diff --git a/README.md b/README.md
index 154df8298fab5ecf322016157858e08cd1bccbe1..8e1a2f5555c5c73ef0f74cef16572640e59b7d4c 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,17 @@
 ---
 license: apache-2.0
 ---
+# LibriSpeech pruned_transducer_stateless7_streaming
+
+This model is based on the icefall `pruned_transducer_stateless7_streaming` recipe,
+but the model parameters are modified to be smaller. It can be considered
+a streaming version of [this model](https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-20M-2023-01-28)
+and follows the same parameter configuration. The exported TorchScript model
+(`exp/cpu_jit.pt`) can be loaded directly with `torch.jit.load`, as sketched below.
+
+## Performance Record
+
+| Decoding method      | test-clean WER (%) | test-other WER (%) |
+|----------------------|--------------------|--------------------|
+| greedy search        | 3.94               | 9.79               |
+| modified beam search | 3.88               | 9.53               |
diff --git a/data/lang_bpe_500/bpe.model b/data/lang_bpe_500/bpe.model
new file mode 100644
index 0000000000000000000000000000000000000000..0a7fdb4e15f063e06d9936c71e13525b31c588e3
--- /dev/null
+++ b/data/lang_bpe_500/bpe.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c53433de083c4a6ad12d034550ef22de68cec62c4f58932a7b6b8b2f1e743fa5
+size 244865
diff --git a/exp/cpu_jit.pt b/exp/cpu_jit.pt
new file mode 100644
index 0000000000000000000000000000000000000000..627b583dfaf1c5acc75c4a3177a44079a1ff9467
--- /dev/null
+++ b/exp/cpu_jit.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:322c5e910063a3c88188e9371d7746209515e783e1fbd4bc10a07b96916a1daf
+size 134186908
diff --git a/exp/pretrained.pt b/exp/pretrained.pt
new file mode 100644
index 0000000000000000000000000000000000000000..81ed3fb5caa0b98aa38aebd0b013907d7d9e67fb
--- /dev/null
+++ b/exp/pretrained.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3222015e7c994de8bc9fd7f03414d6e34e9f81345380d6ee247f55457ac1ff67
+size 82986907
diff --git a/exp/tensorboard/events.out.tfevents.1675637915.r7n07.429298.0 b/exp/tensorboard/events.out.tfevents.1675637915.r7n07.429298.0
new file mode 100644
index 0000000000000000000000000000000000000000..1930a62709a0347f3f4a6a9c42bdff69920c64dc
--- /dev/null
+++ b/exp/tensorboard/events.out.tfevents.1675637915.r7n07.429298.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6139d1abe9b94257a13f6a4357402edcecb1552ca414ae70f2e4c571668093a6
+size 2108670
diff --git a/exp/tensorboard/events.out.tfevents.1675917773.r8n07.149353.0 b/exp/tensorboard/events.out.tfevents.1675917773.r8n07.149353.0
new file mode 100644
index 0000000000000000000000000000000000000000..712c3bd750ee38b029660dc9d59b932931941ac5
--- /dev/null
+++ b/exp/tensorboard/events.out.tfevents.1675917773.r8n07.149353.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cba317a5708a495eff4efc2dbdfd8ef06753f25208fe549291341dbe89ab0d9e
+size 234625
diff --git a/log/greedy_search/errs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt b/log/greedy_search/errs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ba6c0b2d3365796ed2b11b652df9175fbf0665b0
--- /dev/null
+++ b/log/greedy_search/errs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt
@@ -0,0 +1,13026 @@
+%WER = 3.94
+Errors: 243 insertions, 178 deletions, 1651 substitutions, over 52576
reference words (50747 correct) +Search below for sections starting with PER-UTT DETAILS:, SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS: + +PER-UTT DETAILS: corr or (ref->hyp) +1089-134686-0000-1733: HE HOPED THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED (FLOUR FATTENED->FLOWER FAT AND) SAUCE +1089-134686-0001-1734: STUFF IT INTO YOU HIS BELLY COUNSELLED HIM +1089-134686-0002-1735: AFTER EARLY NIGHTFALL THE YELLOW LAMPS WOULD LIGHT UP HERE AND THERE THE SQUALID QUARTER OF THE (BROTHELS->BRAFFLELS) +1089-134686-0003-1736: (HELLO->HALLO) BERTIE ANY GOOD IN YOUR MIND +1089-134686-0004-1737: NUMBER TEN FRESH (NELLY IS->NELLIERS) WAITING ON YOU GOOD NIGHT HUSBAND +1089-134686-0005-1738: THE MUSIC CAME NEARER AND HE RECALLED THE WORDS THE WORDS OF SHELLEY'S FRAGMENT UPON THE MOON WANDERING COMPANIONLESS PALE FOR WEARINESS +1089-134686-0006-1739: THE DULL LIGHT FELL MORE FAINTLY UPON THE PAGE WHEREON ANOTHER EQUATION BEGAN TO UNFOLD ITSELF SLOWLY AND TO SPREAD ABROAD ITS WIDENING TAIL +1089-134686-0007-1740: A COLD LUCID INDIFFERENCE REIGNED IN HIS SOUL +1089-134686-0008-1741: THE CHAOS IN WHICH HIS ARDOUR EXTINGUISHED ITSELF WAS A COLD INDIFFERENT KNOWLEDGE OF HIMSELF +1089-134686-0009-1742: AT MOST BY AN ALMS GIVEN TO A BEGGAR WHOSE BLESSING HE FLED FROM HE MIGHT HOPE WEARILY TO WIN FOR HIMSELF SOME MEASURE OF ACTUAL GRACE +1089-134686-0010-1743: WELL NOW ENNIS I DECLARE YOU HAVE A HEAD AND SO HAS MY STICK +1089-134686-0011-1744: ON SATURDAY MORNINGS WHEN THE SODALITY MET IN THE CHAPEL TO RECITE THE LITTLE OFFICE HIS PLACE WAS A CUSHIONED KNEELING DESK AT THE RIGHT OF THE ALTAR FROM WHICH HE LED HIS WING OF BOYS THROUGH THE RESPONSES +1089-134686-0012-1745: HER EYES SEEMED TO REGARD HIM WITH MILD PITY HER HOLINESS A STRANGE LIGHT GLOWING FAINTLY UPON HER FRAIL FLESH DID NOT HUMILIATE THE SINNER WHO APPROACHED HER +1089-134686-0013-1746: IF EVER HE WAS IMPELLED TO CAST SIN FROM HIM AND TO REPENT THE IMPULSE THAT MOVED HIM WAS THE WISH TO BE HER KNIGHT +1089-134686-0014-1747: HE TRIED TO THINK HOW IT COULD BE +1089-134686-0015-1748: BUT THE DUSK DEEPENING IN THE SCHOOLROOM COVERED OVER HIS THOUGHTS THE BELL RANG +1089-134686-0016-1749: THEN YOU CAN ASK HIM QUESTIONS ON THE CATECHISM DEDALUS +1089-134686-0017-1750: STEPHEN LEANING BACK AND DRAWING IDLY ON HIS SCRIBBLER LISTENED TO THE TALK ABOUT HIM WHICH HERON CHECKED FROM TIME TO TIME BY SAYING +1089-134686-0018-1751: IT WAS STRANGE TOO THAT HE FOUND AN ARID PLEASURE IN FOLLOWING UP TO THE END THE RIGID LINES OF THE DOCTRINES OF THE CHURCH AND PENETRATING INTO OBSCURE SILENCES ONLY TO HEAR AND FEEL THE MORE DEEPLY HIS OWN CONDEMNATION +1089-134686-0019-1752: THE SENTENCE OF SAINT JAMES WHICH SAYS THAT HE WHO OFFENDS AGAINST ONE COMMANDMENT BECOMES GUILTY OF ALL HAD SEEMED TO HIM FIRST A SWOLLEN PHRASE UNTIL HE HAD BEGUN TO GROPE IN THE DARKNESS OF HIS OWN STATE +1089-134686-0020-1753: IF A MAN HAD STOLEN A POUND IN HIS YOUTH AND HAD USED THAT POUND TO AMASS A HUGE FORTUNE HOW MUCH WAS HE OBLIGED TO GIVE BACK THE POUND HE HAD STOLEN ONLY (OR->WERE) THE POUND TOGETHER WITH THE COMPOUND INTEREST ACCRUING UPON IT OR ALL HIS HUGE FORTUNE +1089-134686-0021-1754: IF A LAYMAN IN GIVING BAPTISM POUR THE WATER BEFORE SAYING THE WORDS IS THE CHILD BAPTIZED +1089-134686-0022-1755: HOW COMES IT THAT WHILE THE FIRST (BEATITUDE->BE ATTITUDE) PROMISES THE KINGDOM OF HEAVEN TO THE POOR OF HEART THE SECOND (BEATITUDE->BE ATTITUDE) PROMISES ALSO TO THE MEEK THAT THEY 
SHALL POSSESS THE LAND +1089-134686-0023-1756: WHY WAS THE SACRAMENT OF THE EUCHARIST INSTITUTED UNDER THE TWO SPECIES OF BREAD AND WINE IF JESUS CHRIST BE PRESENT BODY AND BLOOD SOUL AND DIVINITY IN THE BREAD ALONE AND IN THE WINE ALONE +1089-134686-0024-1757: IF THE WINE CHANGE INTO VINEGAR AND THE HOST CRUMBLE INTO CORRUPTION AFTER THEY HAVE BEEN CONSECRATED IS JESUS CHRIST STILL PRESENT UNDER THEIR SPECIES AS GOD AND AS MAN +1089-134686-0025-1758: A GENTLE KICK FROM THE TALL BOY IN THE BENCH BEHIND URGED STEPHEN TO ASK A DIFFICULT QUESTION +1089-134686-0026-1759: THE RECTOR DID NOT ASK FOR A CATECHISM TO HEAR THE LESSON FROM +1089-134686-0027-1760: HE CLASPED HIS HANDS ON THE DESK AND SAID +1089-134686-0028-1761: THE RETREAT WILL BEGIN ON WEDNESDAY AFTERNOON IN (HONOUR->HONOR) OF SAINT FRANCIS (XAVIER->SAVIER) WHOSE FEAST DAY IS SATURDAY +1089-134686-0029-1762: ON FRIDAY CONFESSION WILL BE HEARD ALL THE AFTERNOON AFTER BEADS +1089-134686-0030-1763: BEWARE OF MAKING THAT MISTAKE +1089-134686-0031-1764: STEPHEN'S HEART BEGAN SLOWLY TO FOLD AND FADE WITH FEAR LIKE A WITHERING FLOWER +1089-134686-0032-1765: HE IS CALLED AS YOU KNOW THE APOSTLE OF THE INDIES +1089-134686-0033-1766: A GREAT SAINT SAINT FRANCIS (XAVIER->ZEVIER) +1089-134686-0034-1767: THE RECTOR PAUSED AND THEN SHAKING HIS CLASPED HANDS BEFORE HIM WENT ON +1089-134686-0035-1768: HE HAD THE FAITH IN HIM THAT MOVES MOUNTAINS +1089-134686-0036-1769: A GREAT SAINT SAINT FRANCIS (XAVIER->ZEVIER) +1089-134686-0037-1770: IN THE SILENCE THEIR DARK FIRE KINDLED THE DUSK INTO A TAWNY GLOW +1089-134691-0000-1707: HE COULD WAIT NO LONGER +1089-134691-0001-1708: FOR A FULL HOUR HE HAD PACED UP AND DOWN WAITING BUT HE COULD WAIT NO LONGER +1089-134691-0002-1709: HE SET OFF ABRUPTLY FOR THE BULL WALKING RAPIDLY LEST HIS FATHER'S SHRILL WHISTLE MIGHT CALL HIM BACK AND IN A FEW MOMENTS HE HAD ROUNDED THE CURVE AT THE POLICE BARRACK AND WAS SAFE +1089-134691-0003-1710: THE UNIVERSITY +1089-134691-0004-1711: PRIDE AFTER SATISFACTION UPLIFTED HIM LIKE LONG SLOW WAVES +1089-134691-0005-1712: WHOSE FEET ARE AS THE FEET OF (HARTS->HEARTS) AND UNDERNEATH THE EVERLASTING ARMS +1089-134691-0006-1713: THE PRIDE OF THAT DIM IMAGE BROUGHT BACK TO HIS MIND THE DIGNITY OF THE OFFICE HE HAD REFUSED +1089-134691-0007-1714: SOON THE WHOLE BRIDGE WAS TREMBLING AND RESOUNDING +1089-134691-0008-1715: THE UNCOUTH FACES PASSED HIM TWO BY TWO STAINED YELLOW OR RED OR LIVID BY THE SEA AND AS HE STROVE TO LOOK AT THEM WITH EASE AND INDIFFERENCE A FAINT STAIN OF PERSONAL SHAME AND COMMISERATION ROSE TO HIS OWN FACE +1089-134691-0009-1716: ANGRY WITH HIMSELF HE TRIED TO HIDE HIS FACE FROM THEIR EYES BY GAZING DOWN SIDEWAYS INTO THE SHALLOW SWIRLING WATER UNDER THE BRIDGE BUT HE STILL SAW A REFLECTION THEREIN OF THEIR TOP HEAVY SILK HATS AND HUMBLE TAPE LIKE COLLARS AND LOOSELY HANGING CLERICAL CLOTHES BROTHER (HICKEY->HICKY) +1089-134691-0010-1717: BROTHER (MAC ARDLE->MICARDLE) BROTHER (KEOGH->KIOPH) +1089-134691-0011-1718: THEIR PIETY WOULD BE LIKE THEIR NAMES LIKE THEIR FACES LIKE THEIR CLOTHES AND (IT->*) WAS IDLE FOR HIM TO TELL HIMSELF THAT THEIR HUMBLE AND CONTRITE HEARTS IT MIGHT BE PAID A FAR RICHER TRIBUTE OF DEVOTION THAN HIS HAD EVER BEEN A GIFT TENFOLD MORE ACCEPTABLE THAN HIS ELABORATE ADORATION +1089-134691-0012-1719: IT WAS IDLE FOR HIM TO MOVE HIMSELF TO BE GENEROUS TOWARDS THEM TO TELL HIMSELF THAT IF HE EVER CAME TO THEIR GATES STRIPPED OF HIS PRIDE BEATEN AND IN (BEGGAR'S->BEGGARS) WEEDS THAT THEY WOULD BE GENEROUS TOWARDS HIM LOVING HIM AS 
THEMSELVES +1089-134691-0013-1720: IDLE AND EMBITTERING FINALLY TO ARGUE AGAINST HIS OWN DISPASSIONATE CERTITUDE THAT THE COMMANDMENT OF LOVE BADE US NOT TO LOVE OUR NEIGHBOUR AS OURSELVES WITH THE SAME AMOUNT AND INTENSITY OF LOVE BUT TO LOVE HIM AS OURSELVES WITH THE SAME KIND OF LOVE +1089-134691-0014-1721: THE PHRASE AND THE DAY AND THE SCENE HARMONIZED IN (A CHORD->ACCORD) +1089-134691-0015-1722: WORDS WAS IT THEIR (COLOURS->COLORS) +1089-134691-0016-1723: THEY WERE VOYAGING ACROSS THE DESERTS OF THE SKY A HOST OF NOMADS ON THE MARCH VOYAGING HIGH OVER IRELAND WESTWARD BOUND +1089-134691-0017-1724: THE EUROPE THEY HAD COME FROM LAY OUT THERE BEYOND THE IRISH SEA EUROPE OF STRANGE TONGUES AND (VALLEYED->VALLED) AND (WOODBEGIRT->WOULD BE GIRT) AND (CITADELLED->CITADEL) AND OF ENTRENCHED AND MARSHALLED RACES +1089-134691-0018-1725: AGAIN AGAIN +1089-134691-0019-1726: A VOICE FROM BEYOND THE WORLD WAS CALLING +1089-134691-0020-1727: (HELLO STEPHANOS->HALLO STUFFANOS) HERE COMES THE (DEDALUS->DAEDALUS) +1089-134691-0021-1728: THEIR DIVING STONE POISED ON ITS RUDE SUPPORTS AND ROCKING UNDER THEIR PLUNGES AND THE ROUGH HEWN STONES OF THE SLOPING (BREAKWATER->BRAKE WATER) OVER WHICH THEY SCRAMBLED IN THEIR (HORSEPLAY->HORSE PLAY) GLEAMED WITH COLD WET LUSTRE +1089-134691-0022-1729: HE STOOD STILL IN DEFERENCE TO THEIR CALLS AND PARRIED THEIR BANTER WITH EASY WORDS +1089-134691-0023-1730: IT WAS A PAIN TO SEE THEM AND A SWORD LIKE PAIN TO SEE THE SIGNS OF ADOLESCENCE THAT MADE REPELLENT THEIR PITIABLE NAKEDNESS +1089-134691-0024-1731: (STEPHANOS DEDALOS->STEPHANO'S DEAD LOSS) +1089-134691-0025-1732: A MOMENT BEFORE THE GHOST OF THE ANCIENT KINGDOM OF THE DANES HAD LOOKED FORTH THROUGH THE VESTURE OF THE (HAZEWRAPPED->HAYES WRAPPED) CITY +1188-133604-0000-1771: YOU WILL FIND ME CONTINUALLY SPEAKING OF FOUR MEN TITIAN (HOLBEIN->HOLBINE) TURNER AND (TINTORET->TINCTARETTE) IN ALMOST THE SAME TERMS +1188-133604-0001-1772: (THEY->THE) UNITE EVERY QUALITY AND SOMETIMES YOU WILL FIND ME REFERRING TO THEM AS COLORISTS SOMETIMES AS (CHIAROSCURISTS->KIARASCURISTS) +1188-133604-0002-1773: BY BEING STUDIOUS OF COLOR THEY ARE STUDIOUS OF DIVISION AND WHILE THE (CHIAROSCURIST->CURE SCURUS) DEVOTES HIMSELF TO THE REPRESENTATION OF DEGREES OF FORCE IN ONE THING (UNSEPARATED->ON SEPARATED) LIGHT THE COLORISTS HAVE FOR THEIR FUNCTION THE ATTAINMENT OF BEAUTY BY ARRANGEMENT OF THE DIVISIONS OF LIGHT +1188-133604-0003-1774: MY FIRST AND PRINCIPAL REASON WAS THAT THEY ENFORCED BEYOND ALL RESISTANCE ON ANY STUDENT WHO MIGHT ATTEMPT TO COPY THEM THIS METHOD OF LAYING PORTIONS OF DISTINCT HUE SIDE BY SIDE +1188-133604-0004-1775: SOME OF THE TOUCHES INDEED WHEN THE TINT HAS BEEN MIXED WITH MUCH WATER HAVE BEEN LAID IN LITTLE DROPS OR PONDS SO THAT THE PIGMENT MIGHT CRYSTALLIZE HARD AT THE EDGE +1188-133604-0005-1776: IT IS THE HEAD OF A PARROT WITH A LITTLE FLOWER IN HIS BEAK FROM A PICTURE OF (CARPACCIO'S->CARPATCHIO'S) ONE OF HIS SERIES OF THE LIFE OF SAINT GEORGE +1188-133604-0006-1777: THEN HE COMES TO THE BEAK OF IT +1188-133604-0007-1778: THE BROWN GROUND BENEATH IS LEFT FOR THE MOST PART ONE TOUCH OF BLACK IS PUT FOR THE HOLLOW (TWO->TOO) DELICATE LINES OF DARK (GRAY DEFINE->GREY TO FIND) THE OUTER CURVE AND ONE LITTLE QUIVERING TOUCH OF WHITE DRAWS THE INNER EDGE OF THE MANDIBLE +1188-133604-0008-1779: FOR BELIEVE ME THE FINAL PHILOSOPHY OF ART CAN ONLY RATIFY THEIR OPINION THAT THE BEAUTY OF A COCK ROBIN IS TO BE (RED->READ) AND OF A GRASS PLOT TO BE GREEN AND THE BEST SKILL OF ART IS (IN->AN) INSTANTLY 
SEIZING ON THE MANIFOLD DELICIOUSNESS OF LIGHT WHICH YOU CAN ONLY SEIZE BY PRECISION OF INSTANTANEOUS TOUCH +1188-133604-0009-1780: NOW YOU WILL SEE IN THESE STUDIES THAT THE MOMENT THE (WHITE->WIGHT) IS (INCLOSED->ENCLOSED) PROPERLY AND (HARMONIZED->HARMONIZE) WITH THE OTHER HUES IT BECOMES SOMEHOW MORE PRECIOUS AND PEARLY THAN THE WHITE PAPER AND THAT I AM NOT AFRAID TO LEAVE A WHOLE FIELD OF UNTREATED WHITE PAPER ALL ROUND IT BEING SURE THAT EVEN THE LITTLE DIAMONDS IN THE ROUND WINDOW WILL TELL AS JEWELS IF THEY ARE GRADATED JUSTLY +1188-133604-0010-1781: BUT IN THIS (VIGNETTE->VINEY) COPIED FROM TURNER YOU HAVE THE TWO PRINCIPLES BROUGHT OUT PERFECTLY +1188-133604-0011-1782: THEY ARE BEYOND ALL OTHER WORKS (THAT->THAN) I KNOW EXISTING DEPENDENT FOR THEIR EFFECT ON LOW SUBDUED TONES THEIR FAVORITE CHOICE IN TIME OF DAY BEING EITHER DAWN OR TWILIGHT AND EVEN THEIR BRIGHTEST SUNSETS PRODUCED CHIEFLY OUT OF GRAY PAPER +1188-133604-0012-1783: IT MAY BE THAT A GREAT (COLORIST->COLOR LIST) WILL USE HIS UTMOST FORCE OF COLOR AS A SINGER HIS FULL POWER OF VOICE BUT LOUD OR LOW THE VIRTUE IS IN BOTH CASES ALWAYS IN REFINEMENT NEVER IN LOUDNESS +1188-133604-0013-1784: IT MUST REMEMBER BE ONE OR THE OTHER +1188-133604-0014-1785: DO NOT THEREFORE THINK THAT THE GOTHIC (SCHOOL IS->SCHOOLS) AN EASY ONE +1188-133604-0015-1786: THE LAW OF THAT SCHOOL IS THAT EVERYTHING SHALL BE SEEN CLEARLY OR AT LEAST ONLY IN SUCH MIST OR FAINTNESS AS SHALL BE DELIGHTFUL AND I HAVE NO DOUBT THAT THE BEST INTRODUCTION TO IT WOULD BE THE ELEMENTARY PRACTICE OF PAINTING EVERY STUDY ON A GOLDEN GROUND +1188-133604-0016-1787: THIS AT ONCE COMPELS YOU TO UNDERSTAND THAT THE WORK IS TO BE IMAGINATIVE AND DECORATIVE THAT IT REPRESENTS BEAUTIFUL THINGS IN THE CLEAREST WAY BUT NOT UNDER EXISTING CONDITIONS AND THAT IN FACT YOU ARE PRODUCING (JEWELER'S->JEWELLER'S) WORK RATHER THAN PICTURES +1188-133604-0017-1788: THAT A STYLE IS RESTRAINED OR SEVERE DOES NOT MEAN THAT IT IS ALSO ERRONEOUS +1188-133604-0018-1789: IN ALL EARLY GOTHIC ART INDEED YOU WILL FIND FAILURE OF THIS KIND ESPECIALLY DISTORTION AND RIGIDITY WHICH ARE IN MANY RESPECTS PAINFULLY TO BE COMPARED WITH THE SPLENDID REPOSE OF CLASSIC ART +1188-133604-0019-1790: THE LARGE LETTER CONTAINS INDEED ENTIRELY FEEBLE AND ILL DRAWN FIGURES THAT IS MERELY CHILDISH AND FAILING WORK OF AN INFERIOR HAND IT IS NOT CHARACTERISTIC OF GOTHIC OR ANY OTHER SCHOOL +1188-133604-0020-1791: BUT OBSERVE YOU CAN ONLY DO THIS ON ONE CONDITION THAT OF STRIVING ALSO TO CREATE IN REALITY THE BEAUTY WHICH YOU SEEK IN IMAGINATION +1188-133604-0021-1792: IT WILL BE WHOLLY IMPOSSIBLE FOR YOU TO RETAIN THE TRANQUILLITY OF TEMPER AND FELICITY OF FAITH NECESSARY FOR NOBLE (PURIST->PUREST) PAINTING UNLESS YOU ARE ACTIVELY ENGAGED IN PROMOTING THE FELICITY AND PEACE OF PRACTICAL LIFE +1188-133604-0022-1793: YOU MUST LOOK AT HIM IN THE FACE FIGHT HIM CONQUER HIM WITH WHAT (SCATHE->SCATH) YOU MAY YOU NEED NOT THINK TO KEEP OUT OF THE WAY OF HIM +1188-133604-0023-1794: THE (COLORIST->CHOLERAIST) SAYS FIRST OF ALL AS MY DELICIOUS (PAROQUET->PERICE) WAS RUBY SO THIS NASTY VIPER SHALL BE BLACK AND THEN IS THE QUESTION CAN I ROUND HIM OFF EVEN THOUGH HE IS BLACK AND MAKE HIM SLIMY AND YET SPRINGY AND CLOSE DOWN CLOTTED LIKE A POOL OF BLACK BLOOD ON THE EARTH ALL THE SAME +1188-133604-0024-1795: NOTHING WILL BE MORE PRECIOUS TO YOU I THINK IN THE PRACTICAL STUDY OF ART THAN THE CONVICTION WHICH WILL FORCE ITSELF ON YOU MORE AND MORE EVERY HOUR OF THE WAY ALL THINGS ARE BOUND TOGETHER LITTLE AND GREAT IN 
SPIRIT AND IN MATTER +1188-133604-0025-1796: YOU KNOW (I HAVE->I'VE) JUST BEEN TELLING YOU HOW THIS SCHOOL OF MATERIALISM (AND->IN) CLAY INVOLVED ITSELF AT LAST IN CLOUD AND FIRE +1188-133604-0026-1797: HERE IS AN EQUALLY TYPICAL GREEK SCHOOL LANDSCAPE BY WILSON LOST WHOLLY IN GOLDEN MIST THE TREES SO SLIGHTLY DRAWN THAT YOU DON'T KNOW IF THEY ARE TREES OR TOWERS AND NO CARE FOR COLOR (WHATEVER->WHATSOEVER) PERFECTLY DECEPTIVE AND (MARVELOUS->MARVELLOUS) EFFECT OF SUNSHINE THROUGH THE MIST APOLLO (AND->IN) THE PYTHON +1188-133604-0027-1798: NOW HERE IS RAPHAEL EXACTLY BETWEEN THE TWO TREES STILL DRAWN LEAF BY LEAF (WHOLLY->HOLY) FORMAL BUT BEAUTIFUL MIST COMING GRADUALLY INTO THE DISTANCE +1188-133604-0028-1799: WELL THEN LAST HERE IS (TURNER'S->TURNERS) GREEK SCHOOL OF THE HIGHEST CLASS AND YOU DEFINE HIS ART ABSOLUTELY AS FIRST THE DISPLAYING INTENSELY AND WITH THE STERNEST INTELLECT OF NATURAL FORM AS IT IS AND THEN THE ENVELOPMENT OF IT WITH CLOUD AND FIRE +1188-133604-0029-1800: ONLY THERE ARE TWO SORTS OF CLOUD (AND->IN) FIRE +1188-133604-0030-1801: HE KNOWS THEM BOTH +1188-133604-0031-1802: THERE'S ONE AND THERE'S ANOTHER THE DUDLEY AND THE FLINT +1188-133604-0032-1803: IT IS ONLY A PENCIL OUTLINE BY EDWARD (BURNE->BYRNE) JONES IN ILLUSTRATION OF THE STORY OF PSYCHE IT IS THE INTRODUCTION OF PSYCHE AFTER ALL HER TROUBLES (INTO->AND TO) HEAVEN +1188-133604-0033-1804: EVERY PLANT IN THE GRASS IS SET (FORMALLY->FORMERLY) GROWS PERFECTLY AND MAY BE REALIZED COMPLETELY +1188-133604-0034-1805: EXQUISITE ORDER AND UNIVERSAL WITH ETERNAL LIFE AND LIGHT THIS IS THE FAITH AND EFFORT OF THE SCHOOLS OF (CRYSTAL->CRISTEL) AND YOU MAY DESCRIBE AND COMPLETE THEIR WORK QUITE LITERALLY BY TAKING ANY VERSES OF CHAUCER IN HIS TENDER MOOD (AND->IN) OBSERVING HOW HE INSISTS ON THE CLEARNESS AND BRIGHTNESS FIRST AND THEN ON THE ORDER +1188-133604-0035-1806: THUS IN CHAUCER'S DREAM +1188-133604-0036-1807: IN BOTH THESE HIGH MYTHICAL SUBJECTS THE SURROUNDING NATURE THOUGH SUFFERING IS STILL DIGNIFIED AND BEAUTIFUL +1188-133604-0037-1808: EVERY LINE IN WHICH THE MASTER TRACES IT EVEN WHERE SEEMINGLY NEGLIGENT IS LOVELY AND SET DOWN WITH A MEDITATIVE CALMNESS WHICH MAKES THESE TWO ETCHINGS CAPABLE OF BEING PLACED BESIDE THE MOST TRANQUIL WORK OF (HOLBEIN->HOLBINE) OR (DUERER->DURE) +1188-133604-0038-1809: BUT NOW HERE IS A SUBJECT OF WHICH YOU WILL WONDER AT FIRST WHY TURNER DREW IT AT ALL +1188-133604-0039-1810: IT HAS NO BEAUTY WHATSOEVER NO SPECIALTY OF PICTURESQUENESS (AND->IN) ALL ITS LINES ARE CRAMPED AND POOR +1188-133604-0040-1811: THE CRAMPNESS AND THE POVERTY ARE ALL INTENDED +1188-133604-0041-1812: IT IS A GLEANER BRINGING DOWN HER ONE SHEAF OF CORN TO AN OLD (WATERMILL->WATER MILL) ITSELF MOSSY AND RENT SCARCELY ABLE TO GET ITS STONES TO TURN +1188-133604-0042-1813: THE SCENE IS ABSOLUTELY ARCADIAN +1188-133604-0043-1814: SEE THAT YOUR (LIVES->LIES) BE IN NOTHING WORSE THAN A BOY'S CLIMBING FOR HIS ENTANGLED KITE +1188-133604-0044-1815: IT WILL BE WELL FOR YOU IF YOU JOIN NOT WITH THOSE WHO INSTEAD OF KITES FLY FALCONS WHO INSTEAD OF OBEYING THE LAST WORDS OF THE GREAT CLOUD SHEPHERD TO FEED HIS SHEEP LIVE THE LIVES HOW MUCH LESS THAN VANITY OF THE WAR WOLF AND THE (GIER EAGLE->GEAREAGLE) +121-121726-0000-2558: ALSO A POPULAR CONTRIVANCE WHEREBY LOVE MAKING MAY BE SUSPENDED BUT NOT STOPPED DURING THE PICNIC SEASON +121-121726-0001-2559: (HARANGUE->HARANG) THE TIRESOME PRODUCT OF A TIRELESS TONGUE +121-121726-0002-2560: ANGOR PAIN PAINFUL TO HEAR +121-121726-0003-2561: HAY FEVER A HEART TROUBLE 
CAUSED BY FALLING IN LOVE WITH A GRASS WIDOW +121-121726-0004-2562: HEAVEN A GOOD PLACE TO BE RAISED TO +121-121726-0005-2563: HEDGE (A FENCE->OFFENCE) +121-121726-0006-2564: HEREDITY THE CAUSE OF ALL OUR FAULTS +121-121726-0007-2565: HORSE SENSE A DEGREE OF WISDOM THAT KEEPS ONE FROM BETTING ON THE RACES +121-121726-0008-2566: HOSE MAN'S EXCUSE FOR WETTING THE WALK +121-121726-0009-2567: HOTEL A PLACE WHERE A GUEST OFTEN GIVES UP GOOD DOLLARS FOR POOR QUARTERS +121-121726-0010-2568: (HOUSECLEANING->HOUSE CLEANING) A DOMESTIC UPHEAVAL THAT MAKES IT EASY FOR THE GOVERNMENT TO ENLIST ALL THE SOLDIERS IT NEEDS +121-121726-0011-2569: HUSBAND THE NEXT THING TO A WIFE +121-121726-0012-2570: HUSSY WOMAN AND BOND TIE +121-121726-0013-2571: TIED TO A WOMAN +121-121726-0014-2572: HYPOCRITE A HORSE DEALER +121-123852-0000-2615: THOSE PRETTY WRONGS THAT LIBERTY COMMITS WHEN I AM (SOMETIME->SOME TIME) ABSENT FROM THY HEART THY BEAUTY AND THY YEARS (FULL->FALL) WELL BEFITS FOR STILL TEMPTATION FOLLOWS WHERE THOU ART +121-123852-0001-2616: (AY->I) ME +121-123852-0002-2617: NO MATTER THEN ALTHOUGH MY FOOT DID STAND UPON THE FARTHEST EARTH (REMOV'D->REMOVED) FROM THEE FOR NIMBLE THOUGHT CAN JUMP BOTH SEA AND LAND AS SOON AS THINK THE PLACE WHERE HE WOULD BE BUT AH +121-123852-0003-2618: THOUGHT KILLS ME THAT I AM NOT (THOUGHT->BOUGHT) TO LEAP LARGE LENGTHS OF MILES WHEN THOU ART GONE BUT THAT SO MUCH OF EARTH AND WATER WROUGHT I MUST ATTEND TIME'S LEISURE WITH MY MOAN RECEIVING (NOUGHT->NOT) BY ELEMENTS SO SLOW BUT HEAVY TEARS BADGES OF EITHER'S WOE +121-123852-0004-2619: MY HEART DOTH PLEAD THAT THOU IN HIM DOST LIE A CLOSET NEVER (PIERC'D->PIERCED) WITH CRYSTAL EYES BUT THE DEFENDANT DOTH THAT PLEA DENY AND SAYS IN HIM THY FAIR APPEARANCE LIES +121-123859-0000-2573: YOU ARE MY ALL THE WORLD AND I MUST STRIVE TO KNOW MY SHAMES AND PRAISES FROM YOUR TONGUE NONE ELSE TO ME NOR I TO NONE ALIVE THAT MY (STEEL'D->STEELED) SENSE OR CHANGES RIGHT OR WRONG +121-123859-0001-2574: (O->OH) TIS THE FIRST TIS FLATTERY IN MY SEEING AND MY GREAT MIND MOST KINGLY DRINKS IT UP MINE EYE WELL KNOWS WHAT WITH HIS GUST IS (GREEING->GREEN) AND TO HIS PALATE DOTH PREPARE THE CUP IF IT BE (POISON'D->POISONED) TIS THE LESSER SIN THAT MINE EYE LOVES IT AND DOTH FIRST BEGIN +121-123859-0002-2575: BUT RECKONING TIME WHOSE (MILLION'D->MILLIONED) ACCIDENTS CREEP IN TWIXT VOWS AND CHANGE DECREES OF KINGS TAN SACRED BEAUTY BLUNT THE (SHARP'ST INTENTS->SHARPEST INTENSE) DIVERT STRONG MINDS TO THE COURSE OF ALTERING THINGS ALAS WHY FEARING OF TIME'S TYRANNY MIGHT I NOT THEN SAY NOW I LOVE YOU BEST WHEN I WAS CERTAIN (O'ER INCERTAINTY->OR IN CERTAINTY) CROWNING THE PRESENT DOUBTING OF THE REST +121-123859-0003-2576: LOVE IS A BABE THEN MIGHT I NOT SAY SO TO GIVE FULL GROWTH TO THAT WHICH STILL DOTH GROW +121-123859-0004-2577: SO I RETURN (REBUK'D->REBUKED) TO MY CONTENT AND GAIN BY ILL THRICE MORE THAN I HAVE SPENT +121-127105-0000-2578: IT WAS THIS OBSERVATION THAT DREW FROM DOUGLAS NOT IMMEDIATELY BUT LATER IN THE EVENING A REPLY THAT HAD THE INTERESTING CONSEQUENCE TO WHICH I CALL ATTENTION +121-127105-0001-2579: (SOMEONE->SOME ONE) ELSE TOLD A STORY NOT PARTICULARLY EFFECTIVE WHICH I SAW HE WAS NOT FOLLOWING +121-127105-0002-2580: CRIED ONE OF THE WOMEN HE TOOK NO NOTICE OF HER HE LOOKED AT ME BUT AS IF INSTEAD OF ME HE SAW WHAT HE SPOKE OF +121-127105-0003-2581: THERE WAS A UNANIMOUS GROAN AT THIS AND MUCH REPROACH AFTER WHICH IN HIS PREOCCUPIED WAY HE EXPLAINED +121-127105-0004-2582: THE (STORY'S->STORIES) WRITTEN 
+121-127105-0005-2583: (I COULD WRITE->THY GOOD RIGHT) TO MY MAN AND ENCLOSE THE KEY HE COULD SEND DOWN THE PACKET AS HE FINDS IT +121-127105-0006-2584: THE OTHERS RESENTED POSTPONEMENT BUT IT WAS JUST HIS SCRUPLES THAT CHARMED ME +121-127105-0007-2585: TO THIS HIS ANSWER WAS PROMPT OH THANK GOD NO AND IS THE RECORD YOURS +121-127105-0008-2586: HE HUNG FIRE AGAIN A WOMAN'S +121-127105-0009-2587: SHE HAS BEEN DEAD THESE TWENTY YEARS +121-127105-0010-2588: SHE SENT ME THE PAGES IN QUESTION BEFORE SHE DIED +121-127105-0011-2589: SHE WAS THE MOST AGREEABLE WOMAN I'VE EVER KNOWN IN HER POSITION SHE WOULD HAVE BEEN WORTHY OF ANY WHATEVER +121-127105-0012-2590: (IT WASN'T->TWASN'T) SIMPLY THAT SHE SAID SO BUT THAT I KNEW SHE HADN'T I WAS SURE I COULD SEE +121-127105-0013-2591: YOU'LL EASILY JUDGE WHY WHEN YOU HEAR BECAUSE THE THING HAD BEEN SUCH A SCARE HE CONTINUED TO FIX ME +121-127105-0014-2592: YOU ARE ACUTE +121-127105-0015-2593: HE QUITTED THE FIRE AND DROPPED BACK INTO HIS CHAIR +121-127105-0016-2594: PROBABLY NOT TILL THE SECOND POST +121-127105-0017-2595: IT WAS ALMOST THE TONE OF HOPE EVERYBODY WILL STAY +121-127105-0018-2596: CRIED THE LADIES WHOSE DEPARTURE HAD BEEN FIXED +121-127105-0019-2597: MISSUS GRIFFIN HOWEVER EXPRESSED THE NEED FOR (A->*) LITTLE MORE LIGHT +121-127105-0020-2598: WHO WAS IT SHE WAS IN LOVE WITH THE STORY WILL TELL I TOOK UPON MYSELF TO REPLY OH I CAN'T WAIT FOR THE STORY THE STORY WON'T TELL SAID DOUGLAS NOT IN ANY LITERAL VULGAR WAY MORE'S THE PITY THEN +121-127105-0021-2599: WON'T YOU TELL DOUGLAS +121-127105-0022-2600: (WELL->FOR) IF I DON'T KNOW WHO SHE WAS IN LOVE WITH I KNOW WHO HE WAS +121-127105-0023-2601: LET ME SAY HERE DISTINCTLY TO HAVE DONE WITH IT THAT THIS NARRATIVE FROM AN EXACT TRANSCRIPT OF MY OWN MADE MUCH LATER IS WHAT I SHALL PRESENTLY GIVE +121-127105-0024-2602: POOR DOUGLAS BEFORE HIS DEATH WHEN IT WAS IN SIGHT COMMITTED TO ME THE MANUSCRIPT THAT REACHED HIM ON THE THIRD OF THESE DAYS AND THAT ON THE SAME SPOT WITH IMMENSE EFFECT HE BEGAN TO READ TO OUR HUSHED LITTLE CIRCLE ON THE NIGHT OF THE FOURTH +121-127105-0025-2603: THE DEPARTING LADIES WHO HAD SAID THEY WOULD STAY DIDN'T OF COURSE THANK HEAVEN STAY THEY DEPARTED IN CONSEQUENCE OF ARRANGEMENTS MADE IN A RAGE OF CURIOSITY AS THEY PROFESSED PRODUCED BY THE TOUCHES WITH WHICH HE HAD ALREADY WORKED US UP +121-127105-0026-2604: THE FIRST OF THESE TOUCHES CONVEYED THAT THE WRITTEN STATEMENT TOOK UP THE TALE AT A POINT AFTER IT HAD IN A MANNER BEGUN +121-127105-0027-2605: HE HAD FOR HIS OWN TOWN RESIDENCE A BIG HOUSE FILLED WITH THE SPOILS OF TRAVEL AND THE TROPHIES OF THE CHASE BUT IT WAS TO HIS COUNTRY HOME AN OLD FAMILY PLACE IN ESSEX THAT HE WISHED HER IMMEDIATELY TO PROCEED +121-127105-0028-2606: THE AWKWARD THING WAS THAT THEY HAD PRACTICALLY NO OTHER RELATIONS AND THAT HIS OWN AFFAIRS TOOK UP ALL HIS TIME +121-127105-0029-2607: THERE WERE PLENTY OF PEOPLE TO HELP BUT OF COURSE THE YOUNG LADY WHO SHOULD GO DOWN AS GOVERNESS WOULD BE IN SUPREME AUTHORITY +121-127105-0030-2608: I DON'T ANTICIPATE +121-127105-0031-2609: SHE WAS YOUNG UNTRIED NERVOUS IT WAS A VISION OF SERIOUS DUTIES (AND->IN) LITTLE COMPANY OF REALLY GREAT LONELINESS +121-127105-0032-2610: YES BUT THAT'S JUST THE BEAUTY OF HER PASSION +121-127105-0033-2611: IT WAS THE BEAUTY OF IT +121-127105-0034-2612: IT SOUNDED DULL (IT->THAT) SOUNDED STRANGE AND ALL THE MORE SO BECAUSE OF HIS MAIN CONDITION WHICH WAS +121-127105-0035-2613: SHE PROMISED TO DO THIS AND SHE MENTIONED TO ME THAT WHEN FOR A MOMENT DISBURDENED DELIGHTED 
HE HELD HER HAND THANKING HER FOR THE SACRIFICE SHE ALREADY FELT REWARDED +121-127105-0036-2614: BUT WAS THAT ALL HER REWARD ONE OF THE LADIES ASKED +1221-135766-0000-1305: HOW STRANGE IT SEEMED TO THE SAD WOMAN AS SHE WATCHED THE GROWTH AND THE BEAUTY THAT BECAME EVERY DAY MORE BRILLIANT AND THE INTELLIGENCE THAT THREW ITS QUIVERING SUNSHINE OVER THE TINY FEATURES OF THIS CHILD +1221-135766-0001-1306: GOD AS A DIRECT CONSEQUENCE OF THE SIN WHICH MAN THUS PUNISHED HAD GIVEN HER A LOVELY CHILD WHOSE PLACE WAS ON THAT SAME (DISHONOURED->DISHONORED) BOSOM TO CONNECT HER PARENT FOR EVER WITH THE RACE AND DESCENT OF MORTALS AND TO BE FINALLY A BLESSED SOUL IN HEAVEN +1221-135766-0002-1307: YET THESE THOUGHTS AFFECTED HESTER PRYNNE LESS WITH HOPE THAN APPREHENSION +1221-135766-0003-1308: THE CHILD HAD A NATIVE GRACE WHICH DOES NOT INVARIABLY (CO EXIST->COEXIST) WITH FAULTLESS BEAUTY ITS ATTIRE HOWEVER SIMPLE ALWAYS IMPRESSED THE BEHOLDER AS IF IT WERE THE VERY GARB THAT PRECISELY BECAME IT BEST +1221-135766-0004-1309: THIS OUTWARD MUTABILITY INDICATED AND DID NOT MORE THAN FAIRLY EXPRESS THE VARIOUS PROPERTIES OF HER INNER LIFE +1221-135766-0005-1310: HESTER COULD ONLY ACCOUNT FOR THE CHILD'S CHARACTER AND EVEN THEN MOST VAGUELY AND IMPERFECTLY BY RECALLING WHAT SHE HERSELF HAD BEEN DURING THAT MOMENTOUS PERIOD WHILE PEARL WAS IMBIBING HER SOUL FROM THE SPIRITUAL WORLD AND HER BODILY FRAME FROM ITS MATERIAL OF EARTH +1221-135766-0006-1311: THEY WERE NOW ILLUMINATED BY THE MORNING RADIANCE OF A YOUNG CHILD'S DISPOSITION BUT LATER IN THE DAY OF EARTHLY EXISTENCE MIGHT BE PROLIFIC OF THE STORM AND WHIRLWIND +1221-135766-0007-1312: HESTER PRYNNE NEVERTHELESS THE LOVING MOTHER OF THIS ONE CHILD RAN LITTLE RISK OF ERRING ON THE SIDE OF UNDUE SEVERITY +1221-135766-0008-1313: MINDFUL HOWEVER OF HER OWN ERRORS AND MISFORTUNES SHE EARLY SOUGHT TO IMPOSE A TENDER BUT STRICT CONTROL OVER THE INFANT IMMORTALITY THAT WAS COMMITTED TO HER CHARGE +1221-135766-0009-1314: AS TO ANY OTHER KIND OF DISCIPLINE WHETHER ADDRESSED TO HER MIND OR HEART LITTLE PEARL MIGHT OR MIGHT NOT BE WITHIN ITS REACH IN ACCORDANCE WITH THE CAPRICE THAT RULED THE MOMENT +1221-135766-0010-1315: IT WAS A LOOK SO INTELLIGENT YET INEXPLICABLE PERVERSE SOMETIMES SO MALICIOUS BUT GENERALLY ACCOMPANIED BY A WILD FLOW OF SPIRITS THAT HESTER COULD NOT HELP QUESTIONING AT SUCH MOMENTS WHETHER PEARL WAS A HUMAN CHILD +1221-135766-0011-1316: BEHOLDING IT HESTER WAS CONSTRAINED TO RUSH TOWARDS THE CHILD TO PURSUE THE LITTLE ELF IN THE FLIGHT WHICH SHE INVARIABLY BEGAN TO SNATCH HER TO HER BOSOM WITH A CLOSE PRESSURE AND EARNEST KISSES NOT SO MUCH FROM OVERFLOWING LOVE AS TO ASSURE HERSELF THAT PEARL WAS FLESH AND BLOOD AND NOT UTTERLY DELUSIVE +1221-135766-0012-1317: BROODING OVER ALL THESE MATTERS THE MOTHER FELT LIKE ONE WHO HAS EVOKED A SPIRIT BUT BY SOME IRREGULARITY IN THE PROCESS OF CONJURATION HAS FAILED TO WIN THE MASTER WORD THAT SHOULD CONTROL THIS NEW AND INCOMPREHENSIBLE INTELLIGENCE +1221-135766-0013-1318: PEARL WAS A BORN OUTCAST OF THE INFANTILE WORLD +1221-135766-0014-1319: PEARL (SAW->SAUL) AND GAZED INTENTLY BUT NEVER SOUGHT TO MAKE ACQUAINTANCE +1221-135766-0015-1320: IF SPOKEN TO SHE WOULD NOT SPEAK AGAIN +1221-135767-0000-1280: HESTER PRYNNE WENT ONE DAY TO THE MANSION OF GOVERNOR BELLINGHAM WITH A PAIR OF GLOVES WHICH SHE HAD FRINGED AND EMBROIDERED TO HIS ORDER AND WHICH WERE TO BE WORN ON SOME GREAT OCCASION OF STATE FOR THOUGH THE CHANCES OF A POPULAR ELECTION HAD CAUSED THIS FORMER RULER TO DESCEND A STEP OR TWO FROM THE 
HIGHEST RANK HE STILL HELD AN (HONOURABLE->HONORABLE) AND INFLUENTIAL PLACE AMONG THE COLONIAL MAGISTRACY +1221-135767-0001-1281: ANOTHER AND FAR MORE IMPORTANT REASON THAN THE DELIVERY OF A PAIR OF EMBROIDERED GLOVES IMPELLED HESTER AT THIS TIME TO SEEK AN INTERVIEW WITH A PERSONAGE OF SO MUCH POWER AND ACTIVITY IN THE AFFAIRS OF THE SETTLEMENT +1221-135767-0002-1282: AT THAT EPOCH OF PRISTINE SIMPLICITY HOWEVER MATTERS OF EVEN SLIGHTER PUBLIC INTEREST AND OF FAR LESS INTRINSIC WEIGHT THAN THE WELFARE OF HESTER AND HER CHILD WERE STRANGELY MIXED UP WITH THE DELIBERATIONS OF LEGISLATORS AND ACTS OF STATE +1221-135767-0003-1283: THE PERIOD WAS HARDLY IF AT ALL EARLIER THAN THAT OF OUR STORY WHEN A DISPUTE CONCERNING THE RIGHT OF PROPERTY IN A PIG NOT ONLY CAUSED A FIERCE AND BITTER CONTEST IN THE LEGISLATIVE BODY OF THE COLONY BUT RESULTED IN AN IMPORTANT MODIFICATION OF THE FRAMEWORK ITSELF OF THE LEGISLATURE +1221-135767-0004-1284: WE HAVE SPOKEN OF (PEARL'S->PEARLS) RICH AND LUXURIANT BEAUTY A BEAUTY THAT SHONE WITH DEEP AND VIVID TINTS A BRIGHT COMPLEXION EYES POSSESSING INTENSITY BOTH OF DEPTH AND GLOW AND HAIR ALREADY OF A DEEP GLOSSY BROWN AND WHICH IN AFTER YEARS WOULD BE NEARLY AKIN TO BLACK +1221-135767-0005-1285: IT WAS THE SCARLET LETTER IN ANOTHER FORM THE SCARLET LETTER ENDOWED WITH LIFE +1221-135767-0006-1286: THE MOTHER HERSELF AS IF THE RED IGNOMINY WERE SO DEEPLY SCORCHED INTO HER BRAIN THAT ALL HER CONCEPTIONS ASSUMED ITS FORM HAD CAREFULLY WROUGHT OUT THE SIMILITUDE LAVISHING MANY HOURS OF MORBID INGENUITY TO CREATE AN ANALOGY BETWEEN THE OBJECT OF HER AFFECTION AND THE EMBLEM OF HER GUILT AND TORTURE +1221-135767-0007-1287: BUT IN TRUTH PEARL WAS THE ONE AS WELL AS THE OTHER AND ONLY IN CONSEQUENCE OF THAT IDENTITY HAD HESTER CONTRIVED SO PERFECTLY TO REPRESENT THE SCARLET LETTER IN HER APPEARANCE +1221-135767-0008-1288: COME THEREFORE AND LET US FLING MUD AT THEM +1221-135767-0009-1289: BUT PEARL WHO WAS A DAUNTLESS CHILD AFTER FROWNING STAMPING HER FOOT AND SHAKING HER LITTLE HAND WITH A VARIETY OF THREATENING GESTURES SUDDENLY MADE A RUSH AT THE KNOT OF HER ENEMIES AND PUT THEM ALL TO FLIGHT +1221-135767-0010-1290: SHE SCREAMED AND SHOUTED TOO WITH A TERRIFIC VOLUME OF SOUND WHICH DOUBTLESS CAUSED THE HEARTS OF THE FUGITIVES TO QUAKE WITHIN THEM +1221-135767-0011-1291: IT WAS FURTHER DECORATED WITH STRANGE AND SEEMINGLY CABALISTIC FIGURES AND DIAGRAMS SUITABLE TO THE QUAINT TASTE OF THE AGE WHICH HAD BEEN DRAWN IN THE STUCCO WHEN NEWLY LAID ON AND HAD NOW GROWN HARD AND DURABLE FOR THE ADMIRATION OF AFTER TIMES +1221-135767-0012-1292: THEY APPROACHED THE DOOR WHICH WAS OF AN ARCHED FORM AND FLANKED ON EACH SIDE BY A NARROW TOWER OR PROJECTION OF THE EDIFICE IN BOTH OF WHICH WERE LATTICE WINDOWS THE WOODEN SHUTTERS TO CLOSE OVER THEM AT NEED +1221-135767-0013-1293: LIFTING THE IRON HAMMER THAT HUNG AT THE PORTAL HESTER PRYNNE GAVE A SUMMONS WHICH WAS ANSWERED BY ONE OF THE GOVERNOR'S BOND (SERVANT->SERVANTS) A FREE BORN ENGLISHMAN BUT NOW A SEVEN YEARS SLAVE +1221-135767-0014-1294: YEA HIS HONOURABLE WORSHIP IS WITHIN BUT HE HATH A GODLY MINISTER OR TWO WITH HIM AND LIKEWISE A LEECH +1221-135767-0015-1295: YE MAY NOT SEE HIS WORSHIP NOW +1221-135767-0016-1296: WITH MANY VARIATIONS SUGGESTED BY THE NATURE OF HIS BUILDING MATERIALS DIVERSITY OF CLIMATE AND A DIFFERENT MODE OF SOCIAL LIFE GOVERNOR BELLINGHAM HAD PLANNED HIS NEW HABITATION AFTER THE RESIDENCES OF GENTLEMEN OF (FAIR ESTATE->FAIREST STATE) IN HIS NATIVE LAND +1221-135767-0017-1297: ON THE TABLE IN TOKEN 
THAT THE SENTIMENT OF OLD ENGLISH HOSPITALITY HAD NOT BEEN LEFT BEHIND STOOD A LARGE PEWTER TANKARD AT THE BOTTOM OF WHICH HAD HESTER OR PEARL PEEPED INTO IT THEY MIGHT HAVE SEEN THE FROTHY REMNANT OF A RECENT DRAUGHT OF ALE +1221-135767-0018-1298: LITTLE PEARL WHO WAS AS GREATLY PLEASED WITH THE GLEAMING (ARMOUR->ARMOR) AS SHE HAD BEEN WITH THE GLITTERING (FRONTISPIECE->FRONTESPIECE) OF THE HOUSE SPENT SOME TIME LOOKING INTO THE POLISHED MIRROR OF THE BREASTPLATE +1221-135767-0019-1299: MOTHER CRIED SHE I SEE YOU HERE LOOK LOOK +1221-135767-0020-1300: IN TRUTH SHE SEEMED ABSOLUTELY HIDDEN BEHIND IT +1221-135767-0021-1301: PEARL ACCORDINGLY RAN TO THE BOW WINDOW AT THE FURTHER END OF THE HALL AND LOOKED ALONG THE VISTA OF A GARDEN WALK CARPETED WITH CLOSELY SHAVEN GRASS AND BORDERED WITH SOME RUDE AND (IMMATURE->IMMITOR) ATTEMPT AT SHRUBBERY +1221-135767-0022-1302: BUT THE PROPRIETOR APPEARED ALREADY TO HAVE RELINQUISHED AS HOPELESS THE EFFORT TO PERPETUATE ON THIS SIDE OF THE ATLANTIC IN A HARD SOIL AND AMID THE CLOSE STRUGGLE FOR SUBSISTENCE THE NATIVE ENGLISH TASTE FOR ORNAMENTAL GARDENING +1221-135767-0023-1303: THERE WERE A FEW ROSE BUSHES HOWEVER AND A NUMBER OF APPLE TREES PROBABLY THE DESCENDANTS OF THOSE PLANTED BY THE REVEREND MISTER BLACKSTONE THE FIRST SETTLER OF THE PENINSULA THAT HALF MYTHOLOGICAL PERSONAGE WHO RIDES THROUGH OUR EARLY ANNALS SEATED ON THE BACK OF A BULL +1221-135767-0024-1304: PEARL SEEING THE ROSE BUSHES BEGAN TO CRY FOR A RED ROSE AND WOULD NOT BE PACIFIED +1284-1180-0000-829: HE WORE BLUE SILK STOCKINGS BLUE (KNEE PANTS->KNEEP HANDS) WITH GOLD BUCKLES A BLUE RUFFLED WAIST AND A JACKET OF BRIGHT BLUE BRAIDED WITH GOLD +1284-1180-0001-830: HIS HAT HAD A PEAKED CROWN (AND->IN) A FLAT BRIM AND AROUND THE BRIM WAS A ROW OF TINY GOLDEN BELLS THAT TINKLED WHEN HE MOVED +1284-1180-0002-831: INSTEAD OF SHOES THE OLD (MAN->MEN) WORE BOOTS WITH (TURNOVER->TURN OVER) TOPS AND HIS BLUE COAT HAD WIDE CUFFS OF GOLD BRAID +1284-1180-0003-832: FOR A LONG TIME HE HAD WISHED TO EXPLORE THE BEAUTIFUL LAND OF OZ IN WHICH THEY LIVED +1284-1180-0004-833: WHEN THEY WERE OUTSIDE (UNC->UN) SIMPLY LATCHED THE DOOR AND STARTED UP THE PATH +1284-1180-0005-834: NO ONE WOULD DISTURB THEIR LITTLE HOUSE EVEN IF (ANYONE->ANY ONE) CAME SO FAR INTO THE THICK FOREST WHILE THEY WERE GONE +1284-1180-0006-835: AT THE FOOT OF THE MOUNTAIN THAT SEPARATED THE COUNTRY OF THE MUNCHKINS FROM THE COUNTRY OF THE (GILLIKINS->GYLICANS) THE PATH DIVIDED +1284-1180-0007-836: HE KNEW IT WOULD TAKE THEM TO THE HOUSE OF THE CROOKED MAGICIAN WHOM HE HAD NEVER SEEN BUT WHO WAS (THEIR->THERE) NEAREST (NEIGHBOR->NEIGHBOUR) +1284-1180-0008-837: ALL THE MORNING THEY TRUDGED UP THE MOUNTAIN PATH AND AT NOON (UNC AND->UNCAN) OJO SAT ON A FALLEN TREE TRUNK AND ATE THE LAST OF THE BREAD WHICH THE OLD MUNCHKIN HAD PLACED IN HIS POCKET +1284-1180-0009-838: THEN THEY STARTED ON AGAIN AND TWO HOURS LATER CAME IN SIGHT OF THE HOUSE OF DOCTOR PIPT +1284-1180-0010-839: (UNC KNOCKED AT->UNCONOCTED) THE DOOR OF THE HOUSE (AND A->INTO) CHUBBY PLEASANT FACED WOMAN DRESSED ALL IN BLUE OPENED IT AND GREETED THE VISITORS WITH A SMILE +1284-1180-0011-840: I AM MY DEAR AND ALL STRANGERS ARE WELCOME TO MY HOME +1284-1180-0012-841: WE HAVE COME FROM (A FAR->AFAR) LONELIER PLACE THAN THIS A LONELIER PLACE +1284-1180-0013-842: AND YOU MUST BE OJO THE UNLUCKY SHE ADDED +1284-1180-0014-843: OJO HAD NEVER EATEN SUCH A FINE MEAL IN ALL HIS LIFE +1284-1180-0015-844: WE ARE (TRAVELING->TRAVELLING) REPLIED OJO AND WE STOPPED AT YOUR HOUSE JUST 
(TO->A) REST AND REFRESH OURSELVES +1284-1180-0016-845: THE WOMAN SEEMED THOUGHTFUL +1284-1180-0017-846: AT ONE END STOOD A GREAT FIREPLACE IN WHICH A BLUE LOG WAS BLAZING WITH A BLUE FLAME AND OVER THE FIRE HUNG FOUR KETTLES IN A ROW ALL BUBBLING AND STEAMING AT A GREAT RATE +1284-1180-0018-847: IT TAKES ME SEVERAL YEARS TO MAKE THIS MAGIC POWDER BUT AT THIS MOMENT I AM PLEASED TO SAY IT IS NEARLY DONE YOU SEE I AM MAKING IT FOR MY GOOD WIFE MARGOLOTTE WHO WANTS TO USE SOME OF IT FOR A PURPOSE OF HER OWN +1284-1180-0019-848: YOU MUST KNOW SAID MARGOLOTTE WHEN THEY WERE ALL SEATED TOGETHER ON THE BROAD WINDOW SEAT THAT MY HUSBAND FOOLISHLY GAVE AWAY ALL THE POWDER OF LIFE HE FIRST MADE TO OLD (MOMBI->MOMBY) THE WITCH WHO USED TO LIVE IN THE COUNTRY OF THE (GILLIKINS->GILLICKINS) TO THE NORTH OF HERE +1284-1180-0020-849: THE FIRST LOT WE TESTED ON OUR GLASS CAT WHICH NOT ONLY BEGAN TO LIVE BUT HAS LIVED EVER SINCE +1284-1180-0021-850: I THINK THE NEXT GLASS CAT THE MAGICIAN MAKES WILL HAVE NEITHER BRAINS NOR HEART FOR THEN IT WILL NOT OBJECT TO CATCHING MICE AND (MAY->THEY) PROVE OF SOME USE TO US +1284-1180-0022-851: (I'M->I AM) AFRAID I DON'T KNOW MUCH ABOUT THE LAND OF OZ +1284-1180-0023-852: YOU SEE (I'VE->I HAVE) LIVED ALL MY LIFE WITH UNC NUNKIE THE SILENT ONE AND THERE WAS NO ONE TO TELL ME ANYTHING +1284-1180-0024-853: THAT IS ONE REASON YOU ARE OJO THE UNLUCKY SAID THE WOMAN IN (A->*) SYMPATHETIC TONE +1284-1180-0025-854: I THINK I MUST SHOW YOU MY PATCHWORK GIRL SAID MARGOLOTTE LAUGHING AT THE BOY'S ASTONISHMENT FOR SHE IS RATHER DIFFICULT TO EXPLAIN +1284-1180-0026-855: BUT FIRST I WILL TELL YOU THAT (FOR->FROM) MANY YEARS I HAVE LONGED FOR A SERVANT TO HELP ME WITH THE HOUSEWORK AND TO (COOK->COPE) THE MEALS AND WASH THE DISHES +1284-1180-0027-856: YET THAT TASK WAS NOT SO EASY AS YOU MAY SUPPOSE +1284-1180-0028-857: A BED QUILT MADE OF PATCHES OF DIFFERENT KINDS AND (COLORS->COLLARS) OF CLOTH ALL NEATLY SEWED TOGETHER +1284-1180-0029-858: SOMETIMES IT IS CALLED A CRAZY QUILT BECAUSE THE PATCHES AND COLORS ARE SO MIXED UP +1284-1180-0030-859: WHEN I FOUND IT I SAID TO MYSELF THAT IT WOULD DO NICELY FOR MY SERVANT GIRL FOR WHEN SHE WAS BROUGHT TO LIFE SHE WOULD NOT BE PROUD NOR HAUGHTY AS THE GLASS CAT IS FOR SUCH A DREADFUL MIXTURE OF (COLORS->COLOURS) WOULD DISCOURAGE HER FROM TRYING TO BE AS DIGNIFIED AS THE BLUE MUNCHKINS ARE +1284-1180-0031-860: AT THE EMERALD CITY WHERE OUR PRINCESS (OZMA->OSMO) LIVES GREEN IS THE POPULAR COLOR +1284-1180-0032-861: I WILL SHOW YOU WHAT A GOOD JOB I DID AND SHE WENT TO A TALL CUPBOARD AND THREW OPEN THE DOORS +1284-1181-0000-807: OJO EXAMINED THIS CURIOUS CONTRIVANCE WITH WONDER +1284-1181-0001-808: (MARGOLOTTE->MARGOLOT) HAD FIRST MADE THE GIRL'S FORM FROM THE PATCHWORK QUILT AND THEN SHE HAD DRESSED IT WITH A PATCHWORK SKIRT AND AN APRON WITH POCKETS IN IT USING THE SAME (GAY->GAME) MATERIAL THROUGHOUT +1284-1181-0002-809: THE HEAD OF THE PATCHWORK GIRL WAS THE MOST CURIOUS PART OF HER +1284-1181-0003-810: THE HAIR WAS OF BROWN YARN AND HUNG DOWN ON HER NECK (IN->AND) SEVERAL NEAT BRAIDS +1284-1181-0004-811: GOLD IS THE MOST COMMON (METAL->MEDAL) IN THE LAND OF OZ AND IS USED FOR MANY PURPOSES BECAUSE IT IS SOFT AND PLIABLE +1284-1181-0005-812: NO I FORGOT ALL ABOUT THE BRAINS EXCLAIMED THE WOMAN +1284-1181-0006-813: WELL THAT MAY BE TRUE AGREED MARGOLOTTE BUT ON THE CONTRARY A SERVANT WITH TOO MUCH BRAINS IS SURE TO BECOME INDEPENDENT AND HIGH AND MIGHTY AND FEEL ABOVE HER WORK +1284-1181-0007-814: SHE POURED INTO THE DISH A QUANTITY FROM 
EACH OF THESE BOTTLES +1284-1181-0008-815: I THINK THAT WILL DO SHE CONTINUED FOR THE OTHER QUALITIES ARE NOT NEEDED IN A SERVANT +1284-1181-0009-816: SHE RAN TO HER HUSBAND'S SIDE AT ONCE AND HELPED HIM LIFT THE FOUR KETTLES FROM THE FIRE +1284-1181-0010-817: THEIR CONTENTS HAD ALL BOILED AWAY LEAVING IN THE BOTTOM OF EACH KETTLE A FEW GRAINS OF FINE WHITE POWDER +1284-1181-0011-818: VERY CAREFULLY THE MAGICIAN REMOVED THIS POWDER PLACING IT (ALL TOGETHER->ALTOGETHER) IN A GOLDEN DISH WHERE HE MIXED IT WITH A GOLDEN SPOON +1284-1181-0012-819: NO ONE SAW HIM DO THIS FOR ALL WERE LOOKING AT THE POWDER OF LIFE BUT SOON THE WOMAN REMEMBERED WHAT SHE HAD BEEN DOING AND CAME BACK TO THE CUPBOARD +1284-1181-0013-820: OJO BECAME A BIT UNEASY AT THIS FOR HE HAD ALREADY PUT QUITE A LOT OF THE CLEVERNESS POWDER IN THE DISH BUT HE DARED NOT INTERFERE AND SO HE COMFORTED HIMSELF WITH THE THOUGHT THAT ONE CANNOT HAVE TOO MUCH CLEVERNESS +1284-1181-0014-821: HE SELECTED A SMALL GOLD BOTTLE WITH A PEPPER BOX TOP SO THAT THE POWDER MIGHT BE SPRINKLED ON ANY OBJECT THROUGH THE SMALL HOLES +1284-1181-0015-822: MOST PEOPLE TALK TOO MUCH SO IT IS A RELIEF TO FIND ONE WHO TALKS TOO LITTLE +1284-1181-0016-823: I AM NOT ALLOWED TO PERFORM MAGIC EXCEPT FOR MY OWN AMUSEMENT HE TOLD HIS VISITORS AS HE LIGHTED A PIPE WITH A CROOKED STEM AND BEGAN TO SMOKE +1284-1181-0017-824: THE WIZARD OF OZ WHO USED TO BE A HUMBUG AND KNEW NO MAGIC AT ALL HAS BEEN TAKING LESSONS OF GLINDA AND I'M TOLD HE IS GETTING TO BE A PRETTY GOOD WIZARD BUT HE IS MERELY THE ASSISTANT OF THE GREAT SORCERESS +1284-1181-0018-825: IT TRULY IS ASSERTED THE MAGICIAN +1284-1181-0019-826: I NOW USE THEM AS ORNAMENTAL STATUARY IN MY GARDEN +1284-1181-0020-827: DEAR ME WHAT A (CHATTERBOX YOU'RE->CHATTER BOX ARE) GETTING TO BE (UNC->YUNK) REMARKED THE MAGICIAN WHO WAS PLEASED WITH THE COMPLIMENT +1284-1181-0021-828: ASKED THE VOICE IN SCORNFUL ACCENTS +1284-134647-0000-862: THE GRATEFUL APPLAUSE OF THE CLERGY HAS CONSECRATED THE MEMORY OF A PRINCE WHO INDULGED THEIR PASSIONS AND PROMOTED THEIR INTEREST +1284-134647-0001-863: THE EDICT OF MILAN THE GREAT CHARTER OF TOLERATION HAD CONFIRMED TO EACH INDIVIDUAL OF THE ROMAN WORLD THE PRIVILEGE OF CHOOSING AND PROFESSING HIS OWN RELIGION +1284-134647-0002-864: BUT THIS INESTIMABLE PRIVILEGE WAS SOON VIOLATED WITH THE KNOWLEDGE OF TRUTH THE EMPEROR (IMBIBED->IBED) THE MAXIMS OF PERSECUTION AND THE SECTS WHICH DISSENTED FROM THE CATHOLIC CHURCH WERE AFFLICTED AND OPPRESSED BY THE TRIUMPH OF CHRISTIANITY +1284-134647-0003-865: CONSTANTINE EASILY BELIEVED THAT THE HERETICS WHO PRESUMED TO DISPUTE HIS OPINIONS OR TO OPPOSE HIS COMMANDS WERE GUILTY OF THE MOST ABSURD AND CRIMINAL OBSTINACY AND THAT A SEASONABLE APPLICATION OF MODERATE SEVERITIES MIGHT SAVE THOSE UNHAPPY MEN FROM THE DANGER OF AN EVERLASTING CONDEMNATION +1284-134647-0004-866: SOME OF THE PENAL REGULATIONS WERE COPIED FROM THE EDICTS OF DIOCLETIAN AND THIS METHOD OF CONVERSION WAS APPLAUDED BY THE SAME BISHOPS WHO HAD (FELT->FELLED) THE HAND OF OPPRESSION AND PLEADED FOR THE RIGHTS OF HUMANITY +1284-134647-0005-867: THEY ASSERTED WITH CONFIDENCE AND ALMOST WITH EXULTATION THAT THE APOSTOLICAL SUCCESSION WAS INTERRUPTED THAT ALL THE BISHOPS OF EUROPE AND ASIA WERE INFECTED BY THE CONTAGION OF GUILT AND SCHISM AND THAT THE PREROGATIVES OF THE CATHOLIC CHURCH WERE CONFINED TO THE CHOSEN PORTION OF THE AFRICAN BELIEVERS WHO ALONE HAD PRESERVED INVIOLATE THE INTEGRITY OF THEIR FAITH AND DISCIPLINE +1284-134647-0006-868: BISHOPS VIRGINS AND EVEN SPOTLESS 
INFANTS WERE SUBJECTED TO THE DISGRACE OF A PUBLIC PENANCE BEFORE THEY COULD BE ADMITTED TO THE COMMUNION OF THE DONATISTS +1284-134647-0007-869: (PROSCRIBED->PRESCRIBED) BY THE CIVIL AND ECCLESIASTICAL POWERS OF THE EMPIRE THE (DONATISTS->DONATIST) STILL MAINTAINED IN SOME PROVINCES PARTICULARLY IN (NUMIDIA->MEDIA) THEIR SUPERIOR NUMBERS AND FOUR HUNDRED BISHOPS ACKNOWLEDGED THE JURISDICTION OF THEIR PRIMATE +1320-122612-0000-120: SINCE THE PERIOD OF OUR TALE THE ACTIVE SPIRIT OF THE COUNTRY HAS SURROUNDED IT WITH A BELT OF RICH AND THRIVING SETTLEMENTS THOUGH NONE BUT THE HUNTER OR THE SAVAGE IS EVER KNOWN EVEN NOW TO PENETRATE ITS WILD RECESSES +1320-122612-0001-121: THE DEWS WERE SUFFERED TO EXHALE AND THE SUN HAD DISPERSED THE MISTS AND WAS SHEDDING A STRONG AND CLEAR LIGHT IN THE FOREST WHEN THE (TRAVELERS->TRAVELLERS) RESUMED THEIR JOURNEY +1320-122612-0002-122: AFTER PROCEEDING A FEW MILES THE PROGRESS OF HAWKEYE WHO LED THE ADVANCE BECAME MORE DELIBERATE AND WATCHFUL +1320-122612-0003-123: HE OFTEN STOPPED TO EXAMINE THE TREES NOR DID HE CROSS A RIVULET WITHOUT ATTENTIVELY CONSIDERING THE QUANTITY THE VELOCITY AND THE (COLOR->COLOUR) OF ITS WATERS +1320-122612-0004-124: DISTRUSTING HIS OWN JUDGMENT HIS APPEALS TO THE OPINION OF CHINGACHGOOK WERE FREQUENT AND EARNEST +1320-122612-0005-125: YET HERE ARE WE (WITHIN->WITH AN) A SHORT RANGE OF THE (SCAROONS->SCARONS) AND NOT A SIGN OF A TRAIL HAVE WE CROSSED +1320-122612-0006-126: LET US RETRACE OUR STEPS AND EXAMINE AS WE GO WITH KEENER EYES +1320-122612-0007-127: (CHINGACHGOOK->INGACHGOOK) HAD CAUGHT THE LOOK AND MOTIONING WITH HIS HAND HE BADE HIM SPEAK +1320-122612-0008-128: THE EYES OF THE WHOLE PARTY FOLLOWED THE UNEXPECTED MOVEMENT AND READ THEIR SUCCESS IN THE AIR OF TRIUMPH THAT THE YOUTH ASSUMED +1320-122612-0009-129: IT WOULD HAVE BEEN MORE WONDERFUL HAD HE SPOKEN WITHOUT A BIDDING +1320-122612-0010-130: SEE SAID UNCAS POINTING NORTH AND SOUTH AT THE EVIDENT MARKS OF THE BROAD TRAIL ON EITHER SIDE OF HIM THE DARK HAIR HAS GONE TOWARD THE FOREST +1320-122612-0011-131: IF A ROCK OR A RIVULET OR A BIT OF EARTH HARDER THAN COMMON SEVERED THE LINKS OF THE (CLEW->CLUE) THEY FOLLOWED THE TRUE EYE OF THE SCOUT RECOVERED THEM AT A DISTANCE AND SELDOM RENDERED THE DELAY OF A SINGLE MOMENT NECESSARY +1320-122612-0012-132: EXTINGUISHED BRANDS WERE LYING AROUND A SPRING THE OFFALS OF A DEER WERE SCATTERED ABOUT THE PLACE AND THE TREES BORE EVIDENT MARKS OF HAVING BEEN BROWSED BY THE HORSES +1320-122612-0013-133: A CIRCLE OF A FEW HUNDRED FEET IN CIRCUMFERENCE WAS DRAWN AND EACH OF THE PARTY TOOK A SEGMENT FOR HIS PORTION +1320-122612-0014-134: THE EXAMINATION HOWEVER RESULTED IN NO DISCOVERY +1320-122612-0015-135: THE WHOLE PARTY CROWDED TO THE SPOT WHERE UNCAS POINTED OUT THE IMPRESSION OF A MOCCASIN IN THE MOIST (ALLUVION->ALLUVIAN) +1320-122612-0016-136: RUN BACK UNCAS AND BRING ME THE SIZE OF THE SINGER'S FOOT +1320-122617-0000-78: NOTWITHSTANDING THE HIGH RESOLUTION OF HAWKEYE HE FULLY COMPREHENDED ALL THE DIFFICULTIES AND DANGER HE WAS ABOUT TO INCUR +1320-122617-0001-79: IN HIS RETURN TO THE CAMP HIS ACUTE AND PRACTISED INTELLECTS WERE INTENTLY ENGAGED IN DEVISING MEANS TO COUNTERACT A WATCHFULNESS AND SUSPICION ON THE PART OF HIS ENEMIES THAT HE KNEW WERE IN NO DEGREE INFERIOR TO HIS OWN +1320-122617-0002-80: IN OTHER WORDS WHILE HE HAD IMPLICIT FAITH IN THE ABILITY OF (BALAAM'S->BALEM'S) ASS TO SPEAK HE WAS SOMEWHAT (SKEPTICAL->SCEPTICAL) ON THE SUBJECT OF A BEAR'S SINGING AND YET HE HAD BEEN ASSURED OF THE LATTER ON THE 
TESTIMONY OF HIS OWN EXQUISITE ORGANS +1320-122617-0003-81: THERE WAS SOMETHING IN HIS AIR AND MANNER THAT BETRAYED TO THE SCOUT THE UTTER CONFUSION OF THE STATE OF HIS MIND +1320-122617-0004-82: THE INGENIOUS HAWKEYE WHO RECALLED THE HASTY MANNER IN WHICH THE OTHER HAD ABANDONED HIS POST AT THE BEDSIDE OF THE SICK WOMAN WAS NOT WITHOUT HIS SUSPICIONS CONCERNING THE SUBJECT OF SO MUCH SOLEMN DELIBERATION +1320-122617-0005-83: THE BEAR SHOOK HIS SHAGGY SIDES AND THEN A WELL KNOWN VOICE REPLIED +1320-122617-0006-84: CAN THESE THINGS BE RETURNED DAVID BREATHING MORE FREELY AS THE TRUTH BEGAN TO DAWN UPON HIM +1320-122617-0007-85: COME COME RETURNED HAWKEYE UNCASING HIS HONEST COUNTENANCE THE BETTER TO ASSURE THE WAVERING CONFIDENCE OF HIS COMPANION YOU MAY SEE A SKIN WHICH IF IT BE NOT AS WHITE AS ONE OF THE GENTLE ONES HAS NO TINGE OF RED TO IT THAT THE WINDS OF THE HEAVEN AND THE SUN HAVE NOT BESTOWED NOW LET US TO BUSINESS +1320-122617-0008-86: THE YOUNG MAN IS IN BONDAGE AND MUCH I FEAR HIS DEATH IS DECREED +1320-122617-0009-87: I GREATLY (MOURN->MOURNED) THAT ONE SO WELL DISPOSED SHOULD DIE IN HIS IGNORANCE AND I HAVE SOUGHT A GOODLY HYMN CAN YOU LEAD ME TO HIM +1320-122617-0010-88: THE TASK WILL NOT BE DIFFICULT RETURNED DAVID HESITATING THOUGH I GREATLY FEAR YOUR PRESENCE WOULD RATHER INCREASE THAN MITIGATE HIS UNHAPPY FORTUNES +1320-122617-0011-89: THE LODGE IN WHICH UNCAS WAS CONFINED WAS IN THE VERY (CENTER->CENTRE) OF THE VILLAGE AND IN A SITUATION PERHAPS MORE DIFFICULT THAN ANY OTHER TO APPROACH OR LEAVE WITHOUT OBSERVATION +1320-122617-0012-90: FOUR OR FIVE OF THE LATTER ONLY LINGERED ABOUT THE DOOR OF THE PRISON OF UNCAS WARY BUT CLOSE OBSERVERS OF THE MANNER OF THEIR CAPTIVE +1320-122617-0013-91: DELIVERED IN A STRONG TONE OF ASSENT ANNOUNCED THE GRATIFICATION THE SAVAGE WOULD RECEIVE (IN->AND) WITNESSING SUCH AN EXHIBITION OF WEAKNESS (IN->AND) AN ENEMY SO LONG HATED AND SO MUCH FEARED +1320-122617-0014-92: THEY DREW BACK A LITTLE FROM THE ENTRANCE AND MOTIONED TO THE SUPPOSED (CONJURER->CONJUROR) TO ENTER +1320-122617-0015-93: BUT THE BEAR INSTEAD OF OBEYING MAINTAINED THE SEAT IT HAD TAKEN AND GROWLED +1320-122617-0016-94: THE CUNNING MAN IS AFRAID THAT HIS BREATH WILL BLOW UPON HIS BROTHERS AND TAKE AWAY THEIR COURAGE TOO CONTINUED DAVID IMPROVING THE HINT HE RECEIVED THEY MUST STAND FURTHER OFF +1320-122617-0017-95: THEN AS IF SATISFIED OF THEIR SAFETY THE SCOUT LEFT HIS POSITION AND SLOWLY ENTERED THE PLACE +1320-122617-0018-96: IT WAS SILENT AND GLOOMY BEING TENANTED SOLELY BY THE CAPTIVE AND LIGHTED BY THE DYING EMBERS OF A FIRE WHICH HAD BEEN USED FOR THE (PURPOSED->PURPOSE) OF COOKERY +1320-122617-0019-97: UNCAS OCCUPIED A DISTANT CORNER IN A RECLINING ATTITUDE BEING RIGIDLY BOUND BOTH HANDS AND FEET BY STRONG AND PAINFUL (WITHES->WIDTHS) +1320-122617-0020-98: THE SCOUT WHO HAD LEFT DAVID AT THE DOOR TO ASCERTAIN THEY WERE NOT OBSERVED THOUGHT IT PRUDENT TO PRESERVE HIS DISGUISE UNTIL ASSURED OF THEIR PRIVACY +1320-122617-0021-99: WHAT SHALL WE DO WITH THE MINGOES AT THE DOOR THEY COUNT SIX AND (THIS->THE) SINGER IS AS GOOD AS NOTHING +1320-122617-0022-100: THE DELAWARES ARE CHILDREN OF THE TORTOISE AND (THEY OUTSTRIP->THE OUTSTRIPPED) THE DEER +1320-122617-0023-101: UNCAS WHO HAD ALREADY APPROACHED THE DOOR IN READINESS TO LEAD THE WAY NOW RECOILED AND PLACED HIMSELF ONCE MORE IN THE BOTTOM OF THE LODGE +1320-122617-0024-102: BUT HAWKEYE WHO WAS TOO MUCH OCCUPIED WITH HIS OWN THOUGHTS TO NOTE THE MOVEMENT CONTINUED SPEAKING MORE TO HIMSELF THAN TO HIS COMPANION 
+1320-122617-0025-103: SO UNCAS YOU HAD BETTER TAKE THE LEAD WHILE I WILL PUT ON THE SKIN AGAIN AND TRUST TO CUNNING FOR WANT OF SPEED +1320-122617-0026-104: WELL WHAT CAN'T BE DONE BY MAIN COURAGE (IN->AND) WAR MUST BE DONE BY CIRCUMVENTION +1320-122617-0027-105: AS SOON AS THESE DISPOSITIONS WERE MADE THE SCOUT TURNED TO DAVID AND GAVE HIM HIS PARTING INSTRUCTIONS +1320-122617-0028-106: MY PURSUITS ARE PEACEFUL AND MY TEMPER I HUMBLY TRUST IS GREATLY GIVEN TO MERCY AND LOVE RETURNED DAVID A LITTLE NETTLED AT SO DIRECT AN ATTACK ON HIS MANHOOD BUT THERE ARE NONE WHO CAN SAY THAT I HAVE EVER FORGOTTEN MY FAITH IN THE LORD EVEN IN THE GREATEST STRAITS +1320-122617-0029-107: IF YOU ARE NOT THEN KNOCKED ON THE HEAD YOUR BEING A (NON COMPOSSER->NONCOMPOSTER) WILL PROTECT YOU AND YOU'LL THEN HAVE A GOOD REASON TO EXPECT TO DIE IN YOUR BED +1320-122617-0030-108: (SO CHOOSE->SUSE) FOR YOURSELF TO MAKE A RUSH OR TARRY HERE +1320-122617-0031-109: BRAVELY AND GENEROUSLY HAS HE BATTLED IN MY BEHALF AND THIS AND MORE WILL I DARE IN HIS SERVICE +1320-122617-0032-110: KEEP SILENT AS LONG AS MAY BE AND IT WOULD BE WISE WHEN YOU DO SPEAK TO BREAK OUT SUDDENLY IN ONE OF YOUR SHOUTINGS WHICH WILL SERVE TO REMIND THE INDIANS THAT YOU ARE NOT ALTOGETHER AS RESPONSIBLE AS MEN SHOULD BE +1320-122617-0033-111: IF HOWEVER THEY TAKE YOUR SCALP AS I TRUST AND BELIEVE THEY WILL NOT DEPEND ON IT UNCAS AND I WILL NOT FORGET THE DEED BUT REVENGE IT AS BECOMES TRUE WARRIORS AND TRUSTY FRIENDS +1320-122617-0034-112: HOLD SAID DAVID PERCEIVING THAT WITH THIS ASSURANCE THEY WERE ABOUT TO LEAVE HIM I AM AN UNWORTHY AND HUMBLE FOLLOWER OF ONE WHO TAUGHT NOT THE DAMNABLE PRINCIPLE OF REVENGE +1320-122617-0035-113: THEN HEAVING A HEAVY SIGH PROBABLY AMONG THE LAST HE EVER DREW IN PINING FOR A CONDITION HE HAD SO LONG ABANDONED HE ADDED IT IS WHAT I WOULD WISH TO PRACTISE MYSELF AS ONE WITHOUT A CROSS OF BLOOD THOUGH IT IS NOT ALWAYS EASY TO DEAL WITH AN INDIAN AS YOU WOULD WITH A FELLOW CHRISTIAN +1320-122617-0036-114: GOD BLESS YOU FRIEND I DO BELIEVE YOUR (SCENT IS->SIN HAS) NOT GREATLY WRONG WHEN THE MATTER IS DULY CONSIDERED AND KEEPING ETERNITY BEFORE THE EYES THOUGH MUCH DEPENDS ON THE NATURAL GIFTS (AND->IN) THE FORCE OF TEMPTATION +1320-122617-0037-115: THE DELAWARE DOG HE SAID LEANING FORWARD AND PEERING THROUGH THE DIM LIGHT TO CATCH THE EXPRESSION OF THE OTHER'S FEATURES IS HE AFRAID +1320-122617-0038-116: WILL THE HURONS HEAR HIS GROANS +1320-122617-0039-117: THE (MOHICAN->MOHICANS) STARTED ON HIS FEET AND SHOOK HIS SHAGGY COVERING AS THOUGH THE ANIMAL HE COUNTERFEITED WAS ABOUT TO MAKE SOME DESPERATE EFFORT +1320-122617-0040-118: HE HAD NO OCCASION TO DELAY FOR AT THE NEXT INSTANT A BURST OF CRIES FILLED THE OUTER AIR AND RAN ALONG THE WHOLE EXTENT OF THE VILLAGE +1320-122617-0041-119: UNCAS CAST HIS SKIN AND STEPPED FORTH IN HIS OWN BEAUTIFUL PROPORTIONS +1580-141083-0000-1949: I WILL ENDEAVOUR IN MY STATEMENT TO AVOID SUCH TERMS AS WOULD SERVE TO LIMIT THE EVENTS TO ANY PARTICULAR PLACE OR GIVE A CLUE AS TO THE PEOPLE CONCERNED +1580-141083-0001-1950: I HAD ALWAYS KNOWN HIM TO BE RESTLESS IN HIS MANNER BUT ON THIS PARTICULAR OCCASION HE WAS IN SUCH A STATE OF UNCONTROLLABLE AGITATION THAT IT WAS CLEAR SOMETHING VERY UNUSUAL HAD OCCURRED +1580-141083-0002-1951: MY FRIEND'S TEMPER HAD NOT IMPROVED SINCE HE HAD BEEN DEPRIVED OF THE CONGENIAL SURROUNDINGS OF BAKER STREET +1580-141083-0003-1952: WITHOUT HIS (SCRAPBOOKS->SCRAP BOOKS) HIS CHEMICALS AND HIS HOMELY UNTIDINESS HE WAS AN UNCOMFORTABLE MAN 
+1580-141083-0004-1953: I HAD TO READ IT OVER CAREFULLY AS THE TEXT MUST BE ABSOLUTELY CORRECT +1580-141083-0005-1954: I WAS ABSENT RATHER MORE THAN AN HOUR +1580-141083-0006-1955: THE ONLY DUPLICATE WHICH EXISTED SO FAR AS I KNEW WAS THAT WHICH BELONGED TO MY SERVANT (BANNISTER->BANISTER) A MAN WHO HAS LOOKED AFTER MY ROOM FOR TEN YEARS AND WHOSE HONESTY IS ABSOLUTELY ABOVE SUSPICION +1580-141083-0007-1956: THE MOMENT I LOOKED AT MY TABLE I WAS AWARE THAT (SOMEONE->SOME ONE) HAD RUMMAGED AMONG MY PAPERS +1580-141083-0008-1957: THE PROOF WAS IN THREE LONG SLIPS I HAD LEFT THEM ALL TOGETHER +1580-141083-0009-1958: (THE ALTERNATIVE->THEY ALL TURNED OF) WAS THAT (SOMEONE->SOME ONE) PASSING HAD OBSERVED THE KEY IN THE DOOR HAD KNOWN THAT I WAS OUT AND HAD ENTERED TO LOOK AT THE PAPERS +1580-141083-0010-1959: I GAVE HIM A LITTLE BRANDY AND LEFT HIM COLLAPSED IN A CHAIR WHILE I MADE A MOST CAREFUL EXAMINATION OF THE ROOM +1580-141083-0011-1960: A BROKEN TIP OF LEAD WAS LYING THERE ALSO +1580-141083-0012-1961: NOT ONLY THIS BUT ON THE TABLE I FOUND A SMALL BALL OF BLACK DOUGH OR CLAY WITH SPECKS OF SOMETHING WHICH LOOKS LIKE SAWDUST IN IT +1580-141083-0013-1962: ABOVE ALL THINGS I DESIRE TO SETTLE THE MATTER QUIETLY AND DISCREETLY +1580-141083-0014-1963: TO THE BEST OF MY BELIEF THEY WERE ROLLED UP +1580-141083-0015-1964: DID (ANYONE->ANY ONE) KNOW THAT THESE PROOFS WOULD BE THERE NO ONE SAVE THE PRINTER +1580-141083-0016-1965: I WAS IN SUCH A HURRY TO COME TO YOU YOU LEFT YOUR DOOR OPEN +1580-141083-0017-1966: SO IT SEEMS TO ME +1580-141083-0018-1967: NOW MISTER (SOAMES->SOLMES) AT YOUR DISPOSAL +1580-141083-0019-1968: ABOVE WERE THREE STUDENTS ONE ON EACH STORY +1580-141083-0020-1969: THEN HE APPROACHED IT AND STANDING ON TIPTOE WITH HIS NECK CRANED HE LOOKED INTO THE ROOM +1580-141083-0021-1970: THERE IS NO OPENING EXCEPT THE ONE (PANE->PAIN) SAID OUR LEARNED GUIDE +1580-141083-0022-1971: I AM AFRAID THERE ARE NO SIGNS HERE SAID HE +1580-141083-0023-1972: ONE COULD HARDLY HOPE FOR ANY UPON SO DRY A DAY +1580-141083-0024-1973: YOU LEFT HIM IN A CHAIR YOU SAY WHICH CHAIR BY THE WINDOW THERE +1580-141083-0025-1974: THE (MAN->MEN) ENTERED AND TOOK THE PAPERS SHEET BY SHEET FROM THE CENTRAL TABLE +1580-141083-0026-1975: AS A MATTER OF FACT HE COULD NOT SAID (SOAMES->SOLMES) FOR I ENTERED BY THE SIDE DOOR +1580-141083-0027-1976: HOW LONG WOULD IT TAKE HIM TO DO THAT USING EVERY POSSIBLE CONTRACTION A QUARTER OF AN HOUR NOT LESS +1580-141083-0028-1977: THEN HE TOSSED IT DOWN AND SEIZED THE NEXT +1580-141083-0029-1978: HE WAS IN THE MIDST OF THAT WHEN YOUR RETURN CAUSED HIM TO MAKE A VERY HURRIED RETREAT VERY HURRIED SINCE HE HAD NOT TIME TO REPLACE THE PAPERS WHICH WOULD TELL YOU THAT HE HAD BEEN THERE +1580-141083-0030-1979: MISTER (SOAMES->PSALMS) WAS SOMEWHAT OVERWHELMED BY THIS FLOOD OF INFORMATION +1580-141083-0031-1980: HOLMES HELD OUT A SMALL CHIP WITH THE LETTERS N N AND A SPACE OF CLEAR WOOD AFTER THEM YOU SEE +1580-141083-0032-1981: WATSON I HAVE ALWAYS DONE YOU (AN->AND) INJUSTICE THERE ARE OTHERS +1580-141083-0033-1982: I WAS HOPING THAT IF THE PAPER ON WHICH HE WROTE WAS THIN SOME TRACE OF IT MIGHT COME THROUGH UPON THIS POLISHED SURFACE NO I SEE NOTHING +1580-141083-0034-1983: AS HOLMES DREW THE CURTAIN I WAS AWARE FROM SOME LITTLE RIGIDITY AND (ALERTNESS->INERTNESS) OF HIS ATTITUDE THAT HE WAS PREPARED FOR AN EMERGENCY +1580-141083-0035-1984: HOLMES TURNED AWAY AND STOOPED SUDDENLY TO THE FLOOR (HALLOA WHAT'S->HULLO WHAT IS) THIS +1580-141083-0036-1985: HOLMES HELD IT OUT ON HIS OPEN 
PALM IN THE GLARE OF THE ELECTRIC LIGHT +1580-141083-0037-1986: WHAT COULD HE DO HE CAUGHT UP EVERYTHING WHICH WOULD BETRAY HIM AND HE RUSHED INTO YOUR BEDROOM TO CONCEAL HIMSELF +1580-141083-0038-1987: I UNDERSTAND YOU TO SAY THAT THERE ARE THREE STUDENTS WHO USE THIS (STAIR->STARE) AND ARE IN THE HABIT OF PASSING YOUR DOOR YES THERE ARE +1580-141083-0039-1988: AND THEY ARE ALL IN FOR (THIS->THE) EXAMINATION YES +1580-141083-0040-1989: ONE HARDLY LIKES TO THROW SUSPICION WHERE THERE ARE NO PROOFS +1580-141083-0041-1990: LET US (HEAR->SEE) THE SUSPICIONS I WILL LOOK AFTER THE PROOFS +1580-141083-0042-1991: MY SCHOLAR HAS BEEN LEFT VERY POOR BUT HE IS HARD WORKING AND INDUSTRIOUS HE WILL DO WELL +1580-141083-0043-1992: THE TOP FLOOR BELONGS TO (MILES->MYLES) MC LAREN +1580-141083-0044-1993: I DARE NOT GO SO FAR AS THAT BUT OF THE THREE HE IS PERHAPS THE LEAST UNLIKELY +1580-141083-0045-1994: HE WAS STILL SUFFERING FROM THIS SUDDEN DISTURBANCE OF THE QUIET ROUTINE OF HIS LIFE +1580-141083-0046-1995: BUT I HAVE OCCASIONALLY DONE THE SAME THING AT OTHER TIMES +1580-141083-0047-1996: DID YOU LOOK AT THESE PAPERS ON THE TABLE +1580-141083-0048-1997: HOW CAME YOU TO LEAVE THE KEY IN THE DOOR +1580-141083-0049-1998: (ANYONE->ANY ONE) IN THE ROOM COULD GET OUT YES SIR +1580-141083-0050-1999: I (*->HAVE) REALLY DON'T THINK HE KNEW MUCH ABOUT IT MISTER HOLMES +1580-141083-0051-2000: ONLY FOR A MINUTE OR SO +1580-141083-0052-2001: OH I WOULD NOT VENTURE TO SAY SIR +1580-141083-0053-2002: YOU HAVEN'T SEEN ANY OF THEM NO SIR +1580-141084-0000-2003: IT WAS THE INDIAN WHOSE DARK SILHOUETTE APPEARED SUDDENLY UPON HIS BLIND +1580-141084-0001-2004: HE WAS PACING SWIFTLY UP AND DOWN HIS ROOM +1580-141084-0002-2005: (THIS->THE) SET OF ROOMS IS QUITE THE OLDEST IN THE COLLEGE AND IT IS NOT UNUSUAL FOR VISITORS TO GO OVER THEM +1580-141084-0003-2006: NO NAMES PLEASE SAID HOLMES AS WE KNOCKED AT (GILCHRIST'S->GILCRE'S) DOOR +1580-141084-0004-2007: OF COURSE HE DID NOT REALIZE THAT IT WAS I WHO WAS KNOCKING BUT NONE THE LESS HIS CONDUCT WAS VERY UNCOURTEOUS AND INDEED UNDER THE CIRCUMSTANCES RATHER SUSPICIOUS +1580-141084-0005-2008: THAT IS VERY IMPORTANT SAID HOLMES +1580-141084-0006-2009: YOU DON'T SEEM TO REALIZE THE POSITION +1580-141084-0007-2010: TO MORROW IS THE EXAMINATION +1580-141084-0008-2011: I CANNOT ALLOW THE EXAMINATION TO BE HELD IF ONE OF THE PAPERS HAS BEEN TAMPERED WITH THE SITUATION MUST BE FACED +1580-141084-0009-2012: IT IS POSSIBLE THAT I MAY BE IN A POSITION THEN TO INDICATE SOME COURSE OF ACTION +1580-141084-0010-2013: I WILL TAKE THE BLACK CLAY WITH ME ALSO THE PENCIL CUTTINGS GOOD (BYE->BY) +1580-141084-0011-2014: WHEN WE WERE OUT IN THE DARKNESS OF THE QUADRANGLE WE AGAIN LOOKED UP AT THE WINDOWS +1580-141084-0012-2015: THE FOUL MOUTHED FELLOW AT THE TOP +1580-141084-0013-2016: HE IS THE ONE WITH THE WORST RECORD +1580-141084-0014-2017: WHY (BANNISTER->BANISTER) THE SERVANT WHAT'S HIS GAME IN THE MATTER +1580-141084-0015-2018: HE IMPRESSED ME AS BEING A PERFECTLY HONEST MAN +1580-141084-0016-2019: MY FRIEND DID NOT APPEAR TO BE DEPRESSED BY HIS FAILURE BUT SHRUGGED HIS SHOULDERS IN HALF HUMOROUS RESIGNATION +1580-141084-0017-2020: NO GOOD MY DEAR WATSON +1580-141084-0018-2021: I THINK SO YOU HAVE FORMED A CONCLUSION +1580-141084-0019-2022: YES MY DEAR WATSON I HAVE SOLVED THE MYSTERY +1580-141084-0020-2023: LOOK AT THAT HE HELD OUT HIS HAND +1580-141084-0021-2024: ON THE PALM WERE THREE LITTLE PYRAMIDS OF BLACK DOUGHY CLAY +1580-141084-0022-2025: AND ONE MORE THIS MORNING 
+1580-141084-0023-2026: IN A FEW HOURS THE EXAMINATION WOULD COMMENCE AND HE WAS STILL IN THE DILEMMA BETWEEN MAKING THE FACTS PUBLIC AND ALLOWING THE CULPRIT TO COMPETE FOR THE VALUABLE SCHOLARSHIP +1580-141084-0024-2027: HE COULD HARDLY STAND STILL SO GREAT WAS HIS MENTAL AGITATION AND HE RAN TOWARDS (HOLMES->HOMES) WITH TWO EAGER HANDS OUTSTRETCHED THANK HEAVEN THAT YOU HAVE COME +1580-141084-0025-2028: YOU KNOW HIM I THINK SO +1580-141084-0026-2029: IF THIS MATTER IS NOT TO BECOME PUBLIC WE MUST GIVE OURSELVES CERTAIN POWERS AND RESOLVE OURSELVES INTO A SMALL PRIVATE COURT MARTIAL +1580-141084-0027-2030: NO SIR CERTAINLY NOT +1580-141084-0028-2031: THERE WAS NO MAN SIR +1580-141084-0029-2032: HIS TROUBLED BLUE EYES GLANCED AT EACH OF US AND FINALLY RESTED WITH AN EXPRESSION OF BLANK DISMAY UPON (BANNISTER->BANISTER) IN THE FARTHER CORNER +1580-141084-0030-2033: JUST CLOSE THE DOOR SAID HOLMES +1580-141084-0031-2034: WE WANT TO KNOW MISTER (GILCHRIST->GOST) HOW YOU AN HONOURABLE MAN EVER CAME TO COMMIT SUCH AN ACTION AS THAT OF YESTERDAY +1580-141084-0032-2035: FOR A MOMENT (GILCHRIST->GILCRIS) WITH UPRAISED HAND TRIED TO CONTROL HIS WRITHING FEATURES +1580-141084-0033-2036: COME COME SAID HOLMES KINDLY IT IS HUMAN TO ERR AND AT LEAST NO ONE CAN ACCUSE YOU OF BEING A CALLOUS CRIMINAL +1580-141084-0034-2037: WELL WELL DON'T TROUBLE TO ANSWER LISTEN AND SEE THAT I DO YOU (NO->KNOW) INJUSTICE +1580-141084-0035-2038: HE COULD EXAMINE THE PAPERS IN HIS OWN OFFICE +1580-141084-0036-2039: THE INDIAN I ALSO THOUGHT NOTHING OF +1580-141084-0037-2040: WHEN I APPROACHED YOUR ROOM I EXAMINED THE WINDOW +1580-141084-0038-2041: NO ONE LESS THAN THAT WOULD HAVE A CHANCE +1580-141084-0039-2042: I ENTERED AND I TOOK YOU INTO MY CONFIDENCE AS TO THE SUGGESTIONS OF THE SIDE TABLE +1580-141084-0040-2043: HE RETURNED CARRYING HIS JUMPING SHOES WHICH ARE PROVIDED AS YOU ARE (AWARE->WEAR) WITH SEVERAL SHARP SPIKES +1580-141084-0041-2044: NO HARM WOULD HAVE BEEN DONE HAD IT NOT BEEN THAT AS HE PASSED YOUR DOOR HE PERCEIVED THE KEY WHICH HAD BEEN LEFT BY THE CARELESSNESS OF YOUR SERVANT +1580-141084-0042-2045: A SUDDEN IMPULSE CAME OVER HIM TO ENTER AND SEE IF THEY WERE INDEED THE PROOFS +1580-141084-0043-2046: HE PUT HIS SHOES ON THE TABLE +1580-141084-0044-2047: GLOVES SAID THE YOUNG MAN +1580-141084-0045-2048: SUDDENLY HE HEARD HIM AT THE VERY DOOR THERE WAS NO POSSIBLE ESCAPE +1580-141084-0046-2049: HAVE I TOLD THE TRUTH MISTER (GILCHRIST->GORIST) +1580-141084-0047-2050: I HAVE A LETTER HERE MISTER (SOAMES->SOLMES) WHICH I WROTE TO YOU EARLY THIS MORNING IN THE MIDDLE OF A RESTLESS NIGHT +1580-141084-0048-2051: IT (WILL->WOULD) BE CLEAR TO YOU FROM WHAT I HAVE SAID THAT ONLY YOU COULD HAVE LET THIS YOUNG MAN OUT SINCE YOU WERE LEFT IN THE ROOM AND MUST HAVE LOCKED THE DOOR WHEN YOU WENT OUT +1580-141084-0049-2052: IT WAS SIMPLE ENOUGH SIR IF YOU ONLY HAD KNOWN BUT WITH ALL YOUR CLEVERNESS IT WAS IMPOSSIBLE THAT YOU COULD KNOW +1580-141084-0050-2053: IF MISTER (SOAMES->SOLMES) SAW THEM THE GAME WAS UP +1995-1826-0000-750: IN THE DEBATE BETWEEN THE SENIOR SOCIETIES HER DEFENCE OF THE FIFTEENTH AMENDMENT HAD BEEN NOT ONLY A NOTABLE BIT OF REASONING BUT DELIVERED WITH REAL ENTHUSIASM +1995-1826-0001-751: THE SOUTH SHE HAD NOT THOUGHT OF SERIOUSLY AND YET KNOWING OF ITS DELIGHTFUL HOSPITALITY AND MILD CLIMATE SHE WAS NOT AVERSE TO CHARLESTON OR NEW ORLEANS +1995-1826-0002-752: JOHN TAYLOR WHO HAD SUPPORTED HER THROUGH COLLEGE WAS INTERESTED IN COTTON +1995-1826-0003-753: BETTER GO HE HAD (COUNSELLED->COUNSELS) 
SENTENTIOUSLY +1995-1826-0004-754: MIGHT LEARN SOMETHING USEFUL DOWN THERE +1995-1826-0005-755: BUT JOHN THERE'S NO SOCIETY JUST ELEMENTARY WORK +1995-1826-0006-756: BEEN LOOKING UP (TOOMS->TOMB'S) COUNTY +1995-1826-0007-757: (FIND SOME CRESSWELLS->FIVE CROSS WHIRLS) THERE BIG PLANTATIONS RATED AT TWO HUNDRED AND FIFTY THOUSAND DOLLARS +1995-1826-0008-758: SOME OTHERS TOO BIG COTTON COUNTY +1995-1826-0009-759: YOU OUGHT TO KNOW JOHN IF I TEACH NEGROES I'LL SCARCELY SEE MUCH OF PEOPLE IN MY OWN CLASS +1995-1826-0010-760: AT ANY RATE I SAY GO +1995-1826-0011-761: HERE SHE WAS TEACHING DIRTY CHILDREN AND THE SMELL OF CONFUSED ODORS AND BODILY PERSPIRATION WAS TO HER AT TIMES UNBEARABLE +1995-1826-0012-762: SHE WANTED A GLANCE OF THE NEW BOOKS (AND->IN) PERIODICALS AND TALK OF (GREAT->GRATE) PHILANTHROPIES AND REFORMS +1995-1826-0013-763: SO FOR THE HUNDREDTH TIME SHE WAS THINKING (TODAY->TO DAY) AS SHE WALKED ALONE UP THE LANE BACK OF THE BARN AND THEN SLOWLY DOWN THROUGH THE BOTTOMS +1995-1826-0014-764: COTTON SHE PAUSED +1995-1826-0015-765: SHE HAD ALMOST FORGOTTEN THAT IT WAS HERE WITHIN TOUCH (AND->IN) SIGHT +1995-1826-0016-766: THE GLIMMERING SEA OF DELICATE LEAVES WHISPERED AND MURMURED BEFORE HER STRETCHING AWAY TO THE NORTHWARD +1995-1826-0017-767: THERE MIGHT BE A BIT OF POETRY HERE AND THERE BUT MOST OF THIS PLACE WAS SUCH DESPERATE PROSE +1995-1826-0018-768: HER REGARD SHIFTED TO THE GREEN STALKS AND LEAVES AGAIN AND SHE STARTED TO MOVE AWAY +1995-1826-0019-769: COTTON IS A WONDERFUL THING IS IT NOT BOYS SHE SAID RATHER PRIMLY +1995-1826-0020-770: MISS TAYLOR DID NOT KNOW MUCH ABOUT COTTON BUT AT LEAST ONE MORE (REMARK->REMARKED) SEEMED CALLED FOR +1995-1826-0021-771: DON'T KNOW (WELL->OO) OF ALL THINGS INWARDLY COMMENTED MISS TAYLOR (LITERALLY->THAT A) BORN IN COTTON AND OH WELL AS MUCH AS TO ASK WHAT'S THE USE SHE TURNED AGAIN TO GO +1995-1826-0022-772: I SUPPOSE THOUGH IT'S TOO EARLY FOR THEM THEN CAME THE EXPLOSION +1995-1826-0023-773: (GOOBERS->GOULD WAS) DON'T GROW ON THE TOPS OF (VINES->ICE) BUT (UNDERGROUND->UNDER GROUND) ON THE (ROOTS->WOODS) LIKE (YAMS IS->A M) THAT SO +1995-1826-0024-774: THE GOLDEN FLEECE IT'S THE SILVER FLEECE HE (HARKENED->HEARKENED) +1995-1826-0025-775: (SOME TIME YOU'LL TELL ME->SOMETIME YOU DAMNLY) PLEASE WON'T YOU +1995-1826-0026-776: (NOW->THOU) FOR ONE LITTLE HALF HOUR SHE HAD BEEN A WOMAN TALKING TO A BOY NO NOT EVEN THAT SHE HAD BEEN TALKING JUST TALKING THERE WERE NO PERSONS IN THE CONVERSATION JUST THINGS ONE THING COTTON +1995-1836-0000-735: THE HON (CHARLES->*) SMITH MISS SARAH'S BROTHER WAS WALKING SWIFTLY UPTOWN FROM MISTER EASTERLY'S WALL STREET OFFICE AND HIS FACE WAS PALE +1995-1836-0001-736: AT LAST THE COTTON COMBINE WAS TO ALL APPEARANCES (AN->AND) ASSURED FACT AND HE WAS SLATED FOR THE SENATE +1995-1836-0002-737: WHY SHOULD HE NOT BE AS OTHER MEN +1995-1836-0003-738: SHE WAS NOT HERSELF (A NOTABLY->UNNOTABLY) INTELLIGENT WOMAN SHE GREATLY ADMIRED INTELLIGENCE OR WHATEVER LOOKED TO HER LIKE INTELLIGENCE IN OTHERS +1995-1836-0004-739: AS SHE AWAITED HER (GUESTS->GUESS) SHE SURVEYED THE TABLE WITH BOTH SATISFACTION AND (DISQUIETUDE->AS QUIETUDE) FOR HER SOCIAL FUNCTIONS WERE FEW (TONIGHT->TO NIGHT) THERE WERE SHE CHECKED THEM OFF ON HER FINGERS SIR JAMES (CREIGHTON->CRIGHTON) THE RICH ENGLISH MANUFACTURER AND LADY (CREIGHTON->KRETON) MISTER AND MISSUS (VANDERPOOL->VAN DERBOOLE) MISTER HARRY (CRESSWELL->CRESWELL) AND HIS SISTER JOHN TAYLOR AND HIS SISTER AND MISTER CHARLES SMITH WHOM THE EVENING PAPERS MENTIONED AS LIKELY TO BE 
(UNITED->UTIT) STATES SENATOR FROM NEW JERSEY A SELECTION OF GUESTS THAT HAD BEEN DETERMINED UNKNOWN TO THE HOSTESS BY THE MEETING OF COTTON INTERESTS EARLIER IN THE DAY +1995-1836-0005-740: MISSUS (GREY->GRAY) HAD MET SOUTHERNERS BEFORE BUT NOT INTIMATELY AND SHE ALWAYS HAD IN MIND VIVIDLY THEIR CRUELTY TO POOR NEGROES A SUBJECT SHE MADE A POINT OF INTRODUCING FORTHWITH +1995-1836-0006-741: SHE WAS THEREFORE MOST AGREEABLY SURPRISED TO HEAR MISTER (CRESSWELL->CRESWELL) EXPRESS HIMSELF SO CORDIALLY AS APPROVING OF NEGRO EDUCATION +1995-1836-0007-742: (BUT YOU->DO) BELIEVE IN SOME EDUCATION ASKED MARY TAYLOR +1995-1836-0008-743: I BELIEVE IN THE TRAINING OF PEOPLE TO (THEIR HIGHEST->THE HOUSE) CAPACITY THE ENGLISHMAN HERE HEARTILY SECONDED HIM +1995-1836-0009-744: BUT (CRESSWELL->CRESWELL) ADDED SIGNIFICANTLY CAPACITY DIFFERS ENORMOUSLY BETWEEN RACES +1995-1836-0010-745: THE (VANDERPOOLS->VANDER POOLS) WERE SURE (OF->*) THIS AND THE ENGLISHMAN INSTANCING INDIA BECAME QUITE ELOQUENT MISSUS (GREY->GRAY) WAS MYSTIFIED BUT HARDLY DARED ADMIT IT THE GENERAL TREND OF THE CONVERSATION SEEMED TO BE THAT MOST INDIVIDUALS NEEDED TO BE SUBMITTED TO THE SHARPEST SCRUTINY BEFORE BEING ALLOWED MUCH EDUCATION AND AS FOR THE LOWER RACES IT WAS SIMPLY CRIMINAL TO OPEN SUCH USELESS OPPORTUNITIES TO THEM +1995-1836-0011-746: (POSITIVELY->WAS ACTIVELY) HEROIC ADDED (CRESSWELL->CHRISWELL) AVOIDING HIS SISTER'S EYES +1995-1836-0012-747: BUT (WE'RE->WE ARE) NOT (ER->A) EXACTLY (WELCOMED->WELCOME) +1995-1836-0013-748: (MARY->MERRY) TAYLOR HOWEVER RELATED THE TALE OF (ZORA->ZORAH) TO MISSUS (GREY'S->GRAY'S) PRIVATE EAR LATER +1995-1836-0014-749: FORTUNATELY SAID MISTER (VANDERPOOL NORTHERNERS AND->VAN DERPOOL NORTHERNOSING) SOUTHERNERS (ARE ARRIVING->ALL RIVING) AT A BETTER MUTUAL UNDERSTANDING ON MOST OF THESE MATTERS +1995-1837-0000-777: HE KNEW THE SILVER FLEECE HIS AND (ZORA'S->ZORAS) MUST BE RUINED +1995-1837-0001-778: IT WAS THE FIRST GREAT SORROW OF HIS LIFE IT WAS NOT SO MUCH THE LOSS OF THE (COTTON->CONTIN) ITSELF BUT THE FANTASY THE HOPES THE DREAMS BUILT AROUND IT +1995-1837-0002-779: AH THE SWAMP THE CRUEL SWAMP +1995-1837-0003-780: (THE->WHO) REVELATION OF HIS LOVE LIGHTED AND BRIGHTENED SLOWLY TILL IT FLAMED LIKE A SUNRISE OVER HIM AND LEFT HIM IN BURNING WONDER +1995-1837-0004-781: HE PANTED TO KNOW IF SHE TOO KNEW OR (KNEW->NEW) AND CARED NOT OR CARED AND KNEW NOT +1995-1837-0005-782: SHE WAS SO STRANGE (AND->IN) HUMAN A CREATURE +1995-1837-0006-783: THE WORLD WAS WATER VEILED IN MISTS +1995-1837-0007-784: THEN OF A SUDDEN AT MIDDAY THE SUN SHOT OUT HOT AND STILL NO BREATH OF AIR STIRRED THE SKY WAS LIKE BLUE STEEL THE EARTH STEAMED +1995-1837-0008-785: WHERE WAS THE USE OF IMAGINING +1995-1837-0009-786: THE LAGOON HAD BEEN LEVEL WITH THE (DYKES->DIKES) A WEEK AGO AND NOW +1995-1837-0010-787: PERHAPS SHE TOO MIGHT BE THERE WAITING WEEPING +1995-1837-0011-788: HE STARTED AT THE THOUGHT HE HURRIED FORTH SADLY +1995-1837-0012-789: (HE SPLASHED->HIS BLASHED) AND STAMPED ALONG FARTHER AND FARTHER ONWARD UNTIL HE NEARED THE RAMPART OF THE CLEARING AND PUT FOOT UPON THE TREE BRIDGE +1995-1837-0013-790: THEN HE LOOKED DOWN THE LAGOON WAS DRY +1995-1837-0014-791: HE STOOD A MOMENT BEWILDERED THEN TURNED AND RUSHED UPON THE ISLAND A GREAT SHEET OF DAZZLING SUNLIGHT SWEPT THE PLACE AND BENEATH LAY A MIGHTY MASS OF OLIVE GREEN THICK TALL WET AND WILLOWY +1995-1837-0015-792: THE SQUARES OF COTTON SHARP EDGED HEAVY WERE JUST ABOUT TO BURST TO (BOLLS->BOWLS) +1995-1837-0016-793: FOR ONE LONG MOMENT HE PAUSED 
STUPID AGAPE WITH UTTER AMAZEMENT THEN LEANED DIZZILY AGAINST A TREE +1995-1837-0017-794: HE GAZED ABOUT PERPLEXED ASTONISHED +1995-1837-0018-795: HERE LAY THE READING OF THE RIDDLE WITH INFINITE WORK AND PAIN SOME ONE HAD DUG A CANAL FROM THE LAGOON TO THE CREEK INTO WHICH THE FORMER HAD DRAINED BY A LONG AND CROOKED WAY THUS ALLOWING IT TO EMPTY DIRECTLY +1995-1837-0019-796: HE SAT DOWN WEAK BEWILDERED AND ONE THOUGHT WAS UPPERMOST (ZORA->SORA) +1995-1837-0020-797: THE YEARS OF THE DAYS OF HER DYING WERE TEN +1995-1837-0021-798: THE HOPE AND DREAM OF HARVEST WAS UPON THE LAND +1995-1837-0022-799: UP IN THE SICK ROOM ZORA LAY ON THE LITTLE WHITE BED +1995-1837-0023-800: THE (NET->NED) AND WEB OF ENDLESS THINGS HAD BEEN CRAWLING AND CREEPING AROUND HER SHE HAD STRUGGLED IN DUMB SPEECHLESS TERROR AGAINST SOME MIGHTY GRASPING THAT STROVE FOR HER LIFE WITH GNARLED AND CREEPING FINGERS BUT NOW AT LAST (WEAKLY->WEEKLY) SHE OPENED HER EYES AND QUESTIONED +1995-1837-0024-801: FOR A WHILE SHE LAY IN HER CHAIR IN HAPPY DREAMY PLEASURE (AT->ITS) SUN AND BIRD AND TREE +1995-1837-0025-802: SHE ROSE WITH A FLEETING GLANCE GATHERED THE SHAWL (ROUND->AROUND) HER THEN GLIDING FORWARD WAVERING TREMULOUS SLIPPED ACROSS THE ROAD AND INTO THE SWAMP +1995-1837-0026-803: SHE HAD BEEN BORN WITHIN ITS BORDERS WITHIN (ITS->HIS) BORDERS SHE HAD LIVED AND GROWN AND WITHIN ITS (BORDERS->BORDER) SHE HAD MET HER LOVE +1995-1837-0027-804: ON SHE HURRIED UNTIL SWEEPING DOWN TO THE LAGOON AND THE ISLAND LO THE COTTON LAY BEFORE HER +1995-1837-0028-805: THE CHAIR WAS EMPTY BUT HE KNEW +1995-1837-0029-806: HE DARTED THROUGH THE TREES AND PAUSED A TALL MAN STRONGLY BUT SLIMLY MADE +2094-142345-0000-308: IT IS A VERY FINE OLD PLACE OF RED BRICK SOFTENED BY A PALE POWDERY LICHEN WHICH HAS DISPERSED ITSELF WITH HAPPY IRREGULARITY SO AS TO BRING THE RED BRICK INTO TERMS OF FRIENDLY COMPANIONSHIP WITH (THE->A) LIMESTONE ORNAMENTS SURROUNDING THE THREE GABLES THE WINDOWS AND THE DOOR PLACE +2094-142345-0001-309: BUT THE WINDOWS ARE PATCHED WITH WOODEN PANES AND THE DOOR I THINK IS LIKE THE GATE IT IS NEVER OPENED +2094-142345-0002-310: FOR IT IS A SOLID HEAVY HANDSOME DOOR AND MUST ONCE HAVE BEEN IN THE HABIT OF (SHUTTING->SHEDDING) WITH A SONOROUS BANG BEHIND (A LIVERIED->THE LIVERYED) LACKEY WHO HAD JUST SEEN HIS MASTER AND MISTRESS OFF THE GROUNDS IN A CARRIAGE AND PAIR +2094-142345-0003-311: A LARGE OPEN FIREPLACE WITH RUSTY DOGS IN IT AND A BARE BOARDED FLOOR AT THE FAR END FLEECES OF WOOL STACKED UP IN THE MIDDLE OF THE FLOOR SOME EMPTY CORN BAGS +2094-142345-0004-312: AND WHAT THROUGH THE LEFT HAND WINDOW +2094-142345-0005-313: SEVERAL CLOTHES HORSES (A PILLION->APILLION) A SPINNING WHEEL AND AN OLD BOX WIDE OPEN AND STUFFED FULL OF COLOURED RAGS +2094-142345-0006-314: AT THE EDGE OF THIS BOX THERE LIES A GREAT WOODEN DOLL WHICH SO FAR AS MUTILATION IS CONCERNED BEARS A STRONG RESEMBLANCE TO THE FINEST GREEK SCULPTURE AND ESPECIALLY IN THE TOTAL LOSS OF ITS NOSE +2094-142345-0007-315: THE HISTORY OF THE HOUSE IS PLAIN NOW +2094-142345-0008-316: BUT THERE IS ALWAYS (A->AS) STRONGER SENSE OF LIFE WHEN THE SUN IS BRILLIANT AFTER RAIN AND NOW HE IS POURING DOWN HIS BEAMS AND MAKING SPARKLES AMONG THE WET STRAW AND LIGHTING UP EVERY PATCH OF VIVID GREEN MOSS ON THE RED TILES OF THE (COW SHED->COWSHED) AND TURNING EVEN THE MUDDY WATER THAT IS HURRYING ALONG THE CHANNEL TO THE DRAIN INTO A MIRROR FOR THE YELLOW (BILLED->BUILD) DUCKS WHO ARE SEIZING THE OPPORTUNITY OF GETTING A DRINK WITH AS MUCH BODY IN IT AS POSSIBLE 
+2094-142345-0009-317: FOR THE GREAT BARN DOORS ARE THROWN WIDE OPEN AND MEN ARE BUSY THERE MENDING THE HARNESS UNDER THE SUPERINTENDENCE OF MISTER GOBY THE (WHITTAW->WIDOW) OTHERWISE SADDLER WHO ENTERTAINS THEM WITH THE LATEST (TREDDLESTON->TREADLESTON) GOSSIP +2094-142345-0010-318: (HETTY->HETTY'S) SORREL OFTEN TOOK THE OPPORTUNITY WHEN HER AUNT'S BACK WAS TURNED OF LOOKING AT THE PLEASING REFLECTION OF HERSELF IN THOSE POLISHED (SURFACES->SERVICES) FOR THE OAK TABLE WAS USUALLY TURNED UP LIKE A SCREEN AND WAS MORE FOR ORNAMENT THAN FOR USE AND SHE COULD SEE HERSELF SOMETIMES IN THE GREAT ROUND PEWTER DISHES THAT WERE RANGED ON THE SHELVES ABOVE THE LONG DEAL DINNER TABLE OR IN THE HOBS OF THE GRATE WHICH ALWAYS SHONE LIKE JASPER +2094-142345-0011-319: DO NOT SUPPOSE HOWEVER THAT MISSUS POYSER WAS ELDERLY OR SHREWISH IN HER APPEARANCE SHE WAS A GOOD LOOKING WOMAN NOT MORE THAN EIGHT AND THIRTY (OF->A) FAIR COMPLEXION AND SANDY HAIR (WELL SHAPEN LIGHT FOOTED->WHILE SHAKEN LIGHTFOOTED) +2094-142345-0012-320: THE FAMILY LIKENESS BETWEEN HER AND HER NIECE DINAH MORRIS WITH (THE->A) CONTRAST BETWEEN HER KEENNESS AND DINAH'S SERAPHIC GENTLENESS OF EXPRESSION MIGHT HAVE SERVED A PAINTER AS AN EXCELLENT SUGGESTION FOR (A->*) MARTHA AND MARY +2094-142345-0013-321: HER TONGUE WAS NOT LESS KEEN THAN HER EYE AND WHENEVER A DAMSEL CAME WITHIN (EARSHOT->EAR SHOT) SEEMED TO TAKE UP AN UNFINISHED LECTURE AS A BARREL ORGAN TAKES UP A TUNE PRECISELY AT THE POINT WHERE IT HAD LEFT OFF +2094-142345-0014-322: THE FACT THAT IT WAS CHURNING DAY WAS ANOTHER REASON WHY IT WAS INCONVENIENT TO HAVE THE (WHITTAWS->WIDOWS) AND WHY CONSEQUENTLY MISSUS POYSER SHOULD SCOLD MOLLY THE HOUSEMAID WITH UNUSUAL SEVERITY +2094-142345-0015-323: TO ALL APPEARANCE MOLLY HAD GOT THROUGH HER AFTER DINNER WORK IN AN EXEMPLARY MANNER HAD CLEANED HERSELF WITH GREAT DISPATCH AND NOW CAME TO ASK SUBMISSIVELY IF SHE SHOULD SIT DOWN TO HER SPINNING TILL MILKING TIME +2094-142345-0016-324: SPINNING INDEED +2094-142345-0017-325: I NEVER KNEW YOUR EQUALS FOR GALLOWSNESS +2094-142345-0018-326: WHO TAUGHT YOU TO SCRUB A FLOOR I SHOULD LIKE TO KNOW +2094-142345-0019-327: COMB THE WOOL FOR THE (WHITTAWS->WIDOWS) INDEED +2094-142345-0020-328: THAT'S WHAT YOU'D LIKE TO BE DOING IS IT +2094-142345-0021-329: THAT'S THE WAY WITH YOU THAT'S THE ROAD YOU'D ALL LIKE TO GO HEADLONGS TO RUIN +2094-142345-0022-330: MISTER (OTTLEY'S->OUTLEY'S) INDEED +2094-142345-0023-331: (YOU'RE->YOU ARE) A RARE (UN FOR SITTING->IN PROCEEDING) DOWN TO YOUR WORK A LITTLE WHILE AFTER (IT'S->ITS) TIME TO PUT BY +2094-142345-0024-332: (MUNNY->MONEY) MY IRON'S (TWITE TOLD->TIGHT) PEASE PUT IT DOWN TO WARM +2094-142345-0025-333: COLD IS IT MY DARLING BLESS YOUR SWEET FACE +2094-142345-0026-334: SHE'S GOING TO PUT THE IRONING THINGS AWAY +2094-142345-0027-335: (MUNNY->MONEY) I (TOULD IKE->DID LIKE) TO DO INTO (DE->THE) BARN TO TOMMY TO SEE (DE WHITTAWD->THE WID ODD) +2094-142345-0028-336: NO NO (NO->*) TOTTY (UD->HAD) GET HER FEET WET SAID MISSUS POYSER CARRYING AWAY HER IRON +2094-142345-0029-337: DID EVER ANYBODY SEE THE LIKE SCREAMED MISSUS POYSER RUNNING TOWARDS THE TABLE WHEN HER EYE HAD FALLEN ON THE BLUE STREAM +2094-142345-0030-338: TOTTY HOWEVER HAD DESCENDED FROM HER CHAIR WITH GREAT SWIFTNESS AND WAS ALREADY IN RETREAT TOWARDS THE DAIRY WITH A SORT OF WADDLING RUN AND AN AMOUNT OF FAT ON THE NAPE OF HER NECK WHICH MADE HER LOOK LIKE THE METAMORPHOSIS OF A WHITE SUCKLING PIG +2094-142345-0031-339: AND SHE WAS VERY FOND OF YOU TOO AUNT RACHEL +2094-142345-0032-340: I 
OFTEN HEARD HER TALK OF YOU IN THE SAME SORT OF WAY +2094-142345-0033-341: WHEN SHE HAD THAT (BAD->BAN) ILLNESS AND I WAS ONLY ELEVEN YEARS OLD SHE USED TO SAY YOU'LL HAVE A FRIEND ON EARTH IN YOUR AUNT RACHEL IF I'M TAKEN FROM YOU FOR SHE HAS A KIND HEART AND I'M SURE I'VE FOUND IT SO +2094-142345-0034-342: AND THERE'S LINEN IN THE HOUSE AS I COULD WELL SPARE YOU FOR (I'VE->I) GOT LOTS (O->OF) SHEETING AND TABLE CLOTHING AND (TOWELLING AS->TOWELINGS) ISN'T MADE UP +2094-142345-0035-343: BUT NOT MORE THAN WHAT'S IN THE BIBLE (AUNT->AND) SAID DINAH +2094-142345-0036-344: NAY DEAR AUNT YOU NEVER HEARD ME SAY THAT ALL PEOPLE ARE CALLED TO FORSAKE THEIR WORK AND THEIR FAMILIES +2094-142345-0037-345: WE CAN ALL BE SERVANTS OF GOD WHEREVER OUR LOT IS CAST BUT HE GIVES US DIFFERENT SORTS OF WORK ACCORDING AS HE FITS US FOR IT AND CALLS US TO IT +2094-142345-0038-346: I CAN NO MORE HELP SPENDING MY LIFE IN TRYING TO DO WHAT I CAN FOR THE SOULS OF OTHERS (THAN->THEN) YOU COULD HELP RUNNING IF YOU HEARD LITTLE TOTTY CRYING AT THE OTHER END OF THE HOUSE THE VOICE WOULD GO TO YOUR HEART YOU WOULD THINK THE DEAR CHILD WAS IN TROUBLE OR IN DANGER AND YOU COULDN'T REST WITHOUT RUNNING TO HELP HER AND COMFORT HER +2094-142345-0039-347: I'VE STRONG ASSURANCE THAT NO EVIL WILL HAPPEN TO YOU AND MY UNCLE AND THE CHILDREN FROM ANYTHING (I'VE->I HAVE) DONE +2094-142345-0040-348: I DIDN'T PREACH WITHOUT DIRECTION +2094-142345-0041-349: DIRECTION +2094-142345-0042-350: I (HANNA->HAD A) COMMON PATIENCE WITH YOU +2094-142345-0043-351: BY THIS TIME THE TWO GENTLEMEN HAD REACHED THE PALINGS AND HAD GOT DOWN FROM THEIR HORSES IT WAS PLAIN THEY MEANT TO COME IN +2094-142345-0044-352: SAID MISTER IRWINE WITH HIS STATELY CORDIALITY +2094-142345-0045-353: OH SIR DON'T MENTION IT SAID MISSUS POYSER +2094-142345-0046-354: I DELIGHT IN YOUR KITCHEN +2094-142345-0047-355: POYSER IS NOT AT HOME IS HE +2094-142345-0048-356: SAID CAPTAIN (DONNITHORNE SEATING->DONNYTHORNE SITTING) HIMSELF WHERE HE COULD SEE ALONG THE SHORT PASSAGE TO THE OPEN DAIRY DOOR +2094-142345-0049-357: NO SIR HE ISN'T HE'S GONE TO (ROSSETER->ROSSOTER) TO SEE MISTER WEST THE FACTOR ABOUT THE WOOL +2094-142345-0050-358: BUT THERE'S FATHER (THE->IN) BARN SIR IF HE'D BE OF ANY USE +2094-142345-0051-359: NO THANK YOU I'LL JUST LOOK AT THE (WHELPS->WHELMS) AND LEAVE A MESSAGE ABOUT THEM WITH YOUR SHEPHERD +2094-142345-0052-360: I MUST COME ANOTHER DAY AND SEE YOUR HUSBAND I WANT TO HAVE A CONSULTATION WITH HIM ABOUT HORSES +2094-142345-0053-361: FOR IF HE'S ANYWHERE ON THE FARM WE CAN SEND FOR HIM IN A MINUTE +2094-142345-0054-362: OH SIR SAID MISSUS POYSER RATHER ALARMED YOU WOULDN'T LIKE IT AT ALL +2094-142345-0055-363: BUT YOU KNOW MORE ABOUT THAT THAN I DO SIR +2094-142345-0056-364: I THINK I SHOULD BE DOING YOU A SERVICE TO TURN YOU OUT OF SUCH A PLACE +2094-142345-0057-365: I (KNOW HIS->KNOWS) FARM IS IN BETTER ORDER THAN ANY OTHER WITHIN TEN MILES OF US AND AS FOR THE KITCHEN HE ADDED SMILING I DON'T BELIEVE THERE'S ONE IN THE KINGDOM TO BEAT IT +2094-142345-0058-366: BY THE (BY I'VE->BYE I HAVE) NEVER SEEN YOUR DAIRY I MUST SEE YOUR (DAIRY->DEARIE) MISSUS POYSER +2094-142345-0059-367: THIS MISSUS POYSER SAID BLUSHING AND BELIEVING THAT THE CAPTAIN WAS REALLY INTERESTED IN HER MILK PANS AND WOULD ADJUST HIS OPINION OF HER TO THE APPEARANCE OF HER DAIRY +2094-142345-0060-368: OH I'VE NO DOUBT IT'S IN CAPITAL ORDER +2300-131720-0000-1816: THE PARIS PLANT LIKE THAT AT THE CRYSTAL PALACE WAS A TEMPORARY EXHIBIT +2300-131720-0001-1817: THE LONDON PLANT WAS 
LESS TEMPORARY BUT NOT PERMANENT SUPPLYING BEFORE IT WAS TORN OUT NO FEWER THAN THREE THOUSAND LAMPS IN HOTELS CHURCHES STORES AND DWELLINGS IN THE VICINITY OF HOLBORN (VIADUCT->VIADUK) +2300-131720-0002-1818: THERE MESSRS JOHNSON AND HAMMER PUT INTO PRACTICE MANY OF THE IDEAS NOW STANDARD IN THE ART AND SECURED MUCH USEFUL DATA FOR THE WORK IN NEW YORK OF WHICH THE STORY HAS JUST BEEN TOLD +2300-131720-0003-1819: THE DYNAMO ELECTRIC MACHINE THOUGH SMALL WAS ROBUST FOR UNDER ALL THE VARYING SPEEDS OF WATER POWER AND THE VICISSITUDES OF THE PLANT TO WHICH IT BELONGED IT CONTINUED IN ACTIVE USE UNTIL EIGHTEEN NINETY NINE SEVENTEEN YEARS +2300-131720-0004-1820: OWING TO HIS INSISTENCE ON LOW PRESSURE DIRECT CURRENT FOR USE IN DENSELY POPULATED DISTRICTS AS THE ONLY SAFE AND TRULY UNIVERSAL PROFITABLE WAY OF DELIVERING ELECTRICAL ENERGY TO THE CONSUMERS EDISON HAS BEEN FREQUENTLY SPOKEN OF AS AN OPPONENT OF THE ALTERNATING CURRENT +2300-131720-0005-1821: WHY IF WE ERECT A STATION AT THE FALLS IT IS A GREAT ECONOMY TO GET IT UP TO THE CITY +2300-131720-0006-1822: THERE SEEMS NO GOOD REASON FOR BELIEVING THAT IT WILL CHANGE +2300-131720-0007-1823: BROAD AS THE PRAIRIES AND FREE IN THOUGHT AS THE WINDS THAT (SWEEP->SWEPT) THEM HE IS IDIOSYNCRATICALLY OPPOSED TO (LOOSE AND->LOOSEN) WASTEFUL METHODS TO PLANS OF EMPIRE THAT NEGLECT THE POOR AT THE GATE +2300-131720-0008-1824: EVERYTHING HE HAS DONE HAS BEEN AIMED AT THE CONSERVATION OF ENERGY THE CONTRACTION OF SPACE THE INTENSIFICATION OF CULTURE +2300-131720-0009-1825: FOR SOME YEARS IT WAS NOT FOUND FEASIBLE TO OPERATE MOTORS ON ALTERNATING CURRENT CIRCUITS AND THAT REASON WAS OFTEN URGED AGAINST IT SERIOUSLY +2300-131720-0010-1826: IT COULD NOT BE USED FOR ELECTROPLATING OR DEPOSITION NOR COULD IT CHARGE STORAGE BATTERIES ALL OF WHICH ARE EASILY WITHIN THE ABILITY OF THE DIRECT CURRENT +2300-131720-0011-1827: BUT WHEN IT CAME TO BE A QUESTION OF LIGHTING A SCATTERED SUBURB A GROUP OF DWELLINGS ON THE OUTSKIRTS A REMOTE COUNTRY RESIDENCE OR A FARM HOUSE THE ALTERNATING CURRENT IN ALL ELEMENTS SAVE ITS DANGER WAS AND IS IDEAL +2300-131720-0012-1828: EDISON WAS INTOLERANT OF SHAM AND (SHODDY->SHOTTY) AND NOTHING WOULD SATISFY HIM THAT COULD NOT STAND CROSS EXAMINATION BY MICROSCOPE TEST TUBE AND GALVANOMETER +2300-131720-0013-1829: UNLESS HE COULD SECURE AN ENGINE OF SMOOTHER RUNNING AND MORE EXACTLY (GOVERNED->GOVERN) AND REGULATED THAN THOSE AVAILABLE FOR HIS DYNAMO AND LAMP EDISON REALIZED THAT HE WOULD FIND IT ALMOST IMPOSSIBLE TO GIVE A STEADY LIGHT +2300-131720-0014-1830: MISTER EDISON WAS A LEADER FAR AHEAD OF THE TIME +2300-131720-0015-1831: HE OBTAINED THE DESIRED SPEED AND LOAD WITH A FRICTION (BRAKE->BREAK) ALSO REGULATOR OF SPEED BUT WAITED FOR AN INDICATOR TO VERIFY IT +2300-131720-0016-1832: THEN AGAIN THERE WAS NO KNOWN WAY TO (LUBRICATE->LUBRICADE) AN ENGINE FOR CONTINUOUS RUNNING AND MISTER EDISON INFORMED ME THAT AS A MARINE ENGINE STARTED BEFORE THE SHIP LEFT NEW YORK AND CONTINUED RUNNING UNTIL IT REACHED ITS HOME PORT SO AN ENGINE FOR HIS PURPOSES MUST PRODUCE LIGHT AT ALL TIMES +2300-131720-0017-1833: EDISON HAD INSTALLED HIS HISTORIC FIRST GREAT CENTRAL STATION SYSTEM IN NEW YORK ON THE MULTIPLE ARC SYSTEM COVERED BY HIS FEEDER AND MAIN INVENTION WHICH RESULTED IN A NOTABLE SAVING IN THE COST OF CONDUCTORS AS AGAINST A (STRAIGHT->STRAIT) TWO WIRE SYSTEM THROUGHOUT OF THE TREE KIND +2300-131720-0018-1834: HE SOON FORESAW THAT STILL GREATER ECONOMY WOULD BE NECESSARY FOR COMMERCIAL SUCCESS NOT ALONE FOR THE LARGER TERRITORY 
OPENING BUT FOR THE COMPACT (DISTRICTS->DISTRICT) OF LARGE CITIES +2300-131720-0019-1835: THE STRONG POSITION HELD BY THE EDISON SYSTEM UNDER THE STRENUOUS COMPETITION (THAT->IT) WAS ALREADY SPRINGING UP WAS ENORMOUSLY IMPROVED BY THE INTRODUCTION OF THE THREE WIRE SYSTEM AND (IT->HE) GAVE AN IMMEDIATE IMPETUS TO (INCANDESCENT->INCONDESCENT) LIGHTING +2300-131720-0020-1836: IT WAS SPECIALLY SUITED FOR A TRIAL PLANT ALSO IN THE EARLY DAYS WHEN A YIELD OF SIX OR EIGHT LAMPS TO THE HORSE (POWER->BOWER) WAS CONSIDERED SUBJECT FOR CONGRATULATION +2300-131720-0021-1837: THE STREET CONDUCTORS WERE OF THE OVERHEAD POLE LINE CONSTRUCTION AND WERE INSTALLED BY THE CONSTRUCTION COMPANY THAT HAD BEEN ORGANIZED BY EDISON TO BUILD (AND->AN) EQUIP CENTRAL STATIONS +2300-131720-0022-1838: MEANWHILE HE HAD CALLED UPON ME TO MAKE A REPORT OF THE THREE WIRE SYSTEM KNOWN IN ENGLAND AS THE HOPKINSON BOTH DOCTOR JOHN HOPKINSON AND MISTER EDISON BEING INDEPENDENT (INVENTORS->IN VENORS) AT PRACTICALLY THE SAME TIME +2300-131720-0023-1839: I THINK HE WAS PERHAPS MORE APPRECIATIVE (THAN->THAT) I WAS OF THE DISCIPLINE OF THE EDISON CONSTRUCTION DEPARTMENT AND THOUGHT IT WOULD BE WELL FOR US TO WAIT UNTIL THE MORNING OF THE FOURTH BEFORE WE STARTED UP +2300-131720-0024-1840: BUT THE PLANT RAN AND IT WAS THE FIRST THREE WIRE STATION IN THIS COUNTRY +2300-131720-0025-1841: THEY WERE LATER USED AS (RESERVE->RESERVED) MACHINES AND FINALLY WITH THE ENGINE RETIRED FROM SERVICE AS PART OF THE COLLECTION OF EDISONIA BUT THEY REMAIN IN PRACTICALLY AS GOOD CONDITION AS WHEN INSTALLED IN EIGHTEEN EIGHTY THREE +2300-131720-0026-1842: THE ARC LAMP INSTALLED OUTSIDE A CUSTOMER'S PREMISES OR IN A CIRCUIT FOR PUBLIC STREET LIGHTING BURNED SO MANY HOURS NIGHTLY SO MANY NIGHTS IN THE MONTH AND WAS PAID FOR AT THAT RATE SUBJECT TO REBATE FOR HOURS WHEN THE LAMP MIGHT BE OUT THROUGH ACCIDENT +2300-131720-0027-1843: EDISON HELD THAT THE ELECTRICITY SOLD MUST BE MEASURED JUST LIKE GAS OR WATER AND HE PROCEEDED TO DEVELOP A METER +2300-131720-0028-1844: THERE WAS INFINITE SCEPTICISM AROUND HIM ON THE SUBJECT AND WHILE OTHER INVENTORS WERE ALSO GIVING THE SUBJECT THEIR THOUGHT THE PUBLIC TOOK IT FOR GRANTED THAT ANYTHING SO UTTERLY INTANGIBLE AS ELECTRICITY THAT COULD NOT BE SEEN OR WEIGHED AND ONLY GAVE SECONDARY EVIDENCE OF ITSELF AT THE EXACT POINT OF USE COULD NOT BE BROUGHT TO ACCURATE REGISTRATION +2300-131720-0029-1845: HENCE THE EDISON ELECTROLYTIC METER IS NO LONGER USED DESPITE ITS EXCELLENT QUALITIES +2300-131720-0030-1846: THE PRINCIPLE EMPLOYED IN THE EDISON ELECTROLYTIC METER IS THAT WHICH EXEMPLIFIES THE POWER OF ELECTRICITY TO DECOMPOSE A CHEMICAL SUBSTANCE +2300-131720-0031-1847: ASSOCIATED WITH THIS SIMPLE FORM OF APPARATUS WERE VARIOUS INGENIOUS DETAILS AND REFINEMENTS TO SECURE REGULARITY OF OPERATION FREEDOM FROM INACCURACY AND IMMUNITY FROM SUCH TAMPERING AS WOULD PERMIT THEFT OF CURRENT OR DAMAGE +2300-131720-0032-1848: THE STANDARD EDISON METER PRACTICE WAS TO REMOVE THE CELLS ONCE A MONTH TO THE (METER->METEOR) ROOM OF THE CENTRAL STATION COMPANY FOR EXAMINATION ANOTHER SET BEING SUBSTITUTED +2300-131720-0033-1849: IN DECEMBER EIGHTEEN EIGHTY EIGHT MISTER W J JENKS READ AN INTERESTING PAPER BEFORE THE AMERICAN INSTITUTE OF ELECTRICAL ENGINEERS ON THE SIX YEARS OF PRACTICAL EXPERIENCE HAD UP TO THAT TIME WITH THE (METER->METRE) THEN MORE GENERALLY IN USE THAN ANY OTHER +2300-131720-0034-1850: THE OTHERS HAVING BEEN IN OPERATION TOO SHORT A TIME TO SHOW DEFINITE RESULTS ALTHOUGH THEY ALSO WENT QUICKLY TO A DIVIDEND 
BASIS +2300-131720-0035-1851: IN THIS CONNECTION IT SHOULD BE MENTIONED THAT THE ASSOCIATION OF EDISON ILLUMINATING COMPANIES IN THE SAME YEAR ADOPTED RESOLUTIONS UNANIMOUSLY TO THE EFFECT THAT THE EDISON METER WAS ACCURATE AND THAT ITS USE WAS NOT EXPENSIVE FOR STATIONS ABOVE ONE THOUSAND LIGHTS AND THAT THE BEST FINANCIAL RESULTS WERE INVARIABLY SECURED IN A STATION SELLING CURRENT BY (METER->METRE) +2300-131720-0036-1852: THE (METER->METRE) CONTINUED IN GENERAL SERVICE DURING EIGHTEEN NINETY NINE AND PROBABLY UP TO THE CLOSE OF THE CENTURY +2300-131720-0037-1853: HE WEIGHED AND (REWEIGHED->REWAYED) THE (METER PLATES->METERPLATES) AND PURSUED EVERY LINE OF INVESTIGATION IMAGINABLE BUT ALL IN VAIN +2300-131720-0038-1854: HE FELT HE WAS UP AGAINST IT AND THAT PERHAPS ANOTHER KIND OF A JOB WOULD SUIT HIM BETTER +2300-131720-0039-1855: THE PROBLEM WAS SOLVED +2300-131720-0040-1856: WE WERE MORE INTERESTED IN THE TECHNICAL CONDITION OF THE STATION THAN IN THE COMMERCIAL PART +2300-131720-0041-1857: WE HAD (METERS->METRES) IN WHICH THERE WERE TWO BOTTLES OF LIQUID +237-126133-0000-2407: HERE SHE WOULD STAY COMFORTED AND (SOOTHED->SOOTHE) AMONG THE LOVELY PLANTS AND RICH EXOTICS REJOICING THE HEART OF OLD TURNER THE GARDENER WHO SINCE POLLY'S FIRST RAPTUROUS ENTRANCE HAD TAKEN HER INTO HIS GOOD GRACES FOR ALL TIME +237-126133-0001-2408: EVERY CHANCE SHE COULD STEAL AFTER PRACTICE HOURS WERE OVER AND AFTER THE CLAMOROUS DEMANDS OF THE BOYS UPON HER TIME WERE FULLY SATISFIED WAS SEIZED TO FLY ON THE WINGS OF THE WIND TO THE FLOWERS +237-126133-0002-2409: THEN DEAR SAID MISSUS WHITNEY YOU MUST BE KINDER TO HER THAN EVER THINK WHAT IT WOULD BE FOR ONE OF YOU TO BE AWAY FROM HOME EVEN AMONG FRIENDS +237-126133-0003-2410: SOMEHOW OF ALL THE DAYS WHEN THE HOME FEELING WAS THE STRONGEST THIS DAY IT SEEMED AS IF SHE COULD BEAR IT NO LONGER +237-126133-0004-2411: IF SHE COULD ONLY SEE PHRONSIE FOR JUST ONE MOMENT +237-126133-0005-2412: OH SHE'S ALWAYS AT THE PIANO SAID VAN SHE MUST BE THERE NOW SOMEWHERE AND THEN SOMEBODY LAUGHED +237-126133-0006-2413: AT THIS THE BUNDLE OPENED SUDDENLY AND OUT POPPED PHRONSIE +237-126133-0007-2414: BUT POLLY COULDN'T SPEAK AND IF JASPER HADN'T CAUGHT HER JUST IN TIME SHE WOULD HAVE TUMBLED OVER BACKWARD FROM THE STOOL PHRONSIE AND ALL +237-126133-0008-2415: ASKED PHRONSIE WITH HER LITTLE FACE CLOSE TO POLLY'S OWN +237-126133-0009-2416: NOW YOU'LL STAY CRIED VAN SAY POLLY WON'T YOU +237-126133-0010-2417: OH YOU ARE THE DEAREST AND BEST MISTER KING I EVER SAW BUT HOW DID YOU MAKE MAMMY LET HER COME +237-126133-0011-2418: ISN'T HE SPLENDID CRIED JASPER IN INTENSE PRIDE SWELLING UP FATHER KNEW HOW TO DO IT +237-126133-0012-2419: THERE THERE HE SAID SOOTHINGLY PATTING HER BROWN FUZZY HEAD +237-126133-0013-2420: I KNOW GASPED POLLY CONTROLLING HER SOBS I WON'T ONLY I CAN'T THANK YOU +237-126133-0014-2421: ASKED PHRONSIE IN INTENSE INTEREST SLIPPING DOWN OUT OF POLLY'S ARMS AND CROWDING UP CLOSE TO JASPER'S SIDE +237-126133-0015-2422: YES ALL ALONE BY HIMSELF ASSERTED JASPER VEHEMENTLY AND WINKING FURIOUSLY TO THE OTHERS TO STOP THEIR LAUGHING HE DID NOW TRULY PHRONSIE +237-126133-0016-2423: OH NO (JASPER->JAPS HER) I MUST GO BY MY VERY OWN SELF +237-126133-0017-2424: THERE JAP YOU'VE (CAUGHT->GOT) IT LAUGHED PERCY WHILE THE OTHERS SCREAMED AT THE SIGHT OF JASPER'S FACE +237-126133-0018-2425: DON'T MIND IT POLLY WHISPERED JASPER TWASN'T HER FAULT +237-126133-0019-2426: DEAR ME EJACULATED THE OLD GENTLEMAN IN THE UTMOST AMAZEMENT AND SUCH A TIME AS I'VE HAD TO GET HER HERE TOO 
+237-126133-0020-2427: HOW DID HER MOTHER EVER LET HER GO +237-126133-0021-2428: SHE ASKED IMPULSIVELY I DIDN'T BELIEVE YOU COULD PERSUADE HER FATHER +237-126133-0022-2429: I DIDN'T HAVE ANY FEARS IF I WORKED IT RIGHTLY SAID THE OLD GENTLEMAN COMPLACENTLY +237-126133-0023-2430: HE CRIED (IN->AND) HIGH DUDGEON JUST AS IF HE OWNED THE WHOLE OF THE PEPPERS AND COULD DISPOSE OF THEM ALL TO SUIT HIS FANCY +237-126133-0024-2431: AND THE OLD GENTLEMAN WAS SO DELIGHTED WITH HIS SUCCESS THAT HE HAD TO BURST OUT INTO A SERIES OF SHORT HAPPY BITS OF LAUGHTER THAT OCCUPIED QUITE A SPACE OF TIME +237-126133-0025-2432: AT LAST HE CAME OUT OF THEM AND WIPED HIS FACE VIGOROUSLY +237-134493-0000-2388: IT IS SIXTEEN YEARS SINCE JOHN (BERGSON->BERKS AND) DIED +237-134493-0001-2389: HIS WIFE NOW LIES BESIDE HIM AND THE WHITE SHAFT THAT MARKS THEIR GRAVES GLEAMS ACROSS THE WHEAT FIELDS +237-134493-0002-2390: FROM THE NORWEGIAN GRAVEYARD ONE LOOKS OUT OVER A VAST CHECKER BOARD MARKED OFF IN SQUARES OF WHEAT AND CORN LIGHT AND DARK (DARK->*) AND LIGHT +237-134493-0003-2391: FROM THE GRAVEYARD GATE ONE CAN COUNT A DOZEN (GAYLY->GAILY) PAINTED (FARMHOUSES->FARM HOUSES) THE GILDED WEATHER (VANES->VEINS) ON THE BIG RED BARNS WINK AT EACH OTHER ACROSS THE GREEN AND BROWN AND YELLOW FIELDS +237-134493-0004-2392: THE AIR AND THE EARTH ARE CURIOUSLY MATED AND INTERMINGLED AS IF THE ONE WERE THE BREATH OF THE OTHER +237-134493-0005-2393: HE WAS A SPLENDID FIGURE OF A BOY TALL AND STRAIGHT AS A YOUNG PINE TREE WITH A HANDSOME HEAD AND STORMY GRAY EYES DEEPLY SET UNDER A SERIOUS BROW +237-134493-0006-2394: THAT'S NOT MUCH OF A JOB FOR AN ATHLETE HERE I'VE BEEN (TO->*) TOWN AND BACK +237-134493-0007-2395: (ALEXANDRA LETS->ALEXANDER THAT'S) YOU SLEEP LATE +237-134493-0008-2396: SHE GATHERED UP HER REINS +237-134493-0009-2397: PLEASE WAIT FOR ME MARIE (EMIL->AMYL) COAXED +237-134493-0010-2398: I NEVER SEE (LOU'S SCYTHE->LOOSE SIGH) OVER HERE +237-134493-0011-2399: HOW BROWN YOU'VE GOT SINCE YOU CAME HOME I WISH I HAD AN ATHLETE TO MOW MY ORCHARD +237-134493-0012-2400: I GET WET TO MY KNEES WHEN I GO DOWN TO (PICK CHERRIES->PICTURES) +237-134493-0013-2401: INDEED HE HAD LOOKED AWAY WITH (THE->A) PURPOSE OF NOT SEEING IT +237-134493-0014-2402: THEY THINK (YOU'RE->YOU ARE) PROUD BECAUSE YOU'VE BEEN AWAY TO SCHOOL OR SOMETHING +237-134493-0015-2403: THERE WAS SOMETHING INDIVIDUAL ABOUT THE GREAT FARM A MOST UNUSUAL TRIMNESS AND CARE FOR DETAIL +237-134493-0016-2404: ON EITHER SIDE OF THE ROAD FOR A MILE BEFORE YOU REACHED THE FOOT OF THE HILL STOOD TALL (OSAGE->O SAGE) ORANGE HEDGES THEIR GLOSSY GREEN MARKING OFF THE YELLOW FIELDS +237-134493-0017-2405: ANY ONE THEREABOUTS WOULD HAVE TOLD YOU THAT THIS WAS ONE OF THE RICHEST FARMS ON THE DIVIDE AND THAT THE FARMER WAS A WOMAN ALEXANDRA BERGSON +237-134493-0018-2406: THERE IS EVEN A WHITE ROW OF BEEHIVES IN THE ORCHARD UNDER THE WALNUT TREES +237-134500-0000-2345: FRANK READ ENGLISH SLOWLY AND THE MORE HE READ ABOUT THIS DIVORCE CASE THE ANGRIER HE GREW +237-134500-0001-2346: MARIE SIGHED +237-134500-0002-2347: A (BRISK WIND->BRAY SQUINT) HAD COME UP AND WAS DRIVING PUFFY WHITE CLOUDS ACROSS THE SKY +237-134500-0003-2348: THE ORCHARD WAS SPARKLING AND RIPPLING IN THE SUN +237-134500-0004-2349: THAT INVITATION DECIDED HER +237-134500-0005-2350: OH BUT I'M GLAD TO GET THIS PLACE MOWED +237-134500-0006-2351: JUST SMELL THE WILD ROSES THEY ARE ALWAYS SO SPICY AFTER A RAIN +237-134500-0007-2352: WE NEVER HAD SO MANY OF THEM IN HERE BEFORE +237-134500-0008-2353: I SUPPOSE IT'S THE WET 
SEASON WILL YOU HAVE TO CUT THEM TOO +237-134500-0009-2354: I SUPPOSE THAT'S THE (WET->WHITE) SEASON TOO THEN +237-134500-0010-2355: IT'S EXCITING TO SEE EVERYTHING GROWING SO FAST AND TO GET THE GRASS CUT +237-134500-0011-2356: AREN'T YOU SPLASHED LOOK AT THE SPIDER (WEBS->WHIPS) ALL OVER THE GRASS +237-134500-0012-2357: IN A FEW MOMENTS HE HEARD THE CHERRIES DROPPING SMARTLY INTO THE PAIL AND HE BEGAN TO SWING HIS SCYTHE WITH THAT LONG EVEN STROKE THAT FEW AMERICAN BOYS EVER LEARN +237-134500-0013-2358: MARIE PICKED (*->THE) CHERRIES AND SANG SOFTLY TO HERSELF STRIPPING ONE GLITTERING (BRANCH->RANCH) AFTER ANOTHER SHIVERING WHEN SHE CAUGHT A SHOWER OF RAINDROPS ON HER NECK AND HAIR +237-134500-0014-2359: AND (EMIL->AMIEL) MOWED HIS WAY SLOWLY DOWN TOWARD THE CHERRY TREES +237-134500-0015-2360: THAT SUMMER THE RAINS HAD BEEN SO MANY AND OPPORTUNE THAT IT WAS ALMOST MORE THAN (SHABATA->CHEBATA) AND HIS MAN COULD DO TO KEEP UP WITH THE CORN THE ORCHARD WAS A NEGLECTED WILDERNESS +237-134500-0016-2361: I DON'T KNOW ALL OF THEM BUT I KNOW LINDENS ARE +237-134500-0017-2362: IF I FEEL THAT WAY I FEEL THAT WAY +237-134500-0018-2363: HE REACHED UP AMONG THE BRANCHES AND BEGAN TO PICK THE SWEET INSIPID FRUIT LONG IVORY COLORED BERRIES TIPPED WITH FAINT PINK LIKE WHITE CORAL THAT FALL TO THE GROUND UNHEEDED ALL SUMMER THROUGH +237-134500-0019-2364: HE DROPPED A HANDFUL INTO HER LAP +237-134500-0020-2365: YES DON'T YOU +237-134500-0021-2366: OH EVER SO MUCH ONLY HE SEEMS KIND OF STAID AND SCHOOL TEACHERY +237-134500-0022-2367: WHEN SHE USED TO TELL ME ABOUT HIM I ALWAYS WONDERED WHETHER SHE WASN'T A LITTLE IN LOVE WITH HIM +237-134500-0023-2368: IT WOULD SERVE YOU ALL RIGHT IF SHE WALKED OFF WITH (CARL->KARL) +237-134500-0024-2369: I LIKE TO TALK TO (CARL->KARL) ABOUT NEW YORK AND WHAT A FELLOW CAN DO THERE +237-134500-0025-2370: OH (EMIL->AMIEL) +237-134500-0026-2371: SURELY YOU ARE NOT THINKING OF GOING OFF THERE +237-134500-0027-2372: MARIE'S FACE FELL UNDER HIS BROODING GAZE +237-134500-0028-2373: (I'M->I AM) SURE (ALEXANDRA HOPES->ALEXANDER HELPS) YOU WILL STAY ON HERE SHE MURMURED +237-134500-0029-2374: I DON'T WANT TO STAND AROUND AND LOOK ON +237-134500-0030-2375: I WANT TO BE DOING SOMETHING ON MY OWN ACCOUNT +237-134500-0031-2376: SOMETIMES I DON'T WANT TO DO ANYTHING AT ALL AND SOMETIMES I WANT TO PULL THE FOUR CORNERS OF THE DIVIDE TOGETHER HE THREW OUT HIS ARM AND BROUGHT IT BACK WITH A JERK SO LIKE A (TABLE CLOTH->TABLECLOTH) +237-134500-0032-2377: I GET TIRED OF SEEING (MEN->MAN) AND HORSES GOING UP AND DOWN UP AND DOWN +237-134500-0033-2378: I WISH YOU WEREN'T SO RESTLESS AND DIDN'T GET SO WORKED UP OVER THINGS SHE SAID SADLY +237-134500-0034-2379: THANK YOU HE RETURNED SHORTLY +237-134500-0035-2380: AND (YOU->WHO) NEVER USED TO BE CROSS TO ME +237-134500-0036-2381: I CAN'T PLAY WITH YOU LIKE A LITTLE BOY ANY MORE HE SAID SLOWLY THAT'S WHAT YOU MISS MARIE +237-134500-0037-2382: BUT (EMIL->AMIEL) IF I UNDERSTAND (THEN->IN) ALL OUR GOOD TIMES ARE OVER WE CAN NEVER DO NICE THINGS TOGETHER ANY MORE +237-134500-0038-2383: AND ANYHOW THERE'S NOTHING (TO->TOO) UNDERSTAND +237-134500-0039-2384: THAT WON'T LAST IT WILL GO AWAY AND THINGS WILL BE JUST AS THEY USED TO +237-134500-0040-2385: I PRAY FOR YOU BUT THAT'S NOT THE SAME AS IF YOU PRAYED YOURSELF +237-134500-0041-2386: I CAN'T PRAY TO HAVE THE THINGS I WANT HE SAID SLOWLY AND I WON'T PRAY NOT TO HAVE THEM NOT IF I'M DAMNED FOR IT +237-134500-0042-2387: THEN ALL OUR GOOD TIMES ARE OVER +260-123286-0000-200: SATURDAY AUGUST FIFTEENTH THE 
SEA UNBROKEN ALL ROUND NO LAND IN SIGHT +260-123286-0001-201: THE HORIZON SEEMS EXTREMELY DISTANT +260-123286-0002-202: ALL MY DANGER AND SUFFERINGS WERE NEEDED TO STRIKE A SPARK OF HUMAN FEELING OUT OF HIM BUT NOW THAT I AM WELL HIS NATURE HAS RESUMED ITS SWAY +260-123286-0003-203: YOU SEEM ANXIOUS MY UNCLE I SAID SEEING HIM CONTINUALLY WITH HIS GLASS TO HIS EYE ANXIOUS +260-123286-0004-204: ONE MIGHT BE WITH LESS REASON THAN NOW +260-123286-0005-205: I AM NOT COMPLAINING THAT THE RATE IS SLOW BUT THAT THE (SEA->SEAT) IS SO WIDE +260-123286-0006-206: WE ARE LOSING TIME AND THE FACT IS I HAVE NOT COME ALL THIS WAY TO TAKE A LITTLE SAIL UPON A POND ON A RAFT +260-123286-0007-207: HE CALLED (THIS->THE) SEA (A POND->UPON) AND OUR LONG VOYAGE TAKING A LITTLE SAIL +260-123286-0008-208: THEREFORE DON'T TALK TO ME ABOUT VIEWS AND PROSPECTS +260-123286-0009-209: I TAKE THIS AS MY ANSWER AND I LEAVE THE PROFESSOR TO BITE HIS LIPS WITH IMPATIENCE +260-123286-0010-210: SUNDAY AUGUST SIXTEENTH +260-123286-0011-211: NOTHING NEW (WEATHER->WHETHER) UNCHANGED THE WIND FRESHENS +260-123286-0012-212: BUT THERE SEEMED NO REASON (TO->OF) FEAR +260-123286-0013-213: THE SHADOW OF THE RAFT WAS CLEARLY OUTLINED UPON THE SURFACE OF THE WAVES +260-123286-0014-214: TRULY (THIS->THE) SEA IS OF INFINITE WIDTH +260-123286-0015-215: IT MUST BE AS WIDE AS THE MEDITERRANEAN OR THE ATLANTIC AND WHY NOT +260-123286-0016-216: THESE THOUGHTS AGITATED ME ALL DAY AND MY IMAGINATION SCARCELY (CALMED->CALM) DOWN AFTER SEVERAL HOURS (SLEEP->SLEEVE) +260-123286-0017-217: I SHUDDER AS I RECALL THESE MONSTERS TO MY REMEMBRANCE +260-123286-0018-218: I SAW AT THE HAMBURG MUSEUM THE SKELETON OF ONE OF THESE CREATURES THIRTY FEET IN LENGTH +260-123286-0019-219: I SUPPOSE PROFESSOR LIEDENBROCK WAS OF MY OPINION TOO AND EVEN SHARED MY FEARS FOR AFTER HAVING EXAMINED THE PICK HIS EYES TRAVERSED THE OCEAN FROM SIDE TO SIDE +260-123286-0020-220: TUESDAY AUGUST EIGHTEENTH +260-123286-0021-221: DURING HIS WATCH I SLEPT +260-123286-0022-222: TWO HOURS AFTERWARDS A TERRIBLE SHOCK AWOKE ME +260-123286-0023-223: THE RAFT WAS HEAVED UP ON A WATERY MOUNTAIN AND PITCHED DOWN AGAIN AT A DISTANCE OF TWENTY FATHOMS +260-123286-0024-224: THERE'S A (WHALE->WAIL) A (WHALE->WELL) CRIED THE PROFESSOR +260-123286-0025-225: (FLIGHT->FIGHT) WAS OUT OF THE QUESTION NOW THE REPTILES ROSE THEY WHEELED AROUND OUR LITTLE RAFT WITH A RAPIDITY GREATER THAN THAT OF EXPRESS TRAINS +260-123286-0026-226: TWO (MONSTERS->MASTERS) ONLY WERE CREATING ALL THIS COMMOTION AND BEFORE MY EYES ARE (TWO->TOO) REPTILES OF THE PRIMITIVE WORLD +260-123286-0027-227: I CAN DISTINGUISH THE EYE OF THE (ICHTHYOSAURUS->ICT THEASURUS) GLOWING LIKE A RED HOT (COAL->CO) AND AS LARGE AS A MAN'S HEAD +260-123286-0028-228: ITS JAW IS ENORMOUS AND ACCORDING TO NATURALISTS IT IS ARMED WITH NO LESS THAN ONE HUNDRED AND EIGHTY TWO TEETH +260-123286-0029-229: THOSE HUGE CREATURES ATTACKED EACH OTHER WITH THE GREATEST ANIMOSITY +260-123286-0030-230: SUDDENLY THE (ICHTHYOSAURUS->ICTOISORUS) AND THE (PLESIOSAURUS->PLECIUS) DISAPPEAR BELOW LEAVING A (WHIRLPOOL->WAR POOL) EDDYING IN THE WATER +260-123286-0031-231: AS FOR THE (ICHTHYOSAURUS->ITTHIASORIS) HAS HE RETURNED (TO HIS->WHOSE) SUBMARINE CAVERN +260-123288-0000-232: THE ROARINGS BECOME LOST IN THE DISTANCE +260-123288-0001-233: THE WEATHER IF WE MAY USE (THAT->THE) TERM WILL CHANGE BEFORE (LONG->LAWN) +260-123288-0002-234: THE ATMOSPHERE IS CHARGED WITH (VAPOURS->VAPORS) PERVADED WITH THE ELECTRICITY GENERATED BY THE EVAPORATION OF (SALINE->SAILING) 
WATERS +260-123288-0003-235: THE ELECTRIC LIGHT CAN SCARCELY PENETRATE THROUGH THE DENSE CURTAIN WHICH (HAS->IS) DROPPED OVER THE THEATRE ON WHICH THE BATTLE OF THE ELEMENTS IS ABOUT TO BE WAGED +260-123288-0004-236: THE AIR IS HEAVY THE SEA IS CALM +260-123288-0005-237: FROM TIME TO TIME A FLEECY TUFT OF (MIST->MISTS) WITH YET SOME GLEAMING LIGHT LEFT UPON IT DROPS DOWN UPON THE DENSE FLOOR OF GREY AND LOSES ITSELF IN THE (OPAQUE->OPE) AND IMPENETRABLE MASS +260-123288-0006-238: THE ATMOSPHERE (IS->AS) EVIDENTLY CHARGED (AND->IN) SURCHARGED WITH ELECTRICITY +260-123288-0007-239: THE WIND NEVER LULLS BUT TO ACQUIRE INCREASED STRENGTH THE VAST BANK OF HEAVY CLOUDS IS A HUGE RESERVOIR OF FEARFUL WINDY GUSTS AND RUSHING STORMS +260-123288-0008-240: THERE'S A HEAVY STORM COMING ON I CRIED POINTING TOWARDS THE HORIZON +260-123288-0009-241: THOSE CLOUDS SEEM AS IF THEY WERE GOING TO CRUSH THE SEA +260-123288-0010-242: ON THE MAST ALREADY I SEE THE LIGHT PLAY OF A (LAMBENT->LAMENT) SAINT (ELMO'S->AIRABLE'S) FIRE THE OUTSTRETCHED (SAIL->SILL) CATCHES NOT A BREATH OF WIND AND HANGS LIKE A SHEET OF LEAD +260-123288-0011-243: BUT IF WE HAVE NOW CEASED TO ADVANCE WHY DO WE YET LEAVE THAT (SAIL->SALE) LOOSE WHICH AT THE FIRST SHOCK OF (THE->A) TEMPEST MAY CAPSIZE US IN A MOMENT +260-123288-0012-244: THAT WILL BE (*->THE) SAFEST NO NO NEVER +260-123288-0013-245: THE PILED UP (VAPOURS CONDENSE->VAPORS CONTENTS) INTO WATER AND THE AIR PUT INTO VIOLENT ACTION TO SUPPLY THE VACUUM LEFT BY THE CONDENSATION OF THE (MISTS->MIST) ROUSES ITSELF INTO A WHIRLWIND +260-123288-0014-246: HANS STIRS NOT +260-123288-0015-247: FROM THE UNDER SURFACE OF THE CLOUDS THERE ARE CONTINUAL (EMISSIONS->MISSIONS) OF LURID LIGHT ELECTRIC MATTER IS IN CONTINUAL EVOLUTION FROM THEIR COMPONENT MOLECULES THE GASEOUS ELEMENTS OF THE AIR NEED TO BE SLAKED WITH MOISTURE FOR INNUMERABLE COLUMNS OF WATER RUSH UPWARDS INTO THE AIR AND FALL BACK AGAIN IN WHITE FOAM +260-123288-0016-248: I REFER TO THE THERMOMETER IT INDICATES THE FIGURE IS OBLITERATED +260-123288-0017-249: IS THE ATMOSPHERIC CONDITION HAVING ONCE REACHED (THIS DENSITY->OSTENSITY) TO BECOME FINAL +260-123288-0018-250: THE RAFT BEARS ON STILL TO THE SOUTH EAST +260-123288-0019-251: AT NOON THE VIOLENCE OF THE STORM REDOUBLES +260-123288-0020-252: EACH OF US IS LASHED (TO->IN) SOME PART OF THE RAFT +260-123288-0021-253: THE WAVES RISE ABOVE OUR HEADS +260-123288-0022-254: THEY (SEEM->SEEMED) TO BE WE ARE LOST BUT I AM NOT SURE +260-123288-0023-255: HE (NODS->GNAWEDS) HIS CONSENT +260-123288-0024-256: THE (FIREBALL->FIRE BALL) HALF OF IT WHITE HALF AZURE BLUE AND THE SIZE OF A TEN INCH (SHELL->CHILL) MOVED SLOWLY ABOUT THE RAFT BUT REVOLVING ON ITS OWN AXIS WITH ASTONISHING VELOCITY AS IF (WHIPPED->WHIP) ROUND BY THE FORCE OF THE WHIRLWIND +260-123288-0025-257: HERE IT COMES THERE IT GLIDES NOW IT IS UP THE RAGGED STUMP OF THE MAST THENCE IT LIGHTLY LEAPS ON THE PROVISION BAG DESCENDS WITH A LIGHT BOUND AND JUST SKIMS THE POWDER MAGAZINE HORRIBLE +260-123288-0026-258: WE SHALL BE BLOWN UP BUT NO THE DAZZLING DISK OF MYSTERIOUS LIGHT NIMBLY LEAPS ASIDE IT APPROACHES HANS WHO FIXES HIS BLUE EYE UPON IT STEADILY IT THREATENS THE HEAD OF MY UNCLE WHO FALLS UPON HIS KNEES WITH HIS HEAD DOWN TO AVOID IT +260-123288-0027-259: A SUFFOCATING SMELL OF NITROGEN FILLS THE AIR IT ENTERS THE THROAT IT FILLS THE LUNGS +260-123288-0028-260: WE SUFFER STIFLING PAINS +260-123440-0000-179: AND HOW ODD THE DIRECTIONS WILL LOOK +260-123440-0001-180: POOR ALICE +260-123440-0002-181: IT WAS THE WHITE 
RABBIT RETURNING SPLENDIDLY DRESSED WITH A PAIR OF WHITE KID GLOVES IN ONE HAND AND A LARGE FAN IN THE OTHER HE CAME TROTTING ALONG IN A GREAT HURRY MUTTERING TO HIMSELF AS HE CAME OH THE DUCHESS THE DUCHESS +260-123440-0003-182: OH WON'T SHE BE SAVAGE IF I'VE KEPT HER WAITING +260-123440-0004-183: ALICE TOOK UP THE FAN AND GLOVES AND AS THE HALL WAS VERY HOT SHE KEPT FANNING HERSELF ALL THE TIME SHE WENT ON TALKING DEAR DEAR HOW QUEER EVERYTHING IS TO DAY +260-123440-0005-184: AND YESTERDAY (THINGS->THANKS) WENT ON JUST AS USUAL +260-123440-0006-185: I WONDER IF I'VE BEEN CHANGED IN THE NIGHT +260-123440-0007-186: I ALMOST THINK I CAN REMEMBER FEELING (A->*) LITTLE DIFFERENT +260-123440-0008-187: I'LL TRY IF I KNOW ALL THE THINGS I USED TO KNOW +260-123440-0009-188: I SHALL NEVER GET TO TWENTY AT THAT RATE +260-123440-0010-189: HOW CHEERFULLY HE SEEMS TO GRIN HOW NEATLY SPREAD HIS CLAWS AND WELCOME LITTLE FISHES IN WITH GENTLY SMILING JAWS +260-123440-0011-190: NO I'VE MADE UP MY MIND ABOUT IT IF (I'M MABEL->I MAYBEL) I'LL STAY DOWN HERE +260-123440-0012-191: IT'LL BE NO USE (THEIR->THEY'RE) PUTTING THEIR HEADS DOWN AND SAYING COME UP AGAIN DEAR +260-123440-0013-192: I AM SO VERY TIRED OF BEING ALL ALONE HERE +260-123440-0014-193: AND I DECLARE IT'S TOO BAD THAT IT IS +260-123440-0015-194: I WISH I HADN'T CRIED SO MUCH SAID ALICE AS SHE SWAM ABOUT TRYING TO FIND HER WAY OUT +260-123440-0016-195: I SHALL BE PUNISHED FOR IT NOW I SUPPOSE BY BEING DROWNED IN MY OWN TEARS +260-123440-0017-196: THAT WILL BE A QUEER THING TO BE SURE +260-123440-0018-197: I AM VERY TIRED OF SWIMMING ABOUT HERE O MOUSE +260-123440-0019-198: CRIED ALICE AGAIN FOR THIS TIME THE MOUSE WAS BRISTLING ALL OVER AND SHE FELT CERTAIN IT MUST BE REALLY OFFENDED +260-123440-0020-199: WE WON'T TALK ABOUT HER ANY MORE IF YOU'D RATHER NOT WE INDEED +2830-3979-0000-1120: WE WANT YOU TO HELP US PUBLISH SOME LEADING WORK OF (LUTHER'S->LUTHERS) FOR THE GENERAL AMERICAN MARKET WILL YOU DO IT +2830-3979-0001-1121: THE CONDITION IS THAT I WILL BE PERMITTED TO MAKE LUTHER TALK AMERICAN (STREAMLINE->STREAM LINE) HIM SO TO SPEAK BECAUSE YOU WILL NEVER GET PEOPLE WHETHER IN OR OUTSIDE THE LUTHERAN CHURCH ACTUALLY TO READ LUTHER UNLESS WE MAKE HIM TALK AS HE WOULD TALK (TODAY->TO DAY) TO AMERICANS +2830-3979-0002-1122: LET US BEGIN WITH THAT HIS COMMENTARY (ON GALATIANS->ONGOLATIONS) +2830-3979-0003-1123: THE UNDERTAKING WHICH SEEMED SO ATTRACTIVE WHEN VIEWED AS A LITERARY TASK PROVED A MOST DIFFICULT ONE AND AT TIMES BECAME OPPRESSIVE +2830-3979-0004-1124: IT WAS WRITTEN IN LATIN +2830-3979-0005-1125: THE WORK HAD TO BE CONDENSED +2830-3979-0006-1126: A WORD SHOULD NOW BE SAID ABOUT THE ORIGIN OF LUTHER'S COMMENTARY (ON GALATIANS->ANGULATIONS) +2830-3979-0007-1127: MUCH LATER WHEN A FRIEND OF HIS WAS PREPARING AN (EDITION->ADDITION) OF ALL HIS LATIN WORKS HE REMARKED TO HIS HOME CIRCLE IF I HAD MY WAY ABOUT IT THEY WOULD REPUBLISH ONLY THOSE OF MY BOOKS WHICH HAVE DOCTRINE (MY GALATIANS->MIGALLATIONS) FOR INSTANCE +2830-3979-0008-1128: IN OTHER WORDS THESE THREE MEN TOOK DOWN THE LECTURES WHICH LUTHER ADDRESSED TO HIS STUDENTS IN THE COURSE OF GALATIANS AND (ROERER->ROAR) PREPARED THE MANUSCRIPT FOR THE PRINTER +2830-3979-0009-1129: IT PRESENTS LIKE NO OTHER OF LUTHER'S WRITINGS THE CENTRAL THOUGHT OF CHRISTIANITY THE JUSTIFICATION OF THE SINNER FOR THE SAKE OF (CHRIST'S->CHRIST) MERITS ALONE +2830-3979-0010-1130: BUT THE ESSENCE OF LUTHER'S LECTURES IS THERE +2830-3979-0011-1131: THE LORD WHO HAS GIVEN US POWER TO TEACH AND TO HEAR LET 
HIM ALSO GIVE US THE POWER TO SERVE AND TO DO LUKE (TWO->TOO) +2830-3979-0012-1132: THE WORD OF OUR GOD SHALL STAND (FOREVER->FOR EVER) +2830-3980-0000-1043: IN EVERY WAY THEY SOUGHT TO UNDERMINE THE AUTHORITY OF SAINT PAUL +2830-3980-0001-1044: THEY SAID TO THE GALATIANS YOU HAVE NO RIGHT TO THINK HIGHLY OF PAUL +2830-3980-0002-1045: HE WAS THE LAST TO TURN TO CHRIST +2830-3980-0003-1046: PAUL CAME LATER (AND IS->IN HIS) BENEATH US +2830-3980-0004-1047: INDEED HE PERSECUTED THE CHURCH OF CHRIST FOR A LONG TIME +2830-3980-0005-1048: DO YOU SUPPOSE THAT GOD FOR THE SAKE OF A FEW LUTHERAN HERETICS WOULD DISOWN HIS ENTIRE CHURCH +2830-3980-0006-1049: AGAINST THESE BOASTING FALSE APOSTLES PAUL BOLDLY DEFENDS HIS APOSTOLIC AUTHORITY AND MINISTRY +2830-3980-0007-1050: AS THE AMBASSADOR OF A GOVERNMENT IS HONORED FOR HIS OFFICE AND NOT FOR HIS PRIVATE PERSON SO THE MINISTER OF CHRIST SHOULD EXALT HIS OFFICE IN ORDER TO GAIN AUTHORITY AMONG MEN +2830-3980-0008-1051: PAUL TAKES PRIDE IN HIS MINISTRY NOT TO HIS OWN (PRAISE->PHRASE) BUT TO THE PRAISE OF GOD +2830-3980-0009-1052: PAUL AN APOSTLE NOT OF MEN ET CETERA +2830-3980-0010-1053: EITHER HE CALLS MINISTERS THROUGH THE AGENCY OF MEN OR HE CALLS THEM DIRECTLY AS HE CALLED THE PROPHETS AND APOSTLES +2830-3980-0011-1054: PAUL DECLARES THAT THE FALSE APOSTLES (WERE CALLED OR SENT->RECALL THEIR SCENT) NEITHER BY MEN NOR BY MAN +2830-3980-0012-1055: THE MOST THEY COULD CLAIM IS THAT THEY WERE SENT BY OTHERS +2830-3980-0013-1056: HE MENTIONS THE APOSTLES FIRST BECAUSE THEY WERE APPOINTED DIRECTLY BY GOD +2830-3980-0014-1057: THE CALL IS NOT TO BE TAKEN LIGHTLY +2830-3980-0015-1058: FOR A PERSON TO POSSESS KNOWLEDGE IS NOT ENOUGH +2830-3980-0016-1059: IT SPOILS ONE'S BEST WORK +2830-3980-0017-1060: WHEN I WAS A YOUNG MAN I THOUGHT PAUL WAS MAKING TOO MUCH OF HIS CALL +2830-3980-0018-1061: I DID NOT THEN REALIZE THE IMPORTANCE OF THE MINISTRY +2830-3980-0019-1062: I KNEW NOTHING OF THE DOCTRINE OF FAITH BECAUSE WE WERE TAUGHT (SOPHISTRY->SOPHISTRI) INSTEAD OF CERTAINTY AND NOBODY UNDERSTOOD SPIRITUAL BOASTING +2830-3980-0020-1063: THIS IS NO SINFUL PRIDE IT IS (HOLY->WHOLLY) PRIDE +2830-3980-0021-1064: AND GOD THE FATHER WHO RAISED HIM FROM THE DEAD +2830-3980-0022-1065: THE (CLAUSE SEEMS SUPERFLUOUS->CLAWS SEEMED SUPERVOUS) ON FIRST SIGHT +2830-3980-0023-1066: THESE PERVERTERS OF THE RIGHTEOUSNESS OF CHRIST RESIST THE FATHER AND THE SON AND THE WORKS OF THEM BOTH +2830-3980-0024-1067: IN THIS WHOLE EPISTLE PAUL TREATS OF THE RESURRECTION OF CHRIST +2830-3980-0025-1068: BY HIS RESURRECTION CHRIST WON THE VICTORY OVER LAW SIN FLESH WORLD DEVIL DEATH HELL AND EVERY EVIL +2830-3980-0026-1069: (VERSE->FIRST) TWO +2830-3980-0027-1070: AND ALL THE BRETHREN WHICH ARE WITH ME +2830-3980-0028-1071: THIS SHOULD GO FAR IN SHUTTING THE MOUTHS OF THE FALSE APOSTLES +2830-3980-0029-1072: ALTHOUGH THE BRETHREN WITH ME ARE NOT APOSTLES LIKE MYSELF YET THEY ARE ALL OF ONE MIND WITH ME THINK WRITE AND TEACH AS I DO +2830-3980-0030-1073: THEY DO NOT GO WHERE THE ENEMIES OF THE GOSPEL PREDOMINATE THEY GO (WHERE->WITH) THE CHRISTIANS ARE +2830-3980-0031-1074: WHY DO THEY NOT INVADE THE CATHOLIC PROVINCES AND PREACH THEIR DOCTRINE TO GODLESS PRINCES BISHOPS AND DOCTORS AS WE HAVE DONE BY THE HELP OF GOD +2830-3980-0032-1075: WE LOOK FOR THAT REWARD WHICH (EYE->I) HATH NOT SEEN NOR EAR HEARD NEITHER HATH ENTERED INTO THE HEART OF MAN +2830-3980-0033-1076: NOT ALL THE GALATIANS HAD BECOME PERVERTED +2830-3980-0034-1077: THESE MEANS CANNOT BE CONTAMINATED +2830-3980-0035-1078: 
THEY (REMAIN->REMAINED) DIVINE REGARDLESS OF MEN'S OPINION +2830-3980-0036-1079: WHEREVER THE MEANS OF GRACE ARE FOUND THERE IS THE HOLY CHURCH EVEN THOUGH ANTICHRIST REIGNS THERE +2830-3980-0037-1080: SO MUCH FOR THE TITLE OF THE EPISTLE NOW FOLLOWS THE GREETING OF THE APOSTLE VERSE THREE +2830-3980-0038-1081: GRACE BE TO YOU (AND->IN) PEACE FROM GOD THE FATHER AND FROM OUR LORD JESUS CHRIST +2830-3980-0039-1082: THE TERMS OF GRACE AND PEACE ARE COMMON TERMS WITH PAUL AND ARE NOW PRETTY WELL UNDERSTOOD +2830-3980-0040-1083: THE GREETING OF THE APOSTLE IS REFRESHING +2830-3980-0041-1084: GRACE INVOLVES THE REMISSION OF SINS PEACE AND A HAPPY CONSCIENCE +2830-3980-0042-1085: THE WORLD (BRANDS->BRAINS) THIS A PERNICIOUS DOCTRINE +2830-3980-0043-1086: EXPERIENCE PROVES THIS +2830-3980-0044-1087: HOWEVER THE GRACE AND PEACE OF GOD WILL +2830-3980-0045-1088: MEN SHOULD NOT SPECULATE ABOUT THE NATURE OF GOD +2830-3980-0046-1089: WAS IT NOT ENOUGH TO SAY FROM GOD THE FATHER +2830-3980-0047-1090: TO DO SO IS TO LOSE GOD ALTOGETHER BECAUSE GOD BECOMES INTOLERABLE WHEN WE SEEK TO MEASURE AND TO COMPREHEND HIS INFINITE MAJESTY +2830-3980-0048-1091: HE CAME DOWN TO EARTH LIVED AMONG MEN SUFFERED WAS CRUCIFIED AND THEN HE DIED STANDING CLEARLY BEFORE US SO THAT OUR HEARTS AND EYES MAY FASTEN UPON HIM +2830-3980-0049-1092: EMBRACE HIM AND FORGET ABOUT THE NATURE OF GOD +2830-3980-0050-1093: DID NOT CHRIST HIMSELF SAY I AM THE WAY AND THE TRUTH AND THE LIFE NO MAN COMETH UNTO THE FATHER BUT BY ME +2830-3980-0051-1094: WHEN YOU ARGUE ABOUT THE NATURE OF GOD APART FROM THE QUESTION OF JUSTIFICATION YOU MAY BE AS PROFOUND AS YOU LIKE +2830-3980-0052-1095: WE ARE TO HEAR CHRIST WHO HAS BEEN APPOINTED BY THE FATHER AS OUR DIVINE TEACHER +2830-3980-0053-1096: AT THE SAME TIME PAUL CONFIRMS OUR CREED THAT CHRIST IS VERY GOD +2830-3980-0054-1097: THAT CHRIST IS VERY GOD IS APPARENT IN THAT PAUL ASCRIBES TO HIM DIVINE POWERS EQUALLY WITH THE FATHER AS FOR INSTANCE THE POWER (TO DISPENSE->DOES SPENCE) GRACE AND PEACE +2830-3980-0055-1098: TO BESTOW PEACE AND GRACE LIES IN THE PROVINCE OF GOD WHO ALONE CAN CREATE THESE BLESSINGS THE ANGELS CANNOT +2830-3980-0056-1099: OTHERWISE PAUL SHOULD HAVE WRITTEN GRACE FROM GOD THE FATHER (AND->IN) PEACE FROM OUR LORD JESUS CHRIST +2830-3980-0057-1100: THE ARIANS TOOK CHRIST FOR A NOBLE AND PERFECT CREATURE SUPERIOR EVEN TO THE ANGELS BECAUSE BY HIM GOD CREATED HEAVEN AND EARTH +2830-3980-0058-1101: MOHAMMED ALSO SPEAKS HIGHLY OF CHRIST +2830-3980-0059-1102: PAUL STICKS TO HIS THEME +2830-3980-0060-1103: HE NEVER LOSES SIGHT OF THE PURPOSE OF HIS EPISTLE +2830-3980-0061-1104: NOT GOLD OR SILVER OR (PASCHAL->PASSIONAL) LAMBS OR AN ANGEL BUT HIMSELF WHAT FOR +2830-3980-0062-1105: NOT FOR A CROWN OR A KINGDOM OR (OUR->A) GOODNESS (BUT FOR->BEFORE) OUR SINS +2830-3980-0063-1106: UNDERSCORE THESE WORDS FOR THEY ARE FULL OF COMFORT FOR SORE CONSCIENCES +2830-3980-0064-1107: HOW MAY WE OBTAIN REMISSION OF OUR SINS +2830-3980-0065-1108: PAUL ANSWERS THE MAN WHO IS NAMED JESUS CHRIST AND THE SON OF GOD GAVE HIMSELF FOR OUR SINS +2830-3980-0066-1109: SINCE CHRIST WAS GIVEN FOR OUR SINS IT STANDS TO REASON THAT THEY CANNOT BE PUT AWAY BY OUR OWN EFFORTS +2830-3980-0067-1110: THIS SENTENCE ALSO DEFINES OUR SINS AS GREAT SO GREAT IN FACT THAT THE WHOLE WORLD COULD NOT MAKE AMENDS FOR A SINGLE SIN +2830-3980-0068-1111: THE GREATNESS OF THE RANSOM CHRIST THE SON OF GOD INDICATES THIS +2830-3980-0069-1112: THE VICIOUS CHARACTER OF SIN IS BROUGHT OUT BY THE WORDS WHO GAVE HIMSELF FOR OUR SINS 
+2830-3980-0070-1113: BUT WE ARE CARELESS WE MAKE LIGHT OF SIN +2830-3980-0071-1114: WE THINK THAT BY SOME LITTLE WORK OR MERIT WE CAN DISMISS (SIN->IN) +2830-3980-0072-1115: THIS PASSAGE THEN BEARS OUT THE FACT THAT ALL MEN ARE SOLD UNDER SIN +2830-3980-0073-1116: THIS ATTITUDE SPRINGS FROM A FALSE CONCEPTION OF SIN THE CONCEPTION THAT SIN IS A SMALL MATTER EASILY TAKEN CARE OF BY GOOD WORKS THAT WE MUST PRESENT OURSELVES (UNTO->INTO) GOD WITH A GOOD CONSCIENCE THAT WE MUST FEEL NO SIN BEFORE WE MAY FEEL THAT CHRIST WAS GIVEN FOR OUR SINS +2830-3980-0074-1117: THIS ATTITUDE IS UNIVERSAL AND PARTICULARLY DEVELOPED IN THOSE WHO CONSIDER THEMSELVES BETTER THAN OTHERS +2830-3980-0075-1118: BUT THE REAL SIGNIFICANCE AND COMFORT OF THE WORDS FOR OUR SINS IS LOST UPON THEM +2830-3980-0076-1119: ON THE OTHER HAND WE ARE NOT TO REGARD THEM AS SO TERRIBLE THAT WE MUST DESPAIR +2961-960-0000-497: HE PASSES ABRUPTLY FROM PERSONS TO IDEAS AND NUMBERS AND FROM IDEAS AND NUMBERS TO PERSONS FROM THE HEAVENS TO (MAN->MEN) FROM ASTRONOMY TO PHYSIOLOGY HE CONFUSES OR RATHER DOES NOT DISTINGUISH SUBJECT AND OBJECT FIRST AND FINAL CAUSES AND IS DREAMING OF GEOMETRICAL FIGURES LOST IN A FLUX OF SENSE +2961-960-0001-498: THE INFLUENCE (WITH->WHICH) THE (TIMAEUS->TIMAS) HAS EXERCISED UPON POSTERITY IS DUE PARTLY TO A MISUNDERSTANDING +2961-960-0002-499: IN THE SUPPOSED DEPTHS OF THIS DIALOGUE THE NEO (PLATONISTS->PLATINISTS) FOUND HIDDEN MEANINGS (AND->IN) CONNECTIONS WITH THE JEWISH AND CHRISTIAN SCRIPTURES AND OUT OF THEM THEY ELICITED DOCTRINES QUITE AT VARIANCE WITH THE SPIRIT OF PLATO +2961-960-0003-500: THEY WERE ABSORBED IN HIS THEOLOGY AND WERE UNDER THE DOMINION OF HIS NAME WHILE THAT WHICH WAS TRULY GREAT AND TRULY (CHARACTERISTIC->CORRECTURISTIC) IN HIM HIS EFFORT TO REALIZE AND CONNECT ABSTRACTIONS WAS NOT UNDERSTOOD BY THEM AT ALL +2961-960-0004-501: THERE IS NO DANGER OF THE MODERN COMMENTATORS ON THE (TIMAEUS->TIMEUS) FALLING INTO THE ABSURDITIES OF THE (NEO PLATONISTS->NEOPLATANISTS) +2961-960-0005-502: IN THE PRESENT DAY WE ARE WELL AWARE THAT AN ANCIENT PHILOSOPHER IS TO BE INTERPRETED FROM HIMSELF AND BY THE CONTEMPORARY HISTORY OF THOUGHT +2961-960-0006-503: THE FANCIES OF THE (NEO PLATONISTS->NEW PLATINISTS) ARE ONLY INTERESTING TO US BECAUSE THEY EXHIBIT A PHASE OF THE HUMAN MIND WHICH (PREVAILED->PREVAIL) WIDELY IN THE FIRST CENTURIES OF THE CHRISTIAN ERA AND IS NOT WHOLLY EXTINCT IN OUR OWN DAY +2961-960-0007-504: BUT THEY HAVE NOTHING TO DO WITH THE INTERPRETATION OF PLATO AND IN SPIRIT THEY ARE OPPOSED TO HIM +2961-960-0008-505: WE DO NOT KNOW HOW PLATO WOULD HAVE ARRANGED HIS OWN (DIALOGUES->DIALECTS) OR WHETHER THE THOUGHT OF ARRANGING ANY OF THEM BESIDES THE (TWO TRILOGIES->TUTRILOGIES) WHICH HE HAS EXPRESSLY CONNECTED WAS EVER PRESENT TO HIS MIND +2961-960-0009-506: THE DIALOGUE IS PRIMARILY CONCERNED WITH THE ANIMAL CREATION INCLUDING UNDER THIS TERM THE HEAVENLY BODIES AND WITH MAN ONLY AS ONE AMONG THE ANIMALS +2961-960-0010-507: BUT HE HAS NOT AS YET (DEFINED->THE FIND) THIS INTERMEDIATE TERRITORY WHICH LIES SOMEWHERE BETWEEN MEDICINE AND MATHEMATICS AND HE WOULD HAVE FELT THAT THERE WAS AS GREAT AN IMPIETY IN RANKING THEORIES OF PHYSICS FIRST IN THE ORDER OF KNOWLEDGE AS IN PLACING THE BODY BEFORE THE SOUL +2961-960-0011-508: WITH (HERACLEITUS->HERACLITUS) HE ACKNOWLEDGES THE PERPETUAL FLUX LIKE (ANAXAGORAS->AN EXAGGARIST) HE ASSERTS THE PREDOMINANCE OF MIND ALTHOUGH ADMITTING AN ELEMENT OF NECESSITY WHICH REASON IS INCAPABLE OF SUBDUING LIKE THE (PYTHAGOREANS->PITHAGORIANS) HE 
SUPPOSES THE MYSTERY OF THE WORLD TO BE CONTAINED IN NUMBER +2961-960-0012-509: MANY IF NOT ALL THE ELEMENTS OF THE (PRE SOCRATIC->PRESOCRATIC) PHILOSOPHY ARE INCLUDED IN THE (TIMAEUS->TIMAS) +2961-960-0013-510: IT IS PROBABLE THAT THE RELATION OF THE IDEAS TO GOD OR OF GOD TO THE WORLD WAS DIFFERENTLY CONCEIVED BY HIM AT DIFFERENT TIMES OF HIS LIFE +2961-960-0014-511: THE IDEAS ALSO REMAIN BUT THEY HAVE BECOME TYPES IN NATURE FORMS OF MEN ANIMALS BIRDS FISHES +2961-960-0015-512: THE STYLE AND PLAN OF THE (TIMAEUS->TIMIRS) DIFFER GREATLY FROM THAT OF ANY OTHER OF THE PLATONIC DIALOGUES +2961-960-0016-513: BUT PLATO HAS NOT THE SAME (MASTERY->MYSTERY) OVER HIS INSTRUMENT WHICH HE EXHIBITS IN THE (PHAEDRUS->FEGERIS) OR (SYMPOSIUM->SUPPOSIUM) +2961-960-0017-514: NOTHING CAN EXCEED THE BEAUTY OR ART OF (THE->*) INTRODUCTION IN WHICH (HE IS->HIS) USING WORDS AFTER HIS ACCUSTOMED MANNER +2961-960-0018-515: BUT IN THE REST OF THE WORK THE POWER OF LANGUAGE SEEMS TO FAIL HIM AND THE DRAMATIC FORM IS WHOLLY GIVEN UP +2961-960-0019-516: HE COULD WRITE IN ONE STYLE BUT NOT IN ANOTHER AND THE GREEK LANGUAGE HAD NOT AS YET BEEN FASHIONED BY ANY POET OR PHILOSOPHER TO DESCRIBE PHYSICAL PHENOMENA +2961-960-0020-517: AND HENCE WE FIND THE SAME SORT OF CLUMSINESS IN THE (TIMAEUS->TIMAS) OF PLATO WHICH CHARACTERIZES THE PHILOSOPHICAL POEM OF LUCRETIUS +2961-960-0021-518: THERE IS A WANT OF FLOW AND OFTEN A DEFECT OF RHYTHM THE MEANING IS SOMETIMES OBSCURE AND THERE IS A GREATER USE OF APPOSITION (AND->IN) MORE OF REPETITION THAN OCCURS IN PLATO'S EARLIER WRITINGS +2961-960-0022-519: PLATO HAD NOT THE COMMAND OF HIS MATERIALS WHICH WOULD HAVE ENABLED HIM TO PRODUCE A PERFECT WORK OF ART +2961-961-0000-520: SOCRATES BEGINS (THE TIMAEUS->TO TIMAS) WITH A SUMMARY OF THE REPUBLIC +2961-961-0001-521: AND NOW HE DESIRES TO SEE THE IDEAL STATE SET IN MOTION HE WOULD LIKE TO KNOW HOW SHE BEHAVED IN SOME GREAT STRUGGLE +2961-961-0002-522: AND THEREFORE TO YOU I TURN (TIMAEUS->TO ME AS) CITIZEN OF LOCRIS WHO ARE AT ONCE A PHILOSOPHER (AND->IN) A STATESMAN AND TO YOU (CRITIAS->CRITIUS) WHOM ALL ATHENIANS KNOW TO BE SIMILARLY ACCOMPLISHED AND TO HERMOCRATES (WHO IS->WHOSE) ALSO FITTED BY NATURE AND EDUCATION TO SHARE IN OUR DISCOURSE +2961-961-0003-523: I WILL IF (TIMAEUS->TO ME AS) APPROVES I APPROVE +2961-961-0004-524: LISTEN THEN SOCRATES TO A TALE OF (SOLON'S->SILENCE) WHO BEING THE FRIEND OF (DROPIDAS MY->DROPIDUS BY) GREAT GRANDFATHER TOLD IT TO MY GRANDFATHER (CRITIAS->CRITIUS) AND HE TOLD ME +2961-961-0005-525: SOME POEMS OF (SOLON->SOLID) WERE RECITED BY THE BOYS +2961-961-0006-526: AND WHAT WAS THE SUBJECT OF THE POEM SAID THE PERSON WHO MADE THE REMARK +2961-961-0007-527: THE SUBJECT WAS A VERY NOBLE ONE HE DESCRIBED THE MOST FAMOUS ACTION IN WHICH THE ATHENIAN PEOPLE WERE EVER ENGAGED +2961-961-0008-528: BUT THE MEMORY OF THEIR EXPLOITS (HAS->HAD) PASSED AWAY OWING TO THE LAPSE OF TIME AND THE EXTINCTION OF THE ACTORS +2961-961-0009-529: TELL US SAID THE OTHER THE WHOLE STORY AND WHERE SOLON HEARD (THE->THIS) STORY +2961-961-0010-530: BUT IN EGYPT THE TRADITIONS OF OUR OWN AND OTHER LANDS ARE BY US REGISTERED (FOR EVER->FOREVER) IN OUR TEMPLES +2961-961-0011-531: THE GENEALOGIES WHICH YOU HAVE RECITED TO US OUT OF YOUR OWN (ANNALS SOLON->ANNAL SOLEMN) ARE A MERE CHILDREN'S STORY +2961-961-0012-532: FOR IN THE TIMES BEFORE THE GREAT FLOOD ATHENS WAS THE GREATEST AND BEST OF CITIES AND (DID->DEAD) THE NOBLEST DEEDS AND HAD THE BEST CONSTITUTION OF ANY UNDER THE FACE OF HEAVEN +2961-961-0013-533: 
(SOLON->SULLEN) MARVELLED AND DESIRED TO BE INFORMED OF THE PARTICULARS +2961-961-0014-534: NINE THOUSAND YEARS HAVE ELAPSED SINCE (SHE FOUNDED->YOU FOUND IT) YOURS AND EIGHT THOUSAND SINCE (SHE FOUNDED->YOU FOUND IT) OURS AS OUR ANNALS RECORD +2961-961-0015-535: MANY LAWS EXIST AMONG US WHICH ARE THE COUNTERPART OF YOURS AS THEY WERE IN THE OLDEN TIME +2961-961-0016-536: I WILL BRIEFLY DESCRIBE THEM TO YOU AND YOU SHALL READ THE ACCOUNT OF THEM AT YOUR LEISURE IN THE SACRED REGISTERS +2961-961-0017-537: OBSERVE AGAIN WHAT CARE THE LAW TOOK IN THE PURSUIT OF WISDOM SEARCHING OUT THE DEEP THINGS OF THE WORLD AND APPLYING THEM TO THE USE OF (MAN->MEN) +2961-961-0018-538: THE MOST (FAMOUS->FAME AS) OF THEM ALL WAS THE OVERTHROW OF THE ISLAND OF ATLANTIS +2961-961-0019-539: FOR AT THE PERIL OF HER OWN EXISTENCE AND WHEN THE (OTHER->OTTER) HELLENES HAD DESERTED HER SHE REPELLED (THE->*) INVADER AND OF HER OWN ACCORD GAVE LIBERTY TO ALL THE NATIONS WITHIN THE PILLARS +2961-961-0020-540: THIS IS THE EXPLANATION OF THE SHALLOWS WHICH ARE FOUND IN THAT PART OF THE ATLANTIC OCEAN +2961-961-0021-541: BUT I WOULD NOT SPEAK AT THE TIME BECAUSE I WANTED TO REFRESH MY MEMORY +2961-961-0022-542: THEN (NOW->THOU) LET ME EXPLAIN TO YOU THE ORDER OF OUR ENTERTAINMENT FIRST (TIMAEUS->TIMAS) WHO IS A NATURAL PHILOSOPHER WILL SPEAK OF THE ORIGIN OF THE WORLD GOING DOWN TO THE CREATION OF (MAN->MEN) AND THEN I SHALL RECEIVE THE MEN WHOM HE HAS CREATED AND SOME OF WHOM WILL HAVE BEEN EDUCATED BY YOU AND INTRODUCE THEM TO YOU AS THE LOST ATHENIAN CITIZENS OF WHOM THE EGYPTIAN (RECORD->RECORDS) SPOKE +3570-5694-0000-2433: (BUT->BETTER) ALREADY AT A POINT IN ECONOMIC EVOLUTION FAR (ANTEDATING->ANTIDATING) THE EMERGENCE OF THE LADY (SPECIALISED->SPECIALIZED) CONSUMPTION OF GOODS AS AN EVIDENCE OF PECUNIARY STRENGTH HAD BEGUN TO WORK OUT IN A MORE OR LESS (ELABORATE->CELEBRATE) SYSTEM +3570-5694-0001-2434: (THE UTILITY->THEATILITY) OF CONSUMPTION AS AN EVIDENCE OF WEALTH IS TO BE CLASSED AS A DERIVATIVE GROWTH +3570-5694-0002-2435: SUCH CONSUMPTION AS FALLS (TO->THROUGH) THE WOMEN IS MERELY INCIDENTAL TO THEIR WORK IT IS A MEANS TO THEIR CONTINUED (LABOUR->LABOR) AND NOT (A->TO) CONSUMPTION DIRECTED TO THEIR OWN COMFORT AND (FULNESS->FULLNESS) OF LIFE +3570-5694-0003-2436: WITH A FURTHER ADVANCE IN CULTURE THIS (TABU->TABOO) MAY (CHANGE->CHANGED) INTO SIMPLE CUSTOM OF A MORE OR LESS RIGOROUS CHARACTER BUT WHATEVER BE THE THEORETICAL BASIS OF THE DISTINCTION WHICH IS MAINTAINED WHETHER IT BE (*->AT) A (TABU->BOO) OR A LARGER CONVENTIONALITY THE FEATURES OF THE CONVENTIONAL SCHEME OF CONSUMPTION DO NOT CHANGE EASILY +3570-5694-0004-2437: IN THE NATURE OF THINGS LUXURIES AND THE COMFORTS OF LIFE BELONG TO THE LEISURE CLASS +3570-5694-0005-2438: UNDER THE (TABU->TABOO) CERTAIN VICTUALS AND MORE PARTICULARLY CERTAIN BEVERAGES ARE STRICTLY RESERVED FOR THE USE OF THE SUPERIOR CLASS +3570-5694-0006-2439: (DRUNKENNESS->DRINKENNESS) AND THE OTHER (PATHOLOGICAL->PETHOLOGICAL) CONSEQUENCES OF THE (FREE USE->FREWS) OF STIMULANTS THEREFORE TEND IN THEIR TURN TO BECOME (HONORIFIC->UNERRIFIC) AS BEING A MARK AT THE SECOND REMOVE OF THE SUPERIOR STATUS OF THOSE WHO ARE ABLE TO AFFORD THE INDULGENCE +3570-5694-0007-2440: IT HAS EVEN HAPPENED THAT THE NAME FOR CERTAIN DISEASED CONDITIONS OF THE BODY ARISING FROM SUCH AN ORIGIN HAS PASSED INTO EVERYDAY SPEECH AS A SYNONYM FOR NOBLE OR GENTLE +3570-5694-0008-2441: THE CONSUMPTION OF LUXURIES IN THE TRUE SENSE IS A CONSUMPTION DIRECTED TO THE COMFORT OF THE CONSUMER HIMSELF AND IS 
THEREFORE A MARK OF THE MASTER +3570-5694-0009-2442: WITH MANY QUALIFICATIONS WITH MORE QUALIFICATIONS AS THE PATRIARCHAL TRADITION HAS GRADUALLY WEAKENED THE GENERAL RULE IS FELT TO BE RIGHT AND BINDING THAT WOMEN SHOULD CONSUME ONLY FOR THE BENEFIT OF THEIR MASTERS +3570-5694-0010-2443: THE OBJECTION OF COURSE PRESENTS ITSELF THAT EXPENDITURE ON WOMEN'S DRESS AND HOUSEHOLD PARAPHERNALIA IS AN OBVIOUS EXCEPTION TO THIS RULE BUT IT WILL APPEAR IN THE SEQUEL THAT THIS EXCEPTION IS MUCH MORE OBVIOUS THAN SUBSTANTIAL +3570-5694-0011-2444: THE CUSTOM OF FESTIVE GATHERINGS PROBABLY ORIGINATED IN MOTIVES OF CONVIVIALITY AND RELIGION THESE MOTIVES ARE ALSO PRESENT IN THE LATER DEVELOPMENT BUT THEY DO NOT CONTINUE TO BE THE SOLE MOTIVES +3570-5694-0012-2445: THERE IS A MORE OR LESS ELABORATE SYSTEM OF RANK AND (GRADES->GRATES) +3570-5694-0013-2446: THIS DIFFERENTIATION IS FURTHERED BY THE INHERITANCE OF WEALTH AND THE CONSEQUENT INHERITANCE OF GENTILITY +3570-5694-0014-2447: MANY OF THESE (AFFILIATED->HAVE FILLIOTTED) GENTLEMEN OF LEISURE ARE AT THE SAME TIME (LESSER MEN->LESS AMEN) OF SUBSTANCE IN THEIR OWN RIGHT SO THAT SOME OF THEM ARE SCARCELY AT ALL OTHERS ONLY PARTIALLY TO BE RATED AS VICARIOUS CONSUMERS +3570-5694-0015-2448: SO MANY OF THEM HOWEVER AS MAKE UP THE RETAINER AND HANGERS ON OF THE PATRON MAY BE CLASSED AS VICARIOUS CONSUMER WITHOUT QUALIFICATION +3570-5694-0016-2449: MANY OF THESE AGAIN AND ALSO MANY OF THE OTHER (*->ARE) ARISTOCRACY OF LESS DEGREE HAVE IN TURN ATTACHED TO THEIR PERSONS A MORE OR LESS COMPREHENSIVE GROUP OF VICARIOUS CONSUMER IN THE PERSONS OF THEIR WIVES AND CHILDREN THEIR SERVANTS RETAINERS ET CETERA +3570-5694-0017-2450: THE WEARING OF UNIFORMS (OR->A) LIVERIES IMPLIES A CONSIDERABLE DEGREE OF DEPENDENCE AND MAY EVEN BE SAID TO BE A MARK OF SERVITUDE REAL OR OSTENSIBLE +3570-5694-0018-2451: THE WEARERS OF UNIFORMS AND LIVERIES MAY BE ROUGHLY DIVIDED INTO TWO CLASSES THE FREE AND THE SERVILE OR THE NOBLE AND THE IGNOBLE +3570-5694-0019-2452: BUT THE GENERAL DISTINCTION IS NOT ON THAT ACCOUNT TO BE OVERLOOKED +3570-5694-0020-2453: SO THOSE (OFFICES->OFFICERS) WHICH ARE BY RIGHT THE PROPER EMPLOYMENT OF THE LEISURE CLASS ARE NOBLE SUCH AS GOVERNMENT FIGHTING HUNTING THE CARE OF ARMS AND ACCOUTREMENTS AND THE LIKE IN SHORT THOSE WHICH MAY BE CLASSED AS OSTENSIBLY PREDATORY EMPLOYMENTS +3570-5694-0021-2454: WHENEVER AS IN THESE CASES THE MENIAL SERVICE IN QUESTION HAS TO DO DIRECTLY WITH (THE->A) PRIMARY LEISURE EMPLOYMENTS OF FIGHTING AND HUNTING IT EASILY ACQUIRES A REFLECTED HONORIFIC CHARACTER +3570-5694-0022-2455: THE LIVERY BECOMES OBNOXIOUS TO NEARLY ALL WHO ARE REQUIRED TO WEAR IT +3570-5695-0000-2456: (IN->AND) A GENERAL WAY THOUGH NOT WHOLLY NOR CONSISTENTLY THESE TWO GROUPS COINCIDE +3570-5695-0001-2457: THE DEPENDENT WHO WAS FIRST DELEGATED FOR THESE DUTIES WAS THE WIFE OR THE CHIEF WIFE AND AS WOULD BE EXPECTED IN THE LATER DEVELOPMENT OF THE INSTITUTION WHEN THE NUMBER OF PERSONS BY WHOM THESE DUTIES ARE (CUSTOMARILY->CUSTOMARY) PERFORMED (GRADUALLY NARROWS->GRADUAL AND ARROWS) THE WIFE REMAINS THE LAST +3570-5695-0002-2458: BUT AS WE DESCEND THE SOCIAL SCALE THE POINT IS PRESENTLY REACHED WHERE THE DUTIES OF (VICARIOUS->YARE'S) LEISURE AND CONSUMPTION DEVOLVE UPON THE WIFE ALONE +3570-5695-0003-2459: IN THE COMMUNITIES OF THE WESTERN CULTURE THIS POINT IS AT PRESENT FOUND AMONG THE LOWER MIDDLE CLASS +3570-5695-0004-2460: IF BEAUTY OR COMFORT IS ACHIEVED AND IT IS A MORE OR LESS FORTUITOUS CIRCUMSTANCE IF THEY ARE THEY MUST BE ACHIEVED BY MEANS AND 
METHODS THAT COMMEND THEMSELVES TO THE GREAT ECONOMIC LAW OF WASTED EFFORT +3570-5695-0005-2461: THE MAN OF THE HOUSEHOLD ALSO CAN DO SOMETHING IN THIS DIRECTION AND INDEED HE COMMONLY DOES BUT WITH A STILL LOWER (DESCENT->DISSENT) INTO THE LEVELS OF INDIGENCE ALONG THE MARGIN OF THE SLUMS THE MAN AND PRESENTLY ALSO THE CHILDREN VIRTUALLY (CEASE->SEIZED) TO CONSUME VALUABLE GOODS FOR APPEARANCES AND THE WOMAN REMAINS VIRTUALLY THE SOLE EXPONENT OF THE HOUSEHOLD'S PECUNIARY DECENCY +3570-5695-0006-2462: VERY MUCH OF SQUALOR AND DISCOMFORT WILL BE ENDURED BEFORE THE LAST TRINKET OR THE LAST (PRETENSE->PRETENCE) OF PECUNIARY (DECENCY IS->DECENCIES) PUT AWAY +3570-5695-0007-2463: THERE IS NO CLASS AND NO COUNTRY THAT HAS YIELDED SO (ABJECTLY->OBJECTLY) BEFORE THE PRESSURE OF PHYSICAL WANT AS TO DENY THEMSELVES ALL GRATIFICATION OF THIS HIGHER OR SPIRITUAL NEED +3570-5695-0008-2464: THE QUESTION IS WHICH OF THE TWO METHODS WILL MOST EFFECTIVELY REACH THE PERSONS WHOSE CONVICTIONS IT IS DESIRED TO (AFFECT->EFFECT) +3570-5695-0009-2465: EACH WILL THEREFORE SERVE ABOUT EQUALLY WELL DURING THE EARLIER STAGES OF SOCIAL GROWTH +3570-5695-0010-2466: THE MODERN ORGANIZATION OF INDUSTRY WORKS IN THE SAME DIRECTION ALSO BY ANOTHER LINE +3570-5695-0011-2467: IT IS EVIDENT THEREFORE THAT THE PRESENT TREND OF THE DEVELOPMENT IS IN THE DIRECTION OF HEIGHTENING THE UTILITY OF CONSPICUOUS CONSUMPTION AS COMPARED WITH LEISURE +3570-5695-0012-2468: IT IS ALSO NOTICEABLE THAT THE (SERVICEABILITY->SURFABILITY) OF CONSUMPTION AS A MEANS OF REPUTE AS WELL AS THE INSISTENCE ON IT AS AN ELEMENT OF DECENCY IS AT ITS BEST IN THOSE PORTIONS OF THE COMMUNITY WHERE THE HUMAN CONTACT OF THE INDIVIDUAL IS WIDEST AND THE MOBILITY OF THE POPULATION IS GREATEST +3570-5695-0013-2469: CONSUMPTION BECOMES A LARGER ELEMENT IN THE STANDARD OF LIVING IN THE CITY THAN IN THE COUNTRY +3570-5695-0014-2470: AMONG THE COUNTRY POPULATION ITS (PLACE IS->PLACES) TO SOME EXTENT TAKEN BY SAVINGS AND HOME COMFORTS KNOWN THROUGH THE MEDIUM OF (NEIGHBORHOOD GOSSIP->NEIGHBOURHOOD GOSSIPS) SUFFICIENTLY TO SERVE THE LIKE GENERAL PURPOSE OF PECUNIARY REPUTE +3570-5695-0015-2471: THE RESULT IS A GREAT MOBILITY OF THE LABOR EMPLOYED IN PRINTING PERHAPS GREATER THAN IN ANY OTHER EQUALLY WELL DEFINED AND CONSIDERABLE BODY OF WORKMEN +3570-5696-0000-2472: UNDER THE SIMPLE TEST OF EFFECTIVENESS FOR ADVERTISING WE SHOULD EXPECT TO FIND LEISURE AND THE CONSPICUOUS CONSUMPTION OF GOODS DIVIDING THE FIELD OF PECUNIARY EMULATION PRETTY EVENLY BETWEEN THEM AT THE OUTSET +3570-5696-0001-2473: BUT THE ACTUAL COURSE OF DEVELOPMENT HAS BEEN SOMEWHAT DIFFERENT FROM THIS IDEAL SCHEME LEISURE HELD THE FIRST PLACE AT THE START AND CAME TO (HOLD A->ALL THE) RANK (VERY MUCH->VEREMENT) ABOVE WASTEFUL CONSUMPTION OF GOODS BOTH AS A DIRECT EXPONENT OF WEALTH AND AS AN ELEMENT IN THE STANDARD OF DECENCY DURING THE (QUASI->COURSE I) PEACEABLE CULTURE +3570-5696-0002-2474: (OTHER->ARE THE) CIRCUMSTANCES PERMITTING THAT INSTINCT DISPOSES MEN TO LOOK WITH (FAVOR->FAVOUR) UPON PRODUCTIVE EFFICIENCY AND ON WHATEVER IS OF HUMAN USE +3570-5696-0003-2475: (A->I) RECONCILIATION BETWEEN THE TWO CONFLICTING REQUIREMENTS IS (EFFECTED->AFFECTED) BY (A->*) RESORT TO MAKE BELIEVE (MANY->MEN) AND INTRICATE POLITE OBSERVANCES AND SOCIAL DUTIES OF A CEREMONIAL NATURE ARE DEVELOPED MANY ORGANIZATIONS ARE FOUNDED WITH SOME SPECIOUS OBJECT OF AMELIORATION EMBODIED IN THEIR OFFICIAL (STYLE AND->STYLANT) TITLE THERE IS MUCH COMING AND GOING AND A DEAL OF TALK TO THE END THAT THE (TALKERS->TALK IS) 
MAY NOT HAVE OCCASION TO REFLECT ON WHAT IS THE EFFECTUAL ECONOMIC VALUE OF THEIR TRAFFIC +3570-5696-0004-2476: THE (SALIENT->SAILORED) FEATURES OF THIS DEVELOPMENT OF DOMESTIC SERVICE HAVE ALREADY BEEN INDICATED +3570-5696-0005-2477: THROUGHOUT THE ENTIRE (EVOLUTION->REVOLUTION) OF CONSPICUOUS EXPENDITURE WHETHER OF GOODS OR OF SERVICES OR HUMAN LIFE RUNS THE OBVIOUS IMPLICATION THAT IN ORDER TO EFFECTUALLY MEND THE (CONSUMER'S->CONSUMERS) GOOD FAME IT MUST BE AN EXPENDITURE OF SUPERFLUITIES +3570-5696-0006-2478: AS USED IN THE SPEECH OF (EVERYDAY->EVERY DAY) LIFE THE WORD CARRIES AN UNDERTONE OF DEPRECATION +3570-5696-0007-2479: THE USE OF THE WORD WASTE AS A TECHNICAL TERM THEREFORE IMPLIES NO DEPRECATION OF THE MOTIVES OR OF THE ENDS SOUGHT BY THE CONSUMER UNDER THIS CANON OF CONSPICUOUS WASTE +3570-5696-0008-2480: BUT IT IS (ON OTHER GROUNDS WORTH->ANOTHER GROUND') NOTING THAT THE TERM WASTE IN THE LANGUAGE OF (EVERYDAY->EVERY DAY) LIFE IMPLIES DEPRECATION OF WHAT IS CHARACTERIZED AS WASTEFUL +3570-5696-0009-2481: IN STRICT ACCURACY NOTHING SHOULD BE INCLUDED UNDER THE HEAD OF CONSPICUOUS WASTE BUT SUCH EXPENDITURE AS IS INCURRED ON THE GROUND OF AN INVIDIOUS PECUNIARY COMPARISON +3570-5696-0010-2482: AN ARTICLE MAY BE USEFUL AND WASTEFUL BOTH AND ITS UTILITY TO THE CONSUMER MAY BE MADE UP OF USE AND WASTE IN THE MOST VARYING PROPORTIONS +3575-170457-0000-369: AND OFTEN HAS MY MOTHER SAID WHILE ON HER LAP I LAID MY HEAD SHE FEARED FOR TIME I WAS NOT MADE BUT FOR ETERNITY +3575-170457-0001-370: WHY ARE WE TO BE DENIED EACH OTHER'S SOCIETY +3575-170457-0002-371: WHY ARE WE TO BE DIVIDED +3575-170457-0003-372: SURELY IT MUST BE BECAUSE WE ARE IN DANGER OF LOVING EACH OTHER TOO WELL OF LOSING SIGHT OF THE CREATOR (IN->AND) IDOLATRY OF THE CREATURE +3575-170457-0004-373: WE USED TO DISPUTE ABOUT POLITICS AND RELIGION +3575-170457-0005-374: SHE (A TORY AND->ATTORIAN) CLERGYMAN'S DAUGHTER WAS ALWAYS IN A MINORITY OF ONE IN OUR HOUSE (OF->A) VIOLENT (DISSENT->DESCENT) AND RADICALISM +3575-170457-0006-375: HER FEEBLE HEALTH GAVE HER HER YIELDING MANNER FOR SHE COULD NEVER OPPOSE ANY ONE WITHOUT GATHERING UP ALL HER STRENGTH FOR THE STRUGGLE +3575-170457-0007-376: HE SPOKE FRENCH PERFECTLY I HAVE BEEN TOLD WHEN NEED WAS BUT DELIGHTED USUALLY IN TALKING THE BROADEST YORKSHIRE +3575-170457-0008-377: AND SO LIFE AND DEATH HAVE DISPERSED THE CIRCLE OF VIOLENT RADICALS AND DISSENTERS INTO WHICH TWENTY YEARS AGO THE LITTLE QUIET RESOLUTE CLERGYMAN'S DAUGHTER WAS RECEIVED AND BY WHOM SHE WAS TRULY LOVED AND (HONOURED->HONORED) +3575-170457-0009-378: JANUARY AND FEBRUARY OF EIGHTEEN THIRTY SEVEN HAD PASSED AWAY AND STILL THERE WAS NO REPLY FROM (SOUTHEY->SALVIE) +3575-170457-0010-379: I AM NOT DEPRECIATING IT WHEN I SAY THAT IN THESE TIMES IT IS NOT RARE +3575-170457-0011-380: BUT IT IS NOT WITH A VIEW TO DISTINCTION THAT YOU SHOULD CULTIVATE THIS TALENT IF YOU CONSULT YOUR OWN HAPPINESS +3575-170457-0012-381: YOU WILL SAY THAT A WOMAN HAS NO NEED OF SUCH A CAUTION THERE CAN BE NO PERIL IN IT FOR HER +3575-170457-0013-382: THE MORE SHE IS ENGAGED IN HER PROPER DUTIES THE LESS LEISURE WILL SHE HAVE FOR IT EVEN AS AN ACCOMPLISHMENT AND A RECREATION +3575-170457-0014-383: TO THOSE DUTIES YOU HAVE NOT YET BEEN CALLED AND WHEN YOU ARE YOU WILL BE LESS EAGER FOR CELEBRITY +3575-170457-0015-384: BUT DO NOT SUPPOSE THAT I DISPARAGE THE GIFT WHICH YOU POSSESS NOR THAT I WOULD DISCOURAGE YOU FROM EXERCISING IT I ONLY EXHORT YOU SO TO THINK OF IT AND SO TO USE IT AS TO RENDER IT CONDUCIVE TO YOUR OWN PERMANENT 
GOOD +3575-170457-0016-385: FAREWELL MADAM +3575-170457-0017-386: THOUGH I MAY BE BUT AN UNGRACIOUS ADVISER YOU WILL ALLOW ME THEREFORE TO SUBSCRIBE MYSELF WITH THE BEST WISHES FOR YOUR HAPPINESS HERE AND HEREAFTER YOUR TRUE FRIEND ROBERT (SOUTHEY->SELVIE) +3575-170457-0018-387: SIR MARCH SIXTEENTH +3575-170457-0019-388: I (HAD->HAVE) NOT VENTURED TO HOPE FOR SUCH A REPLY SO CONSIDERATE IN ITS TONE SO NOBLE IN ITS SPIRIT +3575-170457-0020-389: I KNOW THE FIRST LETTER I WROTE TO YOU WAS ALL SENSELESS TRASH FROM BEGINNING TO END BUT I AM NOT ALTOGETHER THE IDLE DREAMING BEING IT WOULD SEEM TO DENOTE +3575-170457-0021-390: I THOUGHT IT THEREFORE MY DUTY WHEN I LEFT SCHOOL TO BECOME A GOVERNESS +3575-170457-0022-391: IN THE EVENINGS I (CONFESS->CONFESSED) I DO THINK BUT I NEVER TROUBLE (ANY ONE->ANYONE) ELSE WITH MY THOUGHTS +3575-170457-0023-392: I CAREFULLY AVOID ANY APPEARANCE OF PREOCCUPATION AND (ECCENTRICITY->EXCENTRICITY) WHICH MIGHT LEAD THOSE I LIVE AMONGST TO SUSPECT THE NATURE OF MY PURSUITS +3575-170457-0024-393: I DON'T ALWAYS SUCCEED FOR SOMETIMES WHEN I'M TEACHING OR SEWING I WOULD RATHER BE READING (OR->A) WRITING BUT I (TRY->TRIED) TO DENY MYSELF AND MY FATHER'S APPROBATION AMPLY REWARDED ME FOR THE PRIVATION +3575-170457-0025-394: AGAIN I THANK YOU THIS INCIDENT I SUPPOSE WILL BE RENEWED NO MORE IF I LIVE TO BE AN OLD WOMAN I SHALL REMEMBER IT THIRTY YEARS HENCE AS A BRIGHT DREAM +3575-170457-0026-395: P S PRAY SIR EXCUSE ME FOR WRITING TO YOU A SECOND TIME I COULD NOT HELP WRITING PARTLY TO TELL YOU HOW THANKFUL I AM FOR YOUR KINDNESS AND PARTLY TO LET YOU KNOW THAT YOUR ADVICE SHALL NOT BE WASTED HOWEVER SORROWFULLY AND RELUCTANTLY IT MAY BE AT FIRST FOLLOWED C (B->*) +3575-170457-0027-396: I CANNOT DENY MYSELF THE GRATIFICATION OF INSERTING (SOUTHEY'S->SO THESE) REPLY +3575-170457-0028-397: (KESWICK->KEZWICK) MARCH TWENTY SECOND EIGHTEEN THIRTY SEVEN DEAR MADAM +3575-170457-0029-398: YOUR LETTER HAS GIVEN ME GREAT PLEASURE AND I SHOULD NOT FORGIVE MYSELF IF I DID NOT TELL YOU SO +3575-170457-0030-399: OF THIS SECOND LETTER ALSO SHE SPOKE AND TOLD ME THAT IT CONTAINED AN INVITATION FOR HER TO GO AND SEE THE POET IF EVER SHE VISITED THE LAKES +3575-170457-0031-400: ON AUGUST TWENTY SEVENTH EIGHTEEN THIRTY SEVEN SHE WRITES +3575-170457-0032-401: COME COME (I AM->I'M) GETTING REALLY TIRED OF YOUR ABSENCE +3575-170457-0033-402: SATURDAY AFTER SATURDAY COMES (ROUND->AROUND) AND I CAN HAVE NO HOPE OF HEARING YOUR KNOCK AT THE DOOR AND THEN BEING TOLD THAT (MISS E->MISSY) IS COME OH DEAR +3575-170457-0034-403: IN THIS MONOTONOUS LIFE OF (MINE->MIND) THAT WAS A PLEASANT EVENT +3575-170457-0035-404: I WISH (IT WOULD->YOU WERE) RECUR AGAIN BUT IT WILL TAKE TWO OR THREE INTERVIEWS BEFORE THE STIFFNESS THE ESTRANGEMENT OF THIS LONG SEPARATION WILL WEAR AWAY +3575-170457-0036-405: MY EYES (FILL WITH->FILLED) TEARS WHEN I CONTRAST THE BLISS OF SUCH A STATE BRIGHTENED BY HOPES OF THE FUTURE WITH THE MELANCHOLY STATE I NOW LIVE IN UNCERTAIN THAT I EVER FELT TRUE CONTRITION WANDERING IN THOUGHT (AND DEED->INDEED) LONGING FOR HOLINESS WHICH I SHALL NEVER NEVER OBTAIN SMITTEN AT TIMES TO THE HEART WITH THE CONVICTION THAT GHASTLY CALVINISTIC DOCTRINES ARE TRUE DARKENED IN SHORT BY THE VERY SHADOWS OF SPIRITUAL DEATH +3575-170457-0037-406: IF CHRISTIAN PERFECTION BE NECESSARY TO SALVATION I SHALL NEVER BE SAVED MY HEART IS A VERY (HOTBED->HOT BED) FOR SINFUL THOUGHTS AND WHEN I DECIDE ON AN ACTION I SCARCELY REMEMBER TO LOOK TO MY REDEEMER FOR (*->A) DIRECTION +3575-170457-0038-407: AND 
MEANTIME I KNOW THE GREATNESS OF JEHOVAH I ACKNOWLEDGE THE PERFECTION OF HIS WORD I ADORE THE PURITY OF THE CHRISTIAN FAITH MY THEORY IS RIGHT MY PRACTICE HORRIBLY WRONG +3575-170457-0039-408: THE (CHRISTMAS->CHRIST) HOLIDAYS CAME AND SHE AND ANNE RETURNED TO THE PARSONAGE AND TO THAT HAPPY HOME CIRCLE IN WHICH ALONE THEIR NATURES EXPANDED AMONGST ALL OTHER PEOPLE THEY SHRIVELLED UP MORE OR LESS +3575-170457-0040-409: INDEED THERE WERE ONLY ONE OR TWO STRANGERS WHO COULD BE ADMITTED AMONG THE SISTERS WITHOUT PRODUCING THE SAME RESULT +3575-170457-0041-410: SHE WAS GONE OUT INTO THE VILLAGE ON SOME ERRAND WHEN AS SHE WAS DESCENDING THE STEEP STREET HER FOOT SLIPPED ON THE ICE AND SHE FELL (IT->HE) WAS DARK AND NO ONE SAW HER MISCHANCE TILL AFTER A TIME HER GROANS ATTRACTED THE ATTENTION OF A PASSER BY +3575-170457-0042-411: UNFORTUNATELY THE FRACTURE COULD NOT BE SET TILL SIX O'CLOCK THE NEXT MORNING AS NO SURGEON WAS TO BE HAD BEFORE THAT TIME AND SHE NOW LIES AT (OUR->HER) HOUSE IN A VERY DOUBTFUL AND DANGEROUS STATE +3575-170457-0043-412: HOWEVER REMEMBERING WHAT YOU TOLD ME NAMELY THAT YOU HAD COMMENDED THE MATTER TO A HIGHER DECISION THAN OURS AND THAT YOU WERE RESOLVED TO SUBMIT WITH RESIGNATION TO THAT DECISION WHATEVER IT MIGHT BE I HOLD IT MY DUTY TO YIELD ALSO AND TO BE SILENT (IT->AND) MAY BE ALL FOR THE BEST +3575-170457-0044-413: AFTER THIS DISAPPOINTMENT I NEVER DARE RECKON WITH CERTAINTY ON THE ENJOYMENT OF A PLEASURE AGAIN IT SEEMS AS IF SOME FATALITY STOOD BETWEEN YOU AND ME +3575-170457-0045-414: I AM NOT GOOD ENOUGH FOR YOU AND YOU MUST BE KEPT FROM THE CONTAMINATION OF (TOO->TWO) INTIMATE SOCIETY +3575-170457-0046-415: A GOOD (NEIGHBOUR->NEIGHBOR) OF THE BRONTES A CLEVER INTELLIGENT YORKSHIRE WOMAN WHO KEEPS A (DRUGGIST'S->DRUGGIST) SHOP IN HAWORTH (AND->*) FROM HER OCCUPATION HER EXPERIENCE AND EXCELLENT SENSE HOLDS THE POSITION OF VILLAGE (DOCTRESS->DOCTRIS) AND NURSE AND AS SUCH HAS BEEN A FRIEND IN MANY A TIME OF TRIAL AND SICKNESS AND DEATH IN THE HOUSEHOLDS ROUND TOLD ME A CHARACTERISTIC LITTLE INCIDENT CONNECTED WITH TABBY'S FRACTURED LEG +3575-170457-0047-416: TABBY HAD LIVED WITH THEM FOR TEN OR TWELVE YEARS AND WAS AS CHARLOTTE EXPRESSED IT ONE OF THE FAMILY +3575-170457-0048-417: HE (REFUSED->REFUSE) AT FIRST TO LISTEN TO THE CAREFUL ADVICE IT WAS REPUGNANT TO HIS LIBERAL NATURE +3575-170457-0049-418: THIS DECISION WAS COMMUNICATED TO THE GIRLS +3575-170457-0050-419: TABBY HAD TENDED THEM IN THEIR CHILDHOOD THEY AND NONE OTHER SHOULD TEND HER IN HER INFIRMITY AND AGE +3575-170457-0051-420: AT TEA TIME THEY WERE SAD AND SILENT AND THE MEAL WENT AWAY UNTOUCHED BY ANY OF THE THREE +3575-170457-0052-421: SHE HAD ANOTHER WEIGHT ON HER MIND THIS CHRISTMAS +3575-170457-0053-422: BUT ANNE HAD BEGUN TO SUFFER JUST BEFORE THE HOLIDAYS AND CHARLOTTE WATCHED OVER HER YOUNGER SISTERS WITH (THE->A) JEALOUS VIGILANCE OF SOME WILD CREATURE THAT CHANGES HER VERY NATURE IF DANGER THREATENS HER YOUNG +3575-170457-0054-423: STUNG BY ANXIETY FOR THIS LITTLE SISTER SHE UPBRAIDED MISS W FOR HER FANCIED INDIFFERENCE TO (ANNE'S->AN) STATE OF HEALTH +3575-170457-0055-424: STILL HER HEART HAD RECEIVED A SHOCK IN THE PERCEPTION OF ANNE'S DELICACY AND ALL THESE HOLIDAYS SHE WATCHED OVER HER WITH THE LONGING FOND ANXIETY WHICH IS SO FULL OF SUDDEN PANGS OF FEAR +3575-170457-0056-425: I DOUBT WHETHER (BRANWELL->BROWNWELL) WAS MAINTAINING HIMSELF AT THIS TIME +3729-6852-0000-1660: TO CELEBRATE THE ARRIVAL OF HER SON (SILVIA->SYLVIA) GAVE A SPLENDID SUPPER TO WHICH SHE HAD INVITED ALL HER 
RELATIVES AND IT WAS A GOOD OPPORTUNITY FOR ME TO MAKE THEIR ACQUAINTANCE +3729-6852-0001-1661: WITHOUT SAYING IT POSITIVELY SHE MADE ME UNDERSTAND THAT BEING HERSELF AN ILLUSTRIOUS MEMBER OF THE REPUBLIC OF LETTERS SHE WAS WELL AWARE THAT SHE WAS SPEAKING TO AN INSECT +3729-6852-0002-1662: IN ORDER TO PLEASE HER I SPOKE TO HER OF THE (ABBE->ABBEY) CONTI AND I HAD OCCASION TO QUOTE TWO LINES OF THAT PROFOUND WRITER +3729-6852-0003-1663: (MADAM->MADAME) CORRECTED ME WITH A PATRONIZING AIR FOR MY PRONUNCIATION OF THE WORD (SCEVRA->SCAVER) WHICH MEANS DIVIDED SAYING THAT IT OUGHT TO BE PRONOUNCED (SCEURA->SKURA) AND SHE ADDED THAT I OUGHT TO BE VERY GLAD TO HAVE LEARNED SO MUCH ON THE FIRST DAY OF MY ARRIVAL IN PARIS TELLING ME THAT IT WOULD BE AN IMPORTANT DAY IN MY LIFE +3729-6852-0004-1664: HER FACE WAS AN ENIGMA FOR IT INSPIRED (EVERYONE->EVERY ONE) WITH THE WARMEST SYMPATHY AND YET IF YOU EXAMINED IT ATTENTIVELY THERE WAS NOT ONE BEAUTIFUL FEATURE SHE COULD NOT BE CALLED HANDSOME BUT NO ONE COULD HAVE THOUGHT HER UGLY +3729-6852-0005-1665: (SILVIA->SYLVIA) WAS THE ADORATION OF FRANCE AND HER TALENT WAS THE REAL SUPPORT OF ALL THE COMEDIES WHICH THE GREATEST AUTHORS WROTE FOR HER ESPECIALLY OF THE PLAYS OF MARIVAUX FOR WITHOUT HER HIS COMEDIES WOULD NEVER HAVE GONE TO (POSTERITY->PROSTERITY) +3729-6852-0006-1666: (SILVIA->SYLVIA) DID NOT THINK THAT HER GOOD CONDUCT WAS A MERIT FOR SHE KNEW THAT SHE WAS VIRTUOUS ONLY BECAUSE HER SELF LOVE COMPELLED HER TO BE SO AND SHE NEVER EXHIBITED ANY PRIDE OR ASSUMED ANY SUPERIORITY TOWARDS HER THEATRICAL SISTERS ALTHOUGH SATISFIED TO SHINE BY THEIR TALENT OR THEIR BEAUTY THEY CARED LITTLE ABOUT RENDERING THEMSELVES CONSPICUOUS BY THEIR VIRTUE +3729-6852-0007-1667: TWO YEARS BEFORE HER DEATH I SAW HER PERFORM THE CHARACTER OF MARIANNE IN THE COMEDY OF (MARIVAUX->MARAVO) AND IN SPITE OF HER AGE AND DECLINING HEALTH THE ILLUSION WAS COMPLETE +3729-6852-0008-1668: SHE WAS HONOURABLY BURIED IN THE CHURCH OF SAINT (SAUVEUR->SAVER) WITHOUT THE SLIGHTEST OPPOSITION FROM THE VENERABLE PRIEST WHO FAR FROM SHARING THE ANTI (CHRISTAIN->CHRISTIAN) INTOLERANCY OF THE CLERGY IN GENERAL SAID THAT HER PROFESSION AS AN ACTRESS HAD NOT HINDERED HER FROM BEING A GOOD CHRISTIAN AND THAT THE EARTH WAS (THE->A) COMMON MOTHER OF ALL HUMAN BEINGS AS JESUS CHRIST HAD BEEN THE SAVIOUR OF ALL MANKIND +3729-6852-0009-1669: YOU WILL FORGIVE ME DEAR READER IF I HAVE MADE YOU ATTEND THE FUNERAL OF (SILVIA->SYLVIA) TEN YEARS BEFORE HER DEATH BELIEVE ME I HAVE NO INTENTION OF PERFORMING A MIRACLE YOU MAY CONSOLE YOURSELF WITH THE IDEA THAT I SHALL SPARE YOU THAT UNPLEASANT TASK WHEN POOR (SILVIA->SYLVIA) DIES +3729-6852-0010-1670: I NEVER HAD ANY FAMILY +3729-6852-0011-1671: I HAD A NAME I BELIEVE IN MY YOUNG DAYS BUT I HAVE FORGOTTEN IT SINCE I HAVE BEEN IN SERVICE +3729-6852-0012-1672: I SHALL CALL YOU (ESPRIT->A SPREE) +3729-6852-0013-1673: YOU DO ME A GREAT (HONOUR->HONOR) +3729-6852-0014-1674: HERE GO AND GET ME CHANGE FOR A LOUIS I HAVE IT SIR +3729-6852-0015-1675: AT YOUR SERVICE SIR +3729-6852-0016-1676: MADAME (QUINSON->QUINCENT) BESIDES CAN ANSWER YOUR (ENQUIRIES->INQUIRIES) +3729-6852-0017-1677: I SEE A QUANTITY OF CHAIRS FOR HIRE AT THE RATE OF ONE (SOU->SOUS) MEN READING THE NEWSPAPER UNDER THE SHADE OF THE TREES GIRLS AND MEN BREAKFASTING EITHER ALONE OR IN COMPANY WAITERS WHO WERE RAPIDLY GOING UP AND DOWN A NARROW STAIRCASE HIDDEN UNDER THE FOLIAGE +3729-6852-0018-1678: I SIT DOWN AT A SMALL TABLE A WAITER COMES IMMEDIATELY TO (ENQUIRE->INQUIRE) MY WISHES 
+3729-6852-0019-1679: I TELL HIM TO GIVE ME SOME COFFEE IF IT IS GOOD +3729-6852-0020-1680: THEN TURNING TOWARDS ME HE SAYS THAT I LOOK LIKE A FOREIGNER AND WHEN I SAY THAT I AM AN ITALIAN HE BEGINS TO SPEAK TO ME OF THE (COURT OF->CORPS) THE CITY OF THE THEATRES AND AT LAST HE OFFERS TO ACCOMPANY ME EVERYWHERE +3729-6852-0021-1681: I THANK HIM AND TAKE MY LEAVE +3729-6852-0022-1682: I ADDRESS HIM IN ITALIAN AND HE ANSWERS VERY WITTILY BUT HIS WAY OF SPEAKING MAKES ME SMILE AND I TELL HIM WHY +3729-6852-0023-1683: MY REMARK PLEASES HIM BUT I SOON PROVE TO HIM THAT IT IS NOT THE RIGHT WAY TO SPEAK HOWEVER PERFECT MAY HAVE BEEN THE LANGUAGE OF THAT ANCIENT WRITER +3729-6852-0024-1684: I SEE A CROWD IN ONE CORNER OF THE GARDEN EVERYBODY STANDING STILL AND LOOKING UP +3729-6852-0025-1685: IS THERE NOT A MERIDIAN EVERYWHERE +3729-6852-0026-1686: YES BUT THE MERIDIAN OF THE PALAIS ROYAL IS THE MOST EXACT +3729-6852-0027-1687: THAT IS TRUE (BADAUDERIE->BAD DEALT GREE) +3729-6852-0028-1688: ALL THESE HONEST PERSONS ARE WAITING THEIR TURN TO GET THEIR SNUFF BOXES FILLED +3729-6852-0029-1689: IT IS SOLD EVERYWHERE BUT FOR THE LAST THREE WEEKS NOBODY WILL USE ANY SNUFF BUT (THAT->THAT'S) SOLD AT THE (CIVET->SAVE) CAT +3729-6852-0030-1690: IS IT BETTER THAN ANYWHERE ELSE +3729-6852-0031-1691: BUT HOW DID SHE MANAGE TO RENDER IT SO FASHIONABLE +3729-6852-0032-1692: SIMPLY BY STOPPING HER CARRIAGE TWO OR THREE TIMES BEFORE THE SHOP TO HAVE HER SNUFF BOX FILLED AND BY SAYING ALOUD TO THE YOUNG GIRL WHO HANDED BACK THE BOX THAT HER SNUFF WAS THE VERY BEST IN PARIS +3729-6852-0033-1693: YOU ARE NOW IN THE ONLY COUNTRY IN THE WORLD WHERE WIT CAN MAKE A FORTUNE BY SELLING EITHER A GENUINE OR A FALSE ARTICLE IN THE FIRST CASE IT RECEIVES THE WELCOME OF INTELLIGENT AND TALENTED PEOPLE AND IN THE SECOND FOOLS ARE ALWAYS READY TO REWARD IT FOR SILLINESS IS TRULY A CHARACTERISTIC OF THE PEOPLE HERE AND HOWEVER WONDERFUL IT MAY APPEAR SILLINESS IS THE DAUGHTER OF WIT +3729-6852-0034-1694: LET A MAN RUN AND EVERYBODY WILL RUN AFTER HIM THE CROWD WILL NOT STOP UNLESS THE MAN IS PROVED TO BE MAD BUT TO PROVE IT IS INDEED A DIFFICULT TASK BECAUSE WE HAVE A CROWD OF MEN WHO MAD FROM THEIR BIRTH ARE STILL CONSIDERED WISE +3729-6852-0035-1695: IT SEEMS TO ME I REPLIED THAT SUCH APPROVAL SUCH RATIFICATION OF THE OPINION EXPRESSED BY THE KING THE PRINCES OF THE BLOOD ET CETERA IS RATHER A PROOF OF THE AFFECTION FELT FOR THEM BY THE NATION FOR THE FRENCH CARRY THAT AFFECTION TO SUCH AN EXTENT THAT THEY (BELIEVE->BELIEVED) THEM INFALLIBLE +3729-6852-0036-1696: WHEN THE KING COMES TO PARIS EVERYBODY CALLS OUT VIVE LE (ROI->ROY) +3729-6852-0037-1697: SHE INTRODUCED ME TO ALL HER GUESTS AND GAVE ME SOME PARTICULARS RESPECTING EVERY ONE OF THEM +3729-6852-0038-1698: WHAT SIR I SAID TO HIM AM I FORTUNATE ENOUGH TO SEE YOU +3729-6852-0039-1699: HE HIMSELF RECITED THE SAME PASSAGE IN FRENCH AND POLITELY POINTED OUT THE PARTS IN WHICH HE THOUGHT THAT I HAD IMPROVED ON THE ORIGINAL +3729-6852-0040-1700: FOR THE FIRST DAY SIR I THINK THAT WHAT YOU HAVE DONE GIVES GREAT HOPES OF YOU AND WITHOUT ANY DOUBT YOU WILL MAKE RAPID PROGRESS +3729-6852-0041-1701: I BELIEVE IT SIR AND THAT IS WHAT I FEAR THEREFORE THE PRINCIPAL OBJECT OF MY VISIT HERE IS TO DEVOTE MYSELF ENTIRELY TO THE STUDY OF THE FRENCH LANGUAGE +3729-6852-0042-1702: I AM A VERY UNPLEASANT PUPIL ALWAYS ASKING QUESTIONS CURIOUS TROUBLESOME INSATIABLE AND EVEN SUPPOSING THAT I COULD MEET WITH THE TEACHER I REQUIRE I AM AFRAID I AM NOT RICH ENOUGH TO PAY HIM 
+3729-6852-0043-1703: I RESIDE IN THE (MARAIS RUE->MARA GRUE) DE (DOUZE PORTES->DUSPORT) +3729-6852-0044-1704: I WILL MAKE YOU TRANSLATE THEM INTO FRENCH AND YOU NEED NOT BE AFRAID OF MY FINDING YOU INSATIABLE +3729-6852-0045-1705: HE HAD A GOOD APPETITE (COULD TELL->COTEL) A GOOD STORY WITHOUT LAUGHING WAS CELEBRATED FOR HIS WITTY REPARTEES AND HIS SOCIABLE MANNERS BUT HE SPENT HIS LIFE AT HOME SELDOM GOING OUT AND SEEING HARDLY (ANYONE->ANY ONE) BECAUSE HE ALWAYS HAD A PIPE IN HIS MOUTH AND WAS SURROUNDED BY AT LEAST TWENTY CATS WITH WHICH HE WOULD AMUSE HIMSELF ALL DAY +3729-6852-0046-1706: HIS HOUSEKEEPER HAD THE MANAGEMENT OF EVERYTHING SHE NEVER ALLOWED HIM TO BE IN NEED OF ANYTHING AND SHE GAVE NO ACCOUNT OF HIS MONEY WHICH SHE KEPT ALTOGETHER BECAUSE HE NEVER ASKED HER TO RENDER ANY ACCOUNTS +4077-13751-0000-1258: ON THE SIXTH OF APRIL EIGHTEEN THIRTY THE CHURCH OF JESUS CHRIST OF LATTER DAY SAINTS WAS (FORMALLY->FORMERLY) ORGANIZED AND THUS TOOK ON A LEGAL EXISTENCE +4077-13751-0001-1259: ITS ORIGIN WAS SMALL A GERM AN INSIGNIFICANT SEED HARDLY TO BE THOUGHT OF AS LIKELY TO AROUSE OPPOSITION +4077-13751-0002-1260: INSTEAD OF BUT SIX REGULARLY AFFILIATED MEMBERS AND AT MOST TWO SCORE OF ADHERENTS THE ORGANIZATION NUMBERS (TODAY->TO DAY) MANY HUNDRED THOUSAND SOULS +4077-13751-0003-1261: IN PLACE OF A SINGLE HAMLET IN THE SMALLEST CORNER OF WHICH THE MEMBERS COULD HAVE CONGREGATED THERE NOW ARE ABOUT SEVENTY STAKES OF ZION AND ABOUT SEVEN HUNDRED ORGANIZED WARDS EACH WARD AND STAKE WITH ITS FULL COMPLEMENT OF OFFICERS AND PRIESTHOOD ORGANIZATIONS +4077-13751-0004-1262: THE (PRACTISE->PRACTICE) OF GATHERING ITS PROSELYTES INTO ONE PLACE PREVENTS THE (BUILDING->BILLING) UP AND STRENGTHENING OF FOREIGN BRANCHES AND INASMUCH AS EXTENSIVE AND STRONG ORGANIZATIONS ARE SELDOM MET WITH ABROAD VERY ERRONEOUS IDEAS EXIST CONCERNING THE STRENGTH OF THE CHURCH +4077-13751-0005-1263: NEVERTHELESS THE MUSTARD SEED AMONG THE SMALLEST OF ALL SEEDS (HAS ATTAINED->HESITATED) THE PROPORTIONS OF A TREE AND THE BIRDS OF THE AIR ARE NESTING IN ITS BRANCHES THE ACORN IS NOW (AN->IN) OAK OFFERING PROTECTION AND THE SWEETS OF SATISFACTION TO EVERY EARNEST PILGRIM JOURNEYING ITS WAY FOR TRUTH +4077-13751-0006-1264: THEIR EYES WERE FROM THE FIRST TURNED IN ANTICIPATION TOWARD THE EVENING SUN NOT MERELY THAT THE WORK OF (PROSELYTING->PROSELLING) SHOULD BE CARRIED ON IN THE WEST BUT THAT THE HEADQUARTERS OF THE CHURCH SHOULD BE (THERE->THEIR) ESTABLISHED +4077-13751-0007-1265: THE BOOK (OF->OR) MORMON HAD TAUGHT (THE->THAT) PEOPLE THE TRUE ORIGIN AND DESTINY OF THE AMERICAN INDIANS AND TOWARD THIS DARK SKINNED REMNANT OF A ONCE MIGHTY PEOPLE THE MISSIONARIES OF MORMONISM EARLY TURNED THEIR EYES AND WITH THEIR EYES WENT THEIR HEARTS AND THEIR HOPES +4077-13751-0008-1266: IT IS NOTABLE THAT THE INDIAN TRIBES HAVE GENERALLY REGARDED (THE->THEIR) RELIGION OF THE LATTER DAY SAINTS WITH FAVOR SEEING IN THE BOOK (OF->A) MORMON STRIKING AGREEMENT WITH THEIR OWN TRADITIONS +4077-13751-0009-1267: THE FIRST WELL ESTABLISHED SEAT OF THE CHURCH WAS IN THE PRETTY LITTLE TOWN OF (KIRTLAND->CURTLEND) OHIO ALMOST WITHIN SIGHT OF LAKE ERIE AND HERE SOON ROSE THE FIRST TEMPLE OF MODERN TIMES +4077-13751-0010-1268: TO THE FERVENT LATTER DAY SAINT A TEMPLE IS NOT SIMPLY A CHURCH BUILDING A HOUSE FOR RELIGIOUS ASSEMBLY +4077-13751-0011-1269: SOON THOUSANDS OF CONVERTS HAD RENTED OR PURCHASED HOMES IN MISSOURI INDEPENDENCE JACKSON COUNTY BEING THEIR CENTER BUT FROM THE FIRST THEY WERE UNPOPULAR AMONG THE (MISSOURIANS->MISSOURIENS) 
+4077-13751-0012-1270: THE LIEUTENANT GOVERNOR (LILBURN->LITTLE BURN) W (BOGGS->BOGS) AFTERWARD GOVERNOR WAS A PRONOUNCED MORMON HATER AND THROUGHOUT THE PERIOD OF THE TROUBLES HE (MANIFESTED->MANIFEST HIS) SYMPATHY WITH THE PERSECUTORS +4077-13751-0013-1271: THEIR SUFFERINGS HAVE NEVER YET BEEN FITLY CHRONICLED BY HUMAN SCRIBE +4077-13751-0014-1272: MAKING THEIR WAY ACROSS THE RIVER MOST OF THE REFUGEES FOUND SHELTER AMONG THE MORE HOSPITABLE PEOPLE OF CLAY COUNTY AND AFTERWARD ESTABLISHED THEMSELVES IN (CALDWELL->COLDWELL) COUNTY (THEREIN->THEY WERE IN) FOUNDING THE CITY OF FAR WEST +4077-13751-0015-1273: A SMALL SETTLEMENT HAD BEEN FOUNDED BY MORMON FAMILIES ON SHOAL CREEK AND HERE ON THE THIRTIETH OF OCTOBER EIGHTEEN THIRTY EIGHT A COMPANY OF TWO HUNDRED AND FORTY FELL UPON THE HAPLESS SETTLERS AND (BUTCHERED A->BUTCHER TO) SCORE +4077-13751-0016-1274: BE IT SAID TO THE HONOR OF SOME OF THE OFFICERS ENTRUSTED WITH (THE->A) TERRIBLE COMMISSION THAT WHEN THEY LEARNED ITS TRUE SIGNIFICANCE THEY RESIGNED THEIR AUTHORITY RATHER THAN HAVE ANYTHING TO DO WITH WHAT THEY DESIGNATED A COLD BLOODED BUTCHERY +4077-13751-0017-1275: OH WHAT A RECORD TO READ WHAT A PICTURE TO GAZE UPON HOW AWFUL THE FACT +4077-13751-0018-1276: AMERICAN (SCHOOL BOYS->SCHOOLBOYS) READ WITH EMOTIONS OF HORROR OF THE (ALBIGENSES->ALBIGENZAS) DRIVEN BEATEN AND KILLED WITH A (PAPAL->PEPPEL) LEGATE DIRECTING THE BUTCHERY AND OF THE (VAUDOIS->FAUDOIR) HUNTED AND HOUNDED LIKE BEASTS AS THE EFFECT OF A ROYAL DECREE AND THEY YET SHALL READ IN THE HISTORY OF THEIR OWN COUNTRY OF SCENES AS TERRIBLE AS THESE IN THE EXHIBITION OF INJUSTICE AND INHUMAN HATE +4077-13751-0019-1277: WHO BEGAN THE QUARREL WAS IT THE MORMONS +4077-13751-0020-1278: AS (A->THE) SAMPLE OF THE PRESS (COMMENTS->COMETS) AGAINST THE BRUTALITY OF THE (MISSOURIANS->MISSOURIANCE) I QUOTE A PARAGRAPH FROM THE QUINCY ARGUS MARCH SIXTEENTH EIGHTEEN THIRTY NINE +4077-13751-0021-1279: IT WILL BE OBSERVED THAT AN ORGANIZED MOB AIDED BY MANY OF THE CIVIL AND MILITARY OFFICERS OF MISSOURI WITH GOVERNOR (BOGGS->BOX) AT THEIR HEAD HAVE BEEN THE PROMINENT ACTORS IN THIS BUSINESS INCITED TOO IT APPEARS AGAINST THE MORMONS BY POLITICAL HATRED AND BY THE ADDITIONAL MOTIVES OF PLUNDER AND REVENGE +4077-13754-0000-1241: THE ARMY FOUND THE PEOPLE IN POVERTY AND LEFT THEM IN COMPARATIVE WEALTH +4077-13754-0001-1242: BUT A WORD FURTHER CONCERNING THE EXPEDITION IN GENERAL +4077-13754-0002-1243: IT WAS THROUGH (FLOYD'S->FLUD'S) ADVICE THAT (BUCHANAN ORDERED->BUCCATAN ORDER) THE MILITARY EXPEDITION TO UTAH OSTENSIBLY TO INSTALL CERTAIN FEDERAL OFFICIALS AND TO REPRESS AN ALLEGED INFANTILE REBELLION WHICH IN FACT HAD NEVER COME INTO EXISTENCE BUT IN REALITY TO FURTHER THE (INTERESTS->INTRICTS) OF THE SECESSIONISTS +4077-13754-0003-1244: MOREOVER HAD THE PEOPLE BEEN INCLINED TO REBELLION WHAT GREATER OPPORTUNITY COULD THEY HAVE WISHED +4077-13754-0004-1245: ALREADY A NORTH AND (A->THE) SOUTH WERE TALKED OF WHY NOT SET UP ALSO A WEST +4077-13754-0005-1246: THEY KNEW (NO->*) NORTH (NO->NOR) SOUTH (NO->NOR) EAST NO WEST THEY STOOD POSITIVELY BY THE CONSTITUTION AND WOULD HAVE NOTHING TO DO IN THE BLOODY STRIFE BETWEEN BROTHERS UNLESS INDEED THEY WERE SUMMONED BY THE AUTHORITY TO WHICH THEY HAD ALREADY ONCE LOYALLY RESPONDED TO FURNISH MEN (AND->IN) ARMS FOR THEIR COUNTRY'S NEED +4077-13754-0006-1247: WHAT THE LATTER DAY SAINTS CALL CELESTIAL MARRIAGE IS CHARACTERISTIC OF THE CHURCH AND IS IN VERY GENERAL (PRACTISE->PRACTICE) BUT OF CELESTIAL MARRIAGE PLURALITY OF WIVES WAS AN INCIDENT 
NEVER AN ESSENTIAL +4077-13754-0007-1248: WE BELIEVE IN A LITERAL RESURRECTION AND AN ACTUAL HEREAFTER IN WHICH FUTURE (STATE->STATES) SHALL BE RECOGNIZED EVERY SANCTIFIED AND AUTHORIZED RELATIONSHIP EXISTING HERE ON EARTH OF PARENT AND CHILD BROTHER AND SISTER HUSBAND AND WIFE +4077-13754-0008-1249: IT HAS BEEN MY PRIVILEGE TO TREAD THE SOIL OF MANY LANDS TO OBSERVE THE CUSTOMS AND STUDY THE HABITS OF MORE NATIONS THAN ONE AND I HAVE YET TO FIND THE PLACE AND MEET THE PEOPLE (WHERE AND WITH->WHEREINWITH) WHOM THE PURITY OF MAN AND WOMAN IS HELD MORE PRECIOUS THAN AMONG THE MALIGNED MORMONS IN THE MOUNTAIN VALLEYS OF THE WEST +4077-13754-0009-1250: AT THE INCEPTION OF (PLURAL->PEARL) MARRIAGE AMONG THE LATTER DAY SAINTS THERE WAS NO LAW NATIONAL OR STATE AGAINST ITS (PRACTISE->PRACTICE) +4077-13754-0010-1251: IN EIGHTEEN SIXTY TWO A LAW WAS ENACTED WITH (THE->A) PURPOSE OF SUPPRESSING PLURAL MARRIAGE AND AS HAD BEEN PREDICTED IN THE NATIONAL SENATE (PRIOR->PRAYER) TO ITS PASSAGE IT LAY FOR MANY YEARS A DEAD LETTER +4077-13754-0011-1252: FEDERAL JUDGES AND UNITED STATES ATTORNEYS IN (UTAH->UTA) WHO WERE NOT MORMONS NOR LOVERS OF (MORMONISM->WARMONISM) REFUSED TO ENTERTAIN COMPLAINTS OR PROSECUTE CASES UNDER THE LAW BECAUSE OF ITS MANIFEST INJUSTICE AND INADEQUACY +4077-13754-0012-1253: THIS MEANT THAT FOR AN ALLEGED MISDEMEANOR FOR WHICH CONGRESS PRESCRIBED A MAXIMUM PENALTY OF SIX MONTHS IMPRISONMENT AND A FINE OF THREE HUNDRED DOLLARS A MAN MIGHT BE IMPRISONED FOR LIFE (AYE->I) FOR MANY TERMS OF A MAN'S NATURAL LIFE DID THE COURT'S POWER TO ENFORCE ITS SENTENCES EXTEND SO FAR AND MIGHT BE FINED MILLIONS OF DOLLARS +4077-13754-0013-1254: BEFORE THIS TRAVESTY ON THE ADMINISTRATION OF LAW COULD BE BROUGHT BEFORE THE COURT OF LAST RESORT AND THERE (MEET->MET) WITH THE REVERSAL AND REBUKE IT DESERVED MEN WERE IMPRISONED UNDER (SENTENCES->SENTENCE) OF MANY YEARS DURATION +4077-13754-0014-1255: THE PEOPLE CONTESTED THESE MEASURES ONE BY ONE IN THE COURTS PRESENTING IN CASE AFTER CASE THE DIFFERENT PHASES OF THE SUBJECT AND URGING THE UNCONSTITUTIONALITY OF THE MEASURE +4077-13754-0015-1256: THEN THE CHURCH WAS DISINCORPORATED AND ITS PROPERTY BOTH REAL AND PERSONAL CONFISCATED AND (ESCHEATED->ISIATED) TO THE GOVERNMENT OF THE UNITED STATES AND ALTHOUGH THE PERSONAL PROPERTY WAS SOON RESTORED REAL ESTATE OF GREAT VALUE LONG LAY IN THE HANDS OF THE (COURT'S->COURTS) RECEIVER AND THE MORMON CHURCH HAD TO PAY THE NATIONAL GOVERNMENT (HIGH->HIGHER) RENTAL ON ITS OWN PROPERTY +4077-13754-0016-1257: AND SO THE STORY OF MORMONISM RUNS ON ITS FINALE HAS NOT YET BEEN WRITTEN THE CURRENT PRESS PRESENTS CONTINUOUSLY NEW STAGES OF ITS PROGRESS NEW DEVELOPMENTS OF ITS PLAN +4446-2271-0000-1133: (MAINHALL->MAIN HALL) LIKED ALEXANDER BECAUSE HE WAS AN ENGINEER +4446-2271-0001-1134: (HE->WE) HAD (PRECONCEIVED->FREQUENCEDE) IDEAS ABOUT EVERYTHING AND HIS IDEA ABOUT AMERICANS WAS THAT THEY SHOULD BE ENGINEERS OR MECHANICS +4446-2271-0002-1135: (IT'S->ITS) TREMENDOUSLY WELL PUT ON TOO +4446-2271-0003-1136: IT'S BEEN ON ONLY TWO WEEKS AND I'VE BEEN HALF A DOZEN TIMES ALREADY +4446-2271-0004-1137: DO YOU KNOW ALEXANDER (MAINHALL->MAIN HALL) LOOKED WITH PERPLEXITY UP INTO THE TOP OF THE HANSOM AND RUBBED HIS PINK CHEEK WITH HIS GLOVED FINGER DO YOU KNOW I SOMETIMES THINK OF TAKING TO CRITICISM SERIOUSLY MYSELF +4446-2271-0005-1138: SHE SAVES HER HAND TOO SHE'S AT HER BEST IN THE SECOND ACT +4446-2271-0006-1139: HE'S BEEN WANTING TO MARRY (HILDA->HILDER) THESE THREE YEARS AND MORE +4446-2271-0007-1140: SHE DOESN'T TAKE 
UP WITH ANYBODY YOU KNOW +4446-2271-0008-1141: IRENE (BURGOYNE->WERE GOING) ONE OF HER FAMILY TOLD ME IN CONFIDENCE THAT THERE WAS A ROMANCE SOMEWHERE BACK IN THE BEGINNING +4446-2271-0009-1142: (MAINHALL->MAIN HOLE) VOUCHED FOR HER CONSTANCY WITH A LOFTINESS THAT MADE ALEXANDER SMILE EVEN WHILE A KIND OF RAPID EXCITEMENT WAS TINGLING THROUGH HIM +4446-2271-0010-1143: HE'S ANOTHER WHO'S AWFULLY KEEN ABOUT HER LET ME INTRODUCE YOU +4446-2271-0011-1144: SIR (HARRY TOWNE->HARRYTOWN) MISTER BARTLEY ALEXANDER THE AMERICAN ENGINEER +4446-2271-0012-1145: I SAY SIR HARRY THE LITTLE GIRL'S GOING FAMOUSLY TO NIGHT ISN'T SHE +4446-2271-0013-1146: (DO->*) YOU KNOW I THOUGHT THE DANCE A BIT CONSCIOUS TO NIGHT FOR THE FIRST TIME +4446-2271-0014-1147: (WESTMERE->WESTMER) AND I WERE BACK AFTER THE FIRST ACT AND WE THOUGHT SHE SEEMED QUITE UNCERTAIN OF HERSELF +4446-2271-0015-1148: A LITTLE ATTACK OF NERVES POSSIBLY +4446-2271-0016-1149: (HE->IT) WAS BEGINNING TO FEEL (A->THE) KEEN INTEREST IN THE SLENDER BAREFOOT DONKEY GIRL WHO SLIPPED IN AND OUT OF THE PLAY SINGING LIKE SOME ONE WINDING THROUGH A HILLY FIELD +4446-2271-0017-1150: ONE NIGHT WHEN HE AND WINIFRED WERE SITTING TOGETHER ON THE BRIDGE HE TOLD HER (THAT->THE) THINGS HAD HAPPENED WHILE HE WAS STUDYING ABROAD THAT HE WAS SORRY FOR ONE THING IN PARTICULAR AND HE ASKED HER WHETHER SHE THOUGHT SHE OUGHT TO KNOW ABOUT THEM +4446-2271-0018-1151: SHE CONSIDERED (*->FOR) A MOMENT AND THEN SAID NO I THINK NOT (THOUGH->THE WAY) I AM GLAD YOU ASK ME +4446-2271-0019-1152: AFTER THAT IT WAS EASY TO FORGET ACTUALLY TO FORGET +4446-2271-0020-1153: OF COURSE HE REFLECTED SHE ALWAYS HAD THAT COMBINATION OF SOMETHING HOMELY AND SENSIBLE AND SOMETHING UTTERLY WILD AND DAFT +4446-2271-0021-1154: SHE MUST CARE ABOUT THE THEATRE A GREAT DEAL MORE THAN SHE USED TO +4446-2271-0022-1155: I'M GLAD SHE'S HELD HER OWN SINCE +4446-2271-0023-1156: AFTER ALL WE WERE AWFULLY YOUNG +4446-2271-0024-1157: I SHOULDN'T WONDER IF SHE COULD LAUGH ABOUT IT WITH ME NOW +4446-2273-0000-1158: HILDA WAS VERY NICE TO HIM AND HE SAT ON THE EDGE OF HIS CHAIR FLUSHED WITH HIS CONVERSATIONAL EFFORTS AND MOVING HIS CHIN ABOUT NERVOUSLY OVER HIS HIGH COLLAR +4446-2273-0001-1159: THEY ASKED HIM TO COME TO SEE THEM IN CHELSEA AND THEY SPOKE VERY TENDERLY OF HILDA +4446-2273-0002-1160: LAMB WOULDN'T CARE A GREAT DEAL ABOUT MANY OF THEM I FANCY +4446-2273-0003-1161: WHEN BARTLEY ARRIVED AT BEDFORD SQUARE ON SUNDAY EVENING MARIE THE PRETTY LITTLE FRENCH GIRL MET HIM AT THE DOOR AND CONDUCTED HIM UPSTAIRS +4446-2273-0004-1162: I SHOULD NEVER HAVE ASKED YOU IF MOLLY HAD BEEN HERE FOR I REMEMBER YOU DON'T LIKE ENGLISH COOKERY +4446-2273-0005-1163: I HAVEN'T HAD A CHANCE YET TO TELL YOU WHAT A JOLLY LITTLE PLACE I THINK THIS IS +4446-2273-0006-1164: THEY ARE ALL SKETCHES MADE ABOUT THE VILLA (D'ESTE->DESTA) YOU SEE +4446-2273-0007-1165: THOSE FELLOWS ARE ALL VERY LOYAL EVEN (MAINHALL->MAIN HALL) +4446-2273-0008-1166: I'VE MANAGED TO SAVE SOMETHING EVERY YEAR AND THAT WITH HELPING MY THREE SISTERS NOW AND THEN AND TIDING POOR COUSIN (MIKE->MICHAEL) OVER BAD SEASONS +4446-2273-0009-1167: IT'S NOT PARTICULARLY RARE SHE SAID BUT SOME OF IT WAS MY MOTHER'S +4446-2273-0010-1168: THERE WAS WATERCRESS SOUP AND SOLE AND A DELIGHTFUL (OMELETTE->OMELET) STUFFED WITH MUSHROOMS AND TRUFFLES AND TWO SMALL RARE DUCKLINGS AND ARTICHOKES AND A DRY YELLOW (RHONE->ROAN) WINE OF WHICH BARTLEY HAD ALWAYS BEEN VERY FOND +4446-2273-0011-1169: THERE IS NOTHING ELSE THAT LOOKS SO JOLLY +4446-2273-0012-1170: THANK YOU BUT I 
DON'T LIKE IT SO WELL AS THIS +4446-2273-0013-1171: HAVE YOU BEEN IN PARIS MUCH THESE LATE YEARS +4446-2273-0014-1172: THERE ARE (*->A) FEW CHANGES IN THE OLD QUARTER +4446-2273-0015-1173: DON'T I THOUGH I'M SO SORRY TO HEAR IT HOW DID HER SON TURN OUT +4446-2273-0016-1174: HER HAIR IS STILL LIKE FLAX AND HER BLUE EYES ARE JUST LIKE A BABY'S AND SHE HAS THE SAME THREE FRECKLES ON HER LITTLE NOSE AND TALKS ABOUT GOING BACK TO HER (BAINS DE MER->BANDOMERE) +4446-2273-0017-1175: HOW JOLLY IT WAS BEING YOUNG HILDA +4446-2273-0018-1176: DO YOU REMEMBER THAT FIRST WALK WE TOOK TOGETHER IN PARIS +4446-2273-0019-1177: COME WE'LL HAVE OUR COFFEE IN THE OTHER ROOM AND YOU CAN SMOKE +4446-2273-0020-1178: I THINK WE DID SHE ANSWERED DEMURELY +4446-2273-0021-1179: WHAT SHE WANTED FROM US WAS NEITHER OUR FLOWERS NOR OUR (FRANCS->FRANKS) BUT JUST OUR YOUTH +4446-2273-0022-1180: THEY WERE BOTH REMEMBERING WHAT THE WOMAN HAD SAID WHEN SHE TOOK THE MONEY GOD GIVE YOU A HAPPY LOVE +4446-2273-0023-1181: THE STRANGE WOMAN AND HER PASSIONATE SENTENCE THAT RANG OUT SO SHARPLY HAD FRIGHTENED THEM BOTH +4446-2273-0024-1182: BARTLEY STARTED WHEN HILDA RANG THE LITTLE BELL BESIDE HER DEAR ME WHY DID YOU DO THAT +4446-2273-0025-1183: IT WAS VERY JOLLY HE MURMURED LAZILY AS MARIE CAME IN TO TAKE AWAY THE COFFEE +4446-2273-0026-1184: HAVE I TOLD YOU ABOUT MY NEW PLAY +4446-2273-0027-1185: WHEN SHE FINISHED ALEXANDER SHOOK HIMSELF OUT OF A REVERIE +4446-2273-0028-1186: NONSENSE OF COURSE I CAN'T REALLY SING EXCEPT THE WAY MY MOTHER AND GRANDMOTHER DID BEFORE ME +4446-2273-0029-1187: IT'S REALLY TOO WARM IN THIS ROOM TO SING DON'T YOU FEEL IT +4446-2273-0030-1188: ALEXANDER WENT OVER AND OPENED THE WINDOW FOR HER +4446-2273-0031-1189: THERE JUST IN FRONT +4446-2273-0032-1190: HE STOOD A LITTLE BEHIND HER AND TRIED TO STEADY HIMSELF AS HE SAID IT'S SOFT AND MISTY SEE HOW WHITE THE STARS ARE +4446-2273-0033-1191: FOR A LONG TIME NEITHER HILDA NOR BARTLEY SPOKE +4446-2273-0034-1192: HE FELT A TREMOR RUN THROUGH THE SLENDER YELLOW FIGURE IN FRONT OF HIM +4446-2273-0035-1193: BARTLEY LEANED OVER HER SHOULDER WITHOUT TOUCHING HER AND WHISPERED IN HER EAR YOU ARE GIVING ME A CHANCE YES +4446-2273-0036-1194: ALEXANDER (UNCLENCHED->CLENCHED) THE TWO HANDS AT HIS SIDES +4446-2275-0000-1195: THE STOP AT QUEENSTOWN THE TEDIOUS PASSAGE (UP->OF) THE (MERSEY->MERCY) WERE THINGS THAT HE NOTED DIMLY THROUGH HIS GROWING IMPATIENCE +4446-2275-0001-1196: SHE BLUSHED AND SMILED AND FUMBLED HIS CARD IN HER CONFUSION BEFORE SHE RAN UPSTAIRS +4446-2275-0002-1197: ALEXANDER PACED UP AND DOWN THE HALLWAY BUTTONING AND UNBUTTONING HIS OVERCOAT UNTIL SHE RETURNED AND TOOK HIM UP TO HILDA'S LIVING ROOM +4446-2275-0003-1198: THE ROOM WAS EMPTY WHEN HE ENTERED +4446-2275-0004-1199: ALEXANDER DID NOT SIT DOWN +4446-2275-0005-1200: I FELT IT IN MY BONES WHEN I WOKE THIS MORNING THAT SOMETHING SPLENDID WAS GOING TO TURN UP +4446-2275-0006-1201: I THOUGHT IT MIGHT BE SISTER KATE OR COUSIN MIKE WOULD BE HAPPENING ALONG +4446-2275-0007-1202: SHE PUSHED HIM TOWARD THE BIG CHAIR BY THE FIRE AND SAT DOWN ON A STOOL AT THE OPPOSITE SIDE OF THE HEARTH HER KNEES DRAWN UP TO HER CHIN LAUGHING LIKE A HAPPY LITTLE GIRL +4446-2275-0008-1203: WHEN DID YOU COME BARTLEY AND HOW DID IT HAPPEN YOU HAVEN'T SPOKEN A WORD +4446-2275-0009-1204: I GOT IN ABOUT TEN MINUTES AGO +4446-2275-0010-1205: ALEXANDER LEANED FORWARD AND WARMED HIS HANDS BEFORE THE BLAZE +4446-2275-0011-1206: BARTLEY BENT (LOWER->LOWERED) OVER THE FIRE +4446-2275-0012-1207: SHE LOOKED AT HIS HEAVY 
SHOULDERS (AND->IN) BIG DETERMINED HEAD THRUST FORWARD LIKE A CATAPULT IN LEASH +4446-2275-0013-1208: I'LL DO ANYTHING YOU WISH ME TO BARTLEY SHE SAID TREMULOUSLY +4446-2275-0014-1209: I CAN'T STAND SEEING YOU MISERABLE +4446-2275-0015-1210: HE PULLED UP A WINDOW AS IF THE AIR WERE HEAVY +4446-2275-0016-1211: HILDA WATCHED HIM FROM (HER->THE) CORNER TREMBLING AND SCARCELY BREATHING DARK SHADOWS GROWING ABOUT HER EYES (IT->*) +4446-2275-0017-1212: BUT IT'S WORSE NOW IT'S UNBEARABLE +4446-2275-0018-1213: I GET NOTHING BUT MISERY OUT OF EITHER +4446-2275-0019-1214: THE WORLD IS ALL THERE JUST AS IT USED TO BE BUT I CAN'T GET AT IT ANY MORE +4446-2275-0020-1215: IT WAS MYSELF I WAS DEFYING (HILDA->HELDA) +4446-2275-0021-1216: (HILDA'S->HELDA'S) FACE QUIVERED BUT SHE WHISPERED YES I THINK IT MUST HAVE BEEN +4446-2275-0022-1217: BUT WHY DIDN'T YOU TELL ME WHEN YOU WERE HERE IN THE SUMMER +4446-2275-0023-1218: ALEXANDER GROANED I MEANT TO BUT SOMEHOW I COULDN'T +4446-2275-0024-1219: SHE PRESSED HIS HAND GENTLY IN GRATITUDE +4446-2275-0025-1220: WEREN'T YOU HAPPY THEN AT ALL +4446-2275-0026-1221: SHE CLOSED HER EYES AND TOOK A DEEP BREATH AS IF TO DRAW IN AGAIN THE FRAGRANCE OF THOSE DAYS +4446-2275-0027-1222: HE MOVED UNEASILY AND HIS CHAIR CREAKED +4446-2275-0028-1223: YES YES SHE HURRIED PULLING HER HAND GENTLY AWAY FROM HIM +4446-2275-0029-1224: PLEASE TELL ME ONE THING BARTLEY AT LEAST TELL ME THAT YOU BELIEVE I THOUGHT I WAS MAKING YOU HAPPY +4446-2275-0030-1225: YES (HILDA->HELDA) I KNOW THAT HE SAID SIMPLY +4446-2275-0031-1226: I UNDERSTAND BARTLEY I WAS WRONG +4446-2275-0032-1227: BUT I DIDN'T KNOW YOU'VE ONLY TO TELL ME NOW +4446-2275-0033-1228: WHAT I MEAN IS THAT I WANT YOU TO PROMISE NEVER TO SEE ME AGAIN NO MATTER HOW OFTEN I COME NO MATTER HOW HARD I BEG +4446-2275-0034-1229: KEEP AWAY IF YOU WISH WHEN HAVE I EVER FOLLOWED YOU +4446-2275-0035-1230: ALEXANDER ROSE AND SHOOK HIMSELF ANGRILY YES I KNOW I'M COWARDLY +4446-2275-0036-1231: HE TOOK (HER->A) ROUGHLY IN HIS ARMS DO YOU KNOW WHAT I MEAN +4446-2275-0037-1232: (OH->O) BARTLEY WHAT AM I TO DO +4446-2275-0038-1233: I WILL ASK THE LEAST IMAGINABLE BUT I MUST HAVE SOMETHING +4446-2275-0039-1234: I MUST KNOW ABOUT YOU +4446-2275-0040-1235: THE SIGHT OF YOU BARTLEY TO SEE YOU LIVING AND HAPPY AND SUCCESSFUL CAN I NEVER MAKE YOU UNDERSTAND WHAT THAT MEANS TO ME +4446-2275-0041-1236: YOU SEE LOVING SOME ONE AS I LOVE YOU MAKES THE WHOLE WORLD DIFFERENT +4446-2275-0042-1237: AND THEN YOU CAME BACK NOT CARING VERY MUCH BUT IT MADE NO DIFFERENCE +4446-2275-0043-1238: BARTLEY BENT OVER AND TOOK HER IN HIS ARMS KISSING HER MOUTH AND HER WET TIRED EYES +4446-2275-0044-1239: (DON'T->A TALL) CRY DON'T CRY HE WHISPERED +4446-2275-0045-1240: (WE'VE->WITH) TORTURED EACH OTHER ENOUGH FOR (TONIGHT->TO NIGHT) +4507-16021-0000-1469: CHAPTER ONE ORIGIN +4507-16021-0001-1470: IT ENGENDERS A WHOLE WORLD (LA PEGRE->LAPE) FOR WHICH (READ->RED) THEFT AND A HELL LA (PEGRENNE->PAGRIN) FOR WHICH (READ->RED) HUNGER +4507-16021-0002-1471: THUS IDLENESS IS THE MOTHER +4507-16021-0003-1472: SHE HAS A SON THEFT AND A DAUGHTER HUNGER +4507-16021-0004-1473: WHAT IS SLANG +4507-16021-0005-1474: WE HAVE NEVER UNDERSTOOD THIS SORT OF OBJECTIONS +4507-16021-0006-1475: SLANG IS ODIOUS +4507-16021-0007-1476: SLANG MAKES ONE SHUDDER +4507-16021-0008-1477: WHO DENIES THAT OF COURSE IT DOES +4507-16021-0009-1478: WHEN IT IS A QUESTION OF PROBING A WOUND A GULF A SOCIETY SINCE (WHEN->ONE) HAS IT BEEN CONSIDERED WRONG TO GO TOO FAR TO GO TO THE BOTTOM +4507-16021-0010-1479: WE 
HAVE ALWAYS THOUGHT THAT IT WAS SOMETIMES A COURAGEOUS ACT AND AT LEAST A SIMPLE AND USEFUL DEED WORTHY OF THE SYMPATHETIC ATTENTION WHICH DUTY ACCEPTED AND FULFILLED MERITS +4507-16021-0011-1480: WHY SHOULD ONE NOT EXPLORE EVERYTHING AND STUDY EVERYTHING +4507-16021-0012-1481: WHY SHOULD ONE HALT ON THE WAY +4507-16021-0013-1482: NOTHING IS MORE LUGUBRIOUS THAN THE CONTEMPLATION THUS IN ITS NUDITY IN THE BROAD LIGHT OF THOUGHT OF THE HORRIBLE SWARMING OF SLANG +4507-16021-0014-1483: (NOW->NO) WHEN HAS HORROR EVER EXCLUDED STUDY +4507-16021-0015-1484: SINCE WHEN HAS MALADY BANISHED MEDICINE +4507-16021-0016-1485: CAN ONE IMAGINE A NATURALIST REFUSING TO STUDY THE VIPER THE BAT THE SCORPION THE CENTIPEDE THE (TARANTULA->TURANSULA) AND ONE WHO WOULD CAST THEM BACK INTO THEIR DARKNESS SAYING (OH->O) HOW UGLY THAT IS +4507-16021-0017-1486: HE WOULD BE LIKE A PHILOLOGIST REFUSING TO EXAMINE A FACT IN LANGUAGE A PHILOSOPHER HESITATING TO SCRUTINIZE A FACT IN HUMANITY +4507-16021-0018-1487: WHAT IS SLANG PROPERLY SPEAKING +4507-16021-0019-1488: IT IS THE LANGUAGE OF WRETCHEDNESS +4507-16021-0020-1489: WE MAY BE STOPPED THE FACT MAY BE PUT TO US IN GENERAL TERMS WHICH IS ONE WAY OF ATTENUATING IT WE MAY BE TOLD THAT ALL TRADES PROFESSIONS IT MAY BE ADDED ALL THE ACCIDENTS OF THE SOCIAL HIERARCHY AND ALL FORMS OF INTELLIGENCE HAVE THEIR OWN SLANG +4507-16021-0021-1490: THE PAINTER WHO SAYS MY GRINDER THE NOTARY WHO SAYS MY SKIP THE GUTTER THE (HAIRDRESSER->HAIR DRESSER) WHO SAYS MY (MEALYBACK->MEALEY BACK) THE COBBLER WHO SAYS MY CUB TALKS (SLANG->SLING) +4507-16021-0022-1491: THERE IS THE (SLANG->SLAYING) OF THE AFFECTED LADY AS WELL AS OF THE (PRECIEUSES->PURSUS) +4507-16021-0023-1492: THE SUGAR MANUFACTURER WHO SAYS LOAF CLARIFIED LUMPS BASTARD COMMON BURNT THIS HONEST MANUFACTURER TALKS SLANG +4507-16021-0024-1493: ALGEBRA MEDICINE (BOTANY->BARTANY) HAVE EACH THEIR SLANG +4507-16021-0025-1494: TO MEET THE NEEDS OF THIS CONFLICT WRETCHEDNESS HAS INVENTED A LANGUAGE OF COMBAT WHICH IS SLANG +4507-16021-0026-1495: TO KEEP AFLOAT AND TO RESCUE FROM OBLIVION TO HOLD ABOVE THE GULF (WERE->WHERE) IT BUT A FRAGMENT OF SOME LANGUAGE WHICH MAN HAS SPOKEN AND WHICH WOULD OTHERWISE BE LOST THAT IS TO SAY ONE OF THE ELEMENTS GOOD OR BAD OF WHICH CIVILIZATION IS COMPOSED OR BY WHICH IT IS COMPLICATED TO EXTEND THE RECORDS OF SOCIAL OBSERVATION IS TO SERVE CIVILIZATION ITSELF +4507-16021-0027-1496: PHOENICIAN VERY GOOD +4507-16021-0028-1497: EVEN DIALECT LET THAT PASS +4507-16021-0029-1498: TO THIS WE REPLY IN ONE WORD ONLY +4507-16021-0030-1499: ASSUREDLY IF THE TONGUE WHICH A NATION OR A PROVINCE HAS SPOKEN IS WORTHY OF INTEREST THE LANGUAGE WHICH HAS BEEN SPOKEN BY A MISERY IS STILL MORE WORTHY OF ATTENTION AND STUDY +4507-16021-0031-1500: AND THEN WE INSIST UPON IT THE STUDY OF SOCIAL DEFORMITIES AND INFIRMITIES AND THE TASK OF POINTING THEM OUT WITH (A->THE) VIEW TO REMEDY IS NOT A BUSINESS IN WHICH (CHOICE IS->CHOICEST) PERMITTED +4507-16021-0032-1501: HE MUST DESCEND WITH HIS HEART FULL OF CHARITY AND SEVERITY AT THE SAME TIME AS A BROTHER AND AS A JUDGE TO THOSE IMPENETRABLE CASEMATES (WHERE->WERE) CRAWL PELL MELL THOSE WHO BLEED AND THOSE WHO DEAL THE BLOW THOSE WHO WEEP (AND->IN) THOSE WHO CURSE THOSE WHO FAST (AND->IN) THOSE WHO DEVOUR THOSE WHO ENDURE EVIL AND THOSE WHO INFLICT IT +4507-16021-0033-1502: DO WE REALLY KNOW THE MOUNTAIN WELL WHEN WE ARE NOT ACQUAINTED WITH THE CAVERN +4507-16021-0034-1503: THEY CONSTITUTE TWO DIFFERENT ORDERS OF FACTS WHICH CORRESPOND TO EACH OTHER WHICH ARE ALWAYS 
INTERLACED AND WHICH OFTEN BRING FORTH RESULTS +4507-16021-0035-1504: TRUE HISTORY BEING A MIXTURE OF ALL THINGS THE TRUE HISTORIAN MINGLES IN EVERYTHING +4507-16021-0036-1505: FACTS FORM ONE OF THESE AND IDEAS THE OTHER +4507-16021-0037-1506: THERE IT CLOTHES ITSELF IN WORD MASKS IN METAPHOR RAGS +4507-16021-0038-1507: IN THIS (GUISE->SKIES) IT BECOMES HORRIBLE +4507-16021-0039-1508: ONE PERCEIVES WITHOUT UNDERSTANDING IT A HIDEOUS MURMUR SOUNDING ALMOST LIKE HUMAN ACCENTS BUT MORE NEARLY RESEMBLING A HOWL THAN AN ARTICULATE WORD +4507-16021-0040-1509: ONE THINKS ONE HEARS HYDRAS TALKING +4507-16021-0041-1510: IT IS UNINTELLIGIBLE IN THE DARK +4507-16021-0042-1511: IT IS BLACK (IN->AND) MISFORTUNE IT IS BLACKER STILL (IN->AND) CRIME THESE TWO BLACKNESSES AMALGAMATED (COMPOSE SLANG->COMPOSED SLING) +4507-16021-0043-1512: THE EARTH IS NOT DEVOID OF RESEMBLANCE TO A JAIL +4507-16021-0044-1513: LOOK CLOSELY AT LIFE +4507-16021-0045-1514: IT IS SO MADE THAT EVERYWHERE WE FEEL THE SENSE OF PUNISHMENT +4507-16021-0046-1515: EACH DAY HAS ITS OWN GREAT GRIEF (OR->FOR) ITS LITTLE CARE +4507-16021-0047-1516: YESTERDAY YOU WERE TREMBLING FOR A HEALTH THAT IS DEAR TO YOU TO DAY YOU FEAR FOR YOUR OWN TO MORROW IT WILL BE ANXIETY ABOUT MONEY THE DAY AFTER TO MORROW THE (DIATRIBE->DIETRIBE) OF A SLANDERER THE DAY AFTER THAT THE MISFORTUNE OF SOME FRIEND THEN THE PREVAILING WEATHER THEN SOMETHING THAT HAS BEEN BROKEN OR LOST THEN A PLEASURE WITH WHICH YOUR CONSCIENCE AND YOUR VERTEBRAL COLUMN REPROACH YOU AGAIN THE COURSE OF PUBLIC AFFAIRS +4507-16021-0048-1517: THIS WITHOUT RECKONING IN THE PAINS OF THE HEART AND SO (IT->TO) GOES ON +4507-16021-0049-1518: THERE IS HARDLY ONE DAY OUT OF A HUNDRED WHICH IS WHOLLY JOYOUS AND SUNNY +4507-16021-0050-1519: AND YOU BELONG TO THAT SMALL CLASS WHO ARE HAPPY +4507-16021-0051-1520: IN THIS (WORLD->WORLD'S) EVIDENTLY THE VESTIBULE OF ANOTHER THERE ARE NO FORTUNATE +4507-16021-0052-1521: THE REAL HUMAN DIVISION IS THIS THE LUMINOUS AND THE SHADY +4507-16021-0053-1522: TO DIMINISH THE NUMBER OF THE SHADY TO AUGMENT THE NUMBER OF THE LUMINOUS THAT IS THE OBJECT +4507-16021-0054-1523: THAT IS WHY WE CRY EDUCATION SCIENCE +4507-16021-0055-1524: TO TEACH READING MEANS TO LIGHT THE FIRE EVERY SYLLABLE (SPELLED->SPELL'D) OUT SPARKLES +4507-16021-0056-1525: HOWEVER HE WHO SAYS LIGHT DOES NOT NECESSARILY SAY JOY +4507-16021-0057-1526: PEOPLE SUFFER IN THE LIGHT EXCESS BURNS +4507-16021-0058-1527: THE FLAME IS THE ENEMY OF THE WING +4507-16021-0059-1528: TO BURN WITHOUT CEASING TO FLY THEREIN LIES THE MARVEL OF GENIUS +4970-29093-0000-2093: YOU'LL NEVER DIG IT OUT OF THE (ASTOR->ASTER) LIBRARY +4970-29093-0001-2094: TO THE YOUNG AMERICAN HERE OR ELSEWHERE THE PATHS TO FORTUNE ARE INNUMERABLE AND ALL OPEN THERE IS INVITATION IN THE AIR AND SUCCESS IN ALL HIS WIDE HORIZON +4970-29093-0002-2095: HE HAS NO TRADITIONS TO BIND HIM OR GUIDE HIM AND HIS IMPULSE IS TO BREAK AWAY FROM THE OCCUPATION HIS FATHER HAS FOLLOWED AND MAKE A NEW WAY FOR HIMSELF +4970-29093-0003-2096: THE MODEST FELLOW WOULD HAVE LIKED FAME THRUST UPON HIM FOR SOME WORTHY ACHIEVEMENT IT MIGHT BE FOR A BOOK OR FOR THE (SKILLFUL->SKILFUL) MANAGEMENT OF SOME GREAT NEWSPAPER OR FOR SOME DARING EXPEDITION LIKE THAT OF LIEUTENANT STRAIN OR DOCTOR KANE +4970-29093-0004-2097: HE WAS UNABLE TO DECIDE EXACTLY WHAT IT SHOULD BE +4970-29093-0005-2098: SOMETIMES HE THOUGHT HE WOULD LIKE TO STAND IN A CONSPICUOUS PULPIT AND HUMBLY PREACH THE GOSPEL OF REPENTANCE AND IT EVEN CROSSED HIS MIND THAT IT WOULD BE NOBLE TO GIVE 
HIMSELF TO A MISSIONARY LIFE TO SOME BENIGHTED REGION WHERE THE DATE PALM (GROWS->GROVES) AND THE NIGHTINGALE'S VOICE IS IN TUNE AND THE (BUL BUL->BULL BULL) SINGS ON THE (OFF->OPT) NIGHTS +4970-29093-0006-2099: LAW SEEMED TO HIM WELL ENOUGH AS A SCIENCE BUT HE NEVER COULD DISCOVER A PRACTICAL CASE WHERE IT APPEARED TO HIM WORTH WHILE TO GO TO LAW AND ALL THE CLIENTS WHO STOPPED WITH THIS NEW CLERK IN THE (ANTE ROOM->ANTEROOM) OF THE LAW OFFICE WHERE HE WAS WRITING PHILIP INVARIABLY ADVISED TO SETTLE NO MATTER HOW BUT SETTLE GREATLY TO THE DISGUST OF HIS EMPLOYER WHO KNEW THAT JUSTICE BETWEEN MAN AND MAN COULD ONLY BE ATTAINED BY THE RECOGNIZED PROCESSES WITH THE ATTENDANT FEES +4970-29093-0007-2100: IT IS SUCH A NOBLE AMBITION THAT IT IS A PITY IT HAS USUALLY SUCH A SHALLOW FOUNDATION +4970-29093-0008-2101: HE WANTED TO BEGIN AT THE TOP OF THE LADDER +4970-29093-0009-2102: PHILIP THEREFORE READ DILIGENTLY IN THE (ASTOR->ASTER) LIBRARY PLANNED LITERARY WORKS THAT SHOULD COMPEL ATTENTION AND NURSED HIS GENIUS +4970-29093-0010-2103: HE HAD NO FRIEND WISE ENOUGH TO TELL HIM TO STEP INTO THE DORKING CONVENTION (THEN->THAN) IN SESSION MAKE A SKETCH OF THE MEN AND WOMEN ON THE PLATFORM AND TAKE IT TO THE EDITOR OF THE DAILY (GRAPEVINE->GRAPE VINE) AND SEE WHAT HE COULD GET A LINE FOR IT +4970-29093-0011-2104: (O->OH) VERY WELL SAID (GRINGO->GRINGAUD) TURNING AWAY WITH A SHADE OF CONTEMPT YOU'LL FIND IF YOU ARE GOING INTO LITERATURE AND NEWSPAPER WORK THAT YOU CAN'T AFFORD A CONSCIENCE LIKE THAT +4970-29093-0012-2105: BUT PHILIP DID AFFORD IT AND HE WROTE THANKING HIS FRIENDS AND DECLINING BECAUSE HE SAID THE POLITICAL SCHEME WOULD FAIL AND OUGHT TO FAIL +4970-29093-0013-2106: AND HE WENT BACK TO HIS BOOKS AND TO HIS WAITING FOR AN OPENING LARGE ENOUGH FOR HIS DIGNIFIED ENTRANCE INTO THE LITERARY WORLD +4970-29093-0014-2107: WELL I'M GOING AS AN ENGINEER YOU (CAN->COULD) GO AS ONE +4970-29093-0015-2108: YOU CAN BEGIN BY CARRYING A ROD AND PUTTING DOWN THE FIGURES +4970-29093-0016-2109: NO (ITS NOT->IT'S OUGHT) TOO SOON +4970-29093-0017-2110: I'VE BEEN READY TO GO ANYWHERE FOR SIX MONTHS +4970-29093-0018-2111: THE TWO YOUNG MEN WHO WERE BY THIS TIME FULL OF THE ADVENTURE WENT DOWN TO THE WALL STREET OFFICE OF HENRY'S UNCLE AND HAD A TALK WITH THAT WILY OPERATOR +4970-29093-0019-2112: THE NIGHT WAS SPENT IN PACKING UP AND WRITING LETTERS FOR PHILIP WOULD NOT TAKE SUCH AN IMPORTANT STEP WITHOUT INFORMING HIS FRIENDS +4970-29093-0020-2113: WHY IT'S (IN->A) MISSOURI SOMEWHERE ON THE FRONTIER I THINK WE'LL GET A MAP +4970-29093-0021-2114: I WAS AFRAID IT WAS NEARER HOME +4970-29093-0022-2115: HE KNEW HIS UNCLE WOULD BE GLAD TO HEAR THAT HE HAD AT LAST TURNED HIS THOUGHTS TO A PRACTICAL MATTER +4970-29093-0023-2116: HE WELL KNEW THE PERILS OF THE FRONTIER THE SAVAGE STATE OF SOCIETY THE LURKING INDIANS AND THE DANGERS OF FEVER +4970-29095-0000-2054: SHE WAS TIRED OF OTHER THINGS +4970-29095-0001-2055: SHE TRIED THIS MORNING AN (AIR->HOUR) OR TWO UPON THE PIANO (SANG->SAYING) A SIMPLE SONG (IN->AND) A SWEET BUT SLIGHTLY METALLIC VOICE AND THEN SEATING HERSELF BY THE OPEN WINDOW READ PHILIP'S LETTER +4970-29095-0002-2056: WELL MOTHER SAID THE YOUNG STUDENT LOOKING UP WITH A SHADE OF IMPATIENCE +4970-29095-0003-2057: I HOPE THEE TOLD THE ELDERS THAT FATHER AND I ARE RESPONSIBLE FOR THE PIANO AND THAT MUCH AS THEE LOVES MUSIC THEE IS NEVER IN THE ROOM WHEN IT IS PLAYED +4970-29095-0004-2058: I HEARD FATHER TELL COUSIN ABNER THAT HE WAS WHIPPED SO OFTEN FOR WHISTLING WHEN HE WAS A BOY THAT HE WAS DETERMINED 
TO HAVE WHAT COMPENSATION HE COULD GET NOW +4970-29095-0005-2059: THY WAYS GREATLY TRY ME RUTH AND ALL THY RELATIONS +4970-29095-0006-2060: IS THY FATHER WILLING THEE SHOULD GO AWAY TO A SCHOOL OF THE WORLD'S PEOPLE +4970-29095-0007-2061: I HAVE NOT ASKED HIM RUTH REPLIED WITH A LOOK THAT MIGHT IMPLY THAT SHE WAS ONE OF THOSE DETERMINED LITTLE BODIES WHO FIRST MADE UP HER OWN MIND AND THEN COMPELLED OTHERS TO MAKE UP THEIRS IN ACCORDANCE WITH HERS +4970-29095-0008-2062: MOTHER (I'M->I AM) GOING TO (STUDY->SET E) MEDICINE +4970-29095-0009-2063: MARGARET BOLTON ALMOST LOST FOR A MOMENT HER HABITUAL PLACIDITY +4970-29095-0010-2064: (THEE->THE) STUDY MEDICINE +4970-29095-0011-2065: DOES THEE THINK THEE COULD STAND (IT->AT) SIX MONTHS +4970-29095-0012-2066: AND BESIDES SUPPOSE THEE DOES LEARN MEDICINE +4970-29095-0013-2067: I WILL (PRACTICE->PRACTISE) IT +4970-29095-0014-2068: (WHERE->WHERE'S) THEE AND THY FAMILY ARE KNOWN +4970-29095-0015-2069: IF I CAN GET (PATIENTS->PATIENCE) +4970-29095-0016-2070: RUTH SAT QUITE STILL FOR A TIME WITH FACE INTENT AND FLUSHED IT WAS OUT NOW +4970-29095-0017-2071: THE (SIGHT SEERS->SIGHTSEERS) RETURNED IN HIGH SPIRITS FROM THE CITY +4970-29095-0018-2072: RUTH ASKED THE ENTHUSIASTS IF THEY WOULD LIKE TO LIVE IN SUCH A SOUNDING (MAUSOLEUM->MUZOLEUM) WITH ITS GREAT HALLS AND ECHOING ROOMS AND NO COMFORTABLE PLACE IN IT FOR THE ACCOMMODATION OF ANY BODY +4970-29095-0019-2073: AND THEN THERE WAS BROAD STREET +4970-29095-0020-2074: THERE (*->IS) CERTAINLY WAS NO END TO IT AND EVEN RUTH WAS PHILADELPHIAN ENOUGH TO BELIEVE THAT A STREET OUGHT NOT TO HAVE ANY END OR ARCHITECTURAL (POINT->BLINT) UPON WHICH THE WEARY EYE COULD REST +4970-29095-0021-2075: BUT NEITHER SAINT (GIRARD->GERARD) NOR BROAD STREET NEITHER WONDERS OF THE (MINT->MENT) NOR THE GLORIES OF THE HALL WHERE THE GHOSTS OF OUR FATHERS SIT ALWAYS SIGNING THE DECLARATION (IMPRESSED->IMPRESS) THE VISITORS SO MUCH AS THE SPLENDORS OF THE CHESTNUT STREET WINDOWS AND THE BARGAINS ON EIGHTH STREET +4970-29095-0022-2076: IS THEE GOING TO THE YEARLY MEETING RUTH ASKED ONE OF THE GIRLS +4970-29095-0023-2077: I HAVE NOTHING TO WEAR REPLIED THAT DEMURE PERSON +4970-29095-0024-2078: IT HAS OCCUPIED MOTHER A LONG TIME TO FIND (AT->*) THE SHOPS THE EXACT SHADE FOR HER NEW BONNET +4970-29095-0025-2079: AND THEE WON'T GO WHY SHOULD I +4970-29095-0026-2080: IF I GO TO MEETING AT ALL I LIKE BEST TO SIT IN THE QUIET OLD HOUSE IN GERMANTOWN WHERE THE WINDOWS ARE ALL OPEN AND I CAN SEE THE TREES AND (HEAR->HERE) THE STIR OF THE LEAVES +4970-29095-0027-2081: IT'S SUCH A CRUSH AT THE YEARLY MEETING AT ARCH STREET AND THEN THERE'S THE ROW OF SLEEK LOOKING YOUNG MEN WHO (LINE->LIE IN) THE CURBSTONE AND STARE AT US AS WE COME OUT +4970-29095-0028-2082: HE DOESN'T SAY BUT IT'S ON THE FRONTIER AND ON THE MAP EVERYTHING BEYOND IT IS MARKED INDIANS AND DESERT AND LOOKS AS DESOLATE AS A (WEDNESDAY->WINDSAY) MEETING (HUMPH->*) IT WAS TIME FOR HIM TO DO SOMETHING +4970-29095-0029-2083: IS HE GOING TO START A DAILY NEWSPAPER AMONG THE (KICK A POOS->KICKAPOOS) +4970-29095-0030-2084: FATHER (THEE'S UNJUST TO PHILIP->THESE UNJUSTIFILL UP) HE'S GOING INTO BUSINESS +4970-29095-0031-2085: HE DOESN'T SAY EXACTLY WHAT IT IS SAID RUTH A LITTLE DUBIOUSLY BUT IT'S SOMETHING ABOUT LAND AND RAILROADS AND (THEE KNOWS->THE NOSE) FATHER THAT FORTUNES ARE MADE NOBODY KNOWS EXACTLY HOW IN A NEW COUNTRY +4970-29095-0032-2086: BUT PHILIP IS HONEST AND HE HAS TALENT ENOUGH IF HE WILL STOP SCRIBBLING TO MAKE HIS WAY +4970-29095-0033-2087: WHAT A (BOX 
WOMEN->BOXWOMEN) ARE PUT INTO MEASURED FOR IT AND PUT IN YOUNG IF WE GO ANYWHERE IT'S IN A BOX VEILED AND PINIONED AND SHUT IN BY DISABILITIES +4970-29095-0034-2088: WHY SHOULD I RUST AND BE STUPID AND SIT IN (INACTION->AN ACTION) BECAUSE I AM A GIRL +4970-29095-0035-2089: AND IF I HAD A FORTUNE WOULD THEE WANT ME TO LEAD A USELESS LIFE +4970-29095-0036-2090: HAS (THEE->THE) CONSULTED THY MOTHER ABOUT A CAREER I SUPPOSE IT IS A CAREER (*->OF) THEE WANTS +4970-29095-0037-2091: BUT THAT WISE AND PLACID WOMAN UNDERSTOOD THE SWEET REBEL A GREAT DEAL BETTER THAN RUTH UNDERSTOOD HERSELF +4970-29095-0038-2092: RUTH WAS GLAD TO HEAR THAT PHILIP HAD MADE A PUSH INTO THE WORLD AND SHE WAS SURE THAT HIS TALENT AND COURAGE WOULD MAKE (A WAY->AWAY) FOR HIM +4992-23283-0000-2140: BUT THE MORE FORGETFULNESS HAD THEN PREVAILED THE MORE POWERFUL WAS THE FORCE OF REMEMBRANCE WHEN SHE AWOKE +4992-23283-0001-2141: MISS MILNER'S HEALTH IS NOT GOOD +4992-23283-0002-2142: SAID MISSUS (HORTON->WHARTON) A FEW MINUTES AFTER +4992-23283-0003-2143: SO THERE IS TO ME ADDED SANDFORD WITH A SARCASTIC SNEER +4992-23283-0004-2144: AND YET YOU MUST OWN HER (BEHAVIOUR->BEHAVIOR) HAS WARRANTED THEM HAS IT NOT BEEN IN THIS PARTICULAR INCOHERENT AND UNACCOUNTABLE +4992-23283-0005-2145: NOT THAT I KNOW OF NOT ONE MORE THAT I KNOW OF HE REPLIED WITH ASTONISHMENT AT WHAT SHE HAD INSINUATED AND YET WITH A PERFECT ASSURANCE THAT SHE WAS IN THE WRONG +4992-23283-0006-2146: PERHAPS I AM MISTAKEN ANSWERED SHE +4992-23283-0007-2147: TO ASK ANY MORE QUESTIONS OF YOU I BELIEVE WOULD BE UNFAIR +4992-23283-0008-2148: HE SEEMED TO WAIT FOR HER REPLY BUT AS SHE MADE NONE HE PROCEEDED +4992-23283-0009-2149: (OH->O) MY LORD CRIED MISS WOODLEY WITH A MOST FORCIBLE ACCENT YOU ARE THE LAST PERSON ON EARTH SHE WOULD PARDON ME FOR ENTRUSTING +4992-23283-0010-2150: BUT IN SUCH A CASE MISS MILNER'S ELECTION OF A HUSBAND SHALL NOT DIRECT MINE +4992-23283-0011-2151: IF SHE DOES NOT KNOW HOW TO ESTIMATE HER OWN VALUE I DO +4992-23283-0012-2152: INDEPENDENT OF HER FORTUNE SHE HAS BEAUTY TO CAPTIVATE THE HEART OF ANY MAN AND WITH ALL HER FOLLIES SHE HAS A FRANKNESS IN HER MANNER AN UNAFFECTED WISDOM IN HER THOUGHTS (*->OF) A VIVACITY IN HER CONVERSATION AND WITHAL A SOFTNESS IN HER DEMEANOUR THAT MIGHT ALONE ENGAGE THE AFFECTIONS OF A MAN OF THE NICEST SENTIMENTS AND THE STRONGEST UNDERSTANDING +4992-23283-0013-2153: MY LORD MISS MILNER'S TASTE IS NOT A DEPRAVED ONE IT IS BUT TOO REFINED +4992-23283-0014-2154: WHAT CAN YOU MEAN BY THAT MISS WOODLEY YOU TALK MYSTERIOUSLY +4992-23283-0015-2155: IS SHE NOT AFRAID THAT I WILL THWART HER INCLINATIONS +4992-23283-0016-2156: AGAIN HE SEARCHED HIS OWN THOUGHTS NOR (INEFFECTUALLY->IN EFFECTUALLY) AS BEFORE +4992-23283-0017-2157: MISS WOODLEY WAS TOO LITTLE VERSED IN THE SUBJECT TO KNOW THIS WOULD HAVE BEEN NOT TO LOVE AT ALL AT LEAST NOT TO THE EXTENT OF BREAKING THROUGH ENGAGEMENTS AND ALL THE VARIOUS OBSTACLES THAT STILL (MILITATED->MITIGATED) AGAINST THEIR UNION +4992-23283-0018-2158: TO RELIEVE HER FROM BOTH HE LAID HIS HAND WITH FORCE UPON HIS HEART AND SAID DO YOU BELIEVE ME +4992-23283-0019-2159: I WILL MAKE NO UNJUST USE OF WHAT I KNOW HE REPLIED WITH FIRMNESS I BELIEVE YOU MY LORD +4992-23283-0020-2160: I HAVE NEVER YET HOWEVER BEEN VANQUISHED BY THEM AND EVEN UPON THIS OCCASION MY REASON SHALL COMBAT THEM TO THE LAST AND MY REASON SHALL FAIL ME BEFORE I DO WRONG +4992-41797-0000-2117: YES DEAD THESE FOUR YEARS (AN->AND) A GOOD JOB FOR HER TOO +4992-41797-0001-2118: WELL AS I SAY IT'S AN AWFUL QUEER WORLD 
THEY CLAP ALL THE BURGLARS (INTO JAIL->*) AND (*->DOWN) THE MURDERERS AND THE (WIFE->WHITE) BEATERS (I'VE->I) ALLERS THOUGHT A GENTLE REPROOF WOULD BE ENOUGH PUNISHMENT FOR A WIFE BEATER CAUSE HE PROBABLY HAS A LOT (O->OF) PROVOCATION THAT NOBODY KNOWS AND THE (FIREBUGS->FIRE BUGS) CAN'T THINK (O->OF) THE RIGHT NAME (SOMETHING->SOMETHIN) LIKE (CENDENARIES AN->SENDIARIES AND) THE BREAKERS (O->OF) THE PEACE (AN->AND) WHAT NOT (AN->AND) YET THE LAW HAS (NOTHIN->NOTHING) TO SAY TO A MAN LIKE (HEN LORD->HANDLED) +4992-41797-0002-2119: GRANDFATHER WAS ALEXANDER CAREY L (L->*) D DOCTOR OF LAWS THAT IS +4992-41797-0003-2120: MISTER POPHAM LAID DOWN HIS BRUSH +4992-41797-0004-2121: I (SWAN TO MAN->SWAY INTO MEN) HE EJACULATED IF YOU DON'T WORK HARD YOU CAN'T KEEP UP WITH THE TIMES DOCTOR OF LAWS +4992-41797-0005-2122: DONE HE (AIN'T->HAIN'T) DONE A THING (HE'D OUGHTER SENCE->HE ORDERED SINCE) HE WAS BORN +4992-41797-0006-2123: HE KEEPS THE THOU SHALT NOT COMMANDMENTS FIRST RATE (HEN LORD->HENLOORD) DOES +4992-41797-0007-2124: HE (GIVE->GAVE) UP HIS POSITION AND SHUT THE FAMILY UP IN THAT TOMB OF A HOUSE (SO T->SODIN) HE (COULD->COULDN'T) STUDY HIS BOOKS +4992-41797-0008-2125: MISTER POPHAM EXAGGERATED NOTHING BUT ON THE CONTRARY LEFT MUCH UNSAID IN HIS NARRATIVE OF THE FAMILY AT THE HOUSE OF LORDS +4992-41797-0009-2126: HENRY LORD WITH (THE->A) DEGREE OF PH (D->*) TO HIS CREDIT HAD BEEN PROFESSOR OF ZOOLOGY AT A NEW ENGLAND COLLEGE BUT HAD RESIGNED HIS POST IN ORDER TO WRITE A SERIES OF SCIENTIFIC TEXT BOOKS +4992-41797-0010-2127: ALWAYS IRRITABLE COLD INDIFFERENT HE HAD GROWN RAPIDLY MORE SO AS YEARS WENT ON +4992-41797-0011-2128: WHATEVER APPEALED TO HER SENSE OF BEAUTY WAS STRAIGHTWAY TRANSFERRED TO PAPER OR (CANVAS->GAMBUS) +4992-41797-0012-2129: SHE IS WILD TO KNOW HOW TO DO THINGS +4992-41797-0013-2130: SHE MAKES EFFORT AFTER EFFORT TREMBLING WITH EAGERNESS (AND->THAN) WHEN SHE FAILS TO REPRODUCE WHAT SHE SEES SHE WORKS HERSELF INTO A FRENZY OF GRIEF AND DISAPPOINTMENT +4992-41797-0014-2131: WHEN SHE COULD NOT MAKE A RABBIT OR A BIRD LOOK REAL ON PAPER SHE SEARCHED IN HER FATHER'S BOOKS FOR PICTURES OF ITS BONES +4992-41797-0015-2132: CYRIL THERE MUST BE SOME BETTER WAY OF DOING I JUST DRAW THE OUTLINE OF AN ANIMAL AND THEN I PUT HAIRS OR FEATHERS ON IT THEY HAVE NO BODIES +4992-41797-0016-2133: THEY COULDN'T RUN (NOR->OR) MOVE THEY'RE JUST PASTEBOARD +4992-41797-0017-2134: HE WOULDN'T SEARCH SO DON'T WORRY REPLIED CYRIL QUIETLY AND THE TWO LOOKED AT EACH OTHER AND KNEW THAT IT WAS SO +4992-41797-0018-2135: THERE IN THE CEDAR HOLLOW THEN LIVED OLIVE LORD AN ANGRY RESENTFUL LITTLE CREATURE WEIGHED DOWN BY A FIERCE SENSE OF INJURY +4992-41797-0019-2136: (OLIVE'S->ALL OF THIS) MOURNFUL BLACK EYES MET NANCY'S SPARKLING BROWN ONES +4992-41797-0020-2137: NANCY'S CURLY CHESTNUT CROP SHONE IN THE SUN AND OLIVE'S THICK BLACK (PLAITS->PLATES) LOOKED BLACKER BY CONTRAST +4992-41797-0021-2138: (SHE'S->SHE IS) WONDERFUL MORE WONDERFUL (THAN->IN) ANYBODY WE'VE EVER SEEN ANYWHERE AND SHE DRAWS BETTER THAN THE TEACHER IN CHARLESTOWN +4992-41797-0022-2139: SHE'S OLDER THAN I AM BUT SO TINY AND SAD AND SHY THAT SHE SEEMS LIKE A CHILD +4992-41806-0000-2161: NATTY HARMON TRIED THE KITCHEN PUMP SECRETLY SEVERAL TIMES DURING THE EVENING FOR THE WATER HAD TO RUN UP HILL ALL THE WAY FROM THE WELL TO THE KITCHEN SINK AND HE BELIEVED THIS TO BE (A->*) CONTINUAL MIRACLE THAT MIGHT GIVE OUT AT ANY MOMENT +4992-41806-0001-2162: TO NIGHT THERE WAS NO NEED OF EXTRA HEAT AND THERE WERE GREAT CEREMONIES TO BE OBSERVED IN 
LIGHTING THE FIRES ON THE HEARTHSTONES +4992-41806-0002-2163: THEY BEGAN WITH THE ONE IN THE FAMILY SITTING ROOM COLONEL WHEELER RALPH THURSTON MISTER AND MISSUS BILL HARMON WITH NATTY AND (RUFUS->RUFFUS) MISTER AND MISSUS (POPHAM->POPPUM) WITH DIGBY AND (LALLIE->LALLY) JOY ALL STANDING IN ADMIRING GROUPS AND THRILLING WITH DELIGHT AT THE ORDER OF EVENTS +4992-41806-0003-2164: (KATHLEEN->CATHERINE) WAVED THE TORCH TO AND FRO AS SHE RECITED SOME BEAUTIFUL LINES WRITTEN FOR SOME SUCH PURPOSE AS THAT WHICH CALLED THEM TOGETHER TO NIGHT +4992-41806-0004-2165: BURN FIRE BURN FLICKER FLICKER FLAME +4992-41806-0005-2166: NEXT CAME (OLIVE'S->OLIVES) TURN TO HELP IN THE CEREMONIES +4992-41806-0006-2167: RALPH THURSTON HAD FOUND A LINE OF LATIN FOR THEM IN HIS BELOVED (HORACE TIBI SPLENDET->HORRENTS TIBEE SPLENDID) FOCUS FOR YOU THE HEARTH FIRE SHINES +4992-41806-0007-2168: OLIVE HAD PAINTED THE MOTTO ON A LONG NARROW PANEL OF CANVAS AND GIVING IT TO MISTER POPHAM STOOD BY THE FIRESIDE WHILE HE DEFTLY FITTED IT INTO THE PLACE PREPARED FOR IT +4992-41806-0008-2169: OLIVE HAS ANOTHER LOVELY GIFT FOR THE YELLOW HOUSE SAID MOTHER CAREY RISING AND TO CARRY OUT THE NEXT PART OF THE PROGRAMME WE SHALL HAVE TO GO IN PROCESSION UPSTAIRS TO MY BEDROOM +4992-41806-0009-2170: EXCLAIMED BILL HARMON TO HIS WIFE AS THEY WENT THROUGH THE LIGHTED HALL +4992-41806-0010-2171: AIN'T THEY THE GREATEST +4992-41806-0011-2172: MOTHER CAREY POURED COFFEE NANCY CHOCOLATE AND THE OTHERS (HELPED SERVE->HELP SERVED) THE SANDWICHES AND CAKE DOUGHNUTS AND TARTS +4992-41806-0012-2173: AT THAT MOMENT THE GENTLEMAN ENTERED BEARING A HUGE OBJECT CONCEALED BY A PIECE OF GREEN (FELT->FIL) +4992-41806-0013-2174: APPROACHING THE DINING TABLE HE CAREFULLY PLACED THE ARTICLE IN THE CENTRE AND REMOVED THE CLOTH +4992-41806-0014-2175: (THINKS I->THINK SADD) TO MYSELF I NEVER SEEN ANYTHING (OSH POPHAM COULDN'T MEND->I WAS POPLED GOODN'T MEN) IF HE TOOK TIME ENOUGH AND GLUE ENOUGH SO I CARRIED THIS LITTLE FELLER HOME IN A (BUSHEL->BUSH O) BASKET ONE NIGHT LAST MONTH (AN->AND) I'VE SPENT ELEVEN (EVENIN'S PUTTIN->EVENINGS PUTTING) HIM TOGETHER +4992-41806-0015-2176: MISSUS HARMON THOUGHT HE SANG TOO MUCH AND TOLD HER HUSBAND PRIVATELY THAT IF HE WAS A CANARY BIRD SHE SHOULD WANT TO KEEP A TABLE COVER OVER HIS HEAD MOST OF THE TIME BUT HE WAS IMMENSELY POPULAR WITH THE REST OF HIS AUDIENCE +4992-41806-0016-2177: THE FACE OF THE MAHOGANY SHONE WITH DELIGHT AND WHY NOT WHEN IT WAS DOING EVERYTHING ALMOST EVERYTHING WITHIN THE SCOPE OF A PIANO AND YET THE FAMILY HAD ENJOYED WEEKS OF GOOD NOURISHING MEALS ON WHAT HAD BEEN SAVED BY ITS EXERTIONS +4992-41806-0017-2178: WE SHUT OUR EYES THE FLOWERS BLOOM ON WE MURMUR BUT THE (CORN EARS->CORNEERS) FILL WE CHOOSE THE SHADOW BUT THE SUN THAT (CASTS->CAST) IT SHINES BEHIND US STILL +5105-28233-0000-1649: LENGTH OF SERVICE FOURTEEN YEARS THREE MONTHS AND FIVE DAYS +5105-28233-0001-1650: HE SEEMED BORN TO PLEASE WITHOUT BEING CONSCIOUS OF THE POWER HE POSSESSED +5105-28233-0002-1651: IT MUST BE OWNED AND NO ONE WAS MORE READY TO CONFESS IT THAN HIMSELF THAT HIS LITERARY ATTAINMENTS WERE BY NO MEANS OF A HIGH ORDER +5105-28233-0003-1652: WE DON'T (SPIN->SPEND) TOPS IS A FAVORITE SAYING AMONGST ARTILLERY OFFICERS INDICATING THAT THEY DO NOT SHIRK THEIR DUTY BY FRIVOLOUS PURSUITS BUT IT MUST BE CONFESSED THAT SERVADAC BEING NATURALLY IDLE WAS VERY MUCH GIVEN TO SPINNING TOPS +5105-28233-0004-1653: ONCE IN ACTION HE WAS LEADING A DETACHMENT OF INFANTRY THROUGH AN (INTRENCHMENT->ENTRENCHMENT) 
+5105-28233-0005-1654: SOMETIMES HE WOULD WANDER ON FOOT UPON THE SANDY SHORE AND SOMETIMES HE WOULD ENJOY A RIDE ALONG THE SUMMIT OF THE CLIFF ALTOGETHER BEING IN NO HURRY AT ALL TO BRING HIS TASK TO AN END +5105-28233-0006-1655: NO CATHEDRAL NOT EVEN BURGOS ITSELF COULD VIE WITH THE CHURCH AT (MONTMARTRE->MONT MARTRA) +5105-28233-0007-1656: BEN ZOOF'S MOST AMBITIOUS DESIRE WAS TO INDUCE THE CAPTAIN TO GO WITH HIM AND END HIS DAYS IN HIS MUCH LOVED HOME AND SO INCESSANTLY WERE SERVADAC'S EARS BESIEGED WITH DESCRIPTIONS OF THE UNPARALLELED BEAUTIES AND ADVANTAGES OF THIS EIGHTEENTH (ARRONDISSEMENT->ARE ON DE SAINT) OF PARIS THAT HE COULD SCARCELY HEAR THE NAME OF (MONTMARTRE->MONTMARTRA) WITHOUT A CONSCIOUS THRILL OF AVERSION +5105-28233-0008-1657: WHEN A PRIVATE (IN->AND) THE EIGHTH CAVALRY HE HAD BEEN ON THE POINT OF QUITTING THE ARMY AT TWENTY EIGHT YEARS OF AGE BUT UNEXPECTEDLY HE HAD BEEN APPOINTED ORDERLY TO CAPTAIN SERVADAC +5105-28233-0009-1658: THE BOND OF UNION THUS EFFECTED COULD NEVER BE SEVERED AND ALTHOUGH BEN (ZOOF'S->ZOV'S) ACHIEVEMENTS HAD FAIRLY EARNED HIM THE RIGHT OF RETIREMENT HE FIRMLY DECLINED ALL HONORS OR ANY PENSION THAT MIGHT PART HIM FROM HIS SUPERIOR OFFICER +5105-28233-0010-1659: (UNLIKE->I MAKE) HIS MASTER HE MADE NO PRETENSION TO ANY GIFT OF POETIC POWER BUT HIS INEXHAUSTIBLE MEMORY MADE HIM A LIVING ENCYCLOPAEDIA AND FOR HIS STOCK OF ANECDOTES AND TROOPER'S TALES HE WAS MATCHLESS +5105-28240-0000-1624: FAST AS HIS LEGS COULD CARRY HIM SERVADAC HAD MADE HIS WAY TO THE TOP OF THE CLIFF +5105-28240-0001-1625: IT WAS QUITE TRUE THAT A VESSEL WAS IN SIGHT HARDLY MORE THAN SIX MILES FROM THE SHORE BUT OWING TO THE INCREASE IN THE EARTH'S CONVEXITY AND THE CONSEQUENT LIMITATION OF THE RANGE OF VISION THE RIGGING OF THE TOPMASTS ALONE WAS VISIBLE ABOVE THE WATER +5105-28240-0002-1626: EXCLAIMED SERVADAC KEEPING HIS EYE UNMOVED AT HIS TELESCOPE +5105-28240-0003-1627: SHE IS UNDER (SAIL->SALE) BUT SHE IS COUNT TIMASCHEFF'S YACHT HE WAS RIGHT +5105-28240-0004-1628: IF THE COUNT WERE ON BOARD A STRANGE FATALITY WAS BRINGING HIM TO THE PRESENCE OF HIS RIVAL +5105-28240-0005-1629: HE RECKONED THEREFORE NOT ONLY UPON ASCERTAINING THE EXTENT OF THE LATE CATASTROPHE BUT UPON LEARNING ITS CAUSE +5105-28240-0006-1630: THE WIND BEING ADVERSE THE (DOBRYNA->DOBRINA) DID NOT MAKE VERY RAPID PROGRESS BUT AS THE WEATHER IN SPITE OF A FEW CLOUDS REMAINED CALM AND THE SEA WAS QUITE SMOOTH SHE WAS ENABLED TO HOLD A STEADY COURSE +5105-28240-0007-1631: SERVADAC TOOK IT FOR GRANTED THAT THE (DOBRYNA->DOBRINA) WAS ENDEAVORING TO PUT IN +5105-28240-0008-1632: A NARROW CHANNEL FORMED A PASSAGE THROUGH THE RIDGE OF ROCKS THAT PROTECTED IT FROM THE OPEN SEA AND WHICH EVEN IN THE ROUGHEST WEATHER WOULD (ENSURE->INSURE) THE CALMNESS OF ITS WATERS +5105-28240-0009-1633: SLIGHTLY CHANGING HER COURSE SHE FIRST STRUCK HER MAINSAIL AND IN ORDER TO FACILITATE THE MOVEMENTS OF HER HELMSMAN SOON CARRIED NOTHING BUT HER TWO TOPSAILS BRIGANTINE AND JIB +5105-28240-0010-1634: CAPTAIN SERVADAC HASTENED (TOWARDS->TOWARD) HIM +5105-28240-0011-1635: I LEFT YOU ON A CONTINENT AND HERE I HAVE THE HONOR OF FINDING YOU ON AN ISLAND +5105-28240-0012-1636: NEVER MIND NOW INTERPOSED THE CAPTAIN WE WILL TALK OF THAT BY AND BY +5105-28240-0013-1637: NOTHING MORE THAN YOU KNOW YOURSELF +5105-28240-0014-1638: ARE YOU CERTAIN THAT THIS IS THE MEDITERRANEAN +5105-28240-0015-1639: FOR SOME MOMENTS HE SEEMED PERFECTLY STUPEFIED (*->AND) THEN RECOVERING HIMSELF HE BEGAN TO OVERWHELM THE COUNT WITH A TORRENT OF QUESTIONS 
+5105-28240-0016-1640: TO ALL THESE INQUIRIES THE COUNT RESPONDED IN THE AFFIRMATIVE +5105-28240-0017-1641: SOME MYSTERIOUS FORCE SEEMED TO HAVE BROUGHT ABOUT A CONVULSION OF THE ELEMENTS +5105-28240-0018-1642: YOU WILL TAKE ME ON BOARD COUNT WILL YOU NOT +5105-28240-0019-1643: MY YACHT IS AT YOUR SERVICE SIR EVEN SHOULD YOU REQUIRE TO MAKE A TOUR ROUND THE WORLD +5105-28240-0020-1644: THE COUNT SHOOK HIS HEAD +5105-28240-0021-1645: BEFORE STARTING IT WAS INDISPENSABLE THAT THE ENGINE OF THE (DOBRYNA->DOBRINA) SHOULD BE REPAIRED TO SAIL UNDER CANVAS ONLY WOULD IN CONTRARY WINDS AND ROUGH SEAS BE BOTH TEDIOUS AND DIFFICULT +5105-28240-0022-1646: IT WAS ON THE LAST DAY OF JANUARY THAT THE REPAIRS OF THE SCHOONER WERE COMPLETED +5105-28240-0023-1647: A SLIGHT DIMINUTION IN THE EXCESSIVELY HIGH TEMPERATURE WHICH HAD PREVAILED FOR THE LAST FEW WEEKS WAS THE ONLY APPARENT CHANGE IN THE GENERAL ORDER OF THINGS BUT WHETHER THIS WAS TO BE ATTRIBUTED TO ANY ALTERATION IN THE EARTH'S ORBIT WAS A QUESTION WHICH WOULD STILL REQUIRE SEVERAL DAYS TO DECIDE +5105-28240-0024-1648: DOUBTS NOW AROSE AND SOME DISCUSSION FOLLOWED WHETHER OR NOT IT WAS DESIRABLE FOR BEN ZOOF TO ACCOMPANY HIS MASTER +5105-28241-0000-1604: HER SEA GOING QUALITIES WERE EXCELLENT AND WOULD HAVE AMPLY SUFFICED FOR A CIRCUMNAVIGATION OF THE GLOBE +5105-28241-0001-1605: AFTER AN APPRENTICESHIP ON A MERCHANT SHIP HE HAD ENTERED THE IMPERIAL NAVY AND HAD ALREADY REACHED THE RANK OF LIEUTENANT WHEN THE COUNT APPOINTED HIM TO THE CHARGE OF HIS OWN PRIVATE YACHT IN WHICH HE WAS ACCUSTOMED TO SPEND BY FAR THE GREATER PART OF HIS TIME THROUGHOUT THE WINTER GENERALLY CRUISING IN THE MEDITERRANEAN WHILST IN THE SUMMER HE VISITED MORE NORTHERN WATERS +5105-28241-0002-1606: THE LATE ASTOUNDING EVENTS HOWEVER HAD RENDERED PROCOPE MANIFESTLY UNEASY AND NOT THE LESS SO FROM HIS CONSCIOUSNESS THAT THE COUNT SECRETLY PARTOOK OF HIS OWN ANXIETY +5105-28241-0003-1607: STEAM UP AND CANVAS SPREAD THE SCHOONER STARTED EASTWARDS +5105-28241-0004-1608: ALTHOUGH ONLY A MODERATE BREEZE WAS BLOWING THE SEA WAS ROUGH A CIRCUMSTANCE TO BE ACCOUNTED FOR ONLY BY THE DIMINUTION IN THE FORCE OF THE EARTH'S ATTRACTION RENDERING THE LIQUID (PARTICLES->PARTICLE) SO BUOYANT THAT BY THE MERE EFFECT OF OSCILLATION THEY WERE CARRIED TO A HEIGHT THAT WAS QUITE UNPRECEDENTED +5105-28241-0005-1609: FOR A FEW MILES SHE FOLLOWED THE LINE HITHERTO PRESUMABLY OCCUPIED BY THE COAST OF ALGERIA BUT NO LAND APPEARED TO THE SOUTH +5105-28241-0006-1610: THE LOG AND THE COMPASS THEREFORE WERE ABLE TO BE CALLED UPON TO DO THE WORK OF THE SEXTANT WHICH HAD BECOME UTTERLY USELESS +5105-28241-0007-1611: (THERE IS->THERE'S) NO FEAR OF THAT SIR +5105-28241-0008-1612: (*->AT) THE EARTH HAS UNDOUBTEDLY ENTERED UPON A NEW ORBIT BUT SHE IS NOT INCURRING ANY PROBABLE RISK OF BEING PRECIPITATED (ONTO->ON TO) THE SUN +5105-28241-0009-1613: AND WHAT DEMONSTRATION DO YOU OFFER ASKED SERVADAC EAGERLY THAT IT WILL NOT HAPPEN +5105-28241-0010-1614: OCEAN (REIGNED->RAINED) SUPREME +5105-28241-0011-1615: ALL THE IMAGES OF HIS PAST LIFE FLOATED UPON HIS MEMORY HIS THOUGHTS SPED AWAY TO HIS NATIVE FRANCE ONLY TO RETURN AGAIN TO WONDER WHETHER THE DEPTHS OF OCEAN WOULD REVEAL ANY TRACES OF THE ALGERIAN METROPOLIS +5105-28241-0012-1616: IS IT NOT IMPOSSIBLE HE MURMURED ALOUD THAT ANY CITY SHOULD DISAPPEAR SO COMPLETELY +5105-28241-0013-1617: WOULD NOT THE LOFTIEST EMINENCES OF THE CITY AT LEAST BE VISIBLE +5105-28241-0014-1618: ANOTHER CIRCUMSTANCE WAS MOST REMARKABLE +5105-28241-0015-1619: TO THE SURPRISE OF 
ALL AND ESPECIALLY OF LIEUTENANT PROCOPE THE LINE INDICATED A BOTTOM AT A NEARLY UNIFORM DEPTH OF FROM FOUR TO FIVE FATHOMS AND ALTHOUGH THE SOUNDING WAS PERSEVERED WITH CONTINUOUSLY FOR MORE THAN TWO HOURS OVER A CONSIDERABLE AREA THE DIFFERENCES OF LEVEL WERE INSIGNIFICANT NOT CORRESPONDING IN ANY DEGREE TO WHAT WOULD BE EXPECTED OVER THE SITE OF A CITY THAT HAD BEEN TERRACED LIKE THE SEATS OF AN (AMPHITHEATER->AMPHITHEATRE) +5105-28241-0016-1620: YOU MUST SEE LIEUTENANT I SHOULD THINK THAT WE ARE NOT SO NEAR THE COAST OF ALGERIA AS YOU IMAGINED +5105-28241-0017-1621: AFTER PONDERING (AWHILE->A WHILE) HE SAID IF WE WERE FARTHER AWAY I SHOULD EXPECT TO FIND A DEPTH OF TWO OR THREE HUNDRED FATHOMS INSTEAD OF FIVE FATHOMS FIVE FATHOMS +5105-28241-0018-1622: ITS DEPTH REMAINED INVARIABLE STILL FOUR OR AT MOST FIVE FATHOMS AND ALTHOUGH ITS BOTTOM WAS ASSIDUOUSLY DREDGED IT WAS ONLY TO PROVE IT BARREN OF MARINE PRODUCTION OF ANY TYPE +5105-28241-0019-1623: NOTHING WAS TO BE DONE BUT TO PUT ABOUT AND RETURN IN DISAPPOINTMENT (TOWARDS->TOWARD) THE NORTH +5142-33396-0000-898: AT ANOTHER TIME (HARALD->HAROLD) ASKED +5142-33396-0001-899: WHAT IS YOUR COUNTRY OLAF HAVE YOU ALWAYS BEEN A THRALL THE THRALL'S EYES FLASHED +5142-33396-0002-900: TWO HUNDRED WARRIORS FEASTED IN HIS HALL AND FOLLOWED HIM TO BATTLE +5142-33396-0003-901: THE REST OF YOU OFF A VIKING HE HAD THREE SHIPS +5142-33396-0004-902: THESE HE GAVE TO THREE OF MY BROTHERS +5142-33396-0005-903: BUT I STAYED THAT SPRING AND BUILT ME A BOAT +5142-33396-0006-904: I MADE HER (FOR->*) ONLY TWENTY (OARS->WARS) BECAUSE I THOUGHT FEW MEN WOULD FOLLOW ME FOR I WAS YOUNG FIFTEEN YEARS OLD +5142-33396-0007-905: AT THE PROW I CARVED THE HEAD WITH OPEN MOUTH AND FORKED TONGUE THRUST OUT +5142-33396-0008-906: I PAINTED THE EYES RED FOR ANGER +5142-33396-0009-907: THERE STAND SO I SAID AND GLARE AND HISS AT MY FOES +5142-33396-0010-908: IN (THE->A) STERN I (CURVED->CARVED) THE TAIL UP ALMOST AS HIGH AS THE HEAD +5142-33396-0011-909: THERE SHE SAT ON THE ROLLERS AS FAIR A SHIP AS I EVER SAW +5142-33396-0012-910: THEN I WILL GET ME A FARM AND WILL (WINTER->WIN HER) IN THAT LAND NOW WHO WILL FOLLOW ME +5142-33396-0013-911: HE IS BUT A BOY THE (MEN->MAN) SAID +5142-33396-0014-912: THIRTY MEN ONE AFTER ANOTHER RAISED THEIR HORNS AND SAID +5142-33396-0015-913: AS OUR BOAT FLASHED DOWN THE ROLLERS INTO THE WATER I MADE THIS SONG AND SANG IT +5142-33396-0016-914: SO WE (HARRIED->HURRIED) THE COAST OF NORWAY +5142-33396-0017-915: WE ATE (AT->IT) MANY MEN'S TABLES UNINVITED +5142-33396-0018-916: (MY->I) DRAGON'S BELLY IS NEVER FULL AND ON BOARD WENT THE GOLD +5142-33396-0019-917: OH IT IS BETTER TO LIVE ON THE SEA AND LET OTHER MEN RAISE YOUR CROPS AND COOK YOUR MEALS +5142-33396-0020-918: A HOUSE SMELLS OF SMOKE A (SHIP SMELLS->SHIP'S MILLS) OF FROLIC +5142-33396-0021-919: UP AND DOWN THE WATER WE WENT TO GET MUCH WEALTH AND MUCH FROLIC +5142-33396-0022-920: WHAT OF THE FARM (OLAF->ALL OFF) NOT YET I ANSWERED VIKING IS BETTER FOR SUMMER +5142-33396-0023-921: IT WAS SO DARK THAT I COULD SEE NOTHING BUT A FEW SPARKS ON THE HEARTH +5142-33396-0024-922: I STOOD WITH MY BACK TO THE WALL FOR I WANTED NO SWORD REACHING OUT OF THE DARK FOR ME +5142-33396-0025-923: COME COME I CALLED WHEN NO ONE OBEYED A FIRE +5142-33396-0026-924: MY MEN LAUGHED YES A STINGY HOST +5142-33396-0027-925: HE ACTS AS THOUGH HE (HAD->IS) NOT EXPECTED US +5142-33396-0028-926: ON A BENCH IN A FAR CORNER WERE A DOZEN PEOPLE HUDDLED TOGETHER +5142-33396-0029-927: BRING IN THE TABLE WE ARE HUNGRY 
+5142-33396-0030-928: THE THRALLS WERE (BRINGING->RINGING) IN A GREAT POT OF MEAT +5142-33396-0031-929: THEY SET UP A CRANE OVER THE FIRE AND HUNG THE POT UPON IT AND WE SAT AND WATCHED IT BOIL WHILE WE JOKED AT LAST THE SUPPER BEGAN +5142-33396-0032-930: THE FARMER SAT GLOOMILY ON THE BENCH AND WOULD NOT EAT AND YOU CANNOT WONDER FOR HE SAW US PUTTING POTFULS OF HIS GOOD BEEF AND (BASKET->BASKEY) LOADS OF BREAD (INTO->AND) OUR BIG MOUTHS +5142-33396-0033-931: YOU WOULD NOT EAT WITH US YOU CANNOT SAY NO TO HALF OF MY ALE I DRINK THIS TO YOUR HEALTH +5142-33396-0034-932: THEN I DRANK HALF OF THE HORNFUL AND (SENT->SET) THE REST ACROSS THE FIRE TO THE FARMER HE TOOK IT AND SMILED SAYING +5142-33396-0035-933: DID YOU EVER HAVE SUCH A LORDLY GUEST BEFORE I WENT ON +5142-33396-0036-934: SO I WILL GIVE OUT THIS LAW THAT MY MEN SHALL NEVER LEAVE YOU ALONE +5142-33396-0037-935: (HAKON->HAWKIN) THERE SHALL BE YOUR CONSTANT COMPANION FRIEND FARMER +5142-33396-0038-936: HE SHALL NOT LEAVE YOU DAY OR NIGHT WHETHER YOU ARE WORKING OR PLAYING OR SLEEPING +5142-33396-0039-937: I (NAMED->NAME) NINE OTHERS AND SAID +5142-33396-0040-938: AND THESE SHALL FOLLOW YOUR THRALLS IN THE SAME WAY +5142-33396-0041-939: SO I SET GUARDS OVER (EVERY ONE->EVERYONE) IN THAT HOUSE +5142-33396-0042-940: SO NO TALES GOT OUT TO THE NEIGHBORS BESIDES IT WAS A LONELY PLACE AND BY GOOD LUCK NO ONE CAME THAT WAY +5142-33396-0043-941: THEIR EYES DANCED BIG (THORLEIF->TORE LEAF) STOOD UP AND STRETCHED HIMSELF +5142-33396-0044-942: (I AM->I'M) STIFF WITH LONG (SITTING->CITY) HE SAID I ITCH FOR A FIGHT I TURNED TO THE FARMER +5142-33396-0045-943: THIS IS OUR LAST FEAST WITH YOU I SAID +5142-33396-0046-944: BY THE BEARD OF ODIN I CRIED YOU HAVE TAKEN OUR JOKE LIKE A MAN +5142-33396-0047-945: MY MEN POUNDED THE TABLE WITH THEIR FISTS +5142-33396-0048-946: BY THE HAMMER (OF THOR->OTHOR) SHOUTED GRIM (HERE->THERE) IS NO STINGY COWARD +5142-33396-0049-947: HERE FRIEND TAKE IT AND HE THRUST (IT->*) INTO THE FARMER'S HAND +5142-33396-0050-948: MAY YOU DRINK (HEART'S EASE->HEARTSEASE) FROM IT FOR MANY YEARS +5142-33396-0051-949: AND WITH IT I LEAVE YOU A NAME (SIF->SIFT) THE FRIENDLY I SHALL HOPE TO DRINK WITH YOU (SOMETIME->SOME TIME) IN VALHALLA +5142-33396-0052-950: HERE IS A RING FOR (SIF->SIFT) THE FRIENDLY AND HERE IS A BRACELET (*->AND) A SWORD WOULD NOT BE ASHAMED TO HANG AT YOUR SIDE +5142-33396-0053-951: I TOOK FIVE GREAT BRACELETS OF GOLD FROM OUR TREASURE CHEST AND GAVE THEM TO HIM +5142-33396-0054-952: THAT IS THE BEST WAY TO DECIDE FOR THE SPEAR WILL ALWAYS POINT SOMEWHERE AND ONE THING IS AS GOOD AS ANOTHER +5142-33396-0055-953: THAT TIME IT POINTED US INTO YOUR FATHER'S SHIPS +5142-33396-0056-954: HERE THEY SAID (IS->AS) A RASCAL WHO HAS BEEN HARRYING OUR (COASTS->COAST) +5142-33396-0057-955: WE SUNK HIS SHIP AND MEN BUT HIM WE BROUGHT TO YOU +5142-33396-0058-956: A ROBBER VIKING SAID THE KING AND (*->HE) SCOWLED AT ME +5142-33396-0059-957: YES AND WITH ALL YOUR FINGERS IT TOOK YOU A YEAR TO CATCH ME THE KING FROWNED MORE ANGRILY +5142-33396-0060-958: TAKE HIM OUT (THORKEL->TORCOAL) AND LET HIM TASTE YOUR SWORD +5142-33396-0061-959: YOUR MOTHER THE QUEEN WAS STANDING BY +5142-33396-0062-960: NOW SHE PUT HER HAND ON HIS ARM AND SMILED AND SAID +5142-33396-0063-961: AND WOULD HE NOT BE A GOOD GIFT FOR OUR BABY +5142-33396-0064-962: YOUR FATHER THOUGHT A MOMENT (THEN->AND) LOOKED AT YOUR MOTHER AND SMILED +5142-33396-0065-963: SOFT HEART HE SAID GENTLY TO HER THEN TO (THORKEL->TORQUAL) WELL LET HIM GO (THORKEL->TORKO) 
+5142-33396-0066-964: THEN HE TURNED TO ME AGAIN FROWNING +5142-33396-0067-965: BUT YOUNG SHARP TONGUE NOW THAT (WE HAVE->WE'VE) CAUGHT YOU WE WILL PUT YOU INTO A TRAP THAT YOU CANNOT GET OUT OF +5142-33396-0068-966: SO I LIVED AND NOW (AM->I'M) YOUR TOOTH THRALL WELL IT IS THE LUCK OF WAR +5142-36377-0000-870: IT WAS ONE OF THE MASTERLY AND CHARMING STORIES OF DUMAS THE ELDER +5142-36377-0001-871: IN FIVE MINUTES I WAS IN A NEW WORLD AND MY MELANCHOLY ROOM WAS FULL OF THE LIVELIEST FRENCH COMPANY +5142-36377-0002-872: THE SOUND OF AN IMPERATIVE AND UNCOMPROMISING BELL RECALLED ME IN DUE TIME TO THE REGIONS OF REALITY +5142-36377-0003-873: AMBROSE MET ME AT THE BOTTOM OF THE STAIRS AND SHOWED ME THE WAY TO THE SUPPER ROOM +5142-36377-0004-874: SHE SIGNED TO ME WITH A GHOSTLY SOLEMNITY TO TAKE THE VACANT PLACE ON THE LEFT OF HER FATHER +5142-36377-0005-875: THE DOOR OPENED AGAIN WHILE I WAS STILL STUDYING THE TWO BROTHERS WITHOUT I HONESTLY (CONFESS->CONFESSED) BEING VERY FAVORABLY IMPRESSED BY EITHER OF THEM +5142-36377-0006-876: A NEW MEMBER OF THE FAMILY CIRCLE WHO INSTANTLY ATTRACTED MY ATTENTION ENTERED THE ROOM +5142-36377-0007-877: A LITTLE CRACKED THAT IN THE POPULAR PHRASE WAS MY IMPRESSION OF THE STRANGER WHO NOW MADE HIS APPEARANCE IN THE SUPPER ROOM +5142-36377-0008-878: MISTER (MEADOWCROFT->MEDICRAFT) THE ELDER HAVING NOT SPOKEN ONE WORD THUS FAR HIMSELF INTRODUCED THE NEWCOMER TO ME WITH A SIDE GLANCE AT HIS SONS WHICH HAD SOMETHING LIKE DEFIANCE IN IT A GLANCE WHICH AS I WAS SORRY TO NOTICE WAS RETURNED WITH THE DEFIANCE ON THEIR SIDE BY THE TWO YOUNG MEN +5142-36377-0009-879: PHILIP (LEFRANK->LENG) THIS IS MY OVERLOOKER MISTER (JAGO->YAGO) SAID THE OLD MAN (FORMALLY->FORMERLY) PRESENTING US +5142-36377-0010-880: HE IS NOT WELL HE HAS COME OVER THE OCEAN FOR REST AND CHANGE (OF SCENE->IS SEEN) +5142-36377-0011-881: (MISTER JAGO->THE TRIAGO) IS AN AMERICAN PHILIP +5142-36377-0012-882: MAKE ACQUAINTANCE WITH MISTER (JAGO SIT->SIP) TOGETHER +5142-36377-0013-883: THEY POINTEDLY DREW BACK FROM JOHN JAGO AS HE APPROACHED THE EMPTY CHAIR NEXT TO ME AND MOVED ROUND TO THE OPPOSITE SIDE OF THE TABLE +5142-36377-0014-884: A PRETTY GIRL AND SO FAR AS I COULD JUDGE (BY->MY) APPEARANCES A GOOD GIRL TOO DESCRIBING HER GENERALLY I MAY SAY THAT SHE HAD A SMALL HEAD WELL CARRIED AND WELL SET ON HER SHOULDERS BRIGHT GRAY EYES THAT LOOKED AT YOU HONESTLY AND MEANT WHAT THEY LOOKED A TRIM SLIGHT LITTLE FIGURE TOO SLIGHT FOR OUR ENGLISH NOTIONS OF BEAUTY A STRONG AMERICAN ACCENT AND A RARE THING IN AMERICA A PLEASANTLY TONED VOICE WHICH MADE THE ACCENT AGREEABLE TO ENGLISH (EARS->YEARS) +5142-36377-0015-885: OUR FIRST IMPRESSIONS OF PEOPLE ARE IN NINE CASES (OUT OF->AT A) TEN THE RIGHT IMPRESSIONS +5142-36377-0016-886: FOR ONCE IN A WAY I PROVED A TRUE PROPHET +5142-36377-0017-887: THE ONLY CHEERFUL CONVERSATION WAS THE CONVERSATION ACROSS THE TABLE BETWEEN NAOMI AND ME +5142-36377-0018-888: HE LOOKED UP (AT NAOMI->AND NOW AND ME) DOUBTINGLY FROM HIS PLATE AND LOOKED DOWN AGAIN SLOWLY WITH A FROWN +5142-36377-0019-889: WHEN I ADDRESSED HIM HE ANSWERED CONSTRAINEDLY +5142-36377-0020-890: A MORE DREARY AND MORE DISUNITED FAMILY PARTY I NEVER SAT AT THE TABLE WITH +5142-36377-0021-891: ENVY HATRED MALICE AND UNCHARITABLENESS ARE NEVER SO ESSENTIALLY DETESTABLE TO MY MIND AS WHEN THEY ARE ANIMATED BY (A->THE) SENSE OF PROPRIETY AND WORK UNDER THE SURFACE BUT FOR MY INTEREST IN (NAOMI->THEY OWE ME) AND MY OTHER INTEREST IN THE LITTLE LOVE LOOKS WHICH I NOW AND THEN SURPRISED PASSING BETWEEN HER AND 
AMBROSE I SHOULD NEVER HAVE SAT THROUGH THAT SUPPER +5142-36377-0022-892: I WISH YOU GOOD NIGHT SHE LAID HER BONY HANDS ON THE BACK OF MISTER (MEADOWCROFT'S->METICOFF'S) INVALID CHAIR (CUT->CAUGHT) HIM SHORT IN HIS FAREWELL SALUTATION TO ME AND WHEELED HIM OUT TO HIS BED AS IF SHE WERE WHEELING HIM OUT TO HIS GRAVE +5142-36377-0023-893: YOU WERE QUITE RIGHT TO SAY NO AMBROSE BEGAN NEVER SMOKE WITH JOHN (JAGO->IAGO) HIS CIGARS WILL POISON YOU +5142-36377-0024-894: (NAOMI->THEY ONLY) SHOOK HER FOREFINGER REPROACHFULLY AT THEM AS IF THE TWO STURDY YOUNG FARMERS HAD BEEN TWO CHILDREN +5142-36377-0025-895: SILAS SLUNK AWAY WITHOUT A WORD OF PROTEST AMBROSE STOOD HIS GROUND EVIDENTLY BENT ON MAKING HIS PEACE (WITH NAOMI->WHEN THEY ARMY) BEFORE HE LEFT HER SEEING THAT I WAS IN THE WAY I WALKED ASIDE TOWARD A GLASS DOOR AT THE LOWER END OF THE ROOM +5142-36586-0000-967: IT IS MANIFEST THAT MAN IS NOW SUBJECT TO MUCH VARIABILITY +5142-36586-0001-968: SO IT IS WITH THE LOWER ANIMALS +5142-36586-0002-969: THE (VARIABILITY->VERY ABILITY) OF MULTIPLE PARTS +5142-36586-0003-970: BUT THIS SUBJECT WILL BE MORE PROPERLY DISCUSSED WHEN WE TREAT OF THE DIFFERENT RACES OF MANKIND +5142-36586-0004-971: EFFECTS OF THE INCREASED USE AND DISUSE OF PARTS +5142-36600-0000-896: CHAPTER SEVEN ON THE RACES OF MAN +5142-36600-0001-897: (IN->AND) DETERMINING WHETHER TWO OR MORE ALLIED FORMS OUGHT TO BE RANKED (AS->A) SPECIES OR VARIETIES NATURALISTS ARE PRACTICALLY GUIDED BY THE FOLLOWING CONSIDERATIONS NAMELY THE AMOUNT OF DIFFERENCE BETWEEN THEM AND WHETHER SUCH DIFFERENCES RELATE TO FEW OR MANY POINTS OF STRUCTURE AND WHETHER THEY ARE OF PHYSIOLOGICAL IMPORTANCE BUT MORE ESPECIALLY WHETHER THEY ARE CONSTANT +5639-40744-0000-137: ELEVEN O'CLOCK HAD STRUCK IT WAS A FINE CLEAR NIGHT (THEY->THERE) WERE THE ONLY PERSONS ON THE ROAD AND THEY SAUNTERED LEISURELY ALONG TO AVOID PAYING THE PRICE OF FATIGUE FOR THE RECREATION PROVIDED FOR THE TOLEDANS IN (THEIR->THE) VALLEY OR ON THE BANKS OF THEIR RIVER +5639-40744-0001-138: SECURE AS HE THOUGHT IN THE CAREFUL ADMINISTRATION OF JUSTICE IN THAT CITY AND THE CHARACTER OF ITS WELL DISPOSED INHABITANTS THE GOOD (HIDALGO->HADALGO) WAS FAR FROM THINKING THAT ANY DISASTER COULD (BEFAL->BEFALL) HIS FAMILY +5639-40744-0002-139: (RODOLFO->RUDOLPHO) AND HIS COMPANIONS WITH THEIR FACES MUFFLED IN THEIR CLOAKS STARED RUDELY AND INSOLENTLY AT THE MOTHER THE DAUGHTER AND THE SERVANT MAID +5639-40744-0003-140: IN A MOMENT HE COMMUNICATED HIS THOUGHTS TO HIS COMPANIONS AND IN THE NEXT MOMENT THEY RESOLVED TO TURN BACK AND CARRY HER OFF TO PLEASE (RODOLFO->RUDOLPHO) FOR THE RICH WHO ARE OPEN HANDED ALWAYS FIND PARASITES READY TO ENCOURAGE THEIR BAD PROPENSITIES AND THUS TO CONCEIVE THIS WICKED DESIGN TO COMMUNICATE IT APPROVE IT RESOLVE ON RAVISHING (LEOCADIA->LOCATIA) AND TO CARRY THAT DESIGN INTO EFFECT WAS THE WORK OF A MOMENT +5639-40744-0004-141: THEY DREW THEIR SWORDS HID THEIR FACES IN THE FLAPS OF THEIR CLOAKS TURNED BACK AND SOON CAME IN FRONT OF THE LITTLE PARTY WHO HAD NOT YET DONE GIVING THANKS TO GOD FOR THEIR ESCAPE FROM THOSE AUDACIOUS MEN +5639-40744-0005-142: FINALLY THE ONE PARTY WENT OFF EXULTING AND THE OTHER WAS LEFT IN DESOLATION AND WOE +5639-40744-0006-143: (RODOLFO->RUDOLPHO) ARRIVED AT HIS OWN HOUSE WITHOUT ANY IMPEDIMENT (AND LEOCADIA'S->ALYOCADIA'S) PARENTS REACHED THEIRS (HEART BROKEN->HEARTBROKEN) AND DESPAIRING +5639-40744-0007-144: MEANWHILE (RODOLFO->RUDOLPHO) HAD (LEOCADIA->LOCALIA) SAFE IN HIS CUSTODY AND IN HIS OWN APARTMENT +5639-40744-0008-145: WHO TOUCHES 
ME AM I IN BED +5639-40744-0009-146: MOTHER DEAR FATHER DO YOU HEAR ME +5639-40744-0010-147: IT IS THE ONLY AMENDS I ASK OF YOU FOR THE WRONG YOU HAVE DONE ME +5639-40744-0011-148: SHE FOUND THE DOOR BUT IT WAS LOCKED OUTSIDE +5639-40744-0012-149: SHE SUCCEEDED IN OPENING THE WINDOW AND THE MOONLIGHT SHONE IN SO BRIGHTLY THAT SHE COULD DISTINGUISH THE COLOUR OF SOME DAMASK (HANGINGS->HANGING) IN THE ROOM +5639-40744-0013-150: SHE SAW THAT THE BED WAS GILDED AND SO RICH THAT IT SEEMED THAT OF A PRINCE (*->THE) RATHER (THAN->THAT) OF A PRIVATE GENTLEMAN +5639-40744-0014-151: AMONG OTHER THINGS ON WHICH SHE CAST HER EYES WAS A SMALL CRUCIFIX OF SOLID SILVER STANDING ON A CABINET NEAR THE WINDOW +5639-40744-0015-152: THIS PERSON WAS (RODOLFO->RUDOLPHU) WHO THOUGH HE HAD GONE TO LOOK FOR HIS FRIENDS HAD CHANGED HIS MIND IN THAT RESPECT NOT THINKING IT ADVISABLE TO ACQUAINT THEM WITH WHAT HAD PASSED BETWEEN HIM AND THE GIRL +5639-40744-0016-153: ON THE CONTRARY HE RESOLVED TO TELL THEM THAT REPENTING OF HIS VIOLENCE AND MOVED BY (HER->A) TEARS HE HAD ONLY CARRIED HER HALF WAY TOWARDS HIS HOUSE AND THEN LET HER GO +5639-40744-0017-154: CHOKING WITH EMOTION (LEOCADI->LOCATIA) MADE A SIGN TO HER PARENTS THAT SHE WISHED TO BE ALONE WITH THEM +5639-40744-0018-155: THAT WOULD BE VERY WELL MY CHILD REPLIED HER FATHER IF YOUR PLAN WERE NOT LIABLE TO BE FRUSTRATED BY ORDINARY CUNNING BUT NO DOUBT THIS IMAGE (HAS->HAD) BEEN ALREADY MISSED BY ITS OWNER AND HE WILL HAVE SET IT DOWN FOR CERTAIN THAT IT WAS TAKEN OUT OF THE ROOM BY THE PERSON HE LOCKED UP THERE +5639-40744-0019-156: WHAT YOU HAD BEST DO MY CHILD IS TO KEEP IT AND PRAY TO IT THAT (SINCE->SINS) IT WAS A WITNESS TO YOUR UNDOING IT WILL DEIGN TO VINDICATE YOUR CAUSE BY ITS RIGHTEOUS JUDGMENT +5639-40744-0020-157: THUS DID (THIS->THE) HUMANE AND RIGHT MINDED FATHER COMFORT HIS UNHAPPY DAUGHTER AND HER MOTHER EMBRACING HER AGAIN DID ALL SHE COULD TO SOOTHE (HER->THE) FEELINGS +5639-40744-0021-158: SHE MEANWHILE (PASSED->PAST) HER LIFE WITH HER PARENTS IN THE STRICTEST RETIREMENT NEVER LETTING HERSELF BE SEEN BUT SHUNNING EVERY EYE LEST IT SHOULD READ HER MISFORTUNE IN HER FACE +5639-40744-0022-159: TIME ROLLED ON THE HOUR OF HER DELIVERY ARRIVED IT TOOK PLACE IN THE UTMOST SECRECY HER MOTHER TAKING (UPON->UP ON) HER THE OFFICE OF MIDWIFE (AND->AS) SHE GAVE BIRTH TO A SON ONE OF THE MOST BEAUTIFUL EVER SEEN +5639-40744-0023-160: (WHEN->AND) THE BOY WALKED THROUGH THE STREETS BLESSINGS (WERE->WHERE) SHOWERED (UPON->UP ON) HIM BY ALL WHO SAW HIM (BLESSINGS->BLESSING) UPON HIS BEAUTY UPON THE MOTHER THAT BORE HIM UPON THE FATHER THAT BEGOT HIM UPON THOSE WHO BROUGHT HIM UP SO WELL +5639-40744-0024-161: ONE DAY WHEN THE BOY WAS SENT BY HIS GRANDFATHER WITH A MESSAGE TO A RELATION HE PASSED ALONG A STREET IN WHICH THERE WAS A GREAT CONCOURSE OF HORSEMEN +5639-40744-0025-162: THE BED SHE TOO WELL REMEMBERED WAS THERE AND ABOVE ALL THE CABINET ON WHICH HAD STOOD THE IMAGE SHE HAD TAKEN AWAY WAS STILL ON THE SAME SPOT +5639-40744-0026-163: (LUIS->LOUIS) WAS OUT OF DANGER IN A FORTNIGHT IN A MONTH HE ROSE FROM HIS BED AND (DURING->DREWING) ALL THAT TIME HE WAS VISITED DAILY BY HIS MOTHER AND GRANDMOTHER AND TREATED BY THE MASTER AND MISTRESS OF THE HOUSE AS IF HE WAS THEIR OWN CHILD +5639-40744-0027-164: THUS SAYING AND PRESSING THE CRUCIFIX TO HER BREAST SHE FELL FAINTING INTO THE ARMS OF DONA (ESTAFANIA->ESTAPHANIA) WHO AS A GENTLEWOMAN TO WHOSE SEX PITY IS (AS->A) NATURAL AS CRUELTY (IS->AS) TO MAN INSTANTLY PRESSED HER LIPS TO THOSE OF THE FAINTING GIRL 
SHEDDING OVER HER SO MANY TEARS THAT THERE NEEDED NO OTHER SPRINKLING OF WATER TO RECOVER (LEOCADIA->LOCATIA) FROM HER SWOON +5639-40744-0028-165: I HAVE GREAT THINGS TO TELL YOU SENOR SAID (DONA ESTAFANIA->DORIS DANIA) TO HER HUSBAND THE CREAM AND SUBSTANCE OF WHICH IS THIS THE FAINTING GIRL BEFORE YOU IS YOUR DAUGHTER AND (THAT->THE) BOY IS YOUR GRANDSON +5639-40744-0029-166: THIS TRUTH WHICH I HAVE LEARNED FROM HER LIPS IS CONFIRMED BY HIS FACE IN WHICH WE HAVE BOTH BEHELD THAT OF OUR SON +5639-40744-0030-167: JUST THEN (LEOCADIA->LEOCAYA) CAME TO HERSELF AND EMBRACING THE CROSS SEEMED CHANGED INTO A SEA OF TEARS AND THE GENTLEMAN (REMAINED->REMAINING) IN (UTTER BEWILDERMENT->OUT OF A WILDERMENT) UNTIL HIS WIFE HAD REPEATED TO HIM FROM BEGINNING TO END (LEOCADIA'S->LOCATEOUS) WHOLE STORY AND HE BELIEVED IT THROUGH THE BLESSED DISPENSATION OF HEAVEN WHICH HAD CONFIRMED IT BY SO MANY CONVINCING TESTIMONIES +5639-40744-0031-168: SO PERSUASIVE WERE HER ENTREATIES AND SO STRONG HER ASSURANCES THAT NO HARM WHATEVER COULD RESULT TO THEM FROM THE INFORMATION SHE SOUGHT THEY WERE INDUCED TO CONFESS THAT ONE SUMMER'S NIGHT THE SAME SHE HAD MENTIONED THEMSELVES AND ANOTHER FRIEND BEING OUT ON (A->THE) STROLL WITH (RODOLFO->RUDOLPHO) THEY HAD BEEN CONCERNED IN THE (ABDUCTION->ADOPTION) OF A GIRL WHOM (RODOLFO->RUDOLPHO) CARRIED OFF WHILST THE REST OF THEM DETAINED HER FAMILY WHO MADE A GREAT OUTCRY AND WOULD HAVE DEFENDED HER IF THEY COULD +5639-40744-0032-169: FOR GOD'S SAKE MY LADY MOTHER GIVE ME A WIFE WHO (WOULD->WILL) BE AN AGREEABLE COMPANION NOT ONE WHO WILL DISGUST ME SO THAT WE MAY BOTH BEAR EVENLY AND WITH MUTUAL GOOD WILL THE YOKE (IMPOSED->AND POST) ON US BY HEAVEN INSTEAD OF PULLING THIS WAY AND THAT WAY AND FRETTING EACH OTHER TO DEATH +5639-40744-0033-170: HER BEARING WAS GRACEFUL AND ANIMATED SHE LED HER SON BY THE HAND AND BEFORE HER WALKED TWO MAIDS WITH WAX LIGHTS AND SILVER CANDLESTICKS +5639-40744-0034-171: ALL ROSE TO DO HER REVERENCE AS IF SOMETHING FROM HEAVEN HAD MIRACULOUSLY APPEARED BEFORE THEM BUT GAZING ON HER (ENTRANCED->AND TRANCED) WITH ADMIRATION NOT ONE OF THEM WAS ABLE TO ADDRESS A SINGLE WORD TO HER +5639-40744-0035-172: SHE REFLECTED HOW NEAR SHE STOOD TO THE CRISIS WHICH WAS TO DETERMINE WHETHER SHE WAS TO BE BLESSED OR UNHAPPY FOR EVER AND RACKED BY THE INTENSITY OF HER EMOTIONS SHE SUDDENLY CHANGED (COLOUR->COLOR) HER HEAD DROPPED AND SHE FELL FORWARD IN A SWOON INTO THE ARMS OF THE DISMAYED (ESTAFANIA->STEFFANIA) +5639-40744-0036-173: HIS MOTHER HAD LEFT HER TO HIM AS BEING HER DESTINED PROTECTOR BUT WHEN SHE SAW THAT HE TOO WAS INSENSIBLE SHE WAS NEAR MAKING A THIRD AND WOULD HAVE DONE SO HAD HE NOT COME TO HIMSELF +5639-40744-0037-174: KNOW THEN SON OF MY HEART THAT THIS FAINTING LADY IS YOUR REAL BRIDE I SAY REAL BECAUSE SHE IS THE ONE WHOM YOUR FATHER AND I HAVE CHOSEN FOR YOU AND (THE->A) PORTRAIT WAS A PRETENCE +5639-40744-0038-175: JUST AT (THE->A) MOMENT WHEN THE TEARS OF THE PITYING BEHOLDERS FLOWED FASTEST AND (THEIR->THERE) EJACULATIONS WERE MOST EXPRESSIVE OF DESPAIR (LEOCADIA->LE OCCADIA) GAVE SIGNS OF RECOVERY AND BROUGHT BACK GLADNESS (TO->THROUGH) THE HEARTS OF ALL +5639-40744-0039-176: WHEN SHE CAME TO HER SENSES AND BLUSHING TO FIND HERSELF IN (RODOLFO'S->RIDOLPH'S) ARMS WOULD HAVE DISENGAGED HERSELF NO SENORA HE SAID THAT MUST NOT BE STRIVE NOT TO WITHDRAW FROM THE ARMS OF HIM WHO HOLDS YOU IN HIS SOUL +5639-40744-0040-177: THIS WAS DONE FOR THE EVENT TOOK PLACE AT A TIME (WHEN->BUT) THE CONSENT OF THE PARTIES WAS SUFFICIENT FOR THE 
CELEBRATION OF (A->THE) MARRIAGE WITHOUT ANY OF THE PRELIMINARY FORMALITIES WHICH ARE NOW SO PROPERLY REQUIRED +5639-40744-0041-178: NOR WAS (RODOLFO->RUDOLPHAL) LESS SURPRISED THAN THEY AND (THE->A) BETTER TO ASSURE HIMSELF OF SO WONDERFUL A FACT HE BEGGED (LEOCADIA->LOCATIA) TO GIVE HIM SOME TOKEN WHICH SHOULD MAKE PERFECTLY CLEAR TO HIM THAT WHICH INDEED HE DID NOT DOUBT SINCE IT WAS AUTHENTICATED BY HIS PARENTS +5683-32865-0000-2483: YOU KNOW CAPTAIN LAKE +5683-32865-0001-2484: SAID LORD CHELFORD ADDRESSING ME +5683-32865-0002-2485: HE HAD HIS HAND UPON LAKE'S SHOULDER +5683-32865-0003-2486: THEY ARE COUSINS YOU KNOW WE ARE ALL COUSINS +5683-32865-0004-2487: WHATEVER LORD CHELFORD SAID MISS BRANDON RECEIVED IT VERY GRACIOUSLY AND EVEN WITH A MOMENTARY SMILE +5683-32865-0005-2488: BUT HER GREETING TO CAPTAIN (LAKE->LEAK) WAS MORE THAN USUALLY HAUGHTY AND FROZEN AND HER FEATURES I FANCIED PARTICULARLY PROUD AND PALE +5683-32865-0006-2489: AT DINNER LAKE WAS EASY AND AMUSING +5683-32865-0007-2490: (I'M->I AM) GLAD YOU LIKE IT SAYS WYLDER CHUCKLING BENIGNANTLY ON IT OVER HIS SHOULDER +5683-32865-0008-2491: I BELIEVE I HAVE A LITTLE TASTE THAT WAY THOSE ARE ALL REAL YOU KNOW THOSE JEWELS +5683-32865-0009-2492: AND HE PLACED IT IN THAT GENTLEMAN'S FINGERS WHO NOW TOOK HIS TURN AT THE LAMP AND CONTEMPLATED THE LITTLE (PARALLELOGRAM->PARALLELLOGRAM) WITH A GLEAM OF SLY AMUSEMENT +5683-32865-0010-2493: I WAS THINKING IT'S VERY LIKE THE ACE OF HEARTS ANSWERED THE CAPTAIN SOFTLY SMILING ON +5683-32865-0011-2494: WHEREUPON LAKE LAUGHED QUIETLY STILL LOOKING ON THE ACE OF HEARTS WITH HIS SLY EYES +5683-32865-0012-2495: AND (WYLDER LAUGHED->WHILE THEIR LEFT) TOO MORE SUDDENLY AND NOISILY THAN THE (HUMOUR->HUMOR) OF THE JOKE SEEMED QUITE TO CALL FOR AND GLANCED A GRIM LOOK FROM THE CORNERS OF HIS EYES (ON LAKE->UNLAKE) BUT THE GALLANT CAPTAIN DID NOT SEEM TO PERCEIVE IT AND AFTER A FEW SECONDS MORE HE HANDED IT VERY INNOCENTLY BACK TO MISSUS DOROTHY ONLY REMARKING +5683-32865-0013-2496: DO YOU KNOW LAKE OH I REALLY CAN'T TELL BUT HE'LL SOON TIRE OF COUNTRY LIFE +5683-32865-0014-2497: HE'S NOT A MAN FOR COUNTRY QUARTERS +5683-32865-0015-2498: I HAD (A->*) HORRID DREAM ABOUT HIM LAST NIGHT THAT +5683-32865-0016-2499: OH I KNOW THAT'S (LORNE->LORN) BRANDON +5683-32865-0017-2500: ALL THE TIME HE WAS TALKING TO ME HIS ANGRY LITTLE EYES WERE FOLLOWING LAKE +5683-32866-0000-2527: MISS LAKE DECLINED THE CARRIAGE TO NIGHT +5683-32866-0001-2528: AND HE ADDED (SOMETHING->SOME THINGS) STILL LESS COMPLIMENTARY +5683-32866-0002-2529: BUT DON'T THESE VERY WISE THINGS SOMETIMES TURN OUT VERY FOOLISHLY +5683-32866-0003-2530: IN THE MEANTIME I HAD FORMED A NEW IDEA OF HER +5683-32866-0004-2531: BY THIS TIME LORD CHELFORD AND WYLDER RETURNED AND DISGUSTED RATHER WITH MYSELF I RUMINATED ON MY WANT OF (GENERAL SHIP->GENERALSHIP) +5683-32866-0005-2532: AND HE MADE A LITTLE DIP OF HIS CANE TOWARDS BRANDON HALL OVER HIS SHOULDER +5683-32866-0006-2533: YES SO THEY SAID BUT THAT WOULD I THINK HAVE BEEN WORSE +5683-32866-0007-2534: IF A FELLOW'S BEEN A LITTLE BIT WILD (HE'S BEELZEBUB->HE IS BEALES A BUB) AT ONCE +5683-32866-0008-2535: BRACTON'S A VERY GOOD FELLOW I CAN ASSURE YOU +5683-32866-0009-2536: I DON'T KNOW (AND->ONE) CAN'T SAY HOW YOU (FINE->FIND) GENTLEMEN (DEFINE->TO FIND) WICKEDNESS ONLY AS AN OBSCURE FEMALE I SPEAK ACCORDING TO MY LIGHTS AND HE IS GENERALLY THOUGHT THE WICKEDEST MAN IN THIS COUNTY +5683-32866-0010-2537: WELL YOU KNOW RADIE WOMEN LIKE WICKED FELLOWS IT IS CONTRAST I SUPPOSE BUT THEY DO AND I'M SURE 
FROM WHAT BRACTON HAS SAID TO ME I KNOW HIM INTIMATELY THAT DORCAS LIKES HIM AND I CAN'T CONCEIVE WHY THEY ARE NOT MARRIED +5683-32866-0011-2538: THEIR WALK CONTINUED SILENT FOR THE GREATER PART NEITHER WAS QUITE SATISFIED WITH THE OTHER BUT RACHEL AT LAST SAID +5683-32866-0012-2539: NOW THAT'S IMPOSSIBLE RADIE FOR I REALLY DON'T THINK I ONCE THOUGHT OF HIM ALL THIS EVENING EXCEPT JUST WHILE WE WERE TALKING +5683-32866-0013-2540: THERE WAS A BRIGHT MOONLIGHT BROKEN BY THE SHADOWS OF OVERHANGING BOUGHS AND WITHERED LEAVES AND THE MOTTLED LIGHTS AND SHADOWS GLIDED ODDLY ACROSS HIS PALE FEATURES +5683-32866-0014-2541: DON'T INSULT ME STANLEY BY TALKING AGAIN AS YOU DID THIS MORNING +5683-32866-0015-2542: WHAT I SAY IS ALTOGETHER ON YOUR OWN ACCOUNT +5683-32866-0016-2543: MARK MY WORDS YOU'LL FIND HIM TOO STRONG FOR YOU (AYE->AY) AND TOO DEEP +5683-32866-0017-2544: I AM VERY UNEASY ABOUT IT WHATEVER IT IS I CAN'T HELP IT +5683-32866-0018-2545: TO MY MIND THERE HAS ALWAYS BEEN SOMETHING INEXPRESSIBLY AWFUL IN FAMILY FEUDS +5683-32866-0019-2546: THE MYSTERY OF THEIR ORIGIN THEIR CAPACITY FOR EVOLVING LATENT FACULTIES OF CRIME AND THE (STEADY->STUDY) VITALITY WITH WHICH THEY SURVIVE THE HEARSE AND SPEAK THEIR DEEP (MOUTHED->MOUTH) MALIGNITIES IN EVERY NEW BORN GENERATION HAVE ASSOCIATED THEM SOMEHOW IN MY MIND WITH A SPELL OF LIFE EXCEEDING AND DISTINCT FROM HUMAN AND (A SPECIAL->ESPECIAL) SATANIC ACTION +5683-32866-0020-2547: THE FLOOR MORE THAN ANYTHING ELSE SHOWED THE GREAT AGE OF THE ROOM +5683-32866-0021-2548: MY BED WAS (UNEXCEPTIONABLY->UNEXCEPTIONALLY) COMFORTABLE BUT IN MY THEN MOOD I COULD HAVE WISHED IT A GREAT DEAL MORE MODERN +5683-32866-0022-2549: ITS CURTAINS WERE OF THICK AND FADED TAPESTRY +5683-32866-0023-2550: ALL THE FURNITURE BELONGED TO OTHER TIMES +5683-32866-0024-2551: I SHAN'T TROUBLE YOU ABOUT MY TRAIN OF THOUGHTS OR FANCIES BUT I BEGAN TO FEEL VERY LIKE A GENTLEMAN IN A GHOST STORY WATCHING EXPERIMENTALLY IN A HAUNTED CHAMBER +5683-32866-0025-2552: I DID NOT EVEN TAKE THE PRECAUTION OF SMOKING UP THE CHIMNEY +5683-32866-0026-2553: I BOLDLY LIGHTED MY (CHEROOT->JERUET) +5683-32866-0027-2554: A COLD BRIGHT MOON WAS SHINING WITH CLEAR SHARP LIGHTS AND SHADOWS +5683-32866-0028-2555: THE SOMBRE OLD TREES LIKE GIGANTIC HEARSE PLUMES BLACK AND AWFUL +5683-32866-0029-2556: SOMEHOW I HAD GROWN NERVOUS +5683-32866-0030-2557: A LITTLE BIT OF PLASTER TUMBLED DOWN THE CHIMNEY AND STARTLED ME CONFOUNDEDLY +5683-32879-0000-2501: IT WAS NOT VERY MUCH PAST ELEVEN THAT MORNING WHEN THE PONY CARRIAGE FROM BRANDON DREW UP BEFORE THE LITTLE GARDEN WICKET OF REDMAN'S FARM +5683-32879-0001-2502: (WELL->WHILE) SHE WAS BETTER THOUGH SHE HAD HAD A BAD NIGHT +5683-32879-0002-2503: SO THERE CAME A STEP AND A LITTLE RUSTLING OF FEMININE DRAPERIES THE SMALL DOOR OPENED AND RACHEL ENTERED WITH HER HAND EXTENDED AND A PALE SMILE OF WELCOME +5683-32879-0003-2504: WOMEN CAN HIDE THEIR PAIN BETTER THAN WE MEN AND BEAR IT BETTER TOO EXCEPT WHEN SHAME DROPS FIRE INTO THE DREADFUL CHALICE +5683-32879-0004-2505: BUT POOR RACHEL LAKE HAD MORE THAN THAT STOICAL HYPOCRISY WHICH ENABLES THE TORTURED SPIRITS OF HER SEX TO LIFT A PALE FACE THROUGH THE FLAMES AND SMILE +5683-32879-0005-2506: THIS TRANSIENT SPRING AND LIGHTING UP ARE BEAUTIFUL A GLAMOUR BEGUILING OUR SENSES +5683-32879-0006-2507: THERE WAS SOMETHING OF SWEETNESS AND FONDNESS IN HER TONES AND MANNER WHICH WAS NEW TO RACHEL AND COMFORTING AND SHE RETURNED THE GREETING AS KINDLY AND FELT MORE LIKE HER FORMER SELF +5683-32879-0007-2508: RACHEL'S PALE 
AND SHARPENED FEATURES AND DILATED EYE STRUCK HER WITH A PAINFUL SURPRISE +5683-32879-0008-2509: YOU HAVE BEEN SO ILL MY POOR RACHEL +5683-32879-0009-2510: ILL AND TROUBLED DEAR TROUBLED IN MIND AND MISERABLY NERVOUS +5683-32879-0010-2511: POOR RACHEL HER NATURE RECOILED FROM DECEIT AND SHE TOLD AT ALL EVENTS AS MUCH OF THE TRUTH AS SHE DARED +5683-32879-0011-2512: SHE SPOKE WITH A SUDDEN ENERGY WHICH PARTOOK (OF->A) FEAR AND PASSION AND FLUSHED HER THIN CHEEK AND MADE HER LANGUID EYES FLASH +5683-32879-0012-2513: THANK YOU RACHEL MY COUSIN RACHEL MY ONLY FRIEND +5683-32879-0013-2514: CHELFORD HAD A NOTE FROM MISTER WYLDER THIS MORNING ANOTHER NOTE HIS COMING DELAYED AND SOMETHING OF HIS HAVING TO SEE SOME PERSON WHO (IS->WAS) ABROAD CONTINUED DORCAS AFTER A LITTLE PAUSE +5683-32879-0014-2515: YES SOMETHING EVERYTHING SAID RACHEL HURRIEDLY LOOKING FROWNINGLY AT A FLOWER WHICH SHE WAS TWIRLING IN HER FINGERS +5683-32879-0015-2516: YES SAID RACHEL +5683-32879-0016-2517: AND THE WAN ORACLE HAVING SPOKEN SHE (SATE->SAT) DOWN IN THE SAME SORT OF ABSTRACTION AGAIN BESIDE DORCAS AND SHE LOOKED FULL IN HER COUSIN'S EYES +5683-32879-0017-2518: OF MARK WYLDER I SAY THIS HIS NAME HAS BEEN FOR YEARS HATEFUL TO ME AND RECENTLY IT HAS BECOME FRIGHTFUL AND YOU WILL PROMISE ME SIMPLY THIS THAT YOU WILL NEVER ASK ME TO SPEAK AGAIN ABOUT HIM +5683-32879-0018-2519: IT IS AN ANTIPATHY AN ANTIPATHY I CANNOT GET OVER DEAR DORCAS YOU MAY THINK IT A MADNESS BUT DON'T BLAME ME +5683-32879-0019-2520: I HAVE VERY FEW TO LOVE ME NOW AND I THOUGHT YOU MIGHT LOVE ME AS I HAVE BEGUN TO LOVE YOU +5683-32879-0020-2521: AND SHE THREW HER ARMS ROUND HER COUSIN'S NECK AND BRAVE RACHEL AT LAST BURST INTO TEARS +5683-32879-0021-2522: DORCAS IN HER STRANGE WAY WAS MOVED +5683-32879-0022-2523: I LIKE YOU STILL RACHEL I'M SURE I'LL ALWAYS LIKE YOU +5683-32879-0023-2524: YOU RESEMBLE ME RACHEL YOU ARE FEARLESS AND INFLEXIBLE AND GENEROUS +5683-32879-0024-2525: YES RACHEL I DO LOVE YOU +5683-32879-0025-2526: THANK YOU DORCAS DEAR +61-70968-0000-2179: HE BEGAN A CONFUSED COMPLAINT AGAINST THE WIZARD WHO HAD VANISHED BEHIND THE CURTAIN ON THE LEFT +61-70968-0001-2180: GIVE NOT SO EARNEST A MIND TO THESE MUMMERIES CHILD +61-70968-0002-2181: A GOLDEN FORTUNE AND A HAPPY LIFE +61-70968-0003-2182: HE WAS LIKE UNTO MY FATHER IN A WAY AND YET WAS NOT MY FATHER +61-70968-0004-2183: ALSO THERE WAS A STRIPLING PAGE WHO TURNED INTO A MAID +61-70968-0005-2184: THIS WAS SO SWEET A LADY SIR AND IN SOME MANNER I DO THINK SHE DIED +61-70968-0006-2185: BUT THEN THE PICTURE WAS GONE AS QUICKLY AS IT CAME +61-70968-0007-2186: SISTER NELL DO YOU HEAR THESE MARVELS +61-70968-0008-2187: TAKE YOUR PLACE AND LET US SEE WHAT THE CRYSTAL CAN SHOW TO YOU +61-70968-0009-2188: LIKE AS NOT YOUNG MASTER THOUGH I AM AN OLD MAN +61-70968-0010-2189: FORTHWITH ALL RAN TO THE OPENING OF THE TENT TO SEE WHAT MIGHT BE AMISS BUT MASTER WILL WHO PEEPED OUT FIRST NEEDED NO MORE THAN ONE GLANCE +61-70968-0011-2190: HE GAVE WAY TO THE OTHERS VERY READILY AND RETREATED UNPERCEIVED BY THE SQUIRE AND MISTRESS FITZOOTH TO THE REAR OF THE TENT +61-70968-0012-2191: CRIES OF (A NOTTINGHAM A->UNNOTTINGHAM ARE) NOTTINGHAM +61-70968-0013-2192: BEFORE THEM FLED THE STROLLER AND HIS THREE SONS CAPLESS AND TERRIFIED +61-70968-0014-2193: WHAT IS THE TUMULT AND RIOTING CRIED OUT THE SQUIRE AUTHORITATIVELY AND HE BLEW TWICE ON (A->THE) SILVER WHISTLE WHICH HUNG AT HIS BELT +61-70968-0015-2194: NAY WE REFUSED THEIR REQUEST MOST POLITELY MOST NOBLE SAID THE LITTLE STROLLER 
+61-70968-0016-2195: AND THEN THEY BECAME VEXED AND WOULD HAVE SNATCHED YOUR PURSE FROM US +61-70968-0017-2196: I COULD NOT SEE MY BOY (INJURED->INJURE) EXCELLENCE FOR BUT DOING HIS DUTY AS ONE OF CUMBERLAND'S SONS +61-70968-0018-2197: SO I DID PUSH THIS FELLOW +61-70968-0019-2198: IT IS ENOUGH SAID GEORGE GAMEWELL SHARPLY (AND->AS) HE TURNED UPON THE CROWD +61-70968-0020-2199: (SHAME->SHEEM) ON YOU CITIZENS CRIED HE I BLUSH FOR MY FELLOWS OF NOTTINGHAM +61-70968-0021-2200: SURELY WE CAN SUBMIT WITH GOOD GRACE +61-70968-0022-2201: TIS FINE FOR YOU TO TALK OLD MAN ANSWERED THE LEAN SULLEN APPRENTICE +61-70968-0023-2202: BUT I WRESTLED WITH THIS FELLOW AND DO KNOW THAT HE PLAYED UNFAIRLY IN THE SECOND BOUT +61-70968-0024-2203: SPOKE THE SQUIRE LOSING ALL (PATIENCE->PATIENT) AND IT WAS TO YOU THAT I GAVE ANOTHER (PURSE IN->PERSON) CONSOLATION +61-70968-0025-2204: COME TO ME MEN HERE HERE HE RAISED HIS VOICE STILL LOUDER +61-70968-0026-2205: THE STROLLERS TOOK THEIR PART IN IT WITH HEARTY ZEST NOW THAT THEY HAD SOME CHANCE OF BEATING OFF THEIR FOES +61-70968-0027-2206: ROBIN AND THE LITTLE TUMBLER BETWEEN THEM TRIED TO FORCE THE SQUIRE TO STAND BACK AND VERY VALIANTLY DID THESE TWO COMPORT THEMSELVES +61-70968-0028-2207: THE HEAD AND CHIEF OF THE RIOT THE NOTTINGHAM (APPRENTICE->APPRENTICED) WITH CLENCHED FISTS THREATENED MONTFICHET +61-70968-0029-2208: THE SQUIRE HELPED TO THRUST THEM ALL IN AND ENTERED SWIFTLY HIMSELF +61-70968-0030-2209: NOW BE SILENT ON YOUR LIVES HE BEGAN BUT THE CAPTURED APPRENTICE SET UP AN INSTANT SHOUT +61-70968-0031-2210: SILENCE YOU KNAVE CRIED MONTFICHET +61-70968-0032-2211: HE FELT FOR AND FOUND THE WIZARD'S BLACK CLOTH THE SQUIRE WAS QUITE OUT OF BREATH +61-70968-0033-2212: THRUSTING OPEN THE PROPER ENTRANCE OF THE TENT ROBIN SUDDENLY RUSHED FORTH WITH HIS BURDEN WITH A GREAT SHOUT +61-70968-0034-2213: A MONTFICHET A MONTFICHET GAMEWELL TO THE RESCUE +61-70968-0035-2214: TAKING ADVANTAGE OF THIS THE SQUIRE'S FEW MEN REDOUBLED THEIR EFFORTS AND ENCOURAGED BY (ROBIN'S->ROBINS) AND THE LITTLE STROLLER'S CRIES FOUGHT THEIR WAY TO HIM +61-70968-0036-2215: GEORGE MONTFICHET WILL NEVER FORGET THIS DAY +61-70968-0037-2216: WHAT IS YOUR NAME LORDING ASKED THE LITTLE STROLLER PRESENTLY +61-70968-0038-2217: ROBIN FITZOOTH +61-70968-0039-2218: AND MINE IS WILL STUTELEY SHALL WE BE COMRADES +61-70968-0040-2219: RIGHT WILLINGLY FOR BETWEEN US WE HAVE WON THE BATTLE ANSWERED ROBIN +61-70968-0041-2220: I LIKE YOU WILL YOU ARE THE SECOND WILL THAT I HAVE MET AND LIKED WITHIN TWO DAYS IS THERE A SIGN IN THAT +61-70968-0042-2221: (MONTFICHET->MARTFICHET) CALLED OUT FOR ROBIN TO GIVE HIM AN ARM +61-70968-0043-2222: FRIENDS SAID MONTFICHET FAINTLY TO THE WRESTLERS BEAR US ESCORT SO FAR AS THE SHERIFF'S HOUSE +61-70968-0044-2223: IT WILL NOT BE SAFE FOR YOU TO STAY HERE NOW +61-70968-0045-2224: PRAY FOLLOW US WITH MINE AND MY LORD SHERIFF'S MEN +61-70968-0046-2225: (NOTTINGHAM->NODDING HIM) CASTLE WAS REACHED AND ADMITTANCE WAS DEMANDED +61-70968-0047-2226: MASTER MONCEUX THE SHERIFF OF NOTTINGHAM WAS MIGHTILY PUT ABOUT WHEN TOLD OF THE RIOTING +61-70968-0048-2227: AND HENRY MIGHT RETURN TO ENGLAND AT ANY MOMENT +61-70968-0049-2228: HAVE YOUR WILL CHILD IF THE BOY ALSO (WILLS->WILDS) IT MONTFICHET ANSWERED FEELING TOO ILL TO OPPOSE ANYTHING VERY STRONGLY JUST THEN +61-70968-0050-2229: HE MADE AN EFFORT TO HIDE HIS CONDITION FROM THEM ALL AND ROBIN FELT HIS FINGERS TIGHTEN UPON HIS ARM +61-70968-0051-2230: (BEG->BEGGED) ME A ROOM OF THE SHERIFF CHILD QUICKLY +61-70968-0052-2231: BUT 
WHO IS THIS FELLOW PLUCKING (AT YOUR SLEEVE->IT OR STEVE) +61-70968-0053-2232: HE IS MY ESQUIRE EXCELLENCY RETURNED ROBIN WITH DIGNITY +61-70968-0054-2233: MISTRESS FITZOOTH HAD BEEN CARRIED OFF BY THE SHERIFF'S DAUGHTER AND HER MAIDS AS SOON AS THEY HAD ENTERED THE HOUSE SO THAT ROBIN ALONE HAD THE CARE OF (MONTFICHET->MONT VICHET) +61-70968-0055-2234: ROBIN WAS GLAD WHEN AT LENGTH THEY WERE LEFT TO THEIR OWN DEVICES +61-70968-0056-2235: THE WINE DID CERTAINLY BRING BACK THE COLOR TO THE SQUIRE'S CHEEKS +61-70968-0057-2236: THESE ESCAPADES ARE NOT FOR OLD GAMEWELL LAD HIS DAY HAS COME TO TWILIGHT +61-70968-0058-2237: WILL YOU FORGIVE ME NOW +61-70968-0059-2238: IT WILL BE NO DISAPPOINTMENT TO ME +61-70968-0060-2239: NO THANKS I AM GLAD TO GIVE YOU SUCH EASY HAPPINESS +61-70968-0061-2240: YOU ARE A WORTHY LEECH WILL PRESENTLY WHISPERED ROBIN THE WINE HAS WORKED A MARVEL +61-70968-0062-2241: (AY->I) AND SHOW YOU SOME PRETTY TRICKS +61-70970-0000-2242: YOUNG FITZOOTH HAD BEEN COMMANDED TO HIS MOTHER'S CHAMBER SO SOON AS HE HAD COME OUT FROM HIS CONVERSE WITH THE SQUIRE +61-70970-0001-2243: THERE (BEFELL->BEFEL) AN ANXIOUS INTERVIEW MISTRESS FITZOOTH ARGUING (FOR->FOUR) AND AGAINST THE SQUIRE'S PROJECT IN A BREATH +61-70970-0002-2244: MOST OF ALL ROBIN THOUGHT OF HIS FATHER WHAT WOULD HE COUNSEL +61-70970-0003-2245: IF FOR A WHIM YOU BEGGAR YOURSELF I CANNOT STAY YOU +61-70970-0004-2246: BUT TAKE IT WHILST I LIVE AND (WEAR->WHERE) MONTFICHET'S SHIELD IN THE DAYS WHEN MY EYES CAN BE REJOICED BY SO BRAVE A SIGHT FOR YOU WILL (NE'ER->NEVER) DISGRACE OUR (SCUTCHEON->DUCHEN) I WARRANT ME +61-70970-0005-2247: THE LAD HAD CHECKED HIM THEN +61-70970-0006-2248: NEVER THAT SIR HE HAD SAID +61-70970-0007-2249: HE WAS IN DEEP CONVERSE WITH THE CLERK AND ENTERED THE HALL HOLDING HIM BY THE ARM +61-70970-0008-2250: NOW TO BED BOY +61-70970-0009-2251: TIS LATE AND I GO MYSELF WITHIN A SHORT SPACE +61-70970-0010-2252: DISMISS YOUR SQUIRE ROBIN AND BID ME GOOD (E E N->EVEN) +61-70970-0011-2253: AS ANY IN ENGLAND I WOULD SAY SAID GAMEWELL PROUDLY THAT IS IN HIS DAY +61-70970-0012-2254: YET HE WILL TEACH YOU A FEW TRICKS WHEN MORNING IS COME +61-70970-0013-2255: THERE WAS NO CHANCE TO ALTER HIS SLEEPING ROOM TO ONE NEARER TO GAMEWELL'S CHAMBER +61-70970-0014-2256: PRESENTLY HE CROSSED THE FLOOR OF HIS ROOM WITH DECIDED STEP +61-70970-0015-2257: WILL CRIED HE SOFTLY AND STUTELEY WHO HAD CHOSEN HIS COUCH ACROSS THE DOOR OF HIS YOUNG MASTER'S CHAMBER SPRANG UP AT ONCE IN ANSWER +61-70970-0016-2258: WE WILL GO OUT TOGETHER TO THE BOWER THERE IS A WAY DOWN TO THE COURT FROM MY WINDOW +61-70970-0017-2259: REST AND BE STILL UNTIL I WARN YOU +61-70970-0018-2260: THE HOURS PASSED WEARILY BY AND MOVEMENT COULD YET BE HEARD ABOUT THE HALL +61-70970-0019-2261: AT LAST ALL WAS QUIET AND BLACK IN THE COURTYARD OF GAMEWELL +61-70970-0020-2262: WILL WHISPERED ROBIN OPENING HIS DOOR AS HE SPOKE ARE YOU READY +61-70970-0021-2263: THEY THEN RENEWED THEIR JOURNEY AND UNDER THE BETTER LIGHT MADE A SAFE CROSSING OF THE STABLE ROOFS +61-70970-0022-2264: ROBIN ENTERED THE HUT DRAGGING THE UNWILLING ESQUIRE AFTER HIM +61-70970-0023-2265: BE NOT SO FOOLISH FRIEND SAID FITZOOTH CROSSLY +61-70970-0024-2266: THEY MOVED THEREAFTER CAUTIOUSLY ABOUT THE HUT GROPING BEFORE AND ABOUT THEM TO FIND SOMETHING TO SHOW THAT (*->THE) WARRENTON HAD FULFILLED HIS MISSION +61-70970-0025-2267: THEY WERE UPON THE VERGE OF AN OPEN TRAP IN THE FAR CORNER OF THE HUT AND STUTELEY HAD TRIPPED OVER THE EDGE OF THE REVERSED FLAP MOUTH OF THIS PIT 
+61-70970-0026-2268: (FITZOOTH'S->FITUTH'S) HAND RESTED AT LAST UPON THE (TOP RUNG->TOPRUNG) OF A LADDER AND SLOWLY THE TRUTH CAME TO HIM +61-70970-0027-2269: ROBIN CAREFULLY DESCENDED THE LADDER AND FOUND HIMSELF SOON UPON FIRM ROCKY GROUND +61-70970-0028-2270: STUTELEY WAS BY HIS SIDE IN A FLASH AND THEN THEY BOTH BEGAN FEELING ABOUT THEM TO ASCERTAIN THE SHAPE AND CHARACTER OF THIS VAULT +61-70970-0029-2271: FROM THE BLACKNESS BEHIND THE LIGHT THEY HEARD A VOICE (WARRENTON'S->WARRENTONS) +61-70970-0030-2272: SAVE ME MASTERS BUT YOU STARTLED ME RARELY +61-70970-0031-2273: CRIED HE WAVING THE (LANTHORN->LANTERN) BEFORE HIM TO MAKE SURE THAT THESE WERE NO GHOSTS IN FRONT OF HIM +61-70970-0032-2274: (ENQUIRED->INQUIRED) ROBIN WITH HIS (SUSPICIONS->SUSPICION) STILL UPON HIM +61-70970-0033-2275: TRULY SUCH A HORSE (SHOULD->WOULD) BE WORTH MUCH IN NOTTINGHAM FAIR +61-70970-0034-2276: NAY NAY LORDING ANSWERED WARRENTON WITH A HALF LAUGH +61-70970-0035-2277: WARRENTON SPOKE THUS WITH SIGNIFICANCE TO SHOW ROBIN THAT HE WAS NOT TO THINK (GEOFFREY'S->JEFFREY'S) CLAIMS TO THE ESTATE WOULD BE PASSED BY +61-70970-0036-2278: ROBIN FITZOOTH SAW THAT HIS DOUBTS OF WARRENTON HAD BEEN UNFAIR AND HE BECAME ASHAMED OF HIMSELF FOR (HARBORING->HARBOURING) THEM +61-70970-0037-2279: HIS TONES RANG PLEASANTLY (ON WARRENTON'S->UNWARRANTON'S) EARS AND FORTHWITH (A->THE) GOOD FELLOWSHIP WAS HERALDED BETWEEN THEM +61-70970-0038-2280: THE OLD SERVANT TOLD HIM QUIETLY AS THEY CREPT BACK TO GAMEWELL THAT THIS (PASSAGE WAY->PASSAGEWAY) LED FROM THE HUT IN THE (PLEASANCE->PLEASANTS) TO SHERWOOD AND THAT (GEOFFREY->JEFFREY) FOR THE TIME WAS HIDING WITH THE OUTLAWS IN THE FOREST +61-70970-0039-2281: HE (IMPLORES->IMPLORS) US TO BE DISCREET AS THE GRAVE IN THIS MATTER FOR IN SOOTH HIS LIFE IS IN THE HOLLOW OF OUR HANDS +61-70970-0040-2282: THEY (REGAINED->REGAIN) THEIR APARTMENT APPARENTLY WITHOUT DISTURBING THE HOUSEHOLD OF (GAMEWELL->GAINWELL) +672-122797-0000-1529: OUT IN THE (WOODS->WOOD) STOOD A NICE LITTLE FIR TREE +672-122797-0001-1530: THE PLACE HE HAD WAS A VERY GOOD ONE THE SUN SHONE ON HIM AS TO FRESH AIR THERE WAS ENOUGH OF THAT AND ROUND HIM GREW MANY LARGE SIZED COMRADES PINES AS WELL AS (FIRS->FURS) +672-122797-0002-1531: HE DID NOT THINK OF THE WARM SUN AND OF THE FRESH AIR HE DID NOT CARE FOR THE LITTLE COTTAGE CHILDREN THAT RAN ABOUT (AND->IN) PRATTLED WHEN THEY WERE IN THE WOODS LOOKING FOR WILD STRAWBERRIES +672-122797-0003-1532: BUT THIS WAS WHAT THE TREE COULD NOT BEAR TO HEAR +672-122797-0004-1533: IN WINTER WHEN THE SNOW LAY GLITTERING ON THE GROUND A HARE WOULD OFTEN COME LEAPING ALONG AND JUMP RIGHT OVER THE LITTLE TREE +672-122797-0005-1534: OH THAT MADE HIM SO ANGRY +672-122797-0006-1535: TO GROW AND GROW TO GET OLDER AND BE TALL THOUGHT THE TREE THAT AFTER ALL IS THE MOST DELIGHTFUL THING IN THE WORLD +672-122797-0007-1536: IN AUTUMN THE (WOOD CUTTERS->WOODCUTTERS) ALWAYS CAME AND FELLED SOME OF THE LARGEST TREES +672-122797-0008-1537: THIS HAPPENED EVERY YEAR AND THE YOUNG FIR TREE THAT HAD NOW GROWN TO A VERY COMELY (SIZE->SIZED) TREMBLED AT THE SIGHT FOR THE MAGNIFICENT GREAT TREES FELL TO THE EARTH WITH NOISE AND CRACKING THE BRANCHES WERE LOPPED OFF AND THE TREES LOOKED LONG AND BARE THEY WERE HARDLY TO BE (RECOGNISED->RECOGNIZED) AND THEN THEY WERE LAID IN CARTS AND THE HORSES DRAGGED THEM OUT OF THE WOOD +672-122797-0009-1538: HAVE YOU NOT MET (THEM ANYWHERE->THE MANYWHERE) +672-122797-0010-1539: REJOICE IN THY GROWTH SAID THE SUNBEAMS +672-122797-0011-1540: AND THEN WHAT HAPPENS THEN 
+672-122797-0012-1541: I WOULD FAIN KNOW IF I AM DESTINED FOR SO GLORIOUS A CAREER CRIED THE TREE REJOICING +672-122797-0013-1542: I AM NOW TALL AND MY BRANCHES SPREAD LIKE THE OTHERS THAT WERE CARRIED OFF LAST YEAR OH +672-122797-0014-1543: WERE I BUT ALREADY ON THE CART +672-122797-0015-1544: (WERE->WHERE) I IN THE WARM ROOM WITH ALL (THE SPLENDOR->BUT SPLENDOUR) AND MAGNIFICENCE +672-122797-0016-1545: YES (THEN->AND) SOMETHING BETTER SOMETHING STILL GRANDER WILL SURELY FOLLOW OR WHEREFORE SHOULD THEY THUS ORNAMENT ME +672-122797-0017-1546: SOMETHING BETTER (SOMETHING->OR SOME THING) STILL GRANDER MUST FOLLOW BUT WHAT +672-122797-0018-1547: REJOICE IN OUR PRESENCE SAID THE (AIR AND->HEIR IN) THE SUNLIGHT +672-122797-0019-1548: REJOICE IN THY OWN FRESH YOUTH +672-122797-0020-1549: BUT THE TREE DID NOT REJOICE AT ALL HE GREW AND GREW AND WAS GREEN BOTH WINTER AND SUMMER +672-122797-0021-1550: AND TOWARDS CHRISTMAS HE WAS ONE OF THE FIRST THAT WAS CUT DOWN +672-122797-0022-1551: THE AXE STRUCK DEEP INTO THE VERY PITH THE TREE FELL TO THE EARTH WITH A SIGH HE FELT A PANG IT WAS LIKE A SWOON HE COULD NOT THINK OF HAPPINESS FOR HE WAS SORROWFUL AT BEING SEPARATED FROM HIS HOME FROM THE PLACE WHERE HE HAD SPRUNG UP +672-122797-0023-1552: HE WELL KNEW THAT HE SHOULD NEVER SEE HIS DEAR OLD COMRADES THE LITTLE BUSHES AND FLOWERS AROUND HIM (ANYMORE->ANY MORE) PERHAPS NOT EVEN THE BIRDS +672-122797-0024-1553: THE DEPARTURE WAS NOT AT ALL AGREEABLE +672-122797-0025-1554: THE TREE ONLY CAME TO HIMSELF WHEN HE WAS UNLOADED IN A (COURT YARD->COURTYARD) WITH THE OTHER TREES AND HEARD A MAN SAY THAT ONE IS SPLENDID WE DON'T WANT THE OTHERS +672-122797-0026-1555: THERE TOO WERE LARGE EASY CHAIRS SILKEN SOFAS LARGE TABLES FULL OF PICTURE BOOKS AND FULL OF TOYS WORTH HUNDREDS AND HUNDREDS OF CROWNS AT LEAST THE CHILDREN SAID SO +672-122797-0027-1556: THE SERVANTS AS WELL AS THE YOUNG LADIES DECORATED IT +672-122797-0028-1557: THIS EVENING THEY ALL SAID +672-122797-0029-1558: HOW IT WILL SHINE THIS EVENING +672-122797-0030-1559: PERHAPS THE OTHER TREES FROM THE FOREST WILL COME TO LOOK AT ME +672-122797-0031-1560: IT BLAZED UP FAMOUSLY HELP HELP +672-122797-0032-1561: CRIED THE YOUNG LADIES AND THEY QUICKLY PUT OUT THE FIRE +672-122797-0033-1562: A STORY +672-122797-0034-1563: A STORY CRIED THE CHILDREN DRAWING A LITTLE FAT MAN TOWARDS THE TREE +672-122797-0035-1564: BUT I SHALL TELL ONLY ONE STORY +672-122797-0036-1565: HUMPY (DUMPY->DON'T BE) FELL DOWNSTAIRS AND YET HE MARRIED THE PRINCESS +672-122797-0037-1566: THAT'S THE WAY OF THE WORLD +672-122797-0038-1567: THOUGHT THE FIR TREE AND BELIEVED IT ALL BECAUSE THE MAN WHO TOLD THE STORY WAS SO GOOD LOOKING WELL WELL +672-122797-0039-1568: I WON'T TREMBLE TO MORROW THOUGHT THE FIR TREE +672-122797-0040-1569: AND THE WHOLE NIGHT THE TREE STOOD STILL AND IN DEEP THOUGHT +672-122797-0041-1570: IN THE MORNING THE SERVANT AND THE HOUSEMAID CAME IN +672-122797-0042-1571: BUT THEY DRAGGED HIM OUT OF THE ROOM AND UP THE STAIRS INTO THE LOFT AND HERE (IN->IT) A DARK CORNER WHERE NO DAYLIGHT COULD ENTER THEY LEFT HIM +672-122797-0043-1572: WHAT'S THE MEANING OF THIS THOUGHT THE TREE +672-122797-0044-1573: AND HE LEANED AGAINST THE WALL LOST IN REVERIE +672-122797-0045-1574: TIME ENOUGH HAD HE TOO FOR HIS REFLECTIONS FOR DAYS AND NIGHTS PASSED ON AND NOBODY CAME UP AND WHEN AT LAST SOMEBODY DID COME IT WAS ONLY TO PUT SOME GREAT TRUNKS IN A CORNER OUT OF THE WAY +672-122797-0046-1575: TIS NOW WINTER OUT OF DOORS THOUGHT THE TREE +672-122797-0047-1576: HOW KIND MAN IS 
AFTER ALL +672-122797-0048-1577: IF IT ONLY WERE NOT SO DARK HERE AND SO TERRIBLY LONELY +672-122797-0049-1578: SQUEAK SQUEAK +672-122797-0050-1579: THEY SNUFFED ABOUT THE FIR TREE AND RUSTLED AMONG THE BRANCHES +672-122797-0051-1580: I AM BY NO MEANS OLD SAID THE FIR TREE +672-122797-0052-1581: THERE'S MANY A ONE CONSIDERABLY OLDER THAN I AM +672-122797-0053-1582: THEY WERE SO EXTREMELY CURIOUS +672-122797-0054-1583: I KNOW NO SUCH PLACE SAID THE TREE +672-122797-0055-1584: AND THEN HE TOLD ALL ABOUT HIS YOUTH AND THE LITTLE MICE HAD NEVER HEARD THE LIKE BEFORE AND THEY LISTENED AND SAID +672-122797-0056-1585: SAID THE FIR TREE THINKING OVER WHAT HE HAD HIMSELF RELATED +672-122797-0057-1586: YES IN REALITY THOSE WERE HAPPY TIMES +672-122797-0058-1587: WHO (IS->IT'S) HUMPY (DUMPY->DUMPEY) ASKED THE MICE +672-122797-0059-1588: ONLY THAT ONE ANSWERED THE TREE +672-122797-0060-1589: IT IS A VERY STUPID STORY +672-122797-0061-1590: DON'T YOU KNOW ONE ABOUT BACON AND TALLOW CANDLES CAN'T YOU TELL ANY LARDER STORIES +672-122797-0062-1591: NO SAID THE TREE +672-122797-0063-1592: THEN GOOD BYE SAID THE RATS AND THEY WENT HOME +672-122797-0064-1593: AT LAST THE LITTLE MICE STAYED AWAY ALSO AND THE TREE SIGHED AFTER ALL IT WAS VERY PLEASANT WHEN THE SLEEK LITTLE MICE SAT ROUND ME AND LISTENED TO WHAT I TOLD THEM +672-122797-0065-1594: NOW THAT TOO IS OVER +672-122797-0066-1595: WHY ONE MORNING THERE CAME A QUANTITY OF PEOPLE AND SET TO WORK IN THE LOFT +672-122797-0067-1596: THE TRUNKS WERE MOVED THE TREE WAS PULLED OUT AND THROWN RATHER HARD IT IS TRUE DOWN ON THE FLOOR BUT A MAN DREW HIM (TOWARDS->TOWARD) THE STAIRS WHERE THE DAYLIGHT SHONE +672-122797-0068-1597: BUT IT WAS NOT THE FIR TREE THAT THEY MEANT +672-122797-0069-1598: IT WAS IN A CORNER THAT HE LAY AMONG WEEDS AND NETTLES +672-122797-0070-1599: THE GOLDEN STAR OF TINSEL WAS STILL ON THE TOP OF THE TREE AND GLITTERED IN THE SUNSHINE +672-122797-0071-1600: IN THE (COURT YARD->COURTYARD) SOME OF THE (MERRY->MARRIED) CHILDREN WERE PLAYING WHO HAD DANCED AT CHRISTMAS ROUND THE FIR TREE AND WERE SO GLAD AT THE SIGHT OF HIM +672-122797-0072-1601: AND THE GARDENER'S BOY CHOPPED THE TREE INTO SMALL PIECES THERE WAS A WHOLE HEAP LYING THERE +672-122797-0073-1602: THE WOOD FLAMED UP SPLENDIDLY UNDER THE LARGE BREWING COPPER AND (IT SIGHED->ITS SIDE) SO DEEPLY +672-122797-0074-1603: HOWEVER THAT WAS OVER NOW THE TREE GONE THE STORY AT AN END +6829-68769-0000-1858: KENNETH AND BETH REFRAINED FROM TELLING THE OTHER GIRLS OR UNCLE JOHN OF OLD WILL (ROGERS'S->ROGERS) VISIT BUT THEY GOT MISTER WATSON IN THE LIBRARY AND QUESTIONED HIM CLOSELY ABOUT THE PENALTY FOR FORGING A CHECK +6829-68769-0001-1859: IT WAS A SERIOUS CRIME INDEED MISTER WATSON TOLD THEM AND TOM GATES BADE FAIR TO SERVE A LENGTHY TERM IN (*->THE) STATE'S PRISON AS A CONSEQUENCE OF HIS RASH ACT +6829-68769-0002-1860: I CAN'T SEE IT IN THAT LIGHT SAID THE OLD LAWYER +6829-68769-0003-1861: IT WAS A DELIBERATE THEFT FROM HIS EMPLOYERS TO PROTECT A GIRL HE LOVED +6829-68769-0004-1862: BUT THEY COULD NOT HAVE PROVEN A CASE AGAINST LUCY IF SHE WAS INNOCENT AND ALL THEIR THREATS OF ARRESTING HER WERE PROBABLY (*->A) MERE BLUFF +6829-68769-0005-1863: HE WAS SOFT HEARTED AND IMPETUOUS SAID BETH AND BEING IN LOVE HE DIDN'T STOP TO COUNT THE COST +6829-68769-0006-1864: IF THE PROSECUTION WERE WITHDRAWN AND THE CASE SETTLED WITH THE VICTIM OF THE FORGED CHECK THEN THE YOUNG MAN WOULD BE ALLOWED HIS FREEDOM +6829-68769-0007-1865: BUT UNDER THE CIRCUMSTANCES I DOUBT (IF->OF) SUCH AN ARRANGEMENT COULD 
BE MADE +6829-68769-0008-1866: (FAIRVIEW WAS->FAIR VIEWS) TWELVE MILES AWAY BUT BY TEN O'CLOCK THEY DREW UP AT THE COUNTY (JAIL->TRAIL) +6829-68769-0009-1867: THEY WERE RECEIVED IN THE LITTLE OFFICE BY A MAN NAMED MARKHAM WHO WAS THE JAILER +6829-68769-0010-1868: WE WISH TO TALK WITH HIM ANSWERED KENNETH TALK +6829-68769-0011-1869: I'M RUNNING FOR REPRESENTATIVE ON THE REPUBLICAN TICKET SAID KENNETH QUIETLY +6829-68769-0012-1870: (OH->I'LL) SAY THAT'S DIFFERENT OBSERVED MARKHAM ALTERING HIS DEMEANOR +6829-68769-0013-1871: (MAY WE SEE->MAYBE SEA) GATES AT ONCE ASKED KENNETH +6829-68769-0014-1872: THEY FOLLOWED THE JAILER ALONG A SUCCESSION OF PASSAGES +6829-68769-0015-1873: SOMETIMES (I'M->ON) THAT (YEARNING FOR A->YEARNIN FUR AS) SMOKE I'M NEARLY CRAZY (AN->AND) I DUNNO WHICH IS (WORST DYIN->WORSE DYING) ONE WAY OR (ANOTHER->THE OTHER) +6829-68769-0016-1874: HE UNLOCKED THE DOOR AND CALLED HERE'S VISITORS TOM +6829-68769-0017-1875: WORSE TOM WORSE (N->THAN) EVER REPLIED THE JAILER GLOOMILY +6829-68769-0018-1876: (MISS DE->MISTER) GRAF SAID KENNETH NOTICING THE BOY'S FACE CRITICALLY AS HE STOOD WHERE THE LIGHT FROM THE PASSAGE FELL UPON IT +6829-68769-0019-1877: (SORRY->SIR) WE HAVEN'T ANY RECEPTION ROOM IN THE JAIL +6829-68769-0020-1878: SIT DOWN PLEASE SAID GATES IN A CHEERFUL AND PLEASANT VOICE THERE'S A BENCH HERE +6829-68769-0021-1879: A FRESH WHOLESOME LOOKING BOY WAS TOM GATES (WITH->WAS) STEADY GRAY EYES AN INTELLIGENT FOREHEAD BUT A SENSITIVE RATHER WEAK MOUTH +6829-68769-0022-1880: WE HAVE HEARD SOMETHING OF YOUR STORY SAID KENNETH AND ARE INTERESTED IN IT +6829-68769-0023-1881: I DIDN'T STOP TO THINK WHETHER IT WAS FOOLISH OR NOT I DID IT AND I'M GLAD I DID (*->IT) +6829-68769-0024-1882: OLD WILL IS A FINE FELLOW BUT POOR AND HELPLESS SINCE MISSUS ROGERS HAD HER ACCIDENT +6829-68769-0025-1883: THEN ROGERS WOULDN'T DO ANYTHING BUT LEAD HER AROUND AND WAIT UPON HER AND THE PLACE WENT TO RACK AND RUIN +6829-68769-0026-1884: HE SPOKE SIMPLY BUT PACED UP AND DOWN THE NARROW CELL IN FRONT OF THEM +6829-68769-0027-1885: WHOSE NAME DID YOU SIGN TO THE CHECK ASKED KENNETH +6829-68769-0028-1886: HE IS SUPPOSED TO SIGN ALL THE CHECKS OF THE CONCERN +6829-68769-0029-1887: IT'S A STOCK COMPANY (AND->IN) RICH +6829-68769-0030-1888: I WAS (BOOKKEEPER->BIT KEEPER) SO IT WAS EASY TO GET A BLANK CHECK AND FORGE THE SIGNATURE +6829-68769-0031-1889: AS REGARDS MY ROBBING THE COMPANY I'LL SAY THAT I SAVED (THEM->HIM) A HEAVY (LOSS->LOST) ONE DAY +6829-68769-0032-1890: I DISCOVERED AND PUT OUT A FIRE THAT WOULD HAVE DESTROYED THE WHOLE PLANT BUT (MARSHALL->MARTIAL) NEVER EVEN THANKED ME +6829-68769-0033-1891: IT WAS BETTER FOR HIM TO THINK THE GIRL UNFEELING THAN TO KNOW THE TRUTH +6829-68769-0034-1892: I'M GOING TO SEE MISTER (MARSHALL->MARSHAL) SAID KENNETH AND DISCOVER WHAT I CAN DO TO ASSIST YOU THANK YOU SIR +6829-68769-0035-1893: IT WON'T BE MUCH BUT I'M GRATEFUL TO FIND A FRIEND +6829-68769-0036-1894: THEY LEFT HIM THEN FOR THE JAILER ARRIVED TO UNLOCK THE DOOR AND ESCORT THEM TO THE OFFICE +6829-68769-0037-1895: I'VE SEEN LOTS OF THAT KIND IN MY DAY +6829-68769-0038-1896: AND IT RUINS A MAN'S DISPOSITION +6829-68769-0039-1897: HE LOOKED UP RATHER UNGRACIOUSLY BUT MOTIONED THEM TO BE SEATED +6829-68769-0040-1898: SOME GIRL HAS BEEN (*->IN) HERE TWICE TO INTERVIEW MY MEN AND I HAVE REFUSED TO ADMIT HER +6829-68769-0041-1899: I'M NOT ELECTIONEERING JUST NOW +6829-68769-0042-1900: OH WELL SIR WHAT ABOUT (HIM->EM) +6829-68769-0043-1901: AND HE DESERVES A TERM IN STATE'S PRISON 
+6829-68769-0044-1902: IT HAS COST ME TWICE SIXTY DOLLARS IN ANNOYANCE +6829-68769-0045-1903: I'LL PAY ALL THE (COSTS->COST) BESIDES +6829-68769-0046-1904: YOU'RE FOOLISH WHY SHOULD YOU DO ALL THIS +6829-68769-0047-1905: I HAVE MY OWN REASONS MISTER MARSHALL +6829-68769-0048-1906: GIVE ME A CHECK FOR A HUNDRED AND FIFTY AND I'LL TURN OVER TO YOU THE (FORGED->FORCH) CHECK AND (QUASH->CRASH) FURTHER PROCEEDINGS +6829-68769-0049-1907: HE DETESTED THE GRASPING DISPOSITION THAT WOULD ENDEAVOR TO TAKE ADVANTAGE OF HIS EVIDENT DESIRE TO HELP YOUNG GATES +6829-68769-0050-1908: BETH UNEASY AT (HIS->A) SILENCE NUDGED HIM +6829-68769-0051-1909: THERE WAS A GRIM SMILE OF AMUSEMENT ON HIS SHREWD FACE +6829-68769-0052-1910: HE MIGHT HAVE HAD THAT FORGED CHECK FOR THE FACE OF IT IF HE'D BEEN SHARP +6829-68769-0053-1911: AND TO THINK WE CAN SAVE ALL THAT MISERY AND DESPAIR BY THE PAYMENT OF A HUNDRED AND FIFTY DOLLARS +6829-68771-0000-1912: SO TO THE SURPRISE OF THE DEMOCRATIC COMMITTEE AND ALL HIS FRIENDS MISTER HOPKINS ANNOUNCED THAT HE WOULD OPPOSE (FORBES'S->FORCE) AGGRESSIVE CAMPAIGN WITH AN EQUAL AGGRESSIVENESS AND SPEND AS MANY DOLLARS IN DOING SO AS MIGHT BE NECESSARY +6829-68771-0001-1913: ONE OF MISTER (HOPKINS'S->HOPKINS) FIRST TASKS AFTER CALLING HIS FAITHFUL (HENCHMEN->HENCHMAN) AROUND HIM WAS TO MAKE A CAREFUL (CANVASS->CANVAS) OF THE VOTERS OF HIS DISTRICT TO SEE WHAT WAS STILL TO BE ACCOMPLISHED +6829-68771-0002-1914: THE WEAK (KNEED->NEED) CONTINGENCY MUST BE STRENGTHENED AND FORTIFIED AND A COUPLE OF HUNDRED VOTES IN ONE WAY OR (ANOTHER->THE OTHER) SECURED FROM THE OPPOSITION +6829-68771-0003-1915: THE DEMOCRATIC COMMITTEE FIGURED OUT A WAY TO DO THIS +6829-68771-0004-1916: UNDER ORDINARY CONDITIONS REYNOLDS WAS SURE TO BE ELECTED BUT THE COMMITTEE PROPOSED TO SACRIFICE HIM IN ORDER TO ELECT HOPKINS +6829-68771-0005-1917: THE ONLY THING NECESSARY WAS TO FIX SETH REYNOLDS AND THIS HOPKINS ARRANGED PERSONALLY +6829-68771-0006-1918: AND THIS WAS WHY KENNETH AND BETH DISCOVERED HIM CONVERSING WITH THE YOUNG WOMAN IN THE BUGGY +6829-68771-0007-1919: THE DESCRIPTION SHE GAVE OF THE COMING RECEPTION TO THE (WOMAN'S->WOMEN'S) POLITICAL LEAGUE WAS SO HUMOROUS AND DIVERTING THAT THEY WERE BOTH LAUGHING HEARTILY OVER THE THING WHEN THE YOUNG PEOPLE PASSED THEM AND THUS MISTER HOPKINS FAILED TO NOTICE WHO THE (OCCUPANTS->OCCUPANT) OF THE OTHER VEHICLE WERE +6829-68771-0008-1920: THESE WOMEN WERE FLATTERED BY THE ATTENTION OF THE YOUNG LADY AND HAD PROMISED TO ASSIST IN ELECTING MISTER FORBES +6829-68771-0009-1921: LOUISE HOPED FOR EXCELLENT RESULTS FROM THIS ORGANIZATION AND WISHED THE ENTERTAINMENT TO BE SO EFFECTIVE IN WINNING THEIR GOOD WILL THAT THEY WOULD WORK EARNESTLY FOR THE CAUSE IN WHICH THEY WERE ENLISTED +6829-68771-0010-1922: THE (FAIRVIEW->FAIR VIEW) BAND WAS ENGAGED TO DISCOURSE AS MUCH HARMONY AS IT COULD PRODUCE AND THE RESOURCES OF THE GREAT HOUSE WERE TAXED TO ENTERTAIN THE GUESTS +6829-68771-0011-1923: TABLES WERE SPREAD ON THE LAWN AND A DAINTY BUT SUBSTANTIAL REPAST WAS TO BE SERVED +6829-68771-0012-1924: THIS WAS THE FIRST OCCASION WITHIN A GENERATION WHEN SUCH AN ENTERTAINMENT HAD BEEN GIVEN AT ELMHURST AND THE ONLY (ONE->WHEN) WITHIN THE MEMORY OF MAN (WHERE->WERE) THE NEIGHBORS AND COUNTRY PEOPLE HAD BEEN (*->THE) INVITED GUESTS +6829-68771-0013-1925: THE (ATTENDANCE->ATTENDANTS) WAS UNEXPECTEDLY LARGE AND THE GIRLS WERE DELIGHTED FORESEEING GREAT SUCCESS FOR THEIR (FETE->FIGHT) +6829-68771-0014-1926: WE OUGHT TO HAVE MORE (ATTENDANTS->ATTENDANCE) BETH SAID LOUISE 
APPROACHING HER COUSIN +6829-68771-0015-1927: WON'T YOU RUN INTO THE HOUSE AND SEE IF MARTHA CAN'T SPARE ONE OR TWO MORE (MAIDS->MATES) +6829-68771-0016-1928: SHE WAS VERY FOND OF THE YOUNG LADIES WHOM SHE HAD KNOWN WHEN AUNT JANE WAS (THE->THEIR) MISTRESS HERE AND BETH WAS HER (ESPECIAL->SPECIAL) FAVORITE +6829-68771-0017-1929: THE HOUSEKEEPER LED THE WAY (AND->IN) BETH FOLLOWED +6829-68771-0018-1930: FOR A MOMENT BETH STOOD STARING WHILE THE NEW MAID REGARDED HER WITH COMPOSURE AND (A->OF) SLIGHT SMILE UPON HER BEAUTIFUL FACE +6829-68771-0019-1931: SHE WAS DRESSED IN THE REGULATION COSTUME OF THE MAIDS AT ELMHURST A (PLAIN->PLAYING) BLACK GOWN WITH (*->A) WHITE APRON AND CAP +6829-68771-0020-1932: THEN SHE GAVE A LITTLE LAUGH AND REPLIED NO MISS BETH I'M ELIZABETH (PARSONS->PARSON'S) +6829-68771-0021-1933: BUT IT CAN'T BE PROTESTED THE GIRL +6829-68771-0022-1934: I ATTEND TO THE HOUSEHOLD MENDING YOU KNOW AND CARE FOR THE LINEN +6829-68771-0023-1935: YOU SPEAK LIKE AN EDUCATED PERSON SAID BETH WONDERINGLY WHERE IS YOUR HOME +6829-68771-0024-1936: FOR THE FIRST TIME THE MAID SEEMED A LITTLE CONFUSED AND HER GAZE WANDERED FROM THE FACE OF HER VISITOR +6829-68771-0025-1937: SHE SAT DOWN IN A ROCKING CHAIR AND CLASPING HER HANDS IN HER LAP (ROCKED->ROCK) SLOWLY BACK AND FORTH I'M SORRY SAID BETH +6829-68771-0026-1938: ELIZA (PARSONS->PARSON) SHOOK HER HEAD +6829-68771-0027-1939: (THEY->FATE) THEY EXCITE ME IN SOME WAY AND I I CAN'T BEAR THEM YOU MUST EXCUSE ME +6829-68771-0028-1940: SHE EVEN SEEMED MILDLY AMUSED AT THE ATTENTION SHE ATTRACTED +6829-68771-0029-1941: BETH WAS A BEAUTIFUL GIRL THE HANDSOMEST OF THE THREE COUSINS BY FAR YET ELIZA SURPASSED HER (IN->A) NATURAL CHARM AND SEEMED WELL AWARE OF THE FACT +6829-68771-0030-1942: HER MANNER WAS NEITHER INDEPENDENT NOR ASSERTIVE BUT RATHER ONE OF WELL BRED COMPOSURE AND CALM RELIANCE +6829-68771-0031-1943: HER EYES WANDERED TO THE MAID'S HANDS +6829-68771-0032-1944: HOWEVER HER FEATURES (AND FORM->INFORM) MIGHT REPRESS ANY EVIDENCE OF NERVOUSNESS THESE HANDS TOLD A DIFFERENT STORY +6829-68771-0033-1945: SHE ROSE QUICKLY TO HER FEET WITH AN IMPETUOUS GESTURE THAT MADE HER VISITOR CATCH HER BREATH +6829-68771-0034-1946: I WISH I KNEW MYSELF SHE CRIED FIERCELY +6829-68771-0035-1947: WILL YOU LEAVE ME ALONE IN MY OWN ROOM OR MUST I GO AWAY TO ESCAPE YOU +6829-68771-0036-1948: ELIZA CLOSED THE DOOR BEHIND HER WITH A DECIDED SLAM AND A KEY CLICKED IN THE LOCK +6930-75918-0000-0: CONCORD RETURNED TO ITS PLACE AMIDST THE TENTS +6930-75918-0001-1: THE ENGLISH (FORWARDED->FOOTED) TO THE FRENCH BASKETS OF FLOWERS OF WHICH THEY HAD MADE A PLENTIFUL PROVISION TO GREET THE ARRIVAL OF THE YOUNG PRINCESS THE FRENCH IN RETURN INVITED THE ENGLISH TO A SUPPER WHICH WAS TO BE GIVEN THE NEXT DAY +6930-75918-0002-2: CONGRATULATIONS WERE POURED IN UPON THE PRINCESS EVERYWHERE DURING HER JOURNEY +6930-75918-0003-3: FROM THE RESPECT PAID HER ON ALL SIDES SHE SEEMED LIKE A QUEEN AND FROM THE ADORATION WITH WHICH SHE WAS TREATED BY TWO OR THREE SHE APPEARED AN OBJECT OF WORSHIP THE QUEEN MOTHER GAVE THE FRENCH THE MOST AFFECTIONATE RECEPTION FRANCE WAS HER NATIVE COUNTRY AND SHE HAD SUFFERED TOO MUCH UNHAPPINESS IN ENGLAND FOR ENGLAND TO HAVE MADE HER FORGET FRANCE +6930-75918-0004-4: SHE TAUGHT HER DAUGHTER THEN BY HER OWN AFFECTION FOR IT THAT LOVE FOR A COUNTRY WHERE THEY HAD BOTH BEEN HOSPITABLY RECEIVED AND WHERE A BRILLIANT FUTURE OPENED (BEFORE->FOR) THEM +6930-75918-0005-5: THE COUNT HAD THROWN HIMSELF BACK ON HIS SEAT LEANING HIS SHOULDERS AGAINST THE 
PARTITION OF THE TENT AND REMAINED THUS HIS FACE BURIED IN HIS HANDS WITH HEAVING CHEST AND RESTLESS LIMBS +6930-75918-0006-6: THIS HAS INDEED BEEN A HARASSING DAY CONTINUED THE YOUNG MAN HIS EYES FIXED UPON HIS FRIEND +6930-75918-0007-7: YOU WILL BE FRANK WITH ME I ALWAYS AM +6930-75918-0008-8: CAN YOU IMAGINE (WHY->MY) BUCKINGHAM HAS BEEN SO VIOLENT I SUSPECT +6930-75918-0009-9: IT IS YOU WHO ARE MISTAKEN RAOUL I HAVE READ HIS DISTRESS IN HIS EYES IN HIS EVERY GESTURE AND ACTION THE WHOLE DAY +6930-75918-0010-10: I CAN PERCEIVE LOVE CLEARLY ENOUGH +6930-75918-0011-11: I AM CONVINCED OF WHAT I SAY SAID THE COUNT +6930-75918-0012-12: IT IS ANNOYANCE THEN +6930-75918-0013-13: IN THOSE VERY TERMS I EVEN ADDED MORE +6930-75918-0014-14: BUT CONTINUED RAOUL NOT INTERRUPTED BY THIS MOVEMENT OF HIS FRIEND HEAVEN BE PRAISED THE FRENCH WHO ARE PRONOUNCED TO BE THOUGHTLESS AND INDISCREET RECKLESS EVEN ARE CAPABLE OF BRINGING A CALM AND SOUND JUDGMENT TO BEAR ON MATTERS OF SUCH HIGH IMPORTANCE +6930-75918-0015-15: THUS IT IS THAT THE HONOR OF THREE IS SAVED OUR (COUNTRY'S->COUNTRY) OUR (MASTER'S->MASTERS) AND OUR OWN +6930-75918-0016-16: YES I NEED REPOSE MANY THINGS HAVE AGITATED ME TO DAY BOTH IN MIND AND BODY WHEN YOU RETURN TO MORROW I SHALL NO LONGER BE THE SAME MAN +6930-75918-0017-17: BUT IN THIS FRIENDLY PRESSURE RAOUL COULD DETECT THE NERVOUS AGITATION OF A GREAT INTERNAL CONFLICT +6930-75918-0018-18: THE NIGHT WAS CLEAR STARLIT AND SPLENDID THE TEMPEST HAD PASSED AWAY AND THE SWEET INFLUENCES OF THE EVENING HAD RESTORED LIFE PEACE AND SECURITY EVERYWHERE +6930-75918-0019-19: UPON THE LARGE SQUARE IN FRONT OF THE HOTEL THE SHADOWS OF THE TENTS INTERSECTED BY THE GOLDEN MOONBEAMS FORMED AS IT WERE A HUGE MOSAIC OF JET AND YELLOW FLAGSTONES +6930-75918-0020-20: (BRAGELONNE->BRAGGLIN) WATCHED FOR SOME TIME THE CONDUCT OF THE TWO LOVERS LISTENED TO THE LOUD AND UNCIVIL SLUMBERS OF MANICAMP WHO SNORED AS IMPERIOUSLY AS THOUGH HE WAS WEARING HIS BLUE AND GOLD INSTEAD OF HIS VIOLET SUIT +6930-76324-0000-21: GOLIATH MAKES ANOTHER DISCOVERY +6930-76324-0001-22: (THEY->THERE) WERE CERTAINLY NO (NEARER->NEAR) THE SOLUTION OF THEIR PROBLEM +6930-76324-0002-23: THE POOR LITTLE THINGS CRIED CYNTHIA THINK OF THEM HAVING BEEN TURNED TO THE WALL ALL THESE YEARS +6930-76324-0003-24: NOW WHAT (WAS->IS) THE SENSE OF IT (TWO->TOO) INNOCENT BABIES LIKE THAT +6930-76324-0004-25: BUT JOYCE HAD NOT BEEN LISTENING ALL AT ONCE SHE PUT DOWN HER CANDLE ON THE TABLE AND FACED HER COMPANION +6930-76324-0005-26: THE TWIN BROTHER DID SOMETHING SHE DIDN'T LIKE AND SHE TURNED HIS PICTURE TO THE WALL +6930-76324-0006-27: HERS HAPPENED TO BE (IN->ON) THE SAME FRAME TOO BUT SHE EVIDENTLY DIDN'T CARE ABOUT (THAT->IT) +6930-76324-0007-28: NOW WHAT HAVE YOU TO SAY CYNTHIA SPRAGUE +6930-76324-0008-29: I THOUGHT WE WERE STUMPED AGAIN WHEN I FIRST SAW THAT PICTURE BUT (IT'S BEEN->IT SPIN) OF SOME USE AFTER ALL +6930-76324-0009-30: DO YOU SUPPOSE THE MINIATURE WAS A COPY OF THE SAME THING +6930-76324-0010-31: (WHAT->ONE) IN THE WORLD IS (THAT->IT) QUERIED JOYCE +6930-76324-0011-32: (THEY->MAY) WORRY ME TERRIBLY (AND BESIDES->EMBICIDES) I'D LIKE TO SEE WHAT THIS LOVELY FURNITURE LOOKS LIKE WITHOUT SUCH QUANTITIES OF DUST ALL OVER IT GOOD SCHEME (CYN->SIN) +6930-76324-0012-33: (WE'LL->WILL) COME (IN->AND) HERE THIS AFTERNOON WITH OLD CLOTHES ON AND (HAVE->HALF) A REGULAR HOUSE CLEANING +6930-76324-0013-34: (IT->YOU) CAN'T HURT ANYTHING I'M SURE FOR WE WON'T DISTURB THINGS AT ALL +6930-76324-0014-35: THIS THOUGHT HOWEVER DID NOT ENTER THE 
HEADS OF THE ENTHUSIASTIC PAIR +6930-76324-0015-36: SMUGGLING THE HOUSE CLEANING (PARAPHERNALIA->PAIR FERNALIA) INTO THE CELLAR WINDOW UNOBSERVED THAT AFTERNOON PROVED NO EASY TASK FOR CYNTHIA HAD ADDED A WHISK BROOM AND DUST PAN TO THE OUTFIT +6930-76324-0016-37: THE LURE PROVED TOO MUCH FOR HIM AND HE CAME SPORTING AFTER IT AS (FRISKILY->FRISKLY) AS A YOUNG KITTEN MUCH TO CYNTHIA'S DELIGHT WHEN SHE CAUGHT SIGHT OF HIM +6930-76324-0017-38: OH LET HIM COME ALONG SHE URGED I DO LOVE TO SEE HIM ABOUT THAT OLD HOUSE +6930-76324-0018-39: HE MAKES IT SORT OF (COZIER->COSIER) +6930-76324-0019-40: NOW (LET'S->ITS) DUST THE FURNITURE AND PICTURES +6930-76324-0020-41: YET LITTLE AS IT WAS IT HAD ALREADY MADE A VAST DIFFERENCE IN THE ASPECT OF THE ROOM +6930-76324-0021-42: SURFACE (DUST->DUS) AT LEAST HAD BEEN REMOVED AND THE FINE OLD FURNITURE GAVE A HINT OF ITS REAL ELEGANCE AND POLISH +6930-76324-0022-43: THEN SHE SUDDENLY REMARKED +6930-76324-0023-44: AND MY POCKET MONEY IS GETTING LOW AGAIN AND YOU HAVEN'T ANY LEFT AS USUAL +6930-76324-0024-45: THEY SAY ILLUMINATION BY CANDLE LIGHT IS THE PRETTIEST IN THE WORLD +6930-76324-0025-46: WHY (IT'S->IT) GOLIATH AS USUAL THEY BOTH CRIED PEERING IN +6930-76324-0026-47: ISN'T HE THE GREATEST FOR GETTING INTO ODD CORNERS +6930-76324-0027-48: FORGETTING ALL THEIR WEARINESS THEY SEIZED THEIR CANDLES AND SCURRIED THROUGH THE HOUSE FINDING (AN->ON) OCCASIONAL PAPER TUCKED AWAY IN SOME ODD CORNER +6930-76324-0028-49: WELL I'M CONVINCED THAT THE BOARDED UP HOUSE MYSTERY HAPPENED NOT EARLIER THAN APRIL SIXTEENTH EIGHTEEN SIXTY ONE AND PROBABLY NOT MUCH LATER +6930-81414-0000-50: NO WORDS WERE SPOKEN NO LANGUAGE WAS UTTERED SAVE THAT OF WAILING AND HISSING AND THAT SOMEHOW WAS INDISTINCT AS IF IT EXISTED IN FANCY AND NOT IN REALITY +6930-81414-0001-51: I HEARD A NOISE BEHIND I TURNED AND SAW (KAFFAR->KAFFIR) HIS BLACK EYES SHINING WHILE IN HIS HAND HE HELD A GLEAMING KNIFE HE LIFTED IT ABOVE HIS HEAD AS IF TO STRIKE BUT I HAD THE STRENGTH OF TEN MEN AND I HURLED HIM FROM ME +6930-81414-0002-52: ONWARD SAID A DISTANT VOICE +6930-81414-0003-53: NO SOUND BROKE THE STILLNESS OF THE NIGHT +6930-81414-0004-54: THE STORY OF ITS EVIL INFLUENCE CAME BACK TO ME AND IN MY BEWILDERED CONDITION I WONDERED WHETHER THERE WAS NOT SOME TRUTH IN WHAT HAD BEEN SAID +6930-81414-0005-55: WHAT WAS THAT +6930-81414-0006-56: WHAT THEN A HUMAN HAND LARGE AND (SHAPELY->SHABBY) APPEARED DISTINCTLY ON THE SURFACE OF THE POND +6930-81414-0007-57: NOTHING MORE NOT EVEN THE WRIST TO WHICH IT MIGHT BE ATTACHED +6930-81414-0008-58: IT DID NOT BECKON OR INDEED MOVE AT ALL IT WAS AS STILL AS THE HAND OF DEATH +6930-81414-0009-59: I AWOKE TO CONSCIOUSNESS FIGHTING AT FIRST IT SEEMED AS IF I WAS FIGHTING WITH (A->THE) PHANTOM BUT GRADUALLY MY OPPONENT BECAME MORE REAL TO ME IT WAS (KAFFAR->KAFFIR) +6930-81414-0010-60: A SOUND OF VOICES A FLASH OF LIGHT +6930-81414-0011-61: A FEELING OF FREEDOM AND I WAS AWAKE WHERE +6930-81414-0012-62: SAID ANOTHER VOICE WHICH I RECOGNIZED AS VOLTAIRE'S (KAFFAR->KAFFIR) +6930-81414-0013-63: I HAD SCARCELY KNOWN (WHAT->WHEN) I HAD BEEN SAYING OR DOING UP TO THIS TIME BUT AS HE SPOKE I LOOKED AT MY HAND +6930-81414-0014-64: IN THE LIGHT OF THE MOON I SAW A KNIFE RED WITH BLOOD AND MY HAND TOO WAS ALSO (DISCOLOURED->DISCOLORED) +6930-81414-0015-65: I DO NOT KNOW I AM DAZED BEWILDERED +6930-81414-0016-66: BUT THAT IS (KAFFAR'S->KAFFIR'S) KNIFE +6930-81414-0017-67: I KNOW HE HAD IT THIS VERY EVENING +6930-81414-0018-68: I (REMEMBER->REMEMBERED) SAYING HAVE WE BEEN 
TOGETHER +6930-81414-0019-69: (VOLTAIRE->OLD CHAIR) PICKED UP SOMETHING FROM THE GROUND AND LOOKED AT IT +6930-81414-0020-70: I SAY YOU DO KNOW WHAT THIS MEANS AND YOU MUST TELL US +6930-81414-0021-71: A TERRIBLE THOUGHT FLASHED INTO MY MIND +6930-81414-0022-72: I HAD AGAIN BEEN ACTING UNDER THE INFLUENCE OF THIS MAN'S POWER +6930-81414-0023-73: PERCHANCE (TOO KAFFAR'S->TO KAFFIR'S) DEATH MIGHT SERVE HIM IN GOOD STEAD +6930-81414-0024-74: MY TONGUE REFUSED TO ARTICULATE MY POWER OF SPEECH (LEFT->LAUGHED) ME +6930-81414-0025-75: MY POSITION WAS TOO TERRIBLE +6930-81414-0026-76: MY OVERWROUGHT NERVES YIELDED AT LAST +6930-81414-0027-77: FOR SOME TIME AFTER THAT I REMEMBERED NOTHING DISTINCTLY +7021-79730-0000-1399: THE THREE MODES OF MANAGEMENT +7021-79730-0001-1400: TO SUPPOSE THAT THE OBJECT OF THIS WORK IS TO AID IN (EFFECTING->AFFECTING) SUCH A SUBSTITUTION AS THAT IS ENTIRELY TO MISTAKE ITS NATURE AND DESIGN +7021-79730-0002-1401: BY REASON AND AFFECTION +7021-79730-0003-1402: AS THE (CHAISE->CHASE) DRIVES AWAY MARY STANDS BEWILDERED AND PERPLEXED ON THE (DOOR STEP->DOORSTEP) HER MIND IN A TUMULT OF EXCITEMENT IN WHICH HATRED OF THE DOCTOR DISTRUST AND SUSPICION OF HER MOTHER DISAPPOINTMENT VEXATION AND ILL (HUMOR->HUMOUR) SURGE AND SWELL AMONG THOSE DELICATE ORGANIZATIONS ON WHICH THE STRUCTURE AND DEVELOPMENT OF THE SOUL SO CLOSELY DEPEND DOING PERHAPS AN IRREPARABLE INJURY +7021-79730-0004-1403: THE MOTHER AS SOON AS THE (CHAISE->CHASE) IS SO FAR TURNED THAT MARY CAN NO LONGER WATCH THE EXPRESSION OF HER COUNTENANCE GOES AWAY FROM THE DOOR WITH A SMILE OF COMPLACENCY AND SATISFACTION (UPON->ON) HER FACE AT THE INGENUITY AND SUCCESS OF HER LITTLE ARTIFICE +7021-79730-0005-1404: SO YOU WILL BE A GOOD GIRL I KNOW AND NOT MAKE ANY TROUBLE BUT WILL STAY AT HOME CONTENTEDLY WON'T YOU +7021-79730-0006-1405: THE MOTHER IN MANAGING THE CASE IN THIS WAY (RELIES->REALIZE) PARTLY ON CONVINCING THE REASON OF THE CHILD AND PARTLY ON AN APPEAL TO HER AFFECTION +7021-79730-0007-1406: IF YOU SHOULD NOT BE A GOOD GIRL BUT SHOULD SHOW SIGNS OF MAKING US ANY TROUBLE I SHALL HAVE TO SEND YOU OUT SOMEWHERE TO THE BACK PART OF THE HOUSE UNTIL WE ARE GONE +7021-79730-0008-1407: BUT THIS LAST SUPPOSITION IS ALMOST ALWAYS UNNECESSARY FOR IF MARY HAS BEEN HABITUALLY MANAGED ON THIS PRINCIPLE SHE WILL NOT MAKE ANY TROUBLE +7021-79730-0009-1408: IT IS INDEED TRUE THAT THE IMPORTANCE OF TACT AND SKILL IN THE TRAINING OF THE YOUNG AND OF CULTIVATING THEIR REASON AND SECURING THEIR AFFECTION (CAN NOT->CANNOT) BE OVERRATED +7021-79740-0000-1384: TO SUCH PERSONS THESE INDIRECT MODES OF TRAINING CHILDREN IN HABITS OF SUBORDINATION TO THEIR WILL OR RATHER OF YIELDING TO THEIR INFLUENCE ARE SPECIALLY USEFUL +7021-79740-0001-1385: DELLA HAD A YOUNG SISTER NAMED MARIA AND A COUSIN WHOSE NAME WAS JANE +7021-79740-0002-1386: NOW (DELIA->GELIA) CONTRIVED TO OBTAIN A GREAT INFLUENCE AND (ASCENDENCY->ASCENDANCY) OVER THE MINDS OF THE CHILDREN BY MEANS OF THESE DOLLS +7021-79740-0003-1387: TO GIVE AN IDEA OF THESE CONVERSATIONS I WILL REPORT ONE OF THEM IN FULL +7021-79740-0004-1388: YOU HAVE COME (ANDELLA ANDELLA->AND DELLA AND DELLA) WAS THE NAME OF (JANE'S DOLL->JANE STALL) TO MAKE ROSALIE A VISIT +7021-79740-0005-1389: I AM VERY GLAD +7021-79740-0006-1390: I EXPECT YOU HAVE BEEN A VERY GOOD GIRL ANDELLA SINCE YOU WERE HERE LAST +7021-79740-0007-1391: THEN TURNING TO JANE SHE ASKED IN A SOMEWHAT ALTERED TONE HAS SHE BEEN A GOOD GIRL JANE +7021-79740-0008-1392: FOR INSTANCE ONE DAY THE CHILDREN HAD BEEN PLAYING UPON THE 
PIAZZA WITH BLOCKS AND OTHER PLAYTHINGS AND FINALLY HAD GONE INTO THE HOUSE LEAVING ALL THE THINGS ON THE FLOOR OF THE PIAZZA INSTEAD OF PUTTING THEM AWAY IN THEIR PLACES AS THEY OUGHT TO HAVE DONE +7021-79740-0009-1393: THEY WERE NOW PLAYING WITH THEIR DOLLS IN THE (PARLOR->PARLOUR) +7021-79740-0010-1394: (DELIA->DELHIA) CAME TO THE (PARLOR->PARLOUR) AND WITH AN AIR OF GREAT MYSTERY BECKONED THE CHILDREN ASIDE AND SAID TO THEM IN A WHISPER LEAVE (ANDELLA->AND ELLA) AND ROSALIE HERE AND DON'T SAY A WORD TO THEM +7021-79740-0011-1395: SO SAYING SHE LED THE WAY ON TIPTOE FOLLOWED BY THE CHILDREN OUT OF THE ROOM AND ROUND BY A CIRCUITOUS ROUTE TO THE PIAZZA THERE +7021-79740-0012-1396: SAID SHE POINTING TO THE PLAYTHINGS SEE +7021-79740-0013-1397: PUT THESE PLAYTHINGS ALL AWAY QUICK AND CAREFULLY AND WE WILL NOT LET THEM KNOW (ANY THING->ANYTHING) ABOUT YOUR LEAVING THEM OUT +7021-79740-0014-1398: AND THIS METHOD OF TREATING THE CASE WAS MUCH MORE EFFECTUAL IN MAKING THEM DISPOSED TO AVOID COMMITTING A SIMILAR FAULT ANOTHER TIME THAN ANY DIRECT REBUKES OR EXPRESSIONS OF DISPLEASURE ADDRESSED PERSONALLY TO THEM WOULD HAVE BEEN +7021-79759-0000-1378: NATURE OF THE EFFECT PRODUCED BY EARLY IMPRESSIONS +7021-79759-0001-1379: THAT IS COMPARATIVELY NOTHING +7021-79759-0002-1380: THEY ARE CHIEFLY FORMED FROM COMBINATIONS OF THE IMPRESSIONS MADE IN CHILDHOOD +7021-79759-0003-1381: VAST IMPORTANCE AND INFLUENCE OF THIS MENTAL FURNISHING +7021-79759-0004-1382: WITHOUT GOING TO ANY SUCH EXTREME AS THIS WE CAN EASILY SEE ON REFLECTION HOW VAST AN INFLUENCE ON THE IDEAS AND CONCEPTIONS AS WELL AS ON THE PRINCIPLES OF ACTION (IN->AND) MATURE YEARS MUST BE EXERTED BY THE NATURE AND CHARACTER OF THE IMAGES WHICH THE PERIOD OF INFANCY AND CHILDHOOD (IMPRESSES->IMPRESS) UPON THE MIND +7021-79759-0005-1383: THE PAIN PRODUCED BY AN ACT OF HASTY AND ANGRY VIOLENCE TO WHICH A FATHER SUBJECTS HIS SON MAY SOON PASS AWAY BUT THE MEMORY OF IT DOES NOT PASS AWAY WITH THE PAIN +7021-85628-0000-1409: BUT (ANDERS->ANDREWS) CARED NOTHING ABOUT THAT +7021-85628-0001-1410: HE MADE A BOW SO DEEP THAT HIS BACK CAME NEAR BREAKING AND HE WAS DUMBFOUNDED I CAN TELL YOU WHEN HE SAW IT WAS NOBODY BUT (ANDERS->ANDREW'S) +7021-85628-0002-1411: HE WAS SUCH A BIG BOY THAT HE WORE HIGH BOOTS AND CARRIED A JACK KNIFE +7021-85628-0003-1412: NOW THIS KNIFE WAS A SPLENDID ONE THOUGH HALF THE BLADE WAS GONE AND THE HANDLE WAS A LITTLE CRACKED AND (ANDERS->ANDREWS) KNEW THAT ONE IS ALMOST A MAN AS SOON AS ONE HAS A (JACK KNIFE->JACKKNIFE) +7021-85628-0004-1413: YES WHY NOT THOUGHT ANDERS +7021-85628-0005-1414: SEEING THAT I AM SO FINE I MAY AS WELL GO AND VISIT THE KING +7021-85628-0006-1415: I AM GOING TO THE COURT BALL ANSWERED (ANDERS->ANDREWS) +7021-85628-0007-1416: AND SHE TOOK (ANDERS->ANDREW'S) HAND AND WALKED WITH HIM UP THE BROAD MARBLE STAIRS WHERE SOLDIERS WERE POSTED AT EVERY THIRD STEP AND THROUGH THE MAGNIFICENT HALLS WHERE COURTIERS IN SILK AND VELVET STOOD BOWING WHEREVER HE WENT +7021-85628-0008-1417: FOR LIKE AS NOT THEY MUST HAVE THOUGHT HIM A PRINCE WHEN THEY SAW HIS FINE CAP +7021-85628-0009-1418: AT THE FARTHER END OF THE LARGEST HALL A TABLE WAS SET WITH GOLDEN CUPS AND GOLDEN PLATES IN LONG ROWS +7021-85628-0010-1419: ON HUGE SILVER PLATTERS WERE PYRAMIDS OF TARTS AND CAKES AND RED WINE SPARKLED IN GLITTERING DECANTERS +7021-85628-0011-1420: THE PRINCESS SAT DOWN UNDER A BLUE CANOPY WITH BOUQUETS OF ROSES AND SHE LET (ANDERS->ANDREW) SIT IN A GOLDEN CHAIR BY HER SIDE +7021-85628-0012-1421: BUT YOU MUST NOT EAT WITH 
YOUR CAP ON YOUR HEAD SHE SAID AND WAS GOING TO TAKE IT OFF +7021-85628-0013-1422: THE PRINCESS CERTAINLY WAS BEAUTIFUL AND HE WOULD HAVE DEARLY LIKED TO BE KISSED BY HER BUT THE CAP WHICH HIS MOTHER HAD MADE HE WOULD NOT GIVE UP ON ANY CONDITION +7021-85628-0014-1423: HE ONLY SHOOK HIS HEAD +7021-85628-0015-1424: WELL BUT NOW SAID THE PRINCESS AND SHE FILLED HIS POCKETS WITH CAKES AND PUT HER OWN HEAVY GOLD CHAIN AROUND HIS NECK AND BENT DOWN AND KISSED HIM +7021-85628-0016-1425: THAT IS A VERY FINE CAP YOU HAVE HE SAID +7021-85628-0017-1426: SO IT IS SAID (ANDERS->ANDREWS) +7021-85628-0018-1427: AND IT IS MADE OF MOTHER'S BEST YARN AND SHE KNITTED IT HERSELF AND EVERYBODY WANTS TO GET IT AWAY FROM ME +7021-85628-0019-1428: WITH ONE JUMP (ANDERS->ANDREWS) GOT OUT OF HIS CHAIR +7021-85628-0020-1429: HE DARTED LIKE AN ARROW THROUGH ALL THE HALLS DOWN ALL THE STAIRS AND ACROSS THE YARD +7021-85628-0021-1430: HE STILL HELD ON TO IT WITH BOTH HANDS AS HE RUSHED INTO HIS MOTHER'S COTTAGE +7021-85628-0022-1431: AND ALL HIS BROTHERS AND SISTERS STOOD ROUND AND LISTENED WITH THEIR MOUTHS OPEN +7021-85628-0023-1432: BUT WHEN HIS BIG BROTHER HEARD THAT HE HAD REFUSED TO GIVE HIS CAP FOR A KING'S GOLDEN CROWN HE SAID THAT ANDERS WAS A STUPID +7021-85628-0024-1433: (ANDERS->ANDREW'S) FACE GREW RED +7021-85628-0025-1434: BUT HIS MOTHER HUGGED HIM CLOSE +7021-85628-0026-1435: NO MY LITTLE (SON->FUN) SHE SAID +7021-85628-0027-1436: IF YOU DRESSED IN SILK AND GOLD FROM TOP TO TOE YOU COULD NOT LOOK ANY NICER THAN IN YOUR LITTLE RED CAP +7127-75946-0000-467: AT THE CONCLUSION OF THE BANQUET WHICH WAS SERVED AT FIVE O'CLOCK THE KING ENTERED HIS CABINET WHERE HIS TAILORS WERE AWAITING HIM FOR THE PURPOSE OF TRYING ON THE CELEBRATED COSTUME REPRESENTING SPRING WHICH WAS THE RESULT OF SO MUCH IMAGINATION AND HAD COST SO MANY EFFORTS OF THOUGHT TO THE DESIGNERS AND ORNAMENT WORKERS OF THE COURT +7127-75946-0001-468: AH VERY WELL +7127-75946-0002-469: LET HIM COME IN THEN SAID THE KING AND AS IF COLBERT HAD BEEN LISTENING AT THE DOOR FOR THE PURPOSE OF KEEPING HIMSELF (AU COURANT->ACCURANT) WITH THE CONVERSATION HE ENTERED AS SOON AS THE KING HAD PRONOUNCED HIS NAME TO THE TWO COURTIERS +7127-75946-0003-470: GENTLEMEN TO YOUR POSTS WHEREUPON SAINT (AIGNAN->ENG YON) AND (VILLEROY->VILLAY) TOOK THEIR LEAVE +7127-75946-0004-471: CERTAINLY SIRE BUT I MUST HAVE MONEY TO DO THAT WHAT +7127-75946-0005-472: WHAT DO YOU MEAN INQUIRED (LOUIS->LOUISE) +7127-75946-0006-473: HE HAS GIVEN THEM WITH TOO MUCH GRACE NOT TO HAVE OTHERS STILL TO GIVE IF THEY ARE REQUIRED WHICH IS THE CASE AT THE PRESENT MOMENT +7127-75946-0007-474: IT IS NECESSARY THEREFORE THAT HE SHOULD COMPLY THE KING FROWNED +7127-75946-0008-475: DOES YOUR MAJESTY THEN NO LONGER BELIEVE THE DISLOYAL ATTEMPT +7127-75946-0009-476: NOT AT ALL YOU ARE ON THE CONTRARY MOST AGREEABLE TO ME +7127-75946-0010-477: YOUR MAJESTY'S PLAN THEN IN THIS AFFAIR IS +7127-75946-0011-478: YOU WILL TAKE THEM FROM MY PRIVATE TREASURE +7127-75946-0012-479: THE NEWS CIRCULATED WITH THE RAPIDITY OF LIGHTNING DURING ITS PROGRESS IT KINDLED EVERY VARIETY OF COQUETRY DESIRE AND WILD AMBITION +7127-75946-0013-480: THE KING HAD COMPLETED HIS (TOILETTE->TOILET) BY NINE O'CLOCK HE APPEARED IN AN OPEN CARRIAGE DECORATED WITH BRANCHES OF TREES AND FLOWERS +7127-75946-0014-481: THE QUEENS HAD TAKEN THEIR SEATS UPON A MAGNIFICENT (DIAS->DAIS) OR PLATFORM ERECTED UPON THE BORDERS OF THE LAKE IN A (THEATER->THEATRE) OF WONDERFUL ELEGANCE OF CONSTRUCTION +7127-75946-0015-482: SUDDENLY FOR THE 
PURPOSE OF RESTORING PEACE AND ORDER (SPRING->SPRANG) ACCOMPANIED BY HIS WHOLE COURT MADE HIS APPEARANCE +7127-75946-0016-483: THE SEASONS ALLIES OF SPRING FOLLOWED HIM CLOSELY TO FORM A QUADRILLE WHICH AFTER MANY WORDS OF MORE OR LESS FLATTERING IMPORT WAS THE COMMENCEMENT OF THE DANCE +7127-75946-0017-484: HIS LEGS THE BEST SHAPED AT COURT WERE DISPLAYED TO GREAT ADVANTAGE IN FLESH (COLORED->COLOURED) SILKEN HOSE (OF->A) SILK SO FINE AND SO TRANSPARENT THAT IT SEEMED ALMOST LIKE FLESH ITSELF +7127-75946-0018-485: THERE WAS SOMETHING IN HIS CARRIAGE WHICH RESEMBLED THE BUOYANT MOVEMENTS OF AN IMMORTAL AND HE DID NOT DANCE SO MUCH AS (SEEM->SEEMED) TO SOAR ALONG +7127-75946-0019-486: YES IT IS SUPPRESSED +7127-75946-0020-487: FAR FROM IT SIRE YOUR MAJESTY (HAVING->HEAVEN) GIVEN NO DIRECTIONS ABOUT IT THE MUSICIANS HAVE RETAINED IT +7127-75946-0021-488: YES SIRE AND READY DRESSED FOR THE BALLET +7127-75946-0022-489: SIRE HE SAID YOUR MAJESTY'S MOST DEVOTED SERVANT APPROACHES TO PERFORM A SERVICE ON THIS OCCASION WITH SIMILAR ZEAL THAT HE HAS ALREADY SHOWN ON THE FIELD OF BATTLE +7127-75946-0023-490: THE KING SEEMED ONLY PLEASED WITH EVERY ONE PRESENT +7127-75946-0024-491: MONSIEUR WAS THE ONLY ONE WHO DID NOT UNDERSTAND ANYTHING ABOUT THE MATTER +7127-75946-0025-492: THE BALLET BEGAN THE EFFECT WAS MORE THAN BEAUTIFUL +7127-75946-0026-493: WHEN THE MUSIC BY ITS BURSTS OF MELODY CARRIED AWAY THESE ILLUSTRIOUS DANCERS WHEN (THE->THIS) SIMPLE UNTUTORED PANTOMIME OF THAT PERIOD ONLY THE MORE NATURAL ON ACCOUNT OF THE VERY INDIFFERENT ACTING OF THE AUGUST ACTORS HAD REACHED ITS CULMINATING POINT OF TRIUMPH THE (THEATER->THEATRE) SHOOK WITH TUMULTUOUS APPLAUSE +7127-75946-0027-494: DISDAINFUL OF A SUCCESS OF WHICH MADAME SHOWED NO (ACKNOWLEDGEMENT->ACKNOWLEDGMENT) HE THOUGHT OF NOTHING BUT BOLDLY REGAINING THE (MARKED->MARKET) PREFERENCE OF THE PRINCESS +7127-75946-0028-495: BY DEGREES ALL HIS HAPPINESS ALL HIS BRILLIANCY SUBSIDED INTO REGRET AND UNEASINESS SO THAT HIS LIMBS LOST THEIR POWER HIS ARMS HUNG HEAVILY BY HIS SIDES AND HIS HEAD DROOPED AS THOUGH HE WAS STUPEFIED +7127-75946-0029-496: THE KING WHO HAD FROM THIS MOMENT BECOME IN REALITY THE PRINCIPAL DANCER IN THE QUADRILLE CAST A LOOK UPON HIS VANQUISHED RIVAL +7127-75947-0000-426: EVERY ONE COULD OBSERVE HIS AGITATION AND PROSTRATION A PROSTRATION WHICH WAS INDEED THE MORE REMARKABLE SINCE PEOPLE WERE NOT ACCUSTOMED TO SEE HIM WITH HIS ARMS HANGING LISTLESSLY BY HIS SIDE HIS HEAD BEWILDERED AND HIS EYES WITH ALL THEIR BRIGHT INTELLIGENCE (BEDIMMED->BE DIMMED) +7127-75947-0001-427: UPON THIS MADAME DEIGNED TO TURN HER EYES LANGUISHINGLY TOWARDS THE COMTE OBSERVING +7127-75947-0002-428: DO YOU THINK SO SHE REPLIED WITH INDIFFERENCE +7127-75947-0003-429: YES THE CHARACTER WHICH YOUR ROYAL HIGHNESS ASSUMED IS IN PERFECT HARMONY WITH YOUR OWN +7127-75947-0004-430: EXPLAIN YOURSELF +7127-75947-0005-431: I ALLUDE TO THE GODDESS +7127-75947-0006-432: THE PRINCESS INQUIRED NO +7127-75947-0007-433: SHE THEN ROSE HUMMING THE AIR TO WHICH SHE WAS PRESENTLY GOING TO DANCE +7127-75947-0008-434: THE ARROW PIERCED HIS HEART AND WOUNDED HIM MORTALLY +7127-75947-0009-435: A QUARTER OF AN HOUR AFTERWARDS HE RETURNED TO THE (THEATER->THEATRE) BUT IT WILL BE READILY BELIEVED THAT IT WAS ONLY A POWERFUL EFFORT OF REASON OVER HIS GREAT EXCITEMENT THAT ENABLED HIM TO GO BACK OR PERHAPS FOR LOVE IS THUS STRANGELY CONSTITUTED HE FOUND IT IMPOSSIBLE EVEN TO REMAIN MUCH LONGER SEPARATED FROM (THE->THEIR) PRESENCE OF ONE WHO HAD BROKEN HIS HEART 
+7127-75947-0010-436: WHEN SHE PERCEIVED THE YOUNG MAN SHE ROSE LIKE A WOMAN SURPRISED IN THE MIDST OF IDEAS SHE WAS DESIROUS OF CONCEALING FROM HERSELF +7127-75947-0011-437: REMAIN I IMPLORE YOU THE EVENING IS MOST LOVELY +7127-75947-0012-438: INDEED AH +7127-75947-0013-439: I REMEMBER NOW AND I CONGRATULATE MYSELF DO YOU LOVE ANY ONE +7127-75947-0014-440: FORGIVE ME I HARDLY KNOW WHAT I AM SAYING A THOUSAND TIMES FORGIVE ME MADAME WAS RIGHT QUITE RIGHT THIS BRUTAL EXILE HAS COMPLETELY TURNED MY BRAIN +7127-75947-0015-441: THERE CANNOT BE A DOUBT HE RECEIVED YOU KINDLY FOR IN FACT YOU RETURNED WITHOUT HIS PERMISSION +7127-75947-0016-442: OH MADEMOISELLE WHY HAVE I NOT A DEVOTED SISTER OR A TRUE FRIEND SUCH AS YOURSELF +7127-75947-0017-443: WHAT ALREADY HERE THEY SAID TO HER +7127-75947-0018-444: I HAVE BEEN HERE THIS QUARTER OF AN HOUR REPLIED LA (VALLIERE->VALLIERS) +7127-75947-0019-445: DID NOT THE DANCING AMUSE YOU NO +7127-75947-0020-446: NO MORE THAN THE DANCING +7127-75947-0021-447: LA (VALLIERE->VALLIERS) IS QUITE A (POETESS->POETES) SAID (TONNAY CHARENTE->TONY CHARLET) +7127-75947-0022-448: I AM A WOMAN AND THERE ARE FEW LIKE ME WHOEVER LOVES ME FLATTERS ME WHOEVER FLATTERS ME PLEASES ME AND WHOEVER PLEASES WELL SAID MONTALAIS YOU DO NOT FINISH +7127-75947-0023-449: IT IS TOO DIFFICULT REPLIED MADEMOISELLE (DE TONNAY CHARENTE->DENISCHALANT) LAUGHING LOUDLY +7127-75947-0024-450: LOOK YONDER DO YOU NOT SEE THE MOON SLOWLY RISING SILVERING THE TOPMOST BRANCHES OF THE CHESTNUTS AND THE (OAKS->YOLKS) +7127-75947-0025-451: EXQUISITE SOFT TURF OF THE WOODS THE HAPPINESS WHICH YOUR FRIENDSHIP CONFERS UPON ME +7127-75947-0026-452: WELL SAID MADEMOISELLE (DE TONNAY CHARENTE->DENISCHALANT) I ALSO THINK A GOOD DEAL BUT I TAKE CARE +7127-75947-0027-453: TO SAY NOTHING SAID MONTALAIS SO THAT WHEN MADEMOISELLE (DE TONNAY CHARENTE->DENISCHERANT) THINKS (ATHENAIS->ETHNE) IS THE ONLY ONE WHO KNOWS IT +7127-75947-0028-454: QUICK QUICK THEN AMONG THE HIGH REED GRASS SAID MONTALAIS STOOP (ATHENAIS->ETHINE) YOU ARE SO TALL +7127-75947-0029-455: THE YOUNG GIRLS HAD INDEED MADE THEMSELVES SMALL INDEED INVISIBLE +7127-75947-0030-456: SHE WAS HERE JUST NOW SAID THE COUNT +7127-75947-0031-457: YOU ARE POSITIVE THEN +7127-75947-0032-458: YES BUT PERHAPS I FRIGHTENED HER (IN->AND) WHAT WAY +7127-75947-0033-459: HOW IS IT LA (VALLIERE->VALLIERS) SAID MADEMOISELLE (DE TONNAY CHARENTE->DENISCHANT) THAT THE VICOMTE DE (BRAGELONNE->BREG ALONE) SPOKE OF YOU AS LOUISE +7127-75947-0034-460: IT SEEMS THE KING WILL NOT CONSENT TO IT +7127-75947-0035-461: GOOD GRACIOUS HAS THE KING ANY RIGHT TO INTERFERE IN MATTERS OF THAT KIND +7127-75947-0036-462: I GIVE MY CONSENT +7127-75947-0037-463: OH I AM SPEAKING SERIOUSLY REPLIED MONTALAIS AND MY OPINION IN THIS CASE IS QUITE AS GOOD AS THE (KING'S->KING AS) I SUPPOSE IS IT NOT LOUISE +7127-75947-0038-464: LET US RUN THEN SAID ALL THREE AND GRACEFULLY LIFTING UP THE LONG SKIRTS OF THEIR SILK DRESSES THEY LIGHTLY RAN ACROSS THE OPEN SPACE BETWEEN THE LAKE AND THE THICKEST COVERT OF THE PARK +7127-75947-0039-465: IN FACT THE SOUND OF MADAME'S AND THE QUEEN'S CARRIAGES COULD BE HEARD IN THE DISTANCE UPON THE HARD DRY GROUND OF THE ROADS FOLLOWED BY THE (MOUNTED->MOUNTAIN) CAVALIERS +7127-75947-0040-466: IN THIS WAY THE FETE OF THE WHOLE COURT WAS A FETE ALSO FOR THE MYSTERIOUS INHABITANTS OF THE FOREST FOR CERTAINLY THE DEER IN THE BRAKE THE PHEASANT ON THE BRANCH THE FOX IN ITS HOLE WERE ALL LISTENING +7176-88083-0000-707: ALL ABOUT HIM WAS A TUMULT OF BRIGHT AND BROKEN COLOR 
SCATTERED (IN->AND) BROAD SPLASHES +7176-88083-0001-708: THE (MERGANSER->MERGANCER) HAD A (CRESTED->CRUSTED) HEAD OF IRIDESCENT GREEN BLACK A BROAD COLLAR OF LUSTROUS WHITE BLACK BACK BLACK AND WHITE WINGS WHITE BELLY SIDES FINELY PENCILLED IN BLACK AND WHITE AND A BREAST OF RICH CHESTNUT RED STREAKED WITH BLACK +7176-88083-0002-709: HIS FEET WERE RED HIS LONG NARROW BEAK WITH ITS (SAW->SALL) TOOTHED EDGES AND SHARP HOOKED TIP WAS BRIGHT RED +7176-88083-0003-710: BUT HERE HE WAS AT A TERRIBLE DISADVANTAGE AS COMPARED WITH THE OWLS HAWKS AND EAGLES HE HAD NO RENDING CLAWS +7176-88083-0004-711: BUT SUDDENLY STRAIGHT AND SWIFT AS A DIVING (CORMORANT->COMRADE) HE SHOT DOWN INTO THE TORRENT AND DISAPPEARED BENEATH THE SURFACE +7176-88083-0005-712: ONCE FAIRLY A WING HOWEVER HE WHEELED AND MADE BACK HURRIEDLY FOR HIS PERCH +7176-88083-0006-713: IT MIGHT HAVE SEEMED THAT A TROUT OF THIS SIZE WAS A FAIRLY SUBSTANTIAL MEAL +7176-88083-0007-714: BUT SUCH WAS HIS KEENNESS THAT EVEN WHILE THE WIDE FLUKES OF HIS ENGORGED VICTIM WERE STILL STICKING OUT AT THE CORNERS OF HIS BEAK HIS FIERCE RED EYES WERE ONCE MORE PEERING DOWNWARD INTO THE TORRENT IN SEARCH OF FRESH PREY +7176-88083-0008-715: IN DESPAIR HE HURLED HIMSELF DOWNWARD TOO SOON +7176-88083-0009-716: THE GREAT HAWK (FOLLOWED->FOWLED) HURRIEDLY TO RETRIEVE HIS PREY FROM THE GROUND +7176-88083-0010-717: THE CAT GROWLED SOFTLY PICKED UP THE PRIZE IN HER JAWS AND TROTTED INTO THE BUSHES TO DEVOUR IT +7176-88083-0011-718: IN FACT HE HAD JUST FINISHED IT THE LAST OF THE TROUT'S TAIL HAD JUST VANISHED WITH A SPASM DOWN HIS STRAINED GULLET WHEN THE BAFFLED HAWK CAUGHT SIGHT OF HIM AND SWOOPED +7176-88083-0012-719: THE HAWK ALIGHTED ON THE DEAD BRANCH AND SAT UPRIGHT MOTIONLESS AS IF SURPRISED +7176-88083-0013-720: LIKE HIS UNFORTUNATE LITTLE COUSIN THE TEAL HE TOO HAD FELT THE FEAR OF DEATH SMITTEN INTO HIS HEART AND WAS HEADING DESPERATELY FOR THE REFUGE OF SOME DARK OVERHANGING BANK DEEP FRINGED WITH WEEDS WHERE THE DREADFUL EYE OF THE HAWK SHOULD NOT DISCERN HIM +7176-88083-0014-721: THE HAWK SAT UPON THE BRANCH AND WATCHED HIS QUARRY SWIMMING BENEATH THE SURFACE +7176-88083-0015-722: ALMOST INSTANTLY HE WAS FORCED TO THE TOP +7176-88083-0016-723: STRAIGHTWAY (*->IN) THE HAWK GLIDED FROM HIS PERCH AND DARTED AFTER HIM +7176-88083-0017-724: BUT AT THIS POINT IN THE RAPIDS IT WAS IMPOSSIBLE FOR HIM TO STAY DOWN +7176-88083-0018-725: BUT THIS FREQUENTER OF THE HEIGHTS OF AIR FOR ALL HIS SAVAGE (VALOR->VALOUR) WAS TROUBLED AT THE LEAPING WAVES AND THE TOSSING FOAM OF THESE MAD RAPIDS HE DID NOT UNDERSTAND THEM +7176-88083-0019-726: AS HE FLEW HIS (DOWN REACHING->DOWNREACHING) CLUTCHING TALONS WERE NOT HALF A YARD ABOVE THE FUGITIVE'S HEAD +7176-88083-0020-727: WHERE THE (WAVES->WAVE IS) FOR AN INSTANT SANK THEY CAME CLOSER BUT NOT QUITE WITHIN GRASPING REACH +7176-88083-0021-728: BUT AS BEFORE THE LEAPING WAVES OF THE RAPIDS WERE TOO MUCH FOR HIS PURSUER AND HE WAS ABLE TO FLAP HIS WAY ONWARD IN A CLOUD OF FOAM WHILE DOOM HUNG LOW ABOVE HIS HEAD YET HESITATED TO STRIKE +7176-88083-0022-729: THE HAWK EMBITTERED BY THE LOSS OF HIS FIRST QUARRY HAD BECOME AS DOGGED IN PURSUIT AS A WEASEL NOT TO BE SHAKEN OFF OR EVADED OR DECEIVED +7176-88083-0023-730: HE HAD A LOT OF LINE OUT AND THE PLACE WAS NONE TOO FREE FOR A LONG CAST BUT HE WAS IMPATIENT TO DROP HIS FLIES AGAIN ON THE SPOT WHERE THE BIG FISH WAS FEEDING +7176-88083-0024-731: THE LAST DROP FLY AS LUCK WOULD HAVE IT CAUGHT JUST IN THE CORNER OF THE HAWK'S ANGRILY OPEN BEAK HOOKING ITSELF FIRMLY 
+7176-88083-0025-732: AT THE SUDDEN SHARP STING OF IT THE GREAT BIRD TURNED HIS HEAD AND NOTICED FOR THE FIRST TIME THE FISHERMAN STANDING ON THE BANK +7176-88083-0026-733: THE DRAG UPON HIS BEAK AND THE LIGHT CHECK UPON HIS WINGS WERE INEXPLICABLE TO HIM AND APPALLING +7176-88083-0027-734: (THEN->THAN) THE LEADER PARTED FROM THE LINE +7176-92135-0000-661: HE IS A WELCOME FIGURE AT THE GARDEN PARTIES OF THE ELECT WHO ARE ALWAYS READY TO ENCOURAGE HIM BY ACCEPTING FREE SEATS FOR HIS PLAY ACTOR MANAGERS NOD TO HIM EDITORS ALLOW HIM TO CONTRIBUTE WITHOUT CHARGE TO A (SYMPOSIUM->SUPPOSIUM) ON THE PRICE OF GOLF BALLS +7176-92135-0001-662: IN SHORT HE BECOMES A PROMINENT FIGURE IN LONDON SOCIETY AND IF HE IS NOT CAREFUL SOMEBODY WILL SAY SO +7176-92135-0002-663: BUT EVEN THE UNSUCCESSFUL DRAMATIST HAS HIS MOMENTS +7176-92135-0003-664: (YOUR->YOU ARE) PLAY MUST BE NOT MERELY A GOOD PLAY BUT A SUCCESSFUL ONE +7176-92135-0004-665: FRANKLY I CANNOT ALWAYS SAY +7176-92135-0005-666: BUT SUPPOSE YOU SAID I'M FOND OF WRITING MY PEOPLE ALWAYS SAY MY LETTERS HOME ARE GOOD ENOUGH FOR PUNCH +7176-92135-0006-667: I'VE GOT A LITTLE IDEA FOR A PLAY ABOUT A MAN AND A WOMAN AND ANOTHER WOMAN AND BUT PERHAPS (I'D->I) BETTER KEEP THE PLOT A SECRET FOR THE MOMENT +7176-92135-0007-668: ANYHOW IT'S (*->A) JOLLY EXCITING AND I CAN DO THE DIALOGUE ALL RIGHT +7176-92135-0008-669: LEND ME YOUR EAR FOR TEN MINUTES AND YOU SHALL LEARN JUST WHAT STAGECRAFT IS +7176-92135-0009-670: AND I SHOULD BEGIN WITH A SHORT (HOMILY->HUMMILY) ON SOLILOQUY +7176-92135-0010-671: (HAM->HIM) TO BE OR NOT TO BE +7176-92135-0011-672: NOW THE OBJECT OF THIS (SOLILOQUY->SOLOQUIE) IS PLAIN +7176-92135-0012-673: INDEED IRRESOLUTION (BEING->MEAN) THE (KEYNOTE->KEEN OUT) OF HAMLET'S SOLILOQUY A CLEVER PLAYER COULD TO SOME EXTENT INDICATE THE WHOLE THIRTY LINES BY A (SILENT->SILAGE) WORKING OF THE (JAW->JOB) BUT AT THE SAME TIME IT WOULD BE IDLE TO DENY THAT HE WOULD MISS THE FINER SHADES OF THE DRAMATIST'S MEANING +7176-92135-0013-674: WE MODERNS HOWEVER SEE THE ABSURDITY OF IT +7176-92135-0014-675: IF IT BE GRANTED FIRST THAT THE THOUGHTS OF A CERTAIN CHARACTER SHOULD BE KNOWN TO THE AUDIENCE AND SECONDLY THAT SOLILOQUY OR THE HABIT OF THINKING ALOUD IS IN OPPOSITION TO MODERN STAGE (TECHNIQUE HOW->TYPE HALL) SHALL A SOLILOQUY BE AVOIDED WITHOUT DAMAGE TO THE PLAY +7176-92135-0015-676: AND SO ON TILL YOU GET (TO->*) THE END (WHEN OPHELIA->ONE OF VILLIA) MIGHT SAY AH YES OR SOMETHING NON COMMITTAL OF THAT SORT +7176-92135-0016-677: THIS WOULD BE AN EASY WAY OF DOING IT BUT IT WOULD NOT BE THE BEST WAY FOR THE REASON THAT IT IS TOO EASY TO CALL ATTENTION TO ITSELF +7176-92135-0017-678: IN THE OLD BADLY MADE PLAY IT WAS FREQUENTLY NECESSARY FOR ONE OF THE CHARACTERS TO TAKE THE AUDIENCE INTO HIS CONFIDENCE +7176-92135-0018-679: IN THE MODERN WELL CONSTRUCTED PLAY HE SIMPLY RINGS UP AN IMAGINARY CONFEDERATE AND TELLS HIM WHAT HE IS GOING TO DO COULD ANYTHING BE MORE NATURAL +7176-92135-0019-680: I WANT DOUBLE NINE (HAL LO->HELLO) +7176-92135-0020-681: (DOUBLE->DOUBLED) NINE TWO THREE (ELSINORE->ELZINORE) DOUBLE (NINE->NOT) YES (HALLO->HELLO) IS THAT YOU HORATIO HAMLET SPEAKING +7176-92135-0021-682: I SAY I'VE BEEN (WONDERING->WANDERING) ABOUT THIS BUSINESS +7176-92135-0022-683: TO BE OR NOT TO BE THAT IS THE QUESTION WHETHER TIS NOBLER IN THE MIND TO SUFFER THE SLINGS AND ARROWS WHAT NO HAMLET SPEAKING +7176-92135-0023-684: YOU GAVE ME DOUBLE FIVE I WANT DOUBLE NINE (HALLO->HELLO) IS THAT YOU HORATIO HAMLET SPEAKING +7176-92135-0024-685: TO BE OR NOT 
TO BE THAT IS THE QUESTION WHETHER TIS NOBLER +7176-92135-0025-686: IT IS TO LET HAMLET IF THAT (HAPPEN->HAPPENED) TO BE THE NAME OF YOUR CHARACTER (ENTER WITH->INTO) A SMALL DOG PET FALCON MONGOOSE TAME BEAR (OR WHATEVER->ORDER) ANIMAL IS MOST IN KEEPING WITH THE PART AND CONFIDE IN THIS ANIMAL SUCH SORROWS HOPES OR SECRET HISTORY AS THE AUDIENCE HAS GOT TO KNOW +7176-92135-0026-687: ENTER HAMLET WITH HIS FAVOURITE (BOAR HOUND->BOARHOUND) +7176-92135-0027-688: LADY (LARKSPUR STARTS->LARKSBURG START) SUDDENLY AND TURNS (TOWARDS->TOWARD) HIM +7176-92135-0028-689: (LARKSPUR BIT->LARKSBUR BID) ME AGAIN THIS MORNING FOR THE THIRD TIME +7176-92135-0029-690: I WANT TO GET AWAY FROM IT ALL (SWOONS->SWOON) +7176-92135-0030-691: ENTER LORD ARTHUR (FLUFFINOSE->FLUFFINO'S) +7176-92135-0031-692: AND THERE YOU ARE YOU WILL OF COURSE APPRECIATE THAT THE (UNFINISHED SENTENCES->UNFINISHANCES) NOT ONLY SAVE TIME BUT ALSO MAKE THE MANOEUVRING VERY MUCH MORE NATURAL +7176-92135-0032-693: HOW YOU MAY BE WONDERING ARE (YOU->YE) TO BEGIN YOUR MASTERPIECE +7176-92135-0033-694: RELAPSES INTO SILENCE FOR THE REST OF THE EVENING +7176-92135-0034-695: THE DUCHESS OF SOUTHBRIDGE (TO->TWO) LORD REGGIE OH REGGIE WHAT DID YOU SAY +7176-92135-0035-696: THEN LORD (TUPPENY->TOPPENNY) WELL WHAT ABOUT AUCTION +7176-92135-0036-697: THE CROWD DRIFTS OFF (LEAVING->LEAPING) THE HERO AND HEROINE ALONE IN THE MIDDLE OF THE STAGE AND THEN YOU CAN BEGIN +7176-92135-0037-698: THEN IS THE TIME TO INTRODUCE A MEAL ON THE STAGE +7176-92135-0038-699: A (STAGE->SAGE) MEAL IS POPULAR BECAUSE IT (PROVES->PROVED) TO THE AUDIENCE THAT THE ACTORS EVEN WHEN CALLED CHARLES (HAWTREY->HOLTREE) OR (OWEN NARES->OWENAIRS) ARE REAL PEOPLE JUST LIKE YOU AND ME +7176-92135-0039-700: (TEA->T) PLEASE MATTHEWS BUTLER IMPASSIVELY +7176-92135-0040-701: HOSTESS REPLACES LUMP AND INCLINES EMPTY TEAPOT OVER TRAY FOR A MOMENT THEN (HANDS HIM->HANDSOME) A CUP PAINTED BROWN INSIDE (THUS->LUST) DECEIVING THE GENTLEMAN WITH THE TELESCOPE IN THE UPPER CIRCLE +7176-92135-0041-702: RE ENTER BUTLER AND THREE FOOTMEN WHO (REMOVE->MOVED) THE TEA THINGS HOSTESS (TO GUEST->TWO GUESTS) +7176-92135-0042-703: (IN->AND) NOVELS THE HERO HAS OFTEN PUSHED HIS MEALS AWAY UNTASTED BUT NO (STAGE->STEED) HERO WOULD DO ANYTHING SO UNNATURAL AS THIS +7176-92135-0043-704: TWO (BITES->WHITES) ARE MADE AND THE BREAD IS CRUMBLED WITH AN AIR OF GREAT EAGERNESS INDEED ONE FEELS THAT IN REAL LIFE THE GUEST WOULD CLUTCH HOLD OF THE FOOTMAN AND SAY HALF A (MO OLD->MOLD) CHAP I HAVEN'T NEARLY FINISHED BUT THE ACTOR IS BETTER SCHOOLED THAN THIS +7176-92135-0044-705: BUT IT IS (THE->A) CIGARETTE WHICH CHIEFLY HAS BROUGHT THE MODERN DRAMA TO ITS PRESENT STATE OF PERFECTION +7176-92135-0045-706: LORD JOHN TAKING OUT GOLD (CIGARETTE->SICK RED) CASE FROM HIS LEFT HAND UPPER WAISTCOAT POCKET +7729-102255-0000-261: THE BOGUS LEGISLATURE NUMBERED THIRTY SIX MEMBERS +7729-102255-0001-262: THIS WAS AT THE MARCH ELECTION EIGHTEEN FIFTY FIVE +7729-102255-0002-263: THAT SUMMER'S EMIGRATION HOWEVER BEING MAINLY FROM THE FREE STATES GREATLY CHANGED THE RELATIVE STRENGTH OF THE TWO PARTIES +7729-102255-0003-264: FOR GENERAL SERVICE THEREFORE REQUIRING NO SPECIAL EFFORT THE NUMERICAL STRENGTH OF THE FACTIONS WAS ABOUT EQUAL WHILE ON EXTRAORDINARY OCCASIONS THE TWO THOUSAND BORDER RUFFIAN (RESERVE->RESERVED) LYING A LITTLE FARTHER BACK FROM THE STATE LINE COULD AT ANY TIME EASILY TURN THE SCALE +7729-102255-0004-265: THE FREE STATE MEN HAD ONLY THEIR CONVICTIONS THEIR INTELLIGENCE THEIR COURAGE AND THE MORAL 
SUPPORT OF THE NORTH THE CONSPIRACY HAD ITS SECRET COMBINATION THE TERRITORIAL OFFICIALS THE LEGISLATURE THE BOGUS LAWS THE COURTS THE MILITIA OFFICERS THE PRESIDENT AND THE ARMY +7729-102255-0005-266: THIS WAS A FORMIDABLE ARRAY OF ADVANTAGES SLAVERY WAS PLAYING WITH LOADED DICE +7729-102255-0006-267: COMING BY WAY OF THE MISSOURI RIVER TOWNS HE FELL FIRST AMONG BORDER RUFFIAN COMPANIONSHIP AND INFLUENCES AND PERHAPS HAVING HIS INCLINATIONS ALREADY (MOLDED->MOULDED) BY HIS WASHINGTON INSTRUCTIONS HIS EARLY IMPRESSIONS WERE DECIDEDLY ADVERSE TO THE FREE STATE CAUSE +7729-102255-0007-268: HIS RECEPTION SPEECH AT WESTPORT IN WHICH HE MAINTAINED THE LEGALITY OF THE LEGISLATURE AND HIS DETERMINATION TO ENFORCE THEIR LAWS DELIGHTED HIS PRO SLAVERY AUDITORS +7729-102255-0008-269: ALL THE TERRITORIAL DIGNITARIES WERE PRESENT GOVERNOR SHANNON PRESIDED JOHN CALHOUN THE SURVEYOR GENERAL MADE THE PRINCIPAL SPEECH A DENUNCIATION OF THE (ABOLITIONISTS->ABOLITIONIST) SUPPORTING THE (TOPEKA->TOPECA) MOVEMENT CHIEF JUSTICE (LECOMPTE->LE COMTE) DIGNIFIED THE OCCASION WITH APPROVING REMARKS +7729-102255-0009-270: ALL (DISSENT->DESCENT) ALL NON COMPLIANCE ALL HESITATION ALL MERE SILENCE EVEN WERE IN THEIR STRONGHOLD TOWNS LIKE (LEAVENWORTH->LEVIN WORTH) BRANDED AS ABOLITIONISM DECLARED TO BE HOSTILITY TO THE PUBLIC WELFARE AND PUNISHED WITH PROSCRIPTION PERSONAL VIOLENCE EXPULSION AND FREQUENTLY DEATH +7729-102255-0010-271: OF THE (LYNCHINGS->LUNCHINGS) THE MOBS AND THE MURDERS IT WOULD BE IMPOSSIBLE EXCEPT IN A VERY EXTENDED WORK TO NOTE THE FREQUENT AND ATROCIOUS DETAILS +7729-102255-0011-272: THE PRESENT CHAPTERS CAN ONLY TOUCH UPON THE MORE SALIENT MOVEMENTS OF THE CIVIL WAR IN KANSAS WHICH HAPPILY (WERE->ARE) NOT SANGUINARY IF HOWEVER THE INDIVIDUAL AND MORE ISOLATED CASES OF BLOODSHED COULD BE DESCRIBED THEY WOULD SHOW A STARTLING AGGREGATE OF BARBARITY AND (*->A) LOSS OF LIFE FOR OPINION'S SAKE +7729-102255-0012-273: SEVERAL HUNDRED FREE STATE MEN PROMPTLY RESPONDED TO THE SUMMONS +7729-102255-0013-274: IT WAS IN FACT THE BEST WEAPON OF ITS DAY +7729-102255-0014-275: THE LEADERS OF THE CONSPIRACY BECAME DISTRUSTFUL OF THEIR POWER TO CRUSH THE TOWN +7729-102255-0015-276: ONE OF HIS MILITIA GENERALS SUGGESTED THAT THE GOVERNOR SHOULD REQUIRE THE OUTLAWS AT LAWRENCE AND ELSEWHERE TO SURRENDER THE (SHARPS->SHARP'S) RIFLES ANOTHER WROTE ASKING HIM TO CALL OUT THE GOVERNMENT TROOPS AT FORT (LEAVENWORTH->LEVINWORTH) +7729-102255-0016-277: THE GOVERNOR ON HIS PART BECOMING DOUBTFUL OF THE LEGALITY OF EMPLOYING MISSOURI MILITIA TO ENFORCE KANSAS LAWS WAS ALSO EAGER TO SECURE THE HELP OF FEDERAL TROOPS +7729-102255-0017-278: SHERIFF JONES HAD HIS POCKETS ALWAYS FULL OF WRITS ISSUED IN THE SPIRIT OF PERSECUTION BUT WAS OFTEN BAFFLED BY THE SHARP WITS AND READY RESOURCES OF THE FREE STATE PEOPLE AND SOMETIMES DEFIED OUTRIGHT +7729-102255-0018-279: LITTLE BY LITTLE HOWEVER THE LATTER BECAME HEMMED AND BOUND IN THE MESHES OF THE VARIOUS DEVICES AND PROCEEDINGS WHICH THE TERRITORIAL OFFICIALS EVOLVED FROM THE (BOGUS->VOGUS) LAWS +7729-102255-0019-280: TO EMBARRASS THIS DAMAGING EXPOSURE JUDGE (LECOMPTE->LECOMTE) ISSUED A WRIT AGAINST THE EX GOVERNOR ON A FRIVOLOUS CHARGE OF CONTEMPT +7729-102255-0020-281: THE INCIDENT WAS NOT VIOLENT NOR EVEN DRAMATIC NO POSSE WAS (SUMMONED->SUMMON) NO FURTHER EFFORT MADE AND (REEDER->READER) FEARING PERSONAL VIOLENCE SOON FLED IN DISGUISE +7729-102255-0021-282: BUT THE AFFAIR WAS MAGNIFIED AS A CROWNING PROOF THAT THE FREE STATE MEN WERE INSURRECTIONISTS AND OUTLAWS 
+7729-102255-0022-283: FROM THESE AGAIN SPRANG BARRICADED AND FORTIFIED DWELLINGS CAMPS AND (SCOUTING->SCOUT) PARTIES FINALLY CULMINATING (IN->AND) ROVING GUERRILLA (BANDS->VANS) HALF PARTISAN HALF PREDATORY +7729-102255-0023-284: THEIR DISTINCTIVE CHARACTERS HOWEVER DISPLAY ONE BROAD AND UNFAILING DIFFERENCE +7729-102255-0024-285: THE FREE STATE MEN CLUNG TO THEIR PRAIRIE TOWNS AND (PRAIRIE RAVINES->PRAIRINES) WITH ALL THE OBSTINACY AND COURAGE OF TRUE DEFENDERS OF THEIR HOMES AND FIRESIDES +7729-102255-0025-286: (THEIR->THERE) ASSUMED CHARACTER CHANGED WITH THEIR CHANGING OPPORTUNITIES OR NECESSITIES +7729-102255-0026-287: IN THE SHOOTING OF SHERIFF JONES IN LAWRENCE AND IN THE REFUSAL OF EX GOVERNOR (BEEDER->READER) TO ALLOW THE DEPUTY MARSHAL TO ARREST HIM THEY DISCOVERED GRAVE (OFFENSES->OFFENCES) AGAINST THE TERRITORIAL AND (*->THE) UNITED STATES LAWS +7729-102255-0027-288: FOOTNOTE SUMNER TO SHANNON MAY TWELFTH EIGHTEEN FIFTY SIX +7729-102255-0028-289: PRIVATE PERSONS WHO HAD (LEASED->LEAST) THE FREE STATE HOTEL VAINLY BESOUGHT THE VARIOUS AUTHORITIES TO (PREVENT->PRESENT) THE DESTRUCTION OF THEIR PROPERTY +7729-102255-0029-290: TEN DAYS WERE CONSUMED IN THESE NEGOTIATIONS BUT THE SPIRIT OF VENGEANCE REFUSED TO YIELD +7729-102255-0030-291: HE SUMMONED HALF A DOZEN CITIZENS TO JOIN HIS POSSE WHO FOLLOWED OBEYED AND ASSISTED HIM +7729-102255-0031-292: HE CONTINUED HIS PRETENDED SEARCH AND TO GIVE COLOR TO HIS ERRAND MADE (TWO ARRESTS->TO ARREST) +7729-102255-0032-293: THE FREE STATE HOTEL A STONE BUILDING IN DIMENSIONS FIFTY BY SEVENTY FEET THREE STORIES HIGH AND HANDSOMELY FURNISHED PREVIOUSLY OCCUPIED ONLY FOR LODGING ROOMS ON THAT DAY FOR THE FIRST TIME OPENED ITS TABLE ACCOMMODATIONS TO THE PUBLIC AND PROVIDED A FREE DINNER IN HONOR OF THE OCCASION +7729-102255-0033-294: AS HE HAD PROMISED TO PROTECT THE HOTEL THE REASSURED CITIZENS BEGAN TO LAUGH AT THEIR OWN FEARS +7729-102255-0034-295: TO THEIR SORROW THEY WERE SOON UNDECEIVED +7729-102255-0035-296: THE MILITARY FORCE PARTLY RABBLE PARTLY ORGANIZED (HAD->HEAD) MEANWHILE MOVED INTO THE TOWN +7729-102255-0036-297: HE PLANTED (A COMPANY->ACCOMPANIED) BEFORE THE HOTEL AND DEMANDED A SURRENDER OF THE ARMS BELONGING TO THE FREE STATE MILITARY COMPANIES +7729-102255-0037-298: HALF AN HOUR LATER TURNING A DEAF EAR TO ALL REMONSTRANCE HE GAVE THE PROPRIETORS UNTIL FIVE O'CLOCK TO REMOVE THEIR FAMILIES AND PERSONAL PROPERTY FROM THE FREE STATE HOTEL +7729-102255-0038-299: (ATCHISON->ADJUT) WHO HAD BEEN HARANGUING THE MOB PLANTED HIS TWO GUNS BEFORE THE BUILDING AND TRAINED THEM UPON IT +7729-102255-0039-300: THE INMATES BEING REMOVED AT THE APPOINTED HOUR A FEW CANNON BALLS WERE FIRED THROUGH THE STONE WALLS +7729-102255-0040-301: IN THIS INCIDENT CONTRASTING THE CREATIVE AND THE DESTRUCTIVE SPIRIT OF THE FACTIONS THE (EMIGRANT AID->IMMIGRANT AIDS) SOCIETY OF MASSACHUSETTS FINDS ITS MOST HONORABLE AND TRIUMPHANT VINDICATION +7729-102255-0041-302: THE WHOLE PROCEEDING WAS SO CHILDISH THE MISERABLE PLOT SO TRANSPARENT THE (OUTRAGE->OUTRAGED) SO GROSS AS TO BRING DISGUST TO THE BETTER CLASS OF BORDER RUFFIANS WHO WERE WITNESSES AND ACCESSORIES +7729-102255-0042-303: (RELOCATED->RE LOCATED) FOOTNOTE GOVERNOR ROBINSON BEING ON HIS WAY EAST THE STEAMBOAT ON WHICH HE WAS (TRAVELING->TRAVELLING) STOPPED AT LEXINGTON MISSOURI +7729-102255-0043-304: IN A FEW DAYS AN OFFICER CAME WITH A REQUISITION FROM GOVERNOR SHANNON AND TOOK THE PRISONER BY (LAND TO->LANDA) WESTPORT AND AFTERWARDS FROM THERE TO (KANSAS->KANSA) CITY (AND LEAVENWORTH->IN 
LEVINWORTH) +7729-102255-0044-305: (HERE HE->HARRY) WAS PLACED IN THE CUSTODY OF CAPTAIN MARTIN OF THE KICKAPOO RANGERS WHO PROVED A KIND JAILER AND MATERIALLY ASSISTED IN PROTECTING HIM FROM THE DANGEROUS INTENTIONS OF THE MOB WHICH AT THAT TIME HELD (LEAVENWORTH->LEVIN WORTH) UNDER (A->THE) REIGN OF TERROR +7729-102255-0045-306: CAPTAIN MARTIN SAID I SHALL GIVE YOU A PISTOL TO HELP PROTECT YOURSELF IF WORSE COMES TO WORST +7729-102255-0046-307: IN THE EARLY MORNING OF THE NEXT DAY MAY TWENTY NINTH A COMPANY OF DRAGOONS WITH ONE EMPTY SADDLE CAME DOWN FROM THE FORT AND WHILE THE PRO SLAVERY MEN STILL SLEPT THE PRISONER AND HIS ESCORT WERE ON THEIR WAY ACROSS THE PRAIRIES TO LECOMPTON IN THE CHARGE OF OFFICERS OF THE UNITED STATES ARMY +8224-274381-0000-1451: THOUGH THROWN INTO PRISON FOR THIS ENTERPRISE AND DETAINED SOME TIME HE WAS NOT DISCOURAGED BUT STILL CONTINUED BY HIS COUNTENANCE AND PROTECTION TO INFUSE SPIRIT INTO THE DISTRESSED ROYALISTS +8224-274381-0001-1452: AMONG OTHER PERSONS OF DISTINCTION WHO UNITED THEMSELVES TO HIM WAS LORD NAPIER OF (MERCHISTON->MURCHISTON) SON OF THE FAMOUS INVENTOR OF THE LOGARITHMS THE PERSON TO WHOM THE TITLE OF A GREAT MAN IS MORE JUSTLY DUE THAN TO ANY OTHER WHOM HIS COUNTRY EVER PRODUCED +8224-274381-0002-1453: WHILE THE FORMER FORETOLD THAT THE SCOTTISH COVENANTERS WERE SECRETLY FORMING A UNION WITH THE ENGLISH PARLIAMENT AND (INCULCATED->INCALCATED) THE NECESSITY OF PREVENTING THEM BY SOME VIGOROUS UNDERTAKING THE LATTER STILL INSISTED THAT EVERY SUCH ATTEMPT WOULD PRECIPITATE THEM INTO MEASURES TO WHICH OTHERWISE THEY WERE NOT PERHAPS INCLINED +8224-274381-0003-1454: THE KING'S EARS WERE NOW OPEN TO MONTROSE'S (COUNSELS->COUNCILS) WHO PROPOSED NONE BUT THE BOLDEST AND MOST DARING AGREEABLY TO THE DESPERATE STATE OF THE ROYAL CAUSE IN SCOTLAND +8224-274381-0004-1455: FIVE HUNDRED MEN MORE WHO HAD BEEN LEVIED BY THE COVENANTERS WERE PERSUADED TO EMBRACE THE ROYAL CAUSE AND WITH THIS COMBINED FORCE HE HASTENED TO ATTACK LORD (ELCHO->ELKOE) WHO LAY AT PERTH WITH AN ARMY OF SIX THOUSAND MEN ASSEMBLED UPON THE FIRST NEWS OF THE IRISH INVASION +8224-274381-0005-1456: DREADING THE SUPERIOR POWER OF ARGYLE WHO HAVING JOINED HIS VASSALS TO A FORCE LEVIED BY THE PUBLIC WAS APPROACHING WITH A CONSIDERABLE ARMY MONTROSE HASTENED (NORTHWARDS->NORTHWARD) IN ORDER TO ROUSE AGAIN THE MARQUIS OF (HUNTLEY->HUNTLY) AND THE GORDONS WHO HAVING BEFORE HASTILY TAKEN ARMS HAD BEEN INSTANTLY SUPPRESSED BY THE COVENANTERS +8224-274381-0006-1457: THIS NOBLEMAN'S CHARACTER THOUGH CELEBRATED FOR POLITICAL COURAGE AND CONDUCT WAS VERY LOW FOR MILITARY PROWESS AND AFTER SOME SKIRMISHES IN WHICH HE WAS WORSTED HE HERE ALLOWED MONTROSE TO ESCAPE HIM +8224-274381-0007-1458: BY QUICK MARCHES THROUGH THESE INACCESSIBLE MOUNTAINS THAT GENERAL FREED HIMSELF FROM THE SUPERIOR FORCES OF THE COVENANTERS +8224-274381-0008-1459: WITH THESE AND SOME (REENFORCEMENTS->REINFORCEMENTS) OF THE (ATHOLEMEN->ETHEL MEN) AND (MACDONALDS->MON DONALDS) WHOM HE HAD RECALLED MONTROSE FELL SUDDENLY UPON ARGYLE'S COUNTRY AND LET LOOSE UPON IT ALL THE RAGE OF WAR CARRYING OFF THE CATTLE BURNING THE HOUSES AND PUTTING THE INHABITANTS TO THE SWORD +8224-274381-0009-1460: THIS SEVERITY BY WHICH MONTROSE SULLIED HIS VICTORIES WAS THE RESULT OF PRIVATE ANIMOSITY AGAINST THE CHIEFTAIN AS MUCH AS OF ZEAL FOR THE PUBLIC CAUSE ARGYLE COLLECTING THREE THOUSAND MEN MARCHED IN QUEST OF THE ENEMY WHO HAD RETIRED WITH THEIR PLUNDER AND HE LAY AT (INNERLOCHY->INERLOCKY) SUPPOSING HIMSELF STILL AT A CONSIDERABLE 
DISTANCE FROM THEM +8224-274381-0010-1461: BY A QUICK AND UNEXPECTED MARCH MONTROSE HASTENED TO (INNERLOCHY->IN A LOCKY) AND PRESENTED HIMSELF IN ORDER OF BATTLE BEFORE THE SURPRISED BUT NOT (AFFRIGHTENED->A FRIGHTENED) COVENANTERS +8224-274381-0011-1462: HIS CONDUCT AND PRESENCE OF MIND IN THIS EMERGENCE APPEARED CONSPICUOUS +8224-274381-0012-1463: MONTROSE WEAK IN CAVALRY HERE LINED HIS TROOPS OF HORSE WITH INFANTRY AND AFTER PUTTING THE ENEMY'S HORSE TO ROUT FELL WITH UNITED FORCE UPON THEIR FOOT WHO WERE ENTIRELY CUT IN PIECES THOUGH WITH THE LOSS OF THE GALLANT LORD GORDON ON THE PART OF THE ROYALISTS +8224-274381-0013-1464: FROM THE SAME MEN NEW REGIMENTS AND NEW COMPANIES WERE FORMED DIFFERENT OFFICERS APPOINTED AND THE WHOLE MILITARY FORCE PUT INTO SUCH HANDS AS THE INDEPENDENTS COULD RELY ON +8224-274381-0014-1465: BESIDES MEMBERS OF PARLIAMENT WHO WERE EXCLUDED MANY OFFICERS UNWILLING TO SERVE UNDER THE NEW GENERALS THREW UP THEIR COMMISSIONS AND (UNWARILY->THEN WARILY) FACILITATED THE PROJECT OF PUTTING THE ARMY ENTIRELY INTO THE HANDS OF THAT FACTION +8224-274381-0015-1466: THOUGH THE DISCIPLINE OF THE FORMER PARLIAMENTARY ARMY WAS NOT CONTEMPTIBLE A MORE EXACT PLAN WAS INTRODUCED AND RIGOROUSLY EXECUTED BY THESE NEW COMMANDERS +8224-274381-0016-1467: VALOR INDEED WAS VERY GENERALLY DIFFUSED OVER THE ONE PARTY AS WELL AS THE OTHER DURING THIS PERIOD DISCIPLINE ALSO WAS ATTAINED BY THE FORCES OF THE PARLIAMENT BUT THE PERFECTION OF THE MILITARY ART IN CONCERTING THE GENERAL PLANS OF ACTION AND THE OPERATIONS OF THE FIELD SEEMS STILL ON BOTH SIDES TO HAVE BEEN IN A GREAT MEASURE WANTING +8224-274381-0017-1468: HISTORIANS AT LEAST PERHAPS FROM THEIR OWN IGNORANCE AND INEXPERIENCE HAVE NOT REMARKED ANY THING BUT A HEADLONG IMPETUOUS CONDUCT EACH PARTY HURRYING TO A BATTLE WHERE VALOR AND FORTUNE CHIEFLY (DETERMINED->DETERMINE) THE SUCCESS +8224-274384-0000-1437: HE PASSED THROUGH HENLEY SAINT (ALBANS->ALBAN'S) AND CAME SO NEAR TO LONDON AS HARROW ON THE HILL +8224-274384-0001-1438: THE SCOTTISH GENERALS AND COMMISSIONERS AFFECTED GREAT SURPRISE ON THE APPEARANCE OF THE KING AND THOUGH THEY PAID HIM ALL THE EXTERIOR RESPECT DUE TO HIS DIGNITY THEY INSTANTLY SET A GUARD UPON HIM UNDER COLOR OF PROTECTION AND MADE HIM IN REALITY A PRISONER +8224-274384-0002-1439: THEY INFORMED THE ENGLISH PARLIAMENT OF THIS UNEXPECTED INCIDENT AND ASSURED THEM THAT THEY HAD ENTERED INTO NO PRIVATE TREATY WITH THE KING +8224-274384-0003-1440: OR HATH HE GIVEN US ANY GIFT +8224-274384-0004-1441: AND THE MEN OF ISRAEL ANSWERED THE MEN OF JUDAH AND SAID WE HAVE TEN PARTS IN THE KING AND WE HAVE ALSO MORE RIGHT IN DAVID THAN YE WHY THEN DID YE DESPISE US THAT OUR ADVICE SHOULD NOT BE FIRST HAD IN BRINGING BACK OUR KING +8224-274384-0005-1442: ANOTHER PREACHER AFTER REPROACHING HIM TO HIS FACE WITH HIS MISGOVERNMENT ORDERED THIS (PSALM->SUM) TO BE SUNG +8224-274384-0006-1443: THE KING STOOD UP AND CALLED FOR THAT PSALM WHICH BEGINS WITH THESE WORDS +8224-274384-0007-1444: HAVE MERCY LORD ON ME I PRAY FOR MEN (WOULD->WITH) ME DEVOUR +8224-274384-0008-1445: THE GOOD NATURED AUDIENCE IN PITY TO (FALLEN->FALL AND) MAJESTY SHOWED FOR ONCE GREATER DEFERENCE TO THE KING THAN TO THE MINISTER AND SUNG THE PSALM WHICH THE FORMER HAD CALLED FOR +8224-274384-0009-1446: THE PARLIAMENT AND THE SCOTS LAID THEIR PROPOSALS BEFORE THE KING +8224-274384-0010-1447: BEFORE THE SETTLEMENT OF TERMS THE ADMINISTRATION MUST BE POSSESSED ENTIRELY BY THE PARLIAMENTS OF BOTH KINGDOMS AND HOW INCOMPATIBLE THAT SCHEME WITH THE LIBERTY 
OF THE KING IS EASILY IMAGINED +8224-274384-0011-1448: THE ENGLISH IT IS EVIDENT HAD THEY NOT BEEN PREVIOUSLY ASSURED OF RECEIVING THE KING WOULD NEVER HAVE PARTED WITH SO CONSIDERABLE A SUM AND WHILE THEY WEAKENED THEMSELVES BY THE SAME MEASURE HAVE STRENGTHENED A PEOPLE WITH WHOM THEY MUST AFTERWARDS HAVE SO MATERIAL AN INTEREST TO DISCUSS +8224-274384-0012-1449: IF ANY STILL RETAINED (RANCOR->RANK OR) AGAINST HIM IN HIS PRESENT CONDITION THEY PASSED IN SILENCE WHILE HIS WELL WISHERS MORE GENEROUS THAN PRUDENT ACCOMPANIED HIS MARCH WITH TEARS WITH ACCLAMATIONS AND WITH PRAYERS FOR HIS SAFETY +8224-274384-0013-1450: HIS DEATH IN THIS CONJUNCTURE WAS A PUBLIC MISFORTUNE +8230-279154-0000-617: THE ANALYSIS OF KNOWLEDGE WILL OCCUPY US UNTIL THE END OF THE THIRTEENTH LECTURE AND IS THE MOST DIFFICULT PART OF OUR WHOLE ENTERPRISE +8230-279154-0001-618: WHAT IS CALLED PERCEPTION DIFFERS FROM SENSATION BY THE FACT THAT THE SENSATIONAL INGREDIENTS BRING UP HABITUAL ASSOCIATES IMAGES AND EXPECTATIONS OF THEIR USUAL (CORRELATES->COROLLETS) ALL OF WHICH ARE SUBJECTIVELY INDISTINGUISHABLE FROM THE SENSATION +8230-279154-0002-619: WHETHER OR NOT THIS PRINCIPLE IS LIABLE TO EXCEPTIONS (EVERYONE->EVERY ONE) WOULD AGREE THAT (IS->IT) HAS A BROAD MEASURE OF TRUTH THOUGH THE WORD EXACTLY MIGHT SEEM AN OVERSTATEMENT AND IT MIGHT SEEM MORE CORRECT TO SAY THAT IDEAS APPROXIMATELY REPRESENT IMPRESSIONS +8230-279154-0003-620: AND WHAT SORT OF EVIDENCE IS LOGICALLY POSSIBLE +8230-279154-0004-621: THERE IS NO LOGICAL IMPOSSIBILITY IN THE HYPOTHESIS THAT THE WORLD SPRANG INTO BEING FIVE MINUTES AGO EXACTLY AS IT THEN WAS WITH (A->THE) POPULATION THAT REMEMBERED A WHOLLY UNREAL PAST +8230-279154-0005-622: ALL THAT I AM DOING IS TO USE ITS LOGICAL TENABILITY AS A HELP IN THE ANALYSIS OF WHAT OCCURS WHEN WE REMEMBER +8230-279154-0006-623: THE BEHAVIOURIST WHO ATTEMPTS TO MAKE PSYCHOLOGY A RECORD OF (BEHAVIOUR->BEHAVIOR) HAS TO TRUST HIS MEMORY IN MAKING THE RECORD +8230-279154-0007-624: HABIT IS A CONCEPT INVOLVING THE OCCURRENCE OF SIMILAR EVENTS AT DIFFERENT TIMES IF THE (BEHAVIOURIST FEELS->BEHAVIORIST FILLS) CONFIDENT THAT THERE IS SUCH A PHENOMENON AS HABIT THAT CAN ONLY BE BECAUSE HE TRUSTS HIS MEMORY WHEN IT ASSURES HIM THAT THERE HAVE BEEN OTHER TIMES +8230-279154-0008-625: BUT I DO NOT THINK SUCH AN (INFERENCE->EFFERENCE) IS WARRANTED +8230-279154-0009-626: OUR CONFIDENCE OR LACK OF CONFIDENCE IN THE ACCURACY OF A MEMORY IMAGE MUST IN FUNDAMENTAL CASES BE BASED UPON A CHARACTERISTIC OF THE IMAGE ITSELF SINCE WE CANNOT EVOKE THE PAST BODILY AND COMPARE IT WITH THE PRESENT IMAGE +8230-279154-0010-627: WE SOMETIMES HAVE IMAGES THAT ARE BY NO MEANS PECULIARLY VAGUE WHICH YET WE DO NOT TRUST FOR EXAMPLE UNDER THE INFLUENCE OF FATIGUE WE MAY SEE A FRIEND'S FACE VIVIDLY AND CLEARLY BUT HORRIBLY DISTORTED +8230-279154-0011-628: SOME IMAGES LIKE SOME SENSATIONS FEEL VERY FAMILIAR WHILE OTHERS FEEL STRANGE +8230-279154-0012-629: FAMILIARITY IS A (FEELING->FILLING) CAPABLE OF DEGREES +8230-279154-0013-630: IN AN IMAGE OF A WELL KNOWN FACE FOR EXAMPLE SOME PARTS MAY FEEL MORE FAMILIAR THAN OTHERS WHEN THIS HAPPENS WE HAVE MORE BELIEF IN THE ACCURACY OF THE FAMILIAR PARTS THAN IN THAT OF THE UNFAMILIAR PARTS +8230-279154-0014-631: I COME NOW TO THE OTHER CHARACTERISTIC WHICH MEMORY IMAGES MUST HAVE IN ORDER TO ACCOUNT FOR OUR KNOWLEDGE OF THE PAST +8230-279154-0015-632: THEY MUST HAVE SOME CHARACTERISTIC WHICH MAKES US REGARD THEM AS REFERRING TO MORE OR LESS REMOTE PORTIONS OF THE PAST +8230-279154-0016-633: IN ACTUAL 
FACT THERE ARE DOUBTLESS VARIOUS FACTORS THAT CONCUR IN GIVING US THE FEELING OF GREATER OR LESS REMOTENESS IN SOME REMEMBERED EVENT +8230-279154-0017-634: THERE MAY BE A SPECIFIC FEELING WHICH COULD BE CALLED THE FEELING OF PASTNESS ESPECIALLY WHERE IMMEDIATE MEMORY IS CONCERNED +8230-279154-0018-635: THERE IS OF COURSE A DIFFERENCE BETWEEN KNOWING THE TEMPORAL RELATION OF A REMEMBERED EVENT TO THE PRESENT AND KNOWING THE TIME ORDER OF TWO REMEMBERED EVENTS +8230-279154-0019-636: IT WOULD SEEM THAT ONLY RATHER RECENT EVENTS CAN BE PLACED AT ALL ACCURATELY BY MEANS OF FEELINGS GIVING THEIR TEMPORAL RELATION TO THE PRESENT BUT IT IS CLEAR THAT SUCH FEELINGS MUST PLAY AN ESSENTIAL PART IN THE PROCESS OF DATING REMEMBERED EVENTS +8230-279154-0020-637: IF WE HAD RETAINED THE SUBJECT OR ACT IN KNOWLEDGE THE WHOLE PROBLEM OF MEMORY WOULD HAVE BEEN COMPARATIVELY SIMPLE +8230-279154-0021-638: REMEMBERING HAS TO BE A PRESENT OCCURRENCE IN SOME WAY RESEMBLING OR RELATED TO WHAT IS REMEMBERED +8230-279154-0022-639: SOME POINTS MAY BE TAKEN AS FIXED AND SUCH AS ANY THEORY OF MEMORY MUST ARRIVE AT +8230-279154-0023-640: IN THIS CASE AS IN MOST OTHERS WHAT MAY BE TAKEN AS CERTAIN IN ADVANCE IS RATHER VAGUE +8230-279154-0024-641: THE FIRST OF OUR VAGUE BUT INDUBITABLE DATA IS THAT THERE IS KNOWLEDGE OF THE PAST +8230-279154-0025-642: WE MIGHT PROVISIONALLY THOUGH PERHAPS NOT QUITE CORRECTLY DEFINE MEMORY AS THAT WAY OF KNOWING ABOUT THE PAST WHICH HAS NO ANALOGUE IN OUR KNOWLEDGE OF THE FUTURE SUCH A DEFINITION WOULD AT LEAST SERVE TO MARK THE PROBLEM WITH WHICH WE ARE CONCERNED THOUGH SOME EXPECTATIONS MAY DESERVE TO RANK WITH MEMORY AS REGARDS IMMEDIACY +8230-279154-0026-643: THIS DISTINCTION IS VITAL TO THE UNDERSTANDING OF MEMORY BUT IT IS NOT SO EASY TO CARRY OUT IN PRACTICE AS IT IS TO DRAW IN THEORY +8230-279154-0027-644: A (GRAMOPHONE->GRAMMON) BY THE HELP OF SUITABLE RECORDS MIGHT RELATE TO US THE INCIDENTS OF ITS PAST AND PEOPLE ARE NOT SO DIFFERENT FROM GRAMOPHONES AS THEY LIKE TO BELIEVE +8230-279154-0028-645: I CAN SET TO WORK NOW TO REMEMBER THINGS I NEVER REMEMBERED BEFORE SUCH AS WHAT I HAD TO EAT FOR BREAKFAST THIS MORNING AND IT CAN HARDLY BE WHOLLY HABIT THAT ENABLES ME TO DO THIS +8230-279154-0029-646: THE FACT THAT A MAN CAN RECITE A POEM DOES NOT SHOW THAT HE REMEMBERS ANY PREVIOUS OCCASION ON WHICH HE HAS RECITED OR READ IT +8230-279154-0030-647: (SEMON'S->SIMMONS) TWO BOOKS MENTIONED IN AN EARLIER LECTURE DO NOT TOUCH KNOWLEDGE MEMORY AT ALL CLOSELY +8230-279154-0031-648: THEY GIVE LAWS ACCORDING TO WHICH IMAGES OF PAST OCCURRENCES COME INTO OUR MINDS BUT DO NOT DISCUSS OUR BELIEF THAT THESE IMAGES REFER TO PAST OCCURRENCES WHICH IS WHAT CONSTITUTES KNOWLEDGE MEMORY +8230-279154-0032-649: IT IS THIS THAT IS OF INTEREST TO THEORY OF KNOWLEDGE +8230-279154-0033-650: IT IS BY NO MEANS ALWAYS RELIABLE ALMOST EVERYBODY HAS AT SOME TIME EXPERIENCED THE WELL KNOWN ILLUSION THAT ALL THAT IS HAPPENING NOW HAPPENED BEFORE AT SOME TIME +8230-279154-0034-651: WHENEVER THE SENSE OF FAMILIARITY OCCURS WITHOUT A DEFINITE OBJECT IT (LEADS->LEAVES) US TO SEARCH THE ENVIRONMENT UNTIL WE ARE SATISFIED THAT WE HAVE FOUND THE APPROPRIATE OBJECT WHICH LEADS US TO THE JUDGMENT THIS IS FAMILIAR +8230-279154-0035-652: THUS NO KNOWLEDGE AS TO THE PAST IS TO BE DERIVED FROM THE FEELING OF FAMILIARITY ALONE +8230-279154-0036-653: A FURTHER STAGE IS RECOGNITION +8230-279154-0037-654: RECOGNITION IN THIS SENSE DOES NOT NECESSARILY INVOLVE MORE THAN A HABIT OF ASSOCIATION THE KIND OF OBJECT WE ARE SEEING AT THE 
MOMENT IS ASSOCIATED WITH THE WORD CAT OR WITH AN AUDITORY IMAGE OF PURRING OR WHATEVER OTHER CHARACTERISTIC WE MAY HAPPEN TO RECOGNIZE IN THE CAT OF THE MOMENT +8230-279154-0038-655: WE ARE OF COURSE IN FACT ABLE TO JUDGE WHEN WE RECOGNIZE AN OBJECT THAT WE HAVE SEEN IT BEFORE BUT THIS JUDGMENT IS SOMETHING OVER AND ABOVE RECOGNITION IN THIS FIRST SENSE AND MAY VERY PROBABLY BE IMPOSSIBLE TO ANIMALS THAT NEVERTHELESS HAVE THE EXPERIENCE OF RECOGNITION IN THIS FIRST SENSE OF THE WORD +8230-279154-0039-656: THIS KNOWLEDGE IS MEMORY IN ONE SENSE THOUGH IN ANOTHER IT IS NOT +8230-279154-0040-657: THERE ARE HOWEVER SEVERAL POINTS IN WHICH SUCH AN ACCOUNT OF RECOGNITION IS INADEQUATE TO BEGIN WITH IT MIGHT SEEM AT FIRST SIGHT MORE CORRECT TO DEFINE RECOGNITION AS I HAVE SEEN THIS BEFORE THAN AS THIS HAS EXISTED BEFORE +8230-279154-0041-658: THE DEFINITION OF MY EXPERIENCE IS DIFFICULT BROADLY SPEAKING IT IS EVERYTHING THAT IS CONNECTED WITH WHAT I AM EXPERIENCING NOW BY CERTAIN LINKS OF WHICH THE VARIOUS FORMS OF MEMORY ARE AMONG THE MOST IMPORTANT +8230-279154-0042-659: THUS IF I RECOGNIZE A THING THE OCCASION OF ITS PREVIOUS EXISTENCE IN VIRTUE OF WHICH I RECOGNIZE IT FORMS PART OF MY EXPERIENCE BY DEFINITION RECOGNITION WILL BE ONE OF THE MARKS BY WHICH MY EXPERIENCE IS SINGLED OUT FROM THE REST OF THE WORLD +8230-279154-0043-660: OF COURSE THE WORDS THIS HAS EXISTED BEFORE ARE A VERY INADEQUATE TRANSLATION OF WHAT ACTUALLY HAPPENS WHEN WE FORM A JUDGMENT OF RECOGNITION BUT THAT IS UNAVOIDABLE WORDS ARE FRAMED TO EXPRESS A LEVEL OF THOUGHT WHICH IS BY NO MEANS PRIMITIVE AND ARE QUITE INCAPABLE OF EXPRESSING SUCH AN ELEMENTARY OCCURRENCE AS RECOGNITION +8455-210777-0000-972: I REMAINED THERE ALONE FOR MANY HOURS BUT I MUST ACKNOWLEDGE THAT BEFORE I LEFT THE CHAMBERS I HAD GRADUALLY BROUGHT MYSELF TO LOOK AT THE MATTER IN ANOTHER LIGHT +8455-210777-0001-973: HAD (EVA CRASWELLER->EVER CRUSSWELLER) NOT BEEN GOOD LOOKING HAD JACK BEEN STILL AT COLLEGE HAD SIR KENNINGTON OVAL REMAINED IN ENGLAND HAD MISTER (BUNNIT->BUNNOT) AND THE BAR KEEPER NOT SUCCEEDED IN STOPPING MY CARRIAGE ON THE HILL SHOULD I HAVE SUCCEEDED IN (ARRANGING->A RADIAN) FOR THE FINAL DEPARTURE OF MY OLD FRIEND +8455-210777-0002-974: ON ARRIVING AT HOME AT MY OWN RESIDENCE I FOUND THAT OUR SALON WAS FILLED WITH A BRILLIANT COMPANY +8455-210777-0003-975: AS I SPOKE I MADE HIM A GRACIOUS BOW AND I THINK I SHOWED HIM BY MY MODE OF ADDRESS THAT I DID NOT BEAR ANY GRUDGE AS TO MY INDIVIDUAL SELF +8455-210777-0004-976: I HAVE COME TO YOUR SHORES MISTER PRESIDENT WITH THE PURPOSE OF SEEING HOW THINGS ARE PROGRESSING IN THIS DISTANT QUARTER OF THE WORLD +8455-210777-0005-977: WE HAVE OUR LITTLE STRUGGLES HERE AS ELSEWHERE AND ALL THINGS CANNOT BE DONE BY ROSE WATER +8455-210777-0006-978: WE ARE QUITE SATISFIED NOW CAPTAIN (BATTLEAX->BATTLE AXE) SAID MY WIFE +8455-210777-0007-979: QUITE SATISFIED SAID EVA +8455-210777-0008-980: THE LADIES IN COMPLIANCE WITH THAT SOFTNESS OF HEART WHICH IS THEIR CHARACTERISTIC ARE ON ONE SIDE AND THE MEN BY WHOM THE WORLD HAS TO BE MANAGED (ARE->OR) ON THE OTHER +8455-210777-0009-981: NO DOUBT IN PROCESS OF TIME THE LADIES WILL FOLLOW +8455-210777-0010-982: THEIR (MASTERS->MASTER) SAID MISSUS NEVERBEND +8455-210777-0011-983: I DID NOT MEAN SAID CAPTAIN (BATTLEAX->BATTLEX) TO TOUCH UPON PUBLIC SUBJECTS AT SUCH A MOMENT AS THIS +8455-210777-0012-984: MISSUS NEVERBEND YOU MUST INDEED BE PROUD OF YOUR SON +8455-210777-0013-985: JACK HAD BEEN STANDING IN THE FAR CORNER OF THE ROOM TALKING TO EVA AND WAS NOW 
REDUCED TO SILENCE BY HIS PRAISES +8455-210777-0014-986: SIR KENNINGTON OVAL IS A VERY FINE PLAYER SAID MY WIFE +8455-210777-0015-987: I (AND->AM) MY WIFE AND SON AND THE TWO (CRASWELLERS->CRESTWELLERS) AND THREE OR FOUR OTHERS AGREED TO DINE ON BOARD THE SHIP ON THE NEXT +8455-210777-0016-988: THIS I FELT WAS PAID TO ME AS BEING PRESIDENT OF THE REPUBLIC AND I ENDEAVOURED TO BEHAVE MYSELF WITH SUCH MINGLED HUMILITY AND DIGNITY AS MIGHT (BEFIT->BE FIT) THE OCCASION BUT I COULD NOT BUT FEEL THAT SOMETHING WAS WANTING TO THE SIMPLICITY OF MY ORDINARY LIFE +8455-210777-0017-989: MY WIFE ON THE SPUR OF THE MOMENT MANAGED TO GIVE THE (GENTLEMEN->GENTLEMAN) A VERY GOOD DINNER +8455-210777-0018-990: THIS SHE SAID WAS TRUE HOSPITALITY AND I AM NOT SURE THAT I DID NOT AGREE WITH (HER->THERE) +8455-210777-0019-991: THEN THERE WERE THREE OR FOUR LEADING MEN OF THE COMMUNITY WITH THEIR WIVES WHO WERE FOR THE MOST PART THE FATHERS AND MOTHERS OF THE YOUNG LADIES +8455-210777-0020-992: OH YES SAID JACK AND I'M NOWHERE +8455-210777-0021-993: BUT I MEAN TO HAVE MY INNINGS BEFORE LONG +8455-210777-0022-994: OF WHAT MISSUS NEVERBEND HAD GONE THROUGH IN PROVIDING BIRDS BEASTS AND FISHES NOT TO TALK OF TARTS AND JELLIES FOR THE DINNER OF THAT DAY NO ONE BUT MYSELF CAN HAVE ANY IDEA BUT IT MUST BE ADMITTED THAT SHE ACCOMPLISHED HER TASK WITH THOROUGH SUCCESS +8455-210777-0023-995: WE SAT WITH THE (OFFICERS->OFFICER) SOME LITTLE TIME AFTER DINNER AND THEN WENT ASHORE +8455-210777-0024-996: HOW MUCH OF EVIL OF REAL ACCOMPLISHED EVIL HAD THERE NOT OCCURRED TO ME DURING THE LAST FEW DAYS +8455-210777-0025-997: WHAT COULD I DO NOW BUT JUST LAY MYSELF DOWN AND DIE +8455-210777-0026-998: AND THE DEATH OF WHICH I DREAMT COULD NOT ALAS +8455-210777-0027-999: WHEN THIS CAPTAIN SHOULD HAVE TAKEN HIMSELF AND HIS VESSEL BACK TO ENGLAND I WOULD RETIRE TO A SMALL FARM WHICH I POSSESSED AT THE (FARTHEST->FURTHEST) SIDE OF THE ISLAND AND THERE IN SECLUSION (WOULD->WHAT) I END MY DAYS +8455-210777-0028-1000: JACK WOULD BECOME EVA'S HAPPY HUSBAND AND WOULD REMAIN AMIDST THE HURRIED DUTIES OF THE EAGER WORLD +8455-210777-0029-1001: THINKING OF ALL THIS I WENT TO SLEEP +8455-210777-0030-1002: MISTER NEVERBEND BEGAN THE CAPTAIN AND I (OBSERVED->OBSERVE) THAT UP TO THAT MOMENT HE HAD GENERALLY ADDRESSED ME AS PRESIDENT IT CANNOT BE DENIED THAT WE HAVE COME HERE ON AN UNPLEASANT MISSION +8455-210777-0031-1003: YOU HAVE RECEIVED US WITH ALL THAT COURTESY AND HOSPITALITY FOR WHICH YOUR CHARACTER IN ENGLAND (STANDS->STAND) SO HIGH +8455-210777-0032-1004: IT IS A DUTY SAID I +8455-210777-0033-1005: BUT YOUR POWER IS SO SUPERIOR TO ANY THAT I CAN ADVANCE AS TO MAKE US HERE FEEL THAT THERE IS NO DISGRACE IN YIELDING TO IT +8455-210777-0034-1006: NOT A DOUBT BUT HAD YOUR FORCE BEEN ONLY DOUBLE OR (TREBLE->TROUBLE) OUR OWN I SHOULD HAVE FOUND IT MY DUTY TO STRUGGLE WITH YOU +8455-210777-0035-1007: THAT IS ALL QUITE TRUE MISTER NEVERBEND SAID SIR FERDINANDO BROWN +8455-210777-0036-1008: I CAN AFFORD TO SMILE BECAUSE I AM ABSOLUTELY POWERLESS BEFORE YOU BUT I DO NOT THE LESS FEEL THAT IN A MATTER (IN->OF) WHICH THE PROGRESS OF THE WORLD IS CONCERNED I OR RATHER WE HAVE BEEN PUT DOWN BY BRUTE FORCE +8455-210777-0037-1009: YOU HAVE COME TO US THREATENING US WITH ABSOLUTE DESTRUCTION +8455-210777-0038-1010: THEREFORE I FEEL MYSELF QUITE ABLE AS PRESIDENT OF THIS REPUBLIC TO RECEIVE YOU WITH A COURTESY DUE TO THE SERVANTS OF A FRIENDLY ALLY +8455-210777-0039-1011: I CAN ASSURE YOU HE HAS NOT EVEN ALLOWED ME TO SEE THE TRIGGER SINCE I HAVE BEEN ON 
BOARD +8455-210777-0040-1012: THEN SAID SIR FERDINANDO THERE IS NOTHING FOR IT BUT THAT (HE->WE) MUST TAKE YOU WITH HIM +8455-210777-0041-1013: THERE CAME UPON ME A SUDDEN SHOCK WHEN I HEARD THESE WORDS WHICH EXCEEDED ANYTHING WHICH I HAD YET FELT +8455-210777-0042-1014: YOU HEAR WHAT SIR FERDINANDO BROWN HAS SAID REPLIED CAPTAIN (BATTLEAX->BATTLEX) +8455-210777-0043-1015: BUT WHAT IS THE DELICATE MISSION I ASKED +8455-210777-0044-1016: I WAS TO BE TAKEN AWAY AND CARRIED TO ENGLAND OR ELSEWHERE OR DROWNED UPON THE VOYAGE IT MATTERED NOT WHICH +8455-210777-0045-1017: THEN THE REPUBLIC OF (BRITANNULA->BRITAIN YULA) WAS TO BE DECLARED AS NON EXISTENT AND THE BRITISH FLAG WAS TO BE EXALTED AND A BRITISH GOVERNOR INSTALLED IN THE EXECUTIVE CHAMBERS +8455-210777-0046-1018: YOU MAY BE QUITE SURE (IT'S->TO) THERE SAID CAPTAIN (BATTLEAX->BATTLE AXE) AND THAT I CAN SO USE IT AS TO HALF OBLITERATE YOUR TOWN WITHIN TWO MINUTES OF MY RETURN ON BOARD +8455-210777-0047-1019: YOU PROPOSE TO KIDNAP ME I SAID +8455-210777-0048-1020: WHAT (WOULD->WILL) BECOME OF YOUR GUN WERE I TO KIDNAP YOU +8455-210777-0049-1021: LIEUTENANT (CROSSTREES->CROSS TREES) IS A VERY GALLANT OFFICER +8455-210777-0050-1022: ONE OF US ALWAYS REMAINS ON BOARD WHILE THE OTHER IS ON SHORE +8455-210777-0051-1023: WHAT WORLD WIDE INIQUITY SUCH A SPEECH AS THAT DISCLOSES SAID I STILL TURNING MYSELF TO THE CAPTAIN FOR THOUGH I WOULD HAVE CRUSHED THEM BOTH BY MY WORDS HAD IT BEEN POSSIBLE MY DISLIKE (CENTRED->SENATE) ITSELF ON SIR FERDINANDO +8455-210777-0052-1024: YOU WILL ALLOW ME TO SUGGEST SAID HE THAT THAT IS A MATTER OF OPINION +8455-210777-0053-1025: WERE I TO COMPLY WITH YOUR ORDERS WITHOUT EXPRESSING MY OWN OPINION I SHOULD SEEM TO HAVE DONE SO WILLINGLY HEREAFTER +8455-210777-0054-1026: THE LETTER RAN AS FOLLOWS +8455-210777-0055-1027: SIR I HAVE IT IN COMMAND TO INFORM YOUR EXCELLENCY THAT YOU HAVE BEEN APPOINTED GOVERNOR OF THE CROWN COLONY WHICH IS CALLED (BRITANNULA->BRITAIN ULLA) +8455-210777-0056-1028: THE PECULIAR CIRCUMSTANCES OF THE COLONY ARE WITHIN YOUR EXCELLENCY'S KNOWLEDGE +8455-210777-0057-1029: BUT IN THEIR SELECTION OF A CONSTITUTION THE (BRITANNULISTS->BRITAIN UILESTS) HAVE UNFORTUNATELY ALLOWED THEMSELVES BUT ONE (DELIBERATIVE->DELIBERATE) ASSEMBLY AND HENCE (HAVE->HAS) SPRUNG THEIR PRESENT DIFFICULTIES +8455-210777-0058-1030: IT IS FOUNDED ON THE ACKNOWLEDGED WEAKNESS OF THOSE WHO SURVIVE THAT PERIOD OF LIFE AT WHICH MEN CEASE TO WORK +8455-210777-0059-1031: BUT IT IS SURMISED THAT YOU WILL FIND DIFFICULTIES IN THE WAY OF YOUR ENTERING AT ONCE UPON YOUR (GOVERNMENT->GOVERNOR) +8455-210777-0060-1032: THE JOHN BRIGHT (IS ARMED->HIS ARM) WITH A WEAPON OF GREAT POWER AGAINST WHICH IT IS IMPOSSIBLE THAT THE PEOPLE OF (BRITANNULA->BRITAIN EULO) SHOULD PREVAIL +8455-210777-0061-1033: YOU WILL CARRY OUT WITH YOU ONE HUNDRED MEN OF THE NORTH NORTH WEST BIRMINGHAM REGIMENT WHICH WILL PROBABLY SUFFICE FOR YOUR OWN SECURITY AS IT IS THOUGHT THAT IF MISTER NEVERBEND BE WITHDRAWN THE PEOPLE WILL REVERT EASILY TO THEIR OLD HABITS OF OBEDIENCE +8455-210777-0062-1034: WHEN DO YOU INTEND THAT (THE->THAT) JOHN BRIGHT SHALL START +8455-210777-0063-1035: TO DAY I SHOUTED +8455-210777-0064-1036: AND I HAVE NO ONE READY TO WHOM I CAN GIVE UP THE (ARCHIVES OF->ARCHIVE) THE GOVERNMENT +8455-210777-0065-1037: I SHALL BE HAPPY TO TAKE CHARGE OF THEM SAID SIR FERDINANDO +8455-210777-0066-1038: THEY OF COURSE MUST ALL BE ALTERED +8455-210777-0067-1039: OR OF THE HABITS OF OUR PEOPLE IT IS QUITE IMPOSSIBLE +8455-210777-0068-1040: YOUR POWER 
IS SUFFICIENT I SAID +8455-210777-0069-1041: IF YOU WILL GIVE US YOUR PROMISE TO MEET CAPTAIN (BATTLEAX->ADELAX) HERE AT THIS TIME TO MORROW WE WILL STRETCH A POINT AND DELAY THE DEPARTURE OF THE JOHN BRIGHT FOR TWENTY FOUR HOURS +8455-210777-0070-1042: AND THIS PLAN WAS ADOPTED TOO IN ORDER TO EXTRACT FROM ME A PROMISE THAT I WOULD DEPART IN PEACE +8463-287645-0000-543: THIS WAS WHAT DID THE MISCHIEF SO FAR AS THE RUNNING AWAY WAS CONCERNED +8463-287645-0001-544: IT IS HARDLY NECESSARY TO SAY MORE OF THEM HERE +8463-287645-0002-545: FROM THE MANNER IN WHICH HE EXPRESSED HIMSELF WITH REGARD TO ROBERT (HOLLAN->HOLLAND) NO MAN IN THE WHOLE RANGE OF HIS RECOLLECTIONS WILL BE LONGER REMEMBERED THAN HE HIS (ENTHRALMENT->ENTHRALLMENT) WHILE UNDER (HOLLAN->HOLLAND) WILL HARDLY EVER BE FORGOTTEN +8463-287645-0003-546: OF THIS PARTY EDWARD A BOY OF SEVENTEEN CALLED FORTH MUCH SYMPATHY HE TOO WAS CLAIMED BY (HOLLAN->HOLLAND) +8463-287645-0004-547: JOHN WESLEY (COMBASH->COMBATCH) JACOB TAYLOR AND THOMAS EDWARD SKINNER +8463-287645-0005-548: (A FEW->IF YOU) YEARS BACK ONE OF THEIR SLAVES A COACHMAN WAS KEPT ON THE COACH BOX ONE (COLD->CALLED) NIGHT WHEN THEY WERE OUT AT A BALL UNTIL HE BECAME ALMOST FROZEN TO DEATH IN FACT HE DID DIE IN THE INFIRMARY FROM THE EFFECTS OF THE FROST ABOUT ONE WEEK AFTERWARDS +8463-287645-0006-549: THE DOCTOR WHO ATTENDED THE (INJURED->ANCIENT) CREATURE IN THIS CASE WAS SIMPLY TOLD THAT SHE SLIPPED AND FELL DOWN (*->THE) STAIRS AS SHE WAS COMING DOWN +8463-287645-0007-550: ANOTHER CASE SAID JOHN (WESLEY->WESTLEY) WAS A LITTLE GIRL HALF GROWN WHO WAS WASHING WINDOWS (UP STAIRS->UPSTAIRS) ONE DAY AND UNLUCKILY FELL ASLEEP IN THE WINDOW AND IN THIS POSITION WAS FOUND BY HER MISTRESS IN A RAGE THE MISTRESS (HIT->HID) HER A HEAVY SLAP KNOCKED HER OUT OF THE WINDOW AND SHE FELL TO THE PAVEMENT AND DIED IN A FEW HOURS FROM THE EFFECTS THEREOF +8463-287645-0008-551: AS USUAL NOTHING WAS DONE IN THE WAY OF PUNISHMENT +8463-287645-0009-552: I NEVER KNEW OF BUT ONE MAN WHO COULD EVER PLEASE HIM +8463-287645-0010-553: HE WORKED ME VERY HARD HE WANTED TO BE BEATING ME ALL THE TIME +8463-287645-0011-554: SHE WAS A LARGE HOMELY WOMAN THEY WERE COMMON WHITE PEOPLE WITH NO REPUTATION IN THE COMMUNITY +8463-287645-0012-555: SUBSTANTIALLY THIS WAS JACOB'S UNVARNISHED DESCRIPTION OF HIS MASTER AND MISTRESS +8463-287645-0013-556: AS TO HIS AGE AND ALSO THE NAME OF HIS MASTER JACOB'S STATEMENT VARIED SOMEWHAT FROM THE ADVERTISEMENT +8463-287645-0014-557: OF STARTING I DIDN'T KNOW THE WAY TO COME +8463-294825-0000-558: IT'S ALMOST BEYOND CONJECTURE +8463-294825-0001-559: THIS REALITY BEGINS TO EXPLAIN THE DARK POWER AND (OTHERWORLDLY->OTHER WORLDLY) FASCINATION OF TWENTY THOUSAND LEAGUES UNDER THE SEAS +8463-294825-0002-560: FIRST AS A PARIS (STOCKBROKER->DOCKBROKER) LATER AS A CELEBRATED AUTHOR AND YACHTSMAN HE WENT ON FREQUENT VOYAGES TO BRITAIN AMERICA THE MEDITERRANEAN +8463-294825-0003-561: NEMO BUILDS A FABULOUS (FUTURISTIC->FUTURESTIC) SUBMARINE THE NAUTILUS THEN CONDUCTS AN UNDERWATER CAMPAIGN OF VENGEANCE AGAINST HIS IMPERIALIST OPPRESSOR +8463-294825-0004-562: IN ALL THE NOVEL (HAD->HEAD) A DIFFICULT (GESTATION->JUST STATION) +8463-294825-0005-563: OTHER SUBTLETIES OCCUR INSIDE EACH EPISODE THE TEXTURES SPARKLING WITH WIT INFORMATION AND INSIGHT +8463-294825-0006-564: HIS SPECIFICATIONS FOR AN OPEN SEA SUBMARINE AND A SELF (CONTAINED->CONTAINING) DIVING SUIT WERE DECADES BEFORE THEIR TIME YET MODERN TECHNOLOGY BEARS THEM OUT TRIUMPHANTLY +8463-294825-0007-565: EVEN THE SUPPORTING CAST 
IS SHREWDLY DRAWN PROFESSOR ARONNAX THE CAREER SCIENTIST CAUGHT IN AN ETHICAL CONFLICT CONSEIL THE COMPULSIVE CLASSIFIER WHO SUPPLIES HUMOROUS TAG LINES FOR (VERNE'S->VERNS) FAST FACTS THE HARPOONER NED LAND A CREATURE OF CONSTANT APPETITES MAN AS HEROIC ANIMAL +8463-294825-0008-566: BUT MUCH OF THE (NOVEL'S->NOVELS) BROODING POWER COMES FROM CAPTAIN NEMO +8463-294825-0009-567: THIS COMPULSION LEADS NEMO INTO UGLY CONTRADICTIONS (HE'S->HE IS) A FIGHTER FOR FREEDOM YET ALL WHO BOARD HIS SHIP ARE IMPRISONED THERE FOR GOOD HE WORKS TO SAVE LIVES BOTH HUMAN AND ANIMAL YET HE HIMSELF CREATES A (HOLOCAUST->HOHLAST) HE DETESTS IMPERIALISM YET HE LAYS PERSONAL CLAIM TO THE SOUTH POLE +8463-294825-0010-568: AND IN THIS LAST ACTION HE FALLS INTO THE CLASSIC SIN OF PRIDE +8463-294825-0011-569: (HE'S->HIS) SWIFTLY PUNISHED +8463-294825-0012-570: THE NAUTILUS NEARLY PERISHES IN THE ANTARCTIC AND NEMO SINKS INTO A GROWING DEPRESSION +8463-294825-0013-571: FOR MANY THEN THIS BOOK HAS BEEN A SOURCE OF FASCINATION SURELY ONE OF THE MOST INFLUENTIAL NOVELS EVER WRITTEN (AN->AND) INSPIRATION FOR SUCH SCIENTISTS AND DISCOVERERS AS (ENGINEER->ENGINEERS) SIMON LAKE OCEANOGRAPHER WILLIAM (BEEBE POLAR TRAVELER SIR ERNEST->B POLLAR TRAVELLERS ARE EARNEST) SHACKLETON +8463-294825-0014-572: FATHOM SIX FEET +8463-294825-0015-573: (GRAM->GRAHAM) ROUGHLY (ONE->WON) TWENTY EIGHTH OF AN OUNCE +8463-294825-0016-574: (MILLIGRAM->MILAGRAM) ROUGHLY (ONE->WON) TWENTY EIGHT (THOUSAND->THOUSANDTH) OF AN OUNCE +8463-294825-0017-575: (LITER->LEADER) ROUGHLY (ONE QUART->WON COURT) +8463-294825-0018-576: METER ROUGHLY ONE YARD THREE INCHES +8463-294825-0019-577: MILLIMETER ROUGHLY (ONE->WON) TWENTY FIFTH OF AN INCH +8463-294828-0000-578: CHAPTER THREE AS MASTER WISHES +8463-294828-0001-579: THREE SECONDS BEFORE THE ARRIVAL OF J B HOBSON'S LETTER I (NO->KNOW) MORE DREAMED OF CHASING THE UNICORN THAN OF TRYING FOR THE (NORTHWEST->NORTH WEST) PASSAGE +8463-294828-0002-580: EVEN SO I HAD JUST RETURNED FROM AN ARDUOUS JOURNEY EXHAUSTED AND BADLY NEEDING (A REST->ARREST) +8463-294828-0003-581: I WANTED NOTHING MORE THAN TO SEE MY COUNTRY AGAIN MY FRIENDS MY MODEST QUARTERS BY THE BOTANICAL GARDENS MY DEARLY BELOVED COLLECTIONS +8463-294828-0004-582: BUT NOW NOTHING COULD HOLD ME BACK +8463-294828-0005-583: CONSEIL WAS MY (MANSERVANT->MAN'S SERVANT) +8463-294828-0006-584: FROM RUBBING SHOULDERS WITH SCIENTISTS IN OUR LITTLE UNIVERSE BY THE BOTANICAL GARDENS THE BOY HAD COME TO KNOW A THING OR TWO +8463-294828-0007-585: CLASSIFYING WAS EVERYTHING TO HIM SO HE KNEW NOTHING ELSE (WELL->WILL) VERSED IN (THE->A) THEORY OF CLASSIFICATION HE WAS POORLY VERSED IN ITS PRACTICAL APPLICATION AND I DOUBT THAT HE COULD TELL A SPERM WHALE FROM A BALEEN WHALE +8463-294828-0008-586: AND YET WHAT A FINE GALLANT LAD +8463-294828-0009-587: NOT ONCE DID HE COMMENT ON THE LENGTH OR THE HARDSHIPS OF (A->THE) JOURNEY +8463-294828-0010-588: NEVER DID HE OBJECT TO BUCKLING UP HIS (SUITCASE->SUIT CASE) FOR ANY COUNTRY WHATEVER CHINA OR THE CONGO NO MATTER HOW FAR OFF IT WAS +8463-294828-0011-589: HE WENT HERE THERE AND EVERYWHERE IN PERFECT CONTENTMENT +8463-294828-0012-590: PLEASE FORGIVE ME FOR THIS UNDERHANDED WAY OF ADMITTING (*->THAT) I HAD TURNED FORTY +8463-294828-0013-591: HE WAS A FANATIC ON FORMALITY AND HE ONLY ADDRESSED ME IN THE THIRD PERSON TO THE POINT WHERE IT GOT (TIRESOME->TO HYAHSOME) +8463-294828-0014-592: THERE WAS GOOD REASON TO STOP AND THINK EVEN FOR THE WORLD'S MOST EMOTIONLESS MAN +8463-294828-0015-593: CONSEIL I CALLED A THIRD TIME 
CONSEIL APPEARED +8463-294828-0016-594: (DID->DEAD) MASTER SUMMON ME HE SAID ENTERING +8463-294828-0017-595: PACK AS MUCH INTO MY TRUNK AS YOU CAN MY (TRAVELING->TRAVELLING) KIT MY SUITS SHIRTS AND SOCKS DON'T BOTHER COUNTING (JUST->JEST) SQUEEZE IT ALL IN AND HURRY +8463-294828-0018-596: WE'LL DEAL WITH THEM LATER WHAT +8463-294828-0019-597: ANYHOW WE'LL (LEAVE->LIVE) INSTRUCTIONS TO SHIP THE WHOLE MENAGERIE TO FRANCE +8463-294828-0020-598: YES WE ARE CERTAINLY I REPLIED EVASIVELY BUT AFTER WE MAKE A DETOUR +8463-294828-0021-599: A (ROUTE->ROUT) SLIGHTLY LESS DIRECT THAT'S ALL +8463-294828-0022-600: (WE'RE->WERE) LEAVING ON THE ABRAHAM LINCOLN +8463-294828-0023-601: YOU SEE MY FRIEND IT'S AN ISSUE OF THE MONSTER THE NOTORIOUS NARWHALE +8463-294828-0024-602: WE DON'T KNOW WHERE IT WILL TAKE US +8463-294828-0025-603: BUT WE'RE GOING JUST THE SAME +8463-294828-0026-604: WE HAVE A COMMANDER (WHO'S->WHOSE) GAME FOR ANYTHING +8463-294828-0027-605: I LEFT INSTRUCTIONS FOR SHIPPING MY CONTAINERS OF STUFFED ANIMALS AND DRIED PLANTS TO PARIS FRANCE +8463-294828-0028-606: I OPENED A LINE OF CREDIT SUFFICIENT TO COVER THE (BABIRUSA->BABAROUSA) AND CONSEIL AT MY HEELS I JUMPED INTO A CARRIAGE +8463-294828-0029-607: OUR BAGGAGE WAS IMMEDIATELY CARRIED TO THE DECK OF THE FRIGATE I RUSHED ABOARD +8463-294828-0030-608: I ASKED FOR COMMANDER FARRAGUT +8463-294828-0031-609: ONE OF THE SAILORS LED ME TO THE (AFTERDECK->AFTER DECK) WHERE I STOOD IN THE PRESENCE OF A SMART LOOKING OFFICER WHO EXTENDED HIS HAND TO ME +8463-294828-0032-610: IN PERSON WELCOME ABOARD PROFESSOR YOUR CABIN IS WAITING FOR YOU +8463-294828-0033-611: I WAS WELL SATISFIED WITH MY CABIN WHICH WAS LOCATED IN THE STERN AND OPENED INTO THE (OFFICERS->OFFICER'S) MESS +8463-294828-0034-612: (WE'LL->WILL) BE QUITE COMFORTABLE HERE I TOLD CONSEIL +8463-294828-0035-613: AND SO IF (I'D->I HAD) BEEN DELAYED BY A QUARTER OF AN HOUR OR EVEN LESS THE FRIGATE WOULD HAVE GONE WITHOUT ME AND I WOULD HAVE MISSED OUT ON THIS UNEARTHLY EXTRAORDINARY AND INCONCEIVABLE EXPEDITION WHOSE TRUE STORY MIGHT WELL MEET WITH SOME SKEPTICISM +8463-294828-0036-614: THE WHARVES OF BROOKLYN AND EVERY PART OF NEW YORK BORDERING THE EAST RIVER WERE CROWDED WITH CURIOSITY SEEKERS +8463-294828-0037-615: DEPARTING FROM FIVE HUNDRED THOUSAND THROATS THREE CHEERS BURST FORTH IN SUCCESSION +8463-294828-0038-616: THOUSANDS OF HANDKERCHIEFS WERE WAVING ABOVE THESE TIGHTLY PACKED MASSES HAILING THE ABRAHAM LINCOLN UNTIL IT REACHED THE WATERS OF THE HUDSON RIVER AT THE TIP OF THE LONG (PENINSULA->PRONUNCILA) THAT FORMS NEW YORK CITY +8555-284447-0000-2299: THEN HE RUSHED (DOWN STAIRS->DOWNSTAIRS) INTO THE COURTYARD SHOUTING LOUDLY FOR HIS SOLDIERS AND THREATENING TO PATCH EVERYBODY IN HIS DOMINIONS (IF->AT) THE SAILORMAN WAS NOT RECAPTURED +8555-284447-0001-2300: HOLD HIM FAST (*->TO) MY MEN AND AS SOON AS I'VE HAD MY COFFEE (AND->AN) OATMEAL (I'LL->I WILL) TAKE HIM TO THE ROOM OF THE GREAT KNIFE AND PATCH HIM +8555-284447-0002-2301: I WOULDN'T MIND A CUP (O->OF) COFFEE MYSELF SAID CAP'N BILL (I'VE->I HAVE) HAD (CONSID'BLE->CONSIDERABLE) EXERCISE THIS (MORNIN->MORNING) AND I'M (ALL READY->ALREADY) FOR (BREAKFAS->BREAKFAST) +8555-284447-0003-2302: BUT CAP'N BILL MADE NO SUCH ATTEMPT KNOWING IT WOULD BE USELESS +8555-284447-0004-2303: AS SOON AS THEY ENTERED THE ROOM OF THE GREAT KNIFE THE BOOLOOROO GAVE A YELL OF DISAPPOINTMENT +8555-284447-0005-2304: THE ROOM OF THE GREAT KNIFE WAS HIGH AND BIG AND AROUND IT RAN ROWS OF BENCHES FOR THE SPECTATORS TO SIT UPON +8555-284447-0006-2305: 
IN ONE PLACE AT THE HEAD OF THE ROOM WAS A RAISED PLATFORM FOR THE ROYAL FAMILY WITH ELEGANT THRONE CHAIRS FOR THE KING AND QUEEN AND SIX SMALLER BUT RICHLY UPHOLSTERED CHAIRS FOR THE SNUBNOSED PRINCESSES +8555-284447-0007-2306: THEREFORE HER MAJESTY PAID NO ATTENTION TO (ANYONE->ANY ONE) AND NO ONE PAID ANY ATTENTION TO HER +8555-284447-0008-2307: RICH JEWELS OF BLUE STONES GLITTERED UPON THEIR PERSONS AND THE ROYAL LADIES WERE FULLY AS (GORGEOUS->CORGEOUS) AS THEY WERE (HAUGHTY->HALTING) AND OVERBEARING +8555-284447-0009-2308: (MORNIN->MORNING) GIRLS (HOPE YE FEEL->O BELL) AS WELL AS YE LOOK +8555-284447-0010-2309: CONTROL YOURSELVES MY DEARS REPLIED THE BOOLOOROO THE WORST PUNISHMENT I KNOW HOW TO INFLICT ON (ANYONE->ANY ONE) THIS PRISONER IS ABOUT TO SUFFER (YOU'LL->YOU WILL) SEE A VERY PRETTY PATCHING MY ROYAL DAUGHTERS +8555-284447-0011-2310: SUPPOSE IT'S (A FRIEND->OF BRAND) +8555-284447-0012-2311: THE CAPTAIN SHOOK HIS HEAD +8555-284447-0013-2312: WHY YOU (SAID->SIT) TO (FETCH->VEGET) THE FIRST LIVING CREATURE WE MET AND THAT WAS (THIS BILLYGOAT->THE SPILLY GOAT) REPLIED THE CAPTAIN PANTING HARD AS HE HELD FAST TO ONE OF THE GOAT'S HORNS +8555-284447-0014-2313: THE IDEA OF PATCHING CAP'N BILL TO A GOAT WAS VASTLY AMUSING TO HIM AND THE MORE HE THOUGHT OF IT THE MORE HE ROARED WITH LAUGHTER +8555-284447-0015-2314: THEY LOOK SOMETHING ALIKE YOU KNOW SUGGESTED THE CAPTAIN OF THE GUARDS LOOKING FROM ONE TO THE OTHER DOUBTFULLY AND THEY'RE NEARLY THE SAME SIZE IF (YOU->HE) STAND (THE GOAT->A BOAT) ON HIS HIND LEGS THEY'VE BOTH GOT THE SAME STYLE OF WHISKERS AND THEY'RE BOTH OF (EM->THEM) OBSTINATE AND DANGEROUS SO THEY OUGHT TO MAKE A GOOD PATCH SPLENDID +8555-284447-0016-2315: FINE GLORIOUS +8555-284447-0017-2316: WHEN THIS HAD BEEN ACCOMPLISHED THE BOOLOOROO LEANED OVER TO TRY TO DISCOVER WHY THE FRAME ROLLED AWAY SEEMINGLY OF ITS OWN ACCORD AND HE WAS THE MORE PUZZLED BECAUSE IT HAD NEVER DONE SUCH A THING BEFORE +8555-284447-0018-2317: AT ONCE THE GOAT GAVE A LEAP (ESCAPED->ESCAPE) FROM THE SOLDIERS AND WITH BOWED HEAD RUSHED UPON THE BOOLOOROO +8555-284447-0019-2318: BEFORE ANY (COULD->GOOD) STOP HIM HE BUTTED HIS MAJESTY SO FURIOUSLY THAT THE KING SOARED FAR INTO THE AIR AND TUMBLED IN A HEAP AMONG THE BENCHES WHERE HE LAY MOANING AND GROANING +8555-284447-0020-2319: THE (GOAT'S WARLIKE->GOATS WORE LIKE) SPIRIT WAS ROUSED BY THIS SUCCESSFUL ATTACK +8555-284447-0021-2320: THEN THEY SPED IN GREAT HASTE FOR THE DOOR AND THE GOAT GAVE A FINAL (BUTT->BUT) THAT SENT (THE->A) ROW OF ROYAL LADIES ALL DIVING INTO THE CORRIDOR IN ANOTHER TANGLE WHEREUPON THEY SHRIEKED IN A MANNER THAT TERRIFIED EVERYONE WITHIN SOUND OF THEIR VOICES +8555-284447-0022-2321: I HAD A NOTION IT WAS YOU (MATE AS SAVED->MADE TO SEE) ME FROM THE KNIFE +8555-284447-0023-2322: I (COULDN'T->COULDN') SHIVER MUCH (BEIN->BEING) BOUND SO TIGHT BUT WHEN I'M LOOSE I MEAN TO HAVE (JUS ONE->JUST SWUNG) GOOD SHIVER TO RELIEVE MY (FEELIN'S->FEELINS) +8555-284447-0024-2323: COME AND GET THE BOOLOOROO SHE SAID GOING TOWARD THE BENCHES +8555-284449-0000-2324: SO THEY WERE QUITE WILLING TO OBEY THE ORDERS OF THEIR GIRL QUEEN AND IN A SHORT TIME THE (BLASTS->BLAST) OF TRUMPETS AND ROLL OF DRUMS AND CLASHING OF CYMBALS TOLD TROT AND CAP'N BILL THAT THE BLUE BANDS HAD (ASSEMBLED->A SIMPLED) BEFORE THE PALACE +8555-284449-0001-2325: THEN THEY ALL MARCHED OUT A LITTLE WAY INTO THE FIELDS AND FOUND THAT THE ARMY OF PINKIES HAD ALREADY FORMED AND WAS ADVANCING STEADILY TOWARD THEM +8555-284449-0002-2326: AT THE HEAD OF THE PINKIES WERE 
GHIP GHISIZZLE AND BUTTON BRIGHT WHO HAD THE PARROT ON HIS SHOULDER AND THEY WERE SUPPORTED BY CAPTAIN (CORALIE->CORLEY) AND CAPTAIN (TINTINT->TINTANT) AND ROSALIE THE WITCH +8555-284449-0003-2327: WHEN THE (BLUESKINS->BLUESKIN) SAW GHIP GHISIZZLE THEY RAISED ANOTHER GREAT SHOUT FOR HE WAS THE (FAVORITE->FAVOURITE) OF THE SOLDIERS AND VERY POPULAR WITH ALL THE PEOPLE +8555-284449-0004-2328: SINCE LAST THURSDAY I (GHIP->*) GHISIZZLE HAVE BEEN THE LAWFUL BOOLOOROO OF THE BLUE COUNTRY BUT NOW THAT YOU ARE CONQUERED BY QUEEN TROT I SUPPOSE I AM CONQUERED TOO AND YOU HAVE NO BOOLOOROO AT ALL +8555-284449-0005-2329: WHEN HE FINISHED SHE SAID CHEERFULLY +8555-284449-0006-2330: DON'T WORRY SIZZLE DEAR (IT'LL->IT) ALL COME RIGHT PRETTY SOON +8555-284449-0007-2331: NOW THEN LET'S ENTER THE CITY (AN->AND) ENJOY THE (GRAND->GREAT) FEAST (THAT'S->ITS) BEING COOKED I'M NEARLY STARVED MYSELF FOR THIS (CONQUERIN KINGDOMS->CONQUERING KINGDOM'S) IS HARD WORK +8555-284449-0008-2332: THEN SHE GAVE ROSALIE BACK HER MAGIC RING THANKING THE KIND (WITCH->WHICH) FOR ALL SHE HAD DONE FOR THEM +8555-284449-0009-2333: YOU ARE MATE REPLIED THE SAILOR +8555-284449-0010-2334: IT WILL BE SUCH A SATISFACTION +8555-284449-0011-2335: THE GUARDS HAD A TERRIBLE STRUGGLE WITH THE GOAT WHICH WAS LOOSE IN THE ROOM AND STILL WANTED TO FIGHT BUT FINALLY THEY SUBDUED THE ANIMAL AND THEN THEY TOOK THE BOOLOOROO OUT OF THE FRAME (HE WAS->WHOSE) TIED IN AND BROUGHT BOTH HIM AND THE GOAT BEFORE QUEEN TROT WHO AWAITED THEM IN THE THRONE ROOM OF THE PALACE +8555-284449-0012-2336: (I'LL->I WILL) GLADLY DO THAT PROMISED THE NEW BOOLOOROO AND I'LL FEED THE (HONORABLE GOAT->HON GO TO) ALL THE SHAVINGS AND LEATHER AND TIN CANS HE CAN EAT BESIDES THE GRASS +8555-284449-0013-2337: (SCUSE->EXCUSE) ME SAID TROT I NEGLECTED TO TELL YOU THAT YOU'RE NOT THE BOOLOOROO ANY MORE +8555-284449-0014-2338: THE FORMER BOOLOOROO GROANED +8555-284449-0015-2339: I'LL (NOT->NOW) BE WICKED ANY MORE SIGHED THE OLD BOOLOOROO I'LL REFORM +8555-284449-0016-2340: AS A PRIVATE CITIZEN I SHALL BE A MODEL OF DEPORTMENT BECAUSE IT WOULD BE DANGEROUS TO BE OTHERWISE +8555-284449-0017-2341: WHEN FIRST THEY ENTERED THE THRONE ROOM THEY TRIED TO BE AS HAUGHTY AND SCORNFUL AS EVER BUT THE BLUES WHO WERE ASSEMBLED THERE ALL LAUGHED AT THEM AND JEERED THEM FOR THERE WAS NOT A SINGLE PERSON IN ALL THE BLUE COUNTRY WHO LOVED THE PRINCESSES THE LEAST LITTLE BIT +8555-284449-0018-2342: SO GHIP GHISIZZLE ORDERED THE CAPTAIN TO TAKE A FILE OF SOLDIERS AND ESCORT THE RAVING BEAUTIES TO THEIR NEW HOME +8555-284449-0019-2343: THAT EVENING TROT GAVE A GRAND BALL IN THE PALACE TO WHICH THE MOST IMPORTANT OF THE PINKIES (AND->IN) THE BLUESKINS WERE INVITED +8555-284449-0020-2344: THE COMBINED BANDS OF BOTH THE COUNTRIES PLAYED THE MUSIC AND A FINE SUPPER WAS SERVED +8555-292519-0000-2283: BRIGHTER THAN EARLY (DAWN'S->DAWNS) MOST BRILLIANT DYE ARE BLOWN CLEAR BANDS OF (COLOR->COLOUR) THROUGH THE SKY THAT SWIRL AND SWEEP AND MEET TO BREAK AND FOAM LIKE RAINBOW VEILS UPON A BUBBLE'S DOME +8555-292519-0001-2284: GUIDED BY YOU HOW WE MIGHT STROLL TOWARDS DEATH OUR ONLY MUSIC ONE ANOTHER'S BREATH THROUGH GARDENS INTIMATE WITH HOLLYHOCKS WHERE SILENT POPPIES BURN BETWEEN THE ROCKS BY POOLS WHERE BIRCHES BEND TO CONFIDANTS ABOVE GREEN WATERS (SCUMMED->SCUMBED) WITH (*->A) LILY PLANTS +8555-292519-0002-2285: VENICE +8555-292519-0003-2286: IN A SUNSET GLOWING OF CRIMSON AND GOLD SHE LIES THE GLORY OF THE WORLD A (BEACHED->BEECHED) KING'S GALLEY (WHOSE->WHO) SAILS ARE FURLED WHO IS HUNG WITH 
TAPESTRIES RICH AND OLD +8555-292519-0004-2287: THE PITY THAT WE MUST COME AND GO +8555-292519-0005-2288: WHILE THE OLD GOLD AND THE MARBLE STAYS (FOREVER->FOR EVER) GLEAMING ITS SOFT STRONG BLAZE CALM IN THE EARLY EVENING GLOW +8555-292519-0006-2289: THE PLEASANT GRAVEYARD OF MY SOUL WITH SENTIMENTAL CYPRESS TREES AND FLOWERS IS FILLED THAT I MAY STROLL IN MEDITATION AT MY EASE +8555-292519-0007-2290: IT IS MY HEART HUNG IN THE SKY AND NO CLOUDS EVER FLOAT BETWEEN THE (GRAVE->GRAY) FLOWERS AND MY HEART ON HIGH +8555-292519-0008-2291: OVER THE TRACK LINED CITY STREET THE YOUNG (MEN->MAN) THE GRINNING MEN PASS +8555-292519-0009-2292: (HO->HOME) YE SAILS THAT SEEM TO (WANDER IN->WONDER AND) DREAM FILLED MEADOWS SAY IS THE SHORE WHERE I STAND THE ONLY FIELD OF STRUGGLE OR ARE YE HIT AND BATTERED OUT THERE BY WAVES AND WIND GUSTS AS YE TACK OVER A CLASHING SEA OF WATERY ECHOES +8555-292519-0010-2293: OLD DANCES ARE SIMPLIFIED OF THEIR YEARNING BLEACHED BY TIME +8555-292519-0011-2294: HE HAD GOT INTO HER COURTYARD +8555-292519-0012-2295: THROUGH THE BLACK NIGHT RAIN HE SANG TO HER WINDOW BARS +8555-292519-0013-2296: THAT WAS BUT RUSTLING OF (DRIPPING->TRIPPING) PLANTS IN THE DARK +8555-292519-0014-2297: SHE WAS ALONE THAT NIGHT +8555-292519-0015-2298: HE HAD BROKEN INTO HER COURTYARD +908-157963-0000-1321: TO FADE AWAY LIKE MORNING BEAUTY FROM HER MORTAL DAY DOWN BY THE RIVER OF (ADONA->ADONNA) HER SOFT (VOICE IS->VOICES) HEARD AND THUS HER GENTLE LAMENTATION FALLS LIKE MORNING DEW +908-157963-0001-1322: O LIFE OF THIS OUR SPRING +908-157963-0002-1323: WHY FADES THE LOTUS OF THE WATER +908-157963-0003-1324: WHY FADE THESE CHILDREN OF THE SPRING +908-157963-0004-1325: (THEL->FELL) IS LIKE A (WATRY->WATERY) BOW AND LIKE A PARTING CLOUD LIKE A REFLECTION IN A GLASS LIKE SHADOWS IN THE WATER LIKE DREAMS OF INFANTS LIKE A SMILE UPON AN (INFANTS->INFANT'S) FACE +908-157963-0005-1326: LIKE THE DOVES (VOICE->BOYS) LIKE TRANSIENT DAY LIKE MUSIC IN THE AIR AH +908-157963-0006-1327: AND GENTLE SLEEP THE SLEEP OF DEATH AND GENTLY HEAR THE VOICE OF HIM THAT WALKETH IN THE GARDEN IN THE EVENING TIME +908-157963-0007-1328: THE (LILLY->LILY) OF THE VALLEY BREATHING IN THE HUMBLE GRASS (ANSWERD->ANSWERED) THE LOVELY (MAID AND->MAIDEN) SAID I AM A (WATRY->WATCHERY) WEED AND I AM VERY SMALL AND LOVE TO DWELL IN LOWLY VALES SO WEAK THE GILDED BUTTERFLY SCARCE (PERCHES->PURCHASE) ON MY HEAD YET I AM VISITED FROM HEAVEN AND HE THAT SMILES ON ALL WALKS IN THE VALLEY AND EACH MORN OVER ME SPREADS HIS HAND SAYING REJOICE THOU HUMBLE GRASS THOU (NEW BORN->NEWBORN) LILY FLOWER +908-157963-0008-1329: THOU GENTLE MAID OF SILENT VALLEYS AND OF MODEST BROOKS FOR THOU (SHALL->SHALT) BE CLOTHED IN LIGHT AND FED WITH MORNING (MANNA->MANA) TILL (SUMMERS->SUMMER'S) HEAT MELTS THEE BESIDE THE FOUNTAINS AND THE SPRINGS TO FLOURISH IN ETERNAL VALES THEY WHY SHOULD (THEL->THOU) COMPLAIN +908-157963-0009-1330: WHY SHOULD THE MISTRESS OF THE (VALES->VEILS) OF HAR UTTER A SIGH +908-157963-0010-1331: SHE (CEASD->CEASED) AND (SMILD->SMILED) IN TEARS THEN SAT DOWN IN HER SILVER SHRINE +908-157963-0011-1332: WHICH THOU DOST SCATTER ON EVERY LITTLE BLADE OF GRASS THAT SPRINGS REVIVES THE MILKED COW AND TAMES THE FIRE BREATHING STEED +908-157963-0012-1333: BUT (THEL->THOU) IS LIKE A FAINT CLOUD KINDLED AT THE RISING SUN I VANISH FROM MY PEARLY THRONE AND WHO SHALL FIND MY PLACE +908-157963-0013-1334: AND (WHY IT->WYAT) SCATTERS ITS BRIGHT BEAUTY (THRO->THROUGH) THE (HUMID->HUMAN) AIR +908-157963-0014-1335: DESCEND O LITTLE CLOUD AND HOVER 
BEFORE THE EYES OF (THEL->FELL) +908-157963-0015-1336: O LITTLE CLOUD THE VIRGIN SAID I CHARGE THEE TO TELL ME WHY THOU COMPLAINEST NOW WHEN IN ONE HOUR THOU FADE AWAY THEN WE SHALL SEEK THEE BUT NOT FIND AH (THEL->FELL) IS LIKE TO THEE +908-157963-0016-1337: I PASS AWAY YET I COMPLAIN AND NO ONE HEARS MY VOICE +908-157963-0017-1338: THE CLOUD THEN (SHEWD->SHOWED) HIS GOLDEN HEAD AND HIS BRIGHT FORM (EMERG'D->EMERGED) +908-157963-0018-1339: AND (FEAREST->FEAR'ST) THOU BECAUSE I VANISH AND AM SEEN NO MORE +908-157963-0019-1340: IT IS TO TENFOLD LIFE TO LOVE TO PEACE AND RAPTURES (HOLY->WHOLLY) UNSEEN DESCENDING WEIGH MY LIGHT WINGS UPON BALMY FLOWERS AND COURT THE FAIR EYED DEW TO TAKE ME TO HER SHINING TENT THE WEEPING VIRGIN TREMBLING KNEELS BEFORE THE RISEN SUN +908-157963-0020-1341: TILL WE ARISE (LINK'D->LINKED) IN A GOLDEN BAND AND NEVER PART BUT WALK UNITED BEARING FOOD TO ALL OUR TENDER FLOWERS +908-157963-0021-1342: LIVES NOT ALONE NOR (OR->OF) ITSELF FEAR NOT AND I WILL CALL THE WEAK WORM FROM ITS LOWLY BED AND THOU SHALT HEAR ITS VOICE +908-157963-0022-1343: COME FORTH WORM AND THE SILENT VALLEY TO THY PENSIVE QUEEN +908-157963-0023-1344: THE HELPLESS WORM AROSE AND SAT UPON THE (LILLYS->LILY'S) LEAF AND THE BRIGHT CLOUD (SAILD->SAILED) ON TO FIND HIS PARTNER IN THE VALE +908-157963-0024-1345: IMAGE OF WEAKNESS ART THOU BUT A WORM +908-157963-0025-1346: I SEE THEY LAY HELPLESS AND NAKED WEEPING AND NONE TO ANSWER NONE TO CHERISH THEE WITH (MOTHERS->MOTHER'S) SMILES +908-157963-0026-1347: AND SAYS THOU MOTHER OF MY CHILDREN I HAVE LOVED THEE AND I HAVE GIVEN THEE A CROWN THAT NONE CAN TAKE AWAY +908-157963-0027-1348: AND LAY ME DOWN IN THY COLD BED AND LEAVE MY SHINING LOT +908-157963-0028-1349: OR AN EYE OF GIFTS AND GRACES (SHOWRING->SHOWERING) FRUITS AND COINED GOLD +908-157963-0029-1350: WHY A TONGUE (IMPRESS'D->IMPRESSED) WITH HONEY FROM EVERY WIND +908-157963-0030-1351: WHY AN EAR A WHIRLPOOL FIERCE TO DRAW CREATIONS IN +908-31957-0000-1352: ALL IS SAID WITHOUT A WORD +908-31957-0001-1353: I SIT BENEATH THY LOOKS AS CHILDREN DO IN THE NOON SUN WITH SOULS THAT TREMBLE THROUGH THEIR HAPPY EYELIDS FROM AN UNAVERRED YET (PRODIGAL->CHRONICAL) INWARD JOY +908-31957-0002-1354: I DID NOT WRONG MYSELF SO BUT I PLACED A WRONG ON THEE +908-31957-0003-1355: WHEN CALLED BEFORE I TOLD HOW HASTILY I DROPPED MY FLOWERS OR (BRAKE->BREAK) OFF FROM A GAME +908-31957-0004-1356: SHALL I NEVER MISS HOME TALK AND BLESSING AND THE COMMON KISS THAT COMES TO EACH IN TURN NOR COUNT IT STRANGE WHEN I LOOK UP TO DROP ON A NEW RANGE OF WALLS AND FLOORS ANOTHER HOME THAN THIS +908-31957-0005-1357: ALAS I HAVE GRIEVED SO I AM HARD TO LOVE +908-31957-0006-1358: OPEN THY HEART WIDE AND FOLD WITHIN THE WET WINGS OF THY DOVE +908-31957-0007-1359: COULD IT MEAN TO LAST A LOVE SET PENDULOUS BETWEEN SORROW AND SORROW +908-31957-0008-1360: NAY I RATHER THRILLED DISTRUSTING EVERY LIGHT THAT SEEMED TO GILD THE ONWARD PATH (AND FEARED->IN FEAR) TO (OVERLEAN->OVERLENE) A FINGER EVEN +908-31957-0009-1361: AND THOUGH I HAVE GROWN SERENE AND STRONG SINCE THEN I THINK THAT GOD HAS WILLED A STILL RENEWABLE FEAR +908-31957-0010-1362: O LOVE O TROTH +908-31957-0011-1363: AND LOVE BE FALSE +908-31957-0012-1364: IF HE TO KEEP ONE OATH MUST LOSE ONE JOY BY HIS LIFE'S STAR FORETOLD +908-31957-0013-1365: SLOW TO WORLD GREETINGS QUICK WITH ITS O LIST WHEN THE (ANGELS->ANGEL) SPEAK +908-31957-0014-1366: A RING OF AMETHYST I COULD NOT WEAR HERE PLAINER TO MY SIGHT THAN THAT FIRST KISS +908-31957-0015-1367: THAT WAS THE CHRISM OF 
LOVE WHICH (LOVE'S->LOVES) OWN CROWN WITH SANCTIFYING SWEETNESS DID (PRECEDE->PROCEED) THE THIRD UPON MY LIPS WAS FOLDED DOWN (IN PERFECT->IMPERFECT) PURPLE STATE SINCE WHEN INDEED I HAVE BEEN PROUD AND SAID MY LOVE MY OWN +908-31957-0016-1368: DEAREST TEACH ME SO TO POUR OUT GRATITUDE AS THOU DOST GOOD +908-31957-0017-1369: MUSSULMANS AND (GIAOURS->GEY ORDS) THROW KERCHIEFS AT A SMILE AND HAVE NO RUTH FOR ANY WEEPING +908-31957-0018-1370: BUT THOU ART NOT SUCH A LOVER MY BELOVED +908-31957-0019-1371: THOU CANST WAIT THROUGH SORROW AND SICKNESS TO BRING SOULS TO TOUCH AND THINK IT SOON WHEN OTHERS CRY TOO LATE +908-31957-0020-1372: I (THANK->THINK) ALL WHO HAVE LOVED ME IN THEIR HEARTS WITH THANKS AND LOVE FROM MINE +908-31957-0021-1373: OH TO SHOOT MY SOUL'S FULL MEANING INTO FUTURE YEARS THAT THEY SHOULD LEND IT UTTERANCE AND SALUTE LOVE THAT ENDURES FROM LIFE THAT DISAPPEARS +908-31957-0022-1374: THEN I LONG TRIED BY NATURAL ILLS RECEIVED THE COMFORT FAST WHILE BUDDING AT THY SIGHT MY PILGRIM'S STAFF GAVE OUT GREEN LEAVES WITH MORNING DEWS (IMPEARLED->IMPERILLED) +908-31957-0023-1375: I LOVE THEE FREELY AS MEN STRIVE FOR RIGHT I LOVE THEE PURELY AS THEY TURN FROM (PRAISE->PREISE) +908-31957-0024-1376: I LOVE THEE WITH THE PASSION PUT TO USE IN MY OLD (GRIEFS->GREEDS) AND WITH MY CHILDHOOD'S FAITH +908-31957-0025-1377: I LOVE THEE WITH A LOVE I SEEMED TO LOSE WITH MY LOST SAINTS I LOVE THEE WITH THE BREATH SMILES TEARS OF ALL MY LIFE AND IF GOD CHOOSE I SHALL BUT LOVE THEE BETTER AFTER DEATH + +SUBSTITUTIONS: count ref -> hyp +29 AND -> IN +19 IN -> AND +18 THE -> A +15 A -> THE +10 AN -> AND +7 THIS -> THE +6 RODOLFO -> RUDOLPHO +6 OF -> A +6 MAN -> MEN +6 I'VE -> I +6 ANYONE -> ANY +5 TIMAEUS -> TIMAS +5 SILVIA -> SYLVIA +5 O -> OF +5 I'M -> I +5 ANDERS -> ANDREWS +4 TWO -> TOO +4 TOWARDS -> TOWARD +4 SOAMES -> SOLMES +4 ONE -> WON +4 MAINHALL -> MAIN +3 WERE -> WHERE +3 VALLIERE -> VALLIERS +3 TRAVELING -> TRAVELLING +3 TODAY -> TO +3 THEL -> FELL +3 THEIR -> THERE +3 THEE -> THE +3 THEATER -> THEATRE +3 THE -> THEIR +3 THAT -> THE +3 THAT -> IT +3 SOMEONE -> SOME +3 PRACTISE -> PRACTICE +3 OH -> O +3 NAOMI -> THEY +3 METER -> METRE +3 MEN -> MAN +3 LEOCADIA -> LOCATIA +3 KAFFAR -> KAFFIR +3 IS -> AS +3 HOLLAN -> HOLLAND +3 FORMALLY -> FORMERLY +3 EMIL -> AMIEL +3 DOBRYNA -> DOBRINA +3 CRESSWELL -> CRESWELL +3 BRITANNULA -> BRITAIN +3 BANNISTER -> BANISTER +3 ANDERS -> ANDREW'S +2 YOU'RE -> YOU +2 XAVIER -> ZEVIER +2 WOULD -> WILL +2 WHITTAWS -> WIDOWS +2 WHERE -> WERE +2 WHEN -> ONE +2 WELL -> WHILE +2 WE'LL -> WILL +2 VAPOURS -> VAPORS +2 VANDERPOOL -> VAN +2 UPON -> UP +2 TONIGHT -> TO +2 TO -> TWO +2 TO -> THROUGH +2 TIMAEUS -> TO +2 THEY -> THERE +2 THEY -> THE +2 THEN -> THAN +2 THEN -> AND +2 THEL -> THOU +2 THEIR -> THE +2 THE -> THIS +2 THE -> THAT +2 THAN -> THAT +2 TABU -> TABOO +2 SYMPOSIUM -> SUPPOSIUM +2 SOMETIME -> SOME +2 SLANG -> SLING +2 SIF -> SIFT +2 SHE -> YOU +2 SEEM -> SEEMED +2 SAIL -> SALE +2 ROUND -> AROUND +2 RESERVE -> RESERVED +2 READ -> RED +2 PLATONISTS -> PLATINISTS +2 PARLOR -> PARLOUR +2 OR -> A +2 O -> OH +2 NOW -> THOU +2 NO -> NOR +2 NO -> KNOW +2 MUNNY -> MONEY +2 MORNIN -> MORNING +2 LEAVENWORTH -> LEVINWORTH +2 LEAVENWORTH -> LEVIN +2 KAFFAR'S -> KAFFIR'S +2 IT'S -> ITS +2 IT'S -> IT +2 IT -> YOU +2 IT -> HE +2 IS -> HIS +2 INTO -> AND +2 IN -> A +2 I'LL -> I +2 I'D -> I +2 I -> I'M +2 HONOUR -> HONOR +2 HOLY -> WHOLLY +2 HOLBEIN -> HOLBINE +2 HILDA -> HELDA +2 HER -> THE +2 HER -> A +2 HELLO -> HALLO +2 HE'S -> HE +2 HE -> WE +2 HE -> HIS +2 
HAS -> HAD +2 HALLO -> HELLO +2 HAD -> HEAD +2 GREY -> GRAY +2 FOUNDED -> FOUND +2 FOREVER -> FOR +2 FAIRVIEW -> FAIR +2 EVERYONE -> EVERY +2 EVERYDAY -> EVERY +2 DISSENT -> DESCENT +2 DID -> DEAD +2 DEFINE -> TO +2 DE -> THE +2 DE -> DENISCHALANT +2 CRITIAS -> CRITIUS +2 COURT -> COURTYARD +2 COULD -> GOOD +2 CONFESS -> CONFESSED +2 COLOR -> COLOUR +2 CHAISE -> CHASE +2 CARL -> KARL +2 BUL -> BULL +2 BRAKE -> BREAK +2 BEHAVIOUR -> BEHAVIOR +2 BEATITUDE -> BE +2 BATTLEAX -> BATTLEX +2 BATTLEAX -> BATTLE +2 AY -> I +2 AT -> IT +2 ASTOR -> ASTER +2 AS -> A +2 ANOTHER -> THE +2 ANDELLA -> AND +2 AND -> AS +2 AND -> AN +2 ALEXANDRA -> ALEXANDER +2 A -> TO +2 A -> OF +2 A -> AS +1 ZORA'S -> ZORAS +1 ZORA -> ZORAH +1 ZORA -> SORA +1 ZOOF'S -> ZOV'S +1 YOUR -> YOU +1 YOUR -> OR +1 YOU'RE -> BOX +1 YOU'LL -> YOU +1 YOU'LL -> DAMNLY +1 YOU -> YE +1 YOU -> WHO +1 YOU -> HE +1 YEARNING -> YEARNIN +1 YE -> BELL +1 YAMS -> A +1 XAVIER -> SAVIER +1 WYLDER -> WHILE +1 WRITE -> RIGHT +1 WOULD -> WITH +1 WOULD -> WHAT +1 WOULD -> WERE +1 WORST -> WORSE +1 WORLD -> WORLD'S +1 WOODS -> WOOD +1 WOODBEGIRT -> WOULD +1 WOOD -> WOODCUTTERS +1 WONDERING -> WANDERING +1 WOMAN'S -> WOMEN'S +1 WITHIN -> WITH +1 WITHES -> WIDTHS +1 WITH -> WHICH +1 WITH -> WHEN +1 WITH -> WAS +1 WITCH -> WHICH +1 WINTER -> WIN +1 WIND -> SQUINT +1 WILLS -> WILDS +1 WILL -> WOULD +1 WIFE -> WHITE +1 WHY -> WYAT +1 WHY -> MY +1 WHOSE -> WHO +1 WHOLLY -> HOLY +1 WHO'S -> WHOSE +1 WHO -> WHOSE +1 WHITTAWD -> WID +1 WHITTAW -> WIDOW +1 WHITE -> WIGHT +1 WHIRLPOOL -> WAR +1 WHIPPED -> WHIP +1 WHERE -> WITH +1 WHERE -> WHEREINWITH +1 WHERE -> WHERE'S +1 WHEN -> BUT +1 WHEN -> AND +1 WHELPS -> WHELMS +1 WHATEVER -> WHATSOEVER +1 WHAT'S -> WHAT +1 WHAT -> WHEN +1 WHAT -> ONE +1 WHALE -> WELL +1 WHALE -> WAIL +1 WET -> WHITE +1 WESTMERE -> WESTMER +1 WESLEY -> WESTLEY +1 WERE -> RECALL +1 WERE -> ARE +1 WELL -> WILL +1 WELL -> OO +1 WELL -> FOR +1 WELCOMED -> WELCOME +1 WEDNESDAY -> WINDSAY +1 WEBS -> WHIPS +1 WEATHER -> WHETHER +1 WEAR -> WHERE +1 WEAKLY -> WEEKLY +1 WE'VE -> WITH +1 WE'RE -> WERE +1 WE'RE -> WE +1 WE -> WE'VE +1 WE -> SEA +1 WAVES -> WAVE +1 WATRY -> WATERY +1 WATRY -> WATCHERY +1 WATERMILL -> WATER +1 WAS -> VIEWS +1 WAS -> IS +1 WARRENTON'S -> WARRENTONS +1 WARLIKE -> WORE +1 WANDER -> WONDER +1 VOLTAIRE -> OLD +1 VOICE -> VOICES +1 VOICE -> BOYS +1 VINES -> ICE +1 VILLEROY -> VILLAY +1 VIGNETTE -> VINEY +1 VICARIOUS -> YARE'S +1 VIADUCT -> VIADUK +1 VERY -> VEREMENT +1 VERSE -> FIRST +1 VERNE'S -> VERNS +1 VAUDOIS -> FAUDOIR +1 VARIABILITY -> VERY +1 VANES -> VEINS +1 VANDERPOOLS -> VANDER +1 VALOR -> VALOUR +1 VALLEYED -> VALLED +1 VALES -> VEILS +1 UTTER -> OUT +1 UTAH -> UTA +1 UPON -> ON +1 UP -> UPSTAIRS +1 UP -> OF +1 UNWARILY -> THEN +1 UNTO -> INTO +1 UNSEPARATED -> ON +1 UNLIKE -> I +1 UNJUST -> UNJUSTIFILL +1 UNITED -> UTIT +1 UNFINISHED -> UNFINISHANCES +1 UNEXCEPTIONABLY -> UNEXCEPTIONALLY +1 UNDERGROUND -> UNDER +1 UNCLENCHED -> CLENCHED +1 UNC -> YUNK +1 UNC -> UNCONOCTED +1 UNC -> UNCAN +1 UNC -> UN +1 UN -> IN +1 UD -> HAD +1 TWO -> TUTRILOGIES +1 TWO -> TO +1 TWITE -> TIGHT +1 TURNOVER -> TURN +1 TURNER'S -> TURNERS +1 TUPPENY -> TOPPENNY +1 TRY -> TRIED +1 TREDDLESTON -> TREADLESTON +1 TREBLE -> TROUBLE +1 TRAVELERS -> TRAVELLERS +1 TRAVELER -> TRAVELLERS +1 TOWELLING -> TOWELINGS +1 TOULD -> DID +1 TOPEKA -> TOPECA +1 TOP -> TOPRUNG +1 TOOMS -> TOMB'S +1 TOO -> TWO +1 TOO -> TO +1 TONNAY -> TONY +1 TOILETTE -> TOILET +1 TO -> WHOSE +1 TO -> UP +1 TO -> TOO +1 TO -> OF +1 TO -> INTO +1 TO -> IN +1 TO -> 
DOES +1 TO -> A +1 TIRESOME -> TO +1 TINTORET -> TINCTARETTE +1 TINTINT -> TINTANT +1 TIME -> YOU +1 TIMAEUS -> TIMIRS +1 TIMAEUS -> TIMEUS +1 TIBI -> TIBEE +1 THUS -> LUST +1 THRO -> THROUGH +1 THOUSAND -> THOUSANDTH +1 THOUGHT -> BOUGHT +1 THOUGH -> THE +1 THORLEIF -> TORE +1 THORKEL -> TORQUAL +1 THORKEL -> TORKO +1 THORKEL -> TORCOAL +1 THIS -> OSTENSITY +1 THINKS -> THINK +1 THINGS -> THANKS +1 THEY -> MAY +1 THEY -> FATE +1 THEREIN -> THEY +1 THERE -> THERE'S +1 THERE -> THEIR +1 THEN -> IN +1 THEM -> THE +1 THEM -> HIM +1 THEIR -> THEY'RE +1 THEE'S -> THESE +1 THE -> WHO +1 THE -> TO +1 THE -> THEY +1 THE -> THEATILITY +1 THE -> IN +1 THE -> BUT +1 THAT'S -> ITS +1 THAT -> THAT'S +1 THAT -> THAN +1 THANK -> THINK +1 THAN -> THEN +1 THAN -> IN +1 TECHNIQUE -> TYPE +1 TEA -> T +1 TARANTULA -> TURANSULA +1 TALKERS -> TALK +1 TABU -> BOO +1 TABLE -> TABLECLOTH +1 SWOONS -> SWOON +1 SWEEP -> SWEPT +1 SWAN -> SWAY +1 SUSPICIONS -> SUSPICION +1 SURFACES -> SERVICES +1 SUPERFLUOUS -> SUPERVOUS +1 SUMMONED -> SUMMON +1 SUMMERS -> SUMMER'S +1 SUITCASE -> SUIT +1 STYLE -> STYLANT +1 STUDY -> SET +1 STREAMLINE -> STREAM +1 STRAIGHT -> STRAIT +1 STORY'S -> STORIES +1 STOCKBROKER -> DOCKBROKER +1 STEPHANOS -> STUFFANOS +1 STEPHANOS -> STEPHANO'S +1 STEEL'D -> STEELED +1 STEADY -> STUDY +1 STATE -> STATES +1 STARTS -> START +1 STANDS -> STAND +1 STAIR -> STARE +1 STAGE -> STEED +1 STAGE -> SAGE +1 SPRING -> SPRANG +1 SPLENDOR -> SPLENDOUR +1 SPLENDET -> SPLENDID +1 SPLASHED -> BLASHED +1 SPIN -> SPEND +1 SPELLED -> SPELL'D +1 SPECIALISED -> SPECIALIZED +1 SOUTHEY'S -> SO +1 SOUTHEY -> SELVIE +1 SOUTHEY -> SALVIE +1 SOU -> SOUS +1 SORRY -> SIR +1 SOPHISTRY -> SOPHISTRI +1 SOOTHED -> SOOTHE +1 SON -> FUN +1 SOMETHING -> SOMETHIN +1 SOMETHING -> SOME +1 SOMETHING -> OR +1 SOME -> SOMETIME +1 SOME -> CROSS +1 SOLON'S -> SILENCE +1 SOLON -> SULLEN +1 SOLON -> SOLID +1 SOLON -> SOLEMN +1 SOLILOQUY -> SOLOQUIE +1 SOAMES -> PSALMS +1 SO -> SUSE +1 SO -> SODIN +1 SMILD -> SMILED +1 SMELLS -> MILLS +1 SLEEVE -> STEVE +1 SLEEP -> SLEEVE +1 SLANG -> SLAYING +1 SKILLFUL -> SKILFUL +1 SKEPTICAL -> SCEPTICAL +1 SIZE -> SIZED +1 SITTING -> CITY +1 SIR -> ARE +1 SINCE -> SINS +1 SIN -> IN +1 SILENT -> SILAGE +1 SIGHT -> SIGHTSEERS +1 SIGHED -> SIDE +1 SHUTTING -> SHEDDING +1 SHOWRING -> SHOWERING +1 SHOULD -> WOULD +1 SHODDY -> SHOTTY +1 SHIP -> SHIP'S +1 SHEWD -> SHOWED +1 SHELL -> CHILL +1 SHE'S -> SHE +1 SHARPS -> SHARP'S +1 SHARP'ST -> SHARPEST +1 SHAPEN -> SHAKEN +1 SHAPELY -> SHABBY +1 SHAME -> SHEEM +1 SHALL -> SHALT +1 SHABATA -> CHEBATA +1 SERVICEABILITY -> SURFABILITY +1 SERVE -> SERVED +1 SERVANT -> SERVANTS +1 SENTENCES -> SENTENCE +1 SENT -> SET +1 SENCE -> SINCE +1 SEMON'S -> SIMMONS +1 SEEMS -> SEEMED +1 SEATING -> SITTING +1 SEA -> SEAT +1 SCYTHE -> SIGH +1 SCUTCHEON -> DUCHEN +1 SCUSE -> EXCUSE +1 SCUMMED -> SCUMBED +1 SCRAPBOOKS -> SCRAP +1 SCOUTING -> SCOUT +1 SCHOOL -> SCHOOLS +1 SCHOOL -> SCHOOLBOYS +1 SCEVRA -> SCAVER +1 SCEURA -> SKURA +1 SCENT -> SIN +1 SCENE -> SEEN +1 SCATHE -> SCATH +1 SCAROONS -> SCARONS +1 SAW -> SAUL +1 SAW -> SALL +1 SAVED -> SEE +1 SAUVEUR -> SAVER +1 SATE -> SAT +1 SANG -> SAYING +1 SALINE -> SAILING +1 SALIENT -> SAILORED +1 SAILD -> SAILED +1 SAIL -> SILL +1 SAID -> SIT +1 RUFUS -> RUFFUS +1 RUE -> GRUE +1 ROUTE -> ROUT +1 ROSSETER -> ROSSOTER +1 ROOTS -> WOODS +1 ROI -> ROY +1 ROGERS'S -> ROGERS +1 ROERER -> ROAR +1 RODOLFO'S -> RIDOLPH'S +1 RODOLFO -> RUDOLPHU +1 RODOLFO -> RUDOLPHAL +1 ROCKED -> ROCK +1 ROBIN'S -> ROBINS +1 RHONE -> ROAN +1 REWEIGHED -> 
REWAYED +1 REMOVE -> MOVED +1 REMOV'D -> REMOVED +1 REMEMBER -> REMEMBERED +1 REMARK -> REMARKED +1 REMAINED -> REMAINING +1 REMAIN -> REMAINED +1 RELOCATED -> RE +1 RELIES -> REALIZE +1 REIGNED -> RAINED +1 REGAINED -> REGAIN +1 REFUSED -> REFUSE +1 REENFORCEMENTS -> REINFORCEMENTS +1 REEDER -> READER +1 RED -> READ +1 RECORD -> RECORDS +1 RECOGNISED -> RECOGNIZED +1 REBUK'D -> REBUKED +1 RANCOR -> RANK +1 QUINSON -> QUINCENT +1 QUASI -> COURSE +1 QUASH -> CRASH +1 QUART -> COURT +1 PYTHAGOREANS -> PITHAGORIANS +1 PUTTIN -> PUTTING +1 PURSE -> PERSON +1 PURPOSED -> PURPOSE +1 PURIST -> PUREST +1 PSALM -> SUM +1 PROVES -> PROVED +1 PROSELYTING -> PROSELLING +1 PROSCRIBED -> PRESCRIBED +1 PRODIGAL -> CHRONICAL +1 PRIOR -> PRAYER +1 PREVENT -> PRESENT +1 PREVAILED -> PREVAIL +1 PRETENSE -> PRETENCE +1 PRECONCEIVED -> FREQUENCEDE +1 PRECIEUSES -> PURSUS +1 PRECEDE -> PROCEED +1 PRE -> PRESOCRATIC +1 PRAISE -> PREISE +1 PRAISE -> PHRASE +1 PRAIRIE -> PRAIRINES +1 PRACTICE -> PRACTISE +1 POWER -> BOWER +1 POSTERITY -> PROSTERITY +1 POSITIVELY -> WAS +1 POPHAM -> WAS +1 POPHAM -> POPPUM +1 POLAR -> POLLAR +1 POISON'D -> POISONED +1 POINT -> BLINT +1 POETESS -> POETES +1 PLURAL -> PEARL +1 PLESIOSAURUS -> PLECIUS +1 PLEASANCE -> PLEASANTS +1 PLAITS -> PLATES +1 PLAIN -> PLAYING +1 PLACE -> PLACES +1 PIERC'D -> PIERCED +1 PICK -> PICTURES +1 PHAEDRUS -> FEGERIS +1 PERCHES -> PURCHASE +1 PENINSULA -> PRONUNCILA +1 PEGRENNE -> PAGRIN +1 PEARL'S -> PEARLS +1 PATIENTS -> PATIENCE +1 PATIENCE -> PATIENT +1 PATHOLOGICAL -> PETHOLOGICAL +1 PASSED -> PAST +1 PASSAGE -> PASSAGEWAY +1 PASCHAL -> PASSIONAL +1 PARTICLES -> PARTICLE +1 PARSONS -> PARSON'S +1 PARSONS -> PARSON +1 PAROQUET -> PERICE +1 PARAPHERNALIA -> PAIR +1 PARALLELOGRAM -> PARALLELLOGRAM +1 PAPAL -> PEPPEL +1 PANTS -> HANDS +1 PANE -> PAIN +1 OZMA -> OSMO +1 OWEN -> OWENAIRS +1 OVERLEAN -> OVERLENE +1 OUTSTRIP -> OUTSTRIPPED +1 OUTRAGE -> OUTRAGED +1 OUT -> AT +1 OUR -> HER +1 OUR -> A +1 OUGHTER -> ORDERED +1 OTTLEY'S -> OUTLEY'S +1 OTHERWORLDLY -> OTHER +1 OTHER -> OTTER +1 OTHER -> GROUND' +1 OTHER -> ARE +1 OSH -> I +1 OSAGE -> O +1 ORDERED -> ORDER +1 OR -> WERE +1 OR -> SCENT +1 OR -> ORDER +1 OR -> OF +1 OR -> FOR +1 OPHELIA -> OF +1 OPAQUE -> OPE +1 ONTO -> ON +1 ONE -> WHEN +1 ONE -> SWUNG +1 ON -> UNWARRANTON'S +1 ON -> UNLAKE +1 ON -> ONGOLATIONS +1 ON -> ANOTHER +1 ON -> ANGULATIONS +1 OMELETTE -> OMELET +1 OLIVE'S -> OLIVES +1 OLIVE'S -> ALL +1 OLAF -> ALL +1 OH -> I'LL +1 OFFICES -> OFFICERS +1 OFFICERS -> OFFICER'S +1 OFFICERS -> OFFICER +1 OFFENSES -> OFFENCES +1 OFF -> OPT +1 OF -> OTHOR +1 OF -> OR +1 OF -> IS +1 OCCUPANTS -> OCCUPANT +1 OBSERVED -> OBSERVE +1 OARS -> WARS +1 OAKS -> YOLKS +1 O'ER -> OR +1 NUMIDIA -> MEDIA +1 NOW -> NO +1 NOVEL'S -> NOVELS +1 NOUGHT -> NOT +1 NOTTINGHAM -> NODDING +1 NOTTINGHAM -> ARE +1 NOTHIN -> NOTHING +1 NOT -> OUGHT +1 NOT -> NOW +1 NORTHWEST -> NORTH +1 NORTHWARDS -> NORTHWARD +1 NORTHERNERS -> DERPOOL +1 NOR -> OR +1 NON -> NONCOMPOSTER +1 NODS -> GNAWEDS +1 NINE -> NOT +1 NEW -> NEWBORN +1 NET -> NED +1 NEO -> NEW +1 NEO -> NEOPLATANISTS +1 NELLY -> NELLIERS +1 NEIGHBOUR -> NEIGHBOR +1 NEIGHBORHOOD -> NEIGHBOURHOOD +1 NEIGHBOR -> NEIGHBOUR +1 NEARER -> NEAR +1 NE'ER -> NEVER +1 NARROWS -> AND +1 NAOMI -> NOW +1 NAMED -> NAME +1 N -> THAN +1 MY -> MIGALLATIONS +1 MY -> I +1 MY -> BY +1 MOUTHED -> MOUTH +1 MOURN -> MOURNED +1 MOUNTED -> MOUNTAIN +1 MOTHERS -> MOTHER'S +1 MORMONISM -> WARMONISM +1 MONTMARTRE -> MONTMARTRA +1 MONTMARTRE -> MONT +1 MONTFICHET -> MONT +1 MONTFICHET -> 
MARTFICHET +1 MONSTERS -> MASTERS +1 MOMBI -> MOMBY +1 MOLDED -> MOULDED +1 MOHICAN -> MOHICANS +1 MO -> MOLD +1 MISTS -> MIST +1 MISTER -> THE +1 MIST -> MISTS +1 MISSOURIANS -> MISSOURIENS +1 MISSOURIANS -> MISSOURIANCE +1 MISS -> MISTER +1 MISS -> MISSY +1 MINT -> MENT +1 MINE -> MIND +1 MILLION'D -> MILLIONED +1 MILLIGRAM -> MILAGRAM +1 MILITATED -> MITIGATED +1 MILES -> MYLES +1 MIKE -> MICHAEL +1 METERS -> METRES +1 METER -> METERPLATES +1 METER -> METEOR +1 METAL -> MEDAL +1 MERSEY -> MERCY +1 MERRY -> MARRIED +1 MERGANSER -> MERGANCER +1 MERCHISTON -> MURCHISTON +1 MEND -> GOODN'T +1 MEN -> AMEN +1 MEET -> MET +1 MEALYBACK -> MEALEY +1 MEADOWCROFT'S -> METICOFF'S +1 MEADOWCROFT -> MEDICRAFT +1 MAY -> THEY +1 MAY -> MAYBE +1 MAUSOLEUM -> MUZOLEUM +1 MATE -> MADE +1 MASTERY -> MYSTERY +1 MASTERS -> MASTER +1 MASTER'S -> MASTERS +1 MARY -> MERRY +1 MARVELOUS -> MARVELLOUS +1 MARSHALL -> MARTIAL +1 MARSHALL -> MARSHAL +1 MARKED -> MARKET +1 MARIVAUX -> MARAVO +1 MARGOLOTTE -> MARGOLOT +1 MARAIS -> MARA +1 MANY -> MEN +1 MANSERVANT -> MAN'S +1 MANNA -> MANA +1 MANIFESTED -> MANIFEST +1 MAIDS -> MATES +1 MAID -> MAIDEN +1 MADAM -> MADAME +1 MACDONALDS -> MON +1 MAC -> MICARDLE +1 MABEL -> MAYBEL +1 LYNCHINGS -> LUNCHINGS +1 LUTHER'S -> LUTHERS +1 LUIS -> LOUIS +1 LUBRICATE -> LUBRICADE +1 LOWER -> LOWERED +1 LOVE'S -> LOVES +1 LOUIS -> LOUISE +1 LOU'S -> LOOSE +1 LOSS -> LOST +1 LORNE -> LORN +1 LOOSE -> LOOSEN +1 LONG -> LAWN +1 LIVES -> LIES +1 LIVERIED -> LIVERYED +1 LITERALLY -> THAT +1 LITER -> LEADER +1 LINK'D -> LINKED +1 LINE -> LIE +1 LILLYS -> LILY'S +1 LILLY -> LILY +1 LILBURN -> LITTLE +1 LIGHT -> LIGHTFOOTED +1 LETS -> THAT'S +1 LET'S -> ITS +1 LESSER -> LESS +1 LEOCADIA'S -> LOCATEOUS +1 LEOCADIA -> LOCALIA +1 LEOCADIA -> LEOCAYA +1 LEOCADIA -> LE +1 LEOCADI -> LOCATIA +1 LEFT -> LAUGHED +1 LEFRANK -> LENG +1 LECOMPTE -> LECOMTE +1 LECOMPTE -> LE +1 LEAVING -> LEAPING +1 LEAVE -> LIVE +1 LEASED -> LEAST +1 LEADS -> LEAVES +1 LAUGHED -> THEIR +1 LARKSPUR -> LARKSBURG +1 LARKSPUR -> LARKSBUR +1 LANTHORN -> LANTERN +1 LAND -> LANDA +1 LAMBENT -> LAMENT +1 LALLIE -> LALLY +1 LAKE -> LEAK +1 LABOUR -> LABOR +1 LA -> LAPE +1 KNOWS -> NOSE +1 KNOW -> KNOWS +1 KNEW -> NEW +1 KNEED -> NEED +1 KNEE -> KNEEP +1 KIRTLAND -> CURTLEND +1 KINGDOMS -> KINGDOM'S +1 KING'S -> KING +1 KICK -> KICKAPOOS +1 KEYNOTE -> KEEN +1 KESWICK -> KEZWICK +1 KEOGH -> KIOPH +1 KATHLEEN -> CATHERINE +1 KANSAS -> KANSA +1 JUST -> JEST +1 JUS -> JUST +1 JEWELER'S -> JEWELLER'S +1 JAW -> JOB +1 JASPER -> JAPS +1 JANE'S -> JANE +1 JAIL -> TRAIL +1 JAGO -> YAGO +1 JAGO -> TRIAGO +1 JAGO -> SIP +1 JAGO -> IAGO +1 JACK -> JACKKNIFE +1 ITS -> IT'S +1 ITS -> HIS +1 IT'S -> TO +1 IT'LL -> IT +1 IT -> TWASN'T +1 IT -> TO +1 IT -> THAT +1 IT -> ITS +1 IT -> AT +1 IT -> AND +1 IS -> WAS +1 IS -> M +1 IS -> IT'S +1 IS -> IT +1 IS -> HAS +1 INVENTORS -> IN +1 INTRENCHMENT -> ENTRENCHMENT +1 INTERESTS -> INTRICTS +1 INTENTS -> INTENSE +1 INNERLOCHY -> INERLOCKY +1 INNERLOCHY -> IN +1 INJURED -> INJURE +1 INJURED -> ANCIENT +1 INFERENCE -> EFFERENCE +1 INFANTS -> INFANT'S +1 INEFFECTUALLY -> IN +1 INCULCATED -> INCALCATED +1 INCLOSED -> ENCLOSED +1 INCERTAINTY -> IN +1 INCANDESCENT -> INCONDESCENT +1 INACTION -> AN +1 IN -> ON +1 IN -> OF +1 IN -> IT +1 IN -> IMPERFECT +1 IN -> AN +1 IMPRESSES -> IMPRESS +1 IMPRESSED -> IMPRESS +1 IMPRESS'D -> IMPRESSED +1 IMPOSED -> AND +1 IMPLORES -> IMPLORS +1 IMPEARLED -> IMPERILLED +1 IMMATURE -> IMMITOR +1 IMBIBED -> IBED +1 IKE -> LIKE +1 IF -> OF +1 IF -> AT +1 ICHTHYOSAURUS -> 
ITTHIASORIS +1 ICHTHYOSAURUS -> ICTOISORUS +1 ICHTHYOSAURUS -> ICT +1 I'M -> ON +1 I -> THY +1 I -> SADD +1 I -> I'VE +1 HUNTLEY -> HUNTLY +1 HUMOUR -> HUMOR +1 HUMOR -> HUMOUR +1 HUMID -> HUMAN +1 HOW -> HALL +1 HOUSECLEANING -> HOUSE +1 HOTBED -> HOT +1 HORTON -> WHARTON +1 HORSEPLAY -> HORSE +1 HORACE -> HORRENTS +1 HOPKINS'S -> HOPKINS +1 HOPES -> HELPS +1 HOPE -> O +1 HONOURED -> HONORED +1 HONOURABLE -> HONORABLE +1 HONORIFIC -> UNERRIFIC +1 HONORABLE -> HON +1 HOMILY -> HUMMILY +1 HOLOCAUST -> HOHLAST +1 HOLMES -> HOMES +1 HOLD -> ALL +1 HO -> HOME +1 HIT -> HID +1 HIS -> A +1 HIM -> EM +1 HILDA'S -> HELDA'S +1 HILDA -> HILDER +1 HIGHEST -> HOUSE +1 HIGH -> HIGHER +1 HIDALGO -> HADALGO +1 HICKEY -> HICKY +1 HETTY -> HETTY'S +1 HERE -> THERE +1 HERE -> HARRY +1 HERACLEITUS -> HERACLITUS +1 HER -> THERE +1 HENCHMEN -> HENCHMAN +1 HEN -> HENLOORD +1 HEN -> HANDLED +1 HELPED -> HELP +1 HEART'S -> HEARTSEASE +1 HEART -> HEARTBROKEN +1 HEAR -> SEE +1 HEAR -> HERE +1 HE'S -> HIS +1 HE'D -> HE +1 HE -> WHOSE +1 HE -> IT +1 HAZEWRAPPED -> HAYES +1 HAWTREY -> HOLTREE +1 HAVING -> HEAVEN +1 HAVE -> HAS +1 HAVE -> HALF +1 HAUGHTY -> HALTING +1 HAS -> IS +1 HAS -> HESITATED +1 HARTS -> HEARTS +1 HARRY -> HARRYTOWN +1 HARRIED -> HURRIED +1 HARMONIZED -> HARMONIZE +1 HARKENED -> HEARKENED +1 HARBORING -> HARBOURING +1 HARANGUE -> HARANG +1 HARALD -> HAROLD +1 HAPPEN -> HAPPENED +1 HANNA -> HAD +1 HANGINGS -> HANGING +1 HANDS -> HANDSOME +1 HAM -> HIM +1 HALLOA -> HULLO +1 HAL -> HELLO +1 HAKON -> HAWKIN +1 HAIRDRESSER -> HAIR +1 HAD -> IS +1 HAD -> HAVE +1 GUISE -> SKIES +1 GUESTS -> GUESS +1 GUEST -> GUESTS +1 GROWS -> GROVES +1 GRINGO -> GRINGAUD +1 GRIEFS -> GREEDS +1 GREY'S -> GRAY'S +1 GREEING -> GREEN +1 GREAT -> GRATE +1 GRAY -> GREY +1 GRAVE -> GRAY +1 GRAPEVINE -> GRAPE +1 GRAND -> GREAT +1 GRAMOPHONE -> GRAMMON +1 GRAM -> GRAHAM +1 GRADUALLY -> GRADUAL +1 GRADES -> GRATES +1 GOVERNMENT -> GOVERNOR +1 GOVERNED -> GOVERN +1 GOSSIP -> GOSSIPS +1 GORGEOUS -> CORGEOUS +1 GOOBERS -> GOULD +1 GOAT'S -> GOATS +1 GOAT -> GO +1 GOAT -> BOAT +1 GIVE -> GAVE +1 GIRARD -> GERARD +1 GILLIKINS -> GYLICANS +1 GILLIKINS -> GILLICKINS +1 GILCHRIST'S -> GILCRE'S +1 GILCHRIST -> GOST +1 GILCHRIST -> GORIST +1 GILCHRIST -> GILCRIS +1 GIER -> GEAREAGLE +1 GIAOURS -> GEY +1 GESTATION -> JUST +1 GEOFFREY'S -> JEFFREY'S +1 GEOFFREY -> JEFFREY +1 GENTLEMEN -> GENTLEMAN +1 GENERAL -> GENERALSHIP +1 GAYLY -> GAILY +1 GAY -> GAME +1 GAMEWELL -> GAINWELL +1 FUTURISTIC -> FUTURESTIC +1 FULNESS -> FULLNESS +1 FULL -> FALL +1 FRONTISPIECE -> FRONTESPIECE +1 FRISKILY -> FRISKLY +1 FRIEND -> BRAND +1 FREE -> FREWS +1 FRANCS -> FRANKS +1 FORWARDED -> FOOTED +1 FORGED -> FORCH +1 FORBES'S -> FORCE +1 FOR -> PROCEEDING +1 FOR -> FUR +1 FOR -> FROM +1 FOR -> FOUR +1 FOR -> FOREVER +1 FOLLOWED -> FOWLED +1 FLUFFINOSE -> FLUFFINO'S +1 FLOYD'S -> FLUD'S +1 FLOUR -> FLOWER +1 FLIGHT -> FIGHT +1 FITZOOTH'S -> FITUTH'S +1 FIRS -> FURS +1 FIREBUGS -> FIRE +1 FIREBALL -> FIRE +1 FINE -> FIND +1 FIND -> FIVE +1 FILL -> FILLED +1 FEW -> YOU +1 FETE -> FIGHT +1 FETCH -> VEGET +1 FELT -> FIL +1 FELT -> FELLED +1 FEELS -> FILLS +1 FEELING -> FILLING +1 FEELIN'S -> FEELINS +1 FEAREST -> FEAR'ST +1 FEARED -> FEAR +1 FAVORITE -> FAVOURITE +1 FAVOR -> FAVOUR +1 FATTENED -> FAT +1 FARTHEST -> FURTHEST +1 FARMHOUSES -> FARM +1 FAMOUS -> FAME +1 FALLEN -> FALL +1 FAIR -> FAIREST +1 EYE -> I +1 EVOLUTION -> REVOLUTION +1 EVERY -> EVERYONE +1 EVENIN'S -> EVENINGS +1 EVA -> EVER +1 ESTATE -> STATE +1 ESTAFANIA -> STEFFANIA +1 ESTAFANIA -> 
ESTAPHANIA +1 ESTAFANIA -> DANIA +1 ESPRIT -> A +1 ESPECIAL -> SPECIAL +1 ESCHEATED -> ISIATED +1 ESCAPED -> ESCAPE +1 ERNEST -> EARNEST +1 ER -> A +1 ENTRANCED -> AND +1 ENTHRALMENT -> ENTHRALLMENT +1 ENTER -> INTO +1 ENSURE -> INSURE +1 ENQUIRIES -> INQUIRIES +1 ENQUIRED -> INQUIRED +1 ENQUIRE -> INQUIRE +1 ENGINEER -> ENGINEERS +1 EMISSIONS -> MISSIONS +1 EMIL -> AMYL +1 EMIGRANT -> IMMIGRANT +1 EMERG'D -> EMERGED +1 EM -> THEM +1 ELSINORE -> ELZINORE +1 ELMO'S -> AIRABLE'S +1 ELCHO -> ELKOE +1 ELABORATE -> CELEBRATE +1 EFFECTING -> AFFECTING +1 EFFECTED -> AFFECTED +1 EDITION -> ADDITION +1 ECCENTRICITY -> EXCENTRICITY +1 EARSHOT -> EAR +1 EARS -> YEARS +1 E -> EVEN +1 DYKES -> DIKES +1 DYIN -> DYING +1 DUST -> DUS +1 DURING -> DREWING +1 DUMPY -> DUMPEY +1 DUMPY -> DON'T +1 DUERER -> DURE +1 DRUNKENNESS -> DRINKENNESS +1 DRUGGIST'S -> DRUGGIST +1 DROPIDAS -> DROPIDUS +1 DRIPPING -> TRIPPING +1 DOWN -> DOWNSTAIRS +1 DOWN -> DOWNREACHING +1 DOUZE -> DUSPORT +1 DOUBLE -> DOUBLED +1 DOOR -> DOORSTEP +1 DONNITHORNE -> DONNYTHORNE +1 DONATISTS -> DONATIST +1 DONA -> DORIS +1 DON'T -> A +1 DOLL -> STALL +1 DOCTRESS -> DOCTRIS +1 DISTRICTS -> DISTRICT +1 DISQUIETUDE -> AS +1 DISPENSE -> SPENCE +1 DISHONOURED -> DISHONORED +1 DISCOLOURED -> DISCOLORED +1 DIATRIBE -> DIETRIBE +1 DIAS -> DAIS +1 DIALOGUES -> DIALECTS +1 DETERMINED -> DETERMINE +1 DESCENT -> DISSENT +1 DELIBERATIVE -> DELIBERATE +1 DELIA -> GELIA +1 DELIA -> DELHIA +1 DEFINED -> THE +1 DEDALUS -> DAEDALUS +1 DEDALOS -> DEAD +1 DECENCY -> DECENCIES +1 DE -> DENISCHERANT +1 DE -> DENISCHANT +1 DAWN'S -> DAWNS +1 DAIRY -> DEARIE +1 D'ESTE -> DESTA +1 CYN -> SIN +1 CUT -> CAUGHT +1 CUSTOMARILY -> CUSTOMARY +1 CURVED -> CARVED +1 CRYSTAL -> CRISTEL +1 CROSSTREES -> CROSS +1 CRESTED -> CRUSTED +1 CRESSWELLS -> WHIRLS +1 CRESSWELL -> CHRISWELL +1 CREIGHTON -> KRETON +1 CREIGHTON -> CRIGHTON +1 CRASWELLERS -> CRESTWELLERS +1 CRASWELLER -> CRUSSWELLER +1 COZIER -> COSIER +1 COW -> COWSHED +1 COURT'S -> COURTS +1 COURT -> CORPS +1 COUNTRY'S -> COUNTRY +1 COUNSELS -> COUNCILS +1 COUNSELLED -> COUNSELS +1 COULDN'T -> POPLED +1 COULDN'T -> COULDN' +1 COULD -> COULDN'T +1 COULD -> COTEL +1 COTTON -> CONTIN +1 COSTS -> COST +1 CORRELATES -> COROLLETS +1 CORN -> CORNEERS +1 CORMORANT -> COMRADE +1 CORALIE -> CORLEY +1 COOK -> COPE +1 CONTAINED -> CONTAINING +1 CONSUMER'S -> CONSUMERS +1 CONSID'BLE -> CONSIDERABLE +1 CONQUERIN -> CONQUERING +1 CONJURER -> CONJUROR +1 CONDENSE -> CONTENTS +1 COMPOSE -> COMPOSED +1 COMMENTS -> COMETS +1 COMBASH -> COMBATCH +1 COLOURS -> COLORS +1 COLOUR -> COLOR +1 COLORS -> COLOURS +1 COLORS -> COLLARS +1 COLORIST -> COLOR +1 COLORIST -> CHOLERAIST +1 COLORED -> COLOURED +1 COLD -> CALLED +1 COASTS -> COAST +1 COAL -> CO +1 CO -> COEXIST +1 CLEW -> CLUE +1 CLAUSE -> CLAWS +1 CIVET -> SAVE +1 CITADELLED -> CITADEL +1 CIGARETTE -> SICK +1 CHRISTMAS -> CHRIST +1 CHRISTAIN -> CHRISTIAN +1 CHRIST'S -> CHRIST +1 CHOICE -> CHOICEST +1 CHINGACHGOOK -> INGACHGOOK +1 CHIAROSCURISTS -> KIARASCURISTS +1 CHIAROSCURIST -> CURE +1 CHEROOT -> JERUET +1 CHATTERBOX -> CHATTER +1 CHARENTE -> CHARLET +1 CHARACTERISTIC -> CORRECTURISTIC +1 CHANGE -> CHANGED +1 CENTRED -> SENATE +1 CENTER -> CENTRE +1 CENDENARIES -> SENDIARIES +1 CEASE -> SEIZED +1 CEASD -> CEASED +1 CAUGHT -> GOT +1 CASTS -> CAST +1 CARPACCIO'S -> CARPATCHIO'S +1 CANVASS -> CANVAS +1 CANVAS -> GAMBUS +1 CAN -> COULD +1 CAN -> CANNOT +1 CALMED -> CALM +1 CALLED -> THEIR +1 CALDWELL -> COLDWELL +1 BYE -> BY +1 BY -> MY +1 BY -> BYE +1 BUTT -> BUT +1 BUTCHERED -> BUTCHER 
+1 BUT -> DO +1 BUT -> BETTER +1 BUT -> BEFORE +1 BUSHEL -> BUSH +1 BURNE -> BYRNE +1 BURGOYNE -> WERE +1 BUNNIT -> BUNNOT +1 BUILDING -> BILLING +1 BUCHANAN -> BUCCATAN +1 BROTHELS -> BRAFFLELS +1 BRITANNULISTS -> BRITAIN +1 BRISK -> BRAY +1 BRINGING -> RINGING +1 BREAKWATER -> BRAKE +1 BREAKFAS -> BREAKFAST +1 BRANWELL -> BROWNWELL +1 BRANDS -> BRAINS +1 BRANCH -> RANCH +1 BRAGELONNE -> BREG +1 BRAGELONNE -> BRAGGLIN +1 BOX -> BOXWOMEN +1 BOTANY -> BARTANY +1 BORDERS -> BORDER +1 BOOKKEEPER -> BIT +1 BOLLS -> BOWLS +1 BOGUS -> VOGUS +1 BOGGS -> BOX +1 BOGGS -> BOGS +1 BOAR -> BOARHOUND +1 BLUESKINS -> BLUESKIN +1 BLESSINGS -> BLESSING +1 BLASTS -> BLAST +1 BITES -> WHITES +1 BIT -> BID +1 BILLYGOAT -> SPILLY +1 BILLED -> BUILD +1 BEWILDERMENT -> OF +1 BERGSON -> BERKS +1 BELIEVE -> BELIEVED +1 BEING -> MEAN +1 BEIN -> BEING +1 BEHAVIOURIST -> BEHAVIORIST +1 BEGGAR'S -> BEGGARS +1 BEG -> BEGGED +1 BEFORE -> FOR +1 BEFIT -> BE +1 BEFELL -> BEFEL +1 BEFAL -> BEFALL +1 BEEN -> SPIN +1 BEELZEBUB -> IS +1 BEEDER -> READER +1 BEEBE -> B +1 BEDIMMED -> BE +1 BEACHED -> BEECHED +1 BATTLEAX -> ADELAX +1 BASKET -> BASKEY +1 BANDS -> VANS +1 BALAAM'S -> BALEM'S +1 BAINS -> BANDOMERE +1 BADAUDERIE -> BAD +1 BAD -> BAN +1 BABIRUSA -> BABAROUSA +1 AYE -> I +1 AYE -> AY +1 AWHILE -> A +1 AWARE -> WEAR +1 AUNT -> AND +1 AU -> ACCURANT +1 ATTENDANTS -> ATTENDANCE +1 ATTENDANCE -> ATTENDANTS +1 ATHOLEMEN -> ETHEL +1 ATHENAIS -> ETHNE +1 ATHENAIS -> ETHINE +1 ATCHISON -> ADJUT +1 AT -> ITS +1 AT -> AND +1 ASSEMBLED -> A +1 ASCENDENCY -> ASCENDANCY +1 AS -> TO +1 ARRONDISSEMENT -> ARE +1 ARRIVING -> RIVING +1 ARRESTS -> ARREST +1 ARRANGING -> A +1 ARMOUR -> ARMOR +1 ARMED -> ARM +1 ARE -> OR +1 ARE -> ALL +1 ARCHIVES -> ARCHIVE +1 APPRENTICE -> APPRENTICED +1 ANYWHERE -> MANYWHERE +1 ANYMORE -> ANY +1 ANY -> ANYTHING +1 ANY -> ANYONE +1 ANTEDATING -> ANTIDATING +1 ANTE -> ANTEROOM +1 ANSWERD -> ANSWERED +1 ANNE'S -> AN +1 ANNALS -> ANNAL +1 ANGELS -> ANGEL +1 ANDERS -> ANDREW +1 ANDELLA -> DELLA +1 AND -> THAN +1 AND -> ONE +1 AND -> NORTHERNOSING +1 AND -> INTO +1 AND -> INFORM +1 AND -> INDEED +1 AND -> EMBICIDES +1 AND -> AM +1 AND -> ALYOCADIA'S +1 ANAXAGORAS -> AN +1 AN -> ON +1 AN -> IN +1 AMPHITHEATER -> AMPHITHEATRE +1 AM -> I'M +1 ALTERNATIVE -> ALL +1 ALLUVION -> ALLUVIAN +1 ALL -> ALTOGETHER +1 ALL -> ALREADY +1 ALERTNESS -> INERTNESS +1 ALBIGENSES -> ALBIGENZAS +1 ALBANS -> ALBAN'S +1 AIR -> HOUR +1 AIR -> HEIR +1 AIN'T -> HAIN'T +1 AIGNAN -> ENG +1 AID -> AIDS +1 AFTERDECK -> AFTER +1 AFFRIGHTENED -> A +1 AFFILIATED -> HAVE +1 AFFECT -> EFFECT +1 ADONA -> ADONNA +1 ACKNOWLEDGEMENT -> ACKNOWLEDGMENT +1 ABOLITIONISTS -> ABOLITIONIST +1 ABJECTLY -> OBJECTLY +1 ABDUCTION -> ADOPTION +1 ABBE -> ABBEY +1 A -> UPON +1 A -> UNNOTTINGHAM +1 A -> UNNOTABLY +1 A -> OFFENCE +1 A -> IF +1 A -> I +1 A -> ESPECIAL +1 A -> AWAY +1 A -> ATTORIAN +1 A -> ARREST +1 A -> APILLION +1 A -> AFAR +1 A -> ACCORD +1 A -> ACCOMPANIED + +DELETIONS: count ref +10 A +9 IS +7 AND +4 TONNAY +4 IT +4 CHARENTE +3 WITH +3 TO +3 OF +3 GALATIANS +2 YARD +2 WAY +2 THE +2 TELL +2 STAIRS +2 ONE +2 NO +2 LORD +2 HIS +2 HAVE +2 FOR +2 E +2 DE +2 AT +2 AM +1 YOU +1 WORTH +1 WOMEN +1 WHATEVER +1 WASN'T +1 WAS +1 WARRENTON'S +1 UTILITY +1 USE +1 TRILOGIES +1 TOWNE +1 TORY +1 TOLD +1 TOGETHER +1 THOR +1 THING +1 T +1 STEP +1 SPECIAL +1 SOCRATIC +1 SITTING +1 SIT +1 SHIP +1 SHED +1 SENTENCES +1 SENT +1 SEERS +1 SEE +1 RUNG +1 ROOM +1 REST +1 READY +1 REACHING +1 RAVINES +1 PORTES +1 POOS +1 POND +1 PLATONISTS +1 PLATES +1 PILLION +1 PHILIP 
+1 PERFECT +1 PEGRE +1 OLD +1 NOTABLY +1 NOT +1 NARES +1 N +1 MUCH +1 MER +1 ME +1 LO +1 LEOCADIA'S +1 LAKE +1 L +1 KNOCKED +1 KNIFE +1 JAIL +1 INTO +1 IN +1 HUMPH +1 HOUND +1 HIM +1 HE +1 GROUNDS +1 GHIP +1 FORM +1 FOOTED +1 FENCE +1 FEEL +1 FAR +1 EXIST +1 EVER +1 EASE +1 EARS +1 EAGLE +1 DO +1 DENSITY +1 DEED +1 DARK +1 D +1 CUTTERS +1 COURANT +1 COMPOSSER +1 COMPANY +1 CLOTH +1 CHORD +1 CHOOSE +1 CHERRIES +1 CHARLES +1 BROKEN +1 BOYS +1 BORN +1 BESIDES +1 B +1 ATTAINED +1 AS +1 ARDLE + +INSERTIONS: count hyp +12 A +11 ONE +9 THE +7 AND +6 IS +6 ARE +5 HAVE +5 DAY +4 TO +4 OF +4 ME +4 IN +4 AS +4 AM +3 WILL +3 ON +3 IT +3 HALL +3 FIND +2 WORTH +2 TIME +2 OTHER +2 NIGHT +2 MEN +2 HER +2 EVER +2 BE +2 AXE +2 ATTITUDE +2 AT +1 YULA +1 YON +1 WRAPPED +1 WORLDLY +1 WILDERMENT +1 WHILE +1 WEST +1 WERE +1 WAY +1 WATER +1 WAS +1 WARILY +1 VINE +1 VILLIA +1 VIEW +1 VICHET +1 VENORS +1 ULLA +1 UILESTS +1 TURNED +1 TREES +1 TRANCED +1 THIS +1 THINGS +1 THING +1 THESE +1 THEASURUS +1 THAT +1 TALL +1 STATION +1 SPREE +1 SOME +1 SIMPLED +1 SHOT +1 SERVANT +1 SEPARATED +1 SCURUS +1 SAINT +1 SAGE +1 RED +1 RADIAN +1 QUIETUDE +1 POST +1 POOLS +1 POOL +1 PLAY +1 OWE +1 OVER +1 OUT +1 ORDS +1 OR +1 ONLY +1 OFF +1 ODD +1 OCCADIA +1 O +1 MORE +1 MILL +1 MARTRA +1 MAKE +1 LOSS +1 LOCKY +1 LOCATED +1 LIST +1 LINE +1 LIKE +1 LEFT +1 LEAF +1 KEEPER +1 I +1 HYAHSOME +1 HOUSES +1 HOLE +1 HIS +1 HIM +1 HE +1 HAD +1 GROUND +1 GREE +1 GOING +1 GOAT +1 GIRT +1 FRIGHTENED +1 FOR +1 FIT +1 FILLIOTTED +1 FERNALIA +1 EXAGGARIST +1 EULO +1 ELLA +1 EFFECTUALLY +1 E +1 DRESSER +1 DOWN +1 DONALDS +1 DIMMED +1 DERBOOLE +1 DELLA +1 DECK +1 DEALT +1 DE +1 COMTE +1 CLEANING +1 CHAIR +1 CERTAINTY +1 CASE +1 BURN +1 BUGS +1 BUB +1 BOOKS +1 BED +1 BEALES +1 BALL +1 BACK +1 ARROWS +1 ARMY +1 AN +1 ALONE +1 ACTIVELY +1 ACTION +1 ABILITY + +PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp +A 1121 100 1166 1176 +AND 1738 98 1787 1787 +THE 3428 84 3461 3479 +IN 878 71 905 922 +TO 1325 37 1340 1347 +OF 1787 30 1799 1805 +IS 449 30 468 460 +I 706 29 711 730 +IT 544 28 558 558 +ONE 183 23 191 198 +AN 153 19 165 160 +AS 379 16 383 391 +THAT 602 15 610 609 +YOU 414 14 418 424 +OR 169 14 176 176 +ON 274 14 279 283 +HE 519 14 526 526 +O 7 13 14 13 +MEN 58 13 62 67 +FOR 413 13 420 419 +ARE 180 13 182 191 +WERE 181 12 186 188 +THEY 203 12 209 209 +THEIR 167 12 173 173 +THIS 255 11 263 258 +HAVE 211 11 215 218 +AT 278 11 284 283 +WITH 418 10 424 422 +HIS 470 10 473 477 +WILL 142 9 143 150 +WHERE 44 9 49 48 +TWO 65 9 71 68 +TIMAEUS 0 9 9 0 +THERE 135 9 137 142 +THAN 84 9 88 89 +SOME 85 9 87 92 +MAN 61 9 67 64 +I'M 27 9 33 30 +HAD 317 9 321 322 +DE 2 9 10 3 +ANY 83 9 85 90 +ANDERS 2 9 11 2 +WOULD 136 8 141 139 +WAS 576 8 579 581 +RODOLFO 0 8 8 0 +ITS 80 8 82 86 +HER 319 8 324 322 +AM 56 8 59 61 +WHEN 129 7 133 132 +TOO 59 7 61 64 +THEN 120 7 125 122 +NO 163 7 169 164 +IT'S 24 7 29 26 +INTO 101 7 104 105 +I'VE 17 7 23 18 +ANYONE 0 7 6 1 +ALL 223 7 225 228 +WELL 70 6 75 71 +RUDOLPHO 0 6 0 6 +OTHER 62 6 65 65 +OH 29 6 33 31 +LEOCADIA 0 6 6 0 +HAS 104 6 108 106 +BUT 341 6 344 344 +BE 314 6 314 320 +WHOSE 13 5 14 17 +WE 150 5 152 153 +UP 108 5 110 111 +TONNAY 0 5 5 0 +TIMAS 0 5 0 5 +THEL 0 5 5 0 +SYLVIA 0 5 0 5 +SOAMES 0 5 5 0 +SILVIA 0 5 5 0 +NOW 91 5 94 93 +NOT 335 5 338 337 +MY 222 5 225 224 +METER 6 5 11 6 +ME 183 5 184 187 +HIM 213 5 215 216 +HELLO 0 5 2 3 +FIND 21 5 22 25 +EVERY 30 5 31 34 +DAY 50 5 50 55 +COULD 91 5 95 92 +CHARENTE 0 5 5 0 +BATTLEAX 0 5 5 0 +ANDREWS 0 5 0 5 +WON 2 4 2 6 +WHO 153 4 154 156 +WHILE 34 4 34 38 +WHAT 111 4 
113 113 +UPON 91 4 94 92 +UNC 1 4 5 1 +TOWARDS 15 4 19 15 +TOWARD 8 4 8 12 +THOU 18 4 18 22 +SOLMES 0 4 0 4 +RED 18 4 19 21 +PRACTISE 1 4 4 2 +PRACTICE 6 4 7 9 +NAOMI 1 4 5 1 +MAINHALL 0 4 4 0 +MAIN 3 4 3 7 +LOCATIA 0 4 0 4 +LEAVENWORTH 0 4 4 0 +JAGO 1 4 5 1 +HALLO 0 4 2 2 +HALL 9 4 9 13 +GRAY 4 4 5 7 +EVER 34 4 35 37 +EMIL 0 4 4 0 +E 0 4 3 1 +CRESSWELL 0 4 4 0 +COURT 9 4 12 10 +COLOR 8 4 10 10 +BY 246 4 248 248 +BRITAIN 1 4 1 5 +YOU'RE 2 3 5 2 +XAVIER 0 3 3 0 +WORTH 3 3 4 5 +WHOLLY 8 3 9 10 +WHITE 22 3 23 24 +WAY 71 3 73 72 +VALLIERS 0 3 0 3 +VALLIERE 0 3 3 0 +TRAVELLING 0 3 0 3 +TRAVELING 0 3 3 0 +TODAY 0 3 3 0 +TIME 86 3 87 88 +THROUGH 42 3 42 45 +THORKEL 0 3 3 0 +THEM 120 3 122 121 +THEE 27 3 30 27 +THEATRE 2 3 2 5 +THEATER 0 3 3 0 +THAT'S 13 3 14 15 +TABU 0 3 3 0 +SOMETIME 0 3 2 1 +SOMETHING 34 3 37 34 +SOMEONE 0 3 3 0 +SOLON 1 3 4 1 +SO 195 3 197 196 +SLANG 9 3 12 9 +SITTING 2 3 4 3 +SIN 12 3 13 14 +SHE 279 3 281 280 +SEEMED 30 3 30 33 +SEE 64 3 65 66 +SAIL 3 3 6 3 +READ 16 3 18 17 +PLATONISTS 0 3 3 0 +OUT 99 3 100 101 +OFFICERS 8 3 10 9 +NOR 20 3 21 22 +NEW 34 3 35 36 +METRE 0 3 0 3 +MAY 54 3 56 55 +MASTERS 2 3 3 4 +KNOW 75 3 76 77 +KAFFIR 0 3 0 3 +KAFFAR 0 3 3 0 +JUST 41 3 42 43 +IF 129 3 131 130 +ICHTHYOSAURUS 0 3 3 0 +I'LL 12 3 14 13 +HOLY 1 3 3 2 +HOLLAND 0 3 0 3 +HOLLAN 0 3 3 0 +HILDA 6 3 9 6 +HERE 68 3 70 69 +HE'S 6 3 9 6 +GREY 1 3 3 2 +GOAT 5 3 7 6 +GILCHRIST 0 3 3 0 +GALATIANS 3 3 6 3 +FORMERLY 0 3 0 3 +FORMALLY 0 3 3 0 +FOREVER 0 3 2 1 +FELL 16 3 16 19 +FAIR 6 3 7 8 +EVERYONE 1 3 3 2 +ESTAFANIA 0 3 3 0 +DOWN 71 3 73 72 +DOBRYNA 0 3 3 0 +DOBRINA 0 3 0 3 +DISSENT 0 3 2 1 +DID 65 3 67 66 +DESCENT 1 3 2 3 +DEAD 5 3 5 8 +CRESWELL 0 3 0 3 +COULDN'T 4 3 6 5 +COLOUR 1 3 2 3 +COLORS 1 3 3 2 +BRITANNULA 0 3 3 0 +BRAKE 1 3 3 2 +BOX 7 3 8 9 +BANNISTER 0 3 3 0 +BANISTER 0 3 0 3 +AY 0 3 2 1 +ANOTHER 34 3 36 35 +ANDREW'S 0 3 0 3 +ANDELLA 1 3 4 1 +AMIEL 0 3 0 3 +ZORA 1 2 3 1 +ZEVIER 0 2 0 2 +YOUR 107 2 109 107 +YOU'LL 7 2 9 7 +YE 7 2 8 8 +YARD 3 2 5 3 +WOODS 2 2 3 3 +WOOD 3 2 4 4 +WIDOWS 0 2 0 2 +WHY 42 2 44 42 +WHITTAWS 0 2 2 0 +WHICH 216 2 216 218 +WHATEVER 11 2 13 11 +WHALE 2 2 4 2 +WEAR 4 2 5 5 +WE'VE 1 2 2 2 +WE'RE 1 2 3 1 +WE'LL 4 2 6 4 +WATRY 0 2 2 0 +WATER 19 2 19 21 +WARRENTON'S 0 2 2 0 +VOICE 16 2 18 16 +VERY 82 2 83 83 +VAPOURS 0 2 2 0 +VAPORS 0 2 0 2 +VANDERPOOL 0 2 2 0 +VAN 2 2 2 4 +UN 0 2 1 1 +TRAVELLERS 0 2 0 2 +TONIGHT 0 2 2 0 +THINK 52 2 52 54 +THINGS 33 2 34 34 +THING 21 2 22 22 +THESE 68 2 68 70 +TELL 32 2 34 32 +TABOO 0 2 0 2 +T 0 2 1 1 +SYMPOSIUM 0 2 2 0 +SUPPOSIUM 0 2 0 2 +STUDY 11 2 12 12 +STEPHANOS 0 2 2 0 +STATE 26 2 27 27 +STAIRS 6 2 8 6 +STAGE 4 2 6 4 +SPIN 0 2 1 1 +SPECIAL 1 2 2 2 +SOUTHEY 0 2 2 0 +SLING 0 2 0 2 +SLEEVE 0 2 1 1 +SIT 10 2 11 11 +SIR 35 2 36 36 +SINCE 24 2 25 25 +SIFT 0 2 0 2 +SIF 0 2 2 0 +SHIP 7 2 9 7 +SET 19 2 19 21 +SERVANT 11 2 12 12 +SENTENCES 1 2 3 1 +SENT 4 2 6 4 +SEEM 11 2 13 11 +SEA 17 2 18 18 +SCHOOL 9 2 11 9 +SCENT 0 2 1 1 +SAW 21 2 23 21 +SALE 0 2 0 2 +SAGE 0 2 0 2 +ROUND 15 2 17 15 +RESERVED 1 2 1 3 +RESERVE 0 2 2 0 +REMAINED 5 2 6 6 +READER 1 2 1 3 +PRAISE 1 2 3 1 +POPHAM 3 2 5 3 +PLATINISTS 0 2 0 2 +PLATES 1 2 2 2 +PATIENCE 1 2 2 2 +PARSONS 0 2 2 0 +PARLOUR 0 2 0 2 +PARLOR 0 2 2 0 +OUR 79 2 81 79 +ORDERED 2 2 3 3 +ORDER 22 2 22 24 +OLIVE'S 1 2 3 1 +OLD 39 2 40 40 +OFF 24 2 25 25 +NOTTINGHAM 5 2 7 5 +NIGHT 24 2 24 26 +NEO 1 2 3 1 +NEIGHBOUR 1 2 2 2 +NEIGHBOR 0 2 1 1 +N 2 2 4 2 +MUNNY 0 2 2 0 +MORNING 21 2 21 23 +MORNIN 0 2 2 0 +MONTMARTRE 0 2 2 0 +MONTFICHET 7 2 9 7 +MONT 0 2 0 2 +MONEY 5 2 5 7 +MISTS 2 2 3 3 +MISTER 47 2 48 48 +MIST 
4 2 5 5 +MISSOURIANS 0 2 2 0 +MISS 16 2 18 16 +MERRY 0 2 1 1 +MARSHALL 1 2 3 1 +LOUIS 1 2 2 2 +LOSS 5 2 6 6 +LORD 21 2 23 21 +LOOSE 4 2 5 5 +LINE 12 2 13 13 +LIKE 105 2 105 107 +LEVINWORTH 0 2 0 2 +LEVIN 0 2 0 2 +LEOCADIA'S 0 2 2 0 +LEFT 33 2 34 34 +LECOMPTE 0 2 2 0 +LE 1 2 1 3 +LAUGHED 5 2 6 6 +LARKSPUR 0 2 2 0 +LAKE 11 2 13 11 +KNOWS 5 2 6 6 +KARL 0 2 0 2 +KAFFIR'S 0 2 0 2 +KAFFAR'S 0 2 2 0 +JAIL 2 2 4 2 +INNERLOCHY 0 2 2 0 +INJURED 0 2 2 0 +IMPRESSED 3 2 4 4 +IMPRESS 0 2 0 2 +I'D 1 2 3 1 +HUMOUR 0 2 1 1 +HUMOR 0 2 1 1 +HOUSE 34 2 34 36 +HONOUR 0 2 2 0 +HONORABLE 1 2 2 2 +HONOR 4 2 4 6 +HOLBINE 0 2 0 2 +HOLBEIN 0 2 2 0 +HEN 0 2 2 0 +HELDA 0 2 0 2 +HEAR 18 2 20 18 +HEAD 36 2 36 38 +HARRY 2 2 3 3 +HANDS 16 2 17 17 +GUESTS 4 2 5 5 +GREAT 73 2 74 74 +GOOD 69 2 69 71 +GILLIKINS 0 2 2 0 +FOUNDED 3 2 5 3 +FOUND 21 2 21 23 +FOOTED 0 2 1 1 +FIRE 22 2 22 24 +FIGHT 3 2 3 5 +FELT 17 2 19 17 +FALL 2 2 2 4 +FAIRVIEW 0 2 2 0 +EVERYDAY 1 2 3 1 +ESPECIAL 0 2 1 1 +EM 0 2 1 1 +EARS 3 2 5 3 +DUMPY 0 2 2 0 +DON'T 37 2 38 38 +DO 93 2 94 94 +DENISCHALANT 0 2 0 2 +DELLA 1 2 1 3 +DELIA 0 2 2 0 +DEFINE 3 2 5 3 +CROSS 5 2 5 7 +CRITIUS 0 2 0 2 +CRITIAS 0 2 2 0 +CREIGHTON 0 2 2 0 +COURTYARD 4 2 4 6 +COUNSELS 0 2 1 1 +CONFESSED 1 2 1 3 +CONFESS 2 2 4 2 +COLOURS 0 2 1 1 +COLORIST 0 2 2 0 +CO 0 2 1 1 +CHRIST 22 2 22 24 +CHASE 1 2 1 3 +CHAISE 0 2 2 0 +CAUGHT 9 2 10 10 +CARL 0 2 2 0 +CANVAS 3 2 4 4 +CAN 64 2 66 64 +CALLED 23 2 24 24 +BYE 1 2 2 2 +BULL 2 2 2 4 +BUL 0 2 2 0 +BREAK 3 2 3 5 +BRAGELONNE 0 2 2 0 +BOYS 5 2 6 6 +BOGGS 0 2 2 0 +BIT 8 2 9 9 +BEING 39 2 40 40 +BEHAVIOUR 0 2 2 0 +BEHAVIOR 0 2 0 2 +BEFORE 74 2 75 75 +BEATITUDE 0 2 2 0 +BATTLEX 0 2 0 2 +BATTLE 6 2 6 8 +BAD 5 2 6 6 +B 1 2 2 2 +AYE 0 2 2 0 +AXE 1 2 1 3 +ATTITUDE 4 2 4 6 +ATTENDANTS 0 2 1 1 +ATTENDANCE 0 2 1 1 +ATHENAIS 0 2 2 0 +ASTOR 0 2 2 0 +ASTER 0 2 0 2 +ARREST 1 2 1 3 +AROUND 12 2 12 14 +ALEXANDRA 1 2 3 1 +ALEXANDER 13 2 13 15 +AIR 23 2 25 23 +ZOV'S 0 1 0 1 +ZORAS 0 1 0 1 +ZORAH 0 1 0 1 +ZORA'S 0 1 1 0 +ZOOF'S 1 1 2 1 +YUNK 0 1 0 1 +YULA 0 1 0 1 +YON 0 1 0 1 +YOLKS 0 1 0 1 +YEARS 34 1 34 35 +YEARNING 1 1 2 1 +YEARNIN 0 1 0 1 +YARE'S 0 1 0 1 +YAMS 0 1 1 0 +YAGO 0 1 0 1 +WYLDER 4 1 5 4 +WYAT 0 1 0 1 +WRITE 3 1 4 3 +WRAPPED 0 1 0 1 +WORST 3 1 4 3 +WORSE 6 1 6 7 +WORLDLY 0 1 0 1 +WORLD'S 2 1 2 3 +WORLD 36 1 37 36 +WORE 3 1 3 4 +WOODCUTTERS 0 1 0 1 +WOODBEGIRT 0 1 1 0 +WONDERING 1 1 2 1 +WONDER 7 1 7 8 +WOMEN'S 1 1 1 2 +WOMEN 7 1 8 7 +WOMAN'S 1 1 2 1 +WITHIN 22 1 23 22 +WITHES 0 1 1 0 +WITCH 2 1 3 2 +WINTER 4 1 5 4 +WINDSAY 0 1 0 1 +WIND 7 1 8 7 +WIN 2 1 2 3 +WILLS 0 1 1 0 +WILDS 0 1 0 1 +WILDERMENT 0 1 0 1 +WIGHT 0 1 0 1 +WIFE 16 1 17 16 +WIDTHS 0 1 0 1 +WIDOW 1 1 1 2 +WID 0 1 0 1 +WHO'S 1 1 2 1 +WHITTAWD 0 1 1 0 +WHITTAW 0 1 1 0 +WHITES 0 1 0 1 +WHIRLS 0 1 0 1 +WHIRLPOOL 1 1 2 1 +WHIPS 0 1 0 1 +WHIPPED 1 1 2 1 +WHIP 0 1 0 1 +WHETHER 23 1 23 24 +WHEREINWITH 0 1 0 1 +WHERE'S 0 1 0 1 +WHELPS 0 1 1 0 +WHELMS 0 1 0 1 +WHATSOEVER 1 1 1 2 +WHAT'S 4 1 5 4 +WHARTON 0 1 0 1 +WET 8 1 9 8 +WESTMERE 0 1 1 0 +WESTMER 0 1 0 1 +WESTLEY 0 1 0 1 +WEST 7 1 7 8 +WESLEY 1 1 2 1 +WELCOMED 0 1 1 0 +WELCOME 6 1 6 7 +WEEKLY 0 1 0 1 +WEDNESDAY 1 1 2 1 +WEBS 0 1 1 0 +WEATHER 5 1 6 5 +WEAKLY 0 1 1 0 +WAVES 6 1 7 6 +WAVE 0 1 0 1 +WATERY 2 1 2 3 +WATERMILL 0 1 1 0 +WATCHERY 0 1 0 1 +WASN'T 1 1 2 1 +WARS 0 1 0 1 +WARRENTONS 0 1 0 1 +WARMONISM 0 1 0 1 +WARLIKE 0 1 1 0 +WARILY 0 1 0 1 +WAR 5 1 5 6 +WANDERING 2 1 2 3 +WANDER 1 1 2 1 +WAIL 0 1 0 1 +VOLTAIRE 0 1 1 0 +VOICES 2 1 2 3 +VOGUS 0 1 0 1 +VINEY 0 1 0 1 +VINES 0 1 1 0 +VINE 0 1 0 1 +VILLIA 0 1 0 1 +VILLEROY 0 1 1 0 +VILLAY 0 1 0 
1 +VIGNETTE 0 1 1 0 +VIEWS 1 1 1 2 +VIEW 2 1 2 3 +VICHET 0 1 0 1 +VICARIOUS 3 1 4 3 +VIADUK 0 1 0 1 +VIADUCT 0 1 1 0 +VERSE 1 1 2 1 +VERNS 0 1 0 1 +VERNE'S 0 1 1 0 +VEREMENT 0 1 0 1 +VENORS 0 1 0 1 +VEINS 0 1 0 1 +VEILS 1 1 1 2 +VEGET 0 1 0 1 +VAUDOIS 0 1 1 0 +VARIABILITY 1 1 2 1 +VANS 0 1 0 1 +VANES 0 1 1 0 +VANDERPOOLS 0 1 1 0 +VANDER 0 1 0 1 +VALOUR 0 1 0 1 +VALOR 2 1 3 2 +VALLEYED 0 1 1 0 +VALLED 0 1 0 1 +VALES 2 1 3 2 +UTTER 3 1 4 3 +UTIT 0 1 0 1 +UTILITY 2 1 3 2 +UTAH 1 1 2 1 +UTA 0 1 0 1 +USE 30 1 31 30 +UPSTAIRS 3 1 3 4 +UNWARRANTON'S 0 1 0 1 +UNWARILY 0 1 1 0 +UNTO 2 1 3 2 +UNSEPARATED 0 1 1 0 +UNNOTTINGHAM 0 1 0 1 +UNNOTABLY 0 1 0 1 +UNLIKE 0 1 1 0 +UNLAKE 0 1 0 1 +UNJUSTIFILL 0 1 0 1 +UNJUST 1 1 2 1 +UNITED 7 1 8 7 +UNFINISHED 1 1 2 1 +UNFINISHANCES 0 1 0 1 +UNEXCEPTIONALLY 0 1 0 1 +UNEXCEPTIONABLY 0 1 1 0 +UNERRIFIC 0 1 0 1 +UNDERGROUND 0 1 1 0 +UNDER 40 1 40 41 +UNCONOCTED 0 1 0 1 +UNCLENCHED 0 1 1 0 +UNCAN 0 1 0 1 +ULLA 0 1 0 1 +UILESTS 0 1 0 1 +UD 0 1 1 0 +TYPE 1 1 1 2 +TWITE 0 1 1 0 +TWASN'T 1 1 1 2 +TUTRILOGIES 0 1 0 1 +TURNOVER 0 1 1 0 +TURNERS 0 1 0 1 +TURNER'S 0 1 1 0 +TURNED 21 1 21 22 +TURN 18 1 18 19 +TURANSULA 0 1 0 1 +TUPPENY 0 1 1 0 +TRY 3 1 4 3 +TROUBLE 8 1 8 9 +TRIPPING 0 1 0 1 +TRILOGIES 0 1 1 0 +TRIED 9 1 9 10 +TRIAGO 0 1 0 1 +TREES 19 1 19 20 +TREDDLESTON 0 1 1 0 +TREBLE 0 1 1 0 +TREADLESTON 0 1 0 1 +TRAVELERS 0 1 1 0 +TRAVELER 0 1 1 0 +TRANCED 0 1 0 1 +TRAIL 2 1 2 3 +TOWNE 0 1 1 0 +TOWELLING 0 1 1 0 +TOWELINGS 0 1 0 1 +TOULD 0 1 1 0 +TORY 0 1 1 0 +TORQUAL 0 1 0 1 +TORKO 0 1 0 1 +TORE 0 1 0 1 +TORCOAL 0 1 0 1 +TOPRUNG 0 1 0 1 +TOPPENNY 0 1 0 1 +TOPEKA 0 1 1 0 +TOPECA 0 1 0 1 +TOP 10 1 11 10 +TOOMS 0 1 1 0 +TONY 0 1 0 1 +TOMB'S 0 1 0 1 +TOLD 31 1 32 31 +TOILETTE 0 1 1 0 +TOILET 0 1 0 1 +TOGETHER 15 1 16 15 +TIRESOME 1 1 2 1 +TINTORET 0 1 1 0 +TINTINT 0 1 1 0 +TINTANT 0 1 0 1 +TINCTARETTE 0 1 0 1 +TIMIRS 0 1 0 1 +TIMEUS 0 1 0 1 +TIGHT 1 1 1 2 +TIBI 0 1 1 0 +TIBEE 0 1 0 1 +THY 17 1 17 18 +THUS 20 1 21 20 +THRO 0 1 1 0 +THOUSANDTH 0 1 0 1 +THOUSAND 12 1 13 12 +THOUGHT 53 1 54 53 +THOUGH 32 1 33 32 +THORLEIF 0 1 1 0 +THOR 0 1 1 0 +THINKS 2 1 3 2 +THEY'RE 3 1 3 4 +THEREIN 2 1 3 2 +THERE'S 12 1 12 13 +THEE'S 0 1 1 0 +THEATILITY 0 1 0 1 +THEASURUS 0 1 0 1 +THANKS 3 1 3 4 +THANK 12 1 13 12 +TECHNIQUE 0 1 1 0 +TEA 2 1 3 2 +TARANTULA 0 1 1 0 +TALL 9 1 9 10 +TALKERS 0 1 1 0 +TALK 19 1 19 20 +TABLECLOTH 0 1 0 1 +TABLE 23 1 24 23 +SWUNG 0 1 0 1 +SWOONS 0 1 1 0 +SWOON 3 1 3 4 +SWEPT 1 1 1 2 +SWEEP 1 1 2 1 +SWAY 1 1 1 2 +SWAN 0 1 1 0 +SUSPICIONS 2 1 3 2 +SUSPICION 4 1 4 5 +SUSE 0 1 0 1 +SURFACES 0 1 1 0 +SURFABILITY 0 1 0 1 +SUPERVOUS 0 1 0 1 +SUPERFLUOUS 0 1 1 0 +SUMMONED 2 1 3 2 +SUMMON 1 1 1 2 +SUMMERS 0 1 1 0 +SUMMER'S 2 1 2 3 +SUM 1 1 1 2 +SULLEN 1 1 1 2 +SUITCASE 0 1 1 0 +SUIT 4 1 4 5 +STYLE 4 1 5 4 +STYLANT 0 1 0 1 +STUFFANOS 0 1 0 1 +STREAMLINE 0 1 1 0 +STREAM 1 1 1 2 +STRAIT 0 1 0 1 +STRAIGHT 2 1 3 2 +STORY'S 0 1 1 0 +STORIES 3 1 3 4 +STOCKBROKER 0 1 1 0 +STEVE 0 1 0 1 +STEPHANO'S 0 1 0 1 +STEP 6 1 7 6 +STEFFANIA 0 1 0 1 +STEELED 0 1 0 1 +STEEL'D 0 1 1 0 +STEED 1 1 1 2 +STEADY 4 1 5 4 +STATION 6 1 6 7 +STATES 6 1 6 7 +STARTS 0 1 1 0 +START 3 1 3 4 +STARE 1 1 1 2 +STANDS 2 1 3 2 +STAND 13 1 13 14 +STALL 0 1 0 1 +STAIR 0 1 1 0 +SQUINT 0 1 0 1 +SPRING 7 1 8 7 +SPREE 0 1 0 1 +SPRANG 3 1 3 4 +SPLENDOUR 0 1 0 1 +SPLENDOR 0 1 1 0 +SPLENDID 9 1 9 10 +SPLENDET 0 1 1 0 +SPLASHED 1 1 2 1 +SPILLY 0 1 0 1 +SPEND 2 1 2 3 +SPENCE 0 1 0 1 +SPELLED 0 1 1 0 +SPELL'D 0 1 0 1 +SPECIALIZED 0 1 0 1 +SPECIALISED 0 1 1 0 +SOUTHEY'S 0 1 1 0 +SOUS 0 1 0 1 +SOU 0 1 1 0 +SORRY 4 1 5 4 +SORA 0 
1 0 1 +SOPHISTRY 0 1 1 0 +SOPHISTRI 0 1 0 1 +SOOTHED 0 1 1 0 +SOOTHE 1 1 1 2 +SON 14 1 15 14 +SOMETHIN 0 1 0 1 +SOLOQUIE 0 1 0 1 +SOLON'S 0 1 1 0 +SOLILOQUY 4 1 5 4 +SOLID 2 1 2 3 +SOLEMN 1 1 1 2 +SODIN 0 1 0 1 +SOCRATIC 0 1 1 0 +SMILED 4 1 4 5 +SMILD 0 1 1 0 +SMELLS 1 1 2 1 +SLEEP 4 1 5 4 +SLAYING 0 1 0 1 +SKURA 0 1 0 1 +SKILLFUL 0 1 1 0 +SKILFUL 0 1 0 1 +SKIES 0 1 0 1 +SKEPTICAL 0 1 1 0 +SIZED 1 1 1 2 +SIZE 4 1 5 4 +SIP 0 1 0 1 +SINS 9 1 9 10 +SIMPLED 0 1 0 1 +SIMMONS 0 1 0 1 +SILL 0 1 0 1 +SILENT 10 1 11 10 +SILENCE 7 1 7 8 +SILAGE 0 1 0 1 +SIGHTSEERS 0 1 0 1 +SIGHT 19 1 20 19 +SIGHED 3 1 4 3 +SIGH 3 1 3 4 +SIDE 23 1 23 24 +SICK 2 1 2 3 +SHUTTING 1 1 2 1 +SHOWRING 0 1 1 0 +SHOWERING 0 1 0 1 +SHOWED 5 1 5 6 +SHOULD 60 1 61 60 +SHOTTY 0 1 0 1 +SHOT 2 1 2 3 +SHODDY 0 1 1 0 +SHIP'S 0 1 0 1 +SHEWD 0 1 1 0 +SHELL 0 1 1 0 +SHEEM 0 1 0 1 +SHEDDING 2 1 2 3 +SHED 0 1 1 0 +SHE'S 5 1 6 5 +SHARPS 0 1 1 0 +SHARPEST 1 1 1 2 +SHARP'ST 0 1 1 0 +SHARP'S 0 1 0 1 +SHAPEN 0 1 1 0 +SHAPELY 0 1 1 0 +SHAME 2 1 3 2 +SHALT 2 1 2 3 +SHALL 43 1 44 43 +SHAKEN 1 1 1 2 +SHABBY 0 1 0 1 +SHABATA 0 1 1 0 +SERVICES 1 1 1 2 +SERVICEABILITY 0 1 1 0 +SERVED 4 1 4 5 +SERVE 11 1 12 11 +SERVANTS 4 1 4 5 +SEPARATED 3 1 3 4 +SENTENCE 3 1 3 4 +SENDIARIES 0 1 0 1 +SENCE 0 1 1 0 +SENATE 2 1 2 3 +SEMON'S 0 1 1 0 +SELVIE 0 1 0 1 +SEIZED 3 1 3 4 +SEERS 0 1 1 0 +SEEN 16 1 16 17 +SEEMS 11 1 12 11 +SEATING 1 1 2 1 +SEAT 4 1 4 5 +SCYTHE 1 1 2 1 +SCUTCHEON 0 1 1 0 +SCUSE 0 1 1 0 +SCURUS 0 1 0 1 +SCUMMED 0 1 1 0 +SCUMBED 0 1 0 1 +SCRAPBOOKS 0 1 1 0 +SCRAP 0 1 0 1 +SCOUTING 0 1 1 0 +SCOUT 5 1 5 6 +SCHOOLS 1 1 1 2 +SCHOOLBOYS 0 1 0 1 +SCEVRA 0 1 1 0 +SCEURA 0 1 1 0 +SCEPTICAL 0 1 0 1 +SCENE 2 1 3 2 +SCAVER 0 1 0 1 +SCATHE 0 1 1 0 +SCATH 0 1 0 1 +SCAROONS 0 1 1 0 +SCARONS 0 1 0 1 +SAYING 15 1 15 16 +SAVIER 0 1 0 1 +SAVER 0 1 0 1 +SAVED 4 1 5 4 +SAVE 9 1 9 10 +SAUVEUR 0 1 1 0 +SAUL 0 1 0 1 +SATE 0 1 1 0 +SAT 18 1 18 19 +SANG 4 1 5 4 +SALVIE 0 1 0 1 +SALL 0 1 0 1 +SALINE 0 1 1 0 +SALIENT 1 1 2 1 +SAINT 14 1 14 15 +SAILORED 0 1 0 1 +SAILING 0 1 0 1 +SAILED 0 1 0 1 +SAILD 0 1 1 0 +SAID 160 1 161 160 +SADD 0 1 0 1 +RUNG 0 1 1 0 +RUFUS 0 1 1 0 +RUFFUS 0 1 0 1 +RUE 0 1 1 0 +RUDOLPHU 0 1 0 1 +RUDOLPHAL 0 1 0 1 +ROY 0 1 0 1 +ROUTE 1 1 2 1 +ROUT 1 1 1 2 +ROSSOTER 0 1 0 1 +ROSSETER 0 1 1 0 +ROOTS 0 1 1 0 +ROOM 40 1 41 40 +ROI 0 1 1 0 +ROGERS'S 0 1 1 0 +ROGERS 2 1 2 3 +ROERER 0 1 1 0 +RODOLFO'S 0 1 1 0 +ROCKED 0 1 1 0 +ROCK 1 1 1 2 +ROBINS 0 1 0 1 +ROBIN'S 0 1 1 0 +ROAR 0 1 0 1 +ROAN 0 1 0 1 +RIVING 0 1 0 1 +RINGING 0 1 0 1 +RIGHT 25 1 25 26 +RIDOLPH'S 0 1 0 1 +RHONE 0 1 1 0 +REWEIGHED 0 1 1 0 +REWAYED 0 1 0 1 +REVOLUTION 0 1 0 1 +REST 13 1 14 13 +REMOVED 4 1 4 5 +REMOVE 3 1 4 3 +REMOV'D 0 1 1 0 +REMEMBERED 11 1 11 12 +REMEMBER 9 1 10 9 +REMARKED 4 1 4 5 +REMARK 2 1 3 2 +REMAINING 0 1 0 1 +REMAIN 5 1 6 5 +RELOCATED 0 1 1 0 +RELIES 0 1 1 0 +REINFORCEMENTS 0 1 0 1 +REIGNED 1 1 2 1 +REGAINED 0 1 1 0 +REGAIN 0 1 0 1 +REFUSED 7 1 8 7 +REFUSE 0 1 0 1 +REENFORCEMENTS 0 1 1 0 +REEDER 0 1 1 0 +RECORDS 2 1 2 3 +RECORD 6 1 7 6 +RECOGNIZED 3 1 3 4 +RECOGNISED 0 1 1 0 +RECALL 1 1 1 2 +REBUKED 0 1 0 1 +REBUK'D 0 1 1 0 +REALIZE 4 1 4 5 +READY 9 1 10 9 +REACHING 1 1 2 1 +RE 1 1 1 2 +RAVINES 0 1 1 0 +RANK 5 1 5 6 +RANCOR 0 1 1 0 +RANCH 0 1 0 1 +RAINED 0 1 0 1 +RADIAN 0 1 0 1 +QUINSON 0 1 1 0 +QUINCENT 0 1 0 1 +QUIETUDE 0 1 0 1 +QUASI 0 1 1 0 +QUASH 0 1 1 0 +QUART 0 1 1 0 +PYTHAGOREANS 0 1 1 0 +PUTTING 7 1 7 8 +PUTTIN 0 1 1 0 +PURSUS 0 1 0 1 +PURSE 1 1 2 1 +PURPOSED 0 1 1 0 +PURPOSE 10 1 10 11 +PURIST 0 1 1 0 +PUREST 0 1 0 1 +PURCHASE 0 1 0 1 +PSALMS 0 1 0 1 +PSALM 2 1 3 2 
+PROVES 1 1 2 1 +PROVED 6 1 6 7 +PROSTERITY 0 1 0 1 +PROSELYTING 0 1 1 0 +PROSELLING 0 1 0 1 +PROSCRIBED 0 1 1 0 +PRONUNCILA 0 1 0 1 +PRODIGAL 0 1 1 0 +PROCEEDING 2 1 2 3 +PROCEED 1 1 1 2 +PRIOR 0 1 1 0 +PREVENT 0 1 1 0 +PREVAILED 2 1 3 2 +PREVAIL 1 1 1 2 +PRETENSE 0 1 1 0 +PRETENCE 1 1 1 2 +PRESOCRATIC 0 1 0 1 +PRESENT 20 1 20 21 +PRESCRIBED 1 1 1 2 +PREISE 0 1 0 1 +PRECONCEIVED 0 1 1 0 +PRECIEUSES 0 1 1 0 +PRECEDE 0 1 1 0 +PRE 0 1 1 0 +PRAYER 0 1 0 1 +PRAIRINES 0 1 0 1 +PRAIRIE 1 1 2 1 +POWER 21 1 22 21 +POSTERITY 1 1 2 1 +POST 3 1 3 4 +POSITIVELY 2 1 3 2 +PORTES 0 1 1 0 +POPPUM 0 1 0 1 +POPLED 0 1 0 1 +POOS 0 1 1 0 +POOLS 1 1 1 2 +POOL 1 1 1 2 +POND 2 1 3 2 +POLLAR 0 1 0 1 +POLAR 0 1 1 0 +POISONED 0 1 0 1 +POISON'D 0 1 1 0 +POINT 13 1 14 13 +POETESS 0 1 1 0 +POETES 0 1 0 1 +PLURAL 1 1 2 1 +PLESIOSAURUS 0 1 1 0 +PLECIUS 0 1 0 1 +PLEASANTS 0 1 0 1 +PLEASANCE 0 1 1 0 +PLAYING 5 1 5 6 +PLAY 12 1 12 13 +PLAITS 0 1 1 0 +PLAIN 3 1 4 3 +PLACES 1 1 1 2 +PLACE 38 1 39 38 +PITHAGORIANS 0 1 0 1 +PILLION 0 1 1 0 +PIERCED 1 1 1 2 +PIERC'D 0 1 1 0 +PICTURES 3 1 3 4 +PICK 2 1 3 2 +PHRASE 3 1 3 4 +PHILIP 8 1 9 8 +PHAEDRUS 0 1 1 0 +PETHOLOGICAL 0 1 0 1 +PERSON 13 1 13 14 +PERICE 0 1 0 1 +PERFECT 6 1 7 6 +PERCHES 0 1 1 0 +PEPPEL 0 1 0 1 +PENINSULA 1 1 2 1 +PEGRENNE 0 1 1 0 +PEGRE 0 1 1 0 +PEARLS 0 1 0 1 +PEARL'S 0 1 1 0 +PEARL 12 1 12 13 +PATIENTS 0 1 1 0 +PATIENT 0 1 0 1 +PATHOLOGICAL 0 1 1 0 +PAST 12 1 12 13 +PASSIONAL 0 1 0 1 +PASSED 14 1 15 14 +PASSAGEWAY 0 1 0 1 +PASSAGE 8 1 9 8 +PASCHAL 0 1 1 0 +PARTICLES 0 1 1 0 +PARTICLE 0 1 0 1 +PARSON'S 0 1 0 1 +PARSON 0 1 0 1 +PAROQUET 0 1 1 0 +PARAPHERNALIA 1 1 2 1 +PARALLELOGRAM 0 1 1 0 +PARALLELLOGRAM 0 1 0 1 +PAPAL 0 1 1 0 +PANTS 0 1 1 0 +PANE 0 1 1 0 +PAIR 5 1 5 6 +PAIN 7 1 7 8 +PAGRIN 0 1 0 1 +OZMA 0 1 1 0 +OWENAIRS 0 1 0 1 +OWEN 0 1 1 0 +OWE 0 1 0 1 +OVERLENE 0 1 0 1 +OVERLEAN 0 1 1 0 +OVER 59 1 59 60 +OUTSTRIPPED 0 1 0 1 +OUTSTRIP 0 1 1 0 +OUTRAGED 0 1 0 1 +OUTRAGE 0 1 1 0 +OUTLEY'S 0 1 0 1 +OUGHTER 0 1 1 0 +OUGHT 10 1 10 11 +OTTLEY'S 0 1 1 0 +OTTER 0 1 0 1 +OTHOR 0 1 0 1 +OTHERWORLDLY 0 1 1 0 +OSTENSITY 0 1 0 1 +OSMO 0 1 0 1 +OSH 0 1 1 0 +OSAGE 0 1 1 0 +ORDS 0 1 0 1 +OPT 0 1 0 1 +OPHELIA 0 1 1 0 +OPE 0 1 0 1 +OPAQUE 0 1 1 0 +OO 0 1 0 1 +ONTO 0 1 1 0 +ONLY 77 1 77 78 +ONGOLATIONS 0 1 0 1 +OMELETTE 0 1 1 0 +OMELET 0 1 0 1 +OLIVES 0 1 0 1 +OLAF 1 1 2 1 +OFFICES 0 1 1 0 +OFFICER'S 0 1 0 1 +OFFICER 4 1 4 5 +OFFENSES 0 1 1 0 +OFFENCES 0 1 0 1 +OFFENCE 0 1 0 1 +ODD 3 1 3 4 +OCCUPANTS 0 1 1 0 +OCCUPANT 0 1 0 1 +OCCADIA 0 1 0 1 +OBSERVED 5 1 6 5 +OBSERVE 4 1 4 5 +OBJECTLY 0 1 0 1 +OARS 0 1 1 0 +OAKS 0 1 1 0 +O'ER 0 1 1 0 +NUMIDIA 0 1 1 0 +NOVELS 2 1 2 3 +NOVEL'S 0 1 1 0 +NOUGHT 0 1 1 0 +NOTHING 33 1 33 34 +NOTHIN 0 1 1 0 +NOTABLY 0 1 1 0 +NOSE 2 1 2 3 +NORTHWEST 0 1 1 0 +NORTHWARDS 0 1 1 0 +NORTHWARD 1 1 1 2 +NORTHERNOSING 0 1 0 1 +NORTHERNERS 0 1 1 0 +NORTH 8 1 8 9 +NONCOMPOSTER 0 1 0 1 +NON 3 1 4 3 +NODS 0 1 1 0 +NODDING 0 1 0 1 +NINE 10 1 11 10 +NEWBORN 0 1 0 1 +NEVER 63 1 63 64 +NET 0 1 1 0 +NEOPLATANISTS 0 1 0 1 +NELLY 0 1 1 0 +NELLIERS 0 1 0 1 +NEIGHBOURHOOD 0 1 0 1 +NEIGHBORHOOD 0 1 1 0 +NEED 12 1 12 13 +NED 1 1 1 2 +NEARER 3 1 4 3 +NEAR 6 1 6 7 +NE'ER 0 1 1 0 +NARROWS 0 1 1 0 +NARES 0 1 1 0 +NAMED 3 1 4 3 +NAME 14 1 14 15 +MYSTERY 5 1 5 6 +MYLES 0 1 0 1 +MUZOLEUM 0 1 0 1 +MURCHISTON 0 1 0 1 +MUCH 68 1 69 68 +MOVED 10 1 10 11 +MOUTHED 1 1 2 1 +MOUTH 5 1 5 6 +MOURNED 0 1 0 1 +MOURN 0 1 1 0 +MOUNTED 0 1 1 0 +MOUNTAIN 5 1 5 6 +MOULDED 0 1 0 1 +MOTHERS 1 1 2 1 +MOTHER'S 4 1 4 5 +MORMONISM 2 1 3 2 +MORE 119 1 119 120 +MONTMARTRA 0 1 0 1 +MONSTERS 1 1 2 1 +MON 
0 1 0 1 +MOMBY 0 1 0 1 +MOMBI 0 1 1 0 +MOLDED 0 1 1 0 +MOLD 0 1 0 1 +MOHICANS 0 1 0 1 +MOHICAN 0 1 1 0 +MO 0 1 1 0 +MITIGATED 0 1 0 1 +MISSY 0 1 0 1 +MISSOURIENS 0 1 0 1 +MISSOURIANCE 0 1 0 1 +MISSIONS 0 1 0 1 +MINT 0 1 1 0 +MINE 6 1 7 6 +MIND 29 1 29 30 +MILLS 0 1 0 1 +MILLIONED 0 1 0 1 +MILLION'D 0 1 1 0 +MILLIGRAM 0 1 1 0 +MILL 0 1 0 1 +MILITATED 0 1 1 0 +MILES 6 1 7 6 +MILAGRAM 0 1 0 1 +MIKE 1 1 2 1 +MIGALLATIONS 0 1 0 1 +MICHAEL 0 1 0 1 +MICARDLE 0 1 0 1 +METRES 0 1 0 1 +METICOFF'S 0 1 0 1 +METERS 0 1 1 0 +METERPLATES 0 1 0 1 +METEOR 0 1 0 1 +METAL 0 1 1 0 +MET 10 1 10 11 +MERSEY 0 1 1 0 +MERGANSER 0 1 1 0 +MERGANCER 0 1 0 1 +MERCY 2 1 2 3 +MERCHISTON 0 1 1 0 +MER 0 1 1 0 +MENT 0 1 0 1 +MEND 1 1 2 1 +MEET 6 1 7 6 +MEDICRAFT 0 1 0 1 +MEDIA 0 1 0 1 +MEDAL 0 1 0 1 +MEAN 9 1 9 10 +MEALYBACK 0 1 1 0 +MEALEY 0 1 0 1 +MEADOWCROFT'S 0 1 1 0 +MEADOWCROFT 0 1 1 0 +MAYBEL 0 1 0 1 +MAYBE 0 1 0 1 +MAUSOLEUM 0 1 1 0 +MATES 0 1 0 1 +MATE 1 1 2 1 +MASTERY 0 1 1 0 +MASTER'S 1 1 2 1 +MASTER 14 1 14 15 +MARY 5 1 6 5 +MARVELOUS 0 1 1 0 +MARVELLOUS 0 1 0 1 +MARTRA 0 1 0 1 +MARTIAL 1 1 1 2 +MARTFICHET 0 1 0 1 +MARSHAL 1 1 1 2 +MARRIED 2 1 2 3 +MARKET 1 1 1 2 +MARKED 2 1 3 2 +MARIVAUX 1 1 2 1 +MARGOLOTTE 4 1 5 4 +MARGOLOT 0 1 0 1 +MARAVO 0 1 0 1 +MARAIS 0 1 1 0 +MARA 0 1 0 1 +MANYWHERE 0 1 0 1 +MANY 40 1 41 40 +MANSERVANT 0 1 1 0 +MANNA 0 1 1 0 +MANIFESTED 0 1 1 0 +MANIFEST 2 1 2 3 +MANA 0 1 0 1 +MAN'S 5 1 5 6 +MAKE 40 1 40 41 +MAIDS 3 1 4 3 +MAIDEN 0 1 0 1 +MAID 5 1 6 5 +MADE 61 1 61 62 +MADAME 4 1 4 5 +MADAM 2 1 3 2 +MACDONALDS 0 1 1 0 +MAC 0 1 1 0 +MABEL 0 1 1 0 +M 0 1 0 1 +LYNCHINGS 0 1 1 0 +LUTHERS 0 1 0 1 +LUTHER'S 3 1 4 3 +LUST 0 1 0 1 +LUNCHINGS 0 1 0 1 +LUIS 0 1 1 0 +LUBRICATE 0 1 1 0 +LUBRICADE 0 1 0 1 +LOWERED 0 1 0 1 +LOWER 5 1 6 5 +LOVES 3 1 3 4 +LOVE'S 0 1 1 0 +LOUISE 4 1 4 5 +LOU'S 0 1 1 0 +LOST 12 1 12 13 +LORNE 0 1 1 0 +LORN 0 1 0 1 +LOOSEN 0 1 0 1 +LONG 28 1 29 28 +LOCKY 0 1 0 1 +LOCATEOUS 0 1 0 1 +LOCATED 1 1 1 2 +LOCALIA 0 1 0 1 +LO 1 1 2 1 +LIVES 5 1 6 5 +LIVERYED 0 1 0 1 +LIVERIED 0 1 1 0 +LIVE 9 1 9 10 +LITTLE 101 1 101 102 +LITERALLY 1 1 2 1 +LITER 0 1 1 0 +LIST 1 1 1 2 +LINKED 0 1 0 1 +LINK'D 0 1 1 0 +LILY'S 0 1 0 1 +LILY 2 1 2 3 +LILLYS 0 1 1 0 +LILLY 0 1 1 0 +LILBURN 0 1 1 0 +LIGHTFOOTED 0 1 0 1 +LIGHT 38 1 39 38 +LIES 8 1 8 9 +LIE 1 1 1 2 +LETS 0 1 1 0 +LET'S 1 1 2 1 +LESSER 1 1 2 1 +LESS 28 1 28 29 +LEOCAYA 0 1 0 1 +LEOCADI 0 1 1 0 +LENG 0 1 0 1 +LEFRANK 0 1 1 0 +LECOMTE 0 1 0 1 +LEAVING 5 1 6 5 +LEAVES 5 1 5 6 +LEAVE 15 1 16 15 +LEAST 15 1 15 16 +LEASED 0 1 1 0 +LEAPING 3 1 3 4 +LEAK 0 1 0 1 +LEAF 3 1 3 4 +LEADS 2 1 3 2 +LEADER 2 1 2 3 +LAWN 1 1 1 2 +LARKSBURG 0 1 0 1 +LARKSBUR 0 1 0 1 +LAPE 0 1 0 1 +LANTHORN 0 1 1 0 +LANTERN 0 1 0 1 +LANDA 0 1 0 1 +LAND 12 1 13 12 +LAMENT 0 1 0 1 +LAMBENT 0 1 1 0 +LALLY 0 1 0 1 +LALLIE 0 1 1 0 +LABOUR 0 1 1 0 +LABOR 1 1 1 2 +LA 4 1 5 4 +L 1 1 2 1 +KRETON 0 1 0 1 +KNOCKED 3 1 4 3 +KNIFE 9 1 10 9 +KNEW 24 1 25 24 +KNEEP 0 1 0 1 +KNEED 0 1 1 0 +KNEE 0 1 1 0 +KIRTLAND 0 1 1 0 +KIOPH 0 1 0 1 +KINGDOMS 1 1 2 1 +KINGDOM'S 0 1 0 1 +KING'S 3 1 4 3 +KING 26 1 26 27 +KICKAPOOS 0 1 0 1 +KICK 1 1 2 1 +KIARASCURISTS 0 1 0 1 +KEZWICK 0 1 0 1 +KEYNOTE 0 1 1 0 +KESWICK 0 1 1 0 +KEOGH 0 1 1 0 +KEEPER 1 1 1 2 +KEEN 3 1 3 4 +KATHLEEN 0 1 1 0 +KANSAS 2 1 3 2 +KANSA 0 1 0 1 +JUS 0 1 1 0 +JOB 4 1 4 5 +JEWELLER'S 0 1 0 1 +JEWELER'S 0 1 1 0 +JEST 0 1 0 1 +JERUET 0 1 0 1 +JEFFREY'S 0 1 0 1 +JEFFREY 0 1 0 1 +JAW 1 1 2 1 +JASPER 5 1 6 5 +JAPS 0 1 0 1 +JANE'S 0 1 1 0 +JANE 4 1 4 5 +JACKKNIFE 0 1 0 1 +JACK 5 1 6 5 +ITTHIASORIS 0 1 0 1 +IT'LL 1 1 2 1 +ISIATED 0 1 0 1 
[... PER-WORD STATS continue, one word per line; columns: word, corr, tot_errs, words_in_ref, words_in_hyp; e.g. INDEED 29 1 29 30, HOW 49 1 50 49, FROM 187 1 187 188, BEEN 136 1 137 136 ...]
+KISSES 1 0 1 1 +KISSED 2 0 2 2 +KISS 2 0 2 2 +KINGS 1 0 1 1 +KINGLY 1 0 1 1 +KINGDOM 4 0 4 4 +KINDS 1 0 1 1 +KINDNESS 1 0 1 1 +KINDLY 3 0 3 3 +KINDLED 3 0 3 3 +KINDER 1 0 1 1 +KIND 14 0 14 14 +KILLS 1 0 1 1 +KILLED 1 0 1 1 +KIDNAP 2 0 2 2 +KID 1 0 1 1 +KICKAPOO 1 0 1 1 +KEY 5 0 5 5 +KETTLES 2 0 2 2 +KETTLE 1 0 1 1 +KERCHIEFS 1 0 1 1 +KEPT 5 0 5 5 +KENNINGTON 2 0 2 2 +KENNETH 9 0 9 9 +KEEPS 3 0 3 3 +KEEPING 4 0 4 4 +KEEP 10 0 10 10 +KEENNESS 2 0 2 2 +KEENER 1 0 1 1 +KATE 1 0 1 1 +KANE 1 0 1 1 +JUSTLY 2 0 2 2 +JUSTIFICATION 2 0 2 2 +JUSTICE 3 0 3 3 +JURISDICTION 1 0 1 1 +JUMPING 1 0 1 1 +JUMPED 1 0 1 1 +JUMP 3 0 3 3 +JUDGMENT 6 0 6 6 +JUDGES 1 0 1 1 +JUDGE 5 0 5 5 +JUDAH 1 0 1 1 +JOYOUS 1 0 1 1 +JOYCE 2 0 2 2 +JOY 4 0 4 4 +JOURNEYING 1 0 1 1 +JOURNEY 5 0 5 5 +JONES 3 0 3 3 +JOLLY 5 0 5 5 +JOKED 1 0 1 1 +JOKE 2 0 2 2 +JOINED 1 0 1 1 +JOIN 2 0 2 2 +JOHNSON 1 0 1 1 +JOHN 16 0 16 16 +JIB 1 0 1 1 +JEWISH 1 0 1 1 +JEWELS 3 0 3 3 +JET 1 0 1 1 +JESUS 7 0 7 7 +JERSEY 1 0 1 1 +JERK 1 0 1 1 +JENKS 1 0 1 1 +JELLIES 1 0 1 1 +JEHOVAH 1 0 1 1 +JEERED 1 0 1 1 +JEALOUS 1 0 1 1 +JAWS 2 0 2 2 +JASPER'S 2 0 2 2 +JAP 1 0 1 1 +JANUARY 2 0 2 2 +JAMES 2 0 2 2 +JAILER 5 0 5 5 +JACOB'S 2 0 2 2 +JACOB 1 0 1 1 +JACKSON 1 0 1 1 +JACKET 1 0 1 1 +J 2 0 2 2 +IVORY 1 0 1 1 +ITSELF 21 0 21 21 +ITCH 1 0 1 1 +ITALIAN 2 0 2 2 +ISSUED 2 0 2 2 +ISSUE 1 0 1 1 +ISRAEL 1 0 1 1 +ISOLATED 1 0 1 1 +ISN'T 5 0 5 5 +ISLAND 5 0 5 5 +IRWINE 1 0 1 1 +IRRITABLE 1 0 1 1 +IRRESOLUTION 1 0 1 1 +IRREPARABLE 1 0 1 1 +IRREGULARITY 2 0 2 2 +IRONING 1 0 1 1 +IRON'S 1 0 1 1 +IRON 2 0 2 2 +IRISH 2 0 2 2 +IRIDESCENT 1 0 1 1 +IRENE 1 0 1 1 +IRELAND 1 0 1 1 +INWARDLY 1 0 1 1 +INWARD 1 0 1 1 +INVOLVING 1 0 1 1 +INVOLVES 1 0 1 1 +INVOLVED 1 0 1 1 +INVOLVE 1 0 1 1 +INVITED 4 0 4 4 +INVITATION 3 0 3 3 +INVISIBLE 1 0 1 1 +INVIOLATE 1 0 1 1 +INVIDIOUS 1 0 1 1 +INVESTIGATION 1 0 1 1 +INVENTOR 1 0 1 1 +INVENTION 1 0 1 1 +INVENTED 1 0 1 1 +INVASION 1 0 1 1 +INVARIABLY 4 0 4 4 +INVARIABLE 1 0 1 1 +INVALID 1 0 1 1 +INVADER 1 0 1 1 +INVADE 1 0 1 1 +INTRODUCTION 4 0 4 4 +INTRODUCING 1 0 1 1 +INTRODUCED 3 0 3 3 +INTRODUCE 3 0 3 3 +INTRINSIC 1 0 1 1 +INTRICATE 1 0 1 1 +INTOLERANT 1 0 1 1 +INTOLERANCY 1 0 1 1 +INTOLERABLE 1 0 1 1 +INTIMATELY 2 0 2 2 +INTIMATE 2 0 2 2 +INTERVIEWS 1 0 1 1 +INTERVIEW 3 0 3 3 +INTERSECTED 1 0 1 1 +INTERRUPTED 2 0 2 2 +INTERPRETED 1 0 1 1 +INTERPRETATION 1 0 1 1 +INTERPOSED 1 0 1 1 +INTERNAL 1 0 1 1 +INTERMINGLED 1 0 1 1 +INTERMEDIATE 1 0 1 1 +INTERLACED 1 0 1 1 +INTERFERE 2 0 2 2 +INTERESTING 3 0 3 3 +INTERESTED 4 0 4 4 +INTEREST 10 0 10 10 +INTENTLY 2 0 2 2 +INTENTIONS 1 0 1 1 +INTENTION 1 0 1 1 +INTENT 1 0 1 1 +INTENSITY 3 0 3 3 +INTENSIFICATION 1 0 1 1 +INTENSELY 1 0 1 1 +INTENDED 1 0 1 1 +INTEND 1 0 1 1 +INTELLIGENT 5 0 5 5 +INTELLIGENCE 7 0 7 7 +INTELLECTS 1 0 1 1 +INTELLECT 1 0 1 1 +INTEGRITY 1 0 1 1 +INTANGIBLE 1 0 1 1 +INSURRECTIONISTS 1 0 1 1 +INSULT 1 0 1 1 +INSTRUMENT 1 0 1 1 +INSTRUCTIONS 4 0 4 4 +INSTITUTION 1 0 1 1 +INSTITUTED 1 0 1 1 +INSTITUTE 1 0 1 1 +INSTINCT 1 0 1 1 +INSTEAD 11 0 11 11 +INSTANTLY 6 0 6 6 +INSTANTANEOUS 1 0 1 1 +INSTANT 3 0 3 3 +INSTANCING 1 0 1 1 +INSTANCE 3 0 3 3 +INSTALLED 5 0 5 5 +INSTALL 1 0 1 1 +INSPIRED 1 0 1 1 +INSPIRATION 1 0 1 1 +INSOLENTLY 1 0 1 1 +INSISTS 1 0 1 1 +INSISTENCE 2 0 2 2 +INSISTED 1 0 1 1 +INSIST 1 0 1 1 +INSIPID 1 0 1 1 +INSINUATED 1 0 1 1 +INSIGNIFICANT 2 0 2 2 +INSIGHT 1 0 1 1 +INSIDE 2 0 2 2 +INSERTING 1 0 1 1 +INSENSIBLE 1 0 1 1 +INSECT 1 0 1 1 +INSATIABLE 2 0 2 2 +INNUMERABLE 2 0 2 2 +INNOCENTLY 1 0 1 1 +INNOCENT 2 0 2 2 +INNINGS 1 0 1 1 +INNER 2 0 2 2 +INMATES 1 0 1 1 
+INJUSTICE 4 0 4 4 +INJURY 2 0 2 2 +INIQUITY 1 0 1 1 +INHUMAN 1 0 1 1 +INHERITANCE 2 0 2 2 +INHABITANTS 3 0 3 3 +INGREDIENTS 1 0 1 1 +INGENUITY 2 0 2 2 +INGENIOUS 2 0 2 2 +INFUSE 1 0 1 1 +INFORMING 1 0 1 1 +INFORMED 3 0 3 3 +INFORMATION 3 0 3 3 +INFLUENTIAL 2 0 2 2 +INFLUENCES 2 0 2 2 +INFLUENCE 8 0 8 8 +INFLICT 2 0 2 2 +INFLEXIBLE 1 0 1 1 +INFIRMITY 1 0 1 1 +INFIRMITIES 1 0 1 1 +INFIRMARY 1 0 1 1 +INFINITE 4 0 4 4 +INFERIOR 2 0 2 2 +INFECTED 1 0 1 1 +INFANTRY 2 0 2 2 +INFANTILE 2 0 2 2 +INFANT 1 0 1 1 +INFANCY 1 0 1 1 +INFALLIBLE 1 0 1 1 +INEXPRESSIBLY 1 0 1 1 +INEXPLICABLE 2 0 2 2 +INEXPERIENCE 1 0 1 1 +INEXHAUSTIBLE 1 0 1 1 +INESTIMABLE 1 0 1 1 +INDUSTRY 1 0 1 1 +INDUSTRIOUS 1 0 1 1 +INDULGENCE 1 0 1 1 +INDULGED 1 0 1 1 +INDUCED 1 0 1 1 +INDUCE 1 0 1 1 +INDUBITABLE 1 0 1 1 +INDIVIDUALS 1 0 1 1 +INDIVIDUAL 5 0 5 5 +INDISTINGUISHABLE 1 0 1 1 +INDISTINCT 1 0 1 1 +INDISPENSABLE 1 0 1 1 +INDISCREET 1 0 1 1 +INDIRECT 1 0 1 1 +INDIGENCE 1 0 1 1 +INDIFFERENT 3 0 3 3 +INDIFFERENCE 4 0 4 4 +INDIES 1 0 1 1 +INDICATOR 1 0 1 1 +INDICATING 1 0 1 1 +INDICATES 2 0 2 2 +INDICATED 3 0 3 3 +INDICATE 2 0 2 2 +INDIANS 4 0 4 4 +INDIAN 4 0 4 4 +INDIA 1 0 1 1 +INDEPENDENTS 1 0 1 1 +INDEPENDENT 4 0 4 4 +INDEPENDENCE 1 0 1 1 +INCURRING 1 0 1 1 +INCURRED 1 0 1 1 +INCUR 1 0 1 1 +INCREASED 2 0 2 2 +INCREASE 2 0 2 2 +INCONVENIENT 1 0 1 1 +INCONCEIVABLE 1 0 1 1 +INCOMPREHENSIBLE 1 0 1 1 +INCOMPATIBLE 1 0 1 1 +INCOHERENT 1 0 1 1 +INCLUDING 1 0 1 1 +INCLUDED 2 0 2 2 +INCLINES 1 0 1 1 +INCLINED 2 0 2 2 +INCLINATIONS 2 0 2 2 +INCITED 1 0 1 1 +INCIDENTS 1 0 1 1 +INCIDENTAL 1 0 1 1 +INCIDENT 6 0 6 6 +INCHES 1 0 1 1 +INCH 2 0 2 2 +INCESSANTLY 1 0 1 1 +INCEPTION 1 0 1 1 +INCAPABLE 2 0 2 2 +INASMUCH 1 0 1 1 +INADEQUATE 2 0 2 2 +INADEQUACY 1 0 1 1 +INACCURACY 1 0 1 1 +INACCESSIBLE 1 0 1 1 +IMPULSIVELY 1 0 1 1 +IMPULSE 3 0 3 3 +IMPROVING 1 0 1 1 +IMPROVED 3 0 3 3 +IMPRISONMENT 1 0 1 1 +IMPRISONED 3 0 3 3 +IMPRESSIONS 6 0 6 6 +IMPRESSION 2 0 2 2 +IMPOSSIBLE 11 0 11 11 +IMPOSSIBILITY 1 0 1 1 +IMPOSE 1 0 1 1 +IMPORTANT 7 0 7 7 +IMPORTANCE 5 0 5 5 +IMPORT 1 0 1 1 +IMPLY 1 0 1 1 +IMPLORE 1 0 1 1 +IMPLIES 3 0 3 3 +IMPLICIT 1 0 1 1 +IMPLICATION 1 0 1 1 +IMPIETY 1 0 1 1 +IMPETUS 1 0 1 1 +IMPETUOUS 3 0 3 3 +IMPERIOUSLY 1 0 1 1 +IMPERIALIST 1 0 1 1 +IMPERIALISM 1 0 1 1 +IMPERIAL 1 0 1 1 +IMPERFECTLY 1 0 1 1 +IMPERATIVE 1 0 1 1 +IMPENETRABLE 2 0 2 2 +IMPELLED 2 0 2 2 +IMPEDIMENT 1 0 1 1 +IMPATIENT 1 0 1 1 +IMPATIENCE 3 0 3 3 +IMPASSIVELY 1 0 1 1 +IMMUNITY 1 0 1 1 +IMMORTALITY 1 0 1 1 +IMMORTAL 1 0 1 1 +IMMENSELY 1 0 1 1 +IMMENSE 1 0 1 1 +IMMEDIATELY 4 0 4 4 +IMMEDIATE 2 0 2 2 +IMMEDIACY 1 0 1 1 +IMBIBING 1 0 1 1 +IMAGINING 1 0 1 1 +IMAGINED 2 0 2 2 +IMAGINE 2 0 2 2 +IMAGINATIVE 1 0 1 1 +IMAGINATION 3 0 3 3 +IMAGINARY 1 0 1 1 +IMAGINABLE 2 0 2 2 +IMAGES 8 0 8 8 +IMAGE 9 0 9 9 +ILLUSTRIOUS 2 0 2 2 +ILLUSTRATION 1 0 1 1 +ILLUSION 2 0 2 2 +ILLUMINATION 1 0 1 1 +ILLUMINATING 1 0 1 1 +ILLUMINATED 1 0 1 1 +ILLS 1 0 1 1 +ILLNESS 1 0 1 1 +ILL 6 0 6 6 +IGNORANCE 2 0 2 2 +IGNOMINY 1 0 1 1 +IGNOBLE 1 0 1 1 +IDOLATRY 1 0 1 1 +IDLY 1 0 1 1 +IDLENESS 1 0 1 1 +IDLE 6 0 6 6 +IDIOSYNCRATICALLY 1 0 1 1 +IDENTITY 1 0 1 1 +IDEAS 11 0 11 11 +IDEAL 3 0 3 3 +IDEA 7 0 7 7 +HYPOTHESIS 1 0 1 1 +HYPOCRITE 1 0 1 1 +HYPOCRISY 1 0 1 1 +HYMN 1 0 1 1 +HYDRAS 1 0 1 1 +HUT 4 0 4 4 +HUSSY 1 0 1 1 +HUSHED 1 0 1 1 +HUSBAND'S 1 0 1 1 +HUSBAND 9 0 9 9 +HURT 1 0 1 1 +HURRYING 2 0 2 2 +HURRY 4 0 4 4 +HURRIEDLY 3 0 3 3 +HURONS 1 0 1 1 +HURLED 2 0 2 2 +HUNTING 2 0 2 2 +HUNTER 1 0 1 1 +HUNTED 1 0 1 1 +HUNGRY 1 0 1 1 +HUNGER 2 0 2 2 +HUNG 10 0 10 10 +HUNDREDTH 1 0 1 1 +HUNDREDS 2 0 2 
2 +HUNDRED 18 0 18 18 +HUMPY 2 0 2 2 +HUMOROUS 3 0 3 3 +HUMMING 1 0 1 1 +HUMILITY 1 0 1 1 +HUMILIATE 1 0 1 1 +HUMBUG 1 0 1 1 +HUMBLY 2 0 2 2 +HUMBLE 5 0 5 5 +HUMANITY 2 0 2 2 +HUMANE 1 0 1 1 +HUGGED 1 0 1 1 +HUGE 7 0 7 7 +HUES 1 0 1 1 +HUE 1 0 1 1 +HUDSON 1 0 1 1 +HUDDLED 1 0 1 1 +HOWL 1 0 1 1 +HOWEVER 29 0 29 29 +HOVER 1 0 1 1 +HOUSEWORK 1 0 1 1 +HOUSEMAID 2 0 2 2 +HOUSEKEEPER 2 0 2 2 +HOUSEHOLDS 1 0 1 1 +HOUSEHOLD'S 1 0 1 1 +HOUSEHOLD 4 0 4 4 +HOURS 13 0 13 13 +HOUNDED 1 0 1 1 +HOTELS 1 0 1 1 +HOTEL 7 0 7 7 +HOSTILITY 1 0 1 1 +HOSTESS 3 0 3 3 +HOST 3 0 3 3 +HOSPITALITY 4 0 4 4 +HOSPITABLY 1 0 1 1 +HOSPITABLE 1 0 1 1 +HOSE 2 0 2 2 +HORSES 6 0 6 6 +HORSEMEN 1 0 1 1 +HORROR 2 0 2 2 +HORRID 1 0 1 1 +HORRIBLY 2 0 2 2 +HORRIBLE 3 0 3 3 +HORNS 2 0 2 2 +HORNFUL 1 0 1 1 +HORIZON 3 0 3 3 +HORATIO 2 0 2 2 +HOPKINSON 2 0 2 2 +HOPING 1 0 1 1 +HOPELESS 1 0 1 1 +HOPED 2 0 2 2 +HOOKING 1 0 1 1 +HOOKED 1 0 1 1 +HONOURABLY 1 0 1 1 +HONORS 1 0 1 1 +HONEY 1 0 1 1 +HONESTY 1 0 1 1 +HONESTLY 2 0 2 2 +HONEST 5 0 5 5 +HOMELY 3 0 3 3 +HOLLYHOCKS 1 0 1 1 +HOLLOW 3 0 3 3 +HOLINESS 2 0 2 2 +HOLIDAYS 3 0 3 3 +HOLES 1 0 1 1 +HOLDS 2 0 2 2 +HOLDING 1 0 1 1 +HOLBORN 1 0 1 1 +HOBSON'S 1 0 1 1 +HOBS 1 0 1 1 +HITHERTO 1 0 1 1 +HISTORY 5 0 5 5 +HISTORIC 1 0 1 1 +HISTORIANS 1 0 1 1 +HISTORIAN 1 0 1 1 +HISSING 1 0 1 1 +HISS 1 0 1 1 +HIRE 1 0 1 1 +HINT 2 0 2 2 +HINDERED 1 0 1 1 +HIND 1 0 1 1 +HIMSELF 49 0 49 49 +HILLY 1 0 1 1 +HILL 4 0 4 4 +HIGHNESS 1 0 1 1 +HIGHLY 2 0 2 2 +HIERARCHY 1 0 1 1 +HIDING 1 0 1 1 +HIDEOUS 1 0 1 1 +HIDE 3 0 3 3 +HIDDEN 3 0 3 3 +HEWN 1 0 1 1 +HESTER 11 0 11 11 +HESITATION 1 0 1 1 +HESITATING 2 0 2 2 +HERSELF 20 0 20 20 +HERS 2 0 2 2 +HERON 1 0 1 1 +HEROINE 1 0 1 1 +HEROIC 2 0 2 2 +HERO 3 0 3 3 +HERMOCRATES 1 0 1 1 +HERETICS 2 0 2 2 +HEREDITY 1 0 1 1 +HEREAFTER 3 0 3 3 +HERE'S 1 0 1 1 +HERALDED 1 0 1 1 +HENRY'S 1 0 1 1 +HENRY 2 0 2 2 +HENLEY 1 0 1 1 +HENCE 4 0 4 4 +HEMMED 1 0 1 1 +HELPLESS 3 0 3 3 +HELPING 1 0 1 1 +HELMSMAN 1 0 1 1 +HELLENES 1 0 1 1 +HELL 2 0 2 2 +HELD 15 0 15 15 +HEIGHTS 1 0 1 1 +HEIGHTENING 1 0 1 1 +HEIGHT 1 0 1 1 +HEELS 1 0 1 1 +HEDGES 1 0 1 1 +HEDGE 1 0 1 1 +HEAVY 13 0 13 13 +HEAVING 2 0 2 2 +HEAVILY 1 0 1 1 +HEAVENS 1 0 1 1 +HEAVENLY 1 0 1 1 +HEAVED 1 0 1 1 +HEAT 2 0 2 2 +HEARTY 1 0 1 1 +HEARTILY 2 0 2 2 +HEARTHSTONES 1 0 1 1 +HEARTH 3 0 3 3 +HEARTED 1 0 1 1 +HEARSE 2 0 2 2 +HEARS 2 0 2 2 +HEARING 1 0 1 1 +HEARD 19 0 19 19 +HEAP 2 0 2 2 +HEALTH 6 0 6 6 +HEADS 3 0 3 3 +HEADQUARTERS 1 0 1 1 +HEADLONGS 1 0 1 1 +HEADLONG 1 0 1 1 +HEADING 1 0 1 1 +HE'LL 1 0 1 1 +HAY 1 0 1 1 +HAWORTH 1 0 1 1 +HAWKS 1 0 1 1 +HAWKEYE 5 0 5 5 +HAWK'S 1 0 1 1 +HAWK 7 0 7 7 +HAVEN'T 6 0 6 6 +HAUNTED 1 0 1 1 +HATS 1 0 1 1 +HATRED 3 0 3 3 +HATH 4 0 4 4 +HATER 1 0 1 1 +HATEFUL 1 0 1 1 +HATED 1 0 1 1 +HATE 1 0 1 1 +HAT 1 0 1 1 +HASTY 2 0 2 2 +HASTILY 2 0 2 2 +HASTENED 4 0 4 4 +HASTE 1 0 1 1 +HARVEST 1 0 1 1 +HARRYING 1 0 1 1 +HARROW 1 0 1 1 +HARPOONER 1 0 1 1 +HARNESS 1 0 1 1 +HARMONY 2 0 2 2 +HARMON 4 0 4 4 +HARM 2 0 2 2 +HARE 1 0 1 1 +HARDSHIPS 1 0 1 1 +HARDLY 14 0 14 14 +HARDER 1 0 1 1 +HARD 12 0 12 12 +HARASSING 1 0 1 1 +HARANGUING 1 0 1 1 +HAR 1 0 1 1 +HAPPY 16 0 16 16 +HAPPINESS 6 0 6 6 +HAPPILY 1 0 1 1 +HAPPENS 3 0 3 3 +HAPPENING 2 0 2 2 +HAPLESS 1 0 1 1 +HANSOM 1 0 1 1 +HANS 2 0 2 2 +HANGS 1 0 1 1 +HANGERS 1 0 1 1 +HANG 1 0 1 1 +HANDSOMEST 1 0 1 1 +HANDSOMELY 1 0 1 1 +HANDLE 1 0 1 1 +HANDKERCHIEFS 1 0 1 1 +HANDFUL 1 0 1 1 +HANDED 3 0 3 3 +HAND 29 0 29 29 +HAMMER 3 0 3 3 +HAMLET'S 1 0 1 1 +HAMLET 6 0 6 6 +HAMBURG 1 0 1 1 +HALT 1 0 1 1 +HALLWAY 1 0 1 1 +HALLS 3 0 3 3 +HAIRS 1 0 1 1 +HAILING 1 0 1 1 
+HADN'T 3 0 3 3 +HABITUALLY 1 0 1 1 +HABITUAL 2 0 2 2 +HABITS 4 0 4 4 +HABITATION 1 0 1 1 +HABIT 7 0 7 7 +GUTTER 1 0 1 1 +GUSTS 2 0 2 2 +GUST 1 0 1 1 +GUNS 1 0 1 1 +GUN 1 0 1 1 +GULLET 1 0 1 1 +GULF 2 0 2 2 +GUILTY 2 0 2 2 +GUILT 2 0 2 2 +GUIDED 2 0 2 2 +GUIDE 2 0 2 2 +GUERRILLA 1 0 1 1 +GUARDS 3 0 3 3 +GUARD 1 0 1 1 +GRUDGE 1 0 1 1 +GROWTH 5 0 5 5 +GROWN 7 0 7 7 +GROWLED 2 0 2 2 +GROWING 4 0 4 4 +GROW 4 0 4 4 +GROUPS 2 0 2 2 +GROUP 2 0 2 2 +GROSS 1 0 1 1 +GROPING 1 0 1 1 +GROPE 1 0 1 1 +GROANS 2 0 2 2 +GROANING 1 0 1 1 +GROANED 2 0 2 2 +GROAN 1 0 1 1 +GRINNING 1 0 1 1 +GRINDER 1 0 1 1 +GRIN 1 0 1 1 +GRIM 3 0 3 3 +GRIFFIN 1 0 1 1 +GRIEVED 1 0 1 1 +GRIEF 2 0 2 2 +GREW 5 0 5 5 +GREETINGS 1 0 1 1 +GREETING 4 0 4 4 +GREETED 1 0 1 1 +GREET 1 0 1 1 +GREEK 4 0 4 4 +GREATNESS 2 0 2 2 +GREATLY 10 0 10 10 +GREATEST 7 0 7 7 +GREATER 9 0 9 9 +GRAVEYARD 3 0 3 3 +GRAVES 1 0 1 1 +GRATITUDE 2 0 2 2 +GRATIFICATION 3 0 3 3 +GRATEFUL 2 0 2 2 +GRASS 11 0 11 11 +GRASPING 3 0 3 3 +GRANTED 3 0 3 3 +GRANDSON 1 0 1 1 +GRANDMOTHER 2 0 2 2 +GRANDFATHER 4 0 4 4 +GRANDER 2 0 2 2 +GRAMOPHONES 1 0 1 1 +GRAINS 1 0 1 1 +GRAF 1 0 1 1 +GRADATED 1 0 1 1 +GRACIOUSLY 1 0 1 1 +GRACIOUS 2 0 2 2 +GRACES 2 0 2 2 +GRACEFULLY 1 0 1 1 +GRACEFUL 1 0 1 1 +GRACE 12 0 12 12 +GOWN 1 0 1 1 +GOVERNOR'S 1 0 1 1 +GOVERNESS 2 0 2 2 +GOTHIC 3 0 3 3 +GOSPEL 2 0 2 2 +GORDONS 1 0 1 1 +GORDON 1 0 1 1 +GOODS 5 0 5 5 +GOODNESS 1 0 1 1 +GOODLY 1 0 1 1 +GONE 14 0 14 14 +GOLIATH 2 0 2 2 +GOLF 1 0 1 1 +GOLDEN 15 0 15 15 +GOLD 15 0 15 15 +GOES 2 0 2 2 +GODLY 1 0 1 1 +GODLESS 1 0 1 1 +GODDESS 1 0 1 1 +GOD'S 1 0 1 1 +GOD 33 0 33 33 +GOBY 1 0 1 1 +GNARLED 1 0 1 1 +GLUE 1 0 1 1 +GLOWING 3 0 3 3 +GLOW 3 0 3 3 +GLOVES 5 0 5 5 +GLOVED 1 0 1 1 +GLOSSY 2 0 2 2 +GLORY 1 0 1 1 +GLORIOUS 2 0 2 2 +GLORIES 1 0 1 1 +GLOOMY 1 0 1 1 +GLOOMILY 2 0 2 2 +GLOBE 1 0 1 1 +GLITTERING 4 0 4 4 +GLITTERED 2 0 2 2 +GLINDA 1 0 1 1 +GLIMMERING 1 0 1 1 +GLIDING 1 0 1 1 +GLIDES 1 0 1 1 +GLIDED 2 0 2 2 +GLEANER 1 0 1 1 +GLEAMS 1 0 1 1 +GLEAMING 4 0 4 4 +GLEAMED 1 0 1 1 +GLEAM 1 0 1 1 +GLASS 6 0 6 6 +GLARE 2 0 2 2 +GLANCED 2 0 2 2 +GLANCE 5 0 5 5 +GLAMOUR 1 0 1 1 +GLADNESS 1 0 1 1 +GLADLY 1 0 1 1 +GLAD 12 0 12 12 +GIVING 7 0 7 7 +GIVES 3 0 3 3 +GIVEN 15 0 15 15 +GIRLS 7 0 7 7 +GIRL'S 2 0 2 2 +GIRL 25 0 25 25 +GILDED 3 0 3 3 +GILD 1 0 1 1 +GIGANTIC 1 0 1 1 +GIFTS 2 0 2 2 +GIFT 6 0 6 6 +GHOSTS 2 0 2 2 +GHOSTLY 1 0 1 1 +GHOST 2 0 2 2 +GHISIZZLE 4 0 4 4 +GHASTLY 1 0 1 1 +GETTING 6 0 6 6 +GET 30 0 30 30 +GESTURES 1 0 1 1 +GESTURE 2 0 2 2 +GERMANTOWN 1 0 1 1 +GERM 1 0 1 1 +GEORGE 3 0 3 3 +GEOMETRICAL 1 0 1 1 +GENUINE 1 0 1 1 +GENTLY 5 0 5 5 +GENTLEWOMAN 1 0 1 1 +GENTLENESS 1 0 1 1 +GENTLEMAN'S 1 0 1 1 +GENTLE 7 0 7 7 +GENTILITY 1 0 1 1 +GENIUS 2 0 2 2 +GENEROUSLY 1 0 1 1 +GENEROUS 4 0 4 4 +GENERATION 2 0 2 2 +GENERATED 1 0 1 1 +GENERALS 3 0 3 3 +GENERALLY 8 0 8 8 +GENEALOGIES 1 0 1 1 +GAZING 2 0 2 2 +GAZED 2 0 2 2 +GAZE 3 0 3 3 +GATHERINGS 1 0 1 1 +GATHERING 2 0 2 2 +GATHERED 2 0 2 2 +GATES 6 0 6 6 +GATE 3 0 3 3 +GASPED 1 0 1 1 +GASEOUS 1 0 1 1 +GAS 1 0 1 1 +GARDENS 3 0 3 3 +GARDENING 1 0 1 1 +GARDENER'S 1 0 1 1 +GARDENER 1 0 1 1 +GARDEN 6 0 6 6 +GARB 1 0 1 1 +GAMEWELL'S 1 0 1 1 +GALVANOMETER 1 0 1 1 +GALLOWSNESS 1 0 1 1 +GALLEY 1 0 1 1 +GALLANT 4 0 4 4 +GAIN 2 0 2 2 +GABLES 1 0 1 1 +FUZZY 1 0 1 1 +FUTURE 5 0 5 5 +FURTHERED 1 0 1 1 +FURTHER 9 0 9 9 +FURNITURE 4 0 4 4 +FURNISHING 1 0 1 1 +FURNISHED 1 0 1 1 +FURNISH 1 0 1 1 +FURLED 1 0 1 1 +FURIOUSLY 2 0 2 2 +FUNERAL 1 0 1 1 +FUNDAMENTAL 1 0 1 1 +FUNCTIONS 1 0 1 1 +FUNCTION 1 0 1 1 +FUMBLED 1 0 1 1 +FULLY 3 0 3 3 +FULFILLED 2 0 2 2 +FUGITIVES 1 0 
1 1 +FUGITIVE'S 1 0 1 1 +FRUSTRATED 1 0 1 1 +FRUITS 1 0 1 1 +FRUIT 1 0 1 1 +FROZEN 2 0 2 2 +FROWNINGLY 1 0 1 1 +FROWNING 2 0 2 2 +FROWNED 2 0 2 2 +FROWN 1 0 1 1 +FROTHY 1 0 1 1 +FROST 1 0 1 1 +FRONTIER 3 0 3 3 +FRONT 6 0 6 6 +FROLIC 2 0 2 2 +FRO 1 0 1 1 +FRIVOLOUS 2 0 2 2 +FRINGED 2 0 2 2 +FRIGHTFUL 1 0 1 1 +FRIGATE 2 0 2 2 +FRIENDSHIP 1 0 1 1 +FRIENDS 8 0 8 8 +FRIENDLY 5 0 5 5 +FRIEND'S 2 0 2 2 +FRIDAY 1 0 1 1 +FRICTION 1 0 1 1 +FRETTING 1 0 1 1 +FRESHENS 1 0 1 1 +FRESH 6 0 6 6 +FREQUENTLY 3 0 3 3 +FREQUENTER 1 0 1 1 +FREQUENT 3 0 3 3 +FRENZY 1 0 1 1 +FRENCH 11 0 11 11 +FREELY 2 0 2 2 +FREEDOM 4 0 4 4 +FREED 1 0 1 1 +FRECKLES 1 0 1 1 +FRANKNESS 1 0 1 1 +FRANKLY 1 0 1 1 +FRANK 2 0 2 2 +FRANCIS 3 0 3 3 +FRANCE 6 0 6 6 +FRAMEWORK 1 0 1 1 +FRAMED 1 0 1 1 +FRAME 4 0 4 4 +FRAIL 1 0 1 1 +FRAGRANCE 1 0 1 1 +FRAGMENT 2 0 2 2 +FRACTURED 1 0 1 1 +FRACTURE 1 0 1 1 +FOX 1 0 1 1 +FOURTH 2 0 2 2 +FOURTEEN 1 0 1 1 +FOUNTAINS 1 0 1 1 +FOUNDING 1 0 1 1 +FOUNDATION 1 0 1 1 +FOUL 1 0 1 1 +FOUGHT 1 0 1 1 +FORWARD 5 0 5 5 +FORTY 2 0 2 2 +FORTUNES 2 0 2 2 +FORTUNE 8 0 8 8 +FORTUNATELY 1 0 1 1 +FORTUNATE 2 0 2 2 +FORTUITOUS 1 0 1 1 +FORTNIGHT 1 0 1 1 +FORTIFIED 2 0 2 2 +FORTHWITH 3 0 3 3 +FORTH 9 0 9 9 +FORT 2 0 2 2 +FORSAKE 1 0 1 1 +FORMS 6 0 6 6 +FORMING 1 0 1 1 +FORMIDABLE 1 0 1 1 +FORMER 7 0 7 7 +FORMED 7 0 7 7 +FORMALITY 1 0 1 1 +FORMALITIES 1 0 1 1 +FORMAL 1 0 1 1 +FORKED 1 0 1 1 +FORGOTTEN 4 0 4 4 +FORGOT 1 0 1 1 +FORGIVE 6 0 6 6 +FORGING 1 0 1 1 +FORGETTING 1 0 1 1 +FORGETFULNESS 1 0 1 1 +FORGET 6 0 6 6 +FORGE 1 0 1 1 +FORETOLD 2 0 2 2 +FOREST 6 0 6 6 +FORESEEING 1 0 1 1 +FORESAW 1 0 1 1 +FOREIGNER 1 0 1 1 +FOREIGN 1 0 1 1 +FOREHEAD 1 0 1 1 +FOREFINGER 1 0 1 1 +FORCIBLE 1 0 1 1 +FORCES 2 0 2 2 +FORCED 1 0 1 1 +FORBES 1 0 1 1 +FOOTNOTE 2 0 2 2 +FOOTMEN 1 0 1 1 +FOOTMAN 1 0 1 1 +FOOT 9 0 9 9 +FOOLS 1 0 1 1 +FOOLISHLY 2 0 2 2 +FOOLISH 3 0 3 3 +FOOD 1 0 1 1 +FONDNESS 1 0 1 1 +FOND 5 0 5 5 +FOLLOWS 3 0 3 3 +FOLLOWING 4 0 4 4 +FOLLOWER 1 0 1 1 +FOLLOW 7 0 7 7 +FOLLIES 1 0 1 1 +FOLIAGE 1 0 1 1 +FOLDED 1 0 1 1 +FOLD 2 0 2 2 +FOES 2 0 2 2 +FOCUS 1 0 1 1 +FOAM 4 0 4 4 +FLY 4 0 4 4 +FLUX 2 0 2 2 +FLUSHED 3 0 3 3 +FLUKES 1 0 1 1 +FLOWERS 11 0 11 11 +FLOWED 1 0 1 1 +FLOW 2 0 2 2 +FLOURISH 1 0 1 1 +FLOORS 1 0 1 1 +FLOOR 10 0 10 10 +FLOOD 2 0 2 2 +FLOATED 1 0 1 1 +FLOAT 1 0 1 1 +FLINT 1 0 1 1 +FLING 1 0 1 1 +FLIES 1 0 1 1 +FLICKER 2 0 2 2 +FLEW 1 0 1 1 +FLESH 5 0 5 5 +FLEETING 1 0 1 1 +FLEECY 1 0 1 1 +FLEECES 1 0 1 1 +FLEECE 3 0 3 3 +FLED 3 0 3 3 +FLAX 1 0 1 1 +FLATTERY 1 0 1 1 +FLATTERS 2 0 2 2 +FLATTERING 1 0 1 1 +FLATTERED 1 0 1 1 +FLAT 1 0 1 1 +FLASHED 3 0 3 3 +FLASH 3 0 3 3 +FLAPS 1 0 1 1 +FLAP 2 0 2 2 +FLANKED 1 0 1 1 +FLAMES 1 0 1 1 +FLAMED 2 0 2 2 +FLAME 3 0 3 3 +FLAGSTONES 1 0 1 1 +FLAG 1 0 1 1 +FIXES 1 0 1 1 +FIXED 3 0 3 3 +FIX 2 0 2 2 +FITZOOTH 7 0 7 7 +FITTED 2 0 2 2 +FITS 1 0 1 1 +FITLY 1 0 1 1 +FISTS 2 0 2 2 +FISHES 3 0 3 3 +FISHERMAN 1 0 1 1 +FISH 1 0 1 1 +FIRMNESS 1 0 1 1 +FIRMLY 2 0 2 2 +FIRM 1 0 1 1 +FIRESIDES 1 0 1 1 +FIRESIDE 1 0 1 1 +FIRES 1 0 1 1 +FIREPLACE 2 0 2 2 +FIRED 1 0 1 1 +FIR 9 0 9 9 +FINISHED 4 0 4 4 +FINISH 1 0 1 1 +FINGERS 6 0 6 6 +FINGER 2 0 2 2 +FINEST 1 0 1 1 +FINER 1 0 1 1 +FINELY 1 0 1 1 +FINED 1 0 1 1 +FINDS 2 0 2 2 +FINDING 3 0 3 3 +FINANCIAL 1 0 1 1 +FINALLY 8 0 8 8 +FINALE 1 0 1 1 +FINAL 5 0 5 5 +FILE 1 0 1 1 +FIGURES 4 0 4 4 +FIGURED 1 0 1 1 +FIGURE 6 0 6 6 +FIGHTING 4 0 4 4 +FIGHTER 1 0 1 1 +FIFTY 6 0 6 6 +FIFTH 1 0 1 1 +FIFTEENTH 2 0 2 2 +FIFTEEN 1 0 1 1 +FIERCELY 1 0 1 1 +FIERCE 4 0 4 4 +FIELDS 4 0 4 4 +FIELD 6 0 6 6 +FEWER 1 0 1 1 +FEVER 2 0 2 2 +FEUDS 1 0 1 1 +FESTIVE 1 
0 1 1 +FERVENT 1 0 1 1 +FERDINANDO 5 0 5 5 +FEMININE 1 0 1 1 +FEMALE 1 0 1 1 +FELLOWSHIP 1 0 1 1 +FELLOWS 3 0 3 3 +FELLOW'S 1 0 1 1 +FELLOW 9 0 9 9 +FELLER 1 0 1 1 +FELICITY 2 0 2 2 +FEET 11 0 11 11 +FEES 1 0 1 1 +FEELINGS 3 0 3 3 +FEEDING 1 0 1 1 +FEEDER 1 0 1 1 +FEED 2 0 2 2 +FEEBLE 2 0 2 2 +FEDERAL 3 0 3 3 +FED 1 0 1 1 +FEBRUARY 1 0 1 1 +FEATURES 9 0 9 9 +FEATURE 1 0 1 1 +FEATHERS 1 0 1 1 +FEASTED 1 0 1 1 +FEAST 3 0 3 3 +FEASIBLE 1 0 1 1 +FEARS 3 0 3 3 +FEARLESS 1 0 1 1 +FEARING 2 0 2 2 +FEARFUL 1 0 1 1 +FAVORABLY 1 0 1 1 +FAULTS 1 0 1 1 +FAULTLESS 1 0 1 1 +FAULT 2 0 2 2 +FATIGUE 2 0 2 2 +FATHOMS 6 0 6 6 +FATHOM 1 0 1 1 +FATHERS 2 0 2 2 +FATHER'S 4 0 4 4 +FATHER 28 0 28 28 +FATALITY 2 0 2 2 +FASTEST 1 0 1 1 +FASTEN 1 0 1 1 +FAST 7 0 7 7 +FASHIONED 1 0 1 1 +FASHIONABLE 1 0 1 1 +FASCINATION 2 0 2 2 +FARTHER 6 0 6 6 +FARRAGUT 1 0 1 1 +FARMS 1 0 1 1 +FARMERS 1 0 1 1 +FARMER'S 1 0 1 1 +FARMER 5 0 5 5 +FAREWELL 2 0 2 2 +FANTASY 1 0 1 1 +FANNING 1 0 1 1 +FANCY 3 0 3 3 +FANCIES 2 0 2 2 +FANCIED 2 0 2 2 +FANATIC 1 0 1 1 +FAN 2 0 2 2 +FAMOUSLY 2 0 2 2 +FAMILY 16 0 16 16 +FAMILIES 3 0 3 3 +FAMILIARITY 3 0 3 3 +FAMILIAR 4 0 4 4 +FALSE 6 0 6 6 +FALLS 5 0 5 5 +FALLING 2 0 2 2 +FALCONS 1 0 1 1 +FALCON 1 0 1 1 +FAITHFUL 1 0 1 1 +FAITH 9 0 9 9 +FAIRLY 4 0 4 4 +FAINTNESS 1 0 1 1 +FAINTLY 3 0 3 3 +FAINTING 4 0 4 4 +FAINT 3 0 3 3 +FAIN 1 0 1 1 +FAILURE 2 0 2 2 +FAILS 1 0 1 1 +FAILING 1 0 1 1 +FAILED 2 0 2 2 +FAIL 4 0 4 4 +FADES 1 0 1 1 +FADED 1 0 1 1 +FADE 4 0 4 4 +FACULTIES 1 0 1 1 +FACTS 4 0 4 4 +FACTORS 1 0 1 1 +FACTOR 1 0 1 1 +FACTIONS 2 0 2 2 +FACTION 1 0 1 1 +FACT 23 0 23 23 +FACILITATED 1 0 1 1 +FACILITATE 1 0 1 1 +FACES 4 0 4 4 +FACED 3 0 3 3 +FACE 29 0 29 29 +FABULOUS 1 0 1 1 +EYES 44 0 44 44 +EYELIDS 1 0 1 1 +EYED 1 0 1 1 +EXULTING 1 0 1 1 +EXULTATION 1 0 1 1 +EXTREMELY 2 0 2 2 +EXTREME 1 0 1 1 +EXTRAORDINARY 2 0 2 2 +EXTRACT 1 0 1 1 +EXTRA 1 0 1 1 +EXTINGUISHED 2 0 2 2 +EXTINCTION 1 0 1 1 +EXTINCT 1 0 1 1 +EXTERIOR 1 0 1 1 +EXTENT 6 0 6 6 +EXTENSIVE 1 0 1 1 +EXTENDED 3 0 3 3 +EXTEND 2 0 2 2 +EXQUISITE 3 0 3 3 +EXPULSION 1 0 1 1 +EXPRESSLY 1 0 1 1 +EXPRESSIVE 1 0 1 1 +EXPRESSIONS 1 0 1 1 +EXPRESSION 4 0 4 4 +EXPRESSING 2 0 2 2 +EXPRESSED 4 0 4 4 +EXPRESS 4 0 4 4 +EXPOSURE 1 0 1 1 +EXPONENT 2 0 2 2 +EXPLOSION 1 0 1 1 +EXPLORE 2 0 2 2 +EXPLOITS 1 0 1 1 +EXPLANATION 1 0 1 1 +EXPLAINED 1 0 1 1 +EXPLAIN 4 0 4 4 +EXPERIMENTALLY 1 0 1 1 +EXPERIENCING 1 0 1 1 +EXPERIENCED 1 0 1 1 +EXPERIENCE 7 0 7 7 +EXPENSIVE 1 0 1 1 +EXPENDITURE 4 0 4 4 +EXPEDITION 4 0 4 4 +EXPECTED 3 0 3 3 +EXPECTATIONS 2 0 2 2 +EXPECT 4 0 4 4 +EXPANDED 1 0 1 1 +EXOTICS 1 0 1 1 +EXISTING 3 0 3 3 +EXISTENT 1 0 1 1 +EXISTENCE 5 0 5 5 +EXISTED 4 0 4 4 +EXILE 1 0 1 1 +EXHORT 1 0 1 1 +EXHIBITS 1 0 1 1 +EXHIBITION 2 0 2 2 +EXHIBITED 1 0 1 1 +EXHIBIT 2 0 2 2 +EXHAUSTED 1 0 1 1 +EXHALE 1 0 1 1 +EXERTIONS 1 0 1 1 +EXERTED 1 0 1 1 +EXERCISING 1 0 1 1 +EXERCISED 1 0 1 1 +EXERCISE 1 0 1 1 +EXEMPLIFIES 1 0 1 1 +EXEMPLARY 1 0 1 1 +EXECUTIVE 1 0 1 1 +EXECUTED 1 0 1 1 +EXCLUDED 2 0 2 2 +EXCLAIMED 3 0 3 3 +EXCITING 2 0 2 2 +EXCITEMENT 3 0 3 3 +EXCITE 1 0 1 1 +EXCESSIVELY 1 0 1 1 +EXCESS 1 0 1 1 +EXCEPTIONS 1 0 1 1 +EXCEPTION 2 0 2 2 +EXCEPT 6 0 6 6 +EXCELLENT 5 0 5 5 +EXCELLENCY'S 1 0 1 1 +EXCELLENCY 2 0 2 2 +EXCELLENCE 1 0 1 1 +EXCEEDING 1 0 1 1 +EXCEEDED 1 0 1 1 +EXCEED 1 0 1 1 +EXAMPLE 2 0 2 2 +EXAMINED 4 0 4 4 +EXAMINE 4 0 4 4 +EXAMINATION 8 0 8 8 +EXALTED 1 0 1 1 +EXALT 1 0 1 1 +EXAGGERATED 1 0 1 1 +EXACTLY 8 0 8 8 +EXACT 5 0 5 5 +EX 2 0 2 2 +EVOLVING 1 0 1 1 +EVOLVED 1 0 1 1 +EVOKED 1 0 1 1 +EVOKE 1 0 1 1 +EVIL 6 0 6 6 +EVIDENTLY 4 0 4 4 
+EVIDENT 5 0 5 5 +EVIDENCE 5 0 5 5 +EVERYWHERE 7 0 7 7 +EVERYTHING 16 0 16 16 +EVERYBODY 7 0 7 7 +EVERLASTING 2 0 2 2 +EVENTS 8 0 8 8 +EVENT 4 0 4 4 +EVENLY 2 0 2 2 +EVENING 15 0 15 15 +EVASIVELY 1 0 1 1 +EVAPORATION 1 0 1 1 +EVADED 1 0 1 1 +EVA'S 1 0 1 1 +EUROPE 3 0 3 3 +EUCHARIST 1 0 1 1 +ETHICAL 1 0 1 1 +ETERNITY 2 0 2 2 +ETERNAL 2 0 2 2 +ETCHINGS 1 0 1 1 +ET 3 0 3 3 +ESTRANGEMENT 1 0 1 1 +ESTIMATE 1 0 1 1 +ESTABLISHED 3 0 3 3 +ESSEX 1 0 1 1 +ESSENTIALLY 1 0 1 1 +ESSENTIAL 2 0 2 2 +ESSENCE 1 0 1 1 +ESQUIRE 2 0 2 2 +ESPECIALLY 6 0 6 6 +ESCORT 4 0 4 4 +ESCAPADES 1 0 1 1 +ERRORS 1 0 1 1 +ERRONEOUS 2 0 2 2 +ERRING 1 0 1 1 +ERRAND 2 0 2 2 +ERR 1 0 1 1 +ERIE 1 0 1 1 +ERECTED 1 0 1 1 +ERECT 1 0 1 1 +ERA 1 0 1 1 +EQUIP 1 0 1 1 +EQUATION 1 0 1 1 +EQUALS 1 0 1 1 +EQUALLY 4 0 4 4 +EQUAL 2 0 2 2 +EPOCH 1 0 1 1 +EPISTLE 3 0 3 3 +EPISODE 1 0 1 1 +ENVY 1 0 1 1 +ENVIRONMENT 1 0 1 1 +ENVELOPMENT 1 0 1 1 +ENTRUSTING 1 0 1 1 +ENTRUSTED 1 0 1 1 +ENTRENCHED 1 0 1 1 +ENTREATIES 1 0 1 1 +ENTRANCE 4 0 4 4 +ENTIRELY 6 0 6 6 +ENTIRE 2 0 2 2 +ENTHUSIASTS 1 0 1 1 +ENTHUSIASTIC 1 0 1 1 +ENTHUSIASM 1 0 1 1 +ENTERTAINS 1 0 1 1 +ENTERTAINMENT 3 0 3 3 +ENTERTAIN 2 0 2 2 +ENTERS 1 0 1 1 +ENTERPRISE 2 0 2 2 +ENTERING 2 0 2 2 +ENTERED 21 0 21 21 +ENTANGLED 1 0 1 1 +ENOUGH 20 0 20 20 +ENORMOUSLY 2 0 2 2 +ENORMOUS 1 0 1 1 +ENNIS 1 0 1 1 +ENLISTED 1 0 1 1 +ENLIST 1 0 1 1 +ENJOYMENT 1 0 1 1 +ENJOYED 1 0 1 1 +ENJOY 2 0 2 2 +ENIGMA 1 0 1 1 +ENGORGED 1 0 1 1 +ENGLISHMAN 3 0 3 3 +ENGLISH 12 0 12 12 +ENGLAND 10 0 10 10 +ENGINE 6 0 6 6 +ENGENDERS 1 0 1 1 +ENGAGEMENTS 1 0 1 1 +ENGAGED 5 0 5 5 +ENGAGE 1 0 1 1 +ENFORCED 1 0 1 1 +ENFORCE 3 0 3 3 +ENERGY 3 0 3 3 +ENEMY'S 1 0 1 1 +ENEMY 3 0 3 3 +ENEMIES 3 0 3 3 +ENDURES 1 0 1 1 +ENDURED 1 0 1 1 +ENDURE 1 0 1 1 +ENDS 1 0 1 1 +ENDOWED 1 0 1 1 +ENDLESS 1 0 1 1 +ENDEAVOURED 1 0 1 1 +ENDEAVOUR 1 0 1 1 +ENDEAVORING 1 0 1 1 +ENDEAVOR 1 0 1 1 +END 18 0 18 18 +ENCYCLOPAEDIA 1 0 1 1 +ENCOURAGED 1 0 1 1 +ENCOURAGE 2 0 2 2 +ENCLOSE 1 0 1 1 +ENACTED 1 0 1 1 +ENABLES 2 0 2 2 +ENABLED 3 0 3 3 +EMULATION 1 0 1 1 +EMPTY 7 0 7 7 +EMPLOYMENTS 2 0 2 2 +EMPLOYMENT 1 0 1 1 +EMPLOYING 1 0 1 1 +EMPLOYERS 1 0 1 1 +EMPLOYER 1 0 1 1 +EMPLOYED 2 0 2 2 +EMPIRE 2 0 2 2 +EMPEROR 1 0 1 1 +EMOTIONS 2 0 2 2 +EMOTIONLESS 1 0 1 1 +EMOTION 1 0 1 1 +EMINENCES 1 0 1 1 +EMIGRATION 1 0 1 1 +EMERGENCY 1 0 1 1 +EMERGENCE 2 0 2 2 +EMERALD 1 0 1 1 +EMBROIDERED 2 0 2 2 +EMBRACING 2 0 2 2 +EMBRACE 2 0 2 2 +EMBODIED 1 0 1 1 +EMBLEM 1 0 1 1 +EMBITTERING 1 0 1 1 +EMBITTERED 1 0 1 1 +EMBERS 1 0 1 1 +EMBARRASS 1 0 1 1 +ELSEWHERE 4 0 4 4 +ELSE 7 0 7 7 +ELOQUENT 1 0 1 1 +ELMHURST 2 0 2 2 +ELIZABETH 1 0 1 1 +ELIZA 3 0 3 3 +ELICITED 1 0 1 1 +ELF 1 0 1 1 +ELEVEN 4 0 4 4 +ELEMENTS 7 0 7 7 +ELEMENTARY 3 0 3 3 +ELEMENT 4 0 4 4 +ELEGANT 1 0 1 1 +ELEGANCE 2 0 2 2 +ELECTROPLATING 1 0 1 1 +ELECTROLYTIC 2 0 2 2 +ELECTRICITY 5 0 5 5 +ELECTRICAL 2 0 2 2 +ELECTRIC 4 0 4 4 +ELECTIONEERING 1 0 1 1 +ELECTION 3 0 3 3 +ELECTING 1 0 1 1 +ELECTED 1 0 1 1 +ELECT 2 0 2 2 +ELDERS 1 0 1 1 +ELDERLY 1 0 1 1 +ELDER 2 0 2 2 +ELAPSED 1 0 1 1 +EJACULATIONS 1 0 1 1 +EJACULATED 2 0 2 2 +EITHER'S 1 0 1 1 +EITHER 8 0 8 8 +EIGHTY 3 0 3 3 +EIGHTH 3 0 3 3 +EIGHTEENTH 2 0 2 2 +EIGHTEEN 14 0 14 14 +EIGHT 7 0 7 7 +EGYPTIAN 1 0 1 1 +EGYPT 1 0 1 1 +EFFORTS 4 0 4 4 +EFFORT 11 0 11 11 +EFFICIENCY 1 0 1 1 +EFFECTUAL 2 0 2 2 +EFFECTS 3 0 3 3 +EFFECTIVENESS 1 0 1 1 +EFFECTIVELY 1 0 1 1 +EFFECTIVE 2 0 2 2 +EDWARD 3 0 3 3 +EDUCATION 5 0 5 5 +EDUCATED 2 0 2 2 +EDITORS 1 0 1 1 +EDITOR 1 0 1 1 +EDISONIA 1 0 1 1 +EDISON 16 0 16 16 +EDIFICE 1 0 1 1 +EDICTS 1 0 1 1 +EDICT 1 0 1 1 +EDGES 1 0 1 1 
+EDGED 1 0 1 1 +EDGE 5 0 5 5 +EDDYING 1 0 1 1 +ECONOMY 2 0 2 2 +ECONOMIC 3 0 3 3 +ECHOING 1 0 1 1 +ECHOES 1 0 1 1 +ECCLESIASTICAL 1 0 1 1 +EATEN 1 0 1 1 +EAT 5 0 5 5 +EASY 14 0 14 14 +EASTWARDS 1 0 1 1 +EASTERLY'S 1 0 1 1 +EAST 4 0 4 4 +EASILY 10 0 10 10 +EARTHLY 1 0 1 1 +EARTH'S 3 0 3 3 +EARTH 17 0 17 17 +EARNESTLY 1 0 1 1 +EARNED 1 0 1 1 +EARLY 13 0 13 13 +EARLIER 6 0 6 6 +EAGLES 1 0 1 1 +EAGERNESS 2 0 2 2 +EAGERLY 1 0 1 1 +EAGER 4 0 4 4 +EACH 24 0 24 24 +DYNAMO 2 0 2 2 +DYE 1 0 1 1 +DWELLINGS 3 0 3 3 +DWELL 1 0 1 1 +DUTY 7 0 7 7 +DUTIES 8 0 8 8 +DUSK 2 0 2 2 +DURATION 1 0 1 1 +DURABLE 1 0 1 1 +DUPLICATE 1 0 1 1 +DUNNO 1 0 1 1 +DUMBFOUNDED 1 0 1 1 +DUMB 1 0 1 1 +DUMAS 1 0 1 1 +DULY 1 0 1 1 +DULL 2 0 2 2 +DUG 1 0 1 1 +DUE 5 0 5 5 +DUDLEY 1 0 1 1 +DUDGEON 1 0 1 1 +DUCKS 1 0 1 1 +DUCKLINGS 1 0 1 1 +DUCHESS 3 0 3 3 +DUBIOUSLY 1 0 1 1 +DRY 4 0 4 4 +DRUMS 1 0 1 1 +DROWNED 2 0 2 2 +DROPS 3 0 3 3 +DROPPING 1 0 1 1 +DROPPED 5 0 5 5 +DROP 3 0 3 3 +DROOPED 1 0 1 1 +DRIVING 1 0 1 1 +DRIVES 1 0 1 1 +DRIVEN 1 0 1 1 +DRINKS 1 0 1 1 +DRINK 4 0 4 4 +DRIFTS 1 0 1 1 +DRIED 1 0 1 1 +DREW 10 0 10 10 +DRESSES 1 0 1 1 +DRESSED 6 0 6 6 +DRESS 1 0 1 1 +DREDGED 1 0 1 1 +DREARY 1 0 1 1 +DREAMY 1 0 1 1 +DREAMT 1 0 1 1 +DREAMS 2 0 2 2 +DREAMING 2 0 2 2 +DREAMED 1 0 1 1 +DREAM 5 0 5 5 +DREADING 1 0 1 1 +DREADFUL 3 0 3 3 +DRAWS 2 0 2 2 +DRAWN 7 0 7 7 +DRAWING 2 0 2 2 +DRAW 4 0 4 4 +DRAUGHT 1 0 1 1 +DRAPERIES 1 0 1 1 +DRANK 1 0 1 1 +DRAMATIST'S 1 0 1 1 +DRAMATIST 1 0 1 1 +DRAMATIC 2 0 2 2 +DRAMA 1 0 1 1 +DRAINED 1 0 1 1 +DRAIN 1 0 1 1 +DRAGOONS 1 0 1 1 +DRAGON'S 1 0 1 1 +DRAGGING 1 0 1 1 +DRAGGED 2 0 2 2 +DRAG 1 0 1 1 +DOZEN 4 0 4 4 +DOWNWARD 2 0 2 2 +DOVES 1 0 1 1 +DOVE 1 0 1 1 +DOUGLAS 4 0 4 4 +DOUGHY 1 0 1 1 +DOUGHNUTS 1 0 1 1 +DOUGH 1 0 1 1 +DOUBTS 2 0 2 2 +DOUBTLESS 2 0 2 2 +DOUBTINGLY 1 0 1 1 +DOUBTING 1 0 1 1 +DOUBTFULLY 1 0 1 1 +DOUBTFUL 2 0 2 2 +DOUBT 11 0 11 11 +DOTH 5 0 5 5 +DOST 3 0 3 3 +DOROTHY 1 0 1 1 +DORKING 1 0 1 1 +DORCAS 6 0 6 6 +DOORS 3 0 3 3 +DOOM 1 0 1 1 +DONKEY 1 0 1 1 +DONE 24 0 24 24 +DOMINIONS 1 0 1 1 +DOMINION 1 0 1 1 +DOMESTIC 2 0 2 2 +DOME 1 0 1 1 +DOLLS 2 0 2 2 +DOLLARS 7 0 7 7 +DOING 12 0 12 12 +DOGS 1 0 1 1 +DOGGED 1 0 1 1 +DOG 2 0 2 2 +DOESN'T 3 0 3 3 +DOCTRINES 3 0 3 3 +DOCTRINE 4 0 4 4 +DOCTORS 1 0 1 1 +DOCTOR 7 0 7 7 +DIZZILY 1 0 1 1 +DIVORCE 1 0 1 1 +DIVISIONS 1 0 1 1 +DIVISION 2 0 2 2 +DIVINITY 1 0 1 1 +DIVING 4 0 4 4 +DIVINE 3 0 3 3 +DIVIDING 1 0 1 1 +DIVIDEND 1 0 1 1 +DIVIDED 4 0 4 4 +DIVIDE 2 0 2 2 +DIVERTING 1 0 1 1 +DIVERT 1 0 1 1 +DIVERSITY 1 0 1 1 +DISUSE 1 0 1 1 +DISUNITED 1 0 1 1 +DISTURBING 1 0 1 1 +DISTURBANCE 1 0 1 1 +DISTURB 2 0 2 2 +DISTRUSTING 2 0 2 2 +DISTRUSTFUL 1 0 1 1 +DISTRUST 1 0 1 1 +DISTRESSED 1 0 1 1 +DISTRESS 1 0 1 1 +DISTORTION 1 0 1 1 +DISTORTED 1 0 1 1 +DISTINGUISH 3 0 3 3 +DISTINCTLY 3 0 3 3 +DISTINCTIVE 1 0 1 1 +DISTINCTION 5 0 5 5 +DISTINCT 2 0 2 2 +DISTANT 4 0 4 4 +DISTANCE 6 0 6 6 +DISSENTERS 1 0 1 1 +DISSENTED 1 0 1 1 +DISPUTE 3 0 3 3 +DISPOSITIONS 1 0 1 1 +DISPOSITION 3 0 3 3 +DISPOSES 1 0 1 1 +DISPOSED 3 0 3 3 +DISPOSE 1 0 1 1 +DISPOSAL 1 0 1 1 +DISPLEASURE 1 0 1 1 +DISPLAYING 1 0 1 1 +DISPLAYED 1 0 1 1 +DISPLAY 1 0 1 1 +DISPERSED 3 0 3 3 +DISPENSATION 1 0 1 1 +DISPATCH 1 0 1 1 +DISPASSIONATE 1 0 1 1 +DISPARAGE 1 0 1 1 +DISOWN 1 0 1 1 +DISMISS 2 0 2 2 +DISMAYED 1 0 1 1 +DISMAY 1 0 1 1 +DISLOYAL 1 0 1 1 +DISLIKE 1 0 1 1 +DISK 1 0 1 1 +DISINCORPORATED 1 0 1 1 +DISHES 2 0 2 2 +DISH 3 0 3 3 +DISGUSTED 1 0 1 1 +DISGUST 3 0 3 3 +DISGUISE 2 0 2 2 +DISGRACE 3 0 3 3 +DISENGAGED 1 0 1 1 +DISEASED 1 0 1 1 +DISDAINFUL 1 0 1 1 +DISCUSSION 1 0 1 1 +DISCUSSED 1 
0 1 1 +DISCUSS 2 0 2 2 +DISCREETLY 1 0 1 1 +DISCREET 1 0 1 1 +DISCOVERY 2 0 2 2 +DISCOVERERS 1 0 1 1 +DISCOVERED 3 0 3 3 +DISCOVER 3 0 3 3 +DISCOURSE 2 0 2 2 +DISCOURAGED 1 0 1 1 +DISCOURAGE 2 0 2 2 +DISCOMFORT 1 0 1 1 +DISCLOSES 1 0 1 1 +DISCIPLINE 5 0 5 5 +DISCERN 1 0 1 1 +DISBURDENED 1 0 1 1 +DISASTER 1 0 1 1 +DISAPPOINTMENT 6 0 6 6 +DISAPPEARS 1 0 1 1 +DISAPPEARED 1 0 1 1 +DISAPPEAR 2 0 2 2 +DISADVANTAGE 1 0 1 1 +DISABILITIES 1 0 1 1 +DIRTY 1 0 1 1 +DIRECTLY 4 0 4 4 +DIRECTIONS 2 0 2 2 +DIRECTION 6 0 6 6 +DIRECTING 1 0 1 1 +DIRECTED 2 0 2 2 +DIRECT 8 0 8 8 +DIP 1 0 1 1 +DIOCLETIAN 1 0 1 1 +DINNER 8 0 8 8 +DINING 1 0 1 1 +DINE 1 0 1 1 +DINAH'S 1 0 1 1 +DINAH 2 0 2 2 +DIMLY 1 0 1 1 +DIMINUTION 2 0 2 2 +DIMINISH 1 0 1 1 +DIMENSIONS 1 0 1 1 +DIM 2 0 2 2 +DILIGENTLY 1 0 1 1 +DILEMMA 1 0 1 1 +DILATED 1 0 1 1 +DIGNITY 4 0 4 4 +DIGNITARIES 1 0 1 1 +DIGNIFIED 4 0 4 4 +DIGBY 1 0 1 1 +DIG 1 0 1 1 +DIFFUSED 1 0 1 1 +DIFFICULTIES 3 0 3 3 +DIFFICULT 11 0 11 11 +DIFFERS 2 0 2 2 +DIFFERENTLY 1 0 1 1 +DIFFERENTIATION 1 0 1 1 +DIFFERENT 15 0 15 15 +DIFFERENCES 2 0 2 2 +DIFFERENCE 5 0 5 5 +DIFFER 1 0 1 1 +DIES 1 0 1 1 +DIED 5 0 5 5 +DIE 4 0 4 4 +DIDN'T 12 0 12 12 +DICE 1 0 1 1 +DIAMONDS 1 0 1 1 +DIALOGUE 3 0 3 3 +DIALECT 1 0 1 1 +DIAGRAMS 1 0 1 1 +DEWS 2 0 2 2 +DEW 2 0 2 2 +DEVOUR 3 0 3 3 +DEVOTION 1 0 1 1 +DEVOTES 1 0 1 1 +DEVOTED 2 0 2 2 +DEVOTE 1 0 1 1 +DEVOLVE 1 0 1 1 +DEVOID 1 0 1 1 +DEVISING 1 0 1 1 +DEVIL 1 0 1 1 +DEVICES 2 0 2 2 +DEVELOPMENTS 1 0 1 1 +DEVELOPMENT 6 0 6 6 +DEVELOPED 2 0 2 2 +DEVELOP 1 0 1 1 +DETOUR 1 0 1 1 +DETESTS 1 0 1 1 +DETESTED 1 0 1 1 +DETESTABLE 1 0 1 1 +DETERMINING 1 0 1 1 +DETERMINATION 1 0 1 1 +DETECT 1 0 1 1 +DETAINED 2 0 2 2 +DETAILS 2 0 2 2 +DETAIL 1 0 1 1 +DETACHMENT 1 0 1 1 +DESTRUCTIVE 1 0 1 1 +DESTRUCTION 2 0 2 2 +DESTROYED 1 0 1 1 +DESTINY 1 0 1 1 +DESTINED 2 0 2 2 +DESPITE 1 0 1 1 +DESPISE 1 0 1 1 +DESPERATELY 1 0 1 1 +DESPERATE 3 0 3 3 +DESPAIRING 1 0 1 1 +DESPAIR 4 0 4 4 +DESOLATION 1 0 1 1 +DESOLATE 1 0 1 1 +DESK 2 0 2 2 +DESIROUS 1 0 1 1 +DESIRES 1 0 1 1 +DESIRED 3 0 3 3 +DESIRE 4 0 4 4 +DESIRABLE 1 0 1 1 +DESIGNERS 1 0 1 1 +DESIGNATED 1 0 1 1 +DESIGN 3 0 3 3 +DESERVES 1 0 1 1 +DESERVED 1 0 1 1 +DESERVE 1 0 1 1 +DESERTS 1 0 1 1 +DESERTED 1 0 1 1 +DESERT 1 0 1 1 +DESCRIPTIONS 1 0 1 1 +DESCRIPTION 2 0 2 2 +DESCRIBING 1 0 1 1 +DESCRIBED 2 0 2 2 +DESCRIBE 3 0 3 3 +DESCENDS 1 0 1 1 +DESCENDING 2 0 2 2 +DESCENDED 2 0 2 2 +DESCENDANTS 1 0 1 1 +DESCEND 4 0 4 4 +DERIVED 1 0 1 1 +DERIVATIVE 1 0 1 1 +DEPUTY 1 0 1 1 +DEPTHS 2 0 2 2 +DEPTH 4 0 4 4 +DEPRIVED 1 0 1 1 +DEPRESSION 1 0 1 1 +DEPRESSED 1 0 1 1 +DEPRECIATING 1 0 1 1 +DEPRECATION 3 0 3 3 +DEPRAVED 1 0 1 1 +DEPOSITION 1 0 1 1 +DEPORTMENT 1 0 1 1 +DEPENDS 1 0 1 1 +DEPENDENT 2 0 2 2 +DEPENDENCE 1 0 1 1 +DEPEND 2 0 2 2 +DEPARTURE 4 0 4 4 +DEPARTMENT 1 0 1 1 +DEPARTING 2 0 2 2 +DEPARTED 1 0 1 1 +DEPART 1 0 1 1 +DENY 5 0 5 5 +DENUNCIATION 1 0 1 1 +DENSELY 1 0 1 1 +DENSE 2 0 2 2 +DENOTE 1 0 1 1 +DENIES 1 0 1 1 +DENIED 2 0 2 2 +DEMURELY 1 0 1 1 +DEMURE 1 0 1 1 +DEMONSTRATION 1 0 1 1 +DEMOCRATIC 2 0 2 2 +DEMEANOUR 1 0 1 1 +DEMEANOR 1 0 1 1 +DEMANDS 1 0 1 1 +DEMANDED 2 0 2 2 +DELUSIVE 1 0 1 1 +DELIVERY 2 0 2 2 +DELIVERING 1 0 1 1 +DELIVERED 2 0 2 2 +DELIGHTFUL 4 0 4 4 +DELIGHTED 5 0 5 5 +DELIGHT 4 0 4 4 +DELICIOUSNESS 1 0 1 1 +DELICIOUS 1 0 1 1 +DELICATE 4 0 4 4 +DELICACY 1 0 1 1 +DELIBERATIONS 1 0 1 1 +DELIBERATION 1 0 1 1 +DELEGATED 1 0 1 1 +DELAYED 2 0 2 2 +DELAY 3 0 3 3 +DELAWARES 1 0 1 1 +DELAWARE 1 0 1 1 +DEIGNED 1 0 1 1 +DEIGN 1 0 1 1 +DEGREES 3 0 3 3 +DEGREE 6 0 6 6 +DEFYING 1 0 1 1 +DEFTLY 1 0 1 1 +DEFORMITIES 1 0 
1 1 +DEFINITION 3 0 3 3 +DEFINITE 2 0 2 2 +DEFINES 1 0 1 1 +DEFIED 1 0 1 1 +DEFIANCE 2 0 2 2 +DEFERENCE 2 0 2 2 +DEFENDS 1 0 1 1 +DEFENDERS 1 0 1 1 +DEFENDED 1 0 1 1 +DEFENDANT 1 0 1 1 +DEFENCE 1 0 1 1 +DEFECT 1 0 1 1 +DEER 3 0 3 3 +DEEPLY 4 0 4 4 +DEEPENING 1 0 1 1 +DEEP 11 0 11 11 +DEEDS 1 0 1 1 +DECREES 1 0 1 1 +DECREED 1 0 1 1 +DECREE 1 0 1 1 +DECORATIVE 1 0 1 1 +DECORATED 3 0 3 3 +DECOMPOSE 1 0 1 1 +DECLINING 2 0 2 2 +DECLINED 2 0 2 2 +DECLARES 1 0 1 1 +DECLARED 2 0 2 2 +DECLARE 2 0 2 2 +DECLARATION 1 0 1 1 +DECISION 3 0 3 3 +DECIDEDLY 1 0 1 1 +DECIDED 3 0 3 3 +DECIDE 4 0 4 4 +DECEPTIVE 1 0 1 1 +DECEMBER 1 0 1 1 +DECEIVING 1 0 1 1 +DECEIVED 1 0 1 1 +DECEIT 1 0 1 1 +DECANTERS 1 0 1 1 +DECADES 1 0 1 1 +DEBATE 1 0 1 1 +DEATH 19 0 19 19 +DEARS 1 0 1 1 +DEARLY 2 0 2 2 +DEAREST 2 0 2 2 +DEAR 22 0 22 22 +DEALER 1 0 1 1 +DEAL 10 0 10 10 +DEAF 1 0 1 1 +DAZZLING 2 0 2 2 +DAZED 1 0 1 1 +DAYS 16 0 16 16 +DAYLIGHT 2 0 2 2 +DAWN 2 0 2 2 +DAVID 8 0 8 8 +DAUNTLESS 1 0 1 1 +DAUGHTERS 1 0 1 1 +DAUGHTER 9 0 9 9 +DATING 1 0 1 1 +DATE 1 0 1 1 +DATA 2 0 2 2 +DARTED 3 0 3 3 +DARLING 1 0 1 1 +DARKNESS 3 0 3 3 +DARKENED 1 0 1 1 +DARING 2 0 2 2 +DARED 3 0 3 3 +DARE 3 0 3 3 +DANGERS 1 0 1 1 +DANGEROUS 4 0 4 4 +DANGER 9 0 9 9 +DANES 1 0 1 1 +DANCING 2 0 2 2 +DANCES 1 0 1 1 +DANCERS 1 0 1 1 +DANCER 1 0 1 1 +DANCED 2 0 2 2 +DANCE 4 0 4 4 +DAMSEL 1 0 1 1 +DAMNED 1 0 1 1 +DAMNABLE 1 0 1 1 +DAMASK 1 0 1 1 +DAMAGING 1 0 1 1 +DAMAGE 2 0 2 2 +DAINTY 1 0 1 1 +DAILY 3 0 3 3 +DAFT 1 0 1 1 +CYRIL 2 0 2 2 +CYPRESS 1 0 1 1 +CYNTHIA'S 1 0 1 1 +CYNTHIA 3 0 3 3 +CYMBALS 1 0 1 1 +CUTTINGS 1 0 1 1 +CUSTOMS 1 0 1 1 +CUSTOMER'S 1 0 1 1 +CUSTOM 2 0 2 2 +CUSTODY 2 0 2 2 +CUSHIONED 1 0 1 1 +CURVE 2 0 2 2 +CURTAINS 1 0 1 1 +CURTAIN 3 0 3 3 +CURSE 1 0 1 1 +CURRENT 8 0 8 8 +CURLY 1 0 1 1 +CURIOUSLY 1 0 1 1 +CURIOUS 4 0 4 4 +CURIOSITY 2 0 2 2 +CURBSTONE 1 0 1 1 +CUPS 1 0 1 1 +CUPBOARD 2 0 2 2 +CUP 3 0 3 3 +CUNNING 3 0 3 3 +CUMBERLAND'S 1 0 1 1 +CULTURE 4 0 4 4 +CULTIVATING 1 0 1 1 +CULTIVATE 1 0 1 1 +CULPRIT 1 0 1 1 +CULMINATING 2 0 2 2 +CUFFS 1 0 1 1 +CUB 1 0 1 1 +CRYSTALLIZE 1 0 1 1 +CRYING 1 0 1 1 +CRY 5 0 5 5 +CRUSHED 1 0 1 1 +CRUSH 3 0 3 3 +CRUMBLED 1 0 1 1 +CRUMBLE 1 0 1 1 +CRUISING 1 0 1 1 +CRUELTY 2 0 2 2 +CRUEL 1 0 1 1 +CRUCIFIX 2 0 2 2 +CRUCIFIED 1 0 1 1 +CROWNS 1 0 1 1 +CROWNING 2 0 2 2 +CROWN 6 0 6 6 +CROWDING 1 0 1 1 +CROWDED 2 0 2 2 +CROWD 5 0 5 5 +CROSSLY 1 0 1 1 +CROSSING 1 0 1 1 +CROSSED 3 0 3 3 +CROPS 1 0 1 1 +CROP 1 0 1 1 +CROOKED 3 0 3 3 +CRITICISM 1 0 1 1 +CRITICALLY 1 0 1 1 +CRISIS 1 0 1 1 +CRIMSON 1 0 1 1 +CRIMINAL 3 0 3 3 +CRIME 3 0 3 3 +CRIES 3 0 3 3 +CRIED 23 0 23 23 +CREPT 1 0 1 1 +CREEPING 2 0 2 2 +CREEP 1 0 1 1 +CREEK 2 0 2 2 +CREED 1 0 1 1 +CREDIT 2 0 2 2 +CREATURES 2 0 2 2 +CREATURE 8 0 8 8 +CREATOR 1 0 1 1 +CREATIVE 1 0 1 1 +CREATIONS 1 0 1 1 +CREATION 2 0 2 2 +CREATING 1 0 1 1 +CREATES 1 0 1 1 +CREATED 2 0 2 2 +CREATE 3 0 3 3 +CREAM 1 0 1 1 +CREAKED 1 0 1 1 +CRAZY 2 0 2 2 +CRAWLING 1 0 1 1 +CRAWL 1 0 1 1 +CRANED 1 0 1 1 +CRANE 1 0 1 1 +CRAMPNESS 1 0 1 1 +CRAMPED 1 0 1 1 +CRACKING 1 0 1 1 +CRACKED 2 0 2 2 +COWARDLY 1 0 1 1 +COWARD 1 0 1 1 +COVERT 1 0 1 1 +COVERING 1 0 1 1 +COVERED 2 0 2 2 +COVER 2 0 2 2 +COVENANTERS 5 0 5 5 +COUSINS 3 0 3 3 +COUSIN'S 2 0 2 2 +COUSIN 7 0 7 7 +COURTIERS 2 0 2 2 +COURTESY 2 0 2 2 +COURAGEOUS 1 0 1 1 +COURAGE 6 0 6 6 +COUPLE 1 0 1 1 +COUNTY 7 0 7 7 +COUNTRIES 1 0 1 1 +COUNTING 1 0 1 1 +COUNTERPART 1 0 1 1 +COUNTERFEITED 1 0 1 1 +COUNTERACT 1 0 1 1 +COUNTENANCE 3 0 3 3 +COUNT 15 0 15 15 +COUNSEL 1 0 1 1 +COUCH 1 0 1 1 +COTTAGE 2 0 2 2 +COSTUME 2 0 2 2 +CORRUPTION 1 0 1 1 
+CORRIDOR 1 0 1 1 +CORRESPONDING 1 0 1 1 +CORRESPOND 1 0 1 1 +CORRECTLY 1 0 1 1 +CORRECTED 1 0 1 1 +CORRECT 3 0 3 3 +CORNERS 4 0 4 4 +CORNER 13 0 13 13 +CORDIALLY 1 0 1 1 +CORDIALITY 1 0 1 1 +CORAL 1 0 1 1 +COQUETRY 1 0 1 1 +COPY 2 0 2 2 +COPPER 1 0 1 1 +COPIED 2 0 2 2 +COOKERY 2 0 2 2 +COOKED 1 0 1 1 +CONVULSION 1 0 1 1 +CONVIVIALITY 1 0 1 1 +CONVINCING 2 0 2 2 +CONVINCED 2 0 2 2 +CONVICTIONS 2 0 2 2 +CONVICTION 2 0 2 2 +CONVEYED 1 0 1 1 +CONVEXITY 1 0 1 1 +CONVERTS 1 0 1 1 +CONVERSION 1 0 1 1 +CONVERSING 1 0 1 1 +CONVERSE 2 0 2 2 +CONVERSATIONS 1 0 1 1 +CONVERSATIONAL 1 0 1 1 +CONVERSATION 6 0 6 6 +CONVENTIONALITY 1 0 1 1 +CONVENTIONAL 1 0 1 1 +CONVENTION 1 0 1 1 +CONTROLLING 1 0 1 1 +CONTROL 4 0 4 4 +CONTRIVED 2 0 2 2 +CONTRIVANCE 2 0 2 2 +CONTRITION 1 0 1 1 +CONTRITE 1 0 1 1 +CONTRIBUTE 1 0 1 1 +CONTRASTING 1 0 1 1 +CONTRAST 4 0 4 4 +CONTRARY 5 0 5 5 +CONTRADICTIONS 1 0 1 1 +CONTRACTION 2 0 2 2 +CONTINUOUSLY 2 0 2 2 +CONTINUOUS 1 0 1 1 +CONTINUED 14 0 14 14 +CONTINUE 1 0 1 1 +CONTINUALLY 2 0 2 2 +CONTINUAL 3 0 3 3 +CONTINGENCY 1 0 1 1 +CONTINENT 1 0 1 1 +CONTI 1 0 1 1 +CONTESTED 1 0 1 1 +CONTEST 1 0 1 1 +CONTENTMENT 1 0 1 1 +CONTENTEDLY 1 0 1 1 +CONTENT 1 0 1 1 +CONTEMPTIBLE 1 0 1 1 +CONTEMPT 2 0 2 2 +CONTEMPORARY 1 0 1 1 +CONTEMPLATION 1 0 1 1 +CONTEMPLATED 1 0 1 1 +CONTAMINATION 1 0 1 1 +CONTAMINATED 1 0 1 1 +CONTAINS 1 0 1 1 +CONTAINERS 1 0 1 1 +CONTAGION 1 0 1 1 +CONTACT 1 0 1 1 +CONSUMPTION 13 0 13 13 +CONSUMER 5 0 5 5 +CONSUMED 1 0 1 1 +CONSUME 2 0 2 2 +CONSULTED 1 0 1 1 +CONSULTATION 1 0 1 1 +CONSULT 1 0 1 1 +CONSTRUCTION 4 0 4 4 +CONSTRUCTED 1 0 1 1 +CONSTRAINEDLY 1 0 1 1 +CONSTRAINED 1 0 1 1 +CONSTITUTION 3 0 3 3 +CONSTITUTES 1 0 1 1 +CONSTITUTED 1 0 1 1 +CONSTITUTE 1 0 1 1 +CONSTANTINE 1 0 1 1 +CONSTANT 3 0 3 3 +CONSTANCY 1 0 1 1 +CONSPIRACY 2 0 2 2 +CONSPICUOUS 8 0 8 8 +CONSOLE 1 0 1 1 +CONSOLATION 1 0 1 1 +CONSISTENTLY 1 0 1 1 +CONSIDERING 1 0 1 1 +CONSIDERED 5 0 5 5 +CONSIDERATIONS 1 0 1 1 +CONSIDERATE 1 0 1 1 +CONSIDERABLY 1 0 1 1 +CONSIDER 1 0 1 1 +CONSERVATION 1 0 1 1 +CONSEQUENTLY 1 0 1 1 +CONSEQUENT 2 0 2 2 +CONSEQUENCES 1 0 1 1 +CONSEQUENCE 5 0 5 5 +CONSENT 4 0 4 4 +CONSEIL 6 0 6 6 +CONSECRATED 2 0 2 2 +CONSCIOUSNESS 2 0 2 2 +CONSCIOUS 3 0 3 3 +CONSCIENCES 1 0 1 1 +CONSCIENCE 4 0 4 4 +CONQUERED 2 0 2 2 +CONQUER 1 0 1 1 +CONNECTIONS 1 0 1 1 +CONNECTION 1 0 1 1 +CONNECTED 3 0 3 3 +CONNECT 2 0 2 2 +CONJURATION 1 0 1 1 +CONJUNCTURE 1 0 1 1 +CONJECTURE 1 0 1 1 +CONGRESS 1 0 1 1 +CONGREGATED 1 0 1 1 +CONGRATULATIONS 1 0 1 1 +CONGRATULATION 1 0 1 1 +CONGRATULATE 1 0 1 1 +CONGO 1 0 1 1 +CONGENIAL 1 0 1 1 +CONFUSION 2 0 2 2 +CONFUSES 1 0 1 1 +CONFUSED 3 0 3 3 +CONFOUNDEDLY 1 0 1 1 +CONFLICTING 1 0 1 1 +CONFLICT 3 0 3 3 +CONFISCATED 1 0 1 1 +CONFIRMS 1 0 1 1 +CONFIRMED 3 0 3 3 +CONFINED 2 0 2 2 +CONFIDENT 1 0 1 1 +CONFIDENCE 7 0 7 7 +CONFIDE 1 0 1 1 +CONFIDANTS 1 0 1 1 +CONFESSION 1 0 1 1 +CONFERS 1 0 1 1 +CONFEDERATE 1 0 1 1 +CONDUCTS 1 0 1 1 +CONDUCTORS 2 0 2 2 +CONDUCTED 1 0 1 1 +CONDUCT 6 0 6 6 +CONDUCIVE 1 0 1 1 +CONDITIONS 3 0 3 3 +CONDITION 11 0 11 11 +CONDENSED 1 0 1 1 +CONDENSATION 1 0 1 1 +CONDEMNATION 2 0 2 2 +CONCUR 1 0 1 1 +CONCOURSE 1 0 1 1 +CONCORD 1 0 1 1 +CONCLUSION 2 0 2 2 +CONCERTING 1 0 1 1 +CONCERNING 4 0 4 4 +CONCERNED 8 0 8 8 +CONCERN 1 0 1 1 +CONCEPTIONS 2 0 2 2 +CONCEPTION 2 0 2 2 +CONCEPT 1 0 1 1 +CONCEIVED 1 0 1 1 +CONCEIVE 2 0 2 2 +CONCEALING 1 0 1 1 +CONCEALED 1 0 1 1 +CONCEAL 1 0 1 1 +COMRADES 3 0 3 3 +COMPULSIVE 1 0 1 1 +COMPULSION 1 0 1 1 +COMPREHENSIVE 1 0 1 1 +COMPREHENDED 1 0 1 1 +COMPREHEND 1 0 1 1 +COMPOUND 1 0 1 1 +COMPOSURE 2 0 2 2 
+COMPORT 1 0 1 1 +COMPONENT 1 0 1 1 +COMPLY 2 0 2 2 +COMPLIMENTARY 1 0 1 1 +COMPLIMENT 1 0 1 1 +COMPLICATED 1 0 1 1 +COMPLIANCE 2 0 2 2 +COMPLEXION 2 0 2 2 +COMPLETELY 3 0 3 3 +COMPLETED 2 0 2 2 +COMPLETE 2 0 2 2 +COMPLEMENT 1 0 1 1 +COMPLAINTS 1 0 1 1 +COMPLAINT 1 0 1 1 +COMPLAINING 1 0 1 1 +COMPLAINEST 1 0 1 1 +COMPLAIN 2 0 2 2 +COMPLACENTLY 1 0 1 1 +COMPLACENCY 1 0 1 1 +COMPETITION 1 0 1 1 +COMPETE 1 0 1 1 +COMPENSATION 1 0 1 1 +COMPELS 1 0 1 1 +COMPELLED 2 0 2 2 +COMPEL 1 0 1 1 +COMPASS 1 0 1 1 +COMPARISON 1 0 1 1 +COMPARED 3 0 3 3 +COMPARE 1 0 1 1 +COMPARATIVELY 2 0 2 2 +COMPARATIVE 1 0 1 1 +COMPANIONSHIP 2 0 2 2 +COMPANIONS 2 0 2 2 +COMPANIONLESS 1 0 1 1 +COMPANION 5 0 5 5 +COMPANIES 3 0 3 3 +COMPACT 1 0 1 1 +COMMUNITY 3 0 3 3 +COMMUNITIES 1 0 1 1 +COMMUNION 1 0 1 1 +COMMUNICATED 2 0 2 2 +COMMUNICATE 1 0 1 1 +COMMOTION 1 0 1 1 +COMMONLY 1 0 1 1 +COMMON 8 0 8 8 +COMMITTING 1 0 1 1 +COMMITTEE 3 0 3 3 +COMMITTED 2 0 2 2 +COMMITTAL 1 0 1 1 +COMMITS 1 0 1 1 +COMMIT 1 0 1 1 +COMMISSIONS 1 0 1 1 +COMMISSIONERS 1 0 1 1 +COMMISSION 1 0 1 1 +COMMISERATION 1 0 1 1 +COMMERCIAL 2 0 2 2 +COMMENTED 1 0 1 1 +COMMENTATORS 1 0 1 1 +COMMENTARY 2 0 2 2 +COMMENT 1 0 1 1 +COMMENDED 1 0 1 1 +COMMEND 1 0 1 1 +COMMENCEMENT 1 0 1 1 +COMMENCE 1 0 1 1 +COMMANDS 1 0 1 1 +COMMANDMENTS 1 0 1 1 +COMMANDMENT 2 0 2 2 +COMMANDERS 1 0 1 1 +COMMANDER 2 0 2 2 +COMMANDED 1 0 1 1 +COMMAND 2 0 2 2 +COMING 7 0 7 7 +COMFORTS 2 0 2 2 +COMFORTING 1 0 1 1 +COMFORTED 2 0 2 2 +COMFORTABLE 3 0 3 3 +COMFORT 8 0 8 8 +COMETH 1 0 1 1 +COMES 10 0 10 10 +COMELY 1 0 1 1 +COMEDY 1 0 1 1 +COMEDIES 2 0 2 2 +COME 51 0 51 51 +COMBINED 2 0 2 2 +COMBINE 1 0 1 1 +COMBINATIONS 1 0 1 1 +COMBINATION 2 0 2 2 +COMBAT 2 0 2 2 +COMB 1 0 1 1 +COLUMNS 1 0 1 1 +COLUMN 1 0 1 1 +COLORISTS 2 0 2 2 +COLONY 3 0 3 3 +COLONIAL 1 0 1 1 +COLONEL 1 0 1 1 +COLLEGE 4 0 4 4 +COLLECTIONS 1 0 1 1 +COLLECTION 1 0 1 1 +COLLECTING 1 0 1 1 +COLLAR 2 0 2 2 +COLLAPSED 1 0 1 1 +COLBERT 1 0 1 1 +COINED 1 0 1 1 +COINCIDE 1 0 1 1 +COFFEE 6 0 6 6 +COCK 1 0 1 1 +COBBLER 1 0 1 1 +COAXED 1 0 1 1 +COAT 1 0 1 1 +COACHMAN 1 0 1 1 +COACH 1 0 1 1 +CLUTCHING 1 0 1 1 +CLUTCH 1 0 1 1 +CLUNG 1 0 1 1 +CLUMSINESS 1 0 1 1 +CLOUDS 6 0 6 6 +CLOUD 11 0 11 11 +CLOTTED 1 0 1 1 +CLOTHING 1 0 1 1 +CLOTHES 5 0 5 5 +CLOTHED 1 0 1 1 +CLOSET 1 0 1 1 +CLOSER 1 0 1 1 +CLOSELY 6 0 6 6 +CLOSED 2 0 2 2 +CLOSE 10 0 10 10 +CLOAKS 2 0 2 2 +CLIMBING 1 0 1 1 +CLIMATE 2 0 2 2 +CLIFF 2 0 2 2 +CLIENTS 1 0 1 1 +CLICKED 1 0 1 1 +CLEVERNESS 3 0 3 3 +CLEVER 2 0 2 2 +CLERK 2 0 2 2 +CLERICAL 1 0 1 1 +CLERGYMAN'S 2 0 2 2 +CLERGY 2 0 2 2 +CLEARNESS 1 0 1 1 +CLEARLY 5 0 5 5 +CLEARING 1 0 1 1 +CLEAREST 1 0 1 1 +CLEAR 10 0 10 10 +CLEANED 1 0 1 1 +CLAY 5 0 5 5 +CLASSIFYING 1 0 1 1 +CLASSIFIER 1 0 1 1 +CLASSIFICATION 1 0 1 1 +CLASSIC 2 0 2 2 +CLASSES 1 0 1 1 +CLASSED 3 0 3 3 +CLASS 9 0 9 9 +CLASPING 1 0 1 1 +CLASPED 2 0 2 2 +CLASHING 2 0 2 2 +CLARIFIED 1 0 1 1 +CLAP 1 0 1 1 +CLAMOROUS 1 0 1 1 +CLAIMS 1 0 1 1 +CLAIMED 1 0 1 1 +CLAIM 2 0 2 2 +CIVILIZATION 2 0 2 2 +CIVIL 3 0 3 3 +CITIZENS 4 0 4 4 +CITIZEN 2 0 2 2 +CITIES 2 0 2 2 +CIRCUMVENTION 1 0 1 1 +CIRCUMSTANCES 4 0 4 4 +CIRCUMSTANCE 3 0 3 3 +CIRCUMNAVIGATION 1 0 1 1 +CIRCUMFERENCE 1 0 1 1 +CIRCULATED 1 0 1 1 +CIRCUITS 1 0 1 1 +CIRCUITOUS 1 0 1 1 +CIRCUIT 1 0 1 1 +CIRCLE 7 0 7 7 +CIGARS 1 0 1 1 +CHURNING 1 0 1 1 +CHURCHES 1 0 1 1 +CHURCH 17 0 17 17 +CHUCKLING 1 0 1 1 +CHUBBY 1 0 1 1 +CHRONICLED 1 0 1 1 +CHRISTIANS 1 0 1 1 +CHRISTIANITY 2 0 2 2 +CHRISM 1 0 1 1 +CHOSEN 3 0 3 3 +CHOPPED 1 0 1 1 +CHOOSING 1 0 1 1 +CHOKING 1 0 1 1 +CHOCOLATE 1 0 1 1 +CHIP 1 0 1 1 +CHINA 1 0 1 1 +CHIN 2 
0 2 2 +CHIMNEY 2 0 2 2 +CHILDREN'S 1 0 1 1 +CHILDREN 18 0 18 18 +CHILDISH 2 0 2 2 +CHILDHOOD'S 1 0 1 1 +CHILDHOOD 3 0 3 3 +CHILD'S 2 0 2 2 +CHILD 19 0 19 19 +CHIEFTAIN 1 0 1 1 +CHIEFLY 4 0 4 4 +CHIEF 3 0 3 3 +CHESTNUTS 1 0 1 1 +CHESTNUT 3 0 3 3 +CHEST 2 0 2 2 +CHERRY 1 0 1 1 +CHERISH 1 0 1 1 +CHEMICALS 1 0 1 1 +CHEMICAL 1 0 1 1 +CHELSEA 1 0 1 1 +CHELFORD 4 0 4 4 +CHEERS 1 0 1 1 +CHEERFULLY 2 0 2 2 +CHEERFUL 2 0 2 2 +CHEEKS 1 0 1 1 +CHEEK 2 0 2 2 +CHECKS 1 0 1 1 +CHECKER 1 0 1 1 +CHECKED 3 0 3 3 +CHECK 8 0 8 8 +CHAUCER'S 1 0 1 1 +CHAUCER 1 0 1 1 +CHASING 1 0 1 1 +CHARTER 1 0 1 1 +CHARMING 1 0 1 1 +CHARMED 1 0 1 1 +CHARM 1 0 1 1 +CHARLOTTE 2 0 2 2 +CHARLESTOWN 1 0 1 1 +CHARLESTON 1 0 1 1 +CHARITY 1 0 1 1 +CHARGED 2 0 2 2 +CHARGE 8 0 8 8 +CHARACTERS 2 0 2 2 +CHARACTERIZES 1 0 1 1 +CHARACTERIZED 1 0 1 1 +CHARACTER 14 0 14 14 +CHAPTERS 1 0 1 1 +CHAPTER 3 0 3 3 +CHAPEL 1 0 1 1 +CHAP 1 0 1 1 +CHAOS 1 0 1 1 +CHANNEL 2 0 2 2 +CHANGING 2 0 2 2 +CHANGES 3 0 3 3 +CHANCES 1 0 1 1 +CHANCE 6 0 6 6 +CHAMBERS 2 0 2 2 +CHAMBER 4 0 4 4 +CHALICE 1 0 1 1 +CHAIRS 4 0 4 4 +CHAIN 1 0 1 1 +CETERA 3 0 3 3 +CERTITUDE 1 0 1 1 +CERTAINLY 8 0 8 8 +CERTAIN 12 0 12 12 +CEREMONIES 2 0 2 2 +CEREMONIAL 1 0 1 1 +CENTURY 1 0 1 1 +CENTURIES 1 0 1 1 +CENTRAL 5 0 5 5 +CENTIPEDE 1 0 1 1 +CELLS 1 0 1 1 +CELLAR 1 0 1 1 +CELL 1 0 1 1 +CELESTIAL 2 0 2 2 +CELEBRITY 1 0 1 1 +CELEBRATION 1 0 1 1 +CELEBRATED 4 0 4 4 +CEDAR 1 0 1 1 +CEASING 1 0 1 1 +CAVERN 2 0 2 2 +CAVALRY 2 0 2 2 +CAVALIERS 1 0 1 1 +CAUTIOUSLY 1 0 1 1 +CAUTION 1 0 1 1 +CAUSES 1 0 1 1 +CAUSED 5 0 5 5 +CAUSE 9 0 9 9 +CATTLE 1 0 1 1 +CATS 1 0 1 1 +CATHOLIC 3 0 3 3 +CATHEDRAL 1 0 1 1 +CATECHISM 2 0 2 2 +CATCHING 1 0 1 1 +CATCHES 1 0 1 1 +CATCH 3 0 3 3 +CATASTROPHE 1 0 1 1 +CATAPULT 1 0 1 1 +CAT 7 0 7 7 +CASTLE 1 0 1 1 +CASES 6 0 6 6 +CASEMATES 1 0 1 1 +CARTS 1 0 1 1 +CART 1 0 1 1 +CARRYING 4 0 4 4 +CARRY 7 0 7 7 +CARROTS 1 0 1 1 +CARRIES 1 0 1 1 +CARRIED 13 0 13 13 +CARRIAGES 1 0 1 1 +CARRIAGE 8 0 8 8 +CARPETED 1 0 1 1 +CARING 1 0 1 1 +CAREY 3 0 3 3 +CARELESSNESS 1 0 1 1 +CARELESS 1 0 1 1 +CAREFULLY 7 0 7 7 +CAREFUL 5 0 5 5 +CAREER 4 0 4 4 +CARED 4 0 4 4 +CARE 13 0 13 13 +CARD 1 0 1 1 +CAPTURED 1 0 1 1 +CAPTIVE 2 0 2 2 +CAPTIVATE 1 0 1 1 +CAPTAIN 27 0 27 27 +CAPSIZE 1 0 1 1 +CAPRICE 1 0 1 1 +CAPLESS 1 0 1 1 +CAPITAL 1 0 1 1 +CAPACITY 3 0 3 3 +CAPABLE 3 0 3 3 +CAP'N 4 0 4 4 +CAP 7 0 7 7 +CANST 1 0 1 1 +CANS 1 0 1 1 +CANOPY 1 0 1 1 +CANON 1 0 1 1 +CANNON 1 0 1 1 +CANE 1 0 1 1 +CANDLESTICKS 1 0 1 1 +CANDLES 2 0 2 2 +CANDLE 2 0 2 2 +CANARY 1 0 1 1 +CANAL 1 0 1 1 +CAN'T 21 0 21 21 +CAMPS 1 0 1 1 +CAMPAIGN 2 0 2 2 +CAMP 1 0 1 1 +CAME 44 0 44 44 +CALVINISTIC 1 0 1 1 +CALMNESS 2 0 2 2 +CALLS 5 0 5 5 +CALLOUS 1 0 1 1 +CALLING 2 0 2 2 +CALL 10 0 10 10 +CALHOUN 1 0 1 1 +CAKES 2 0 2 2 +CAKE 1 0 1 1 +CABINET 3 0 3 3 +CABIN 2 0 2 2 +CABALISTIC 1 0 1 1 +C 1 0 1 1 +BUTTONING 1 0 1 1 +BUTTON 1 0 1 1 +BUTTERFLY 1 0 1 1 +BUTTED 1 0 1 1 +BUTLER 2 0 2 2 +BUTCHERY 2 0 2 2 +BUSY 1 0 1 1 +BUSINESS 5 0 5 5 +BUSHES 4 0 4 4 +BURSTS 1 0 1 1 +BURST 5 0 5 5 +BURNT 1 0 1 1 +BURNS 1 0 1 1 +BURNING 2 0 2 2 +BURNED 1 0 1 1 +BURIED 2 0 2 2 +BURGOS 1 0 1 1 +BURGLARS 1 0 1 1 +BURDEN 1 0 1 1 +BUOYANT 2 0 2 2 +BUNDLE 1 0 1 1 +BUILT 2 0 2 2 +BUILDS 1 0 1 1 +BUGGY 1 0 1 1 +BUDDING 1 0 1 1 +BUCKLING 1 0 1 1 +BUCKLES 1 0 1 1 +BUCKINGHAM 1 0 1 1 +BUBBLING 1 0 1 1 +BUBBLE'S 1 0 1 1 +BRUTE 1 0 1 1 +BRUTALITY 1 0 1 1 +BRUTAL 1 0 1 1 +BRUSH 1 0 1 1 +BRUISED 1 0 1 1 +BROWSED 1 0 1 1 +BROWN 10 0 10 10 +BROW 1 0 1 1 +BROUGHT 14 0 14 14 +BROTHERS 5 0 5 5 +BROTHER 8 0 8 8 +BROOM 1 0 1 1 +BROOKS 1 0 1 1 +BROOKLYN 1 0 1 1 
+BROODING 3 0 3 3 +BRONTES 1 0 1 1 +BROKE 1 0 1 1 +BROADLY 1 0 1 1 +BROADEST 1 0 1 1 +BROAD 11 0 11 11 +BRITISH 2 0 2 2 +BRISTLING 1 0 1 1 +BRING 9 0 9 9 +BRIM 2 0 2 2 +BRILLIANT 5 0 5 5 +BRILLIANCY 1 0 1 1 +BRIGHTNESS 1 0 1 1 +BRIGHTLY 1 0 1 1 +BRIGHTEST 1 0 1 1 +BRIGHTER 1 0 1 1 +BRIGHTENED 2 0 2 2 +BRIGHT 16 0 16 16 +BRIGANTINE 1 0 1 1 +BRIEFLY 1 0 1 1 +BRIDGE 4 0 4 4 +BRIDE 1 0 1 1 +BRICK 2 0 2 2 +BREWING 1 0 1 1 +BRETHREN 2 0 2 2 +BREEZE 1 0 1 1 +BRED 1 0 1 1 +BREATHING 4 0 4 4 +BREATH 10 0 10 10 +BREASTPLATE 1 0 1 1 +BREAST 2 0 2 2 +BREAKING 2 0 2 2 +BREAKFASTING 1 0 1 1 +BREAKERS 1 0 1 1 +BREAD 5 0 5 5 +BRAVELY 1 0 1 1 +BRAVE 2 0 2 2 +BRANDY 1 0 1 1 +BRANDON 4 0 4 4 +BRANDED 1 0 1 1 +BRANCHES 8 0 8 8 +BRAIN 2 0 2 2 +BRAIDS 1 0 1 1 +BRAIDED 1 0 1 1 +BRAID 1 0 1 1 +BRACTON'S 1 0 1 1 +BRACTON 1 0 1 1 +BRACELETS 1 0 1 1 +BRACELET 1 0 1 1 +BOY'S 3 0 3 3 +BOY 17 0 17 17 +BOXES 1 0 1 1 +BOWING 1 0 1 1 +BOWED 1 0 1 1 +BOW 4 0 4 4 +BOUT 1 0 1 1 +BOUQUETS 1 0 1 1 +BOUND 6 0 6 6 +BOUGHS 1 0 1 1 +BOTTOMS 1 0 1 1 +BOTTOM 7 0 7 7 +BOTTLES 2 0 2 2 +BOTTLE 1 0 1 1 +BOTHER 1 0 1 1 +BOTH 34 0 34 34 +BOTANICAL 2 0 2 2 +BOSOM 2 0 2 2 +BORE 2 0 2 2 +BORDERING 1 0 1 1 +BORDERED 1 0 1 1 +BOOTS 2 0 2 2 +BOOLOOROO 12 0 12 12 +BOOK 4 0 4 4 +BONY 1 0 1 1 +BONNET 1 0 1 1 +BONES 2 0 2 2 +BONDAGE 1 0 1 1 +BOND 3 0 3 3 +BOLTON 1 0 1 1 +BOLDLY 3 0 3 3 +BOLDEST 1 0 1 1 +BOILED 1 0 1 1 +BOIL 1 0 1 1 +BODY 8 0 8 8 +BODILY 3 0 3 3 +BODIES 3 0 3 3 +BOASTING 2 0 2 2 +BOARDED 2 0 2 2 +BOARD 9 0 9 9 +BLUSHING 2 0 2 2 +BLUSHED 1 0 1 1 +BLUSH 1 0 1 1 +BLUNT 1 0 1 1 +BLUFF 1 0 1 1 +BLUES 1 0 1 1 +BLUE 21 0 21 21 +BLOWN 2 0 2 2 +BLOWING 1 0 1 1 +BLOW 2 0 2 2 +BLOOM 1 0 1 1 +BLOODY 1 0 1 1 +BLOODSHED 1 0 1 1 +BLOODED 1 0 1 1 +BLOOD 6 0 6 6 +BLOCKS 1 0 1 1 +BLISS 1 0 1 1 +BLIND 1 0 1 1 +BLEW 1 0 1 1 +BLESSED 3 0 3 3 +BLESS 2 0 2 2 +BLEED 1 0 1 1 +BLEACHED 1 0 1 1 +BLAZING 1 0 1 1 +BLAZED 1 0 1 1 +BLAZE 2 0 2 2 +BLANK 2 0 2 2 +BLAME 1 0 1 1 +BLADE 2 0 2 2 +BLACKSTONE 1 0 1 1 +BLACKNESSES 1 0 1 1 +BLACKNESS 1 0 1 1 +BLACKER 2 0 2 2 +BLACK 22 0 22 22 +BITTER 1 0 1 1 +BITS 1 0 1 1 +BITE 1 0 1 1 +BISHOPS 5 0 5 5 +BIRTH 2 0 2 2 +BIRMINGHAM 1 0 1 1 +BIRDS 4 0 4 4 +BIRD 4 0 4 4 +BIRCHES 1 0 1 1 +BINDING 1 0 1 1 +BIND 1 0 1 1 +BILL 6 0 6 6 +BIG 12 0 12 12 +BIDDING 1 0 1 1 +BIBLE 1 0 1 1 +BEYOND 6 0 6 6 +BEWILDERED 6 0 6 6 +BEWARE 1 0 1 1 +BEVERAGES 1 0 1 1 +BETWEEN 25 0 25 25 +BETTING 1 0 1 1 +BETRAYED 1 0 1 1 +BETRAY 1 0 1 1 +BETH 12 0 12 12 +BESTOWED 1 0 1 1 +BESTOW 1 0 1 1 +BEST 22 0 22 22 +BESOUGHT 1 0 1 1 +BESIEGED 1 0 1 1 +BESIDE 5 0 5 5 +BERTIE 1 0 1 1 +BERRIES 1 0 1 1 +BENT 4 0 4 4 +BENIGNANTLY 1 0 1 1 +BENIGHTED 1 0 1 1 +BENEFIT 1 0 1 1 +BENEATH 6 0 6 6 +BEND 1 0 1 1 +BENCHES 3 0 3 3 +BENCH 4 0 4 4 +BEN 3 0 3 3 +BELT 2 0 2 2 +BELOW 1 0 1 1 +BELOVED 3 0 3 3 +BELONGS 1 0 1 1 +BELONGING 1 0 1 1 +BELONGED 3 0 3 3 +BELONG 2 0 2 2 +BELLY 3 0 3 3 +BELLS 1 0 1 1 +BELLINGHAM 2 0 2 2 +BELIEVING 2 0 2 2 +BELIEVERS 1 0 1 1 +BELIEF 3 0 3 3 +BEINGS 1 0 1 1 +BEHOLDING 1 0 1 1 +BEHOLDERS 1 0 1 1 +BEHOLDER 1 0 1 1 +BEHIND 10 0 10 10 +BEHELD 1 0 1 1 +BEHAVED 1 0 1 1 +BEHAVE 1 0 1 1 +BEHALF 1 0 1 1 +BEGUN 5 0 5 5 +BEGUILING 1 0 1 1 +BEGOT 1 0 1 1 +BEGINS 4 0 4 4 +BEGINNING 4 0 4 4 +BEGIN 9 0 9 9 +BEGGAR 2 0 2 2 +BEGAN 22 0 22 22 +BEFITS 1 0 1 1 +BEEHIVES 1 0 1 1 +BEEF 1 0 1 1 +BEDSIDE 1 0 1 1 +BEDROOM 2 0 2 2 +BEDFORD 1 0 1 1 +BECOMING 1 0 1 1 +BECOMES 8 0 8 8 +BECOME 14 0 14 14 +BECKONED 1 0 1 1 +BECKON 1 0 1 1 +BECAUSE 30 0 30 30 +BECAME 12 0 12 12 +BEAUTY 21 0 21 21 +BEAUTIFUL 13 0 13 13 +BEAUTIES 2 0 2 2 +BEATING 2 0 2 2 +BEATERS 1 0 1 1 
+BEATER 1 0 1 1 +BEATEN 2 0 2 2 +BEAT 1 0 1 1 +BEASTS 2 0 2 2 +BEARS 4 0 4 4 +BEARING 3 0 3 3 +BEARD 1 0 1 1 +BEAR'S 1 0 1 1 +BEAR 11 0 11 11 +BEAMS 1 0 1 1 +BEAK 6 0 6 6 +BEADS 1 0 1 1 +BATTLED 1 0 1 1 +BATTERIES 1 0 1 1 +BATTERED 1 0 1 1 +BAT 1 0 1 1 +BASTARD 1 0 1 1 +BASKETS 1 0 1 1 +BASIS 2 0 2 2 +BASED 1 0 1 1 +BARTLEY 14 0 14 14 +BARS 1 0 1 1 +BARRICADED 1 0 1 1 +BARREN 1 0 1 1 +BARREL 1 0 1 1 +BARRACK 1 0 1 1 +BARNS 1 0 1 1 +BARN 4 0 4 4 +BARGAINS 1 0 1 1 +BAREFOOT 1 0 1 1 +BARE 2 0 2 2 +BARBARITY 1 0 1 1 +BAR 1 0 1 1 +BAPTIZED 1 0 1 1 +BAPTISM 1 0 1 1 +BANTER 1 0 1 1 +BANQUET 1 0 1 1 +BANKS 1 0 1 1 +BANK 3 0 3 3 +BANISHED 1 0 1 1 +BANG 1 0 1 1 +BAND 2 0 2 2 +BALMY 1 0 1 1 +BALLS 2 0 2 2 +BALLET 2 0 2 2 +BALEEN 1 0 1 1 +BAKER 1 0 1 1 +BAGS 1 0 1 1 +BAGGAGE 1 0 1 1 +BAG 1 0 1 1 +BAFFLED 2 0 2 2 +BADLY 2 0 2 2 +BADGES 1 0 1 1 +BADE 3 0 3 3 +BACON 1 0 1 1 +BACKWARD 1 0 1 1 +BABY'S 1 0 1 1 +BABY 1 0 1 1 +BABIES 1 0 1 1 +BABE 1 0 1 1 +AZURE 1 0 1 1 +AXIS 1 0 1 1 +AWOKE 3 0 3 3 +AWKWARD 1 0 1 1 +AWFULLY 2 0 2 2 +AWFUL 4 0 4 4 +AWAKE 1 0 1 1 +AWAITING 1 0 1 1 +AWAITED 2 0 2 2 +AVOIDING 1 0 1 1 +AVOIDED 1 0 1 1 +AVOID 5 0 5 5 +AVERSION 1 0 1 1 +AVERSE 1 0 1 1 +AVAILABLE 1 0 1 1 +AUTUMN 1 0 1 1 +AUTHORS 1 0 1 1 +AUTHORIZED 1 0 1 1 +AUTHORITY 6 0 6 6 +AUTHORITIES 1 0 1 1 +AUTHORITATIVELY 1 0 1 1 +AUTHOR 1 0 1 1 +AUTHENTICATED 1 0 1 1 +AUNT'S 1 0 1 1 +AUGUST 5 0 5 5 +AUGMENT 1 0 1 1 +AUDITORY 1 0 1 1 +AUDITORS 1 0 1 1 +AUDIENCE 6 0 6 6 +AUDACIOUS 1 0 1 1 +AUCTION 1 0 1 1 +ATTRIBUTED 1 0 1 1 +ATTRACTIVE 1 0 1 1 +ATTRACTION 1 0 1 1 +ATTRACTED 3 0 3 3 +ATTORNEYS 1 0 1 1 +ATTIRE 1 0 1 1 +ATTENUATING 1 0 1 1 +ATTENTIVELY 2 0 2 2 +ATTENTION 11 0 11 11 +ATTENDED 1 0 1 1 +ATTENDANT 1 0 1 1 +ATTEND 3 0 3 3 +ATTEMPTS 1 0 1 1 +ATTEMPT 5 0 5 5 +ATTAINMENTS 1 0 1 1 +ATTAINMENT 1 0 1 1 +ATTACKED 1 0 1 1 +ATTACK 4 0 4 4 +ATTACHED 2 0 2 2 +ATROCIOUS 1 0 1 1 +ATMOSPHERIC 1 0 1 1 +ATMOSPHERE 2 0 2 2 +ATLANTIS 1 0 1 1 +ATLANTIC 3 0 3 3 +ATHLETE 2 0 2 2 +ATHENS 1 0 1 1 +ATHENIANS 1 0 1 1 +ATHENIAN 2 0 2 2 +ATE 2 0 2 2 +ASTRONOMY 1 0 1 1 +ASTOUNDING 1 0 1 1 +ASTONISHMENT 2 0 2 2 +ASTONISHING 1 0 1 1 +ASTONISHED 1 0 1 1 +ASSURES 1 0 1 1 +ASSUREDLY 1 0 1 1 +ASSURED 5 0 5 5 +ASSURE 5 0 5 5 +ASSURANCES 1 0 1 1 +ASSURANCE 3 0 3 3 +ASSUMED 5 0 5 5 +ASSOCIATION 2 0 2 2 +ASSOCIATES 1 0 1 1 +ASSOCIATED 3 0 3 3 +ASSISTED 2 0 2 2 +ASSISTANT 1 0 1 1 +ASSIST 2 0 2 2 +ASSIDUOUSLY 1 0 1 1 +ASSERTS 1 0 1 1 +ASSERTIVE 1 0 1 1 +ASSERTED 3 0 3 3 +ASSENT 1 0 1 1 +ASSEMBLY 2 0 2 2 +ASS 1 0 1 1 +ASPECT 1 0 1 1 +ASLEEP 1 0 1 1 +ASKING 2 0 2 2 +ASKED 22 0 22 22 +ASK 10 0 10 10 +ASIDE 3 0 3 3 +ASIA 1 0 1 1 +ASHORE 1 0 1 1 +ASHAMED 2 0 2 2 +ASCRIBES 1 0 1 1 +ASCERTAINING 1 0 1 1 +ASCERTAIN 2 0 2 2 +ARTILLERY 1 0 1 1 +ARTIFICE 1 0 1 1 +ARTICULATE 2 0 2 2 +ARTICLE 3 0 3 3 +ARTICHOKES 1 0 1 1 +ARTHUR 1 0 1 1 +ART 14 0 14 14 +ARROW 2 0 2 2 +ARRIVED 4 0 4 4 +ARRIVE 1 0 1 1 +ARRIVAL 4 0 4 4 +ARRESTING 1 0 1 1 +ARRAY 1 0 1 1 +ARRANGEMENTS 1 0 1 1 +ARRANGEMENT 2 0 2 2 +ARRANGED 2 0 2 2 +AROUSE 1 0 1 1 +AROSE 2 0 2 2 +ARONNAX 1 0 1 1 +ARMS 15 0 15 15 +ARISTOCRACY 1 0 1 1 +ARISING 1 0 1 1 +ARISE 1 0 1 1 +ARID 1 0 1 1 +ARIANS 1 0 1 1 +ARGYLE'S 1 0 1 1 +ARGYLE 2 0 2 2 +ARGUS 1 0 1 1 +ARGUING 1 0 1 1 +ARGUE 2 0 2 2 +AREN'T 1 0 1 1 +AREA 1 0 1 1 +ARDUOUS 1 0 1 1 +ARDOUR 1 0 1 1 +ARCHITECTURAL 1 0 1 1 +ARCHED 1 0 1 1 +ARCH 1 0 1 1 +ARCADIAN 1 0 1 1 +ARC 2 0 2 2 +APRON 2 0 2 2 +APRIL 2 0 2 2 +APPROXIMATELY 1 0 1 1 +APPROVING 2 0 2 2 +APPROVES 1 0 1 1 +APPROVE 2 0 2 2 +APPROVAL 1 0 1 1 +APPROPRIATE 1 0 1 1 +APPROBATION 1 0 1 1 +APPROACHING 3 0 3 3 
+APPROACHES 2 0 2 2 +APPROACHED 6 0 6 6 +APPROACH 1 0 1 1 +APPRENTICESHIP 1 0 1 1 +APPREHENSION 1 0 1 1 +APPRECIATIVE 1 0 1 1 +APPRECIATE 1 0 1 1 +APPOSITION 1 0 1 1 +APPOINTED 7 0 7 7 +APPLYING 1 0 1 1 +APPLICATION 2 0 2 2 +APPLE 1 0 1 1 +APPLAUSE 2 0 2 2 +APPLAUDED 1 0 1 1 +APPETITES 1 0 1 1 +APPETITE 1 0 1 1 +APPEARS 1 0 1 1 +APPEARED 10 0 10 10 +APPEARANCES 3 0 3 3 +APPEARANCE 9 0 9 9 +APPEAR 3 0 3 3 +APPEALS 1 0 1 1 +APPEALED 1 0 1 1 +APPEAL 1 0 1 1 +APPARENTLY 1 0 1 1 +APPARENT 2 0 2 2 +APPARATUS 1 0 1 1 +APPALLING 1 0 1 1 +APOSTOLICAL 1 0 1 1 +APOSTOLIC 1 0 1 1 +APOSTLES 6 0 6 6 +APOSTLE 4 0 4 4 +APOLLO 1 0 1 1 +APARTMENT 2 0 2 2 +APART 1 0 1 1 +ANYHOW 3 0 3 3 +ANYBODY 3 0 3 3 +ANXIOUS 3 0 3 3 +ANXIETY 4 0 4 4 +ANTIPATHY 2 0 2 2 +ANTICIPATION 1 0 1 1 +ANTICIPATE 1 0 1 1 +ANTICHRIST 1 0 1 1 +ANTI 1 0 1 1 +ANTARCTIC 1 0 1 1 +ANSWERS 2 0 2 2 +ANSWER 6 0 6 6 +ANOTHER'S 1 0 1 1 +ANNOYANCE 2 0 2 2 +ANNOUNCED 2 0 2 2 +ANNE 2 0 2 2 +ANIMOSITY 2 0 2 2 +ANIMATED 2 0 2 2 +ANIMALS 5 0 5 5 +ANIMAL 8 0 8 8 +ANGRY 5 0 5 5 +ANGRILY 3 0 3 3 +ANGRIER 1 0 1 1 +ANGOR 1 0 1 1 +ANGER 1 0 1 1 +ANECDOTES 1 0 1 1 +ANALYSIS 2 0 2 2 +ANALOGY 1 0 1 1 +ANALOGUE 1 0 1 1 +AMUSING 2 0 2 2 +AMUSEMENT 3 0 3 3 +AMUSED 1 0 1 1 +AMUSE 2 0 2 2 +AMPLY 2 0 2 2 +AMOUNT 3 0 3 3 +AMONGST 3 0 3 3 +AMONG 29 0 29 29 +AMISS 1 0 1 1 +AMIDST 2 0 2 2 +AMID 1 0 1 1 +AMETHYST 1 0 1 1 +AMERICANS 2 0 2 2 +AMERICAN 10 0 10 10 +AMERICA 2 0 2 2 +AMENDS 2 0 2 2 +AMENDMENT 1 0 1 1 +AMELIORATION 1 0 1 1 +AMBROSE 4 0 4 4 +AMBITIOUS 1 0 1 1 +AMBITION 2 0 2 2 +AMBASSADOR 1 0 1 1 +AMAZEMENT 2 0 2 2 +AMASS 1 0 1 1 +AMALGAMATED 1 0 1 1 +ALWAYS 36 0 36 36 +ALTHOUGH 10 0 10 10 +ALTERNATING 3 0 3 3 +ALTERING 2 0 2 2 +ALTERED 2 0 2 2 +ALTERATION 1 0 1 1 +ALTER 1 0 1 1 +ALTAR 1 0 1 1 +ALSO 36 0 36 36 +ALOUD 3 0 3 3 +ALONG 15 0 15 15 +ALMS 1 0 1 1 +ALMOST 19 0 19 19 +ALLY 1 0 1 1 +ALLUDE 1 0 1 1 +ALLOWING 2 0 2 2 +ALLOWED 7 0 7 7 +ALLOW 5 0 5 5 +ALLIES 1 0 1 1 +ALLIED 1 0 1 1 +ALLERS 1 0 1 1 +ALLEGED 2 0 2 2 +ALIVE 1 0 1 1 +ALIKE 1 0 1 1 +ALIGHTED 1 0 1 1 +ALICE 4 0 4 4 +ALGERIAN 1 0 1 1 +ALGERIA 2 0 2 2 +ALGEBRA 1 0 1 1 +ALE 2 0 2 2 +ALAS 3 0 3 3 +ALARMED 1 0 1 1 +AKIN 1 0 1 1 +AIMED 1 0 1 1 +AIDED 1 0 1 1 +AHEAD 1 0 1 1 +AH 7 0 7 7 +AGREEMENT 1 0 1 1 +AGREED 2 0 2 2 +AGREEABLY 2 0 2 2 +AGREEABLE 5 0 5 5 +AGREE 2 0 2 2 +AGO 4 0 4 4 +AGITATION 4 0 4 4 +AGITATED 2 0 2 2 +AGGRESSIVENESS 1 0 1 1 +AGGRESSIVE 1 0 1 1 +AGGREGATE 1 0 1 1 +AGENCY 1 0 1 1 +AGE 6 0 6 6 +AGAPE 1 0 1 1 +AGAINST 23 0 23 23 +AGAIN 39 0 39 39 +AFTERWARDS 5 0 5 5 +AFTERWARD 2 0 2 2 +AFTERNOON 4 0 4 4 +AFRICAN 1 0 1 1 +AFRAID 9 0 9 9 +AFLOAT 1 0 1 1 +AFFORD 4 0 4 4 +AFFLICTED 1 0 1 1 +AFFIRMATIVE 1 0 1 1 +AFFECTIONS 1 0 1 1 +AFFECTIONATE 1 0 1 1 +AFFECTION 7 0 7 7 +AFFAIRS 3 0 3 3 +AFFAIR 2 0 2 2 +ADVISER 1 0 1 1 +ADVISED 1 0 1 1 +ADVISABLE 1 0 1 1 +ADVICE 4 0 4 4 +ADVERTISING 1 0 1 1 +ADVERTISEMENT 1 0 1 1 +ADVERSE 2 0 2 2 +ADVENTURE 1 0 1 1 +ADVANTAGES 2 0 2 2 +ADVANTAGE 3 0 3 3 +ADVANCING 1 0 1 1 +ADVANCE 5 0 5 5 +ADORE 1 0 1 1 +ADORATION 3 0 3 3 +ADOPTED 2 0 2 2 +ADOLESCENCE 1 0 1 1 +ADMITTING 2 0 2 2 +ADMITTED 3 0 3 3 +ADMITTANCE 1 0 1 1 +ADMIT 2 0 2 2 +ADMIRING 1 0 1 1 +ADMIRED 1 0 1 1 +ADMIRATION 2 0 2 2 +ADMINISTRATION 3 0 3 3 +ADJUST 1 0 1 1 +ADHERENTS 1 0 1 1 +ADDRESSING 1 0 1 1 +ADDRESSED 6 0 6 6 +ADDRESS 3 0 3 3 +ADDITIONAL 1 0 1 1 +ADDED 11 0 11 11 +ACUTE 2 0 2 2 +ACTUALLY 3 0 3 3 +ACTUAL 4 0 4 4 +ACTS 2 0 2 2 +ACTRESS 1 0 1 1 +ACTORS 4 0 4 4 +ACTOR 2 0 2 2 +ACTIVITY 1 0 1 1 +ACTIVE 2 0 2 2 +ACTING 2 0 2 2 +ACT 6 0 6 6 +ACROSS 13 0 13 13 +ACQUIRES 1 0 1 1 +ACQUIRE 1 0 1 1 
+ACQUAINTED 1 0 1 1 +ACQUAINTANCE 3 0 3 3 +ACQUAINT 1 0 1 1 +ACORN 1 0 1 1 +ACKNOWLEDGES 1 0 1 1 +ACKNOWLEDGED 2 0 2 2 +ACKNOWLEDGE 2 0 2 2 +ACHIEVEMENTS 1 0 1 1 +ACHIEVEMENT 1 0 1 1 +ACHIEVED 2 0 2 2 +ACE 2 0 2 2 +ACCUSTOMED 3 0 3 3 +ACCUSE 1 0 1 1 +ACCURATELY 1 0 1 1 +ACCURATE 2 0 2 2 +ACCURACY 3 0 3 3 +ACCRUING 1 0 1 1 +ACCOUTREMENTS 1 0 1 1 +ACCOUNTS 1 0 1 1 +ACCOUNTED 1 0 1 1 +ACCOUNT 9 0 9 9 +ACCORDINGLY 1 0 1 1 +ACCORDING 4 0 4 4 +ACCORDANCE 2 0 2 2 +ACCOMPLISHMENT 1 0 1 1 +ACCOMPLISHED 5 0 5 5 +ACCOMPANY 2 0 2 2 +ACCOMMODATIONS 1 0 1 1 +ACCOMMODATION 1 0 1 1 +ACCLAMATIONS 1 0 1 1 +ACCIDENTS 2 0 2 2 +ACCIDENT 2 0 2 2 +ACCESSORIES 1 0 1 1 +ACCEPTING 1 0 1 1 +ACCEPTED 1 0 1 1 +ACCEPTABLE 1 0 1 1 +ACCENTS 2 0 2 2 +ACCENT 3 0 3 3 +ABSURDITY 1 0 1 1 +ABSURDITIES 1 0 1 1 +ABSURD 1 0 1 1 +ABSTRACTIONS 1 0 1 1 +ABSTRACTION 1 0 1 1 +ABSORBED 1 0 1 1 +ABSOLUTELY 6 0 6 6 +ABSOLUTE 1 0 1 1 +ABSENT 2 0 2 2 +ABSENCE 1 0 1 1 +ABRUPTLY 2 0 2 2 +ABROAD 4 0 4 4 +ABRAHAM 2 0 2 2 +ABOVE 17 0 17 17 +ABOUT 85 0 85 85 +ABOLITIONISM 1 0 1 1 +ABOARD 2 0 2 2 +ABNER 1 0 1 1 +ABLE 7 0 7 7 +ABANDONED 2 0 2 2 diff --git a/log/greedy_search/errs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt b/log/greedy_search/errs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..e3ed1ad74619d01dcd8bd4f5fa2f99750eb68c91 --- /dev/null +++ b/log/greedy_search/errs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt @@ -0,0 +1,15887 @@ +%WER = 9.79 +Errors: 496 insertions, 537 deletions, 4092 substitutions, over 52343 reference words (47714 correct) +Search below for sections starting with PER-UTT DETAILS:, SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS: + +PER-UTT DETAILS: corr or (ref->hyp) +1688-142285-0000-1948: THERE'S IRON THEY SAY IN ALL OUR BLOOD AND A GRAIN OR TWO PERHAPS IS GOOD BUT HIS HE MAKES ME HARSHLY FEEL HAS GOT A LITTLE TOO MUCH OF STEEL ANON +1688-142285-0001-1949: MARGARET SAID MISTER HALE AS HE RETURNED FROM SHOWING HIS GUEST DOWNSTAIRS I COULD NOT HELP WATCHING YOUR FACE WITH SOME ANXIETY WHEN MISTER THORNTON MADE HIS CONFESSION OF HAVING BEEN A SHOP BOY +1688-142285-0002-1950: YOU DON'T MEAN THAT YOU THOUGHT ME SO SILLY +1688-142285-0003-1951: I REALLY (LIKED->LIKE) THAT ACCOUNT OF HIMSELF BETTER THAN ANYTHING ELSE HE SAID +1688-142285-0004-1952: HIS STATEMENT OF HAVING BEEN A SHOP BOY WAS THE THING I (LIKED->LIKE) BEST OF ALL +1688-142285-0005-1953: YOU WHO WERE ALWAYS ACCUSING PEOPLE OF BEING SHOPPY AT HELSTONE +1688-142285-0006-1954: I DON'T THINK MISTER HALE YOU HAVE DONE QUITE RIGHT (IN->HE) INTRODUCING SUCH A PERSON TO US WITHOUT TELLING US WHAT HE HAD BEEN +1688-142285-0007-1955: I REALLY WAS VERY MUCH AFRAID OF SHOWING HIM HOW MUCH SHOCKED I WAS AT SOME (PARTS->PART) OF WHAT HE SAID +1688-142285-0008-1956: HIS FATHER DYING IN MISERABLE CIRCUMSTANCES +1688-142285-0009-1957: WHY IT MIGHT HAVE BEEN IN THE WORKHOUSE +1688-142285-0010-1958: HIS FATHER SPECULATED WILDLY FAILED AND THEN KILLED HIMSELF BECAUSE HE COULD NOT BEAR THE DISGRACE +1688-142285-0011-1959: ALL HIS FORMER FRIENDS SHRUNK FROM THE DISCLOSURES THAT HAD TO BE MADE OF HIS DISHONEST GAMBLING WILD HOPELESS STRUGGLES MADE WITH OTHER PEOPLE'S MONEY TO REGAIN HIS OWN MODERATE PORTION OF WEALTH +1688-142285-0012-1960: NO ONE CAME FORWARDS TO HELP THE MOTHER AND THIS BOY 
+1688-142285-0013-1961: AT LEAST NO FRIEND CAME FORWARDS IMMEDIATELY AND (MISSUS->MISTER) THORNTON IS NOT ONE I FANCY TO WAIT TILL (TARDY->TIDY) KINDNESS COMES TO FIND HER OUT +1688-142285-0014-1962: SO THEY LEFT MILTON +1688-142285-0015-1963: HOW TAINTED ASKED HER FATHER +1688-142285-0016-1964: OH PAPA BY THAT TESTING EVERYTHING BY THE STANDARD OF WEALTH +1688-142285-0017-1965: WHEN HE SPOKE OF THE MECHANICAL POWERS HE EVIDENTLY LOOKED UPON THEM ONLY AS NEW WAYS OF EXTENDING TRADE AND MAKING MONEY +1688-142285-0018-1966: AND THE POOR MEN AROUND HIM THEY WERE POOR BECAUSE THEY WERE VICIOUS OUT OF THE PALE OF HIS SYMPATHIES BECAUSE THEY HAD NOT HIS IRON NATURE AND THE CAPABILITIES THAT IT GIVES HIM FOR BEING RICH +1688-142285-0019-1967: NOT VICIOUS HE NEVER SAID THAT +1688-142285-0020-1968: (IMPROVIDENT->IN PROVIDENT) AND SELF INDULGENT WERE HIS WORDS +1688-142285-0021-1969: MARGARET WAS COLLECTING HER MOTHER'S WORKING MATERIALS AND PREPARING TO GO TO BED +1688-142285-0022-1970: JUST AS SHE WAS LEAVING THE ROOM SHE HESITATED SHE WAS INCLINED TO MAKE AN ACKNOWLEDGMENT WHICH SHE THOUGHT WOULD PLEASE HER FATHER BUT WHICH TO BE FULL AND TRUE MUST INCLUDE A LITTLE ANNOYANCE +1688-142285-0023-1971: HOWEVER OUT IT CAME +1688-142285-0024-1972: PAPA I DO THINK MISTER THORNTON A VERY REMARKABLE MAN BUT PERSONALLY I DON'T LIKE HIM AT ALL +1688-142285-0025-1973: (AND->HELLO) I DO SAID HER FATHER LAUGHING +1688-142285-0026-1974: PERSONALLY AS YOU CALL IT AND ALL +1688-142285-0027-1975: I DON'T SET HIM UP FOR A HERO OR ANYTHING OF THAT KIND +1688-142285-0028-1976: BUT GOOD NIGHT CHILD +1688-142285-0029-1977: THERE WERE SEVERAL OTHER SIGNS OF SOMETHING WRONG ABOUT MISSUS HALE +1688-142285-0030-1978: SHE AND DIXON HELD MYSTERIOUS CONSULTATIONS IN HER BEDROOM FROM WHICH DIXON WOULD COME OUT CRYING AND CROSS AS WAS (HER CUSTOM->ACCUSTOM) WHEN ANY DISTRESS OF HER MISTRESS CALLED UPON HER SYMPATHY +1688-142285-0031-1979: ONCE MARGARET HAD GONE INTO THE CHAMBER SOON AFTER DIXON (LEFT IT->LIFTED) AND FOUND HER MOTHER ON HER KNEES AND AS MARGARET STOLE OUT SHE CAUGHT A FEW WORDS WHICH WERE EVIDENTLY A PRAYER FOR STRENGTH AND PATIENCE TO (ENDURE->INDUCE) SEVERE BODILY SUFFERING +1688-142285-0032-1980: BUT THOUGH SHE RECEIVED CARESSES AND FOND WORDS BACK AGAIN IN SUCH PROFUSION AS WOULD HAVE GLADDENED HER FORMERLY YET SHE FELT THAT THERE WAS A SECRET WITHHELD FROM HER AND SHE BELIEVED IT BORE SERIOUS REFERENCE TO HER MOTHER'S HEALTH +1688-142285-0033-1981: SHE LAY AWAKE VERY LONG THIS NIGHT PLANNING HOW TO (LESSEN->LISTEN) THE EVIL INFLUENCE OF THEIR MILTON LIFE ON HER MOTHER +1688-142285-0034-1982: A SERVANT (TO->*) GIVE DIXON PERMANENT ASSISTANCE SHOULD BE GOT IF SHE GAVE UP (HER->THE) WHOLE TIME TO THE SEARCH AND THEN AT ANY RATE HER MOTHER MIGHT HAVE ALL THE PERSONAL (ATTENTION->ATTENTIONS) SHE REQUIRED AND HAD BEEN ACCUSTOMED TO HER WHOLE LIFE +1688-142285-0035-1983: VISITING REGISTER (OFFICES->OFFICERS) SEEING ALL MANNER OF UNLIKELY PEOPLE AND VERY FEW IN THE LEAST LIKELY ABSORBED MARGARET'S TIME AND THOUGHTS FOR SEVERAL DAYS +1688-142285-0036-1984: ONE AFTERNOON SHE MET BESSY HIGGINS IN THE STREET AND STOPPED TO SPEAK TO HER +1688-142285-0037-1985: WELL (BESSY->BUSY) HOW ARE YOU +1688-142285-0038-1986: BETTER AND NOT BETTER IF (YO->YOU) KNOW WHAT THAT MEANS +1688-142285-0039-1987: NOT EXACTLY REPLIED MARGARET SMILING +1688-142285-0040-1988: I'M BETTER IN NOT BEING (TORN->TAUGHT) TO PIECES BY COUGHING (O'NIGHTS->OR NIGHTS) BUT I'M WEARY AND TIRED (O->OF) MILTON AND LONGING TO GET AWAY TO THE LAND (O 
BEULAH->OF BOOLA) AND WHEN I THINK I'M FARTHER AND FARTHER OFF MY HEART SINKS AND I'M NO BETTER I'M WORSE +1688-142285-0041-1989: MARGARET TURNED ROUND TO WALK (ALONGSIDE->LONG SIDE) OF THE GIRL IN HER FEEBLE PROGRESS HOMEWARD +1688-142285-0042-1990: BUT FOR A MINUTE OR TWO SHE DID NOT SPEAK +1688-142285-0043-1991: AT LAST SHE SAID IN A LOW VOICE +1688-142285-0044-1992: BESSY DO YOU WISH TO DIE +1688-142285-0045-1993: BESSY WAS SILENT IN HER TURN FOR A MINUTE OR TWO THEN SHE REPLIED +1688-142285-0046-1994: (NOUGHT->NOT) WORSE THAN MANY OTHERS I RECKON +1688-142285-0047-1995: BUT WHAT WAS IT +1688-142285-0048-1996: YOU KNOW I'M A STRANGER HERE SO PERHAPS I'M NOT SO QUICK AT UNDERSTANDING WHAT YOU MEAN AS IF I'D LIVED ALL MY LIFE (AT->IN) MILTON +1688-142285-0049-1997: I HAD FORGOTTEN WHAT I SAID FOR THE TIME CONTINUED MARGARET QUIETLY +1688-142285-0050-1998: I SHOULD HAVE THOUGHT OF IT AGAIN WHEN I WAS LESS BUSY MAY I GO WITH YOU NOW +1688-142285-0051-1999: THE SHARPNESS IN HER EYE TURNED TO A WISTFUL LONGING AS SHE MET (MARGARET'S->MARGARET) SOFT AND FRIENDLY GAZE +1688-142285-0052-2000: AS THEY TURNED UP INTO A SMALL COURT OPENING OUT (OF->INTO) A SQUALID STREET BESSY SAID +1688-142285-0053-2001: (YO'LL->YOU WILL) NOT BE DAUNTED IF FATHER'S AT HOME AND SPEAKS A BIT GRUFFISH AT FIRST +1688-142285-0054-2002: BUT NICHOLAS WAS NOT AT HOME WHEN THEY ENTERED +1688-142285-0055-2003: GASPED (BESSY->BESSIE) AT LAST +1688-142285-0056-2004: BESSY TOOK A LONG AND FEVERISH DRAUGHT AND THEN FELL BACK AND SHUT HER EYES +1688-142285-0057-2005: MARGARET BENT OVER AND SAID BESSY DON'T BE IMPATIENT WITH YOUR LIFE WHATEVER IT IS OR MAY HAVE BEEN +1688-142285-0058-2006: REMEMBER WHO GAVE IT (*->TO) YOU AND MADE IT WHAT IT IS +1688-142285-0059-2007: NOW I'LL NOT HAVE MY WENCH (PREACHED->PREACH) TO +1688-142285-0060-2008: BUT SURELY SAID MARGARET FACING ROUND YOU BELIEVE IN WHAT I SAID THAT GOD GAVE HER LIFE AND ORDERED WHAT KIND OF LIFE IT WAS TO BE +1688-142285-0061-2009: I BELIEVE WHAT I SEE AND NO MORE +1688-142285-0062-2010: THAT'S WHAT I BELIEVE YOUNG WOMAN +1688-142285-0063-2011: I DON'T BELIEVE ALL I HEAR NO NOT BY A BIG DEAL +1688-142285-0064-2012: BUT (HOO'S->WHOSE) COME AT LAST AND (HOO'S->WHO'S) WELCOME AS LONG AS (HOO'LL->HE'LL) KEEP FROM PREACHING ON WHAT (HOO->WHO) KNOWS (NOUGHT->NOT) ABOUT +1688-142285-0065-2013: IT'S SIMPLE AND NOT FAR TO FETCH NOR HARD TO WORK +1688-142285-0066-2014: BUT THE GIRL ONLY PLEADED THE MORE WITH MARGARET +1688-142285-0067-2015: DON'T THINK HARDLY ON HIM HE'S A GOOD MAN HE IS +1688-142285-0068-2016: I SOMETIMES THINK I SHALL BE (MOPED WI->MILKED WITH) SORROW EVEN IN THE CITY OF GOD IF (FATHER->EITHER) IS NOT THERE +1688-142285-0069-2017: THE FEVERISH COLOUR CAME INTO (HER CHEEK->A CHEEKS) AND THE FEVERISH FLAME INTO HER EYE +1688-142285-0070-2018: BUT (YOU WILL->YOU'LL) BE (THERE->THEIR) FATHER YOU SHALL (OH->O) MY HEART +1688-142285-0071-2019: SHE PUT HER HAND TO IT AND BECAME GHASTLY PALE +1688-142285-0072-2020: MARGARET HELD HER IN HER ARMS AND PUT THE WEARY HEAD TO REST UPON HER BOSOM +1688-142285-0073-2021: PRESENTLY THE SPASM THAT FORESHADOWED DEATH HAD PASSED AWAY AND (BESSY->BUSY) ROUSED HERSELF AND SAID +1688-142285-0074-2022: I'LL GO TO BED IT'S BEST PLACE BUT CATCHING (AT->THAT) MARGARET'S GOWN (YO'LL->YOU'LL) COME AGAIN I KNOW (YO->YOU) WILL BUT JUST SAY IT +1688-142285-0075-2023: (I WILL->OH) COME TO MORROW SAID MARGARET +1688-142285-0076-2024: MARGARET WENT AWAY VERY SAD AND THOUGHTFUL +1688-142285-0077-2025: SHE WAS LATE FOR TEA AT HOME 
+1688-142285-0078-2026: HAVE YOU MET WITH A SERVANT DEAR +1688-142285-0079-2027: NO MAMMA THAT ANNE BUCKLEY WOULD NEVER HAVE DONE +1688-142285-0080-2028: (SUPPOSE->S'POSE) I TRY SAID MISTER HALE +1688-142285-0081-2029: EVERYBODY ELSE HAS HAD (THEIR->THEY) TURN (AT->UP) THIS GREAT DIFFICULTY NOW LET ME TRY +1688-142285-0082-2030: I MAY BE THE (CINDERELLA->CINRILLA) TO PUT ON THE SLIPPER AFTER ALL +1688-142285-0083-2031: WHAT WOULD YOU DO PAPA HOW WOULD YOU SET ABOUT IT +1688-142285-0084-2032: WHY I WOULD APPLY TO SOME GOOD HOUSE MOTHER TO RECOMMEND ME ONE KNOWN TO HERSELF OR HER SERVANTS +1688-142285-0085-2033: VERY GOOD BUT WE MUST FIRST CATCH OUR HOUSE MOTHER +1688-142285-0086-2034: THE MOTHER OF WHOM HE SPOKE TO US SAID MARGARET +1688-142285-0087-2035: (MISSUS->MISTER) THORNTON THE ONLY MOTHER HE HAS I BELIEVE SAID MISTER HALE QUIETLY +1688-142285-0088-2036: I SHALL LIKE TO SEE HER SHE MUST BE AN UNCOMMON PERSON HER MOTHER ADDED +1688-142285-0089-2037: PERHAPS SHE MAY HAVE A RELATION WHO MIGHT SUIT US AND BE GLAD OF OUR PLACE +1688-142285-0090-2038: SHE SOUNDED TO BE SUCH A CAREFUL ECONOMICAL PERSON THAT I SHOULD LIKE ANY ONE OUT OF THE SAME FAMILY +1688-142285-0091-2039: MY DEAR SAID MISTER HALE ALARMED PRAY DON'T GO OFF ON THAT IDEA +1688-142285-0092-2040: I AM SURE AT ANY RATE SHE WOULD NOT LIKE STRANGERS TO KNOW ANYTHING ABOUT IT +1688-142285-0093-2041: TAKE NOTICE THAT (*->THIS) IS NOT MY KIND OF (HAUGHTINESS->FORTNESS) PAPA IF I HAVE ANY AT ALL WHICH I DON'T AGREE TO THOUGH (YOU'RE->YOU) ALWAYS ACCUSING ME OF IT +1688-142285-0094-2042: I DON'T KNOW POSITIVELY THAT IT IS HERS EITHER BUT FROM LITTLE THINGS I HAVE GATHERED FROM HIM I FANCY SO +1688-142285-0095-2043: THEY CARED TOO LITTLE TO ASK IN WHAT MANNER HER SON HAD SPOKEN ABOUT HER +1998-15444-0000-2204: IF CALLED TO A CASE SUPPOSED (OR->OF) SUSPECTED TO BE ONE OF POISONING THE MEDICAL MAN HAS TWO DUTIES TO PERFORM TO SAVE THE PATIENT'S LIFE AND TO PLACE HIMSELF IN A POSITION TO GIVE EVIDENCE (IF->OF) CALLED (ON TO DO SO->UNTO) +1998-15444-0001-2205: HE SHOULD MAKE INQUIRIES AS TO SYMPTOMS AND TIME AT WHICH FOOD OR MEDICINE WAS LAST TAKEN +1998-15444-0002-2206: HE SHOULD NOTICE THE POSITION AND TEMPERATURE OF THE BODY THE CONDITION OF (RIGOR MORTIS->RIGA MORTARS) MARKS OF (VIOLENCE->FIDANTS) APPEARANCE OF LIPS AND MOUTH +1998-15444-0003-2207: IN MAKING A POST (MORTEM EXAMINATION->MODER MAXIMMUNITION) THE (ALIMENTARY->ELEMENTARY) CANAL SHOULD BE REMOVED AND PRESERVED FOR FURTHER INVESTIGATION +1998-15444-0004-2208: THE GUT (AND->IN) THE (GULLET->COLLEGE) BEING CUT ACROSS BETWEEN THESE LIGATURES THE STOMACH MAY BE REMOVED (ENTIRE->AND TIRED) WITHOUT (SPILLING->SPINNING) ITS CONTENTS +1998-15444-0005-2209: IF THE (MEDICAL PRACTITIONER IS IN DOUBT->MEDICA PRACTITIONERS ENDOWED) ON ANY POINT HE SHOULD OBTAIN (TECHNICAL->TECHNICHAL) ASSISTANCE FROM (SOMEONE->SOME ONE) WHO HAS PAID ATTENTION TO THE SUBJECT +1998-15444-0006-2210: IN A CASE OF ATTEMPTED SUICIDE (BY->FOR) POISONING IS IT THE DUTY OF THE DOCTOR TO INFORM THE POLICE +1998-15444-0007-2211: THE BEST (EMETIC->AMATIC) IS THAT WHICH (IS->*) AT HAND +1998-15444-0008-2212: THE (DOSE->DUST) FOR (AN ADULT->NO DOUBT) IS TEN MINIMS +1998-15444-0009-2213: (APOMORPHINE->EPIMORPHY) IS NOT (ALLIED->ALIT) IN PHYSIOLOGICAL ACTION TO MORPHINE AND MAY BE GIVEN IN CASES OF (NARCOTIC->NAUCOTIC) POISONING +1998-15444-0010-2214: TICKLING THE (FAUCES->FORCES) WITH (A->THE) FEATHER MAY EXCITE (VOMITING->RHOMETTING) +1998-15444-0011-2215: IN USING THE ELASTIC STOMACH TUBE SOME FLUID SHOULD BE 
INTRODUCED INTO THE STOMACH BEFORE ATTEMPTING TO EMPTY IT OR A PORTION OF THE MUCOUS (MEMBRANE->MEMORANE) MAY BE (SUCKED->SACKED) INTO THE APERTURE +1998-15444-0012-2216: THE TUBE SHOULD BE EXAMINED TO SEE THAT IT IS NOT BROKEN OR CRACKED AS ACCIDENTS HAVE HAPPENED FROM NEGLECTING THIS PRECAUTION +1998-15444-0013-2217: (ANTIDOTES ARE->AND HE DOES A) USUALLY GIVEN HYPODERMICALLY OR IF (BY->THE) MOUTH (IN->AND) THE FORM OF TABLETS +1998-15444-0014-2218: IN THE ABSENCE OF (A->THE) HYPODERMIC SYRINGE THE REMEDY MAY BE GIVEN BY THE RECTUM +1998-15444-0015-2219: NOTICE THE (SMELL->SMILE) COLOUR AND GENERAL APPEARANCE OF THE MATTER SUBMITTED FOR EXAMINATION +1998-15444-0016-2220: FOR THE SEPARATION OF AN (ALKALOID->ACALIT) THE FOLLOWING IS THE PROCESS OF (STAS OTTO->STATS ARE TO) +1998-15444-0017-2221: THIS PROCESS IS BASED UPON THE PRINCIPLE THAT THE (SALTS->SOULS) OF THE (ALKALOIDS->ACCOLITES) ARE SOLUBLE IN (ALCOHOL AND->ACCULENT) WATER AND INSOLUBLE IN ETHER +1998-15444-0018-2222: THE PURE (ALKALOIDS WITH->AKELOITS WAS) THE EXCEPTION OF MORPHINE IN ITS CRYSTALLINE FORM (ARE->A) SOLUBLE (IN ETHER->BENEATH THEM) +1998-15444-0019-2223: TWO (COOL->UR) THE MIXTURE AND FILTER WASH THE RESIDUE WITH STRONG ALCOHOL AND MIX THE (FILTRATES->FIR TRADES) +1998-15444-0020-2224: THE (RESIDUE->READY YOU) MAY BE (SET->SAID) ASIDE FOR THE DETECTION OF THE METALLIC POISONS (IF->OF) SUSPECTED EXPEL THE (ALCOHOL->ACCOHOL) BY CAREFUL EVAPORATION +1998-15444-0021-2225: ON THE EVAPORATION OF THE ALCOHOL THE (RESINOUS->VEZENOUS) AND (FATTY MATTERS->FATIMATUS) SEPARATE +1998-15444-0022-2226: EVAPORATE THE (FILTRATE->FEDERATE) TO A (SYRUP AND->CYRUP AN) EXTRACT WITH SUCCESSIVE PORTIONS OF ABSOLUTE ALCOHOL +1998-15444-0023-2227: SEPARATE THE ETHEREAL SOLUTION AND EVAPORATE +1998-15444-0024-2228: FIVE A PART OF THIS ETHEREAL SOLUTION IS (POURED->PUT) INTO (A->*) WATCH GLASS AND (ALLOWED->ALLOW) TO EVAPORATE +1998-15444-0025-2229: TO PURIFY IT (ADD->EDISM) A (SMALL->*) QUANTITY OF DILUTE (SULPHURIC->SUFFER) ACID AND AFTER EVAPORATING TO THREE QUARTERS OF ITS (BULK->BARK) ADD A (SATURATED->SITUATED) SOLUTION OF CARBONATE OF POTASH OR SODA +1998-15444-0026-2230: (BOIL->BOY) THE (FINELY DIVIDED SUBSTANCE->FINALLY DIVIDEST ABSTANCE) WITH ABOUT ONE (EIGHTH->EIGHTHS) ITS (BULK->BARK) OF PURE (HYDROCHLORIC->HYDROCLOIC) ACID ADD FROM TIME TO TIME POTASSIC (CHLORATE->LOW RAGE) UNTIL THE SOLIDS ARE REDUCED TO A STRAW YELLOW FLUID +1998-15444-0027-2231: THE RESIDUE OF THE MATERIAL AFTER DIGESTION (WITH HYDROCHLORIC->WAS HYDROCLOGIC) ACID AND (POTASSIUM CHLORATE->PROTESTING CHLORODE) MAY HAVE TO BE EXAMINED FOR SILVER LEAD AND BARIUM +1998-29454-0000-2157: A THOUSAND BLESSINGS FROM A GRATEFUL HEART +1998-29454-0001-2158: PERUSAL SAID THE (PAWNBROKER->POND BROKER) THAT'S THE WAY TO (PERNOUNCE->PRONOUNCE) IT +1998-29454-0002-2159: HIS BOOKS TOLD HIM (THAT TREASURE IS->THE TREASURES) BEST HIDDEN (UNDER->ON A) LOOSE BOARDS (UNLESS->AND AS) OF COURSE YOUR HOUSE (HAS->HAD) A SECRET (PANEL->PANNER) WHICH HIS HAD NOT +1998-29454-0003-2160: HE GOT IT UP AND PUSHED HIS TREASURES AS FAR IN AS HE COULD ALONG THE ROUGH (CRUMBLY->CRAMBLY) SURFACE OF THE (LATH->LAS) AND PLASTER +1998-29454-0004-2161: WHEN DICKIE CAME DOWN HIS AUNT (SLIGHTLY SLAPPED->SAT HE SLEPT) HIM AND HE TOOK THE HALFPENNY AND LIMPED OFF OBEDIENTLY +1998-29454-0005-2162: HE HAD NEVER SEEN ONE BEFORE AND IT INTERESTED HIM EXTREMELY +1998-29454-0006-2163: HE LOOKED ABOUT HIM AND KNEW THAT HE DID NOT AT ALL KNOW WHERE HE WAS +1998-29454-0007-2164: WHAT'S (UP MATEY LOST->THAT MATE 
ASKED) YOUR WAY DICKIE EXPLAINED +1998-29454-0008-2165: WHEN HE SAID (AVE->HAVE) I (BIN->BEEN) ASLEEP +1998-29454-0009-2166: HERE WE ARE SAID THE MAN +1998-29454-0010-2167: NOT (EXACKLY->EXACTLY) SAID THE MAN BUT IT'S ALL RIGHT +1998-29454-0011-2168: WHEN IT WAS OVER THE MAN ASKED DICKIE IF HE COULD WALK A LITTLE WAY AND WHEN DICKIE SAID HE COULD THEY SET OUT IN THE MOST FRIENDLY WAY SIDE BY SIDE +1998-29454-0012-2169: AND THE (TEA->TINEL) AND (ALL AN->*) THE EGG +1998-29454-0013-2170: AND THIS IS THE PRETTIEST PLACE EVER I SEE +1998-29454-0014-2171: I (SHALL->SHOULD) CATCH IT (A FAIR->IF HER) TREAT AS IT IS +1998-29454-0015-2172: SHE WAS (WAITIN->WAITING) FOR THE WOOD TO BOIL THE (KETTLE->CATTLE) WHEN (I->TO) COME OUT MOTHER +1998-29454-0016-2173: (AIN'T->AND) BAD WHEN SHE'S IN A GOOD TEMPER +1998-29454-0017-2174: THAT (AIN'T WHAT SHE'LL->ANNE BUT YOU'LL) BE IN WHEN YOU GETS BACK +1998-29454-0018-2175: I GOT (TO->A) STICK IT SAID (DICKIE->DICKY) SADLY I'D BEST BE GETTING HOME +1998-29454-0019-2176: I WOULDN'T GO (OME->HOME) NOT IF I (WAS YOU->WERE USE) SAID THE MAN +1998-29454-0020-2177: NO SAID DICKIE OH NO NO I NEVER +1998-29454-0021-2178: I (AIN'T IT YER->ENTERTA) HAVE I LIKE WHAT (YER AUNT DO->YOU AREN TO) +1998-29454-0022-2179: WELL (THAT'LL->THAT) SHOW YOU (THE->A) SORT OF (MAN->MEN) I AM +1998-29454-0023-2180: THE MAN'S MANNER WAS SO KIND AND HEARTY THE WHOLE (ADVENTURE->ADVENTUR) WAS SO WONDERFUL AND NEW IS IT COUNTRY WHERE YOU GOING +1998-29454-0024-2181: THE SUN (SHOT->HAD) LONG GOLDEN BEAMS THROUGH THE GAPS (IN->AND) THE HEDGE +1998-29454-0025-2182: A BIRD (PAUSED->PASSED) IN ITS FLIGHT ON (A->*) BRANCH QUITE CLOSE AND CLUNG (THERE SWAYING->THEIR SWAIN) +1998-29454-0026-2183: HE TOOK OUT OF HIS POCKET (A NEW->AND YOUR) ENVELOPE (A NEW SHEET->AND YOU SEED) OF PAPER AND A NEW PENCIL READY SHARPENED BY MACHINERY +1998-29454-0027-2184: (AN->AND) I (ASKS->ASK) YOU LET ME COME (ALONGER->ALONG OF) YOU GOT THAT +1998-29454-0028-2185: (GET IT->GENISH) WROTE DOWN THEN DONE +1998-29454-0029-2186: THEN HE FOLDED IT AND PUT IT IN HIS POCKET +1998-29454-0030-2187: NOW (WE'RE SQUARE->HE IS QUEER) HE SAID +1998-29454-0031-2188: THEY COULD PUT A (MAN->MEN) AWAY FOR LESS THAN THAT +1998-29454-0032-2189: I SEE THAT (THERE IN->THEN) A BOOK SAID (DICKIE->DICK HAD) CHARMED +1998-29454-0033-2190: HE REWARD THE WAKE THE LAST OF THE ENGLISH AND I (WUNNERED->WANTED) WHAT (IT->HAD) STOOD FOR +1998-29454-0034-2191: WILD ONES (AIN'T ALF THE->AND A HALF) SIZE I LAY +1998-29454-0035-2192: ADVENTURES I SHOULD THINK SO +1998-29454-0036-2193: AH SAID (DICKIE->DICKY) AND A (FULL->FOOT) SILENCE FELL BETWEEN THEM +1998-29454-0037-2194: THAT WAS CHARMING BUT IT WAS PLEASANT TOO TO WASH THE (MUD OFF->MADAM) ON THE WET GRASS +1998-29454-0038-2195: (DICKIE->DICKY) ALWAYS REMEMBERED THAT MOMENT +1998-29454-0039-2196: SO YOU SHALL SAID MISTER BEALE A (REG'LER->REGULAR) WASH ALL OVER THIS VERY NIGHT I ALWAYS LIKE A WASH MESELF +1998-29454-0040-2197: SOME (BLOKES->LOAST) THINK IT PAYS TO BE DIRTY BUT IT DON'T +1998-29454-0041-2198: IF (YOU'RE->YOU) CLEAN THEY SAY HONEST POVERTY (AN->AND) IF YOU'RE DIRTY THEY SAY SERVE YOU RIGHT +1998-29454-0042-2199: YOU ARE GOOD SAID DICKIE I DO LIKE YOU +1998-29454-0043-2200: I KNOW YOU WILL SAID DICKIE WITH ENTHUSIASM I KNOW (OW->HOW) GOOD YOU ARE +1998-29454-0044-2201: BLESS ME SAID MISTER BEALE UNCOMFORTABLY WELL (THERE->THEN) +1998-29454-0045-2202: (STEP OUT SONNY->SABATANI) OR WE'LL NEVER GET THERE THIS (SIDE->OUT OF) CHRISTMAS +1998-29454-0046-2203: WELL (YOU'LL->YOU) KNOW ALL 
ABOUT IT PRESENTLY +1998-29455-0000-2232: THE SINGING AND LAUGHING WENT ON LONG AFTER HE HAD FALLEN ASLEEP AND IF (LATER->LATE) IN THE EVENING (THERE->THEY) WERE LOUD (VOICED->FOREST) ARGUMENTS OR QUARRELS EVEN (DICKIE->DICKY) DID NOT HEAR THEM +1998-29455-0001-2233: WHAT'S (ALL->ON) THAT THERE DICKIE ASKED POINTING TO THE (ODD KNOBBLY->OTT NOBBY) BUNDLES OF ALL SORTS AND SHAPES TIED ON TO THE PERAMBULATOR'S FRONT +1998-29455-0002-2234: TELL (YER->YOU) WHAT (MATE->MADE) LOOKS TO ME AS IF (I'D->I) TOOK A FANCY TO YOU +1998-29455-0003-2235: (SWELP->SWAP) ME HE SAID HELPLESSLY +1998-29455-0004-2236: (OH->O) LOOK SAID (DICKIE->DICKY) THE FLOWERS +1998-29455-0005-2237: (THEY'RE->THEY ARE) ONLY (WEEDS->REEDS) SAID BEALE +1998-29455-0006-2238: BUT I SHALL HAVE THEM (WHILE THEY'RE->WHERE THEY ARE) ALIVE SAID (DICKIE->DICKY) AS HE HAD SAID TO THE PAWNBROKER (ABOUT->BY) THE MOONFLOWERS +1998-29455-0007-2239: (HI->AY) THERE GOES A RABBIT +1998-29455-0008-2240: (SEE IM CROST THE->SEEM QUEST) ROAD THERE SEE (HIM->EM) +1998-29455-0009-2241: HOW BEAUTIFUL SAID (DICKIE->DICKY) WRIGGLING (WITH->WIS) DELIGHT +1998-29455-0010-2242: THIS LIFE OF THE RABBIT AS DESCRIBED BY MISTER BEALE WAS THE CHILD'S FIRST GLIMPSE OF FREEDOM I'D LIKE TO BE A RABBIT +1998-29455-0011-2243: (OW'M I TO WHEEL->ALL MY TOWER) THE (BLOOMIN->ROOM AND) PRAM IF YOU (GOES ON->GO SON) LIKE (AS->US) IF YOU WAS A (BAG->BICK) OF (EELS->FIELDS) +1998-29455-0012-2244: I LIKE YOU (NEXTER->NEXT TO) MY OWN (DADDY->DIRTY) AND MISTER (BAXTER->BEXT THE) NEXT DOOR +1998-29455-0013-2245: THAT'S ALL RIGHT SAID MISTER BEALE AWKWARDLY +1998-29455-0014-2246: DICKIE (QUICK TO->QUICKLY) IMITATE TOUCHED HIS +1998-29455-0015-2247: POOR LITTLE MAN SAID THE LADY YOU MISS YOUR MOTHER DON'T YOU +1998-29455-0016-2248: OH WELL DONE LITTLE (UN->ONE) SAID MISTER (BEALE->BEE) TO HIMSELF +1998-29455-0017-2249: THE TWO TRAVELLERS WERE LEFT FACING EACH OTHER THE RICHER BY A PENNY AND (OH->O) WONDERFUL GOOD FORTUNE A WHOLE HALF CROWN +1998-29455-0018-2250: NO I NEVER SAID DICKIE (ERE'S->YES) THE (STEEVER->STEPLE) +1998-29455-0019-2251: YOU STICK TO THAT SAID (BEALE->BEER) RADIANT WITH DELIGHT (YOU'RE->YOU ARE) A FAIR MASTERPIECE YOU ARE YOU EARNED IT HONEST IF EVER (A KID DONE->KIDNE) +1998-29455-0020-2252: THEY WENT ON UP THE HILL AS HAPPY AS ANY ONE NEED WISH TO BE +1998-29455-0021-2253: PLEASE (DO NOT->DON'T) BE TOO SHOCKED +1998-29455-0022-2254: REMEMBER THAT NEITHER OF THEM KNEW ANY BETTER +1998-29455-0023-2255: TO THE (ELDER->OTHER) TRAMP LIES AND (BEGGING WERE->PEGGING WHEN) NATURAL MEANS OF LIVELIHOOD +1998-29455-0024-2256: BUT YOU SAID THE BED (WITH->WAS) THE GREEN CURTAINS URGED DICKIE +1998-29455-0025-2257: WHICH THIS (AIN'T->END) NOT BY NO MEANS +1998-29455-0026-2258: THE NIGHT IS FULL OF INTERESTING LITTLE SOUNDS THAT WILL NOT AT FIRST LET YOU SLEEP THE RUSTLE OF LITTLE (WILD->WHITE) THINGS (IN->ON) THE (HEDGES->HATCHES) THE BARKING OF DOGS (IN->AND) DISTANT FARMS THE CHIRP OF CRICKETS AND THE CROAKING OF FROGS +1998-29455-0027-2259: THE NEW GAME OF BEGGING AND INVENTING STORIES TO INTEREST THE PEOPLE FROM WHOM IT WAS (WORTH WHILE->WORSE WIDE) TO BEG WENT ON GAILY DAY BY DAY AND WEEK BY WEEK AND DICKIE BY CONSTANT PRACTICE GREW SO CLEVER (AT->*) TAKING HIS PART IN THE ACTING THAT MISTER (BEALE->BEA) WAS QUITE DAZED WITH ADMIRATION +1998-29455-0028-2260: BLESSED (IF I->FOR) EVER SEE SUCH A (NIPPER->NIBBER) HE SAID OVER AND OVER AGAIN +1998-29455-0029-2261: CLEVER AS A (TRAINDAWG E->TRAIN DOG) IS (AN ALL OUTER IS OWN EAD->IN OR OUTER'S OWNETTE) 
+1998-29455-0030-2262: I (AIN'T->AM) SURE AS I (ADN'T->HADN'T) BETTER STICK TO THE ROAD AND KEEP AWAY FROM OLD (ANDS->ENDS) LIKE (YOU JIM->EUGEUM) +1998-29455-0031-2263: I (OPE E'S CLEVER->OPEUS LOVE) ENOUGH TO DO (WOT E'S TOLD KEEP IS MUG SHUT->WHAT HE STOOTE HE WAS MUCH AT) THAT'S ALL +1998-29455-0032-2264: IF (E'S->HE) STRAIGHT (E'LL->YOU'LL) DO FOR ME AND (IF->*) HE (AIN'T->AND) I'LL DO FOR (IM->HIM) SEE +1998-29455-0033-2265: SEE THAT (BLOKE JUST->LOGIS) NOW SAID MISTER BEALE (YUSS->YES) SAID DICKIE +1998-29455-0034-2266: WELL YOU NEVER SEE (IM->HIM) +1998-29455-0035-2267: IF ANY ONE (ARSTS->ASKS) YOU IF YOU EVER SEE (IM->HIM) YOU NEVER (SET->SAID) EYES ON (IM->HIM) IN ALL (YOUR->YOU'RE) BORN NOT TO REMEMBER (IM->*) +1998-29455-0036-2268: (DICKIE->DICKY) WAS FULL OF QUESTIONS BUT MISTER (BEALE HAD->BEAUT) NO ANSWERS (FOR->WERE) THEM +1998-29455-0037-2269: NOR WAS IT SUNDAY ON WHICH THEY TOOK A REST AND WASHED THEIR SHIRTS ACCORDING TO MISTER BEALE'S RULE OF LIFE +1998-29455-0038-2270: THEY DID NOT STAY THERE BUT WALKED OUT ACROSS THE DOWNS (WHERE->WITH) THE (SKYLARKS WERE->SKYLACKS WAS) SINGING AND ON A DIP OF THE DOWNS CAME UPON GREAT STONE (WALLS->WARDS) AND TOWERS VERY STRONG AND GRAY +1998-29455-0039-2271: WHAT'S THAT THERE SAID (DICKIE->DICKY) +2033-164914-0000-661: REPLIED HE OF A TRUTH I HEARD HIM NOT AND I WOT HIM NOT AND FOLKS ARE ALL SLEEPING +2033-164914-0001-662: BUT SHE SAID WHOMSOEVER THOU SEEST AWAKE HE IS THE (RECITER->RESIDER) +2033-164914-0002-663: THEN SAID THE EUNUCH ART THOU HE WHO REPEATED POETRY BUT NOW AND MY LADY HEARD HIM +2033-164914-0003-664: REJOINED THE EUNUCH WHO THEN WAS THE RECITER POINT HIM OUT TO ME +2033-164914-0004-665: BY ALLAH REPLIED THE FIREMAN I TELL THEE THE TRUTH +2033-164914-0005-666: TELL ME WHAT HAPPENED QUOTH (ZAU AL->OWL) MAKAN +2033-164914-0006-667: WHAT (AILS->ELSE) THEE THEN THAT THOU MUST NEEDS (RECITE->RESIDE) VERSES SEEING THAT WE ARE TIRED OUT WITH WALKING AND WATCHING AND ALL THE FOLK ARE ASLEEP FOR THEY REQUIRE SLEEP TO REST THEM OF THEIR FATIGUE +2033-164914-0007-668: AND HE ALSO (IMPROVISED->PROVISED) THE TWO FOLLOWING DISTICHS +2033-164914-0008-669: WHEN (NUZHAT->UZHAT) AL ZAMAN HEARD THE FIRST IMPROVISATION SHE CALLED TO (MIND->MINE) HER FATHER AND HER MOTHER AND HER BROTHER AND THEIR (WHILOME->WILL ON) HOME THEN SHE WEPT AND CRIED (AT->TO) THE EUNUCH AND SAID TO HIM WOE TO THEE +2033-164914-0009-670: HE WHO RECITED THE FIRST TIME (HATH->HAD) RECITED A SECOND TIME AND I HEARD HIM HARD BY +2033-164914-0010-671: BY ALLAH AN THOU FETCH HIM NOT TO ME I WILL ASSUREDLY ROUSE THE CHAMBERLAIN ON THEE AND HE SHALL BEAT THEE AND CAST THEE OUT +2033-164914-0011-672: BUT TAKE THESE HUNDRED (DINERS->DINNERS) AND GIVE THEM TO THE SINGER AND BRING HIM TO ME GENTLY AND DO HIM NO HURT +2033-164914-0012-673: RETURN QUICKLY AND LINGER NOT +2033-164914-0013-674: WHEN IT WAS THE SEVENTY THIRD NIGHT +2033-164914-0014-675: BUT THE EUNUCH SAID I WILL NOT LEAVE THEE TILL THOU SHOW ME WHO IT WAS THAT RECITED THE VERSES FOR I DREAD RETURNING TO MY LADY WITHOUT HIM +2033-164914-0015-676: NOW WHEN THE FIREMAN HEARD THESE WORDS HE FEARED FOR (ZAU AL MAKAN->ZOAMA KHAN) AND WEPT WITH EXCEEDING WEEPING AND SAID TO THE EUNUCH BY ALLAH IT WAS NOT I AND I KNOW HIM NOT +2033-164914-0016-677: SO GO THOU TO THY STATION AND IF THOU AGAIN MEET ANY ONE AFTER THIS HOUR RECITING AUGHT OF POETRY WHETHER HE BE NEAR OR FAR IT WILL BE I OR SOME ONE I KNOW AND THOU SHALT NOT LEARN OF HIM BUT BY ME +2033-164914-0017-678: THEN HE KISSED THE EUNUCH'S HEAD AND SPAKE HIM FAIR 
TILL HE WENT AWAY BUT THE (CASTRATO->GASTRATO) FETCHED (A ROUND->AROUND) AND RETURNING SECRETLY CAME AND STOOD BEHIND THE FIREMAN FEARING TO GO BACK TO HIS MISTRESS WITHOUT TIDINGS +2033-164914-0018-679: I SAY WHAT MADE MY (IGNOMY WHATE'ER->CHOMI WHATEVER) THE (BITTER->BEACHER) CUP I DRAIN FAR BE (FRO->FROM) ME (THAT->THY) LAND TO FLEE NOR WILL I BOW TO THOSE WHO BLAME AND FOR SUCH LOVE WOULD DEAL ME SHAME +2033-164914-0019-680: THEN SAID THE EUNUCH TO (ZAU AL->ZA) MAKAN PEACE BE WITH THEE O MY LORD +2033-164914-0020-681: O MY LORD CONTINUED THE EUNUCH AND SHAHRAZAD PERCEIVED (*->THAT) THE DAWN OF DAY AND CEASED TO SAY HER PERMITTED SAY +2033-164914-0021-682: WE WILL DO THEE NO UPRIGHT O MY SON NOR WRONG THEE IN AUGHT BUT OUR OBJECT IS THAT THOU BEND THY GRACIOUS STEPS WITH ME TO MY MISTRESS TO RECEIVE HER ANSWER AND (RETURN IN WEAL->RETURNING WHEEL) AND SAFETY AND THOU SHALT HAVE A HANDSOME PRESENT AS ONE WHO BRINGETH GOOD NEWS +2033-164914-0022-683: THEN THE EUNUCH WENT OUT TO (ZAU AL->ZAO) MAKAN AND SAID TO HIM RECITE WHAT (VERSES->VERSE IS) THOU KNOWEST FOR MY (LADY IS HERE->LADY'S HEAR) HARD BY LISTENING TO THEE AND AFTER I WILL ASK THEE OF THY NAME AND (THY->THINE) NATIVE COUNTRY AND THY CONDITION +2033-164915-0000-643: AND ALSO THESE +2033-164915-0001-644: THEN SHE THREW HERSELF UPON HIM AND HE GATHERED HER TO HIS BOSOM AND (THE TWAIN->ITALY) FELL DOWN IN A FAINTING FIT +2033-164915-0002-645: WHEN THE (EUNUCH->EUNUCHS) SAW (THIS->THESE) CASE HE WONDERED AT THEM AND THROWING OVER THEM SOMEWHAT TO COVER THEM WAITED TILL THEY SHOULD RECOVER +2033-164915-0003-646: AFTER A WHILE THEY CAME TO THEMSELVES AND (NUZHAT->USHART) AL ZAMAN REJOICED WITH EXCEEDING JOY OPPRESSION AND DEPRESSION (LEFT HER->LAUGHTER) AND GLADNESS TOOK THE MASTERY OF HER AND SHE REPEATED THESE VERSES +2033-164915-0004-647: ACCORDINGLY SHE TOLD HIM ALL THAT HAD COME TO HER SINCE THEIR SEPARATION AT THE KHAN AND WHAT HAD HAPPENED TO HER WITH THE BADAWI HOW THE MERCHANT HAD BOUGHT HER OF HIM AND HAD TAKEN HER TO HER BROTHER (SHARRKAN->SHARKAN) AND HAD SOLD HER TO HIM HOW HE HAD FREED HER AT THE TIME OF BUYING HOW HE HAD MADE (A->HER) MARRIAGE CONTRACT WITH HER AND HAD GONE IN TO HER AND HOW THE KING THEIR SIRE HAD SENT AND ASKED FOR HER FROM (SHARRKAN->SHARKAN) +2033-164915-0005-648: BUT NOW GO TO THY MASTER AND BRING HIM QUICKLY TO ME +2033-164915-0006-649: THE CHAMBERLAIN CALLED THE CASTRATO AND CHARGED HIM TO DO ACCORDINGLY SO HE REPLIED I HEAR AND I OBEY AND HE TOOK HIS PAGES WITH HIM AND WENT OUT IN SEARCH OF THE (STOKER->STOCKER) TILL HE FOUND HIM IN THE REAR OF THE CARAVAN (GIRTHING->GIRDING) HIS ASS AND PREPARING FOR FLIGHT +2033-164915-0007-650: SHE SAID IT HATH REACHED ME O AUSPICIOUS KING THAT WHEN THE (STOKER GIRTHED->STOCKER GIRDED) HIS (ASS->EYES) FOR FLIGHT AND BESPAKE HIMSELF SAYING (OH->O) WOULD I KNEW WHAT IS BECOME OF HIM +2033-164915-0008-651: I BELIEVE HE HATH DENOUNCED ME TO THE EUNUCH HENCE THESE PAGES (ET->AT) ABOUT ME AND HE HATH MADE ME AN ACCOMPLICE IN HIS CRIME +2033-164915-0009-652: WHY DIDST THOU SAY I NEVER REPEATED THESE COUPLETS NOR DO I KNOW WHO REPEATED THEM WHEN IT WAS THY COMPANION +2033-164915-0010-653: BUT NOW I WILL NOT LEAVE THEE BETWEEN THIS PLACE AND (BAGHDAD->BAGDAD) AND WHAT BETIDETH THY COMRADE SHALL BETIDE THEE +2033-164915-0011-654: (TWAS->TOWARDS) AS I FEARED THE (COMING ILLS->CARMINALS) DISCERNING BUT (UNTO->ON TO) ALLAH WE ARE ALL RETURNING +2033-164915-0012-655: THEN THE EUNUCH CRIED UPON (*->IN) THE PAGES SAYING TAKE HIM OFF THE ASS +2033-164915-0013-656: AND HE 
ANSWERED I AM THE CHAMBERLAIN OF THE EMIR OF DAMASCUS KING (SHARRKAN SON OF OMAR BIN->SHARKAN SUNG OVER MARBIN) AL (NU'UMAN->NUMA) LORD OF (BAGHDAD->WABDAD) AND OF THE LAND OF KHORASAN AND I BRING TRIBUTE AND PRESENTS FROM HIM TO HIS FATHER IN BAGHDAD +2033-164915-0014-657: (SO FARE YE->SOPHIA) FORWARDS NO HARM SHALL (BEFAL->BEFALL) YOU TILL YOU JOIN HIS GRAND WAZIR (DANDAN->THAN DAN) +2033-164915-0015-658: THEN HE BADE HIM BE SEATED AND QUESTIONED HIM AND HE REPLIED THAT HE WAS (CHAMBERLAIN->TREMBLING) TO THE EMIR OF DAMASCUS AND WAS BOUND TO KING OMAR WITH PRESENTS AND THE TRIBUTE OF SYRIA +2033-164915-0016-659: SO IT WAS AGREED THAT WE GO TO DAMASCUS AND FETCH THENCE THE KING'S SON (SHARRKAN->SHARKAN) AND (MAKE HIM->MAY CAME) SULTAN OVER HIS FATHER'S REALM +2033-164915-0017-660: AND AMONGST THEM WERE SOME WHO WOULD HAVE CHOSEN THE CADET (ZAU AL->THOUA) MAKAN FOR QUOTH THEY HIS NAME BE LIGHT OF THE PLACE AND HE HATH A SISTER NUZHAT AL ZAMAN (HIGHS->HIES) THE DELIGHT OF THE TIME BUT THEY SET OUT FIVE YEARS AGO FOR AL (HIJAZ->KI JAS) AND NONE WOTTETH WHAT IS BECOME OF THEM +2033-164916-0000-684: SO HE TURNED TO THE WAZIR DANDAN AND SAID TO HIM VERILY YOUR TALE IS A (WONDER->WANDER) OF WONDERS +2033-164916-0001-685: (KNOW->NO) O CHIEF WAZIR THAT HERE WHERE YOU HAVE ENCOUNTERED ME ALLAH HATH GIVEN YOU REST FROM FATIGUE AND BRINGETH YOU YOUR DESIRE AFTER THE EASIEST OF FASHIONS FOR (THAT->LET) HIS ALMIGHTY WILL (RESTORETH->RESTORE IT) TO YOU (ZAU AL->THOU A) MAKAN AND HIS SISTER (NUZHAT->NOSHAT) AL ZAMAN WHEREBY WE WILL SETTLE THE MATTER AS WE EASILY CAN +2033-164916-0002-686: WHEN THE (MINISTER->MEANS SIR) HEARD THESE WORDS HE REJOICED WITH (GREAT->GRAY) JOY AND SAID O CHAMBERLAIN TELL ME THE TALE OF THE TWAIN AND WHAT (BEFEL->BEFELL) THEM AND THE CAUSE OF THEIR LONG ABSENCE +2033-164916-0003-687: (ZAU AL->ZAO) MAKAN BOWED HIS HEAD (AWHILE->A WHILE) AND THEN SAID I ACCEPT (THIS->THE) POSITION FOR INDEED THERE WAS NO REFUSING AND HE WAS CERTIFIED THAT THE CHAMBERLAIN HAD COUNSELLED HIM WELL AND WISELY AND (SET->SAT) HIM ON THE RIGHT WAY +2033-164916-0004-688: THEN HE ADDED O MY UNCLE HOW SHALL I DO WITH MY BROTHER (SHARRKAN->SHARKAN) +2033-164916-0005-689: AFTER (AWHILE->A WHILE) THE DUST DISPERSED AND THERE APPEARED UNDER IT THE ARMY OF BAGHDAD AND KHORASAN A CONQUERING HOST LIKE THE (FULL->POOL) TIDE SEA AND SHAHRAZAD PERCEIVED THE DAWN OF DAY AND CEASED TO SAY HER PERMITTED SAY +2033-164916-0006-690: WHEN IT WAS THE SEVENTY EIGHTH NIGHT +2033-164916-0007-691: (AND IN IT ALL->ANY NEAT OR) REJOICED AT THE ACCESSION OF THE LIGHT OF THE PLACE +2033-164916-0008-692: LASTLY THE MINISTER WENT IN AND KISSED THE GROUND BEFORE (ZAU AL->ZAO) MAKAN WHO ROSE TO MEET HIM SAYING WELCOME O WAZIR AND (SIRE SANS PEER->SIRS SONSPIER) +2033-164916-0009-693: MOREOVER THE SULTAN COMMANDED HIS WAZIR (DANDAN->DAN) CALL (A->AT) TEN DAYS HALT OF THE ARMY THAT HE MIGHT BE PRIVATE WITH HIM AND LEARN FROM HIM HOW AND WHEREFORE HIS FATHER HAD BEEN SLAIN +2033-164916-0010-694: HE THEN REPAIRED TO THE (HEART->HEARTS) OF THE ENCAMPMENT AND ORDERED (*->THAT) THE HOST TO HALT TEN DAYS +2414-128291-0000-2689: WHAT (HATH->HAD) HAPPENED (UNTO->TO) ME +2414-128291-0001-2690: HE ASKED HIMSELF SOMETHING WARM AND LIVING (QUICKENETH->QUICKENED) ME IT MUST BE IN THE NEIGHBOURHOOD +2414-128291-0002-2691: WHEN HOWEVER (ZARATHUSTRA->THE TWO STRAW) WAS QUITE NIGH (UNTO->AND TO) THEM THEN DID HE HEAR PLAINLY (THAT A->WITH) HUMAN VOICE (SPAKE->PIKE) IN THE MIDST OF THE (KINE->KIND) AND (APPARENTLY->A FRIENDLY) ALL OF THEM HAD TURNED THEIR 
HEADS TOWARDS THE SPEAKER +2414-128291-0003-2692: (WHAT->FOR) DO I HERE SEEK +2414-128291-0004-2693: ANSWERED HE THE SAME THAT THOU SEEKEST THOU MISCHIEF MAKER THAT IS TO SAY HAPPINESS UPON EARTH +2414-128291-0005-2694: FOR I TELL THEE THAT I HAVE ALREADY TALKED HALF A MORNING UNTO THEM AND JUST NOW (WERE->WHERE) THEY ABOUT TO GIVE ME (THEIR->THE) ANSWER +2414-128291-0006-2695: HE WOULD NOT BE RID OF HIS (AFFLICTION->AFFLICATION) +2414-128291-0007-2696: WHO (HATH->HAD) NOT AT PRESENT HIS HEART HIS MOUTH AND HIS EYES FULL OF DISGUST +2414-128291-0008-2697: THOU ALSO THOU ALSO +2414-128291-0009-2698: BUT BEHOLD (THESE KINE->HIS KIND) +2414-128291-0010-2699: (THE KINE->DECLINE) HOWEVER GAZED AT IT ALL AND WONDERED +2414-128291-0011-2700: (WANTON AVIDITY->WARRENTON ALGITTEE) BILIOUS ENVY CAREWORN REVENGE (POPULACE->POPULOUS) PRIDE ALL (THESE->THIS) STRUCK (MINE->MIGHT) EYE +2414-128291-0012-2701: IT IS NO LONGER TRUE (THAT THE->NEITHER) POOR ARE BLESSED +2414-128291-0013-2702: THE KINGDOM OF HEAVEN HOWEVER IS WITH (THE KINE->A KIND) AND WHY IS IT NOT WITH (THE->A) RICH +2414-128291-0014-2703: WHY (DOST->THOSE) THOU TEMPT ME +2414-128291-0015-2704: ANSWERED (THE OTHER->HER) +2414-128291-0016-2705: THOU KNOWEST IT THYSELF BETTER EVEN THAN I +2414-128291-0017-2706: (THUS SPAKE->DOES SPEAK) THE (PEACEFUL->BEATHFUL) ONE AND PUFFED HIMSELF AND (PERSPIRED->POSPIRED) WITH HIS WORDS (SO THAT THE KINE WONDERED ANEW->FERNED A KIND WOUNDED I KNEW) +2414-128291-0018-2707: THOU DOEST VIOLENCE TO THYSELF THOU PREACHER ON THE MOUNT (WHEN->AND) THOU USEST SUCH (SEVERE->SAVIOUR) WORDS +2414-128291-0019-2708: THEY ALSO (ABSTAIN->ABSTAINED) FROM ALL HEAVY (THOUGHTS->TORCH) WHICH INFLATE THE HEART +2414-128291-0020-2709: WELL +2414-128291-0021-2710: (SAID ZARATHUSTRA->SAYS THEREUSTRA) THOU (SHOULDST->SHOULDEST) ALSO SEE (MINE->MY) ANIMALS (MINE->MIGHT) EAGLE AND MY SERPENT THEIR LIKE DO NOT AT PRESENT EXIST ON EARTH +2414-128291-0022-2711: AND TALK TO MINE ANIMALS OF THE HAPPINESS OF ANIMALS +2414-128291-0023-2712: NOW HOWEVER TAKE LEAVE AT ONCE OF (THY KINE->THEIR KIND) THOU STRANGE ONE +2414-128291-0024-2713: THOU (AMIABLE->ADMIABLE) ONE +2414-128291-0025-2714: FOR THEY ARE (THY WARMEST FRIENDS->DIVORITES) AND (PRECEPTORS->PERCEPTIVES) +2414-128291-0026-2715: THOU (EVIL FLATTERER->A SLATTERER) +2414-128292-0000-2618: WHITHER (HATH->HAD) MY (LONESOMENESS GONE->LONESOME DISCOUR) SPAKE HE +2414-128292-0001-2619: MY SHADOW CALLETH ME +2414-128292-0002-2620: WHAT MATTER ABOUT MY SHADOW +2414-128292-0003-2621: LET IT RUN AFTER ME I (RUN->RAN) AWAY FROM IT +2414-128292-0004-2622: THUS (SPAKE ZARATHUSTRA->BIG OR TWO STRIKE) TO HIS HEART AND RAN AWAY +2414-128292-0005-2623: VERILY MY FOLLY HATH GROWN BIG IN THE MOUNTAINS +2414-128292-0006-2624: NOW DO I HEAR SIX OLD (FOOLS->FOOTS) LEGS RATTLING BEHIND ONE ANOTHER +2414-128292-0007-2625: (BUT DOTH ZARATHUSTRA->BY DIRTS ARTISTRA) NEED TO BE FRIGHTENED BY (HIS->A) SHADOW +2414-128292-0008-2626: ALSO (METHINKETH->METHINK IT) THAT AFTER ALL IT (HATH->HAD) LONGER (LEGS->LESS) THAN MINE +2414-128292-0009-2627: FOR WHEN (ZARATHUSTRA SCRUTINISED->THEY ARE TOO STRETS CRIED) HIM WITH HIS GLANCE HE WAS FRIGHTENED AS BY (A SUDDEN->ASSERTED) APPARITION SO SLENDER SWARTHY HOLLOW AND WORN OUT (DID THIS->WITH HIS) FOLLOWER (APPEAR->APPEARED) +2414-128292-0010-2628: (ASKED ZARATHUSTRA VEHEMENTLY->I DECK TO ESTRAVA IMAGED) WHAT (DOEST->DO IS) THOU HERE +2414-128292-0011-2629: AND WHY CALLEST THOU THYSELF MY SHADOW +2414-128292-0012-2630: THOU ART NOT PLEASING (UNTO->IN TO) ME 
+2414-128292-0013-2631: MUST I EVER BE ON THE WAY +2414-128292-0014-2632: O (EARTH->ART) THOU HAST BECOME (TOO->TO) ROUND FOR ME +2414-128292-0015-2633: (WHEN THE->WITH A) DEVIL (CASTETH->CAST AT) HIS SKIN DOTH NOT HIS NAME ALSO FALL AWAY IT IS ALSO (SKIN->SKINNED) +2414-128292-0016-2634: THE DEVIL HIMSELF IS PERHAPS (SKIN->KIN) +2414-128292-0017-2635: SOMETIMES I MEANT TO LIE AND BEHOLD +2414-128292-0018-2636: THEN (ONLY->ALLEY) DID I (HIT->HATE) THE TRUTH +2414-128292-0019-2637: HOW HAVE I STILL INCLINATION +2414-128292-0020-2638: (HAVE->ERE) I STILL A (GOAL->GOLD) +2414-128292-0021-2639: A (HAVEN TOWARDS WHICH->HEROIND DOOR SPREAD) MY (SAIL IS SET->SAILORS SAID) +2414-128292-0022-2640: FOR IT (DO->TOO) I ASK AND SEEK AND (HAVE SOUGHT BUT HAVE->HATH THOUGHT IT HATH) NOT FOUND IT +2414-128292-0023-2641: (O->OR) ETERNAL EVERYWHERE (O ETERNAL->WHO HAD TURNED OUT) NOWHERE (O ETERNAL->WHO HAD TURNED) IN VAIN +2414-128292-0024-2642: THOU ART MY SHADOW +2414-128292-0025-2643: SAID HE (AT LAST SADLY->ASSALY) +2414-128292-0026-2644: THY DANGER IS (NOT->MUCH) SMALL THOU FREE SPIRIT AND (WANDERER->WONDER) +2414-128292-0027-2645: (THEY SLEEP->DESLEY) QUIETLY THEY (ENJOY->ENJOYED) THEIR NEW SECURITY +2414-128292-0028-2646: (BEWARE LEST->BE REALIZED) IN THE END A NARROW (FAITH CAPTURE THEE->FATE CAPTURED THE) A HARD (RIGOROUS DELUSION->RECKLESS ILLUSION) +2414-128292-0029-2647: FOR NOW EVERYTHING THAT IS NARROW AND FIXED (SEDUCETH->SEDUCE IT) AND (TEMPTETH->TEMPT IT) THEE +2414-128292-0030-2648: THOU HAST LOST (THY GOAL->DAGGOOD) +2414-128292-0031-2649: (THOU->THOUGH) POOR ROVER AND RAMBLER THOU TIRED (BUTTERFLY->BUT TO FLY) +2414-128292-0032-2650: WILT THOU HAVE (A REST->ARREST) AND A HOME THIS EVENING +2414-159411-0000-2653: (ONCE UPON->WHENCE) A (*->WINTER) TIME A BRAHMAN WHO WAS WALKING ALONG THE ROAD CAME UPON AN IRON CAGE IN WHICH A GREAT TIGER (HAD BEEN SHUT->ADMIRED) UP BY THE (VILLAGERS->VILLAGES) WHO CAUGHT HIM +2414-159411-0001-2654: THE (BRAHMAN->BRAMIN) ANSWERED NO I WILL NOT FOR IF I LET YOU OUT OF THE CAGE YOU WILL EAT ME +2414-159411-0002-2655: OH FATHER OF MERCY ANSWERED THE TIGER IN TRUTH THAT I WILL NOT +2414-159411-0003-2656: I WILL NEVER BE SO UNGRATEFUL ONLY LET ME OUT THAT I MAY DRINK SOME WATER AND RETURN +2414-159411-0004-2657: (THEN->AND IN) THE (BRAHMAN->BRAMMING) TOOK PITY ON HIM AND OPENED THE CAGE (DOOR->*) BUT NO SOONER HAD HE (DONE->TURNED) SO THAN THE TIGER JUMPING OUT SAID NOW I WILL EAT YOU FIRST AND DRINK THE WATER AFTERWARDS +2414-159411-0005-2658: SO THE (BRAHMAN->BRAMID) AND THE TIGER WALKED ON TILL THEY CAME TO A (BANYAN->BENDONED) TREE AND THE (BRAHMAN->BRAMMEN) SAID TO IT (BANYAN->BANNY) TREE (BANYAN TREE HEAR->BANDREE HERE) AND GIVE (JUDGMENT->GERMAN) +2414-159411-0006-2659: ON WHAT MUST I GIVE JUDGMENT ASKED THE (BANYAN->BEN) TREE +2414-159411-0007-2660: (THIS TIGER->THE STAGER) SAID (THE BRAHMAN BEGGED->DE BRAMIN BEG) ME TO LET HIM OUT OF HIS CAGE TO DRINK A LITTLE WATER AND HE PROMISED NOT TO HURT ME IF I DID SO BUT NOW THAT I HAVE (LET->LEFT) HIM OUT HE WISHES TO EAT ME +2414-159411-0008-2661: (IS IT JUST->IT'S A JEALOUS) THAT HE SHOULD DO SO OR NO +2414-159411-0009-2662: (LET->LAID) THE TIGER EAT THE MAN FOR MEN ARE (AN->IN) UNGRATEFUL RACE +2414-159411-0010-2663: SIR (CAMEL->CAMELO) SIR (CAMEL->CAMEO) CRIED THE BRAHMAN (HEAR->HERE) AND GIVE (JUDGMENT->GEOGNANT) +2414-159411-0011-2664: AT A LITTLE DISTANCE THEY FOUND A BULLOCK LYING BY THE ROADSIDE +2414-159411-0012-2665: IS IT FAIR THAT HE SHOULD DO SO OR NOT +2414-159411-0013-2666: (LET->LED) THE TIGER 
EAT THE MAN FOR MEN HAVE NO PITY +2414-159411-0014-2667: THREE OUT OF THE SIX (HAD->AND) GIVEN JUDGMENT AGAINST THE (BRAHMAN BUT->BRAHMEN WITH) STILL HE DID NOT LOSE ALL HOPE AND DETERMINED TO ASK THE OTHER THREE +2414-159411-0015-2668: ON WHAT MUST I GIVE (JUDGMENT->JULIET) ASKED THE EAGLE +2414-159411-0016-2669: THE (BRAHMAN STATED->BRAMIN SUITED) THE CASE AND THE EAGLE ANSWERED WHENEVER MEN SEE ME THEY TRY TO SHOOT ME (THEY CLIMB->DECLINED) THE ROCKS AND (STEAL->STEED) AWAY MY (LITTLE->FEW) ONES +2414-159411-0017-2670: (THEN->IN) THE TIGER BEGAN TO ROAR AND SAID (THE->*) JUDGMENT OF ALL IS AGAINST YOU O (BRAHMAN->BRAHMIN) +2414-159411-0018-2671: AFTER THIS THEY SAW AN ALLIGATOR AND THE (BRAHMAN->BRAMMER) RELATED THE MATTER TO HIM HOPING FOR A MORE (FAVORABLE->FAVOURABLE) VERDICT +2414-159411-0019-2672: (BUT THE ALLIGATOR SAID->WITH AN ADDER TO SIT) WHENEVER I PUT MY NOSE OUT OF THE WATER (MEN TORMENT->MEANTIME AND) ME AND (TRY->TRIED) TO KILL ME +2414-159411-0020-2673: (THE BRAHMAN->NO BROWN MEN) GAVE HIMSELF UP AS LOST BUT AGAIN HE PRAYED THE TIGER TO HAVE PATIENCE AND LET HIM ASK THE OPINION OF THE (SIXTH->SIX) JUDGE +2414-159411-0021-2674: (NOW->ON) THE SIXTH WAS A JACKAL +2414-159411-0022-2675: THE (BRAHMAN->GRANDMOTHER) TOLD HIS STORY AND SAID TO HIM UNCLE JACKAL (UNCLE->AND WILL) JACKAL SAY WHAT IS YOUR JUDGMENT +2414-159411-0023-2676: SHOW ME THE (PLACE->PACE) +2414-159411-0024-2677: (WHEN THEY GOT THERE->AND THE COURT DEER) THE JACKAL SAID (NOW BRAHMAN->NABRAMIN) SHOW ME EXACTLY WHERE YOU STOOD +2414-159411-0025-2678: EXACTLY THERE WAS IT ASKED THE (JACKAL->JACK WHO) +2414-159411-0026-2679: EXACTLY HERE REPLIED THE (BRAHMAN->PROMIN) +2414-159411-0027-2680: (WHERE->THERE) WAS THE TIGER THEN +2414-159411-0028-2681: WHY I STOOD SO SAID THE TIGER JUMPING INTO THE CAGE AND MY HEAD WAS ON THIS SIDE +2414-159411-0029-2682: VERY GOOD SAID (THE JACKAL->TO JACK HOO) BUT I CANNOT JUDGE WITHOUT UNDERSTANDING THE WHOLE MATTER EXACTLY +2414-159411-0030-2683: SHUT AND BOLTED SAID (THE BRAHMAN->DEBRAMIN) +2414-159411-0031-2684: THEN (SHUT->SHET) AND (BOLT->BOLD) IT SAID (THE JACKAL->TO JACK HO) +2414-159411-0032-2685: WHEN THE BRAHMAN HAD (DONE->TURNED) THIS THE JACKAL SAID OH YOU WICKED AND UNGRATEFUL (TIGER->TIRE) +2414-159411-0033-2686: (WHEN THE->WITH A) GOOD (BRAHMAN OPENED YOUR CAGE DOOR->BRAM IN OPEN YOU CAME TO HER) IS TO EAT HIM THE ONLY RETURN YOU WOULD MAKE +2414-159411-0034-2687: PROCEED ON YOUR JOURNEY (FRIEND BRAHMAN->FRIN) +2414-159411-0035-2688: (YOUR ROAD LIES->HE RULED LIVES) THAT WAY (AND MINE->IN MIND) THIS +2414-165385-0000-2651: THUS ACCOMPLISHED (HE->*) EXCITED (THE->*) ADMIRATION OF EVERY SILLY (COQUETTE->COCKET) AND THE ENVY OF EVERY (FLUTTERING COXCOMB->FACTIVE ACCOUNT) BUT BY ALL YOUNG GENTLEMEN AND LADIES OF UNDERSTANDING HE WAS HEARTILY DESPISED AS A MERE CIVILIZED MONKEY +2414-165385-0001-2652: THAT HIS SOUL MIGHT AFTERWARDS OCCUPY SUCH A STATION AS WOULD BE MOST SUITABLE TO HIS CHARACTER IT WAS (SENTENCED->INTENSE) TO INHABIT (THE->A) BODY OF THAT (FINICAL->FINNICAL) GRINNING AND MISCHIEVOUS LITTLE (MIMICK->MIMIC) WITH (FOUR->FULL) LEGS WHICH YOU NOW BEHOLD BEFORE YOU +2609-156975-0000-2367: THEN MOSES WAS AFRAID AND SAID SURELY THE THING IS KNOWN +2609-156975-0001-2368: (HOLD ON HOLD FAST HOLD OUT PATIENCE IS->ERON HER FATS ODOU PATENTS HIS) GENIUS +2609-156975-0002-2369: LET US HAVE FAITH THAT RIGHT (MAKES->MATRON) MIGHT AND IN THAT FAITH LET (US DARE->STARED) TO DO OUR DUTY AS WE UNDERSTAND IT LINCOLN +2609-156975-0003-2370: THE EGYPTIAN BACKGROUND OF THE 
BONDAGE +2609-156975-0004-2371: EVERY ONE (WHO IS TURBULENT->WHOSE TURBOT) HAS BEEN FOUND BY KING (MERNEPTAH THE TESTIMONY OF->MARNET PATH DETACHEMONY AS) THE OLDEST (BIBLICAL NARRATIVES->BAPLICO NARRATIVE) REGARDING THE SOJOURN OF THE HEBREWS IN EGYPT IS ALSO (IN PERFECT->IMPERFECT) ACCORD WITH THE PICTURE WHICH THE CONTEMPORARY EGYPTIAN (INSCRIPTIONS->SCRIPTIONS) GIVE (OF THE->THIS) PERIOD +2609-156975-0005-2372: THE ABSENCE OF (*->THE) DETAILED (REFERENCE TO->REFERENCES) THE HEBREWS IS THEREFORE PERFECTLY NATURAL +2609-156975-0006-2373: IT SEEMS PROBABLE THAT NOT ALL BUT ONLY PART OF (THE->THAT) TRIBES WHICH (ULTIMATELY COALESCED->ULTIMATE COVETTES) INTO THE HEBREW NATION FOUND THEIR WAY TO EGYPT +2609-156975-0007-2374: THE STORIES REGARDING JOSEPH THE TRADITIONAL (FATHER OF EPHRAIM->FOUND THEIR ATRONE) AND (MANASSEH IMPLY->MANOT SAY INCLINE) THAT THESE STRONG CENTRAL TRIBES POSSIBLY TOGETHER WITH THE SOUTHERN (TRIBES->TRINES) OF (BENJAMIN->BINTAMEN) AND JUDAH (WERE->WHERE) THE CHIEF ACTORS (IN THIS OPENING->WHO THAT SOMETHING) SCENE IN ISRAEL'S HISTORY +2609-156975-0008-2375: THE (BIBLICAL->BEVOCO) NARRATIVES APPARENTLY (DISAGREE REGARDING->DISAGRATING GUARDING) THE (DURATION->DIRECTION) OF THE (SOJOURN->SAJOURN) IN EGYPT +2609-156975-0009-2376: THE LATER (TRADITIONS TEND TO EXTEND->JUDICINT INTERESTING) THE PERIOD +2609-156975-0010-2377: (HERE->YOU) WERE FOUND (SEVERAL INSCRIPTIONS->SHEVARIN SCRIPTIONS) BEARING THE EGYPTIAN NAME OF THE CITY (P ATUM HOUSE OF->PATUM OUTSIDE) THE GOD (ATUM->ANTUM) +2609-156975-0011-2378: A CONTEMPORARY INSCRIPTION (ALSO STATES->ONCE ESTATES) THAT HE (FOUNDED->FOUND A) NEAR (PITHUM->PITTHAM) THE HOUSE OF (RAMSES->RAMESES) A CITY WITH (A->THE) ROYAL RESIDENCE AND (TEMPLES->SIMPLES) +2609-156975-0012-2379: THAT THE HEBREWS WERE (RESTIVE->RENTS OF) UNDER THIS (TYRANNY->SOON) WAS (NATURAL->NATURALLY) INEVITABLE +2609-156975-0013-2380: WAS ANY OTHER PROCEDURE TO BE (EXPECTED->SPECTRE) FROM (A DESPOTIC RULER->IT THAT SPONNET ROAR) OF THAT LAND AND DAY +2609-156975-0014-2381: THE MAKING OF (A->THE) LOYAL PATRIOT +2609-156975-0015-2382: THE STORY OF MOSES (BIRTH AND->BERTH AN) EARLY CHILDHOOD IS ONE OF THE MOST INTERESTING CHAPTERS IN (BIBLICAL->BEPPOCO) HISTORY +2609-156975-0016-2383: (WAS MOSES JUSTIFIED IN RESISTING->WISE MOVES IT'S JUST FUN AND RESISTS IN) THE EGYPTIAN (TASKMASTER->TAX MASTER) +2609-156975-0017-2384: (IS PEONAGE->HIS PINION) ALWAYS (DISASTROUS->DISASTRATES) NOT (ONLY->OWING) TO ITS VICTIMS BUT ALSO TO THE GOVERNMENT IMPOSING IT +2609-156975-0018-2385: NATURALLY HE WENT TO THE LAND (OF MIDIAN->OMIDIAN) +2609-156975-0019-2386: THE WILDERNESS TO THE EAST OF EGYPT (HAD->AND) FOR CENTURIES BEEN THE (PLACE->PLATE) OF (REFUGE FOR->REFUGERY) EGYPTIAN (FUGITIVES->FUGITIVE) +2609-156975-0020-2387: FROM (ABOUT->A BOUT) TWO THOUSAND (B->*) C +2609-156975-0021-2388: ON THE BORDERS OF THE (WILDERNESS->WIDERNESS) HE FOUND CERTAIN (BEDOUIN->BEDOING) HERDSMEN WHO RECEIVED HIM (HOSPITABLY->HOW SPEEDABLY) +2609-156975-0022-2389: THESE (SAND WANDERERS->SEND WONDERS) SENT HIM ON FROM (TRIBE->TIME) TO (TRIBE UNTIL HE REACHED->TRIUMPH INTO A REACH) THE LAND OF (KEDEM EAST->KIEDAM EACH) OF THE DEAD SEA WHERE HE REMAINED FOR A YEAR AND A HALF +2609-156975-0023-2390: LATER HE FOUND HIS WAY TO THE COURT OF ONE OF THE LOCAL KINGS (IN->AND) CENTRAL (PALESTINE->PALASTEIN) WHERE HE MARRIED AND BECAME IN (*->THE) TIME A PROSPEROUS LOCAL PRINCE +2609-156975-0024-2391: THE SCHOOL (OF->AND) THE (WILDERNESS->WEARINESS) +2609-156975-0025-2392: THE STORY (OF->*) MOSES IS IN MANY 
WAYS CLOSELY PARALLEL TO THAT (OF SINUHIT->AS SOON WIT) +2609-156975-0026-2393: THE PRIEST OF THE (SUB TRIBE->SUBTERRAB) OF THE (KENITES->KANITE) RECEIVED HIM INTO HIS HOME AND GAVE HIM HIS DAUGHTER IN MARRIAGE +2609-156975-0027-2394: NOTE THE (CHARACTERISTIC->CARE OF RIDICT) ORIENTAL (IDEA OF MARRIAGE->AND GIVE MARY'S) +2609-156975-0028-2395: HERE MOSES (LEARNED THE->LEARNS THAT) LESSONS THAT WERE ESSENTIAL FOR HIS TRAINING AS (THE->A) LEADER AND (DELIVERER->DELIVER) OF HIS PEOPLE +2609-156975-0029-2396: (AFTER->ANSWERED) THE CAPTURE OF JERICHO CERTAIN OF THEM WENT UP WITH (THE SOUTHERN TRIBES TO CONQUER->A SUDDEN TRIUMPHS WHO CONCUR) SOUTHERN PALESTINE +2609-156975-0030-2397: MANY MODERN SCHOLARS (DRAW->DRAWING) THE CONCLUSION FROM THE (BIBLICAL->BIBBICAL) NARRATIVE THAT IT WAS FROM THE (KENITES THAT->KENITE SNAT) MOSES FIRST LEARNED OF (YAHWEH->YANAWAY) OR AS THE DISTINCTIVE NAME OF ISRAEL'S (GOD->GONE) WAS (TRANSLATED->TRANSGRATED) BY LATER (JEWISH SCRIBES->TO ITS GRIMES) JEHOVAH +2609-156975-0031-2398: DO THE (EARLIEST HEBREW TRADITIONS->OIETY BERTRADIZANCE) IMPLY THAT (THE ANCESTORS->INCES) OF THE (ISRAELITES->ISRAITS) WERE (WORSHIPPERS->WORSE SUPPOSED) OF JEHOVAH +2609-156975-0032-2399: THE (TITLE OF HIS FATHER IN LAW->TANOV IS FUND DE MAU) IMPLIES (THAT->AT) THIS PRIEST (MINISTERED->MINISTER) AT SOME (WILDERNESS->LITERN) SANCTUARY +2609-156975-0033-2400: MOSES IN THE HOME OF THE (MIDIAN->MENDIAN) PRIEST WAS BROUGHT INTO DIRECT AND CONSTANT CONTACT WITH THE JEHOVAH WORSHIP +2609-156975-0034-2401: THE CRUEL FATE OF HIS PEOPLE (AND->IN) THE PAINFUL EXPERIENCE IN EGYPT THAT HAD DRIVEN HIM INTO THE WILDERNESS PREPARED HIS MIND TO RECEIVE THIS TRAINING +2609-156975-0035-2402: HIS (QUEST->FRENCH) WAS FOR (A JUST->JETS) AND STRONG GOD ABLE TO (DELIVER->DRIVER) THE OPPRESSED +2609-156975-0036-2403: THE (WILDERNESS->WIDENANCE) WITH ITS LURKING FOES AND THE EVER PRESENT DREAD OF HUNGER AND THIRST (DEEPENED HIS SENSE->DEEP IN DESCENTS) OF NEED AND OF DEPENDENCE UPON (A->THE) POWER ABLE TO (GUIDE->GOD) THE (DESTINIES->DEBT'S NEEDS) OF MEN +2609-156975-0037-2404: THE PEASANTS OF THE (VAST ANTOLIAN PLAIN->VATS IN TOWING) IN (*->PLAIN OF) CENTRAL (ASIA->AS A) MINOR (STILL->SO WILL) CALL EVERY LIFE (GIVING->GIVEN) SPRING GOD HATH GIVEN +2609-156975-0038-2405: (THE CONSTANT->THEY CAN'T SENT THE) NECESSITY (OF->A) MEETING THE DANGERS OF THE (WILDERNESS->WIDERNESS) AND (OF->THE) DEFENDING THE (FLOCKS ENTRUSTED TO MOSES->FLAUNT AND TRITESYMOSA'S) CARE DEVELOPED HIS COURAGE AND POWER OF (LEADERSHIP->LEGERSHIP) AND ACTION +2609-157645-0000-2352: EVIDENTLY THE INTENTION (WAS TO MAKE->WHICH MADE) THINGS (PLEASANT->PRESENT) FOR THE ROYAL (FOE OF->FOLK) TOBACCO DURING HIS VISIT +2609-157645-0001-2353: THE (PROHIBITION IN->PROB'S) THE (REGULATION->REGULATING) QUOTED (OF->HER) SMOKING (IN->AND) SAINT MARY'S CHURCH REFERRED IT MAY BE NOTED TO THE ACT WHICH WAS HELD THEREIN +2609-157645-0002-2354: SOMETIMES TOBACCO (WAS->IS) USED IN CHURCH FOR (DISINFECTING OR DEODORIZING->DISINFACT AND ORDEALIZING) PURPOSES +2609-157645-0003-2355: (BLACKBURN->BRACKBURN) ARCHBISHOP OF YORK WAS A GREAT SMOKER +2609-157645-0004-2356: ON ONE OCCASION HE WAS AT SAINT MARY'S CHURCH (NOTTINGHAM->NINETEEN HAM) FOR A (CONFIRMATION->CONFIRMATON) +2609-157645-0005-2357: ANOTHER EIGHTEENTH CENTURY CLERICAL WORTHY THE FAMOUS (DOCTOR PARR->DOCTRIPOS) AN INVETERATE SMOKER WAS ACCUSTOMED TO DO (WHAT MISTER DISNEY->AT MIDSR DYSNEY) PREVENTED (ARCHBISHOP->ARCHBISH AT) BLACKBURN FROM DOING HE SMOKED IN HIS (VESTRY->VETCHERY) AT HATTON 
+2609-157645-0006-2358: (PARR->POOR) WAS SUCH A (CONTINUAL SMOKER->CONTINUOUS MOKER) THAT (ANYONE->ANY ONE) WHO CAME INTO HIS COMPANY (IF HE->FEET) HAD NEVER SMOKED BEFORE (HAD->AND) TO (LEARN->LEARNED) THE USE OF A PIPE AS A MEANS OF SELF DEFENCE
+2609-157645-0007-2359: ONE SUNDAY SAYS MISTER (DITCHFIELD->DIXFIELD) HE HAD (AN EXTRA->IN THAT SIR) PIPE AND (JOSHUA->JAUNTS HER) THE CLERK TOLD HIM THAT THE PEOPLE WERE GETTING (*->THEM) IMPATIENT
+2609-157645-0008-2360: (LET->THEM TO) THEM SING (ANOTHER PSALM SAID->AND NEITHER PSALMS SAY THAT) THE CURATE
+2609-157645-0009-2361: THEY HAVE SIR REPLIED THE CLERK
+2609-157645-0010-2362: THEN LET THEM SING THE HUNDRED AND NINETEENTH REPLIED THE CURATE
+2609-157645-0011-2363: SIX ARMS THE (NEAREST->NURSE) WITHIN REACH PRESENTED WITH AN OBEDIENT START (*->AND) AS MANY TOBACCO (POUCHES->PIUCES) TO THE MAN OF OFFICE
+2609-157645-0012-2364: DAVID (DEANS HOWEVER->DEAN SAMUR) DID NOT AT ALL (APPROVE->IMPROVE) THIS IRREVERENCE
+2609-157645-0013-2365: (GOING TO->GO INTO) CHURCH (AT HAYES IN THOSE->AUNT HAZE AND THUS THE) DAYS (MUST HAVE->MISS I'VE) BEEN (QUITE->ACQUAINT) AN (EXCITING EXPERIENCE->THESE SIGNING INSPIRANTS)
+2609-157645-0014-2366: WHEN THESE MEN (IN->AND) THE (COURSE->COURTS) OF MY REMONSTRANCE FOUND (*->OUT) THAT (I->*) WAS NOT GOING TO CONTINUE THE CUSTOM THEY NO LONGER CARED TO BE COMMUNICANTS
+2609-169640-0000-2406: (PROAS->PRATS) IN THAT QUARTER WERE USUALLY (DISTRUSTED->DISTRUDGED) BY (SHIPS IT->THE STIPS AT) IS TRUE BUT THE SEA (IS FULL OF->FLORID) THEM (AND FAR->FOR) MORE (ARE->OR) INNOCENT THAN (ARE->OUR) GUILTY OF ANY (ACTS->ACT) OF (VIOLENCE->ONLENETS)
+2609-169640-0001-2407: (AN HOUR->NOW I) AFTER THE SUN HAD SET THE WIND FELL TO (A->AN) LIGHT AIR (THAT JUST->DID ITS) KEPT (STEERAGE WAY->STEERING) ON THE SHIP
+2609-169640-0002-2408: FORTUNATELY THE JOHN WAS NOT ONLY FAST BUT (SHE->SEA) MINDED HER (HELM->HAIL) AS (A->THE) LIGHT FOOTED GIRL (TURNS->TURNED) IN A (LIVELY->LOVELY) DANCE
+2609-169640-0003-2409: (I NEVER->AND EVER) WAS IN A BETTER STEERING SHIP (MOST ESPECIALLY IN MODERATE->POSSES SPENT FOR AND MONDER IT) WEATHER
+2609-169640-0004-2410: MISTER MARBLE HE (I DO->OUGHT TO) BELIEVE WAS (FAIRLY SNOOZING->FAIRLY'S NEWSING) ON THE (HEN COOPS->HINCOUPS) BEING LIKE THE (SAILS->SAILORS) AS ONE MIGHT SAY (BARELY ASLEEP->VARIOUSLY)
+2609-169640-0005-2411: AT THAT MOMENT I (HEARD->HAD) A NOISE (ONE->WHEN) FAMILIAR TO (SEAMEN->SEE MEN) THAT OF AN OAR (FALLING->FOLLOWING) IN (A->THE) BOAT
+2609-169640-0006-2412: (*->AS) I (SANG OUT->YET) SAIL HO AND (CLOSE ABOARD->CLOSER BROAD)
+2609-169640-0007-2413: HE WAS (TOO MUCH->CHIMNETS) OF A SEAMAN TO REQUIRE A SECOND LOOK IN ORDER TO (ASCERTAIN WHAT->ASSERT BUT) WAS TO BE DONE
+2609-169640-0008-2414: (ALTHOUGH->ON THOSE) THEY WENT THREE FEET TO OUR TWO THIS GAVE (US A->UP SOME) MOMENT OF (BREATHING->BREASING) TIME
+2609-169640-0009-2415: AS OUR (SHEETS->SEATS) WERE (ALL FLYING->OFF LYING) FORWARD AND REMAINED SO FOR A FEW MINUTES IT GAVE ME (*->A) LEISURE TO LOOK ABOUT
+2609-169640-0010-2416: I SOON SAW BOTH (PROAS AND GLAD ENOUGH->PROPS IN GRINDING UP) WAS I TO PERCEIVE THAT THEY HAD NOT APPROACHED MATERIALLY (NEARER->NEAR)
+2609-169640-0011-2417: (MISTER KITE OBSERVED THIS->MICHIG) ALSO (AND REMARKED->IN REMARK) THAT OUR MOVEMENTS HAD BEEN SO PROMPT AS TO TAKE THE (RASCALS->RASCUOUS) ABACK
+2609-169640-0012-2418: A (BREATHLESS->BRENT WITCH) STILLNESS SUCCEEDED
+2609-169640-0013-2419: THE (PROAS->POETS) DID NOT (ALTER THEIR->ENTER THE) COURSE BUT NEARED (US->ITS) FAST
+2609-169640-0014-2420: I HEARD THE (RATTLING->RIDING) OF THE BOARDING (PIKES TOO->PINES TO) AS THEY WERE CUT ADRIFT FROM THE SPANKER BOOM AND FELL UPON THE DECKS
+2609-169640-0015-2421: (KITE WENT AFT->KINDLING APT) AND RETURNED WITH THREE OR FOUR MUSKETS AND AS MANY PIKES
+2609-169640-0016-2422: THE STILLNESS THAT (REIGNED->RANGED) ON BOTH SIDES WAS LIKE THAT OF DEATH
+2609-169640-0017-2423: THE JOHN BEHAVED BEAUTIFULLY (AND->HE) CAME ROUND LIKE A (TOP->TAR)
+2609-169640-0018-2424: THE QUESTION WAS NOW WHETHER WE COULD PASS THEM OR NOT BEFORE THEY GOT (NEAR ENOUGH->NEARING UP) TO (GRAPPLE->GRANTPLE)
+2609-169640-0019-2425: THE CAPTAIN BEHAVED PERFECTLY (WELL IN THIS->AWAY ON ITS) CRITICAL INSTANT COMMANDING A DEAD SILENCE (AND->IN) THE (CLOSEST ATTENTION->CLOSETS INTENTION) TO HIS ORDERS
+2609-169640-0020-2426: (NOT A SOUL->NOW AND SO) ON BOARD THE JOHN WAS (HURT->SHARP)
+2609-169640-0021-2427: (ON OUR SIDE->WHEN OURSAND) WE GAVE THE (GENTLEMEN->GENTLEMAN) THE FOUR (SIXES TWO AT->SAXES TO AUNT) THE (NEAREST->NURSE) AND TWO AT THE (STERN MOST PROA WHICH WAS STILL->STERNMOST PRO WHICHELE) NEAR A CABLE'S LENGTH (DISTANT->OF ITS)
+2609-169640-0022-2428: THEY WERE (LIKE->NIGHT) THE (YELLS->YEARS) OF (FIENDS->FIEND) IN (ANGUISH->ENGLISH)
+2609-169640-0023-2429: (I DOUBT->AND OUT) IF WE TOUCHED A MAN IN THE (NEAREST PROA->NEW EXPERIOR)
+2609-169640-0024-2430: IN (THIS->THAT) STATE THE SHIP PASSED AHEAD (ALL->OF) HER CANVAS (BEING FULL->BEEN FOR) LEAVING THE (PROA MOTIONLESS->PROTINENT) IN HER WAKE
+3005-163389-0000-1108: THEY SWARMED UP IN FRONT (OF SHERBURN'S->A SHERBOURNE'S) PALINGS AS THICK AS THEY COULD JAM TOGETHER AND YOU COULDN'T HEAR YOURSELF THINK FOR THE NOISE
+3005-163389-0001-1109: SOME SUNG OUT TEAR DOWN THE FENCE TEAR DOWN THE FENCE
+3005-163389-0002-1110: THE STILLNESS WAS AWFUL CREEPY AND UNCOMFORTABLE
+3005-163389-0003-1111: (SHERBURN->SHERBIN) RUN HIS (EYE->EYES) SLOW ALONG THE CROWD AND WHEREVER IT STRUCK THE PEOPLE TRIED A LITTLE TO (OUT GAZE->OUTGAZE) HIM BUT THEY COULDN'T THEY DROPPED THEIR EYES AND LOOKED SNEAKY
+3005-163389-0004-1112: THE AVERAGE MAN'S A COWARD
+3005-163389-0005-1113: BECAUSE THEY'RE AFRAID THE MAN'S FRIENDS WILL SHOOT THEM IN THE BACK IN THE (DARKAND IT'S->DARK AND IS) JUST WHAT THEY WOULD DO
+3005-163389-0006-1114: SO THEY ALWAYS ACQUIT AND THEN A MAN GOES IN THE NIGHT WITH A HUNDRED (MASKED->MASSED) COWARDS AT HIS BACK AND LYNCHES THE RASCAL
+3005-163389-0007-1115: YOU DIDN'T WANT TO COME
+3005-163389-0008-1116: BUT A MOB WITHOUT ANY MAN AT THE HEAD OF IT IS BENEATH PITIFULNESS
+3005-163389-0009-1117: NOW LEAVE AND TAKE YOUR HALF A MAN WITH YOU (TOSSING HIS->TAUSEN HE HAS) GUN UP ACROSS HIS LEFT ARM AND COCKING IT WHEN HE SAYS THIS
+3005-163389-0010-1118: THE CROWD WASHED BACK SUDDEN AND THEN BROKE ALL APART AND WENT TEARING OFF EVERY WHICH WAY AND BUCK HARKNESS HE (HEELED->HEALED) IT AFTER THEM LOOKING TOLERABLE (CHEAP->CHEEK)
+3005-163389-0011-1119: (YOU->HE) CAN'T BE TOO CAREFUL
+3005-163389-0012-1120: THEY ARGUED AND TRIED TO KEEP HIM OUT BUT HE WOULDN'T LISTEN AND (THE->A) WHOLE SHOW COME TO A (STANDSTILL->FANSTILL)
+3005-163389-0013-1121: AND ONE OR TWO WOMEN (BEGUN->BEGAN) TO SCREAM
+3005-163389-0014-1122: SO THEN (THE RINGMASTER->A RING MASTER) HE MADE A LITTLE SPEECH AND SAID HE HOPED THERE WOULDN'T BE NO DISTURBANCE AND IF THE MAN WOULD PROMISE HE WOULDN'T MAKE NO MORE TROUBLE HE WOULD LET HIM RIDE IF HE THOUGHT HE COULD STAY ON THE HORSE
+3005-163389-0015-1123: IT WARN'T FUNNY TO ME THOUGH I WAS ALL OF A TREMBLE TO SEE HIS DANGER
+3005-163389-0016-1124: AND (THE->A) HORSE A GOING LIKE A HOUSE (AFIRE->AFAR) TOO
+3005-163389-0017-1125: HE (SHED->SHARED) THEM SO THICK (THEY->THAT) KIND OF CLOGGED UP THE AIR AND ALTOGETHER HE SHED SEVENTEEN SUITS
+3005-163389-0018-1126: WHY IT WAS ONE OF HIS OWN MEN
+3005-163390-0000-1185: (ANDBUT->AND BUT) NEVER MIND THE REST OF HIS OUTFIT IT WAS JUST WILD BUT IT WAS AWFUL FUNNY
+3005-163390-0001-1186: THE PEOPLE MOST KILLED THEMSELVES LAUGHING AND WHEN THE KING GOT DONE CAPERING AND CAPERED OFF BEHIND THE SCENES THEY ROARED AND CLAPPED AND STORMED AND (HAW->*) HAWED TILL HE COME BACK AND DONE IT OVER AGAIN AND AFTER THAT THEY MADE HIM DO IT ANOTHER TIME
+3005-163390-0002-1187: TWENTY PEOPLE (SINGS->SANGS) OUT
+3005-163390-0003-1188: THE DUKE SAYS YES
+3005-163390-0004-1189: EVERYBODY SINGS OUT SOLD
+3005-163390-0005-1190: BUT A BIG FINE LOOKING MAN JUMPS UP ON A BENCH (AND->AN) SHOUTS HOLD ON
+3005-163390-0006-1191: JUST A WORD GENTLEMEN THEY STOPPED TO LISTEN
+3005-163390-0007-1192: WHAT WE WANT IS TO GO OUT OF HERE QUIET AND TALK (THIS->TO) SHOW UP AND SELL THE REST (OF->O) THE TOWN
+3005-163390-0008-1193: YOU BET IT IS THE (JEDGE->JUDGE) IS RIGHT EVERYBODY SINGS OUT
+3005-163390-0009-1194: WE STRUCK THE RAFT AT THE SAME TIME AND IN LESS THAN TWO SECONDS WE WAS GLIDING DOWN STREAM ALL DARK AND STILL AND EDGING TOWARDS THE MIDDLE OF THE RIVER NOBODY SAYING A WORD
+3005-163390-0010-1195: WE NEVER SHOWED A LIGHT TILL WE WAS ABOUT TEN MILE BELOW THE VILLAGE
+3005-163390-0011-1196: GREENHORNS (FLATHEADS->FLAT HEADS)
+3005-163390-0012-1197: NO I (SAYS->SAY IS) IT DON'T
+3005-163390-0013-1198: WELL IT DON'T BECAUSE IT'S IN (THE BREED->TO BREATHE) I RECKON THEY'RE ALL ALIKE
+3005-163390-0014-1199: WELL THAT'S WHAT I'M A SAYING ALL KINGS IS MOSTLY (RAPSCALLIONS->RASCALIONS) AS FUR AS I (CAN->KIN) MAKE OUT IS DAT SO
+3005-163390-0015-1200: AND LOOK AT CHARLES SECOND AND LOUIS FOURTEEN AND LOUIS FIFTEEN AND JAMES SECOND AND EDWARD SECOND AND RICHARD THIRD AND FORTY MORE BESIDES ALL THEM SAXON HEPTARCHIES THAT USED TO RIP AROUND SO (IN->WHEN) OLD TIMES AND RAISE (CAIN->GAME)
+3005-163390-0016-1201: MY YOU OUGHT TO (SEEN->SEE AN) OLD HENRY THE EIGHT WHEN HE WAS IN BLOOM HE WAS A BLOSSOM
+3005-163390-0017-1202: (RING->RANG) UP FAIR (ROSAMUN->ROSAMOND)
+3005-163390-0018-1203: WELL HENRY HE TAKES A NOTION HE WANTS TO GET UP SOME TROUBLE WITH THIS COUNTRY
+3005-163390-0019-1204: S'POSE HE OPENED HIS (MOUTHWHAT THEN->MOUTH WITHIN)
+3005-163390-0020-1205: ALL I SAY IS KINGS (IS->AS) KINGS (AND YOU->AN YE) GOT TO MAKE ALLOWANCES
+3005-163390-0021-1206: TAKE THEM ALL AROUND THEY'RE A MIGHTY ORNERY LOT IT'S THE WAY THEY'RE RAISED
+3005-163390-0022-1207: WELL THEY ALL DO JIM
+3005-163390-0023-1208: NOW (DE DUKE->TO DO) HE'S A (TOLERBLE LIKELY->TOLERABLE LIKE THE) MAN IN SOME WAYS
+3005-163390-0024-1209: THIS ONE'S A (MIDDLING->MIDDLIN) HARD LOT FOR A DUKE
+3005-163390-0025-1210: WHEN I WAKED UP (JUST->JEST) AT DAYBREAK HE WAS SITTING THERE WITH HIS HEAD DOWN BETWIXT HIS KNEES MOANING AND MOURNING TO HIMSELF
+3005-163390-0026-1211: IT DON'T SEEM NATURAL BUT I RECKON IT'S SO
+3005-163390-0027-1212: HE WAS OFTEN MOANING AND MOURNING (*->IN) THAT WAY NIGHTS WHEN HE JUDGED I WAS ASLEEP AND SAYING (PO LITTLE LIZABETH->POLIT LISBETH)
+3005-163390-0028-1213: (DOAN->DON'T) YOU HEAR ME (SHET DE DO->SHUT DEAD DOUGH)
+3005-163390-0029-1214: I LAY I MAKE YOU MINE
+3005-163390-0030-1215: (JIS AS->IT IS) LOUD AS I COULD YELL
+3005-163391-0000-1127: WHICH WAS SOUND ENOUGH JUDGMENT BUT YOU TAKE THE AVERAGE MAN AND HE WOULDN'T WAIT FOR HIM TO (HOWL->HOW)
+3005-163391-0001-1128: THE (KING'S DUDS->KING DEADS) WAS ALL BLACK AND HE DID LOOK REAL (SWELL AND->SWELLIN) STARCHY
+3005-163391-0002-1129: WHY BEFORE HE LOOKED LIKE THE (ORNERIEST->ORNEIST) OLD RIP THAT EVER WAS BUT NOW WHEN HE'D TAKE OFF HIS NEW WHITE BEAVER AND MAKE A BOW AND DO A SMILE HE LOOKED THAT GRAND AND GOOD AND PIOUS THAT YOU'D SAY HE HAD WALKED RIGHT OUT OF THE ARK AND MAYBE WAS OLD (LEVITICUS->LEVIKUS) HIMSELF
+3005-163391-0003-1130: JIM CLEANED UP THE CANOE AND I GOT MY PADDLE READY
+3005-163391-0004-1131: (WHER->WERE) YOU BOUND FOR YOUNG MAN
+3005-163391-0005-1132: (GIT->GET) ABOARD SAYS THE KING
+3005-163391-0006-1133: I DONE SO AND (THEN->THEY) WE ALL THREE STARTED ON AGAIN
+3005-163391-0007-1134: THE YOUNG CHAP WAS MIGHTY THANKFUL SAID (IT->HE) WAS TOUGH WORK (TOTING->TOATING) HIS BAGGAGE SUCH WEATHER
+3005-163391-0008-1135: (HE ASKED->PIERRE) THE KING WHERE HE WAS GOING AND THE KING TOLD HIM HE'D COME DOWN (THE->A) RIVER AND (LANDED->LAND IT) AT THE OTHER VILLAGE THIS MORNING AND NOW HE WAS GOING UP A FEW (MILE->MILES) TO SEE AN OLD FRIEND ON A FARM UP THERE THE YOUNG FELLOW SAYS
+3005-163391-0009-1136: BUT THEN I SAYS AGAIN NO I RECKON IT AIN'T HIM OR ELSE HE WOULDN'T BE (PADDLING->PADDLIN) UP THE RIVER YOU AIN'T HIM ARE YOU
+3005-163391-0010-1137: NO MY NAME'S (BLODGETT ELEXANDER BLODGETT->BLADGE IT ALEXANDER BLADGET) REVEREND (ELEXANDER BLODGETT->ALEXANDER BLOTCHETT) I S'POSE I MUST SAY AS I'M ONE (O->OF) THE (LORD'S->LARGE) POOR SERVANTS
+3005-163391-0011-1138: (YOU->YE) SEE HE WAS PRETTY OLD (AND GEORGE'S G'YIRLS->AN GEORGE IS GOOD EARL'S) WAS TOO YOUNG TO BE MUCH COMPANY FOR HIM EXCEPT MARY JANE THE RED HEADED ONE AND SO HE WAS (KINDER->KIND OR) LONESOME AFTER GEORGE AND HIS WIFE DIED AND DIDN'T SEEM TO CARE MUCH TO LIVE
+3005-163391-0012-1139: (TOO->DO) BAD TOO BAD HE COULDN'T (A->HAVE) LIVED TO SEE HIS (BROTHERS->BROTHER'S) POOR SOUL
+3005-163391-0013-1140: I'M GOING IN A SHIP NEXT WEDNESDAY (FOR RYO JANEERO->FER RYEO GENERO) WHERE MY UNCLE (LIVES->IS)
+3005-163391-0014-1141: BUT IT'LL BE LOVELY (WISHT->WISHED) I WAS A (GOING->GOIN)
+3005-163391-0015-1142: MARY JANE'S NINETEEN SUSAN'S FIFTEEN AND JOANNA'S ABOUT (FOURTEENTHAT'S->FOURTEEN THAT'S) THE ONE THAT GIVES HERSELF TO GOOD WORKS AND HAS A (HARE->HAIR) LIP POOR THINGS
+3005-163391-0016-1143: WELL THEY COULD BE WORSE OFF
+3005-163391-0017-1144: (OLD->O) PETER HAD FRIENDS AND THEY AIN'T (GOING->GOIN) TO LET THEM COME TO NO HARM
+3005-163391-0018-1145: BLAMED IF HE DIDN'T (INQUIRE->ACQUIRE) ABOUT EVERYBODY AND EVERYTHING (IN->AND) THAT BLESSED TOWN AND ALL ABOUT THE (WILKSES->WILTZES) AND ABOUT PETER'S (BUSINESSWHICH->BUSINESS WHICH) WAS A TANNER AND ABOUT (GEORGE'SWHICH->GEORGE'S WHICH) WAS A CARPENTER AND ABOUT (HARVEY'SWHICH->HARVEST WHICH) WAS A DISSENTERING MINISTER AND SO ON AND SO ON THEN HE SAYS
+3005-163391-0019-1146: WHEN (THEY'RE->HER) DEEP THEY WON'T STOP FOR A HAIL
+3005-163391-0020-1147: WAS PETER (WILKS->WILKES) WELL OFF
+3005-163391-0021-1148: WHEN (WE STRUCK->WAS DRAP) THE BOAT SHE WAS ABOUT DONE LOADING (AND->AN) PRETTY SOON SHE GOT OFF
+3005-163391-0022-1149: NOW HUSTLE BACK RIGHT OFF AND FETCH THE DUKE UP HERE AND THE NEW CARPET BAGS
+3005-163391-0023-1150: SO THEN THEY WAITED FOR A STEAMBOAT
+3005-163391-0024-1151: (BUT->THAT) THE KING WAS (CA'M->CALM) HE SAYS
+3005-163391-0025-1152: THEY GIVE A GLANCE AT ONE ANOTHER AND NODDED THEIR HEADS AS MUCH AS TO SAY (WHAT D I->WOULD DAT) TELL YOU
+3005-163391-0026-1153: THEN ONE OF THEM SAYS KIND (OF->O) SOFT AND GENTLE
+3005-163399-0000-1154: PHELPS (*->AS) WAS ONE OF THESE LITTLE ONE HORSE COTTON PLANTATIONS AND THEY ALL LOOK ALIKE
+3005-163399-0001-1155: I WENT AROUND AND (CLUMB->CLIMB) OVER THE BACK STILE BY THE (ASH HOPPER->ASHHOPPER) AND STARTED FOR THE KITCHEN
+3005-163399-0002-1156: (I->AH) OUT WITH A YES'M (BEFORE->FOUR) I THOUGHT
+3005-163399-0003-1157: SO THEN SHE STARTED FOR THE HOUSE LEADING ME BY THE HAND AND THE CHILDREN TAGGING AFTER
+3005-163399-0004-1158: WHEN WE GOT THERE SHE SET ME DOWN IN A SPLIT (BOTTOMED->BOTTOM) CHAIR AND SET HERSELF DOWN ON A LITTLE LOW STOOL IN FRONT OF ME HOLDING BOTH OF MY HANDS AND SAYS
+3005-163399-0005-1159: WELL IT'S LUCKY BECAUSE SOMETIMES PEOPLE DO GET HURT
+3005-163399-0006-1160: AND I THINK HE DIED AFTERWARDS HE WAS A BAPTIST
+3005-163399-0007-1161: YES IT WAS (MORTIFICATIONTHAT->MORTIFICATION THAT) WAS IT
+3005-163399-0008-1162: YOUR UNCLE'S BEEN UP TO THE TOWN EVERY DAY TO FETCH YOU
+3005-163399-0009-1163: YOU MUST (A MET->IMMERED) HIM ON THE ROAD DIDN'T YOU OLDISH MAN (WITH->WIDTH) A
+3005-163399-0010-1164: WHY CHILD (IT LL BE STOLE->IT'LL BESTOW)
+3005-163399-0011-1165: IT WAS KINDER THIN ICE BUT I SAYS
+3005-163399-0012-1166: I HAD MY MIND ON THE CHILDREN ALL THE TIME I WANTED TO (GET->GIT) THEM OUT TO ONE SIDE AND PUMP THEM A LITTLE AND FIND OUT WHO I WAS
+3005-163399-0013-1167: (PRETTY->BERTIE) SOON SHE MADE THE COLD (CHILLS->CHILL) STREAK ALL DOWN MY BACK BECAUSE SHE SAYS
+3005-163399-0014-1168: I SEE IT WARN'T A BIT OF USE TO TRY TO GO AHEAD I'D GOT TO THROW UP MY HAND
+3005-163399-0015-1169: SO I SAYS TO MYSELF HERE'S ANOTHER PLACE WHERE I GOT TO (RESK->REST) THE TRUTH
+3005-163399-0016-1170: I OPENED MY MOUTH TO BEGIN BUT SHE GRABBED ME AND HUSTLED ME IN BEHIND THE BED AND SAYS HERE HE COMES
+3005-163399-0017-1171: CHILDREN DON'T YOU SAY A WORD
+3005-163399-0018-1172: I SEE I WAS IN A FIX NOW
+3005-163399-0019-1173: MISSUS (PHELPS->PHILP) SHE (JUMPS->JUMPED) FOR HIM AND SAYS
+3005-163399-0020-1174: (HAS->AS) HE COME NO SAYS HER HUSBAND
+3005-163399-0021-1175: I CAN'T IMAGINE SAYS THE OLD GENTLEMAN AND I MUST SAY IT MAKES ME DREADFUL UNEASY
+3005-163399-0022-1176: UNEASY SHE SAYS I'M READY TO GO DISTRACTED
+3005-163399-0023-1177: HE MUST (A->HAVE) COME AND YOU'VE MISSED HIM ALONG THE ROAD
+3005-163399-0024-1178: OH DON'T DISTRESS ME ANY (MORE'N->MORE) I'M ALREADY DISTRESSED
+3005-163399-0025-1179: WHY SILAS LOOK YONDER UP THE ROAD AIN'T THAT SOMEBODY (COMING->COMIN)
+3005-163399-0026-1180: THE OLD GENTLEMAN STARED AND SAYS
+3005-163399-0027-1181: I HAIN'T NO IDEA WHO IS IT
+3005-163399-0028-1182: (IT'S->IS) TOM SAWYER
+3005-163399-0029-1183: BEING TOM SAWYER WAS EASY AND COMFORTABLE AND (IT STAYED EASY->ITS STEESEY) AND COMFORTABLE TILL BY AND BY I HEAR A STEAMBOAT (COUGHING->COFFIN) ALONG DOWN THE RIVER
+3005-163399-0030-1184: THEN I SAYS TO MYSELF S'POSE TOM SAWYER COMES DOWN ON THAT BOAT
+3080-5032-0000-312: BUT I AM HUGELY PLEASED THAT YOU HAVE SEEN MY LADY
+3080-5032-0001-313: I KNEW YOU COULD NOT CHOOSE BUT LIKE HER BUT YET LET ME TELL YOU YOU HAVE SEEN BUT THE WORST OF HER
+3080-5032-0002-314: HER CONVERSATION HAS MORE CHARMS (THAN->AND) CAN BE IN MERE BEAUTY AND (HER->A) HUMOUR AND DISPOSITION WOULD MAKE A DEFORMED PERSON APPEAR LOVELY
+3080-5032-0003-315: WHY DID YOU NOT SEND ME THAT NEWS AND A GARLAND
+3080-5032-0004-316: (WELL->WHY) THE BEST (ON'T->ON IT) IS (*->THAT) I HAVE A SQUIRE NOW THAT IS AS GOOD AS A KNIGHT
+3080-5032-0005-317: IN EARNEST WE HAVE HAD SUCH A SKIRMISH AND UPON SO FOOLISH AN OCCASION AS I CANNOT TELL WHICH (IS STRANGEST->YOUR STRANGER'S)
+3080-5032-0006-318: ALL THE PEOPLE THAT I HAD EVER IN MY LIFE REFUSED WERE BROUGHT AGAIN UPON THE STAGE LIKE RICHARD THE (THREE S->THIRD'S) GHOSTS TO REPROACH ME (WITHAL AND ALL THE->WITH ALRE) KINDNESS HIS DISCOVERIES COULD MAKE I HAD FOR YOU WAS LAID TO MY CHARGE
+3080-5032-0007-319: MY BEST QUALITIES IF I HAVE ANY THAT ARE GOOD SERVED BUT FOR AGGRAVATIONS OF MY FAULT AND I WAS ALLOWED TO HAVE WIT AND UNDERSTANDING AND DISCRETION IN OTHER THINGS THAT IT MIGHT APPEAR I HAD NONE IN THIS
+3080-5032-0008-320: TIS A STRANGE CHANGE AND I AM VERY SORRY FOR IT BUT I'LL SWEAR I KNOW NOT HOW TO HELP IT
+3080-5032-0009-321: MISTER FISH IS (THE->A) SQUIRE OF DAMES AND HAS SO MANY MISTRESSES THAT ANYBODY MAY PRETEND (A->TO) SHARE IN HIM AND BE BELIEVED (BUT->THAT) THOUGH I HAVE THE HONOUR TO BE HIS NEAR NEIGHBOUR TO SPEAK FREELY I CANNOT BRAG MUCH THAT HE MAKES ANY COURT TO ME AND I KNOW NO YOUNG WOMAN IN THE COUNTRY THAT HE DOES NOT VISIT OFTEN
+3080-5032-0010-322: I THINK MY YOUNGEST BROTHER COMES DOWN WITH HIM
+3080-5032-0011-323: I CAN NO SOONER GIVE YOU SOME LITTLE HINTS (WHEREABOUTS->WHEREABOUT) THEY LIVE BUT YOU KNOW THEM PRESENTLY AND I MEANT YOU SHOULD BE BEHOLDING TO ME FOR YOUR ACQUAINTANCE
+3080-5032-0012-324: BUT IT SEEMS THIS GENTLEMAN IS NOT SO EASY (ACCESS->AXIS) BUT YOU MAY ACKNOWLEDGE SOMETHING DUE TO ME IF I INCLINE HIM TO LOOK GRACIOUSLY UPON YOU AND THEREFORE THERE IS NOT MUCH HARM DONE
+3080-5032-0013-325: I HAVE MISSED FOUR FITS AND (*->HAVE) HAD BUT FIVE AND HAVE RECOVERED SO MUCH STRENGTH AS MADE ME VENTURE TO MEET YOUR LETTER ON WEDNESDAY A MILE FROM HOME
+3080-5032-0014-326: BUT BESIDES I CAN GIVE YOU OTHERS
+3080-5032-0015-327: I AM HERE MUCH MORE OUT OF PEOPLE'S WAY THAN IN TOWN WHERE MY (AUNT->AUNTS) AND SUCH (AS->HAS) PRETEND (AN->AND) INTEREST IN ME AND A POWER OVER ME DO SO PERSECUTE ME WITH (THEIR->DEAR) GOOD NATURE AND TAKE IT SO ILL THAT THEY ARE NOT ACCEPTED AS I WOULD LIVE IN A HOLLOW TREE TO AVOID THEM
+3080-5032-0016-328: YOU WILL THINK HIM ALTERED AND IF IT BE POSSIBLE MORE MELANCHOLY THAN HE WAS
+3080-5032-0017-329: IF MARRIAGE AGREES NO BETTER (WITH OTHER->WHETHER) PEOPLE THAN IT DOES WITH HIM I SHALL PRAY THAT ALL MY FRIENDS MAY (SCAPE->ESCAPE) IT
+3080-5032-0018-330: WELL IN (EARNEST->HONEST) IF I WERE A PRINCE THAT LADY SHOULD BE MY MISTRESS BUT I CAN GIVE NO RULE TO ANY ONE ELSE AND PERHAPS THOSE THAT ARE IN NO DANGER OF LOSING THEIR HEARTS TO HER MAY BE INFINITELY TAKEN WITH ONE I SHOULD NOT VALUE AT ALL FOR SO SAYS THE JUSTINIAN WISE PROVIDENCE HAS ORDAINED IT THAT BY THEIR DIFFERENT HUMOURS EVERYBODY MIGHT FIND SOMETHING TO PLEASE THEMSELVES WITHAL WITHOUT ENVYING THEIR (NEIGHBOURS->NEIGHBORS)
+3080-5032-0019-331: THE MATTER IS NOT GREAT FOR I CONFESS I DO NATURALLY HATE THE NOISE AND TALK OF THE WORLD AND SHOULD BE BEST PLEASED NEVER TO BE KNOWN (IN'T->IN) UPON ANY OCCASION WHATSOEVER YET SINCE IT CAN NEVER BE WHOLLY AVOIDED ONE MUST SATISFY ONESELF BY DOING NOTHING THAT ONE NEED CARE (WHO KNOWS->ONE ELSE)
+3080-5032-0020-332: (IF I HAD->YOU BY HEART) A PICTURE THAT WERE FIT FOR YOU YOU SHOULD HAVE IT
+3080-5032-0021-333: HOW CAN YOU TALK OF DEFYING FORTUNE NOBODY LIVES WITHOUT IT AND THEREFORE WHY SHOULD YOU IMAGINE YOU COULD
+3080-5032-0022-334: I KNOW NOT HOW MY BROTHER COMES TO BE SO WELL INFORMED AS YOU SAY BUT I AM CERTAIN HE KNOWS (THE->*) UTMOST OF THE INJURIES YOU HAVE RECEIVED FROM HER
+3080-5032-0023-335: WE HAVE HAD ANOTHER DEBATE BUT MUCH MORE CALMLY
+3080-5032-0024-336: (AND->THEN) BESIDES THERE WAS A TIME WHEN WE OURSELVES WERE INDIFFERENT TO ONE ANOTHER DID I DO SO THEN OR HAVE I LEARNED IT SINCE
+3080-5032-0025-337: I HAVE BEEN STUDYING HOW TOM (CHEEKE->CHEEK) MIGHT COME BY HIS INTELLIGENCE AND I (VERILY->VERY) BELIEVE HE HAS IT FROM MY COUSIN PETERS
+3080-5032-0026-338: HOW KINDLY DO I TAKE (THESE->THE) CIVILITIES OF YOUR (FATHER'S->FATHERS) IN EARNEST YOU CANNOT IMAGINE HOW HIS LETTER PLEASED ME
+3080-5040-0000-278: WOULD IT WOULD LEAVE ME AND THEN I COULD BELIEVE I SHALL NOT ALWAYS HAVE OCCASION FOR IT
+3080-5040-0001-279: MY POOR LADY (VAVASOUR->VAVASOR) IS (CARRIED TO THE->CHARACTER) TOWER AND HER GREAT BELLY COULD NOT EXCUSE HER BECAUSE SHE WAS ACQUAINTED BY SOMEBODY THAT THERE WAS A PLOT AGAINST THE PROTECTOR AND DID NOT DISCOVER IT
+3080-5040-0002-280: SHE HAS TOLD NOW ALL THAT WAS TOLD HER BUT VOWS SHE WILL NEVER SAY FROM WHENCE SHE HAD IT WE SHALL SEE WHETHER HER RESOLUTIONS ARE AS UNALTERABLE AS THOSE OF MY LADY (TALMASH->THOMAS)
+3080-5040-0003-281: I WONDER HOW SHE BEHAVED HERSELF WHEN SHE WAS MARRIED
+3080-5040-0004-282: I NEVER SAW ANY ONE YET THAT DID NOT LOOK SIMPLY AND OUT OF COUNTENANCE (NOR EVER->WHATEVER) KNEW A WEDDING WELL DESIGNED BUT ONE AND THAT WAS OF TWO PERSONS WHO HAD TIME ENOUGH I CONFESS TO CONTRIVE IT AND NOBODY TO PLEASE (IN'T->IN) BUT THEMSELVES
+3080-5040-0005-283: THE TRUTH IS I COULD NOT ENDURE TO BE MISSUS BRIDE IN A PUBLIC WEDDING TO BE MADE THE HAPPIEST PERSON ON EARTH
+3080-5040-0006-284: DO NOT TAKE IT ILL FOR I WOULD ENDURE IT IF I COULD RATHER THAN FAIL BUT IN EARNEST I DO NOT THINK IT WERE POSSIBLE FOR ME
+3080-5040-0007-285: YET IN EARNEST YOUR FATHER WILL NOT FIND MY BROTHER PEYTON WANTING IN CIVILITY THOUGH HE IS NOT A MAN OF MUCH COMPLIMENT UNLESS IT BE IN HIS LETTERS TO ME (NOR->NO) AN UNREASONABLE PERSON IN ANYTHING SO HE WILL ALLOW HIM OUT OF HIS KINDNESS TO HIS WIFE TO SET A HIGHER VALUE UPON HER SISTER THAN SHE DESERVES
+3080-5040-0008-286: MY AUNT TOLD ME NO LONGER (AGONE THAN->A GOD IN) YESTERDAY THAT I WAS THE MOST WILFUL WOMAN THAT EVER SHE KNEW AND HAD AN OBSTINACY OF SPIRIT NOTHING COULD OVERCOME TAKE HEED
+3080-5040-0009-287: YOU SEE I GIVE YOU FAIR WARNING
+3080-5040-0010-288: BY THE NEXT I SHALL BE GONE INTO KENT AND MY OTHER JOURNEY IS LAID ASIDE WHICH I AM NOT DISPLEASED AT BECAUSE IT WOULD HAVE BROKEN OUR INTERCOURSE VERY MUCH
+3080-5040-0011-289: HERE ARE SOME VERSES OF (COWLEY'S->COLLEASE) TELL ME HOW YOU LIKE THEM
+3080-5040-0012-290: I TOLD YOU IN MY LAST THAT MY (SUFFOLK->SUFFOLD) JOURNEY WAS LAID ASIDE AND THAT INTO KENT HASTENED
+3080-5040-0013-291: IF I DROWN BY THE WAY THIS WILL BE MY LAST LETTER AND LIKE A WILL I BEQUEATH ALL MY KINDNESS TO YOU IN IT WITH A CHARGE NEVER TO BESTOW IT ALL UPON ANOTHER MISTRESS LEST MY GHOST RISE AGAIN AND HAUNT YOU
+3080-5040-0014-292: INDEED I LIKE HIM EXTREMELY AND HE IS COMMENDED TO ME BY PEOPLE THAT KNOW HIM VERY WELL AND ARE ABLE TO JUDGE FOR A MOST EXCELLENT SERVANT AND FAITHFUL AS POSSIBLE
+3080-5040-0015-293: BECAUSE YOU FIND FAULT WITH MY OTHER LETTERS THIS IS LIKE TO BE SHORTER THAN THEY I DID NOT INTEND IT SO THOUGH I CAN ASSURE YOU
+3080-5040-0016-294: I DO NOT FIND IT THOUGH I AM TOLD I WAS SO EXTREMELY WHEN I BELIEVED YOU (LOVED->LOVE) ME
+3080-5040-0017-295: BUT I AM CALLED UPON
+3080-5040-0018-296: DIRECTED FOR YOUR MASTER
+3080-5040-0019-297: I SEE YOU CAN (CHIDE->CHID) WHEN YOU PLEASE AND WITH AUTHORITY BUT I DESERVE IT I CONFESS AND ALL I CAN SAY FOR MYSELF IS THAT MY FAULT PROCEEDED FROM A VERY GOOD PRINCIPLE IN ME
+3080-5040-0020-298: WE DARE NOT LET OUR TONGUES LIE MORE ON ONE SIDE OF OUR (MOUTHS->MOTHS) THAN (T'OTHER->THE OTHER) FOR FEAR OF OVERTURNING IT
+3080-5040-0021-299: YOU ARE SATISFIED I HOPE (ERE->AT) THIS THAT I (SCAPED->ESCAPED) DROWNING
+3080-5040-0022-300: BUT I AM TROUBLED MUCH YOU SHOULD MAKE SO ILL A JOURNEY TO SO LITTLE PURPOSE INDEED I (WRIT->WRITE) BY THE FIRST POST AFTER MY ARRIVAL HERE AND CANNOT IMAGINE HOW YOU CAME TO MISS OF MY LETTERS
+3080-5040-0023-301: (HOW->OH) WELCOME YOU WILL BE BUT ALAS
+3080-5040-0024-302: FOR MY LIFE I CANNOT BEAT INTO THEIR HEADS A PASSION THAT MUST BE SUBJECT TO NO DECAY (AN->AND) EVEN PERFECT KINDNESS THAT MUST LAST PERPETUALLY WITHOUT THE LEAST INTERMISSION
+3080-5040-0025-303: THEY LAUGH TO HEAR ME SAY THAT ONE UNKIND WORD WOULD DESTROY ALL THE SATISFACTION OF MY LIFE AND THAT I SHOULD EXPECT OUR KINDNESS SHOULD INCREASE EVERY DAY IF IT WERE POSSIBLE BUT NEVER LESSEN
+3080-5040-0026-304: WE GO (ABROAD->BROAD) ALL DAY AND PLAY ALL NIGHT AND (SAY->SEE) OUR (PRAYERS->PRAY AS) WHEN WE HAVE TIME
+3080-5040-0027-305: (WELL->WHILE) IN SOBER EARNEST NOW I WOULD NOT LIVE THUS (A->AT) TWELVEMONTH TO GAIN ALL THAT (THE->*) KING HAS LOST UNLESS IT WERE TO GIVE IT HIM AGAIN
+3080-5040-0028-306: WILL YOU BE SO GOOD NATURED
+3080-5040-0029-307: HE HAS ONE SON AND TIS THE FINEST BOY THAT (E'ER->ERE) YOU SAW AND HAS A NOBLE SPIRIT BUT YET STANDS IN THAT AWE OF HIS FATHER THAT ONE WORD FROM HIM IS AS MUCH AS TWENTY WHIPPINGS
+3080-5040-0030-308: YOU MUST GIVE ME LEAVE TO ENTERTAIN (YOU THUS->YOURSELVES) WITH DISCOURSES OF THE FAMILY FOR I CAN TELL YOU NOTHING ELSE FROM HENCE
+3080-5040-0031-309: NOT TO KNOW WHEN YOU (WOULD->HAD) COME HOME I CAN ASSURE YOU (NOR->NO) FOR ANY OTHER (OCCASION->CAPTAIN) OF MY OWN BUT WITH A COUSIN OF MINE THAT HAD LONG (DESIGNED->DESIGN) TO MAKE HERSELF SPORT WITH HIM AND DID NOT MISS OF HER AIM
+3080-5040-0032-310: IN MY LIFE I NEVER HEARD SO RIDICULOUS A DISCOURSE AS HE MADE US AND NO OLD WOMAN WHO (PASSES->PAUSES) FOR A WITCH COULD HAVE BEEN MORE PUZZLED TO SEEK WHAT TO SAY TO REASONABLE PEOPLE THAN HE WAS
+3080-5040-0033-311: EVER SINCE THIS ADVENTURE I HAVE HAD SO GREAT A BELIEF IN ALL THINGS (OF->FOR) THIS NATURE THAT I COULD NOT FORBEAR LAYING A (PEAS COD->PEASE CART) WITH NINE PEAS (IN'T->INTO) UNDER MY DOOR YESTERDAY AND WAS INFORMED BY IT THAT MY HUSBAND'S NAME SHOULD BE THOMAS HOW DO YOU LIKE THAT
+3331-159605-0000-695: SHE PULLED HER HAIR DOWN TURNED (HER SKIRT->HIS GOOD) BACK PUT HER FEET ON THE FENDER AND TOOK (PUTTEL->POTTLE) INTO HER LAP ALL OF WHICH ARRANGEMENTS SIGNIFIED THAT SOMETHING VERY IMPORTANT HAD GOT TO BE THOUGHT OVER AND SETTLED
+3331-159605-0001-696: THE MORE PROPOSALS THE MORE CREDIT
+3331-159605-0002-697: (I VE->I'VE) TRIED IT AND LIKED IT AND MAYBE THIS IS THE CONSEQUENCE OF THAT NIGHT'S FUN
+3331-159605-0003-698: JUST SUPPOSE IT IS TRUE THAT HE DOES ASK ME AND I SAY YES
+3331-159605-0004-699: WHAT A SPITEFUL THING I AM
+3331-159605-0005-700: I COULD DO SO MUCH FOR ALL AT HOME HOW I SHOULD ENJOY THAT
+3331-159605-0006-701: (LET ME SEE->THAT MISS C) HOW CAN I BEGIN
+3331-159605-0007-702: HE HAS KNOWN HER ALL HER LIFE AND HAS A GOOD INFLUENCE OVER HER
+3331-159605-0008-703: NOW AS POLLY WAS BY NO MEANS A PERFECT CREATURE I AM FREE TO CONFESS THAT THE OLD TEMPTATION ASSAILED (HER->HIM) MORE THAN ONCE (THAT->THE) WEEK FOR WHEN THE FIRST EXCITEMENT OF THE DODGING REFORM HAD SUBSIDED SHE MISSED THE PLEASANT LITTLE INTERVIEWS THAT USED TO PUT A CERTAIN (FLAVOR->FLAVOUR) OF (ROMANCE->ROMANS) INTO HER DULL HARD WORKING DAYS
+3331-159605-0009-704: I DON'T THINK IT WAS HIS WEALTH ACCOMPLISHMENTS (OR POSITION->OPPOSITION) THAT MOST ATTRACTED POLLY THOUGH THESE DOUBTLESS POSSESSED A GREATER INFLUENCE THAN SHE SUSPECTED
+3331-159605-0010-705: IT WAS THAT INDESCRIBABLE SOMETHING WHICH WOMEN ARE QUICK TO SEE AND FEEL IN MEN WHO HAVE BEEN BLESSED (WITH->*) WISE AND GOOD MOTHERS
+3331-159605-0011-706: THIS HAD AN ESPECIAL CHARM TO POLLY FOR SHE SOON FOUND THAT THIS SIDE OF HIS CHARACTER WAS NOT SHOWN TO (EVERY ONE->EVERYONE)
+3331-159605-0012-707: (LATELY->PLATELY) THIS HAD CHANGED ESPECIALLY TOWARDS POLLY AND IT (FLATTERED->FLUTTERED) HER MORE THAN SHE WOULD CONFESS EVEN TO HERSELF
+3331-159605-0013-708: AT FIRST SHE TRIED TO THINK SHE COULD BUT UNFORTUNATELY HEARTS ARE SO CONTRARY THAT THEY WON'T BE OBEDIENT TO REASON WILL OR EVEN (GRATITUDE->CREDIT)
+3331-159605-0014-709: POLLY FELT A VERY CORDIAL FRIENDSHIP FOR MISTER (SYDNEY->SIDNEY) BUT NOT ONE PARTICLE OF THE (LOVE WHICH IS->LAW PITCHES) THE ONLY (COIN->KIND) IN WHICH LOVE CAN BE TRULY PAID
+3331-159605-0015-710: THIS FINISHED POLLY'S INDECISION AND AFTER THAT NIGHT SHE NEVER ALLOWED HERSELF TO DWELL UPON THE PLEASANT TEMPTATION WHICH CAME IN A GUISE PARTICULARLY ATTRACTIVE TO A YOUNG GIRL WITH A (SPICE->SPIES) OF THE OLD EVE IN HER COMPOSITION
+3331-159605-0016-711: WHEN (SATURDAY->SATAN) CAME POLLY STARTED AS USUAL FOR A VISIT TO (BECKY->BACKY) AND BESS BUT (COULD N'T->COULDN'T) RESIST STOPPING AT THE (SHAWS->SHORES) TO LEAVE A LITTLE PARCEL FOR FAN (THOUGH IT->THAT) WAS CALLING TIME
+3331-159605-0017-712: A FOOLISH LITTLE SPEECH TO MAKE TO A (DOG->DARK) BUT YOU SEE POLLY WAS ONLY A TENDER HEARTED GIRL TRYING TO (DO->*) HER DUTY
+3331-159605-0018-713: TAKE HOLD OF (MASTER CHARLEY'S->MASSA CHARLIE'S) HAND MISS (MAMIE->MAMMY) AND WALK PRETTY LIKE (WILLY->BILLY) AND (FLOSSY->FLOSSIE) SAID THE MAID
+3331-159605-0019-714: AT A STREET CORNER A BLACK EYED (SCHOOL BOY->SCHOOLBOY) WAS PARTING FROM A ROSY FACED SCHOOL GIRL WHOSE MUSIC ROLL HE WAS RELUCTANTLY SURRENDERING
+3331-159605-0020-715: HOW HE GOT THERE WAS NEVER VERY CLEAR TO POLLY BUT THERE HE WAS FLUSHED AND A LITTLE OUT OF BREATH BUT LOOKING SO GLAD TO SEE HER (THAT->TILL) SHE HAD (N'T->NOT) THE HEART TO BE STIFF AND COOL AS SHE HAD FULLY INTENDED TO BE WHEN THEY MET
+3331-159605-0021-716: SHE REALLY COULD (N'T->NOT) HELP IT IT WAS SO PLEASANT TO SEE HIM AGAIN JUST WHEN SHE WAS FEELING SO LONELY
+3331-159605-0022-717: THAT IS THE WAY I GET TO THE (ROTHS->ROSS) ANSWERED POLLY
+3331-159605-0023-718: SHE DID NOT MEAN TO TELL BUT HIS FRANKNESS WAS (SO->TO) AGREEABLE SHE FORGOT HERSELF
+3331-159605-0024-719: BUT I KNOW HER BETTER AND I ASSURE YOU THAT SHE (DOES IMPROVE->DOESN'T PROVE) SHE TRIES TO (MEND HER->MENTAL) FAULTS THOUGH SHE WON'T OWN IT AND (WILL->WAS) SURPRISE YOU SOME DAY BY THE AMOUNT OF HEART AND SENSE AND GOODNESS SHE HAS GOT
+3331-159605-0025-720: THANK YOU NO
+3331-159605-0026-721: HOW LOVELY THE PARK LOOKS SHE SAID IN GREAT CONFUSION
+3331-159605-0027-722: ASKED THE ARTFUL YOUNG MAN LAYING A TRAP INTO WHICH POLLY IMMEDIATELY FELL
+3331-159605-0028-723: HE WAS QUICKER TO TAKE A HINT THAN SHE HAD EXPECTED AND BEING BOTH PROUD AND GENEROUS (RESOLVED->WE SOFT) TO SETTLE THE MATTER AT ONCE FOR POLLY'S SAKE AS WELL AS HIS OWN
+3331-159605-0029-724: SO WHEN SHE MADE HER LAST (BRILLIANT->POINT) REMARK HE SAID QUIETLY WATCHING HER FACE KEENLY ALL THE WHILE I THOUGHT SO WELL (I M->I'M) GOING OUT OF TOWN ON BUSINESS FOR SEVERAL WEEKS SO YOU CAN ENJOY (YOUR->YOU) LITTLE BIT OF COUNTRY WITHOUT BEING ANNOYED BY ME (ANNOYED->ANNOY IT)
+3331-159605-0030-725: SHE THOUGHT SHE HAD A GOOD DEAL OF THE COQUETTE IN HER AND (I VE->I'VE) NO DOUBT THAT WITH TIME AND TRAINING SHE WOULD HAVE BECOME A VERY DANGEROUS LITTLE PERSON BUT NOW SHE WAS FAR (TOO->TO) TRANSPARENT AND STRAIGHTFORWARD BY NATURE EVEN TO TELL A (WHITE LIE CLEVERLY->WIDE LIKE LEVELLY)
+3331-159605-0031-726: HE WAS GONE BEFORE SHE COULD DO ANYTHING BUT LOOK UP AT HIM WITH A REMORSEFUL FACE AND SHE WALKED ON FEELING THAT THE FIRST AND PERHAPS THE ONLY LOVER SHE WOULD EVER HAVE HAD READ HIS ANSWER AND ACCEPTED (IT->*) IN SILENCE
+3331-159605-0032-727: POLLY DID NOT RETURN TO HER (FAVORITE->FAVOURITE) WALK TILL SHE LEARNED (FROM->FOR) MINNIE THAT UNCLE HAD REALLY LEFT TOWN AND THEN SHE FOUND THAT HIS FRIENDLY COMPANY AND CONVERSATION WAS WHAT HAD MADE THE WAY SO PLEASANT AFTER ALL
+3331-159605-0033-728: (WAGGING->WORKING) TO AND FRO AS USUAL WHAT'S THE NEWS WITH YOU
+3331-159605-0034-729: PERHAPS (SHE LL JILT->SHE'LL CHILLED) HIM
+3331-159605-0035-730: UTTERLY DONE WITH AND LAID UPON THE SHELF
+3331-159605-0036-731: (MINNIE->MINNI) SAID THE OTHER DAY SHE WISHED SHE WAS A (PIGEON->PITCHEN) SO SHE COULD PADDLE IN THE (PUDDLES->BOTTLES) AND (NOT->NUT) FUSS ABOUT RUBBERS
+3331-159605-0037-732: NOW DON'T BE AFFECTED POLLY BUT JUST TELL ME LIKE A DEAR HAS (N'T->NOT) HE PROPOSED
+3331-159605-0038-733: DON'T YOU THINK HE MEANS TO
+3331-159605-0039-734: TRULY (TRULY->JULIE) FAN
+3331-159605-0040-735: I DON'T MEAN TO BE PRYING BUT I REALLY THOUGHT HE DID
+3331-159605-0041-736: WELL I ALWAYS MEANT TO TRY IT IF I GOT A CHANCE AND I HAVE
+3331-159605-0042-737: I JUST GAVE HIM A HINT AND HE TOOK IT
+3331-159605-0043-738: HE MEANT TO GO AWAY BEFORE THAT SO DON'T THINK HIS HEART IS BROKEN (OR->O) MIND WHAT (SILLY TATTLERS->SIDI TEDLERS) SAY
+3331-159605-0044-739: HE UNDERSTOOD AND BEING A GENTLEMAN MADE NO FUSS
+3331-159605-0045-740: BUT POLLY IT WOULD HAVE BEEN A GRAND THING FOR YOU
+3331-159605-0046-741: I (M ODD->AM AUGHT) YOU KNOW AND PREFER TO BE AN INDEPENDENT SPINSTER AND TEACH MUSIC ALL MY DAYS
+3331-159609-0000-742: NEVER MIND WHAT THE BUSINESS WAS (IT SUFFICES->ITS SURFACES) TO SAY THAT IT WAS A GOOD BEGINNING FOR A YOUNG MAN LIKE TOM WHO HAVING BEEN BORN AND BRED IN THE MOST CONSERVATIVE (CLASS->GLASS) OF THE MOST CONCEITED CITY IN NEW ENGLAND NEEDED JUST THE HEALTHY HEARTY SOCIAL INFLUENCES OF THE WEST TO WIDEN HIS VIEWS AND MAKE A MAN OF HIM
+3331-159609-0001-743: FORTUNATELY EVERY ONE WAS SO BUSY WITH THE NECESSARY PREPARATIONS THAT THERE WAS NO TIME FOR (ROMANCE->ROMANS) OF ANY SORT AND THE FOUR YOUNG PEOPLE WORKED TOGETHER AS SOBERLY AND SENSIBLY AS IF ALL SORTS OF EMOTIONS WERE NOT (BOTTLED->BOTHERED) UP IN THEIR RESPECTIVE HEARTS
+3331-159609-0002-744: PITY THAT THE END SHOULD COME SO SOON BUT THE HOUR DID ITS WORK AND (WENT->WHEN) ITS WAY LEAVING A CLEARER ATMOSPHERE BEHIND (THOUGH->THAT) THE YOUNG FOLKS DID NOT SEE IT THEN FOR THEIR EYES WERE DIM BECAUSE OF THE PARTINGS THAT MUST BE
+3331-159609-0003-745: IF IT HAD NOT BEEN FOR TWO THINGS I FEAR SHE NEVER WOULD HAVE STOOD A SUMMER IN TOWN BUT SYDNEY OFTEN CALLED (TILL->TO) HIS VACATION CAME AND A VOLUMINOUS CORRESPONDENCE WITH POLLY (BEGUILED->BEGUILD) THE LONG DAYS
+3331-159609-0004-746: (TOM WROTE ONCE->TUMULT ONES) A WEEK TO HIS MOTHER BUT (THE LETTERS->THEY LET US) WERE SHORT AND NOT VERY SATISFACTORY FOR MEN NEVER DO (TELL->SO) THE INTERESTING LITTLE THINGS THAT WOMEN BEST LIKE TO HEAR
+3331-159609-0005-747: NO I (M->AM) ONLY TIRED HAD A GOOD DEAL TO DO LATELY AND THE DULL WEATHER MAKES ME (JUST A TRIFLE->CHOS THE TRAVEL) BLUE
+3331-159609-0006-748: FORGIVE ME POLLY BUT I CAN'T HELP SAYING IT FOR IT IS THERE AND I WANT TO BE AS TRUE TO YOU AS YOU WERE TO ME IF I CAN
+3331-159609-0007-749: I (TRY->TRIED) NOT (TO->A) DECEIVE MYSELF BUT IT DOES SEEM AS IF THERE WAS A CHANCE OF HAPPINESS FOR ME
+3331-159609-0008-750: THANK HEAVEN FOR THAT
+3331-159609-0009-751: CRIED POLLY WITH THE (HEARTIEST->HARDIEST) SATISFACTION IN HER VOICE
+3331-159609-0010-752: POOR POLLY WAS SO TAKEN BY SURPRISE THAT SHE HAD NOT A WORD TO SAY
+3331-159609-0011-753: NONE WERE NEEDED (*->HOTEL) HER (TELLTALE->*) FACE ANSWERED FOR HER AS WELL AS THE IMPULSE WHICH MADE HER HIDE HER HEAD IN THE SOFA CUSHION LIKE A FOOLISH OSTRICH (WHEN->AND) THE (HUNTERS->HANDS) ARE AFTER IT
+3331-159609-0012-754: ONCE OR TWICE BUT SORT OF (JOKINGLY->CHOKINGLY) AND I THOUGHT IT WAS ONLY SOME LITTLE FLIRTATION
+3331-159609-0013-755: IT WAS SO STUPID OF ME NOT TO GUESS BEFORE
+3331-159609-0014-756: IT WAS (SO->HER) TENDER EARNEST AND DEFIANT THAT FANNY FORGOT THE DEFENCE OF HER OWN (LOVER->LOVE) IN ADMIRATION OF POLLY'S LOYALTY TO HERS FOR THIS FAITHFUL ALL ABSORBING LOVE WAS A NEW REVELATION TO FANNY WHO WAS USED TO HEARING HER FRIENDS BOAST OF TWO OR THREE LOVERS A YEAR AND CALCULATE THEIR RESPECTIVE VALUES WITH ALMOST AS MUCH COOLNESS AS THE YOUNG MEN DISCUSSED THE FORTUNES OF THE GIRLS THEY WISHED FOR BUT COULD NOT AFFORD TO MARRY
+3331-159609-0015-757: I HOPE MARIA (BAILEY IS ALL HE THINKS HER->BAILEY'S ONLY THINK SIR) SHE ADDED SOFTLY FOR I COULD (N'T->NOT) BEAR TO HAVE HIM DISAPPOINTED AGAIN
+3331-159609-0016-758: SAID FANNY TURNING HOPEFUL ALL AT ONCE
+3331-159609-0017-759: SUPPOSE I SAY A WORD TO TOM JUST INQUIRE AFTER HIS HEART IN A GENERAL WAY YOU KNOW AND GIVE HIM A CHANCE TO TELL ME IF (THERE IS->THERE'S) ANYTHING TO TELL
+3331-159609-0018-760: BEAR IT PEOPLE ALWAYS DO (BEAR->BARE) THINGS SOMEHOW ANSWERED POLLY LOOKING AS IF SENTENCE HAD BEEN PASSED UPON HER
+3331-159609-0019-761: IT WAS A VERY DIFFERENT (WINTER FROM->WINDOW ON) THE LAST (FOR BOTH->ABOVE) THE GIRLS
+3331-159609-0020-762: IF (FANNY->ANY) WANTED TO SHOW HIM WHAT SHE COULD DO TOWARD MAKING A PLEASANT HOME SHE CERTAINLY SUCCEEDED (BETTER THAN->BY THEN) SHE SUSPECTED FOR IN SPITE OF MANY FAILURES AND DISCOURAGEMENTS BEHIND THE SCENES THE LITTLE HOUSE BECAME A MOST ATTRACTIVE PLACE TO MISTER (SYDNEY->SIDNEY) AT LEAST FOR HE WAS MORE THE HOUSE FRIEND THAN EVER AND SEEMED DETERMINED TO PROVE THAT CHANGE OF FORTUNE MADE NO DIFFERENCE TO HIM
+3331-159609-0021-763: SHE KEPT MUCH AT HOME WHEN THE DAY'S WORK WAS DONE FINDING IT PLEASANTER TO SIT DREAMING (OVER->OF A) BOOK OR SEWING ALONE THAN TO EXERT HERSELF EVEN TO GO TO THE (SHAWS->SHORES)
+3331-159609-0022-764: POLLY WAS NOT AT ALL LIKE HERSELF THAT (WINTER->WINDOW) AND THOSE NEAREST TO HER SAW AND (WONDERED->WANDERED) AT IT MOST
+3331-159609-0023-765: FOR NED WAS SO ABSORBED IN BUSINESS THAT HE (IGNORED->NURED) THE WHOLE (BAILEY->BAILIQUE) QUESTION AND LEFT THEM IN (UTTER->OTHER) DARKNESS
+3331-159609-0024-766: (FANNY->THEN HE) CAME WALKING IN UPON HER ONE DAY LOOKING AS IF SHE (BROUGHT TIDINGS->POURED HIDINGS) OF SUCH GREAT JOY THAT SHE HARDLY KNEW HOW TO TELL THEM
+3331-159609-0025-767: BUT IF WORK BASKETS WERE GIFTED WITH POWERS OF SPEECH THEY COULD TELL STORIES MORE TRUE AND TENDER THAN ANY (WE READ->REED)
+3528-168656-0000-864: SHE HAD EVEN BEEN IN SOCIETY BEFORE THE REVOLUTION
+3528-168656-0001-865: IT WAS HER PLEASURE AND HER VANITY TO DRAG IN THESE NAMES ON EVERY PRETEXT
+3528-168656-0002-866: EVERY YEAR SHE SOLEMNLY RENEWED HER VOWS AND AT THE MOMENT OF TAKING THE OATH SHE SAID TO THE PRIEST MONSEIGNEUR SAINT FRANCOIS GAVE IT TO MONSEIGNEUR (SAINT->SAY) JULIEN MONSEIGNEUR SAINT (JULIEN->JULIAN) GAVE IT TO MONSEIGNEUR SAINT (EUSEBIUS MONSEIGNEUR->EUSCIBIUS MONSIEUR) SAINT (EUSEBIUS->EUSIBIUS) GAVE IT TO MONSEIGNEUR SAINT (PROCOPIUS->PROCOPIAS) ET CETERA ET CETERA
+3528-168656-0003-867: AND THE (SCHOOL GIRLS->SCHOOLGIRLS) WOULD BEGIN TO LAUGH NOT IN THEIR SLEEVES BUT UNDER (THEIR->THE) VEILS CHARMING LITTLE STIFLED LAUGHS WHICH MADE THE VOCAL MOTHERS FROWN
+3528-168656-0004-868: IT WAS A CENTURY WHICH SPOKE THROUGH HER BUT IT WAS THE EIGHTEENTH CENTURY
+3528-168656-0005-869: THE RULE OF (FONTEVRAULT->FONTREVAL) DID NOT FORBID THIS
+3528-168656-0006-870: SHE WOULD NOT SHOW (THIS OBJECT->THE SUBJECT) TO (ANYONE->ANY ONE)
+3528-168656-0007-871: THUS IT FURNISHED A SUBJECT OF COMMENT FOR ALL THOSE WHO WERE UNOCCUPIED OR BORED IN THE CONVENT
+3528-168656-0008-872: SOME UNIQUE CHAPLET SOME AUTHENTIC RELIC
+3528-168656-0009-873: THEY LOST THEMSELVES IN CONJECTURES
+3528-168656-0010-874: WHEN THE POOR OLD WOMAN DIED THEY RUSHED TO HER CUPBOARD MORE HASTILY THAN WAS FITTING PERHAPS AND OPENED IT
+3528-168656-0011-875: HE IS RESISTING FLUTTERING HIS TINY WINGS AND STILL MAKING AN EFFORT TO FLY BUT THE (DANCER IS->DANCERS) LAUGHING WITH (A->US) SATANICAL AIR
+3528-168656-0012-876: MORAL LOVE CONQUERED BY THE COLIC
+3528-168669-0000-877: THE (PRIORESS->PRIORS) RETURNED AND SEATED HERSELF ONCE MORE ON HER CHAIR
+3528-168669-0001-878: WE WILL PRESENT A (STENOGRAPHIC->SYNOGRAPHIC) REPORT OF THE DIALOGUE WHICH THEN ENSUED TO THE BEST OF OUR ABILITY
+3528-168669-0002-879: FATHER (FAUVENT->FERVENT)
+3528-168669-0003-880: REVEREND MOTHER DO YOU KNOW THE CHAPEL
+3528-168669-0004-881: AND YOU HAVE BEEN IN THE CHOIR IN PURSUANCE OF YOUR DUTIES TWO OR THREE TIMES
+3528-168669-0005-882: THERE IS A STONE TO BE RAISED HEAVY
+3528-168669-0006-883: THE SLAB OF THE PAVEMENT WHICH IS AT THE SIDE OF THE ALTAR
+3528-168669-0007-884: THE (SLAB->FLAP) WHICH CLOSES THE VAULT YES
+3528-168669-0008-885: IT WOULD BE A GOOD THING TO HAVE TWO MEN FOR IT
+3528-168669-0009-886: A WOMAN IS NEVER A MAN
+3528-168669-0010-887: BECAUSE (DOM MABILLON->DON MARBULAN) GIVES FOUR HUNDRED AND SEVENTEEN EPISTLES OF SAINT BERNARD WHILE (MERLONUS HORSTIUS->MERLUNUS HORSE) ONLY GIVES THREE HUNDRED AND SIXTY SEVEN I DO NOT DESPISE (MERLONUS HORSTIUS->MERLINUS HORSES) NEITHER DO I
+3528-168669-0011-888: (MERIT->MARRIAGE) CONSISTS IN WORKING ACCORDING TO ONE'S STRENGTH A CLOISTER IS NOT A (DOCK YARD->DOCKYARD)
+3528-168669-0012-889: (AND->ADD) A WOMAN IS NOT A MAN BUT MY BROTHER IS THE STRONG ONE THOUGH
+3528-168669-0013-890: AND CAN YOU GET A (LEVER->LOVER)
+3528-168669-0014-891: THERE IS A RING IN THE STONE
+3528-168669-0015-892: I WILL PUT THE (LEVER->LOVER) THROUGH IT
+3528-168669-0016-893: THAT IS GOOD REVEREND MOTHER I WILL OPEN THE VAULT
+3528-168669-0017-894: WILL THAT BE ALL NO
+3528-168669-0018-895: GIVE ME YOUR ORDERS VERY REVEREND MOTHER
+3528-168669-0019-896: (FAUVENT->FOR THAT) WE HAVE CONFIDENCE IN YOU
+3528-168669-0020-897: I AM HERE TO DO ANYTHING YOU WISH
+3528-168669-0021-898: AND TO HOLD YOUR PEACE ABOUT EVERYTHING YES (REVEREND->ROBIN) MOTHER
+3528-168669-0022-899: WHEN THE (VAULT->VOLT) IS OPEN I WILL CLOSE IT AGAIN
+3528-168669-0023-900: BUT BEFORE THAT WHAT REVEREND MOTHER
+3528-168669-0024-901: FATHER (FAUVENT REVEREND->FOR REVERE) MOTHER
+3528-168669-0025-902: YOU KNOW THAT A MOTHER DIED THIS MORNING
+3528-168669-0026-903: NO DID YOU NOT HEAR THE BELL
+3528-168669-0027-904: NOTHING CAN BE HEARD AT THE BOTTOM OF THE GARDEN REALLY
+3528-168669-0028-905: AND THEN THE WIND IS NOT BLOWING IN MY DIRECTION THIS MORNING
+3528-168669-0029-906: IT WAS MOTHER CRUCIFIXION
+3528-168669-0030-907: THREE YEARS AGO MADAME DE (BETHUNE->BESOON) A (JANSENIST TURNED->GENT) ORTHODOX MERELY FROM HAVING SEEN MOTHER CRUCIFIXION AT PRAYER AH
+3528-168669-0031-908: THE MOTHERS HAVE TAKEN HER TO THE DEAD ROOM WHICH OPENS ON THE CHURCH I KNOW
+3528-168669-0032-909: A FINE SIGHT IT WOULD BE TO SEE A MAN ENTER THE DEAD ROOM MORE OFTEN
+3528-168669-0033-910: HEY MORE OFTEN
+3528-168669-0034-911: WHAT DO YOU SAY
+3528-168669-0035-912: I SAY MORE OFTEN MORE OFTEN THAN WHAT
+3528-168669-0036-913: REVEREND MOTHER I DID NOT SAY MORE OFTEN THAN WHAT I SAID MORE OFTEN
+3528-168669-0037-914: BUT I DID NOT SAY MORE OFTEN
+3528-168669-0038-915: AT THAT MOMENT NINE O'CLOCK STRUCK
+3528-168669-0039-916: AT NINE O'CLOCK IN THE MORNING AND AT ALL HOURS PRAISED AND (ADORED->ENDURED) BE THE MOST HOLY SACRAMENT OF THE ALTAR SAID THE (PRIORESS->PRIEST)
+3528-168669-0040-917: IT CUT MORE OFTEN SHORT
+3528-168669-0041-918: FAUCHELEVENT MOPPED HIS FOREHEAD
+3528-168669-0042-919: IN HER LIFETIME MOTHER CRUCIFIXION MADE CONVERTS AFTER HER DEATH SHE WILL PERFORM MIRACLES SHE WILL
+3528-168669-0043-920: FATHER (FAUVENT->FUVENT) THE COMMUNITY HAS BEEN BLESSED IN MOTHER (CRUCIFIXION->CURSE FICTION)
+3528-168669-0044-921: SHE RETAINED HER CONSCIOUSNESS TO THE VERY LAST MOMENT
+3528-168669-0045-922: SHE GAVE US HER LAST COMMANDS
+3528-168669-0046-923: IF YOU HAD A LITTLE MORE FAITH AND IF YOU COULD HAVE BEEN IN (HER CELL->HERSELF) SHE WOULD HAVE CURED YOUR LEG MERELY BY TOUCHING IT SHE SMILED
+3528-168669-0047-924: THERE WAS SOMETHING OF PARADISE IN THAT DEATH
+3528-168669-0048-925: FAUCHELEVENT THOUGHT THAT IT WAS AN ORISON WHICH SHE WAS FINISHING
+3528-168669-0049-926: (FAUCHELEVENT->FORCHELEVENT) HELD HIS PEACE SHE WENT ON
+3528-168669-0050-927: I HAVE CONSULTED UPON THIS POINT MANY ECCLESIASTICS (LABORING->LABOURING) IN OUR LORD WHO OCCUPY THEMSELVES IN THE EXERCISES OF THE CLERICAL LIFE AND WHO BEAR WONDERFUL FRUIT
+3528-168669-0051-928: FORTUNATELY THE (PRIORESS->PIRRUS) COMPLETELY ABSORBED IN HER OWN THOUGHTS DID NOT HEAR IT
+3528-168669-0052-929: SHE CONTINUED (FATHER FAUVENT->FURTHER PREVENT)
+3528-168669-0053-930: YES (REVEREND->REVERE) MOTHER
+3528-168669-0054-931: SAINT TERENTIUS BISHOP OF PORT (WHERE->WEAR) THE MOUTH OF THE (TIBER->TYBER) EMPTIES INTO THE SEA REQUESTED THAT ON HIS TOMB MIGHT BE ENGRAVED THE SIGN WHICH WAS PLACED ON THE GRAVES OF (PARRICIDES->PARASITES) IN THE HOPE THAT PASSERS BY WOULD SPIT ON HIS TOMB THIS WAS DONE
+3528-168669-0055-932: THE DEAD MUST BE OBEYED SO BE IT
+3528-168669-0056-933: FOR THAT MATTER NO REVEREND MOTHER
+3528-168669-0057-934: FATHER (FAUVENT->PREVENT) MOTHER CRUCIFIXION WILL BE INTERRED IN THE COFFIN IN WHICH SHE HAS SLEPT FOR THE LAST TWENTY YEARS THAT IS JUST
+3528-168669-0058-935: IT IS A CONTINUATION OF HER SLUMBER
+3528-168669-0059-936: SO I SHALL HAVE TO NAIL UP THAT COFFIN YES
+3528-168669-0060-937: I AM AT THE ORDERS OF THE VERY REVEREND COMMUNITY
+3528-168669-0061-938: THE FOUR MOTHER (PRECENTORS->PRESENTERS) WILL ASSIST YOU
+3528-168669-0062-939: NO IN (LOWERING->LORING) THE COFFIN
+3528-168669-0063-940: WHERE INTO THE VAULT
+3528-168669-0064-941: FAUCHELEVENT STARTED THE VAULT UNDER THE ALTAR
+3528-168669-0065-942: UNDER THE ALTAR BUT
+3528-168669-0066-943: YOU WILL HAVE AN IRON BAR YES BUT
+3528-168669-0067-944: YOU WILL RAISE THE STONE WITH THE BAR BY MEANS OF THE RING BUT
+3528-168669-0068-945: THE DEAD MUST BE OBEYED TO BE BURIED IN THE VAULT UNDER THE ALTAR OF THE CHAPEL NOT TO GO TO PROFANE EARTH TO REMAIN THERE IN DEATH WHERE SHE PRAYED WHILE LIVING SUCH WAS THE LAST WISH OF MOTHER CRUCIFIXION
+3528-168669-0069-946: SHE ASKED IT OF US THAT IS TO SAY COMMANDED US
+3528-168669-0070-947: BUT IT IS FORBIDDEN
+3528-168669-0071-948: OH I AM A STONE IN YOUR WALLS
+3528-168669-0072-949: THINK FATHER (FAUVENT->FRAVAIN) IF SHE WERE TO WORK MIRACLES HERE
+3528-168669-0073-950: WHAT A GLORY OF GOD FOR THE COMMUNITY AND MIRACLES ISSUE FROM TOMBS
+3528-168669-0074-951: BUT REVEREND MOTHER IF THE (AGENT OF->AGENTIVE) THE SANITARY COMMISSION
+3528-168669-0075-952: BUT THE COMMISSARY OF POLICE
+3528-168669-0076-953: (CHONODEMAIRE->SHADOW DE MAR) ONE OF THE SEVEN GERMAN KINGS WHO ENTERED AMONG THE GAULS UNDER THE EMPIRE OF CONSTANTIUS EXPRESSLY RECOGNIZED THE RIGHT OF NUNS TO BE BURIED IN RELIGION THAT IS TO SAY BENEATH THE ALTAR
+3528-168669-0077-954: THE WORLD IS NOTHING IN THE PRESENCE OF THE CROSS
+3528-168669-0078-955: (MARTIN->MERTON) THE ELEVENTH GENERAL OF THE CARTHUSIANS GAVE TO HIS ORDER THIS DEVICE STAT (CRUX DUM VOLVITUR->CREW DOOMFUL) ORBIS
+3528-168669-0079-956: THE (PRIORESS->PIRRUS) WHO WAS USUALLY SUBJECTED TO THE BARRIER OF SILENCE AND WHOSE RESERVOIR WAS (OVERFULL->OVER FULL) ROSE AND EXCLAIMED WITH THE LOQUACITY OF A DAM WHICH HAS BROKEN AWAY
+3528-168669-0080-957: I HAVE ON MY RIGHT (BENOIT->BENOIS) AND (ON->ALL) MY LEFT BERNARD WHO WAS BERNARD
+3528-168669-0081-958: THE FIRST ABBOT OF (CLAIRVAUX->CLERVAL)
+3528-168669-0082-959: HIS ORDER HAS PRODUCED FORTY POPES TWO HUNDRED CARDINALS FIFTY PATRIARCHS SIXTEEN HUNDRED ARCHBISHOPS FOUR THOUSAND SIX HUNDRED BISHOPS FOUR EMPERORS TWELVE EMPRESSES FORTY SIX KINGS FORTY ONE QUEENS THREE THOUSAND SIX HUNDRED (CANONIZED->CANNONIZED) SAINTS AND HAS BEEN IN EXISTENCE FOR FOURTEEN HUNDRED YEARS
+3528-168669-0083-960: ON ONE SIDE SAINT BERNARD ON THE OTHER THE AGENT OF THE (SANITARY->SENATORY) DEPARTMENT
+3528-168669-0084-961: GOD SUBORDINATED TO THE COMMISSARY OF POLICE SUCH (IS->AS) THE AGE SILENCE (FAUVENT->FAVOT)
+3528-168669-0085-962: NO ONE DOUBTS THE RIGHT OF THE MONASTERY TO SEPULTURE
+3528-168669-0086-963: ONLY FANATICS AND THOSE IN ERROR DENY IT
+3528-168669-0087-964: WE LIVE IN TIMES OF TERRIBLE CONFUSION
+3528-168669-0088-965: WE ARE IGNORANT AND IMPIOUS
+3528-168669-0089-966: AND THEN RELIGION IS ATTACKED WHY
+3528-168669-0090-967: BECAUSE THERE HAVE BEEN BAD PRIESTS BECAUSE (SAGITTAIRE->SAGOTARA) BISHOP OF GAP WAS (THE->A) BROTHER OF (SALONE->SALON) BISHOP OF (EMBRUN->AMBRON) AND BECAUSE BOTH OF THEM FOLLOWED (MOMMOL->MAMMA)
+3528-168669-0091-968: THEY PERSECUTE THE SAINTS
+3528-168669-0092-969: THEY SHUT THEIR EYES TO THE TRUTH DARKNESS IS THE RULE
+3528-168669-0093-970: THE MOST FEROCIOUS BEASTS ARE BEASTS WHICH ARE BLIND
+3528-168669-0094-971: OH HOW WICKED PEOPLE ARE
+3528-168669-0095-972: BY ORDER OF THE KING SIGNIFIES TO DAY BY ORDER OF THE REVOLUTION
+3528-168669-0096-973: ONE NO LONGER KNOWS WHAT IS DUE TO THE LIVING OR TO THE DEAD A HOLY DEATH IS PROHIBITED
+3528-168669-0097-974: (GAUTHIER->GATHIERRE) BISHOP OF (CHALONS->CHALON) HELD HIS OWN IN THIS MATTER AGAINST OTHO DUKE OF BURGUNDY
+3528-168669-0098-975: THE (PRIORESS->PRIORS) TOOK BREATH (THEN->AND) TURNED TO FAUCHELEVENT
+3528-168669-0099-976: YOU WILL CLOSE THE COFFIN THE SISTERS WILL CARRY IT TO THE CHAPEL
+3528-168669-0100-977: THE OFFICE FOR THE DEAD WILL THEN BE (SAID->SET)
+3528-168669-0101-978: BUT SHE WILL HEAR SHE WILL NOT LISTEN
+3528-168669-0102-979: BESIDES WHAT THE (CLOISTER->CLOSER) KNOWS THE WORLD LEARNS NOT
+3528-168669-0103-980: A PAUSE (ENSUED->IN SUIT)
+3528-168669-0104-981: YOU WILL REMOVE YOUR (BELL->BELT)
+3528-168669-0105-982: HAS THE DOCTOR FOR THE DEAD PAID HIS VISIT
+3528-168669-0106-983: HE WILL PAY IT AT FOUR O'CLOCK TO DAY
+3528-168669-0107-984: THE PEAL WHICH ORDERS THE DOCTOR FOR THE (DEAD->DEBT) TO BE SUMMONED HAS ALREADY BEEN (RUNG->RUN)
+3528-168669-0108-985: BUT YOU DO NOT UNDERSTAND ANY OF THE PEALS
+3528-168669-0109-986: THAT IS WELL FATHER (FAUVENT->VENT)
+3528-168669-0110-987: WHERE WILL YOU OBTAIN IT
+3528-168669-0111-988: I HAVE MY HEAP OF OLD IRON AT THE BOTTOM OF THE GARDEN
+3528-168669-0112-989: (REVEREND->REVERE) MOTHER WHAT
+3528-168669-0113-990: IF YOU WERE EVER TO HAVE ANY OTHER JOBS OF THIS SORT MY BROTHER IS THE STRONG MAN FOR YOU A PERFECT TURK
+3528-168669-0114-991: YOU WILL DO IT AS SPEEDILY AS POSSIBLE
+3528-168669-0115-992: I CANNOT WORK VERY FAST I AM INFIRM THAT IS WHY I REQUIRE AN ASSISTANT I LIMP
+3528-168669-0116-993: EVERYTHING MUST HAVE BEEN COMPLETED A GOOD QUARTER OF AN HOUR BEFORE THAT
+3528-168669-0117-994: I WILL DO ANYTHING TO PROVE MY ZEAL TOWARDS THE COMMUNITY THESE ARE MY ORDERS I AM TO NAIL UP THE COFFIN
+3528-168669-0118-995: AT ELEVEN O'CLOCK EXACTLY I AM TO BE IN THE CHAPEL
+3528-168669-0119-996: MOTHER ASCENSION WILL BE THERE TWO MEN WOULD BE BETTER
+3528-168669-0120-997: HOWEVER NEVER MIND I SHALL HAVE MY (LEVER->LOVER)
+3528-168669-0121-998: AFTER WHICH THERE WILL BE NO TRACE OF ANYTHING
+3528-168669-0122-999: THE GOVERNMENT WILL HAVE NO SUSPICION
+3528-168669-0123-1000: THE EMPTY COFFIN REMAINS THIS PRODUCED A (PAUSE->PULSE)
+3528-168669-0124-1001: WHAT IS TO BE DONE WITH THAT COFFIN FATHER (FAUVENT->PREVENT)
+3528-168669-0125-1002: IT WILL BE GIVEN TO THE EARTH EMPTY
+3528-168669-0126-1003: AH (THE DE->LIDA) EXCLAIMED FAUCHELEVENT
+3528-168669-0127-1004: THE (VIL->VILLE) STUCK FAST IN HIS THROAT
+3528-168669-0128-1005: HE MADE HASTE TO IMPROVISE AN EXPEDIENT TO MAKE HER FORGET THE OATH
+3528-168669-0129-1006: I WILL PUT EARTH IN THE COFFIN (REVEREND->REVERED) MOTHER THAT WILL PRODUCE THE EFFECT OF A CORPSE
+3528-168669-0130-1007: I WILL MAKE THAT MY SPECIAL BUSINESS
+3538-142836-0000-1567: (GENERAL->JOE) OBSERVATIONS ON PRESERVES CONFECTIONARY (ICES->EYESES) AND DESSERT DISHES
+3538-142836-0001-1568: THE EXPENSE OF PRESERVING THEM WITH SUGAR IS A SERIOUS OBJECTION FOR EXCEPT (THE->A) SUGAR IS USED IN CONSIDERABLE (QUANTITIES->QUALITIES) THE SUCCESS IS VERY UNCERTAIN
+3538-142836-0002-1569: FRUIT GATHERED IN WET OR FOGGY WEATHER WILL SOON BE (MILDEWED->MILDED) AND BE OF NO SERVICE FOR PRESERVES
+3538-142836-0003-1570: BUT TO DISTINGUISH (THESE PROPERLY->HIS PROPER) REQUIRES VERY GREAT ATTENTION AND CONSIDERABLE EXPERIENCE
+3538-142836-0004-1571: IF YOU DIP THE FINGER INTO THE (SYRUP->SERF) AND APPLY (IT->*) TO THE THUMB THE TENACITY OF THE (SYRUP->SURF) WILL ON SEPARATING THE FINGER AND THUMB AFFORD A THREAD WHICH SHORTLY BREAKS THIS IS THE LITTLE THREAD
+3538-142836-0005-1572: LET IT BOIL UP AGAIN THEN TAKE IT OFF AND REMOVE CAREFULLY THE SCUM THAT HAS RISEN
+3538-142836-0006-1573: IT IS CONSIDERED TO BE SUFFICIENTLY BOILED WHEN SOME TAKEN UP IN A SPOON POURS OUT LIKE OIL
+3538-142836-0007-1574: BEFORE SUGAR WAS IN USE HONEY WAS EMPLOYED TO PRESERVE MANY VEGETABLE PRODUCTIONS THOUGH THIS SUBSTANCE (HAS->IS) NOW GIVEN WAY TO THE JUICE OF THE SUGAR CANE
+3538-142836-0008-1575: FOURTEEN NINETY NINE
+3538-142836-0009-1576: BOIL THEM UP THREE DAYS SUCCESSIVELY SKIMMING EACH TIME AND THEY WILL THEN BE FINISHED AND IN A STATE FIT TO BE PUT INTO POTS FOR USE
+3538-142836-0010-1577: THE REASON WHY THE FRUIT IS EMPTIED OUT OF THE PRESERVING PAN INTO AN EARTHEN PAN IS THAT THE ACID OF THE FRUIT ACTS UPON THE COPPER OF WHICH THE PRESERVING PANS ARE USUALLY MADE
+3538-142836-0011-1578: FROM THIS EXAMPLE THE PROCESS OF PRESERVING FRUITS BY SYRUP (WILL->WOULD) BE EASILY COMPREHENDED
+3538-142836-0012-1579: (THEY->THIS) SHOULD BE DRIED IN THE STOVE OR OVEN ON A (SIEVE->SEA) AND TURNED EVERY SIX OR EIGHT HOURS FRESH (POWDERED->PATTERED) SUGAR BEING SIFTED OVER THEM EVERY TIME THEY ARE TURNED
+3538-142836-0013-1580: IN THIS WAY IT IS ALSO THAT ORANGE AND (LEMON CHIPS->LENNONSHIPS) ARE PRESERVED
+3538-142836-0014-1581: MARMALADES JAMS AND FRUIT (PASTES->PACE) ARE OF THE SAME NATURE AND ARE NOW IN VERY GENERAL (REQUEST->QUEST)
+3538-142836-0015-1582: (MARMALADES->MARVELL EATS) AND (JAMS->JAM'S) DIFFER LITTLE FROM EACH OTHER (THEY ARE->THEIR) PRESERVES OF (A->*) HALF LIQUID CONSISTENCY MADE BY BOILING THE PULP OF FRUITS AND SOMETIMES PART OF THE (RINDS->RHINES) WITH SUGAR
+3538-142836-0016-1583: THAT THEY MAY KEEP IT IS NECESSARY NOT TO BE SPARING OF SUGAR FIFTEEN O THREE
+3538-142836-0017-1584: IN ALL THE OPERATIONS FOR PRESERVE MAKING WHEN THE PRESERVING PAN IS USED IT SHOULD NOT BE PLACED ON THE FIRE BUT ON A (TRIVET->TRIBUT) UNLESS THE (JAM->JAME) IS MADE ON A HOT PLATE WHEN THIS IS NOT NECESSARY
+3538-142836-0018-1585: CONFECTIONARY FIFTEEN O EIGHT
+3538-142836-0019-1586: IN SPEAKING OF (CONFECTIONARY IT->CONFECTIONARIES) SHOULD BE REMARKED THAT ALL THE VARIOUS PREPARATIONS ABOVE NAMED COME STRICTLY SPEAKING UNDER THAT HEAD FOR THE VARIOUS FRUITS FLOWERS HERBS (ROOTS AND JUICES->OR SAUCES) WHICH (WHEN->ONE) BOILED WITH SUGAR WERE FORMERLY EMPLOYED IN PHARMACY AS WELL AS FOR SWEETMEATS WERE CALLED CONFECTIONS FROM THE LATIN WORD (CONFICERE->CONFUSE) TO MAKE UP BUT THE TERM CONFECTIONARY EMBRACES A VERY LARGE CLASS INDEED OF SWEET FOOD MANY KINDS OF WHICH SHOULD NOT BE ATTEMPTED IN THE ORDINARY (CUISINE->COSEINE)
+3538-142836-0020-1587: (THE->A) THOUSAND AND ONE ORNAMENTAL DISHES THAT ADORN THE TABLES OF THE WEALTHY SHOULD BE PURCHASED FROM THE CONFECTIONER THEY CANNOT PROFITABLY BE MADE AT HOME
+3538-142836-0021-1588: HOWEVER AS LATE AS THE (REIGNS->REIGN) OF OUR TWO LAST GEORGES FABULOUS SUMS WERE OFTEN EXPENDED UPON FANCIFUL (DESSERTS->DESERTS)
+3538-142836-0022-1589: THE SHAPE OF THE (DISHES->DISH IS) VARIES AT DIFFERENT PERIODS THE PREVAILING FASHION AT PRESENT BEING OVAL AND CIRCULAR DISHES ON STEMS
+3538-142836-0023-1590: (ICES->ISIS)
+3538-142836-0024-1591: (AT DESSERTS->I DESERTS) OR AT SOME EVENING PARTIES ICES ARE SCARCELY (TO->DID) BE DISPENSED WITH
+3538-142836-0025-1592: THE (SPADDLE->SPATTLE) IS GENERALLY MADE OF COPPER KEPT BRIGHT AND CLEAN
+3538-142836-0026-1593: THEY SHOULD BE TAKEN IMMEDIATELY AFTER THE REPAST OR SOME HOURS AFTER BECAUSE THE TAKING (*->OF) THESE SUBSTANCES DURING THE PROCESS OF DIGESTION IS APT TO PROVOKE INDISPOSITION
+3538-163619-0000-1500: THERE WAS ONCE ON (A->THE) TIME A WIDOWER WHO HAD A (SON->SUDDEN) AND A DAUGHTER BY HIS FIRST (WIFE->WIF)
+3538-163619-0001-1501: FROM THE VERY DAY THAT THE NEW WIFE CAME INTO THE HOUSE THERE WAS NO PEACE FOR THE MAN'S CHILDREN AND NOT A CORNER TO BE FOUND WHERE THEY COULD GET ANY REST SO THE BOY THOUGHT THAT THE BEST THING HE COULD DO WAS TO GO OUT INTO THE WORLD AND TRY TO EARN HIS OWN BREAD
+3538-163619-0002-1502: BUT HIS SISTER WHO WAS STILL AT HOME FARED WORSE AND WORSE
+3538-163619-0003-1503: KISS ME (GIRL->GO) SAID THE HEAD
+3538-163619-0004-1504: WHEN THE KING ENTERED AND (SAW IT->SOUGHT) HE STOOD STILL AS IF HE WERE IN FETTERS AND COULD NOT STIR FROM THE SPOT FOR THE PICTURE SEEMED TO HIM SO BEAUTIFUL
+3538-163619-0005-1505: (THE YOUTH->THESE) PROMISED TO MAKE ALL THE HASTE HE COULD AND SET FORTH FROM THE KING'S PALACE
+3538-163619-0006-1506: AT LAST THEY CAME IN SIGHT OF LAND
+3538-163619-0007-1507: WELL IF MY BROTHER SAYS SO I MUST DO IT SAID THE MAN'S DAUGHTER AND SHE FLUNG HER CASKET INTO THE SEA
+3538-163619-0008-1508: WHAT IS MY BROTHER SAYING ASKED HIS SISTER AGAIN
+3538-163619-0009-1509: ON THE FIRST THURSDAY NIGHT AFTER THIS A BEAUTIFUL MAIDEN CAME INTO THE KITCHEN OF THE PALACE AND BEGGED THE KITCHEN MAID WHO SLEPT THERE TO LEND HER A BRUSH
+3538-163619-0010-1510: SHE BEGGED VERY PRETTILY AND GOT IT AND THEN SHE BRUSHED HER HAIR AND THE GOLD DROPPED FROM IT
+3538-163619-0011-1511: OUT ON (THEE->THE) UGLY BUSHY BRIDE SLEEPING SO SOFT BY THE YOUNG KING'S SIDE ON SAND AND STONES MY BED I MAKE AND MY BROTHER SLEEPS WITH THE COLD SNAKE UNPITIED AND UNWEPT
+3538-163619-0012-1512: I SHALL COME TWICE MORE AND THEN NEVER AGAIN SAID SHE
+3538-163619-0013-1513: THIS TIME ALSO AS BEFORE SHE BORROWED A BRUSH AND BRUSHED HER HAIR WITH IT AND THE GOLD DROPPED DOWN AS SHE DID IT AND AGAIN SHE SENT THE DOG OUT THREE TIMES AND WHEN DAY DAWNED SHE DEPARTED BUT AS SHE WAS GOING SHE SAID AS SHE HAD SAID BEFORE I SHALL COME ONCE MORE AND THEN NEVER AGAIN
+3538-163619-0014-1514: NO ONE CAN TELL HOW DELIGHTED THE KING WAS TO GET RID OF THAT HIDEOUS BUSHY BRIDE AND GET A QUEEN WHO WAS BRIGHT AND BEAUTIFUL AS DAY ITSELF
+3538-163622-0000-1515: WILT THOU SERVE ME AND WATCH MY SEVEN (FOALS->FOOLS) ASKED THE KING
+3538-163622-0001-1516: THE YOUTH THOUGHT (THAT->*) IT WAS VERY EASY WORK TO WATCH THE (FOALS->FOLDS) AND THAT HE COULD DO IT WELL ENOUGH
+3538-163622-0002-1517: HAST THOU WATCHED FAITHFULLY AND WELL (THE WHOLE DAY->BEHOLDAY) LONG SAID THE KING WHEN THE LAD CAME INTO HIS PRESENCE IN THE EVENING
+3538-163622-0003-1518: YES THAT I HAVE SAID THE YOUTH
+3538-163622-0004-1519: HE HAD GONE (OUT->AT) ONCE TO SEEK A PLACE HE SAID BUT NEVER WOULD HE DO SUCH A THING AGAIN
+3538-163622-0005-1520: THEN THE KING PROMISED HIM THE SAME PUNISHMENT AND THE SAME REWARD THAT HE HAD PROMISED HIS BROTHER
+3538-163622-0006-1521: WHEN HE HAD RUN AFTER THE (FOALS->FALLS) FOR A LONG LONG TIME AND WAS HOT AND TIRED HE PASSED BY (A CLEFT->CLEF) IN THE ROCK WHERE AN OLD WOMAN WAS SITTING SPINNING WITH (A DISTAFF->THE DISTANT) AND SHE CALLED TO HIM
+3538-163622-0007-1522: (COME HITHER->COMMANDER) COME HITHER MY HANDSOME SON AND LET ME (COMB->CALM) YOUR HAIR
+3538-163622-0008-1523: THE YOUTH LIKED THE THOUGHT OF THIS LET THE (FOALS RUN->FOLDS RUM) WHERE THEY CHOSE AND SEATED HIMSELF IN THE CLEFT OF THE ROCK BY THE SIDE OF THE OLD HAG
+3538-163622-0009-1524: SO THERE HE SAT WITH HIS HEAD ON HER LAP TAKING HIS EASE THE (LIVELONG->LIVE LONG) DAY
+3538-163622-0010-1525: ON THE THIRD DAY (CINDERLAD->SAID THE LAD) WANTED TO SET OUT
+3538-163622-0011-1526: THE TWO BROTHERS LAUGHED AT HIM AND HIS FATHER AND MOTHER BEGGED HIM NOT TO GO BUT ALL TO NO PURPOSE AND (CINDERLAD->SINDERLAD) SET OUT ON HIS WAY
+3538-163622-0012-1527: I AM WALKING ABOUT IN SEARCH OF A PLACE SAID (CINDERLAD->SINGLEAD)
+3538-163622-0013-1528: I WOULD MUCH RATHER HAVE THE PRINCESS SAID CINDERLAD
+3538-163622-0014-1529: AND THUS THEY JOURNEYED ONWARDS A LONG LONG WAY
+3538-163622-0015-1530: WHEN THEY HAD GONE THUS FOR A LONG LONG WAY THE (FOAL AGAIN->FOLIGAN) ASKED DOST THOU SEE ANYTHING NOW
+3538-163622-0016-1531: YES NOW I SEE SOMETHING THAT IS WHITE SAID CINDERLAD
+3538-163622-0017-1532: IT LOOKS LIKE THE TRUNK OF A GREAT THICK BIRCH TREE
+3538-163622-0018-1533: (CINDERLAD->SIDNEYLOD) TRIED BUT COULD NOT DO IT SO HE HAD TO TAKE A DRAUGHT FROM THE PITCHER AND THEN ONE MORE AND AFTER THAT STILL ANOTHER AND THEN HE WAS ABLE TO WIELD THE SWORD WITH PERFECT EASE
+3538-163622-0019-1534: FOR WE ARE BROTHERS OF THE PRINCESS WHOM THOU ART TO HAVE WHEN THOU CANST TELL THE KING WHAT WE EAT AND DRINK BUT THERE IS A MIGHTY TROLL WHO HAS CAST A SPELL OVER US
+3538-163622-0020-1535: WHEN THEY HAD TRAVELLED A LONG LONG WAY THE (FOAL->FALL) SAID DOST THOU SEE ANYTHING
+3538-163622-0021-1536: AND NOW INQUIRED THE (FOAL SEEST THOU->WHOLE CEASE DONE) NOTHING NOW
+3538-163622-0022-1537: NOW THEN SAID THE (FOAL->FOOL) DOST THOU NOT SEE ANYTHING NOW
+3538-163622-0023-1538: THAT IS A RIVER SAID THE FOAL AND WE HAVE TO CROSS IT
+3538-163622-0024-1539: I HAVE DONE MY BEST REPLIED (CINDERLAD->SIR LAD)
+3538-163624-0000-1540: ONCE UPON A TIME THERE WAS A KING IN THE NORTH WHO HAD WON MANY WARS BUT NOW HE WAS OLD
+3538-163624-0001-1541: THE OLD KING WENT OUT AND (FOUGHT->THOUGHT) BRAVELY BUT AT LAST HIS SWORD BROKE AND HE WAS WOUNDED AND HIS MEN FLED
+3538-163624-0002-1542: BUT IN THE NIGHT WHEN THE BATTLE (WAS->IS) OVER HIS YOUNG WIFE CAME OUT (AND->IN) SEARCHED FOR HIM AMONG THE SLAIN AND AT LAST SHE FOUND HIM AND ASKED WHETHER HE MIGHT BE HEALED
+3538-163624-0003-1543: SO (HE ASKED->YES) THE QUEEN HOW DO YOU KNOW IN THE DARK OF NIGHT WHETHER THE HOURS ARE WEARING TO THE MORNING AND SHE SAID
+3538-163624-0004-1544: THEN THE OLD MAN SAID DRIVE ALL THE HORSES INTO THE RIVER AND CHOOSE THE ONE THAT SWIMS ACROSS
+3538-163624-0005-1545: HE (IS->HAS) NO BIGGER THAN OTHER DRAGONS SAID THE (TUTOR->TUDOR) AND IF (YOU WERE AS->YOURS) BRAVE AS YOUR FATHER YOU WOULD NOT FEAR HIM
+3538-163624-0006-1546: THEN THE PERSON WHO HAD KILLED OTTER WENT DOWN AND CAUGHT THE DWARF WHO OWNED ALL THE TREASURE AND TOOK IT FROM HIM
+3538-163624-0007-1547: ONLY ONE RING WAS LEFT WHICH THE DWARF WORE AND EVEN THAT WAS TAKEN FROM HIM
+3538-163624-0008-1548: SO (REGIN->RIGAN) MADE A SWORD AND (SIGURD->CIGAR) TRIED IT WITH (A->THE) BLOW ON A LUMP OF IRON AND THE SWORD BROKE
+3538-163624-0009-1549: THEN (SIGURD->CIGARET) WENT TO HIS MOTHER AND ASKED FOR THE BROKEN PIECES OF HIS FATHER'S BLADE AND GAVE THEM TO (REGIN->RIGAN)
+3538-163624-0010-1550: SO (SIGURD->CIGARET) SAID THAT SWORD WOULD DO
+3538-163624-0011-1551: THEN HE SAW THE TRACK WHICH THE DRAGON (*->HAD) MADE WHEN HE WENT TO A CLIFF TO DRINK AND THE TRACK WAS AS IF A GREAT RIVER HAD ROLLED ALONG AND LEFT A DEEP VALLEY
+3538-163624-0012-1552: BUT (SIGURD->CIGARET) WAITED TILL HALF OF HIM HAD CRAWLED OVER THE PIT AND THEN HE THRUST THE SWORD (GRAM->GRAHAM) RIGHT INTO HIS VERY HEART
+3538-163624-0013-1553: (SIGURD->CIGARET) SAID I WOULD TOUCH NONE OF IT IF BY LOSING IT I SHOULD NEVER DIE
+3538-163624-0014-1554: BUT ALL MEN DIE AND NO BRAVE MAN (LETS->LET'S) DEATH FRIGHTEN HIM FROM HIS DESIRE
+3538-163624-0015-1555: (DIE->GUY) THOU (FAFNIR->FAFNER) AND THEN (FAFNIR->STAFFNER) DIED
+3538-163624-0016-1556: THEN (SIGURD->CIGAR) RODE BACK AND MET (REGIN->RIGAN) AND (REGIN->RIGAN) ASKED HIM TO ROAST (FAFNIR'S->FAFNER'S) HEART AND LET HIM TASTE OF IT
+3538-163624-0017-1557: SO (SIGURD->SIR GOOD) PUT THE HEART OF (FAFNIR->FAFFNER) ON A STAKE AND ROASTED IT
+3538-163624-0018-1558: (THERE IS SIGURD->THERE'S CIGARET) ROASTING (FAFNIR'S->FAFTENNER'S) HEART FOR ANOTHER WHEN HE SHOULD TASTE OF IT HIMSELF AND LEARN ALL WISDOM
+3538-163624-0019-1559: THAT LET HIM DO (AND->*) THEN RIDE OVER (HINDFELL->HINFELD) TO THE PLACE WHERE (BRYNHILD->BRINEHILL) SLEEPS
+3538-163624-0020-1560: THERE MUST SHE SLEEP TILL THOU (COMEST->COMES) FOR HER WAKING RISE UP AND RIDE FOR NOW SURE SHE WILL SWEAR THE VOW FEARLESS OF BREAKING
+3538-163624-0021-1561: THEN HE TOOK THE HELMET OFF THE HEAD OF THE SLEEPER AND BEHOLD SHE WAS A MOST BEAUTIFUL LADY
+3538-163624-0022-1562: THEN (SIGURD->CIGARET) RODE AWAY AND HE CAME TO THE HOUSE OF A KING WHO HAD A FAIR DAUGHTER
+3538-163624-0023-1563: (THEN BRYNHILD'S->WHEN BURNHIL'S) FATHER TOLD (GUNNAR->GUNNER) THAT SHE WOULD MARRY NONE BUT HIM WHO COULD RIDE THE FLAME IN FRONT OF HER ENCHANTED TOWER AND (THITHER->THAT AS) THEY RODE AND (GUNNAR->GUTTER) SET HIS HORSE AT THE FLAME BUT HE WOULD NOT FACE IT
+3538-163624-0024-1564: FOR ONE DAY WHEN (BRYNHILD->BURNEHELD) AND (GUDRUN->GUNDRON) WERE BATHING (BRYNHILD WADED->BURNEHALD WAITED) FARTHEST OUT INTO THE RIVER AND SAID SHE DID THAT TO SHOW SHE WAS (GUIRUN'S->GUNDRUE) SUPERIOR
+3538-163624-0025-1565: FOR HER HUSBAND SHE SAID HAD RIDDEN THROUGH THE FLAME WHEN NO OTHER MAN DARED FACE IT
+3538-163624-0026-1566: NOT LONG TO WAIT HE SAID TILL THE BITTER SWORD STANDS FAST IN MY HEART AND THOU (WILL->WILT) NOT LIVE LONG WHEN I AM DEAD
+367-130732-0000-1466: LOBSTERS AND LOBSTERS
+367-130732-0001-1467: WHEN IS A LOBSTER NOT A LOBSTER WHEN IT IS A CRAYFISH
+367-130732-0002-1468: THIS QUESTION (AND->IN) ANSWER MIGHT WELL GO INTO THE (PRIMER->PRIMARY) OF INFORMATION FOR (THOSE WHO COME TO SAN->LUCIKAM THE SENT) FRANCISCO FROM THE EAST FOR WHAT IS CALLED A LOBSTER IN (SAN FRANCISCO->FRITZO) IS NOT A (LOBSTER->LOBSURD) AT ALL BUT A CRAYFISH
+367-130732-0003-1469: THE PACIFIC (CRAYFISH HOWEVER SERVES->CRAYFISHHORESERVES) EVERY PURPOSE AND WHILE MANY CONTEND THAT ITS MEAT IS NOT SO DELICATE (IN FLAVOR->AND FLARE) AS THAT OF ITS EASTERN COUSIN THE (CALIFORNIAN WILL AS->CALIFORNIA WALLA) STRENUOUSLY (INSIST THAT->INSISTS AND) IT IS BETTER BUT OF COURSE SOMETHING MUST ALWAYS BE ALLOWED FOR THE PATRIOTISM OF THE (CALIFORNIAN->CALIFORNI)
+367-130732-0004-1470: A BOOK COULD BE WRITTEN ABOUT THIS RESTAURANT AND THEN ALL WOULD NOT BE TOLD FOR ALL ITS SECRETS CAN NEVER BE KNOWN
+367-130732-0005-1471: IT WAS HERE THAT MOST MAGNIFICENT DINNERS WERE ARRANGED IT WAS HERE THAT EXTRAORDINARY DISHES WERE (CONCOCTED->CALLED COCTED) BY (CHEFS->CHEFTS) OF (WORLD WIDE->WOOLWRIGHT) FAME IT WAS HERE THAT LOBSTER (A LA NEWBERG->ALENUBERG) REACHED ITS HIGHEST PERFECTION AND THIS IS THE RECIPE THAT WAS (FOLLOWED->FOLLOW) WHEN IT WAS PREPARED IN THE (DELMONICO->DEMONICO)
+367-130732-0006-1472: LOBSTER (A LA->OLLA) NEWBERG
+367-130732-0007-1473: ONE POUND OF (LOBSTER->LOBS TO) MEAT ONE TEASPOONFUL OF BUTTER ONE HALF PINT OF CREAM YOLKS OF FOUR EGGS ONE WINE GLASS OF SHERRY LOBSTER FAT
+367-130732-0008-1474: PUT THIS IN A DOUBLE (BOILER->WHIRLER) AND LET COOK UNTIL THICK STIRRING CONSTANTLY
+367-130732-0009-1475: SERVE IN A CHAFING DISH WITH (THIN SLICES->FLITTON SIZES) OF DRY TOAST
+367-130732-0010-1476: KING OF (SHELL FISH->SHELLFISH)
+367-130732-0011-1477: ONE HAS TO COME TO (SAN->SENT) FRANCISCO TO PARTAKE OF THE KING OF (SHELL FISH->SHELLFISH) THE MAMMOTH PACIFIC CRAB
+367-130732-0012-1478: I SAY COME TO SAN FRANCISCO ADVISEDLY FOR WHILE THE CRAB IS FOUND ALL ALONG THE COAST IT IS PREPARED NOWHERE SO DELICIOUSLY AS IN SAN FRANCISCO
+367-130732-0013-1479: (GOBEY'S PASSED->GOBY'S PASS) WITH THE FIRE AND THE LITTLE RESTAURANT BEARING HIS NAME AND IN CHARGE OF HIS WIDOW IN UNION SQUARE AVENUE HAS NOT ATTAINED THE FAME OF THE OLD PLACE
+367-130732-0014-1480: IT IS POSSIBLE THAT SHE KNOWS THE SECRET OF PREPARING CRAB AS IT WAS PREPARED IN THE (GOBEY'S->GOBIES) OF BEFORE THE FIRE BUT HIS (PRESTIGE->BESIEGE) DID NOT DESCEND TO HER
+367-130732-0015-1481: (GOBEY'S CRAB STEW->GOBIAS CRABS DO)
+367-130732-0016-1482: TAKE THE MEAT OF ONE LARGE CRAB SCRAPING OUT ALL (OF->*) THE (FAT->BAT) FROM THE SHELL
+367-130732-0017-1483: SOAK THE CRAB (MEAT->ME) IN THE SHERRY TWO HOURS BEFORE COOKING
+367-130732-0018-1484: CHOP FINE THE ONION (SWEET->SWEEP) PEPPER (AND TOMATO->INTO METAL) WITH THE ROSEMARY
+367-130732-0019-1485: (HEAT THIS->HEATLESS) IN A (STEWPAN->STEWPENT) AND WHEN SIMMERING ADD THE SHERRY AND CRAB (MEAT->ME) AND LET ALL COOK TOGETHER WITH (A->THE) SLOW FIRE FOR EIGHT MINUTES
+367-130732-0020-1486: SERVE IN (A->THE) CHAFING DISH WITH TOASTED CRACKERS OR THIN SLICES OF TOASTED BREAD
+367-130732-0021-1487: LOBSTER (IN->AND) MINIATURE
+367-130732-0022-1488: SO FAR IT HAS BEEN USED MOSTLY FOR GARNISHMENT OF OTHER DISHES AND IT IS ONLY RECENTLY THAT THE (HOF BRAU->WHOLE BROW) HAS BEEN MAKING (A SPECIALTY->ESPECIALTY) OF THEM
+367-130732-0023-1489: ALL (OF->*) THE BETTER CLASS RESTAURANTS HOWEVER WILL SERVE THEM IF YOU ORDER THEM
+367-130732-0024-1490: THIS IS THE RECIPE FOR EIGHT PEOPLE AND IT IS WELL (*->IT) WORTH TRYING IF YOU ARE GIVING A DINNER OF IMPORTANCE
+367-130732-0025-1491: (BISQUE->DISK) OF CRAWFISH
+367-130732-0026-1492: TAKE THIRTY CRAWFISH FROM WHICH REMOVE THE GUT CONTAINING THE GALL IN THE FOLLOWING MANNER TAKE FIRM HOLD OF THE (CRAWFISH->CISH) WITH THE LEFT HAND SO AS TO AVOID BEING PINCHED BY ITS (CLAWS->CLOTH) WITH THE THUMB AND FOREFINGER OF THE RIGHT HAND PINCH THE EXTREME END OF THE CENTRAL FIN OF THE TAIL AND WITH A SUDDEN JERK THE GUT WILL BE WITHDRAWN
+367-130732-0027-1493: (MINCE OR->MINSER) CUT INTO SMALL DICE A CARROT (AN->AND) ONION ONE HEAD OF CELERY AND A FEW PARSLEY ROOTS AND TO THESE (ADD->AT) A BAY LEAF A SPRIG OF THYME A LITTLE (MINIONETTE->MINOR NUT) PEPPER AND TWO (OUNCES->OUNCE) OF BUTTER
+367-130732-0028-1494: PUT THESE INGREDIENTS INTO A STEWPAN AND FRY THEM TEN MINUTES THEN THROW IN THE (CRAWFISH->CROPPISH) AND POUR ON THEM HALF A BOTTLE OF FRENCH WHITE WINE
+367-130732-0029-1495: ALLOW (THIS->US) TO BOIL AND THEN ADD A QUART OF STRONG (CONSOMME->CONSUM) AND LET ALL CONTINUE BOILING FOR HALF AN HOUR
+367-130732-0030-1496: PICK OUT THE CRAWFISH AND STRAIN THE BROTH THROUGH A NAPKIN BY PRESSURE INTO A BASIN IN ORDER TO EXTRACT ALL THE ESSENCE FROM THE VEGETABLES
+367-130732-0031-1497: PICK THE SHELLS (OFF->OF) TWENTY FIVE OF THE CRAWFISH TAILS TRIM THEM NEATLY AND SET THEM ASIDE UNTIL (WANTED->WANTON)
+367-130732-0032-1498: RESERVE SOME OF THE SPAWN ALSO (HALF OF->HAPPEN) THE BODY SHELLS WITH WHICH TO MAKE THE (CRAWFISH->COFFISH) BUTTER TO FINISH THE SOUP
+367-130732-0033-1499: THIS BUTTER IS MADE AS FOLLOWS PLACE THE SHELLS (ON->IN) A BAKING SHEET IN THE OVEN TO DRY LET THE SHELLS COOL AND THEN POUND THEM IN A MORTAR WITH A LITTLE LOBSTER (CORAL->COAL) AND FOUR OUNCES OF FRESH BUTTER THOROUGHLY BRUISING THE WHOLE TOGETHER SO AS TO MAKE A FINE PASTE
+367-293981-0000-1445: I SWEAR (IT->*) ANSWERED SANCHO
+367-293981-0001-1446: I SAY SO CONTINUED DON QUIXOTE BECAUSE I HATE TAKING AWAY (ANYONE'S->ANY ONE'S) GOOD NAME
+367-293981-0002-1447: I SAY REPLIED SANCHO THAT I SWEAR TO HOLD MY TONGUE ABOUT IT TILL THE END OF YOUR (WORSHIP'S DAYS->WORSHIP STAYS) AND (GOD->GONE) GRANT I MAY BE ABLE TO LET IT OUT (TOMORROW->TO MORROW) +367-293981-0003-1448: THOUGH YOUR WORSHIP WAS NOT SO BADLY OFF HAVING IN YOUR ARMS (THAT INCOMPARABLE->THE INN COMPARABLE) BEAUTY YOU SPOKE OF BUT I WHAT DID I HAVE EXCEPT THE HEAVIEST (WHACKS->WAX THAT) I THINK I HAD IN ALL MY LIFE +367-293981-0004-1449: UNLUCKY ME AND THE MOTHER THAT BORE ME +367-293981-0005-1450: DIDN'T I SAY SO WORSE LUCK TO MY LINE SAID SANCHO +367-293981-0006-1451: IT CANNOT BE THE (MOOR->MORE) ANSWERED DON QUIXOTE FOR THOSE UNDER ENCHANTMENT DO NOT LET THEMSELVES BE SEEN BY ANYONE +367-293981-0007-1452: IF THEY (DON'T->DO NOT) LET THEMSELVES BE SEEN THEY LET THEMSELVES BE FELT SAID SANCHO IF NOT LET MY (SHOULDERS->SHOULDER) SPEAK TO THE POINT +367-293981-0008-1453: (MINE COULD->MIKE HAD) SPEAK TOO SAID DON QUIXOTE BUT THAT IS NOT A (SUFFICIENT->SUSPICIENT) REASON FOR BELIEVING THAT WHAT WE SEE IS THE ENCHANTED MOOR +367-293981-0009-1454: THE (OFFICER->OFFICERS) TURNED (TO HIM->ROOM) AND SAID WELL HOW GOES (IT->A) GOOD MAN +367-293981-0010-1455: SANCHO GOT UP WITH PAIN ENOUGH IN HIS BONES AND WENT (AFTER->OUT TO) THE INNKEEPER IN THE DARK (AND->IN) MEETING THE OFFICER WHO WAS LOOKING TO SEE WHAT HAD BECOME OF HIS ENEMY HE SAID TO HIM (SENOR->SIGNOR) WHOEVER YOU ARE DO US THE FAVOUR AND KINDNESS TO GIVE US A LITTLE ROSEMARY OIL SALT AND (WINE->WHITE) FOR IT IS (WANTED->WATER) TO CURE ONE OF (THE->OUR) BEST KNIGHTS ERRANT ON EARTH WHO LIES ON YONDER BED WOUNDED BY THE HANDS OF THE ENCHANTED MOOR THAT IS IN THIS INN +367-293981-0011-1456: TO BE BRIEF HE TOOK THE (MATERIALS->MATERIORS) OF WHICH HE MADE A COMPOUND MIXING THEM ALL (AND->*) BOILING THEM A GOOD WHILE (*->IT) UNTIL IT SEEMED TO HIM THEY HAD COME TO PERFECTION +367-293981-0012-1457: SANCHO PANZA WHO ALSO REGARDED THE AMENDMENT OF HIS MASTER AS MIRACULOUS BEGGED HIM TO GIVE HIM WHAT WAS LEFT IN THE (PIGSKIN->PICTION) WHICH WAS NO SMALL QUANTITY +367-293981-0013-1458: DON QUIXOTE CONSENTED AND HE TAKING IT WITH BOTH HANDS IN GOOD FAITH AND WITH A BETTER WILL GULPED (*->IT) DOWN AND DRAINED OFF VERY LITTLE LESS THAN HIS MASTER +367-293981-0014-1459: IF YOUR WORSHIP KNEW THAT RETURNED SANCHO WOE BETIDE ME (AND->IN) ALL MY KINDRED WHY DID YOU LET ME TASTE (IT->HIM) +367-293981-0015-1460: SEARCH YOUR MEMORY AND IF YOU FIND ANYTHING OF THIS KIND YOU NEED ONLY TELL ME OF IT AND I PROMISE YOU BY THE ORDER OF KNIGHTHOOD WHICH I HAVE RECEIVED TO PROCURE YOU SATISFACTION (AND->IN) REPARATION TO THE UTMOST OF YOUR DESIRE +367-293981-0016-1461: THEN THIS IS AN (INN->IN) SAID DON QUIXOTE +367-293981-0017-1462: (AND->IN) A VERY RESPECTABLE ONE SAID THE INNKEEPER +367-293981-0018-1463: THE CRIES OF THE POOR (BLANKETED->BLANKET) WRETCH WERE SO LOUD THAT THEY REACHED THE EARS OF HIS MASTER WHO HALTING TO LISTEN ATTENTIVELY WAS PERSUADED THAT SOME NEW ADVENTURE WAS COMING UNTIL HE CLEARLY PERCEIVED THAT IT WAS HIS SQUIRE WHO UTTERED THEM +367-293981-0019-1464: HE SAW HIM RISING AND FALLING IN THE AIR WITH SUCH GRACE AND NIMBLENESS THAT HAD HIS RAGE ALLOWED HIM IT IS MY BELIEF HE WOULD HAVE LAUGHED +367-293981-0020-1465: SANCHO TOOK IT AND AS HE WAS RAISING IT TO HIS MOUTH HE WAS STOPPED BY THE CRIES OF HIS MASTER EXCLAIMING SANCHO MY SON DRINK NOT WATER DRINK IT (NOT->OUT) MY SON FOR IT WILL KILL THEE SEE HERE I (HAVE->HAD) THE BLESSED BALSAM AND HE HELD UP THE FLASK OF LIQUOR AND WITH DRINKING TWO DROPS (OF IT->WHAT) THOU 
WILT CERTAINLY BE RESTORED +3764-168670-0000-1666: THE STRIDES OF A LAME MAN (ARE->*) LIKE THE OGLING GLANCES OF A ONE EYED MAN THEY DO NOT REACH THEIR GOAL VERY PROMPTLY +3764-168670-0001-1667: COSETTE HAD WAKED UP +3764-168670-0002-1668: JEAN VALJEAN HAD PLACED HER NEAR THE FIRE +3764-168670-0003-1669: YOU WILL WAIT FOR ME AT A LADY'S HOUSE I SHALL COME TO FETCH YOU +3764-168670-0004-1670: EVERYTHING IS ARRANGED AND NOTHING IS SAID FAUCHELEVENT +3764-168670-0005-1671: I HAVE PERMISSION TO BRING YOU IN BUT BEFORE BRINGING YOU IN YOU MUST BE GOT OUT +3764-168670-0006-1672: THAT'S WHERE THE DIFFICULTY LIES +3764-168670-0007-1673: IT IS EASY ENOUGH WITH THE CHILD YOU WILL CARRY HER OUT +3764-168670-0008-1674: AND SHE WILL HOLD HER TONGUE I ANSWER FOR THAT +3764-168670-0009-1675: (FAUCHELEVENT->FOR SCHLEVENT) GRUMBLED MORE TO HIMSELF THAN TO JEAN VALJEAN +3764-168670-0010-1676: YOU UNDERSTAND FATHER (MADELEINE->MADELEIN) THE GOVERNMENT WILL NOTICE IT +3764-168670-0011-1677: JEAN VALJEAN STARED HIM STRAIGHT IN THE EYE AND THOUGHT THAT HE WAS RAVING +3764-168670-0012-1678: (FAUCHELEVENT->FOUCHELEVENT) WENT ON +3764-168670-0013-1679: IT IS TO MORROW THAT I AM TO BRING YOU IN THE (PRIORESS->PRIORS) EXPECTS YOU +3764-168670-0014-1680: THEN HE EXPLAINED TO JEAN VALJEAN THAT THIS WAS HIS RECOMPENSE FOR A SERVICE WHICH HE (FAUCHELEVENT->THRAUCHELEVENT) WAS (TO RENDER->SURRENDER) TO THE COMMUNITY +3764-168670-0015-1681: THAT THE NUN WHO HAD DIED THAT MORNING HAD REQUESTED TO BE BURIED IN THE COFFIN WHICH HAD SERVED HER FOR A BED AND INTERRED IN THE VAULT UNDER THE ALTAR OF THE CHAPEL +3764-168670-0016-1682: THAT THE (PRIORESS->PRIORS) AND THE VOCAL MOTHERS INTENDED TO FULFIL THE WISH OF THE DECEASED +3764-168670-0017-1683: THAT HE (FAUCHELEVENT->FOR SCHLEVENT) WAS TO NAIL UP THE COFFIN IN THE CELL RAISE THE STONE IN THE CHAPEL AND (LOWER->BLOW) THE CORPSE INTO THE VAULT +3764-168670-0018-1684: AND THEN THAT THERE WAS ANOTHER THE EMPTY COFFIN +3764-168670-0019-1685: WHAT IS THAT EMPTY COFFIN +3764-168670-0020-1686: ASKED JEAN VALJEAN FAUCHELEVENT REPLIED +3764-168670-0021-1687: WHAT COFFIN WHAT ADMINISTRATION +3764-168670-0022-1688: (FAUCHELEVENT->SO SLAVENT) WHO WAS SEATED SPRANG UP AS THOUGH A BOMB HAD BURST UNDER HIS CHAIR YOU +3764-168670-0023-1689: YOU KNOW (FAUCHELEVENT->FOURCHELEVENT) WHAT YOU HAVE SAID MOTHER CRUCIFIXION IS DEAD +3764-168670-0024-1690: AND I ADD AND FATHER (MADELEINE->MADELEIN) IS BURIED AH +3764-168670-0025-1691: YOU ARE NOT LIKE OTHER MEN FATHER MADELEINE +3764-168670-0026-1692: THIS OFFERS THE MEANS BUT GIVE ME SOME INFORMATION IN THE FIRST PLACE +3764-168670-0027-1693: HOW LONG IS THE COFFIN SIX FEET +3764-168670-0028-1694: IT IS A CHAMBER ON THE GROUND FLOOR WHICH HAS A GRATED WINDOW OPENING ON THE GARDEN WHICH IS CLOSED ON THE OUTSIDE BY A SHUTTER AND TWO DOORS ONE LEADS INTO THE CONVENT THE OTHER INTO THE CHURCH (WHAT CHURCH->A WATCH) +3764-168670-0029-1695: THE CHURCH IN THE STREET (*->AT) THE CHURCH WHICH ANY ONE CAN ENTER +3764-168670-0030-1696: HAVE YOU THE KEYS TO THOSE TWO DOORS +3764-168670-0031-1697: NO I HAVE THE KEY TO THE DOOR WHICH COMMUNICATES WITH THE CONVENT THE PORTER HAS THE KEY TO THE DOOR WHICH COMMUNICATES WITH THE CHURCH +3764-168670-0032-1698: ONLY TO ALLOW THE UNDERTAKER'S MEN TO ENTER WHEN THEY COME TO GET THE COFFIN +3764-168670-0033-1699: WHO NAILS UP THE COFFIN I DO +3764-168670-0034-1700: WHO SPREADS THE (PALL->POOL) OVER IT +3764-168670-0035-1701: NOT ANOTHER MAN EXCEPT THE POLICE DOCTOR CAN ENTER THE (DEAD ROOM->DEADROOM) THAT IS EVEN 
WRITTEN ON THE WALL +3764-168670-0036-1702: COULD YOU HIDE ME IN THAT ROOM TO NIGHT WHEN EVERY ONE IS ASLEEP +3764-168670-0037-1703: ABOUT THREE O'CLOCK IN THE AFTERNOON +3764-168670-0038-1704: I SHALL BE HUNGRY I WILL BRING YOU SOMETHING +3764-168670-0039-1705: YOU CAN COME AND NAIL ME UP IN THE COFFIN AT TWO O'CLOCK +3764-168670-0040-1706: (FAUCHELEVENT->FUCHELEVENT) RECOILED AND CRACKED HIS FINGER JOINTS BUT THAT IS IMPOSSIBLE +3764-168670-0041-1707: BAH IMPOSSIBLE TO TAKE A HAMMER AND DRIVE SOME NAILS IN A PLANK +3764-168670-0042-1708: JEAN VALJEAN HAD BEEN IN WORSE STRAITS THAN THIS +3764-168670-0043-1709: ANY MAN WHO HAS BEEN A PRISONER UNDERSTANDS HOW TO CONTRACT HIMSELF TO FIT THE DIAMETER OF THE ESCAPE +3764-168670-0044-1710: WHAT DOES NOT A MAN UNDERGO FOR THE SAKE OF A CURE +3764-168670-0045-1711: TO HAVE HIMSELF NAILED UP IN A CASE AND CARRIED OFF LIKE A (BALE->BAIL) OF GOODS TO LIVE FOR A LONG TIME IN A BOX TO FIND AIR WHERE THERE IS NONE TO ECONOMIZE HIS BREATH FOR HOURS TO KNOW HOW TO STIFLE WITHOUT DYING THIS WAS ONE OF JEAN VALJEAN'S GLOOMY TALENTS +3764-168670-0046-1712: YOU SURELY MUST HAVE A GIMLET YOU WILL MAKE A FEW HOLES HERE AND THERE AROUND MY MOUTH AND YOU WILL NAIL THE TOP (PLANK ON->PLANCORN) LOOSELY GOOD AND WHAT IF YOU SHOULD HAPPEN TO COUGH OR TO SNEEZE +3764-168670-0047-1713: A MAN WHO IS MAKING HIS ESCAPE DOES NOT COUGH OR SNEEZE +3764-168670-0048-1714: WHO IS THERE WHO HAS NOT SAID TO A CAT DO COME IN +3764-168670-0049-1715: THE OVER PRUDENT (CATS->COUNTS) AS THEY ARE AND BECAUSE THEY ARE CATS SOMETIMES INCUR MORE DANGER THAN THE AUDACIOUS +3764-168670-0050-1716: BUT JEAN VALJEAN'S COOLNESS PREVAILED OVER HIM IN SPITE OF HIMSELF HE GRUMBLED +3764-168670-0051-1717: IF YOU ARE SURE OF COMING OUT OF THE COFFIN ALL RIGHT I AM SURE OF GETTING (YOU->*) OUT OF THE GRAVE +3764-168670-0052-1718: AN OLD FELLOW OF THE OLD SCHOOL THE GRAVE DIGGER PUTS THE CORPSES IN THE GRAVE AND I PUT THE GRAVE DIGGER IN MY POCKET +3764-168670-0053-1719: I SHALL FOLLOW THAT IS MY BUSINESS +3764-168670-0054-1720: THE (HEARSE->HOUSE) HALTS THE (UNDERTAKER'S->UNDERTAKERS) MEN (KNOT->NOT) A ROPE AROUND YOUR COFFIN AND LOWER YOU DOWN +3764-168670-0055-1721: THE (PRIEST SAYS->PRIESTS AS) THE PRAYERS MAKES THE SIGN OF THE CROSS SPRINKLES THE HOLY WATER AND TAKES HIS DEPARTURE +3764-168670-0056-1722: ONE OF TWO THINGS WILL HAPPEN HE WILL EITHER BE SOBER OR HE WILL NOT BE SOBER +3764-168670-0057-1723: THAT IS SETTLED FATHER FAUCHELEVENT ALL WILL GO WELL +3764-168671-0000-1724: ON THE FOLLOWING DAY AS THE SUN WAS DECLINING THE VERY RARE (PASSERS->PASSES) BY ON THE BOULEVARD (DU MAINE->DUMEN) PULLED OFF THEIR HATS TO AN OLD FASHIONED HEARSE ORNAMENTED WITH SKULLS CROSS BONES AND TEARS +3764-168671-0001-1725: THIS HEARSE CONTAINED A COFFIN COVERED WITH A WHITE CLOTH OVER WHICH SPREAD A LARGE BLACK CROSS LIKE A HUGE CORPSE WITH DROOPING ARMS +3764-168671-0002-1726: (A MOURNING->THE MORNING) COACH IN WHICH COULD BE SEEN A PRIEST IN HIS SURPLICE AND A CHOIR BOY IN HIS RED CAP FOLLOWED +3764-168671-0003-1727: BEHIND IT CAME AN OLD MAN IN THE GARMENTS OF A LABORER WHO LIMPED ALONG +3764-168671-0004-1728: THE GRAVE DIGGERS BEING THUS BOUND TO SERVICE IN THE EVENING IN SUMMER AND AT NIGHT IN WINTER IN THIS CEMETERY THEY WERE SUBJECTED TO A SPECIAL DISCIPLINE +3764-168671-0005-1729: THESE GATES THEREFORE SWUNG INEXORABLY ON THEIR HINGES AT THE INSTANT WHEN THE SUN DISAPPEARED BEHIND THE DOME OF THE (INVALIDES->INVALID) +3764-168671-0006-1730: DAMPNESS WAS INVADING IT THE FLOWERS WERE DESERTING IT 
+3764-168671-0007-1731: THE BOURGEOIS DID NOT CARE MUCH ABOUT BEING BURIED IN THE (VAUGIRARD->VIGOR) IT HINTED AT POVERTY (PERE LACHAISE->BAT LACHES) IF YOU PLEASE +3764-168671-0008-1732: TO BE BURIED IN (PERE LACHAISE->PETERS) IS EQUIVALENT TO HAVING FURNITURE OF MAHOGANY IT IS RECOGNIZED AS ELEGANT +3764-168671-0009-1733: THE INTERMENT OF MOTHER CRUCIFIXION IN THE VAULT UNDER THE ALTAR THE EXIT OF COSETTE THE INTRODUCTION OF JEAN VALJEAN (TO->INTO) THE DEAD ROOM ALL HAD BEEN EXECUTED WITHOUT DIFFICULTY AND THERE HAD BEEN NO HITCH LET US REMARK IN PASSING THAT THE BURIAL OF MOTHER CRUCIFIXION UNDER THE ALTAR OF THE CONVENT IS A PERFECTLY VENIAL OFFENCE IN OUR SIGHT +3764-168671-0010-1734: IT IS ONE OF THE FAULTS WHICH RESEMBLE A DUTY +3764-168671-0011-1735: THE NUNS HAD COMMITTED IT NOT ONLY WITHOUT DIFFICULTY BUT EVEN WITH THE APPLAUSE OF THEIR OWN CONSCIENCES +3764-168671-0012-1736: IN THE CLOISTER WHAT IS CALLED THE GOVERNMENT IS ONLY AN INTERMEDDLING WITH AUTHORITY AN INTERFERENCE WHICH IS ALWAYS QUESTIONABLE +3764-168671-0013-1737: MAKE AS MANY LAWS AS YOU PLEASE MEN BUT KEEP THEM FOR YOURSELVES +3764-168671-0014-1738: A PRINCE IS NOTHING IN THE PRESENCE OF A PRINCIPLE +3764-168671-0015-1739: (FAUCHELEVENT->FLUCHELEVENT) LIMPED ALONG BEHIND THE HEARSE IN A VERY CONTENTED FRAME OF MIND +3764-168671-0016-1740: JEAN VALJEAN'S COMPOSURE WAS ONE OF THOSE POWERFUL TRANQUILLITIES WHICH ARE CONTAGIOUS +3764-168671-0017-1741: WHAT REMAINED TO BE DONE WAS A MERE NOTHING +3764-168671-0018-1742: HE PLAYED WITH FATHER (MESTIENNE->MESTINE) +3764-168671-0019-1743: HE DID WHAT HE LIKED WITH HIM HE MADE HIM DANCE ACCORDING TO HIS WHIM +3764-168671-0020-1744: THE PERMISSION FOR INTERMENT MUST BE EXHIBITED +3764-168671-0021-1745: HE WAS A SORT OF LABORING MAN WHO WORE A WAISTCOAT WITH LARGE POCKETS AND CARRIED A MATTOCK UNDER HIS ARM +3764-168671-0022-1746: THE MAN REPLIED THE GRAVE DIGGER +3764-168671-0023-1747: THE GRAVE DIGGER YES +3764-168671-0024-1748: YOU I +3764-168671-0025-1749: FATHER (MESTIENNE->MISS CHANN) IS THE GRAVE DIGGER HE WAS +3764-168671-0026-1750: (FAUCHELEVENT->FUSSION OF WHAT) HAD EXPECTED ANYTHING BUT THIS THAT A GRAVE DIGGER COULD DIE +3764-168671-0027-1751: IT IS TRUE NEVERTHELESS THAT GRAVE DIGGERS DO DIE THEMSELVES +3764-168671-0028-1752: HE HAD HARDLY THE STRENGTH TO STAMMER +3764-168671-0029-1753: BUT HE PERSISTED FEEBLY FATHER (MESTIENNE->MISSED HERE) IS THE GRAVE DIGGER +3764-168671-0030-1754: DO YOU KNOW WHO LITTLE (FATHER LENOIR IS->FATHERLAND WARRITZ) HE IS A JUG OF RED WINE +3764-168671-0031-1755: BUT (YOU ARE->YOU'RE) A JOLLY FELLOW TOO +3764-168671-0032-1756: ARE YOU NOT COMRADE WE'LL GO AND HAVE A DRINK TOGETHER PRESENTLY +3764-168671-0033-1757: THE MAN REPLIED +3764-168671-0034-1758: HE LIMPED MORE OUT OF ANXIETY THAN FROM INFIRMITY +3764-168671-0035-1759: THE GRAVE DIGGER WALKED ON IN FRONT OF HIM +3764-168671-0036-1760: FAUCHELEVENT PASSED THE UNEXPECTED (GRIBIER->CLAVIER) ONCE MORE IN REVIEW +3764-168671-0037-1761: (FAUCHELEVENT->FASHIONEVENT) WHO WAS ILLITERATE BUT VERY SHARP UNDERSTOOD THAT HE HAD TO DEAL WITH A FORMIDABLE SPECIES OF MAN WITH A FINE TALKER HE MUTTERED +3764-168671-0038-1762: (SO->MISS OH) FATHER (MESTIENNE->MESS TIEN) IS DEAD +3764-168671-0039-1763: THE MAN REPLIED COMPLETELY +3764-168671-0040-1764: THE GOOD GOD CONSULTED HIS (NOTE BOOK->NOTEBOOK) WHICH (SHOWS->SHARES) WHEN THE TIME IS UP IT WAS FATHER (MESTIENNE'S->MESTINE'S) TURN FATHER (MESTIENNE->MESS HE HAD) DIED +3764-168671-0041-1765: STAMMERED FAUCHELEVENT IT IS MADE 
+3764-168671-0042-1766: YOU ARE A PEASANT I AM A PARISIAN +3764-168671-0043-1767: (FAUCHELEVENT->FRESHEN) THOUGHT I AM LOST +3764-168671-0044-1768: THEY WERE ONLY A FEW TURNS OF THE WHEEL DISTANT FROM THE SMALL ALLEY LEADING TO THE (NUNS->NUN'S) CORNER +3764-168671-0045-1769: AND HE ADDED WITH THE SATISFACTION OF A SERIOUS MAN WHO IS TURNING A PHRASE WELL +3764-168671-0046-1770: FORTUNATELY THE SOIL WHICH WAS LIGHT AND WET WITH THE WINTER RAINS CLOGGED THE WHEELS AND RETARDED ITS SPEED +3764-168671-0047-1771: MY FATHER WAS A PORTER AT THE (PRYTANEUM->BRITTANNIUM) TOWN HALL +3764-168671-0048-1772: BUT HE HAD REVERSES HE HAD LOSSES (ON CHANGE->UNCHANGED) I WAS OBLIGED TO RENOUNCE THE PROFESSION OF AUTHOR BUT I AM STILL A PUBLIC WRITER +3764-168671-0049-1773: (SO->SIR) YOU ARE NOT A GRAVE DIGGER THEN +3764-168671-0050-1774: RETURNED FAUCHELEVENT CLUTCHING AT THIS BRANCH FEEBLE AS IT WAS +3764-168671-0051-1775: HERE A REMARK BECOMES NECESSARY +3764-168671-0052-1776: (FAUCHELEVENT->A FISHE) WHATEVER HIS ANGUISH OFFERED A DRINK BUT HE DID NOT EXPLAIN HIMSELF ON ONE POINT WHO WAS TO PAY +3764-168671-0053-1777: THE GRAVE DIGGER WENT ON WITH (A->THE) SUPERIOR SMILE +3764-168671-0054-1778: ONE MUST EAT +3997-180294-0000-1800: THE DUKE COMES EVERY MORNING THEY WILL TELL HIM WHEN HE COMES THAT I AM ASLEEP AND PERHAPS HE WILL WAIT UNTIL I (WAKE->AWAKE) +3997-180294-0001-1801: YES BUT IF I SHOULD ALREADY ASK FOR SOMETHING WHAT +3997-180294-0002-1802: WELL DO IT FOR ME FOR I SWEAR TO YOU (THAT I->THY) DON'T LOVE YOU AS THE OTHERS HAVE LOVED YOU +3997-180294-0003-1803: THERE ARE BOLTS (ON->IN) THE DOOR WRETCH +3997-180294-0004-1804: I DON'T KNOW HOW IT IS BUT IT SEEMS TO ME AS IF I DO +3997-180294-0005-1805: (NOW->THOU) GO I CAN'T KEEP MY EYES OPEN +3997-180294-0006-1806: IT (SEEMED->SEEMS) TO ME AS IF THIS SLEEPING CITY (BELONGED->BELONGS) TO ME I SEARCHED MY MEMORY FOR THE NAMES OF THOSE WHOSE HAPPINESS I HAD ONCE ENVIED AND I COULD NOT RECALL ONE WITHOUT FINDING MYSELF THE HAPPIER +3997-180294-0007-1807: EDUCATION FAMILY FEELING THE SENSE OF DUTY THE FAMILY ARE STRONG SENTINELS BUT THERE ARE NO SENTINELS SO VIGILANT AS NOT TO BE DECEIVED BY A GIRL OF SIXTEEN TO WHOM NATURE BY THE VOICE OF THE MAN SHE LOVES GIVES THE FIRST (COUNSELS->COUNCIL) OF LOVE ALL THE MORE (ARDENT->ARDENTS) BECAUSE THEY SEEM SO PURE +3997-180294-0008-1808: THE MORE (A->*) GIRL BELIEVES IN GOODNESS THE MORE (EASILY->IS WE) WILL SHE GIVE WAY IF NOT TO HER LOVER AT LEAST TO LOVE FOR BEING WITHOUT MISTRUST SHE IS WITHOUT FORCE AND TO WIN HER LOVE (IS->AS) A TRIUMPH THAT CAN BE GAINED BY ANY YOUNG MAN OF FIVE AND TWENTY SEE HOW YOUNG GIRLS ARE WATCHED AND GUARDED +3997-180294-0009-1809: THEN HOW SURELY MUST THEY DESIRE THE WORLD WHICH IS HIDDEN FROM THEM (HOW SURELY->HOSTUALLY) MUST THEY FIND IT TEMPTING HOW SURELY MUST THEY LISTEN TO THE FIRST VOICE WHICH COMES TO TELL ITS SECRETS THROUGH THEIR BARS AND BLESS THE HAND WHICH IS THE FIRST TO RAISE A CORNER OF THE (MYSTERIOUS->MYSTERY) VEIL +3997-180294-0010-1810: WITH THEM THE BODY HAS WORN OUT THE SOUL THE SENSES (HAVE->HALF) BURNED UP THE HEART DISSIPATION HAS BLUNTED THE FEELINGS +3997-180294-0011-1811: THEY LOVE BY PROFESSION AND NOT BY INSTINCT +3997-180294-0012-1812: WHEN A CREATURE WHO HAS ALL HER PAST TO REPROACH HERSELF WITH IS TAKEN ALL AT ONCE BY A PROFOUND SINCERE IRRESISTIBLE LOVE OF WHICH SHE HAD NEVER FELT HERSELF CAPABLE WHEN SHE HAS CONFESSED HER LOVE HOW ABSOLUTELY THE MAN WHOM SHE LOVES DOMINATES HER +3997-180294-0013-1813: THEY KNOW NOT WHAT PROOF TO GIVE 
+3997-180294-0014-1814: IN ORDER TO DISTURB THE (LABOURERS->LABORERS) IN THE (FIELD->FIELDS) WAS ONE DAY DEVOURED BY A WOLF BECAUSE THOSE WHOM HE HAD SO OFTEN DECEIVED NO LONGER BELIEVED IN HIS CRIES FOR HELP +3997-180294-0015-1815: (IT->THIS) IS THE SAME WITH THESE UNHAPPY WOMEN WHEN THEY LOVE SERIOUSLY +3997-180294-0016-1816: BUT WHEN THE MAN WHO INSPIRES THIS REDEEMING LOVE IS GREAT ENOUGH IN SOUL TO RECEIVE IT WITHOUT REMEMBERING THE PAST WHEN HE GIVES HIMSELF UP TO IT WHEN IN SHORT HE LOVES AS HE IS LOVED THIS MAN (DRAINS->DREAMS) AT ONE (DRAUGHT->DROUGHT) ALL EARTHLY EMOTIONS AND AFTER SUCH A LOVE HIS HEART WILL BE CLOSED TO EVERY OTHER +3997-180294-0017-1817: BUT TO RETURN TO THE FIRST DAY OF MY (LIAISON->LEAR SONG) +3997-180294-0018-1818: WHEN I REACHED HOME I WAS IN A STATE OF MAD GAIETY +3997-180294-0019-1819: THE WOMAN BECOMES THE MAN'S MISTRESS AND LOVES HIM +3997-180294-0020-1820: HOW WHY +3997-180294-0021-1821: MY WHOLE BEING WAS EXALTED INTO JOY AT THE MEMORY OF THE WORDS WE HAD EXCHANGED DURING THAT FIRST NIGHT +3997-180294-0022-1822: HERE ARE MY ORDERS TO NIGHT AT (THE->A) VAUDEVILLE +3997-180294-0023-1823: COME DURING THE THIRD (ENTR'ACTE->AND TRACT) +3997-180294-0024-1824: THE BOXES FILLED ONE AFTER ANOTHER +3997-180294-0025-1825: ONLY ONE (REMAINED->REMAINS) EMPTY THE STAGE BOX +3997-180294-0026-1826: AT THE BEGINNING OF THE THIRD ACT I HEARD THE DOOR OF THE BOX ON WHICH MY EYES HAD BEEN ALMOST CONSTANTLY FIXED OPEN AND MARGUERITE APPEARED +3997-180294-0027-1827: (DID->THAT) SHE (LOVE->LOVED) ME ENOUGH TO BELIEVE THAT THE MORE BEAUTIFUL SHE (LOOKED->LOOKS) THE HAPPIER I SHOULD BE +3997-180294-0028-1828: WHAT IS THE MATTER WITH YOU TO NIGHT SAID MARGUERITE RISING AND COMING TO THE BACK OF THE BOX AND KISSING ME ON THE FOREHEAD +3997-180294-0029-1829: YOU SHOULD GO TO BED SHE REPLIED WITH THAT (IRONICAL AIR->IRONIC ERROR) WHICH WENT SO WELL WITH HER DELICATE AND WITTY FACE +3997-180294-0030-1830: WHERE AT HOME +3997-180294-0031-1831: YOU STILL LOVE ME CAN YOU ASK +3997-180294-0032-1832: BECAUSE YOU DON'T LIKE SEEING HIM +3997-180294-0033-1833: (NONETHELESS->NONE THE LESS) I WAS VERY UNHAPPY ALL THE REST OF THE EVENING AND WENT AWAY VERY SADLY AFTER HAVING SEEN PRUDENCE THE COUNT AND MARGUERITE (GET INTO->GAINED TO) THE CARRIAGE WHICH WAS (WAITING->WINNING) FOR THEM AT THE DOOR +3997-180297-0000-1834: I HAVE NOT COME TO HINDER YOU FROM LEAVING PARIS +3997-180297-0001-1835: YOU IN THE WAY MARGUERITE BUT HOW +3997-180297-0002-1836: WELL YOU MIGHT HAVE HAD A WOMAN HERE SAID PRUDENCE AND IT WOULD HARDLY HAVE BEEN AMUSING FOR HER TO SEE TWO MORE ARRIVE +3997-180297-0003-1837: DURING THIS REMARK MARGUERITE LOOKED AT ME ATTENTIVELY +3997-180297-0004-1838: MY DEAR PRUDENCE I ANSWERED YOU DO NOT KNOW WHAT YOU ARE SAYING +3997-180297-0005-1839: YES BUT BESIDES NOT WISHING TO PUT YOU OUT I WAS SURE THAT IF YOU CAME AS FAR AS MY DOOR YOU WOULD WANT TO COME UP AND AS I COULD NOT LET YOU I DID NOT WISH TO LET YOU GO AWAY BLAMING ME FOR SAYING NO +3997-180297-0006-1840: BECAUSE I AM WATCHED AND THE LEAST SUSPICION MIGHT (DO->TO) ME THE GREATEST HARM +3997-180297-0007-1841: IS THAT REALLY THE ONLY REASON +3997-180297-0008-1842: IF THERE WERE ANY OTHER I WOULD TELL YOU FOR WE ARE NOT TO HAVE ANY SECRETS FROM ONE ANOTHER NOW +3997-180297-0009-1843: (HONESTLY->ON THE STREET) DO YOU CARE FOR ME A LITTLE A GREAT DEAL +3997-180297-0010-1844: I FANCIED FOR A MOMENT THAT (I->IT) MIGHT GIVE MYSELF THAT HAPPINESS FOR SIX MONTHS YOU (WOULD->WILL) NOT HAVE IT YOU INSISTED ON KNOWING THE MEANS 
+3997-180297-0011-1845: WELL GOOD HEAVENS THE MEANS WERE EASY ENOUGH TO GUESS +3997-180297-0012-1846: I LISTENED AND I GAZED AT MARGUERITE WITH ADMIRATION +3997-180297-0013-1847: WHEN (I->THEY) THOUGHT THAT THIS MARVELLOUS CREATURE WHOSE FEET I HAD ONCE LONGED TO KISS WAS WILLING TO LET ME TAKE MY PLACE IN HER THOUGHTS (MY PART->BY PARTS) IN HER LIFE AND THAT I WAS NOT YET CONTENT WITH WHAT SHE GAVE ME I ASKED IF (MAN'S->MEN'S) DESIRE (HAS->HAD) INDEED LIMITS WHEN SATISFIED AS PROMPTLY AS MINE HAD BEEN IT REACHED AFTER SOMETHING FURTHER +3997-180297-0014-1848: TRULY SHE CONTINUED WE POOR CREATURES OF CHANCE HAVE FANTASTIC (DESIRES->DESIRE) AND INCONCEIVABLE LOVES +3997-180297-0015-1849: WE ARE NOT ALLOWED TO HAVE HEARTS UNDER PENALTY OF BEING HOOTED DOWN AND OF RUINING OUR CREDIT +3997-180297-0016-1850: WE NO LONGER BELONG TO OURSELVES +3997-180297-0017-1851: WE STAND FIRST IN THEIR SELF ESTEEM LAST IN THEIR ESTEEM +3997-180297-0018-1852: NEVER (DO THEY->DID HE) GIVE YOU ADVICE WHICH IS NOT (LUCRATIVE->LOOK ATTENTIVE) +3997-180297-0019-1853: IT MEANS LITTLE ENOUGH TO THEM THAT WE SHOULD HAVE TEN LOVERS EXTRA AS LONG AS THEY GET DRESSES OR A BRACELET OUT OF THEM AND THAT THEY CAN DRIVE (IN OUR->AND ARE) CARRIAGE FROM TIME TO TIME OR COME TO OUR BOX AT THE (THEATRE->FUTURE) +3997-180297-0020-1854: SUCH A MAN I FOUND IN THE DUKE BUT THE DUKE IS OLD AND OLD AGE NEITHER PROTECTS NOR CONSOLES +3997-180297-0021-1855: I THOUGHT I COULD ACCEPT THE LIFE WHICH HE OFFERED ME (BUT->OR) WHAT WOULD YOU HAVE +3997-180297-0022-1856: WHAT I LOVED IN YOU WAS NOT THE MAN WHO WAS BUT THE MAN WHO WAS GOING TO BE +3997-180297-0023-1857: MARGUERITE (TIRED->HIRED) OUT WITH THIS LONG CONFESSION THREW HERSELF BACK ON THE SOFA AND TO STIFLE A SLIGHT COUGH PUT UP HER HANDKERCHIEF TO HER LIPS AND FROM THAT TO HER EYES +3997-180297-0024-1858: MARGUERITE DO WITH ME AS YOU WILL I AM YOUR SLAVE YOUR DOG BUT IN THE NAME OF HEAVEN TEAR UP THE LETTER WHICH I WROTE TO YOU AND DO NOT MAKE ME LEAVE YOU TO MORROW IT WOULD KILL ME +3997-180297-0025-1859: MARGUERITE DREW THE LETTER FROM HER BOSOM AND HANDING IT TO ME WITH A SMILE OF INFINITE SWEETNESS SAID +3997-180297-0026-1860: HERE IT IS I HAVE BROUGHT IT BACK +3997-180297-0027-1861: I TORE THE LETTER INTO FRAGMENTS AND KISSED WITH TEARS THE HAND THAT (*->I) GAVE IT TO ME +3997-180297-0028-1862: LOOK HERE PRUDENCE DO YOU KNOW WHAT HE WANTS SAID MARGUERITE +3997-180297-0029-1863: HE WANTS YOU TO FORGIVE HIM +3997-180297-0030-1864: ONE HAS (TO->TWO) BUT HE (WANTS->ONCE) MORE THAN THAT WHAT THEN +3997-180297-0031-1865: I EMBRACED MARGUERITE UNTIL SHE WAS ALMOST STIFLED +3997-182399-0000-1779: (OL MISTAH->ALL MISTER) BUZZARD GRINNED +3997-182399-0001-1780: THIS SOUNDED LIKE ANOTHER STORY +3997-182399-0002-1781: HE WAS CURIOUS ABOUT THAT BLACK HEADED COUSIN OF (OL MISTAH->ALL MISTER) BUZZARD VERY CURIOUS INDEED +3997-182399-0003-1782: ANYWAY HE WOULD FIND OUT +3997-182399-0004-1783: PLEASE MISTER BUZZARD PLEASE TELL US THE STORY HE BEGGED +3997-182399-0005-1784: NOW (OL MISTAH->ALL MISTER) BUZZARD IS NATURALLY GOOD NATURED AND ACCOMMODATING AND WHEN PETER (BEGGED->BAG) SO HARD HE JUST COULDN'T FIND IT IN HIS HEART TO REFUSE +3997-182399-0006-1785: WAY BACK IN THE DAYS WHEN (GRANDPAP BUZZARD->GRANDPAPAZZARD) HAD HIS (LIL->LITTLE) FALLING OUT WITH (OL->OLD) KING EAGLE AND (DONE FLY->DON FLIES) SO HIGH HE (SCO'TCH->SCORCHED) THE FEATHERS (OFFEN->OFTEN) HIS (HAID->HEAD) HE HAD A COUSIN DID GRANDPAP BUZZARD AND THIS COUSIN WAS (JES->JUST) NATURALLY LAZY AND NO COUNT 
+3997-182399-0007-1786: LIKE MOST NO COUNT PEOPLE HE USED TO MAKE A REGULAR NUISANCE OF (HISSELF->HIMSELF) POKING HIS NOSE INTO (EV'YBODY'S->EVERY BODY'S) BUSINESS AND NEVER TENDING TO HIS OWN +3997-182399-0008-1787: WASN'T ANYTHING GOING ON THAT THIS TRIFLING MEMBER OF THE BUZZARD (FAM'LY->FAMILY) DIDN'T FIND OUT ABOUT (AND->A) MEDDLE IN HE COULD ASK (MO->MORE) QUESTIONS THAN PETER RABBIT (CAN AN->KENN AND) ANYBODY THAT CAN DO THAT HAS GOT TO ASK A LOT +3997-182399-0009-1788: EVERYBODY LOOKED AT PETER AND LAUGHED +3997-182399-0010-1789: SO WE UNS (SIT->SET) ON THE CHIMNEY TOPS WHENEVER (OL->OLD) JACK FROST GETS TO (STRAYING->STRAIN) DOWN WHERE HE HAVE NO BUSINESS +3997-182399-0011-1790: ONE DAY (THIS->THERE'S) NO COUNT TRIFLING COUSIN OF GRANDPAP (BUZZARD->BAZARD) GET COLD IN HIS FEET +3997-182399-0012-1791: IT WAS ON (A LIL OL->THE LITTLE OLD) HOUSE A (LIL OL->LITTLE OLD) TUMBLE DOWN HOUSE +3997-182399-0013-1792: WHY HE (JES->JUST) STRETCH HIS (FOOL HAID->FULL HEAD) AS FAR DOWN (THAT->THE) CHIMNEY AS HE CAN (AN->AND) LISTEN (AN->AND) LISTEN +3997-182399-0014-1793: BUT HE DON'T MIND THAT +3997-182399-0015-1794: (WILL YO' ALLS->OH YOU ALL) PLEASE SPEAK A (LIL->LOW) LOUDER HE (HOLLER->HOLLERED) DOWN THE CHIMNEY (JES->JUST) LIKE THAT +3997-182399-0016-1795: YES SAH SHE (SHO'LY->SURELY) WAS (PLUMB->PLUM) SCARED +3997-182399-0017-1796: THEY (LIKE->LIKED) TO CHOKE THAT NO (COUNT BUZZARD->COMPOSER) TO DEATH +3997-182399-0018-1797: WHEN HE GET HOME (HE->HE'D) TRY (AN TRY->AND TRIES) TO BRUSH (THAT->US) SOOT OFF BUT IT DONE GET INTO THE SKIN (AN IT->AND IT'S) STAY THERE +3997-182399-0019-1798: A LITTLE SIGH OF SATISFACTION WENT (AROUND->ROUND) THE CIRCLE OF LISTENERS +3997-182399-0020-1799: IT WAS JUST AS GOOD AS ONE OF GRANDFATHER FROG'S +4198-12259-0000-203: DRAW REACH FILL MIX GIVE IT ME WITHOUT WATER +4198-12259-0001-204: SO MY FRIEND SO WHIP ME OFF THIS GLASS NEATLY BRING ME HITHER SOME (CLARET->CLARE IT) A FULL WEEPING GLASS TILL IT RUN OVER +4198-12259-0002-205: A CESSATION AND TRUCE WITH THIRST +4198-12259-0003-206: YOU HAVE (CATCHED->CAST) A COLD GAMMER YEA FORSOOTH SIR +4198-12259-0004-207: BY THE BELLY OF (SANCT->SAINT) BUFF LET US TALK OF OUR DRINK I NEVER DRINK BUT AT MY HOURS LIKE THE POPE'S MULE +4198-12259-0005-208: WHICH WAS FIRST (THIRST OR->THOSE TWO) DRINKING +4198-12259-0006-209: WHAT IT SEEMS I DO NOT DRINK BUT (BY->BUY) AN ATTORNEY +4198-12259-0007-210: DRINK ALWAYS AND YOU SHALL NEVER DIE +4198-12259-0008-211: IF I DRINK NOT I AM A GROUND DRY GRAVELLED AND SPENT I AM STARK DEAD WITHOUT DRINK AND MY SOUL READY TO FLY INTO SOME (MARSH AMONGST->MARS A MONTH'S) FROGS THE SOUL NEVER DWELLS IN A DRY PLACE (DROUTH KILLS->DROUGHT KILLET) IT +4198-12259-0009-212: HE (DRINKS IN->DRINK SO) VAIN THAT (FEELS->FILLS) NOT THE PLEASURE OF IT +4198-12259-0010-213: IT IS ENOUGH TO BREAK BOTH (GIRDS->GORGE) AND (PETREL->PETAL) +4198-12259-0011-214: WHAT DIFFERENCE IS THERE BETWEEN A BOTTLE AND A FLAGON +4198-12259-0012-215: BRAVELY AND WELL PLAYED UPON THE WORDS +4198-12259-0013-216: OUR FATHERS DRANK LUSTILY AND EMPTIED THEIR (CANS->CANES) +4198-12259-0014-217: WELL (CACKED->CAGLE) WELL SUNG +4198-12259-0015-218: COME LET US DRINK WILL YOU SEND NOTHING TO THE RIVER +4198-12259-0016-219: I DRINK NO MORE THAN (A SPONGE->HIS SPINES) +4198-12259-0017-220: I DRINK LIKE A TEMPLAR (KNIGHT->NIGHT) +4198-12259-0018-221: AND I (TANQUAM SPONSUS->TANK QUON SPONSES) +4198-12259-0019-222: AND I (SICUT TERRA SINE AQUA->CICEROSINAQUA) +4198-12259-0020-223: GIVE ME A (SYNONYMON->SINNING) FOR A 
(GAMMON->GAMIN) OF BACON +4198-12259-0021-224: IT IS THE COMPULSORY OF (DRINKERS->DRAKERS) IT IS A PULLEY +4198-12259-0022-225: A LITTLE RAIN ALLAYS A GREAT DEAL OF WIND LONG TIPPLING BREAKS THE THUNDER +4198-12259-0023-226: BUT IF THERE CAME SUCH LIQUOR (FROM->FOR) MY BALLOCK (WOULD->WERE) YOU NOT WILLINGLY THEREAFTER SUCK THE (UDDER->UTTER) WHENCE IT ISSUED +4198-12259-0024-227: HERE PAGE FILL +4198-12259-0025-228: I APPEAL FROM THIRST AND DISCLAIM ITS JURISDICTION +4198-12259-0026-229: I WAS WONT (HERETOFORE->HERE) TO (*->FOR TO) DRINK OUT ALL BUT NOW I LEAVE NOTHING +4198-12259-0027-230: (HEYDAY->HAY THEE) HERE (ARE TRIPES->A TRIPE'S) FIT FOR OUR SPORT AND IN EARNEST EXCELLENT (GODEBILLIOS->GO TO BEHOLS) OF THE DUN (OX->AX) YOU KNOW WITH THE BLACK (STREAK->STREET) +4198-12259-0028-231: (O->OH) FOR GOD'S SAKE LET US (LASH->LAST) THEM SOUNDLY YET THRIFTILY +4198-12259-0029-232: SPARROWS (WILL NOT->WHEN I) EAT UNLESS YOU BOB THEM ON THE TAIL NOR CAN I DRINK IF I BE NOT FAIRLY SPOKE TO +4198-12259-0030-233: (HO->OH) THIS (WILL BANG IT SOUNDLY->WAS BENITT'S ONLY) +4198-12259-0031-234: BUT THIS (SHALL->I) BANISH (IT->THE) UTTERLY +4198-12259-0032-235: LET US WIND OUR HORNS BY THE SOUND OF FLAGONS AND (BOTTLES->BIDLES) AND CRY ALOUD (THAT->THERE) WHOEVER HATH LOST HIS THIRST COME (NOT->NIGH) HITHER TO SEEK IT +4198-12259-0033-236: THE GREAT GOD MADE THE PLANETS AND WE MAKE THE PLATTERS NEAT +4198-12259-0034-237: APPETITE (COMES->COMBED) WITH EATING SAYS (ANGESTON->ANGER SN) BUT THE THIRST GOES AWAY WITH DRINKING +4198-12259-0035-238: I HAVE A REMEDY AGAINST THIRST QUITE CONTRARY TO THAT WHICH IS GOOD AGAINST (THE BITING->ABIDING) OF A (MAD DOG->MAN DOLE) +4198-12259-0036-239: (WHITE WINE->WHY) HERE (WINE->WHY) BOYS +4198-12259-0037-240: O (LACHRYMA->LACK ROOM I) CHRISTI IT IS OF THE BEST GRAPE +4198-12259-0038-241: (I'FAITH->I FAITH) PURE GREEK GREEK O THE FINE WHITE WINE +4198-12259-0039-242: THERE IS NO ENCHANTMENT NOR CHARM THERE EVERY ONE OF YOU HATH SEEN IT +4198-12259-0040-243: MY (PRENTICESHIP->PREDICUP) IS OUT (I AM->I'M) A FREE MAN AT THIS TRADE +4198-12259-0041-244: (I SHOULD SAY->AS YOU SEE) MASTER (PAST->PASS) +4198-12259-0042-245: (O->OH) THE (DRINKERS->DRAKE IS) THOSE THAT ARE (A->*) DRY O (POOR->PORT) THIRSTY SOULS +4198-12259-0043-246: CLEAR OFF (NEAT->MEAT) SUPERNACULUM +4198-12281-0000-187: ALTHOUGH THE PLAGUE WAS THERE IN THE MOST PART OF ALL THE HOUSES THEY NEVERTHELESS ENTERED EVERYWHERE THEN PLUNDERED AND CARRIED AWAY ALL THAT WAS WITHIN AND YET FOR ALL THIS NOT ONE OF THEM TOOK ANY HURT WHICH IS A MOST WONDERFUL CASE +4198-12281-0001-188: I BESEECH YOU THINK UPON IT +4198-12281-0002-189: NEVERTHELESS AT ALL (ADVENTURES->VENTURES) THEY RANG THE BELLS (AD->AT) CAPITULUM (CAPITULANTES->CAPITULAT DAYS) +4198-12281-0003-190: BY THE VIRTUE OF GOD WHY DO NOT YOU SING PANNIERS FAREWELL (VINTAGE->VENTAGE) IS (DONE->NONE) +4198-12281-0004-191: BY THE BELLY OF (SANCT->SAINT) JAMES (WHAT->WHICH) SHALL WE POOR DEVILS DRINK THE WHILE +4198-12281-0005-192: LORD GOD (DA MIHI POTUM->DALMY HE POT EM) +4198-12281-0006-193: LET HIM BE CARRIED (TO->THE) PRISON FOR TROUBLING THE DIVINE SERVICE +4198-12281-0007-194: WHEREFORE IS IT THAT OUR DEVOTIONS WERE INSTITUTED TO BE SHORT IN THE TIME OF HARVEST AND VINTAGE AND LONG IN (THE->*) ADVENT (AND->IN) ALL THE WINTER +4198-12281-0008-195: (HARK->ARE) YOU MY MASTERS YOU THAT LOVE (THE WINE->THEM WHY) COP'S BODY FOLLOW ME FOR (SANCT->SAINT) ANTHONY BURN ME AS FREELY AS A FAGGOT (IF->*) THEY GET LEAVE TO TASTE ONE DROP OF THE LIQUOR THAT 
(WILL->WOULD) NOT NOW COME AND FIGHT FOR RELIEF OF THE VINE +4198-12281-0009-196: TO OTHERS AGAIN HE UNJOINTED THE (SPONDYLES->SPONGEALS) OR KNUCKLES OF THE NECK (DISFIGURED->THIS FIGURED) THEIR CHAPS (GASHED->GASH) THEIR FACES MADE THEIR CHEEKS HANG FLAPPING ON THEIR CHIN AND SO SWINGED AND (BALAMMED->BELLAMED) THEM THAT THEY FELL DOWN BEFORE HIM LIKE HAY BEFORE (A MOWER->HIM OVER) +4198-12281-0010-197: TO SOME (WITH A->WOULD THEY) SMART (SOUSE->SOUS) ON (THE->THEIR) EPIGASTER HE WOULD MAKE THEIR (MIDRIFF SWAG->MIDRIFTS WAG) THEN REDOUBLING THE BLOW GAVE THEM SUCH A (HOMEPUSH->HOME PUSH) ON THE NAVEL THAT HE MADE THEIR PUDDINGS TO GUSH OUT +4198-12281-0011-198: BELIEVE (THAT->THEN) IT WAS THE MOST HORRIBLE SPECTACLE THAT EVER ONE SAW +4198-12281-0012-199: (O->ALL) THE HOLY LADY (NYTOUCH->KNIGHTSAGE) SAID ONE THE GOOD (SANCTESS->SANCTUS) O OUR LADY (OF SUCCOURS->OFURUS) SAID ANOTHER HELP HELP +4198-12281-0013-200: SOME DIED WITHOUT SPEAKING OTHERS SPOKE WITHOUT DYING SOME DIED IN SPEAKING OTHERS SPOKE (IN->AND) DYING +4198-12281-0014-201: CAN YOU TELL WITH WHAT INSTRUMENTS THEY DID IT +4198-12281-0015-202: IN THE MEANTIME (FRIAR->FRY) JOHN WITH HIS FORMIDABLE BATON OF THE CROSS GOT TO THE BREACH WHICH THE ENEMIES HAD MADE AND THERE STOOD TO SNATCH UP THOSE THAT ENDEAVOURED TO ESCAPE +4198-61336-0000-247: IT IS SIGNIFICANT TO NOTE IN THIS CONNECTION THAT THE NEW KING WAS AN UNSWERVING ADHERENT OF THE CULT OF (ASHUR->ASHER) BY THE (ADHERENTS->ADHERENCE) OF WHICH HE WAS PROBABLY STRONGLY SUPPORTED +4198-61336-0001-248: AT THE BEGINNING OF HIS REIGN THERE WAS MUCH SOCIAL DISCONTENT AND SUFFERING +4198-61336-0002-249: WELL MIGHT (SHARDURIS->YOURIS) EXCLAIM IN THE WORDS OF THE PROPHET WHERE IS THE KING OF (ARPAD->ARPET) +4198-61336-0003-250: (TIGLATH PILESER->TIGG LAUGHED BELLEZER) HOWEVER CROSSED THE (EUPHRATES->EUPHADIS) AND MOVING NORTHWARD DELIVERED AN UNEXPECTED ATTACK ON THE (URARTIAN->GERGIAN) ARMY (IN QUMMUKH->AND KUMAK) +4198-61336-0004-251: A FIERCE BATTLE ENSUED AND ONE OF (ITS->HIS) DRAMATIC INCIDENTS WAS A SINGLE COMBAT BETWEEN THE RIVAL KINGS +4198-61336-0005-252: AN ATTEMPT WAS MADE TO CAPTURE KING (SHARDURIS->CHARS) WHO LEAPT FROM HIS CHARIOT AND MADE HASTY ESCAPE ON HORSEBACK HOTLY PURSUED IN THE GATHERING DARKNESS BY AN ASSYRIAN (CONTINGENT->CONTENDENT) OF CAVALRY +4198-61336-0006-253: DESPITE THE BLOW DEALT AGAINST (URARTU->YOU ARE TO) ASSYRIA DID NOT IMMEDIATELY REGAIN POSSESSION OF NORTH SYRIA +4198-61336-0007-254: THE SHIFTY (MATI ILU->MANTIL) EITHER (CHERISHED->CHERISH) THE HOPE THAT (SHARDURIS->SHALL DORIS) WOULD RECOVER STRENGTH AND AGAIN INVADE NORTH SYRIA OR THAT HE MIGHT HIMSELF ESTABLISH AN EMPIRE IN THAT REGION +4198-61336-0008-255: (TIGLATH PILESER->TIG LASS BELIEU) HAD THEREFORE TO MARCH WESTWARD AGAIN +4198-61336-0009-256: FOR THREE YEARS HE CONDUCTED VIGOROUS CAMPAIGNS IN THE WESTERN LAND WHERE HE MET WITH VIGOROUS RESISTANCE +4198-61336-0010-257: (ARPAD->OUR PAD) WAS CAPTURED AND (MATI ILU->MEANT TO ILL YOU) DEPOSED AND PROBABLY PUT TO DEATH +4198-61336-0011-258: ONCE AGAIN THE HEBREWS CAME INTO CONTACT WITH (ASSYRIA->THE ZERIA) +4198-61336-0012-259: (ITS FALL MAY->IT'S FOR ME) NOT HAVE BEEN UNCONNECTED WITH THE TREND OF EVENTS IN (ASSYRIA->A SYRIA) DURING THE CLOSING YEARS OF THE MIDDLE EMPIRE +4198-61336-0013-260: (JEHOASH->JOESH) THE GRANDSON OF JEHU HAD ACHIEVED SUCCESSES IN CONFLICT WITH DAMASCUS +4198-61336-0014-261: SIX MONTHS (AFTERWARDS->AFTERWARD) HE WAS ASSASSINATED BY (SHALLUM->CHARLEM) +4198-61336-0015-262: THIS USURPER (HELD SWAY->HELDS WEIGH) AT 
SAMARIA FOR ONLY A MONTH +4198-61336-0016-263: NO RESISTANCE WAS POSSIBLE ON THE PART OF (MENAHEM->MANY HIM) THE USURPER WHO WAS PROBABLY READY TO WELCOME THE ASSYRIAN CONQUEROR SO THAT BY ARRANGING AN ALLIANCE HE MIGHT SECURE HIS OWN POSITION +4198-61336-0017-264: (TIGLATH PILESER->TAKE THAT PLEASURE) NEXT OPERATED AGAINST THE MEDIAN AND OTHER HILL TRIBES IN THE (NORTH EAST->NORTHEAST) +4198-61336-0018-265: HE OVERTHREW BUILDINGS DESTROYED ORCHARDS AND TRANSPORTED TO NINEVEH THOSE OF THE INHABITANTS HE HAD NOT PUT TO (THE SWORD->THIS WOOD) WITH ALL THE LIVE STOCK HE COULD LAY HANDS ON +4198-61336-0019-266: THUS WAS (URARTU->HERE TO) CRIPPLED AND HUMILIATED IT NEVER REGAINED ITS (FORMER->FORM OF) PRESTIGE AMONG THE NORTHERN STATES +4198-61336-0020-267: IN THE FOLLOWING YEAR (TIGLATH PILESER->TIGLASS BELIEVER) RETURNED TO SYRIA +4198-61336-0021-268: (MENAHEM->MANY) KING OF ISRAEL HAD DIED AND WAS SUCCEEDED BY HIS SON (PEKAHIAH->PEKAHIA) +4198-61336-0022-269: (JUDAH->JULIA) HAD TAKEN ADVANTAGE OF THE DISTURBED CONDITIONS IN ISRAEL TO ASSERT ITS INDEPENDENCE +4198-61336-0023-270: HE CONDEMNED ISRAEL FOR ITS IDOLATRIES AND CRIED +4198-61336-0024-271: FOR (THUS->THIS) SAITH THE LORD UNTO THE HOUSE OF ISRAEL SEEK YE ME (AND YE SHALL->TO) LIVE HAVE YE OFFERED UNTO ME SACRIFICES AND OFFERINGS IN THE WILDERNESS FORTY YEARS O HOUSE OF ISRAEL +4198-61336-0025-272: THE REMNANT OF THE PHILISTINES SHALL PERISH +4198-61336-0026-273: ISRAEL WAS ALSO DEALT WITH +4198-61336-0027-274: HE SWEPT THROUGH ISRAEL LIKE A HURRICANE +4198-61336-0028-275: THE (PHILISTINES->FAIRLY STEAMS) AND (THE->*) ARABIANS OF THE DESERT WERE ALSO SUBDUED +4198-61336-0029-276: HE INVADED BABYLONIA +4198-61336-0030-277: (UKINZER->A KINDRED) TOOK REFUGE IN HIS CAPITAL SHAPIA WHICH HELD OUT SUCCESSFULLY ALTHOUGH THE SURROUNDING COUNTRY WAS RAVAGED AND DESPOILED +4294-14317-0000-1866: AS I THOUGHT THAT THIS WAS DUE TO SOME FAULT IN THE EARTH I WANTED TO MAKE THESE FIRST EXPERIMENTS BEFORE I UNDERTOOK MY PERSEUS +4294-14317-0001-1867: WHEN I SAW (THAT->*) THIS (BUST->BEST) CAME OUT SHARP AND CLEAN I (SET->SAID) AT ONCE TO CONSTRUCT A LITTLE FURNACE IN THE WORKSHOP ERECTED FOR ME BY THE DUKE AFTER MY OWN PLANS AND DESIGN IN THE HOUSE WHICH THE DUKE HAD GIVEN ME +4294-14317-0002-1868: IT WAS AN EXTREMELY DIFFICULT TASK AND I WAS ANXIOUS TO OBSERVE ALL THE NICETIES OF ART WHICH I HAD LEARNED SO AS NOT TO LAPSE INTO SOME ERROR +4294-14317-0003-1869: I IN MY TURN FEEL THE SAME DESIRE AND HOPE TO PLAY MY PART LIKE THEM THEREFORE MY LORD GIVE ME THE LEAVE TO GO +4294-14317-0004-1870: BUT BEWARE OF LETTING (BANDINELLO->BEND NELLO) QUIT YOU RATHER BESTOW UPON HIM ALWAYS MORE THAN HE DEMANDS FOR IF HE GOES INTO FOREIGN PARTS HIS IGNORANCE IS SO PRESUMPTUOUS THAT HE IS JUST THE MAN TO DISGRACE OUR MOST ILLUSTRIOUS SCHOOL +4294-14317-0005-1871: I (ASK->ASKED) NO FURTHER REWARD FOR MY LABOURS UP TO THIS TIME THAN THE GRACIOUS FAVOUR OF YOUR MOST ILLUSTRIOUS EXCELLENCY +4294-14317-0006-1872: THEN I THANKED HIM AND SAID I HAD NO GREATER DESIRE THAN TO SHOW THOSE ENVIOUS FOLK THAT I HAD IT IN ME TO EXECUTE THE PROMISED WORK +4294-14317-0007-1873: I HAD BETTER LOOK TO MY CONDUCT FOR IT (HAD->HAS) COME TO HIS EARS THAT I RELIED UPON HIS FAVOUR TO TAKE IN FIRST ONE MAN AND THEN ANOTHER +4294-14317-0008-1874: I BEGGED HIS MOST ILLUSTRIOUS EXCELLENCY TO NAME A SINGLE PERSON (WHOM I->WHY) HAD EVER TAKEN IN +4294-14317-0009-1875: I SAID MY LORD I THANK YOU AND BEG YOU TO CONDESCEND SO FAR AS TO LISTEN TO FOUR WORDS IT IS TRUE THAT HE LENT ME A PAIR OF OLD 
SCALES TWO ANVILS AND THREE LITTLE HAMMERS WHICH ARTICLES I BEGGED HIS (WORKMAN GIORGIO DA CORTONA->WORKMEN GEORGIO DE CORTEANA) FIFTEEN DAYS AGO TO FETCH BACK +4294-14317-0010-1876: (GIORGIO->YOUR JOE) CAME FOR THEM (HIMSELF->HIS HEALTH) +4294-14317-0011-1877: I HOPE TO PROVE ON WHAT ACCOUNT THAT SCOUNDREL TRIES TO BRING ME INTO DISGRACE +4294-14317-0012-1878: WHEN HE HAD HEARD THIS SPEECH THE DUKE ROSE UP IN ANGER AND SENT FOR BERNARDONE WHO WAS FORCED TO TAKE FLIGHT AS FAR AS VENICE HE AND (ANTONIO LANDI->ANTONIA LANDIE) WITH HIM +4294-14317-0013-1879: YOU HAD BETTER PUT THIS TO THE PROOF AND I WILL GO AT ONCE TO THE (BARGELLO->BARGIENLO) +4294-14317-0014-1880: I AM WILLING TO ENTER INTO COMPETITION WITH THE ANCIENTS AND FEEL ABLE TO SURPASS THEM FOR SINCE THOSE EARLY DAYS IN WHICH I MADE THE (MEDALS->METALS) OF POPE CLEMENT I HAVE LEARNED SO MUCH THAT I CAN NOW PRODUCE FAR BETTER PIECES OF THE KIND I THINK I CAN ALSO OUTDO THE COINS I STRUCK FOR DUKE (ALESSANDRO->ALISANDRO) WHICH (ARE->IS) STILL HELD IN HIGH ESTEEM IN LIKE MANNER I COULD MAKE FOR YOU LARGE PIECES OF GOLD AND SILVER PLATE AS I DID SO OFTEN FOR THAT NOBLE MONARCH KING FRANCIS OF FRANCE THANKS TO THE GREAT CONVENIENCES HE ALLOWED ME WITHOUT EVER LOSING TIME FOR THE EXECUTION OF COLOSSAL STATUES OR OTHER WORKS OF THE (SCULPTORS->SCULPTOR'S) CRAFT +4294-14317-0015-1881: AFTER SEVERAL MONTHS WERE WASTED AND PIERO WOULD NEITHER WORK NOR PUT MEN TO WORK UPON THE PIECE I MADE HIM GIVE IT BACK +4294-14317-0016-1882: AMONG ARTISTS CERTAIN (ENRAGED->ENRAGE) SCULPTORS LAUGHED AT ME AND CALLED ME THE NEW SCULPTOR +4294-14317-0017-1883: NOW I HOPE TO SHOW THEM THAT I AM AN OLD SCULPTOR IF GOD SHALL GRANT ME THE BOON OF FINISHING MY PERSEUS FOR THAT NOBLE PIAZZA OF HIS MOST ILLUSTRIOUS EXCELLENCY +4294-14317-0018-1884: HAVING THIS EXCELLENT RESOLVE IN HEART I REACHED MY HOME +4294-32859-0000-1942: WYLDER WAS RATHER SURLY AFTER THE LADIES HAD FLOATED AWAY FROM THE SCENE AND HE DRANK HIS LIQUOR DOGGEDLY +4294-32859-0001-1943: IT WAS HIS FANCY I SUPPOSE TO REVIVE CERTAIN SENTIMENTAL RELATIONS WHICH HAD IT MAY BE ONCE EXISTED BETWEEN HIM AND MISS LAKE AND HE WAS A PERSON OF THAT COMBATIVE TEMPERAMENT THAT MAGNIFIES AN OBJECT IN PROPORTION AS ITS PURSUIT IS THWARTED +4294-32859-0002-1944: THE STORY OF (FRIDOLIN->FRIEDLIN) AND (RETZCH'S->WRETCH IS) PRETTY OUTLINES +4294-32859-0003-1945: SIT DOWN BESIDE ME AND I'LL TELL YOU THE STORY +4294-32859-0004-1946: HE ASSISTED AT IT BUT TOOK NO PART AND IN FACT WAS LISTENING TO THAT OTHER CONVERSATION WHICH SOUNDED WITH ITS PLEASANT GABBLE AND LAUGHTER LIKE A LITTLE MUSICAL TINKLE OF BELLS IN THE DISTANCE +4294-32859-0005-1947: BUT HONEST MARK FORGOT THAT YOUNG LADIES DO NOT ALWAYS COME OUT QUITE ALONE AND JUMP UNASSISTED INTO THEIR VEHICLES +4294-35475-0000-1885: BUT THE MIDDLE (SON->SUN) WAS LITTLE AND LORN HE WAS NEITHER DARK NOR FAIR HE WAS NEITHER HANDSOME NOR STRONG +4294-35475-0001-1886: (THROWING->ROWING) HIMSELF ON HIS KNEES BEFORE THE KING HE CRIED (OH->O) ROYAL SIRE BESTOW UPON ME ALSO A SWORD AND A STEED THAT I MAY UP AND (AWAY->WAIT) TO FOLLOW MY BRETHREN +4294-35475-0002-1887: BUT THE KING LAUGHED HIM TO SCORN THOU A SWORD HE QUOTH +4294-35475-0003-1888: IN SOOTH THOU SHALT HAVE ONE BUT IT SHALL BE ONE BEFITTING THY MAIDEN (SIZE->SIGHS) AND COURAGE IF SO SMALL A WEAPON CAN BE FOUND IN ALL MY KINGDOM +4294-35475-0004-1889: FORTHWITH THE GRINNING JESTER BEGAN SHRIEKING WITH LAUGHTER SO THAT THE BELLS UPON HIS MOTLEY CAP WERE ALL SET A JANGLING +4294-35475-0005-1890: I DID BUT LAUGH TO 
THINK THE SWORD OF (ETHELRIED->EFFLARIDE) HAD BEEN SO QUICKLY FOUND RESPONDED THE JESTER AND HE POINTED TO THE SCISSORS HANGING FROM THE TAILOR'S GIRDLE +4294-35475-0006-1891: ONE NIGHT AS HE LAY IN A DEEP FOREST (TOO->TWO) UNHAPPY TO SLEEP HE HEARD A NOISE NEAR AT HAND IN THE BUSHES +4294-35475-0007-1892: THOU SHALT HAVE THY LIBERTY HE CRIED EVEN THOUGH THOU SHOULDST (REND->RUN) ME IN PIECES THE MOMENT THOU ART FREE +4294-35475-0008-1893: (IT->HE) HAD (*->HITTED) SUDDENLY DISAPPEARED AND IN ITS PLACE STOOD A BEAUTIFUL FAIRY WITH FILMY WINGS WHICH SHONE LIKE RAINBOWS IN THE MOONLIGHT +4294-35475-0009-1894: AT THIS MOMENT THERE WAS A DISTANT RUMBLING AS OF THUNDER TIS THE OGRE CRIED THE FAIRY WE MUST HASTEN +4294-35475-0010-1895: SCISSORS GROW A GIANT'S HEIGHT AND SAVE US FROM THE (OGRE'S->OGRES) MIGHT +4294-35475-0011-1896: HE COULD SEE THE OGRE STANDING POWERLESS TO HURT HIM ON THE OTHER SIDE OF THE CHASM AND GNASHING HIS TEETH EACH ONE OF WHICH WAS AS BIG AS A (MILLSTON->MILLSTONE) +4294-35475-0012-1897: THE SIGHT WAS SO TERRIBLE THAT HE TURNED ON HIS HEEL AND FLED AWAY AS FAST AS HIS FEET COULD CARRY HIM +4294-35475-0013-1898: THOU SHALT NOT BE LEFT A PRISONER IN THIS DISMAL SPOT WHILE I HAVE THE POWER TO HELP THEE +4294-35475-0014-1899: HE LIFTED THE SCISSORS AND WITH ONE STROKE DESTROYED THE WEB AND GAVE THE FLY (ITS FREEDOM->TO READ THEM) +4294-35475-0015-1900: A FAINT GLIMMER OF LIGHT ON THE OPPOSITE WALL SHOWS ME THE KEYHOLE +4294-35475-0016-1901: THE PRINCE SPENT ALL THE FOLLOWING TIME UNTIL MIDNIGHT TRYING TO THINK OF A SUITABLE VERSE TO SAY TO THE SCISSORS +4294-35475-0017-1902: AS HE UTTERED THE WORDS THE SCISSORS LEAPED OUT OF HIS HAND AND BEGAN TO CUT THROUGH THE WOODEN SHUTTERS AS EASILY AS THROUGH (A->ITS) CHEESE +4294-35475-0018-1903: IN (A->THE) VERY SHORT TIME THE PRINCE HAD CRAWLED THROUGH THE OPENING +4294-35475-0019-1904: WHILE HE STOOD LOOKING AROUND HIM IN BEWILDERMENT A FIREFLY ALIGHTED ON HIS (ARM->HEART) FLASHING ITS LITTLE LANTERN IN THE PRINCE'S FACE IT CRIED THIS WAY MY FRIEND THE FLY SENT ME TO GUIDE YOU TO A PLACE OF SAFETY +4294-35475-0020-1905: WHAT IS TO BECOME OF ME CRIED THE POOR PEASANT +4294-35475-0021-1906: MY GRAIN MUST FALL (AND->IN) ROT IN THE FIELD FROM (OVERRIPENESS->OVER RIPENESS) BECAUSE I HAVE NOT THE STRENGTH TO RISE (AND->IN) HARVEST IT THEN INDEED MUST WE ALL STARVE +4294-35475-0022-1907: THE (GRANDAME->GRAND DAME) WHOM HE SUPPLIED WITH FAGOTS THE MERCHANT WHOM HE RESCUED FROM ROBBERS THE KING'S (COUNCILLOR->COUNSELLOR) TO WHOM HE GAVE AID ALL BECAME HIS FRIENDS UP AND DOWN THE LAND TO BEGGAR (OR->O) LORD HOMELESS WANDERER (OR->*) HIGH BORN DAME HE GLADLY GAVE UNSELFISH SERVICE ALL UNSOUGHT AND SUCH AS HE HELPED STRAIGHTWAY BECAME HIS FRIENDS +4294-35475-0023-1908: TO HIM WHO COULD BRING HER BACK TO HER FATHER'S CASTLE SHOULD BE GIVEN THE THRONE AND KINGDOM AS WELL AS THE PRINCESS HERSELF SO FROM FAR AND NEAR INDEED FROM ALMOST EVERY COUNTRY UNDER THE SUN CAME (KNIGHTS->NIGHTS) AND PRINCES TO FIGHT THE OGRE +4294-35475-0024-1909: AMONG THOSE WHO DREW BACK (WERE ETHELRIED'S->WHERE ETHELRE'S) BROTHERS THE THREE THAT WERE DARK AND THE THREE THAT WERE FAIR +4294-35475-0025-1910: BUT (ETHELRIED HEEDED->ETHEL READ HEATED) NOT THEIR TAUNTS +4294-35475-0026-1911: SO THEY ALL CRIED OUT LONG AND LOUD LONG LIVE THE PRINCE (PRINCE CISEAUX->PRINCESO) +4294-9934-0000-1912: HE FELT (WHAT->WITH) THE EARTH MAY POSSIBLY FEEL AT THE MOMENT WHEN IT IS TORN OPEN WITH THE IRON IN ORDER THAT GRAIN MAY BE DEPOSITED WITHIN IT IT FEELS ONLY THE WOUND THE QUIVER OF 
THE GERM (AND->*) THE JOY OF THE FRUIT ONLY (ARRIVE->ARRIVED) LATER +4294-9934-0001-1913: (HE HAD->HE'D) BUT JUST ACQUIRED A FAITH MUST HE THEN (REJECT IT->REJECTED) ALREADY +4294-9934-0002-1914: HE AFFIRMED TO HIMSELF THAT HE WOULD NOT HE DECLARED TO HIMSELF THAT HE WOULD NOT DOUBT AND HE BEGAN TO DOUBT IN SPITE OF HIMSELF +4294-9934-0003-1915: TO STAND BETWEEN TWO RELIGIONS FROM ONE OF WHICH YOU HAVE NOT AS YET EMERGED (AND->IN) ANOTHER INTO WHICH YOU HAVE NOT YET ENTERED IS INTOLERABLE AND TWILIGHT IS PLEASING ONLY TO BAT LIKE SOULS +4294-9934-0004-1916: MARIUS WAS CLEAR EYED AND HE REQUIRED THE TRUE LIGHT +4294-9934-0005-1917: THE HALF LIGHTS OF DOUBT PAINED HIM +4294-9934-0006-1918: WHATEVER MAY HAVE BEEN HIS DESIRE TO REMAIN WHERE HE WAS HE COULD NOT HALT THERE HE WAS IRRESISTIBLY CONSTRAINED TO CONTINUE TO ADVANCE TO EXAMINE TO THINK TO MARCH FURTHER +4294-9934-0007-1919: HE FEARED AFTER HAVING TAKEN SO MANY STEPS WHICH HAD BROUGHT HIM NEARER TO HIS FATHER TO NOW TAKE A STEP WHICH SHOULD ESTRANGE HIM FROM THAT FATHER +4294-9934-0008-1920: HIS DISCOMFORT WAS AUGMENTED BY ALL THE REFLECTIONS WHICH OCCURRED TO HIM +4294-9934-0009-1921: IN THE TROUBLED STATE OF HIS CONSCIENCE HE NO LONGER THOUGHT OF CERTAIN SERIOUS SIDES OF EXISTENCE +4294-9934-0010-1922: THEY SOON ELBOWED HIM ABRUPTLY +4294-9934-0011-1923: REQUEST (COURFEYRAC->HER FORACT) TO COME AND TALK WITH ME SAID MARIUS +4294-9934-0012-1924: WHAT IS TO BECOME OF YOU SAID (COURFEYRAC->CURFYRAC) +4294-9934-0013-1925: WHAT ARE YOU GOING TO DO I DO NOT KNOW +4294-9934-0014-1926: SILVER GOLD HERE IT IS +4294-9934-0015-1927: YOU WILL THEN HAVE ONLY A PAIR OF TROUSERS A WAISTCOAT A HAT AND A COAT AND MY BOOTS +4294-9934-0016-1928: THAT WILL BE ENOUGH +4294-9934-0017-1929: NO IT IS NOT GOOD WHAT WILL YOU DO AFTER THAT +4294-9934-0018-1930: DO YOU KNOW GERMAN NO +4294-9934-0019-1931: IT IS BADLY PAID WORK BUT ONE CAN LIVE BY IT +4294-9934-0020-1932: THE CLOTHES DEALER WAS SENT FOR +4294-9934-0021-1933: HE PAID TWENTY FRANCS FOR THE CAST OFF GARMENTS THEY WENT TO THE (WATCHMAKER'S->WATCHMAKERS) +4294-9934-0022-1934: HE BOUGHT THE WATCH FOR FORTY FIVE FRANCS +4294-9934-0023-1935: (HELLO->HALLO) I HAD FORGOTTEN THAT SAID MARIUS +4294-9934-0024-1936: THE LANDLORD PRESENTED HIS BILL WHICH HAD TO BE PAID ON THE SPOT +4294-9934-0025-1937: I HAVE TEN FRANCS LEFT SAID MARIUS +4294-9934-0026-1938: THAT WILL BE SWALLOWING A TONGUE VERY FAST OR A HUNDRED SOUS VERY SLOWLY +4294-9934-0027-1939: ONE MORNING ON HIS RETURN FROM THE (LAW->LAST) SCHOOL MARIUS FOUND A LETTER FROM HIS AUNT AND THE SIXTY (PISTOLES->PISTOL) THAT IS TO SAY SIX HUNDRED FRANCS IN GOLD (IN->AND) A SEALED BOX +4294-9934-0028-1940: MARIUS SENT BACK THE THIRTY LOUIS TO HIS AUNT WITH (A->THE) RESPECTFUL LETTER IN WHICH (HE->SHE) STATED THAT HE HAD (SUFFICIENT->SUSPICION) MEANS OF SUBSISTENCE AND THAT HE SHOULD BE ABLE THENCEFORTH TO SUPPLY ALL HIS NEEDS +4294-9934-0029-1941: AT THAT MOMENT HE HAD THREE FRANCS LEFT +4350-10919-0000-2716: HE PERCEIVED THAT IT WAS NO GOOD TALKING TO THE OLD MAN AND THAT THE PRINCIPAL PERSON IN THE HOUSE WAS THE MOTHER +4350-10919-0001-2717: BEFORE HER HE DECIDED TO SCATTER HIS PEARLS +4350-10919-0002-2718: THE PRINCESS WAS DISTRACTED AND DID NOT KNOW WHAT TO DO SHE FELT SHE HAD (SINNED->SENT) AGAINST KITTY +4350-10919-0003-2719: WELL DOCTOR DECIDE OUR FATE SAID THE PRINCESS TELL ME EVERYTHING +4350-10919-0004-2720: IS THERE HOPE SHE MEANT TO SAY BUT HER LIPS QUIVERED AND SHE COULD NOT UTTER THE QUESTION WELL DOCTOR +4350-10919-0005-2721: AS YOU PLEASE THE 
PRINCESS WENT OUT WITH A SIGH +4350-10919-0006-2722: THE FAMILY DOCTOR RESPECTFULLY CEASED IN THE MIDDLE OF HIS OBSERVATIONS +4350-10919-0007-2723: AND THERE ARE INDICATIONS (MALNUTRITION->MALLETRICIAN) NERVOUS EXCITABILITY AND SO ON +4350-10919-0008-2724: THE QUESTION (STANDS->SENDS) THUS IN PRESENCE OF INDICATIONS OF (TUBERCULOUS->TIBERICAN'S) PROCESS WHAT IS TO BE DONE TO MAINTAIN (NUTRITION->UTRITION) +4350-10919-0009-2725: YES (THAT'S AN->I CAN) UNDERSTOOD THING RESPONDED THE CELEBRATED PHYSICIAN AGAIN GLANCING AT HIS WATCH +4350-10919-0010-2726: BEG PARDON IS THE (YAUSKY BRIDGE DONE YET->YSKEEPER STANDARD) OR SHALL I HAVE TO DRIVE (AROUND->HER ON) +4350-10919-0011-2727: HE ASKED AH IT IS +4350-10919-0012-2728: OH WELL THEN I CAN DO IT IN TWENTY MINUTES +4350-10919-0013-2729: AND HOW ABOUT (A TOUR ABROAD->IT TO HER BROAD) ASKED THE (FAMILY->FELLOW) DOCTOR +4350-10919-0014-2730: WHAT IS WANTED IS (*->THE) MEANS OF IMPROVING (NUTRITION->UTRITION) AND NOT FOR LOWERING IT +4350-10919-0015-2731: THE FAMILY DOCTOR LISTENED ATTENTIVELY AND RESPECTFULLY +4350-10919-0016-2732: BUT IN (FAVOR->FAVOUR) OF FOREIGN TRAVEL I WOULD URGE THE CHANGE OF HABITS THE REMOVAL FROM CONDITIONS CALLING UP REMINISCENCES +4350-10919-0017-2733: AND THEN THE MOTHER WISHES IT HE ADDED +4350-10919-0018-2734: AH WELL IN THAT CASE TO BE SURE LET THEM GO ONLY THOSE GERMAN QUACKS ARE MISCHIEVOUS +4350-10919-0019-2735: (OH TIME'S->O TIMES) UP ALREADY AND HE WENT TO THE DOOR +4350-10919-0020-2736: THE CELEBRATED DOCTOR ANNOUNCED TO THE PRINCESS A FEELING OF WHAT WAS DUE FROM HIM DICTATED HIS DOING SO THAT HE OUGHT TO SEE THE PATIENT ONCE MORE +4350-10919-0021-2737: OH NO ONLY A FEW DETAILS PRINCESS COME THIS WAY +4350-10919-0022-2738: AND THE MOTHER ACCOMPANIED BY THE DOCTOR WENT INTO THE DRAWING ROOM TO KITTY +4350-10919-0023-2739: WHEN THE DOCTOR CAME IN SHE FLUSHED CRIMSON AND HER EYES FILLED WITH TEARS +4350-10919-0024-2740: SHE ANSWERED HIM AND ALL AT ONCE GOT UP FURIOUS +4350-10919-0025-2741: EXCUSE ME DOCTOR BUT THERE IS REALLY NO OBJECT IN THIS +4350-10919-0026-2742: THIS IS THE THIRD TIME YOU'VE ASKED ME THE SAME THING +4350-10919-0027-2743: THE CELEBRATED DOCTOR DID NOT TAKE (OFFENSE->OFFENCE) +4350-10919-0028-2744: NERVOUS IRRITABILITY HE SAID TO THE PRINCESS WHEN (KITTY->KATY) HAD LEFT THE ROOM HOWEVER I HAD FINISHED +4350-10919-0029-2745: AND THE DOCTOR BEGAN SCIENTIFICALLY EXPLAINING TO THE PRINCESS AS AN EXCEPTIONALLY INTELLIGENT WOMAN THE CONDITION OF THE YOUNG PRINCESS AND CONCLUDED BY INSISTING ON THE DRINKING OF THE WATERS WHICH WERE CERTAINLY HARMLESS +4350-10919-0030-2746: (AT->BUT) THE QUESTION SHOULD THEY GO ABROAD THE DOCTOR PLUNGED INTO DEEP MEDITATION AS THOUGH RESOLVING A WEIGHTY PROBLEM +4350-10919-0031-2747: FINALLY HIS DECISION WAS PRONOUNCED THEY WERE TO GO ABROAD BUT TO PUT NO FAITH IN FOREIGN QUACKS AND TO APPLY TO HIM IN ANY NEED +4350-10919-0032-2748: IT SEEMED AS THOUGH SOME PIECE OF GOOD FORTUNE HAD COME TO PASS AFTER THE DOCTOR HAD GONE +4350-10919-0033-2749: THE MOTHER WAS MUCH MORE CHEERFUL WHEN SHE WENT BACK TO HER DAUGHTER AND KITTY PRETENDED TO BE MORE CHEERFUL +4350-9170-0000-2750: EDUCATED PEOPLE OF THE UPPER CLASSES ARE TRYING TO STIFLE THE (EVER GROWING->EVERGREWING) SENSE OF THE NECESSITY OF TRANSFORMING THE EXISTING SOCIAL ORDER +4350-9170-0001-2751: (THIS IS->MISSUS) ABSOLUTELY (INCORRECT->AND CORRECT) +4350-9170-0002-2752: IN THE SOCIAL CONCEPTION OF LIFE IT IS SUPPOSED (THAT->*) SINCE THE AIM OF LIFE IS FOUND IN GROUPS OF INDIVIDUALS INDIVIDUALS (WILL->WHO) VOLUNTARILY 
SACRIFICE THEIR OWN (INTERESTS->INTEREST) FOR THE (INTERESTS->INTEREST) OF THE GROUP +4350-9170-0003-2753: THE CHAMPIONS OF THE SOCIAL CONCEPTION OF LIFE USUALLY TRY TO CONNECT THE IDEA OF AUTHORITY THAT IS OF VIOLENCE WITH THE IDEA OF MORAL INFLUENCE BUT THIS CONNECTION IS QUITE IMPOSSIBLE +4350-9170-0004-2754: THE MAN WHO (IS->HAS) CONTROLLED BY MORAL INFLUENCE ACTS IN ACCORDANCE WITH HIS OWN DESIRES +4350-9170-0005-2755: THE BASIS OF AUTHORITY IS BODILY VIOLENCE +4350-9170-0006-2756: THE POSSIBILITY OF APPLYING BODILY VIOLENCE (TO->THE) PEOPLE IS PROVIDED ABOVE ALL BY AN ORGANIZATION OF ARMED MEN TRAINED TO ACT IN UNISON (IN->AND) SUBMISSION TO ONE WILL +4350-9170-0007-2757: THESE BANDS OF ARMED MEN SUBMISSIVE TO A SINGLE WILL ARE WHAT CONSTITUTE THE ARMY +4350-9170-0008-2758: THE ARMY HAS ALWAYS BEEN AND STILL IS THE BASIS OF POWER +4350-9170-0009-2759: POWER IS ALWAYS IN THE HANDS OF THOSE WHO CONTROL THE ARMY AND ALL MEN IN POWER FROM THE ROMAN CAESARS TO THE RUSSIAN AND GERMAN EMPERORS TAKE MORE INTEREST IN THEIR ARMY THAN IN ANYTHING (AND->IN) COURT POPULARITY IN THE ARMY KNOWING THAT IF THAT IS ON THEIR SIDE THEIR POWER IS SECURE +4350-9170-0010-2760: INDEED IT COULD NOT BE OTHERWISE +4350-9170-0011-2761: ONLY UNDER THOSE CONDITIONS COULD THE SOCIAL ORGANIZATION BE JUSTIFIED +4350-9170-0012-2762: BUT SINCE THIS IS NOT THE CASE AND ON THE CONTRARY MEN (IN->AND) POWER (ARE->*) ALWAYS FAR FROM BEING SAINTS THROUGH THE VERY FACT OF THEIR POSSESSION OF POWER THE SOCIAL ORGANIZATION BASED ON POWER HAS NO JUSTIFICATION +4350-9170-0013-2763: EVEN IF THERE WAS ONCE (A TIME->*) WHEN OWING TO THE LOW (STANDARD->STANDARDS) OF MORALS (AND->ON) THE DISPOSITION OF MEN TO VIOLENCE THE EXISTENCE OF AN AUTHORITY TO RESTRAIN SUCH VIOLENCE WAS AN ADVANTAGE BECAUSE THE VIOLENCE OF (*->THE) GOVERNMENT WAS LESS THAN THE VIOLENCE OF INDIVIDUALS ONE CANNOT BUT SEE THAT THIS ADVANTAGE COULD NOT BE LASTING +4350-9170-0014-2764: BETWEEN THE MEMBERS OF ONE STATE SUBJECT TO A SINGLE AUTHORITY THE (STRIFE->STRIPE) BETWEEN (*->THE) INDIVIDUALS (SEEMS->SEEMED) STILL LESS AND (THE->A) LIFE OF THE STATE SEEMS EVEN MORE SECURE +4350-9170-0015-2765: IT WAS PRODUCED ON ONE HAND BY THE NATURAL GROWTH OF POPULATION AND ON THE OTHER BY STRUGGLE AND CONQUEST +4350-9170-0016-2766: AFTER CONQUEST THE POWER OF THE EMPEROR PUTS AN END TO INTERNAL DISSENSIONS AND SO THE STATE CONCEPTION OF LIFE JUSTIFIES ITSELF +4350-9170-0017-2767: BUT THIS JUSTIFICATION IS NEVER MORE THAN TEMPORARY +4350-9170-0018-2768: (INTERNAL DISSENSIONS->AND HERALD ASCENSIONS) DISAPPEAR ONLY IN PROPORTION TO THE DEGREE OF OPPRESSION EXERTED BY THE AUTHORITY OVER THE (DISSENTIENT->DYSINTHIAN) INDIVIDUALS +4350-9170-0019-2769: (GOVERNMENT->GOVERN) AUTHORITY EVEN IF IT DOES SUPPRESS PRIVATE VIOLENCE ALWAYS INTRODUCES INTO THE LIFE OF MEN FRESH FORMS OF VIOLENCE WHICH TEND TO BECOME GREATER AND GREATER IN PROPORTION TO THE DURATION AND STRENGTH OF THE GOVERNMENT +4350-9170-0020-2770: AND THEREFORE THE OPPRESSION OF THE (OPPRESSED->OPPRESS) ALWAYS GOES ON GROWING UP TO THE FURTHEST LIMIT BEYOND WHICH IT CANNOT GO WITHOUT KILLING THE GOOSE WITH THE (GOLDEN EGGS->GOLD NICE) +4350-9170-0021-2771: THE MOST CONVINCING EXAMPLE OF THIS IS TO BE FOUND IN THE CONDITION OF THE WORKING CLASSES OF OUR EPOCH WHO ARE IN REALITY NO BETTER THAN THE SLAVES OF ANCIENT TIMES SUBDUED BY CONQUEST +4350-9170-0022-2772: SO IT (HAS->IS) ALWAYS (BEEN->THEN) +4350-9170-0023-2773: FOOTNOTE THE FACT THAT IN AMERICA THE ABUSES OF AUTHORITY EXIST IN SPITE OF THE SMALL NUMBER OF THEIR 
(TROOPS->TRUCE) NOT ONLY FAILS TO DISPROVE THIS POSITION BUT POSITIVELY CONFIRMS IT +4350-9170-0024-2774: THE UPPER CLASSES KNOW THAT AN ARMY OF FIFTY THOUSAND WILL SOON BE INSUFFICIENT AND NO LONGER RELYING ON PINKERTON'S MEN THEY FEEL THAT (THE->*) SECURITY OF THEIR POSITION DEPENDS ON THE INCREASED STRENGTH OF THE ARMY +4350-9170-0025-2775: THE REASON TO WHICH HE GAVE EXPRESSION IS ESSENTIALLY THE SAME AS THAT WHICH MADE THE FRENCH KINGS AND THE POPES ENGAGE SWISS AND SCOTCH GUARDS AND MAKES THE RUSSIAN AUTHORITIES OF TO DAY SO CAREFULLY DISTRIBUTE THE RECRUITS SO THAT THE REGIMENTS FROM THE (FRONTIERS ARE->FRONTIER THEIR) STATIONED IN CENTRAL DISTRICTS AND THE REGIMENTS FROM THE (CENTER->CENTRE) ARE STATIONED ON THE FRONTIERS +4350-9170-0026-2776: THE MEANING OF (CAPRIVI'S->THE PREVIOUS) SPEECH PUT INTO PLAIN LANGUAGE IS THAT FUNDS ARE NEEDED NOT TO RESIST FOREIGN FOES BUT TO BUY UNDER OFFICERS TO BE READY TO ACT AGAINST THE ENSLAVED TOILING MASSES +4350-9170-0027-2777: AND THIS ABNORMAL ORDER OF (THINGS->THANKS) IS MAINTAINED BY THE ARMY +4350-9170-0028-2778: BUT THERE IS NOT ONLY ONE GOVERNMENT THERE ARE OTHER GOVERNMENTS (EXPLOITING->EXPLODING) THEIR SUBJECTS BY VIOLENCE IN THE SAME WAY AND (*->ARE) ALWAYS READY TO POUNCE DOWN ON ANY OTHER GOVERNMENT AND CARRY OFF THE FRUITS OF THE TOIL OF ITS (ENSLAVED->ENSLAVE) SUBJECTS +4350-9170-0029-2779: AND SO EVERY GOVERNMENT NEEDS AN ARMY ALSO TO PROTECT ITS BOOTY FROM ITS NEIGHBOR BRIGANDS +4350-9170-0030-2780: THIS (INCREASE IS->INCREASES) CONTAGIOUS AS MONTESQUIEU POINTED OUT (ONE->A) HUNDRED (*->AND) FIFTY YEARS AGO +4350-9170-0031-2781: EVERY INCREASE IN THE ARMY OF ONE STATE WITH THE AIM OF SELF (DEFENSE->DEFENCE) AGAINST ITS SUBJECTS BECOMES A (SOURCE->SORT) OF DANGER FOR NEIGHBORING STATES AND CALLS FOR A SIMILAR INCREASE IN THEIR ARMIES +4350-9170-0032-2782: THE DESPOTISM OF (A->THE) GOVERNMENT ALWAYS INCREASES WITH THE STRENGTH OF THE ARMY AND ITS EXTERNAL SUCCESSES AND THE AGGRESSIVENESS OF A GOVERNMENT INCREASES WITH ITS INTERNAL DESPOTISM +4350-9170-0033-2783: THE RIVALRY OF THE EUROPEAN STATES (IN->AND) CONSTANTLY INCREASING THEIR FORCES HAS REDUCED THEM TO THE NECESSITY OF HAVING RECOURSE TO UNIVERSAL MILITARY SERVICE SINCE BY THAT MEANS THE GREATEST POSSIBLE NUMBER OF SOLDIERS IS OBTAINED AT THE LEAST POSSIBLE EXPENSE +4350-9170-0034-2784: AND BY THIS MEANS ALL CITIZENS ARE UNDER ARMS TO SUPPORT THE INIQUITIES (PRACTICED->PRACTISED) UPON THEM (ALL->ALSO) CITIZENS HAVE BECOME THEIR OWN (OPPRESSORS->IMPRESSORS) +4350-9170-0035-2785: THIS INCONSISTENCY HAS BECOME OBVIOUS (IN->AND) UNIVERSAL MILITARY SERVICE +4350-9170-0036-2786: IN FACT THE WHOLE SIGNIFICANCE OF THE SOCIAL CONCEPTION OF LIFE CONSISTS IN MAN'S RECOGNITION OF THE BARBARITY OF STRIFE BETWEEN INDIVIDUALS AND THE TRANSITORINESS OF PERSONAL LIFE ITSELF AND THE TRANSFERENCE OF THE AIM OF LIFE (TO->THE) GROUPS OF PERSONS +4350-9170-0037-2787: BUT WITH UNIVERSAL MILITARY SERVICE IT COMES TO PASS THAT MEN AFTER MAKING EVERY SACRIFICE TO GET RID OF THE CRUELTY OF STRIFE AND THE INSECURITY OF EXISTENCE ARE CALLED UPON TO FACE ALL THE PERILS THEY HAD MEANT TO AVOID +4350-9170-0038-2788: BUT INSTEAD OF DOING THAT THEY (EXPOSE->EXPOSED) THE INDIVIDUALS TO THE SAME NECESSITY OF STRIFE SUBSTITUTING STRIFE WITH INDIVIDUALS OF OTHER STATES FOR STRIFE WITH NEIGHBORS +4350-9170-0039-2789: THE TAXES RAISED FROM THE PEOPLE FOR WAR PREPARATIONS ABSORB THE GREATER PART OF THE PRODUCE OF LABOR WHICH THE ARMY OUGHT TO DEFEND +4350-9170-0040-2790: THE DANGER OF WAR EVER READY TO BREAK OUT 
RENDERS ALL REFORMS OF LIFE SOCIAL LIFE VAIN AND FRUITLESS +4350-9170-0041-2791: BUT THE (FATAL->FIELD) SIGNIFICANCE OF UNIVERSAL MILITARY SERVICE AS THE MANIFESTATION OF THE CONTRADICTION INHERENT IN THE SOCIAL CONCEPTION OF LIFE IS NOT ONLY APPARENT IN THAT +4350-9170-0042-2792: GOVERNMENTS ASSERT THAT ARMIES ARE NEEDED ABOVE ALL FOR EXTERNAL (DEFENSE->DEFENCE) BUT THAT IS NOT TRUE +4350-9170-0043-2793: (THEY ARE->THERE) NEEDED PRINCIPALLY AGAINST THEIR SUBJECTS AND EVERY MAN UNDER UNIVERSAL MILITARY SERVICE BECOMES AN ACCOMPLICE IN ALL (THE->THAT) ACTS OF VIOLENCE OF THE GOVERNMENT AGAINST THE CITIZENS WITHOUT ANY CHOICE OF HIS OWN +4350-9170-0044-2794: AND FOR THE SAKE OF WHAT (AM->*) I MAKING (THEM->EM) +4350-9170-0045-2795: I AM EXPECTED FOR THE SAKE OF (THE->A) STATE TO MAKE THESE SACRIFICES TO RENOUNCE EVERYTHING THAT CAN BE PRECIOUS TO MAN PEACE FAMILY SECURITY AND HUMAN DIGNITY +4350-9170-0046-2796: EXCEPT FOR THE STATE THEY SAY WE SHOULD BE EXPOSED TO THE ATTACKS OF EVIL DISPOSED PERSONS IN OUR OWN COUNTRY +4350-9170-0047-2797: WE (KNOW->*) NOW (*->KNOW) THAT THREATS AND PUNISHMENTS CANNOT DIMINISH THEIR NUMBER THAT THAT CAN ONLY BE DONE BY CHANGE OF ENVIRONMENT AND MORAL INFLUENCE +4350-9170-0048-2798: SO THAT THE JUSTIFICATION OF STATE VIOLENCE ON THE GROUND OF THE PROTECTION IT GIVES US FROM EVIL (DISPOSED->DISPOSE) PERSONS EVEN IF IT HAD SOME FOUNDATION THREE OR FOUR CENTURIES AGO HAS NONE WHATEVER NOW +4350-9170-0049-2799: EXCEPT FOR THE STATE THEY TELL US WE SHOULD NOT HAVE ANY RELIGION EDUCATION CULTURE MEANS OF COMMUNICATION AND SO ON +4350-9170-0050-2800: WITHOUT THE STATE MEN WOULD NOT HAVE BEEN ABLE TO FORM THE SOCIAL INSTITUTIONS NEEDED FOR DOING (ANY THING->ANYTHING) +4350-9170-0051-2801: THIS ARGUMENT TOO WAS WELL FOUNDED ONLY SOME CENTURIES AGO +4350-9170-0052-2802: THE GREAT EXTENSION OF MEANS OF COMMUNICATION AND INTERCHANGE OF IDEAS HAS MADE MEN COMPLETELY ABLE TO DISPENSE WITH STATE AID IN FORMING SOCIETIES ASSOCIATIONS CORPORATIONS AND CONGRESSES FOR SCIENTIFIC (ECONOMIC->AGONIC) AND POLITICAL OBJECTS +4350-9170-0053-2803: WITHOUT (GOVERNMENTS->GOVERNMENT'S) NATIONS WOULD BE ENSLAVED BY THEIR NEIGHBORS +4350-9170-0054-2804: THE GOVERNMENT THEY TELL US WITH ITS ARMY IS NECESSARY TO DEFEND US FROM NEIGHBORING STATES WHO MIGHT ENSLAVE US +4350-9170-0055-2805: AND IF (DEFENSE->DEFENCE) AGAINST BARBAROUS NATIONS IS MEANT ONE (THOUSANDTH->THOUSAND) PART OF THE TROOPS NOW UNDER ARMS WOULD BE AMPLY SUFFICIENT FOR THAT PURPOSE +4350-9170-0056-2806: THE POWER OF THE STATE FAR FROM BEING A SECURITY AGAINST THE ATTACKS OF OUR NEIGHBORS EXPOSES US ON THE CONTRARY TO MUCH GREATER DANGER OF SUCH ATTACKS +4350-9170-0057-2807: EVEN LOOKING AT IT PRACTICALLY WEIGHING THAT IS TO SAY ALL THE (BURDENS->BIRDS) LAID ON HIM BY THE (STATE->STATES) NO MAN CAN FAIL TO SEE THAT FOR HIM PERSONALLY TO COMPLY WITH (*->THE) STATE DEMANDS AND SERVE IN THE ARMY WOULD IN THE MAJORITY OF CASES BE MORE DISADVANTAGEOUS THAN TO REFUSE TO DO SO +4350-9170-0058-2808: TO RESIST WOULD NEED INDEPENDENT THOUGHT AND EFFORT OF WHICH EVERY MAN IS NOT CAPABLE +4350-9170-0059-2809: SO MUCH FOR THE ADVANTAGES AND DISADVANTAGES OF BOTH LINES OF CONDUCT FOR A MAN OF THE WEALTHY (CLASSES->CLASS) AN OPPRESSOR +4350-9170-0060-2810: FOR A MAN OF THE POOR WORKING CLASS THE ADVANTAGES AND DISADVANTAGES WILL BE THE SAME BUT WITH A GREAT INCREASE OF DISADVANTAGES +4852-28311-0000-2098: SAY YOU KNOW (SUMTHIN->SOMETHING) +4852-28311-0001-2099: CHRIS LOOKED FROM A NICKEL PLATED (FLASHLIGHT->FLASH LIKE) TO A CAR JACK AND 
SPARK PLUG +4852-28311-0002-2100: (KNOW WHO->NO ONE) NEEDS A JOB (BAD->BAND) THAT'S (JAKEY->JI) HARRIS +4852-28311-0003-2101: O K HE SAID +4852-28311-0004-2102: ONLY WHY DIDN'T YOU ASK HIM YOURSELF +4852-28311-0005-2103: MIKE BECAME UNEASY AND FISHED (AN ELASTIC->AND MOLASTIC) BAND OUT OF HIS POCKET MADE A FLICK OF PAPER AND SENT IT SOARING OUT (INTO M->IN EM) STREET +4852-28311-0006-2104: WELL HE ADMITTED I DID +4852-28311-0007-2105: CHRIS ASKED (AND->HIM) FOR THE FIRST TIME THAT DAY (THE->THAT) HEAVY WEIGHT HE CARRIED WITHIN HIM LIFTED AND LIGHTENED A LITTLE +4852-28311-0008-2106: (THINK HE->THEY CAME) REALLY NEEDS IT HE PURSUED +4852-28311-0009-2107: HE WOULD HAVE LIKED TO GET THE JOB FOR (JAKEY->JAKIE) WHO NEEDED IT BUT SOMEHOW THE TASK OF FACING MISTER WICKER ESPECIALLY NOW THAT THE LIGHT WAS GOING AND DUSK (EDGING->EDGED) INTO THE STREETS WAS NOT WHAT (CHRIS HAD INTENDED->CHRISTEN TENDED) FOR ENDING THE AFTERNOON +4852-28311-0010-2108: MIKE'S EXPRESSION CHANGED AT (*->WHAT) ONCE TO ONE OF TRIUMPH BUT CHRIS WAS ONLY (PARTLY->PARSLY) ENCOURAGED +4852-28311-0011-2109: (BETCHA AREN'T->BUT YOU'RE A) GOIN AFTER ALL (CHRIS->THIS) TURNED ON HIM +4852-28311-0012-2110: MIKE WAS STANDING ON THE CORNER +4852-28311-0013-2111: (AW SHUCKS->AH SHOCKS) +4852-28311-0014-2112: CHRIS STARTED OFF ONCE MORE PASSING (THE->A) BLEAK LITTLE VICTORIAN CHURCH PERCHED ON THE HILL ABOVE MISTER WICKER'S HOUSE +4852-28311-0015-2113: AN EMPTY LOT CUT (*->IN) INTO BY CHURCH LANE GAVE A LOOK OF ISOLATION TO THE (L->ELE) SHAPED BRICK BUILDING THAT SERVED MISTER (WICKER AS->WICKER'S) BOTH HOUSE AND PLACE OF BUSINESS +4852-28311-0016-2114: (THE->NO) LONGER (WING->WINGED) TOWARD THE BACK (HAD->GOT) A BACK DOOR THAT OPENED (ONTO->ON A) WATER STREET THE SPACE BETWEEN THE HOUSE AND (WISCONSIN AVENUE->MISS CONSIN ATTIGUE) HAD BEEN MADE INTO A NEAT OBLONG FLOWER GARDEN FENCED OFF FROM THE SIDEWALK BY BOX (SHRUBS->SHRUGS) AND A WHITE PICKET FENCE +4852-28311-0017-2115: A LIVID YELLOW STAINED THE HORIZON BEYOND THE FACTORIES AND (GRAY->GLAY) CLOUDS LOWERED AND TUMBLED ABOVE +4852-28311-0018-2116: THE AIR WAS GROWING CHILL AND CHRIS DECIDED TO FINISH (HIS->THE) JOB +4852-28311-0019-2117: ALL AT ONCE (HE->YOU) WONDERED HOW HIS MOTHER WAS AND EVERYTHING IN HIM PINCHED AND TIGHTENED ITSELF +4852-28311-0020-2118: AT THE FOOT OF THE HILL HE REACHED THE HOUSE +4852-28311-0021-2119: THERE WERE THREE THINGS THAT ALWAYS CAUGHT HIS EYE AMID THE LITTER OF DUSTY PIECES +4852-28311-0022-2120: ON THE LEFT THE COIL OF ROPE IN THE CENTER (*->OF) THE MODEL OF A SAILING SHIP IN A GREEN GLASS BOTTLE AND ON THE RIGHT THE WOODEN STATUE OF A NEGRO BOY IN BAGGY TROUSERS TURKISH JACKET AND WHITE TURBAN +4852-28311-0023-2121: BUT THE NAME STILL SHOWED AT THE PROW AND MANY A TIME CHRIS SAFE AT HOME IN BED HAD SAILED IMAGINARY VOYAGES IN THE MIRABELLE +4852-28311-0024-2122: (HE HAD->HE'D) NEVER SEEN ANYONE GO INTO MISTER WICKER'S SHOP NOW HE THOUGHT OF IT +4852-28311-0025-2123: HOW THEN DID HE LIVE AND WHAT DID HE EVER SELL +4852-28311-0026-2124: A SUDDEN CAR HORN (WOKE HIM->WALKING) FROM (HIS DREAM->THE STREAM) +4852-28312-0000-2125: OF THE MANY TIMES (HE HAD->YOU) EXAMINED MISTER WICKER'S WINDOW AND (PORED->POURED) OVER THE ROPE (THE->TO) SHIP AND THE NUBIAN BOY HE HAD NEVER GONE INTO MISTER WICKER'S SHOP +4852-28312-0001-2126: SO NOW ALONE UNTIL (SOMEONE->SOME ONE) SHOULD ANSWER THE BELL (HE->THEY) LOOKED EAGERLY IF UNEASILY AROUND HIM +4852-28312-0002-2127: WHAT WITH THE ONE WINDOW AND THE LOWERING DAY OUTSIDE THE LONG NARROW SHOP WAS SOMBER 
+4852-28312-0003-2128: HEAVY HAND (HEWN->YOU AND) BEAMS CROSSED IT FROM ONE SIDE TO THE OTHER +4852-28312-0004-2129: (MISTER WICKER'S->MISS JOKERS) BACK BEING TOWARD THE SOURCE OF LIGHT CHRIS COULD NOT SEE HIS FACE +4852-28312-0005-2130: THE DOUBLE FANS (OF->A) MINUTE WRINKLES BREAKING FROM EYE CORNER TO TEMPLE AND JOINING WITH THOSE OVER THE (CHEEKBONES->CHEAP BONES) WERE DRAWN INTO THE HORIZONTAL LINES ACROSS THE DOMED FOREHEAD +4852-28312-0006-2131: LITTLE TUFTS OF WHITE (FUZZ->FUZ) ABOVE THE EARS WERE ALL THAT REMAINED OF THE ANTIQUARIAN'S HAIR BUT WHAT DREW AND HELD CHRIS'S GAZE (WERE->WITH) THE OLD MAN'S EYES +4852-28312-0007-2132: (CHRIS BLINKED->CRISP BINKED) AND LOOKED AGAIN YES THEY WERE STILL THERE +4852-28312-0008-2133: (CHRIS SWALLOWED->GRIS SWALLOW) AND HIS VOICE CAME BACK TO HIM +4852-28312-0009-2134: YES SIR HE SAID +4852-28312-0010-2135: I SAW YOUR SIGN AND I KNOW A BOY WHO NEEDS THE JOB +4852-28312-0011-2136: HE'S A SCHOOLMATE OF MINE +4852-28312-0012-2137: (JAKEY HARRIS HIS->JAGGIE ARIST'S) NAME (IS AND->ISN'T) HE REALLY NEEDS THE JOB +4852-28312-0013-2138: I I JUST (WONDERED->WANTED) IF THE PLACE WAS STILL OPEN +4852-28312-0014-2139: WHAT HE SAW WAS A FRESH CHEEKED LAD TALL FOR THIRTEEN STURDY WITH SINCERITY AND GOOD (HUMOR->HUMOUR) IN HIS FACE AND SOMETHING (SENSITIVE->SCENTED) AND APPEALING ABOUT HIS EYES +4852-28312-0015-2140: HE GUESSED THERE MUST BE A LIVELY FIRE IN THAT ROOM BEYOND +4852-28312-0016-2141: WOULD THAT (INTERFERE->INFERE) WITH (JAKEY'S->JAKI GIGGS) GETTING THE JOB SIR +4852-28312-0017-2142: BUT EVEN AS HE SLOWLY TURNED THE THOUGHT PIERCED HIS MIND WHY (HAD->*) HE NOT SEEN THE REFLECTION OF THE HEADLIGHTS OF THE CARS MOVING UP AROUND THE (CORNER OF WATER->CORRIE WALL UNDER) STREET (AND UP->NOT) THE HILL TOWARD THE (TRAFFIC->LIFE) SIGNALS +4852-28312-0018-2143: THE (ROOM->ROME) SEEMED OVERLY STILL +4852-28312-0019-2144: THEN IN THAT SECOND HE TURNED AND FACED ABOUT +4852-28312-0020-2145: THE WIDE BOW (WINDOW->WIND THAT) WAS THERE BEFORE HIM THE THREE OBJECTS HE LIKED BEST SHOWING FROSTY IN THE MOONLIGHT THAT POURED IN FROM ACROSS THE WATER +4852-28312-0021-2146: ACROSS THE WATER WHERE (WAS->IS) THE (FREEWAY->FREE WAY) +4852-28312-0022-2147: IT WAS NO LONGER THERE NOR WERE THE HIGH WALLS AND SMOKESTACKS OF FACTORIES TO BE SEEN +4852-28312-0023-2148: THE WAREHOUSES WERE STILL THERE +4852-28312-0024-2149: (FLABBERGASTED->FLAVAGASTED) AND BREATHLESS CHRIS WAS UNAWARE THAT HE HAD MOVED CLOSER TO PEER OUT THE WINDOW IN EVERY DIRECTION +4852-28312-0025-2150: NO ELECTRIC SIGNS NO LAMPLIT STREETS +4852-28312-0026-2151: WHERE THE PEOPLE'S (DRUGSTORE HAD->DRUG STORE IT) STOOD BUT (A->*) HALF (*->AN) HOUR BEFORE ROSE THE ROOFS OF WHAT WAS EVIDENTLY AN INN +4852-28312-0027-2152: A COURTYARD WAS (SPARSELY->FIRSTLY) LIT BY A FLARING (TORCH OR TWO->TORTURE TO) SHOWING A SWINGING SIGN HUNG ON (A->THE) POST +4852-28312-0028-2153: THE POST (WAS PLANTED->IS BLOODED) AT THE EDGE OF (WHAT->IT) WAS NOW A BROAD AND (MUDDY->MONEY) ROAD +4852-28312-0029-2154: A COACH (WITH ITS TOP->WHEN THEY STOPPED) PILED HIGH WITH LUGGAGE STAMPED (TO A->OR) HALT BESIDE THE FLAGGED COURTYARD +4852-28312-0030-2155: THEY MOVED INTO THE INN THE COACH RATTLED OFF TO THE STABLE +4852-28312-0031-2156: MY WINDOW (HAS->AS) A POWER FOR THOSE FEW WHO ARE TO SEE +4852-28319-0000-2070: THE LEARNING OF MAGIC WAS BY NO MEANS EASY +4852-28319-0001-2071: HE (HAD->*) TOLD HIS MASTER AT ONCE (ABOUT->HE GOT) SIMON (GOSLER->GOSPIR) HIS (HORDE->HOARD) OF MONEY (AND->IN) HIS HIDING PLACES FOR IT 
+4852-28319-0002-2072: CHRIS THEREFORE THREW HIMSELF (INTO->AND) ALL THE (PRELIMINARIES->PROLIMINARIES) OF HIS TASK +4852-28319-0003-2073: ONE AFTERNOON WHEN HE (*->HAD) RETURNED AFTER A REST TO MISTER WICKER'S STUDY HE SAW THAT THERE WAS SOMETHING NEW IN THE ROOM A (BOWL->BULL) WITH A (GOLDFISH->GOLD FISH) IN IT STOOD ON THE TABLE BUT MISTER WICKER WAS NOT TO BE SEEN +4852-28319-0004-2074: WHAT (SHALL->SHOULD) I DO FIRST +4852-28319-0005-2075: HOW YOU (HAVE IMPROVED->OFTEN PROVED) MY BOY (HE->IT) EXCLAIMED (IT->*) IS NOW TIME FOR YOU TO TRY (AND THIS IS AS GOOD->MISSUS GOT) A CHANGE AS ANY +4852-28319-0006-2076: SUPPOSE I CHANGE AND (CAN'T CHANGE->CATCH ITS) BACK +4852-28319-0007-2077: MISTER WICKER WAITED PATIENTLY BESIDE HIM FOR A FEW MOMENTS FOR CHRIS TO GET UP HIS COURAGE +4852-28319-0008-2078: (THEN AS->THAT IS) NOTHING HAPPENED WITH A VOICE LIKE A WHIP MISTER WICKER SAID (START AT->STARTED) ONCE +4852-28319-0009-2079: THE SENSATION SPREAD FASTER AND FASTER +4852-28319-0010-2080: HIS HEAD SWAM AND HE FELT FAINT (AND->IN) A LITTLE SICK BUT HE PERSISTED THROUGH THE FINAL WORDS +4852-28319-0011-2081: HE THOUGHT (NOT->NOW) WITHOUT A FEELING OF PRIDE AND COMMENCED (*->THE) EXPERIMENTING WITH HIS TAIL AND FINS WITH SUCH ENTHUSIASM AND DELIGHT THAT SOME LITTLE TIME ELAPSED BEFORE MISTER WICKER'S VOICE (BOOMED CLOSE->BOOM BUT OAST) BY +4852-28319-0012-2082: SEVENTY FOUR BOOK ONE THE RETURN +4852-28319-0013-2083: THE FIGURE'S SHOES CARVED IN SOME EASTERN STYLE HAD CURVED UP POINTING TOES +4852-28319-0014-2084: THEN ALL AT ONCE THE IDEA CAME TO CHRIS +4852-28319-0015-2085: IF HE WAS TO BE A MAGICIAN COULD HE MAKE THIS BOY COME TO LIFE +4852-28319-0016-2086: (HE->IT) SQUATTED ON HIS HAUNCHES (EXAMINING->EXAMINED) THE CARVED (WOODEN->WOOD AND) FIGURE ATTENTIVELY AND FELT CONVINCED THAT ONCE ALIVE THE BOY WOULD BE AN IDEAL AND HAPPY COMPANION +4852-28319-0017-2087: BUT HOW DID ONE (*->A) CHANGE INANIMATE TO (ANIMATE->ENEMY) +4852-28319-0018-2088: CHRIS GOT UP AND STOLE BACK TO MISTER WICKER'S DOOR +4852-28319-0019-2089: HE HEARD (THE->THAT) MAGICIAN GOING UP THE SPIRAL STAIRCASE TO HIS ROOM ABOVE AND AFTER CHANGING HIMSELF TO A MOUSE TO SLIP UNDER THE DOOR AND SEE THAT THE ROOM WAS (REALLY->REELING) EMPTY (CHRIS RESUMED HIS->MISTER JAMES'S) PROPER SHAPE AND OPENED THE DOORS OF THE CUPBOARD AT THE FAR END OF THE ROOM +4852-28319-0020-2090: THE AFTERNOON (RAINY->RAINING) BEFORE INCREASED IN STORM +4852-28319-0021-2091: (DUSK->THUS) CAME TWO HOURS BEFORE ITS TIME THUNDER (SNARLED->SNARLS) IN THE SKY +4852-28319-0022-2092: CERTAIN ELEMENTS WERE TO BE MIXED AND POURED AT THE PROPER TIME +4852-28319-0023-2093: MISTER WICKER BEGAN MOVING ABOUT UPSTAIRS THE (FLOORBOARDS->FLOOR BOARDS) CREAKED AND STILL CHRIS COULD NOT LEAVE UNTIL THE (POTION->FOCIN) FUMED AND GLOWED +4852-28319-0024-2094: WITH INFINITE CAUTION CHRIS CLOSED THE DOOR SILENTLY BEHIND HIM AND RUNNING (LIGHTLY FORWARD->LATE BEFORE) REACHED THE FIGURE (OF->AT) THE NEGRO BOY +4852-28319-0025-2095: IT WAS AS IF THE STIFFNESS MELTED +4852-28319-0026-2096: UNDER HIS EYES (THE->*) WOODEN FOLDS OF CLOTH BECAME RICH SILK EMBROIDERY GLEAMED IN ITS REALITY UPON THE COAT AND OH THE FACE +4852-28319-0027-2097: THE WOODEN GRIN LOOSENED THE LARGE EYES TURNED THE HAND HOLDING THE HARD BOUQUET OF CARVED FLOWERS MOVED (AND LET->*) THE BOUQUET FALL +4852-28330-0000-2044: THEY WENT DOWN TO THEIR QUARTERS FIRST +4852-28330-0001-2045: GUESS MISTER FINNEY WENT TO HIS QUARTERS I DON'T REMEMBER SEEING HIM CROSS THE DECK OR COME OVER THAT WAY AT ALL 
+4852-28330-0002-2046: NEXT NED (CILLEY->SILLY) WAS RELIEVED (AT->TO) THE (HELM->HOME) BY (ELBERT->HILBER) JONES WHO TOOK OVER NED WENT ON DOWN +4852-28330-0003-2047: IT LOOKS TO ME AS IF IT COULD (HAVE BEEN->BE) ONE OF SEVERAL PEOPLE AND I'LL BE SWITCHED IF I KNOW WHO (I'LL KEEP->LOOK GIVE) MY EYES (OPEN->UP AND) +4852-28330-0004-2048: THE MIRABELLE WAS NEARING (TAHITI->TEDI) +4852-28330-0005-2049: (WE'VE->WE) WATER AND FRESH (STORES->STALLS) TO TAKE ON THERE +4852-28330-0006-2050: CHRIS LOST NO TIME AS SOON AS HE COULD DO IT WITHOUT BEING NOTICED (IN->AND) HURRYING DOWN TO HIS CABIN +4852-28330-0007-2051: CERTAINLY MY BOY BOOMED OUT THE CAPTAIN (HIS->AS) BLUE EYES ABRUPTLY KEEN AND PENETRATING +4852-28330-0008-2052: MISTER (FINNEY->FINNELL) WILL BE SOME TIME ON DECK WE CANNOT BE (OVERHEARD->OWN HEARD) IN HERE +4852-28330-0009-2053: HIS FACE (FROZE->ROSE) WITH NERVOUSNESS THAT THIS MIGHT (NOT->*) DO AS AN ANSWER AND HE STOOD STIFF AND STILL BEFORE CAPTAIN BLIZZARD +4852-28330-0010-2054: THE CAPTAIN SAT FORWARD IN HIS CHAIR LOOKING AT HIM FOR A LONG MOMENT CONSIDERING +4852-28330-0011-2055: THEN HE SAID WELL I DO NOT CARE FOR IT I CANNOT SAY (I->THAT) DO +4852-28330-0012-2056: (THIS->THE) SHIP IS MORE TO ME THAN (WIFE OR MOTHER->MY FOREMOTHER) OR FAMILY +4852-28330-0013-2057: (HE->AND) PAUSED FINGERING HIS LOWER LIP AND LOOKING SIDEWAYS (IN A->INTO) REFLECTIVE FASHION AT CHRIS STANDING BEFORE HIM +4852-28330-0014-2058: WE SHALL SAY NO MORE BUT I TRUST YOU UNDERSTAND THE RESPONSIBILITY YOU HAVE +4852-28330-0015-2059: (THIS->THE) SHIP ITS CARGO (AND->IN) ITS MEN WILL BE IN YOUR HANDS +4852-28330-0016-2060: YES SIR I THINK I CAN DO IT SAFELY OR I SHOULD NOT TRY SIR +4852-28330-0017-2061: CAPTAIN BLIZZARD'S ROUND PINK (FACE->FACED) CREASED IN (HIS->ITS) WINNING SMILE +4852-28330-0018-2062: HE THEN WENT ON TO DESCRIBE WHAT ELSE WAS TO FOLLOW THE COVERING OF THE SHIP WITH LEAVES TO MAKE IT BLEND WITH ITS SURROUNDINGS +4852-28330-0019-2063: (CAMOUFLAGE->THE CAMEL FLASH) WAS NOT A WORD THE CAPTAIN OR (ANYONE->ANY ONE) ELSE OF HIS TIME (YET->HE HAD) UNDERSTOOD +4852-28330-0020-2064: WHAT CAN BE SAID DURING THAT TIME SIR (CHRIS THOUGHT->CHRISTO) TO ASK +4852-28330-0021-2065: I AM SOMEWHAT SKILLED (IN->AT) MEDICAMENTS I HAVE TO BE AS (*->A) CAPTAIN OF (A->*) SHIP AND THE CREW KNOW IT +4852-28330-0022-2066: I SHALL SAY THAT YOU ARE IN MY OWN CABIN SO THAT I CAN CARE FOR YOU +4852-28330-0023-2067: NOT SINCE HE HAD LEFT MISTER WICKER (HAD->AND) CHRIS FELT SUCH CONFIDENCE AS HE DID IN THE WORDS AND ACTIONS OF CAPTAIN BLIZZARD +4852-28330-0024-2068: HE KNEW NOW THAT HIS ABSENCE FOR AS LONG AS HE HAD (*->HAD) TO BE AWAY WOULD BE COVERED UP AND SATISFACTORILY ACCOUNTED FOR +4852-28330-0025-2069: THEIR CONVERSATION HAD TAKEN SOME (LITTLE->OF THE) WHILE +533-1066-0000-796: WHEN CHURCHYARDS YAWN +533-1066-0001-797: I KNEW WELL ENOUGH THAT HE MIGHT BE CARRIED (THOUSANDS->THOUSAND) OF MILES IN THE BOX CAR LOCKED IN PERHAPS WITHOUT WATER OR (FOOD->FOLD) +533-1066-0002-798: I AM SURE I (KISSED LIDDY->GUESS LIVY) AND (I HAVE->I'VE) HAD (TERRIBLE->SEVERAL) MOMENTS SINCE WHEN I (SEEM->SEEMED) TO REMEMBER KISSING MISTER JAMIESON TOO IN THE EXCITEMENT +533-1066-0003-799: FORTUNATELY WARNER (AND->UNDER) THE (DETECTIVES->TETE) WERE KEEPING BACHELOR HALL IN THE LODGE +533-1066-0004-800: OUT OF (DEFERENCE TO LIDDY->THEIR FIRST LIVY) THEY WASHED (THEIR->HER) DISHES ONCE (A->TO) DAY AND THEY (CONCOCTED->CONCLUDED) QUEER (MESSES->MASSES) ACCORDING TO THEIR SEVERAL ABILITIES +533-1066-0005-801: MISS (INNES->EANS) HE SAID 
STOPPING ME AS I WAS ABOUT TO GO TO MY ROOM UP STAIRS HOW ARE YOUR NERVES (TONIGHT->TO NIGHT) +533-1066-0006-802: I HAVE NONE I SAID HAPPILY +533-1066-0007-803: I MEAN HE PERSISTED DO YOU FEEL AS THOUGH YOU COULD GO THROUGH WITH SOMETHING RATHER UNUSUAL +533-1066-0008-804: THE MOST UNUSUAL THING I CAN THINK OF WOULD BE A PEACEFUL NIGHT +533-1066-0009-805: SOMETHING IS GOING TO OCCUR HE SAID +533-1066-0010-806: PUT ON HEAVY SHOES AND SOME (OLD->ALL) DARK CLOTHES AND MAKE UP YOUR MIND NOT (TO->*) BE SURPRISED AT ANYTHING +533-1066-0011-807: (LIDDY->LINING) WAS SLEEPING (THE->*) SLEEP OF THE JUST WHEN I WENT (UP STAIRS->UPSTAIRS) AND I HUNTED OUT MY THINGS CAUTIOUSLY +533-1066-0012-808: (THEY WERE->DO YOU) TALKING (CONFIDENTIALLY->TO FILLIENTLY) TOGETHER BUT WHEN I CAME DOWN THEY CEASED +533-1066-0013-809: (THERE->THEY) WERE A FEW PREPARATIONS TO BE MADE (THE LOCKS->LOGS) TO BE GONE OVER (WINTERS TO BE INSTRUCTED->WINTERSPIN INSTRUCTIVE) AS TO RENEWED (VIGILANCE->VISIONS) AND THEN AFTER EXTINGUISHING THE (HALL->WHOLE) LIGHT WE CREPT IN THE DARKNESS THROUGH THE FRONT DOOR AND INTO THE NIGHT +533-1066-0014-810: I ASKED NO QUESTIONS +533-1066-0015-811: (ONCE->WAS) ONLY SOMEBODY SPOKE AND THEN IT WAS AN EMPHATIC (BIT->WID) OF PROFANITY FROM DOCTOR STEWART WHEN HE RAN INTO A WIRE FENCE +533-1066-0016-812: I (HARDLY->ARE TO) KNOW WHAT I EXPECTED +533-1066-0017-813: THE DOCTOR WAS PUFFING SOMEWHAT WHEN WE FINALLY CAME TO A HALT +533-1066-0018-814: I CONFESS THAT JUST AT THAT MINUTE EVEN SUNNYSIDE SEEMED A CHEERFUL SPOT +533-1066-0019-815: IN SPITE OF MYSELF I DREW MY BREATH IN SHARPLY +533-1066-0020-816: IT WAS ALEX ARMED WITH TWO LONG HANDLED SPADES +533-1066-0021-817: (THE->*) DOCTOR KEPT A (KEEN LOOKOUT->KIN LOOK OUT) BUT NO ONE APPEARED +533-1066-0022-818: THERE'S ONE THING SURE I'LL NOT BE SUSPECTED OF COMPLICITY +533-1066-0023-819: (A->*) DOCTOR IS GENERALLY SUPPOSED TO BE (*->A) HANDIER AT (BURYING->BEARING) FOLKS THAN (AT DIGGING->A TIGING) THEM UP +533-1066-0024-820: I HELD ON TO HIM FRANTICALLY AND SOMEHOW I GOT THERE AND LOOKED DOWN +533-131556-0000-821: BUT HOW AM I TO (GET->HER) OVER THE TEN OR TWELVE DAYS THAT MUST YET ELAPSE BEFORE THEY GO +533-131556-0001-822: FOR NONE COULD (INJURE->ENDURE) ME AS HE HAS DONE OH +533-131556-0002-823: THE (WORD STARES->WORDS TEARS) ME IN THE FACE LIKE A GUILTY CONFESSION BUT IT IS TRUE I HATE HIM I HATE HIM +533-131556-0003-824: I SOMETIMES THINK I OUGHT TO GIVE HIM CREDIT FOR THE GOOD FEELING (HE SIMULATES->SIMILATE) SO WELL AND THEN AGAIN I THINK IT IS MY DUTY TO SUSPECT HIM UNDER THE PECULIAR CIRCUMSTANCES IN WHICH I AM PLACED +533-131556-0004-825: I HAVE DONE WELL TO RECORD (THEM SO MINUTELY->HIM SOMINUTELY) +533-131556-0005-826: THEY (*->HAVE) HAD (BETAKEN->TAKEN) THEMSELVES TO THEIR WORK I LESS (TO->*) DIVERT MY MIND THAN TO DEPRECATE CONVERSATION (HAD PROVIDED MYSELF WITH->AT REVOLT) A BOOK +533-131556-0006-827: I AM TOO WELL ACQUAINTED WITH (YOUR->THEIR) CHARACTER AND CONDUCT TO FEEL ANY REAL FRIENDSHIP FOR YOU AND AS I AM WITHOUT YOUR TALENT FOR (DISSIMULATION->THE SIMULATION) I CANNOT ASSUME THE APPEARANCE OF IT +533-131556-0007-828: (UPON->UP AND) PERUSING THIS SHE TURNED SCARLET AND BIT HER LIP +533-131556-0008-829: YOU MAY GO (MILICENT->MILLICINE) AND SHE'LL (FOLLOW IN->FOLLOWING) A WHILE (MILICENT->MELLICENT) WENT +533-131556-0009-830: (WILL YOU OBLIGE->OLY OBLIGED) ME (HELEN->ALLAN) CONTINUED SHE +533-131556-0010-831: (AH->HA) YOU ARE SUSPICIOUS +533-131556-0011-832: IF I WERE SUSPICIOUS I REPLIED I SHOULD HAVE DISCOVERED YOUR (INFAMY 
LONG->INFAMYLON) BEFORE +533-131556-0012-833: I ENJOY A (MOONLIGHT->MONTH) RAMBLE AS WELL AS YOU I ANSWERED STEADILY FIXING MY EYES (UPON HER->UP) AND (THE SHRUBBERY->EARTH AND SHRABBERY) HAPPENS TO BE ONE OF MY (FAVOURITE->FAVORITE) RESORTS +533-131556-0013-834: SHE COLOURED (AGAIN->BEGAN) EXCESSIVELY AND REMAINED SILENT (PRESSING->RAISING) HER FINGER AGAINST HER (TEETH->CHEEKS) AND GAZING INTO THE FIRE +533-131556-0014-835: I (WATCHED HER->WATCH FOR) A FEW MOMENTS (WITH A->TO THE) FEELING OF MALEVOLENT GRATIFICATION THEN MOVING TOWARDS THE DOOR I CALMLY ASKED IF SHE HAD ANYTHING MORE TO SAY +533-131556-0015-836: YES YES +533-131556-0016-837: SUPPOSE I DO +533-131556-0017-838: SHE PAUSED IN EVIDENT DISCONCERTION AND PERPLEXITY MINGLED WITH ANGER SHE DARED NOT SHOW +533-131556-0018-839: I CANNOT RENOUNCE WHAT IS DEARER THAN LIFE SHE MUTTERED IN A LOW HURRIED TONE +533-131556-0019-840: IF YOU ARE (GENEROUS->GENERALS) HERE IS A (FITTING->FEELING) OPPORTUNITY FOR THE EXERCISE OF YOUR MAGNANIMITY IF YOU ARE PROUD (HERE->HEAR) AM I YOUR RIVAL (READY TO ACKNOWLEDGE->RETICOSE) MYSELF YOUR (DEBTOR->DAUGHTER) FOR (AN->*) ACT OF (THE->*) MOST NOBLE FORBEARANCE +533-131556-0020-841: I SHALL NOT TELL HIM +533-131556-0021-842: GIVE ME NO THANKS IT IS NOT FOR YOUR SAKE THAT I REFRAIN +533-131556-0022-843: AND (MILICENT->MILLISON) WILL (YOU->IT) TELL HER +533-131556-0023-844: I (WOULD->WILL) NOT FOR MUCH THAT (SHE->YOU) SHOULD (KNOW THE INFAMY AND->NOT EVEN IN) DISGRACE OF HER RELATION +533-131556-0024-845: YOU USE (HARD->OUR) WORDS MISSUS HUNTINGDON BUT I CAN PARDON YOU +533-131556-0025-846: HOW DARE YOU MENTION HIS NAME TO ME +533-131562-0000-847: IT SEEMS VERY INTERESTING LOVE SAID HE LIFTING HIS HEAD AND (TURNING->SHIRTING) TO (WHERE I STOOD->HER EYES TOO) WRINGING MY (HANDS->HAND) IN SILENT (RAGE->RATES) AND ANGUISH BUT IT'S RATHER LONG (I'LL->I) LOOK AT IT SOME OTHER TIME AND MEANWHILE I'LL TROUBLE YOU FOR YOUR (KEYS->CASE) MY DEAR WHAT (KEYS->CASE) +533-131562-0001-848: (THE KEYS->IT A KISS) OF YOUR CABINET DESK (DRAWERS->DRAWER) AND WHATEVER ELSE YOU POSSESS SAID HE RISING AND HOLDING OUT HIS HAND +533-131562-0002-849: THE KEY OF MY (DESK->VES) IN FACT WAS AT THAT MOMENT IN (THE LOCK->LOVE) AND THE OTHERS WERE ATTACHED TO IT +533-131562-0003-850: NOW THEN SNEERED HE WE MUST HAVE A CONFISCATION OF PROPERTY +533-131562-0004-851: AND (PUTTING->PUT IN) THE KEYS INTO HIS POCKET HE WALKED INTO THE LIBRARY +533-131562-0005-852: THAT AND ALL REPLIED THE (MASTER->MERCER) AND THE THINGS WERE CLEARED AWAY +533-131562-0006-853: MISTER HUNTINGDON THEN WENT (UP STAIRS->UPSTAIRS) +533-131562-0007-854: MUTTERED HE STARTING BACK SHE'S (THE->*) VERY DEVIL FOR SPITE +533-131562-0008-855: I (DIDN'T->THEN) SAY (I'D->I'VE) BROKEN IT DID I RETURNED HE +533-131562-0009-856: I SHALL PUT YOU (UPON->UP IN) A SMALL (MONTHLY ALLOWANCE->MOUTHLY ALLOW US) IN FUTURE FOR YOUR OWN PRIVATE EXPENSES AND YOU NEEDN'T TROUBLE YOURSELF ANY MORE ABOUT MY CONCERNS I SHALL LOOK OUT FOR A STEWARD MY DEAR I WON'T EXPOSE YOU TO (THE->*) TEMPTATION +533-131562-0010-857: AND AS FOR THE (HOUSEHOLD->HOUSE OF) MATTERS MISSUS (GREAVES->GREEBS) MUST BE VERY PARTICULAR IN KEEPING HER ACCOUNTS WE MUST GO (UPON->UP IN) AN (ENTIRELY->ENCHANTING) NEW PLAN +533-131562-0011-858: WHAT GREAT DISCOVERY HAVE YOU MADE NOW MISTER (HUNTINGDON->HONEYMAN) +533-131562-0012-859: (HAVE I ATTEMPTED->EVER ATTENDED) TO DEFRAUD YOU +533-131562-0013-860: NOT IN MONEY MATTERS EXACTLY IT SEEMS BUT IT'S BEST TO KEEP OUT OF THE WAY OF TEMPTATION +533-131562-0014-861: HERE 
(BENSON->BESIN) ENTERED (WITH->*) THE CANDLES AND THERE (FOLLOWED A->FELL THE) BRIEF INTERVAL OF SILENCE I SITTING (STILL IN->STEALING) MY CHAIR AND HE STANDING WITH HIS BACK TO THE FIRE SILENTLY TRIUMPHING IN MY DESPAIR +533-131562-0015-862: I KNOW THAT DAY AFTER DAY SUCH FEELINGS (WILL->TO) RETURN (UPON->UP ON) ME +533-131562-0016-863: I (TRY->TRIED) TO LOOK TO HIM AND RAISE MY HEART TO HEAVEN BUT IT WILL (CLEAVE->CLIFF) TO THE DUST +533-131564-0000-768: VAIN HOPE I FEAR +533-131564-0001-769: MISTER AND MISSUS (HATTERSLEY->HAUGHTERSLEY) HAVE BEEN (STAYING AT THE GROVE A FORTNIGHT->SEEING IT TO GROW BEFORE NIGHT) AND AS (MISTER->MISSUS) HARGRAVE IS STILL ABSENT AND THE WEATHER WAS REMARKABLY FINE (I NEVER PASSED->AND REPAST) A DAY WITHOUT SEEING MY TWO FRIENDS (MILICENT->MILLSON) AND ESTHER EITHER THERE OR HERE +533-131564-0002-770: NO UNLESS YOU CAN TELL ME WHEN TO EXPECT HIM HOME +533-131564-0003-771: I CAN'T (YOU DON'T WANT->EVEN ONE WANTS) HIM DO YOU +533-131564-0004-772: IT IS A RESOLUTION YOU (OUGHT TO HAVE FORMED->ARE REFORMED) LONG AGO +533-131564-0005-773: WE ALL HAVE A BIT OF A LIKING FOR HIM AT THE BOTTOM OF OUR (HEARTS->HEART) THOUGH (WE->IT) CAN'T RESPECT HIM +533-131564-0006-774: NO I'D RATHER BE LIKE MYSELF (BAD AS->WHETHER) I AM +533-131564-0007-775: NEVER MIND MY PLAIN SPEAKING SAID I IT IS FROM THE BEST OF MOTIVES +533-131564-0008-776: BUT TELL ME SHOULD YOU WISH YOUR SONS TO BE LIKE MISTER HUNTINGDON OR EVEN LIKE YOURSELF +533-131564-0009-777: OH NO I COULDN'T STAND THAT +533-131564-0010-778: (FIRE->FORE) AND FURY +533-131564-0011-779: NOW DON'T (BURST->FORCE) INTO A TEMPEST AT THAT +533-131564-0012-780: BUT HANG IT THAT'S NOT MY FAULT +533-131564-0013-781: NOT (YEARS->EARS) FOR SHE'S ONLY FIVE AND TWENTY +533-131564-0014-782: WHAT (WOULD->DID) YOU MAKE OF ME AND THE CHILDREN TO BE SURE THAT (WORRY HER TO->WERE HE HURT) DEATH BETWEEN THEM +533-131564-0015-783: I KNOW THEY ARE BLESS THEM +533-131564-0016-784: (HE FOLLOWED->IF ALL OF) ME INTO THE LIBRARY +533-131564-0017-785: I (SOUGHT->SET) OUT AND PUT INTO HIS HANDS TWO OF (MILICENT'S->MILLSON'S) LETTERS ONE (DATED->DID IT) FROM LONDON AND WRITTEN DURING ONE OF HIS (WILDEST->WALLACE) SEASONS OF RECKLESS DISSIPATION THE OTHER IN THE COUNTRY DURING A LUCID INTERVAL +533-131564-0018-786: THE FORMER WAS FULL OF TROUBLE AND ANGUISH NOT ACCUSING HIM BUT DEEPLY REGRETTING HIS CONNECTION WITH HIS PROFLIGATE COMPANIONS ABUSING MISTER GRIMSBY AND OTHERS INSINUATING BITTER THINGS AGAINST MISTER (HUNTINGDON->HUNTON) AND MOST (INGENIOUSLY->INGENUOUSLY) THROWING THE BLAME OF HER HUSBAND'S MISCONDUCT ON (TO->THE) OTHER (MEN'S->MAN'S) SHOULDERS +533-131564-0019-787: I'VE BEEN (A CURSED->ACCURSED) RASCAL GOD KNOWS SAID HE AS HE GAVE IT (A HEARTY->AN EARTHLY) SQUEEZE BUT YOU SEE IF I DON'T MAKE AMENDS FOR IT (D N->THEN) ME IF I DON'T +533-131564-0020-788: IF YOU INTEND TO REFORM INVOKE GOD'S BLESSING (HIS->IS) MERCY (AND HIS AID->IN THIS APE) NOT (HIS CURSE->DISCOURSE) +533-131564-0021-789: GOD HELP ME THEN FOR (I'M->I AM) SURE I NEED IT +533-131564-0022-790: WHERE'S (MILICENT->MILLICENT) +533-131564-0023-791: NAY NOT I SAID HE TURNING (HER->*) ROUND AND PUSHING (HER->*) TOWARDS ME +533-131564-0024-792: (MILICENT FLEW->MILLISON FLUD) TO THANK ME (OVERFLOWING WITH->OVERWHELMING ITS) GRATITUDE +533-131564-0025-793: CRIED SHE I COULDN'T HAVE (INFLUENCED->EVILISED) HIM I'M SURE BY ANYTHING THAT I COULD HAVE SAID +533-131564-0026-794: YOU NEVER TRIED ME (MILLY->MERELY) SAID HE +533-131564-0027-795: AFTER THAT THEY WILL REPAIR TO THEIR COUNTRY 
HOME +5442-32873-0000-1365: CAPTAIN LAKE DID NOT LOOK AT (ALL->ON) LIKE A LONDON DANDY NOW +5442-32873-0001-1366: THERE WAS A VERY NATURAL SAVAGERY AND DEJECTION (THERE->THEN) AND A (WILD LEER IN HIS->WIND URINA'S) YELLOW EYES RACHEL SAT DOWN +5442-32873-0002-1367: A SLAVE ONLY THINK A SLAVE +5442-32873-0003-1368: OH FRIGHTFUL FRIGHTFUL IS IT A DREAM +5442-32873-0004-1369: (OH->ALL) FRIGHTFUL (FRIGHTFUL->CRIED FAWN) +5442-32873-0005-1370: STANLEY STANLEY IT WOULD BE MERCY TO KILL ME SHE BROKE (OUT->HER) AGAIN +5442-32873-0006-1371: BRIGHT AND NATTY (WERE THE CHINTZ->WITH A CHIN) CURTAINS AND THE LITTLE TOILET SET OUT NOT (INELEGANTLY->IN ELEGANTLY) AND HER (PET->BED) PIPING GOLDFINCH ASLEEP ON HIS PERCH WITH HIS BIT OF SUGAR BETWEEN THE (WIRES->WISE) OF HIS CAGE HER PILLOW SO WHITE AND UNPRESSED WITH ITS LITTLE EDGING OF LACE +5442-32873-0007-1372: WHEN HE CAME BACK TO THE DRAWING ROOM (A TOILET BOTTLE OF EAU DE COLOGNE->I TOLD IT WHAT HE OF OVERLUME) IN HIS HAND WITH HER LACE HANDKERCHIEF HE BATHED HER (TEMPLES->TEMPLE) AND FOREHEAD +5442-32873-0008-1373: THERE WAS NOTHING VERY BROTHERLY IN HIS LOOK AS HE PEERED INTO (HER->A) PALE SHARP FEATURES DURING THE PROCESS +5442-32873-0009-1374: THERE DON'T MIND ME SHE SAID SHARPLY AND GETTING UP SHE LOOKED DOWN AT HER DRESS AND THIN SHOES AND SEEMING TO RECOLLECT HERSELF SHE TOOK THE CANDLE HE HAD JUST (SET->SAT) DOWN AND (WENT->WHEN) SWIFTLY TO HER ROOM +5442-32873-0010-1375: AND SHE THREW BACK HER (VEIL->VEAL) AND GOING HURRIEDLY TO THE (TOILET->DOLIGHT) MECHANICALLY SURVEYED HERSELF (IN->FROM) THE (GLASS->GLANCE) +5442-32873-0011-1376: (RACHEL LAKE RACHEL->ORIGINALLY LATER) LAKE WHAT ARE YOU NOW +5442-32873-0012-1377: I'LL STAY HERE THAT IS IN THE DRAWING ROOM SHE ANSWERED AND THE FACE WAS WITHDRAWN +5442-32873-0013-1378: (HE SLACKENED->HIS CLACKENED) HIS (PACE->FACE) AND (TAPPED->TAP) SHARPLY AT THE LITTLE WINDOW OF (THAT->THE) MODEST POST OFFICE AT WHICH THE YOUNG LADIES IN THE PONY CARRIAGE HAD PULLED UP THE DAY BEFORE AND WITHIN WHICH LUKE (WAGGOT->WAGGET) WAS WONT TO SLEEP IN A SORT OF WOODEN BOX THAT FOLDED UP AND APPEARED TO BE A CHEST OF DRAWERS ALL DAY +5442-32873-0014-1379: (LUKE TOOK->LOOK TO) CARE OF MISTER (LARKIN'S DOGS->LARKINS DOG) AND GROOMED MISTER (WYLDER'S->WILDER'S) HORSE AND CLEANED UP HIS (DOG->DOOR) CART FOR MARK BEING CLOSE ABOUT MONEY AND FINDING THAT THE THING WAS TO BE DONE MORE CHEAPLY THAT WAY PUT UP HIS HORSE AND DOG CART IN THE POST (OFFICE->OF HIS) PREMISES AND SO EVADED THE LIVERY CHARGES OF THE BRANDON ARMS +5442-32873-0015-1380: (BUT->THE) LUKE WAS (NOT->KNOWN) THERE AND CAPTAIN LAKE RECOLLECTING HIS HABITS AND HIS HAUNT HURRIED ON TO THE SILVER LION WHICH HAS ITS (GABLE->CABLE) TOWARDS (THE->A) COMMON ONLY ABOUT A HUNDRED STEPS AWAY FOR DISTANCES ARE NOT GREAT IN GYLINGDEN +5442-32873-0016-1381: HERE WERE THE FLOW OF SOUL AND OF STOUT LONG PIPES LONG YARNS AND TOLERABLY LONG CREDITS AND THE HUMBLE (SCAPEGRACES->SKIPPED BRACES) OF THE TOWN RESORTED THITHER FOR THE PLEASURES OF A CLUB LIFE AND OFTEN REVELLED DEEP INTO THE SMALL HOURS OF THE MORNING +5442-32873-0017-1382: LOSE NO TIME (AND->WHEN) I'LL GIVE YOU HALF A CROWN +5442-32873-0018-1383: LUKE STUCK ON HIS GREASY (WIDEAWAKE->WIDE AWAKE) AND IN A FEW MINUTES MORE THE (DOG->DOOR) CART WAS (TRUNDLED->TUMBLED) OUT INTO THE LANE AND THE HORSE HARNESSED WENT BETWEEN THE SHAFTS WITH THAT WONDERFUL CHEERFULNESS WITH WHICH THEY (BEAR->BEARED) TO BE CALLED UP (UNDER->AND THE) STARTLING CIRCUMSTANCES (AT->THAT) UNSEASONABLE HOURS +5442-32873-0019-1384: IF I 
THOUGHT YOU'D (FAIL->FILL) ME NOW (TAMAR->TO MORROW) I SHOULD NEVER COME BACK GOOD NIGHT (TAMAR->TO MORROW) +5442-41168-0000-1385: THE ACT SAID THAT IN CASE OF DIFFERENCE OF OPINION THERE MUST BE A BALLOT +5442-41168-0001-1386: HE WENT UP TO THE TABLE AND STRIKING IT WITH HIS (FINGER RING->FINGERING) HE SHOUTED LOUDLY A BALLOT +5442-41168-0002-1387: HE WAS SHOUTING FOR THE VERY (COURSE SERGEY->COARSE SURGY) IVANOVITCH HAD PROPOSED BUT IT WAS EVIDENT THAT HE HATED HIM AND ALL HIS PARTY AND THIS FEELING OF HATRED SPREAD THROUGH THE WHOLE PARTY AND (ROUSED->RALPHED) IN (OPPOSITION->OUR POSITION) TO IT THE SAME VINDICTIVENESS THOUGH IN A MORE SEEMLY FORM ON THE OTHER SIDE +5442-41168-0003-1388: SHOUTS WERE RAISED AND FOR A MOMENT ALL WAS CONFUSION SO THAT THE MARSHAL OF THE PROVINCE HAD TO CALL FOR (ORDER->OTTO) A BALLOT +5442-41168-0004-1389: WE (SHED->SHUT) OUR BLOOD FOR OUR COUNTRY +5442-41168-0005-1390: THE CONFIDENCE OF THE MONARCH NO (CHECKING->COOKING) THE ACCOUNTS OF THE (MARSHAL HE'S->MARTIAN IS) NOT A CASHIER BUT THAT'S NOT THE POINT +5442-41168-0006-1391: VOTES PLEASE (BEASTLY->PIECE) +5442-41168-0007-1392: THEY EXPRESSED THE MOST IMPLACABLE HATRED +5442-41168-0008-1393: LEVIN DID NOT IN THE LEAST UNDERSTAND WHAT WAS THE MATTER AND HE (MARVELED->MARVELLED) AT THE PASSION WITH WHICH IT WAS DISPUTED WHETHER OR NOT THE DECISION ABOUT (FLEROV->FLAREOV) SHOULD BE PUT TO THE VOTE +5442-41168-0009-1394: HE FORGOT AS (SERGEY IVANOVITCH->SO GEVINOVITCH) EXPLAINED TO HIM AFTERWARDS THIS (SYLLOGISM->SILLIGIOUS EM) THAT IT WAS NECESSARY FOR THE PUBLIC GOOD TO GET RID OF THE MARSHAL OF THE PROVINCE THAT TO GET (RID OF->HER TO) THE MARSHAL IT WAS NECESSARY TO HAVE A MAJORITY OF VOTES THAT (TO GET A->TOGETHER) MAJORITY OF VOTES IT WAS NECESSARY TO SECURE (FLEROV'S->FLIROV'S) RIGHT TO VOTE THAT TO (SECURE->SECURED) THE RECOGNITION OF (FLEROV'S->FLIROV'S) RIGHT TO VOTE THEY MUST DECIDE ON THE INTERPRETATION TO BE PUT ON THE ACT +5442-41168-0010-1395: BUT LEVIN FORGOT ALL THAT AND IT WAS PAINFUL TO HIM TO SEE ALL THESE EXCELLENT PERSONS FOR WHOM HE HAD A RESPECT IN SUCH AN UNPLEASANT AND VICIOUS STATE OF EXCITEMENT +5442-41168-0011-1396: TO (ESCAPE->US GIVE) FROM THIS PAINFUL FEELING HE WENT AWAY INTO THE OTHER ROOM WHERE THERE WAS NOBODY EXCEPT THE WAITERS AT THE (REFRESHMENT->FRESHMENT) BAR +5442-41168-0012-1397: HE PARTICULARLY LIKED THE WAY ONE (GRAY WHISKERED->GREY WAS GOOD) WAITER WHO SHOWED (HIS SCORN->US GONE) FOR THE OTHER YOUNGER ONES AND WAS (JEERED->JOURED) AT BY THEM WAS TEACHING THEM HOW TO FOLD UP NAPKINS PROPERLY +5442-41168-0013-1398: LEVIN ADVANCED BUT UTTERLY FORGETTING WHAT HE WAS TO DO AND MUCH EMBARRASSED HE TURNED TO SERGEY IVANOVITCH WITH THE QUESTION WHERE AM I TO PUT IT +5442-41168-0014-1399: (SERGEY->SO YOU) IVANOVITCH (FROWNED->GROUND) +5442-41168-0015-1400: THAT IS A MATTER FOR EACH MAN'S OWN DECISION HE SAID SEVERELY +5442-41168-0016-1401: HAVING PUT IT IN HE RECOLLECTED THAT HE OUGHT (TO->*) HAVE THRUST HIS LEFT HAND TOO AND SO HE THRUST IT (IN->*) THOUGH TOO LATE AND STILL MORE OVERCOME WITH CONFUSION HE BEAT A HASTY RETREAT INTO THE BACKGROUND +5442-41168-0017-1402: A HUNDRED AND TWENTY SIX FOR ADMISSION NINETY EIGHT AGAINST +5442-41168-0018-1403: SANG (OUT->ALL) THE VOICE OF THE SECRETARY WHO COULD NOT PRONOUNCE (THE->A) LETTER R +5442-41168-0019-1404: THEN THERE WAS A LAUGH (A BUTTON->AT BOTTOM) AND TWO (NUTS->KNOTS) WERE FOUND IN THE BOX +5442-41168-0020-1405: BUT THE OLD PARTY DID NOT CONSIDER THEMSELVES CONQUERED +5442-41168-0021-1406: IN (REPLY SNETKOV->THE 
PLACE NED GOFF) SPOKE OF THE TRUST (THE->AND) NOBLEMEN OF THE PROVINCE HAD PLACED IN HIM THE (AFFECTION->AFFECTANT) THEY HAD SHOWN HIM WHICH HE DID NOT DESERVE AS HIS ONLY MERIT HAD BEEN HIS ATTACHMENT TO THE NOBILITY TO WHOM HE HAD DEVOTED TWELVE YEARS OF SERVICE +5442-41168-0022-1407: THIS EXPRESSION IN THE MARSHAL'S FACE WAS PARTICULARLY TOUCHING TO LEVIN BECAUSE ONLY THE DAY BEFORE HE HAD BEEN AT HIS HOUSE ABOUT HIS (TRUSTEE->TRUSTY) BUSINESS AND HAD SEEN HIM IN ALL HIS GRANDEUR A KIND HEARTED FATHERLY MAN +5442-41168-0023-1408: IF THERE ARE MEN YOUNGER AND MORE DESERVING THAN I LET THEM SERVE +5442-41168-0024-1409: AND THE MARSHAL DISAPPEARED THROUGH A SIDE DOOR +5442-41168-0025-1410: (THEY->THERE) WERE TO PROCEED IMMEDIATELY TO THE ELECTION +5442-41168-0026-1411: (TWO->DO) NOBLE GENTLEMEN WHO HAD A WEAKNESS FOR STRONG DRINK HAD BEEN MADE DRUNK BY THE PARTISANS OF (SNETKOV->SNACKOV) AND (A->THE) THIRD HAD BEEN ROBBED OF HIS UNIFORM +5442-41168-0027-1412: ON LEARNING THIS THE NEW PARTY HAD MADE HASTE DURING THE (DISPUTE ABOUT FLEROV->DISPUTABLE FLIROFF) TO SEND SOME OF THEIR MEN IN A SLEDGE TO CLOTHE THE STRIPPED (GENTLEMAN->GENTLEMEN) AND TO BRING ALONG ONE OF THE INTOXICATED TO THE MEETING +5442-41169-0000-1413: LEVIN DID NOT CARE TO EAT AND HE WAS NOT SMOKING HE DID NOT WANT TO JOIN HIS OWN FRIENDS THAT IS (SERGEY->SOJI) IVANOVITCH STEPAN ARKADYEVITCH SVIAZHSKY AND THE REST BECAUSE VRONSKY IN (HIS EQUERRY'S->AN EQUEROR'S) UNIFORM WAS STANDING WITH THEM IN EAGER CONVERSATION +5442-41169-0001-1414: HE WENT TO THE WINDOW AND SAT DOWN SCANNING THE GROUPS AND LISTENING TO WHAT WAS BEING SAID AROUND HIM +5442-41169-0002-1415: (HE'S->IS) SUCH A (BLACKGUARD->BLANKARD) +5442-41169-0003-1416: I HAVE TOLD HIM SO BUT IT MAKES NO DIFFERENCE ONLY THINK OF IT +5442-41169-0004-1417: THESE PERSONS WERE UNMISTAKABLY SEEKING A PLACE WHERE THEY COULD TALK WITHOUT BEING OVERHEARD +5442-41169-0005-1418: SHALL WE GO ON YOUR EXCELLENCY FINE CHAMPAGNE +5442-41169-0006-1419: (LAST YEAR AT OUR->LOST YOUR OTHER) DISTRICT (MARSHAL NIKOLAY->MARTIAL NIKOLA) IVANOVITCH'S +5442-41169-0007-1420: OH STILL JUST THE SAME ALWAYS AT A LOSS THE LANDOWNER ANSWERED WITH A RESIGNED SMILE BUT WITH AN EXPRESSION OF SERENITY AND CONVICTION THAT SO IT MUST BE +5442-41169-0008-1421: WHY WHAT IS (THERE->THAT) TO UNDERSTAND +5442-41169-0009-1422: (THERE'S->THERE IS) NO MEANING IN IT AT ALL +5442-41169-0010-1423: THEN TOO ONE MUST KEEP UP CONNECTIONS +5442-41169-0011-1424: IT'S A (MORAL->MORTAL) OBLIGATION OF A SORT +5442-41169-0012-1425: AND THEN TO TELL THE TRUTH THERE'S ONE'S OWN (INTERESTS->INTEREST) +5442-41169-0013-1426: (THEY'RE->THEIR) PROPRIETORS OF A SORT BUT (WE'RE->WE ARE) THE LANDOWNERS +5442-41169-0014-1427: THAT IT MAY BE BUT STILL IT OUGHT TO BE TREATED A LITTLE MORE RESPECTFULLY +5442-41169-0015-1428: IF (WE'RE->WE ARE) LAYING OUT A GARDEN PLANNING ONE BEFORE THE HOUSE YOU KNOW AND THERE (YOU'VE->YOU HAVE) A TREE (THAT'S->THAT) STOOD (FOR->IN) CENTURIES IN THE VERY SPOT OLD AND (GNARLED->KNOLLED) IT MAY BE AND YET YOU DON'T CUT DOWN THE OLD FELLOW TO MAKE ROOM FOR THE (FLOWERBEDS->FLOWER BEDS) BUT LAY OUT YOUR BEDS SO AS TO TAKE ADVANTAGE OF THE TREE +5442-41169-0016-1429: WELL AND HOW IS YOUR LAND DOING +5442-41169-0017-1430: BUT ONE'S WORK IS THROWN IN FOR NOTHING +5442-41169-0018-1431: OH WELL ONE DOES IT WHAT WOULD YOU HAVE +5442-41169-0019-1432: AND (WHAT'S->ONCE) MORE THE LANDOWNER WENT ON LEANING HIS ELBOWS ON THE WINDOW AND CHATTING ON MY SON I MUST TELL YOU HAS NO TASTE FOR IT +5442-41169-0020-1433: SO 
(THERE'LL->THERE WILL) BE NO ONE TO KEEP IT UP AND YET ONE DOES IT +5442-41169-0021-1434: WE WALKED ABOUT THE FIELDS (AND->ON) THE GARDEN NO SAID HE (STEPAN VASSILIEVITCH->STEP ON MISS LEVITCH) EVERYTHING'S WELL LOOKED AFTER BUT YOUR (GARDEN'S->GARDENS) NEGLECTED +5442-41169-0022-1435: TO MY THINKING (I'D->I'VE) CUT DOWN (THAT LIME TREE->THE LIMETERY) +5442-41169-0023-1436: HERE (YOU'VE->YOUR) THOUSANDS OF LIMES AND EACH WOULD MAKE (TWO->TOO) GOOD BUNDLES OF (BARK->BULK) +5442-41169-0024-1437: YOU'RE MARRIED (I'VE->I) HEARD SAID THE LANDOWNER +5442-41169-0025-1438: YES IT'S (RATHER->ALL THE) STRANGE HE WENT ON +5442-41169-0026-1439: THE (LANDOWNER CHUCKLED->LANDLORD CHLED) UNDER HIS WHITE (MUSTACHES->MOUSTACHES) +5442-41169-0027-1440: WHY DON'T WE (CUT->GOT) DOWN OUR (PARKS->BOX) FOR (TIMBER->TIMBOO) +5442-41169-0028-1441: SAID LEVIN RETURNING TO A THOUGHT THAT HAD STRUCK HIM +5442-41169-0029-1442: THERE'S A CLASS INSTINCT TOO OF WHAT ONE OUGHT AND (OUGHTN'T->OUGHT NOT) TO DO +5442-41169-0030-1443: THERE'S THE PEASANTS TOO I WONDER AT THEM SOMETIMES ANY GOOD PEASANT TRIES TO TAKE ALL THE LAND HE CAN +5442-41169-0031-1444: WITHOUT A RETURN (TOO AT->TO ADD) A SIMPLE (LOSS->LAWS) +5484-24317-0000-571: WHEN HE CAME FROM THE BATH (PROCLUS->PROCKLESS) VISITED HIM AGAIN +5484-24317-0001-572: BUT (HERMON->HERMAN) WAS NOT IN THE MOOD TO SHARE A JOYOUS REVEL AND HE FRANKLY SAID SO ALTHOUGH IMMEDIATELY AFTER HIS RETURN HE HAD ACCEPTED THE INVITATION TO THE FESTIVAL WHICH THE WHOLE FELLOWSHIP OF ARTISTS WOULD GIVE THE FOLLOWING DAY (IN HONOUR->AND HONOR) OF THE (SEVENTIETH->SEVENTEENTH) BIRTHDAY OF THE OLD SCULPTOR (EUPHRANOR->EUPHRANER) +5484-24317-0002-573: SHE WOULD APPEAR HERSELF (AT->A) DESSERT AND THE BANQUET MUST THEREFORE BEGIN AT AN UNUSUALLY EARLY HOUR +5484-24317-0003-574: SO THE ARTIST FOUND HIMSELF OBLIGED TO RELINQUISH HIS OPPOSITION +5484-24317-0004-575: THE BANQUET WAS TO BEGIN IN A FEW HOURS YET HE COULD NOT LET THE DAY PASS WITHOUT SEEING DAPHNE AND TELLING HER THE WORDS OF THE ORACLE +5484-24317-0005-576: HE LONGED WITH ARDENT YEARNING FOR THE SOUND OF HER VOICE AND STILL MORE TO UNBURDEN HIS SORELY TROUBLED SOUL TO HER +5484-24317-0006-577: SINCE HIS RETURN FROM THE ORACLE THE FEAR THAT THE (RESCUED->RESCUE) DEMETER MIGHT YET BE THE WORK OF (MYRTILUS->MERTULIST) HAD AGAIN MASTERED HIM +5484-24317-0007-578: THE APPROVAL AS WELL AS (THE DOUBTS->A DOUBT) WHICH (IT AROUSED->HAD ARISED) IN OTHERS STRENGTHENED HIS OPINION ALTHOUGH EVEN NOW HE COULD NOT SUCCEED IN BRINGING IT INTO HARMONY WITH THE FACTS +5484-24317-0008-579: THEN HE WENT DIRECTLY TO THE (NEIGHBOURING->NEIGHBORING) PALACE THE QUEEN MIGHT HAVE APPEARED ALREADY AND IT WOULD NOT DO TO KEEP HER WAITING +5484-24317-0009-580: HITHERTO THE MERCHANT HAD BEEN INDUCED IT IS TRUE TO ADVANCE LARGE SUMS OF MONEY TO THE QUEEN BUT THE LOYAL DEVOTION WHICH HE SHOWED TO HER ROYAL HUSBAND HAD RENDERED (IT->AN) IMPOSSIBLE TO GIVE HIM EVEN A HINT OF THE CONSPIRACY +5484-24317-0010-581: WHEN (HERMON->HERMANN) ENTERED THE RESIDENCE OF THE (GRAMMATEUS->GRAMMATIUS) IN THE PALACE (THE->THEY) GUESTS HAD ALREADY ASSEMBLED +5484-24317-0011-582: (THE PLACE->THEY PLACED) BY (HERMON'S->HARMONT'S) SIDE WHICH (ALTHEA->ALTHIE) HAD CHOSEN FOR HERSELF WOULD THEN BE GIVEN UP TO (ARSINOE->ARSENO) +5484-24317-0012-583: TRUE AN INTERESTING CONVERSATION STILL HAD POWER TO CHARM HIM BUT OFTEN DURING ITS CONTINUANCE THE FULL CONSCIOUSNESS OF HIS MISFORTUNE FORCED ITSELF UPON HIS MIND FOR THE MAJORITY OF THE SUBJECTS DISCUSSED BY THE ARTISTS CAME TO THEM THROUGH THE 
MEDIUM OF SIGHT AND REFERRED TO NEW CREATIONS OF ARCHITECTURE SCULPTURE AND PAINTING FROM WHOSE ENJOYMENT HIS BLINDNESS DEBARRED HIM +5484-24317-0013-584: A STRANGER OUT OF HIS OWN SPHERE HE (FELT->FELL) CHILLED AMONG THESE CLOSELY UNITED MEN AND WOMEN TO WHOM NO TIE BOUND HIM SAVE THE PRESENCE OF THE SAME HOST +5484-24317-0014-585: (CRATES->CREATES) HAD REALLY BEEN INVITED IN ORDER TO WIN HIM OVER TO THE QUEEN'S CAUSE BUT CHARMING FAIR HAIRED (NICO->NACO) HAD BEEN COMMISSIONED BY THE CONSPIRATORS TO PERSUADE HIM TO SING (ARSINOE'S->ARSENO'S) PRAISES AMONG HIS PROFESSIONAL ASSOCIATES +5484-24317-0015-586: HIS SON HAD BEEN (THIS->THE) ROYAL (DAME'S->JAMES) FIRST HUSBAND AND SHE HAD DESERTED HIM TO MARRY (LYSIMACHUS->LISAKETH) THE AGED KING OF THRACE +5484-24317-0016-587: THE KING'S SISTER THE OBJECT OF HIS LOVE CRIED (HERMON->HARMON) INCREDULOUSLY +5484-24317-0017-588: WE WOMEN ARE (ONLY AS->EARLIEST) OLD AS WE LOOK AND THE (LEECHES AND TIRING WOMEN->LEECH HAS ENTIRE WOMAN) OF THIS BEAUTY OF FORTY PRACTISE ARTS WHICH GIVE HER THE APPEARANCE OF TWENTY FIVE YET PERHAPS THE KING VALUES HER INTELLECT MORE THAN HER PERSON AND THE WISDOM OF A HUNDRED SERPENTS IS CERTAINLY UNITED IN THIS WOMAN'S HEAD +5484-24317-0018-589: THE THREE MOST TRUSTWORTHY ONES (ARE HERE AMYNTAS->I HEAR I MEANTUS) THE (LEECH->LIEGE) CHRYSIPPUS (AND->IN) THE ADMIRABLE (PROCLUS->PROCLIS) +5484-24317-0019-590: LET US HOPE THAT YOU WILL MAKE THIS THREE LEAVED CLOVER THE LUCK PROMISING (FOUR LEAVED->FOOL LEAVE TO) ONE +5484-24317-0020-591: YOUR UNCLE TOO HAS OFTEN WITH (PRAISEWORTHY->PRAISE WORTHY) GENEROSITY HELPED (ARSINOE->ALSO) IN MANY AN EMBARRASSMENT +5484-24317-0021-592: HOW LONG HE KEPT YOU WAITING (FOR->FROM) THE FIRST WORD CONCERNING A WORK WHICH JUSTLY TRANSPORTED THE WHOLE CITY WITH DELIGHT +5484-24317-0022-593: WHEN HE DID FINALLY SUMMON YOU HE SAID THINGS WHICH MUST HAVE WOUNDED YOU +5484-24317-0023-594: THAT IS GOING TOO FAR REPLIED (HERMON->HERMAN) +5484-24317-0024-595: HE (WINKED AT->WAITED) HER AND MADE A SIGNIFICANT GESTURE AS HE SPOKE AND THEN INFORMED THE BLIND ARTIST HOW GRACIOUSLY (ARSINOE->ARSENO) HAD REMEMBERED HIM WHEN SHE HEARD OF THE REMEDY BY WHOSE AID MANY A WONDERFUL CURE OF BLIND (EYES->EYE) HAD BEEN MADE IN (RHODES->ROADS) +5484-24317-0025-596: THE ROYAL LADY HAD INQUIRED ABOUT HIM AND HIS SUFFERINGS WITH ALMOST SISTERLY INTEREST AND (ALTHEA->ALTHIA) EAGERLY CONFIRMED THE STATEMENT +5484-24317-0026-597: (HERMON->HERMA) LISTENED TO THE (PAIR IN->PARENT) SILENCE +5484-24317-0027-598: THE (RHODIAN->RADIAN) WAS JUST BEGINNING TO PRAISE (ARSINOE->ARSENO) ALSO AS A SPECIAL FRIEND AND CONNOISSEUR OF THE (SCULPTOR'S->SCULPTURES) ART WHEN CRATES (HERMON'S->HERMANN'S) FELLOW STUDENT ASKED THE BLIND ARTIST IN BEHALF OF HIS BEAUTIFUL COMPANION WHY HIS DEMETER WAS PLACED UPON A PEDESTAL (WHICH->WITCH) TO OTHERS AS WELL AS HIMSELF SEEMED TOO HIGH FOR THE SIZE OF THE STATUE +5484-24317-0028-599: YET WHAT MATTERED IT EVEN IF THESE MISERABLE PEOPLE CONSIDERED THEMSELVES DECEIVED AND POINTED THE FINGER OF SCORN AT HIM +5484-24317-0029-600: A WOMAN WHO (YEARNS->URNS) FOR THE REGARD OF ALL MEN AND MAKES LOVE A TOY EASILY LESSENS THE (DEMANDS->DEMAND) SHE IMPOSES UPON INDIVIDUALS +5484-24317-0030-601: ONLY EVEN THOUGH LOVE HAS WHOLLY DISAPPEARED SHE STILL CLAIMS CONSIDERATION AND ALTHEA DID NOT WISH TO LOSE (HERMON'S->HARMON'S) REGARD +5484-24317-0031-602: HOW INDIFFERENT YOU LOOK BUT I TELL YOU HER DEEP BLUE EYES FLASHED AS SHE SPOKE THAT SO LONG AS YOU (WERE->WAS) STILL A GENUINE CREATING ARTIST THE CASE WAS 
DIFFERENT +5484-24317-0032-603: THOUGH SO LOUD A DENIAL IS WRITTEN ON YOUR FACE I PERSIST IN MY CONVICTION AND THAT NO IDLE DELUSION (ENSNARES->AND SNAS) ME I CAN PROVE +5484-24317-0033-604: IT WAS NAY IT COULD HAVE BEEN NOTHING ELSE THAT VERY SPIDER +5484-24318-0000-605: NOT A SOUND IF YOU VALUE YOUR LIVES +5484-24318-0001-606: TO OFFER RESISTANCE WOULD HAVE BEEN MADNESS FOR EVEN HERMON PERCEIVED BY THE LOUD CLANKING OF WEAPONS (AROUND->ROUND) THEM (THE->THEY) GREATLY SUPERIOR POWER OF THE ENEMY AND THEY WERE ACTING BY THE ORDERS OF THE KING TO THE PRISON NEAR THE PLACE OF EXECUTION +5484-24318-0002-607: WAS HE TO BE LED TO THE EXECUTIONER'S BLOCK +5484-24318-0003-608: WHAT PLEASURE HAD LIFE TO OFFER HIM THE BLIND MAN WHO WAS ALREADY DEAD TO HIS ART +5484-24318-0004-609: OUGHT HE NOT TO GREET (THIS->HIS) SUDDEN END AS (A BOON->THE BOOM) FROM THE IMMORTALS +5484-24318-0005-610: DID IT NOT SPARE HIM A HUMILIATION AS GREAT AND PAINFUL AS COULD BE IMAGINED +5484-24318-0006-611: WHATEVER MIGHT AWAIT HIM HE DESIRED NO BETTER FATE +5484-24318-0007-612: IF HE HAD PASSED INTO ANNIHILATION HE (HERMON->HERMAN) WISHED TO FOLLOW HIM THITHER AND ANNIHILATION CERTAINLY MEANT REDEMPTION FROM PAIN AND MISERY +5484-24318-0008-613: BUT IF HE WERE DESTINED TO MEET HIS (MYRTILUS->BURTULAS) AND HIS MOTHER IN THE WORLD BEYOND THE GRAVE WHAT HAD HE NOT TO TELL THEM HOW SURE HE WAS (OF->A) FINDING A JOYFUL RECEPTION THERE FROM BOTH +5484-24318-0009-614: THE POWER WHICH DELIVERED HIM OVER TO DEATH JUST AT THAT MOMENT WAS NOT NEMESIS NO IT WAS A KINDLY DEITY +5484-24318-0010-615: YET IT WAS NO ILLUSION THAT DECEIVED HIM +5484-24318-0011-616: AGAIN HE HEARD THE BELOVED VOICE AND THIS TIME IT ADDRESSED NOT ONLY HIM BUT WITH THE UTMOST HASTE THE COMMANDER OF THE SOLDIERS +5484-24318-0012-617: SOMETIMES WITH (*->THE) TOUCHING ENTREATY SOMETIMES WITH IMPERIOUS COMMAND SHE PROTESTED AFTER GIVING HIM HER NAME THAT THIS MATTER COULD BE NOTHING BUT AN UNFORTUNATE MISTAKE +5484-24318-0013-618: LASTLY WITH EARNEST WARMTH SHE BESOUGHT HIM BEFORE TAKING THE PRISONERS AWAY TO PERMIT HER TO SPEAK TO THE COMMANDING GENERAL PHILIPPUS HER FATHER'S GUEST WHO SHE WAS CERTAIN WAS IN THE PALACE +5484-24318-0014-619: CRIED (HERMON->HERMAND) IN GRATEFUL AGITATION BUT SHE WOULD NOT LISTEN TO HIM AND (FOLLOWED->FOLLOW) THE SOLDIER WHOM THE CAPTAIN DETAILED TO GUIDE HER INTO THE PALACE +5484-24318-0015-620: TO MORROW YOU SHALL CONFESS TO ME WHO TREACHEROUSLY DIRECTED YOU TO THIS DANGEROUS PATH +5484-24318-0016-621: DAPHNE AGAIN PLEADED FOR THE LIBERATION OF THE PRISONERS BUT (PHILIPPUS SILENCED HER->PHILIP'S SILENCE CHARRED) WITH (THE->A) GRAVE EXCLAMATION THE ORDER OF THE KING +5484-24318-0017-622: AS SOON AS THE CAPTIVE ARTIST WAS ALONE WITH (THE->A) WOMAN HE LOVED HE CLASPED HER HAND POURING FORTH INCOHERENT WORDS OF THE MOST ARDENT GRATITUDE AND WHEN HE FELT HER WARMLY RETURN THE PRESSURE HE COULD NOT RESTRAIN THE DESIRE TO CLASP HER TO HIS HEART +5484-24318-0018-623: IN SPITE OF HIS DEEP MENTAL DISTRESS HE COULD HAVE SHOUTED ALOUD IN HIS DELIGHT AND GRATITUDE +5484-24318-0019-624: HE MIGHT NOW HAVE BEEN PERMITTED TO (BIND->FIND) FOREVER TO HIS LIFE THE WOMAN WHO HAD JUST RESCUED HIM FROM THE GREATEST DANGER BUT THE CONFESSION HE MUST MAKE TO HIS FELLOW ARTISTS IN THE (PALAESTRA->PELUSTER) THE FOLLOWING MORNING STILL SEALED HIS LIPS YET IN THIS HOUR HE FELT THAT HE WAS UNITED TO HER AND OUGHT NOT TO CONCEAL WHAT AWAITED HIM SO OBEYING A STRONG IMPULSE HE EXCLAIMED YOU KNOW THAT I LOVE YOU +5484-24318-0020-625: I LOVE YOU AND HAVE LOVED 
YOU ALWAYS +5484-24318-0021-626: (DAPHNE->DAPHNEY) EXCLAIMED TENDERLY WHAT MORE (IS->IT'S) NEEDED +5484-24318-0022-627: BUT (HERMON->HERMAN) WITH DROOPING HEAD MURMURED TO MORROW I SHALL NO LONGER BE WHAT I AM NOW +5484-24318-0023-628: THEN (DAPHNE->JAPLY) RAISED HER FACE TO HIS ASKING SO THE DEMETER IS THE WORK OF (MYRTILUS->MYRTULAS) +5484-24318-0024-629: WHAT A TERRIBLE ORDEAL AGAIN AWAITS YOU +5484-24318-0025-630: AND I FOOL BLINDED (ALSO->ALL SOR) IN MIND COULD BE VEXED WITH YOU FOR IT +5484-24318-0026-631: BRING THIS BEFORE YOUR MIND AND EVERYTHING ELSE THAT YOU MUST ACCEPT WITH IT IF YOU CONSENT (WHEN->WITH) THE TIME ARRIVES TO BECOME MINE CONCEAL (AND PALLIATE->IMPALIATE) NOTHING +5484-24318-0027-632: (SO ARCHIAS->SORCAS) INTENDED TO LEAVE THE CITY ON ONE OF HIS OWN SHIPS THAT VERY DAY +5484-24318-0028-633: HE HIMSELF ON THE WAY TO EXPOSE HIMSELF TO THE MALICE AND MOCKERY OF THE WHOLE CITY +5484-24318-0029-634: HIS HEART CONTRACTED PAINFULLY AND HIS SOLICITUDE ABOUT HIS UNCLE'S FATE INCREASED WHEN (PHILIPPUS->PHILIPUS) INFORMED HIM THAT THE CONSPIRATORS HAD BEEN ARRESTED AT THE BANQUET AND HEADED BY (AMYNTAS->A MEANTES) THE (RHODIAN->RODIAN) CHRYSIPPUS AND (PROCLUS->PROCLIS) HAD PERISHED BY THE EXECUTIONER'S SWORD AT SUNRISE +5484-24318-0030-635: BESIDES HE KNEW THAT THE OBJECT OF HIS LOVE WOULD NOT PART FROM HIM WITHOUT GRANTING HIM ONE LAST WORD +5484-24318-0031-636: ON THE WAY HIS (HEART THROBBED->HARD THROPPED) ALMOST TO BURSTING +5484-24318-0032-637: EVEN (DAPHNE'S->AFTER THESE) IMAGE AND WHAT THREATENED HER FATHER AND HER WITH HIM (RECEDED->WAS SEATED) FAR INTO THE BACKGROUND +5484-24318-0033-638: HE WAS APPEARING BEFORE HIS COMPANIONS ONLY TO GIVE TRUTH ITS JUST DUE +5484-24318-0034-639: THE EGYPTIAN (OBEYED->OBEY) AND HIS MASTER CROSSED THE WIDE SPACE STREWN WITH SAND AND APPROACHED THE STAGE WHICH HAD BEEN ERECTED FOR THE (FESTAL->FEAST HELL) PERFORMANCES EVEN HAD HIS EYES RETAINED THE POWER OF SIGHT HIS BLOOD WAS (COURSING->CURSING) SO (WILDLY->WIDELY) THROUGH HIS VEINS THAT HE MIGHT PERHAPS HAVE BEEN UNABLE TO DISTINGUISH THE STATUES AROUND HIM AND THE THOUSANDS OF SPECTATORS WHO CROWDED CLOSELY TOGETHER RICHLY GARLANDED THEIR (CHEEKS->CHIEFS) GLOWING WITH ENTHUSIASM SURROUNDED THE ARENA (HERMON->HERMAN) +5484-24318-0035-640: SHOUTED HIS FRIEND (SOTELES IN->SORTILESS AND) JOYFUL SURPRISE IN THE MIDST OF (THIS->HIS) PAINFUL WALK (HERMON->HAREMON) +5484-24318-0036-641: EVEN WHILE HE BELIEVED HIMSELF TO BE THE CREATOR OF THE DEMETER HE HAD BEEN SERIOUSLY TROUBLED BY THE PRAISE OF SO MANY CRITICS BECAUSE IT HAD EXPOSED HIM TO THE SUSPICION OF HAVING BECOME FAITHLESS TO HIS ART AND HIS NATURE +5484-24318-0037-642: HONOUR TO (MYRTILUS->MYRTULAS) AND HIS ART BUT HE TRUSTED (THIS NOBLE FESTAL->THE SNOWBLE FEAST ELL) ASSEMBLAGE WOULD PARDON THE UNINTENTIONAL DECEPTION AND AID HIS PRAYER FOR RECOVERY +5764-299665-0000-405: AFTERWARD IT WAS SUPPOSED THAT HE WAS SATISFIED WITH THE BLOOD OF OXEN (LAMBS->LAMPS) AND DOVES AND THAT IN EXCHANGE FOR OR (ON->IN) ACCOUNT OF THESE SACRIFICES (THIS->THESE) GOD GAVE (RAIN->REIGN) SUNSHINE AND HARVEST +5764-299665-0001-406: WHETHER HE WAS THE CREATOR OF (YOURSELF->YOUR SELF) AND MYSELF +5764-299665-0002-407: (WHETHER ANY->WEATHER AND A) PRAYER WAS EVER ANSWERED +5764-299665-0003-408: WHY DID HE CREATE THE (INTELLECTUALLY->INTELLECTUAL) INFERIOR +5764-299665-0004-409: WHY DID HE CREATE THE DEFORMED AND HELPLESS WHY DID HE CREATE THE CRIMINAL THE IDIOTIC THE INSANE +5764-299665-0005-410: ARE THE FAILURES (UNDER->AND THE) OBLIGATION TO THEIR 
CREATOR +5764-299665-0006-411: (IS HE RESPONSIBLE->HIS IRRESPONSIBLE) FOR ALL THE (WARS->WALLS) THAT HAVE BEEN WAGED FOR ALL THE INNOCENT BLOOD THAT HAS BEEN SHED +5764-299665-0007-412: (IS HE->IF YOU) RESPONSIBLE FOR THE CENTURIES OF SLAVERY FOR THE BACKS THAT HAVE BEEN SCARRED WITH (THE->A) LASH FOR THE (BABES->BABE) THAT HAVE BEEN SOLD FROM THE BREASTS OF MOTHERS FOR THE FAMILIES THAT HAVE BEEN SEPARATED AND DESTROYED +5764-299665-0008-413: IS (THIS GOD->THESE GOT) RESPONSIBLE FOR RELIGIOUS PERSECUTION FOR THE INQUISITION FOR THE (THUMB SCREW->TENTH'S CREW) AND RACK AND FOR ALL THE INSTRUMENTS OF TORTURE +5764-299665-0009-414: (DID THIS GOD ALLOW->THESE GOT THE LOAD) THE CRUEL AND VILE TO DESTROY THE BRAVE AND VIRTUOUS +5764-299665-0010-415: DID HE (ALLOW->ALONE) TYRANTS TO SHED (THE->A) BLOOD OF PATRIOTS +5764-299665-0011-416: CAN WE CONCEIVE OF A DEVIL BASE ENOUGH TO PREFER HIS ENEMIES TO HIS FRIENDS +5764-299665-0012-417: HOW CAN WE ACCOUNT FOR THE WILD BEASTS THAT (DEVOUR->THE FOUR) HUMAN BEINGS FOR THE (FANGED->FACT) SERPENTS WHOSE BITE (IS->ITS) DEATH +5764-299665-0013-418: HOW CAN WE ACCOUNT FOR A WORLD (WHERE LIFE FEEDS->WILL LIE FEATS) ON LIFE +5764-299665-0014-419: DID INFINITE WISDOM INTENTIONALLY (PRODUCE->PRODUCED) THE MICROSCOPIC BEASTS THAT FEED UPON THE OPTIC (NERVE->NURSE) THINK OF BLINDING A MAN TO SATISFY THE APPETITE OF A MICROBE +5764-299665-0015-420: FEAR (BUILDS->BIDS) THE ALTAR AND OFFERS THE SACRIFICE +5764-299665-0016-421: FEAR ERECTS THE (CATHEDRAL->CATEURAL) AND BOWS THE HEAD OF MAN IN WORSHIP +5764-299665-0017-422: (LIPS->LITS) RELIGIOUS AND FEARFUL TREMBLINGLY REPEAT THIS PASSAGE THOUGH HE SLAY ME YET (WILL I->WE LIKE) TRUST HIM +5764-299665-0018-423: CAN WE SAY THAT HE CARED FOR THE CHILDREN OF MEN +5764-299665-0019-424: CAN WE SAY THAT HIS MERCY (ENDURETH FOREVER->AND DURE FOR EVER) +5764-299665-0020-425: (DO WE PROVE->THE REPROVE) HIS GOODNESS BY SHOWING THAT HE HAS OPENED THE EARTH AND SWALLOWED (THOUSANDS->THOUSAND) OF HIS HELPLESS CHILDREN (OR->ALL) THAT (WITH->WIT) THE VOLCANOES HE HAS OVERWHELMED THEM WITH RIVERS OF FIRE +5764-299665-0021-426: WAS THERE GOODNESS WAS (THERE->THEIR) WISDOM IN THIS +5764-299665-0022-427: (OUGHT THE SUPERIOR RACES TO->ALL DISAPPEAR RAYS TWO) THANK (GOD->GOT) THAT THEY ARE NOT THE INFERIOR +5764-299665-0023-428: MOST PEOPLE (CLING->CLINK) TO THE SUPERNATURAL +5764-299665-0024-429: IF THEY GIVE UP ONE GOD THEY IMAGINE ANOTHER +5764-299665-0025-430: WHAT IS THIS POWER +5764-299665-0026-431: MAN ADVANCES AND NECESSARILY ADVANCES (THROUGH->TO) EXPERIENCE +5764-299665-0027-432: A MAN WISHING TO GO TO A CERTAIN PLACE (COMES->COME) TO WHERE THE (ROAD->RULE) DIVIDES +5764-299665-0028-433: HE HAS TRIED THAT ROAD AND KNOWS THAT IT IS THE WRONG ROAD +5764-299665-0029-434: A CHILD (CHARMED->SHONE) BY THE BEAUTY OF THE FLAME (GRASPS->GRASPED) IT WITH (ITS->HIS) DIMPLED HAND +5764-299665-0030-435: THE POWER THAT (WORKS->WORK) FOR RIGHTEOUSNESS (HAS->HAD) TAUGHT THE CHILD A LESSON +5764-299665-0031-436: IT IS A RESULT +5764-299665-0032-437: IT IS INSISTED BY THESE THEOLOGIANS AND BY MANY OF THE (SO->SOUL) CALLED PHILOSOPHERS THAT THIS MORAL SENSE THIS SENSE OF DUTY OF OBLIGATION WAS IMPORTED AND THAT CONSCIENCE IS AN EXOTIC +5764-299665-0033-438: (WE LIVE->REALLY) TOGETHER IN FAMILIES TRIBES AND NATIONS +5764-299665-0034-439: THEY ARE PRAISED ADMIRED AND RESPECTED +5764-299665-0035-440: THEY ARE REGARDED AS GOOD THAT IS TO SAY AS MORAL +5764-299665-0036-441: THE MEMBERS WHO ADD TO THE MISERY OF THE FAMILY THE TRIBE (OR->OF) THE NATION 
ARE CONSIDERED BAD MEMBERS +5764-299665-0037-442: THE GREATEST OF HUMAN BEINGS (HAS->HAD) SAID CONSCIENCE IS BORN OF LOVE +5764-299665-0038-443: AS PEOPLE ADVANCE THE REMOTE CONSEQUENCES ARE PERCEIVED +5764-299665-0039-444: THE IMAGINATION IS CULTIVATED +5764-299665-0040-445: A MAN (PUTS->BUT) HIMSELF IN THE PLACE OF ANOTHER +5764-299665-0041-446: THE SENSE OF DUTY BECOMES STRONGER MORE IMPERATIVE +5764-299665-0042-447: MAN JUDGES HIMSELF +5764-299665-0043-448: IN ALL THIS THERE IS NOTHING SUPERNATURAL +5764-299665-0044-449: MAN HAS DECEIVED HIMSELF +5764-299665-0045-450: (HAS CHRISTIANITY->HISTORY STUNNITY) DONE GOOD +5764-299665-0046-451: WHEN THE CHURCH HAD CONTROL WERE MEN MADE BETTER AND HAPPIER +5764-299665-0047-452: WHAT HAS RELIGION DONE FOR HUNGARY (OR->O) AUSTRIA +5764-299665-0048-453: (COULD->GOOD) THESE COUNTRIES HAVE BEEN WORSE WITHOUT RELIGION +5764-299665-0049-454: COULD THEY HAVE BEEN WORSE HAD THEY HAD ANY OTHER RELIGION THAN CHRISTIANITY +5764-299665-0050-455: WHAT DID CHRISTIANITY DO (FOR->FAULT) THEM +5764-299665-0051-456: THEY HATED PLEASURE +5764-299665-0052-457: THEY MUFFLED ALL THE BELLS OF GLADNESS +5764-299665-0053-458: (THE->DURING) RELIGION OF THE PURITAN WAS AN (UNADULTERATED->ADULTERATED) CURSE +5764-299665-0054-459: THE PURITAN (BELIEVED->BELIEF) THE BIBLE TO BE THE (WORD->WORLD) OF GOD AND THIS BELIEF HAS ALWAYS MADE THOSE WHO HELD IT CRUEL AND WRETCHED +5764-299665-0055-460: LET ME REFER TO JUST ONE FACT SHOWING THE INFLUENCE OF A BELIEF IN THE BIBLE ON HUMAN BEINGS +5764-299665-0056-461: THE QUEEN RECEIVED THE BIBLE KISSED IT AND PLEDGED HERSELF TO DILIGENTLY READ THEREIN +5764-299665-0057-462: IN OTHER WORDS IT WAS JUST AS FIENDISH JUST AS (INFAMOUS->IN FAMOUS) AS THE (CATHOLIC SPIRIT->CATTLE EXPERIMENT) +5764-299665-0058-463: HAS THE (BIBLE->VARIABLE) MADE THE PEOPLE OF (GEORGIA->GEORGE A) KIND AND MERCIFUL +5764-299665-0059-464: (RELIGION HAS->WHO LEGION HAVE) BEEN TRIED AND IN ALL COUNTRIES IN ALL TIMES (HAS->BEST) FAILED +5764-299665-0060-465: RELIGION (HAS->HATH) ALWAYS BEEN THE ENEMY OF SCIENCE OF INVESTIGATION AND THOUGHT +5764-299665-0061-466: (RELIGION HAS->RELIGIONISTS) NEVER MADE (MAN->MEN) FREE +5764-299665-0062-467: (IT HAS->HE JUST) NEVER MADE MAN MORAL TEMPERATE INDUSTRIOUS AND HONEST +5764-299665-0063-468: (ARE CHRISTIANS MORE->AH CHRISTIAN SMALL) TEMPERATE NEARER VIRTUOUS NEARER HONEST THAN SAVAGES +5764-299665-0064-469: CAN WE CURE DISEASE BY SUPPLICATION +5764-299665-0065-470: CAN WE RECEIVE VIRTUE OR (HONOR->HANNER) AS ALMS +5764-299665-0066-471: RELIGION RESTS ON THE IDEA THAT NATURE HAS A MASTER AND THAT THIS MASTER WILL LISTEN TO PRAYER THAT (THIS->HIS) MASTER PUNISHES AND REWARDS THAT HE LOVES PRAISE AND FLATTERY AND HATES THE BRAVE AND FREE +5764-299665-0067-472: WE MUST HAVE (CORNER->CORN THE) STONES +5764-299665-0068-473: THE STRUCTURE MUST HAVE (A BASEMENT->ABASEMENT) +5764-299665-0069-474: IF WE (BUILD->BUILT) WE MUST BEGIN AT THE BOTTOM +5764-299665-0070-475: I HAVE A THEORY AND I HAVE FOUR (CORNER STONES->CORNESTONES) +5764-299665-0071-476: THE FIRST STONE (IS THAT MATTER->EAST AT MATHA) SUBSTANCE CANNOT BE DESTROYED CANNOT BE ANNIHILATED +5764-299665-0072-477: IF THESE (CORNER->CORN THE) STONES ARE FACTS IT FOLLOWS AS A NECESSITY THAT MATTER AND FORCE ARE FROM (AND->END) TO ETERNITY THAT THEY CAN NEITHER BE INCREASED NOR DIMINISHED +5764-299665-0073-478: IT FOLLOWS THAT NOTHING (HAS->HATH) BEEN OR CAN BE CREATED THAT THERE NEVER HAS BEEN OR CAN BE A CREATOR +5764-299665-0074-479: IT (FOLLOWS->FOLLOWED) THAT THERE COULD NOT 
HAVE BEEN ANY INTELLIGENCE (ANY->AND A) DESIGN BACK OF MATTER AND FORCE +5764-299665-0075-480: I SAY WHAT I THINK +5764-299665-0076-481: EVERY EVENT HAS PARENTS +5764-299665-0077-482: THAT WHICH (HAS->HATH) NOT HAPPENED COULD NOT +5764-299665-0078-483: IN THE INFINITE (CHAIN THERE IS->CHANGE WRITHS) AND THERE CAN BE NO BROKEN NO MISSING LINK +5764-299665-0079-484: WE NOW KNOW THAT OUR FIRST PARENTS WERE NOT FOREIGNERS +5764-299665-0080-485: WE NOW KNOW IF WE KNOW ANYTHING THAT THE UNIVERSE IS NATURAL AND THAT (MEN->MAN) AND WOMEN HAVE BEEN NATURALLY PRODUCED +5764-299665-0081-486: WE KNOW THE PATHS THAT LIFE HAS (TRAVELED->TRAVELLED) +5764-299665-0082-487: WE KNOW THE FOOTSTEPS OF ADVANCE THEY HAVE BEEN TRACED +5764-299665-0083-488: (FOR->FOUR) THOUSANDS OF YEARS MEN AND WOMEN HAVE BEEN TRYING TO REFORM THE WORLD +5764-299665-0084-489: WHY HAVE (THE->*) REFORMERS (FAILED->FAME) +5764-299665-0085-490: THEY DEPEND ON THE (LORD ON LUCK->LOT UNLUCK) AND CHARITY +5764-299665-0086-491: THEY (LIVE BY->LEAVE THY) FRAUD AND VIOLENCE AND BEQUEATH THEIR VICES TO THEIR CHILDREN +5764-299665-0087-492: FAILURE SEEMS TO BE THE TRADEMARK OF NATURE WHY +5764-299665-0088-493: NATURE (PRODUCES->PRODUED) WITHOUT PURPOSE SUSTAINS WITHOUT INTENTION AND DESTROYS WITHOUT THOUGHT +5764-299665-0089-494: (MUST THE->MISTER) WORLD (FOREVER REMAIN THE->FOR EVER REMAINED A) VICTIM OF IGNORANT PASSION +5764-299665-0090-495: WHY SHOULD MEN AND WOMEN HAVE CHILDREN THAT THEY CANNOT TAKE CARE OF CHILDREN THAT ARE (BURDENS->BURGLAR) AND CURSES WHY +5764-299665-0091-496: PASSION IS AND ALWAYS HAS BEEN DEAF +5764-299665-0092-497: LAW CAN PUNISH BUT IT CAN NEITHER REFORM CRIMINALS NOR PREVENT CRIME +5764-299665-0093-498: (THIS->THESE) CANNOT BE DONE BY TALK OR EXAMPLE +5764-299665-0094-499: THIS IS THE SOLUTION OF THE WHOLE QUESTION +5764-299665-0095-500: THIS (FREES WOMAN->FREEZE WOMEN) +5764-299665-0096-501: POVERTY AND CRIME WILL BE (CHILDLESS->CHIMELESS) +5764-299665-0097-502: IT IS FAR BETTER TO BE FREE TO LEAVE THE (FORTS->FAULTS) AND BARRICADES OF FEAR TO STAND ERECT AND (FACE->FAITH) THE FUTURE (WITH A SMILE->WE DESP MINE) +6070-63485-0000-2599: (THEY'RE DONE FOR->THEIR DUNFAR) SAID THE SCHOOLMASTER IN A LOW KEY TO THE (CHOUETTE->SWEAT) OUT WITH (YOUR->OUR) VITRIOL AND MIND YOUR EYE +6070-63485-0001-2600: THE TWO MONSTERS TOOK OFF THEIR SHOES AND MOVED STEALTHILY ALONG KEEPING IN THE SHADOWS OF THE HOUSES +6070-63485-0002-2601: BY MEANS OF THIS STRATAGEM THEY FOLLOWED SO CLOSELY THAT ALTHOUGH WITHIN A FEW STEPS OF (SARAH AND->SEREN) TOM THEY DID NOT HEAR THEM +6070-63485-0003-2602: SARAH AND HER BROTHER HAVING AGAIN PASSED BY THE (TAPIS FRANC->TAPPY FROG) ARRIVED CLOSE TO THE DILAPIDATED HOUSE WHICH WAS PARTLY IN RUINS AND ITS (OPENED->OPEN) CELLARS FORMED A KIND OF GULF ALONG WHICH THE STREET RAN IN THAT DIRECTION +6070-63485-0004-2603: IN AN INSTANT THE SCHOOLMASTER WITH A LEAP RESEMBLING IN STRENGTH AND AGILITY THE SPRING OF A TIGER SEIZED (SEYTON->SEATING) WITH ONE HAND BY THE THROAT AND EXCLAIMED YOUR MONEY OR I WILL FLING YOU INTO THIS (HOLE->HALL) +6070-63485-0005-2604: NO SAID THE OLD BRUTE (GRUMBLINGLY->TREMBLINGLY) NO NOT ONE RING WHAT A SHAME +6070-63485-0006-2605: TOM SEYTON DID NOT LOSE HIS PRESENCE OF MIND DURING THIS SCENE RAPIDLY AND UNEXPECTEDLY AS IT HAD OCCURRED +6070-63485-0007-2606: (OH->U) AH TO LAY A TRAP TO CATCH US REPLIED THE THIEF +6070-63485-0008-2607: THEN ADDRESSING THOMAS (SEYTON->SETTON) YOU KNOW THE (PLAIN->PLANE) OF SAINT DENIS +6070-63485-0009-2608: DID YOU SEE IN THE CABARET WE 
(HAVE->HAD) JUST LEFT FOR I KNOW YOU AGAIN THE MAN WHOM THE CHARCOAL MAN CAME TO SEEK +6070-63485-0010-2609: CRIED THE SCHOOLMASTER A THOUSAND FRANCS AND I'LL KILL HIM +6070-63485-0011-2610: (WRETCH->THATCH) I DO NOT (SEEK->SEE) HIS LIFE REPLIED SARAH TO THE SCHOOLMASTER +6070-63485-0012-2611: LET'S GO AND MEET HIM +6070-63485-0013-2612: OLD BOY IT WILL PAY FOR LOOKING AFTER +6070-63485-0014-2613: WELL MY WIFE SHALL BE THERE SAID THE SCHOOLMASTER YOU WILL TELL HER WHAT YOU WANT AND I SHALL SEE +6070-63485-0015-2614: IN THE (PLAIN->PLANE) OF SAINT (DENIS->DENY) +6070-63485-0016-2615: BETWEEN SAINT (OUEN->LAUIS) AND THE ROAD OF LA (REVOLTE->REVOLT) AT THE END OF THE ROAD AGREED +6070-63485-0017-2616: HE HAD FORGOTTEN THE ADDRESS OF THE SELF STYLED (FAN->PAMP) PAINTER +6070-63485-0018-2617: THE (FIACRE->FIATHIS) STARTED +6070-86744-0000-2569: (FRANZ->FRANCE) WHO SEEMED ATTRACTED BY SOME INVISIBLE INFLUENCE (TOWARDS->TO WHICH) THE COUNT IN WHICH TERROR WAS STRANGELY MINGLED FELT AN EXTREME RELUCTANCE TO PERMIT HIS FRIEND TO BE EXPOSED ALONE TO THE SINGULAR FASCINATION THAT THIS MYSTERIOUS PERSONAGE SEEMED TO EXERCISE OVER HIM AND THEREFORE MADE NO OBJECTION TO ALBERT'S REQUEST BUT AT ONCE ACCOMPANIED HIM TO THE DESIRED SPOT AND AFTER A SHORT DELAY THE COUNT JOINED THEM IN THE SALON +6070-86744-0001-2570: MY VERY GOOD FRIEND (AND->AN) EXCELLENT NEIGHBOR REPLIED THE COUNT WITH A SMILE YOU REALLY EXAGGERATE MY TRIFLING EXERTIONS +6070-86744-0002-2571: MY FATHER THE COMTE DE MORCERF ALTHOUGH (OF->A) SPANISH ORIGIN POSSESSES CONSIDERABLE INFLUENCE BOTH AT THE COURT OF FRANCE AND MADRID AND I UNHESITATINGLY (PLACE->PLACED) THE BEST SERVICES OF MYSELF AND ALL TO WHOM MY LIFE IS DEAR AT YOUR DISPOSAL +6070-86744-0003-2572: I CAN SCARCELY CREDIT IT +6070-86744-0004-2573: THEN IT IS SETTLED SAID THE COUNT AND I GIVE YOU MY SOLEMN ASSURANCE THAT I ONLY WAITED (AN OPPORTUNITY->IN A PETULITY) LIKE THE PRESENT TO REALIZE PLANS THAT I HAVE LONG MEDITATED +6070-86744-0005-2574: (SHALL WE->SHOW A) MAKE A POSITIVE APPOINTMENT FOR A PARTICULAR DAY AND HOUR INQUIRED THE COUNT ONLY LET ME WARN YOU THAT I AM PROVERBIAL FOR MY PUNCTILIOUS EXACTITUDE IN KEEPING MY ENGAGEMENTS DAY FOR DAY HOUR FOR HOUR SAID ALBERT THAT WILL SUIT ME TO A DOT +6070-86744-0006-2575: SO BE IT THEN REPLIED THE COUNT AND EXTENDING HIS HAND TOWARDS (A CALENDAR->THE CALENDER) SUSPENDED NEAR THE CHIMNEY PIECE HE SAID TO DAY IS THE TWENTY FIRST OF FEBRUARY AND DRAWING OUT HIS WATCH ADDED IT IS EXACTLY HALF PAST TEN O'CLOCK NOW PROMISE ME TO REMEMBER THIS AND EXPECT ME THE TWENTY FIRST OF MAY AT THE SAME HOUR IN THE FORENOON +6070-86744-0007-2576: I RESIDE IN MY FATHER'S HOUSE BUT OCCUPY A PAVILION AT THE FARTHER SIDE OF THE (COURT YARD ENTIRELY->COURTYARD AND TIRELESS) SEPARATED FROM THE MAIN BUILDING +6070-86744-0008-2577: NOW THEN SAID THE COUNT RETURNING HIS TABLETS TO HIS POCKET MAKE YOURSELF PERFECTLY EASY THE HAND OF YOUR TIME (PIECE->PEACE) WILL NOT BE MORE ACCURATE IN MARKING THE TIME THAN MYSELF +6070-86744-0009-2578: THAT DEPENDS WHEN (DO YOU->D'YE) LEAVE +6070-86744-0010-2579: FOR FRANCE NO FOR VENICE I SHALL REMAIN IN ITALY FOR ANOTHER YEAR OR TWO +6070-86744-0011-2580: THEN WE SHALL NOT MEET IN PARIS +6070-86744-0012-2581: I FEAR I SHALL NOT HAVE THAT HONOR +6070-86744-0013-2582: WELL SINCE WE MUST PART SAID THE COUNT HOLDING OUT A HAND TO EACH OF THE YOUNG MEN ALLOW ME TO WISH YOU BOTH (A->AS) SAFE AND PLEASANT JOURNEY +6070-86744-0014-2583: WHAT IS THE MATTER ASKED ALBERT OF FRANZ WHEN THEY HAD RETURNED TO THEIR OWN APARTMENTS 
YOU (SEEM->SEE) MORE THAN COMMONLY THOUGHTFUL +6070-86744-0015-2584: I WILL (CONFESS->CONSIST) TO YOU ALBERT REPLIED FRANZ THE COUNT IS A VERY SINGULAR PERSON AND THE APPOINTMENT YOU HAVE MADE TO MEET HIM IN PARIS FILLS ME WITH A THOUSAND APPREHENSIONS +6070-86744-0016-2585: DID YOU EVER MEET HIM PREVIOUSLY TO COMING HITHER +6070-86744-0017-2586: UPON MY (HONOR->HONOUR) THEN LISTEN TO ME +6070-86744-0018-2587: HE DWELT WITH CONSIDERABLE FORCE AND ENERGY ON THE ALMOST MAGICAL HOSPITALITY HE HAD RECEIVED FROM THE COUNT AND THE MAGNIFICENCE OF HIS ENTERTAINMENT IN THE (GROTTO->DRATO) OF THE THOUSAND AND ONE NIGHTS HE RECOUNTED WITH CIRCUMSTANTIAL EXACTITUDE ALL THE PARTICULARS OF THE SUPPER THE HASHISH THE STATUES THE DREAM AND HOW AT HIS AWAKENING THERE REMAINED NO PROOF (OR->OF) TRACE OF ALL THESE EVENTS SAVE THE SMALL YACHT SEEN IN THE DISTANT HORIZON DRIVING UNDER FULL SAIL TOWARD PORTO VECCHIO +6070-86744-0019-2588: THEN HE DETAILED THE CONVERSATION OVERHEARD BY HIM AT THE (COLOSSEUM->COLISEUM) BETWEEN THE COUNT AND VAMPA IN WHICH THE COUNT HAD PROMISED TO OBTAIN THE RELEASE OF THE BANDIT PEPPINO (AN->AND) ENGAGEMENT WHICH AS OUR READERS ARE AWARE HE MOST FAITHFULLY FULFILLED +6070-86744-0020-2589: BUT SAID FRANZ THE (CORSICAN->CORSICIAN) BANDITS THAT WERE AMONG THE CREW OF HIS VESSEL +6070-86744-0021-2590: WHY REALLY THE THING SEEMS TO ME SIMPLE ENOUGH +6070-86744-0022-2591: TALKING OF COUNTRIES REPLIED FRANZ OF WHAT (COUNTRY IS->COUNTRIES) THE COUNT WHAT IS HIS NATIVE (TONGUE->DONG) WHENCE DOES HE DERIVE HIS IMMENSE FORTUNE AND WHAT WERE THOSE EVENTS OF HIS EARLY LIFE A LIFE AS MARVELLOUS AS UNKNOWN THAT HAVE (TINCTURED->TINTED) HIS SUCCEEDING YEARS WITH (SO->SORE) DARK AND (GLOOMY A->BLOOMY AND) MISANTHROPY +6070-86744-0023-2592: CERTAINLY THESE ARE QUESTIONS THAT IN YOUR PLACE I SHOULD LIKE TO HAVE ANSWERED +6070-86744-0024-2593: MY DEAR (FRANZ->FRANCE) REPLIED ALBERT WHEN UPON RECEIPT OF MY LETTER YOU FOUND THE NECESSITY OF ASKING THE COUNT'S ASSISTANCE YOU PROMPTLY WENT TO HIM SAYING MY FRIEND ALBERT DE MORCERF IS IN DANGER HELP ME TO DELIVER HIM +6070-86744-0025-2594: WHAT ARE HIS MEANS OF EXISTENCE WHAT IS HIS (BIRTHPLACE->BOTH PLEASE) OF WHAT (COUNTRY IS->COUNTRIES) HE A NATIVE +6070-86744-0026-2595: I CONFESS HE ASKED ME NONE NO HE MERELY CAME AND FREED ME FROM THE HANDS OF (SIGNOR->SENOR) VAMPA WHERE I CAN ASSURE YOU IN SPITE OF ALL MY OUTWARD APPEARANCE OF EASE AND UNCONCERN I DID NOT VERY PARTICULARLY CARE TO REMAIN +6070-86744-0027-2596: AND THIS TIME IT MUST BE CONFESSED THAT CONTRARY TO THE USUAL STATE OF AFFAIRS IN DISCUSSIONS BETWEEN THE YOUNG MEN THE EFFECTIVE ARGUMENTS WERE ALL ON ALBERT'S SIDE +6070-86744-0028-2597: WELL SAID FRANZ WITH A SIGH DO AS YOU PLEASE MY DEAR VISCOUNT FOR YOUR ARGUMENTS ARE BEYOND MY POWERS OF REFUTATION +6070-86744-0029-2598: AND NOW MY DEAR FRANZ LET US TALK OF SOMETHING ELSE +6070-86745-0000-2549: THEN SHOULD ANYTHING APPEAR TO MERIT A MORE MINUTE EXAMINATION (ALBERT DE->I'LL BEAR THE) MORCERF COULD FOLLOW UP HIS RESEARCHES BY MEANS OF A SMALL GATE SIMILAR TO THAT CLOSE TO THE CONCIERGE'S DOOR AND WHICH MERITS (A->ARE) PARTICULAR DESCRIPTION +6070-86745-0001-2550: SHRUBS AND CREEPING PLANTS COVERED THE WINDOWS AND HID FROM THE GARDEN AND COURT THESE TWO APARTMENTS THE ONLY ROOMS INTO WHICH AS THEY WERE ON THE GROUND FLOOR THE PRYING EYES OF THE CURIOUS COULD PENETRATE +6070-86745-0002-2551: AT A QUARTER TO TEN (A VALET->THE VALLED) ENTERED HE COMPOSED WITH A LITTLE (GROOM->ROOM) NAMED JOHN AND WHO ONLY SPOKE ENGLISH ALL ALBERT'S 
ESTABLISHMENT ALTHOUGH THE COOK OF THE HOTEL WAS ALWAYS AT HIS SERVICE AND ON GREAT OCCASIONS THE (COUNT'S->COUNT) CHASSEUR ALSO +6070-86745-0003-2552: WAIT THEN DURING THE DAY TELL ROSA THAT WHEN I LEAVE THE OPERA I WILL SUP WITH HER AS SHE WISHES +6070-86745-0004-2553: VERY WELL AT HALF PAST TEN +6070-86745-0005-2554: IS THE COUNTESS UP YET +6070-86745-0006-2555: THE VALET LEFT THE ROOM +6070-86745-0007-2556: GOOD MORNING (LUCIEN->MISS YOUNG) GOOD MORNING SAID ALBERT YOUR PUNCTUALITY REALLY ALARMS ME +6070-86745-0008-2557: YOU WHOM I EXPECTED LAST YOU ARRIVE AT FIVE MINUTES TO TEN WHEN THE TIME FIXED WAS HALF PAST +6070-86745-0009-2558: NO NO MY DEAR FELLOW DO NOT CONFOUND OUR PLANS +6070-86745-0010-2559: YES HE HAS NOT MUCH TO COMPLAIN OF (BOURGES->BOURGE) IS THE CAPITAL OF CHARLES (SEVEN->THE SEVENTH) +6070-86745-0011-2560: IT IS FOR THAT REASON YOU SEE ME SO EARLY +6070-86745-0012-2561: I RETURNED HOME AT DAYBREAK AND STROVE TO SLEEP BUT MY HEAD ACHED AND I GOT UP TO HAVE A RIDE FOR AN HOUR +6070-86745-0013-2562: (PESTE->PESTS) I WILL DO NOTHING OF THE KIND THE MOMENT THEY COME FROM GOVERNMENT YOU WOULD FIND THEM EXECRABLE +6070-86745-0014-2563: BESIDES THAT DOES NOT CONCERN THE HOME BUT THE FINANCIAL DEPARTMENT +6070-86745-0015-2564: ABOUT WHAT ABOUT THE PAPERS +6070-86745-0016-2565: IN THE ENTIRE POLITICAL WORLD OF WHICH YOU ARE ONE OF THE LEADERS +6070-86745-0017-2566: THEY SAY THAT IT IS QUITE FAIR AND THAT SOWING SO MUCH RED YOU OUGHT TO REAP A LITTLE BLUE +6070-86745-0018-2567: COME COME THAT IS NOT BAD SAID (LUCIEN->LUCIAN) +6070-86745-0019-2568: WITH (YOUR TALENTS YOU->THE OTALONS HE) WOULD MAKE YOUR FORTUNE IN THREE OR FOUR YEARS +6128-63240-0000-503: THE GENTLEMAN HAD NOT EVEN NEEDED TO SIT DOWN TO BECOME INTERESTED APPARENTLY HE HAD TAKEN UP THE VOLUME FROM A TABLE AS SOON AS HE CAME IN AND STANDING THERE AFTER A SINGLE GLANCE ROUND THE APARTMENT HAD LOST HIMSELF IN (ITS->HIS) PAGES +6128-63240-0001-504: THAT HAS AN UNFLATTERING SOUND FOR ME SAID THE YOUNG MAN +6128-63240-0002-505: SHE IS WILLING TO RISK THAT +6128-63240-0003-506: JUST AS I AM THE VISITOR INQUIRED PRESENTING HIMSELF WITH RATHER A (WORK A DAY->WORKADAY) ASPECT +6128-63240-0004-507: HE WAS TALL AND LEAN AND DRESSED THROUGHOUT IN BLACK HIS SHIRT COLLAR WAS LOW AND WIDE AND THE TRIANGLE OF LINEN A LITTLE (CRUMPLED->CRAMPLED) EXHIBITED BY THE OPENING OF HIS WAISTCOAT WAS ADORNED BY A PIN CONTAINING A SMALL RED STONE +6128-63240-0005-508: IN SPITE OF THIS DECORATION THE YOUNG MAN LOOKED POOR AS (POOR->FAR) AS A YOUNG MAN COULD LOOK WHO HAD SUCH A FINE (HEAD->HAIR) AND SUCH MAGNIFICENT EYES +6128-63240-0006-509: THOSE OF BASIL RANSOM (WERE->WENT) DARK DEEP AND GLOWING HIS HEAD HAD A CHARACTER OF ELEVATION WHICH FAIRLY ADDED TO HIS (STATURE->STATUE) IT WAS A HEAD TO BE SEEN ABOVE THE LEVEL OF A CROWD ON SOME JUDICIAL BENCH OR POLITICAL PLATFORM OR EVEN ON A BRONZE (MEDAL->MEDDLE) +6128-63240-0007-510: THESE THINGS THE EYES ESPECIALLY WITH THEIR SMOULDERING FIRE MIGHT HAVE INDICATED THAT HE WAS TO BE (A->*) GREAT AMERICAN STATESMAN OR ON THE OTHER HAND (THEY->THERE) MIGHT SIMPLY HAVE PROVED THAT HE CAME FROM CAROLINA OR (ALABAMA->ALADAMA) +6128-63240-0008-511: AND YET THE READER WHO LIKES A COMPLETE IMAGE WHO DESIRES TO READ WITH THE SENSES AS WELL AS WITH THE REASON IS ENTREATED NOT TO FORGET THAT HE PROLONGED HIS (CONSONANTS->COUNTENANCE) AND SWALLOWED HIS VOWELS THAT HE WAS GUILTY (OF ELISIONS->VILLAGE) AND INTERPOLATIONS WHICH WERE EQUALLY (UNEXPECTED->INEXPECTED) AND THAT HIS DISCOURSE WAS PERVADED BY SOMETHING 
SULTRY AND VAST SOMETHING ALMOST AFRICAN IN ITS RICH BASKING TONE SOMETHING THAT SUGGESTED THE TEEMING EXPANSE OF THE COTTON FIELD +6128-63240-0009-512: AND HE TOOK UP HIS HAT VAGUELY A SOFT BLACK HAT WITH A LOW CROWN AND AN IMMENSE STRAIGHT BRIM +6128-63240-0010-513: WELL SO IT IS (THEY->THERE) ARE ALL WITCHES AND WIZARDS MEDIUMS AND SPIRIT (RAPPERS->WRAPPERS) AND (ROARING->ROWING) RADICALS +6128-63240-0011-514: IF YOU ARE GOING TO DINE WITH HER YOU HAD BETTER KNOW IT OH MURDER +6128-63240-0012-515: HE (LOOKED AT->LIFTED) MISSUS (LUNA->LEWINA) WITH INTELLIGENT INCREDULITY +6128-63240-0013-516: SHE WAS ATTRACTIVE AND IMPERTINENT ESPECIALLY THE LATTER +6128-63240-0014-517: HAVE YOU BEEN IN EUROPE +6128-63240-0015-518: NO I HAVEN'T BEEN ANYWHERE +6128-63240-0016-519: SHE HATES IT SHE WOULD LIKE TO ABOLISH IT +6128-63240-0017-520: THIS LAST REMARK HE MADE (AT A VENTURE->THAT ADVENTURE) FOR HE HAD NATURALLY NOT DEVOTED ANY SUPPOSITION WHATEVER TO MISSUS (LUNA->LENA) +6128-63240-0018-521: ARE YOU VERY AMBITIOUS YOU LOOK AS IF YOU WERE +6128-63240-0019-522: AND MISSUS (LUNA->LENA) ADDED THAT NOW SHE WAS BACK SHE DIDN'T KNOW WHAT SHE SHOULD DO +6128-63240-0020-523: ONE DIDN'T EVEN (KNOW->THERE) WHAT ONE HAD COME BACK FOR +6128-63240-0021-524: BESIDES OLIVE DIDN'T WANT HER IN BOSTON AND DIDN'T GO THROUGH THE FORM OF SAYING SO +6128-63240-0022-525: THAT WAS ONE COMFORT WITH (OLIVE->ALIVE) SHE NEVER WENT THROUGH ANY FORMS +6128-63240-0023-526: SHE STOOD THERE LOOKING CONSCIOUSLY AND RATHER SERIOUSLY (AT->AND) MISTER RANSOM A SMILE OF EXCEEDING FAINTNESS PLAYED ABOUT HER LIPS IT WAS JUST PERCEPTIBLE ENOUGH TO LIGHT UP THE NATIVE GRAVITY OF HER FACE +6128-63240-0024-527: HER VOICE WAS LOW AND AGREEABLE A CULTIVATED VOICE AND SHE EXTENDED A SLENDER WHITE HAND TO HER VISITOR (WHO->HER) REMARKED WITH SOME SOLEMNITY HE FELT A CERTAIN GUILT OF PARTICIPATION IN MISSUS LUNA'S INDISCRETION THAT HE WAS INTENSELY HAPPY TO MAKE HER ACQUAINTANCE +6128-63240-0025-528: HE OBSERVED THAT MISS CHANCELLOR'S HAND WAS AT ONCE (COLD AND->CALLED IN) LIMP SHE MERELY PLACED IT IN HIS WITHOUT EXERTING THE SMALLEST PRESSURE +6128-63240-0026-529: I SHALL BE BACK VERY LATE (WE ARE GOING TO A THEATRE->WILL DON'T YOU THE) PARTY THAT'S WHY WE DINE SO EARLY +6128-63240-0027-530: MISSUS (LUNA'S->LEANY'S) FAMILIARITY EXTENDED EVEN TO HER SISTER SHE REMARKED TO MISS CHANCELLOR THAT SHE LOOKED AS IF SHE WERE GOT UP FOR A SEA VOYAGE +6128-63241-0000-557: POOR RANSOM ANNOUNCED THIS (FACT->THAT) TO HIMSELF AS IF HE HAD MADE A GREAT DISCOVERY BUT IN REALITY HE HAD NEVER BEEN SO (BOEOTIAN->BE OCHIAN) AS AT THAT MOMENT +6128-63241-0001-558: THE WOMEN HE HAD HITHERTO KNOWN HAD BEEN MAINLY OF HIS OWN SOFT (CLIME->CLIMB) AND IT WAS NOT OFTEN THEY EXHIBITED THE TENDENCY HE DETECTED AND CURSORILY DEPLORED IN MISSUS LUNA'S SISTER +6128-63241-0002-559: RANSOM WAS PLEASED WITH THE VISION OF THAT REMEDY IT MUST BE REPEATED THAT HE WAS VERY PROVINCIAL +6128-63241-0003-560: HE WAS SORRY FOR HER BUT (HE SAW->HIS SORROW) IN A FLASH THAT NO ONE COULD HELP HER THAT WAS WHAT MADE HER TRAGIC +6128-63241-0004-561: SHE COULD NOT DEFEND HERSELF AGAINST A RICH ADMIRATION A KIND OF TENDERNESS OF ENVY OF ANY ONE WHO HAD BEEN SO HAPPY AS TO HAVE THAT OPPORTUNITY +6128-63241-0005-562: HIS FAMILY WAS RUINED THEY HAD LOST THEIR SLAVES THEIR PROPERTY (THEIR->THE) FRIENDS AND RELATIONS (THEIR->THE) HOME HAD TASTED OF ALL THE CRUELTY OF DEFEAT +6128-63241-0006-563: THE STATE OF MISSISSIPPI (SEEMED->SEEM) TO HIM THE STATE OF DESPAIR SO HE SURRENDERED THE REMNANTS OF HIS 
PATRIMONY TO HIS MOTHER AND SISTERS AND AT NEARLY THIRTY YEARS OF AGE (ALIGHTED->DELIGHTED) FOR THE FIRST TIME IN NEW YORK IN THE COSTUME OF HIS PROVINCE WITH FIFTY DOLLARS IN HIS POCKET AND A (GNAWING->GNARRING) HUNGER IN HIS HEART +6128-63241-0007-564: IT WAS IN THE FEMALE LINE AS (BASIL->BALES HAD) RANSOM HAD WRITTEN IN ANSWERING HER LETTER WITH A GOOD DEAL OF FORM AND FLOURISH HE SPOKE AS IF THEY HAD BEEN ROYAL HOUSES +6128-63241-0008-565: IF IT HAD BEEN POSSIBLE TO SEND MISSUS RANSOM MONEY OR EVEN CLOTHES SHE WOULD HAVE LIKED THAT BUT SHE HAD NO MEANS OF (ASCERTAINING HOW->ASSERTING HER) SUCH AN OFFERING WOULD BE TAKEN +6128-63241-0009-566: (OLIVE->OLIV) HAD A FEAR OF EVERYTHING BUT HER GREATEST FEAR WAS OF BEING AFRAID +6128-63241-0010-567: SHE HAD ERECTED IT INTO A SORT OF RULE OF CONDUCT THAT WHENEVER SHE SAW A RISK SHE WAS TO TAKE IT AND SHE HAD FREQUENT HUMILIATIONS AT FINDING HERSELF (SAFE->SAVED) AFTER ALL +6128-63241-0011-568: SHE WAS PERFECTLY SAFE AFTER WRITING TO BASIL RANSOM AND INDEED IT WAS DIFFICULT TO SEE WHAT HE COULD HAVE DONE TO HER EXCEPT THANK HER HE WAS ONLY EXCEPTIONALLY SUPERLATIVE FOR HER LETTER AND ASSURE HER THAT HE WOULD COME AND SEE HER THE FIRST TIME HIS BUSINESS HE WAS BEGINNING TO GET A LITTLE SHOULD TAKE HIM TO BOSTON +6128-63241-0012-569: HE WAS TOO SIMPLE TOO MISSISSIPPIAN FOR THAT SHE WAS ALMOST DISAPPOINTED +6128-63241-0013-570: OF ALL THINGS IN THE WORLD CONTENTION WAS MOST SWEET TO HER THOUGH WHY IT IS HARD TO IMAGINE FOR IT ALWAYS COST HER TEARS HEADACHES A DAY OR TWO IN BED (ACUTE EMOTION->ACUTEATION) AND IT WAS VERY POSSIBLE (BASIL->BASER) RANSOM WOULD NOT CARE TO (CONTEND->COMPEND) +6128-63244-0000-531: MISS CHANCELLOR HERSELF HAD THOUGHT SO MUCH ON THE VITAL SUBJECT WOULD NOT SHE MAKE A FEW REMARKS AND GIVE THEM SOME OF HER EXPERIENCES +6128-63244-0001-532: HOW DID THE LADIES (ON->AND) BEACON STREET FEEL ABOUT THE (BALLOT->BURIT) +6128-63244-0002-533: (PERHAPS->THERE) SHE COULD SPEAK FOR THEM MORE THAN FOR SOME OTHERS +6128-63244-0003-534: WITH HER (IMMENSE->MOST) SYMPATHY FOR REFORM SHE FOUND HERSELF SO OFTEN WISHING THAT (REFORMERS WERE->WE FELL IN AS WHERE) A LITTLE DIFFERENT +6128-63244-0004-535: (OLIVE->I) HATED (TO HEAR->DEER) THAT FINE AVENUE TALKED ABOUT AS IF IT WERE SUCH A REMARKABLE PLACE AND TO LIVE THERE (WERE->WHERE) A PROOF OF WORLDLY GLORY +6128-63244-0005-536: ALL SORTS (OF INFERIOR->HAVE CONTRAY YOUR) PEOPLE (LIVED->LIVE) THERE AND SO BRILLIANT A WOMAN AS MISSUS (FARRINDER->FARRENDER) WHO LIVED AT ROXBURY OUGHT NOT TO MIX THINGS UP +6128-63244-0006-537: SHE KNEW HER PLACE IN THE BOSTON (HIERARCHY->HALLWAKE) AND IT WAS NOT WHAT MISSUS (FARRINDER->FARRINGERS) SUPPOSED (SO THAT->SELL HIM) THERE WAS A WANT OF PERSPECTIVE IN TALKING TO HER AS IF SHE HAD BEEN (A->I) REPRESENTATIVE OF THE ARISTOCRACY +6128-63244-0007-538: SHE WISHED TO WORK IN ANOTHER FIELD SHE HAD LONG BEEN PREOCCUPIED WITH THE ROMANCE OF (THE->A) PEOPLE +6128-63244-0008-539: THIS MIGHT SEEM ONE OF THE MOST ACCESSIBLE OF PLEASURES BUT IN POINT OF FACT SHE HAD NOT FOUND IT SO +6128-63244-0009-540: CHARLIE WAS A YOUNG MAN IN A (WHITE->WIDE) OVERCOAT AND A PAPER COLLAR IT WAS (FOR HIM->BOUHAIR) IN THE LAST (ANALYSIS->OF NICES) THAT (THEY->THE) CARED MUCH THE MOST +6128-63244-0010-541: (OLIVE->ALL THE) CHANCELLOR WONDERED HOW MISSUS (FARRINDER->GREYNDER) WOULD TREAT (THAT->THEIR) BRANCH (OF->AT) THE QUESTION +6128-63244-0011-542: (IF->*) IT (BE->HAD BEEN) NECESSARY WE ARE PREPARED TO TAKE CERTAIN STEPS TO CONCILIATE THE SHRINKING +6128-63244-0012-543: (OUR->I'LL) 
MOVEMENT IS (FOR ALL->FULL) IT APPEALS TO THE MOST DELICATE LADIES +6128-63244-0013-544: (RAISE->THAT IS) THE STANDARD AMONG THEM AND BRING ME (A->YOUR) THOUSAND NAMES +6128-63244-0014-545: I LOOK AFTER THE DETAILS AS WELL AS THE BIG (CURRENTS->CURRANTS) MISSUS (FARRINDER->VERUNDER) ADDED IN A TONE AS EXPLANATORY AS COULD BE EXPECTED OF SUCH A WOMAN AND WITH A SMILE OF WHICH THE SWEETNESS WAS THRILLING TO HER LISTENER +6128-63244-0015-546: SAID (OLIVE->OLD) CHANCELLOR WITH A FACE WHICH SEEMED TO PLEAD FOR A (REMISSION OF->REMISSIONER'S) RESPONSIBILITY +6128-63244-0016-547: (I WANT->HOW WARNED) TO BE NEAR TO THEM TO HELP THEM +6128-63244-0017-548: IT WAS ONE THING TO CHOOSE (FOR->TO) HERSELF BUT NOW THE GREAT REPRESENTATIVE OF THE (ENFRANCHISEMENT->ENCRONTISEMENT) OF THEIR SEX FROM EVERY FORM OF (BONDAGE->BANDAGE) HAD CHOSEN FOR HER +6128-63244-0018-549: THE UNHAPPINESS OF WOMEN +6128-63244-0019-550: THEY WERE HER SISTERS (THEY->THERE) WERE HER OWN AND THE DAY OF THEIR DELIVERY HAD DAWNED +6128-63244-0020-551: THIS WAS THE ONLY SACRED CAUSE THIS WAS THE GREAT THE (JUST REVOLUTION->DESTRULIAN) IT (MUST->WAS) TRIUMPH IT (MUST->WAS) SWEEP EVERYTHING BEFORE IT IT MUST EXACT FROM THE OTHER THE BRUTAL BLOOD STAINED RAVENING RACE THE (LAST->LOST) PARTICLE OF (EXPIATION->EXPLANATION) +6128-63244-0021-552: (THEY WOULD BE->THERE HAD BEEN) NAMES OF WOMEN WEAK INSULTED PERSECUTED BUT DEVOTED IN EVERY (PULSE->PART) OF THEIR BEING TO THE CAUSE AND ASKING NO BETTER FATE THAN TO DIE FOR IT +6128-63244-0022-553: IT (WAS->WILL) NOT CLEAR TO THIS INTERESTING GIRL IN WHAT MANNER SUCH A SACRIFICE AS THIS LAST WOULD BE REQUIRED OF HER BUT SHE (SAW THE->SOLD A) MATTER THROUGH A KIND OF SUNRISE (MIST OF EMOTION->MISTAGINATION) WHICH MADE DANGER AS ROSY (AS->IS) SUCCESS +6128-63244-0023-554: WHEN MISS (BIRDSEYE->BIRD'S EYED) APPROACHED IT TRANSFIGURED HER FAMILIAR HER COMICAL SHAPE AND MADE THE POOR LITTLE (HUMANITARY->HUMANITY) HACK (SEEM->SIMPLE) ALREADY A MARTYR +6128-63244-0024-555: (OLIVE->I LEAVE) CHANCELLOR LOOKED AT HER WITH LOVE REMEMBERED THAT SHE HAD NEVER IN HER LONG (UNREWARDED->AND REWARDED) WEARY LIFE HAD A THOUGHT (OR->OF) AN IMPULSE FOR HERSELF +6128-63244-0025-556: SHE HAD BEEN CONSUMED BY THE PASSION OF SYMPATHY IT HAD (CRUMPLED->CRUMBLED) HER INTO AS MANY CREASES AS AN OLD GLAZED DISTENDED GLOVE +6432-63722-0000-2431: (BUT SCUSE->PECUSE) ME (DIDN'T YO FIGGER ON->THEN YOU'LL FORGON) DOIN SOME (DETECTIN AN GIVE->DETECTIVE AND GIVEN) UP (FISHIN->FISHIN') +6432-63722-0001-2432: AND SHAG WITH THE FREEDOM OF AN OLD SERVANT STOOD LOOKING AT HIS MASTER AS IF NOT QUITE UNDERSTANDING THE NEW TWIST THE AFFAIRS HAD TAKEN +6432-63722-0002-2433: I'M GOING (OFF FISHING->OUR FISHIN) I MAY NOT CATCH ANYTHING I MAY NOT WANT TO AFTER I GET THERE +6432-63722-0003-2434: GET READY (SHAG->SHAGG) YES (SAH COLONEL->I CAN) +6432-63722-0004-2435: AND HAVING PUT HIMSELF IN A FAIR WAY AS HE HOPED TO SOLVE SOME OF THE PROBLEMS CONNECTED WITH THE DARCY CASE COLONEL (ASHLEY->HASHY) WENT DOWN TO POLICE HEADQUARTERS TO LEARN MORE FACTS IN (*->THE) CONNECTION WITH THE MURDER OF THE EAST INDIAN +6432-63722-0005-2436: (PINKUS->PINKIS) AND DONOVAN HAVEN'T THEY (CARROLL YEP->CAROL HEIP) +6432-63722-0006-2437: (CARROLL->GAL) WAS TOO MUCH ENGAGED IN WATCHING THE BLUE SMOKE CURL LAZILY UPWARD FROM HIS CIGAR JUST THEN TO SAY MORE +6432-63722-0007-2438: ARE YOU GOING TO WORK ON THAT CASE COLONEL +6432-63722-0008-2439: BUT HE HADN'T ANY MORE TO DO WITH IT COLONEL THAN THAT CAT +6432-63722-0009-2440: PERHAPS NOT ADMITTED COLONEL ASHLEY 
+6432-63722-0010-2441: WE'VE GOT OUR MAN AND THAT'S ALL WE WANT +6432-63722-0011-2442: YOU'RE ON THE DARCY CASE THEY TELL ME IN A WAY YES +6432-63722-0012-2443: I'M WORKING IN THE (INTERESTS->INTEREST) OF THE YOUNG MAN +6432-63722-0013-2444: IT'S JUST ONE OF THEM COINCIDENCES LIKE +6432-63722-0014-2445: BUSTED HIS HEAD IN WITH A HEAVY CANDLESTICK ONE OF A PAIR +6432-63722-0015-2446: GAD (EXCLAIMED->EXPLAINED) THE COLONEL +6432-63722-0016-2447: THE VERY PAIR I WAS GOING TO BUY +6432-63722-0017-2448: LOOK HERE COLONEL DO YOU KNOW ANYTHING ABOUT THIS +6432-63722-0018-2449: AND THE DETECTIVE'S PROFESSIONAL INSTINCTS GOT THE UPPER HAND OF HIS FRIENDLINESS NOT THE LEAST IN THE WORLD NOT AS MUCH AS YOU DO WAS THE COOL ANSWER +6432-63722-0019-2450: I HAPPENED TO SEE THOSE CANDLESTICKS IN THE WINDOW OF SINGA (PHUT'S->PHUT) SHOP THE OTHER DAY AND I MADE UP MY MIND TO BUY THEM WHEN I HAD A CHANCE +6432-63722-0020-2451: NOW I'M AFRAID I WON'T BUT HOW DID IT HAPPEN +6432-63722-0021-2452: (PHUT->FAT) I DON'T KNOW WHETHER THAT'S HIS FIRST OR HIS LAST NAME ANYHOW HE HAD A PARTNER NAMED (SHERE->SHEAR) ALI +6432-63722-0022-2453: ANYHOW HE (AND PHUT->INFORT) DIDN'T GET ALONG VERY WELL IT SEEMS +6432-63722-0023-2454: (NEIGHBORS->LABORS) OFTEN HEARD (EM SCRAPPIN->HIM SCRAP IN) A LOT AND THIS AFTERNOON THEY WENT AT IT AGAIN (*->AT) HOT AND HEAVY +6432-63722-0024-2455: (TOWARD->TO OUR) DARK A MAN WENT IN TO BUY A LAMP +6432-63722-0025-2456: HE FOUND THE PLACE WITHOUT A LIGHT IN IT STUMBLED OVER SOMETHING ON THE FLOOR AND THERE WAS ALI'S BODY WITH THE HEAD BUSTED IN AND THIS HEAVY CANDLESTICK NEAR IT +6432-63722-0026-2457: SURE HELD SO TIGHT WE COULD HARDLY GET IT OUT +6432-63722-0027-2458: MAYBE THE FIGHT WAS ABOUT WHO (OWNED->ON) THE WATCH FOR THE (DAGOS->DAGGERS) TALKED IN THEIR FOREIGN LINGO AND NONE OF THE NEIGHBORS COULD TELL WHAT THEY WERE (SAYIN->SAYING) I SEE +6432-63722-0028-2459: AND THE WATCH HAVE YOU IT YES IT'S HERE +6432-63722-0029-2460: THAT'S THE WATCH ANNOUNCED THE HEADQUARTERS DETECTIVE REACHING IN FOR IT GOING (YET->IN) SEE +6432-63722-0030-2461: YOU'RE NOT (AS SQUEAMISH->A SCREAMISH) AS ALL THAT ARE YOU JUST BECAUSE IT WAS IN A DEAD MAN'S (HAND->HANDS) AND (IN->*) A WOMAN'S +6432-63722-0031-2462: AND (DONOVAN'S->DOLOMAN'S) VOICE WAS PLAINLY (SKEPTICAL->SCEPTICAL) +6432-63722-0032-2463: YES IT MAY HAVE SOME ROUGH EDGES ON IT +6432-63722-0033-2464: AND I'VE READ ENOUGH ABOUT GERMS TO KNOW THE DANGER I'D ADVISE YOU TO BE CAREFUL +6432-63722-0034-2465: IF YOU DON'T MIND I SHOULD LIKE TO EXAMINE THIS A BIT +6432-63722-0035-2466: BEFORE THE BIG WIND IN IRELAND SUGGESTED THONG WITH A NOD (AT->OF) HIS IRISH COMPATRIOT (SLIGHTLY LAUGHED->SLIGHTLY'LL HAVE) THE COLONEL +6432-63722-0036-2467: THAT'S RIGHT AGREED THE COLONEL AS HE CONTINUED TO MOVE HIS MAGNIFYING GLASS OVER THE SURFACE OF THE STILL TICKING WATCH +6432-63722-0037-2468: (AND A->IN THE) CLOSE OBSERVER MIGHT HAVE OBSERVED THAT HE DID NOT TOUCH HIS BARE FINGERS TO THE TIMEPIECE BUT POKED IT ABOUT AND TOUCHED IT HERE AND THERE WITH THE END OF A (LEADPENCIL->LEAD PENCIL) +6432-63722-0038-2469: AND (DONOVAN->DONOON) TAKE (A->HER) FRIEND'S ADVICE AND DON'T BE TOO FREE WITH THAT WATCH TOO FREE WITH IT +6432-63722-0039-2470: (ASKED->AS) THE (SURPRISED->SURPRISE) DETECTIVE YES +6432-63722-0040-2471: DON'T SCRATCH YOURSELF ON IT WHATEVER YOU DO WHY NOT +6432-63722-0041-2472: SIMPLY BECAUSE THIS WATCH +6432-63722-0042-2473: SOME ONE OUT (HERE->HER) TO SEE YOU +6432-63722-0043-2474: ALL RIGHT BE THERE IN A SECOND +6432-63722-0044-2475: (SINGA 
PHUT->SHANGHAT) WAS THE PANTING ANSWER +6432-63722-0045-2476: I WANT TO TALK OVER DARCY'S CASE WITH YOU THE COLONEL HAD SAID AND THE TWO HAD TALKED HAD THOUGHT HAD TALKED AGAIN AND NOW WERE SILENT FOR A TIME +6432-63722-0046-2477: WHAT ARE THE CHANCES OF GETTING HIM OFF LEGALLY IF WE GO AT IT FROM A NEGATIVE STANDPOINT ASKED THE COLONEL +6432-63722-0047-2478: RATHER A HYPOTHETICAL QUESTION COLONEL BUT I SHOULD SAY IT MIGHT BE A FIFTY FIFTY PROPOSITION +6432-63722-0048-2479: AT BEST HE WOULD GET OFF WITH A SCOTCH VERDICT OF NOT (PROVEN->PROVING) BUT HE DOESN'T WANT THAT NOR DO I +6432-63722-0049-2480: AND YOU I DON'T WANT IT EITHER +6432-63722-0050-2481: BUT I WANT TO KNOW JUST WHERE WE STAND NOW I KNOW +6432-63722-0051-2482: BUT I NEED TO DO A LITTLE MORE SMOKING OUT FIRST NOW I WANT TO THINK +6432-63722-0052-2483: IF YOU'LL EXCUSE ME I'LL PRETEND I'M FISHING AND I MAY CATCH SOMETHING +6432-63722-0053-2484: IN FACT I HAVE A FEELING THAT (I'LL->I) LAND MY FISH +6432-63722-0054-2485: (I'D->I) RECOMMEND HIM TO YOU INSTEAD OF BLACKSTONE THANKS (LAUGHED->LAP) KENNETH +6432-63722-0055-2486: WHAT IS IT PERHAPS I CAN HELP YOU +6432-63722-0056-2487: THE OLD ADAGE OF TWO HEADS YOU KNOW +6432-63722-0057-2488: YES IT STILL HOLDS GOOD +6432-63722-0058-2489: NO ALIMONY (REPEATED->REPLIED) THE COLONEL PUZZLED YES JUST THAT +6432-63722-0059-2490: AND THERE'S NO REASON YOU SHOULDN'T KNOW +6432-63723-0000-2491: CHUCKLED THE COLONEL AS HE SKILFULLY PLAYED THE LUCKLESS TROUT NOW STRUGGLING TO GET LOOSE FROM THE HOOK +6432-63723-0001-2492: AND WHEN THE FISH WAS LANDED PANTING ON THE GRASS AND SHAG HAD BEEN ROUSED FROM HIS SLUMBER TO SLIP (THE->A) NOW LIMP FISH INTO THE (CREEL->CREO) COLONEL ASHLEY GAVE A SIGH OF RELIEF AND REMARKED I THINK I SEE IT NOW +6432-63723-0002-2493: THE REASON SHE ASKED NO ALIMONY INQUIRED KENNETH +6432-63723-0003-2494: NO I WASN'T THINKING OF THAT +6432-63723-0004-2495: HOWEVER DON'T THINK I'M NOT INTERESTED IN YOUR CASE I'VE (FISHED->FINISHED) ENOUGH FOR TO DAY +6432-63723-0005-2496: WELL I DON'T KNOW THAT YOU CAN +6432-63723-0006-2497: IT (ISN'T->IS IN) GENERALLY KNOWN WENT ON THE LAWYER THAT THE HOTEL KEEPER'S WIFE HAS LEFT HIM +6432-63723-0007-2498: IT WAS ONE OF WHAT AT FIRST MIGHT BE CALLED REFINED CRUELTY ON HER HUSBAND'S PART DEGENERATING GRADUALLY INTO THAT OF (THE->A) BASER SORT +6432-63723-0008-2499: YOU DON'T MEAN THAT (LARCH->LARGE) STRUCK HER THAT THERE WAS PHYSICAL ABUSE DO YOU ASKED THE COLONEL THAT'S WHAT HE DID +6432-63723-0009-2500: THE COLONEL DID NOT DISCLOSE THE FACT THAT IT WAS NO NEWS TO HIM +6432-63723-0010-2501: AARON GRAFTON'S STATEMENT WAS BEING UNEXPECTEDLY CONFIRMED +6432-63723-0011-2502: HE REMEMBERED THAT CYNTHIA AND GRAFTON HAD ONCE BEEN IN LOVE WITH EACH OTHER +6432-63723-0012-2503: SHE SAID HE HAD STRUCK HER MORE THAN ONCE AND SHE COULD STAND IT NO LONGER +6432-63723-0013-2504: BECAUSE (LARCH->LARGE) MADE NO (DEFENSE->DEFENCE) +6432-63723-0014-2505: (LARCH->LARGE) BY REFUSING TO APPEAR PRACTICALLY ADMITTED THE CHARGES AGAINST HIM AND DID NOT OPPOSE THE SEPARATION +6432-63723-0015-2506: SO I HAD TO LET HER HAVE HER WAY AND WE DID NOT ASK THE COURT FOR MONEY THOUGH I HAD NO SUCH SQUEAMISH FEELINGS WHEN IT CAME TO MY (COUNSEL FEE->COUNCIL FEET) +6432-63723-0016-2507: NO BUT HE WILL OR (I'LL->ELSE) SUE (HIM->EM) AND GET JUDGMENT OH HE'LL PAY ALL RIGHT +6432-63723-0017-2508: AND IT TAKES ALL SORTS OF PERSONS TO MAKE IT UP +6432-63723-0018-2509: STILL I WOULD LIKE TO KNOW +6432-63723-0019-2510: THE (MURDER->MURDERER) OF MISSUS DARCY HAD SOME TIME AGO BEEN 
SHIFTED OFF THE FRONT PAGE THOUGH IT WOULD GET BACK THERE WHEN THE YOUNG (JEWELER->JEWELLER) WAS TRIED +6432-63723-0020-2511: IT HAD A DOUBLE REPUTATION SO TO SPEAK +6432-63723-0021-2512: GRAVE AND EVEN REVEREND (*->THE) CONVENTIONS ASSEMBLED IN ITS (BALLROOM AND->BALL ROOM IN) POLITICIANS OF THE UPPER IF NOT BETTER CLASS WERE FREQUENTLY SEEN IN ITS DINING ROOM OR CAFE +6432-63723-0022-2513: (LARCH->LARGE) HIMSELF WAS A PECULIAR CHARACTER +6432-63723-0023-2514: IN A SMALLER PLACE HE WOULD HAVE BEEN CALLED A SALOON KEEPER +6432-63723-0024-2515: AND IT WAS THIS MAN RICH (IT WAS->OVER) SAID HANDSOME CERTAINLY THAT (CYNTHIA RATCHFORD->CENTIA RETFORD) HAD MARRIED +6432-63723-0025-2516: TO THIS WAS THE ANSWER WHISPERED MONEY +6432-63723-0026-2517: AND IN A WAY IT WAS TRUE +6432-63723-0027-2518: SHE ALSO SAW AN OPPORTUNITY OF PAYING OLD DEBTS AND REAPING SOME REVENGES +6432-63723-0028-2519: AFTER THE MARRIAGE WHICH WAS A BRILLIANT AND GAY ONE IF NOT HAPPY THE (LARCH->LARGE) HOTEL IT COULD HARDLY BE CALLED (A->*) HOME BECAME THE SCENE OF MANY (FESTIVE OCCASIONS->FESTIVATIONS) +6432-63723-0029-2520: THEN IT WAS SAID OF (LARCH->LARGE) THAT SOON AFTER THE ECHOES OF THE WEDDING CHIMES HAD DIED AWAY HE HAD BEGUN TO TREAT HIS WIFE (WITH->FOR THE) REFINED CRUELTY THAT HIDDEN AWAY FROM THE PUBLIC UNDERNEATH HIS HABITUAL MANNER THERE WAS THE RAWNESS OF THE BRUTE +6432-63723-0030-2521: BUT IT WAS NOTICED THAT THE OLDER AND MORE CONSERVATIVE FAMILIES WERE LESS OFTEN REPRESENTED AND WHEN THEY WERE IT WAS BY SOME OF THE YOUNGER MEMBERS WHOSE REPUTATIONS WERE ALREADY (SMIRCHED->SMARCHED) OR WHO HAD NOT YET ACQUIRED ANY AND WERE WILLING TO TAKE A CHANCE +6432-63723-0031-2522: IT WOULDN'T DO YOU KNOW AFTER THAT STORY CAME OUT FOR ME (AND->IN) THE VICE CHANCELLOR WHO SAT IN (THE->A) CASE AS WELL AS OTHER JUDGES AND MEMBERS OF THE BAR TO BE SEEN THERE KENNETH EXPLAINED TO THE COLONEL +6432-63723-0032-2523: MEANWHILE COLONEL ASHLEY WAS A VERY BUSY MAN AND TO NO ONE DID HE TELL VERY MUCH ABOUT HIS ACTIVITIES HE SAW DARCY FREQUENTLY AT THE JAIL AND TO THAT YOUNG MAN'S PLEADINGS THAT SOMETHING (*->TO) BE DONE ALWAYS RETURNED THE ANSWER +6432-63723-0033-2524: (DON'T->DON) WORRY IT WILL COME OUT ALL RIGHT +6432-63723-0034-2525: I'M GOING (TO RECTIFY->DIRECTIFY) THEM BUT IT WILL TAKE TIME +6432-63723-0035-2526: IT'S HARD FOR MISS MASON TOO ALTHOUGH SHE'S BEARING UP LIKE A MAJOR +6432-63723-0036-2527: SO KING (GOT->GOD) BAIL WHO PUT IT UP +6432-63723-0037-2528: IT WAS (HIGH->TIME) LARCH +6432-63723-0038-2529: THEY TOOK HARRY AWAY (A WHILE AGO->*) +6432-63723-0039-2530: BUT HIS ARE PRETTY UNCERTAIN SHOES TO BE IN JUST THE SAME +6432-63723-0040-2531: ONLY THAT I DARCY HESITATED AND GREW RED +6432-63723-0041-2532: GOOD EVENING COLONEL HE CALLED GENIALLY (WILL->WHERE) YOU JOIN ME IN A (WELSH->WELL) RABBIT +6432-63723-0042-2533: THANK YOU NO +6432-63723-0043-2534: I'M AFRAID MY (DIGESTION->DIAD) ISN'T QUITE UP TO THAT AS I'VE HAD TO CUT OUT MY FISHING OF LATE +6432-63723-0044-2535: NOW AS TO CERTAIN MATTERS IN THE STORE ON THE MORNING OF THE MURDER +6432-63723-0045-2536: (THE->THEY) STOPPED CLOCKS FOR INSTANCE HAVE YOU ANY THEORY +6432-63723-0046-2537: THERE WERE THREE OF THEM THE (CENTER->CENTRE) FIGURE BEING THAT OF HARRY KING AND HE WAS VERY MUCH INTOXICATED +6432-63723-0047-2538: THAT IS NOT ALWAYS BUT SOMETIMES IT HAPPENED TO BE SO NOW +6432-63723-0048-2539: I BEG YOUR PARDON HE SAID IN THE CULTURED TONES HE KNEW SO WELL HOW TO USE YET OF WHICH HE MADE SO LITTLE USE OF LATE +6432-63723-0049-2540: I SAID WHERE HAVE YOU BEEN 
REMARKED THE OTHER WE'VE MISSED YOU +6432-63723-0050-2541: I SAID I WAS GOLFING HE WENT ON EXCEEDINGLY DISTINCTLY THOUGH WITH AN EFFORT +6432-63723-0051-2542: WHY POLONIUS SOME ONE ASKED +6432-63723-0052-2543: BECAUSE DEAR FRIEND REPLIED KING SOFTLY HE SOMEWHAT RESEMBLES A CERTAIN PERSON HERE WHO TALKS TOO MUCH BUT WHO IS NOT SO WISE AS HE THINKS +6432-63723-0053-2544: THERE WAS A RATTLE OF (COINS ON->COIN DOWN) THE MAHOGANY (BAR AS->BARS) KING SOUGHT TO DISENTANGLE A SINGLE BILL FROM THE (WADDED->WATERED) UP CURRENCY IN HIS POCKET +6432-63723-0054-2545: IT'S IT'S AN ODD COIN AN OLD ROMAN ONE THAT MISSUS DARCY HAD IN HER PRIVATE COLLECTION KEPT IN THE JEWELRY STORE SAFE WAS THE WHISPERED ANSWER +6432-63723-0055-2546: I WENT OVER THEM (*->NEAR) THE (OTHER->*) DAY AND NOTICED SOME WERE MISSING THOUGH I SAW THEM ALL WHEN I PAID A VISIT TO HER JUST A SHORT TIME BEFORE SHE WAS KILLED +6432-63723-0056-2547: THAT WAS HERS WENT ON THE (JEWELER->JUROR) +6432-63723-0057-2548: NOW HARRY KING HAS IT EXCLAIMED COLONEL ASHLEY +6938-70848-0000-1216: EVEN THE SUN CAME OUT PALE AND WATERY AT NOON +6938-70848-0001-1217: THE (COLDS->GOLDS) AND RHEUMATISM OF THE (RAINY->REINY) MONTHS VANISHED +6938-70848-0002-1218: (ASKED A->AS TO) WORKER LAST SUNDAY YOU DID IT WHEN THE YUNKERS +6938-70848-0003-1219: WELL DIDN'T (THEY SHOOT->ISSUED) US ONE MAN EXHIBITED HIS ARM IN A SLING +6938-70848-0004-1220: HAVEN'T I GOT SOMETHING TO REMEMBER THEM BY THE DEVILS +6938-70848-0005-1221: WHO ARE YOU TO DESTROY THE LEGAL GOVERNMENT (WHO IS LENIN->WITH LENNING) A GERMAN +6938-70848-0006-1222: WHO ARE YOU A COUNTER REVOLUTIONIST A PROVOCATOR THEY (BELLOWED->BELOVED) AT HIM +6938-70848-0007-1223: YOU CALL YOURSELVES THE PEOPLE OF (RUSSIA->RACHEL) BUT (YOU'RE->YOU ARE) NOT THE PEOPLE OF (RUSSIA->RATIA) +6938-70848-0008-1224: (THE PEASANTS ARE THE->TO PIECE AND OTHER) PEOPLE OF (RUSSIA->RATIA) WAIT UNTIL THE PEASANTS +6938-70848-0009-1225: WE KNOW WHAT THE PEASANTS WILL SAY AREN'T THEY (WORKINGMEN->WORKING MAN) LIKE OURSELVES +6938-70848-0010-1226: (THESE MEN ESPECIALLY->THIS MAN HAS SPECIALLY) WELCOMED (THE->TO) CALL TO A CONGRESS OF PEASANTS +6938-70848-0011-1227: (THESE->THIS) LAST (WERE->WED) THE YOUNG GENERATION WHO HAD BEEN SERVING IN THE ARMY +6938-70848-0012-1228: WHEREUPON THE OLD (EXECUTIVE->EXECUTED) COMMITTEE LEFT THE HALL +6938-70848-0013-1229: DOWN WITH HIM THEY SHRIEKED +6938-70848-0014-1230: FEARFUL TUMULT CRIES DOWN WITH THE (BOLSHEVIKI->BALL CHEVIKI) +6938-70848-0015-1231: UPON MY RETURN I VISITED (SMOLNY->MOLLY) NO SUCH ACCUSATION WAS MADE AGAINST ME THERE AFTER A BRIEF CONVERSATION I LEFT AND (THAT'S ALL LET ANY ONE->THAT SOUL LATINUE IN) PRESENT MAKE SUCH AN ACCUSATION +6938-70848-0016-1232: MEANWHILE THE QUESTION OF THE (STATUS->STRATUS) OF THE (EXECUTIVE->EXECUTORY) COMMITTEE WAS AGITATING ALL MINDS +6938-70848-0017-1233: BY DECLARING (THE->THEIR) ASSEMBLY (EXTRAORDINARY->EXTRAORDINARILY) CONFERENCE IT HAD BEEN PLANNED TO (BLOCK THE REELECTION->PLUCK THIRD RELECTION) OF THE (EXECUTIVE->EXECUTED) COMMITTEE +6938-70848-0018-1234: BUT THIS (WORKED->WORK) BOTH (WAYS->WAGE) THE (LEFT SOCIALIST REVOLUTIONISTS->LAD SOCIALLY REVOLUTIONIST) DECIDED THAT IF THE CONGRESS HAD NO POWER OVER THE (EXECUTIVE->EXUDY) COMMITTEE (THEN THE EXECUTIVE->TEN TO EXECUTE) COMMITTEE HAD NO POWER OVER THE CONGRESS +6938-70848-0019-1235: ON THE TWENTY SEVENTH OCCURRED THE DEBATE ON THE LAND QUESTION WHICH (REVEALED THE->REVIL TO) DIFFERENCES BETWEEN THE (AGRARIAN PROGRAMME->INGREDIAN PROGRAM) OF THE (BOLSHEVIKI->BULGEHEVIKI) AND THE LEFT 
SOCIALIST REVOLUTIONARIES +6938-70848-0020-1236: THE CONSTITUENT ASSEMBLY WILL NOT DARE TO BREAK WITH THE WILL OF THE PEOPLE +6938-70848-0021-1237: FOLLOWED HIM LENIN LISTENED TO NOW WITH ABSORBING INTENSITY +6938-70848-0022-1238: THE FIRST (STAGE->AGE) WAS (THE->A) CRUSHING OF AUTOCRACY AND THE (CRUSHING->CRASHING) OF THE POWER OF THE INDUSTRIAL (CAPITALISTS->CAPITALIST) AND (LAND OWNERS->THE LANDOWNERS) WHOSE INTERESTS ARE (CLOSELY RELATED->CLOTHESILY) +6938-70848-0023-1239: (THE DUMAS->DID YOU MESS) AND (ZEMSTVOS->THEM STOOLS) WERE DROPPED +6938-70848-0024-1240: HE KNEW THAT AN AGREEMENT WITH THE BOLSHEVIKI WAS BEING DISCUSSED BUT HE DID NOT KNOW THAT IT HAD BEEN CONCLUDED +6938-70848-0025-1241: HE SPOKE TO THE (RUMP->RUM) CONVENTION +6938-70848-0026-1242: THE (VILLAGES->RELIGIOUS) WILL SAVE US IN THE END +6938-70848-0027-1243: BUT THE PRESENT (MOVEMENT->MOMENT) IS INTERNATIONAL AND THAT IS WHY IT IS INVINCIBLE +6938-70848-0028-1244: THE (WILL->WHEEL) OF MILLIONS OF WORKERS IS (NOW->SOME) CONCENTRATED IN (THIS->THE) HALL +6938-70848-0029-1245: A NEW HUMANITY WILL BE BORN OF THIS WAR +6938-70848-0030-1246: I GREET YOU WITH THE (CHRISTENING->CHRISTIANING) OF A NEW RUSSIAN LIFE AND FREEDOM +7018-75788-0000-135: THEN I TOOK UP A GREAT STONE FROM AMONG THE TREES AND COMING UP TO HIM SMOTE HIM THEREWITH ON THE HEAD WITH ALL MY MIGHT AND CRUSHED IN HIS SKULL AS HE LAY DEAD DRUNK +7018-75788-0001-136: BEHOLD A SHIP WAS MAKING FOR THE ISLAND THROUGH THE DASHING SEA AND CLASHING WAVES +7018-75788-0002-137: HEARING THIS I WAS SORE TROUBLED REMEMBERING WHAT I HAD BEFORE SUFFERED FROM THE APE KIND +7018-75788-0003-138: UPON THIS HE BROUGHT ME A COTTON BAG AND (GIVING->GIVEN) IT TO (ME->HIM HE) SAID TAKE THIS BAG AND FILL IT WITH PEBBLES FROM THE BEACH AND GO FORTH WITH A COMPANY OF THE TOWNSFOLK TO WHOM I WILL GIVE A CHARGE RESPECTING THEE +7018-75788-0004-139: DO AS THEY DO AND (BELIKE->BE LIKE) THOU SHALT GAIN WHAT MAY FURTHER THY RETURN VOYAGE TO THY NATIVE LAND +7018-75788-0005-140: THEN HE CARRIED ME TO THE BEACH WHERE I FILLED MY BAG (*->AND) WITH PEBBLES LARGE AND SMALL AND PRESENTLY WE SAW A COMPANY OF FOLK (ISSUE->ISSUED) FROM THE TOWN EACH BEARING A BAG LIKE MINE FILLED WITH PEBBLES +7018-75788-0006-141: TO THESE HE COMMITTED ME COMMENDING ME TO THEIR CARE AND SAYING THIS MAN IS A STRANGER SO TAKE HIM WITH YOU AND TEACH HIM HOW TO GATHER THAT HE MAY GET HIS DAILY BREAD AND YOU WILL EARN YOUR REWARD AND RECOMPENSE IN HEAVEN +7018-75788-0007-142: NOW SLEEPING UNDER THESE TREES WERE MANY (APES->IPES) WHICH WHEN THEY SAW US ROSE AND FLED FROM US AND SWARMED UP AMONG THE BRANCHES WHEREUPON MY COMPANIONS BEGAN TO PELT THEM WITH WHAT THEY HAD IN THEIR BAGS AND THE APES FELL TO PLUCKING OF THE FRUIT OF THE TREES AND CASTING THEM AT THE FOLK +7018-75788-0008-143: WE WEIGHED ANCHOR AND SHAHRAZAD PERCEIVED THE DAWN OF DAY AND CEASED SAYING HER PERMITTED SAY +7018-75788-0009-144: WHEN IT WAS THE FIVE HUNDRED AND FIFTY NINTH NIGHT +7018-75788-0010-145: AND CEASED NOT SAILING TILL WE ARRIVED SAFELY AT (BASSORAH->PESSORAH) +7018-75788-0011-146: THERE I ABODE A LITTLE AND THEN WENT ON TO (BAGHDAD->BAGDAD) WHERE I ENTERED MY QUARTER AND FOUND MY HOUSE AND (FOREGATHERED->FORGATHERED) WITH MY FAMILY AND SALUTED MY FRIENDS WHO GAVE ME JOY OF MY SAFE RETURN AND I LAID UP ALL MY GOODS AND VALUABLES IN MY STOREHOUSES +7018-75788-0012-147: AFTER WHICH I RETURNED TO MY OLD MERRY WAY OF LIFE AND FORGOT ALL I HAD SUFFERED IN THE GREAT PROFIT AND GAIN I HAD MADE +7018-75788-0013-148: NEXT MORNING AS SOON AS IT WAS 
LIGHT HE PRAYED THE DAWN PRAYER AND AFTER BLESSING MOHAMMED THE CREAM OF ALL CREATURES BETOOK HIMSELF TO THE HOUSE OF (SINDBAD->SINBAD) THE SEAMAN AND WISHED HIM A GOOD DAY +7018-75788-0014-149: HERE I FOUND A GREAT SHIP READY FOR SEA AND FULL OF MERCHANTS AND NOTABLES WHO HAD WITH THEM GOODS OF PRICE SO I EMBARKED MY BALES THEREIN +7018-75788-0015-150: (HAPLY->HAPPILY) AMONGST YOU IS ONE RIGHTEOUS WHOSE PRAYERS THE LORD WILL ACCEPT +7018-75788-0016-151: PRESENTLY THE SHIP STRUCK THE MOUNTAIN AND BROKE UP AND ALL (AND->THEN) EVERYTHING ON BOARD OF HER WERE PLUNGED INTO THE SEA +7018-75788-0017-152: BUT (IT BURNETH->AT BERNNETH) IN THEIR BELLIES SO THEY CAST IT UP AGAIN AND IT (CONGEALETH->CONCEALETH) ON THE SURFACE OF THE WATER WHEREBY ITS COLOR AND QUANTITIES ARE CHANGED AND AT LAST THE WAVES CAST IT ASHORE AND THE TRAVELLERS AND MERCHANTS WHO KNOW IT (COLLECT IT->COLLECTED) AND SELL IT +7018-75788-0018-153: EACH THAT DIED WE WASHED AND SHROUDED IN SOME OF THE CLOTHES AND LINEN CAST ASHORE BY THE TIDES AND AFTER (A->*) LITTLE THE REST OF MY FELLOWS PERISHED ONE BY ONE TILL I HAD BURIED THE LAST OF THE PARTY AND ABODE ALONE ON THE ISLAND WITH BUT A LITTLE PROVISION LEFT I WHO WAS WONT TO HAVE SO MUCH +7018-75788-0019-154: BUT THERE IS MAJESTY AND THERE IS NO MIGHT SAVE IN ALLAH THE GLORIOUS THE GREAT +7018-75789-0000-155: WHEN IT WAS THE FIVE HUNDRED AND SIXTY FIRST NIGHT +7018-75789-0001-156: THEN (SIGHING->SIGNED) FOR MYSELF I SET TO WORK COLLECTING A NUMBER OF PIECES OF CHINESE AND (COMORIN->CORMOR AND) ALOES WOOD AND I BOUND THEM TOGETHER WITH ROPES FROM THE WRECKAGE THEN I CHOSE OUT FROM THE BROKEN UP (SHIPS->SHIP) STRAIGHT PLANKS OF EVEN SIZE AND FIXED THEM FIRMLY UPON THE (ALOES->ALLO'S) WOOD MAKING ME A BOAT RAFT A LITTLE NARROWER THAN THE CHANNEL OF THE STREAM AND I TIED IT TIGHTLY AND FIRMLY AS THOUGH IT WERE NAILED +7018-75789-0002-157: LAND AFTER LAND SHALT THOU (SEEK AND FIND->SEE CONFINED) BUT NO OTHER LIFE ON THY WISH SHALL WAIT (FRET->FRED) NOT THY SOUL IN THY THOUGHTS (O NIGHT ALL->A KNIGHT OR) WOES SHALL END OR SOONER OR LATE +7018-75789-0003-158: I (ROWED->RIDE) MY CONVEYANCE INTO THE PLACE WHICH WAS INTENSELY DARK AND THE CURRENT CARRIED (*->ME) THE RAFT WITH IT DOWN THE UNDERGROUND CHANNEL +7018-75789-0004-159: AND I THREW MYSELF DOWN UPON MY FACE ON THE RAFT BY REASON OF THE NARROWNESS OF THE CHANNEL WHILST THE STREAM CEASED NOT TO CARRY ME ALONG KNOWING NOT NIGHT FROM DAY FOR THE EXCESS OF THE GLOOM WHICH ENCOMPASSED ME ABOUT (AND->IN) MY TERROR AND CONCERN FOR MYSELF LEST I SHOULD PERISH +7018-75789-0005-160: WHEN I AWOKE AT LAST I FOUND MYSELF IN THE LIGHT OF HEAVEN AND OPENING MY EYES I SAW MYSELF IN A BROAD STREAM AND THE RAFT MOORED TO AN ISLAND IN THE MIDST OF A NUMBER OF INDIANS AND ABYSSINIANS +7018-75789-0006-161: BUT I WAS DELIGHTED AT MY ESCAPE FROM THE RIVER +7018-75789-0007-162: WHEN THEY SAW I UNDERSTOOD THEM (NOT->NIGHT) AND MADE THEM NO ANSWER ONE OF THEM CAME FORWARD AND SAID TO ME IN ARABIC PEACE BE WITH THEE O MY BROTHER +7018-75789-0008-163: O MY BROTHER ANSWERED HE WE ARE HUSBANDMEN AND (TILLERS->TELLERS) OF THE SOIL WHO CAME OUT TO WATER OUR FIELDS (AND->IN) PLANTATIONS AND FINDING THEE ASLEEP ON THIS RAFT LAID HOLD OF IT AND MADE IT FAST BY US AGAINST THOU SHOULDST AWAKE AT THY LEISURE +7018-75789-0009-164: I ANSWERED FOR ALLAH'S SAKE (O->AND) MY LORD ERE I SPEAK GIVE ME SOMEWHAT TO EAT FOR I AM STARVING AND AFTER ASK ME WHAT THOU WILT +7018-75789-0010-165: WHEN IT WAS THE FIVE HUNDRED AND SIXTY SECOND NIGHT +7018-75789-0011-166: SHE SAID IT 
HATH (REACHED->RAGED) ME O AUSPICIOUS KING THAT SINDBAD THE SEAMAN CONTINUED WHEN I LANDED AND FOUND MYSELF AMONGST THE INDIANS AND ABYSSINIANS AND HAD TAKEN SOME REST THEY CONSULTED AMONG THEMSELVES AND SAID TO ONE ANOTHER THERE IS NO HELP FOR IT BUT WE CARRY HIM WITH US AND PRESENT HIM TO OUR KING THAT HE MAY ACQUAINT HIM WITH HIS ADVENTURES +7018-75789-0012-167: SO I CONSORTED WITH THE CHIEF OF THE ISLANDERS AND THEY PAID ME THE UTMOST RESPECT +7018-75789-0013-168: SO I ROSE WITHOUT STAY OR DELAY AND KISSED THE KING'S HAND AND ACQUAINTED HIM WITH MY LONGING TO SET OUT WITH THE MERCHANTS FOR THAT I PINED AFTER MY PEOPLE AND (MINE->MY) OWN LAND +7018-75789-0014-169: QUOTH HE THOU ART THINE OWN MASTER YET IF IT BE THY WILL TO ABIDE WITH US ON OUR HEAD AND EYES BE IT FOR THOU (GLADDENEST->GLADNESSED) US WITH THY COMPANY +7018-75789-0015-170: BY ALLAH (O->ARE) MY LORD ANSWERED I THOU HAST INDEED OVERWHELMED ME WITH THY FAVOURS AND WELL DOINGS BUT I WEARY FOR A SIGHT OF MY FRIENDS AND FAMILY AND NATIVE COUNTRY +7018-75789-0016-171: THEN I TOOK LEAVE OF HIM AND OF ALL MY INTIMATES AND ACQUAINTANCES IN THE ISLAND AND EMBARKED WITH THE MERCHANTS (AFORESAID->AFOR SAID) +7018-75789-0017-172: HE ASKED ME WHENCE THEY CAME AND I SAID TO HIM BY ALLAH O COMMANDER OF THE FAITHFUL I KNOW NOT THE NAME OF THE CITY NOR THE WAY THITHER +7018-75789-0018-173: FOR STATE PROCESSIONS (A->ARE) THRONE IS (SET->SAID) FOR HIM UPON A HUGE ELEPHANT ELEVEN CUBITS HIGH AND UPON THIS HE SITTETH HAVING HIS GREAT LORDS AND OFFICERS AND GUESTS STANDING IN TWO RANKS ON HIS RIGHT HAND AND ON HIS LEFT +7018-75789-0019-174: HIS LETTER HATH SHOWN ME THIS AND AS FOR THE MIGHTINESS OF HIS DOMINION THOU HAST TOLD US WHAT THOU HAST (EYE->DIE) WITNESSED +7018-75789-0020-175: PRESENTLY MY FRIENDS CAME TO ME AND I DISTRIBUTED PRESENTS AMONG MY FAMILY AND GAVE (ALMS AND->ARMS IN) LARGESSE AFTER WHICH I YIELDED MYSELF TO JOYANCE AND ENJOYMENT MIRTH AND (MERRY MAKING->MERRYMAKING) AND FORGOT ALL THAT I HAD SUFFERED +7018-75789-0021-176: SUCH THEN (O->ARE) MY BROTHERS IS THE HISTORY OF WHAT (BEFEL->BEFELL) ME IN MY SIXTH VOYAGE AND TO MORROW INSHALLAH +7018-75789-0022-177: I WILL TELL YOU THE STORY OF MY SEVENTH AND LAST VOYAGE WHICH IS STILL MORE WONDROUS AND MARVELLOUS THAN THAT OF THE FIRST SIX +7018-75789-0023-178: WHEN IT WAS THE FIVE HUNDRED AND SIXTY THIRD NIGHT +7018-75789-0024-179: SHE SAID IT HATH REACHED ME O AUSPICIOUS KING THAT WHEN SINDBAD THE (SEAMAN->SEAMEN) HAD (RELATED->RELIGHTED) THE HISTORY OF WHAT (BEFEL->BEFELL) HIM IN HIS SIXTH VOYAGE AND ALL THE COMPANY HAD DISPERSED SINDBAD THE LANDSMAN WENT HOME AND SLEPT AS OF (WONT->WANT) +7018-75789-0025-180: THE SEVENTH VOYAGE OF (SINDBAD->SINBAD) THE SEAMAN +7018-75789-0026-181: (KNOW->NO) O COMPANY THAT AFTER MY RETURN FROM MY SIXTH VOYAGE WHICH BROUGHT ME ABUNDANT (PROFIT->PROPHET) I RESUMED MY FORMER LIFE (IN->AND) ALL POSSIBLE JOYANCE AND ENJOYMENT AND MIRTH AND MAKING MERRY DAY AND NIGHT AND I TARRIED SOME TIME IN THIS SOLACE AND SATISFACTION TILL MY SOUL BEGAN ONCE MORE TO LONG TO SAIL THE SEAS AND SEE FOREIGN COUNTRIES AND COMPANY WITH MERCHANTS AND (HEAR->HERE) NEW THINGS +7018-75789-0027-182: SO HAVING MADE UP MY MIND I PACKED UP IN BALES A QUANTITY OF PRECIOUS STUFFS SUITED FOR SEA TRADE AND REPAIRED WITH THEM FROM (BAGHDAD->BAGDAD) CITY TO (BASSORAH->BASSORA) TOWN WHERE I FOUND A SHIP READY FOR SEA AND IN HER (A->OUR) COMPANY OF CONSIDERABLE MERCHANTS +7018-75789-0028-183: BUT THE CAPTAIN AROSE AND TIGHTENING HIS GIRDLE TUCKED UP HIS SKIRTS AND AFTER TAKING REFUGE 
WITH ALLAH FROM SATAN THE (STONED CLOMB->STONE CLIMBED) TO THE MAST HEAD WHENCE HE LOOKED OUT RIGHT AND LEFT AND GAZING AT THE PASSENGERS AND CREW FELL TO (BUFFETING->BUFFET IN) HIS FACE AND PLUCKING OUT HIS BEARD +7018-75789-0029-184: THIS HE (SET->SAID) IN A SAUCER WETTED WITH A LITTLE WATER AND AFTER WAITING A SHORT TIME SMELT AND TASTED IT AND THEN HE TOOK OUT OF THE CHEST A BOOKLET WHEREIN HE READ AWHILE AND SAID WEEPING (KNOW O->NO ARE) YE PASSENGERS THAT IN THIS BOOK IS A MARVELLOUS MATTER DENOTING THAT WHOSO (COMETH HITHER->COME THITHER) SHALL SURELY DIE WITHOUT HOPE OF ESCAPE FOR THAT THIS OCEAN IS CALLED THE SEA OF THE CLIME OF THE KING WHEREIN IS (THE->A) SEPULCHRE OF OUR LORD SOLOMON SON OF DAVID ON BOTH BE PEACE +7018-75789-0030-185: A SECOND FISH MADE ITS APPEARANCE (THAN->AND) WHICH WE HAD SEEN (NAUGHT->NOUGHT) MORE MONSTROUS +7018-75789-0031-186: WHEN SUDDENLY A VIOLENT SQUALL OF WIND AROSE AND SMOTE THE SHIP WHICH ROSE OUT OF THE WATER AND SETTLED UPON A GREAT REEF THE HAUNT OF SEA MONSTERS WHERE IT BROKE UP AND FELL ASUNDER INTO PLANKS AND ALL AND EVERYTHING ON BOARD WERE PLUNGED INTO THE SEA +7105-2330-0000-2310: UNFORTUNATELY THERE COULD BE NO DOUBT OR MISCONCEPTION AS (TO PLATTERBAFF'S->THE PLATTERBUFF'S) GUILT +7105-2330-0001-2311: HE HAD NOT ONLY PLEADED GUILTY BUT HAD EXPRESSED HIS INTENTION OF REPEATING HIS ESCAPADE IN OTHER DIRECTIONS AS SOON AS CIRCUMSTANCES PERMITTED THROUGHOUT THE TRIAL HE WAS BUSY EXAMINING A SMALL MODEL OF THE FREE TRADE HALL IN MANCHESTER +7105-2330-0002-2312: (THE JURY->VIRTUARY) COULD NOT POSSIBLY FIND THAT THE PRISONER HAD NOT DELIBERATELY AND INTENTIONALLY BLOWN UP (THE->WE) ALBERT HALL THE QUESTION WAS COULD THEY FIND ANY EXTENUATING CIRCUMSTANCES WHICH WOULD PERMIT OF AN ACQUITTAL +7105-2330-0003-2313: OF COURSE ANY SENTENCE WHICH THE LAW MIGHT (FEEL->FILL) COMPELLED TO INFLICT WOULD BE FOLLOWED BY AN IMMEDIATE PARDON BUT IT WAS HIGHLY DESIRABLE FROM THE (GOVERNMENT'S POINT->GOVERNMENT SPITE) OF VIEW THAT THE NECESSITY FOR SUCH AN EXERCISE OF CLEMENCY SHOULD NOT ARISE +7105-2330-0004-2314: (A HEADLONG->I HAD LONG) PARDON (ON->AND) THE EVE OF A (BYE->BI) ELECTION WITH THREATS OF A HEAVY VOTING DEFECTION IF IT WERE WITHHELD OR EVEN DELAYED WOULD NOT NECESSARILY BE A SURRENDER BUT IT WOULD LOOK LIKE ONE +7105-2330-0005-2315: HENCE THE (ANXIETY->ANCIDE) IN THE CROWDED COURT AND IN THE LITTLE GROUPS GATHERED ROUND THE TAPE MACHINES IN WHITEHALL AND (DOWNING->DAWNING) STREET AND OTHER AFFECTED CENTRES +7105-2330-0006-2316: THE JURY (RETURNED->TURN) FROM CONSIDERING THEIR VERDICT THERE WAS A FLUTTER AN EXCITED MURMUR A DEATHLIKE HUSH +7105-2330-0007-2317: (THE FOREMAN->THEREFORE MAN) DELIVERED HIS MESSAGE +7105-2330-0008-2318: THE JURY FIND THE PRISONER GUILTY OF BLOWING UP THE ALBERT HALL +7105-2330-0009-2319: (THE JURY->THEY JERRY) WISH TO ADD A RIDER DRAWING ATTENTION TO THE FACT THAT A BY ELECTION (IS PENDING->EXPENDING) IN THE PARLIAMENTARY DIVISION OF NEMESIS ON HAND +7105-2330-0010-2320: AND (MAY->MADE) THE (LORD->LARD) HAVE MERCY ON THE (POLL->POLE) A JUNIOR (COUNSEL->COUNCIL) EXCLAIMED IRREVERENTLY +7105-2330-0011-2321: FIFTEEN HUNDRED SAID (THE->A) PRIME MINISTER WITH A SHUDDER IT'S TOO HORRIBLE TO THINK OF +7105-2330-0012-2322: OUR MAJORITY LAST TIME WAS ONLY A THOUSAND AND SEVEN +7105-2330-0013-2323: SEVEN THIRTY AMENDED THE PRIME MINISTER WE MUST AVOID ANY APPEARANCE OF PRECIPITANCY +7105-2330-0014-2324: NOT LATER (THAN->THEN) SEVEN THIRTY THEN SAID THE CHIEF (ORGANISER->ORGANIZER) I HAVE PROMISED THE AGENT DOWN THERE THAT HE 
SHALL BE ABLE TO DISPLAY POSTERS ANNOUNCING (PLATTERBAFF->PLATTER BAFF) IS OUT (BEFORE THE POLL->BEHELD A POLE) OPENS +7105-2330-0015-2325: HE SAID IT WAS (OUR->HER) ONLY CHANCE OF GETTING A TELEGRAM (RADPROP IS->RED RAPIS) IN TO NIGHT +7105-2330-0016-2326: (DESPITE->THIS SPEED) THE EARLINESS OF THE HOUR A SMALL CROWD HAD GATHERED IN THE STREET OUTSIDE AND THE HORRIBLE MENACING (TRELAWNEY->TREE LONGER) REFRAIN OF THE FIFTEEN HUNDRED VOTING MEN CAME IN A STEADY MONOTONOUS CHANT +7105-2330-0017-2327: HE EXCLAIMED WON'T GO +7105-2330-0018-2328: HE SAYS HE NEVER HAS LEFT PRISON WITHOUT A (BRASS BAND->BREASTPAND) TO PLAY HIM OUT AND (HE'S NOT->HE SNUG) GOING TO GO WITHOUT ONE NOW +7105-2330-0019-2329: SAID (THE->A) PRIME MINISTER WE CAN HARDLY BE SUPPOSED TO SUPPLY A (RELEASED->RELIEF) PRISONER WITH A BRASS BAND HOW ON EARTH COULD WE DEFEND IT (ON THE ESTIMATES->UNDEST) +7105-2330-0020-2330: (ANYWAY->AN AWAY) HE WON'T GO UNLESS HE HAS A BAND +7105-2330-0021-2331: (POLL->PAUL) OPENS IN FIVE MINUTES +7105-2330-0022-2332: IS (PLATTERBAFF->FLATHER BATH) OUT YET +7105-2330-0023-2333: IN HEAVEN'S NAME WHY +7105-2330-0024-2334: THE CHIEF (ORGANISER->ORGANIZER) RANG OFF +7105-2330-0025-2335: THIS IS NOT A MOMENT FOR STANDING ON DIGNITY HE OBSERVED BLUNTLY (MUSICIANS->MY SICIENTS) MUST BE SUPPLIED AT ONCE +7105-2330-0026-2336: CAN'T YOU GET A STRIKE PERMIT ASKED THE (ORGANISER->ORGANIZER) +7105-2330-0027-2337: I'LL TRY SAID THE HOME SECRETARY AND WENT TO THE TELEPHONE +7105-2330-0028-2338: EIGHT O'CLOCK STRUCK THE CROWD OUTSIDE CHANTED WITH AN INCREASING VOLUME OF SOUND (WILL VOTE->WITHOUT) THE OTHER WAY +7105-2330-0029-2339: (A->I) TELEGRAM WAS BROUGHT IN +7105-2330-0030-2340: IT WAS FROM THE CENTRAL (COMMITTEE->COME INTO) ROOMS AT NEMESIS +7105-2330-0031-2341: WITHOUT A BAND HE WOULD NOT GO AND THEY HAD NO BAND +7105-2330-0032-2342: (A QUARTER->ACQUIRED THEIR) PAST TEN HALF PAST +7105-2330-0033-2343: HAVE YOU ANY BAND INSTRUMENTS OF AN EASY NATURE TO PLAY +7105-2330-0034-2344: DEMANDED THE CHIEF (ORGANISER->ORGANIZER) OF THE PRISON GOVERNOR DRUMS (CYMBALS->SYMBOLS) THOSE SORT OF THINGS +7105-2330-0035-2345: (THE WARDERS->THOUGH OURS) HAVE A PRIVATE BAND OF THEIR OWN SAID THE GOVERNOR BUT OF COURSE I COULDN'T ALLOW (THE MEN->THEM IN) THEMSELVES +7105-2330-0036-2346: LEND US THE INSTRUMENTS SAID THE CHIEF (ORGANISER->ORGANIZER) +7105-2330-0037-2347: (THE->THEIR) POPULAR SONG OF THE MOMENT REPLIED THE AGITATOR AFTER A MOMENT'S REFLECTION +7105-2330-0038-2348: IT WAS A TUNE THEY HAD ALL HEARD HUNDREDS OF TIMES SO THERE WAS NO DIFFICULTY IN TURNING OUT A PASSABLE IMITATION OF IT TO THE IMPROVISED (STRAINS->TRAINS) OF I (DIDN'T->DON'T) WANT TO DO IT THE (PRISONER STRODE->PRISONERS STROLLED) FORTH TO FREEDOM +7105-2330-0039-2349: THE WORD OF THE SONG HAD REFERENCE IT WAS UNDERSTOOD (TO->THAT) THE INCARCERATING GOVERNMENT AND NOT TO THE DESTROYER OF THE ALBERT HALL +7105-2330-0040-2350: (THE->THIS) SEAT WAS LOST AFTER ALL BY A NARROW (MAJORITY->MATURITY) +7105-2330-0041-2351: THE LOCAL TRADE UNIONISTS TOOK OFFENCE AT THE FACT OF CABINET MINISTERS HAVING PERSONALLY ACTED AS STRIKE BREAKERS AND EVEN THE RELEASE OF (PLATTERBAFF->PLATTERBUFF) FAILED TO PACIFY THEM +7105-2340-0000-2272: WITH THAT NOTORIOUS FAILING OF HIS HE WAS NOT (THE->A) SORT OF PERSON ONE WANTED IN ONE'S HOUSE +7105-2340-0001-2273: WELL THE FAILING STILL EXISTS (DOESN'T IT->DOESNATE) SAID (HER->THE) HUSBAND OR DO YOU SUPPOSE A REFORM OF CHARACTER IS ENTAILED ALONG WITH THE ESTATE +7105-2340-0002-2274: BESIDES CYNICISM APART (HIS BEING->IS 
VERY) RICH (WILL->WE) MAKE A DIFFERENCE IN THE WAY PEOPLE WILL LOOK AT HIS (FAILING->FEELING) +7105-2340-0003-2275: WHEN A MAN IS ABSOLUTELY WEALTHY NOT MERELY WELL TO DO ALL SUSPICION OF (SORDID->SARDID) MOTIVE (NATURALLY->NATURAL) DISAPPEARS THE THING BECOMES MERELY A (TIRESOME->PARASSOME) MALADY +7105-2340-0004-2276: (WILFRID PIGEONCOTE->WILFRIED DIGEON CODE) HAD SUDDENLY BECOME HEIR TO HIS UNCLE SIR (WILFRID PIGEONCOTE ON THE->WILL FID PIGEON COAT UNDER) DEATH OF HIS COUSIN MAJOR WILFRID PIGEONCOTE WHO HAD SUCCUMBED (TO->*) THE AFTER EFFECTS OF (A POLO->APOLLO) ACCIDENT +7105-2340-0005-2277: (A WILFRID PIGEONCOTE->OUR WILFRED FEATURE) HAD COVERED HIMSELF WITH (HONOURS->HONORS) IN THE COURSE OF MARLBOROUGH'S CAMPAIGNS AND THE NAME WILFRID HAD BEEN A (BAPTISMAL->BABYSMAL) WEAKNESS IN THE FAMILY EVER SINCE THE NEW HEIR TO THE FAMILY DIGNITY AND ESTATES WAS A YOUNG MAN OF ABOUT FIVE AND TWENTY WHO WAS KNOWN MORE BY (REPUTATION->REPETITION) THAN BY PERSON TO (A WIDE->AVOID) CIRCLE OF COUSINS AND KINSFOLK +7105-2340-0006-2278: AND THE REPUTATION WAS AN UNPLEASANT ONE +7105-2340-0007-2279: FROM HIS LATE (SCHOOLDAYS->SCHOOL DAYS) ONWARD HE HAD BEEN POSSESSED BY AN ACUTE AND OBSTINATE FORM OF (KLEPTOMANIA->CLUBTOMANIA) HE HAD THE ACQUISITIVE INSTINCT OF THE COLLECTOR WITHOUT ANY OF THE COLLECTOR'S DISCRIMINATION +7105-2340-0008-2280: (THE->THIS) SEARCH USUALLY PRODUCED A LARGE AND VARIED YIELD THIS IS FUNNY SAID PETER (PIGEONCOTE->PIGEON BOAT) TO HIS WIFE (SOME->I'M) HALF (HOUR->OUR) AFTER THEIR CONVERSATION HERE'S A TELEGRAM FROM WILFRID SAYING HE'S PASSING THROUGH HERE IN HIS (MOTOR->MOTTAR) AND WOULD LIKE TO STOP AND PAY US HIS RESPECTS +7105-2340-0009-2281: (SIGNED WILFRID PIGEONCOTE->SIGN WILFRED PIGEON COAT) +7105-2340-0010-2282: I SUPPOSE (HE'S->IS) BRINGING US A PRESENT (FOR THE->FURTHER) SILVER WEDDING GOOD GRACIOUS +7105-2340-0011-2283: THE TALK FLITTED NERVOUSLY AND HURRIEDLY FROM ONE IMPERSONAL TOPIC TO ANOTHER +7105-2340-0012-2284: IN THE DRAWING ROOM AFTER DINNER THEIR NERVOUSNESS AND AWKWARDNESS INCREASED +7105-2340-0013-2285: OH WE HAVEN'T SHOWN YOU THE SILVER WEDDING PRESENTS SAID MISSUS PETER SUDDENLY AS (THOUGH->THOSE) STRUCK BY A BRILLIANT IDEA (FOR->OF HER) ENTERTAINING THE GUEST HERE THEY ALL ARE +7105-2340-0014-2286: SUCH NICE (USEFUL GIFTS->YEARS FORGIVES) A FEW (DUPLICATES->DIPPLICATES) OF COURSE +7105-2340-0015-2287: SEVEN CREAM JUGS PUT IN PETER +7105-2340-0016-2288: WE FEEL THAT WE MUST LIVE (ON CREAM->UNCLEAN) FOR THE REST OF OUR LIVES +7105-2340-0017-2289: OF COURSE SOME OF THEM CAN BE CHANGED +7105-2340-0018-2290: I PUT IT DOWN BY THE (CLARET->CLARA) JUG SAID WILFRID BUSY WITH ANOTHER OBJECT +7105-2340-0019-2291: (VIGILANCE->VICHILLENZ) WAS NOT COMPLETELY CROWNED WITH A SENSE OF VICTORY +7105-2340-0020-2292: AFTER THEY HAD SAID GOOD NIGHT TO THEIR VISITOR MISSUS PETER EXPRESSED HER CONVICTION THAT HE HAD TAKEN SOMETHING +7105-2340-0021-2293: HOW ON EARTH ARE WE TO KNOW SAID PETER THE MEAN PIG HASN'T BROUGHT US A PRESENT AND I'M HANGED IF HE SHALL CARRY ONE OFF +7105-2340-0022-2294: (IT'S->IS) THE ONLY THING TO DO +7105-2340-0023-2295: (WILFRID->WILFRED) WAS (LATE->LAID) IN COMING DOWN TO BREAKFAST AND HIS (MANNER->MANNERS) SHOWED PLAINLY THAT SOMETHING WAS AMISS +7105-2340-0024-2296: (IT'S->YES AND) AN UNPLEASANT THING TO HAVE TO SAY HE BLURTED OUT PRESENTLY BUT I'M AFRAID YOU MUST HAVE A THIEF AMONG YOUR SERVANTS SOMETHING'S BEEN TAKEN OUT OF MY (PORTMANTEAU->PART MANTLE) +7105-2340-0025-2297: IT WAS A LITTLE PRESENT (FROM->FOR) MY MOTHER AND MYSELF FOR YOUR 
SILVER WEDDING +7105-2340-0026-2298: I SHOULD HAVE GIVEN IT TO YOU LAST NIGHT AFTER DINNER (ONLY->ON) IT HAPPENED TO BE A (CREAM->QUEEN) JUG AND YOU SEEMED ANNOYED AT HAVING SO MANY DUPLICATES SO I FELT RATHER AWKWARD (ABOUT->OF A) GIVING YOU ANOTHER +7105-2340-0027-2299: (THE SNATCHER->THIS NATURE) HAD BEEN AN ORPHAN (THESE->THIS) MANY YEARS +7105-2340-0028-2300: LADY (ERNESTINE PIGEONCOTE->ERNESTON BEEJON KOTE) HIS MOTHER MOVED IN CIRCLES WHICH WERE ENTIRELY BEYOND THEIR COMPASS OR AMBITIONS AND THE SON WOULD PROBABLY ONE DAY BE AN AMBASSADOR +7105-2340-0029-2301: HUSBAND AND WIFE LOOKED BLANKLY AND DESPERATELY AT ONE ANOTHER +7105-2340-0030-2302: IT WAS MISSUS PETER WHO ARRIVED FIRST AT AN INSPIRATION HOW DREADFUL (TO THINK->THE THING) THERE ARE THIEVES IN THE HOUSE WE (KEEP->GIVE) THE DRAWING ROOM LOCKED UP AT NIGHT OF COURSE BUT ANYTHING MIGHT BE CARRIED OFF WHILE WE (ARE->WERE) AT BREAKFAST +7105-2340-0031-2303: SHE ROSE AND WENT OUT HURRIEDLY AS THOUGH TO ASSURE HERSELF THAT THE DRAWING ROOM WAS NOT BEING STRIPPED OF ITS SILVERWARE AND RETURNED A MOMENT LATER BEARING A CREAM (JUG->CHUG) IN HER HANDS +7105-2340-0032-2304: THE (PIGEONCOTES->PIGEON COATS) HAD TURNED PALER THAN EVER MISSUS PETER HAD A FINAL INSPIRATION +7105-2340-0033-2305: PETER DASHED OUT OF THE ROOM WITH GLAD RELIEF HE HAD LIVED SO LONG DURING THE LAST FEW MINUTES THAT A GOLDEN WEDDING SEEMED WITHIN MEASURABLE DISTANCE +7105-2340-0034-2306: MISSUS PETER TURNED TO HER GUEST WITH CONFIDENTIAL (COYNESS->KINDNESS) +7105-2340-0035-2307: PETER'S LITTLE WEAKNESS (IT RUNS IN THE->A TRANSCENDI) FAMILY GOOD LORD +7105-2340-0036-2308: DO YOU MEAN TO SAY HE'S A (KLEPTOMANIAC->CLEPTOMANIA) LIKE COUSIN SNATCHER +7105-2340-0037-2309: BRAVE LITTLE WOMAN SAID PETER WITH A GASP OF RELIEF I COULD NEVER HAVE DONE IT +7902-96591-0000-0: (I AM->I'M) FROM THE CUTTER LYING OFF THE COAST +7902-96591-0001-1: DON'T CRY HE SAID I WAS OBLIGED TO COME +7902-96591-0002-2: AND AND YOU HAVE NOT FOUND OUT ANYTHING CAME IN QUICK FRIGHTENED TONES +7902-96591-0003-3: I WISH YOU WOULD BELIEVE ME THAT I AM IN AS GREAT TROUBLE ABOUT IT AS YOU ARE +7902-96591-0004-4: THAT MY FATHER SIR RISDON (GRAEME HAS->GRAHAM) SMUGGLED GOODS HERE +7902-96591-0005-5: HE COULD NOT HELP IT HE (HATES THE SMUGGLERS->HATE THIS MOTHERS) YOU SHALL NOT TELL +7902-96591-0006-6: PRAY PRAY SAY YOU WILL NOT (ARCHY->ARCHIE) WAS SILENT +7902-96591-0007-7: THEN AS (ARCHY->ARCHIE) STOOD IN THE DARK LITERALLY AGHAST WITH ASTONISHMENT HE HEARD THE FAINT RUSTLING ONCE MORE AND AGAIN ALL WAS SILENT +7902-96591-0008-8: HE LAUGHED BUT IT WAS A CURIOUS KIND OF LAUGH FULL OF VEXATION INJURED (AMOUR PROPRE->AMOPRA) AS THE FRENCH (CALL OUR->CALLER) LOVE OF (OUR->HER) OWN DIGNITY OF WHICH (ARCHIBALD RAYSTOKE->ARQUEBALD RAYSTROKE) IN THE FULL FLUSH OF HIS YOUNG BELIEF IN HIS IMPORTANCE AS A BRITISH OFFICER HAD A PRETTY GOOD STOCK +7902-96591-0009-9: (IT->AND) ALL COMES OF DRESSING UP IN (THIS->THE) STUPID WAY LIKE A ROUGH FISHER LAD +7902-96591-0010-10: COLD WATER CAME ON THIS IDEA DIRECTLY AS HE RECALLED THE FACT THAT THE DARKNESS WAS INTENSE AND CELIA COULD NOT HAVE SEEN HIM +7902-96591-0011-11: I'LL SOON SHOW THEM THAT I AM NOT GOING TO BE PLAYED WITH +7902-96591-0012-12: FOR IT SUDDENLY OCCURRED TO HIM THAT HE WAS NOT ONLY A PRISONER BUT A PRISONER IN THE POWER OF A VERY RECKLESS SET OF PEOPLE (WHO->AND) WOULD STOP AT NOTHING +7902-96591-0013-13: NO HE THOUGHT TO HIMSELF I DON'T BELIEVE THEY WOULD KILL ME BUT THEY WOULD KNOCK ME ABOUT +7902-96591-0014-14: THE (KICK HE->KICKY) HAD RECEIVED WAS A 
FORETASTE OF WHAT HE MIGHT EXPECT AND AFTER A LITTLE CONSIDERATION HE CAME TO THE CONCLUSION THAT HIS DUTY WAS TO ESCAPE AND GET BACK TO THE CUTTER AS QUICKLY AS HE COULD +7902-96591-0015-15: TO DO THIS HE MUST SCHEME LIE HID TILL MORNING (THEN->THAN) MAKE FOR THE NEAREST POINT (AND->A) SIGNAL FOR HELP UNLESS A BOAT'S CREW WERE ALREADY (SEARCHING->SURGING) FOR HIM HOW TO ESCAPE +7902-96591-0016-16: THE WINDOW WAS BARRED BUT HE WENT TO IT AND TRIED THE BARS ONE BY ONE TO FIND THEM ALL SOLIDLY FITTED INTO THE STONE SILL +7902-96591-0017-17: NEXT MOMENT AS HE FELT HIS WAY ABOUT HIS HAND TOUCHED AN OLD FASHIONED MARBLE MANTELPIECE FIREPLACE CHIMNEY +7902-96591-0018-18: YES IF (OTHER WAYS->OTHERWAYS) FAILED HE COULD ESCAPE UP THE CHIMNEY +7902-96591-0019-19: NO THAT WAS TOO BAD HE COULD NOT DO THAT +7902-96591-0020-20: SYMPATHY AND PITY FOR THE DWELLERS IN THE (HOZE->HOSE) WERE COMPLETELY GONE NOW AND HE SET HIS TEETH FAST AND MENTALLY CALLED HIMSELF A (WEAK->WEEK) IDIOT FOR EVER THINKING ABOUT SUCH PEOPLE +7902-96591-0021-21: A NARROW TABLE AGAINST THE WALL IN TWO PLACES +7902-96591-0022-22: HE WENT AND TRIED TO FORCE HIS HEAD THROUGH RECALLING AS HE DID THAT WHERE A PERSON'S HEAD WOULD GO THE REST OF THE BODY WOULD PASS +7902-96591-0023-23: BUT THERE WAS NO CHANCE FOR HIS BODY THERE THE HEAD WOULD NOT GO FIRST +7902-96591-0024-24: A FELLOW WHO WAS SHUT UP IN (PRISON->PRISONED) FOR LIFE MIGHT DO IT HE SAID BUT NOT IN A CASE LIKE THIS +7902-96592-0000-25: SURE (YOU'VE LOOKED->YOU LOOK) ROUND EVERYWHERE BOY YES FATHER QUITE +7902-96592-0001-26: I'M GOING HOME TO BREAKFAST +7902-96592-0002-27: SHALL I COME (TOO->TO) FATHER NO +7902-96592-0003-28: STOP HERE TILL SIR RISDON COMES DOWN AND TELL HIM I'M VERY SORRY THAT WE SHOULD HAVE CLEARED OUT LAST NIGHT ONLY A BORN FOOL SAW JERRY (NANDY'S LOBSTER BOAT->ANDY'S LOBSTERBOAT) COMING INTO THE COVE AND CAME RUNNING TO SAY IT WAS A PARTY FROM THE CUTTER YES FATHER +7902-96592-0004-29: TELL HIM NOT TO BE UNEASY TIS ALL RIGHT AND I'LL HAVE EVERYTHING CLEAR AWAY TO NIGHT +7902-96592-0005-30: THE DULL SOUND OF DEPARTING STEPS AND A LOW WHISTLING SOUND COMING DOWN THROUGH THE SKYLIGHT WINDOW INTO THE CABIN WHERE (ARCHY RAYSTOKE->ARCHIE RAY STROKE) LAY WITH HIS HEAVY EYELIDS PRESSED DOWN BY SLEEP +7902-96592-0006-31: WHAT A QUEER DREAM HE THOUGHT TO HIMSELF +7902-96592-0007-32: BUT HOW QUEER FOR MISTER (GURR->GORE) TO BE TALKING LIKE THAT (TO ANDREW TEAL->TOIL) THE BOY WHO (HELPED->HELPS) THE COOK +7902-96592-0008-33: AND WHY DID ANDY CALL MISTER (GURR FATHER->GRAF) +7902-96592-0009-34: THERE WAS AN INTERVAL OF THINKING OVER THIS (KNOTTY->NAUGHTY) QUESTION DURING WHICH THE LOW WHISTLING WENT ON +7902-96592-0010-35: AND I'M HUNGRY TOO (TIME I->TIE) WAS UP I SUPPOSE +7902-96592-0011-36: NO HE WAS NOT DREAMING FOR HE WAS LOOKING OUT ON THE SEA OVER WHICH A FAINT MIST HUNG LIKE WREATHS OF SMOKE +7902-96592-0012-37: WHAT DID THEY SAY FALSE ALARM TELL (SIR RISDON->SERVANTS AND) THEY WOULD CLEAR ALL AWAY TO NIGHT SEE IF ANYTHING HAD BEEN LEFT ABOUT LOBSTER (BOAT->WROTE) +7902-96592-0013-38: ONCE OUT OF THAT ROOM HE COULD RAN AND BY DAYLIGHT THE (SMUGGLERS->SMUGGERS) DARE NOT HUNT HIM DOWN +7902-96592-0014-39: OH THOSE BARS HE MENTALLY EXCLAIMED AND HE WAS ADVANCING (TOWARD->TOWARDS) THEM WHEN JUST AS HE DREW NEAR THERE WAS A RUSTLING NOISE UNDER THE WINDOW A COUPLE OF HANDS SEIZED THE BARS THERE WAS A SCRATCHING OF BOOT TOES AGAINST STONE WORK AND RAM'S FACE APPEARED TO GAZE INTO THE ROOM BY INTENTION BUT INTO THE ASTONISHED COUNTENANCE OF THE YOUNG 
(MIDSHIPMAN->MITCHIPMAN) INSTEAD +7902-96592-0015-40: (RAM->ROOM) WAS THE FIRST TO RECOVER FROM HIS SURPRISE +7902-96592-0016-41: HULLO HE SAID WHO ARE YOU +7902-96592-0017-42: GO ROUND AND OPEN THE DOOR I WAS SHUT IN LAST NIGHT BY MISTAKE +7902-96592-0018-43: I SAW YOU LAST NIGHT AND WONDERED WHOSE BOY YOU WAS +7902-96592-0019-44: IT WAS (YOU->YOUR) FATHER KICKED FOR SHIRKING AND MY WELL I HARDLY KNOWED YOU +7902-96592-0020-45: NONSENSE +7902-96592-0021-46: WON'T DO SAID RAM (GRINNING->GRINNIE) +7902-96592-0022-47: THINK I DON'T KNOW YOU MISTER ORFICER +7902-96592-0023-48: WON'T DO SAID (RAM->RUM) QUICKLY I KNOW YOU +7902-96592-0024-49: (BEEN->THEN) PLAYING THE SPY THAT'S WHAT YOU'VE BEEN DOING WHO LOCKED YOU IN +7902-96592-0025-50: (ARCHY->ARCHIE) STEPPED BACK TO THE DOOR LISTENING BUT THERE WAS NOT A SOUND +7902-96592-0026-51: HE HAS GONE TO GIVE THE ALARM THOUGHT THE PRISONER AND HE LOOKED EXCITEDLY ROUND FOR A WAY OF ESCAPE +7902-96592-0027-52: NOTHING BUT THE CHIMNEY PRESENTED ITSELF +7902-96592-0028-53: A HAPPY INSPIRATION HAD COME AND PLACING ONE HAND UPON HIS (BREAST->CHEST) HE THRUST IN THE OTHER GAVE A TUG AND DREW OUT HIS LITTLE CURVED DIRK GLANCED AT THE EDGE RAN TO THE WINDOW AND BEGAN TO CUT AT ONE OF THE BARS (LABOUR->LABOR) IN VAIN +7902-96592-0029-54: HE DIVIDED THE (PAINT->PAIN) AND PRODUCED A FEW SQUEAKS (AND->IN) GRATING SOUNDS AS HE (REALISED->REALIZED) THAT THE ATTEMPT WAS MADNESS +7902-96592-0030-55: THE RESULT WAS NOT VERY SATISFACTORY BUT SUFFICIENTLY SO TO MAKE HIM ESSAY THE BAR OF THE WINDOW ONCE MORE PRODUCING A GRATING (EAR ASSAILING->IRRESCELLING) SOUND AS HE FOUND THAT NOW HE DID MAKE A LITTLE IMPRESSION SO LITTLE THOUGH THAT THE PROBABILITY WAS IF HE KEPT ON WORKING WELL FOR TWENTY FOUR HOURS HE WOULD NOT GET THROUGH +7902-96592-0031-56: BUT AT THE END OF FIVE MINUTES HE STOPPED AND THRUST BACK THE (DIRK->DARK) INTO ITS SHEATH +7902-96592-0032-57: NO I CAN'T PART WITH THAT HA HA (HA->*) LAUGHED THE BOY JEERINGLY +7902-96592-0033-58: (BUT I'LL->BLOW) YES I'LL GIVE YOU A GUINEA IF YOU WILL LET ME OUT +7902-96592-0034-59: (GUINEA SAID->GUINEAS OF) THE BOY THINK (I'D->I'LL) DO IT FOR A GUINEA WELL THEN (TWO->TOO) +7902-96592-0035-60: BE QUICK THERE'S A GOOD FELLOW I WANT TO GET AWAY AT ONCE +7902-96592-0036-61: NOT YOU ONLY A SHAM +7902-96592-0037-62: WHY (YOUR->YOU'RE) CLOTHES DON'T FIT YOU AND YOUR (CAP'S->CAPS) PUT ON ALL (SKEW REW->SKEERO) +7902-96592-0038-63: NEVER MIND ABOUT THAT LET ME OUT OF THIS PLACE +7902-96592-0039-64: I TOLD YOU A FISHER BOY CRIED (ARCHY->ARCHIE) IMPATIENTLY BUT TRYING NOT TO OFFEND HIS VISITOR WHO POSSESSED THE POWER OF CONFERRING FREEDOM BY SPEAKING SHARPLY +7902-96592-0040-65: NOT YOU LOOK LIKE A WILD BEAST IN A CAGE LIKE A MONKEY YOU INSOLENT +7902-96592-0041-66: (ARCHY->ARCHIE) CHECKED HIMSELF (AND->IN) THE BOY LAUGHED +7902-96592-0042-67: IT WAS YOUR TURN YESTERDAY IT'S MINE TO DAY WHAT A GAME +7902-96592-0043-68: YOU LAUGHED AND FLEERED AT ME WHEN I WAS ON THE CUTTER'S DECK +7902-96592-0044-69: I SAY YOU DO LOOK (*->LIKE) A (RUM UN->ROMAN) JUST LIKE A BIG MONKEY IN A SHOW +7902-96592-0045-70: RAM SHOWED HIS WHITE TEETH AS HE BURST OUT WITH A LONG LOW FIT OF LAUGHTER +7902-96592-0046-71: (YOU ROPE'S END->EUPS AND) ME HE SAID +7902-96592-0047-72: WHY I COULD TIE YOU UP IN A KNOT AND HEAVE YOU OFF THE CLIFF ANY DAY WHAT A GAME +7902-96592-0048-73: BIT OF A MIDDY FED ON (*->A) SALT TACK AND (WEEVILLY->WEEBLY) BISCUIT TALK OF GIVING ME (ROPE'S END->ROPES AND) +7902-96592-0049-74: ONCE MORE WILL YOU COME AND LET ME OUT NO 
+7902-96592-0050-75: TO HIS ASTONISHMENT THE BOY DID NOT FLINCH BUT THRUST HIS OWN ARMS THROUGH (PLACING->REPLACING) THEM ABOUT THE MIDDY'S WAIST CLENCHING HIS (HANDS->HAND) BEHIND AND UTTERING A SHARP WHISTLE +7902-96594-0000-76: (SEEMED IN GOOD SPIRITS->SEEMING EXPERIENCE) LAST NIGHT MISTER GURR (EH->HE) +7902-96594-0001-77: YES SIR BUT HE MAY TURN (UP ON->UPON) THE CLIFF AT ANY MOMENT +7902-96594-0002-78: YES MEN QUITE READY YES SIR +7902-96594-0003-79: (THAT'S->IT'S) RIGHT OF COURSE WELL ARMED +7902-96594-0004-80: SOON AS THE SIGNAL COMES WE SHALL PUSH OFF +7902-96594-0005-81: (AWKWARD->OF HER) BIT (O->OF) COUNTRY SIR SIX MILES ROW (BEFORE->FOR) YOU CAN FIND A PLACE TO LAND +7902-96594-0006-82: SO SHALL WE (YET->GET) SIR +7902-96594-0007-83: YOU DON'T THINK MISTER (GURR->GIRL) THAT THEY WOULD DARE TO INJURE HIM IF HE WAS SO UNLUCKY AS TO BE CAUGHT +7902-96594-0008-84: WELL SIR SAID THE MASTER HESITATING SMUGGLERS (ARE->OR) SMUGGLERS +7902-96594-0009-85: CERTAINLY SIR SMUGGLERS ARE SMUGGLERS INDEED +7902-96594-0010-86: (BEG->THEY) PARDON SIR DIDN'T MEAN ANY HARM +7902-96594-0011-87: I'M GETTING VERY ANXIOUS ABOUT MISTER (RAYSTOKE->RAYSTROKE) START AT ONCE SIR +7902-96594-0012-88: NO (WAIT->WHERE) ANOTHER HALF HOUR +7902-96594-0013-89: VERY ILL (ADVISED->ADVICE) THING TO DO +7902-96594-0014-90: (THEN->THAT) I MUST REQUEST THAT YOU WILL NOT MAKE IT AGAIN VERY TRUE +7902-96594-0015-91: (AWK WARD->AWKWARD) MISTER (GURR->GARR) AWKWARD +7902-96594-0016-92: YES SIR OF COURSE +7902-96594-0017-93: SAY (AWK WARD->AWKWARD) IN (*->THE) FUTURE NOT (AWK'ARD->UPWARD) +7902-96594-0018-94: I MEAN ALL ALONE BY MYSELF SIR +7902-96594-0019-95: WHAT FOR THERE AREN'T A PUBLIC HOUSE FOR TEN MILES DIDN'T MEAN THAT +7902-96594-0020-96: THEN WHAT DID (YOU MEAN->JULIA) SPEAK OUT AND DON'T DO THE DOUBLE (SHUFFLE->SHOVEL) ALL OVER MY CLEAN DECK NO SIR +7902-96594-0021-97: (HOPPING->HAVING) ABOUT (LIKE A CAT->THE GUQUET) ON HOT BRICKS +7902-96594-0022-98: NOW THEN WHY DO YOU WANT TO GO (ASHORE->SHORE) +7902-96594-0023-99: (BEG->THEY) PARDON DIDN'T MEAN (NOWT->THAT) SIR SAID THE SAILOR TOUCHING HIS FORELOCK +7902-96594-0024-100: YES SIR SAID THE (MAN HUMBLY->MADAMELY) SHALL I GO AT ONCE SIR +7902-96594-0025-101: NO WAIT +7902-96594-0026-102: (KEEP A->HE WAS) SHARP LOOK OUT ON THE CLIFF (TO SEE IF->AS EVEN) MISTER (RAYSTOKE->RACE JOKE) IS MAKING SIGNALS FOR A BOAT +7902-96594-0027-103: HE SWUNG ROUND WALKED AFT AND BEGAN SWEEPING (THE SHORE->ASHORE) AGAIN WITH HIS GLASS WHILE THE MASTER AND DICK EXCHANGED GLANCES WHICH MEANT A GREAT DEAL +7902-96594-0028-104: AT LAST THE LITTLE (LIEUTENANT->TANNIC) COULD BEAR THE ANXIETY NO LONGER +7902-96594-0029-105: (PIPE->PEP) AWAY THE MEN TO THAT BOAT THERE HE SAID AND AS THE CREW SPRANG IN +7902-96594-0030-106: NOW MISTER (GURR->GURG) HE SAID I'M ONLY GOING TO SAY ONE THING TO YOU IN THE WAY OF INSTRUCTIONS YES SIR +7902-96594-0031-107: BEG PARDON SIR SAID THE MASTER DEPRECATINGLY +7902-96594-0032-108: STEADY MY (LADS->LAD) STEADY CRIED THE MASTER KEEP STROKE AND THEN HE BEGAN TO MAKE PLANS AS TO HIS FIRST PROCEEDINGS (ON->I'M) GETTING ASHORE +7902-96595-0000-109: SAY (MESTER GURR->MISTER GIRK) SAID DICK AFTER ONE OF THESE SEARCHES HE WOULDN'T RUN AWAY WHAT +7902-96595-0001-110: MISTER RAYSTOKE SIR DON'T BE A FOOL +7902-96595-0002-111: WHAT (CHUCKED HIM OFF YONDER->TECHTAMORPHYANDER) +7902-96595-0003-112: (GURR->GER) GLANCED ROUND TO SEE IF THE MEN WERE LOOKING AND THEN SAID (RATHER->WHETHER) HUSKILY (BUT->BE) KINDLY +7902-96595-0004-113: AH EJACULATED DICK SADLY 
+7902-96595-0005-114: SAY (MESTER GURR SIR->MISTER GURSER) WHICH THANKFUL I AM (TO->FOR) YOU FOR SPEAKING SO BUT YOU DON'T (REALLY->*) THINK AS HE HAS COME TO HARM +7902-96595-0006-115: I HOPE NOT DICK I HOPE NOT BUT (SMUGGLERS->SMOKE WAS) DON'T STAND AT ANYTHING SOMETIMES +7902-96595-0007-116: I DO ASSURE YOU THERE'S NOTHING HERE BUT WHAT YOU MAY SEE +7902-96595-0008-117: IF (YOU'D->YOU) LET ME FINISH YOU'D KNOW SAID (GURR GRUFFLY->GURG ROUGHLY) ONE OF OUR BOYS IS MISSING SEEN (HIM->EM) UP HERE +7902-96595-0009-118: BOY (BOUT->ABOUT) SEVENTEEN WITH A RED CAP NO SIR INDEED (I'VE NOT->OF NONE) +7902-96595-0010-119: DON'T KNOW AS HE HAS BEEN SEEN ABOUT HERE DO YOU SAID (GURR->GIRL) LOOKING AT HER SEARCHINGLY NO SIR +7902-96595-0011-120: IF SHE KNEW EVIL HAD COME TO THE POOR LAD HER FACE WOULD TELL TALES LIKE PRINT +7902-96595-0012-121: I SAID A LAD (BOUT->ABOUT) SEVENTEEN (IN->AND) A RED (CAP LIKE->CATHOLIC) YOURS SAID (GURR->GREW) VERY SHORTLY +7902-96595-0013-122: THE MAN SHOOK HIS HEAD AND STARED AS IF HE DIDN'T HALF UNDERSTAND THE DRIFT OF (WHAT WAS->OGA) SAID +7902-96595-0014-123: HERE (MY LAD->MILAD) WHERE'S YOUR MASTER +7902-96595-0015-124: (EH->THEY) I SAY WHERE'S YOUR MASTER +7902-96595-0016-125: (GURR->GERT) TURNED AWAY IMPATIENTLY AGAIN AND SIGNING TO HIS MEN TO FOLLOW THEY ALL BEGAN TO TRAMP UP THE STEEP TRACK LEADING TOWARD THE (HOZE->HOSE) WITH THE (RABBITS->RABBIT) SCUTTLING AWAY AMONG THE (FURZE->FIRS) AND SHOWING THEIR WHITE COTTONY TAILS FOR A MOMENT AS THEY DARTED DOWN INTO THEIR HOLES +7902-96595-0017-126: I DUNNO MUTTERED DICK AND A (MAN->MEN) CAN'T BE SURE +7902-96595-0018-127: (GURR->DUR) SALUTED (AND STATED->INSTEAD OF) HIS BUSINESS WHILE THE BARONET WHO HAD TURNED (SALLOWER->SALARY) AND MORE (CAREWORN->CARE MORE) THAN HIS LOT DREW A BREATH (*->OF) FULL OF RELIEF ONE OF YOUR (SHIP BOYS->VOYS) HE SAID +7902-96595-0019-128: A LAD LOOKING LIKE A COMMON SAILOR AND WEARING A RED CAP NO SAID SIR RISDON +7902-96595-0020-129: I HAVE SEEN NO ONE ANSWERING TO THE DESCRIPTION HERE +7902-96595-0021-130: (BEG PARDON SIR BUT CAN->BIG PARTICER BECAUSE) YOU AS (A GENTLEMAN->GENTLEMEN) ASSURE ME THAT HE IS NOT HERE CERTAINLY SAID SIR RISDON +7902-96595-0022-131: SURELY CRIED SIR RISDON EXCITEDLY +7902-96595-0023-132: SIR (RISDON->RICHARD) WAS SILENT +7902-96595-0024-133: LADY (GRAEME->GRAHAM) LOOKED GHASTLY +7902-96595-0025-134: YOU DO NOT KNOW NO +7975-280057-0000-1008: THESE HATREDS WERE SOON TO MAKE TROUBLE FOR ME OF WHICH I HAD NEVER DREAMED +7975-280057-0001-1009: HENRY WASHINGTON YOUNGER MY FATHER REPRESENTED JACKSON COUNTY THREE TIMES IN THE LEGISLATURE AND WAS ALSO (*->A) JUDGE OF THE COUNTY COURT +7975-280057-0002-1010: MY MOTHER WHO WAS (BURSHEBA FRISTOE->PERCEIVED HER FOR STOVE) OF INDEPENDENCE WAS (THE->A) DAUGHTER OF RICHARD (FRISTOE->CRISTO) WHO FOUGHT UNDER GENERAL ANDREW JACKSON AT NEW ORLEANS JACKSON COUNTY HAVING BEEN SO NAMED (AT->IN) MY GRANDFATHER (FRISTOE'S->FRUSTES) INSISTENCE +7975-280057-0003-1011: I CANNOT REMEMBER WHEN I DID NOT KNOW HOW TO SHOOT +7975-280057-0004-1012: MY BROTHER JAMES WAS BORN JANUARY FIFTEENTH EIGHTEEN FORTY EIGHT JOHN IN EIGHTEEN FIFTY ONE AND ROBERT IN DECEMBER EIGHTEEN FIFTY THREE +7975-280057-0005-1013: MY ELDEST BROTHER RICHARD DIED IN EIGHTEEN SIXTY +7975-280057-0006-1014: MY FATHER WAS IN THE EMPLOY OF THE UNITED STATES GOVERNMENT AND HAD THE (MAIL->MALE) CONTRACT FOR FIVE HUNDRED MILES +7975-280057-0007-1015: HE HAD STARTED BACK TO HARRISONVILLE IN A BUGGY BUT WAS WAYLAID ONE MILE SOUTH OF (WESTPORT->WESTBURT) A SUBURB OF 
(KANSAS->KANSA) CITY AND BRUTALLY MURDERED FALLING OUT OF HIS BUGGY INTO THE ROAD WITH THREE MORTAL BULLET WOUNDS +7975-280057-0008-1016: (MISSUS->MISS) WASHINGTON (WELLS->WALES) AND HER SON SAMUEL ON THE ROAD HOME FROM (KANSAS->KANSA) CITY TO (LEE'S SUMMIT->LEE SOMEWHAT) RECOGNIZED THE BODY AS THAT OF MY FATHER +7975-280057-0009-1017: (MISSUS WELLS->MISS WELL) STAYED TO GUARD THE REMAINS WHILE HER (SON->SOON) CARRIED THE NEWS OF THE MURDER TO COLONEL PEABODY OF THE FEDERAL COMMAND WHO WAS THEN (IN CAMP->ENCAMP) AT KANSAS CITY +7975-280057-0010-1018: (MISSUS->MISS) MC (CORKLE->CORKEL) JUMPED FROM THE WINDOW OF THE HOUSE AND ESCAPED +7975-280057-0011-1019: AS THE RAIDERS (LEFT->LIVED) ONE OF THEM SHOUTED +7975-280057-0012-1020: NOW (OLD->*) LADY CALL ON YOUR PROTECTORS WHY DON'T YOU CALL ON (COLE->CO) YOUNGER NOW +7975-280057-0013-1021: EVERY KNOT REPRESENTED A HUMAN LIFE +7975-280057-0014-1022: BUT SHE FAILED TO FIND THE COMFORT SHE SOUGHT FOR ANNOYANCES CONTINUED IN A MORE AGGRAVATED FORM +7975-280057-0015-1023: TWO MONTHS AFTER THIS INCIDENT THE SAME PERSECUTORS AGAIN ENTERED OUR HOME IN THE (DEAD->DAY) OF THE NIGHT AND AT THE POINT OF A PISTOL TRIED TO FORCE MY MOTHER TO SET FIRE TO HER OWN HOME +7975-280057-0016-1024: I HAVE ALWAYS FELT THAT THE EXPOSURE TO WHICH SHE WAS SUBJECTED ON THIS CRUEL JOURNEY TOO HARD EVEN FOR A MAN TO TAKE WAS (THE->A) DIRECT CAUSE OF HER DEATH +7975-280057-0017-1025: FROM (HARRISONVILLE->HARRISON BILL) SHE WENT TO (WAVERLY->WAVERLEY) WHERE SHE WAS (HOUNDED->HOUNDY) CONTINUALLY +7975-280057-0018-1026: ONE OF THE CONDITIONS UPON WHICH HER LIFE WAS SPARED WAS THAT SHE WOULD REPORT AT (LEXINGTON WEEKLY->LESSINGTON WEAKLY) +7975-280057-0019-1027: ONE OF MY OLD SCHOOL TEACHERS WHOM I HAVE NEVER SEEN SINCE THE SPRING (OR->OF) SUMMER OF EIGHTEEN SIXTY TWO IS STEPHEN B ELKINS SENATOR FROM WEST VIRGINIA +7975-280057-0020-1028: WHEN I WAS TAKEN PRISONER I EXPECTED TO BE SHOT WITHOUT CEREMONY +7975-280063-0000-1058: WE TOOK THE OATH PERHAPS THREE HUNDRED OF US DOWN ON LUTHER MASON'S FARM A FEW MILES FROM WHERE I NOW (WRITE->RIGHT) WHERE COLONEL (HAYS->HAYES) HAD ENCAMPED AFTER INDEPENDENCE +7975-280063-0001-1059: (BOONE MUIR->WHOM YOU) AND MYSELF (MET->MAKE) COFFEE AND THE REST BELOW ROSE HILL ON GRAND RIVER +7975-280063-0002-1060: ACCORDINGLY I WAS SHORTLY AWAKENED TO ACCOMPANY HIM (TO LONE->THE LONG) JACK WHERE HE WOULD PERSONALLY MAKE KNOWN THE SITUATION TO THE OTHER COLONELS +7975-280063-0003-1061: FOSTER HAD NEARLY ONE THOUSAND (CAVALRYMEN->CAVERN) AND TWO PIECES OF (RABB'S->RABS) INDIANA BATTERY THAT HAD ALREADY MADE FOR ITSELF A NAME FOR HARD FIGHTING +7975-280063-0004-1062: COME IN COLONEL (HAYS->HAYES) EXCLAIMED COLONEL (COCKRELL->COCKROL) +7975-280063-0005-1063: I THINK HE'LL BE RATHER (TOUGH MEAT->TO HAVE ME) FOR BREAKFAST I REPLIED HE MIGHT BE ALL (RIGHT FOR->RIPER) DINNER +7975-280063-0006-1064: (JACKMAN->JACKMEN) WITH A PARTY OF THIRTY SEASONED MEN CHARGED THE INDIANA GUNS AND CAPTURED THEM BUT MAJOR (FOSTER LED->FOXTER LIT) A GALLANT CHARGE AGAINST THE INVADERS AND RECAPTURED THE PIECES +7975-280063-0007-1065: WE WERE OUT OF AMMUNITION AND WERE HELPLESS HAD THE FIGHT BEEN PRESSED +7975-280063-0008-1066: THEY DID MARK MY CLOTHES IN ONE OR TWO PLACES HOWEVER +7975-280063-0009-1067: (MAJOR->MEASURE) FOSTER IN A LETTER TO (JUDGE GEORGE M BENNETT->JOE GEORGIUM BENNET) OF MINNEAPOLIS SAID +7975-280063-0010-1068: I WAS TOLD BY SOME OF OUR MEN FROM THE WESTERN BORDER OF THE STATE THAT THEY RECOGNIZED (THE->A) DARING (YOUNG RIDER AS COLE->OWN WRITER'S COAL) 
YOUNGER +7975-280063-0011-1069: ABOUT NINE THIRTY A M I WAS SHOT DOWN +7975-280063-0012-1070: THE WOUNDED OF BOTH FORCES WERE GATHERED UP AND WERE PLACED IN HOUSES +7975-280076-0000-1029: ALTHOUGH EVERY BOOK (PURPORTING->PORPORTING) TO (NARRATE THE->THEIR EIGHTH) LIVES OF THE YOUNGER BROTHERS HAS TOLD (OF->THAT) THE LIBERTY ROBBERY AND IMPLIED THAT WE HAD A PART IN IT THE YOUNGERS WERE NOT SUSPECTED AT THAT TIME NOR FOR A LONG TIME AFTERWARD +7975-280076-0001-1030: IT WAS CLAIMED BY PEOPLE OF LIBERTY THAT THEY (POSITIVELY->POSIT TILL WE) RECOGNIZED AMONG THE ROBBERS (OLL SHEPHERD->ALL SHEPARD) RED (MONKERS->MOCKERS) AND BUD (PENCE->PANTS) WHO HAD SEEN SERVICE WITH (QUANTRELL->QUANTRAIL) +7975-280076-0002-1031: THIS (RAID->RAY) WAS ACCOMPANIED BY (BLOODSHED->BLOCHHEAD) JUDGE MC (LAIN->LANE) THE BANKER BEING SHOT THOUGH NOT FATALLY +7975-280076-0003-1032: (NO->THOUGH) WARRANT WAS ISSUED FOR THE YOUNGERS BUT SUBSEQUENT HISTORIANS HAVE INFERENTIALLY AT LEAST ACCUSED US OF TAKING PART BUT AS I SAID BEFORE THERE IS NO TRUTH IN THE ACCUSATION +7975-280076-0004-1033: JUNE THIRD EIGHTEEN SEVENTY ONE (OBOCOCK BROTHERS->OBEK BROTHER'S) BANK AT (CORYDON->CROYD AND) IOWA WAS ROBBED OF FORTY THOUSAND DOLLARS BY SEVEN MEN IN BROAD DAYLIGHT +7975-280076-0005-1034: IT WAS (CHARGED->CHARGE) THAT (ARTHUR MC->AWFUL MAC) COY OR A (C MC COY AND->SEMICA) MYSELF HAD BEEN PARTICIPANTS IN THE GAD'S HILL AFFAIR AND THE TWO STAGE (ROBBERIES->ROBBERS) +7975-280076-0006-1035: THE (PARTS->PART) OF THIS LETTER NOW (RELEVANT ARE->ELEVANT OUR) AS FOLLOWS +7975-280076-0007-1036: YOU MAY USE THIS LETTER IN YOUR OWN WAY +7975-280076-0008-1037: I WILL GIVE YOU THIS OUTLINE AND SKETCH OF MY WHEREABOUTS AND ACTIONS AT THE TIME OF CERTAIN (ROBBERIES->ROBBERS) WITH WHICH I AM CHARGED +7975-280076-0009-1038: AT THE TIME OF THE (GALLATIN->YELLED AND) BANK ROBBERY I WAS GATHERING CATTLE (IN->AND) ELLIS COUNTY TEXAS (CATTLE THAT I BOUGHT->CATTLET ABOUT) FROM (PLEAS TAYLOR->PLAYERS TAILOR) AND RECTOR +7975-280076-0010-1039: THIS CAN BE PROVED BY BOTH OF THEM ALSO BY (SHERIFF BARKLEY->HYR PARKLEY) AND FIFTY OTHER RESPECTABLE MEN OF THAT COUNTY +7975-280076-0011-1040: (I BROUGHT->ABRUPT) THE CATTLE (TO->THE) KANSAS (THAT->SET) FALL AND REMAINED IN SAINT CLAIR COUNTY UNTIL FEBRUARY +7975-280076-0012-1041: (I->AND) THEN WENT TO (ARKANSAS AND->OUR CONCERN) RETURNED TO SAINT CLAIR COUNTY ABOUT THE FIRST OF MAY +7975-280076-0013-1042: (I->AND) WENT TO KANSAS WHERE (OUR CATTLE->KETTLE) WERE IN (WOODSON->WOODS AND) COUNTY AT COLONEL (RIDGE'S->RIDGES) +7975-280076-0014-1043: DURING THE SUMMER I WAS EITHER IN SAINT CLAIR (JACKSON OR->JACK'S UNDER) KANSAS BUT AS THERE WAS NO ROBBERY COMMITTED THAT SUMMER IT MAKES NO DIFFERENCE WHERE I WAS +7975-280076-0015-1044: (I->AND) WENT THROUGH INDEPENDENCE AND FROM THERE TO ACE (WEBB'S->WHIPS) +7975-280076-0016-1045: THERE I TOOK DINNER AND THEN WENT TO DOCTOR L W (TWYMAN'S->TWEMINS) +7975-280076-0017-1046: OUR BUSINESS THERE WAS TO SEE E (P->*) WEST HE WAS NOT AT HOME BUT THE FAMILY WILL REMEMBER THAT WE WERE THERE +7975-280076-0018-1047: WE CROSSED ON THE BRIDGE (STAYED IN->STATING) THE CITY ALL NIGHT AND THE NEXT MORNING WE RODE UP (THROUGH->TO) THE CITY +7975-280076-0019-1048: (I MET->AMID) SEVERAL OF MY FRIENDS AMONG THEM WAS BOB (HUDSPETH->HUSBITH) +7975-280076-0020-1049: WE WERE NOT ON (GOOD->THE) TERMS AT THE TIME NOR HAVE WE BEEN FOR SEVERAL YEARS +7975-280076-0021-1050: POOR JOHN HE HAS BEEN HUNTED DOWN AND SHOT LIKE A WILD BEAST AND NEVER WAS A BOY MORE INNOCENT +7975-280076-0022-1051: DOCTOR L 
(LEWIS->LOOSE) WAS HIS PHYSICIAN +7975-280076-0023-1052: THERE WERE FIFTY OR A HUNDRED PERSONS THERE WHO WILL TESTIFY IN ANY COURT THAT JOHN AND I WERE THERE +7975-280076-0024-1053: (HELVIN->HELD AND) FICKLE AND WIFE OF GREENTON VALLEY WERE ATTENDING THE SPRINGS AT THAT TIME AND EITHER OF THEM WILL TESTIFY TO THE ABOVE FOR JOHN AND I (SAT->SET) IN FRONT OF MISTER SMITH WHILE HE WAS PREACHING AND WAS IN HIS COMPANY FOR A FEW MOMENTS TOGETHER WITH HIS WIFE AND MISTER AND (MISSUS->MISS) FICKLE AFTER (*->THE) SERVICE +7975-280076-0025-1054: ABOUT THE LAST OF DECEMBER EIGHTEEN SEVENTY THREE I ARRIVED IN (CARROLL PARISH->CAROL PARRISH) LOUISIANA +7975-280076-0026-1055: I STAYED THERE UNTIL THE EIGHTH OF FEBRUARY EIGHTEEN SEVENTY FOUR +7975-280076-0027-1056: I HAD NOT HEARD OF THAT WHEN I WROTE THE LETTER OF EIGHTEEN SEVENTY FOUR AND TO CORRECT ANY MISAPPREHENSION THAT MIGHT BE CREATED BY OMITTING IT I WILL SAY THAT AT (THAT->THE) TIME I WAS AT (NEOSHO->NEOSHIL OF) KANSAS WITH A DROVE OF CATTLE WHICH I SOLD TO MAJOR RAY +7975-280076-0028-1057: IT WAS IMMEDIATELY FOLLOWING THE ROCK ISLAND ROBBERY AT (ADAIR->EIGHT ER) IOWA THAT (THERE->THEIR) FIRST APPEARED A DELIBERATE ENLISTMENT OF SOME LOCAL PAPERS IN MISSOURI TO CONNECT US WITH THIS ROBBERY +7975-280084-0000-1090: I URGED ON THE BOYS (THAT->AT) WHATEVER (HAPPENED->HAPPEN) WE SHOULD NOT SHOOT ANY ONE +7975-280084-0001-1091: WHEN (MILLER AND->MILRON) I CROSSED THE BRIDGE THE THREE WERE ON SOME DRY (GOODS->GOOD) BOXES AT THE CORNER NEAR THE BANK AND AS SOON AS THEY SAW US WENT RIGHT INTO THE BANK INSTEAD OF WAITING FOR US TO GET THERE +7975-280084-0002-1092: WHEN WE CAME UP I (TOLD->TOO) MILLER TO SHUT THE BANK DOOR WHICH THEY HAD LEFT OPEN IN THEIR HURRY +7975-280084-0003-1093: J (S ALLEN->SALEN) WHOSE (HARDWARE STORE->HARDWORSTOE) WAS NEAR TRIED TO GO INTO THE BANK BUT MILLER ORDERED HIM AWAY AND HE RAN (AROUND->ROUND) THE CORNER SHOUTING +7975-280084-0004-1094: GET YOUR GUNS BOYS THEY'RE ROBBING THE BANK +7975-280084-0005-1095: AND I CALLED TO HIM TO GET INSIDE AT THE SAME TIME FIRING A PISTOL SHOT IN THE AIR AS A SIGNAL TO THE THREE BOYS AT THE BRIDGE THAT WE HAD BEEN DISCOVERED +7975-280084-0006-1096: ALMOST AT THIS INSTANT I HEARD A PISTOL SHOT IN THE BANK +7975-280084-0007-1097: (CHADWELL->SAID WELL) WOODS AND JIM RODE UP AND (JOINED US->JARS) SHOUTING TO (*->THE) PEOPLE IN THE STREET TO GET INSIDE AND FIRING THEIR PISTOLS TO EMPHASIZE THEIR COMMANDS +7975-280084-0008-1098: IF ANY OF OUR PARTY SHOT HIM IT MUST HAVE BEEN WOODS +7975-280084-0009-1099: MEANTIME THE STREET WAS GETTING UNCOMFORTABLY HOT +7975-280084-0010-1100: EVERY TIME I SAW ANY ONE WITH A BEAD ON ME I WOULD DROP OFF MY HORSE AND (TRY->TROUT) TO DRIVE THE (SHOOTER->SHEETTER) INSIDE BUT I COULD NOT SEE IN EVERY DIRECTION +7975-280084-0011-1101: DOCTOR (WHEELER->WHALER) WHO HAD GONE UPSTAIRS IN THE HOTEL SHOT MILLER AND HE LAY DYING IN THE STREET +7975-280084-0012-1102: CHANGING HIS PISTOL TO HIS LEFT HAND BOB RAN OUT AND MOUNTED MILLER'S (MARE->MAYOR) +7975-280084-0013-1103: (WHAT->BUT) KEPT YOU SO LONG (I ASKED PITTS->AS PITT) +7975-280084-0014-1104: AS TO THE REST OF THE AFFAIR INSIDE THE BANK I TAKE THE ACCOUNT OF A (NORTHFIELD->NORTH FIELD) NARRATOR +7975-280084-0015-1105: WHERE'S THE MONEY OUTSIDE (THE->TO) SAFE BOB ASKED +7975-280084-0016-1106: THE (SHUTTERS->SHOUTERS) WERE CLOSED AND THIS CAUSED BUNKER AN (INSTANT'S->INSTANCE) DELAY THAT WAS ALMOST FATAL (PITTS->FITZ) CHASED HIM WITH A BULLET +7975-280084-0017-1107: THE FIRST ONE MISSED HIM BUT THE SECOND WENT THROUGH 
HIS RIGHT SHOULDER +7975-280085-0000-1071: THAT NIGHT IT STARTED TO RAIN AND WE WORE OUT OUR HORSES +7975-280085-0001-1072: FRIDAY WE MOVED TOWARD WATERVILLE AND FRIDAY NIGHT WE (CAMPED->CAN'T) BETWEEN ELYSIAN AND GERMAN LAKE +7975-280085-0002-1073: (BOB'S SHATTERED ELBOW WAS->BOB SHUTTERED ELBOWS) REQUIRING FREQUENT ATTENTION AND THAT NIGHT WE MADE ONLY NINE MILES AND MONDAY MONDAY NIGHT (AND->IN) TUESDAY WE SPENT IN A DESERTED FARM HOUSE CLOSE TO (MANKATO->MAIN CATO) +7975-280085-0003-1074: THAT (DAY->THEY) A MAN NAMED DUNNING DISCOVERED US AND WE TOOK HIM PRISONER +7975-280085-0004-1075: FINALLY WE ADMINISTERED TO HIM AN OATH NOT TO BETRAY OUR WHEREABOUTS UNTIL WE HAD TIME TO MAKE OUR ESCAPE AND HE AGREED NOT TO +7975-280085-0005-1076: NO SOONER HOWEVER WAS HE RELEASED THAN HE MADE (POSTHASTE->POST TASTE) INTO (MANKATO->MANCATEO) TO ANNOUNCE OUR PRESENCE AND IN A FEW MINUTES ANOTHER POSSE WAS LOOKING FOR US +7975-280085-0006-1077: THE WHISTLE ON THE (OIL MILL BLEW->OARMEIL BLUE) AND WE FEARED THAT IT WAS A SIGNAL THAT HAD BEEN AGREED UPON TO ALARM THE TOWN IN CASE WE WERE OBSERVED BUT WE WERE NOT MOLESTED +7975-280085-0007-1078: HE HAD TO SLEEP WITH IT PILLOWED ON MY BREAST JIM BEING ALSO CRIPPLED WITH A WOUND IN HIS SHOULDER AND WE COULD NOT GET MUCH SLEEP +7975-280085-0008-1079: BUT THEY SOON AFTER GOT CLOSE ENOUGH SO THAT ONE OF THEM BROKE MY WALKING STICK WITH A SHOT +7975-280085-0009-1080: WE WERE (IN SIGHT->INSIDE) OF OUR LONG SOUGHT HORSES WHEN THEY CUT US OFF FROM THE ANIMALS AND OUR LAST HOPE WAS GONE +7975-280085-0010-1081: SIX (STEPPED TO->STEPS OF) THE FRONT SHERIFF (GLISPIN->LISPIN) COLONEL T L (VOUGHT->VAULT) B M RICE G A BRADFORD C A (POMEROY->POMERALI) AND S (J SEVERSON->VERSON) +7975-280085-0011-1082: FORMING (IN->A) LINE FOUR PACES APART HE ORDERED THEM TO ADVANCE RAPIDLY AND CONCENTRATE THE FIRE OF THE WHOLE LINE THE INSTANT THE ROBBERS WERE DISCOVERED +7975-280085-0012-1083: MAKE FOR THE HORSES I SAID EVERY MAN FOR HIMSELF +7975-280085-0013-1084: (THERE IS->THERE'S) NO USE STOPPING TO PICK UP A COMRADE HERE FOR WE CAN'T GET HIM THROUGH THE LINE JUST (CHARGE THEM->SHARZAN) AND MAKE IT IF WE CAN +7975-280085-0014-1085: I GOT UP AS (THE->A) SIGNAL FOR THE CHARGE AND WE FIRED ONE VOLLEY +7975-280085-0015-1086: ONE OF THE FELLOWS IN THE OUTER LINE NOT BRAVE ENOUGH HIMSELF TO JOIN THE VOLUNTEERS WHO HAD COME IN TO (BEAT US OUT->BE DISOUT) WAS NOT DISPOSED TO BELIEVE IN THE SURRENDER AND HAD HIS GUN LEVELLED ON BOB IN SPITE OF THE HANDKERCHIEF WHICH WAS WAVING AS A FLAG OF TRUCE +7975-280085-0016-1087: SHERIFF (GLISPIN->GLISBON) OF (WATONWAN->WATERWIN) COUNTY WHO WAS TAKING BOB'S PISTOL FROM HIM WAS ALSO SHOUTING TO THE FELLOW +7975-280085-0017-1088: INCLUDING THOSE RECEIVED IN AND ON THE WAY FROM (NORTHFIELD->NORTH FIELD) I HAD ELEVEN (WOUNDS->WINDS) +7975-280085-0018-1089: (AND->IN) SHERIFF (GLISPIN'S->GLISBON'S) ORDER NOT TO SHOOT WAS THE BEGINNING OF THE (PROTECTORATE->PROTECTOR) THAT MINNESOTA PEOPLE ESTABLISHED OVER US +8131-117016-0000-1303: CAPTAIN (MURDOCH->MURDOCK) +8131-117016-0001-1304: BUT MARSPORT HAD FLOURISHED ENOUGH TO KILL IT OFF +8131-117016-0002-1305: SOME OF MARS LAWS DATED FROM THE TIME WHEN (LAW ENFORCEMENT->LAWN FORCEMENT) HAD BEEN HAMPERED BY LACK OF MEN RATHER THAN BY THE TYPE OF MEN +8131-117016-0003-1306: THE (STONEWALL->STONE WALL) GANG NUMBERED PERHAPS FIVE HUNDRED +8131-117016-0004-1307: EVEN (DERELICTS->DEAR ALEXE) AND FAILURES HAD TO EAT THERE WERE (STORES->STORIES) AND SHOPS THROUGHOUT THE DISTRICT WHICH EKED OUT SOME KIND OF A MARGINAL 
LIVING +8131-117016-0005-1308: THEY WERE SAFE FROM PROTECTION (RACKETEERS->RACKETERS) THERE NONE BOTHERED TO COME SO FAR OUT +8131-117016-0006-1309: THE SHOPKEEPERS AND SOME OF THE LESS UNFORTUNATE PEOPLE THERE HAD PROTESTED LOUD ENOUGH TO REACH CLEAR BACK TO EARTH +8131-117016-0007-1310: CAPTAIN (MURDOCH->MURDOCK) WAS AN UNKNOWN FACTOR AND NOW WAS ASKING FOR MORE MEN +8131-117016-0008-1311: THE PRESSURE WAS ENOUGH TO GET THEM FOR HIM +8131-117016-0009-1312: GORDON REPORTED FOR WORK WITH A SENSE OF THE BOTTOM FALLING OUT MIXED WITH A VAGUE RELIEF +8131-117016-0010-1313: I'VE GOT A FREE HAND AND WE'RE GOING TO RUN THIS THE WAY WE WOULD ON EARTH +8131-117016-0011-1314: YOUR JOB IS TO PROTECT THE CITIZENS HERE AND THAT MEANS (EVERYONE->EVERY ONE) NOT BREAKING THE LAWS WHETHER YOU FEEL LIKE IT OR NOT NO GRAFT +8131-117016-0012-1315: THE FIRST MAN MAKING A (SHAKEDOWN->SHAKE DOWN) WILL GET THE SAME TREATMENT WE'RE GOING TO USE ON THE (STONEWALL->STONE WALL) BOYS YOU'LL GET DOUBLE PAY HERE AND YOU CAN LIVE ON IT +8131-117016-0013-1316: HE PICKED OUT FIVE OF THE MEN INCLUDING GORDON YOU FIVE WILL COME WITH ME +8131-117016-0014-1317: THE REST OF YOU CAN TEAM UP ANY WAY YOU WANT (TONIGHT->TO NIGHT) PICK ANY (ROUTE THAT'S->ROW OF THIS) OPEN (OKAY MEN->O CAME IN) LET'S GO +8131-117016-0015-1318: (BRUCE GORDON->BORDON) GRINNED SLOWLY AS HE SWUNG THE STICK AND (MURDOCH'S->MARDOCK'S) EYES FELL ON HIM (EARTH COP->EARTHCOP) +8131-117016-0016-1319: TWO YEARS GORDON ADMITTED +8131-117016-0017-1320: FOR A SECOND GORDON CURSED HIMSELF +8131-117016-0018-1321: HE BEGAN WONDERING ABOUT SECURITY THEN +8131-117016-0019-1322: NOBODY HAD TRIED TO GET IN TOUCH WITH HIM +8131-117016-0020-1323: THERE WAS A CRUDE LIGHTING SYSTEM HERE PUT UP BY THE CITIZENS AT THE FRONT OF EACH BUILDING A DIM (PHOSPHOR BULB->PHOSPHO BOB) GLOWED WHEN DARKNESS FELL THEY WOULD HAVE NOTHING ELSE TO SEE BY +8131-117016-0021-1324: MOVING IN TWO GROUPS OF THREES (AT->IT) OPPOSITE SIDES OF THE STREET THEY BEGAN THEIR BEAT +8131-117016-0022-1325: THERE WAS NO CHANCE TO SAVE THE CITIZEN WHO WAS DYING FROM LACK OF AIR +8131-117016-0023-1326: GORDON FELT THE SOLID PLEASURE OF THE FINELY TURNED CLUB IN HIS HANDS +8131-117016-0024-1327: GORDON'S EYES POPPED AT THAT +8131-117016-0025-1328: HE SWALLOWED THE SENTIMENT HIS OWN CLUB WAS MOVING NOW +8131-117016-0026-1329: THE OTHER FOUR COPS HAD COME IN RELUCTANTLY +8131-117016-0027-1330: HE BROUGHT HIM TO THE GROUND WITH A SINGLE BLOW ACROSS THE KIDNEYS +8131-117016-0028-1331: THEY ROUNDED UP THE MEN OF THE GANG AND ONE OF THE (COPS->CUPS) STARTED OFF +8131-117016-0029-1332: TO FIND A PHONE AND CALL THE WAGON +8131-117016-0030-1333: (WE'RE->WERE) NOT USING WAGONS (MURDOCH->MURDOCK) TOLD HIM (LINE->LIE) THEM UP +8131-117016-0031-1334: IF THEY TRIED TO RUN THEY WERE HIT FROM BEHIND IF THEY STOOD STILL THEY WERE CLUBBED CAREFULLY +8131-117016-0032-1335: (MURDOCH->MURDOCK) INDICATED ONE WHO STOOD WITH HIS (SHOULDERS->SHOULDER) SHAKING AND TEARS RUNNING DOWN HIS CHEEKS +8131-117016-0033-1336: THE CAPTAIN'S FACE WAS AS SICK AS (GORDON->GORDON'S) FELT +8131-117016-0034-1337: I WANT THE NAME OF EVERY MAN IN THE GANG YOU CAN REMEMBER HE TOLD THE MAN +8131-117016-0035-1338: COLONEL THEY'D KILL ME I DON'T KNOW +8131-117016-0036-1339: (MURDOCH->MURDOCK) TOOK HIS NOD AS EVIDENCE ENOUGH AND TURNED TO THE WRETCHED (TOUGHS->TUFTS) +8131-117016-0037-1340: IF HE SHOULD TURN UP DEAD I'LL KNOW YOU BOYS ARE RESPONSIBLE AND I'LL FIND YOU +8131-117016-0038-1341: TROUBLE BEGAN BREWING SHORTLY AFTER THOUGH +8131-117016-0039-1342: 
(MURDOCH->MARDACK) SENT ONE OF THE MEN TO PICK UP A SECOND SQUAD OF SIX AND THEN A THIRD +8131-117016-0040-1343: (IN->AND) THE THIRD ONE (BRUCE->BRUSH) GORDON SPOTTED ONE OF THE MEN (WHO'D->WHO HAD) BEEN BEATEN BEFORE +8131-117016-0041-1344: GET A STRETCHER AND TAKE HIM WHEREVER HE BELONGS HE ORDERED +8131-117016-0042-1345: BUT THE CAPTAIN STIRRED FINALLY SIGHING +8131-117016-0043-1346: NO THE COPS (THEY'RE->ARE) GIVING ME WE'RE COVERED GORDON +8131-117016-0044-1347: BUT THE (STONEWALL->STERN WALL) GANG IS BACKING (WAYNE->WANE) +8131-117016-0045-1348: BUT IT'S GOING TO BE TOUGH ON THEM +8131-117016-0046-1349: BRUCE GORDON GRIMACED I'VE GOT A YELLOW TICKET FROM SECURITY +8131-117016-0047-1350: (MURDOCH->MARDOCK) BLINKED HE DROPPED HIS EYES SLOWLY +8131-117016-0048-1351: WHAT MAKES YOU THINK (WAYNE->WAIN) WILL BE (RE ELECTED->REELECTED) +8131-117016-0049-1352: NOBODY WANTS HIM EXCEPT A GANG OF CROOKS AND THOSE IN POWER +8131-117016-0050-1353: EVER SEE A MARTIAN ELECTION +8131-117016-0051-1354: NO YOU'RE A (FIRSTER->FIRST TER) HE CAN'T LOSE +8131-117016-0052-1355: AND THEN HELL IS GOING TO POP (AND->IN) THIS WHOLE PLANET MAY BE BLOWN WIDE OPEN +8131-117016-0053-1356: IT FITTED WITH THE DIRE PREDICTIONS OF SECURITY AND WITH THE SPYING GORDON WAS GOING TO DO ACCORDING TO THEM +8131-117016-0054-1357: HE WAS GETTING EVEN FATTER NOW THAT HE WAS EATING BETTER FOOD FROM THE FAIR RESTAURANT AROUND THE CORNER +8131-117016-0055-1358: (COST EM->COSTUME) MORE BUT THEY'D BE RESPECTABLE +8131-117016-0056-1359: BECAUSE (IZZY->IZZIE) IS ALWAYS HONEST ACCORDING TO HOW HE SEES IT +8131-117016-0057-1360: BUT YOU GOT EARTH IDEAS OF THE STUFF LIKE I HAD ONCE +8131-117016-0058-1361: THE GROUPS GREW MORE EXPERIENCED AND (MURDOCH->MURDOCK) WAS TRAINING A NEW SQUAD EVERY NIGHT +8131-117016-0059-1362: IT (WASN'T->WAS AN) EXACTLY LEGAL BUT NOTHING WAS HERE +8131-117016-0060-1363: THIS COULD LEAD TO ABUSES AS HE'D SEEN ON EARTH +8131-117016-0061-1364: BUT THERE PROBABLY WOULDN'T BE TIME FOR (IT->*) IF MAYOR (WAYNE->WAIN) WAS RE ELECTED +8131-117017-0000-1270: IT WAS NIGHT OUTSIDE AND THE (PHOSPHOR BULBS->PHOSPHO BOBS) AT THE CORNERS GLOWED DIMLY GIVING HIM BARELY ENOUGH LIGHT BY WHICH TO LOCATE THE WAY TO THE EXTEMPORIZED PRECINCT HOUSE +8131-117017-0001-1271: IT HAD PROBABLY BEEN YEARS SINCE ANY HAD DARED RISK IT AFTER THE SUN WENT DOWN +8131-117017-0002-1272: AND THE SLOW DOUBTFUL RESPECT ON THE FACES OF THE CITIZENS AS THEY NODDED TO HIM WAS EVEN MORE PROOF THAT (HALEY'S->HAYE) SYSTEM WAS WORKING +8131-117017-0003-1273: GORDON HIT THE SIGNAL SWITCH AND THE (MARSPEAKER LET->MARKEE LED) OUT A SHRILL WHISTLE +8131-117017-0004-1274: (GUNS->GUN) SUDDENLY SEEMED TO BE FLOURISHING EVERYWHERE +8131-117017-0005-1275: YOU CAN'T DO IT TO ME +8131-117017-0006-1276: I'M REFORMED I'M GOING STRAIGHT +8131-117017-0007-1277: YOU DAMNED (COPS->COPSE) CAN'T (O'NEILL->O'NEIA) WAS BLUBBERING +8131-117017-0008-1278: ONE LOOK WAS ENOUGH THE WORK PAPERS HAD THE (TELLTALE->TELL TALE) OVER THICKENING OF THE SIGNATURE (THAT->THEY) HAD SHOWED UP ON OTHER PAPERS OBVIOUSLY FORGERIES +8131-117017-0009-1279: SOME TURNED AWAY AS GORDON AND THE OTHER COP WENT TO WORK BUT MOST OF THEM WEREN'T SQUEAMISH +8131-117017-0010-1280: WHEN IT WAS OVER THE TWO PICKED UP THEIR WHIMPERING CAPTIVE +8131-117017-0011-1281: JENKINS THE OTHER COP HAD BEEN HOLDING THE WALLET +8131-117017-0012-1282: MUST (OF->HAVE) BEEN MAKING A BIG CONTACT IN SOMETHING FIFTY FIFTY +8131-117017-0013-1283: THERE MUST HAVE BEEN OVER TWO THOUSAND CREDITS IN THE WALLET 
+8131-117017-0014-1284: WHEN GORDON AND JENKINS CAME BACK (MURDOCH->MARDOCK) TOSSED THE MONEY TO THEM SPLIT IT +8131-117017-0015-1285: WHATEVER COMES TO HAND (GOV'NOR->GOVERNOR) +8131-117017-0016-1286: LIKE THIS SOCIAL CALL GORDON ASKED HIM +8131-117017-0017-1287: THE LITTLE MAN SHOOK HIS HEAD HIS ANCIENT EIGHTEEN YEAR OLD FACE TURNING SOBER (NOPE->NOTE) +8131-117017-0018-1288: YOU OWE ME SOME BILLS (GOV'NOR->GUV'NER) +8131-117017-0019-1289: ELEVEN HUNDRED FIFTY CREDITS +8131-117017-0020-1290: YOU DIDN'T PAY UP YOUR PLEDGE TO THE (CAMPAIGN->CAPTAIN) FUND SO I (HADDA->HAD A) FILL IN +8131-117017-0021-1291: A THOUSAND (INTEREST->INTERESTS) AT TEN PER CENT A WEEK STANDARD RIGHT +8131-117017-0022-1292: GORDON HAD HEARD OF THE FRIENDLY INTEREST CHARGED ON THE SIDE HERE BUT HE SHOOK HIS HEAD WRONG (IZZY->IS HE) +8131-117017-0023-1293: (HUH IZZY->HER AS HE) TURNED IT OVER AND SHOOK HIS HEAD +8131-117017-0024-1294: NOW SHOW ME WHERE I SIGNED ANY AGREEMENT SAYING I'D PAY YOU BACK +8131-117017-0025-1295: FOR A SECOND (IZZY'S->IS HIS) FACE WENT BLANK THEN HE CHUCKLED +8131-117017-0026-1296: HE (PULLED->POURED) OUT THE BILLS AND HANDED THEM OVER +8131-117017-0027-1297: THANKS (IZZY->IS HE) THANKS YOURSELF +8131-117017-0028-1298: THE KID POCKETED THE MONEY CHEERFULLY NODDING +8131-117017-0029-1299: THE LITTLE GUY KNEW MARS AS FEW OTHERS DID APPARENTLY FROM ALL SIDES +8131-117017-0030-1300: AND IF ANY OF THE OTHER COPS HAD PRIVATE RACKETS OF THEIR OWN (IZZY->IZZIE) WAS UNDOUBTEDLY THE MAN TO FIND IT OUT AND (USE->USED) THE INFORMATION WITH A BEAT SUCH AS THAT EVEN GOING HALVES AND WITH ALL THE GRAFT (TO->OF) THE UPPER BRACKETS HE'D STILL BE ABLE TO MAKE HIS PILE IN A MATTER OF MONTHS +8131-117017-0031-1301: THE CAPTAIN LOOKED COMPLETELY BEATEN AS HE CAME INTO THE ROOM AND DROPPED (ONTO->INTO) THE BENCH +8131-117017-0032-1302: GO ON (ACCEPT DAMN IT->EXCEPT DEMON) +8131-117029-0000-1247: THERE WAS A MAN COMING FROM EARTH ON A SECOND SHIP WHO WOULD SEE HIM +8131-117029-0001-1248: THE LITTLE PUBLISHER WAS BACK AT THE CRUSADER AGAIN +8131-117029-0002-1249: ONLY GORDON AND SHEILA WERE LEFT +8131-117029-0003-1250: CREDIT HAD BEEN ESTABLISHED AGAIN AND THE BUSINESSES WERE OPEN +8131-117029-0004-1251: GORDON CAME TO A ROW OF TEMPORARY BUBBLES INDIVIDUAL DWELLINGS BUILT LIKE THE DOME BUT OPAQUE FOR PRIVACY +8131-117029-0005-1252: THEY HAD BEEN LUCKY +8131-117029-0006-1253: (SCHULBERG'S->SHELBERG'S) VOLUNTEERS WERE OFFICIAL NOW +8131-117029-0007-1254: (FATS->FAT) PLACE WAS STILL OPEN THOUGH THE CROOKED TABLES HAD BEEN REMOVED GORDON DROPPED TO A STOOL SLIPPING OFF HIS HELMET +8131-117029-0008-1255: HE REACHED AUTOMATICALLY FOR THE GLASS OF ETHER NEEDLED BEER +8131-117029-0009-1256: THOUGHT YOU'D BE IN THE CHIPS +8131-117029-0010-1257: THAT'S MARS GORDON (ECHOED THE OTHER'S->ACCORD OTHERS) COMMENT WHY DON'T YOU PULL OFF THE PLANET FATS YOU COULD GO BACK TO EARTH I'D GUESS THE OTHER NODDED +8131-117029-0011-1258: (GUESS A->GUESSIMIAN) MAN GETS USED TO ANYTHING HELL MAYBE I CAN HIRE SOME BUMS TO SIT AROUND AND WHOOP IT UP WHEN THE SHIPS COME IN AND (BILL->BUILD) THIS AS A REAL OLD MARTIAN DEN OF SIN +8131-117029-0012-1259: THERE WAS A GRIN ON THE OTHER'S FACE +8131-117029-0013-1260: FINALLY GOT OUR ORDERS FOR YOU IT'S MERCURY +8131-117029-0014-1261: WE SENT TWENTY OTHERS THE SAME WAY AND THEY FAILED +8131-117029-0015-1262: LET'S (SAY YOU'VE->SAVE) SHIFTED SOME OF THE MISERY AROUND A BIT AND GIVEN THEM A CHANCE TO DO BETTER +8131-117029-0016-1263: YOU CAN'T STAY HERE +8131-117029-0017-1264: THERE'S A ROCKET WAITING TO 
(TRANSSHIP->TRANSHIP) YOU TO THE MOON ON THE WAY TO MERCURY RIGHT NOW GORDON SIGHED +8131-117029-0018-1265: AND (I'VE->I) PAID HER THE PAY WE OWE YOU FROM THE TIME YOU (BEGAN->BEGIN) USING YOUR BADGE SHE'S OUT SHOPPING +8131-117029-0019-1266: BUT HIS OLD EYES WERE GLINTING +8131-117029-0020-1267: DID YOU THINK WE'D LET YOU GO WITHOUT SEEING YOU OFF (COBBER->COPPER) HE ASKED +8131-117029-0021-1268: I I OH (DRAT->DREAD) IT I'M GETTING OLD (IZZY->IS HE) YOU TELL HIM +8131-117029-0022-1269: HE GRABBED GORDON'S HAND AND WADDLED DOWN THE LANDING PLANK (IZZY->IS HE) SHOOK HIS HEAD +8188-269288-0000-2881: (ANNIE->ANY) COLCHESTER HAD BEGUN TO MAKE FRIENDS WITH (LESLIE->LIZZLE) +8188-269288-0001-2882: LESLIE DETERMINED TO (TRY FOR->TRIFLE) HONORS IN ENGLISH LANGUAGE AND LITERATURE +8188-269288-0002-2883: HER TASTES ALL LAY IN THIS DIRECTION HER IDEA BEING BY AND BY TO FOLLOW HER MOTHER'S (PROFESSION->PROFICIENT) OF JOURNALISM FOR WHICH SHE (ALREADY->ALWAYS) SHOWED CONSIDERABLE APTITUDE +8188-269288-0003-2884: SHE HAD NO IDEA OF ALLOWING HERSELF TO BREAK DOWN +8188-269288-0004-2885: WHAT DO YOU MEAN REPLIED (LESLIE->LISLEY) +8188-269288-0005-2886: WHY YOU WILL BE PARTING FROM ME YOU KNOW +8188-269288-0006-2887: I WON'T BE THE CONSTANT WORRY AND PLAGUE OF YOUR LIFE +8188-269288-0007-2888: (IT->THIS) IS THIS IF BY ANY CHANCE YOU DON'T LEAVE SAINT (WODE'S->WORDS) ANNIE I HOPE YOU WILL ALLOW ME TO BE YOUR (ROOMFELLOW->ROOM FELLOW) AGAIN NEXT TERM +8188-269288-0008-2889: SAID ANNIE A FLASH OF LIGHT COMING INTO HER EYES AND THEN LEAVING THEM +8188-269288-0009-2890: BUT SHE ADDED ABRUPTLY YOU SPEAK OF SOMETHING WHICH MUST NOT TAKE PLACE +8188-269288-0010-2891: I MUST PASS (IN HONORS->AN HONOUR) IF I DON'T I SHALL DIE +8188-269288-0011-2892: A FEW MOMENTS LATER (THERE->DICK) CAME A TAP AT THE DOOR +8188-269288-0012-2893: LESLIE OPENED THE DOOR +8188-269288-0013-2894: JANE (HERIOT->HARRIET) STOOD WITHOUT +8188-269288-0014-2895: THESE LETTERS HAVE JUST COME FOR YOU (AND ANNIE->IN ANY) COLCHESTER SHE SAID AND AS I WAS COMING UPSTAIRS I THOUGHT I WOULD LEAVE THEM WITH YOU +8188-269288-0015-2896: (LESLIE->LISLEY) THANKED HER AND EAGERLY GRASPED THE LITTLE PARCEL +8188-269288-0016-2897: HER EYES SHONE WITH PLEASURE AT THE ANTICIPATION OF THE DELIGHTFUL TIME SHE WOULD HAVE (REVELING->RIVELING) IN THE HOME NEWS THE OTHER LETTER WAS DIRECTED TO (ANNIE->ANY) COLCHESTER +8188-269288-0017-2898: HERE IS A LETTER FOR YOU (ANNIE->ANY) CRIED (LESLIE->LIZZIE) +8188-269288-0018-2899: HER FACE GREW SUDDENLY WHITE AS DEATH WHAT IS IT DEAR +8188-269288-0019-2900: I HAVE BEEN (STARVING->STARLING) OR RATHER I HAVE BEEN THIRSTING +8188-269288-0020-2901: WELL READ IT IN PEACE SAID (LESLIE->LIDNESLEY) I WON'T DISTURB YOU +8188-269288-0021-2902: I AM TRULY GLAD IT HAS COME +8188-269288-0022-2903: (LESLIE->LISSLY) SEATED HERSELF WITH HER BACK TO HER COMPANION AND OPENED HER (OWN->ON) LETTERS +8188-269288-0023-2904: DON'T NOTICE ME REPLIED (ANNIE->ANY) +8188-269288-0024-2905: I MUST GO INTO THE (GROUNDS->GROUND) THE AIR IS STIFLING +8188-269288-0025-2906: BUT THEY (ARE->HAD) JUST SHUTTING UP +8188-269288-0026-2907: I SHALL GO I KNOW A WAY +8188-269288-0027-2908: JUST AFTER MIDNIGHT SHE ROSE WITH A SIGH TO PREPARE FOR BED +8188-269288-0028-2909: SHE LOOKED ROUND THE ROOM +8188-269288-0029-2910: NOW I REMEMBER SHE GOT A LETTER WHICH UPSET HER VERY MUCH AND WENT OUT +8188-269288-0030-2911: (LESLIE->LISLEY) WENT TO THE WINDOW AND FLUNG IT OPEN SHE PUT HER HEAD OUT AND TRIED TO PEER INTO THE DARKNESS BUT THE MOON HAD ALREADY SET AND SHE 
COULD NOT SEE MORE THAN A COUPLE OF YARDS IN FRONT OF HER +8188-269288-0031-2912: SHE IS A VERY QUEER (ERRATIC->THE RATT) CREATURE AND THAT LETTER THERE WAS BAD NEWS IN THAT LETTER +8188-269288-0032-2913: WHAT (CAN SHE BE->CAN'T YOU) DOING OUT BY HERSELF +8188-269288-0033-2914: (LESLIE->THIS LILY) LEFT THE ROOM BUT SHE HAD SCARCELY GONE A DOZEN (PACES->PLACES) DOWN THE CORRIDOR BEFORE SHE MET (ANNIE->ANY) RETURNING +8188-269288-0034-2915: (ANNIE'S->ANY) EYES WERE VERY BRIGHT HER CHEEKS WERE NO LONGER PALE AND THERE WAS A BRILLIANT (COLOR->COLOUR) IN THEM +8188-269288-0035-2916: SHE DID NOT TAKE THE LEAST NOTICE OF (LESLIE->PLEASING) BUT GOING INTO THE ROOM SHUT THE DOOR +8188-269288-0036-2917: DON'T BEGIN SAID ANNIE +8188-269288-0037-2918: DON'T BEGIN WHAT DO YOU MEAN +8188-269288-0038-2919: I MEAN THAT I DON'T WANT YOU TO BEGIN TO ASK QUESTIONS +8188-269288-0039-2920: I WALKED UP AND DOWN AS FAST AS EVER I COULD OUTSIDE IN ORDER TO MAKE MYSELF SLEEPY +8188-269288-0040-2921: (DON'T->THEY'RE) TALK TO ME (LESLIE->LISLEY) DON'T SAY A SINGLE WORD +8188-269288-0041-2922: I SHALL GO OFF TO SLEEP THAT IS ALL I CARE FOR +8188-269288-0042-2923: DON'T SAID ANNIE +8188-269288-0043-2924: NOW DRINK THIS AT ONCE SHE SAID IN A VOICE OF AUTHORITY IF YOU REALLY WISH TO SLEEP +8188-269288-0044-2925: (ANNIE STARED->ANY STEERED) VACANTLY AT THE (COCOA THEN->CUCKOO DID) SHE UTTERED A LAUGH +8188-269288-0045-2926: DRINK THAT SHE SAID +8188-269288-0046-2927: DO YOU WANT TO KILL ME DON'T TALK ANY MORE +8188-269288-0047-2928: I AM SLEEPY I SHALL SLEEP +8188-269288-0048-2929: SHE GOT INTO BED AS SHE SPOKE AND WRAPPED THE CLOTHES TIGHTLY ROUND HER +8188-269288-0049-2930: (CAN'T->CAN) YOU MANAGE WITH A CANDLE JUST FOR ONCE +8188-269288-0050-2931: CERTAINLY (SAID LESLIE->CITIZELY) +8188-269288-0051-2932: SHE TURNED OFF THE LIGHT AND (LIT A->LET HER) CANDLE (WHICH->WOULD) SHE PUT BEHIND HER SCREEN THEN PREPARED TO GET INTO BED +8188-269288-0052-2933: (ANNIE'S->ANY'S) MANNER WAS VERY MYSTERIOUS +8188-269288-0053-2934: (ANNIE->AND HE) DID NOT MEAN TO (CONFIDE->CONFINE) IN (ANYONE->ANY ONE) THAT NIGHT AND THE KINDEST THING WAS TO LEAVE HER ALONE +8188-269288-0054-2935: (TIRED->TIE IT) OUT LESLIE HERSELF DROPPED ASLEEP +8188-269288-0055-2936: (ANNIE->ANY) IS THAT YOU SHE CALLED OUT +8188-269288-0056-2937: THERE WAS NO REPLY BUT THE SOUND OF HURRYING STEPS CAME QUICKER AND QUICKER NOW AND THEN (THEY WERE->THEIR) INTERRUPTED BY A GROAN +8188-269288-0057-2938: OH THIS WILL KILL ME MY HEART WILL BREAK THIS WILL KILL ME +8188-269290-0000-2823: THE GUILD OF SAINT ELIZABETH +8188-269290-0001-2824: IMMEDIATELY AFTER DINNER THAT EVENING (LESLIE->LISLEY) RAN UP TO HER ROOM TO MAKE PREPARATIONS FOR HER VISIT TO EAST HALL +8188-269290-0002-2825: I'M NOT COMING SAID ANNIE +8188-269290-0003-2826: EVERY STUDENT IS TO BE (IN->AN) EAST HALL AT HALF PAST EIGHT +8188-269290-0004-2827: IT (DOESN'T->DOES) MATTER REPLIED ANNIE (WHETHER->WHITHER) IT IS AN ORDER OR NOT (I'M->I AM) NOT COMING SAY NOTHING ABOUT ME PLEASE +8188-269290-0005-2828: IT BURNED AS IF WITH FEVER +8188-269290-0006-2829: YOU DON'T KNOW WHAT A TRIAL IT IS FOR ME TO HAVE YOU HERE +8188-269290-0007-2830: I WANT TO BE ALONE GO +8188-269290-0008-2831: I KNOW YOU DON'T QUITE MEAN WHAT YOU SAY SAID (LESLIE->LIZZIE) BUT OF COURSE IF YOU REALLY WISH ME +8188-269290-0009-2832: YOU FRET ME BEYOND ENDURANCE +8188-269290-0010-2833: WRAPPING A PRETTY BLUE SHAWL (ROUND HER HEAD AND->AROUND A HIDDEN) SHOULDERS SHE TURNED TO ANNIE +8188-269290-0011-2834: LESLIE WAS JUST CLOSING THE DOOR 
BEHIND HER WHEN (ANNIE->NY) CALLED AFTER HER +8188-269290-0012-2835: I TOOK IT OUT SAID (LESLIE->LISLEY) TOOK IT OUT +8188-269290-0013-2836: HAVE THE GOODNESS TO FIND IT AND PUT IT BACK +8188-269290-0014-2837: BUT DON'T LOCK ME OUT PLEASE (ANNIE->ANY) +8188-269290-0015-2838: OH I WON'T LOCK YOU (OUT->ABOUT) SHE SAID BUT I MUST HAVE THE KEY +8188-269290-0016-2839: JANE (HERIOT'S->HEARIT'S) VOICE WAS HEARD IN THE PASSAGE +8188-269290-0017-2840: AS SHE WALKED DOWN THE CORRIDOR SHE HEARD IT BEING TURNED (IN->TO) THE LOCK +8188-269290-0018-2841: WHAT CAN THIS MEAN SHE SAID TO HERSELF +8188-269290-0019-2842: OH I WON'T PRESS YOU REPLIED JANE +8188-269290-0020-2843: OH I SHALL NEVER DO THAT REPLIED (LESLIE->LISLEY) +8188-269290-0021-2844: YOU SEE ALL THE GIRLS EXCEPT EILEEN AND MARJORIE LAUGH AT HER AND THAT SEEMS TO ME TO MAKE HER WORSE +8188-269290-0022-2845: SOME DAY JANE YOU MUST SEE HER +8188-269290-0023-2846: IF YOU (ARE->*) IN LONDON DURING THE SUMMER YOU MUST COME (AND PAY US A->IN PAIR FOR) VISIT WILL YOU +8188-269290-0024-2847: THAT IS IF YOU CARE TO CONFIDE IN ME +8188-269290-0025-2848: I BELIEVE POOR ANNIE IS DREADFULLY UNHAPPY +8188-269290-0026-2849: THAT'S JUST (IT JANE->A CHAIN) THAT IS WHAT FRIGHTENS ME SHE REFUSES TO COME +8188-269290-0027-2850: REFUSES TO COME SHE CRIED +8188-269290-0028-2851: (SHE WILL->SHE'LL) GET (INTO AN->IN HER) AWFUL SCRAPE +8188-269290-0029-2852: I AM SURE SHE IS ILL SHE WORKS TOO HARD AND SHE BUT THERE I DON'T KNOW THAT I OUGHT TO SAY ANY MORE +8188-269290-0030-2853: I'LL WAIT FOR YOU HERE SAID (LESLIE->LISLEY) +8188-269290-0031-2854: DO COME (ANNIE->ANY) DO +8188-269290-0032-2855: SCARCELY LIKELY REPLIED LESLIE SHE TOLD ME SHE WAS DETERMINED NOT TO COME TO THE MEETING +8188-269290-0033-2856: BUT MARJORIE AND (EILEEN->IDLEEN) HAD ALREADY DEPARTED AND (LESLIE->LISLEY) AND JANE FOUND THEMSELVES AMONG THE LAST STUDENTS TO ARRIVE AT THE GREAT EAST HALL +8188-269290-0034-2857: MISS (LAUDERDALE->LAUDER DALE) WAS STANDING WITH THE OTHER TUTORS AND (PRINCIPALS->PRINCIPLES) OF THE DIFFERENT HALLS (ON->ARE) A RAISED PLATFORM +8188-269290-0035-2858: THEN A (ROLL CALL->ROCKLE) WAS GONE THROUGH BY ONE OF THE TUTORS THE ONLY (ABSENTEE->EBSENTEE) WAS (ANNIE->ANY) COLCHESTER +8188-269290-0036-2859: THE PHYSICAL PART OF (YOUR TRAINING->THE ORTRAINING) AND ALSO THE MENTAL PART ARE ABUNDANTLY SUPPLIED IN THIS GREAT HOUSE OF LEARNING SHE CONTINUED BUT THE SPIRITUAL PART IT SEEMS TO ME OUGHT NOW TO BE STRENGTHENED +8188-269290-0037-2860: HEAR (HEAR->HERE) AND ONCE AGAIN (HEAR->HAIR) +8188-269290-0038-2861: SHE UTTERED (HER->A) STRANGE REMARK STANDING UP +8188-269290-0039-2862: MARJORIE AND (EILEEN->ILINE) WERE CLOSE TO HER +8188-269290-0040-2863: I WILL TALK WITH YOU (BELLE ACHESON->BELL ARTISON) PRESENTLY SHE SAID +8188-269290-0041-2864: THE NAMES OF (*->THE) PROPOSED MEMBERS ARE TO BE SUBMITTED TO ME BEFORE THIS DAY WEEK +8188-269290-0042-2865: AM I MY BROTHER'S KEEPER +8188-269290-0043-2866: YOU ASK SHE CONTINUED +8188-269290-0044-2867: GOD (ANSWERS TO->AUTHEST) EACH OF YOU YOU ARE +8188-269290-0045-2868: THE WORLD (SAYS->TASTE) NO I AM NOT BUT GOD (SAYS->SAKES) YES YOU ARE +8188-269290-0046-2869: ALL MEN ARE (YOUR->*) BROTHERS +8188-269290-0047-2870: FOR ALL WHO SIN ALL WHO SUFFER YOU ARE TO (A CERTAIN->EXERT AN) EXTENT RESPONSIBLE +8188-269290-0048-2871: AFTER THE ADDRESS THE GIRLS THEMSELVES WERE ENCOURAGED TO SPEAK AND A VERY ANIMATED DISCUSSION FOLLOWED +8188-269290-0049-2872: IT WAS PAST TEN O'CLOCK WHEN SHE LEFT THE HALL +8188-269290-0050-2873: JUST AS SHE WAS 
DOING SO (MISS->WAS) FRERE CAME UP +8188-269290-0051-2874: (ANNIE COLCHESTER IS->ANY COLCHISED AS) YOUR (ROOMFELLOW->ROOM FELLOW) IS SHE NOT SHE SAID +8188-269290-0052-2875: I SEE BY YOUR FACE (MISS GILROY->MY SCALE ROY) THAT YOU ARE DISTRESSED ABOUT SOMETHING ARE (YOU->*) KEEPING ANYTHING BACK +8188-269290-0053-2876: I AM AFRAID I AM REPLIED LESLIE (DISTRESS->DISTRESSED) NOW IN HER TONE +8188-269290-0054-2877: I MUST SEE HER MYSELF EARLY IN THE MORNING AND I AM QUITE SURE THAT NOTHING WILL SATISFY MISS (LAUDERDALE->LAURDELL) EXCEPT A VERY AMPLE APOLOGY AND A FULL EXPLANATION OF THE REASON WHY SHE ABSENTED HERSELF +8188-269290-0055-2878: EXCUSES MAKE NO DIFFERENCE +8188-269290-0056-2879: THE GIRL WHO BREAKS THE RULES (HAS->HAVE) TO BE PUNISHED +8188-269290-0057-2880: I WILL TELL HER +8188-274364-0000-2811: THE COMMONS ALSO VOTED THAT THE NEW CREATED PEERS OUGHT TO HAVE NO VOICE IN THIS TRIAL BECAUSE THE ACCUSATION BEING AGREED TO WHILE THEY WERE COMMONERS THEIR CONSENT TO IT WAS IMPLIED WITH THAT OF ALL THE COMMONS OF ENGLAND +8188-274364-0001-2812: (IN->*) THE GOVERNMENT OF IRELAND HIS ADMINISTRATION HAD BEEN EQUALLY (PROMOTIVE->PROMOTED) OF HIS MASTER'S INTEREST AND THAT OF THE SUBJECTS COMMITTED TO HIS CARE +8188-274364-0002-2813: THE CASE OF LORD (MOUNTNORRIS->MONTORIS) OF ALL THOSE WHICH WERE COLLECTED WITH SO (MUCH->ACT) INDUSTRY IS THE MOST FLAGRANT AND THE LEAST EXCUSABLE +8188-274364-0003-2814: THE COURT WHICH CONSISTED OF THE (CHIEF OFFICERS->CHEAP OFFICIALS) OF THE ARMY FOUND THE (CRIME->CROWN) TO BE CAPITAL AND CONDEMNED THAT NOBLEMAN TO LOSE HIS HEAD +8188-274364-0004-2815: (WHERE THE->WITH A) TOKEN BY WHICH I (SHOULD->SHALL) DISCOVER IT +8188-274364-0005-2816: IT IS NOW (*->A) FULL TWO HUNDRED AND FORTY YEARS SINCE TREASONS WERE DEFINED AND SO LONG HAS IT BEEN SINCE ANY MAN WAS TOUCHED TO THIS EXTENT UPON THIS CRIME (BEFORE->FOR) MYSELF +8188-274364-0006-2817: LET US NOT TO (OUR OWN DESTRUCTION->HER UNDESTRUCTION) AWAKE THOSE (SLEEPING->KEEPING) LIONS BY RATTLING UP A COMPANY OF (OLD RECORDS->ALL RICARDS) WHICH HAVE LAIN FOR SO MANY AGES BY THE (WALL->WAR) FORGOTTEN AND NEGLECTED +8188-274364-0007-2818: (HOWEVER->HERBID) THESE GENTLEMEN AT THE BAR (SAY->SO) THEY SPEAK FOR THE (COMMONWEALTH->CORNWEALTH) AND THEY BELIEVE SO YET UNDER (FAVOR->FAVOUR) IT IS I WHO IN THIS PARTICULAR SPEAK FOR THE (COMMONWEALTH->CORNWEALTH) +8188-274364-0008-2819: MY LORDS I HAVE NOW TROUBLED YOUR LORDSHIPS A GREAT DEAL LONGER THAN I SHOULD HAVE DONE +8188-274364-0009-2820: YOUNG VANE FALLING UPON THIS PAPER OF NOTES DEEMED THE MATTER OF THE UTMOST IMPORTANCE AND IMMEDIATELY COMMUNICATED IT TO (PYM->POEM) WHO NOW PRODUCED THE PAPER BEFORE THE HOUSE OF COMMONS +8188-274364-0010-2821: THE KING PROPOSES THIS DIFFICULTY BUT HOW CAN I UNDERTAKE OFFENSIVE WAR IF I HAVE NO MORE MONEY +8188-274364-0011-2822: YOUR MAJESTY HAVING TRIED THE AFFECTIONS OF YOUR PEOPLE YOU (ARE->*) ABSOLVED AND LOOSE FROM ALL RULES OF GOVERNMENT AND MAY DO WHAT POWER WILL ADMIT +8280-266249-0000-339: OLD MISTER DINSMORE HAD ACCEPTED (A PRESSING->OPPRESSING) INVITATION FROM HIS GRANDDAUGHTER AND HER HUSBAND TO JOIN THE PARTY AND WITH THE ADDITION OF SERVANTS IT WAS A LARGE ONE +8280-266249-0001-340: AS THEY WERE IN NO HASTE AND THE CONFINEMENT OF A RAILROAD CAR (WOULD->WILL) BE VERY IRKSOME TO THE YOUNGER CHILDREN IT HAD BEEN DECIDED TO MAKE THE JOURNEY BY WATER +8280-266249-0002-341: THERE WERE NO SAD LEAVE TAKINGS TO MAR THEIR PLEASURE THE CHILDREN WERE IN WILD SPIRITS AND ALL SEEMED CHEERFUL AND HAPPY AS THEY SAT OR STOOD UPON 
THE DECK WATCHING THE RECEDING SHORE AS THE VESSEL STEAMED OUT OF THE (HARBOR->HARBOUR) +8280-266249-0003-342: AT LENGTH THE LAND HAD QUITE DISAPPEARED NOTHING COULD BE SEEN BUT THE SKY OVERHEAD AND A VAST EXPANSE OF WATER ALL (AROUND->ROUND) AND THE PASSENGERS FOUND LEISURE TO TURN THEIR ATTENTION UPON EACH OTHER +8280-266249-0004-343: THERE ARE SOME NICE LOOKING PEOPLE ON BOARD REMARKED MISTER TRAVILLA IN AN UNDERTONE TO HIS WIFE +8280-266249-0005-344: (BESIDE->BESIDES) OURSELVES ADDED COUSIN (RONALD->RANALD) LAUGHING +8280-266249-0006-345: YES SHE ANSWERED THAT LITTLE GROUP YONDER A YOUNG MINISTER AND HIS WIFE AND CHILD I SUPPOSE +8280-266249-0007-346: AND (WHAT->WHEN) A DEAR LITTLE FELLOW HE IS JUST ABOUT THE AGE OF OUR (HAROLD->HERALD) I SHOULD JUDGE +8280-266249-0008-347: DO YOU SON WAS THE SMILING REJOINDER +8280-266249-0009-348: HE CERTAINLY LOOKS LIKE A VERY NICE LITTLE BOY +8280-266249-0010-349: SUPPOSE YOU AND HE SHAKE HANDS FRANK +8280-266249-0011-350: I DO INDEED (THOUGH->THE) PROBABLY COMPARATIVELY FEW ARE AWARE THAT TOBACCO IS THE CAUSE OF THEIR AILMENTS +8280-266249-0012-351: DOUBTLESS THAT IS THE CASE REMARKED MISTER DINSMORE +8280-266249-0013-352: WITH ALL MY HEART IF YOU WILL STEP INTO THE (GENTLEMEN'S->GENTLEMAN'S) CABIN WHERE THERE'S A LIGHT +8280-266249-0014-353: HE LED THE WAY THE OTHERS ALL FOLLOWING AND TAKING OUT A SLIP OF PAPER READ FROM IT IN A DISTINCT TONE LOUD ENOUGH TO BE HEARD BY THOSE (*->ALL) ABOUT HIM WITHOUT DISTURBING THE OTHER PASSENGERS +8280-266249-0015-354: ONE DROP OF NICOTINE (EXTRACT OF->EXTRACTED) TOBACCO PLACED ON THE TONGUE OF (A->THE) DOG WILL KILL HIM IN A MINUTE THE HUNDREDTH PART OF A GRAIN (PICKED->PRICKED) UNDER THE SKIN OF A MAN'S ARM WILL PRODUCE NAUSEA AND FAINTING +8280-266249-0016-355: THE HALF DOZEN CIGARS WHICH MOST SMOKERS (USE->USED) A DAY CONTAIN SIX OR SEVEN GRAINS ENOUGH IF CONCENTRATED AND ABSORBED TO KILL THREE MEN AND A POUND (OF->OR) TOBACCO ACCORDING TO ITS QUALITY CONTAINS FROM ONE QUARTER TO ONE AND A QUARTER OUNCES +8280-266249-0017-356: IS IT STRANGE THEN THAT SMOKERS AND (CHEWERS->SHOERS) HAVE A THOUSAND AILMENTS +8280-266249-0018-357: THAT THE FRENCH (POLYTECHNIC->POLYTECHNICHER) INSTITUTE HAD TO PROHIBIT ITS USE ON ACCOUNT OF ITS EFFECTS (ON->UPON) THE MIND +8280-266249-0019-358: NOTICE THE MULTITUDE OF SUDDEN DEATHS AND SEE HOW MANY (ARE->OUR) SMOKERS AND CHEWERS +8280-266249-0020-359: IN A SMALL COUNTRY TOWN SEVEN OF THESE MYSTERIOUS PROVIDENCES OCCURRED WITHIN THE CIRCUIT OF A MILE ALL DIRECTLY TRACEABLE TO TOBACCO AND ANY PHYSICIAN ON A FEW MOMENTS REFLECTION CAN MATCH THIS FACT BY HIS OWN OBSERVATION +8280-266249-0021-360: AND THEN SUCH POWERFUL ACIDS PRODUCE INTENSE IRRITATION AND THIRST THIRST WHICH WATER DOES NOT QUENCH +8280-266249-0022-361: HENCE A RESORT TO CIDER AND BEER +8280-266249-0023-362: NO SIR WHAT KNOW YE NOT THAT YOUR BODY IS THE TEMPLE OF THE HOLY GHOST WHICH IS IN YOU WHICH YE HAVE OF GOD AND YE ARE NOT YOUR OWN +8280-266249-0024-363: FOR (YE->YOU) ARE BOUGHT WITH A PRICE THEREFORE GLORIFY GOD IN YOUR BODY AND IN YOUR SPIRIT WHICH ARE (GOD'S->GODS) +8280-266249-0025-364: WE CERTAINLY HAVE NO RIGHT TO INJURE OUR BODIES EITHER BY NEGLECT OR SELF INDULGENCE +8280-266249-0026-365: AND AGAIN I BESEECH YOU THEREFORE BRETHREN BY THE MERCIES OF GOD THAT YE PRESENT YOUR BODIES A LIVING SACRIFICE HOLY ACCEPTABLE UNTO GOD WHICH IS YOUR REASONABLE SERVICE +8280-266249-0027-366: IT MUST REQUIRE A GOOD DEAL OF RESOLUTION FOR ONE WHO HAS BECOME FOND OF THE INDULGENCE TO GIVE IT UP REMARKED MISTER DALY 
+8280-266249-0028-367: NO DOUBT NO DOUBT RETURNED MISTER (LILBURN->LILBOURNE) BUT IF THY RIGHT (EYE OFFEND THEE->I OFFENDLY) PLUCK IT (OUT->UP) AND CAST IT FROM (THEE->ME) FOR IT IS PROFITABLE FOR THEE THAT ONE OF THY MEMBERS SHOULD PERISH AND NOT THAT THY WHOLE BODY SHOULD BE CAST INTO HELL +8280-266249-0029-368: THERE WAS A PAUSE BROKEN BY YOUNG HORACE WHO HAD BEEN WATCHING A GROUP OF MEN GATHERED ABOUT A TABLE AT THE FURTHER END OF THE ROOM +8280-266249-0030-369: THEY ARE GAMBLING YONDER AND I'M AFRAID THAT YOUNG FELLOW IS BEING BADLY FLEECED BY (THAT->THE) MIDDLE AGED MAN OPPOSITE +8280-266249-0031-370: THE EYES OF THE WHOLE PARTY WERE AT ONCE TURNED IN THAT DIRECTION +8280-266249-0032-371: NO SIR HE IS NOT HERE +8280-266249-0033-372: (AND->AT) THE DOOR WAS SLAMMED VIOLENTLY (TO->TOO) +8280-266249-0034-373: NOW THE VOICE CAME FROM THE SKYLIGHT OVERHEAD APPARENTLY AND WITH A FIERCE IMPRECATION THE IRATE GAMESTER RUSHED UPON DECK AND RAN HITHER AND THITHER IN SEARCH OF HIS TORMENTOR +8280-266249-0035-374: HIS VICTIM WHO HAD BEEN LOOKING ON DURING THE LITTLE SCENE AND LISTENING TO THE MYSTERIOUS VOICE (IN->AND) SILENT WIDE EYED WONDER AND FEAR NOW ROSE HASTILY HIS FACE DEATHLY PALE WITH TREMBLING HANDS GATHERED UP THE MONEY HE HAD STAKED AND HURRYING (INTO->TO) HIS (STATE ROOM->STATEROOM) LOCKED HIMSELF IN +8280-266249-0036-375: WHAT DOES IT MEAN CRIED ONE +8280-266249-0037-376: A (VENTRILOQUIST ABOARD->VENTILOQUE QUESTERED BOARD) OF COURSE RETURNED ANOTHER LET'S FOLLOW AND SEE THE FUN +8280-266249-0038-377: I WONDER WHICH OF US IT IS REMARKED THE FIRST LOOKING HARD AT OUR PARTY I DON'T KNOW BUT COME ON +8280-266249-0039-378: THAT FELLOW NICK WARD IS A NOTED (BLACKLEG->BLACK LAG) AND RUFFIAN HAD HIS NOSE BROKEN IN A FIGHT AND IS SENSITIVE ON THE SUBJECT WAS CHEATING OF COURSE +8280-266249-0040-379: WHO ASKED THE MATE I'VE SEEN (NONE UP->NO NAP) HERE THOUGH THERE ARE SOME IN THE STEERAGE +8280-266249-0041-380: THEY HEARD HIM IN SILENCE WITH A COOL PHLEGMATIC INDIFFERENCE MOST EXASPERATING TO ONE IN HIS PRESENT MOOD +8280-266249-0042-381: A MAN OF GIANT SIZE AND HERCULEAN STRENGTH HAD LAID ASIDE HIS PIPE AND SLOWLY RISING TO HIS FEET SEIZED THE SCOUNDREL IN HIS POWERFUL GRASP +8280-266249-0043-382: LET ME GO YELLED WARD MAKING A DESPERATE EFFORT TO FREE HIS ARMS +8280-266249-0044-383: I (DINKS->THINK) NO I (DINKS->THINK) I (DEACH->DID) YOU (VON LESSON->FOR MESSUM) RETURNED HIS CAPTOR NOT RELAXING HIS GRASP IN THE LEAST +8280-266249-0045-384: THE GERMAN RELEASED HIS PRISONER AND THE LATTER (SLUNK->SUNK) AWAY WITH MUTTERED THREATS AND IMPRECATIONS UPON THE HEAD OF HIS TORMENTOR +8280-266249-0046-385: MISTER (LILBURN->LILLBURN) AND MISTER DALY EACH (AT->HAD) A DIFFERENT TIME SOUGHT OUT THE YOUNG MAN (WARD'S->WORDS) INTENDED VICTIM AND TRIED TO INFLUENCE HIM FOR GOOD +8280-266249-0047-386: YET THERE WAS GAMBLING AGAIN THE SECOND NIGHT BETWEEN WARD AND SEVERAL OTHERS OF HIS (PROFESSION->PROFESSIONS) +8280-266249-0048-387: THEY KEPT IT UP TILL AFTER MIDNIGHT +8280-266249-0049-388: THEN MISTER (LILBURN->LILLO) WAKING FROM HIS FIRST SLEEP IN A STATEROOM NEAR BY THOUGHT HE WOULD BREAK IT UP ONCE MORE +8280-266249-0050-389: AN INTENSE VOICELESS EXCITEMENT POSSESSED THE PLAYERS FOR THE GAME WAS A CLOSE ONE AND THE STAKES WERE VERY HEAVY +8280-266249-0051-390: (THEY BENT->THEY'VE BEEN) EAGERLY OVER THE BOARD EACH WATCHING WITH FEVERISH ANXIETY HIS COMPANION'S MOVEMENTS EACH CASTING NOW AND AGAIN A GLOATING EYE UPON THE HEAP OF GOLD AND GREENBACKS THAT LAY BETWEEN THEM AND AT TIMES HALF STRETCHING OUT 
HIS HAND TO CLUTCH IT +8280-266249-0052-391: A DEEP GROAN STARTLED THEM AND THEY SPRANG TO THEIR FEET PALE AND TREMBLING WITH SUDDEN TERROR EACH HOLDING HIS BREATH AND STRAINING HIS EAR TO CATCH A REPETITION OF THE DREAD SOUND +8280-266249-0053-392: BUT (ALL WAS->ALWAYS) SILENT AND AFTER A MOMENT OF ANXIOUS WAITING THEY SAT DOWN TO THEIR GAME AGAIN TRYING TO CONCEAL AND SHAKE OFF THEIR FEARS (WITH A->TO THE) FORCED UNNATURAL LAUGH +8280-266249-0054-393: IT CAME FROM UNDER THE TABLE GASPED (WARD->WARREN) LOOK WHAT'S THERE LOOK (*->TO) YOURSELF +8280-266249-0055-394: WHAT CAN IT HAVE BEEN THEY ASKED EACH OTHER +8280-266249-0056-395: OH NONSENSE WHAT FOOLS WE ARE +8280-266249-0057-396: IT WAS THE LAST GAME OF CARDS FOR THAT TRIP +8280-266249-0058-397: THE CAPTAIN COMING IN SHORTLY AFTER THE SUDDEN FLIGHT OF THE GAMBLERS TOOK CHARGE OF THE MONEY AND THE NEXT DAY RESTORED IT TO THE OWNERS +8280-266249-0059-398: TO ELSIE'S OBSERVANT EYES IT PRESENTLY BECAME EVIDENT THAT THE (DALYS WERE IN->DAILIES RAN) VERY (STRAITENED->STRAIGHT IN) CIRCUMSTANCES +8280-266249-0060-399: OH HOW KIND HOW VERY KIND MISSUS (DALY->DALEY) SAID WITH TEARS OF JOY AND GRATITUDE WE HAVE HARDLY KNOWN HOW WE SHOULD MEET THE MOST NECESSARY EXPENSES OF THIS TRIP BUT HAVE BEEN TRYING TO CAST OUR CARE UPON THE LORD ASKING HIM TO PROVIDE +8280-266249-0061-400: AND HOW WONDERFULLY HE HAS ANSWERED OUR PETITIONS +8280-266249-0062-401: ELSIE ANSWERED PRESSING HER HAND AFFECTIONATELY (ART->ARE) WE NOT SISTERS IN CHRIST +8280-266249-0063-402: YE ARE ALL THE CHILDREN OF GOD BY FAITH IN CHRIST JESUS +8280-266249-0064-403: (YE ARE->YEAR) ALL ONE (IN->AND) CHRIST JESUS +8280-266249-0065-404: WE (FEEL->SEE ON) MY HUSBAND AND I THAT WE ARE ONLY THE STEWARDS OF HIS BOUNTY AND (THAT->*) BECAUSE HE HAS SAID INASMUCH AS YE HAVE DONE IT UNTO ONE OF THE LEAST OF THESE MY BRETHREN YE HAVE DONE IT UNTO ME IT IS THE GREATEST PRIVILEGE AND DELIGHT TO DO ANYTHING FOR HIS PEOPLE +8461-258277-0000-1649: WHEN IT WAS THE SEVEN HUNDRED AND EIGHTEENTH NIGHT +8461-258277-0001-1650: BUT HE ANSWERED NEEDS (MUST I HAVE ZAYNAB ALSO->MICE THY HALVES THINE APPLES SAY) NOW (SUDDENLY->CERTAINLY) THERE CAME A RAP AT THE DOOR AND THE MAID SAID WHO IS AT THE DOOR +8461-258277-0002-1651: THE KNOCKER REPLIED (KAMAR->COME ALL) DAUGHTER (OF AZARIAH->VASSARIAH) THE JEW SAY ME IS ALI OF CAIRO WITH YOU +8461-258277-0003-1652: REPLIED THE BROKER'S DAUGHTER O THOU DAUGHTER OF A DOG +8461-258277-0004-1653: (AND->ON) HAVING THUS (ISLAMISED->ISLAMIZED) SHE ASKED HIM (DO->TWO) MEN IN THE FAITH OF (AL ISLAM GIVE->ALI SLAM GAVE) MARRIAGE PORTIONS TO WOMEN OR (DO->TWO) WOMEN (DOWER->TO OUR) MEN +8461-258277-0005-1654: AND SHE THREW DOWN THE JEW'S HEAD BEFORE HIM +8461-258277-0006-1655: NOW THE CAUSE OF HER SLAYING HER SIRE WAS AS FOLLOWS +8461-258277-0007-1656: THEN HE (SET->SAT) OUT REJOICING TO RETURN TO THE BARRACK OF THE (FORTY->FORTE) +8461-258277-0008-1657: SO HE ATE AND FELL DOWN SENSELESS FOR THE SWEETMEATS WERE DRUGGED WITH (BHANG->BANG) WHEREUPON THE KAZI BUNDLED HIM INTO THE SACK AND MADE OFF WITH HIM CHARGER AND CHEST AND ALL TO THE BARRACK OF THE (FORTY->FORTE) +8461-258277-0009-1658: PRESENTLY (HASAN SHUMAN->HER SANCHUMAIN) CAME OUT OF A (CLOSET->CLOTH) AND SAID TO HIM HAST THOU GOTTEN (THE GEAR->AGAIN) O ALI +8461-258277-0010-1659: SO HE TOLD (HIM->THEM) WHAT HAD BEFALLEN HIM AND ADDED IF I KNOW (WHITHER->WHETHER) THE RASCAL IS GONE AND WHERE TO FIND THE KNAVE I WOULD PAY HIM OUT +8461-258277-0011-1660: KNOWEST THOU WHITHER HE WENT +8461-258277-0012-1661: ANSWERED HASAN 
I KNOW WHERE HE IS AND OPENING THE DOOR OF THE CLOSET SHOWED HIM THE SWEETMEAT (SELLER->CELLAR) WITHIN DRUGGED AND SENSELESS +8461-258277-0013-1662: SO I WENT ROUND ABOUT THE HIGHWAYS OF THE CITY TILL I MET A SWEETMEAT (SELLER->CELLAR) AND BUYING HIS CLOTHES AND STOCK IN TRADE AND GEAR FOR TEN DINARS DID WHAT WAS DONE +8461-258277-0014-1663: QUOTH (AL RASHID->A RASCHID) WHOSE HEAD IS THIS +8461-258277-0015-1664: SO (ALI->I) RELATED TO HIM ALL THAT (HAD->*) PASSED FROM FIRST (TO->*) LAST AND THE CALIPH SAID I (HAD->HATE) NOT THOUGHT THOU WOULDST KILL HIM FOR THAT HE WAS A SORCERER +8461-258277-0016-1665: HE REPLIED I HAVE FORTY LADS BUT THEY ARE IN CAIRO +8461-278226-0000-1633: AND LAURA HAD HER OWN PET PLANS +8461-278226-0001-1634: SHE MEANT TO BE SCRUPULOUSLY CONSCIENTIOUS IN THE ADMINISTRATION OF (HER TALENTS->A TALENT) AND SOMETIMES AT CHURCH ON A SUNDAY WHEN THE (SERMON->SIMON) WAS PARTICULARLY AWAKENING SHE MENTALLY DEBATED (THE->A) SERIOUS QUESTION AS TO WHETHER NEW (BONNETS->BONNET) AND A PAIR OF (JOUVIN'S->ZO BOUNDS) GLOVES DAILY WERE NOT SINFUL BUT I THINK SHE DECIDED THAT THE NEW BONNETS AND GLOVES WERE ON THE WHOLE A (PARDONABLE->PIONABLE) WEAKNESS AS BEING GOOD FOR TRADE +8461-278226-0002-1635: ONE MORNING LAURA TOLD HER HUSBAND WITH A GAY LAUGH THAT SHE WAS GOING TO VICTIMIZE HIM BUT HE WAS TO PROMISE TO BE PATIENT AND BEAR WITH HER FOR ONCE IN A WAY +8461-278226-0003-1636: I WANT TO SEE ALL THE PICTURES THE MODERN PICTURES ESPECIALLY +8461-278226-0004-1637: I REMEMBER ALL THE (RUBENSES AT->RUBEN SAYS THAT) THE LOUVRE FOR I SAW THEM (THREE->FOR) YEARS AGO WHEN I WAS STAYING IN PARIS WITH GRANDPAPA +8461-278226-0005-1638: SHE RETURNED IN A LITTLE MORE THAN TEN MINUTES IN THE FRESHEST TOILETTE ALL PALE SHIMMERING BLUE LIKE THE SPRING SKY WITH (PEARL GREY->PEAR GRAY) GLOVES AND BOOTS AND PARASOL AND A BONNET THAT SEEMED MADE (OF AZURE->TO USURE) BUTTERFLIES +8461-278226-0006-1639: (IT->HE) WAS DRAWING TOWARDS THE CLOSE OF THIS DELIGHTFUL HONEYMOON TOUR AND IT WAS A BRIGHT SUNSHINY MORNING EARLY IN FEBRUARY BUT FEBRUARY IN PARIS IS SOMETIMES BETTER THAN APRIL IN LONDON +8461-278226-0007-1640: BUT SHE FIXED UPON A PICTURE WHICH SHE SAID SHE PREFERRED TO ANYTHING SHE HAD SEEN IN THE GALLERY +8461-278226-0008-1641: PHILIP (JOCELYN->JOSCELYN) WAS EXAMINING SOME PICTURES ON THE OTHER SIDE OF THE ROOM WHEN HIS WIFE MADE (THIS->THE) DISCOVERY +8461-278226-0009-1642: HOW I WISH YOU COULD GET ME A COPY OF THAT PICTURE (PHILIP->FILLIP) LAURA SAID ENTREATINGLY +8461-278226-0010-1643: I SHOULD SO LIKE ONE TO HANG IN MY MORNING ROOM (AT JOCELYN'S ROCK->A JOSCELYN STRUCK) +8461-278226-0011-1644: SHE TURNED TO THE FRENCH (ARTIST->ARD THIS) PRESENTLY AND ASKED HIM WHERE THE ELDER MISTER (KERSTALL->CURSON) LIVED AND IF THERE WAS ANY POSSIBILITY OF SEEING HIM +8461-278226-0012-1645: THEY HAVE SAID THAT HE IS EVEN A LITTLE IMBECILE THAT HE DOES NOT REMEMBER HIMSELF OF THE MOST COMMON EVENTS OF HIS LIFE +8461-278226-0013-1646: BUT THERE ARE SOME OTHERS WHO SAY THAT HIS MEMORY HAS NOT ALTOGETHER FAILED AND THAT HE (IS->*) STILL ENOUGH HARSHLY CRITICAL (TOWARDS->TOWARD) THE WORKS OF OTHERS +8461-278226-0014-1647: I DON'T THINK YOU WILL HAVE ANY DIFFICULTY IN FINDING THE HOUSE +8461-278226-0015-1648: YOU WILL (BE DOING->BETRAY) ME SUCH A FAVOUR (PHILIP->FELLOW) IF (YOU'LL->YOU) SAY YES +8461-281231-0000-1594: HIS FOLLOWERS (RUSHED->RUSH) FORWARD (TO->*) WHERE HE LAY AND THEIR UNITED FORCE COMPELLING THE BLACK (KNIGHT->NIGHT) TO PAUSE THEY DRAGGED (THEIR->THE) WOUNDED LEADER WITHIN THE WALLS 
+8461-281231-0001-1595: IT WAS ON THEIR JOURNEY TO THAT TOWN THAT THEY WERE OVERTAKEN ON THE ROAD BY (CEDRIC->SADRIC) AND HIS PARTY IN WHOSE COMPANY THEY WERE AFTERWARDS CARRIED CAPTIVE TO THE (CASTLE->COUNCIL) OF (TORQUILSTONE->TORCLESTONE) +8461-281231-0002-1596: (AS HE->I SEE) LAY UPON HIS BED (RACKED->RAPPED) WITH PAIN AND (MENTAL->MANTLE) AGONY AND FILLED WITH THE FEAR OF RAPIDLY APPROACHING DEATH HE HEARD A VOICE ADDRESS HIM +8461-281231-0003-1597: WHAT ART THOU HE EXCLAIMED IN TERROR +8461-281231-0004-1598: LEAVE ME AND SEEK THE SAXON (WITCH ULRICA->WHICH OREKA) WHO WAS MY TEMPTRESS LET HER AS WELL AS I TASTE THE TORTURES WHICH ANTICIPATE HELL +8461-281231-0005-1599: EXCLAIMED THE NORMAN (HO->OH) +8461-281231-0006-1600: (REMEMBEREST->REMEMBER AS) THOU THE MAGAZINE OF FUEL THAT IS (STORED->STOLE) BENEATH THESE APARTMENTS WOMAN +8461-281231-0007-1601: THEY ARE FAST RISING AT LEAST SAID (ULRICA->A RIKA) AND A SIGNAL SHALL SOON WAVE (TO WARN->TOWARD) THE BESIEGERS TO PRESS HARD UPON THOSE WHO WOULD EXTINGUISH THEM +8461-281231-0008-1602: MEANWHILE THE BLACK KNIGHT HAD LED HIS FORCES AGAIN TO THE ATTACK AND SO VIGOROUS WAS THEIR ASSAULT THAT BEFORE LONG THE GATE OF THE CASTLE ALONE SEPARATED THEM FROM THOSE WITHIN +8461-281231-0009-1603: THE DEFENDERS (FINDING->FIND IN) THE CASTLE TO BE ON FIRE NOW DETERMINED TO SELL THEIR LIVES AS (DEARLY->DAILY) AS THEY COULD AND HEADED BY (DE BRACY->THE BRAZY) THEY THREW OPEN THE GATE AND WERE AT ONCE INVOLVED IN A TERRIFIC CONFLICT WITH THOSE OUTSIDE +8461-281231-0010-1604: THE BLACK (KNIGHT->NIGHT) WITH (PORTENTOUS->POTENTAL) STRENGTH (FORCED HIS WAY INWARD->FORCES AWAY IN WOOD) IN DESPITE OF (DE->THE) BRACY AND HIS FOLLOWERS +8461-281231-0011-1605: TWO OF THE FOREMOST INSTANTLY FELL AND THE REST GAVE WAY NOTWITHSTANDING ALL (THEIR LEADERS->THE LEADER'S) EFFORTS TO STOP THEM +8461-281231-0012-1606: THE BLACK (KNIGHT->NIGHT) WAS SOON ENGAGED IN DESPERATE COMBAT WITH THE NORMAN CHIEF AND (THE VAULTED->DEVOTED) ROOF OF THE HALL RUNG WITH (THEIR->A) FURIOUS BLOWS +8461-281231-0013-1607: AT LENGTH (DE BRACY->THE BRACEY) FELL +8461-281231-0014-1608: TELL ME THY NAME OR WORK THY PLEASURE ON ME +8461-281231-0015-1609: YET FIRST LET ME SAY SAID (DE BRACY->DEBRACY) WHAT (IT->DID) IMPORTS THEE TO KNOW +8461-281231-0016-1610: EXCLAIMED THE BLACK KNIGHT PRISONER AND (PERISH->PARISH) +8461-281231-0017-1611: THE LIFE OF EVERY MAN IN THE CASTLE (SHALL->SHE'LL) ANSWER IT IF A HAIR OF HIS HEAD BE SINGED SHOW ME HIS CHAMBER +8461-281231-0018-1612: RAISING THE WOUNDED MAN WITH (EASE->THESE) THE BLACK KNIGHT RUSHED WITH (HIM->THEM) TO THE (POSTERN->PASTING) GATE AND HAVING THERE DELIVERED HIS BURDEN TO THE CARE OF TWO (YEOMEN->YEOMAN) HE AGAIN ENTERED THE CASTLE TO ASSIST IN THE RESCUE OF (THE OTHER->THAT A) PRISONERS +8461-281231-0019-1613: BUT IN OTHER PARTS THE BESIEGERS PURSUED THE DEFENDERS OF THE CASTLE FROM CHAMBER TO CHAMBER AND SATIATED IN (THEIR->THE) BLOOD THE VENGEANCE WHICH HAD LONG ANIMATED THEM AGAINST THE SOLDIERS OF THE TYRANT FRONT DE BOEUF +8461-281231-0020-1614: AS THE FIRE (COMMENCED->COMMANDS) TO SPREAD RAPIDLY THROUGH ALL PARTS OF THE CASTLE (ULRICA->OR RICA) APPEARED ON ONE OF THE TURRETS +8461-281231-0021-1615: BEFORE LONG THE TOWERING FLAMES HAD SURMOUNTED EVERY OBSTRUCTION AND ROSE TO THE EVENING SKIES (ONE->WHEN) HUGE AND BURNING BEACON (SEEN->SEEMED) FAR AND WIDE THROUGH THE ADJACENT COUNTRY (TOWER->TOWERED) AFTER TOWER CRASHED DOWN WITH BLAZING ROOF AND RAFTER +8461-281231-0022-1616: AT LENGTH WITH A TERRIFIC CRASH THE WHOLE (TURRET->TORR) 
GAVE WAY AND SHE PERISHED IN (THE->*) FLAMES WHICH (HAD->I) CONSUMED HER TYRANT +8461-281231-0023-1617: WHEN THE OUTLAWS HAD DIVIDED THE SPOILS WHICH THEY HAD TAKEN FROM THE CASTLE OF (TORQUILSTONE->TORKILSTONE) CEDRIC PREPARED TO TAKE HIS DEPARTURE +8461-281231-0024-1618: HE LEFT THE GALLANT BAND OF FORESTERS SORROWING DEEPLY FOR HIS LOST FRIEND THE LORD OF (CONINGSBURGH->CONNINGSBURG) AND HE AND HIS FOLLOWERS HAD SCARCE DEPARTED WHEN A PROCESSION MOVED SLOWLY FROM UNDER THE GREENWOOD BRANCHES IN THE DIRECTION WHICH HE HAD TAKEN IN THE CENTRE OF WHICH WAS THE CAR IN WHICH THE BODY OF (ATHELSTANE->ADDSTEIN) WAS LAID +8461-281231-0025-1619: (DE BRACY->DEBRACY) BOWED LOW AND IN SILENCE THREW HIMSELF UPON A HORSE AND GALLOPED OFF THROUGH THE WOOD +8461-281231-0026-1620: HERE IS A BUGLE WHICH AN ENGLISH YEOMAN HAS ONCE WORN I PRAY YOU TO KEEP IT AS A MEMORIAL OF YOUR GALLANT BEARING +8461-281231-0027-1621: SO SAYING HE MOUNTED HIS STRONG WAR HORSE AND RODE OFF THROUGH THE FOREST +8461-281231-0028-1622: DURING ALL THIS TIME ISAAC OF YORK SAT MOURNFULLY APART GRIEVING FOR THE LOSS OF HIS (DEARLY->STEELY) LOVED DAUGHTER REBECCA +8461-281231-0029-1623: AND WITH THIS EPISTLE (THE UNHAPPY->THEN HAPPY) OLD MAN SET OUT TO PROCURE HIS DAUGHTER'S LIBERATION +8461-281231-0030-1624: THE TEMPLAR IS FLED SAID (DE->THE) BRACY IN ANSWER TO THE PRINCE'S EAGER QUESTIONS (FRONT DE BOEUF->FROM THE BIRTH) YOU WILL NEVER SEE MORE AND HE ADDED IN A LOW AND EMPHATIC TONE RICHARD IS (IN->AN) ENGLAND I HAVE SEEN HIM AND SPOKEN WITH HIM +8461-281231-0031-1625: HE APPEALED TO (DE BRACY->THE BRACELE) TO ASSIST HIM IN (THIS->HIS) PROJECT AND BECAME AT ONCE DEEPLY SUSPICIOUS OF THE (KNIGHT'S->NIGHT'S) LOYALTY TOWARDS HIM WHEN HE DECLINED TO LIFT HAND AGAINST THE MAN WHO HAD SPARED HIS OWN LIFE +8461-281231-0032-1626: BEFORE REACHING (HIS->ITS) DESTINATION HE WAS TOLD THAT LUCAS (DE BEAUMANOIR->THE BOURMANOIR) THE GRAND MASTER OF THE ORDER OF THE TEMPLARS WAS THEN ON VISIT TO (THE PRECEPTORY->THEIR PERCEPTORY) +8461-281231-0033-1627: HE HAD NOT UNTIL THEN BEEN INFORMED (OF->TO) THE PRESENCE OF THE JEWISH MAIDEN IN THE ABODE OF THE TEMPLARS AND GREAT WAS HIS FURY AND INDIGNATION (ON->OF) LEARNING THAT SHE WAS AMONGST THEM +8461-281231-0034-1628: POOR ISAAC WAS HURRIED OFF ACCORDINGLY AND EXPELLED FROM THE PRECEPTORY ALL HIS ENTREATIES AND EVEN HIS (OFFERS->OFFICE) UNHEARD AND DISREGARDED +8461-281231-0035-1629: THE ASSURANCE THAT SHE POSSESSED SOME FRIEND (IN THIS->AND HIS) AWFUL ASSEMBLY GAVE (HER->A) COURAGE TO LOOK (AROUND->ROUND) AND TO MARK INTO WHOSE PRESENCE SHE HAD BEEN CONDUCTED +8461-281231-0036-1630: SHE GAZED ACCORDINGLY UPON A SCENE WHICH MIGHT WELL HAVE STRUCK TERROR INTO A BOLDER HEART THAN HERS +8461-281231-0037-1631: AT HIS FEET WAS PLACED (A->THE) TABLE OCCUPIED BY TWO SCRIBES WHOSE DUTY (IT->*) WAS TO RECORD THE PROCEEDINGS OF THE DAY +8461-281231-0038-1632: THE PRECEPTORS OF WHOM (THERE->THEY) WERE FOUR PRESENT OCCUPIED SEATS BEHIND (THEIR->THE) SUPERIORS AND BEHIND THEM STOOD THE ESQUIRES OF THE ORDER (ROBED->ROPED) IN WHITE + +SUBSTITUTIONS: count ref -> hyp +45 THE -> A +35 AND -> IN +30 A -> THE +28 IN -> AND +13 AN -> AND +12 ANNIE -> ANY +10 THIS -> THE +10 THE -> TO +9 TO -> THE +9 THEIR -> THE +9 LESLIE -> LISLEY +9 DICKIE -> DICKY +8 THAT -> THE +7 THE -> THEY +7 HER -> A +7 DE -> THE +7 AND -> AN +6 THIS -> HIS +6 THEY -> THERE +6 THE -> THAT +6 OH -> O +6 MURDOCH -> MURDOCK +6 LARCH -> LARGE +6 I -> AND +6 ARCHY -> ARCHIE +5 UPON -> UP +5 TOO -> TO +5 THE -> THIS +5 SIGURD -> CIGARET +5 SHARRKAN 
-> SHARKAN +5 SET -> SAID +5 ORGANISER -> ORGANIZER +5 OR -> OF +5 OF -> A +5 HERMON -> HERMAN +5 HATH -> HAD +5 AROUND -> ROUND +4 YOU'RE -> YOU +4 WERE -> WHERE +4 WAS -> IS +4 THIS -> THESE +4 THESE -> THIS +4 THERE -> THEIR +4 THE -> THEIR +4 REGIN -> RIGAN +4 PRIORESS -> PRIORS +4 OL -> OLD +4 O -> OF +4 N'T -> NOT +4 MISSUS -> MISS +4 MAN -> MEN +4 KNOW -> NO +4 KNIGHT -> NIGHT +4 KINE -> KIND +4 IZZY -> IS +4 IT -> HE +4 IT -> A +4 IS -> AS +4 INTERESTS -> INTEREST +4 IM -> HIM +4 HAS -> HAD +4 HAD -> AND +4 DEFENSE -> DEFENCE +4 ANYONE -> ANY +4 AN -> IN +4 A -> I +4 A -> AND +3 ZAU -> ZAO +3 WOULD -> WILL +3 WITH -> WAS +3 WILFRID -> WILFRED +3 WHEN -> WITH +3 WHEN -> AND +3 WHAT -> BUT +3 TRY -> TRIED +3 THERE -> THEY +3 THERE -> THERE'S +3 THERE -> THEN +3 THAT -> AT +3 SET -> SAT +3 SANCT -> SAINT +3 REVEREND -> REVERE +3 OUR -> HER +3 OR -> O +3 ON -> IN +3 OL -> ALL +3 OF -> TO +3 O -> ARE +3 MISTAH -> MISTER +3 LIL -> LITTLE +3 LEVER -> LOVER +3 JES -> JUST +3 JACKAL -> JACK +3 ITS -> HIS +3 IN -> AN +3 I -> I'VE +3 I -> I'M +3 HIM -> EM +3 HEAR -> HERE +3 HE'S -> IS +3 HE -> YOU +3 HE -> HE'D +3 HAS -> HATH +3 FROM -> FOR +3 FAUVENT -> PREVENT +3 DO -> TO +3 BRAHMAN -> BRAMIN +3 BEFEL -> BEFELL +3 BAGHDAD -> BAGDAD +3 AT -> THAT +3 AT -> A +3 AS -> IS +3 ARSINOE -> ARSENO +3 ARE -> OUR +3 AND -> ON +3 AIN'T -> AND +3 A -> TO +3 A -> HER +3 A -> AT +2 YOUR -> YOU'RE +2 YOUR -> THE +2 YOU'VE -> YOU +2 YOU'LL -> YOU +2 YOU -> YE +2 YOU -> HE +2 YO -> YOU +2 YER -> YOU +2 WOULD -> HAD +2 WITH -> TO +2 WINTER -> WINDOW +2 WINE -> WHY +2 WILL -> WOULD +2 WILL -> WE +2 WILL -> WAS +2 WILDERNESS -> WIDERNESS +2 WHO -> ONE +2 WHITE -> WIDE +2 WHERE -> WITH +2 WERE -> WITH +2 WERE -> WAS +2 WENT -> WHEN +2 WE'RE -> WE +2 WAYNE -> WAIN +2 USE -> USED +2 UP -> UPSTAIRS +2 UNDER -> AND +2 TWO -> TOO +2 TONIGHT -> TO +2 TO -> OF +2 TO -> INTO +2 TO -> A +2 THROUGH -> TO +2 THOUSANDS -> THOUSAND +2 THOUGH -> THAT +2 THIS -> THAT +2 THIS -> GOT +2 THEY'RE -> THEY +2 THEY'RE -> THEIR +2 THEY -> THEIR +2 THEY -> THE +2 THESE -> HIS +2 THEN -> THAT +2 THEN -> AND +2 THEE -> THE +2 THE -> THEM +2 THE -> NO +2 THAT'S -> THAT +2 THAT -> THY +2 THAN -> THEN +2 THAN -> AND +2 TAMAR -> TO +2 SYDNEY -> SIDNEY +2 STONEWALL -> STONE +2 STOKER -> STOCKER +2 SOMEONE -> SOME +2 SINDBAD -> SINBAD +2 SIGURD -> CIGAR +2 SHOULDERS -> SHOULDER +2 SHELL -> SHELLFISH +2 SHE -> YOU +2 SHE -> SHE'LL +2 SHAWS -> SHORES +2 SHALL -> SHOULD +2 SERGEY -> SO +2 SELLER -> CELLAR +2 SEEK -> SEE +2 SAY -> SEE +2 RUSSIA -> RATIA +2 ROOMFELLOW -> ROOM +2 ROMANCE -> ROMANS +2 ROBBERIES -> ROBBERS +2 RAYSTOKE -> RAYSTROKE +2 PROCLUS -> PROCLIS +2 PRIORESS -> PIRRUS +2 POLL -> POLE +2 PLAIN -> PLANE +2 PLACE -> PLACED +2 PIGEONCOTE -> PIGEON +2 PHOSPHOR -> PHOSPHO +2 PARTS -> PART +2 ONE -> WHEN +2 ON -> AND +2 OLIVE -> I +2 OLD -> ALL +2 OF -> WHAT +2 OF -> O +2 OF -> HAVE +2 OF -> AT +2 OF -> AS +2 O -> WHO +2 O -> OH +2 NUTRITION -> UTRITION +2 NOUGHT -> NOT +2 NOT -> NOW +2 NORTHFIELD -> NORTH +2 NOR -> NO +2 NEAREST -> NURSE +2 MYRTILUS -> MYRTULAS +2 MUST -> WAS +2 MURDOCH -> MARDOCK +2 MISSUS -> MISTER +2 MINE -> MY +2 MINE -> MIGHT +2 MILICENT -> MILLISON +2 MESTIENNE -> MESS +2 MESTER -> MISTER +2 MENAHEM -> MANY +2 MEN -> MAN +2 MEAT -> ME +2 MADELEINE -> MADELEIN +2 M -> AM +2 LUNA -> LENA +2 LIKED -> LIKE +2 LIDDY -> LIVY +2 LET -> LED +2 LESLIE -> LIZZIE +2 LAST -> LOST +2 KEYS -> CASE +2 KEEP -> HE +2 KEEP -> GIVE +2 KANSAS -> KANSA +2 IZZY -> IZZIE +2 IT'S -> IS +2 IT -> THIS +2 IT -> ITS +2 IT -> HAD +2 IS -> 
IT'S +2 IS -> HIS +2 IS -> HAS +2 INTO -> TO +2 INTO -> IN +2 INSCRIPTIONS -> SCRIPTIONS +2 IN'T -> IN +2 IN -> ON +2 IF -> OF +2 I'VE -> I +2 I'M -> I +2 I'LL -> I +2 I'D -> I'VE +2 I'D -> I +2 I -> AS +2 HOZE -> HOSE +2 HO -> OH +2 HIS -> THE +2 HIS -> ITS +2 HIS -> IS +2 HIM -> THEM +2 HERE -> HEAR +2 HER -> THE +2 HE -> IT +2 HE -> HIS +2 HAYS -> HAYES +2 HAVE -> HATH +2 HAVE -> HAD +2 HAS -> IS +2 HAS -> AS +2 HANDS -> HAND +2 HAID -> HEAD +2 GURR -> GURG +2 GURR -> GIRL +2 GRAEME -> GRAHAM +2 GOING -> GOIN +2 GOD -> GOT +2 GOD -> GONE +2 GIVING -> GIVEN +2 FRANZ -> FRANCE +2 FORTY -> FORTE +2 FOLLOWED -> FOLLOW +2 FOALS -> FOLDS +2 FLEROV'S -> FLIROV'S +2 FESTAL -> FEAST +2 FAVOR -> FAVOUR +2 FAUVENT -> FOR +2 FAUCHELEVENT -> FOR +2 EXECUTIVE -> EXECUTED +2 ETERNAL -> HAD +2 ENOUGH -> UP +2 E'S -> HE +2 DONE -> TURNED +2 DOG -> DOOR +2 DO -> TWO +2 DINKS -> THINK +2 DIDN'T -> THEN +2 DESSERTS -> DESERTS +2 DE -> DEBRACY +2 COUNTRY -> COUNTRIES +2 COUNSEL -> COUNCIL +2 CORNER -> CORN +2 COMMONWEALTH -> CORNWEALTH +2 CENTER -> CENTRE +2 CARROLL -> CAROL +2 BUT -> WITH +2 BUT -> THAT +2 BULK -> BARK +2 BROTHERS -> BROTHER'S +2 BOUT -> ABOUT +2 BESSY -> BUSY +2 BEG -> THEY +2 BEFORE -> FOR +2 BEEN -> THEN +2 AWK -> AWKWARD +2 AWHILE -> A +2 AT -> TO +2 AT -> IT +2 AT -> IN +2 AT -> AUNT +2 ASKED -> AS +2 ARE -> OR +2 ARE -> A +2 ANY -> AND +2 AND -> THEN +2 AND -> A +2 ALL -> OR +2 ALL -> ON +2 AL -> A +2 A -> OUR +2 A -> IT +2 A -> HAVE +2 A -> ARE +2 A -> AN +1 ZEMSTVOS -> THEM +1 ZAYNAB -> THINE +1 ZAU -> ZOAMA +1 ZAU -> ZA +1 ZAU -> THOUA +1 ZAU -> THOU +1 ZAU -> OWL +1 ZARATHUSTRA -> THEY +1 ZARATHUSTRA -> THEREUSTRA +1 ZARATHUSTRA -> THE +1 ZARATHUSTRA -> OR +1 ZARATHUSTRA -> DECK +1 ZARATHUSTRA -> ARTISTRA +1 YUSS -> YES +1 YOURSELF -> YOUR +1 YOUR -> YOU +1 YOUR -> THEIR +1 YOUR -> OUR +1 YOUR -> OPEN +1 YOUR -> HE +1 YOUNG -> OWN +1 YOU'VE -> YOUR +1 YOU'D -> YOU +1 YOU -> YOURSELVES +1 YOU -> YOURS +1 YOU -> YOUR +1 YOU -> YOU'RE +1 YOU -> YOU'LL +1 YOU -> USE +1 YOU -> OBLIGED +1 YOU -> JULIA +1 YOU -> IT +1 YOU -> EVEN +1 YOU -> EUPS +1 YOU -> EUGEUM +1 YO'LL -> YOU'LL +1 YO'LL -> YOU +1 YO' -> YOU +1 YO -> YOU'LL +1 YET -> IN +1 YET -> HE +1 YET -> GET +1 YEP -> HEIP +1 YEOMEN -> YEOMAN +1 YELLS -> YEARS +1 YEARS -> EARS +1 YEARNS -> URNS +1 YEAR -> YOUR +1 YE -> YOU +1 YE -> YEAR +1 YAUSKY -> YSKEEPER +1 YARD -> AND +1 YAHWEH -> YANAWAY +1 WYLDER'S -> WILDER'S +1 WUNNERED -> WANTED +1 WROTE -> ONES +1 WRITE -> RIGHT +1 WRIT -> WRITE +1 WRETCH -> THATCH +1 WOUNDS -> WINDS +1 WOULD -> WERE +1 WOULD -> DID +1 WOT -> WHAT +1 WORTH -> WORSE +1 WORSHIPPERS -> WORSE +1 WORSHIP'S -> WORSHIP +1 WORRY -> WERE +1 WORLD -> WOOLWRIGHT +1 WORKS -> WORK +1 WORKMAN -> WORKMEN +1 WORKINGMEN -> WORKING +1 WORKED -> WORK +1 WORK -> WORKADAY +1 WORD -> WORLD +1 WORD -> WORDS +1 WOODSON -> WOODS +1 WOODEN -> WOOD +1 WONT -> WANT +1 WONDERED -> WANTED +1 WONDERED -> WANDERED +1 WONDERED -> I +1 WONDER -> WANDER +1 WOMEN -> WOMAN +1 WOMAN -> WOMEN +1 WOKE -> WALKING +1 WODE'S -> WORDS +1 WITHAL -> WITH +1 WITH -> WOULD +1 WITH -> WIT +1 WITH -> WIS +1 WITH -> WIDTH +1 WITH -> WHETHER +1 WITH -> WHEN +1 WITH -> WE +1 WITH -> ITS +1 WITH -> FOR +1 WITCH -> WHICH +1 WISHT -> WISHED +1 WISCONSIN -> MISS +1 WIRES -> WISE +1 WINTERS -> WINTERSPIN +1 WINKED -> WAITED +1 WING -> WINGED +1 WINE -> WHITE +1 WINDOW -> WIND +1 WILLY -> BILLY +1 WILL -> WITHOUT +1 WILL -> WILT +1 WILL -> WHO +1 WILL -> WHERE +1 WILL -> WHEN +1 WILL -> WHEEL +1 WILL -> WALLA +1 WILL -> TO +1 WILL -> OLY +1 WILL -> OH +1 
WILKSES -> WILTZES +1 WILKS -> WILKES +1 WILFRID -> WILL +1 WILFRID -> WILFRIED +1 WILDLY -> WIDELY +1 WILDEST -> WALLACE +1 WILDERNESS -> WIDENANCE +1 WILDERNESS -> WEARINESS +1 WILDERNESS -> LITERN +1 WILD -> WIND +1 WILD -> WHITE +1 WIFE -> WIF +1 WIFE -> MY +1 WIDEAWAKE -> WIDE +1 WICKER'S -> JOKERS +1 WICKER -> WICKER'S +1 WI -> WITH +1 WHOM -> WHY +1 WHO'D -> WHO +1 WHO -> WITH +1 WHO -> WHOSE +1 WHO -> THE +1 WHO -> HER +1 WHO -> AND +1 WHITHER -> WHETHER +1 WHITE -> WHY +1 WHISKERED -> WAS +1 WHILOME -> WILL +1 WHILE -> WIDE +1 WHILE -> WHERE +1 WHICH -> WOULD +1 WHICH -> WITCH +1 WHICH -> SPREAD +1 WHICH -> PITCHES +1 WHETHER -> WHITHER +1 WHETHER -> WEATHER +1 WHEREABOUTS -> WHEREABOUT +1 WHERE -> WILL +1 WHERE -> WEAR +1 WHERE -> THERE +1 WHERE -> HER +1 WHER -> WERE +1 WHEN -> ONE +1 WHEELER -> WHALER +1 WHATE'ER -> WHATEVER +1 WHAT'S -> ONCE +1 WHAT -> WOULD +1 WHAT -> WITH +1 WHAT -> WHICH +1 WHAT -> WHEN +1 WHAT -> OGA +1 WHAT -> IT +1 WHAT -> FOR +1 WHAT -> AT +1 WHAT -> A +1 WHACKS -> WAX +1 WESTPORT -> WESTBURT +1 WERE -> YOU +1 WERE -> WHEN +1 WERE -> WENT +1 WERE -> WED +1 WERE -> RAN +1 WERE -> FELL +1 WENT -> APT +1 WELSH -> WELL +1 WELLS -> WELL +1 WELLS -> WALES +1 WELL -> WHY +1 WELL -> WHILE +1 WELL -> AWAY +1 WEEVILLY -> WEEBLY +1 WEEKLY -> WEAKLY +1 WEEDS -> REEDS +1 WEBB'S -> WHIPS +1 WEAK -> WEEK +1 WE'VE -> WE +1 WE'RE -> WERE +1 WE'RE -> HE +1 WE -> WILL +1 WE -> WAS +1 WE -> REPROVE +1 WE -> REED +1 WE -> REALLY +1 WE -> IT +1 WE -> A +1 WAYS -> WAGE +1 WAYNE -> WANE +1 WAY -> IN +1 WAVERLY -> WAVERLEY +1 WATONWAN -> WATERWIN +1 WATER -> UNDER +1 WATCHMAKER'S -> WATCHMAKERS +1 WATCHED -> WATCH +1 WASN'T -> WAS +1 WAS -> WISE +1 WAS -> WILL +1 WAS -> WHICH +1 WAS -> WERE +1 WARS -> WALLS +1 WARDERS -> OURS +1 WARD'S -> WORDS +1 WARD -> WARREN +1 WANTS -> ONCE +1 WANTON -> WARRENTON +1 WANTED -> WATER +1 WANTED -> WANTON +1 WANT -> WARNED +1 WANT -> WANTS +1 WANDERERS -> WONDERS +1 WANDERER -> WONDER +1 WALLS -> WARDS +1 WALL -> WAR +1 WAKE -> AWAKE +1 WAITING -> WINNING +1 WAITIN -> WAITING +1 WAIT -> WHERE +1 WAGGOT -> WAGGET +1 WAGGING -> WORKING +1 WADED -> WAITED +1 WADDED -> WATERED +1 VOUGHT -> VAULT +1 VON -> FOR +1 VOMITING -> RHOMETTING +1 VOICED -> FOREST +1 VIOLENCE -> ONLENETS +1 VIOLENCE -> FIDANTS +1 VINTAGE -> VENTAGE +1 VILLAGES -> RELIGIOUS +1 VILLAGERS -> VILLAGES +1 VIL -> VILLE +1 VIGILANCE -> VISIONS +1 VIGILANCE -> VICHILLENZ +1 VESTRY -> VETCHERY +1 VERSES -> VERSE +1 VERILY -> VERY +1 VENTRILOQUIST -> VENTILOQUE +1 VEIL -> VEAL +1 VEHEMENTLY -> TO +1 VAVASOUR -> VAVASOR +1 VAULT -> VOLT +1 VAUGIRARD -> VIGOR +1 VAST -> VATS +1 VASSILIEVITCH -> ON +1 VALET -> VALLED +1 UTTER -> OTHER +1 USEFUL -> YEARS +1 US -> UP +1 US -> STARED +1 US -> ITS +1 US -> FOR +1 US -> DISOUT +1 URARTU -> YOU +1 URARTU -> HERE +1 URARTIAN -> GERGIAN +1 UP -> UPON +1 UP -> THAT +1 UP -> NAP +1 UNTO -> TO +1 UNTO -> ON +1 UNTO -> IN +1 UNTO -> AND +1 UNTIL -> INTO +1 UNREWARDED -> AND +1 UNLESS -> AND +1 UNHAPPY -> HAPPY +1 UNEXPECTED -> INEXPECTED +1 UNDERTAKER'S -> UNDERTAKERS +1 UNDER -> ON +1 UNCLE -> AND +1 UNADULTERATED -> ADULTERATED +1 UN -> ONE +1 ULTIMATELY -> ULTIMATE +1 ULRICA -> OREKA +1 ULRICA -> OR +1 ULRICA -> A +1 UKINZER -> A +1 UDDER -> UTTER +1 TYRANNY -> SOON +1 TWYMAN'S -> TWEMINS +1 TWO -> TO +1 TWO -> DO +1 TWAS -> TOWARDS +1 TUTOR -> TUDOR +1 TURRET -> TORR +1 TURNS -> TURNED +1 TURNING -> SHIRTING +1 TUBERCULOUS -> TIBERICAN'S +1 TRY -> TROUT +1 TRY -> TRIFLE +1 TRY -> TRIES +1 TRUSTEE -> TRUSTY +1 TRUNDLED -> TUMBLED +1 TRULY -> 
JULIE +1 TROOPS -> TRUCE +1 TRIVET -> TRIBUT +1 TRIPES -> TRIPE'S +1 TRIFLE -> TRAVEL +1 TRIBES -> TRIUMPHS +1 TRIBES -> TRINES +1 TRIBE -> TRIUMPH +1 TRIBE -> TIME +1 TRELAWNEY -> TREE +1 TREE -> HERE +1 TREASURE -> TREASURES +1 TRAVELED -> TRAVELLED +1 TRANSSHIP -> TRANSHIP +1 TRANSLATED -> TRANSGRATED +1 TRAINING -> ORTRAINING +1 TRAINDAWG -> TRAIN +1 TRAFFIC -> LIFE +1 TRADITIONS -> JUDICINT +1 TOWER -> TOWERED +1 TOWARDS -> TOWARD +1 TOWARDS -> TO +1 TOWARDS -> DOOR +1 TOWARD -> TOWARDS +1 TOWARD -> TO +1 TOUR -> TO +1 TOUGHS -> TUFTS +1 TOUGH -> TO +1 TOTING -> TOATING +1 TOSSING -> TAUSEN +1 TORQUILSTONE -> TORKILSTONE +1 TORQUILSTONE -> TORCLESTONE +1 TORN -> TAUGHT +1 TORMENT -> AND +1 TORCH -> TORTURE +1 TOP -> TAR +1 TOP -> STOPPED +1 TOOK -> TO +1 TOO -> TWO +1 TOO -> DO +1 TOO -> CHIMNETS +1 TONGUE -> DONG +1 TOMORROW -> TO +1 TOMATO -> METAL +1 TOM -> TUMULT +1 TOLERBLE -> TOLERABLE +1 TOLD -> TOO +1 TOLD -> STOOTE +1 TOILET -> TOLD +1 TOILET -> DOLIGHT +1 TO -> WHO +1 TO -> TWO +1 TO -> TRITESYMOSA'S +1 TO -> TOWER +1 TO -> TOWARD +1 TO -> TOO +1 TO -> TOIL +1 TO -> TOGETHER +1 TO -> THAT +1 TO -> SURRENDER +1 TO -> ROOM +1 TO -> REFORMED +1 TO -> OR +1 TO -> MADE +1 TO -> INSTRUCTIVE +1 TO -> HURT +1 TO -> FOR +1 TO -> FIRST +1 TO -> DIRECTIFY +1 TO -> DID +1 TO -> DEER +1 TO -> AS +1 TITLE -> TANOV +1 TIRING -> ENTIRE +1 TIRESOME -> PARASSOME +1 TIRED -> TIE +1 TIRED -> HIRED +1 TINCTURED -> TINTED +1 TIME'S -> TIMES +1 TIME -> TIE +1 TIMBER -> TIMBOO +1 TILLERS -> TELLERS +1 TILL -> TO +1 TIGLATH -> TIGLASS +1 TIGLATH -> TIGG +1 TIGLATH -> TIG +1 TIGLATH -> TAKE +1 TIGER -> TIRE +1 TIGER -> STAGER +1 TIDINGS -> HIDINGS +1 TIBER -> TYBER +1 THY -> THINE +1 THY -> THEIR +1 THY -> DIVORITES +1 THY -> DAGGOOD +1 THUS -> THIS +1 THUS -> DOES +1 THUMB -> TENTH'S +1 THROWING -> ROWING +1 THROBBED -> THROPPED +1 THREE -> THIRD'S +1 THREE -> FOR +1 THOUSANDTH -> THOUSAND +1 THOUGHTS -> TORCH +1 THOUGH -> THOSE +1 THOUGH -> THE +1 THOU -> THOUGH +1 THOU -> DONE +1 THOSE -> THUS +1 THOSE -> LUCIKAM +1 THITHER -> THAT +1 THIS -> US +1 THIS -> TO +1 THIS -> THERE'S +1 THIS -> MISSUS +1 THIS -> ITS +1 THIRST -> THOSE +1 THINK -> THING +1 THINK -> THEY +1 THINGS -> THANKS +1 THIN -> FLITTON +1 THEY'RE -> HER +1 THEY'RE -> ARE +1 THEY -> THIS +1 THEY -> THEY'VE +1 THEY -> THAT +1 THEY -> ISSUED +1 THEY -> HE +1 THEY -> DO +1 THEY -> DESLEY +1 THEY -> DECLINED +1 THESE -> THE +1 THERE'S -> THERE +1 THERE'LL -> THERE +1 THERE -> WRITHS +1 THERE -> THAT +1 THERE -> DICK +1 THERE -> DEER +1 THEN -> WITHIN +1 THEN -> WHEN +1 THEN -> THEY +1 THEN -> THAN +1 THEN -> TEN +1 THEN -> IN +1 THEN -> DID +1 THEM -> HIM +1 THEM -> EM +1 THEIR -> THEY +1 THEIR -> HER +1 THEIR -> DEAR +1 THEIR -> A +1 THEE -> ME +1 THEATRE -> FUTURE +1 THE -> WE +1 THE -> VIRTUARY +1 THE -> THOUGH +1 THE -> THIRD +1 THE -> THESE +1 THE -> THEREFORE +1 THE -> THEN +1 THE -> REMAINED +1 THE -> PATH +1 THE -> OUR +1 THE -> OTHERS +1 THE -> OTHER +1 THE -> LOVE +1 THE -> LOGS +1 THE -> LIDA +1 THE -> KIND +1 THE -> ITALY +1 THE -> IT +1 THE -> INCES +1 THE -> HER +1 THE -> HALF +1 THE -> EVEN +1 THE -> EIGHTH +1 THE -> EARTH +1 THE -> DURING +1 THE -> DISAPPEAR +1 THE -> DID +1 THE -> DEVOTED +1 THE -> DECLINE +1 THE -> DEBRAMIN +1 THE -> DE +1 THE -> COAT +1 THE -> BEHOLDAY +1 THE -> ASHORE +1 THE -> AND +1 THE -> AN +1 THE -> AGAIN +1 THE -> ABIDING +1 THAT'S -> OF +1 THAT'S -> IT'S +1 THAT'S -> I +1 THAT'LL -> THAT +1 THAT -> WITH +1 THAT -> US +1 THAT -> TILL +1 THAT -> THEY +1 THAT -> THERE +1 THAT -> THEN +1 THAT -> 
THEIR +1 THAT -> SNAT +1 THAT -> SET +1 THAT -> NEITHER +1 THAT -> LET +1 THAT -> HIM +1 THAT -> DID +1 THAT -> AND +1 THAT -> ABOUT +1 THAT -> A +1 THAN -> GOD +1 TESTIMONY -> DETACHEMONY +1 TERRIBLE -> SEVERAL +1 TEND -> INTERESTING +1 TEMPTETH -> TEMPT +1 TEMPLES -> TEMPLE +1 TEMPLES -> SIMPLES +1 TELLTALE -> TELL +1 TELL -> SO +1 TEETH -> CHEEKS +1 TECHNICAL -> TECHNICHAL +1 TEA -> TINEL +1 TAYLOR -> TAILOR +1 TATTLERS -> TEDLERS +1 TASKMASTER -> TAX +1 TARDY -> TIDY +1 TAPPED -> TAP +1 TAPIS -> TAPPY +1 TANQUAM -> TANK +1 TALMASH -> THOMAS +1 TALENTS -> TALENT +1 TALENTS -> OTALONS +1 TAHITI -> TEDI +1 T'OTHER -> THE +1 SYRUP -> SURF +1 SYRUP -> SERF +1 SYRUP -> CYRUP +1 SYNONYMON -> SINNING +1 SYLLOGISM -> SILLIGIOUS +1 SWORD -> WOOD +1 SWELP -> SWAP +1 SWELL -> SWELLIN +1 SWEET -> SWEEP +1 SWAYING -> SWAIN +1 SWAY -> WEIGH +1 SWALLOWED -> SWALLOW +1 SWAG -> WAG +1 SURPRISED -> SURPRISE +1 SUPPOSE -> S'POSE +1 SUPERIOR -> RAYS +1 SUMTHIN -> SOMETHING +1 SUMMIT -> SOMEWHAT +1 SULPHURIC -> SUFFER +1 SUFFOLK -> SUFFOLD +1 SUFFICIENT -> SUSPICION +1 SUFFICIENT -> SUSPICIENT +1 SUFFICES -> SURFACES +1 SUDDENLY -> CERTAINLY +1 SUCKED -> SACKED +1 SUBSTANCE -> ABSTANCE +1 SUB -> SUBTERRAB +1 STRUCK -> DRAP +1 STRODE -> STROLLED +1 STRIFE -> STRIPE +1 STREAK -> STREET +1 STRAYING -> STRAIN +1 STRANGEST -> STRANGER'S +1 STRAITENED -> STRAIGHT +1 STRAINS -> TRAINS +1 STORES -> STORIES +1 STORES -> STALLS +1 STORED -> STOLE +1 STOOD -> TOO +1 STONEWALL -> STERN +1 STONED -> STONE +1 STILL -> STEALING +1 STILL -> SO +1 STEWPAN -> STEWPENT +1 STEW -> DO +1 STERN -> STERNMOST +1 STEPPED -> STEPS +1 STEPAN -> STEP +1 STEP -> SABATANI +1 STENOGRAPHIC -> SYNOGRAPHIC +1 STEEVER -> STEPLE +1 STEERAGE -> STEERING +1 STEAL -> STEED +1 STAYING -> SEEING +1 STAYED -> STEESEY +1 STAYED -> STATING +1 STATUS -> STRATUS +1 STATURE -> STATUE +1 STATES -> ESTATES +1 STATED -> SUITED +1 STATED -> OF +1 STATE -> STATES +1 STATE -> STATEROOM +1 STAS -> STATS +1 STARVING -> STARLING +1 START -> STARTED +1 STARES -> TEARS +1 STARED -> STEERED +1 STANDSTILL -> FANSTILL +1 STANDS -> SENDS +1 STANDARD -> STANDARDS +1 STAGE -> AGE +1 SQUEAMISH -> SCREAMISH +1 SQUARE -> IS +1 SPONSUS -> QUON +1 SPONGE -> SPINES +1 SPONDYLES -> SPONGEALS +1 SPIRIT -> EXPERIMENT +1 SPILLING -> SPINNING +1 SPICE -> SPIES +1 SPARSELY -> FIRSTLY +1 SPAKE -> SPEAK +1 SPAKE -> PIKE +1 SPAKE -> BIG +1 SPADDLE -> SPATTLE +1 SOUTHERN -> SUDDEN +1 SOUSE -> SOUS +1 SOURCE -> SORT +1 SOUL -> SO +1 SOUGHT -> THOUGHT +1 SOUGHT -> SET +1 SOTELES -> SORTILESS +1 SORDID -> SARDID +1 SON -> SUNG +1 SON -> SUN +1 SON -> SUDDEN +1 SON -> SOON +1 SOME -> I'M +1 SOJOURN -> SAJOURN +1 SOCIALIST -> SOCIALLY +1 SO -> TO +1 SO -> SOUL +1 SO -> SORE +1 SO -> SORCAS +1 SO -> SOPHIA +1 SO -> SOMINUTELY +1 SO -> SIR +1 SO -> SELL +1 SO -> MISS +1 SO -> HER +1 SO -> FERNED +1 SNOOZING -> NEWSING +1 SNETKOV -> SNACKOV +1 SNETKOV -> PLACE +1 SNATCHER -> NATURE +1 SNARLED -> SNARLS +1 SMUGGLERS -> SMUGGERS +1 SMUGGLERS -> SMOKE +1 SMUGGLERS -> MOTHERS +1 SMOLNY -> MOLLY +1 SMOKER -> MOKER +1 SMIRCHED -> SMARCHED +1 SMILE -> MINE +1 SMELL -> SMILE +1 SLUNK -> SUNK +1 SLIGHTLY -> SLIGHTLY'LL +1 SLIGHTLY -> SAT +1 SLICES -> SIZES +1 SLEEPING -> KEEPING +1 SLAPPED -> HE +1 SLACKENED -> CLACKENED +1 SLAB -> FLAP +1 SKYLARKS -> SKYLACKS +1 SKIRT -> GOOD +1 SKIN -> SKINNED +1 SKIN -> KIN +1 SKEW -> SKEERO +1 SKEPTICAL -> SCEPTICAL +1 SIZE -> SIGHS +1 SIXTH -> SIX +1 SIXES -> SAXES +1 SIT -> SET +1 SIRE -> SIRS +1 SIR -> SERVANTS +1 SIR -> BECAUSE +1 SINUHIT -> SOON +1 SINNED -> 
SENT +1 SINGS -> SANGS +1 SINGA -> SHANGHAT +1 SILLY -> SIDI +1 SILENCED -> SILENCE +1 SIGURD -> SIR +1 SIGNOR -> SENOR +1 SIGNED -> SIGN +1 SIGHING -> SIGNED +1 SIEVE -> SEA +1 SIDE -> OUT +1 SICUT -> CICEROSINAQUA +1 SHUTTERS -> SHOUTERS +1 SHUT -> SHET +1 SHUT -> AT +1 SHUMAN -> SANCHUMAIN +1 SHUFFLE -> SHOVEL +1 SHUCKS -> SHOCKS +1 SHRUBS -> SHRUGS +1 SHRUBBERY -> AND +1 SHOWS -> SHARES +1 SHOULDST -> SHOULDEST +1 SHOULD -> YOU +1 SHOULD -> SHALL +1 SHOT -> HAD +1 SHOOTER -> SHEETTER +1 SHO'LY -> SURELY +1 SHIPS -> THE +1 SHIPS -> SHIP +1 SHIP -> VOYS +1 SHET -> SHUT +1 SHERIFF -> HYR +1 SHERE -> SHEAR +1 SHERBURN'S -> SHERBOURNE'S +1 SHERBURN -> SHERBIN +1 SHEPHERD -> SHEPARD +1 SHEETS -> SEATS +1 SHEET -> SEED +1 SHED -> SHUT +1 SHED -> SHARED +1 SHE'LL -> YOU'LL +1 SHE -> SEA +1 SHATTERED -> SHUTTERED +1 SHARDURIS -> YOURIS +1 SHARDURIS -> SHALL +1 SHARDURIS -> CHARS +1 SHALLUM -> CHARLEM +1 SHALL -> SHOW +1 SHALL -> SHE'LL +1 SHALL -> I +1 SHAKEDOWN -> SHAKE +1 SHAG -> SHAGG +1 SEYTON -> SETTON +1 SEYTON -> SEATING +1 SEVERE -> SAVIOUR +1 SEVERAL -> SHEVARIN +1 SEVENTIETH -> SEVENTEENTH +1 SEVEN -> THE +1 SERMON -> SIMON +1 SERGEY -> SURGY +1 SERGEY -> SOJI +1 SENTENCED -> INTENSE +1 SENSITIVE -> SCENTED +1 SENSE -> DESCENTS +1 SENOR -> SIGNOR +1 SEEST -> CEASE +1 SEEN -> SEEMED +1 SEEN -> SEE +1 SEEMS -> SEEMED +1 SEEMED -> SEEMS +1 SEEMED -> SEEMING +1 SEEMED -> SEEM +1 SEEM -> SIMPLE +1 SEEM -> SEEMED +1 SEEM -> SEE +1 SEE -> SEEM +1 SEE -> EVEN +1 SEE -> C +1 SEDUCETH -> SEDUCE +1 SECURE -> SECURED +1 SEARCHING -> SURGING +1 SEAMEN -> SEE +1 SEAMAN -> SEAMEN +1 SCULPTORS -> SCULPTOR'S +1 SCULPTOR'S -> SCULPTURES +1 SCRUTINISED -> ARE +1 SCRIBES -> ITS +1 SCREW -> CREW +1 SCRAPPIN -> SCRAP +1 SCORN -> GONE +1 SCO'TCH -> SCORCHED +1 SCHULBERG'S -> SHELBERG'S +1 SCHOOLDAYS -> SCHOOL +1 SCHOOL -> SCHOOLGIRLS +1 SCHOOL -> SCHOOLBOY +1 SCAPEGRACES -> SKIPPED +1 SCAPED -> ESCAPED +1 SCAPE -> ESCAPE +1 SAYS -> TASTE +1 SAYS -> SAY +1 SAYS -> SAKES +1 SAYS -> AS +1 SAYIN -> SAYING +1 SAY -> SO +1 SAY -> SAVE +1 SAW -> SOUGHT +1 SAW -> SORROW +1 SAW -> SOLD +1 SATURDAY -> SATAN +1 SATURATED -> SITUATED +1 SAT -> SET +1 SARAH -> SEREN +1 SANS -> SONSPIER +1 SANITARY -> SENATORY +1 SANG -> YET +1 SAND -> SEND +1 SANCTESS -> SANCTUS +1 SAN -> SENT +1 SAN -> FRITZO +1 SALTS -> SOULS +1 SALONE -> SALON +1 SALLOWER -> SALARY +1 SAINT -> SAY +1 SAILS -> SAILORS +1 SAIL -> SAILORS +1 SAID -> TO +1 SAID -> SET +1 SAID -> SAYS +1 SAID -> PSALMS +1 SAID -> OF +1 SAID -> CITIZELY +1 SAH -> I +1 SAGITTAIRE -> SAGOTARA +1 SAFE -> SAVED +1 S -> SALEN +1 RYO -> RYEO +1 RUSSIA -> RACHEL +1 RUSHED -> RUSH +1 RUNS -> TRANSCENDI +1 RUNG -> RUN +1 RUN -> RUM +1 RUN -> RAN +1 RUMP -> RUM +1 RUM -> ROMAN +1 RULER -> SPONNET +1 RUBENSES -> RUBEN +1 ROWED -> RIDE +1 ROUTE -> ROW +1 ROUSED -> RALPHED +1 ROUND -> AROUND +1 ROTHS -> ROSS +1 ROSAMUN -> ROSAMOND +1 ROPE'S -> ROPES +1 ROPE'S -> AND +1 ROOTS -> OR +1 ROOM -> ROME +1 RONALD -> RANALD +1 ROLL -> ROCKLE +1 ROCK -> STRUCK +1 ROBED -> ROPED +1 ROARING -> ROWING +1 ROAD -> RULED +1 ROAD -> RULE +1 RISDON -> RICHARD +1 RISDON -> AND +1 RINGMASTER -> RING +1 RING -> RANG +1 RINDS -> RHINES +1 RIGOROUS -> RECKLESS +1 RIGOR -> RIGA +1 RIGHT -> RIPER +1 RIDGE'S -> RIDGES +1 RIDER -> WRITER'S +1 RID -> HER +1 RHODIAN -> RODIAN +1 RHODIAN -> RADIAN +1 RHODES -> ROADS +1 REVOLUTIONISTS -> REVOLUTIONIST +1 REVOLTE -> REVOLT +1 REVEREND -> ROBIN +1 REVEREND -> REVERED +1 REVELING -> RIVELING +1 REVEALED -> REVIL +1 RETZCH'S -> WRETCH +1 RETURNED -> TURN +1 RETURN -> 
RETURNING +1 RESUMED -> JAMES'S +1 RESTORETH -> RESTORE +1 RESTIVE -> RENTS +1 RESOLVED -> WE +1 RESK -> REST +1 RESISTING -> FUN +1 RESINOUS -> VEZENOUS +1 RESIDUE -> READY +1 RESCUED -> RESCUE +1 REQUEST -> QUEST +1 REPUTATION -> REPETITION +1 REPLY -> THE +1 REPEATED -> REPLIED +1 REND -> RUN +1 REMISSION -> REMISSIONER'S +1 REMEMBEREST -> REMEMBER +1 REMARKED -> REMARK +1 REMAINED -> REMAINS +1 REMAIN -> EVER +1 RELIGION -> WHO +1 RELIGION -> RELIGIONISTS +1 RELEVANT -> ELEVANT +1 RELEASED -> RELIEF +1 RELATED -> RELIGHTED +1 REJECT -> REJECTED +1 REIGNS -> REIGN +1 REIGNED -> RANGED +1 REGULATION -> REGULATING +1 REGARDING -> GUARDING +1 REG'LER -> REGULAR +1 REFUGE -> REFUGERY +1 REFRESHMENT -> FRESHMENT +1 REFORMERS -> WE +1 REFERENCE -> REFERENCES +1 REELECTION -> RELECTION +1 RECORDS -> RICARDS +1 RECITER -> RESIDER +1 RECITE -> RESIDE +1 RECEDED -> WAS +1 REALLY -> REELING +1 REALISED -> REALIZED +1 READY -> RETICOSE +1 REACHED -> REACH +1 REACHED -> RAGED +1 RE -> REELECTED +1 RAYSTOKE -> RAY +1 RAYSTOKE -> RACE +1 RATTLING -> RIDING +1 RATHER -> WHETHER +1 RATHER -> ALL +1 RATCHFORD -> RETFORD +1 RASHID -> RASCHID +1 RASCALS -> RASCUOUS +1 RAPSCALLIONS -> RASCALIONS +1 RAPPERS -> WRAPPERS +1 RAMSES -> RAMESES +1 RAM -> RUM +1 RAM -> ROOM +1 RAISE -> THAT +1 RAINY -> REINY +1 RAINY -> RAINING +1 RAIN -> REIGN +1 RAID -> RAY +1 RAGE -> RATES +1 RADPROP -> RED +1 RACKETEERS -> RACKETERS +1 RACKED -> RAPPED +1 RACHEL -> ORIGINALLY +1 RACES -> TWO +1 RABBITS -> RABBIT +1 RABB'S -> RABS +1 QUMMUKH -> KUMAK +1 QUITE -> ACQUAINT +1 QUICKENETH -> QUICKENED +1 QUICK -> QUICKLY +1 QUEST -> FRENCH +1 QUARTER -> THEIR +1 QUANTRELL -> QUANTRAIL +1 QUANTITIES -> QUALITIES +1 PYM -> POEM +1 PUTTING -> PUT +1 PUTTEL -> POTTLE +1 PUTS -> BUT +1 PURPORTING -> PORPORTING +1 PULSE -> PART +1 PULLED -> POURED +1 PUDDLES -> BOTTLES +1 PSALM -> NEITHER +1 PRYTANEUM -> BRITTANNIUM +1 PROVIDED -> REVOLT +1 PROVEN -> PROVING +1 PROTECTORATE -> PROTECTOR +1 PROPERLY -> PROPER +1 PROMOTIVE -> PROMOTED +1 PROHIBITION -> PROB'S +1 PROGRAMME -> PROGRAM +1 PROFIT -> PROPHET +1 PROFESSION -> PROFICIENT +1 PROFESSION -> PROFESSIONS +1 PRODUCES -> PRODUED +1 PRODUCE -> PRODUCED +1 PROCOPIUS -> PROCOPIAS +1 PROCLUS -> PROCKLESS +1 PROAS -> PROPS +1 PROAS -> PRATS +1 PROAS -> POETS +1 PROA -> WHICHELE +1 PROA -> PROTINENT +1 PROA -> EXPERIOR +1 PRISONER -> PRISONERS +1 PRISON -> PRISONED +1 PRIORESS -> PRIEST +1 PRINCIPALS -> PRINCIPLES +1 PRINCE -> PRINCESO +1 PRIMER -> PRIMARY +1 PRIEST -> PRIESTS +1 PRETTY -> BERTIE +1 PRESTIGE -> BESIEGE +1 PRESSING -> RAISING +1 PRENTICESHIP -> PREDICUP +1 PRELIMINARIES -> PROLIMINARIES +1 PRECEPTORY -> PERCEPTORY +1 PRECEPTORS -> PERCEPTIVES +1 PRECENTORS -> PRESENTERS +1 PREACHED -> PREACH +1 PRAYERS -> PRAY +1 PRAISEWORTHY -> PRAISE +1 PRACTITIONER -> PRACTITIONERS +1 PRACTICED -> PRACTISED +1 POWDERED -> PATTERED +1 POURED -> PUT +1 POUCHES -> PIUCES +1 POTUM -> POT +1 POTION -> FOCIN +1 POTASSIUM -> PROTESTING +1 POSTHASTE -> POST +1 POSTERN -> PASTING +1 POSITIVELY -> POSIT +1 PORTMANTEAU -> PART +1 PORTENTOUS -> POTENTAL +1 PORED -> POURED +1 POPULACE -> POPULOUS +1 POOR -> PORT +1 POOR -> FAR +1 POMEROY -> POMERALI +1 POLYTECHNIC -> POLYTECHNICHER +1 POLL -> PAUL +1 POINT -> SPITE +1 PO -> POLIT +1 PLUMB -> PLUM +1 PLEASANT -> PRESENT +1 PLEAS -> PLAYERS +1 PLATTERBAFF'S -> PLATTERBUFF'S +1 PLATTERBAFF -> PLATTERBUFF +1 PLATTERBAFF -> PLATTER +1 PLATTERBAFF -> FLATHER +1 PLANTED -> BLOODED +1 PLANK -> PLANCORN +1 PLAIN -> TOWING +1 PLACING -> REPLACING +1 PLACE -> 
PLATE +1 PLACE -> PACE +1 PITTS -> FITZ +1 PITHUM -> PITTHAM +1 PISTOLES -> PISTOL +1 PIPE -> PEP +1 PINKUS -> PINKIS +1 PILESER -> THAT +1 PILESER -> LAUGHED +1 PILESER -> LASS +1 PILESER -> BELIEVER +1 PIKES -> PINES +1 PIGSKIN -> PICTION +1 PIGEONCOTES -> PIGEON +1 PIGEONCOTE -> FID +1 PIGEONCOTE -> FEATURE +1 PIGEONCOTE -> DIGEON +1 PIGEONCOTE -> BEEJON +1 PIGEON -> PITCHEN +1 PIECE -> PEACE +1 PICKED -> PRICKED +1 PHUT'S -> PHUT +1 PHUT -> FAT +1 PHILISTINES -> FAIRLY +1 PHILIPPUS -> PHILIPUS +1 PHILIPPUS -> PHILIP'S +1 PHILIP -> FILLIP +1 PHILIP -> FELLOW +1 PHELPS -> PHILP +1 PETREL -> PETAL +1 PET -> BED +1 PESTE -> PESTS +1 PERSPIRED -> POSPIRED +1 PERNOUNCE -> PRONOUNCE +1 PERISH -> PARISH +1 PERHAPS -> THERE +1 PERE -> PETERS +1 PERE -> BAT +1 PEONAGE -> PINION +1 PENCE -> PANTS +1 PEKAHIAH -> PEKAHIA +1 PEASANTS -> PIECE +1 PEAS -> PEASE +1 PEARL -> PEAR +1 PEACEFUL -> BEATHFUL +1 PAY -> PAIR +1 PAWNBROKER -> POND +1 PAUSED -> PASSED +1 PAUSE -> PULSE +1 PASTES -> PACE +1 PAST -> PASS +1 PASSES -> PAUSES +1 PASSERS -> PASSES +1 PASSED -> PASS +1 PARTLY -> PARSLY +1 PART -> PARTS +1 PARRICIDES -> PARASITES +1 PARR -> POOR +1 PARKS -> BOX +1 PARISH -> PARRISH +1 PARDONABLE -> PIONABLE +1 PARDON -> PARTICER +1 PANEL -> PANNER +1 PALL -> POOL +1 PALESTINE -> PALASTEIN +1 PALAESTRA -> PELUSTER +1 PAIR -> PARENT +1 PAINT -> PAIN +1 PADDLING -> PADDLIN +1 PACES -> PLACES +1 PACE -> FACE +1 P -> PATUM +1 OX -> AX +1 OWNERS -> LANDOWNERS +1 OWNED -> ON +1 OWN -> UNDESTRUCTION +1 OWN -> ON +1 OW'M -> ALL +1 OW -> HOW +1 OVERRIPENESS -> OVER +1 OVERHEARD -> OWN +1 OVERFULL -> OVER +1 OVERFLOWING -> OVERWHELMING +1 OVER -> OF +1 OUTER -> OUTER'S +1 OUT -> UP +1 OUT -> OUTGAZE +1 OUT -> HIS +1 OUT -> HER +1 OUT -> AT +1 OUT -> ALL +1 OUT -> ABOUT +1 OUR -> OURSAND +1 OUR -> KETTLE +1 OUR -> I'LL +1 OUR -> ARE +1 OUNCES -> OUNCE +1 OUGHTN'T -> OUGHT +1 OUGHT -> ARE +1 OUGHT -> ALL +1 OUEN -> LAUIS +1 OTTO -> ARE +1 OTHER -> OTHERWAYS +1 OTHER -> A +1 ORNERIEST -> ORNEIST +1 ORDER -> OTTO +1 OR -> UNDER +1 OR -> TWO +1 OR -> TO +1 OR -> OPPOSITION +1 OR -> FOREMOTHER +1 OR -> AND +1 OR -> ALL +1 OPPRESSORS -> IMPRESSORS +1 OPPRESSED -> OPPRESS +1 OPPOSITION -> OUR +1 OPPORTUNITY -> A +1 OPENING -> SOMETHING +1 OPENED -> OPEN +1 OPENED -> IN +1 OPEN -> UP +1 OPE -> OPEUS +1 ONTO -> ON +1 ONTO -> INTO +1 ONLY -> OWING +1 ONLY -> ON +1 ONLY -> EARLIEST +1 ONLY -> ALLEY +1 ONE -> A +1 ONCE -> WHENCE +1 ONCE -> WAS +1 ON'T -> ON +1 ON -> WHEN +1 ON -> UPON +1 ON -> UNTO +1 ON -> UNLUCK +1 ON -> UNDEST +1 ON -> UNCLEAN +1 ON -> UNCHANGED +1 ON -> SON +1 ON -> PIGEON +1 ON -> OF +1 ON -> I'M +1 ON -> HER +1 ON -> DOWN +1 ON -> ARE +1 ON -> ALL +1 OME -> HOME +1 OMAR -> MARBIN +1 OLL -> ALL +1 OLIVE -> OLIV +1 OLIVE -> OLD +1 OLIVE -> ALL +1 OLIVE -> ALIVE +1 OLD -> O +1 OKAY -> O +1 OIL -> OARMEIL +1 OH -> U +1 OH -> ALL +1 OGRE'S -> OGRES +1 OFFICES -> OFFICERS +1 OFFICERS -> OFFICIALS +1 OFFICER -> OFFICERS +1 OFFICE -> OF +1 OFFERS -> OFFICE +1 OFFENSE -> OFFENCE +1 OFFEND -> OFFENDLY +1 OFFEN -> OFTEN +1 OFF -> OUR +1 OFF -> OF +1 OF -> WALL +1 OF -> VILLAGE +1 OF -> VASSARIAH +1 OF -> THIS +1 OF -> THEIR +1 OF -> THE +1 OF -> THAT +1 OF -> OVER +1 OF -> OR +1 OF -> OMIDIAN +1 OF -> OFURUS +1 OF -> IS +1 OF -> INTO +1 OF -> HER +1 OF -> GIVE +1 OF -> FOR +1 OF -> AND +1 ODD -> OTT +1 ODD -> AUGHT +1 OCCASION -> CAPTAIN +1 OBOCOCK -> OBEK +1 OBJECT -> SUBJECT +1 OBEYED -> OBEY +1 O'NIGHTS -> OR +1 O'NEILL -> O'NEIA +1 O -> OR +1 O -> AND +1 O -> ALL +1 O -> A +1 NYTOUCH -> KNIGHTSAGE +1 NUZHAT -> 
UZHAT +1 NUZHAT -> USHART +1 NUZHAT -> NOSHAT +1 NUTS -> KNOTS +1 NUNS -> NUN'S +1 NU'UMAN -> NUMA +1 NOWT -> THAT +1 NOW -> THOU +1 NOW -> SOME +1 NOW -> ON +1 NOW -> NABRAMIN +1 NOTTINGHAM -> NINETEEN +1 NOTE -> NOTEBOOK +1 NOT -> SNUG +1 NOT -> OUT +1 NOT -> NUT +1 NOT -> NONE +1 NOT -> NIGHT +1 NOT -> NIGH +1 NOT -> MUCH +1 NOT -> KNOWN +1 NOT -> I +1 NORTH -> NORTHEAST +1 NOR -> WHATEVER +1 NOPE -> NOTE +1 NONETHELESS -> NONE +1 NONE -> NO +1 NOBLE -> SNOWBLE +1 NO -> THOUGH +1 NIPPER -> NIBBER +1 NIKOLAY -> NIKOLA +1 NIGHT -> KNIGHT +1 NICO -> NACO +1 NEXTER -> NEXT +1 NEW -> YOUR +1 NEW -> YOU +1 NEVER -> REPAST +1 NEVER -> EVER +1 NERVE -> NURSE +1 NEOSHO -> NEOSHIL +1 NEIGHBOURS -> NEIGHBORS +1 NEIGHBOURING -> NEIGHBORING +1 NEIGHBORS -> LABORS +1 NEAT -> MEAT +1 NEAREST -> NEW +1 NEARER -> NEAR +1 NEAR -> NEARING +1 NAUGHT -> NOUGHT +1 NATURALLY -> NATURAL +1 NATURAL -> NATURALLY +1 NARRATIVES -> NARRATIVE +1 NARRATE -> THEIR +1 NARCOTIC -> NAUCOTIC +1 NANDY'S -> ANDY'S +1 MYSTERIOUS -> MYSTERY +1 MYRTILUS -> MERTULIST +1 MYRTILUS -> BURTULAS +1 MY -> MILAD +1 MY -> BY +1 MUSTACHES -> MOUSTACHES +1 MUST -> MISTER +1 MUST -> MISS +1 MUST -> MICE +1 MUSICIANS -> MY +1 MURDOCH'S -> MARDOCK'S +1 MURDOCH -> MARDACK +1 MURDER -> MURDERER +1 MUIR -> YOU +1 MUG -> MUCH +1 MUDDY -> MONEY +1 MUD -> MADAM +1 MUCH -> ACT +1 MOWER -> OVER +1 MOVEMENT -> MOMENT +1 MOUTHWHAT -> MOUTH +1 MOUTHS -> MOTHS +1 MOURNING -> MORNING +1 MOUNTNORRIS -> MONTORIS +1 MOTOR -> MOTTAR +1 MOST -> PRO +1 MOST -> POSSES +1 MOSES -> MOVES +1 MORTIS -> MORTARS +1 MORTIFICATIONTHAT -> MORTIFICATION +1 MORTEM -> MODER +1 MORE'N -> MORE +1 MORE -> SMALL +1 MORAL -> MORTAL +1 MOPED -> MILKED +1 MOOR -> MORE +1 MOONLIGHT -> MONTH +1 MONTHLY -> MOUTHLY +1 MONSEIGNEUR -> MONSIEUR +1 MONKERS -> MOCKERS +1 MOMMOL -> MAMMA +1 MODERATE -> AND +1 MO -> MORE +1 MISTER -> MISSUS +1 MISTER -> MISS +1 MISTER -> MIDSR +1 MISTER -> MICHIG +1 MIST -> MISTAGINATION +1 MISS -> WAS +1 MISS -> MY +1 MINNIE -> MINNI +1 MINISTERED -> MINISTER +1 MINISTER -> MEANS +1 MINIONETTE -> MINOR +1 MINE -> MIND +1 MINE -> MIKE +1 MIND -> MINE +1 MINCE -> MINSER +1 MIMICK -> MIMIC +1 MILLY -> MERELY +1 MILLSTON -> MILLSTONE +1 MILLER -> MILRON +1 MILL -> BLUE +1 MILICENT'S -> MILLSON'S +1 MILICENT -> MILLSON +1 MILICENT -> MILLICINE +1 MILICENT -> MILLICENT +1 MILICENT -> MELLICENT +1 MILE -> MILES +1 MILDEWED -> MILDED +1 MIHI -> HE +1 MIDSHIPMAN -> MITCHIPMAN +1 MIDRIFF -> MIDRIFTS +1 MIDIAN -> MENDIAN +1 MIDDLING -> MIDDLIN +1 METHINKETH -> METHINK +1 MET -> MAKE +1 MESTIENNE'S -> MESTINE'S +1 MESTIENNE -> MISSED +1 MESTIENNE -> MISS +1 MESTIENNE -> MESTINE +1 MESSES -> MASSES +1 MERRY -> MERRYMAKING +1 MERNEPTAH -> MARNET +1 MERLONUS -> MERLUNUS +1 MERLONUS -> MERLINUS +1 MERIT -> MARRIAGE +1 MENTAL -> MANTLE +1 MEND -> MENTAL +1 MEN'S -> MAN'S +1 MEN -> MEANTIME +1 MEN -> IN +1 MEN -> CAME +1 MEMBRANE -> MEMORANE +1 MEDICAL -> MEDICA +1 MEDALS -> METALS +1 MEDAL -> MEDDLE +1 MEAT -> HAVE +1 ME -> MISS +1 ME -> HIM +1 MC -> MAC +1 MAY -> ME +1 MAY -> MADE +1 MATTER -> MATHA +1 MATI -> MEANT +1 MATI -> MANTIL +1 MATEY -> MATE +1 MATERIALS -> MATERIORS +1 MATE -> MADE +1 MASTER -> MERCER +1 MASTER -> MASSA +1 MASKED -> MASSED +1 MARVELED -> MARVELLED +1 MARTIN -> MERTON +1 MARSPEAKER -> MARKEE +1 MARSHAL -> MARTIAN +1 MARSHAL -> MARTIAL +1 MARSH -> MARS +1 MARRIAGE -> MARY'S +1 MARMALADES -> MARVELL +1 MARGARET'S -> MARGARET +1 MARE -> MAYOR +1 MANNER -> MANNERS +1 MANKATO -> MANCATEO +1 MANKATO -> MAIN +1 MANASSEH -> MANOT +1 MAN'S -> MEN'S +1 
+1 MAN -> MADAMELY
+1 MAMIE -> MAMMY
+1 MALNUTRITION -> MALLETRICIAN
[... remaining substitution entries, one "+1 REF -> HYP" per diff line, ending with ...]
+1 A -> ABASEMENT
+
+DELETIONS: count ref
+24 THE
+21 A
+17 IT
+16 TO
+15 IS
+13 IN
+12 AND
+11 OF
[... remaining deletion counts, in descending order, down to ...]
+1 ACKNOWLEDGE
+
+INSERTIONS: count hyp
+30 THE
+20 A
+19 TO
+17 IN
+15 IT
+15 AND
+13 OF
[... remaining insertion counts, in descending order, down to ...]
+1 ALEXE
+
+PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp
+THE 2991 279 3134 3127
+A 1035 228 1145 1153
+AND 1704 205 1788 1825
+IN 743 146 808 824
+TO 1391 133 1444 1471
+OF 1340 89 1386 1383
[... remaining per-word rows; this excerpt of the log breaks off mid-table ...]
2 1 +MOUTHWHAT 0 1 1 0 +MOUTHS 0 1 1 0 +MOUTHLY 0 1 0 1 +MOUTH 7 1 7 8 +MOUSTACHES 0 1 0 1 +MOURNING 2 1 3 2 +MOUNTNORRIS 0 1 1 0 +MOTTAR 0 1 0 1 +MOTOR 0 1 1 0 +MOTIONLESS 0 1 1 0 +MOTHS 0 1 0 1 +MOTHERS 5 1 5 6 +MOTHER 51 1 52 51 +MORTIS 0 1 1 0 +MORTIFICATIONTHAT 0 1 1 0 +MORTIFICATION 0 1 0 1 +MORTEM 0 1 1 0 +MORTARS 0 1 0 1 +MORTAL 1 1 1 2 +MORNING 21 1 21 22 +MORE'N 0 1 1 0 +MORAL 7 1 8 7 +MOPED 0 1 1 0 +MOOR 2 1 3 2 +MOONLIGHT 2 1 3 2 +MONTORIS 0 1 0 1 +MONTHLY 0 1 1 0 +MONTH'S 0 1 0 1 +MONTH 1 1 1 2 +MONSIEUR 0 1 0 1 +MONSEIGNEUR 5 1 6 5 +MONKERS 0 1 1 0 +MONEY 16 1 16 17 +MONDER 0 1 0 1 +MOMMOL 0 1 1 0 +MOMENT 24 1 24 25 +MOLLY 0 1 0 1 +MOLASTIC 0 1 0 1 +MOKER 0 1 0 1 +MODERATE 1 1 2 1 +MODER 0 1 0 1 +MOCKERS 0 1 0 1 +MO 0 1 1 0 +MITCHIPMAN 0 1 0 1 +MISTAGINATION 0 1 0 1 +MIST 1 1 2 1 +MISSED 5 1 5 6 +MINUTELY 0 1 1 0 +MINSER 0 1 0 1 +MINOR 1 1 1 2 +MINNIE 1 1 2 1 +MINNI 0 1 0 1 +MINISTERED 0 1 1 0 +MINIONETTE 0 1 1 0 +MINCE 0 1 1 0 +MIMICK 0 1 1 0 +MIMIC 0 1 0 1 +MILRON 0 1 0 1 +MILLY 0 1 1 0 +MILLSTONE 0 1 0 1 +MILLSTON 0 1 1 0 +MILLSON'S 0 1 0 1 +MILLSON 0 1 0 1 +MILLICINE 0 1 0 1 +MILLICENT 0 1 0 1 +MILLER 3 1 4 3 +MILL 0 1 1 0 +MILKED 0 1 0 1 +MILICENT'S 0 1 1 0 +MILES 6 1 6 7 +MILE 4 1 5 4 +MILDEWED 0 1 1 0 +MILDED 0 1 0 1 +MILAD 0 1 0 1 +MIKE 2 1 2 3 +MIHI 0 1 1 0 +MIDSR 0 1 0 1 +MIDSHIPMAN 0 1 1 0 +MIDRIFTS 0 1 0 1 +MIDRIFF 0 1 1 0 +MIDDLING 0 1 1 0 +MIDDLIN 0 1 0 1 +MICHIG 0 1 0 1 +MICE 0 1 0 1 +METHINKETH 0 1 1 0 +METHINK 0 1 0 1 +METALS 0 1 0 1 +METAL 0 1 0 1 +MESTINE'S 0 1 0 1 +MESTINE 0 1 0 1 +MESTIENNE'S 0 1 1 0 +MESSUM 0 1 0 1 +MESSES 0 1 1 0 +MERTULIST 0 1 0 1 +MERTON 0 1 0 1 +MERRYMAKING 0 1 0 1 +MERRY 2 1 3 2 +MERNEPTAH 0 1 1 0 +MERLUNUS 0 1 0 1 +MERLINUS 0 1 0 1 +MERIT 2 1 3 2 +MERELY 6 1 6 7 +MERCER 0 1 0 1 +MENDIAN 0 1 0 1 +MEND 0 1 1 0 +MEMORANE 0 1 0 1 +MEMBRANE 0 1 1 0 +MELLICENT 0 1 0 1 +MEDICAL 1 1 2 1 +MEDICA 0 1 0 1 +MEDDLE 1 1 1 2 +MEDALS 0 1 1 0 +MEDAL 0 1 1 0 +MEASURE 0 1 0 1 +MEANTUS 0 1 0 1 +MEANTIME 2 1 2 3 +MEANTES 0 1 0 1 +MEANT 10 1 10 11 +MEANS 23 1 23 24 +MEAN 19 1 20 19 +MAYOR 1 1 1 2 +MAXIMMUNITION 0 1 0 1 +MAU 0 1 0 1 +MATURITY 0 1 0 1 +MATTERS 3 1 4 3 +MATTER 21 1 22 21 +MATRON 0 1 0 1 +MATHA 0 1 0 1 +MATEY 0 1 1 0 +MATERIORS 0 1 0 1 +MATERIALS 1 1 2 1 +MASSES 1 1 1 2 +MASSED 0 1 0 1 +MASSA 0 1 0 1 +MASKED 0 1 1 0 +MARY'S 2 1 2 3 +MARVELLED 0 1 0 1 +MARVELL 0 1 0 1 +MARVELED 0 1 1 0 +MARTIN 0 1 1 0 +MARTIAN 2 1 2 3 +MARTIAL 0 1 0 1 +MARSPEAKER 0 1 1 0 +MARSH 0 1 1 0 +MARS 3 1 3 4 +MARNET 0 1 0 1 +MARMALADES 1 1 2 1 +MARKEE 0 1 0 1 +MARGARET'S 2 1 3 2 +MARGARET 14 1 14 15 +MARE 0 1 1 0 +MARDOCK'S 0 1 0 1 +MARDACK 0 1 0 1 +MARBULAN 0 1 0 1 +MARBIN 0 1 0 1 +MAR 1 1 1 2 +MANTIL 0 1 0 1 +MANOT 0 1 0 1 +MANNERS 0 1 0 1 +MANNER 8 1 9 8 +MANCATEO 0 1 0 1 +MANASSEH 0 1 1 0 +MAMMY 0 1 0 1 +MAMMA 1 1 1 2 +MAMIE 0 1 1 0 +MALNUTRITION 0 1 1 0 +MALLETRICIAN 0 1 0 1 +MALE 0 1 0 1 +MAKING 17 1 18 17 +MAKES 10 1 11 10 +MAKAN 7 1 8 7 +MAJORITY 5 1 6 5 +MAJOR 4 1 5 4 +MAINE 0 1 1 0 +MAIN 1 1 1 2 +MAIL 0 1 1 0 +MADAMELY 0 1 0 1 +MADAM 0 1 0 1 +MAD 1 1 2 1 +MAC 0 1 0 1 +MABILLON 0 1 1 0 +LYSIMACHUS 0 1 1 0 +LYING 2 1 2 3 +LUNA'S 2 1 3 2 +LUKE 3 1 4 3 +LUCRATIVE 0 1 1 0 +LUCK 2 1 3 2 +LUCIKAM 0 1 0 1 +LUCIAN 0 1 0 1 +LOWERING 2 1 3 2 +LOWER 2 1 3 2 +LOVELY 3 1 3 4 +LOT 6 1 6 7 +LOSS 2 1 3 2 +LORING 0 1 0 1 +LORD'S 0 1 1 0 +LOOSE 3 1 3 4 +LOOKS 5 1 5 6 +LOOKOUT 0 1 1 0 +LONGER 16 1 16 17 +LONESOMENESS 0 1 1 0 +LONESOME 1 1 1 2 +LONE 0 1 1 0 +LOGS 0 1 0 1 +LOGIS 0 1 0 1 +LOCKS 0 1 1 0 +LOCK 3 1 4 3 +LOBSURD 0 1 0 1 +LOBSTERBOAT 0 1 0 1 +LOBS 0 1 0 1 +LOAST 0 1 0 
1 +LOAD 0 1 0 1 +LIZZLE 0 1 0 1 +LIZABETH 0 1 1 0 +LIVELY 1 1 2 1 +LIVELONG 0 1 1 0 +LITS 0 1 0 1 +LITERN 0 1 0 1 +LISTEN 11 1 11 12 +LISSLY 0 1 0 1 +LISPIN 0 1 0 1 +LISBETH 0 1 0 1 +LISAKETH 0 1 0 1 +LIPS 5 1 6 5 +LINING 0 1 0 1 +LINE 6 1 7 6 +LIMETERY 0 1 0 1 +LIME 0 1 1 0 +LILY 0 1 0 1 +LILLO 0 1 0 1 +LILLBURN 0 1 0 1 +LILBOURNE 0 1 0 1 +LIKELY 2 1 3 2 +LIGHTLY 0 1 1 0 +LIEUTENANT 0 1 1 0 +LIES 3 1 4 3 +LIEGE 0 1 0 1 +LIDNESLEY 0 1 0 1 +LIDA 0 1 0 1 +LIAISON 0 1 1 0 +LEXINGTON 0 1 1 0 +LEWIS 0 1 1 0 +LEWINA 0 1 0 1 +LEVITICUS 0 1 1 0 +LEVITCH 0 1 0 1 +LEVIKUS 0 1 0 1 +LEVELLY 0 1 0 1 +LETTERS 6 1 7 6 +LETS 0 1 1 0 +LET'S 4 1 4 5 +LEST 2 1 3 2 +LESSON 1 1 2 1 +LESSINGTON 0 1 0 1 +LESSEN 1 1 2 1 +LENOIR 0 1 1 0 +LENNONSHIPS 0 1 0 1 +LENNING 0 1 0 1 +LENIN 1 1 2 1 +LEMON 0 1 1 0 +LEGS 2 1 3 2 +LEGION 0 1 0 1 +LEGERSHIP 0 1 0 1 +LEER 0 1 1 0 +LEECHES 0 1 1 0 +LEE'S 0 1 1 0 +LEE 0 1 0 1 +LEAVED 1 1 2 1 +LEARNS 1 1 1 2 +LEARN 4 1 5 4 +LEAR 0 1 0 1 +LEANY'S 0 1 0 1 +LEADPENCIL 0 1 1 0 +LEADERSHIP 0 1 1 0 +LEADERS 1 1 2 1 +LEADER'S 0 1 0 1 +LEAD 2 1 2 3 +LAWS 3 1 3 4 +LAWN 0 1 0 1 +LAURDELL 0 1 0 1 +LAUIS 0 1 0 1 +LAUGHTER 3 1 3 4 +LAUDER 0 1 0 1 +LATINUE 0 1 0 1 +LATH 0 1 1 0 +LATELY 1 1 2 1 +LASS 0 1 0 1 +LASH 1 1 2 1 +LAS 0 1 0 1 +LARKINS 0 1 0 1 +LARKIN'S 0 1 1 0 +LARD 0 1 0 1 +LAP 2 1 2 3 +LANE 2 1 2 3 +LANDOWNERS 1 1 1 2 +LANDOWNER 3 1 4 3 +LANDLORD 1 1 1 2 +LANDIE 0 1 0 1 +LANDI 0 1 1 0 +LANDED 2 1 3 2 +LAMPS 0 1 0 1 +LAMBS 0 1 1 0 +LAKE 5 1 6 5 +LAIN 1 1 2 1 +LAG 0 1 0 1 +LADY'S 1 1 1 2 +LADY 14 1 15 14 +LADS 1 1 2 1 +LACK 2 1 2 3 +LACHRYMA 0 1 1 0 +LACHES 0 1 0 1 +LABOURING 0 1 0 1 +LABOURERS 0 1 1 0 +LABOUR 0 1 1 0 +LABORS 0 1 0 1 +LABORING 1 1 2 1 +LABORERS 0 1 0 1 +LABOR 1 1 1 2 +L 3 1 4 3 +KUMAK 0 1 0 1 +KOTE 0 1 0 1 +KNOWS 7 1 8 7 +KNOWN 10 1 10 11 +KNOTTY 0 1 1 0 +KNOTS 0 1 0 1 +KNOT 2 1 3 2 +KNOLLED 0 1 0 1 +KNOBBLY 0 1 1 0 +KNIGHTSAGE 0 1 0 1 +KNIGHTS 1 1 2 1 +KNIGHT'S 0 1 1 0 +KNEW 16 1 16 17 +KLEPTOMANIAC 0 1 1 0 +KLEPTOMANIA 0 1 1 0 +KITTY 3 1 4 3 +KISSED 5 1 6 5 +KISS 2 1 2 3 +KING'S 6 1 7 6 +KING 45 1 45 46 +KINDRED 1 1 1 2 +KINDNESS 7 1 7 8 +KINDLING 0 1 0 1 +KINDER 1 1 2 1 +KILLS 0 1 1 0 +KILLET 0 1 0 1 +KIEDAM 0 1 0 1 +KIDNE 0 1 0 1 +KID 1 1 2 1 +KICKY 0 1 0 1 +KICK 0 1 1 0 +KI 0 1 0 1 +KHAN 1 1 1 2 +KERSTALL 0 1 1 0 +KENN 0 1 0 1 +KENITE 0 1 0 1 +KEEPING 5 1 5 6 +KEEN 1 1 2 1 +KEDEM 0 1 1 0 +KATY 0 1 0 1 +KANITE 0 1 0 1 +KAMAR 0 1 1 0 +JUSTIFIED 1 1 2 1 +JUROR 0 1 0 1 +JUMPS 1 1 2 1 +JUMPED 1 1 1 2 +JULIET 0 1 0 1 +JULIEN 1 1 2 1 +JULIE 0 1 0 1 +JULIAN 0 1 0 1 +JUICES 0 1 1 0 +JUG 3 1 4 3 +JUDICINT 0 1 0 1 +JUDAH 1 1 2 1 +JOUVIN'S 0 1 1 0 +JOURED 0 1 0 1 +JOSHUA 0 1 1 0 +JOKINGLY 0 1 1 0 +JOKERS 0 1 0 1 +JOKE 0 1 0 1 +JOINED 1 1 2 1 +JOESH 0 1 0 1 +JOCELYN'S 0 1 1 0 +JOCELYN 0 1 1 0 +JIS 0 1 1 0 +JIM 4 1 5 4 +JILT 0 1 1 0 +JI 0 1 0 1 +JEWISH 1 1 2 1 +JEWELLER 0 1 0 1 +JETS 0 1 0 1 +JEST 0 1 0 1 +JERRY 1 1 1 2 +JEHOASH 0 1 1 0 +JEERED 0 1 1 0 +JEDGE 0 1 1 0 +JEALOUS 0 1 0 1 +JAUNTS 0 1 0 1 +JAS 0 1 0 1 +JARS 0 1 0 1 +JAPLY 0 1 0 1 +JANSENIST 0 1 1 0 +JANEERO 0 1 1 0 +JANE 6 1 7 6 +JAMS 1 1 2 1 +JAMES'S 0 1 0 1 +JAMES 3 1 3 4 +JAME 0 1 0 1 +JAM'S 0 1 0 1 +JAM 1 1 2 1 +JAKIE 0 1 0 1 +JAKI 0 1 0 1 +JAKEY'S 0 1 1 0 +JAGGIE 0 1 0 1 +JACKSON 3 1 4 3 +JACKMEN 0 1 0 1 +JACKMAN 0 1 1 0 +JACK'S 0 1 0 1 +J 1 1 2 1 +IZZY'S 0 1 1 0 +IVANOVITCH 4 1 5 4 +ITALY 1 1 1 2 +IT'LL 1 1 1 2 +ISSUE 1 1 2 1 +ISRAITS 0 1 0 1 +ISRAELITES 0 1 1 0 +ISLAMIZED 0 1 0 1 +ISLAMISED 0 1 1 0 +ISLAM 0 1 1 0 +ISIS 0 1 0 1 +IRRESPONSIBLE 0 1 0 1 +IRRESCELLING 0 1 0 1 +IRONICAL 0 1 1 0 +IRONIC 0 1 0 1 +IPES 0 
1 0 1 +INWARD 0 1 1 0 +INVALIDES 0 1 1 0 +INVALID 0 1 0 1 +INTERNAL 2 1 3 2 +INTERFERE 0 1 1 0 +INTERESTING 6 1 6 7 +INTENTION 4 1 4 5 +INTENSE 3 1 3 4 +INTENDED 4 1 5 4 +INTELLECTUALLY 0 1 1 0 +INTELLECTUAL 0 1 0 1 +INSTRUCTIVE 0 1 0 1 +INSTRUCTED 0 1 1 0 +INSTEAD 4 1 4 5 +INSTANT'S 0 1 1 0 +INSTANCE 1 1 1 2 +INSPIRANTS 0 1 0 1 +INSISTS 0 1 0 1 +INSIST 0 1 1 0 +INSIDE 4 1 4 5 +INQUIRE 1 1 2 1 +INNES 0 1 1 0 +INJURE 2 1 3 2 +INGREDIAN 0 1 0 1 +INGENUOUSLY 0 1 0 1 +INGENIOUSLY 0 1 1 0 +INFORT 0 1 0 1 +INFLUENCED 0 1 1 0 +INFERIOR 2 1 3 2 +INFERE 0 1 0 1 +INFAMYLON 0 1 0 1 +INFAMOUS 0 1 1 0 +INEXPECTED 0 1 0 1 +INELEGANTLY 0 1 1 0 +INDUCE 0 1 0 1 +INCREASES 2 1 2 3 +INCREASE 4 1 5 4 +INCORRECT 0 1 1 0 +INCOMPARABLE 0 1 1 0 +INCLINE 1 1 1 2 +INCES 0 1 0 1 +IMPROVISED 1 1 2 1 +IMPROVIDENT 0 1 1 0 +IMPROVED 0 1 1 0 +IMPRESSORS 0 1 0 1 +IMPLY 1 1 2 1 +IMPERFECT 0 1 0 1 +IMPALIATE 0 1 0 1 +IMMERED 0 1 0 1 +IMMENSE 2 1 3 2 +IMAGED 0 1 0 1 +ILLUSION 1 1 1 2 +ILLS 0 1 1 0 +ILL 5 1 5 6 +ILINE 0 1 0 1 +IGNORED 0 1 1 0 +IGNOMY 0 1 1 0 +IDLEEN 0 1 0 1 +IDEA 10 1 11 10 +I'FAITH 0 1 1 0 +HYR 0 1 0 1 +HYDROCLOIC 0 1 0 1 +HYDROCLOGIC 0 1 0 1 +HUSBITH 0 1 0 1 +HUNTON 0 1 0 1 +HUNTERS 0 1 1 0 +HUMOUR 1 1 1 2 +HUMOR 0 1 1 0 +HUMBLY 0 1 1 0 +HUMANITY 1 1 1 2 +HUMANITARY 0 1 1 0 +HUH 0 1 1 0 +HUDSPETH 0 1 1 0 +HOWL 0 1 1 0 +HOUSEHOLD 0 1 1 0 +HOUNDY 0 1 0 1 +HOUNDED 0 1 1 0 +HOTEL 4 1 4 5 +HOSTUALLY 0 1 0 1 +HOSPITABLY 0 1 1 0 +HORSES 4 1 4 5 +HORSE 10 1 10 11 +HORDE 0 1 1 0 +HOPPING 0 1 1 0 +HOPPER 0 1 1 0 +HOO'LL 0 1 1 0 +HONOURS 0 1 1 0 +HONEYMAN 0 1 0 1 +HONESTLY 0 1 1 0 +HONEST 6 1 6 7 +HOMEPUSH 0 1 1 0 +HOLLERED 0 1 0 1 +HOLLER 0 1 1 0 +HOLE 0 1 1 0 +HOF 0 1 1 0 +HOARD 0 1 0 1 +HITTED 0 1 0 1 +HIT 2 1 3 2 +HISTORY 4 1 4 5 +HISSELF 0 1 1 0 +HIRED 0 1 0 1 +HINFELD 0 1 0 1 +HINDFELL 0 1 1 0 +HINCOUPS 0 1 0 1 +HILBER 0 1 0 1 +HIJAZ 0 1 1 0 +HIGHS 0 1 1 0 +HIGH 7 1 8 7 +HIES 0 1 0 1 +HIERARCHY 0 1 1 0 +HIDINGS 0 1 0 1 +HIDDEN 3 1 3 4 +HI 0 1 1 0 +HEYDAY 0 1 1 0 +HEWN 0 1 1 0 +HERSELF 35 1 35 36 +HEROIND 0 1 0 1 +HERMANN'S 0 1 0 1 +HERMANN 0 1 0 1 +HERMAND 0 1 0 1 +HERMA 0 1 0 1 +HERIOT'S 0 1 1 0 +HERIOT 0 1 1 0 +HERETOFORE 0 1 1 0 +HERBID 0 1 0 1 +HEN 0 1 1 0 +HELVIN 0 1 1 0 +HELPS 0 1 0 1 +HELPED 2 1 3 2 +HELL 4 1 4 5 +HELEN 0 1 1 0 +HELDS 0 1 0 1 +HEIP 0 1 0 1 +HEELED 0 1 1 0 +HEEDED 0 1 1 0 +HEDGES 0 1 1 0 +HEBREW 1 1 2 1 +HEATLESS 0 1 0 1 +HEATED 0 1 0 1 +HEAT 0 1 1 0 +HEARTY 2 1 3 2 +HEARTIEST 0 1 1 0 +HEARSE 3 1 4 3 +HEARIT'S 0 1 0 1 +HEALTH 1 1 1 2 +HEALED 1 1 1 2 +HEADS 4 1 4 5 +HEADLONG 0 1 1 0 +HE'LL 2 1 2 3 +HAZE 0 1 0 1 +HAYE 0 1 0 1 +HAY 1 1 1 2 +HAW 0 1 1 0 +HAVING 22 1 22 23 +HAVEN 0 1 1 0 +HAUGHTINESS 0 1 1 0 +HAUGHTERSLEY 0 1 0 1 +HATTERSLEY 0 1 1 0 +HATES 2 1 3 2 +HATCHES 0 1 0 1 +HASHY 0 1 0 1 +HASAN 1 1 2 1 +HARVEY'SWHICH 0 1 1 0 +HARVEST 3 1 3 4 +HARRISONVILLE 1 1 2 1 +HARRISON 0 1 0 1 +HARRIS 1 1 2 1 +HARRIET 0 1 0 1 +HAROLD 0 1 1 0 +HARMONT'S 0 1 0 1 +HARMON'S 0 1 0 1 +HARMON 0 1 0 1 +HARK 0 1 1 0 +HAREMON 0 1 0 1 +HARE 0 1 1 0 +HARDWORSTOE 0 1 0 1 +HARDWARE 0 1 1 0 +HARDLY 9 1 10 9 +HARDIEST 0 1 0 1 +HARBOUR 0 1 0 1 +HARBOR 0 1 1 0 +HAPPY 7 1 7 8 +HAPPILY 1 1 1 2 +HAPPENED 9 1 10 9 +HAPLY 0 1 1 0 +HANNER 0 1 0 1 +HAM 0 1 0 1 +HALVES 1 1 1 2 +HALLWAKE 0 1 0 1 +HALLO 0 1 0 1 +HALEY'S 0 1 1 0 +HAIL 1 1 1 2 +HADN'T 1 1 1 2 +HADDA 0 1 1 0 +GUY 1 1 1 2 +GUV'NER 0 1 0 1 +GUTTER 0 1 0 1 +GURSER 0 1 0 1 +GUQUET 0 1 0 1 +GUNS 2 1 3 2 +GUNNER 0 1 0 1 +GUNDRUE 0 1 0 1 +GUNDRON 0 1 0 1 +GUN 2 1 2 3 +GULLET 0 1 1 0 +GUIRUN'S 0 1 1 0 +GUINEAS 0 1 0 1 +GUINEA 2 1 3 2 +GUIDE 2 1 3 2 +GUESSIMIAN 0 1 0 1 
+GUDRUN 0 1 1 0 +GUARDING 0 1 0 1 +GRUMBLINGLY 0 1 1 0 +GRUFFLY 0 1 1 0 +GROWING 2 1 3 2 +GROW 1 1 1 2 +GROVE 0 1 1 0 +GROUNDS 0 1 1 0 +GROTTO 0 1 1 0 +GROOM 0 1 1 0 +GRIS 0 1 0 1 +GRINNING 2 1 3 2 +GRINNIE 0 1 0 1 +GRINDING 0 1 0 1 +GRIMES 0 1 0 1 +GRIBIER 0 1 1 0 +GREYNDER 0 1 0 1 +GREW 4 1 4 5 +GREEBS 0 1 0 1 +GREAVES 0 1 1 0 +GREAT 39 1 40 39 +GRATITUDE 4 1 5 4 +GRASPS 0 1 1 0 +GRASPED 1 1 1 2 +GRAPPLE 0 1 1 0 +GRANTPLE 0 1 0 1 +GRANDPAPAZZARD 0 1 0 1 +GRANDPAP 2 1 3 2 +GRANDMOTHER 0 1 0 1 +GRANDAME 0 1 1 0 +GRAND 5 1 5 6 +GRAMMATIUS 0 1 0 1 +GRAMMATEUS 0 1 1 0 +GRAM 0 1 1 0 +GRAF 0 1 0 1 +GOVERNOR 2 1 2 3 +GOVERNMENTS 2 1 3 2 +GOVERN 0 1 0 1 +GOSPIR 0 1 0 1 +GOSLER 0 1 1 0 +GORGE 0 1 0 1 +GORE 0 1 0 1 +GORDON'S 2 1 2 3 +GOODS 4 1 5 4 +GOLDS 0 1 0 1 +GOLDFISH 0 1 1 0 +GOLDEN 2 1 3 2 +GOFF 0 1 0 1 +GOES 6 1 7 6 +GODS 0 1 0 1 +GODEBILLIOS 0 1 1 0 +GOD'S 2 1 3 2 +GOBY'S 0 1 0 1 +GOBIES 0 1 0 1 +GOBIAS 0 1 0 1 +GNAWING 0 1 1 0 +GNARRING 0 1 0 1 +GNARLED 0 1 1 0 +GLOOMY 1 1 2 1 +GLISPIN'S 0 1 1 0 +GLISBON'S 0 1 0 1 +GLISBON 0 1 0 1 +GLAY 0 1 0 1 +GLANCE 3 1 3 4 +GLADNESSED 0 1 0 1 +GLADDENEST 0 1 1 0 +GLAD 4 1 5 4 +GIRTHING 0 1 1 0 +GIRTHED 0 1 1 0 +GIRLS 5 1 6 5 +GIRK 0 1 0 1 +GIRDS 0 1 1 0 +GIRDING 0 1 0 1 +GIRDED 0 1 0 1 +GILROY 0 1 1 0 +GIGGS 0 1 0 1 +GIFTS 0 1 1 0 +GEVINOVITCH 0 1 0 1 +GERT 0 1 0 1 +GERMAN 7 1 7 8 +GERGIAN 0 1 0 1 +GER 0 1 0 1 +GEORGIUM 0 1 0 1 +GEORGIO 0 1 0 1 +GEORGIA 0 1 1 0 +GEORGE'SWHICH 0 1 1 0 +GEOGNANT 0 1 0 1 +GENTLEMEN'S 0 1 1 0 +GENTLEMAN'S 0 1 0 1 +GENT 0 1 0 1 +GENISH 0 1 0 1 +GENEROUS 1 1 2 1 +GENERO 0 1 0 1 +GENERALS 0 1 0 1 +GENERAL 6 1 7 6 +GEAR 1 1 2 1 +GAZE 3 1 4 3 +GAVE 32 1 32 33 +GAUTHIER 0 1 1 0 +GATHIERRE 0 1 0 1 +GASTRATO 0 1 0 1 +GASHED 0 1 1 0 +GASH 0 1 0 1 +GARR 0 1 0 1 +GARDENS 0 1 0 1 +GARDEN'S 0 1 1 0 +GAMMON 0 1 1 0 +GAMIN 0 1 0 1 +GAME 6 1 6 7 +GALLATIN 0 1 1 0 +GAL 0 1 0 1 +GAINED 1 1 1 2 +GABLE 0 1 1 0 +G'YIRLS 0 1 1 0 +FUZZ 0 1 1 0 +FUZ 0 1 0 1 +FUVENT 0 1 0 1 +FUTURE 3 1 3 4 +FUSSION 0 1 0 1 +FURZE 0 1 1 0 +FUND 1 1 1 2 +FUN 2 1 2 3 +FUGITIVES 0 1 1 0 +FUGITIVE 0 1 0 1 +FUCHELEVENT 0 1 0 1 +FRY 1 1 1 2 +FRUSTES 0 1 0 1 +FROZE 0 1 1 0 +FROWNED 0 1 1 0 +FRONTIERS 1 1 2 1 +FRONTIER 0 1 0 1 +FRONT 12 1 13 12 +FROG 0 1 0 1 +FRO 1 1 2 1 +FRITZO 0 1 0 1 +FRISTOE'S 0 1 1 0 +FRIN 0 1 0 1 +FRIGHTFUL 3 1 4 3 +FRIENDS 16 1 17 16 +FRIENDLY 4 1 4 5 +FRIEND 13 1 14 13 +FRIEDLIN 0 1 0 1 +FRIDOLIN 0 1 1 0 +FRIAR 0 1 1 0 +FRET 1 1 2 1 +FRESHMENT 0 1 0 1 +FRESHEN 0 1 0 1 +FRENCH 5 1 5 6 +FREEZE 0 1 0 1 +FREEWAY 0 1 1 0 +FREES 0 1 1 0 +FREEDOM 5 1 6 5 +FREE 12 1 12 13 +FRED 0 1 0 1 +FRAVAIN 0 1 0 1 +FRANCISCO 4 1 5 4 +FRANC 0 1 1 0 +FOXTER 0 1 0 1 +FOURTEENTHAT'S 0 1 1 0 +FOURTEEN 3 1 3 4 +FOURCHELEVENT 0 1 0 1 +FOUNDED 1 1 2 1 +FOUGHT 1 1 2 1 +FOUCHELEVENT 0 1 0 1 +FOSTER 2 1 3 2 +FORWARD 4 1 5 4 +FORTS 0 1 1 0 +FORTNIGHT 0 1 1 0 +FORTNESS 0 1 0 1 +FORMER 3 1 4 3 +FORMED 1 1 2 1 +FORM 9 1 9 10 +FORGON 0 1 0 1 +FORGIVES 0 1 0 1 +FORGATHERED 0 1 0 1 +FOREST 2 1 2 3 +FOREMOTHER 0 1 0 1 +FOREMAN 0 1 1 0 +FOREGATHERED 0 1 1 0 +FORE 0 1 0 1 +FORCHELEVENT 0 1 0 1 +FORCEMENT 0 1 0 1 +FORCED 3 1 4 3 +FORCE 7 1 7 8 +FORACT 0 1 0 1 +FOOTS 0 1 0 1 +FOOT 1 1 1 2 +FOOD 3 1 4 3 +FONTREVAL 0 1 0 1 +FONTEVRAULT 0 1 1 0 +FOLLOWS 5 1 6 5 +FOLK 4 1 4 5 +FOLIGAN 0 1 0 1 +FOLD 1 1 1 2 +FOE 0 1 1 0 +FOCIN 0 1 0 1 +FLYING 0 1 1 0 +FLUTTERING 1 1 2 1 +FLUTTERED 0 1 0 1 +FLUD 0 1 0 1 +FLUCHELEVENT 0 1 0 1 +FLOWERBEDS 0 1 1 0 +FLOWER 1 1 1 2 +FLOSSY 0 1 1 0 +FLOSSIE 0 1 0 1 +FLORID 0 1 0 1 +FLOORBOARDS 0 1 1 0 +FLOOR 3 1 3 4 +FLOCKS 0 1 1 0 +FLITTON 0 1 0 1 +FLIROFF 0 1 0 1 
+FLIES 0 1 0 1 +FLEW 0 1 1 0 +FLAVOUR 0 1 0 1 +FLAVAGASTED 0 1 0 1 +FLAUNT 0 1 0 1 +FLATTERER 0 1 1 0 +FLATTERED 0 1 1 0 +FLATHER 0 1 0 1 +FLATHEADS 0 1 1 0 +FLAT 0 1 0 1 +FLASHLIGHT 0 1 1 0 +FLAREOV 0 1 0 1 +FLARE 0 1 0 1 +FLAP 0 1 0 1 +FLABBERGASTED 0 1 1 0 +FITZ 0 1 0 1 +FITTING 1 1 2 1 +FISHING 2 1 3 2 +FISHIN' 0 1 0 1 +FISHED 1 1 2 1 +FISHE 0 1 0 1 +FIRSTLY 0 1 0 1 +FIRSTER 0 1 1 0 +FIRS 0 1 0 1 +FIRE 14 1 15 14 +FIR 0 1 0 1 +FINNICAL 0 1 0 1 +FINNEY 1 1 2 1 +FINNELL 0 1 0 1 +FINISHED 3 1 3 4 +FINICAL 0 1 1 0 +FINGERING 1 1 1 2 +FINGER 5 1 6 5 +FINELY 1 1 2 1 +FINDING 7 1 8 7 +FINALLY 6 1 6 7 +FILTRATES 0 1 1 0 +FILTRATE 0 1 1 0 +FILLS 1 1 1 2 +FILLIP 0 1 0 1 +FILLIENTLY 0 1 0 1 +FIGURED 0 1 0 1 +FIGGER 0 1 1 0 +FIENDS 0 1 1 0 +FIEND 0 1 0 1 +FIDANTS 0 1 0 1 +FID 0 1 0 1 +FICTION 0 1 0 1 +FIATHIS 0 1 0 1 +FIACRE 0 1 1 0 +FEW 26 1 26 27 +FESTIVE 0 1 1 0 +FESTIVATIONS 0 1 0 1 +FERVENT 0 1 0 1 +FERNED 0 1 0 1 +FER 0 1 0 1 +FELT 18 1 19 18 +FEELS 1 1 2 1 +FEEDS 0 1 1 0 +FEE 0 1 1 0 +FEDERATE 0 1 0 1 +FEATURE 0 1 0 1 +FEATS 0 1 0 1 +FAWN 0 1 0 1 +FAVOURABLE 0 1 0 1 +FAVOT 0 1 0 1 +FAVORABLE 0 1 1 0 +FAULTS 2 1 2 3 +FAULT 5 1 5 6 +FAUCES 0 1 1 0 +FATTY 0 1 1 0 +FATIMATUS 0 1 0 1 +FATHERS 1 1 1 2 +FATHERLAND 0 1 0 1 +FATHER'S 6 1 7 6 +FATE 5 1 5 6 +FATAL 1 1 2 1 +FAST 11 1 12 11 +FASHIONEVENT 0 1 0 1 +FARRINGERS 0 1 0 1 +FARRENDER 0 1 0 1 +FARE 0 1 1 0 +FANSTILL 0 1 0 1 +FANGED 0 1 1 0 +FAN 2 1 3 2 +FAMOUS 1 1 1 2 +FAME 2 1 2 3 +FAM'LY 0 1 1 0 +FALLS 0 1 0 1 +FALLING 5 1 6 5 +FAIRLY'S 0 1 0 1 +FAIR 12 1 13 12 +FAILING 2 1 3 2 +FAILED 7 1 8 7 +FAIL 2 1 3 2 +FAFTENNER'S 0 1 0 1 +FAFNER'S 0 1 0 1 +FAFNER 0 1 0 1 +FAFFNER 0 1 0 1 +FACTIVE 0 1 0 1 +FACED 2 1 2 3 +EYESES 0 1 0 1 +EYED 4 1 4 5 +EXUDY 0 1 0 1 +EXTRAORDINARY 1 1 2 1 +EXTRAORDINARILY 0 1 0 1 +EXTRACTED 0 1 0 1 +EXTRACT 2 1 3 2 +EXTRA 1 1 2 1 +EXTEND 0 1 1 0 +EXPOSED 3 1 3 4 +EXPOSE 2 1 3 2 +EXPLOITING 0 1 1 0 +EXPLODING 0 1 0 1 +EXPLANATION 1 1 1 2 +EXPLAINED 4 1 4 5 +EXPIATION 0 1 1 0 +EXPERIOR 0 1 0 1 +EXPERIMENT 0 1 0 1 +EXPENDING 0 1 0 1 +EXPECTED 7 1 8 7 +EXERT 1 1 1 2 +EXECUTORY 0 1 0 1 +EXECUTE 1 1 1 2 +EXCLAIMED 14 1 15 14 +EXCITING 0 1 1 0 +EXCEPT 11 1 11 12 +EXAMINING 2 1 3 2 +EXAMINED 3 1 3 4 +EXAMINATION 2 1 3 2 +EXACTLY 9 1 9 10 +EXACKLY 0 1 1 0 +EVILISED 0 1 0 1 +EVIL 4 1 5 4 +EVERGREWING 0 1 0 1 +EV'YBODY'S 0 1 1 0 +EUSIBIUS 0 1 0 1 +EUSCIBIUS 0 1 0 1 +EUPS 0 1 0 1 +EUPHRATES 0 1 1 0 +EUPHRANOR 0 1 1 0 +EUPHRANER 0 1 0 1 +EUPHADIS 0 1 0 1 +EUNUCHS 0 1 0 1 +EUNUCH 10 1 11 10 +EUGEUM 0 1 0 1 +ETHER 2 1 3 2 +ETHELRIED'S 0 1 1 0 +ETHELRE'S 0 1 0 1 +ETHEL 0 1 0 1 +ET 2 1 3 2 +ESTRAVA 0 1 0 1 +ESTIMATES 0 1 1 0 +ESTATES 1 1 1 2 +ESPECIALTY 0 1 0 1 +ESCAPED 1 1 1 2 +ERROR 2 1 2 3 +ERRATIC 0 1 1 0 +ERON 0 1 0 1 +ERNESTON 0 1 0 1 +ERNESTINE 0 1 1 0 +ERE'S 0 1 1 0 +ER 0 1 0 1 +EQUERRY'S 0 1 1 0 +EQUEROR'S 0 1 0 1 +EPIMORPHY 0 1 0 1 +EPHRAIM 0 1 1 0 +ENTRUSTED 0 1 1 0 +ENTR'ACTE 0 1 1 0 +ENTERTA 0 1 0 1 +ENTER 5 1 5 6 +ENSUED 2 1 3 2 +ENSNARES 0 1 1 0 +ENSLAVED 2 1 3 2 +ENSLAVE 1 1 1 2 +ENRAGED 0 1 1 0 +ENRAGE 0 1 0 1 +ENJOYED 0 1 0 1 +ENJOY 3 1 4 3 +ENGLISH 4 1 4 5 +ENFRANCHISEMENT 0 1 1 0 +ENFORCEMENT 0 1 1 0 +ENEMY 3 1 3 4 +ENDURETH 0 1 1 0 +ENDURED 0 1 0 1 +ENDS 0 1 0 1 +ENDOWED 0 1 0 1 +ENCRONTISEMENT 0 1 0 1 +ENCHANTING 0 1 0 1 +ENCAMP 0 1 0 1 +EMETIC 0 1 1 0 +EMBRUN 0 1 1 0 +ELL 0 1 0 1 +ELISIONS 0 1 1 0 +ELEVANT 0 1 0 1 +ELEMENTARY 0 1 0 1 +ELEGANTLY 0 1 0 1 +ELECTED 1 1 2 1 +ELE 0 1 0 1 +ELDER 1 1 2 1 +ELBOWS 1 1 1 2 +ELBOW 0 1 1 0 +ELBERT 0 1 1 0 +ELASTIC 1 1 2 1 +EITHER 8 1 8 9 +EIGHTHS 0 1 0 1 +EIGHT 9 1 9 10 +EGGS 1 1 
2 1 +EFFLARIDE 0 1 0 1 +EELS 0 1 1 0 +EDISM 0 1 0 1 +EDGING 2 1 3 2 +EDGED 0 1 0 1 +ECONOMIC 0 1 1 0 +ECHOED 0 1 1 0 +EBSENTEE 0 1 0 1 +EAU 0 1 1 0 +EATS 0 1 0 1 +EASY 8 1 9 8 +EASILY 4 1 5 4 +EASE 3 1 4 3 +EARTHLY 1 1 1 2 +EARTHCOP 0 1 0 1 +EARS 3 1 3 4 +EARNEST 8 1 9 8 +EARL'S 0 1 0 1 +EAR 1 1 2 1 +EANS 0 1 0 1 +EAD 0 1 1 0 +EACH 18 1 18 19 +E'LL 0 1 1 0 +E'ER 0 1 1 0 +E 1 1 2 1 +DYSNEY 0 1 0 1 +DYSINTHIAN 0 1 0 1 +DUST 2 1 2 3 +DUSK 1 1 2 1 +DURING 20 1 20 21 +DURE 0 1 0 1 +DURATION 1 1 2 1 +DUR 0 1 0 1 +DUPLICATES 1 1 2 1 +DUNFAR 0 1 0 1 +DUMEN 0 1 0 1 +DUMAS 0 1 1 0 +DUM 0 1 1 0 +DUKE 11 1 12 11 +DUDS 0 1 1 0 +DU 0 1 1 0 +DRUGSTORE 0 1 1 0 +DRUG 0 1 0 1 +DROUTH 0 1 1 0 +DRIVER 0 1 0 1 +DRINKS 0 1 1 0 +DRINK 24 1 24 25 +DREAMS 0 1 0 1 +DREAM 3 1 4 3 +DREAD 3 1 3 4 +DRAWING 9 1 9 10 +DRAWERS 1 1 2 1 +DRAWER 0 1 0 1 +DRAW 1 1 2 1 +DRAUGHT 2 1 3 2 +DRATO 0 1 0 1 +DRAT 0 1 1 0 +DRAP 0 1 0 1 +DRAKERS 0 1 0 1 +DRAKE 0 1 0 1 +DRAINS 0 1 1 0 +DOWNING 0 1 1 0 +DOWER 0 1 1 0 +DOUGH 0 1 0 1 +DOUBTS 1 1 2 1 +DOTH 1 1 2 1 +DOST 3 1 4 3 +DOSE 0 1 1 0 +DORIS 0 1 0 1 +DOOMFUL 0 1 0 1 +DONOVAN'S 0 1 1 0 +DONOVAN 1 1 2 1 +DONOON 0 1 0 1 +DONG 0 1 0 1 +DOM 0 1 1 0 +DOLOMAN'S 0 1 0 1 +DOLIGHT 0 1 0 1 +DOLE 0 1 0 1 +DOING 9 1 10 9 +DOGS 1 1 2 1 +DOEST 1 1 2 1 +DOESNATE 0 1 0 1 +DOCTRIPOS 0 1 0 1 +DOCTOR 24 1 25 24 +DOCKYARD 0 1 0 1 +DOCK 0 1 1 0 +DOAN 0 1 1 0 +DIXFIELD 0 1 0 1 +DIVORITES 0 1 0 1 +DIVIDEST 0 1 0 1 +DIVIDED 2 1 3 2 +DITCHFIELD 0 1 1 0 +DISTRUSTED 0 1 1 0 +DISTRUDGED 0 1 0 1 +DISTRESSED 2 1 2 3 +DISTRESS 3 1 4 3 +DISTAFF 0 1 1 0 +DISSIMULATION 0 1 1 0 +DISSENTIENT 0 1 1 0 +DISSENSIONS 1 1 2 1 +DISPUTE 0 1 1 0 +DISPUTABLE 0 1 0 1 +DISPOSED 2 1 3 2 +DISPOSE 0 1 0 1 +DISOUT 0 1 0 1 +DISNEY 0 1 1 0 +DISK 0 1 0 1 +DISINFECTING 0 1 1 0 +DISINFACT 0 1 0 1 +DISHES 6 1 7 6 +DISH 2 1 2 3 +DISFIGURED 0 1 1 0 +DISCOURSE 2 1 2 3 +DISCOUR 0 1 0 1 +DISASTROUS 0 1 1 0 +DISASTRATES 0 1 0 1 +DISAPPEAR 1 1 1 2 +DISAGREE 0 1 1 0 +DISAGRATING 0 1 0 1 +DIRTY 2 1 2 3 +DIRTS 0 1 0 1 +DIRK 1 1 2 1 +DIRECTION 7 1 7 8 +DIRECTIFY 0 1 0 1 +DIPPLICATES 0 1 0 1 +DINNERS 1 1 1 2 +DINERS 0 1 1 0 +DIGGING 0 1 1 0 +DIGESTION 2 1 3 2 +DIGEON 0 1 0 1 +DIAD 0 1 0 1 +DEVOUR 0 1 1 0 +DEVOTED 3 1 3 4 +DETECTIVES 0 1 1 0 +DETECTIVE 2 1 2 3 +DETECTIN 0 1 1 0 +DETACHEMONY 0 1 0 1 +DESTRULIAN 0 1 0 1 +DESTRUCTION 0 1 1 0 +DESTINIES 0 1 1 0 +DESPOTIC 0 1 1 0 +DESPITE 2 1 3 2 +DESP 0 1 0 1 +DESLEY 0 1 0 1 +DESK 1 1 2 1 +DESIRES 2 1 3 2 +DESIRE 9 1 9 10 +DESIGNED 1 1 2 1 +DESIGN 2 1 2 3 +DESCENTS 0 1 0 1 +DERELICTS 0 1 1 0 +DEODORIZING 0 1 1 0 +DENY 1 1 1 2 +DENIS 1 1 2 1 +DEMONICO 0 1 0 1 +DEMON 0 1 0 1 +DEMANDS 2 1 3 2 +DEMAND 0 1 0 1 +DELUSION 1 1 2 1 +DELMONICO 0 1 1 0 +DELIVERER 0 1 1 0 +DELIGHTED 2 1 2 3 +DEFERENCE 0 1 1 0 +DEEPENED 0 1 1 0 +DEEP 9 1 9 10 +DECLINED 1 1 1 2 +DECLINE 0 1 0 1 +DECK 6 1 6 7 +DEBTOR 0 1 1 0 +DEBT'S 0 1 0 1 +DEBT 0 1 0 1 +DEBRAMIN 0 1 0 1 +DEANS 0 1 1 0 +DEAN 0 1 0 1 +DEADS 0 1 0 1 +DEADROOM 0 1 0 1 +DEACH 0 1 1 0 +DAWNING 0 1 0 1 +DAUGHTER 10 1 10 11 +DATED 1 1 2 1 +DAT 1 1 1 2 +DARKAND 0 1 1 0 +DARE 5 1 6 5 +DAPHNEY 0 1 0 1 +DAPHNE'S 0 1 1 0 +DANCERS 0 1 0 1 +DANCER 0 1 1 0 +DAMN 0 1 1 0 +DAME'S 0 1 1 0 +DAME 1 1 1 2 +DALYS 0 1 1 0 +DALY 2 1 3 2 +DALMY 0 1 0 1 +DALEY 0 1 0 1 +DALE 0 1 0 1 +DAILY 2 1 2 3 +DAILIES 0 1 0 1 +DAGOS 0 1 1 0 +DAGGOOD 0 1 0 1 +DAGGERS 0 1 0 1 +DADDY 0 1 1 0 +D'YE 0 1 0 1 +CYRUP 0 1 0 1 +CYNTHIA 1 1 2 1 +CYMBALS 0 1 1 0 +CUT 11 1 12 11 +CUSTOM 1 1 2 1 +CURSON 0 1 0 1 +CURSING 0 1 0 1 +CURSED 1 1 2 1 +CURRENTS 0 1 1 0 +CURRANTS 0 1 0 1 +CURFYRAC 0 1 0 1 +CUPS 0 1 0 1 +CUISINE 0 1 
1 0 +CUCKOO 0 1 0 1 +CRUX 0 1 1 0 +CRUSHING 1 1 2 1 +CRUMBLY 0 1 1 0 +CRUMBLED 0 1 0 1 +CRUCIFIXION 8 1 9 8 +CROYD 0 1 0 1 +CROWN 3 1 3 4 +CROST 0 1 1 0 +CROPPISH 0 1 0 1 +CRISTO 0 1 0 1 +CRISP 0 1 0 1 +CRIME 4 1 5 4 +CREO 0 1 0 1 +CREEL 0 1 1 0 +CREDIT 5 1 5 6 +CREATES 0 1 0 1 +CRAYFISHHORESERVES 0 1 0 1 +CRAYFISH 2 1 3 2 +CRATES 1 1 2 1 +CRASHING 0 1 0 1 +CRAMPLED 0 1 0 1 +CRAMBLY 0 1 0 1 +CRABS 0 1 0 1 +CRAB 6 1 7 6 +COYNESS 0 1 1 0 +COY 1 1 2 1 +COXCOMB 0 1 1 0 +COWLEY'S 0 1 1 0 +COVETTES 0 1 0 1 +COURTYARD 2 1 2 3 +COURTS 0 1 0 1 +COURSING 0 1 1 0 +COUNTS 0 1 0 1 +COUNTENANCE 2 1 2 3 +COUNT'S 1 1 2 1 +COUNSELS 0 1 1 0 +COUNSELLOR 0 1 0 1 +COUNCILLOR 0 1 1 0 +COULDN'T 7 1 7 8 +COUGHING 1 1 2 1 +COSTUME 1 1 1 2 +COST 1 1 2 1 +COSEINE 0 1 0 1 +CORYDON 0 1 1 0 +CORTONA 0 1 1 0 +CORTEANA 0 1 0 1 +CORSICIAN 0 1 0 1 +CORSICAN 0 1 1 0 +CORRIE 0 1 0 1 +CORRECT 1 1 1 2 +CORNESTONES 0 1 0 1 +CORMOR 0 1 0 1 +CORKLE 0 1 1 0 +CORKEL 0 1 0 1 +CORAL 0 1 1 0 +COQUETTE 1 1 2 1 +COPSE 0 1 0 1 +COPPER 2 1 2 3 +COP 2 1 3 2 +COOPS 0 1 1 0 +COOL 4 1 5 4 +COOKING 1 1 1 2 +CONTRAY 0 1 0 1 +CONTINUOUS 0 1 0 1 +CONTINUAL 0 1 1 0 +CONTINGENT 0 1 1 0 +CONTENDENT 0 1 0 1 +CONTEND 1 1 2 1 +CONSUM 0 1 0 1 +CONSTANT 3 1 4 3 +CONSONANTS 0 1 1 0 +CONSOMME 0 1 1 0 +CONSIST 0 1 0 1 +CONSIN 0 1 0 1 +CONQUER 0 1 1 0 +CONNINGSBURG 0 1 0 1 +CONINGSBURGH 0 1 1 0 +CONGEALETH 0 1 1 0 +CONFUSE 0 1 0 1 +CONFIRMATON 0 1 0 1 +CONFIRMATION 0 1 1 0 +CONFINED 0 1 0 1 +CONFINE 0 1 0 1 +CONFIDENTIALLY 0 1 1 0 +CONFIDE 1 1 2 1 +CONFICERE 0 1 1 0 +CONFESS 8 1 9 8 +CONFECTIONARY 3 1 4 3 +CONFECTIONARIES 0 1 0 1 +CONCUR 0 1 0 1 +CONCLUDED 2 1 2 3 +CONCERN 2 1 2 3 +CONCEALETH 0 1 0 1 +COMPOSER 0 1 0 1 +COMPEND 0 1 0 1 +COMPARABLE 0 1 0 1 +COMORIN 0 1 1 0 +COMMITTEE 5 1 6 5 +COMMENCED 1 1 2 1 +COMMANDS 2 1 2 3 +COMMANDER 2 1 2 3 +COMIN 0 1 0 1 +COMETH 0 1 1 0 +COMEST 0 1 1 0 +COMBED 0 1 0 1 +COMB 0 1 1 0 +COLOUR 2 1 2 3 +COLOSSEUM 0 1 1 0 +COLOR 1 1 2 1 +COLONEL 27 1 28 27 +COLOGNE 0 1 1 0 +COLLEGE 0 1 0 1 +COLLECTED 1 1 1 2 +COLLECT 0 1 1 0 +COLLEASE 0 1 0 1 +COLISEUM 0 1 0 1 +COLDS 0 1 1 0 +COLD 5 1 6 5 +COLCHISED 0 1 0 1 +COLCHESTER 4 1 5 4 +COINS 1 1 2 1 +COFFISH 0 1 0 1 +COFFIN 20 1 20 21 +CODE 0 1 0 1 +COD 0 1 1 0 +COCTED 0 1 0 1 +COCOA 0 1 1 0 +COCKROL 0 1 0 1 +COCKRELL 0 1 1 0 +COCKET 0 1 0 1 +COBBER 0 1 1 0 +COATS 0 1 0 1 +COARSE 0 1 0 1 +COALESCED 0 1 1 0 +CO 0 1 0 1 +CLUMB 0 1 1 0 +CLUBTOMANIA 0 1 0 1 +CLOTHESILY 0 1 0 1 +CLOSETS 0 1 0 1 +CLOSET 1 1 2 1 +CLOSEST 0 1 1 0 +CLOSELY 4 1 5 4 +CLOMB 0 1 1 0 +CLOISTER 2 1 3 2 +CLINK 0 1 0 1 +CLING 0 1 1 0 +CLIME 1 1 2 1 +CLIMBED 0 1 0 1 +CLIFF 4 1 4 5 +CLEVERLY 0 1 1 0 +CLEVER 2 1 3 2 +CLERVAL 0 1 0 1 +CLEPTOMANIA 0 1 0 1 +CLEFT 1 1 2 1 +CLEF 0 1 0 1 +CLEAVE 0 1 1 0 +CLAWS 0 1 1 0 +CLAVIER 0 1 0 1 +CLASSES 3 1 4 3 +CLARE 0 1 0 1 +CLARA 0 1 0 1 +CLAIRVAUX 0 1 1 0 +CLACKENED 0 1 0 1 +CITIZELY 0 1 0 1 +CISH 0 1 0 1 +CISEAUX 0 1 1 0 +CINRILLA 0 1 0 1 +CINDERELLA 0 1 1 0 +CILLEY 0 1 1 0 +CICEROSINAQUA 0 1 0 1 +CHURCH 13 1 14 13 +CHUG 0 1 0 1 +CHUCKLED 2 1 3 2 +CHUCKED 0 1 1 0 +CHRISTO 0 1 0 1 +CHRISTIANS 0 1 1 0 +CHRISTIANITY 2 1 3 2 +CHRISTIANING 0 1 0 1 +CHRISTIAN 0 1 0 1 +CHRISTENING 0 1 1 0 +CHRISTEN 0 1 0 1 +CHOUETTE 0 1 1 0 +CHOS 0 1 0 1 +CHONODEMAIRE 0 1 1 0 +CHOMI 0 1 0 1 +CHOKINGLY 0 1 0 1 +CHLORODE 0 1 0 1 +CHLED 0 1 0 1 +CHIPS 1 1 2 1 +CHINTZ 0 1 1 0 +CHIN 1 1 1 2 +CHIMNETS 0 1 0 1 +CHIMELESS 0 1 0 1 +CHILLS 0 1 1 0 +CHILLED 1 1 1 2 +CHILL 1 1 1 2 +CHILDLESS 0 1 1 0 +CHIEFS 0 1 0 1 +CHIEF 8 1 9 8 +CHIDE 0 1 1 0 +CHID 0 1 0 1 +CHEWERS 1 1 2 1 +CHEVIKI 0 1 0 1 +CHEST 3 1 3 4 
+CHERISHED 0 1 1 0 +CHERISH 0 1 0 1 +CHEFTS 0 1 0 1 +CHEFS 0 1 1 0 +CHEEKE 0 1 1 0 +CHEEKBONES 0 1 1 0 +CHECKING 0 1 1 0 +CHARS 0 1 0 1 +CHARRED 0 1 0 1 +CHARMED 1 1 2 1 +CHARLIE'S 0 1 0 1 +CHARLEY'S 0 1 1 0 +CHARLEM 0 1 0 1 +CHARGED 4 1 5 4 +CHARACTERISTIC 0 1 1 0 +CHARACTER 6 1 6 7 +CHANN 0 1 0 1 +CHAMBERLAIN 5 1 6 5 +CHALONS 0 1 1 0 +CHALON 0 1 0 1 +CHADWELL 0 1 1 0 +CERTAINLY 13 1 13 14 +CERTAIN 15 1 16 15 +CENTIA 0 1 0 1 +CELL 1 1 2 1 +CEDRIC 1 1 2 1 +CEASE 0 1 0 1 +CAVERN 0 1 0 1 +CAVALRYMEN 0 1 1 0 +CATTLET 0 1 0 1 +CATS 1 1 2 1 +CATO 0 1 0 1 +CATHEDRAL 0 1 1 0 +CATEURAL 0 1 0 1 +CATCHED 0 1 1 0 +CATCH 6 1 6 7 +CAT 2 1 3 2 +CASTRATO 1 1 2 1 +CASTLE 8 1 9 8 +CASTETH 0 1 1 0 +CART 3 1 3 4 +CARRIED 11 1 12 11 +CARMINALS 0 1 0 1 +CAREWORN 1 1 2 1 +CAPTURED 2 1 2 3 +CAPTURE 2 1 3 2 +CAPS 0 1 0 1 +CAPRIVI'S 0 1 1 0 +CAPITULAT 0 1 0 1 +CAPITULANTES 0 1 1 0 +CAPITALISTS 0 1 1 0 +CAPITALIST 0 1 0 1 +CAP'S 0 1 1 0 +CAP 4 1 5 4 +CANS 0 1 1 0 +CANONIZED 0 1 1 0 +CANNONIZED 0 1 0 1 +CANES 0 1 0 1 +CAMPED 0 1 1 0 +CAMPAIGN 0 1 1 0 +CAMP 0 1 1 0 +CAMOUFLAGE 0 1 1 0 +CAMEO 0 1 0 1 +CAMELO 0 1 0 1 +CALLER 0 1 0 1 +CALIFORNIA 0 1 0 1 +CALIFORNI 0 1 0 1 +CALENDER 0 1 0 1 +CALENDAR 0 1 1 0 +CAIN 0 1 1 0 +CAGLE 0 1 0 1 +CAGE 7 1 8 7 +CACKED 0 1 1 0 +CABLE 0 1 0 1 +CA'M 0 1 1 0 +BYE 0 1 1 0 +BUY 4 1 4 5 +BUTTON 0 1 1 0 +BUTTERFLY 0 1 1 0 +BUST 0 1 1 0 +BUSINESSWHICH 0 1 1 0 +BUSINESS 12 1 12 13 +BURYING 0 1 1 0 +BURTULAS 0 1 0 1 +BURST 2 1 3 2 +BURSHEBA 0 1 1 0 +BURNHIL'S 0 1 0 1 +BURNETH 0 1 1 0 +BURNEHELD 0 1 0 1 +BURNEHALD 0 1 0 1 +BURIT 0 1 0 1 +BURGLAR 0 1 0 1 +BULL 0 1 0 1 +BULGEHEVIKI 0 1 0 1 +BULBS 0 1 1 0 +BULB 0 1 1 0 +BUILT 1 1 1 2 +BUILDS 0 1 1 0 +BUFFETING 0 1 1 0 +BUFFET 0 1 0 1 +BRYNHILD'S 0 1 1 0 +BRUSH 3 1 3 4 +BROWN 0 1 0 1 +BROW 0 1 0 1 +BROKER 0 1 0 1 +BRITTANNIUM 0 1 0 1 +BRINEHILL 0 1 0 1 +BRILLIANT 4 1 5 4 +BRIDGE 3 1 4 3 +BRENT 0 1 0 1 +BREED 0 1 1 0 +BREATHLESS 1 1 2 1 +BREATHING 0 1 1 0 +BREATHE 0 1 0 1 +BREASTPAND 0 1 0 1 +BREAST 1 1 2 1 +BREASING 0 1 0 1 +BRAZY 0 1 0 1 +BRAU 0 1 1 0 +BRASS 1 1 2 1 +BRAMMING 0 1 0 1 +BRAMMER 0 1 0 1 +BRAMMEN 0 1 0 1 +BRAMID 0 1 0 1 +BRAM 0 1 0 1 +BRAHMIN 0 1 0 1 +BRAHMEN 0 1 0 1 +BRACKBURN 0 1 0 1 +BRACEY 0 1 0 1 +BRACES 0 1 0 1 +BRACELE 0 1 0 1 +BOYS 7 1 8 7 +BOX 10 1 10 11 +BOWL 0 1 1 0 +BOURMANOIR 0 1 0 1 +BOURGES 0 1 1 0 +BOURGE 0 1 0 1 +BOUNDS 0 1 0 1 +BOUHAIR 0 1 0 1 +BOUGHT 3 1 4 3 +BOTTOMED 0 1 1 0 +BOTTLED 0 1 1 0 +BOTTLE 3 1 4 3 +BOTHERED 1 1 1 2 +BORDON 0 1 0 1 +BOONE 0 1 1 0 +BOON 1 1 2 1 +BOOMED 1 1 2 1 +BOOLA 0 1 0 1 +BOOK 7 1 8 7 +BONNETS 1 1 2 1 +BONNET 1 1 1 2 +BONES 2 1 2 3 +BONDAGE 1 1 2 1 +BOLT 0 1 1 0 +BOLD 0 1 0 1 +BOILER 0 1 1 0 +BOIL 4 1 5 4 +BOEUF 1 1 2 1 +BOEOTIAN 0 1 1 0 +BODY'S 0 1 0 1 +BOBS 0 1 0 1 +BOB'S 1 1 2 1 +BOARDS 1 1 1 2 +BOARD 5 1 5 6 +BLUE 7 1 7 8 +BLOTCHETT 0 1 0 1 +BLOOMY 0 1 0 1 +BLOOMIN 0 1 1 0 +BLOODSHED 0 1 1 0 +BLOODED 0 1 0 1 +BLOKES 0 1 1 0 +BLOKE 0 1 1 0 +BLOCK 1 1 2 1 +BLOCHHEAD 0 1 0 1 +BLINKED 1 1 2 1 +BLEW 0 1 1 0 +BLANKETED 0 1 1 0 +BLANKET 0 1 0 1 +BLANKARD 0 1 0 1 +BLADGET 0 1 0 1 +BLADGE 0 1 0 1 +BLACKLEG 0 1 1 0 +BLACKGUARD 0 1 1 0 +BLACKBURN 1 1 2 1 +BLACK 13 1 13 14 +BITTER 2 1 3 2 +BITING 0 1 1 0 +BIT 10 1 11 10 +BISQUE 0 1 1 0 +BIRTHPLACE 0 1 1 0 +BIRDSEYE 0 1 1 0 +BIRDS 0 1 0 1 +BIRD'S 0 1 0 1 +BINTAMEN 0 1 0 1 +BINKED 0 1 0 1 +BIND 0 1 1 0 +BILLY 0 1 0 1 +BIDS 0 1 0 1 +BIDLES 0 1 0 1 +BICK 0 1 0 1 +BIBLE 3 1 4 3 +BIBBICAL 0 1 0 1 +BI 0 1 0 1 +BHANG 0 1 1 0 +BEXT 0 1 0 1 +BEWARE 1 1 2 1 +BEVOCO 0 1 0 1 +BEULAH 0 1 1 0 +BETTER 28 1 29 28 +BETRAY 1 1 1 2 +BETHUNE 0 1 1 0 +BETCHA 0 1 1 
0 +BETAKEN 0 1 1 0 +BESTOW 3 1 3 4 +BESSIE 0 1 0 1 +BESOON 0 1 0 1 +BESIN 0 1 0 1 +BESIEGE 0 1 0 1 +BESIDES 9 1 9 10 +BESIDE 3 1 4 3 +BERTRADIZANCE 0 1 0 1 +BERTIE 0 1 0 1 +BERTH 0 1 0 1 +BERNNETH 0 1 0 1 +BEPPOCO 0 1 0 1 +BENT 1 1 2 1 +BENSON 0 1 1 0 +BENOIT 0 1 1 0 +BENOIS 0 1 0 1 +BENNETT 0 1 1 0 +BENNET 0 1 0 1 +BENJAMIN 0 1 1 0 +BENITT'S 0 1 0 1 +BENEATH 3 1 3 4 +BENDONED 0 1 0 1 +BEND 1 1 1 2 +BEN 0 1 0 1 +BELT 0 1 0 1 +BELOVED 1 1 1 2 +BELONGS 1 1 1 2 +BELONGED 0 1 1 0 +BELLOWED 0 1 1 0 +BELLEZER 0 1 0 1 +BELLE 0 1 1 0 +BELLAMED 0 1 0 1 +BELIKE 0 1 1 0 +BELIEVER 0 1 0 1 +BELIEVED 5 1 6 5 +BELIEU 0 1 0 1 +BELIEF 5 1 5 6 +BEHOLS 0 1 0 1 +BEHOLDAY 0 1 0 1 +BEHELD 0 1 0 1 +BEGUN 2 1 3 2 +BEGUILED 0 1 1 0 +BEGUILD 0 1 0 1 +BEGIN 9 1 9 10 +BEGGING 1 1 2 1 +BEFALL 0 1 0 1 +BEFAL 0 1 1 0 +BEER 2 1 2 3 +BEEJON 0 1 0 1 +BEE 0 1 0 1 +BEDS 1 1 1 2 +BEDOUIN 0 1 1 0 +BEDOING 0 1 0 1 +BED 14 1 14 15 +BECKY 0 1 1 0 +BECAUSE 34 1 34 35 +BEAUT 0 1 0 1 +BEAUMANOIR 0 1 1 0 +BEATHFUL 0 1 0 1 +BEAT 5 1 6 5 +BEASTLY 0 1 1 0 +BEARING 6 1 6 7 +BEARED 0 1 0 1 +BEACHER 0 1 0 1 +BEA 0 1 0 1 +BAZARD 0 1 0 1 +BAXTER 0 1 1 0 +BATH 1 1 1 2 +BASSORA 0 1 0 1 +BASER 1 1 1 2 +BASEMENT 0 1 1 0 +BARS 5 1 5 6 +BARKLEY 0 1 1 0 +BARGIENLO 0 1 0 1 +BARGELLO 0 1 1 0 +BARELY 1 1 2 1 +BARE 1 1 1 2 +BAR 6 1 7 6 +BAPTISMAL 0 1 1 0 +BAPLICO 0 1 0 1 +BANNY 0 1 0 1 +BANDREE 0 1 0 1 +BANDINELLO 0 1 1 0 +BANDAGE 0 1 0 1 +BALLROOM 0 1 1 0 +BALLOT 3 1 4 3 +BALES 2 1 2 3 +BALE 0 1 1 0 +BALAMMED 0 1 1 0 +BAILIQUE 0 1 0 1 +BAILEY'S 0 1 0 1 +BAIL 1 1 1 2 +BAFF 0 1 0 1 +BACKY 0 1 0 1 +BABYSMAL 0 1 0 1 +BABES 0 1 1 0 +BABE 0 1 0 1 +B 2 1 3 2 +AZURE 0 1 1 0 +AZARIAH 0 1 1 0 +AY 0 1 0 1 +AXIS 0 1 0 1 +AX 0 1 0 1 +AWK'ARD 0 1 1 0 +AWFUL 4 1 4 5 +AW 0 1 1 0 +AVOID 4 1 4 5 +AVIDITY 0 1 1 0 +AVENUE 2 1 3 2 +AVE 0 1 1 0 +AUTHEST 0 1 0 1 +AUNTS 0 1 0 1 +AUGHT 2 1 2 3 +ATTIGUE 0 1 0 1 +ATTENTIVE 0 1 0 1 +ATTENTIONS 0 1 0 1 +ATTENDED 0 1 0 1 +ATTEMPTED 2 1 3 2 +ATRONE 0 1 0 1 +ATHELSTANE 0 1 1 0 +ASSERTING 0 1 0 1 +ASSERTED 0 1 0 1 +ASSERT 2 1 2 3 +ASSALY 0 1 0 1 +ASSAILING 0 1 1 0 +ASS 2 1 3 2 +ASLEEP 9 1 10 9 +ASIA 0 1 1 0 +ASHUR 0 1 1 0 +ASHLEY 4 1 5 4 +ASHHOPPER 0 1 0 1 +ASHER 0 1 0 1 +ASH 0 1 1 0 +ASCERTAINING 0 1 1 0 +ASCERTAIN 0 1 1 0 +ASCENSIONS 0 1 0 1 +ARTISTRA 0 1 0 1 +ARTIST 5 1 6 5 +ARTISON 0 1 0 1 +ARTHUR 0 1 1 0 +ARSTS 0 1 1 0 +ARSINOE'S 0 1 1 0 +ARSENO'S 0 1 0 1 +ARRIVED 4 1 4 5 +ARRIVE 3 1 4 3 +ARREST 0 1 0 1 +ARQUEBALD 0 1 0 1 +ARPET 0 1 0 1 +AROUSED 0 1 1 0 +ARMS 9 1 9 10 +ARM 4 1 5 4 +ARKANSAS 0 1 1 0 +ARIST'S 0 1 0 1 +ARISED 0 1 0 1 +AREN'T 2 1 3 2 +AREN 0 1 0 1 +ARDENTS 0 1 0 1 +ARDENT 2 1 3 2 +ARD 0 1 0 1 +ARCHIBALD 0 1 1 0 +ARCHIAS 0 1 1 0 +ARCHBISHOP 1 1 2 1 +ARCHBISH 0 1 0 1 +AQUA 0 1 1 0 +APT 1 1 1 2 +APPROVE 0 1 1 0 +APPLES 0 1 0 1 +APPEARED 8 1 8 9 +APPEAR 5 1 6 5 +APPARENTLY 4 1 5 4 +APOMORPHINE 0 1 1 0 +APOLLO 0 1 0 1 +APES 1 1 2 1 +APE 1 1 1 2 +ANYWAY 1 1 2 1 +ANYTHING 31 1 31 32 +ANYONE'S 0 1 1 0 +ANY'S 0 1 0 1 +ANXIETY 4 1 5 4 +ANTUM 0 1 0 1 +ANTONIO 0 1 1 0 +ANTONIA 0 1 0 1 +ANTOLIAN 0 1 1 0 +ANTIDOTES 0 1 1 0 +ANSWERS 1 1 2 1 +ANSWERED 26 1 26 27 +ANOTHER 30 1 31 30 +ANNOYED 2 1 3 2 +ANNOY 0 1 0 1 +ANNE 1 1 1 2 +ANIMATE 0 1 1 0 +ANGUISH 3 1 4 3 +ANGESTON 0 1 1 0 +ANGER 2 1 2 3 +ANEW 0 1 1 0 +ANDY'S 0 1 0 1 +ANDS 0 1 1 0 +ANDREW 1 1 2 1 +ANDBUT 0 1 1 0 +ANCIDE 0 1 0 1 +ANCESTORS 0 1 1 0 +ANALYSIS 0 1 1 0 +AMOUR 0 1 1 0 +AMOPRA 0 1 0 1 +AMONGST 4 1 5 4 +AMID 1 1 1 2 +AMIABLE 0 1 1 0 +AMBRON 0 1 0 1 +AMATIC 0 1 0 1 +ALTHOUGH 9 1 10 9 +ALTHIE 0 1 0 1 +ALTHIA 0 1 0 1 +ALTER 0 1 1 0 +ALREADY 15 1 16 15 +ALRE 0 1 0 1 
+ALONGSIDE 0 1 1 0 +ALONGER 0 1 1 0 +ALONG 15 1 15 16 +ALONE 10 1 10 11 +ALOES 1 1 2 1 +ALMS 1 1 2 1 +ALLS 0 1 1 0 +ALLOWED 6 1 7 6 +ALLOWANCE 0 1 1 0 +ALLO'S 0 1 0 1 +ALLIGATOR 1 1 2 1 +ALLIED 0 1 1 0 +ALLEY 1 1 1 2 +ALLEN 0 1 1 0 +ALLAN 0 1 0 1 +ALKALOID 0 1 1 0 +ALIVE 2 1 2 3 +ALIT 0 1 0 1 +ALISANDRO 0 1 0 1 +ALIMENTARY 0 1 1 0 +ALIGHTED 1 1 2 1 +ALGITTEE 0 1 0 1 +ALF 0 1 1 0 +ALEXE 0 1 0 1 +ALESSANDRO 0 1 1 0 +ALENUBERG 0 1 0 1 +ALBERT 9 1 10 9 +ALADAMA 0 1 0 1 +ALABAMA 0 1 1 0 +AKELOITS 0 1 0 1 +AIR 9 1 10 9 +AILS 0 1 1 0 +AID 4 1 5 4 +AGRARIAN 0 1 1 0 +AGONIC 0 1 0 1 +AGONE 0 1 1 0 +AGO 9 1 10 9 +AGENTIVE 0 1 0 1 +AGENT 2 1 3 2 +AGE 4 1 4 5 +AFTERWARDS 5 1 6 5 +AFTERWARD 2 1 2 3 +AFT 1 1 2 1 +AFORESAID 0 1 1 0 +AFOR 0 1 0 1 +AFIRE 0 1 1 0 +AFFLICTION 0 1 1 0 +AFFLICATION 0 1 0 1 +AFFECTION 0 1 1 0 +AFFECTANT 0 1 0 1 +AFAR 0 1 0 1 +ADVISED 0 1 1 0 +ADVICE 2 1 2 3 +ADVENTURES 2 1 3 2 +ADVENTUR 0 1 0 1 +ADULTERATED 0 1 0 1 +ADULT 0 1 1 0 +ADORED 0 1 1 0 +ADN'T 0 1 1 0 +ADMIRED 1 1 1 2 +ADMIABLE 0 1 0 1 +ADHERENTS 0 1 1 0 +ADHERENCE 0 1 0 1 +ADDSTEIN 0 1 0 1 +ADDER 0 1 0 1 +ADAIR 0 1 1 0 +AD 0 1 1 0 +ACUTEATION 0 1 0 1 +ACUTE 1 1 2 1 +ACTS 3 1 4 3 +ACQUIRED 2 1 2 3 +ACQUIRE 0 1 0 1 +ACQUAINT 1 1 1 2 +ACKNOWLEDGE 1 1 2 1 +ACHESON 0 1 1 0 +ACCUSTOM 0 1 0 1 +ACCURSED 0 1 0 1 +ACCULENT 0 1 0 1 +ACCOUNT 7 1 7 8 +ACCORD 1 1 1 2 +ACCOLITES 0 1 0 1 +ACCOHOL 0 1 0 1 +ACCESS 0 1 1 0 +ACCEPT 4 1 5 4 +ACALIT 0 1 0 1 +ABSTANCE 0 1 0 1 +ABSTAINED 0 1 0 1 +ABSTAIN 0 1 1 0 +ABSENTEE 0 1 1 0 +ABRUPT 0 1 0 1 +ABOVE 9 1 9 10 +ABIDING 0 1 0 1 +ABASEMENT 0 1 0 1 +ZEAL 1 0 1 1 +ZAMAN 4 0 4 4 +YUNKERS 1 0 1 1 +YOUNGEST 1 0 1 1 +YOUNGERS 2 0 2 2 +YOUNGER 8 0 8 8 +YORK 3 0 3 3 +YOLKS 1 0 1 1 +YIELDED 1 0 1 1 +YIELD 1 0 1 1 +YESTERDAY 3 0 3 3 +YES'M 1 0 1 1 +YELLOW 4 0 4 4 +YELL 1 0 1 1 +YEARNING 1 0 1 1 +YEA 1 0 1 1 +YAWN 1 0 1 1 +YARNS 1 0 1 1 +YARDS 1 0 1 1 +YACHT 1 0 1 1 +WYLDER 1 0 1 1 +WRONG 4 0 4 4 +WRITTEN 5 0 5 5 +WRITING 1 0 1 1 +WRITER 1 0 1 1 +WRINKLES 1 0 1 1 +WRINGING 1 0 1 1 +WRIGGLING 1 0 1 1 +WRETCHED 2 0 2 2 +WRECKAGE 1 0 1 1 +WREATHS 1 0 1 1 +WRAPPING 1 0 1 1 +WRAPPED 1 0 1 1 +WOUND 2 0 2 2 +WOULDST 1 0 1 1 +WOULDN'T 9 0 9 9 +WOTTETH 1 0 1 1 +WORST 1 0 1 1 +WORN 3 0 3 3 +WORLDLY 1 0 1 1 +WORKSHOP 1 0 1 1 +WORKHOUSE 1 0 1 1 +WORKERS 1 0 1 1 +WORKER 1 0 1 1 +WORE 3 0 3 3 +WONDROUS 1 0 1 1 +WONDERING 1 0 1 1 +WONDERFULLY 1 0 1 1 +WONDERFUL 6 0 6 6 +WON'T 13 0 13 13 +WON 1 0 1 1 +WOMAN'S 2 0 2 2 +WOLF 1 0 1 1 +WOES 1 0 1 1 +WOE 2 0 2 2 +WIZARDS 1 0 1 1 +WITTY 1 0 1 1 +WITNESSED 1 0 1 1 +WITHHELD 2 0 2 2 +WITHDRAWN 2 0 2 2 +WITCHES 1 0 1 1 +WISTFUL 1 0 1 1 +WISHING 3 0 3 3 +WISHES 3 0 3 3 +WISH 15 0 15 15 +WISELY 1 0 1 1 +WISDOM 4 0 4 4 +WIRE 1 0 1 1 +WINGS 2 0 2 2 +WINDOWS 1 0 1 1 +WIN 2 0 2 2 +WILLINGLY 1 0 1 1 +WILLING 4 0 4 4 +WILFUL 1 0 1 1 +WIELD 1 0 1 1 +WIDOWER 1 0 1 1 +WIDOW 1 0 1 1 +WIDEN 1 0 1 1 +WICKED 2 0 2 2 +WHOSO 1 0 1 1 +WHOOP 1 0 1 1 +WHOMSOEVER 1 0 1 1 +WHOLLY 2 0 2 2 +WHOEVER 2 0 2 2 +WHITEHALL 1 0 1 1 +WHISTLING 2 0 2 2 +WHISTLE 3 0 3 3 +WHISPERED 2 0 2 2 +WHIPPINGS 1 0 1 1 +WHIP 2 0 2 2 +WHIMPERING 1 0 1 1 +WHIM 1 0 1 1 +WHILST 1 0 1 1 +WHEREVER 2 0 2 2 +WHEREUPON 3 0 3 3 +WHEREIN 2 0 2 2 +WHEREFORE 2 0 2 2 +WHEREBY 2 0 2 2 +WHERE'S 4 0 4 4 +WHENEVER 4 0 4 4 +WHEELS 1 0 1 1 +WHATSOEVER 1 0 1 1 +WETTED 1 0 1 1 +WET 3 0 3 3 +WESTWARD 1 0 1 1 +WESTERN 2 0 2 2 +WEST 3 0 3 3 +WEREN'T 1 0 1 1 +WEPT 2 0 2 2 +WENCH 1 0 1 1 +WELCOMED 1 0 1 1 +WELCOME 4 0 4 4 +WEIGHTY 1 0 1 1 +WEIGHT 1 0 1 1 +WEIGHING 1 0 1 1 +WEIGHED 1 0 1 1 +WEEPING 3 0 3 3 +WEEKS 1 0 1 1 +WEDNESDAY 2 0 2 2 +WEDDING 7 
0 7 7 +WEB 1 0 1 1 +WEARY 4 0 4 4 +WEARING 2 0 2 2 +WEAPONS 1 0 1 1 +WEAPON 1 0 1 1 +WEALTHY 3 0 3 3 +WEALTH 3 0 3 3 +WEAKNESS 4 0 4 4 +WE'LL 2 0 2 2 +WE'D 1 0 1 1 +WAZIR 5 0 5 5 +WAYLAID 1 0 1 1 +WAVING 1 0 1 1 +WAVES 2 0 2 2 +WAVE 1 0 1 1 +WATERY 1 0 1 1 +WATERVILLE 1 0 1 1 +WATERS 1 0 1 1 +WATCHING 7 0 7 7 +WASTED 1 0 1 1 +WASHINGTON 2 0 2 2 +WASHED 4 0 4 4 +WASH 4 0 4 4 +WARRANT 1 0 1 1 +WARNING 1 0 1 1 +WARNER 1 0 1 1 +WARN'T 2 0 2 2 +WARMTH 1 0 1 1 +WARMLY 1 0 1 1 +WARM 1 0 1 1 +WAREHOUSES 1 0 1 1 +WANTING 1 0 1 1 +WALLET 2 0 2 2 +WALKED 10 0 10 10 +WALK 5 0 5 5 +WAKING 2 0 2 2 +WAKED 2 0 2 2 +WAITERS 1 0 1 1 +WAITER 1 0 1 1 +WAISTCOAT 3 0 3 3 +WAIST 1 0 1 1 +WAGONS 1 0 1 1 +WAGON 1 0 1 1 +WAGED 1 0 1 1 +WADDLED 1 0 1 1 +W 1 0 1 1 +VRONSKY 1 0 1 1 +VOYAGES 1 0 1 1 +VOYAGE 7 0 7 7 +VOWS 2 0 2 2 +VOWELS 1 0 1 1 +VOW 1 0 1 1 +VOTING 2 0 2 2 +VOTES 3 0 3 3 +VOTED 1 0 1 1 +VOLUNTEERS 2 0 2 2 +VOLUNTARILY 1 0 1 1 +VOLUMINOUS 1 0 1 1 +VOLUME 2 0 2 2 +VOLLEY 1 0 1 1 +VOLCANOES 1 0 1 1 +VOICELESS 1 0 1 1 +VOICE 20 0 20 20 +VOCAL 2 0 2 2 +VITRIOL 1 0 1 1 +VITAL 1 0 1 1 +VISITOR 4 0 4 4 +VISITING 1 0 1 1 +VISITED 2 0 2 2 +VISIT 8 0 8 8 +VISION 1 0 1 1 +VISCOUNT 1 0 1 1 +VIRTUOUS 2 0 2 2 +VIRTUE 2 0 2 2 +VIRGINIA 1 0 1 1 +VIOLENTLY 1 0 1 1 +VIOLENT 1 0 1 1 +VINE 1 0 1 1 +VINDICTIVENESS 1 0 1 1 +VILE 1 0 1 1 +VIGOROUS 3 0 3 3 +VIGILANT 1 0 1 1 +VIEWS 1 0 1 1 +VIEW 1 0 1 1 +VICTORY 1 0 1 1 +VICTORIAN 1 0 1 1 +VICTIMS 1 0 1 1 +VICTIMIZE 1 0 1 1 +VICTIM 3 0 3 3 +VICIOUS 3 0 3 3 +VICES 1 0 1 1 +VICE 1 0 1 1 +VEXED 1 0 1 1 +VEXATION 1 0 1 1 +VESSEL 2 0 2 2 +VERDICT 3 0 3 3 +VENICE 2 0 2 2 +VENIAL 1 0 1 1 +VENGEANCE 1 0 1 1 +VEINS 1 0 1 1 +VEILS 1 0 1 1 +VEHICLES 1 0 1 1 +VEGETABLES 1 0 1 1 +VEGETABLE 1 0 1 1 +VECCHIO 1 0 1 1 +VAUDEVILLE 1 0 1 1 +VARIOUS 2 0 2 2 +VARIES 1 0 1 1 +VARIED 1 0 1 1 +VANITY 1 0 1 1 +VANISHED 1 0 1 1 +VANE 1 0 1 1 +VAMPA 2 0 2 2 +VALUES 2 0 2 2 +VALUE 3 0 3 3 +VALUABLES 1 0 1 1 +VALLEY 2 0 2 2 +VALJEAN'S 3 0 3 3 +VALJEAN 7 0 7 7 +VAIN 5 0 5 5 +VAGUELY 1 0 1 1 +VAGUE 1 0 1 1 +VACATION 1 0 1 1 +VACANTLY 1 0 1 1 +UTTERLY 3 0 3 3 +UTTERING 1 0 1 1 +UTTERED 4 0 4 4 +UTMOST 5 0 5 5 +USURPER 2 0 2 2 +USUALLY 6 0 6 6 +USUAL 3 0 3 3 +USING 3 0 3 3 +USEST 1 0 1 1 +URGED 2 0 2 2 +URGE 1 0 1 1 +UPSET 1 0 1 1 +UPRIGHT 1 0 1 1 +UPPER 5 0 5 5 +UNWEPT 1 0 1 1 +UNUSUALLY 1 0 1 1 +UNUSUAL 2 0 2 2 +UNSWERVING 1 0 1 1 +UNSOUGHT 1 0 1 1 +UNSELFISH 1 0 1 1 +UNSEASONABLE 1 0 1 1 +UNS 1 0 1 1 +UNREASONABLE 1 0 1 1 +UNPRESSED 1 0 1 1 +UNPLEASANT 3 0 3 3 +UNPITIED 1 0 1 1 +UNOCCUPIED 1 0 1 1 +UNNATURAL 1 0 1 1 +UNMISTAKABLY 1 0 1 1 +UNLUCKY 2 0 2 2 +UNLIKELY 1 0 1 1 +UNKNOWN 2 0 2 2 +UNKIND 1 0 1 1 +UNJOINTED 1 0 1 1 +UNIVERSE 1 0 1 1 +UNIVERSAL 5 0 5 5 +UNITED 5 0 5 5 +UNISON 1 0 1 1 +UNIQUE 1 0 1 1 +UNIONISTS 1 0 1 1 +UNION 1 0 1 1 +UNINTENTIONAL 1 0 1 1 +UNIFORM 2 0 2 2 +UNHESITATINGLY 1 0 1 1 +UNHEARD 1 0 1 1 +UNHAPPINESS 1 0 1 1 +UNGRATEFUL 3 0 3 3 +UNFORTUNATELY 2 0 2 2 +UNFORTUNATE 2 0 2 2 +UNFLATTERING 1 0 1 1 +UNEXPECTEDLY 2 0 2 2 +UNEASY 4 0 4 4 +UNEASILY 1 0 1 1 +UNDOUBTEDLY 1 0 1 1 +UNDERTOOK 1 0 1 1 +UNDERTONE 1 0 1 1 +UNDERTAKE 1 0 1 1 +UNDERSTOOD 6 0 6 6 +UNDERSTANDS 1 0 1 1 +UNDERSTANDING 5 0 5 5 +UNDERSTAND 7 0 7 7 +UNDERNEATH 1 0 1 1 +UNDERGROUND 1 0 1 1 +UNDERGO 1 0 1 1 +UNCONNECTED 1 0 1 1 +UNCONCERN 1 0 1 1 +UNCOMMON 1 0 1 1 +UNCOMFORTABLY 2 0 2 2 +UNCOMFORTABLE 1 0 1 1 +UNCLE'S 2 0 2 2 +UNCERTAIN 2 0 2 2 +UNBURDEN 1 0 1 1 +UNAWARE 1 0 1 1 +UNASSISTED 1 0 1 1 +UNALTERABLE 1 0 1 1 +UNABLE 1 0 1 1 +UGLY 1 0 1 1 +TYRANTS 1 0 1 1 +TYRANT 2 0 2 2 +TYPE 1 0 1 1 +TWIST 1 0 1 1 +TWILIGHT 
1 0 1 1 +TWICE 2 0 2 2 +TWENTY 16 0 16 16 +TWELVEMONTH 1 0 1 1 +TWELVE 3 0 3 3 +TUTORS 2 0 2 2 +TURRETS 1 0 1 1 +TURKISH 1 0 1 1 +TURK 1 0 1 1 +TURBAN 1 0 1 1 +TUNE 1 0 1 1 +TUMBLE 1 0 1 1 +TUG 1 0 1 1 +TUESDAY 1 0 1 1 +TUCKED 1 0 1 1 +TUBE 2 0 2 2 +TRYING 8 0 8 8 +TRUTH 10 0 10 10 +TRUSTWORTHY 1 0 1 1 +TRUSTED 1 0 1 1 +TRUST 3 0 3 3 +TRUNK 1 0 1 1 +TRUE 15 0 15 15 +TROUSERS 2 0 2 2 +TROUBLING 1 0 1 1 +TROUBLED 6 0 6 6 +TROUBLE 8 0 8 8 +TROLL 1 0 1 1 +TRIUMPHING 1 0 1 1 +TRIP 2 0 2 2 +TRIM 1 0 1 1 +TRIFLING 3 0 3 3 +TRIBUTE 2 0 2 2 +TRIANGLE 1 0 1 1 +TRIAL 3 0 3 3 +TREND 1 0 1 1 +TREMBLE 1 0 1 1 +TREES 3 0 3 3 +TREATMENT 1 0 1 1 +TREATED 1 0 1 1 +TREAT 3 0 3 3 +TREASONS 1 0 1 1 +TREACHEROUSLY 1 0 1 1 +TRAVILLA 1 0 1 1 +TRAVELLERS 2 0 2 2 +TRAP 2 0 2 2 +TRANSPORTED 2 0 2 2 +TRANSPARENT 1 0 1 1 +TRANSITORINESS 1 0 1 1 +TRANSFORMING 1 0 1 1 +TRANSFIGURED 1 0 1 1 +TRANSFERENCE 1 0 1 1 +TRANQUILLITIES 1 0 1 1 +TRAMP 2 0 2 2 +TRAINED 1 0 1 1 +TRAGIC 1 0 1 1 +TRADITIONAL 1 0 1 1 +TRADEMARK 1 0 1 1 +TRADE 7 0 7 7 +TRACK 3 0 3 3 +TRACED 1 0 1 1 +TRACEABLE 1 0 1 1 +TRACE 2 0 2 2 +TOY 1 0 1 1 +TOWNSFOLK 1 0 1 1 +TOWN 14 0 14 14 +TOWERS 1 0 1 1 +TOWERING 1 0 1 1 +TOUCHING 4 0 4 4 +TOUCHED 5 0 5 5 +TOUCH 3 0 3 3 +TOSSED 1 0 1 1 +TORTURES 1 0 1 1 +TORMENTOR 2 0 2 2 +TORE 1 0 1 1 +TOPS 1 0 1 1 +TOPIC 1 0 1 1 +TONGUES 1 0 1 1 +TONES 2 0 2 2 +TONE 6 0 6 6 +TOMBS 1 0 1 1 +TOMB 2 0 2 2 +TOLERABLY 1 0 1 1 +TOKEN 1 0 1 1 +TOILING 1 0 1 1 +TOILETTE 1 0 1 1 +TOES 2 0 2 2 +TOBACCO 7 0 7 7 +TOASTED 2 0 2 2 +TOAST 1 0 1 1 +TIS 4 0 4 4 +TIPPLING 1 0 1 1 +TINY 1 0 1 1 +TINKLE 1 0 1 1 +TIMEPIECE 1 0 1 1 +TIGHTLY 2 0 2 2 +TIGHTENING 1 0 1 1 +TIGHTENED 1 0 1 1 +TIGHT 1 0 1 1 +TIED 2 0 2 2 +TIDES 1 0 1 1 +TIDE 1 0 1 1 +TICKLING 1 0 1 1 +TICKING 1 0 1 1 +TICKET 1 0 1 1 +THYSELF 3 0 3 3 +THYME 1 0 1 1 +THWARTED 1 0 1 1 +THURSDAY 1 0 1 1 +THUNDER 3 0 3 3 +THRUST 6 0 6 6 +THROWN 1 0 1 1 +THROW 2 0 2 2 +THROUGHOUT 3 0 3 3 +THRONE 2 0 2 2 +THROAT 2 0 2 2 +THRILLING 1 0 1 1 +THRIFTILY 1 0 1 1 +THREW 8 0 8 8 +THREES 1 0 1 1 +THREATS 3 0 3 3 +THREATENED 1 0 1 1 +THREAD 2 0 2 2 +THRACE 1 0 1 1 +THOUGHTFUL 2 0 2 2 +THOROUGHLY 1 0 1 1 +THORNTON 4 0 4 4 +THONG 1 0 1 1 +THIRTY 7 0 7 7 +THIRTEEN 1 0 1 1 +THIRSTY 1 0 1 1 +THIRSTING 1 0 1 1 +THINKING 4 0 4 4 +THIEVES 1 0 1 1 +THIEF 2 0 2 2 +THICKENING 1 0 1 1 +THICK 4 0 4 4 +THEY'D 2 0 2 2 +THEREWITH 1 0 1 1 +THEREIN 3 0 3 3 +THEREAFTER 1 0 1 1 +THEORY 2 0 2 2 +THEOLOGIANS 1 0 1 1 +THENCEFORTH 1 0 1 1 +THENCE 1 0 1 1 +THEMSELVES 17 0 17 17 +THANKFUL 2 0 2 2 +THANKED 2 0 2 2 +THANK 7 0 7 7 +TEXAS 1 0 1 1 +TESTING 1 0 1 1 +TESTIFY 2 0 2 2 +TERROR 5 0 5 5 +TERRIFIC 2 0 2 2 +TERMS 1 0 1 1 +TERM 2 0 2 2 +TERENTIUS 1 0 1 1 +TENDING 1 0 1 1 +TENDERNESS 1 0 1 1 +TENDERLY 1 0 1 1 +TENDER 3 0 3 3 +TENDENCY 1 0 1 1 +TENACITY 1 0 1 1 +TEMPTRESS 1 0 1 1 +TEMPTING 1 0 1 1 +TEMPTATION 4 0 4 4 +TEMPORARY 2 0 2 2 +TEMPLARS 2 0 2 2 +TEMPLAR 2 0 2 2 +TEMPEST 1 0 1 1 +TEMPERATURE 1 0 1 1 +TEMPERATE 2 0 2 2 +TEMPERAMENT 1 0 1 1 +TEMPER 1 0 1 1 +TELLING 2 0 2 2 +TELEPHONE 1 0 1 1 +TELEGRAM 3 0 3 3 +TEEMING 1 0 1 1 +TEASPOONFUL 1 0 1 1 +TEARING 1 0 1 1 +TEAR 3 0 3 3 +TEAM 1 0 1 1 +TEACHING 1 0 1 1 +TEACHERS 1 0 1 1 +TEACH 2 0 2 2 +TAXES 1 0 1 1 +TAUNTS 1 0 1 1 +TASTES 1 0 1 1 +TASTED 2 0 2 2 +TASK 3 0 3 3 +TARRIED 1 0 1 1 +TAPE 1 0 1 1 +TANNER 1 0 1 1 +TALL 2 0 2 2 +TALKS 1 0 1 1 +TALKING 5 0 5 5 +TALKER 1 0 1 1 +TALKED 5 0 5 5 +TALK 15 0 15 15 +TALES 1 0 1 1 +TAKINGS 1 0 1 1 +TAKING 11 0 11 11 +TAKES 3 0 3 3 +TAINTED 1 0 1 1 +TAILS 2 0 2 2 +TAILOR'S 1 0 1 1 +TAIL 3 0 3 3 +TAGGING 1 0 1 1 +TACK 1 0 1 1 
+TABLETS 2 0 2 2 +TABLES 2 0 2 2 +TABLE 7 0 7 7 +T 1 0 1 1 +SYSTEM 2 0 2 2 +SYRINGE 1 0 1 1 +SYMPTOMS 1 0 1 1 +SYMPATHY 4 0 4 4 +SYMPATHIES 1 0 1 1 +SWUNG 3 0 3 3 +SWITCHED 1 0 1 1 +SWITCH 1 0 1 1 +SWISS 1 0 1 1 +SWINGING 1 0 1 1 +SWINGED 1 0 1 1 +SWIMS 1 0 1 1 +SWIFTLY 1 0 1 1 +SWEPT 1 0 1 1 +SWEETNESS 2 0 2 2 +SWEETMEATS 2 0 2 2 +SWEETMEAT 2 0 2 2 +SWEEPING 1 0 1 1 +SWEAR 5 0 5 5 +SWARTHY 1 0 1 1 +SWARMED 2 0 2 2 +SWAM 1 0 1 1 +SWALLOWING 1 0 1 1 +SVIAZHSKY 1 0 1 1 +SUSTAINS 1 0 1 1 +SUSPICIOUS 3 0 3 3 +SUSPENDED 1 0 1 1 +SUSPECTED 6 0 6 6 +SUSPECT 1 0 1 1 +SUSAN'S 1 0 1 1 +SURVEYED 1 0 1 1 +SURROUNDINGS 1 0 1 1 +SURROUNDING 1 0 1 1 +SURROUNDED 1 0 1 1 +SURRENDERING 1 0 1 1 +SURRENDERED 1 0 1 1 +SURPLICE 1 0 1 1 +SURPASS 1 0 1 1 +SURMOUNTED 1 0 1 1 +SURLY 1 0 1 1 +SURFACE 3 0 3 3 +SURE 18 0 18 18 +SUPPRESS 1 0 1 1 +SUPPOSITION 1 0 1 1 +SUPPORTED 1 0 1 1 +SUPPORT 1 0 1 1 +SUPPLY 2 0 2 2 +SUPPLIED 3 0 3 3 +SUPPLICATION 1 0 1 1 +SUPPER 1 0 1 1 +SUPERNATURAL 2 0 2 2 +SUPERNACULUM 1 0 1 1 +SUPERLATIVE 1 0 1 1 +SUPERIORS 1 0 1 1 +SUP 1 0 1 1 +SUNSHINY 1 0 1 1 +SUNSHINE 1 0 1 1 +SUNRISE 2 0 2 2 +SUNNYSIDE 1 0 1 1 +SUNDAY 4 0 4 4 +SUMS 2 0 2 2 +SUMMONED 1 0 1 1 +SUMMON 1 0 1 1 +SUMMER 6 0 6 6 +SULTRY 1 0 1 1 +SULTAN 2 0 2 2 +SUITS 1 0 1 1 +SUITABLE 2 0 2 2 +SUICIDE 1 0 1 1 +SUGGESTED 2 0 2 2 +SUGAR 9 0 9 9 +SUFFICIENTLY 2 0 2 2 +SUFFERINGS 1 0 1 1 +SUFFERING 2 0 2 2 +SUFFERED 3 0 3 3 +SUE 1 0 1 1 +SUCK 1 0 1 1 +SUCH 44 0 44 44 +SUCCUMBED 1 0 1 1 +SUCCESSIVELY 1 0 1 1 +SUCCESSIVE 1 0 1 1 +SUCCESSFULLY 1 0 1 1 +SUCCESSES 2 0 2 2 +SUCCESS 2 0 2 2 +SUCCEEDING 1 0 1 1 +SUCCEEDED 3 0 3 3 +SUCCEED 1 0 1 1 +SUBURB 1 0 1 1 +SUBSTITUTING 1 0 1 1 +SUBSTANCES 1 0 1 1 +SUBSISTENCE 1 0 1 1 +SUBSIDED 1 0 1 1 +SUBSEQUENT 1 0 1 1 +SUBORDINATED 1 0 1 1 +SUBMITTED 2 0 2 2 +SUBMISSIVE 1 0 1 1 +SUBMISSION 1 0 1 1 +SUBJECTS 6 0 6 6 +SUBJECTED 3 0 3 3 +SUBDUED 2 0 2 2 +STYLED 1 0 1 1 +STYLE 1 0 1 1 +STURDY 1 0 1 1 +STUPID 2 0 2 2 +STUMBLED 1 0 1 1 +STUFFS 1 0 1 1 +STUFF 1 0 1 1 +STUDYING 1 0 1 1 +STUDY 1 0 1 1 +STUDENTS 1 0 1 1 +STUDENT 2 0 2 2 +STUCK 2 0 2 2 +STRUGGLING 1 0 1 1 +STRUGGLES 1 0 1 1 +STRUGGLE 1 0 1 1 +STRUCTURE 1 0 1 1 +STROVE 1 0 1 1 +STRONGLY 1 0 1 1 +STRONGER 1 0 1 1 +STRONG 12 0 12 12 +STRIPPED 2 0 2 2 +STRIKING 1 0 1 1 +STRIDES 1 0 1 1 +STRICTLY 1 0 1 1 +STREWN 1 0 1 1 +STRETCHING 1 0 1 1 +STRETCHER 1 0 1 1 +STRETCH 1 0 1 1 +STRENUOUSLY 1 0 1 1 +STRENGTHENED 2 0 2 2 +STRENGTH 12 0 12 12 +STREETS 2 0 2 2 +STRATAGEM 1 0 1 1 +STRANGERS 1 0 1 1 +STRANGER 3 0 3 3 +STRANGELY 1 0 1 1 +STRANGE 5 0 5 5 +STRAITS 1 0 1 1 +STRAINING 1 0 1 1 +STRAIGHTWAY 1 0 1 1 +STRAIGHTFORWARD 1 0 1 1 +STOUT 1 0 1 1 +STORY 9 0 9 9 +STORMED 1 0 1 1 +STORM 1 0 1 1 +STOREHOUSES 1 0 1 1 +STOPPING 3 0 3 3 +STOP 5 0 5 5 +STOOL 2 0 2 2 +STOMACH 3 0 3 3 +STOCK 3 0 3 3 +STIRRING 1 0 1 1 +STIRRED 1 0 1 1 +STIR 1 0 1 1 +STILLNESS 3 0 3 3 +STILE 1 0 1 1 +STIFLING 1 0 1 1 +STIFLED 2 0 2 2 +STIFLE 3 0 3 3 +STIFFNESS 1 0 1 1 +STIFF 2 0 2 2 +STICK 5 0 5 5 +STEWART 1 0 1 1 +STEWARDS 1 0 1 1 +STEWARD 1 0 1 1 +STEPHEN 1 0 1 1 +STEMS 1 0 1 1 +STEEP 1 0 1 1 +STEEL 1 0 1 1 +STEAMED 1 0 1 1 +STEAMBOAT 2 0 2 2 +STEALTHILY 1 0 1 1 +STEADY 3 0 3 3 +STEADILY 1 0 1 1 +STAY 6 0 6 6 +STATUES 3 0 3 3 +STATIONED 2 0 2 2 +STATION 2 0 2 2 +STATESMAN 1 0 1 1 +STATEMENT 3 0 3 3 +STAT 1 0 1 1 +STARVE 1 0 1 1 +STARTLING 1 0 1 1 +STARTLED 1 0 1 1 +STARTING 1 0 1 1 +STARK 1 0 1 1 +STARCHY 1 0 1 1 +STANLEY 2 0 2 2 +STANDPOINT 1 0 1 1 +STANDING 10 0 10 10 +STAND 7 0 7 7 +STAMPED 1 0 1 1 +STAMMERED 1 0 1 1 +STAMMER 1 0 1 1 +STAKES 1 0 1 1 +STAKED 1 0 1 1 +STAKE 1 0 1 
1 +STAIRCASE 1 0 1 1 +STAINED 2 0 2 2 +STABLE 1 0 1 1 +SQUIRE 3 0 3 3 +SQUEEZE 1 0 1 1 +SQUEAKS 1 0 1 1 +SQUATTED 1 0 1 1 +SQUALL 1 0 1 1 +SQUALID 1 0 1 1 +SQUAD 2 0 2 2 +SPYING 1 0 1 1 +SPY 1 0 1 1 +SPRINKLES 1 0 1 1 +SPRINGS 1 0 1 1 +SPRING 4 0 4 4 +SPRIG 1 0 1 1 +SPREADS 1 0 1 1 +SPRANG 3 0 3 3 +SPOTTED 1 0 1 1 +SPOT 6 0 6 6 +SPORT 2 0 2 2 +SPOON 1 0 1 1 +SPOKEN 2 0 2 2 +SPOKE 15 0 15 15 +SPOILS 1 0 1 1 +SPLIT 2 0 2 2 +SPITEFUL 1 0 1 1 +SPIT 1 0 1 1 +SPIRITUAL 1 0 1 1 +SPIRAL 1 0 1 1 +SPINSTER 1 0 1 1 +SPIDER 1 0 1 1 +SPHERE 1 0 1 1 +SPELL 1 0 1 1 +SPEEDILY 1 0 1 1 +SPEECH 5 0 5 5 +SPECULATED 1 0 1 1 +SPECTATORS 1 0 1 1 +SPECTACLE 1 0 1 1 +SPECIES 1 0 1 1 +SPECIAL 3 0 3 3 +SPEAKS 1 0 1 1 +SPEAKING 7 0 7 7 +SPEAKER 1 0 1 1 +SPAWN 1 0 1 1 +SPASM 1 0 1 1 +SPARROWS 1 0 1 1 +SPARK 1 0 1 1 +SPARING 1 0 1 1 +SPARED 2 0 2 2 +SPARE 1 0 1 1 +SPANKER 1 0 1 1 +SPANISH 1 0 1 1 +SPADES 1 0 1 1 +SPACE 2 0 2 2 +SOWING 1 0 1 1 +SOUTH 1 0 1 1 +SOUP 1 0 1 1 +SOUNDS 2 0 2 2 +SOUNDED 3 0 3 3 +SOUND 12 0 12 12 +SORTS 4 0 4 4 +SORRY 3 0 3 3 +SORROWING 1 0 1 1 +SORELY 1 0 1 1 +SORCERER 1 0 1 1 +SOOTH 1 0 1 1 +SOOT 1 0 1 1 +SOONER 4 0 4 4 +SONS 1 0 1 1 +SOMETIMES 14 0 14 14 +SOMETHING'S 1 0 1 1 +SOMEHOW 3 0 3 3 +SOMEBODY 3 0 3 3 +SOMBER 1 0 1 1 +SOLVE 1 0 1 1 +SOLUTION 4 0 4 4 +SOLUBLE 2 0 2 2 +SOLOMON 1 0 1 1 +SOLIDS 1 0 1 1 +SOLIDLY 1 0 1 1 +SOLID 1 0 1 1 +SOLICITUDE 1 0 1 1 +SOLEMNLY 1 0 1 1 +SOLEMNITY 1 0 1 1 +SOLEMN 1 0 1 1 +SOLDIERS 3 0 3 3 +SOLDIER 1 0 1 1 +SOLACE 1 0 1 1 +SOIL 2 0 2 2 +SOFTLY 2 0 2 2 +SOFA 2 0 2 2 +SODA 1 0 1 1 +SOCIETY 1 0 1 1 +SOCIETIES 1 0 1 1 +SOCIAL 12 0 12 12 +SOBERLY 1 0 1 1 +SOBER 4 0 4 4 +SOARING 1 0 1 1 +SOAK 1 0 1 1 +SNEEZE 2 0 2 2 +SNEERED 1 0 1 1 +SNEAKY 1 0 1 1 +SNATCH 1 0 1 1 +SNAKE 1 0 1 1 +SMUGGLED 1 0 1 1 +SMOULDERING 1 0 1 1 +SMOTE 2 0 2 2 +SMOKING 3 0 3 3 +SMOKESTACKS 1 0 1 1 +SMOKERS 3 0 3 3 +SMOKED 2 0 2 2 +SMITH 1 0 1 1 +SMILING 2 0 2 2 +SMILED 1 0 1 1 +SMELT 1 0 1 1 +SMART 1 0 1 1 +SMALLEST 1 0 1 1 +SMALLER 1 0 1 1 +SLUMBER 2 0 2 2 +SLOWLY 6 0 6 6 +SLOW 3 0 3 3 +SLIPPING 1 0 1 1 +SLIPPER 1 0 1 1 +SLIP 3 0 3 3 +SLING 1 0 1 1 +SLIGHT 1 0 1 1 +SLENDER 2 0 2 2 +SLEEVES 1 0 1 1 +SLEEPY 2 0 2 2 +SLEEPS 2 0 2 2 +SLEEPER 1 0 1 1 +SLEDGE 1 0 1 1 +SLAYING 1 0 1 1 +SLAY 1 0 1 1 +SLAVES 2 0 2 2 +SLAVERY 1 0 1 1 +SLAVE 3 0 3 3 +SLAMMED 1 0 1 1 +SLAIN 2 0 2 2 +SKYLIGHT 2 0 2 2 +SKY 3 0 3 3 +SKULLS 1 0 1 1 +SKULL 1 0 1 1 +SKIRTS 1 0 1 1 +SKIRMISH 1 0 1 1 +SKIMMING 1 0 1 1 +SKILLED 1 0 1 1 +SKILFULLY 1 0 1 1 +SKIES 1 0 1 1 +SKETCH 1 0 1 1 +SIXTY 7 0 7 7 +SIXTEEN 2 0 2 2 +SITUATION 1 0 1 1 +SITTING 3 0 3 3 +SITTETH 1 0 1 1 +SISTERS 4 0 4 4 +SISTERLY 1 0 1 1 +SISTER 8 0 8 8 +SINKS 1 0 1 1 +SINGULAR 2 0 2 2 +SINGLE 8 0 8 8 +SINGING 2 0 2 2 +SINGER 1 0 1 1 +SINGED 1 0 1 1 +SING 4 0 4 4 +SINFUL 1 0 1 1 +SINCERITY 1 0 1 1 +SINCERE 1 0 1 1 +SINCE 17 0 17 17 +SIN 2 0 2 2 +SIMPLY 3 0 3 3 +SIMMERING 1 0 1 1 +SIMILAR 2 0 2 2 +SILVERWARE 1 0 1 1 +SILVER 7 0 7 7 +SILL 1 0 1 1 +SILK 1 0 1 1 +SILENTLY 2 0 2 2 +SILENT 9 0 9 9 +SILAS 1 0 1 1 +SIGNS 2 0 2 2 +SIGNIFIES 1 0 1 1 +SIGNIFIED 1 0 1 1 +SIGNIFICANT 2 0 2 2 +SIGNIFICANCE 2 0 2 2 +SIGNATURE 1 0 1 1 +SIGNALS 2 0 2 2 +SIGNAL 7 0 7 7 +SIGHED 1 0 1 1 +SIGH 5 0 5 5 +SIFTED 1 0 1 1 +SIDEWAYS 1 0 1 1 +SIDEWALK 1 0 1 1 +SIDES 4 0 4 4 +SICK 2 0 2 2 +SHUTTING 1 0 1 1 +SHUTTER 1 0 1 1 +SHUDDER 1 0 1 1 +SHRUNK 1 0 1 1 +SHROUDED 1 0 1 1 +SHRINKING 1 0 1 1 +SHRILL 1 0 1 1 +SHRIEKING 1 0 1 1 +SHRIEKED 1 0 1 1 +SHOWN 4 0 4 4 +SHOWING 7 0 7 7 +SHOWED 9 0 9 9 +SHOUTS 2 0 2 2 +SHOUTING 4 0 4 4 +SHOUTED 4 0 4 4 +SHOULDN'T 1 0 1 1 +SHORTLY 5 0 5 5 +SHORTER 1 0 
1 1 +SHORT 8 0 8 8 +SHOPS 1 0 1 1 +SHOPPY 1 0 1 1 +SHOPPING 1 0 1 1 +SHOPKEEPERS 1 0 1 1 +SHOP 6 0 6 6 +SHOOK 5 0 5 5 +SHOES 5 0 5 5 +SHOCKED 2 0 2 2 +SHIRTS 1 0 1 1 +SHIRT 1 0 1 1 +SHIRKING 1 0 1 1 +SHIMMERING 1 0 1 1 +SHIFTY 1 0 1 1 +SHIFTED 2 0 2 2 +SHERRY 3 0 3 3 +SHELLS 4 0 4 4 +SHELF 1 0 1 1 +SHEILA 1 0 1 1 +SHEATH 1 0 1 1 +SHE'S 5 0 5 5 +SHAWL 1 0 1 1 +SHARPNESS 1 0 1 1 +SHARPLY 4 0 4 4 +SHARPENED 1 0 1 1 +SHARE 2 0 2 2 +SHAPIA 1 0 1 1 +SHAPES 1 0 1 1 +SHAPED 1 0 1 1 +SHAPE 3 0 3 3 +SHAME 2 0 2 2 +SHAM 1 0 1 1 +SHALT 7 0 7 7 +SHAKING 1 0 1 1 +SHAHRAZAD 3 0 3 3 +SHAFTS 1 0 1 1 +SHADOWS 1 0 1 1 +SEX 1 0 1 1 +SEWING 1 0 1 1 +SEVERELY 1 0 1 1 +SEVENTY 7 0 7 7 +SEVENTEEN 4 0 4 4 +SETTLED 4 0 4 4 +SETTLE 2 0 2 2 +SERVING 1 0 1 1 +SERVICES 1 0 1 1 +SERVICE 15 0 15 15 +SERVED 3 0 3 3 +SERVE 7 0 7 7 +SERVANT 4 0 4 4 +SERPENTS 2 0 2 2 +SERPENT 1 0 1 1 +SERIOUSLY 3 0 3 3 +SERIOUS 5 0 5 5 +SERENITY 1 0 1 1 +SEPULTURE 1 0 1 1 +SEPULCHRE 1 0 1 1 +SEPARATION 3 0 3 3 +SEPARATING 1 0 1 1 +SEPARATED 3 0 3 3 +SEPARATE 2 0 2 2 +SENTINELS 2 0 2 2 +SENTIMENTAL 1 0 1 1 +SENTIMENT 1 0 1 1 +SENTENCE 2 0 2 2 +SENSIBLY 1 0 1 1 +SENSES 2 0 2 2 +SENSELESS 2 0 2 2 +SENSATION 1 0 1 1 +SENATOR 1 0 1 1 +SEIZED 3 0 3 3 +SEES 1 0 1 1 +SEEMLY 1 0 1 1 +SEEKING 1 0 1 1 +SEEKEST 1 0 1 1 +SECURITY 7 0 7 7 +SECRETS 3 0 3 3 +SECRETLY 1 0 1 1 +SECRETARY 2 0 2 2 +SECRET 3 0 3 3 +SECONDS 1 0 1 1 +SECOND 15 0 15 15 +SEAT 1 0 1 1 +SEASONS 1 0 1 1 +SEASONED 1 0 1 1 +SEAS 1 0 1 1 +SEARCHINGLY 1 0 1 1 +SEARCHES 1 0 1 1 +SEARCHED 2 0 2 2 +SEARCH 6 0 6 6 +SEALED 2 0 2 2 +SCUTTLING 1 0 1 1 +SCUM 1 0 1 1 +SCULPTURE 1 0 1 1 +SCULPTOR 3 0 3 3 +SCRUPULOUSLY 1 0 1 1 +SCREEN 1 0 1 1 +SCREAM 1 0 1 1 +SCRATCHING 1 0 1 1 +SCRATCH 1 0 1 1 +SCRAPING 1 0 1 1 +SCRAPE 1 0 1 1 +SCOUNDREL 2 0 2 2 +SCOTCH 2 0 2 2 +SCISSORS 5 0 5 5 +SCIENTIFICALLY 1 0 1 1 +SCIENTIFIC 1 0 1 1 +SCIENCE 1 0 1 1 +SCHOOLMATE 1 0 1 1 +SCHOOLMASTER 5 0 5 5 +SCHOLARS 1 0 1 1 +SCHEME 1 0 1 1 +SCENES 2 0 2 2 +SCENE 6 0 6 6 +SCATTER 1 0 1 1 +SCARRED 1 0 1 1 +SCARLET 1 0 1 1 +SCARED 1 0 1 1 +SCARCELY 4 0 4 4 +SCARCE 1 0 1 1 +SCANNING 1 0 1 1 +SCALES 1 0 1 1 +SAXON 2 0 2 2 +SAWYER 3 0 3 3 +SAVAGES 1 0 1 1 +SAVAGERY 1 0 1 1 +SAUCER 1 0 1 1 +SATISFY 3 0 3 3 +SATISFIED 3 0 3 3 +SATISFACTORY 2 0 2 2 +SATISFACTORILY 1 0 1 1 +SATISFACTION 6 0 6 6 +SATIATED 1 0 1 1 +SATANICAL 1 0 1 1 +SANCTUARY 1 0 1 1 +SANCHO 9 0 9 9 +SAMUEL 1 0 1 1 +SAME 22 0 22 22 +SAMARIA 1 0 1 1 +SALUTED 2 0 2 2 +SALT 2 0 2 2 +SALOON 1 0 1 1 +SAKE 7 0 7 7 +SAITH 1 0 1 1 +SAINTS 3 0 3 3 +SAILOR 2 0 2 2 +SAILING 2 0 2 2 +SAILED 1 0 1 1 +SAFETY 2 0 2 2 +SAFELY 2 0 2 2 +SAD 2 0 2 2 +SACRIFICES 3 0 3 3 +SACRIFICE 5 0 5 5 +SACRED 1 0 1 1 +SACRAMENT 1 0 1 1 +SACK 1 0 1 1 +RUSTLING 2 0 2 2 +RUSTLE 1 0 1 1 +RUSSIAN 3 0 3 3 +RUNNING 3 0 3 3 +RUMBLING 1 0 1 1 +RULES 2 0 2 2 +RUINS 1 0 1 1 +RUINING 1 0 1 1 +RUINED 1 0 1 1 +RUFFIAN 1 0 1 1 +RUBBERS 1 0 1 1 +ROYAL 7 0 7 7 +ROXBURY 1 0 1 1 +ROVER 1 0 1 1 +ROUSE 1 0 1 1 +ROUNDED 1 0 1 1 +ROUGH 3 0 3 3 +ROT 1 0 1 1 +ROSY 2 0 2 2 +ROSEMARY 2 0 2 2 +ROSA 1 0 1 1 +ROPE 3 0 3 3 +ROOMS 2 0 2 2 +ROOFS 1 0 1 1 +ROOF 2 0 2 2 +ROLLED 1 0 1 1 +RODE 6 0 6 6 +ROCKS 1 0 1 1 +ROCKET 1 0 1 1 +ROBERT 1 0 1 1 +ROBBING 1 0 1 1 +ROBBERY 5 0 5 5 +ROBBED 2 0 2 2 +ROASTING 1 0 1 1 +ROASTED 1 0 1 1 +ROAST 1 0 1 1 +ROARED 1 0 1 1 +ROADSIDE 1 0 1 1 +RIVERS 1 0 1 1 +RIVER 11 0 11 11 +RIVALRY 1 0 1 1 +RIVAL 2 0 2 2 +RISK 3 0 3 3 +RISING 5 0 5 5 +RISEN 1 0 1 1 +RISE 3 0 3 3 +RIP 2 0 2 2 +RIGHTEOUSNESS 1 0 1 1 +RIGHTEOUS 1 0 1 1 +RIDICULOUS 1 0 1 1 +RIDDEN 1 0 1 1 +RICHLY 1 0 1 1 +RICHER 1 0 1 1 +RICH 7 0 7 7 
+RICE 1 0 1 1 +RHEUMATISM 1 0 1 1 +REWARDS 1 0 1 1 +REWARD 4 0 4 4 +REVOLUTIONARIES 1 0 1 1 +REVIVE 1 0 1 1 +REVIEW 1 0 1 1 +REVERSES 1 0 1 1 +REVENGES 1 0 1 1 +REVENGE 1 0 1 1 +REVELLED 1 0 1 1 +REVELATION 1 0 1 1 +REVEL 1 0 1 1 +RETREAT 1 0 1 1 +RETARDED 1 0 1 1 +RETAINED 2 0 2 2 +RESULT 2 0 2 2 +RESTS 1 0 1 1 +RESTRAIN 2 0 2 2 +RESTORED 2 0 2 2 +RESTAURANTS 1 0 1 1 +RESTAURANT 3 0 3 3 +RESPONSIBILITY 2 0 2 2 +RESPONDED 2 0 2 2 +RESPECTS 1 0 1 1 +RESPECTIVE 2 0 2 2 +RESPECTING 1 0 1 1 +RESPECTFULLY 3 0 3 3 +RESPECTFUL 1 0 1 1 +RESPECTED 1 0 1 1 +RESPECTABLE 3 0 3 3 +RESPECT 4 0 4 4 +RESORTS 1 0 1 1 +RESORTED 1 0 1 1 +RESORT 1 0 1 1 +RESOLVING 1 0 1 1 +RESOLVE 1 0 1 1 +RESOLUTIONS 1 0 1 1 +RESOLUTION 2 0 2 2 +RESISTANCE 3 0 3 3 +RESIST 3 0 3 3 +RESIGNED 1 0 1 1 +RESIDENCE 2 0 2 2 +RESERVOIR 1 0 1 1 +RESERVE 1 0 1 1 +RESEMBLING 1 0 1 1 +RESEMBLES 1 0 1 1 +RESEMBLE 1 0 1 1 +RESEARCHES 1 0 1 1 +REQUIRING 1 0 1 1 +REQUIRES 1 0 1 1 +REQUIRED 3 0 3 3 +REQUIRE 4 0 4 4 +REQUESTED 2 0 2 2 +REPUTATIONS 1 0 1 1 +REPROACH 2 0 2 2 +REPRESENTED 3 0 3 3 +REPRESENTATIVE 2 0 2 2 +REPORTED 1 0 1 1 +REPORT 2 0 2 2 +REPEATING 1 0 1 1 +REPEAT 1 0 1 1 +REPARATION 1 0 1 1 +REPAIRED 2 0 2 2 +REPAIR 1 0 1 1 +RENOUNCE 3 0 3 3 +RENEWED 2 0 2 2 +RENDERS 1 0 1 1 +RENDERED 1 0 1 1 +REMOVED 3 0 3 3 +REMOVE 3 0 3 3 +REMOVAL 1 0 1 1 +REMOTE 1 0 1 1 +REMORSEFUL 1 0 1 1 +REMONSTRANCE 1 0 1 1 +REMNANTS 1 0 1 1 +REMNANT 1 0 1 1 +REMINISCENCES 1 0 1 1 +REMEMBERING 2 0 2 2 +REMEMBERED 4 0 4 4 +REMEDY 4 0 4 4 +REMARKS 1 0 1 1 +REMARKABLY 1 0 1 1 +REMARKABLE 2 0 2 2 +RELYING 1 0 1 1 +RELUCTANTLY 2 0 2 2 +RELUCTANCE 1 0 1 1 +RELINQUISH 1 0 1 1 +RELIGIONS 1 0 1 1 +RELIEVED 1 0 1 1 +RELIED 1 0 1 1 +RELIC 1 0 1 1 +RELEASE 2 0 2 2 +RELAXING 1 0 1 1 +RELATIONS 2 0 2 2 +RELATION 2 0 2 2 +REJOINED 1 0 1 1 +REJOINDER 1 0 1 1 +REJOICING 1 0 1 1 +REJOICED 3 0 3 3 +REGRETTING 1 0 1 1 +REGISTER 1 0 1 1 +REGION 1 0 1 1 +REGIMENTS 2 0 2 2 +REGARDED 2 0 2 2 +REGARD 2 0 2 2 +REGAINED 1 0 1 1 +REGAIN 2 0 2 2 +REFUTATION 1 0 1 1 +REFUSING 2 0 2 2 +REFUSES 2 0 2 2 +REFUSED 1 0 1 1 +REFUSE 2 0 2 2 +REFRAIN 2 0 2 2 +REFORMS 1 0 1 1 +REFORM 6 0 6 6 +REFLECTIVE 1 0 1 1 +REFLECTIONS 1 0 1 1 +REFLECTION 3 0 3 3 +REFINED 2 0 2 2 +REFERRED 2 0 2 2 +REFER 1 0 1 1 +REEF 1 0 1 1 +REDUCED 2 0 2 2 +REDOUBLING 1 0 1 1 +REDEMPTION 1 0 1 1 +REDEEMING 1 0 1 1 +RECTUM 1 0 1 1 +RECTOR 1 0 1 1 +RECRUITS 1 0 1 1 +RECOVERY 1 0 1 1 +RECOVERED 1 0 1 1 +RECOVER 3 0 3 3 +RECOURSE 1 0 1 1 +RECOUNTED 1 0 1 1 +RECORD 2 0 2 2 +RECOMPENSE 2 0 2 2 +RECOMMEND 2 0 2 2 +RECOLLECTING 1 0 1 1 +RECOLLECTED 1 0 1 1 +RECOLLECT 1 0 1 1 +RECOILED 1 0 1 1 +RECOGNIZED 5 0 5 5 +RECOGNITION 2 0 2 2 +RECKON 4 0 4 4 +RECITING 1 0 1 1 +RECITED 3 0 3 3 +RECIPE 2 0 2 2 +RECEPTION 1 0 1 1 +RECENTLY 1 0 1 1 +RECEIVED 9 0 9 9 +RECEIVE 4 0 4 4 +RECEIPT 1 0 1 1 +RECEDING 1 0 1 1 +RECAPTURED 1 0 1 1 +RECALLING 1 0 1 1 +RECALLED 1 0 1 1 +RECALL 1 0 1 1 +REBECCA 1 0 1 1 +REASONABLE 2 0 2 2 +REASON 11 0 11 11 +REAR 1 0 1 1 +REAPING 1 0 1 1 +REAP 1 0 1 1 +REALM 1 0 1 1 +REALIZE 1 0 1 1 +REALITY 3 0 3 3 +REAL 3 0 3 3 +READERS 1 0 1 1 +READER 1 0 1 1 +REACHING 2 0 2 2 +RAWNESS 1 0 1 1 +RAVING 1 0 1 1 +RAVENING 1 0 1 1 +RAVAGED 1 0 1 1 +RATTLED 1 0 1 1 +RATTLE 1 0 1 1 +RATE 2 0 2 2 +RASCAL 3 0 3 3 +RARE 1 0 1 1 +RAPIDLY 4 0 4 4 +RAP 1 0 1 1 +RANSOM 8 0 8 8 +RANKS 1 0 1 1 +RAMBLER 1 0 1 1 +RAMBLE 1 0 1 1 +RAM'S 1 0 1 1 +RAISED 6 0 6 6 +RAINS 1 0 1 1 +RAINBOWS 1 0 1 1 +RAILROAD 1 0 1 1 +RAIDERS 1 0 1 1 +RAFTER 1 0 1 1 +RAFT 6 0 6 6 +RADICALS 1 0 1 1 +RADIANT 1 0 1 1 +RACKETS 1 0 1 1 +RACK 1 0 1 1 +R 1 0 1 1 
+QUOTH 5 0 5 5 +QUOTED 1 0 1 1 +QUIXOTE 5 0 5 5 +QUIVERED 1 0 1 1 +QUIVER 1 0 1 1 +QUIT 1 0 1 1 +QUIETLY 4 0 4 4 +QUIET 1 0 1 1 +QUICKER 3 0 3 3 +QUESTIONS 6 0 6 6 +QUESTIONED 1 0 1 1 +QUESTIONABLE 1 0 1 1 +QUESTION 15 0 15 15 +QUENCH 1 0 1 1 +QUEENS 1 0 1 1 +QUEEN'S 1 0 1 1 +QUARTERS 3 0 3 3 +QUART 1 0 1 1 +QUARRELS 1 0 1 1 +QUANTITY 3 0 3 3 +QUALITY 1 0 1 1 +QUACKS 2 0 2 2 +PUZZLED 2 0 2 2 +PUSHING 1 0 1 1 +PUSHED 1 0 1 1 +PURSUIT 1 0 1 1 +PURSUED 3 0 3 3 +PURSUANCE 1 0 1 1 +PURPOSES 1 0 1 1 +PURPOSE 5 0 5 5 +PURITAN 2 0 2 2 +PURIFY 1 0 1 1 +PURE 4 0 4 4 +PURCHASED 1 0 1 1 +PUNISHMENTS 1 0 1 1 +PUNISHMENT 1 0 1 1 +PUNISHES 1 0 1 1 +PUNISHED 1 0 1 1 +PUNISH 1 0 1 1 +PUNCTUALITY 1 0 1 1 +PUNCTILIOUS 1 0 1 1 +PUMP 1 0 1 1 +PULP 1 0 1 1 +PULLEY 1 0 1 1 +PULL 1 0 1 1 +PUFFING 1 0 1 1 +PUFFED 1 0 1 1 +PUDDINGS 1 0 1 1 +PUBLISHER 1 0 1 1 +PUBLIC 5 0 5 5 +PRYING 2 0 2 2 +PRUDENT 1 0 1 1 +PRUDENCE 4 0 4 4 +PROW 1 0 1 1 +PROVOKE 1 0 1 1 +PROVOCATOR 1 0 1 1 +PROVISION 1 0 1 1 +PROVINCIAL 1 0 1 1 +PROVINCE 4 0 4 4 +PROVIDENCES 1 0 1 1 +PROVIDENCE 1 0 1 1 +PROVIDE 1 0 1 1 +PROVERBIAL 1 0 1 1 +PROUD 2 0 2 2 +PROTESTED 2 0 2 2 +PROTECTS 1 0 1 1 +PROTECTORS 1 0 1 1 +PROTECTION 2 0 2 2 +PROTECT 2 0 2 2 +PROSPEROUS 1 0 1 1 +PROPRIETORS 1 0 1 1 +PROPOSITION 1 0 1 1 +PROPOSES 1 0 1 1 +PROPOSED 3 0 3 3 +PROPOSALS 1 0 1 1 +PROPORTION 3 0 3 3 +PROPERTY 2 0 2 2 +PROOF 5 0 5 5 +PRONOUNCED 1 0 1 1 +PROMPTLY 3 0 3 3 +PROMPT 1 0 1 1 +PROMISING 1 0 1 1 +PROMISED 7 0 7 7 +PROMISE 4 0 4 4 +PROLONGED 1 0 1 1 +PROJECT 1 0 1 1 +PROHIBITED 1 0 1 1 +PROHIBIT 1 0 1 1 +PROGRESS 1 0 1 1 +PROFUSION 1 0 1 1 +PROFOUND 1 0 1 1 +PROFLIGATE 1 0 1 1 +PROFITABLY 1 0 1 1 +PROFITABLE 1 0 1 1 +PROFESSIONAL 2 0 2 2 +PROFANITY 1 0 1 1 +PROFANE 1 0 1 1 +PRODUCTIONS 1 0 1 1 +PRODUCING 1 0 1 1 +PROCURE 2 0 2 2 +PROCESSIONS 1 0 1 1 +PROCESSION 1 0 1 1 +PROCESS 6 0 6 6 +PROCEEDINGS 2 0 2 2 +PROCEEDED 1 0 1 1 +PROCEED 2 0 2 2 +PROCEDURE 1 0 1 1 +PROBLEMS 1 0 1 1 +PROBLEM 1 0 1 1 +PROBABLY 7 0 7 7 +PROBABLE 1 0 1 1 +PROBABILITY 1 0 1 1 +PRIVILEGE 1 0 1 1 +PRIVATE 6 0 6 6 +PRIVACY 1 0 1 1 +PRINT 1 0 1 1 +PRINCIPLE 3 0 3 3 +PRINCIPALLY 1 0 1 1 +PRINCIPAL 1 0 1 1 +PRINCESS 11 0 11 11 +PRINCES 1 0 1 1 +PRINCE'S 2 0 2 2 +PRIME 3 0 3 3 +PRIDE 2 0 2 2 +PRICE 2 0 2 2 +PREVIOUSLY 1 0 1 1 +PREVENTED 1 0 1 1 +PREVAILING 1 0 1 1 +PREVAILED 1 0 1 1 +PRETTILY 1 0 1 1 +PRETTIEST 1 0 1 1 +PRETEXT 1 0 1 1 +PRETENDED 1 0 1 1 +PRETEND 3 0 3 3 +PRESUMPTUOUS 1 0 1 1 +PRESSURE 4 0 4 4 +PRESSED 2 0 2 2 +PRESS 2 0 2 2 +PRESERVING 5 0 5 5 +PRESERVES 3 0 3 3 +PRESERVED 2 0 2 2 +PRESERVE 2 0 2 2 +PRESENTS 4 0 4 4 +PRESENTLY 12 0 12 12 +PRESENTING 1 0 1 1 +PRESENTED 3 0 3 3 +PRESENCE 9 0 9 9 +PREPARING 3 0 3 3 +PREPARED 7 0 7 7 +PREPARE 1 0 1 1 +PREPARATIONS 5 0 5 5 +PREOCCUPIED 1 0 1 1 +PREMISES 1 0 1 1 +PREFERRED 1 0 1 1 +PREFER 2 0 2 2 +PREDICTIONS 1 0 1 1 +PRECIPITANCY 1 0 1 1 +PRECIOUS 2 0 2 2 +PRECINCT 1 0 1 1 +PRECAUTION 1 0 1 1 +PREACHING 2 0 2 2 +PREACHER 1 0 1 1 +PRAYER 6 0 6 6 +PRAYED 3 0 3 3 +PRAM 1 0 1 1 +PRAISES 1 0 1 1 +PRAISED 2 0 2 2 +PRACTISE 1 0 1 1 +PRACTICE 1 0 1 1 +PRACTICALLY 2 0 2 2 +POWERS 3 0 3 3 +POWERLESS 1 0 1 1 +POWERFUL 3 0 3 3 +POWER 27 0 27 27 +POVERTY 3 0 3 3 +POURS 1 0 1 1 +POURING 1 0 1 1 +POUR 1 0 1 1 +POUND 3 0 3 3 +POUNCE 1 0 1 1 +POTS 1 0 1 1 +POTASSIC 1 0 1 1 +POTASH 1 0 1 1 +POSTERS 1 0 1 1 +POSSIBLY 3 0 3 3 +POSSIBLE 12 0 12 12 +POSSIBILITY 2 0 2 2 +POSSESSION 2 0 2 2 +POSSESSES 1 0 1 1 +POSSESSED 5 0 5 5 +POSSESS 1 0 1 1 +POSSE 1 0 1 1 +POSITIVE 1 0 1 1 +PORTO 1 0 1 1 +PORTIONS 2 0 2 2 +PORTION 2 0 2 2 +PORTER 2 0 2 2 +POPULATION 
1 0 1 1 +POPULARITY 1 0 1 1 +POPULAR 1 0 1 1 +POPPED 1 0 1 1 +POPES 2 0 2 2 +POPE'S 1 0 1 1 +POPE 1 0 1 1 +POP 1 0 1 1 +PONY 1 0 1 1 +POLONIUS 1 0 1 1 +POLLY'S 3 0 3 3 +POLLY 19 0 19 19 +POLITICIANS 1 0 1 1 +POLITICAL 3 0 3 3 +POLICE 5 0 5 5 +POKING 1 0 1 1 +POKED 1 0 1 1 +POISONS 1 0 1 1 +POISONING 3 0 3 3 +POINTING 2 0 2 2 +POINTED 3 0 3 3 +POETRY 2 0 2 2 +POCKETS 1 0 1 1 +POCKETED 1 0 1 1 +POCKET 8 0 8 8 +PLUNGED 3 0 3 3 +PLUNDERED 1 0 1 1 +PLUG 1 0 1 1 +PLUCKING 2 0 2 2 +PLOT 1 0 1 1 +PLEDGED 1 0 1 1 +PLEDGE 1 0 1 1 +PLEASURES 2 0 2 2 +PLEASED 4 0 4 4 +PLEASANTER 1 0 1 1 +PLEADINGS 1 0 1 1 +PLEADED 3 0 3 3 +PLEAD 1 0 1 1 +PLAYING 1 0 1 1 +PLAYED 5 0 5 5 +PLAY 4 0 4 4 +PLATTERS 1 0 1 1 +PLATFORM 2 0 2 2 +PLATED 1 0 1 1 +PLASTER 1 0 1 1 +PLANTS 1 0 1 1 +PLANTATIONS 2 0 2 2 +PLANS 5 0 5 5 +PLANNING 2 0 2 2 +PLANNED 1 0 1 1 +PLANKS 2 0 2 2 +PLANETS 1 0 1 1 +PLANET 2 0 2 2 +PLAN 1 0 1 1 +PLAINLY 3 0 3 3 +PLAGUE 2 0 2 2 +PITY 4 0 4 4 +PITIFULNESS 1 0 1 1 +PITCHER 1 0 1 1 +PIT 1 0 1 1 +PISTOLS 1 0 1 1 +PIPING 1 0 1 1 +PIPES 1 0 1 1 +PIOUS 1 0 1 1 +PINT 1 0 1 1 +PINKERTON'S 1 0 1 1 +PINK 1 0 1 1 +PINED 1 0 1 1 +PINCHED 2 0 2 2 +PINCH 1 0 1 1 +PIN 1 0 1 1 +PILLOWED 1 0 1 1 +PILLOW 1 0 1 1 +PILED 1 0 1 1 +PILE 1 0 1 1 +PIG 1 0 1 1 +PIERO 1 0 1 1 +PIERCED 1 0 1 1 +PIECES 9 0 9 9 +PICTURES 3 0 3 3 +PICTURE 5 0 5 5 +PICKET 1 0 1 1 +PICK 5 0 5 5 +PIAZZA 1 0 1 1 +PHYSIOLOGICAL 1 0 1 1 +PHYSICIAN 3 0 3 3 +PHYSICAL 2 0 2 2 +PHRASE 1 0 1 1 +PHONE 1 0 1 1 +PHLEGMATIC 1 0 1 1 +PHILOSOPHERS 1 0 1 1 +PHARMACY 1 0 1 1 +PEYTON 1 0 1 1 +PETITIONS 1 0 1 1 +PETER'S 2 0 2 2 +PETER 15 0 15 15 +PERVADED 1 0 1 1 +PERUSING 1 0 1 1 +PERUSAL 1 0 1 1 +PERSUADED 1 0 1 1 +PERSUADE 1 0 1 1 +PERSPECTIVE 1 0 1 1 +PERSONS 8 0 8 8 +PERSONALLY 5 0 5 5 +PERSONAL 2 0 2 2 +PERSONAGE 1 0 1 1 +PERSON'S 1 0 1 1 +PERSON 16 0 16 16 +PERSISTED 3 0 3 3 +PERSIST 1 0 1 1 +PERSEUS 2 0 2 2 +PERSECUTORS 1 0 1 1 +PERSECUTION 1 0 1 1 +PERSECUTED 1 0 1 1 +PERSECUTE 2 0 2 2 +PERPLEXITY 1 0 1 1 +PERPETUALLY 1 0 1 1 +PERMITTED 5 0 5 5 +PERMIT 4 0 4 4 +PERMISSION 2 0 2 2 +PERMANENT 1 0 1 1 +PERISHED 3 0 3 3 +PERIODS 1 0 1 1 +PERIOD 2 0 2 2 +PERILS 1 0 1 1 +PERFORMANCES 1 0 1 1 +PERFORM 2 0 2 2 +PERFECTLY 5 0 5 5 +PERFECTION 2 0 2 2 +PERCHED 1 0 1 1 +PERCH 1 0 1 1 +PERCEPTIBLE 1 0 1 1 +PERCEIVE 1 0 1 1 +PERAMBULATOR'S 1 0 1 1 +PER 1 0 1 1 +PEPPINO 1 0 1 1 +PEPPER 2 0 2 2 +PEOPLE'S 3 0 3 3 +PEOPLE 44 0 44 44 +PENNY 1 0 1 1 +PENETRATING 1 0 1 1 +PENETRATE 1 0 1 1 +PENALTY 1 0 1 1 +PELT 1 0 1 1 +PEERS 1 0 1 1 +PEERED 1 0 1 1 +PEDESTAL 1 0 1 1 +PECULIAR 2 0 2 2 +PEBBLES 3 0 3 3 +PEASANT 3 0 3 3 +PEARLS 1 0 1 1 +PEALS 1 0 1 1 +PEAL 1 0 1 1 +PEABODY 1 0 1 1 +PAYS 1 0 1 1 +PAYING 1 0 1 1 +PAVILION 1 0 1 1 +PAVEMENT 1 0 1 1 +PATRIOTS 1 0 1 1 +PATRIOTISM 1 0 1 1 +PATRIOT 1 0 1 1 +PATRIMONY 1 0 1 1 +PATRIARCHS 1 0 1 1 +PATIENTLY 1 0 1 1 +PATIENT'S 1 0 1 1 +PATIENT 2 0 2 2 +PATHS 1 0 1 1 +PASTE 1 0 1 1 +PASSION 5 0 5 5 +PASSING 3 0 3 3 +PASSENGERS 4 0 4 4 +PASSAGE 2 0 2 2 +PASSABLE 1 0 1 1 +PARTY 13 0 13 13 +PARTNER 1 0 1 1 +PARTISANS 1 0 1 1 +PARTINGS 1 0 1 1 +PARTING 2 0 2 2 +PARTIES 1 0 1 1 +PARTICULARS 1 0 1 1 +PARTICULARLY 5 0 5 5 +PARTICULAR 4 0 4 4 +PARTICLE 2 0 2 2 +PARTICIPATION 1 0 1 1 +PARTICIPANTS 1 0 1 1 +PARTAKE 1 0 1 1 +PARSLEY 1 0 1 1 +PARLIAMENTARY 1 0 1 1 +PARK 1 0 1 1 +PARISIAN 1 0 1 1 +PARIS 5 0 5 5 +PARENTS 2 0 2 2 +PARCEL 2 0 2 2 +PARASOL 1 0 1 1 +PARALLEL 1 0 1 1 +PARADISE 1 0 1 1 +PAPERS 4 0 4 4 +PAPER 6 0 6 6 +PAPA 4 0 4 4 +PANZA 1 0 1 1 +PANTING 2 0 2 2 +PANS 1 0 1 1 +PANNIERS 1 0 1 1 +PAN 3 0 3 3 +PALINGS 1 0 1 1 +PALER 1 0 1 1 +PALE 
8 0 8 8 +PALACE 6 0 6 6 +PAINTING 1 0 1 1 +PAINTER 1 0 1 1 +PAINFULLY 1 0 1 1 +PAINFUL 5 0 5 5 +PAINED 1 0 1 1 +PAID 9 0 9 9 +PAGES 4 0 4 4 +PAGE 2 0 2 2 +PADDLE 2 0 2 2 +PACKED 1 0 1 1 +PACIFY 1 0 1 1 +PACIFIC 2 0 2 2 +OXEN 1 0 1 1 +OWE 2 0 2 2 +OVERWHELMED 2 0 2 2 +OVERTURNING 1 0 1 1 +OVERTHREW 1 0 1 1 +OVERTAKEN 1 0 1 1 +OVERLY 1 0 1 1 +OVERHEAD 2 0 2 2 +OVERCOME 2 0 2 2 +OVERCOAT 1 0 1 1 +OVEN 2 0 2 2 +OVAL 1 0 1 1 +OUTWARD 1 0 1 1 +OUTLINES 1 0 1 1 +OUTLINE 1 0 1 1 +OUTLAWS 1 0 1 1 +OUTFIT 1 0 1 1 +OUTDO 1 0 1 1 +OURSELVES 4 0 4 4 +OTTER 1 0 1 1 +OTHO 1 0 1 1 +OTHERWISE 1 0 1 1 +OSTRICH 1 0 1 1 +ORTHODOX 1 0 1 1 +ORPHAN 1 0 1 1 +ORNERY 1 0 1 1 +ORNAMENTED 1 0 1 1 +ORNAMENTAL 1 0 1 1 +ORLEANS 1 0 1 1 +ORISON 1 0 1 1 +ORIGIN 1 0 1 1 +ORIENTAL 1 0 1 1 +ORGANIZATION 3 0 3 3 +ORFICER 1 0 1 1 +ORDINARY 1 0 1 1 +ORDERS 8 0 8 8 +ORDERED 5 0 5 5 +ORDEAL 1 0 1 1 +ORDAINED 1 0 1 1 +ORCHARDS 1 0 1 1 +ORBIS 1 0 1 1 +ORANGE 1 0 1 1 +ORACLE 2 0 2 2 +OPTIC 1 0 1 1 +OPPRESSOR 1 0 1 1 +OPPRESSION 3 0 3 3 +OPPOSITE 3 0 3 3 +OPPOSE 1 0 1 1 +OPINION 3 0 3 3 +OPERATIONS 1 0 1 1 +OPERATED 1 0 1 1 +OPERA 1 0 1 1 +OPENS 3 0 3 3 +OPAQUE 1 0 1 1 +ONWARDS 1 0 1 1 +ONWARD 1 0 1 1 +ONION 2 0 2 2 +ONESELF 1 0 1 1 +OMITTING 1 0 1 1 +OLDISH 1 0 1 1 +OLDEST 1 0 1 1 +OLDER 1 0 1 1 +OGRE 3 0 3 3 +OGLING 1 0 1 1 +OFFICIAL 1 0 1 1 +OFFERINGS 1 0 1 1 +OFFERING 1 0 1 1 +OFFERED 3 0 3 3 +OFFER 2 0 2 2 +OFFENSIVE 1 0 1 1 +OCEAN 1 0 1 1 +OCCURRED 5 0 5 5 +OCCUR 1 0 1 1 +OCCUPY 3 0 3 3 +OCCUPIED 2 0 2 2 +OBVIOUSLY 1 0 1 1 +OBVIOUS 1 0 1 1 +OBTAINED 1 0 1 1 +OBTAIN 3 0 3 3 +OBSTRUCTION 1 0 1 1 +OBSTINATE 1 0 1 1 +OBSTINACY 1 0 1 1 +OBSERVER 1 0 1 1 +OBSERVE 1 0 1 1 +OBSERVATIONS 2 0 2 2 +OBSERVATION 1 0 1 1 +OBSERVANT 1 0 1 1 +OBLONG 1 0 1 1 +OBLIGATION 3 0 3 3 +OBJECTS 2 0 2 2 +OBJECTION 2 0 2 2 +OBEYING 1 0 1 1 +OBEDIENTLY 1 0 1 1 +OBEDIENT 2 0 2 2 +OATH 4 0 4 4 +OAR 1 0 1 1 +O'CLOCK 9 0 9 9 +NUN 1 0 1 1 +NUMBERED 1 0 1 1 +NUMBER 5 0 5 5 +NUISANCE 1 0 1 1 +NUBIAN 1 0 1 1 +NOWHERE 2 0 2 2 +NOTWITHSTANDING 1 0 1 1 +NOTORIOUS 1 0 1 1 +NOTION 1 0 1 1 +NOTICED 3 0 3 3 +NOTICE 7 0 7 7 +NOTHING 28 0 28 28 +NOTES 1 0 1 1 +NOTED 2 0 2 2 +NOTABLES 1 0 1 1 +NOSE 3 0 3 3 +NORTHWARD 1 0 1 1 +NORTHERN 1 0 1 1 +NORMAN 2 0 2 2 +NOON 1 0 1 1 +NONSENSE 2 0 2 2 +NOISE 5 0 5 5 +NODDING 1 0 1 1 +NODDED 3 0 3 3 +NOD 2 0 2 2 +NOBODY 6 0 6 6 +NOBLEMEN 1 0 1 1 +NOBLEMAN 1 0 1 1 +NOBILITY 1 0 1 1 +NINTH 1 0 1 1 +NINEVEH 1 0 1 1 +NINETY 2 0 2 2 +NINETEENTH 1 0 1 1 +NINE 6 0 6 6 +NIMBLENESS 1 0 1 1 +NICOTINE 1 0 1 1 +NICKEL 1 0 1 1 +NICK 1 0 1 1 +NICHOLAS 1 0 1 1 +NICETIES 1 0 1 1 +NEWS 7 0 7 7 +NEVERTHELESS 3 0 3 3 +NERVOUSNESS 2 0 2 2 +NERVOUSLY 1 0 1 1 +NERVOUS 2 0 2 2 +NERVES 1 0 1 1 +NEMESIS 3 0 3 3 +NEIGHBOURHOOD 1 0 1 1 +NEIGHBOUR 1 0 1 1 +NEIGHBOR 2 0 2 2 +NEGRO 2 0 2 2 +NEGLECTING 1 0 1 1 +NEGLECTED 2 0 2 2 +NEGLECT 1 0 1 1 +NEGATIVE 1 0 1 1 +NEEDN'T 1 0 1 1 +NEEDLED 1 0 1 1 +NEEDED 9 0 9 9 +NEED 9 0 9 9 +NECK 1 0 1 1 +NECESSITY 7 0 7 7 +NECESSARY 10 0 10 10 +NECESSARILY 2 0 2 2 +NEATLY 2 0 2 2 +NEARLY 2 0 2 2 +NEARED 1 0 1 1 +NAY 2 0 2 2 +NAVEL 1 0 1 1 +NAUSEA 1 0 1 1 +NATURED 2 0 2 2 +NATTY 1 0 1 1 +NATIVE 6 0 6 6 +NATIONS 3 0 3 3 +NATION 2 0 2 2 +NARROWNESS 1 0 1 1 +NARROWER 1 0 1 1 +NARROW 5 0 5 5 +NARRATOR 1 0 1 1 +NAPKINS 1 0 1 1 +NAPKIN 1 0 1 1 +NAMES 5 0 5 5 +NAMED 5 0 5 5 +NAME'S 1 0 1 1 +NAME 21 0 21 21 +NAILS 2 0 2 2 +NAILED 2 0 2 2 +NAIL 5 0 5 5 +MUTTERED 5 0 5 5 +MUSKETS 1 0 1 1 +MUSICAL 1 0 1 1 +MUSIC 2 0 2 2 +MURMURED 1 0 1 1 +MURMUR 1 0 1 1 +MURDERED 1 0 1 1 +MULTITUDE 1 0 1 1 +MULE 1 0 1 1 +MUFFLED 1 0 1 1 +MUCOUS 1 0 1 1 
+MOVING 6 0 6 6 +MOVEMENTS 2 0 2 2 +MOVED 7 0 7 7 +MOVE 1 0 1 1 +MOUSE 1 0 1 1 +MOURNFULLY 1 0 1 1 +MOUNTED 2 0 2 2 +MOUNTAINS 1 0 1 1 +MOUNTAIN 1 0 1 1 +MOUNT 1 0 1 1 +MOTLEY 1 0 1 1 +MOTIVES 1 0 1 1 +MOTIVE 1 0 1 1 +MOTHER'S 3 0 3 3 +MOSTLY 2 0 2 2 +MORTAR 1 0 1 1 +MORPHINE 2 0 2 2 +MOREOVER 1 0 1 1 +MORCERF 3 0 3 3 +MORALS 1 0 1 1 +MOPPED 1 0 1 1 +MOORED 1 0 1 1 +MOONFLOWERS 1 0 1 1 +MOON 2 0 2 2 +MOOD 2 0 2 2 +MONTHS 6 0 6 6 +MONTESQUIEU 1 0 1 1 +MONSTROUS 1 0 1 1 +MONSTERS 2 0 2 2 +MONOTONOUS 1 0 1 1 +MONKEY 3 0 3 3 +MONDAY 2 0 2 2 +MONASTERY 1 0 1 1 +MONARCH 2 0 2 2 +MOMENTS 6 0 6 6 +MOMENT'S 1 0 1 1 +MOLESTED 1 0 1 1 +MOHAMMED 1 0 1 1 +MODEST 1 0 1 1 +MODERN 2 0 2 2 +MODEL 2 0 2 2 +MOCKERY 1 0 1 1 +MOB 1 0 1 1 +MOANING 2 0 2 2 +MIXTURE 1 0 1 1 +MIXING 1 0 1 1 +MIXED 2 0 2 2 +MIX 3 0 3 3 +MISTRUST 1 0 1 1 +MISTRESSES 1 0 1 1 +MISTRESS 6 0 6 6 +MISTAKE 2 0 2 2 +MISSOURI 1 0 1 1 +MISSISSIPPIAN 1 0 1 1 +MISSISSIPPI 1 0 1 1 +MISSING 3 0 3 3 +MISFORTUNE 1 0 1 1 +MISERY 3 0 3 3 +MISERABLE 2 0 2 2 +MISCONDUCT 1 0 1 1 +MISCONCEPTION 1 0 1 1 +MISCHIEVOUS 2 0 2 2 +MISCHIEF 1 0 1 1 +MISAPPREHENSION 1 0 1 1 +MISANTHROPY 1 0 1 1 +MIRTH 2 0 2 2 +MIRACULOUS 1 0 1 1 +MIRACLES 3 0 3 3 +MIRABELLE 2 0 2 2 +MINUTES 11 0 11 11 +MINUTE 6 0 6 6 +MINNESOTA 1 0 1 1 +MINNEAPOLIS 1 0 1 1 +MINISTERS 1 0 1 1 +MINIMS 1 0 1 1 +MINIATURE 1 0 1 1 +MINGLED 2 0 2 2 +MINDS 1 0 1 1 +MINDED 1 0 1 1 +MILTON 4 0 4 4 +MILLIONS 1 0 1 1 +MILLER'S 1 0 1 1 +MILITARY 5 0 5 5 +MIKE'S 1 0 1 1 +MIGHTY 3 0 3 3 +MIGHTINESS 1 0 1 1 +MIDST 3 0 3 3 +MIDNIGHT 3 0 3 3 +MIDDY'S 1 0 1 1 +MIDDY 1 0 1 1 +MIDDLE 5 0 5 5 +MICROSCOPIC 1 0 1 1 +MICROBE 1 0 1 1 +METALLIC 1 0 1 1 +MESSAGE 1 0 1 1 +MESELF 1 0 1 1 +MERITS 1 0 1 1 +MERE 3 0 3 3 +MERCY 5 0 5 5 +MERCURY 2 0 2 2 +MERCIFUL 1 0 1 1 +MERCIES 1 0 1 1 +MERCHANTS 6 0 6 6 +MERCHANT 3 0 3 3 +MENTION 1 0 1 1 +MENTALLY 3 0 3 3 +MENACING 1 0 1 1 +MEMORY 4 0 4 4 +MEMORIAL 1 0 1 1 +MEMBERS 7 0 7 7 +MEMBER 1 0 1 1 +MELTED 1 0 1 1 +MELANCHOLY 1 0 1 1 +MEETING 4 0 4 4 +MEET 9 0 9 9 +MEDIUMS 1 0 1 1 +MEDIUM 1 0 1 1 +MEDITATION 1 0 1 1 +MEDITATED 1 0 1 1 +MEDICINE 1 0 1 1 +MEDICAMENTS 1 0 1 1 +MEDIAN 1 0 1 1 +MECHANICALLY 1 0 1 1 +MECHANICAL 1 0 1 1 +MEASURABLE 1 0 1 1 +MEANWHILE 4 0 4 4 +MEANING 2 0 2 2 +MAYBE 4 0 4 4 +MATTOCK 1 0 1 1 +MATTERED 1 0 1 1 +MATERIALLY 1 0 1 1 +MATERIAL 1 0 1 1 +MATCH 1 0 1 1 +MASTERY 1 0 1 1 +MASTERS 1 0 1 1 +MASTERPIECE 1 0 1 1 +MASTERED 1 0 1 1 +MASTER'S 1 0 1 1 +MAST 1 0 1 1 +MASON'S 1 0 1 1 +MASON 1 0 1 1 +MARY 2 0 2 2 +MARVELLOUS 4 0 4 4 +MARTYR 1 0 1 1 +MARSPORT 1 0 1 1 +MARSHAL'S 1 0 1 1 +MARRY 3 0 3 3 +MARRIED 4 0 4 4 +MARLBOROUGH'S 1 0 1 1 +MARKS 1 0 1 1 +MARKING 1 0 1 1 +MARK 4 0 4 4 +MARJORIE 3 0 3 3 +MARIUS 6 0 6 6 +MARIA 1 0 1 1 +MARGUERITE 11 0 11 11 +MARGINAL 1 0 1 1 +MARCH 2 0 2 2 +MARBLE 2 0 2 2 +MANTELPIECE 1 0 1 1 +MANIFESTATION 1 0 1 1 +MANCHESTER 1 0 1 1 +MANAGE 1 0 1 1 +MAMMOTH 1 0 1 1 +MALICE 1 0 1 1 +MALEVOLENT 1 0 1 1 +MALADY 1 0 1 1 +MAKER 1 0 1 1 +MAJESTY 2 0 2 2 +MAINTAINED 1 0 1 1 +MAINTAIN 1 0 1 1 +MAINLY 1 0 1 1 +MAIDEN 3 0 3 3 +MAID 3 0 3 3 +MAHOGANY 2 0 2 2 +MAGNIFYING 1 0 1 1 +MAGNIFIES 1 0 1 1 +MAGNIFICENT 2 0 2 2 +MAGNIFICENCE 1 0 1 1 +MAGNANIMITY 1 0 1 1 +MAGICIAN 2 0 2 2 +MAGICAL 1 0 1 1 +MAGIC 1 0 1 1 +MAGAZINE 1 0 1 1 +MADRID 1 0 1 1 +MADNESS 2 0 2 2 +MADAME 1 0 1 1 +MACHINES 1 0 1 1 +MACHINERY 1 0 1 1 +LYNCHES 1 0 1 1 +LUTHER 1 0 1 1 +LUSTILY 1 0 1 1 +LURKING 1 0 1 1 +LUMP 1 0 1 1 +LUGGAGE 1 0 1 1 +LUCKY 2 0 2 2 +LUCKLESS 1 0 1 1 +LUCID 1 0 1 1 +LUCAS 1 0 1 1 +LOYALTY 2 0 2 2 +LOYAL 2 0 2 2 +LOWERED 1 0 1 1 +LOVES 6 0 6 6 +LOVERS 2 0 
2 2 +LOUVRE 1 0 1 1 +LOUISIANA 1 0 1 1 +LOUIS 3 0 3 3 +LOUDLY 1 0 1 1 +LOUDER 1 0 1 1 +LOUD 8 0 8 8 +LOSSES 1 0 1 1 +LOSING 3 0 3 3 +LOSE 6 0 6 6 +LORN 1 0 1 1 +LORDSHIPS 1 0 1 1 +LORDS 2 0 2 2 +LOQUACITY 1 0 1 1 +LOOSENED 1 0 1 1 +LOOSELY 1 0 1 1 +LOOKING 21 0 21 21 +LONGING 3 0 3 3 +LONGED 2 0 2 2 +LONELY 1 0 1 1 +LONDON 4 0 4 4 +LODGE 1 0 1 1 +LOCKED 4 0 4 4 +LOCATE 1 0 1 1 +LOCAL 4 0 4 4 +LOBSTERS 2 0 2 2 +LOADING 1 0 1 1 +LIVING 5 0 5 5 +LIVID 1 0 1 1 +LIVERY 1 0 1 1 +LIVELIHOOD 1 0 1 1 +LITTER 1 0 1 1 +LITERATURE 1 0 1 1 +LITERALLY 1 0 1 1 +LISTENING 5 0 5 5 +LISTENERS 1 0 1 1 +LISTENER 1 0 1 1 +LISTENED 4 0 4 4 +LIQUOR 4 0 4 4 +LIQUID 1 0 1 1 +LIP 3 0 3 3 +LIONS 1 0 1 1 +LION 1 0 1 1 +LINK 1 0 1 1 +LINGO 1 0 1 1 +LINGER 1 0 1 1 +LINES 2 0 2 2 +LINEN 2 0 2 2 +LINCOLN 1 0 1 1 +LIMPED 4 0 4 4 +LIMP 3 0 3 3 +LIMITS 1 0 1 1 +LIMIT 1 0 1 1 +LIMES 1 0 1 1 +LIKING 1 0 1 1 +LIKES 1 0 1 1 +LIGHTS 1 0 1 1 +LIGHTING 1 0 1 1 +LIGHTENED 1 0 1 1 +LIGHT 19 0 19 19 +LIGATURES 1 0 1 1 +LIFTING 1 0 1 1 +LIFT 1 0 1 1 +LIFETIME 1 0 1 1 +LIBRARY 2 0 2 2 +LIBERTY 3 0 3 3 +LIBERATION 2 0 2 2 +LEVIN 6 0 6 6 +LEVELLED 1 0 1 1 +LEVEL 1 0 1 1 +LETTING 1 0 1 1 +LETTER 22 0 22 22 +LESSONS 1 0 1 1 +LESSENS 1 0 1 1 +LENT 1 0 1 1 +LENGTH 4 0 4 4 +LEND 2 0 2 2 +LEISURE 3 0 3 3 +LEGISLATURE 1 0 1 1 +LEGALLY 1 0 1 1 +LEGAL 2 0 2 2 +LEG 1 0 1 1 +LEAVING 5 0 5 5 +LEAVES 1 0 1 1 +LEAST 15 0 15 15 +LEARNING 4 0 4 4 +LEAPT 1 0 1 1 +LEAPED 1 0 1 1 +LEAP 1 0 1 1 +LEANING 1 0 1 1 +LEAN 1 0 1 1 +LEAF 1 0 1 1 +LEADS 1 0 1 1 +LEADING 3 0 3 3 +LEADER 2 0 2 2 +LAZY 1 0 1 1 +LAZILY 1 0 1 1 +LAYING 3 0 3 3 +LAY 14 0 14 14 +LAWYER 1 0 1 1 +LAURA 3 0 3 3 +LAUGHS 1 0 1 1 +LAUGHING 5 0 5 5 +LAUGH 9 0 9 9 +LATTER 2 0 2 2 +LATIN 1 0 1 1 +LASTLY 2 0 2 2 +LASTING 1 0 1 1 +LARGESSE 1 0 1 1 +LAPSE 1 0 1 1 +LANTERN 1 0 1 1 +LANGUAGE 2 0 2 2 +LANDSMAN 1 0 1 1 +LANDING 1 0 1 1 +LAMPLIT 1 0 1 1 +LAMP 1 0 1 1 +LAME 1 0 1 1 +LADIES 6 0 6 6 +LACE 2 0 2 2 +LABOURS 1 0 1 1 +LABORER 1 0 1 1 +KNUCKLES 1 0 1 1 +KNOWING 3 0 3 3 +KNOWEST 3 0 3 3 +KNOWED 1 0 1 1 +KNOCKER 1 0 1 1 +KNOCK 1 0 1 1 +KNIGHTHOOD 1 0 1 1 +KNEES 3 0 3 3 +KNAVE 1 0 1 1 +KITCHEN 3 0 3 3 +KISSING 2 0 2 2 +KINSFOLK 1 0 1 1 +KINGS 8 0 8 8 +KINGDOM 3 0 3 3 +KINDS 1 0 1 1 +KINDLY 3 0 3 3 +KINDEST 1 0 1 1 +KILLING 1 0 1 1 +KILLED 4 0 4 4 +KILL 14 0 14 14 +KIDNEYS 1 0 1 1 +KICKED 1 0 1 1 +KHORASAN 2 0 2 2 +KEYHOLE 1 0 1 1 +KEY 5 0 5 5 +KEPT 9 0 9 9 +KENT 2 0 2 2 +KENNETH 3 0 3 3 +KEEPER'S 1 0 1 1 +KEEPER 2 0 2 2 +KEENLY 1 0 1 1 +KAZI 1 0 1 1 +K 1 0 1 1 +JUSTLY 1 0 1 1 +JUSTINIAN 1 0 1 1 +JUSTIFIES 1 0 1 1 +JUSTIFICATION 3 0 3 3 +JURISDICTION 1 0 1 1 +JUNIOR 1 0 1 1 +JUNE 1 0 1 1 +JUMPING 2 0 2 2 +JUMP 1 0 1 1 +JUICE 1 0 1 1 +JUGS 1 0 1 1 +JUDICIAL 1 0 1 1 +JUDGES 2 0 2 2 +JUDGED 1 0 1 1 +JOYOUS 1 0 1 1 +JOYFUL 2 0 2 2 +JOYANCE 2 0 2 2 +JOY 7 0 7 7 +JOURNEYED 1 0 1 1 +JOURNEY 8 0 8 8 +JOURNALISM 1 0 1 1 +JOSEPH 1 0 1 1 +JONES 1 0 1 1 +JOLLY 1 0 1 1 +JOINTS 1 0 1 1 +JOINING 1 0 1 1 +JOIN 5 0 5 5 +JOHN 9 0 9 9 +JOBS 1 0 1 1 +JOB 7 0 7 7 +JOANNA'S 1 0 1 1 +JEWELRY 1 0 1 1 +JEW'S 1 0 1 1 +JEW 1 0 1 1 +JESUS 2 0 2 2 +JESTER 2 0 2 2 +JERK 1 0 1 1 +JERICHO 1 0 1 1 +JENKINS 2 0 2 2 +JEHU 1 0 1 1 +JEHOVAH 3 0 3 3 +JEERINGLY 1 0 1 1 +JEAN 10 0 10 10 +JANUARY 1 0 1 1 +JANGLING 1 0 1 1 +JANE'S 1 0 1 1 +JAMIESON 1 0 1 1 +JAIL 1 0 1 1 +JACKET 1 0 1 1 +IVANOVITCH'S 1 0 1 1 +ITSELF 7 0 7 7 +ISRAEL'S 2 0 2 2 +ISRAEL 7 0 7 7 +ISOLATION 1 0 1 1 +ISLANDERS 1 0 1 1 +ISLAND 5 0 5 5 +ISAAC 2 0 2 2 +IRRITATION 1 0 1 1 +IRRITABILITY 1 0 1 1 +IRREVERENTLY 1 0 1 1 +IRREVERENCE 1 0 1 1 +IRRESISTIBLY 1 0 1 1 
+IRRESISTIBLE 1 0 1 1 +IRON 7 0 7 7 +IRKSOME 1 0 1 1 +IRISH 1 0 1 1 +IRELAND 2 0 2 2 +IRATE 1 0 1 1 +IOWA 2 0 2 2 +INVOLVED 1 0 1 1 +INVOKE 1 0 1 1 +INVITED 1 0 1 1 +INVITATION 2 0 2 2 +INVISIBLE 1 0 1 1 +INVINCIBLE 1 0 1 1 +INVETERATE 1 0 1 1 +INVESTIGATION 2 0 2 2 +INVENTING 1 0 1 1 +INVADING 1 0 1 1 +INVADERS 1 0 1 1 +INVADED 1 0 1 1 +INVADE 1 0 1 1 +INTRODUCTION 1 0 1 1 +INTRODUCING 1 0 1 1 +INTRODUCES 1 0 1 1 +INTRODUCED 1 0 1 1 +INTOXICATED 2 0 2 2 +INTOLERABLE 1 0 1 1 +INTIMATES 1 0 1 1 +INTERVIEWS 1 0 1 1 +INTERVAL 3 0 3 3 +INTERRUPTED 1 0 1 1 +INTERRED 2 0 2 2 +INTERPRETATION 1 0 1 1 +INTERPOLATIONS 1 0 1 1 +INTERNATIONAL 1 0 1 1 +INTERMISSION 1 0 1 1 +INTERMENT 2 0 2 2 +INTERMEDDLING 1 0 1 1 +INTERFERENCE 1 0 1 1 +INTERESTED 3 0 3 3 +INTERCOURSE 1 0 1 1 +INTERCHANGE 1 0 1 1 +INTENTIONALLY 2 0 2 2 +INTENSITY 1 0 1 1 +INTENSELY 2 0 2 2 +INTEND 2 0 2 2 +INTELLIGENT 2 0 2 2 +INTELLIGENCE 2 0 2 2 +INTELLECT 1 0 1 1 +INSULTED 1 0 1 1 +INSUFFICIENT 1 0 1 1 +INSTRUMENTS 4 0 4 4 +INSTRUCTIONS 1 0 1 1 +INSTITUTIONS 1 0 1 1 +INSTITUTED 1 0 1 1 +INSTITUTE 1 0 1 1 +INSTINCTS 1 0 1 1 +INSTINCT 3 0 3 3 +INSTANTLY 1 0 1 1 +INSTANT 5 0 5 5 +INSPIRES 1 0 1 1 +INSPIRATION 3 0 3 3 +INSOLUBLE 1 0 1 1 +INSOLENT 1 0 1 1 +INSISTING 1 0 1 1 +INSISTENCE 1 0 1 1 +INSISTED 2 0 2 2 +INSINUATING 1 0 1 1 +INSHALLAH 1 0 1 1 +INSECURITY 1 0 1 1 +INSCRIPTION 1 0 1 1 +INSANE 1 0 1 1 +INQUISITION 1 0 1 1 +INQUIRIES 1 0 1 1 +INQUIRED 5 0 5 5 +INNOCENT 3 0 3 3 +INNKEEPER 2 0 2 2 +INJURIES 1 0 1 1 +INJURED 1 0 1 1 +INIQUITIES 1 0 1 1 +INHERENT 1 0 1 1 +INHABITANTS 1 0 1 1 +INHABIT 1 0 1 1 +INGREDIENTS 1 0 1 1 +INFORMED 5 0 5 5 +INFORMATION 3 0 3 3 +INFORM 1 0 1 1 +INFLUENCES 1 0 1 1 +INFLUENCE 10 0 10 10 +INFLICT 1 0 1 1 +INFLATE 1 0 1 1 +INFIRMITY 1 0 1 1 +INFIRM 1 0 1 1 +INFINITELY 1 0 1 1 +INFINITE 4 0 4 4 +INFERENTIALLY 1 0 1 1 +INEXORABLY 1 0 1 1 +INEVITABLE 1 0 1 1 +INDUSTRY 1 0 1 1 +INDUSTRIOUS 1 0 1 1 +INDUSTRIAL 1 0 1 1 +INDULGENT 1 0 1 1 +INDULGENCE 2 0 2 2 +INDUCED 1 0 1 1 +INDIVIDUALS 9 0 9 9 +INDIVIDUAL 1 0 1 1 +INDISPOSITION 1 0 1 1 +INDISCRETION 1 0 1 1 +INDIGNATION 1 0 1 1 +INDIFFERENT 2 0 2 2 +INDIFFERENCE 1 0 1 1 +INDICATIONS 2 0 2 2 +INDICATED 2 0 2 2 +INDIANS 2 0 2 2 +INDIANA 2 0 2 2 +INDIAN 1 0 1 1 +INDESCRIBABLE 1 0 1 1 +INDEPENDENT 2 0 2 2 +INDEPENDENCE 4 0 4 4 +INDEED 14 0 14 14 +INDECISION 1 0 1 1 +INCUR 1 0 1 1 +INCREDULOUSLY 1 0 1 1 +INCREDULITY 1 0 1 1 +INCREASING 2 0 2 2 +INCREASED 5 0 5 5 +INCONSISTENCY 1 0 1 1 +INCONCEIVABLE 1 0 1 1 +INCOHERENT 1 0 1 1 +INCLUDING 2 0 2 2 +INCLUDE 1 0 1 1 +INCLINED 1 0 1 1 +INCLINATION 1 0 1 1 +INCIDENTS 1 0 1 1 +INCIDENT 1 0 1 1 +INCARCERATING 1 0 1 1 +INASMUCH 1 0 1 1 +INANIMATE 1 0 1 1 +IMPULSE 3 0 3 3 +IMPROVISE 1 0 1 1 +IMPROVISATION 1 0 1 1 +IMPROVING 1 0 1 1 +IMPRESSION 1 0 1 1 +IMPRECATIONS 1 0 1 1 +IMPRECATION 1 0 1 1 +IMPOSSIBLE 4 0 4 4 +IMPOSING 1 0 1 1 +IMPOSES 1 0 1 1 +IMPORTS 1 0 1 1 +IMPORTED 1 0 1 1 +IMPORTANT 1 0 1 1 +IMPORTANCE 3 0 3 3 +IMPLIES 1 0 1 1 +IMPLIED 2 0 2 2 +IMPLACABLE 1 0 1 1 +IMPIOUS 1 0 1 1 +IMPERTINENT 1 0 1 1 +IMPERSONAL 1 0 1 1 +IMPERIOUS 1 0 1 1 +IMPERATIVE 1 0 1 1 +IMPATIENTLY 2 0 2 2 +IMPATIENT 2 0 2 2 +IMMORTALS 1 0 1 1 +IMMEDIATELY 9 0 9 9 +IMMEDIATE 1 0 1 1 +IMITATION 1 0 1 1 +IMITATE 1 0 1 1 +IMBECILE 1 0 1 1 +IMAGINED 1 0 1 1 +IMAGINE 6 0 6 6 +IMAGINATION 1 0 1 1 +IMAGINARY 1 0 1 1 +IMAGE 2 0 2 2 +ILLUSTRIOUS 4 0 4 4 +ILLITERATE 1 0 1 1 +IGNORANT 2 0 2 2 +IGNORANCE 1 0 1 1 +IDOLATRIES 1 0 1 1 +IDLE 1 0 1 1 +IDIOTIC 1 0 1 1 +IDIOT 1 0 1 1 +IDEAS 2 0 2 2 +IDEAL 1 0 1 1 +ICE 1 0 1 1 +HYPOTHETICAL 1 0 1 1 
+HYPODERMICALLY 1 0 1 1 +HYPODERMIC 1 0 1 1 +HUSTLED 1 0 1 1 +HUSTLE 1 0 1 1 +HUSKILY 1 0 1 1 +HUSH 1 0 1 1 +HUSBANDMEN 1 0 1 1 +HUSBAND'S 3 0 3 3 +HUSBAND 9 0 9 9 +HURRYING 3 0 3 3 +HURRY 1 0 1 1 +HURRIEDLY 3 0 3 3 +HURRIED 3 0 3 3 +HURRICANE 1 0 1 1 +HUNTED 2 0 2 2 +HUNT 1 0 1 1 +HUNGRY 2 0 2 2 +HUNGER 2 0 2 2 +HUNGARY 1 0 1 1 +HUNG 2 0 2 2 +HUNDREDTH 1 0 1 1 +HUNDREDS 1 0 1 1 +HUNDRED 29 0 29 29 +HUMOURS 1 0 1 1 +HUMILIATIONS 1 0 1 1 +HUMILIATION 1 0 1 1 +HUMILIATED 1 0 1 1 +HUMBLE 1 0 1 1 +HUMAN 6 0 6 6 +HULLO 1 0 1 1 +HUGELY 1 0 1 1 +HUGE 3 0 3 3 +HOUSES 4 0 4 4 +HOURS 12 0 12 12 +HOTLY 1 0 1 1 +HOT 5 0 5 5 +HOST 3 0 3 3 +HOSPITALITY 1 0 1 1 +HORSEBACK 1 0 1 1 +HORRIBLE 3 0 3 3 +HORNS 1 0 1 1 +HORN 1 0 1 1 +HORIZONTAL 1 0 1 1 +HORIZON 2 0 2 2 +HORACE 1 0 1 1 +HOPING 1 0 1 1 +HOPELESS 1 0 1 1 +HOPEFUL 1 0 1 1 +HOPED 2 0 2 2 +HOPE 16 0 16 16 +HOOTED 1 0 1 1 +HOOK 1 0 1 1 +HONEYMOON 1 0 1 1 +HONEY 1 0 1 1 +HOMEWARD 1 0 1 1 +HOMELESS 1 0 1 1 +HOLY 6 0 6 6 +HOLLOW 2 0 2 2 +HOLES 2 0 2 2 +HOLDS 1 0 1 1 +HOLDING 6 0 6 6 +HITHERTO 2 0 2 2 +HITCH 1 0 1 1 +HISTORIANS 1 0 1 1 +HIRE 1 0 1 1 +HINTS 1 0 1 1 +HINTED 1 0 1 1 +HINT 3 0 3 3 +HINGES 1 0 1 1 +HINDER 1 0 1 1 +HILL 7 0 7 7 +HIGHWAYS 1 0 1 1 +HIGHLY 1 0 1 1 +HIGHEST 1 0 1 1 +HIGHER 1 0 1 1 +HIGGINS 1 0 1 1 +HIDING 1 0 1 1 +HIDEOUS 1 0 1 1 +HIDE 2 0 2 2 +HID 2 0 2 2 +HEY 1 0 1 1 +HESITATING 1 0 1 1 +HESITATED 2 0 2 2 +HERS 4 0 4 4 +HERO 1 0 1 1 +HERE'S 2 0 2 2 +HERDSMEN 1 0 1 1 +HERCULEAN 1 0 1 1 +HERBS 1 0 1 1 +HEPTARCHIES 1 0 1 1 +HENRY 3 0 3 3 +HENCE 4 0 4 4 +HELSTONE 1 0 1 1 +HELPLESSLY 1 0 1 1 +HELPLESS 3 0 3 3 +HELP 17 0 17 17 +HELMET 2 0 2 2 +HEIR 2 0 2 2 +HEIGHT 1 0 1 1 +HEEL 1 0 1 1 +HEED 1 0 1 1 +HEDGE 1 0 1 1 +HEBREWS 4 0 4 4 +HEAVY 11 0 11 11 +HEAVIEST 1 0 1 1 +HEAVENS 1 0 1 1 +HEAVEN'S 1 0 1 1 +HEAVEN 6 0 6 6 +HEAVE 1 0 1 1 +HEARTILY 1 0 1 1 +HEARTED 2 0 2 2 +HEARING 2 0 2 2 +HEAP 2 0 2 2 +HEALTHY 1 0 1 1 +HEADQUARTERS 2 0 2 2 +HEADLIGHTS 1 0 1 1 +HEADED 4 0 4 4 +HEADACHES 1 0 1 1 +HAWED 1 0 1 1 +HAVEN'T 4 0 4 4 +HAUNT 3 0 3 3 +HAUNCHES 1 0 1 1 +HATTON 1 0 1 1 +HATS 1 0 1 1 +HATREDS 1 0 1 1 +HATRED 2 0 2 2 +HATED 3 0 3 3 +HAT 3 0 3 3 +HASTY 2 0 2 2 +HASTILY 2 0 2 2 +HASTENED 1 0 1 1 +HASTEN 1 0 1 1 +HASTE 5 0 5 5 +HAST 7 0 7 7 +HASN'T 1 0 1 1 +HASHISH 1 0 1 1 +HARSHLY 2 0 2 2 +HARRY 3 0 3 3 +HARNESSED 1 0 1 1 +HARMONY 1 0 1 1 +HARMLESS 1 0 1 1 +HARM 6 0 6 6 +HARKNESS 1 0 1 1 +HARGRAVE 1 0 1 1 +HAPPINESS 5 0 5 5 +HAPPIEST 1 0 1 1 +HAPPIER 3 0 3 3 +HAPPENS 1 0 1 1 +HANGING 1 0 1 1 +HANGED 1 0 1 1 +HANG 3 0 3 3 +HANDSOME 4 0 4 4 +HANDLED 1 0 1 1 +HANDKERCHIEF 3 0 3 3 +HANDING 1 0 1 1 +HANDIER 1 0 1 1 +HANDED 1 0 1 1 +HAMPERED 1 0 1 1 +HAMMERS 1 0 1 1 +HAMMER 1 0 1 1 +HALTS 1 0 1 1 +HALTING 1 0 1 1 +HALT 5 0 5 5 +HALLS 1 0 1 1 +HALFPENNY 1 0 1 1 +HALE 6 0 6 6 +HAIRED 1 0 1 1 +HAIN'T 1 0 1 1 +HAG 1 0 1 1 +HACK 1 0 1 1 +HABITUAL 1 0 1 1 +HABITS 2 0 2 2 +GYLINGDEN 1 0 1 1 +GUT 3 0 3 3 +GUSH 1 0 1 1 +GULPED 1 0 1 1 +GULF 1 0 1 1 +GUISE 1 0 1 1 +GUILTY 5 0 5 5 +GUILT 2 0 2 2 +GUILD 1 0 1 1 +GUESTS 2 0 2 2 +GUEST 4 0 4 4 +GUESSED 1 0 1 1 +GUARDS 1 0 1 1 +GUARDED 1 0 1 1 +GUARD 1 0 1 1 +GRUMBLED 2 0 2 2 +GRUFFISH 1 0 1 1 +GROWTH 1 0 1 1 +GROWN 1 0 1 1 +GROUPS 6 0 6 6 +GROUP 3 0 3 3 +GROOMED 1 0 1 1 +GROAN 2 0 2 2 +GRINNED 2 0 2 2 +GRIN 2 0 2 2 +GRIMSBY 1 0 1 1 +GRIMACED 1 0 1 1 +GRIEVING 1 0 1 1 +GREET 2 0 2 2 +GREENWOOD 1 0 1 1 +GREENTON 1 0 1 1 +GREENHORNS 1 0 1 1 +GREENBACKS 1 0 1 1 +GREEN 2 0 2 2 +GREEK 2 0 2 2 +GREATLY 1 0 1 1 +GREATEST 6 0 6 6 +GREATER 6 0 6 6 +GREASY 1 0 1 1 +GRAVITY 1 0 1 1 +GRAVES 1 0 1 1 +GRAVELLED 1 0 1 1 +GRAVE 
17 0 17 17 +GRATING 2 0 2 2 +GRATIFICATION 1 0 1 1 +GRATEFUL 2 0 2 2 +GRATED 1 0 1 1 +GRASS 2 0 2 2 +GRASP 2 0 2 2 +GRAPE 1 0 1 1 +GRANTING 1 0 1 1 +GRANT 2 0 2 2 +GRANDSON 1 0 1 1 +GRANDPAPA 1 0 1 1 +GRANDFATHER 2 0 2 2 +GRANDEUR 1 0 1 1 +GRANDDAUGHTER 1 0 1 1 +GRAINS 1 0 1 1 +GRAIN 4 0 4 4 +GRAFTON'S 1 0 1 1 +GRAFTON 1 0 1 1 +GRAFT 2 0 2 2 +GRADUALLY 1 0 1 1 +GRACIOUSLY 2 0 2 2 +GRACIOUS 3 0 3 3 +GRACE 1 0 1 1 +GRABBED 2 0 2 2 +GOWN 1 0 1 1 +GOTTEN 1 0 1 1 +GOOSE 1 0 1 1 +GOODNESS 5 0 5 5 +GOLFING 1 0 1 1 +GOLDFINCH 1 0 1 1 +GNASHING 1 0 1 1 +GLOWING 2 0 2 2 +GLOWED 3 0 3 3 +GLOVES 3 0 3 3 +GLOVE 1 0 1 1 +GLORY 2 0 2 2 +GLORIOUS 1 0 1 1 +GLORIFY 1 0 1 1 +GLOOM 1 0 1 1 +GLOATING 1 0 1 1 +GLINTING 1 0 1 1 +GLIMPSE 1 0 1 1 +GLIMMER 1 0 1 1 +GLIDING 1 0 1 1 +GLEAMED 1 0 1 1 +GLAZED 1 0 1 1 +GLANCING 1 0 1 1 +GLANCES 2 0 2 2 +GLANCED 2 0 2 2 +GLADNESS 2 0 2 2 +GLADLY 1 0 1 1 +GLADDENED 1 0 1 1 +GIVES 7 0 7 7 +GIRDLE 2 0 2 2 +GIMLET 1 0 1 1 +GIFTED 1 0 1 1 +GIANT'S 1 0 1 1 +GIANT 1 0 1 1 +GHOSTS 1 0 1 1 +GHOST 2 0 2 2 +GHASTLY 2 0 2 2 +GETTING 12 0 12 12 +GETS 3 0 3 3 +GESTURE 1 0 1 1 +GERMS 1 0 1 1 +GERM 1 0 1 1 +GEORGES 1 0 1 1 +GENUINE 1 0 1 1 +GENTLY 1 0 1 1 +GENTLE 1 0 1 1 +GENIUS 1 0 1 1 +GENIALLY 1 0 1 1 +GENEROSITY 1 0 1 1 +GENERATION 1 0 1 1 +GENERALLY 3 0 3 3 +GAZING 2 0 2 2 +GAZED 3 0 3 3 +GAY 2 0 2 2 +GAULS 1 0 1 1 +GATHERING 2 0 2 2 +GATHERED 8 0 8 8 +GATHER 1 0 1 1 +GATES 1 0 1 1 +GATE 4 0 4 4 +GASPED 2 0 2 2 +GASP 1 0 1 1 +GARNISHMENT 1 0 1 1 +GARMENTS 2 0 2 2 +GARLANDED 1 0 1 1 +GARLAND 1 0 1 1 +GARDEN 7 0 7 7 +GAPS 1 0 1 1 +GAP 1 0 1 1 +GANG 5 0 5 5 +GAMMER 1 0 1 1 +GAMESTER 1 0 1 1 +GAMBLING 3 0 3 3 +GAMBLERS 1 0 1 1 +GALLOPED 1 0 1 1 +GALLERY 1 0 1 1 +GALLANT 3 0 3 3 +GALL 1 0 1 1 +GAIN 3 0 3 3 +GAILY 1 0 1 1 +GAIETY 1 0 1 1 +GAD'S 1 0 1 1 +GAD 1 0 1 1 +GABBLE 1 0 1 1 +G 1 0 1 1 +FUSS 2 0 2 2 +FURY 2 0 2 2 +FURTHEST 1 0 1 1 +FURNITURE 1 0 1 1 +FURNISHED 1 0 1 1 +FURNACE 1 0 1 1 +FURIOUS 2 0 2 2 +FUR 1 0 1 1 +FUNNY 3 0 3 3 +FUNDS 1 0 1 1 +FUMED 1 0 1 1 +FULLY 1 0 1 1 +FULFILLED 1 0 1 1 +FULFIL 1 0 1 1 +FUEL 1 0 1 1 +FRUITS 4 0 4 4 +FRUITLESS 1 0 1 1 +FRUIT 7 0 7 7 +FROWN 1 0 1 1 +FROSTY 1 0 1 1 +FROST 1 0 1 1 +FROGS 2 0 2 2 +FROG'S 1 0 1 1 +FRIGHTENS 1 0 1 1 +FRIGHTENED 3 0 3 3 +FRIGHTEN 1 0 1 1 +FRIENDSHIP 2 0 2 2 +FRIENDLINESS 1 0 1 1 +FRIEND'S 1 0 1 1 +FRIDAY 2 0 2 2 +FRESHEST 1 0 1 1 +FRESH 5 0 5 5 +FRERE 1 0 1 1 +FREQUENTLY 2 0 2 2 +FREQUENT 2 0 2 2 +FREELY 2 0 2 2 +FREED 2 0 2 2 +FRAUD 1 0 1 1 +FRANTICALLY 1 0 1 1 +FRANKNESS 1 0 1 1 +FRANKLY 1 0 1 1 +FRANK 1 0 1 1 +FRANCS 6 0 6 6 +FRANCOIS 1 0 1 1 +FRANCIS 1 0 1 1 +FRAME 1 0 1 1 +FRAGMENTS 1 0 1 1 +FOUNDATION 1 0 1 1 +FORWARDS 3 0 3 3 +FORTUNES 1 0 1 1 +FORTUNE 6 0 6 6 +FORTUNATELY 5 0 5 5 +FORTHWITH 1 0 1 1 +FORTH 4 0 4 4 +FORSOOTH 1 0 1 1 +FORMS 2 0 2 2 +FORMING 2 0 2 2 +FORMIDABLE 2 0 2 2 +FORMERLY 2 0 2 2 +FORGOTTEN 4 0 4 4 +FORGOT 7 0 7 7 +FORGIVE 2 0 2 2 +FORGETTING 1 0 1 1 +FORGET 2 0 2 2 +FORGERIES 1 0 1 1 +FORETASTE 1 0 1 1 +FORESTERS 1 0 1 1 +FORESHADOWED 1 0 1 1 +FORENOON 1 0 1 1 +FOREMOST 1 0 1 1 +FORELOCK 1 0 1 1 +FOREIGNERS 1 0 1 1 +FOREIGN 6 0 6 6 +FOREHEAD 4 0 4 4 +FOREFINGER 1 0 1 1 +FORBIDDEN 1 0 1 1 +FORBID 1 0 1 1 +FORBEARANCE 1 0 1 1 +FORBEAR 1 0 1 1 +FOOTSTEPS 1 0 1 1 +FOOTNOTE 1 0 1 1 +FOOTED 1 0 1 1 +FOOLISH 3 0 3 3 +FOND 2 0 2 2 +FOLLY 1 0 1 1 +FOLLOWERS 3 0 3 3 +FOLLOWER 1 0 1 1 +FOLKS 3 0 3 3 +FOLDED 2 0 2 2 +FOGGY 1 0 1 1 +FOES 2 0 2 2 +FLUTTER 1 0 1 1 +FLUSHED 2 0 2 2 +FLUSH 1 0 1 1 +FLUNG 2 0 2 2 +FLUID 2 0 2 2 +FLOWERS 4 0 4 4 +FLOW 1 0 1 1 +FLOURISHING 1 0 1 1 +FLOURISHED 1 0 1 1 
+FLOURISH 1 0 1 1 +FLOATED 1 0 1 1 +FLITTED 1 0 1 1 +FLIRTATION 1 0 1 1 +FLING 1 0 1 1 +FLINCH 1 0 1 1 +FLIGHT 5 0 5 5 +FLICK 1 0 1 1 +FLEERED 1 0 1 1 +FLEECED 1 0 1 1 +FLEE 1 0 1 1 +FLED 4 0 4 4 +FLATTERY 1 0 1 1 +FLASK 1 0 1 1 +FLASHING 1 0 1 1 +FLASHED 1 0 1 1 +FLARING 1 0 1 1 +FLAPPING 1 0 1 1 +FLAMES 2 0 2 2 +FLAME 5 0 5 5 +FLAGRANT 1 0 1 1 +FLAGONS 1 0 1 1 +FLAGON 1 0 1 1 +FLAGGED 1 0 1 1 +FLAG 1 0 1 1 +FIXING 1 0 1 1 +FIXED 5 0 5 5 +FIX 1 0 1 1 +FIVE 20 0 20 20 +FITTED 2 0 2 2 +FITS 1 0 1 1 +FIT 7 0 7 7 +FISHER 2 0 2 2 +FIRMLY 2 0 2 2 +FIRM 1 0 1 1 +FIRING 2 0 2 2 +FIREPLACE 1 0 1 1 +FIREMAN 3 0 3 3 +FIREFLY 1 0 1 1 +FIRED 1 0 1 1 +FINS 1 0 1 1 +FINISHING 2 0 2 2 +FINISH 3 0 3 3 +FINGERS 1 0 1 1 +FINEST 1 0 1 1 +FINE 10 0 10 10 +FINANCIAL 1 0 1 1 +FINAL 2 0 2 2 +FIN 1 0 1 1 +FILTER 1 0 1 1 +FILMY 1 0 1 1 +FILLED 5 0 5 5 +FIGURE'S 1 0 1 1 +FIGURE 3 0 3 3 +FIGHTING 1 0 1 1 +FIGHT 5 0 5 5 +FIFTY 14 0 14 14 +FIFTEENTH 1 0 1 1 +FIFTEEN 7 0 7 7 +FIERCE 2 0 2 2 +FIENDISH 1 0 1 1 +FICKLE 2 0 2 2 +FEVERISH 4 0 4 4 +FEVER 1 0 1 1 +FETTERS 1 0 1 1 +FETCHED 1 0 1 1 +FETCH 7 0 7 7 +FESTIVAL 1 0 1 1 +FEROCIOUS 1 0 1 1 +FENDER 1 0 1 1 +FENCED 1 0 1 1 +FENCE 4 0 4 4 +FEMALE 1 0 1 1 +FELLOWSHIP 1 0 1 1 +FELLOWS 2 0 2 2 +FEELINGS 3 0 3 3 +FEED 1 0 1 1 +FEEBLY 1 0 1 1 +FEEBLE 2 0 2 2 +FEDERAL 1 0 1 1 +FED 1 0 1 1 +FEBRUARY 5 0 5 5 +FEATURES 1 0 1 1 +FEATHERS 1 0 1 1 +FEATHER 1 0 1 1 +FEARS 1 0 1 1 +FEARLESS 1 0 1 1 +FEARING 1 0 1 1 +FEARFUL 2 0 2 2 +FEARED 4 0 4 4 +FEAR 13 0 13 13 +FAVOURS 1 0 1 1 +FATTER 1 0 1 1 +FATIGUE 2 0 2 2 +FATHERLY 1 0 1 1 +FATALLY 1 0 1 1 +FASTER 2 0 2 2 +FASHIONS 1 0 1 1 +FASHIONED 2 0 2 2 +FASHION 2 0 2 2 +FASCINATION 1 0 1 1 +FARTHEST 1 0 1 1 +FARTHER 3 0 3 3 +FARMS 1 0 1 1 +FARM 3 0 3 3 +FAREWELL 1 0 1 1 +FARED 1 0 1 1 +FANTASTIC 1 0 1 1 +FANS 1 0 1 1 +FANCY 4 0 4 4 +FANCIFUL 1 0 1 1 +FANCIED 1 0 1 1 +FANATICS 1 0 1 1 +FAMILIES 3 0 3 3 +FAMILIARITY 1 0 1 1 +FAMILIAR 2 0 2 2 +FALSE 1 0 1 1 +FALLEN 1 0 1 1 +FAITHLESS 1 0 1 1 +FAITHFULLY 2 0 2 2 +FAITHFUL 3 0 3 3 +FAIRY 2 0 2 2 +FAINTNESS 1 0 1 1 +FAINTING 2 0 2 2 +FAINT 4 0 4 4 +FAILURES 3 0 3 3 +FAILURE 1 0 1 1 +FAILS 1 0 1 1 +FAGOTS 1 0 1 1 +FAGGOT 1 0 1 1 +FACTS 3 0 3 3 +FACTORIES 2 0 2 2 +FACTOR 1 0 1 1 +FACING 3 0 3 3 +FACES 2 0 2 2 +FABULOUS 1 0 1 1 +EYELIDS 1 0 1 1 +EXTREMELY 4 0 4 4 +EXTREME 2 0 2 2 +EXTINGUISHING 1 0 1 1 +EXTINGUISH 1 0 1 1 +EXTERNAL 2 0 2 2 +EXTENUATING 1 0 1 1 +EXTENT 2 0 2 2 +EXTENSION 1 0 1 1 +EXTENDING 2 0 2 2 +EXTENDED 2 0 2 2 +EXTEMPORIZED 1 0 1 1 +EXPRESSLY 1 0 1 1 +EXPRESSION 4 0 4 4 +EXPRESSED 3 0 3 3 +EXPOSURE 1 0 1 1 +EXPOSES 1 0 1 1 +EXPLANATORY 1 0 1 1 +EXPLAINING 1 0 1 1 +EXPLAIN 1 0 1 1 +EXPERIMENTS 1 0 1 1 +EXPERIMENTING 1 0 1 1 +EXPERIENCES 1 0 1 1 +EXPERIENCED 1 0 1 1 +EXPENSES 2 0 2 2 +EXPENSE 2 0 2 2 +EXPENDED 1 0 1 1 +EXPELLED 1 0 1 1 +EXPEL 1 0 1 1 +EXPEDIENT 1 0 1 1 +EXPECTS 1 0 1 1 +EXPECT 4 0 4 4 +EXPANSE 2 0 2 2 +EXOTIC 1 0 1 1 +EXIT 1 0 1 1 +EXISTS 1 0 1 1 +EXISTING 1 0 1 1 +EXISTENCE 5 0 5 5 +EXISTED 1 0 1 1 +EXIST 2 0 2 2 +EXHIBITED 4 0 4 4 +EXERTIONS 1 0 1 1 +EXERTING 1 0 1 1 +EXERTED 1 0 1 1 +EXERCISES 1 0 1 1 +EXERCISE 3 0 3 3 +EXECUTIONER'S 2 0 2 2 +EXECUTION 2 0 2 2 +EXECRABLE 1 0 1 1 +EXCUSES 1 0 1 1 +EXCUSE 3 0 3 3 +EXCUSABLE 1 0 1 1 +EXCLAMATION 1 0 1 1 +EXCLAIMING 1 0 1 1 +EXCLAIM 1 0 1 1 +EXCITEMENT 4 0 4 4 +EXCITEDLY 2 0 2 2 +EXCITED 2 0 2 2 +EXCITE 1 0 1 1 +EXCITABILITY 1 0 1 1 +EXCHANGED 2 0 2 2 +EXCHANGE 1 0 1 1 +EXCESSIVELY 1 0 1 1 +EXCESS 1 0 1 1 +EXCEPTIONALLY 2 0 2 2 +EXCEPTION 1 0 1 1 +EXCELLENT 5 0 5 5 +EXCELLENCY 4 0 4 4 +EXCEEDINGLY 1 0 1 1 
+EXCEEDING 3 0 3 3 +EXASPERATING 1 0 1 1 +EXAMPLE 3 0 3 3 +EXAMINE 2 0 2 2 +EXALTED 1 0 1 1 +EXAGGERATE 1 0 1 1 +EXACTITUDE 2 0 2 2 +EXACT 1 0 1 1 +EVIDENTLY 4 0 4 4 +EVIDENT 3 0 3 3 +EVIDENCE 2 0 2 2 +EVERYWHERE 4 0 4 4 +EVERYTHING'S 1 0 1 1 +EVERYTHING 15 0 15 15 +EVERYBODY 6 0 6 6 +EVENTS 4 0 4 4 +EVENT 1 0 1 1 +EVENING 9 0 9 9 +EVE 2 0 2 2 +EVAPORATION 2 0 2 2 +EVAPORATING 1 0 1 1 +EVAPORATE 3 0 3 3 +EVADED 1 0 1 1 +EUROPEAN 1 0 1 1 +EUROPE 1 0 1 1 +EUNUCH'S 1 0 1 1 +ETHEREAL 2 0 2 2 +ETERNITY 1 0 1 1 +ESTRANGE 1 0 1 1 +ESTHER 1 0 1 1 +ESTEEM 3 0 3 3 +ESTATE 1 0 1 1 +ESTABLISHMENT 1 0 1 1 +ESTABLISHED 2 0 2 2 +ESTABLISH 1 0 1 1 +ESSENTIALLY 1 0 1 1 +ESSENTIAL 1 0 1 1 +ESSENCE 1 0 1 1 +ESSAY 1 0 1 1 +ESQUIRES 1 0 1 1 +ESPECIAL 1 0 1 1 +ESCAPADE 1 0 1 1 +ERRANT 1 0 1 1 +ERECTS 1 0 1 1 +ERECTED 3 0 3 3 +ERECT 1 0 1 1 +EQUIVALENT 1 0 1 1 +EQUALLY 2 0 2 2 +EPOCH 1 0 1 1 +EPISTLES 1 0 1 1 +EPISTLE 1 0 1 1 +EPIGASTER 1 0 1 1 +ENVYING 1 0 1 1 +ENVY 3 0 3 3 +ENVIRONMENT 1 0 1 1 +ENVIOUS 1 0 1 1 +ENVIED 1 0 1 1 +ENVELOPE 1 0 1 1 +ENTREATY 1 0 1 1 +ENTREATINGLY 1 0 1 1 +ENTREATIES 1 0 1 1 +ENTREATED 1 0 1 1 +ENTHUSIASM 3 0 3 3 +ENTERTAINMENT 1 0 1 1 +ENTERTAINING 1 0 1 1 +ENTERTAIN 1 0 1 1 +ENTERED 11 0 11 11 +ENTAILED 1 0 1 1 +ENLISTMENT 1 0 1 1 +ENJOYMENT 3 0 3 3 +ENGRAVED 1 0 1 1 +ENGLAND 3 0 3 3 +ENGAGEMENTS 1 0 1 1 +ENGAGEMENT 1 0 1 1 +ENGAGED 2 0 2 2 +ENGAGE 1 0 1 1 +ENERGY 1 0 1 1 +ENEMIES 2 0 2 2 +ENDURANCE 1 0 1 1 +ENDING 1 0 1 1 +ENDEAVOURED 1 0 1 1 +ENCOURAGED 2 0 2 2 +ENCOUNTERED 1 0 1 1 +ENCOMPASSED 1 0 1 1 +ENCHANTMENT 2 0 2 2 +ENCHANTED 3 0 3 3 +ENCAMPMENT 1 0 1 1 +ENCAMPED 1 0 1 1 +EMPTY 8 0 8 8 +EMPTIES 1 0 1 1 +EMPTIED 2 0 2 2 +EMPRESSES 1 0 1 1 +EMPLOYED 2 0 2 2 +EMPLOY 1 0 1 1 +EMPIRE 3 0 3 3 +EMPHATIC 2 0 2 2 +EMPHASIZE 1 0 1 1 +EMPERORS 2 0 2 2 +EMPEROR 1 0 1 1 +EMOTIONS 2 0 2 2 +EMIR 2 0 2 2 +EMERGED 1 0 1 1 +EMBROIDERY 1 0 1 1 +EMBRACES 1 0 1 1 +EMBRACED 1 0 1 1 +EMBARRASSMENT 1 0 1 1 +EMBARRASSED 1 0 1 1 +EMBARKED 2 0 2 2 +ELYSIAN 1 0 1 1 +ELSIE'S 1 0 1 1 +ELSIE 1 0 1 1 +ELLIS 1 0 1 1 +ELKINS 1 0 1 1 +ELIZABETH 1 0 1 1 +ELEVENTH 1 0 1 1 +ELEVEN 4 0 4 4 +ELEVATION 1 0 1 1 +ELEPHANT 1 0 1 1 +ELEMENTS 1 0 1 1 +ELEGANT 1 0 1 1 +ELECTRIC 1 0 1 1 +ELECTION 4 0 4 4 +ELDEST 1 0 1 1 +ELBOWED 1 0 1 1 +ELAPSED 1 0 1 1 +ELAPSE 1 0 1 1 +EKED 1 0 1 1 +EJACULATED 1 0 1 1 +EIGHTEENTH 3 0 3 3 +EIGHTEEN 10 0 10 10 +EGYPTIAN 6 0 6 6 +EGYPT 5 0 5 5 +EGG 1 0 1 1 +EFFORTS 1 0 1 1 +EFFORT 4 0 4 4 +EFFECTS 2 0 2 2 +EFFECTIVE 1 0 1 1 +EFFECT 1 0 1 1 +EDWARD 1 0 1 1 +EDUCATION 2 0 2 2 +EDUCATED 1 0 1 1 +EDGES 1 0 1 1 +EDGE 2 0 2 2 +ECONOMIZE 1 0 1 1 +ECONOMICAL 1 0 1 1 +ECHOES 1 0 1 1 +ECCLESIASTICS 1 0 1 1 +EATING 2 0 2 2 +EAT 12 0 12 12 +EASTERN 2 0 2 2 +EASIEST 1 0 1 1 +EARTHEN 1 0 1 1 +EARNED 1 0 1 1 +EARN 2 0 2 2 +EARLY 8 0 8 8 +EARLINESS 1 0 1 1 +EAGLE 4 0 4 4 +EAGERLY 4 0 4 4 +EAGER 2 0 2 2 +DYING 6 0 6 6 +DWELT 1 0 1 1 +DWELLS 1 0 1 1 +DWELLINGS 1 0 1 1 +DWELLERS 1 0 1 1 +DWELL 1 0 1 1 +DWARF 2 0 2 2 +DUTY 10 0 10 10 +DUTIES 2 0 2 2 +DUSTY 1 0 1 1 +DUNNO 1 0 1 1 +DUNNING 1 0 1 1 +DUN 1 0 1 1 +DULL 3 0 3 3 +DUE 5 0 5 5 +DRY 6 0 6 6 +DRUNK 2 0 2 2 +DRUMS 1 0 1 1 +DRUGGED 2 0 2 2 +DROWNING 1 0 1 1 +DROWN 1 0 1 1 +DROVE 1 0 1 1 +DROPS 1 0 1 1 +DROPPED 8 0 8 8 +DROP 3 0 3 3 +DROOPING 2 0 2 2 +DRIVING 1 0 1 1 +DRIVEN 1 0 1 1 +DRIVE 5 0 5 5 +DRINKING 4 0 4 4 +DRIFT 1 0 1 1 +DRIED 1 0 1 1 +DREW 7 0 7 7 +DRESSING 1 0 1 1 +DRESSES 1 0 1 1 +DRESSED 1 0 1 1 +DRESS 1 0 1 1 +DREAMING 2 0 2 2 +DREAMED 1 0 1 1 +DREADFULLY 1 0 1 1 +DREADFUL 2 0 2 2 +DRAWN 1 0 1 1 +DRANK 2 0 2 2 +DRAMATIC 1 0 1 1 +DRAINED 1 
0 1 1 +DRAIN 1 0 1 1 +DRAGONS 1 0 1 1 +DRAGON 1 0 1 1 +DRAGGED 1 0 1 1 +DRAG 1 0 1 1 +DOZEN 2 0 2 2 +DOWNSTAIRS 1 0 1 1 +DOWNS 2 0 2 2 +DOVES 1 0 1 1 +DOUBTLESS 2 0 2 2 +DOUBTFUL 1 0 1 1 +DOUBLE 5 0 5 5 +DOT 1 0 1 1 +DOORS 3 0 3 3 +DOMINION 1 0 1 1 +DOMINATES 1 0 1 1 +DOMED 1 0 1 1 +DOME 2 0 2 2 +DOLLARS 2 0 2 2 +DOINGS 1 0 1 1 +DOIN 1 0 1 1 +DOGGEDLY 1 0 1 1 +DODGING 1 0 1 1 +DIXON 4 0 4 4 +DIVISION 1 0 1 1 +DIVINE 1 0 1 1 +DIVIDES 1 0 1 1 +DIVERT 1 0 1 1 +DISTURBING 1 0 1 1 +DISTURBED 1 0 1 1 +DISTURBANCE 1 0 1 1 +DISTURB 2 0 2 2 +DISTRICTS 1 0 1 1 +DISTRICT 2 0 2 2 +DISTRIBUTED 1 0 1 1 +DISTRIBUTE 1 0 1 1 +DISTRACTED 2 0 2 2 +DISTINGUISH 2 0 2 2 +DISTINCTLY 1 0 1 1 +DISTINCTIVE 1 0 1 1 +DISTINCT 1 0 1 1 +DISTICHS 1 0 1 1 +DISTENDED 1 0 1 1 +DISTANCES 1 0 1 1 +DISTANCE 3 0 3 3 +DISSIPATION 2 0 2 2 +DISSENTERING 1 0 1 1 +DISREGARDED 1 0 1 1 +DISPUTED 1 0 1 1 +DISPROVE 1 0 1 1 +DISPOSITION 2 0 2 2 +DISPOSAL 1 0 1 1 +DISPLEASED 1 0 1 1 +DISPLAY 1 0 1 1 +DISPERSED 2 0 2 2 +DISPENSED 1 0 1 1 +DISPENSE 1 0 1 1 +DISMAL 1 0 1 1 +DISHONEST 1 0 1 1 +DISGUST 1 0 1 1 +DISGRACE 4 0 4 4 +DISENTANGLE 1 0 1 1 +DISEASE 1 0 1 1 +DISCUSSIONS 1 0 1 1 +DISCUSSION 1 0 1 1 +DISCUSSED 3 0 3 3 +DISCRIMINATION 1 0 1 1 +DISCRETION 1 0 1 1 +DISCOVERY 3 0 3 3 +DISCOVERIES 1 0 1 1 +DISCOVERED 4 0 4 4 +DISCOVER 2 0 2 2 +DISCOURSES 1 0 1 1 +DISCOURAGEMENTS 1 0 1 1 +DISCONTENT 1 0 1 1 +DISCONCERTION 1 0 1 1 +DISCOMFORT 1 0 1 1 +DISCLOSURES 1 0 1 1 +DISCLOSE 1 0 1 1 +DISCLAIM 1 0 1 1 +DISCIPLINE 1 0 1 1 +DISCERNING 1 0 1 1 +DISAPPOINTED 2 0 2 2 +DISAPPEARS 1 0 1 1 +DISAPPEARED 5 0 5 5 +DISADVANTAGES 3 0 3 3 +DISADVANTAGEOUS 1 0 1 1 +DIRECTLY 3 0 3 3 +DIRECTIONS 1 0 1 1 +DIRECTED 3 0 3 3 +DIRECT 2 0 2 2 +DIRE 1 0 1 1 +DIP 2 0 2 2 +DINSMORE 2 0 2 2 +DINNER 6 0 6 6 +DINING 1 0 1 1 +DINE 2 0 2 2 +DINARS 1 0 1 1 +DIMPLED 1 0 1 1 +DIMLY 1 0 1 1 +DIMINISHED 1 0 1 1 +DIMINISH 1 0 1 1 +DIM 2 0 2 2 +DILUTE 1 0 1 1 +DILIGENTLY 1 0 1 1 +DILAPIDATED 1 0 1 1 +DIGNITY 4 0 4 4 +DIGGERS 2 0 2 2 +DIGGER 10 0 10 10 +DIFFICULTY 7 0 7 7 +DIFFICULT 2 0 2 2 +DIFFERENT 7 0 7 7 +DIFFERENCES 1 0 1 1 +DIFFERENCE 7 0 7 7 +DIFFER 1 0 1 1 +DIED 13 0 13 13 +DIDST 1 0 1 1 +DICTATED 1 0 1 1 +DICE 1 0 1 1 +DIAMETER 1 0 1 1 +DIALOGUE 1 0 1 1 +DEVOURED 1 0 1 1 +DEVOTIONS 1 0 1 1 +DEVOTION 1 0 1 1 +DEVILS 2 0 2 2 +DEVIL 4 0 4 4 +DEVICE 1 0 1 1 +DEVELOPED 1 0 1 1 +DETERMINED 5 0 5 5 +DETECTIVE'S 1 0 1 1 +DETECTION 1 0 1 1 +DETECTED 1 0 1 1 +DETAILS 2 0 2 2 +DETAILED 3 0 3 3 +DESTROYS 1 0 1 1 +DESTROYER 1 0 1 1 +DESTROYED 4 0 4 4 +DESTROY 3 0 3 3 +DESTINED 1 0 1 1 +DESTINATION 1 0 1 1 +DESSERT 2 0 2 2 +DESPOTISM 2 0 2 2 +DESPOILED 1 0 1 1 +DESPISED 1 0 1 1 +DESPISE 1 0 1 1 +DESPERATELY 1 0 1 1 +DESPERATE 2 0 2 2 +DESPAIR 2 0 2 2 +DESIRED 2 0 2 2 +DESIRABLE 1 0 1 1 +DESERVING 1 0 1 1 +DESERVES 1 0 1 1 +DESERVE 2 0 2 2 +DESERTING 1 0 1 1 +DESERTED 2 0 2 2 +DESERT 1 0 1 1 +DESCRIPTION 2 0 2 2 +DESCRIBED 1 0 1 1 +DESCRIBE 1 0 1 1 +DESCEND 1 0 1 1 +DERIVE 1 0 1 1 +DEPRESSION 1 0 1 1 +DEPRECATINGLY 1 0 1 1 +DEPRECATE 1 0 1 1 +DEPOSITED 1 0 1 1 +DEPOSED 1 0 1 1 +DEPLORED 1 0 1 1 +DEPENDS 2 0 2 2 +DEPENDENCE 1 0 1 1 +DEPEND 1 0 1 1 +DEPARTURE 2 0 2 2 +DEPARTMENT 2 0 2 2 +DEPARTING 1 0 1 1 +DEPARTED 3 0 3 3 +DENOUNCED 1 0 1 1 +DENOTING 1 0 1 1 +DENIAL 1 0 1 1 +DEN 1 0 1 1 +DEMETER 4 0 4 4 +DEMANDED 1 0 1 1 +DELIVERY 1 0 1 1 +DELIVERED 4 0 4 4 +DELIGHTFUL 2 0 2 2 +DELIGHT 7 0 7 7 +DELICIOUSLY 1 0 1 1 +DELICATE 3 0 3 3 +DELIBERATELY 1 0 1 1 +DELIBERATE 1 0 1 1 +DELAYED 1 0 1 1 +DELAY 3 0 3 3 +DEJECTION 1 0 1 1 +DEITY 1 0 1 1 +DEGREE 1 0 1 1 +DEGENERATING 1 0 1 1 
+DEFYING 1 0 1 1 +DEFRAUD 1 0 1 1 +DEFORMED 2 0 2 2 +DEFINED 1 0 1 1 +DEFIANT 1 0 1 1 +DEFENDING 1 0 1 1 +DEFENDERS 2 0 2 2 +DEFEND 4 0 4 4 +DEFECTION 1 0 1 1 +DEFEAT 1 0 1 1 +DEEPLY 3 0 3 3 +DEEMED 1 0 1 1 +DECORATION 1 0 1 1 +DECLINING 1 0 1 1 +DECLARING 1 0 1 1 +DECLARED 1 0 1 1 +DECKS 1 0 1 1 +DECISION 3 0 3 3 +DECIDED 5 0 5 5 +DECIDE 2 0 2 2 +DECEPTION 1 0 1 1 +DECEMBER 2 0 2 2 +DECEIVED 5 0 5 5 +DECEIVE 1 0 1 1 +DECEASED 1 0 1 1 +DECAY 1 0 1 1 +DEBTS 1 0 1 1 +DEBATED 1 0 1 1 +DEBATE 2 0 2 2 +DEBARRED 1 0 1 1 +DEATHS 1 0 1 1 +DEATHLY 1 0 1 1 +DEATHLIKE 1 0 1 1 +DEATH 16 0 16 16 +DEARER 1 0 1 1 +DEALT 2 0 2 2 +DEALER 1 0 1 1 +DEAL 11 0 11 11 +DEAF 1 0 1 1 +DAZED 1 0 1 1 +DAYLIGHT 2 0 2 2 +DAYBREAK 2 0 2 2 +DAY'S 1 0 1 1 +DAWNED 2 0 2 2 +DAWN 4 0 4 4 +DAVID 2 0 2 2 +DAUNTED 1 0 1 1 +DAUGHTER'S 1 0 1 1 +DASHING 1 0 1 1 +DASHED 1 0 1 1 +DARTED 1 0 1 1 +DARKNESS 7 0 7 7 +DARING 1 0 1 1 +DARED 3 0 3 3 +DARCY'S 1 0 1 1 +DARCY 6 0 6 6 +DANGERS 1 0 1 1 +DANGEROUS 2 0 2 2 +DANGER 11 0 11 11 +DANDY 1 0 1 1 +DANCE 2 0 2 2 +DAMPNESS 1 0 1 1 +DAMNED 1 0 1 1 +DAMES 1 0 1 1 +DAMASCUS 4 0 4 4 +DAM 1 0 1 1 +CYNICISM 1 0 1 1 +CUTTER'S 1 0 1 1 +CUTTER 3 0 3 3 +CUSHION 1 0 1 1 +CURVED 2 0 2 2 +CURTAINS 2 0 2 2 +CURSORILY 1 0 1 1 +CURSES 1 0 1 1 +CURRENT 1 0 1 1 +CURRENCY 1 0 1 1 +CURL 1 0 1 1 +CURIOUS 4 0 4 4 +CURED 1 0 1 1 +CURE 4 0 4 4 +CURATE 2 0 2 2 +CUPBOARD 2 0 2 2 +CUP 1 0 1 1 +CULTURED 1 0 1 1 +CULTURE 1 0 1 1 +CULTIVATED 2 0 2 2 +CULT 1 0 1 1 +CUBITS 1 0 1 1 +CRYSTALLINE 1 0 1 1 +CRYING 1 0 1 1 +CRY 2 0 2 2 +CRUSHED 1 0 1 1 +CRUSADER 1 0 1 1 +CRUELTY 4 0 4 4 +CRUEL 4 0 4 4 +CRUDE 1 0 1 1 +CROWNED 1 0 1 1 +CROWDED 2 0 2 2 +CROWD 5 0 5 5 +CROSSED 5 0 5 5 +CROSS 8 0 8 8 +CROOKS 1 0 1 1 +CROOKED 1 0 1 1 +CROAKING 1 0 1 1 +CRITICS 1 0 1 1 +CRITICAL 2 0 2 2 +CRIPPLED 2 0 2 2 +CRIMSON 1 0 1 1 +CRIMINALS 1 0 1 1 +CRIMINAL 1 0 1 1 +CRIES 4 0 4 4 +CRICKETS 1 0 1 1 +CREPT 1 0 1 1 +CREEPY 1 0 1 1 +CREEPING 1 0 1 1 +CREDITS 3 0 3 3 +CREATURES 2 0 2 2 +CREATURE 4 0 4 4 +CREATOR 4 0 4 4 +CREATIONS 1 0 1 1 +CREATING 1 0 1 1 +CREATED 3 0 3 3 +CREATE 3 0 3 3 +CREASES 1 0 1 1 +CREASED 1 0 1 1 +CREAKED 1 0 1 1 +CRAWLED 2 0 2 2 +CRASHED 1 0 1 1 +CRASH 1 0 1 1 +CRAFT 1 0 1 1 +CRACKERS 1 0 1 1 +CRACKED 2 0 2 2 +COWARDS 1 0 1 1 +COWARD 1 0 1 1 +COVERING 1 0 1 1 +COVERED 5 0 5 5 +COVER 1 0 1 1 +COVE 1 0 1 1 +COUSINS 1 0 1 1 +COUSIN 10 0 10 10 +COURAGE 4 0 4 4 +COUPLETS 1 0 1 1 +COUPLE 2 0 2 2 +COUNTY 9 0 9 9 +COUNTESS 1 0 1 1 +COUNTER 1 0 1 1 +COUNSELLED 1 0 1 1 +COUGH 3 0 3 3 +COTTONY 1 0 1 1 +COTTON 3 0 3 3 +COSETTE 2 0 2 2 +CORRIDOR 2 0 2 2 +CORRESPONDENCE 1 0 1 1 +CORPSES 1 0 1 1 +CORPSE 3 0 3 3 +CORPORATIONS 1 0 1 1 +CORNERS 1 0 1 1 +CORDIAL 1 0 1 1 +COPY 1 0 1 1 +COP'S 1 0 1 1 +COOLNESS 2 0 2 2 +COOK 4 0 4 4 +CONVINCING 1 0 1 1 +CONVINCED 1 0 1 1 +CONVICTION 3 0 3 3 +CONVEYANCE 1 0 1 1 +CONVERTS 1 0 1 1 +CONVERSATION 10 0 10 10 +CONVENTIONS 1 0 1 1 +CONVENTION 1 0 1 1 +CONVENT 4 0 4 4 +CONVENIENCES 1 0 1 1 +CONTROLLED 1 0 1 1 +CONTROL 2 0 2 2 +CONTRIVE 1 0 1 1 +CONTRARY 5 0 5 5 +CONTRADICTION 1 0 1 1 +CONTRACTED 1 0 1 1 +CONTRACT 3 0 3 3 +CONTINUED 11 0 11 11 +CONTINUE 3 0 3 3 +CONTINUATION 1 0 1 1 +CONTINUANCE 1 0 1 1 +CONTINUALLY 1 0 1 1 +CONTENTS 1 0 1 1 +CONTENTION 1 0 1 1 +CONTENTED 1 0 1 1 +CONTENT 1 0 1 1 +CONTEMPORARY 2 0 2 2 +CONTAINS 1 0 1 1 +CONTAINING 2 0 2 2 +CONTAINED 1 0 1 1 +CONTAIN 1 0 1 1 +CONTAGIOUS 2 0 2 2 +CONTACT 3 0 3 3 +CONSUMED 2 0 2 2 +CONSULTED 3 0 3 3 +CONSULTATIONS 1 0 1 1 +CONSTRUCT 1 0 1 1 +CONSTRAINED 1 0 1 1 +CONSTITUTE 1 0 1 1 +CONSTITUENT 1 0 1 1 +CONSTANTLY 3 0 3 3 +CONSTANTIUS 1 
0 1 1 +CONSPIRATORS 2 0 2 2 +CONSPIRACY 1 0 1 1 +CONSORTED 1 0 1 1 +CONSOLES 1 0 1 1 +CONSISTS 2 0 2 2 +CONSISTENCY 1 0 1 1 +CONSISTED 1 0 1 1 +CONSIDERING 2 0 2 2 +CONSIDERED 3 0 3 3 +CONSIDERATION 2 0 2 2 +CONSIDERABLE 6 0 6 6 +CONSIDER 1 0 1 1 +CONSERVATIVE 2 0 2 2 +CONSEQUENCES 1 0 1 1 +CONSEQUENCE 1 0 1 1 +CONSENTED 1 0 1 1 +CONSENT 2 0 2 2 +CONSCIOUSNESS 2 0 2 2 +CONSCIOUSLY 1 0 1 1 +CONSCIENTIOUS 1 0 1 1 +CONSCIENCES 1 0 1 1 +CONSCIENCE 3 0 3 3 +CONQUEST 3 0 3 3 +CONQUEROR 1 0 1 1 +CONQUERING 1 0 1 1 +CONQUERED 2 0 2 2 +CONNOISSEUR 1 0 1 1 +CONNECTIONS 1 0 1 1 +CONNECTION 4 0 4 4 +CONNECTED 1 0 1 1 +CONNECT 2 0 2 2 +CONJECTURES 1 0 1 1 +CONGRESSES 1 0 1 1 +CONGRESS 3 0 3 3 +CONFUSION 4 0 4 4 +CONFOUND 1 0 1 1 +CONFLICT 2 0 2 2 +CONFISCATION 1 0 1 1 +CONFIRMS 1 0 1 1 +CONFIRMED 2 0 2 2 +CONFINEMENT 1 0 1 1 +CONFIDENTIAL 1 0 1 1 +CONFIDENCE 3 0 3 3 +CONFESSION 4 0 4 4 +CONFESSED 2 0 2 2 +CONFERRING 1 0 1 1 +CONFERENCE 1 0 1 1 +CONFECTIONS 1 0 1 1 +CONFECTIONER 1 0 1 1 +CONDUCTED 2 0 2 2 +CONDUCT 4 0 4 4 +CONDITIONS 4 0 4 4 +CONDITION 4 0 4 4 +CONDESCEND 1 0 1 1 +CONDEMNED 2 0 2 2 +CONCLUSION 2 0 2 2 +CONCILIATE 1 0 1 1 +CONCIERGE'S 1 0 1 1 +CONCERNS 1 0 1 1 +CONCERNING 1 0 1 1 +CONCEPTION 5 0 5 5 +CONCENTRATED 2 0 2 2 +CONCENTRATE 1 0 1 1 +CONCEIVE 1 0 1 1 +CONCEITED 1 0 1 1 +CONCEAL 3 0 3 3 +COMTE 1 0 1 1 +COMRADE 3 0 3 3 +COMPULSORY 1 0 1 1 +COMPREHENDED 1 0 1 1 +COMPOUND 1 0 1 1 +COMPOSURE 1 0 1 1 +COMPOSITION 1 0 1 1 +COMPOSED 1 0 1 1 +COMPLY 1 0 1 1 +COMPLIMENT 1 0 1 1 +COMPLICITY 1 0 1 1 +COMPLETELY 6 0 6 6 +COMPLETED 1 0 1 1 +COMPLETE 1 0 1 1 +COMPLAIN 1 0 1 1 +COMPETITION 1 0 1 1 +COMPELLING 1 0 1 1 +COMPELLED 1 0 1 1 +COMPATRIOT 1 0 1 1 +COMPASS 1 0 1 1 +COMPARATIVELY 1 0 1 1 +COMPANY 13 0 13 13 +COMPANIONS 3 0 3 3 +COMPANION'S 1 0 1 1 +COMPANION 4 0 4 4 +COMMUNITY 5 0 5 5 +COMMUNICATION 2 0 2 2 +COMMUNICATES 2 0 2 2 +COMMUNICATED 1 0 1 1 +COMMUNICANTS 1 0 1 1 +COMMONS 3 0 3 3 +COMMONLY 1 0 1 1 +COMMONERS 1 0 1 1 +COMMON 3 0 3 3 +COMMITTED 4 0 4 4 +COMMISSIONED 1 0 1 1 +COMMISSION 1 0 1 1 +COMMISSARY 2 0 2 2 +COMMENT 2 0 2 2 +COMMENDING 1 0 1 1 +COMMENDED 1 0 1 1 +COMMANDING 2 0 2 2 +COMMANDED 2 0 2 2 +COMMAND 2 0 2 2 +COMICAL 1 0 1 1 +COMFORTABLE 2 0 2 2 +COMFORT 2 0 2 2 +COMBATIVE 1 0 1 1 +COMBAT 2 0 2 2 +COLOURED 1 0 1 1 +COLOSSAL 1 0 1 1 +COLONELS 1 0 1 1 +COLLECTOR'S 1 0 1 1 +COLLECTOR 1 0 1 1 +COLLECTION 1 0 1 1 +COLLECTING 2 0 2 2 +COLLAR 2 0 2 2 +COLIC 1 0 1 1 +COINCIDENCES 1 0 1 1 +COIL 1 0 1 1 +COFFEE 1 0 1 1 +COCKING 1 0 1 1 +COAST 2 0 2 2 +COACH 3 0 3 3 +CLUTCHING 1 0 1 1 +CLUTCH 1 0 1 1 +CLUNG 1 0 1 1 +CLUBBED 1 0 1 1 +CLUB 3 0 3 3 +CLOVER 1 0 1 1 +CLOUDS 1 0 1 1 +CLOTHES 8 0 8 8 +CLOTHE 1 0 1 1 +CLOSING 2 0 2 2 +CLOSES 1 0 1 1 +CLOSED 4 0 4 4 +CLOGGED 2 0 2 2 +CLOCKS 1 0 1 1 +CLERK 2 0 2 2 +CLERICAL 2 0 2 2 +CLENCHING 1 0 1 1 +CLEMENT 1 0 1 1 +CLEMENCY 1 0 1 1 +CLEARLY 1 0 1 1 +CLEARER 1 0 1 1 +CLEARED 2 0 2 2 +CLEAR 7 0 7 7 +CLEANED 2 0 2 2 +CLEAN 4 0 4 4 +CLASPED 1 0 1 1 +CLASP 1 0 1 1 +CLASHING 1 0 1 1 +CLAPPED 1 0 1 1 +CLANKING 1 0 1 1 +CLAIR 3 0 3 3 +CLAIMS 1 0 1 1 +CLAIMED 1 0 1 1 +CIVILIZED 1 0 1 1 +CIVILITY 1 0 1 1 +CIVILITIES 1 0 1 1 +CITY 16 0 16 16 +CITIZENS 6 0 6 6 +CITIZEN 1 0 1 1 +CIRCUMSTANTIAL 1 0 1 1 +CIRCUMSTANCES 6 0 6 6 +CIRCULAR 1 0 1 1 +CIRCUIT 1 0 1 1 +CIRCLES 1 0 1 1 +CIRCLE 2 0 2 2 +CIGARS 1 0 1 1 +CIDER 1 0 1 1 +CHURCHYARDS 1 0 1 1 +CHRYSIPPUS 2 0 2 2 +CHRISTMAS 1 0 1 1 +CHRISTI 1 0 1 1 +CHRIST 3 0 3 3 +CHRIS'S 1 0 1 1 +CHOSEN 3 0 3 3 +CHOSE 2 0 2 2 +CHOP 1 0 1 1 +CHOOSE 3 0 3 3 +CHOKE 1 0 1 1 +CHOIR 2 0 2 2 +CHOICE 1 0 1 1 +CHIRP 1 0 1 
1 +CHINESE 1 0 1 1 +CHIMNEY 7 0 7 7 +CHIMES 1 0 1 1 +CHILDREN 13 0 13 13 +CHILDHOOD 1 0 1 1 +CHILD'S 1 0 1 1 +CHILD 6 0 6 6 +CHEESE 1 0 1 1 +CHEERFULNESS 1 0 1 1 +CHEERFULLY 1 0 1 1 +CHEERFUL 4 0 4 4 +CHEEKED 1 0 1 1 +CHECKED 1 0 1 1 +CHEATING 1 0 1 1 +CHEAPLY 1 0 1 1 +CHATTING 1 0 1 1 +CHASSEUR 1 0 1 1 +CHASM 1 0 1 1 +CHASED 1 0 1 1 +CHARMS 1 0 1 1 +CHARMING 3 0 3 3 +CHARM 3 0 3 3 +CHARLIE 1 0 1 1 +CHARLES 2 0 2 2 +CHARITY 1 0 1 1 +CHARIOT 1 0 1 1 +CHARGES 2 0 2 2 +CHARGER 1 0 1 1 +CHARCOAL 1 0 1 1 +CHAPTERS 1 0 1 1 +CHAPS 1 0 1 1 +CHAPLET 1 0 1 1 +CHAPEL 6 0 6 6 +CHAP 1 0 1 1 +CHANTED 1 0 1 1 +CHANT 1 0 1 1 +CHANNEL 3 0 3 3 +CHANGING 2 0 2 2 +CHANGED 4 0 4 4 +CHANCES 1 0 1 1 +CHANCELLOR'S 1 0 1 1 +CHANCELLOR 6 0 6 6 +CHANCE 11 0 11 11 +CHAMPIONS 1 0 1 1 +CHAMPAGNE 1 0 1 1 +CHAMBER 5 0 5 5 +CHAIR 5 0 5 5 +CHAFING 2 0 2 2 +CETERA 2 0 2 2 +CESSATION 1 0 1 1 +CERTIFIED 1 0 1 1 +CEREMONY 1 0 1 1 +CENTURY 3 0 3 3 +CENTURIES 5 0 5 5 +CENTRES 1 0 1 1 +CENTRAL 6 0 6 6 +CENT 1 0 1 1 +CEMETERY 1 0 1 1 +CELLARS 1 0 1 1 +CELIA 1 0 1 1 +CELERY 1 0 1 1 +CELEBRATED 3 0 3 3 +CEASED 7 0 7 7 +CAVALRY 1 0 1 1 +CAUTIOUSLY 1 0 1 1 +CAUTION 1 0 1 1 +CAUSED 1 0 1 1 +CAUSE 7 0 7 7 +CAUGHT 5 0 5 5 +CATCHING 1 0 1 1 +CASTING 2 0 2 2 +CASKET 1 0 1 1 +CASHIER 1 0 1 1 +CASES 2 0 2 2 +CARVED 3 0 3 3 +CARTHUSIANS 1 0 1 1 +CARS 1 0 1 1 +CARRY 7 0 7 7 +CARROT 1 0 1 1 +CARRIAGE 3 0 3 3 +CARPET 1 0 1 1 +CARPENTER 1 0 1 1 +CAROLINA 1 0 1 1 +CARGO 1 0 1 1 +CARESSES 1 0 1 1 +CAREFULLY 3 0 3 3 +CAREFUL 4 0 4 4 +CARED 4 0 4 4 +CARDS 1 0 1 1 +CARDINALS 1 0 1 1 +CARBONATE 1 0 1 1 +CARAVAN 1 0 1 1 +CAR 5 0 5 5 +CAPTOR 1 0 1 1 +CAPTIVE 3 0 3 3 +CAPTAIN'S 1 0 1 1 +CAPITULUM 1 0 1 1 +CAPITAL 3 0 3 3 +CAPERING 1 0 1 1 +CAPERED 1 0 1 1 +CAPABLE 2 0 2 2 +CAPABILITIES 1 0 1 1 +CANVAS 1 0 1 1 +CANST 1 0 1 1 +CANOE 1 0 1 1 +CANNOT 21 0 21 21 +CANE 1 0 1 1 +CANDLESTICKS 1 0 1 1 +CANDLESTICK 2 0 2 2 +CANDLES 1 0 1 1 +CANDLE 3 0 3 3 +CANAL 1 0 1 1 +CAMPAIGNS 2 0 2 2 +CALMLY 2 0 2 2 +CALLS 1 0 1 1 +CALLING 2 0 2 2 +CALLETH 1 0 1 1 +CALLEST 1 0 1 1 +CALIPH 1 0 1 1 +CALCULATE 1 0 1 1 +CAIRO 2 0 2 2 +CAFE 1 0 1 1 +CAESARS 1 0 1 1 +CADET 1 0 1 1 +CABLE'S 1 0 1 1 +CABINET 2 0 2 2 +CABIN 4 0 4 4 +CABARET 1 0 1 1 +BUYING 2 0 2 2 +BUTTERFLIES 1 0 1 1 +BUTTER 5 0 5 5 +BUSTED 2 0 2 2 +BUSINESSES 1 0 1 1 +BUSHY 2 0 2 2 +BUSHES 1 0 1 1 +BURSTING 1 0 1 1 +BURNING 1 0 1 1 +BURNED 2 0 2 2 +BURN 1 0 1 1 +BURIED 7 0 7 7 +BURIAL 1 0 1 1 +BURGUNDY 1 0 1 1 +BURDEN 1 0 1 1 +BUNKER 1 0 1 1 +BUNDLES 2 0 2 2 +BUNDLED 1 0 1 1 +BUMS 1 0 1 1 +BULLOCK 1 0 1 1 +BULLET 2 0 2 2 +BUILDINGS 1 0 1 1 +BUILDING 3 0 3 3 +BUGLE 1 0 1 1 +BUGGY 2 0 2 2 +BUFF 1 0 1 1 +BUD 1 0 1 1 +BUCKLEY 1 0 1 1 +BUCK 1 0 1 1 +BUBBLES 1 0 1 1 +BRUTE 2 0 2 2 +BRUTALLY 1 0 1 1 +BRUTAL 1 0 1 1 +BRUSHED 2 0 2 2 +BRUISING 1 0 1 1 +BROTHERLY 1 0 1 1 +BROTHER 18 0 18 18 +BROTH 1 0 1 1 +BRONZE 1 0 1 1 +BROKER'S 1 0 1 1 +BROKEN 10 0 10 10 +BROKE 7 0 7 7 +BRITISH 1 0 1 1 +BRINGING 3 0 3 3 +BRINGETH 2 0 2 2 +BRING 12 0 12 12 +BRIM 1 0 1 1 +BRIGHT 5 0 5 5 +BRIGANDS 1 0 1 1 +BRIEF 3 0 3 3 +BRIDE 3 0 3 3 +BRICKS 1 0 1 1 +BRICK 1 0 1 1 +BREWING 1 0 1 1 +BRETHREN 3 0 3 3 +BRED 1 0 1 1 +BREATH 6 0 6 6 +BREASTS 1 0 1 1 +BREAKS 3 0 3 3 +BREAKING 3 0 3 3 +BREAKFAST 4 0 4 4 +BREAKERS 1 0 1 1 +BREAK 6 0 6 6 +BREAD 3 0 3 3 +BREACH 1 0 1 1 +BRAVELY 2 0 2 2 +BRAVE 6 0 6 6 +BRANDON 1 0 1 1 +BRANCHES 2 0 2 2 +BRANCH 3 0 3 3 +BRAG 1 0 1 1 +BRADFORD 1 0 1 1 +BRACKETS 1 0 1 1 +BRACELET 1 0 1 1 +BOXES 2 0 2 2 +BOWS 1 0 1 1 +BOWED 2 0 2 2 +BOW 3 0 3 3 +BOURGEOIS 1 0 1 1 +BOUQUET 2 0 2 2 +BOUNTY 1 0 1 1 +BOUND 5 0 5 5 +BOULEVARD 1 
0 1 1 +BOSTON 3 0 3 3 +BOSOM 3 0 3 3 +BORROWED 1 0 1 1 +BORN 7 0 7 7 +BORED 1 0 1 1 +BORE 2 0 2 2 +BORDERS 1 0 1 1 +BORDER 1 0 1 1 +BOOTY 1 0 1 1 +BOOTS 2 0 2 2 +BOOT 1 0 1 1 +BOOKS 1 0 1 1 +BOOKLET 1 0 1 1 +BOMB 1 0 1 1 +BOLTS 1 0 1 1 +BOLTED 1 0 1 1 +BOLDER 1 0 1 1 +BOILING 3 0 3 3 +BOILED 2 0 2 2 +BODY 13 0 13 13 +BODILY 3 0 3 3 +BODIES 2 0 2 2 +BOAT'S 1 0 1 1 +BOAST 1 0 1 1 +BOARDING 1 0 1 1 +BLURTED 1 0 1 1 +BLUNTLY 1 0 1 1 +BLUNTED 1 0 1 1 +BLUBBERING 1 0 1 1 +BLOWS 1 0 1 1 +BLOWN 2 0 2 2 +BLOWING 2 0 2 2 +BLOSSOM 1 0 1 1 +BLOOM 1 0 1 1 +BLOOD 8 0 8 8 +BLIZZARD'S 1 0 1 1 +BLIZZARD 2 0 2 2 +BLINDNESS 1 0 1 1 +BLINDING 1 0 1 1 +BLINDED 1 0 1 1 +BLIND 5 0 5 5 +BLESSINGS 1 0 1 1 +BLESSING 2 0 2 2 +BLESSED 6 0 6 6 +BLESS 3 0 3 3 +BLEND 1 0 1 1 +BLEAK 1 0 1 1 +BLAZING 1 0 1 1 +BLANKLY 1 0 1 1 +BLANK 1 0 1 1 +BLAMING 1 0 1 1 +BLAMED 1 0 1 1 +BLAME 2 0 2 2 +BLADE 1 0 1 1 +BLACKSTONE 1 0 1 1 +BITE 1 0 1 1 +BISHOPS 1 0 1 1 +BISHOP 4 0 4 4 +BISCUIT 1 0 1 1 +BIRTHDAY 1 0 1 1 +BIRD 1 0 1 1 +BIRCH 1 0 1 1 +BILLS 2 0 2 2 +BILIOUS 1 0 1 1 +BIGGER 1 0 1 1 +BEYOND 7 0 7 7 +BEWILDERMENT 1 0 1 1 +BETWIXT 1 0 1 1 +BETWEEN 21 0 21 21 +BETOOK 1 0 1 1 +BETIDETH 1 0 1 1 +BETIDE 2 0 2 2 +BET 1 0 1 1 +BESS 1 0 1 1 +BESPAKE 1 0 1 1 +BESOUGHT 1 0 1 1 +BESIEGERS 2 0 2 2 +BESEECH 2 0 2 2 +BERNARDONE 1 0 1 1 +BERNARD 4 0 4 4 +BEQUEATH 2 0 2 2 +BENCH 3 0 3 3 +BELOW 2 0 2 2 +BELONG 1 0 1 1 +BELLY 3 0 3 3 +BELLS 4 0 4 4 +BELLIES 1 0 1 1 +BELIEVING 1 0 1 1 +BELIEVES 1 0 1 1 +BELIEVE 16 0 16 16 +BEINGS 3 0 3 3 +BEHOLDING 1 0 1 1 +BEHOLD 5 0 5 5 +BEHIND 16 0 16 16 +BEHAVED 3 0 3 3 +BEHALF 1 0 1 1 +BEGINNING 6 0 6 6 +BEGGAR 1 0 1 1 +BEFITTING 1 0 1 1 +BEFALLEN 1 0 1 1 +BEDROOM 1 0 1 1 +BECOMES 6 0 6 6 +BECOME 15 0 15 15 +BECAME 10 0 10 10 +BEAVER 1 0 1 1 +BEAUTY 4 0 4 4 +BEAUTIFULLY 1 0 1 1 +BEAUTIFUL 8 0 8 8 +BEATEN 2 0 2 2 +BEASTS 4 0 4 4 +BEAST 2 0 2 2 +BEARD 1 0 1 1 +BEAMS 2 0 2 2 +BEALE'S 1 0 1 1 +BEAD 1 0 1 1 +BEACON 2 0 2 2 +BEACH 2 0 2 2 +BAY 1 0 1 1 +BATTLE 2 0 2 2 +BATTERY 1 0 1 1 +BATON 1 0 1 1 +BATHING 1 0 1 1 +BATHED 1 0 1 1 +BASKING 1 0 1 1 +BASKETS 1 0 1 1 +BASIS 2 0 2 2 +BASIN 1 0 1 1 +BASED 2 0 2 2 +BASE 1 0 1 1 +BARRIER 1 0 1 1 +BARRICADES 1 0 1 1 +BARRED 1 0 1 1 +BARRACK 2 0 2 2 +BARONET 1 0 1 1 +BARKING 1 0 1 1 +BARIUM 1 0 1 1 +BARBAROUS 1 0 1 1 +BARBARITY 1 0 1 1 +BAPTIST 1 0 1 1 +BANQUET 3 0 3 3 +BANKER 1 0 1 1 +BANK 9 0 9 9 +BANISH 1 0 1 1 +BANDS 1 0 1 1 +BANDITS 1 0 1 1 +BANDIT 1 0 1 1 +BALSAM 1 0 1 1 +BALLOCK 1 0 1 1 +BAKING 1 0 1 1 +BAH 1 0 1 1 +BAGS 2 0 2 2 +BAGGY 1 0 1 1 +BAGGAGE 1 0 1 1 +BADLY 3 0 3 3 +BADGE 1 0 1 1 +BADE 1 0 1 1 +BADAWI 1 0 1 1 +BACON 1 0 1 1 +BACKS 1 0 1 1 +BACKING 1 0 1 1 +BACKGROUND 3 0 3 3 +BACK 51 0 51 51 +BACHELOR 1 0 1 1 +BABYLONIA 1 0 1 1 +AWOKE 1 0 1 1 +AWKWARDNESS 1 0 1 1 +AWKWARDLY 1 0 1 1 +AWE 1 0 1 1 +AWARE 2 0 2 2 +AWAKENING 2 0 2 2 +AWAKENED 1 0 1 1 +AWAITS 1 0 1 1 +AWAITED 1 0 1 1 +AWAIT 1 0 1 1 +AVOIDED 1 0 1 1 +AVERAGE 2 0 2 2 +AUTOMATICALLY 1 0 1 1 +AUTOCRACY 1 0 1 1 +AUTHORITY 10 0 10 10 +AUTHORITIES 1 0 1 1 +AUTHOR 1 0 1 1 +AUTHENTIC 1 0 1 1 +AUSTRIA 1 0 1 1 +AUSPICIOUS 3 0 3 3 +AUGMENTED 1 0 1 1 +AUDACIOUS 1 0 1 1 +ATTRACTIVE 3 0 3 3 +ATTRACTED 2 0 2 2 +ATTORNEY 1 0 1 1 +ATTENTIVELY 4 0 4 4 +ATTENDING 1 0 1 1 +ATTEMPTING 1 0 1 1 +ATTEMPT 2 0 2 2 +ATTAINED 1 0 1 1 +ATTACKS 3 0 3 3 +ATTACKED 1 0 1 1 +ATTACK 2 0 2 2 +ATTACHMENT 1 0 1 1 +ATTACHED 1 0 1 1 +ATMOSPHERE 1 0 1 1 +ATE 1 0 1 1 +ASUNDER 1 0 1 1 +ASTONISHMENT 2 0 2 2 +ASTONISHED 1 0 1 1 +ASSYRIAN 2 0 2 2 +ASSUREDLY 1 0 1 1 +ASSURE 8 0 8 8 +ASSURANCE 2 0 2 2 +ASSUME 1 0 1 1 +ASSOCIATIONS 1 0 1 1 
+ASSOCIATES 1 0 1 1 +ASSISTED 1 0 1 1 +ASSISTANT 1 0 1 1 +ASSISTANCE 3 0 3 3 +ASSIST 3 0 3 3 +ASSEMBLY 3 0 3 3 +ASSEMBLED 2 0 2 2 +ASSEMBLAGE 1 0 1 1 +ASSAULT 1 0 1 1 +ASSASSINATED 1 0 1 1 +ASSAILED 1 0 1 1 +ASPECT 1 0 1 1 +ASKING 5 0 5 5 +ASIDE 5 0 5 5 +ASCENSION 1 0 1 1 +ARTS 1 0 1 1 +ARTISTS 4 0 4 4 +ARTICLES 1 0 1 1 +ARTFUL 1 0 1 1 +ARRIVES 1 0 1 1 +ARRIVAL 1 0 1 1 +ARRESTED 1 0 1 1 +ARRANGING 1 0 1 1 +ARRANGEMENTS 1 0 1 1 +ARRANGED 2 0 2 2 +AROSE 2 0 2 2 +ARMY 19 0 19 19 +ARMIES 2 0 2 2 +ARMED 4 0 4 4 +ARKADYEVITCH 1 0 1 1 +ARK 1 0 1 1 +ARISTOCRACY 1 0 1 1 +ARISE 1 0 1 1 +ARGUMENTS 3 0 3 3 +ARGUMENT 1 0 1 1 +ARGUED 1 0 1 1 +ARENA 1 0 1 1 +ARCHITECTURE 1 0 1 1 +ARCHBISHOPS 1 0 1 1 +ARABIC 1 0 1 1 +ARABIANS 1 0 1 1 +APTITUDE 1 0 1 1 +APRIL 1 0 1 1 +APPROVAL 1 0 1 1 +APPROACHING 1 0 1 1 +APPROACHED 3 0 3 3 +APPREHENSIONS 1 0 1 1 +APPOINTMENT 2 0 2 2 +APPLYING 1 0 1 1 +APPLY 3 0 3 3 +APPLAUSE 1 0 1 1 +APPETITE 2 0 2 2 +APPEARING 1 0 1 1 +APPEARANCE 7 0 7 7 +APPEALS 1 0 1 1 +APPEALING 1 0 1 1 +APPEALED 1 0 1 1 +APPEAL 1 0 1 1 +APPARITION 1 0 1 1 +APPARENT 1 0 1 1 +APOLOGY 1 0 1 1 +APERTURE 1 0 1 1 +APARTMENTS 3 0 3 3 +APARTMENT 1 0 1 1 +APART 4 0 4 4 +ANYWHERE 1 0 1 1 +ANYHOW 2 0 2 2 +ANYBODY 2 0 2 2 +ANXIOUS 3 0 3 3 +ANVILS 1 0 1 1 +ANTIQUARIAN'S 1 0 1 1 +ANTICIPATION 1 0 1 1 +ANTICIPATE 1 0 1 1 +ANTHONY 1 0 1 1 +ANSWERING 2 0 2 2 +ANSWER 15 0 15 15 +ANON 1 0 1 1 +ANNOYANCES 1 0 1 1 +ANNOYANCE 1 0 1 1 +ANNOUNCING 1 0 1 1 +ANNOUNCED 3 0 3 3 +ANNOUNCE 1 0 1 1 +ANNIHILATION 2 0 2 2 +ANNIHILATED 1 0 1 1 +ANIMATED 2 0 2 2 +ANIMALS 4 0 4 4 +ANDY 1 0 1 1 +ANCIENTS 1 0 1 1 +ANCIENT 2 0 2 2 +ANCHOR 1 0 1 1 +AMUSING 1 0 1 1 +AMPLY 1 0 1 1 +AMPLE 1 0 1 1 +AMOUNT 1 0 1 1 +AMONG 18 0 18 18 +AMMUNITION 1 0 1 1 +AMISS 1 0 1 1 +AMERICAN 1 0 1 1 +AMERICA 1 0 1 1 +AMENDS 1 0 1 1 +AMENDMENT 1 0 1 1 +AMENDED 1 0 1 1 +AMBITIOUS 1 0 1 1 +AMBITIONS 1 0 1 1 +AMBASSADOR 1 0 1 1 +ALTOGETHER 2 0 2 2 +ALTERED 1 0 1 1 +ALTAR 10 0 10 10 +ALOUD 2 0 2 2 +ALMOST 11 0 11 11 +ALMIGHTY 1 0 1 1 +ALLOWING 1 0 1 1 +ALLOWANCES 1 0 1 1 +ALLIANCE 1 0 1 1 +ALLAYS 1 0 1 1 +ALLAH'S 1 0 1 1 +ALLAH 9 0 9 9 +ALIMONY 2 0 2 2 +ALIKE 2 0 2 2 +ALI'S 1 0 1 1 +ALEX 1 0 1 1 +ALBERT'S 3 0 3 3 +ALAS 1 0 1 1 +ALARMS 1 0 1 1 +ALARMED 1 0 1 1 +ALARM 3 0 3 3 +AIM 4 0 4 4 +AILMENTS 2 0 2 2 +AHEAD 2 0 2 2 +AGREES 1 0 1 1 +AGREEMENT 2 0 2 2 +AGREED 6 0 6 6 +AGREEABLE 2 0 2 2 +AGREE 1 0 1 1 +AGONY 1 0 1 1 +AGITATOR 1 0 1 1 +AGITATION 1 0 1 1 +AGITATING 1 0 1 1 +AGILITY 1 0 1 1 +AGHAST 1 0 1 1 +AGGRESSIVENESS 1 0 1 1 +AGGRAVATIONS 1 0 1 1 +AGGRAVATED 1 0 1 1 +AGES 1 0 1 1 +AGED 2 0 2 2 +AGAINST 27 0 27 27 +AFTERNOON 6 0 6 6 +AFRICAN 1 0 1 1 +AFRAID 9 0 9 9 +AFFORD 2 0 2 2 +AFFIRMED 1 0 1 1 +AFFECTIONS 1 0 1 1 +AFFECTIONATELY 1 0 1 1 +AFFECTED 2 0 2 2 +AFFAIRS 2 0 2 2 +AFFAIR 2 0 2 2 +ADVISEDLY 1 0 1 1 +ADVISE 1 0 1 1 +ADVENT 1 0 1 1 +ADVANTAGES 2 0 2 2 +ADVANTAGE 4 0 4 4 +ADVANCING 1 0 1 1 +ADVANCES 2 0 2 2 +ADVANCED 1 0 1 1 +ADVANCE 5 0 5 5 +ADRIFT 1 0 1 1 +ADORNED 1 0 1 1 +ADORN 1 0 1 1 +ADMITTED 4 0 4 4 +ADMIT 1 0 1 1 +ADMISSION 1 0 1 1 +ADMIRATION 5 0 5 5 +ADMIRABLE 1 0 1 1 +ADMINISTRATION 3 0 3 3 +ADMINISTERED 1 0 1 1 +ADJACENT 1 0 1 1 +ADHERENT 1 0 1 1 +ADDRESSING 1 0 1 1 +ADDRESSED 1 0 1 1 +ADDRESS 3 0 3 3 +ADDITION 1 0 1 1 +ADDED 13 0 13 13 +ADAGE 1 0 1 1 +ACTORS 1 0 1 1 +ACTIVITIES 1 0 1 1 +ACTIONS 2 0 2 2 +ACTION 2 0 2 2 +ACTING 2 0 2 2 +ACTED 1 0 1 1 +ACROSS 8 0 8 8 +ACQUITTAL 1 0 1 1 +ACQUIT 1 0 1 1 +ACQUISITIVE 1 0 1 1 +ACQUAINTED 3 0 3 3 +ACQUAINTANCES 1 0 1 1 +ACQUAINTANCE 2 0 2 2 +ACKNOWLEDGMENT 1 0 1 1 +ACIDS 1 0 1 1 +ACID 4 0 4 4 
+ACHIEVED 1 0 1 1 +ACHED 1 0 1 1 +ACE 1 0 1 1 +ACCUSTOMED 2 0 2 2 +ACCUSING 3 0 3 3 +ACCUSED 1 0 1 1 +ACCUSATION 4 0 4 4 +ACCURATE 1 0 1 1 +ACCOUNTS 2 0 2 2 +ACCOUNTED 1 0 1 1 +ACCORDINGLY 5 0 5 5 +ACCORDING 7 0 7 7 +ACCORDANCE 1 0 1 1 +ACCOMPLISHMENTS 1 0 1 1 +ACCOMPLISHED 1 0 1 1 +ACCOMPLICE 2 0 2 2 +ACCOMPANY 1 0 1 1 +ACCOMPANIED 3 0 3 3 +ACCOMMODATING 1 0 1 1 +ACCIDENTS 1 0 1 1 +ACCIDENT 1 0 1 1 +ACCESSION 1 0 1 1 +ACCESSIBLE 1 0 1 1 +ACCEPTED 4 0 4 4 +ACCEPTABLE 1 0 1 1 +ABYSSINIANS 2 0 2 2 +ABUSING 1 0 1 1 +ABUSES 2 0 2 2 +ABUSE 1 0 1 1 +ABUNDANTLY 1 0 1 1 +ABUNDANT 1 0 1 1 +ABSORBING 2 0 2 2 +ABSORBED 4 0 4 4 +ABSORB 1 0 1 1 +ABSOLVED 1 0 1 1 +ABSOLUTELY 3 0 3 3 +ABSOLUTE 1 0 1 1 +ABSENTED 1 0 1 1 +ABSENT 1 0 1 1 +ABSENCE 4 0 4 4 +ABRUPTLY 3 0 3 3 +ABOLISH 1 0 1 1 +ABODE 3 0 3 3 +ABNORMAL 1 0 1 1 +ABLE 11 0 11 11 +ABILITY 1 0 1 1 +ABILITIES 1 0 1 1 +ABIDE 1 0 1 1 +ABBOT 1 0 1 1 +ABACK 1 0 1 1 +AARON 1 0 1 1 diff --git a/log/greedy_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model-2023-02-12-08-58-48 b/log/greedy_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model-2023-02-12-08-58-48 new file mode 100644 index 0000000000000000000000000000000000000000..8a0028678b1e5ae5b48c7da737639fcf323258a9 --- /dev/null +++ b/log/greedy_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model-2023-02-12-08-58-48 @@ -0,0 +1,6 @@ +2023-02-12 08:58:48,826 INFO [decode.py:655] Decoding started +2023-02-12 08:58:48,827 INFO [decode.py:661] Device: cuda:0 +2023-02-12 08:58:48,855 INFO [decode.py:671] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'f8acb25-dirty', 'icefall-git-date': 'Thu Feb 9 12:58:59 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n03', 'IP address': '10.1.7.3'}, 'epoch': 30, 'iter': 0, 'avg': 9, 'use_averaged_model': True, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'greedy_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 
'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1/greedy_search'), 'suffix': 'epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500} +2023-02-12 08:58:48,856 INFO [decode.py:673] About to create model +2023-02-12 08:58:49,471 INFO [zipformer.py:402] At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-12 08:58:49,480 INFO [decode.py:744] Calculating the averaged model over epoch range from 21 (excluded) to 30 diff --git a/log/greedy_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model-2023-02-12-09-04-44 b/log/greedy_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model-2023-02-12-09-04-44 new file mode 100644 index 0000000000000000000000000000000000000000..8c4e75935e258d045280f0746f6e9701df426c91 --- /dev/null +++ b/log/greedy_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model-2023-02-12-09-04-44 @@ -0,0 +1,28 @@ +2023-02-12 09:04:44,043 INFO [decode.py:655] Decoding started +2023-02-12 09:04:44,044 INFO [decode.py:661] Device: cuda:0 +2023-02-12 09:04:44,046 INFO [decode.py:671] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'f8acb25-dirty', 'icefall-git-date': 'Thu Feb 9 12:58:59 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n03', 'IP address': '10.1.7.3'}, 'epoch': 30, 'iter': 0, 'avg': 9, 'use_averaged_model': True, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'greedy_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': 
PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1/greedy_search'), 'suffix': 'epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500} +2023-02-12 09:04:44,046 INFO [decode.py:673] About to create model +2023-02-12 09:04:44,322 INFO [zipformer.py:402] At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-12 09:04:44,332 INFO [decode.py:744] Calculating the averaged model over epoch range from 21 (excluded) to 30 +2023-02-12 09:04:49,669 INFO [decode.py:778] Number of model parameters: 20697573 +2023-02-12 09:04:49,670 INFO [asr_datamodule.py:444] About to get test-clean cuts +2023-02-12 09:04:49,844 INFO [asr_datamodule.py:451] About to get test-other cuts +2023-02-12 09:04:53,359 INFO [decode.py:560] batch 0/?, cuts processed until now is 36 +2023-02-12 09:06:00,901 INFO [decode.py:560] batch 50/?, cuts processed until now is 2609 +2023-02-12 09:06:01,345 INFO [decode.py:576] The transcripts are stored in pruned_transducer_stateless7_streaming/exp/v1/greedy_search/recogs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt +2023-02-12 09:06:01,409 INFO [utils.py:538] [test-clean-greedy_search] %WER 3.94% [2072 / 52576, 243 ins, 178 del, 1651 sub ] +2023-02-12 09:06:01,657 INFO [decode.py:589] Wrote detailed error stats to pruned_transducer_stateless7_streaming/exp/v1/greedy_search/errs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt +2023-02-12 09:06:01,658 INFO [decode.py:605] +For test-clean, WER of different settings are: +greedy_search 3.94 best for test-clean + +2023-02-12 09:06:04,334 INFO [decode.py:560] batch 0/?, cuts processed until now is 43 +2023-02-12 09:07:03,165 INFO [decode.py:560] batch 50/?, cuts processed until now is 2939 +2023-02-12 09:07:03,305 INFO [decode.py:576] The transcripts are stored in pruned_transducer_stateless7_streaming/exp/v1/greedy_search/recogs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt +2023-02-12 09:07:03,376 INFO [utils.py:538] [test-other-greedy_search] %WER 9.79% [5125 / 52343, 496 ins, 537 del, 4092 sub ] +2023-02-12 09:07:03,533 INFO [decode.py:589] Wrote detailed error stats to pruned_transducer_stateless7_streaming/exp/v1/greedy_search/errs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt +2023-02-12 09:07:03,535 INFO [decode.py:605] +For test-other, WER of different settings are: +greedy_search 9.79 best for test-other + +2023-02-12 09:07:03,535 INFO [decode.py:809] Done! 
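
The two `utils.py:538` lines in the log above contain every count behind the WER figures in the Performance Record table. As a quick sanity check, the snippet below (not part of this repo, just the standard WER definition applied to the logged counts) reproduces both numbers:

```python
# Re-derive the reported WER from the error counts printed by decode.py above.
# The counts are copied verbatim from the `utils.py:538` log lines; the
# formula is the standard WER definition: (ins + del + sub) / reference words.
def wer(ins: int, dels: int, subs: int, ref_words: int) -> float:
    return 100.0 * (ins + dels + subs) / ref_words

print(f"test-clean: {wer(243, 178, 1651, 52576):.2f}%")  # 3.94%, matches the log
print(f"test-other: {wer(496, 537, 4092, 52343):.2f}%")  # 9.79%, matches the log
```
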
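The same log reports 20697573 parameters for the averaged model, which is computed over epochs 22 to 30 (nine checkpoints, matching the `epoch-30-avg-9` decode suffix) and is consistent with the roughly 20M configuration shown in this log (`num_encoder_layers='2,2,2,2,2'`, `encoder_dims='256,256,256,256,256'`, `feedforward_dims='768,768,768,768,768'`). A minimal, hedged way to confirm the size against the exported `exp/cpu_jit.pt` is to count its parameters with plain PyTorch; actual streaming recognition should still go through the icefall `pruned_transducer_stateless7_streaming` scripts:

```python
import torch

# Load the TorchScript export shipped in this repo and count its parameters.
# This only checks the model size; it does no feature extraction or decoding.
model = torch.jit.load("exp/cpu_jit.pt", map_location="cpu")
num_params = sum(p.numel() for p in model.parameters())
print(num_params)  # expected to match the 20697573 reported in the log above
```
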
diff --git a/log/greedy_search/recogs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt b/log/greedy_search/recogs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..90a0d8a79de382b74d5b6782f4c9637e747c9336 --- /dev/null +++ b/log/greedy_search/recogs-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt @@ -0,0 +1,5240 @@ +1089-134686-0000-1733: ref=['HE', 'HOPED', 'THERE', 'WOULD', 'BE', 'STEW', 'FOR', 'DINNER', 'TURNIPS', 'AND', 'CARROTS', 'AND', 'BRUISED', 'POTATOES', 'AND', 'FAT', 'MUTTON', 'PIECES', 'TO', 'BE', 'LADLED', 'OUT', 'IN', 'THICK', 'PEPPERED', 'FLOUR', 'FATTENED', 'SAUCE'] +1089-134686-0000-1733: hyp=['HE', 'HOPED', 'THERE', 'WOULD', 'BE', 'STEW', 'FOR', 'DINNER', 'TURNIPS', 'AND', 'CARROTS', 'AND', 'BRUISED', 'POTATOES', 'AND', 'FAT', 'MUTTON', 'PIECES', 'TO', 'BE', 'LADLED', 'OUT', 'IN', 'THICK', 'PEPPERED', 'FLOWER', 'FAT', 'AND', 'SAUCE'] +1089-134686-0001-1734: ref=['STUFF', 'IT', 'INTO', 'YOU', 'HIS', 'BELLY', 'COUNSELLED', 'HIM'] +1089-134686-0001-1734: hyp=['STUFF', 'IT', 'INTO', 'YOU', 'HIS', 'BELLY', 'COUNSELLED', 'HIM'] +1089-134686-0002-1735: ref=['AFTER', 'EARLY', 'NIGHTFALL', 'THE', 'YELLOW', 'LAMPS', 'WOULD', 'LIGHT', 'UP', 'HERE', 'AND', 'THERE', 'THE', 'SQUALID', 'QUARTER', 'OF', 'THE', 'BROTHELS'] +1089-134686-0002-1735: hyp=['AFTER', 'EARLY', 'NIGHTFALL', 'THE', 'YELLOW', 'LAMPS', 'WOULD', 'LIGHT', 'UP', 'HERE', 'AND', 'THERE', 'THE', 'SQUALID', 'QUARTER', 'OF', 'THE', 'BRAFFLELS'] +1089-134686-0003-1736: ref=['HELLO', 'BERTIE', 'ANY', 'GOOD', 'IN', 'YOUR', 'MIND'] +1089-134686-0003-1736: hyp=['HALLO', 'BERTIE', 'ANY', 'GOOD', 'IN', 'YOUR', 'MIND'] +1089-134686-0004-1737: ref=['NUMBER', 'TEN', 'FRESH', 'NELLY', 'IS', 'WAITING', 'ON', 'YOU', 'GOOD', 'NIGHT', 'HUSBAND'] +1089-134686-0004-1737: hyp=['NUMBER', 'TEN', 'FRESH', 'NELLIERS', 'WAITING', 'ON', 'YOU', 'GOOD', 'NIGHT', 'HUSBAND'] +1089-134686-0005-1738: ref=['THE', 'MUSIC', 'CAME', 'NEARER', 'AND', 'HE', 'RECALLED', 'THE', 'WORDS', 'THE', 'WORDS', 'OF', "SHELLEY'S", 'FRAGMENT', 'UPON', 'THE', 'MOON', 'WANDERING', 'COMPANIONLESS', 'PALE', 'FOR', 'WEARINESS'] +1089-134686-0005-1738: hyp=['THE', 'MUSIC', 'CAME', 'NEARER', 'AND', 'HE', 'RECALLED', 'THE', 'WORDS', 'THE', 'WORDS', 'OF', "SHELLEY'S", 'FRAGMENT', 'UPON', 'THE', 'MOON', 'WANDERING', 'COMPANIONLESS', 'PALE', 'FOR', 'WEARINESS'] +1089-134686-0006-1739: ref=['THE', 'DULL', 'LIGHT', 'FELL', 'MORE', 'FAINTLY', 'UPON', 'THE', 'PAGE', 'WHEREON', 'ANOTHER', 'EQUATION', 'BEGAN', 'TO', 'UNFOLD', 'ITSELF', 'SLOWLY', 'AND', 'TO', 'SPREAD', 'ABROAD', 'ITS', 'WIDENING', 'TAIL'] +1089-134686-0006-1739: hyp=['THE', 'DULL', 'LIGHT', 'FELL', 'MORE', 'FAINTLY', 'UPON', 'THE', 'PAGE', 'WHEREON', 'ANOTHER', 'EQUATION', 'BEGAN', 'TO', 'UNFOLD', 'ITSELF', 'SLOWLY', 'AND', 'TO', 'SPREAD', 'ABROAD', 'ITS', 'WIDENING', 'TAIL'] +1089-134686-0007-1740: ref=['A', 'COLD', 'LUCID', 'INDIFFERENCE', 'REIGNED', 'IN', 'HIS', 'SOUL'] +1089-134686-0007-1740: hyp=['A', 'COLD', 'LUCID', 'INDIFFERENCE', 'REIGNED', 'IN', 'HIS', 'SOUL'] +1089-134686-0008-1741: ref=['THE', 'CHAOS', 'IN', 'WHICH', 'HIS', 'ARDOUR', 'EXTINGUISHED', 'ITSELF', 'WAS', 'A', 'COLD', 'INDIFFERENT', 'KNOWLEDGE', 'OF', 'HIMSELF'] +1089-134686-0008-1741: hyp=['THE', 'CHAOS', 'IN', 'WHICH', 'HIS', 'ARDOUR', 'EXTINGUISHED', 'ITSELF', 'WAS', 'A', 'COLD', 
'INDIFFERENT', 'KNOWLEDGE', 'OF', 'HIMSELF'] +1089-134686-0009-1742: ref=['AT', 'MOST', 'BY', 'AN', 'ALMS', 'GIVEN', 'TO', 'A', 'BEGGAR', 'WHOSE', 'BLESSING', 'HE', 'FLED', 'FROM', 'HE', 'MIGHT', 'HOPE', 'WEARILY', 'TO', 'WIN', 'FOR', 'HIMSELF', 'SOME', 'MEASURE', 'OF', 'ACTUAL', 'GRACE'] +1089-134686-0009-1742: hyp=['AT', 'MOST', 'BY', 'AN', 'ALMS', 'GIVEN', 'TO', 'A', 'BEGGAR', 'WHOSE', 'BLESSING', 'HE', 'FLED', 'FROM', 'HE', 'MIGHT', 'HOPE', 'WEARILY', 'TO', 'WIN', 'FOR', 'HIMSELF', 'SOME', 'MEASURE', 'OF', 'ACTUAL', 'GRACE'] +1089-134686-0010-1743: ref=['WELL', 'NOW', 'ENNIS', 'I', 'DECLARE', 'YOU', 'HAVE', 'A', 'HEAD', 'AND', 'SO', 'HAS', 'MY', 'STICK'] +1089-134686-0010-1743: hyp=['WELL', 'NOW', 'ENNIS', 'I', 'DECLARE', 'YOU', 'HAVE', 'A', 'HEAD', 'AND', 'SO', 'HAS', 'MY', 'STICK'] +1089-134686-0011-1744: ref=['ON', 'SATURDAY', 'MORNINGS', 'WHEN', 'THE', 'SODALITY', 'MET', 'IN', 'THE', 'CHAPEL', 'TO', 'RECITE', 'THE', 'LITTLE', 'OFFICE', 'HIS', 'PLACE', 'WAS', 'A', 'CUSHIONED', 'KNEELING', 'DESK', 'AT', 'THE', 'RIGHT', 'OF', 'THE', 'ALTAR', 'FROM', 'WHICH', 'HE', 'LED', 'HIS', 'WING', 'OF', 'BOYS', 'THROUGH', 'THE', 'RESPONSES'] +1089-134686-0011-1744: hyp=['ON', 'SATURDAY', 'MORNINGS', 'WHEN', 'THE', 'SODALITY', 'MET', 'IN', 'THE', 'CHAPEL', 'TO', 'RECITE', 'THE', 'LITTLE', 'OFFICE', 'HIS', 'PLACE', 'WAS', 'A', 'CUSHIONED', 'KNEELING', 'DESK', 'AT', 'THE', 'RIGHT', 'OF', 'THE', 'ALTAR', 'FROM', 'WHICH', 'HE', 'LED', 'HIS', 'WING', 'OF', 'BOYS', 'THROUGH', 'THE', 'RESPONSES'] +1089-134686-0012-1745: ref=['HER', 'EYES', 'SEEMED', 'TO', 'REGARD', 'HIM', 'WITH', 'MILD', 'PITY', 'HER', 'HOLINESS', 'A', 'STRANGE', 'LIGHT', 'GLOWING', 'FAINTLY', 'UPON', 'HER', 'FRAIL', 'FLESH', 'DID', 'NOT', 'HUMILIATE', 'THE', 'SINNER', 'WHO', 'APPROACHED', 'HER'] +1089-134686-0012-1745: hyp=['HER', 'EYES', 'SEEMED', 'TO', 'REGARD', 'HIM', 'WITH', 'MILD', 'PITY', 'HER', 'HOLINESS', 'A', 'STRANGE', 'LIGHT', 'GLOWING', 'FAINTLY', 'UPON', 'HER', 'FRAIL', 'FLESH', 'DID', 'NOT', 'HUMILIATE', 'THE', 'SINNER', 'WHO', 'APPROACHED', 'HER'] +1089-134686-0013-1746: ref=['IF', 'EVER', 'HE', 'WAS', 'IMPELLED', 'TO', 'CAST', 'SIN', 'FROM', 'HIM', 'AND', 'TO', 'REPENT', 'THE', 'IMPULSE', 'THAT', 'MOVED', 'HIM', 'WAS', 'THE', 'WISH', 'TO', 'BE', 'HER', 'KNIGHT'] +1089-134686-0013-1746: hyp=['IF', 'EVER', 'HE', 'WAS', 'IMPELLED', 'TO', 'CAST', 'SIN', 'FROM', 'HIM', 'AND', 'TO', 'REPENT', 'THE', 'IMPULSE', 'THAT', 'MOVED', 'HIM', 'WAS', 'THE', 'WISH', 'TO', 'BE', 'HER', 'KNIGHT'] +1089-134686-0014-1747: ref=['HE', 'TRIED', 'TO', 'THINK', 'HOW', 'IT', 'COULD', 'BE'] +1089-134686-0014-1747: hyp=['HE', 'TRIED', 'TO', 'THINK', 'HOW', 'IT', 'COULD', 'BE'] +1089-134686-0015-1748: ref=['BUT', 'THE', 'DUSK', 'DEEPENING', 'IN', 'THE', 'SCHOOLROOM', 'COVERED', 'OVER', 'HIS', 'THOUGHTS', 'THE', 'BELL', 'RANG'] +1089-134686-0015-1748: hyp=['BUT', 'THE', 'DUSK', 'DEEPENING', 'IN', 'THE', 'SCHOOLROOM', 'COVERED', 'OVER', 'HIS', 'THOUGHTS', 'THE', 'BELL', 'RANG'] +1089-134686-0016-1749: ref=['THEN', 'YOU', 'CAN', 'ASK', 'HIM', 'QUESTIONS', 'ON', 'THE', 'CATECHISM', 'DEDALUS'] +1089-134686-0016-1749: hyp=['THEN', 'YOU', 'CAN', 'ASK', 'HIM', 'QUESTIONS', 'ON', 'THE', 'CATECHISM', 'DEDALUS'] +1089-134686-0017-1750: ref=['STEPHEN', 'LEANING', 'BACK', 'AND', 'DRAWING', 'IDLY', 'ON', 'HIS', 'SCRIBBLER', 'LISTENED', 'TO', 'THE', 'TALK', 'ABOUT', 'HIM', 'WHICH', 'HERON', 'CHECKED', 'FROM', 'TIME', 'TO', 'TIME', 'BY', 'SAYING'] +1089-134686-0017-1750: hyp=['STEPHEN', 'LEANING', 'BACK', 'AND', 'DRAWING', 'IDLY', 'ON', 'HIS', 'SCRIBBLER', 
'LISTENED', 'TO', 'THE', 'TALK', 'ABOUT', 'HIM', 'WHICH', 'HERON', 'CHECKED', 'FROM', 'TIME', 'TO', 'TIME', 'BY', 'SAYING'] +1089-134686-0018-1751: ref=['IT', 'WAS', 'STRANGE', 'TOO', 'THAT', 'HE', 'FOUND', 'AN', 'ARID', 'PLEASURE', 'IN', 'FOLLOWING', 'UP', 'TO', 'THE', 'END', 'THE', 'RIGID', 'LINES', 'OF', 'THE', 'DOCTRINES', 'OF', 'THE', 'CHURCH', 'AND', 'PENETRATING', 'INTO', 'OBSCURE', 'SILENCES', 'ONLY', 'TO', 'HEAR', 'AND', 'FEEL', 'THE', 'MORE', 'DEEPLY', 'HIS', 'OWN', 'CONDEMNATION'] +1089-134686-0018-1751: hyp=['IT', 'WAS', 'STRANGE', 'TOO', 'THAT', 'HE', 'FOUND', 'AN', 'ARID', 'PLEASURE', 'IN', 'FOLLOWING', 'UP', 'TO', 'THE', 'END', 'THE', 'RIGID', 'LINES', 'OF', 'THE', 'DOCTRINES', 'OF', 'THE', 'CHURCH', 'AND', 'PENETRATING', 'INTO', 'OBSCURE', 'SILENCES', 'ONLY', 'TO', 'HEAR', 'AND', 'FEEL', 'THE', 'MORE', 'DEEPLY', 'HIS', 'OWN', 'CONDEMNATION'] +1089-134686-0019-1752: ref=['THE', 'SENTENCE', 'OF', 'SAINT', 'JAMES', 'WHICH', 'SAYS', 'THAT', 'HE', 'WHO', 'OFFENDS', 'AGAINST', 'ONE', 'COMMANDMENT', 'BECOMES', 'GUILTY', 'OF', 'ALL', 'HAD', 'SEEMED', 'TO', 'HIM', 'FIRST', 'A', 'SWOLLEN', 'PHRASE', 'UNTIL', 'HE', 'HAD', 'BEGUN', 'TO', 'GROPE', 'IN', 'THE', 'DARKNESS', 'OF', 'HIS', 'OWN', 'STATE'] +1089-134686-0019-1752: hyp=['THE', 'SENTENCE', 'OF', 'SAINT', 'JAMES', 'WHICH', 'SAYS', 'THAT', 'HE', 'WHO', 'OFFENDS', 'AGAINST', 'ONE', 'COMMANDMENT', 'BECOMES', 'GUILTY', 'OF', 'ALL', 'HAD', 'SEEMED', 'TO', 'HIM', 'FIRST', 'A', 'SWOLLEN', 'PHRASE', 'UNTIL', 'HE', 'HAD', 'BEGUN', 'TO', 'GROPE', 'IN', 'THE', 'DARKNESS', 'OF', 'HIS', 'OWN', 'STATE'] +1089-134686-0020-1753: ref=['IF', 'A', 'MAN', 'HAD', 'STOLEN', 'A', 'POUND', 'IN', 'HIS', 'YOUTH', 'AND', 'HAD', 'USED', 'THAT', 'POUND', 'TO', 'AMASS', 'A', 'HUGE', 'FORTUNE', 'HOW', 'MUCH', 'WAS', 'HE', 'OBLIGED', 'TO', 'GIVE', 'BACK', 'THE', 'POUND', 'HE', 'HAD', 'STOLEN', 'ONLY', 'OR', 'THE', 'POUND', 'TOGETHER', 'WITH', 'THE', 'COMPOUND', 'INTEREST', 'ACCRUING', 'UPON', 'IT', 'OR', 'ALL', 'HIS', 'HUGE', 'FORTUNE'] +1089-134686-0020-1753: hyp=['IF', 'A', 'MAN', 'HAD', 'STOLEN', 'A', 'POUND', 'IN', 'HIS', 'YOUTH', 'AND', 'HAD', 'USED', 'THAT', 'POUND', 'TO', 'AMASS', 'A', 'HUGE', 'FORTUNE', 'HOW', 'MUCH', 'WAS', 'HE', 'OBLIGED', 'TO', 'GIVE', 'BACK', 'THE', 'POUND', 'HE', 'HAD', 'STOLEN', 'ONLY', 'WERE', 'THE', 'POUND', 'TOGETHER', 'WITH', 'THE', 'COMPOUND', 'INTEREST', 'ACCRUING', 'UPON', 'IT', 'OR', 'ALL', 'HIS', 'HUGE', 'FORTUNE'] +1089-134686-0021-1754: ref=['IF', 'A', 'LAYMAN', 'IN', 'GIVING', 'BAPTISM', 'POUR', 'THE', 'WATER', 'BEFORE', 'SAYING', 'THE', 'WORDS', 'IS', 'THE', 'CHILD', 'BAPTIZED'] +1089-134686-0021-1754: hyp=['IF', 'A', 'LAYMAN', 'IN', 'GIVING', 'BAPTISM', 'POUR', 'THE', 'WATER', 'BEFORE', 'SAYING', 'THE', 'WORDS', 'IS', 'THE', 'CHILD', 'BAPTIZED'] +1089-134686-0022-1755: ref=['HOW', 'COMES', 'IT', 'THAT', 'WHILE', 'THE', 'FIRST', 'BEATITUDE', 'PROMISES', 'THE', 'KINGDOM', 'OF', 'HEAVEN', 'TO', 'THE', 'POOR', 'OF', 'HEART', 'THE', 'SECOND', 'BEATITUDE', 'PROMISES', 'ALSO', 'TO', 'THE', 'MEEK', 'THAT', 'THEY', 'SHALL', 'POSSESS', 'THE', 'LAND'] +1089-134686-0022-1755: hyp=['HOW', 'COMES', 'IT', 'THAT', 'WHILE', 'THE', 'FIRST', 'BE', 'ATTITUDE', 'PROMISES', 'THE', 'KINGDOM', 'OF', 'HEAVEN', 'TO', 'THE', 'POOR', 'OF', 'HEART', 'THE', 'SECOND', 'BE', 'ATTITUDE', 'PROMISES', 'ALSO', 'TO', 'THE', 'MEEK', 'THAT', 'THEY', 'SHALL', 'POSSESS', 'THE', 'LAND'] +1089-134686-0023-1756: ref=['WHY', 'WAS', 'THE', 'SACRAMENT', 'OF', 'THE', 'EUCHARIST', 'INSTITUTED', 'UNDER', 'THE', 'TWO', 'SPECIES', 'OF', 'BREAD', 'AND', 'WINE', 'IF', 
'JESUS', 'CHRIST', 'BE', 'PRESENT', 'BODY', 'AND', 'BLOOD', 'SOUL', 'AND', 'DIVINITY', 'IN', 'THE', 'BREAD', 'ALONE', 'AND', 'IN', 'THE', 'WINE', 'ALONE'] +1089-134686-0023-1756: hyp=['WHY', 'WAS', 'THE', 'SACRAMENT', 'OF', 'THE', 'EUCHARIST', 'INSTITUTED', 'UNDER', 'THE', 'TWO', 'SPECIES', 'OF', 'BREAD', 'AND', 'WINE', 'IF', 'JESUS', 'CHRIST', 'BE', 'PRESENT', 'BODY', 'AND', 'BLOOD', 'SOUL', 'AND', 'DIVINITY', 'IN', 'THE', 'BREAD', 'ALONE', 'AND', 'IN', 'THE', 'WINE', 'ALONE'] +1089-134686-0024-1757: ref=['IF', 'THE', 'WINE', 'CHANGE', 'INTO', 'VINEGAR', 'AND', 'THE', 'HOST', 'CRUMBLE', 'INTO', 'CORRUPTION', 'AFTER', 'THEY', 'HAVE', 'BEEN', 'CONSECRATED', 'IS', 'JESUS', 'CHRIST', 'STILL', 'PRESENT', 'UNDER', 'THEIR', 'SPECIES', 'AS', 'GOD', 'AND', 'AS', 'MAN'] +1089-134686-0024-1757: hyp=['IF', 'THE', 'WINE', 'CHANGE', 'INTO', 'VINEGAR', 'AND', 'THE', 'HOST', 'CRUMBLE', 'INTO', 'CORRUPTION', 'AFTER', 'THEY', 'HAVE', 'BEEN', 'CONSECRATED', 'IS', 'JESUS', 'CHRIST', 'STILL', 'PRESENT', 'UNDER', 'THEIR', 'SPECIES', 'AS', 'GOD', 'AND', 'AS', 'MAN'] +1089-134686-0025-1758: ref=['A', 'GENTLE', 'KICK', 'FROM', 'THE', 'TALL', 'BOY', 'IN', 'THE', 'BENCH', 'BEHIND', 'URGED', 'STEPHEN', 'TO', 'ASK', 'A', 'DIFFICULT', 'QUESTION'] +1089-134686-0025-1758: hyp=['A', 'GENTLE', 'KICK', 'FROM', 'THE', 'TALL', 'BOY', 'IN', 'THE', 'BENCH', 'BEHIND', 'URGED', 'STEPHEN', 'TO', 'ASK', 'A', 'DIFFICULT', 'QUESTION'] +1089-134686-0026-1759: ref=['THE', 'RECTOR', 'DID', 'NOT', 'ASK', 'FOR', 'A', 'CATECHISM', 'TO', 'HEAR', 'THE', 'LESSON', 'FROM'] +1089-134686-0026-1759: hyp=['THE', 'RECTOR', 'DID', 'NOT', 'ASK', 'FOR', 'A', 'CATECHISM', 'TO', 'HEAR', 'THE', 'LESSON', 'FROM'] +1089-134686-0027-1760: ref=['HE', 'CLASPED', 'HIS', 'HANDS', 'ON', 'THE', 'DESK', 'AND', 'SAID'] +1089-134686-0027-1760: hyp=['HE', 'CLASPED', 'HIS', 'HANDS', 'ON', 'THE', 'DESK', 'AND', 'SAID'] +1089-134686-0028-1761: ref=['THE', 'RETREAT', 'WILL', 'BEGIN', 'ON', 'WEDNESDAY', 'AFTERNOON', 'IN', 'HONOUR', 'OF', 'SAINT', 'FRANCIS', 'XAVIER', 'WHOSE', 'FEAST', 'DAY', 'IS', 'SATURDAY'] +1089-134686-0028-1761: hyp=['THE', 'RETREAT', 'WILL', 'BEGIN', 'ON', 'WEDNESDAY', 'AFTERNOON', 'IN', 'HONOR', 'OF', 'SAINT', 'FRANCIS', 'SAVIER', 'WHOSE', 'FEAST', 'DAY', 'IS', 'SATURDAY'] +1089-134686-0029-1762: ref=['ON', 'FRIDAY', 'CONFESSION', 'WILL', 'BE', 'HEARD', 'ALL', 'THE', 'AFTERNOON', 'AFTER', 'BEADS'] +1089-134686-0029-1762: hyp=['ON', 'FRIDAY', 'CONFESSION', 'WILL', 'BE', 'HEARD', 'ALL', 'THE', 'AFTERNOON', 'AFTER', 'BEADS'] +1089-134686-0030-1763: ref=['BEWARE', 'OF', 'MAKING', 'THAT', 'MISTAKE'] +1089-134686-0030-1763: hyp=['BEWARE', 'OF', 'MAKING', 'THAT', 'MISTAKE'] +1089-134686-0031-1764: ref=["STEPHEN'S", 'HEART', 'BEGAN', 'SLOWLY', 'TO', 'FOLD', 'AND', 'FADE', 'WITH', 'FEAR', 'LIKE', 'A', 'WITHERING', 'FLOWER'] +1089-134686-0031-1764: hyp=["STEPHEN'S", 'HEART', 'BEGAN', 'SLOWLY', 'TO', 'FOLD', 'AND', 'FADE', 'WITH', 'FEAR', 'LIKE', 'A', 'WITHERING', 'FLOWER'] +1089-134686-0032-1765: ref=['HE', 'IS', 'CALLED', 'AS', 'YOU', 'KNOW', 'THE', 'APOSTLE', 'OF', 'THE', 'INDIES'] +1089-134686-0032-1765: hyp=['HE', 'IS', 'CALLED', 'AS', 'YOU', 'KNOW', 'THE', 'APOSTLE', 'OF', 'THE', 'INDIES'] +1089-134686-0033-1766: ref=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'XAVIER'] +1089-134686-0033-1766: hyp=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'ZEVIER'] +1089-134686-0034-1767: ref=['THE', 'RECTOR', 'PAUSED', 'AND', 'THEN', 'SHAKING', 'HIS', 'CLASPED', 'HANDS', 'BEFORE', 'HIM', 'WENT', 'ON'] +1089-134686-0034-1767: hyp=['THE', 'RECTOR', 'PAUSED', 'AND', 
'THEN', 'SHAKING', 'HIS', 'CLASPED', 'HANDS', 'BEFORE', 'HIM', 'WENT', 'ON'] +1089-134686-0035-1768: ref=['HE', 'HAD', 'THE', 'FAITH', 'IN', 'HIM', 'THAT', 'MOVES', 'MOUNTAINS'] +1089-134686-0035-1768: hyp=['HE', 'HAD', 'THE', 'FAITH', 'IN', 'HIM', 'THAT', 'MOVES', 'MOUNTAINS'] +1089-134686-0036-1769: ref=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'XAVIER'] +1089-134686-0036-1769: hyp=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'ZEVIER'] +1089-134686-0037-1770: ref=['IN', 'THE', 'SILENCE', 'THEIR', 'DARK', 'FIRE', 'KINDLED', 'THE', 'DUSK', 'INTO', 'A', 'TAWNY', 'GLOW'] +1089-134686-0037-1770: hyp=['IN', 'THE', 'SILENCE', 'THEIR', 'DARK', 'FIRE', 'KINDLED', 'THE', 'DUSK', 'INTO', 'A', 'TAWNY', 'GLOW'] +1089-134691-0000-1707: ref=['HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0000-1707: hyp=['HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0001-1708: ref=['FOR', 'A', 'FULL', 'HOUR', 'HE', 'HAD', 'PACED', 'UP', 'AND', 'DOWN', 'WAITING', 'BUT', 'HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0001-1708: hyp=['FOR', 'A', 'FULL', 'HOUR', 'HE', 'HAD', 'PACED', 'UP', 'AND', 'DOWN', 'WAITING', 'BUT', 'HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0002-1709: ref=['HE', 'SET', 'OFF', 'ABRUPTLY', 'FOR', 'THE', 'BULL', 'WALKING', 'RAPIDLY', 'LEST', 'HIS', "FATHER'S", 'SHRILL', 'WHISTLE', 'MIGHT', 'CALL', 'HIM', 'BACK', 'AND', 'IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HAD', 'ROUNDED', 'THE', 'CURVE', 'AT', 'THE', 'POLICE', 'BARRACK', 'AND', 'WAS', 'SAFE'] +1089-134691-0002-1709: hyp=['HE', 'SET', 'OFF', 'ABRUPTLY', 'FOR', 'THE', 'BULL', 'WALKING', 'RAPIDLY', 'LEST', 'HIS', "FATHER'S", 'SHRILL', 'WHISTLE', 'MIGHT', 'CALL', 'HIM', 'BACK', 'AND', 'IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HAD', 'ROUNDED', 'THE', 'CURVE', 'AT', 'THE', 'POLICE', 'BARRACK', 'AND', 'WAS', 'SAFE'] +1089-134691-0003-1710: ref=['THE', 'UNIVERSITY'] +1089-134691-0003-1710: hyp=['THE', 'UNIVERSITY'] +1089-134691-0004-1711: ref=['PRIDE', 'AFTER', 'SATISFACTION', 'UPLIFTED', 'HIM', 'LIKE', 'LONG', 'SLOW', 'WAVES'] +1089-134691-0004-1711: hyp=['PRIDE', 'AFTER', 'SATISFACTION', 'UPLIFTED', 'HIM', 'LIKE', 'LONG', 'SLOW', 'WAVES'] +1089-134691-0005-1712: ref=['WHOSE', 'FEET', 'ARE', 'AS', 'THE', 'FEET', 'OF', 'HARTS', 'AND', 'UNDERNEATH', 'THE', 'EVERLASTING', 'ARMS'] +1089-134691-0005-1712: hyp=['WHOSE', 'FEET', 'ARE', 'AS', 'THE', 'FEET', 'OF', 'HEARTS', 'AND', 'UNDERNEATH', 'THE', 'EVERLASTING', 'ARMS'] +1089-134691-0006-1713: ref=['THE', 'PRIDE', 'OF', 'THAT', 'DIM', 'IMAGE', 'BROUGHT', 'BACK', 'TO', 'HIS', 'MIND', 'THE', 'DIGNITY', 'OF', 'THE', 'OFFICE', 'HE', 'HAD', 'REFUSED'] +1089-134691-0006-1713: hyp=['THE', 'PRIDE', 'OF', 'THAT', 'DIM', 'IMAGE', 'BROUGHT', 'BACK', 'TO', 'HIS', 'MIND', 'THE', 'DIGNITY', 'OF', 'THE', 'OFFICE', 'HE', 'HAD', 'REFUSED'] +1089-134691-0007-1714: ref=['SOON', 'THE', 'WHOLE', 'BRIDGE', 'WAS', 'TREMBLING', 'AND', 'RESOUNDING'] +1089-134691-0007-1714: hyp=['SOON', 'THE', 'WHOLE', 'BRIDGE', 'WAS', 'TREMBLING', 'AND', 'RESOUNDING'] +1089-134691-0008-1715: ref=['THE', 'UNCOUTH', 'FACES', 'PASSED', 'HIM', 'TWO', 'BY', 'TWO', 'STAINED', 'YELLOW', 'OR', 'RED', 'OR', 'LIVID', 'BY', 'THE', 'SEA', 'AND', 'AS', 'HE', 'STROVE', 'TO', 'LOOK', 'AT', 'THEM', 'WITH', 'EASE', 'AND', 'INDIFFERENCE', 'A', 'FAINT', 'STAIN', 'OF', 'PERSONAL', 'SHAME', 'AND', 'COMMISERATION', 'ROSE', 'TO', 'HIS', 'OWN', 'FACE'] +1089-134691-0008-1715: hyp=['THE', 'UNCOUTH', 'FACES', 'PASSED', 'HIM', 'TWO', 'BY', 'TWO', 'STAINED', 'YELLOW', 'OR', 'RED', 'OR', 'LIVID', 'BY', 'THE', 'SEA', 'AND', 'AS', 'HE', 'STROVE', 'TO', 'LOOK', 'AT', 
'THEM', 'WITH', 'EASE', 'AND', 'INDIFFERENCE', 'A', 'FAINT', 'STAIN', 'OF', 'PERSONAL', 'SHAME', 'AND', 'COMMISERATION', 'ROSE', 'TO', 'HIS', 'OWN', 'FACE'] +1089-134691-0009-1716: ref=['ANGRY', 'WITH', 'HIMSELF', 'HE', 'TRIED', 'TO', 'HIDE', 'HIS', 'FACE', 'FROM', 'THEIR', 'EYES', 'BY', 'GAZING', 'DOWN', 'SIDEWAYS', 'INTO', 'THE', 'SHALLOW', 'SWIRLING', 'WATER', 'UNDER', 'THE', 'BRIDGE', 'BUT', 'HE', 'STILL', 'SAW', 'A', 'REFLECTION', 'THEREIN', 'OF', 'THEIR', 'TOP', 'HEAVY', 'SILK', 'HATS', 'AND', 'HUMBLE', 'TAPE', 'LIKE', 'COLLARS', 'AND', 'LOOSELY', 'HANGING', 'CLERICAL', 'CLOTHES', 'BROTHER', 'HICKEY'] +1089-134691-0009-1716: hyp=['ANGRY', 'WITH', 'HIMSELF', 'HE', 'TRIED', 'TO', 'HIDE', 'HIS', 'FACE', 'FROM', 'THEIR', 'EYES', 'BY', 'GAZING', 'DOWN', 'SIDEWAYS', 'INTO', 'THE', 'SHALLOW', 'SWIRLING', 'WATER', 'UNDER', 'THE', 'BRIDGE', 'BUT', 'HE', 'STILL', 'SAW', 'A', 'REFLECTION', 'THEREIN', 'OF', 'THEIR', 'TOP', 'HEAVY', 'SILK', 'HATS', 'AND', 'HUMBLE', 'TAPE', 'LIKE', 'COLLARS', 'AND', 'LOOSELY', 'HANGING', 'CLERICAL', 'CLOTHES', 'BROTHER', 'HICKY'] +1089-134691-0010-1717: ref=['BROTHER', 'MAC', 'ARDLE', 'BROTHER', 'KEOGH'] +1089-134691-0010-1717: hyp=['BROTHER', 'MICARDLE', 'BROTHER', 'KIOPH'] +1089-134691-0011-1718: ref=['THEIR', 'PIETY', 'WOULD', 'BE', 'LIKE', 'THEIR', 'NAMES', 'LIKE', 'THEIR', 'FACES', 'LIKE', 'THEIR', 'CLOTHES', 'AND', 'IT', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'THEIR', 'HUMBLE', 'AND', 'CONTRITE', 'HEARTS', 'IT', 'MIGHT', 'BE', 'PAID', 'A', 'FAR', 'RICHER', 'TRIBUTE', 'OF', 'DEVOTION', 'THAN', 'HIS', 'HAD', 'EVER', 'BEEN', 'A', 'GIFT', 'TENFOLD', 'MORE', 'ACCEPTABLE', 'THAN', 'HIS', 'ELABORATE', 'ADORATION'] +1089-134691-0011-1718: hyp=['THEIR', 'PIETY', 'WOULD', 'BE', 'LIKE', 'THEIR', 'NAMES', 'LIKE', 'THEIR', 'FACES', 'LIKE', 'THEIR', 'CLOTHES', 'AND', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'THEIR', 'HUMBLE', 'AND', 'CONTRITE', 'HEARTS', 'IT', 'MIGHT', 'BE', 'PAID', 'A', 'FAR', 'RICHER', 'TRIBUTE', 'OF', 'DEVOTION', 'THAN', 'HIS', 'HAD', 'EVER', 'BEEN', 'A', 'GIFT', 'TENFOLD', 'MORE', 'ACCEPTABLE', 'THAN', 'HIS', 'ELABORATE', 'ADORATION'] +1089-134691-0012-1719: ref=['IT', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'MOVE', 'HIMSELF', 'TO', 'BE', 'GENEROUS', 'TOWARDS', 'THEM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'IF', 'HE', 'EVER', 'CAME', 'TO', 'THEIR', 'GATES', 'STRIPPED', 'OF', 'HIS', 'PRIDE', 'BEATEN', 'AND', 'IN', "BEGGAR'S", 'WEEDS', 'THAT', 'THEY', 'WOULD', 'BE', 'GENEROUS', 'TOWARDS', 'HIM', 'LOVING', 'HIM', 'AS', 'THEMSELVES'] +1089-134691-0012-1719: hyp=['IT', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'MOVE', 'HIMSELF', 'TO', 'BE', 'GENEROUS', 'TOWARDS', 'THEM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'IF', 'HE', 'EVER', 'CAME', 'TO', 'THEIR', 'GATES', 'STRIPPED', 'OF', 'HIS', 'PRIDE', 'BEATEN', 'AND', 'IN', 'BEGGARS', 'WEEDS', 'THAT', 'THEY', 'WOULD', 'BE', 'GENEROUS', 'TOWARDS', 'HIM', 'LOVING', 'HIM', 'AS', 'THEMSELVES'] +1089-134691-0013-1720: ref=['IDLE', 'AND', 'EMBITTERING', 'FINALLY', 'TO', 'ARGUE', 'AGAINST', 'HIS', 'OWN', 'DISPASSIONATE', 'CERTITUDE', 'THAT', 'THE', 'COMMANDMENT', 'OF', 'LOVE', 'BADE', 'US', 'NOT', 'TO', 'LOVE', 'OUR', 'NEIGHBOUR', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'AMOUNT', 'AND', 'INTENSITY', 'OF', 'LOVE', 'BUT', 'TO', 'LOVE', 'HIM', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'KIND', 'OF', 'LOVE'] +1089-134691-0013-1720: hyp=['IDLE', 'AND', 'EMBITTERING', 'FINALLY', 'TO', 'ARGUE', 'AGAINST', 'HIS', 'OWN', 'DISPASSIONATE', 'CERTITUDE', 'THAT', 'THE', 'COMMANDMENT', 'OF', 'LOVE', 'BADE', 'US', 
'NOT', 'TO', 'LOVE', 'OUR', 'NEIGHBOUR', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'AMOUNT', 'AND', 'INTENSITY', 'OF', 'LOVE', 'BUT', 'TO', 'LOVE', 'HIM', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'KIND', 'OF', 'LOVE'] +1089-134691-0014-1721: ref=['THE', 'PHRASE', 'AND', 'THE', 'DAY', 'AND', 'THE', 'SCENE', 'HARMONIZED', 'IN', 'A', 'CHORD'] +1089-134691-0014-1721: hyp=['THE', 'PHRASE', 'AND', 'THE', 'DAY', 'AND', 'THE', 'SCENE', 'HARMONIZED', 'IN', 'ACCORD'] +1089-134691-0015-1722: ref=['WORDS', 'WAS', 'IT', 'THEIR', 'COLOURS'] +1089-134691-0015-1722: hyp=['WORDS', 'WAS', 'IT', 'THEIR', 'COLORS'] +1089-134691-0016-1723: ref=['THEY', 'WERE', 'VOYAGING', 'ACROSS', 'THE', 'DESERTS', 'OF', 'THE', 'SKY', 'A', 'HOST', 'OF', 'NOMADS', 'ON', 'THE', 'MARCH', 'VOYAGING', 'HIGH', 'OVER', 'IRELAND', 'WESTWARD', 'BOUND'] +1089-134691-0016-1723: hyp=['THEY', 'WERE', 'VOYAGING', 'ACROSS', 'THE', 'DESERTS', 'OF', 'THE', 'SKY', 'A', 'HOST', 'OF', 'NOMADS', 'ON', 'THE', 'MARCH', 'VOYAGING', 'HIGH', 'OVER', 'IRELAND', 'WESTWARD', 'BOUND'] +1089-134691-0017-1724: ref=['THE', 'EUROPE', 'THEY', 'HAD', 'COME', 'FROM', 'LAY', 'OUT', 'THERE', 'BEYOND', 'THE', 'IRISH', 'SEA', 'EUROPE', 'OF', 'STRANGE', 'TONGUES', 'AND', 'VALLEYED', 'AND', 'WOODBEGIRT', 'AND', 'CITADELLED', 'AND', 'OF', 'ENTRENCHED', 'AND', 'MARSHALLED', 'RACES'] +1089-134691-0017-1724: hyp=['THE', 'EUROPE', 'THEY', 'HAD', 'COME', 'FROM', 'LAY', 'OUT', 'THERE', 'BEYOND', 'THE', 'IRISH', 'SEA', 'EUROPE', 'OF', 'STRANGE', 'TONGUES', 'AND', 'VALLED', 'AND', 'WOULD', 'BE', 'GIRT', 'AND', 'CITADEL', 'AND', 'OF', 'ENTRENCHED', 'AND', 'MARSHALLED', 'RACES'] +1089-134691-0018-1725: ref=['AGAIN', 'AGAIN'] +1089-134691-0018-1725: hyp=['AGAIN', 'AGAIN'] +1089-134691-0019-1726: ref=['A', 'VOICE', 'FROM', 'BEYOND', 'THE', 'WORLD', 'WAS', 'CALLING'] +1089-134691-0019-1726: hyp=['A', 'VOICE', 'FROM', 'BEYOND', 'THE', 'WORLD', 'WAS', 'CALLING'] +1089-134691-0020-1727: ref=['HELLO', 'STEPHANOS', 'HERE', 'COMES', 'THE', 'DEDALUS'] +1089-134691-0020-1727: hyp=['HALLO', 'STUFFANOS', 'HERE', 'COMES', 'THE', 'DAEDALUS'] +1089-134691-0021-1728: ref=['THEIR', 'DIVING', 'STONE', 'POISED', 'ON', 'ITS', 'RUDE', 'SUPPORTS', 'AND', 'ROCKING', 'UNDER', 'THEIR', 'PLUNGES', 'AND', 'THE', 'ROUGH', 'HEWN', 'STONES', 'OF', 'THE', 'SLOPING', 'BREAKWATER', 'OVER', 'WHICH', 'THEY', 'SCRAMBLED', 'IN', 'THEIR', 'HORSEPLAY', 'GLEAMED', 'WITH', 'COLD', 'WET', 'LUSTRE'] +1089-134691-0021-1728: hyp=['THEIR', 'DIVING', 'STONE', 'POISED', 'ON', 'ITS', 'RUDE', 'SUPPORTS', 'AND', 'ROCKING', 'UNDER', 'THEIR', 'PLUNGES', 'AND', 'THE', 'ROUGH', 'HEWN', 'STONES', 'OF', 'THE', 'SLOPING', 'BRAKE', 'WATER', 'OVER', 'WHICH', 'THEY', 'SCRAMBLED', 'IN', 'THEIR', 'HORSE', 'PLAY', 'GLEAMED', 'WITH', 'COLD', 'WET', 'LUSTRE'] +1089-134691-0022-1729: ref=['HE', 'STOOD', 'STILL', 'IN', 'DEFERENCE', 'TO', 'THEIR', 'CALLS', 'AND', 'PARRIED', 'THEIR', 'BANTER', 'WITH', 'EASY', 'WORDS'] +1089-134691-0022-1729: hyp=['HE', 'STOOD', 'STILL', 'IN', 'DEFERENCE', 'TO', 'THEIR', 'CALLS', 'AND', 'PARRIED', 'THEIR', 'BANTER', 'WITH', 'EASY', 'WORDS'] +1089-134691-0023-1730: ref=['IT', 'WAS', 'A', 'PAIN', 'TO', 'SEE', 'THEM', 'AND', 'A', 'SWORD', 'LIKE', 'PAIN', 'TO', 'SEE', 'THE', 'SIGNS', 'OF', 'ADOLESCENCE', 'THAT', 'MADE', 'REPELLENT', 'THEIR', 'PITIABLE', 'NAKEDNESS'] +1089-134691-0023-1730: hyp=['IT', 'WAS', 'A', 'PAIN', 'TO', 'SEE', 'THEM', 'AND', 'A', 'SWORD', 'LIKE', 'PAIN', 'TO', 'SEE', 'THE', 'SIGNS', 'OF', 'ADOLESCENCE', 'THAT', 'MADE', 'REPELLENT', 'THEIR', 'PITIABLE', 'NAKEDNESS'] +1089-134691-0024-1731: 
ref=['STEPHANOS', 'DEDALOS'] +1089-134691-0024-1731: hyp=["STEPHANO'S", 'DEAD', 'LOSS'] +1089-134691-0025-1732: ref=['A', 'MOMENT', 'BEFORE', 'THE', 'GHOST', 'OF', 'THE', 'ANCIENT', 'KINGDOM', 'OF', 'THE', 'DANES', 'HAD', 'LOOKED', 'FORTH', 'THROUGH', 'THE', 'VESTURE', 'OF', 'THE', 'HAZEWRAPPED', 'CITY'] +1089-134691-0025-1732: hyp=['A', 'MOMENT', 'BEFORE', 'THE', 'GHOST', 'OF', 'THE', 'ANCIENT', 'KINGDOM', 'OF', 'THE', 'DANES', 'HAD', 'LOOKED', 'FORTH', 'THROUGH', 'THE', 'VESTURE', 'OF', 'THE', 'HAYES', 'WRAPPED', 'CITY'] +1188-133604-0000-1771: ref=['YOU', 'WILL', 'FIND', 'ME', 'CONTINUALLY', 'SPEAKING', 'OF', 'FOUR', 'MEN', 'TITIAN', 'HOLBEIN', 'TURNER', 'AND', 'TINTORET', 'IN', 'ALMOST', 'THE', 'SAME', 'TERMS'] +1188-133604-0000-1771: hyp=['YOU', 'WILL', 'FIND', 'ME', 'CONTINUALLY', 'SPEAKING', 'OF', 'FOUR', 'MEN', 'TITIAN', 'HOLBINE', 'TURNER', 'AND', 'TINCTARETTE', 'IN', 'ALMOST', 'THE', 'SAME', 'TERMS'] +1188-133604-0001-1772: ref=['THEY', 'UNITE', 'EVERY', 'QUALITY', 'AND', 'SOMETIMES', 'YOU', 'WILL', 'FIND', 'ME', 'REFERRING', 'TO', 'THEM', 'AS', 'COLORISTS', 'SOMETIMES', 'AS', 'CHIAROSCURISTS'] +1188-133604-0001-1772: hyp=['THE', 'UNITE', 'EVERY', 'QUALITY', 'AND', 'SOMETIMES', 'YOU', 'WILL', 'FIND', 'ME', 'REFERRING', 'TO', 'THEM', 'AS', 'COLORISTS', 'SOMETIMES', 'AS', 'KIARASCURISTS'] +1188-133604-0002-1773: ref=['BY', 'BEING', 'STUDIOUS', 'OF', 'COLOR', 'THEY', 'ARE', 'STUDIOUS', 'OF', 'DIVISION', 'AND', 'WHILE', 'THE', 'CHIAROSCURIST', 'DEVOTES', 'HIMSELF', 'TO', 'THE', 'REPRESENTATION', 'OF', 'DEGREES', 'OF', 'FORCE', 'IN', 'ONE', 'THING', 'UNSEPARATED', 'LIGHT', 'THE', 'COLORISTS', 'HAVE', 'FOR', 'THEIR', 'FUNCTION', 'THE', 'ATTAINMENT', 'OF', 'BEAUTY', 'BY', 'ARRANGEMENT', 'OF', 'THE', 'DIVISIONS', 'OF', 'LIGHT'] +1188-133604-0002-1773: hyp=['BY', 'BEING', 'STUDIOUS', 'OF', 'COLOR', 'THEY', 'ARE', 'STUDIOUS', 'OF', 'DIVISION', 'AND', 'WHILE', 'THE', 'CURE', 'SCURUS', 'DEVOTES', 'HIMSELF', 'TO', 'THE', 'REPRESENTATION', 'OF', 'DEGREES', 'OF', 'FORCE', 'IN', 'ONE', 'THING', 'ON', 'SEPARATED', 'LIGHT', 'THE', 'COLORISTS', 'HAVE', 'FOR', 'THEIR', 'FUNCTION', 'THE', 'ATTAINMENT', 'OF', 'BEAUTY', 'BY', 'ARRANGEMENT', 'OF', 'THE', 'DIVISIONS', 'OF', 'LIGHT'] +1188-133604-0003-1774: ref=['MY', 'FIRST', 'AND', 'PRINCIPAL', 'REASON', 'WAS', 'THAT', 'THEY', 'ENFORCED', 'BEYOND', 'ALL', 'RESISTANCE', 'ON', 'ANY', 'STUDENT', 'WHO', 'MIGHT', 'ATTEMPT', 'TO', 'COPY', 'THEM', 'THIS', 'METHOD', 'OF', 'LAYING', 'PORTIONS', 'OF', 'DISTINCT', 'HUE', 'SIDE', 'BY', 'SIDE'] +1188-133604-0003-1774: hyp=['MY', 'FIRST', 'AND', 'PRINCIPAL', 'REASON', 'WAS', 'THAT', 'THEY', 'ENFORCED', 'BEYOND', 'ALL', 'RESISTANCE', 'ON', 'ANY', 'STUDENT', 'WHO', 'MIGHT', 'ATTEMPT', 'TO', 'COPY', 'THEM', 'THIS', 'METHOD', 'OF', 'LAYING', 'PORTIONS', 'OF', 'DISTINCT', 'HUE', 'SIDE', 'BY', 'SIDE'] +1188-133604-0004-1775: ref=['SOME', 'OF', 'THE', 'TOUCHES', 'INDEED', 'WHEN', 'THE', 'TINT', 'HAS', 'BEEN', 'MIXED', 'WITH', 'MUCH', 'WATER', 'HAVE', 'BEEN', 'LAID', 'IN', 'LITTLE', 'DROPS', 'OR', 'PONDS', 'SO', 'THAT', 'THE', 'PIGMENT', 'MIGHT', 'CRYSTALLIZE', 'HARD', 'AT', 'THE', 'EDGE'] +1188-133604-0004-1775: hyp=['SOME', 'OF', 'THE', 'TOUCHES', 'INDEED', 'WHEN', 'THE', 'TINT', 'HAS', 'BEEN', 'MIXED', 'WITH', 'MUCH', 'WATER', 'HAVE', 'BEEN', 'LAID', 'IN', 'LITTLE', 'DROPS', 'OR', 'PONDS', 'SO', 'THAT', 'THE', 'PIGMENT', 'MIGHT', 'CRYSTALLIZE', 'HARD', 'AT', 'THE', 'EDGE'] +1188-133604-0005-1776: ref=['IT', 'IS', 'THE', 'HEAD', 'OF', 'A', 'PARROT', 'WITH', 'A', 'LITTLE', 'FLOWER', 'IN', 'HIS', 'BEAK', 'FROM', 'A', 
'PICTURE', 'OF', "CARPACCIO'S", 'ONE', 'OF', 'HIS', 'SERIES', 'OF', 'THE', 'LIFE', 'OF', 'SAINT', 'GEORGE'] +1188-133604-0005-1776: hyp=['IT', 'IS', 'THE', 'HEAD', 'OF', 'A', 'PARROT', 'WITH', 'A', 'LITTLE', 'FLOWER', 'IN', 'HIS', 'BEAK', 'FROM', 'A', 'PICTURE', 'OF', "CARPATCHIO'S", 'ONE', 'OF', 'HIS', 'SERIES', 'OF', 'THE', 'LIFE', 'OF', 'SAINT', 'GEORGE'] +1188-133604-0006-1777: ref=['THEN', 'HE', 'COMES', 'TO', 'THE', 'BEAK', 'OF', 'IT'] +1188-133604-0006-1777: hyp=['THEN', 'HE', 'COMES', 'TO', 'THE', 'BEAK', 'OF', 'IT'] +1188-133604-0007-1778: ref=['THE', 'BROWN', 'GROUND', 'BENEATH', 'IS', 'LEFT', 'FOR', 'THE', 'MOST', 'PART', 'ONE', 'TOUCH', 'OF', 'BLACK', 'IS', 'PUT', 'FOR', 'THE', 'HOLLOW', 'TWO', 'DELICATE', 'LINES', 'OF', 'DARK', 'GRAY', 'DEFINE', 'THE', 'OUTER', 'CURVE', 'AND', 'ONE', 'LITTLE', 'QUIVERING', 'TOUCH', 'OF', 'WHITE', 'DRAWS', 'THE', 'INNER', 'EDGE', 'OF', 'THE', 'MANDIBLE'] +1188-133604-0007-1778: hyp=['THE', 'BROWN', 'GROUND', 'BENEATH', 'IS', 'LEFT', 'FOR', 'THE', 'MOST', 'PART', 'ONE', 'TOUCH', 'OF', 'BLACK', 'IS', 'PUT', 'FOR', 'THE', 'HOLLOW', 'TOO', 'DELICATE', 'LINES', 'OF', 'DARK', 'GREY', 'TO', 'FIND', 'THE', 'OUTER', 'CURVE', 'AND', 'ONE', 'LITTLE', 'QUIVERING', 'TOUCH', 'OF', 'WHITE', 'DRAWS', 'THE', 'INNER', 'EDGE', 'OF', 'THE', 'MANDIBLE'] +1188-133604-0008-1779: ref=['FOR', 'BELIEVE', 'ME', 'THE', 'FINAL', 'PHILOSOPHY', 'OF', 'ART', 'CAN', 'ONLY', 'RATIFY', 'THEIR', 'OPINION', 'THAT', 'THE', 'BEAUTY', 'OF', 'A', 'COCK', 'ROBIN', 'IS', 'TO', 'BE', 'RED', 'AND', 'OF', 'A', 'GRASS', 'PLOT', 'TO', 'BE', 'GREEN', 'AND', 'THE', 'BEST', 'SKILL', 'OF', 'ART', 'IS', 'IN', 'INSTANTLY', 'SEIZING', 'ON', 'THE', 'MANIFOLD', 'DELICIOUSNESS', 'OF', 'LIGHT', 'WHICH', 'YOU', 'CAN', 'ONLY', 'SEIZE', 'BY', 'PRECISION', 'OF', 'INSTANTANEOUS', 'TOUCH'] +1188-133604-0008-1779: hyp=['FOR', 'BELIEVE', 'ME', 'THE', 'FINAL', 'PHILOSOPHY', 'OF', 'ART', 'CAN', 'ONLY', 'RATIFY', 'THEIR', 'OPINION', 'THAT', 'THE', 'BEAUTY', 'OF', 'A', 'COCK', 'ROBIN', 'IS', 'TO', 'BE', 'READ', 'AND', 'OF', 'A', 'GRASS', 'PLOT', 'TO', 'BE', 'GREEN', 'AND', 'THE', 'BEST', 'SKILL', 'OF', 'ART', 'IS', 'AN', 'INSTANTLY', 'SEIZING', 'ON', 'THE', 'MANIFOLD', 'DELICIOUSNESS', 'OF', 'LIGHT', 'WHICH', 'YOU', 'CAN', 'ONLY', 'SEIZE', 'BY', 'PRECISION', 'OF', 'INSTANTANEOUS', 'TOUCH'] +1188-133604-0009-1780: ref=['NOW', 'YOU', 'WILL', 'SEE', 'IN', 'THESE', 'STUDIES', 'THAT', 'THE', 'MOMENT', 'THE', 'WHITE', 'IS', 'INCLOSED', 'PROPERLY', 'AND', 'HARMONIZED', 'WITH', 'THE', 'OTHER', 'HUES', 'IT', 'BECOMES', 'SOMEHOW', 'MORE', 'PRECIOUS', 'AND', 'PEARLY', 'THAN', 'THE', 'WHITE', 'PAPER', 'AND', 'THAT', 'I', 'AM', 'NOT', 'AFRAID', 'TO', 'LEAVE', 'A', 'WHOLE', 'FIELD', 'OF', 'UNTREATED', 'WHITE', 'PAPER', 'ALL', 'ROUND', 'IT', 'BEING', 'SURE', 'THAT', 'EVEN', 'THE', 'LITTLE', 'DIAMONDS', 'IN', 'THE', 'ROUND', 'WINDOW', 'WILL', 'TELL', 'AS', 'JEWELS', 'IF', 'THEY', 'ARE', 'GRADATED', 'JUSTLY'] +1188-133604-0009-1780: hyp=['NOW', 'YOU', 'WILL', 'SEE', 'IN', 'THESE', 'STUDIES', 'THAT', 'THE', 'MOMENT', 'THE', 'WIGHT', 'IS', 'ENCLOSED', 'PROPERLY', 'AND', 'HARMONIZE', 'WITH', 'THE', 'OTHER', 'HUES', 'IT', 'BECOMES', 'SOMEHOW', 'MORE', 'PRECIOUS', 'AND', 'PEARLY', 'THAN', 'THE', 'WHITE', 'PAPER', 'AND', 'THAT', 'I', 'AM', 'NOT', 'AFRAID', 'TO', 'LEAVE', 'A', 'WHOLE', 'FIELD', 'OF', 'UNTREATED', 'WHITE', 'PAPER', 'ALL', 'ROUND', 'IT', 'BEING', 'SURE', 'THAT', 'EVEN', 'THE', 'LITTLE', 'DIAMONDS', 'IN', 'THE', 'ROUND', 'WINDOW', 'WILL', 'TELL', 'AS', 'JEWELS', 'IF', 'THEY', 'ARE', 'GRADATED', 'JUSTLY'] 
+1188-133604-0010-1781: ref=['BUT', 'IN', 'THIS', 'VIGNETTE', 'COPIED', 'FROM', 'TURNER', 'YOU', 'HAVE', 'THE', 'TWO', 'PRINCIPLES', 'BROUGHT', 'OUT', 'PERFECTLY'] +1188-133604-0010-1781: hyp=['BUT', 'IN', 'THIS', 'VINEY', 'COPIED', 'FROM', 'TURNER', 'YOU', 'HAVE', 'THE', 'TWO', 'PRINCIPLES', 'BROUGHT', 'OUT', 'PERFECTLY'] +1188-133604-0011-1782: ref=['THEY', 'ARE', 'BEYOND', 'ALL', 'OTHER', 'WORKS', 'THAT', 'I', 'KNOW', 'EXISTING', 'DEPENDENT', 'FOR', 'THEIR', 'EFFECT', 'ON', 'LOW', 'SUBDUED', 'TONES', 'THEIR', 'FAVORITE', 'CHOICE', 'IN', 'TIME', 'OF', 'DAY', 'BEING', 'EITHER', 'DAWN', 'OR', 'TWILIGHT', 'AND', 'EVEN', 'THEIR', 'BRIGHTEST', 'SUNSETS', 'PRODUCED', 'CHIEFLY', 'OUT', 'OF', 'GRAY', 'PAPER'] +1188-133604-0011-1782: hyp=['THEY', 'ARE', 'BEYOND', 'ALL', 'OTHER', 'WORKS', 'THAN', 'I', 'KNOW', 'EXISTING', 'DEPENDENT', 'FOR', 'THEIR', 'EFFECT', 'ON', 'LOW', 'SUBDUED', 'TONES', 'THEIR', 'FAVORITE', 'CHOICE', 'IN', 'TIME', 'OF', 'DAY', 'BEING', 'EITHER', 'DAWN', 'OR', 'TWILIGHT', 'AND', 'EVEN', 'THEIR', 'BRIGHTEST', 'SUNSETS', 'PRODUCED', 'CHIEFLY', 'OUT', 'OF', 'GRAY', 'PAPER'] +1188-133604-0012-1783: ref=['IT', 'MAY', 'BE', 'THAT', 'A', 'GREAT', 'COLORIST', 'WILL', 'USE', 'HIS', 'UTMOST', 'FORCE', 'OF', 'COLOR', 'AS', 'A', 'SINGER', 'HIS', 'FULL', 'POWER', 'OF', 'VOICE', 'BUT', 'LOUD', 'OR', 'LOW', 'THE', 'VIRTUE', 'IS', 'IN', 'BOTH', 'CASES', 'ALWAYS', 'IN', 'REFINEMENT', 'NEVER', 'IN', 'LOUDNESS'] +1188-133604-0012-1783: hyp=['IT', 'MAY', 'BE', 'THAT', 'A', 'GREAT', 'COLOR', 'LIST', 'WILL', 'USE', 'HIS', 'UTMOST', 'FORCE', 'OF', 'COLOR', 'AS', 'A', 'SINGER', 'HIS', 'FULL', 'POWER', 'OF', 'VOICE', 'BUT', 'LOUD', 'OR', 'LOW', 'THE', 'VIRTUE', 'IS', 'IN', 'BOTH', 'CASES', 'ALWAYS', 'IN', 'REFINEMENT', 'NEVER', 'IN', 'LOUDNESS'] +1188-133604-0013-1784: ref=['IT', 'MUST', 'REMEMBER', 'BE', 'ONE', 'OR', 'THE', 'OTHER'] +1188-133604-0013-1784: hyp=['IT', 'MUST', 'REMEMBER', 'BE', 'ONE', 'OR', 'THE', 'OTHER'] +1188-133604-0014-1785: ref=['DO', 'NOT', 'THEREFORE', 'THINK', 'THAT', 'THE', 'GOTHIC', 'SCHOOL', 'IS', 'AN', 'EASY', 'ONE'] +1188-133604-0014-1785: hyp=['DO', 'NOT', 'THEREFORE', 'THINK', 'THAT', 'THE', 'GOTHIC', 'SCHOOLS', 'AN', 'EASY', 'ONE'] +1188-133604-0015-1786: ref=['THE', 'LAW', 'OF', 'THAT', 'SCHOOL', 'IS', 'THAT', 'EVERYTHING', 'SHALL', 'BE', 'SEEN', 'CLEARLY', 'OR', 'AT', 'LEAST', 'ONLY', 'IN', 'SUCH', 'MIST', 'OR', 'FAINTNESS', 'AS', 'SHALL', 'BE', 'DELIGHTFUL', 'AND', 'I', 'HAVE', 'NO', 'DOUBT', 'THAT', 'THE', 'BEST', 'INTRODUCTION', 'TO', 'IT', 'WOULD', 'BE', 'THE', 'ELEMENTARY', 'PRACTICE', 'OF', 'PAINTING', 'EVERY', 'STUDY', 'ON', 'A', 'GOLDEN', 'GROUND'] +1188-133604-0015-1786: hyp=['THE', 'LAW', 'OF', 'THAT', 'SCHOOL', 'IS', 'THAT', 'EVERYTHING', 'SHALL', 'BE', 'SEEN', 'CLEARLY', 'OR', 'AT', 'LEAST', 'ONLY', 'IN', 'SUCH', 'MIST', 'OR', 'FAINTNESS', 'AS', 'SHALL', 'BE', 'DELIGHTFUL', 'AND', 'I', 'HAVE', 'NO', 'DOUBT', 'THAT', 'THE', 'BEST', 'INTRODUCTION', 'TO', 'IT', 'WOULD', 'BE', 'THE', 'ELEMENTARY', 'PRACTICE', 'OF', 'PAINTING', 'EVERY', 'STUDY', 'ON', 'A', 'GOLDEN', 'GROUND'] +1188-133604-0016-1787: ref=['THIS', 'AT', 'ONCE', 'COMPELS', 'YOU', 'TO', 'UNDERSTAND', 'THAT', 'THE', 'WORK', 'IS', 'TO', 'BE', 'IMAGINATIVE', 'AND', 'DECORATIVE', 'THAT', 'IT', 'REPRESENTS', 'BEAUTIFUL', 'THINGS', 'IN', 'THE', 'CLEAREST', 'WAY', 'BUT', 'NOT', 'UNDER', 'EXISTING', 'CONDITIONS', 'AND', 'THAT', 'IN', 'FACT', 'YOU', 'ARE', 'PRODUCING', "JEWELER'S", 'WORK', 'RATHER', 'THAN', 'PICTURES'] +1188-133604-0016-1787: hyp=['THIS', 'AT', 'ONCE', 'COMPELS', 'YOU', 'TO', 'UNDERSTAND', 
'THAT', 'THE', 'WORK', 'IS', 'TO', 'BE', 'IMAGINATIVE', 'AND', 'DECORATIVE', 'THAT', 'IT', 'REPRESENTS', 'BEAUTIFUL', 'THINGS', 'IN', 'THE', 'CLEAREST', 'WAY', 'BUT', 'NOT', 'UNDER', 'EXISTING', 'CONDITIONS', 'AND', 'THAT', 'IN', 'FACT', 'YOU', 'ARE', 'PRODUCING', "JEWELLER'S", 'WORK', 'RATHER', 'THAN', 'PICTURES'] +1188-133604-0017-1788: ref=['THAT', 'A', 'STYLE', 'IS', 'RESTRAINED', 'OR', 'SEVERE', 'DOES', 'NOT', 'MEAN', 'THAT', 'IT', 'IS', 'ALSO', 'ERRONEOUS'] +1188-133604-0017-1788: hyp=['THAT', 'A', 'STYLE', 'IS', 'RESTRAINED', 'OR', 'SEVERE', 'DOES', 'NOT', 'MEAN', 'THAT', 'IT', 'IS', 'ALSO', 'ERRONEOUS'] +1188-133604-0018-1789: ref=['IN', 'ALL', 'EARLY', 'GOTHIC', 'ART', 'INDEED', 'YOU', 'WILL', 'FIND', 'FAILURE', 'OF', 'THIS', 'KIND', 'ESPECIALLY', 'DISTORTION', 'AND', 'RIGIDITY', 'WHICH', 'ARE', 'IN', 'MANY', 'RESPECTS', 'PAINFULLY', 'TO', 'BE', 'COMPARED', 'WITH', 'THE', 'SPLENDID', 'REPOSE', 'OF', 'CLASSIC', 'ART'] +1188-133604-0018-1789: hyp=['IN', 'ALL', 'EARLY', 'GOTHIC', 'ART', 'INDEED', 'YOU', 'WILL', 'FIND', 'FAILURE', 'OF', 'THIS', 'KIND', 'ESPECIALLY', 'DISTORTION', 'AND', 'RIGIDITY', 'WHICH', 'ARE', 'IN', 'MANY', 'RESPECTS', 'PAINFULLY', 'TO', 'BE', 'COMPARED', 'WITH', 'THE', 'SPLENDID', 'REPOSE', 'OF', 'CLASSIC', 'ART'] +1188-133604-0019-1790: ref=['THE', 'LARGE', 'LETTER', 'CONTAINS', 'INDEED', 'ENTIRELY', 'FEEBLE', 'AND', 'ILL', 'DRAWN', 'FIGURES', 'THAT', 'IS', 'MERELY', 'CHILDISH', 'AND', 'FAILING', 'WORK', 'OF', 'AN', 'INFERIOR', 'HAND', 'IT', 'IS', 'NOT', 'CHARACTERISTIC', 'OF', 'GOTHIC', 'OR', 'ANY', 'OTHER', 'SCHOOL'] +1188-133604-0019-1790: hyp=['THE', 'LARGE', 'LETTER', 'CONTAINS', 'INDEED', 'ENTIRELY', 'FEEBLE', 'AND', 'ILL', 'DRAWN', 'FIGURES', 'THAT', 'IS', 'MERELY', 'CHILDISH', 'AND', 'FAILING', 'WORK', 'OF', 'AN', 'INFERIOR', 'HAND', 'IT', 'IS', 'NOT', 'CHARACTERISTIC', 'OF', 'GOTHIC', 'OR', 'ANY', 'OTHER', 'SCHOOL'] +1188-133604-0020-1791: ref=['BUT', 'OBSERVE', 'YOU', 'CAN', 'ONLY', 'DO', 'THIS', 'ON', 'ONE', 'CONDITION', 'THAT', 'OF', 'STRIVING', 'ALSO', 'TO', 'CREATE', 'IN', 'REALITY', 'THE', 'BEAUTY', 'WHICH', 'YOU', 'SEEK', 'IN', 'IMAGINATION'] +1188-133604-0020-1791: hyp=['BUT', 'OBSERVE', 'YOU', 'CAN', 'ONLY', 'DO', 'THIS', 'ON', 'ONE', 'CONDITION', 'THAT', 'OF', 'STRIVING', 'ALSO', 'TO', 'CREATE', 'IN', 'REALITY', 'THE', 'BEAUTY', 'WHICH', 'YOU', 'SEEK', 'IN', 'IMAGINATION'] +1188-133604-0021-1792: ref=['IT', 'WILL', 'BE', 'WHOLLY', 'IMPOSSIBLE', 'FOR', 'YOU', 'TO', 'RETAIN', 'THE', 'TRANQUILLITY', 'OF', 'TEMPER', 'AND', 'FELICITY', 'OF', 'FAITH', 'NECESSARY', 'FOR', 'NOBLE', 'PURIST', 'PAINTING', 'UNLESS', 'YOU', 'ARE', 'ACTIVELY', 'ENGAGED', 'IN', 'PROMOTING', 'THE', 'FELICITY', 'AND', 'PEACE', 'OF', 'PRACTICAL', 'LIFE'] +1188-133604-0021-1792: hyp=['IT', 'WILL', 'BE', 'WHOLLY', 'IMPOSSIBLE', 'FOR', 'YOU', 'TO', 'RETAIN', 'THE', 'TRANQUILLITY', 'OF', 'TEMPER', 'AND', 'FELICITY', 'OF', 'FAITH', 'NECESSARY', 'FOR', 'NOBLE', 'PUREST', 'PAINTING', 'UNLESS', 'YOU', 'ARE', 'ACTIVELY', 'ENGAGED', 'IN', 'PROMOTING', 'THE', 'FELICITY', 'AND', 'PEACE', 'OF', 'PRACTICAL', 'LIFE'] +1188-133604-0022-1793: ref=['YOU', 'MUST', 'LOOK', 'AT', 'HIM', 'IN', 'THE', 'FACE', 'FIGHT', 'HIM', 'CONQUER', 'HIM', 'WITH', 'WHAT', 'SCATHE', 'YOU', 'MAY', 'YOU', 'NEED', 'NOT', 'THINK', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'HIM'] +1188-133604-0022-1793: hyp=['YOU', 'MUST', 'LOOK', 'AT', 'HIM', 'IN', 'THE', 'FACE', 'FIGHT', 'HIM', 'CONQUER', 'HIM', 'WITH', 'WHAT', 'SCATH', 'YOU', 'MAY', 'YOU', 'NEED', 'NOT', 'THINK', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 
'HIM'] +1188-133604-0023-1794: ref=['THE', 'COLORIST', 'SAYS', 'FIRST', 'OF', 'ALL', 'AS', 'MY', 'DELICIOUS', 'PAROQUET', 'WAS', 'RUBY', 'SO', 'THIS', 'NASTY', 'VIPER', 'SHALL', 'BE', 'BLACK', 'AND', 'THEN', 'IS', 'THE', 'QUESTION', 'CAN', 'I', 'ROUND', 'HIM', 'OFF', 'EVEN', 'THOUGH', 'HE', 'IS', 'BLACK', 'AND', 'MAKE', 'HIM', 'SLIMY', 'AND', 'YET', 'SPRINGY', 'AND', 'CLOSE', 'DOWN', 'CLOTTED', 'LIKE', 'A', 'POOL', 'OF', 'BLACK', 'BLOOD', 'ON', 'THE', 'EARTH', 'ALL', 'THE', 'SAME'] +1188-133604-0023-1794: hyp=['THE', 'CHOLERAIST', 'SAYS', 'FIRST', 'OF', 'ALL', 'AS', 'MY', 'DELICIOUS', 'PERICE', 'WAS', 'RUBY', 'SO', 'THIS', 'NASTY', 'VIPER', 'SHALL', 'BE', 'BLACK', 'AND', 'THEN', 'IS', 'THE', 'QUESTION', 'CAN', 'I', 'ROUND', 'HIM', 'OFF', 'EVEN', 'THOUGH', 'HE', 'IS', 'BLACK', 'AND', 'MAKE', 'HIM', 'SLIMY', 'AND', 'YET', 'SPRINGY', 'AND', 'CLOSE', 'DOWN', 'CLOTTED', 'LIKE', 'A', 'POOL', 'OF', 'BLACK', 'BLOOD', 'ON', 'THE', 'EARTH', 'ALL', 'THE', 'SAME'] +1188-133604-0024-1795: ref=['NOTHING', 'WILL', 'BE', 'MORE', 'PRECIOUS', 'TO', 'YOU', 'I', 'THINK', 'IN', 'THE', 'PRACTICAL', 'STUDY', 'OF', 'ART', 'THAN', 'THE', 'CONVICTION', 'WHICH', 'WILL', 'FORCE', 'ITSELF', 'ON', 'YOU', 'MORE', 'AND', 'MORE', 'EVERY', 'HOUR', 'OF', 'THE', 'WAY', 'ALL', 'THINGS', 'ARE', 'BOUND', 'TOGETHER', 'LITTLE', 'AND', 'GREAT', 'IN', 'SPIRIT', 'AND', 'IN', 'MATTER'] +1188-133604-0024-1795: hyp=['NOTHING', 'WILL', 'BE', 'MORE', 'PRECIOUS', 'TO', 'YOU', 'I', 'THINK', 'IN', 'THE', 'PRACTICAL', 'STUDY', 'OF', 'ART', 'THAN', 'THE', 'CONVICTION', 'WHICH', 'WILL', 'FORCE', 'ITSELF', 'ON', 'YOU', 'MORE', 'AND', 'MORE', 'EVERY', 'HOUR', 'OF', 'THE', 'WAY', 'ALL', 'THINGS', 'ARE', 'BOUND', 'TOGETHER', 'LITTLE', 'AND', 'GREAT', 'IN', 'SPIRIT', 'AND', 'IN', 'MATTER'] +1188-133604-0025-1796: ref=['YOU', 'KNOW', 'I', 'HAVE', 'JUST', 'BEEN', 'TELLING', 'YOU', 'HOW', 'THIS', 'SCHOOL', 'OF', 'MATERIALISM', 'AND', 'CLAY', 'INVOLVED', 'ITSELF', 'AT', 'LAST', 'IN', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0025-1796: hyp=['YOU', 'KNOW', "I'VE", 'JUST', 'BEEN', 'TELLING', 'YOU', 'HOW', 'THIS', 'SCHOOL', 'OF', 'MATERIALISM', 'IN', 'CLAY', 'INVOLVED', 'ITSELF', 'AT', 'LAST', 'IN', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0026-1797: ref=['HERE', 'IS', 'AN', 'EQUALLY', 'TYPICAL', 'GREEK', 'SCHOOL', 'LANDSCAPE', 'BY', 'WILSON', 'LOST', 'WHOLLY', 'IN', 'GOLDEN', 'MIST', 'THE', 'TREES', 'SO', 'SLIGHTLY', 'DRAWN', 'THAT', 'YOU', "DON'T", 'KNOW', 'IF', 'THEY', 'ARE', 'TREES', 'OR', 'TOWERS', 'AND', 'NO', 'CARE', 'FOR', 'COLOR', 'WHATEVER', 'PERFECTLY', 'DECEPTIVE', 'AND', 'MARVELOUS', 'EFFECT', 'OF', 'SUNSHINE', 'THROUGH', 'THE', 'MIST', 'APOLLO', 'AND', 'THE', 'PYTHON'] +1188-133604-0026-1797: hyp=['HERE', 'IS', 'AN', 'EQUALLY', 'TYPICAL', 'GREEK', 'SCHOOL', 'LANDSCAPE', 'BY', 'WILSON', 'LOST', 'WHOLLY', 'IN', 'GOLDEN', 'MIST', 'THE', 'TREES', 'SO', 'SLIGHTLY', 'DRAWN', 'THAT', 'YOU', "DON'T", 'KNOW', 'IF', 'THEY', 'ARE', 'TREES', 'OR', 'TOWERS', 'AND', 'NO', 'CARE', 'FOR', 'COLOR', 'WHATSOEVER', 'PERFECTLY', 'DECEPTIVE', 'AND', 'MARVELLOUS', 'EFFECT', 'OF', 'SUNSHINE', 'THROUGH', 'THE', 'MIST', 'APOLLO', 'IN', 'THE', 'PYTHON'] +1188-133604-0027-1798: ref=['NOW', 'HERE', 'IS', 'RAPHAEL', 'EXACTLY', 'BETWEEN', 'THE', 'TWO', 'TREES', 'STILL', 'DRAWN', 'LEAF', 'BY', 'LEAF', 'WHOLLY', 'FORMAL', 'BUT', 'BEAUTIFUL', 'MIST', 'COMING', 'GRADUALLY', 'INTO', 'THE', 'DISTANCE'] +1188-133604-0027-1798: hyp=['NOW', 'HERE', 'IS', 'RAPHAEL', 'EXACTLY', 'BETWEEN', 'THE', 'TWO', 'TREES', 'STILL', 'DRAWN', 'LEAF', 'BY', 'LEAF', 'HOLY', 'FORMAL', 'BUT', 'BEAUTIFUL', 
'MIST', 'COMING', 'GRADUALLY', 'INTO', 'THE', 'DISTANCE'] +1188-133604-0028-1799: ref=['WELL', 'THEN', 'LAST', 'HERE', 'IS', "TURNER'S", 'GREEK', 'SCHOOL', 'OF', 'THE', 'HIGHEST', 'CLASS', 'AND', 'YOU', 'DEFINE', 'HIS', 'ART', 'ABSOLUTELY', 'AS', 'FIRST', 'THE', 'DISPLAYING', 'INTENSELY', 'AND', 'WITH', 'THE', 'STERNEST', 'INTELLECT', 'OF', 'NATURAL', 'FORM', 'AS', 'IT', 'IS', 'AND', 'THEN', 'THE', 'ENVELOPMENT', 'OF', 'IT', 'WITH', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0028-1799: hyp=['WELL', 'THEN', 'LAST', 'HERE', 'IS', 'TURNERS', 'GREEK', 'SCHOOL', 'OF', 'THE', 'HIGHEST', 'CLASS', 'AND', 'YOU', 'DEFINE', 'HIS', 'ART', 'ABSOLUTELY', 'AS', 'FIRST', 'THE', 'DISPLAYING', 'INTENSELY', 'AND', 'WITH', 'THE', 'STERNEST', 'INTELLECT', 'OF', 'NATURAL', 'FORM', 'AS', 'IT', 'IS', 'AND', 'THEN', 'THE', 'ENVELOPMENT', 'OF', 'IT', 'WITH', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0029-1800: ref=['ONLY', 'THERE', 'ARE', 'TWO', 'SORTS', 'OF', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0029-1800: hyp=['ONLY', 'THERE', 'ARE', 'TWO', 'SORTS', 'OF', 'CLOUD', 'IN', 'FIRE'] +1188-133604-0030-1801: ref=['HE', 'KNOWS', 'THEM', 'BOTH'] +1188-133604-0030-1801: hyp=['HE', 'KNOWS', 'THEM', 'BOTH'] +1188-133604-0031-1802: ref=["THERE'S", 'ONE', 'AND', "THERE'S", 'ANOTHER', 'THE', 'DUDLEY', 'AND', 'THE', 'FLINT'] +1188-133604-0031-1802: hyp=["THERE'S", 'ONE', 'AND', "THERE'S", 'ANOTHER', 'THE', 'DUDLEY', 'AND', 'THE', 'FLINT'] +1188-133604-0032-1803: ref=['IT', 'IS', 'ONLY', 'A', 'PENCIL', 'OUTLINE', 'BY', 'EDWARD', 'BURNE', 'JONES', 'IN', 'ILLUSTRATION', 'OF', 'THE', 'STORY', 'OF', 'PSYCHE', 'IT', 'IS', 'THE', 'INTRODUCTION', 'OF', 'PSYCHE', 'AFTER', 'ALL', 'HER', 'TROUBLES', 'INTO', 'HEAVEN'] +1188-133604-0032-1803: hyp=['IT', 'IS', 'ONLY', 'A', 'PENCIL', 'OUTLINE', 'BY', 'EDWARD', 'BYRNE', 'JONES', 'IN', 'ILLUSTRATION', 'OF', 'THE', 'STORY', 'OF', 'PSYCHE', 'IT', 'IS', 'THE', 'INTRODUCTION', 'OF', 'PSYCHE', 'AFTER', 'ALL', 'HER', 'TROUBLES', 'AND', 'TO', 'HEAVEN'] +1188-133604-0033-1804: ref=['EVERY', 'PLANT', 'IN', 'THE', 'GRASS', 'IS', 'SET', 'FORMALLY', 'GROWS', 'PERFECTLY', 'AND', 'MAY', 'BE', 'REALIZED', 'COMPLETELY'] +1188-133604-0033-1804: hyp=['EVERY', 'PLANT', 'IN', 'THE', 'GRASS', 'IS', 'SET', 'FORMERLY', 'GROWS', 'PERFECTLY', 'AND', 'MAY', 'BE', 'REALIZED', 'COMPLETELY'] +1188-133604-0034-1805: ref=['EXQUISITE', 'ORDER', 'AND', 'UNIVERSAL', 'WITH', 'ETERNAL', 'LIFE', 'AND', 'LIGHT', 'THIS', 'IS', 'THE', 'FAITH', 'AND', 'EFFORT', 'OF', 'THE', 'SCHOOLS', 'OF', 'CRYSTAL', 'AND', 'YOU', 'MAY', 'DESCRIBE', 'AND', 'COMPLETE', 'THEIR', 'WORK', 'QUITE', 'LITERALLY', 'BY', 'TAKING', 'ANY', 'VERSES', 'OF', 'CHAUCER', 'IN', 'HIS', 'TENDER', 'MOOD', 'AND', 'OBSERVING', 'HOW', 'HE', 'INSISTS', 'ON', 'THE', 'CLEARNESS', 'AND', 'BRIGHTNESS', 'FIRST', 'AND', 'THEN', 'ON', 'THE', 'ORDER'] +1188-133604-0034-1805: hyp=['EXQUISITE', 'ORDER', 'AND', 'UNIVERSAL', 'WITH', 'ETERNAL', 'LIFE', 'AND', 'LIGHT', 'THIS', 'IS', 'THE', 'FAITH', 'AND', 'EFFORT', 'OF', 'THE', 'SCHOOLS', 'OF', 'CRISTEL', 'AND', 'YOU', 'MAY', 'DESCRIBE', 'AND', 'COMPLETE', 'THEIR', 'WORK', 'QUITE', 'LITERALLY', 'BY', 'TAKING', 'ANY', 'VERSES', 'OF', 'CHAUCER', 'IN', 'HIS', 'TENDER', 'MOOD', 'IN', 'OBSERVING', 'HOW', 'HE', 'INSISTS', 'ON', 'THE', 'CLEARNESS', 'AND', 'BRIGHTNESS', 'FIRST', 'AND', 'THEN', 'ON', 'THE', 'ORDER'] +1188-133604-0035-1806: ref=['THUS', 'IN', "CHAUCER'S", 'DREAM'] +1188-133604-0035-1806: hyp=['THUS', 'IN', "CHAUCER'S", 'DREAM'] +1188-133604-0036-1807: ref=['IN', 'BOTH', 'THESE', 'HIGH', 'MYTHICAL', 'SUBJECTS', 'THE', 'SURROUNDING', 'NATURE', 
'THOUGH', 'SUFFERING', 'IS', 'STILL', 'DIGNIFIED', 'AND', 'BEAUTIFUL'] +1188-133604-0036-1807: hyp=['IN', 'BOTH', 'THESE', 'HIGH', 'MYTHICAL', 'SUBJECTS', 'THE', 'SURROUNDING', 'NATURE', 'THOUGH', 'SUFFERING', 'IS', 'STILL', 'DIGNIFIED', 'AND', 'BEAUTIFUL'] +1188-133604-0037-1808: ref=['EVERY', 'LINE', 'IN', 'WHICH', 'THE', 'MASTER', 'TRACES', 'IT', 'EVEN', 'WHERE', 'SEEMINGLY', 'NEGLIGENT', 'IS', 'LOVELY', 'AND', 'SET', 'DOWN', 'WITH', 'A', 'MEDITATIVE', 'CALMNESS', 'WHICH', 'MAKES', 'THESE', 'TWO', 'ETCHINGS', 'CAPABLE', 'OF', 'BEING', 'PLACED', 'BESIDE', 'THE', 'MOST', 'TRANQUIL', 'WORK', 'OF', 'HOLBEIN', 'OR', 'DUERER'] +1188-133604-0037-1808: hyp=['EVERY', 'LINE', 'IN', 'WHICH', 'THE', 'MASTER', 'TRACES', 'IT', 'EVEN', 'WHERE', 'SEEMINGLY', 'NEGLIGENT', 'IS', 'LOVELY', 'AND', 'SET', 'DOWN', 'WITH', 'A', 'MEDITATIVE', 'CALMNESS', 'WHICH', 'MAKES', 'THESE', 'TWO', 'ETCHINGS', 'CAPABLE', 'OF', 'BEING', 'PLACED', 'BESIDE', 'THE', 'MOST', 'TRANQUIL', 'WORK', 'OF', 'HOLBINE', 'OR', 'DURE'] +1188-133604-0038-1809: ref=['BUT', 'NOW', 'HERE', 'IS', 'A', 'SUBJECT', 'OF', 'WHICH', 'YOU', 'WILL', 'WONDER', 'AT', 'FIRST', 'WHY', 'TURNER', 'DREW', 'IT', 'AT', 'ALL'] +1188-133604-0038-1809: hyp=['BUT', 'NOW', 'HERE', 'IS', 'A', 'SUBJECT', 'OF', 'WHICH', 'YOU', 'WILL', 'WONDER', 'AT', 'FIRST', 'WHY', 'TURNER', 'DREW', 'IT', 'AT', 'ALL'] +1188-133604-0039-1810: ref=['IT', 'HAS', 'NO', 'BEAUTY', 'WHATSOEVER', 'NO', 'SPECIALTY', 'OF', 'PICTURESQUENESS', 'AND', 'ALL', 'ITS', 'LINES', 'ARE', 'CRAMPED', 'AND', 'POOR'] +1188-133604-0039-1810: hyp=['IT', 'HAS', 'NO', 'BEAUTY', 'WHATSOEVER', 'NO', 'SPECIALTY', 'OF', 'PICTURESQUENESS', 'IN', 'ALL', 'ITS', 'LINES', 'ARE', 'CRAMPED', 'AND', 'POOR'] +1188-133604-0040-1811: ref=['THE', 'CRAMPNESS', 'AND', 'THE', 'POVERTY', 'ARE', 'ALL', 'INTENDED'] +1188-133604-0040-1811: hyp=['THE', 'CRAMPNESS', 'AND', 'THE', 'POVERTY', 'ARE', 'ALL', 'INTENDED'] +1188-133604-0041-1812: ref=['IT', 'IS', 'A', 'GLEANER', 'BRINGING', 'DOWN', 'HER', 'ONE', 'SHEAF', 'OF', 'CORN', 'TO', 'AN', 'OLD', 'WATERMILL', 'ITSELF', 'MOSSY', 'AND', 'RENT', 'SCARCELY', 'ABLE', 'TO', 'GET', 'ITS', 'STONES', 'TO', 'TURN'] +1188-133604-0041-1812: hyp=['IT', 'IS', 'A', 'GLEANER', 'BRINGING', 'DOWN', 'HER', 'ONE', 'SHEAF', 'OF', 'CORN', 'TO', 'AN', 'OLD', 'WATER', 'MILL', 'ITSELF', 'MOSSY', 'AND', 'RENT', 'SCARCELY', 'ABLE', 'TO', 'GET', 'ITS', 'STONES', 'TO', 'TURN'] +1188-133604-0042-1813: ref=['THE', 'SCENE', 'IS', 'ABSOLUTELY', 'ARCADIAN'] +1188-133604-0042-1813: hyp=['THE', 'SCENE', 'IS', 'ABSOLUTELY', 'ARCADIAN'] +1188-133604-0043-1814: ref=['SEE', 'THAT', 'YOUR', 'LIVES', 'BE', 'IN', 'NOTHING', 'WORSE', 'THAN', 'A', "BOY'S", 'CLIMBING', 'FOR', 'HIS', 'ENTANGLED', 'KITE'] +1188-133604-0043-1814: hyp=['SEE', 'THAT', 'YOUR', 'LIES', 'BE', 'IN', 'NOTHING', 'WORSE', 'THAN', 'A', "BOY'S", 'CLIMBING', 'FOR', 'HIS', 'ENTANGLED', 'KITE'] +1188-133604-0044-1815: ref=['IT', 'WILL', 'BE', 'WELL', 'FOR', 'YOU', 'IF', 'YOU', 'JOIN', 'NOT', 'WITH', 'THOSE', 'WHO', 'INSTEAD', 'OF', 'KITES', 'FLY', 'FALCONS', 'WHO', 'INSTEAD', 'OF', 'OBEYING', 'THE', 'LAST', 'WORDS', 'OF', 'THE', 'GREAT', 'CLOUD', 'SHEPHERD', 'TO', 'FEED', 'HIS', 'SHEEP', 'LIVE', 'THE', 'LIVES', 'HOW', 'MUCH', 'LESS', 'THAN', 'VANITY', 'OF', 'THE', 'WAR', 'WOLF', 'AND', 'THE', 'GIER', 'EAGLE'] +1188-133604-0044-1815: hyp=['IT', 'WILL', 'BE', 'WELL', 'FOR', 'YOU', 'IF', 'YOU', 'JOIN', 'NOT', 'WITH', 'THOSE', 'WHO', 'INSTEAD', 'OF', 'KITES', 'FLY', 'FALCONS', 'WHO', 'INSTEAD', 'OF', 'OBEYING', 'THE', 'LAST', 'WORDS', 'OF', 'THE', 'GREAT', 'CLOUD', 
'SHEPHERD', 'TO', 'FEED', 'HIS', 'SHEEP', 'LIVE', 'THE', 'LIVES', 'HOW', 'MUCH', 'LESS', 'THAN', 'VANITY', 'OF', 'THE', 'WAR', 'WOLF', 'AND', 'THE', 'GEAREAGLE'] +121-121726-0000-2558: ref=['ALSO', 'A', 'POPULAR', 'CONTRIVANCE', 'WHEREBY', 'LOVE', 'MAKING', 'MAY', 'BE', 'SUSPENDED', 'BUT', 'NOT', 'STOPPED', 'DURING', 'THE', 'PICNIC', 'SEASON'] +121-121726-0000-2558: hyp=['ALSO', 'A', 'POPULAR', 'CONTRIVANCE', 'WHEREBY', 'LOVE', 'MAKING', 'MAY', 'BE', 'SUSPENDED', 'BUT', 'NOT', 'STOPPED', 'DURING', 'THE', 'PICNIC', 'SEASON'] +121-121726-0001-2559: ref=['HARANGUE', 'THE', 'TIRESOME', 'PRODUCT', 'OF', 'A', 'TIRELESS', 'TONGUE'] +121-121726-0001-2559: hyp=['HARANG', 'THE', 'TIRESOME', 'PRODUCT', 'OF', 'A', 'TIRELESS', 'TONGUE'] +121-121726-0002-2560: ref=['ANGOR', 'PAIN', 'PAINFUL', 'TO', 'HEAR'] +121-121726-0002-2560: hyp=['ANGOR', 'PAIN', 'PAINFUL', 'TO', 'HEAR'] +121-121726-0003-2561: ref=['HAY', 'FEVER', 'A', 'HEART', 'TROUBLE', 'CAUSED', 'BY', 'FALLING', 'IN', 'LOVE', 'WITH', 'A', 'GRASS', 'WIDOW'] +121-121726-0003-2561: hyp=['HAY', 'FEVER', 'A', 'HEART', 'TROUBLE', 'CAUSED', 'BY', 'FALLING', 'IN', 'LOVE', 'WITH', 'A', 'GRASS', 'WIDOW'] +121-121726-0004-2562: ref=['HEAVEN', 'A', 'GOOD', 'PLACE', 'TO', 'BE', 'RAISED', 'TO'] +121-121726-0004-2562: hyp=['HEAVEN', 'A', 'GOOD', 'PLACE', 'TO', 'BE', 'RAISED', 'TO'] +121-121726-0005-2563: ref=['HEDGE', 'A', 'FENCE'] +121-121726-0005-2563: hyp=['HEDGE', 'OFFENCE'] +121-121726-0006-2564: ref=['HEREDITY', 'THE', 'CAUSE', 'OF', 'ALL', 'OUR', 'FAULTS'] +121-121726-0006-2564: hyp=['HEREDITY', 'THE', 'CAUSE', 'OF', 'ALL', 'OUR', 'FAULTS'] +121-121726-0007-2565: ref=['HORSE', 'SENSE', 'A', 'DEGREE', 'OF', 'WISDOM', 'THAT', 'KEEPS', 'ONE', 'FROM', 'BETTING', 'ON', 'THE', 'RACES'] +121-121726-0007-2565: hyp=['HORSE', 'SENSE', 'A', 'DEGREE', 'OF', 'WISDOM', 'THAT', 'KEEPS', 'ONE', 'FROM', 'BETTING', 'ON', 'THE', 'RACES'] +121-121726-0008-2566: ref=['HOSE', "MAN'S", 'EXCUSE', 'FOR', 'WETTING', 'THE', 'WALK'] +121-121726-0008-2566: hyp=['HOSE', "MAN'S", 'EXCUSE', 'FOR', 'WETTING', 'THE', 'WALK'] +121-121726-0009-2567: ref=['HOTEL', 'A', 'PLACE', 'WHERE', 'A', 'GUEST', 'OFTEN', 'GIVES', 'UP', 'GOOD', 'DOLLARS', 'FOR', 'POOR', 'QUARTERS'] +121-121726-0009-2567: hyp=['HOTEL', 'A', 'PLACE', 'WHERE', 'A', 'GUEST', 'OFTEN', 'GIVES', 'UP', 'GOOD', 'DOLLARS', 'FOR', 'POOR', 'QUARTERS'] +121-121726-0010-2568: ref=['HOUSECLEANING', 'A', 'DOMESTIC', 'UPHEAVAL', 'THAT', 'MAKES', 'IT', 'EASY', 'FOR', 'THE', 'GOVERNMENT', 'TO', 'ENLIST', 'ALL', 'THE', 'SOLDIERS', 'IT', 'NEEDS'] +121-121726-0010-2568: hyp=['HOUSE', 'CLEANING', 'A', 'DOMESTIC', 'UPHEAVAL', 'THAT', 'MAKES', 'IT', 'EASY', 'FOR', 'THE', 'GOVERNMENT', 'TO', 'ENLIST', 'ALL', 'THE', 'SOLDIERS', 'IT', 'NEEDS'] +121-121726-0011-2569: ref=['HUSBAND', 'THE', 'NEXT', 'THING', 'TO', 'A', 'WIFE'] +121-121726-0011-2569: hyp=['HUSBAND', 'THE', 'NEXT', 'THING', 'TO', 'A', 'WIFE'] +121-121726-0012-2570: ref=['HUSSY', 'WOMAN', 'AND', 'BOND', 'TIE'] +121-121726-0012-2570: hyp=['HUSSY', 'WOMAN', 'AND', 'BOND', 'TIE'] +121-121726-0013-2571: ref=['TIED', 'TO', 'A', 'WOMAN'] +121-121726-0013-2571: hyp=['TIED', 'TO', 'A', 'WOMAN'] +121-121726-0014-2572: ref=['HYPOCRITE', 'A', 'HORSE', 'DEALER'] +121-121726-0014-2572: hyp=['HYPOCRITE', 'A', 'HORSE', 'DEALER'] +121-123852-0000-2615: ref=['THOSE', 'PRETTY', 'WRONGS', 'THAT', 'LIBERTY', 'COMMITS', 'WHEN', 'I', 'AM', 'SOMETIME', 'ABSENT', 'FROM', 'THY', 'HEART', 'THY', 'BEAUTY', 'AND', 'THY', 'YEARS', 'FULL', 'WELL', 'BEFITS', 'FOR', 'STILL', 'TEMPTATION', 'FOLLOWS', 'WHERE', 'THOU', 
'ART'] +121-123852-0000-2615: hyp=['THOSE', 'PRETTY', 'WRONGS', 'THAT', 'LIBERTY', 'COMMITS', 'WHEN', 'I', 'AM', 'SOME', 'TIME', 'ABSENT', 'FROM', 'THY', 'HEART', 'THY', 'BEAUTY', 'AND', 'THY', 'YEARS', 'FALL', 'WELL', 'BEFITS', 'FOR', 'STILL', 'TEMPTATION', 'FOLLOWS', 'WHERE', 'THOU', 'ART'] +121-123852-0001-2616: ref=['AY', 'ME'] +121-123852-0001-2616: hyp=['I', 'ME'] +121-123852-0002-2617: ref=['NO', 'MATTER', 'THEN', 'ALTHOUGH', 'MY', 'FOOT', 'DID', 'STAND', 'UPON', 'THE', 'FARTHEST', 'EARTH', "REMOV'D", 'FROM', 'THEE', 'FOR', 'NIMBLE', 'THOUGHT', 'CAN', 'JUMP', 'BOTH', 'SEA', 'AND', 'LAND', 'AS', 'SOON', 'AS', 'THINK', 'THE', 'PLACE', 'WHERE', 'HE', 'WOULD', 'BE', 'BUT', 'AH'] +121-123852-0002-2617: hyp=['NO', 'MATTER', 'THEN', 'ALTHOUGH', 'MY', 'FOOT', 'DID', 'STAND', 'UPON', 'THE', 'FARTHEST', 'EARTH', 'REMOVED', 'FROM', 'THEE', 'FOR', 'NIMBLE', 'THOUGHT', 'CAN', 'JUMP', 'BOTH', 'SEA', 'AND', 'LAND', 'AS', 'SOON', 'AS', 'THINK', 'THE', 'PLACE', 'WHERE', 'HE', 'WOULD', 'BE', 'BUT', 'AH'] +121-123852-0003-2618: ref=['THOUGHT', 'KILLS', 'ME', 'THAT', 'I', 'AM', 'NOT', 'THOUGHT', 'TO', 'LEAP', 'LARGE', 'LENGTHS', 'OF', 'MILES', 'WHEN', 'THOU', 'ART', 'GONE', 'BUT', 'THAT', 'SO', 'MUCH', 'OF', 'EARTH', 'AND', 'WATER', 'WROUGHT', 'I', 'MUST', 'ATTEND', "TIME'S", 'LEISURE', 'WITH', 'MY', 'MOAN', 'RECEIVING', 'NOUGHT', 'BY', 'ELEMENTS', 'SO', 'SLOW', 'BUT', 'HEAVY', 'TEARS', 'BADGES', 'OF', "EITHER'S", 'WOE'] +121-123852-0003-2618: hyp=['THOUGHT', 'KILLS', 'ME', 'THAT', 'I', 'AM', 'NOT', 'BOUGHT', 'TO', 'LEAP', 'LARGE', 'LENGTHS', 'OF', 'MILES', 'WHEN', 'THOU', 'ART', 'GONE', 'BUT', 'THAT', 'SO', 'MUCH', 'OF', 'EARTH', 'AND', 'WATER', 'WROUGHT', 'I', 'MUST', 'ATTEND', "TIME'S", 'LEISURE', 'WITH', 'MY', 'MOAN', 'RECEIVING', 'NOT', 'BY', 'ELEMENTS', 'SO', 'SLOW', 'BUT', 'HEAVY', 'TEARS', 'BADGES', 'OF', "EITHER'S", 'WOE'] +121-123852-0004-2619: ref=['MY', 'HEART', 'DOTH', 'PLEAD', 'THAT', 'THOU', 'IN', 'HIM', 'DOST', 'LIE', 'A', 'CLOSET', 'NEVER', "PIERC'D", 'WITH', 'CRYSTAL', 'EYES', 'BUT', 'THE', 'DEFENDANT', 'DOTH', 'THAT', 'PLEA', 'DENY', 'AND', 'SAYS', 'IN', 'HIM', 'THY', 'FAIR', 'APPEARANCE', 'LIES'] +121-123852-0004-2619: hyp=['MY', 'HEART', 'DOTH', 'PLEAD', 'THAT', 'THOU', 'IN', 'HIM', 'DOST', 'LIE', 'A', 'CLOSET', 'NEVER', 'PIERCED', 'WITH', 'CRYSTAL', 'EYES', 'BUT', 'THE', 'DEFENDANT', 'DOTH', 'THAT', 'PLEA', 'DENY', 'AND', 'SAYS', 'IN', 'HIM', 'THY', 'FAIR', 'APPEARANCE', 'LIES'] +121-123859-0000-2573: ref=['YOU', 'ARE', 'MY', 'ALL', 'THE', 'WORLD', 'AND', 'I', 'MUST', 'STRIVE', 'TO', 'KNOW', 'MY', 'SHAMES', 'AND', 'PRAISES', 'FROM', 'YOUR', 'TONGUE', 'NONE', 'ELSE', 'TO', 'ME', 'NOR', 'I', 'TO', 'NONE', 'ALIVE', 'THAT', 'MY', "STEEL'D", 'SENSE', 'OR', 'CHANGES', 'RIGHT', 'OR', 'WRONG'] +121-123859-0000-2573: hyp=['YOU', 'ARE', 'MY', 'ALL', 'THE', 'WORLD', 'AND', 'I', 'MUST', 'STRIVE', 'TO', 'KNOW', 'MY', 'SHAMES', 'AND', 'PRAISES', 'FROM', 'YOUR', 'TONGUE', 'NONE', 'ELSE', 'TO', 'ME', 'NOR', 'I', 'TO', 'NONE', 'ALIVE', 'THAT', 'MY', 'STEELED', 'SENSE', 'OR', 'CHANGES', 'RIGHT', 'OR', 'WRONG'] +121-123859-0001-2574: ref=['O', 'TIS', 'THE', 'FIRST', 'TIS', 'FLATTERY', 'IN', 'MY', 'SEEING', 'AND', 'MY', 'GREAT', 'MIND', 'MOST', 'KINGLY', 'DRINKS', 'IT', 'UP', 'MINE', 'EYE', 'WELL', 'KNOWS', 'WHAT', 'WITH', 'HIS', 'GUST', 'IS', 'GREEING', 'AND', 'TO', 'HIS', 'PALATE', 'DOTH', 'PREPARE', 'THE', 'CUP', 'IF', 'IT', 'BE', "POISON'D", 'TIS', 'THE', 'LESSER', 'SIN', 'THAT', 'MINE', 'EYE', 'LOVES', 'IT', 'AND', 'DOTH', 'FIRST', 'BEGIN'] +121-123859-0001-2574: hyp=['OH', 'TIS', 'THE', 
'FIRST', 'TIS', 'FLATTERY', 'IN', 'MY', 'SEEING', 'AND', 'MY', 'GREAT', 'MIND', 'MOST', 'KINGLY', 'DRINKS', 'IT', 'UP', 'MINE', 'EYE', 'WELL', 'KNOWS', 'WHAT', 'WITH', 'HIS', 'GUST', 'IS', 'GREEN', 'AND', 'TO', 'HIS', 'PALATE', 'DOTH', 'PREPARE', 'THE', 'CUP', 'IF', 'IT', 'BE', 'POISONED', 'TIS', 'THE', 'LESSER', 'SIN', 'THAT', 'MINE', 'EYE', 'LOVES', 'IT', 'AND', 'DOTH', 'FIRST', 'BEGIN'] +121-123859-0002-2575: ref=['BUT', 'RECKONING', 'TIME', 'WHOSE', "MILLION'D", 'ACCIDENTS', 'CREEP', 'IN', 'TWIXT', 'VOWS', 'AND', 'CHANGE', 'DECREES', 'OF', 'KINGS', 'TAN', 'SACRED', 'BEAUTY', 'BLUNT', 'THE', "SHARP'ST", 'INTENTS', 'DIVERT', 'STRONG', 'MINDS', 'TO', 'THE', 'COURSE', 'OF', 'ALTERING', 'THINGS', 'ALAS', 'WHY', 'FEARING', 'OF', "TIME'S", 'TYRANNY', 'MIGHT', 'I', 'NOT', 'THEN', 'SAY', 'NOW', 'I', 'LOVE', 'YOU', 'BEST', 'WHEN', 'I', 'WAS', 'CERTAIN', "O'ER", 'INCERTAINTY', 'CROWNING', 'THE', 'PRESENT', 'DOUBTING', 'OF', 'THE', 'REST'] +121-123859-0002-2575: hyp=['BUT', 'RECKONING', 'TIME', 'WHOSE', 'MILLIONED', 'ACCIDENTS', 'CREEP', 'IN', 'TWIXT', 'VOWS', 'AND', 'CHANGE', 'DECREES', 'OF', 'KINGS', 'TAN', 'SACRED', 'BEAUTY', 'BLUNT', 'THE', 'SHARPEST', 'INTENSE', 'DIVERT', 'STRONG', 'MINDS', 'TO', 'THE', 'COURSE', 'OF', 'ALTERING', 'THINGS', 'ALAS', 'WHY', 'FEARING', 'OF', "TIME'S", 'TYRANNY', 'MIGHT', 'I', 'NOT', 'THEN', 'SAY', 'NOW', 'I', 'LOVE', 'YOU', 'BEST', 'WHEN', 'I', 'WAS', 'CERTAIN', 'OR', 'IN', 'CERTAINTY', 'CROWNING', 'THE', 'PRESENT', 'DOUBTING', 'OF', 'THE', 'REST'] +121-123859-0003-2576: ref=['LOVE', 'IS', 'A', 'BABE', 'THEN', 'MIGHT', 'I', 'NOT', 'SAY', 'SO', 'TO', 'GIVE', 'FULL', 'GROWTH', 'TO', 'THAT', 'WHICH', 'STILL', 'DOTH', 'GROW'] +121-123859-0003-2576: hyp=['LOVE', 'IS', 'A', 'BABE', 'THEN', 'MIGHT', 'I', 'NOT', 'SAY', 'SO', 'TO', 'GIVE', 'FULL', 'GROWTH', 'TO', 'THAT', 'WHICH', 'STILL', 'DOTH', 'GROW'] +121-123859-0004-2577: ref=['SO', 'I', 'RETURN', "REBUK'D", 'TO', 'MY', 'CONTENT', 'AND', 'GAIN', 'BY', 'ILL', 'THRICE', 'MORE', 'THAN', 'I', 'HAVE', 'SPENT'] +121-123859-0004-2577: hyp=['SO', 'I', 'RETURN', 'REBUKED', 'TO', 'MY', 'CONTENT', 'AND', 'GAIN', 'BY', 'ILL', 'THRICE', 'MORE', 'THAN', 'I', 'HAVE', 'SPENT'] +121-127105-0000-2578: ref=['IT', 'WAS', 'THIS', 'OBSERVATION', 'THAT', 'DREW', 'FROM', 'DOUGLAS', 'NOT', 'IMMEDIATELY', 'BUT', 'LATER', 'IN', 'THE', 'EVENING', 'A', 'REPLY', 'THAT', 'HAD', 'THE', 'INTERESTING', 'CONSEQUENCE', 'TO', 'WHICH', 'I', 'CALL', 'ATTENTION'] +121-127105-0000-2578: hyp=['IT', 'WAS', 'THIS', 'OBSERVATION', 'THAT', 'DREW', 'FROM', 'DOUGLAS', 'NOT', 'IMMEDIATELY', 'BUT', 'LATER', 'IN', 'THE', 'EVENING', 'A', 'REPLY', 'THAT', 'HAD', 'THE', 'INTERESTING', 'CONSEQUENCE', 'TO', 'WHICH', 'I', 'CALL', 'ATTENTION'] +121-127105-0001-2579: ref=['SOMEONE', 'ELSE', 'TOLD', 'A', 'STORY', 'NOT', 'PARTICULARLY', 'EFFECTIVE', 'WHICH', 'I', 'SAW', 'HE', 'WAS', 'NOT', 'FOLLOWING'] +121-127105-0001-2579: hyp=['SOME', 'ONE', 'ELSE', 'TOLD', 'A', 'STORY', 'NOT', 'PARTICULARLY', 'EFFECTIVE', 'WHICH', 'I', 'SAW', 'HE', 'WAS', 'NOT', 'FOLLOWING'] +121-127105-0002-2580: ref=['CRIED', 'ONE', 'OF', 'THE', 'WOMEN', 'HE', 'TOOK', 'NO', 'NOTICE', 'OF', 'HER', 'HE', 'LOOKED', 'AT', 'ME', 'BUT', 'AS', 'IF', 'INSTEAD', 'OF', 'ME', 'HE', 'SAW', 'WHAT', 'HE', 'SPOKE', 'OF'] +121-127105-0002-2580: hyp=['CRIED', 'ONE', 'OF', 'THE', 'WOMEN', 'HE', 'TOOK', 'NO', 'NOTICE', 'OF', 'HER', 'HE', 'LOOKED', 'AT', 'ME', 'BUT', 'AS', 'IF', 'INSTEAD', 'OF', 'ME', 'HE', 'SAW', 'WHAT', 'HE', 'SPOKE', 'OF'] +121-127105-0003-2581: ref=['THERE', 'WAS', 'A', 'UNANIMOUS', 'GROAN', 'AT', 
'THIS', 'AND', 'MUCH', 'REPROACH', 'AFTER', 'WHICH', 'IN', 'HIS', 'PREOCCUPIED', 'WAY', 'HE', 'EXPLAINED'] +121-127105-0003-2581: hyp=['THERE', 'WAS', 'A', 'UNANIMOUS', 'GROAN', 'AT', 'THIS', 'AND', 'MUCH', 'REPROACH', 'AFTER', 'WHICH', 'IN', 'HIS', 'PREOCCUPIED', 'WAY', 'HE', 'EXPLAINED'] +121-127105-0004-2582: ref=['THE', "STORY'S", 'WRITTEN'] +121-127105-0004-2582: hyp=['THE', 'STORIES', 'WRITTEN'] +121-127105-0005-2583: ref=['I', 'COULD', 'WRITE', 'TO', 'MY', 'MAN', 'AND', 'ENCLOSE', 'THE', 'KEY', 'HE', 'COULD', 'SEND', 'DOWN', 'THE', 'PACKET', 'AS', 'HE', 'FINDS', 'IT'] +121-127105-0005-2583: hyp=['THY', 'GOOD', 'RIGHT', 'TO', 'MY', 'MAN', 'AND', 'ENCLOSE', 'THE', 'KEY', 'HE', 'COULD', 'SEND', 'DOWN', 'THE', 'PACKET', 'AS', 'HE', 'FINDS', 'IT'] +121-127105-0006-2584: ref=['THE', 'OTHERS', 'RESENTED', 'POSTPONEMENT', 'BUT', 'IT', 'WAS', 'JUST', 'HIS', 'SCRUPLES', 'THAT', 'CHARMED', 'ME'] +121-127105-0006-2584: hyp=['THE', 'OTHERS', 'RESENTED', 'POSTPONEMENT', 'BUT', 'IT', 'WAS', 'JUST', 'HIS', 'SCRUPLES', 'THAT', 'CHARMED', 'ME'] +121-127105-0007-2585: ref=['TO', 'THIS', 'HIS', 'ANSWER', 'WAS', 'PROMPT', 'OH', 'THANK', 'GOD', 'NO', 'AND', 'IS', 'THE', 'RECORD', 'YOURS'] +121-127105-0007-2585: hyp=['TO', 'THIS', 'HIS', 'ANSWER', 'WAS', 'PROMPT', 'OH', 'THANK', 'GOD', 'NO', 'AND', 'IS', 'THE', 'RECORD', 'YOURS'] +121-127105-0008-2586: ref=['HE', 'HUNG', 'FIRE', 'AGAIN', 'A', "WOMAN'S"] +121-127105-0008-2586: hyp=['HE', 'HUNG', 'FIRE', 'AGAIN', 'A', "WOMAN'S"] +121-127105-0009-2587: ref=['SHE', 'HAS', 'BEEN', 'DEAD', 'THESE', 'TWENTY', 'YEARS'] +121-127105-0009-2587: hyp=['SHE', 'HAS', 'BEEN', 'DEAD', 'THESE', 'TWENTY', 'YEARS'] +121-127105-0010-2588: ref=['SHE', 'SENT', 'ME', 'THE', 'PAGES', 'IN', 'QUESTION', 'BEFORE', 'SHE', 'DIED'] +121-127105-0010-2588: hyp=['SHE', 'SENT', 'ME', 'THE', 'PAGES', 'IN', 'QUESTION', 'BEFORE', 'SHE', 'DIED'] +121-127105-0011-2589: ref=['SHE', 'WAS', 'THE', 'MOST', 'AGREEABLE', 'WOMAN', "I'VE", 'EVER', 'KNOWN', 'IN', 'HER', 'POSITION', 'SHE', 'WOULD', 'HAVE', 'BEEN', 'WORTHY', 'OF', 'ANY', 'WHATEVER'] +121-127105-0011-2589: hyp=['SHE', 'WAS', 'THE', 'MOST', 'AGREEABLE', 'WOMAN', "I'VE", 'EVER', 'KNOWN', 'IN', 'HER', 'POSITION', 'SHE', 'WOULD', 'HAVE', 'BEEN', 'WORTHY', 'OF', 'ANY', 'WHATEVER'] +121-127105-0012-2590: ref=['IT', "WASN'T", 'SIMPLY', 'THAT', 'SHE', 'SAID', 'SO', 'BUT', 'THAT', 'I', 'KNEW', 'SHE', "HADN'T", 'I', 'WAS', 'SURE', 'I', 'COULD', 'SEE'] +121-127105-0012-2590: hyp=["TWASN'T", 'SIMPLY', 'THAT', 'SHE', 'SAID', 'SO', 'BUT', 'THAT', 'I', 'KNEW', 'SHE', "HADN'T", 'I', 'WAS', 'SURE', 'I', 'COULD', 'SEE'] +121-127105-0013-2591: ref=["YOU'LL", 'EASILY', 'JUDGE', 'WHY', 'WHEN', 'YOU', 'HEAR', 'BECAUSE', 'THE', 'THING', 'HAD', 'BEEN', 'SUCH', 'A', 'SCARE', 'HE', 'CONTINUED', 'TO', 'FIX', 'ME'] +121-127105-0013-2591: hyp=["YOU'LL", 'EASILY', 'JUDGE', 'WHY', 'WHEN', 'YOU', 'HEAR', 'BECAUSE', 'THE', 'THING', 'HAD', 'BEEN', 'SUCH', 'A', 'SCARE', 'HE', 'CONTINUED', 'TO', 'FIX', 'ME'] +121-127105-0014-2592: ref=['YOU', 'ARE', 'ACUTE'] +121-127105-0014-2592: hyp=['YOU', 'ARE', 'ACUTE'] +121-127105-0015-2593: ref=['HE', 'QUITTED', 'THE', 'FIRE', 'AND', 'DROPPED', 'BACK', 'INTO', 'HIS', 'CHAIR'] +121-127105-0015-2593: hyp=['HE', 'QUITTED', 'THE', 'FIRE', 'AND', 'DROPPED', 'BACK', 'INTO', 'HIS', 'CHAIR'] +121-127105-0016-2594: ref=['PROBABLY', 'NOT', 'TILL', 'THE', 'SECOND', 'POST'] +121-127105-0016-2594: hyp=['PROBABLY', 'NOT', 'TILL', 'THE', 'SECOND', 'POST'] +121-127105-0017-2595: ref=['IT', 'WAS', 'ALMOST', 'THE', 'TONE', 'OF', 'HOPE', 'EVERYBODY', 
'WILL', 'STAY'] +121-127105-0017-2595: hyp=['IT', 'WAS', 'ALMOST', 'THE', 'TONE', 'OF', 'HOPE', 'EVERYBODY', 'WILL', 'STAY'] +121-127105-0018-2596: ref=['CRIED', 'THE', 'LADIES', 'WHOSE', 'DEPARTURE', 'HAD', 'BEEN', 'FIXED'] +121-127105-0018-2596: hyp=['CRIED', 'THE', 'LADIES', 'WHOSE', 'DEPARTURE', 'HAD', 'BEEN', 'FIXED'] +121-127105-0019-2597: ref=['MISSUS', 'GRIFFIN', 'HOWEVER', 'EXPRESSED', 'THE', 'NEED', 'FOR', 'A', 'LITTLE', 'MORE', 'LIGHT'] +121-127105-0019-2597: hyp=['MISSUS', 'GRIFFIN', 'HOWEVER', 'EXPRESSED', 'THE', 'NEED', 'FOR', 'LITTLE', 'MORE', 'LIGHT'] +121-127105-0020-2598: ref=['WHO', 'WAS', 'IT', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'THE', 'STORY', 'WILL', 'TELL', 'I', 'TOOK', 'UPON', 'MYSELF', 'TO', 'REPLY', 'OH', 'I', "CAN'T", 'WAIT', 'FOR', 'THE', 'STORY', 'THE', 'STORY', "WON'T", 'TELL', 'SAID', 'DOUGLAS', 'NOT', 'IN', 'ANY', 'LITERAL', 'VULGAR', 'WAY', "MORE'S", 'THE', 'PITY', 'THEN'] +121-127105-0020-2598: hyp=['WHO', 'WAS', 'IT', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'THE', 'STORY', 'WILL', 'TELL', 'I', 'TOOK', 'UPON', 'MYSELF', 'TO', 'REPLY', 'OH', 'I', "CAN'T", 'WAIT', 'FOR', 'THE', 'STORY', 'THE', 'STORY', "WON'T", 'TELL', 'SAID', 'DOUGLAS', 'NOT', 'IN', 'ANY', 'LITERAL', 'VULGAR', 'WAY', "MORE'S", 'THE', 'PITY', 'THEN'] +121-127105-0021-2599: ref=["WON'T", 'YOU', 'TELL', 'DOUGLAS'] +121-127105-0021-2599: hyp=["WON'T", 'YOU', 'TELL', 'DOUGLAS'] +121-127105-0022-2600: ref=['WELL', 'IF', 'I', "DON'T", 'KNOW', 'WHO', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'I', 'KNOW', 'WHO', 'HE', 'WAS'] +121-127105-0022-2600: hyp=['FOR', 'IF', 'I', "DON'T", 'KNOW', 'WHO', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'I', 'KNOW', 'WHO', 'HE', 'WAS'] +121-127105-0023-2601: ref=['LET', 'ME', 'SAY', 'HERE', 'DISTINCTLY', 'TO', 'HAVE', 'DONE', 'WITH', 'IT', 'THAT', 'THIS', 'NARRATIVE', 'FROM', 'AN', 'EXACT', 'TRANSCRIPT', 'OF', 'MY', 'OWN', 'MADE', 'MUCH', 'LATER', 'IS', 'WHAT', 'I', 'SHALL', 'PRESENTLY', 'GIVE'] +121-127105-0023-2601: hyp=['LET', 'ME', 'SAY', 'HERE', 'DISTINCTLY', 'TO', 'HAVE', 'DONE', 'WITH', 'IT', 'THAT', 'THIS', 'NARRATIVE', 'FROM', 'AN', 'EXACT', 'TRANSCRIPT', 'OF', 'MY', 'OWN', 'MADE', 'MUCH', 'LATER', 'IS', 'WHAT', 'I', 'SHALL', 'PRESENTLY', 'GIVE'] +121-127105-0024-2602: ref=['POOR', 'DOUGLAS', 'BEFORE', 'HIS', 'DEATH', 'WHEN', 'IT', 'WAS', 'IN', 'SIGHT', 'COMMITTED', 'TO', 'ME', 'THE', 'MANUSCRIPT', 'THAT', 'REACHED', 'HIM', 'ON', 'THE', 'THIRD', 'OF', 'THESE', 'DAYS', 'AND', 'THAT', 'ON', 'THE', 'SAME', 'SPOT', 'WITH', 'IMMENSE', 'EFFECT', 'HE', 'BEGAN', 'TO', 'READ', 'TO', 'OUR', 'HUSHED', 'LITTLE', 'CIRCLE', 'ON', 'THE', 'NIGHT', 'OF', 'THE', 'FOURTH'] +121-127105-0024-2602: hyp=['POOR', 'DOUGLAS', 'BEFORE', 'HIS', 'DEATH', 'WHEN', 'IT', 'WAS', 'IN', 'SIGHT', 'COMMITTED', 'TO', 'ME', 'THE', 'MANUSCRIPT', 'THAT', 'REACHED', 'HIM', 'ON', 'THE', 'THIRD', 'OF', 'THESE', 'DAYS', 'AND', 'THAT', 'ON', 'THE', 'SAME', 'SPOT', 'WITH', 'IMMENSE', 'EFFECT', 'HE', 'BEGAN', 'TO', 'READ', 'TO', 'OUR', 'HUSHED', 'LITTLE', 'CIRCLE', 'ON', 'THE', 'NIGHT', 'OF', 'THE', 'FOURTH'] +121-127105-0025-2603: ref=['THE', 'DEPARTING', 'LADIES', 'WHO', 'HAD', 'SAID', 'THEY', 'WOULD', 'STAY', "DIDN'T", 'OF', 'COURSE', 'THANK', 'HEAVEN', 'STAY', 'THEY', 'DEPARTED', 'IN', 'CONSEQUENCE', 'OF', 'ARRANGEMENTS', 'MADE', 'IN', 'A', 'RAGE', 'OF', 'CURIOSITY', 'AS', 'THEY', 'PROFESSED', 'PRODUCED', 'BY', 'THE', 'TOUCHES', 'WITH', 'WHICH', 'HE', 'HAD', 'ALREADY', 'WORKED', 'US', 'UP'] +121-127105-0025-2603: hyp=['THE', 'DEPARTING', 'LADIES', 'WHO', 'HAD', 'SAID', 'THEY', 'WOULD', 'STAY', "DIDN'T", 'OF', 
'COURSE', 'THANK', 'HEAVEN', 'STAY', 'THEY', 'DEPARTED', 'IN', 'CONSEQUENCE', 'OF', 'ARRANGEMENTS', 'MADE', 'IN', 'A', 'RAGE', 'OF', 'CURIOSITY', 'AS', 'THEY', 'PROFESSED', 'PRODUCED', 'BY', 'THE', 'TOUCHES', 'WITH', 'WHICH', 'HE', 'HAD', 'ALREADY', 'WORKED', 'US', 'UP'] +121-127105-0026-2604: ref=['THE', 'FIRST', 'OF', 'THESE', 'TOUCHES', 'CONVEYED', 'THAT', 'THE', 'WRITTEN', 'STATEMENT', 'TOOK', 'UP', 'THE', 'TALE', 'AT', 'A', 'POINT', 'AFTER', 'IT', 'HAD', 'IN', 'A', 'MANNER', 'BEGUN'] +121-127105-0026-2604: hyp=['THE', 'FIRST', 'OF', 'THESE', 'TOUCHES', 'CONVEYED', 'THAT', 'THE', 'WRITTEN', 'STATEMENT', 'TOOK', 'UP', 'THE', 'TALE', 'AT', 'A', 'POINT', 'AFTER', 'IT', 'HAD', 'IN', 'A', 'MANNER', 'BEGUN'] +121-127105-0027-2605: ref=['HE', 'HAD', 'FOR', 'HIS', 'OWN', 'TOWN', 'RESIDENCE', 'A', 'BIG', 'HOUSE', 'FILLED', 'WITH', 'THE', 'SPOILS', 'OF', 'TRAVEL', 'AND', 'THE', 'TROPHIES', 'OF', 'THE', 'CHASE', 'BUT', 'IT', 'WAS', 'TO', 'HIS', 'COUNTRY', 'HOME', 'AN', 'OLD', 'FAMILY', 'PLACE', 'IN', 'ESSEX', 'THAT', 'HE', 'WISHED', 'HER', 'IMMEDIATELY', 'TO', 'PROCEED'] +121-127105-0027-2605: hyp=['HE', 'HAD', 'FOR', 'HIS', 'OWN', 'TOWN', 'RESIDENCE', 'A', 'BIG', 'HOUSE', 'FILLED', 'WITH', 'THE', 'SPOILS', 'OF', 'TRAVEL', 'AND', 'THE', 'TROPHIES', 'OF', 'THE', 'CHASE', 'BUT', 'IT', 'WAS', 'TO', 'HIS', 'COUNTRY', 'HOME', 'AN', 'OLD', 'FAMILY', 'PLACE', 'IN', 'ESSEX', 'THAT', 'HE', 'WISHED', 'HER', 'IMMEDIATELY', 'TO', 'PROCEED'] +121-127105-0028-2606: ref=['THE', 'AWKWARD', 'THING', 'WAS', 'THAT', 'THEY', 'HAD', 'PRACTICALLY', 'NO', 'OTHER', 'RELATIONS', 'AND', 'THAT', 'HIS', 'OWN', 'AFFAIRS', 'TOOK', 'UP', 'ALL', 'HIS', 'TIME'] +121-127105-0028-2606: hyp=['THE', 'AWKWARD', 'THING', 'WAS', 'THAT', 'THEY', 'HAD', 'PRACTICALLY', 'NO', 'OTHER', 'RELATIONS', 'AND', 'THAT', 'HIS', 'OWN', 'AFFAIRS', 'TOOK', 'UP', 'ALL', 'HIS', 'TIME'] +121-127105-0029-2607: ref=['THERE', 'WERE', 'PLENTY', 'OF', 'PEOPLE', 'TO', 'HELP', 'BUT', 'OF', 'COURSE', 'THE', 'YOUNG', 'LADY', 'WHO', 'SHOULD', 'GO', 'DOWN', 'AS', 'GOVERNESS', 'WOULD', 'BE', 'IN', 'SUPREME', 'AUTHORITY'] +121-127105-0029-2607: hyp=['THERE', 'WERE', 'PLENTY', 'OF', 'PEOPLE', 'TO', 'HELP', 'BUT', 'OF', 'COURSE', 'THE', 'YOUNG', 'LADY', 'WHO', 'SHOULD', 'GO', 'DOWN', 'AS', 'GOVERNESS', 'WOULD', 'BE', 'IN', 'SUPREME', 'AUTHORITY'] +121-127105-0030-2608: ref=['I', "DON'T", 'ANTICIPATE'] +121-127105-0030-2608: hyp=['I', "DON'T", 'ANTICIPATE'] +121-127105-0031-2609: ref=['SHE', 'WAS', 'YOUNG', 'UNTRIED', 'NERVOUS', 'IT', 'WAS', 'A', 'VISION', 'OF', 'SERIOUS', 'DUTIES', 'AND', 'LITTLE', 'COMPANY', 'OF', 'REALLY', 'GREAT', 'LONELINESS'] +121-127105-0031-2609: hyp=['SHE', 'WAS', 'YOUNG', 'UNTRIED', 'NERVOUS', 'IT', 'WAS', 'A', 'VISION', 'OF', 'SERIOUS', 'DUTIES', 'IN', 'LITTLE', 'COMPANY', 'OF', 'REALLY', 'GREAT', 'LONELINESS'] +121-127105-0032-2610: ref=['YES', 'BUT', "THAT'S", 'JUST', 'THE', 'BEAUTY', 'OF', 'HER', 'PASSION'] +121-127105-0032-2610: hyp=['YES', 'BUT', "THAT'S", 'JUST', 'THE', 'BEAUTY', 'OF', 'HER', 'PASSION'] +121-127105-0033-2611: ref=['IT', 'WAS', 'THE', 'BEAUTY', 'OF', 'IT'] +121-127105-0033-2611: hyp=['IT', 'WAS', 'THE', 'BEAUTY', 'OF', 'IT'] +121-127105-0034-2612: ref=['IT', 'SOUNDED', 'DULL', 'IT', 'SOUNDED', 'STRANGE', 'AND', 'ALL', 'THE', 'MORE', 'SO', 'BECAUSE', 'OF', 'HIS', 'MAIN', 'CONDITION', 'WHICH', 'WAS'] +121-127105-0034-2612: hyp=['IT', 'SOUNDED', 'DULL', 'THAT', 'SOUNDED', 'STRANGE', 'AND', 'ALL', 'THE', 'MORE', 'SO', 'BECAUSE', 'OF', 'HIS', 'MAIN', 'CONDITION', 'WHICH', 'WAS'] +121-127105-0035-2613: ref=['SHE', 'PROMISED', 
'TO', 'DO', 'THIS', 'AND', 'SHE', 'MENTIONED', 'TO', 'ME', 'THAT', 'WHEN', 'FOR', 'A', 'MOMENT', 'DISBURDENED', 'DELIGHTED', 'HE', 'HELD', 'HER', 'HAND', 'THANKING', 'HER', 'FOR', 'THE', 'SACRIFICE', 'SHE', 'ALREADY', 'FELT', 'REWARDED'] +121-127105-0035-2613: hyp=['SHE', 'PROMISED', 'TO', 'DO', 'THIS', 'AND', 'SHE', 'MENTIONED', 'TO', 'ME', 'THAT', 'WHEN', 'FOR', 'A', 'MOMENT', 'DISBURDENED', 'DELIGHTED', 'HE', 'HELD', 'HER', 'HAND', 'THANKING', 'HER', 'FOR', 'THE', 'SACRIFICE', 'SHE', 'ALREADY', 'FELT', 'REWARDED'] +121-127105-0036-2614: ref=['BUT', 'WAS', 'THAT', 'ALL', 'HER', 'REWARD', 'ONE', 'OF', 'THE', 'LADIES', 'ASKED'] +121-127105-0036-2614: hyp=['BUT', 'WAS', 'THAT', 'ALL', 'HER', 'REWARD', 'ONE', 'OF', 'THE', 'LADIES', 'ASKED'] +1221-135766-0000-1305: ref=['HOW', 'STRANGE', 'IT', 'SEEMED', 'TO', 'THE', 'SAD', 'WOMAN', 'AS', 'SHE', 'WATCHED', 'THE', 'GROWTH', 'AND', 'THE', 'BEAUTY', 'THAT', 'BECAME', 'EVERY', 'DAY', 'MORE', 'BRILLIANT', 'AND', 'THE', 'INTELLIGENCE', 'THAT', 'THREW', 'ITS', 'QUIVERING', 'SUNSHINE', 'OVER', 'THE', 'TINY', 'FEATURES', 'OF', 'THIS', 'CHILD'] +1221-135766-0000-1305: hyp=['HOW', 'STRANGE', 'IT', 'SEEMED', 'TO', 'THE', 'SAD', 'WOMAN', 'AS', 'SHE', 'WATCHED', 'THE', 'GROWTH', 'AND', 'THE', 'BEAUTY', 'THAT', 'BECAME', 'EVERY', 'DAY', 'MORE', 'BRILLIANT', 'AND', 'THE', 'INTELLIGENCE', 'THAT', 'THREW', 'ITS', 'QUIVERING', 'SUNSHINE', 'OVER', 'THE', 'TINY', 'FEATURES', 'OF', 'THIS', 'CHILD'] +1221-135766-0001-1306: ref=['GOD', 'AS', 'A', 'DIRECT', 'CONSEQUENCE', 'OF', 'THE', 'SIN', 'WHICH', 'MAN', 'THUS', 'PUNISHED', 'HAD', 'GIVEN', 'HER', 'A', 'LOVELY', 'CHILD', 'WHOSE', 'PLACE', 'WAS', 'ON', 'THAT', 'SAME', 'DISHONOURED', 'BOSOM', 'TO', 'CONNECT', 'HER', 'PARENT', 'FOR', 'EVER', 'WITH', 'THE', 'RACE', 'AND', 'DESCENT', 'OF', 'MORTALS', 'AND', 'TO', 'BE', 'FINALLY', 'A', 'BLESSED', 'SOUL', 'IN', 'HEAVEN'] +1221-135766-0001-1306: hyp=['GOD', 'AS', 'A', 'DIRECT', 'CONSEQUENCE', 'OF', 'THE', 'SIN', 'WHICH', 'MAN', 'THUS', 'PUNISHED', 'HAD', 'GIVEN', 'HER', 'A', 'LOVELY', 'CHILD', 'WHOSE', 'PLACE', 'WAS', 'ON', 'THAT', 'SAME', 'DISHONORED', 'BOSOM', 'TO', 'CONNECT', 'HER', 'PARENT', 'FOR', 'EVER', 'WITH', 'THE', 'RACE', 'AND', 'DESCENT', 'OF', 'MORTALS', 'AND', 'TO', 'BE', 'FINALLY', 'A', 'BLESSED', 'SOUL', 'IN', 'HEAVEN'] +1221-135766-0002-1307: ref=['YET', 'THESE', 'THOUGHTS', 'AFFECTED', 'HESTER', 'PRYNNE', 'LESS', 'WITH', 'HOPE', 'THAN', 'APPREHENSION'] +1221-135766-0002-1307: hyp=['YET', 'THESE', 'THOUGHTS', 'AFFECTED', 'HESTER', 'PRYNNE', 'LESS', 'WITH', 'HOPE', 'THAN', 'APPREHENSION'] +1221-135766-0003-1308: ref=['THE', 'CHILD', 'HAD', 'A', 'NATIVE', 'GRACE', 'WHICH', 'DOES', 'NOT', 'INVARIABLY', 'CO', 'EXIST', 'WITH', 'FAULTLESS', 'BEAUTY', 'ITS', 'ATTIRE', 'HOWEVER', 'SIMPLE', 'ALWAYS', 'IMPRESSED', 'THE', 'BEHOLDER', 'AS', 'IF', 'IT', 'WERE', 'THE', 'VERY', 'GARB', 'THAT', 'PRECISELY', 'BECAME', 'IT', 'BEST'] +1221-135766-0003-1308: hyp=['THE', 'CHILD', 'HAD', 'A', 'NATIVE', 'GRACE', 'WHICH', 'DOES', 'NOT', 'INVARIABLY', 'COEXIST', 'WITH', 'FAULTLESS', 'BEAUTY', 'ITS', 'ATTIRE', 'HOWEVER', 'SIMPLE', 'ALWAYS', 'IMPRESSED', 'THE', 'BEHOLDER', 'AS', 'IF', 'IT', 'WERE', 'THE', 'VERY', 'GARB', 'THAT', 'PRECISELY', 'BECAME', 'IT', 'BEST'] +1221-135766-0004-1309: ref=['THIS', 'OUTWARD', 'MUTABILITY', 'INDICATED', 'AND', 'DID', 'NOT', 'MORE', 'THAN', 'FAIRLY', 'EXPRESS', 'THE', 'VARIOUS', 'PROPERTIES', 'OF', 'HER', 'INNER', 'LIFE'] +1221-135766-0004-1309: hyp=['THIS', 'OUTWARD', 'MUTABILITY', 'INDICATED', 'AND', 'DID', 'NOT', 'MORE', 'THAN', 'FAIRLY', 
'EXPRESS', 'THE', 'VARIOUS', 'PROPERTIES', 'OF', 'HER', 'INNER', 'LIFE'] +1221-135766-0005-1310: ref=['HESTER', 'COULD', 'ONLY', 'ACCOUNT', 'FOR', 'THE', "CHILD'S", 'CHARACTER', 'AND', 'EVEN', 'THEN', 'MOST', 'VAGUELY', 'AND', 'IMPERFECTLY', 'BY', 'RECALLING', 'WHAT', 'SHE', 'HERSELF', 'HAD', 'BEEN', 'DURING', 'THAT', 'MOMENTOUS', 'PERIOD', 'WHILE', 'PEARL', 'WAS', 'IMBIBING', 'HER', 'SOUL', 'FROM', 'THE', 'SPIRITUAL', 'WORLD', 'AND', 'HER', 'BODILY', 'FRAME', 'FROM', 'ITS', 'MATERIAL', 'OF', 'EARTH'] +1221-135766-0005-1310: hyp=['HESTER', 'COULD', 'ONLY', 'ACCOUNT', 'FOR', 'THE', "CHILD'S", 'CHARACTER', 'AND', 'EVEN', 'THEN', 'MOST', 'VAGUELY', 'AND', 'IMPERFECTLY', 'BY', 'RECALLING', 'WHAT', 'SHE', 'HERSELF', 'HAD', 'BEEN', 'DURING', 'THAT', 'MOMENTOUS', 'PERIOD', 'WHILE', 'PEARL', 'WAS', 'IMBIBING', 'HER', 'SOUL', 'FROM', 'THE', 'SPIRITUAL', 'WORLD', 'AND', 'HER', 'BODILY', 'FRAME', 'FROM', 'ITS', 'MATERIAL', 'OF', 'EARTH'] +1221-135766-0006-1311: ref=['THEY', 'WERE', 'NOW', 'ILLUMINATED', 'BY', 'THE', 'MORNING', 'RADIANCE', 'OF', 'A', 'YOUNG', "CHILD'S", 'DISPOSITION', 'BUT', 'LATER', 'IN', 'THE', 'DAY', 'OF', 'EARTHLY', 'EXISTENCE', 'MIGHT', 'BE', 'PROLIFIC', 'OF', 'THE', 'STORM', 'AND', 'WHIRLWIND'] +1221-135766-0006-1311: hyp=['THEY', 'WERE', 'NOW', 'ILLUMINATED', 'BY', 'THE', 'MORNING', 'RADIANCE', 'OF', 'A', 'YOUNG', "CHILD'S", 'DISPOSITION', 'BUT', 'LATER', 'IN', 'THE', 'DAY', 'OF', 'EARTHLY', 'EXISTENCE', 'MIGHT', 'BE', 'PROLIFIC', 'OF', 'THE', 'STORM', 'AND', 'WHIRLWIND'] +1221-135766-0007-1312: ref=['HESTER', 'PRYNNE', 'NEVERTHELESS', 'THE', 'LOVING', 'MOTHER', 'OF', 'THIS', 'ONE', 'CHILD', 'RAN', 'LITTLE', 'RISK', 'OF', 'ERRING', 'ON', 'THE', 'SIDE', 'OF', 'UNDUE', 'SEVERITY'] +1221-135766-0007-1312: hyp=['HESTER', 'PRYNNE', 'NEVERTHELESS', 'THE', 'LOVING', 'MOTHER', 'OF', 'THIS', 'ONE', 'CHILD', 'RAN', 'LITTLE', 'RISK', 'OF', 'ERRING', 'ON', 'THE', 'SIDE', 'OF', 'UNDUE', 'SEVERITY'] +1221-135766-0008-1313: ref=['MINDFUL', 'HOWEVER', 'OF', 'HER', 'OWN', 'ERRORS', 'AND', 'MISFORTUNES', 'SHE', 'EARLY', 'SOUGHT', 'TO', 'IMPOSE', 'A', 'TENDER', 'BUT', 'STRICT', 'CONTROL', 'OVER', 'THE', 'INFANT', 'IMMORTALITY', 'THAT', 'WAS', 'COMMITTED', 'TO', 'HER', 'CHARGE'] +1221-135766-0008-1313: hyp=['MINDFUL', 'HOWEVER', 'OF', 'HER', 'OWN', 'ERRORS', 'AND', 'MISFORTUNES', 'SHE', 'EARLY', 'SOUGHT', 'TO', 'IMPOSE', 'A', 'TENDER', 'BUT', 'STRICT', 'CONTROL', 'OVER', 'THE', 'INFANT', 'IMMORTALITY', 'THAT', 'WAS', 'COMMITTED', 'TO', 'HER', 'CHARGE'] +1221-135766-0009-1314: ref=['AS', 'TO', 'ANY', 'OTHER', 'KIND', 'OF', 'DISCIPLINE', 'WHETHER', 'ADDRESSED', 'TO', 'HER', 'MIND', 'OR', 'HEART', 'LITTLE', 'PEARL', 'MIGHT', 'OR', 'MIGHT', 'NOT', 'BE', 'WITHIN', 'ITS', 'REACH', 'IN', 'ACCORDANCE', 'WITH', 'THE', 'CAPRICE', 'THAT', 'RULED', 'THE', 'MOMENT'] +1221-135766-0009-1314: hyp=['AS', 'TO', 'ANY', 'OTHER', 'KIND', 'OF', 'DISCIPLINE', 'WHETHER', 'ADDRESSED', 'TO', 'HER', 'MIND', 'OR', 'HEART', 'LITTLE', 'PEARL', 'MIGHT', 'OR', 'MIGHT', 'NOT', 'BE', 'WITHIN', 'ITS', 'REACH', 'IN', 'ACCORDANCE', 'WITH', 'THE', 'CAPRICE', 'THAT', 'RULED', 'THE', 'MOMENT'] +1221-135766-0010-1315: ref=['IT', 'WAS', 'A', 'LOOK', 'SO', 'INTELLIGENT', 'YET', 'INEXPLICABLE', 'PERVERSE', 'SOMETIMES', 'SO', 'MALICIOUS', 'BUT', 'GENERALLY', 'ACCOMPANIED', 'BY', 'A', 'WILD', 'FLOW', 'OF', 'SPIRITS', 'THAT', 'HESTER', 'COULD', 'NOT', 'HELP', 'QUESTIONING', 'AT', 'SUCH', 'MOMENTS', 'WHETHER', 'PEARL', 'WAS', 'A', 'HUMAN', 'CHILD'] +1221-135766-0010-1315: hyp=['IT', 'WAS', 'A', 'LOOK', 'SO', 'INTELLIGENT', 'YET', 
'INEXPLICABLE', 'PERVERSE', 'SOMETIMES', 'SO', 'MALICIOUS', 'BUT', 'GENERALLY', 'ACCOMPANIED', 'BY', 'A', 'WILD', 'FLOW', 'OF', 'SPIRITS', 'THAT', 'HESTER', 'COULD', 'NOT', 'HELP', 'QUESTIONING', 'AT', 'SUCH', 'MOMENTS', 'WHETHER', 'PEARL', 'WAS', 'A', 'HUMAN', 'CHILD'] +1221-135766-0011-1316: ref=['BEHOLDING', 'IT', 'HESTER', 'WAS', 'CONSTRAINED', 'TO', 'RUSH', 'TOWARDS', 'THE', 'CHILD', 'TO', 'PURSUE', 'THE', 'LITTLE', 'ELF', 'IN', 'THE', 'FLIGHT', 'WHICH', 'SHE', 'INVARIABLY', 'BEGAN', 'TO', 'SNATCH', 'HER', 'TO', 'HER', 'BOSOM', 'WITH', 'A', 'CLOSE', 'PRESSURE', 'AND', 'EARNEST', 'KISSES', 'NOT', 'SO', 'MUCH', 'FROM', 'OVERFLOWING', 'LOVE', 'AS', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'PEARL', 'WAS', 'FLESH', 'AND', 'BLOOD', 'AND', 'NOT', 'UTTERLY', 'DELUSIVE'] +1221-135766-0011-1316: hyp=['BEHOLDING', 'IT', 'HESTER', 'WAS', 'CONSTRAINED', 'TO', 'RUSH', 'TOWARDS', 'THE', 'CHILD', 'TO', 'PURSUE', 'THE', 'LITTLE', 'ELF', 'IN', 'THE', 'FLIGHT', 'WHICH', 'SHE', 'INVARIABLY', 'BEGAN', 'TO', 'SNATCH', 'HER', 'TO', 'HER', 'BOSOM', 'WITH', 'A', 'CLOSE', 'PRESSURE', 'AND', 'EARNEST', 'KISSES', 'NOT', 'SO', 'MUCH', 'FROM', 'OVERFLOWING', 'LOVE', 'AS', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'PEARL', 'WAS', 'FLESH', 'AND', 'BLOOD', 'AND', 'NOT', 'UTTERLY', 'DELUSIVE'] +1221-135766-0012-1317: ref=['BROODING', 'OVER', 'ALL', 'THESE', 'MATTERS', 'THE', 'MOTHER', 'FELT', 'LIKE', 'ONE', 'WHO', 'HAS', 'EVOKED', 'A', 'SPIRIT', 'BUT', 'BY', 'SOME', 'IRREGULARITY', 'IN', 'THE', 'PROCESS', 'OF', 'CONJURATION', 'HAS', 'FAILED', 'TO', 'WIN', 'THE', 'MASTER', 'WORD', 'THAT', 'SHOULD', 'CONTROL', 'THIS', 'NEW', 'AND', 'INCOMPREHENSIBLE', 'INTELLIGENCE'] +1221-135766-0012-1317: hyp=['BROODING', 'OVER', 'ALL', 'THESE', 'MATTERS', 'THE', 'MOTHER', 'FELT', 'LIKE', 'ONE', 'WHO', 'HAS', 'EVOKED', 'A', 'SPIRIT', 'BUT', 'BY', 'SOME', 'IRREGULARITY', 'IN', 'THE', 'PROCESS', 'OF', 'CONJURATION', 'HAS', 'FAILED', 'TO', 'WIN', 'THE', 'MASTER', 'WORD', 'THAT', 'SHOULD', 'CONTROL', 'THIS', 'NEW', 'AND', 'INCOMPREHENSIBLE', 'INTELLIGENCE'] +1221-135766-0013-1318: ref=['PEARL', 'WAS', 'A', 'BORN', 'OUTCAST', 'OF', 'THE', 'INFANTILE', 'WORLD'] +1221-135766-0013-1318: hyp=['PEARL', 'WAS', 'A', 'BORN', 'OUTCAST', 'OF', 'THE', 'INFANTILE', 'WORLD'] +1221-135766-0014-1319: ref=['PEARL', 'SAW', 'AND', 'GAZED', 'INTENTLY', 'BUT', 'NEVER', 'SOUGHT', 'TO', 'MAKE', 'ACQUAINTANCE'] +1221-135766-0014-1319: hyp=['PEARL', 'SAUL', 'AND', 'GAZED', 'INTENTLY', 'BUT', 'NEVER', 'SOUGHT', 'TO', 'MAKE', 'ACQUAINTANCE'] +1221-135766-0015-1320: ref=['IF', 'SPOKEN', 'TO', 'SHE', 'WOULD', 'NOT', 'SPEAK', 'AGAIN'] +1221-135766-0015-1320: hyp=['IF', 'SPOKEN', 'TO', 'SHE', 'WOULD', 'NOT', 'SPEAK', 'AGAIN'] +1221-135767-0000-1280: ref=['HESTER', 'PRYNNE', 'WENT', 'ONE', 'DAY', 'TO', 'THE', 'MANSION', 'OF', 'GOVERNOR', 'BELLINGHAM', 'WITH', 'A', 'PAIR', 'OF', 'GLOVES', 'WHICH', 'SHE', 'HAD', 'FRINGED', 'AND', 'EMBROIDERED', 'TO', 'HIS', 'ORDER', 'AND', 'WHICH', 'WERE', 'TO', 'BE', 'WORN', 'ON', 'SOME', 'GREAT', 'OCCASION', 'OF', 'STATE', 'FOR', 'THOUGH', 'THE', 'CHANCES', 'OF', 'A', 'POPULAR', 'ELECTION', 'HAD', 'CAUSED', 'THIS', 'FORMER', 'RULER', 'TO', 'DESCEND', 'A', 'STEP', 'OR', 'TWO', 'FROM', 'THE', 'HIGHEST', 'RANK', 'HE', 'STILL', 'HELD', 'AN', 'HONOURABLE', 'AND', 'INFLUENTIAL', 'PLACE', 'AMONG', 'THE', 'COLONIAL', 'MAGISTRACY'] +1221-135767-0000-1280: hyp=['HESTER', 'PRYNNE', 'WENT', 'ONE', 'DAY', 'TO', 'THE', 'MANSION', 'OF', 'GOVERNOR', 'BELLINGHAM', 'WITH', 'A', 'PAIR', 'OF', 'GLOVES', 'WHICH', 'SHE', 'HAD', 'FRINGED', 'AND', 'EMBROIDERED', 'TO', 
'HIS', 'ORDER', 'AND', 'WHICH', 'WERE', 'TO', 'BE', 'WORN', 'ON', 'SOME', 'GREAT', 'OCCASION', 'OF', 'STATE', 'FOR', 'THOUGH', 'THE', 'CHANCES', 'OF', 'A', 'POPULAR', 'ELECTION', 'HAD', 'CAUSED', 'THIS', 'FORMER', 'RULER', 'TO', 'DESCEND', 'A', 'STEP', 'OR', 'TWO', 'FROM', 'THE', 'HIGHEST', 'RANK', 'HE', 'STILL', 'HELD', 'AN', 'HONORABLE', 'AND', 'INFLUENTIAL', 'PLACE', 'AMONG', 'THE', 'COLONIAL', 'MAGISTRACY'] +1221-135767-0001-1281: ref=['ANOTHER', 'AND', 'FAR', 'MORE', 'IMPORTANT', 'REASON', 'THAN', 'THE', 'DELIVERY', 'OF', 'A', 'PAIR', 'OF', 'EMBROIDERED', 'GLOVES', 'IMPELLED', 'HESTER', 'AT', 'THIS', 'TIME', 'TO', 'SEEK', 'AN', 'INTERVIEW', 'WITH', 'A', 'PERSONAGE', 'OF', 'SO', 'MUCH', 'POWER', 'AND', 'ACTIVITY', 'IN', 'THE', 'AFFAIRS', 'OF', 'THE', 'SETTLEMENT'] +1221-135767-0001-1281: hyp=['ANOTHER', 'AND', 'FAR', 'MORE', 'IMPORTANT', 'REASON', 'THAN', 'THE', 'DELIVERY', 'OF', 'A', 'PAIR', 'OF', 'EMBROIDERED', 'GLOVES', 'IMPELLED', 'HESTER', 'AT', 'THIS', 'TIME', 'TO', 'SEEK', 'AN', 'INTERVIEW', 'WITH', 'A', 'PERSONAGE', 'OF', 'SO', 'MUCH', 'POWER', 'AND', 'ACTIVITY', 'IN', 'THE', 'AFFAIRS', 'OF', 'THE', 'SETTLEMENT'] +1221-135767-0002-1282: ref=['AT', 'THAT', 'EPOCH', 'OF', 'PRISTINE', 'SIMPLICITY', 'HOWEVER', 'MATTERS', 'OF', 'EVEN', 'SLIGHTER', 'PUBLIC', 'INTEREST', 'AND', 'OF', 'FAR', 'LESS', 'INTRINSIC', 'WEIGHT', 'THAN', 'THE', 'WELFARE', 'OF', 'HESTER', 'AND', 'HER', 'CHILD', 'WERE', 'STRANGELY', 'MIXED', 'UP', 'WITH', 'THE', 'DELIBERATIONS', 'OF', 'LEGISLATORS', 'AND', 'ACTS', 'OF', 'STATE'] +1221-135767-0002-1282: hyp=['AT', 'THAT', 'EPOCH', 'OF', 'PRISTINE', 'SIMPLICITY', 'HOWEVER', 'MATTERS', 'OF', 'EVEN', 'SLIGHTER', 'PUBLIC', 'INTEREST', 'AND', 'OF', 'FAR', 'LESS', 'INTRINSIC', 'WEIGHT', 'THAN', 'THE', 'WELFARE', 'OF', 'HESTER', 'AND', 'HER', 'CHILD', 'WERE', 'STRANGELY', 'MIXED', 'UP', 'WITH', 'THE', 'DELIBERATIONS', 'OF', 'LEGISLATORS', 'AND', 'ACTS', 'OF', 'STATE'] +1221-135767-0003-1283: ref=['THE', 'PERIOD', 'WAS', 'HARDLY', 'IF', 'AT', 'ALL', 'EARLIER', 'THAN', 'THAT', 'OF', 'OUR', 'STORY', 'WHEN', 'A', 'DISPUTE', 'CONCERNING', 'THE', 'RIGHT', 'OF', 'PROPERTY', 'IN', 'A', 'PIG', 'NOT', 'ONLY', 'CAUSED', 'A', 'FIERCE', 'AND', 'BITTER', 'CONTEST', 'IN', 'THE', 'LEGISLATIVE', 'BODY', 'OF', 'THE', 'COLONY', 'BUT', 'RESULTED', 'IN', 'AN', 'IMPORTANT', 'MODIFICATION', 'OF', 'THE', 'FRAMEWORK', 'ITSELF', 'OF', 'THE', 'LEGISLATURE'] +1221-135767-0003-1283: hyp=['THE', 'PERIOD', 'WAS', 'HARDLY', 'IF', 'AT', 'ALL', 'EARLIER', 'THAN', 'THAT', 'OF', 'OUR', 'STORY', 'WHEN', 'A', 'DISPUTE', 'CONCERNING', 'THE', 'RIGHT', 'OF', 'PROPERTY', 'IN', 'A', 'PIG', 'NOT', 'ONLY', 'CAUSED', 'A', 'FIERCE', 'AND', 'BITTER', 'CONTEST', 'IN', 'THE', 'LEGISLATIVE', 'BODY', 'OF', 'THE', 'COLONY', 'BUT', 'RESULTED', 'IN', 'AN', 'IMPORTANT', 'MODIFICATION', 'OF', 'THE', 'FRAMEWORK', 'ITSELF', 'OF', 'THE', 'LEGISLATURE'] +1221-135767-0004-1284: ref=['WE', 'HAVE', 'SPOKEN', 'OF', "PEARL'S", 'RICH', 'AND', 'LUXURIANT', 'BEAUTY', 'A', 'BEAUTY', 'THAT', 'SHONE', 'WITH', 'DEEP', 'AND', 'VIVID', 'TINTS', 'A', 'BRIGHT', 'COMPLEXION', 'EYES', 'POSSESSING', 'INTENSITY', 'BOTH', 'OF', 'DEPTH', 'AND', 'GLOW', 'AND', 'HAIR', 'ALREADY', 'OF', 'A', 'DEEP', 'GLOSSY', 'BROWN', 'AND', 'WHICH', 'IN', 'AFTER', 'YEARS', 'WOULD', 'BE', 'NEARLY', 'AKIN', 'TO', 'BLACK'] +1221-135767-0004-1284: hyp=['WE', 'HAVE', 'SPOKEN', 'OF', 'PEARLS', 'RICH', 'AND', 'LUXURIANT', 'BEAUTY', 'A', 'BEAUTY', 'THAT', 'SHONE', 'WITH', 'DEEP', 'AND', 'VIVID', 'TINTS', 'A', 'BRIGHT', 'COMPLEXION', 'EYES', 'POSSESSING', 'INTENSITY', 'BOTH', 
'OF', 'DEPTH', 'AND', 'GLOW', 'AND', 'HAIR', 'ALREADY', 'OF', 'A', 'DEEP', 'GLOSSY', 'BROWN', 'AND', 'WHICH', 'IN', 'AFTER', 'YEARS', 'WOULD', 'BE', 'NEARLY', 'AKIN', 'TO', 'BLACK'] +1221-135767-0005-1285: ref=['IT', 'WAS', 'THE', 'SCARLET', 'LETTER', 'IN', 'ANOTHER', 'FORM', 'THE', 'SCARLET', 'LETTER', 'ENDOWED', 'WITH', 'LIFE'] +1221-135767-0005-1285: hyp=['IT', 'WAS', 'THE', 'SCARLET', 'LETTER', 'IN', 'ANOTHER', 'FORM', 'THE', 'SCARLET', 'LETTER', 'ENDOWED', 'WITH', 'LIFE'] +1221-135767-0006-1286: ref=['THE', 'MOTHER', 'HERSELF', 'AS', 'IF', 'THE', 'RED', 'IGNOMINY', 'WERE', 'SO', 'DEEPLY', 'SCORCHED', 'INTO', 'HER', 'BRAIN', 'THAT', 'ALL', 'HER', 'CONCEPTIONS', 'ASSUMED', 'ITS', 'FORM', 'HAD', 'CAREFULLY', 'WROUGHT', 'OUT', 'THE', 'SIMILITUDE', 'LAVISHING', 'MANY', 'HOURS', 'OF', 'MORBID', 'INGENUITY', 'TO', 'CREATE', 'AN', 'ANALOGY', 'BETWEEN', 'THE', 'OBJECT', 'OF', 'HER', 'AFFECTION', 'AND', 'THE', 'EMBLEM', 'OF', 'HER', 'GUILT', 'AND', 'TORTURE'] +1221-135767-0006-1286: hyp=['THE', 'MOTHER', 'HERSELF', 'AS', 'IF', 'THE', 'RED', 'IGNOMINY', 'WERE', 'SO', 'DEEPLY', 'SCORCHED', 'INTO', 'HER', 'BRAIN', 'THAT', 'ALL', 'HER', 'CONCEPTIONS', 'ASSUMED', 'ITS', 'FORM', 'HAD', 'CAREFULLY', 'WROUGHT', 'OUT', 'THE', 'SIMILITUDE', 'LAVISHING', 'MANY', 'HOURS', 'OF', 'MORBID', 'INGENUITY', 'TO', 'CREATE', 'AN', 'ANALOGY', 'BETWEEN', 'THE', 'OBJECT', 'OF', 'HER', 'AFFECTION', 'AND', 'THE', 'EMBLEM', 'OF', 'HER', 'GUILT', 'AND', 'TORTURE'] +1221-135767-0007-1287: ref=['BUT', 'IN', 'TRUTH', 'PEARL', 'WAS', 'THE', 'ONE', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'AND', 'ONLY', 'IN', 'CONSEQUENCE', 'OF', 'THAT', 'IDENTITY', 'HAD', 'HESTER', 'CONTRIVED', 'SO', 'PERFECTLY', 'TO', 'REPRESENT', 'THE', 'SCARLET', 'LETTER', 'IN', 'HER', 'APPEARANCE'] +1221-135767-0007-1287: hyp=['BUT', 'IN', 'TRUTH', 'PEARL', 'WAS', 'THE', 'ONE', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'AND', 'ONLY', 'IN', 'CONSEQUENCE', 'OF', 'THAT', 'IDENTITY', 'HAD', 'HESTER', 'CONTRIVED', 'SO', 'PERFECTLY', 'TO', 'REPRESENT', 'THE', 'SCARLET', 'LETTER', 'IN', 'HER', 'APPEARANCE'] +1221-135767-0008-1288: ref=['COME', 'THEREFORE', 'AND', 'LET', 'US', 'FLING', 'MUD', 'AT', 'THEM'] +1221-135767-0008-1288: hyp=['COME', 'THEREFORE', 'AND', 'LET', 'US', 'FLING', 'MUD', 'AT', 'THEM'] +1221-135767-0009-1289: ref=['BUT', 'PEARL', 'WHO', 'WAS', 'A', 'DAUNTLESS', 'CHILD', 'AFTER', 'FROWNING', 'STAMPING', 'HER', 'FOOT', 'AND', 'SHAKING', 'HER', 'LITTLE', 'HAND', 'WITH', 'A', 'VARIETY', 'OF', 'THREATENING', 'GESTURES', 'SUDDENLY', 'MADE', 'A', 'RUSH', 'AT', 'THE', 'KNOT', 'OF', 'HER', 'ENEMIES', 'AND', 'PUT', 'THEM', 'ALL', 'TO', 'FLIGHT'] +1221-135767-0009-1289: hyp=['BUT', 'PEARL', 'WHO', 'WAS', 'A', 'DAUNTLESS', 'CHILD', 'AFTER', 'FROWNING', 'STAMPING', 'HER', 'FOOT', 'AND', 'SHAKING', 'HER', 'LITTLE', 'HAND', 'WITH', 'A', 'VARIETY', 'OF', 'THREATENING', 'GESTURES', 'SUDDENLY', 'MADE', 'A', 'RUSH', 'AT', 'THE', 'KNOT', 'OF', 'HER', 'ENEMIES', 'AND', 'PUT', 'THEM', 'ALL', 'TO', 'FLIGHT'] +1221-135767-0010-1290: ref=['SHE', 'SCREAMED', 'AND', 'SHOUTED', 'TOO', 'WITH', 'A', 'TERRIFIC', 'VOLUME', 'OF', 'SOUND', 'WHICH', 'DOUBTLESS', 'CAUSED', 'THE', 'HEARTS', 'OF', 'THE', 'FUGITIVES', 'TO', 'QUAKE', 'WITHIN', 'THEM'] +1221-135767-0010-1290: hyp=['SHE', 'SCREAMED', 'AND', 'SHOUTED', 'TOO', 'WITH', 'A', 'TERRIFIC', 'VOLUME', 'OF', 'SOUND', 'WHICH', 'DOUBTLESS', 'CAUSED', 'THE', 'HEARTS', 'OF', 'THE', 'FUGITIVES', 'TO', 'QUAKE', 'WITHIN', 'THEM'] +1221-135767-0011-1291: ref=['IT', 'WAS', 'FURTHER', 'DECORATED', 'WITH', 'STRANGE', 'AND', 'SEEMINGLY', 'CABALISTIC', 
'FIGURES', 'AND', 'DIAGRAMS', 'SUITABLE', 'TO', 'THE', 'QUAINT', 'TASTE', 'OF', 'THE', 'AGE', 'WHICH', 'HAD', 'BEEN', 'DRAWN', 'IN', 'THE', 'STUCCO', 'WHEN', 'NEWLY', 'LAID', 'ON', 'AND', 'HAD', 'NOW', 'GROWN', 'HARD', 'AND', 'DURABLE', 'FOR', 'THE', 'ADMIRATION', 'OF', 'AFTER', 'TIMES'] +1221-135767-0011-1291: hyp=['IT', 'WAS', 'FURTHER', 'DECORATED', 'WITH', 'STRANGE', 'AND', 'SEEMINGLY', 'CABALISTIC', 'FIGURES', 'AND', 'DIAGRAMS', 'SUITABLE', 'TO', 'THE', 'QUAINT', 'TASTE', 'OF', 'THE', 'AGE', 'WHICH', 'HAD', 'BEEN', 'DRAWN', 'IN', 'THE', 'STUCCO', 'WHEN', 'NEWLY', 'LAID', 'ON', 'AND', 'HAD', 'NOW', 'GROWN', 'HARD', 'AND', 'DURABLE', 'FOR', 'THE', 'ADMIRATION', 'OF', 'AFTER', 'TIMES'] +1221-135767-0012-1292: ref=['THEY', 'APPROACHED', 'THE', 'DOOR', 'WHICH', 'WAS', 'OF', 'AN', 'ARCHED', 'FORM', 'AND', 'FLANKED', 'ON', 'EACH', 'SIDE', 'BY', 'A', 'NARROW', 'TOWER', 'OR', 'PROJECTION', 'OF', 'THE', 'EDIFICE', 'IN', 'BOTH', 'OF', 'WHICH', 'WERE', 'LATTICE', 'WINDOWS', 'THE', 'WOODEN', 'SHUTTERS', 'TO', 'CLOSE', 'OVER', 'THEM', 'AT', 'NEED'] +1221-135767-0012-1292: hyp=['THEY', 'APPROACHED', 'THE', 'DOOR', 'WHICH', 'WAS', 'OF', 'AN', 'ARCHED', 'FORM', 'AND', 'FLANKED', 'ON', 'EACH', 'SIDE', 'BY', 'A', 'NARROW', 'TOWER', 'OR', 'PROJECTION', 'OF', 'THE', 'EDIFICE', 'IN', 'BOTH', 'OF', 'WHICH', 'WERE', 'LATTICE', 'WINDOWS', 'THE', 'WOODEN', 'SHUTTERS', 'TO', 'CLOSE', 'OVER', 'THEM', 'AT', 'NEED'] +1221-135767-0013-1293: ref=['LIFTING', 'THE', 'IRON', 'HAMMER', 'THAT', 'HUNG', 'AT', 'THE', 'PORTAL', 'HESTER', 'PRYNNE', 'GAVE', 'A', 'SUMMONS', 'WHICH', 'WAS', 'ANSWERED', 'BY', 'ONE', 'OF', 'THE', "GOVERNOR'S", 'BOND', 'SERVANT', 'A', 'FREE', 'BORN', 'ENGLISHMAN', 'BUT', 'NOW', 'A', 'SEVEN', 'YEARS', 'SLAVE'] +1221-135767-0013-1293: hyp=['LIFTING', 'THE', 'IRON', 'HAMMER', 'THAT', 'HUNG', 'AT', 'THE', 'PORTAL', 'HESTER', 'PRYNNE', 'GAVE', 'A', 'SUMMONS', 'WHICH', 'WAS', 'ANSWERED', 'BY', 'ONE', 'OF', 'THE', "GOVERNOR'S", 'BOND', 'SERVANTS', 'A', 'FREE', 'BORN', 'ENGLISHMAN', 'BUT', 'NOW', 'A', 'SEVEN', 'YEARS', 'SLAVE'] +1221-135767-0014-1294: ref=['YEA', 'HIS', 'HONOURABLE', 'WORSHIP', 'IS', 'WITHIN', 'BUT', 'HE', 'HATH', 'A', 'GODLY', 'MINISTER', 'OR', 'TWO', 'WITH', 'HIM', 'AND', 'LIKEWISE', 'A', 'LEECH'] +1221-135767-0014-1294: hyp=['YEA', 'HIS', 'HONOURABLE', 'WORSHIP', 'IS', 'WITHIN', 'BUT', 'HE', 'HATH', 'A', 'GODLY', 'MINISTER', 'OR', 'TWO', 'WITH', 'HIM', 'AND', 'LIKEWISE', 'A', 'LEECH'] +1221-135767-0015-1295: ref=['YE', 'MAY', 'NOT', 'SEE', 'HIS', 'WORSHIP', 'NOW'] +1221-135767-0015-1295: hyp=['YE', 'MAY', 'NOT', 'SEE', 'HIS', 'WORSHIP', 'NOW'] +1221-135767-0016-1296: ref=['WITH', 'MANY', 'VARIATIONS', 'SUGGESTED', 'BY', 'THE', 'NATURE', 'OF', 'HIS', 'BUILDING', 'MATERIALS', 'DIVERSITY', 'OF', 'CLIMATE', 'AND', 'A', 'DIFFERENT', 'MODE', 'OF', 'SOCIAL', 'LIFE', 'GOVERNOR', 'BELLINGHAM', 'HAD', 'PLANNED', 'HIS', 'NEW', 'HABITATION', 'AFTER', 'THE', 'RESIDENCES', 'OF', 'GENTLEMEN', 'OF', 'FAIR', 'ESTATE', 'IN', 'HIS', 'NATIVE', 'LAND'] +1221-135767-0016-1296: hyp=['WITH', 'MANY', 'VARIATIONS', 'SUGGESTED', 'BY', 'THE', 'NATURE', 'OF', 'HIS', 'BUILDING', 'MATERIALS', 'DIVERSITY', 'OF', 'CLIMATE', 'AND', 'A', 'DIFFERENT', 'MODE', 'OF', 'SOCIAL', 'LIFE', 'GOVERNOR', 'BELLINGHAM', 'HAD', 'PLANNED', 'HIS', 'NEW', 'HABITATION', 'AFTER', 'THE', 'RESIDENCES', 'OF', 'GENTLEMEN', 'OF', 'FAIREST', 'STATE', 'IN', 'HIS', 'NATIVE', 'LAND'] +1221-135767-0017-1297: ref=['ON', 'THE', 'TABLE', 'IN', 'TOKEN', 'THAT', 'THE', 'SENTIMENT', 'OF', 'OLD', 'ENGLISH', 'HOSPITALITY', 'HAD', 'NOT', 'BEEN', 'LEFT', 
'BEHIND', 'STOOD', 'A', 'LARGE', 'PEWTER', 'TANKARD', 'AT', 'THE', 'BOTTOM', 'OF', 'WHICH', 'HAD', 'HESTER', 'OR', 'PEARL', 'PEEPED', 'INTO', 'IT', 'THEY', 'MIGHT', 'HAVE', 'SEEN', 'THE', 'FROTHY', 'REMNANT', 'OF', 'A', 'RECENT', 'DRAUGHT', 'OF', 'ALE'] +1221-135767-0017-1297: hyp=['ON', 'THE', 'TABLE', 'IN', 'TOKEN', 'THAT', 'THE', 'SENTIMENT', 'OF', 'OLD', 'ENGLISH', 'HOSPITALITY', 'HAD', 'NOT', 'BEEN', 'LEFT', 'BEHIND', 'STOOD', 'A', 'LARGE', 'PEWTER', 'TANKARD', 'AT', 'THE', 'BOTTOM', 'OF', 'WHICH', 'HAD', 'HESTER', 'OR', 'PEARL', 'PEEPED', 'INTO', 'IT', 'THEY', 'MIGHT', 'HAVE', 'SEEN', 'THE', 'FROTHY', 'REMNANT', 'OF', 'A', 'RECENT', 'DRAUGHT', 'OF', 'ALE'] +1221-135767-0018-1298: ref=['LITTLE', 'PEARL', 'WHO', 'WAS', 'AS', 'GREATLY', 'PLEASED', 'WITH', 'THE', 'GLEAMING', 'ARMOUR', 'AS', 'SHE', 'HAD', 'BEEN', 'WITH', 'THE', 'GLITTERING', 'FRONTISPIECE', 'OF', 'THE', 'HOUSE', 'SPENT', 'SOME', 'TIME', 'LOOKING', 'INTO', 'THE', 'POLISHED', 'MIRROR', 'OF', 'THE', 'BREASTPLATE'] +1221-135767-0018-1298: hyp=['LITTLE', 'PEARL', 'WHO', 'WAS', 'AS', 'GREATLY', 'PLEASED', 'WITH', 'THE', 'GLEAMING', 'ARMOR', 'AS', 'SHE', 'HAD', 'BEEN', 'WITH', 'THE', 'GLITTERING', 'FRONTESPIECE', 'OF', 'THE', 'HOUSE', 'SPENT', 'SOME', 'TIME', 'LOOKING', 'INTO', 'THE', 'POLISHED', 'MIRROR', 'OF', 'THE', 'BREASTPLATE'] +1221-135767-0019-1299: ref=['MOTHER', 'CRIED', 'SHE', 'I', 'SEE', 'YOU', 'HERE', 'LOOK', 'LOOK'] +1221-135767-0019-1299: hyp=['MOTHER', 'CRIED', 'SHE', 'I', 'SEE', 'YOU', 'HERE', 'LOOK', 'LOOK'] +1221-135767-0020-1300: ref=['IN', 'TRUTH', 'SHE', 'SEEMED', 'ABSOLUTELY', 'HIDDEN', 'BEHIND', 'IT'] +1221-135767-0020-1300: hyp=['IN', 'TRUTH', 'SHE', 'SEEMED', 'ABSOLUTELY', 'HIDDEN', 'BEHIND', 'IT'] +1221-135767-0021-1301: ref=['PEARL', 'ACCORDINGLY', 'RAN', 'TO', 'THE', 'BOW', 'WINDOW', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'HALL', 'AND', 'LOOKED', 'ALONG', 'THE', 'VISTA', 'OF', 'A', 'GARDEN', 'WALK', 'CARPETED', 'WITH', 'CLOSELY', 'SHAVEN', 'GRASS', 'AND', 'BORDERED', 'WITH', 'SOME', 'RUDE', 'AND', 'IMMATURE', 'ATTEMPT', 'AT', 'SHRUBBERY'] +1221-135767-0021-1301: hyp=['PEARL', 'ACCORDINGLY', 'RAN', 'TO', 'THE', 'BOW', 'WINDOW', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'HALL', 'AND', 'LOOKED', 'ALONG', 'THE', 'VISTA', 'OF', 'A', 'GARDEN', 'WALK', 'CARPETED', 'WITH', 'CLOSELY', 'SHAVEN', 'GRASS', 'AND', 'BORDERED', 'WITH', 'SOME', 'RUDE', 'AND', 'IMMITOR', 'ATTEMPT', 'AT', 'SHRUBBERY'] +1221-135767-0022-1302: ref=['BUT', 'THE', 'PROPRIETOR', 'APPEARED', 'ALREADY', 'TO', 'HAVE', 'RELINQUISHED', 'AS', 'HOPELESS', 'THE', 'EFFORT', 'TO', 'PERPETUATE', 'ON', 'THIS', 'SIDE', 'OF', 'THE', 'ATLANTIC', 'IN', 'A', 'HARD', 'SOIL', 'AND', 'AMID', 'THE', 'CLOSE', 'STRUGGLE', 'FOR', 'SUBSISTENCE', 'THE', 'NATIVE', 'ENGLISH', 'TASTE', 'FOR', 'ORNAMENTAL', 'GARDENING'] +1221-135767-0022-1302: hyp=['BUT', 'THE', 'PROPRIETOR', 'APPEARED', 'ALREADY', 'TO', 'HAVE', 'RELINQUISHED', 'AS', 'HOPELESS', 'THE', 'EFFORT', 'TO', 'PERPETUATE', 'ON', 'THIS', 'SIDE', 'OF', 'THE', 'ATLANTIC', 'IN', 'A', 'HARD', 'SOIL', 'AND', 'AMID', 'THE', 'CLOSE', 'STRUGGLE', 'FOR', 'SUBSISTENCE', 'THE', 'NATIVE', 'ENGLISH', 'TASTE', 'FOR', 'ORNAMENTAL', 'GARDENING'] +1221-135767-0023-1303: ref=['THERE', 'WERE', 'A', 'FEW', 'ROSE', 'BUSHES', 'HOWEVER', 'AND', 'A', 'NUMBER', 'OF', 'APPLE', 'TREES', 'PROBABLY', 'THE', 'DESCENDANTS', 'OF', 'THOSE', 'PLANTED', 'BY', 'THE', 'REVEREND', 'MISTER', 'BLACKSTONE', 'THE', 'FIRST', 'SETTLER', 'OF', 'THE', 'PENINSULA', 'THAT', 'HALF', 'MYTHOLOGICAL', 'PERSONAGE', 'WHO', 'RIDES', 'THROUGH', 'OUR', 
'EARLY', 'ANNALS', 'SEATED', 'ON', 'THE', 'BACK', 'OF', 'A', 'BULL'] +1221-135767-0023-1303: hyp=['THERE', 'WERE', 'A', 'FEW', 'ROSE', 'BUSHES', 'HOWEVER', 'AND', 'A', 'NUMBER', 'OF', 'APPLE', 'TREES', 'PROBABLY', 'THE', 'DESCENDANTS', 'OF', 'THOSE', 'PLANTED', 'BY', 'THE', 'REVEREND', 'MISTER', 'BLACKSTONE', 'THE', 'FIRST', 'SETTLER', 'OF', 'THE', 'PENINSULA', 'THAT', 'HALF', 'MYTHOLOGICAL', 'PERSONAGE', 'WHO', 'RIDES', 'THROUGH', 'OUR', 'EARLY', 'ANNALS', 'SEATED', 'ON', 'THE', 'BACK', 'OF', 'A', 'BULL'] +1221-135767-0024-1304: ref=['PEARL', 'SEEING', 'THE', 'ROSE', 'BUSHES', 'BEGAN', 'TO', 'CRY', 'FOR', 'A', 'RED', 'ROSE', 'AND', 'WOULD', 'NOT', 'BE', 'PACIFIED'] +1221-135767-0024-1304: hyp=['PEARL', 'SEEING', 'THE', 'ROSE', 'BUSHES', 'BEGAN', 'TO', 'CRY', 'FOR', 'A', 'RED', 'ROSE', 'AND', 'WOULD', 'NOT', 'BE', 'PACIFIED'] +1284-1180-0000-829: ref=['HE', 'WORE', 'BLUE', 'SILK', 'STOCKINGS', 'BLUE', 'KNEE', 'PANTS', 'WITH', 'GOLD', 'BUCKLES', 'A', 'BLUE', 'RUFFLED', 'WAIST', 'AND', 'A', 'JACKET', 'OF', 'BRIGHT', 'BLUE', 'BRAIDED', 'WITH', 'GOLD'] +1284-1180-0000-829: hyp=['HE', 'WORE', 'BLUE', 'SILK', 'STOCKINGS', 'BLUE', 'KNEEP', 'HANDS', 'WITH', 'GOLD', 'BUCKLES', 'A', 'BLUE', 'RUFFLED', 'WAIST', 'AND', 'A', 'JACKET', 'OF', 'BRIGHT', 'BLUE', 'BRAIDED', 'WITH', 'GOLD'] +1284-1180-0001-830: ref=['HIS', 'HAT', 'HAD', 'A', 'PEAKED', 'CROWN', 'AND', 'A', 'FLAT', 'BRIM', 'AND', 'AROUND', 'THE', 'BRIM', 'WAS', 'A', 'ROW', 'OF', 'TINY', 'GOLDEN', 'BELLS', 'THAT', 'TINKLED', 'WHEN', 'HE', 'MOVED'] +1284-1180-0001-830: hyp=['HIS', 'HAT', 'HAD', 'A', 'PEAKED', 'CROWN', 'IN', 'A', 'FLAT', 'BRIM', 'AND', 'AROUND', 'THE', 'BRIM', 'WAS', 'A', 'ROW', 'OF', 'TINY', 'GOLDEN', 'BELLS', 'THAT', 'TINKLED', 'WHEN', 'HE', 'MOVED'] +1284-1180-0002-831: ref=['INSTEAD', 'OF', 'SHOES', 'THE', 'OLD', 'MAN', 'WORE', 'BOOTS', 'WITH', 'TURNOVER', 'TOPS', 'AND', 'HIS', 'BLUE', 'COAT', 'HAD', 'WIDE', 'CUFFS', 'OF', 'GOLD', 'BRAID'] +1284-1180-0002-831: hyp=['INSTEAD', 'OF', 'SHOES', 'THE', 'OLD', 'MEN', 'WORE', 'BOOTS', 'WITH', 'TURN', 'OVER', 'TOPS', 'AND', 'HIS', 'BLUE', 'COAT', 'HAD', 'WIDE', 'CUFFS', 'OF', 'GOLD', 'BRAID'] +1284-1180-0003-832: ref=['FOR', 'A', 'LONG', 'TIME', 'HE', 'HAD', 'WISHED', 'TO', 'EXPLORE', 'THE', 'BEAUTIFUL', 'LAND', 'OF', 'OZ', 'IN', 'WHICH', 'THEY', 'LIVED'] +1284-1180-0003-832: hyp=['FOR', 'A', 'LONG', 'TIME', 'HE', 'HAD', 'WISHED', 'TO', 'EXPLORE', 'THE', 'BEAUTIFUL', 'LAND', 'OF', 'OZ', 'IN', 'WHICH', 'THEY', 'LIVED'] +1284-1180-0004-833: ref=['WHEN', 'THEY', 'WERE', 'OUTSIDE', 'UNC', 'SIMPLY', 'LATCHED', 'THE', 'DOOR', 'AND', 'STARTED', 'UP', 'THE', 'PATH'] +1284-1180-0004-833: hyp=['WHEN', 'THEY', 'WERE', 'OUTSIDE', 'UN', 'SIMPLY', 'LATCHED', 'THE', 'DOOR', 'AND', 'STARTED', 'UP', 'THE', 'PATH'] +1284-1180-0005-834: ref=['NO', 'ONE', 'WOULD', 'DISTURB', 'THEIR', 'LITTLE', 'HOUSE', 'EVEN', 'IF', 'ANYONE', 'CAME', 'SO', 'FAR', 'INTO', 'THE', 'THICK', 'FOREST', 'WHILE', 'THEY', 'WERE', 'GONE'] +1284-1180-0005-834: hyp=['NO', 'ONE', 'WOULD', 'DISTURB', 'THEIR', 'LITTLE', 'HOUSE', 'EVEN', 'IF', 'ANY', 'ONE', 'CAME', 'SO', 'FAR', 'INTO', 'THE', 'THICK', 'FOREST', 'WHILE', 'THEY', 'WERE', 'GONE'] +1284-1180-0006-835: ref=['AT', 'THE', 'FOOT', 'OF', 'THE', 'MOUNTAIN', 'THAT', 'SEPARATED', 'THE', 'COUNTRY', 'OF', 'THE', 'MUNCHKINS', 'FROM', 'THE', 'COUNTRY', 'OF', 'THE', 'GILLIKINS', 'THE', 'PATH', 'DIVIDED'] +1284-1180-0006-835: hyp=['AT', 'THE', 'FOOT', 'OF', 'THE', 'MOUNTAIN', 'THAT', 'SEPARATED', 'THE', 'COUNTRY', 'OF', 'THE', 'MUNCHKINS', 'FROM', 'THE', 'COUNTRY', 'OF', 'THE', 
'GYLICANS', 'THE', 'PATH', 'DIVIDED'] +1284-1180-0007-836: ref=['HE', 'KNEW', 'IT', 'WOULD', 'TAKE', 'THEM', 'TO', 'THE', 'HOUSE', 'OF', 'THE', 'CROOKED', 'MAGICIAN', 'WHOM', 'HE', 'HAD', 'NEVER', 'SEEN', 'BUT', 'WHO', 'WAS', 'THEIR', 'NEAREST', 'NEIGHBOR'] +1284-1180-0007-836: hyp=['HE', 'KNEW', 'IT', 'WOULD', 'TAKE', 'THEM', 'TO', 'THE', 'HOUSE', 'OF', 'THE', 'CROOKED', 'MAGICIAN', 'WHOM', 'HE', 'HAD', 'NEVER', 'SEEN', 'BUT', 'WHO', 'WAS', 'THERE', 'NEAREST', 'NEIGHBOUR'] +1284-1180-0008-837: ref=['ALL', 'THE', 'MORNING', 'THEY', 'TRUDGED', 'UP', 'THE', 'MOUNTAIN', 'PATH', 'AND', 'AT', 'NOON', 'UNC', 'AND', 'OJO', 'SAT', 'ON', 'A', 'FALLEN', 'TREE', 'TRUNK', 'AND', 'ATE', 'THE', 'LAST', 'OF', 'THE', 'BREAD', 'WHICH', 'THE', 'OLD', 'MUNCHKIN', 'HAD', 'PLACED', 'IN', 'HIS', 'POCKET'] +1284-1180-0008-837: hyp=['ALL', 'THE', 'MORNING', 'THEY', 'TRUDGED', 'UP', 'THE', 'MOUNTAIN', 'PATH', 'AND', 'AT', 'NOON', 'UNCAN', 'OJO', 'SAT', 'ON', 'A', 'FALLEN', 'TREE', 'TRUNK', 'AND', 'ATE', 'THE', 'LAST', 'OF', 'THE', 'BREAD', 'WHICH', 'THE', 'OLD', 'MUNCHKIN', 'HAD', 'PLACED', 'IN', 'HIS', 'POCKET'] +1284-1180-0009-838: ref=['THEN', 'THEY', 'STARTED', 'ON', 'AGAIN', 'AND', 'TWO', 'HOURS', 'LATER', 'CAME', 'IN', 'SIGHT', 'OF', 'THE', 'HOUSE', 'OF', 'DOCTOR', 'PIPT'] +1284-1180-0009-838: hyp=['THEN', 'THEY', 'STARTED', 'ON', 'AGAIN', 'AND', 'TWO', 'HOURS', 'LATER', 'CAME', 'IN', 'SIGHT', 'OF', 'THE', 'HOUSE', 'OF', 'DOCTOR', 'PIPT'] +1284-1180-0010-839: ref=['UNC', 'KNOCKED', 'AT', 'THE', 'DOOR', 'OF', 'THE', 'HOUSE', 'AND', 'A', 'CHUBBY', 'PLEASANT', 'FACED', 'WOMAN', 'DRESSED', 'ALL', 'IN', 'BLUE', 'OPENED', 'IT', 'AND', 'GREETED', 'THE', 'VISITORS', 'WITH', 'A', 'SMILE'] +1284-1180-0010-839: hyp=['UNCONOCTED', 'THE', 'DOOR', 'OF', 'THE', 'HOUSE', 'INTO', 'CHUBBY', 'PLEASANT', 'FACED', 'WOMAN', 'DRESSED', 'ALL', 'IN', 'BLUE', 'OPENED', 'IT', 'AND', 'GREETED', 'THE', 'VISITORS', 'WITH', 'A', 'SMILE'] +1284-1180-0011-840: ref=['I', 'AM', 'MY', 'DEAR', 'AND', 'ALL', 'STRANGERS', 'ARE', 'WELCOME', 'TO', 'MY', 'HOME'] +1284-1180-0011-840: hyp=['I', 'AM', 'MY', 'DEAR', 'AND', 'ALL', 'STRANGERS', 'ARE', 'WELCOME', 'TO', 'MY', 'HOME'] +1284-1180-0012-841: ref=['WE', 'HAVE', 'COME', 'FROM', 'A', 'FAR', 'LONELIER', 'PLACE', 'THAN', 'THIS', 'A', 'LONELIER', 'PLACE'] +1284-1180-0012-841: hyp=['WE', 'HAVE', 'COME', 'FROM', 'AFAR', 'LONELIER', 'PLACE', 'THAN', 'THIS', 'A', 'LONELIER', 'PLACE'] +1284-1180-0013-842: ref=['AND', 'YOU', 'MUST', 'BE', 'OJO', 'THE', 'UNLUCKY', 'SHE', 'ADDED'] +1284-1180-0013-842: hyp=['AND', 'YOU', 'MUST', 'BE', 'OJO', 'THE', 'UNLUCKY', 'SHE', 'ADDED'] +1284-1180-0014-843: ref=['OJO', 'HAD', 'NEVER', 'EATEN', 'SUCH', 'A', 'FINE', 'MEAL', 'IN', 'ALL', 'HIS', 'LIFE'] +1284-1180-0014-843: hyp=['OJO', 'HAD', 'NEVER', 'EATEN', 'SUCH', 'A', 'FINE', 'MEAL', 'IN', 'ALL', 'HIS', 'LIFE'] +1284-1180-0015-844: ref=['WE', 'ARE', 'TRAVELING', 'REPLIED', 'OJO', 'AND', 'WE', 'STOPPED', 'AT', 'YOUR', 'HOUSE', 'JUST', 'TO', 'REST', 'AND', 'REFRESH', 'OURSELVES'] +1284-1180-0015-844: hyp=['WE', 'ARE', 'TRAVELLING', 'REPLIED', 'OJO', 'AND', 'WE', 'STOPPED', 'AT', 'YOUR', 'HOUSE', 'JUST', 'A', 'REST', 'AND', 'REFRESH', 'OURSELVES'] +1284-1180-0016-845: ref=['THE', 'WOMAN', 'SEEMED', 'THOUGHTFUL'] +1284-1180-0016-845: hyp=['THE', 'WOMAN', 'SEEMED', 'THOUGHTFUL'] +1284-1180-0017-846: ref=['AT', 'ONE', 'END', 'STOOD', 'A', 'GREAT', 'FIREPLACE', 'IN', 'WHICH', 'A', 'BLUE', 'LOG', 'WAS', 'BLAZING', 'WITH', 'A', 'BLUE', 'FLAME', 'AND', 'OVER', 'THE', 'FIRE', 'HUNG', 'FOUR', 'KETTLES', 'IN', 'A', 'ROW', 'ALL', 
'BUBBLING', 'AND', 'STEAMING', 'AT', 'A', 'GREAT', 'RATE'] +1284-1180-0017-846: hyp=['AT', 'ONE', 'END', 'STOOD', 'A', 'GREAT', 'FIREPLACE', 'IN', 'WHICH', 'A', 'BLUE', 'LOG', 'WAS', 'BLAZING', 'WITH', 'A', 'BLUE', 'FLAME', 'AND', 'OVER', 'THE', 'FIRE', 'HUNG', 'FOUR', 'KETTLES', 'IN', 'A', 'ROW', 'ALL', 'BUBBLING', 'AND', 'STEAMING', 'AT', 'A', 'GREAT', 'RATE'] +1284-1180-0018-847: ref=['IT', 'TAKES', 'ME', 'SEVERAL', 'YEARS', 'TO', 'MAKE', 'THIS', 'MAGIC', 'POWDER', 'BUT', 'AT', 'THIS', 'MOMENT', 'I', 'AM', 'PLEASED', 'TO', 'SAY', 'IT', 'IS', 'NEARLY', 'DONE', 'YOU', 'SEE', 'I', 'AM', 'MAKING', 'IT', 'FOR', 'MY', 'GOOD', 'WIFE', 'MARGOLOTTE', 'WHO', 'WANTS', 'TO', 'USE', 'SOME', 'OF', 'IT', 'FOR', 'A', 'PURPOSE', 'OF', 'HER', 'OWN'] +1284-1180-0018-847: hyp=['IT', 'TAKES', 'ME', 'SEVERAL', 'YEARS', 'TO', 'MAKE', 'THIS', 'MAGIC', 'POWDER', 'BUT', 'AT', 'THIS', 'MOMENT', 'I', 'AM', 'PLEASED', 'TO', 'SAY', 'IT', 'IS', 'NEARLY', 'DONE', 'YOU', 'SEE', 'I', 'AM', 'MAKING', 'IT', 'FOR', 'MY', 'GOOD', 'WIFE', 'MARGOLOTTE', 'WHO', 'WANTS', 'TO', 'USE', 'SOME', 'OF', 'IT', 'FOR', 'A', 'PURPOSE', 'OF', 'HER', 'OWN'] +1284-1180-0019-848: ref=['YOU', 'MUST', 'KNOW', 'SAID', 'MARGOLOTTE', 'WHEN', 'THEY', 'WERE', 'ALL', 'SEATED', 'TOGETHER', 'ON', 'THE', 'BROAD', 'WINDOW', 'SEAT', 'THAT', 'MY', 'HUSBAND', 'FOOLISHLY', 'GAVE', 'AWAY', 'ALL', 'THE', 'POWDER', 'OF', 'LIFE', 'HE', 'FIRST', 'MADE', 'TO', 'OLD', 'MOMBI', 'THE', 'WITCH', 'WHO', 'USED', 'TO', 'LIVE', 'IN', 'THE', 'COUNTRY', 'OF', 'THE', 'GILLIKINS', 'TO', 'THE', 'NORTH', 'OF', 'HERE'] +1284-1180-0019-848: hyp=['YOU', 'MUST', 'KNOW', 'SAID', 'MARGOLOTTE', 'WHEN', 'THEY', 'WERE', 'ALL', 'SEATED', 'TOGETHER', 'ON', 'THE', 'BROAD', 'WINDOW', 'SEAT', 'THAT', 'MY', 'HUSBAND', 'FOOLISHLY', 'GAVE', 'AWAY', 'ALL', 'THE', 'POWDER', 'OF', 'LIFE', 'HE', 'FIRST', 'MADE', 'TO', 'OLD', 'MOMBY', 'THE', 'WITCH', 'WHO', 'USED', 'TO', 'LIVE', 'IN', 'THE', 'COUNTRY', 'OF', 'THE', 'GILLICKINS', 'TO', 'THE', 'NORTH', 'OF', 'HERE'] +1284-1180-0020-849: ref=['THE', 'FIRST', 'LOT', 'WE', 'TESTED', 'ON', 'OUR', 'GLASS', 'CAT', 'WHICH', 'NOT', 'ONLY', 'BEGAN', 'TO', 'LIVE', 'BUT', 'HAS', 'LIVED', 'EVER', 'SINCE'] +1284-1180-0020-849: hyp=['THE', 'FIRST', 'LOT', 'WE', 'TESTED', 'ON', 'OUR', 'GLASS', 'CAT', 'WHICH', 'NOT', 'ONLY', 'BEGAN', 'TO', 'LIVE', 'BUT', 'HAS', 'LIVED', 'EVER', 'SINCE'] +1284-1180-0021-850: ref=['I', 'THINK', 'THE', 'NEXT', 'GLASS', 'CAT', 'THE', 'MAGICIAN', 'MAKES', 'WILL', 'HAVE', 'NEITHER', 'BRAINS', 'NOR', 'HEART', 'FOR', 'THEN', 'IT', 'WILL', 'NOT', 'OBJECT', 'TO', 'CATCHING', 'MICE', 'AND', 'MAY', 'PROVE', 'OF', 'SOME', 'USE', 'TO', 'US'] +1284-1180-0021-850: hyp=['I', 'THINK', 'THE', 'NEXT', 'GLASS', 'CAT', 'THE', 'MAGICIAN', 'MAKES', 'WILL', 'HAVE', 'NEITHER', 'BRAINS', 'NOR', 'HEART', 'FOR', 'THEN', 'IT', 'WILL', 'NOT', 'OBJECT', 'TO', 'CATCHING', 'MICE', 'AND', 'THEY', 'PROVE', 'OF', 'SOME', 'USE', 'TO', 'US'] +1284-1180-0022-851: ref=["I'M", 'AFRAID', 'I', "DON'T", 'KNOW', 'MUCH', 'ABOUT', 'THE', 'LAND', 'OF', 'OZ'] +1284-1180-0022-851: hyp=['I', 'AM', 'AFRAID', 'I', "DON'T", 'KNOW', 'MUCH', 'ABOUT', 'THE', 'LAND', 'OF', 'OZ'] +1284-1180-0023-852: ref=['YOU', 'SEE', "I'VE", 'LIVED', 'ALL', 'MY', 'LIFE', 'WITH', 'UNC', 'NUNKIE', 'THE', 'SILENT', 'ONE', 'AND', 'THERE', 'WAS', 'NO', 'ONE', 'TO', 'TELL', 'ME', 'ANYTHING'] +1284-1180-0023-852: hyp=['YOU', 'SEE', 'I', 'HAVE', 'LIVED', 'ALL', 'MY', 'LIFE', 'WITH', 'UNC', 'NUNKIE', 'THE', 'SILENT', 'ONE', 'AND', 'THERE', 'WAS', 'NO', 'ONE', 'TO', 'TELL', 'ME', 'ANYTHING'] +1284-1180-0024-853: 
ref=['THAT', 'IS', 'ONE', 'REASON', 'YOU', 'ARE', 'OJO', 'THE', 'UNLUCKY', 'SAID', 'THE', 'WOMAN', 'IN', 'A', 'SYMPATHETIC', 'TONE'] +1284-1180-0024-853: hyp=['THAT', 'IS', 'ONE', 'REASON', 'YOU', 'ARE', 'OJO', 'THE', 'UNLUCKY', 'SAID', 'THE', 'WOMAN', 'IN', 'SYMPATHETIC', 'TONE'] +1284-1180-0025-854: ref=['I', 'THINK', 'I', 'MUST', 'SHOW', 'YOU', 'MY', 'PATCHWORK', 'GIRL', 'SAID', 'MARGOLOTTE', 'LAUGHING', 'AT', 'THE', "BOY'S", 'ASTONISHMENT', 'FOR', 'SHE', 'IS', 'RATHER', 'DIFFICULT', 'TO', 'EXPLAIN'] +1284-1180-0025-854: hyp=['I', 'THINK', 'I', 'MUST', 'SHOW', 'YOU', 'MY', 'PATCHWORK', 'GIRL', 'SAID', 'MARGOLOTTE', 'LAUGHING', 'AT', 'THE', "BOY'S", 'ASTONISHMENT', 'FOR', 'SHE', 'IS', 'RATHER', 'DIFFICULT', 'TO', 'EXPLAIN'] +1284-1180-0026-855: ref=['BUT', 'FIRST', 'I', 'WILL', 'TELL', 'YOU', 'THAT', 'FOR', 'MANY', 'YEARS', 'I', 'HAVE', 'LONGED', 'FOR', 'A', 'SERVANT', 'TO', 'HELP', 'ME', 'WITH', 'THE', 'HOUSEWORK', 'AND', 'TO', 'COOK', 'THE', 'MEALS', 'AND', 'WASH', 'THE', 'DISHES'] +1284-1180-0026-855: hyp=['BUT', 'FIRST', 'I', 'WILL', 'TELL', 'YOU', 'THAT', 'FROM', 'MANY', 'YEARS', 'I', 'HAVE', 'LONGED', 'FOR', 'A', 'SERVANT', 'TO', 'HELP', 'ME', 'WITH', 'THE', 'HOUSEWORK', 'AND', 'TO', 'COPE', 'THE', 'MEALS', 'AND', 'WASH', 'THE', 'DISHES'] +1284-1180-0027-856: ref=['YET', 'THAT', 'TASK', 'WAS', 'NOT', 'SO', 'EASY', 'AS', 'YOU', 'MAY', 'SUPPOSE'] +1284-1180-0027-856: hyp=['YET', 'THAT', 'TASK', 'WAS', 'NOT', 'SO', 'EASY', 'AS', 'YOU', 'MAY', 'SUPPOSE'] +1284-1180-0028-857: ref=['A', 'BED', 'QUILT', 'MADE', 'OF', 'PATCHES', 'OF', 'DIFFERENT', 'KINDS', 'AND', 'COLORS', 'OF', 'CLOTH', 'ALL', 'NEATLY', 'SEWED', 'TOGETHER'] +1284-1180-0028-857: hyp=['A', 'BED', 'QUILT', 'MADE', 'OF', 'PATCHES', 'OF', 'DIFFERENT', 'KINDS', 'AND', 'COLLARS', 'OF', 'CLOTH', 'ALL', 'NEATLY', 'SEWED', 'TOGETHER'] +1284-1180-0029-858: ref=['SOMETIMES', 'IT', 'IS', 'CALLED', 'A', 'CRAZY', 'QUILT', 'BECAUSE', 'THE', 'PATCHES', 'AND', 'COLORS', 'ARE', 'SO', 'MIXED', 'UP'] +1284-1180-0029-858: hyp=['SOMETIMES', 'IT', 'IS', 'CALLED', 'A', 'CRAZY', 'QUILT', 'BECAUSE', 'THE', 'PATCHES', 'AND', 'COLORS', 'ARE', 'SO', 'MIXED', 'UP'] +1284-1180-0030-859: ref=['WHEN', 'I', 'FOUND', 'IT', 'I', 'SAID', 'TO', 'MYSELF', 'THAT', 'IT', 'WOULD', 'DO', 'NICELY', 'FOR', 'MY', 'SERVANT', 'GIRL', 'FOR', 'WHEN', 'SHE', 'WAS', 'BROUGHT', 'TO', 'LIFE', 'SHE', 'WOULD', 'NOT', 'BE', 'PROUD', 'NOR', 'HAUGHTY', 'AS', 'THE', 'GLASS', 'CAT', 'IS', 'FOR', 'SUCH', 'A', 'DREADFUL', 'MIXTURE', 'OF', 'COLORS', 'WOULD', 'DISCOURAGE', 'HER', 'FROM', 'TRYING', 'TO', 'BE', 'AS', 'DIGNIFIED', 'AS', 'THE', 'BLUE', 'MUNCHKINS', 'ARE'] +1284-1180-0030-859: hyp=['WHEN', 'I', 'FOUND', 'IT', 'I', 'SAID', 'TO', 'MYSELF', 'THAT', 'IT', 'WOULD', 'DO', 'NICELY', 'FOR', 'MY', 'SERVANT', 'GIRL', 'FOR', 'WHEN', 'SHE', 'WAS', 'BROUGHT', 'TO', 'LIFE', 'SHE', 'WOULD', 'NOT', 'BE', 'PROUD', 'NOR', 'HAUGHTY', 'AS', 'THE', 'GLASS', 'CAT', 'IS', 'FOR', 'SUCH', 'A', 'DREADFUL', 'MIXTURE', 'OF', 'COLOURS', 'WOULD', 'DISCOURAGE', 'HER', 'FROM', 'TRYING', 'TO', 'BE', 'AS', 'DIGNIFIED', 'AS', 'THE', 'BLUE', 'MUNCHKINS', 'ARE'] +1284-1180-0031-860: ref=['AT', 'THE', 'EMERALD', 'CITY', 'WHERE', 'OUR', 'PRINCESS', 'OZMA', 'LIVES', 'GREEN', 'IS', 'THE', 'POPULAR', 'COLOR'] +1284-1180-0031-860: hyp=['AT', 'THE', 'EMERALD', 'CITY', 'WHERE', 'OUR', 'PRINCESS', 'OSMO', 'LIVES', 'GREEN', 'IS', 'THE', 'POPULAR', 'COLOR'] +1284-1180-0032-861: ref=['I', 'WILL', 'SHOW', 'YOU', 'WHAT', 'A', 'GOOD', 'JOB', 'I', 'DID', 'AND', 'SHE', 'WENT', 'TO', 'A', 'TALL', 'CUPBOARD', 'AND', 'THREW', 
'OPEN', 'THE', 'DOORS'] +1284-1180-0032-861: hyp=['I', 'WILL', 'SHOW', 'YOU', 'WHAT', 'A', 'GOOD', 'JOB', 'I', 'DID', 'AND', 'SHE', 'WENT', 'TO', 'A', 'TALL', 'CUPBOARD', 'AND', 'THREW', 'OPEN', 'THE', 'DOORS'] +1284-1181-0000-807: ref=['OJO', 'EXAMINED', 'THIS', 'CURIOUS', 'CONTRIVANCE', 'WITH', 'WONDER'] +1284-1181-0000-807: hyp=['OJO', 'EXAMINED', 'THIS', 'CURIOUS', 'CONTRIVANCE', 'WITH', 'WONDER'] +1284-1181-0001-808: ref=['MARGOLOTTE', 'HAD', 'FIRST', 'MADE', 'THE', "GIRL'S", 'FORM', 'FROM', 'THE', 'PATCHWORK', 'QUILT', 'AND', 'THEN', 'SHE', 'HAD', 'DRESSED', 'IT', 'WITH', 'A', 'PATCHWORK', 'SKIRT', 'AND', 'AN', 'APRON', 'WITH', 'POCKETS', 'IN', 'IT', 'USING', 'THE', 'SAME', 'GAY', 'MATERIAL', 'THROUGHOUT'] +1284-1181-0001-808: hyp=['MARGOLOT', 'HAD', 'FIRST', 'MADE', 'THE', "GIRL'S", 'FORM', 'FROM', 'THE', 'PATCHWORK', 'QUILT', 'AND', 'THEN', 'SHE', 'HAD', 'DRESSED', 'IT', 'WITH', 'A', 'PATCHWORK', 'SKIRT', 'AND', 'AN', 'APRON', 'WITH', 'POCKETS', 'IN', 'IT', 'USING', 'THE', 'SAME', 'GAME', 'MATERIAL', 'THROUGHOUT'] +1284-1181-0002-809: ref=['THE', 'HEAD', 'OF', 'THE', 'PATCHWORK', 'GIRL', 'WAS', 'THE', 'MOST', 'CURIOUS', 'PART', 'OF', 'HER'] +1284-1181-0002-809: hyp=['THE', 'HEAD', 'OF', 'THE', 'PATCHWORK', 'GIRL', 'WAS', 'THE', 'MOST', 'CURIOUS', 'PART', 'OF', 'HER'] +1284-1181-0003-810: ref=['THE', 'HAIR', 'WAS', 'OF', 'BROWN', 'YARN', 'AND', 'HUNG', 'DOWN', 'ON', 'HER', 'NECK', 'IN', 'SEVERAL', 'NEAT', 'BRAIDS'] +1284-1181-0003-810: hyp=['THE', 'HAIR', 'WAS', 'OF', 'BROWN', 'YARN', 'AND', 'HUNG', 'DOWN', 'ON', 'HER', 'NECK', 'AND', 'SEVERAL', 'NEAT', 'BRAIDS'] +1284-1181-0004-811: ref=['GOLD', 'IS', 'THE', 'MOST', 'COMMON', 'METAL', 'IN', 'THE', 'LAND', 'OF', 'OZ', 'AND', 'IS', 'USED', 'FOR', 'MANY', 'PURPOSES', 'BECAUSE', 'IT', 'IS', 'SOFT', 'AND', 'PLIABLE'] +1284-1181-0004-811: hyp=['GOLD', 'IS', 'THE', 'MOST', 'COMMON', 'MEDAL', 'IN', 'THE', 'LAND', 'OF', 'OZ', 'AND', 'IS', 'USED', 'FOR', 'MANY', 'PURPOSES', 'BECAUSE', 'IT', 'IS', 'SOFT', 'AND', 'PLIABLE'] +1284-1181-0005-812: ref=['NO', 'I', 'FORGOT', 'ALL', 'ABOUT', 'THE', 'BRAINS', 'EXCLAIMED', 'THE', 'WOMAN'] +1284-1181-0005-812: hyp=['NO', 'I', 'FORGOT', 'ALL', 'ABOUT', 'THE', 'BRAINS', 'EXCLAIMED', 'THE', 'WOMAN'] +1284-1181-0006-813: ref=['WELL', 'THAT', 'MAY', 'BE', 'TRUE', 'AGREED', 'MARGOLOTTE', 'BUT', 'ON', 'THE', 'CONTRARY', 'A', 'SERVANT', 'WITH', 'TOO', 'MUCH', 'BRAINS', 'IS', 'SURE', 'TO', 'BECOME', 'INDEPENDENT', 'AND', 'HIGH', 'AND', 'MIGHTY', 'AND', 'FEEL', 'ABOVE', 'HER', 'WORK'] +1284-1181-0006-813: hyp=['WELL', 'THAT', 'MAY', 'BE', 'TRUE', 'AGREED', 'MARGOLOTTE', 'BUT', 'ON', 'THE', 'CONTRARY', 'A', 'SERVANT', 'WITH', 'TOO', 'MUCH', 'BRAINS', 'IS', 'SURE', 'TO', 'BECOME', 'INDEPENDENT', 'AND', 'HIGH', 'AND', 'MIGHTY', 'AND', 'FEEL', 'ABOVE', 'HER', 'WORK'] +1284-1181-0007-814: ref=['SHE', 'POURED', 'INTO', 'THE', 'DISH', 'A', 'QUANTITY', 'FROM', 'EACH', 'OF', 'THESE', 'BOTTLES'] +1284-1181-0007-814: hyp=['SHE', 'POURED', 'INTO', 'THE', 'DISH', 'A', 'QUANTITY', 'FROM', 'EACH', 'OF', 'THESE', 'BOTTLES'] +1284-1181-0008-815: ref=['I', 'THINK', 'THAT', 'WILL', 'DO', 'SHE', 'CONTINUED', 'FOR', 'THE', 'OTHER', 'QUALITIES', 'ARE', 'NOT', 'NEEDED', 'IN', 'A', 'SERVANT'] +1284-1181-0008-815: hyp=['I', 'THINK', 'THAT', 'WILL', 'DO', 'SHE', 'CONTINUED', 'FOR', 'THE', 'OTHER', 'QUALITIES', 'ARE', 'NOT', 'NEEDED', 'IN', 'A', 'SERVANT'] +1284-1181-0009-816: ref=['SHE', 'RAN', 'TO', 'HER', "HUSBAND'S", 'SIDE', 'AT', 'ONCE', 'AND', 'HELPED', 'HIM', 'LIFT', 'THE', 'FOUR', 'KETTLES', 'FROM', 'THE', 'FIRE'] 
+1284-1181-0009-816: hyp=['SHE', 'RAN', 'TO', 'HER', "HUSBAND'S", 'SIDE', 'AT', 'ONCE', 'AND', 'HELPED', 'HIM', 'LIFT', 'THE', 'FOUR', 'KETTLES', 'FROM', 'THE', 'FIRE'] +1284-1181-0010-817: ref=['THEIR', 'CONTENTS', 'HAD', 'ALL', 'BOILED', 'AWAY', 'LEAVING', 'IN', 'THE', 'BOTTOM', 'OF', 'EACH', 'KETTLE', 'A', 'FEW', 'GRAINS', 'OF', 'FINE', 'WHITE', 'POWDER'] +1284-1181-0010-817: hyp=['THEIR', 'CONTENTS', 'HAD', 'ALL', 'BOILED', 'AWAY', 'LEAVING', 'IN', 'THE', 'BOTTOM', 'OF', 'EACH', 'KETTLE', 'A', 'FEW', 'GRAINS', 'OF', 'FINE', 'WHITE', 'POWDER'] +1284-1181-0011-818: ref=['VERY', 'CAREFULLY', 'THE', 'MAGICIAN', 'REMOVED', 'THIS', 'POWDER', 'PLACING', 'IT', 'ALL', 'TOGETHER', 'IN', 'A', 'GOLDEN', 'DISH', 'WHERE', 'HE', 'MIXED', 'IT', 'WITH', 'A', 'GOLDEN', 'SPOON'] +1284-1181-0011-818: hyp=['VERY', 'CAREFULLY', 'THE', 'MAGICIAN', 'REMOVED', 'THIS', 'POWDER', 'PLACING', 'IT', 'ALTOGETHER', 'IN', 'A', 'GOLDEN', 'DISH', 'WHERE', 'HE', 'MIXED', 'IT', 'WITH', 'A', 'GOLDEN', 'SPOON'] +1284-1181-0012-819: ref=['NO', 'ONE', 'SAW', 'HIM', 'DO', 'THIS', 'FOR', 'ALL', 'WERE', 'LOOKING', 'AT', 'THE', 'POWDER', 'OF', 'LIFE', 'BUT', 'SOON', 'THE', 'WOMAN', 'REMEMBERED', 'WHAT', 'SHE', 'HAD', 'BEEN', 'DOING', 'AND', 'CAME', 'BACK', 'TO', 'THE', 'CUPBOARD'] +1284-1181-0012-819: hyp=['NO', 'ONE', 'SAW', 'HIM', 'DO', 'THIS', 'FOR', 'ALL', 'WERE', 'LOOKING', 'AT', 'THE', 'POWDER', 'OF', 'LIFE', 'BUT', 'SOON', 'THE', 'WOMAN', 'REMEMBERED', 'WHAT', 'SHE', 'HAD', 'BEEN', 'DOING', 'AND', 'CAME', 'BACK', 'TO', 'THE', 'CUPBOARD'] +1284-1181-0013-820: ref=['OJO', 'BECAME', 'A', 'BIT', 'UNEASY', 'AT', 'THIS', 'FOR', 'HE', 'HAD', 'ALREADY', 'PUT', 'QUITE', 'A', 'LOT', 'OF', 'THE', 'CLEVERNESS', 'POWDER', 'IN', 'THE', 'DISH', 'BUT', 'HE', 'DARED', 'NOT', 'INTERFERE', 'AND', 'SO', 'HE', 'COMFORTED', 'HIMSELF', 'WITH', 'THE', 'THOUGHT', 'THAT', 'ONE', 'CANNOT', 'HAVE', 'TOO', 'MUCH', 'CLEVERNESS'] +1284-1181-0013-820: hyp=['OJO', 'BECAME', 'A', 'BIT', 'UNEASY', 'AT', 'THIS', 'FOR', 'HE', 'HAD', 'ALREADY', 'PUT', 'QUITE', 'A', 'LOT', 'OF', 'THE', 'CLEVERNESS', 'POWDER', 'IN', 'THE', 'DISH', 'BUT', 'HE', 'DARED', 'NOT', 'INTERFERE', 'AND', 'SO', 'HE', 'COMFORTED', 'HIMSELF', 'WITH', 'THE', 'THOUGHT', 'THAT', 'ONE', 'CANNOT', 'HAVE', 'TOO', 'MUCH', 'CLEVERNESS'] +1284-1181-0014-821: ref=['HE', 'SELECTED', 'A', 'SMALL', 'GOLD', 'BOTTLE', 'WITH', 'A', 'PEPPER', 'BOX', 'TOP', 'SO', 'THAT', 'THE', 'POWDER', 'MIGHT', 'BE', 'SPRINKLED', 'ON', 'ANY', 'OBJECT', 'THROUGH', 'THE', 'SMALL', 'HOLES'] +1284-1181-0014-821: hyp=['HE', 'SELECTED', 'A', 'SMALL', 'GOLD', 'BOTTLE', 'WITH', 'A', 'PEPPER', 'BOX', 'TOP', 'SO', 'THAT', 'THE', 'POWDER', 'MIGHT', 'BE', 'SPRINKLED', 'ON', 'ANY', 'OBJECT', 'THROUGH', 'THE', 'SMALL', 'HOLES'] +1284-1181-0015-822: ref=['MOST', 'PEOPLE', 'TALK', 'TOO', 'MUCH', 'SO', 'IT', 'IS', 'A', 'RELIEF', 'TO', 'FIND', 'ONE', 'WHO', 'TALKS', 'TOO', 'LITTLE'] +1284-1181-0015-822: hyp=['MOST', 'PEOPLE', 'TALK', 'TOO', 'MUCH', 'SO', 'IT', 'IS', 'A', 'RELIEF', 'TO', 'FIND', 'ONE', 'WHO', 'TALKS', 'TOO', 'LITTLE'] +1284-1181-0016-823: ref=['I', 'AM', 'NOT', 'ALLOWED', 'TO', 'PERFORM', 'MAGIC', 'EXCEPT', 'FOR', 'MY', 'OWN', 'AMUSEMENT', 'HE', 'TOLD', 'HIS', 'VISITORS', 'AS', 'HE', 'LIGHTED', 'A', 'PIPE', 'WITH', 'A', 'CROOKED', 'STEM', 'AND', 'BEGAN', 'TO', 'SMOKE'] +1284-1181-0016-823: hyp=['I', 'AM', 'NOT', 'ALLOWED', 'TO', 'PERFORM', 'MAGIC', 'EXCEPT', 'FOR', 'MY', 'OWN', 'AMUSEMENT', 'HE', 'TOLD', 'HIS', 'VISITORS', 'AS', 'HE', 'LIGHTED', 'A', 'PIPE', 'WITH', 'A', 'CROOKED', 'STEM', 'AND', 'BEGAN', 'TO', 'SMOKE'] 
+1284-1181-0017-824: ref=['THE', 'WIZARD', 'OF', 'OZ', 'WHO', 'USED', 'TO', 'BE', 'A', 'HUMBUG', 'AND', 'KNEW', 'NO', 'MAGIC', 'AT', 'ALL', 'HAS', 'BEEN', 'TAKING', 'LESSONS', 'OF', 'GLINDA', 'AND', "I'M", 'TOLD', 'HE', 'IS', 'GETTING', 'TO', 'BE', 'A', 'PRETTY', 'GOOD', 'WIZARD', 'BUT', 'HE', 'IS', 'MERELY', 'THE', 'ASSISTANT', 'OF', 'THE', 'GREAT', 'SORCERESS'] +1284-1181-0017-824: hyp=['THE', 'WIZARD', 'OF', 'OZ', 'WHO', 'USED', 'TO', 'BE', 'A', 'HUMBUG', 'AND', 'KNEW', 'NO', 'MAGIC', 'AT', 'ALL', 'HAS', 'BEEN', 'TAKING', 'LESSONS', 'OF', 'GLINDA', 'AND', "I'M", 'TOLD', 'HE', 'IS', 'GETTING', 'TO', 'BE', 'A', 'PRETTY', 'GOOD', 'WIZARD', 'BUT', 'HE', 'IS', 'MERELY', 'THE', 'ASSISTANT', 'OF', 'THE', 'GREAT', 'SORCERESS'] +1284-1181-0018-825: ref=['IT', 'TRULY', 'IS', 'ASSERTED', 'THE', 'MAGICIAN'] +1284-1181-0018-825: hyp=['IT', 'TRULY', 'IS', 'ASSERTED', 'THE', 'MAGICIAN'] +1284-1181-0019-826: ref=['I', 'NOW', 'USE', 'THEM', 'AS', 'ORNAMENTAL', 'STATUARY', 'IN', 'MY', 'GARDEN'] +1284-1181-0019-826: hyp=['I', 'NOW', 'USE', 'THEM', 'AS', 'ORNAMENTAL', 'STATUARY', 'IN', 'MY', 'GARDEN'] +1284-1181-0020-827: ref=['DEAR', 'ME', 'WHAT', 'A', 'CHATTERBOX', "YOU'RE", 'GETTING', 'TO', 'BE', 'UNC', 'REMARKED', 'THE', 'MAGICIAN', 'WHO', 'WAS', 'PLEASED', 'WITH', 'THE', 'COMPLIMENT'] +1284-1181-0020-827: hyp=['DEAR', 'ME', 'WHAT', 'A', 'CHATTER', 'BOX', 'ARE', 'GETTING', 'TO', 'BE', 'YUNK', 'REMARKED', 'THE', 'MAGICIAN', 'WHO', 'WAS', 'PLEASED', 'WITH', 'THE', 'COMPLIMENT'] +1284-1181-0021-828: ref=['ASKED', 'THE', 'VOICE', 'IN', 'SCORNFUL', 'ACCENTS'] +1284-1181-0021-828: hyp=['ASKED', 'THE', 'VOICE', 'IN', 'SCORNFUL', 'ACCENTS'] +1284-134647-0000-862: ref=['THE', 'GRATEFUL', 'APPLAUSE', 'OF', 'THE', 'CLERGY', 'HAS', 'CONSECRATED', 'THE', 'MEMORY', 'OF', 'A', 'PRINCE', 'WHO', 'INDULGED', 'THEIR', 'PASSIONS', 'AND', 'PROMOTED', 'THEIR', 'INTEREST'] +1284-134647-0000-862: hyp=['THE', 'GRATEFUL', 'APPLAUSE', 'OF', 'THE', 'CLERGY', 'HAS', 'CONSECRATED', 'THE', 'MEMORY', 'OF', 'A', 'PRINCE', 'WHO', 'INDULGED', 'THEIR', 'PASSIONS', 'AND', 'PROMOTED', 'THEIR', 'INTEREST'] +1284-134647-0001-863: ref=['THE', 'EDICT', 'OF', 'MILAN', 'THE', 'GREAT', 'CHARTER', 'OF', 'TOLERATION', 'HAD', 'CONFIRMED', 'TO', 'EACH', 'INDIVIDUAL', 'OF', 'THE', 'ROMAN', 'WORLD', 'THE', 'PRIVILEGE', 'OF', 'CHOOSING', 'AND', 'PROFESSING', 'HIS', 'OWN', 'RELIGION'] +1284-134647-0001-863: hyp=['THE', 'EDICT', 'OF', 'MILAN', 'THE', 'GREAT', 'CHARTER', 'OF', 'TOLERATION', 'HAD', 'CONFIRMED', 'TO', 'EACH', 'INDIVIDUAL', 'OF', 'THE', 'ROMAN', 'WORLD', 'THE', 'PRIVILEGE', 'OF', 'CHOOSING', 'AND', 'PROFESSING', 'HIS', 'OWN', 'RELIGION'] +1284-134647-0002-864: ref=['BUT', 'THIS', 'INESTIMABLE', 'PRIVILEGE', 'WAS', 'SOON', 'VIOLATED', 'WITH', 'THE', 'KNOWLEDGE', 'OF', 'TRUTH', 'THE', 'EMPEROR', 'IMBIBED', 'THE', 'MAXIMS', 'OF', 'PERSECUTION', 'AND', 'THE', 'SECTS', 'WHICH', 'DISSENTED', 'FROM', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'AFFLICTED', 'AND', 'OPPRESSED', 'BY', 'THE', 'TRIUMPH', 'OF', 'CHRISTIANITY'] +1284-134647-0002-864: hyp=['BUT', 'THIS', 'INESTIMABLE', 'PRIVILEGE', 'WAS', 'SOON', 'VIOLATED', 'WITH', 'THE', 'KNOWLEDGE', 'OF', 'TRUTH', 'THE', 'EMPEROR', 'IBED', 'THE', 'MAXIMS', 'OF', 'PERSECUTION', 'AND', 'THE', 'SECTS', 'WHICH', 'DISSENTED', 'FROM', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'AFFLICTED', 'AND', 'OPPRESSED', 'BY', 'THE', 'TRIUMPH', 'OF', 'CHRISTIANITY'] +1284-134647-0003-865: ref=['CONSTANTINE', 'EASILY', 'BELIEVED', 'THAT', 'THE', 'HERETICS', 'WHO', 'PRESUMED', 'TO', 'DISPUTE', 'HIS', 'OPINIONS', 'OR', 'TO', 'OPPOSE', 
'HIS', 'COMMANDS', 'WERE', 'GUILTY', 'OF', 'THE', 'MOST', 'ABSURD', 'AND', 'CRIMINAL', 'OBSTINACY', 'AND', 'THAT', 'A', 'SEASONABLE', 'APPLICATION', 'OF', 'MODERATE', 'SEVERITIES', 'MIGHT', 'SAVE', 'THOSE', 'UNHAPPY', 'MEN', 'FROM', 'THE', 'DANGER', 'OF', 'AN', 'EVERLASTING', 'CONDEMNATION'] +1284-134647-0003-865: hyp=['CONSTANTINE', 'EASILY', 'BELIEVED', 'THAT', 'THE', 'HERETICS', 'WHO', 'PRESUMED', 'TO', 'DISPUTE', 'HIS', 'OPINIONS', 'OR', 'TO', 'OPPOSE', 'HIS', 'COMMANDS', 'WERE', 'GUILTY', 'OF', 'THE', 'MOST', 'ABSURD', 'AND', 'CRIMINAL', 'OBSTINACY', 'AND', 'THAT', 'A', 'SEASONABLE', 'APPLICATION', 'OF', 'MODERATE', 'SEVERITIES', 'MIGHT', 'SAVE', 'THOSE', 'UNHAPPY', 'MEN', 'FROM', 'THE', 'DANGER', 'OF', 'AN', 'EVERLASTING', 'CONDEMNATION'] +1284-134647-0004-866: ref=['SOME', 'OF', 'THE', 'PENAL', 'REGULATIONS', 'WERE', 'COPIED', 'FROM', 'THE', 'EDICTS', 'OF', 'DIOCLETIAN', 'AND', 'THIS', 'METHOD', 'OF', 'CONVERSION', 'WAS', 'APPLAUDED', 'BY', 'THE', 'SAME', 'BISHOPS', 'WHO', 'HAD', 'FELT', 'THE', 'HAND', 'OF', 'OPPRESSION', 'AND', 'PLEADED', 'FOR', 'THE', 'RIGHTS', 'OF', 'HUMANITY'] +1284-134647-0004-866: hyp=['SOME', 'OF', 'THE', 'PENAL', 'REGULATIONS', 'WERE', 'COPIED', 'FROM', 'THE', 'EDICTS', 'OF', 'DIOCLETIAN', 'AND', 'THIS', 'METHOD', 'OF', 'CONVERSION', 'WAS', 'APPLAUDED', 'BY', 'THE', 'SAME', 'BISHOPS', 'WHO', 'HAD', 'FELLED', 'THE', 'HAND', 'OF', 'OPPRESSION', 'AND', 'PLEADED', 'FOR', 'THE', 'RIGHTS', 'OF', 'HUMANITY'] +1284-134647-0005-867: ref=['THEY', 'ASSERTED', 'WITH', 'CONFIDENCE', 'AND', 'ALMOST', 'WITH', 'EXULTATION', 'THAT', 'THE', 'APOSTOLICAL', 'SUCCESSION', 'WAS', 'INTERRUPTED', 'THAT', 'ALL', 'THE', 'BISHOPS', 'OF', 'EUROPE', 'AND', 'ASIA', 'WERE', 'INFECTED', 'BY', 'THE', 'CONTAGION', 'OF', 'GUILT', 'AND', 'SCHISM', 'AND', 'THAT', 'THE', 'PREROGATIVES', 'OF', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'CONFINED', 'TO', 'THE', 'CHOSEN', 'PORTION', 'OF', 'THE', 'AFRICAN', 'BELIEVERS', 'WHO', 'ALONE', 'HAD', 'PRESERVED', 'INVIOLATE', 'THE', 'INTEGRITY', 'OF', 'THEIR', 'FAITH', 'AND', 'DISCIPLINE'] +1284-134647-0005-867: hyp=['THEY', 'ASSERTED', 'WITH', 'CONFIDENCE', 'AND', 'ALMOST', 'WITH', 'EXULTATION', 'THAT', 'THE', 'APOSTOLICAL', 'SUCCESSION', 'WAS', 'INTERRUPTED', 'THAT', 'ALL', 'THE', 'BISHOPS', 'OF', 'EUROPE', 'AND', 'ASIA', 'WERE', 'INFECTED', 'BY', 'THE', 'CONTAGION', 'OF', 'GUILT', 'AND', 'SCHISM', 'AND', 'THAT', 'THE', 'PREROGATIVES', 'OF', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'CONFINED', 'TO', 'THE', 'CHOSEN', 'PORTION', 'OF', 'THE', 'AFRICAN', 'BELIEVERS', 'WHO', 'ALONE', 'HAD', 'PRESERVED', 'INVIOLATE', 'THE', 'INTEGRITY', 'OF', 'THEIR', 'FAITH', 'AND', 'DISCIPLINE'] +1284-134647-0006-868: ref=['BISHOPS', 'VIRGINS', 'AND', 'EVEN', 'SPOTLESS', 'INFANTS', 'WERE', 'SUBJECTED', 'TO', 'THE', 'DISGRACE', 'OF', 'A', 'PUBLIC', 'PENANCE', 'BEFORE', 'THEY', 'COULD', 'BE', 'ADMITTED', 'TO', 'THE', 'COMMUNION', 'OF', 'THE', 'DONATISTS'] +1284-134647-0006-868: hyp=['BISHOPS', 'VIRGINS', 'AND', 'EVEN', 'SPOTLESS', 'INFANTS', 'WERE', 'SUBJECTED', 'TO', 'THE', 'DISGRACE', 'OF', 'A', 'PUBLIC', 'PENANCE', 'BEFORE', 'THEY', 'COULD', 'BE', 'ADMITTED', 'TO', 'THE', 'COMMUNION', 'OF', 'THE', 'DONATISTS'] +1284-134647-0007-869: ref=['PROSCRIBED', 'BY', 'THE', 'CIVIL', 'AND', 'ECCLESIASTICAL', 'POWERS', 'OF', 'THE', 'EMPIRE', 'THE', 'DONATISTS', 'STILL', 'MAINTAINED', 'IN', 'SOME', 'PROVINCES', 'PARTICULARLY', 'IN', 'NUMIDIA', 'THEIR', 'SUPERIOR', 'NUMBERS', 'AND', 'FOUR', 'HUNDRED', 'BISHOPS', 'ACKNOWLEDGED', 'THE', 'JURISDICTION', 'OF', 'THEIR', 'PRIMATE'] +1284-134647-0007-869: 
hyp=['PRESCRIBED', 'BY', 'THE', 'CIVIL', 'AND', 'ECCLESIASTICAL', 'POWERS', 'OF', 'THE', 'EMPIRE', 'THE', 'DONATIST', 'STILL', 'MAINTAINED', 'IN', 'SOME', 'PROVINCES', 'PARTICULARLY', 'IN', 'MEDIA', 'THEIR', 'SUPERIOR', 'NUMBERS', 'AND', 'FOUR', 'HUNDRED', 'BISHOPS', 'ACKNOWLEDGED', 'THE', 'JURISDICTION', 'OF', 'THEIR', 'PRIMATE'] +1320-122612-0000-120: ref=['SINCE', 'THE', 'PERIOD', 'OF', 'OUR', 'TALE', 'THE', 'ACTIVE', 'SPIRIT', 'OF', 'THE', 'COUNTRY', 'HAS', 'SURROUNDED', 'IT', 'WITH', 'A', 'BELT', 'OF', 'RICH', 'AND', 'THRIVING', 'SETTLEMENTS', 'THOUGH', 'NONE', 'BUT', 'THE', 'HUNTER', 'OR', 'THE', 'SAVAGE', 'IS', 'EVER', 'KNOWN', 'EVEN', 'NOW', 'TO', 'PENETRATE', 'ITS', 'WILD', 'RECESSES'] +1320-122612-0000-120: hyp=['SINCE', 'THE', 'PERIOD', 'OF', 'OUR', 'TALE', 'THE', 'ACTIVE', 'SPIRIT', 'OF', 'THE', 'COUNTRY', 'HAS', 'SURROUNDED', 'IT', 'WITH', 'A', 'BELT', 'OF', 'RICH', 'AND', 'THRIVING', 'SETTLEMENTS', 'THOUGH', 'NONE', 'BUT', 'THE', 'HUNTER', 'OR', 'THE', 'SAVAGE', 'IS', 'EVER', 'KNOWN', 'EVEN', 'NOW', 'TO', 'PENETRATE', 'ITS', 'WILD', 'RECESSES'] +1320-122612-0001-121: ref=['THE', 'DEWS', 'WERE', 'SUFFERED', 'TO', 'EXHALE', 'AND', 'THE', 'SUN', 'HAD', 'DISPERSED', 'THE', 'MISTS', 'AND', 'WAS', 'SHEDDING', 'A', 'STRONG', 'AND', 'CLEAR', 'LIGHT', 'IN', 'THE', 'FOREST', 'WHEN', 'THE', 'TRAVELERS', 'RESUMED', 'THEIR', 'JOURNEY'] +1320-122612-0001-121: hyp=['THE', 'DEWS', 'WERE', 'SUFFERED', 'TO', 'EXHALE', 'AND', 'THE', 'SUN', 'HAD', 'DISPERSED', 'THE', 'MISTS', 'AND', 'WAS', 'SHEDDING', 'A', 'STRONG', 'AND', 'CLEAR', 'LIGHT', 'IN', 'THE', 'FOREST', 'WHEN', 'THE', 'TRAVELLERS', 'RESUMED', 'THEIR', 'JOURNEY'] +1320-122612-0002-122: ref=['AFTER', 'PROCEEDING', 'A', 'FEW', 'MILES', 'THE', 'PROGRESS', 'OF', 'HAWKEYE', 'WHO', 'LED', 'THE', 'ADVANCE', 'BECAME', 'MORE', 'DELIBERATE', 'AND', 'WATCHFUL'] +1320-122612-0002-122: hyp=['AFTER', 'PROCEEDING', 'A', 'FEW', 'MILES', 'THE', 'PROGRESS', 'OF', 'HAWKEYE', 'WHO', 'LED', 'THE', 'ADVANCE', 'BECAME', 'MORE', 'DELIBERATE', 'AND', 'WATCHFUL'] +1320-122612-0003-123: ref=['HE', 'OFTEN', 'STOPPED', 'TO', 'EXAMINE', 'THE', 'TREES', 'NOR', 'DID', 'HE', 'CROSS', 'A', 'RIVULET', 'WITHOUT', 'ATTENTIVELY', 'CONSIDERING', 'THE', 'QUANTITY', 'THE', 'VELOCITY', 'AND', 'THE', 'COLOR', 'OF', 'ITS', 'WATERS'] +1320-122612-0003-123: hyp=['HE', 'OFTEN', 'STOPPED', 'TO', 'EXAMINE', 'THE', 'TREES', 'NOR', 'DID', 'HE', 'CROSS', 'A', 'RIVULET', 'WITHOUT', 'ATTENTIVELY', 'CONSIDERING', 'THE', 'QUANTITY', 'THE', 'VELOCITY', 'AND', 'THE', 'COLOUR', 'OF', 'ITS', 'WATERS'] +1320-122612-0004-124: ref=['DISTRUSTING', 'HIS', 'OWN', 'JUDGMENT', 'HIS', 'APPEALS', 'TO', 'THE', 'OPINION', 'OF', 'CHINGACHGOOK', 'WERE', 'FREQUENT', 'AND', 'EARNEST'] +1320-122612-0004-124: hyp=['DISTRUSTING', 'HIS', 'OWN', 'JUDGMENT', 'HIS', 'APPEALS', 'TO', 'THE', 'OPINION', 'OF', 'CHINGACHGOOK', 'WERE', 'FREQUENT', 'AND', 'EARNEST'] +1320-122612-0005-125: ref=['YET', 'HERE', 'ARE', 'WE', 'WITHIN', 'A', 'SHORT', 'RANGE', 'OF', 'THE', 'SCAROONS', 'AND', 'NOT', 'A', 'SIGN', 'OF', 'A', 'TRAIL', 'HAVE', 'WE', 'CROSSED'] +1320-122612-0005-125: hyp=['YET', 'HERE', 'ARE', 'WE', 'WITH', 'AN', 'A', 'SHORT', 'RANGE', 'OF', 'THE', 'SCARONS', 'AND', 'NOT', 'A', 'SIGN', 'OF', 'A', 'TRAIL', 'HAVE', 'WE', 'CROSSED'] +1320-122612-0006-126: ref=['LET', 'US', 'RETRACE', 'OUR', 'STEPS', 'AND', 'EXAMINE', 'AS', 'WE', 'GO', 'WITH', 'KEENER', 'EYES'] +1320-122612-0006-126: hyp=['LET', 'US', 'RETRACE', 'OUR', 'STEPS', 'AND', 'EXAMINE', 'AS', 'WE', 'GO', 'WITH', 'KEENER', 'EYES'] +1320-122612-0007-127: 
ref=['CHINGACHGOOK', 'HAD', 'CAUGHT', 'THE', 'LOOK', 'AND', 'MOTIONING', 'WITH', 'HIS', 'HAND', 'HE', 'BADE', 'HIM', 'SPEAK'] +1320-122612-0007-127: hyp=['INGACHGOOK', 'HAD', 'CAUGHT', 'THE', 'LOOK', 'AND', 'MOTIONING', 'WITH', 'HIS', 'HAND', 'HE', 'BADE', 'HIM', 'SPEAK'] +1320-122612-0008-128: ref=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'FOLLOWED', 'THE', 'UNEXPECTED', 'MOVEMENT', 'AND', 'READ', 'THEIR', 'SUCCESS', 'IN', 'THE', 'AIR', 'OF', 'TRIUMPH', 'THAT', 'THE', 'YOUTH', 'ASSUMED'] +1320-122612-0008-128: hyp=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'FOLLOWED', 'THE', 'UNEXPECTED', 'MOVEMENT', 'AND', 'READ', 'THEIR', 'SUCCESS', 'IN', 'THE', 'AIR', 'OF', 'TRIUMPH', 'THAT', 'THE', 'YOUTH', 'ASSUMED'] +1320-122612-0009-129: ref=['IT', 'WOULD', 'HAVE', 'BEEN', 'MORE', 'WONDERFUL', 'HAD', 'HE', 'SPOKEN', 'WITHOUT', 'A', 'BIDDING'] +1320-122612-0009-129: hyp=['IT', 'WOULD', 'HAVE', 'BEEN', 'MORE', 'WONDERFUL', 'HAD', 'HE', 'SPOKEN', 'WITHOUT', 'A', 'BIDDING'] +1320-122612-0010-130: ref=['SEE', 'SAID', 'UNCAS', 'POINTING', 'NORTH', 'AND', 'SOUTH', 'AT', 'THE', 'EVIDENT', 'MARKS', 'OF', 'THE', 'BROAD', 'TRAIL', 'ON', 'EITHER', 'SIDE', 'OF', 'HIM', 'THE', 'DARK', 'HAIR', 'HAS', 'GONE', 'TOWARD', 'THE', 'FOREST'] +1320-122612-0010-130: hyp=['SEE', 'SAID', 'UNCAS', 'POINTING', 'NORTH', 'AND', 'SOUTH', 'AT', 'THE', 'EVIDENT', 'MARKS', 'OF', 'THE', 'BROAD', 'TRAIL', 'ON', 'EITHER', 'SIDE', 'OF', 'HIM', 'THE', 'DARK', 'HAIR', 'HAS', 'GONE', 'TOWARD', 'THE', 'FOREST'] +1320-122612-0011-131: ref=['IF', 'A', 'ROCK', 'OR', 'A', 'RIVULET', 'OR', 'A', 'BIT', 'OF', 'EARTH', 'HARDER', 'THAN', 'COMMON', 'SEVERED', 'THE', 'LINKS', 'OF', 'THE', 'CLEW', 'THEY', 'FOLLOWED', 'THE', 'TRUE', 'EYE', 'OF', 'THE', 'SCOUT', 'RECOVERED', 'THEM', 'AT', 'A', 'DISTANCE', 'AND', 'SELDOM', 'RENDERED', 'THE', 'DELAY', 'OF', 'A', 'SINGLE', 'MOMENT', 'NECESSARY'] +1320-122612-0011-131: hyp=['IF', 'A', 'ROCK', 'OR', 'A', 'RIVULET', 'OR', 'A', 'BIT', 'OF', 'EARTH', 'HARDER', 'THAN', 'COMMON', 'SEVERED', 'THE', 'LINKS', 'OF', 'THE', 'CLUE', 'THEY', 'FOLLOWED', 'THE', 'TRUE', 'EYE', 'OF', 'THE', 'SCOUT', 'RECOVERED', 'THEM', 'AT', 'A', 'DISTANCE', 'AND', 'SELDOM', 'RENDERED', 'THE', 'DELAY', 'OF', 'A', 'SINGLE', 'MOMENT', 'NECESSARY'] +1320-122612-0012-132: ref=['EXTINGUISHED', 'BRANDS', 'WERE', 'LYING', 'AROUND', 'A', 'SPRING', 'THE', 'OFFALS', 'OF', 'A', 'DEER', 'WERE', 'SCATTERED', 'ABOUT', 'THE', 'PLACE', 'AND', 'THE', 'TREES', 'BORE', 'EVIDENT', 'MARKS', 'OF', 'HAVING', 'BEEN', 'BROWSED', 'BY', 'THE', 'HORSES'] +1320-122612-0012-132: hyp=['EXTINGUISHED', 'BRANDS', 'WERE', 'LYING', 'AROUND', 'A', 'SPRING', 'THE', 'OFFALS', 'OF', 'A', 'DEER', 'WERE', 'SCATTERED', 'ABOUT', 'THE', 'PLACE', 'AND', 'THE', 'TREES', 'BORE', 'EVIDENT', 'MARKS', 'OF', 'HAVING', 'BEEN', 'BROWSED', 'BY', 'THE', 'HORSES'] +1320-122612-0013-133: ref=['A', 'CIRCLE', 'OF', 'A', 'FEW', 'HUNDRED', 'FEET', 'IN', 'CIRCUMFERENCE', 'WAS', 'DRAWN', 'AND', 'EACH', 'OF', 'THE', 'PARTY', 'TOOK', 'A', 'SEGMENT', 'FOR', 'HIS', 'PORTION'] +1320-122612-0013-133: hyp=['A', 'CIRCLE', 'OF', 'A', 'FEW', 'HUNDRED', 'FEET', 'IN', 'CIRCUMFERENCE', 'WAS', 'DRAWN', 'AND', 'EACH', 'OF', 'THE', 'PARTY', 'TOOK', 'A', 'SEGMENT', 'FOR', 'HIS', 'PORTION'] +1320-122612-0014-134: ref=['THE', 'EXAMINATION', 'HOWEVER', 'RESULTED', 'IN', 'NO', 'DISCOVERY'] +1320-122612-0014-134: hyp=['THE', 'EXAMINATION', 'HOWEVER', 'RESULTED', 'IN', 'NO', 'DISCOVERY'] +1320-122612-0015-135: ref=['THE', 'WHOLE', 'PARTY', 'CROWDED', 'TO', 'THE', 'SPOT', 'WHERE', 'UNCAS', 'POINTED', 'OUT', 'THE', 
'IMPRESSION', 'OF', 'A', 'MOCCASIN', 'IN', 'THE', 'MOIST', 'ALLUVION'] +1320-122612-0015-135: hyp=['THE', 'WHOLE', 'PARTY', 'CROWDED', 'TO', 'THE', 'SPOT', 'WHERE', 'UNCAS', 'POINTED', 'OUT', 'THE', 'IMPRESSION', 'OF', 'A', 'MOCCASIN', 'IN', 'THE', 'MOIST', 'ALLUVIAN'] +1320-122612-0016-136: ref=['RUN', 'BACK', 'UNCAS', 'AND', 'BRING', 'ME', 'THE', 'SIZE', 'OF', 'THE', "SINGER'S", 'FOOT'] +1320-122612-0016-136: hyp=['RUN', 'BACK', 'UNCAS', 'AND', 'BRING', 'ME', 'THE', 'SIZE', 'OF', 'THE', "SINGER'S", 'FOOT'] +1320-122617-0000-78: ref=['NOTWITHSTANDING', 'THE', 'HIGH', 'RESOLUTION', 'OF', 'HAWKEYE', 'HE', 'FULLY', 'COMPREHENDED', 'ALL', 'THE', 'DIFFICULTIES', 'AND', 'DANGER', 'HE', 'WAS', 'ABOUT', 'TO', 'INCUR'] +1320-122617-0000-78: hyp=['NOTWITHSTANDING', 'THE', 'HIGH', 'RESOLUTION', 'OF', 'HAWKEYE', 'HE', 'FULLY', 'COMPREHENDED', 'ALL', 'THE', 'DIFFICULTIES', 'AND', 'DANGER', 'HE', 'WAS', 'ABOUT', 'TO', 'INCUR'] +1320-122617-0001-79: ref=['IN', 'HIS', 'RETURN', 'TO', 'THE', 'CAMP', 'HIS', 'ACUTE', 'AND', 'PRACTISED', 'INTELLECTS', 'WERE', 'INTENTLY', 'ENGAGED', 'IN', 'DEVISING', 'MEANS', 'TO', 'COUNTERACT', 'A', 'WATCHFULNESS', 'AND', 'SUSPICION', 'ON', 'THE', 'PART', 'OF', 'HIS', 'ENEMIES', 'THAT', 'HE', 'KNEW', 'WERE', 'IN', 'NO', 'DEGREE', 'INFERIOR', 'TO', 'HIS', 'OWN'] +1320-122617-0001-79: hyp=['IN', 'HIS', 'RETURN', 'TO', 'THE', 'CAMP', 'HIS', 'ACUTE', 'AND', 'PRACTISED', 'INTELLECTS', 'WERE', 'INTENTLY', 'ENGAGED', 'IN', 'DEVISING', 'MEANS', 'TO', 'COUNTERACT', 'A', 'WATCHFULNESS', 'AND', 'SUSPICION', 'ON', 'THE', 'PART', 'OF', 'HIS', 'ENEMIES', 'THAT', 'HE', 'KNEW', 'WERE', 'IN', 'NO', 'DEGREE', 'INFERIOR', 'TO', 'HIS', 'OWN'] +1320-122617-0002-80: ref=['IN', 'OTHER', 'WORDS', 'WHILE', 'HE', 'HAD', 'IMPLICIT', 'FAITH', 'IN', 'THE', 'ABILITY', 'OF', "BALAAM'S", 'ASS', 'TO', 'SPEAK', 'HE', 'WAS', 'SOMEWHAT', 'SKEPTICAL', 'ON', 'THE', 'SUBJECT', 'OF', 'A', "BEAR'S", 'SINGING', 'AND', 'YET', 'HE', 'HAD', 'BEEN', 'ASSURED', 'OF', 'THE', 'LATTER', 'ON', 'THE', 'TESTIMONY', 'OF', 'HIS', 'OWN', 'EXQUISITE', 'ORGANS'] +1320-122617-0002-80: hyp=['IN', 'OTHER', 'WORDS', 'WHILE', 'HE', 'HAD', 'IMPLICIT', 'FAITH', 'IN', 'THE', 'ABILITY', 'OF', "BALEM'S", 'ASS', 'TO', 'SPEAK', 'HE', 'WAS', 'SOMEWHAT', 'SCEPTICAL', 'ON', 'THE', 'SUBJECT', 'OF', 'A', "BEAR'S", 'SINGING', 'AND', 'YET', 'HE', 'HAD', 'BEEN', 'ASSURED', 'OF', 'THE', 'LATTER', 'ON', 'THE', 'TESTIMONY', 'OF', 'HIS', 'OWN', 'EXQUISITE', 'ORGANS'] +1320-122617-0003-81: ref=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'AIR', 'AND', 'MANNER', 'THAT', 'BETRAYED', 'TO', 'THE', 'SCOUT', 'THE', 'UTTER', 'CONFUSION', 'OF', 'THE', 'STATE', 'OF', 'HIS', 'MIND'] +1320-122617-0003-81: hyp=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'AIR', 'AND', 'MANNER', 'THAT', 'BETRAYED', 'TO', 'THE', 'SCOUT', 'THE', 'UTTER', 'CONFUSION', 'OF', 'THE', 'STATE', 'OF', 'HIS', 'MIND'] +1320-122617-0004-82: ref=['THE', 'INGENIOUS', 'HAWKEYE', 'WHO', 'RECALLED', 'THE', 'HASTY', 'MANNER', 'IN', 'WHICH', 'THE', 'OTHER', 'HAD', 'ABANDONED', 'HIS', 'POST', 'AT', 'THE', 'BEDSIDE', 'OF', 'THE', 'SICK', 'WOMAN', 'WAS', 'NOT', 'WITHOUT', 'HIS', 'SUSPICIONS', 'CONCERNING', 'THE', 'SUBJECT', 'OF', 'SO', 'MUCH', 'SOLEMN', 'DELIBERATION'] +1320-122617-0004-82: hyp=['THE', 'INGENIOUS', 'HAWKEYE', 'WHO', 'RECALLED', 'THE', 'HASTY', 'MANNER', 'IN', 'WHICH', 'THE', 'OTHER', 'HAD', 'ABANDONED', 'HIS', 'POST', 'AT', 'THE', 'BEDSIDE', 'OF', 'THE', 'SICK', 'WOMAN', 'WAS', 'NOT', 'WITHOUT', 'HIS', 'SUSPICIONS', 'CONCERNING', 'THE', 'SUBJECT', 'OF', 'SO', 'MUCH', 'SOLEMN', 'DELIBERATION'] 
+1320-122617-0005-83: ref=['THE', 'BEAR', 'SHOOK', 'HIS', 'SHAGGY', 'SIDES', 'AND', 'THEN', 'A', 'WELL', 'KNOWN', 'VOICE', 'REPLIED'] +1320-122617-0005-83: hyp=['THE', 'BEAR', 'SHOOK', 'HIS', 'SHAGGY', 'SIDES', 'AND', 'THEN', 'A', 'WELL', 'KNOWN', 'VOICE', 'REPLIED'] +1320-122617-0006-84: ref=['CAN', 'THESE', 'THINGS', 'BE', 'RETURNED', 'DAVID', 'BREATHING', 'MORE', 'FREELY', 'AS', 'THE', 'TRUTH', 'BEGAN', 'TO', 'DAWN', 'UPON', 'HIM'] +1320-122617-0006-84: hyp=['CAN', 'THESE', 'THINGS', 'BE', 'RETURNED', 'DAVID', 'BREATHING', 'MORE', 'FREELY', 'AS', 'THE', 'TRUTH', 'BEGAN', 'TO', 'DAWN', 'UPON', 'HIM'] +1320-122617-0007-85: ref=['COME', 'COME', 'RETURNED', 'HAWKEYE', 'UNCASING', 'HIS', 'HONEST', 'COUNTENANCE', 'THE', 'BETTER', 'TO', 'ASSURE', 'THE', 'WAVERING', 'CONFIDENCE', 'OF', 'HIS', 'COMPANION', 'YOU', 'MAY', 'SEE', 'A', 'SKIN', 'WHICH', 'IF', 'IT', 'BE', 'NOT', 'AS', 'WHITE', 'AS', 'ONE', 'OF', 'THE', 'GENTLE', 'ONES', 'HAS', 'NO', 'TINGE', 'OF', 'RED', 'TO', 'IT', 'THAT', 'THE', 'WINDS', 'OF', 'THE', 'HEAVEN', 'AND', 'THE', 'SUN', 'HAVE', 'NOT', 'BESTOWED', 'NOW', 'LET', 'US', 'TO', 'BUSINESS'] +1320-122617-0007-85: hyp=['COME', 'COME', 'RETURNED', 'HAWKEYE', 'UNCASING', 'HIS', 'HONEST', 'COUNTENANCE', 'THE', 'BETTER', 'TO', 'ASSURE', 'THE', 'WAVERING', 'CONFIDENCE', 'OF', 'HIS', 'COMPANION', 'YOU', 'MAY', 'SEE', 'A', 'SKIN', 'WHICH', 'IF', 'IT', 'BE', 'NOT', 'AS', 'WHITE', 'AS', 'ONE', 'OF', 'THE', 'GENTLE', 'ONES', 'HAS', 'NO', 'TINGE', 'OF', 'RED', 'TO', 'IT', 'THAT', 'THE', 'WINDS', 'OF', 'THE', 'HEAVEN', 'AND', 'THE', 'SUN', 'HAVE', 'NOT', 'BESTOWED', 'NOW', 'LET', 'US', 'TO', 'BUSINESS'] +1320-122617-0008-86: ref=['THE', 'YOUNG', 'MAN', 'IS', 'IN', 'BONDAGE', 'AND', 'MUCH', 'I', 'FEAR', 'HIS', 'DEATH', 'IS', 'DECREED'] +1320-122617-0008-86: hyp=['THE', 'YOUNG', 'MAN', 'IS', 'IN', 'BONDAGE', 'AND', 'MUCH', 'I', 'FEAR', 'HIS', 'DEATH', 'IS', 'DECREED'] +1320-122617-0009-87: ref=['I', 'GREATLY', 'MOURN', 'THAT', 'ONE', 'SO', 'WELL', 'DISPOSED', 'SHOULD', 'DIE', 'IN', 'HIS', 'IGNORANCE', 'AND', 'I', 'HAVE', 'SOUGHT', 'A', 'GOODLY', 'HYMN', 'CAN', 'YOU', 'LEAD', 'ME', 'TO', 'HIM'] +1320-122617-0009-87: hyp=['I', 'GREATLY', 'MOURNED', 'THAT', 'ONE', 'SO', 'WELL', 'DISPOSED', 'SHOULD', 'DIE', 'IN', 'HIS', 'IGNORANCE', 'AND', 'I', 'HAVE', 'SOUGHT', 'A', 'GOODLY', 'HYMN', 'CAN', 'YOU', 'LEAD', 'ME', 'TO', 'HIM'] +1320-122617-0010-88: ref=['THE', 'TASK', 'WILL', 'NOT', 'BE', 'DIFFICULT', 'RETURNED', 'DAVID', 'HESITATING', 'THOUGH', 'I', 'GREATLY', 'FEAR', 'YOUR', 'PRESENCE', 'WOULD', 'RATHER', 'INCREASE', 'THAN', 'MITIGATE', 'HIS', 'UNHAPPY', 'FORTUNES'] +1320-122617-0010-88: hyp=['THE', 'TASK', 'WILL', 'NOT', 'BE', 'DIFFICULT', 'RETURNED', 'DAVID', 'HESITATING', 'THOUGH', 'I', 'GREATLY', 'FEAR', 'YOUR', 'PRESENCE', 'WOULD', 'RATHER', 'INCREASE', 'THAN', 'MITIGATE', 'HIS', 'UNHAPPY', 'FORTUNES'] +1320-122617-0011-89: ref=['THE', 'LODGE', 'IN', 'WHICH', 'UNCAS', 'WAS', 'CONFINED', 'WAS', 'IN', 'THE', 'VERY', 'CENTER', 'OF', 'THE', 'VILLAGE', 'AND', 'IN', 'A', 'SITUATION', 'PERHAPS', 'MORE', 'DIFFICULT', 'THAN', 'ANY', 'OTHER', 'TO', 'APPROACH', 'OR', 'LEAVE', 'WITHOUT', 'OBSERVATION'] +1320-122617-0011-89: hyp=['THE', 'LODGE', 'IN', 'WHICH', 'UNCAS', 'WAS', 'CONFINED', 'WAS', 'IN', 'THE', 'VERY', 'CENTRE', 'OF', 'THE', 'VILLAGE', 'AND', 'IN', 'A', 'SITUATION', 'PERHAPS', 'MORE', 'DIFFICULT', 'THAN', 'ANY', 'OTHER', 'TO', 'APPROACH', 'OR', 'LEAVE', 'WITHOUT', 'OBSERVATION'] +1320-122617-0012-90: ref=['FOUR', 'OR', 'FIVE', 'OF', 'THE', 'LATTER', 'ONLY', 'LINGERED', 'ABOUT', 'THE', 'DOOR', 
'OF', 'THE', 'PRISON', 'OF', 'UNCAS', 'WARY', 'BUT', 'CLOSE', 'OBSERVERS', 'OF', 'THE', 'MANNER', 'OF', 'THEIR', 'CAPTIVE'] +1320-122617-0012-90: hyp=['FOUR', 'OR', 'FIVE', 'OF', 'THE', 'LATTER', 'ONLY', 'LINGERED', 'ABOUT', 'THE', 'DOOR', 'OF', 'THE', 'PRISON', 'OF', 'UNCAS', 'WARY', 'BUT', 'CLOSE', 'OBSERVERS', 'OF', 'THE', 'MANNER', 'OF', 'THEIR', 'CAPTIVE'] +1320-122617-0013-91: ref=['DELIVERED', 'IN', 'A', 'STRONG', 'TONE', 'OF', 'ASSENT', 'ANNOUNCED', 'THE', 'GRATIFICATION', 'THE', 'SAVAGE', 'WOULD', 'RECEIVE', 'IN', 'WITNESSING', 'SUCH', 'AN', 'EXHIBITION', 'OF', 'WEAKNESS', 'IN', 'AN', 'ENEMY', 'SO', 'LONG', 'HATED', 'AND', 'SO', 'MUCH', 'FEARED'] +1320-122617-0013-91: hyp=['DELIVERED', 'IN', 'A', 'STRONG', 'TONE', 'OF', 'ASSENT', 'ANNOUNCED', 'THE', 'GRATIFICATION', 'THE', 'SAVAGE', 'WOULD', 'RECEIVE', 'AND', 'WITNESSING', 'SUCH', 'AN', 'EXHIBITION', 'OF', 'WEAKNESS', 'AND', 'AN', 'ENEMY', 'SO', 'LONG', 'HATED', 'AND', 'SO', 'MUCH', 'FEARED'] +1320-122617-0014-92: ref=['THEY', 'DREW', 'BACK', 'A', 'LITTLE', 'FROM', 'THE', 'ENTRANCE', 'AND', 'MOTIONED', 'TO', 'THE', 'SUPPOSED', 'CONJURER', 'TO', 'ENTER'] +1320-122617-0014-92: hyp=['THEY', 'DREW', 'BACK', 'A', 'LITTLE', 'FROM', 'THE', 'ENTRANCE', 'AND', 'MOTIONED', 'TO', 'THE', 'SUPPOSED', 'CONJUROR', 'TO', 'ENTER'] +1320-122617-0015-93: ref=['BUT', 'THE', 'BEAR', 'INSTEAD', 'OF', 'OBEYING', 'MAINTAINED', 'THE', 'SEAT', 'IT', 'HAD', 'TAKEN', 'AND', 'GROWLED'] +1320-122617-0015-93: hyp=['BUT', 'THE', 'BEAR', 'INSTEAD', 'OF', 'OBEYING', 'MAINTAINED', 'THE', 'SEAT', 'IT', 'HAD', 'TAKEN', 'AND', 'GROWLED'] +1320-122617-0016-94: ref=['THE', 'CUNNING', 'MAN', 'IS', 'AFRAID', 'THAT', 'HIS', 'BREATH', 'WILL', 'BLOW', 'UPON', 'HIS', 'BROTHERS', 'AND', 'TAKE', 'AWAY', 'THEIR', 'COURAGE', 'TOO', 'CONTINUED', 'DAVID', 'IMPROVING', 'THE', 'HINT', 'HE', 'RECEIVED', 'THEY', 'MUST', 'STAND', 'FURTHER', 'OFF'] +1320-122617-0016-94: hyp=['THE', 'CUNNING', 'MAN', 'IS', 'AFRAID', 'THAT', 'HIS', 'BREATH', 'WILL', 'BLOW', 'UPON', 'HIS', 'BROTHERS', 'AND', 'TAKE', 'AWAY', 'THEIR', 'COURAGE', 'TOO', 'CONTINUED', 'DAVID', 'IMPROVING', 'THE', 'HINT', 'HE', 'RECEIVED', 'THEY', 'MUST', 'STAND', 'FURTHER', 'OFF'] +1320-122617-0017-95: ref=['THEN', 'AS', 'IF', 'SATISFIED', 'OF', 'THEIR', 'SAFETY', 'THE', 'SCOUT', 'LEFT', 'HIS', 'POSITION', 'AND', 'SLOWLY', 'ENTERED', 'THE', 'PLACE'] +1320-122617-0017-95: hyp=['THEN', 'AS', 'IF', 'SATISFIED', 'OF', 'THEIR', 'SAFETY', 'THE', 'SCOUT', 'LEFT', 'HIS', 'POSITION', 'AND', 'SLOWLY', 'ENTERED', 'THE', 'PLACE'] +1320-122617-0018-96: ref=['IT', 'WAS', 'SILENT', 'AND', 'GLOOMY', 'BEING', 'TENANTED', 'SOLELY', 'BY', 'THE', 'CAPTIVE', 'AND', 'LIGHTED', 'BY', 'THE', 'DYING', 'EMBERS', 'OF', 'A', 'FIRE', 'WHICH', 'HAD', 'BEEN', 'USED', 'FOR', 'THE', 'PURPOSED', 'OF', 'COOKERY'] +1320-122617-0018-96: hyp=['IT', 'WAS', 'SILENT', 'AND', 'GLOOMY', 'BEING', 'TENANTED', 'SOLELY', 'BY', 'THE', 'CAPTIVE', 'AND', 'LIGHTED', 'BY', 'THE', 'DYING', 'EMBERS', 'OF', 'A', 'FIRE', 'WHICH', 'HAD', 'BEEN', 'USED', 'FOR', 'THE', 'PURPOSE', 'OF', 'COOKERY'] +1320-122617-0019-97: ref=['UNCAS', 'OCCUPIED', 'A', 'DISTANT', 'CORNER', 'IN', 'A', 'RECLINING', 'ATTITUDE', 'BEING', 'RIGIDLY', 'BOUND', 'BOTH', 'HANDS', 'AND', 'FEET', 'BY', 'STRONG', 'AND', 'PAINFUL', 'WITHES'] +1320-122617-0019-97: hyp=['UNCAS', 'OCCUPIED', 'A', 'DISTANT', 'CORNER', 'IN', 'A', 'RECLINING', 'ATTITUDE', 'BEING', 'RIGIDLY', 'BOUND', 'BOTH', 'HANDS', 'AND', 'FEET', 'BY', 'STRONG', 'AND', 'PAINFUL', 'WIDTHS'] +1320-122617-0020-98: ref=['THE', 'SCOUT', 'WHO', 'HAD', 'LEFT', 
'DAVID', 'AT', 'THE', 'DOOR', 'TO', 'ASCERTAIN', 'THEY', 'WERE', 'NOT', 'OBSERVED', 'THOUGHT', 'IT', 'PRUDENT', 'TO', 'PRESERVE', 'HIS', 'DISGUISE', 'UNTIL', 'ASSURED', 'OF', 'THEIR', 'PRIVACY'] +1320-122617-0020-98: hyp=['THE', 'SCOUT', 'WHO', 'HAD', 'LEFT', 'DAVID', 'AT', 'THE', 'DOOR', 'TO', 'ASCERTAIN', 'THEY', 'WERE', 'NOT', 'OBSERVED', 'THOUGHT', 'IT', 'PRUDENT', 'TO', 'PRESERVE', 'HIS', 'DISGUISE', 'UNTIL', 'ASSURED', 'OF', 'THEIR', 'PRIVACY'] +1320-122617-0021-99: ref=['WHAT', 'SHALL', 'WE', 'DO', 'WITH', 'THE', 'MINGOES', 'AT', 'THE', 'DOOR', 'THEY', 'COUNT', 'SIX', 'AND', 'THIS', 'SINGER', 'IS', 'AS', 'GOOD', 'AS', 'NOTHING'] +1320-122617-0021-99: hyp=['WHAT', 'SHALL', 'WE', 'DO', 'WITH', 'THE', 'MINGOES', 'AT', 'THE', 'DOOR', 'THEY', 'COUNT', 'SIX', 'AND', 'THE', 'SINGER', 'IS', 'AS', 'GOOD', 'AS', 'NOTHING'] +1320-122617-0022-100: ref=['THE', 'DELAWARES', 'ARE', 'CHILDREN', 'OF', 'THE', 'TORTOISE', 'AND', 'THEY', 'OUTSTRIP', 'THE', 'DEER'] +1320-122617-0022-100: hyp=['THE', 'DELAWARES', 'ARE', 'CHILDREN', 'OF', 'THE', 'TORTOISE', 'AND', 'THE', 'OUTSTRIPPED', 'THE', 'DEER'] +1320-122617-0023-101: ref=['UNCAS', 'WHO', 'HAD', 'ALREADY', 'APPROACHED', 'THE', 'DOOR', 'IN', 'READINESS', 'TO', 'LEAD', 'THE', 'WAY', 'NOW', 'RECOILED', 'AND', 'PLACED', 'HIMSELF', 'ONCE', 'MORE', 'IN', 'THE', 'BOTTOM', 'OF', 'THE', 'LODGE'] +1320-122617-0023-101: hyp=['UNCAS', 'WHO', 'HAD', 'ALREADY', 'APPROACHED', 'THE', 'DOOR', 'IN', 'READINESS', 'TO', 'LEAD', 'THE', 'WAY', 'NOW', 'RECOILED', 'AND', 'PLACED', 'HIMSELF', 'ONCE', 'MORE', 'IN', 'THE', 'BOTTOM', 'OF', 'THE', 'LODGE'] +1320-122617-0024-102: ref=['BUT', 'HAWKEYE', 'WHO', 'WAS', 'TOO', 'MUCH', 'OCCUPIED', 'WITH', 'HIS', 'OWN', 'THOUGHTS', 'TO', 'NOTE', 'THE', 'MOVEMENT', 'CONTINUED', 'SPEAKING', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'HIS', 'COMPANION'] +1320-122617-0024-102: hyp=['BUT', 'HAWKEYE', 'WHO', 'WAS', 'TOO', 'MUCH', 'OCCUPIED', 'WITH', 'HIS', 'OWN', 'THOUGHTS', 'TO', 'NOTE', 'THE', 'MOVEMENT', 'CONTINUED', 'SPEAKING', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'HIS', 'COMPANION'] +1320-122617-0025-103: ref=['SO', 'UNCAS', 'YOU', 'HAD', 'BETTER', 'TAKE', 'THE', 'LEAD', 'WHILE', 'I', 'WILL', 'PUT', 'ON', 'THE', 'SKIN', 'AGAIN', 'AND', 'TRUST', 'TO', 'CUNNING', 'FOR', 'WANT', 'OF', 'SPEED'] +1320-122617-0025-103: hyp=['SO', 'UNCAS', 'YOU', 'HAD', 'BETTER', 'TAKE', 'THE', 'LEAD', 'WHILE', 'I', 'WILL', 'PUT', 'ON', 'THE', 'SKIN', 'AGAIN', 'AND', 'TRUST', 'TO', 'CUNNING', 'FOR', 'WANT', 'OF', 'SPEED'] +1320-122617-0026-104: ref=['WELL', 'WHAT', "CAN'T", 'BE', 'DONE', 'BY', 'MAIN', 'COURAGE', 'IN', 'WAR', 'MUST', 'BE', 'DONE', 'BY', 'CIRCUMVENTION'] +1320-122617-0026-104: hyp=['WELL', 'WHAT', "CAN'T", 'BE', 'DONE', 'BY', 'MAIN', 'COURAGE', 'AND', 'WAR', 'MUST', 'BE', 'DONE', 'BY', 'CIRCUMVENTION'] +1320-122617-0027-105: ref=['AS', 'SOON', 'AS', 'THESE', 'DISPOSITIONS', 'WERE', 'MADE', 'THE', 'SCOUT', 'TURNED', 'TO', 'DAVID', 'AND', 'GAVE', 'HIM', 'HIS', 'PARTING', 'INSTRUCTIONS'] +1320-122617-0027-105: hyp=['AS', 'SOON', 'AS', 'THESE', 'DISPOSITIONS', 'WERE', 'MADE', 'THE', 'SCOUT', 'TURNED', 'TO', 'DAVID', 'AND', 'GAVE', 'HIM', 'HIS', 'PARTING', 'INSTRUCTIONS'] +1320-122617-0028-106: ref=['MY', 'PURSUITS', 'ARE', 'PEACEFUL', 'AND', 'MY', 'TEMPER', 'I', 'HUMBLY', 'TRUST', 'IS', 'GREATLY', 'GIVEN', 'TO', 'MERCY', 'AND', 'LOVE', 'RETURNED', 'DAVID', 'A', 'LITTLE', 'NETTLED', 'AT', 'SO', 'DIRECT', 'AN', 'ATTACK', 'ON', 'HIS', 'MANHOOD', 'BUT', 'THERE', 'ARE', 'NONE', 'WHO', 'CAN', 'SAY', 'THAT', 'I', 'HAVE', 'EVER', 'FORGOTTEN', 'MY', 'FAITH', 
'IN', 'THE', 'LORD', 'EVEN', 'IN', 'THE', 'GREATEST', 'STRAITS'] +1320-122617-0028-106: hyp=['MY', 'PURSUITS', 'ARE', 'PEACEFUL', 'AND', 'MY', 'TEMPER', 'I', 'HUMBLY', 'TRUST', 'IS', 'GREATLY', 'GIVEN', 'TO', 'MERCY', 'AND', 'LOVE', 'RETURNED', 'DAVID', 'A', 'LITTLE', 'NETTLED', 'AT', 'SO', 'DIRECT', 'AN', 'ATTACK', 'ON', 'HIS', 'MANHOOD', 'BUT', 'THERE', 'ARE', 'NONE', 'WHO', 'CAN', 'SAY', 'THAT', 'I', 'HAVE', 'EVER', 'FORGOTTEN', 'MY', 'FAITH', 'IN', 'THE', 'LORD', 'EVEN', 'IN', 'THE', 'GREATEST', 'STRAITS'] +1320-122617-0029-107: ref=['IF', 'YOU', 'ARE', 'NOT', 'THEN', 'KNOCKED', 'ON', 'THE', 'HEAD', 'YOUR', 'BEING', 'A', 'NON', 'COMPOSSER', 'WILL', 'PROTECT', 'YOU', 'AND', "YOU'LL", 'THEN', 'HAVE', 'A', 'GOOD', 'REASON', 'TO', 'EXPECT', 'TO', 'DIE', 'IN', 'YOUR', 'BED'] +1320-122617-0029-107: hyp=['IF', 'YOU', 'ARE', 'NOT', 'THEN', 'KNOCKED', 'ON', 'THE', 'HEAD', 'YOUR', 'BEING', 'A', 'NONCOMPOSTER', 'WILL', 'PROTECT', 'YOU', 'AND', "YOU'LL", 'THEN', 'HAVE', 'A', 'GOOD', 'REASON', 'TO', 'EXPECT', 'TO', 'DIE', 'IN', 'YOUR', 'BED'] +1320-122617-0030-108: ref=['SO', 'CHOOSE', 'FOR', 'YOURSELF', 'TO', 'MAKE', 'A', 'RUSH', 'OR', 'TARRY', 'HERE'] +1320-122617-0030-108: hyp=['SUSE', 'FOR', 'YOURSELF', 'TO', 'MAKE', 'A', 'RUSH', 'OR', 'TARRY', 'HERE'] +1320-122617-0031-109: ref=['BRAVELY', 'AND', 'GENEROUSLY', 'HAS', 'HE', 'BATTLED', 'IN', 'MY', 'BEHALF', 'AND', 'THIS', 'AND', 'MORE', 'WILL', 'I', 'DARE', 'IN', 'HIS', 'SERVICE'] +1320-122617-0031-109: hyp=['BRAVELY', 'AND', 'GENEROUSLY', 'HAS', 'HE', 'BATTLED', 'IN', 'MY', 'BEHALF', 'AND', 'THIS', 'AND', 'MORE', 'WILL', 'I', 'DARE', 'IN', 'HIS', 'SERVICE'] +1320-122617-0032-110: ref=['KEEP', 'SILENT', 'AS', 'LONG', 'AS', 'MAY', 'BE', 'AND', 'IT', 'WOULD', 'BE', 'WISE', 'WHEN', 'YOU', 'DO', 'SPEAK', 'TO', 'BREAK', 'OUT', 'SUDDENLY', 'IN', 'ONE', 'OF', 'YOUR', 'SHOUTINGS', 'WHICH', 'WILL', 'SERVE', 'TO', 'REMIND', 'THE', 'INDIANS', 'THAT', 'YOU', 'ARE', 'NOT', 'ALTOGETHER', 'AS', 'RESPONSIBLE', 'AS', 'MEN', 'SHOULD', 'BE'] +1320-122617-0032-110: hyp=['KEEP', 'SILENT', 'AS', 'LONG', 'AS', 'MAY', 'BE', 'AND', 'IT', 'WOULD', 'BE', 'WISE', 'WHEN', 'YOU', 'DO', 'SPEAK', 'TO', 'BREAK', 'OUT', 'SUDDENLY', 'IN', 'ONE', 'OF', 'YOUR', 'SHOUTINGS', 'WHICH', 'WILL', 'SERVE', 'TO', 'REMIND', 'THE', 'INDIANS', 'THAT', 'YOU', 'ARE', 'NOT', 'ALTOGETHER', 'AS', 'RESPONSIBLE', 'AS', 'MEN', 'SHOULD', 'BE'] +1320-122617-0033-111: ref=['IF', 'HOWEVER', 'THEY', 'TAKE', 'YOUR', 'SCALP', 'AS', 'I', 'TRUST', 'AND', 'BELIEVE', 'THEY', 'WILL', 'NOT', 'DEPEND', 'ON', 'IT', 'UNCAS', 'AND', 'I', 'WILL', 'NOT', 'FORGET', 'THE', 'DEED', 'BUT', 'REVENGE', 'IT', 'AS', 'BECOMES', 'TRUE', 'WARRIORS', 'AND', 'TRUSTY', 'FRIENDS'] +1320-122617-0033-111: hyp=['IF', 'HOWEVER', 'THEY', 'TAKE', 'YOUR', 'SCALP', 'AS', 'I', 'TRUST', 'AND', 'BELIEVE', 'THEY', 'WILL', 'NOT', 'DEPEND', 'ON', 'IT', 'UNCAS', 'AND', 'I', 'WILL', 'NOT', 'FORGET', 'THE', 'DEED', 'BUT', 'REVENGE', 'IT', 'AS', 'BECOMES', 'TRUE', 'WARRIORS', 'AND', 'TRUSTY', 'FRIENDS'] +1320-122617-0034-112: ref=['HOLD', 'SAID', 'DAVID', 'PERCEIVING', 'THAT', 'WITH', 'THIS', 'ASSURANCE', 'THEY', 'WERE', 'ABOUT', 'TO', 'LEAVE', 'HIM', 'I', 'AM', 'AN', 'UNWORTHY', 'AND', 'HUMBLE', 'FOLLOWER', 'OF', 'ONE', 'WHO', 'TAUGHT', 'NOT', 'THE', 'DAMNABLE', 'PRINCIPLE', 'OF', 'REVENGE'] +1320-122617-0034-112: hyp=['HOLD', 'SAID', 'DAVID', 'PERCEIVING', 'THAT', 'WITH', 'THIS', 'ASSURANCE', 'THEY', 'WERE', 'ABOUT', 'TO', 'LEAVE', 'HIM', 'I', 'AM', 'AN', 'UNWORTHY', 'AND', 'HUMBLE', 'FOLLOWER', 'OF', 'ONE', 'WHO', 'TAUGHT', 'NOT', 'THE', 
'DAMNABLE', 'PRINCIPLE', 'OF', 'REVENGE'] +1320-122617-0035-113: ref=['THEN', 'HEAVING', 'A', 'HEAVY', 'SIGH', 'PROBABLY', 'AMONG', 'THE', 'LAST', 'HE', 'EVER', 'DREW', 'IN', 'PINING', 'FOR', 'A', 'CONDITION', 'HE', 'HAD', 'SO', 'LONG', 'ABANDONED', 'HE', 'ADDED', 'IT', 'IS', 'WHAT', 'I', 'WOULD', 'WISH', 'TO', 'PRACTISE', 'MYSELF', 'AS', 'ONE', 'WITHOUT', 'A', 'CROSS', 'OF', 'BLOOD', 'THOUGH', 'IT', 'IS', 'NOT', 'ALWAYS', 'EASY', 'TO', 'DEAL', 'WITH', 'AN', 'INDIAN', 'AS', 'YOU', 'WOULD', 'WITH', 'A', 'FELLOW', 'CHRISTIAN'] +1320-122617-0035-113: hyp=['THEN', 'HEAVING', 'A', 'HEAVY', 'SIGH', 'PROBABLY', 'AMONG', 'THE', 'LAST', 'HE', 'EVER', 'DREW', 'IN', 'PINING', 'FOR', 'A', 'CONDITION', 'HE', 'HAD', 'SO', 'LONG', 'ABANDONED', 'HE', 'ADDED', 'IT', 'IS', 'WHAT', 'I', 'WOULD', 'WISH', 'TO', 'PRACTISE', 'MYSELF', 'AS', 'ONE', 'WITHOUT', 'A', 'CROSS', 'OF', 'BLOOD', 'THOUGH', 'IT', 'IS', 'NOT', 'ALWAYS', 'EASY', 'TO', 'DEAL', 'WITH', 'AN', 'INDIAN', 'AS', 'YOU', 'WOULD', 'WITH', 'A', 'FELLOW', 'CHRISTIAN'] +1320-122617-0036-114: ref=['GOD', 'BLESS', 'YOU', 'FRIEND', 'I', 'DO', 'BELIEVE', 'YOUR', 'SCENT', 'IS', 'NOT', 'GREATLY', 'WRONG', 'WHEN', 'THE', 'MATTER', 'IS', 'DULY', 'CONSIDERED', 'AND', 'KEEPING', 'ETERNITY', 'BEFORE', 'THE', 'EYES', 'THOUGH', 'MUCH', 'DEPENDS', 'ON', 'THE', 'NATURAL', 'GIFTS', 'AND', 'THE', 'FORCE', 'OF', 'TEMPTATION'] +1320-122617-0036-114: hyp=['GOD', 'BLESS', 'YOU', 'FRIEND', 'I', 'DO', 'BELIEVE', 'YOUR', 'SIN', 'HAS', 'NOT', 'GREATLY', 'WRONG', 'WHEN', 'THE', 'MATTER', 'IS', 'DULY', 'CONSIDERED', 'AND', 'KEEPING', 'ETERNITY', 'BEFORE', 'THE', 'EYES', 'THOUGH', 'MUCH', 'DEPENDS', 'ON', 'THE', 'NATURAL', 'GIFTS', 'IN', 'THE', 'FORCE', 'OF', 'TEMPTATION'] +1320-122617-0037-115: ref=['THE', 'DELAWARE', 'DOG', 'HE', 'SAID', 'LEANING', 'FORWARD', 'AND', 'PEERING', 'THROUGH', 'THE', 'DIM', 'LIGHT', 'TO', 'CATCH', 'THE', 'EXPRESSION', 'OF', 'THE', "OTHER'S", 'FEATURES', 'IS', 'HE', 'AFRAID'] +1320-122617-0037-115: hyp=['THE', 'DELAWARE', 'DOG', 'HE', 'SAID', 'LEANING', 'FORWARD', 'AND', 'PEERING', 'THROUGH', 'THE', 'DIM', 'LIGHT', 'TO', 'CATCH', 'THE', 'EXPRESSION', 'OF', 'THE', "OTHER'S", 'FEATURES', 'IS', 'HE', 'AFRAID'] +1320-122617-0038-116: ref=['WILL', 'THE', 'HURONS', 'HEAR', 'HIS', 'GROANS'] +1320-122617-0038-116: hyp=['WILL', 'THE', 'HURONS', 'HEAR', 'HIS', 'GROANS'] +1320-122617-0039-117: ref=['THE', 'MOHICAN', 'STARTED', 'ON', 'HIS', 'FEET', 'AND', 'SHOOK', 'HIS', 'SHAGGY', 'COVERING', 'AS', 'THOUGH', 'THE', 'ANIMAL', 'HE', 'COUNTERFEITED', 'WAS', 'ABOUT', 'TO', 'MAKE', 'SOME', 'DESPERATE', 'EFFORT'] +1320-122617-0039-117: hyp=['THE', 'MOHICANS', 'STARTED', 'ON', 'HIS', 'FEET', 'AND', 'SHOOK', 'HIS', 'SHAGGY', 'COVERING', 'AS', 'THOUGH', 'THE', 'ANIMAL', 'HE', 'COUNTERFEITED', 'WAS', 'ABOUT', 'TO', 'MAKE', 'SOME', 'DESPERATE', 'EFFORT'] +1320-122617-0040-118: ref=['HE', 'HAD', 'NO', 'OCCASION', 'TO', 'DELAY', 'FOR', 'AT', 'THE', 'NEXT', 'INSTANT', 'A', 'BURST', 'OF', 'CRIES', 'FILLED', 'THE', 'OUTER', 'AIR', 'AND', 'RAN', 'ALONG', 'THE', 'WHOLE', 'EXTENT', 'OF', 'THE', 'VILLAGE'] +1320-122617-0040-118: hyp=['HE', 'HAD', 'NO', 'OCCASION', 'TO', 'DELAY', 'FOR', 'AT', 'THE', 'NEXT', 'INSTANT', 'A', 'BURST', 'OF', 'CRIES', 'FILLED', 'THE', 'OUTER', 'AIR', 'AND', 'RAN', 'ALONG', 'THE', 'WHOLE', 'EXTENT', 'OF', 'THE', 'VILLAGE'] +1320-122617-0041-119: ref=['UNCAS', 'CAST', 'HIS', 'SKIN', 'AND', 'STEPPED', 'FORTH', 'IN', 'HIS', 'OWN', 'BEAUTIFUL', 'PROPORTIONS'] +1320-122617-0041-119: hyp=['UNCAS', 'CAST', 'HIS', 'SKIN', 'AND', 'STEPPED', 'FORTH', 'IN', 'HIS', 
'OWN', 'BEAUTIFUL', 'PROPORTIONS'] +1580-141083-0000-1949: ref=['I', 'WILL', 'ENDEAVOUR', 'IN', 'MY', 'STATEMENT', 'TO', 'AVOID', 'SUCH', 'TERMS', 'AS', 'WOULD', 'SERVE', 'TO', 'LIMIT', 'THE', 'EVENTS', 'TO', 'ANY', 'PARTICULAR', 'PLACE', 'OR', 'GIVE', 'A', 'CLUE', 'AS', 'TO', 'THE', 'PEOPLE', 'CONCERNED'] +1580-141083-0000-1949: hyp=['I', 'WILL', 'ENDEAVOUR', 'IN', 'MY', 'STATEMENT', 'TO', 'AVOID', 'SUCH', 'TERMS', 'AS', 'WOULD', 'SERVE', 'TO', 'LIMIT', 'THE', 'EVENTS', 'TO', 'ANY', 'PARTICULAR', 'PLACE', 'OR', 'GIVE', 'A', 'CLUE', 'AS', 'TO', 'THE', 'PEOPLE', 'CONCERNED'] +1580-141083-0001-1950: ref=['I', 'HAD', 'ALWAYS', 'KNOWN', 'HIM', 'TO', 'BE', 'RESTLESS', 'IN', 'HIS', 'MANNER', 'BUT', 'ON', 'THIS', 'PARTICULAR', 'OCCASION', 'HE', 'WAS', 'IN', 'SUCH', 'A', 'STATE', 'OF', 'UNCONTROLLABLE', 'AGITATION', 'THAT', 'IT', 'WAS', 'CLEAR', 'SOMETHING', 'VERY', 'UNUSUAL', 'HAD', 'OCCURRED'] +1580-141083-0001-1950: hyp=['I', 'HAD', 'ALWAYS', 'KNOWN', 'HIM', 'TO', 'BE', 'RESTLESS', 'IN', 'HIS', 'MANNER', 'BUT', 'ON', 'THIS', 'PARTICULAR', 'OCCASION', 'HE', 'WAS', 'IN', 'SUCH', 'A', 'STATE', 'OF', 'UNCONTROLLABLE', 'AGITATION', 'THAT', 'IT', 'WAS', 'CLEAR', 'SOMETHING', 'VERY', 'UNUSUAL', 'HAD', 'OCCURRED'] +1580-141083-0002-1951: ref=['MY', "FRIEND'S", 'TEMPER', 'HAD', 'NOT', 'IMPROVED', 'SINCE', 'HE', 'HAD', 'BEEN', 'DEPRIVED', 'OF', 'THE', 'CONGENIAL', 'SURROUNDINGS', 'OF', 'BAKER', 'STREET'] +1580-141083-0002-1951: hyp=['MY', "FRIEND'S", 'TEMPER', 'HAD', 'NOT', 'IMPROVED', 'SINCE', 'HE', 'HAD', 'BEEN', 'DEPRIVED', 'OF', 'THE', 'CONGENIAL', 'SURROUNDINGS', 'OF', 'BAKER', 'STREET'] +1580-141083-0003-1952: ref=['WITHOUT', 'HIS', 'SCRAPBOOKS', 'HIS', 'CHEMICALS', 'AND', 'HIS', 'HOMELY', 'UNTIDINESS', 'HE', 'WAS', 'AN', 'UNCOMFORTABLE', 'MAN'] +1580-141083-0003-1952: hyp=['WITHOUT', 'HIS', 'SCRAP', 'BOOKS', 'HIS', 'CHEMICALS', 'AND', 'HIS', 'HOMELY', 'UNTIDINESS', 'HE', 'WAS', 'AN', 'UNCOMFORTABLE', 'MAN'] +1580-141083-0004-1953: ref=['I', 'HAD', 'TO', 'READ', 'IT', 'OVER', 'CAREFULLY', 'AS', 'THE', 'TEXT', 'MUST', 'BE', 'ABSOLUTELY', 'CORRECT'] +1580-141083-0004-1953: hyp=['I', 'HAD', 'TO', 'READ', 'IT', 'OVER', 'CAREFULLY', 'AS', 'THE', 'TEXT', 'MUST', 'BE', 'ABSOLUTELY', 'CORRECT'] +1580-141083-0005-1954: ref=['I', 'WAS', 'ABSENT', 'RATHER', 'MORE', 'THAN', 'AN', 'HOUR'] +1580-141083-0005-1954: hyp=['I', 'WAS', 'ABSENT', 'RATHER', 'MORE', 'THAN', 'AN', 'HOUR'] +1580-141083-0006-1955: ref=['THE', 'ONLY', 'DUPLICATE', 'WHICH', 'EXISTED', 'SO', 'FAR', 'AS', 'I', 'KNEW', 'WAS', 'THAT', 'WHICH', 'BELONGED', 'TO', 'MY', 'SERVANT', 'BANNISTER', 'A', 'MAN', 'WHO', 'HAS', 'LOOKED', 'AFTER', 'MY', 'ROOM', 'FOR', 'TEN', 'YEARS', 'AND', 'WHOSE', 'HONESTY', 'IS', 'ABSOLUTELY', 'ABOVE', 'SUSPICION'] +1580-141083-0006-1955: hyp=['THE', 'ONLY', 'DUPLICATE', 'WHICH', 'EXISTED', 'SO', 'FAR', 'AS', 'I', 'KNEW', 'WAS', 'THAT', 'WHICH', 'BELONGED', 'TO', 'MY', 'SERVANT', 'BANISTER', 'A', 'MAN', 'WHO', 'HAS', 'LOOKED', 'AFTER', 'MY', 'ROOM', 'FOR', 'TEN', 'YEARS', 'AND', 'WHOSE', 'HONESTY', 'IS', 'ABSOLUTELY', 'ABOVE', 'SUSPICION'] +1580-141083-0007-1956: ref=['THE', 'MOMENT', 'I', 'LOOKED', 'AT', 'MY', 'TABLE', 'I', 'WAS', 'AWARE', 'THAT', 'SOMEONE', 'HAD', 'RUMMAGED', 'AMONG', 'MY', 'PAPERS'] +1580-141083-0007-1956: hyp=['THE', 'MOMENT', 'I', 'LOOKED', 'AT', 'MY', 'TABLE', 'I', 'WAS', 'AWARE', 'THAT', 'SOME', 'ONE', 'HAD', 'RUMMAGED', 'AMONG', 'MY', 'PAPERS'] +1580-141083-0008-1957: ref=['THE', 'PROOF', 'WAS', 'IN', 'THREE', 'LONG', 'SLIPS', 'I', 'HAD', 'LEFT', 'THEM', 'ALL', 'TOGETHER'] +1580-141083-0008-1957: 
hyp=['THE', 'PROOF', 'WAS', 'IN', 'THREE', 'LONG', 'SLIPS', 'I', 'HAD', 'LEFT', 'THEM', 'ALL', 'TOGETHER'] +1580-141083-0009-1958: ref=['THE', 'ALTERNATIVE', 'WAS', 'THAT', 'SOMEONE', 'PASSING', 'HAD', 'OBSERVED', 'THE', 'KEY', 'IN', 'THE', 'DOOR', 'HAD', 'KNOWN', 'THAT', 'I', 'WAS', 'OUT', 'AND', 'HAD', 'ENTERED', 'TO', 'LOOK', 'AT', 'THE', 'PAPERS'] +1580-141083-0009-1958: hyp=['THEY', 'ALL', 'TURNED', 'OF', 'WAS', 'THAT', 'SOME', 'ONE', 'PASSING', 'HAD', 'OBSERVED', 'THE', 'KEY', 'IN', 'THE', 'DOOR', 'HAD', 'KNOWN', 'THAT', 'I', 'WAS', 'OUT', 'AND', 'HAD', 'ENTERED', 'TO', 'LOOK', 'AT', 'THE', 'PAPERS'] +1580-141083-0010-1959: ref=['I', 'GAVE', 'HIM', 'A', 'LITTLE', 'BRANDY', 'AND', 'LEFT', 'HIM', 'COLLAPSED', 'IN', 'A', 'CHAIR', 'WHILE', 'I', 'MADE', 'A', 'MOST', 'CAREFUL', 'EXAMINATION', 'OF', 'THE', 'ROOM'] +1580-141083-0010-1959: hyp=['I', 'GAVE', 'HIM', 'A', 'LITTLE', 'BRANDY', 'AND', 'LEFT', 'HIM', 'COLLAPSED', 'IN', 'A', 'CHAIR', 'WHILE', 'I', 'MADE', 'A', 'MOST', 'CAREFUL', 'EXAMINATION', 'OF', 'THE', 'ROOM'] +1580-141083-0011-1960: ref=['A', 'BROKEN', 'TIP', 'OF', 'LEAD', 'WAS', 'LYING', 'THERE', 'ALSO'] +1580-141083-0011-1960: hyp=['A', 'BROKEN', 'TIP', 'OF', 'LEAD', 'WAS', 'LYING', 'THERE', 'ALSO'] +1580-141083-0012-1961: ref=['NOT', 'ONLY', 'THIS', 'BUT', 'ON', 'THE', 'TABLE', 'I', 'FOUND', 'A', 'SMALL', 'BALL', 'OF', 'BLACK', 'DOUGH', 'OR', 'CLAY', 'WITH', 'SPECKS', 'OF', 'SOMETHING', 'WHICH', 'LOOKS', 'LIKE', 'SAWDUST', 'IN', 'IT'] +1580-141083-0012-1961: hyp=['NOT', 'ONLY', 'THIS', 'BUT', 'ON', 'THE', 'TABLE', 'I', 'FOUND', 'A', 'SMALL', 'BALL', 'OF', 'BLACK', 'DOUGH', 'OR', 'CLAY', 'WITH', 'SPECKS', 'OF', 'SOMETHING', 'WHICH', 'LOOKS', 'LIKE', 'SAWDUST', 'IN', 'IT'] +1580-141083-0013-1962: ref=['ABOVE', 'ALL', 'THINGS', 'I', 'DESIRE', 'TO', 'SETTLE', 'THE', 'MATTER', 'QUIETLY', 'AND', 'DISCREETLY'] +1580-141083-0013-1962: hyp=['ABOVE', 'ALL', 'THINGS', 'I', 'DESIRE', 'TO', 'SETTLE', 'THE', 'MATTER', 'QUIETLY', 'AND', 'DISCREETLY'] +1580-141083-0014-1963: ref=['TO', 'THE', 'BEST', 'OF', 'MY', 'BELIEF', 'THEY', 'WERE', 'ROLLED', 'UP'] +1580-141083-0014-1963: hyp=['TO', 'THE', 'BEST', 'OF', 'MY', 'BELIEF', 'THEY', 'WERE', 'ROLLED', 'UP'] +1580-141083-0015-1964: ref=['DID', 'ANYONE', 'KNOW', 'THAT', 'THESE', 'PROOFS', 'WOULD', 'BE', 'THERE', 'NO', 'ONE', 'SAVE', 'THE', 'PRINTER'] +1580-141083-0015-1964: hyp=['DID', 'ANY', 'ONE', 'KNOW', 'THAT', 'THESE', 'PROOFS', 'WOULD', 'BE', 'THERE', 'NO', 'ONE', 'SAVE', 'THE', 'PRINTER'] +1580-141083-0016-1965: ref=['I', 'WAS', 'IN', 'SUCH', 'A', 'HURRY', 'TO', 'COME', 'TO', 'YOU', 'YOU', 'LEFT', 'YOUR', 'DOOR', 'OPEN'] +1580-141083-0016-1965: hyp=['I', 'WAS', 'IN', 'SUCH', 'A', 'HURRY', 'TO', 'COME', 'TO', 'YOU', 'YOU', 'LEFT', 'YOUR', 'DOOR', 'OPEN'] +1580-141083-0017-1966: ref=['SO', 'IT', 'SEEMS', 'TO', 'ME'] +1580-141083-0017-1966: hyp=['SO', 'IT', 'SEEMS', 'TO', 'ME'] +1580-141083-0018-1967: ref=['NOW', 'MISTER', 'SOAMES', 'AT', 'YOUR', 'DISPOSAL'] +1580-141083-0018-1967: hyp=['NOW', 'MISTER', 'SOLMES', 'AT', 'YOUR', 'DISPOSAL'] +1580-141083-0019-1968: ref=['ABOVE', 'WERE', 'THREE', 'STUDENTS', 'ONE', 'ON', 'EACH', 'STORY'] +1580-141083-0019-1968: hyp=['ABOVE', 'WERE', 'THREE', 'STUDENTS', 'ONE', 'ON', 'EACH', 'STORY'] +1580-141083-0020-1969: ref=['THEN', 'HE', 'APPROACHED', 'IT', 'AND', 'STANDING', 'ON', 'TIPTOE', 'WITH', 'HIS', 'NECK', 'CRANED', 'HE', 'LOOKED', 'INTO', 'THE', 'ROOM'] +1580-141083-0020-1969: hyp=['THEN', 'HE', 'APPROACHED', 'IT', 'AND', 'STANDING', 'ON', 'TIPTOE', 'WITH', 'HIS', 'NECK', 'CRANED', 'HE', 'LOOKED', 
'INTO', 'THE', 'ROOM'] +1580-141083-0021-1970: ref=['THERE', 'IS', 'NO', 'OPENING', 'EXCEPT', 'THE', 'ONE', 'PANE', 'SAID', 'OUR', 'LEARNED', 'GUIDE'] +1580-141083-0021-1970: hyp=['THERE', 'IS', 'NO', 'OPENING', 'EXCEPT', 'THE', 'ONE', 'PAIN', 'SAID', 'OUR', 'LEARNED', 'GUIDE'] +1580-141083-0022-1971: ref=['I', 'AM', 'AFRAID', 'THERE', 'ARE', 'NO', 'SIGNS', 'HERE', 'SAID', 'HE'] +1580-141083-0022-1971: hyp=['I', 'AM', 'AFRAID', 'THERE', 'ARE', 'NO', 'SIGNS', 'HERE', 'SAID', 'HE'] +1580-141083-0023-1972: ref=['ONE', 'COULD', 'HARDLY', 'HOPE', 'FOR', 'ANY', 'UPON', 'SO', 'DRY', 'A', 'DAY'] +1580-141083-0023-1972: hyp=['ONE', 'COULD', 'HARDLY', 'HOPE', 'FOR', 'ANY', 'UPON', 'SO', 'DRY', 'A', 'DAY'] +1580-141083-0024-1973: ref=['YOU', 'LEFT', 'HIM', 'IN', 'A', 'CHAIR', 'YOU', 'SAY', 'WHICH', 'CHAIR', 'BY', 'THE', 'WINDOW', 'THERE'] +1580-141083-0024-1973: hyp=['YOU', 'LEFT', 'HIM', 'IN', 'A', 'CHAIR', 'YOU', 'SAY', 'WHICH', 'CHAIR', 'BY', 'THE', 'WINDOW', 'THERE'] +1580-141083-0025-1974: ref=['THE', 'MAN', 'ENTERED', 'AND', 'TOOK', 'THE', 'PAPERS', 'SHEET', 'BY', 'SHEET', 'FROM', 'THE', 'CENTRAL', 'TABLE'] +1580-141083-0025-1974: hyp=['THE', 'MEN', 'ENTERED', 'AND', 'TOOK', 'THE', 'PAPERS', 'SHEET', 'BY', 'SHEET', 'FROM', 'THE', 'CENTRAL', 'TABLE'] +1580-141083-0026-1975: ref=['AS', 'A', 'MATTER', 'OF', 'FACT', 'HE', 'COULD', 'NOT', 'SAID', 'SOAMES', 'FOR', 'I', 'ENTERED', 'BY', 'THE', 'SIDE', 'DOOR'] +1580-141083-0026-1975: hyp=['AS', 'A', 'MATTER', 'OF', 'FACT', 'HE', 'COULD', 'NOT', 'SAID', 'SOLMES', 'FOR', 'I', 'ENTERED', 'BY', 'THE', 'SIDE', 'DOOR'] +1580-141083-0027-1976: ref=['HOW', 'LONG', 'WOULD', 'IT', 'TAKE', 'HIM', 'TO', 'DO', 'THAT', 'USING', 'EVERY', 'POSSIBLE', 'CONTRACTION', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'NOT', 'LESS'] +1580-141083-0027-1976: hyp=['HOW', 'LONG', 'WOULD', 'IT', 'TAKE', 'HIM', 'TO', 'DO', 'THAT', 'USING', 'EVERY', 'POSSIBLE', 'CONTRACTION', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'NOT', 'LESS'] +1580-141083-0028-1977: ref=['THEN', 'HE', 'TOSSED', 'IT', 'DOWN', 'AND', 'SEIZED', 'THE', 'NEXT'] +1580-141083-0028-1977: hyp=['THEN', 'HE', 'TOSSED', 'IT', 'DOWN', 'AND', 'SEIZED', 'THE', 'NEXT'] +1580-141083-0029-1978: ref=['HE', 'WAS', 'IN', 'THE', 'MIDST', 'OF', 'THAT', 'WHEN', 'YOUR', 'RETURN', 'CAUSED', 'HIM', 'TO', 'MAKE', 'A', 'VERY', 'HURRIED', 'RETREAT', 'VERY', 'HURRIED', 'SINCE', 'HE', 'HAD', 'NOT', 'TIME', 'TO', 'REPLACE', 'THE', 'PAPERS', 'WHICH', 'WOULD', 'TELL', 'YOU', 'THAT', 'HE', 'HAD', 'BEEN', 'THERE'] +1580-141083-0029-1978: hyp=['HE', 'WAS', 'IN', 'THE', 'MIDST', 'OF', 'THAT', 'WHEN', 'YOUR', 'RETURN', 'CAUSED', 'HIM', 'TO', 'MAKE', 'A', 'VERY', 'HURRIED', 'RETREAT', 'VERY', 'HURRIED', 'SINCE', 'HE', 'HAD', 'NOT', 'TIME', 'TO', 'REPLACE', 'THE', 'PAPERS', 'WHICH', 'WOULD', 'TELL', 'YOU', 'THAT', 'HE', 'HAD', 'BEEN', 'THERE'] +1580-141083-0030-1979: ref=['MISTER', 'SOAMES', 'WAS', 'SOMEWHAT', 'OVERWHELMED', 'BY', 'THIS', 'FLOOD', 'OF', 'INFORMATION'] +1580-141083-0030-1979: hyp=['MISTER', 'PSALMS', 'WAS', 'SOMEWHAT', 'OVERWHELMED', 'BY', 'THIS', 'FLOOD', 'OF', 'INFORMATION'] +1580-141083-0031-1980: ref=['HOLMES', 'HELD', 'OUT', 'A', 'SMALL', 'CHIP', 'WITH', 'THE', 'LETTERS', 'N', 'N', 'AND', 'A', 'SPACE', 'OF', 'CLEAR', 'WOOD', 'AFTER', 'THEM', 'YOU', 'SEE'] +1580-141083-0031-1980: hyp=['HOLMES', 'HELD', 'OUT', 'A', 'SMALL', 'CHIP', 'WITH', 'THE', 'LETTERS', 'N', 'N', 'AND', 'A', 'SPACE', 'OF', 'CLEAR', 'WOOD', 'AFTER', 'THEM', 'YOU', 'SEE'] +1580-141083-0032-1981: ref=['WATSON', 'I', 'HAVE', 'ALWAYS', 'DONE', 'YOU', 'AN', 'INJUSTICE', 'THERE', 
'ARE', 'OTHERS'] +1580-141083-0032-1981: hyp=['WATSON', 'I', 'HAVE', 'ALWAYS', 'DONE', 'YOU', 'AND', 'INJUSTICE', 'THERE', 'ARE', 'OTHERS'] +1580-141083-0033-1982: ref=['I', 'WAS', 'HOPING', 'THAT', 'IF', 'THE', 'PAPER', 'ON', 'WHICH', 'HE', 'WROTE', 'WAS', 'THIN', 'SOME', 'TRACE', 'OF', 'IT', 'MIGHT', 'COME', 'THROUGH', 'UPON', 'THIS', 'POLISHED', 'SURFACE', 'NO', 'I', 'SEE', 'NOTHING'] +1580-141083-0033-1982: hyp=['I', 'WAS', 'HOPING', 'THAT', 'IF', 'THE', 'PAPER', 'ON', 'WHICH', 'HE', 'WROTE', 'WAS', 'THIN', 'SOME', 'TRACE', 'OF', 'IT', 'MIGHT', 'COME', 'THROUGH', 'UPON', 'THIS', 'POLISHED', 'SURFACE', 'NO', 'I', 'SEE', 'NOTHING'] +1580-141083-0034-1983: ref=['AS', 'HOLMES', 'DREW', 'THE', 'CURTAIN', 'I', 'WAS', 'AWARE', 'FROM', 'SOME', 'LITTLE', 'RIGIDITY', 'AND', 'ALERTNESS', 'OF', 'HIS', 'ATTITUDE', 'THAT', 'HE', 'WAS', 'PREPARED', 'FOR', 'AN', 'EMERGENCY'] +1580-141083-0034-1983: hyp=['AS', 'HOLMES', 'DREW', 'THE', 'CURTAIN', 'I', 'WAS', 'AWARE', 'FROM', 'SOME', 'LITTLE', 'RIGIDITY', 'AND', 'INERTNESS', 'OF', 'HIS', 'ATTITUDE', 'THAT', 'HE', 'WAS', 'PREPARED', 'FOR', 'AN', 'EMERGENCY'] +1580-141083-0035-1984: ref=['HOLMES', 'TURNED', 'AWAY', 'AND', 'STOOPED', 'SUDDENLY', 'TO', 'THE', 'FLOOR', 'HALLOA', "WHAT'S", 'THIS'] +1580-141083-0035-1984: hyp=['HOLMES', 'TURNED', 'AWAY', 'AND', 'STOOPED', 'SUDDENLY', 'TO', 'THE', 'FLOOR', 'HULLO', 'WHAT', 'IS', 'THIS'] +1580-141083-0036-1985: ref=['HOLMES', 'HELD', 'IT', 'OUT', 'ON', 'HIS', 'OPEN', 'PALM', 'IN', 'THE', 'GLARE', 'OF', 'THE', 'ELECTRIC', 'LIGHT'] +1580-141083-0036-1985: hyp=['HOLMES', 'HELD', 'IT', 'OUT', 'ON', 'HIS', 'OPEN', 'PALM', 'IN', 'THE', 'GLARE', 'OF', 'THE', 'ELECTRIC', 'LIGHT'] +1580-141083-0037-1986: ref=['WHAT', 'COULD', 'HE', 'DO', 'HE', 'CAUGHT', 'UP', 'EVERYTHING', 'WHICH', 'WOULD', 'BETRAY', 'HIM', 'AND', 'HE', 'RUSHED', 'INTO', 'YOUR', 'BEDROOM', 'TO', 'CONCEAL', 'HIMSELF'] +1580-141083-0037-1986: hyp=['WHAT', 'COULD', 'HE', 'DO', 'HE', 'CAUGHT', 'UP', 'EVERYTHING', 'WHICH', 'WOULD', 'BETRAY', 'HIM', 'AND', 'HE', 'RUSHED', 'INTO', 'YOUR', 'BEDROOM', 'TO', 'CONCEAL', 'HIMSELF'] +1580-141083-0038-1987: ref=['I', 'UNDERSTAND', 'YOU', 'TO', 'SAY', 'THAT', 'THERE', 'ARE', 'THREE', 'STUDENTS', 'WHO', 'USE', 'THIS', 'STAIR', 'AND', 'ARE', 'IN', 'THE', 'HABIT', 'OF', 'PASSING', 'YOUR', 'DOOR', 'YES', 'THERE', 'ARE'] +1580-141083-0038-1987: hyp=['I', 'UNDERSTAND', 'YOU', 'TO', 'SAY', 'THAT', 'THERE', 'ARE', 'THREE', 'STUDENTS', 'WHO', 'USE', 'THIS', 'STARE', 'AND', 'ARE', 'IN', 'THE', 'HABIT', 'OF', 'PASSING', 'YOUR', 'DOOR', 'YES', 'THERE', 'ARE'] +1580-141083-0039-1988: ref=['AND', 'THEY', 'ARE', 'ALL', 'IN', 'FOR', 'THIS', 'EXAMINATION', 'YES'] +1580-141083-0039-1988: hyp=['AND', 'THEY', 'ARE', 'ALL', 'IN', 'FOR', 'THE', 'EXAMINATION', 'YES'] +1580-141083-0040-1989: ref=['ONE', 'HARDLY', 'LIKES', 'TO', 'THROW', 'SUSPICION', 'WHERE', 'THERE', 'ARE', 'NO', 'PROOFS'] +1580-141083-0040-1989: hyp=['ONE', 'HARDLY', 'LIKES', 'TO', 'THROW', 'SUSPICION', 'WHERE', 'THERE', 'ARE', 'NO', 'PROOFS'] +1580-141083-0041-1990: ref=['LET', 'US', 'HEAR', 'THE', 'SUSPICIONS', 'I', 'WILL', 'LOOK', 'AFTER', 'THE', 'PROOFS'] +1580-141083-0041-1990: hyp=['LET', 'US', 'SEE', 'THE', 'SUSPICIONS', 'I', 'WILL', 'LOOK', 'AFTER', 'THE', 'PROOFS'] +1580-141083-0042-1991: ref=['MY', 'SCHOLAR', 'HAS', 'BEEN', 'LEFT', 'VERY', 'POOR', 'BUT', 'HE', 'IS', 'HARD', 'WORKING', 'AND', 'INDUSTRIOUS', 'HE', 'WILL', 'DO', 'WELL'] +1580-141083-0042-1991: hyp=['MY', 'SCHOLAR', 'HAS', 'BEEN', 'LEFT', 'VERY', 'POOR', 'BUT', 'HE', 'IS', 'HARD', 'WORKING', 'AND', 
'INDUSTRIOUS', 'HE', 'WILL', 'DO', 'WELL'] +1580-141083-0043-1992: ref=['THE', 'TOP', 'FLOOR', 'BELONGS', 'TO', 'MILES', 'MC', 'LAREN'] +1580-141083-0043-1992: hyp=['THE', 'TOP', 'FLOOR', 'BELONGS', 'TO', 'MYLES', 'MC', 'LAREN'] +1580-141083-0044-1993: ref=['I', 'DARE', 'NOT', 'GO', 'SO', 'FAR', 'AS', 'THAT', 'BUT', 'OF', 'THE', 'THREE', 'HE', 'IS', 'PERHAPS', 'THE', 'LEAST', 'UNLIKELY'] +1580-141083-0044-1993: hyp=['I', 'DARE', 'NOT', 'GO', 'SO', 'FAR', 'AS', 'THAT', 'BUT', 'OF', 'THE', 'THREE', 'HE', 'IS', 'PERHAPS', 'THE', 'LEAST', 'UNLIKELY'] +1580-141083-0045-1994: ref=['HE', 'WAS', 'STILL', 'SUFFERING', 'FROM', 'THIS', 'SUDDEN', 'DISTURBANCE', 'OF', 'THE', 'QUIET', 'ROUTINE', 'OF', 'HIS', 'LIFE'] +1580-141083-0045-1994: hyp=['HE', 'WAS', 'STILL', 'SUFFERING', 'FROM', 'THIS', 'SUDDEN', 'DISTURBANCE', 'OF', 'THE', 'QUIET', 'ROUTINE', 'OF', 'HIS', 'LIFE'] +1580-141083-0046-1995: ref=['BUT', 'I', 'HAVE', 'OCCASIONALLY', 'DONE', 'THE', 'SAME', 'THING', 'AT', 'OTHER', 'TIMES'] +1580-141083-0046-1995: hyp=['BUT', 'I', 'HAVE', 'OCCASIONALLY', 'DONE', 'THE', 'SAME', 'THING', 'AT', 'OTHER', 'TIMES'] +1580-141083-0047-1996: ref=['DID', 'YOU', 'LOOK', 'AT', 'THESE', 'PAPERS', 'ON', 'THE', 'TABLE'] +1580-141083-0047-1996: hyp=['DID', 'YOU', 'LOOK', 'AT', 'THESE', 'PAPERS', 'ON', 'THE', 'TABLE'] +1580-141083-0048-1997: ref=['HOW', 'CAME', 'YOU', 'TO', 'LEAVE', 'THE', 'KEY', 'IN', 'THE', 'DOOR'] +1580-141083-0048-1997: hyp=['HOW', 'CAME', 'YOU', 'TO', 'LEAVE', 'THE', 'KEY', 'IN', 'THE', 'DOOR'] +1580-141083-0049-1998: ref=['ANYONE', 'IN', 'THE', 'ROOM', 'COULD', 'GET', 'OUT', 'YES', 'SIR'] +1580-141083-0049-1998: hyp=['ANY', 'ONE', 'IN', 'THE', 'ROOM', 'COULD', 'GET', 'OUT', 'YES', 'SIR'] +1580-141083-0050-1999: ref=['I', 'REALLY', "DON'T", 'THINK', 'HE', 'KNEW', 'MUCH', 'ABOUT', 'IT', 'MISTER', 'HOLMES'] +1580-141083-0050-1999: hyp=['I', 'HAVE', 'REALLY', "DON'T", 'THINK', 'HE', 'KNEW', 'MUCH', 'ABOUT', 'IT', 'MISTER', 'HOLMES'] +1580-141083-0051-2000: ref=['ONLY', 'FOR', 'A', 'MINUTE', 'OR', 'SO'] +1580-141083-0051-2000: hyp=['ONLY', 'FOR', 'A', 'MINUTE', 'OR', 'SO'] +1580-141083-0052-2001: ref=['OH', 'I', 'WOULD', 'NOT', 'VENTURE', 'TO', 'SAY', 'SIR'] +1580-141083-0052-2001: hyp=['OH', 'I', 'WOULD', 'NOT', 'VENTURE', 'TO', 'SAY', 'SIR'] +1580-141083-0053-2002: ref=['YOU', "HAVEN'T", 'SEEN', 'ANY', 'OF', 'THEM', 'NO', 'SIR'] +1580-141083-0053-2002: hyp=['YOU', "HAVEN'T", 'SEEN', 'ANY', 'OF', 'THEM', 'NO', 'SIR'] +1580-141084-0000-2003: ref=['IT', 'WAS', 'THE', 'INDIAN', 'WHOSE', 'DARK', 'SILHOUETTE', 'APPEARED', 'SUDDENLY', 'UPON', 'HIS', 'BLIND'] +1580-141084-0000-2003: hyp=['IT', 'WAS', 'THE', 'INDIAN', 'WHOSE', 'DARK', 'SILHOUETTE', 'APPEARED', 'SUDDENLY', 'UPON', 'HIS', 'BLIND'] +1580-141084-0001-2004: ref=['HE', 'WAS', 'PACING', 'SWIFTLY', 'UP', 'AND', 'DOWN', 'HIS', 'ROOM'] +1580-141084-0001-2004: hyp=['HE', 'WAS', 'PACING', 'SWIFTLY', 'UP', 'AND', 'DOWN', 'HIS', 'ROOM'] +1580-141084-0002-2005: ref=['THIS', 'SET', 'OF', 'ROOMS', 'IS', 'QUITE', 'THE', 'OLDEST', 'IN', 'THE', 'COLLEGE', 'AND', 'IT', 'IS', 'NOT', 'UNUSUAL', 'FOR', 'VISITORS', 'TO', 'GO', 'OVER', 'THEM'] +1580-141084-0002-2005: hyp=['THE', 'SET', 'OF', 'ROOMS', 'IS', 'QUITE', 'THE', 'OLDEST', 'IN', 'THE', 'COLLEGE', 'AND', 'IT', 'IS', 'NOT', 'UNUSUAL', 'FOR', 'VISITORS', 'TO', 'GO', 'OVER', 'THEM'] +1580-141084-0003-2006: ref=['NO', 'NAMES', 'PLEASE', 'SAID', 'HOLMES', 'AS', 'WE', 'KNOCKED', 'AT', "GILCHRIST'S", 'DOOR'] +1580-141084-0003-2006: hyp=['NO', 'NAMES', 'PLEASE', 'SAID', 'HOLMES', 'AS', 'WE', 'KNOCKED', 'AT', 
"GILCRE'S", 'DOOR'] +1580-141084-0004-2007: ref=['OF', 'COURSE', 'HE', 'DID', 'NOT', 'REALIZE', 'THAT', 'IT', 'WAS', 'I', 'WHO', 'WAS', 'KNOCKING', 'BUT', 'NONE', 'THE', 'LESS', 'HIS', 'CONDUCT', 'WAS', 'VERY', 'UNCOURTEOUS', 'AND', 'INDEED', 'UNDER', 'THE', 'CIRCUMSTANCES', 'RATHER', 'SUSPICIOUS'] +1580-141084-0004-2007: hyp=['OF', 'COURSE', 'HE', 'DID', 'NOT', 'REALIZE', 'THAT', 'IT', 'WAS', 'I', 'WHO', 'WAS', 'KNOCKING', 'BUT', 'NONE', 'THE', 'LESS', 'HIS', 'CONDUCT', 'WAS', 'VERY', 'UNCOURTEOUS', 'AND', 'INDEED', 'UNDER', 'THE', 'CIRCUMSTANCES', 'RATHER', 'SUSPICIOUS'] +1580-141084-0005-2008: ref=['THAT', 'IS', 'VERY', 'IMPORTANT', 'SAID', 'HOLMES'] +1580-141084-0005-2008: hyp=['THAT', 'IS', 'VERY', 'IMPORTANT', 'SAID', 'HOLMES'] +1580-141084-0006-2009: ref=['YOU', "DON'T", 'SEEM', 'TO', 'REALIZE', 'THE', 'POSITION'] +1580-141084-0006-2009: hyp=['YOU', "DON'T", 'SEEM', 'TO', 'REALIZE', 'THE', 'POSITION'] +1580-141084-0007-2010: ref=['TO', 'MORROW', 'IS', 'THE', 'EXAMINATION'] +1580-141084-0007-2010: hyp=['TO', 'MORROW', 'IS', 'THE', 'EXAMINATION'] +1580-141084-0008-2011: ref=['I', 'CANNOT', 'ALLOW', 'THE', 'EXAMINATION', 'TO', 'BE', 'HELD', 'IF', 'ONE', 'OF', 'THE', 'PAPERS', 'HAS', 'BEEN', 'TAMPERED', 'WITH', 'THE', 'SITUATION', 'MUST', 'BE', 'FACED'] +1580-141084-0008-2011: hyp=['I', 'CANNOT', 'ALLOW', 'THE', 'EXAMINATION', 'TO', 'BE', 'HELD', 'IF', 'ONE', 'OF', 'THE', 'PAPERS', 'HAS', 'BEEN', 'TAMPERED', 'WITH', 'THE', 'SITUATION', 'MUST', 'BE', 'FACED'] +1580-141084-0009-2012: ref=['IT', 'IS', 'POSSIBLE', 'THAT', 'I', 'MAY', 'BE', 'IN', 'A', 'POSITION', 'THEN', 'TO', 'INDICATE', 'SOME', 'COURSE', 'OF', 'ACTION'] +1580-141084-0009-2012: hyp=['IT', 'IS', 'POSSIBLE', 'THAT', 'I', 'MAY', 'BE', 'IN', 'A', 'POSITION', 'THEN', 'TO', 'INDICATE', 'SOME', 'COURSE', 'OF', 'ACTION'] +1580-141084-0010-2013: ref=['I', 'WILL', 'TAKE', 'THE', 'BLACK', 'CLAY', 'WITH', 'ME', 'ALSO', 'THE', 'PENCIL', 'CUTTINGS', 'GOOD', 'BYE'] +1580-141084-0010-2013: hyp=['I', 'WILL', 'TAKE', 'THE', 'BLACK', 'CLAY', 'WITH', 'ME', 'ALSO', 'THE', 'PENCIL', 'CUTTINGS', 'GOOD', 'BY'] +1580-141084-0011-2014: ref=['WHEN', 'WE', 'WERE', 'OUT', 'IN', 'THE', 'DARKNESS', 'OF', 'THE', 'QUADRANGLE', 'WE', 'AGAIN', 'LOOKED', 'UP', 'AT', 'THE', 'WINDOWS'] +1580-141084-0011-2014: hyp=['WHEN', 'WE', 'WERE', 'OUT', 'IN', 'THE', 'DARKNESS', 'OF', 'THE', 'QUADRANGLE', 'WE', 'AGAIN', 'LOOKED', 'UP', 'AT', 'THE', 'WINDOWS'] +1580-141084-0012-2015: ref=['THE', 'FOUL', 'MOUTHED', 'FELLOW', 'AT', 'THE', 'TOP'] +1580-141084-0012-2015: hyp=['THE', 'FOUL', 'MOUTHED', 'FELLOW', 'AT', 'THE', 'TOP'] +1580-141084-0013-2016: ref=['HE', 'IS', 'THE', 'ONE', 'WITH', 'THE', 'WORST', 'RECORD'] +1580-141084-0013-2016: hyp=['HE', 'IS', 'THE', 'ONE', 'WITH', 'THE', 'WORST', 'RECORD'] +1580-141084-0014-2017: ref=['WHY', 'BANNISTER', 'THE', 'SERVANT', "WHAT'S", 'HIS', 'GAME', 'IN', 'THE', 'MATTER'] +1580-141084-0014-2017: hyp=['WHY', 'BANISTER', 'THE', 'SERVANT', "WHAT'S", 'HIS', 'GAME', 'IN', 'THE', 'MATTER'] +1580-141084-0015-2018: ref=['HE', 'IMPRESSED', 'ME', 'AS', 'BEING', 'A', 'PERFECTLY', 'HONEST', 'MAN'] +1580-141084-0015-2018: hyp=['HE', 'IMPRESSED', 'ME', 'AS', 'BEING', 'A', 'PERFECTLY', 'HONEST', 'MAN'] +1580-141084-0016-2019: ref=['MY', 'FRIEND', 'DID', 'NOT', 'APPEAR', 'TO', 'BE', 'DEPRESSED', 'BY', 'HIS', 'FAILURE', 'BUT', 'SHRUGGED', 'HIS', 'SHOULDERS', 'IN', 'HALF', 'HUMOROUS', 'RESIGNATION'] +1580-141084-0016-2019: hyp=['MY', 'FRIEND', 'DID', 'NOT', 'APPEAR', 'TO', 'BE', 'DEPRESSED', 'BY', 'HIS', 'FAILURE', 'BUT', 'SHRUGGED', 'HIS', 
'SHOULDERS', 'IN', 'HALF', 'HUMOROUS', 'RESIGNATION'] +1580-141084-0017-2020: ref=['NO', 'GOOD', 'MY', 'DEAR', 'WATSON'] +1580-141084-0017-2020: hyp=['NO', 'GOOD', 'MY', 'DEAR', 'WATSON'] +1580-141084-0018-2021: ref=['I', 'THINK', 'SO', 'YOU', 'HAVE', 'FORMED', 'A', 'CONCLUSION'] +1580-141084-0018-2021: hyp=['I', 'THINK', 'SO', 'YOU', 'HAVE', 'FORMED', 'A', 'CONCLUSION'] +1580-141084-0019-2022: ref=['YES', 'MY', 'DEAR', 'WATSON', 'I', 'HAVE', 'SOLVED', 'THE', 'MYSTERY'] +1580-141084-0019-2022: hyp=['YES', 'MY', 'DEAR', 'WATSON', 'I', 'HAVE', 'SOLVED', 'THE', 'MYSTERY'] +1580-141084-0020-2023: ref=['LOOK', 'AT', 'THAT', 'HE', 'HELD', 'OUT', 'HIS', 'HAND'] +1580-141084-0020-2023: hyp=['LOOK', 'AT', 'THAT', 'HE', 'HELD', 'OUT', 'HIS', 'HAND'] +1580-141084-0021-2024: ref=['ON', 'THE', 'PALM', 'WERE', 'THREE', 'LITTLE', 'PYRAMIDS', 'OF', 'BLACK', 'DOUGHY', 'CLAY'] +1580-141084-0021-2024: hyp=['ON', 'THE', 'PALM', 'WERE', 'THREE', 'LITTLE', 'PYRAMIDS', 'OF', 'BLACK', 'DOUGHY', 'CLAY'] +1580-141084-0022-2025: ref=['AND', 'ONE', 'MORE', 'THIS', 'MORNING'] +1580-141084-0022-2025: hyp=['AND', 'ONE', 'MORE', 'THIS', 'MORNING'] +1580-141084-0023-2026: ref=['IN', 'A', 'FEW', 'HOURS', 'THE', 'EXAMINATION', 'WOULD', 'COMMENCE', 'AND', 'HE', 'WAS', 'STILL', 'IN', 'THE', 'DILEMMA', 'BETWEEN', 'MAKING', 'THE', 'FACTS', 'PUBLIC', 'AND', 'ALLOWING', 'THE', 'CULPRIT', 'TO', 'COMPETE', 'FOR', 'THE', 'VALUABLE', 'SCHOLARSHIP'] +1580-141084-0023-2026: hyp=['IN', 'A', 'FEW', 'HOURS', 'THE', 'EXAMINATION', 'WOULD', 'COMMENCE', 'AND', 'HE', 'WAS', 'STILL', 'IN', 'THE', 'DILEMMA', 'BETWEEN', 'MAKING', 'THE', 'FACTS', 'PUBLIC', 'AND', 'ALLOWING', 'THE', 'CULPRIT', 'TO', 'COMPETE', 'FOR', 'THE', 'VALUABLE', 'SCHOLARSHIP'] +1580-141084-0024-2027: ref=['HE', 'COULD', 'HARDLY', 'STAND', 'STILL', 'SO', 'GREAT', 'WAS', 'HIS', 'MENTAL', 'AGITATION', 'AND', 'HE', 'RAN', 'TOWARDS', 'HOLMES', 'WITH', 'TWO', 'EAGER', 'HANDS', 'OUTSTRETCHED', 'THANK', 'HEAVEN', 'THAT', 'YOU', 'HAVE', 'COME'] +1580-141084-0024-2027: hyp=['HE', 'COULD', 'HARDLY', 'STAND', 'STILL', 'SO', 'GREAT', 'WAS', 'HIS', 'MENTAL', 'AGITATION', 'AND', 'HE', 'RAN', 'TOWARDS', 'HOMES', 'WITH', 'TWO', 'EAGER', 'HANDS', 'OUTSTRETCHED', 'THANK', 'HEAVEN', 'THAT', 'YOU', 'HAVE', 'COME'] +1580-141084-0025-2028: ref=['YOU', 'KNOW', 'HIM', 'I', 'THINK', 'SO'] +1580-141084-0025-2028: hyp=['YOU', 'KNOW', 'HIM', 'I', 'THINK', 'SO'] +1580-141084-0026-2029: ref=['IF', 'THIS', 'MATTER', 'IS', 'NOT', 'TO', 'BECOME', 'PUBLIC', 'WE', 'MUST', 'GIVE', 'OURSELVES', 'CERTAIN', 'POWERS', 'AND', 'RESOLVE', 'OURSELVES', 'INTO', 'A', 'SMALL', 'PRIVATE', 'COURT', 'MARTIAL'] +1580-141084-0026-2029: hyp=['IF', 'THIS', 'MATTER', 'IS', 'NOT', 'TO', 'BECOME', 'PUBLIC', 'WE', 'MUST', 'GIVE', 'OURSELVES', 'CERTAIN', 'POWERS', 'AND', 'RESOLVE', 'OURSELVES', 'INTO', 'A', 'SMALL', 'PRIVATE', 'COURT', 'MARTIAL'] +1580-141084-0027-2030: ref=['NO', 'SIR', 'CERTAINLY', 'NOT'] +1580-141084-0027-2030: hyp=['NO', 'SIR', 'CERTAINLY', 'NOT'] +1580-141084-0028-2031: ref=['THERE', 'WAS', 'NO', 'MAN', 'SIR'] +1580-141084-0028-2031: hyp=['THERE', 'WAS', 'NO', 'MAN', 'SIR'] +1580-141084-0029-2032: ref=['HIS', 'TROUBLED', 'BLUE', 'EYES', 'GLANCED', 'AT', 'EACH', 'OF', 'US', 'AND', 'FINALLY', 'RESTED', 'WITH', 'AN', 'EXPRESSION', 'OF', 'BLANK', 'DISMAY', 'UPON', 'BANNISTER', 'IN', 'THE', 'FARTHER', 'CORNER'] +1580-141084-0029-2032: hyp=['HIS', 'TROUBLED', 'BLUE', 'EYES', 'GLANCED', 'AT', 'EACH', 'OF', 'US', 'AND', 'FINALLY', 'RESTED', 'WITH', 'AN', 'EXPRESSION', 'OF', 'BLANK', 'DISMAY', 'UPON', 'BANISTER', 'IN', 
'THE', 'FARTHER', 'CORNER'] +1580-141084-0030-2033: ref=['JUST', 'CLOSE', 'THE', 'DOOR', 'SAID', 'HOLMES'] +1580-141084-0030-2033: hyp=['JUST', 'CLOSE', 'THE', 'DOOR', 'SAID', 'HOLMES'] +1580-141084-0031-2034: ref=['WE', 'WANT', 'TO', 'KNOW', 'MISTER', 'GILCHRIST', 'HOW', 'YOU', 'AN', 'HONOURABLE', 'MAN', 'EVER', 'CAME', 'TO', 'COMMIT', 'SUCH', 'AN', 'ACTION', 'AS', 'THAT', 'OF', 'YESTERDAY'] +1580-141084-0031-2034: hyp=['WE', 'WANT', 'TO', 'KNOW', 'MISTER', 'GOST', 'HOW', 'YOU', 'AN', 'HONOURABLE', 'MAN', 'EVER', 'CAME', 'TO', 'COMMIT', 'SUCH', 'AN', 'ACTION', 'AS', 'THAT', 'OF', 'YESTERDAY'] +1580-141084-0032-2035: ref=['FOR', 'A', 'MOMENT', 'GILCHRIST', 'WITH', 'UPRAISED', 'HAND', 'TRIED', 'TO', 'CONTROL', 'HIS', 'WRITHING', 'FEATURES'] +1580-141084-0032-2035: hyp=['FOR', 'A', 'MOMENT', 'GILCRIS', 'WITH', 'UPRAISED', 'HAND', 'TRIED', 'TO', 'CONTROL', 'HIS', 'WRITHING', 'FEATURES'] +1580-141084-0033-2036: ref=['COME', 'COME', 'SAID', 'HOLMES', 'KINDLY', 'IT', 'IS', 'HUMAN', 'TO', 'ERR', 'AND', 'AT', 'LEAST', 'NO', 'ONE', 'CAN', 'ACCUSE', 'YOU', 'OF', 'BEING', 'A', 'CALLOUS', 'CRIMINAL'] +1580-141084-0033-2036: hyp=['COME', 'COME', 'SAID', 'HOLMES', 'KINDLY', 'IT', 'IS', 'HUMAN', 'TO', 'ERR', 'AND', 'AT', 'LEAST', 'NO', 'ONE', 'CAN', 'ACCUSE', 'YOU', 'OF', 'BEING', 'A', 'CALLOUS', 'CRIMINAL'] +1580-141084-0034-2037: ref=['WELL', 'WELL', "DON'T", 'TROUBLE', 'TO', 'ANSWER', 'LISTEN', 'AND', 'SEE', 'THAT', 'I', 'DO', 'YOU', 'NO', 'INJUSTICE'] +1580-141084-0034-2037: hyp=['WELL', 'WELL', "DON'T", 'TROUBLE', 'TO', 'ANSWER', 'LISTEN', 'AND', 'SEE', 'THAT', 'I', 'DO', 'YOU', 'KNOW', 'INJUSTICE'] +1580-141084-0035-2038: ref=['HE', 'COULD', 'EXAMINE', 'THE', 'PAPERS', 'IN', 'HIS', 'OWN', 'OFFICE'] +1580-141084-0035-2038: hyp=['HE', 'COULD', 'EXAMINE', 'THE', 'PAPERS', 'IN', 'HIS', 'OWN', 'OFFICE'] +1580-141084-0036-2039: ref=['THE', 'INDIAN', 'I', 'ALSO', 'THOUGHT', 'NOTHING', 'OF'] +1580-141084-0036-2039: hyp=['THE', 'INDIAN', 'I', 'ALSO', 'THOUGHT', 'NOTHING', 'OF'] +1580-141084-0037-2040: ref=['WHEN', 'I', 'APPROACHED', 'YOUR', 'ROOM', 'I', 'EXAMINED', 'THE', 'WINDOW'] +1580-141084-0037-2040: hyp=['WHEN', 'I', 'APPROACHED', 'YOUR', 'ROOM', 'I', 'EXAMINED', 'THE', 'WINDOW'] +1580-141084-0038-2041: ref=['NO', 'ONE', 'LESS', 'THAN', 'THAT', 'WOULD', 'HAVE', 'A', 'CHANCE'] +1580-141084-0038-2041: hyp=['NO', 'ONE', 'LESS', 'THAN', 'THAT', 'WOULD', 'HAVE', 'A', 'CHANCE'] +1580-141084-0039-2042: ref=['I', 'ENTERED', 'AND', 'I', 'TOOK', 'YOU', 'INTO', 'MY', 'CONFIDENCE', 'AS', 'TO', 'THE', 'SUGGESTIONS', 'OF', 'THE', 'SIDE', 'TABLE'] +1580-141084-0039-2042: hyp=['I', 'ENTERED', 'AND', 'I', 'TOOK', 'YOU', 'INTO', 'MY', 'CONFIDENCE', 'AS', 'TO', 'THE', 'SUGGESTIONS', 'OF', 'THE', 'SIDE', 'TABLE'] +1580-141084-0040-2043: ref=['HE', 'RETURNED', 'CARRYING', 'HIS', 'JUMPING', 'SHOES', 'WHICH', 'ARE', 'PROVIDED', 'AS', 'YOU', 'ARE', 'AWARE', 'WITH', 'SEVERAL', 'SHARP', 'SPIKES'] +1580-141084-0040-2043: hyp=['HE', 'RETURNED', 'CARRYING', 'HIS', 'JUMPING', 'SHOES', 'WHICH', 'ARE', 'PROVIDED', 'AS', 'YOU', 'ARE', 'WEAR', 'WITH', 'SEVERAL', 'SHARP', 'SPIKES'] +1580-141084-0041-2044: ref=['NO', 'HARM', 'WOULD', 'HAVE', 'BEEN', 'DONE', 'HAD', 'IT', 'NOT', 'BEEN', 'THAT', 'AS', 'HE', 'PASSED', 'YOUR', 'DOOR', 'HE', 'PERCEIVED', 'THE', 'KEY', 'WHICH', 'HAD', 'BEEN', 'LEFT', 'BY', 'THE', 'CARELESSNESS', 'OF', 'YOUR', 'SERVANT'] +1580-141084-0041-2044: hyp=['NO', 'HARM', 'WOULD', 'HAVE', 'BEEN', 'DONE', 'HAD', 'IT', 'NOT', 'BEEN', 'THAT', 'AS', 'HE', 'PASSED', 'YOUR', 'DOOR', 'HE', 'PERCEIVED', 'THE', 'KEY', 'WHICH', 
'HAD', 'BEEN', 'LEFT', 'BY', 'THE', 'CARELESSNESS', 'OF', 'YOUR', 'SERVANT'] +1580-141084-0042-2045: ref=['A', 'SUDDEN', 'IMPULSE', 'CAME', 'OVER', 'HIM', 'TO', 'ENTER', 'AND', 'SEE', 'IF', 'THEY', 'WERE', 'INDEED', 'THE', 'PROOFS'] +1580-141084-0042-2045: hyp=['A', 'SUDDEN', 'IMPULSE', 'CAME', 'OVER', 'HIM', 'TO', 'ENTER', 'AND', 'SEE', 'IF', 'THEY', 'WERE', 'INDEED', 'THE', 'PROOFS'] +1580-141084-0043-2046: ref=['HE', 'PUT', 'HIS', 'SHOES', 'ON', 'THE', 'TABLE'] +1580-141084-0043-2046: hyp=['HE', 'PUT', 'HIS', 'SHOES', 'ON', 'THE', 'TABLE'] +1580-141084-0044-2047: ref=['GLOVES', 'SAID', 'THE', 'YOUNG', 'MAN'] +1580-141084-0044-2047: hyp=['GLOVES', 'SAID', 'THE', 'YOUNG', 'MAN'] +1580-141084-0045-2048: ref=['SUDDENLY', 'HE', 'HEARD', 'HIM', 'AT', 'THE', 'VERY', 'DOOR', 'THERE', 'WAS', 'NO', 'POSSIBLE', 'ESCAPE'] +1580-141084-0045-2048: hyp=['SUDDENLY', 'HE', 'HEARD', 'HIM', 'AT', 'THE', 'VERY', 'DOOR', 'THERE', 'WAS', 'NO', 'POSSIBLE', 'ESCAPE'] +1580-141084-0046-2049: ref=['HAVE', 'I', 'TOLD', 'THE', 'TRUTH', 'MISTER', 'GILCHRIST'] +1580-141084-0046-2049: hyp=['HAVE', 'I', 'TOLD', 'THE', 'TRUTH', 'MISTER', 'GORIST'] +1580-141084-0047-2050: ref=['I', 'HAVE', 'A', 'LETTER', 'HERE', 'MISTER', 'SOAMES', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'EARLY', 'THIS', 'MORNING', 'IN', 'THE', 'MIDDLE', 'OF', 'A', 'RESTLESS', 'NIGHT'] +1580-141084-0047-2050: hyp=['I', 'HAVE', 'A', 'LETTER', 'HERE', 'MISTER', 'SOLMES', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'EARLY', 'THIS', 'MORNING', 'IN', 'THE', 'MIDDLE', 'OF', 'A', 'RESTLESS', 'NIGHT'] +1580-141084-0048-2051: ref=['IT', 'WILL', 'BE', 'CLEAR', 'TO', 'YOU', 'FROM', 'WHAT', 'I', 'HAVE', 'SAID', 'THAT', 'ONLY', 'YOU', 'COULD', 'HAVE', 'LET', 'THIS', 'YOUNG', 'MAN', 'OUT', 'SINCE', 'YOU', 'WERE', 'LEFT', 'IN', 'THE', 'ROOM', 'AND', 'MUST', 'HAVE', 'LOCKED', 'THE', 'DOOR', 'WHEN', 'YOU', 'WENT', 'OUT'] +1580-141084-0048-2051: hyp=['IT', 'WOULD', 'BE', 'CLEAR', 'TO', 'YOU', 'FROM', 'WHAT', 'I', 'HAVE', 'SAID', 'THAT', 'ONLY', 'YOU', 'COULD', 'HAVE', 'LET', 'THIS', 'YOUNG', 'MAN', 'OUT', 'SINCE', 'YOU', 'WERE', 'LEFT', 'IN', 'THE', 'ROOM', 'AND', 'MUST', 'HAVE', 'LOCKED', 'THE', 'DOOR', 'WHEN', 'YOU', 'WENT', 'OUT'] +1580-141084-0049-2052: ref=['IT', 'WAS', 'SIMPLE', 'ENOUGH', 'SIR', 'IF', 'YOU', 'ONLY', 'HAD', 'KNOWN', 'BUT', 'WITH', 'ALL', 'YOUR', 'CLEVERNESS', 'IT', 'WAS', 'IMPOSSIBLE', 'THAT', 'YOU', 'COULD', 'KNOW'] +1580-141084-0049-2052: hyp=['IT', 'WAS', 'SIMPLE', 'ENOUGH', 'SIR', 'IF', 'YOU', 'ONLY', 'HAD', 'KNOWN', 'BUT', 'WITH', 'ALL', 'YOUR', 'CLEVERNESS', 'IT', 'WAS', 'IMPOSSIBLE', 'THAT', 'YOU', 'COULD', 'KNOW'] +1580-141084-0050-2053: ref=['IF', 'MISTER', 'SOAMES', 'SAW', 'THEM', 'THE', 'GAME', 'WAS', 'UP'] +1580-141084-0050-2053: hyp=['IF', 'MISTER', 'SOLMES', 'SAW', 'THEM', 'THE', 'GAME', 'WAS', 'UP'] +1995-1826-0000-750: ref=['IN', 'THE', 'DEBATE', 'BETWEEN', 'THE', 'SENIOR', 'SOCIETIES', 'HER', 'DEFENCE', 'OF', 'THE', 'FIFTEENTH', 'AMENDMENT', 'HAD', 'BEEN', 'NOT', 'ONLY', 'A', 'NOTABLE', 'BIT', 'OF', 'REASONING', 'BUT', 'DELIVERED', 'WITH', 'REAL', 'ENTHUSIASM'] +1995-1826-0000-750: hyp=['IN', 'THE', 'DEBATE', 'BETWEEN', 'THE', 'SENIOR', 'SOCIETIES', 'HER', 'DEFENCE', 'OF', 'THE', 'FIFTEENTH', 'AMENDMENT', 'HAD', 'BEEN', 'NOT', 'ONLY', 'A', 'NOTABLE', 'BIT', 'OF', 'REASONING', 'BUT', 'DELIVERED', 'WITH', 'REAL', 'ENTHUSIASM'] +1995-1826-0001-751: ref=['THE', 'SOUTH', 'SHE', 'HAD', 'NOT', 'THOUGHT', 'OF', 'SERIOUSLY', 'AND', 'YET', 'KNOWING', 'OF', 'ITS', 'DELIGHTFUL', 'HOSPITALITY', 'AND', 'MILD', 'CLIMATE', 'SHE', 'WAS', 'NOT', 'AVERSE', 'TO', 
'CHARLESTON', 'OR', 'NEW', 'ORLEANS'] +1995-1826-0001-751: hyp=['THE', 'SOUTH', 'SHE', 'HAD', 'NOT', 'THOUGHT', 'OF', 'SERIOUSLY', 'AND', 'YET', 'KNOWING', 'OF', 'ITS', 'DELIGHTFUL', 'HOSPITALITY', 'AND', 'MILD', 'CLIMATE', 'SHE', 'WAS', 'NOT', 'AVERSE', 'TO', 'CHARLESTON', 'OR', 'NEW', 'ORLEANS'] +1995-1826-0002-752: ref=['JOHN', 'TAYLOR', 'WHO', 'HAD', 'SUPPORTED', 'HER', 'THROUGH', 'COLLEGE', 'WAS', 'INTERESTED', 'IN', 'COTTON'] +1995-1826-0002-752: hyp=['JOHN', 'TAYLOR', 'WHO', 'HAD', 'SUPPORTED', 'HER', 'THROUGH', 'COLLEGE', 'WAS', 'INTERESTED', 'IN', 'COTTON'] +1995-1826-0003-753: ref=['BETTER', 'GO', 'HE', 'HAD', 'COUNSELLED', 'SENTENTIOUSLY'] +1995-1826-0003-753: hyp=['BETTER', 'GO', 'HE', 'HAD', 'COUNSELS', 'SENTENTIOUSLY'] +1995-1826-0004-754: ref=['MIGHT', 'LEARN', 'SOMETHING', 'USEFUL', 'DOWN', 'THERE'] +1995-1826-0004-754: hyp=['MIGHT', 'LEARN', 'SOMETHING', 'USEFUL', 'DOWN', 'THERE'] +1995-1826-0005-755: ref=['BUT', 'JOHN', "THERE'S", 'NO', 'SOCIETY', 'JUST', 'ELEMENTARY', 'WORK'] +1995-1826-0005-755: hyp=['BUT', 'JOHN', "THERE'S", 'NO', 'SOCIETY', 'JUST', 'ELEMENTARY', 'WORK'] +1995-1826-0006-756: ref=['BEEN', 'LOOKING', 'UP', 'TOOMS', 'COUNTY'] +1995-1826-0006-756: hyp=['BEEN', 'LOOKING', 'UP', "TOMB'S", 'COUNTY'] +1995-1826-0007-757: ref=['FIND', 'SOME', 'CRESSWELLS', 'THERE', 'BIG', 'PLANTATIONS', 'RATED', 'AT', 'TWO', 'HUNDRED', 'AND', 'FIFTY', 'THOUSAND', 'DOLLARS'] +1995-1826-0007-757: hyp=['FIVE', 'CROSS', 'WHIRLS', 'THERE', 'BIG', 'PLANTATIONS', 'RATED', 'AT', 'TWO', 'HUNDRED', 'AND', 'FIFTY', 'THOUSAND', 'DOLLARS'] +1995-1826-0008-758: ref=['SOME', 'OTHERS', 'TOO', 'BIG', 'COTTON', 'COUNTY'] +1995-1826-0008-758: hyp=['SOME', 'OTHERS', 'TOO', 'BIG', 'COTTON', 'COUNTY'] +1995-1826-0009-759: ref=['YOU', 'OUGHT', 'TO', 'KNOW', 'JOHN', 'IF', 'I', 'TEACH', 'NEGROES', "I'LL", 'SCARCELY', 'SEE', 'MUCH', 'OF', 'PEOPLE', 'IN', 'MY', 'OWN', 'CLASS'] +1995-1826-0009-759: hyp=['YOU', 'OUGHT', 'TO', 'KNOW', 'JOHN', 'IF', 'I', 'TEACH', 'NEGROES', "I'LL", 'SCARCELY', 'SEE', 'MUCH', 'OF', 'PEOPLE', 'IN', 'MY', 'OWN', 'CLASS'] +1995-1826-0010-760: ref=['AT', 'ANY', 'RATE', 'I', 'SAY', 'GO'] +1995-1826-0010-760: hyp=['AT', 'ANY', 'RATE', 'I', 'SAY', 'GO'] +1995-1826-0011-761: ref=['HERE', 'SHE', 'WAS', 'TEACHING', 'DIRTY', 'CHILDREN', 'AND', 'THE', 'SMELL', 'OF', 'CONFUSED', 'ODORS', 'AND', 'BODILY', 'PERSPIRATION', 'WAS', 'TO', 'HER', 'AT', 'TIMES', 'UNBEARABLE'] +1995-1826-0011-761: hyp=['HERE', 'SHE', 'WAS', 'TEACHING', 'DIRTY', 'CHILDREN', 'AND', 'THE', 'SMELL', 'OF', 'CONFUSED', 'ODORS', 'AND', 'BODILY', 'PERSPIRATION', 'WAS', 'TO', 'HER', 'AT', 'TIMES', 'UNBEARABLE'] +1995-1826-0012-762: ref=['SHE', 'WANTED', 'A', 'GLANCE', 'OF', 'THE', 'NEW', 'BOOKS', 'AND', 'PERIODICALS', 'AND', 'TALK', 'OF', 'GREAT', 'PHILANTHROPIES', 'AND', 'REFORMS'] +1995-1826-0012-762: hyp=['SHE', 'WANTED', 'A', 'GLANCE', 'OF', 'THE', 'NEW', 'BOOKS', 'IN', 'PERIODICALS', 'AND', 'TALK', 'OF', 'GRATE', 'PHILANTHROPIES', 'AND', 'REFORMS'] +1995-1826-0013-763: ref=['SO', 'FOR', 'THE', 'HUNDREDTH', 'TIME', 'SHE', 'WAS', 'THINKING', 'TODAY', 'AS', 'SHE', 'WALKED', 'ALONE', 'UP', 'THE', 'LANE', 'BACK', 'OF', 'THE', 'BARN', 'AND', 'THEN', 'SLOWLY', 'DOWN', 'THROUGH', 'THE', 'BOTTOMS'] +1995-1826-0013-763: hyp=['SO', 'FOR', 'THE', 'HUNDREDTH', 'TIME', 'SHE', 'WAS', 'THINKING', 'TO', 'DAY', 'AS', 'SHE', 'WALKED', 'ALONE', 'UP', 'THE', 'LANE', 'BACK', 'OF', 'THE', 'BARN', 'AND', 'THEN', 'SLOWLY', 'DOWN', 'THROUGH', 'THE', 'BOTTOMS'] +1995-1826-0014-764: ref=['COTTON', 'SHE', 'PAUSED'] +1995-1826-0014-764: 
hyp=['COTTON', 'SHE', 'PAUSED'] +1995-1826-0015-765: ref=['SHE', 'HAD', 'ALMOST', 'FORGOTTEN', 'THAT', 'IT', 'WAS', 'HERE', 'WITHIN', 'TOUCH', 'AND', 'SIGHT'] +1995-1826-0015-765: hyp=['SHE', 'HAD', 'ALMOST', 'FORGOTTEN', 'THAT', 'IT', 'WAS', 'HERE', 'WITHIN', 'TOUCH', 'IN', 'SIGHT'] +1995-1826-0016-766: ref=['THE', 'GLIMMERING', 'SEA', 'OF', 'DELICATE', 'LEAVES', 'WHISPERED', 'AND', 'MURMURED', 'BEFORE', 'HER', 'STRETCHING', 'AWAY', 'TO', 'THE', 'NORTHWARD'] +1995-1826-0016-766: hyp=['THE', 'GLIMMERING', 'SEA', 'OF', 'DELICATE', 'LEAVES', 'WHISPERED', 'AND', 'MURMURED', 'BEFORE', 'HER', 'STRETCHING', 'AWAY', 'TO', 'THE', 'NORTHWARD'] +1995-1826-0017-767: ref=['THERE', 'MIGHT', 'BE', 'A', 'BIT', 'OF', 'POETRY', 'HERE', 'AND', 'THERE', 'BUT', 'MOST', 'OF', 'THIS', 'PLACE', 'WAS', 'SUCH', 'DESPERATE', 'PROSE'] +1995-1826-0017-767: hyp=['THERE', 'MIGHT', 'BE', 'A', 'BIT', 'OF', 'POETRY', 'HERE', 'AND', 'THERE', 'BUT', 'MOST', 'OF', 'THIS', 'PLACE', 'WAS', 'SUCH', 'DESPERATE', 'PROSE'] +1995-1826-0018-768: ref=['HER', 'REGARD', 'SHIFTED', 'TO', 'THE', 'GREEN', 'STALKS', 'AND', 'LEAVES', 'AGAIN', 'AND', 'SHE', 'STARTED', 'TO', 'MOVE', 'AWAY'] +1995-1826-0018-768: hyp=['HER', 'REGARD', 'SHIFTED', 'TO', 'THE', 'GREEN', 'STALKS', 'AND', 'LEAVES', 'AGAIN', 'AND', 'SHE', 'STARTED', 'TO', 'MOVE', 'AWAY'] +1995-1826-0019-769: ref=['COTTON', 'IS', 'A', 'WONDERFUL', 'THING', 'IS', 'IT', 'NOT', 'BOYS', 'SHE', 'SAID', 'RATHER', 'PRIMLY'] +1995-1826-0019-769: hyp=['COTTON', 'IS', 'A', 'WONDERFUL', 'THING', 'IS', 'IT', 'NOT', 'BOYS', 'SHE', 'SAID', 'RATHER', 'PRIMLY'] +1995-1826-0020-770: ref=['MISS', 'TAYLOR', 'DID', 'NOT', 'KNOW', 'MUCH', 'ABOUT', 'COTTON', 'BUT', 'AT', 'LEAST', 'ONE', 'MORE', 'REMARK', 'SEEMED', 'CALLED', 'FOR'] +1995-1826-0020-770: hyp=['MISS', 'TAYLOR', 'DID', 'NOT', 'KNOW', 'MUCH', 'ABOUT', 'COTTON', 'BUT', 'AT', 'LEAST', 'ONE', 'MORE', 'REMARKED', 'SEEMED', 'CALLED', 'FOR'] +1995-1826-0021-771: ref=["DON'T", 'KNOW', 'WELL', 'OF', 'ALL', 'THINGS', 'INWARDLY', 'COMMENTED', 'MISS', 'TAYLOR', 'LITERALLY', 'BORN', 'IN', 'COTTON', 'AND', 'OH', 'WELL', 'AS', 'MUCH', 'AS', 'TO', 'ASK', "WHAT'S", 'THE', 'USE', 'SHE', 'TURNED', 'AGAIN', 'TO', 'GO'] +1995-1826-0021-771: hyp=["DON'T", 'KNOW', 'OO', 'OF', 'ALL', 'THINGS', 'INWARDLY', 'COMMENTED', 'MISS', 'TAYLOR', 'THAT', 'A', 'BORN', 'IN', 'COTTON', 'AND', 'OH', 'WELL', 'AS', 'MUCH', 'AS', 'TO', 'ASK', "WHAT'S", 'THE', 'USE', 'SHE', 'TURNED', 'AGAIN', 'TO', 'GO'] +1995-1826-0022-772: ref=['I', 'SUPPOSE', 'THOUGH', "IT'S", 'TOO', 'EARLY', 'FOR', 'THEM', 'THEN', 'CAME', 'THE', 'EXPLOSION'] +1995-1826-0022-772: hyp=['I', 'SUPPOSE', 'THOUGH', "IT'S", 'TOO', 'EARLY', 'FOR', 'THEM', 'THEN', 'CAME', 'THE', 'EXPLOSION'] +1995-1826-0023-773: ref=['GOOBERS', "DON'T", 'GROW', 'ON', 'THE', 'TOPS', 'OF', 'VINES', 'BUT', 'UNDERGROUND', 'ON', 'THE', 'ROOTS', 'LIKE', 'YAMS', 'IS', 'THAT', 'SO'] +1995-1826-0023-773: hyp=['GOULD', 'WAS', "DON'T", 'GROW', 'ON', 'THE', 'TOPS', 'OF', 'ICE', 'BUT', 'UNDER', 'GROUND', 'ON', 'THE', 'WOODS', 'LIKE', 'A', 'M', 'THAT', 'SO'] +1995-1826-0024-774: ref=['THE', 'GOLDEN', 'FLEECE', "IT'S", 'THE', 'SILVER', 'FLEECE', 'HE', 'HARKENED'] +1995-1826-0024-774: hyp=['THE', 'GOLDEN', 'FLEECE', "IT'S", 'THE', 'SILVER', 'FLEECE', 'HE', 'HEARKENED'] +1995-1826-0025-775: ref=['SOME', 'TIME', "YOU'LL", 'TELL', 'ME', 'PLEASE', "WON'T", 'YOU'] +1995-1826-0025-775: hyp=['SOMETIME', 'YOU', 'DAMNLY', 'PLEASE', "WON'T", 'YOU'] +1995-1826-0026-776: ref=['NOW', 'FOR', 'ONE', 'LITTLE', 'HALF', 'HOUR', 'SHE', 'HAD', 'BEEN', 'A', 'WOMAN', 'TALKING', 
'TO', 'A', 'BOY', 'NO', 'NOT', 'EVEN', 'THAT', 'SHE', 'HAD', 'BEEN', 'TALKING', 'JUST', 'TALKING', 'THERE', 'WERE', 'NO', 'PERSONS', 'IN', 'THE', 'CONVERSATION', 'JUST', 'THINGS', 'ONE', 'THING', 'COTTON'] +1995-1826-0026-776: hyp=['THOU', 'FOR', 'ONE', 'LITTLE', 'HALF', 'HOUR', 'SHE', 'HAD', 'BEEN', 'A', 'WOMAN', 'TALKING', 'TO', 'A', 'BOY', 'NO', 'NOT', 'EVEN', 'THAT', 'SHE', 'HAD', 'BEEN', 'TALKING', 'JUST', 'TALKING', 'THERE', 'WERE', 'NO', 'PERSONS', 'IN', 'THE', 'CONVERSATION', 'JUST', 'THINGS', 'ONE', 'THING', 'COTTON'] +1995-1836-0000-735: ref=['THE', 'HON', 'CHARLES', 'SMITH', 'MISS', "SARAH'S", 'BROTHER', 'WAS', 'WALKING', 'SWIFTLY', 'UPTOWN', 'FROM', 'MISTER', "EASTERLY'S", 'WALL', 'STREET', 'OFFICE', 'AND', 'HIS', 'FACE', 'WAS', 'PALE'] +1995-1836-0000-735: hyp=['THE', 'HON', 'SMITH', 'MISS', "SARAH'S", 'BROTHER', 'WAS', 'WALKING', 'SWIFTLY', 'UPTOWN', 'FROM', 'MISTER', "EASTERLY'S", 'WALL', 'STREET', 'OFFICE', 'AND', 'HIS', 'FACE', 'WAS', 'PALE'] +1995-1836-0001-736: ref=['AT', 'LAST', 'THE', 'COTTON', 'COMBINE', 'WAS', 'TO', 'ALL', 'APPEARANCES', 'AN', 'ASSURED', 'FACT', 'AND', 'HE', 'WAS', 'SLATED', 'FOR', 'THE', 'SENATE'] +1995-1836-0001-736: hyp=['AT', 'LAST', 'THE', 'COTTON', 'COMBINE', 'WAS', 'TO', 'ALL', 'APPEARANCES', 'AND', 'ASSURED', 'FACT', 'AND', 'HE', 'WAS', 'SLATED', 'FOR', 'THE', 'SENATE'] +1995-1836-0002-737: ref=['WHY', 'SHOULD', 'HE', 'NOT', 'BE', 'AS', 'OTHER', 'MEN'] +1995-1836-0002-737: hyp=['WHY', 'SHOULD', 'HE', 'NOT', 'BE', 'AS', 'OTHER', 'MEN'] +1995-1836-0003-738: ref=['SHE', 'WAS', 'NOT', 'HERSELF', 'A', 'NOTABLY', 'INTELLIGENT', 'WOMAN', 'SHE', 'GREATLY', 'ADMIRED', 'INTELLIGENCE', 'OR', 'WHATEVER', 'LOOKED', 'TO', 'HER', 'LIKE', 'INTELLIGENCE', 'IN', 'OTHERS'] +1995-1836-0003-738: hyp=['SHE', 'WAS', 'NOT', 'HERSELF', 'UNNOTABLY', 'INTELLIGENT', 'WOMAN', 'SHE', 'GREATLY', 'ADMIRED', 'INTELLIGENCE', 'OR', 'WHATEVER', 'LOOKED', 'TO', 'HER', 'LIKE', 'INTELLIGENCE', 'IN', 'OTHERS'] +1995-1836-0004-739: ref=['AS', 'SHE', 'AWAITED', 'HER', 'GUESTS', 'SHE', 'SURVEYED', 'THE', 'TABLE', 'WITH', 'BOTH', 'SATISFACTION', 'AND', 'DISQUIETUDE', 'FOR', 'HER', 'SOCIAL', 'FUNCTIONS', 'WERE', 'FEW', 'TONIGHT', 'THERE', 'WERE', 'SHE', 'CHECKED', 'THEM', 'OFF', 'ON', 'HER', 'FINGERS', 'SIR', 'JAMES', 'CREIGHTON', 'THE', 'RICH', 'ENGLISH', 'MANUFACTURER', 'AND', 'LADY', 'CREIGHTON', 'MISTER', 'AND', 'MISSUS', 'VANDERPOOL', 'MISTER', 'HARRY', 'CRESSWELL', 'AND', 'HIS', 'SISTER', 'JOHN', 'TAYLOR', 'AND', 'HIS', 'SISTER', 'AND', 'MISTER', 'CHARLES', 'SMITH', 'WHOM', 'THE', 'EVENING', 'PAPERS', 'MENTIONED', 'AS', 'LIKELY', 'TO', 'BE', 'UNITED', 'STATES', 'SENATOR', 'FROM', 'NEW', 'JERSEY', 'A', 'SELECTION', 'OF', 'GUESTS', 'THAT', 'HAD', 'BEEN', 'DETERMINED', 'UNKNOWN', 'TO', 'THE', 'HOSTESS', 'BY', 'THE', 'MEETING', 'OF', 'COTTON', 'INTERESTS', 'EARLIER', 'IN', 'THE', 'DAY'] +1995-1836-0004-739: hyp=['AS', 'SHE', 'AWAITED', 'HER', 'GUESS', 'SHE', 'SURVEYED', 'THE', 'TABLE', 'WITH', 'BOTH', 'SATISFACTION', 'AND', 'AS', 'QUIETUDE', 'FOR', 'HER', 'SOCIAL', 'FUNCTIONS', 'WERE', 'FEW', 'TO', 'NIGHT', 'THERE', 'WERE', 'SHE', 'CHECKED', 'THEM', 'OFF', 'ON', 'HER', 'FINGERS', 'SIR', 'JAMES', 'CRIGHTON', 'THE', 'RICH', 'ENGLISH', 'MANUFACTURER', 'AND', 'LADY', 'KRETON', 'MISTER', 'AND', 'MISSUS', 'VAN', 'DERBOOLE', 'MISTER', 'HARRY', 'CRESWELL', 'AND', 'HIS', 'SISTER', 'JOHN', 'TAYLOR', 'AND', 'HIS', 'SISTER', 'AND', 'MISTER', 'CHARLES', 'SMITH', 'WHOM', 'THE', 'EVENING', 'PAPERS', 'MENTIONED', 'AS', 'LIKELY', 'TO', 'BE', 'UTIT', 'STATES', 'SENATOR', 'FROM', 'NEW', 'JERSEY', 'A', 
'SELECTION', 'OF', 'GUESTS', 'THAT', 'HAD', 'BEEN', 'DETERMINED', 'UNKNOWN', 'TO', 'THE', 'HOSTESS', 'BY', 'THE', 'MEETING', 'OF', 'COTTON', 'INTERESTS', 'EARLIER', 'IN', 'THE', 'DAY'] +1995-1836-0005-740: ref=['MISSUS', 'GREY', 'HAD', 'MET', 'SOUTHERNERS', 'BEFORE', 'BUT', 'NOT', 'INTIMATELY', 'AND', 'SHE', 'ALWAYS', 'HAD', 'IN', 'MIND', 'VIVIDLY', 'THEIR', 'CRUELTY', 'TO', 'POOR', 'NEGROES', 'A', 'SUBJECT', 'SHE', 'MADE', 'A', 'POINT', 'OF', 'INTRODUCING', 'FORTHWITH'] +1995-1836-0005-740: hyp=['MISSUS', 'GRAY', 'HAD', 'MET', 'SOUTHERNERS', 'BEFORE', 'BUT', 'NOT', 'INTIMATELY', 'AND', 'SHE', 'ALWAYS', 'HAD', 'IN', 'MIND', 'VIVIDLY', 'THEIR', 'CRUELTY', 'TO', 'POOR', 'NEGROES', 'A', 'SUBJECT', 'SHE', 'MADE', 'A', 'POINT', 'OF', 'INTRODUCING', 'FORTHWITH'] +1995-1836-0006-741: ref=['SHE', 'WAS', 'THEREFORE', 'MOST', 'AGREEABLY', 'SURPRISED', 'TO', 'HEAR', 'MISTER', 'CRESSWELL', 'EXPRESS', 'HIMSELF', 'SO', 'CORDIALLY', 'AS', 'APPROVING', 'OF', 'NEGRO', 'EDUCATION'] +1995-1836-0006-741: hyp=['SHE', 'WAS', 'THEREFORE', 'MOST', 'AGREEABLY', 'SURPRISED', 'TO', 'HEAR', 'MISTER', 'CRESWELL', 'EXPRESS', 'HIMSELF', 'SO', 'CORDIALLY', 'AS', 'APPROVING', 'OF', 'NEGRO', 'EDUCATION'] +1995-1836-0007-742: ref=['BUT', 'YOU', 'BELIEVE', 'IN', 'SOME', 'EDUCATION', 'ASKED', 'MARY', 'TAYLOR'] +1995-1836-0007-742: hyp=['DO', 'BELIEVE', 'IN', 'SOME', 'EDUCATION', 'ASKED', 'MARY', 'TAYLOR'] +1995-1836-0008-743: ref=['I', 'BELIEVE', 'IN', 'THE', 'TRAINING', 'OF', 'PEOPLE', 'TO', 'THEIR', 'HIGHEST', 'CAPACITY', 'THE', 'ENGLISHMAN', 'HERE', 'HEARTILY', 'SECONDED', 'HIM'] +1995-1836-0008-743: hyp=['I', 'BELIEVE', 'IN', 'THE', 'TRAINING', 'OF', 'PEOPLE', 'TO', 'THE', 'HOUSE', 'CAPACITY', 'THE', 'ENGLISHMAN', 'HERE', 'HEARTILY', 'SECONDED', 'HIM'] +1995-1836-0009-744: ref=['BUT', 'CRESSWELL', 'ADDED', 'SIGNIFICANTLY', 'CAPACITY', 'DIFFERS', 'ENORMOUSLY', 'BETWEEN', 'RACES'] +1995-1836-0009-744: hyp=['BUT', 'CRESWELL', 'ADDED', 'SIGNIFICANTLY', 'CAPACITY', 'DIFFERS', 'ENORMOUSLY', 'BETWEEN', 'RACES'] +1995-1836-0010-745: ref=['THE', 'VANDERPOOLS', 'WERE', 'SURE', 'OF', 'THIS', 'AND', 'THE', 'ENGLISHMAN', 'INSTANCING', 'INDIA', 'BECAME', 'QUITE', 'ELOQUENT', 'MISSUS', 'GREY', 'WAS', 'MYSTIFIED', 'BUT', 'HARDLY', 'DARED', 'ADMIT', 'IT', 'THE', 'GENERAL', 'TREND', 'OF', 'THE', 'CONVERSATION', 'SEEMED', 'TO', 'BE', 'THAT', 'MOST', 'INDIVIDUALS', 'NEEDED', 'TO', 'BE', 'SUBMITTED', 'TO', 'THE', 'SHARPEST', 'SCRUTINY', 'BEFORE', 'BEING', 'ALLOWED', 'MUCH', 'EDUCATION', 'AND', 'AS', 'FOR', 'THE', 'LOWER', 'RACES', 'IT', 'WAS', 'SIMPLY', 'CRIMINAL', 'TO', 'OPEN', 'SUCH', 'USELESS', 'OPPORTUNITIES', 'TO', 'THEM'] +1995-1836-0010-745: hyp=['THE', 'VANDER', 'POOLS', 'WERE', 'SURE', 'THIS', 'AND', 'THE', 'ENGLISHMAN', 'INSTANCING', 'INDIA', 'BECAME', 'QUITE', 'ELOQUENT', 'MISSUS', 'GRAY', 'WAS', 'MYSTIFIED', 'BUT', 'HARDLY', 'DARED', 'ADMIT', 'IT', 'THE', 'GENERAL', 'TREND', 'OF', 'THE', 'CONVERSATION', 'SEEMED', 'TO', 'BE', 'THAT', 'MOST', 'INDIVIDUALS', 'NEEDED', 'TO', 'BE', 'SUBMITTED', 'TO', 'THE', 'SHARPEST', 'SCRUTINY', 'BEFORE', 'BEING', 'ALLOWED', 'MUCH', 'EDUCATION', 'AND', 'AS', 'FOR', 'THE', 'LOWER', 'RACES', 'IT', 'WAS', 'SIMPLY', 'CRIMINAL', 'TO', 'OPEN', 'SUCH', 'USELESS', 'OPPORTUNITIES', 'TO', 'THEM'] +1995-1836-0011-746: ref=['POSITIVELY', 'HEROIC', 'ADDED', 'CRESSWELL', 'AVOIDING', 'HIS', "SISTER'S", 'EYES'] +1995-1836-0011-746: hyp=['WAS', 'ACTIVELY', 'HEROIC', 'ADDED', 'CHRISWELL', 'AVOIDING', 'HIS', "SISTER'S", 'EYES'] +1995-1836-0012-747: ref=['BUT', "WE'RE", 'NOT', 'ER', 'EXACTLY', 'WELCOMED'] 
+1995-1836-0012-747: hyp=['BUT', 'WE', 'ARE', 'NOT', 'A', 'EXACTLY', 'WELCOME'] +1995-1836-0013-748: ref=['MARY', 'TAYLOR', 'HOWEVER', 'RELATED', 'THE', 'TALE', 'OF', 'ZORA', 'TO', 'MISSUS', "GREY'S", 'PRIVATE', 'EAR', 'LATER'] +1995-1836-0013-748: hyp=['MERRY', 'TAYLOR', 'HOWEVER', 'RELATED', 'THE', 'TALE', 'OF', 'ZORAH', 'TO', 'MISSUS', "GRAY'S", 'PRIVATE', 'EAR', 'LATER'] +1995-1836-0014-749: ref=['FORTUNATELY', 'SAID', 'MISTER', 'VANDERPOOL', 'NORTHERNERS', 'AND', 'SOUTHERNERS', 'ARE', 'ARRIVING', 'AT', 'A', 'BETTER', 'MUTUAL', 'UNDERSTANDING', 'ON', 'MOST', 'OF', 'THESE', 'MATTERS'] +1995-1836-0014-749: hyp=['FORTUNATELY', 'SAID', 'MISTER', 'VAN', 'DERPOOL', 'NORTHERNOSING', 'SOUTHERNERS', 'ALL', 'RIVING', 'AT', 'A', 'BETTER', 'MUTUAL', 'UNDERSTANDING', 'ON', 'MOST', 'OF', 'THESE', 'MATTERS'] +1995-1837-0000-777: ref=['HE', 'KNEW', 'THE', 'SILVER', 'FLEECE', 'HIS', 'AND', "ZORA'S", 'MUST', 'BE', 'RUINED'] +1995-1837-0000-777: hyp=['HE', 'KNEW', 'THE', 'SILVER', 'FLEECE', 'HIS', 'AND', 'ZORAS', 'MUST', 'BE', 'RUINED'] +1995-1837-0001-778: ref=['IT', 'WAS', 'THE', 'FIRST', 'GREAT', 'SORROW', 'OF', 'HIS', 'LIFE', 'IT', 'WAS', 'NOT', 'SO', 'MUCH', 'THE', 'LOSS', 'OF', 'THE', 'COTTON', 'ITSELF', 'BUT', 'THE', 'FANTASY', 'THE', 'HOPES', 'THE', 'DREAMS', 'BUILT', 'AROUND', 'IT'] +1995-1837-0001-778: hyp=['IT', 'WAS', 'THE', 'FIRST', 'GREAT', 'SORROW', 'OF', 'HIS', 'LIFE', 'IT', 'WAS', 'NOT', 'SO', 'MUCH', 'THE', 'LOSS', 'OF', 'THE', 'CONTIN', 'ITSELF', 'BUT', 'THE', 'FANTASY', 'THE', 'HOPES', 'THE', 'DREAMS', 'BUILT', 'AROUND', 'IT'] +1995-1837-0002-779: ref=['AH', 'THE', 'SWAMP', 'THE', 'CRUEL', 'SWAMP'] +1995-1837-0002-779: hyp=['AH', 'THE', 'SWAMP', 'THE', 'CRUEL', 'SWAMP'] +1995-1837-0003-780: ref=['THE', 'REVELATION', 'OF', 'HIS', 'LOVE', 'LIGHTED', 'AND', 'BRIGHTENED', 'SLOWLY', 'TILL', 'IT', 'FLAMED', 'LIKE', 'A', 'SUNRISE', 'OVER', 'HIM', 'AND', 'LEFT', 'HIM', 'IN', 'BURNING', 'WONDER'] +1995-1837-0003-780: hyp=['WHO', 'REVELATION', 'OF', 'HIS', 'LOVE', 'LIGHTED', 'AND', 'BRIGHTENED', 'SLOWLY', 'TILL', 'IT', 'FLAMED', 'LIKE', 'A', 'SUNRISE', 'OVER', 'HIM', 'AND', 'LEFT', 'HIM', 'IN', 'BURNING', 'WONDER'] +1995-1837-0004-781: ref=['HE', 'PANTED', 'TO', 'KNOW', 'IF', 'SHE', 'TOO', 'KNEW', 'OR', 'KNEW', 'AND', 'CARED', 'NOT', 'OR', 'CARED', 'AND', 'KNEW', 'NOT'] +1995-1837-0004-781: hyp=['HE', 'PANTED', 'TO', 'KNOW', 'IF', 'SHE', 'TOO', 'KNEW', 'OR', 'NEW', 'AND', 'CARED', 'NOT', 'OR', 'CARED', 'AND', 'KNEW', 'NOT'] +1995-1837-0005-782: ref=['SHE', 'WAS', 'SO', 'STRANGE', 'AND', 'HUMAN', 'A', 'CREATURE'] +1995-1837-0005-782: hyp=['SHE', 'WAS', 'SO', 'STRANGE', 'IN', 'HUMAN', 'A', 'CREATURE'] +1995-1837-0006-783: ref=['THE', 'WORLD', 'WAS', 'WATER', 'VEILED', 'IN', 'MISTS'] +1995-1837-0006-783: hyp=['THE', 'WORLD', 'WAS', 'WATER', 'VEILED', 'IN', 'MISTS'] +1995-1837-0007-784: ref=['THEN', 'OF', 'A', 'SUDDEN', 'AT', 'MIDDAY', 'THE', 'SUN', 'SHOT', 'OUT', 'HOT', 'AND', 'STILL', 'NO', 'BREATH', 'OF', 'AIR', 'STIRRED', 'THE', 'SKY', 'WAS', 'LIKE', 'BLUE', 'STEEL', 'THE', 'EARTH', 'STEAMED'] +1995-1837-0007-784: hyp=['THEN', 'OF', 'A', 'SUDDEN', 'AT', 'MIDDAY', 'THE', 'SUN', 'SHOT', 'OUT', 'HOT', 'AND', 'STILL', 'NO', 'BREATH', 'OF', 'AIR', 'STIRRED', 'THE', 'SKY', 'WAS', 'LIKE', 'BLUE', 'STEEL', 'THE', 'EARTH', 'STEAMED'] +1995-1837-0008-785: ref=['WHERE', 'WAS', 'THE', 'USE', 'OF', 'IMAGINING'] +1995-1837-0008-785: hyp=['WHERE', 'WAS', 'THE', 'USE', 'OF', 'IMAGINING'] +1995-1837-0009-786: ref=['THE', 'LAGOON', 'HAD', 'BEEN', 'LEVEL', 'WITH', 'THE', 'DYKES', 'A', 'WEEK', 'AGO', 'AND', 'NOW'] 
+1995-1837-0009-786: hyp=['THE', 'LAGOON', 'HAD', 'BEEN', 'LEVEL', 'WITH', 'THE', 'DIKES', 'A', 'WEEK', 'AGO', 'AND', 'NOW'] +1995-1837-0010-787: ref=['PERHAPS', 'SHE', 'TOO', 'MIGHT', 'BE', 'THERE', 'WAITING', 'WEEPING'] +1995-1837-0010-787: hyp=['PERHAPS', 'SHE', 'TOO', 'MIGHT', 'BE', 'THERE', 'WAITING', 'WEEPING'] +1995-1837-0011-788: ref=['HE', 'STARTED', 'AT', 'THE', 'THOUGHT', 'HE', 'HURRIED', 'FORTH', 'SADLY'] +1995-1837-0011-788: hyp=['HE', 'STARTED', 'AT', 'THE', 'THOUGHT', 'HE', 'HURRIED', 'FORTH', 'SADLY'] +1995-1837-0012-789: ref=['HE', 'SPLASHED', 'AND', 'STAMPED', 'ALONG', 'FARTHER', 'AND', 'FARTHER', 'ONWARD', 'UNTIL', 'HE', 'NEARED', 'THE', 'RAMPART', 'OF', 'THE', 'CLEARING', 'AND', 'PUT', 'FOOT', 'UPON', 'THE', 'TREE', 'BRIDGE'] +1995-1837-0012-789: hyp=['HIS', 'BLASHED', 'AND', 'STAMPED', 'ALONG', 'FARTHER', 'AND', 'FARTHER', 'ONWARD', 'UNTIL', 'HE', 'NEARED', 'THE', 'RAMPART', 'OF', 'THE', 'CLEARING', 'AND', 'PUT', 'FOOT', 'UPON', 'THE', 'TREE', 'BRIDGE'] +1995-1837-0013-790: ref=['THEN', 'HE', 'LOOKED', 'DOWN', 'THE', 'LAGOON', 'WAS', 'DRY'] +1995-1837-0013-790: hyp=['THEN', 'HE', 'LOOKED', 'DOWN', 'THE', 'LAGOON', 'WAS', 'DRY'] +1995-1837-0014-791: ref=['HE', 'STOOD', 'A', 'MOMENT', 'BEWILDERED', 'THEN', 'TURNED', 'AND', 'RUSHED', 'UPON', 'THE', 'ISLAND', 'A', 'GREAT', 'SHEET', 'OF', 'DAZZLING', 'SUNLIGHT', 'SWEPT', 'THE', 'PLACE', 'AND', 'BENEATH', 'LAY', 'A', 'MIGHTY', 'MASS', 'OF', 'OLIVE', 'GREEN', 'THICK', 'TALL', 'WET', 'AND', 'WILLOWY'] +1995-1837-0014-791: hyp=['HE', 'STOOD', 'A', 'MOMENT', 'BEWILDERED', 'THEN', 'TURNED', 'AND', 'RUSHED', 'UPON', 'THE', 'ISLAND', 'A', 'GREAT', 'SHEET', 'OF', 'DAZZLING', 'SUNLIGHT', 'SWEPT', 'THE', 'PLACE', 'AND', 'BENEATH', 'LAY', 'A', 'MIGHTY', 'MASS', 'OF', 'OLIVE', 'GREEN', 'THICK', 'TALL', 'WET', 'AND', 'WILLOWY'] +1995-1837-0015-792: ref=['THE', 'SQUARES', 'OF', 'COTTON', 'SHARP', 'EDGED', 'HEAVY', 'WERE', 'JUST', 'ABOUT', 'TO', 'BURST', 'TO', 'BOLLS'] +1995-1837-0015-792: hyp=['THE', 'SQUARES', 'OF', 'COTTON', 'SHARP', 'EDGED', 'HEAVY', 'WERE', 'JUST', 'ABOUT', 'TO', 'BURST', 'TO', 'BOWLS'] +1995-1837-0016-793: ref=['FOR', 'ONE', 'LONG', 'MOMENT', 'HE', 'PAUSED', 'STUPID', 'AGAPE', 'WITH', 'UTTER', 'AMAZEMENT', 'THEN', 'LEANED', 'DIZZILY', 'AGAINST', 'A', 'TREE'] +1995-1837-0016-793: hyp=['FOR', 'ONE', 'LONG', 'MOMENT', 'HE', 'PAUSED', 'STUPID', 'AGAPE', 'WITH', 'UTTER', 'AMAZEMENT', 'THEN', 'LEANED', 'DIZZILY', 'AGAINST', 'A', 'TREE'] +1995-1837-0017-794: ref=['HE', 'GAZED', 'ABOUT', 'PERPLEXED', 'ASTONISHED'] +1995-1837-0017-794: hyp=['HE', 'GAZED', 'ABOUT', 'PERPLEXED', 'ASTONISHED'] +1995-1837-0018-795: ref=['HERE', 'LAY', 'THE', 'READING', 'OF', 'THE', 'RIDDLE', 'WITH', 'INFINITE', 'WORK', 'AND', 'PAIN', 'SOME', 'ONE', 'HAD', 'DUG', 'A', 'CANAL', 'FROM', 'THE', 'LAGOON', 'TO', 'THE', 'CREEK', 'INTO', 'WHICH', 'THE', 'FORMER', 'HAD', 'DRAINED', 'BY', 'A', 'LONG', 'AND', 'CROOKED', 'WAY', 'THUS', 'ALLOWING', 'IT', 'TO', 'EMPTY', 'DIRECTLY'] +1995-1837-0018-795: hyp=['HERE', 'LAY', 'THE', 'READING', 'OF', 'THE', 'RIDDLE', 'WITH', 'INFINITE', 'WORK', 'AND', 'PAIN', 'SOME', 'ONE', 'HAD', 'DUG', 'A', 'CANAL', 'FROM', 'THE', 'LAGOON', 'TO', 'THE', 'CREEK', 'INTO', 'WHICH', 'THE', 'FORMER', 'HAD', 'DRAINED', 'BY', 'A', 'LONG', 'AND', 'CROOKED', 'WAY', 'THUS', 'ALLOWING', 'IT', 'TO', 'EMPTY', 'DIRECTLY'] +1995-1837-0019-796: ref=['HE', 'SAT', 'DOWN', 'WEAK', 'BEWILDERED', 'AND', 'ONE', 'THOUGHT', 'WAS', 'UPPERMOST', 'ZORA'] +1995-1837-0019-796: hyp=['HE', 'SAT', 'DOWN', 'WEAK', 'BEWILDERED', 'AND', 'ONE', 'THOUGHT', 'WAS', 
'UPPERMOST', 'SORA'] +1995-1837-0020-797: ref=['THE', 'YEARS', 'OF', 'THE', 'DAYS', 'OF', 'HER', 'DYING', 'WERE', 'TEN'] +1995-1837-0020-797: hyp=['THE', 'YEARS', 'OF', 'THE', 'DAYS', 'OF', 'HER', 'DYING', 'WERE', 'TEN'] +1995-1837-0021-798: ref=['THE', 'HOPE', 'AND', 'DREAM', 'OF', 'HARVEST', 'WAS', 'UPON', 'THE', 'LAND'] +1995-1837-0021-798: hyp=['THE', 'HOPE', 'AND', 'DREAM', 'OF', 'HARVEST', 'WAS', 'UPON', 'THE', 'LAND'] +1995-1837-0022-799: ref=['UP', 'IN', 'THE', 'SICK', 'ROOM', 'ZORA', 'LAY', 'ON', 'THE', 'LITTLE', 'WHITE', 'BED'] +1995-1837-0022-799: hyp=['UP', 'IN', 'THE', 'SICK', 'ROOM', 'ZORA', 'LAY', 'ON', 'THE', 'LITTLE', 'WHITE', 'BED'] +1995-1837-0023-800: ref=['THE', 'NET', 'AND', 'WEB', 'OF', 'ENDLESS', 'THINGS', 'HAD', 'BEEN', 'CRAWLING', 'AND', 'CREEPING', 'AROUND', 'HER', 'SHE', 'HAD', 'STRUGGLED', 'IN', 'DUMB', 'SPEECHLESS', 'TERROR', 'AGAINST', 'SOME', 'MIGHTY', 'GRASPING', 'THAT', 'STROVE', 'FOR', 'HER', 'LIFE', 'WITH', 'GNARLED', 'AND', 'CREEPING', 'FINGERS', 'BUT', 'NOW', 'AT', 'LAST', 'WEAKLY', 'SHE', 'OPENED', 'HER', 'EYES', 'AND', 'QUESTIONED'] +1995-1837-0023-800: hyp=['THE', 'NED', 'AND', 'WEB', 'OF', 'ENDLESS', 'THINGS', 'HAD', 'BEEN', 'CRAWLING', 'AND', 'CREEPING', 'AROUND', 'HER', 'SHE', 'HAD', 'STRUGGLED', 'IN', 'DUMB', 'SPEECHLESS', 'TERROR', 'AGAINST', 'SOME', 'MIGHTY', 'GRASPING', 'THAT', 'STROVE', 'FOR', 'HER', 'LIFE', 'WITH', 'GNARLED', 'AND', 'CREEPING', 'FINGERS', 'BUT', 'NOW', 'AT', 'LAST', 'WEEKLY', 'SHE', 'OPENED', 'HER', 'EYES', 'AND', 'QUESTIONED'] +1995-1837-0024-801: ref=['FOR', 'A', 'WHILE', 'SHE', 'LAY', 'IN', 'HER', 'CHAIR', 'IN', 'HAPPY', 'DREAMY', 'PLEASURE', 'AT', 'SUN', 'AND', 'BIRD', 'AND', 'TREE'] +1995-1837-0024-801: hyp=['FOR', 'A', 'WHILE', 'SHE', 'LAY', 'IN', 'HER', 'CHAIR', 'IN', 'HAPPY', 'DREAMY', 'PLEASURE', 'ITS', 'SUN', 'AND', 'BIRD', 'AND', 'TREE'] +1995-1837-0025-802: ref=['SHE', 'ROSE', 'WITH', 'A', 'FLEETING', 'GLANCE', 'GATHERED', 'THE', 'SHAWL', 'ROUND', 'HER', 'THEN', 'GLIDING', 'FORWARD', 'WAVERING', 'TREMULOUS', 'SLIPPED', 'ACROSS', 'THE', 'ROAD', 'AND', 'INTO', 'THE', 'SWAMP'] +1995-1837-0025-802: hyp=['SHE', 'ROSE', 'WITH', 'A', 'FLEETING', 'GLANCE', 'GATHERED', 'THE', 'SHAWL', 'AROUND', 'HER', 'THEN', 'GLIDING', 'FORWARD', 'WAVERING', 'TREMULOUS', 'SLIPPED', 'ACROSS', 'THE', 'ROAD', 'AND', 'INTO', 'THE', 'SWAMP'] +1995-1837-0026-803: ref=['SHE', 'HAD', 'BEEN', 'BORN', 'WITHIN', 'ITS', 'BORDERS', 'WITHIN', 'ITS', 'BORDERS', 'SHE', 'HAD', 'LIVED', 'AND', 'GROWN', 'AND', 'WITHIN', 'ITS', 'BORDERS', 'SHE', 'HAD', 'MET', 'HER', 'LOVE'] +1995-1837-0026-803: hyp=['SHE', 'HAD', 'BEEN', 'BORN', 'WITHIN', 'ITS', 'BORDERS', 'WITHIN', 'HIS', 'BORDERS', 'SHE', 'HAD', 'LIVED', 'AND', 'GROWN', 'AND', 'WITHIN', 'ITS', 'BORDER', 'SHE', 'HAD', 'MET', 'HER', 'LOVE'] +1995-1837-0027-804: ref=['ON', 'SHE', 'HURRIED', 'UNTIL', 'SWEEPING', 'DOWN', 'TO', 'THE', 'LAGOON', 'AND', 'THE', 'ISLAND', 'LO', 'THE', 'COTTON', 'LAY', 'BEFORE', 'HER'] +1995-1837-0027-804: hyp=['ON', 'SHE', 'HURRIED', 'UNTIL', 'SWEEPING', 'DOWN', 'TO', 'THE', 'LAGOON', 'AND', 'THE', 'ISLAND', 'LO', 'THE', 'COTTON', 'LAY', 'BEFORE', 'HER'] +1995-1837-0028-805: ref=['THE', 'CHAIR', 'WAS', 'EMPTY', 'BUT', 'HE', 'KNEW'] +1995-1837-0028-805: hyp=['THE', 'CHAIR', 'WAS', 'EMPTY', 'BUT', 'HE', 'KNEW'] +1995-1837-0029-806: ref=['HE', 'DARTED', 'THROUGH', 'THE', 'TREES', 'AND', 'PAUSED', 'A', 'TALL', 'MAN', 'STRONGLY', 'BUT', 'SLIMLY', 'MADE'] +1995-1837-0029-806: hyp=['HE', 'DARTED', 'THROUGH', 'THE', 'TREES', 'AND', 'PAUSED', 'A', 'TALL', 'MAN', 'STRONGLY', 'BUT', 
'SLIMLY', 'MADE'] +2094-142345-0000-308: ref=['IT', 'IS', 'A', 'VERY', 'FINE', 'OLD', 'PLACE', 'OF', 'RED', 'BRICK', 'SOFTENED', 'BY', 'A', 'PALE', 'POWDERY', 'LICHEN', 'WHICH', 'HAS', 'DISPERSED', 'ITSELF', 'WITH', 'HAPPY', 'IRREGULARITY', 'SO', 'AS', 'TO', 'BRING', 'THE', 'RED', 'BRICK', 'INTO', 'TERMS', 'OF', 'FRIENDLY', 'COMPANIONSHIP', 'WITH', 'THE', 'LIMESTONE', 'ORNAMENTS', 'SURROUNDING', 'THE', 'THREE', 'GABLES', 'THE', 'WINDOWS', 'AND', 'THE', 'DOOR', 'PLACE'] +2094-142345-0000-308: hyp=['IT', 'IS', 'A', 'VERY', 'FINE', 'OLD', 'PLACE', 'OF', 'RED', 'BRICK', 'SOFTENED', 'BY', 'A', 'PALE', 'POWDERY', 'LICHEN', 'WHICH', 'HAS', 'DISPERSED', 'ITSELF', 'WITH', 'HAPPY', 'IRREGULARITY', 'SO', 'AS', 'TO', 'BRING', 'THE', 'RED', 'BRICK', 'INTO', 'TERMS', 'OF', 'FRIENDLY', 'COMPANIONSHIP', 'WITH', 'A', 'LIMESTONE', 'ORNAMENTS', 'SURROUNDING', 'THE', 'THREE', 'GABLES', 'THE', 'WINDOWS', 'AND', 'THE', 'DOOR', 'PLACE'] +2094-142345-0001-309: ref=['BUT', 'THE', 'WINDOWS', 'ARE', 'PATCHED', 'WITH', 'WOODEN', 'PANES', 'AND', 'THE', 'DOOR', 'I', 'THINK', 'IS', 'LIKE', 'THE', 'GATE', 'IT', 'IS', 'NEVER', 'OPENED'] +2094-142345-0001-309: hyp=['BUT', 'THE', 'WINDOWS', 'ARE', 'PATCHED', 'WITH', 'WOODEN', 'PANES', 'AND', 'THE', 'DOOR', 'I', 'THINK', 'IS', 'LIKE', 'THE', 'GATE', 'IT', 'IS', 'NEVER', 'OPENED'] +2094-142345-0002-310: ref=['FOR', 'IT', 'IS', 'A', 'SOLID', 'HEAVY', 'HANDSOME', 'DOOR', 'AND', 'MUST', 'ONCE', 'HAVE', 'BEEN', 'IN', 'THE', 'HABIT', 'OF', 'SHUTTING', 'WITH', 'A', 'SONOROUS', 'BANG', 'BEHIND', 'A', 'LIVERIED', 'LACKEY', 'WHO', 'HAD', 'JUST', 'SEEN', 'HIS', 'MASTER', 'AND', 'MISTRESS', 'OFF', 'THE', 'GROUNDS', 'IN', 'A', 'CARRIAGE', 'AND', 'PAIR'] +2094-142345-0002-310: hyp=['FOR', 'IT', 'IS', 'A', 'SOLID', 'HEAVY', 'HANDSOME', 'DOOR', 'AND', 'MUST', 'ONCE', 'HAVE', 'BEEN', 'IN', 'THE', 'HABIT', 'OF', 'SHEDDING', 'WITH', 'A', 'SONOROUS', 'BANG', 'BEHIND', 'THE', 'LIVERYED', 'LACKEY', 'WHO', 'HAD', 'JUST', 'SEEN', 'HIS', 'MASTER', 'AND', 'MISTRESS', 'OFF', 'THE', 'GROUNDS', 'IN', 'A', 'CARRIAGE', 'AND', 'PAIR'] +2094-142345-0003-311: ref=['A', 'LARGE', 'OPEN', 'FIREPLACE', 'WITH', 'RUSTY', 'DOGS', 'IN', 'IT', 'AND', 'A', 'BARE', 'BOARDED', 'FLOOR', 'AT', 'THE', 'FAR', 'END', 'FLEECES', 'OF', 'WOOL', 'STACKED', 'UP', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'FLOOR', 'SOME', 'EMPTY', 'CORN', 'BAGS'] +2094-142345-0003-311: hyp=['A', 'LARGE', 'OPEN', 'FIREPLACE', 'WITH', 'RUSTY', 'DOGS', 'IN', 'IT', 'AND', 'A', 'BARE', 'BOARDED', 'FLOOR', 'AT', 'THE', 'FAR', 'END', 'FLEECES', 'OF', 'WOOL', 'STACKED', 'UP', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'FLOOR', 'SOME', 'EMPTY', 'CORN', 'BAGS'] +2094-142345-0004-312: ref=['AND', 'WHAT', 'THROUGH', 'THE', 'LEFT', 'HAND', 'WINDOW'] +2094-142345-0004-312: hyp=['AND', 'WHAT', 'THROUGH', 'THE', 'LEFT', 'HAND', 'WINDOW'] +2094-142345-0005-313: ref=['SEVERAL', 'CLOTHES', 'HORSES', 'A', 'PILLION', 'A', 'SPINNING', 'WHEEL', 'AND', 'AN', 'OLD', 'BOX', 'WIDE', 'OPEN', 'AND', 'STUFFED', 'FULL', 'OF', 'COLOURED', 'RAGS'] +2094-142345-0005-313: hyp=['SEVERAL', 'CLOTHES', 'HORSES', 'APILLION', 'A', 'SPINNING', 'WHEEL', 'AND', 'AN', 'OLD', 'BOX', 'WIDE', 'OPEN', 'AND', 'STUFFED', 'FULL', 'OF', 'COLOURED', 'RAGS'] +2094-142345-0006-314: ref=['AT', 'THE', 'EDGE', 'OF', 'THIS', 'BOX', 'THERE', 'LIES', 'A', 'GREAT', 'WOODEN', 'DOLL', 'WHICH', 'SO', 'FAR', 'AS', 'MUTILATION', 'IS', 'CONCERNED', 'BEARS', 'A', 'STRONG', 'RESEMBLANCE', 'TO', 'THE', 'FINEST', 'GREEK', 'SCULPTURE', 'AND', 'ESPECIALLY', 'IN', 'THE', 'TOTAL', 'LOSS', 'OF', 'ITS', 'NOSE'] +2094-142345-0006-314: 
hyp=['AT', 'THE', 'EDGE', 'OF', 'THIS', 'BOX', 'THERE', 'LIES', 'A', 'GREAT', 'WOODEN', 'DOLL', 'WHICH', 'SO', 'FAR', 'AS', 'MUTILATION', 'IS', 'CONCERNED', 'BEARS', 'A', 'STRONG', 'RESEMBLANCE', 'TO', 'THE', 'FINEST', 'GREEK', 'SCULPTURE', 'AND', 'ESPECIALLY', 'IN', 'THE', 'TOTAL', 'LOSS', 'OF', 'ITS', 'NOSE'] +2094-142345-0007-315: ref=['THE', 'HISTORY', 'OF', 'THE', 'HOUSE', 'IS', 'PLAIN', 'NOW'] +2094-142345-0007-315: hyp=['THE', 'HISTORY', 'OF', 'THE', 'HOUSE', 'IS', 'PLAIN', 'NOW'] +2094-142345-0008-316: ref=['BUT', 'THERE', 'IS', 'ALWAYS', 'A', 'STRONGER', 'SENSE', 'OF', 'LIFE', 'WHEN', 'THE', 'SUN', 'IS', 'BRILLIANT', 'AFTER', 'RAIN', 'AND', 'NOW', 'HE', 'IS', 'POURING', 'DOWN', 'HIS', 'BEAMS', 'AND', 'MAKING', 'SPARKLES', 'AMONG', 'THE', 'WET', 'STRAW', 'AND', 'LIGHTING', 'UP', 'EVERY', 'PATCH', 'OF', 'VIVID', 'GREEN', 'MOSS', 'ON', 'THE', 'RED', 'TILES', 'OF', 'THE', 'COW', 'SHED', 'AND', 'TURNING', 'EVEN', 'THE', 'MUDDY', 'WATER', 'THAT', 'IS', 'HURRYING', 'ALONG', 'THE', 'CHANNEL', 'TO', 'THE', 'DRAIN', 'INTO', 'A', 'MIRROR', 'FOR', 'THE', 'YELLOW', 'BILLED', 'DUCKS', 'WHO', 'ARE', 'SEIZING', 'THE', 'OPPORTUNITY', 'OF', 'GETTING', 'A', 'DRINK', 'WITH', 'AS', 'MUCH', 'BODY', 'IN', 'IT', 'AS', 'POSSIBLE'] +2094-142345-0008-316: hyp=['BUT', 'THERE', 'IS', 'ALWAYS', 'AS', 'STRONGER', 'SENSE', 'OF', 'LIFE', 'WHEN', 'THE', 'SUN', 'IS', 'BRILLIANT', 'AFTER', 'RAIN', 'AND', 'NOW', 'HE', 'IS', 'POURING', 'DOWN', 'HIS', 'BEAMS', 'AND', 'MAKING', 'SPARKLES', 'AMONG', 'THE', 'WET', 'STRAW', 'AND', 'LIGHTING', 'UP', 'EVERY', 'PATCH', 'OF', 'VIVID', 'GREEN', 'MOSS', 'ON', 'THE', 'RED', 'TILES', 'OF', 'THE', 'COWSHED', 'AND', 'TURNING', 'EVEN', 'THE', 'MUDDY', 'WATER', 'THAT', 'IS', 'HURRYING', 'ALONG', 'THE', 'CHANNEL', 'TO', 'THE', 'DRAIN', 'INTO', 'A', 'MIRROR', 'FOR', 'THE', 'YELLOW', 'BUILD', 'DUCKS', 'WHO', 'ARE', 'SEIZING', 'THE', 'OPPORTUNITY', 'OF', 'GETTING', 'A', 'DRINK', 'WITH', 'AS', 'MUCH', 'BODY', 'IN', 'IT', 'AS', 'POSSIBLE'] +2094-142345-0009-317: ref=['FOR', 'THE', 'GREAT', 'BARN', 'DOORS', 'ARE', 'THROWN', 'WIDE', 'OPEN', 'AND', 'MEN', 'ARE', 'BUSY', 'THERE', 'MENDING', 'THE', 'HARNESS', 'UNDER', 'THE', 'SUPERINTENDENCE', 'OF', 'MISTER', 'GOBY', 'THE', 'WHITTAW', 'OTHERWISE', 'SADDLER', 'WHO', 'ENTERTAINS', 'THEM', 'WITH', 'THE', 'LATEST', 'TREDDLESTON', 'GOSSIP'] +2094-142345-0009-317: hyp=['FOR', 'THE', 'GREAT', 'BARN', 'DOORS', 'ARE', 'THROWN', 'WIDE', 'OPEN', 'AND', 'MEN', 'ARE', 'BUSY', 'THERE', 'MENDING', 'THE', 'HARNESS', 'UNDER', 'THE', 'SUPERINTENDENCE', 'OF', 'MISTER', 'GOBY', 'THE', 'WIDOW', 'OTHERWISE', 'SADDLER', 'WHO', 'ENTERTAINS', 'THEM', 'WITH', 'THE', 'LATEST', 'TREADLESTON', 'GOSSIP'] +2094-142345-0010-318: ref=['HETTY', 'SORREL', 'OFTEN', 'TOOK', 'THE', 'OPPORTUNITY', 'WHEN', 'HER', "AUNT'S", 'BACK', 'WAS', 'TURNED', 'OF', 'LOOKING', 'AT', 'THE', 'PLEASING', 'REFLECTION', 'OF', 'HERSELF', 'IN', 'THOSE', 'POLISHED', 'SURFACES', 'FOR', 'THE', 'OAK', 'TABLE', 'WAS', 'USUALLY', 'TURNED', 'UP', 'LIKE', 'A', 'SCREEN', 'AND', 'WAS', 'MORE', 'FOR', 'ORNAMENT', 'THAN', 'FOR', 'USE', 'AND', 'SHE', 'COULD', 'SEE', 'HERSELF', 'SOMETIMES', 'IN', 'THE', 'GREAT', 'ROUND', 'PEWTER', 'DISHES', 'THAT', 'WERE', 'RANGED', 'ON', 'THE', 'SHELVES', 'ABOVE', 'THE', 'LONG', 'DEAL', 'DINNER', 'TABLE', 'OR', 'IN', 'THE', 'HOBS', 'OF', 'THE', 'GRATE', 'WHICH', 'ALWAYS', 'SHONE', 'LIKE', 'JASPER'] +2094-142345-0010-318: hyp=["HETTY'S", 'SORREL', 'OFTEN', 'TOOK', 'THE', 'OPPORTUNITY', 'WHEN', 'HER', "AUNT'S", 'BACK', 'WAS', 'TURNED', 'OF', 'LOOKING', 'AT', 'THE', 'PLEASING', 
'REFLECTION', 'OF', 'HERSELF', 'IN', 'THOSE', 'POLISHED', 'SERVICES', 'FOR', 'THE', 'OAK', 'TABLE', 'WAS', 'USUALLY', 'TURNED', 'UP', 'LIKE', 'A', 'SCREEN', 'AND', 'WAS', 'MORE', 'FOR', 'ORNAMENT', 'THAN', 'FOR', 'USE', 'AND', 'SHE', 'COULD', 'SEE', 'HERSELF', 'SOMETIMES', 'IN', 'THE', 'GREAT', 'ROUND', 'PEWTER', 'DISHES', 'THAT', 'WERE', 'RANGED', 'ON', 'THE', 'SHELVES', 'ABOVE', 'THE', 'LONG', 'DEAL', 'DINNER', 'TABLE', 'OR', 'IN', 'THE', 'HOBS', 'OF', 'THE', 'GRATE', 'WHICH', 'ALWAYS', 'SHONE', 'LIKE', 'JASPER'] +2094-142345-0011-319: ref=['DO', 'NOT', 'SUPPOSE', 'HOWEVER', 'THAT', 'MISSUS', 'POYSER', 'WAS', 'ELDERLY', 'OR', 'SHREWISH', 'IN', 'HER', 'APPEARANCE', 'SHE', 'WAS', 'A', 'GOOD', 'LOOKING', 'WOMAN', 'NOT', 'MORE', 'THAN', 'EIGHT', 'AND', 'THIRTY', 'OF', 'FAIR', 'COMPLEXION', 'AND', 'SANDY', 'HAIR', 'WELL', 'SHAPEN', 'LIGHT', 'FOOTED'] +2094-142345-0011-319: hyp=['DO', 'NOT', 'SUPPOSE', 'HOWEVER', 'THAT', 'MISSUS', 'POYSER', 'WAS', 'ELDERLY', 'OR', 'SHREWISH', 'IN', 'HER', 'APPEARANCE', 'SHE', 'WAS', 'A', 'GOOD', 'LOOKING', 'WOMAN', 'NOT', 'MORE', 'THAN', 'EIGHT', 'AND', 'THIRTY', 'A', 'FAIR', 'COMPLEXION', 'AND', 'SANDY', 'HAIR', 'WHILE', 'SHAKEN', 'LIGHTFOOTED'] +2094-142345-0012-320: ref=['THE', 'FAMILY', 'LIKENESS', 'BETWEEN', 'HER', 'AND', 'HER', 'NIECE', 'DINAH', 'MORRIS', 'WITH', 'THE', 'CONTRAST', 'BETWEEN', 'HER', 'KEENNESS', 'AND', "DINAH'S", 'SERAPHIC', 'GENTLENESS', 'OF', 'EXPRESSION', 'MIGHT', 'HAVE', 'SERVED', 'A', 'PAINTER', 'AS', 'AN', 'EXCELLENT', 'SUGGESTION', 'FOR', 'A', 'MARTHA', 'AND', 'MARY'] +2094-142345-0012-320: hyp=['THE', 'FAMILY', 'LIKENESS', 'BETWEEN', 'HER', 'AND', 'HER', 'NIECE', 'DINAH', 'MORRIS', 'WITH', 'A', 'CONTRAST', 'BETWEEN', 'HER', 'KEENNESS', 'AND', "DINAH'S", 'SERAPHIC', 'GENTLENESS', 'OF', 'EXPRESSION', 'MIGHT', 'HAVE', 'SERVED', 'A', 'PAINTER', 'AS', 'AN', 'EXCELLENT', 'SUGGESTION', 'FOR', 'MARTHA', 'AND', 'MARY'] +2094-142345-0013-321: ref=['HER', 'TONGUE', 'WAS', 'NOT', 'LESS', 'KEEN', 'THAN', 'HER', 'EYE', 'AND', 'WHENEVER', 'A', 'DAMSEL', 'CAME', 'WITHIN', 'EARSHOT', 'SEEMED', 'TO', 'TAKE', 'UP', 'AN', 'UNFINISHED', 'LECTURE', 'AS', 'A', 'BARREL', 'ORGAN', 'TAKES', 'UP', 'A', 'TUNE', 'PRECISELY', 'AT', 'THE', 'POINT', 'WHERE', 'IT', 'HAD', 'LEFT', 'OFF'] +2094-142345-0013-321: hyp=['HER', 'TONGUE', 'WAS', 'NOT', 'LESS', 'KEEN', 'THAN', 'HER', 'EYE', 'AND', 'WHENEVER', 'A', 'DAMSEL', 'CAME', 'WITHIN', 'EAR', 'SHOT', 'SEEMED', 'TO', 'TAKE', 'UP', 'AN', 'UNFINISHED', 'LECTURE', 'AS', 'A', 'BARREL', 'ORGAN', 'TAKES', 'UP', 'A', 'TUNE', 'PRECISELY', 'AT', 'THE', 'POINT', 'WHERE', 'IT', 'HAD', 'LEFT', 'OFF'] +2094-142345-0014-322: ref=['THE', 'FACT', 'THAT', 'IT', 'WAS', 'CHURNING', 'DAY', 'WAS', 'ANOTHER', 'REASON', 'WHY', 'IT', 'WAS', 'INCONVENIENT', 'TO', 'HAVE', 'THE', 'WHITTAWS', 'AND', 'WHY', 'CONSEQUENTLY', 'MISSUS', 'POYSER', 'SHOULD', 'SCOLD', 'MOLLY', 'THE', 'HOUSEMAID', 'WITH', 'UNUSUAL', 'SEVERITY'] +2094-142345-0014-322: hyp=['THE', 'FACT', 'THAT', 'IT', 'WAS', 'CHURNING', 'DAY', 'WAS', 'ANOTHER', 'REASON', 'WHY', 'IT', 'WAS', 'INCONVENIENT', 'TO', 'HAVE', 'THE', 'WIDOWS', 'AND', 'WHY', 'CONSEQUENTLY', 'MISSUS', 'POYSER', 'SHOULD', 'SCOLD', 'MOLLY', 'THE', 'HOUSEMAID', 'WITH', 'UNUSUAL', 'SEVERITY'] +2094-142345-0015-323: ref=['TO', 'ALL', 'APPEARANCE', 'MOLLY', 'HAD', 'GOT', 'THROUGH', 'HER', 'AFTER', 'DINNER', 'WORK', 'IN', 'AN', 'EXEMPLARY', 'MANNER', 'HAD', 'CLEANED', 'HERSELF', 'WITH', 'GREAT', 'DISPATCH', 'AND', 'NOW', 'CAME', 'TO', 'ASK', 'SUBMISSIVELY', 'IF', 'SHE', 'SHOULD', 'SIT', 'DOWN', 'TO', 'HER', 'SPINNING', 
'TILL', 'MILKING', 'TIME'] +2094-142345-0015-323: hyp=['TO', 'ALL', 'APPEARANCE', 'MOLLY', 'HAD', 'GOT', 'THROUGH', 'HER', 'AFTER', 'DINNER', 'WORK', 'IN', 'AN', 'EXEMPLARY', 'MANNER', 'HAD', 'CLEANED', 'HERSELF', 'WITH', 'GREAT', 'DISPATCH', 'AND', 'NOW', 'CAME', 'TO', 'ASK', 'SUBMISSIVELY', 'IF', 'SHE', 'SHOULD', 'SIT', 'DOWN', 'TO', 'HER', 'SPINNING', 'TILL', 'MILKING', 'TIME'] +2094-142345-0016-324: ref=['SPINNING', 'INDEED'] +2094-142345-0016-324: hyp=['SPINNING', 'INDEED'] +2094-142345-0017-325: ref=['I', 'NEVER', 'KNEW', 'YOUR', 'EQUALS', 'FOR', 'GALLOWSNESS'] +2094-142345-0017-325: hyp=['I', 'NEVER', 'KNEW', 'YOUR', 'EQUALS', 'FOR', 'GALLOWSNESS'] +2094-142345-0018-326: ref=['WHO', 'TAUGHT', 'YOU', 'TO', 'SCRUB', 'A', 'FLOOR', 'I', 'SHOULD', 'LIKE', 'TO', 'KNOW'] +2094-142345-0018-326: hyp=['WHO', 'TAUGHT', 'YOU', 'TO', 'SCRUB', 'A', 'FLOOR', 'I', 'SHOULD', 'LIKE', 'TO', 'KNOW'] +2094-142345-0019-327: ref=['COMB', 'THE', 'WOOL', 'FOR', 'THE', 'WHITTAWS', 'INDEED'] +2094-142345-0019-327: hyp=['COMB', 'THE', 'WOOL', 'FOR', 'THE', 'WIDOWS', 'INDEED'] +2094-142345-0020-328: ref=["THAT'S", 'WHAT', "YOU'D", 'LIKE', 'TO', 'BE', 'DOING', 'IS', 'IT'] +2094-142345-0020-328: hyp=["THAT'S", 'WHAT', "YOU'D", 'LIKE', 'TO', 'BE', 'DOING', 'IS', 'IT'] +2094-142345-0021-329: ref=["THAT'S", 'THE', 'WAY', 'WITH', 'YOU', "THAT'S", 'THE', 'ROAD', "YOU'D", 'ALL', 'LIKE', 'TO', 'GO', 'HEADLONGS', 'TO', 'RUIN'] +2094-142345-0021-329: hyp=["THAT'S", 'THE', 'WAY', 'WITH', 'YOU', "THAT'S", 'THE', 'ROAD', "YOU'D", 'ALL', 'LIKE', 'TO', 'GO', 'HEADLONGS', 'TO', 'RUIN'] +2094-142345-0022-330: ref=['MISTER', "OTTLEY'S", 'INDEED'] +2094-142345-0022-330: hyp=['MISTER', "OUTLEY'S", 'INDEED'] +2094-142345-0023-331: ref=["YOU'RE", 'A', 'RARE', 'UN', 'FOR', 'SITTING', 'DOWN', 'TO', 'YOUR', 'WORK', 'A', 'LITTLE', 'WHILE', 'AFTER', "IT'S", 'TIME', 'TO', 'PUT', 'BY'] +2094-142345-0023-331: hyp=['YOU', 'ARE', 'A', 'RARE', 'IN', 'PROCEEDING', 'DOWN', 'TO', 'YOUR', 'WORK', 'A', 'LITTLE', 'WHILE', 'AFTER', 'ITS', 'TIME', 'TO', 'PUT', 'BY'] +2094-142345-0024-332: ref=['MUNNY', 'MY', "IRON'S", 'TWITE', 'TOLD', 'PEASE', 'PUT', 'IT', 'DOWN', 'TO', 'WARM'] +2094-142345-0024-332: hyp=['MONEY', 'MY', "IRON'S", 'TIGHT', 'PEASE', 'PUT', 'IT', 'DOWN', 'TO', 'WARM'] +2094-142345-0025-333: ref=['COLD', 'IS', 'IT', 'MY', 'DARLING', 'BLESS', 'YOUR', 'SWEET', 'FACE'] +2094-142345-0025-333: hyp=['COLD', 'IS', 'IT', 'MY', 'DARLING', 'BLESS', 'YOUR', 'SWEET', 'FACE'] +2094-142345-0026-334: ref=["SHE'S", 'GOING', 'TO', 'PUT', 'THE', 'IRONING', 'THINGS', 'AWAY'] +2094-142345-0026-334: hyp=["SHE'S", 'GOING', 'TO', 'PUT', 'THE', 'IRONING', 'THINGS', 'AWAY'] +2094-142345-0027-335: ref=['MUNNY', 'I', 'TOULD', 'IKE', 'TO', 'DO', 'INTO', 'DE', 'BARN', 'TO', 'TOMMY', 'TO', 'SEE', 'DE', 'WHITTAWD'] +2094-142345-0027-335: hyp=['MONEY', 'I', 'DID', 'LIKE', 'TO', 'DO', 'INTO', 'THE', 'BARN', 'TO', 'TOMMY', 'TO', 'SEE', 'THE', 'WID', 'ODD'] +2094-142345-0028-336: ref=['NO', 'NO', 'NO', 'TOTTY', 'UD', 'GET', 'HER', 'FEET', 'WET', 'SAID', 'MISSUS', 'POYSER', 'CARRYING', 'AWAY', 'HER', 'IRON'] +2094-142345-0028-336: hyp=['NO', 'NO', 'TOTTY', 'HAD', 'GET', 'HER', 'FEET', 'WET', 'SAID', 'MISSUS', 'POYSER', 'CARRYING', 'AWAY', 'HER', 'IRON'] +2094-142345-0029-337: ref=['DID', 'EVER', 'ANYBODY', 'SEE', 'THE', 'LIKE', 'SCREAMED', 'MISSUS', 'POYSER', 'RUNNING', 'TOWARDS', 'THE', 'TABLE', 'WHEN', 'HER', 'EYE', 'HAD', 'FALLEN', 'ON', 'THE', 'BLUE', 'STREAM'] +2094-142345-0029-337: hyp=['DID', 'EVER', 'ANYBODY', 'SEE', 'THE', 'LIKE', 'SCREAMED', 'MISSUS', 'POYSER', 
'RUNNING', 'TOWARDS', 'THE', 'TABLE', 'WHEN', 'HER', 'EYE', 'HAD', 'FALLEN', 'ON', 'THE', 'BLUE', 'STREAM'] +2094-142345-0030-338: ref=['TOTTY', 'HOWEVER', 'HAD', 'DESCENDED', 'FROM', 'HER', 'CHAIR', 'WITH', 'GREAT', 'SWIFTNESS', 'AND', 'WAS', 'ALREADY', 'IN', 'RETREAT', 'TOWARDS', 'THE', 'DAIRY', 'WITH', 'A', 'SORT', 'OF', 'WADDLING', 'RUN', 'AND', 'AN', 'AMOUNT', 'OF', 'FAT', 'ON', 'THE', 'NAPE', 'OF', 'HER', 'NECK', 'WHICH', 'MADE', 'HER', 'LOOK', 'LIKE', 'THE', 'METAMORPHOSIS', 'OF', 'A', 'WHITE', 'SUCKLING', 'PIG'] +2094-142345-0030-338: hyp=['TOTTY', 'HOWEVER', 'HAD', 'DESCENDED', 'FROM', 'HER', 'CHAIR', 'WITH', 'GREAT', 'SWIFTNESS', 'AND', 'WAS', 'ALREADY', 'IN', 'RETREAT', 'TOWARDS', 'THE', 'DAIRY', 'WITH', 'A', 'SORT', 'OF', 'WADDLING', 'RUN', 'AND', 'AN', 'AMOUNT', 'OF', 'FAT', 'ON', 'THE', 'NAPE', 'OF', 'HER', 'NECK', 'WHICH', 'MADE', 'HER', 'LOOK', 'LIKE', 'THE', 'METAMORPHOSIS', 'OF', 'A', 'WHITE', 'SUCKLING', 'PIG'] +2094-142345-0031-339: ref=['AND', 'SHE', 'WAS', 'VERY', 'FOND', 'OF', 'YOU', 'TOO', 'AUNT', 'RACHEL'] +2094-142345-0031-339: hyp=['AND', 'SHE', 'WAS', 'VERY', 'FOND', 'OF', 'YOU', 'TOO', 'AUNT', 'RACHEL'] +2094-142345-0032-340: ref=['I', 'OFTEN', 'HEARD', 'HER', 'TALK', 'OF', 'YOU', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'WAY'] +2094-142345-0032-340: hyp=['I', 'OFTEN', 'HEARD', 'HER', 'TALK', 'OF', 'YOU', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'WAY'] +2094-142345-0033-341: ref=['WHEN', 'SHE', 'HAD', 'THAT', 'BAD', 'ILLNESS', 'AND', 'I', 'WAS', 'ONLY', 'ELEVEN', 'YEARS', 'OLD', 'SHE', 'USED', 'TO', 'SAY', "YOU'LL", 'HAVE', 'A', 'FRIEND', 'ON', 'EARTH', 'IN', 'YOUR', 'AUNT', 'RACHEL', 'IF', "I'M", 'TAKEN', 'FROM', 'YOU', 'FOR', 'SHE', 'HAS', 'A', 'KIND', 'HEART', 'AND', "I'M", 'SURE', "I'VE", 'FOUND', 'IT', 'SO'] +2094-142345-0033-341: hyp=['WHEN', 'SHE', 'HAD', 'THAT', 'BAN', 'ILLNESS', 'AND', 'I', 'WAS', 'ONLY', 'ELEVEN', 'YEARS', 'OLD', 'SHE', 'USED', 'TO', 'SAY', "YOU'LL", 'HAVE', 'A', 'FRIEND', 'ON', 'EARTH', 'IN', 'YOUR', 'AUNT', 'RACHEL', 'IF', "I'M", 'TAKEN', 'FROM', 'YOU', 'FOR', 'SHE', 'HAS', 'A', 'KIND', 'HEART', 'AND', "I'M", 'SURE', "I'VE", 'FOUND', 'IT', 'SO'] +2094-142345-0034-342: ref=['AND', "THERE'S", 'LINEN', 'IN', 'THE', 'HOUSE', 'AS', 'I', 'COULD', 'WELL', 'SPARE', 'YOU', 'FOR', "I'VE", 'GOT', 'LOTS', 'O', 'SHEETING', 'AND', 'TABLE', 'CLOTHING', 'AND', 'TOWELLING', 'AS', "ISN'T", 'MADE', 'UP'] +2094-142345-0034-342: hyp=['AND', "THERE'S", 'LINEN', 'IN', 'THE', 'HOUSE', 'AS', 'I', 'COULD', 'WELL', 'SPARE', 'YOU', 'FOR', 'I', 'GOT', 'LOTS', 'OF', 'SHEETING', 'AND', 'TABLE', 'CLOTHING', 'AND', 'TOWELINGS', "ISN'T", 'MADE', 'UP'] +2094-142345-0035-343: ref=['BUT', 'NOT', 'MORE', 'THAN', "WHAT'S", 'IN', 'THE', 'BIBLE', 'AUNT', 'SAID', 'DINAH'] +2094-142345-0035-343: hyp=['BUT', 'NOT', 'MORE', 'THAN', "WHAT'S", 'IN', 'THE', 'BIBLE', 'AND', 'SAID', 'DINAH'] +2094-142345-0036-344: ref=['NAY', 'DEAR', 'AUNT', 'YOU', 'NEVER', 'HEARD', 'ME', 'SAY', 'THAT', 'ALL', 'PEOPLE', 'ARE', 'CALLED', 'TO', 'FORSAKE', 'THEIR', 'WORK', 'AND', 'THEIR', 'FAMILIES'] +2094-142345-0036-344: hyp=['NAY', 'DEAR', 'AUNT', 'YOU', 'NEVER', 'HEARD', 'ME', 'SAY', 'THAT', 'ALL', 'PEOPLE', 'ARE', 'CALLED', 'TO', 'FORSAKE', 'THEIR', 'WORK', 'AND', 'THEIR', 'FAMILIES'] +2094-142345-0037-345: ref=['WE', 'CAN', 'ALL', 'BE', 'SERVANTS', 'OF', 'GOD', 'WHEREVER', 'OUR', 'LOT', 'IS', 'CAST', 'BUT', 'HE', 'GIVES', 'US', 'DIFFERENT', 'SORTS', 'OF', 'WORK', 'ACCORDING', 'AS', 'HE', 'FITS', 'US', 'FOR', 'IT', 'AND', 'CALLS', 'US', 'TO', 'IT'] +2094-142345-0037-345: hyp=['WE', 'CAN', 'ALL', 'BE', 
'SERVANTS', 'OF', 'GOD', 'WHEREVER', 'OUR', 'LOT', 'IS', 'CAST', 'BUT', 'HE', 'GIVES', 'US', 'DIFFERENT', 'SORTS', 'OF', 'WORK', 'ACCORDING', 'AS', 'HE', 'FITS', 'US', 'FOR', 'IT', 'AND', 'CALLS', 'US', 'TO', 'IT'] +2094-142345-0038-346: ref=['I', 'CAN', 'NO', 'MORE', 'HELP', 'SPENDING', 'MY', 'LIFE', 'IN', 'TRYING', 'TO', 'DO', 'WHAT', 'I', 'CAN', 'FOR', 'THE', 'SOULS', 'OF', 'OTHERS', 'THAN', 'YOU', 'COULD', 'HELP', 'RUNNING', 'IF', 'YOU', 'HEARD', 'LITTLE', 'TOTTY', 'CRYING', 'AT', 'THE', 'OTHER', 'END', 'OF', 'THE', 'HOUSE', 'THE', 'VOICE', 'WOULD', 'GO', 'TO', 'YOUR', 'HEART', 'YOU', 'WOULD', 'THINK', 'THE', 'DEAR', 'CHILD', 'WAS', 'IN', 'TROUBLE', 'OR', 'IN', 'DANGER', 'AND', 'YOU', "COULDN'T", 'REST', 'WITHOUT', 'RUNNING', 'TO', 'HELP', 'HER', 'AND', 'COMFORT', 'HER'] +2094-142345-0038-346: hyp=['I', 'CAN', 'NO', 'MORE', 'HELP', 'SPENDING', 'MY', 'LIFE', 'IN', 'TRYING', 'TO', 'DO', 'WHAT', 'I', 'CAN', 'FOR', 'THE', 'SOULS', 'OF', 'OTHERS', 'THEN', 'YOU', 'COULD', 'HELP', 'RUNNING', 'IF', 'YOU', 'HEARD', 'LITTLE', 'TOTTY', 'CRYING', 'AT', 'THE', 'OTHER', 'END', 'OF', 'THE', 'HOUSE', 'THE', 'VOICE', 'WOULD', 'GO', 'TO', 'YOUR', 'HEART', 'YOU', 'WOULD', 'THINK', 'THE', 'DEAR', 'CHILD', 'WAS', 'IN', 'TROUBLE', 'OR', 'IN', 'DANGER', 'AND', 'YOU', "COULDN'T", 'REST', 'WITHOUT', 'RUNNING', 'TO', 'HELP', 'HER', 'AND', 'COMFORT', 'HER'] +2094-142345-0039-347: ref=["I'VE", 'STRONG', 'ASSURANCE', 'THAT', 'NO', 'EVIL', 'WILL', 'HAPPEN', 'TO', 'YOU', 'AND', 'MY', 'UNCLE', 'AND', 'THE', 'CHILDREN', 'FROM', 'ANYTHING', "I'VE", 'DONE'] +2094-142345-0039-347: hyp=["I'VE", 'STRONG', 'ASSURANCE', 'THAT', 'NO', 'EVIL', 'WILL', 'HAPPEN', 'TO', 'YOU', 'AND', 'MY', 'UNCLE', 'AND', 'THE', 'CHILDREN', 'FROM', 'ANYTHING', 'I', 'HAVE', 'DONE'] +2094-142345-0040-348: ref=['I', "DIDN'T", 'PREACH', 'WITHOUT', 'DIRECTION'] +2094-142345-0040-348: hyp=['I', "DIDN'T", 'PREACH', 'WITHOUT', 'DIRECTION'] +2094-142345-0041-349: ref=['DIRECTION'] +2094-142345-0041-349: hyp=['DIRECTION'] +2094-142345-0042-350: ref=['I', 'HANNA', 'COMMON', 'PATIENCE', 'WITH', 'YOU'] +2094-142345-0042-350: hyp=['I', 'HAD', 'A', 'COMMON', 'PATIENCE', 'WITH', 'YOU'] +2094-142345-0043-351: ref=['BY', 'THIS', 'TIME', 'THE', 'TWO', 'GENTLEMEN', 'HAD', 'REACHED', 'THE', 'PALINGS', 'AND', 'HAD', 'GOT', 'DOWN', 'FROM', 'THEIR', 'HORSES', 'IT', 'WAS', 'PLAIN', 'THEY', 'MEANT', 'TO', 'COME', 'IN'] +2094-142345-0043-351: hyp=['BY', 'THIS', 'TIME', 'THE', 'TWO', 'GENTLEMEN', 'HAD', 'REACHED', 'THE', 'PALINGS', 'AND', 'HAD', 'GOT', 'DOWN', 'FROM', 'THEIR', 'HORSES', 'IT', 'WAS', 'PLAIN', 'THEY', 'MEANT', 'TO', 'COME', 'IN'] +2094-142345-0044-352: ref=['SAID', 'MISTER', 'IRWINE', 'WITH', 'HIS', 'STATELY', 'CORDIALITY'] +2094-142345-0044-352: hyp=['SAID', 'MISTER', 'IRWINE', 'WITH', 'HIS', 'STATELY', 'CORDIALITY'] +2094-142345-0045-353: ref=['OH', 'SIR', "DON'T", 'MENTION', 'IT', 'SAID', 'MISSUS', 'POYSER'] +2094-142345-0045-353: hyp=['OH', 'SIR', "DON'T", 'MENTION', 'IT', 'SAID', 'MISSUS', 'POYSER'] +2094-142345-0046-354: ref=['I', 'DELIGHT', 'IN', 'YOUR', 'KITCHEN'] +2094-142345-0046-354: hyp=['I', 'DELIGHT', 'IN', 'YOUR', 'KITCHEN'] +2094-142345-0047-355: ref=['POYSER', 'IS', 'NOT', 'AT', 'HOME', 'IS', 'HE'] +2094-142345-0047-355: hyp=['POYSER', 'IS', 'NOT', 'AT', 'HOME', 'IS', 'HE'] +2094-142345-0048-356: ref=['SAID', 'CAPTAIN', 'DONNITHORNE', 'SEATING', 'HIMSELF', 'WHERE', 'HE', 'COULD', 'SEE', 'ALONG', 'THE', 'SHORT', 'PASSAGE', 'TO', 'THE', 'OPEN', 'DAIRY', 'DOOR'] +2094-142345-0048-356: hyp=['SAID', 'CAPTAIN', 'DONNYTHORNE', 'SITTING', 'HIMSELF', 
'WHERE', 'HE', 'COULD', 'SEE', 'ALONG', 'THE', 'SHORT', 'PASSAGE', 'TO', 'THE', 'OPEN', 'DAIRY', 'DOOR'] +2094-142345-0049-357: ref=['NO', 'SIR', 'HE', "ISN'T", "HE'S", 'GONE', 'TO', 'ROSSETER', 'TO', 'SEE', 'MISTER', 'WEST', 'THE', 'FACTOR', 'ABOUT', 'THE', 'WOOL'] +2094-142345-0049-357: hyp=['NO', 'SIR', 'HE', "ISN'T", "HE'S", 'GONE', 'TO', 'ROSSOTER', 'TO', 'SEE', 'MISTER', 'WEST', 'THE', 'FACTOR', 'ABOUT', 'THE', 'WOOL'] +2094-142345-0050-358: ref=['BUT', "THERE'S", 'FATHER', 'THE', 'BARN', 'SIR', 'IF', "HE'D", 'BE', 'OF', 'ANY', 'USE'] +2094-142345-0050-358: hyp=['BUT', "THERE'S", 'FATHER', 'IN', 'BARN', 'SIR', 'IF', "HE'D", 'BE', 'OF', 'ANY', 'USE'] +2094-142345-0051-359: ref=['NO', 'THANK', 'YOU', "I'LL", 'JUST', 'LOOK', 'AT', 'THE', 'WHELPS', 'AND', 'LEAVE', 'A', 'MESSAGE', 'ABOUT', 'THEM', 'WITH', 'YOUR', 'SHEPHERD'] +2094-142345-0051-359: hyp=['NO', 'THANK', 'YOU', "I'LL", 'JUST', 'LOOK', 'AT', 'THE', 'WHELMS', 'AND', 'LEAVE', 'A', 'MESSAGE', 'ABOUT', 'THEM', 'WITH', 'YOUR', 'SHEPHERD'] +2094-142345-0052-360: ref=['I', 'MUST', 'COME', 'ANOTHER', 'DAY', 'AND', 'SEE', 'YOUR', 'HUSBAND', 'I', 'WANT', 'TO', 'HAVE', 'A', 'CONSULTATION', 'WITH', 'HIM', 'ABOUT', 'HORSES'] +2094-142345-0052-360: hyp=['I', 'MUST', 'COME', 'ANOTHER', 'DAY', 'AND', 'SEE', 'YOUR', 'HUSBAND', 'I', 'WANT', 'TO', 'HAVE', 'A', 'CONSULTATION', 'WITH', 'HIM', 'ABOUT', 'HORSES'] +2094-142345-0053-361: ref=['FOR', 'IF', "HE'S", 'ANYWHERE', 'ON', 'THE', 'FARM', 'WE', 'CAN', 'SEND', 'FOR', 'HIM', 'IN', 'A', 'MINUTE'] +2094-142345-0053-361: hyp=['FOR', 'IF', "HE'S", 'ANYWHERE', 'ON', 'THE', 'FARM', 'WE', 'CAN', 'SEND', 'FOR', 'HIM', 'IN', 'A', 'MINUTE'] +2094-142345-0054-362: ref=['OH', 'SIR', 'SAID', 'MISSUS', 'POYSER', 'RATHER', 'ALARMED', 'YOU', "WOULDN'T", 'LIKE', 'IT', 'AT', 'ALL'] +2094-142345-0054-362: hyp=['OH', 'SIR', 'SAID', 'MISSUS', 'POYSER', 'RATHER', 'ALARMED', 'YOU', "WOULDN'T", 'LIKE', 'IT', 'AT', 'ALL'] +2094-142345-0055-363: ref=['BUT', 'YOU', 'KNOW', 'MORE', 'ABOUT', 'THAT', 'THAN', 'I', 'DO', 'SIR'] +2094-142345-0055-363: hyp=['BUT', 'YOU', 'KNOW', 'MORE', 'ABOUT', 'THAT', 'THAN', 'I', 'DO', 'SIR'] +2094-142345-0056-364: ref=['I', 'THINK', 'I', 'SHOULD', 'BE', 'DOING', 'YOU', 'A', 'SERVICE', 'TO', 'TURN', 'YOU', 'OUT', 'OF', 'SUCH', 'A', 'PLACE'] +2094-142345-0056-364: hyp=['I', 'THINK', 'I', 'SHOULD', 'BE', 'DOING', 'YOU', 'A', 'SERVICE', 'TO', 'TURN', 'YOU', 'OUT', 'OF', 'SUCH', 'A', 'PLACE'] +2094-142345-0057-365: ref=['I', 'KNOW', 'HIS', 'FARM', 'IS', 'IN', 'BETTER', 'ORDER', 'THAN', 'ANY', 'OTHER', 'WITHIN', 'TEN', 'MILES', 'OF', 'US', 'AND', 'AS', 'FOR', 'THE', 'KITCHEN', 'HE', 'ADDED', 'SMILING', 'I', "DON'T", 'BELIEVE', "THERE'S", 'ONE', 'IN', 'THE', 'KINGDOM', 'TO', 'BEAT', 'IT'] +2094-142345-0057-365: hyp=['I', 'KNOWS', 'FARM', 'IS', 'IN', 'BETTER', 'ORDER', 'THAN', 'ANY', 'OTHER', 'WITHIN', 'TEN', 'MILES', 'OF', 'US', 'AND', 'AS', 'FOR', 'THE', 'KITCHEN', 'HE', 'ADDED', 'SMILING', 'I', "DON'T", 'BELIEVE', "THERE'S", 'ONE', 'IN', 'THE', 'KINGDOM', 'TO', 'BEAT', 'IT'] +2094-142345-0058-366: ref=['BY', 'THE', 'BY', "I'VE", 'NEVER', 'SEEN', 'YOUR', 'DAIRY', 'I', 'MUST', 'SEE', 'YOUR', 'DAIRY', 'MISSUS', 'POYSER'] +2094-142345-0058-366: hyp=['BY', 'THE', 'BYE', 'I', 'HAVE', 'NEVER', 'SEEN', 'YOUR', 'DAIRY', 'I', 'MUST', 'SEE', 'YOUR', 'DEARIE', 'MISSUS', 'POYSER'] +2094-142345-0059-367: ref=['THIS', 'MISSUS', 'POYSER', 'SAID', 'BLUSHING', 'AND', 'BELIEVING', 'THAT', 'THE', 'CAPTAIN', 'WAS', 'REALLY', 'INTERESTED', 'IN', 'HER', 'MILK', 'PANS', 'AND', 'WOULD', 'ADJUST', 'HIS', 'OPINION', 'OF', 
'HER', 'TO', 'THE', 'APPEARANCE', 'OF', 'HER', 'DAIRY'] +2094-142345-0059-367: hyp=['THIS', 'MISSUS', 'POYSER', 'SAID', 'BLUSHING', 'AND', 'BELIEVING', 'THAT', 'THE', 'CAPTAIN', 'WAS', 'REALLY', 'INTERESTED', 'IN', 'HER', 'MILK', 'PANS', 'AND', 'WOULD', 'ADJUST', 'HIS', 'OPINION', 'OF', 'HER', 'TO', 'THE', 'APPEARANCE', 'OF', 'HER', 'DAIRY'] +2094-142345-0060-368: ref=['OH', "I'VE", 'NO', 'DOUBT', "IT'S", 'IN', 'CAPITAL', 'ORDER'] +2094-142345-0060-368: hyp=['OH', "I'VE", 'NO', 'DOUBT', "IT'S", 'IN', 'CAPITAL', 'ORDER'] +2300-131720-0000-1816: ref=['THE', 'PARIS', 'PLANT', 'LIKE', 'THAT', 'AT', 'THE', 'CRYSTAL', 'PALACE', 'WAS', 'A', 'TEMPORARY', 'EXHIBIT'] +2300-131720-0000-1816: hyp=['THE', 'PARIS', 'PLANT', 'LIKE', 'THAT', 'AT', 'THE', 'CRYSTAL', 'PALACE', 'WAS', 'A', 'TEMPORARY', 'EXHIBIT'] +2300-131720-0001-1817: ref=['THE', 'LONDON', 'PLANT', 'WAS', 'LESS', 'TEMPORARY', 'BUT', 'NOT', 'PERMANENT', 'SUPPLYING', 'BEFORE', 'IT', 'WAS', 'TORN', 'OUT', 'NO', 'FEWER', 'THAN', 'THREE', 'THOUSAND', 'LAMPS', 'IN', 'HOTELS', 'CHURCHES', 'STORES', 'AND', 'DWELLINGS', 'IN', 'THE', 'VICINITY', 'OF', 'HOLBORN', 'VIADUCT'] +2300-131720-0001-1817: hyp=['THE', 'LONDON', 'PLANT', 'WAS', 'LESS', 'TEMPORARY', 'BUT', 'NOT', 'PERMANENT', 'SUPPLYING', 'BEFORE', 'IT', 'WAS', 'TORN', 'OUT', 'NO', 'FEWER', 'THAN', 'THREE', 'THOUSAND', 'LAMPS', 'IN', 'HOTELS', 'CHURCHES', 'STORES', 'AND', 'DWELLINGS', 'IN', 'THE', 'VICINITY', 'OF', 'HOLBORN', 'VIADUK'] +2300-131720-0002-1818: ref=['THERE', 'MESSRS', 'JOHNSON', 'AND', 'HAMMER', 'PUT', 'INTO', 'PRACTICE', 'MANY', 'OF', 'THE', 'IDEAS', 'NOW', 'STANDARD', 'IN', 'THE', 'ART', 'AND', 'SECURED', 'MUCH', 'USEFUL', 'DATA', 'FOR', 'THE', 'WORK', 'IN', 'NEW', 'YORK', 'OF', 'WHICH', 'THE', 'STORY', 'HAS', 'JUST', 'BEEN', 'TOLD'] +2300-131720-0002-1818: hyp=['THERE', 'MESSRS', 'JOHNSON', 'AND', 'HAMMER', 'PUT', 'INTO', 'PRACTICE', 'MANY', 'OF', 'THE', 'IDEAS', 'NOW', 'STANDARD', 'IN', 'THE', 'ART', 'AND', 'SECURED', 'MUCH', 'USEFUL', 'DATA', 'FOR', 'THE', 'WORK', 'IN', 'NEW', 'YORK', 'OF', 'WHICH', 'THE', 'STORY', 'HAS', 'JUST', 'BEEN', 'TOLD'] +2300-131720-0003-1819: ref=['THE', 'DYNAMO', 'ELECTRIC', 'MACHINE', 'THOUGH', 'SMALL', 'WAS', 'ROBUST', 'FOR', 'UNDER', 'ALL', 'THE', 'VARYING', 'SPEEDS', 'OF', 'WATER', 'POWER', 'AND', 'THE', 'VICISSITUDES', 'OF', 'THE', 'PLANT', 'TO', 'WHICH', 'IT', 'BELONGED', 'IT', 'CONTINUED', 'IN', 'ACTIVE', 'USE', 'UNTIL', 'EIGHTEEN', 'NINETY', 'NINE', 'SEVENTEEN', 'YEARS'] +2300-131720-0003-1819: hyp=['THE', 'DYNAMO', 'ELECTRIC', 'MACHINE', 'THOUGH', 'SMALL', 'WAS', 'ROBUST', 'FOR', 'UNDER', 'ALL', 'THE', 'VARYING', 'SPEEDS', 'OF', 'WATER', 'POWER', 'AND', 'THE', 'VICISSITUDES', 'OF', 'THE', 'PLANT', 'TO', 'WHICH', 'IT', 'BELONGED', 'IT', 'CONTINUED', 'IN', 'ACTIVE', 'USE', 'UNTIL', 'EIGHTEEN', 'NINETY', 'NINE', 'SEVENTEEN', 'YEARS'] +2300-131720-0004-1820: ref=['OWING', 'TO', 'HIS', 'INSISTENCE', 'ON', 'LOW', 'PRESSURE', 'DIRECT', 'CURRENT', 'FOR', 'USE', 'IN', 'DENSELY', 'POPULATED', 'DISTRICTS', 'AS', 'THE', 'ONLY', 'SAFE', 'AND', 'TRULY', 'UNIVERSAL', 'PROFITABLE', 'WAY', 'OF', 'DELIVERING', 'ELECTRICAL', 'ENERGY', 'TO', 'THE', 'CONSUMERS', 'EDISON', 'HAS', 'BEEN', 'FREQUENTLY', 'SPOKEN', 'OF', 'AS', 'AN', 'OPPONENT', 'OF', 'THE', 'ALTERNATING', 'CURRENT'] +2300-131720-0004-1820: hyp=['OWING', 'TO', 'HIS', 'INSISTENCE', 'ON', 'LOW', 'PRESSURE', 'DIRECT', 'CURRENT', 'FOR', 'USE', 'IN', 'DENSELY', 'POPULATED', 'DISTRICTS', 'AS', 'THE', 'ONLY', 'SAFE', 'AND', 'TRULY', 'UNIVERSAL', 'PROFITABLE', 'WAY', 'OF', 'DELIVERING', 'ELECTRICAL', 
'ENERGY', 'TO', 'THE', 'CONSUMERS', 'EDISON', 'HAS', 'BEEN', 'FREQUENTLY', 'SPOKEN', 'OF', 'AS', 'AN', 'OPPONENT', 'OF', 'THE', 'ALTERNATING', 'CURRENT'] +2300-131720-0005-1821: ref=['WHY', 'IF', 'WE', 'ERECT', 'A', 'STATION', 'AT', 'THE', 'FALLS', 'IT', 'IS', 'A', 'GREAT', 'ECONOMY', 'TO', 'GET', 'IT', 'UP', 'TO', 'THE', 'CITY'] +2300-131720-0005-1821: hyp=['WHY', 'IF', 'WE', 'ERECT', 'A', 'STATION', 'AT', 'THE', 'FALLS', 'IT', 'IS', 'A', 'GREAT', 'ECONOMY', 'TO', 'GET', 'IT', 'UP', 'TO', 'THE', 'CITY'] +2300-131720-0006-1822: ref=['THERE', 'SEEMS', 'NO', 'GOOD', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'IT', 'WILL', 'CHANGE'] +2300-131720-0006-1822: hyp=['THERE', 'SEEMS', 'NO', 'GOOD', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'IT', 'WILL', 'CHANGE'] +2300-131720-0007-1823: ref=['BROAD', 'AS', 'THE', 'PRAIRIES', 'AND', 'FREE', 'IN', 'THOUGHT', 'AS', 'THE', 'WINDS', 'THAT', 'SWEEP', 'THEM', 'HE', 'IS', 'IDIOSYNCRATICALLY', 'OPPOSED', 'TO', 'LOOSE', 'AND', 'WASTEFUL', 'METHODS', 'TO', 'PLANS', 'OF', 'EMPIRE', 'THAT', 'NEGLECT', 'THE', 'POOR', 'AT', 'THE', 'GATE'] +2300-131720-0007-1823: hyp=['BROAD', 'AS', 'THE', 'PRAIRIES', 'AND', 'FREE', 'IN', 'THOUGHT', 'AS', 'THE', 'WINDS', 'THAT', 'SWEPT', 'THEM', 'HE', 'IS', 'IDIOSYNCRATICALLY', 'OPPOSED', 'TO', 'LOOSEN', 'WASTEFUL', 'METHODS', 'TO', 'PLANS', 'OF', 'EMPIRE', 'THAT', 'NEGLECT', 'THE', 'POOR', 'AT', 'THE', 'GATE'] +2300-131720-0008-1824: ref=['EVERYTHING', 'HE', 'HAS', 'DONE', 'HAS', 'BEEN', 'AIMED', 'AT', 'THE', 'CONSERVATION', 'OF', 'ENERGY', 'THE', 'CONTRACTION', 'OF', 'SPACE', 'THE', 'INTENSIFICATION', 'OF', 'CULTURE'] +2300-131720-0008-1824: hyp=['EVERYTHING', 'HE', 'HAS', 'DONE', 'HAS', 'BEEN', 'AIMED', 'AT', 'THE', 'CONSERVATION', 'OF', 'ENERGY', 'THE', 'CONTRACTION', 'OF', 'SPACE', 'THE', 'INTENSIFICATION', 'OF', 'CULTURE'] +2300-131720-0009-1825: ref=['FOR', 'SOME', 'YEARS', 'IT', 'WAS', 'NOT', 'FOUND', 'FEASIBLE', 'TO', 'OPERATE', 'MOTORS', 'ON', 'ALTERNATING', 'CURRENT', 'CIRCUITS', 'AND', 'THAT', 'REASON', 'WAS', 'OFTEN', 'URGED', 'AGAINST', 'IT', 'SERIOUSLY'] +2300-131720-0009-1825: hyp=['FOR', 'SOME', 'YEARS', 'IT', 'WAS', 'NOT', 'FOUND', 'FEASIBLE', 'TO', 'OPERATE', 'MOTORS', 'ON', 'ALTERNATING', 'CURRENT', 'CIRCUITS', 'AND', 'THAT', 'REASON', 'WAS', 'OFTEN', 'URGED', 'AGAINST', 'IT', 'SERIOUSLY'] +2300-131720-0010-1826: ref=['IT', 'COULD', 'NOT', 'BE', 'USED', 'FOR', 'ELECTROPLATING', 'OR', 'DEPOSITION', 'NOR', 'COULD', 'IT', 'CHARGE', 'STORAGE', 'BATTERIES', 'ALL', 'OF', 'WHICH', 'ARE', 'EASILY', 'WITHIN', 'THE', 'ABILITY', 'OF', 'THE', 'DIRECT', 'CURRENT'] +2300-131720-0010-1826: hyp=['IT', 'COULD', 'NOT', 'BE', 'USED', 'FOR', 'ELECTROPLATING', 'OR', 'DEPOSITION', 'NOR', 'COULD', 'IT', 'CHARGE', 'STORAGE', 'BATTERIES', 'ALL', 'OF', 'WHICH', 'ARE', 'EASILY', 'WITHIN', 'THE', 'ABILITY', 'OF', 'THE', 'DIRECT', 'CURRENT'] +2300-131720-0011-1827: ref=['BUT', 'WHEN', 'IT', 'CAME', 'TO', 'BE', 'A', 'QUESTION', 'OF', 'LIGHTING', 'A', 'SCATTERED', 'SUBURB', 'A', 'GROUP', 'OF', 'DWELLINGS', 'ON', 'THE', 'OUTSKIRTS', 'A', 'REMOTE', 'COUNTRY', 'RESIDENCE', 'OR', 'A', 'FARM', 'HOUSE', 'THE', 'ALTERNATING', 'CURRENT', 'IN', 'ALL', 'ELEMENTS', 'SAVE', 'ITS', 'DANGER', 'WAS', 'AND', 'IS', 'IDEAL'] +2300-131720-0011-1827: hyp=['BUT', 'WHEN', 'IT', 'CAME', 'TO', 'BE', 'A', 'QUESTION', 'OF', 'LIGHTING', 'A', 'SCATTERED', 'SUBURB', 'A', 'GROUP', 'OF', 'DWELLINGS', 'ON', 'THE', 'OUTSKIRTS', 'A', 'REMOTE', 'COUNTRY', 'RESIDENCE', 'OR', 'A', 'FARM', 'HOUSE', 'THE', 'ALTERNATING', 'CURRENT', 'IN', 'ALL', 'ELEMENTS', 'SAVE', 'ITS', 'DANGER', 'WAS', 
'AND', 'IS', 'IDEAL'] +2300-131720-0012-1828: ref=['EDISON', 'WAS', 'INTOLERANT', 'OF', 'SHAM', 'AND', 'SHODDY', 'AND', 'NOTHING', 'WOULD', 'SATISFY', 'HIM', 'THAT', 'COULD', 'NOT', 'STAND', 'CROSS', 'EXAMINATION', 'BY', 'MICROSCOPE', 'TEST', 'TUBE', 'AND', 'GALVANOMETER'] +2300-131720-0012-1828: hyp=['EDISON', 'WAS', 'INTOLERANT', 'OF', 'SHAM', 'AND', 'SHOTTY', 'AND', 'NOTHING', 'WOULD', 'SATISFY', 'HIM', 'THAT', 'COULD', 'NOT', 'STAND', 'CROSS', 'EXAMINATION', 'BY', 'MICROSCOPE', 'TEST', 'TUBE', 'AND', 'GALVANOMETER'] +2300-131720-0013-1829: ref=['UNLESS', 'HE', 'COULD', 'SECURE', 'AN', 'ENGINE', 'OF', 'SMOOTHER', 'RUNNING', 'AND', 'MORE', 'EXACTLY', 'GOVERNED', 'AND', 'REGULATED', 'THAN', 'THOSE', 'AVAILABLE', 'FOR', 'HIS', 'DYNAMO', 'AND', 'LAMP', 'EDISON', 'REALIZED', 'THAT', 'HE', 'WOULD', 'FIND', 'IT', 'ALMOST', 'IMPOSSIBLE', 'TO', 'GIVE', 'A', 'STEADY', 'LIGHT'] +2300-131720-0013-1829: hyp=['UNLESS', 'HE', 'COULD', 'SECURE', 'AN', 'ENGINE', 'OF', 'SMOOTHER', 'RUNNING', 'AND', 'MORE', 'EXACTLY', 'GOVERN', 'AND', 'REGULATED', 'THAN', 'THOSE', 'AVAILABLE', 'FOR', 'HIS', 'DYNAMO', 'AND', 'LAMP', 'EDISON', 'REALIZED', 'THAT', 'HE', 'WOULD', 'FIND', 'IT', 'ALMOST', 'IMPOSSIBLE', 'TO', 'GIVE', 'A', 'STEADY', 'LIGHT'] +2300-131720-0014-1830: ref=['MISTER', 'EDISON', 'WAS', 'A', 'LEADER', 'FAR', 'AHEAD', 'OF', 'THE', 'TIME'] +2300-131720-0014-1830: hyp=['MISTER', 'EDISON', 'WAS', 'A', 'LEADER', 'FAR', 'AHEAD', 'OF', 'THE', 'TIME'] +2300-131720-0015-1831: ref=['HE', 'OBTAINED', 'THE', 'DESIRED', 'SPEED', 'AND', 'LOAD', 'WITH', 'A', 'FRICTION', 'BRAKE', 'ALSO', 'REGULATOR', 'OF', 'SPEED', 'BUT', 'WAITED', 'FOR', 'AN', 'INDICATOR', 'TO', 'VERIFY', 'IT'] +2300-131720-0015-1831: hyp=['HE', 'OBTAINED', 'THE', 'DESIRED', 'SPEED', 'AND', 'LOAD', 'WITH', 'A', 'FRICTION', 'BREAK', 'ALSO', 'REGULATOR', 'OF', 'SPEED', 'BUT', 'WAITED', 'FOR', 'AN', 'INDICATOR', 'TO', 'VERIFY', 'IT'] +2300-131720-0016-1832: ref=['THEN', 'AGAIN', 'THERE', 'WAS', 'NO', 'KNOWN', 'WAY', 'TO', 'LUBRICATE', 'AN', 'ENGINE', 'FOR', 'CONTINUOUS', 'RUNNING', 'AND', 'MISTER', 'EDISON', 'INFORMED', 'ME', 'THAT', 'AS', 'A', 'MARINE', 'ENGINE', 'STARTED', 'BEFORE', 'THE', 'SHIP', 'LEFT', 'NEW', 'YORK', 'AND', 'CONTINUED', 'RUNNING', 'UNTIL', 'IT', 'REACHED', 'ITS', 'HOME', 'PORT', 'SO', 'AN', 'ENGINE', 'FOR', 'HIS', 'PURPOSES', 'MUST', 'PRODUCE', 'LIGHT', 'AT', 'ALL', 'TIMES'] +2300-131720-0016-1832: hyp=['THEN', 'AGAIN', 'THERE', 'WAS', 'NO', 'KNOWN', 'WAY', 'TO', 'LUBRICADE', 'AN', 'ENGINE', 'FOR', 'CONTINUOUS', 'RUNNING', 'AND', 'MISTER', 'EDISON', 'INFORMED', 'ME', 'THAT', 'AS', 'A', 'MARINE', 'ENGINE', 'STARTED', 'BEFORE', 'THE', 'SHIP', 'LEFT', 'NEW', 'YORK', 'AND', 'CONTINUED', 'RUNNING', 'UNTIL', 'IT', 'REACHED', 'ITS', 'HOME', 'PORT', 'SO', 'AN', 'ENGINE', 'FOR', 'HIS', 'PURPOSES', 'MUST', 'PRODUCE', 'LIGHT', 'AT', 'ALL', 'TIMES'] +2300-131720-0017-1833: ref=['EDISON', 'HAD', 'INSTALLED', 'HIS', 'HISTORIC', 'FIRST', 'GREAT', 'CENTRAL', 'STATION', 'SYSTEM', 'IN', 'NEW', 'YORK', 'ON', 'THE', 'MULTIPLE', 'ARC', 'SYSTEM', 'COVERED', 'BY', 'HIS', 'FEEDER', 'AND', 'MAIN', 'INVENTION', 'WHICH', 'RESULTED', 'IN', 'A', 'NOTABLE', 'SAVING', 'IN', 'THE', 'COST', 'OF', 'CONDUCTORS', 'AS', 'AGAINST', 'A', 'STRAIGHT', 'TWO', 'WIRE', 'SYSTEM', 'THROUGHOUT', 'OF', 'THE', 'TREE', 'KIND'] +2300-131720-0017-1833: hyp=['EDISON', 'HAD', 'INSTALLED', 'HIS', 'HISTORIC', 'FIRST', 'GREAT', 'CENTRAL', 'STATION', 'SYSTEM', 'IN', 'NEW', 'YORK', 'ON', 'THE', 'MULTIPLE', 'ARC', 'SYSTEM', 'COVERED', 'BY', 'HIS', 'FEEDER', 'AND', 'MAIN', 'INVENTION', 
'WHICH', 'RESULTED', 'IN', 'A', 'NOTABLE', 'SAVING', 'IN', 'THE', 'COST', 'OF', 'CONDUCTORS', 'AS', 'AGAINST', 'A', 'STRAIT', 'TWO', 'WIRE', 'SYSTEM', 'THROUGHOUT', 'OF', 'THE', 'TREE', 'KIND'] +2300-131720-0018-1834: ref=['HE', 'SOON', 'FORESAW', 'THAT', 'STILL', 'GREATER', 'ECONOMY', 'WOULD', 'BE', 'NECESSARY', 'FOR', 'COMMERCIAL', 'SUCCESS', 'NOT', 'ALONE', 'FOR', 'THE', 'LARGER', 'TERRITORY', 'OPENING', 'BUT', 'FOR', 'THE', 'COMPACT', 'DISTRICTS', 'OF', 'LARGE', 'CITIES'] +2300-131720-0018-1834: hyp=['HE', 'SOON', 'FORESAW', 'THAT', 'STILL', 'GREATER', 'ECONOMY', 'WOULD', 'BE', 'NECESSARY', 'FOR', 'COMMERCIAL', 'SUCCESS', 'NOT', 'ALONE', 'FOR', 'THE', 'LARGER', 'TERRITORY', 'OPENING', 'BUT', 'FOR', 'THE', 'COMPACT', 'DISTRICT', 'OF', 'LARGE', 'CITIES'] +2300-131720-0019-1835: ref=['THE', 'STRONG', 'POSITION', 'HELD', 'BY', 'THE', 'EDISON', 'SYSTEM', 'UNDER', 'THE', 'STRENUOUS', 'COMPETITION', 'THAT', 'WAS', 'ALREADY', 'SPRINGING', 'UP', 'WAS', 'ENORMOUSLY', 'IMPROVED', 'BY', 'THE', 'INTRODUCTION', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'AND', 'IT', 'GAVE', 'AN', 'IMMEDIATE', 'IMPETUS', 'TO', 'INCANDESCENT', 'LIGHTING'] +2300-131720-0019-1835: hyp=['THE', 'STRONG', 'POSITION', 'HELD', 'BY', 'THE', 'EDISON', 'SYSTEM', 'UNDER', 'THE', 'STRENUOUS', 'COMPETITION', 'IT', 'WAS', 'ALREADY', 'SPRINGING', 'UP', 'WAS', 'ENORMOUSLY', 'IMPROVED', 'BY', 'THE', 'INTRODUCTION', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'AND', 'HE', 'GAVE', 'AN', 'IMMEDIATE', 'IMPETUS', 'TO', 'INCONDESCENT', 'LIGHTING'] +2300-131720-0020-1836: ref=['IT', 'WAS', 'SPECIALLY', 'SUITED', 'FOR', 'A', 'TRIAL', 'PLANT', 'ALSO', 'IN', 'THE', 'EARLY', 'DAYS', 'WHEN', 'A', 'YIELD', 'OF', 'SIX', 'OR', 'EIGHT', 'LAMPS', 'TO', 'THE', 'HORSE', 'POWER', 'WAS', 'CONSIDERED', 'SUBJECT', 'FOR', 'CONGRATULATION'] +2300-131720-0020-1836: hyp=['IT', 'WAS', 'SPECIALLY', 'SUITED', 'FOR', 'A', 'TRIAL', 'PLANT', 'ALSO', 'IN', 'THE', 'EARLY', 'DAYS', 'WHEN', 'A', 'YIELD', 'OF', 'SIX', 'OR', 'EIGHT', 'LAMPS', 'TO', 'THE', 'HORSE', 'BOWER', 'WAS', 'CONSIDERED', 'SUBJECT', 'FOR', 'CONGRATULATION'] +2300-131720-0021-1837: ref=['THE', 'STREET', 'CONDUCTORS', 'WERE', 'OF', 'THE', 'OVERHEAD', 'POLE', 'LINE', 'CONSTRUCTION', 'AND', 'WERE', 'INSTALLED', 'BY', 'THE', 'CONSTRUCTION', 'COMPANY', 'THAT', 'HAD', 'BEEN', 'ORGANIZED', 'BY', 'EDISON', 'TO', 'BUILD', 'AND', 'EQUIP', 'CENTRAL', 'STATIONS'] +2300-131720-0021-1837: hyp=['THE', 'STREET', 'CONDUCTORS', 'WERE', 'OF', 'THE', 'OVERHEAD', 'POLE', 'LINE', 'CONSTRUCTION', 'AND', 'WERE', 'INSTALLED', 'BY', 'THE', 'CONSTRUCTION', 'COMPANY', 'THAT', 'HAD', 'BEEN', 'ORGANIZED', 'BY', 'EDISON', 'TO', 'BUILD', 'AN', 'EQUIP', 'CENTRAL', 'STATIONS'] +2300-131720-0022-1838: ref=['MEANWHILE', 'HE', 'HAD', 'CALLED', 'UPON', 'ME', 'TO', 'MAKE', 'A', 'REPORT', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'KNOWN', 'IN', 'ENGLAND', 'AS', 'THE', 'HOPKINSON', 'BOTH', 'DOCTOR', 'JOHN', 'HOPKINSON', 'AND', 'MISTER', 'EDISON', 'BEING', 'INDEPENDENT', 'INVENTORS', 'AT', 'PRACTICALLY', 'THE', 'SAME', 'TIME'] +2300-131720-0022-1838: hyp=['MEANWHILE', 'HE', 'HAD', 'CALLED', 'UPON', 'ME', 'TO', 'MAKE', 'A', 'REPORT', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'KNOWN', 'IN', 'ENGLAND', 'AS', 'THE', 'HOPKINSON', 'BOTH', 'DOCTOR', 'JOHN', 'HOPKINSON', 'AND', 'MISTER', 'EDISON', 'BEING', 'INDEPENDENT', 'IN', 'VENORS', 'AT', 'PRACTICALLY', 'THE', 'SAME', 'TIME'] +2300-131720-0023-1839: ref=['I', 'THINK', 'HE', 'WAS', 'PERHAPS', 'MORE', 'APPRECIATIVE', 'THAN', 'I', 'WAS', 'OF', 'THE', 'DISCIPLINE', 'OF', 'THE', 'EDISON', 'CONSTRUCTION', 
'DEPARTMENT', 'AND', 'THOUGHT', 'IT', 'WOULD', 'BE', 'WELL', 'FOR', 'US', 'TO', 'WAIT', 'UNTIL', 'THE', 'MORNING', 'OF', 'THE', 'FOURTH', 'BEFORE', 'WE', 'STARTED', 'UP'] +2300-131720-0023-1839: hyp=['I', 'THINK', 'HE', 'WAS', 'PERHAPS', 'MORE', 'APPRECIATIVE', 'THAT', 'I', 'WAS', 'OF', 'THE', 'DISCIPLINE', 'OF', 'THE', 'EDISON', 'CONSTRUCTION', 'DEPARTMENT', 'AND', 'THOUGHT', 'IT', 'WOULD', 'BE', 'WELL', 'FOR', 'US', 'TO', 'WAIT', 'UNTIL', 'THE', 'MORNING', 'OF', 'THE', 'FOURTH', 'BEFORE', 'WE', 'STARTED', 'UP'] +2300-131720-0024-1840: ref=['BUT', 'THE', 'PLANT', 'RAN', 'AND', 'IT', 'WAS', 'THE', 'FIRST', 'THREE', 'WIRE', 'STATION', 'IN', 'THIS', 'COUNTRY'] +2300-131720-0024-1840: hyp=['BUT', 'THE', 'PLANT', 'RAN', 'AND', 'IT', 'WAS', 'THE', 'FIRST', 'THREE', 'WIRE', 'STATION', 'IN', 'THIS', 'COUNTRY'] +2300-131720-0025-1841: ref=['THEY', 'WERE', 'LATER', 'USED', 'AS', 'RESERVE', 'MACHINES', 'AND', 'FINALLY', 'WITH', 'THE', 'ENGINE', 'RETIRED', 'FROM', 'SERVICE', 'AS', 'PART', 'OF', 'THE', 'COLLECTION', 'OF', 'EDISONIA', 'BUT', 'THEY', 'REMAIN', 'IN', 'PRACTICALLY', 'AS', 'GOOD', 'CONDITION', 'AS', 'WHEN', 'INSTALLED', 'IN', 'EIGHTEEN', 'EIGHTY', 'THREE'] +2300-131720-0025-1841: hyp=['THEY', 'WERE', 'LATER', 'USED', 'AS', 'RESERVED', 'MACHINES', 'AND', 'FINALLY', 'WITH', 'THE', 'ENGINE', 'RETIRED', 'FROM', 'SERVICE', 'AS', 'PART', 'OF', 'THE', 'COLLECTION', 'OF', 'EDISONIA', 'BUT', 'THEY', 'REMAIN', 'IN', 'PRACTICALLY', 'AS', 'GOOD', 'CONDITION', 'AS', 'WHEN', 'INSTALLED', 'IN', 'EIGHTEEN', 'EIGHTY', 'THREE'] +2300-131720-0026-1842: ref=['THE', 'ARC', 'LAMP', 'INSTALLED', 'OUTSIDE', 'A', "CUSTOMER'S", 'PREMISES', 'OR', 'IN', 'A', 'CIRCUIT', 'FOR', 'PUBLIC', 'STREET', 'LIGHTING', 'BURNED', 'SO', 'MANY', 'HOURS', 'NIGHTLY', 'SO', 'MANY', 'NIGHTS', 'IN', 'THE', 'MONTH', 'AND', 'WAS', 'PAID', 'FOR', 'AT', 'THAT', 'RATE', 'SUBJECT', 'TO', 'REBATE', 'FOR', 'HOURS', 'WHEN', 'THE', 'LAMP', 'MIGHT', 'BE', 'OUT', 'THROUGH', 'ACCIDENT'] +2300-131720-0026-1842: hyp=['THE', 'ARC', 'LAMP', 'INSTALLED', 'OUTSIDE', 'A', "CUSTOMER'S", 'PREMISES', 'OR', 'IN', 'A', 'CIRCUIT', 'FOR', 'PUBLIC', 'STREET', 'LIGHTING', 'BURNED', 'SO', 'MANY', 'HOURS', 'NIGHTLY', 'SO', 'MANY', 'NIGHTS', 'IN', 'THE', 'MONTH', 'AND', 'WAS', 'PAID', 'FOR', 'AT', 'THAT', 'RATE', 'SUBJECT', 'TO', 'REBATE', 'FOR', 'HOURS', 'WHEN', 'THE', 'LAMP', 'MIGHT', 'BE', 'OUT', 'THROUGH', 'ACCIDENT'] +2300-131720-0027-1843: ref=['EDISON', 'HELD', 'THAT', 'THE', 'ELECTRICITY', 'SOLD', 'MUST', 'BE', 'MEASURED', 'JUST', 'LIKE', 'GAS', 'OR', 'WATER', 'AND', 'HE', 'PROCEEDED', 'TO', 'DEVELOP', 'A', 'METER'] +2300-131720-0027-1843: hyp=['EDISON', 'HELD', 'THAT', 'THE', 'ELECTRICITY', 'SOLD', 'MUST', 'BE', 'MEASURED', 'JUST', 'LIKE', 'GAS', 'OR', 'WATER', 'AND', 'HE', 'PROCEEDED', 'TO', 'DEVELOP', 'A', 'METER'] +2300-131720-0028-1844: ref=['THERE', 'WAS', 'INFINITE', 'SCEPTICISM', 'AROUND', 'HIM', 'ON', 'THE', 'SUBJECT', 'AND', 'WHILE', 'OTHER', 'INVENTORS', 'WERE', 'ALSO', 'GIVING', 'THE', 'SUBJECT', 'THEIR', 'THOUGHT', 'THE', 'PUBLIC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'ANYTHING', 'SO', 'UTTERLY', 'INTANGIBLE', 'AS', 'ELECTRICITY', 'THAT', 'COULD', 'NOT', 'BE', 'SEEN', 'OR', 'WEIGHED', 'AND', 'ONLY', 'GAVE', 'SECONDARY', 'EVIDENCE', 'OF', 'ITSELF', 'AT', 'THE', 'EXACT', 'POINT', 'OF', 'USE', 'COULD', 'NOT', 'BE', 'BROUGHT', 'TO', 'ACCURATE', 'REGISTRATION'] +2300-131720-0028-1844: hyp=['THERE', 'WAS', 'INFINITE', 'SCEPTICISM', 'AROUND', 'HIM', 'ON', 'THE', 'SUBJECT', 'AND', 'WHILE', 'OTHER', 'INVENTORS', 'WERE', 'ALSO', 'GIVING', 'THE', 
'SUBJECT', 'THEIR', 'THOUGHT', 'THE', 'PUBLIC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'ANYTHING', 'SO', 'UTTERLY', 'INTANGIBLE', 'AS', 'ELECTRICITY', 'THAT', 'COULD', 'NOT', 'BE', 'SEEN', 'OR', 'WEIGHED', 'AND', 'ONLY', 'GAVE', 'SECONDARY', 'EVIDENCE', 'OF', 'ITSELF', 'AT', 'THE', 'EXACT', 'POINT', 'OF', 'USE', 'COULD', 'NOT', 'BE', 'BROUGHT', 'TO', 'ACCURATE', 'REGISTRATION'] +2300-131720-0029-1845: ref=['HENCE', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'NO', 'LONGER', 'USED', 'DESPITE', 'ITS', 'EXCELLENT', 'QUALITIES'] +2300-131720-0029-1845: hyp=['HENCE', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'NO', 'LONGER', 'USED', 'DESPITE', 'ITS', 'EXCELLENT', 'QUALITIES'] +2300-131720-0030-1846: ref=['THE', 'PRINCIPLE', 'EMPLOYED', 'IN', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'THAT', 'WHICH', 'EXEMPLIFIES', 'THE', 'POWER', 'OF', 'ELECTRICITY', 'TO', 'DECOMPOSE', 'A', 'CHEMICAL', 'SUBSTANCE'] +2300-131720-0030-1846: hyp=['THE', 'PRINCIPLE', 'EMPLOYED', 'IN', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'THAT', 'WHICH', 'EXEMPLIFIES', 'THE', 'POWER', 'OF', 'ELECTRICITY', 'TO', 'DECOMPOSE', 'A', 'CHEMICAL', 'SUBSTANCE'] +2300-131720-0031-1847: ref=['ASSOCIATED', 'WITH', 'THIS', 'SIMPLE', 'FORM', 'OF', 'APPARATUS', 'WERE', 'VARIOUS', 'INGENIOUS', 'DETAILS', 'AND', 'REFINEMENTS', 'TO', 'SECURE', 'REGULARITY', 'OF', 'OPERATION', 'FREEDOM', 'FROM', 'INACCURACY', 'AND', 'IMMUNITY', 'FROM', 'SUCH', 'TAMPERING', 'AS', 'WOULD', 'PERMIT', 'THEFT', 'OF', 'CURRENT', 'OR', 'DAMAGE'] +2300-131720-0031-1847: hyp=['ASSOCIATED', 'WITH', 'THIS', 'SIMPLE', 'FORM', 'OF', 'APPARATUS', 'WERE', 'VARIOUS', 'INGENIOUS', 'DETAILS', 'AND', 'REFINEMENTS', 'TO', 'SECURE', 'REGULARITY', 'OF', 'OPERATION', 'FREEDOM', 'FROM', 'INACCURACY', 'AND', 'IMMUNITY', 'FROM', 'SUCH', 'TAMPERING', 'AS', 'WOULD', 'PERMIT', 'THEFT', 'OF', 'CURRENT', 'OR', 'DAMAGE'] +2300-131720-0032-1848: ref=['THE', 'STANDARD', 'EDISON', 'METER', 'PRACTICE', 'WAS', 'TO', 'REMOVE', 'THE', 'CELLS', 'ONCE', 'A', 'MONTH', 'TO', 'THE', 'METER', 'ROOM', 'OF', 'THE', 'CENTRAL', 'STATION', 'COMPANY', 'FOR', 'EXAMINATION', 'ANOTHER', 'SET', 'BEING', 'SUBSTITUTED'] +2300-131720-0032-1848: hyp=['THE', 'STANDARD', 'EDISON', 'METER', 'PRACTICE', 'WAS', 'TO', 'REMOVE', 'THE', 'CELLS', 'ONCE', 'A', 'MONTH', 'TO', 'THE', 'METEOR', 'ROOM', 'OF', 'THE', 'CENTRAL', 'STATION', 'COMPANY', 'FOR', 'EXAMINATION', 'ANOTHER', 'SET', 'BEING', 'SUBSTITUTED'] +2300-131720-0033-1849: ref=['IN', 'DECEMBER', 'EIGHTEEN', 'EIGHTY', 'EIGHT', 'MISTER', 'W', 'J', 'JENKS', 'READ', 'AN', 'INTERESTING', 'PAPER', 'BEFORE', 'THE', 'AMERICAN', 'INSTITUTE', 'OF', 'ELECTRICAL', 'ENGINEERS', 'ON', 'THE', 'SIX', 'YEARS', 'OF', 'PRACTICAL', 'EXPERIENCE', 'HAD', 'UP', 'TO', 'THAT', 'TIME', 'WITH', 'THE', 'METER', 'THEN', 'MORE', 'GENERALLY', 'IN', 'USE', 'THAN', 'ANY', 'OTHER'] +2300-131720-0033-1849: hyp=['IN', 'DECEMBER', 'EIGHTEEN', 'EIGHTY', 'EIGHT', 'MISTER', 'W', 'J', 'JENKS', 'READ', 'AN', 'INTERESTING', 'PAPER', 'BEFORE', 'THE', 'AMERICAN', 'INSTITUTE', 'OF', 'ELECTRICAL', 'ENGINEERS', 'ON', 'THE', 'SIX', 'YEARS', 'OF', 'PRACTICAL', 'EXPERIENCE', 'HAD', 'UP', 'TO', 'THAT', 'TIME', 'WITH', 'THE', 'METRE', 'THEN', 'MORE', 'GENERALLY', 'IN', 'USE', 'THAN', 'ANY', 'OTHER'] +2300-131720-0034-1850: ref=['THE', 'OTHERS', 'HAVING', 'BEEN', 'IN', 'OPERATION', 'TOO', 'SHORT', 'A', 'TIME', 'TO', 'SHOW', 'DEFINITE', 'RESULTS', 'ALTHOUGH', 'THEY', 'ALSO', 'WENT', 'QUICKLY', 'TO', 'A', 'DIVIDEND', 'BASIS'] +2300-131720-0034-1850: hyp=['THE', 'OTHERS', 'HAVING', 'BEEN', 'IN', 
'OPERATION', 'TOO', 'SHORT', 'A', 'TIME', 'TO', 'SHOW', 'DEFINITE', 'RESULTS', 'ALTHOUGH', 'THEY', 'ALSO', 'WENT', 'QUICKLY', 'TO', 'A', 'DIVIDEND', 'BASIS'] +2300-131720-0035-1851: ref=['IN', 'THIS', 'CONNECTION', 'IT', 'SHOULD', 'BE', 'MENTIONED', 'THAT', 'THE', 'ASSOCIATION', 'OF', 'EDISON', 'ILLUMINATING', 'COMPANIES', 'IN', 'THE', 'SAME', 'YEAR', 'ADOPTED', 'RESOLUTIONS', 'UNANIMOUSLY', 'TO', 'THE', 'EFFECT', 'THAT', 'THE', 'EDISON', 'METER', 'WAS', 'ACCURATE', 'AND', 'THAT', 'ITS', 'USE', 'WAS', 'NOT', 'EXPENSIVE', 'FOR', 'STATIONS', 'ABOVE', 'ONE', 'THOUSAND', 'LIGHTS', 'AND', 'THAT', 'THE', 'BEST', 'FINANCIAL', 'RESULTS', 'WERE', 'INVARIABLY', 'SECURED', 'IN', 'A', 'STATION', 'SELLING', 'CURRENT', 'BY', 'METER'] +2300-131720-0035-1851: hyp=['IN', 'THIS', 'CONNECTION', 'IT', 'SHOULD', 'BE', 'MENTIONED', 'THAT', 'THE', 'ASSOCIATION', 'OF', 'EDISON', 'ILLUMINATING', 'COMPANIES', 'IN', 'THE', 'SAME', 'YEAR', 'ADOPTED', 'RESOLUTIONS', 'UNANIMOUSLY', 'TO', 'THE', 'EFFECT', 'THAT', 'THE', 'EDISON', 'METER', 'WAS', 'ACCURATE', 'AND', 'THAT', 'ITS', 'USE', 'WAS', 'NOT', 'EXPENSIVE', 'FOR', 'STATIONS', 'ABOVE', 'ONE', 'THOUSAND', 'LIGHTS', 'AND', 'THAT', 'THE', 'BEST', 'FINANCIAL', 'RESULTS', 'WERE', 'INVARIABLY', 'SECURED', 'IN', 'A', 'STATION', 'SELLING', 'CURRENT', 'BY', 'METRE'] +2300-131720-0036-1852: ref=['THE', 'METER', 'CONTINUED', 'IN', 'GENERAL', 'SERVICE', 'DURING', 'EIGHTEEN', 'NINETY', 'NINE', 'AND', 'PROBABLY', 'UP', 'TO', 'THE', 'CLOSE', 'OF', 'THE', 'CENTURY'] +2300-131720-0036-1852: hyp=['THE', 'METRE', 'CONTINUED', 'IN', 'GENERAL', 'SERVICE', 'DURING', 'EIGHTEEN', 'NINETY', 'NINE', 'AND', 'PROBABLY', 'UP', 'TO', 'THE', 'CLOSE', 'OF', 'THE', 'CENTURY'] +2300-131720-0037-1853: ref=['HE', 'WEIGHED', 'AND', 'REWEIGHED', 'THE', 'METER', 'PLATES', 'AND', 'PURSUED', 'EVERY', 'LINE', 'OF', 'INVESTIGATION', 'IMAGINABLE', 'BUT', 'ALL', 'IN', 'VAIN'] +2300-131720-0037-1853: hyp=['HE', 'WEIGHED', 'AND', 'REWAYED', 'THE', 'METERPLATES', 'AND', 'PURSUED', 'EVERY', 'LINE', 'OF', 'INVESTIGATION', 'IMAGINABLE', 'BUT', 'ALL', 'IN', 'VAIN'] +2300-131720-0038-1854: ref=['HE', 'FELT', 'HE', 'WAS', 'UP', 'AGAINST', 'IT', 'AND', 'THAT', 'PERHAPS', 'ANOTHER', 'KIND', 'OF', 'A', 'JOB', 'WOULD', 'SUIT', 'HIM', 'BETTER'] +2300-131720-0038-1854: hyp=['HE', 'FELT', 'HE', 'WAS', 'UP', 'AGAINST', 'IT', 'AND', 'THAT', 'PERHAPS', 'ANOTHER', 'KIND', 'OF', 'A', 'JOB', 'WOULD', 'SUIT', 'HIM', 'BETTER'] +2300-131720-0039-1855: ref=['THE', 'PROBLEM', 'WAS', 'SOLVED'] +2300-131720-0039-1855: hyp=['THE', 'PROBLEM', 'WAS', 'SOLVED'] +2300-131720-0040-1856: ref=['WE', 'WERE', 'MORE', 'INTERESTED', 'IN', 'THE', 'TECHNICAL', 'CONDITION', 'OF', 'THE', 'STATION', 'THAN', 'IN', 'THE', 'COMMERCIAL', 'PART'] +2300-131720-0040-1856: hyp=['WE', 'WERE', 'MORE', 'INTERESTED', 'IN', 'THE', 'TECHNICAL', 'CONDITION', 'OF', 'THE', 'STATION', 'THAN', 'IN', 'THE', 'COMMERCIAL', 'PART'] +2300-131720-0041-1857: ref=['WE', 'HAD', 'METERS', 'IN', 'WHICH', 'THERE', 'WERE', 'TWO', 'BOTTLES', 'OF', 'LIQUID'] +2300-131720-0041-1857: hyp=['WE', 'HAD', 'METRES', 'IN', 'WHICH', 'THERE', 'WERE', 'TWO', 'BOTTLES', 'OF', 'LIQUID'] +237-126133-0000-2407: ref=['HERE', 'SHE', 'WOULD', 'STAY', 'COMFORTED', 'AND', 'SOOTHED', 'AMONG', 'THE', 'LOVELY', 'PLANTS', 'AND', 'RICH', 'EXOTICS', 'REJOICING', 'THE', 'HEART', 'OF', 'OLD', 'TURNER', 'THE', 'GARDENER', 'WHO', 'SINCE', "POLLY'S", 'FIRST', 'RAPTUROUS', 'ENTRANCE', 'HAD', 'TAKEN', 'HER', 'INTO', 'HIS', 'GOOD', 'GRACES', 'FOR', 'ALL', 'TIME'] +237-126133-0000-2407: hyp=['HERE', 'SHE', 'WOULD', 'STAY', 
'COMFORTED', 'AND', 'SOOTHE', 'AMONG', 'THE', 'LOVELY', 'PLANTS', 'AND', 'RICH', 'EXOTICS', 'REJOICING', 'THE', 'HEART', 'OF', 'OLD', 'TURNER', 'THE', 'GARDENER', 'WHO', 'SINCE', "POLLY'S", 'FIRST', 'RAPTUROUS', 'ENTRANCE', 'HAD', 'TAKEN', 'HER', 'INTO', 'HIS', 'GOOD', 'GRACES', 'FOR', 'ALL', 'TIME'] +237-126133-0001-2408: ref=['EVERY', 'CHANCE', 'SHE', 'COULD', 'STEAL', 'AFTER', 'PRACTICE', 'HOURS', 'WERE', 'OVER', 'AND', 'AFTER', 'THE', 'CLAMOROUS', 'DEMANDS', 'OF', 'THE', 'BOYS', 'UPON', 'HER', 'TIME', 'WERE', 'FULLY', 'SATISFIED', 'WAS', 'SEIZED', 'TO', 'FLY', 'ON', 'THE', 'WINGS', 'OF', 'THE', 'WIND', 'TO', 'THE', 'FLOWERS'] +237-126133-0001-2408: hyp=['EVERY', 'CHANCE', 'SHE', 'COULD', 'STEAL', 'AFTER', 'PRACTICE', 'HOURS', 'WERE', 'OVER', 'AND', 'AFTER', 'THE', 'CLAMOROUS', 'DEMANDS', 'OF', 'THE', 'BOYS', 'UPON', 'HER', 'TIME', 'WERE', 'FULLY', 'SATISFIED', 'WAS', 'SEIZED', 'TO', 'FLY', 'ON', 'THE', 'WINGS', 'OF', 'THE', 'WIND', 'TO', 'THE', 'FLOWERS'] +237-126133-0002-2409: ref=['THEN', 'DEAR', 'SAID', 'MISSUS', 'WHITNEY', 'YOU', 'MUST', 'BE', 'KINDER', 'TO', 'HER', 'THAN', 'EVER', 'THINK', 'WHAT', 'IT', 'WOULD', 'BE', 'FOR', 'ONE', 'OF', 'YOU', 'TO', 'BE', 'AWAY', 'FROM', 'HOME', 'EVEN', 'AMONG', 'FRIENDS'] +237-126133-0002-2409: hyp=['THEN', 'DEAR', 'SAID', 'MISSUS', 'WHITNEY', 'YOU', 'MUST', 'BE', 'KINDER', 'TO', 'HER', 'THAN', 'EVER', 'THINK', 'WHAT', 'IT', 'WOULD', 'BE', 'FOR', 'ONE', 'OF', 'YOU', 'TO', 'BE', 'AWAY', 'FROM', 'HOME', 'EVEN', 'AMONG', 'FRIENDS'] +237-126133-0003-2410: ref=['SOMEHOW', 'OF', 'ALL', 'THE', 'DAYS', 'WHEN', 'THE', 'HOME', 'FEELING', 'WAS', 'THE', 'STRONGEST', 'THIS', 'DAY', 'IT', 'SEEMED', 'AS', 'IF', 'SHE', 'COULD', 'BEAR', 'IT', 'NO', 'LONGER'] +237-126133-0003-2410: hyp=['SOMEHOW', 'OF', 'ALL', 'THE', 'DAYS', 'WHEN', 'THE', 'HOME', 'FEELING', 'WAS', 'THE', 'STRONGEST', 'THIS', 'DAY', 'IT', 'SEEMED', 'AS', 'IF', 'SHE', 'COULD', 'BEAR', 'IT', 'NO', 'LONGER'] +237-126133-0004-2411: ref=['IF', 'SHE', 'COULD', 'ONLY', 'SEE', 'PHRONSIE', 'FOR', 'JUST', 'ONE', 'MOMENT'] +237-126133-0004-2411: hyp=['IF', 'SHE', 'COULD', 'ONLY', 'SEE', 'PHRONSIE', 'FOR', 'JUST', 'ONE', 'MOMENT'] +237-126133-0005-2412: ref=['OH', "SHE'S", 'ALWAYS', 'AT', 'THE', 'PIANO', 'SAID', 'VAN', 'SHE', 'MUST', 'BE', 'THERE', 'NOW', 'SOMEWHERE', 'AND', 'THEN', 'SOMEBODY', 'LAUGHED'] +237-126133-0005-2412: hyp=['OH', "SHE'S", 'ALWAYS', 'AT', 'THE', 'PIANO', 'SAID', 'VAN', 'SHE', 'MUST', 'BE', 'THERE', 'NOW', 'SOMEWHERE', 'AND', 'THEN', 'SOMEBODY', 'LAUGHED'] +237-126133-0006-2413: ref=['AT', 'THIS', 'THE', 'BUNDLE', 'OPENED', 'SUDDENLY', 'AND', 'OUT', 'POPPED', 'PHRONSIE'] +237-126133-0006-2413: hyp=['AT', 'THIS', 'THE', 'BUNDLE', 'OPENED', 'SUDDENLY', 'AND', 'OUT', 'POPPED', 'PHRONSIE'] +237-126133-0007-2414: ref=['BUT', 'POLLY', "COULDN'T", 'SPEAK', 'AND', 'IF', 'JASPER', "HADN'T", 'CAUGHT', 'HER', 'JUST', 'IN', 'TIME', 'SHE', 'WOULD', 'HAVE', 'TUMBLED', 'OVER', 'BACKWARD', 'FROM', 'THE', 'STOOL', 'PHRONSIE', 'AND', 'ALL'] +237-126133-0007-2414: hyp=['BUT', 'POLLY', "COULDN'T", 'SPEAK', 'AND', 'IF', 'JASPER', "HADN'T", 'CAUGHT', 'HER', 'JUST', 'IN', 'TIME', 'SHE', 'WOULD', 'HAVE', 'TUMBLED', 'OVER', 'BACKWARD', 'FROM', 'THE', 'STOOL', 'PHRONSIE', 'AND', 'ALL'] +237-126133-0008-2415: ref=['ASKED', 'PHRONSIE', 'WITH', 'HER', 'LITTLE', 'FACE', 'CLOSE', 'TO', "POLLY'S", 'OWN'] +237-126133-0008-2415: hyp=['ASKED', 'PHRONSIE', 'WITH', 'HER', 'LITTLE', 'FACE', 'CLOSE', 'TO', "POLLY'S", 'OWN'] +237-126133-0009-2416: ref=['NOW', "YOU'LL", 'STAY', 'CRIED', 'VAN', 'SAY', 'POLLY', "WON'T", 'YOU'] 
+237-126133-0009-2416: hyp=['NOW', "YOU'LL", 'STAY', 'CRIED', 'VAN', 'SAY', 'POLLY', "WON'T", 'YOU'] +237-126133-0010-2417: ref=['OH', 'YOU', 'ARE', 'THE', 'DEAREST', 'AND', 'BEST', 'MISTER', 'KING', 'I', 'EVER', 'SAW', 'BUT', 'HOW', 'DID', 'YOU', 'MAKE', 'MAMMY', 'LET', 'HER', 'COME'] +237-126133-0010-2417: hyp=['OH', 'YOU', 'ARE', 'THE', 'DEAREST', 'AND', 'BEST', 'MISTER', 'KING', 'I', 'EVER', 'SAW', 'BUT', 'HOW', 'DID', 'YOU', 'MAKE', 'MAMMY', 'LET', 'HER', 'COME'] +237-126133-0011-2418: ref=["ISN'T", 'HE', 'SPLENDID', 'CRIED', 'JASPER', 'IN', 'INTENSE', 'PRIDE', 'SWELLING', 'UP', 'FATHER', 'KNEW', 'HOW', 'TO', 'DO', 'IT'] +237-126133-0011-2418: hyp=["ISN'T", 'HE', 'SPLENDID', 'CRIED', 'JASPER', 'IN', 'INTENSE', 'PRIDE', 'SWELLING', 'UP', 'FATHER', 'KNEW', 'HOW', 'TO', 'DO', 'IT'] +237-126133-0012-2419: ref=['THERE', 'THERE', 'HE', 'SAID', 'SOOTHINGLY', 'PATTING', 'HER', 'BROWN', 'FUZZY', 'HEAD'] +237-126133-0012-2419: hyp=['THERE', 'THERE', 'HE', 'SAID', 'SOOTHINGLY', 'PATTING', 'HER', 'BROWN', 'FUZZY', 'HEAD'] +237-126133-0013-2420: ref=['I', 'KNOW', 'GASPED', 'POLLY', 'CONTROLLING', 'HER', 'SOBS', 'I', "WON'T", 'ONLY', 'I', "CAN'T", 'THANK', 'YOU'] +237-126133-0013-2420: hyp=['I', 'KNOW', 'GASPED', 'POLLY', 'CONTROLLING', 'HER', 'SOBS', 'I', "WON'T", 'ONLY', 'I', "CAN'T", 'THANK', 'YOU'] +237-126133-0014-2421: ref=['ASKED', 'PHRONSIE', 'IN', 'INTENSE', 'INTEREST', 'SLIPPING', 'DOWN', 'OUT', 'OF', "POLLY'S", 'ARMS', 'AND', 'CROWDING', 'UP', 'CLOSE', 'TO', "JASPER'S", 'SIDE'] +237-126133-0014-2421: hyp=['ASKED', 'PHRONSIE', 'IN', 'INTENSE', 'INTEREST', 'SLIPPING', 'DOWN', 'OUT', 'OF', "POLLY'S", 'ARMS', 'AND', 'CROWDING', 'UP', 'CLOSE', 'TO', "JASPER'S", 'SIDE'] +237-126133-0015-2422: ref=['YES', 'ALL', 'ALONE', 'BY', 'HIMSELF', 'ASSERTED', 'JASPER', 'VEHEMENTLY', 'AND', 'WINKING', 'FURIOUSLY', 'TO', 'THE', 'OTHERS', 'TO', 'STOP', 'THEIR', 'LAUGHING', 'HE', 'DID', 'NOW', 'TRULY', 'PHRONSIE'] +237-126133-0015-2422: hyp=['YES', 'ALL', 'ALONE', 'BY', 'HIMSELF', 'ASSERTED', 'JASPER', 'VEHEMENTLY', 'AND', 'WINKING', 'FURIOUSLY', 'TO', 'THE', 'OTHERS', 'TO', 'STOP', 'THEIR', 'LAUGHING', 'HE', 'DID', 'NOW', 'TRULY', 'PHRONSIE'] +237-126133-0016-2423: ref=['OH', 'NO', 'JASPER', 'I', 'MUST', 'GO', 'BY', 'MY', 'VERY', 'OWN', 'SELF'] +237-126133-0016-2423: hyp=['OH', 'NO', 'JAPS', 'HER', 'I', 'MUST', 'GO', 'BY', 'MY', 'VERY', 'OWN', 'SELF'] +237-126133-0017-2424: ref=['THERE', 'JAP', "YOU'VE", 'CAUGHT', 'IT', 'LAUGHED', 'PERCY', 'WHILE', 'THE', 'OTHERS', 'SCREAMED', 'AT', 'THE', 'SIGHT', 'OF', "JASPER'S", 'FACE'] +237-126133-0017-2424: hyp=['THERE', 'JAP', "YOU'VE", 'GOT', 'IT', 'LAUGHED', 'PERCY', 'WHILE', 'THE', 'OTHERS', 'SCREAMED', 'AT', 'THE', 'SIGHT', 'OF', "JASPER'S", 'FACE'] +237-126133-0018-2425: ref=["DON'T", 'MIND', 'IT', 'POLLY', 'WHISPERED', 'JASPER', "TWASN'T", 'HER', 'FAULT'] +237-126133-0018-2425: hyp=["DON'T", 'MIND', 'IT', 'POLLY', 'WHISPERED', 'JASPER', "TWASN'T", 'HER', 'FAULT'] +237-126133-0019-2426: ref=['DEAR', 'ME', 'EJACULATED', 'THE', 'OLD', 'GENTLEMAN', 'IN', 'THE', 'UTMOST', 'AMAZEMENT', 'AND', 'SUCH', 'A', 'TIME', 'AS', "I'VE", 'HAD', 'TO', 'GET', 'HER', 'HERE', 'TOO'] +237-126133-0019-2426: hyp=['DEAR', 'ME', 'EJACULATED', 'THE', 'OLD', 'GENTLEMAN', 'IN', 'THE', 'UTMOST', 'AMAZEMENT', 'AND', 'SUCH', 'A', 'TIME', 'AS', "I'VE", 'HAD', 'TO', 'GET', 'HER', 'HERE', 'TOO'] +237-126133-0020-2427: ref=['HOW', 'DID', 'HER', 'MOTHER', 'EVER', 'LET', 'HER', 'GO'] +237-126133-0020-2427: hyp=['HOW', 'DID', 'HER', 'MOTHER', 'EVER', 'LET', 'HER', 'GO'] +237-126133-0021-2428: 
ref=['SHE', 'ASKED', 'IMPULSIVELY', 'I', "DIDN'T", 'BELIEVE', 'YOU', 'COULD', 'PERSUADE', 'HER', 'FATHER'] +237-126133-0021-2428: hyp=['SHE', 'ASKED', 'IMPULSIVELY', 'I', "DIDN'T", 'BELIEVE', 'YOU', 'COULD', 'PERSUADE', 'HER', 'FATHER'] +237-126133-0022-2429: ref=['I', "DIDN'T", 'HAVE', 'ANY', 'FEARS', 'IF', 'I', 'WORKED', 'IT', 'RIGHTLY', 'SAID', 'THE', 'OLD', 'GENTLEMAN', 'COMPLACENTLY'] +237-126133-0022-2429: hyp=['I', "DIDN'T", 'HAVE', 'ANY', 'FEARS', 'IF', 'I', 'WORKED', 'IT', 'RIGHTLY', 'SAID', 'THE', 'OLD', 'GENTLEMAN', 'COMPLACENTLY'] +237-126133-0023-2430: ref=['HE', 'CRIED', 'IN', 'HIGH', 'DUDGEON', 'JUST', 'AS', 'IF', 'HE', 'OWNED', 'THE', 'WHOLE', 'OF', 'THE', 'PEPPERS', 'AND', 'COULD', 'DISPOSE', 'OF', 'THEM', 'ALL', 'TO', 'SUIT', 'HIS', 'FANCY'] +237-126133-0023-2430: hyp=['HE', 'CRIED', 'AND', 'HIGH', 'DUDGEON', 'JUST', 'AS', 'IF', 'HE', 'OWNED', 'THE', 'WHOLE', 'OF', 'THE', 'PEPPERS', 'AND', 'COULD', 'DISPOSE', 'OF', 'THEM', 'ALL', 'TO', 'SUIT', 'HIS', 'FANCY'] +237-126133-0024-2431: ref=['AND', 'THE', 'OLD', 'GENTLEMAN', 'WAS', 'SO', 'DELIGHTED', 'WITH', 'HIS', 'SUCCESS', 'THAT', 'HE', 'HAD', 'TO', 'BURST', 'OUT', 'INTO', 'A', 'SERIES', 'OF', 'SHORT', 'HAPPY', 'BITS', 'OF', 'LAUGHTER', 'THAT', 'OCCUPIED', 'QUITE', 'A', 'SPACE', 'OF', 'TIME'] +237-126133-0024-2431: hyp=['AND', 'THE', 'OLD', 'GENTLEMAN', 'WAS', 'SO', 'DELIGHTED', 'WITH', 'HIS', 'SUCCESS', 'THAT', 'HE', 'HAD', 'TO', 'BURST', 'OUT', 'INTO', 'A', 'SERIES', 'OF', 'SHORT', 'HAPPY', 'BITS', 'OF', 'LAUGHTER', 'THAT', 'OCCUPIED', 'QUITE', 'A', 'SPACE', 'OF', 'TIME'] +237-126133-0025-2432: ref=['AT', 'LAST', 'HE', 'CAME', 'OUT', 'OF', 'THEM', 'AND', 'WIPED', 'HIS', 'FACE', 'VIGOROUSLY'] +237-126133-0025-2432: hyp=['AT', 'LAST', 'HE', 'CAME', 'OUT', 'OF', 'THEM', 'AND', 'WIPED', 'HIS', 'FACE', 'VIGOROUSLY'] +237-134493-0000-2388: ref=['IT', 'IS', 'SIXTEEN', 'YEARS', 'SINCE', 'JOHN', 'BERGSON', 'DIED'] +237-134493-0000-2388: hyp=['IT', 'IS', 'SIXTEEN', 'YEARS', 'SINCE', 'JOHN', 'BERKS', 'AND', 'DIED'] +237-134493-0001-2389: ref=['HIS', 'WIFE', 'NOW', 'LIES', 'BESIDE', 'HIM', 'AND', 'THE', 'WHITE', 'SHAFT', 'THAT', 'MARKS', 'THEIR', 'GRAVES', 'GLEAMS', 'ACROSS', 'THE', 'WHEAT', 'FIELDS'] +237-134493-0001-2389: hyp=['HIS', 'WIFE', 'NOW', 'LIES', 'BESIDE', 'HIM', 'AND', 'THE', 'WHITE', 'SHAFT', 'THAT', 'MARKS', 'THEIR', 'GRAVES', 'GLEAMS', 'ACROSS', 'THE', 'WHEAT', 'FIELDS'] +237-134493-0002-2390: ref=['FROM', 'THE', 'NORWEGIAN', 'GRAVEYARD', 'ONE', 'LOOKS', 'OUT', 'OVER', 'A', 'VAST', 'CHECKER', 'BOARD', 'MARKED', 'OFF', 'IN', 'SQUARES', 'OF', 'WHEAT', 'AND', 'CORN', 'LIGHT', 'AND', 'DARK', 'DARK', 'AND', 'LIGHT'] +237-134493-0002-2390: hyp=['FROM', 'THE', 'NORWEGIAN', 'GRAVEYARD', 'ONE', 'LOOKS', 'OUT', 'OVER', 'A', 'VAST', 'CHECKER', 'BOARD', 'MARKED', 'OFF', 'IN', 'SQUARES', 'OF', 'WHEAT', 'AND', 'CORN', 'LIGHT', 'AND', 'DARK', 'AND', 'LIGHT'] +237-134493-0003-2391: ref=['FROM', 'THE', 'GRAVEYARD', 'GATE', 'ONE', 'CAN', 'COUNT', 'A', 'DOZEN', 'GAYLY', 'PAINTED', 'FARMHOUSES', 'THE', 'GILDED', 'WEATHER', 'VANES', 'ON', 'THE', 'BIG', 'RED', 'BARNS', 'WINK', 'AT', 'EACH', 'OTHER', 'ACROSS', 'THE', 'GREEN', 'AND', 'BROWN', 'AND', 'YELLOW', 'FIELDS'] +237-134493-0003-2391: hyp=['FROM', 'THE', 'GRAVEYARD', 'GATE', 'ONE', 'CAN', 'COUNT', 'A', 'DOZEN', 'GAILY', 'PAINTED', 'FARM', 'HOUSES', 'THE', 'GILDED', 'WEATHER', 'VEINS', 'ON', 'THE', 'BIG', 'RED', 'BARNS', 'WINK', 'AT', 'EACH', 'OTHER', 'ACROSS', 'THE', 'GREEN', 'AND', 'BROWN', 'AND', 'YELLOW', 'FIELDS'] +237-134493-0004-2392: ref=['THE', 'AIR', 'AND', 'THE', 'EARTH', 
'ARE', 'CURIOUSLY', 'MATED', 'AND', 'INTERMINGLED', 'AS', 'IF', 'THE', 'ONE', 'WERE', 'THE', 'BREATH', 'OF', 'THE', 'OTHER'] +237-134493-0004-2392: hyp=['THE', 'AIR', 'AND', 'THE', 'EARTH', 'ARE', 'CURIOUSLY', 'MATED', 'AND', 'INTERMINGLED', 'AS', 'IF', 'THE', 'ONE', 'WERE', 'THE', 'BREATH', 'OF', 'THE', 'OTHER'] +237-134493-0005-2393: ref=['HE', 'WAS', 'A', 'SPLENDID', 'FIGURE', 'OF', 'A', 'BOY', 'TALL', 'AND', 'STRAIGHT', 'AS', 'A', 'YOUNG', 'PINE', 'TREE', 'WITH', 'A', 'HANDSOME', 'HEAD', 'AND', 'STORMY', 'GRAY', 'EYES', 'DEEPLY', 'SET', 'UNDER', 'A', 'SERIOUS', 'BROW'] +237-134493-0005-2393: hyp=['HE', 'WAS', 'A', 'SPLENDID', 'FIGURE', 'OF', 'A', 'BOY', 'TALL', 'AND', 'STRAIGHT', 'AS', 'A', 'YOUNG', 'PINE', 'TREE', 'WITH', 'A', 'HANDSOME', 'HEAD', 'AND', 'STORMY', 'GRAY', 'EYES', 'DEEPLY', 'SET', 'UNDER', 'A', 'SERIOUS', 'BROW'] +237-134493-0006-2394: ref=["THAT'S", 'NOT', 'MUCH', 'OF', 'A', 'JOB', 'FOR', 'AN', 'ATHLETE', 'HERE', "I'VE", 'BEEN', 'TO', 'TOWN', 'AND', 'BACK'] +237-134493-0006-2394: hyp=["THAT'S", 'NOT', 'MUCH', 'OF', 'A', 'JOB', 'FOR', 'AN', 'ATHLETE', 'HERE', "I'VE", 'BEEN', 'TOWN', 'AND', 'BACK'] +237-134493-0007-2395: ref=['ALEXANDRA', 'LETS', 'YOU', 'SLEEP', 'LATE'] +237-134493-0007-2395: hyp=['ALEXANDER', "THAT'S", 'YOU', 'SLEEP', 'LATE'] +237-134493-0008-2396: ref=['SHE', 'GATHERED', 'UP', 'HER', 'REINS'] +237-134493-0008-2396: hyp=['SHE', 'GATHERED', 'UP', 'HER', 'REINS'] +237-134493-0009-2397: ref=['PLEASE', 'WAIT', 'FOR', 'ME', 'MARIE', 'EMIL', 'COAXED'] +237-134493-0009-2397: hyp=['PLEASE', 'WAIT', 'FOR', 'ME', 'MARIE', 'AMYL', 'COAXED'] +237-134493-0010-2398: ref=['I', 'NEVER', 'SEE', "LOU'S", 'SCYTHE', 'OVER', 'HERE'] +237-134493-0010-2398: hyp=['I', 'NEVER', 'SEE', 'LOOSE', 'SIGH', 'OVER', 'HERE'] +237-134493-0011-2399: ref=['HOW', 'BROWN', "YOU'VE", 'GOT', 'SINCE', 'YOU', 'CAME', 'HOME', 'I', 'WISH', 'I', 'HAD', 'AN', 'ATHLETE', 'TO', 'MOW', 'MY', 'ORCHARD'] +237-134493-0011-2399: hyp=['HOW', 'BROWN', "YOU'VE", 'GOT', 'SINCE', 'YOU', 'CAME', 'HOME', 'I', 'WISH', 'I', 'HAD', 'AN', 'ATHLETE', 'TO', 'MOW', 'MY', 'ORCHARD'] +237-134493-0012-2400: ref=['I', 'GET', 'WET', 'TO', 'MY', 'KNEES', 'WHEN', 'I', 'GO', 'DOWN', 'TO', 'PICK', 'CHERRIES'] +237-134493-0012-2400: hyp=['I', 'GET', 'WET', 'TO', 'MY', 'KNEES', 'WHEN', 'I', 'GO', 'DOWN', 'TO', 'PICTURES'] +237-134493-0013-2401: ref=['INDEED', 'HE', 'HAD', 'LOOKED', 'AWAY', 'WITH', 'THE', 'PURPOSE', 'OF', 'NOT', 'SEEING', 'IT'] +237-134493-0013-2401: hyp=['INDEED', 'HE', 'HAD', 'LOOKED', 'AWAY', 'WITH', 'A', 'PURPOSE', 'OF', 'NOT', 'SEEING', 'IT'] +237-134493-0014-2402: ref=['THEY', 'THINK', "YOU'RE", 'PROUD', 'BECAUSE', "YOU'VE", 'BEEN', 'AWAY', 'TO', 'SCHOOL', 'OR', 'SOMETHING'] +237-134493-0014-2402: hyp=['THEY', 'THINK', 'YOU', 'ARE', 'PROUD', 'BECAUSE', "YOU'VE", 'BEEN', 'AWAY', 'TO', 'SCHOOL', 'OR', 'SOMETHING'] +237-134493-0015-2403: ref=['THERE', 'WAS', 'SOMETHING', 'INDIVIDUAL', 'ABOUT', 'THE', 'GREAT', 'FARM', 'A', 'MOST', 'UNUSUAL', 'TRIMNESS', 'AND', 'CARE', 'FOR', 'DETAIL'] +237-134493-0015-2403: hyp=['THERE', 'WAS', 'SOMETHING', 'INDIVIDUAL', 'ABOUT', 'THE', 'GREAT', 'FARM', 'A', 'MOST', 'UNUSUAL', 'TRIMNESS', 'AND', 'CARE', 'FOR', 'DETAIL'] +237-134493-0016-2404: ref=['ON', 'EITHER', 'SIDE', 'OF', 'THE', 'ROAD', 'FOR', 'A', 'MILE', 'BEFORE', 'YOU', 'REACHED', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'STOOD', 'TALL', 'OSAGE', 'ORANGE', 'HEDGES', 'THEIR', 'GLOSSY', 'GREEN', 'MARKING', 'OFF', 'THE', 'YELLOW', 'FIELDS'] +237-134493-0016-2404: hyp=['ON', 'EITHER', 'SIDE', 'OF', 'THE', 'ROAD', 'FOR', 'A', 
'MILE', 'BEFORE', 'YOU', 'REACHED', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'STOOD', 'TALL', 'O', 'SAGE', 'ORANGE', 'HEDGES', 'THEIR', 'GLOSSY', 'GREEN', 'MARKING', 'OFF', 'THE', 'YELLOW', 'FIELDS'] +237-134493-0017-2405: ref=['ANY', 'ONE', 'THEREABOUTS', 'WOULD', 'HAVE', 'TOLD', 'YOU', 'THAT', 'THIS', 'WAS', 'ONE', 'OF', 'THE', 'RICHEST', 'FARMS', 'ON', 'THE', 'DIVIDE', 'AND', 'THAT', 'THE', 'FARMER', 'WAS', 'A', 'WOMAN', 'ALEXANDRA', 'BERGSON'] +237-134493-0017-2405: hyp=['ANY', 'ONE', 'THEREABOUTS', 'WOULD', 'HAVE', 'TOLD', 'YOU', 'THAT', 'THIS', 'WAS', 'ONE', 'OF', 'THE', 'RICHEST', 'FARMS', 'ON', 'THE', 'DIVIDE', 'AND', 'THAT', 'THE', 'FARMER', 'WAS', 'A', 'WOMAN', 'ALEXANDRA', 'BERGSON'] +237-134493-0018-2406: ref=['THERE', 'IS', 'EVEN', 'A', 'WHITE', 'ROW', 'OF', 'BEEHIVES', 'IN', 'THE', 'ORCHARD', 'UNDER', 'THE', 'WALNUT', 'TREES'] +237-134493-0018-2406: hyp=['THERE', 'IS', 'EVEN', 'A', 'WHITE', 'ROW', 'OF', 'BEEHIVES', 'IN', 'THE', 'ORCHARD', 'UNDER', 'THE', 'WALNUT', 'TREES'] +237-134500-0000-2345: ref=['FRANK', 'READ', 'ENGLISH', 'SLOWLY', 'AND', 'THE', 'MORE', 'HE', 'READ', 'ABOUT', 'THIS', 'DIVORCE', 'CASE', 'THE', 'ANGRIER', 'HE', 'GREW'] +237-134500-0000-2345: hyp=['FRANK', 'READ', 'ENGLISH', 'SLOWLY', 'AND', 'THE', 'MORE', 'HE', 'READ', 'ABOUT', 'THIS', 'DIVORCE', 'CASE', 'THE', 'ANGRIER', 'HE', 'GREW'] +237-134500-0001-2346: ref=['MARIE', 'SIGHED'] +237-134500-0001-2346: hyp=['MARIE', 'SIGHED'] +237-134500-0002-2347: ref=['A', 'BRISK', 'WIND', 'HAD', 'COME', 'UP', 'AND', 'WAS', 'DRIVING', 'PUFFY', 'WHITE', 'CLOUDS', 'ACROSS', 'THE', 'SKY'] +237-134500-0002-2347: hyp=['A', 'BRAY', 'SQUINT', 'HAD', 'COME', 'UP', 'AND', 'WAS', 'DRIVING', 'PUFFY', 'WHITE', 'CLOUDS', 'ACROSS', 'THE', 'SKY'] +237-134500-0003-2348: ref=['THE', 'ORCHARD', 'WAS', 'SPARKLING', 'AND', 'RIPPLING', 'IN', 'THE', 'SUN'] +237-134500-0003-2348: hyp=['THE', 'ORCHARD', 'WAS', 'SPARKLING', 'AND', 'RIPPLING', 'IN', 'THE', 'SUN'] +237-134500-0004-2349: ref=['THAT', 'INVITATION', 'DECIDED', 'HER'] +237-134500-0004-2349: hyp=['THAT', 'INVITATION', 'DECIDED', 'HER'] +237-134500-0005-2350: ref=['OH', 'BUT', "I'M", 'GLAD', 'TO', 'GET', 'THIS', 'PLACE', 'MOWED'] +237-134500-0005-2350: hyp=['OH', 'BUT', "I'M", 'GLAD', 'TO', 'GET', 'THIS', 'PLACE', 'MOWED'] +237-134500-0006-2351: ref=['JUST', 'SMELL', 'THE', 'WILD', 'ROSES', 'THEY', 'ARE', 'ALWAYS', 'SO', 'SPICY', 'AFTER', 'A', 'RAIN'] +237-134500-0006-2351: hyp=['JUST', 'SMELL', 'THE', 'WILD', 'ROSES', 'THEY', 'ARE', 'ALWAYS', 'SO', 'SPICY', 'AFTER', 'A', 'RAIN'] +237-134500-0007-2352: ref=['WE', 'NEVER', 'HAD', 'SO', 'MANY', 'OF', 'THEM', 'IN', 'HERE', 'BEFORE'] +237-134500-0007-2352: hyp=['WE', 'NEVER', 'HAD', 'SO', 'MANY', 'OF', 'THEM', 'IN', 'HERE', 'BEFORE'] +237-134500-0008-2353: ref=['I', 'SUPPOSE', "IT'S", 'THE', 'WET', 'SEASON', 'WILL', 'YOU', 'HAVE', 'TO', 'CUT', 'THEM', 'TOO'] +237-134500-0008-2353: hyp=['I', 'SUPPOSE', "IT'S", 'THE', 'WET', 'SEASON', 'WILL', 'YOU', 'HAVE', 'TO', 'CUT', 'THEM', 'TOO'] +237-134500-0009-2354: ref=['I', 'SUPPOSE', "THAT'S", 'THE', 'WET', 'SEASON', 'TOO', 'THEN'] +237-134500-0009-2354: hyp=['I', 'SUPPOSE', "THAT'S", 'THE', 'WHITE', 'SEASON', 'TOO', 'THEN'] +237-134500-0010-2355: ref=["IT'S", 'EXCITING', 'TO', 'SEE', 'EVERYTHING', 'GROWING', 'SO', 'FAST', 'AND', 'TO', 'GET', 'THE', 'GRASS', 'CUT'] +237-134500-0010-2355: hyp=["IT'S", 'EXCITING', 'TO', 'SEE', 'EVERYTHING', 'GROWING', 'SO', 'FAST', 'AND', 'TO', 'GET', 'THE', 'GRASS', 'CUT'] +237-134500-0011-2356: ref=["AREN'T", 'YOU', 'SPLASHED', 'LOOK', 'AT', 'THE', 'SPIDER', 'WEBS', 
'ALL', 'OVER', 'THE', 'GRASS'] +237-134500-0011-2356: hyp=["AREN'T", 'YOU', 'SPLASHED', 'LOOK', 'AT', 'THE', 'SPIDER', 'WHIPS', 'ALL', 'OVER', 'THE', 'GRASS'] +237-134500-0012-2357: ref=['IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HEARD', 'THE', 'CHERRIES', 'DROPPING', 'SMARTLY', 'INTO', 'THE', 'PAIL', 'AND', 'HE', 'BEGAN', 'TO', 'SWING', 'HIS', 'SCYTHE', 'WITH', 'THAT', 'LONG', 'EVEN', 'STROKE', 'THAT', 'FEW', 'AMERICAN', 'BOYS', 'EVER', 'LEARN'] +237-134500-0012-2357: hyp=['IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HEARD', 'THE', 'CHERRIES', 'DROPPING', 'SMARTLY', 'INTO', 'THE', 'PAIL', 'AND', 'HE', 'BEGAN', 'TO', 'SWING', 'HIS', 'SCYTHE', 'WITH', 'THAT', 'LONG', 'EVEN', 'STROKE', 'THAT', 'FEW', 'AMERICAN', 'BOYS', 'EVER', 'LEARN'] +237-134500-0013-2358: ref=['MARIE', 'PICKED', 'CHERRIES', 'AND', 'SANG', 'SOFTLY', 'TO', 'HERSELF', 'STRIPPING', 'ONE', 'GLITTERING', 'BRANCH', 'AFTER', 'ANOTHER', 'SHIVERING', 'WHEN', 'SHE', 'CAUGHT', 'A', 'SHOWER', 'OF', 'RAINDROPS', 'ON', 'HER', 'NECK', 'AND', 'HAIR'] +237-134500-0013-2358: hyp=['MARIE', 'PICKED', 'THE', 'CHERRIES', 'AND', 'SANG', 'SOFTLY', 'TO', 'HERSELF', 'STRIPPING', 'ONE', 'GLITTERING', 'RANCH', 'AFTER', 'ANOTHER', 'SHIVERING', 'WHEN', 'SHE', 'CAUGHT', 'A', 'SHOWER', 'OF', 'RAINDROPS', 'ON', 'HER', 'NECK', 'AND', 'HAIR'] +237-134500-0014-2359: ref=['AND', 'EMIL', 'MOWED', 'HIS', 'WAY', 'SLOWLY', 'DOWN', 'TOWARD', 'THE', 'CHERRY', 'TREES'] +237-134500-0014-2359: hyp=['AND', 'AMIEL', 'MOWED', 'HIS', 'WAY', 'SLOWLY', 'DOWN', 'TOWARD', 'THE', 'CHERRY', 'TREES'] +237-134500-0015-2360: ref=['THAT', 'SUMMER', 'THE', 'RAINS', 'HAD', 'BEEN', 'SO', 'MANY', 'AND', 'OPPORTUNE', 'THAT', 'IT', 'WAS', 'ALMOST', 'MORE', 'THAN', 'SHABATA', 'AND', 'HIS', 'MAN', 'COULD', 'DO', 'TO', 'KEEP', 'UP', 'WITH', 'THE', 'CORN', 'THE', 'ORCHARD', 'WAS', 'A', 'NEGLECTED', 'WILDERNESS'] +237-134500-0015-2360: hyp=['THAT', 'SUMMER', 'THE', 'RAINS', 'HAD', 'BEEN', 'SO', 'MANY', 'AND', 'OPPORTUNE', 'THAT', 'IT', 'WAS', 'ALMOST', 'MORE', 'THAN', 'CHEBATA', 'AND', 'HIS', 'MAN', 'COULD', 'DO', 'TO', 'KEEP', 'UP', 'WITH', 'THE', 'CORN', 'THE', 'ORCHARD', 'WAS', 'A', 'NEGLECTED', 'WILDERNESS'] +237-134500-0016-2361: ref=['I', "DON'T", 'KNOW', 'ALL', 'OF', 'THEM', 'BUT', 'I', 'KNOW', 'LINDENS', 'ARE'] +237-134500-0016-2361: hyp=['I', "DON'T", 'KNOW', 'ALL', 'OF', 'THEM', 'BUT', 'I', 'KNOW', 'LINDENS', 'ARE'] +237-134500-0017-2362: ref=['IF', 'I', 'FEEL', 'THAT', 'WAY', 'I', 'FEEL', 'THAT', 'WAY'] +237-134500-0017-2362: hyp=['IF', 'I', 'FEEL', 'THAT', 'WAY', 'I', 'FEEL', 'THAT', 'WAY'] +237-134500-0018-2363: ref=['HE', 'REACHED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'AND', 'BEGAN', 'TO', 'PICK', 'THE', 'SWEET', 'INSIPID', 'FRUIT', 'LONG', 'IVORY', 'COLORED', 'BERRIES', 'TIPPED', 'WITH', 'FAINT', 'PINK', 'LIKE', 'WHITE', 'CORAL', 'THAT', 'FALL', 'TO', 'THE', 'GROUND', 'UNHEEDED', 'ALL', 'SUMMER', 'THROUGH'] +237-134500-0018-2363: hyp=['HE', 'REACHED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'AND', 'BEGAN', 'TO', 'PICK', 'THE', 'SWEET', 'INSIPID', 'FRUIT', 'LONG', 'IVORY', 'COLORED', 'BERRIES', 'TIPPED', 'WITH', 'FAINT', 'PINK', 'LIKE', 'WHITE', 'CORAL', 'THAT', 'FALL', 'TO', 'THE', 'GROUND', 'UNHEEDED', 'ALL', 'SUMMER', 'THROUGH'] +237-134500-0019-2364: ref=['HE', 'DROPPED', 'A', 'HANDFUL', 'INTO', 'HER', 'LAP'] +237-134500-0019-2364: hyp=['HE', 'DROPPED', 'A', 'HANDFUL', 'INTO', 'HER', 'LAP'] +237-134500-0020-2365: ref=['YES', "DON'T", 'YOU'] +237-134500-0020-2365: hyp=['YES', "DON'T", 'YOU'] +237-134500-0021-2366: ref=['OH', 'EVER', 'SO', 'MUCH', 'ONLY', 'HE', 'SEEMS', 'KIND', 'OF', 'STAID', 'AND', 
'SCHOOL', 'TEACHERY'] +237-134500-0021-2366: hyp=['OH', 'EVER', 'SO', 'MUCH', 'ONLY', 'HE', 'SEEMS', 'KIND', 'OF', 'STAID', 'AND', 'SCHOOL', 'TEACHERY'] +237-134500-0022-2367: ref=['WHEN', 'SHE', 'USED', 'TO', 'TELL', 'ME', 'ABOUT', 'HIM', 'I', 'ALWAYS', 'WONDERED', 'WHETHER', 'SHE', "WASN'T", 'A', 'LITTLE', 'IN', 'LOVE', 'WITH', 'HIM'] +237-134500-0022-2367: hyp=['WHEN', 'SHE', 'USED', 'TO', 'TELL', 'ME', 'ABOUT', 'HIM', 'I', 'ALWAYS', 'WONDERED', 'WHETHER', 'SHE', "WASN'T", 'A', 'LITTLE', 'IN', 'LOVE', 'WITH', 'HIM'] +237-134500-0023-2368: ref=['IT', 'WOULD', 'SERVE', 'YOU', 'ALL', 'RIGHT', 'IF', 'SHE', 'WALKED', 'OFF', 'WITH', 'CARL'] +237-134500-0023-2368: hyp=['IT', 'WOULD', 'SERVE', 'YOU', 'ALL', 'RIGHT', 'IF', 'SHE', 'WALKED', 'OFF', 'WITH', 'KARL'] +237-134500-0024-2369: ref=['I', 'LIKE', 'TO', 'TALK', 'TO', 'CARL', 'ABOUT', 'NEW', 'YORK', 'AND', 'WHAT', 'A', 'FELLOW', 'CAN', 'DO', 'THERE'] +237-134500-0024-2369: hyp=['I', 'LIKE', 'TO', 'TALK', 'TO', 'KARL', 'ABOUT', 'NEW', 'YORK', 'AND', 'WHAT', 'A', 'FELLOW', 'CAN', 'DO', 'THERE'] +237-134500-0025-2370: ref=['OH', 'EMIL'] +237-134500-0025-2370: hyp=['OH', 'AMIEL'] +237-134500-0026-2371: ref=['SURELY', 'YOU', 'ARE', 'NOT', 'THINKING', 'OF', 'GOING', 'OFF', 'THERE'] +237-134500-0026-2371: hyp=['SURELY', 'YOU', 'ARE', 'NOT', 'THINKING', 'OF', 'GOING', 'OFF', 'THERE'] +237-134500-0027-2372: ref=["MARIE'S", 'FACE', 'FELL', 'UNDER', 'HIS', 'BROODING', 'GAZE'] +237-134500-0027-2372: hyp=["MARIE'S", 'FACE', 'FELL', 'UNDER', 'HIS', 'BROODING', 'GAZE'] +237-134500-0028-2373: ref=["I'M", 'SURE', 'ALEXANDRA', 'HOPES', 'YOU', 'WILL', 'STAY', 'ON', 'HERE', 'SHE', 'MURMURED'] +237-134500-0028-2373: hyp=['I', 'AM', 'SURE', 'ALEXANDER', 'HELPS', 'YOU', 'WILL', 'STAY', 'ON', 'HERE', 'SHE', 'MURMURED'] +237-134500-0029-2374: ref=['I', "DON'T", 'WANT', 'TO', 'STAND', 'AROUND', 'AND', 'LOOK', 'ON'] +237-134500-0029-2374: hyp=['I', "DON'T", 'WANT', 'TO', 'STAND', 'AROUND', 'AND', 'LOOK', 'ON'] +237-134500-0030-2375: ref=['I', 'WANT', 'TO', 'BE', 'DOING', 'SOMETHING', 'ON', 'MY', 'OWN', 'ACCOUNT'] +237-134500-0030-2375: hyp=['I', 'WANT', 'TO', 'BE', 'DOING', 'SOMETHING', 'ON', 'MY', 'OWN', 'ACCOUNT'] +237-134500-0031-2376: ref=['SOMETIMES', 'I', "DON'T", 'WANT', 'TO', 'DO', 'ANYTHING', 'AT', 'ALL', 'AND', 'SOMETIMES', 'I', 'WANT', 'TO', 'PULL', 'THE', 'FOUR', 'CORNERS', 'OF', 'THE', 'DIVIDE', 'TOGETHER', 'HE', 'THREW', 'OUT', 'HIS', 'ARM', 'AND', 'BROUGHT', 'IT', 'BACK', 'WITH', 'A', 'JERK', 'SO', 'LIKE', 'A', 'TABLE', 'CLOTH'] +237-134500-0031-2376: hyp=['SOMETIMES', 'I', "DON'T", 'WANT', 'TO', 'DO', 'ANYTHING', 'AT', 'ALL', 'AND', 'SOMETIMES', 'I', 'WANT', 'TO', 'PULL', 'THE', 'FOUR', 'CORNERS', 'OF', 'THE', 'DIVIDE', 'TOGETHER', 'HE', 'THREW', 'OUT', 'HIS', 'ARM', 'AND', 'BROUGHT', 'IT', 'BACK', 'WITH', 'A', 'JERK', 'SO', 'LIKE', 'A', 'TABLECLOTH'] +237-134500-0032-2377: ref=['I', 'GET', 'TIRED', 'OF', 'SEEING', 'MEN', 'AND', 'HORSES', 'GOING', 'UP', 'AND', 'DOWN', 'UP', 'AND', 'DOWN'] +237-134500-0032-2377: hyp=['I', 'GET', 'TIRED', 'OF', 'SEEING', 'MAN', 'AND', 'HORSES', 'GOING', 'UP', 'AND', 'DOWN', 'UP', 'AND', 'DOWN'] +237-134500-0033-2378: ref=['I', 'WISH', 'YOU', "WEREN'T", 'SO', 'RESTLESS', 'AND', "DIDN'T", 'GET', 'SO', 'WORKED', 'UP', 'OVER', 'THINGS', 'SHE', 'SAID', 'SADLY'] +237-134500-0033-2378: hyp=['I', 'WISH', 'YOU', "WEREN'T", 'SO', 'RESTLESS', 'AND', "DIDN'T", 'GET', 'SO', 'WORKED', 'UP', 'OVER', 'THINGS', 'SHE', 'SAID', 'SADLY'] +237-134500-0034-2379: ref=['THANK', 'YOU', 'HE', 'RETURNED', 'SHORTLY'] +237-134500-0034-2379: 
hyp=['THANK', 'YOU', 'HE', 'RETURNED', 'SHORTLY'] +237-134500-0035-2380: ref=['AND', 'YOU', 'NEVER', 'USED', 'TO', 'BE', 'CROSS', 'TO', 'ME'] +237-134500-0035-2380: hyp=['AND', 'WHO', 'NEVER', 'USED', 'TO', 'BE', 'CROSS', 'TO', 'ME'] +237-134500-0036-2381: ref=['I', "CAN'T", 'PLAY', 'WITH', 'YOU', 'LIKE', 'A', 'LITTLE', 'BOY', 'ANY', 'MORE', 'HE', 'SAID', 'SLOWLY', "THAT'S", 'WHAT', 'YOU', 'MISS', 'MARIE'] +237-134500-0036-2381: hyp=['I', "CAN'T", 'PLAY', 'WITH', 'YOU', 'LIKE', 'A', 'LITTLE', 'BOY', 'ANY', 'MORE', 'HE', 'SAID', 'SLOWLY', "THAT'S", 'WHAT', 'YOU', 'MISS', 'MARIE'] +237-134500-0037-2382: ref=['BUT', 'EMIL', 'IF', 'I', 'UNDERSTAND', 'THEN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER', 'WE', 'CAN', 'NEVER', 'DO', 'NICE', 'THINGS', 'TOGETHER', 'ANY', 'MORE'] +237-134500-0037-2382: hyp=['BUT', 'AMIEL', 'IF', 'I', 'UNDERSTAND', 'IN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER', 'WE', 'CAN', 'NEVER', 'DO', 'NICE', 'THINGS', 'TOGETHER', 'ANY', 'MORE'] +237-134500-0038-2383: ref=['AND', 'ANYHOW', "THERE'S", 'NOTHING', 'TO', 'UNDERSTAND'] +237-134500-0038-2383: hyp=['AND', 'ANYHOW', "THERE'S", 'NOTHING', 'TOO', 'UNDERSTAND'] +237-134500-0039-2384: ref=['THAT', "WON'T", 'LAST', 'IT', 'WILL', 'GO', 'AWAY', 'AND', 'THINGS', 'WILL', 'BE', 'JUST', 'AS', 'THEY', 'USED', 'TO'] +237-134500-0039-2384: hyp=['THAT', "WON'T", 'LAST', 'IT', 'WILL', 'GO', 'AWAY', 'AND', 'THINGS', 'WILL', 'BE', 'JUST', 'AS', 'THEY', 'USED', 'TO'] +237-134500-0040-2385: ref=['I', 'PRAY', 'FOR', 'YOU', 'BUT', "THAT'S", 'NOT', 'THE', 'SAME', 'AS', 'IF', 'YOU', 'PRAYED', 'YOURSELF'] +237-134500-0040-2385: hyp=['I', 'PRAY', 'FOR', 'YOU', 'BUT', "THAT'S", 'NOT', 'THE', 'SAME', 'AS', 'IF', 'YOU', 'PRAYED', 'YOURSELF'] +237-134500-0041-2386: ref=['I', "CAN'T", 'PRAY', 'TO', 'HAVE', 'THE', 'THINGS', 'I', 'WANT', 'HE', 'SAID', 'SLOWLY', 'AND', 'I', "WON'T", 'PRAY', 'NOT', 'TO', 'HAVE', 'THEM', 'NOT', 'IF', "I'M", 'DAMNED', 'FOR', 'IT'] +237-134500-0041-2386: hyp=['I', "CAN'T", 'PRAY', 'TO', 'HAVE', 'THE', 'THINGS', 'I', 'WANT', 'HE', 'SAID', 'SLOWLY', 'AND', 'I', "WON'T", 'PRAY', 'NOT', 'TO', 'HAVE', 'THEM', 'NOT', 'IF', "I'M", 'DAMNED', 'FOR', 'IT'] +237-134500-0042-2387: ref=['THEN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER'] +237-134500-0042-2387: hyp=['THEN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER'] +260-123286-0000-200: ref=['SATURDAY', 'AUGUST', 'FIFTEENTH', 'THE', 'SEA', 'UNBROKEN', 'ALL', 'ROUND', 'NO', 'LAND', 'IN', 'SIGHT'] +260-123286-0000-200: hyp=['SATURDAY', 'AUGUST', 'FIFTEENTH', 'THE', 'SEA', 'UNBROKEN', 'ALL', 'ROUND', 'NO', 'LAND', 'IN', 'SIGHT'] +260-123286-0001-201: ref=['THE', 'HORIZON', 'SEEMS', 'EXTREMELY', 'DISTANT'] +260-123286-0001-201: hyp=['THE', 'HORIZON', 'SEEMS', 'EXTREMELY', 'DISTANT'] +260-123286-0002-202: ref=['ALL', 'MY', 'DANGER', 'AND', 'SUFFERINGS', 'WERE', 'NEEDED', 'TO', 'STRIKE', 'A', 'SPARK', 'OF', 'HUMAN', 'FEELING', 'OUT', 'OF', 'HIM', 'BUT', 'NOW', 'THAT', 'I', 'AM', 'WELL', 'HIS', 'NATURE', 'HAS', 'RESUMED', 'ITS', 'SWAY'] +260-123286-0002-202: hyp=['ALL', 'MY', 'DANGER', 'AND', 'SUFFERINGS', 'WERE', 'NEEDED', 'TO', 'STRIKE', 'A', 'SPARK', 'OF', 'HUMAN', 'FEELING', 'OUT', 'OF', 'HIM', 'BUT', 'NOW', 'THAT', 'I', 'AM', 'WELL', 'HIS', 'NATURE', 'HAS', 'RESUMED', 'ITS', 'SWAY'] +260-123286-0003-203: ref=['YOU', 'SEEM', 'ANXIOUS', 'MY', 'UNCLE', 'I', 'SAID', 'SEEING', 'HIM', 'CONTINUALLY', 'WITH', 'HIS', 'GLASS', 'TO', 'HIS', 'EYE', 'ANXIOUS'] +260-123286-0003-203: hyp=['YOU', 'SEEM', 'ANXIOUS', 'MY', 'UNCLE', 'I', 'SAID', 'SEEING', 'HIM', 'CONTINUALLY', 'WITH', 'HIS', 
'GLASS', 'TO', 'HIS', 'EYE', 'ANXIOUS'] +260-123286-0004-204: ref=['ONE', 'MIGHT', 'BE', 'WITH', 'LESS', 'REASON', 'THAN', 'NOW'] +260-123286-0004-204: hyp=['ONE', 'MIGHT', 'BE', 'WITH', 'LESS', 'REASON', 'THAN', 'NOW'] +260-123286-0005-205: ref=['I', 'AM', 'NOT', 'COMPLAINING', 'THAT', 'THE', 'RATE', 'IS', 'SLOW', 'BUT', 'THAT', 'THE', 'SEA', 'IS', 'SO', 'WIDE'] +260-123286-0005-205: hyp=['I', 'AM', 'NOT', 'COMPLAINING', 'THAT', 'THE', 'RATE', 'IS', 'SLOW', 'BUT', 'THAT', 'THE', 'SEAT', 'IS', 'SO', 'WIDE'] +260-123286-0006-206: ref=['WE', 'ARE', 'LOSING', 'TIME', 'AND', 'THE', 'FACT', 'IS', 'I', 'HAVE', 'NOT', 'COME', 'ALL', 'THIS', 'WAY', 'TO', 'TAKE', 'A', 'LITTLE', 'SAIL', 'UPON', 'A', 'POND', 'ON', 'A', 'RAFT'] +260-123286-0006-206: hyp=['WE', 'ARE', 'LOSING', 'TIME', 'AND', 'THE', 'FACT', 'IS', 'I', 'HAVE', 'NOT', 'COME', 'ALL', 'THIS', 'WAY', 'TO', 'TAKE', 'A', 'LITTLE', 'SAIL', 'UPON', 'A', 'POND', 'ON', 'A', 'RAFT'] +260-123286-0007-207: ref=['HE', 'CALLED', 'THIS', 'SEA', 'A', 'POND', 'AND', 'OUR', 'LONG', 'VOYAGE', 'TAKING', 'A', 'LITTLE', 'SAIL'] +260-123286-0007-207: hyp=['HE', 'CALLED', 'THE', 'SEA', 'UPON', 'AND', 'OUR', 'LONG', 'VOYAGE', 'TAKING', 'A', 'LITTLE', 'SAIL'] +260-123286-0008-208: ref=['THEREFORE', "DON'T", 'TALK', 'TO', 'ME', 'ABOUT', 'VIEWS', 'AND', 'PROSPECTS'] +260-123286-0008-208: hyp=['THEREFORE', "DON'T", 'TALK', 'TO', 'ME', 'ABOUT', 'VIEWS', 'AND', 'PROSPECTS'] +260-123286-0009-209: ref=['I', 'TAKE', 'THIS', 'AS', 'MY', 'ANSWER', 'AND', 'I', 'LEAVE', 'THE', 'PROFESSOR', 'TO', 'BITE', 'HIS', 'LIPS', 'WITH', 'IMPATIENCE'] +260-123286-0009-209: hyp=['I', 'TAKE', 'THIS', 'AS', 'MY', 'ANSWER', 'AND', 'I', 'LEAVE', 'THE', 'PROFESSOR', 'TO', 'BITE', 'HIS', 'LIPS', 'WITH', 'IMPATIENCE'] +260-123286-0010-210: ref=['SUNDAY', 'AUGUST', 'SIXTEENTH'] +260-123286-0010-210: hyp=['SUNDAY', 'AUGUST', 'SIXTEENTH'] +260-123286-0011-211: ref=['NOTHING', 'NEW', 'WEATHER', 'UNCHANGED', 'THE', 'WIND', 'FRESHENS'] +260-123286-0011-211: hyp=['NOTHING', 'NEW', 'WHETHER', 'UNCHANGED', 'THE', 'WIND', 'FRESHENS'] +260-123286-0012-212: ref=['BUT', 'THERE', 'SEEMED', 'NO', 'REASON', 'TO', 'FEAR'] +260-123286-0012-212: hyp=['BUT', 'THERE', 'SEEMED', 'NO', 'REASON', 'OF', 'FEAR'] +260-123286-0013-213: ref=['THE', 'SHADOW', 'OF', 'THE', 'RAFT', 'WAS', 'CLEARLY', 'OUTLINED', 'UPON', 'THE', 'SURFACE', 'OF', 'THE', 'WAVES'] +260-123286-0013-213: hyp=['THE', 'SHADOW', 'OF', 'THE', 'RAFT', 'WAS', 'CLEARLY', 'OUTLINED', 'UPON', 'THE', 'SURFACE', 'OF', 'THE', 'WAVES'] +260-123286-0014-214: ref=['TRULY', 'THIS', 'SEA', 'IS', 'OF', 'INFINITE', 'WIDTH'] +260-123286-0014-214: hyp=['TRULY', 'THE', 'SEA', 'IS', 'OF', 'INFINITE', 'WIDTH'] +260-123286-0015-215: ref=['IT', 'MUST', 'BE', 'AS', 'WIDE', 'AS', 'THE', 'MEDITERRANEAN', 'OR', 'THE', 'ATLANTIC', 'AND', 'WHY', 'NOT'] +260-123286-0015-215: hyp=['IT', 'MUST', 'BE', 'AS', 'WIDE', 'AS', 'THE', 'MEDITERRANEAN', 'OR', 'THE', 'ATLANTIC', 'AND', 'WHY', 'NOT'] +260-123286-0016-216: ref=['THESE', 'THOUGHTS', 'AGITATED', 'ME', 'ALL', 'DAY', 'AND', 'MY', 'IMAGINATION', 'SCARCELY', 'CALMED', 'DOWN', 'AFTER', 'SEVERAL', 'HOURS', 'SLEEP'] +260-123286-0016-216: hyp=['THESE', 'THOUGHTS', 'AGITATED', 'ME', 'ALL', 'DAY', 'AND', 'MY', 'IMAGINATION', 'SCARCELY', 'CALM', 'DOWN', 'AFTER', 'SEVERAL', 'HOURS', 'SLEEVE'] +260-123286-0017-217: ref=['I', 'SHUDDER', 'AS', 'I', 'RECALL', 'THESE', 'MONSTERS', 'TO', 'MY', 'REMEMBRANCE'] +260-123286-0017-217: hyp=['I', 'SHUDDER', 'AS', 'I', 'RECALL', 'THESE', 'MONSTERS', 'TO', 'MY', 'REMEMBRANCE'] +260-123286-0018-218: ref=['I', 
'SAW', 'AT', 'THE', 'HAMBURG', 'MUSEUM', 'THE', 'SKELETON', 'OF', 'ONE', 'OF', 'THESE', 'CREATURES', 'THIRTY', 'FEET', 'IN', 'LENGTH'] +260-123286-0018-218: hyp=['I', 'SAW', 'AT', 'THE', 'HAMBURG', 'MUSEUM', 'THE', 'SKELETON', 'OF', 'ONE', 'OF', 'THESE', 'CREATURES', 'THIRTY', 'FEET', 'IN', 'LENGTH'] +260-123286-0019-219: ref=['I', 'SUPPOSE', 'PROFESSOR', 'LIEDENBROCK', 'WAS', 'OF', 'MY', 'OPINION', 'TOO', 'AND', 'EVEN', 'SHARED', 'MY', 'FEARS', 'FOR', 'AFTER', 'HAVING', 'EXAMINED', 'THE', 'PICK', 'HIS', 'EYES', 'TRAVERSED', 'THE', 'OCEAN', 'FROM', 'SIDE', 'TO', 'SIDE'] +260-123286-0019-219: hyp=['I', 'SUPPOSE', 'PROFESSOR', 'LIEDENBROCK', 'WAS', 'OF', 'MY', 'OPINION', 'TOO', 'AND', 'EVEN', 'SHARED', 'MY', 'FEARS', 'FOR', 'AFTER', 'HAVING', 'EXAMINED', 'THE', 'PICK', 'HIS', 'EYES', 'TRAVERSED', 'THE', 'OCEAN', 'FROM', 'SIDE', 'TO', 'SIDE'] +260-123286-0020-220: ref=['TUESDAY', 'AUGUST', 'EIGHTEENTH'] +260-123286-0020-220: hyp=['TUESDAY', 'AUGUST', 'EIGHTEENTH'] +260-123286-0021-221: ref=['DURING', 'HIS', 'WATCH', 'I', 'SLEPT'] +260-123286-0021-221: hyp=['DURING', 'HIS', 'WATCH', 'I', 'SLEPT'] +260-123286-0022-222: ref=['TWO', 'HOURS', 'AFTERWARDS', 'A', 'TERRIBLE', 'SHOCK', 'AWOKE', 'ME'] +260-123286-0022-222: hyp=['TWO', 'HOURS', 'AFTERWARDS', 'A', 'TERRIBLE', 'SHOCK', 'AWOKE', 'ME'] +260-123286-0023-223: ref=['THE', 'RAFT', 'WAS', 'HEAVED', 'UP', 'ON', 'A', 'WATERY', 'MOUNTAIN', 'AND', 'PITCHED', 'DOWN', 'AGAIN', 'AT', 'A', 'DISTANCE', 'OF', 'TWENTY', 'FATHOMS'] +260-123286-0023-223: hyp=['THE', 'RAFT', 'WAS', 'HEAVED', 'UP', 'ON', 'A', 'WATERY', 'MOUNTAIN', 'AND', 'PITCHED', 'DOWN', 'AGAIN', 'AT', 'A', 'DISTANCE', 'OF', 'TWENTY', 'FATHOMS'] +260-123286-0024-224: ref=["THERE'S", 'A', 'WHALE', 'A', 'WHALE', 'CRIED', 'THE', 'PROFESSOR'] +260-123286-0024-224: hyp=["THERE'S", 'A', 'WAIL', 'A', 'WELL', 'CRIED', 'THE', 'PROFESSOR'] +260-123286-0025-225: ref=['FLIGHT', 'WAS', 'OUT', 'OF', 'THE', 'QUESTION', 'NOW', 'THE', 'REPTILES', 'ROSE', 'THEY', 'WHEELED', 'AROUND', 'OUR', 'LITTLE', 'RAFT', 'WITH', 'A', 'RAPIDITY', 'GREATER', 'THAN', 'THAT', 'OF', 'EXPRESS', 'TRAINS'] +260-123286-0025-225: hyp=['FIGHT', 'WAS', 'OUT', 'OF', 'THE', 'QUESTION', 'NOW', 'THE', 'REPTILES', 'ROSE', 'THEY', 'WHEELED', 'AROUND', 'OUR', 'LITTLE', 'RAFT', 'WITH', 'A', 'RAPIDITY', 'GREATER', 'THAN', 'THAT', 'OF', 'EXPRESS', 'TRAINS'] +260-123286-0026-226: ref=['TWO', 'MONSTERS', 'ONLY', 'WERE', 'CREATING', 'ALL', 'THIS', 'COMMOTION', 'AND', 'BEFORE', 'MY', 'EYES', 'ARE', 'TWO', 'REPTILES', 'OF', 'THE', 'PRIMITIVE', 'WORLD'] +260-123286-0026-226: hyp=['TWO', 'MASTERS', 'ONLY', 'WERE', 'CREATING', 'ALL', 'THIS', 'COMMOTION', 'AND', 'BEFORE', 'MY', 'EYES', 'ARE', 'TOO', 'REPTILES', 'OF', 'THE', 'PRIMITIVE', 'WORLD'] +260-123286-0027-227: ref=['I', 'CAN', 'DISTINGUISH', 'THE', 'EYE', 'OF', 'THE', 'ICHTHYOSAURUS', 'GLOWING', 'LIKE', 'A', 'RED', 'HOT', 'COAL', 'AND', 'AS', 'LARGE', 'AS', 'A', "MAN'S", 'HEAD'] +260-123286-0027-227: hyp=['I', 'CAN', 'DISTINGUISH', 'THE', 'EYE', 'OF', 'THE', 'ICT', 'THEASURUS', 'GLOWING', 'LIKE', 'A', 'RED', 'HOT', 'CO', 'AND', 'AS', 'LARGE', 'AS', 'A', "MAN'S", 'HEAD'] +260-123286-0028-228: ref=['ITS', 'JAW', 'IS', 'ENORMOUS', 'AND', 'ACCORDING', 'TO', 'NATURALISTS', 'IT', 'IS', 'ARMED', 'WITH', 'NO', 'LESS', 'THAN', 'ONE', 'HUNDRED', 'AND', 'EIGHTY', 'TWO', 'TEETH'] +260-123286-0028-228: hyp=['ITS', 'JAW', 'IS', 'ENORMOUS', 'AND', 'ACCORDING', 'TO', 'NATURALISTS', 'IT', 'IS', 'ARMED', 'WITH', 'NO', 'LESS', 'THAN', 'ONE', 'HUNDRED', 'AND', 'EIGHTY', 'TWO', 'TEETH'] +260-123286-0029-229: 
ref=['THOSE', 'HUGE', 'CREATURES', 'ATTACKED', 'EACH', 'OTHER', 'WITH', 'THE', 'GREATEST', 'ANIMOSITY'] +260-123286-0029-229: hyp=['THOSE', 'HUGE', 'CREATURES', 'ATTACKED', 'EACH', 'OTHER', 'WITH', 'THE', 'GREATEST', 'ANIMOSITY'] +260-123286-0030-230: ref=['SUDDENLY', 'THE', 'ICHTHYOSAURUS', 'AND', 'THE', 'PLESIOSAURUS', 'DISAPPEAR', 'BELOW', 'LEAVING', 'A', 'WHIRLPOOL', 'EDDYING', 'IN', 'THE', 'WATER'] +260-123286-0030-230: hyp=['SUDDENLY', 'THE', 'ICTOISORUS', 'AND', 'THE', 'PLECIUS', 'DISAPPEAR', 'BELOW', 'LEAVING', 'A', 'WAR', 'POOL', 'EDDYING', 'IN', 'THE', 'WATER'] +260-123286-0031-231: ref=['AS', 'FOR', 'THE', 'ICHTHYOSAURUS', 'HAS', 'HE', 'RETURNED', 'TO', 'HIS', 'SUBMARINE', 'CAVERN'] +260-123286-0031-231: hyp=['AS', 'FOR', 'THE', 'ITTHIASORIS', 'HAS', 'HE', 'RETURNED', 'WHOSE', 'SUBMARINE', 'CAVERN'] +260-123288-0000-232: ref=['THE', 'ROARINGS', 'BECOME', 'LOST', 'IN', 'THE', 'DISTANCE'] +260-123288-0000-232: hyp=['THE', 'ROARINGS', 'BECOME', 'LOST', 'IN', 'THE', 'DISTANCE'] +260-123288-0001-233: ref=['THE', 'WEATHER', 'IF', 'WE', 'MAY', 'USE', 'THAT', 'TERM', 'WILL', 'CHANGE', 'BEFORE', 'LONG'] +260-123288-0001-233: hyp=['THE', 'WEATHER', 'IF', 'WE', 'MAY', 'USE', 'THE', 'TERM', 'WILL', 'CHANGE', 'BEFORE', 'LAWN'] +260-123288-0002-234: ref=['THE', 'ATMOSPHERE', 'IS', 'CHARGED', 'WITH', 'VAPOURS', 'PERVADED', 'WITH', 'THE', 'ELECTRICITY', 'GENERATED', 'BY', 'THE', 'EVAPORATION', 'OF', 'SALINE', 'WATERS'] +260-123288-0002-234: hyp=['THE', 'ATMOSPHERE', 'IS', 'CHARGED', 'WITH', 'VAPORS', 'PERVADED', 'WITH', 'THE', 'ELECTRICITY', 'GENERATED', 'BY', 'THE', 'EVAPORATION', 'OF', 'SAILING', 'WATERS'] +260-123288-0003-235: ref=['THE', 'ELECTRIC', 'LIGHT', 'CAN', 'SCARCELY', 'PENETRATE', 'THROUGH', 'THE', 'DENSE', 'CURTAIN', 'WHICH', 'HAS', 'DROPPED', 'OVER', 'THE', 'THEATRE', 'ON', 'WHICH', 'THE', 'BATTLE', 'OF', 'THE', 'ELEMENTS', 'IS', 'ABOUT', 'TO', 'BE', 'WAGED'] +260-123288-0003-235: hyp=['THE', 'ELECTRIC', 'LIGHT', 'CAN', 'SCARCELY', 'PENETRATE', 'THROUGH', 'THE', 'DENSE', 'CURTAIN', 'WHICH', 'IS', 'DROPPED', 'OVER', 'THE', 'THEATRE', 'ON', 'WHICH', 'THE', 'BATTLE', 'OF', 'THE', 'ELEMENTS', 'IS', 'ABOUT', 'TO', 'BE', 'WAGED'] +260-123288-0004-236: ref=['THE', 'AIR', 'IS', 'HEAVY', 'THE', 'SEA', 'IS', 'CALM'] +260-123288-0004-236: hyp=['THE', 'AIR', 'IS', 'HEAVY', 'THE', 'SEA', 'IS', 'CALM'] +260-123288-0005-237: ref=['FROM', 'TIME', 'TO', 'TIME', 'A', 'FLEECY', 'TUFT', 'OF', 'MIST', 'WITH', 'YET', 'SOME', 'GLEAMING', 'LIGHT', 'LEFT', 'UPON', 'IT', 'DROPS', 'DOWN', 'UPON', 'THE', 'DENSE', 'FLOOR', 'OF', 'GREY', 'AND', 'LOSES', 'ITSELF', 'IN', 'THE', 'OPAQUE', 'AND', 'IMPENETRABLE', 'MASS'] +260-123288-0005-237: hyp=['FROM', 'TIME', 'TO', 'TIME', 'A', 'FLEECY', 'TUFT', 'OF', 'MISTS', 'WITH', 'YET', 'SOME', 'GLEAMING', 'LIGHT', 'LEFT', 'UPON', 'IT', 'DROPS', 'DOWN', 'UPON', 'THE', 'DENSE', 'FLOOR', 'OF', 'GREY', 'AND', 'LOSES', 'ITSELF', 'IN', 'THE', 'OPE', 'AND', 'IMPENETRABLE', 'MASS'] +260-123288-0006-238: ref=['THE', 'ATMOSPHERE', 'IS', 'EVIDENTLY', 'CHARGED', 'AND', 'SURCHARGED', 'WITH', 'ELECTRICITY'] +260-123288-0006-238: hyp=['THE', 'ATMOSPHERE', 'AS', 'EVIDENTLY', 'CHARGED', 'IN', 'SURCHARGED', 'WITH', 'ELECTRICITY'] +260-123288-0007-239: ref=['THE', 'WIND', 'NEVER', 'LULLS', 'BUT', 'TO', 'ACQUIRE', 'INCREASED', 'STRENGTH', 'THE', 'VAST', 'BANK', 'OF', 'HEAVY', 'CLOUDS', 'IS', 'A', 'HUGE', 'RESERVOIR', 'OF', 'FEARFUL', 'WINDY', 'GUSTS', 'AND', 'RUSHING', 'STORMS'] +260-123288-0007-239: hyp=['THE', 'WIND', 'NEVER', 'LULLS', 'BUT', 'TO', 'ACQUIRE', 'INCREASED', 'STRENGTH', 
'THE', 'VAST', 'BANK', 'OF', 'HEAVY', 'CLOUDS', 'IS', 'A', 'HUGE', 'RESERVOIR', 'OF', 'FEARFUL', 'WINDY', 'GUSTS', 'AND', 'RUSHING', 'STORMS'] +260-123288-0008-240: ref=["THERE'S", 'A', 'HEAVY', 'STORM', 'COMING', 'ON', 'I', 'CRIED', 'POINTING', 'TOWARDS', 'THE', 'HORIZON'] +260-123288-0008-240: hyp=["THERE'S", 'A', 'HEAVY', 'STORM', 'COMING', 'ON', 'I', 'CRIED', 'POINTING', 'TOWARDS', 'THE', 'HORIZON'] +260-123288-0009-241: ref=['THOSE', 'CLOUDS', 'SEEM', 'AS', 'IF', 'THEY', 'WERE', 'GOING', 'TO', 'CRUSH', 'THE', 'SEA'] +260-123288-0009-241: hyp=['THOSE', 'CLOUDS', 'SEEM', 'AS', 'IF', 'THEY', 'WERE', 'GOING', 'TO', 'CRUSH', 'THE', 'SEA'] +260-123288-0010-242: ref=['ON', 'THE', 'MAST', 'ALREADY', 'I', 'SEE', 'THE', 'LIGHT', 'PLAY', 'OF', 'A', 'LAMBENT', 'SAINT', "ELMO'S", 'FIRE', 'THE', 'OUTSTRETCHED', 'SAIL', 'CATCHES', 'NOT', 'A', 'BREATH', 'OF', 'WIND', 'AND', 'HANGS', 'LIKE', 'A', 'SHEET', 'OF', 'LEAD'] +260-123288-0010-242: hyp=['ON', 'THE', 'MAST', 'ALREADY', 'I', 'SEE', 'THE', 'LIGHT', 'PLAY', 'OF', 'A', 'LAMENT', 'SAINT', "AIRABLE'S", 'FIRE', 'THE', 'OUTSTRETCHED', 'SILL', 'CATCHES', 'NOT', 'A', 'BREATH', 'OF', 'WIND', 'AND', 'HANGS', 'LIKE', 'A', 'SHEET', 'OF', 'LEAD'] +260-123288-0011-243: ref=['BUT', 'IF', 'WE', 'HAVE', 'NOW', 'CEASED', 'TO', 'ADVANCE', 'WHY', 'DO', 'WE', 'YET', 'LEAVE', 'THAT', 'SAIL', 'LOOSE', 'WHICH', 'AT', 'THE', 'FIRST', 'SHOCK', 'OF', 'THE', 'TEMPEST', 'MAY', 'CAPSIZE', 'US', 'IN', 'A', 'MOMENT'] +260-123288-0011-243: hyp=['BUT', 'IF', 'WE', 'HAVE', 'NOW', 'CEASED', 'TO', 'ADVANCE', 'WHY', 'DO', 'WE', 'YET', 'LEAVE', 'THAT', 'SALE', 'LOOSE', 'WHICH', 'AT', 'THE', 'FIRST', 'SHOCK', 'OF', 'A', 'TEMPEST', 'MAY', 'CAPSIZE', 'US', 'IN', 'A', 'MOMENT'] +260-123288-0012-244: ref=['THAT', 'WILL', 'BE', 'SAFEST', 'NO', 'NO', 'NEVER'] +260-123288-0012-244: hyp=['THAT', 'WILL', 'BE', 'THE', 'SAFEST', 'NO', 'NO', 'NEVER'] +260-123288-0013-245: ref=['THE', 'PILED', 'UP', 'VAPOURS', 'CONDENSE', 'INTO', 'WATER', 'AND', 'THE', 'AIR', 'PUT', 'INTO', 'VIOLENT', 'ACTION', 'TO', 'SUPPLY', 'THE', 'VACUUM', 'LEFT', 'BY', 'THE', 'CONDENSATION', 'OF', 'THE', 'MISTS', 'ROUSES', 'ITSELF', 'INTO', 'A', 'WHIRLWIND'] +260-123288-0013-245: hyp=['THE', 'PILED', 'UP', 'VAPORS', 'CONTENTS', 'INTO', 'WATER', 'AND', 'THE', 'AIR', 'PUT', 'INTO', 'VIOLENT', 'ACTION', 'TO', 'SUPPLY', 'THE', 'VACUUM', 'LEFT', 'BY', 'THE', 'CONDENSATION', 'OF', 'THE', 'MIST', 'ROUSES', 'ITSELF', 'INTO', 'A', 'WHIRLWIND'] +260-123288-0014-246: ref=['HANS', 'STIRS', 'NOT'] +260-123288-0014-246: hyp=['HANS', 'STIRS', 'NOT'] +260-123288-0015-247: ref=['FROM', 'THE', 'UNDER', 'SURFACE', 'OF', 'THE', 'CLOUDS', 'THERE', 'ARE', 'CONTINUAL', 'EMISSIONS', 'OF', 'LURID', 'LIGHT', 'ELECTRIC', 'MATTER', 'IS', 'IN', 'CONTINUAL', 'EVOLUTION', 'FROM', 'THEIR', 'COMPONENT', 'MOLECULES', 'THE', 'GASEOUS', 'ELEMENTS', 'OF', 'THE', 'AIR', 'NEED', 'TO', 'BE', 'SLAKED', 'WITH', 'MOISTURE', 'FOR', 'INNUMERABLE', 'COLUMNS', 'OF', 'WATER', 'RUSH', 'UPWARDS', 'INTO', 'THE', 'AIR', 'AND', 'FALL', 'BACK', 'AGAIN', 'IN', 'WHITE', 'FOAM'] +260-123288-0015-247: hyp=['FROM', 'THE', 'UNDER', 'SURFACE', 'OF', 'THE', 'CLOUDS', 'THERE', 'ARE', 'CONTINUAL', 'MISSIONS', 'OF', 'LURID', 'LIGHT', 'ELECTRIC', 'MATTER', 'IS', 'IN', 'CONTINUAL', 'EVOLUTION', 'FROM', 'THEIR', 'COMPONENT', 'MOLECULES', 'THE', 'GASEOUS', 'ELEMENTS', 'OF', 'THE', 'AIR', 'NEED', 'TO', 'BE', 'SLAKED', 'WITH', 'MOISTURE', 'FOR', 'INNUMERABLE', 'COLUMNS', 'OF', 'WATER', 'RUSH', 'UPWARDS', 'INTO', 'THE', 'AIR', 'AND', 'FALL', 'BACK', 'AGAIN', 'IN', 'WHITE', 'FOAM'] 
+260-123288-0016-248: ref=['I', 'REFER', 'TO', 'THE', 'THERMOMETER', 'IT', 'INDICATES', 'THE', 'FIGURE', 'IS', 'OBLITERATED'] +260-123288-0016-248: hyp=['I', 'REFER', 'TO', 'THE', 'THERMOMETER', 'IT', 'INDICATES', 'THE', 'FIGURE', 'IS', 'OBLITERATED'] +260-123288-0017-249: ref=['IS', 'THE', 'ATMOSPHERIC', 'CONDITION', 'HAVING', 'ONCE', 'REACHED', 'THIS', 'DENSITY', 'TO', 'BECOME', 'FINAL'] +260-123288-0017-249: hyp=['IS', 'THE', 'ATMOSPHERIC', 'CONDITION', 'HAVING', 'ONCE', 'REACHED', 'OSTENSITY', 'TO', 'BECOME', 'FINAL'] +260-123288-0018-250: ref=['THE', 'RAFT', 'BEARS', 'ON', 'STILL', 'TO', 'THE', 'SOUTH', 'EAST'] +260-123288-0018-250: hyp=['THE', 'RAFT', 'BEARS', 'ON', 'STILL', 'TO', 'THE', 'SOUTH', 'EAST'] +260-123288-0019-251: ref=['AT', 'NOON', 'THE', 'VIOLENCE', 'OF', 'THE', 'STORM', 'REDOUBLES'] +260-123288-0019-251: hyp=['AT', 'NOON', 'THE', 'VIOLENCE', 'OF', 'THE', 'STORM', 'REDOUBLES'] +260-123288-0020-252: ref=['EACH', 'OF', 'US', 'IS', 'LASHED', 'TO', 'SOME', 'PART', 'OF', 'THE', 'RAFT'] +260-123288-0020-252: hyp=['EACH', 'OF', 'US', 'IS', 'LASHED', 'IN', 'SOME', 'PART', 'OF', 'THE', 'RAFT'] +260-123288-0021-253: ref=['THE', 'WAVES', 'RISE', 'ABOVE', 'OUR', 'HEADS'] +260-123288-0021-253: hyp=['THE', 'WAVES', 'RISE', 'ABOVE', 'OUR', 'HEADS'] +260-123288-0022-254: ref=['THEY', 'SEEM', 'TO', 'BE', 'WE', 'ARE', 'LOST', 'BUT', 'I', 'AM', 'NOT', 'SURE'] +260-123288-0022-254: hyp=['THEY', 'SEEMED', 'TO', 'BE', 'WE', 'ARE', 'LOST', 'BUT', 'I', 'AM', 'NOT', 'SURE'] +260-123288-0023-255: ref=['HE', 'NODS', 'HIS', 'CONSENT'] +260-123288-0023-255: hyp=['HE', 'GNAWEDS', 'HIS', 'CONSENT'] +260-123288-0024-256: ref=['THE', 'FIREBALL', 'HALF', 'OF', 'IT', 'WHITE', 'HALF', 'AZURE', 'BLUE', 'AND', 'THE', 'SIZE', 'OF', 'A', 'TEN', 'INCH', 'SHELL', 'MOVED', 'SLOWLY', 'ABOUT', 'THE', 'RAFT', 'BUT', 'REVOLVING', 'ON', 'ITS', 'OWN', 'AXIS', 'WITH', 'ASTONISHING', 'VELOCITY', 'AS', 'IF', 'WHIPPED', 'ROUND', 'BY', 'THE', 'FORCE', 'OF', 'THE', 'WHIRLWIND'] +260-123288-0024-256: hyp=['THE', 'FIRE', 'BALL', 'HALF', 'OF', 'IT', 'WHITE', 'HALF', 'AZURE', 'BLUE', 'AND', 'THE', 'SIZE', 'OF', 'A', 'TEN', 'INCH', 'CHILL', 'MOVED', 'SLOWLY', 'ABOUT', 'THE', 'RAFT', 'BUT', 'REVOLVING', 'ON', 'ITS', 'OWN', 'AXIS', 'WITH', 'ASTONISHING', 'VELOCITY', 'AS', 'IF', 'WHIP', 'ROUND', 'BY', 'THE', 'FORCE', 'OF', 'THE', 'WHIRLWIND'] +260-123288-0025-257: ref=['HERE', 'IT', 'COMES', 'THERE', 'IT', 'GLIDES', 'NOW', 'IT', 'IS', 'UP', 'THE', 'RAGGED', 'STUMP', 'OF', 'THE', 'MAST', 'THENCE', 'IT', 'LIGHTLY', 'LEAPS', 'ON', 'THE', 'PROVISION', 'BAG', 'DESCENDS', 'WITH', 'A', 'LIGHT', 'BOUND', 'AND', 'JUST', 'SKIMS', 'THE', 'POWDER', 'MAGAZINE', 'HORRIBLE'] +260-123288-0025-257: hyp=['HERE', 'IT', 'COMES', 'THERE', 'IT', 'GLIDES', 'NOW', 'IT', 'IS', 'UP', 'THE', 'RAGGED', 'STUMP', 'OF', 'THE', 'MAST', 'THENCE', 'IT', 'LIGHTLY', 'LEAPS', 'ON', 'THE', 'PROVISION', 'BAG', 'DESCENDS', 'WITH', 'A', 'LIGHT', 'BOUND', 'AND', 'JUST', 'SKIMS', 'THE', 'POWDER', 'MAGAZINE', 'HORRIBLE'] +260-123288-0026-258: ref=['WE', 'SHALL', 'BE', 'BLOWN', 'UP', 'BUT', 'NO', 'THE', 'DAZZLING', 'DISK', 'OF', 'MYSTERIOUS', 'LIGHT', 'NIMBLY', 'LEAPS', 'ASIDE', 'IT', 'APPROACHES', 'HANS', 'WHO', 'FIXES', 'HIS', 'BLUE', 'EYE', 'UPON', 'IT', 'STEADILY', 'IT', 'THREATENS', 'THE', 'HEAD', 'OF', 'MY', 'UNCLE', 'WHO', 'FALLS', 'UPON', 'HIS', 'KNEES', 'WITH', 'HIS', 'HEAD', 'DOWN', 'TO', 'AVOID', 'IT'] +260-123288-0026-258: hyp=['WE', 'SHALL', 'BE', 'BLOWN', 'UP', 'BUT', 'NO', 'THE', 'DAZZLING', 'DISK', 'OF', 'MYSTERIOUS', 'LIGHT', 'NIMBLY', 'LEAPS', 'ASIDE', 'IT', 
'APPROACHES', 'HANS', 'WHO', 'FIXES', 'HIS', 'BLUE', 'EYE', 'UPON', 'IT', 'STEADILY', 'IT', 'THREATENS', 'THE', 'HEAD', 'OF', 'MY', 'UNCLE', 'WHO', 'FALLS', 'UPON', 'HIS', 'KNEES', 'WITH', 'HIS', 'HEAD', 'DOWN', 'TO', 'AVOID', 'IT'] +260-123288-0027-259: ref=['A', 'SUFFOCATING', 'SMELL', 'OF', 'NITROGEN', 'FILLS', 'THE', 'AIR', 'IT', 'ENTERS', 'THE', 'THROAT', 'IT', 'FILLS', 'THE', 'LUNGS'] +260-123288-0027-259: hyp=['A', 'SUFFOCATING', 'SMELL', 'OF', 'NITROGEN', 'FILLS', 'THE', 'AIR', 'IT', 'ENTERS', 'THE', 'THROAT', 'IT', 'FILLS', 'THE', 'LUNGS'] +260-123288-0028-260: ref=['WE', 'SUFFER', 'STIFLING', 'PAINS'] +260-123288-0028-260: hyp=['WE', 'SUFFER', 'STIFLING', 'PAINS'] +260-123440-0000-179: ref=['AND', 'HOW', 'ODD', 'THE', 'DIRECTIONS', 'WILL', 'LOOK'] +260-123440-0000-179: hyp=['AND', 'HOW', 'ODD', 'THE', 'DIRECTIONS', 'WILL', 'LOOK'] +260-123440-0001-180: ref=['POOR', 'ALICE'] +260-123440-0001-180: hyp=['POOR', 'ALICE'] +260-123440-0002-181: ref=['IT', 'WAS', 'THE', 'WHITE', 'RABBIT', 'RETURNING', 'SPLENDIDLY', 'DRESSED', 'WITH', 'A', 'PAIR', 'OF', 'WHITE', 'KID', 'GLOVES', 'IN', 'ONE', 'HAND', 'AND', 'A', 'LARGE', 'FAN', 'IN', 'THE', 'OTHER', 'HE', 'CAME', 'TROTTING', 'ALONG', 'IN', 'A', 'GREAT', 'HURRY', 'MUTTERING', 'TO', 'HIMSELF', 'AS', 'HE', 'CAME', 'OH', 'THE', 'DUCHESS', 'THE', 'DUCHESS'] +260-123440-0002-181: hyp=['IT', 'WAS', 'THE', 'WHITE', 'RABBIT', 'RETURNING', 'SPLENDIDLY', 'DRESSED', 'WITH', 'A', 'PAIR', 'OF', 'WHITE', 'KID', 'GLOVES', 'IN', 'ONE', 'HAND', 'AND', 'A', 'LARGE', 'FAN', 'IN', 'THE', 'OTHER', 'HE', 'CAME', 'TROTTING', 'ALONG', 'IN', 'A', 'GREAT', 'HURRY', 'MUTTERING', 'TO', 'HIMSELF', 'AS', 'HE', 'CAME', 'OH', 'THE', 'DUCHESS', 'THE', 'DUCHESS'] +260-123440-0003-182: ref=['OH', "WON'T", 'SHE', 'BE', 'SAVAGE', 'IF', "I'VE", 'KEPT', 'HER', 'WAITING'] +260-123440-0003-182: hyp=['OH', "WON'T", 'SHE', 'BE', 'SAVAGE', 'IF', "I'VE", 'KEPT', 'HER', 'WAITING'] +260-123440-0004-183: ref=['ALICE', 'TOOK', 'UP', 'THE', 'FAN', 'AND', 'GLOVES', 'AND', 'AS', 'THE', 'HALL', 'WAS', 'VERY', 'HOT', 'SHE', 'KEPT', 'FANNING', 'HERSELF', 'ALL', 'THE', 'TIME', 'SHE', 'WENT', 'ON', 'TALKING', 'DEAR', 'DEAR', 'HOW', 'QUEER', 'EVERYTHING', 'IS', 'TO', 'DAY'] +260-123440-0004-183: hyp=['ALICE', 'TOOK', 'UP', 'THE', 'FAN', 'AND', 'GLOVES', 'AND', 'AS', 'THE', 'HALL', 'WAS', 'VERY', 'HOT', 'SHE', 'KEPT', 'FANNING', 'HERSELF', 'ALL', 'THE', 'TIME', 'SHE', 'WENT', 'ON', 'TALKING', 'DEAR', 'DEAR', 'HOW', 'QUEER', 'EVERYTHING', 'IS', 'TO', 'DAY'] +260-123440-0005-184: ref=['AND', 'YESTERDAY', 'THINGS', 'WENT', 'ON', 'JUST', 'AS', 'USUAL'] +260-123440-0005-184: hyp=['AND', 'YESTERDAY', 'THANKS', 'WENT', 'ON', 'JUST', 'AS', 'USUAL'] +260-123440-0006-185: ref=['I', 'WONDER', 'IF', "I'VE", 'BEEN', 'CHANGED', 'IN', 'THE', 'NIGHT'] +260-123440-0006-185: hyp=['I', 'WONDER', 'IF', "I'VE", 'BEEN', 'CHANGED', 'IN', 'THE', 'NIGHT'] +260-123440-0007-186: ref=['I', 'ALMOST', 'THINK', 'I', 'CAN', 'REMEMBER', 'FEELING', 'A', 'LITTLE', 'DIFFERENT'] +260-123440-0007-186: hyp=['I', 'ALMOST', 'THINK', 'I', 'CAN', 'REMEMBER', 'FEELING', 'LITTLE', 'DIFFERENT'] +260-123440-0008-187: ref=["I'LL", 'TRY', 'IF', 'I', 'KNOW', 'ALL', 'THE', 'THINGS', 'I', 'USED', 'TO', 'KNOW'] +260-123440-0008-187: hyp=["I'LL", 'TRY', 'IF', 'I', 'KNOW', 'ALL', 'THE', 'THINGS', 'I', 'USED', 'TO', 'KNOW'] +260-123440-0009-188: ref=['I', 'SHALL', 'NEVER', 'GET', 'TO', 'TWENTY', 'AT', 'THAT', 'RATE'] +260-123440-0009-188: hyp=['I', 'SHALL', 'NEVER', 'GET', 'TO', 'TWENTY', 'AT', 'THAT', 'RATE'] +260-123440-0010-189: ref=['HOW', 
'CHEERFULLY', 'HE', 'SEEMS', 'TO', 'GRIN', 'HOW', 'NEATLY', 'SPREAD', 'HIS', 'CLAWS', 'AND', 'WELCOME', 'LITTLE', 'FISHES', 'IN', 'WITH', 'GENTLY', 'SMILING', 'JAWS'] +260-123440-0010-189: hyp=['HOW', 'CHEERFULLY', 'HE', 'SEEMS', 'TO', 'GRIN', 'HOW', 'NEATLY', 'SPREAD', 'HIS', 'CLAWS', 'AND', 'WELCOME', 'LITTLE', 'FISHES', 'IN', 'WITH', 'GENTLY', 'SMILING', 'JAWS'] +260-123440-0011-190: ref=['NO', "I'VE", 'MADE', 'UP', 'MY', 'MIND', 'ABOUT', 'IT', 'IF', "I'M", 'MABEL', "I'LL", 'STAY', 'DOWN', 'HERE'] +260-123440-0011-190: hyp=['NO', "I'VE", 'MADE', 'UP', 'MY', 'MIND', 'ABOUT', 'IT', 'IF', 'I', 'MAYBEL', "I'LL", 'STAY', 'DOWN', 'HERE'] +260-123440-0012-191: ref=["IT'LL", 'BE', 'NO', 'USE', 'THEIR', 'PUTTING', 'THEIR', 'HEADS', 'DOWN', 'AND', 'SAYING', 'COME', 'UP', 'AGAIN', 'DEAR'] +260-123440-0012-191: hyp=["IT'LL", 'BE', 'NO', 'USE', "THEY'RE", 'PUTTING', 'THEIR', 'HEADS', 'DOWN', 'AND', 'SAYING', 'COME', 'UP', 'AGAIN', 'DEAR'] +260-123440-0013-192: ref=['I', 'AM', 'SO', 'VERY', 'TIRED', 'OF', 'BEING', 'ALL', 'ALONE', 'HERE'] +260-123440-0013-192: hyp=['I', 'AM', 'SO', 'VERY', 'TIRED', 'OF', 'BEING', 'ALL', 'ALONE', 'HERE'] +260-123440-0014-193: ref=['AND', 'I', 'DECLARE', "IT'S", 'TOO', 'BAD', 'THAT', 'IT', 'IS'] +260-123440-0014-193: hyp=['AND', 'I', 'DECLARE', "IT'S", 'TOO', 'BAD', 'THAT', 'IT', 'IS'] +260-123440-0015-194: ref=['I', 'WISH', 'I', "HADN'T", 'CRIED', 'SO', 'MUCH', 'SAID', 'ALICE', 'AS', 'SHE', 'SWAM', 'ABOUT', 'TRYING', 'TO', 'FIND', 'HER', 'WAY', 'OUT'] +260-123440-0015-194: hyp=['I', 'WISH', 'I', "HADN'T", 'CRIED', 'SO', 'MUCH', 'SAID', 'ALICE', 'AS', 'SHE', 'SWAM', 'ABOUT', 'TRYING', 'TO', 'FIND', 'HER', 'WAY', 'OUT'] +260-123440-0016-195: ref=['I', 'SHALL', 'BE', 'PUNISHED', 'FOR', 'IT', 'NOW', 'I', 'SUPPOSE', 'BY', 'BEING', 'DROWNED', 'IN', 'MY', 'OWN', 'TEARS'] +260-123440-0016-195: hyp=['I', 'SHALL', 'BE', 'PUNISHED', 'FOR', 'IT', 'NOW', 'I', 'SUPPOSE', 'BY', 'BEING', 'DROWNED', 'IN', 'MY', 'OWN', 'TEARS'] +260-123440-0017-196: ref=['THAT', 'WILL', 'BE', 'A', 'QUEER', 'THING', 'TO', 'BE', 'SURE'] +260-123440-0017-196: hyp=['THAT', 'WILL', 'BE', 'A', 'QUEER', 'THING', 'TO', 'BE', 'SURE'] +260-123440-0018-197: ref=['I', 'AM', 'VERY', 'TIRED', 'OF', 'SWIMMING', 'ABOUT', 'HERE', 'O', 'MOUSE'] +260-123440-0018-197: hyp=['I', 'AM', 'VERY', 'TIRED', 'OF', 'SWIMMING', 'ABOUT', 'HERE', 'O', 'MOUSE'] +260-123440-0019-198: ref=['CRIED', 'ALICE', 'AGAIN', 'FOR', 'THIS', 'TIME', 'THE', 'MOUSE', 'WAS', 'BRISTLING', 'ALL', 'OVER', 'AND', 'SHE', 'FELT', 'CERTAIN', 'IT', 'MUST', 'BE', 'REALLY', 'OFFENDED'] +260-123440-0019-198: hyp=['CRIED', 'ALICE', 'AGAIN', 'FOR', 'THIS', 'TIME', 'THE', 'MOUSE', 'WAS', 'BRISTLING', 'ALL', 'OVER', 'AND', 'SHE', 'FELT', 'CERTAIN', 'IT', 'MUST', 'BE', 'REALLY', 'OFFENDED'] +260-123440-0020-199: ref=['WE', "WON'T", 'TALK', 'ABOUT', 'HER', 'ANY', 'MORE', 'IF', "YOU'D", 'RATHER', 'NOT', 'WE', 'INDEED'] +260-123440-0020-199: hyp=['WE', "WON'T", 'TALK', 'ABOUT', 'HER', 'ANY', 'MORE', 'IF', "YOU'D", 'RATHER', 'NOT', 'WE', 'INDEED'] +2830-3979-0000-1120: ref=['WE', 'WANT', 'YOU', 'TO', 'HELP', 'US', 'PUBLISH', 'SOME', 'LEADING', 'WORK', 'OF', "LUTHER'S", 'FOR', 'THE', 'GENERAL', 'AMERICAN', 'MARKET', 'WILL', 'YOU', 'DO', 'IT'] +2830-3979-0000-1120: hyp=['WE', 'WANT', 'YOU', 'TO', 'HELP', 'US', 'PUBLISH', 'SOME', 'LEADING', 'WORK', 'OF', 'LUTHERS', 'FOR', 'THE', 'GENERAL', 'AMERICAN', 'MARKET', 'WILL', 'YOU', 'DO', 'IT'] +2830-3979-0001-1121: ref=['THE', 'CONDITION', 'IS', 'THAT', 'I', 'WILL', 'BE', 'PERMITTED', 'TO', 'MAKE', 'LUTHER', 'TALK', 'AMERICAN', 
'STREAMLINE', 'HIM', 'SO', 'TO', 'SPEAK', 'BECAUSE', 'YOU', 'WILL', 'NEVER', 'GET', 'PEOPLE', 'WHETHER', 'IN', 'OR', 'OUTSIDE', 'THE', 'LUTHERAN', 'CHURCH', 'ACTUALLY', 'TO', 'READ', 'LUTHER', 'UNLESS', 'WE', 'MAKE', 'HIM', 'TALK', 'AS', 'HE', 'WOULD', 'TALK', 'TODAY', 'TO', 'AMERICANS'] +2830-3979-0001-1121: hyp=['THE', 'CONDITION', 'IS', 'THAT', 'I', 'WILL', 'BE', 'PERMITTED', 'TO', 'MAKE', 'LUTHER', 'TALK', 'AMERICAN', 'STREAM', 'LINE', 'HIM', 'SO', 'TO', 'SPEAK', 'BECAUSE', 'YOU', 'WILL', 'NEVER', 'GET', 'PEOPLE', 'WHETHER', 'IN', 'OR', 'OUTSIDE', 'THE', 'LUTHERAN', 'CHURCH', 'ACTUALLY', 'TO', 'READ', 'LUTHER', 'UNLESS', 'WE', 'MAKE', 'HIM', 'TALK', 'AS', 'HE', 'WOULD', 'TALK', 'TO', 'DAY', 'TO', 'AMERICANS'] +2830-3979-0002-1122: ref=['LET', 'US', 'BEGIN', 'WITH', 'THAT', 'HIS', 'COMMENTARY', 'ON', 'GALATIANS'] +2830-3979-0002-1122: hyp=['LET', 'US', 'BEGIN', 'WITH', 'THAT', 'HIS', 'COMMENTARY', 'ONGOLATIONS'] +2830-3979-0003-1123: ref=['THE', 'UNDERTAKING', 'WHICH', 'SEEMED', 'SO', 'ATTRACTIVE', 'WHEN', 'VIEWED', 'AS', 'A', 'LITERARY', 'TASK', 'PROVED', 'A', 'MOST', 'DIFFICULT', 'ONE', 'AND', 'AT', 'TIMES', 'BECAME', 'OPPRESSIVE'] +2830-3979-0003-1123: hyp=['THE', 'UNDERTAKING', 'WHICH', 'SEEMED', 'SO', 'ATTRACTIVE', 'WHEN', 'VIEWED', 'AS', 'A', 'LITERARY', 'TASK', 'PROVED', 'A', 'MOST', 'DIFFICULT', 'ONE', 'AND', 'AT', 'TIMES', 'BECAME', 'OPPRESSIVE'] +2830-3979-0004-1124: ref=['IT', 'WAS', 'WRITTEN', 'IN', 'LATIN'] +2830-3979-0004-1124: hyp=['IT', 'WAS', 'WRITTEN', 'IN', 'LATIN'] +2830-3979-0005-1125: ref=['THE', 'WORK', 'HAD', 'TO', 'BE', 'CONDENSED'] +2830-3979-0005-1125: hyp=['THE', 'WORK', 'HAD', 'TO', 'BE', 'CONDENSED'] +2830-3979-0006-1126: ref=['A', 'WORD', 'SHOULD', 'NOW', 'BE', 'SAID', 'ABOUT', 'THE', 'ORIGIN', 'OF', "LUTHER'S", 'COMMENTARY', 'ON', 'GALATIANS'] +2830-3979-0006-1126: hyp=['A', 'WORD', 'SHOULD', 'NOW', 'BE', 'SAID', 'ABOUT', 'THE', 'ORIGIN', 'OF', "LUTHER'S", 'COMMENTARY', 'ANGULATIONS'] +2830-3979-0007-1127: ref=['MUCH', 'LATER', 'WHEN', 'A', 'FRIEND', 'OF', 'HIS', 'WAS', 'PREPARING', 'AN', 'EDITION', 'OF', 'ALL', 'HIS', 'LATIN', 'WORKS', 'HE', 'REMARKED', 'TO', 'HIS', 'HOME', 'CIRCLE', 'IF', 'I', 'HAD', 'MY', 'WAY', 'ABOUT', 'IT', 'THEY', 'WOULD', 'REPUBLISH', 'ONLY', 'THOSE', 'OF', 'MY', 'BOOKS', 'WHICH', 'HAVE', 'DOCTRINE', 'MY', 'GALATIANS', 'FOR', 'INSTANCE'] +2830-3979-0007-1127: hyp=['MUCH', 'LATER', 'WHEN', 'A', 'FRIEND', 'OF', 'HIS', 'WAS', 'PREPARING', 'AN', 'ADDITION', 'OF', 'ALL', 'HIS', 'LATIN', 'WORKS', 'HE', 'REMARKED', 'TO', 'HIS', 'HOME', 'CIRCLE', 'IF', 'I', 'HAD', 'MY', 'WAY', 'ABOUT', 'IT', 'THEY', 'WOULD', 'REPUBLISH', 'ONLY', 'THOSE', 'OF', 'MY', 'BOOKS', 'WHICH', 'HAVE', 'DOCTRINE', 'MIGALLATIONS', 'FOR', 'INSTANCE'] +2830-3979-0008-1128: ref=['IN', 'OTHER', 'WORDS', 'THESE', 'THREE', 'MEN', 'TOOK', 'DOWN', 'THE', 'LECTURES', 'WHICH', 'LUTHER', 'ADDRESSED', 'TO', 'HIS', 'STUDENTS', 'IN', 'THE', 'COURSE', 'OF', 'GALATIANS', 'AND', 'ROERER', 'PREPARED', 'THE', 'MANUSCRIPT', 'FOR', 'THE', 'PRINTER'] +2830-3979-0008-1128: hyp=['IN', 'OTHER', 'WORDS', 'THESE', 'THREE', 'MEN', 'TOOK', 'DOWN', 'THE', 'LECTURES', 'WHICH', 'LUTHER', 'ADDRESSED', 'TO', 'HIS', 'STUDENTS', 'IN', 'THE', 'COURSE', 'OF', 'GALATIANS', 'AND', 'ROAR', 'PREPARED', 'THE', 'MANUSCRIPT', 'FOR', 'THE', 'PRINTER'] +2830-3979-0009-1129: ref=['IT', 'PRESENTS', 'LIKE', 'NO', 'OTHER', 'OF', "LUTHER'S", 'WRITINGS', 'THE', 'CENTRAL', 'THOUGHT', 'OF', 'CHRISTIANITY', 'THE', 'JUSTIFICATION', 'OF', 'THE', 'SINNER', 'FOR', 'THE', 'SAKE', 'OF', "CHRIST'S", 'MERITS', 'ALONE'] 
+2830-3979-0009-1129: hyp=['IT', 'PRESENTS', 'LIKE', 'NO', 'OTHER', 'OF', "LUTHER'S", 'WRITINGS', 'THE', 'CENTRAL', 'THOUGHT', 'OF', 'CHRISTIANITY', 'THE', 'JUSTIFICATION', 'OF', 'THE', 'SINNER', 'FOR', 'THE', 'SAKE', 'OF', 'CHRIST', 'MERITS', 'ALONE'] +2830-3979-0010-1130: ref=['BUT', 'THE', 'ESSENCE', 'OF', "LUTHER'S", 'LECTURES', 'IS', 'THERE'] +2830-3979-0010-1130: hyp=['BUT', 'THE', 'ESSENCE', 'OF', "LUTHER'S", 'LECTURES', 'IS', 'THERE'] +2830-3979-0011-1131: ref=['THE', 'LORD', 'WHO', 'HAS', 'GIVEN', 'US', 'POWER', 'TO', 'TEACH', 'AND', 'TO', 'HEAR', 'LET', 'HIM', 'ALSO', 'GIVE', 'US', 'THE', 'POWER', 'TO', 'SERVE', 'AND', 'TO', 'DO', 'LUKE', 'TWO'] +2830-3979-0011-1131: hyp=['THE', 'LORD', 'WHO', 'HAS', 'GIVEN', 'US', 'POWER', 'TO', 'TEACH', 'AND', 'TO', 'HEAR', 'LET', 'HIM', 'ALSO', 'GIVE', 'US', 'THE', 'POWER', 'TO', 'SERVE', 'AND', 'TO', 'DO', 'LUKE', 'TOO'] +2830-3979-0012-1132: ref=['THE', 'WORD', 'OF', 'OUR', 'GOD', 'SHALL', 'STAND', 'FOREVER'] +2830-3979-0012-1132: hyp=['THE', 'WORD', 'OF', 'OUR', 'GOD', 'SHALL', 'STAND', 'FOR', 'EVER'] +2830-3980-0000-1043: ref=['IN', 'EVERY', 'WAY', 'THEY', 'SOUGHT', 'TO', 'UNDERMINE', 'THE', 'AUTHORITY', 'OF', 'SAINT', 'PAUL'] +2830-3980-0000-1043: hyp=['IN', 'EVERY', 'WAY', 'THEY', 'SOUGHT', 'TO', 'UNDERMINE', 'THE', 'AUTHORITY', 'OF', 'SAINT', 'PAUL'] +2830-3980-0001-1044: ref=['THEY', 'SAID', 'TO', 'THE', 'GALATIANS', 'YOU', 'HAVE', 'NO', 'RIGHT', 'TO', 'THINK', 'HIGHLY', 'OF', 'PAUL'] +2830-3980-0001-1044: hyp=['THEY', 'SAID', 'TO', 'THE', 'GALATIANS', 'YOU', 'HAVE', 'NO', 'RIGHT', 'TO', 'THINK', 'HIGHLY', 'OF', 'PAUL'] +2830-3980-0002-1045: ref=['HE', 'WAS', 'THE', 'LAST', 'TO', 'TURN', 'TO', 'CHRIST'] +2830-3980-0002-1045: hyp=['HE', 'WAS', 'THE', 'LAST', 'TO', 'TURN', 'TO', 'CHRIST'] +2830-3980-0003-1046: ref=['PAUL', 'CAME', 'LATER', 'AND', 'IS', 'BENEATH', 'US'] +2830-3980-0003-1046: hyp=['PAUL', 'CAME', 'LATER', 'IN', 'HIS', 'BENEATH', 'US'] +2830-3980-0004-1047: ref=['INDEED', 'HE', 'PERSECUTED', 'THE', 'CHURCH', 'OF', 'CHRIST', 'FOR', 'A', 'LONG', 'TIME'] +2830-3980-0004-1047: hyp=['INDEED', 'HE', 'PERSECUTED', 'THE', 'CHURCH', 'OF', 'CHRIST', 'FOR', 'A', 'LONG', 'TIME'] +2830-3980-0005-1048: ref=['DO', 'YOU', 'SUPPOSE', 'THAT', 'GOD', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'FEW', 'LUTHERAN', 'HERETICS', 'WOULD', 'DISOWN', 'HIS', 'ENTIRE', 'CHURCH'] +2830-3980-0005-1048: hyp=['DO', 'YOU', 'SUPPOSE', 'THAT', 'GOD', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'FEW', 'LUTHERAN', 'HERETICS', 'WOULD', 'DISOWN', 'HIS', 'ENTIRE', 'CHURCH'] +2830-3980-0006-1049: ref=['AGAINST', 'THESE', 'BOASTING', 'FALSE', 'APOSTLES', 'PAUL', 'BOLDLY', 'DEFENDS', 'HIS', 'APOSTOLIC', 'AUTHORITY', 'AND', 'MINISTRY'] +2830-3980-0006-1049: hyp=['AGAINST', 'THESE', 'BOASTING', 'FALSE', 'APOSTLES', 'PAUL', 'BOLDLY', 'DEFENDS', 'HIS', 'APOSTOLIC', 'AUTHORITY', 'AND', 'MINISTRY'] +2830-3980-0007-1050: ref=['AS', 'THE', 'AMBASSADOR', 'OF', 'A', 'GOVERNMENT', 'IS', 'HONORED', 'FOR', 'HIS', 'OFFICE', 'AND', 'NOT', 'FOR', 'HIS', 'PRIVATE', 'PERSON', 'SO', 'THE', 'MINISTER', 'OF', 'CHRIST', 'SHOULD', 'EXALT', 'HIS', 'OFFICE', 'IN', 'ORDER', 'TO', 'GAIN', 'AUTHORITY', 'AMONG', 'MEN'] +2830-3980-0007-1050: hyp=['AS', 'THE', 'AMBASSADOR', 'OF', 'A', 'GOVERNMENT', 'IS', 'HONORED', 'FOR', 'HIS', 'OFFICE', 'AND', 'NOT', 'FOR', 'HIS', 'PRIVATE', 'PERSON', 'SO', 'THE', 'MINISTER', 'OF', 'CHRIST', 'SHOULD', 'EXALT', 'HIS', 'OFFICE', 'IN', 'ORDER', 'TO', 'GAIN', 'AUTHORITY', 'AMONG', 'MEN'] +2830-3980-0008-1051: ref=['PAUL', 'TAKES', 'PRIDE', 'IN', 'HIS', 'MINISTRY', 'NOT', 'TO', 'HIS', 
'OWN', 'PRAISE', 'BUT', 'TO', 'THE', 'PRAISE', 'OF', 'GOD'] +2830-3980-0008-1051: hyp=['PAUL', 'TAKES', 'PRIDE', 'IN', 'HIS', 'MINISTRY', 'NOT', 'TO', 'HIS', 'OWN', 'PHRASE', 'BUT', 'TO', 'THE', 'PRAISE', 'OF', 'GOD'] +2830-3980-0009-1052: ref=['PAUL', 'AN', 'APOSTLE', 'NOT', 'OF', 'MEN', 'ET', 'CETERA'] +2830-3980-0009-1052: hyp=['PAUL', 'AN', 'APOSTLE', 'NOT', 'OF', 'MEN', 'ET', 'CETERA'] +2830-3980-0010-1053: ref=['EITHER', 'HE', 'CALLS', 'MINISTERS', 'THROUGH', 'THE', 'AGENCY', 'OF', 'MEN', 'OR', 'HE', 'CALLS', 'THEM', 'DIRECTLY', 'AS', 'HE', 'CALLED', 'THE', 'PROPHETS', 'AND', 'APOSTLES'] +2830-3980-0010-1053: hyp=['EITHER', 'HE', 'CALLS', 'MINISTERS', 'THROUGH', 'THE', 'AGENCY', 'OF', 'MEN', 'OR', 'HE', 'CALLS', 'THEM', 'DIRECTLY', 'AS', 'HE', 'CALLED', 'THE', 'PROPHETS', 'AND', 'APOSTLES'] +2830-3980-0011-1054: ref=['PAUL', 'DECLARES', 'THAT', 'THE', 'FALSE', 'APOSTLES', 'WERE', 'CALLED', 'OR', 'SENT', 'NEITHER', 'BY', 'MEN', 'NOR', 'BY', 'MAN'] +2830-3980-0011-1054: hyp=['PAUL', 'DECLARES', 'THAT', 'THE', 'FALSE', 'APOSTLES', 'RECALL', 'THEIR', 'SCENT', 'NEITHER', 'BY', 'MEN', 'NOR', 'BY', 'MAN'] +2830-3980-0012-1055: ref=['THE', 'MOST', 'THEY', 'COULD', 'CLAIM', 'IS', 'THAT', 'THEY', 'WERE', 'SENT', 'BY', 'OTHERS'] +2830-3980-0012-1055: hyp=['THE', 'MOST', 'THEY', 'COULD', 'CLAIM', 'IS', 'THAT', 'THEY', 'WERE', 'SENT', 'BY', 'OTHERS'] +2830-3980-0013-1056: ref=['HE', 'MENTIONS', 'THE', 'APOSTLES', 'FIRST', 'BECAUSE', 'THEY', 'WERE', 'APPOINTED', 'DIRECTLY', 'BY', 'GOD'] +2830-3980-0013-1056: hyp=['HE', 'MENTIONS', 'THE', 'APOSTLES', 'FIRST', 'BECAUSE', 'THEY', 'WERE', 'APPOINTED', 'DIRECTLY', 'BY', 'GOD'] +2830-3980-0014-1057: ref=['THE', 'CALL', 'IS', 'NOT', 'TO', 'BE', 'TAKEN', 'LIGHTLY'] +2830-3980-0014-1057: hyp=['THE', 'CALL', 'IS', 'NOT', 'TO', 'BE', 'TAKEN', 'LIGHTLY'] +2830-3980-0015-1058: ref=['FOR', 'A', 'PERSON', 'TO', 'POSSESS', 'KNOWLEDGE', 'IS', 'NOT', 'ENOUGH'] +2830-3980-0015-1058: hyp=['FOR', 'A', 'PERSON', 'TO', 'POSSESS', 'KNOWLEDGE', 'IS', 'NOT', 'ENOUGH'] +2830-3980-0016-1059: ref=['IT', 'SPOILS', "ONE'S", 'BEST', 'WORK'] +2830-3980-0016-1059: hyp=['IT', 'SPOILS', "ONE'S", 'BEST', 'WORK'] +2830-3980-0017-1060: ref=['WHEN', 'I', 'WAS', 'A', 'YOUNG', 'MAN', 'I', 'THOUGHT', 'PAUL', 'WAS', 'MAKING', 'TOO', 'MUCH', 'OF', 'HIS', 'CALL'] +2830-3980-0017-1060: hyp=['WHEN', 'I', 'WAS', 'A', 'YOUNG', 'MAN', 'I', 'THOUGHT', 'PAUL', 'WAS', 'MAKING', 'TOO', 'MUCH', 'OF', 'HIS', 'CALL'] +2830-3980-0018-1061: ref=['I', 'DID', 'NOT', 'THEN', 'REALIZE', 'THE', 'IMPORTANCE', 'OF', 'THE', 'MINISTRY'] +2830-3980-0018-1061: hyp=['I', 'DID', 'NOT', 'THEN', 'REALIZE', 'THE', 'IMPORTANCE', 'OF', 'THE', 'MINISTRY'] +2830-3980-0019-1062: ref=['I', 'KNEW', 'NOTHING', 'OF', 'THE', 'DOCTRINE', 'OF', 'FAITH', 'BECAUSE', 'WE', 'WERE', 'TAUGHT', 'SOPHISTRY', 'INSTEAD', 'OF', 'CERTAINTY', 'AND', 'NOBODY', 'UNDERSTOOD', 'SPIRITUAL', 'BOASTING'] +2830-3980-0019-1062: hyp=['I', 'KNEW', 'NOTHING', 'OF', 'THE', 'DOCTRINE', 'OF', 'FAITH', 'BECAUSE', 'WE', 'WERE', 'TAUGHT', 'SOPHISTRI', 'INSTEAD', 'OF', 'CERTAINTY', 'AND', 'NOBODY', 'UNDERSTOOD', 'SPIRITUAL', 'BOASTING'] +2830-3980-0020-1063: ref=['THIS', 'IS', 'NO', 'SINFUL', 'PRIDE', 'IT', 'IS', 'HOLY', 'PRIDE'] +2830-3980-0020-1063: hyp=['THIS', 'IS', 'NO', 'SINFUL', 'PRIDE', 'IT', 'IS', 'WHOLLY', 'PRIDE'] +2830-3980-0021-1064: ref=['AND', 'GOD', 'THE', 'FATHER', 'WHO', 'RAISED', 'HIM', 'FROM', 'THE', 'DEAD'] +2830-3980-0021-1064: hyp=['AND', 'GOD', 'THE', 'FATHER', 'WHO', 'RAISED', 'HIM', 'FROM', 'THE', 'DEAD'] +2830-3980-0022-1065: ref=['THE', 
'CLAUSE', 'SEEMS', 'SUPERFLUOUS', 'ON', 'FIRST', 'SIGHT'] +2830-3980-0022-1065: hyp=['THE', 'CLAWS', 'SEEMED', 'SUPERVOUS', 'ON', 'FIRST', 'SIGHT'] +2830-3980-0023-1066: ref=['THESE', 'PERVERTERS', 'OF', 'THE', 'RIGHTEOUSNESS', 'OF', 'CHRIST', 'RESIST', 'THE', 'FATHER', 'AND', 'THE', 'SON', 'AND', 'THE', 'WORKS', 'OF', 'THEM', 'BOTH'] +2830-3980-0023-1066: hyp=['THESE', 'PERVERTERS', 'OF', 'THE', 'RIGHTEOUSNESS', 'OF', 'CHRIST', 'RESIST', 'THE', 'FATHER', 'AND', 'THE', 'SON', 'AND', 'THE', 'WORKS', 'OF', 'THEM', 'BOTH'] +2830-3980-0024-1067: ref=['IN', 'THIS', 'WHOLE', 'EPISTLE', 'PAUL', 'TREATS', 'OF', 'THE', 'RESURRECTION', 'OF', 'CHRIST'] +2830-3980-0024-1067: hyp=['IN', 'THIS', 'WHOLE', 'EPISTLE', 'PAUL', 'TREATS', 'OF', 'THE', 'RESURRECTION', 'OF', 'CHRIST'] +2830-3980-0025-1068: ref=['BY', 'HIS', 'RESURRECTION', 'CHRIST', 'WON', 'THE', 'VICTORY', 'OVER', 'LAW', 'SIN', 'FLESH', 'WORLD', 'DEVIL', 'DEATH', 'HELL', 'AND', 'EVERY', 'EVIL'] +2830-3980-0025-1068: hyp=['BY', 'HIS', 'RESURRECTION', 'CHRIST', 'WON', 'THE', 'VICTORY', 'OVER', 'LAW', 'SIN', 'FLESH', 'WORLD', 'DEVIL', 'DEATH', 'HELL', 'AND', 'EVERY', 'EVIL'] +2830-3980-0026-1069: ref=['VERSE', 'TWO'] +2830-3980-0026-1069: hyp=['FIRST', 'TWO'] +2830-3980-0027-1070: ref=['AND', 'ALL', 'THE', 'BRETHREN', 'WHICH', 'ARE', 'WITH', 'ME'] +2830-3980-0027-1070: hyp=['AND', 'ALL', 'THE', 'BRETHREN', 'WHICH', 'ARE', 'WITH', 'ME'] +2830-3980-0028-1071: ref=['THIS', 'SHOULD', 'GO', 'FAR', 'IN', 'SHUTTING', 'THE', 'MOUTHS', 'OF', 'THE', 'FALSE', 'APOSTLES'] +2830-3980-0028-1071: hyp=['THIS', 'SHOULD', 'GO', 'FAR', 'IN', 'SHUTTING', 'THE', 'MOUTHS', 'OF', 'THE', 'FALSE', 'APOSTLES'] +2830-3980-0029-1072: ref=['ALTHOUGH', 'THE', 'BRETHREN', 'WITH', 'ME', 'ARE', 'NOT', 'APOSTLES', 'LIKE', 'MYSELF', 'YET', 'THEY', 'ARE', 'ALL', 'OF', 'ONE', 'MIND', 'WITH', 'ME', 'THINK', 'WRITE', 'AND', 'TEACH', 'AS', 'I', 'DO'] +2830-3980-0029-1072: hyp=['ALTHOUGH', 'THE', 'BRETHREN', 'WITH', 'ME', 'ARE', 'NOT', 'APOSTLES', 'LIKE', 'MYSELF', 'YET', 'THEY', 'ARE', 'ALL', 'OF', 'ONE', 'MIND', 'WITH', 'ME', 'THINK', 'WRITE', 'AND', 'TEACH', 'AS', 'I', 'DO'] +2830-3980-0030-1073: ref=['THEY', 'DO', 'NOT', 'GO', 'WHERE', 'THE', 'ENEMIES', 'OF', 'THE', 'GOSPEL', 'PREDOMINATE', 'THEY', 'GO', 'WHERE', 'THE', 'CHRISTIANS', 'ARE'] +2830-3980-0030-1073: hyp=['THEY', 'DO', 'NOT', 'GO', 'WHERE', 'THE', 'ENEMIES', 'OF', 'THE', 'GOSPEL', 'PREDOMINATE', 'THEY', 'GO', 'WITH', 'THE', 'CHRISTIANS', 'ARE'] +2830-3980-0031-1074: ref=['WHY', 'DO', 'THEY', 'NOT', 'INVADE', 'THE', 'CATHOLIC', 'PROVINCES', 'AND', 'PREACH', 'THEIR', 'DOCTRINE', 'TO', 'GODLESS', 'PRINCES', 'BISHOPS', 'AND', 'DOCTORS', 'AS', 'WE', 'HAVE', 'DONE', 'BY', 'THE', 'HELP', 'OF', 'GOD'] +2830-3980-0031-1074: hyp=['WHY', 'DO', 'THEY', 'NOT', 'INVADE', 'THE', 'CATHOLIC', 'PROVINCES', 'AND', 'PREACH', 'THEIR', 'DOCTRINE', 'TO', 'GODLESS', 'PRINCES', 'BISHOPS', 'AND', 'DOCTORS', 'AS', 'WE', 'HAVE', 'DONE', 'BY', 'THE', 'HELP', 'OF', 'GOD'] +2830-3980-0032-1075: ref=['WE', 'LOOK', 'FOR', 'THAT', 'REWARD', 'WHICH', 'EYE', 'HATH', 'NOT', 'SEEN', 'NOR', 'EAR', 'HEARD', 'NEITHER', 'HATH', 'ENTERED', 'INTO', 'THE', 'HEART', 'OF', 'MAN'] +2830-3980-0032-1075: hyp=['WE', 'LOOK', 'FOR', 'THAT', 'REWARD', 'WHICH', 'I', 'HATH', 'NOT', 'SEEN', 'NOR', 'EAR', 'HEARD', 'NEITHER', 'HATH', 'ENTERED', 'INTO', 'THE', 'HEART', 'OF', 'MAN'] +2830-3980-0033-1076: ref=['NOT', 'ALL', 'THE', 'GALATIANS', 'HAD', 'BECOME', 'PERVERTED'] +2830-3980-0033-1076: hyp=['NOT', 'ALL', 'THE', 'GALATIANS', 'HAD', 'BECOME', 'PERVERTED'] 
+2830-3980-0034-1077: ref=['THESE', 'MEANS', 'CANNOT', 'BE', 'CONTAMINATED'] +2830-3980-0034-1077: hyp=['THESE', 'MEANS', 'CANNOT', 'BE', 'CONTAMINATED'] +2830-3980-0035-1078: ref=['THEY', 'REMAIN', 'DIVINE', 'REGARDLESS', 'OF', "MEN'S", 'OPINION'] +2830-3980-0035-1078: hyp=['THEY', 'REMAINED', 'DIVINE', 'REGARDLESS', 'OF', "MEN'S", 'OPINION'] +2830-3980-0036-1079: ref=['WHEREVER', 'THE', 'MEANS', 'OF', 'GRACE', 'ARE', 'FOUND', 'THERE', 'IS', 'THE', 'HOLY', 'CHURCH', 'EVEN', 'THOUGH', 'ANTICHRIST', 'REIGNS', 'THERE'] +2830-3980-0036-1079: hyp=['WHEREVER', 'THE', 'MEANS', 'OF', 'GRACE', 'ARE', 'FOUND', 'THERE', 'IS', 'THE', 'HOLY', 'CHURCH', 'EVEN', 'THOUGH', 'ANTICHRIST', 'REIGNS', 'THERE'] +2830-3980-0037-1080: ref=['SO', 'MUCH', 'FOR', 'THE', 'TITLE', 'OF', 'THE', 'EPISTLE', 'NOW', 'FOLLOWS', 'THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'VERSE', 'THREE'] +2830-3980-0037-1080: hyp=['SO', 'MUCH', 'FOR', 'THE', 'TITLE', 'OF', 'THE', 'EPISTLE', 'NOW', 'FOLLOWS', 'THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'VERSE', 'THREE'] +2830-3980-0038-1081: ref=['GRACE', 'BE', 'TO', 'YOU', 'AND', 'PEACE', 'FROM', 'GOD', 'THE', 'FATHER', 'AND', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0038-1081: hyp=['GRACE', 'BE', 'TO', 'YOU', 'IN', 'PEACE', 'FROM', 'GOD', 'THE', 'FATHER', 'AND', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0039-1082: ref=['THE', 'TERMS', 'OF', 'GRACE', 'AND', 'PEACE', 'ARE', 'COMMON', 'TERMS', 'WITH', 'PAUL', 'AND', 'ARE', 'NOW', 'PRETTY', 'WELL', 'UNDERSTOOD'] +2830-3980-0039-1082: hyp=['THE', 'TERMS', 'OF', 'GRACE', 'AND', 'PEACE', 'ARE', 'COMMON', 'TERMS', 'WITH', 'PAUL', 'AND', 'ARE', 'NOW', 'PRETTY', 'WELL', 'UNDERSTOOD'] +2830-3980-0040-1083: ref=['THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'IS', 'REFRESHING'] +2830-3980-0040-1083: hyp=['THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'IS', 'REFRESHING'] +2830-3980-0041-1084: ref=['GRACE', 'INVOLVES', 'THE', 'REMISSION', 'OF', 'SINS', 'PEACE', 'AND', 'A', 'HAPPY', 'CONSCIENCE'] +2830-3980-0041-1084: hyp=['GRACE', 'INVOLVES', 'THE', 'REMISSION', 'OF', 'SINS', 'PEACE', 'AND', 'A', 'HAPPY', 'CONSCIENCE'] +2830-3980-0042-1085: ref=['THE', 'WORLD', 'BRANDS', 'THIS', 'A', 'PERNICIOUS', 'DOCTRINE'] +2830-3980-0042-1085: hyp=['THE', 'WORLD', 'BRAINS', 'THIS', 'A', 'PERNICIOUS', 'DOCTRINE'] +2830-3980-0043-1086: ref=['EXPERIENCE', 'PROVES', 'THIS'] +2830-3980-0043-1086: hyp=['EXPERIENCE', 'PROVES', 'THIS'] +2830-3980-0044-1087: ref=['HOWEVER', 'THE', 'GRACE', 'AND', 'PEACE', 'OF', 'GOD', 'WILL'] +2830-3980-0044-1087: hyp=['HOWEVER', 'THE', 'GRACE', 'AND', 'PEACE', 'OF', 'GOD', 'WILL'] +2830-3980-0045-1088: ref=['MEN', 'SHOULD', 'NOT', 'SPECULATE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0045-1088: hyp=['MEN', 'SHOULD', 'NOT', 'SPECULATE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0046-1089: ref=['WAS', 'IT', 'NOT', 'ENOUGH', 'TO', 'SAY', 'FROM', 'GOD', 'THE', 'FATHER'] +2830-3980-0046-1089: hyp=['WAS', 'IT', 'NOT', 'ENOUGH', 'TO', 'SAY', 'FROM', 'GOD', 'THE', 'FATHER'] +2830-3980-0047-1090: ref=['TO', 'DO', 'SO', 'IS', 'TO', 'LOSE', 'GOD', 'ALTOGETHER', 'BECAUSE', 'GOD', 'BECOMES', 'INTOLERABLE', 'WHEN', 'WE', 'SEEK', 'TO', 'MEASURE', 'AND', 'TO', 'COMPREHEND', 'HIS', 'INFINITE', 'MAJESTY'] +2830-3980-0047-1090: hyp=['TO', 'DO', 'SO', 'IS', 'TO', 'LOSE', 'GOD', 'ALTOGETHER', 'BECAUSE', 'GOD', 'BECOMES', 'INTOLERABLE', 'WHEN', 'WE', 'SEEK', 'TO', 'MEASURE', 'AND', 'TO', 'COMPREHEND', 'HIS', 'INFINITE', 'MAJESTY'] +2830-3980-0048-1091: ref=['HE', 'CAME', 'DOWN', 'TO', 'EARTH', 'LIVED', 'AMONG', 'MEN', 'SUFFERED', 
'WAS', 'CRUCIFIED', 'AND', 'THEN', 'HE', 'DIED', 'STANDING', 'CLEARLY', 'BEFORE', 'US', 'SO', 'THAT', 'OUR', 'HEARTS', 'AND', 'EYES', 'MAY', 'FASTEN', 'UPON', 'HIM'] +2830-3980-0048-1091: hyp=['HE', 'CAME', 'DOWN', 'TO', 'EARTH', 'LIVED', 'AMONG', 'MEN', 'SUFFERED', 'WAS', 'CRUCIFIED', 'AND', 'THEN', 'HE', 'DIED', 'STANDING', 'CLEARLY', 'BEFORE', 'US', 'SO', 'THAT', 'OUR', 'HEARTS', 'AND', 'EYES', 'MAY', 'FASTEN', 'UPON', 'HIM'] +2830-3980-0049-1092: ref=['EMBRACE', 'HIM', 'AND', 'FORGET', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0049-1092: hyp=['EMBRACE', 'HIM', 'AND', 'FORGET', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0050-1093: ref=['DID', 'NOT', 'CHRIST', 'HIMSELF', 'SAY', 'I', 'AM', 'THE', 'WAY', 'AND', 'THE', 'TRUTH', 'AND', 'THE', 'LIFE', 'NO', 'MAN', 'COMETH', 'UNTO', 'THE', 'FATHER', 'BUT', 'BY', 'ME'] +2830-3980-0050-1093: hyp=['DID', 'NOT', 'CHRIST', 'HIMSELF', 'SAY', 'I', 'AM', 'THE', 'WAY', 'AND', 'THE', 'TRUTH', 'AND', 'THE', 'LIFE', 'NO', 'MAN', 'COMETH', 'UNTO', 'THE', 'FATHER', 'BUT', 'BY', 'ME'] +2830-3980-0051-1094: ref=['WHEN', 'YOU', 'ARGUE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD', 'APART', 'FROM', 'THE', 'QUESTION', 'OF', 'JUSTIFICATION', 'YOU', 'MAY', 'BE', 'AS', 'PROFOUND', 'AS', 'YOU', 'LIKE'] +2830-3980-0051-1094: hyp=['WHEN', 'YOU', 'ARGUE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD', 'APART', 'FROM', 'THE', 'QUESTION', 'OF', 'JUSTIFICATION', 'YOU', 'MAY', 'BE', 'AS', 'PROFOUND', 'AS', 'YOU', 'LIKE'] +2830-3980-0052-1095: ref=['WE', 'ARE', 'TO', 'HEAR', 'CHRIST', 'WHO', 'HAS', 'BEEN', 'APPOINTED', 'BY', 'THE', 'FATHER', 'AS', 'OUR', 'DIVINE', 'TEACHER'] +2830-3980-0052-1095: hyp=['WE', 'ARE', 'TO', 'HEAR', 'CHRIST', 'WHO', 'HAS', 'BEEN', 'APPOINTED', 'BY', 'THE', 'FATHER', 'AS', 'OUR', 'DIVINE', 'TEACHER'] +2830-3980-0053-1096: ref=['AT', 'THE', 'SAME', 'TIME', 'PAUL', 'CONFIRMS', 'OUR', 'CREED', 'THAT', 'CHRIST', 'IS', 'VERY', 'GOD'] +2830-3980-0053-1096: hyp=['AT', 'THE', 'SAME', 'TIME', 'PAUL', 'CONFIRMS', 'OUR', 'CREED', 'THAT', 'CHRIST', 'IS', 'VERY', 'GOD'] +2830-3980-0054-1097: ref=['THAT', 'CHRIST', 'IS', 'VERY', 'GOD', 'IS', 'APPARENT', 'IN', 'THAT', 'PAUL', 'ASCRIBES', 'TO', 'HIM', 'DIVINE', 'POWERS', 'EQUALLY', 'WITH', 'THE', 'FATHER', 'AS', 'FOR', 'INSTANCE', 'THE', 'POWER', 'TO', 'DISPENSE', 'GRACE', 'AND', 'PEACE'] +2830-3980-0054-1097: hyp=['THAT', 'CHRIST', 'IS', 'VERY', 'GOD', 'IS', 'APPARENT', 'IN', 'THAT', 'PAUL', 'ASCRIBES', 'TO', 'HIM', 'DIVINE', 'POWERS', 'EQUALLY', 'WITH', 'THE', 'FATHER', 'AS', 'FOR', 'INSTANCE', 'THE', 'POWER', 'DOES', 'SPENCE', 'GRACE', 'AND', 'PEACE'] +2830-3980-0055-1098: ref=['TO', 'BESTOW', 'PEACE', 'AND', 'GRACE', 'LIES', 'IN', 'THE', 'PROVINCE', 'OF', 'GOD', 'WHO', 'ALONE', 'CAN', 'CREATE', 'THESE', 'BLESSINGS', 'THE', 'ANGELS', 'CANNOT'] +2830-3980-0055-1098: hyp=['TO', 'BESTOW', 'PEACE', 'AND', 'GRACE', 'LIES', 'IN', 'THE', 'PROVINCE', 'OF', 'GOD', 'WHO', 'ALONE', 'CAN', 'CREATE', 'THESE', 'BLESSINGS', 'THE', 'ANGELS', 'CANNOT'] +2830-3980-0056-1099: ref=['OTHERWISE', 'PAUL', 'SHOULD', 'HAVE', 'WRITTEN', 'GRACE', 'FROM', 'GOD', 'THE', 'FATHER', 'AND', 'PEACE', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0056-1099: hyp=['OTHERWISE', 'PAUL', 'SHOULD', 'HAVE', 'WRITTEN', 'GRACE', 'FROM', 'GOD', 'THE', 'FATHER', 'IN', 'PEACE', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0057-1100: ref=['THE', 'ARIANS', 'TOOK', 'CHRIST', 'FOR', 'A', 'NOBLE', 'AND', 'PERFECT', 'CREATURE', 'SUPERIOR', 'EVEN', 'TO', 'THE', 'ANGELS', 'BECAUSE', 'BY', 'HIM', 'GOD', 'CREATED', 'HEAVEN', 'AND', 'EARTH'] 
+2830-3980-0057-1100: hyp=['THE', 'ARIANS', 'TOOK', 'CHRIST', 'FOR', 'A', 'NOBLE', 'AND', 'PERFECT', 'CREATURE', 'SUPERIOR', 'EVEN', 'TO', 'THE', 'ANGELS', 'BECAUSE', 'BY', 'HIM', 'GOD', 'CREATED', 'HEAVEN', 'AND', 'EARTH'] +2830-3980-0058-1101: ref=['MOHAMMED', 'ALSO', 'SPEAKS', 'HIGHLY', 'OF', 'CHRIST'] +2830-3980-0058-1101: hyp=['MOHAMMED', 'ALSO', 'SPEAKS', 'HIGHLY', 'OF', 'CHRIST'] +2830-3980-0059-1102: ref=['PAUL', 'STICKS', 'TO', 'HIS', 'THEME'] +2830-3980-0059-1102: hyp=['PAUL', 'STICKS', 'TO', 'HIS', 'THEME'] +2830-3980-0060-1103: ref=['HE', 'NEVER', 'LOSES', 'SIGHT', 'OF', 'THE', 'PURPOSE', 'OF', 'HIS', 'EPISTLE'] +2830-3980-0060-1103: hyp=['HE', 'NEVER', 'LOSES', 'SIGHT', 'OF', 'THE', 'PURPOSE', 'OF', 'HIS', 'EPISTLE'] +2830-3980-0061-1104: ref=['NOT', 'GOLD', 'OR', 'SILVER', 'OR', 'PASCHAL', 'LAMBS', 'OR', 'AN', 'ANGEL', 'BUT', 'HIMSELF', 'WHAT', 'FOR'] +2830-3980-0061-1104: hyp=['NOT', 'GOLD', 'OR', 'SILVER', 'OR', 'PASSIONAL', 'LAMBS', 'OR', 'AN', 'ANGEL', 'BUT', 'HIMSELF', 'WHAT', 'FOR'] +2830-3980-0062-1105: ref=['NOT', 'FOR', 'A', 'CROWN', 'OR', 'A', 'KINGDOM', 'OR', 'OUR', 'GOODNESS', 'BUT', 'FOR', 'OUR', 'SINS'] +2830-3980-0062-1105: hyp=['NOT', 'FOR', 'A', 'CROWN', 'OR', 'A', 'KINGDOM', 'OR', 'A', 'GOODNESS', 'BEFORE', 'OUR', 'SINS'] +2830-3980-0063-1106: ref=['UNDERSCORE', 'THESE', 'WORDS', 'FOR', 'THEY', 'ARE', 'FULL', 'OF', 'COMFORT', 'FOR', 'SORE', 'CONSCIENCES'] +2830-3980-0063-1106: hyp=['UNDERSCORE', 'THESE', 'WORDS', 'FOR', 'THEY', 'ARE', 'FULL', 'OF', 'COMFORT', 'FOR', 'SORE', 'CONSCIENCES'] +2830-3980-0064-1107: ref=['HOW', 'MAY', 'WE', 'OBTAIN', 'REMISSION', 'OF', 'OUR', 'SINS'] +2830-3980-0064-1107: hyp=['HOW', 'MAY', 'WE', 'OBTAIN', 'REMISSION', 'OF', 'OUR', 'SINS'] +2830-3980-0065-1108: ref=['PAUL', 'ANSWERS', 'THE', 'MAN', 'WHO', 'IS', 'NAMED', 'JESUS', 'CHRIST', 'AND', 'THE', 'SON', 'OF', 'GOD', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0065-1108: hyp=['PAUL', 'ANSWERS', 'THE', 'MAN', 'WHO', 'IS', 'NAMED', 'JESUS', 'CHRIST', 'AND', 'THE', 'SON', 'OF', 'GOD', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0066-1109: ref=['SINCE', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS', 'IT', 'STANDS', 'TO', 'REASON', 'THAT', 'THEY', 'CANNOT', 'BE', 'PUT', 'AWAY', 'BY', 'OUR', 'OWN', 'EFFORTS'] +2830-3980-0066-1109: hyp=['SINCE', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS', 'IT', 'STANDS', 'TO', 'REASON', 'THAT', 'THEY', 'CANNOT', 'BE', 'PUT', 'AWAY', 'BY', 'OUR', 'OWN', 'EFFORTS'] +2830-3980-0067-1110: ref=['THIS', 'SENTENCE', 'ALSO', 'DEFINES', 'OUR', 'SINS', 'AS', 'GREAT', 'SO', 'GREAT', 'IN', 'FACT', 'THAT', 'THE', 'WHOLE', 'WORLD', 'COULD', 'NOT', 'MAKE', 'AMENDS', 'FOR', 'A', 'SINGLE', 'SIN'] +2830-3980-0067-1110: hyp=['THIS', 'SENTENCE', 'ALSO', 'DEFINES', 'OUR', 'SINS', 'AS', 'GREAT', 'SO', 'GREAT', 'IN', 'FACT', 'THAT', 'THE', 'WHOLE', 'WORLD', 'COULD', 'NOT', 'MAKE', 'AMENDS', 'FOR', 'A', 'SINGLE', 'SIN'] +2830-3980-0068-1111: ref=['THE', 'GREATNESS', 'OF', 'THE', 'RANSOM', 'CHRIST', 'THE', 'SON', 'OF', 'GOD', 'INDICATES', 'THIS'] +2830-3980-0068-1111: hyp=['THE', 'GREATNESS', 'OF', 'THE', 'RANSOM', 'CHRIST', 'THE', 'SON', 'OF', 'GOD', 'INDICATES', 'THIS'] +2830-3980-0069-1112: ref=['THE', 'VICIOUS', 'CHARACTER', 'OF', 'SIN', 'IS', 'BROUGHT', 'OUT', 'BY', 'THE', 'WORDS', 'WHO', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0069-1112: hyp=['THE', 'VICIOUS', 'CHARACTER', 'OF', 'SIN', 'IS', 'BROUGHT', 'OUT', 'BY', 'THE', 'WORDS', 'WHO', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0070-1113: ref=['BUT', 'WE', 'ARE', 'CARELESS', 
'WE', 'MAKE', 'LIGHT', 'OF', 'SIN'] +2830-3980-0070-1113: hyp=['BUT', 'WE', 'ARE', 'CARELESS', 'WE', 'MAKE', 'LIGHT', 'OF', 'SIN'] +2830-3980-0071-1114: ref=['WE', 'THINK', 'THAT', 'BY', 'SOME', 'LITTLE', 'WORK', 'OR', 'MERIT', 'WE', 'CAN', 'DISMISS', 'SIN'] +2830-3980-0071-1114: hyp=['WE', 'THINK', 'THAT', 'BY', 'SOME', 'LITTLE', 'WORK', 'OR', 'MERIT', 'WE', 'CAN', 'DISMISS', 'IN'] +2830-3980-0072-1115: ref=['THIS', 'PASSAGE', 'THEN', 'BEARS', 'OUT', 'THE', 'FACT', 'THAT', 'ALL', 'MEN', 'ARE', 'SOLD', 'UNDER', 'SIN'] +2830-3980-0072-1115: hyp=['THIS', 'PASSAGE', 'THEN', 'BEARS', 'OUT', 'THE', 'FACT', 'THAT', 'ALL', 'MEN', 'ARE', 'SOLD', 'UNDER', 'SIN'] +2830-3980-0073-1116: ref=['THIS', 'ATTITUDE', 'SPRINGS', 'FROM', 'A', 'FALSE', 'CONCEPTION', 'OF', 'SIN', 'THE', 'CONCEPTION', 'THAT', 'SIN', 'IS', 'A', 'SMALL', 'MATTER', 'EASILY', 'TAKEN', 'CARE', 'OF', 'BY', 'GOOD', 'WORKS', 'THAT', 'WE', 'MUST', 'PRESENT', 'OURSELVES', 'UNTO', 'GOD', 'WITH', 'A', 'GOOD', 'CONSCIENCE', 'THAT', 'WE', 'MUST', 'FEEL', 'NO', 'SIN', 'BEFORE', 'WE', 'MAY', 'FEEL', 'THAT', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS'] +2830-3980-0073-1116: hyp=['THIS', 'ATTITUDE', 'SPRINGS', 'FROM', 'A', 'FALSE', 'CONCEPTION', 'OF', 'SIN', 'THE', 'CONCEPTION', 'THAT', 'SIN', 'IS', 'A', 'SMALL', 'MATTER', 'EASILY', 'TAKEN', 'CARE', 'OF', 'BY', 'GOOD', 'WORKS', 'THAT', 'WE', 'MUST', 'PRESENT', 'OURSELVES', 'INTO', 'GOD', 'WITH', 'A', 'GOOD', 'CONSCIENCE', 'THAT', 'WE', 'MUST', 'FEEL', 'NO', 'SIN', 'BEFORE', 'WE', 'MAY', 'FEEL', 'THAT', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS'] +2830-3980-0074-1117: ref=['THIS', 'ATTITUDE', 'IS', 'UNIVERSAL', 'AND', 'PARTICULARLY', 'DEVELOPED', 'IN', 'THOSE', 'WHO', 'CONSIDER', 'THEMSELVES', 'BETTER', 'THAN', 'OTHERS'] +2830-3980-0074-1117: hyp=['THIS', 'ATTITUDE', 'IS', 'UNIVERSAL', 'AND', 'PARTICULARLY', 'DEVELOPED', 'IN', 'THOSE', 'WHO', 'CONSIDER', 'THEMSELVES', 'BETTER', 'THAN', 'OTHERS'] +2830-3980-0075-1118: ref=['BUT', 'THE', 'REAL', 'SIGNIFICANCE', 'AND', 'COMFORT', 'OF', 'THE', 'WORDS', 'FOR', 'OUR', 'SINS', 'IS', 'LOST', 'UPON', 'THEM'] +2830-3980-0075-1118: hyp=['BUT', 'THE', 'REAL', 'SIGNIFICANCE', 'AND', 'COMFORT', 'OF', 'THE', 'WORDS', 'FOR', 'OUR', 'SINS', 'IS', 'LOST', 'UPON', 'THEM'] +2830-3980-0076-1119: ref=['ON', 'THE', 'OTHER', 'HAND', 'WE', 'ARE', 'NOT', 'TO', 'REGARD', 'THEM', 'AS', 'SO', 'TERRIBLE', 'THAT', 'WE', 'MUST', 'DESPAIR'] +2830-3980-0076-1119: hyp=['ON', 'THE', 'OTHER', 'HAND', 'WE', 'ARE', 'NOT', 'TO', 'REGARD', 'THEM', 'AS', 'SO', 'TERRIBLE', 'THAT', 'WE', 'MUST', 'DESPAIR'] +2961-960-0000-497: ref=['HE', 'PASSES', 'ABRUPTLY', 'FROM', 'PERSONS', 'TO', 'IDEAS', 'AND', 'NUMBERS', 'AND', 'FROM', 'IDEAS', 'AND', 'NUMBERS', 'TO', 'PERSONS', 'FROM', 'THE', 'HEAVENS', 'TO', 'MAN', 'FROM', 'ASTRONOMY', 'TO', 'PHYSIOLOGY', 'HE', 'CONFUSES', 'OR', 'RATHER', 'DOES', 'NOT', 'DISTINGUISH', 'SUBJECT', 'AND', 'OBJECT', 'FIRST', 'AND', 'FINAL', 'CAUSES', 'AND', 'IS', 'DREAMING', 'OF', 'GEOMETRICAL', 'FIGURES', 'LOST', 'IN', 'A', 'FLUX', 'OF', 'SENSE'] +2961-960-0000-497: hyp=['HE', 'PASSES', 'ABRUPTLY', 'FROM', 'PERSONS', 'TO', 'IDEAS', 'AND', 'NUMBERS', 'AND', 'FROM', 'IDEAS', 'AND', 'NUMBERS', 'TO', 'PERSONS', 'FROM', 'THE', 'HEAVENS', 'TO', 'MEN', 'FROM', 'ASTRONOMY', 'TO', 'PHYSIOLOGY', 'HE', 'CONFUSES', 'OR', 'RATHER', 'DOES', 'NOT', 'DISTINGUISH', 'SUBJECT', 'AND', 'OBJECT', 'FIRST', 'AND', 'FINAL', 'CAUSES', 'AND', 'IS', 'DREAMING', 'OF', 'GEOMETRICAL', 'FIGURES', 'LOST', 'IN', 'A', 'FLUX', 'OF', 'SENSE'] +2961-960-0001-498: ref=['THE', 'INFLUENCE', 'WITH', 
'THE', 'TIMAEUS', 'HAS', 'EXERCISED', 'UPON', 'POSTERITY', 'IS', 'DUE', 'PARTLY', 'TO', 'A', 'MISUNDERSTANDING'] +2961-960-0001-498: hyp=['THE', 'INFLUENCE', 'WHICH', 'THE', 'TIMAS', 'HAS', 'EXERCISED', 'UPON', 'POSTERITY', 'IS', 'DUE', 'PARTLY', 'TO', 'A', 'MISUNDERSTANDING'] +2961-960-0002-499: ref=['IN', 'THE', 'SUPPOSED', 'DEPTHS', 'OF', 'THIS', 'DIALOGUE', 'THE', 'NEO', 'PLATONISTS', 'FOUND', 'HIDDEN', 'MEANINGS', 'AND', 'CONNECTIONS', 'WITH', 'THE', 'JEWISH', 'AND', 'CHRISTIAN', 'SCRIPTURES', 'AND', 'OUT', 'OF', 'THEM', 'THEY', 'ELICITED', 'DOCTRINES', 'QUITE', 'AT', 'VARIANCE', 'WITH', 'THE', 'SPIRIT', 'OF', 'PLATO'] +2961-960-0002-499: hyp=['IN', 'THE', 'SUPPOSED', 'DEPTHS', 'OF', 'THIS', 'DIALOGUE', 'THE', 'NEO', 'PLATINISTS', 'FOUND', 'HIDDEN', 'MEANINGS', 'IN', 'CONNECTIONS', 'WITH', 'THE', 'JEWISH', 'AND', 'CHRISTIAN', 'SCRIPTURES', 'AND', 'OUT', 'OF', 'THEM', 'THEY', 'ELICITED', 'DOCTRINES', 'QUITE', 'AT', 'VARIANCE', 'WITH', 'THE', 'SPIRIT', 'OF', 'PLATO'] +2961-960-0003-500: ref=['THEY', 'WERE', 'ABSORBED', 'IN', 'HIS', 'THEOLOGY', 'AND', 'WERE', 'UNDER', 'THE', 'DOMINION', 'OF', 'HIS', 'NAME', 'WHILE', 'THAT', 'WHICH', 'WAS', 'TRULY', 'GREAT', 'AND', 'TRULY', 'CHARACTERISTIC', 'IN', 'HIM', 'HIS', 'EFFORT', 'TO', 'REALIZE', 'AND', 'CONNECT', 'ABSTRACTIONS', 'WAS', 'NOT', 'UNDERSTOOD', 'BY', 'THEM', 'AT', 'ALL'] +2961-960-0003-500: hyp=['THEY', 'WERE', 'ABSORBED', 'IN', 'HIS', 'THEOLOGY', 'AND', 'WERE', 'UNDER', 'THE', 'DOMINION', 'OF', 'HIS', 'NAME', 'WHILE', 'THAT', 'WHICH', 'WAS', 'TRULY', 'GREAT', 'AND', 'TRULY', 'CORRECTURISTIC', 'IN', 'HIM', 'HIS', 'EFFORT', 'TO', 'REALIZE', 'AND', 'CONNECT', 'ABSTRACTIONS', 'WAS', 'NOT', 'UNDERSTOOD', 'BY', 'THEM', 'AT', 'ALL'] +2961-960-0004-501: ref=['THERE', 'IS', 'NO', 'DANGER', 'OF', 'THE', 'MODERN', 'COMMENTATORS', 'ON', 'THE', 'TIMAEUS', 'FALLING', 'INTO', 'THE', 'ABSURDITIES', 'OF', 'THE', 'NEO', 'PLATONISTS'] +2961-960-0004-501: hyp=['THERE', 'IS', 'NO', 'DANGER', 'OF', 'THE', 'MODERN', 'COMMENTATORS', 'ON', 'THE', 'TIMEUS', 'FALLING', 'INTO', 'THE', 'ABSURDITIES', 'OF', 'THE', 'NEOPLATANISTS'] +2961-960-0005-502: ref=['IN', 'THE', 'PRESENT', 'DAY', 'WE', 'ARE', 'WELL', 'AWARE', 'THAT', 'AN', 'ANCIENT', 'PHILOSOPHER', 'IS', 'TO', 'BE', 'INTERPRETED', 'FROM', 'HIMSELF', 'AND', 'BY', 'THE', 'CONTEMPORARY', 'HISTORY', 'OF', 'THOUGHT'] +2961-960-0005-502: hyp=['IN', 'THE', 'PRESENT', 'DAY', 'WE', 'ARE', 'WELL', 'AWARE', 'THAT', 'AN', 'ANCIENT', 'PHILOSOPHER', 'IS', 'TO', 'BE', 'INTERPRETED', 'FROM', 'HIMSELF', 'AND', 'BY', 'THE', 'CONTEMPORARY', 'HISTORY', 'OF', 'THOUGHT'] +2961-960-0006-503: ref=['THE', 'FANCIES', 'OF', 'THE', 'NEO', 'PLATONISTS', 'ARE', 'ONLY', 'INTERESTING', 'TO', 'US', 'BECAUSE', 'THEY', 'EXHIBIT', 'A', 'PHASE', 'OF', 'THE', 'HUMAN', 'MIND', 'WHICH', 'PREVAILED', 'WIDELY', 'IN', 'THE', 'FIRST', 'CENTURIES', 'OF', 'THE', 'CHRISTIAN', 'ERA', 'AND', 'IS', 'NOT', 'WHOLLY', 'EXTINCT', 'IN', 'OUR', 'OWN', 'DAY'] +2961-960-0006-503: hyp=['THE', 'FANCIES', 'OF', 'THE', 'NEW', 'PLATINISTS', 'ARE', 'ONLY', 'INTERESTING', 'TO', 'US', 'BECAUSE', 'THEY', 'EXHIBIT', 'A', 'PHASE', 'OF', 'THE', 'HUMAN', 'MIND', 'WHICH', 'PREVAIL', 'WIDELY', 'IN', 'THE', 'FIRST', 'CENTURIES', 'OF', 'THE', 'CHRISTIAN', 'ERA', 'AND', 'IS', 'NOT', 'WHOLLY', 'EXTINCT', 'IN', 'OUR', 'OWN', 'DAY'] +2961-960-0007-504: ref=['BUT', 'THEY', 'HAVE', 'NOTHING', 'TO', 'DO', 'WITH', 'THE', 'INTERPRETATION', 'OF', 'PLATO', 'AND', 'IN', 'SPIRIT', 'THEY', 'ARE', 'OPPOSED', 'TO', 'HIM'] +2961-960-0007-504: hyp=['BUT', 'THEY', 'HAVE', 'NOTHING', 'TO', 'DO', 
'WITH', 'THE', 'INTERPRETATION', 'OF', 'PLATO', 'AND', 'IN', 'SPIRIT', 'THEY', 'ARE', 'OPPOSED', 'TO', 'HIM'] +2961-960-0008-505: ref=['WE', 'DO', 'NOT', 'KNOW', 'HOW', 'PLATO', 'WOULD', 'HAVE', 'ARRANGED', 'HIS', 'OWN', 'DIALOGUES', 'OR', 'WHETHER', 'THE', 'THOUGHT', 'OF', 'ARRANGING', 'ANY', 'OF', 'THEM', 'BESIDES', 'THE', 'TWO', 'TRILOGIES', 'WHICH', 'HE', 'HAS', 'EXPRESSLY', 'CONNECTED', 'WAS', 'EVER', 'PRESENT', 'TO', 'HIS', 'MIND'] +2961-960-0008-505: hyp=['WE', 'DO', 'NOT', 'KNOW', 'HOW', 'PLATO', 'WOULD', 'HAVE', 'ARRANGED', 'HIS', 'OWN', 'DIALECTS', 'OR', 'WHETHER', 'THE', 'THOUGHT', 'OF', 'ARRANGING', 'ANY', 'OF', 'THEM', 'BESIDES', 'THE', 'TUTRILOGIES', 'WHICH', 'HE', 'HAS', 'EXPRESSLY', 'CONNECTED', 'WAS', 'EVER', 'PRESENT', 'TO', 'HIS', 'MIND'] +2961-960-0009-506: ref=['THE', 'DIALOGUE', 'IS', 'PRIMARILY', 'CONCERNED', 'WITH', 'THE', 'ANIMAL', 'CREATION', 'INCLUDING', 'UNDER', 'THIS', 'TERM', 'THE', 'HEAVENLY', 'BODIES', 'AND', 'WITH', 'MAN', 'ONLY', 'AS', 'ONE', 'AMONG', 'THE', 'ANIMALS'] +2961-960-0009-506: hyp=['THE', 'DIALOGUE', 'IS', 'PRIMARILY', 'CONCERNED', 'WITH', 'THE', 'ANIMAL', 'CREATION', 'INCLUDING', 'UNDER', 'THIS', 'TERM', 'THE', 'HEAVENLY', 'BODIES', 'AND', 'WITH', 'MAN', 'ONLY', 'AS', 'ONE', 'AMONG', 'THE', 'ANIMALS'] +2961-960-0010-507: ref=['BUT', 'HE', 'HAS', 'NOT', 'AS', 'YET', 'DEFINED', 'THIS', 'INTERMEDIATE', 'TERRITORY', 'WHICH', 'LIES', 'SOMEWHERE', 'BETWEEN', 'MEDICINE', 'AND', 'MATHEMATICS', 'AND', 'HE', 'WOULD', 'HAVE', 'FELT', 'THAT', 'THERE', 'WAS', 'AS', 'GREAT', 'AN', 'IMPIETY', 'IN', 'RANKING', 'THEORIES', 'OF', 'PHYSICS', 'FIRST', 'IN', 'THE', 'ORDER', 'OF', 'KNOWLEDGE', 'AS', 'IN', 'PLACING', 'THE', 'BODY', 'BEFORE', 'THE', 'SOUL'] +2961-960-0010-507: hyp=['BUT', 'HE', 'HAS', 'NOT', 'AS', 'YET', 'THE', 'FIND', 'THIS', 'INTERMEDIATE', 'TERRITORY', 'WHICH', 'LIES', 'SOMEWHERE', 'BETWEEN', 'MEDICINE', 'AND', 'MATHEMATICS', 'AND', 'HE', 'WOULD', 'HAVE', 'FELT', 'THAT', 'THERE', 'WAS', 'AS', 'GREAT', 'AN', 'IMPIETY', 'IN', 'RANKING', 'THEORIES', 'OF', 'PHYSICS', 'FIRST', 'IN', 'THE', 'ORDER', 'OF', 'KNOWLEDGE', 'AS', 'IN', 'PLACING', 'THE', 'BODY', 'BEFORE', 'THE', 'SOUL'] +2961-960-0011-508: ref=['WITH', 'HERACLEITUS', 'HE', 'ACKNOWLEDGES', 'THE', 'PERPETUAL', 'FLUX', 'LIKE', 'ANAXAGORAS', 'HE', 'ASSERTS', 'THE', 'PREDOMINANCE', 'OF', 'MIND', 'ALTHOUGH', 'ADMITTING', 'AN', 'ELEMENT', 'OF', 'NECESSITY', 'WHICH', 'REASON', 'IS', 'INCAPABLE', 'OF', 'SUBDUING', 'LIKE', 'THE', 'PYTHAGOREANS', 'HE', 'SUPPOSES', 'THE', 'MYSTERY', 'OF', 'THE', 'WORLD', 'TO', 'BE', 'CONTAINED', 'IN', 'NUMBER'] +2961-960-0011-508: hyp=['WITH', 'HERACLITUS', 'HE', 'ACKNOWLEDGES', 'THE', 'PERPETUAL', 'FLUX', 'LIKE', 'AN', 'EXAGGARIST', 'HE', 'ASSERTS', 'THE', 'PREDOMINANCE', 'OF', 'MIND', 'ALTHOUGH', 'ADMITTING', 'AN', 'ELEMENT', 'OF', 'NECESSITY', 'WHICH', 'REASON', 'IS', 'INCAPABLE', 'OF', 'SUBDUING', 'LIKE', 'THE', 'PITHAGORIANS', 'HE', 'SUPPOSES', 'THE', 'MYSTERY', 'OF', 'THE', 'WORLD', 'TO', 'BE', 'CONTAINED', 'IN', 'NUMBER'] +2961-960-0012-509: ref=['MANY', 'IF', 'NOT', 'ALL', 'THE', 'ELEMENTS', 'OF', 'THE', 'PRE', 'SOCRATIC', 'PHILOSOPHY', 'ARE', 'INCLUDED', 'IN', 'THE', 'TIMAEUS'] +2961-960-0012-509: hyp=['MANY', 'IF', 'NOT', 'ALL', 'THE', 'ELEMENTS', 'OF', 'THE', 'PRESOCRATIC', 'PHILOSOPHY', 'ARE', 'INCLUDED', 'IN', 'THE', 'TIMAS'] +2961-960-0013-510: ref=['IT', 'IS', 'PROBABLE', 'THAT', 'THE', 'RELATION', 'OF', 'THE', 'IDEAS', 'TO', 'GOD', 'OR', 'OF', 'GOD', 'TO', 'THE', 'WORLD', 'WAS', 'DIFFERENTLY', 'CONCEIVED', 'BY', 'HIM', 'AT', 'DIFFERENT', 'TIMES', 'OF', 
'HIS', 'LIFE'] +2961-960-0013-510: hyp=['IT', 'IS', 'PROBABLE', 'THAT', 'THE', 'RELATION', 'OF', 'THE', 'IDEAS', 'TO', 'GOD', 'OR', 'OF', 'GOD', 'TO', 'THE', 'WORLD', 'WAS', 'DIFFERENTLY', 'CONCEIVED', 'BY', 'HIM', 'AT', 'DIFFERENT', 'TIMES', 'OF', 'HIS', 'LIFE'] +2961-960-0014-511: ref=['THE', 'IDEAS', 'ALSO', 'REMAIN', 'BUT', 'THEY', 'HAVE', 'BECOME', 'TYPES', 'IN', 'NATURE', 'FORMS', 'OF', 'MEN', 'ANIMALS', 'BIRDS', 'FISHES'] +2961-960-0014-511: hyp=['THE', 'IDEAS', 'ALSO', 'REMAIN', 'BUT', 'THEY', 'HAVE', 'BECOME', 'TYPES', 'IN', 'NATURE', 'FORMS', 'OF', 'MEN', 'ANIMALS', 'BIRDS', 'FISHES'] +2961-960-0015-512: ref=['THE', 'STYLE', 'AND', 'PLAN', 'OF', 'THE', 'TIMAEUS', 'DIFFER', 'GREATLY', 'FROM', 'THAT', 'OF', 'ANY', 'OTHER', 'OF', 'THE', 'PLATONIC', 'DIALOGUES'] +2961-960-0015-512: hyp=['THE', 'STYLE', 'AND', 'PLAN', 'OF', 'THE', 'TIMIRS', 'DIFFER', 'GREATLY', 'FROM', 'THAT', 'OF', 'ANY', 'OTHER', 'OF', 'THE', 'PLATONIC', 'DIALOGUES'] +2961-960-0016-513: ref=['BUT', 'PLATO', 'HAS', 'NOT', 'THE', 'SAME', 'MASTERY', 'OVER', 'HIS', 'INSTRUMENT', 'WHICH', 'HE', 'EXHIBITS', 'IN', 'THE', 'PHAEDRUS', 'OR', 'SYMPOSIUM'] +2961-960-0016-513: hyp=['BUT', 'PLATO', 'HAS', 'NOT', 'THE', 'SAME', 'MYSTERY', 'OVER', 'HIS', 'INSTRUMENT', 'WHICH', 'HE', 'EXHIBITS', 'IN', 'THE', 'FEGERIS', 'OR', 'SUPPOSIUM'] +2961-960-0017-514: ref=['NOTHING', 'CAN', 'EXCEED', 'THE', 'BEAUTY', 'OR', 'ART', 'OF', 'THE', 'INTRODUCTION', 'IN', 'WHICH', 'HE', 'IS', 'USING', 'WORDS', 'AFTER', 'HIS', 'ACCUSTOMED', 'MANNER'] +2961-960-0017-514: hyp=['NOTHING', 'CAN', 'EXCEED', 'THE', 'BEAUTY', 'OR', 'ART', 'OF', 'INTRODUCTION', 'IN', 'WHICH', 'HIS', 'USING', 'WORDS', 'AFTER', 'HIS', 'ACCUSTOMED', 'MANNER'] +2961-960-0018-515: ref=['BUT', 'IN', 'THE', 'REST', 'OF', 'THE', 'WORK', 'THE', 'POWER', 'OF', 'LANGUAGE', 'SEEMS', 'TO', 'FAIL', 'HIM', 'AND', 'THE', 'DRAMATIC', 'FORM', 'IS', 'WHOLLY', 'GIVEN', 'UP'] +2961-960-0018-515: hyp=['BUT', 'IN', 'THE', 'REST', 'OF', 'THE', 'WORK', 'THE', 'POWER', 'OF', 'LANGUAGE', 'SEEMS', 'TO', 'FAIL', 'HIM', 'AND', 'THE', 'DRAMATIC', 'FORM', 'IS', 'WHOLLY', 'GIVEN', 'UP'] +2961-960-0019-516: ref=['HE', 'COULD', 'WRITE', 'IN', 'ONE', 'STYLE', 'BUT', 'NOT', 'IN', 'ANOTHER', 'AND', 'THE', 'GREEK', 'LANGUAGE', 'HAD', 'NOT', 'AS', 'YET', 'BEEN', 'FASHIONED', 'BY', 'ANY', 'POET', 'OR', 'PHILOSOPHER', 'TO', 'DESCRIBE', 'PHYSICAL', 'PHENOMENA'] +2961-960-0019-516: hyp=['HE', 'COULD', 'WRITE', 'IN', 'ONE', 'STYLE', 'BUT', 'NOT', 'IN', 'ANOTHER', 'AND', 'THE', 'GREEK', 'LANGUAGE', 'HAD', 'NOT', 'AS', 'YET', 'BEEN', 'FASHIONED', 'BY', 'ANY', 'POET', 'OR', 'PHILOSOPHER', 'TO', 'DESCRIBE', 'PHYSICAL', 'PHENOMENA'] +2961-960-0020-517: ref=['AND', 'HENCE', 'WE', 'FIND', 'THE', 'SAME', 'SORT', 'OF', 'CLUMSINESS', 'IN', 'THE', 'TIMAEUS', 'OF', 'PLATO', 'WHICH', 'CHARACTERIZES', 'THE', 'PHILOSOPHICAL', 'POEM', 'OF', 'LUCRETIUS'] +2961-960-0020-517: hyp=['AND', 'HENCE', 'WE', 'FIND', 'THE', 'SAME', 'SORT', 'OF', 'CLUMSINESS', 'IN', 'THE', 'TIMAS', 'OF', 'PLATO', 'WHICH', 'CHARACTERIZES', 'THE', 'PHILOSOPHICAL', 'POEM', 'OF', 'LUCRETIUS'] +2961-960-0021-518: ref=['THERE', 'IS', 'A', 'WANT', 'OF', 'FLOW', 'AND', 'OFTEN', 'A', 'DEFECT', 'OF', 'RHYTHM', 'THE', 'MEANING', 'IS', 'SOMETIMES', 'OBSCURE', 'AND', 'THERE', 'IS', 'A', 'GREATER', 'USE', 'OF', 'APPOSITION', 'AND', 'MORE', 'OF', 'REPETITION', 'THAN', 'OCCURS', 'IN', "PLATO'S", 'EARLIER', 'WRITINGS'] +2961-960-0021-518: hyp=['THERE', 'IS', 'A', 'WANT', 'OF', 'FLOW', 'AND', 'OFTEN', 'A', 'DEFECT', 'OF', 'RHYTHM', 'THE', 'MEANING', 'IS', 'SOMETIMES', 'OBSCURE', 
'AND', 'THERE', 'IS', 'A', 'GREATER', 'USE', 'OF', 'APPOSITION', 'IN', 'MORE', 'OF', 'REPETITION', 'THAN', 'OCCURS', 'IN', "PLATO'S", 'EARLIER', 'WRITINGS'] +2961-960-0022-519: ref=['PLATO', 'HAD', 'NOT', 'THE', 'COMMAND', 'OF', 'HIS', 'MATERIALS', 'WHICH', 'WOULD', 'HAVE', 'ENABLED', 'HIM', 'TO', 'PRODUCE', 'A', 'PERFECT', 'WORK', 'OF', 'ART'] +2961-960-0022-519: hyp=['PLATO', 'HAD', 'NOT', 'THE', 'COMMAND', 'OF', 'HIS', 'MATERIALS', 'WHICH', 'WOULD', 'HAVE', 'ENABLED', 'HIM', 'TO', 'PRODUCE', 'A', 'PERFECT', 'WORK', 'OF', 'ART'] +2961-961-0000-520: ref=['SOCRATES', 'BEGINS', 'THE', 'TIMAEUS', 'WITH', 'A', 'SUMMARY', 'OF', 'THE', 'REPUBLIC'] +2961-961-0000-520: hyp=['SOCRATES', 'BEGINS', 'TO', 'TIMAS', 'WITH', 'A', 'SUMMARY', 'OF', 'THE', 'REPUBLIC'] +2961-961-0001-521: ref=['AND', 'NOW', 'HE', 'DESIRES', 'TO', 'SEE', 'THE', 'IDEAL', 'STATE', 'SET', 'IN', 'MOTION', 'HE', 'WOULD', 'LIKE', 'TO', 'KNOW', 'HOW', 'SHE', 'BEHAVED', 'IN', 'SOME', 'GREAT', 'STRUGGLE'] +2961-961-0001-521: hyp=['AND', 'NOW', 'HE', 'DESIRES', 'TO', 'SEE', 'THE', 'IDEAL', 'STATE', 'SET', 'IN', 'MOTION', 'HE', 'WOULD', 'LIKE', 'TO', 'KNOW', 'HOW', 'SHE', 'BEHAVED', 'IN', 'SOME', 'GREAT', 'STRUGGLE'] +2961-961-0002-522: ref=['AND', 'THEREFORE', 'TO', 'YOU', 'I', 'TURN', 'TIMAEUS', 'CITIZEN', 'OF', 'LOCRIS', 'WHO', 'ARE', 'AT', 'ONCE', 'A', 'PHILOSOPHER', 'AND', 'A', 'STATESMAN', 'AND', 'TO', 'YOU', 'CRITIAS', 'WHOM', 'ALL', 'ATHENIANS', 'KNOW', 'TO', 'BE', 'SIMILARLY', 'ACCOMPLISHED', 'AND', 'TO', 'HERMOCRATES', 'WHO', 'IS', 'ALSO', 'FITTED', 'BY', 'NATURE', 'AND', 'EDUCATION', 'TO', 'SHARE', 'IN', 'OUR', 'DISCOURSE'] +2961-961-0002-522: hyp=['AND', 'THEREFORE', 'TO', 'YOU', 'I', 'TURN', 'TO', 'ME', 'AS', 'CITIZEN', 'OF', 'LOCRIS', 'WHO', 'ARE', 'AT', 'ONCE', 'A', 'PHILOSOPHER', 'IN', 'A', 'STATESMAN', 'AND', 'TO', 'YOU', 'CRITIUS', 'WHOM', 'ALL', 'ATHENIANS', 'KNOW', 'TO', 'BE', 'SIMILARLY', 'ACCOMPLISHED', 'AND', 'TO', 'HERMOCRATES', 'WHOSE', 'ALSO', 'FITTED', 'BY', 'NATURE', 'AND', 'EDUCATION', 'TO', 'SHARE', 'IN', 'OUR', 'DISCOURSE'] +2961-961-0003-523: ref=['I', 'WILL', 'IF', 'TIMAEUS', 'APPROVES', 'I', 'APPROVE'] +2961-961-0003-523: hyp=['I', 'WILL', 'IF', 'TO', 'ME', 'AS', 'APPROVES', 'I', 'APPROVE'] +2961-961-0004-524: ref=['LISTEN', 'THEN', 'SOCRATES', 'TO', 'A', 'TALE', 'OF', "SOLON'S", 'WHO', 'BEING', 'THE', 'FRIEND', 'OF', 'DROPIDAS', 'MY', 'GREAT', 'GRANDFATHER', 'TOLD', 'IT', 'TO', 'MY', 'GRANDFATHER', 'CRITIAS', 'AND', 'HE', 'TOLD', 'ME'] +2961-961-0004-524: hyp=['LISTEN', 'THEN', 'SOCRATES', 'TO', 'A', 'TALE', 'OF', 'SILENCE', 'WHO', 'BEING', 'THE', 'FRIEND', 'OF', 'DROPIDUS', 'BY', 'GREAT', 'GRANDFATHER', 'TOLD', 'IT', 'TO', 'MY', 'GRANDFATHER', 'CRITIUS', 'AND', 'HE', 'TOLD', 'ME'] +2961-961-0005-525: ref=['SOME', 'POEMS', 'OF', 'SOLON', 'WERE', 'RECITED', 'BY', 'THE', 'BOYS'] +2961-961-0005-525: hyp=['SOME', 'POEMS', 'OF', 'SOLID', 'WERE', 'RECITED', 'BY', 'THE', 'BOYS'] +2961-961-0006-526: ref=['AND', 'WHAT', 'WAS', 'THE', 'SUBJECT', 'OF', 'THE', 'POEM', 'SAID', 'THE', 'PERSON', 'WHO', 'MADE', 'THE', 'REMARK'] +2961-961-0006-526: hyp=['AND', 'WHAT', 'WAS', 'THE', 'SUBJECT', 'OF', 'THE', 'POEM', 'SAID', 'THE', 'PERSON', 'WHO', 'MADE', 'THE', 'REMARK'] +2961-961-0007-527: ref=['THE', 'SUBJECT', 'WAS', 'A', 'VERY', 'NOBLE', 'ONE', 'HE', 'DESCRIBED', 'THE', 'MOST', 'FAMOUS', 'ACTION', 'IN', 'WHICH', 'THE', 'ATHENIAN', 'PEOPLE', 'WERE', 'EVER', 'ENGAGED'] +2961-961-0007-527: hyp=['THE', 'SUBJECT', 'WAS', 'A', 'VERY', 'NOBLE', 'ONE', 'HE', 'DESCRIBED', 'THE', 'MOST', 'FAMOUS', 'ACTION', 'IN', 'WHICH', 
'THE', 'ATHENIAN', 'PEOPLE', 'WERE', 'EVER', 'ENGAGED'] +2961-961-0008-528: ref=['BUT', 'THE', 'MEMORY', 'OF', 'THEIR', 'EXPLOITS', 'HAS', 'PASSED', 'AWAY', 'OWING', 'TO', 'THE', 'LAPSE', 'OF', 'TIME', 'AND', 'THE', 'EXTINCTION', 'OF', 'THE', 'ACTORS'] +2961-961-0008-528: hyp=['BUT', 'THE', 'MEMORY', 'OF', 'THEIR', 'EXPLOITS', 'HAD', 'PASSED', 'AWAY', 'OWING', 'TO', 'THE', 'LAPSE', 'OF', 'TIME', 'AND', 'THE', 'EXTINCTION', 'OF', 'THE', 'ACTORS'] +2961-961-0009-529: ref=['TELL', 'US', 'SAID', 'THE', 'OTHER', 'THE', 'WHOLE', 'STORY', 'AND', 'WHERE', 'SOLON', 'HEARD', 'THE', 'STORY'] +2961-961-0009-529: hyp=['TELL', 'US', 'SAID', 'THE', 'OTHER', 'THE', 'WHOLE', 'STORY', 'AND', 'WHERE', 'SOLON', 'HEARD', 'THIS', 'STORY'] +2961-961-0010-530: ref=['BUT', 'IN', 'EGYPT', 'THE', 'TRADITIONS', 'OF', 'OUR', 'OWN', 'AND', 'OTHER', 'LANDS', 'ARE', 'BY', 'US', 'REGISTERED', 'FOR', 'EVER', 'IN', 'OUR', 'TEMPLES'] +2961-961-0010-530: hyp=['BUT', 'IN', 'EGYPT', 'THE', 'TRADITIONS', 'OF', 'OUR', 'OWN', 'AND', 'OTHER', 'LANDS', 'ARE', 'BY', 'US', 'REGISTERED', 'FOREVER', 'IN', 'OUR', 'TEMPLES'] +2961-961-0011-531: ref=['THE', 'GENEALOGIES', 'WHICH', 'YOU', 'HAVE', 'RECITED', 'TO', 'US', 'OUT', 'OF', 'YOUR', 'OWN', 'ANNALS', 'SOLON', 'ARE', 'A', 'MERE', "CHILDREN'S", 'STORY'] +2961-961-0011-531: hyp=['THE', 'GENEALOGIES', 'WHICH', 'YOU', 'HAVE', 'RECITED', 'TO', 'US', 'OUT', 'OF', 'YOUR', 'OWN', 'ANNAL', 'SOLEMN', 'ARE', 'A', 'MERE', "CHILDREN'S", 'STORY'] +2961-961-0012-532: ref=['FOR', 'IN', 'THE', 'TIMES', 'BEFORE', 'THE', 'GREAT', 'FLOOD', 'ATHENS', 'WAS', 'THE', 'GREATEST', 'AND', 'BEST', 'OF', 'CITIES', 'AND', 'DID', 'THE', 'NOBLEST', 'DEEDS', 'AND', 'HAD', 'THE', 'BEST', 'CONSTITUTION', 'OF', 'ANY', 'UNDER', 'THE', 'FACE', 'OF', 'HEAVEN'] +2961-961-0012-532: hyp=['FOR', 'IN', 'THE', 'TIMES', 'BEFORE', 'THE', 'GREAT', 'FLOOD', 'ATHENS', 'WAS', 'THE', 'GREATEST', 'AND', 'BEST', 'OF', 'CITIES', 'AND', 'DEAD', 'THE', 'NOBLEST', 'DEEDS', 'AND', 'HAD', 'THE', 'BEST', 'CONSTITUTION', 'OF', 'ANY', 'UNDER', 'THE', 'FACE', 'OF', 'HEAVEN'] +2961-961-0013-533: ref=['SOLON', 'MARVELLED', 'AND', 'DESIRED', 'TO', 'BE', 'INFORMED', 'OF', 'THE', 'PARTICULARS'] +2961-961-0013-533: hyp=['SULLEN', 'MARVELLED', 'AND', 'DESIRED', 'TO', 'BE', 'INFORMED', 'OF', 'THE', 'PARTICULARS'] +2961-961-0014-534: ref=['NINE', 'THOUSAND', 'YEARS', 'HAVE', 'ELAPSED', 'SINCE', 'SHE', 'FOUNDED', 'YOURS', 'AND', 'EIGHT', 'THOUSAND', 'SINCE', 'SHE', 'FOUNDED', 'OURS', 'AS', 'OUR', 'ANNALS', 'RECORD'] +2961-961-0014-534: hyp=['NINE', 'THOUSAND', 'YEARS', 'HAVE', 'ELAPSED', 'SINCE', 'YOU', 'FOUND', 'IT', 'YOURS', 'AND', 'EIGHT', 'THOUSAND', 'SINCE', 'YOU', 'FOUND', 'IT', 'OURS', 'AS', 'OUR', 'ANNALS', 'RECORD'] +2961-961-0015-535: ref=['MANY', 'LAWS', 'EXIST', 'AMONG', 'US', 'WHICH', 'ARE', 'THE', 'COUNTERPART', 'OF', 'YOURS', 'AS', 'THEY', 'WERE', 'IN', 'THE', 'OLDEN', 'TIME'] +2961-961-0015-535: hyp=['MANY', 'LAWS', 'EXIST', 'AMONG', 'US', 'WHICH', 'ARE', 'THE', 'COUNTERPART', 'OF', 'YOURS', 'AS', 'THEY', 'WERE', 'IN', 'THE', 'OLDEN', 'TIME'] +2961-961-0016-536: ref=['I', 'WILL', 'BRIEFLY', 'DESCRIBE', 'THEM', 'TO', 'YOU', 'AND', 'YOU', 'SHALL', 'READ', 'THE', 'ACCOUNT', 'OF', 'THEM', 'AT', 'YOUR', 'LEISURE', 'IN', 'THE', 'SACRED', 'REGISTERS'] +2961-961-0016-536: hyp=['I', 'WILL', 'BRIEFLY', 'DESCRIBE', 'THEM', 'TO', 'YOU', 'AND', 'YOU', 'SHALL', 'READ', 'THE', 'ACCOUNT', 'OF', 'THEM', 'AT', 'YOUR', 'LEISURE', 'IN', 'THE', 'SACRED', 'REGISTERS'] +2961-961-0017-537: ref=['OBSERVE', 'AGAIN', 'WHAT', 'CARE', 'THE', 'LAW', 'TOOK', 'IN', 'THE', 
'PURSUIT', 'OF', 'WISDOM', 'SEARCHING', 'OUT', 'THE', 'DEEP', 'THINGS', 'OF', 'THE', 'WORLD', 'AND', 'APPLYING', 'THEM', 'TO', 'THE', 'USE', 'OF', 'MAN'] +2961-961-0017-537: hyp=['OBSERVE', 'AGAIN', 'WHAT', 'CARE', 'THE', 'LAW', 'TOOK', 'IN', 'THE', 'PURSUIT', 'OF', 'WISDOM', 'SEARCHING', 'OUT', 'THE', 'DEEP', 'THINGS', 'OF', 'THE', 'WORLD', 'AND', 'APPLYING', 'THEM', 'TO', 'THE', 'USE', 'OF', 'MEN'] +2961-961-0018-538: ref=['THE', 'MOST', 'FAMOUS', 'OF', 'THEM', 'ALL', 'WAS', 'THE', 'OVERTHROW', 'OF', 'THE', 'ISLAND', 'OF', 'ATLANTIS'] +2961-961-0018-538: hyp=['THE', 'MOST', 'FAME', 'AS', 'OF', 'THEM', 'ALL', 'WAS', 'THE', 'OVERTHROW', 'OF', 'THE', 'ISLAND', 'OF', 'ATLANTIS'] +2961-961-0019-539: ref=['FOR', 'AT', 'THE', 'PERIL', 'OF', 'HER', 'OWN', 'EXISTENCE', 'AND', 'WHEN', 'THE', 'OTHER', 'HELLENES', 'HAD', 'DESERTED', 'HER', 'SHE', 'REPELLED', 'THE', 'INVADER', 'AND', 'OF', 'HER', 'OWN', 'ACCORD', 'GAVE', 'LIBERTY', 'TO', 'ALL', 'THE', 'NATIONS', 'WITHIN', 'THE', 'PILLARS'] +2961-961-0019-539: hyp=['FOR', 'AT', 'THE', 'PERIL', 'OF', 'HER', 'OWN', 'EXISTENCE', 'AND', 'WHEN', 'THE', 'OTTER', 'HELLENES', 'HAD', 'DESERTED', 'HER', 'SHE', 'REPELLED', 'INVADER', 'AND', 'OF', 'HER', 'OWN', 'ACCORD', 'GAVE', 'LIBERTY', 'TO', 'ALL', 'THE', 'NATIONS', 'WITHIN', 'THE', 'PILLARS'] +2961-961-0020-540: ref=['THIS', 'IS', 'THE', 'EXPLANATION', 'OF', 'THE', 'SHALLOWS', 'WHICH', 'ARE', 'FOUND', 'IN', 'THAT', 'PART', 'OF', 'THE', 'ATLANTIC', 'OCEAN'] +2961-961-0020-540: hyp=['THIS', 'IS', 'THE', 'EXPLANATION', 'OF', 'THE', 'SHALLOWS', 'WHICH', 'ARE', 'FOUND', 'IN', 'THAT', 'PART', 'OF', 'THE', 'ATLANTIC', 'OCEAN'] +2961-961-0021-541: ref=['BUT', 'I', 'WOULD', 'NOT', 'SPEAK', 'AT', 'THE', 'TIME', 'BECAUSE', 'I', 'WANTED', 'TO', 'REFRESH', 'MY', 'MEMORY'] +2961-961-0021-541: hyp=['BUT', 'I', 'WOULD', 'NOT', 'SPEAK', 'AT', 'THE', 'TIME', 'BECAUSE', 'I', 'WANTED', 'TO', 'REFRESH', 'MY', 'MEMORY'] +2961-961-0022-542: ref=['THEN', 'NOW', 'LET', 'ME', 'EXPLAIN', 'TO', 'YOU', 'THE', 'ORDER', 'OF', 'OUR', 'ENTERTAINMENT', 'FIRST', 'TIMAEUS', 'WHO', 'IS', 'A', 'NATURAL', 'PHILOSOPHER', 'WILL', 'SPEAK', 'OF', 'THE', 'ORIGIN', 'OF', 'THE', 'WORLD', 'GOING', 'DOWN', 'TO', 'THE', 'CREATION', 'OF', 'MAN', 'AND', 'THEN', 'I', 'SHALL', 'RECEIVE', 'THE', 'MEN', 'WHOM', 'HE', 'HAS', 'CREATED', 'AND', 'SOME', 'OF', 'WHOM', 'WILL', 'HAVE', 'BEEN', 'EDUCATED', 'BY', 'YOU', 'AND', 'INTRODUCE', 'THEM', 'TO', 'YOU', 'AS', 'THE', 'LOST', 'ATHENIAN', 'CITIZENS', 'OF', 'WHOM', 'THE', 'EGYPTIAN', 'RECORD', 'SPOKE'] +2961-961-0022-542: hyp=['THEN', 'THOU', 'LET', 'ME', 'EXPLAIN', 'TO', 'YOU', 'THE', 'ORDER', 'OF', 'OUR', 'ENTERTAINMENT', 'FIRST', 'TIMAS', 'WHO', 'IS', 'A', 'NATURAL', 'PHILOSOPHER', 'WILL', 'SPEAK', 'OF', 'THE', 'ORIGIN', 'OF', 'THE', 'WORLD', 'GOING', 'DOWN', 'TO', 'THE', 'CREATION', 'OF', 'MEN', 'AND', 'THEN', 'I', 'SHALL', 'RECEIVE', 'THE', 'MEN', 'WHOM', 'HE', 'HAS', 'CREATED', 'AND', 'SOME', 'OF', 'WHOM', 'WILL', 'HAVE', 'BEEN', 'EDUCATED', 'BY', 'YOU', 'AND', 'INTRODUCE', 'THEM', 'TO', 'YOU', 'AS', 'THE', 'LOST', 'ATHENIAN', 'CITIZENS', 'OF', 'WHOM', 'THE', 'EGYPTIAN', 'RECORDS', 'SPOKE'] +3570-5694-0000-2433: ref=['BUT', 'ALREADY', 'AT', 'A', 'POINT', 'IN', 'ECONOMIC', 'EVOLUTION', 'FAR', 'ANTEDATING', 'THE', 'EMERGENCE', 'OF', 'THE', 'LADY', 'SPECIALISED', 'CONSUMPTION', 'OF', 'GOODS', 'AS', 'AN', 'EVIDENCE', 'OF', 'PECUNIARY', 'STRENGTH', 'HAD', 'BEGUN', 'TO', 'WORK', 'OUT', 'IN', 'A', 'MORE', 'OR', 'LESS', 'ELABORATE', 'SYSTEM'] +3570-5694-0000-2433: hyp=['BETTER', 'ALREADY', 'AT', 'A', 'POINT', 'IN', 
'ECONOMIC', 'EVOLUTION', 'FAR', 'ANTIDATING', 'THE', 'EMERGENCE', 'OF', 'THE', 'LADY', 'SPECIALIZED', 'CONSUMPTION', 'OF', 'GOODS', 'AS', 'AN', 'EVIDENCE', 'OF', 'PECUNIARY', 'STRENGTH', 'HAD', 'BEGUN', 'TO', 'WORK', 'OUT', 'IN', 'A', 'MORE', 'OR', 'LESS', 'CELEBRATE', 'SYSTEM'] +3570-5694-0001-2434: ref=['THE', 'UTILITY', 'OF', 'CONSUMPTION', 'AS', 'AN', 'EVIDENCE', 'OF', 'WEALTH', 'IS', 'TO', 'BE', 'CLASSED', 'AS', 'A', 'DERIVATIVE', 'GROWTH'] +3570-5694-0001-2434: hyp=['THEATILITY', 'OF', 'CONSUMPTION', 'AS', 'AN', 'EVIDENCE', 'OF', 'WEALTH', 'IS', 'TO', 'BE', 'CLASSED', 'AS', 'A', 'DERIVATIVE', 'GROWTH'] +3570-5694-0002-2435: ref=['SUCH', 'CONSUMPTION', 'AS', 'FALLS', 'TO', 'THE', 'WOMEN', 'IS', 'MERELY', 'INCIDENTAL', 'TO', 'THEIR', 'WORK', 'IT', 'IS', 'A', 'MEANS', 'TO', 'THEIR', 'CONTINUED', 'LABOUR', 'AND', 'NOT', 'A', 'CONSUMPTION', 'DIRECTED', 'TO', 'THEIR', 'OWN', 'COMFORT', 'AND', 'FULNESS', 'OF', 'LIFE'] +3570-5694-0002-2435: hyp=['SUCH', 'CONSUMPTION', 'AS', 'FALLS', 'THROUGH', 'THE', 'WOMEN', 'IS', 'MERELY', 'INCIDENTAL', 'TO', 'THEIR', 'WORK', 'IT', 'IS', 'A', 'MEANS', 'TO', 'THEIR', 'CONTINUED', 'LABOR', 'AND', 'NOT', 'TO', 'CONSUMPTION', 'DIRECTED', 'TO', 'THEIR', 'OWN', 'COMFORT', 'AND', 'FULLNESS', 'OF', 'LIFE'] +3570-5694-0003-2436: ref=['WITH', 'A', 'FURTHER', 'ADVANCE', 'IN', 'CULTURE', 'THIS', 'TABU', 'MAY', 'CHANGE', 'INTO', 'SIMPLE', 'CUSTOM', 'OF', 'A', 'MORE', 'OR', 'LESS', 'RIGOROUS', 'CHARACTER', 'BUT', 'WHATEVER', 'BE', 'THE', 'THEORETICAL', 'BASIS', 'OF', 'THE', 'DISTINCTION', 'WHICH', 'IS', 'MAINTAINED', 'WHETHER', 'IT', 'BE', 'A', 'TABU', 'OR', 'A', 'LARGER', 'CONVENTIONALITY', 'THE', 'FEATURES', 'OF', 'THE', 'CONVENTIONAL', 'SCHEME', 'OF', 'CONSUMPTION', 'DO', 'NOT', 'CHANGE', 'EASILY'] +3570-5694-0003-2436: hyp=['WITH', 'A', 'FURTHER', 'ADVANCE', 'IN', 'CULTURE', 'THIS', 'TABOO', 'MAY', 'CHANGED', 'INTO', 'SIMPLE', 'CUSTOM', 'OF', 'A', 'MORE', 'OR', 'LESS', 'RIGOROUS', 'CHARACTER', 'BUT', 'WHATEVER', 'BE', 'THE', 'THEORETICAL', 'BASIS', 'OF', 'THE', 'DISTINCTION', 'WHICH', 'IS', 'MAINTAINED', 'WHETHER', 'IT', 'BE', 'AT', 'A', 'BOO', 'OR', 'A', 'LARGER', 'CONVENTIONALITY', 'THE', 'FEATURES', 'OF', 'THE', 'CONVENTIONAL', 'SCHEME', 'OF', 'CONSUMPTION', 'DO', 'NOT', 'CHANGE', 'EASILY'] +3570-5694-0004-2437: ref=['IN', 'THE', 'NATURE', 'OF', 'THINGS', 'LUXURIES', 'AND', 'THE', 'COMFORTS', 'OF', 'LIFE', 'BELONG', 'TO', 'THE', 'LEISURE', 'CLASS'] +3570-5694-0004-2437: hyp=['IN', 'THE', 'NATURE', 'OF', 'THINGS', 'LUXURIES', 'AND', 'THE', 'COMFORTS', 'OF', 'LIFE', 'BELONG', 'TO', 'THE', 'LEISURE', 'CLASS'] +3570-5694-0005-2438: ref=['UNDER', 'THE', 'TABU', 'CERTAIN', 'VICTUALS', 'AND', 'MORE', 'PARTICULARLY', 'CERTAIN', 'BEVERAGES', 'ARE', 'STRICTLY', 'RESERVED', 'FOR', 'THE', 'USE', 'OF', 'THE', 'SUPERIOR', 'CLASS'] +3570-5694-0005-2438: hyp=['UNDER', 'THE', 'TABOO', 'CERTAIN', 'VICTUALS', 'AND', 'MORE', 'PARTICULARLY', 'CERTAIN', 'BEVERAGES', 'ARE', 'STRICTLY', 'RESERVED', 'FOR', 'THE', 'USE', 'OF', 'THE', 'SUPERIOR', 'CLASS'] +3570-5694-0006-2439: ref=['DRUNKENNESS', 'AND', 'THE', 'OTHER', 'PATHOLOGICAL', 'CONSEQUENCES', 'OF', 'THE', 'FREE', 'USE', 'OF', 'STIMULANTS', 'THEREFORE', 'TEND', 'IN', 'THEIR', 'TURN', 'TO', 'BECOME', 'HONORIFIC', 'AS', 'BEING', 'A', 'MARK', 'AT', 'THE', 'SECOND', 'REMOVE', 'OF', 'THE', 'SUPERIOR', 'STATUS', 'OF', 'THOSE', 'WHO', 'ARE', 'ABLE', 'TO', 'AFFORD', 'THE', 'INDULGENCE'] +3570-5694-0006-2439: hyp=['DRINKENNESS', 'AND', 'THE', 'OTHER', 'PETHOLOGICAL', 'CONSEQUENCES', 'OF', 'THE', 'FREWS', 'OF', 'STIMULANTS', 'THEREFORE', 
'TEND', 'IN', 'THEIR', 'TURN', 'TO', 'BECOME', 'UNERRIFIC', 'AS', 'BEING', 'A', 'MARK', 'AT', 'THE', 'SECOND', 'REMOVE', 'OF', 'THE', 'SUPERIOR', 'STATUS', 'OF', 'THOSE', 'WHO', 'ARE', 'ABLE', 'TO', 'AFFORD', 'THE', 'INDULGENCE'] +3570-5694-0007-2440: ref=['IT', 'HAS', 'EVEN', 'HAPPENED', 'THAT', 'THE', 'NAME', 'FOR', 'CERTAIN', 'DISEASED', 'CONDITIONS', 'OF', 'THE', 'BODY', 'ARISING', 'FROM', 'SUCH', 'AN', 'ORIGIN', 'HAS', 'PASSED', 'INTO', 'EVERYDAY', 'SPEECH', 'AS', 'A', 'SYNONYM', 'FOR', 'NOBLE', 'OR', 'GENTLE'] +3570-5694-0007-2440: hyp=['IT', 'HAS', 'EVEN', 'HAPPENED', 'THAT', 'THE', 'NAME', 'FOR', 'CERTAIN', 'DISEASED', 'CONDITIONS', 'OF', 'THE', 'BODY', 'ARISING', 'FROM', 'SUCH', 'AN', 'ORIGIN', 'HAS', 'PASSED', 'INTO', 'EVERYDAY', 'SPEECH', 'AS', 'A', 'SYNONYM', 'FOR', 'NOBLE', 'OR', 'GENTLE'] +3570-5694-0008-2441: ref=['THE', 'CONSUMPTION', 'OF', 'LUXURIES', 'IN', 'THE', 'TRUE', 'SENSE', 'IS', 'A', 'CONSUMPTION', 'DIRECTED', 'TO', 'THE', 'COMFORT', 'OF', 'THE', 'CONSUMER', 'HIMSELF', 'AND', 'IS', 'THEREFORE', 'A', 'MARK', 'OF', 'THE', 'MASTER'] +3570-5694-0008-2441: hyp=['THE', 'CONSUMPTION', 'OF', 'LUXURIES', 'IN', 'THE', 'TRUE', 'SENSE', 'IS', 'A', 'CONSUMPTION', 'DIRECTED', 'TO', 'THE', 'COMFORT', 'OF', 'THE', 'CONSUMER', 'HIMSELF', 'AND', 'IS', 'THEREFORE', 'A', 'MARK', 'OF', 'THE', 'MASTER'] +3570-5694-0009-2442: ref=['WITH', 'MANY', 'QUALIFICATIONS', 'WITH', 'MORE', 'QUALIFICATIONS', 'AS', 'THE', 'PATRIARCHAL', 'TRADITION', 'HAS', 'GRADUALLY', 'WEAKENED', 'THE', 'GENERAL', 'RULE', 'IS', 'FELT', 'TO', 'BE', 'RIGHT', 'AND', 'BINDING', 'THAT', 'WOMEN', 'SHOULD', 'CONSUME', 'ONLY', 'FOR', 'THE', 'BENEFIT', 'OF', 'THEIR', 'MASTERS'] +3570-5694-0009-2442: hyp=['WITH', 'MANY', 'QUALIFICATIONS', 'WITH', 'MORE', 'QUALIFICATIONS', 'AS', 'THE', 'PATRIARCHAL', 'TRADITION', 'HAS', 'GRADUALLY', 'WEAKENED', 'THE', 'GENERAL', 'RULE', 'IS', 'FELT', 'TO', 'BE', 'RIGHT', 'AND', 'BINDING', 'THAT', 'WOMEN', 'SHOULD', 'CONSUME', 'ONLY', 'FOR', 'THE', 'BENEFIT', 'OF', 'THEIR', 'MASTERS'] +3570-5694-0010-2443: ref=['THE', 'OBJECTION', 'OF', 'COURSE', 'PRESENTS', 'ITSELF', 'THAT', 'EXPENDITURE', 'ON', "WOMEN'S", 'DRESS', 'AND', 'HOUSEHOLD', 'PARAPHERNALIA', 'IS', 'AN', 'OBVIOUS', 'EXCEPTION', 'TO', 'THIS', 'RULE', 'BUT', 'IT', 'WILL', 'APPEAR', 'IN', 'THE', 'SEQUEL', 'THAT', 'THIS', 'EXCEPTION', 'IS', 'MUCH', 'MORE', 'OBVIOUS', 'THAN', 'SUBSTANTIAL'] +3570-5694-0010-2443: hyp=['THE', 'OBJECTION', 'OF', 'COURSE', 'PRESENTS', 'ITSELF', 'THAT', 'EXPENDITURE', 'ON', "WOMEN'S", 'DRESS', 'AND', 'HOUSEHOLD', 'PARAPHERNALIA', 'IS', 'AN', 'OBVIOUS', 'EXCEPTION', 'TO', 'THIS', 'RULE', 'BUT', 'IT', 'WILL', 'APPEAR', 'IN', 'THE', 'SEQUEL', 'THAT', 'THIS', 'EXCEPTION', 'IS', 'MUCH', 'MORE', 'OBVIOUS', 'THAN', 'SUBSTANTIAL'] +3570-5694-0011-2444: ref=['THE', 'CUSTOM', 'OF', 'FESTIVE', 'GATHERINGS', 'PROBABLY', 'ORIGINATED', 'IN', 'MOTIVES', 'OF', 'CONVIVIALITY', 'AND', 'RELIGION', 'THESE', 'MOTIVES', 'ARE', 'ALSO', 'PRESENT', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'BUT', 'THEY', 'DO', 'NOT', 'CONTINUE', 'TO', 'BE', 'THE', 'SOLE', 'MOTIVES'] +3570-5694-0011-2444: hyp=['THE', 'CUSTOM', 'OF', 'FESTIVE', 'GATHERINGS', 'PROBABLY', 'ORIGINATED', 'IN', 'MOTIVES', 'OF', 'CONVIVIALITY', 'AND', 'RELIGION', 'THESE', 'MOTIVES', 'ARE', 'ALSO', 'PRESENT', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'BUT', 'THEY', 'DO', 'NOT', 'CONTINUE', 'TO', 'BE', 'THE', 'SOLE', 'MOTIVES'] +3570-5694-0012-2445: ref=['THERE', 'IS', 'A', 'MORE', 'OR', 'LESS', 'ELABORATE', 'SYSTEM', 'OF', 'RANK', 'AND', 'GRADES'] +3570-5694-0012-2445: hyp=['THERE', 
'IS', 'A', 'MORE', 'OR', 'LESS', 'ELABORATE', 'SYSTEM', 'OF', 'RANK', 'AND', 'GRATES'] +3570-5694-0013-2446: ref=['THIS', 'DIFFERENTIATION', 'IS', 'FURTHERED', 'BY', 'THE', 'INHERITANCE', 'OF', 'WEALTH', 'AND', 'THE', 'CONSEQUENT', 'INHERITANCE', 'OF', 'GENTILITY'] +3570-5694-0013-2446: hyp=['THIS', 'DIFFERENTIATION', 'IS', 'FURTHERED', 'BY', 'THE', 'INHERITANCE', 'OF', 'WEALTH', 'AND', 'THE', 'CONSEQUENT', 'INHERITANCE', 'OF', 'GENTILITY'] +3570-5694-0014-2447: ref=['MANY', 'OF', 'THESE', 'AFFILIATED', 'GENTLEMEN', 'OF', 'LEISURE', 'ARE', 'AT', 'THE', 'SAME', 'TIME', 'LESSER', 'MEN', 'OF', 'SUBSTANCE', 'IN', 'THEIR', 'OWN', 'RIGHT', 'SO', 'THAT', 'SOME', 'OF', 'THEM', 'ARE', 'SCARCELY', 'AT', 'ALL', 'OTHERS', 'ONLY', 'PARTIALLY', 'TO', 'BE', 'RATED', 'AS', 'VICARIOUS', 'CONSUMERS'] +3570-5694-0014-2447: hyp=['MANY', 'OF', 'THESE', 'HAVE', 'FILLIOTTED', 'GENTLEMEN', 'OF', 'LEISURE', 'ARE', 'AT', 'THE', 'SAME', 'TIME', 'LESS', 'AMEN', 'OF', 'SUBSTANCE', 'IN', 'THEIR', 'OWN', 'RIGHT', 'SO', 'THAT', 'SOME', 'OF', 'THEM', 'ARE', 'SCARCELY', 'AT', 'ALL', 'OTHERS', 'ONLY', 'PARTIALLY', 'TO', 'BE', 'RATED', 'AS', 'VICARIOUS', 'CONSUMERS'] +3570-5694-0015-2448: ref=['SO', 'MANY', 'OF', 'THEM', 'HOWEVER', 'AS', 'MAKE', 'UP', 'THE', 'RETAINER', 'AND', 'HANGERS', 'ON', 'OF', 'THE', 'PATRON', 'MAY', 'BE', 'CLASSED', 'AS', 'VICARIOUS', 'CONSUMER', 'WITHOUT', 'QUALIFICATION'] +3570-5694-0015-2448: hyp=['SO', 'MANY', 'OF', 'THEM', 'HOWEVER', 'AS', 'MAKE', 'UP', 'THE', 'RETAINER', 'AND', 'HANGERS', 'ON', 'OF', 'THE', 'PATRON', 'MAY', 'BE', 'CLASSED', 'AS', 'VICARIOUS', 'CONSUMER', 'WITHOUT', 'QUALIFICATION'] +3570-5694-0016-2449: ref=['MANY', 'OF', 'THESE', 'AGAIN', 'AND', 'ALSO', 'MANY', 'OF', 'THE', 'OTHER', 'ARISTOCRACY', 'OF', 'LESS', 'DEGREE', 'HAVE', 'IN', 'TURN', 'ATTACHED', 'TO', 'THEIR', 'PERSONS', 'A', 'MORE', 'OR', 'LESS', 'COMPREHENSIVE', 'GROUP', 'OF', 'VICARIOUS', 'CONSUMER', 'IN', 'THE', 'PERSONS', 'OF', 'THEIR', 'WIVES', 'AND', 'CHILDREN', 'THEIR', 'SERVANTS', 'RETAINERS', 'ET', 'CETERA'] +3570-5694-0016-2449: hyp=['MANY', 'OF', 'THESE', 'AGAIN', 'AND', 'ALSO', 'MANY', 'OF', 'THE', 'OTHER', 'ARE', 'ARISTOCRACY', 'OF', 'LESS', 'DEGREE', 'HAVE', 'IN', 'TURN', 'ATTACHED', 'TO', 'THEIR', 'PERSONS', 'A', 'MORE', 'OR', 'LESS', 'COMPREHENSIVE', 'GROUP', 'OF', 'VICARIOUS', 'CONSUMER', 'IN', 'THE', 'PERSONS', 'OF', 'THEIR', 'WIVES', 'AND', 'CHILDREN', 'THEIR', 'SERVANTS', 'RETAINERS', 'ET', 'CETERA'] +3570-5694-0017-2450: ref=['THE', 'WEARING', 'OF', 'UNIFORMS', 'OR', 'LIVERIES', 'IMPLIES', 'A', 'CONSIDERABLE', 'DEGREE', 'OF', 'DEPENDENCE', 'AND', 'MAY', 'EVEN', 'BE', 'SAID', 'TO', 'BE', 'A', 'MARK', 'OF', 'SERVITUDE', 'REAL', 'OR', 'OSTENSIBLE'] +3570-5694-0017-2450: hyp=['THE', 'WEARING', 'OF', 'UNIFORMS', 'A', 'LIVERIES', 'IMPLIES', 'A', 'CONSIDERABLE', 'DEGREE', 'OF', 'DEPENDENCE', 'AND', 'MAY', 'EVEN', 'BE', 'SAID', 'TO', 'BE', 'A', 'MARK', 'OF', 'SERVITUDE', 'REAL', 'OR', 'OSTENSIBLE'] +3570-5694-0018-2451: ref=['THE', 'WEARERS', 'OF', 'UNIFORMS', 'AND', 'LIVERIES', 'MAY', 'BE', 'ROUGHLY', 'DIVIDED', 'INTO', 'TWO', 'CLASSES', 'THE', 'FREE', 'AND', 'THE', 'SERVILE', 'OR', 'THE', 'NOBLE', 'AND', 'THE', 'IGNOBLE'] +3570-5694-0018-2451: hyp=['THE', 'WEARERS', 'OF', 'UNIFORMS', 'AND', 'LIVERIES', 'MAY', 'BE', 'ROUGHLY', 'DIVIDED', 'INTO', 'TWO', 'CLASSES', 'THE', 'FREE', 'AND', 'THE', 'SERVILE', 'OR', 'THE', 'NOBLE', 'AND', 'THE', 'IGNOBLE'] +3570-5694-0019-2452: ref=['BUT', 'THE', 'GENERAL', 'DISTINCTION', 'IS', 'NOT', 'ON', 'THAT', 'ACCOUNT', 'TO', 'BE', 'OVERLOOKED'] +3570-5694-0019-2452: 
hyp=['BUT', 'THE', 'GENERAL', 'DISTINCTION', 'IS', 'NOT', 'ON', 'THAT', 'ACCOUNT', 'TO', 'BE', 'OVERLOOKED'] +3570-5694-0020-2453: ref=['SO', 'THOSE', 'OFFICES', 'WHICH', 'ARE', 'BY', 'RIGHT', 'THE', 'PROPER', 'EMPLOYMENT', 'OF', 'THE', 'LEISURE', 'CLASS', 'ARE', 'NOBLE', 'SUCH', 'AS', 'GOVERNMENT', 'FIGHTING', 'HUNTING', 'THE', 'CARE', 'OF', 'ARMS', 'AND', 'ACCOUTREMENTS', 'AND', 'THE', 'LIKE', 'IN', 'SHORT', 'THOSE', 'WHICH', 'MAY', 'BE', 'CLASSED', 'AS', 'OSTENSIBLY', 'PREDATORY', 'EMPLOYMENTS'] +3570-5694-0020-2453: hyp=['SO', 'THOSE', 'OFFICERS', 'WHICH', 'ARE', 'BY', 'RIGHT', 'THE', 'PROPER', 'EMPLOYMENT', 'OF', 'THE', 'LEISURE', 'CLASS', 'ARE', 'NOBLE', 'SUCH', 'AS', 'GOVERNMENT', 'FIGHTING', 'HUNTING', 'THE', 'CARE', 'OF', 'ARMS', 'AND', 'ACCOUTREMENTS', 'AND', 'THE', 'LIKE', 'IN', 'SHORT', 'THOSE', 'WHICH', 'MAY', 'BE', 'CLASSED', 'AS', 'OSTENSIBLY', 'PREDATORY', 'EMPLOYMENTS'] +3570-5694-0021-2454: ref=['WHENEVER', 'AS', 'IN', 'THESE', 'CASES', 'THE', 'MENIAL', 'SERVICE', 'IN', 'QUESTION', 'HAS', 'TO', 'DO', 'DIRECTLY', 'WITH', 'THE', 'PRIMARY', 'LEISURE', 'EMPLOYMENTS', 'OF', 'FIGHTING', 'AND', 'HUNTING', 'IT', 'EASILY', 'ACQUIRES', 'A', 'REFLECTED', 'HONORIFIC', 'CHARACTER'] +3570-5694-0021-2454: hyp=['WHENEVER', 'AS', 'IN', 'THESE', 'CASES', 'THE', 'MENIAL', 'SERVICE', 'IN', 'QUESTION', 'HAS', 'TO', 'DO', 'DIRECTLY', 'WITH', 'A', 'PRIMARY', 'LEISURE', 'EMPLOYMENTS', 'OF', 'FIGHTING', 'AND', 'HUNTING', 'IT', 'EASILY', 'ACQUIRES', 'A', 'REFLECTED', 'HONORIFIC', 'CHARACTER'] +3570-5694-0022-2455: ref=['THE', 'LIVERY', 'BECOMES', 'OBNOXIOUS', 'TO', 'NEARLY', 'ALL', 'WHO', 'ARE', 'REQUIRED', 'TO', 'WEAR', 'IT'] +3570-5694-0022-2455: hyp=['THE', 'LIVERY', 'BECOMES', 'OBNOXIOUS', 'TO', 'NEARLY', 'ALL', 'WHO', 'ARE', 'REQUIRED', 'TO', 'WEAR', 'IT'] +3570-5695-0000-2456: ref=['IN', 'A', 'GENERAL', 'WAY', 'THOUGH', 'NOT', 'WHOLLY', 'NOR', 'CONSISTENTLY', 'THESE', 'TWO', 'GROUPS', 'COINCIDE'] +3570-5695-0000-2456: hyp=['AND', 'A', 'GENERAL', 'WAY', 'THOUGH', 'NOT', 'WHOLLY', 'NOR', 'CONSISTENTLY', 'THESE', 'TWO', 'GROUPS', 'COINCIDE'] +3570-5695-0001-2457: ref=['THE', 'DEPENDENT', 'WHO', 'WAS', 'FIRST', 'DELEGATED', 'FOR', 'THESE', 'DUTIES', 'WAS', 'THE', 'WIFE', 'OR', 'THE', 'CHIEF', 'WIFE', 'AND', 'AS', 'WOULD', 'BE', 'EXPECTED', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'OF', 'THE', 'INSTITUTION', 'WHEN', 'THE', 'NUMBER', 'OF', 'PERSONS', 'BY', 'WHOM', 'THESE', 'DUTIES', 'ARE', 'CUSTOMARILY', 'PERFORMED', 'GRADUALLY', 'NARROWS', 'THE', 'WIFE', 'REMAINS', 'THE', 'LAST'] +3570-5695-0001-2457: hyp=['THE', 'DEPENDENT', 'WHO', 'WAS', 'FIRST', 'DELEGATED', 'FOR', 'THESE', 'DUTIES', 'WAS', 'THE', 'WIFE', 'OR', 'THE', 'CHIEF', 'WIFE', 'AND', 'AS', 'WOULD', 'BE', 'EXPECTED', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'OF', 'THE', 'INSTITUTION', 'WHEN', 'THE', 'NUMBER', 'OF', 'PERSONS', 'BY', 'WHOM', 'THESE', 'DUTIES', 'ARE', 'CUSTOMARY', 'PERFORMED', 'GRADUAL', 'AND', 'ARROWS', 'THE', 'WIFE', 'REMAINS', 'THE', 'LAST'] +3570-5695-0002-2458: ref=['BUT', 'AS', 'WE', 'DESCEND', 'THE', 'SOCIAL', 'SCALE', 'THE', 'POINT', 'IS', 'PRESENTLY', 'REACHED', 'WHERE', 'THE', 'DUTIES', 'OF', 'VICARIOUS', 'LEISURE', 'AND', 'CONSUMPTION', 'DEVOLVE', 'UPON', 'THE', 'WIFE', 'ALONE'] +3570-5695-0002-2458: hyp=['BUT', 'AS', 'WE', 'DESCEND', 'THE', 'SOCIAL', 'SCALE', 'THE', 'POINT', 'IS', 'PRESENTLY', 'REACHED', 'WHERE', 'THE', 'DUTIES', 'OF', "YARE'S", 'LEISURE', 'AND', 'CONSUMPTION', 'DEVOLVE', 'UPON', 'THE', 'WIFE', 'ALONE'] +3570-5695-0003-2459: ref=['IN', 'THE', 'COMMUNITIES', 'OF', 'THE', 'WESTERN', 'CULTURE', 'THIS', 
'POINT', 'IS', 'AT', 'PRESENT', 'FOUND', 'AMONG', 'THE', 'LOWER', 'MIDDLE', 'CLASS'] +3570-5695-0003-2459: hyp=['IN', 'THE', 'COMMUNITIES', 'OF', 'THE', 'WESTERN', 'CULTURE', 'THIS', 'POINT', 'IS', 'AT', 'PRESENT', 'FOUND', 'AMONG', 'THE', 'LOWER', 'MIDDLE', 'CLASS'] +3570-5695-0004-2460: ref=['IF', 'BEAUTY', 'OR', 'COMFORT', 'IS', 'ACHIEVED', 'AND', 'IT', 'IS', 'A', 'MORE', 'OR', 'LESS', 'FORTUITOUS', 'CIRCUMSTANCE', 'IF', 'THEY', 'ARE', 'THEY', 'MUST', 'BE', 'ACHIEVED', 'BY', 'MEANS', 'AND', 'METHODS', 'THAT', 'COMMEND', 'THEMSELVES', 'TO', 'THE', 'GREAT', 'ECONOMIC', 'LAW', 'OF', 'WASTED', 'EFFORT'] +3570-5695-0004-2460: hyp=['IF', 'BEAUTY', 'OR', 'COMFORT', 'IS', 'ACHIEVED', 'AND', 'IT', 'IS', 'A', 'MORE', 'OR', 'LESS', 'FORTUITOUS', 'CIRCUMSTANCE', 'IF', 'THEY', 'ARE', 'THEY', 'MUST', 'BE', 'ACHIEVED', 'BY', 'MEANS', 'AND', 'METHODS', 'THAT', 'COMMEND', 'THEMSELVES', 'TO', 'THE', 'GREAT', 'ECONOMIC', 'LAW', 'OF', 'WASTED', 'EFFORT'] +3570-5695-0005-2461: ref=['THE', 'MAN', 'OF', 'THE', 'HOUSEHOLD', 'ALSO', 'CAN', 'DO', 'SOMETHING', 'IN', 'THIS', 'DIRECTION', 'AND', 'INDEED', 'HE', 'COMMONLY', 'DOES', 'BUT', 'WITH', 'A', 'STILL', 'LOWER', 'DESCENT', 'INTO', 'THE', 'LEVELS', 'OF', 'INDIGENCE', 'ALONG', 'THE', 'MARGIN', 'OF', 'THE', 'SLUMS', 'THE', 'MAN', 'AND', 'PRESENTLY', 'ALSO', 'THE', 'CHILDREN', 'VIRTUALLY', 'CEASE', 'TO', 'CONSUME', 'VALUABLE', 'GOODS', 'FOR', 'APPEARANCES', 'AND', 'THE', 'WOMAN', 'REMAINS', 'VIRTUALLY', 'THE', 'SOLE', 'EXPONENT', 'OF', 'THE', "HOUSEHOLD'S", 'PECUNIARY', 'DECENCY'] +3570-5695-0005-2461: hyp=['THE', 'MAN', 'OF', 'THE', 'HOUSEHOLD', 'ALSO', 'CAN', 'DO', 'SOMETHING', 'IN', 'THIS', 'DIRECTION', 'AND', 'INDEED', 'HE', 'COMMONLY', 'DOES', 'BUT', 'WITH', 'A', 'STILL', 'LOWER', 'DISSENT', 'INTO', 'THE', 'LEVELS', 'OF', 'INDIGENCE', 'ALONG', 'THE', 'MARGIN', 'OF', 'THE', 'SLUMS', 'THE', 'MAN', 'AND', 'PRESENTLY', 'ALSO', 'THE', 'CHILDREN', 'VIRTUALLY', 'SEIZED', 'TO', 'CONSUME', 'VALUABLE', 'GOODS', 'FOR', 'APPEARANCES', 'AND', 'THE', 'WOMAN', 'REMAINS', 'VIRTUALLY', 'THE', 'SOLE', 'EXPONENT', 'OF', 'THE', "HOUSEHOLD'S", 'PECUNIARY', 'DECENCY'] +3570-5695-0006-2462: ref=['VERY', 'MUCH', 'OF', 'SQUALOR', 'AND', 'DISCOMFORT', 'WILL', 'BE', 'ENDURED', 'BEFORE', 'THE', 'LAST', 'TRINKET', 'OR', 'THE', 'LAST', 'PRETENSE', 'OF', 'PECUNIARY', 'DECENCY', 'IS', 'PUT', 'AWAY'] +3570-5695-0006-2462: hyp=['VERY', 'MUCH', 'OF', 'SQUALOR', 'AND', 'DISCOMFORT', 'WILL', 'BE', 'ENDURED', 'BEFORE', 'THE', 'LAST', 'TRINKET', 'OR', 'THE', 'LAST', 'PRETENCE', 'OF', 'PECUNIARY', 'DECENCIES', 'PUT', 'AWAY'] +3570-5695-0007-2463: ref=['THERE', 'IS', 'NO', 'CLASS', 'AND', 'NO', 'COUNTRY', 'THAT', 'HAS', 'YIELDED', 'SO', 'ABJECTLY', 'BEFORE', 'THE', 'PRESSURE', 'OF', 'PHYSICAL', 'WANT', 'AS', 'TO', 'DENY', 'THEMSELVES', 'ALL', 'GRATIFICATION', 'OF', 'THIS', 'HIGHER', 'OR', 'SPIRITUAL', 'NEED'] +3570-5695-0007-2463: hyp=['THERE', 'IS', 'NO', 'CLASS', 'AND', 'NO', 'COUNTRY', 'THAT', 'HAS', 'YIELDED', 'SO', 'OBJECTLY', 'BEFORE', 'THE', 'PRESSURE', 'OF', 'PHYSICAL', 'WANT', 'AS', 'TO', 'DENY', 'THEMSELVES', 'ALL', 'GRATIFICATION', 'OF', 'THIS', 'HIGHER', 'OR', 'SPIRITUAL', 'NEED'] +3570-5695-0008-2464: ref=['THE', 'QUESTION', 'IS', 'WHICH', 'OF', 'THE', 'TWO', 'METHODS', 'WILL', 'MOST', 'EFFECTIVELY', 'REACH', 'THE', 'PERSONS', 'WHOSE', 'CONVICTIONS', 'IT', 'IS', 'DESIRED', 'TO', 'AFFECT'] +3570-5695-0008-2464: hyp=['THE', 'QUESTION', 'IS', 'WHICH', 'OF', 'THE', 'TWO', 'METHODS', 'WILL', 'MOST', 'EFFECTIVELY', 'REACH', 'THE', 'PERSONS', 'WHOSE', 'CONVICTIONS', 'IT', 'IS', 'DESIRED', 
'TO', 'EFFECT'] +3570-5695-0009-2465: ref=['EACH', 'WILL', 'THEREFORE', 'SERVE', 'ABOUT', 'EQUALLY', 'WELL', 'DURING', 'THE', 'EARLIER', 'STAGES', 'OF', 'SOCIAL', 'GROWTH'] +3570-5695-0009-2465: hyp=['EACH', 'WILL', 'THEREFORE', 'SERVE', 'ABOUT', 'EQUALLY', 'WELL', 'DURING', 'THE', 'EARLIER', 'STAGES', 'OF', 'SOCIAL', 'GROWTH'] +3570-5695-0010-2466: ref=['THE', 'MODERN', 'ORGANIZATION', 'OF', 'INDUSTRY', 'WORKS', 'IN', 'THE', 'SAME', 'DIRECTION', 'ALSO', 'BY', 'ANOTHER', 'LINE'] +3570-5695-0010-2466: hyp=['THE', 'MODERN', 'ORGANIZATION', 'OF', 'INDUSTRY', 'WORKS', 'IN', 'THE', 'SAME', 'DIRECTION', 'ALSO', 'BY', 'ANOTHER', 'LINE'] +3570-5695-0011-2467: ref=['IT', 'IS', 'EVIDENT', 'THEREFORE', 'THAT', 'THE', 'PRESENT', 'TREND', 'OF', 'THE', 'DEVELOPMENT', 'IS', 'IN', 'THE', 'DIRECTION', 'OF', 'HEIGHTENING', 'THE', 'UTILITY', 'OF', 'CONSPICUOUS', 'CONSUMPTION', 'AS', 'COMPARED', 'WITH', 'LEISURE'] +3570-5695-0011-2467: hyp=['IT', 'IS', 'EVIDENT', 'THEREFORE', 'THAT', 'THE', 'PRESENT', 'TREND', 'OF', 'THE', 'DEVELOPMENT', 'IS', 'IN', 'THE', 'DIRECTION', 'OF', 'HEIGHTENING', 'THE', 'UTILITY', 'OF', 'CONSPICUOUS', 'CONSUMPTION', 'AS', 'COMPARED', 'WITH', 'LEISURE'] +3570-5695-0012-2468: ref=['IT', 'IS', 'ALSO', 'NOTICEABLE', 'THAT', 'THE', 'SERVICEABILITY', 'OF', 'CONSUMPTION', 'AS', 'A', 'MEANS', 'OF', 'REPUTE', 'AS', 'WELL', 'AS', 'THE', 'INSISTENCE', 'ON', 'IT', 'AS', 'AN', 'ELEMENT', 'OF', 'DECENCY', 'IS', 'AT', 'ITS', 'BEST', 'IN', 'THOSE', 'PORTIONS', 'OF', 'THE', 'COMMUNITY', 'WHERE', 'THE', 'HUMAN', 'CONTACT', 'OF', 'THE', 'INDIVIDUAL', 'IS', 'WIDEST', 'AND', 'THE', 'MOBILITY', 'OF', 'THE', 'POPULATION', 'IS', 'GREATEST'] +3570-5695-0012-2468: hyp=['IT', 'IS', 'ALSO', 'NOTICEABLE', 'THAT', 'THE', 'SURFABILITY', 'OF', 'CONSUMPTION', 'AS', 'A', 'MEANS', 'OF', 'REPUTE', 'AS', 'WELL', 'AS', 'THE', 'INSISTENCE', 'ON', 'IT', 'AS', 'AN', 'ELEMENT', 'OF', 'DECENCY', 'IS', 'AT', 'ITS', 'BEST', 'IN', 'THOSE', 'PORTIONS', 'OF', 'THE', 'COMMUNITY', 'WHERE', 'THE', 'HUMAN', 'CONTACT', 'OF', 'THE', 'INDIVIDUAL', 'IS', 'WIDEST', 'AND', 'THE', 'MOBILITY', 'OF', 'THE', 'POPULATION', 'IS', 'GREATEST'] +3570-5695-0013-2469: ref=['CONSUMPTION', 'BECOMES', 'A', 'LARGER', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'LIVING', 'IN', 'THE', 'CITY', 'THAN', 'IN', 'THE', 'COUNTRY'] +3570-5695-0013-2469: hyp=['CONSUMPTION', 'BECOMES', 'A', 'LARGER', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'LIVING', 'IN', 'THE', 'CITY', 'THAN', 'IN', 'THE', 'COUNTRY'] +3570-5695-0014-2470: ref=['AMONG', 'THE', 'COUNTRY', 'POPULATION', 'ITS', 'PLACE', 'IS', 'TO', 'SOME', 'EXTENT', 'TAKEN', 'BY', 'SAVINGS', 'AND', 'HOME', 'COMFORTS', 'KNOWN', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'NEIGHBORHOOD', 'GOSSIP', 'SUFFICIENTLY', 'TO', 'SERVE', 'THE', 'LIKE', 'GENERAL', 'PURPOSE', 'OF', 'PECUNIARY', 'REPUTE'] +3570-5695-0014-2470: hyp=['AMONG', 'THE', 'COUNTRY', 'POPULATION', 'ITS', 'PLACES', 'TO', 'SOME', 'EXTENT', 'TAKEN', 'BY', 'SAVINGS', 'AND', 'HOME', 'COMFORTS', 'KNOWN', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'NEIGHBOURHOOD', 'GOSSIPS', 'SUFFICIENTLY', 'TO', 'SERVE', 'THE', 'LIKE', 'GENERAL', 'PURPOSE', 'OF', 'PECUNIARY', 'REPUTE'] +3570-5695-0015-2471: ref=['THE', 'RESULT', 'IS', 'A', 'GREAT', 'MOBILITY', 'OF', 'THE', 'LABOR', 'EMPLOYED', 'IN', 'PRINTING', 'PERHAPS', 'GREATER', 'THAN', 'IN', 'ANY', 'OTHER', 'EQUALLY', 'WELL', 'DEFINED', 'AND', 'CONSIDERABLE', 'BODY', 'OF', 'WORKMEN'] +3570-5695-0015-2471: hyp=['THE', 'RESULT', 'IS', 'A', 'GREAT', 'MOBILITY', 'OF', 'THE', 'LABOR', 'EMPLOYED', 'IN', 'PRINTING', 'PERHAPS', 'GREATER', 'THAN', 'IN', 
'ANY', 'OTHER', 'EQUALLY', 'WELL', 'DEFINED', 'AND', 'CONSIDERABLE', 'BODY', 'OF', 'WORKMEN'] +3570-5696-0000-2472: ref=['UNDER', 'THE', 'SIMPLE', 'TEST', 'OF', 'EFFECTIVENESS', 'FOR', 'ADVERTISING', 'WE', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'LEISURE', 'AND', 'THE', 'CONSPICUOUS', 'CONSUMPTION', 'OF', 'GOODS', 'DIVIDING', 'THE', 'FIELD', 'OF', 'PECUNIARY', 'EMULATION', 'PRETTY', 'EVENLY', 'BETWEEN', 'THEM', 'AT', 'THE', 'OUTSET'] +3570-5696-0000-2472: hyp=['UNDER', 'THE', 'SIMPLE', 'TEST', 'OF', 'EFFECTIVENESS', 'FOR', 'ADVERTISING', 'WE', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'LEISURE', 'AND', 'THE', 'CONSPICUOUS', 'CONSUMPTION', 'OF', 'GOODS', 'DIVIDING', 'THE', 'FIELD', 'OF', 'PECUNIARY', 'EMULATION', 'PRETTY', 'EVENLY', 'BETWEEN', 'THEM', 'AT', 'THE', 'OUTSET'] +3570-5696-0001-2473: ref=['BUT', 'THE', 'ACTUAL', 'COURSE', 'OF', 'DEVELOPMENT', 'HAS', 'BEEN', 'SOMEWHAT', 'DIFFERENT', 'FROM', 'THIS', 'IDEAL', 'SCHEME', 'LEISURE', 'HELD', 'THE', 'FIRST', 'PLACE', 'AT', 'THE', 'START', 'AND', 'CAME', 'TO', 'HOLD', 'A', 'RANK', 'VERY', 'MUCH', 'ABOVE', 'WASTEFUL', 'CONSUMPTION', 'OF', 'GOODS', 'BOTH', 'AS', 'A', 'DIRECT', 'EXPONENT', 'OF', 'WEALTH', 'AND', 'AS', 'AN', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'DECENCY', 'DURING', 'THE', 'QUASI', 'PEACEABLE', 'CULTURE'] +3570-5696-0001-2473: hyp=['BUT', 'THE', 'ACTUAL', 'COURSE', 'OF', 'DEVELOPMENT', 'HAS', 'BEEN', 'SOMEWHAT', 'DIFFERENT', 'FROM', 'THIS', 'IDEAL', 'SCHEME', 'LEISURE', 'HELD', 'THE', 'FIRST', 'PLACE', 'AT', 'THE', 'START', 'AND', 'CAME', 'TO', 'ALL', 'THE', 'RANK', 'VEREMENT', 'ABOVE', 'WASTEFUL', 'CONSUMPTION', 'OF', 'GOODS', 'BOTH', 'AS', 'A', 'DIRECT', 'EXPONENT', 'OF', 'WEALTH', 'AND', 'AS', 'AN', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'DECENCY', 'DURING', 'THE', 'COURSE', 'I', 'PEACEABLE', 'CULTURE'] +3570-5696-0002-2474: ref=['OTHER', 'CIRCUMSTANCES', 'PERMITTING', 'THAT', 'INSTINCT', 'DISPOSES', 'MEN', 'TO', 'LOOK', 'WITH', 'FAVOR', 'UPON', 'PRODUCTIVE', 'EFFICIENCY', 'AND', 'ON', 'WHATEVER', 'IS', 'OF', 'HUMAN', 'USE'] +3570-5696-0002-2474: hyp=['ARE', 'THE', 'CIRCUMSTANCES', 'PERMITTING', 'THAT', 'INSTINCT', 'DISPOSES', 'MEN', 'TO', 'LOOK', 'WITH', 'FAVOUR', 'UPON', 'PRODUCTIVE', 'EFFICIENCY', 'AND', 'ON', 'WHATEVER', 'IS', 'OF', 'HUMAN', 'USE'] +3570-5696-0003-2475: ref=['A', 'RECONCILIATION', 'BETWEEN', 'THE', 'TWO', 'CONFLICTING', 'REQUIREMENTS', 'IS', 'EFFECTED', 'BY', 'A', 'RESORT', 'TO', 'MAKE', 'BELIEVE', 'MANY', 'AND', 'INTRICATE', 'POLITE', 'OBSERVANCES', 'AND', 'SOCIAL', 'DUTIES', 'OF', 'A', 'CEREMONIAL', 'NATURE', 'ARE', 'DEVELOPED', 'MANY', 'ORGANIZATIONS', 'ARE', 'FOUNDED', 'WITH', 'SOME', 'SPECIOUS', 'OBJECT', 'OF', 'AMELIORATION', 'EMBODIED', 'IN', 'THEIR', 'OFFICIAL', 'STYLE', 'AND', 'TITLE', 'THERE', 'IS', 'MUCH', 'COMING', 'AND', 'GOING', 'AND', 'A', 'DEAL', 'OF', 'TALK', 'TO', 'THE', 'END', 'THAT', 'THE', 'TALKERS', 'MAY', 'NOT', 'HAVE', 'OCCASION', 'TO', 'REFLECT', 'ON', 'WHAT', 'IS', 'THE', 'EFFECTUAL', 'ECONOMIC', 'VALUE', 'OF', 'THEIR', 'TRAFFIC'] +3570-5696-0003-2475: hyp=['I', 'RECONCILIATION', 'BETWEEN', 'THE', 'TWO', 'CONFLICTING', 'REQUIREMENTS', 'IS', 'AFFECTED', 'BY', 'RESORT', 'TO', 'MAKE', 'BELIEVE', 'MEN', 'AND', 'INTRICATE', 'POLITE', 'OBSERVANCES', 'AND', 'SOCIAL', 'DUTIES', 'OF', 'A', 'CEREMONIAL', 'NATURE', 'ARE', 'DEVELOPED', 'MANY', 'ORGANIZATIONS', 'ARE', 'FOUNDED', 'WITH', 'SOME', 'SPECIOUS', 'OBJECT', 'OF', 'AMELIORATION', 'EMBODIED', 'IN', 'THEIR', 'OFFICIAL', 'STYLANT', 'TITLE', 'THERE', 'IS', 'MUCH', 'COMING', 'AND', 'GOING', 'AND', 'A', 'DEAL', 'OF', 'TALK', 'TO', 'THE', 
'END', 'THAT', 'THE', 'TALK', 'IS', 'MAY', 'NOT', 'HAVE', 'OCCASION', 'TO', 'REFLECT', 'ON', 'WHAT', 'IS', 'THE', 'EFFECTUAL', 'ECONOMIC', 'VALUE', 'OF', 'THEIR', 'TRAFFIC'] +3570-5696-0004-2476: ref=['THE', 'SALIENT', 'FEATURES', 'OF', 'THIS', 'DEVELOPMENT', 'OF', 'DOMESTIC', 'SERVICE', 'HAVE', 'ALREADY', 'BEEN', 'INDICATED'] +3570-5696-0004-2476: hyp=['THE', 'SAILORED', 'FEATURES', 'OF', 'THIS', 'DEVELOPMENT', 'OF', 'DOMESTIC', 'SERVICE', 'HAVE', 'ALREADY', 'BEEN', 'INDICATED'] +3570-5696-0005-2477: ref=['THROUGHOUT', 'THE', 'ENTIRE', 'EVOLUTION', 'OF', 'CONSPICUOUS', 'EXPENDITURE', 'WHETHER', 'OF', 'GOODS', 'OR', 'OF', 'SERVICES', 'OR', 'HUMAN', 'LIFE', 'RUNS', 'THE', 'OBVIOUS', 'IMPLICATION', 'THAT', 'IN', 'ORDER', 'TO', 'EFFECTUALLY', 'MEND', 'THE', "CONSUMER'S", 'GOOD', 'FAME', 'IT', 'MUST', 'BE', 'AN', 'EXPENDITURE', 'OF', 'SUPERFLUITIES'] +3570-5696-0005-2477: hyp=['THROUGHOUT', 'THE', 'ENTIRE', 'REVOLUTION', 'OF', 'CONSPICUOUS', 'EXPENDITURE', 'WHETHER', 'OF', 'GOODS', 'OR', 'OF', 'SERVICES', 'OR', 'HUMAN', 'LIFE', 'RUNS', 'THE', 'OBVIOUS', 'IMPLICATION', 'THAT', 'IN', 'ORDER', 'TO', 'EFFECTUALLY', 'MEND', 'THE', 'CONSUMERS', 'GOOD', 'FAME', 'IT', 'MUST', 'BE', 'AN', 'EXPENDITURE', 'OF', 'SUPERFLUITIES'] +3570-5696-0006-2478: ref=['AS', 'USED', 'IN', 'THE', 'SPEECH', 'OF', 'EVERYDAY', 'LIFE', 'THE', 'WORD', 'CARRIES', 'AN', 'UNDERTONE', 'OF', 'DEPRECATION'] +3570-5696-0006-2478: hyp=['AS', 'USED', 'IN', 'THE', 'SPEECH', 'OF', 'EVERY', 'DAY', 'LIFE', 'THE', 'WORD', 'CARRIES', 'AN', 'UNDERTONE', 'OF', 'DEPRECATION'] +3570-5696-0007-2479: ref=['THE', 'USE', 'OF', 'THE', 'WORD', 'WASTE', 'AS', 'A', 'TECHNICAL', 'TERM', 'THEREFORE', 'IMPLIES', 'NO', 'DEPRECATION', 'OF', 'THE', 'MOTIVES', 'OR', 'OF', 'THE', 'ENDS', 'SOUGHT', 'BY', 'THE', 'CONSUMER', 'UNDER', 'THIS', 'CANON', 'OF', 'CONSPICUOUS', 'WASTE'] +3570-5696-0007-2479: hyp=['THE', 'USE', 'OF', 'THE', 'WORD', 'WASTE', 'AS', 'A', 'TECHNICAL', 'TERM', 'THEREFORE', 'IMPLIES', 'NO', 'DEPRECATION', 'OF', 'THE', 'MOTIVES', 'OR', 'OF', 'THE', 'ENDS', 'SOUGHT', 'BY', 'THE', 'CONSUMER', 'UNDER', 'THIS', 'CANON', 'OF', 'CONSPICUOUS', 'WASTE'] +3570-5696-0008-2480: ref=['BUT', 'IT', 'IS', 'ON', 'OTHER', 'GROUNDS', 'WORTH', 'NOTING', 'THAT', 'THE', 'TERM', 'WASTE', 'IN', 'THE', 'LANGUAGE', 'OF', 'EVERYDAY', 'LIFE', 'IMPLIES', 'DEPRECATION', 'OF', 'WHAT', 'IS', 'CHARACTERIZED', 'AS', 'WASTEFUL'] +3570-5696-0008-2480: hyp=['BUT', 'IT', 'IS', 'ANOTHER', "GROUND'", 'NOTING', 'THAT', 'THE', 'TERM', 'WASTE', 'IN', 'THE', 'LANGUAGE', 'OF', 'EVERY', 'DAY', 'LIFE', 'IMPLIES', 'DEPRECATION', 'OF', 'WHAT', 'IS', 'CHARACTERIZED', 'AS', 'WASTEFUL'] +3570-5696-0009-2481: ref=['IN', 'STRICT', 'ACCURACY', 'NOTHING', 'SHOULD', 'BE', 'INCLUDED', 'UNDER', 'THE', 'HEAD', 'OF', 'CONSPICUOUS', 'WASTE', 'BUT', 'SUCH', 'EXPENDITURE', 'AS', 'IS', 'INCURRED', 'ON', 'THE', 'GROUND', 'OF', 'AN', 'INVIDIOUS', 'PECUNIARY', 'COMPARISON'] +3570-5696-0009-2481: hyp=['IN', 'STRICT', 'ACCURACY', 'NOTHING', 'SHOULD', 'BE', 'INCLUDED', 'UNDER', 'THE', 'HEAD', 'OF', 'CONSPICUOUS', 'WASTE', 'BUT', 'SUCH', 'EXPENDITURE', 'AS', 'IS', 'INCURRED', 'ON', 'THE', 'GROUND', 'OF', 'AN', 'INVIDIOUS', 'PECUNIARY', 'COMPARISON'] +3570-5696-0010-2482: ref=['AN', 'ARTICLE', 'MAY', 'BE', 'USEFUL', 'AND', 'WASTEFUL', 'BOTH', 'AND', 'ITS', 'UTILITY', 'TO', 'THE', 'CONSUMER', 'MAY', 'BE', 'MADE', 'UP', 'OF', 'USE', 'AND', 'WASTE', 'IN', 'THE', 'MOST', 'VARYING', 'PROPORTIONS'] +3570-5696-0010-2482: hyp=['AN', 'ARTICLE', 'MAY', 'BE', 'USEFUL', 'AND', 'WASTEFUL', 'BOTH', 'AND', 'ITS', 'UTILITY', 
'TO', 'THE', 'CONSUMER', 'MAY', 'BE', 'MADE', 'UP', 'OF', 'USE', 'AND', 'WASTE', 'IN', 'THE', 'MOST', 'VARYING', 'PROPORTIONS'] +3575-170457-0000-369: ref=['AND', 'OFTEN', 'HAS', 'MY', 'MOTHER', 'SAID', 'WHILE', 'ON', 'HER', 'LAP', 'I', 'LAID', 'MY', 'HEAD', 'SHE', 'FEARED', 'FOR', 'TIME', 'I', 'WAS', 'NOT', 'MADE', 'BUT', 'FOR', 'ETERNITY'] +3575-170457-0000-369: hyp=['AND', 'OFTEN', 'HAS', 'MY', 'MOTHER', 'SAID', 'WHILE', 'ON', 'HER', 'LAP', 'I', 'LAID', 'MY', 'HEAD', 'SHE', 'FEARED', 'FOR', 'TIME', 'I', 'WAS', 'NOT', 'MADE', 'BUT', 'FOR', 'ETERNITY'] +3575-170457-0001-370: ref=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DENIED', 'EACH', "OTHER'S", 'SOCIETY'] +3575-170457-0001-370: hyp=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DENIED', 'EACH', "OTHER'S", 'SOCIETY'] +3575-170457-0002-371: ref=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DIVIDED'] +3575-170457-0002-371: hyp=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DIVIDED'] +3575-170457-0003-372: ref=['SURELY', 'IT', 'MUST', 'BE', 'BECAUSE', 'WE', 'ARE', 'IN', 'DANGER', 'OF', 'LOVING', 'EACH', 'OTHER', 'TOO', 'WELL', 'OF', 'LOSING', 'SIGHT', 'OF', 'THE', 'CREATOR', 'IN', 'IDOLATRY', 'OF', 'THE', 'CREATURE'] +3575-170457-0003-372: hyp=['SURELY', 'IT', 'MUST', 'BE', 'BECAUSE', 'WE', 'ARE', 'IN', 'DANGER', 'OF', 'LOVING', 'EACH', 'OTHER', 'TOO', 'WELL', 'OF', 'LOSING', 'SIGHT', 'OF', 'THE', 'CREATOR', 'AND', 'IDOLATRY', 'OF', 'THE', 'CREATURE'] +3575-170457-0004-373: ref=['WE', 'USED', 'TO', 'DISPUTE', 'ABOUT', 'POLITICS', 'AND', 'RELIGION'] +3575-170457-0004-373: hyp=['WE', 'USED', 'TO', 'DISPUTE', 'ABOUT', 'POLITICS', 'AND', 'RELIGION'] +3575-170457-0005-374: ref=['SHE', 'A', 'TORY', 'AND', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'ALWAYS', 'IN', 'A', 'MINORITY', 'OF', 'ONE', 'IN', 'OUR', 'HOUSE', 'OF', 'VIOLENT', 'DISSENT', 'AND', 'RADICALISM'] +3575-170457-0005-374: hyp=['SHE', 'ATTORIAN', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'ALWAYS', 'IN', 'A', 'MINORITY', 'OF', 'ONE', 'IN', 'OUR', 'HOUSE', 'A', 'VIOLENT', 'DESCENT', 'AND', 'RADICALISM'] +3575-170457-0006-375: ref=['HER', 'FEEBLE', 'HEALTH', 'GAVE', 'HER', 'HER', 'YIELDING', 'MANNER', 'FOR', 'SHE', 'COULD', 'NEVER', 'OPPOSE', 'ANY', 'ONE', 'WITHOUT', 'GATHERING', 'UP', 'ALL', 'HER', 'STRENGTH', 'FOR', 'THE', 'STRUGGLE'] +3575-170457-0006-375: hyp=['HER', 'FEEBLE', 'HEALTH', 'GAVE', 'HER', 'HER', 'YIELDING', 'MANNER', 'FOR', 'SHE', 'COULD', 'NEVER', 'OPPOSE', 'ANY', 'ONE', 'WITHOUT', 'GATHERING', 'UP', 'ALL', 'HER', 'STRENGTH', 'FOR', 'THE', 'STRUGGLE'] +3575-170457-0007-376: ref=['HE', 'SPOKE', 'FRENCH', 'PERFECTLY', 'I', 'HAVE', 'BEEN', 'TOLD', 'WHEN', 'NEED', 'WAS', 'BUT', 'DELIGHTED', 'USUALLY', 'IN', 'TALKING', 'THE', 'BROADEST', 'YORKSHIRE'] +3575-170457-0007-376: hyp=['HE', 'SPOKE', 'FRENCH', 'PERFECTLY', 'I', 'HAVE', 'BEEN', 'TOLD', 'WHEN', 'NEED', 'WAS', 'BUT', 'DELIGHTED', 'USUALLY', 'IN', 'TALKING', 'THE', 'BROADEST', 'YORKSHIRE'] +3575-170457-0008-377: ref=['AND', 'SO', 'LIFE', 'AND', 'DEATH', 'HAVE', 'DISPERSED', 'THE', 'CIRCLE', 'OF', 'VIOLENT', 'RADICALS', 'AND', 'DISSENTERS', 'INTO', 'WHICH', 'TWENTY', 'YEARS', 'AGO', 'THE', 'LITTLE', 'QUIET', 'RESOLUTE', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'RECEIVED', 'AND', 'BY', 'WHOM', 'SHE', 'WAS', 'TRULY', 'LOVED', 'AND', 'HONOURED'] +3575-170457-0008-377: hyp=['AND', 'SO', 'LIFE', 'AND', 'DEATH', 'HAVE', 'DISPERSED', 'THE', 'CIRCLE', 'OF', 'VIOLENT', 'RADICALS', 'AND', 'DISSENTERS', 'INTO', 'WHICH', 'TWENTY', 'YEARS', 'AGO', 'THE', 'LITTLE', 'QUIET', 'RESOLUTE', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'RECEIVED', 'AND', 'BY', 'WHOM', 'SHE', 'WAS', 'TRULY', 'LOVED', 'AND', 'HONORED'] 
+3575-170457-0009-378: ref=['JANUARY', 'AND', 'FEBRUARY', 'OF', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'HAD', 'PASSED', 'AWAY', 'AND', 'STILL', 'THERE', 'WAS', 'NO', 'REPLY', 'FROM', 'SOUTHEY'] +3575-170457-0009-378: hyp=['JANUARY', 'AND', 'FEBRUARY', 'OF', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'HAD', 'PASSED', 'AWAY', 'AND', 'STILL', 'THERE', 'WAS', 'NO', 'REPLY', 'FROM', 'SALVIE'] +3575-170457-0010-379: ref=['I', 'AM', 'NOT', 'DEPRECIATING', 'IT', 'WHEN', 'I', 'SAY', 'THAT', 'IN', 'THESE', 'TIMES', 'IT', 'IS', 'NOT', 'RARE'] +3575-170457-0010-379: hyp=['I', 'AM', 'NOT', 'DEPRECIATING', 'IT', 'WHEN', 'I', 'SAY', 'THAT', 'IN', 'THESE', 'TIMES', 'IT', 'IS', 'NOT', 'RARE'] +3575-170457-0011-380: ref=['BUT', 'IT', 'IS', 'NOT', 'WITH', 'A', 'VIEW', 'TO', 'DISTINCTION', 'THAT', 'YOU', 'SHOULD', 'CULTIVATE', 'THIS', 'TALENT', 'IF', 'YOU', 'CONSULT', 'YOUR', 'OWN', 'HAPPINESS'] +3575-170457-0011-380: hyp=['BUT', 'IT', 'IS', 'NOT', 'WITH', 'A', 'VIEW', 'TO', 'DISTINCTION', 'THAT', 'YOU', 'SHOULD', 'CULTIVATE', 'THIS', 'TALENT', 'IF', 'YOU', 'CONSULT', 'YOUR', 'OWN', 'HAPPINESS'] +3575-170457-0012-381: ref=['YOU', 'WILL', 'SAY', 'THAT', 'A', 'WOMAN', 'HAS', 'NO', 'NEED', 'OF', 'SUCH', 'A', 'CAUTION', 'THERE', 'CAN', 'BE', 'NO', 'PERIL', 'IN', 'IT', 'FOR', 'HER'] +3575-170457-0012-381: hyp=['YOU', 'WILL', 'SAY', 'THAT', 'A', 'WOMAN', 'HAS', 'NO', 'NEED', 'OF', 'SUCH', 'A', 'CAUTION', 'THERE', 'CAN', 'BE', 'NO', 'PERIL', 'IN', 'IT', 'FOR', 'HER'] +3575-170457-0013-382: ref=['THE', 'MORE', 'SHE', 'IS', 'ENGAGED', 'IN', 'HER', 'PROPER', 'DUTIES', 'THE', 'LESS', 'LEISURE', 'WILL', 'SHE', 'HAVE', 'FOR', 'IT', 'EVEN', 'AS', 'AN', 'ACCOMPLISHMENT', 'AND', 'A', 'RECREATION'] +3575-170457-0013-382: hyp=['THE', 'MORE', 'SHE', 'IS', 'ENGAGED', 'IN', 'HER', 'PROPER', 'DUTIES', 'THE', 'LESS', 'LEISURE', 'WILL', 'SHE', 'HAVE', 'FOR', 'IT', 'EVEN', 'AS', 'AN', 'ACCOMPLISHMENT', 'AND', 'A', 'RECREATION'] +3575-170457-0014-383: ref=['TO', 'THOSE', 'DUTIES', 'YOU', 'HAVE', 'NOT', 'YET', 'BEEN', 'CALLED', 'AND', 'WHEN', 'YOU', 'ARE', 'YOU', 'WILL', 'BE', 'LESS', 'EAGER', 'FOR', 'CELEBRITY'] +3575-170457-0014-383: hyp=['TO', 'THOSE', 'DUTIES', 'YOU', 'HAVE', 'NOT', 'YET', 'BEEN', 'CALLED', 'AND', 'WHEN', 'YOU', 'ARE', 'YOU', 'WILL', 'BE', 'LESS', 'EAGER', 'FOR', 'CELEBRITY'] +3575-170457-0015-384: ref=['BUT', 'DO', 'NOT', 'SUPPOSE', 'THAT', 'I', 'DISPARAGE', 'THE', 'GIFT', 'WHICH', 'YOU', 'POSSESS', 'NOR', 'THAT', 'I', 'WOULD', 'DISCOURAGE', 'YOU', 'FROM', 'EXERCISING', 'IT', 'I', 'ONLY', 'EXHORT', 'YOU', 'SO', 'TO', 'THINK', 'OF', 'IT', 'AND', 'SO', 'TO', 'USE', 'IT', 'AS', 'TO', 'RENDER', 'IT', 'CONDUCIVE', 'TO', 'YOUR', 'OWN', 'PERMANENT', 'GOOD'] +3575-170457-0015-384: hyp=['BUT', 'DO', 'NOT', 'SUPPOSE', 'THAT', 'I', 'DISPARAGE', 'THE', 'GIFT', 'WHICH', 'YOU', 'POSSESS', 'NOR', 'THAT', 'I', 'WOULD', 'DISCOURAGE', 'YOU', 'FROM', 'EXERCISING', 'IT', 'I', 'ONLY', 'EXHORT', 'YOU', 'SO', 'TO', 'THINK', 'OF', 'IT', 'AND', 'SO', 'TO', 'USE', 'IT', 'AS', 'TO', 'RENDER', 'IT', 'CONDUCIVE', 'TO', 'YOUR', 'OWN', 'PERMANENT', 'GOOD'] +3575-170457-0016-385: ref=['FAREWELL', 'MADAM'] +3575-170457-0016-385: hyp=['FAREWELL', 'MADAM'] +3575-170457-0017-386: ref=['THOUGH', 'I', 'MAY', 'BE', 'BUT', 'AN', 'UNGRACIOUS', 'ADVISER', 'YOU', 'WILL', 'ALLOW', 'ME', 'THEREFORE', 'TO', 'SUBSCRIBE', 'MYSELF', 'WITH', 'THE', 'BEST', 'WISHES', 'FOR', 'YOUR', 'HAPPINESS', 'HERE', 'AND', 'HEREAFTER', 'YOUR', 'TRUE', 'FRIEND', 'ROBERT', 'SOUTHEY'] +3575-170457-0017-386: hyp=['THOUGH', 'I', 'MAY', 'BE', 'BUT', 'AN', 'UNGRACIOUS', 'ADVISER', 'YOU', 'WILL', 'ALLOW', 
'ME', 'THEREFORE', 'TO', 'SUBSCRIBE', 'MYSELF', 'WITH', 'THE', 'BEST', 'WISHES', 'FOR', 'YOUR', 'HAPPINESS', 'HERE', 'AND', 'HEREAFTER', 'YOUR', 'TRUE', 'FRIEND', 'ROBERT', 'SELVIE'] +3575-170457-0018-387: ref=['SIR', 'MARCH', 'SIXTEENTH'] +3575-170457-0018-387: hyp=['SIR', 'MARCH', 'SIXTEENTH'] +3575-170457-0019-388: ref=['I', 'HAD', 'NOT', 'VENTURED', 'TO', 'HOPE', 'FOR', 'SUCH', 'A', 'REPLY', 'SO', 'CONSIDERATE', 'IN', 'ITS', 'TONE', 'SO', 'NOBLE', 'IN', 'ITS', 'SPIRIT'] +3575-170457-0019-388: hyp=['I', 'HAVE', 'NOT', 'VENTURED', 'TO', 'HOPE', 'FOR', 'SUCH', 'A', 'REPLY', 'SO', 'CONSIDERATE', 'IN', 'ITS', 'TONE', 'SO', 'NOBLE', 'IN', 'ITS', 'SPIRIT'] +3575-170457-0020-389: ref=['I', 'KNOW', 'THE', 'FIRST', 'LETTER', 'I', 'WROTE', 'TO', 'YOU', 'WAS', 'ALL', 'SENSELESS', 'TRASH', 'FROM', 'BEGINNING', 'TO', 'END', 'BUT', 'I', 'AM', 'NOT', 'ALTOGETHER', 'THE', 'IDLE', 'DREAMING', 'BEING', 'IT', 'WOULD', 'SEEM', 'TO', 'DENOTE'] +3575-170457-0020-389: hyp=['I', 'KNOW', 'THE', 'FIRST', 'LETTER', 'I', 'WROTE', 'TO', 'YOU', 'WAS', 'ALL', 'SENSELESS', 'TRASH', 'FROM', 'BEGINNING', 'TO', 'END', 'BUT', 'I', 'AM', 'NOT', 'ALTOGETHER', 'THE', 'IDLE', 'DREAMING', 'BEING', 'IT', 'WOULD', 'SEEM', 'TO', 'DENOTE'] +3575-170457-0021-390: ref=['I', 'THOUGHT', 'IT', 'THEREFORE', 'MY', 'DUTY', 'WHEN', 'I', 'LEFT', 'SCHOOL', 'TO', 'BECOME', 'A', 'GOVERNESS'] +3575-170457-0021-390: hyp=['I', 'THOUGHT', 'IT', 'THEREFORE', 'MY', 'DUTY', 'WHEN', 'I', 'LEFT', 'SCHOOL', 'TO', 'BECOME', 'A', 'GOVERNESS'] +3575-170457-0022-391: ref=['IN', 'THE', 'EVENINGS', 'I', 'CONFESS', 'I', 'DO', 'THINK', 'BUT', 'I', 'NEVER', 'TROUBLE', 'ANY', 'ONE', 'ELSE', 'WITH', 'MY', 'THOUGHTS'] +3575-170457-0022-391: hyp=['IN', 'THE', 'EVENINGS', 'I', 'CONFESSED', 'I', 'DO', 'THINK', 'BUT', 'I', 'NEVER', 'TROUBLE', 'ANYONE', 'ELSE', 'WITH', 'MY', 'THOUGHTS'] +3575-170457-0023-392: ref=['I', 'CAREFULLY', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PREOCCUPATION', 'AND', 'ECCENTRICITY', 'WHICH', 'MIGHT', 'LEAD', 'THOSE', 'I', 'LIVE', 'AMONGST', 'TO', 'SUSPECT', 'THE', 'NATURE', 'OF', 'MY', 'PURSUITS'] +3575-170457-0023-392: hyp=['I', 'CAREFULLY', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PREOCCUPATION', 'AND', 'EXCENTRICITY', 'WHICH', 'MIGHT', 'LEAD', 'THOSE', 'I', 'LIVE', 'AMONGST', 'TO', 'SUSPECT', 'THE', 'NATURE', 'OF', 'MY', 'PURSUITS'] +3575-170457-0024-393: ref=['I', "DON'T", 'ALWAYS', 'SUCCEED', 'FOR', 'SOMETIMES', 'WHEN', "I'M", 'TEACHING', 'OR', 'SEWING', 'I', 'WOULD', 'RATHER', 'BE', 'READING', 'OR', 'WRITING', 'BUT', 'I', 'TRY', 'TO', 'DENY', 'MYSELF', 'AND', 'MY', "FATHER'S", 'APPROBATION', 'AMPLY', 'REWARDED', 'ME', 'FOR', 'THE', 'PRIVATION'] +3575-170457-0024-393: hyp=['I', "DON'T", 'ALWAYS', 'SUCCEED', 'FOR', 'SOMETIMES', 'WHEN', "I'M", 'TEACHING', 'OR', 'SEWING', 'I', 'WOULD', 'RATHER', 'BE', 'READING', 'A', 'WRITING', 'BUT', 'I', 'TRIED', 'TO', 'DENY', 'MYSELF', 'AND', 'MY', "FATHER'S", 'APPROBATION', 'AMPLY', 'REWARDED', 'ME', 'FOR', 'THE', 'PRIVATION'] +3575-170457-0025-394: ref=['AGAIN', 'I', 'THANK', 'YOU', 'THIS', 'INCIDENT', 'I', 'SUPPOSE', 'WILL', 'BE', 'RENEWED', 'NO', 'MORE', 'IF', 'I', 'LIVE', 'TO', 'BE', 'AN', 'OLD', 'WOMAN', 'I', 'SHALL', 'REMEMBER', 'IT', 'THIRTY', 'YEARS', 'HENCE', 'AS', 'A', 'BRIGHT', 'DREAM'] +3575-170457-0025-394: hyp=['AGAIN', 'I', 'THANK', 'YOU', 'THIS', 'INCIDENT', 'I', 'SUPPOSE', 'WILL', 'BE', 'RENEWED', 'NO', 'MORE', 'IF', 'I', 'LIVE', 'TO', 'BE', 'AN', 'OLD', 'WOMAN', 'I', 'SHALL', 'REMEMBER', 'IT', 'THIRTY', 'YEARS', 'HENCE', 'AS', 'A', 'BRIGHT', 'DREAM'] +3575-170457-0026-395: ref=['P', 'S', 
'PRAY', 'SIR', 'EXCUSE', 'ME', 'FOR', 'WRITING', 'TO', 'YOU', 'A', 'SECOND', 'TIME', 'I', 'COULD', 'NOT', 'HELP', 'WRITING', 'PARTLY', 'TO', 'TELL', 'YOU', 'HOW', 'THANKFUL', 'I', 'AM', 'FOR', 'YOUR', 'KINDNESS', 'AND', 'PARTLY', 'TO', 'LET', 'YOU', 'KNOW', 'THAT', 'YOUR', 'ADVICE', 'SHALL', 'NOT', 'BE', 'WASTED', 'HOWEVER', 'SORROWFULLY', 'AND', 'RELUCTANTLY', 'IT', 'MAY', 'BE', 'AT', 'FIRST', 'FOLLOWED', 'C', 'B'] +3575-170457-0026-395: hyp=['P', 'S', 'PRAY', 'SIR', 'EXCUSE', 'ME', 'FOR', 'WRITING', 'TO', 'YOU', 'A', 'SECOND', 'TIME', 'I', 'COULD', 'NOT', 'HELP', 'WRITING', 'PARTLY', 'TO', 'TELL', 'YOU', 'HOW', 'THANKFUL', 'I', 'AM', 'FOR', 'YOUR', 'KINDNESS', 'AND', 'PARTLY', 'TO', 'LET', 'YOU', 'KNOW', 'THAT', 'YOUR', 'ADVICE', 'SHALL', 'NOT', 'BE', 'WASTED', 'HOWEVER', 'SORROWFULLY', 'AND', 'RELUCTANTLY', 'IT', 'MAY', 'BE', 'AT', 'FIRST', 'FOLLOWED', 'C'] +3575-170457-0027-396: ref=['I', 'CANNOT', 'DENY', 'MYSELF', 'THE', 'GRATIFICATION', 'OF', 'INSERTING', "SOUTHEY'S", 'REPLY'] +3575-170457-0027-396: hyp=['I', 'CANNOT', 'DENY', 'MYSELF', 'THE', 'GRATIFICATION', 'OF', 'INSERTING', 'SO', 'THESE', 'REPLY'] +3575-170457-0028-397: ref=['KESWICK', 'MARCH', 'TWENTY', 'SECOND', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'DEAR', 'MADAM'] +3575-170457-0028-397: hyp=['KEZWICK', 'MARCH', 'TWENTY', 'SECOND', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'DEAR', 'MADAM'] +3575-170457-0029-398: ref=['YOUR', 'LETTER', 'HAS', 'GIVEN', 'ME', 'GREAT', 'PLEASURE', 'AND', 'I', 'SHOULD', 'NOT', 'FORGIVE', 'MYSELF', 'IF', 'I', 'DID', 'NOT', 'TELL', 'YOU', 'SO'] +3575-170457-0029-398: hyp=['YOUR', 'LETTER', 'HAS', 'GIVEN', 'ME', 'GREAT', 'PLEASURE', 'AND', 'I', 'SHOULD', 'NOT', 'FORGIVE', 'MYSELF', 'IF', 'I', 'DID', 'NOT', 'TELL', 'YOU', 'SO'] +3575-170457-0030-399: ref=['OF', 'THIS', 'SECOND', 'LETTER', 'ALSO', 'SHE', 'SPOKE', 'AND', 'TOLD', 'ME', 'THAT', 'IT', 'CONTAINED', 'AN', 'INVITATION', 'FOR', 'HER', 'TO', 'GO', 'AND', 'SEE', 'THE', 'POET', 'IF', 'EVER', 'SHE', 'VISITED', 'THE', 'LAKES'] +3575-170457-0030-399: hyp=['OF', 'THIS', 'SECOND', 'LETTER', 'ALSO', 'SHE', 'SPOKE', 'AND', 'TOLD', 'ME', 'THAT', 'IT', 'CONTAINED', 'AN', 'INVITATION', 'FOR', 'HER', 'TO', 'GO', 'AND', 'SEE', 'THE', 'POET', 'IF', 'EVER', 'SHE', 'VISITED', 'THE', 'LAKES'] +3575-170457-0031-400: ref=['ON', 'AUGUST', 'TWENTY', 'SEVENTH', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'SHE', 'WRITES'] +3575-170457-0031-400: hyp=['ON', 'AUGUST', 'TWENTY', 'SEVENTH', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'SHE', 'WRITES'] +3575-170457-0032-401: ref=['COME', 'COME', 'I', 'AM', 'GETTING', 'REALLY', 'TIRED', 'OF', 'YOUR', 'ABSENCE'] +3575-170457-0032-401: hyp=['COME', 'COME', "I'M", 'GETTING', 'REALLY', 'TIRED', 'OF', 'YOUR', 'ABSENCE'] +3575-170457-0033-402: ref=['SATURDAY', 'AFTER', 'SATURDAY', 'COMES', 'ROUND', 'AND', 'I', 'CAN', 'HAVE', 'NO', 'HOPE', 'OF', 'HEARING', 'YOUR', 'KNOCK', 'AT', 'THE', 'DOOR', 'AND', 'THEN', 'BEING', 'TOLD', 'THAT', 'MISS', 'E', 'IS', 'COME', 'OH', 'DEAR'] +3575-170457-0033-402: hyp=['SATURDAY', 'AFTER', 'SATURDAY', 'COMES', 'AROUND', 'AND', 'I', 'CAN', 'HAVE', 'NO', 'HOPE', 'OF', 'HEARING', 'YOUR', 'KNOCK', 'AT', 'THE', 'DOOR', 'AND', 'THEN', 'BEING', 'TOLD', 'THAT', 'MISSY', 'IS', 'COME', 'OH', 'DEAR'] +3575-170457-0034-403: ref=['IN', 'THIS', 'MONOTONOUS', 'LIFE', 'OF', 'MINE', 'THAT', 'WAS', 'A', 'PLEASANT', 'EVENT'] +3575-170457-0034-403: hyp=['IN', 'THIS', 'MONOTONOUS', 'LIFE', 'OF', 'MIND', 'THAT', 'WAS', 'A', 'PLEASANT', 'EVENT'] +3575-170457-0035-404: ref=['I', 'WISH', 'IT', 'WOULD', 'RECUR', 'AGAIN', 'BUT', 'IT', 'WILL', 'TAKE', 'TWO', 'OR', 
'THREE', 'INTERVIEWS', 'BEFORE', 'THE', 'STIFFNESS', 'THE', 'ESTRANGEMENT', 'OF', 'THIS', 'LONG', 'SEPARATION', 'WILL', 'WEAR', 'AWAY'] +3575-170457-0035-404: hyp=['I', 'WISH', 'YOU', 'WERE', 'RECUR', 'AGAIN', 'BUT', 'IT', 'WILL', 'TAKE', 'TWO', 'OR', 'THREE', 'INTERVIEWS', 'BEFORE', 'THE', 'STIFFNESS', 'THE', 'ESTRANGEMENT', 'OF', 'THIS', 'LONG', 'SEPARATION', 'WILL', 'WEAR', 'AWAY'] +3575-170457-0036-405: ref=['MY', 'EYES', 'FILL', 'WITH', 'TEARS', 'WHEN', 'I', 'CONTRAST', 'THE', 'BLISS', 'OF', 'SUCH', 'A', 'STATE', 'BRIGHTENED', 'BY', 'HOPES', 'OF', 'THE', 'FUTURE', 'WITH', 'THE', 'MELANCHOLY', 'STATE', 'I', 'NOW', 'LIVE', 'IN', 'UNCERTAIN', 'THAT', 'I', 'EVER', 'FELT', 'TRUE', 'CONTRITION', 'WANDERING', 'IN', 'THOUGHT', 'AND', 'DEED', 'LONGING', 'FOR', 'HOLINESS', 'WHICH', 'I', 'SHALL', 'NEVER', 'NEVER', 'OBTAIN', 'SMITTEN', 'AT', 'TIMES', 'TO', 'THE', 'HEART', 'WITH', 'THE', 'CONVICTION', 'THAT', 'GHASTLY', 'CALVINISTIC', 'DOCTRINES', 'ARE', 'TRUE', 'DARKENED', 'IN', 'SHORT', 'BY', 'THE', 'VERY', 'SHADOWS', 'OF', 'SPIRITUAL', 'DEATH'] +3575-170457-0036-405: hyp=['MY', 'EYES', 'FILLED', 'TEARS', 'WHEN', 'I', 'CONTRAST', 'THE', 'BLISS', 'OF', 'SUCH', 'A', 'STATE', 'BRIGHTENED', 'BY', 'HOPES', 'OF', 'THE', 'FUTURE', 'WITH', 'THE', 'MELANCHOLY', 'STATE', 'I', 'NOW', 'LIVE', 'IN', 'UNCERTAIN', 'THAT', 'I', 'EVER', 'FELT', 'TRUE', 'CONTRITION', 'WANDERING', 'IN', 'THOUGHT', 'INDEED', 'LONGING', 'FOR', 'HOLINESS', 'WHICH', 'I', 'SHALL', 'NEVER', 'NEVER', 'OBTAIN', 'SMITTEN', 'AT', 'TIMES', 'TO', 'THE', 'HEART', 'WITH', 'THE', 'CONVICTION', 'THAT', 'GHASTLY', 'CALVINISTIC', 'DOCTRINES', 'ARE', 'TRUE', 'DARKENED', 'IN', 'SHORT', 'BY', 'THE', 'VERY', 'SHADOWS', 'OF', 'SPIRITUAL', 'DEATH'] +3575-170457-0037-406: ref=['IF', 'CHRISTIAN', 'PERFECTION', 'BE', 'NECESSARY', 'TO', 'SALVATION', 'I', 'SHALL', 'NEVER', 'BE', 'SAVED', 'MY', 'HEART', 'IS', 'A', 'VERY', 'HOTBED', 'FOR', 'SINFUL', 'THOUGHTS', 'AND', 'WHEN', 'I', 'DECIDE', 'ON', 'AN', 'ACTION', 'I', 'SCARCELY', 'REMEMBER', 'TO', 'LOOK', 'TO', 'MY', 'REDEEMER', 'FOR', 'DIRECTION'] +3575-170457-0037-406: hyp=['IF', 'CHRISTIAN', 'PERFECTION', 'BE', 'NECESSARY', 'TO', 'SALVATION', 'I', 'SHALL', 'NEVER', 'BE', 'SAVED', 'MY', 'HEART', 'IS', 'A', 'VERY', 'HOT', 'BED', 'FOR', 'SINFUL', 'THOUGHTS', 'AND', 'WHEN', 'I', 'DECIDE', 'ON', 'AN', 'ACTION', 'I', 'SCARCELY', 'REMEMBER', 'TO', 'LOOK', 'TO', 'MY', 'REDEEMER', 'FOR', 'A', 'DIRECTION'] +3575-170457-0038-407: ref=['AND', 'MEANTIME', 'I', 'KNOW', 'THE', 'GREATNESS', 'OF', 'JEHOVAH', 'I', 'ACKNOWLEDGE', 'THE', 'PERFECTION', 'OF', 'HIS', 'WORD', 'I', 'ADORE', 'THE', 'PURITY', 'OF', 'THE', 'CHRISTIAN', 'FAITH', 'MY', 'THEORY', 'IS', 'RIGHT', 'MY', 'PRACTICE', 'HORRIBLY', 'WRONG'] +3575-170457-0038-407: hyp=['AND', 'MEANTIME', 'I', 'KNOW', 'THE', 'GREATNESS', 'OF', 'JEHOVAH', 'I', 'ACKNOWLEDGE', 'THE', 'PERFECTION', 'OF', 'HIS', 'WORD', 'I', 'ADORE', 'THE', 'PURITY', 'OF', 'THE', 'CHRISTIAN', 'FAITH', 'MY', 'THEORY', 'IS', 'RIGHT', 'MY', 'PRACTICE', 'HORRIBLY', 'WRONG'] +3575-170457-0039-408: ref=['THE', 'CHRISTMAS', 'HOLIDAYS', 'CAME', 'AND', 'SHE', 'AND', 'ANNE', 'RETURNED', 'TO', 'THE', 'PARSONAGE', 'AND', 'TO', 'THAT', 'HAPPY', 'HOME', 'CIRCLE', 'IN', 'WHICH', 'ALONE', 'THEIR', 'NATURES', 'EXPANDED', 'AMONGST', 'ALL', 'OTHER', 'PEOPLE', 'THEY', 'SHRIVELLED', 'UP', 'MORE', 'OR', 'LESS'] +3575-170457-0039-408: hyp=['THE', 'CHRIST', 'HOLIDAYS', 'CAME', 'AND', 'SHE', 'AND', 'ANNE', 'RETURNED', 'TO', 'THE', 'PARSONAGE', 'AND', 'TO', 'THAT', 'HAPPY', 'HOME', 'CIRCLE', 'IN', 'WHICH', 'ALONE', 'THEIR', 
'NATURES', 'EXPANDED', 'AMONGST', 'ALL', 'OTHER', 'PEOPLE', 'THEY', 'SHRIVELLED', 'UP', 'MORE', 'OR', 'LESS'] +3575-170457-0040-409: ref=['INDEED', 'THERE', 'WERE', 'ONLY', 'ONE', 'OR', 'TWO', 'STRANGERS', 'WHO', 'COULD', 'BE', 'ADMITTED', 'AMONG', 'THE', 'SISTERS', 'WITHOUT', 'PRODUCING', 'THE', 'SAME', 'RESULT'] +3575-170457-0040-409: hyp=['INDEED', 'THERE', 'WERE', 'ONLY', 'ONE', 'OR', 'TWO', 'STRANGERS', 'WHO', 'COULD', 'BE', 'ADMITTED', 'AMONG', 'THE', 'SISTERS', 'WITHOUT', 'PRODUCING', 'THE', 'SAME', 'RESULT'] +3575-170457-0041-410: ref=['SHE', 'WAS', 'GONE', 'OUT', 'INTO', 'THE', 'VILLAGE', 'ON', 'SOME', 'ERRAND', 'WHEN', 'AS', 'SHE', 'WAS', 'DESCENDING', 'THE', 'STEEP', 'STREET', 'HER', 'FOOT', 'SLIPPED', 'ON', 'THE', 'ICE', 'AND', 'SHE', 'FELL', 'IT', 'WAS', 'DARK', 'AND', 'NO', 'ONE', 'SAW', 'HER', 'MISCHANCE', 'TILL', 'AFTER', 'A', 'TIME', 'HER', 'GROANS', 'ATTRACTED', 'THE', 'ATTENTION', 'OF', 'A', 'PASSER', 'BY'] +3575-170457-0041-410: hyp=['SHE', 'WAS', 'GONE', 'OUT', 'INTO', 'THE', 'VILLAGE', 'ON', 'SOME', 'ERRAND', 'WHEN', 'AS', 'SHE', 'WAS', 'DESCENDING', 'THE', 'STEEP', 'STREET', 'HER', 'FOOT', 'SLIPPED', 'ON', 'THE', 'ICE', 'AND', 'SHE', 'FELL', 'HE', 'WAS', 'DARK', 'AND', 'NO', 'ONE', 'SAW', 'HER', 'MISCHANCE', 'TILL', 'AFTER', 'A', 'TIME', 'HER', 'GROANS', 'ATTRACTED', 'THE', 'ATTENTION', 'OF', 'A', 'PASSER', 'BY'] +3575-170457-0042-411: ref=['UNFORTUNATELY', 'THE', 'FRACTURE', 'COULD', 'NOT', 'BE', 'SET', 'TILL', 'SIX', "O'CLOCK", 'THE', 'NEXT', 'MORNING', 'AS', 'NO', 'SURGEON', 'WAS', 'TO', 'BE', 'HAD', 'BEFORE', 'THAT', 'TIME', 'AND', 'SHE', 'NOW', 'LIES', 'AT', 'OUR', 'HOUSE', 'IN', 'A', 'VERY', 'DOUBTFUL', 'AND', 'DANGEROUS', 'STATE'] +3575-170457-0042-411: hyp=['UNFORTUNATELY', 'THE', 'FRACTURE', 'COULD', 'NOT', 'BE', 'SET', 'TILL', 'SIX', "O'CLOCK", 'THE', 'NEXT', 'MORNING', 'AS', 'NO', 'SURGEON', 'WAS', 'TO', 'BE', 'HAD', 'BEFORE', 'THAT', 'TIME', 'AND', 'SHE', 'NOW', 'LIES', 'AT', 'HER', 'HOUSE', 'IN', 'A', 'VERY', 'DOUBTFUL', 'AND', 'DANGEROUS', 'STATE'] +3575-170457-0043-412: ref=['HOWEVER', 'REMEMBERING', 'WHAT', 'YOU', 'TOLD', 'ME', 'NAMELY', 'THAT', 'YOU', 'HAD', 'COMMENDED', 'THE', 'MATTER', 'TO', 'A', 'HIGHER', 'DECISION', 'THAN', 'OURS', 'AND', 'THAT', 'YOU', 'WERE', 'RESOLVED', 'TO', 'SUBMIT', 'WITH', 'RESIGNATION', 'TO', 'THAT', 'DECISION', 'WHATEVER', 'IT', 'MIGHT', 'BE', 'I', 'HOLD', 'IT', 'MY', 'DUTY', 'TO', 'YIELD', 'ALSO', 'AND', 'TO', 'BE', 'SILENT', 'IT', 'MAY', 'BE', 'ALL', 'FOR', 'THE', 'BEST'] +3575-170457-0043-412: hyp=['HOWEVER', 'REMEMBERING', 'WHAT', 'YOU', 'TOLD', 'ME', 'NAMELY', 'THAT', 'YOU', 'HAD', 'COMMENDED', 'THE', 'MATTER', 'TO', 'A', 'HIGHER', 'DECISION', 'THAN', 'OURS', 'AND', 'THAT', 'YOU', 'WERE', 'RESOLVED', 'TO', 'SUBMIT', 'WITH', 'RESIGNATION', 'TO', 'THAT', 'DECISION', 'WHATEVER', 'IT', 'MIGHT', 'BE', 'I', 'HOLD', 'IT', 'MY', 'DUTY', 'TO', 'YIELD', 'ALSO', 'AND', 'TO', 'BE', 'SILENT', 'AND', 'MAY', 'BE', 'ALL', 'FOR', 'THE', 'BEST'] +3575-170457-0044-413: ref=['AFTER', 'THIS', 'DISAPPOINTMENT', 'I', 'NEVER', 'DARE', 'RECKON', 'WITH', 'CERTAINTY', 'ON', 'THE', 'ENJOYMENT', 'OF', 'A', 'PLEASURE', 'AGAIN', 'IT', 'SEEMS', 'AS', 'IF', 'SOME', 'FATALITY', 'STOOD', 'BETWEEN', 'YOU', 'AND', 'ME'] +3575-170457-0044-413: hyp=['AFTER', 'THIS', 'DISAPPOINTMENT', 'I', 'NEVER', 'DARE', 'RECKON', 'WITH', 'CERTAINTY', 'ON', 'THE', 'ENJOYMENT', 'OF', 'A', 'PLEASURE', 'AGAIN', 'IT', 'SEEMS', 'AS', 'IF', 'SOME', 'FATALITY', 'STOOD', 'BETWEEN', 'YOU', 'AND', 'ME'] +3575-170457-0045-414: ref=['I', 'AM', 'NOT', 'GOOD', 'ENOUGH', 'FOR', 'YOU', 
'AND', 'YOU', 'MUST', 'BE', 'KEPT', 'FROM', 'THE', 'CONTAMINATION', 'OF', 'TOO', 'INTIMATE', 'SOCIETY'] +3575-170457-0045-414: hyp=['I', 'AM', 'NOT', 'GOOD', 'ENOUGH', 'FOR', 'YOU', 'AND', 'YOU', 'MUST', 'BE', 'KEPT', 'FROM', 'THE', 'CONTAMINATION', 'OF', 'TWO', 'INTIMATE', 'SOCIETY'] +3575-170457-0046-415: ref=['A', 'GOOD', 'NEIGHBOUR', 'OF', 'THE', 'BRONTES', 'A', 'CLEVER', 'INTELLIGENT', 'YORKSHIRE', 'WOMAN', 'WHO', 'KEEPS', 'A', "DRUGGIST'S", 'SHOP', 'IN', 'HAWORTH', 'AND', 'FROM', 'HER', 'OCCUPATION', 'HER', 'EXPERIENCE', 'AND', 'EXCELLENT', 'SENSE', 'HOLDS', 'THE', 'POSITION', 'OF', 'VILLAGE', 'DOCTRESS', 'AND', 'NURSE', 'AND', 'AS', 'SUCH', 'HAS', 'BEEN', 'A', 'FRIEND', 'IN', 'MANY', 'A', 'TIME', 'OF', 'TRIAL', 'AND', 'SICKNESS', 'AND', 'DEATH', 'IN', 'THE', 'HOUSEHOLDS', 'ROUND', 'TOLD', 'ME', 'A', 'CHARACTERISTIC', 'LITTLE', 'INCIDENT', 'CONNECTED', 'WITH', "TABBY'S", 'FRACTURED', 'LEG'] +3575-170457-0046-415: hyp=['A', 'GOOD', 'NEIGHBOR', 'OF', 'THE', 'BRONTES', 'A', 'CLEVER', 'INTELLIGENT', 'YORKSHIRE', 'WOMAN', 'WHO', 'KEEPS', 'A', 'DRUGGIST', 'SHOP', 'IN', 'HAWORTH', 'FROM', 'HER', 'OCCUPATION', 'HER', 'EXPERIENCE', 'AND', 'EXCELLENT', 'SENSE', 'HOLDS', 'THE', 'POSITION', 'OF', 'VILLAGE', 'DOCTRIS', 'AND', 'NURSE', 'AND', 'AS', 'SUCH', 'HAS', 'BEEN', 'A', 'FRIEND', 'IN', 'MANY', 'A', 'TIME', 'OF', 'TRIAL', 'AND', 'SICKNESS', 'AND', 'DEATH', 'IN', 'THE', 'HOUSEHOLDS', 'ROUND', 'TOLD', 'ME', 'A', 'CHARACTERISTIC', 'LITTLE', 'INCIDENT', 'CONNECTED', 'WITH', "TABBY'S", 'FRACTURED', 'LEG'] +3575-170457-0047-416: ref=['TABBY', 'HAD', 'LIVED', 'WITH', 'THEM', 'FOR', 'TEN', 'OR', 'TWELVE', 'YEARS', 'AND', 'WAS', 'AS', 'CHARLOTTE', 'EXPRESSED', 'IT', 'ONE', 'OF', 'THE', 'FAMILY'] +3575-170457-0047-416: hyp=['TABBY', 'HAD', 'LIVED', 'WITH', 'THEM', 'FOR', 'TEN', 'OR', 'TWELVE', 'YEARS', 'AND', 'WAS', 'AS', 'CHARLOTTE', 'EXPRESSED', 'IT', 'ONE', 'OF', 'THE', 'FAMILY'] +3575-170457-0048-417: ref=['HE', 'REFUSED', 'AT', 'FIRST', 'TO', 'LISTEN', 'TO', 'THE', 'CAREFUL', 'ADVICE', 'IT', 'WAS', 'REPUGNANT', 'TO', 'HIS', 'LIBERAL', 'NATURE'] +3575-170457-0048-417: hyp=['HE', 'REFUSE', 'AT', 'FIRST', 'TO', 'LISTEN', 'TO', 'THE', 'CAREFUL', 'ADVICE', 'IT', 'WAS', 'REPUGNANT', 'TO', 'HIS', 'LIBERAL', 'NATURE'] +3575-170457-0049-418: ref=['THIS', 'DECISION', 'WAS', 'COMMUNICATED', 'TO', 'THE', 'GIRLS'] +3575-170457-0049-418: hyp=['THIS', 'DECISION', 'WAS', 'COMMUNICATED', 'TO', 'THE', 'GIRLS'] +3575-170457-0050-419: ref=['TABBY', 'HAD', 'TENDED', 'THEM', 'IN', 'THEIR', 'CHILDHOOD', 'THEY', 'AND', 'NONE', 'OTHER', 'SHOULD', 'TEND', 'HER', 'IN', 'HER', 'INFIRMITY', 'AND', 'AGE'] +3575-170457-0050-419: hyp=['TABBY', 'HAD', 'TENDED', 'THEM', 'IN', 'THEIR', 'CHILDHOOD', 'THEY', 'AND', 'NONE', 'OTHER', 'SHOULD', 'TEND', 'HER', 'IN', 'HER', 'INFIRMITY', 'AND', 'AGE'] +3575-170457-0051-420: ref=['AT', 'TEA', 'TIME', 'THEY', 'WERE', 'SAD', 'AND', 'SILENT', 'AND', 'THE', 'MEAL', 'WENT', 'AWAY', 'UNTOUCHED', 'BY', 'ANY', 'OF', 'THE', 'THREE'] +3575-170457-0051-420: hyp=['AT', 'TEA', 'TIME', 'THEY', 'WERE', 'SAD', 'AND', 'SILENT', 'AND', 'THE', 'MEAL', 'WENT', 'AWAY', 'UNTOUCHED', 'BY', 'ANY', 'OF', 'THE', 'THREE'] +3575-170457-0052-421: ref=['SHE', 'HAD', 'ANOTHER', 'WEIGHT', 'ON', 'HER', 'MIND', 'THIS', 'CHRISTMAS'] +3575-170457-0052-421: hyp=['SHE', 'HAD', 'ANOTHER', 'WEIGHT', 'ON', 'HER', 'MIND', 'THIS', 'CHRISTMAS'] +3575-170457-0053-422: ref=['BUT', 'ANNE', 'HAD', 'BEGUN', 'TO', 'SUFFER', 'JUST', 'BEFORE', 'THE', 'HOLIDAYS', 'AND', 'CHARLOTTE', 'WATCHED', 'OVER', 'HER', 'YOUNGER', 'SISTERS', 'WITH', 
'THE', 'JEALOUS', 'VIGILANCE', 'OF', 'SOME', 'WILD', 'CREATURE', 'THAT', 'CHANGES', 'HER', 'VERY', 'NATURE', 'IF', 'DANGER', 'THREATENS', 'HER', 'YOUNG'] +3575-170457-0053-422: hyp=['BUT', 'ANNE', 'HAD', 'BEGUN', 'TO', 'SUFFER', 'JUST', 'BEFORE', 'THE', 'HOLIDAYS', 'AND', 'CHARLOTTE', 'WATCHED', 'OVER', 'HER', 'YOUNGER', 'SISTERS', 'WITH', 'A', 'JEALOUS', 'VIGILANCE', 'OF', 'SOME', 'WILD', 'CREATURE', 'THAT', 'CHANGES', 'HER', 'VERY', 'NATURE', 'IF', 'DANGER', 'THREATENS', 'HER', 'YOUNG'] +3575-170457-0054-423: ref=['STUNG', 'BY', 'ANXIETY', 'FOR', 'THIS', 'LITTLE', 'SISTER', 'SHE', 'UPBRAIDED', 'MISS', 'W', 'FOR', 'HER', 'FANCIED', 'INDIFFERENCE', 'TO', "ANNE'S", 'STATE', 'OF', 'HEALTH'] +3575-170457-0054-423: hyp=['STUNG', 'BY', 'ANXIETY', 'FOR', 'THIS', 'LITTLE', 'SISTER', 'SHE', 'UPBRAIDED', 'MISS', 'W', 'FOR', 'HER', 'FANCIED', 'INDIFFERENCE', 'TO', 'AN', 'STATE', 'OF', 'HEALTH'] +3575-170457-0055-424: ref=['STILL', 'HER', 'HEART', 'HAD', 'RECEIVED', 'A', 'SHOCK', 'IN', 'THE', 'PERCEPTION', 'OF', "ANNE'S", 'DELICACY', 'AND', 'ALL', 'THESE', 'HOLIDAYS', 'SHE', 'WATCHED', 'OVER', 'HER', 'WITH', 'THE', 'LONGING', 'FOND', 'ANXIETY', 'WHICH', 'IS', 'SO', 'FULL', 'OF', 'SUDDEN', 'PANGS', 'OF', 'FEAR'] +3575-170457-0055-424: hyp=['STILL', 'HER', 'HEART', 'HAD', 'RECEIVED', 'A', 'SHOCK', 'IN', 'THE', 'PERCEPTION', 'OF', "ANNE'S", 'DELICACY', 'AND', 'ALL', 'THESE', 'HOLIDAYS', 'SHE', 'WATCHED', 'OVER', 'HER', 'WITH', 'THE', 'LONGING', 'FOND', 'ANXIETY', 'WHICH', 'IS', 'SO', 'FULL', 'OF', 'SUDDEN', 'PANGS', 'OF', 'FEAR'] +3575-170457-0056-425: ref=['I', 'DOUBT', 'WHETHER', 'BRANWELL', 'WAS', 'MAINTAINING', 'HIMSELF', 'AT', 'THIS', 'TIME'] +3575-170457-0056-425: hyp=['I', 'DOUBT', 'WHETHER', 'BROWNWELL', 'WAS', 'MAINTAINING', 'HIMSELF', 'AT', 'THIS', 'TIME'] +3729-6852-0000-1660: ref=['TO', 'CELEBRATE', 'THE', 'ARRIVAL', 'OF', 'HER', 'SON', 'SILVIA', 'GAVE', 'A', 'SPLENDID', 'SUPPER', 'TO', 'WHICH', 'SHE', 'HAD', 'INVITED', 'ALL', 'HER', 'RELATIVES', 'AND', 'IT', 'WAS', 'A', 'GOOD', 'OPPORTUNITY', 'FOR', 'ME', 'TO', 'MAKE', 'THEIR', 'ACQUAINTANCE'] +3729-6852-0000-1660: hyp=['TO', 'CELEBRATE', 'THE', 'ARRIVAL', 'OF', 'HER', 'SON', 'SYLVIA', 'GAVE', 'A', 'SPLENDID', 'SUPPER', 'TO', 'WHICH', 'SHE', 'HAD', 'INVITED', 'ALL', 'HER', 'RELATIVES', 'AND', 'IT', 'WAS', 'A', 'GOOD', 'OPPORTUNITY', 'FOR', 'ME', 'TO', 'MAKE', 'THEIR', 'ACQUAINTANCE'] +3729-6852-0001-1661: ref=['WITHOUT', 'SAYING', 'IT', 'POSITIVELY', 'SHE', 'MADE', 'ME', 'UNDERSTAND', 'THAT', 'BEING', 'HERSELF', 'AN', 'ILLUSTRIOUS', 'MEMBER', 'OF', 'THE', 'REPUBLIC', 'OF', 'LETTERS', 'SHE', 'WAS', 'WELL', 'AWARE', 'THAT', 'SHE', 'WAS', 'SPEAKING', 'TO', 'AN', 'INSECT'] +3729-6852-0001-1661: hyp=['WITHOUT', 'SAYING', 'IT', 'POSITIVELY', 'SHE', 'MADE', 'ME', 'UNDERSTAND', 'THAT', 'BEING', 'HERSELF', 'AN', 'ILLUSTRIOUS', 'MEMBER', 'OF', 'THE', 'REPUBLIC', 'OF', 'LETTERS', 'SHE', 'WAS', 'WELL', 'AWARE', 'THAT', 'SHE', 'WAS', 'SPEAKING', 'TO', 'AN', 'INSECT'] +3729-6852-0002-1662: ref=['IN', 'ORDER', 'TO', 'PLEASE', 'HER', 'I', 'SPOKE', 'TO', 'HER', 'OF', 'THE', 'ABBE', 'CONTI', 'AND', 'I', 'HAD', 'OCCASION', 'TO', 'QUOTE', 'TWO', 'LINES', 'OF', 'THAT', 'PROFOUND', 'WRITER'] +3729-6852-0002-1662: hyp=['IN', 'ORDER', 'TO', 'PLEASE', 'HER', 'I', 'SPOKE', 'TO', 'HER', 'OF', 'THE', 'ABBEY', 'CONTI', 'AND', 'I', 'HAD', 'OCCASION', 'TO', 'QUOTE', 'TWO', 'LINES', 'OF', 'THAT', 'PROFOUND', 'WRITER'] +3729-6852-0003-1663: ref=['MADAM', 'CORRECTED', 'ME', 'WITH', 'A', 'PATRONIZING', 'AIR', 'FOR', 'MY', 'PRONUNCIATION', 'OF', 'THE', 'WORD', 'SCEVRA', 
'WHICH', 'MEANS', 'DIVIDED', 'SAYING', 'THAT', 'IT', 'OUGHT', 'TO', 'BE', 'PRONOUNCED', 'SCEURA', 'AND', 'SHE', 'ADDED', 'THAT', 'I', 'OUGHT', 'TO', 'BE', 'VERY', 'GLAD', 'TO', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'ON', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'ARRIVAL', 'IN', 'PARIS', 'TELLING', 'ME', 'THAT', 'IT', 'WOULD', 'BE', 'AN', 'IMPORTANT', 'DAY', 'IN', 'MY', 'LIFE'] +3729-6852-0003-1663: hyp=['MADAME', 'CORRECTED', 'ME', 'WITH', 'A', 'PATRONIZING', 'AIR', 'FOR', 'MY', 'PRONUNCIATION', 'OF', 'THE', 'WORD', 'SCAVER', 'WHICH', 'MEANS', 'DIVIDED', 'SAYING', 'THAT', 'IT', 'OUGHT', 'TO', 'BE', 'PRONOUNCED', 'SKURA', 'AND', 'SHE', 'ADDED', 'THAT', 'I', 'OUGHT', 'TO', 'BE', 'VERY', 'GLAD', 'TO', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'ON', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'ARRIVAL', 'IN', 'PARIS', 'TELLING', 'ME', 'THAT', 'IT', 'WOULD', 'BE', 'AN', 'IMPORTANT', 'DAY', 'IN', 'MY', 'LIFE'] +3729-6852-0004-1664: ref=['HER', 'FACE', 'WAS', 'AN', 'ENIGMA', 'FOR', 'IT', 'INSPIRED', 'EVERYONE', 'WITH', 'THE', 'WARMEST', 'SYMPATHY', 'AND', 'YET', 'IF', 'YOU', 'EXAMINED', 'IT', 'ATTENTIVELY', 'THERE', 'WAS', 'NOT', 'ONE', 'BEAUTIFUL', 'FEATURE', 'SHE', 'COULD', 'NOT', 'BE', 'CALLED', 'HANDSOME', 'BUT', 'NO', 'ONE', 'COULD', 'HAVE', 'THOUGHT', 'HER', 'UGLY'] +3729-6852-0004-1664: hyp=['HER', 'FACE', 'WAS', 'AN', 'ENIGMA', 'FOR', 'IT', 'INSPIRED', 'EVERY', 'ONE', 'WITH', 'THE', 'WARMEST', 'SYMPATHY', 'AND', 'YET', 'IF', 'YOU', 'EXAMINED', 'IT', 'ATTENTIVELY', 'THERE', 'WAS', 'NOT', 'ONE', 'BEAUTIFUL', 'FEATURE', 'SHE', 'COULD', 'NOT', 'BE', 'CALLED', 'HANDSOME', 'BUT', 'NO', 'ONE', 'COULD', 'HAVE', 'THOUGHT', 'HER', 'UGLY'] +3729-6852-0005-1665: ref=['SILVIA', 'WAS', 'THE', 'ADORATION', 'OF', 'FRANCE', 'AND', 'HER', 'TALENT', 'WAS', 'THE', 'REAL', 'SUPPORT', 'OF', 'ALL', 'THE', 'COMEDIES', 'WHICH', 'THE', 'GREATEST', 'AUTHORS', 'WROTE', 'FOR', 'HER', 'ESPECIALLY', 'OF', 'THE', 'PLAYS', 'OF', 'MARIVAUX', 'FOR', 'WITHOUT', 'HER', 'HIS', 'COMEDIES', 'WOULD', 'NEVER', 'HAVE', 'GONE', 'TO', 'POSTERITY'] +3729-6852-0005-1665: hyp=['SYLVIA', 'WAS', 'THE', 'ADORATION', 'OF', 'FRANCE', 'AND', 'HER', 'TALENT', 'WAS', 'THE', 'REAL', 'SUPPORT', 'OF', 'ALL', 'THE', 'COMEDIES', 'WHICH', 'THE', 'GREATEST', 'AUTHORS', 'WROTE', 'FOR', 'HER', 'ESPECIALLY', 'OF', 'THE', 'PLAYS', 'OF', 'MARIVAUX', 'FOR', 'WITHOUT', 'HER', 'HIS', 'COMEDIES', 'WOULD', 'NEVER', 'HAVE', 'GONE', 'TO', 'PROSTERITY'] +3729-6852-0006-1666: ref=['SILVIA', 'DID', 'NOT', 'THINK', 'THAT', 'HER', 'GOOD', 'CONDUCT', 'WAS', 'A', 'MERIT', 'FOR', 'SHE', 'KNEW', 'THAT', 'SHE', 'WAS', 'VIRTUOUS', 'ONLY', 'BECAUSE', 'HER', 'SELF', 'LOVE', 'COMPELLED', 'HER', 'TO', 'BE', 'SO', 'AND', 'SHE', 'NEVER', 'EXHIBITED', 'ANY', 'PRIDE', 'OR', 'ASSUMED', 'ANY', 'SUPERIORITY', 'TOWARDS', 'HER', 'THEATRICAL', 'SISTERS', 'ALTHOUGH', 'SATISFIED', 'TO', 'SHINE', 'BY', 'THEIR', 'TALENT', 'OR', 'THEIR', 'BEAUTY', 'THEY', 'CARED', 'LITTLE', 'ABOUT', 'RENDERING', 'THEMSELVES', 'CONSPICUOUS', 'BY', 'THEIR', 'VIRTUE'] +3729-6852-0006-1666: hyp=['SYLVIA', 'DID', 'NOT', 'THINK', 'THAT', 'HER', 'GOOD', 'CONDUCT', 'WAS', 'A', 'MERIT', 'FOR', 'SHE', 'KNEW', 'THAT', 'SHE', 'WAS', 'VIRTUOUS', 'ONLY', 'BECAUSE', 'HER', 'SELF', 'LOVE', 'COMPELLED', 'HER', 'TO', 'BE', 'SO', 'AND', 'SHE', 'NEVER', 'EXHIBITED', 'ANY', 'PRIDE', 'OR', 'ASSUMED', 'ANY', 'SUPERIORITY', 'TOWARDS', 'HER', 'THEATRICAL', 'SISTERS', 'ALTHOUGH', 'SATISFIED', 'TO', 'SHINE', 'BY', 'THEIR', 'TALENT', 'OR', 'THEIR', 'BEAUTY', 'THEY', 'CARED', 'LITTLE', 'ABOUT', 'RENDERING', 'THEMSELVES', 'CONSPICUOUS', 'BY', 'THEIR', 'VIRTUE'] 
+3729-6852-0007-1667: ref=['TWO', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'I', 'SAW', 'HER', 'PERFORM', 'THE', 'CHARACTER', 'OF', 'MARIANNE', 'IN', 'THE', 'COMEDY', 'OF', 'MARIVAUX', 'AND', 'IN', 'SPITE', 'OF', 'HER', 'AGE', 'AND', 'DECLINING', 'HEALTH', 'THE', 'ILLUSION', 'WAS', 'COMPLETE'] +3729-6852-0007-1667: hyp=['TWO', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'I', 'SAW', 'HER', 'PERFORM', 'THE', 'CHARACTER', 'OF', 'MARIANNE', 'IN', 'THE', 'COMEDY', 'OF', 'MARAVO', 'AND', 'IN', 'SPITE', 'OF', 'HER', 'AGE', 'AND', 'DECLINING', 'HEALTH', 'THE', 'ILLUSION', 'WAS', 'COMPLETE'] +3729-6852-0008-1668: ref=['SHE', 'WAS', 'HONOURABLY', 'BURIED', 'IN', 'THE', 'CHURCH', 'OF', 'SAINT', 'SAUVEUR', 'WITHOUT', 'THE', 'SLIGHTEST', 'OPPOSITION', 'FROM', 'THE', 'VENERABLE', 'PRIEST', 'WHO', 'FAR', 'FROM', 'SHARING', 'THE', 'ANTI', 'CHRISTAIN', 'INTOLERANCY', 'OF', 'THE', 'CLERGY', 'IN', 'GENERAL', 'SAID', 'THAT', 'HER', 'PROFESSION', 'AS', 'AN', 'ACTRESS', 'HAD', 'NOT', 'HINDERED', 'HER', 'FROM', 'BEING', 'A', 'GOOD', 'CHRISTIAN', 'AND', 'THAT', 'THE', 'EARTH', 'WAS', 'THE', 'COMMON', 'MOTHER', 'OF', 'ALL', 'HUMAN', 'BEINGS', 'AS', 'JESUS', 'CHRIST', 'HAD', 'BEEN', 'THE', 'SAVIOUR', 'OF', 'ALL', 'MANKIND'] +3729-6852-0008-1668: hyp=['SHE', 'WAS', 'HONOURABLY', 'BURIED', 'IN', 'THE', 'CHURCH', 'OF', 'SAINT', 'SAVER', 'WITHOUT', 'THE', 'SLIGHTEST', 'OPPOSITION', 'FROM', 'THE', 'VENERABLE', 'PRIEST', 'WHO', 'FAR', 'FROM', 'SHARING', 'THE', 'ANTI', 'CHRISTIAN', 'INTOLERANCY', 'OF', 'THE', 'CLERGY', 'IN', 'GENERAL', 'SAID', 'THAT', 'HER', 'PROFESSION', 'AS', 'AN', 'ACTRESS', 'HAD', 'NOT', 'HINDERED', 'HER', 'FROM', 'BEING', 'A', 'GOOD', 'CHRISTIAN', 'AND', 'THAT', 'THE', 'EARTH', 'WAS', 'A', 'COMMON', 'MOTHER', 'OF', 'ALL', 'HUMAN', 'BEINGS', 'AS', 'JESUS', 'CHRIST', 'HAD', 'BEEN', 'THE', 'SAVIOUR', 'OF', 'ALL', 'MANKIND'] +3729-6852-0009-1669: ref=['YOU', 'WILL', 'FORGIVE', 'ME', 'DEAR', 'READER', 'IF', 'I', 'HAVE', 'MADE', 'YOU', 'ATTEND', 'THE', 'FUNERAL', 'OF', 'SILVIA', 'TEN', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'BELIEVE', 'ME', 'I', 'HAVE', 'NO', 'INTENTION', 'OF', 'PERFORMING', 'A', 'MIRACLE', 'YOU', 'MAY', 'CONSOLE', 'YOURSELF', 'WITH', 'THE', 'IDEA', 'THAT', 'I', 'SHALL', 'SPARE', 'YOU', 'THAT', 'UNPLEASANT', 'TASK', 'WHEN', 'POOR', 'SILVIA', 'DIES'] +3729-6852-0009-1669: hyp=['YOU', 'WILL', 'FORGIVE', 'ME', 'DEAR', 'READER', 'IF', 'I', 'HAVE', 'MADE', 'YOU', 'ATTEND', 'THE', 'FUNERAL', 'OF', 'SYLVIA', 'TEN', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'BELIEVE', 'ME', 'I', 'HAVE', 'NO', 'INTENTION', 'OF', 'PERFORMING', 'A', 'MIRACLE', 'YOU', 'MAY', 'CONSOLE', 'YOURSELF', 'WITH', 'THE', 'IDEA', 'THAT', 'I', 'SHALL', 'SPARE', 'YOU', 'THAT', 'UNPLEASANT', 'TASK', 'WHEN', 'POOR', 'SYLVIA', 'DIES'] +3729-6852-0010-1670: ref=['I', 'NEVER', 'HAD', 'ANY', 'FAMILY'] +3729-6852-0010-1670: hyp=['I', 'NEVER', 'HAD', 'ANY', 'FAMILY'] +3729-6852-0011-1671: ref=['I', 'HAD', 'A', 'NAME', 'I', 'BELIEVE', 'IN', 'MY', 'YOUNG', 'DAYS', 'BUT', 'I', 'HAVE', 'FORGOTTEN', 'IT', 'SINCE', 'I', 'HAVE', 'BEEN', 'IN', 'SERVICE'] +3729-6852-0011-1671: hyp=['I', 'HAD', 'A', 'NAME', 'I', 'BELIEVE', 'IN', 'MY', 'YOUNG', 'DAYS', 'BUT', 'I', 'HAVE', 'FORGOTTEN', 'IT', 'SINCE', 'I', 'HAVE', 'BEEN', 'IN', 'SERVICE'] +3729-6852-0012-1672: ref=['I', 'SHALL', 'CALL', 'YOU', 'ESPRIT'] +3729-6852-0012-1672: hyp=['I', 'SHALL', 'CALL', 'YOU', 'A', 'SPREE'] +3729-6852-0013-1673: ref=['YOU', 'DO', 'ME', 'A', 'GREAT', 'HONOUR'] +3729-6852-0013-1673: hyp=['YOU', 'DO', 'ME', 'A', 'GREAT', 'HONOR'] +3729-6852-0014-1674: ref=['HERE', 'GO', 'AND', 'GET', 'ME', 
'CHANGE', 'FOR', 'A', 'LOUIS', 'I', 'HAVE', 'IT', 'SIR'] +3729-6852-0014-1674: hyp=['HERE', 'GO', 'AND', 'GET', 'ME', 'CHANGE', 'FOR', 'A', 'LOUIS', 'I', 'HAVE', 'IT', 'SIR'] +3729-6852-0015-1675: ref=['AT', 'YOUR', 'SERVICE', 'SIR'] +3729-6852-0015-1675: hyp=['AT', 'YOUR', 'SERVICE', 'SIR'] +3729-6852-0016-1676: ref=['MADAME', 'QUINSON', 'BESIDES', 'CAN', 'ANSWER', 'YOUR', 'ENQUIRIES'] +3729-6852-0016-1676: hyp=['MADAME', 'QUINCENT', 'BESIDES', 'CAN', 'ANSWER', 'YOUR', 'INQUIRIES'] +3729-6852-0017-1677: ref=['I', 'SEE', 'A', 'QUANTITY', 'OF', 'CHAIRS', 'FOR', 'HIRE', 'AT', 'THE', 'RATE', 'OF', 'ONE', 'SOU', 'MEN', 'READING', 'THE', 'NEWSPAPER', 'UNDER', 'THE', 'SHADE', 'OF', 'THE', 'TREES', 'GIRLS', 'AND', 'MEN', 'BREAKFASTING', 'EITHER', 'ALONE', 'OR', 'IN', 'COMPANY', 'WAITERS', 'WHO', 'WERE', 'RAPIDLY', 'GOING', 'UP', 'AND', 'DOWN', 'A', 'NARROW', 'STAIRCASE', 'HIDDEN', 'UNDER', 'THE', 'FOLIAGE'] +3729-6852-0017-1677: hyp=['I', 'SEE', 'A', 'QUANTITY', 'OF', 'CHAIRS', 'FOR', 'HIRE', 'AT', 'THE', 'RATE', 'OF', 'ONE', 'SOUS', 'MEN', 'READING', 'THE', 'NEWSPAPER', 'UNDER', 'THE', 'SHADE', 'OF', 'THE', 'TREES', 'GIRLS', 'AND', 'MEN', 'BREAKFASTING', 'EITHER', 'ALONE', 'OR', 'IN', 'COMPANY', 'WAITERS', 'WHO', 'WERE', 'RAPIDLY', 'GOING', 'UP', 'AND', 'DOWN', 'A', 'NARROW', 'STAIRCASE', 'HIDDEN', 'UNDER', 'THE', 'FOLIAGE'] +3729-6852-0018-1678: ref=['I', 'SIT', 'DOWN', 'AT', 'A', 'SMALL', 'TABLE', 'A', 'WAITER', 'COMES', 'IMMEDIATELY', 'TO', 'ENQUIRE', 'MY', 'WISHES'] +3729-6852-0018-1678: hyp=['I', 'SIT', 'DOWN', 'AT', 'A', 'SMALL', 'TABLE', 'A', 'WAITER', 'COMES', 'IMMEDIATELY', 'TO', 'INQUIRE', 'MY', 'WISHES'] +3729-6852-0019-1679: ref=['I', 'TELL', 'HIM', 'TO', 'GIVE', 'ME', 'SOME', 'COFFEE', 'IF', 'IT', 'IS', 'GOOD'] +3729-6852-0019-1679: hyp=['I', 'TELL', 'HIM', 'TO', 'GIVE', 'ME', 'SOME', 'COFFEE', 'IF', 'IT', 'IS', 'GOOD'] +3729-6852-0020-1680: ref=['THEN', 'TURNING', 'TOWARDS', 'ME', 'HE', 'SAYS', 'THAT', 'I', 'LOOK', 'LIKE', 'A', 'FOREIGNER', 'AND', 'WHEN', 'I', 'SAY', 'THAT', 'I', 'AM', 'AN', 'ITALIAN', 'HE', 'BEGINS', 'TO', 'SPEAK', 'TO', 'ME', 'OF', 'THE', 'COURT', 'OF', 'THE', 'CITY', 'OF', 'THE', 'THEATRES', 'AND', 'AT', 'LAST', 'HE', 'OFFERS', 'TO', 'ACCOMPANY', 'ME', 'EVERYWHERE'] +3729-6852-0020-1680: hyp=['THEN', 'TURNING', 'TOWARDS', 'ME', 'HE', 'SAYS', 'THAT', 'I', 'LOOK', 'LIKE', 'A', 'FOREIGNER', 'AND', 'WHEN', 'I', 'SAY', 'THAT', 'I', 'AM', 'AN', 'ITALIAN', 'HE', 'BEGINS', 'TO', 'SPEAK', 'TO', 'ME', 'OF', 'THE', 'CORPS', 'THE', 'CITY', 'OF', 'THE', 'THEATRES', 'AND', 'AT', 'LAST', 'HE', 'OFFERS', 'TO', 'ACCOMPANY', 'ME', 'EVERYWHERE'] +3729-6852-0021-1681: ref=['I', 'THANK', 'HIM', 'AND', 'TAKE', 'MY', 'LEAVE'] +3729-6852-0021-1681: hyp=['I', 'THANK', 'HIM', 'AND', 'TAKE', 'MY', 'LEAVE'] +3729-6852-0022-1682: ref=['I', 'ADDRESS', 'HIM', 'IN', 'ITALIAN', 'AND', 'HE', 'ANSWERS', 'VERY', 'WITTILY', 'BUT', 'HIS', 'WAY', 'OF', 'SPEAKING', 'MAKES', 'ME', 'SMILE', 'AND', 'I', 'TELL', 'HIM', 'WHY'] +3729-6852-0022-1682: hyp=['I', 'ADDRESS', 'HIM', 'IN', 'ITALIAN', 'AND', 'HE', 'ANSWERS', 'VERY', 'WITTILY', 'BUT', 'HIS', 'WAY', 'OF', 'SPEAKING', 'MAKES', 'ME', 'SMILE', 'AND', 'I', 'TELL', 'HIM', 'WHY'] +3729-6852-0023-1683: ref=['MY', 'REMARK', 'PLEASES', 'HIM', 'BUT', 'I', 'SOON', 'PROVE', 'TO', 'HIM', 'THAT', 'IT', 'IS', 'NOT', 'THE', 'RIGHT', 'WAY', 'TO', 'SPEAK', 'HOWEVER', 'PERFECT', 'MAY', 'HAVE', 'BEEN', 'THE', 'LANGUAGE', 'OF', 'THAT', 'ANCIENT', 'WRITER'] +3729-6852-0023-1683: hyp=['MY', 'REMARK', 'PLEASES', 'HIM', 'BUT', 'I', 'SOON', 'PROVE', 'TO', 'HIM', 'THAT', 'IT', 
'IS', 'NOT', 'THE', 'RIGHT', 'WAY', 'TO', 'SPEAK', 'HOWEVER', 'PERFECT', 'MAY', 'HAVE', 'BEEN', 'THE', 'LANGUAGE', 'OF', 'THAT', 'ANCIENT', 'WRITER'] +3729-6852-0024-1684: ref=['I', 'SEE', 'A', 'CROWD', 'IN', 'ONE', 'CORNER', 'OF', 'THE', 'GARDEN', 'EVERYBODY', 'STANDING', 'STILL', 'AND', 'LOOKING', 'UP'] +3729-6852-0024-1684: hyp=['I', 'SEE', 'A', 'CROWD', 'IN', 'ONE', 'CORNER', 'OF', 'THE', 'GARDEN', 'EVERYBODY', 'STANDING', 'STILL', 'AND', 'LOOKING', 'UP'] +3729-6852-0025-1685: ref=['IS', 'THERE', 'NOT', 'A', 'MERIDIAN', 'EVERYWHERE'] +3729-6852-0025-1685: hyp=['IS', 'THERE', 'NOT', 'A', 'MERIDIAN', 'EVERYWHERE'] +3729-6852-0026-1686: ref=['YES', 'BUT', 'THE', 'MERIDIAN', 'OF', 'THE', 'PALAIS', 'ROYAL', 'IS', 'THE', 'MOST', 'EXACT'] +3729-6852-0026-1686: hyp=['YES', 'BUT', 'THE', 'MERIDIAN', 'OF', 'THE', 'PALAIS', 'ROYAL', 'IS', 'THE', 'MOST', 'EXACT'] +3729-6852-0027-1687: ref=['THAT', 'IS', 'TRUE', 'BADAUDERIE'] +3729-6852-0027-1687: hyp=['THAT', 'IS', 'TRUE', 'BAD', 'DEALT', 'GREE'] +3729-6852-0028-1688: ref=['ALL', 'THESE', 'HONEST', 'PERSONS', 'ARE', 'WAITING', 'THEIR', 'TURN', 'TO', 'GET', 'THEIR', 'SNUFF', 'BOXES', 'FILLED'] +3729-6852-0028-1688: hyp=['ALL', 'THESE', 'HONEST', 'PERSONS', 'ARE', 'WAITING', 'THEIR', 'TURN', 'TO', 'GET', 'THEIR', 'SNUFF', 'BOXES', 'FILLED'] +3729-6852-0029-1689: ref=['IT', 'IS', 'SOLD', 'EVERYWHERE', 'BUT', 'FOR', 'THE', 'LAST', 'THREE', 'WEEKS', 'NOBODY', 'WILL', 'USE', 'ANY', 'SNUFF', 'BUT', 'THAT', 'SOLD', 'AT', 'THE', 'CIVET', 'CAT'] +3729-6852-0029-1689: hyp=['IT', 'IS', 'SOLD', 'EVERYWHERE', 'BUT', 'FOR', 'THE', 'LAST', 'THREE', 'WEEKS', 'NOBODY', 'WILL', 'USE', 'ANY', 'SNUFF', 'BUT', "THAT'S", 'SOLD', 'AT', 'THE', 'SAVE', 'CAT'] +3729-6852-0030-1690: ref=['IS', 'IT', 'BETTER', 'THAN', 'ANYWHERE', 'ELSE'] +3729-6852-0030-1690: hyp=['IS', 'IT', 'BETTER', 'THAN', 'ANYWHERE', 'ELSE'] +3729-6852-0031-1691: ref=['BUT', 'HOW', 'DID', 'SHE', 'MANAGE', 'TO', 'RENDER', 'IT', 'SO', 'FASHIONABLE'] +3729-6852-0031-1691: hyp=['BUT', 'HOW', 'DID', 'SHE', 'MANAGE', 'TO', 'RENDER', 'IT', 'SO', 'FASHIONABLE'] +3729-6852-0032-1692: ref=['SIMPLY', 'BY', 'STOPPING', 'HER', 'CARRIAGE', 'TWO', 'OR', 'THREE', 'TIMES', 'BEFORE', 'THE', 'SHOP', 'TO', 'HAVE', 'HER', 'SNUFF', 'BOX', 'FILLED', 'AND', 'BY', 'SAYING', 'ALOUD', 'TO', 'THE', 'YOUNG', 'GIRL', 'WHO', 'HANDED', 'BACK', 'THE', 'BOX', 'THAT', 'HER', 'SNUFF', 'WAS', 'THE', 'VERY', 'BEST', 'IN', 'PARIS'] +3729-6852-0032-1692: hyp=['SIMPLY', 'BY', 'STOPPING', 'HER', 'CARRIAGE', 'TWO', 'OR', 'THREE', 'TIMES', 'BEFORE', 'THE', 'SHOP', 'TO', 'HAVE', 'HER', 'SNUFF', 'BOX', 'FILLED', 'AND', 'BY', 'SAYING', 'ALOUD', 'TO', 'THE', 'YOUNG', 'GIRL', 'WHO', 'HANDED', 'BACK', 'THE', 'BOX', 'THAT', 'HER', 'SNUFF', 'WAS', 'THE', 'VERY', 'BEST', 'IN', 'PARIS'] +3729-6852-0033-1693: ref=['YOU', 'ARE', 'NOW', 'IN', 'THE', 'ONLY', 'COUNTRY', 'IN', 'THE', 'WORLD', 'WHERE', 'WIT', 'CAN', 'MAKE', 'A', 'FORTUNE', 'BY', 'SELLING', 'EITHER', 'A', 'GENUINE', 'OR', 'A', 'FALSE', 'ARTICLE', 'IN', 'THE', 'FIRST', 'CASE', 'IT', 'RECEIVES', 'THE', 'WELCOME', 'OF', 'INTELLIGENT', 'AND', 'TALENTED', 'PEOPLE', 'AND', 'IN', 'THE', 'SECOND', 'FOOLS', 'ARE', 'ALWAYS', 'READY', 'TO', 'REWARD', 'IT', 'FOR', 'SILLINESS', 'IS', 'TRULY', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'PEOPLE', 'HERE', 'AND', 'HOWEVER', 'WONDERFUL', 'IT', 'MAY', 'APPEAR', 'SILLINESS', 'IS', 'THE', 'DAUGHTER', 'OF', 'WIT'] +3729-6852-0033-1693: hyp=['YOU', 'ARE', 'NOW', 'IN', 'THE', 'ONLY', 'COUNTRY', 'IN', 'THE', 'WORLD', 'WHERE', 'WIT', 'CAN', 'MAKE', 'A', 'FORTUNE', 'BY', 'SELLING', 
'EITHER', 'A', 'GENUINE', 'OR', 'A', 'FALSE', 'ARTICLE', 'IN', 'THE', 'FIRST', 'CASE', 'IT', 'RECEIVES', 'THE', 'WELCOME', 'OF', 'INTELLIGENT', 'AND', 'TALENTED', 'PEOPLE', 'AND', 'IN', 'THE', 'SECOND', 'FOOLS', 'ARE', 'ALWAYS', 'READY', 'TO', 'REWARD', 'IT', 'FOR', 'SILLINESS', 'IS', 'TRULY', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'PEOPLE', 'HERE', 'AND', 'HOWEVER', 'WONDERFUL', 'IT', 'MAY', 'APPEAR', 'SILLINESS', 'IS', 'THE', 'DAUGHTER', 'OF', 'WIT'] +3729-6852-0034-1694: ref=['LET', 'A', 'MAN', 'RUN', 'AND', 'EVERYBODY', 'WILL', 'RUN', 'AFTER', 'HIM', 'THE', 'CROWD', 'WILL', 'NOT', 'STOP', 'UNLESS', 'THE', 'MAN', 'IS', 'PROVED', 'TO', 'BE', 'MAD', 'BUT', 'TO', 'PROVE', 'IT', 'IS', 'INDEED', 'A', 'DIFFICULT', 'TASK', 'BECAUSE', 'WE', 'HAVE', 'A', 'CROWD', 'OF', 'MEN', 'WHO', 'MAD', 'FROM', 'THEIR', 'BIRTH', 'ARE', 'STILL', 'CONSIDERED', 'WISE'] +3729-6852-0034-1694: hyp=['LET', 'A', 'MAN', 'RUN', 'AND', 'EVERYBODY', 'WILL', 'RUN', 'AFTER', 'HIM', 'THE', 'CROWD', 'WILL', 'NOT', 'STOP', 'UNLESS', 'THE', 'MAN', 'IS', 'PROVED', 'TO', 'BE', 'MAD', 'BUT', 'TO', 'PROVE', 'IT', 'IS', 'INDEED', 'A', 'DIFFICULT', 'TASK', 'BECAUSE', 'WE', 'HAVE', 'A', 'CROWD', 'OF', 'MEN', 'WHO', 'MAD', 'FROM', 'THEIR', 'BIRTH', 'ARE', 'STILL', 'CONSIDERED', 'WISE'] +3729-6852-0035-1695: ref=['IT', 'SEEMS', 'TO', 'ME', 'I', 'REPLIED', 'THAT', 'SUCH', 'APPROVAL', 'SUCH', 'RATIFICATION', 'OF', 'THE', 'OPINION', 'EXPRESSED', 'BY', 'THE', 'KING', 'THE', 'PRINCES', 'OF', 'THE', 'BLOOD', 'ET', 'CETERA', 'IS', 'RATHER', 'A', 'PROOF', 'OF', 'THE', 'AFFECTION', 'FELT', 'FOR', 'THEM', 'BY', 'THE', 'NATION', 'FOR', 'THE', 'FRENCH', 'CARRY', 'THAT', 'AFFECTION', 'TO', 'SUCH', 'AN', 'EXTENT', 'THAT', 'THEY', 'BELIEVE', 'THEM', 'INFALLIBLE'] +3729-6852-0035-1695: hyp=['IT', 'SEEMS', 'TO', 'ME', 'I', 'REPLIED', 'THAT', 'SUCH', 'APPROVAL', 'SUCH', 'RATIFICATION', 'OF', 'THE', 'OPINION', 'EXPRESSED', 'BY', 'THE', 'KING', 'THE', 'PRINCES', 'OF', 'THE', 'BLOOD', 'ET', 'CETERA', 'IS', 'RATHER', 'A', 'PROOF', 'OF', 'THE', 'AFFECTION', 'FELT', 'FOR', 'THEM', 'BY', 'THE', 'NATION', 'FOR', 'THE', 'FRENCH', 'CARRY', 'THAT', 'AFFECTION', 'TO', 'SUCH', 'AN', 'EXTENT', 'THAT', 'THEY', 'BELIEVED', 'THEM', 'INFALLIBLE'] +3729-6852-0036-1696: ref=['WHEN', 'THE', 'KING', 'COMES', 'TO', 'PARIS', 'EVERYBODY', 'CALLS', 'OUT', 'VIVE', 'LE', 'ROI'] +3729-6852-0036-1696: hyp=['WHEN', 'THE', 'KING', 'COMES', 'TO', 'PARIS', 'EVERYBODY', 'CALLS', 'OUT', 'VIVE', 'LE', 'ROY'] +3729-6852-0037-1697: ref=['SHE', 'INTRODUCED', 'ME', 'TO', 'ALL', 'HER', 'GUESTS', 'AND', 'GAVE', 'ME', 'SOME', 'PARTICULARS', 'RESPECTING', 'EVERY', 'ONE', 'OF', 'THEM'] +3729-6852-0037-1697: hyp=['SHE', 'INTRODUCED', 'ME', 'TO', 'ALL', 'HER', 'GUESTS', 'AND', 'GAVE', 'ME', 'SOME', 'PARTICULARS', 'RESPECTING', 'EVERY', 'ONE', 'OF', 'THEM'] +3729-6852-0038-1698: ref=['WHAT', 'SIR', 'I', 'SAID', 'TO', 'HIM', 'AM', 'I', 'FORTUNATE', 'ENOUGH', 'TO', 'SEE', 'YOU'] +3729-6852-0038-1698: hyp=['WHAT', 'SIR', 'I', 'SAID', 'TO', 'HIM', 'AM', 'I', 'FORTUNATE', 'ENOUGH', 'TO', 'SEE', 'YOU'] +3729-6852-0039-1699: ref=['HE', 'HIMSELF', 'RECITED', 'THE', 'SAME', 'PASSAGE', 'IN', 'FRENCH', 'AND', 'POLITELY', 'POINTED', 'OUT', 'THE', 'PARTS', 'IN', 'WHICH', 'HE', 'THOUGHT', 'THAT', 'I', 'HAD', 'IMPROVED', 'ON', 'THE', 'ORIGINAL'] +3729-6852-0039-1699: hyp=['HE', 'HIMSELF', 'RECITED', 'THE', 'SAME', 'PASSAGE', 'IN', 'FRENCH', 'AND', 'POLITELY', 'POINTED', 'OUT', 'THE', 'PARTS', 'IN', 'WHICH', 'HE', 'THOUGHT', 'THAT', 'I', 'HAD', 'IMPROVED', 'ON', 'THE', 'ORIGINAL'] +3729-6852-0040-1700: ref=['FOR', 
'THE', 'FIRST', 'DAY', 'SIR', 'I', 'THINK', 'THAT', 'WHAT', 'YOU', 'HAVE', 'DONE', 'GIVES', 'GREAT', 'HOPES', 'OF', 'YOU', 'AND', 'WITHOUT', 'ANY', 'DOUBT', 'YOU', 'WILL', 'MAKE', 'RAPID', 'PROGRESS'] +3729-6852-0040-1700: hyp=['FOR', 'THE', 'FIRST', 'DAY', 'SIR', 'I', 'THINK', 'THAT', 'WHAT', 'YOU', 'HAVE', 'DONE', 'GIVES', 'GREAT', 'HOPES', 'OF', 'YOU', 'AND', 'WITHOUT', 'ANY', 'DOUBT', 'YOU', 'WILL', 'MAKE', 'RAPID', 'PROGRESS'] +3729-6852-0041-1701: ref=['I', 'BELIEVE', 'IT', 'SIR', 'AND', 'THAT', 'IS', 'WHAT', 'I', 'FEAR', 'THEREFORE', 'THE', 'PRINCIPAL', 'OBJECT', 'OF', 'MY', 'VISIT', 'HERE', 'IS', 'TO', 'DEVOTE', 'MYSELF', 'ENTIRELY', 'TO', 'THE', 'STUDY', 'OF', 'THE', 'FRENCH', 'LANGUAGE'] +3729-6852-0041-1701: hyp=['I', 'BELIEVE', 'IT', 'SIR', 'AND', 'THAT', 'IS', 'WHAT', 'I', 'FEAR', 'THEREFORE', 'THE', 'PRINCIPAL', 'OBJECT', 'OF', 'MY', 'VISIT', 'HERE', 'IS', 'TO', 'DEVOTE', 'MYSELF', 'ENTIRELY', 'TO', 'THE', 'STUDY', 'OF', 'THE', 'FRENCH', 'LANGUAGE'] +3729-6852-0042-1702: ref=['I', 'AM', 'A', 'VERY', 'UNPLEASANT', 'PUPIL', 'ALWAYS', 'ASKING', 'QUESTIONS', 'CURIOUS', 'TROUBLESOME', 'INSATIABLE', 'AND', 'EVEN', 'SUPPOSING', 'THAT', 'I', 'COULD', 'MEET', 'WITH', 'THE', 'TEACHER', 'I', 'REQUIRE', 'I', 'AM', 'AFRAID', 'I', 'AM', 'NOT', 'RICH', 'ENOUGH', 'TO', 'PAY', 'HIM'] +3729-6852-0042-1702: hyp=['I', 'AM', 'A', 'VERY', 'UNPLEASANT', 'PUPIL', 'ALWAYS', 'ASKING', 'QUESTIONS', 'CURIOUS', 'TROUBLESOME', 'INSATIABLE', 'AND', 'EVEN', 'SUPPOSING', 'THAT', 'I', 'COULD', 'MEET', 'WITH', 'THE', 'TEACHER', 'I', 'REQUIRE', 'I', 'AM', 'AFRAID', 'I', 'AM', 'NOT', 'RICH', 'ENOUGH', 'TO', 'PAY', 'HIM'] +3729-6852-0043-1703: ref=['I', 'RESIDE', 'IN', 'THE', 'MARAIS', 'RUE', 'DE', 'DOUZE', 'PORTES'] +3729-6852-0043-1703: hyp=['I', 'RESIDE', 'IN', 'THE', 'MARA', 'GRUE', 'DE', 'DUSPORT'] +3729-6852-0044-1704: ref=['I', 'WILL', 'MAKE', 'YOU', 'TRANSLATE', 'THEM', 'INTO', 'FRENCH', 'AND', 'YOU', 'NEED', 'NOT', 'BE', 'AFRAID', 'OF', 'MY', 'FINDING', 'YOU', 'INSATIABLE'] +3729-6852-0044-1704: hyp=['I', 'WILL', 'MAKE', 'YOU', 'TRANSLATE', 'THEM', 'INTO', 'FRENCH', 'AND', 'YOU', 'NEED', 'NOT', 'BE', 'AFRAID', 'OF', 'MY', 'FINDING', 'YOU', 'INSATIABLE'] +3729-6852-0045-1705: ref=['HE', 'HAD', 'A', 'GOOD', 'APPETITE', 'COULD', 'TELL', 'A', 'GOOD', 'STORY', 'WITHOUT', 'LAUGHING', 'WAS', 'CELEBRATED', 'FOR', 'HIS', 'WITTY', 'REPARTEES', 'AND', 'HIS', 'SOCIABLE', 'MANNERS', 'BUT', 'HE', 'SPENT', 'HIS', 'LIFE', 'AT', 'HOME', 'SELDOM', 'GOING', 'OUT', 'AND', 'SEEING', 'HARDLY', 'ANYONE', 'BECAUSE', 'HE', 'ALWAYS', 'HAD', 'A', 'PIPE', 'IN', 'HIS', 'MOUTH', 'AND', 'WAS', 'SURROUNDED', 'BY', 'AT', 'LEAST', 'TWENTY', 'CATS', 'WITH', 'WHICH', 'HE', 'WOULD', 'AMUSE', 'HIMSELF', 'ALL', 'DAY'] +3729-6852-0045-1705: hyp=['HE', 'HAD', 'A', 'GOOD', 'APPETITE', 'COTEL', 'A', 'GOOD', 'STORY', 'WITHOUT', 'LAUGHING', 'WAS', 'CELEBRATED', 'FOR', 'HIS', 'WITTY', 'REPARTEES', 'AND', 'HIS', 'SOCIABLE', 'MANNERS', 'BUT', 'HE', 'SPENT', 'HIS', 'LIFE', 'AT', 'HOME', 'SELDOM', 'GOING', 'OUT', 'AND', 'SEEING', 'HARDLY', 'ANY', 'ONE', 'BECAUSE', 'HE', 'ALWAYS', 'HAD', 'A', 'PIPE', 'IN', 'HIS', 'MOUTH', 'AND', 'WAS', 'SURROUNDED', 'BY', 'AT', 'LEAST', 'TWENTY', 'CATS', 'WITH', 'WHICH', 'HE', 'WOULD', 'AMUSE', 'HIMSELF', 'ALL', 'DAY'] +3729-6852-0046-1706: ref=['HIS', 'HOUSEKEEPER', 'HAD', 'THE', 'MANAGEMENT', 'OF', 'EVERYTHING', 'SHE', 'NEVER', 'ALLOWED', 'HIM', 'TO', 'BE', 'IN', 'NEED', 'OF', 'ANYTHING', 'AND', 'SHE', 'GAVE', 'NO', 'ACCOUNT', 'OF', 'HIS', 'MONEY', 'WHICH', 'SHE', 'KEPT', 'ALTOGETHER', 'BECAUSE', 'HE', 'NEVER', 
'ASKED', 'HER', 'TO', 'RENDER', 'ANY', 'ACCOUNTS'] +3729-6852-0046-1706: hyp=['HIS', 'HOUSEKEEPER', 'HAD', 'THE', 'MANAGEMENT', 'OF', 'EVERYTHING', 'SHE', 'NEVER', 'ALLOWED', 'HIM', 'TO', 'BE', 'IN', 'NEED', 'OF', 'ANYTHING', 'AND', 'SHE', 'GAVE', 'NO', 'ACCOUNT', 'OF', 'HIS', 'MONEY', 'WHICH', 'SHE', 'KEPT', 'ALTOGETHER', 'BECAUSE', 'HE', 'NEVER', 'ASKED', 'HER', 'TO', 'RENDER', 'ANY', 'ACCOUNTS'] +4077-13751-0000-1258: ref=['ON', 'THE', 'SIXTH', 'OF', 'APRIL', 'EIGHTEEN', 'THIRTY', 'THE', 'CHURCH', 'OF', 'JESUS', 'CHRIST', 'OF', 'LATTER', 'DAY', 'SAINTS', 'WAS', 'FORMALLY', 'ORGANIZED', 'AND', 'THUS', 'TOOK', 'ON', 'A', 'LEGAL', 'EXISTENCE'] +4077-13751-0000-1258: hyp=['ON', 'THE', 'SIXTH', 'OF', 'APRIL', 'EIGHTEEN', 'THIRTY', 'THE', 'CHURCH', 'OF', 'JESUS', 'CHRIST', 'OF', 'LATTER', 'DAY', 'SAINTS', 'WAS', 'FORMERLY', 'ORGANIZED', 'AND', 'THUS', 'TOOK', 'ON', 'A', 'LEGAL', 'EXISTENCE'] +4077-13751-0001-1259: ref=['ITS', 'ORIGIN', 'WAS', 'SMALL', 'A', 'GERM', 'AN', 'INSIGNIFICANT', 'SEED', 'HARDLY', 'TO', 'BE', 'THOUGHT', 'OF', 'AS', 'LIKELY', 'TO', 'AROUSE', 'OPPOSITION'] +4077-13751-0001-1259: hyp=['ITS', 'ORIGIN', 'WAS', 'SMALL', 'A', 'GERM', 'AN', 'INSIGNIFICANT', 'SEED', 'HARDLY', 'TO', 'BE', 'THOUGHT', 'OF', 'AS', 'LIKELY', 'TO', 'AROUSE', 'OPPOSITION'] +4077-13751-0002-1260: ref=['INSTEAD', 'OF', 'BUT', 'SIX', 'REGULARLY', 'AFFILIATED', 'MEMBERS', 'AND', 'AT', 'MOST', 'TWO', 'SCORE', 'OF', 'ADHERENTS', 'THE', 'ORGANIZATION', 'NUMBERS', 'TODAY', 'MANY', 'HUNDRED', 'THOUSAND', 'SOULS'] +4077-13751-0002-1260: hyp=['INSTEAD', 'OF', 'BUT', 'SIX', 'REGULARLY', 'AFFILIATED', 'MEMBERS', 'AND', 'AT', 'MOST', 'TWO', 'SCORE', 'OF', 'ADHERENTS', 'THE', 'ORGANIZATION', 'NUMBERS', 'TO', 'DAY', 'MANY', 'HUNDRED', 'THOUSAND', 'SOULS'] +4077-13751-0003-1261: ref=['IN', 'PLACE', 'OF', 'A', 'SINGLE', 'HAMLET', 'IN', 'THE', 'SMALLEST', 'CORNER', 'OF', 'WHICH', 'THE', 'MEMBERS', 'COULD', 'HAVE', 'CONGREGATED', 'THERE', 'NOW', 'ARE', 'ABOUT', 'SEVENTY', 'STAKES', 'OF', 'ZION', 'AND', 'ABOUT', 'SEVEN', 'HUNDRED', 'ORGANIZED', 'WARDS', 'EACH', 'WARD', 'AND', 'STAKE', 'WITH', 'ITS', 'FULL', 'COMPLEMENT', 'OF', 'OFFICERS', 'AND', 'PRIESTHOOD', 'ORGANIZATIONS'] +4077-13751-0003-1261: hyp=['IN', 'PLACE', 'OF', 'A', 'SINGLE', 'HAMLET', 'IN', 'THE', 'SMALLEST', 'CORNER', 'OF', 'WHICH', 'THE', 'MEMBERS', 'COULD', 'HAVE', 'CONGREGATED', 'THERE', 'NOW', 'ARE', 'ABOUT', 'SEVENTY', 'STAKES', 'OF', 'ZION', 'AND', 'ABOUT', 'SEVEN', 'HUNDRED', 'ORGANIZED', 'WARDS', 'EACH', 'WARD', 'AND', 'STAKE', 'WITH', 'ITS', 'FULL', 'COMPLEMENT', 'OF', 'OFFICERS', 'AND', 'PRIESTHOOD', 'ORGANIZATIONS'] +4077-13751-0004-1262: ref=['THE', 'PRACTISE', 'OF', 'GATHERING', 'ITS', 'PROSELYTES', 'INTO', 'ONE', 'PLACE', 'PREVENTS', 'THE', 'BUILDING', 'UP', 'AND', 'STRENGTHENING', 'OF', 'FOREIGN', 'BRANCHES', 'AND', 'INASMUCH', 'AS', 'EXTENSIVE', 'AND', 'STRONG', 'ORGANIZATIONS', 'ARE', 'SELDOM', 'MET', 'WITH', 'ABROAD', 'VERY', 'ERRONEOUS', 'IDEAS', 'EXIST', 'CONCERNING', 'THE', 'STRENGTH', 'OF', 'THE', 'CHURCH'] +4077-13751-0004-1262: hyp=['THE', 'PRACTICE', 'OF', 'GATHERING', 'ITS', 'PROSELYTES', 'INTO', 'ONE', 'PLACE', 'PREVENTS', 'THE', 'BILLING', 'UP', 'AND', 'STRENGTHENING', 'OF', 'FOREIGN', 'BRANCHES', 'AND', 'INASMUCH', 'AS', 'EXTENSIVE', 'AND', 'STRONG', 'ORGANIZATIONS', 'ARE', 'SELDOM', 'MET', 'WITH', 'ABROAD', 'VERY', 'ERRONEOUS', 'IDEAS', 'EXIST', 'CONCERNING', 'THE', 'STRENGTH', 'OF', 'THE', 'CHURCH'] +4077-13751-0005-1263: ref=['NEVERTHELESS', 'THE', 'MUSTARD', 'SEED', 'AMONG', 'THE', 'SMALLEST', 'OF', 'ALL', 'SEEDS', 'HAS', 
'ATTAINED', 'THE', 'PROPORTIONS', 'OF', 'A', 'TREE', 'AND', 'THE', 'BIRDS', 'OF', 'THE', 'AIR', 'ARE', 'NESTING', 'IN', 'ITS', 'BRANCHES', 'THE', 'ACORN', 'IS', 'NOW', 'AN', 'OAK', 'OFFERING', 'PROTECTION', 'AND', 'THE', 'SWEETS', 'OF', 'SATISFACTION', 'TO', 'EVERY', 'EARNEST', 'PILGRIM', 'JOURNEYING', 'ITS', 'WAY', 'FOR', 'TRUTH'] +4077-13751-0005-1263: hyp=['NEVERTHELESS', 'THE', 'MUSTARD', 'SEED', 'AMONG', 'THE', 'SMALLEST', 'OF', 'ALL', 'SEEDS', 'HESITATED', 'THE', 'PROPORTIONS', 'OF', 'A', 'TREE', 'AND', 'THE', 'BIRDS', 'OF', 'THE', 'AIR', 'ARE', 'NESTING', 'IN', 'ITS', 'BRANCHES', 'THE', 'ACORN', 'IS', 'NOW', 'IN', 'OAK', 'OFFERING', 'PROTECTION', 'AND', 'THE', 'SWEETS', 'OF', 'SATISFACTION', 'TO', 'EVERY', 'EARNEST', 'PILGRIM', 'JOURNEYING', 'ITS', 'WAY', 'FOR', 'TRUTH'] +4077-13751-0006-1264: ref=['THEIR', 'EYES', 'WERE', 'FROM', 'THE', 'FIRST', 'TURNED', 'IN', 'ANTICIPATION', 'TOWARD', 'THE', 'EVENING', 'SUN', 'NOT', 'MERELY', 'THAT', 'THE', 'WORK', 'OF', 'PROSELYTING', 'SHOULD', 'BE', 'CARRIED', 'ON', 'IN', 'THE', 'WEST', 'BUT', 'THAT', 'THE', 'HEADQUARTERS', 'OF', 'THE', 'CHURCH', 'SHOULD', 'BE', 'THERE', 'ESTABLISHED'] +4077-13751-0006-1264: hyp=['THEIR', 'EYES', 'WERE', 'FROM', 'THE', 'FIRST', 'TURNED', 'IN', 'ANTICIPATION', 'TOWARD', 'THE', 'EVENING', 'SUN', 'NOT', 'MERELY', 'THAT', 'THE', 'WORK', 'OF', 'PROSELLING', 'SHOULD', 'BE', 'CARRIED', 'ON', 'IN', 'THE', 'WEST', 'BUT', 'THAT', 'THE', 'HEADQUARTERS', 'OF', 'THE', 'CHURCH', 'SHOULD', 'BE', 'THEIR', 'ESTABLISHED'] +4077-13751-0007-1265: ref=['THE', 'BOOK', 'OF', 'MORMON', 'HAD', 'TAUGHT', 'THE', 'PEOPLE', 'THE', 'TRUE', 'ORIGIN', 'AND', 'DESTINY', 'OF', 'THE', 'AMERICAN', 'INDIANS', 'AND', 'TOWARD', 'THIS', 'DARK', 'SKINNED', 'REMNANT', 'OF', 'A', 'ONCE', 'MIGHTY', 'PEOPLE', 'THE', 'MISSIONARIES', 'OF', 'MORMONISM', 'EARLY', 'TURNED', 'THEIR', 'EYES', 'AND', 'WITH', 'THEIR', 'EYES', 'WENT', 'THEIR', 'HEARTS', 'AND', 'THEIR', 'HOPES'] +4077-13751-0007-1265: hyp=['THE', 'BOOK', 'OR', 'MORMON', 'HAD', 'TAUGHT', 'THAT', 'PEOPLE', 'THE', 'TRUE', 'ORIGIN', 'AND', 'DESTINY', 'OF', 'THE', 'AMERICAN', 'INDIANS', 'AND', 'TOWARD', 'THIS', 'DARK', 'SKINNED', 'REMNANT', 'OF', 'A', 'ONCE', 'MIGHTY', 'PEOPLE', 'THE', 'MISSIONARIES', 'OF', 'MORMONISM', 'EARLY', 'TURNED', 'THEIR', 'EYES', 'AND', 'WITH', 'THEIR', 'EYES', 'WENT', 'THEIR', 'HEARTS', 'AND', 'THEIR', 'HOPES'] +4077-13751-0008-1266: ref=['IT', 'IS', 'NOTABLE', 'THAT', 'THE', 'INDIAN', 'TRIBES', 'HAVE', 'GENERALLY', 'REGARDED', 'THE', 'RELIGION', 'OF', 'THE', 'LATTER', 'DAY', 'SAINTS', 'WITH', 'FAVOR', 'SEEING', 'IN', 'THE', 'BOOK', 'OF', 'MORMON', 'STRIKING', 'AGREEMENT', 'WITH', 'THEIR', 'OWN', 'TRADITIONS'] +4077-13751-0008-1266: hyp=['IT', 'IS', 'NOTABLE', 'THAT', 'THE', 'INDIAN', 'TRIBES', 'HAVE', 'GENERALLY', 'REGARDED', 'THEIR', 'RELIGION', 'OF', 'THE', 'LATTER', 'DAY', 'SAINTS', 'WITH', 'FAVOR', 'SEEING', 'IN', 'THE', 'BOOK', 'A', 'MORMON', 'STRIKING', 'AGREEMENT', 'WITH', 'THEIR', 'OWN', 'TRADITIONS'] +4077-13751-0009-1267: ref=['THE', 'FIRST', 'WELL', 'ESTABLISHED', 'SEAT', 'OF', 'THE', 'CHURCH', 'WAS', 'IN', 'THE', 'PRETTY', 'LITTLE', 'TOWN', 'OF', 'KIRTLAND', 'OHIO', 'ALMOST', 'WITHIN', 'SIGHT', 'OF', 'LAKE', 'ERIE', 'AND', 'HERE', 'SOON', 'ROSE', 'THE', 'FIRST', 'TEMPLE', 'OF', 'MODERN', 'TIMES'] +4077-13751-0009-1267: hyp=['THE', 'FIRST', 'WELL', 'ESTABLISHED', 'SEAT', 'OF', 'THE', 'CHURCH', 'WAS', 'IN', 'THE', 'PRETTY', 'LITTLE', 'TOWN', 'OF', 'CURTLEND', 'OHIO', 'ALMOST', 'WITHIN', 'SIGHT', 'OF', 'LAKE', 'ERIE', 'AND', 'HERE', 'SOON', 'ROSE', 'THE', 'FIRST', 
'TEMPLE', 'OF', 'MODERN', 'TIMES'] +4077-13751-0010-1268: ref=['TO', 'THE', 'FERVENT', 'LATTER', 'DAY', 'SAINT', 'A', 'TEMPLE', 'IS', 'NOT', 'SIMPLY', 'A', 'CHURCH', 'BUILDING', 'A', 'HOUSE', 'FOR', 'RELIGIOUS', 'ASSEMBLY'] +4077-13751-0010-1268: hyp=['TO', 'THE', 'FERVENT', 'LATTER', 'DAY', 'SAINT', 'A', 'TEMPLE', 'IS', 'NOT', 'SIMPLY', 'A', 'CHURCH', 'BUILDING', 'A', 'HOUSE', 'FOR', 'RELIGIOUS', 'ASSEMBLY'] +4077-13751-0011-1269: ref=['SOON', 'THOUSANDS', 'OF', 'CONVERTS', 'HAD', 'RENTED', 'OR', 'PURCHASED', 'HOMES', 'IN', 'MISSOURI', 'INDEPENDENCE', 'JACKSON', 'COUNTY', 'BEING', 'THEIR', 'CENTER', 'BUT', 'FROM', 'THE', 'FIRST', 'THEY', 'WERE', 'UNPOPULAR', 'AMONG', 'THE', 'MISSOURIANS'] +4077-13751-0011-1269: hyp=['SOON', 'THOUSANDS', 'OF', 'CONVERTS', 'HAD', 'RENTED', 'OR', 'PURCHASED', 'HOMES', 'IN', 'MISSOURI', 'INDEPENDENCE', 'JACKSON', 'COUNTY', 'BEING', 'THEIR', 'CENTER', 'BUT', 'FROM', 'THE', 'FIRST', 'THEY', 'WERE', 'UNPOPULAR', 'AMONG', 'THE', 'MISSOURIENS'] +4077-13751-0012-1270: ref=['THE', 'LIEUTENANT', 'GOVERNOR', 'LILBURN', 'W', 'BOGGS', 'AFTERWARD', 'GOVERNOR', 'WAS', 'A', 'PRONOUNCED', 'MORMON', 'HATER', 'AND', 'THROUGHOUT', 'THE', 'PERIOD', 'OF', 'THE', 'TROUBLES', 'HE', 'MANIFESTED', 'SYMPATHY', 'WITH', 'THE', 'PERSECUTORS'] +4077-13751-0012-1270: hyp=['THE', 'LIEUTENANT', 'GOVERNOR', 'LITTLE', 'BURN', 'W', 'BOGS', 'AFTERWARD', 'GOVERNOR', 'WAS', 'A', 'PRONOUNCED', 'MORMON', 'HATER', 'AND', 'THROUGHOUT', 'THE', 'PERIOD', 'OF', 'THE', 'TROUBLES', 'HE', 'MANIFEST', 'HIS', 'SYMPATHY', 'WITH', 'THE', 'PERSECUTORS'] +4077-13751-0013-1271: ref=['THEIR', 'SUFFERINGS', 'HAVE', 'NEVER', 'YET', 'BEEN', 'FITLY', 'CHRONICLED', 'BY', 'HUMAN', 'SCRIBE'] +4077-13751-0013-1271: hyp=['THEIR', 'SUFFERINGS', 'HAVE', 'NEVER', 'YET', 'BEEN', 'FITLY', 'CHRONICLED', 'BY', 'HUMAN', 'SCRIBE'] +4077-13751-0014-1272: ref=['MAKING', 'THEIR', 'WAY', 'ACROSS', 'THE', 'RIVER', 'MOST', 'OF', 'THE', 'REFUGEES', 'FOUND', 'SHELTER', 'AMONG', 'THE', 'MORE', 'HOSPITABLE', 'PEOPLE', 'OF', 'CLAY', 'COUNTY', 'AND', 'AFTERWARD', 'ESTABLISHED', 'THEMSELVES', 'IN', 'CALDWELL', 'COUNTY', 'THEREIN', 'FOUNDING', 'THE', 'CITY', 'OF', 'FAR', 'WEST'] +4077-13751-0014-1272: hyp=['MAKING', 'THEIR', 'WAY', 'ACROSS', 'THE', 'RIVER', 'MOST', 'OF', 'THE', 'REFUGEES', 'FOUND', 'SHELTER', 'AMONG', 'THE', 'MORE', 'HOSPITABLE', 'PEOPLE', 'OF', 'CLAY', 'COUNTY', 'AND', 'AFTERWARD', 'ESTABLISHED', 'THEMSELVES', 'IN', 'COLDWELL', 'COUNTY', 'THEY', 'WERE', 'IN', 'FOUNDING', 'THE', 'CITY', 'OF', 'FAR', 'WEST'] +4077-13751-0015-1273: ref=['A', 'SMALL', 'SETTLEMENT', 'HAD', 'BEEN', 'FOUNDED', 'BY', 'MORMON', 'FAMILIES', 'ON', 'SHOAL', 'CREEK', 'AND', 'HERE', 'ON', 'THE', 'THIRTIETH', 'OF', 'OCTOBER', 'EIGHTEEN', 'THIRTY', 'EIGHT', 'A', 'COMPANY', 'OF', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'FELL', 'UPON', 'THE', 'HAPLESS', 'SETTLERS', 'AND', 'BUTCHERED', 'A', 'SCORE'] +4077-13751-0015-1273: hyp=['A', 'SMALL', 'SETTLEMENT', 'HAD', 'BEEN', 'FOUNDED', 'BY', 'MORMON', 'FAMILIES', 'ON', 'SHOAL', 'CREEK', 'AND', 'HERE', 'ON', 'THE', 'THIRTIETH', 'OF', 'OCTOBER', 'EIGHTEEN', 'THIRTY', 'EIGHT', 'A', 'COMPANY', 'OF', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'FELL', 'UPON', 'THE', 'HAPLESS', 'SETTLERS', 'AND', 'BUTCHER', 'TO', 'SCORE'] +4077-13751-0016-1274: ref=['BE', 'IT', 'SAID', 'TO', 'THE', 'HONOR', 'OF', 'SOME', 'OF', 'THE', 'OFFICERS', 'ENTRUSTED', 'WITH', 'THE', 'TERRIBLE', 'COMMISSION', 'THAT', 'WHEN', 'THEY', 'LEARNED', 'ITS', 'TRUE', 'SIGNIFICANCE', 'THEY', 'RESIGNED', 'THEIR', 'AUTHORITY', 'RATHER', 'THAN', 'HAVE', 'ANYTHING', 'TO', 'DO', 
'WITH', 'WHAT', 'THEY', 'DESIGNATED', 'A', 'COLD', 'BLOODED', 'BUTCHERY'] +4077-13751-0016-1274: hyp=['BE', 'IT', 'SAID', 'TO', 'THE', 'HONOR', 'OF', 'SOME', 'OF', 'THE', 'OFFICERS', 'ENTRUSTED', 'WITH', 'A', 'TERRIBLE', 'COMMISSION', 'THAT', 'WHEN', 'THEY', 'LEARNED', 'ITS', 'TRUE', 'SIGNIFICANCE', 'THEY', 'RESIGNED', 'THEIR', 'AUTHORITY', 'RATHER', 'THAN', 'HAVE', 'ANYTHING', 'TO', 'DO', 'WITH', 'WHAT', 'THEY', 'DESIGNATED', 'A', 'COLD', 'BLOODED', 'BUTCHERY'] +4077-13751-0017-1275: ref=['OH', 'WHAT', 'A', 'RECORD', 'TO', 'READ', 'WHAT', 'A', 'PICTURE', 'TO', 'GAZE', 'UPON', 'HOW', 'AWFUL', 'THE', 'FACT'] +4077-13751-0017-1275: hyp=['OH', 'WHAT', 'A', 'RECORD', 'TO', 'READ', 'WHAT', 'A', 'PICTURE', 'TO', 'GAZE', 'UPON', 'HOW', 'AWFUL', 'THE', 'FACT'] +4077-13751-0018-1276: ref=['AMERICAN', 'SCHOOL', 'BOYS', 'READ', 'WITH', 'EMOTIONS', 'OF', 'HORROR', 'OF', 'THE', 'ALBIGENSES', 'DRIVEN', 'BEATEN', 'AND', 'KILLED', 'WITH', 'A', 'PAPAL', 'LEGATE', 'DIRECTING', 'THE', 'BUTCHERY', 'AND', 'OF', 'THE', 'VAUDOIS', 'HUNTED', 'AND', 'HOUNDED', 'LIKE', 'BEASTS', 'AS', 'THE', 'EFFECT', 'OF', 'A', 'ROYAL', 'DECREE', 'AND', 'THEY', 'YET', 'SHALL', 'READ', 'IN', 'THE', 'HISTORY', 'OF', 'THEIR', 'OWN', 'COUNTRY', 'OF', 'SCENES', 'AS', 'TERRIBLE', 'AS', 'THESE', 'IN', 'THE', 'EXHIBITION', 'OF', 'INJUSTICE', 'AND', 'INHUMAN', 'HATE'] +4077-13751-0018-1276: hyp=['AMERICAN', 'SCHOOLBOYS', 'READ', 'WITH', 'EMOTIONS', 'OF', 'HORROR', 'OF', 'THE', 'ALBIGENZAS', 'DRIVEN', 'BEATEN', 'AND', 'KILLED', 'WITH', 'A', 'PEPPEL', 'LEGATE', 'DIRECTING', 'THE', 'BUTCHERY', 'AND', 'OF', 'THE', 'FAUDOIR', 'HUNTED', 'AND', 'HOUNDED', 'LIKE', 'BEASTS', 'AS', 'THE', 'EFFECT', 'OF', 'A', 'ROYAL', 'DECREE', 'AND', 'THEY', 'YET', 'SHALL', 'READ', 'IN', 'THE', 'HISTORY', 'OF', 'THEIR', 'OWN', 'COUNTRY', 'OF', 'SCENES', 'AS', 'TERRIBLE', 'AS', 'THESE', 'IN', 'THE', 'EXHIBITION', 'OF', 'INJUSTICE', 'AND', 'INHUMAN', 'HATE'] +4077-13751-0019-1277: ref=['WHO', 'BEGAN', 'THE', 'QUARREL', 'WAS', 'IT', 'THE', 'MORMONS'] +4077-13751-0019-1277: hyp=['WHO', 'BEGAN', 'THE', 'QUARREL', 'WAS', 'IT', 'THE', 'MORMONS'] +4077-13751-0020-1278: ref=['AS', 'A', 'SAMPLE', 'OF', 'THE', 'PRESS', 'COMMENTS', 'AGAINST', 'THE', 'BRUTALITY', 'OF', 'THE', 'MISSOURIANS', 'I', 'QUOTE', 'A', 'PARAGRAPH', 'FROM', 'THE', 'QUINCY', 'ARGUS', 'MARCH', 'SIXTEENTH', 'EIGHTEEN', 'THIRTY', 'NINE'] +4077-13751-0020-1278: hyp=['AS', 'THE', 'SAMPLE', 'OF', 'THE', 'PRESS', 'COMETS', 'AGAINST', 'THE', 'BRUTALITY', 'OF', 'THE', 'MISSOURIANCE', 'I', 'QUOTE', 'A', 'PARAGRAPH', 'FROM', 'THE', 'QUINCY', 'ARGUS', 'MARCH', 'SIXTEENTH', 'EIGHTEEN', 'THIRTY', 'NINE'] +4077-13751-0021-1279: ref=['IT', 'WILL', 'BE', 'OBSERVED', 'THAT', 'AN', 'ORGANIZED', 'MOB', 'AIDED', 'BY', 'MANY', 'OF', 'THE', 'CIVIL', 'AND', 'MILITARY', 'OFFICERS', 'OF', 'MISSOURI', 'WITH', 'GOVERNOR', 'BOGGS', 'AT', 'THEIR', 'HEAD', 'HAVE', 'BEEN', 'THE', 'PROMINENT', 'ACTORS', 'IN', 'THIS', 'BUSINESS', 'INCITED', 'TOO', 'IT', 'APPEARS', 'AGAINST', 'THE', 'MORMONS', 'BY', 'POLITICAL', 'HATRED', 'AND', 'BY', 'THE', 'ADDITIONAL', 'MOTIVES', 'OF', 'PLUNDER', 'AND', 'REVENGE'] +4077-13751-0021-1279: hyp=['IT', 'WILL', 'BE', 'OBSERVED', 'THAT', 'AN', 'ORGANIZED', 'MOB', 'AIDED', 'BY', 'MANY', 'OF', 'THE', 'CIVIL', 'AND', 'MILITARY', 'OFFICERS', 'OF', 'MISSOURI', 'WITH', 'GOVERNOR', 'BOX', 'AT', 'THEIR', 'HEAD', 'HAVE', 'BEEN', 'THE', 'PROMINENT', 'ACTORS', 'IN', 'THIS', 'BUSINESS', 'INCITED', 'TOO', 'IT', 'APPEARS', 'AGAINST', 'THE', 'MORMONS', 'BY', 'POLITICAL', 'HATRED', 'AND', 'BY', 'THE', 'ADDITIONAL', 'MOTIVES', 
'OF', 'PLUNDER', 'AND', 'REVENGE'] +4077-13754-0000-1241: ref=['THE', 'ARMY', 'FOUND', 'THE', 'PEOPLE', 'IN', 'POVERTY', 'AND', 'LEFT', 'THEM', 'IN', 'COMPARATIVE', 'WEALTH'] +4077-13754-0000-1241: hyp=['THE', 'ARMY', 'FOUND', 'THE', 'PEOPLE', 'IN', 'POVERTY', 'AND', 'LEFT', 'THEM', 'IN', 'COMPARATIVE', 'WEALTH'] +4077-13754-0001-1242: ref=['BUT', 'A', 'WORD', 'FURTHER', 'CONCERNING', 'THE', 'EXPEDITION', 'IN', 'GENERAL'] +4077-13754-0001-1242: hyp=['BUT', 'A', 'WORD', 'FURTHER', 'CONCERNING', 'THE', 'EXPEDITION', 'IN', 'GENERAL'] +4077-13754-0002-1243: ref=['IT', 'WAS', 'THROUGH', "FLOYD'S", 'ADVICE', 'THAT', 'BUCHANAN', 'ORDERED', 'THE', 'MILITARY', 'EXPEDITION', 'TO', 'UTAH', 'OSTENSIBLY', 'TO', 'INSTALL', 'CERTAIN', 'FEDERAL', 'OFFICIALS', 'AND', 'TO', 'REPRESS', 'AN', 'ALLEGED', 'INFANTILE', 'REBELLION', 'WHICH', 'IN', 'FACT', 'HAD', 'NEVER', 'COME', 'INTO', 'EXISTENCE', 'BUT', 'IN', 'REALITY', 'TO', 'FURTHER', 'THE', 'INTERESTS', 'OF', 'THE', 'SECESSIONISTS'] +4077-13754-0002-1243: hyp=['IT', 'WAS', 'THROUGH', "FLUD'S", 'ADVICE', 'THAT', 'BUCCATAN', 'ORDER', 'THE', 'MILITARY', 'EXPEDITION', 'TO', 'UTAH', 'OSTENSIBLY', 'TO', 'INSTALL', 'CERTAIN', 'FEDERAL', 'OFFICIALS', 'AND', 'TO', 'REPRESS', 'AN', 'ALLEGED', 'INFANTILE', 'REBELLION', 'WHICH', 'IN', 'FACT', 'HAD', 'NEVER', 'COME', 'INTO', 'EXISTENCE', 'BUT', 'IN', 'REALITY', 'TO', 'FURTHER', 'THE', 'INTRICTS', 'OF', 'THE', 'SECESSIONISTS'] +4077-13754-0003-1244: ref=['MOREOVER', 'HAD', 'THE', 'PEOPLE', 'BEEN', 'INCLINED', 'TO', 'REBELLION', 'WHAT', 'GREATER', 'OPPORTUNITY', 'COULD', 'THEY', 'HAVE', 'WISHED'] +4077-13754-0003-1244: hyp=['MOREOVER', 'HAD', 'THE', 'PEOPLE', 'BEEN', 'INCLINED', 'TO', 'REBELLION', 'WHAT', 'GREATER', 'OPPORTUNITY', 'COULD', 'THEY', 'HAVE', 'WISHED'] +4077-13754-0004-1245: ref=['ALREADY', 'A', 'NORTH', 'AND', 'A', 'SOUTH', 'WERE', 'TALKED', 'OF', 'WHY', 'NOT', 'SET', 'UP', 'ALSO', 'A', 'WEST'] +4077-13754-0004-1245: hyp=['ALREADY', 'A', 'NORTH', 'AND', 'THE', 'SOUTH', 'WERE', 'TALKED', 'OF', 'WHY', 'NOT', 'SET', 'UP', 'ALSO', 'A', 'WEST'] +4077-13754-0005-1246: ref=['THEY', 'KNEW', 'NO', 'NORTH', 'NO', 'SOUTH', 'NO', 'EAST', 'NO', 'WEST', 'THEY', 'STOOD', 'POSITIVELY', 'BY', 'THE', 'CONSTITUTION', 'AND', 'WOULD', 'HAVE', 'NOTHING', 'TO', 'DO', 'IN', 'THE', 'BLOODY', 'STRIFE', 'BETWEEN', 'BROTHERS', 'UNLESS', 'INDEED', 'THEY', 'WERE', 'SUMMONED', 'BY', 'THE', 'AUTHORITY', 'TO', 'WHICH', 'THEY', 'HAD', 'ALREADY', 'ONCE', 'LOYALLY', 'RESPONDED', 'TO', 'FURNISH', 'MEN', 'AND', 'ARMS', 'FOR', 'THEIR', "COUNTRY'S", 'NEED'] +4077-13754-0005-1246: hyp=['THEY', 'KNEW', 'NORTH', 'NOR', 'SOUTH', 'NOR', 'EAST', 'NO', 'WEST', 'THEY', 'STOOD', 'POSITIVELY', 'BY', 'THE', 'CONSTITUTION', 'AND', 'WOULD', 'HAVE', 'NOTHING', 'TO', 'DO', 'IN', 'THE', 'BLOODY', 'STRIFE', 'BETWEEN', 'BROTHERS', 'UNLESS', 'INDEED', 'THEY', 'WERE', 'SUMMONED', 'BY', 'THE', 'AUTHORITY', 'TO', 'WHICH', 'THEY', 'HAD', 'ALREADY', 'ONCE', 'LOYALLY', 'RESPONDED', 'TO', 'FURNISH', 'MEN', 'IN', 'ARMS', 'FOR', 'THEIR', "COUNTRY'S", 'NEED'] +4077-13754-0006-1247: ref=['WHAT', 'THE', 'LATTER', 'DAY', 'SAINTS', 'CALL', 'CELESTIAL', 'MARRIAGE', 'IS', 'CHARACTERISTIC', 'OF', 'THE', 'CHURCH', 'AND', 'IS', 'IN', 'VERY', 'GENERAL', 'PRACTISE', 'BUT', 'OF', 'CELESTIAL', 'MARRIAGE', 'PLURALITY', 'OF', 'WIVES', 'WAS', 'AN', 'INCIDENT', 'NEVER', 'AN', 'ESSENTIAL'] +4077-13754-0006-1247: hyp=['WHAT', 'THE', 'LATTER', 'DAY', 'SAINTS', 'CALL', 'CELESTIAL', 'MARRIAGE', 'IS', 'CHARACTERISTIC', 'OF', 'THE', 'CHURCH', 'AND', 'IS', 'IN', 'VERY', 'GENERAL', 'PRACTICE', 'BUT', 
'OF', 'CELESTIAL', 'MARRIAGE', 'PLURALITY', 'OF', 'WIVES', 'WAS', 'AN', 'INCIDENT', 'NEVER', 'AN', 'ESSENTIAL'] +4077-13754-0007-1248: ref=['WE', 'BELIEVE', 'IN', 'A', 'LITERAL', 'RESURRECTION', 'AND', 'AN', 'ACTUAL', 'HEREAFTER', 'IN', 'WHICH', 'FUTURE', 'STATE', 'SHALL', 'BE', 'RECOGNIZED', 'EVERY', 'SANCTIFIED', 'AND', 'AUTHORIZED', 'RELATIONSHIP', 'EXISTING', 'HERE', 'ON', 'EARTH', 'OF', 'PARENT', 'AND', 'CHILD', 'BROTHER', 'AND', 'SISTER', 'HUSBAND', 'AND', 'WIFE'] +4077-13754-0007-1248: hyp=['WE', 'BELIEVE', 'IN', 'A', 'LITERAL', 'RESURRECTION', 'AND', 'AN', 'ACTUAL', 'HEREAFTER', 'IN', 'WHICH', 'FUTURE', 'STATES', 'SHALL', 'BE', 'RECOGNIZED', 'EVERY', 'SANCTIFIED', 'AND', 'AUTHORIZED', 'RELATIONSHIP', 'EXISTING', 'HERE', 'ON', 'EARTH', 'OF', 'PARENT', 'AND', 'CHILD', 'BROTHER', 'AND', 'SISTER', 'HUSBAND', 'AND', 'WIFE'] +4077-13754-0008-1249: ref=['IT', 'HAS', 'BEEN', 'MY', 'PRIVILEGE', 'TO', 'TREAD', 'THE', 'SOIL', 'OF', 'MANY', 'LANDS', 'TO', 'OBSERVE', 'THE', 'CUSTOMS', 'AND', 'STUDY', 'THE', 'HABITS', 'OF', 'MORE', 'NATIONS', 'THAN', 'ONE', 'AND', 'I', 'HAVE', 'YET', 'TO', 'FIND', 'THE', 'PLACE', 'AND', 'MEET', 'THE', 'PEOPLE', 'WHERE', 'AND', 'WITH', 'WHOM', 'THE', 'PURITY', 'OF', 'MAN', 'AND', 'WOMAN', 'IS', 'HELD', 'MORE', 'PRECIOUS', 'THAN', 'AMONG', 'THE', 'MALIGNED', 'MORMONS', 'IN', 'THE', 'MOUNTAIN', 'VALLEYS', 'OF', 'THE', 'WEST'] +4077-13754-0008-1249: hyp=['IT', 'HAS', 'BEEN', 'MY', 'PRIVILEGE', 'TO', 'TREAD', 'THE', 'SOIL', 'OF', 'MANY', 'LANDS', 'TO', 'OBSERVE', 'THE', 'CUSTOMS', 'AND', 'STUDY', 'THE', 'HABITS', 'OF', 'MORE', 'NATIONS', 'THAN', 'ONE', 'AND', 'I', 'HAVE', 'YET', 'TO', 'FIND', 'THE', 'PLACE', 'AND', 'MEET', 'THE', 'PEOPLE', 'WHEREINWITH', 'WHOM', 'THE', 'PURITY', 'OF', 'MAN', 'AND', 'WOMAN', 'IS', 'HELD', 'MORE', 'PRECIOUS', 'THAN', 'AMONG', 'THE', 'MALIGNED', 'MORMONS', 'IN', 'THE', 'MOUNTAIN', 'VALLEYS', 'OF', 'THE', 'WEST'] +4077-13754-0009-1250: ref=['AT', 'THE', 'INCEPTION', 'OF', 'PLURAL', 'MARRIAGE', 'AMONG', 'THE', 'LATTER', 'DAY', 'SAINTS', 'THERE', 'WAS', 'NO', 'LAW', 'NATIONAL', 'OR', 'STATE', 'AGAINST', 'ITS', 'PRACTISE'] +4077-13754-0009-1250: hyp=['AT', 'THE', 'INCEPTION', 'OF', 'PEARL', 'MARRIAGE', 'AMONG', 'THE', 'LATTER', 'DAY', 'SAINTS', 'THERE', 'WAS', 'NO', 'LAW', 'NATIONAL', 'OR', 'STATE', 'AGAINST', 'ITS', 'PRACTICE'] +4077-13754-0010-1251: ref=['IN', 'EIGHTEEN', 'SIXTY', 'TWO', 'A', 'LAW', 'WAS', 'ENACTED', 'WITH', 'THE', 'PURPOSE', 'OF', 'SUPPRESSING', 'PLURAL', 'MARRIAGE', 'AND', 'AS', 'HAD', 'BEEN', 'PREDICTED', 'IN', 'THE', 'NATIONAL', 'SENATE', 'PRIOR', 'TO', 'ITS', 'PASSAGE', 'IT', 'LAY', 'FOR', 'MANY', 'YEARS', 'A', 'DEAD', 'LETTER'] +4077-13754-0010-1251: hyp=['IN', 'EIGHTEEN', 'SIXTY', 'TWO', 'A', 'LAW', 'WAS', 'ENACTED', 'WITH', 'A', 'PURPOSE', 'OF', 'SUPPRESSING', 'PLURAL', 'MARRIAGE', 'AND', 'AS', 'HAD', 'BEEN', 'PREDICTED', 'IN', 'THE', 'NATIONAL', 'SENATE', 'PRAYER', 'TO', 'ITS', 'PASSAGE', 'IT', 'LAY', 'FOR', 'MANY', 'YEARS', 'A', 'DEAD', 'LETTER'] +4077-13754-0011-1252: ref=['FEDERAL', 'JUDGES', 'AND', 'UNITED', 'STATES', 'ATTORNEYS', 'IN', 'UTAH', 'WHO', 'WERE', 'NOT', 'MORMONS', 'NOR', 'LOVERS', 'OF', 'MORMONISM', 'REFUSED', 'TO', 'ENTERTAIN', 'COMPLAINTS', 'OR', 'PROSECUTE', 'CASES', 'UNDER', 'THE', 'LAW', 'BECAUSE', 'OF', 'ITS', 'MANIFEST', 'INJUSTICE', 'AND', 'INADEQUACY'] +4077-13754-0011-1252: hyp=['FEDERAL', 'JUDGES', 'AND', 'UNITED', 'STATES', 'ATTORNEYS', 'IN', 'UTA', 'WHO', 'WERE', 'NOT', 'MORMONS', 'NOR', 'LOVERS', 'OF', 'WARMONISM', 'REFUSED', 'TO', 'ENTERTAIN', 'COMPLAINTS', 'OR', 'PROSECUTE', 
'CASES', 'UNDER', 'THE', 'LAW', 'BECAUSE', 'OF', 'ITS', 'MANIFEST', 'INJUSTICE', 'AND', 'INADEQUACY'] +4077-13754-0012-1253: ref=['THIS', 'MEANT', 'THAT', 'FOR', 'AN', 'ALLEGED', 'MISDEMEANOR', 'FOR', 'WHICH', 'CONGRESS', 'PRESCRIBED', 'A', 'MAXIMUM', 'PENALTY', 'OF', 'SIX', 'MONTHS', 'IMPRISONMENT', 'AND', 'A', 'FINE', 'OF', 'THREE', 'HUNDRED', 'DOLLARS', 'A', 'MAN', 'MIGHT', 'BE', 'IMPRISONED', 'FOR', 'LIFE', 'AYE', 'FOR', 'MANY', 'TERMS', 'OF', 'A', "MAN'S", 'NATURAL', 'LIFE', 'DID', 'THE', "COURT'S", 'POWER', 'TO', 'ENFORCE', 'ITS', 'SENTENCES', 'EXTEND', 'SO', 'FAR', 'AND', 'MIGHT', 'BE', 'FINED', 'MILLIONS', 'OF', 'DOLLARS'] +4077-13754-0012-1253: hyp=['THIS', 'MEANT', 'THAT', 'FOR', 'AN', 'ALLEGED', 'MISDEMEANOR', 'FOR', 'WHICH', 'CONGRESS', 'PRESCRIBED', 'A', 'MAXIMUM', 'PENALTY', 'OF', 'SIX', 'MONTHS', 'IMPRISONMENT', 'AND', 'A', 'FINE', 'OF', 'THREE', 'HUNDRED', 'DOLLARS', 'A', 'MAN', 'MIGHT', 'BE', 'IMPRISONED', 'FOR', 'LIFE', 'I', 'FOR', 'MANY', 'TERMS', 'OF', 'A', "MAN'S", 'NATURAL', 'LIFE', 'DID', 'THE', "COURT'S", 'POWER', 'TO', 'ENFORCE', 'ITS', 'SENTENCES', 'EXTEND', 'SO', 'FAR', 'AND', 'MIGHT', 'BE', 'FINED', 'MILLIONS', 'OF', 'DOLLARS'] +4077-13754-0013-1254: ref=['BEFORE', 'THIS', 'TRAVESTY', 'ON', 'THE', 'ADMINISTRATION', 'OF', 'LAW', 'COULD', 'BE', 'BROUGHT', 'BEFORE', 'THE', 'COURT', 'OF', 'LAST', 'RESORT', 'AND', 'THERE', 'MEET', 'WITH', 'THE', 'REVERSAL', 'AND', 'REBUKE', 'IT', 'DESERVED', 'MEN', 'WERE', 'IMPRISONED', 'UNDER', 'SENTENCES', 'OF', 'MANY', 'YEARS', 'DURATION'] +4077-13754-0013-1254: hyp=['BEFORE', 'THIS', 'TRAVESTY', 'ON', 'THE', 'ADMINISTRATION', 'OF', 'LAW', 'COULD', 'BE', 'BROUGHT', 'BEFORE', 'THE', 'COURT', 'OF', 'LAST', 'RESORT', 'AND', 'THERE', 'MET', 'WITH', 'THE', 'REVERSAL', 'AND', 'REBUKE', 'IT', 'DESERVED', 'MEN', 'WERE', 'IMPRISONED', 'UNDER', 'SENTENCE', 'OF', 'MANY', 'YEARS', 'DURATION'] +4077-13754-0014-1255: ref=['THE', 'PEOPLE', 'CONTESTED', 'THESE', 'MEASURES', 'ONE', 'BY', 'ONE', 'IN', 'THE', 'COURTS', 'PRESENTING', 'IN', 'CASE', 'AFTER', 'CASE', 'THE', 'DIFFERENT', 'PHASES', 'OF', 'THE', 'SUBJECT', 'AND', 'URGING', 'THE', 'UNCONSTITUTIONALITY', 'OF', 'THE', 'MEASURE'] +4077-13754-0014-1255: hyp=['THE', 'PEOPLE', 'CONTESTED', 'THESE', 'MEASURES', 'ONE', 'BY', 'ONE', 'IN', 'THE', 'COURTS', 'PRESENTING', 'IN', 'CASE', 'AFTER', 'CASE', 'THE', 'DIFFERENT', 'PHASES', 'OF', 'THE', 'SUBJECT', 'AND', 'URGING', 'THE', 'UNCONSTITUTIONALITY', 'OF', 'THE', 'MEASURE'] +4077-13754-0015-1256: ref=['THEN', 'THE', 'CHURCH', 'WAS', 'DISINCORPORATED', 'AND', 'ITS', 'PROPERTY', 'BOTH', 'REAL', 'AND', 'PERSONAL', 'CONFISCATED', 'AND', 'ESCHEATED', 'TO', 'THE', 'GOVERNMENT', 'OF', 'THE', 'UNITED', 'STATES', 'AND', 'ALTHOUGH', 'THE', 'PERSONAL', 'PROPERTY', 'WAS', 'SOON', 'RESTORED', 'REAL', 'ESTATE', 'OF', 'GREAT', 'VALUE', 'LONG', 'LAY', 'IN', 'THE', 'HANDS', 'OF', 'THE', "COURT'S", 'RECEIVER', 'AND', 'THE', 'MORMON', 'CHURCH', 'HAD', 'TO', 'PAY', 'THE', 'NATIONAL', 'GOVERNMENT', 'HIGH', 'RENTAL', 'ON', 'ITS', 'OWN', 'PROPERTY'] +4077-13754-0015-1256: hyp=['THEN', 'THE', 'CHURCH', 'WAS', 'DISINCORPORATED', 'AND', 'ITS', 'PROPERTY', 'BOTH', 'REAL', 'AND', 'PERSONAL', 'CONFISCATED', 'AND', 'ISIATED', 'TO', 'THE', 'GOVERNMENT', 'OF', 'THE', 'UNITED', 'STATES', 'AND', 'ALTHOUGH', 'THE', 'PERSONAL', 'PROPERTY', 'WAS', 'SOON', 'RESTORED', 'REAL', 'ESTATE', 'OF', 'GREAT', 'VALUE', 'LONG', 'LAY', 'IN', 'THE', 'HANDS', 'OF', 'THE', 'COURTS', 'RECEIVER', 'AND', 'THE', 'MORMON', 'CHURCH', 'HAD', 'TO', 'PAY', 'THE', 'NATIONAL', 'GOVERNMENT', 'HIGHER', 'RENTAL', 
'ON', 'ITS', 'OWN', 'PROPERTY'] +4077-13754-0016-1257: ref=['AND', 'SO', 'THE', 'STORY', 'OF', 'MORMONISM', 'RUNS', 'ON', 'ITS', 'FINALE', 'HAS', 'NOT', 'YET', 'BEEN', 'WRITTEN', 'THE', 'CURRENT', 'PRESS', 'PRESENTS', 'CONTINUOUSLY', 'NEW', 'STAGES', 'OF', 'ITS', 'PROGRESS', 'NEW', 'DEVELOPMENTS', 'OF', 'ITS', 'PLAN'] +4077-13754-0016-1257: hyp=['AND', 'SO', 'THE', 'STORY', 'OF', 'MORMONISM', 'RUNS', 'ON', 'ITS', 'FINALE', 'HAS', 'NOT', 'YET', 'BEEN', 'WRITTEN', 'THE', 'CURRENT', 'PRESS', 'PRESENTS', 'CONTINUOUSLY', 'NEW', 'STAGES', 'OF', 'ITS', 'PROGRESS', 'NEW', 'DEVELOPMENTS', 'OF', 'ITS', 'PLAN'] +4446-2271-0000-1133: ref=['MAINHALL', 'LIKED', 'ALEXANDER', 'BECAUSE', 'HE', 'WAS', 'AN', 'ENGINEER'] +4446-2271-0000-1133: hyp=['MAIN', 'HALL', 'LIKED', 'ALEXANDER', 'BECAUSE', 'HE', 'WAS', 'AN', 'ENGINEER'] +4446-2271-0001-1134: ref=['HE', 'HAD', 'PRECONCEIVED', 'IDEAS', 'ABOUT', 'EVERYTHING', 'AND', 'HIS', 'IDEA', 'ABOUT', 'AMERICANS', 'WAS', 'THAT', 'THEY', 'SHOULD', 'BE', 'ENGINEERS', 'OR', 'MECHANICS'] +4446-2271-0001-1134: hyp=['WE', 'HAD', 'FREQUENCEDE', 'IDEAS', 'ABOUT', 'EVERYTHING', 'AND', 'HIS', 'IDEA', 'ABOUT', 'AMERICANS', 'WAS', 'THAT', 'THEY', 'SHOULD', 'BE', 'ENGINEERS', 'OR', 'MECHANICS'] +4446-2271-0002-1135: ref=["IT'S", 'TREMENDOUSLY', 'WELL', 'PUT', 'ON', 'TOO'] +4446-2271-0002-1135: hyp=['ITS', 'TREMENDOUSLY', 'WELL', 'PUT', 'ON', 'TOO'] +4446-2271-0003-1136: ref=["IT'S", 'BEEN', 'ON', 'ONLY', 'TWO', 'WEEKS', 'AND', "I'VE", 'BEEN', 'HALF', 'A', 'DOZEN', 'TIMES', 'ALREADY'] +4446-2271-0003-1136: hyp=["IT'S", 'BEEN', 'ON', 'ONLY', 'TWO', 'WEEKS', 'AND', "I'VE", 'BEEN', 'HALF', 'A', 'DOZEN', 'TIMES', 'ALREADY'] +4446-2271-0004-1137: ref=['DO', 'YOU', 'KNOW', 'ALEXANDER', 'MAINHALL', 'LOOKED', 'WITH', 'PERPLEXITY', 'UP', 'INTO', 'THE', 'TOP', 'OF', 'THE', 'HANSOM', 'AND', 'RUBBED', 'HIS', 'PINK', 'CHEEK', 'WITH', 'HIS', 'GLOVED', 'FINGER', 'DO', 'YOU', 'KNOW', 'I', 'SOMETIMES', 'THINK', 'OF', 'TAKING', 'TO', 'CRITICISM', 'SERIOUSLY', 'MYSELF'] +4446-2271-0004-1137: hyp=['DO', 'YOU', 'KNOW', 'ALEXANDER', 'MAIN', 'HALL', 'LOOKED', 'WITH', 'PERPLEXITY', 'UP', 'INTO', 'THE', 'TOP', 'OF', 'THE', 'HANSOM', 'AND', 'RUBBED', 'HIS', 'PINK', 'CHEEK', 'WITH', 'HIS', 'GLOVED', 'FINGER', 'DO', 'YOU', 'KNOW', 'I', 'SOMETIMES', 'THINK', 'OF', 'TAKING', 'TO', 'CRITICISM', 'SERIOUSLY', 'MYSELF'] +4446-2271-0005-1138: ref=['SHE', 'SAVES', 'HER', 'HAND', 'TOO', "SHE'S", 'AT', 'HER', 'BEST', 'IN', 'THE', 'SECOND', 'ACT'] +4446-2271-0005-1138: hyp=['SHE', 'SAVES', 'HER', 'HAND', 'TOO', "SHE'S", 'AT', 'HER', 'BEST', 'IN', 'THE', 'SECOND', 'ACT'] +4446-2271-0006-1139: ref=["HE'S", 'BEEN', 'WANTING', 'TO', 'MARRY', 'HILDA', 'THESE', 'THREE', 'YEARS', 'AND', 'MORE'] +4446-2271-0006-1139: hyp=["HE'S", 'BEEN', 'WANTING', 'TO', 'MARRY', 'HILDER', 'THESE', 'THREE', 'YEARS', 'AND', 'MORE'] +4446-2271-0007-1140: ref=['SHE', "DOESN'T", 'TAKE', 'UP', 'WITH', 'ANYBODY', 'YOU', 'KNOW'] +4446-2271-0007-1140: hyp=['SHE', "DOESN'T", 'TAKE', 'UP', 'WITH', 'ANYBODY', 'YOU', 'KNOW'] +4446-2271-0008-1141: ref=['IRENE', 'BURGOYNE', 'ONE', 'OF', 'HER', 'FAMILY', 'TOLD', 'ME', 'IN', 'CONFIDENCE', 'THAT', 'THERE', 'WAS', 'A', 'ROMANCE', 'SOMEWHERE', 'BACK', 'IN', 'THE', 'BEGINNING'] +4446-2271-0008-1141: hyp=['IRENE', 'WERE', 'GOING', 'ONE', 'OF', 'HER', 'FAMILY', 'TOLD', 'ME', 'IN', 'CONFIDENCE', 'THAT', 'THERE', 'WAS', 'A', 'ROMANCE', 'SOMEWHERE', 'BACK', 'IN', 'THE', 'BEGINNING'] +4446-2271-0009-1142: ref=['MAINHALL', 'VOUCHED', 'FOR', 'HER', 'CONSTANCY', 'WITH', 'A', 'LOFTINESS', 'THAT', 'MADE', 'ALEXANDER', 
'SMILE', 'EVEN', 'WHILE', 'A', 'KIND', 'OF', 'RAPID', 'EXCITEMENT', 'WAS', 'TINGLING', 'THROUGH', 'HIM'] +4446-2271-0009-1142: hyp=['MAIN', 'HOLE', 'VOUCHED', 'FOR', 'HER', 'CONSTANCY', 'WITH', 'A', 'LOFTINESS', 'THAT', 'MADE', 'ALEXANDER', 'SMILE', 'EVEN', 'WHILE', 'A', 'KIND', 'OF', 'RAPID', 'EXCITEMENT', 'WAS', 'TINGLING', 'THROUGH', 'HIM'] +4446-2271-0010-1143: ref=["HE'S", 'ANOTHER', "WHO'S", 'AWFULLY', 'KEEN', 'ABOUT', 'HER', 'LET', 'ME', 'INTRODUCE', 'YOU'] +4446-2271-0010-1143: hyp=["HE'S", 'ANOTHER', "WHO'S", 'AWFULLY', 'KEEN', 'ABOUT', 'HER', 'LET', 'ME', 'INTRODUCE', 'YOU'] +4446-2271-0011-1144: ref=['SIR', 'HARRY', 'TOWNE', 'MISTER', 'BARTLEY', 'ALEXANDER', 'THE', 'AMERICAN', 'ENGINEER'] +4446-2271-0011-1144: hyp=['SIR', 'HARRYTOWN', 'MISTER', 'BARTLEY', 'ALEXANDER', 'THE', 'AMERICAN', 'ENGINEER'] +4446-2271-0012-1145: ref=['I', 'SAY', 'SIR', 'HARRY', 'THE', 'LITTLE', "GIRL'S", 'GOING', 'FAMOUSLY', 'TO', 'NIGHT', "ISN'T", 'SHE'] +4446-2271-0012-1145: hyp=['I', 'SAY', 'SIR', 'HARRY', 'THE', 'LITTLE', "GIRL'S", 'GOING', 'FAMOUSLY', 'TO', 'NIGHT', "ISN'T", 'SHE'] +4446-2271-0013-1146: ref=['DO', 'YOU', 'KNOW', 'I', 'THOUGHT', 'THE', 'DANCE', 'A', 'BIT', 'CONSCIOUS', 'TO', 'NIGHT', 'FOR', 'THE', 'FIRST', 'TIME'] +4446-2271-0013-1146: hyp=['YOU', 'KNOW', 'I', 'THOUGHT', 'THE', 'DANCE', 'A', 'BIT', 'CONSCIOUS', 'TO', 'NIGHT', 'FOR', 'THE', 'FIRST', 'TIME'] +4446-2271-0014-1147: ref=['WESTMERE', 'AND', 'I', 'WERE', 'BACK', 'AFTER', 'THE', 'FIRST', 'ACT', 'AND', 'WE', 'THOUGHT', 'SHE', 'SEEMED', 'QUITE', 'UNCERTAIN', 'OF', 'HERSELF'] +4446-2271-0014-1147: hyp=['WESTMER', 'AND', 'I', 'WERE', 'BACK', 'AFTER', 'THE', 'FIRST', 'ACT', 'AND', 'WE', 'THOUGHT', 'SHE', 'SEEMED', 'QUITE', 'UNCERTAIN', 'OF', 'HERSELF'] +4446-2271-0015-1148: ref=['A', 'LITTLE', 'ATTACK', 'OF', 'NERVES', 'POSSIBLY'] +4446-2271-0015-1148: hyp=['A', 'LITTLE', 'ATTACK', 'OF', 'NERVES', 'POSSIBLY'] +4446-2271-0016-1149: ref=['HE', 'WAS', 'BEGINNING', 'TO', 'FEEL', 'A', 'KEEN', 'INTEREST', 'IN', 'THE', 'SLENDER', 'BAREFOOT', 'DONKEY', 'GIRL', 'WHO', 'SLIPPED', 'IN', 'AND', 'OUT', 'OF', 'THE', 'PLAY', 'SINGING', 'LIKE', 'SOME', 'ONE', 'WINDING', 'THROUGH', 'A', 'HILLY', 'FIELD'] +4446-2271-0016-1149: hyp=['IT', 'WAS', 'BEGINNING', 'TO', 'FEEL', 'THE', 'KEEN', 'INTEREST', 'IN', 'THE', 'SLENDER', 'BAREFOOT', 'DONKEY', 'GIRL', 'WHO', 'SLIPPED', 'IN', 'AND', 'OUT', 'OF', 'THE', 'PLAY', 'SINGING', 'LIKE', 'SOME', 'ONE', 'WINDING', 'THROUGH', 'A', 'HILLY', 'FIELD'] +4446-2271-0017-1150: ref=['ONE', 'NIGHT', 'WHEN', 'HE', 'AND', 'WINIFRED', 'WERE', 'SITTING', 'TOGETHER', 'ON', 'THE', 'BRIDGE', 'HE', 'TOLD', 'HER', 'THAT', 'THINGS', 'HAD', 'HAPPENED', 'WHILE', 'HE', 'WAS', 'STUDYING', 'ABROAD', 'THAT', 'HE', 'WAS', 'SORRY', 'FOR', 'ONE', 'THING', 'IN', 'PARTICULAR', 'AND', 'HE', 'ASKED', 'HER', 'WHETHER', 'SHE', 'THOUGHT', 'SHE', 'OUGHT', 'TO', 'KNOW', 'ABOUT', 'THEM'] +4446-2271-0017-1150: hyp=['ONE', 'NIGHT', 'WHEN', 'HE', 'AND', 'WINIFRED', 'WERE', 'SITTING', 'TOGETHER', 'ON', 'THE', 'BRIDGE', 'HE', 'TOLD', 'HER', 'THE', 'THINGS', 'HAD', 'HAPPENED', 'WHILE', 'HE', 'WAS', 'STUDYING', 'ABROAD', 'THAT', 'HE', 'WAS', 'SORRY', 'FOR', 'ONE', 'THING', 'IN', 'PARTICULAR', 'AND', 'HE', 'ASKED', 'HER', 'WHETHER', 'SHE', 'THOUGHT', 'SHE', 'OUGHT', 'TO', 'KNOW', 'ABOUT', 'THEM'] +4446-2271-0018-1151: ref=['SHE', 'CONSIDERED', 'A', 'MOMENT', 'AND', 'THEN', 'SAID', 'NO', 'I', 'THINK', 'NOT', 'THOUGH', 'I', 'AM', 'GLAD', 'YOU', 'ASK', 'ME'] +4446-2271-0018-1151: hyp=['SHE', 'CONSIDERED', 'FOR', 'A', 'MOMENT', 'AND', 'THEN', 'SAID', 'NO', 
'I', 'THINK', 'NOT', 'THE', 'WAY', 'I', 'AM', 'GLAD', 'YOU', 'ASK', 'ME'] +4446-2271-0019-1152: ref=['AFTER', 'THAT', 'IT', 'WAS', 'EASY', 'TO', 'FORGET', 'ACTUALLY', 'TO', 'FORGET'] +4446-2271-0019-1152: hyp=['AFTER', 'THAT', 'IT', 'WAS', 'EASY', 'TO', 'FORGET', 'ACTUALLY', 'TO', 'FORGET'] +4446-2271-0020-1153: ref=['OF', 'COURSE', 'HE', 'REFLECTED', 'SHE', 'ALWAYS', 'HAD', 'THAT', 'COMBINATION', 'OF', 'SOMETHING', 'HOMELY', 'AND', 'SENSIBLE', 'AND', 'SOMETHING', 'UTTERLY', 'WILD', 'AND', 'DAFT'] +4446-2271-0020-1153: hyp=['OF', 'COURSE', 'HE', 'REFLECTED', 'SHE', 'ALWAYS', 'HAD', 'THAT', 'COMBINATION', 'OF', 'SOMETHING', 'HOMELY', 'AND', 'SENSIBLE', 'AND', 'SOMETHING', 'UTTERLY', 'WILD', 'AND', 'DAFT'] +4446-2271-0021-1154: ref=['SHE', 'MUST', 'CARE', 'ABOUT', 'THE', 'THEATRE', 'A', 'GREAT', 'DEAL', 'MORE', 'THAN', 'SHE', 'USED', 'TO'] +4446-2271-0021-1154: hyp=['SHE', 'MUST', 'CARE', 'ABOUT', 'THE', 'THEATRE', 'A', 'GREAT', 'DEAL', 'MORE', 'THAN', 'SHE', 'USED', 'TO'] +4446-2271-0022-1155: ref=["I'M", 'GLAD', "SHE'S", 'HELD', 'HER', 'OWN', 'SINCE'] +4446-2271-0022-1155: hyp=["I'M", 'GLAD', "SHE'S", 'HELD', 'HER', 'OWN', 'SINCE'] +4446-2271-0023-1156: ref=['AFTER', 'ALL', 'WE', 'WERE', 'AWFULLY', 'YOUNG'] +4446-2271-0023-1156: hyp=['AFTER', 'ALL', 'WE', 'WERE', 'AWFULLY', 'YOUNG'] +4446-2271-0024-1157: ref=['I', "SHOULDN'T", 'WONDER', 'IF', 'SHE', 'COULD', 'LAUGH', 'ABOUT', 'IT', 'WITH', 'ME', 'NOW'] +4446-2271-0024-1157: hyp=['I', "SHOULDN'T", 'WONDER', 'IF', 'SHE', 'COULD', 'LAUGH', 'ABOUT', 'IT', 'WITH', 'ME', 'NOW'] +4446-2273-0000-1158: ref=['HILDA', 'WAS', 'VERY', 'NICE', 'TO', 'HIM', 'AND', 'HE', 'SAT', 'ON', 'THE', 'EDGE', 'OF', 'HIS', 'CHAIR', 'FLUSHED', 'WITH', 'HIS', 'CONVERSATIONAL', 'EFFORTS', 'AND', 'MOVING', 'HIS', 'CHIN', 'ABOUT', 'NERVOUSLY', 'OVER', 'HIS', 'HIGH', 'COLLAR'] +4446-2273-0000-1158: hyp=['HILDA', 'WAS', 'VERY', 'NICE', 'TO', 'HIM', 'AND', 'HE', 'SAT', 'ON', 'THE', 'EDGE', 'OF', 'HIS', 'CHAIR', 'FLUSHED', 'WITH', 'HIS', 'CONVERSATIONAL', 'EFFORTS', 'AND', 'MOVING', 'HIS', 'CHIN', 'ABOUT', 'NERVOUSLY', 'OVER', 'HIS', 'HIGH', 'COLLAR'] +4446-2273-0001-1159: ref=['THEY', 'ASKED', 'HIM', 'TO', 'COME', 'TO', 'SEE', 'THEM', 'IN', 'CHELSEA', 'AND', 'THEY', 'SPOKE', 'VERY', 'TENDERLY', 'OF', 'HILDA'] +4446-2273-0001-1159: hyp=['THEY', 'ASKED', 'HIM', 'TO', 'COME', 'TO', 'SEE', 'THEM', 'IN', 'CHELSEA', 'AND', 'THEY', 'SPOKE', 'VERY', 'TENDERLY', 'OF', 'HILDA'] +4446-2273-0002-1160: ref=['LAMB', "WOULDN'T", 'CARE', 'A', 'GREAT', 'DEAL', 'ABOUT', 'MANY', 'OF', 'THEM', 'I', 'FANCY'] +4446-2273-0002-1160: hyp=['LAMB', "WOULDN'T", 'CARE', 'A', 'GREAT', 'DEAL', 'ABOUT', 'MANY', 'OF', 'THEM', 'I', 'FANCY'] +4446-2273-0003-1161: ref=['WHEN', 'BARTLEY', 'ARRIVED', 'AT', 'BEDFORD', 'SQUARE', 'ON', 'SUNDAY', 'EVENING', 'MARIE', 'THE', 'PRETTY', 'LITTLE', 'FRENCH', 'GIRL', 'MET', 'HIM', 'AT', 'THE', 'DOOR', 'AND', 'CONDUCTED', 'HIM', 'UPSTAIRS'] +4446-2273-0003-1161: hyp=['WHEN', 'BARTLEY', 'ARRIVED', 'AT', 'BEDFORD', 'SQUARE', 'ON', 'SUNDAY', 'EVENING', 'MARIE', 'THE', 'PRETTY', 'LITTLE', 'FRENCH', 'GIRL', 'MET', 'HIM', 'AT', 'THE', 'DOOR', 'AND', 'CONDUCTED', 'HIM', 'UPSTAIRS'] +4446-2273-0004-1162: ref=['I', 'SHOULD', 'NEVER', 'HAVE', 'ASKED', 'YOU', 'IF', 'MOLLY', 'HAD', 'BEEN', 'HERE', 'FOR', 'I', 'REMEMBER', 'YOU', "DON'T", 'LIKE', 'ENGLISH', 'COOKERY'] +4446-2273-0004-1162: hyp=['I', 'SHOULD', 'NEVER', 'HAVE', 'ASKED', 'YOU', 'IF', 'MOLLY', 'HAD', 'BEEN', 'HERE', 'FOR', 'I', 'REMEMBER', 'YOU', "DON'T", 'LIKE', 'ENGLISH', 'COOKERY'] +4446-2273-0005-1163: ref=['I', 
"HAVEN'T", 'HAD', 'A', 'CHANCE', 'YET', 'TO', 'TELL', 'YOU', 'WHAT', 'A', 'JOLLY', 'LITTLE', 'PLACE', 'I', 'THINK', 'THIS', 'IS'] +4446-2273-0005-1163: hyp=['I', "HAVEN'T", 'HAD', 'A', 'CHANCE', 'YET', 'TO', 'TELL', 'YOU', 'WHAT', 'A', 'JOLLY', 'LITTLE', 'PLACE', 'I', 'THINK', 'THIS', 'IS'] +4446-2273-0006-1164: ref=['THEY', 'ARE', 'ALL', 'SKETCHES', 'MADE', 'ABOUT', 'THE', 'VILLA', "D'ESTE", 'YOU', 'SEE'] +4446-2273-0006-1164: hyp=['THEY', 'ARE', 'ALL', 'SKETCHES', 'MADE', 'ABOUT', 'THE', 'VILLA', 'DESTA', 'YOU', 'SEE'] +4446-2273-0007-1165: ref=['THOSE', 'FELLOWS', 'ARE', 'ALL', 'VERY', 'LOYAL', 'EVEN', 'MAINHALL'] +4446-2273-0007-1165: hyp=['THOSE', 'FELLOWS', 'ARE', 'ALL', 'VERY', 'LOYAL', 'EVEN', 'MAIN', 'HALL'] +4446-2273-0008-1166: ref=["I'VE", 'MANAGED', 'TO', 'SAVE', 'SOMETHING', 'EVERY', 'YEAR', 'AND', 'THAT', 'WITH', 'HELPING', 'MY', 'THREE', 'SISTERS', 'NOW', 'AND', 'THEN', 'AND', 'TIDING', 'POOR', 'COUSIN', 'MIKE', 'OVER', 'BAD', 'SEASONS'] +4446-2273-0008-1166: hyp=["I'VE", 'MANAGED', 'TO', 'SAVE', 'SOMETHING', 'EVERY', 'YEAR', 'AND', 'THAT', 'WITH', 'HELPING', 'MY', 'THREE', 'SISTERS', 'NOW', 'AND', 'THEN', 'AND', 'TIDING', 'POOR', 'COUSIN', 'MICHAEL', 'OVER', 'BAD', 'SEASONS'] +4446-2273-0009-1167: ref=["IT'S", 'NOT', 'PARTICULARLY', 'RARE', 'SHE', 'SAID', 'BUT', 'SOME', 'OF', 'IT', 'WAS', 'MY', "MOTHER'S"] +4446-2273-0009-1167: hyp=["IT'S", 'NOT', 'PARTICULARLY', 'RARE', 'SHE', 'SAID', 'BUT', 'SOME', 'OF', 'IT', 'WAS', 'MY', "MOTHER'S"] +4446-2273-0010-1168: ref=['THERE', 'WAS', 'WATERCRESS', 'SOUP', 'AND', 'SOLE', 'AND', 'A', 'DELIGHTFUL', 'OMELETTE', 'STUFFED', 'WITH', 'MUSHROOMS', 'AND', 'TRUFFLES', 'AND', 'TWO', 'SMALL', 'RARE', 'DUCKLINGS', 'AND', 'ARTICHOKES', 'AND', 'A', 'DRY', 'YELLOW', 'RHONE', 'WINE', 'OF', 'WHICH', 'BARTLEY', 'HAD', 'ALWAYS', 'BEEN', 'VERY', 'FOND'] +4446-2273-0010-1168: hyp=['THERE', 'WAS', 'WATERCRESS', 'SOUP', 'AND', 'SOLE', 'AND', 'A', 'DELIGHTFUL', 'OMELET', 'STUFFED', 'WITH', 'MUSHROOMS', 'AND', 'TRUFFLES', 'AND', 'TWO', 'SMALL', 'RARE', 'DUCKLINGS', 'AND', 'ARTICHOKES', 'AND', 'A', 'DRY', 'YELLOW', 'ROAN', 'WINE', 'OF', 'WHICH', 'BARTLEY', 'HAD', 'ALWAYS', 'BEEN', 'VERY', 'FOND'] +4446-2273-0011-1169: ref=['THERE', 'IS', 'NOTHING', 'ELSE', 'THAT', 'LOOKS', 'SO', 'JOLLY'] +4446-2273-0011-1169: hyp=['THERE', 'IS', 'NOTHING', 'ELSE', 'THAT', 'LOOKS', 'SO', 'JOLLY'] +4446-2273-0012-1170: ref=['THANK', 'YOU', 'BUT', 'I', "DON'T", 'LIKE', 'IT', 'SO', 'WELL', 'AS', 'THIS'] +4446-2273-0012-1170: hyp=['THANK', 'YOU', 'BUT', 'I', "DON'T", 'LIKE', 'IT', 'SO', 'WELL', 'AS', 'THIS'] +4446-2273-0013-1171: ref=['HAVE', 'YOU', 'BEEN', 'IN', 'PARIS', 'MUCH', 'THESE', 'LATE', 'YEARS'] +4446-2273-0013-1171: hyp=['HAVE', 'YOU', 'BEEN', 'IN', 'PARIS', 'MUCH', 'THESE', 'LATE', 'YEARS'] +4446-2273-0014-1172: ref=['THERE', 'ARE', 'FEW', 'CHANGES', 'IN', 'THE', 'OLD', 'QUARTER'] +4446-2273-0014-1172: hyp=['THERE', 'ARE', 'A', 'FEW', 'CHANGES', 'IN', 'THE', 'OLD', 'QUARTER'] +4446-2273-0015-1173: ref=["DON'T", 'I', 'THOUGH', "I'M", 'SO', 'SORRY', 'TO', 'HEAR', 'IT', 'HOW', 'DID', 'HER', 'SON', 'TURN', 'OUT'] +4446-2273-0015-1173: hyp=["DON'T", 'I', 'THOUGH', "I'M", 'SO', 'SORRY', 'TO', 'HEAR', 'IT', 'HOW', 'DID', 'HER', 'SON', 'TURN', 'OUT'] +4446-2273-0016-1174: ref=['HER', 'HAIR', 'IS', 'STILL', 'LIKE', 'FLAX', 'AND', 'HER', 'BLUE', 'EYES', 'ARE', 'JUST', 'LIKE', 'A', "BABY'S", 'AND', 'SHE', 'HAS', 'THE', 'SAME', 'THREE', 'FRECKLES', 'ON', 'HER', 'LITTLE', 'NOSE', 'AND', 'TALKS', 'ABOUT', 'GOING', 'BACK', 'TO', 'HER', 'BAINS', 'DE', 'MER'] +4446-2273-0016-1174: 
hyp=['HER', 'HAIR', 'IS', 'STILL', 'LIKE', 'FLAX', 'AND', 'HER', 'BLUE', 'EYES', 'ARE', 'JUST', 'LIKE', 'A', "BABY'S", 'AND', 'SHE', 'HAS', 'THE', 'SAME', 'THREE', 'FRECKLES', 'ON', 'HER', 'LITTLE', 'NOSE', 'AND', 'TALKS', 'ABOUT', 'GOING', 'BACK', 'TO', 'HER', 'BANDOMERE'] +4446-2273-0017-1175: ref=['HOW', 'JOLLY', 'IT', 'WAS', 'BEING', 'YOUNG', 'HILDA'] +4446-2273-0017-1175: hyp=['HOW', 'JOLLY', 'IT', 'WAS', 'BEING', 'YOUNG', 'HILDA'] +4446-2273-0018-1176: ref=['DO', 'YOU', 'REMEMBER', 'THAT', 'FIRST', 'WALK', 'WE', 'TOOK', 'TOGETHER', 'IN', 'PARIS'] +4446-2273-0018-1176: hyp=['DO', 'YOU', 'REMEMBER', 'THAT', 'FIRST', 'WALK', 'WE', 'TOOK', 'TOGETHER', 'IN', 'PARIS'] +4446-2273-0019-1177: ref=['COME', "WE'LL", 'HAVE', 'OUR', 'COFFEE', 'IN', 'THE', 'OTHER', 'ROOM', 'AND', 'YOU', 'CAN', 'SMOKE'] +4446-2273-0019-1177: hyp=['COME', "WE'LL", 'HAVE', 'OUR', 'COFFEE', 'IN', 'THE', 'OTHER', 'ROOM', 'AND', 'YOU', 'CAN', 'SMOKE'] +4446-2273-0020-1178: ref=['I', 'THINK', 'WE', 'DID', 'SHE', 'ANSWERED', 'DEMURELY'] +4446-2273-0020-1178: hyp=['I', 'THINK', 'WE', 'DID', 'SHE', 'ANSWERED', 'DEMURELY'] +4446-2273-0021-1179: ref=['WHAT', 'SHE', 'WANTED', 'FROM', 'US', 'WAS', 'NEITHER', 'OUR', 'FLOWERS', 'NOR', 'OUR', 'FRANCS', 'BUT', 'JUST', 'OUR', 'YOUTH'] +4446-2273-0021-1179: hyp=['WHAT', 'SHE', 'WANTED', 'FROM', 'US', 'WAS', 'NEITHER', 'OUR', 'FLOWERS', 'NOR', 'OUR', 'FRANKS', 'BUT', 'JUST', 'OUR', 'YOUTH'] +4446-2273-0022-1180: ref=['THEY', 'WERE', 'BOTH', 'REMEMBERING', 'WHAT', 'THE', 'WOMAN', 'HAD', 'SAID', 'WHEN', 'SHE', 'TOOK', 'THE', 'MONEY', 'GOD', 'GIVE', 'YOU', 'A', 'HAPPY', 'LOVE'] +4446-2273-0022-1180: hyp=['THEY', 'WERE', 'BOTH', 'REMEMBERING', 'WHAT', 'THE', 'WOMAN', 'HAD', 'SAID', 'WHEN', 'SHE', 'TOOK', 'THE', 'MONEY', 'GOD', 'GIVE', 'YOU', 'A', 'HAPPY', 'LOVE'] +4446-2273-0023-1181: ref=['THE', 'STRANGE', 'WOMAN', 'AND', 'HER', 'PASSIONATE', 'SENTENCE', 'THAT', 'RANG', 'OUT', 'SO', 'SHARPLY', 'HAD', 'FRIGHTENED', 'THEM', 'BOTH'] +4446-2273-0023-1181: hyp=['THE', 'STRANGE', 'WOMAN', 'AND', 'HER', 'PASSIONATE', 'SENTENCE', 'THAT', 'RANG', 'OUT', 'SO', 'SHARPLY', 'HAD', 'FRIGHTENED', 'THEM', 'BOTH'] +4446-2273-0024-1182: ref=['BARTLEY', 'STARTED', 'WHEN', 'HILDA', 'RANG', 'THE', 'LITTLE', 'BELL', 'BESIDE', 'HER', 'DEAR', 'ME', 'WHY', 'DID', 'YOU', 'DO', 'THAT'] +4446-2273-0024-1182: hyp=['BARTLEY', 'STARTED', 'WHEN', 'HILDA', 'RANG', 'THE', 'LITTLE', 'BELL', 'BESIDE', 'HER', 'DEAR', 'ME', 'WHY', 'DID', 'YOU', 'DO', 'THAT'] +4446-2273-0025-1183: ref=['IT', 'WAS', 'VERY', 'JOLLY', 'HE', 'MURMURED', 'LAZILY', 'AS', 'MARIE', 'CAME', 'IN', 'TO', 'TAKE', 'AWAY', 'THE', 'COFFEE'] +4446-2273-0025-1183: hyp=['IT', 'WAS', 'VERY', 'JOLLY', 'HE', 'MURMURED', 'LAZILY', 'AS', 'MARIE', 'CAME', 'IN', 'TO', 'TAKE', 'AWAY', 'THE', 'COFFEE'] +4446-2273-0026-1184: ref=['HAVE', 'I', 'TOLD', 'YOU', 'ABOUT', 'MY', 'NEW', 'PLAY'] +4446-2273-0026-1184: hyp=['HAVE', 'I', 'TOLD', 'YOU', 'ABOUT', 'MY', 'NEW', 'PLAY'] +4446-2273-0027-1185: ref=['WHEN', 'SHE', 'FINISHED', 'ALEXANDER', 'SHOOK', 'HIMSELF', 'OUT', 'OF', 'A', 'REVERIE'] +4446-2273-0027-1185: hyp=['WHEN', 'SHE', 'FINISHED', 'ALEXANDER', 'SHOOK', 'HIMSELF', 'OUT', 'OF', 'A', 'REVERIE'] +4446-2273-0028-1186: ref=['NONSENSE', 'OF', 'COURSE', 'I', "CAN'T", 'REALLY', 'SING', 'EXCEPT', 'THE', 'WAY', 'MY', 'MOTHER', 'AND', 'GRANDMOTHER', 'DID', 'BEFORE', 'ME'] +4446-2273-0028-1186: hyp=['NONSENSE', 'OF', 'COURSE', 'I', "CAN'T", 'REALLY', 'SING', 'EXCEPT', 'THE', 'WAY', 'MY', 'MOTHER', 'AND', 'GRANDMOTHER', 'DID', 'BEFORE', 'ME'] +4446-2273-0029-1187: ref=["IT'S", 
'REALLY', 'TOO', 'WARM', 'IN', 'THIS', 'ROOM', 'TO', 'SING', "DON'T", 'YOU', 'FEEL', 'IT'] +4446-2273-0029-1187: hyp=["IT'S", 'REALLY', 'TOO', 'WARM', 'IN', 'THIS', 'ROOM', 'TO', 'SING', "DON'T", 'YOU', 'FEEL', 'IT'] +4446-2273-0030-1188: ref=['ALEXANDER', 'WENT', 'OVER', 'AND', 'OPENED', 'THE', 'WINDOW', 'FOR', 'HER'] +4446-2273-0030-1188: hyp=['ALEXANDER', 'WENT', 'OVER', 'AND', 'OPENED', 'THE', 'WINDOW', 'FOR', 'HER'] +4446-2273-0031-1189: ref=['THERE', 'JUST', 'IN', 'FRONT'] +4446-2273-0031-1189: hyp=['THERE', 'JUST', 'IN', 'FRONT'] +4446-2273-0032-1190: ref=['HE', 'STOOD', 'A', 'LITTLE', 'BEHIND', 'HER', 'AND', 'TRIED', 'TO', 'STEADY', 'HIMSELF', 'AS', 'HE', 'SAID', "IT'S", 'SOFT', 'AND', 'MISTY', 'SEE', 'HOW', 'WHITE', 'THE', 'STARS', 'ARE'] +4446-2273-0032-1190: hyp=['HE', 'STOOD', 'A', 'LITTLE', 'BEHIND', 'HER', 'AND', 'TRIED', 'TO', 'STEADY', 'HIMSELF', 'AS', 'HE', 'SAID', "IT'S", 'SOFT', 'AND', 'MISTY', 'SEE', 'HOW', 'WHITE', 'THE', 'STARS', 'ARE'] +4446-2273-0033-1191: ref=['FOR', 'A', 'LONG', 'TIME', 'NEITHER', 'HILDA', 'NOR', 'BARTLEY', 'SPOKE'] +4446-2273-0033-1191: hyp=['FOR', 'A', 'LONG', 'TIME', 'NEITHER', 'HILDA', 'NOR', 'BARTLEY', 'SPOKE'] +4446-2273-0034-1192: ref=['HE', 'FELT', 'A', 'TREMOR', 'RUN', 'THROUGH', 'THE', 'SLENDER', 'YELLOW', 'FIGURE', 'IN', 'FRONT', 'OF', 'HIM'] +4446-2273-0034-1192: hyp=['HE', 'FELT', 'A', 'TREMOR', 'RUN', 'THROUGH', 'THE', 'SLENDER', 'YELLOW', 'FIGURE', 'IN', 'FRONT', 'OF', 'HIM'] +4446-2273-0035-1193: ref=['BARTLEY', 'LEANED', 'OVER', 'HER', 'SHOULDER', 'WITHOUT', 'TOUCHING', 'HER', 'AND', 'WHISPERED', 'IN', 'HER', 'EAR', 'YOU', 'ARE', 'GIVING', 'ME', 'A', 'CHANCE', 'YES'] +4446-2273-0035-1193: hyp=['BARTLEY', 'LEANED', 'OVER', 'HER', 'SHOULDER', 'WITHOUT', 'TOUCHING', 'HER', 'AND', 'WHISPERED', 'IN', 'HER', 'EAR', 'YOU', 'ARE', 'GIVING', 'ME', 'A', 'CHANCE', 'YES'] +4446-2273-0036-1194: ref=['ALEXANDER', 'UNCLENCHED', 'THE', 'TWO', 'HANDS', 'AT', 'HIS', 'SIDES'] +4446-2273-0036-1194: hyp=['ALEXANDER', 'CLENCHED', 'THE', 'TWO', 'HANDS', 'AT', 'HIS', 'SIDES'] +4446-2275-0000-1195: ref=['THE', 'STOP', 'AT', 'QUEENSTOWN', 'THE', 'TEDIOUS', 'PASSAGE', 'UP', 'THE', 'MERSEY', 'WERE', 'THINGS', 'THAT', 'HE', 'NOTED', 'DIMLY', 'THROUGH', 'HIS', 'GROWING', 'IMPATIENCE'] +4446-2275-0000-1195: hyp=['THE', 'STOP', 'AT', 'QUEENSTOWN', 'THE', 'TEDIOUS', 'PASSAGE', 'OF', 'THE', 'MERCY', 'WERE', 'THINGS', 'THAT', 'HE', 'NOTED', 'DIMLY', 'THROUGH', 'HIS', 'GROWING', 'IMPATIENCE'] +4446-2275-0001-1196: ref=['SHE', 'BLUSHED', 'AND', 'SMILED', 'AND', 'FUMBLED', 'HIS', 'CARD', 'IN', 'HER', 'CONFUSION', 'BEFORE', 'SHE', 'RAN', 'UPSTAIRS'] +4446-2275-0001-1196: hyp=['SHE', 'BLUSHED', 'AND', 'SMILED', 'AND', 'FUMBLED', 'HIS', 'CARD', 'IN', 'HER', 'CONFUSION', 'BEFORE', 'SHE', 'RAN', 'UPSTAIRS'] +4446-2275-0002-1197: ref=['ALEXANDER', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'HALLWAY', 'BUTTONING', 'AND', 'UNBUTTONING', 'HIS', 'OVERCOAT', 'UNTIL', 'SHE', 'RETURNED', 'AND', 'TOOK', 'HIM', 'UP', 'TO', "HILDA'S", 'LIVING', 'ROOM'] +4446-2275-0002-1197: hyp=['ALEXANDER', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'HALLWAY', 'BUTTONING', 'AND', 'UNBUTTONING', 'HIS', 'OVERCOAT', 'UNTIL', 'SHE', 'RETURNED', 'AND', 'TOOK', 'HIM', 'UP', 'TO', "HILDA'S", 'LIVING', 'ROOM'] +4446-2275-0003-1198: ref=['THE', 'ROOM', 'WAS', 'EMPTY', 'WHEN', 'HE', 'ENTERED'] +4446-2275-0003-1198: hyp=['THE', 'ROOM', 'WAS', 'EMPTY', 'WHEN', 'HE', 'ENTERED'] +4446-2275-0004-1199: ref=['ALEXANDER', 'DID', 'NOT', 'SIT', 'DOWN'] +4446-2275-0004-1199: hyp=['ALEXANDER', 'DID', 'NOT', 'SIT', 'DOWN'] 
+4446-2275-0005-1200: ref=['I', 'FELT', 'IT', 'IN', 'MY', 'BONES', 'WHEN', 'I', 'WOKE', 'THIS', 'MORNING', 'THAT', 'SOMETHING', 'SPLENDID', 'WAS', 'GOING', 'TO', 'TURN', 'UP'] +4446-2275-0005-1200: hyp=['I', 'FELT', 'IT', 'IN', 'MY', 'BONES', 'WHEN', 'I', 'WOKE', 'THIS', 'MORNING', 'THAT', 'SOMETHING', 'SPLENDID', 'WAS', 'GOING', 'TO', 'TURN', 'UP'] +4446-2275-0006-1201: ref=['I', 'THOUGHT', 'IT', 'MIGHT', 'BE', 'SISTER', 'KATE', 'OR', 'COUSIN', 'MIKE', 'WOULD', 'BE', 'HAPPENING', 'ALONG'] +4446-2275-0006-1201: hyp=['I', 'THOUGHT', 'IT', 'MIGHT', 'BE', 'SISTER', 'KATE', 'OR', 'COUSIN', 'MIKE', 'WOULD', 'BE', 'HAPPENING', 'ALONG'] +4446-2275-0007-1202: ref=['SHE', 'PUSHED', 'HIM', 'TOWARD', 'THE', 'BIG', 'CHAIR', 'BY', 'THE', 'FIRE', 'AND', 'SAT', 'DOWN', 'ON', 'A', 'STOOL', 'AT', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'HEARTH', 'HER', 'KNEES', 'DRAWN', 'UP', 'TO', 'HER', 'CHIN', 'LAUGHING', 'LIKE', 'A', 'HAPPY', 'LITTLE', 'GIRL'] +4446-2275-0007-1202: hyp=['SHE', 'PUSHED', 'HIM', 'TOWARD', 'THE', 'BIG', 'CHAIR', 'BY', 'THE', 'FIRE', 'AND', 'SAT', 'DOWN', 'ON', 'A', 'STOOL', 'AT', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'HEARTH', 'HER', 'KNEES', 'DRAWN', 'UP', 'TO', 'HER', 'CHIN', 'LAUGHING', 'LIKE', 'A', 'HAPPY', 'LITTLE', 'GIRL'] +4446-2275-0008-1203: ref=['WHEN', 'DID', 'YOU', 'COME', 'BARTLEY', 'AND', 'HOW', 'DID', 'IT', 'HAPPEN', 'YOU', "HAVEN'T", 'SPOKEN', 'A', 'WORD'] +4446-2275-0008-1203: hyp=['WHEN', 'DID', 'YOU', 'COME', 'BARTLEY', 'AND', 'HOW', 'DID', 'IT', 'HAPPEN', 'YOU', "HAVEN'T", 'SPOKEN', 'A', 'WORD'] +4446-2275-0009-1204: ref=['I', 'GOT', 'IN', 'ABOUT', 'TEN', 'MINUTES', 'AGO'] +4446-2275-0009-1204: hyp=['I', 'GOT', 'IN', 'ABOUT', 'TEN', 'MINUTES', 'AGO'] +4446-2275-0010-1205: ref=['ALEXANDER', 'LEANED', 'FORWARD', 'AND', 'WARMED', 'HIS', 'HANDS', 'BEFORE', 'THE', 'BLAZE'] +4446-2275-0010-1205: hyp=['ALEXANDER', 'LEANED', 'FORWARD', 'AND', 'WARMED', 'HIS', 'HANDS', 'BEFORE', 'THE', 'BLAZE'] +4446-2275-0011-1206: ref=['BARTLEY', 'BENT', 'LOWER', 'OVER', 'THE', 'FIRE'] +4446-2275-0011-1206: hyp=['BARTLEY', 'BENT', 'LOWERED', 'OVER', 'THE', 'FIRE'] +4446-2275-0012-1207: ref=['SHE', 'LOOKED', 'AT', 'HIS', 'HEAVY', 'SHOULDERS', 'AND', 'BIG', 'DETERMINED', 'HEAD', 'THRUST', 'FORWARD', 'LIKE', 'A', 'CATAPULT', 'IN', 'LEASH'] +4446-2275-0012-1207: hyp=['SHE', 'LOOKED', 'AT', 'HIS', 'HEAVY', 'SHOULDERS', 'IN', 'BIG', 'DETERMINED', 'HEAD', 'THRUST', 'FORWARD', 'LIKE', 'A', 'CATAPULT', 'IN', 'LEASH'] +4446-2275-0013-1208: ref=["I'LL", 'DO', 'ANYTHING', 'YOU', 'WISH', 'ME', 'TO', 'BARTLEY', 'SHE', 'SAID', 'TREMULOUSLY'] +4446-2275-0013-1208: hyp=["I'LL", 'DO', 'ANYTHING', 'YOU', 'WISH', 'ME', 'TO', 'BARTLEY', 'SHE', 'SAID', 'TREMULOUSLY'] +4446-2275-0014-1209: ref=['I', "CAN'T", 'STAND', 'SEEING', 'YOU', 'MISERABLE'] +4446-2275-0014-1209: hyp=['I', "CAN'T", 'STAND', 'SEEING', 'YOU', 'MISERABLE'] +4446-2275-0015-1210: ref=['HE', 'PULLED', 'UP', 'A', 'WINDOW', 'AS', 'IF', 'THE', 'AIR', 'WERE', 'HEAVY'] +4446-2275-0015-1210: hyp=['HE', 'PULLED', 'UP', 'A', 'WINDOW', 'AS', 'IF', 'THE', 'AIR', 'WERE', 'HEAVY'] +4446-2275-0016-1211: ref=['HILDA', 'WATCHED', 'HIM', 'FROM', 'HER', 'CORNER', 'TREMBLING', 'AND', 'SCARCELY', 'BREATHING', 'DARK', 'SHADOWS', 'GROWING', 'ABOUT', 'HER', 'EYES', 'IT'] +4446-2275-0016-1211: hyp=['HILDA', 'WATCHED', 'HIM', 'FROM', 'THE', 'CORNER', 'TREMBLING', 'AND', 'SCARCELY', 'BREATHING', 'DARK', 'SHADOWS', 'GROWING', 'ABOUT', 'HER', 'EYES'] +4446-2275-0017-1212: ref=['BUT', "IT'S", 'WORSE', 'NOW', "IT'S", 'UNBEARABLE'] +4446-2275-0017-1212: hyp=['BUT', "IT'S", 
'WORSE', 'NOW', "IT'S", 'UNBEARABLE'] +4446-2275-0018-1213: ref=['I', 'GET', 'NOTHING', 'BUT', 'MISERY', 'OUT', 'OF', 'EITHER'] +4446-2275-0018-1213: hyp=['I', 'GET', 'NOTHING', 'BUT', 'MISERY', 'OUT', 'OF', 'EITHER'] +4446-2275-0019-1214: ref=['THE', 'WORLD', 'IS', 'ALL', 'THERE', 'JUST', 'AS', 'IT', 'USED', 'TO', 'BE', 'BUT', 'I', "CAN'T", 'GET', 'AT', 'IT', 'ANY', 'MORE'] +4446-2275-0019-1214: hyp=['THE', 'WORLD', 'IS', 'ALL', 'THERE', 'JUST', 'AS', 'IT', 'USED', 'TO', 'BE', 'BUT', 'I', "CAN'T", 'GET', 'AT', 'IT', 'ANY', 'MORE'] +4446-2275-0020-1215: ref=['IT', 'WAS', 'MYSELF', 'I', 'WAS', 'DEFYING', 'HILDA'] +4446-2275-0020-1215: hyp=['IT', 'WAS', 'MYSELF', 'I', 'WAS', 'DEFYING', 'HELDA'] +4446-2275-0021-1216: ref=["HILDA'S", 'FACE', 'QUIVERED', 'BUT', 'SHE', 'WHISPERED', 'YES', 'I', 'THINK', 'IT', 'MUST', 'HAVE', 'BEEN'] +4446-2275-0021-1216: hyp=["HELDA'S", 'FACE', 'QUIVERED', 'BUT', 'SHE', 'WHISPERED', 'YES', 'I', 'THINK', 'IT', 'MUST', 'HAVE', 'BEEN'] +4446-2275-0022-1217: ref=['BUT', 'WHY', "DIDN'T", 'YOU', 'TELL', 'ME', 'WHEN', 'YOU', 'WERE', 'HERE', 'IN', 'THE', 'SUMMER'] +4446-2275-0022-1217: hyp=['BUT', 'WHY', "DIDN'T", 'YOU', 'TELL', 'ME', 'WHEN', 'YOU', 'WERE', 'HERE', 'IN', 'THE', 'SUMMER'] +4446-2275-0023-1218: ref=['ALEXANDER', 'GROANED', 'I', 'MEANT', 'TO', 'BUT', 'SOMEHOW', 'I', "COULDN'T"] +4446-2275-0023-1218: hyp=['ALEXANDER', 'GROANED', 'I', 'MEANT', 'TO', 'BUT', 'SOMEHOW', 'I', "COULDN'T"] +4446-2275-0024-1219: ref=['SHE', 'PRESSED', 'HIS', 'HAND', 'GENTLY', 'IN', 'GRATITUDE'] +4446-2275-0024-1219: hyp=['SHE', 'PRESSED', 'HIS', 'HAND', 'GENTLY', 'IN', 'GRATITUDE'] +4446-2275-0025-1220: ref=["WEREN'T", 'YOU', 'HAPPY', 'THEN', 'AT', 'ALL'] +4446-2275-0025-1220: hyp=["WEREN'T", 'YOU', 'HAPPY', 'THEN', 'AT', 'ALL'] +4446-2275-0026-1221: ref=['SHE', 'CLOSED', 'HER', 'EYES', 'AND', 'TOOK', 'A', 'DEEP', 'BREATH', 'AS', 'IF', 'TO', 'DRAW', 'IN', 'AGAIN', 'THE', 'FRAGRANCE', 'OF', 'THOSE', 'DAYS'] +4446-2275-0026-1221: hyp=['SHE', 'CLOSED', 'HER', 'EYES', 'AND', 'TOOK', 'A', 'DEEP', 'BREATH', 'AS', 'IF', 'TO', 'DRAW', 'IN', 'AGAIN', 'THE', 'FRAGRANCE', 'OF', 'THOSE', 'DAYS'] +4446-2275-0027-1222: ref=['HE', 'MOVED', 'UNEASILY', 'AND', 'HIS', 'CHAIR', 'CREAKED'] +4446-2275-0027-1222: hyp=['HE', 'MOVED', 'UNEASILY', 'AND', 'HIS', 'CHAIR', 'CREAKED'] +4446-2275-0028-1223: ref=['YES', 'YES', 'SHE', 'HURRIED', 'PULLING', 'HER', 'HAND', 'GENTLY', 'AWAY', 'FROM', 'HIM'] +4446-2275-0028-1223: hyp=['YES', 'YES', 'SHE', 'HURRIED', 'PULLING', 'HER', 'HAND', 'GENTLY', 'AWAY', 'FROM', 'HIM'] +4446-2275-0029-1224: ref=['PLEASE', 'TELL', 'ME', 'ONE', 'THING', 'BARTLEY', 'AT', 'LEAST', 'TELL', 'ME', 'THAT', 'YOU', 'BELIEVE', 'I', 'THOUGHT', 'I', 'WAS', 'MAKING', 'YOU', 'HAPPY'] +4446-2275-0029-1224: hyp=['PLEASE', 'TELL', 'ME', 'ONE', 'THING', 'BARTLEY', 'AT', 'LEAST', 'TELL', 'ME', 'THAT', 'YOU', 'BELIEVE', 'I', 'THOUGHT', 'I', 'WAS', 'MAKING', 'YOU', 'HAPPY'] +4446-2275-0030-1225: ref=['YES', 'HILDA', 'I', 'KNOW', 'THAT', 'HE', 'SAID', 'SIMPLY'] +4446-2275-0030-1225: hyp=['YES', 'HELDA', 'I', 'KNOW', 'THAT', 'HE', 'SAID', 'SIMPLY'] +4446-2275-0031-1226: ref=['I', 'UNDERSTAND', 'BARTLEY', 'I', 'WAS', 'WRONG'] +4446-2275-0031-1226: hyp=['I', 'UNDERSTAND', 'BARTLEY', 'I', 'WAS', 'WRONG'] +4446-2275-0032-1227: ref=['BUT', 'I', "DIDN'T", 'KNOW', "YOU'VE", 'ONLY', 'TO', 'TELL', 'ME', 'NOW'] +4446-2275-0032-1227: hyp=['BUT', 'I', "DIDN'T", 'KNOW', "YOU'VE", 'ONLY', 'TO', 'TELL', 'ME', 'NOW'] +4446-2275-0033-1228: ref=['WHAT', 'I', 'MEAN', 'IS', 'THAT', 'I', 'WANT', 'YOU', 'TO', 'PROMISE', 
'NEVER', 'TO', 'SEE', 'ME', 'AGAIN', 'NO', 'MATTER', 'HOW', 'OFTEN', 'I', 'COME', 'NO', 'MATTER', 'HOW', 'HARD', 'I', 'BEG'] +4446-2275-0033-1228: hyp=['WHAT', 'I', 'MEAN', 'IS', 'THAT', 'I', 'WANT', 'YOU', 'TO', 'PROMISE', 'NEVER', 'TO', 'SEE', 'ME', 'AGAIN', 'NO', 'MATTER', 'HOW', 'OFTEN', 'I', 'COME', 'NO', 'MATTER', 'HOW', 'HARD', 'I', 'BEG'] +4446-2275-0034-1229: ref=['KEEP', 'AWAY', 'IF', 'YOU', 'WISH', 'WHEN', 'HAVE', 'I', 'EVER', 'FOLLOWED', 'YOU'] +4446-2275-0034-1229: hyp=['KEEP', 'AWAY', 'IF', 'YOU', 'WISH', 'WHEN', 'HAVE', 'I', 'EVER', 'FOLLOWED', 'YOU'] +4446-2275-0035-1230: ref=['ALEXANDER', 'ROSE', 'AND', 'SHOOK', 'HIMSELF', 'ANGRILY', 'YES', 'I', 'KNOW', "I'M", 'COWARDLY'] +4446-2275-0035-1230: hyp=['ALEXANDER', 'ROSE', 'AND', 'SHOOK', 'HIMSELF', 'ANGRILY', 'YES', 'I', 'KNOW', "I'M", 'COWARDLY'] +4446-2275-0036-1231: ref=['HE', 'TOOK', 'HER', 'ROUGHLY', 'IN', 'HIS', 'ARMS', 'DO', 'YOU', 'KNOW', 'WHAT', 'I', 'MEAN'] +4446-2275-0036-1231: hyp=['HE', 'TOOK', 'A', 'ROUGHLY', 'IN', 'HIS', 'ARMS', 'DO', 'YOU', 'KNOW', 'WHAT', 'I', 'MEAN'] +4446-2275-0037-1232: ref=['OH', 'BARTLEY', 'WHAT', 'AM', 'I', 'TO', 'DO'] +4446-2275-0037-1232: hyp=['O', 'BARTLEY', 'WHAT', 'AM', 'I', 'TO', 'DO'] +4446-2275-0038-1233: ref=['I', 'WILL', 'ASK', 'THE', 'LEAST', 'IMAGINABLE', 'BUT', 'I', 'MUST', 'HAVE', 'SOMETHING'] +4446-2275-0038-1233: hyp=['I', 'WILL', 'ASK', 'THE', 'LEAST', 'IMAGINABLE', 'BUT', 'I', 'MUST', 'HAVE', 'SOMETHING'] +4446-2275-0039-1234: ref=['I', 'MUST', 'KNOW', 'ABOUT', 'YOU'] +4446-2275-0039-1234: hyp=['I', 'MUST', 'KNOW', 'ABOUT', 'YOU'] +4446-2275-0040-1235: ref=['THE', 'SIGHT', 'OF', 'YOU', 'BARTLEY', 'TO', 'SEE', 'YOU', 'LIVING', 'AND', 'HAPPY', 'AND', 'SUCCESSFUL', 'CAN', 'I', 'NEVER', 'MAKE', 'YOU', 'UNDERSTAND', 'WHAT', 'THAT', 'MEANS', 'TO', 'ME'] +4446-2275-0040-1235: hyp=['THE', 'SIGHT', 'OF', 'YOU', 'BARTLEY', 'TO', 'SEE', 'YOU', 'LIVING', 'AND', 'HAPPY', 'AND', 'SUCCESSFUL', 'CAN', 'I', 'NEVER', 'MAKE', 'YOU', 'UNDERSTAND', 'WHAT', 'THAT', 'MEANS', 'TO', 'ME'] +4446-2275-0041-1236: ref=['YOU', 'SEE', 'LOVING', 'SOME', 'ONE', 'AS', 'I', 'LOVE', 'YOU', 'MAKES', 'THE', 'WHOLE', 'WORLD', 'DIFFERENT'] +4446-2275-0041-1236: hyp=['YOU', 'SEE', 'LOVING', 'SOME', 'ONE', 'AS', 'I', 'LOVE', 'YOU', 'MAKES', 'THE', 'WHOLE', 'WORLD', 'DIFFERENT'] +4446-2275-0042-1237: ref=['AND', 'THEN', 'YOU', 'CAME', 'BACK', 'NOT', 'CARING', 'VERY', 'MUCH', 'BUT', 'IT', 'MADE', 'NO', 'DIFFERENCE'] +4446-2275-0042-1237: hyp=['AND', 'THEN', 'YOU', 'CAME', 'BACK', 'NOT', 'CARING', 'VERY', 'MUCH', 'BUT', 'IT', 'MADE', 'NO', 'DIFFERENCE'] +4446-2275-0043-1238: ref=['BARTLEY', 'BENT', 'OVER', 'AND', 'TOOK', 'HER', 'IN', 'HIS', 'ARMS', 'KISSING', 'HER', 'MOUTH', 'AND', 'HER', 'WET', 'TIRED', 'EYES'] +4446-2275-0043-1238: hyp=['BARTLEY', 'BENT', 'OVER', 'AND', 'TOOK', 'HER', 'IN', 'HIS', 'ARMS', 'KISSING', 'HER', 'MOUTH', 'AND', 'HER', 'WET', 'TIRED', 'EYES'] +4446-2275-0044-1239: ref=["DON'T", 'CRY', "DON'T", 'CRY', 'HE', 'WHISPERED'] +4446-2275-0044-1239: hyp=['A', 'TALL', 'CRY', "DON'T", 'CRY', 'HE', 'WHISPERED'] +4446-2275-0045-1240: ref=["WE'VE", 'TORTURED', 'EACH', 'OTHER', 'ENOUGH', 'FOR', 'TONIGHT'] +4446-2275-0045-1240: hyp=['WITH', 'TORTURED', 'EACH', 'OTHER', 'ENOUGH', 'FOR', 'TO', 'NIGHT'] +4507-16021-0000-1469: ref=['CHAPTER', 'ONE', 'ORIGIN'] +4507-16021-0000-1469: hyp=['CHAPTER', 'ONE', 'ORIGIN'] +4507-16021-0001-1470: ref=['IT', 'ENGENDERS', 'A', 'WHOLE', 'WORLD', 'LA', 'PEGRE', 'FOR', 'WHICH', 'READ', 'THEFT', 'AND', 'A', 'HELL', 'LA', 'PEGRENNE', 'FOR', 'WHICH', 'READ', 'HUNGER'] 
+4507-16021-0001-1470: hyp=['IT', 'ENGENDERS', 'A', 'WHOLE', 'WORLD', 'LAPE', 'FOR', 'WHICH', 'RED', 'THEFT', 'AND', 'A', 'HELL', 'LA', 'PAGRIN', 'FOR', 'WHICH', 'RED', 'HUNGER'] +4507-16021-0002-1471: ref=['THUS', 'IDLENESS', 'IS', 'THE', 'MOTHER'] +4507-16021-0002-1471: hyp=['THUS', 'IDLENESS', 'IS', 'THE', 'MOTHER'] +4507-16021-0003-1472: ref=['SHE', 'HAS', 'A', 'SON', 'THEFT', 'AND', 'A', 'DAUGHTER', 'HUNGER'] +4507-16021-0003-1472: hyp=['SHE', 'HAS', 'A', 'SON', 'THEFT', 'AND', 'A', 'DAUGHTER', 'HUNGER'] +4507-16021-0004-1473: ref=['WHAT', 'IS', 'SLANG'] +4507-16021-0004-1473: hyp=['WHAT', 'IS', 'SLANG'] +4507-16021-0005-1474: ref=['WE', 'HAVE', 'NEVER', 'UNDERSTOOD', 'THIS', 'SORT', 'OF', 'OBJECTIONS'] +4507-16021-0005-1474: hyp=['WE', 'HAVE', 'NEVER', 'UNDERSTOOD', 'THIS', 'SORT', 'OF', 'OBJECTIONS'] +4507-16021-0006-1475: ref=['SLANG', 'IS', 'ODIOUS'] +4507-16021-0006-1475: hyp=['SLANG', 'IS', 'ODIOUS'] +4507-16021-0007-1476: ref=['SLANG', 'MAKES', 'ONE', 'SHUDDER'] +4507-16021-0007-1476: hyp=['SLANG', 'MAKES', 'ONE', 'SHUDDER'] +4507-16021-0008-1477: ref=['WHO', 'DENIES', 'THAT', 'OF', 'COURSE', 'IT', 'DOES'] +4507-16021-0008-1477: hyp=['WHO', 'DENIES', 'THAT', 'OF', 'COURSE', 'IT', 'DOES'] +4507-16021-0009-1478: ref=['WHEN', 'IT', 'IS', 'A', 'QUESTION', 'OF', 'PROBING', 'A', 'WOUND', 'A', 'GULF', 'A', 'SOCIETY', 'SINCE', 'WHEN', 'HAS', 'IT', 'BEEN', 'CONSIDERED', 'WRONG', 'TO', 'GO', 'TOO', 'FAR', 'TO', 'GO', 'TO', 'THE', 'BOTTOM'] +4507-16021-0009-1478: hyp=['WHEN', 'IT', 'IS', 'A', 'QUESTION', 'OF', 'PROBING', 'A', 'WOUND', 'A', 'GULF', 'A', 'SOCIETY', 'SINCE', 'ONE', 'HAS', 'IT', 'BEEN', 'CONSIDERED', 'WRONG', 'TO', 'GO', 'TOO', 'FAR', 'TO', 'GO', 'TO', 'THE', 'BOTTOM'] +4507-16021-0010-1479: ref=['WE', 'HAVE', 'ALWAYS', 'THOUGHT', 'THAT', 'IT', 'WAS', 'SOMETIMES', 'A', 'COURAGEOUS', 'ACT', 'AND', 'AT', 'LEAST', 'A', 'SIMPLE', 'AND', 'USEFUL', 'DEED', 'WORTHY', 'OF', 'THE', 'SYMPATHETIC', 'ATTENTION', 'WHICH', 'DUTY', 'ACCEPTED', 'AND', 'FULFILLED', 'MERITS'] +4507-16021-0010-1479: hyp=['WE', 'HAVE', 'ALWAYS', 'THOUGHT', 'THAT', 'IT', 'WAS', 'SOMETIMES', 'A', 'COURAGEOUS', 'ACT', 'AND', 'AT', 'LEAST', 'A', 'SIMPLE', 'AND', 'USEFUL', 'DEED', 'WORTHY', 'OF', 'THE', 'SYMPATHETIC', 'ATTENTION', 'WHICH', 'DUTY', 'ACCEPTED', 'AND', 'FULFILLED', 'MERITS'] +4507-16021-0011-1480: ref=['WHY', 'SHOULD', 'ONE', 'NOT', 'EXPLORE', 'EVERYTHING', 'AND', 'STUDY', 'EVERYTHING'] +4507-16021-0011-1480: hyp=['WHY', 'SHOULD', 'ONE', 'NOT', 'EXPLORE', 'EVERYTHING', 'AND', 'STUDY', 'EVERYTHING'] +4507-16021-0012-1481: ref=['WHY', 'SHOULD', 'ONE', 'HALT', 'ON', 'THE', 'WAY'] +4507-16021-0012-1481: hyp=['WHY', 'SHOULD', 'ONE', 'HALT', 'ON', 'THE', 'WAY'] +4507-16021-0013-1482: ref=['NOTHING', 'IS', 'MORE', 'LUGUBRIOUS', 'THAN', 'THE', 'CONTEMPLATION', 'THUS', 'IN', 'ITS', 'NUDITY', 'IN', 'THE', 'BROAD', 'LIGHT', 'OF', 'THOUGHT', 'OF', 'THE', 'HORRIBLE', 'SWARMING', 'OF', 'SLANG'] +4507-16021-0013-1482: hyp=['NOTHING', 'IS', 'MORE', 'LUGUBRIOUS', 'THAN', 'THE', 'CONTEMPLATION', 'THUS', 'IN', 'ITS', 'NUDITY', 'IN', 'THE', 'BROAD', 'LIGHT', 'OF', 'THOUGHT', 'OF', 'THE', 'HORRIBLE', 'SWARMING', 'OF', 'SLANG'] +4507-16021-0014-1483: ref=['NOW', 'WHEN', 'HAS', 'HORROR', 'EVER', 'EXCLUDED', 'STUDY'] +4507-16021-0014-1483: hyp=['NO', 'WHEN', 'HAS', 'HORROR', 'EVER', 'EXCLUDED', 'STUDY'] +4507-16021-0015-1484: ref=['SINCE', 'WHEN', 'HAS', 'MALADY', 'BANISHED', 'MEDICINE'] +4507-16021-0015-1484: hyp=['SINCE', 'WHEN', 'HAS', 'MALADY', 'BANISHED', 'MEDICINE'] +4507-16021-0016-1485: ref=['CAN', 'ONE', 'IMAGINE', 
'A', 'NATURALIST', 'REFUSING', 'TO', 'STUDY', 'THE', 'VIPER', 'THE', 'BAT', 'THE', 'SCORPION', 'THE', 'CENTIPEDE', 'THE', 'TARANTULA', 'AND', 'ONE', 'WHO', 'WOULD', 'CAST', 'THEM', 'BACK', 'INTO', 'THEIR', 'DARKNESS', 'SAYING', 'OH', 'HOW', 'UGLY', 'THAT', 'IS'] +4507-16021-0016-1485: hyp=['CAN', 'ONE', 'IMAGINE', 'A', 'NATURALIST', 'REFUSING', 'TO', 'STUDY', 'THE', 'VIPER', 'THE', 'BAT', 'THE', 'SCORPION', 'THE', 'CENTIPEDE', 'THE', 'TURANSULA', 'AND', 'ONE', 'WHO', 'WOULD', 'CAST', 'THEM', 'BACK', 'INTO', 'THEIR', 'DARKNESS', 'SAYING', 'O', 'HOW', 'UGLY', 'THAT', 'IS'] +4507-16021-0017-1486: ref=['HE', 'WOULD', 'BE', 'LIKE', 'A', 'PHILOLOGIST', 'REFUSING', 'TO', 'EXAMINE', 'A', 'FACT', 'IN', 'LANGUAGE', 'A', 'PHILOSOPHER', 'HESITATING', 'TO', 'SCRUTINIZE', 'A', 'FACT', 'IN', 'HUMANITY'] +4507-16021-0017-1486: hyp=['HE', 'WOULD', 'BE', 'LIKE', 'A', 'PHILOLOGIST', 'REFUSING', 'TO', 'EXAMINE', 'A', 'FACT', 'IN', 'LANGUAGE', 'A', 'PHILOSOPHER', 'HESITATING', 'TO', 'SCRUTINIZE', 'A', 'FACT', 'IN', 'HUMANITY'] +4507-16021-0018-1487: ref=['WHAT', 'IS', 'SLANG', 'PROPERLY', 'SPEAKING'] +4507-16021-0018-1487: hyp=['WHAT', 'IS', 'SLANG', 'PROPERLY', 'SPEAKING'] +4507-16021-0019-1488: ref=['IT', 'IS', 'THE', 'LANGUAGE', 'OF', 'WRETCHEDNESS'] +4507-16021-0019-1488: hyp=['IT', 'IS', 'THE', 'LANGUAGE', 'OF', 'WRETCHEDNESS'] +4507-16021-0020-1489: ref=['WE', 'MAY', 'BE', 'STOPPED', 'THE', 'FACT', 'MAY', 'BE', 'PUT', 'TO', 'US', 'IN', 'GENERAL', 'TERMS', 'WHICH', 'IS', 'ONE', 'WAY', 'OF', 'ATTENUATING', 'IT', 'WE', 'MAY', 'BE', 'TOLD', 'THAT', 'ALL', 'TRADES', 'PROFESSIONS', 'IT', 'MAY', 'BE', 'ADDED', 'ALL', 'THE', 'ACCIDENTS', 'OF', 'THE', 'SOCIAL', 'HIERARCHY', 'AND', 'ALL', 'FORMS', 'OF', 'INTELLIGENCE', 'HAVE', 'THEIR', 'OWN', 'SLANG'] +4507-16021-0020-1489: hyp=['WE', 'MAY', 'BE', 'STOPPED', 'THE', 'FACT', 'MAY', 'BE', 'PUT', 'TO', 'US', 'IN', 'GENERAL', 'TERMS', 'WHICH', 'IS', 'ONE', 'WAY', 'OF', 'ATTENUATING', 'IT', 'WE', 'MAY', 'BE', 'TOLD', 'THAT', 'ALL', 'TRADES', 'PROFESSIONS', 'IT', 'MAY', 'BE', 'ADDED', 'ALL', 'THE', 'ACCIDENTS', 'OF', 'THE', 'SOCIAL', 'HIERARCHY', 'AND', 'ALL', 'FORMS', 'OF', 'INTELLIGENCE', 'HAVE', 'THEIR', 'OWN', 'SLANG'] +4507-16021-0021-1490: ref=['THE', 'PAINTER', 'WHO', 'SAYS', 'MY', 'GRINDER', 'THE', 'NOTARY', 'WHO', 'SAYS', 'MY', 'SKIP', 'THE', 'GUTTER', 'THE', 'HAIRDRESSER', 'WHO', 'SAYS', 'MY', 'MEALYBACK', 'THE', 'COBBLER', 'WHO', 'SAYS', 'MY', 'CUB', 'TALKS', 'SLANG'] +4507-16021-0021-1490: hyp=['THE', 'PAINTER', 'WHO', 'SAYS', 'MY', 'GRINDER', 'THE', 'NOTARY', 'WHO', 'SAYS', 'MY', 'SKIP', 'THE', 'GUTTER', 'THE', 'HAIR', 'DRESSER', 'WHO', 'SAYS', 'MY', 'MEALEY', 'BACK', 'THE', 'COBBLER', 'WHO', 'SAYS', 'MY', 'CUB', 'TALKS', 'SLING'] +4507-16021-0022-1491: ref=['THERE', 'IS', 'THE', 'SLANG', 'OF', 'THE', 'AFFECTED', 'LADY', 'AS', 'WELL', 'AS', 'OF', 'THE', 'PRECIEUSES'] +4507-16021-0022-1491: hyp=['THERE', 'IS', 'THE', 'SLAYING', 'OF', 'THE', 'AFFECTED', 'LADY', 'AS', 'WELL', 'AS', 'OF', 'THE', 'PURSUS'] +4507-16021-0023-1492: ref=['THE', 'SUGAR', 'MANUFACTURER', 'WHO', 'SAYS', 'LOAF', 'CLARIFIED', 'LUMPS', 'BASTARD', 'COMMON', 'BURNT', 'THIS', 'HONEST', 'MANUFACTURER', 'TALKS', 'SLANG'] +4507-16021-0023-1492: hyp=['THE', 'SUGAR', 'MANUFACTURER', 'WHO', 'SAYS', 'LOAF', 'CLARIFIED', 'LUMPS', 'BASTARD', 'COMMON', 'BURNT', 'THIS', 'HONEST', 'MANUFACTURER', 'TALKS', 'SLANG'] +4507-16021-0024-1493: ref=['ALGEBRA', 'MEDICINE', 'BOTANY', 'HAVE', 'EACH', 'THEIR', 'SLANG'] +4507-16021-0024-1493: hyp=['ALGEBRA', 'MEDICINE', 'BARTANY', 'HAVE', 'EACH', 'THEIR', 'SLANG'] 
+4507-16021-0025-1494: ref=['TO', 'MEET', 'THE', 'NEEDS', 'OF', 'THIS', 'CONFLICT', 'WRETCHEDNESS', 'HAS', 'INVENTED', 'A', 'LANGUAGE', 'OF', 'COMBAT', 'WHICH', 'IS', 'SLANG'] +4507-16021-0025-1494: hyp=['TO', 'MEET', 'THE', 'NEEDS', 'OF', 'THIS', 'CONFLICT', 'WRETCHEDNESS', 'HAS', 'INVENTED', 'A', 'LANGUAGE', 'OF', 'COMBAT', 'WHICH', 'IS', 'SLANG'] +4507-16021-0026-1495: ref=['TO', 'KEEP', 'AFLOAT', 'AND', 'TO', 'RESCUE', 'FROM', 'OBLIVION', 'TO', 'HOLD', 'ABOVE', 'THE', 'GULF', 'WERE', 'IT', 'BUT', 'A', 'FRAGMENT', 'OF', 'SOME', 'LANGUAGE', 'WHICH', 'MAN', 'HAS', 'SPOKEN', 'AND', 'WHICH', 'WOULD', 'OTHERWISE', 'BE', 'LOST', 'THAT', 'IS', 'TO', 'SAY', 'ONE', 'OF', 'THE', 'ELEMENTS', 'GOOD', 'OR', 'BAD', 'OF', 'WHICH', 'CIVILIZATION', 'IS', 'COMPOSED', 'OR', 'BY', 'WHICH', 'IT', 'IS', 'COMPLICATED', 'TO', 'EXTEND', 'THE', 'RECORDS', 'OF', 'SOCIAL', 'OBSERVATION', 'IS', 'TO', 'SERVE', 'CIVILIZATION', 'ITSELF'] +4507-16021-0026-1495: hyp=['TO', 'KEEP', 'AFLOAT', 'AND', 'TO', 'RESCUE', 'FROM', 'OBLIVION', 'TO', 'HOLD', 'ABOVE', 'THE', 'GULF', 'WHERE', 'IT', 'BUT', 'A', 'FRAGMENT', 'OF', 'SOME', 'LANGUAGE', 'WHICH', 'MAN', 'HAS', 'SPOKEN', 'AND', 'WHICH', 'WOULD', 'OTHERWISE', 'BE', 'LOST', 'THAT', 'IS', 'TO', 'SAY', 'ONE', 'OF', 'THE', 'ELEMENTS', 'GOOD', 'OR', 'BAD', 'OF', 'WHICH', 'CIVILIZATION', 'IS', 'COMPOSED', 'OR', 'BY', 'WHICH', 'IT', 'IS', 'COMPLICATED', 'TO', 'EXTEND', 'THE', 'RECORDS', 'OF', 'SOCIAL', 'OBSERVATION', 'IS', 'TO', 'SERVE', 'CIVILIZATION', 'ITSELF'] +4507-16021-0027-1496: ref=['PHOENICIAN', 'VERY', 'GOOD'] +4507-16021-0027-1496: hyp=['PHOENICIAN', 'VERY', 'GOOD'] +4507-16021-0028-1497: ref=['EVEN', 'DIALECT', 'LET', 'THAT', 'PASS'] +4507-16021-0028-1497: hyp=['EVEN', 'DIALECT', 'LET', 'THAT', 'PASS'] +4507-16021-0029-1498: ref=['TO', 'THIS', 'WE', 'REPLY', 'IN', 'ONE', 'WORD', 'ONLY'] +4507-16021-0029-1498: hyp=['TO', 'THIS', 'WE', 'REPLY', 'IN', 'ONE', 'WORD', 'ONLY'] +4507-16021-0030-1499: ref=['ASSUREDLY', 'IF', 'THE', 'TONGUE', 'WHICH', 'A', 'NATION', 'OR', 'A', 'PROVINCE', 'HAS', 'SPOKEN', 'IS', 'WORTHY', 'OF', 'INTEREST', 'THE', 'LANGUAGE', 'WHICH', 'HAS', 'BEEN', 'SPOKEN', 'BY', 'A', 'MISERY', 'IS', 'STILL', 'MORE', 'WORTHY', 'OF', 'ATTENTION', 'AND', 'STUDY'] +4507-16021-0030-1499: hyp=['ASSUREDLY', 'IF', 'THE', 'TONGUE', 'WHICH', 'A', 'NATION', 'OR', 'A', 'PROVINCE', 'HAS', 'SPOKEN', 'IS', 'WORTHY', 'OF', 'INTEREST', 'THE', 'LANGUAGE', 'WHICH', 'HAS', 'BEEN', 'SPOKEN', 'BY', 'A', 'MISERY', 'IS', 'STILL', 'MORE', 'WORTHY', 'OF', 'ATTENTION', 'AND', 'STUDY'] +4507-16021-0031-1500: ref=['AND', 'THEN', 'WE', 'INSIST', 'UPON', 'IT', 'THE', 'STUDY', 'OF', 'SOCIAL', 'DEFORMITIES', 'AND', 'INFIRMITIES', 'AND', 'THE', 'TASK', 'OF', 'POINTING', 'THEM', 'OUT', 'WITH', 'A', 'VIEW', 'TO', 'REMEDY', 'IS', 'NOT', 'A', 'BUSINESS', 'IN', 'WHICH', 'CHOICE', 'IS', 'PERMITTED'] +4507-16021-0031-1500: hyp=['AND', 'THEN', 'WE', 'INSIST', 'UPON', 'IT', 'THE', 'STUDY', 'OF', 'SOCIAL', 'DEFORMITIES', 'AND', 'INFIRMITIES', 'AND', 'THE', 'TASK', 'OF', 'POINTING', 'THEM', 'OUT', 'WITH', 'THE', 'VIEW', 'TO', 'REMEDY', 'IS', 'NOT', 'A', 'BUSINESS', 'IN', 'WHICH', 'CHOICEST', 'PERMITTED'] +4507-16021-0032-1501: ref=['HE', 'MUST', 'DESCEND', 'WITH', 'HIS', 'HEART', 'FULL', 'OF', 'CHARITY', 'AND', 'SEVERITY', 'AT', 'THE', 'SAME', 'TIME', 'AS', 'A', 'BROTHER', 'AND', 'AS', 'A', 'JUDGE', 'TO', 'THOSE', 'IMPENETRABLE', 'CASEMATES', 'WHERE', 'CRAWL', 'PELL', 'MELL', 'THOSE', 'WHO', 'BLEED', 'AND', 'THOSE', 'WHO', 'DEAL', 'THE', 'BLOW', 'THOSE', 'WHO', 'WEEP', 'AND', 'THOSE', 'WHO', 'CURSE', 
'THOSE', 'WHO', 'FAST', 'AND', 'THOSE', 'WHO', 'DEVOUR', 'THOSE', 'WHO', 'ENDURE', 'EVIL', 'AND', 'THOSE', 'WHO', 'INFLICT', 'IT'] +4507-16021-0032-1501: hyp=['HE', 'MUST', 'DESCEND', 'WITH', 'HIS', 'HEART', 'FULL', 'OF', 'CHARITY', 'AND', 'SEVERITY', 'AT', 'THE', 'SAME', 'TIME', 'AS', 'A', 'BROTHER', 'AND', 'AS', 'A', 'JUDGE', 'TO', 'THOSE', 'IMPENETRABLE', 'CASEMATES', 'WERE', 'CRAWL', 'PELL', 'MELL', 'THOSE', 'WHO', 'BLEED', 'AND', 'THOSE', 'WHO', 'DEAL', 'THE', 'BLOW', 'THOSE', 'WHO', 'WEEP', 'IN', 'THOSE', 'WHO', 'CURSE', 'THOSE', 'WHO', 'FAST', 'IN', 'THOSE', 'WHO', 'DEVOUR', 'THOSE', 'WHO', 'ENDURE', 'EVIL', 'AND', 'THOSE', 'WHO', 'INFLICT', 'IT'] +4507-16021-0033-1502: ref=['DO', 'WE', 'REALLY', 'KNOW', 'THE', 'MOUNTAIN', 'WELL', 'WHEN', 'WE', 'ARE', 'NOT', 'ACQUAINTED', 'WITH', 'THE', 'CAVERN'] +4507-16021-0033-1502: hyp=['DO', 'WE', 'REALLY', 'KNOW', 'THE', 'MOUNTAIN', 'WELL', 'WHEN', 'WE', 'ARE', 'NOT', 'ACQUAINTED', 'WITH', 'THE', 'CAVERN'] +4507-16021-0034-1503: ref=['THEY', 'CONSTITUTE', 'TWO', 'DIFFERENT', 'ORDERS', 'OF', 'FACTS', 'WHICH', 'CORRESPOND', 'TO', 'EACH', 'OTHER', 'WHICH', 'ARE', 'ALWAYS', 'INTERLACED', 'AND', 'WHICH', 'OFTEN', 'BRING', 'FORTH', 'RESULTS'] +4507-16021-0034-1503: hyp=['THEY', 'CONSTITUTE', 'TWO', 'DIFFERENT', 'ORDERS', 'OF', 'FACTS', 'WHICH', 'CORRESPOND', 'TO', 'EACH', 'OTHER', 'WHICH', 'ARE', 'ALWAYS', 'INTERLACED', 'AND', 'WHICH', 'OFTEN', 'BRING', 'FORTH', 'RESULTS'] +4507-16021-0035-1504: ref=['TRUE', 'HISTORY', 'BEING', 'A', 'MIXTURE', 'OF', 'ALL', 'THINGS', 'THE', 'TRUE', 'HISTORIAN', 'MINGLES', 'IN', 'EVERYTHING'] +4507-16021-0035-1504: hyp=['TRUE', 'HISTORY', 'BEING', 'A', 'MIXTURE', 'OF', 'ALL', 'THINGS', 'THE', 'TRUE', 'HISTORIAN', 'MINGLES', 'IN', 'EVERYTHING'] +4507-16021-0036-1505: ref=['FACTS', 'FORM', 'ONE', 'OF', 'THESE', 'AND', 'IDEAS', 'THE', 'OTHER'] +4507-16021-0036-1505: hyp=['FACTS', 'FORM', 'ONE', 'OF', 'THESE', 'AND', 'IDEAS', 'THE', 'OTHER'] +4507-16021-0037-1506: ref=['THERE', 'IT', 'CLOTHES', 'ITSELF', 'IN', 'WORD', 'MASKS', 'IN', 'METAPHOR', 'RAGS'] +4507-16021-0037-1506: hyp=['THERE', 'IT', 'CLOTHES', 'ITSELF', 'IN', 'WORD', 'MASKS', 'IN', 'METAPHOR', 'RAGS'] +4507-16021-0038-1507: ref=['IN', 'THIS', 'GUISE', 'IT', 'BECOMES', 'HORRIBLE'] +4507-16021-0038-1507: hyp=['IN', 'THIS', 'SKIES', 'IT', 'BECOMES', 'HORRIBLE'] +4507-16021-0039-1508: ref=['ONE', 'PERCEIVES', 'WITHOUT', 'UNDERSTANDING', 'IT', 'A', 'HIDEOUS', 'MURMUR', 'SOUNDING', 'ALMOST', 'LIKE', 'HUMAN', 'ACCENTS', 'BUT', 'MORE', 'NEARLY', 'RESEMBLING', 'A', 'HOWL', 'THAN', 'AN', 'ARTICULATE', 'WORD'] +4507-16021-0039-1508: hyp=['ONE', 'PERCEIVES', 'WITHOUT', 'UNDERSTANDING', 'IT', 'A', 'HIDEOUS', 'MURMUR', 'SOUNDING', 'ALMOST', 'LIKE', 'HUMAN', 'ACCENTS', 'BUT', 'MORE', 'NEARLY', 'RESEMBLING', 'A', 'HOWL', 'THAN', 'AN', 'ARTICULATE', 'WORD'] +4507-16021-0040-1509: ref=['ONE', 'THINKS', 'ONE', 'HEARS', 'HYDRAS', 'TALKING'] +4507-16021-0040-1509: hyp=['ONE', 'THINKS', 'ONE', 'HEARS', 'HYDRAS', 'TALKING'] +4507-16021-0041-1510: ref=['IT', 'IS', 'UNINTELLIGIBLE', 'IN', 'THE', 'DARK'] +4507-16021-0041-1510: hyp=['IT', 'IS', 'UNINTELLIGIBLE', 'IN', 'THE', 'DARK'] +4507-16021-0042-1511: ref=['IT', 'IS', 'BLACK', 'IN', 'MISFORTUNE', 'IT', 'IS', 'BLACKER', 'STILL', 'IN', 'CRIME', 'THESE', 'TWO', 'BLACKNESSES', 'AMALGAMATED', 'COMPOSE', 'SLANG'] +4507-16021-0042-1511: hyp=['IT', 'IS', 'BLACK', 'AND', 'MISFORTUNE', 'IT', 'IS', 'BLACKER', 'STILL', 'AND', 'CRIME', 'THESE', 'TWO', 'BLACKNESSES', 'AMALGAMATED', 'COMPOSED', 'SLING'] +4507-16021-0043-1512: ref=['THE', 'EARTH', 
'IS', 'NOT', 'DEVOID', 'OF', 'RESEMBLANCE', 'TO', 'A', 'JAIL'] +4507-16021-0043-1512: hyp=['THE', 'EARTH', 'IS', 'NOT', 'DEVOID', 'OF', 'RESEMBLANCE', 'TO', 'A', 'JAIL'] +4507-16021-0044-1513: ref=['LOOK', 'CLOSELY', 'AT', 'LIFE'] +4507-16021-0044-1513: hyp=['LOOK', 'CLOSELY', 'AT', 'LIFE'] +4507-16021-0045-1514: ref=['IT', 'IS', 'SO', 'MADE', 'THAT', 'EVERYWHERE', 'WE', 'FEEL', 'THE', 'SENSE', 'OF', 'PUNISHMENT'] +4507-16021-0045-1514: hyp=['IT', 'IS', 'SO', 'MADE', 'THAT', 'EVERYWHERE', 'WE', 'FEEL', 'THE', 'SENSE', 'OF', 'PUNISHMENT'] +4507-16021-0046-1515: ref=['EACH', 'DAY', 'HAS', 'ITS', 'OWN', 'GREAT', 'GRIEF', 'OR', 'ITS', 'LITTLE', 'CARE'] +4507-16021-0046-1515: hyp=['EACH', 'DAY', 'HAS', 'ITS', 'OWN', 'GREAT', 'GRIEF', 'FOR', 'ITS', 'LITTLE', 'CARE'] +4507-16021-0047-1516: ref=['YESTERDAY', 'YOU', 'WERE', 'TREMBLING', 'FOR', 'A', 'HEALTH', 'THAT', 'IS', 'DEAR', 'TO', 'YOU', 'TO', 'DAY', 'YOU', 'FEAR', 'FOR', 'YOUR', 'OWN', 'TO', 'MORROW', 'IT', 'WILL', 'BE', 'ANXIETY', 'ABOUT', 'MONEY', 'THE', 'DAY', 'AFTER', 'TO', 'MORROW', 'THE', 'DIATRIBE', 'OF', 'A', 'SLANDERER', 'THE', 'DAY', 'AFTER', 'THAT', 'THE', 'MISFORTUNE', 'OF', 'SOME', 'FRIEND', 'THEN', 'THE', 'PREVAILING', 'WEATHER', 'THEN', 'SOMETHING', 'THAT', 'HAS', 'BEEN', 'BROKEN', 'OR', 'LOST', 'THEN', 'A', 'PLEASURE', 'WITH', 'WHICH', 'YOUR', 'CONSCIENCE', 'AND', 'YOUR', 'VERTEBRAL', 'COLUMN', 'REPROACH', 'YOU', 'AGAIN', 'THE', 'COURSE', 'OF', 'PUBLIC', 'AFFAIRS'] +4507-16021-0047-1516: hyp=['YESTERDAY', 'YOU', 'WERE', 'TREMBLING', 'FOR', 'A', 'HEALTH', 'THAT', 'IS', 'DEAR', 'TO', 'YOU', 'TO', 'DAY', 'YOU', 'FEAR', 'FOR', 'YOUR', 'OWN', 'TO', 'MORROW', 'IT', 'WILL', 'BE', 'ANXIETY', 'ABOUT', 'MONEY', 'THE', 'DAY', 'AFTER', 'TO', 'MORROW', 'THE', 'DIETRIBE', 'OF', 'A', 'SLANDERER', 'THE', 'DAY', 'AFTER', 'THAT', 'THE', 'MISFORTUNE', 'OF', 'SOME', 'FRIEND', 'THEN', 'THE', 'PREVAILING', 'WEATHER', 'THEN', 'SOMETHING', 'THAT', 'HAS', 'BEEN', 'BROKEN', 'OR', 'LOST', 'THEN', 'A', 'PLEASURE', 'WITH', 'WHICH', 'YOUR', 'CONSCIENCE', 'AND', 'YOUR', 'VERTEBRAL', 'COLUMN', 'REPROACH', 'YOU', 'AGAIN', 'THE', 'COURSE', 'OF', 'PUBLIC', 'AFFAIRS'] +4507-16021-0048-1517: ref=['THIS', 'WITHOUT', 'RECKONING', 'IN', 'THE', 'PAINS', 'OF', 'THE', 'HEART', 'AND', 'SO', 'IT', 'GOES', 'ON'] +4507-16021-0048-1517: hyp=['THIS', 'WITHOUT', 'RECKONING', 'IN', 'THE', 'PAINS', 'OF', 'THE', 'HEART', 'AND', 'SO', 'TO', 'GOES', 'ON'] +4507-16021-0049-1518: ref=['THERE', 'IS', 'HARDLY', 'ONE', 'DAY', 'OUT', 'OF', 'A', 'HUNDRED', 'WHICH', 'IS', 'WHOLLY', 'JOYOUS', 'AND', 'SUNNY'] +4507-16021-0049-1518: hyp=['THERE', 'IS', 'HARDLY', 'ONE', 'DAY', 'OUT', 'OF', 'A', 'HUNDRED', 'WHICH', 'IS', 'WHOLLY', 'JOYOUS', 'AND', 'SUNNY'] +4507-16021-0050-1519: ref=['AND', 'YOU', 'BELONG', 'TO', 'THAT', 'SMALL', 'CLASS', 'WHO', 'ARE', 'HAPPY'] +4507-16021-0050-1519: hyp=['AND', 'YOU', 'BELONG', 'TO', 'THAT', 'SMALL', 'CLASS', 'WHO', 'ARE', 'HAPPY'] +4507-16021-0051-1520: ref=['IN', 'THIS', 'WORLD', 'EVIDENTLY', 'THE', 'VESTIBULE', 'OF', 'ANOTHER', 'THERE', 'ARE', 'NO', 'FORTUNATE'] +4507-16021-0051-1520: hyp=['IN', 'THIS', "WORLD'S", 'EVIDENTLY', 'THE', 'VESTIBULE', 'OF', 'ANOTHER', 'THERE', 'ARE', 'NO', 'FORTUNATE'] +4507-16021-0052-1521: ref=['THE', 'REAL', 'HUMAN', 'DIVISION', 'IS', 'THIS', 'THE', 'LUMINOUS', 'AND', 'THE', 'SHADY'] +4507-16021-0052-1521: hyp=['THE', 'REAL', 'HUMAN', 'DIVISION', 'IS', 'THIS', 'THE', 'LUMINOUS', 'AND', 'THE', 'SHADY'] +4507-16021-0053-1522: ref=['TO', 'DIMINISH', 'THE', 'NUMBER', 'OF', 'THE', 'SHADY', 'TO', 'AUGMENT', 'THE', 'NUMBER', 'OF', 
'THE', 'LUMINOUS', 'THAT', 'IS', 'THE', 'OBJECT'] +4507-16021-0053-1522: hyp=['TO', 'DIMINISH', 'THE', 'NUMBER', 'OF', 'THE', 'SHADY', 'TO', 'AUGMENT', 'THE', 'NUMBER', 'OF', 'THE', 'LUMINOUS', 'THAT', 'IS', 'THE', 'OBJECT'] +4507-16021-0054-1523: ref=['THAT', 'IS', 'WHY', 'WE', 'CRY', 'EDUCATION', 'SCIENCE'] +4507-16021-0054-1523: hyp=['THAT', 'IS', 'WHY', 'WE', 'CRY', 'EDUCATION', 'SCIENCE'] +4507-16021-0055-1524: ref=['TO', 'TEACH', 'READING', 'MEANS', 'TO', 'LIGHT', 'THE', 'FIRE', 'EVERY', 'SYLLABLE', 'SPELLED', 'OUT', 'SPARKLES'] +4507-16021-0055-1524: hyp=['TO', 'TEACH', 'READING', 'MEANS', 'TO', 'LIGHT', 'THE', 'FIRE', 'EVERY', 'SYLLABLE', "SPELL'D", 'OUT', 'SPARKLES'] +4507-16021-0056-1525: ref=['HOWEVER', 'HE', 'WHO', 'SAYS', 'LIGHT', 'DOES', 'NOT', 'NECESSARILY', 'SAY', 'JOY'] +4507-16021-0056-1525: hyp=['HOWEVER', 'HE', 'WHO', 'SAYS', 'LIGHT', 'DOES', 'NOT', 'NECESSARILY', 'SAY', 'JOY'] +4507-16021-0057-1526: ref=['PEOPLE', 'SUFFER', 'IN', 'THE', 'LIGHT', 'EXCESS', 'BURNS'] +4507-16021-0057-1526: hyp=['PEOPLE', 'SUFFER', 'IN', 'THE', 'LIGHT', 'EXCESS', 'BURNS'] +4507-16021-0058-1527: ref=['THE', 'FLAME', 'IS', 'THE', 'ENEMY', 'OF', 'THE', 'WING'] +4507-16021-0058-1527: hyp=['THE', 'FLAME', 'IS', 'THE', 'ENEMY', 'OF', 'THE', 'WING'] +4507-16021-0059-1528: ref=['TO', 'BURN', 'WITHOUT', 'CEASING', 'TO', 'FLY', 'THEREIN', 'LIES', 'THE', 'MARVEL', 'OF', 'GENIUS'] +4507-16021-0059-1528: hyp=['TO', 'BURN', 'WITHOUT', 'CEASING', 'TO', 'FLY', 'THEREIN', 'LIES', 'THE', 'MARVEL', 'OF', 'GENIUS'] +4970-29093-0000-2093: ref=["YOU'LL", 'NEVER', 'DIG', 'IT', 'OUT', 'OF', 'THE', 'ASTOR', 'LIBRARY'] +4970-29093-0000-2093: hyp=["YOU'LL", 'NEVER', 'DIG', 'IT', 'OUT', 'OF', 'THE', 'ASTER', 'LIBRARY'] +4970-29093-0001-2094: ref=['TO', 'THE', 'YOUNG', 'AMERICAN', 'HERE', 'OR', 'ELSEWHERE', 'THE', 'PATHS', 'TO', 'FORTUNE', 'ARE', 'INNUMERABLE', 'AND', 'ALL', 'OPEN', 'THERE', 'IS', 'INVITATION', 'IN', 'THE', 'AIR', 'AND', 'SUCCESS', 'IN', 'ALL', 'HIS', 'WIDE', 'HORIZON'] +4970-29093-0001-2094: hyp=['TO', 'THE', 'YOUNG', 'AMERICAN', 'HERE', 'OR', 'ELSEWHERE', 'THE', 'PATHS', 'TO', 'FORTUNE', 'ARE', 'INNUMERABLE', 'AND', 'ALL', 'OPEN', 'THERE', 'IS', 'INVITATION', 'IN', 'THE', 'AIR', 'AND', 'SUCCESS', 'IN', 'ALL', 'HIS', 'WIDE', 'HORIZON'] +4970-29093-0002-2095: ref=['HE', 'HAS', 'NO', 'TRADITIONS', 'TO', 'BIND', 'HIM', 'OR', 'GUIDE', 'HIM', 'AND', 'HIS', 'IMPULSE', 'IS', 'TO', 'BREAK', 'AWAY', 'FROM', 'THE', 'OCCUPATION', 'HIS', 'FATHER', 'HAS', 'FOLLOWED', 'AND', 'MAKE', 'A', 'NEW', 'WAY', 'FOR', 'HIMSELF'] +4970-29093-0002-2095: hyp=['HE', 'HAS', 'NO', 'TRADITIONS', 'TO', 'BIND', 'HIM', 'OR', 'GUIDE', 'HIM', 'AND', 'HIS', 'IMPULSE', 'IS', 'TO', 'BREAK', 'AWAY', 'FROM', 'THE', 'OCCUPATION', 'HIS', 'FATHER', 'HAS', 'FOLLOWED', 'AND', 'MAKE', 'A', 'NEW', 'WAY', 'FOR', 'HIMSELF'] +4970-29093-0003-2096: ref=['THE', 'MODEST', 'FELLOW', 'WOULD', 'HAVE', 'LIKED', 'FAME', 'THRUST', 'UPON', 'HIM', 'FOR', 'SOME', 'WORTHY', 'ACHIEVEMENT', 'IT', 'MIGHT', 'BE', 'FOR', 'A', 'BOOK', 'OR', 'FOR', 'THE', 'SKILLFUL', 'MANAGEMENT', 'OF', 'SOME', 'GREAT', 'NEWSPAPER', 'OR', 'FOR', 'SOME', 'DARING', 'EXPEDITION', 'LIKE', 'THAT', 'OF', 'LIEUTENANT', 'STRAIN', 'OR', 'DOCTOR', 'KANE'] +4970-29093-0003-2096: hyp=['THE', 'MODEST', 'FELLOW', 'WOULD', 'HAVE', 'LIKED', 'FAME', 'THRUST', 'UPON', 'HIM', 'FOR', 'SOME', 'WORTHY', 'ACHIEVEMENT', 'IT', 'MIGHT', 'BE', 'FOR', 'A', 'BOOK', 'OR', 'FOR', 'THE', 'SKILFUL', 'MANAGEMENT', 'OF', 'SOME', 'GREAT', 'NEWSPAPER', 'OR', 'FOR', 'SOME', 'DARING', 'EXPEDITION', 'LIKE', 'THAT', 'OF', 
'LIEUTENANT', 'STRAIN', 'OR', 'DOCTOR', 'KANE'] +4970-29093-0004-2097: ref=['HE', 'WAS', 'UNABLE', 'TO', 'DECIDE', 'EXACTLY', 'WHAT', 'IT', 'SHOULD', 'BE'] +4970-29093-0004-2097: hyp=['HE', 'WAS', 'UNABLE', 'TO', 'DECIDE', 'EXACTLY', 'WHAT', 'IT', 'SHOULD', 'BE'] +4970-29093-0005-2098: ref=['SOMETIMES', 'HE', 'THOUGHT', 'HE', 'WOULD', 'LIKE', 'TO', 'STAND', 'IN', 'A', 'CONSPICUOUS', 'PULPIT', 'AND', 'HUMBLY', 'PREACH', 'THE', 'GOSPEL', 'OF', 'REPENTANCE', 'AND', 'IT', 'EVEN', 'CROSSED', 'HIS', 'MIND', 'THAT', 'IT', 'WOULD', 'BE', 'NOBLE', 'TO', 'GIVE', 'HIMSELF', 'TO', 'A', 'MISSIONARY', 'LIFE', 'TO', 'SOME', 'BENIGHTED', 'REGION', 'WHERE', 'THE', 'DATE', 'PALM', 'GROWS', 'AND', 'THE', "NIGHTINGALE'S", 'VOICE', 'IS', 'IN', 'TUNE', 'AND', 'THE', 'BUL', 'BUL', 'SINGS', 'ON', 'THE', 'OFF', 'NIGHTS'] +4970-29093-0005-2098: hyp=['SOMETIMES', 'HE', 'THOUGHT', 'HE', 'WOULD', 'LIKE', 'TO', 'STAND', 'IN', 'A', 'CONSPICUOUS', 'PULPIT', 'AND', 'HUMBLY', 'PREACH', 'THE', 'GOSPEL', 'OF', 'REPENTANCE', 'AND', 'IT', 'EVEN', 'CROSSED', 'HIS', 'MIND', 'THAT', 'IT', 'WOULD', 'BE', 'NOBLE', 'TO', 'GIVE', 'HIMSELF', 'TO', 'A', 'MISSIONARY', 'LIFE', 'TO', 'SOME', 'BENIGHTED', 'REGION', 'WHERE', 'THE', 'DATE', 'PALM', 'GROVES', 'AND', 'THE', "NIGHTINGALE'S", 'VOICE', 'IS', 'IN', 'TUNE', 'AND', 'THE', 'BULL', 'BULL', 'SINGS', 'ON', 'THE', 'OPT', 'NIGHTS'] +4970-29093-0006-2099: ref=['LAW', 'SEEMED', 'TO', 'HIM', 'WELL', 'ENOUGH', 'AS', 'A', 'SCIENCE', 'BUT', 'HE', 'NEVER', 'COULD', 'DISCOVER', 'A', 'PRACTICAL', 'CASE', 'WHERE', 'IT', 'APPEARED', 'TO', 'HIM', 'WORTH', 'WHILE', 'TO', 'GO', 'TO', 'LAW', 'AND', 'ALL', 'THE', 'CLIENTS', 'WHO', 'STOPPED', 'WITH', 'THIS', 'NEW', 'CLERK', 'IN', 'THE', 'ANTE', 'ROOM', 'OF', 'THE', 'LAW', 'OFFICE', 'WHERE', 'HE', 'WAS', 'WRITING', 'PHILIP', 'INVARIABLY', 'ADVISED', 'TO', 'SETTLE', 'NO', 'MATTER', 'HOW', 'BUT', 'SETTLE', 'GREATLY', 'TO', 'THE', 'DISGUST', 'OF', 'HIS', 'EMPLOYER', 'WHO', 'KNEW', 'THAT', 'JUSTICE', 'BETWEEN', 'MAN', 'AND', 'MAN', 'COULD', 'ONLY', 'BE', 'ATTAINED', 'BY', 'THE', 'RECOGNIZED', 'PROCESSES', 'WITH', 'THE', 'ATTENDANT', 'FEES'] +4970-29093-0006-2099: hyp=['LAW', 'SEEMED', 'TO', 'HIM', 'WELL', 'ENOUGH', 'AS', 'A', 'SCIENCE', 'BUT', 'HE', 'NEVER', 'COULD', 'DISCOVER', 'A', 'PRACTICAL', 'CASE', 'WHERE', 'IT', 'APPEARED', 'TO', 'HIM', 'WORTH', 'WHILE', 'TO', 'GO', 'TO', 'LAW', 'AND', 'ALL', 'THE', 'CLIENTS', 'WHO', 'STOPPED', 'WITH', 'THIS', 'NEW', 'CLERK', 'IN', 'THE', 'ANTEROOM', 'OF', 'THE', 'LAW', 'OFFICE', 'WHERE', 'HE', 'WAS', 'WRITING', 'PHILIP', 'INVARIABLY', 'ADVISED', 'TO', 'SETTLE', 'NO', 'MATTER', 'HOW', 'BUT', 'SETTLE', 'GREATLY', 'TO', 'THE', 'DISGUST', 'OF', 'HIS', 'EMPLOYER', 'WHO', 'KNEW', 'THAT', 'JUSTICE', 'BETWEEN', 'MAN', 'AND', 'MAN', 'COULD', 'ONLY', 'BE', 'ATTAINED', 'BY', 'THE', 'RECOGNIZED', 'PROCESSES', 'WITH', 'THE', 'ATTENDANT', 'FEES'] +4970-29093-0007-2100: ref=['IT', 'IS', 'SUCH', 'A', 'NOBLE', 'AMBITION', 'THAT', 'IT', 'IS', 'A', 'PITY', 'IT', 'HAS', 'USUALLY', 'SUCH', 'A', 'SHALLOW', 'FOUNDATION'] +4970-29093-0007-2100: hyp=['IT', 'IS', 'SUCH', 'A', 'NOBLE', 'AMBITION', 'THAT', 'IT', 'IS', 'A', 'PITY', 'IT', 'HAS', 'USUALLY', 'SUCH', 'A', 'SHALLOW', 'FOUNDATION'] +4970-29093-0008-2101: ref=['HE', 'WANTED', 'TO', 'BEGIN', 'AT', 'THE', 'TOP', 'OF', 'THE', 'LADDER'] +4970-29093-0008-2101: hyp=['HE', 'WANTED', 'TO', 'BEGIN', 'AT', 'THE', 'TOP', 'OF', 'THE', 'LADDER'] +4970-29093-0009-2102: ref=['PHILIP', 'THEREFORE', 'READ', 'DILIGENTLY', 'IN', 'THE', 'ASTOR', 'LIBRARY', 'PLANNED', 'LITERARY', 'WORKS', 'THAT', 'SHOULD', 
'COMPEL', 'ATTENTION', 'AND', 'NURSED', 'HIS', 'GENIUS'] +4970-29093-0009-2102: hyp=['PHILIP', 'THEREFORE', 'READ', 'DILIGENTLY', 'IN', 'THE', 'ASTER', 'LIBRARY', 'PLANNED', 'LITERARY', 'WORKS', 'THAT', 'SHOULD', 'COMPEL', 'ATTENTION', 'AND', 'NURSED', 'HIS', 'GENIUS'] +4970-29093-0010-2103: ref=['HE', 'HAD', 'NO', 'FRIEND', 'WISE', 'ENOUGH', 'TO', 'TELL', 'HIM', 'TO', 'STEP', 'INTO', 'THE', 'DORKING', 'CONVENTION', 'THEN', 'IN', 'SESSION', 'MAKE', 'A', 'SKETCH', 'OF', 'THE', 'MEN', 'AND', 'WOMEN', 'ON', 'THE', 'PLATFORM', 'AND', 'TAKE', 'IT', 'TO', 'THE', 'EDITOR', 'OF', 'THE', 'DAILY', 'GRAPEVINE', 'AND', 'SEE', 'WHAT', 'HE', 'COULD', 'GET', 'A', 'LINE', 'FOR', 'IT'] +4970-29093-0010-2103: hyp=['HE', 'HAD', 'NO', 'FRIEND', 'WISE', 'ENOUGH', 'TO', 'TELL', 'HIM', 'TO', 'STEP', 'INTO', 'THE', 'DORKING', 'CONVENTION', 'THAN', 'IN', 'SESSION', 'MAKE', 'A', 'SKETCH', 'OF', 'THE', 'MEN', 'AND', 'WOMEN', 'ON', 'THE', 'PLATFORM', 'AND', 'TAKE', 'IT', 'TO', 'THE', 'EDITOR', 'OF', 'THE', 'DAILY', 'GRAPE', 'VINE', 'AND', 'SEE', 'WHAT', 'HE', 'COULD', 'GET', 'A', 'LINE', 'FOR', 'IT'] +4970-29093-0011-2104: ref=['O', 'VERY', 'WELL', 'SAID', 'GRINGO', 'TURNING', 'AWAY', 'WITH', 'A', 'SHADE', 'OF', 'CONTEMPT', "YOU'LL", 'FIND', 'IF', 'YOU', 'ARE', 'GOING', 'INTO', 'LITERATURE', 'AND', 'NEWSPAPER', 'WORK', 'THAT', 'YOU', "CAN'T", 'AFFORD', 'A', 'CONSCIENCE', 'LIKE', 'THAT'] +4970-29093-0011-2104: hyp=['OH', 'VERY', 'WELL', 'SAID', 'GRINGAUD', 'TURNING', 'AWAY', 'WITH', 'A', 'SHADE', 'OF', 'CONTEMPT', "YOU'LL", 'FIND', 'IF', 'YOU', 'ARE', 'GOING', 'INTO', 'LITERATURE', 'AND', 'NEWSPAPER', 'WORK', 'THAT', 'YOU', "CAN'T", 'AFFORD', 'A', 'CONSCIENCE', 'LIKE', 'THAT'] +4970-29093-0012-2105: ref=['BUT', 'PHILIP', 'DID', 'AFFORD', 'IT', 'AND', 'HE', 'WROTE', 'THANKING', 'HIS', 'FRIENDS', 'AND', 'DECLINING', 'BECAUSE', 'HE', 'SAID', 'THE', 'POLITICAL', 'SCHEME', 'WOULD', 'FAIL', 'AND', 'OUGHT', 'TO', 'FAIL'] +4970-29093-0012-2105: hyp=['BUT', 'PHILIP', 'DID', 'AFFORD', 'IT', 'AND', 'HE', 'WROTE', 'THANKING', 'HIS', 'FRIENDS', 'AND', 'DECLINING', 'BECAUSE', 'HE', 'SAID', 'THE', 'POLITICAL', 'SCHEME', 'WOULD', 'FAIL', 'AND', 'OUGHT', 'TO', 'FAIL'] +4970-29093-0013-2106: ref=['AND', 'HE', 'WENT', 'BACK', 'TO', 'HIS', 'BOOKS', 'AND', 'TO', 'HIS', 'WAITING', 'FOR', 'AN', 'OPENING', 'LARGE', 'ENOUGH', 'FOR', 'HIS', 'DIGNIFIED', 'ENTRANCE', 'INTO', 'THE', 'LITERARY', 'WORLD'] +4970-29093-0013-2106: hyp=['AND', 'HE', 'WENT', 'BACK', 'TO', 'HIS', 'BOOKS', 'AND', 'TO', 'HIS', 'WAITING', 'FOR', 'AN', 'OPENING', 'LARGE', 'ENOUGH', 'FOR', 'HIS', 'DIGNIFIED', 'ENTRANCE', 'INTO', 'THE', 'LITERARY', 'WORLD'] +4970-29093-0014-2107: ref=['WELL', "I'M", 'GOING', 'AS', 'AN', 'ENGINEER', 'YOU', 'CAN', 'GO', 'AS', 'ONE'] +4970-29093-0014-2107: hyp=['WELL', "I'M", 'GOING', 'AS', 'AN', 'ENGINEER', 'YOU', 'COULD', 'GO', 'AS', 'ONE'] +4970-29093-0015-2108: ref=['YOU', 'CAN', 'BEGIN', 'BY', 'CARRYING', 'A', 'ROD', 'AND', 'PUTTING', 'DOWN', 'THE', 'FIGURES'] +4970-29093-0015-2108: hyp=['YOU', 'CAN', 'BEGIN', 'BY', 'CARRYING', 'A', 'ROD', 'AND', 'PUTTING', 'DOWN', 'THE', 'FIGURES'] +4970-29093-0016-2109: ref=['NO', 'ITS', 'NOT', 'TOO', 'SOON'] +4970-29093-0016-2109: hyp=['NO', "IT'S", 'OUGHT', 'TOO', 'SOON'] +4970-29093-0017-2110: ref=["I'VE", 'BEEN', 'READY', 'TO', 'GO', 'ANYWHERE', 'FOR', 'SIX', 'MONTHS'] +4970-29093-0017-2110: hyp=["I'VE", 'BEEN', 'READY', 'TO', 'GO', 'ANYWHERE', 'FOR', 'SIX', 'MONTHS'] +4970-29093-0018-2111: ref=['THE', 'TWO', 'YOUNG', 'MEN', 'WHO', 'WERE', 'BY', 'THIS', 'TIME', 'FULL', 'OF', 'THE', 'ADVENTURE', 'WENT', 
'DOWN', 'TO', 'THE', 'WALL', 'STREET', 'OFFICE', 'OF', "HENRY'S", 'UNCLE', 'AND', 'HAD', 'A', 'TALK', 'WITH', 'THAT', 'WILY', 'OPERATOR'] +4970-29093-0018-2111: hyp=['THE', 'TWO', 'YOUNG', 'MEN', 'WHO', 'WERE', 'BY', 'THIS', 'TIME', 'FULL', 'OF', 'THE', 'ADVENTURE', 'WENT', 'DOWN', 'TO', 'THE', 'WALL', 'STREET', 'OFFICE', 'OF', "HENRY'S", 'UNCLE', 'AND', 'HAD', 'A', 'TALK', 'WITH', 'THAT', 'WILY', 'OPERATOR'] +4970-29093-0019-2112: ref=['THE', 'NIGHT', 'WAS', 'SPENT', 'IN', 'PACKING', 'UP', 'AND', 'WRITING', 'LETTERS', 'FOR', 'PHILIP', 'WOULD', 'NOT', 'TAKE', 'SUCH', 'AN', 'IMPORTANT', 'STEP', 'WITHOUT', 'INFORMING', 'HIS', 'FRIENDS'] +4970-29093-0019-2112: hyp=['THE', 'NIGHT', 'WAS', 'SPENT', 'IN', 'PACKING', 'UP', 'AND', 'WRITING', 'LETTERS', 'FOR', 'PHILIP', 'WOULD', 'NOT', 'TAKE', 'SUCH', 'AN', 'IMPORTANT', 'STEP', 'WITHOUT', 'INFORMING', 'HIS', 'FRIENDS'] +4970-29093-0020-2113: ref=['WHY', "IT'S", 'IN', 'MISSOURI', 'SOMEWHERE', 'ON', 'THE', 'FRONTIER', 'I', 'THINK', "WE'LL", 'GET', 'A', 'MAP'] +4970-29093-0020-2113: hyp=['WHY', "IT'S", 'A', 'MISSOURI', 'SOMEWHERE', 'ON', 'THE', 'FRONTIER', 'I', 'THINK', "WE'LL", 'GET', 'A', 'MAP'] +4970-29093-0021-2114: ref=['I', 'WAS', 'AFRAID', 'IT', 'WAS', 'NEARER', 'HOME'] +4970-29093-0021-2114: hyp=['I', 'WAS', 'AFRAID', 'IT', 'WAS', 'NEARER', 'HOME'] +4970-29093-0022-2115: ref=['HE', 'KNEW', 'HIS', 'UNCLE', 'WOULD', 'BE', 'GLAD', 'TO', 'HEAR', 'THAT', 'HE', 'HAD', 'AT', 'LAST', 'TURNED', 'HIS', 'THOUGHTS', 'TO', 'A', 'PRACTICAL', 'MATTER'] +4970-29093-0022-2115: hyp=['HE', 'KNEW', 'HIS', 'UNCLE', 'WOULD', 'BE', 'GLAD', 'TO', 'HEAR', 'THAT', 'HE', 'HAD', 'AT', 'LAST', 'TURNED', 'HIS', 'THOUGHTS', 'TO', 'A', 'PRACTICAL', 'MATTER'] +4970-29093-0023-2116: ref=['HE', 'WELL', 'KNEW', 'THE', 'PERILS', 'OF', 'THE', 'FRONTIER', 'THE', 'SAVAGE', 'STATE', 'OF', 'SOCIETY', 'THE', 'LURKING', 'INDIANS', 'AND', 'THE', 'DANGERS', 'OF', 'FEVER'] +4970-29093-0023-2116: hyp=['HE', 'WELL', 'KNEW', 'THE', 'PERILS', 'OF', 'THE', 'FRONTIER', 'THE', 'SAVAGE', 'STATE', 'OF', 'SOCIETY', 'THE', 'LURKING', 'INDIANS', 'AND', 'THE', 'DANGERS', 'OF', 'FEVER'] +4970-29095-0000-2054: ref=['SHE', 'WAS', 'TIRED', 'OF', 'OTHER', 'THINGS'] +4970-29095-0000-2054: hyp=['SHE', 'WAS', 'TIRED', 'OF', 'OTHER', 'THINGS'] +4970-29095-0001-2055: ref=['SHE', 'TRIED', 'THIS', 'MORNING', 'AN', 'AIR', 'OR', 'TWO', 'UPON', 'THE', 'PIANO', 'SANG', 'A', 'SIMPLE', 'SONG', 'IN', 'A', 'SWEET', 'BUT', 'SLIGHTLY', 'METALLIC', 'VOICE', 'AND', 'THEN', 'SEATING', 'HERSELF', 'BY', 'THE', 'OPEN', 'WINDOW', 'READ', "PHILIP'S", 'LETTER'] +4970-29095-0001-2055: hyp=['SHE', 'TRIED', 'THIS', 'MORNING', 'AN', 'HOUR', 'OR', 'TWO', 'UPON', 'THE', 'PIANO', 'SAYING', 'A', 'SIMPLE', 'SONG', 'AND', 'A', 'SWEET', 'BUT', 'SLIGHTLY', 'METALLIC', 'VOICE', 'AND', 'THEN', 'SEATING', 'HERSELF', 'BY', 'THE', 'OPEN', 'WINDOW', 'READ', "PHILIP'S", 'LETTER'] +4970-29095-0002-2056: ref=['WELL', 'MOTHER', 'SAID', 'THE', 'YOUNG', 'STUDENT', 'LOOKING', 'UP', 'WITH', 'A', 'SHADE', 'OF', 'IMPATIENCE'] +4970-29095-0002-2056: hyp=['WELL', 'MOTHER', 'SAID', 'THE', 'YOUNG', 'STUDENT', 'LOOKING', 'UP', 'WITH', 'A', 'SHADE', 'OF', 'IMPATIENCE'] +4970-29095-0003-2057: ref=['I', 'HOPE', 'THEE', 'TOLD', 'THE', 'ELDERS', 'THAT', 'FATHER', 'AND', 'I', 'ARE', 'RESPONSIBLE', 'FOR', 'THE', 'PIANO', 'AND', 'THAT', 'MUCH', 'AS', 'THEE', 'LOVES', 'MUSIC', 'THEE', 'IS', 'NEVER', 'IN', 'THE', 'ROOM', 'WHEN', 'IT', 'IS', 'PLAYED'] +4970-29095-0003-2057: hyp=['I', 'HOPE', 'THEE', 'TOLD', 'THE', 'ELDERS', 'THAT', 'FATHER', 'AND', 'I', 'ARE', 'RESPONSIBLE', 
'FOR', 'THE', 'PIANO', 'AND', 'THAT', 'MUCH', 'AS', 'THEE', 'LOVES', 'MUSIC', 'THEE', 'IS', 'NEVER', 'IN', 'THE', 'ROOM', 'WHEN', 'IT', 'IS', 'PLAYED'] +4970-29095-0004-2058: ref=['I', 'HEARD', 'FATHER', 'TELL', 'COUSIN', 'ABNER', 'THAT', 'HE', 'WAS', 'WHIPPED', 'SO', 'OFTEN', 'FOR', 'WHISTLING', 'WHEN', 'HE', 'WAS', 'A', 'BOY', 'THAT', 'HE', 'WAS', 'DETERMINED', 'TO', 'HAVE', 'WHAT', 'COMPENSATION', 'HE', 'COULD', 'GET', 'NOW'] +4970-29095-0004-2058: hyp=['I', 'HEARD', 'FATHER', 'TELL', 'COUSIN', 'ABNER', 'THAT', 'HE', 'WAS', 'WHIPPED', 'SO', 'OFTEN', 'FOR', 'WHISTLING', 'WHEN', 'HE', 'WAS', 'A', 'BOY', 'THAT', 'HE', 'WAS', 'DETERMINED', 'TO', 'HAVE', 'WHAT', 'COMPENSATION', 'HE', 'COULD', 'GET', 'NOW'] +4970-29095-0005-2059: ref=['THY', 'WAYS', 'GREATLY', 'TRY', 'ME', 'RUTH', 'AND', 'ALL', 'THY', 'RELATIONS'] +4970-29095-0005-2059: hyp=['THY', 'WAYS', 'GREATLY', 'TRY', 'ME', 'RUTH', 'AND', 'ALL', 'THY', 'RELATIONS'] +4970-29095-0006-2060: ref=['IS', 'THY', 'FATHER', 'WILLING', 'THEE', 'SHOULD', 'GO', 'AWAY', 'TO', 'A', 'SCHOOL', 'OF', 'THE', "WORLD'S", 'PEOPLE'] +4970-29095-0006-2060: hyp=['IS', 'THY', 'FATHER', 'WILLING', 'THEE', 'SHOULD', 'GO', 'AWAY', 'TO', 'A', 'SCHOOL', 'OF', 'THE', "WORLD'S", 'PEOPLE'] +4970-29095-0007-2061: ref=['I', 'HAVE', 'NOT', 'ASKED', 'HIM', 'RUTH', 'REPLIED', 'WITH', 'A', 'LOOK', 'THAT', 'MIGHT', 'IMPLY', 'THAT', 'SHE', 'WAS', 'ONE', 'OF', 'THOSE', 'DETERMINED', 'LITTLE', 'BODIES', 'WHO', 'FIRST', 'MADE', 'UP', 'HER', 'OWN', 'MIND', 'AND', 'THEN', 'COMPELLED', 'OTHERS', 'TO', 'MAKE', 'UP', 'THEIRS', 'IN', 'ACCORDANCE', 'WITH', 'HERS'] +4970-29095-0007-2061: hyp=['I', 'HAVE', 'NOT', 'ASKED', 'HIM', 'RUTH', 'REPLIED', 'WITH', 'A', 'LOOK', 'THAT', 'MIGHT', 'IMPLY', 'THAT', 'SHE', 'WAS', 'ONE', 'OF', 'THOSE', 'DETERMINED', 'LITTLE', 'BODIES', 'WHO', 'FIRST', 'MADE', 'UP', 'HER', 'OWN', 'MIND', 'AND', 'THEN', 'COMPELLED', 'OTHERS', 'TO', 'MAKE', 'UP', 'THEIRS', 'IN', 'ACCORDANCE', 'WITH', 'HERS'] +4970-29095-0008-2062: ref=['MOTHER', "I'M", 'GOING', 'TO', 'STUDY', 'MEDICINE'] +4970-29095-0008-2062: hyp=['MOTHER', 'I', 'AM', 'GOING', 'TO', 'SET', 'E', 'MEDICINE'] +4970-29095-0009-2063: ref=['MARGARET', 'BOLTON', 'ALMOST', 'LOST', 'FOR', 'A', 'MOMENT', 'HER', 'HABITUAL', 'PLACIDITY'] +4970-29095-0009-2063: hyp=['MARGARET', 'BOLTON', 'ALMOST', 'LOST', 'FOR', 'A', 'MOMENT', 'HER', 'HABITUAL', 'PLACIDITY'] +4970-29095-0010-2064: ref=['THEE', 'STUDY', 'MEDICINE'] +4970-29095-0010-2064: hyp=['THE', 'STUDY', 'MEDICINE'] +4970-29095-0011-2065: ref=['DOES', 'THEE', 'THINK', 'THEE', 'COULD', 'STAND', 'IT', 'SIX', 'MONTHS'] +4970-29095-0011-2065: hyp=['DOES', 'THEE', 'THINK', 'THEE', 'COULD', 'STAND', 'AT', 'SIX', 'MONTHS'] +4970-29095-0012-2066: ref=['AND', 'BESIDES', 'SUPPOSE', 'THEE', 'DOES', 'LEARN', 'MEDICINE'] +4970-29095-0012-2066: hyp=['AND', 'BESIDES', 'SUPPOSE', 'THEE', 'DOES', 'LEARN', 'MEDICINE'] +4970-29095-0013-2067: ref=['I', 'WILL', 'PRACTICE', 'IT'] +4970-29095-0013-2067: hyp=['I', 'WILL', 'PRACTISE', 'IT'] +4970-29095-0014-2068: ref=['WHERE', 'THEE', 'AND', 'THY', 'FAMILY', 'ARE', 'KNOWN'] +4970-29095-0014-2068: hyp=["WHERE'S", 'THEE', 'AND', 'THY', 'FAMILY', 'ARE', 'KNOWN'] +4970-29095-0015-2069: ref=['IF', 'I', 'CAN', 'GET', 'PATIENTS'] +4970-29095-0015-2069: hyp=['IF', 'I', 'CAN', 'GET', 'PATIENCE'] +4970-29095-0016-2070: ref=['RUTH', 'SAT', 'QUITE', 'STILL', 'FOR', 'A', 'TIME', 'WITH', 'FACE', 'INTENT', 'AND', 'FLUSHED', 'IT', 'WAS', 'OUT', 'NOW'] +4970-29095-0016-2070: hyp=['RUTH', 'SAT', 'QUITE', 'STILL', 'FOR', 'A', 'TIME', 'WITH', 'FACE', 'INTENT', 
'AND', 'FLUSHED', 'IT', 'WAS', 'OUT', 'NOW'] +4970-29095-0017-2071: ref=['THE', 'SIGHT', 'SEERS', 'RETURNED', 'IN', 'HIGH', 'SPIRITS', 'FROM', 'THE', 'CITY'] +4970-29095-0017-2071: hyp=['THE', 'SIGHTSEERS', 'RETURNED', 'IN', 'HIGH', 'SPIRITS', 'FROM', 'THE', 'CITY'] +4970-29095-0018-2072: ref=['RUTH', 'ASKED', 'THE', 'ENTHUSIASTS', 'IF', 'THEY', 'WOULD', 'LIKE', 'TO', 'LIVE', 'IN', 'SUCH', 'A', 'SOUNDING', 'MAUSOLEUM', 'WITH', 'ITS', 'GREAT', 'HALLS', 'AND', 'ECHOING', 'ROOMS', 'AND', 'NO', 'COMFORTABLE', 'PLACE', 'IN', 'IT', 'FOR', 'THE', 'ACCOMMODATION', 'OF', 'ANY', 'BODY'] +4970-29095-0018-2072: hyp=['RUTH', 'ASKED', 'THE', 'ENTHUSIASTS', 'IF', 'THEY', 'WOULD', 'LIKE', 'TO', 'LIVE', 'IN', 'SUCH', 'A', 'SOUNDING', 'MUZOLEUM', 'WITH', 'ITS', 'GREAT', 'HALLS', 'AND', 'ECHOING', 'ROOMS', 'AND', 'NO', 'COMFORTABLE', 'PLACE', 'IN', 'IT', 'FOR', 'THE', 'ACCOMMODATION', 'OF', 'ANY', 'BODY'] +4970-29095-0019-2073: ref=['AND', 'THEN', 'THERE', 'WAS', 'BROAD', 'STREET'] +4970-29095-0019-2073: hyp=['AND', 'THEN', 'THERE', 'WAS', 'BROAD', 'STREET'] +4970-29095-0020-2074: ref=['THERE', 'CERTAINLY', 'WAS', 'NO', 'END', 'TO', 'IT', 'AND', 'EVEN', 'RUTH', 'WAS', 'PHILADELPHIAN', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'A', 'STREET', 'OUGHT', 'NOT', 'TO', 'HAVE', 'ANY', 'END', 'OR', 'ARCHITECTURAL', 'POINT', 'UPON', 'WHICH', 'THE', 'WEARY', 'EYE', 'COULD', 'REST'] +4970-29095-0020-2074: hyp=['THERE', 'IS', 'CERTAINLY', 'WAS', 'NO', 'END', 'TO', 'IT', 'AND', 'EVEN', 'RUTH', 'WAS', 'PHILADELPHIAN', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'A', 'STREET', 'OUGHT', 'NOT', 'TO', 'HAVE', 'ANY', 'END', 'OR', 'ARCHITECTURAL', 'BLINT', 'UPON', 'WHICH', 'THE', 'WEARY', 'EYE', 'COULD', 'REST'] +4970-29095-0021-2075: ref=['BUT', 'NEITHER', 'SAINT', 'GIRARD', 'NOR', 'BROAD', 'STREET', 'NEITHER', 'WONDERS', 'OF', 'THE', 'MINT', 'NOR', 'THE', 'GLORIES', 'OF', 'THE', 'HALL', 'WHERE', 'THE', 'GHOSTS', 'OF', 'OUR', 'FATHERS', 'SIT', 'ALWAYS', 'SIGNING', 'THE', 'DECLARATION', 'IMPRESSED', 'THE', 'VISITORS', 'SO', 'MUCH', 'AS', 'THE', 'SPLENDORS', 'OF', 'THE', 'CHESTNUT', 'STREET', 'WINDOWS', 'AND', 'THE', 'BARGAINS', 'ON', 'EIGHTH', 'STREET'] +4970-29095-0021-2075: hyp=['BUT', 'NEITHER', 'SAINT', 'GERARD', 'NOR', 'BROAD', 'STREET', 'NEITHER', 'WONDERS', 'OF', 'THE', 'MENT', 'NOR', 'THE', 'GLORIES', 'OF', 'THE', 'HALL', 'WHERE', 'THE', 'GHOSTS', 'OF', 'OUR', 'FATHERS', 'SIT', 'ALWAYS', 'SIGNING', 'THE', 'DECLARATION', 'IMPRESS', 'THE', 'VISITORS', 'SO', 'MUCH', 'AS', 'THE', 'SPLENDORS', 'OF', 'THE', 'CHESTNUT', 'STREET', 'WINDOWS', 'AND', 'THE', 'BARGAINS', 'ON', 'EIGHTH', 'STREET'] +4970-29095-0022-2076: ref=['IS', 'THEE', 'GOING', 'TO', 'THE', 'YEARLY', 'MEETING', 'RUTH', 'ASKED', 'ONE', 'OF', 'THE', 'GIRLS'] +4970-29095-0022-2076: hyp=['IS', 'THEE', 'GOING', 'TO', 'THE', 'YEARLY', 'MEETING', 'RUTH', 'ASKED', 'ONE', 'OF', 'THE', 'GIRLS'] +4970-29095-0023-2077: ref=['I', 'HAVE', 'NOTHING', 'TO', 'WEAR', 'REPLIED', 'THAT', 'DEMURE', 'PERSON'] +4970-29095-0023-2077: hyp=['I', 'HAVE', 'NOTHING', 'TO', 'WEAR', 'REPLIED', 'THAT', 'DEMURE', 'PERSON'] +4970-29095-0024-2078: ref=['IT', 'HAS', 'OCCUPIED', 'MOTHER', 'A', 'LONG', 'TIME', 'TO', 'FIND', 'AT', 'THE', 'SHOPS', 'THE', 'EXACT', 'SHADE', 'FOR', 'HER', 'NEW', 'BONNET'] +4970-29095-0024-2078: hyp=['IT', 'HAS', 'OCCUPIED', 'MOTHER', 'A', 'LONG', 'TIME', 'TO', 'FIND', 'THE', 'SHOPS', 'THE', 'EXACT', 'SHADE', 'FOR', 'HER', 'NEW', 'BONNET'] +4970-29095-0025-2079: ref=['AND', 'THEE', "WON'T", 'GO', 'WHY', 'SHOULD', 'I'] +4970-29095-0025-2079: hyp=['AND', 'THEE', "WON'T", 'GO', 'WHY', 
'SHOULD', 'I'] +4970-29095-0026-2080: ref=['IF', 'I', 'GO', 'TO', 'MEETING', 'AT', 'ALL', 'I', 'LIKE', 'BEST', 'TO', 'SIT', 'IN', 'THE', 'QUIET', 'OLD', 'HOUSE', 'IN', 'GERMANTOWN', 'WHERE', 'THE', 'WINDOWS', 'ARE', 'ALL', 'OPEN', 'AND', 'I', 'CAN', 'SEE', 'THE', 'TREES', 'AND', 'HEAR', 'THE', 'STIR', 'OF', 'THE', 'LEAVES'] +4970-29095-0026-2080: hyp=['IF', 'I', 'GO', 'TO', 'MEETING', 'AT', 'ALL', 'I', 'LIKE', 'BEST', 'TO', 'SIT', 'IN', 'THE', 'QUIET', 'OLD', 'HOUSE', 'IN', 'GERMANTOWN', 'WHERE', 'THE', 'WINDOWS', 'ARE', 'ALL', 'OPEN', 'AND', 'I', 'CAN', 'SEE', 'THE', 'TREES', 'AND', 'HERE', 'THE', 'STIR', 'OF', 'THE', 'LEAVES'] +4970-29095-0027-2081: ref=["IT'S", 'SUCH', 'A', 'CRUSH', 'AT', 'THE', 'YEARLY', 'MEETING', 'AT', 'ARCH', 'STREET', 'AND', 'THEN', "THERE'S", 'THE', 'ROW', 'OF', 'SLEEK', 'LOOKING', 'YOUNG', 'MEN', 'WHO', 'LINE', 'THE', 'CURBSTONE', 'AND', 'STARE', 'AT', 'US', 'AS', 'WE', 'COME', 'OUT'] +4970-29095-0027-2081: hyp=["IT'S", 'SUCH', 'A', 'CRUSH', 'AT', 'THE', 'YEARLY', 'MEETING', 'AT', 'ARCH', 'STREET', 'AND', 'THEN', "THERE'S", 'THE', 'ROW', 'OF', 'SLEEK', 'LOOKING', 'YOUNG', 'MEN', 'WHO', 'LIE', 'IN', 'THE', 'CURBSTONE', 'AND', 'STARE', 'AT', 'US', 'AS', 'WE', 'COME', 'OUT'] +4970-29095-0028-2082: ref=['HE', "DOESN'T", 'SAY', 'BUT', "IT'S", 'ON', 'THE', 'FRONTIER', 'AND', 'ON', 'THE', 'MAP', 'EVERYTHING', 'BEYOND', 'IT', 'IS', 'MARKED', 'INDIANS', 'AND', 'DESERT', 'AND', 'LOOKS', 'AS', 'DESOLATE', 'AS', 'A', 'WEDNESDAY', 'MEETING', 'HUMPH', 'IT', 'WAS', 'TIME', 'FOR', 'HIM', 'TO', 'DO', 'SOMETHING'] +4970-29095-0028-2082: hyp=['HE', "DOESN'T", 'SAY', 'BUT', "IT'S", 'ON', 'THE', 'FRONTIER', 'AND', 'ON', 'THE', 'MAP', 'EVERYTHING', 'BEYOND', 'IT', 'IS', 'MARKED', 'INDIANS', 'AND', 'DESERT', 'AND', 'LOOKS', 'AS', 'DESOLATE', 'AS', 'A', 'WINDSAY', 'MEETING', 'IT', 'WAS', 'TIME', 'FOR', 'HIM', 'TO', 'DO', 'SOMETHING'] +4970-29095-0029-2083: ref=['IS', 'HE', 'GOING', 'TO', 'START', 'A', 'DAILY', 'NEWSPAPER', 'AMONG', 'THE', 'KICK', 'A', 'POOS'] +4970-29095-0029-2083: hyp=['IS', 'HE', 'GOING', 'TO', 'START', 'A', 'DAILY', 'NEWSPAPER', 'AMONG', 'THE', 'KICKAPOOS'] +4970-29095-0030-2084: ref=['FATHER', "THEE'S", 'UNJUST', 'TO', 'PHILIP', "HE'S", 'GOING', 'INTO', 'BUSINESS'] +4970-29095-0030-2084: hyp=['FATHER', 'THESE', 'UNJUSTIFILL', 'UP', "HE'S", 'GOING', 'INTO', 'BUSINESS'] +4970-29095-0031-2085: ref=['HE', "DOESN'T", 'SAY', 'EXACTLY', 'WHAT', 'IT', 'IS', 'SAID', 'RUTH', 'A', 'LITTLE', 'DUBIOUSLY', 'BUT', "IT'S", 'SOMETHING', 'ABOUT', 'LAND', 'AND', 'RAILROADS', 'AND', 'THEE', 'KNOWS', 'FATHER', 'THAT', 'FORTUNES', 'ARE', 'MADE', 'NOBODY', 'KNOWS', 'EXACTLY', 'HOW', 'IN', 'A', 'NEW', 'COUNTRY'] +4970-29095-0031-2085: hyp=['HE', "DOESN'T", 'SAY', 'EXACTLY', 'WHAT', 'IT', 'IS', 'SAID', 'RUTH', 'A', 'LITTLE', 'DUBIOUSLY', 'BUT', "IT'S", 'SOMETHING', 'ABOUT', 'LAND', 'AND', 'RAILROADS', 'AND', 'THE', 'NOSE', 'FATHER', 'THAT', 'FORTUNES', 'ARE', 'MADE', 'NOBODY', 'KNOWS', 'EXACTLY', 'HOW', 'IN', 'A', 'NEW', 'COUNTRY'] +4970-29095-0032-2086: ref=['BUT', 'PHILIP', 'IS', 'HONEST', 'AND', 'HE', 'HAS', 'TALENT', 'ENOUGH', 'IF', 'HE', 'WILL', 'STOP', 'SCRIBBLING', 'TO', 'MAKE', 'HIS', 'WAY'] +4970-29095-0032-2086: hyp=['BUT', 'PHILIP', 'IS', 'HONEST', 'AND', 'HE', 'HAS', 'TALENT', 'ENOUGH', 'IF', 'HE', 'WILL', 'STOP', 'SCRIBBLING', 'TO', 'MAKE', 'HIS', 'WAY'] +4970-29095-0033-2087: ref=['WHAT', 'A', 'BOX', 'WOMEN', 'ARE', 'PUT', 'INTO', 'MEASURED', 'FOR', 'IT', 'AND', 'PUT', 'IN', 'YOUNG', 'IF', 'WE', 'GO', 'ANYWHERE', "IT'S", 'IN', 'A', 'BOX', 'VEILED', 'AND', 'PINIONED', 'AND', 
'SHUT', 'IN', 'BY', 'DISABILITIES'] +4970-29095-0033-2087: hyp=['WHAT', 'A', 'BOXWOMEN', 'ARE', 'PUT', 'INTO', 'MEASURED', 'FOR', 'IT', 'AND', 'PUT', 'IN', 'YOUNG', 'IF', 'WE', 'GO', 'ANYWHERE', "IT'S", 'IN', 'A', 'BOX', 'VEILED', 'AND', 'PINIONED', 'AND', 'SHUT', 'IN', 'BY', 'DISABILITIES'] +4970-29095-0034-2088: ref=['WHY', 'SHOULD', 'I', 'RUST', 'AND', 'BE', 'STUPID', 'AND', 'SIT', 'IN', 'INACTION', 'BECAUSE', 'I', 'AM', 'A', 'GIRL'] +4970-29095-0034-2088: hyp=['WHY', 'SHOULD', 'I', 'RUST', 'AND', 'BE', 'STUPID', 'AND', 'SIT', 'IN', 'AN', 'ACTION', 'BECAUSE', 'I', 'AM', 'A', 'GIRL'] +4970-29095-0035-2089: ref=['AND', 'IF', 'I', 'HAD', 'A', 'FORTUNE', 'WOULD', 'THEE', 'WANT', 'ME', 'TO', 'LEAD', 'A', 'USELESS', 'LIFE'] +4970-29095-0035-2089: hyp=['AND', 'IF', 'I', 'HAD', 'A', 'FORTUNE', 'WOULD', 'THEE', 'WANT', 'ME', 'TO', 'LEAD', 'A', 'USELESS', 'LIFE'] +4970-29095-0036-2090: ref=['HAS', 'THEE', 'CONSULTED', 'THY', 'MOTHER', 'ABOUT', 'A', 'CAREER', 'I', 'SUPPOSE', 'IT', 'IS', 'A', 'CAREER', 'THEE', 'WANTS'] +4970-29095-0036-2090: hyp=['HAS', 'THE', 'CONSULTED', 'THY', 'MOTHER', 'ABOUT', 'A', 'CAREER', 'I', 'SUPPOSE', 'IT', 'IS', 'A', 'CAREER', 'OF', 'THEE', 'WANTS'] +4970-29095-0037-2091: ref=['BUT', 'THAT', 'WISE', 'AND', 'PLACID', 'WOMAN', 'UNDERSTOOD', 'THE', 'SWEET', 'REBEL', 'A', 'GREAT', 'DEAL', 'BETTER', 'THAN', 'RUTH', 'UNDERSTOOD', 'HERSELF'] +4970-29095-0037-2091: hyp=['BUT', 'THAT', 'WISE', 'AND', 'PLACID', 'WOMAN', 'UNDERSTOOD', 'THE', 'SWEET', 'REBEL', 'A', 'GREAT', 'DEAL', 'BETTER', 'THAN', 'RUTH', 'UNDERSTOOD', 'HERSELF'] +4970-29095-0038-2092: ref=['RUTH', 'WAS', 'GLAD', 'TO', 'HEAR', 'THAT', 'PHILIP', 'HAD', 'MADE', 'A', 'PUSH', 'INTO', 'THE', 'WORLD', 'AND', 'SHE', 'WAS', 'SURE', 'THAT', 'HIS', 'TALENT', 'AND', 'COURAGE', 'WOULD', 'MAKE', 'A', 'WAY', 'FOR', 'HIM'] +4970-29095-0038-2092: hyp=['RUTH', 'WAS', 'GLAD', 'TO', 'HEAR', 'THAT', 'PHILIP', 'HAD', 'MADE', 'A', 'PUSH', 'INTO', 'THE', 'WORLD', 'AND', 'SHE', 'WAS', 'SURE', 'THAT', 'HIS', 'TALENT', 'AND', 'COURAGE', 'WOULD', 'MAKE', 'AWAY', 'FOR', 'HIM'] +4992-23283-0000-2140: ref=['BUT', 'THE', 'MORE', 'FORGETFULNESS', 'HAD', 'THEN', 'PREVAILED', 'THE', 'MORE', 'POWERFUL', 'WAS', 'THE', 'FORCE', 'OF', 'REMEMBRANCE', 'WHEN', 'SHE', 'AWOKE'] +4992-23283-0000-2140: hyp=['BUT', 'THE', 'MORE', 'FORGETFULNESS', 'HAD', 'THEN', 'PREVAILED', 'THE', 'MORE', 'POWERFUL', 'WAS', 'THE', 'FORCE', 'OF', 'REMEMBRANCE', 'WHEN', 'SHE', 'AWOKE'] +4992-23283-0001-2141: ref=['MISS', "MILNER'S", 'HEALTH', 'IS', 'NOT', 'GOOD'] +4992-23283-0001-2141: hyp=['MISS', "MILNER'S", 'HEALTH', 'IS', 'NOT', 'GOOD'] +4992-23283-0002-2142: ref=['SAID', 'MISSUS', 'HORTON', 'A', 'FEW', 'MINUTES', 'AFTER'] +4992-23283-0002-2142: hyp=['SAID', 'MISSUS', 'WHARTON', 'A', 'FEW', 'MINUTES', 'AFTER'] +4992-23283-0003-2143: ref=['SO', 'THERE', 'IS', 'TO', 'ME', 'ADDED', 'SANDFORD', 'WITH', 'A', 'SARCASTIC', 'SNEER'] +4992-23283-0003-2143: hyp=['SO', 'THERE', 'IS', 'TO', 'ME', 'ADDED', 'SANDFORD', 'WITH', 'A', 'SARCASTIC', 'SNEER'] +4992-23283-0004-2144: ref=['AND', 'YET', 'YOU', 'MUST', 'OWN', 'HER', 'BEHAVIOUR', 'HAS', 'WARRANTED', 'THEM', 'HAS', 'IT', 'NOT', 'BEEN', 'IN', 'THIS', 'PARTICULAR', 'INCOHERENT', 'AND', 'UNACCOUNTABLE'] +4992-23283-0004-2144: hyp=['AND', 'YET', 'YOU', 'MUST', 'OWN', 'HER', 'BEHAVIOR', 'HAS', 'WARRANTED', 'THEM', 'HAS', 'IT', 'NOT', 'BEEN', 'IN', 'THIS', 'PARTICULAR', 'INCOHERENT', 'AND', 'UNACCOUNTABLE'] +4992-23283-0005-2145: ref=['NOT', 'THAT', 'I', 'KNOW', 'OF', 'NOT', 'ONE', 'MORE', 'THAT', 'I', 'KNOW', 'OF', 'HE', 'REPLIED', 
'WITH', 'ASTONISHMENT', 'AT', 'WHAT', 'SHE', 'HAD', 'INSINUATED', 'AND', 'YET', 'WITH', 'A', 'PERFECT', 'ASSURANCE', 'THAT', 'SHE', 'WAS', 'IN', 'THE', 'WRONG'] +4992-23283-0005-2145: hyp=['NOT', 'THAT', 'I', 'KNOW', 'OF', 'NOT', 'ONE', 'MORE', 'THAT', 'I', 'KNOW', 'OF', 'HE', 'REPLIED', 'WITH', 'ASTONISHMENT', 'AT', 'WHAT', 'SHE', 'HAD', 'INSINUATED', 'AND', 'YET', 'WITH', 'A', 'PERFECT', 'ASSURANCE', 'THAT', 'SHE', 'WAS', 'IN', 'THE', 'WRONG'] +4992-23283-0006-2146: ref=['PERHAPS', 'I', 'AM', 'MISTAKEN', 'ANSWERED', 'SHE'] +4992-23283-0006-2146: hyp=['PERHAPS', 'I', 'AM', 'MISTAKEN', 'ANSWERED', 'SHE'] +4992-23283-0007-2147: ref=['TO', 'ASK', 'ANY', 'MORE', 'QUESTIONS', 'OF', 'YOU', 'I', 'BELIEVE', 'WOULD', 'BE', 'UNFAIR'] +4992-23283-0007-2147: hyp=['TO', 'ASK', 'ANY', 'MORE', 'QUESTIONS', 'OF', 'YOU', 'I', 'BELIEVE', 'WOULD', 'BE', 'UNFAIR'] +4992-23283-0008-2148: ref=['HE', 'SEEMED', 'TO', 'WAIT', 'FOR', 'HER', 'REPLY', 'BUT', 'AS', 'SHE', 'MADE', 'NONE', 'HE', 'PROCEEDED'] +4992-23283-0008-2148: hyp=['HE', 'SEEMED', 'TO', 'WAIT', 'FOR', 'HER', 'REPLY', 'BUT', 'AS', 'SHE', 'MADE', 'NONE', 'HE', 'PROCEEDED'] +4992-23283-0009-2149: ref=['OH', 'MY', 'LORD', 'CRIED', 'MISS', 'WOODLEY', 'WITH', 'A', 'MOST', 'FORCIBLE', 'ACCENT', 'YOU', 'ARE', 'THE', 'LAST', 'PERSON', 'ON', 'EARTH', 'SHE', 'WOULD', 'PARDON', 'ME', 'FOR', 'ENTRUSTING'] +4992-23283-0009-2149: hyp=['O', 'MY', 'LORD', 'CRIED', 'MISS', 'WOODLEY', 'WITH', 'A', 'MOST', 'FORCIBLE', 'ACCENT', 'YOU', 'ARE', 'THE', 'LAST', 'PERSON', 'ON', 'EARTH', 'SHE', 'WOULD', 'PARDON', 'ME', 'FOR', 'ENTRUSTING'] +4992-23283-0010-2150: ref=['BUT', 'IN', 'SUCH', 'A', 'CASE', 'MISS', "MILNER'S", 'ELECTION', 'OF', 'A', 'HUSBAND', 'SHALL', 'NOT', 'DIRECT', 'MINE'] +4992-23283-0010-2150: hyp=['BUT', 'IN', 'SUCH', 'A', 'CASE', 'MISS', "MILNER'S", 'ELECTION', 'OF', 'A', 'HUSBAND', 'SHALL', 'NOT', 'DIRECT', 'MINE'] +4992-23283-0011-2151: ref=['IF', 'SHE', 'DOES', 'NOT', 'KNOW', 'HOW', 'TO', 'ESTIMATE', 'HER', 'OWN', 'VALUE', 'I', 'DO'] +4992-23283-0011-2151: hyp=['IF', 'SHE', 'DOES', 'NOT', 'KNOW', 'HOW', 'TO', 'ESTIMATE', 'HER', 'OWN', 'VALUE', 'I', 'DO'] +4992-23283-0012-2152: ref=['INDEPENDENT', 'OF', 'HER', 'FORTUNE', 'SHE', 'HAS', 'BEAUTY', 'TO', 'CAPTIVATE', 'THE', 'HEART', 'OF', 'ANY', 'MAN', 'AND', 'WITH', 'ALL', 'HER', 'FOLLIES', 'SHE', 'HAS', 'A', 'FRANKNESS', 'IN', 'HER', 'MANNER', 'AN', 'UNAFFECTED', 'WISDOM', 'IN', 'HER', 'THOUGHTS', 'A', 'VIVACITY', 'IN', 'HER', 'CONVERSATION', 'AND', 'WITHAL', 'A', 'SOFTNESS', 'IN', 'HER', 'DEMEANOUR', 'THAT', 'MIGHT', 'ALONE', 'ENGAGE', 'THE', 'AFFECTIONS', 'OF', 'A', 'MAN', 'OF', 'THE', 'NICEST', 'SENTIMENTS', 'AND', 'THE', 'STRONGEST', 'UNDERSTANDING'] +4992-23283-0012-2152: hyp=['INDEPENDENT', 'OF', 'HER', 'FORTUNE', 'SHE', 'HAS', 'BEAUTY', 'TO', 'CAPTIVATE', 'THE', 'HEART', 'OF', 'ANY', 'MAN', 'AND', 'WITH', 'ALL', 'HER', 'FOLLIES', 'SHE', 'HAS', 'A', 'FRANKNESS', 'IN', 'HER', 'MANNER', 'AN', 'UNAFFECTED', 'WISDOM', 'IN', 'HER', 'THOUGHTS', 'OF', 'A', 'VIVACITY', 'IN', 'HER', 'CONVERSATION', 'AND', 'WITHAL', 'A', 'SOFTNESS', 'IN', 'HER', 'DEMEANOUR', 'THAT', 'MIGHT', 'ALONE', 'ENGAGE', 'THE', 'AFFECTIONS', 'OF', 'A', 'MAN', 'OF', 'THE', 'NICEST', 'SENTIMENTS', 'AND', 'THE', 'STRONGEST', 'UNDERSTANDING'] +4992-23283-0013-2153: ref=['MY', 'LORD', 'MISS', "MILNER'S", 'TASTE', 'IS', 'NOT', 'A', 'DEPRAVED', 'ONE', 'IT', 'IS', 'BUT', 'TOO', 'REFINED'] +4992-23283-0013-2153: hyp=['MY', 'LORD', 'MISS', "MILNER'S", 'TASTE', 'IS', 'NOT', 'A', 'DEPRAVED', 'ONE', 'IT', 'IS', 'BUT', 'TOO', 'REFINED'] 
+4992-23283-0014-2154: ref=['WHAT', 'CAN', 'YOU', 'MEAN', 'BY', 'THAT', 'MISS', 'WOODLEY', 'YOU', 'TALK', 'MYSTERIOUSLY'] +4992-23283-0014-2154: hyp=['WHAT', 'CAN', 'YOU', 'MEAN', 'BY', 'THAT', 'MISS', 'WOODLEY', 'YOU', 'TALK', 'MYSTERIOUSLY'] +4992-23283-0015-2155: ref=['IS', 'SHE', 'NOT', 'AFRAID', 'THAT', 'I', 'WILL', 'THWART', 'HER', 'INCLINATIONS'] +4992-23283-0015-2155: hyp=['IS', 'SHE', 'NOT', 'AFRAID', 'THAT', 'I', 'WILL', 'THWART', 'HER', 'INCLINATIONS'] +4992-23283-0016-2156: ref=['AGAIN', 'HE', 'SEARCHED', 'HIS', 'OWN', 'THOUGHTS', 'NOR', 'INEFFECTUALLY', 'AS', 'BEFORE'] +4992-23283-0016-2156: hyp=['AGAIN', 'HE', 'SEARCHED', 'HIS', 'OWN', 'THOUGHTS', 'NOR', 'IN', 'EFFECTUALLY', 'AS', 'BEFORE'] +4992-23283-0017-2157: ref=['MISS', 'WOODLEY', 'WAS', 'TOO', 'LITTLE', 'VERSED', 'IN', 'THE', 'SUBJECT', 'TO', 'KNOW', 'THIS', 'WOULD', 'HAVE', 'BEEN', 'NOT', 'TO', 'LOVE', 'AT', 'ALL', 'AT', 'LEAST', 'NOT', 'TO', 'THE', 'EXTENT', 'OF', 'BREAKING', 'THROUGH', 'ENGAGEMENTS', 'AND', 'ALL', 'THE', 'VARIOUS', 'OBSTACLES', 'THAT', 'STILL', 'MILITATED', 'AGAINST', 'THEIR', 'UNION'] +4992-23283-0017-2157: hyp=['MISS', 'WOODLEY', 'WAS', 'TOO', 'LITTLE', 'VERSED', 'IN', 'THE', 'SUBJECT', 'TO', 'KNOW', 'THIS', 'WOULD', 'HAVE', 'BEEN', 'NOT', 'TO', 'LOVE', 'AT', 'ALL', 'AT', 'LEAST', 'NOT', 'TO', 'THE', 'EXTENT', 'OF', 'BREAKING', 'THROUGH', 'ENGAGEMENTS', 'AND', 'ALL', 'THE', 'VARIOUS', 'OBSTACLES', 'THAT', 'STILL', 'MITIGATED', 'AGAINST', 'THEIR', 'UNION'] +4992-23283-0018-2158: ref=['TO', 'RELIEVE', 'HER', 'FROM', 'BOTH', 'HE', 'LAID', 'HIS', 'HAND', 'WITH', 'FORCE', 'UPON', 'HIS', 'HEART', 'AND', 'SAID', 'DO', 'YOU', 'BELIEVE', 'ME'] +4992-23283-0018-2158: hyp=['TO', 'RELIEVE', 'HER', 'FROM', 'BOTH', 'HE', 'LAID', 'HIS', 'HAND', 'WITH', 'FORCE', 'UPON', 'HIS', 'HEART', 'AND', 'SAID', 'DO', 'YOU', 'BELIEVE', 'ME'] +4992-23283-0019-2159: ref=['I', 'WILL', 'MAKE', 'NO', 'UNJUST', 'USE', 'OF', 'WHAT', 'I', 'KNOW', 'HE', 'REPLIED', 'WITH', 'FIRMNESS', 'I', 'BELIEVE', 'YOU', 'MY', 'LORD'] +4992-23283-0019-2159: hyp=['I', 'WILL', 'MAKE', 'NO', 'UNJUST', 'USE', 'OF', 'WHAT', 'I', 'KNOW', 'HE', 'REPLIED', 'WITH', 'FIRMNESS', 'I', 'BELIEVE', 'YOU', 'MY', 'LORD'] +4992-23283-0020-2160: ref=['I', 'HAVE', 'NEVER', 'YET', 'HOWEVER', 'BEEN', 'VANQUISHED', 'BY', 'THEM', 'AND', 'EVEN', 'UPON', 'THIS', 'OCCASION', 'MY', 'REASON', 'SHALL', 'COMBAT', 'THEM', 'TO', 'THE', 'LAST', 'AND', 'MY', 'REASON', 'SHALL', 'FAIL', 'ME', 'BEFORE', 'I', 'DO', 'WRONG'] +4992-23283-0020-2160: hyp=['I', 'HAVE', 'NEVER', 'YET', 'HOWEVER', 'BEEN', 'VANQUISHED', 'BY', 'THEM', 'AND', 'EVEN', 'UPON', 'THIS', 'OCCASION', 'MY', 'REASON', 'SHALL', 'COMBAT', 'THEM', 'TO', 'THE', 'LAST', 'AND', 'MY', 'REASON', 'SHALL', 'FAIL', 'ME', 'BEFORE', 'I', 'DO', 'WRONG'] +4992-41797-0000-2117: ref=['YES', 'DEAD', 'THESE', 'FOUR', 'YEARS', 'AN', 'A', 'GOOD', 'JOB', 'FOR', 'HER', 'TOO'] +4992-41797-0000-2117: hyp=['YES', 'DEAD', 'THESE', 'FOUR', 'YEARS', 'AND', 'A', 'GOOD', 'JOB', 'FOR', 'HER', 'TOO'] +4992-41797-0001-2118: ref=['WELL', 'AS', 'I', 'SAY', "IT'S", 'AN', 'AWFUL', 'QUEER', 'WORLD', 'THEY', 'CLAP', 'ALL', 'THE', 'BURGLARS', 'INTO', 'JAIL', 'AND', 'THE', 'MURDERERS', 'AND', 'THE', 'WIFE', 'BEATERS', "I'VE", 'ALLERS', 'THOUGHT', 'A', 'GENTLE', 'REPROOF', 'WOULD', 'BE', 'ENOUGH', 'PUNISHMENT', 'FOR', 'A', 'WIFE', 'BEATER', 'CAUSE', 'HE', 'PROBABLY', 'HAS', 'A', 'LOT', 'O', 'PROVOCATION', 'THAT', 'NOBODY', 'KNOWS', 'AND', 'THE', 'FIREBUGS', "CAN'T", 'THINK', 'O', 'THE', 'RIGHT', 'NAME', 'SOMETHING', 'LIKE', 'CENDENARIES', 'AN', 'THE', 'BREAKERS', 
'O', 'THE', 'PEACE', 'AN', 'WHAT', 'NOT', 'AN', 'YET', 'THE', 'LAW', 'HAS', 'NOTHIN', 'TO', 'SAY', 'TO', 'A', 'MAN', 'LIKE', 'HEN', 'LORD'] +4992-41797-0001-2118: hyp=['WELL', 'AS', 'I', 'SAY', "IT'S", 'AN', 'AWFUL', 'QUEER', 'WORLD', 'THEY', 'CLAP', 'ALL', 'THE', 'BURGLARS', 'AND', 'DOWN', 'THE', 'MURDERERS', 'AND', 'THE', 'WHITE', 'BEATERS', 'I', 'ALLERS', 'THOUGHT', 'A', 'GENTLE', 'REPROOF', 'WOULD', 'BE', 'ENOUGH', 'PUNISHMENT', 'FOR', 'A', 'WIFE', 'BEATER', 'CAUSE', 'HE', 'PROBABLY', 'HAS', 'A', 'LOT', 'OF', 'PROVOCATION', 'THAT', 'NOBODY', 'KNOWS', 'AND', 'THE', 'FIRE', 'BUGS', "CAN'T", 'THINK', 'OF', 'THE', 'RIGHT', 'NAME', 'SOMETHIN', 'LIKE', 'SENDIARIES', 'AND', 'THE', 'BREAKERS', 'OF', 'THE', 'PEACE', 'AND', 'WHAT', 'NOT', 'AND', 'YET', 'THE', 'LAW', 'HAS', 'NOTHING', 'TO', 'SAY', 'TO', 'A', 'MAN', 'LIKE', 'HANDLED'] +4992-41797-0002-2119: ref=['GRANDFATHER', 'WAS', 'ALEXANDER', 'CAREY', 'L', 'L', 'D', 'DOCTOR', 'OF', 'LAWS', 'THAT', 'IS'] +4992-41797-0002-2119: hyp=['GRANDFATHER', 'WAS', 'ALEXANDER', 'CAREY', 'L', 'D', 'DOCTOR', 'OF', 'LAWS', 'THAT', 'IS'] +4992-41797-0003-2120: ref=['MISTER', 'POPHAM', 'LAID', 'DOWN', 'HIS', 'BRUSH'] +4992-41797-0003-2120: hyp=['MISTER', 'POPHAM', 'LAID', 'DOWN', 'HIS', 'BRUSH'] +4992-41797-0004-2121: ref=['I', 'SWAN', 'TO', 'MAN', 'HE', 'EJACULATED', 'IF', 'YOU', "DON'T", 'WORK', 'HARD', 'YOU', "CAN'T", 'KEEP', 'UP', 'WITH', 'THE', 'TIMES', 'DOCTOR', 'OF', 'LAWS'] +4992-41797-0004-2121: hyp=['I', 'SWAY', 'INTO', 'MEN', 'HE', 'EJACULATED', 'IF', 'YOU', "DON'T", 'WORK', 'HARD', 'YOU', "CAN'T", 'KEEP', 'UP', 'WITH', 'THE', 'TIMES', 'DOCTOR', 'OF', 'LAWS'] +4992-41797-0005-2122: ref=['DONE', 'HE', "AIN'T", 'DONE', 'A', 'THING', "HE'D", 'OUGHTER', 'SENCE', 'HE', 'WAS', 'BORN'] +4992-41797-0005-2122: hyp=['DONE', 'HE', "HAIN'T", 'DONE', 'A', 'THING', 'HE', 'ORDERED', 'SINCE', 'HE', 'WAS', 'BORN'] +4992-41797-0006-2123: ref=['HE', 'KEEPS', 'THE', 'THOU', 'SHALT', 'NOT', 'COMMANDMENTS', 'FIRST', 'RATE', 'HEN', 'LORD', 'DOES'] +4992-41797-0006-2123: hyp=['HE', 'KEEPS', 'THE', 'THOU', 'SHALT', 'NOT', 'COMMANDMENTS', 'FIRST', 'RATE', 'HENLOORD', 'DOES'] +4992-41797-0007-2124: ref=['HE', 'GIVE', 'UP', 'HIS', 'POSITION', 'AND', 'SHUT', 'THE', 'FAMILY', 'UP', 'IN', 'THAT', 'TOMB', 'OF', 'A', 'HOUSE', 'SO', 'T', 'HE', 'COULD', 'STUDY', 'HIS', 'BOOKS'] +4992-41797-0007-2124: hyp=['HE', 'GAVE', 'UP', 'HIS', 'POSITION', 'AND', 'SHUT', 'THE', 'FAMILY', 'UP', 'IN', 'THAT', 'TOMB', 'OF', 'A', 'HOUSE', 'SODIN', 'HE', "COULDN'T", 'STUDY', 'HIS', 'BOOKS'] +4992-41797-0008-2125: ref=['MISTER', 'POPHAM', 'EXAGGERATED', 'NOTHING', 'BUT', 'ON', 'THE', 'CONTRARY', 'LEFT', 'MUCH', 'UNSAID', 'IN', 'HIS', 'NARRATIVE', 'OF', 'THE', 'FAMILY', 'AT', 'THE', 'HOUSE', 'OF', 'LORDS'] +4992-41797-0008-2125: hyp=['MISTER', 'POPHAM', 'EXAGGERATED', 'NOTHING', 'BUT', 'ON', 'THE', 'CONTRARY', 'LEFT', 'MUCH', 'UNSAID', 'IN', 'HIS', 'NARRATIVE', 'OF', 'THE', 'FAMILY', 'AT', 'THE', 'HOUSE', 'OF', 'LORDS'] +4992-41797-0009-2126: ref=['HENRY', 'LORD', 'WITH', 'THE', 'DEGREE', 'OF', 'PH', 'D', 'TO', 'HIS', 'CREDIT', 'HAD', 'BEEN', 'PROFESSOR', 'OF', 'ZOOLOGY', 'AT', 'A', 'NEW', 'ENGLAND', 'COLLEGE', 'BUT', 'HAD', 'RESIGNED', 'HIS', 'POST', 'IN', 'ORDER', 'TO', 'WRITE', 'A', 'SERIES', 'OF', 'SCIENTIFIC', 'TEXT', 'BOOKS'] +4992-41797-0009-2126: hyp=['HENRY', 'LORD', 'WITH', 'A', 'DEGREE', 'OF', 'PH', 'TO', 'HIS', 'CREDIT', 'HAD', 'BEEN', 'PROFESSOR', 'OF', 'ZOOLOGY', 'AT', 'A', 'NEW', 'ENGLAND', 'COLLEGE', 'BUT', 'HAD', 'RESIGNED', 'HIS', 'POST', 'IN', 'ORDER', 'TO', 'WRITE', 'A', 'SERIES', 
'OF', 'SCIENTIFIC', 'TEXT', 'BOOKS'] +4992-41797-0010-2127: ref=['ALWAYS', 'IRRITABLE', 'COLD', 'INDIFFERENT', 'HE', 'HAD', 'GROWN', 'RAPIDLY', 'MORE', 'SO', 'AS', 'YEARS', 'WENT', 'ON'] +4992-41797-0010-2127: hyp=['ALWAYS', 'IRRITABLE', 'COLD', 'INDIFFERENT', 'HE', 'HAD', 'GROWN', 'RAPIDLY', 'MORE', 'SO', 'AS', 'YEARS', 'WENT', 'ON'] +4992-41797-0011-2128: ref=['WHATEVER', 'APPEALED', 'TO', 'HER', 'SENSE', 'OF', 'BEAUTY', 'WAS', 'STRAIGHTWAY', 'TRANSFERRED', 'TO', 'PAPER', 'OR', 'CANVAS'] +4992-41797-0011-2128: hyp=['WHATEVER', 'APPEALED', 'TO', 'HER', 'SENSE', 'OF', 'BEAUTY', 'WAS', 'STRAIGHTWAY', 'TRANSFERRED', 'TO', 'PAPER', 'OR', 'GAMBUS'] +4992-41797-0012-2129: ref=['SHE', 'IS', 'WILD', 'TO', 'KNOW', 'HOW', 'TO', 'DO', 'THINGS'] +4992-41797-0012-2129: hyp=['SHE', 'IS', 'WILD', 'TO', 'KNOW', 'HOW', 'TO', 'DO', 'THINGS'] +4992-41797-0013-2130: ref=['SHE', 'MAKES', 'EFFORT', 'AFTER', 'EFFORT', 'TREMBLING', 'WITH', 'EAGERNESS', 'AND', 'WHEN', 'SHE', 'FAILS', 'TO', 'REPRODUCE', 'WHAT', 'SHE', 'SEES', 'SHE', 'WORKS', 'HERSELF', 'INTO', 'A', 'FRENZY', 'OF', 'GRIEF', 'AND', 'DISAPPOINTMENT'] +4992-41797-0013-2130: hyp=['SHE', 'MAKES', 'EFFORT', 'AFTER', 'EFFORT', 'TREMBLING', 'WITH', 'EAGERNESS', 'THAN', 'WHEN', 'SHE', 'FAILS', 'TO', 'REPRODUCE', 'WHAT', 'SHE', 'SEES', 'SHE', 'WORKS', 'HERSELF', 'INTO', 'A', 'FRENZY', 'OF', 'GRIEF', 'AND', 'DISAPPOINTMENT'] +4992-41797-0014-2131: ref=['WHEN', 'SHE', 'COULD', 'NOT', 'MAKE', 'A', 'RABBIT', 'OR', 'A', 'BIRD', 'LOOK', 'REAL', 'ON', 'PAPER', 'SHE', 'SEARCHED', 'IN', 'HER', "FATHER'S", 'BOOKS', 'FOR', 'PICTURES', 'OF', 'ITS', 'BONES'] +4992-41797-0014-2131: hyp=['WHEN', 'SHE', 'COULD', 'NOT', 'MAKE', 'A', 'RABBIT', 'OR', 'A', 'BIRD', 'LOOK', 'REAL', 'ON', 'PAPER', 'SHE', 'SEARCHED', 'IN', 'HER', "FATHER'S", 'BOOKS', 'FOR', 'PICTURES', 'OF', 'ITS', 'BONES'] +4992-41797-0015-2132: ref=['CYRIL', 'THERE', 'MUST', 'BE', 'SOME', 'BETTER', 'WAY', 'OF', 'DOING', 'I', 'JUST', 'DRAW', 'THE', 'OUTLINE', 'OF', 'AN', 'ANIMAL', 'AND', 'THEN', 'I', 'PUT', 'HAIRS', 'OR', 'FEATHERS', 'ON', 'IT', 'THEY', 'HAVE', 'NO', 'BODIES'] +4992-41797-0015-2132: hyp=['CYRIL', 'THERE', 'MUST', 'BE', 'SOME', 'BETTER', 'WAY', 'OF', 'DOING', 'I', 'JUST', 'DRAW', 'THE', 'OUTLINE', 'OF', 'AN', 'ANIMAL', 'AND', 'THEN', 'I', 'PUT', 'HAIRS', 'OR', 'FEATHERS', 'ON', 'IT', 'THEY', 'HAVE', 'NO', 'BODIES'] +4992-41797-0016-2133: ref=['THEY', "COULDN'T", 'RUN', 'NOR', 'MOVE', "THEY'RE", 'JUST', 'PASTEBOARD'] +4992-41797-0016-2133: hyp=['THEY', "COULDN'T", 'RUN', 'OR', 'MOVE', "THEY'RE", 'JUST', 'PASTEBOARD'] +4992-41797-0017-2134: ref=['HE', "WOULDN'T", 'SEARCH', 'SO', "DON'T", 'WORRY', 'REPLIED', 'CYRIL', 'QUIETLY', 'AND', 'THE', 'TWO', 'LOOKED', 'AT', 'EACH', 'OTHER', 'AND', 'KNEW', 'THAT', 'IT', 'WAS', 'SO'] +4992-41797-0017-2134: hyp=['HE', "WOULDN'T", 'SEARCH', 'SO', "DON'T", 'WORRY', 'REPLIED', 'CYRIL', 'QUIETLY', 'AND', 'THE', 'TWO', 'LOOKED', 'AT', 'EACH', 'OTHER', 'AND', 'KNEW', 'THAT', 'IT', 'WAS', 'SO'] +4992-41797-0018-2135: ref=['THERE', 'IN', 'THE', 'CEDAR', 'HOLLOW', 'THEN', 'LIVED', 'OLIVE', 'LORD', 'AN', 'ANGRY', 'RESENTFUL', 'LITTLE', 'CREATURE', 'WEIGHED', 'DOWN', 'BY', 'A', 'FIERCE', 'SENSE', 'OF', 'INJURY'] +4992-41797-0018-2135: hyp=['THERE', 'IN', 'THE', 'CEDAR', 'HOLLOW', 'THEN', 'LIVED', 'OLIVE', 'LORD', 'AN', 'ANGRY', 'RESENTFUL', 'LITTLE', 'CREATURE', 'WEIGHED', 'DOWN', 'BY', 'A', 'FIERCE', 'SENSE', 'OF', 'INJURY'] +4992-41797-0019-2136: ref=["OLIVE'S", 'MOURNFUL', 'BLACK', 'EYES', 'MET', "NANCY'S", 'SPARKLING', 'BROWN', 'ONES'] +4992-41797-0019-2136: hyp=['ALL', 
'OF', 'THIS', 'MOURNFUL', 'BLACK', 'EYES', 'MET', "NANCY'S", 'SPARKLING', 'BROWN', 'ONES'] +4992-41797-0020-2137: ref=["NANCY'S", 'CURLY', 'CHESTNUT', 'CROP', 'SHONE', 'IN', 'THE', 'SUN', 'AND', "OLIVE'S", 'THICK', 'BLACK', 'PLAITS', 'LOOKED', 'BLACKER', 'BY', 'CONTRAST'] +4992-41797-0020-2137: hyp=["NANCY'S", 'CURLY', 'CHESTNUT', 'CROP', 'SHONE', 'IN', 'THE', 'SUN', 'AND', "OLIVE'S", 'THICK', 'BLACK', 'PLATES', 'LOOKED', 'BLACKER', 'BY', 'CONTRAST'] +4992-41797-0021-2138: ref=["SHE'S", 'WONDERFUL', 'MORE', 'WONDERFUL', 'THAN', 'ANYBODY', "WE'VE", 'EVER', 'SEEN', 'ANYWHERE', 'AND', 'SHE', 'DRAWS', 'BETTER', 'THAN', 'THE', 'TEACHER', 'IN', 'CHARLESTOWN'] +4992-41797-0021-2138: hyp=['SHE', 'IS', 'WONDERFUL', 'MORE', 'WONDERFUL', 'IN', 'ANYBODY', "WE'VE", 'EVER', 'SEEN', 'ANYWHERE', 'AND', 'SHE', 'DRAWS', 'BETTER', 'THAN', 'THE', 'TEACHER', 'IN', 'CHARLESTOWN'] +4992-41797-0022-2139: ref=["SHE'S", 'OLDER', 'THAN', 'I', 'AM', 'BUT', 'SO', 'TINY', 'AND', 'SAD', 'AND', 'SHY', 'THAT', 'SHE', 'SEEMS', 'LIKE', 'A', 'CHILD'] +4992-41797-0022-2139: hyp=["SHE'S", 'OLDER', 'THAN', 'I', 'AM', 'BUT', 'SO', 'TINY', 'AND', 'SAD', 'AND', 'SHY', 'THAT', 'SHE', 'SEEMS', 'LIKE', 'A', 'CHILD'] +4992-41806-0000-2161: ref=['NATTY', 'HARMON', 'TRIED', 'THE', 'KITCHEN', 'PUMP', 'SECRETLY', 'SEVERAL', 'TIMES', 'DURING', 'THE', 'EVENING', 'FOR', 'THE', 'WATER', 'HAD', 'TO', 'RUN', 'UP', 'HILL', 'ALL', 'THE', 'WAY', 'FROM', 'THE', 'WELL', 'TO', 'THE', 'KITCHEN', 'SINK', 'AND', 'HE', 'BELIEVED', 'THIS', 'TO', 'BE', 'A', 'CONTINUAL', 'MIRACLE', 'THAT', 'MIGHT', 'GIVE', 'OUT', 'AT', 'ANY', 'MOMENT'] +4992-41806-0000-2161: hyp=['NATTY', 'HARMON', 'TRIED', 'THE', 'KITCHEN', 'PUMP', 'SECRETLY', 'SEVERAL', 'TIMES', 'DURING', 'THE', 'EVENING', 'FOR', 'THE', 'WATER', 'HAD', 'TO', 'RUN', 'UP', 'HILL', 'ALL', 'THE', 'WAY', 'FROM', 'THE', 'WELL', 'TO', 'THE', 'KITCHEN', 'SINK', 'AND', 'HE', 'BELIEVED', 'THIS', 'TO', 'BE', 'CONTINUAL', 'MIRACLE', 'THAT', 'MIGHT', 'GIVE', 'OUT', 'AT', 'ANY', 'MOMENT'] +4992-41806-0001-2162: ref=['TO', 'NIGHT', 'THERE', 'WAS', 'NO', 'NEED', 'OF', 'EXTRA', 'HEAT', 'AND', 'THERE', 'WERE', 'GREAT', 'CEREMONIES', 'TO', 'BE', 'OBSERVED', 'IN', 'LIGHTING', 'THE', 'FIRES', 'ON', 'THE', 'HEARTHSTONES'] +4992-41806-0001-2162: hyp=['TO', 'NIGHT', 'THERE', 'WAS', 'NO', 'NEED', 'OF', 'EXTRA', 'HEAT', 'AND', 'THERE', 'WERE', 'GREAT', 'CEREMONIES', 'TO', 'BE', 'OBSERVED', 'IN', 'LIGHTING', 'THE', 'FIRES', 'ON', 'THE', 'HEARTHSTONES'] +4992-41806-0002-2163: ref=['THEY', 'BEGAN', 'WITH', 'THE', 'ONE', 'IN', 'THE', 'FAMILY', 'SITTING', 'ROOM', 'COLONEL', 'WHEELER', 'RALPH', 'THURSTON', 'MISTER', 'AND', 'MISSUS', 'BILL', 'HARMON', 'WITH', 'NATTY', 'AND', 'RUFUS', 'MISTER', 'AND', 'MISSUS', 'POPHAM', 'WITH', 'DIGBY', 'AND', 'LALLIE', 'JOY', 'ALL', 'STANDING', 'IN', 'ADMIRING', 'GROUPS', 'AND', 'THRILLING', 'WITH', 'DELIGHT', 'AT', 'THE', 'ORDER', 'OF', 'EVENTS'] +4992-41806-0002-2163: hyp=['THEY', 'BEGAN', 'WITH', 'THE', 'ONE', 'IN', 'THE', 'FAMILY', 'SITTING', 'ROOM', 'COLONEL', 'WHEELER', 'RALPH', 'THURSTON', 'MISTER', 'AND', 'MISSUS', 'BILL', 'HARMON', 'WITH', 'NATTY', 'AND', 'RUFFUS', 'MISTER', 'AND', 'MISSUS', 'POPPUM', 'WITH', 'DIGBY', 'AND', 'LALLY', 'JOY', 'ALL', 'STANDING', 'IN', 'ADMIRING', 'GROUPS', 'AND', 'THRILLING', 'WITH', 'DELIGHT', 'AT', 'THE', 'ORDER', 'OF', 'EVENTS'] +4992-41806-0003-2164: ref=['KATHLEEN', 'WAVED', 'THE', 'TORCH', 'TO', 'AND', 'FRO', 'AS', 'SHE', 'RECITED', 'SOME', 'BEAUTIFUL', 'LINES', 'WRITTEN', 'FOR', 'SOME', 'SUCH', 'PURPOSE', 'AS', 'THAT', 'WHICH', 'CALLED', 'THEM', 'TOGETHER', 
'TO', 'NIGHT'] +4992-41806-0003-2164: hyp=['CATHERINE', 'WAVED', 'THE', 'TORCH', 'TO', 'AND', 'FRO', 'AS', 'SHE', 'RECITED', 'SOME', 'BEAUTIFUL', 'LINES', 'WRITTEN', 'FOR', 'SOME', 'SUCH', 'PURPOSE', 'AS', 'THAT', 'WHICH', 'CALLED', 'THEM', 'TOGETHER', 'TO', 'NIGHT'] +4992-41806-0004-2165: ref=['BURN', 'FIRE', 'BURN', 'FLICKER', 'FLICKER', 'FLAME'] +4992-41806-0004-2165: hyp=['BURN', 'FIRE', 'BURN', 'FLICKER', 'FLICKER', 'FLAME'] +4992-41806-0005-2166: ref=['NEXT', 'CAME', "OLIVE'S", 'TURN', 'TO', 'HELP', 'IN', 'THE', 'CEREMONIES'] +4992-41806-0005-2166: hyp=['NEXT', 'CAME', 'OLIVES', 'TURN', 'TO', 'HELP', 'IN', 'THE', 'CEREMONIES'] +4992-41806-0006-2167: ref=['RALPH', 'THURSTON', 'HAD', 'FOUND', 'A', 'LINE', 'OF', 'LATIN', 'FOR', 'THEM', 'IN', 'HIS', 'BELOVED', 'HORACE', 'TIBI', 'SPLENDET', 'FOCUS', 'FOR', 'YOU', 'THE', 'HEARTH', 'FIRE', 'SHINES'] +4992-41806-0006-2167: hyp=['RALPH', 'THURSTON', 'HAD', 'FOUND', 'A', 'LINE', 'OF', 'LATIN', 'FOR', 'THEM', 'IN', 'HIS', 'BELOVED', 'HORRENTS', 'TIBEE', 'SPLENDID', 'FOCUS', 'FOR', 'YOU', 'THE', 'HEARTH', 'FIRE', 'SHINES'] +4992-41806-0007-2168: ref=['OLIVE', 'HAD', 'PAINTED', 'THE', 'MOTTO', 'ON', 'A', 'LONG', 'NARROW', 'PANEL', 'OF', 'CANVAS', 'AND', 'GIVING', 'IT', 'TO', 'MISTER', 'POPHAM', 'STOOD', 'BY', 'THE', 'FIRESIDE', 'WHILE', 'HE', 'DEFTLY', 'FITTED', 'IT', 'INTO', 'THE', 'PLACE', 'PREPARED', 'FOR', 'IT'] +4992-41806-0007-2168: hyp=['OLIVE', 'HAD', 'PAINTED', 'THE', 'MOTTO', 'ON', 'A', 'LONG', 'NARROW', 'PANEL', 'OF', 'CANVAS', 'AND', 'GIVING', 'IT', 'TO', 'MISTER', 'POPHAM', 'STOOD', 'BY', 'THE', 'FIRESIDE', 'WHILE', 'HE', 'DEFTLY', 'FITTED', 'IT', 'INTO', 'THE', 'PLACE', 'PREPARED', 'FOR', 'IT'] +4992-41806-0008-2169: ref=['OLIVE', 'HAS', 'ANOTHER', 'LOVELY', 'GIFT', 'FOR', 'THE', 'YELLOW', 'HOUSE', 'SAID', 'MOTHER', 'CAREY', 'RISING', 'AND', 'TO', 'CARRY', 'OUT', 'THE', 'NEXT', 'PART', 'OF', 'THE', 'PROGRAMME', 'WE', 'SHALL', 'HAVE', 'TO', 'GO', 'IN', 'PROCESSION', 'UPSTAIRS', 'TO', 'MY', 'BEDROOM'] +4992-41806-0008-2169: hyp=['OLIVE', 'HAS', 'ANOTHER', 'LOVELY', 'GIFT', 'FOR', 'THE', 'YELLOW', 'HOUSE', 'SAID', 'MOTHER', 'CAREY', 'RISING', 'AND', 'TO', 'CARRY', 'OUT', 'THE', 'NEXT', 'PART', 'OF', 'THE', 'PROGRAMME', 'WE', 'SHALL', 'HAVE', 'TO', 'GO', 'IN', 'PROCESSION', 'UPSTAIRS', 'TO', 'MY', 'BEDROOM'] +4992-41806-0009-2170: ref=['EXCLAIMED', 'BILL', 'HARMON', 'TO', 'HIS', 'WIFE', 'AS', 'THEY', 'WENT', 'THROUGH', 'THE', 'LIGHTED', 'HALL'] +4992-41806-0009-2170: hyp=['EXCLAIMED', 'BILL', 'HARMON', 'TO', 'HIS', 'WIFE', 'AS', 'THEY', 'WENT', 'THROUGH', 'THE', 'LIGHTED', 'HALL'] +4992-41806-0010-2171: ref=["AIN'T", 'THEY', 'THE', 'GREATEST'] +4992-41806-0010-2171: hyp=["AIN'T", 'THEY', 'THE', 'GREATEST'] +4992-41806-0011-2172: ref=['MOTHER', 'CAREY', 'POURED', 'COFFEE', 'NANCY', 'CHOCOLATE', 'AND', 'THE', 'OTHERS', 'HELPED', 'SERVE', 'THE', 'SANDWICHES', 'AND', 'CAKE', 'DOUGHNUTS', 'AND', 'TARTS'] +4992-41806-0011-2172: hyp=['MOTHER', 'CAREY', 'POURED', 'COFFEE', 'NANCY', 'CHOCOLATE', 'AND', 'THE', 'OTHERS', 'HELP', 'SERVED', 'THE', 'SANDWICHES', 'AND', 'CAKE', 'DOUGHNUTS', 'AND', 'TARTS'] +4992-41806-0012-2173: ref=['AT', 'THAT', 'MOMENT', 'THE', 'GENTLEMAN', 'ENTERED', 'BEARING', 'A', 'HUGE', 'OBJECT', 'CONCEALED', 'BY', 'A', 'PIECE', 'OF', 'GREEN', 'FELT'] +4992-41806-0012-2173: hyp=['AT', 'THAT', 'MOMENT', 'THE', 'GENTLEMAN', 'ENTERED', 'BEARING', 'A', 'HUGE', 'OBJECT', 'CONCEALED', 'BY', 'A', 'PIECE', 'OF', 'GREEN', 'FIL'] +4992-41806-0013-2174: ref=['APPROACHING', 'THE', 'DINING', 'TABLE', 'HE', 'CAREFULLY', 'PLACED', 'THE', 
'ARTICLE', 'IN', 'THE', 'CENTRE', 'AND', 'REMOVED', 'THE', 'CLOTH'] +4992-41806-0013-2174: hyp=['APPROACHING', 'THE', 'DINING', 'TABLE', 'HE', 'CAREFULLY', 'PLACED', 'THE', 'ARTICLE', 'IN', 'THE', 'CENTRE', 'AND', 'REMOVED', 'THE', 'CLOTH'] +4992-41806-0014-2175: ref=['THINKS', 'I', 'TO', 'MYSELF', 'I', 'NEVER', 'SEEN', 'ANYTHING', 'OSH', 'POPHAM', "COULDN'T", 'MEND', 'IF', 'HE', 'TOOK', 'TIME', 'ENOUGH', 'AND', 'GLUE', 'ENOUGH', 'SO', 'I', 'CARRIED', 'THIS', 'LITTLE', 'FELLER', 'HOME', 'IN', 'A', 'BUSHEL', 'BASKET', 'ONE', 'NIGHT', 'LAST', 'MONTH', 'AN', "I'VE", 'SPENT', 'ELEVEN', "EVENIN'S", 'PUTTIN', 'HIM', 'TOGETHER'] +4992-41806-0014-2175: hyp=['THINK', 'SADD', 'TO', 'MYSELF', 'I', 'NEVER', 'SEEN', 'ANYTHING', 'I', 'WAS', 'POPLED', "GOODN'T", 'MEN', 'IF', 'HE', 'TOOK', 'TIME', 'ENOUGH', 'AND', 'GLUE', 'ENOUGH', 'SO', 'I', 'CARRIED', 'THIS', 'LITTLE', 'FELLER', 'HOME', 'IN', 'A', 'BUSH', 'O', 'BASKET', 'ONE', 'NIGHT', 'LAST', 'MONTH', 'AND', "I'VE", 'SPENT', 'ELEVEN', 'EVENINGS', 'PUTTING', 'HIM', 'TOGETHER'] +4992-41806-0015-2176: ref=['MISSUS', 'HARMON', 'THOUGHT', 'HE', 'SANG', 'TOO', 'MUCH', 'AND', 'TOLD', 'HER', 'HUSBAND', 'PRIVATELY', 'THAT', 'IF', 'HE', 'WAS', 'A', 'CANARY', 'BIRD', 'SHE', 'SHOULD', 'WANT', 'TO', 'KEEP', 'A', 'TABLE', 'COVER', 'OVER', 'HIS', 'HEAD', 'MOST', 'OF', 'THE', 'TIME', 'BUT', 'HE', 'WAS', 'IMMENSELY', 'POPULAR', 'WITH', 'THE', 'REST', 'OF', 'HIS', 'AUDIENCE'] +4992-41806-0015-2176: hyp=['MISSUS', 'HARMON', 'THOUGHT', 'HE', 'SANG', 'TOO', 'MUCH', 'AND', 'TOLD', 'HER', 'HUSBAND', 'PRIVATELY', 'THAT', 'IF', 'HE', 'WAS', 'A', 'CANARY', 'BIRD', 'SHE', 'SHOULD', 'WANT', 'TO', 'KEEP', 'A', 'TABLE', 'COVER', 'OVER', 'HIS', 'HEAD', 'MOST', 'OF', 'THE', 'TIME', 'BUT', 'HE', 'WAS', 'IMMENSELY', 'POPULAR', 'WITH', 'THE', 'REST', 'OF', 'HIS', 'AUDIENCE'] +4992-41806-0016-2177: ref=['THE', 'FACE', 'OF', 'THE', 'MAHOGANY', 'SHONE', 'WITH', 'DELIGHT', 'AND', 'WHY', 'NOT', 'WHEN', 'IT', 'WAS', 'DOING', 'EVERYTHING', 'ALMOST', 'EVERYTHING', 'WITHIN', 'THE', 'SCOPE', 'OF', 'A', 'PIANO', 'AND', 'YET', 'THE', 'FAMILY', 'HAD', 'ENJOYED', 'WEEKS', 'OF', 'GOOD', 'NOURISHING', 'MEALS', 'ON', 'WHAT', 'HAD', 'BEEN', 'SAVED', 'BY', 'ITS', 'EXERTIONS'] +4992-41806-0016-2177: hyp=['THE', 'FACE', 'OF', 'THE', 'MAHOGANY', 'SHONE', 'WITH', 'DELIGHT', 'AND', 'WHY', 'NOT', 'WHEN', 'IT', 'WAS', 'DOING', 'EVERYTHING', 'ALMOST', 'EVERYTHING', 'WITHIN', 'THE', 'SCOPE', 'OF', 'A', 'PIANO', 'AND', 'YET', 'THE', 'FAMILY', 'HAD', 'ENJOYED', 'WEEKS', 'OF', 'GOOD', 'NOURISHING', 'MEALS', 'ON', 'WHAT', 'HAD', 'BEEN', 'SAVED', 'BY', 'ITS', 'EXERTIONS'] +4992-41806-0017-2178: ref=['WE', 'SHUT', 'OUR', 'EYES', 'THE', 'FLOWERS', 'BLOOM', 'ON', 'WE', 'MURMUR', 'BUT', 'THE', 'CORN', 'EARS', 'FILL', 'WE', 'CHOOSE', 'THE', 'SHADOW', 'BUT', 'THE', 'SUN', 'THAT', 'CASTS', 'IT', 'SHINES', 'BEHIND', 'US', 'STILL'] +4992-41806-0017-2178: hyp=['WE', 'SHUT', 'OUR', 'EYES', 'THE', 'FLOWERS', 'BLOOM', 'ON', 'WE', 'MURMUR', 'BUT', 'THE', 'CORNEERS', 'FILL', 'WE', 'CHOOSE', 'THE', 'SHADOW', 'BUT', 'THE', 'SUN', 'THAT', 'CAST', 'IT', 'SHINES', 'BEHIND', 'US', 'STILL'] +5105-28233-0000-1649: ref=['LENGTH', 'OF', 'SERVICE', 'FOURTEEN', 'YEARS', 'THREE', 'MONTHS', 'AND', 'FIVE', 'DAYS'] +5105-28233-0000-1649: hyp=['LENGTH', 'OF', 'SERVICE', 'FOURTEEN', 'YEARS', 'THREE', 'MONTHS', 'AND', 'FIVE', 'DAYS'] +5105-28233-0001-1650: ref=['HE', 'SEEMED', 'BORN', 'TO', 'PLEASE', 'WITHOUT', 'BEING', 'CONSCIOUS', 'OF', 'THE', 'POWER', 'HE', 'POSSESSED'] +5105-28233-0001-1650: hyp=['HE', 'SEEMED', 'BORN', 'TO', 'PLEASE', 'WITHOUT', 
'BEING', 'CONSCIOUS', 'OF', 'THE', 'POWER', 'HE', 'POSSESSED'] +5105-28233-0002-1651: ref=['IT', 'MUST', 'BE', 'OWNED', 'AND', 'NO', 'ONE', 'WAS', 'MORE', 'READY', 'TO', 'CONFESS', 'IT', 'THAN', 'HIMSELF', 'THAT', 'HIS', 'LITERARY', 'ATTAINMENTS', 'WERE', 'BY', 'NO', 'MEANS', 'OF', 'A', 'HIGH', 'ORDER'] +5105-28233-0002-1651: hyp=['IT', 'MUST', 'BE', 'OWNED', 'AND', 'NO', 'ONE', 'WAS', 'MORE', 'READY', 'TO', 'CONFESS', 'IT', 'THAN', 'HIMSELF', 'THAT', 'HIS', 'LITERARY', 'ATTAINMENTS', 'WERE', 'BY', 'NO', 'MEANS', 'OF', 'A', 'HIGH', 'ORDER'] +5105-28233-0003-1652: ref=['WE', "DON'T", 'SPIN', 'TOPS', 'IS', 'A', 'FAVORITE', 'SAYING', 'AMONGST', 'ARTILLERY', 'OFFICERS', 'INDICATING', 'THAT', 'THEY', 'DO', 'NOT', 'SHIRK', 'THEIR', 'DUTY', 'BY', 'FRIVOLOUS', 'PURSUITS', 'BUT', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'SERVADAC', 'BEING', 'NATURALLY', 'IDLE', 'WAS', 'VERY', 'MUCH', 'GIVEN', 'TO', 'SPINNING', 'TOPS'] +5105-28233-0003-1652: hyp=['WE', "DON'T", 'SPEND', 'TOPS', 'IS', 'A', 'FAVORITE', 'SAYING', 'AMONGST', 'ARTILLERY', 'OFFICERS', 'INDICATING', 'THAT', 'THEY', 'DO', 'NOT', 'SHIRK', 'THEIR', 'DUTY', 'BY', 'FRIVOLOUS', 'PURSUITS', 'BUT', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'SERVADAC', 'BEING', 'NATURALLY', 'IDLE', 'WAS', 'VERY', 'MUCH', 'GIVEN', 'TO', 'SPINNING', 'TOPS'] +5105-28233-0004-1653: ref=['ONCE', 'IN', 'ACTION', 'HE', 'WAS', 'LEADING', 'A', 'DETACHMENT', 'OF', 'INFANTRY', 'THROUGH', 'AN', 'INTRENCHMENT'] +5105-28233-0004-1653: hyp=['ONCE', 'IN', 'ACTION', 'HE', 'WAS', 'LEADING', 'A', 'DETACHMENT', 'OF', 'INFANTRY', 'THROUGH', 'AN', 'ENTRENCHMENT'] +5105-28233-0005-1654: ref=['SOMETIMES', 'HE', 'WOULD', 'WANDER', 'ON', 'FOOT', 'UPON', 'THE', 'SANDY', 'SHORE', 'AND', 'SOMETIMES', 'HE', 'WOULD', 'ENJOY', 'A', 'RIDE', 'ALONG', 'THE', 'SUMMIT', 'OF', 'THE', 'CLIFF', 'ALTOGETHER', 'BEING', 'IN', 'NO', 'HURRY', 'AT', 'ALL', 'TO', 'BRING', 'HIS', 'TASK', 'TO', 'AN', 'END'] +5105-28233-0005-1654: hyp=['SOMETIMES', 'HE', 'WOULD', 'WANDER', 'ON', 'FOOT', 'UPON', 'THE', 'SANDY', 'SHORE', 'AND', 'SOMETIMES', 'HE', 'WOULD', 'ENJOY', 'A', 'RIDE', 'ALONG', 'THE', 'SUMMIT', 'OF', 'THE', 'CLIFF', 'ALTOGETHER', 'BEING', 'IN', 'NO', 'HURRY', 'AT', 'ALL', 'TO', 'BRING', 'HIS', 'TASK', 'TO', 'AN', 'END'] +5105-28233-0006-1655: ref=['NO', 'CATHEDRAL', 'NOT', 'EVEN', 'BURGOS', 'ITSELF', 'COULD', 'VIE', 'WITH', 'THE', 'CHURCH', 'AT', 'MONTMARTRE'] +5105-28233-0006-1655: hyp=['NO', 'CATHEDRAL', 'NOT', 'EVEN', 'BURGOS', 'ITSELF', 'COULD', 'VIE', 'WITH', 'THE', 'CHURCH', 'AT', 'MONT', 'MARTRA'] +5105-28233-0007-1656: ref=['BEN', "ZOOF'S", 'MOST', 'AMBITIOUS', 'DESIRE', 'WAS', 'TO', 'INDUCE', 'THE', 'CAPTAIN', 'TO', 'GO', 'WITH', 'HIM', 'AND', 'END', 'HIS', 'DAYS', 'IN', 'HIS', 'MUCH', 'LOVED', 'HOME', 'AND', 'SO', 'INCESSANTLY', 'WERE', "SERVADAC'S", 'EARS', 'BESIEGED', 'WITH', 'DESCRIPTIONS', 'OF', 'THE', 'UNPARALLELED', 'BEAUTIES', 'AND', 'ADVANTAGES', 'OF', 'THIS', 'EIGHTEENTH', 'ARRONDISSEMENT', 'OF', 'PARIS', 'THAT', 'HE', 'COULD', 'SCARCELY', 'HEAR', 'THE', 'NAME', 'OF', 'MONTMARTRE', 'WITHOUT', 'A', 'CONSCIOUS', 'THRILL', 'OF', 'AVERSION'] +5105-28233-0007-1656: hyp=['BEN', "ZOOF'S", 'MOST', 'AMBITIOUS', 'DESIRE', 'WAS', 'TO', 'INDUCE', 'THE', 'CAPTAIN', 'TO', 'GO', 'WITH', 'HIM', 'AND', 'END', 'HIS', 'DAYS', 'IN', 'HIS', 'MUCH', 'LOVED', 'HOME', 'AND', 'SO', 'INCESSANTLY', 'WERE', "SERVADAC'S", 'EARS', 'BESIEGED', 'WITH', 'DESCRIPTIONS', 'OF', 'THE', 'UNPARALLELED', 'BEAUTIES', 'AND', 'ADVANTAGES', 'OF', 'THIS', 'EIGHTEENTH', 'ARE', 'ON', 'DE', 'SAINT', 'OF', 'PARIS', 'THAT', 'HE', 'COULD', 
'SCARCELY', 'HEAR', 'THE', 'NAME', 'OF', 'MONTMARTRA', 'WITHOUT', 'A', 'CONSCIOUS', 'THRILL', 'OF', 'AVERSION'] +5105-28233-0008-1657: ref=['WHEN', 'A', 'PRIVATE', 'IN', 'THE', 'EIGHTH', 'CAVALRY', 'HE', 'HAD', 'BEEN', 'ON', 'THE', 'POINT', 'OF', 'QUITTING', 'THE', 'ARMY', 'AT', 'TWENTY', 'EIGHT', 'YEARS', 'OF', 'AGE', 'BUT', 'UNEXPECTEDLY', 'HE', 'HAD', 'BEEN', 'APPOINTED', 'ORDERLY', 'TO', 'CAPTAIN', 'SERVADAC'] +5105-28233-0008-1657: hyp=['WHEN', 'A', 'PRIVATE', 'AND', 'THE', 'EIGHTH', 'CAVALRY', 'HE', 'HAD', 'BEEN', 'ON', 'THE', 'POINT', 'OF', 'QUITTING', 'THE', 'ARMY', 'AT', 'TWENTY', 'EIGHT', 'YEARS', 'OF', 'AGE', 'BUT', 'UNEXPECTEDLY', 'HE', 'HAD', 'BEEN', 'APPOINTED', 'ORDERLY', 'TO', 'CAPTAIN', 'SERVADAC'] +5105-28233-0009-1658: ref=['THE', 'BOND', 'OF', 'UNION', 'THUS', 'EFFECTED', 'COULD', 'NEVER', 'BE', 'SEVERED', 'AND', 'ALTHOUGH', 'BEN', "ZOOF'S", 'ACHIEVEMENTS', 'HAD', 'FAIRLY', 'EARNED', 'HIM', 'THE', 'RIGHT', 'OF', 'RETIREMENT', 'HE', 'FIRMLY', 'DECLINED', 'ALL', 'HONORS', 'OR', 'ANY', 'PENSION', 'THAT', 'MIGHT', 'PART', 'HIM', 'FROM', 'HIS', 'SUPERIOR', 'OFFICER'] +5105-28233-0009-1658: hyp=['THE', 'BOND', 'OF', 'UNION', 'THUS', 'EFFECTED', 'COULD', 'NEVER', 'BE', 'SEVERED', 'AND', 'ALTHOUGH', 'BEN', "ZOV'S", 'ACHIEVEMENTS', 'HAD', 'FAIRLY', 'EARNED', 'HIM', 'THE', 'RIGHT', 'OF', 'RETIREMENT', 'HE', 'FIRMLY', 'DECLINED', 'ALL', 'HONORS', 'OR', 'ANY', 'PENSION', 'THAT', 'MIGHT', 'PART', 'HIM', 'FROM', 'HIS', 'SUPERIOR', 'OFFICER'] +5105-28233-0010-1659: ref=['UNLIKE', 'HIS', 'MASTER', 'HE', 'MADE', 'NO', 'PRETENSION', 'TO', 'ANY', 'GIFT', 'OF', 'POETIC', 'POWER', 'BUT', 'HIS', 'INEXHAUSTIBLE', 'MEMORY', 'MADE', 'HIM', 'A', 'LIVING', 'ENCYCLOPAEDIA', 'AND', 'FOR', 'HIS', 'STOCK', 'OF', 'ANECDOTES', 'AND', "TROOPER'S", 'TALES', 'HE', 'WAS', 'MATCHLESS'] +5105-28233-0010-1659: hyp=['I', 'MAKE', 'HIS', 'MASTER', 'HE', 'MADE', 'NO', 'PRETENSION', 'TO', 'ANY', 'GIFT', 'OF', 'POETIC', 'POWER', 'BUT', 'HIS', 'INEXHAUSTIBLE', 'MEMORY', 'MADE', 'HIM', 'A', 'LIVING', 'ENCYCLOPAEDIA', 'AND', 'FOR', 'HIS', 'STOCK', 'OF', 'ANECDOTES', 'AND', "TROOPER'S", 'TALES', 'HE', 'WAS', 'MATCHLESS'] +5105-28240-0000-1624: ref=['FAST', 'AS', 'HIS', 'LEGS', 'COULD', 'CARRY', 'HIM', 'SERVADAC', 'HAD', 'MADE', 'HIS', 'WAY', 'TO', 'THE', 'TOP', 'OF', 'THE', 'CLIFF'] +5105-28240-0000-1624: hyp=['FAST', 'AS', 'HIS', 'LEGS', 'COULD', 'CARRY', 'HIM', 'SERVADAC', 'HAD', 'MADE', 'HIS', 'WAY', 'TO', 'THE', 'TOP', 'OF', 'THE', 'CLIFF'] +5105-28240-0001-1625: ref=['IT', 'WAS', 'QUITE', 'TRUE', 'THAT', 'A', 'VESSEL', 'WAS', 'IN', 'SIGHT', 'HARDLY', 'MORE', 'THAN', 'SIX', 'MILES', 'FROM', 'THE', 'SHORE', 'BUT', 'OWING', 'TO', 'THE', 'INCREASE', 'IN', 'THE', "EARTH'S", 'CONVEXITY', 'AND', 'THE', 'CONSEQUENT', 'LIMITATION', 'OF', 'THE', 'RANGE', 'OF', 'VISION', 'THE', 'RIGGING', 'OF', 'THE', 'TOPMASTS', 'ALONE', 'WAS', 'VISIBLE', 'ABOVE', 'THE', 'WATER'] +5105-28240-0001-1625: hyp=['IT', 'WAS', 'QUITE', 'TRUE', 'THAT', 'A', 'VESSEL', 'WAS', 'IN', 'SIGHT', 'HARDLY', 'MORE', 'THAN', 'SIX', 'MILES', 'FROM', 'THE', 'SHORE', 'BUT', 'OWING', 'TO', 'THE', 'INCREASE', 'IN', 'THE', "EARTH'S", 'CONVEXITY', 'AND', 'THE', 'CONSEQUENT', 'LIMITATION', 'OF', 'THE', 'RANGE', 'OF', 'VISION', 'THE', 'RIGGING', 'OF', 'THE', 'TOPMASTS', 'ALONE', 'WAS', 'VISIBLE', 'ABOVE', 'THE', 'WATER'] +5105-28240-0002-1626: ref=['EXCLAIMED', 'SERVADAC', 'KEEPING', 'HIS', 'EYE', 'UNMOVED', 'AT', 'HIS', 'TELESCOPE'] +5105-28240-0002-1626: hyp=['EXCLAIMED', 'SERVADAC', 'KEEPING', 'HIS', 'EYE', 'UNMOVED', 'AT', 'HIS', 'TELESCOPE'] 
+5105-28240-0003-1627: ref=['SHE', 'IS', 'UNDER', 'SAIL', 'BUT', 'SHE', 'IS', 'COUNT', "TIMASCHEFF'S", 'YACHT', 'HE', 'WAS', 'RIGHT'] +5105-28240-0003-1627: hyp=['SHE', 'IS', 'UNDER', 'SALE', 'BUT', 'SHE', 'IS', 'COUNT', "TIMASCHEFF'S", 'YACHT', 'HE', 'WAS', 'RIGHT'] +5105-28240-0004-1628: ref=['IF', 'THE', 'COUNT', 'WERE', 'ON', 'BOARD', 'A', 'STRANGE', 'FATALITY', 'WAS', 'BRINGING', 'HIM', 'TO', 'THE', 'PRESENCE', 'OF', 'HIS', 'RIVAL'] +5105-28240-0004-1628: hyp=['IF', 'THE', 'COUNT', 'WERE', 'ON', 'BOARD', 'A', 'STRANGE', 'FATALITY', 'WAS', 'BRINGING', 'HIM', 'TO', 'THE', 'PRESENCE', 'OF', 'HIS', 'RIVAL'] +5105-28240-0005-1629: ref=['HE', 'RECKONED', 'THEREFORE', 'NOT', 'ONLY', 'UPON', 'ASCERTAINING', 'THE', 'EXTENT', 'OF', 'THE', 'LATE', 'CATASTROPHE', 'BUT', 'UPON', 'LEARNING', 'ITS', 'CAUSE'] +5105-28240-0005-1629: hyp=['HE', 'RECKONED', 'THEREFORE', 'NOT', 'ONLY', 'UPON', 'ASCERTAINING', 'THE', 'EXTENT', 'OF', 'THE', 'LATE', 'CATASTROPHE', 'BUT', 'UPON', 'LEARNING', 'ITS', 'CAUSE'] +5105-28240-0006-1630: ref=['THE', 'WIND', 'BEING', 'ADVERSE', 'THE', 'DOBRYNA', 'DID', 'NOT', 'MAKE', 'VERY', 'RAPID', 'PROGRESS', 'BUT', 'AS', 'THE', 'WEATHER', 'IN', 'SPITE', 'OF', 'A', 'FEW', 'CLOUDS', 'REMAINED', 'CALM', 'AND', 'THE', 'SEA', 'WAS', 'QUITE', 'SMOOTH', 'SHE', 'WAS', 'ENABLED', 'TO', 'HOLD', 'A', 'STEADY', 'COURSE'] +5105-28240-0006-1630: hyp=['THE', 'WIND', 'BEING', 'ADVERSE', 'THE', 'DOBRINA', 'DID', 'NOT', 'MAKE', 'VERY', 'RAPID', 'PROGRESS', 'BUT', 'AS', 'THE', 'WEATHER', 'IN', 'SPITE', 'OF', 'A', 'FEW', 'CLOUDS', 'REMAINED', 'CALM', 'AND', 'THE', 'SEA', 'WAS', 'QUITE', 'SMOOTH', 'SHE', 'WAS', 'ENABLED', 'TO', 'HOLD', 'A', 'STEADY', 'COURSE'] +5105-28240-0007-1631: ref=['SERVADAC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'THE', 'DOBRYNA', 'WAS', 'ENDEAVORING', 'TO', 'PUT', 'IN'] +5105-28240-0007-1631: hyp=['SERVADAC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'THE', 'DOBRINA', 'WAS', 'ENDEAVORING', 'TO', 'PUT', 'IN'] +5105-28240-0008-1632: ref=['A', 'NARROW', 'CHANNEL', 'FORMED', 'A', 'PASSAGE', 'THROUGH', 'THE', 'RIDGE', 'OF', 'ROCKS', 'THAT', 'PROTECTED', 'IT', 'FROM', 'THE', 'OPEN', 'SEA', 'AND', 'WHICH', 'EVEN', 'IN', 'THE', 'ROUGHEST', 'WEATHER', 'WOULD', 'ENSURE', 'THE', 'CALMNESS', 'OF', 'ITS', 'WATERS'] +5105-28240-0008-1632: hyp=['A', 'NARROW', 'CHANNEL', 'FORMED', 'A', 'PASSAGE', 'THROUGH', 'THE', 'RIDGE', 'OF', 'ROCKS', 'THAT', 'PROTECTED', 'IT', 'FROM', 'THE', 'OPEN', 'SEA', 'AND', 'WHICH', 'EVEN', 'IN', 'THE', 'ROUGHEST', 'WEATHER', 'WOULD', 'INSURE', 'THE', 'CALMNESS', 'OF', 'ITS', 'WATERS'] +5105-28240-0009-1633: ref=['SLIGHTLY', 'CHANGING', 'HER', 'COURSE', 'SHE', 'FIRST', 'STRUCK', 'HER', 'MAINSAIL', 'AND', 'IN', 'ORDER', 'TO', 'FACILITATE', 'THE', 'MOVEMENTS', 'OF', 'HER', 'HELMSMAN', 'SOON', 'CARRIED', 'NOTHING', 'BUT', 'HER', 'TWO', 'TOPSAILS', 'BRIGANTINE', 'AND', 'JIB'] +5105-28240-0009-1633: hyp=['SLIGHTLY', 'CHANGING', 'HER', 'COURSE', 'SHE', 'FIRST', 'STRUCK', 'HER', 'MAINSAIL', 'AND', 'IN', 'ORDER', 'TO', 'FACILITATE', 'THE', 'MOVEMENTS', 'OF', 'HER', 'HELMSMAN', 'SOON', 'CARRIED', 'NOTHING', 'BUT', 'HER', 'TWO', 'TOPSAILS', 'BRIGANTINE', 'AND', 'JIB'] +5105-28240-0010-1634: ref=['CAPTAIN', 'SERVADAC', 'HASTENED', 'TOWARDS', 'HIM'] +5105-28240-0010-1634: hyp=['CAPTAIN', 'SERVADAC', 'HASTENED', 'TOWARD', 'HIM'] +5105-28240-0011-1635: ref=['I', 'LEFT', 'YOU', 'ON', 'A', 'CONTINENT', 'AND', 'HERE', 'I', 'HAVE', 'THE', 'HONOR', 'OF', 'FINDING', 'YOU', 'ON', 'AN', 'ISLAND'] +5105-28240-0011-1635: hyp=['I', 'LEFT', 'YOU', 'ON', 'A', 'CONTINENT', 'AND', 'HERE', 'I', 
'HAVE', 'THE', 'HONOR', 'OF', 'FINDING', 'YOU', 'ON', 'AN', 'ISLAND'] +5105-28240-0012-1636: ref=['NEVER', 'MIND', 'NOW', 'INTERPOSED', 'THE', 'CAPTAIN', 'WE', 'WILL', 'TALK', 'OF', 'THAT', 'BY', 'AND', 'BY'] +5105-28240-0012-1636: hyp=['NEVER', 'MIND', 'NOW', 'INTERPOSED', 'THE', 'CAPTAIN', 'WE', 'WILL', 'TALK', 'OF', 'THAT', 'BY', 'AND', 'BY'] +5105-28240-0013-1637: ref=['NOTHING', 'MORE', 'THAN', 'YOU', 'KNOW', 'YOURSELF'] +5105-28240-0013-1637: hyp=['NOTHING', 'MORE', 'THAN', 'YOU', 'KNOW', 'YOURSELF'] +5105-28240-0014-1638: ref=['ARE', 'YOU', 'CERTAIN', 'THAT', 'THIS', 'IS', 'THE', 'MEDITERRANEAN'] +5105-28240-0014-1638: hyp=['ARE', 'YOU', 'CERTAIN', 'THAT', 'THIS', 'IS', 'THE', 'MEDITERRANEAN'] +5105-28240-0015-1639: ref=['FOR', 'SOME', 'MOMENTS', 'HE', 'SEEMED', 'PERFECTLY', 'STUPEFIED', 'THEN', 'RECOVERING', 'HIMSELF', 'HE', 'BEGAN', 'TO', 'OVERWHELM', 'THE', 'COUNT', 'WITH', 'A', 'TORRENT', 'OF', 'QUESTIONS'] +5105-28240-0015-1639: hyp=['FOR', 'SOME', 'MOMENTS', 'HE', 'SEEMED', 'PERFECTLY', 'STUPEFIED', 'AND', 'THEN', 'RECOVERING', 'HIMSELF', 'HE', 'BEGAN', 'TO', 'OVERWHELM', 'THE', 'COUNT', 'WITH', 'A', 'TORRENT', 'OF', 'QUESTIONS'] +5105-28240-0016-1640: ref=['TO', 'ALL', 'THESE', 'INQUIRIES', 'THE', 'COUNT', 'RESPONDED', 'IN', 'THE', 'AFFIRMATIVE'] +5105-28240-0016-1640: hyp=['TO', 'ALL', 'THESE', 'INQUIRIES', 'THE', 'COUNT', 'RESPONDED', 'IN', 'THE', 'AFFIRMATIVE'] +5105-28240-0017-1641: ref=['SOME', 'MYSTERIOUS', 'FORCE', 'SEEMED', 'TO', 'HAVE', 'BROUGHT', 'ABOUT', 'A', 'CONVULSION', 'OF', 'THE', 'ELEMENTS'] +5105-28240-0017-1641: hyp=['SOME', 'MYSTERIOUS', 'FORCE', 'SEEMED', 'TO', 'HAVE', 'BROUGHT', 'ABOUT', 'A', 'CONVULSION', 'OF', 'THE', 'ELEMENTS'] +5105-28240-0018-1642: ref=['YOU', 'WILL', 'TAKE', 'ME', 'ON', 'BOARD', 'COUNT', 'WILL', 'YOU', 'NOT'] +5105-28240-0018-1642: hyp=['YOU', 'WILL', 'TAKE', 'ME', 'ON', 'BOARD', 'COUNT', 'WILL', 'YOU', 'NOT'] +5105-28240-0019-1643: ref=['MY', 'YACHT', 'IS', 'AT', 'YOUR', 'SERVICE', 'SIR', 'EVEN', 'SHOULD', 'YOU', 'REQUIRE', 'TO', 'MAKE', 'A', 'TOUR', 'ROUND', 'THE', 'WORLD'] +5105-28240-0019-1643: hyp=['MY', 'YACHT', 'IS', 'AT', 'YOUR', 'SERVICE', 'SIR', 'EVEN', 'SHOULD', 'YOU', 'REQUIRE', 'TO', 'MAKE', 'A', 'TOUR', 'ROUND', 'THE', 'WORLD'] +5105-28240-0020-1644: ref=['THE', 'COUNT', 'SHOOK', 'HIS', 'HEAD'] +5105-28240-0020-1644: hyp=['THE', 'COUNT', 'SHOOK', 'HIS', 'HEAD'] +5105-28240-0021-1645: ref=['BEFORE', 'STARTING', 'IT', 'WAS', 'INDISPENSABLE', 'THAT', 'THE', 'ENGINE', 'OF', 'THE', 'DOBRYNA', 'SHOULD', 'BE', 'REPAIRED', 'TO', 'SAIL', 'UNDER', 'CANVAS', 'ONLY', 'WOULD', 'IN', 'CONTRARY', 'WINDS', 'AND', 'ROUGH', 'SEAS', 'BE', 'BOTH', 'TEDIOUS', 'AND', 'DIFFICULT'] +5105-28240-0021-1645: hyp=['BEFORE', 'STARTING', 'IT', 'WAS', 'INDISPENSABLE', 'THAT', 'THE', 'ENGINE', 'OF', 'THE', 'DOBRINA', 'SHOULD', 'BE', 'REPAIRED', 'TO', 'SAIL', 'UNDER', 'CANVAS', 'ONLY', 'WOULD', 'IN', 'CONTRARY', 'WINDS', 'AND', 'ROUGH', 'SEAS', 'BE', 'BOTH', 'TEDIOUS', 'AND', 'DIFFICULT'] +5105-28240-0022-1646: ref=['IT', 'WAS', 'ON', 'THE', 'LAST', 'DAY', 'OF', 'JANUARY', 'THAT', 'THE', 'REPAIRS', 'OF', 'THE', 'SCHOONER', 'WERE', 'COMPLETED'] +5105-28240-0022-1646: hyp=['IT', 'WAS', 'ON', 'THE', 'LAST', 'DAY', 'OF', 'JANUARY', 'THAT', 'THE', 'REPAIRS', 'OF', 'THE', 'SCHOONER', 'WERE', 'COMPLETED'] +5105-28240-0023-1647: ref=['A', 'SLIGHT', 'DIMINUTION', 'IN', 'THE', 'EXCESSIVELY', 'HIGH', 'TEMPERATURE', 'WHICH', 'HAD', 'PREVAILED', 'FOR', 'THE', 'LAST', 'FEW', 'WEEKS', 'WAS', 'THE', 'ONLY', 'APPARENT', 'CHANGE', 'IN', 'THE', 'GENERAL', 'ORDER', 'OF', 
'THINGS', 'BUT', 'WHETHER', 'THIS', 'WAS', 'TO', 'BE', 'ATTRIBUTED', 'TO', 'ANY', 'ALTERATION', 'IN', 'THE', "EARTH'S", 'ORBIT', 'WAS', 'A', 'QUESTION', 'WHICH', 'WOULD', 'STILL', 'REQUIRE', 'SEVERAL', 'DAYS', 'TO', 'DECIDE'] +5105-28240-0023-1647: hyp=['A', 'SLIGHT', 'DIMINUTION', 'IN', 'THE', 'EXCESSIVELY', 'HIGH', 'TEMPERATURE', 'WHICH', 'HAD', 'PREVAILED', 'FOR', 'THE', 'LAST', 'FEW', 'WEEKS', 'WAS', 'THE', 'ONLY', 'APPARENT', 'CHANGE', 'IN', 'THE', 'GENERAL', 'ORDER', 'OF', 'THINGS', 'BUT', 'WHETHER', 'THIS', 'WAS', 'TO', 'BE', 'ATTRIBUTED', 'TO', 'ANY', 'ALTERATION', 'IN', 'THE', "EARTH'S", 'ORBIT', 'WAS', 'A', 'QUESTION', 'WHICH', 'WOULD', 'STILL', 'REQUIRE', 'SEVERAL', 'DAYS', 'TO', 'DECIDE'] +5105-28240-0024-1648: ref=['DOUBTS', 'NOW', 'AROSE', 'AND', 'SOME', 'DISCUSSION', 'FOLLOWED', 'WHETHER', 'OR', 'NOT', 'IT', 'WAS', 'DESIRABLE', 'FOR', 'BEN', 'ZOOF', 'TO', 'ACCOMPANY', 'HIS', 'MASTER'] +5105-28240-0024-1648: hyp=['DOUBTS', 'NOW', 'AROSE', 'AND', 'SOME', 'DISCUSSION', 'FOLLOWED', 'WHETHER', 'OR', 'NOT', 'IT', 'WAS', 'DESIRABLE', 'FOR', 'BEN', 'ZOOF', 'TO', 'ACCOMPANY', 'HIS', 'MASTER'] +5105-28241-0000-1604: ref=['HER', 'SEA', 'GOING', 'QUALITIES', 'WERE', 'EXCELLENT', 'AND', 'WOULD', 'HAVE', 'AMPLY', 'SUFFICED', 'FOR', 'A', 'CIRCUMNAVIGATION', 'OF', 'THE', 'GLOBE'] +5105-28241-0000-1604: hyp=['HER', 'SEA', 'GOING', 'QUALITIES', 'WERE', 'EXCELLENT', 'AND', 'WOULD', 'HAVE', 'AMPLY', 'SUFFICED', 'FOR', 'A', 'CIRCUMNAVIGATION', 'OF', 'THE', 'GLOBE'] +5105-28241-0001-1605: ref=['AFTER', 'AN', 'APPRENTICESHIP', 'ON', 'A', 'MERCHANT', 'SHIP', 'HE', 'HAD', 'ENTERED', 'THE', 'IMPERIAL', 'NAVY', 'AND', 'HAD', 'ALREADY', 'REACHED', 'THE', 'RANK', 'OF', 'LIEUTENANT', 'WHEN', 'THE', 'COUNT', 'APPOINTED', 'HIM', 'TO', 'THE', 'CHARGE', 'OF', 'HIS', 'OWN', 'PRIVATE', 'YACHT', 'IN', 'WHICH', 'HE', 'WAS', 'ACCUSTOMED', 'TO', 'SPEND', 'BY', 'FAR', 'THE', 'GREATER', 'PART', 'OF', 'HIS', 'TIME', 'THROUGHOUT', 'THE', 'WINTER', 'GENERALLY', 'CRUISING', 'IN', 'THE', 'MEDITERRANEAN', 'WHILST', 'IN', 'THE', 'SUMMER', 'HE', 'VISITED', 'MORE', 'NORTHERN', 'WATERS'] +5105-28241-0001-1605: hyp=['AFTER', 'AN', 'APPRENTICESHIP', 'ON', 'A', 'MERCHANT', 'SHIP', 'HE', 'HAD', 'ENTERED', 'THE', 'IMPERIAL', 'NAVY', 'AND', 'HAD', 'ALREADY', 'REACHED', 'THE', 'RANK', 'OF', 'LIEUTENANT', 'WHEN', 'THE', 'COUNT', 'APPOINTED', 'HIM', 'TO', 'THE', 'CHARGE', 'OF', 'HIS', 'OWN', 'PRIVATE', 'YACHT', 'IN', 'WHICH', 'HE', 'WAS', 'ACCUSTOMED', 'TO', 'SPEND', 'BY', 'FAR', 'THE', 'GREATER', 'PART', 'OF', 'HIS', 'TIME', 'THROUGHOUT', 'THE', 'WINTER', 'GENERALLY', 'CRUISING', 'IN', 'THE', 'MEDITERRANEAN', 'WHILST', 'IN', 'THE', 'SUMMER', 'HE', 'VISITED', 'MORE', 'NORTHERN', 'WATERS'] +5105-28241-0002-1606: ref=['THE', 'LATE', 'ASTOUNDING', 'EVENTS', 'HOWEVER', 'HAD', 'RENDERED', 'PROCOPE', 'MANIFESTLY', 'UNEASY', 'AND', 'NOT', 'THE', 'LESS', 'SO', 'FROM', 'HIS', 'CONSCIOUSNESS', 'THAT', 'THE', 'COUNT', 'SECRETLY', 'PARTOOK', 'OF', 'HIS', 'OWN', 'ANXIETY'] +5105-28241-0002-1606: hyp=['THE', 'LATE', 'ASTOUNDING', 'EVENTS', 'HOWEVER', 'HAD', 'RENDERED', 'PROCOPE', 'MANIFESTLY', 'UNEASY', 'AND', 'NOT', 'THE', 'LESS', 'SO', 'FROM', 'HIS', 'CONSCIOUSNESS', 'THAT', 'THE', 'COUNT', 'SECRETLY', 'PARTOOK', 'OF', 'HIS', 'OWN', 'ANXIETY'] +5105-28241-0003-1607: ref=['STEAM', 'UP', 'AND', 'CANVAS', 'SPREAD', 'THE', 'SCHOONER', 'STARTED', 'EASTWARDS'] +5105-28241-0003-1607: hyp=['STEAM', 'UP', 'AND', 'CANVAS', 'SPREAD', 'THE', 'SCHOONER', 'STARTED', 'EASTWARDS'] +5105-28241-0004-1608: ref=['ALTHOUGH', 'ONLY', 'A', 'MODERATE', 'BREEZE', 'WAS', 
'BLOWING', 'THE', 'SEA', 'WAS', 'ROUGH', 'A', 'CIRCUMSTANCE', 'TO', 'BE', 'ACCOUNTED', 'FOR', 'ONLY', 'BY', 'THE', 'DIMINUTION', 'IN', 'THE', 'FORCE', 'OF', 'THE', "EARTH'S", 'ATTRACTION', 'RENDERING', 'THE', 'LIQUID', 'PARTICLES', 'SO', 'BUOYANT', 'THAT', 'BY', 'THE', 'MERE', 'EFFECT', 'OF', 'OSCILLATION', 'THEY', 'WERE', 'CARRIED', 'TO', 'A', 'HEIGHT', 'THAT', 'WAS', 'QUITE', 'UNPRECEDENTED'] +5105-28241-0004-1608: hyp=['ALTHOUGH', 'ONLY', 'A', 'MODERATE', 'BREEZE', 'WAS', 'BLOWING', 'THE', 'SEA', 'WAS', 'ROUGH', 'A', 'CIRCUMSTANCE', 'TO', 'BE', 'ACCOUNTED', 'FOR', 'ONLY', 'BY', 'THE', 'DIMINUTION', 'IN', 'THE', 'FORCE', 'OF', 'THE', "EARTH'S", 'ATTRACTION', 'RENDERING', 'THE', 'LIQUID', 'PARTICLE', 'SO', 'BUOYANT', 'THAT', 'BY', 'THE', 'MERE', 'EFFECT', 'OF', 'OSCILLATION', 'THEY', 'WERE', 'CARRIED', 'TO', 'A', 'HEIGHT', 'THAT', 'WAS', 'QUITE', 'UNPRECEDENTED'] +5105-28241-0005-1609: ref=['FOR', 'A', 'FEW', 'MILES', 'SHE', 'FOLLOWED', 'THE', 'LINE', 'HITHERTO', 'PRESUMABLY', 'OCCUPIED', 'BY', 'THE', 'COAST', 'OF', 'ALGERIA', 'BUT', 'NO', 'LAND', 'APPEARED', 'TO', 'THE', 'SOUTH'] +5105-28241-0005-1609: hyp=['FOR', 'A', 'FEW', 'MILES', 'SHE', 'FOLLOWED', 'THE', 'LINE', 'HITHERTO', 'PRESUMABLY', 'OCCUPIED', 'BY', 'THE', 'COAST', 'OF', 'ALGERIA', 'BUT', 'NO', 'LAND', 'APPEARED', 'TO', 'THE', 'SOUTH'] +5105-28241-0006-1610: ref=['THE', 'LOG', 'AND', 'THE', 'COMPASS', 'THEREFORE', 'WERE', 'ABLE', 'TO', 'BE', 'CALLED', 'UPON', 'TO', 'DO', 'THE', 'WORK', 'OF', 'THE', 'SEXTANT', 'WHICH', 'HAD', 'BECOME', 'UTTERLY', 'USELESS'] +5105-28241-0006-1610: hyp=['THE', 'LOG', 'AND', 'THE', 'COMPASS', 'THEREFORE', 'WERE', 'ABLE', 'TO', 'BE', 'CALLED', 'UPON', 'TO', 'DO', 'THE', 'WORK', 'OF', 'THE', 'SEXTANT', 'WHICH', 'HAD', 'BECOME', 'UTTERLY', 'USELESS'] +5105-28241-0007-1611: ref=['THERE', 'IS', 'NO', 'FEAR', 'OF', 'THAT', 'SIR'] +5105-28241-0007-1611: hyp=["THERE'S", 'NO', 'FEAR', 'OF', 'THAT', 'SIR'] +5105-28241-0008-1612: ref=['THE', 'EARTH', 'HAS', 'UNDOUBTEDLY', 'ENTERED', 'UPON', 'A', 'NEW', 'ORBIT', 'BUT', 'SHE', 'IS', 'NOT', 'INCURRING', 'ANY', 'PROBABLE', 'RISK', 'OF', 'BEING', 'PRECIPITATED', 'ONTO', 'THE', 'SUN'] +5105-28241-0008-1612: hyp=['AT', 'THE', 'EARTH', 'HAS', 'UNDOUBTEDLY', 'ENTERED', 'UPON', 'A', 'NEW', 'ORBIT', 'BUT', 'SHE', 'IS', 'NOT', 'INCURRING', 'ANY', 'PROBABLE', 'RISK', 'OF', 'BEING', 'PRECIPITATED', 'ON', 'TO', 'THE', 'SUN'] +5105-28241-0009-1613: ref=['AND', 'WHAT', 'DEMONSTRATION', 'DO', 'YOU', 'OFFER', 'ASKED', 'SERVADAC', 'EAGERLY', 'THAT', 'IT', 'WILL', 'NOT', 'HAPPEN'] +5105-28241-0009-1613: hyp=['AND', 'WHAT', 'DEMONSTRATION', 'DO', 'YOU', 'OFFER', 'ASKED', 'SERVADAC', 'EAGERLY', 'THAT', 'IT', 'WILL', 'NOT', 'HAPPEN'] +5105-28241-0010-1614: ref=['OCEAN', 'REIGNED', 'SUPREME'] +5105-28241-0010-1614: hyp=['OCEAN', 'RAINED', 'SUPREME'] +5105-28241-0011-1615: ref=['ALL', 'THE', 'IMAGES', 'OF', 'HIS', 'PAST', 'LIFE', 'FLOATED', 'UPON', 'HIS', 'MEMORY', 'HIS', 'THOUGHTS', 'SPED', 'AWAY', 'TO', 'HIS', 'NATIVE', 'FRANCE', 'ONLY', 'TO', 'RETURN', 'AGAIN', 'TO', 'WONDER', 'WHETHER', 'THE', 'DEPTHS', 'OF', 'OCEAN', 'WOULD', 'REVEAL', 'ANY', 'TRACES', 'OF', 'THE', 'ALGERIAN', 'METROPOLIS'] +5105-28241-0011-1615: hyp=['ALL', 'THE', 'IMAGES', 'OF', 'HIS', 'PAST', 'LIFE', 'FLOATED', 'UPON', 'HIS', 'MEMORY', 'HIS', 'THOUGHTS', 'SPED', 'AWAY', 'TO', 'HIS', 'NATIVE', 'FRANCE', 'ONLY', 'TO', 'RETURN', 'AGAIN', 'TO', 'WONDER', 'WHETHER', 'THE', 'DEPTHS', 'OF', 'OCEAN', 'WOULD', 'REVEAL', 'ANY', 'TRACES', 'OF', 'THE', 'ALGERIAN', 'METROPOLIS'] +5105-28241-0012-1616: ref=['IS', 'IT', 
'NOT', 'IMPOSSIBLE', 'HE', 'MURMURED', 'ALOUD', 'THAT', 'ANY', 'CITY', 'SHOULD', 'DISAPPEAR', 'SO', 'COMPLETELY'] +5105-28241-0012-1616: hyp=['IS', 'IT', 'NOT', 'IMPOSSIBLE', 'HE', 'MURMURED', 'ALOUD', 'THAT', 'ANY', 'CITY', 'SHOULD', 'DISAPPEAR', 'SO', 'COMPLETELY'] +5105-28241-0013-1617: ref=['WOULD', 'NOT', 'THE', 'LOFTIEST', 'EMINENCES', 'OF', 'THE', 'CITY', 'AT', 'LEAST', 'BE', 'VISIBLE'] +5105-28241-0013-1617: hyp=['WOULD', 'NOT', 'THE', 'LOFTIEST', 'EMINENCES', 'OF', 'THE', 'CITY', 'AT', 'LEAST', 'BE', 'VISIBLE'] +5105-28241-0014-1618: ref=['ANOTHER', 'CIRCUMSTANCE', 'WAS', 'MOST', 'REMARKABLE'] +5105-28241-0014-1618: hyp=['ANOTHER', 'CIRCUMSTANCE', 'WAS', 'MOST', 'REMARKABLE'] +5105-28241-0015-1619: ref=['TO', 'THE', 'SURPRISE', 'OF', 'ALL', 'AND', 'ESPECIALLY', 'OF', 'LIEUTENANT', 'PROCOPE', 'THE', 'LINE', 'INDICATED', 'A', 'BOTTOM', 'AT', 'A', 'NEARLY', 'UNIFORM', 'DEPTH', 'OF', 'FROM', 'FOUR', 'TO', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'THE', 'SOUNDING', 'WAS', 'PERSEVERED', 'WITH', 'CONTINUOUSLY', 'FOR', 'MORE', 'THAN', 'TWO', 'HOURS', 'OVER', 'A', 'CONSIDERABLE', 'AREA', 'THE', 'DIFFERENCES', 'OF', 'LEVEL', 'WERE', 'INSIGNIFICANT', 'NOT', 'CORRESPONDING', 'IN', 'ANY', 'DEGREE', 'TO', 'WHAT', 'WOULD', 'BE', 'EXPECTED', 'OVER', 'THE', 'SITE', 'OF', 'A', 'CITY', 'THAT', 'HAD', 'BEEN', 'TERRACED', 'LIKE', 'THE', 'SEATS', 'OF', 'AN', 'AMPHITHEATER'] +5105-28241-0015-1619: hyp=['TO', 'THE', 'SURPRISE', 'OF', 'ALL', 'AND', 'ESPECIALLY', 'OF', 'LIEUTENANT', 'PROCOPE', 'THE', 'LINE', 'INDICATED', 'A', 'BOTTOM', 'AT', 'A', 'NEARLY', 'UNIFORM', 'DEPTH', 'OF', 'FROM', 'FOUR', 'TO', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'THE', 'SOUNDING', 'WAS', 'PERSEVERED', 'WITH', 'CONTINUOUSLY', 'FOR', 'MORE', 'THAN', 'TWO', 'HOURS', 'OVER', 'A', 'CONSIDERABLE', 'AREA', 'THE', 'DIFFERENCES', 'OF', 'LEVEL', 'WERE', 'INSIGNIFICANT', 'NOT', 'CORRESPONDING', 'IN', 'ANY', 'DEGREE', 'TO', 'WHAT', 'WOULD', 'BE', 'EXPECTED', 'OVER', 'THE', 'SITE', 'OF', 'A', 'CITY', 'THAT', 'HAD', 'BEEN', 'TERRACED', 'LIKE', 'THE', 'SEATS', 'OF', 'AN', 'AMPHITHEATRE'] +5105-28241-0016-1620: ref=['YOU', 'MUST', 'SEE', 'LIEUTENANT', 'I', 'SHOULD', 'THINK', 'THAT', 'WE', 'ARE', 'NOT', 'SO', 'NEAR', 'THE', 'COAST', 'OF', 'ALGERIA', 'AS', 'YOU', 'IMAGINED'] +5105-28241-0016-1620: hyp=['YOU', 'MUST', 'SEE', 'LIEUTENANT', 'I', 'SHOULD', 'THINK', 'THAT', 'WE', 'ARE', 'NOT', 'SO', 'NEAR', 'THE', 'COAST', 'OF', 'ALGERIA', 'AS', 'YOU', 'IMAGINED'] +5105-28241-0017-1621: ref=['AFTER', 'PONDERING', 'AWHILE', 'HE', 'SAID', 'IF', 'WE', 'WERE', 'FARTHER', 'AWAY', 'I', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'A', 'DEPTH', 'OF', 'TWO', 'OR', 'THREE', 'HUNDRED', 'FATHOMS', 'INSTEAD', 'OF', 'FIVE', 'FATHOMS', 'FIVE', 'FATHOMS'] +5105-28241-0017-1621: hyp=['AFTER', 'PONDERING', 'A', 'WHILE', 'HE', 'SAID', 'IF', 'WE', 'WERE', 'FARTHER', 'AWAY', 'I', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'A', 'DEPTH', 'OF', 'TWO', 'OR', 'THREE', 'HUNDRED', 'FATHOMS', 'INSTEAD', 'OF', 'FIVE', 'FATHOMS', 'FIVE', 'FATHOMS'] +5105-28241-0018-1622: ref=['ITS', 'DEPTH', 'REMAINED', 'INVARIABLE', 'STILL', 'FOUR', 'OR', 'AT', 'MOST', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'ITS', 'BOTTOM', 'WAS', 'ASSIDUOUSLY', 'DREDGED', 'IT', 'WAS', 'ONLY', 'TO', 'PROVE', 'IT', 'BARREN', 'OF', 'MARINE', 'PRODUCTION', 'OF', 'ANY', 'TYPE'] +5105-28241-0018-1622: hyp=['ITS', 'DEPTH', 'REMAINED', 'INVARIABLE', 'STILL', 'FOUR', 'OR', 'AT', 'MOST', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'ITS', 'BOTTOM', 'WAS', 'ASSIDUOUSLY', 'DREDGED', 'IT', 'WAS', 'ONLY', 'TO', 'PROVE', 'IT', 'BARREN', 'OF', 'MARINE', 
'PRODUCTION', 'OF', 'ANY', 'TYPE'] +5105-28241-0019-1623: ref=['NOTHING', 'WAS', 'TO', 'BE', 'DONE', 'BUT', 'TO', 'PUT', 'ABOUT', 'AND', 'RETURN', 'IN', 'DISAPPOINTMENT', 'TOWARDS', 'THE', 'NORTH'] +5105-28241-0019-1623: hyp=['NOTHING', 'WAS', 'TO', 'BE', 'DONE', 'BUT', 'TO', 'PUT', 'ABOUT', 'AND', 'RETURN', 'IN', 'DISAPPOINTMENT', 'TOWARD', 'THE', 'NORTH'] +5142-33396-0000-898: ref=['AT', 'ANOTHER', 'TIME', 'HARALD', 'ASKED'] +5142-33396-0000-898: hyp=['AT', 'ANOTHER', 'TIME', 'HAROLD', 'ASKED'] +5142-33396-0001-899: ref=['WHAT', 'IS', 'YOUR', 'COUNTRY', 'OLAF', 'HAVE', 'YOU', 'ALWAYS', 'BEEN', 'A', 'THRALL', 'THE', "THRALL'S", 'EYES', 'FLASHED'] +5142-33396-0001-899: hyp=['WHAT', 'IS', 'YOUR', 'COUNTRY', 'OLAF', 'HAVE', 'YOU', 'ALWAYS', 'BEEN', 'A', 'THRALL', 'THE', "THRALL'S", 'EYES', 'FLASHED'] +5142-33396-0002-900: ref=['TWO', 'HUNDRED', 'WARRIORS', 'FEASTED', 'IN', 'HIS', 'HALL', 'AND', 'FOLLOWED', 'HIM', 'TO', 'BATTLE'] +5142-33396-0002-900: hyp=['TWO', 'HUNDRED', 'WARRIORS', 'FEASTED', 'IN', 'HIS', 'HALL', 'AND', 'FOLLOWED', 'HIM', 'TO', 'BATTLE'] +5142-33396-0003-901: ref=['THE', 'REST', 'OF', 'YOU', 'OFF', 'A', 'VIKING', 'HE', 'HAD', 'THREE', 'SHIPS'] +5142-33396-0003-901: hyp=['THE', 'REST', 'OF', 'YOU', 'OFF', 'A', 'VIKING', 'HE', 'HAD', 'THREE', 'SHIPS'] +5142-33396-0004-902: ref=['THESE', 'HE', 'GAVE', 'TO', 'THREE', 'OF', 'MY', 'BROTHERS'] +5142-33396-0004-902: hyp=['THESE', 'HE', 'GAVE', 'TO', 'THREE', 'OF', 'MY', 'BROTHERS'] +5142-33396-0005-903: ref=['BUT', 'I', 'STAYED', 'THAT', 'SPRING', 'AND', 'BUILT', 'ME', 'A', 'BOAT'] +5142-33396-0005-903: hyp=['BUT', 'I', 'STAYED', 'THAT', 'SPRING', 'AND', 'BUILT', 'ME', 'A', 'BOAT'] +5142-33396-0006-904: ref=['I', 'MADE', 'HER', 'FOR', 'ONLY', 'TWENTY', 'OARS', 'BECAUSE', 'I', 'THOUGHT', 'FEW', 'MEN', 'WOULD', 'FOLLOW', 'ME', 'FOR', 'I', 'WAS', 'YOUNG', 'FIFTEEN', 'YEARS', 'OLD'] +5142-33396-0006-904: hyp=['I', 'MADE', 'HER', 'ONLY', 'TWENTY', 'WARS', 'BECAUSE', 'I', 'THOUGHT', 'FEW', 'MEN', 'WOULD', 'FOLLOW', 'ME', 'FOR', 'I', 'WAS', 'YOUNG', 'FIFTEEN', 'YEARS', 'OLD'] +5142-33396-0007-905: ref=['AT', 'THE', 'PROW', 'I', 'CARVED', 'THE', 'HEAD', 'WITH', 'OPEN', 'MOUTH', 'AND', 'FORKED', 'TONGUE', 'THRUST', 'OUT'] +5142-33396-0007-905: hyp=['AT', 'THE', 'PROW', 'I', 'CARVED', 'THE', 'HEAD', 'WITH', 'OPEN', 'MOUTH', 'AND', 'FORKED', 'TONGUE', 'THRUST', 'OUT'] +5142-33396-0008-906: ref=['I', 'PAINTED', 'THE', 'EYES', 'RED', 'FOR', 'ANGER'] +5142-33396-0008-906: hyp=['I', 'PAINTED', 'THE', 'EYES', 'RED', 'FOR', 'ANGER'] +5142-33396-0009-907: ref=['THERE', 'STAND', 'SO', 'I', 'SAID', 'AND', 'GLARE', 'AND', 'HISS', 'AT', 'MY', 'FOES'] +5142-33396-0009-907: hyp=['THERE', 'STAND', 'SO', 'I', 'SAID', 'AND', 'GLARE', 'AND', 'HISS', 'AT', 'MY', 'FOES'] +5142-33396-0010-908: ref=['IN', 'THE', 'STERN', 'I', 'CURVED', 'THE', 'TAIL', 'UP', 'ALMOST', 'AS', 'HIGH', 'AS', 'THE', 'HEAD'] +5142-33396-0010-908: hyp=['IN', 'A', 'STERN', 'I', 'CARVED', 'THE', 'TAIL', 'UP', 'ALMOST', 'AS', 'HIGH', 'AS', 'THE', 'HEAD'] +5142-33396-0011-909: ref=['THERE', 'SHE', 'SAT', 'ON', 'THE', 'ROLLERS', 'AS', 'FAIR', 'A', 'SHIP', 'AS', 'I', 'EVER', 'SAW'] +5142-33396-0011-909: hyp=['THERE', 'SHE', 'SAT', 'ON', 'THE', 'ROLLERS', 'AS', 'FAIR', 'A', 'SHIP', 'AS', 'I', 'EVER', 'SAW'] +5142-33396-0012-910: ref=['THEN', 'I', 'WILL', 'GET', 'ME', 'A', 'FARM', 'AND', 'WILL', 'WINTER', 'IN', 'THAT', 'LAND', 'NOW', 'WHO', 'WILL', 'FOLLOW', 'ME'] +5142-33396-0012-910: hyp=['THEN', 'I', 'WILL', 'GET', 'ME', 'A', 'FARM', 'AND', 'WILL', 'WIN', 'HER', 'IN', 'THAT', 'LAND', 'NOW', 
'WHO', 'WILL', 'FOLLOW', 'ME'] +5142-33396-0013-911: ref=['HE', 'IS', 'BUT', 'A', 'BOY', 'THE', 'MEN', 'SAID'] +5142-33396-0013-911: hyp=['HE', 'IS', 'BUT', 'A', 'BOY', 'THE', 'MAN', 'SAID'] +5142-33396-0014-912: ref=['THIRTY', 'MEN', 'ONE', 'AFTER', 'ANOTHER', 'RAISED', 'THEIR', 'HORNS', 'AND', 'SAID'] +5142-33396-0014-912: hyp=['THIRTY', 'MEN', 'ONE', 'AFTER', 'ANOTHER', 'RAISED', 'THEIR', 'HORNS', 'AND', 'SAID'] +5142-33396-0015-913: ref=['AS', 'OUR', 'BOAT', 'FLASHED', 'DOWN', 'THE', 'ROLLERS', 'INTO', 'THE', 'WATER', 'I', 'MADE', 'THIS', 'SONG', 'AND', 'SANG', 'IT'] +5142-33396-0015-913: hyp=['AS', 'OUR', 'BOAT', 'FLASHED', 'DOWN', 'THE', 'ROLLERS', 'INTO', 'THE', 'WATER', 'I', 'MADE', 'THIS', 'SONG', 'AND', 'SANG', 'IT'] +5142-33396-0016-914: ref=['SO', 'WE', 'HARRIED', 'THE', 'COAST', 'OF', 'NORWAY'] +5142-33396-0016-914: hyp=['SO', 'WE', 'HURRIED', 'THE', 'COAST', 'OF', 'NORWAY'] +5142-33396-0017-915: ref=['WE', 'ATE', 'AT', 'MANY', "MEN'S", 'TABLES', 'UNINVITED'] +5142-33396-0017-915: hyp=['WE', 'ATE', 'IT', 'MANY', "MEN'S", 'TABLES', 'UNINVITED'] +5142-33396-0018-916: ref=['MY', "DRAGON'S", 'BELLY', 'IS', 'NEVER', 'FULL', 'AND', 'ON', 'BOARD', 'WENT', 'THE', 'GOLD'] +5142-33396-0018-916: hyp=['I', "DRAGON'S", 'BELLY', 'IS', 'NEVER', 'FULL', 'AND', 'ON', 'BOARD', 'WENT', 'THE', 'GOLD'] +5142-33396-0019-917: ref=['OH', 'IT', 'IS', 'BETTER', 'TO', 'LIVE', 'ON', 'THE', 'SEA', 'AND', 'LET', 'OTHER', 'MEN', 'RAISE', 'YOUR', 'CROPS', 'AND', 'COOK', 'YOUR', 'MEALS'] +5142-33396-0019-917: hyp=['OH', 'IT', 'IS', 'BETTER', 'TO', 'LIVE', 'ON', 'THE', 'SEA', 'AND', 'LET', 'OTHER', 'MEN', 'RAISE', 'YOUR', 'CROPS', 'AND', 'COOK', 'YOUR', 'MEALS'] +5142-33396-0020-918: ref=['A', 'HOUSE', 'SMELLS', 'OF', 'SMOKE', 'A', 'SHIP', 'SMELLS', 'OF', 'FROLIC'] +5142-33396-0020-918: hyp=['A', 'HOUSE', 'SMELLS', 'OF', 'SMOKE', 'A', "SHIP'S", 'MILLS', 'OF', 'FROLIC'] +5142-33396-0021-919: ref=['UP', 'AND', 'DOWN', 'THE', 'WATER', 'WE', 'WENT', 'TO', 'GET', 'MUCH', 'WEALTH', 'AND', 'MUCH', 'FROLIC'] +5142-33396-0021-919: hyp=['UP', 'AND', 'DOWN', 'THE', 'WATER', 'WE', 'WENT', 'TO', 'GET', 'MUCH', 'WEALTH', 'AND', 'MUCH', 'FROLIC'] +5142-33396-0022-920: ref=['WHAT', 'OF', 'THE', 'FARM', 'OLAF', 'NOT', 'YET', 'I', 'ANSWERED', 'VIKING', 'IS', 'BETTER', 'FOR', 'SUMMER'] +5142-33396-0022-920: hyp=['WHAT', 'OF', 'THE', 'FARM', 'ALL', 'OFF', 'NOT', 'YET', 'I', 'ANSWERED', 'VIKING', 'IS', 'BETTER', 'FOR', 'SUMMER'] +5142-33396-0023-921: ref=['IT', 'WAS', 'SO', 'DARK', 'THAT', 'I', 'COULD', 'SEE', 'NOTHING', 'BUT', 'A', 'FEW', 'SPARKS', 'ON', 'THE', 'HEARTH'] +5142-33396-0023-921: hyp=['IT', 'WAS', 'SO', 'DARK', 'THAT', 'I', 'COULD', 'SEE', 'NOTHING', 'BUT', 'A', 'FEW', 'SPARKS', 'ON', 'THE', 'HEARTH'] +5142-33396-0024-922: ref=['I', 'STOOD', 'WITH', 'MY', 'BACK', 'TO', 'THE', 'WALL', 'FOR', 'I', 'WANTED', 'NO', 'SWORD', 'REACHING', 'OUT', 'OF', 'THE', 'DARK', 'FOR', 'ME'] +5142-33396-0024-922: hyp=['I', 'STOOD', 'WITH', 'MY', 'BACK', 'TO', 'THE', 'WALL', 'FOR', 'I', 'WANTED', 'NO', 'SWORD', 'REACHING', 'OUT', 'OF', 'THE', 'DARK', 'FOR', 'ME'] +5142-33396-0025-923: ref=['COME', 'COME', 'I', 'CALLED', 'WHEN', 'NO', 'ONE', 'OBEYED', 'A', 'FIRE'] +5142-33396-0025-923: hyp=['COME', 'COME', 'I', 'CALLED', 'WHEN', 'NO', 'ONE', 'OBEYED', 'A', 'FIRE'] +5142-33396-0026-924: ref=['MY', 'MEN', 'LAUGHED', 'YES', 'A', 'STINGY', 'HOST'] +5142-33396-0026-924: hyp=['MY', 'MEN', 'LAUGHED', 'YES', 'A', 'STINGY', 'HOST'] +5142-33396-0027-925: ref=['HE', 'ACTS', 'AS', 'THOUGH', 'HE', 'HAD', 'NOT', 'EXPECTED', 'US'] +5142-33396-0027-925: 
hyp=['HE', 'ACTS', 'AS', 'THOUGH', 'HE', 'IS', 'NOT', 'EXPECTED', 'US'] +5142-33396-0028-926: ref=['ON', 'A', 'BENCH', 'IN', 'A', 'FAR', 'CORNER', 'WERE', 'A', 'DOZEN', 'PEOPLE', 'HUDDLED', 'TOGETHER'] +5142-33396-0028-926: hyp=['ON', 'A', 'BENCH', 'IN', 'A', 'FAR', 'CORNER', 'WERE', 'A', 'DOZEN', 'PEOPLE', 'HUDDLED', 'TOGETHER'] +5142-33396-0029-927: ref=['BRING', 'IN', 'THE', 'TABLE', 'WE', 'ARE', 'HUNGRY'] +5142-33396-0029-927: hyp=['BRING', 'IN', 'THE', 'TABLE', 'WE', 'ARE', 'HUNGRY'] +5142-33396-0030-928: ref=['THE', 'THRALLS', 'WERE', 'BRINGING', 'IN', 'A', 'GREAT', 'POT', 'OF', 'MEAT'] +5142-33396-0030-928: hyp=['THE', 'THRALLS', 'WERE', 'RINGING', 'IN', 'A', 'GREAT', 'POT', 'OF', 'MEAT'] +5142-33396-0031-929: ref=['THEY', 'SET', 'UP', 'A', 'CRANE', 'OVER', 'THE', 'FIRE', 'AND', 'HUNG', 'THE', 'POT', 'UPON', 'IT', 'AND', 'WE', 'SAT', 'AND', 'WATCHED', 'IT', 'BOIL', 'WHILE', 'WE', 'JOKED', 'AT', 'LAST', 'THE', 'SUPPER', 'BEGAN'] +5142-33396-0031-929: hyp=['THEY', 'SET', 'UP', 'A', 'CRANE', 'OVER', 'THE', 'FIRE', 'AND', 'HUNG', 'THE', 'POT', 'UPON', 'IT', 'AND', 'WE', 'SAT', 'AND', 'WATCHED', 'IT', 'BOIL', 'WHILE', 'WE', 'JOKED', 'AT', 'LAST', 'THE', 'SUPPER', 'BEGAN'] +5142-33396-0032-930: ref=['THE', 'FARMER', 'SAT', 'GLOOMILY', 'ON', 'THE', 'BENCH', 'AND', 'WOULD', 'NOT', 'EAT', 'AND', 'YOU', 'CANNOT', 'WONDER', 'FOR', 'HE', 'SAW', 'US', 'PUTTING', 'POTFULS', 'OF', 'HIS', 'GOOD', 'BEEF', 'AND', 'BASKET', 'LOADS', 'OF', 'BREAD', 'INTO', 'OUR', 'BIG', 'MOUTHS'] +5142-33396-0032-930: hyp=['THE', 'FARMER', 'SAT', 'GLOOMILY', 'ON', 'THE', 'BENCH', 'AND', 'WOULD', 'NOT', 'EAT', 'AND', 'YOU', 'CANNOT', 'WONDER', 'FOR', 'HE', 'SAW', 'US', 'PUTTING', 'POTFULS', 'OF', 'HIS', 'GOOD', 'BEEF', 'AND', 'BASKEY', 'LOADS', 'OF', 'BREAD', 'AND', 'OUR', 'BIG', 'MOUTHS'] +5142-33396-0033-931: ref=['YOU', 'WOULD', 'NOT', 'EAT', 'WITH', 'US', 'YOU', 'CANNOT', 'SAY', 'NO', 'TO', 'HALF', 'OF', 'MY', 'ALE', 'I', 'DRINK', 'THIS', 'TO', 'YOUR', 'HEALTH'] +5142-33396-0033-931: hyp=['YOU', 'WOULD', 'NOT', 'EAT', 'WITH', 'US', 'YOU', 'CANNOT', 'SAY', 'NO', 'TO', 'HALF', 'OF', 'MY', 'ALE', 'I', 'DRINK', 'THIS', 'TO', 'YOUR', 'HEALTH'] +5142-33396-0034-932: ref=['THEN', 'I', 'DRANK', 'HALF', 'OF', 'THE', 'HORNFUL', 'AND', 'SENT', 'THE', 'REST', 'ACROSS', 'THE', 'FIRE', 'TO', 'THE', 'FARMER', 'HE', 'TOOK', 'IT', 'AND', 'SMILED', 'SAYING'] +5142-33396-0034-932: hyp=['THEN', 'I', 'DRANK', 'HALF', 'OF', 'THE', 'HORNFUL', 'AND', 'SET', 'THE', 'REST', 'ACROSS', 'THE', 'FIRE', 'TO', 'THE', 'FARMER', 'HE', 'TOOK', 'IT', 'AND', 'SMILED', 'SAYING'] +5142-33396-0035-933: ref=['DID', 'YOU', 'EVER', 'HAVE', 'SUCH', 'A', 'LORDLY', 'GUEST', 'BEFORE', 'I', 'WENT', 'ON'] +5142-33396-0035-933: hyp=['DID', 'YOU', 'EVER', 'HAVE', 'SUCH', 'A', 'LORDLY', 'GUEST', 'BEFORE', 'I', 'WENT', 'ON'] +5142-33396-0036-934: ref=['SO', 'I', 'WILL', 'GIVE', 'OUT', 'THIS', 'LAW', 'THAT', 'MY', 'MEN', 'SHALL', 'NEVER', 'LEAVE', 'YOU', 'ALONE'] +5142-33396-0036-934: hyp=['SO', 'I', 'WILL', 'GIVE', 'OUT', 'THIS', 'LAW', 'THAT', 'MY', 'MEN', 'SHALL', 'NEVER', 'LEAVE', 'YOU', 'ALONE'] +5142-33396-0037-935: ref=['HAKON', 'THERE', 'SHALL', 'BE', 'YOUR', 'CONSTANT', 'COMPANION', 'FRIEND', 'FARMER'] +5142-33396-0037-935: hyp=['HAWKIN', 'THERE', 'SHALL', 'BE', 'YOUR', 'CONSTANT', 'COMPANION', 'FRIEND', 'FARMER'] +5142-33396-0038-936: ref=['HE', 'SHALL', 'NOT', 'LEAVE', 'YOU', 'DAY', 'OR', 'NIGHT', 'WHETHER', 'YOU', 'ARE', 'WORKING', 'OR', 'PLAYING', 'OR', 'SLEEPING'] +5142-33396-0038-936: hyp=['HE', 'SHALL', 'NOT', 'LEAVE', 'YOU', 'DAY', 'OR', 'NIGHT', 
'WHETHER', 'YOU', 'ARE', 'WORKING', 'OR', 'PLAYING', 'OR', 'SLEEPING'] +5142-33396-0039-937: ref=['I', 'NAMED', 'NINE', 'OTHERS', 'AND', 'SAID'] +5142-33396-0039-937: hyp=['I', 'NAME', 'NINE', 'OTHERS', 'AND', 'SAID'] +5142-33396-0040-938: ref=['AND', 'THESE', 'SHALL', 'FOLLOW', 'YOUR', 'THRALLS', 'IN', 'THE', 'SAME', 'WAY'] +5142-33396-0040-938: hyp=['AND', 'THESE', 'SHALL', 'FOLLOW', 'YOUR', 'THRALLS', 'IN', 'THE', 'SAME', 'WAY'] +5142-33396-0041-939: ref=['SO', 'I', 'SET', 'GUARDS', 'OVER', 'EVERY', 'ONE', 'IN', 'THAT', 'HOUSE'] +5142-33396-0041-939: hyp=['SO', 'I', 'SET', 'GUARDS', 'OVER', 'EVERYONE', 'IN', 'THAT', 'HOUSE'] +5142-33396-0042-940: ref=['SO', 'NO', 'TALES', 'GOT', 'OUT', 'TO', 'THE', 'NEIGHBORS', 'BESIDES', 'IT', 'WAS', 'A', 'LONELY', 'PLACE', 'AND', 'BY', 'GOOD', 'LUCK', 'NO', 'ONE', 'CAME', 'THAT', 'WAY'] +5142-33396-0042-940: hyp=['SO', 'NO', 'TALES', 'GOT', 'OUT', 'TO', 'THE', 'NEIGHBORS', 'BESIDES', 'IT', 'WAS', 'A', 'LONELY', 'PLACE', 'AND', 'BY', 'GOOD', 'LUCK', 'NO', 'ONE', 'CAME', 'THAT', 'WAY'] +5142-33396-0043-941: ref=['THEIR', 'EYES', 'DANCED', 'BIG', 'THORLEIF', 'STOOD', 'UP', 'AND', 'STRETCHED', 'HIMSELF'] +5142-33396-0043-941: hyp=['THEIR', 'EYES', 'DANCED', 'BIG', 'TORE', 'LEAF', 'STOOD', 'UP', 'AND', 'STRETCHED', 'HIMSELF'] +5142-33396-0044-942: ref=['I', 'AM', 'STIFF', 'WITH', 'LONG', 'SITTING', 'HE', 'SAID', 'I', 'ITCH', 'FOR', 'A', 'FIGHT', 'I', 'TURNED', 'TO', 'THE', 'FARMER'] +5142-33396-0044-942: hyp=["I'M", 'STIFF', 'WITH', 'LONG', 'CITY', 'HE', 'SAID', 'I', 'ITCH', 'FOR', 'A', 'FIGHT', 'I', 'TURNED', 'TO', 'THE', 'FARMER'] +5142-33396-0045-943: ref=['THIS', 'IS', 'OUR', 'LAST', 'FEAST', 'WITH', 'YOU', 'I', 'SAID'] +5142-33396-0045-943: hyp=['THIS', 'IS', 'OUR', 'LAST', 'FEAST', 'WITH', 'YOU', 'I', 'SAID'] +5142-33396-0046-944: ref=['BY', 'THE', 'BEARD', 'OF', 'ODIN', 'I', 'CRIED', 'YOU', 'HAVE', 'TAKEN', 'OUR', 'JOKE', 'LIKE', 'A', 'MAN'] +5142-33396-0046-944: hyp=['BY', 'THE', 'BEARD', 'OF', 'ODIN', 'I', 'CRIED', 'YOU', 'HAVE', 'TAKEN', 'OUR', 'JOKE', 'LIKE', 'A', 'MAN'] +5142-33396-0047-945: ref=['MY', 'MEN', 'POUNDED', 'THE', 'TABLE', 'WITH', 'THEIR', 'FISTS'] +5142-33396-0047-945: hyp=['MY', 'MEN', 'POUNDED', 'THE', 'TABLE', 'WITH', 'THEIR', 'FISTS'] +5142-33396-0048-946: ref=['BY', 'THE', 'HAMMER', 'OF', 'THOR', 'SHOUTED', 'GRIM', 'HERE', 'IS', 'NO', 'STINGY', 'COWARD'] +5142-33396-0048-946: hyp=['BY', 'THE', 'HAMMER', 'OTHOR', 'SHOUTED', 'GRIM', 'THERE', 'IS', 'NO', 'STINGY', 'COWARD'] +5142-33396-0049-947: ref=['HERE', 'FRIEND', 'TAKE', 'IT', 'AND', 'HE', 'THRUST', 'IT', 'INTO', 'THE', "FARMER'S", 'HAND'] +5142-33396-0049-947: hyp=['HERE', 'FRIEND', 'TAKE', 'IT', 'AND', 'HE', 'THRUST', 'INTO', 'THE', "FARMER'S", 'HAND'] +5142-33396-0050-948: ref=['MAY', 'YOU', 'DRINK', "HEART'S", 'EASE', 'FROM', 'IT', 'FOR', 'MANY', 'YEARS'] +5142-33396-0050-948: hyp=['MAY', 'YOU', 'DRINK', 'HEARTSEASE', 'FROM', 'IT', 'FOR', 'MANY', 'YEARS'] +5142-33396-0051-949: ref=['AND', 'WITH', 'IT', 'I', 'LEAVE', 'YOU', 'A', 'NAME', 'SIF', 'THE', 'FRIENDLY', 'I', 'SHALL', 'HOPE', 'TO', 'DRINK', 'WITH', 'YOU', 'SOMETIME', 'IN', 'VALHALLA'] +5142-33396-0051-949: hyp=['AND', 'WITH', 'IT', 'I', 'LEAVE', 'YOU', 'A', 'NAME', 'SIFT', 'THE', 'FRIENDLY', 'I', 'SHALL', 'HOPE', 'TO', 'DRINK', 'WITH', 'YOU', 'SOME', 'TIME', 'IN', 'VALHALLA'] +5142-33396-0052-950: ref=['HERE', 'IS', 'A', 'RING', 'FOR', 'SIF', 'THE', 'FRIENDLY', 'AND', 'HERE', 'IS', 'A', 'BRACELET', 'A', 'SWORD', 'WOULD', 'NOT', 'BE', 'ASHAMED', 'TO', 'HANG', 'AT', 'YOUR', 'SIDE'] +5142-33396-0052-950: 
hyp=['HERE', 'IS', 'A', 'RING', 'FOR', 'SIFT', 'THE', 'FRIENDLY', 'AND', 'HERE', 'IS', 'A', 'BRACELET', 'AND', 'A', 'SWORD', 'WOULD', 'NOT', 'BE', 'ASHAMED', 'TO', 'HANG', 'AT', 'YOUR', 'SIDE'] +5142-33396-0053-951: ref=['I', 'TOOK', 'FIVE', 'GREAT', 'BRACELETS', 'OF', 'GOLD', 'FROM', 'OUR', 'TREASURE', 'CHEST', 'AND', 'GAVE', 'THEM', 'TO', 'HIM'] +5142-33396-0053-951: hyp=['I', 'TOOK', 'FIVE', 'GREAT', 'BRACELETS', 'OF', 'GOLD', 'FROM', 'OUR', 'TREASURE', 'CHEST', 'AND', 'GAVE', 'THEM', 'TO', 'HIM'] +5142-33396-0054-952: ref=['THAT', 'IS', 'THE', 'BEST', 'WAY', 'TO', 'DECIDE', 'FOR', 'THE', 'SPEAR', 'WILL', 'ALWAYS', 'POINT', 'SOMEWHERE', 'AND', 'ONE', 'THING', 'IS', 'AS', 'GOOD', 'AS', 'ANOTHER'] +5142-33396-0054-952: hyp=['THAT', 'IS', 'THE', 'BEST', 'WAY', 'TO', 'DECIDE', 'FOR', 'THE', 'SPEAR', 'WILL', 'ALWAYS', 'POINT', 'SOMEWHERE', 'AND', 'ONE', 'THING', 'IS', 'AS', 'GOOD', 'AS', 'ANOTHER'] +5142-33396-0055-953: ref=['THAT', 'TIME', 'IT', 'POINTED', 'US', 'INTO', 'YOUR', "FATHER'S", 'SHIPS'] +5142-33396-0055-953: hyp=['THAT', 'TIME', 'IT', 'POINTED', 'US', 'INTO', 'YOUR', "FATHER'S", 'SHIPS'] +5142-33396-0056-954: ref=['HERE', 'THEY', 'SAID', 'IS', 'A', 'RASCAL', 'WHO', 'HAS', 'BEEN', 'HARRYING', 'OUR', 'COASTS'] +5142-33396-0056-954: hyp=['HERE', 'THEY', 'SAID', 'AS', 'A', 'RASCAL', 'WHO', 'HAS', 'BEEN', 'HARRYING', 'OUR', 'COAST'] +5142-33396-0057-955: ref=['WE', 'SUNK', 'HIS', 'SHIP', 'AND', 'MEN', 'BUT', 'HIM', 'WE', 'BROUGHT', 'TO', 'YOU'] +5142-33396-0057-955: hyp=['WE', 'SUNK', 'HIS', 'SHIP', 'AND', 'MEN', 'BUT', 'HIM', 'WE', 'BROUGHT', 'TO', 'YOU'] +5142-33396-0058-956: ref=['A', 'ROBBER', 'VIKING', 'SAID', 'THE', 'KING', 'AND', 'SCOWLED', 'AT', 'ME'] +5142-33396-0058-956: hyp=['A', 'ROBBER', 'VIKING', 'SAID', 'THE', 'KING', 'AND', 'HE', 'SCOWLED', 'AT', 'ME'] +5142-33396-0059-957: ref=['YES', 'AND', 'WITH', 'ALL', 'YOUR', 'FINGERS', 'IT', 'TOOK', 'YOU', 'A', 'YEAR', 'TO', 'CATCH', 'ME', 'THE', 'KING', 'FROWNED', 'MORE', 'ANGRILY'] +5142-33396-0059-957: hyp=['YES', 'AND', 'WITH', 'ALL', 'YOUR', 'FINGERS', 'IT', 'TOOK', 'YOU', 'A', 'YEAR', 'TO', 'CATCH', 'ME', 'THE', 'KING', 'FROWNED', 'MORE', 'ANGRILY'] +5142-33396-0060-958: ref=['TAKE', 'HIM', 'OUT', 'THORKEL', 'AND', 'LET', 'HIM', 'TASTE', 'YOUR', 'SWORD'] +5142-33396-0060-958: hyp=['TAKE', 'HIM', 'OUT', 'TORCOAL', 'AND', 'LET', 'HIM', 'TASTE', 'YOUR', 'SWORD'] +5142-33396-0061-959: ref=['YOUR', 'MOTHER', 'THE', 'QUEEN', 'WAS', 'STANDING', 'BY'] +5142-33396-0061-959: hyp=['YOUR', 'MOTHER', 'THE', 'QUEEN', 'WAS', 'STANDING', 'BY'] +5142-33396-0062-960: ref=['NOW', 'SHE', 'PUT', 'HER', 'HAND', 'ON', 'HIS', 'ARM', 'AND', 'SMILED', 'AND', 'SAID'] +5142-33396-0062-960: hyp=['NOW', 'SHE', 'PUT', 'HER', 'HAND', 'ON', 'HIS', 'ARM', 'AND', 'SMILED', 'AND', 'SAID'] +5142-33396-0063-961: ref=['AND', 'WOULD', 'HE', 'NOT', 'BE', 'A', 'GOOD', 'GIFT', 'FOR', 'OUR', 'BABY'] +5142-33396-0063-961: hyp=['AND', 'WOULD', 'HE', 'NOT', 'BE', 'A', 'GOOD', 'GIFT', 'FOR', 'OUR', 'BABY'] +5142-33396-0064-962: ref=['YOUR', 'FATHER', 'THOUGHT', 'A', 'MOMENT', 'THEN', 'LOOKED', 'AT', 'YOUR', 'MOTHER', 'AND', 'SMILED'] +5142-33396-0064-962: hyp=['YOUR', 'FATHER', 'THOUGHT', 'A', 'MOMENT', 'AND', 'LOOKED', 'AT', 'YOUR', 'MOTHER', 'AND', 'SMILED'] +5142-33396-0065-963: ref=['SOFT', 'HEART', 'HE', 'SAID', 'GENTLY', 'TO', 'HER', 'THEN', 'TO', 'THORKEL', 'WELL', 'LET', 'HIM', 'GO', 'THORKEL'] +5142-33396-0065-963: hyp=['SOFT', 'HEART', 'HE', 'SAID', 'GENTLY', 'TO', 'HER', 'THEN', 'TO', 'TORQUAL', 'WELL', 'LET', 'HIM', 'GO', 'TORKO'] +5142-33396-0066-964: 
ref=['THEN', 'HE', 'TURNED', 'TO', 'ME', 'AGAIN', 'FROWNING'] +5142-33396-0066-964: hyp=['THEN', 'HE', 'TURNED', 'TO', 'ME', 'AGAIN', 'FROWNING'] +5142-33396-0067-965: ref=['BUT', 'YOUNG', 'SHARP', 'TONGUE', 'NOW', 'THAT', 'WE', 'HAVE', 'CAUGHT', 'YOU', 'WE', 'WILL', 'PUT', 'YOU', 'INTO', 'A', 'TRAP', 'THAT', 'YOU', 'CANNOT', 'GET', 'OUT', 'OF'] +5142-33396-0067-965: hyp=['BUT', 'YOUNG', 'SHARP', 'TONGUE', 'NOW', 'THAT', "WE'VE", 'CAUGHT', 'YOU', 'WE', 'WILL', 'PUT', 'YOU', 'INTO', 'A', 'TRAP', 'THAT', 'YOU', 'CANNOT', 'GET', 'OUT', 'OF'] +5142-33396-0068-966: ref=['SO', 'I', 'LIVED', 'AND', 'NOW', 'AM', 'YOUR', 'TOOTH', 'THRALL', 'WELL', 'IT', 'IS', 'THE', 'LUCK', 'OF', 'WAR'] +5142-33396-0068-966: hyp=['SO', 'I', 'LIVED', 'AND', 'NOW', "I'M", 'YOUR', 'TOOTH', 'THRALL', 'WELL', 'IT', 'IS', 'THE', 'LUCK', 'OF', 'WAR'] +5142-36377-0000-870: ref=['IT', 'WAS', 'ONE', 'OF', 'THE', 'MASTERLY', 'AND', 'CHARMING', 'STORIES', 'OF', 'DUMAS', 'THE', 'ELDER'] +5142-36377-0000-870: hyp=['IT', 'WAS', 'ONE', 'OF', 'THE', 'MASTERLY', 'AND', 'CHARMING', 'STORIES', 'OF', 'DUMAS', 'THE', 'ELDER'] +5142-36377-0001-871: ref=['IN', 'FIVE', 'MINUTES', 'I', 'WAS', 'IN', 'A', 'NEW', 'WORLD', 'AND', 'MY', 'MELANCHOLY', 'ROOM', 'WAS', 'FULL', 'OF', 'THE', 'LIVELIEST', 'FRENCH', 'COMPANY'] +5142-36377-0001-871: hyp=['IN', 'FIVE', 'MINUTES', 'I', 'WAS', 'IN', 'A', 'NEW', 'WORLD', 'AND', 'MY', 'MELANCHOLY', 'ROOM', 'WAS', 'FULL', 'OF', 'THE', 'LIVELIEST', 'FRENCH', 'COMPANY'] +5142-36377-0002-872: ref=['THE', 'SOUND', 'OF', 'AN', 'IMPERATIVE', 'AND', 'UNCOMPROMISING', 'BELL', 'RECALLED', 'ME', 'IN', 'DUE', 'TIME', 'TO', 'THE', 'REGIONS', 'OF', 'REALITY'] +5142-36377-0002-872: hyp=['THE', 'SOUND', 'OF', 'AN', 'IMPERATIVE', 'AND', 'UNCOMPROMISING', 'BELL', 'RECALLED', 'ME', 'IN', 'DUE', 'TIME', 'TO', 'THE', 'REGIONS', 'OF', 'REALITY'] +5142-36377-0003-873: ref=['AMBROSE', 'MET', 'ME', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'STAIRS', 'AND', 'SHOWED', 'ME', 'THE', 'WAY', 'TO', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0003-873: hyp=['AMBROSE', 'MET', 'ME', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'STAIRS', 'AND', 'SHOWED', 'ME', 'THE', 'WAY', 'TO', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0004-874: ref=['SHE', 'SIGNED', 'TO', 'ME', 'WITH', 'A', 'GHOSTLY', 'SOLEMNITY', 'TO', 'TAKE', 'THE', 'VACANT', 'PLACE', 'ON', 'THE', 'LEFT', 'OF', 'HER', 'FATHER'] +5142-36377-0004-874: hyp=['SHE', 'SIGNED', 'TO', 'ME', 'WITH', 'A', 'GHOSTLY', 'SOLEMNITY', 'TO', 'TAKE', 'THE', 'VACANT', 'PLACE', 'ON', 'THE', 'LEFT', 'OF', 'HER', 'FATHER'] +5142-36377-0005-875: ref=['THE', 'DOOR', 'OPENED', 'AGAIN', 'WHILE', 'I', 'WAS', 'STILL', 'STUDYING', 'THE', 'TWO', 'BROTHERS', 'WITHOUT', 'I', 'HONESTLY', 'CONFESS', 'BEING', 'VERY', 'FAVORABLY', 'IMPRESSED', 'BY', 'EITHER', 'OF', 'THEM'] +5142-36377-0005-875: hyp=['THE', 'DOOR', 'OPENED', 'AGAIN', 'WHILE', 'I', 'WAS', 'STILL', 'STUDYING', 'THE', 'TWO', 'BROTHERS', 'WITHOUT', 'I', 'HONESTLY', 'CONFESSED', 'BEING', 'VERY', 'FAVORABLY', 'IMPRESSED', 'BY', 'EITHER', 'OF', 'THEM'] +5142-36377-0006-876: ref=['A', 'NEW', 'MEMBER', 'OF', 'THE', 'FAMILY', 'CIRCLE', 'WHO', 'INSTANTLY', 'ATTRACTED', 'MY', 'ATTENTION', 'ENTERED', 'THE', 'ROOM'] +5142-36377-0006-876: hyp=['A', 'NEW', 'MEMBER', 'OF', 'THE', 'FAMILY', 'CIRCLE', 'WHO', 'INSTANTLY', 'ATTRACTED', 'MY', 'ATTENTION', 'ENTERED', 'THE', 'ROOM'] +5142-36377-0007-877: ref=['A', 'LITTLE', 'CRACKED', 'THAT', 'IN', 'THE', 'POPULAR', 'PHRASE', 'WAS', 'MY', 'IMPRESSION', 'OF', 'THE', 'STRANGER', 'WHO', 'NOW', 'MADE', 'HIS', 'APPEARANCE', 'IN', 'THE', 'SUPPER', 'ROOM'] 
+5142-36377-0007-877: hyp=['A', 'LITTLE', 'CRACKED', 'THAT', 'IN', 'THE', 'POPULAR', 'PHRASE', 'WAS', 'MY', 'IMPRESSION', 'OF', 'THE', 'STRANGER', 'WHO', 'NOW', 'MADE', 'HIS', 'APPEARANCE', 'IN', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0008-878: ref=['MISTER', 'MEADOWCROFT', 'THE', 'ELDER', 'HAVING', 'NOT', 'SPOKEN', 'ONE', 'WORD', 'THUS', 'FAR', 'HIMSELF', 'INTRODUCED', 'THE', 'NEWCOMER', 'TO', 'ME', 'WITH', 'A', 'SIDE', 'GLANCE', 'AT', 'HIS', 'SONS', 'WHICH', 'HAD', 'SOMETHING', 'LIKE', 'DEFIANCE', 'IN', 'IT', 'A', 'GLANCE', 'WHICH', 'AS', 'I', 'WAS', 'SORRY', 'TO', 'NOTICE', 'WAS', 'RETURNED', 'WITH', 'THE', 'DEFIANCE', 'ON', 'THEIR', 'SIDE', 'BY', 'THE', 'TWO', 'YOUNG', 'MEN'] +5142-36377-0008-878: hyp=['MISTER', 'MEDICRAFT', 'THE', 'ELDER', 'HAVING', 'NOT', 'SPOKEN', 'ONE', 'WORD', 'THUS', 'FAR', 'HIMSELF', 'INTRODUCED', 'THE', 'NEWCOMER', 'TO', 'ME', 'WITH', 'A', 'SIDE', 'GLANCE', 'AT', 'HIS', 'SONS', 'WHICH', 'HAD', 'SOMETHING', 'LIKE', 'DEFIANCE', 'IN', 'IT', 'A', 'GLANCE', 'WHICH', 'AS', 'I', 'WAS', 'SORRY', 'TO', 'NOTICE', 'WAS', 'RETURNED', 'WITH', 'THE', 'DEFIANCE', 'ON', 'THEIR', 'SIDE', 'BY', 'THE', 'TWO', 'YOUNG', 'MEN'] +5142-36377-0009-879: ref=['PHILIP', 'LEFRANK', 'THIS', 'IS', 'MY', 'OVERLOOKER', 'MISTER', 'JAGO', 'SAID', 'THE', 'OLD', 'MAN', 'FORMALLY', 'PRESENTING', 'US'] +5142-36377-0009-879: hyp=['PHILIP', 'LENG', 'THIS', 'IS', 'MY', 'OVERLOOKER', 'MISTER', 'YAGO', 'SAID', 'THE', 'OLD', 'MAN', 'FORMERLY', 'PRESENTING', 'US'] +5142-36377-0010-880: ref=['HE', 'IS', 'NOT', 'WELL', 'HE', 'HAS', 'COME', 'OVER', 'THE', 'OCEAN', 'FOR', 'REST', 'AND', 'CHANGE', 'OF', 'SCENE'] +5142-36377-0010-880: hyp=['HE', 'IS', 'NOT', 'WELL', 'HE', 'HAS', 'COME', 'OVER', 'THE', 'OCEAN', 'FOR', 'REST', 'AND', 'CHANGE', 'IS', 'SEEN'] +5142-36377-0011-881: ref=['MISTER', 'JAGO', 'IS', 'AN', 'AMERICAN', 'PHILIP'] +5142-36377-0011-881: hyp=['THE', 'TRIAGO', 'IS', 'AN', 'AMERICAN', 'PHILIP'] +5142-36377-0012-882: ref=['MAKE', 'ACQUAINTANCE', 'WITH', 'MISTER', 'JAGO', 'SIT', 'TOGETHER'] +5142-36377-0012-882: hyp=['MAKE', 'ACQUAINTANCE', 'WITH', 'MISTER', 'SIP', 'TOGETHER'] +5142-36377-0013-883: ref=['THEY', 'POINTEDLY', 'DREW', 'BACK', 'FROM', 'JOHN', 'JAGO', 'AS', 'HE', 'APPROACHED', 'THE', 'EMPTY', 'CHAIR', 'NEXT', 'TO', 'ME', 'AND', 'MOVED', 'ROUND', 'TO', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'TABLE'] +5142-36377-0013-883: hyp=['THEY', 'POINTEDLY', 'DREW', 'BACK', 'FROM', 'JOHN', 'JAGO', 'AS', 'HE', 'APPROACHED', 'THE', 'EMPTY', 'CHAIR', 'NEXT', 'TO', 'ME', 'AND', 'MOVED', 'ROUND', 'TO', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'TABLE'] +5142-36377-0014-884: ref=['A', 'PRETTY', 'GIRL', 'AND', 'SO', 'FAR', 'AS', 'I', 'COULD', 'JUDGE', 'BY', 'APPEARANCES', 'A', 'GOOD', 'GIRL', 'TOO', 'DESCRIBING', 'HER', 'GENERALLY', 'I', 'MAY', 'SAY', 'THAT', 'SHE', 'HAD', 'A', 'SMALL', 'HEAD', 'WELL', 'CARRIED', 'AND', 'WELL', 'SET', 'ON', 'HER', 'SHOULDERS', 'BRIGHT', 'GRAY', 'EYES', 'THAT', 'LOOKED', 'AT', 'YOU', 'HONESTLY', 'AND', 'MEANT', 'WHAT', 'THEY', 'LOOKED', 'A', 'TRIM', 'SLIGHT', 'LITTLE', 'FIGURE', 'TOO', 'SLIGHT', 'FOR', 'OUR', 'ENGLISH', 'NOTIONS', 'OF', 'BEAUTY', 'A', 'STRONG', 'AMERICAN', 'ACCENT', 'AND', 'A', 'RARE', 'THING', 'IN', 'AMERICA', 'A', 'PLEASANTLY', 'TONED', 'VOICE', 'WHICH', 'MADE', 'THE', 'ACCENT', 'AGREEABLE', 'TO', 'ENGLISH', 'EARS'] +5142-36377-0014-884: hyp=['A', 'PRETTY', 'GIRL', 'AND', 'SO', 'FAR', 'AS', 'I', 'COULD', 'JUDGE', 'MY', 'APPEARANCES', 'A', 'GOOD', 'GIRL', 'TOO', 'DESCRIBING', 'HER', 'GENERALLY', 'I', 'MAY', 'SAY', 'THAT', 'SHE', 'HAD', 'A', 'SMALL', 'HEAD', 
'WELL', 'CARRIED', 'AND', 'WELL', 'SET', 'ON', 'HER', 'SHOULDERS', 'BRIGHT', 'GRAY', 'EYES', 'THAT', 'LOOKED', 'AT', 'YOU', 'HONESTLY', 'AND', 'MEANT', 'WHAT', 'THEY', 'LOOKED', 'A', 'TRIM', 'SLIGHT', 'LITTLE', 'FIGURE', 'TOO', 'SLIGHT', 'FOR', 'OUR', 'ENGLISH', 'NOTIONS', 'OF', 'BEAUTY', 'A', 'STRONG', 'AMERICAN', 'ACCENT', 'AND', 'A', 'RARE', 'THING', 'IN', 'AMERICA', 'A', 'PLEASANTLY', 'TONED', 'VOICE', 'WHICH', 'MADE', 'THE', 'ACCENT', 'AGREEABLE', 'TO', 'ENGLISH', 'YEARS'] +5142-36377-0015-885: ref=['OUR', 'FIRST', 'IMPRESSIONS', 'OF', 'PEOPLE', 'ARE', 'IN', 'NINE', 'CASES', 'OUT', 'OF', 'TEN', 'THE', 'RIGHT', 'IMPRESSIONS'] +5142-36377-0015-885: hyp=['OUR', 'FIRST', 'IMPRESSIONS', 'OF', 'PEOPLE', 'ARE', 'IN', 'NINE', 'CASES', 'AT', 'A', 'TEN', 'THE', 'RIGHT', 'IMPRESSIONS'] +5142-36377-0016-886: ref=['FOR', 'ONCE', 'IN', 'A', 'WAY', 'I', 'PROVED', 'A', 'TRUE', 'PROPHET'] +5142-36377-0016-886: hyp=['FOR', 'ONCE', 'IN', 'A', 'WAY', 'I', 'PROVED', 'A', 'TRUE', 'PROPHET'] +5142-36377-0017-887: ref=['THE', 'ONLY', 'CHEERFUL', 'CONVERSATION', 'WAS', 'THE', 'CONVERSATION', 'ACROSS', 'THE', 'TABLE', 'BETWEEN', 'NAOMI', 'AND', 'ME'] +5142-36377-0017-887: hyp=['THE', 'ONLY', 'CHEERFUL', 'CONVERSATION', 'WAS', 'THE', 'CONVERSATION', 'ACROSS', 'THE', 'TABLE', 'BETWEEN', 'NAOMI', 'AND', 'ME'] +5142-36377-0018-888: ref=['HE', 'LOOKED', 'UP', 'AT', 'NAOMI', 'DOUBTINGLY', 'FROM', 'HIS', 'PLATE', 'AND', 'LOOKED', 'DOWN', 'AGAIN', 'SLOWLY', 'WITH', 'A', 'FROWN'] +5142-36377-0018-888: hyp=['HE', 'LOOKED', 'UP', 'AND', 'NOW', 'AND', 'ME', 'DOUBTINGLY', 'FROM', 'HIS', 'PLATE', 'AND', 'LOOKED', 'DOWN', 'AGAIN', 'SLOWLY', 'WITH', 'A', 'FROWN'] +5142-36377-0019-889: ref=['WHEN', 'I', 'ADDRESSED', 'HIM', 'HE', 'ANSWERED', 'CONSTRAINEDLY'] +5142-36377-0019-889: hyp=['WHEN', 'I', 'ADDRESSED', 'HIM', 'HE', 'ANSWERED', 'CONSTRAINEDLY'] +5142-36377-0020-890: ref=['A', 'MORE', 'DREARY', 'AND', 'MORE', 'DISUNITED', 'FAMILY', 'PARTY', 'I', 'NEVER', 'SAT', 'AT', 'THE', 'TABLE', 'WITH'] +5142-36377-0020-890: hyp=['A', 'MORE', 'DREARY', 'AND', 'MORE', 'DISUNITED', 'FAMILY', 'PARTY', 'I', 'NEVER', 'SAT', 'AT', 'THE', 'TABLE', 'WITH'] +5142-36377-0021-891: ref=['ENVY', 'HATRED', 'MALICE', 'AND', 'UNCHARITABLENESS', 'ARE', 'NEVER', 'SO', 'ESSENTIALLY', 'DETESTABLE', 'TO', 'MY', 'MIND', 'AS', 'WHEN', 'THEY', 'ARE', 'ANIMATED', 'BY', 'A', 'SENSE', 'OF', 'PROPRIETY', 'AND', 'WORK', 'UNDER', 'THE', 'SURFACE', 'BUT', 'FOR', 'MY', 'INTEREST', 'IN', 'NAOMI', 'AND', 'MY', 'OTHER', 'INTEREST', 'IN', 'THE', 'LITTLE', 'LOVE', 'LOOKS', 'WHICH', 'I', 'NOW', 'AND', 'THEN', 'SURPRISED', 'PASSING', 'BETWEEN', 'HER', 'AND', 'AMBROSE', 'I', 'SHOULD', 'NEVER', 'HAVE', 'SAT', 'THROUGH', 'THAT', 'SUPPER'] +5142-36377-0021-891: hyp=['ENVY', 'HATRED', 'MALICE', 'AND', 'UNCHARITABLENESS', 'ARE', 'NEVER', 'SO', 'ESSENTIALLY', 'DETESTABLE', 'TO', 'MY', 'MIND', 'AS', 'WHEN', 'THEY', 'ARE', 'ANIMATED', 'BY', 'THE', 'SENSE', 'OF', 'PROPRIETY', 'AND', 'WORK', 'UNDER', 'THE', 'SURFACE', 'BUT', 'FOR', 'MY', 'INTEREST', 'IN', 'THEY', 'OWE', 'ME', 'AND', 'MY', 'OTHER', 'INTEREST', 'IN', 'THE', 'LITTLE', 'LOVE', 'LOOKS', 'WHICH', 'I', 'NOW', 'AND', 'THEN', 'SURPRISED', 'PASSING', 'BETWEEN', 'HER', 'AND', 'AMBROSE', 'I', 'SHOULD', 'NEVER', 'HAVE', 'SAT', 'THROUGH', 'THAT', 'SUPPER'] +5142-36377-0022-892: ref=['I', 'WISH', 'YOU', 'GOOD', 'NIGHT', 'SHE', 'LAID', 'HER', 'BONY', 'HANDS', 'ON', 'THE', 'BACK', 'OF', 'MISTER', "MEADOWCROFT'S", 'INVALID', 'CHAIR', 'CUT', 'HIM', 'SHORT', 'IN', 'HIS', 'FAREWELL', 'SALUTATION', 'TO', 'ME', 'AND', 'WHEELED', 'HIM', 
'OUT', 'TO', 'HIS', 'BED', 'AS', 'IF', 'SHE', 'WERE', 'WHEELING', 'HIM', 'OUT', 'TO', 'HIS', 'GRAVE'] +5142-36377-0022-892: hyp=['I', 'WISH', 'YOU', 'GOOD', 'NIGHT', 'SHE', 'LAID', 'HER', 'BONY', 'HANDS', 'ON', 'THE', 'BACK', 'OF', 'MISTER', "METICOFF'S", 'INVALID', 'CHAIR', 'CAUGHT', 'HIM', 'SHORT', 'IN', 'HIS', 'FAREWELL', 'SALUTATION', 'TO', 'ME', 'AND', 'WHEELED', 'HIM', 'OUT', 'TO', 'HIS', 'BED', 'AS', 'IF', 'SHE', 'WERE', 'WHEELING', 'HIM', 'OUT', 'TO', 'HIS', 'GRAVE'] +5142-36377-0023-893: ref=['YOU', 'WERE', 'QUITE', 'RIGHT', 'TO', 'SAY', 'NO', 'AMBROSE', 'BEGAN', 'NEVER', 'SMOKE', 'WITH', 'JOHN', 'JAGO', 'HIS', 'CIGARS', 'WILL', 'POISON', 'YOU'] +5142-36377-0023-893: hyp=['YOU', 'WERE', 'QUITE', 'RIGHT', 'TO', 'SAY', 'NO', 'AMBROSE', 'BEGAN', 'NEVER', 'SMOKE', 'WITH', 'JOHN', 'IAGO', 'HIS', 'CIGARS', 'WILL', 'POISON', 'YOU'] +5142-36377-0024-894: ref=['NAOMI', 'SHOOK', 'HER', 'FOREFINGER', 'REPROACHFULLY', 'AT', 'THEM', 'AS', 'IF', 'THE', 'TWO', 'STURDY', 'YOUNG', 'FARMERS', 'HAD', 'BEEN', 'TWO', 'CHILDREN'] +5142-36377-0024-894: hyp=['THEY', 'ONLY', 'SHOOK', 'HER', 'FOREFINGER', 'REPROACHFULLY', 'AT', 'THEM', 'AS', 'IF', 'THE', 'TWO', 'STURDY', 'YOUNG', 'FARMERS', 'HAD', 'BEEN', 'TWO', 'CHILDREN'] +5142-36377-0025-895: ref=['SILAS', 'SLUNK', 'AWAY', 'WITHOUT', 'A', 'WORD', 'OF', 'PROTEST', 'AMBROSE', 'STOOD', 'HIS', 'GROUND', 'EVIDENTLY', 'BENT', 'ON', 'MAKING', 'HIS', 'PEACE', 'WITH', 'NAOMI', 'BEFORE', 'HE', 'LEFT', 'HER', 'SEEING', 'THAT', 'I', 'WAS', 'IN', 'THE', 'WAY', 'I', 'WALKED', 'ASIDE', 'TOWARD', 'A', 'GLASS', 'DOOR', 'AT', 'THE', 'LOWER', 'END', 'OF', 'THE', 'ROOM'] +5142-36377-0025-895: hyp=['SILAS', 'SLUNK', 'AWAY', 'WITHOUT', 'A', 'WORD', 'OF', 'PROTEST', 'AMBROSE', 'STOOD', 'HIS', 'GROUND', 'EVIDENTLY', 'BENT', 'ON', 'MAKING', 'HIS', 'PEACE', 'WHEN', 'THEY', 'ARMY', 'BEFORE', 'HE', 'LEFT', 'HER', 'SEEING', 'THAT', 'I', 'WAS', 'IN', 'THE', 'WAY', 'I', 'WALKED', 'ASIDE', 'TOWARD', 'A', 'GLASS', 'DOOR', 'AT', 'THE', 'LOWER', 'END', 'OF', 'THE', 'ROOM'] +5142-36586-0000-967: ref=['IT', 'IS', 'MANIFEST', 'THAT', 'MAN', 'IS', 'NOW', 'SUBJECT', 'TO', 'MUCH', 'VARIABILITY'] +5142-36586-0000-967: hyp=['IT', 'IS', 'MANIFEST', 'THAT', 'MAN', 'IS', 'NOW', 'SUBJECT', 'TO', 'MUCH', 'VARIABILITY'] +5142-36586-0001-968: ref=['SO', 'IT', 'IS', 'WITH', 'THE', 'LOWER', 'ANIMALS'] +5142-36586-0001-968: hyp=['SO', 'IT', 'IS', 'WITH', 'THE', 'LOWER', 'ANIMALS'] +5142-36586-0002-969: ref=['THE', 'VARIABILITY', 'OF', 'MULTIPLE', 'PARTS'] +5142-36586-0002-969: hyp=['THE', 'VERY', 'ABILITY', 'OF', 'MULTIPLE', 'PARTS'] +5142-36586-0003-970: ref=['BUT', 'THIS', 'SUBJECT', 'WILL', 'BE', 'MORE', 'PROPERLY', 'DISCUSSED', 'WHEN', 'WE', 'TREAT', 'OF', 'THE', 'DIFFERENT', 'RACES', 'OF', 'MANKIND'] +5142-36586-0003-970: hyp=['BUT', 'THIS', 'SUBJECT', 'WILL', 'BE', 'MORE', 'PROPERLY', 'DISCUSSED', 'WHEN', 'WE', 'TREAT', 'OF', 'THE', 'DIFFERENT', 'RACES', 'OF', 'MANKIND'] +5142-36586-0004-971: ref=['EFFECTS', 'OF', 'THE', 'INCREASED', 'USE', 'AND', 'DISUSE', 'OF', 'PARTS'] +5142-36586-0004-971: hyp=['EFFECTS', 'OF', 'THE', 'INCREASED', 'USE', 'AND', 'DISUSE', 'OF', 'PARTS'] +5142-36600-0000-896: ref=['CHAPTER', 'SEVEN', 'ON', 'THE', 'RACES', 'OF', 'MAN'] +5142-36600-0000-896: hyp=['CHAPTER', 'SEVEN', 'ON', 'THE', 'RACES', 'OF', 'MAN'] +5142-36600-0001-897: ref=['IN', 'DETERMINING', 'WHETHER', 'TWO', 'OR', 'MORE', 'ALLIED', 'FORMS', 'OUGHT', 'TO', 'BE', 'RANKED', 'AS', 'SPECIES', 'OR', 'VARIETIES', 'NATURALISTS', 'ARE', 'PRACTICALLY', 'GUIDED', 'BY', 'THE', 'FOLLOWING', 'CONSIDERATIONS', 'NAMELY', 
'THE', 'AMOUNT', 'OF', 'DIFFERENCE', 'BETWEEN', 'THEM', 'AND', 'WHETHER', 'SUCH', 'DIFFERENCES', 'RELATE', 'TO', 'FEW', 'OR', 'MANY', 'POINTS', 'OF', 'STRUCTURE', 'AND', 'WHETHER', 'THEY', 'ARE', 'OF', 'PHYSIOLOGICAL', 'IMPORTANCE', 'BUT', 'MORE', 'ESPECIALLY', 'WHETHER', 'THEY', 'ARE', 'CONSTANT'] +5142-36600-0001-897: hyp=['AND', 'DETERMINING', 'WHETHER', 'TWO', 'OR', 'MORE', 'ALLIED', 'FORMS', 'OUGHT', 'TO', 'BE', 'RANKED', 'A', 'SPECIES', 'OR', 'VARIETIES', 'NATURALISTS', 'ARE', 'PRACTICALLY', 'GUIDED', 'BY', 'THE', 'FOLLOWING', 'CONSIDERATIONS', 'NAMELY', 'THE', 'AMOUNT', 'OF', 'DIFFERENCE', 'BETWEEN', 'THEM', 'AND', 'WHETHER', 'SUCH', 'DIFFERENCES', 'RELATE', 'TO', 'FEW', 'OR', 'MANY', 'POINTS', 'OF', 'STRUCTURE', 'AND', 'WHETHER', 'THEY', 'ARE', 'OF', 'PHYSIOLOGICAL', 'IMPORTANCE', 'BUT', 'MORE', 'ESPECIALLY', 'WHETHER', 'THEY', 'ARE', 'CONSTANT'] +5639-40744-0000-137: ref=['ELEVEN', "O'CLOCK", 'HAD', 'STRUCK', 'IT', 'WAS', 'A', 'FINE', 'CLEAR', 'NIGHT', 'THEY', 'WERE', 'THE', 'ONLY', 'PERSONS', 'ON', 'THE', 'ROAD', 'AND', 'THEY', 'SAUNTERED', 'LEISURELY', 'ALONG', 'TO', 'AVOID', 'PAYING', 'THE', 'PRICE', 'OF', 'FATIGUE', 'FOR', 'THE', 'RECREATION', 'PROVIDED', 'FOR', 'THE', 'TOLEDANS', 'IN', 'THEIR', 'VALLEY', 'OR', 'ON', 'THE', 'BANKS', 'OF', 'THEIR', 'RIVER'] +5639-40744-0000-137: hyp=['ELEVEN', "O'CLOCK", 'HAD', 'STRUCK', 'IT', 'WAS', 'A', 'FINE', 'CLEAR', 'NIGHT', 'THERE', 'WERE', 'THE', 'ONLY', 'PERSONS', 'ON', 'THE', 'ROAD', 'AND', 'THEY', 'SAUNTERED', 'LEISURELY', 'ALONG', 'TO', 'AVOID', 'PAYING', 'THE', 'PRICE', 'OF', 'FATIGUE', 'FOR', 'THE', 'RECREATION', 'PROVIDED', 'FOR', 'THE', 'TOLEDANS', 'IN', 'THE', 'VALLEY', 'OR', 'ON', 'THE', 'BANKS', 'OF', 'THEIR', 'RIVER'] +5639-40744-0001-138: ref=['SECURE', 'AS', 'HE', 'THOUGHT', 'IN', 'THE', 'CAREFUL', 'ADMINISTRATION', 'OF', 'JUSTICE', 'IN', 'THAT', 'CITY', 'AND', 'THE', 'CHARACTER', 'OF', 'ITS', 'WELL', 'DISPOSED', 'INHABITANTS', 'THE', 'GOOD', 'HIDALGO', 'WAS', 'FAR', 'FROM', 'THINKING', 'THAT', 'ANY', 'DISASTER', 'COULD', 'BEFAL', 'HIS', 'FAMILY'] +5639-40744-0001-138: hyp=['SECURE', 'AS', 'HE', 'THOUGHT', 'IN', 'THE', 'CAREFUL', 'ADMINISTRATION', 'OF', 'JUSTICE', 'IN', 'THAT', 'CITY', 'AND', 'THE', 'CHARACTER', 'OF', 'ITS', 'WELL', 'DISPOSED', 'INHABITANTS', 'THE', 'GOOD', 'HADALGO', 'WAS', 'FAR', 'FROM', 'THINKING', 'THAT', 'ANY', 'DISASTER', 'COULD', 'BEFALL', 'HIS', 'FAMILY'] +5639-40744-0002-139: ref=['RODOLFO', 'AND', 'HIS', 'COMPANIONS', 'WITH', 'THEIR', 'FACES', 'MUFFLED', 'IN', 'THEIR', 'CLOAKS', 'STARED', 'RUDELY', 'AND', 'INSOLENTLY', 'AT', 'THE', 'MOTHER', 'THE', 'DAUGHTER', 'AND', 'THE', 'SERVANT', 'MAID'] +5639-40744-0002-139: hyp=['RUDOLPHO', 'AND', 'HIS', 'COMPANIONS', 'WITH', 'THEIR', 'FACES', 'MUFFLED', 'IN', 'THEIR', 'CLOAKS', 'STARED', 'RUDELY', 'AND', 'INSOLENTLY', 'AT', 'THE', 'MOTHER', 'THE', 'DAUGHTER', 'AND', 'THE', 'SERVANT', 'MAID'] +5639-40744-0003-140: ref=['IN', 'A', 'MOMENT', 'HE', 'COMMUNICATED', 'HIS', 'THOUGHTS', 'TO', 'HIS', 'COMPANIONS', 'AND', 'IN', 'THE', 'NEXT', 'MOMENT', 'THEY', 'RESOLVED', 'TO', 'TURN', 'BACK', 'AND', 'CARRY', 'HER', 'OFF', 'TO', 'PLEASE', 'RODOLFO', 'FOR', 'THE', 'RICH', 'WHO', 'ARE', 'OPEN', 'HANDED', 'ALWAYS', 'FIND', 'PARASITES', 'READY', 'TO', 'ENCOURAGE', 'THEIR', 'BAD', 'PROPENSITIES', 'AND', 'THUS', 'TO', 'CONCEIVE', 'THIS', 'WICKED', 'DESIGN', 'TO', 'COMMUNICATE', 'IT', 'APPROVE', 'IT', 'RESOLVE', 'ON', 'RAVISHING', 'LEOCADIA', 'AND', 'TO', 'CARRY', 'THAT', 'DESIGN', 'INTO', 'EFFECT', 'WAS', 'THE', 'WORK', 'OF', 'A', 'MOMENT'] +5639-40744-0003-140: 
hyp=['IN', 'A', 'MOMENT', 'HE', 'COMMUNICATED', 'HIS', 'THOUGHTS', 'TO', 'HIS', 'COMPANIONS', 'AND', 'IN', 'THE', 'NEXT', 'MOMENT', 'THEY', 'RESOLVED', 'TO', 'TURN', 'BACK', 'AND', 'CARRY', 'HER', 'OFF', 'TO', 'PLEASE', 'RUDOLPHO', 'FOR', 'THE', 'RICH', 'WHO', 'ARE', 'OPEN', 'HANDED', 'ALWAYS', 'FIND', 'PARASITES', 'READY', 'TO', 'ENCOURAGE', 'THEIR', 'BAD', 'PROPENSITIES', 'AND', 'THUS', 'TO', 'CONCEIVE', 'THIS', 'WICKED', 'DESIGN', 'TO', 'COMMUNICATE', 'IT', 'APPROVE', 'IT', 'RESOLVE', 'ON', 'RAVISHING', 'LOCATIA', 'AND', 'TO', 'CARRY', 'THAT', 'DESIGN', 'INTO', 'EFFECT', 'WAS', 'THE', 'WORK', 'OF', 'A', 'MOMENT'] +5639-40744-0004-141: ref=['THEY', 'DREW', 'THEIR', 'SWORDS', 'HID', 'THEIR', 'FACES', 'IN', 'THE', 'FLAPS', 'OF', 'THEIR', 'CLOAKS', 'TURNED', 'BACK', 'AND', 'SOON', 'CAME', 'IN', 'FRONT', 'OF', 'THE', 'LITTLE', 'PARTY', 'WHO', 'HAD', 'NOT', 'YET', 'DONE', 'GIVING', 'THANKS', 'TO', 'GOD', 'FOR', 'THEIR', 'ESCAPE', 'FROM', 'THOSE', 'AUDACIOUS', 'MEN'] +5639-40744-0004-141: hyp=['THEY', 'DREW', 'THEIR', 'SWORDS', 'HID', 'THEIR', 'FACES', 'IN', 'THE', 'FLAPS', 'OF', 'THEIR', 'CLOAKS', 'TURNED', 'BACK', 'AND', 'SOON', 'CAME', 'IN', 'FRONT', 'OF', 'THE', 'LITTLE', 'PARTY', 'WHO', 'HAD', 'NOT', 'YET', 'DONE', 'GIVING', 'THANKS', 'TO', 'GOD', 'FOR', 'THEIR', 'ESCAPE', 'FROM', 'THOSE', 'AUDACIOUS', 'MEN'] +5639-40744-0005-142: ref=['FINALLY', 'THE', 'ONE', 'PARTY', 'WENT', 'OFF', 'EXULTING', 'AND', 'THE', 'OTHER', 'WAS', 'LEFT', 'IN', 'DESOLATION', 'AND', 'WOE'] +5639-40744-0005-142: hyp=['FINALLY', 'THE', 'ONE', 'PARTY', 'WENT', 'OFF', 'EXULTING', 'AND', 'THE', 'OTHER', 'WAS', 'LEFT', 'IN', 'DESOLATION', 'AND', 'WOE'] +5639-40744-0006-143: ref=['RODOLFO', 'ARRIVED', 'AT', 'HIS', 'OWN', 'HOUSE', 'WITHOUT', 'ANY', 'IMPEDIMENT', 'AND', "LEOCADIA'S", 'PARENTS', 'REACHED', 'THEIRS', 'HEART', 'BROKEN', 'AND', 'DESPAIRING'] +5639-40744-0006-143: hyp=['RUDOLPHO', 'ARRIVED', 'AT', 'HIS', 'OWN', 'HOUSE', 'WITHOUT', 'ANY', 'IMPEDIMENT', "ALYOCADIA'S", 'PARENTS', 'REACHED', 'THEIRS', 'HEARTBROKEN', 'AND', 'DESPAIRING'] +5639-40744-0007-144: ref=['MEANWHILE', 'RODOLFO', 'HAD', 'LEOCADIA', 'SAFE', 'IN', 'HIS', 'CUSTODY', 'AND', 'IN', 'HIS', 'OWN', 'APARTMENT'] +5639-40744-0007-144: hyp=['MEANWHILE', 'RUDOLPHO', 'HAD', 'LOCALIA', 'SAFE', 'IN', 'HIS', 'CUSTODY', 'AND', 'IN', 'HIS', 'OWN', 'APARTMENT'] +5639-40744-0008-145: ref=['WHO', 'TOUCHES', 'ME', 'AM', 'I', 'IN', 'BED'] +5639-40744-0008-145: hyp=['WHO', 'TOUCHES', 'ME', 'AM', 'I', 'IN', 'BED'] +5639-40744-0009-146: ref=['MOTHER', 'DEAR', 'FATHER', 'DO', 'YOU', 'HEAR', 'ME'] +5639-40744-0009-146: hyp=['MOTHER', 'DEAR', 'FATHER', 'DO', 'YOU', 'HEAR', 'ME'] +5639-40744-0010-147: ref=['IT', 'IS', 'THE', 'ONLY', 'AMENDS', 'I', 'ASK', 'OF', 'YOU', 'FOR', 'THE', 'WRONG', 'YOU', 'HAVE', 'DONE', 'ME'] +5639-40744-0010-147: hyp=['IT', 'IS', 'THE', 'ONLY', 'AMENDS', 'I', 'ASK', 'OF', 'YOU', 'FOR', 'THE', 'WRONG', 'YOU', 'HAVE', 'DONE', 'ME'] +5639-40744-0011-148: ref=['SHE', 'FOUND', 'THE', 'DOOR', 'BUT', 'IT', 'WAS', 'LOCKED', 'OUTSIDE'] +5639-40744-0011-148: hyp=['SHE', 'FOUND', 'THE', 'DOOR', 'BUT', 'IT', 'WAS', 'LOCKED', 'OUTSIDE'] +5639-40744-0012-149: ref=['SHE', 'SUCCEEDED', 'IN', 'OPENING', 'THE', 'WINDOW', 'AND', 'THE', 'MOONLIGHT', 'SHONE', 'IN', 'SO', 'BRIGHTLY', 'THAT', 'SHE', 'COULD', 'DISTINGUISH', 'THE', 'COLOUR', 'OF', 'SOME', 'DAMASK', 'HANGINGS', 'IN', 'THE', 'ROOM'] +5639-40744-0012-149: hyp=['SHE', 'SUCCEEDED', 'IN', 'OPENING', 'THE', 'WINDOW', 'AND', 'THE', 'MOONLIGHT', 'SHONE', 'IN', 'SO', 'BRIGHTLY', 'THAT', 'SHE', 'COULD', 
'DISTINGUISH', 'THE', 'COLOUR', 'OF', 'SOME', 'DAMASK', 'HANGING', 'IN', 'THE', 'ROOM'] +5639-40744-0013-150: ref=['SHE', 'SAW', 'THAT', 'THE', 'BED', 'WAS', 'GILDED', 'AND', 'SO', 'RICH', 'THAT', 'IT', 'SEEMED', 'THAT', 'OF', 'A', 'PRINCE', 'RATHER', 'THAN', 'OF', 'A', 'PRIVATE', 'GENTLEMAN'] +5639-40744-0013-150: hyp=['SHE', 'SAW', 'THAT', 'THE', 'BED', 'WAS', 'GILDED', 'AND', 'SO', 'RICH', 'THAT', 'IT', 'SEEMED', 'THAT', 'OF', 'A', 'PRINCE', 'THE', 'RATHER', 'THAT', 'OF', 'A', 'PRIVATE', 'GENTLEMAN'] +5639-40744-0014-151: ref=['AMONG', 'OTHER', 'THINGS', 'ON', 'WHICH', 'SHE', 'CAST', 'HER', 'EYES', 'WAS', 'A', 'SMALL', 'CRUCIFIX', 'OF', 'SOLID', 'SILVER', 'STANDING', 'ON', 'A', 'CABINET', 'NEAR', 'THE', 'WINDOW'] +5639-40744-0014-151: hyp=['AMONG', 'OTHER', 'THINGS', 'ON', 'WHICH', 'SHE', 'CAST', 'HER', 'EYES', 'WAS', 'A', 'SMALL', 'CRUCIFIX', 'OF', 'SOLID', 'SILVER', 'STANDING', 'ON', 'A', 'CABINET', 'NEAR', 'THE', 'WINDOW'] +5639-40744-0015-152: ref=['THIS', 'PERSON', 'WAS', 'RODOLFO', 'WHO', 'THOUGH', 'HE', 'HAD', 'GONE', 'TO', 'LOOK', 'FOR', 'HIS', 'FRIENDS', 'HAD', 'CHANGED', 'HIS', 'MIND', 'IN', 'THAT', 'RESPECT', 'NOT', 'THINKING', 'IT', 'ADVISABLE', 'TO', 'ACQUAINT', 'THEM', 'WITH', 'WHAT', 'HAD', 'PASSED', 'BETWEEN', 'HIM', 'AND', 'THE', 'GIRL'] +5639-40744-0015-152: hyp=['THIS', 'PERSON', 'WAS', 'RUDOLPHU', 'WHO', 'THOUGH', 'HE', 'HAD', 'GONE', 'TO', 'LOOK', 'FOR', 'HIS', 'FRIENDS', 'HAD', 'CHANGED', 'HIS', 'MIND', 'IN', 'THAT', 'RESPECT', 'NOT', 'THINKING', 'IT', 'ADVISABLE', 'TO', 'ACQUAINT', 'THEM', 'WITH', 'WHAT', 'HAD', 'PASSED', 'BETWEEN', 'HIM', 'AND', 'THE', 'GIRL'] +5639-40744-0016-153: ref=['ON', 'THE', 'CONTRARY', 'HE', 'RESOLVED', 'TO', 'TELL', 'THEM', 'THAT', 'REPENTING', 'OF', 'HIS', 'VIOLENCE', 'AND', 'MOVED', 'BY', 'HER', 'TEARS', 'HE', 'HAD', 'ONLY', 'CARRIED', 'HER', 'HALF', 'WAY', 'TOWARDS', 'HIS', 'HOUSE', 'AND', 'THEN', 'LET', 'HER', 'GO'] +5639-40744-0016-153: hyp=['ON', 'THE', 'CONTRARY', 'HE', 'RESOLVED', 'TO', 'TELL', 'THEM', 'THAT', 'REPENTING', 'OF', 'HIS', 'VIOLENCE', 'AND', 'MOVED', 'BY', 'A', 'TEARS', 'HE', 'HAD', 'ONLY', 'CARRIED', 'HER', 'HALF', 'WAY', 'TOWARDS', 'HIS', 'HOUSE', 'AND', 'THEN', 'LET', 'HER', 'GO'] +5639-40744-0017-154: ref=['CHOKING', 'WITH', 'EMOTION', 'LEOCADI', 'MADE', 'A', 'SIGN', 'TO', 'HER', 'PARENTS', 'THAT', 'SHE', 'WISHED', 'TO', 'BE', 'ALONE', 'WITH', 'THEM'] +5639-40744-0017-154: hyp=['CHOKING', 'WITH', 'EMOTION', 'LOCATIA', 'MADE', 'A', 'SIGN', 'TO', 'HER', 'PARENTS', 'THAT', 'SHE', 'WISHED', 'TO', 'BE', 'ALONE', 'WITH', 'THEM'] +5639-40744-0018-155: ref=['THAT', 'WOULD', 'BE', 'VERY', 'WELL', 'MY', 'CHILD', 'REPLIED', 'HER', 'FATHER', 'IF', 'YOUR', 'PLAN', 'WERE', 'NOT', 'LIABLE', 'TO', 'BE', 'FRUSTRATED', 'BY', 'ORDINARY', 'CUNNING', 'BUT', 'NO', 'DOUBT', 'THIS', 'IMAGE', 'HAS', 'BEEN', 'ALREADY', 'MISSED', 'BY', 'ITS', 'OWNER', 'AND', 'HE', 'WILL', 'HAVE', 'SET', 'IT', 'DOWN', 'FOR', 'CERTAIN', 'THAT', 'IT', 'WAS', 'TAKEN', 'OUT', 'OF', 'THE', 'ROOM', 'BY', 'THE', 'PERSON', 'HE', 'LOCKED', 'UP', 'THERE'] +5639-40744-0018-155: hyp=['THAT', 'WOULD', 'BE', 'VERY', 'WELL', 'MY', 'CHILD', 'REPLIED', 'HER', 'FATHER', 'IF', 'YOUR', 'PLAN', 'WERE', 'NOT', 'LIABLE', 'TO', 'BE', 'FRUSTRATED', 'BY', 'ORDINARY', 'CUNNING', 'BUT', 'NO', 'DOUBT', 'THIS', 'IMAGE', 'HAD', 'BEEN', 'ALREADY', 'MISSED', 'BY', 'ITS', 'OWNER', 'AND', 'HE', 'WILL', 'HAVE', 'SET', 'IT', 'DOWN', 'FOR', 'CERTAIN', 'THAT', 'IT', 'WAS', 'TAKEN', 'OUT', 'OF', 'THE', 'ROOM', 'BY', 'THE', 'PERSON', 'HE', 'LOCKED', 'UP', 'THERE'] +5639-40744-0019-156: 
ref=['WHAT', 'YOU', 'HAD', 'BEST', 'DO', 'MY', 'CHILD', 'IS', 'TO', 'KEEP', 'IT', 'AND', 'PRAY', 'TO', 'IT', 'THAT', 'SINCE', 'IT', 'WAS', 'A', 'WITNESS', 'TO', 'YOUR', 'UNDOING', 'IT', 'WILL', 'DEIGN', 'TO', 'VINDICATE', 'YOUR', 'CAUSE', 'BY', 'ITS', 'RIGHTEOUS', 'JUDGMENT'] +5639-40744-0019-156: hyp=['WHAT', 'YOU', 'HAD', 'BEST', 'DO', 'MY', 'CHILD', 'IS', 'TO', 'KEEP', 'IT', 'AND', 'PRAY', 'TO', 'IT', 'THAT', 'SINS', 'IT', 'WAS', 'A', 'WITNESS', 'TO', 'YOUR', 'UNDOING', 'IT', 'WILL', 'DEIGN', 'TO', 'VINDICATE', 'YOUR', 'CAUSE', 'BY', 'ITS', 'RIGHTEOUS', 'JUDGMENT'] +5639-40744-0020-157: ref=['THUS', 'DID', 'THIS', 'HUMANE', 'AND', 'RIGHT', 'MINDED', 'FATHER', 'COMFORT', 'HIS', 'UNHAPPY', 'DAUGHTER', 'AND', 'HER', 'MOTHER', 'EMBRACING', 'HER', 'AGAIN', 'DID', 'ALL', 'SHE', 'COULD', 'TO', 'SOOTHE', 'HER', 'FEELINGS'] +5639-40744-0020-157: hyp=['THUS', 'DID', 'THE', 'HUMANE', 'AND', 'RIGHT', 'MINDED', 'FATHER', 'COMFORT', 'HIS', 'UNHAPPY', 'DAUGHTER', 'AND', 'HER', 'MOTHER', 'EMBRACING', 'HER', 'AGAIN', 'DID', 'ALL', 'SHE', 'COULD', 'TO', 'SOOTHE', 'THE', 'FEELINGS'] +5639-40744-0021-158: ref=['SHE', 'MEANWHILE', 'PASSED', 'HER', 'LIFE', 'WITH', 'HER', 'PARENTS', 'IN', 'THE', 'STRICTEST', 'RETIREMENT', 'NEVER', 'LETTING', 'HERSELF', 'BE', 'SEEN', 'BUT', 'SHUNNING', 'EVERY', 'EYE', 'LEST', 'IT', 'SHOULD', 'READ', 'HER', 'MISFORTUNE', 'IN', 'HER', 'FACE'] +5639-40744-0021-158: hyp=['SHE', 'MEANWHILE', 'PAST', 'HER', 'LIFE', 'WITH', 'HER', 'PARENTS', 'IN', 'THE', 'STRICTEST', 'RETIREMENT', 'NEVER', 'LETTING', 'HERSELF', 'BE', 'SEEN', 'BUT', 'SHUNNING', 'EVERY', 'EYE', 'LEST', 'IT', 'SHOULD', 'READ', 'HER', 'MISFORTUNE', 'IN', 'HER', 'FACE'] +5639-40744-0022-159: ref=['TIME', 'ROLLED', 'ON', 'THE', 'HOUR', 'OF', 'HER', 'DELIVERY', 'ARRIVED', 'IT', 'TOOK', 'PLACE', 'IN', 'THE', 'UTMOST', 'SECRECY', 'HER', 'MOTHER', 'TAKING', 'UPON', 'HER', 'THE', 'OFFICE', 'OF', 'MIDWIFE', 'AND', 'SHE', 'GAVE', 'BIRTH', 'TO', 'A', 'SON', 'ONE', 'OF', 'THE', 'MOST', 'BEAUTIFUL', 'EVER', 'SEEN'] +5639-40744-0022-159: hyp=['TIME', 'ROLLED', 'ON', 'THE', 'HOUR', 'OF', 'HER', 'DELIVERY', 'ARRIVED', 'IT', 'TOOK', 'PLACE', 'IN', 'THE', 'UTMOST', 'SECRECY', 'HER', 'MOTHER', 'TAKING', 'UP', 'ON', 'HER', 'THE', 'OFFICE', 'OF', 'MIDWIFE', 'AS', 'SHE', 'GAVE', 'BIRTH', 'TO', 'A', 'SON', 'ONE', 'OF', 'THE', 'MOST', 'BEAUTIFUL', 'EVER', 'SEEN'] +5639-40744-0023-160: ref=['WHEN', 'THE', 'BOY', 'WALKED', 'THROUGH', 'THE', 'STREETS', 'BLESSINGS', 'WERE', 'SHOWERED', 'UPON', 'HIM', 'BY', 'ALL', 'WHO', 'SAW', 'HIM', 'BLESSINGS', 'UPON', 'HIS', 'BEAUTY', 'UPON', 'THE', 'MOTHER', 'THAT', 'BORE', 'HIM', 'UPON', 'THE', 'FATHER', 'THAT', 'BEGOT', 'HIM', 'UPON', 'THOSE', 'WHO', 'BROUGHT', 'HIM', 'UP', 'SO', 'WELL'] +5639-40744-0023-160: hyp=['AND', 'THE', 'BOY', 'WALKED', 'THROUGH', 'THE', 'STREETS', 'BLESSINGS', 'WHERE', 'SHOWERED', 'UP', 'ON', 'HIM', 'BY', 'ALL', 'WHO', 'SAW', 'HIM', 'BLESSING', 'UPON', 'HIS', 'BEAUTY', 'UPON', 'THE', 'MOTHER', 'THAT', 'BORE', 'HIM', 'UPON', 'THE', 'FATHER', 'THAT', 'BEGOT', 'HIM', 'UPON', 'THOSE', 'WHO', 'BROUGHT', 'HIM', 'UP', 'SO', 'WELL'] +5639-40744-0024-161: ref=['ONE', 'DAY', 'WHEN', 'THE', 'BOY', 'WAS', 'SENT', 'BY', 'HIS', 'GRANDFATHER', 'WITH', 'A', 'MESSAGE', 'TO', 'A', 'RELATION', 'HE', 'PASSED', 'ALONG', 'A', 'STREET', 'IN', 'WHICH', 'THERE', 'WAS', 'A', 'GREAT', 'CONCOURSE', 'OF', 'HORSEMEN'] +5639-40744-0024-161: hyp=['ONE', 'DAY', 'WHEN', 'THE', 'BOY', 'WAS', 'SENT', 'BY', 'HIS', 'GRANDFATHER', 'WITH', 'A', 'MESSAGE', 'TO', 'A', 'RELATION', 'HE', 'PASSED', 'ALONG', 'A', 'STREET', 
'IN', 'WHICH', 'THERE', 'WAS', 'A', 'GREAT', 'CONCOURSE', 'OF', 'HORSEMEN'] +5639-40744-0025-162: ref=['THE', 'BED', 'SHE', 'TOO', 'WELL', 'REMEMBERED', 'WAS', 'THERE', 'AND', 'ABOVE', 'ALL', 'THE', 'CABINET', 'ON', 'WHICH', 'HAD', 'STOOD', 'THE', 'IMAGE', 'SHE', 'HAD', 'TAKEN', 'AWAY', 'WAS', 'STILL', 'ON', 'THE', 'SAME', 'SPOT'] +5639-40744-0025-162: hyp=['THE', 'BED', 'SHE', 'TOO', 'WELL', 'REMEMBERED', 'WAS', 'THERE', 'AND', 'ABOVE', 'ALL', 'THE', 'CABINET', 'ON', 'WHICH', 'HAD', 'STOOD', 'THE', 'IMAGE', 'SHE', 'HAD', 'TAKEN', 'AWAY', 'WAS', 'STILL', 'ON', 'THE', 'SAME', 'SPOT'] +5639-40744-0026-163: ref=['LUIS', 'WAS', 'OUT', 'OF', 'DANGER', 'IN', 'A', 'FORTNIGHT', 'IN', 'A', 'MONTH', 'HE', 'ROSE', 'FROM', 'HIS', 'BED', 'AND', 'DURING', 'ALL', 'THAT', 'TIME', 'HE', 'WAS', 'VISITED', 'DAILY', 'BY', 'HIS', 'MOTHER', 'AND', 'GRANDMOTHER', 'AND', 'TREATED', 'BY', 'THE', 'MASTER', 'AND', 'MISTRESS', 'OF', 'THE', 'HOUSE', 'AS', 'IF', 'HE', 'WAS', 'THEIR', 'OWN', 'CHILD'] +5639-40744-0026-163: hyp=['LOUIS', 'WAS', 'OUT', 'OF', 'DANGER', 'IN', 'A', 'FORTNIGHT', 'IN', 'A', 'MONTH', 'HE', 'ROSE', 'FROM', 'HIS', 'BED', 'AND', 'DREWING', 'ALL', 'THAT', 'TIME', 'HE', 'WAS', 'VISITED', 'DAILY', 'BY', 'HIS', 'MOTHER', 'AND', 'GRANDMOTHER', 'AND', 'TREATED', 'BY', 'THE', 'MASTER', 'AND', 'MISTRESS', 'OF', 'THE', 'HOUSE', 'AS', 'IF', 'HE', 'WAS', 'THEIR', 'OWN', 'CHILD'] +5639-40744-0027-164: ref=['THUS', 'SAYING', 'AND', 'PRESSING', 'THE', 'CRUCIFIX', 'TO', 'HER', 'BREAST', 'SHE', 'FELL', 'FAINTING', 'INTO', 'THE', 'ARMS', 'OF', 'DONA', 'ESTAFANIA', 'WHO', 'AS', 'A', 'GENTLEWOMAN', 'TO', 'WHOSE', 'SEX', 'PITY', 'IS', 'AS', 'NATURAL', 'AS', 'CRUELTY', 'IS', 'TO', 'MAN', 'INSTANTLY', 'PRESSED', 'HER', 'LIPS', 'TO', 'THOSE', 'OF', 'THE', 'FAINTING', 'GIRL', 'SHEDDING', 'OVER', 'HER', 'SO', 'MANY', 'TEARS', 'THAT', 'THERE', 'NEEDED', 'NO', 'OTHER', 'SPRINKLING', 'OF', 'WATER', 'TO', 'RECOVER', 'LEOCADIA', 'FROM', 'HER', 'SWOON'] +5639-40744-0027-164: hyp=['THUS', 'SAYING', 'AND', 'PRESSING', 'THE', 'CRUCIFIX', 'TO', 'HER', 'BREAST', 'SHE', 'FELL', 'FAINTING', 'INTO', 'THE', 'ARMS', 'OF', 'DONA', 'ESTAPHANIA', 'WHO', 'AS', 'A', 'GENTLEWOMAN', 'TO', 'WHOSE', 'SEX', 'PITY', 'IS', 'A', 'NATURAL', 'AS', 'CRUELTY', 'AS', 'TO', 'MAN', 'INSTANTLY', 'PRESSED', 'HER', 'LIPS', 'TO', 'THOSE', 'OF', 'THE', 'FAINTING', 'GIRL', 'SHEDDING', 'OVER', 'HER', 'SO', 'MANY', 'TEARS', 'THAT', 'THERE', 'NEEDED', 'NO', 'OTHER', 'SPRINKLING', 'OF', 'WATER', 'TO', 'RECOVER', 'LOCATIA', 'FROM', 'HER', 'SWOON'] +5639-40744-0028-165: ref=['I', 'HAVE', 'GREAT', 'THINGS', 'TO', 'TELL', 'YOU', 'SENOR', 'SAID', 'DONA', 'ESTAFANIA', 'TO', 'HER', 'HUSBAND', 'THE', 'CREAM', 'AND', 'SUBSTANCE', 'OF', 'WHICH', 'IS', 'THIS', 'THE', 'FAINTING', 'GIRL', 'BEFORE', 'YOU', 'IS', 'YOUR', 'DAUGHTER', 'AND', 'THAT', 'BOY', 'IS', 'YOUR', 'GRANDSON'] +5639-40744-0028-165: hyp=['I', 'HAVE', 'GREAT', 'THINGS', 'TO', 'TELL', 'YOU', 'SENOR', 'SAID', 'DORIS', 'DANIA', 'TO', 'HER', 'HUSBAND', 'THE', 'CREAM', 'AND', 'SUBSTANCE', 'OF', 'WHICH', 'IS', 'THIS', 'THE', 'FAINTING', 'GIRL', 'BEFORE', 'YOU', 'IS', 'YOUR', 'DAUGHTER', 'AND', 'THE', 'BOY', 'IS', 'YOUR', 'GRANDSON'] +5639-40744-0029-166: ref=['THIS', 'TRUTH', 'WHICH', 'I', 'HAVE', 'LEARNED', 'FROM', 'HER', 'LIPS', 'IS', 'CONFIRMED', 'BY', 'HIS', 'FACE', 'IN', 'WHICH', 'WE', 'HAVE', 'BOTH', 'BEHELD', 'THAT', 'OF', 'OUR', 'SON'] +5639-40744-0029-166: hyp=['THIS', 'TRUTH', 'WHICH', 'I', 'HAVE', 'LEARNED', 'FROM', 'HER', 'LIPS', 'IS', 'CONFIRMED', 'BY', 'HIS', 'FACE', 'IN', 'WHICH', 'WE', 'HAVE', 'BOTH', 
'BEHELD', 'THAT', 'OF', 'OUR', 'SON'] +5639-40744-0030-167: ref=['JUST', 'THEN', 'LEOCADIA', 'CAME', 'TO', 'HERSELF', 'AND', 'EMBRACING', 'THE', 'CROSS', 'SEEMED', 'CHANGED', 'INTO', 'A', 'SEA', 'OF', 'TEARS', 'AND', 'THE', 'GENTLEMAN', 'REMAINED', 'IN', 'UTTER', 'BEWILDERMENT', 'UNTIL', 'HIS', 'WIFE', 'HAD', 'REPEATED', 'TO', 'HIM', 'FROM', 'BEGINNING', 'TO', 'END', "LEOCADIA'S", 'WHOLE', 'STORY', 'AND', 'HE', 'BELIEVED', 'IT', 'THROUGH', 'THE', 'BLESSED', 'DISPENSATION', 'OF', 'HEAVEN', 'WHICH', 'HAD', 'CONFIRMED', 'IT', 'BY', 'SO', 'MANY', 'CONVINCING', 'TESTIMONIES'] +5639-40744-0030-167: hyp=['JUST', 'THEN', 'LEOCAYA', 'CAME', 'TO', 'HERSELF', 'AND', 'EMBRACING', 'THE', 'CROSS', 'SEEMED', 'CHANGED', 'INTO', 'A', 'SEA', 'OF', 'TEARS', 'AND', 'THE', 'GENTLEMAN', 'REMAINING', 'IN', 'OUT', 'OF', 'A', 'WILDERMENT', 'UNTIL', 'HIS', 'WIFE', 'HAD', 'REPEATED', 'TO', 'HIM', 'FROM', 'BEGINNING', 'TO', 'END', 'LOCATEOUS', 'WHOLE', 'STORY', 'AND', 'HE', 'BELIEVED', 'IT', 'THROUGH', 'THE', 'BLESSED', 'DISPENSATION', 'OF', 'HEAVEN', 'WHICH', 'HAD', 'CONFIRMED', 'IT', 'BY', 'SO', 'MANY', 'CONVINCING', 'TESTIMONIES'] +5639-40744-0031-168: ref=['SO', 'PERSUASIVE', 'WERE', 'HER', 'ENTREATIES', 'AND', 'SO', 'STRONG', 'HER', 'ASSURANCES', 'THAT', 'NO', 'HARM', 'WHATEVER', 'COULD', 'RESULT', 'TO', 'THEM', 'FROM', 'THE', 'INFORMATION', 'SHE', 'SOUGHT', 'THEY', 'WERE', 'INDUCED', 'TO', 'CONFESS', 'THAT', 'ONE', "SUMMER'S", 'NIGHT', 'THE', 'SAME', 'SHE', 'HAD', 'MENTIONED', 'THEMSELVES', 'AND', 'ANOTHER', 'FRIEND', 'BEING', 'OUT', 'ON', 'A', 'STROLL', 'WITH', 'RODOLFO', 'THEY', 'HAD', 'BEEN', 'CONCERNED', 'IN', 'THE', 'ABDUCTION', 'OF', 'A', 'GIRL', 'WHOM', 'RODOLFO', 'CARRIED', 'OFF', 'WHILST', 'THE', 'REST', 'OF', 'THEM', 'DETAINED', 'HER', 'FAMILY', 'WHO', 'MADE', 'A', 'GREAT', 'OUTCRY', 'AND', 'WOULD', 'HAVE', 'DEFENDED', 'HER', 'IF', 'THEY', 'COULD'] +5639-40744-0031-168: hyp=['SO', 'PERSUASIVE', 'WERE', 'HER', 'ENTREATIES', 'AND', 'SO', 'STRONG', 'HER', 'ASSURANCES', 'THAT', 'NO', 'HARM', 'WHATEVER', 'COULD', 'RESULT', 'TO', 'THEM', 'FROM', 'THE', 'INFORMATION', 'SHE', 'SOUGHT', 'THEY', 'WERE', 'INDUCED', 'TO', 'CONFESS', 'THAT', 'ONE', "SUMMER'S", 'NIGHT', 'THE', 'SAME', 'SHE', 'HAD', 'MENTIONED', 'THEMSELVES', 'AND', 'ANOTHER', 'FRIEND', 'BEING', 'OUT', 'ON', 'THE', 'STROLL', 'WITH', 'RUDOLPHO', 'THEY', 'HAD', 'BEEN', 'CONCERNED', 'IN', 'THE', 'ADOPTION', 'OF', 'A', 'GIRL', 'WHOM', 'RUDOLPHO', 'CARRIED', 'OFF', 'WHILST', 'THE', 'REST', 'OF', 'THEM', 'DETAINED', 'HER', 'FAMILY', 'WHO', 'MADE', 'A', 'GREAT', 'OUTCRY', 'AND', 'WOULD', 'HAVE', 'DEFENDED', 'HER', 'IF', 'THEY', 'COULD'] +5639-40744-0032-169: ref=['FOR', "GOD'S", 'SAKE', 'MY', 'LADY', 'MOTHER', 'GIVE', 'ME', 'A', 'WIFE', 'WHO', 'WOULD', 'BE', 'AN', 'AGREEABLE', 'COMPANION', 'NOT', 'ONE', 'WHO', 'WILL', 'DISGUST', 'ME', 'SO', 'THAT', 'WE', 'MAY', 'BOTH', 'BEAR', 'EVENLY', 'AND', 'WITH', 'MUTUAL', 'GOOD', 'WILL', 'THE', 'YOKE', 'IMPOSED', 'ON', 'US', 'BY', 'HEAVEN', 'INSTEAD', 'OF', 'PULLING', 'THIS', 'WAY', 'AND', 'THAT', 'WAY', 'AND', 'FRETTING', 'EACH', 'OTHER', 'TO', 'DEATH'] +5639-40744-0032-169: hyp=['FOR', "GOD'S", 'SAKE', 'MY', 'LADY', 'MOTHER', 'GIVE', 'ME', 'A', 'WIFE', 'WHO', 'WILL', 'BE', 'AN', 'AGREEABLE', 'COMPANION', 'NOT', 'ONE', 'WHO', 'WILL', 'DISGUST', 'ME', 'SO', 'THAT', 'WE', 'MAY', 'BOTH', 'BEAR', 'EVENLY', 'AND', 'WITH', 'MUTUAL', 'GOOD', 'WILL', 'THE', 'YOKE', 'AND', 'POST', 'ON', 'US', 'BY', 'HEAVEN', 'INSTEAD', 'OF', 'PULLING', 'THIS', 'WAY', 'AND', 'THAT', 'WAY', 'AND', 'FRETTING', 'EACH', 'OTHER', 'TO', 'DEATH'] 
+5639-40744-0033-170: ref=['HER', 'BEARING', 'WAS', 'GRACEFUL', 'AND', 'ANIMATED', 'SHE', 'LED', 'HER', 'SON', 'BY', 'THE', 'HAND', 'AND', 'BEFORE', 'HER', 'WALKED', 'TWO', 'MAIDS', 'WITH', 'WAX', 'LIGHTS', 'AND', 'SILVER', 'CANDLESTICKS'] +5639-40744-0033-170: hyp=['HER', 'BEARING', 'WAS', 'GRACEFUL', 'AND', 'ANIMATED', 'SHE', 'LED', 'HER', 'SON', 'BY', 'THE', 'HAND', 'AND', 'BEFORE', 'HER', 'WALKED', 'TWO', 'MAIDS', 'WITH', 'WAX', 'LIGHTS', 'AND', 'SILVER', 'CANDLESTICKS'] +5639-40744-0034-171: ref=['ALL', 'ROSE', 'TO', 'DO', 'HER', 'REVERENCE', 'AS', 'IF', 'SOMETHING', 'FROM', 'HEAVEN', 'HAD', 'MIRACULOUSLY', 'APPEARED', 'BEFORE', 'THEM', 'BUT', 'GAZING', 'ON', 'HER', 'ENTRANCED', 'WITH', 'ADMIRATION', 'NOT', 'ONE', 'OF', 'THEM', 'WAS', 'ABLE', 'TO', 'ADDRESS', 'A', 'SINGLE', 'WORD', 'TO', 'HER'] +5639-40744-0034-171: hyp=['ALL', 'ROSE', 'TO', 'DO', 'HER', 'REVERENCE', 'AS', 'IF', 'SOMETHING', 'FROM', 'HEAVEN', 'HAD', 'MIRACULOUSLY', 'APPEARED', 'BEFORE', 'THEM', 'BUT', 'GAZING', 'ON', 'HER', 'AND', 'TRANCED', 'WITH', 'ADMIRATION', 'NOT', 'ONE', 'OF', 'THEM', 'WAS', 'ABLE', 'TO', 'ADDRESS', 'A', 'SINGLE', 'WORD', 'TO', 'HER'] +5639-40744-0035-172: ref=['SHE', 'REFLECTED', 'HOW', 'NEAR', 'SHE', 'STOOD', 'TO', 'THE', 'CRISIS', 'WHICH', 'WAS', 'TO', 'DETERMINE', 'WHETHER', 'SHE', 'WAS', 'TO', 'BE', 'BLESSED', 'OR', 'UNHAPPY', 'FOR', 'EVER', 'AND', 'RACKED', 'BY', 'THE', 'INTENSITY', 'OF', 'HER', 'EMOTIONS', 'SHE', 'SUDDENLY', 'CHANGED', 'COLOUR', 'HER', 'HEAD', 'DROPPED', 'AND', 'SHE', 'FELL', 'FORWARD', 'IN', 'A', 'SWOON', 'INTO', 'THE', 'ARMS', 'OF', 'THE', 'DISMAYED', 'ESTAFANIA'] +5639-40744-0035-172: hyp=['SHE', 'REFLECTED', 'HOW', 'NEAR', 'SHE', 'STOOD', 'TO', 'THE', 'CRISIS', 'WHICH', 'WAS', 'TO', 'DETERMINE', 'WHETHER', 'SHE', 'WAS', 'TO', 'BE', 'BLESSED', 'OR', 'UNHAPPY', 'FOR', 'EVER', 'AND', 'RACKED', 'BY', 'THE', 'INTENSITY', 'OF', 'HER', 'EMOTIONS', 'SHE', 'SUDDENLY', 'CHANGED', 'COLOR', 'HER', 'HEAD', 'DROPPED', 'AND', 'SHE', 'FELL', 'FORWARD', 'IN', 'A', 'SWOON', 'INTO', 'THE', 'ARMS', 'OF', 'THE', 'DISMAYED', 'STEFFANIA'] +5639-40744-0036-173: ref=['HIS', 'MOTHER', 'HAD', 'LEFT', 'HER', 'TO', 'HIM', 'AS', 'BEING', 'HER', 'DESTINED', 'PROTECTOR', 'BUT', 'WHEN', 'SHE', 'SAW', 'THAT', 'HE', 'TOO', 'WAS', 'INSENSIBLE', 'SHE', 'WAS', 'NEAR', 'MAKING', 'A', 'THIRD', 'AND', 'WOULD', 'HAVE', 'DONE', 'SO', 'HAD', 'HE', 'NOT', 'COME', 'TO', 'HIMSELF'] +5639-40744-0036-173: hyp=['HIS', 'MOTHER', 'HAD', 'LEFT', 'HER', 'TO', 'HIM', 'AS', 'BEING', 'HER', 'DESTINED', 'PROTECTOR', 'BUT', 'WHEN', 'SHE', 'SAW', 'THAT', 'HE', 'TOO', 'WAS', 'INSENSIBLE', 'SHE', 'WAS', 'NEAR', 'MAKING', 'A', 'THIRD', 'AND', 'WOULD', 'HAVE', 'DONE', 'SO', 'HAD', 'HE', 'NOT', 'COME', 'TO', 'HIMSELF'] +5639-40744-0037-174: ref=['KNOW', 'THEN', 'SON', 'OF', 'MY', 'HEART', 'THAT', 'THIS', 'FAINTING', 'LADY', 'IS', 'YOUR', 'REAL', 'BRIDE', 'I', 'SAY', 'REAL', 'BECAUSE', 'SHE', 'IS', 'THE', 'ONE', 'WHOM', 'YOUR', 'FATHER', 'AND', 'I', 'HAVE', 'CHOSEN', 'FOR', 'YOU', 'AND', 'THE', 'PORTRAIT', 'WAS', 'A', 'PRETENCE'] +5639-40744-0037-174: hyp=['KNOW', 'THEN', 'SON', 'OF', 'MY', 'HEART', 'THAT', 'THIS', 'FAINTING', 'LADY', 'IS', 'YOUR', 'REAL', 'BRIDE', 'I', 'SAY', 'REAL', 'BECAUSE', 'SHE', 'IS', 'THE', 'ONE', 'WHOM', 'YOUR', 'FATHER', 'AND', 'I', 'HAVE', 'CHOSEN', 'FOR', 'YOU', 'AND', 'A', 'PORTRAIT', 'WAS', 'A', 'PRETENCE'] +5639-40744-0038-175: ref=['JUST', 'AT', 'THE', 'MOMENT', 'WHEN', 'THE', 'TEARS', 'OF', 'THE', 'PITYING', 'BEHOLDERS', 'FLOWED', 'FASTEST', 'AND', 'THEIR', 'EJACULATIONS', 'WERE', 'MOST', 
'EXPRESSIVE', 'OF', 'DESPAIR', 'LEOCADIA', 'GAVE', 'SIGNS', 'OF', 'RECOVERY', 'AND', 'BROUGHT', 'BACK', 'GLADNESS', 'TO', 'THE', 'HEARTS', 'OF', 'ALL'] +5639-40744-0038-175: hyp=['JUST', 'AT', 'A', 'MOMENT', 'WHEN', 'THE', 'TEARS', 'OF', 'THE', 'PITYING', 'BEHOLDERS', 'FLOWED', 'FASTEST', 'AND', 'THERE', 'EJACULATIONS', 'WERE', 'MOST', 'EXPRESSIVE', 'OF', 'DESPAIR', 'LE', 'OCCADIA', 'GAVE', 'SIGNS', 'OF', 'RECOVERY', 'AND', 'BROUGHT', 'BACK', 'GLADNESS', 'THROUGH', 'THE', 'HEARTS', 'OF', 'ALL'] +5639-40744-0039-176: ref=['WHEN', 'SHE', 'CAME', 'TO', 'HER', 'SENSES', 'AND', 'BLUSHING', 'TO', 'FIND', 'HERSELF', 'IN', "RODOLFO'S", 'ARMS', 'WOULD', 'HAVE', 'DISENGAGED', 'HERSELF', 'NO', 'SENORA', 'HE', 'SAID', 'THAT', 'MUST', 'NOT', 'BE', 'STRIVE', 'NOT', 'TO', 'WITHDRAW', 'FROM', 'THE', 'ARMS', 'OF', 'HIM', 'WHO', 'HOLDS', 'YOU', 'IN', 'HIS', 'SOUL'] +5639-40744-0039-176: hyp=['WHEN', 'SHE', 'CAME', 'TO', 'HER', 'SENSES', 'AND', 'BLUSHING', 'TO', 'FIND', 'HERSELF', 'IN', "RIDOLPH'S", 'ARMS', 'WOULD', 'HAVE', 'DISENGAGED', 'HERSELF', 'NO', 'SENORA', 'HE', 'SAID', 'THAT', 'MUST', 'NOT', 'BE', 'STRIVE', 'NOT', 'TO', 'WITHDRAW', 'FROM', 'THE', 'ARMS', 'OF', 'HIM', 'WHO', 'HOLDS', 'YOU', 'IN', 'HIS', 'SOUL'] +5639-40744-0040-177: ref=['THIS', 'WAS', 'DONE', 'FOR', 'THE', 'EVENT', 'TOOK', 'PLACE', 'AT', 'A', 'TIME', 'WHEN', 'THE', 'CONSENT', 'OF', 'THE', 'PARTIES', 'WAS', 'SUFFICIENT', 'FOR', 'THE', 'CELEBRATION', 'OF', 'A', 'MARRIAGE', 'WITHOUT', 'ANY', 'OF', 'THE', 'PRELIMINARY', 'FORMALITIES', 'WHICH', 'ARE', 'NOW', 'SO', 'PROPERLY', 'REQUIRED'] +5639-40744-0040-177: hyp=['THIS', 'WAS', 'DONE', 'FOR', 'THE', 'EVENT', 'TOOK', 'PLACE', 'AT', 'A', 'TIME', 'BUT', 'THE', 'CONSENT', 'OF', 'THE', 'PARTIES', 'WAS', 'SUFFICIENT', 'FOR', 'THE', 'CELEBRATION', 'OF', 'THE', 'MARRIAGE', 'WITHOUT', 'ANY', 'OF', 'THE', 'PRELIMINARY', 'FORMALITIES', 'WHICH', 'ARE', 'NOW', 'SO', 'PROPERLY', 'REQUIRED'] +5639-40744-0041-178: ref=['NOR', 'WAS', 'RODOLFO', 'LESS', 'SURPRISED', 'THAN', 'THEY', 'AND', 'THE', 'BETTER', 'TO', 'ASSURE', 'HIMSELF', 'OF', 'SO', 'WONDERFUL', 'A', 'FACT', 'HE', 'BEGGED', 'LEOCADIA', 'TO', 'GIVE', 'HIM', 'SOME', 'TOKEN', 'WHICH', 'SHOULD', 'MAKE', 'PERFECTLY', 'CLEAR', 'TO', 'HIM', 'THAT', 'WHICH', 'INDEED', 'HE', 'DID', 'NOT', 'DOUBT', 'SINCE', 'IT', 'WAS', 'AUTHENTICATED', 'BY', 'HIS', 'PARENTS'] +5639-40744-0041-178: hyp=['NOR', 'WAS', 'RUDOLPHAL', 'LESS', 'SURPRISED', 'THAN', 'THEY', 'AND', 'A', 'BETTER', 'TO', 'ASSURE', 'HIMSELF', 'OF', 'SO', 'WONDERFUL', 'A', 'FACT', 'HE', 'BEGGED', 'LOCATIA', 'TO', 'GIVE', 'HIM', 'SOME', 'TOKEN', 'WHICH', 'SHOULD', 'MAKE', 'PERFECTLY', 'CLEAR', 'TO', 'HIM', 'THAT', 'WHICH', 'INDEED', 'HE', 'DID', 'NOT', 'DOUBT', 'SINCE', 'IT', 'WAS', 'AUTHENTICATED', 'BY', 'HIS', 'PARENTS'] +5683-32865-0000-2483: ref=['YOU', 'KNOW', 'CAPTAIN', 'LAKE'] +5683-32865-0000-2483: hyp=['YOU', 'KNOW', 'CAPTAIN', 'LAKE'] +5683-32865-0001-2484: ref=['SAID', 'LORD', 'CHELFORD', 'ADDRESSING', 'ME'] +5683-32865-0001-2484: hyp=['SAID', 'LORD', 'CHELFORD', 'ADDRESSING', 'ME'] +5683-32865-0002-2485: ref=['HE', 'HAD', 'HIS', 'HAND', 'UPON', "LAKE'S", 'SHOULDER'] +5683-32865-0002-2485: hyp=['HE', 'HAD', 'HIS', 'HAND', 'UPON', "LAKE'S", 'SHOULDER'] +5683-32865-0003-2486: ref=['THEY', 'ARE', 'COUSINS', 'YOU', 'KNOW', 'WE', 'ARE', 'ALL', 'COUSINS'] +5683-32865-0003-2486: hyp=['THEY', 'ARE', 'COUSINS', 'YOU', 'KNOW', 'WE', 'ARE', 'ALL', 'COUSINS'] +5683-32865-0004-2487: ref=['WHATEVER', 'LORD', 'CHELFORD', 'SAID', 'MISS', 'BRANDON', 'RECEIVED', 'IT', 'VERY', 'GRACIOUSLY', 'AND', 'EVEN', 
'WITH', 'A', 'MOMENTARY', 'SMILE'] +5683-32865-0004-2487: hyp=['WHATEVER', 'LORD', 'CHELFORD', 'SAID', 'MISS', 'BRANDON', 'RECEIVED', 'IT', 'VERY', 'GRACIOUSLY', 'AND', 'EVEN', 'WITH', 'A', 'MOMENTARY', 'SMILE'] +5683-32865-0005-2488: ref=['BUT', 'HER', 'GREETING', 'TO', 'CAPTAIN', 'LAKE', 'WAS', 'MORE', 'THAN', 'USUALLY', 'HAUGHTY', 'AND', 'FROZEN', 'AND', 'HER', 'FEATURES', 'I', 'FANCIED', 'PARTICULARLY', 'PROUD', 'AND', 'PALE'] +5683-32865-0005-2488: hyp=['BUT', 'HER', 'GREETING', 'TO', 'CAPTAIN', 'LEAK', 'WAS', 'MORE', 'THAN', 'USUALLY', 'HAUGHTY', 'AND', 'FROZEN', 'AND', 'HER', 'FEATURES', 'I', 'FANCIED', 'PARTICULARLY', 'PROUD', 'AND', 'PALE'] +5683-32865-0006-2489: ref=['AT', 'DINNER', 'LAKE', 'WAS', 'EASY', 'AND', 'AMUSING'] +5683-32865-0006-2489: hyp=['AT', 'DINNER', 'LAKE', 'WAS', 'EASY', 'AND', 'AMUSING'] +5683-32865-0007-2490: ref=["I'M", 'GLAD', 'YOU', 'LIKE', 'IT', 'SAYS', 'WYLDER', 'CHUCKLING', 'BENIGNANTLY', 'ON', 'IT', 'OVER', 'HIS', 'SHOULDER'] +5683-32865-0007-2490: hyp=['I', 'AM', 'GLAD', 'YOU', 'LIKE', 'IT', 'SAYS', 'WYLDER', 'CHUCKLING', 'BENIGNANTLY', 'ON', 'IT', 'OVER', 'HIS', 'SHOULDER'] +5683-32865-0008-2491: ref=['I', 'BELIEVE', 'I', 'HAVE', 'A', 'LITTLE', 'TASTE', 'THAT', 'WAY', 'THOSE', 'ARE', 'ALL', 'REAL', 'YOU', 'KNOW', 'THOSE', 'JEWELS'] +5683-32865-0008-2491: hyp=['I', 'BELIEVE', 'I', 'HAVE', 'A', 'LITTLE', 'TASTE', 'THAT', 'WAY', 'THOSE', 'ARE', 'ALL', 'REAL', 'YOU', 'KNOW', 'THOSE', 'JEWELS'] +5683-32865-0009-2492: ref=['AND', 'HE', 'PLACED', 'IT', 'IN', 'THAT', "GENTLEMAN'S", 'FINGERS', 'WHO', 'NOW', 'TOOK', 'HIS', 'TURN', 'AT', 'THE', 'LAMP', 'AND', 'CONTEMPLATED', 'THE', 'LITTLE', 'PARALLELOGRAM', 'WITH', 'A', 'GLEAM', 'OF', 'SLY', 'AMUSEMENT'] +5683-32865-0009-2492: hyp=['AND', 'HE', 'PLACED', 'IT', 'IN', 'THAT', "GENTLEMAN'S", 'FINGERS', 'WHO', 'NOW', 'TOOK', 'HIS', 'TURN', 'AT', 'THE', 'LAMP', 'AND', 'CONTEMPLATED', 'THE', 'LITTLE', 'PARALLELLOGRAM', 'WITH', 'A', 'GLEAM', 'OF', 'SLY', 'AMUSEMENT'] +5683-32865-0010-2493: ref=['I', 'WAS', 'THINKING', "IT'S", 'VERY', 'LIKE', 'THE', 'ACE', 'OF', 'HEARTS', 'ANSWERED', 'THE', 'CAPTAIN', 'SOFTLY', 'SMILING', 'ON'] +5683-32865-0010-2493: hyp=['I', 'WAS', 'THINKING', "IT'S", 'VERY', 'LIKE', 'THE', 'ACE', 'OF', 'HEARTS', 'ANSWERED', 'THE', 'CAPTAIN', 'SOFTLY', 'SMILING', 'ON'] +5683-32865-0011-2494: ref=['WHEREUPON', 'LAKE', 'LAUGHED', 'QUIETLY', 'STILL', 'LOOKING', 'ON', 'THE', 'ACE', 'OF', 'HEARTS', 'WITH', 'HIS', 'SLY', 'EYES'] +5683-32865-0011-2494: hyp=['WHEREUPON', 'LAKE', 'LAUGHED', 'QUIETLY', 'STILL', 'LOOKING', 'ON', 'THE', 'ACE', 'OF', 'HEARTS', 'WITH', 'HIS', 'SLY', 'EYES'] +5683-32865-0012-2495: ref=['AND', 'WYLDER', 'LAUGHED', 'TOO', 'MORE', 'SUDDENLY', 'AND', 'NOISILY', 'THAN', 'THE', 'HUMOUR', 'OF', 'THE', 'JOKE', 'SEEMED', 'QUITE', 'TO', 'CALL', 'FOR', 'AND', 'GLANCED', 'A', 'GRIM', 'LOOK', 'FROM', 'THE', 'CORNERS', 'OF', 'HIS', 'EYES', 'ON', 'LAKE', 'BUT', 'THE', 'GALLANT', 'CAPTAIN', 'DID', 'NOT', 'SEEM', 'TO', 'PERCEIVE', 'IT', 'AND', 'AFTER', 'A', 'FEW', 'SECONDS', 'MORE', 'HE', 'HANDED', 'IT', 'VERY', 'INNOCENTLY', 'BACK', 'TO', 'MISSUS', 'DOROTHY', 'ONLY', 'REMARKING'] +5683-32865-0012-2495: hyp=['AND', 'WHILE', 'THEIR', 'LEFT', 'TOO', 'MORE', 'SUDDENLY', 'AND', 'NOISILY', 'THAN', 'THE', 'HUMOR', 'OF', 'THE', 'JOKE', 'SEEMED', 'QUITE', 'TO', 'CALL', 'FOR', 'AND', 'GLANCED', 'A', 'GRIM', 'LOOK', 'FROM', 'THE', 'CORNERS', 'OF', 'HIS', 'EYES', 'UNLAKE', 'BUT', 'THE', 'GALLANT', 'CAPTAIN', 'DID', 'NOT', 'SEEM', 'TO', 'PERCEIVE', 'IT', 'AND', 'AFTER', 'A', 'FEW', 'SECONDS', 'MORE', 'HE', 
'HANDED', 'IT', 'VERY', 'INNOCENTLY', 'BACK', 'TO', 'MISSUS', 'DOROTHY', 'ONLY', 'REMARKING'] +5683-32865-0013-2496: ref=['DO', 'YOU', 'KNOW', 'LAKE', 'OH', 'I', 'REALLY', "CAN'T", 'TELL', 'BUT', "HE'LL", 'SOON', 'TIRE', 'OF', 'COUNTRY', 'LIFE'] +5683-32865-0013-2496: hyp=['DO', 'YOU', 'KNOW', 'LAKE', 'OH', 'I', 'REALLY', "CAN'T", 'TELL', 'BUT', "HE'LL", 'SOON', 'TIRE', 'OF', 'COUNTRY', 'LIFE'] +5683-32865-0014-2497: ref=["HE'S", 'NOT', 'A', 'MAN', 'FOR', 'COUNTRY', 'QUARTERS'] +5683-32865-0014-2497: hyp=["HE'S", 'NOT', 'A', 'MAN', 'FOR', 'COUNTRY', 'QUARTERS'] +5683-32865-0015-2498: ref=['I', 'HAD', 'A', 'HORRID', 'DREAM', 'ABOUT', 'HIM', 'LAST', 'NIGHT', 'THAT'] +5683-32865-0015-2498: hyp=['I', 'HAD', 'HORRID', 'DREAM', 'ABOUT', 'HIM', 'LAST', 'NIGHT', 'THAT'] +5683-32865-0016-2499: ref=['OH', 'I', 'KNOW', "THAT'S", 'LORNE', 'BRANDON'] +5683-32865-0016-2499: hyp=['OH', 'I', 'KNOW', "THAT'S", 'LORN', 'BRANDON'] +5683-32865-0017-2500: ref=['ALL', 'THE', 'TIME', 'HE', 'WAS', 'TALKING', 'TO', 'ME', 'HIS', 'ANGRY', 'LITTLE', 'EYES', 'WERE', 'FOLLOWING', 'LAKE'] +5683-32865-0017-2500: hyp=['ALL', 'THE', 'TIME', 'HE', 'WAS', 'TALKING', 'TO', 'ME', 'HIS', 'ANGRY', 'LITTLE', 'EYES', 'WERE', 'FOLLOWING', 'LAKE'] +5683-32866-0000-2527: ref=['MISS', 'LAKE', 'DECLINED', 'THE', 'CARRIAGE', 'TO', 'NIGHT'] +5683-32866-0000-2527: hyp=['MISS', 'LAKE', 'DECLINED', 'THE', 'CARRIAGE', 'TO', 'NIGHT'] +5683-32866-0001-2528: ref=['AND', 'HE', 'ADDED', 'SOMETHING', 'STILL', 'LESS', 'COMPLIMENTARY'] +5683-32866-0001-2528: hyp=['AND', 'HE', 'ADDED', 'SOME', 'THINGS', 'STILL', 'LESS', 'COMPLIMENTARY'] +5683-32866-0002-2529: ref=['BUT', "DON'T", 'THESE', 'VERY', 'WISE', 'THINGS', 'SOMETIMES', 'TURN', 'OUT', 'VERY', 'FOOLISHLY'] +5683-32866-0002-2529: hyp=['BUT', "DON'T", 'THESE', 'VERY', 'WISE', 'THINGS', 'SOMETIMES', 'TURN', 'OUT', 'VERY', 'FOOLISHLY'] +5683-32866-0003-2530: ref=['IN', 'THE', 'MEANTIME', 'I', 'HAD', 'FORMED', 'A', 'NEW', 'IDEA', 'OF', 'HER'] +5683-32866-0003-2530: hyp=['IN', 'THE', 'MEANTIME', 'I', 'HAD', 'FORMED', 'A', 'NEW', 'IDEA', 'OF', 'HER'] +5683-32866-0004-2531: ref=['BY', 'THIS', 'TIME', 'LORD', 'CHELFORD', 'AND', 'WYLDER', 'RETURNED', 'AND', 'DISGUSTED', 'RATHER', 'WITH', 'MYSELF', 'I', 'RUMINATED', 'ON', 'MY', 'WANT', 'OF', 'GENERAL', 'SHIP'] +5683-32866-0004-2531: hyp=['BY', 'THIS', 'TIME', 'LORD', 'CHELFORD', 'AND', 'WYLDER', 'RETURNED', 'AND', 'DISGUSTED', 'RATHER', 'WITH', 'MYSELF', 'I', 'RUMINATED', 'ON', 'MY', 'WANT', 'OF', 'GENERALSHIP'] +5683-32866-0005-2532: ref=['AND', 'HE', 'MADE', 'A', 'LITTLE', 'DIP', 'OF', 'HIS', 'CANE', 'TOWARDS', 'BRANDON', 'HALL', 'OVER', 'HIS', 'SHOULDER'] +5683-32866-0005-2532: hyp=['AND', 'HE', 'MADE', 'A', 'LITTLE', 'DIP', 'OF', 'HIS', 'CANE', 'TOWARDS', 'BRANDON', 'HALL', 'OVER', 'HIS', 'SHOULDER'] +5683-32866-0006-2533: ref=['YES', 'SO', 'THEY', 'SAID', 'BUT', 'THAT', 'WOULD', 'I', 'THINK', 'HAVE', 'BEEN', 'WORSE'] +5683-32866-0006-2533: hyp=['YES', 'SO', 'THEY', 'SAID', 'BUT', 'THAT', 'WOULD', 'I', 'THINK', 'HAVE', 'BEEN', 'WORSE'] +5683-32866-0007-2534: ref=['IF', 'A', "FELLOW'S", 'BEEN', 'A', 'LITTLE', 'BIT', 'WILD', "HE'S", 'BEELZEBUB', 'AT', 'ONCE'] +5683-32866-0007-2534: hyp=['IF', 'A', "FELLOW'S", 'BEEN', 'A', 'LITTLE', 'BIT', 'WILD', 'HE', 'IS', 'BEALES', 'A', 'BUB', 'AT', 'ONCE'] +5683-32866-0008-2535: ref=["BRACTON'S", 'A', 'VERY', 'GOOD', 'FELLOW', 'I', 'CAN', 'ASSURE', 'YOU'] +5683-32866-0008-2535: hyp=["BRACTON'S", 'A', 'VERY', 'GOOD', 'FELLOW', 'I', 'CAN', 'ASSURE', 'YOU'] +5683-32866-0009-2536: ref=['I', "DON'T", 'KNOW', 'AND', 
"CAN'T", 'SAY', 'HOW', 'YOU', 'FINE', 'GENTLEMEN', 'DEFINE', 'WICKEDNESS', 'ONLY', 'AS', 'AN', 'OBSCURE', 'FEMALE', 'I', 'SPEAK', 'ACCORDING', 'TO', 'MY', 'LIGHTS', 'AND', 'HE', 'IS', 'GENERALLY', 'THOUGHT', 'THE', 'WICKEDEST', 'MAN', 'IN', 'THIS', 'COUNTY'] +5683-32866-0009-2536: hyp=['I', "DON'T", 'KNOW', 'ONE', "CAN'T", 'SAY', 'HOW', 'YOU', 'FIND', 'GENTLEMEN', 'TO', 'FIND', 'WICKEDNESS', 'ONLY', 'AS', 'AN', 'OBSCURE', 'FEMALE', 'I', 'SPEAK', 'ACCORDING', 'TO', 'MY', 'LIGHTS', 'AND', 'HE', 'IS', 'GENERALLY', 'THOUGHT', 'THE', 'WICKEDEST', 'MAN', 'IN', 'THIS', 'COUNTY'] +5683-32866-0010-2537: ref=['WELL', 'YOU', 'KNOW', 'RADIE', 'WOMEN', 'LIKE', 'WICKED', 'FELLOWS', 'IT', 'IS', 'CONTRAST', 'I', 'SUPPOSE', 'BUT', 'THEY', 'DO', 'AND', "I'M", 'SURE', 'FROM', 'WHAT', 'BRACTON', 'HAS', 'SAID', 'TO', 'ME', 'I', 'KNOW', 'HIM', 'INTIMATELY', 'THAT', 'DORCAS', 'LIKES', 'HIM', 'AND', 'I', "CAN'T", 'CONCEIVE', 'WHY', 'THEY', 'ARE', 'NOT', 'MARRIED'] +5683-32866-0010-2537: hyp=['WELL', 'YOU', 'KNOW', 'RADIE', 'WOMEN', 'LIKE', 'WICKED', 'FELLOWS', 'IT', 'IS', 'CONTRAST', 'I', 'SUPPOSE', 'BUT', 'THEY', 'DO', 'AND', "I'M", 'SURE', 'FROM', 'WHAT', 'BRACTON', 'HAS', 'SAID', 'TO', 'ME', 'I', 'KNOW', 'HIM', 'INTIMATELY', 'THAT', 'DORCAS', 'LIKES', 'HIM', 'AND', 'I', "CAN'T", 'CONCEIVE', 'WHY', 'THEY', 'ARE', 'NOT', 'MARRIED'] +5683-32866-0011-2538: ref=['THEIR', 'WALK', 'CONTINUED', 'SILENT', 'FOR', 'THE', 'GREATER', 'PART', 'NEITHER', 'WAS', 'QUITE', 'SATISFIED', 'WITH', 'THE', 'OTHER', 'BUT', 'RACHEL', 'AT', 'LAST', 'SAID'] +5683-32866-0011-2538: hyp=['THEIR', 'WALK', 'CONTINUED', 'SILENT', 'FOR', 'THE', 'GREATER', 'PART', 'NEITHER', 'WAS', 'QUITE', 'SATISFIED', 'WITH', 'THE', 'OTHER', 'BUT', 'RACHEL', 'AT', 'LAST', 'SAID'] +5683-32866-0012-2539: ref=['NOW', "THAT'S", 'IMPOSSIBLE', 'RADIE', 'FOR', 'I', 'REALLY', "DON'T", 'THINK', 'I', 'ONCE', 'THOUGHT', 'OF', 'HIM', 'ALL', 'THIS', 'EVENING', 'EXCEPT', 'JUST', 'WHILE', 'WE', 'WERE', 'TALKING'] +5683-32866-0012-2539: hyp=['NOW', "THAT'S", 'IMPOSSIBLE', 'RADIE', 'FOR', 'I', 'REALLY', "DON'T", 'THINK', 'I', 'ONCE', 'THOUGHT', 'OF', 'HIM', 'ALL', 'THIS', 'EVENING', 'EXCEPT', 'JUST', 'WHILE', 'WE', 'WERE', 'TALKING'] +5683-32866-0013-2540: ref=['THERE', 'WAS', 'A', 'BRIGHT', 'MOONLIGHT', 'BROKEN', 'BY', 'THE', 'SHADOWS', 'OF', 'OVERHANGING', 'BOUGHS', 'AND', 'WITHERED', 'LEAVES', 'AND', 'THE', 'MOTTLED', 'LIGHTS', 'AND', 'SHADOWS', 'GLIDED', 'ODDLY', 'ACROSS', 'HIS', 'PALE', 'FEATURES'] +5683-32866-0013-2540: hyp=['THERE', 'WAS', 'A', 'BRIGHT', 'MOONLIGHT', 'BROKEN', 'BY', 'THE', 'SHADOWS', 'OF', 'OVERHANGING', 'BOUGHS', 'AND', 'WITHERED', 'LEAVES', 'AND', 'THE', 'MOTTLED', 'LIGHTS', 'AND', 'SHADOWS', 'GLIDED', 'ODDLY', 'ACROSS', 'HIS', 'PALE', 'FEATURES'] +5683-32866-0014-2541: ref=["DON'T", 'INSULT', 'ME', 'STANLEY', 'BY', 'TALKING', 'AGAIN', 'AS', 'YOU', 'DID', 'THIS', 'MORNING'] +5683-32866-0014-2541: hyp=["DON'T", 'INSULT', 'ME', 'STANLEY', 'BY', 'TALKING', 'AGAIN', 'AS', 'YOU', 'DID', 'THIS', 'MORNING'] +5683-32866-0015-2542: ref=['WHAT', 'I', 'SAY', 'IS', 'ALTOGETHER', 'ON', 'YOUR', 'OWN', 'ACCOUNT'] +5683-32866-0015-2542: hyp=['WHAT', 'I', 'SAY', 'IS', 'ALTOGETHER', 'ON', 'YOUR', 'OWN', 'ACCOUNT'] +5683-32866-0016-2543: ref=['MARK', 'MY', 'WORDS', "YOU'LL", 'FIND', 'HIM', 'TOO', 'STRONG', 'FOR', 'YOU', 'AYE', 'AND', 'TOO', 'DEEP'] +5683-32866-0016-2543: hyp=['MARK', 'MY', 'WORDS', "YOU'LL", 'FIND', 'HIM', 'TOO', 'STRONG', 'FOR', 'YOU', 'AY', 'AND', 'TOO', 'DEEP'] +5683-32866-0017-2544: ref=['I', 'AM', 'VERY', 'UNEASY', 'ABOUT', 'IT', 'WHATEVER', 'IT', 
'IS', 'I', "CAN'T", 'HELP', 'IT'] +5683-32866-0017-2544: hyp=['I', 'AM', 'VERY', 'UNEASY', 'ABOUT', 'IT', 'WHATEVER', 'IT', 'IS', 'I', "CAN'T", 'HELP', 'IT'] +5683-32866-0018-2545: ref=['TO', 'MY', 'MIND', 'THERE', 'HAS', 'ALWAYS', 'BEEN', 'SOMETHING', 'INEXPRESSIBLY', 'AWFUL', 'IN', 'FAMILY', 'FEUDS'] +5683-32866-0018-2545: hyp=['TO', 'MY', 'MIND', 'THERE', 'HAS', 'ALWAYS', 'BEEN', 'SOMETHING', 'INEXPRESSIBLY', 'AWFUL', 'IN', 'FAMILY', 'FEUDS'] +5683-32866-0019-2546: ref=['THE', 'MYSTERY', 'OF', 'THEIR', 'ORIGIN', 'THEIR', 'CAPACITY', 'FOR', 'EVOLVING', 'LATENT', 'FACULTIES', 'OF', 'CRIME', 'AND', 'THE', 'STEADY', 'VITALITY', 'WITH', 'WHICH', 'THEY', 'SURVIVE', 'THE', 'HEARSE', 'AND', 'SPEAK', 'THEIR', 'DEEP', 'MOUTHED', 'MALIGNITIES', 'IN', 'EVERY', 'NEW', 'BORN', 'GENERATION', 'HAVE', 'ASSOCIATED', 'THEM', 'SOMEHOW', 'IN', 'MY', 'MIND', 'WITH', 'A', 'SPELL', 'OF', 'LIFE', 'EXCEEDING', 'AND', 'DISTINCT', 'FROM', 'HUMAN', 'AND', 'A', 'SPECIAL', 'SATANIC', 'ACTION'] +5683-32866-0019-2546: hyp=['THE', 'MYSTERY', 'OF', 'THEIR', 'ORIGIN', 'THEIR', 'CAPACITY', 'FOR', 'EVOLVING', 'LATENT', 'FACULTIES', 'OF', 'CRIME', 'AND', 'THE', 'STUDY', 'VITALITY', 'WITH', 'WHICH', 'THEY', 'SURVIVE', 'THE', 'HEARSE', 'AND', 'SPEAK', 'THEIR', 'DEEP', 'MOUTH', 'MALIGNITIES', 'IN', 'EVERY', 'NEW', 'BORN', 'GENERATION', 'HAVE', 'ASSOCIATED', 'THEM', 'SOMEHOW', 'IN', 'MY', 'MIND', 'WITH', 'A', 'SPELL', 'OF', 'LIFE', 'EXCEEDING', 'AND', 'DISTINCT', 'FROM', 'HUMAN', 'AND', 'ESPECIAL', 'SATANIC', 'ACTION'] +5683-32866-0020-2547: ref=['THE', 'FLOOR', 'MORE', 'THAN', 'ANYTHING', 'ELSE', 'SHOWED', 'THE', 'GREAT', 'AGE', 'OF', 'THE', 'ROOM'] +5683-32866-0020-2547: hyp=['THE', 'FLOOR', 'MORE', 'THAN', 'ANYTHING', 'ELSE', 'SHOWED', 'THE', 'GREAT', 'AGE', 'OF', 'THE', 'ROOM'] +5683-32866-0021-2548: ref=['MY', 'BED', 'WAS', 'UNEXCEPTIONABLY', 'COMFORTABLE', 'BUT', 'IN', 'MY', 'THEN', 'MOOD', 'I', 'COULD', 'HAVE', 'WISHED', 'IT', 'A', 'GREAT', 'DEAL', 'MORE', 'MODERN'] +5683-32866-0021-2548: hyp=['MY', 'BED', 'WAS', 'UNEXCEPTIONALLY', 'COMFORTABLE', 'BUT', 'IN', 'MY', 'THEN', 'MOOD', 'I', 'COULD', 'HAVE', 'WISHED', 'IT', 'A', 'GREAT', 'DEAL', 'MORE', 'MODERN'] +5683-32866-0022-2549: ref=['ITS', 'CURTAINS', 'WERE', 'OF', 'THICK', 'AND', 'FADED', 'TAPESTRY'] +5683-32866-0022-2549: hyp=['ITS', 'CURTAINS', 'WERE', 'OF', 'THICK', 'AND', 'FADED', 'TAPESTRY'] +5683-32866-0023-2550: ref=['ALL', 'THE', 'FURNITURE', 'BELONGED', 'TO', 'OTHER', 'TIMES'] +5683-32866-0023-2550: hyp=['ALL', 'THE', 'FURNITURE', 'BELONGED', 'TO', 'OTHER', 'TIMES'] +5683-32866-0024-2551: ref=['I', "SHAN'T", 'TROUBLE', 'YOU', 'ABOUT', 'MY', 'TRAIN', 'OF', 'THOUGHTS', 'OR', 'FANCIES', 'BUT', 'I', 'BEGAN', 'TO', 'FEEL', 'VERY', 'LIKE', 'A', 'GENTLEMAN', 'IN', 'A', 'GHOST', 'STORY', 'WATCHING', 'EXPERIMENTALLY', 'IN', 'A', 'HAUNTED', 'CHAMBER'] +5683-32866-0024-2551: hyp=['I', "SHAN'T", 'TROUBLE', 'YOU', 'ABOUT', 'MY', 'TRAIN', 'OF', 'THOUGHTS', 'OR', 'FANCIES', 'BUT', 'I', 'BEGAN', 'TO', 'FEEL', 'VERY', 'LIKE', 'A', 'GENTLEMAN', 'IN', 'A', 'GHOST', 'STORY', 'WATCHING', 'EXPERIMENTALLY', 'IN', 'A', 'HAUNTED', 'CHAMBER'] +5683-32866-0025-2552: ref=['I', 'DID', 'NOT', 'EVEN', 'TAKE', 'THE', 'PRECAUTION', 'OF', 'SMOKING', 'UP', 'THE', 'CHIMNEY'] +5683-32866-0025-2552: hyp=['I', 'DID', 'NOT', 'EVEN', 'TAKE', 'THE', 'PRECAUTION', 'OF', 'SMOKING', 'UP', 'THE', 'CHIMNEY'] +5683-32866-0026-2553: ref=['I', 'BOLDLY', 'LIGHTED', 'MY', 'CHEROOT'] +5683-32866-0026-2553: hyp=['I', 'BOLDLY', 'LIGHTED', 'MY', 'JERUET'] +5683-32866-0027-2554: ref=['A', 'COLD', 'BRIGHT', 'MOON', 
'WAS', 'SHINING', 'WITH', 'CLEAR', 'SHARP', 'LIGHTS', 'AND', 'SHADOWS'] +5683-32866-0027-2554: hyp=['A', 'COLD', 'BRIGHT', 'MOON', 'WAS', 'SHINING', 'WITH', 'CLEAR', 'SHARP', 'LIGHTS', 'AND', 'SHADOWS'] +5683-32866-0028-2555: ref=['THE', 'SOMBRE', 'OLD', 'TREES', 'LIKE', 'GIGANTIC', 'HEARSE', 'PLUMES', 'BLACK', 'AND', 'AWFUL'] +5683-32866-0028-2555: hyp=['THE', 'SOMBRE', 'OLD', 'TREES', 'LIKE', 'GIGANTIC', 'HEARSE', 'PLUMES', 'BLACK', 'AND', 'AWFUL'] +5683-32866-0029-2556: ref=['SOMEHOW', 'I', 'HAD', 'GROWN', 'NERVOUS'] +5683-32866-0029-2556: hyp=['SOMEHOW', 'I', 'HAD', 'GROWN', 'NERVOUS'] +5683-32866-0030-2557: ref=['A', 'LITTLE', 'BIT', 'OF', 'PLASTER', 'TUMBLED', 'DOWN', 'THE', 'CHIMNEY', 'AND', 'STARTLED', 'ME', 'CONFOUNDEDLY'] +5683-32866-0030-2557: hyp=['A', 'LITTLE', 'BIT', 'OF', 'PLASTER', 'TUMBLED', 'DOWN', 'THE', 'CHIMNEY', 'AND', 'STARTLED', 'ME', 'CONFOUNDEDLY'] +5683-32879-0000-2501: ref=['IT', 'WAS', 'NOT', 'VERY', 'MUCH', 'PAST', 'ELEVEN', 'THAT', 'MORNING', 'WHEN', 'THE', 'PONY', 'CARRIAGE', 'FROM', 'BRANDON', 'DREW', 'UP', 'BEFORE', 'THE', 'LITTLE', 'GARDEN', 'WICKET', 'OF', "REDMAN'S", 'FARM'] +5683-32879-0000-2501: hyp=['IT', 'WAS', 'NOT', 'VERY', 'MUCH', 'PAST', 'ELEVEN', 'THAT', 'MORNING', 'WHEN', 'THE', 'PONY', 'CARRIAGE', 'FROM', 'BRANDON', 'DREW', 'UP', 'BEFORE', 'THE', 'LITTLE', 'GARDEN', 'WICKET', 'OF', "REDMAN'S", 'FARM'] +5683-32879-0001-2502: ref=['WELL', 'SHE', 'WAS', 'BETTER', 'THOUGH', 'SHE', 'HAD', 'HAD', 'A', 'BAD', 'NIGHT'] +5683-32879-0001-2502: hyp=['WHILE', 'SHE', 'WAS', 'BETTER', 'THOUGH', 'SHE', 'HAD', 'HAD', 'A', 'BAD', 'NIGHT'] +5683-32879-0002-2503: ref=['SO', 'THERE', 'CAME', 'A', 'STEP', 'AND', 'A', 'LITTLE', 'RUSTLING', 'OF', 'FEMININE', 'DRAPERIES', 'THE', 'SMALL', 'DOOR', 'OPENED', 'AND', 'RACHEL', 'ENTERED', 'WITH', 'HER', 'HAND', 'EXTENDED', 'AND', 'A', 'PALE', 'SMILE', 'OF', 'WELCOME'] +5683-32879-0002-2503: hyp=['SO', 'THERE', 'CAME', 'A', 'STEP', 'AND', 'A', 'LITTLE', 'RUSTLING', 'OF', 'FEMININE', 'DRAPERIES', 'THE', 'SMALL', 'DOOR', 'OPENED', 'AND', 'RACHEL', 'ENTERED', 'WITH', 'HER', 'HAND', 'EXTENDED', 'AND', 'A', 'PALE', 'SMILE', 'OF', 'WELCOME'] +5683-32879-0003-2504: ref=['WOMEN', 'CAN', 'HIDE', 'THEIR', 'PAIN', 'BETTER', 'THAN', 'WE', 'MEN', 'AND', 'BEAR', 'IT', 'BETTER', 'TOO', 'EXCEPT', 'WHEN', 'SHAME', 'DROPS', 'FIRE', 'INTO', 'THE', 'DREADFUL', 'CHALICE'] +5683-32879-0003-2504: hyp=['WOMEN', 'CAN', 'HIDE', 'THEIR', 'PAIN', 'BETTER', 'THAN', 'WE', 'MEN', 'AND', 'BEAR', 'IT', 'BETTER', 'TOO', 'EXCEPT', 'WHEN', 'SHAME', 'DROPS', 'FIRE', 'INTO', 'THE', 'DREADFUL', 'CHALICE'] +5683-32879-0004-2505: ref=['BUT', 'POOR', 'RACHEL', 'LAKE', 'HAD', 'MORE', 'THAN', 'THAT', 'STOICAL', 'HYPOCRISY', 'WHICH', 'ENABLES', 'THE', 'TORTURED', 'SPIRITS', 'OF', 'HER', 'SEX', 'TO', 'LIFT', 'A', 'PALE', 'FACE', 'THROUGH', 'THE', 'FLAMES', 'AND', 'SMILE'] +5683-32879-0004-2505: hyp=['BUT', 'POOR', 'RACHEL', 'LAKE', 'HAD', 'MORE', 'THAN', 'THAT', 'STOICAL', 'HYPOCRISY', 'WHICH', 'ENABLES', 'THE', 'TORTURED', 'SPIRITS', 'OF', 'HER', 'SEX', 'TO', 'LIFT', 'A', 'PALE', 'FACE', 'THROUGH', 'THE', 'FLAMES', 'AND', 'SMILE'] +5683-32879-0005-2506: ref=['THIS', 'TRANSIENT', 'SPRING', 'AND', 'LIGHTING', 'UP', 'ARE', 'BEAUTIFUL', 'A', 'GLAMOUR', 'BEGUILING', 'OUR', 'SENSES'] +5683-32879-0005-2506: hyp=['THIS', 'TRANSIENT', 'SPRING', 'AND', 'LIGHTING', 'UP', 'ARE', 'BEAUTIFUL', 'A', 'GLAMOUR', 'BEGUILING', 'OUR', 'SENSES'] +5683-32879-0006-2507: ref=['THERE', 'WAS', 'SOMETHING', 'OF', 'SWEETNESS', 'AND', 'FONDNESS', 'IN', 'HER', 'TONES', 'AND', 'MANNER', 'WHICH', 
'WAS', 'NEW', 'TO', 'RACHEL', 'AND', 'COMFORTING', 'AND', 'SHE', 'RETURNED', 'THE', 'GREETING', 'AS', 'KINDLY', 'AND', 'FELT', 'MORE', 'LIKE', 'HER', 'FORMER', 'SELF'] +5683-32879-0006-2507: hyp=['THERE', 'WAS', 'SOMETHING', 'OF', 'SWEETNESS', 'AND', 'FONDNESS', 'IN', 'HER', 'TONES', 'AND', 'MANNER', 'WHICH', 'WAS', 'NEW', 'TO', 'RACHEL', 'AND', 'COMFORTING', 'AND', 'SHE', 'RETURNED', 'THE', 'GREETING', 'AS', 'KINDLY', 'AND', 'FELT', 'MORE', 'LIKE', 'HER', 'FORMER', 'SELF'] +5683-32879-0007-2508: ref=["RACHEL'S", 'PALE', 'AND', 'SHARPENED', 'FEATURES', 'AND', 'DILATED', 'EYE', 'STRUCK', 'HER', 'WITH', 'A', 'PAINFUL', 'SURPRISE'] +5683-32879-0007-2508: hyp=["RACHEL'S", 'PALE', 'AND', 'SHARPENED', 'FEATURES', 'AND', 'DILATED', 'EYE', 'STRUCK', 'HER', 'WITH', 'A', 'PAINFUL', 'SURPRISE'] +5683-32879-0008-2509: ref=['YOU', 'HAVE', 'BEEN', 'SO', 'ILL', 'MY', 'POOR', 'RACHEL'] +5683-32879-0008-2509: hyp=['YOU', 'HAVE', 'BEEN', 'SO', 'ILL', 'MY', 'POOR', 'RACHEL'] +5683-32879-0009-2510: ref=['ILL', 'AND', 'TROUBLED', 'DEAR', 'TROUBLED', 'IN', 'MIND', 'AND', 'MISERABLY', 'NERVOUS'] +5683-32879-0009-2510: hyp=['ILL', 'AND', 'TROUBLED', 'DEAR', 'TROUBLED', 'IN', 'MIND', 'AND', 'MISERABLY', 'NERVOUS'] +5683-32879-0010-2511: ref=['POOR', 'RACHEL', 'HER', 'NATURE', 'RECOILED', 'FROM', 'DECEIT', 'AND', 'SHE', 'TOLD', 'AT', 'ALL', 'EVENTS', 'AS', 'MUCH', 'OF', 'THE', 'TRUTH', 'AS', 'SHE', 'DARED'] +5683-32879-0010-2511: hyp=['POOR', 'RACHEL', 'HER', 'NATURE', 'RECOILED', 'FROM', 'DECEIT', 'AND', 'SHE', 'TOLD', 'AT', 'ALL', 'EVENTS', 'AS', 'MUCH', 'OF', 'THE', 'TRUTH', 'AS', 'SHE', 'DARED'] +5683-32879-0011-2512: ref=['SHE', 'SPOKE', 'WITH', 'A', 'SUDDEN', 'ENERGY', 'WHICH', 'PARTOOK', 'OF', 'FEAR', 'AND', 'PASSION', 'AND', 'FLUSHED', 'HER', 'THIN', 'CHEEK', 'AND', 'MADE', 'HER', 'LANGUID', 'EYES', 'FLASH'] +5683-32879-0011-2512: hyp=['SHE', 'SPOKE', 'WITH', 'A', 'SUDDEN', 'ENERGY', 'WHICH', 'PARTOOK', 'A', 'FEAR', 'AND', 'PASSION', 'AND', 'FLUSHED', 'HER', 'THIN', 'CHEEK', 'AND', 'MADE', 'HER', 'LANGUID', 'EYES', 'FLASH'] +5683-32879-0012-2513: ref=['THANK', 'YOU', 'RACHEL', 'MY', 'COUSIN', 'RACHEL', 'MY', 'ONLY', 'FRIEND'] +5683-32879-0012-2513: hyp=['THANK', 'YOU', 'RACHEL', 'MY', 'COUSIN', 'RACHEL', 'MY', 'ONLY', 'FRIEND'] +5683-32879-0013-2514: ref=['CHELFORD', 'HAD', 'A', 'NOTE', 'FROM', 'MISTER', 'WYLDER', 'THIS', 'MORNING', 'ANOTHER', 'NOTE', 'HIS', 'COMING', 'DELAYED', 'AND', 'SOMETHING', 'OF', 'HIS', 'HAVING', 'TO', 'SEE', 'SOME', 'PERSON', 'WHO', 'IS', 'ABROAD', 'CONTINUED', 'DORCAS', 'AFTER', 'A', 'LITTLE', 'PAUSE'] +5683-32879-0013-2514: hyp=['CHELFORD', 'HAD', 'A', 'NOTE', 'FROM', 'MISTER', 'WYLDER', 'THIS', 'MORNING', 'ANOTHER', 'NOTE', 'HIS', 'COMING', 'DELAYED', 'AND', 'SOMETHING', 'OF', 'HIS', 'HAVING', 'TO', 'SEE', 'SOME', 'PERSON', 'WHO', 'WAS', 'ABROAD', 'CONTINUED', 'DORCAS', 'AFTER', 'A', 'LITTLE', 'PAUSE'] +5683-32879-0014-2515: ref=['YES', 'SOMETHING', 'EVERYTHING', 'SAID', 'RACHEL', 'HURRIEDLY', 'LOOKING', 'FROWNINGLY', 'AT', 'A', 'FLOWER', 'WHICH', 'SHE', 'WAS', 'TWIRLING', 'IN', 'HER', 'FINGERS'] +5683-32879-0014-2515: hyp=['YES', 'SOMETHING', 'EVERYTHING', 'SAID', 'RACHEL', 'HURRIEDLY', 'LOOKING', 'FROWNINGLY', 'AT', 'A', 'FLOWER', 'WHICH', 'SHE', 'WAS', 'TWIRLING', 'IN', 'HER', 'FINGERS'] +5683-32879-0015-2516: ref=['YES', 'SAID', 'RACHEL'] +5683-32879-0015-2516: hyp=['YES', 'SAID', 'RACHEL'] +5683-32879-0016-2517: ref=['AND', 'THE', 'WAN', 'ORACLE', 'HAVING', 'SPOKEN', 'SHE', 'SATE', 'DOWN', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'ABSTRACTION', 'AGAIN', 'BESIDE', 'DORCAS', 'AND', 
'SHE', 'LOOKED', 'FULL', 'IN', 'HER', "COUSIN'S", 'EYES'] +5683-32879-0016-2517: hyp=['AND', 'THE', 'WAN', 'ORACLE', 'HAVING', 'SPOKEN', 'SHE', 'SAT', 'DOWN', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'ABSTRACTION', 'AGAIN', 'BESIDE', 'DORCAS', 'AND', 'SHE', 'LOOKED', 'FULL', 'IN', 'HER', "COUSIN'S", 'EYES'] +5683-32879-0017-2518: ref=['OF', 'MARK', 'WYLDER', 'I', 'SAY', 'THIS', 'HIS', 'NAME', 'HAS', 'BEEN', 'FOR', 'YEARS', 'HATEFUL', 'TO', 'ME', 'AND', 'RECENTLY', 'IT', 'HAS', 'BECOME', 'FRIGHTFUL', 'AND', 'YOU', 'WILL', 'PROMISE', 'ME', 'SIMPLY', 'THIS', 'THAT', 'YOU', 'WILL', 'NEVER', 'ASK', 'ME', 'TO', 'SPEAK', 'AGAIN', 'ABOUT', 'HIM'] +5683-32879-0017-2518: hyp=['OF', 'MARK', 'WYLDER', 'I', 'SAY', 'THIS', 'HIS', 'NAME', 'HAS', 'BEEN', 'FOR', 'YEARS', 'HATEFUL', 'TO', 'ME', 'AND', 'RECENTLY', 'IT', 'HAS', 'BECOME', 'FRIGHTFUL', 'AND', 'YOU', 'WILL', 'PROMISE', 'ME', 'SIMPLY', 'THIS', 'THAT', 'YOU', 'WILL', 'NEVER', 'ASK', 'ME', 'TO', 'SPEAK', 'AGAIN', 'ABOUT', 'HIM'] +5683-32879-0018-2519: ref=['IT', 'IS', 'AN', 'ANTIPATHY', 'AN', 'ANTIPATHY', 'I', 'CANNOT', 'GET', 'OVER', 'DEAR', 'DORCAS', 'YOU', 'MAY', 'THINK', 'IT', 'A', 'MADNESS', 'BUT', "DON'T", 'BLAME', 'ME'] +5683-32879-0018-2519: hyp=['IT', 'IS', 'AN', 'ANTIPATHY', 'AN', 'ANTIPATHY', 'I', 'CANNOT', 'GET', 'OVER', 'DEAR', 'DORCAS', 'YOU', 'MAY', 'THINK', 'IT', 'A', 'MADNESS', 'BUT', "DON'T", 'BLAME', 'ME'] +5683-32879-0019-2520: ref=['I', 'HAVE', 'VERY', 'FEW', 'TO', 'LOVE', 'ME', 'NOW', 'AND', 'I', 'THOUGHT', 'YOU', 'MIGHT', 'LOVE', 'ME', 'AS', 'I', 'HAVE', 'BEGUN', 'TO', 'LOVE', 'YOU'] +5683-32879-0019-2520: hyp=['I', 'HAVE', 'VERY', 'FEW', 'TO', 'LOVE', 'ME', 'NOW', 'AND', 'I', 'THOUGHT', 'YOU', 'MIGHT', 'LOVE', 'ME', 'AS', 'I', 'HAVE', 'BEGUN', 'TO', 'LOVE', 'YOU'] +5683-32879-0020-2521: ref=['AND', 'SHE', 'THREW', 'HER', 'ARMS', 'ROUND', 'HER', "COUSIN'S", 'NECK', 'AND', 'BRAVE', 'RACHEL', 'AT', 'LAST', 'BURST', 'INTO', 'TEARS'] +5683-32879-0020-2521: hyp=['AND', 'SHE', 'THREW', 'HER', 'ARMS', 'ROUND', 'HER', "COUSIN'S", 'NECK', 'AND', 'BRAVE', 'RACHEL', 'AT', 'LAST', 'BURST', 'INTO', 'TEARS'] +5683-32879-0021-2522: ref=['DORCAS', 'IN', 'HER', 'STRANGE', 'WAY', 'WAS', 'MOVED'] +5683-32879-0021-2522: hyp=['DORCAS', 'IN', 'HER', 'STRANGE', 'WAY', 'WAS', 'MOVED'] +5683-32879-0022-2523: ref=['I', 'LIKE', 'YOU', 'STILL', 'RACHEL', "I'M", 'SURE', "I'LL", 'ALWAYS', 'LIKE', 'YOU'] +5683-32879-0022-2523: hyp=['I', 'LIKE', 'YOU', 'STILL', 'RACHEL', "I'M", 'SURE', "I'LL", 'ALWAYS', 'LIKE', 'YOU'] +5683-32879-0023-2524: ref=['YOU', 'RESEMBLE', 'ME', 'RACHEL', 'YOU', 'ARE', 'FEARLESS', 'AND', 'INFLEXIBLE', 'AND', 'GENEROUS'] +5683-32879-0023-2524: hyp=['YOU', 'RESEMBLE', 'ME', 'RACHEL', 'YOU', 'ARE', 'FEARLESS', 'AND', 'INFLEXIBLE', 'AND', 'GENEROUS'] +5683-32879-0024-2525: ref=['YES', 'RACHEL', 'I', 'DO', 'LOVE', 'YOU'] +5683-32879-0024-2525: hyp=['YES', 'RACHEL', 'I', 'DO', 'LOVE', 'YOU'] +5683-32879-0025-2526: ref=['THANK', 'YOU', 'DORCAS', 'DEAR'] +5683-32879-0025-2526: hyp=['THANK', 'YOU', 'DORCAS', 'DEAR'] +61-70968-0000-2179: ref=['HE', 'BEGAN', 'A', 'CONFUSED', 'COMPLAINT', 'AGAINST', 'THE', 'WIZARD', 'WHO', 'HAD', 'VANISHED', 'BEHIND', 'THE', 'CURTAIN', 'ON', 'THE', 'LEFT'] +61-70968-0000-2179: hyp=['HE', 'BEGAN', 'A', 'CONFUSED', 'COMPLAINT', 'AGAINST', 'THE', 'WIZARD', 'WHO', 'HAD', 'VANISHED', 'BEHIND', 'THE', 'CURTAIN', 'ON', 'THE', 'LEFT'] +61-70968-0001-2180: ref=['GIVE', 'NOT', 'SO', 'EARNEST', 'A', 'MIND', 'TO', 'THESE', 'MUMMERIES', 'CHILD'] +61-70968-0001-2180: hyp=['GIVE', 'NOT', 'SO', 'EARNEST', 'A', 'MIND', 'TO', 'THESE', 
'MUMMERIES', 'CHILD'] +61-70968-0002-2181: ref=['A', 'GOLDEN', 'FORTUNE', 'AND', 'A', 'HAPPY', 'LIFE'] +61-70968-0002-2181: hyp=['A', 'GOLDEN', 'FORTUNE', 'AND', 'A', 'HAPPY', 'LIFE'] +61-70968-0003-2182: ref=['HE', 'WAS', 'LIKE', 'UNTO', 'MY', 'FATHER', 'IN', 'A', 'WAY', 'AND', 'YET', 'WAS', 'NOT', 'MY', 'FATHER'] +61-70968-0003-2182: hyp=['HE', 'WAS', 'LIKE', 'UNTO', 'MY', 'FATHER', 'IN', 'A', 'WAY', 'AND', 'YET', 'WAS', 'NOT', 'MY', 'FATHER'] +61-70968-0004-2183: ref=['ALSO', 'THERE', 'WAS', 'A', 'STRIPLING', 'PAGE', 'WHO', 'TURNED', 'INTO', 'A', 'MAID'] +61-70968-0004-2183: hyp=['ALSO', 'THERE', 'WAS', 'A', 'STRIPLING', 'PAGE', 'WHO', 'TURNED', 'INTO', 'A', 'MAID'] +61-70968-0005-2184: ref=['THIS', 'WAS', 'SO', 'SWEET', 'A', 'LADY', 'SIR', 'AND', 'IN', 'SOME', 'MANNER', 'I', 'DO', 'THINK', 'SHE', 'DIED'] +61-70968-0005-2184: hyp=['THIS', 'WAS', 'SO', 'SWEET', 'A', 'LADY', 'SIR', 'AND', 'IN', 'SOME', 'MANNER', 'I', 'DO', 'THINK', 'SHE', 'DIED'] +61-70968-0006-2185: ref=['BUT', 'THEN', 'THE', 'PICTURE', 'WAS', 'GONE', 'AS', 'QUICKLY', 'AS', 'IT', 'CAME'] +61-70968-0006-2185: hyp=['BUT', 'THEN', 'THE', 'PICTURE', 'WAS', 'GONE', 'AS', 'QUICKLY', 'AS', 'IT', 'CAME'] +61-70968-0007-2186: ref=['SISTER', 'NELL', 'DO', 'YOU', 'HEAR', 'THESE', 'MARVELS'] +61-70968-0007-2186: hyp=['SISTER', 'NELL', 'DO', 'YOU', 'HEAR', 'THESE', 'MARVELS'] +61-70968-0008-2187: ref=['TAKE', 'YOUR', 'PLACE', 'AND', 'LET', 'US', 'SEE', 'WHAT', 'THE', 'CRYSTAL', 'CAN', 'SHOW', 'TO', 'YOU'] +61-70968-0008-2187: hyp=['TAKE', 'YOUR', 'PLACE', 'AND', 'LET', 'US', 'SEE', 'WHAT', 'THE', 'CRYSTAL', 'CAN', 'SHOW', 'TO', 'YOU'] +61-70968-0009-2188: ref=['LIKE', 'AS', 'NOT', 'YOUNG', 'MASTER', 'THOUGH', 'I', 'AM', 'AN', 'OLD', 'MAN'] +61-70968-0009-2188: hyp=['LIKE', 'AS', 'NOT', 'YOUNG', 'MASTER', 'THOUGH', 'I', 'AM', 'AN', 'OLD', 'MAN'] +61-70968-0010-2189: ref=['FORTHWITH', 'ALL', 'RAN', 'TO', 'THE', 'OPENING', 'OF', 'THE', 'TENT', 'TO', 'SEE', 'WHAT', 'MIGHT', 'BE', 'AMISS', 'BUT', 'MASTER', 'WILL', 'WHO', 'PEEPED', 'OUT', 'FIRST', 'NEEDED', 'NO', 'MORE', 'THAN', 'ONE', 'GLANCE'] +61-70968-0010-2189: hyp=['FORTHWITH', 'ALL', 'RAN', 'TO', 'THE', 'OPENING', 'OF', 'THE', 'TENT', 'TO', 'SEE', 'WHAT', 'MIGHT', 'BE', 'AMISS', 'BUT', 'MASTER', 'WILL', 'WHO', 'PEEPED', 'OUT', 'FIRST', 'NEEDED', 'NO', 'MORE', 'THAN', 'ONE', 'GLANCE'] +61-70968-0011-2190: ref=['HE', 'GAVE', 'WAY', 'TO', 'THE', 'OTHERS', 'VERY', 'READILY', 'AND', 'RETREATED', 'UNPERCEIVED', 'BY', 'THE', 'SQUIRE', 'AND', 'MISTRESS', 'FITZOOTH', 'TO', 'THE', 'REAR', 'OF', 'THE', 'TENT'] +61-70968-0011-2190: hyp=['HE', 'GAVE', 'WAY', 'TO', 'THE', 'OTHERS', 'VERY', 'READILY', 'AND', 'RETREATED', 'UNPERCEIVED', 'BY', 'THE', 'SQUIRE', 'AND', 'MISTRESS', 'FITZOOTH', 'TO', 'THE', 'REAR', 'OF', 'THE', 'TENT'] +61-70968-0012-2191: ref=['CRIES', 'OF', 'A', 'NOTTINGHAM', 'A', 'NOTTINGHAM'] +61-70968-0012-2191: hyp=['CRIES', 'OF', 'UNNOTTINGHAM', 'ARE', 'NOTTINGHAM'] +61-70968-0013-2192: ref=['BEFORE', 'THEM', 'FLED', 'THE', 'STROLLER', 'AND', 'HIS', 'THREE', 'SONS', 'CAPLESS', 'AND', 'TERRIFIED'] +61-70968-0013-2192: hyp=['BEFORE', 'THEM', 'FLED', 'THE', 'STROLLER', 'AND', 'HIS', 'THREE', 'SONS', 'CAPLESS', 'AND', 'TERRIFIED'] +61-70968-0014-2193: ref=['WHAT', 'IS', 'THE', 'TUMULT', 'AND', 'RIOTING', 'CRIED', 'OUT', 'THE', 'SQUIRE', 'AUTHORITATIVELY', 'AND', 'HE', 'BLEW', 'TWICE', 'ON', 'A', 'SILVER', 'WHISTLE', 'WHICH', 'HUNG', 'AT', 'HIS', 'BELT'] +61-70968-0014-2193: hyp=['WHAT', 'IS', 'THE', 'TUMULT', 'AND', 'RIOTING', 'CRIED', 'OUT', 'THE', 'SQUIRE', 'AUTHORITATIVELY', 
'AND', 'HE', 'BLEW', 'TWICE', 'ON', 'THE', 'SILVER', 'WHISTLE', 'WHICH', 'HUNG', 'AT', 'HIS', 'BELT'] +61-70968-0015-2194: ref=['NAY', 'WE', 'REFUSED', 'THEIR', 'REQUEST', 'MOST', 'POLITELY', 'MOST', 'NOBLE', 'SAID', 'THE', 'LITTLE', 'STROLLER'] +61-70968-0015-2194: hyp=['NAY', 'WE', 'REFUSED', 'THEIR', 'REQUEST', 'MOST', 'POLITELY', 'MOST', 'NOBLE', 'SAID', 'THE', 'LITTLE', 'STROLLER'] +61-70968-0016-2195: ref=['AND', 'THEN', 'THEY', 'BECAME', 'VEXED', 'AND', 'WOULD', 'HAVE', 'SNATCHED', 'YOUR', 'PURSE', 'FROM', 'US'] +61-70968-0016-2195: hyp=['AND', 'THEN', 'THEY', 'BECAME', 'VEXED', 'AND', 'WOULD', 'HAVE', 'SNATCHED', 'YOUR', 'PURSE', 'FROM', 'US'] +61-70968-0017-2196: ref=['I', 'COULD', 'NOT', 'SEE', 'MY', 'BOY', 'INJURED', 'EXCELLENCE', 'FOR', 'BUT', 'DOING', 'HIS', 'DUTY', 'AS', 'ONE', 'OF', "CUMBERLAND'S", 'SONS'] +61-70968-0017-2196: hyp=['I', 'COULD', 'NOT', 'SEE', 'MY', 'BOY', 'INJURE', 'EXCELLENCE', 'FOR', 'BUT', 'DOING', 'HIS', 'DUTY', 'AS', 'ONE', 'OF', "CUMBERLAND'S", 'SONS'] +61-70968-0018-2197: ref=['SO', 'I', 'DID', 'PUSH', 'THIS', 'FELLOW'] +61-70968-0018-2197: hyp=['SO', 'I', 'DID', 'PUSH', 'THIS', 'FELLOW'] +61-70968-0019-2198: ref=['IT', 'IS', 'ENOUGH', 'SAID', 'GEORGE', 'GAMEWELL', 'SHARPLY', 'AND', 'HE', 'TURNED', 'UPON', 'THE', 'CROWD'] +61-70968-0019-2198: hyp=['IT', 'IS', 'ENOUGH', 'SAID', 'GEORGE', 'GAMEWELL', 'SHARPLY', 'AS', 'HE', 'TURNED', 'UPON', 'THE', 'CROWD'] +61-70968-0020-2199: ref=['SHAME', 'ON', 'YOU', 'CITIZENS', 'CRIED', 'HE', 'I', 'BLUSH', 'FOR', 'MY', 'FELLOWS', 'OF', 'NOTTINGHAM'] +61-70968-0020-2199: hyp=['SHEEM', 'ON', 'YOU', 'CITIZENS', 'CRIED', 'HE', 'I', 'BLUSH', 'FOR', 'MY', 'FELLOWS', 'OF', 'NOTTINGHAM'] +61-70968-0021-2200: ref=['SURELY', 'WE', 'CAN', 'SUBMIT', 'WITH', 'GOOD', 'GRACE'] +61-70968-0021-2200: hyp=['SURELY', 'WE', 'CAN', 'SUBMIT', 'WITH', 'GOOD', 'GRACE'] +61-70968-0022-2201: ref=['TIS', 'FINE', 'FOR', 'YOU', 'TO', 'TALK', 'OLD', 'MAN', 'ANSWERED', 'THE', 'LEAN', 'SULLEN', 'APPRENTICE'] +61-70968-0022-2201: hyp=['TIS', 'FINE', 'FOR', 'YOU', 'TO', 'TALK', 'OLD', 'MAN', 'ANSWERED', 'THE', 'LEAN', 'SULLEN', 'APPRENTICE'] +61-70968-0023-2202: ref=['BUT', 'I', 'WRESTLED', 'WITH', 'THIS', 'FELLOW', 'AND', 'DO', 'KNOW', 'THAT', 'HE', 'PLAYED', 'UNFAIRLY', 'IN', 'THE', 'SECOND', 'BOUT'] +61-70968-0023-2202: hyp=['BUT', 'I', 'WRESTLED', 'WITH', 'THIS', 'FELLOW', 'AND', 'DO', 'KNOW', 'THAT', 'HE', 'PLAYED', 'UNFAIRLY', 'IN', 'THE', 'SECOND', 'BOUT'] +61-70968-0024-2203: ref=['SPOKE', 'THE', 'SQUIRE', 'LOSING', 'ALL', 'PATIENCE', 'AND', 'IT', 'WAS', 'TO', 'YOU', 'THAT', 'I', 'GAVE', 'ANOTHER', 'PURSE', 'IN', 'CONSOLATION'] +61-70968-0024-2203: hyp=['SPOKE', 'THE', 'SQUIRE', 'LOSING', 'ALL', 'PATIENT', 'AND', 'IT', 'WAS', 'TO', 'YOU', 'THAT', 'I', 'GAVE', 'ANOTHER', 'PERSON', 'CONSOLATION'] +61-70968-0025-2204: ref=['COME', 'TO', 'ME', 'MEN', 'HERE', 'HERE', 'HE', 'RAISED', 'HIS', 'VOICE', 'STILL', 'LOUDER'] +61-70968-0025-2204: hyp=['COME', 'TO', 'ME', 'MEN', 'HERE', 'HERE', 'HE', 'RAISED', 'HIS', 'VOICE', 'STILL', 'LOUDER'] +61-70968-0026-2205: ref=['THE', 'STROLLERS', 'TOOK', 'THEIR', 'PART', 'IN', 'IT', 'WITH', 'HEARTY', 'ZEST', 'NOW', 'THAT', 'THEY', 'HAD', 'SOME', 'CHANCE', 'OF', 'BEATING', 'OFF', 'THEIR', 'FOES'] +61-70968-0026-2205: hyp=['THE', 'STROLLERS', 'TOOK', 'THEIR', 'PART', 'IN', 'IT', 'WITH', 'HEARTY', 'ZEST', 'NOW', 'THAT', 'THEY', 'HAD', 'SOME', 'CHANCE', 'OF', 'BEATING', 'OFF', 'THEIR', 'FOES'] +61-70968-0027-2206: ref=['ROBIN', 'AND', 'THE', 'LITTLE', 'TUMBLER', 'BETWEEN', 'THEM', 'TRIED', 'TO', 'FORCE', 'THE', 
'SQUIRE', 'TO', 'STAND', 'BACK', 'AND', 'VERY', 'VALIANTLY', 'DID', 'THESE', 'TWO', 'COMPORT', 'THEMSELVES'] +61-70968-0027-2206: hyp=['ROBIN', 'AND', 'THE', 'LITTLE', 'TUMBLER', 'BETWEEN', 'THEM', 'TRIED', 'TO', 'FORCE', 'THE', 'SQUIRE', 'TO', 'STAND', 'BACK', 'AND', 'VERY', 'VALIANTLY', 'DID', 'THESE', 'TWO', 'COMPORT', 'THEMSELVES'] +61-70968-0028-2207: ref=['THE', 'HEAD', 'AND', 'CHIEF', 'OF', 'THE', 'RIOT', 'THE', 'NOTTINGHAM', 'APPRENTICE', 'WITH', 'CLENCHED', 'FISTS', 'THREATENED', 'MONTFICHET'] +61-70968-0028-2207: hyp=['THE', 'HEAD', 'AND', 'CHIEF', 'OF', 'THE', 'RIOT', 'THE', 'NOTTINGHAM', 'APPRENTICED', 'WITH', 'CLENCHED', 'FISTS', 'THREATENED', 'MONTFICHET'] +61-70968-0029-2208: ref=['THE', 'SQUIRE', 'HELPED', 'TO', 'THRUST', 'THEM', 'ALL', 'IN', 'AND', 'ENTERED', 'SWIFTLY', 'HIMSELF'] +61-70968-0029-2208: hyp=['THE', 'SQUIRE', 'HELPED', 'TO', 'THRUST', 'THEM', 'ALL', 'IN', 'AND', 'ENTERED', 'SWIFTLY', 'HIMSELF'] +61-70968-0030-2209: ref=['NOW', 'BE', 'SILENT', 'ON', 'YOUR', 'LIVES', 'HE', 'BEGAN', 'BUT', 'THE', 'CAPTURED', 'APPRENTICE', 'SET', 'UP', 'AN', 'INSTANT', 'SHOUT'] +61-70968-0030-2209: hyp=['NOW', 'BE', 'SILENT', 'ON', 'YOUR', 'LIVES', 'HE', 'BEGAN', 'BUT', 'THE', 'CAPTURED', 'APPRENTICE', 'SET', 'UP', 'AN', 'INSTANT', 'SHOUT'] +61-70968-0031-2210: ref=['SILENCE', 'YOU', 'KNAVE', 'CRIED', 'MONTFICHET'] +61-70968-0031-2210: hyp=['SILENCE', 'YOU', 'KNAVE', 'CRIED', 'MONTFICHET'] +61-70968-0032-2211: ref=['HE', 'FELT', 'FOR', 'AND', 'FOUND', 'THE', "WIZARD'S", 'BLACK', 'CLOTH', 'THE', 'SQUIRE', 'WAS', 'QUITE', 'OUT', 'OF', 'BREATH'] +61-70968-0032-2211: hyp=['HE', 'FELT', 'FOR', 'AND', 'FOUND', 'THE', "WIZARD'S", 'BLACK', 'CLOTH', 'THE', 'SQUIRE', 'WAS', 'QUITE', 'OUT', 'OF', 'BREATH'] +61-70968-0033-2212: ref=['THRUSTING', 'OPEN', 'THE', 'PROPER', 'ENTRANCE', 'OF', 'THE', 'TENT', 'ROBIN', 'SUDDENLY', 'RUSHED', 'FORTH', 'WITH', 'HIS', 'BURDEN', 'WITH', 'A', 'GREAT', 'SHOUT'] +61-70968-0033-2212: hyp=['THRUSTING', 'OPEN', 'THE', 'PROPER', 'ENTRANCE', 'OF', 'THE', 'TENT', 'ROBIN', 'SUDDENLY', 'RUSHED', 'FORTH', 'WITH', 'HIS', 'BURDEN', 'WITH', 'A', 'GREAT', 'SHOUT'] +61-70968-0034-2213: ref=['A', 'MONTFICHET', 'A', 'MONTFICHET', 'GAMEWELL', 'TO', 'THE', 'RESCUE'] +61-70968-0034-2213: hyp=['A', 'MONTFICHET', 'A', 'MONTFICHET', 'GAMEWELL', 'TO', 'THE', 'RESCUE'] +61-70968-0035-2214: ref=['TAKING', 'ADVANTAGE', 'OF', 'THIS', 'THE', "SQUIRE'S", 'FEW', 'MEN', 'REDOUBLED', 'THEIR', 'EFFORTS', 'AND', 'ENCOURAGED', 'BY', "ROBIN'S", 'AND', 'THE', 'LITTLE', "STROLLER'S", 'CRIES', 'FOUGHT', 'THEIR', 'WAY', 'TO', 'HIM'] +61-70968-0035-2214: hyp=['TAKING', 'ADVANTAGE', 'OF', 'THIS', 'THE', "SQUIRE'S", 'FEW', 'MEN', 'REDOUBLED', 'THEIR', 'EFFORTS', 'AND', 'ENCOURAGED', 'BY', 'ROBINS', 'AND', 'THE', 'LITTLE', "STROLLER'S", 'CRIES', 'FOUGHT', 'THEIR', 'WAY', 'TO', 'HIM'] +61-70968-0036-2215: ref=['GEORGE', 'MONTFICHET', 'WILL', 'NEVER', 'FORGET', 'THIS', 'DAY'] +61-70968-0036-2215: hyp=['GEORGE', 'MONTFICHET', 'WILL', 'NEVER', 'FORGET', 'THIS', 'DAY'] +61-70968-0037-2216: ref=['WHAT', 'IS', 'YOUR', 'NAME', 'LORDING', 'ASKED', 'THE', 'LITTLE', 'STROLLER', 'PRESENTLY'] +61-70968-0037-2216: hyp=['WHAT', 'IS', 'YOUR', 'NAME', 'LORDING', 'ASKED', 'THE', 'LITTLE', 'STROLLER', 'PRESENTLY'] +61-70968-0038-2217: ref=['ROBIN', 'FITZOOTH'] +61-70968-0038-2217: hyp=['ROBIN', 'FITZOOTH'] +61-70968-0039-2218: ref=['AND', 'MINE', 'IS', 'WILL', 'STUTELEY', 'SHALL', 'WE', 'BE', 'COMRADES'] +61-70968-0039-2218: hyp=['AND', 'MINE', 'IS', 'WILL', 'STUTELEY', 'SHALL', 'WE', 'BE', 'COMRADES'] 
+61-70968-0040-2219: ref=['RIGHT', 'WILLINGLY', 'FOR', 'BETWEEN', 'US', 'WE', 'HAVE', 'WON', 'THE', 'BATTLE', 'ANSWERED', 'ROBIN'] +61-70968-0040-2219: hyp=['RIGHT', 'WILLINGLY', 'FOR', 'BETWEEN', 'US', 'WE', 'HAVE', 'WON', 'THE', 'BATTLE', 'ANSWERED', 'ROBIN'] +61-70968-0041-2220: ref=['I', 'LIKE', 'YOU', 'WILL', 'YOU', 'ARE', 'THE', 'SECOND', 'WILL', 'THAT', 'I', 'HAVE', 'MET', 'AND', 'LIKED', 'WITHIN', 'TWO', 'DAYS', 'IS', 'THERE', 'A', 'SIGN', 'IN', 'THAT'] +61-70968-0041-2220: hyp=['I', 'LIKE', 'YOU', 'WILL', 'YOU', 'ARE', 'THE', 'SECOND', 'WILL', 'THAT', 'I', 'HAVE', 'MET', 'AND', 'LIKED', 'WITHIN', 'TWO', 'DAYS', 'IS', 'THERE', 'A', 'SIGN', 'IN', 'THAT'] +61-70968-0042-2221: ref=['MONTFICHET', 'CALLED', 'OUT', 'FOR', 'ROBIN', 'TO', 'GIVE', 'HIM', 'AN', 'ARM'] +61-70968-0042-2221: hyp=['MARTFICHET', 'CALLED', 'OUT', 'FOR', 'ROBIN', 'TO', 'GIVE', 'HIM', 'AN', 'ARM'] +61-70968-0043-2222: ref=['FRIENDS', 'SAID', 'MONTFICHET', 'FAINTLY', 'TO', 'THE', 'WRESTLERS', 'BEAR', 'US', 'ESCORT', 'SO', 'FAR', 'AS', 'THE', "SHERIFF'S", 'HOUSE'] +61-70968-0043-2222: hyp=['FRIENDS', 'SAID', 'MONTFICHET', 'FAINTLY', 'TO', 'THE', 'WRESTLERS', 'BEAR', 'US', 'ESCORT', 'SO', 'FAR', 'AS', 'THE', "SHERIFF'S", 'HOUSE'] +61-70968-0044-2223: ref=['IT', 'WILL', 'NOT', 'BE', 'SAFE', 'FOR', 'YOU', 'TO', 'STAY', 'HERE', 'NOW'] +61-70968-0044-2223: hyp=['IT', 'WILL', 'NOT', 'BE', 'SAFE', 'FOR', 'YOU', 'TO', 'STAY', 'HERE', 'NOW'] +61-70968-0045-2224: ref=['PRAY', 'FOLLOW', 'US', 'WITH', 'MINE', 'AND', 'MY', 'LORD', "SHERIFF'S", 'MEN'] +61-70968-0045-2224: hyp=['PRAY', 'FOLLOW', 'US', 'WITH', 'MINE', 'AND', 'MY', 'LORD', "SHERIFF'S", 'MEN'] +61-70968-0046-2225: ref=['NOTTINGHAM', 'CASTLE', 'WAS', 'REACHED', 'AND', 'ADMITTANCE', 'WAS', 'DEMANDED'] +61-70968-0046-2225: hyp=['NODDING', 'HIM', 'CASTLE', 'WAS', 'REACHED', 'AND', 'ADMITTANCE', 'WAS', 'DEMANDED'] +61-70968-0047-2226: ref=['MASTER', 'MONCEUX', 'THE', 'SHERIFF', 'OF', 'NOTTINGHAM', 'WAS', 'MIGHTILY', 'PUT', 'ABOUT', 'WHEN', 'TOLD', 'OF', 'THE', 'RIOTING'] +61-70968-0047-2226: hyp=['MASTER', 'MONCEUX', 'THE', 'SHERIFF', 'OF', 'NOTTINGHAM', 'WAS', 'MIGHTILY', 'PUT', 'ABOUT', 'WHEN', 'TOLD', 'OF', 'THE', 'RIOTING'] +61-70968-0048-2227: ref=['AND', 'HENRY', 'MIGHT', 'RETURN', 'TO', 'ENGLAND', 'AT', 'ANY', 'MOMENT'] +61-70968-0048-2227: hyp=['AND', 'HENRY', 'MIGHT', 'RETURN', 'TO', 'ENGLAND', 'AT', 'ANY', 'MOMENT'] +61-70968-0049-2228: ref=['HAVE', 'YOUR', 'WILL', 'CHILD', 'IF', 'THE', 'BOY', 'ALSO', 'WILLS', 'IT', 'MONTFICHET', 'ANSWERED', 'FEELING', 'TOO', 'ILL', 'TO', 'OPPOSE', 'ANYTHING', 'VERY', 'STRONGLY', 'JUST', 'THEN'] +61-70968-0049-2228: hyp=['HAVE', 'YOUR', 'WILL', 'CHILD', 'IF', 'THE', 'BOY', 'ALSO', 'WILDS', 'IT', 'MONTFICHET', 'ANSWERED', 'FEELING', 'TOO', 'ILL', 'TO', 'OPPOSE', 'ANYTHING', 'VERY', 'STRONGLY', 'JUST', 'THEN'] +61-70968-0050-2229: ref=['HE', 'MADE', 'AN', 'EFFORT', 'TO', 'HIDE', 'HIS', 'CONDITION', 'FROM', 'THEM', 'ALL', 'AND', 'ROBIN', 'FELT', 'HIS', 'FINGERS', 'TIGHTEN', 'UPON', 'HIS', 'ARM'] +61-70968-0050-2229: hyp=['HE', 'MADE', 'AN', 'EFFORT', 'TO', 'HIDE', 'HIS', 'CONDITION', 'FROM', 'THEM', 'ALL', 'AND', 'ROBIN', 'FELT', 'HIS', 'FINGERS', 'TIGHTEN', 'UPON', 'HIS', 'ARM'] +61-70968-0051-2230: ref=['BEG', 'ME', 'A', 'ROOM', 'OF', 'THE', 'SHERIFF', 'CHILD', 'QUICKLY'] +61-70968-0051-2230: hyp=['BEGGED', 'ME', 'A', 'ROOM', 'OF', 'THE', 'SHERIFF', 'CHILD', 'QUICKLY'] +61-70968-0052-2231: ref=['BUT', 'WHO', 'IS', 'THIS', 'FELLOW', 'PLUCKING', 'AT', 'YOUR', 'SLEEVE'] +61-70968-0052-2231: hyp=['BUT', 'WHO', 'IS', 'THIS', 'FELLOW', 
'PLUCKING', 'IT', 'OR', 'STEVE'] +61-70968-0053-2232: ref=['HE', 'IS', 'MY', 'ESQUIRE', 'EXCELLENCY', 'RETURNED', 'ROBIN', 'WITH', 'DIGNITY'] +61-70968-0053-2232: hyp=['HE', 'IS', 'MY', 'ESQUIRE', 'EXCELLENCY', 'RETURNED', 'ROBIN', 'WITH', 'DIGNITY'] +61-70968-0054-2233: ref=['MISTRESS', 'FITZOOTH', 'HAD', 'BEEN', 'CARRIED', 'OFF', 'BY', 'THE', "SHERIFF'S", 'DAUGHTER', 'AND', 'HER', 'MAIDS', 'AS', 'SOON', 'AS', 'THEY', 'HAD', 'ENTERED', 'THE', 'HOUSE', 'SO', 'THAT', 'ROBIN', 'ALONE', 'HAD', 'THE', 'CARE', 'OF', 'MONTFICHET'] +61-70968-0054-2233: hyp=['MISTRESS', 'FITZOOTH', 'HAD', 'BEEN', 'CARRIED', 'OFF', 'BY', 'THE', "SHERIFF'S", 'DAUGHTER', 'AND', 'HER', 'MAIDS', 'AS', 'SOON', 'AS', 'THEY', 'HAD', 'ENTERED', 'THE', 'HOUSE', 'SO', 'THAT', 'ROBIN', 'ALONE', 'HAD', 'THE', 'CARE', 'OF', 'MONT', 'VICHET'] +61-70968-0055-2234: ref=['ROBIN', 'WAS', 'GLAD', 'WHEN', 'AT', 'LENGTH', 'THEY', 'WERE', 'LEFT', 'TO', 'THEIR', 'OWN', 'DEVICES'] +61-70968-0055-2234: hyp=['ROBIN', 'WAS', 'GLAD', 'WHEN', 'AT', 'LENGTH', 'THEY', 'WERE', 'LEFT', 'TO', 'THEIR', 'OWN', 'DEVICES'] +61-70968-0056-2235: ref=['THE', 'WINE', 'DID', 'CERTAINLY', 'BRING', 'BACK', 'THE', 'COLOR', 'TO', 'THE', "SQUIRE'S", 'CHEEKS'] +61-70968-0056-2235: hyp=['THE', 'WINE', 'DID', 'CERTAINLY', 'BRING', 'BACK', 'THE', 'COLOR', 'TO', 'THE', "SQUIRE'S", 'CHEEKS'] +61-70968-0057-2236: ref=['THESE', 'ESCAPADES', 'ARE', 'NOT', 'FOR', 'OLD', 'GAMEWELL', 'LAD', 'HIS', 'DAY', 'HAS', 'COME', 'TO', 'TWILIGHT'] +61-70968-0057-2236: hyp=['THESE', 'ESCAPADES', 'ARE', 'NOT', 'FOR', 'OLD', 'GAMEWELL', 'LAD', 'HIS', 'DAY', 'HAS', 'COME', 'TO', 'TWILIGHT'] +61-70968-0058-2237: ref=['WILL', 'YOU', 'FORGIVE', 'ME', 'NOW'] +61-70968-0058-2237: hyp=['WILL', 'YOU', 'FORGIVE', 'ME', 'NOW'] +61-70968-0059-2238: ref=['IT', 'WILL', 'BE', 'NO', 'DISAPPOINTMENT', 'TO', 'ME'] +61-70968-0059-2238: hyp=['IT', 'WILL', 'BE', 'NO', 'DISAPPOINTMENT', 'TO', 'ME'] +61-70968-0060-2239: ref=['NO', 'THANKS', 'I', 'AM', 'GLAD', 'TO', 'GIVE', 'YOU', 'SUCH', 'EASY', 'HAPPINESS'] +61-70968-0060-2239: hyp=['NO', 'THANKS', 'I', 'AM', 'GLAD', 'TO', 'GIVE', 'YOU', 'SUCH', 'EASY', 'HAPPINESS'] +61-70968-0061-2240: ref=['YOU', 'ARE', 'A', 'WORTHY', 'LEECH', 'WILL', 'PRESENTLY', 'WHISPERED', 'ROBIN', 'THE', 'WINE', 'HAS', 'WORKED', 'A', 'MARVEL'] +61-70968-0061-2240: hyp=['YOU', 'ARE', 'A', 'WORTHY', 'LEECH', 'WILL', 'PRESENTLY', 'WHISPERED', 'ROBIN', 'THE', 'WINE', 'HAS', 'WORKED', 'A', 'MARVEL'] +61-70968-0062-2241: ref=['AY', 'AND', 'SHOW', 'YOU', 'SOME', 'PRETTY', 'TRICKS'] +61-70968-0062-2241: hyp=['I', 'AND', 'SHOW', 'YOU', 'SOME', 'PRETTY', 'TRICKS'] +61-70970-0000-2242: ref=['YOUNG', 'FITZOOTH', 'HAD', 'BEEN', 'COMMANDED', 'TO', 'HIS', "MOTHER'S", 'CHAMBER', 'SO', 'SOON', 'AS', 'HE', 'HAD', 'COME', 'OUT', 'FROM', 'HIS', 'CONVERSE', 'WITH', 'THE', 'SQUIRE'] +61-70970-0000-2242: hyp=['YOUNG', 'FITZOOTH', 'HAD', 'BEEN', 'COMMANDED', 'TO', 'HIS', "MOTHER'S", 'CHAMBER', 'SO', 'SOON', 'AS', 'HE', 'HAD', 'COME', 'OUT', 'FROM', 'HIS', 'CONVERSE', 'WITH', 'THE', 'SQUIRE'] +61-70970-0001-2243: ref=['THERE', 'BEFELL', 'AN', 'ANXIOUS', 'INTERVIEW', 'MISTRESS', 'FITZOOTH', 'ARGUING', 'FOR', 'AND', 'AGAINST', 'THE', "SQUIRE'S", 'PROJECT', 'IN', 'A', 'BREATH'] +61-70970-0001-2243: hyp=['THERE', 'BEFEL', 'AN', 'ANXIOUS', 'INTERVIEW', 'MISTRESS', 'FITZOOTH', 'ARGUING', 'FOUR', 'AND', 'AGAINST', 'THE', "SQUIRE'S", 'PROJECT', 'IN', 'A', 'BREATH'] +61-70970-0002-2244: ref=['MOST', 'OF', 'ALL', 'ROBIN', 'THOUGHT', 'OF', 'HIS', 'FATHER', 'WHAT', 'WOULD', 'HE', 'COUNSEL'] +61-70970-0002-2244: 
hyp=['MOST', 'OF', 'ALL', 'ROBIN', 'THOUGHT', 'OF', 'HIS', 'FATHER', 'WHAT', 'WOULD', 'HE', 'COUNSEL'] +61-70970-0003-2245: ref=['IF', 'FOR', 'A', 'WHIM', 'YOU', 'BEGGAR', 'YOURSELF', 'I', 'CANNOT', 'STAY', 'YOU'] +61-70970-0003-2245: hyp=['IF', 'FOR', 'A', 'WHIM', 'YOU', 'BEGGAR', 'YOURSELF', 'I', 'CANNOT', 'STAY', 'YOU'] +61-70970-0004-2246: ref=['BUT', 'TAKE', 'IT', 'WHILST', 'I', 'LIVE', 'AND', 'WEAR', "MONTFICHET'S", 'SHIELD', 'IN', 'THE', 'DAYS', 'WHEN', 'MY', 'EYES', 'CAN', 'BE', 'REJOICED', 'BY', 'SO', 'BRAVE', 'A', 'SIGHT', 'FOR', 'YOU', 'WILL', "NE'ER", 'DISGRACE', 'OUR', 'SCUTCHEON', 'I', 'WARRANT', 'ME'] +61-70970-0004-2246: hyp=['BUT', 'TAKE', 'IT', 'WHILST', 'I', 'LIVE', 'AND', 'WHERE', "MONTFICHET'S", 'SHIELD', 'IN', 'THE', 'DAYS', 'WHEN', 'MY', 'EYES', 'CAN', 'BE', 'REJOICED', 'BY', 'SO', 'BRAVE', 'A', 'SIGHT', 'FOR', 'YOU', 'WILL', 'NEVER', 'DISGRACE', 'OUR', 'DUCHEN', 'I', 'WARRANT', 'ME'] +61-70970-0005-2247: ref=['THE', 'LAD', 'HAD', 'CHECKED', 'HIM', 'THEN'] +61-70970-0005-2247: hyp=['THE', 'LAD', 'HAD', 'CHECKED', 'HIM', 'THEN'] +61-70970-0006-2248: ref=['NEVER', 'THAT', 'SIR', 'HE', 'HAD', 'SAID'] +61-70970-0006-2248: hyp=['NEVER', 'THAT', 'SIR', 'HE', 'HAD', 'SAID'] +61-70970-0007-2249: ref=['HE', 'WAS', 'IN', 'DEEP', 'CONVERSE', 'WITH', 'THE', 'CLERK', 'AND', 'ENTERED', 'THE', 'HALL', 'HOLDING', 'HIM', 'BY', 'THE', 'ARM'] +61-70970-0007-2249: hyp=['HE', 'WAS', 'IN', 'DEEP', 'CONVERSE', 'WITH', 'THE', 'CLERK', 'AND', 'ENTERED', 'THE', 'HALL', 'HOLDING', 'HIM', 'BY', 'THE', 'ARM'] +61-70970-0008-2250: ref=['NOW', 'TO', 'BED', 'BOY'] +61-70970-0008-2250: hyp=['NOW', 'TO', 'BED', 'BOY'] +61-70970-0009-2251: ref=['TIS', 'LATE', 'AND', 'I', 'GO', 'MYSELF', 'WITHIN', 'A', 'SHORT', 'SPACE'] +61-70970-0009-2251: hyp=['TIS', 'LATE', 'AND', 'I', 'GO', 'MYSELF', 'WITHIN', 'A', 'SHORT', 'SPACE'] +61-70970-0010-2252: ref=['DISMISS', 'YOUR', 'SQUIRE', 'ROBIN', 'AND', 'BID', 'ME', 'GOOD', 'E', 'E', 'N'] +61-70970-0010-2252: hyp=['DISMISS', 'YOUR', 'SQUIRE', 'ROBIN', 'AND', 'BID', 'ME', 'GOOD', 'EVEN'] +61-70970-0011-2253: ref=['AS', 'ANY', 'IN', 'ENGLAND', 'I', 'WOULD', 'SAY', 'SAID', 'GAMEWELL', 'PROUDLY', 'THAT', 'IS', 'IN', 'HIS', 'DAY'] +61-70970-0011-2253: hyp=['AS', 'ANY', 'IN', 'ENGLAND', 'I', 'WOULD', 'SAY', 'SAID', 'GAMEWELL', 'PROUDLY', 'THAT', 'IS', 'IN', 'HIS', 'DAY'] +61-70970-0012-2254: ref=['YET', 'HE', 'WILL', 'TEACH', 'YOU', 'A', 'FEW', 'TRICKS', 'WHEN', 'MORNING', 'IS', 'COME'] +61-70970-0012-2254: hyp=['YET', 'HE', 'WILL', 'TEACH', 'YOU', 'A', 'FEW', 'TRICKS', 'WHEN', 'MORNING', 'IS', 'COME'] +61-70970-0013-2255: ref=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'ALTER', 'HIS', 'SLEEPING', 'ROOM', 'TO', 'ONE', 'NEARER', 'TO', "GAMEWELL'S", 'CHAMBER'] +61-70970-0013-2255: hyp=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'ALTER', 'HIS', 'SLEEPING', 'ROOM', 'TO', 'ONE', 'NEARER', 'TO', "GAMEWELL'S", 'CHAMBER'] +61-70970-0014-2256: ref=['PRESENTLY', 'HE', 'CROSSED', 'THE', 'FLOOR', 'OF', 'HIS', 'ROOM', 'WITH', 'DECIDED', 'STEP'] +61-70970-0014-2256: hyp=['PRESENTLY', 'HE', 'CROSSED', 'THE', 'FLOOR', 'OF', 'HIS', 'ROOM', 'WITH', 'DECIDED', 'STEP'] +61-70970-0015-2257: ref=['WILL', 'CRIED', 'HE', 'SOFTLY', 'AND', 'STUTELEY', 'WHO', 'HAD', 'CHOSEN', 'HIS', 'COUCH', 'ACROSS', 'THE', 'DOOR', 'OF', 'HIS', 'YOUNG', "MASTER'S", 'CHAMBER', 'SPRANG', 'UP', 'AT', 'ONCE', 'IN', 'ANSWER'] +61-70970-0015-2257: hyp=['WILL', 'CRIED', 'HE', 'SOFTLY', 'AND', 'STUTELEY', 'WHO', 'HAD', 'CHOSEN', 'HIS', 'COUCH', 'ACROSS', 'THE', 'DOOR', 'OF', 'HIS', 'YOUNG', "MASTER'S", 'CHAMBER', 'SPRANG', 'UP', 
'AT', 'ONCE', 'IN', 'ANSWER'] +61-70970-0016-2258: ref=['WE', 'WILL', 'GO', 'OUT', 'TOGETHER', 'TO', 'THE', 'BOWER', 'THERE', 'IS', 'A', 'WAY', 'DOWN', 'TO', 'THE', 'COURT', 'FROM', 'MY', 'WINDOW'] +61-70970-0016-2258: hyp=['WE', 'WILL', 'GO', 'OUT', 'TOGETHER', 'TO', 'THE', 'BOWER', 'THERE', 'IS', 'A', 'WAY', 'DOWN', 'TO', 'THE', 'COURT', 'FROM', 'MY', 'WINDOW'] +61-70970-0017-2259: ref=['REST', 'AND', 'BE', 'STILL', 'UNTIL', 'I', 'WARN', 'YOU'] +61-70970-0017-2259: hyp=['REST', 'AND', 'BE', 'STILL', 'UNTIL', 'I', 'WARN', 'YOU'] +61-70970-0018-2260: ref=['THE', 'HOURS', 'PASSED', 'WEARILY', 'BY', 'AND', 'MOVEMENT', 'COULD', 'YET', 'BE', 'HEARD', 'ABOUT', 'THE', 'HALL'] +61-70970-0018-2260: hyp=['THE', 'HOURS', 'PASSED', 'WEARILY', 'BY', 'AND', 'MOVEMENT', 'COULD', 'YET', 'BE', 'HEARD', 'ABOUT', 'THE', 'HALL'] +61-70970-0019-2261: ref=['AT', 'LAST', 'ALL', 'WAS', 'QUIET', 'AND', 'BLACK', 'IN', 'THE', 'COURTYARD', 'OF', 'GAMEWELL'] +61-70970-0019-2261: hyp=['AT', 'LAST', 'ALL', 'WAS', 'QUIET', 'AND', 'BLACK', 'IN', 'THE', 'COURTYARD', 'OF', 'GAMEWELL'] +61-70970-0020-2262: ref=['WILL', 'WHISPERED', 'ROBIN', 'OPENING', 'HIS', 'DOOR', 'AS', 'HE', 'SPOKE', 'ARE', 'YOU', 'READY'] +61-70970-0020-2262: hyp=['WILL', 'WHISPERED', 'ROBIN', 'OPENING', 'HIS', 'DOOR', 'AS', 'HE', 'SPOKE', 'ARE', 'YOU', 'READY'] +61-70970-0021-2263: ref=['THEY', 'THEN', 'RENEWED', 'THEIR', 'JOURNEY', 'AND', 'UNDER', 'THE', 'BETTER', 'LIGHT', 'MADE', 'A', 'SAFE', 'CROSSING', 'OF', 'THE', 'STABLE', 'ROOFS'] +61-70970-0021-2263: hyp=['THEY', 'THEN', 'RENEWED', 'THEIR', 'JOURNEY', 'AND', 'UNDER', 'THE', 'BETTER', 'LIGHT', 'MADE', 'A', 'SAFE', 'CROSSING', 'OF', 'THE', 'STABLE', 'ROOFS'] +61-70970-0022-2264: ref=['ROBIN', 'ENTERED', 'THE', 'HUT', 'DRAGGING', 'THE', 'UNWILLING', 'ESQUIRE', 'AFTER', 'HIM'] +61-70970-0022-2264: hyp=['ROBIN', 'ENTERED', 'THE', 'HUT', 'DRAGGING', 'THE', 'UNWILLING', 'ESQUIRE', 'AFTER', 'HIM'] +61-70970-0023-2265: ref=['BE', 'NOT', 'SO', 'FOOLISH', 'FRIEND', 'SAID', 'FITZOOTH', 'CROSSLY'] +61-70970-0023-2265: hyp=['BE', 'NOT', 'SO', 'FOOLISH', 'FRIEND', 'SAID', 'FITZOOTH', 'CROSSLY'] +61-70970-0024-2266: ref=['THEY', 'MOVED', 'THEREAFTER', 'CAUTIOUSLY', 'ABOUT', 'THE', 'HUT', 'GROPING', 'BEFORE', 'AND', 'ABOUT', 'THEM', 'TO', 'FIND', 'SOMETHING', 'TO', 'SHOW', 'THAT', 'WARRENTON', 'HAD', 'FULFILLED', 'HIS', 'MISSION'] +61-70970-0024-2266: hyp=['THEY', 'MOVED', 'THEREAFTER', 'CAUTIOUSLY', 'ABOUT', 'THE', 'HUT', 'GROPING', 'BEFORE', 'AND', 'ABOUT', 'THEM', 'TO', 'FIND', 'SOMETHING', 'TO', 'SHOW', 'THAT', 'THE', 'WARRENTON', 'HAD', 'FULFILLED', 'HIS', 'MISSION'] +61-70970-0025-2267: ref=['THEY', 'WERE', 'UPON', 'THE', 'VERGE', 'OF', 'AN', 'OPEN', 'TRAP', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'HUT', 'AND', 'STUTELEY', 'HAD', 'TRIPPED', 'OVER', 'THE', 'EDGE', 'OF', 'THE', 'REVERSED', 'FLAP', 'MOUTH', 'OF', 'THIS', 'PIT'] +61-70970-0025-2267: hyp=['THEY', 'WERE', 'UPON', 'THE', 'VERGE', 'OF', 'AN', 'OPEN', 'TRAP', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'HUT', 'AND', 'STUTELEY', 'HAD', 'TRIPPED', 'OVER', 'THE', 'EDGE', 'OF', 'THE', 'REVERSED', 'FLAP', 'MOUTH', 'OF', 'THIS', 'PIT'] +61-70970-0026-2268: ref=["FITZOOTH'S", 'HAND', 'RESTED', 'AT', 'LAST', 'UPON', 'THE', 'TOP', 'RUNG', 'OF', 'A', 'LADDER', 'AND', 'SLOWLY', 'THE', 'TRUTH', 'CAME', 'TO', 'HIM'] +61-70970-0026-2268: hyp=["FITUTH'S", 'HAND', 'RESTED', 'AT', 'LAST', 'UPON', 'THE', 'TOPRUNG', 'OF', 'A', 'LADDER', 'AND', 'SLOWLY', 'THE', 'TRUTH', 'CAME', 'TO', 'HIM'] +61-70970-0027-2269: ref=['ROBIN', 'CAREFULLY', 'DESCENDED', 'THE', 
'LADDER', 'AND', 'FOUND', 'HIMSELF', 'SOON', 'UPON', 'FIRM', 'ROCKY', 'GROUND'] +61-70970-0027-2269: hyp=['ROBIN', 'CAREFULLY', 'DESCENDED', 'THE', 'LADDER', 'AND', 'FOUND', 'HIMSELF', 'SOON', 'UPON', 'FIRM', 'ROCKY', 'GROUND'] +61-70970-0028-2270: ref=['STUTELEY', 'WAS', 'BY', 'HIS', 'SIDE', 'IN', 'A', 'FLASH', 'AND', 'THEN', 'THEY', 'BOTH', 'BEGAN', 'FEELING', 'ABOUT', 'THEM', 'TO', 'ASCERTAIN', 'THE', 'SHAPE', 'AND', 'CHARACTER', 'OF', 'THIS', 'VAULT'] +61-70970-0028-2270: hyp=['STUTELEY', 'WAS', 'BY', 'HIS', 'SIDE', 'IN', 'A', 'FLASH', 'AND', 'THEN', 'THEY', 'BOTH', 'BEGAN', 'FEELING', 'ABOUT', 'THEM', 'TO', 'ASCERTAIN', 'THE', 'SHAPE', 'AND', 'CHARACTER', 'OF', 'THIS', 'VAULT'] +61-70970-0029-2271: ref=['FROM', 'THE', 'BLACKNESS', 'BEHIND', 'THE', 'LIGHT', 'THEY', 'HEARD', 'A', 'VOICE', "WARRENTON'S"] +61-70970-0029-2271: hyp=['FROM', 'THE', 'BLACKNESS', 'BEHIND', 'THE', 'LIGHT', 'THEY', 'HEARD', 'A', 'VOICE', 'WARRENTONS'] +61-70970-0030-2272: ref=['SAVE', 'ME', 'MASTERS', 'BUT', 'YOU', 'STARTLED', 'ME', 'RARELY'] +61-70970-0030-2272: hyp=['SAVE', 'ME', 'MASTERS', 'BUT', 'YOU', 'STARTLED', 'ME', 'RARELY'] +61-70970-0031-2273: ref=['CRIED', 'HE', 'WAVING', 'THE', 'LANTHORN', 'BEFORE', 'HIM', 'TO', 'MAKE', 'SURE', 'THAT', 'THESE', 'WERE', 'NO', 'GHOSTS', 'IN', 'FRONT', 'OF', 'HIM'] +61-70970-0031-2273: hyp=['CRIED', 'HE', 'WAVING', 'THE', 'LANTERN', 'BEFORE', 'HIM', 'TO', 'MAKE', 'SURE', 'THAT', 'THESE', 'WERE', 'NO', 'GHOSTS', 'IN', 'FRONT', 'OF', 'HIM'] +61-70970-0032-2274: ref=['ENQUIRED', 'ROBIN', 'WITH', 'HIS', 'SUSPICIONS', 'STILL', 'UPON', 'HIM'] +61-70970-0032-2274: hyp=['INQUIRED', 'ROBIN', 'WITH', 'HIS', 'SUSPICION', 'STILL', 'UPON', 'HIM'] +61-70970-0033-2275: ref=['TRULY', 'SUCH', 'A', 'HORSE', 'SHOULD', 'BE', 'WORTH', 'MUCH', 'IN', 'NOTTINGHAM', 'FAIR'] +61-70970-0033-2275: hyp=['TRULY', 'SUCH', 'A', 'HORSE', 'WOULD', 'BE', 'WORTH', 'MUCH', 'IN', 'NOTTINGHAM', 'FAIR'] +61-70970-0034-2276: ref=['NAY', 'NAY', 'LORDING', 'ANSWERED', 'WARRENTON', 'WITH', 'A', 'HALF', 'LAUGH'] +61-70970-0034-2276: hyp=['NAY', 'NAY', 'LORDING', 'ANSWERED', 'WARRENTON', 'WITH', 'A', 'HALF', 'LAUGH'] +61-70970-0035-2277: ref=['WARRENTON', 'SPOKE', 'THUS', 'WITH', 'SIGNIFICANCE', 'TO', 'SHOW', 'ROBIN', 'THAT', 'HE', 'WAS', 'NOT', 'TO', 'THINK', "GEOFFREY'S", 'CLAIMS', 'TO', 'THE', 'ESTATE', 'WOULD', 'BE', 'PASSED', 'BY'] +61-70970-0035-2277: hyp=['WARRENTON', 'SPOKE', 'THUS', 'WITH', 'SIGNIFICANCE', 'TO', 'SHOW', 'ROBIN', 'THAT', 'HE', 'WAS', 'NOT', 'TO', 'THINK', "JEFFREY'S", 'CLAIMS', 'TO', 'THE', 'ESTATE', 'WOULD', 'BE', 'PASSED', 'BY'] +61-70970-0036-2278: ref=['ROBIN', 'FITZOOTH', 'SAW', 'THAT', 'HIS', 'DOUBTS', 'OF', 'WARRENTON', 'HAD', 'BEEN', 'UNFAIR', 'AND', 'HE', 'BECAME', 'ASHAMED', 'OF', 'HIMSELF', 'FOR', 'HARBORING', 'THEM'] +61-70970-0036-2278: hyp=['ROBIN', 'FITZOOTH', 'SAW', 'THAT', 'HIS', 'DOUBTS', 'OF', 'WARRENTON', 'HAD', 'BEEN', 'UNFAIR', 'AND', 'HE', 'BECAME', 'ASHAMED', 'OF', 'HIMSELF', 'FOR', 'HARBOURING', 'THEM'] +61-70970-0037-2279: ref=['HIS', 'TONES', 'RANG', 'PLEASANTLY', 'ON', "WARRENTON'S", 'EARS', 'AND', 'FORTHWITH', 'A', 'GOOD', 'FELLOWSHIP', 'WAS', 'HERALDED', 'BETWEEN', 'THEM'] +61-70970-0037-2279: hyp=['HIS', 'TONES', 'RANG', 'PLEASANTLY', "UNWARRANTON'S", 'EARS', 'AND', 'FORTHWITH', 'THE', 'GOOD', 'FELLOWSHIP', 'WAS', 'HERALDED', 'BETWEEN', 'THEM'] +61-70970-0038-2280: ref=['THE', 'OLD', 'SERVANT', 'TOLD', 'HIM', 'QUIETLY', 'AS', 'THEY', 'CREPT', 'BACK', 'TO', 'GAMEWELL', 'THAT', 'THIS', 'PASSAGE', 'WAY', 'LED', 'FROM', 'THE', 'HUT', 'IN', 'THE', 'PLEASANCE', 
'TO', 'SHERWOOD', 'AND', 'THAT', 'GEOFFREY', 'FOR', 'THE', 'TIME', 'WAS', 'HIDING', 'WITH', 'THE', 'OUTLAWS', 'IN', 'THE', 'FOREST'] +61-70970-0038-2280: hyp=['THE', 'OLD', 'SERVANT', 'TOLD', 'HIM', 'QUIETLY', 'AS', 'THEY', 'CREPT', 'BACK', 'TO', 'GAMEWELL', 'THAT', 'THIS', 'PASSAGEWAY', 'LED', 'FROM', 'THE', 'HUT', 'IN', 'THE', 'PLEASANTS', 'TO', 'SHERWOOD', 'AND', 'THAT', 'JEFFREY', 'FOR', 'THE', 'TIME', 'WAS', 'HIDING', 'WITH', 'THE', 'OUTLAWS', 'IN', 'THE', 'FOREST'] +61-70970-0039-2281: ref=['HE', 'IMPLORES', 'US', 'TO', 'BE', 'DISCREET', 'AS', 'THE', 'GRAVE', 'IN', 'THIS', 'MATTER', 'FOR', 'IN', 'SOOTH', 'HIS', 'LIFE', 'IS', 'IN', 'THE', 'HOLLOW', 'OF', 'OUR', 'HANDS'] +61-70970-0039-2281: hyp=['HE', 'IMPLORS', 'US', 'TO', 'BE', 'DISCREET', 'AS', 'THE', 'GRAVE', 'IN', 'THIS', 'MATTER', 'FOR', 'IN', 'SOOTH', 'HIS', 'LIFE', 'IS', 'IN', 'THE', 'HOLLOW', 'OF', 'OUR', 'HANDS'] +61-70970-0040-2282: ref=['THEY', 'REGAINED', 'THEIR', 'APARTMENT', 'APPARENTLY', 'WITHOUT', 'DISTURBING', 'THE', 'HOUSEHOLD', 'OF', 'GAMEWELL'] +61-70970-0040-2282: hyp=['THEY', 'REGAIN', 'THEIR', 'APARTMENT', 'APPARENTLY', 'WITHOUT', 'DISTURBING', 'THE', 'HOUSEHOLD', 'OF', 'GAINWELL'] +672-122797-0000-1529: ref=['OUT', 'IN', 'THE', 'WOODS', 'STOOD', 'A', 'NICE', 'LITTLE', 'FIR', 'TREE'] +672-122797-0000-1529: hyp=['OUT', 'IN', 'THE', 'WOOD', 'STOOD', 'A', 'NICE', 'LITTLE', 'FIR', 'TREE'] +672-122797-0001-1530: ref=['THE', 'PLACE', 'HE', 'HAD', 'WAS', 'A', 'VERY', 'GOOD', 'ONE', 'THE', 'SUN', 'SHONE', 'ON', 'HIM', 'AS', 'TO', 'FRESH', 'AIR', 'THERE', 'WAS', 'ENOUGH', 'OF', 'THAT', 'AND', 'ROUND', 'HIM', 'GREW', 'MANY', 'LARGE', 'SIZED', 'COMRADES', 'PINES', 'AS', 'WELL', 'AS', 'FIRS'] +672-122797-0001-1530: hyp=['THE', 'PLACE', 'HE', 'HAD', 'WAS', 'A', 'VERY', 'GOOD', 'ONE', 'THE', 'SUN', 'SHONE', 'ON', 'HIM', 'AS', 'TO', 'FRESH', 'AIR', 'THERE', 'WAS', 'ENOUGH', 'OF', 'THAT', 'AND', 'ROUND', 'HIM', 'GREW', 'MANY', 'LARGE', 'SIZED', 'COMRADES', 'PINES', 'AS', 'WELL', 'AS', 'FURS'] +672-122797-0002-1531: ref=['HE', 'DID', 'NOT', 'THINK', 'OF', 'THE', 'WARM', 'SUN', 'AND', 'OF', 'THE', 'FRESH', 'AIR', 'HE', 'DID', 'NOT', 'CARE', 'FOR', 'THE', 'LITTLE', 'COTTAGE', 'CHILDREN', 'THAT', 'RAN', 'ABOUT', 'AND', 'PRATTLED', 'WHEN', 'THEY', 'WERE', 'IN', 'THE', 'WOODS', 'LOOKING', 'FOR', 'WILD', 'STRAWBERRIES'] +672-122797-0002-1531: hyp=['HE', 'DID', 'NOT', 'THINK', 'OF', 'THE', 'WARM', 'SUN', 'AND', 'OF', 'THE', 'FRESH', 'AIR', 'HE', 'DID', 'NOT', 'CARE', 'FOR', 'THE', 'LITTLE', 'COTTAGE', 'CHILDREN', 'THAT', 'RAN', 'ABOUT', 'IN', 'PRATTLED', 'WHEN', 'THEY', 'WERE', 'IN', 'THE', 'WOODS', 'LOOKING', 'FOR', 'WILD', 'STRAWBERRIES'] +672-122797-0003-1532: ref=['BUT', 'THIS', 'WAS', 'WHAT', 'THE', 'TREE', 'COULD', 'NOT', 'BEAR', 'TO', 'HEAR'] +672-122797-0003-1532: hyp=['BUT', 'THIS', 'WAS', 'WHAT', 'THE', 'TREE', 'COULD', 'NOT', 'BEAR', 'TO', 'HEAR'] +672-122797-0004-1533: ref=['IN', 'WINTER', 'WHEN', 'THE', 'SNOW', 'LAY', 'GLITTERING', 'ON', 'THE', 'GROUND', 'A', 'HARE', 'WOULD', 'OFTEN', 'COME', 'LEAPING', 'ALONG', 'AND', 'JUMP', 'RIGHT', 'OVER', 'THE', 'LITTLE', 'TREE'] +672-122797-0004-1533: hyp=['IN', 'WINTER', 'WHEN', 'THE', 'SNOW', 'LAY', 'GLITTERING', 'ON', 'THE', 'GROUND', 'A', 'HARE', 'WOULD', 'OFTEN', 'COME', 'LEAPING', 'ALONG', 'AND', 'JUMP', 'RIGHT', 'OVER', 'THE', 'LITTLE', 'TREE'] +672-122797-0005-1534: ref=['OH', 'THAT', 'MADE', 'HIM', 'SO', 'ANGRY'] +672-122797-0005-1534: hyp=['OH', 'THAT', 'MADE', 'HIM', 'SO', 'ANGRY'] +672-122797-0006-1535: ref=['TO', 'GROW', 'AND', 'GROW', 'TO', 'GET', 'OLDER', 'AND', 'BE', 
'TALL', 'THOUGHT', 'THE', 'TREE', 'THAT', 'AFTER', 'ALL', 'IS', 'THE', 'MOST', 'DELIGHTFUL', 'THING', 'IN', 'THE', 'WORLD'] +672-122797-0006-1535: hyp=['TO', 'GROW', 'AND', 'GROW', 'TO', 'GET', 'OLDER', 'AND', 'BE', 'TALL', 'THOUGHT', 'THE', 'TREE', 'THAT', 'AFTER', 'ALL', 'IS', 'THE', 'MOST', 'DELIGHTFUL', 'THING', 'IN', 'THE', 'WORLD'] +672-122797-0007-1536: ref=['IN', 'AUTUMN', 'THE', 'WOOD', 'CUTTERS', 'ALWAYS', 'CAME', 'AND', 'FELLED', 'SOME', 'OF', 'THE', 'LARGEST', 'TREES'] +672-122797-0007-1536: hyp=['IN', 'AUTUMN', 'THE', 'WOODCUTTERS', 'ALWAYS', 'CAME', 'AND', 'FELLED', 'SOME', 'OF', 'THE', 'LARGEST', 'TREES'] +672-122797-0008-1537: ref=['THIS', 'HAPPENED', 'EVERY', 'YEAR', 'AND', 'THE', 'YOUNG', 'FIR', 'TREE', 'THAT', 'HAD', 'NOW', 'GROWN', 'TO', 'A', 'VERY', 'COMELY', 'SIZE', 'TREMBLED', 'AT', 'THE', 'SIGHT', 'FOR', 'THE', 'MAGNIFICENT', 'GREAT', 'TREES', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'NOISE', 'AND', 'CRACKING', 'THE', 'BRANCHES', 'WERE', 'LOPPED', 'OFF', 'AND', 'THE', 'TREES', 'LOOKED', 'LONG', 'AND', 'BARE', 'THEY', 'WERE', 'HARDLY', 'TO', 'BE', 'RECOGNISED', 'AND', 'THEN', 'THEY', 'WERE', 'LAID', 'IN', 'CARTS', 'AND', 'THE', 'HORSES', 'DRAGGED', 'THEM', 'OUT', 'OF', 'THE', 'WOOD'] +672-122797-0008-1537: hyp=['THIS', 'HAPPENED', 'EVERY', 'YEAR', 'AND', 'THE', 'YOUNG', 'FIR', 'TREE', 'THAT', 'HAD', 'NOW', 'GROWN', 'TO', 'A', 'VERY', 'COMELY', 'SIZED', 'TREMBLED', 'AT', 'THE', 'SIGHT', 'FOR', 'THE', 'MAGNIFICENT', 'GREAT', 'TREES', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'NOISE', 'AND', 'CRACKING', 'THE', 'BRANCHES', 'WERE', 'LOPPED', 'OFF', 'AND', 'THE', 'TREES', 'LOOKED', 'LONG', 'AND', 'BARE', 'THEY', 'WERE', 'HARDLY', 'TO', 'BE', 'RECOGNIZED', 'AND', 'THEN', 'THEY', 'WERE', 'LAID', 'IN', 'CARTS', 'AND', 'THE', 'HORSES', 'DRAGGED', 'THEM', 'OUT', 'OF', 'THE', 'WOOD'] +672-122797-0009-1538: ref=['HAVE', 'YOU', 'NOT', 'MET', 'THEM', 'ANYWHERE'] +672-122797-0009-1538: hyp=['HAVE', 'YOU', 'NOT', 'MET', 'THE', 'MANYWHERE'] +672-122797-0010-1539: ref=['REJOICE', 'IN', 'THY', 'GROWTH', 'SAID', 'THE', 'SUNBEAMS'] +672-122797-0010-1539: hyp=['REJOICE', 'IN', 'THY', 'GROWTH', 'SAID', 'THE', 'SUNBEAMS'] +672-122797-0011-1540: ref=['AND', 'THEN', 'WHAT', 'HAPPENS', 'THEN'] +672-122797-0011-1540: hyp=['AND', 'THEN', 'WHAT', 'HAPPENS', 'THEN'] +672-122797-0012-1541: ref=['I', 'WOULD', 'FAIN', 'KNOW', 'IF', 'I', 'AM', 'DESTINED', 'FOR', 'SO', 'GLORIOUS', 'A', 'CAREER', 'CRIED', 'THE', 'TREE', 'REJOICING'] +672-122797-0012-1541: hyp=['I', 'WOULD', 'FAIN', 'KNOW', 'IF', 'I', 'AM', 'DESTINED', 'FOR', 'SO', 'GLORIOUS', 'A', 'CAREER', 'CRIED', 'THE', 'TREE', 'REJOICING'] +672-122797-0013-1542: ref=['I', 'AM', 'NOW', 'TALL', 'AND', 'MY', 'BRANCHES', 'SPREAD', 'LIKE', 'THE', 'OTHERS', 'THAT', 'WERE', 'CARRIED', 'OFF', 'LAST', 'YEAR', 'OH'] +672-122797-0013-1542: hyp=['I', 'AM', 'NOW', 'TALL', 'AND', 'MY', 'BRANCHES', 'SPREAD', 'LIKE', 'THE', 'OTHERS', 'THAT', 'WERE', 'CARRIED', 'OFF', 'LAST', 'YEAR', 'OH'] +672-122797-0014-1543: ref=['WERE', 'I', 'BUT', 'ALREADY', 'ON', 'THE', 'CART'] +672-122797-0014-1543: hyp=['WERE', 'I', 'BUT', 'ALREADY', 'ON', 'THE', 'CART'] +672-122797-0015-1544: ref=['WERE', 'I', 'IN', 'THE', 'WARM', 'ROOM', 'WITH', 'ALL', 'THE', 'SPLENDOR', 'AND', 'MAGNIFICENCE'] +672-122797-0015-1544: hyp=['WHERE', 'I', 'IN', 'THE', 'WARM', 'ROOM', 'WITH', 'ALL', 'BUT', 'SPLENDOUR', 'AND', 'MAGNIFICENCE'] +672-122797-0016-1545: ref=['YES', 'THEN', 'SOMETHING', 'BETTER', 'SOMETHING', 'STILL', 'GRANDER', 'WILL', 'SURELY', 'FOLLOW', 'OR', 'WHEREFORE', 'SHOULD', 'THEY', 'THUS', 
'ORNAMENT', 'ME'] +672-122797-0016-1545: hyp=['YES', 'AND', 'SOMETHING', 'BETTER', 'SOMETHING', 'STILL', 'GRANDER', 'WILL', 'SURELY', 'FOLLOW', 'OR', 'WHEREFORE', 'SHOULD', 'THEY', 'THUS', 'ORNAMENT', 'ME'] +672-122797-0017-1546: ref=['SOMETHING', 'BETTER', 'SOMETHING', 'STILL', 'GRANDER', 'MUST', 'FOLLOW', 'BUT', 'WHAT'] +672-122797-0017-1546: hyp=['SOMETHING', 'BETTER', 'OR', 'SOME', 'THING', 'STILL', 'GRANDER', 'MUST', 'FOLLOW', 'BUT', 'WHAT'] +672-122797-0018-1547: ref=['REJOICE', 'IN', 'OUR', 'PRESENCE', 'SAID', 'THE', 'AIR', 'AND', 'THE', 'SUNLIGHT'] +672-122797-0018-1547: hyp=['REJOICE', 'IN', 'OUR', 'PRESENCE', 'SAID', 'THE', 'HEIR', 'IN', 'THE', 'SUNLIGHT'] +672-122797-0019-1548: ref=['REJOICE', 'IN', 'THY', 'OWN', 'FRESH', 'YOUTH'] +672-122797-0019-1548: hyp=['REJOICE', 'IN', 'THY', 'OWN', 'FRESH', 'YOUTH'] +672-122797-0020-1549: ref=['BUT', 'THE', 'TREE', 'DID', 'NOT', 'REJOICE', 'AT', 'ALL', 'HE', 'GREW', 'AND', 'GREW', 'AND', 'WAS', 'GREEN', 'BOTH', 'WINTER', 'AND', 'SUMMER'] +672-122797-0020-1549: hyp=['BUT', 'THE', 'TREE', 'DID', 'NOT', 'REJOICE', 'AT', 'ALL', 'HE', 'GREW', 'AND', 'GREW', 'AND', 'WAS', 'GREEN', 'BOTH', 'WINTER', 'AND', 'SUMMER'] +672-122797-0021-1550: ref=['AND', 'TOWARDS', 'CHRISTMAS', 'HE', 'WAS', 'ONE', 'OF', 'THE', 'FIRST', 'THAT', 'WAS', 'CUT', 'DOWN'] +672-122797-0021-1550: hyp=['AND', 'TOWARDS', 'CHRISTMAS', 'HE', 'WAS', 'ONE', 'OF', 'THE', 'FIRST', 'THAT', 'WAS', 'CUT', 'DOWN'] +672-122797-0022-1551: ref=['THE', 'AXE', 'STRUCK', 'DEEP', 'INTO', 'THE', 'VERY', 'PITH', 'THE', 'TREE', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'A', 'SIGH', 'HE', 'FELT', 'A', 'PANG', 'IT', 'WAS', 'LIKE', 'A', 'SWOON', 'HE', 'COULD', 'NOT', 'THINK', 'OF', 'HAPPINESS', 'FOR', 'HE', 'WAS', 'SORROWFUL', 'AT', 'BEING', 'SEPARATED', 'FROM', 'HIS', 'HOME', 'FROM', 'THE', 'PLACE', 'WHERE', 'HE', 'HAD', 'SPRUNG', 'UP'] +672-122797-0022-1551: hyp=['THE', 'AXE', 'STRUCK', 'DEEP', 'INTO', 'THE', 'VERY', 'PITH', 'THE', 'TREE', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'A', 'SIGH', 'HE', 'FELT', 'A', 'PANG', 'IT', 'WAS', 'LIKE', 'A', 'SWOON', 'HE', 'COULD', 'NOT', 'THINK', 'OF', 'HAPPINESS', 'FOR', 'HE', 'WAS', 'SORROWFUL', 'AT', 'BEING', 'SEPARATED', 'FROM', 'HIS', 'HOME', 'FROM', 'THE', 'PLACE', 'WHERE', 'HE', 'HAD', 'SPRUNG', 'UP'] +672-122797-0023-1552: ref=['HE', 'WELL', 'KNEW', 'THAT', 'HE', 'SHOULD', 'NEVER', 'SEE', 'HIS', 'DEAR', 'OLD', 'COMRADES', 'THE', 'LITTLE', 'BUSHES', 'AND', 'FLOWERS', 'AROUND', 'HIM', 'ANYMORE', 'PERHAPS', 'NOT', 'EVEN', 'THE', 'BIRDS'] +672-122797-0023-1552: hyp=['HE', 'WELL', 'KNEW', 'THAT', 'HE', 'SHOULD', 'NEVER', 'SEE', 'HIS', 'DEAR', 'OLD', 'COMRADES', 'THE', 'LITTLE', 'BUSHES', 'AND', 'FLOWERS', 'AROUND', 'HIM', 'ANY', 'MORE', 'PERHAPS', 'NOT', 'EVEN', 'THE', 'BIRDS'] +672-122797-0024-1553: ref=['THE', 'DEPARTURE', 'WAS', 'NOT', 'AT', 'ALL', 'AGREEABLE'] +672-122797-0024-1553: hyp=['THE', 'DEPARTURE', 'WAS', 'NOT', 'AT', 'ALL', 'AGREEABLE'] +672-122797-0025-1554: ref=['THE', 'TREE', 'ONLY', 'CAME', 'TO', 'HIMSELF', 'WHEN', 'HE', 'WAS', 'UNLOADED', 'IN', 'A', 'COURT', 'YARD', 'WITH', 'THE', 'OTHER', 'TREES', 'AND', 'HEARD', 'A', 'MAN', 'SAY', 'THAT', 'ONE', 'IS', 'SPLENDID', 'WE', "DON'T", 'WANT', 'THE', 'OTHERS'] +672-122797-0025-1554: hyp=['THE', 'TREE', 'ONLY', 'CAME', 'TO', 'HIMSELF', 'WHEN', 'HE', 'WAS', 'UNLOADED', 'IN', 'A', 'COURTYARD', 'WITH', 'THE', 'OTHER', 'TREES', 'AND', 'HEARD', 'A', 'MAN', 'SAY', 'THAT', 'ONE', 'IS', 'SPLENDID', 'WE', "DON'T", 'WANT', 'THE', 'OTHERS'] +672-122797-0026-1555: ref=['THERE', 'TOO', 'WERE', 'LARGE', 'EASY', 
'CHAIRS', 'SILKEN', 'SOFAS', 'LARGE', 'TABLES', 'FULL', 'OF', 'PICTURE', 'BOOKS', 'AND', 'FULL', 'OF', 'TOYS', 'WORTH', 'HUNDREDS', 'AND', 'HUNDREDS', 'OF', 'CROWNS', 'AT', 'LEAST', 'THE', 'CHILDREN', 'SAID', 'SO'] +672-122797-0026-1555: hyp=['THERE', 'TOO', 'WERE', 'LARGE', 'EASY', 'CHAIRS', 'SILKEN', 'SOFAS', 'LARGE', 'TABLES', 'FULL', 'OF', 'PICTURE', 'BOOKS', 'AND', 'FULL', 'OF', 'TOYS', 'WORTH', 'HUNDREDS', 'AND', 'HUNDREDS', 'OF', 'CROWNS', 'AT', 'LEAST', 'THE', 'CHILDREN', 'SAID', 'SO'] +672-122797-0027-1556: ref=['THE', 'SERVANTS', 'AS', 'WELL', 'AS', 'THE', 'YOUNG', 'LADIES', 'DECORATED', 'IT'] +672-122797-0027-1556: hyp=['THE', 'SERVANTS', 'AS', 'WELL', 'AS', 'THE', 'YOUNG', 'LADIES', 'DECORATED', 'IT'] +672-122797-0028-1557: ref=['THIS', 'EVENING', 'THEY', 'ALL', 'SAID'] +672-122797-0028-1557: hyp=['THIS', 'EVENING', 'THEY', 'ALL', 'SAID'] +672-122797-0029-1558: ref=['HOW', 'IT', 'WILL', 'SHINE', 'THIS', 'EVENING'] +672-122797-0029-1558: hyp=['HOW', 'IT', 'WILL', 'SHINE', 'THIS', 'EVENING'] +672-122797-0030-1559: ref=['PERHAPS', 'THE', 'OTHER', 'TREES', 'FROM', 'THE', 'FOREST', 'WILL', 'COME', 'TO', 'LOOK', 'AT', 'ME'] +672-122797-0030-1559: hyp=['PERHAPS', 'THE', 'OTHER', 'TREES', 'FROM', 'THE', 'FOREST', 'WILL', 'COME', 'TO', 'LOOK', 'AT', 'ME'] +672-122797-0031-1560: ref=['IT', 'BLAZED', 'UP', 'FAMOUSLY', 'HELP', 'HELP'] +672-122797-0031-1560: hyp=['IT', 'BLAZED', 'UP', 'FAMOUSLY', 'HELP', 'HELP'] +672-122797-0032-1561: ref=['CRIED', 'THE', 'YOUNG', 'LADIES', 'AND', 'THEY', 'QUICKLY', 'PUT', 'OUT', 'THE', 'FIRE'] +672-122797-0032-1561: hyp=['CRIED', 'THE', 'YOUNG', 'LADIES', 'AND', 'THEY', 'QUICKLY', 'PUT', 'OUT', 'THE', 'FIRE'] +672-122797-0033-1562: ref=['A', 'STORY'] +672-122797-0033-1562: hyp=['A', 'STORY'] +672-122797-0034-1563: ref=['A', 'STORY', 'CRIED', 'THE', 'CHILDREN', 'DRAWING', 'A', 'LITTLE', 'FAT', 'MAN', 'TOWARDS', 'THE', 'TREE'] +672-122797-0034-1563: hyp=['A', 'STORY', 'CRIED', 'THE', 'CHILDREN', 'DRAWING', 'A', 'LITTLE', 'FAT', 'MAN', 'TOWARDS', 'THE', 'TREE'] +672-122797-0035-1564: ref=['BUT', 'I', 'SHALL', 'TELL', 'ONLY', 'ONE', 'STORY'] +672-122797-0035-1564: hyp=['BUT', 'I', 'SHALL', 'TELL', 'ONLY', 'ONE', 'STORY'] +672-122797-0036-1565: ref=['HUMPY', 'DUMPY', 'FELL', 'DOWNSTAIRS', 'AND', 'YET', 'HE', 'MARRIED', 'THE', 'PRINCESS'] +672-122797-0036-1565: hyp=['HUMPY', "DON'T", 'BE', 'FELL', 'DOWNSTAIRS', 'AND', 'YET', 'HE', 'MARRIED', 'THE', 'PRINCESS'] +672-122797-0037-1566: ref=["THAT'S", 'THE', 'WAY', 'OF', 'THE', 'WORLD'] +672-122797-0037-1566: hyp=["THAT'S", 'THE', 'WAY', 'OF', 'THE', 'WORLD'] +672-122797-0038-1567: ref=['THOUGHT', 'THE', 'FIR', 'TREE', 'AND', 'BELIEVED', 'IT', 'ALL', 'BECAUSE', 'THE', 'MAN', 'WHO', 'TOLD', 'THE', 'STORY', 'WAS', 'SO', 'GOOD', 'LOOKING', 'WELL', 'WELL'] +672-122797-0038-1567: hyp=['THOUGHT', 'THE', 'FIR', 'TREE', 'AND', 'BELIEVED', 'IT', 'ALL', 'BECAUSE', 'THE', 'MAN', 'WHO', 'TOLD', 'THE', 'STORY', 'WAS', 'SO', 'GOOD', 'LOOKING', 'WELL', 'WELL'] +672-122797-0039-1568: ref=['I', "WON'T", 'TREMBLE', 'TO', 'MORROW', 'THOUGHT', 'THE', 'FIR', 'TREE'] +672-122797-0039-1568: hyp=['I', "WON'T", 'TREMBLE', 'TO', 'MORROW', 'THOUGHT', 'THE', 'FIR', 'TREE'] +672-122797-0040-1569: ref=['AND', 'THE', 'WHOLE', 'NIGHT', 'THE', 'TREE', 'STOOD', 'STILL', 'AND', 'IN', 'DEEP', 'THOUGHT'] +672-122797-0040-1569: hyp=['AND', 'THE', 'WHOLE', 'NIGHT', 'THE', 'TREE', 'STOOD', 'STILL', 'AND', 'IN', 'DEEP', 'THOUGHT'] +672-122797-0041-1570: ref=['IN', 'THE', 'MORNING', 'THE', 'SERVANT', 'AND', 'THE', 'HOUSEMAID', 'CAME', 'IN'] 
+672-122797-0041-1570: hyp=['IN', 'THE', 'MORNING', 'THE', 'SERVANT', 'AND', 'THE', 'HOUSEMAID', 'CAME', 'IN'] +672-122797-0042-1571: ref=['BUT', 'THEY', 'DRAGGED', 'HIM', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'UP', 'THE', 'STAIRS', 'INTO', 'THE', 'LOFT', 'AND', 'HERE', 'IN', 'A', 'DARK', 'CORNER', 'WHERE', 'NO', 'DAYLIGHT', 'COULD', 'ENTER', 'THEY', 'LEFT', 'HIM'] +672-122797-0042-1571: hyp=['BUT', 'THEY', 'DRAGGED', 'HIM', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'UP', 'THE', 'STAIRS', 'INTO', 'THE', 'LOFT', 'AND', 'HERE', 'IT', 'A', 'DARK', 'CORNER', 'WHERE', 'NO', 'DAYLIGHT', 'COULD', 'ENTER', 'THEY', 'LEFT', 'HIM'] +672-122797-0043-1572: ref=["WHAT'S", 'THE', 'MEANING', 'OF', 'THIS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0043-1572: hyp=["WHAT'S", 'THE', 'MEANING', 'OF', 'THIS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0044-1573: ref=['AND', 'HE', 'LEANED', 'AGAINST', 'THE', 'WALL', 'LOST', 'IN', 'REVERIE'] +672-122797-0044-1573: hyp=['AND', 'HE', 'LEANED', 'AGAINST', 'THE', 'WALL', 'LOST', 'IN', 'REVERIE'] +672-122797-0045-1574: ref=['TIME', 'ENOUGH', 'HAD', 'HE', 'TOO', 'FOR', 'HIS', 'REFLECTIONS', 'FOR', 'DAYS', 'AND', 'NIGHTS', 'PASSED', 'ON', 'AND', 'NOBODY', 'CAME', 'UP', 'AND', 'WHEN', 'AT', 'LAST', 'SOMEBODY', 'DID', 'COME', 'IT', 'WAS', 'ONLY', 'TO', 'PUT', 'SOME', 'GREAT', 'TRUNKS', 'IN', 'A', 'CORNER', 'OUT', 'OF', 'THE', 'WAY'] +672-122797-0045-1574: hyp=['TIME', 'ENOUGH', 'HAD', 'HE', 'TOO', 'FOR', 'HIS', 'REFLECTIONS', 'FOR', 'DAYS', 'AND', 'NIGHTS', 'PASSED', 'ON', 'AND', 'NOBODY', 'CAME', 'UP', 'AND', 'WHEN', 'AT', 'LAST', 'SOMEBODY', 'DID', 'COME', 'IT', 'WAS', 'ONLY', 'TO', 'PUT', 'SOME', 'GREAT', 'TRUNKS', 'IN', 'A', 'CORNER', 'OUT', 'OF', 'THE', 'WAY'] +672-122797-0046-1575: ref=['TIS', 'NOW', 'WINTER', 'OUT', 'OF', 'DOORS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0046-1575: hyp=['TIS', 'NOW', 'WINTER', 'OUT', 'OF', 'DOORS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0047-1576: ref=['HOW', 'KIND', 'MAN', 'IS', 'AFTER', 'ALL'] +672-122797-0047-1576: hyp=['HOW', 'KIND', 'MAN', 'IS', 'AFTER', 'ALL'] +672-122797-0048-1577: ref=['IF', 'IT', 'ONLY', 'WERE', 'NOT', 'SO', 'DARK', 'HERE', 'AND', 'SO', 'TERRIBLY', 'LONELY'] +672-122797-0048-1577: hyp=['IF', 'IT', 'ONLY', 'WERE', 'NOT', 'SO', 'DARK', 'HERE', 'AND', 'SO', 'TERRIBLY', 'LONELY'] +672-122797-0049-1578: ref=['SQUEAK', 'SQUEAK'] +672-122797-0049-1578: hyp=['SQUEAK', 'SQUEAK'] +672-122797-0050-1579: ref=['THEY', 'SNUFFED', 'ABOUT', 'THE', 'FIR', 'TREE', 'AND', 'RUSTLED', 'AMONG', 'THE', 'BRANCHES'] +672-122797-0050-1579: hyp=['THEY', 'SNUFFED', 'ABOUT', 'THE', 'FIR', 'TREE', 'AND', 'RUSTLED', 'AMONG', 'THE', 'BRANCHES'] +672-122797-0051-1580: ref=['I', 'AM', 'BY', 'NO', 'MEANS', 'OLD', 'SAID', 'THE', 'FIR', 'TREE'] +672-122797-0051-1580: hyp=['I', 'AM', 'BY', 'NO', 'MEANS', 'OLD', 'SAID', 'THE', 'FIR', 'TREE'] +672-122797-0052-1581: ref=["THERE'S", 'MANY', 'A', 'ONE', 'CONSIDERABLY', 'OLDER', 'THAN', 'I', 'AM'] +672-122797-0052-1581: hyp=["THERE'S", 'MANY', 'A', 'ONE', 'CONSIDERABLY', 'OLDER', 'THAN', 'I', 'AM'] +672-122797-0053-1582: ref=['THEY', 'WERE', 'SO', 'EXTREMELY', 'CURIOUS'] +672-122797-0053-1582: hyp=['THEY', 'WERE', 'SO', 'EXTREMELY', 'CURIOUS'] +672-122797-0054-1583: ref=['I', 'KNOW', 'NO', 'SUCH', 'PLACE', 'SAID', 'THE', 'TREE'] +672-122797-0054-1583: hyp=['I', 'KNOW', 'NO', 'SUCH', 'PLACE', 'SAID', 'THE', 'TREE'] +672-122797-0055-1584: ref=['AND', 'THEN', 'HE', 'TOLD', 'ALL', 'ABOUT', 'HIS', 'YOUTH', 'AND', 'THE', 'LITTLE', 'MICE', 'HAD', 'NEVER', 'HEARD', 'THE', 'LIKE', 'BEFORE', 'AND', 'THEY', 'LISTENED', 'AND', 
'SAID'] +672-122797-0055-1584: hyp=['AND', 'THEN', 'HE', 'TOLD', 'ALL', 'ABOUT', 'HIS', 'YOUTH', 'AND', 'THE', 'LITTLE', 'MICE', 'HAD', 'NEVER', 'HEARD', 'THE', 'LIKE', 'BEFORE', 'AND', 'THEY', 'LISTENED', 'AND', 'SAID'] +672-122797-0056-1585: ref=['SAID', 'THE', 'FIR', 'TREE', 'THINKING', 'OVER', 'WHAT', 'HE', 'HAD', 'HIMSELF', 'RELATED'] +672-122797-0056-1585: hyp=['SAID', 'THE', 'FIR', 'TREE', 'THINKING', 'OVER', 'WHAT', 'HE', 'HAD', 'HIMSELF', 'RELATED'] +672-122797-0057-1586: ref=['YES', 'IN', 'REALITY', 'THOSE', 'WERE', 'HAPPY', 'TIMES'] +672-122797-0057-1586: hyp=['YES', 'IN', 'REALITY', 'THOSE', 'WERE', 'HAPPY', 'TIMES'] +672-122797-0058-1587: ref=['WHO', 'IS', 'HUMPY', 'DUMPY', 'ASKED', 'THE', 'MICE'] +672-122797-0058-1587: hyp=['WHO', "IT'S", 'HUMPY', 'DUMPEY', 'ASKED', 'THE', 'MICE'] +672-122797-0059-1588: ref=['ONLY', 'THAT', 'ONE', 'ANSWERED', 'THE', 'TREE'] +672-122797-0059-1588: hyp=['ONLY', 'THAT', 'ONE', 'ANSWERED', 'THE', 'TREE'] +672-122797-0060-1589: ref=['IT', 'IS', 'A', 'VERY', 'STUPID', 'STORY'] +672-122797-0060-1589: hyp=['IT', 'IS', 'A', 'VERY', 'STUPID', 'STORY'] +672-122797-0061-1590: ref=["DON'T", 'YOU', 'KNOW', 'ONE', 'ABOUT', 'BACON', 'AND', 'TALLOW', 'CANDLES', "CAN'T", 'YOU', 'TELL', 'ANY', 'LARDER', 'STORIES'] +672-122797-0061-1590: hyp=["DON'T", 'YOU', 'KNOW', 'ONE', 'ABOUT', 'BACON', 'AND', 'TALLOW', 'CANDLES', "CAN'T", 'YOU', 'TELL', 'ANY', 'LARDER', 'STORIES'] +672-122797-0062-1591: ref=['NO', 'SAID', 'THE', 'TREE'] +672-122797-0062-1591: hyp=['NO', 'SAID', 'THE', 'TREE'] +672-122797-0063-1592: ref=['THEN', 'GOOD', 'BYE', 'SAID', 'THE', 'RATS', 'AND', 'THEY', 'WENT', 'HOME'] +672-122797-0063-1592: hyp=['THEN', 'GOOD', 'BYE', 'SAID', 'THE', 'RATS', 'AND', 'THEY', 'WENT', 'HOME'] +672-122797-0064-1593: ref=['AT', 'LAST', 'THE', 'LITTLE', 'MICE', 'STAYED', 'AWAY', 'ALSO', 'AND', 'THE', 'TREE', 'SIGHED', 'AFTER', 'ALL', 'IT', 'WAS', 'VERY', 'PLEASANT', 'WHEN', 'THE', 'SLEEK', 'LITTLE', 'MICE', 'SAT', 'ROUND', 'ME', 'AND', 'LISTENED', 'TO', 'WHAT', 'I', 'TOLD', 'THEM'] +672-122797-0064-1593: hyp=['AT', 'LAST', 'THE', 'LITTLE', 'MICE', 'STAYED', 'AWAY', 'ALSO', 'AND', 'THE', 'TREE', 'SIGHED', 'AFTER', 'ALL', 'IT', 'WAS', 'VERY', 'PLEASANT', 'WHEN', 'THE', 'SLEEK', 'LITTLE', 'MICE', 'SAT', 'ROUND', 'ME', 'AND', 'LISTENED', 'TO', 'WHAT', 'I', 'TOLD', 'THEM'] +672-122797-0065-1594: ref=['NOW', 'THAT', 'TOO', 'IS', 'OVER'] +672-122797-0065-1594: hyp=['NOW', 'THAT', 'TOO', 'IS', 'OVER'] +672-122797-0066-1595: ref=['WHY', 'ONE', 'MORNING', 'THERE', 'CAME', 'A', 'QUANTITY', 'OF', 'PEOPLE', 'AND', 'SET', 'TO', 'WORK', 'IN', 'THE', 'LOFT'] +672-122797-0066-1595: hyp=['WHY', 'ONE', 'MORNING', 'THERE', 'CAME', 'A', 'QUANTITY', 'OF', 'PEOPLE', 'AND', 'SET', 'TO', 'WORK', 'IN', 'THE', 'LOFT'] +672-122797-0067-1596: ref=['THE', 'TRUNKS', 'WERE', 'MOVED', 'THE', 'TREE', 'WAS', 'PULLED', 'OUT', 'AND', 'THROWN', 'RATHER', 'HARD', 'IT', 'IS', 'TRUE', 'DOWN', 'ON', 'THE', 'FLOOR', 'BUT', 'A', 'MAN', 'DREW', 'HIM', 'TOWARDS', 'THE', 'STAIRS', 'WHERE', 'THE', 'DAYLIGHT', 'SHONE'] +672-122797-0067-1596: hyp=['THE', 'TRUNKS', 'WERE', 'MOVED', 'THE', 'TREE', 'WAS', 'PULLED', 'OUT', 'AND', 'THROWN', 'RATHER', 'HARD', 'IT', 'IS', 'TRUE', 'DOWN', 'ON', 'THE', 'FLOOR', 'BUT', 'A', 'MAN', 'DREW', 'HIM', 'TOWARD', 'THE', 'STAIRS', 'WHERE', 'THE', 'DAYLIGHT', 'SHONE'] +672-122797-0068-1597: ref=['BUT', 'IT', 'WAS', 'NOT', 'THE', 'FIR', 'TREE', 'THAT', 'THEY', 'MEANT'] +672-122797-0068-1597: hyp=['BUT', 'IT', 'WAS', 'NOT', 'THE', 'FIR', 'TREE', 'THAT', 'THEY', 'MEANT'] 
+672-122797-0069-1598: ref=['IT', 'WAS', 'IN', 'A', 'CORNER', 'THAT', 'HE', 'LAY', 'AMONG', 'WEEDS', 'AND', 'NETTLES'] +672-122797-0069-1598: hyp=['IT', 'WAS', 'IN', 'A', 'CORNER', 'THAT', 'HE', 'LAY', 'AMONG', 'WEEDS', 'AND', 'NETTLES'] +672-122797-0070-1599: ref=['THE', 'GOLDEN', 'STAR', 'OF', 'TINSEL', 'WAS', 'STILL', 'ON', 'THE', 'TOP', 'OF', 'THE', 'TREE', 'AND', 'GLITTERED', 'IN', 'THE', 'SUNSHINE'] +672-122797-0070-1599: hyp=['THE', 'GOLDEN', 'STAR', 'OF', 'TINSEL', 'WAS', 'STILL', 'ON', 'THE', 'TOP', 'OF', 'THE', 'TREE', 'AND', 'GLITTERED', 'IN', 'THE', 'SUNSHINE'] +672-122797-0071-1600: ref=['IN', 'THE', 'COURT', 'YARD', 'SOME', 'OF', 'THE', 'MERRY', 'CHILDREN', 'WERE', 'PLAYING', 'WHO', 'HAD', 'DANCED', 'AT', 'CHRISTMAS', 'ROUND', 'THE', 'FIR', 'TREE', 'AND', 'WERE', 'SO', 'GLAD', 'AT', 'THE', 'SIGHT', 'OF', 'HIM'] +672-122797-0071-1600: hyp=['IN', 'THE', 'COURTYARD', 'SOME', 'OF', 'THE', 'MARRIED', 'CHILDREN', 'WERE', 'PLAYING', 'WHO', 'HAD', 'DANCED', 'AT', 'CHRISTMAS', 'ROUND', 'THE', 'FIR', 'TREE', 'AND', 'WERE', 'SO', 'GLAD', 'AT', 'THE', 'SIGHT', 'OF', 'HIM'] +672-122797-0072-1601: ref=['AND', 'THE', "GARDENER'S", 'BOY', 'CHOPPED', 'THE', 'TREE', 'INTO', 'SMALL', 'PIECES', 'THERE', 'WAS', 'A', 'WHOLE', 'HEAP', 'LYING', 'THERE'] +672-122797-0072-1601: hyp=['AND', 'THE', "GARDENER'S", 'BOY', 'CHOPPED', 'THE', 'TREE', 'INTO', 'SMALL', 'PIECES', 'THERE', 'WAS', 'A', 'WHOLE', 'HEAP', 'LYING', 'THERE'] +672-122797-0073-1602: ref=['THE', 'WOOD', 'FLAMED', 'UP', 'SPLENDIDLY', 'UNDER', 'THE', 'LARGE', 'BREWING', 'COPPER', 'AND', 'IT', 'SIGHED', 'SO', 'DEEPLY'] +672-122797-0073-1602: hyp=['THE', 'WOOD', 'FLAMED', 'UP', 'SPLENDIDLY', 'UNDER', 'THE', 'LARGE', 'BREWING', 'COPPER', 'AND', 'ITS', 'SIDE', 'SO', 'DEEPLY'] +672-122797-0074-1603: ref=['HOWEVER', 'THAT', 'WAS', 'OVER', 'NOW', 'THE', 'TREE', 'GONE', 'THE', 'STORY', 'AT', 'AN', 'END'] +672-122797-0074-1603: hyp=['HOWEVER', 'THAT', 'WAS', 'OVER', 'NOW', 'THE', 'TREE', 'GONE', 'THE', 'STORY', 'AT', 'AN', 'END'] +6829-68769-0000-1858: ref=['KENNETH', 'AND', 'BETH', 'REFRAINED', 'FROM', 'TELLING', 'THE', 'OTHER', 'GIRLS', 'OR', 'UNCLE', 'JOHN', 'OF', 'OLD', 'WILL', "ROGERS'S", 'VISIT', 'BUT', 'THEY', 'GOT', 'MISTER', 'WATSON', 'IN', 'THE', 'LIBRARY', 'AND', 'QUESTIONED', 'HIM', 'CLOSELY', 'ABOUT', 'THE', 'PENALTY', 'FOR', 'FORGING', 'A', 'CHECK'] +6829-68769-0000-1858: hyp=['KENNETH', 'AND', 'BETH', 'REFRAINED', 'FROM', 'TELLING', 'THE', 'OTHER', 'GIRLS', 'OR', 'UNCLE', 'JOHN', 'OF', 'OLD', 'WILL', 'ROGERS', 'VISIT', 'BUT', 'THEY', 'GOT', 'MISTER', 'WATSON', 'IN', 'THE', 'LIBRARY', 'AND', 'QUESTIONED', 'HIM', 'CLOSELY', 'ABOUT', 'THE', 'PENALTY', 'FOR', 'FORGING', 'A', 'CHECK'] +6829-68769-0001-1859: ref=['IT', 'WAS', 'A', 'SERIOUS', 'CRIME', 'INDEED', 'MISTER', 'WATSON', 'TOLD', 'THEM', 'AND', 'TOM', 'GATES', 'BADE', 'FAIR', 'TO', 'SERVE', 'A', 'LENGTHY', 'TERM', 'IN', "STATE'S", 'PRISON', 'AS', 'A', 'CONSEQUENCE', 'OF', 'HIS', 'RASH', 'ACT'] +6829-68769-0001-1859: hyp=['IT', 'WAS', 'A', 'SERIOUS', 'CRIME', 'INDEED', 'MISTER', 'WATSON', 'TOLD', 'THEM', 'AND', 'TOM', 'GATES', 'BADE', 'FAIR', 'TO', 'SERVE', 'A', 'LENGTHY', 'TERM', 'IN', 'THE', "STATE'S", 'PRISON', 'AS', 'A', 'CONSEQUENCE', 'OF', 'HIS', 'RASH', 'ACT'] +6829-68769-0002-1860: ref=['I', "CAN'T", 'SEE', 'IT', 'IN', 'THAT', 'LIGHT', 'SAID', 'THE', 'OLD', 'LAWYER'] +6829-68769-0002-1860: hyp=['I', "CAN'T", 'SEE', 'IT', 'IN', 'THAT', 'LIGHT', 'SAID', 'THE', 'OLD', 'LAWYER'] +6829-68769-0003-1861: ref=['IT', 'WAS', 'A', 'DELIBERATE', 'THEFT', 'FROM', 'HIS', 'EMPLOYERS', 'TO', 
'PROTECT', 'A', 'GIRL', 'HE', 'LOVED'] +6829-68769-0003-1861: hyp=['IT', 'WAS', 'A', 'DELIBERATE', 'THEFT', 'FROM', 'HIS', 'EMPLOYERS', 'TO', 'PROTECT', 'A', 'GIRL', 'HE', 'LOVED'] +6829-68769-0004-1862: ref=['BUT', 'THEY', 'COULD', 'NOT', 'HAVE', 'PROVEN', 'A', 'CASE', 'AGAINST', 'LUCY', 'IF', 'SHE', 'WAS', 'INNOCENT', 'AND', 'ALL', 'THEIR', 'THREATS', 'OF', 'ARRESTING', 'HER', 'WERE', 'PROBABLY', 'MERE', 'BLUFF'] +6829-68769-0004-1862: hyp=['BUT', 'THEY', 'COULD', 'NOT', 'HAVE', 'PROVEN', 'A', 'CASE', 'AGAINST', 'LUCY', 'IF', 'SHE', 'WAS', 'INNOCENT', 'AND', 'ALL', 'THEIR', 'THREATS', 'OF', 'ARRESTING', 'HER', 'WERE', 'PROBABLY', 'A', 'MERE', 'BLUFF'] +6829-68769-0005-1863: ref=['HE', 'WAS', 'SOFT', 'HEARTED', 'AND', 'IMPETUOUS', 'SAID', 'BETH', 'AND', 'BEING', 'IN', 'LOVE', 'HE', "DIDN'T", 'STOP', 'TO', 'COUNT', 'THE', 'COST'] +6829-68769-0005-1863: hyp=['HE', 'WAS', 'SOFT', 'HEARTED', 'AND', 'IMPETUOUS', 'SAID', 'BETH', 'AND', 'BEING', 'IN', 'LOVE', 'HE', "DIDN'T", 'STOP', 'TO', 'COUNT', 'THE', 'COST'] +6829-68769-0006-1864: ref=['IF', 'THE', 'PROSECUTION', 'WERE', 'WITHDRAWN', 'AND', 'THE', 'CASE', 'SETTLED', 'WITH', 'THE', 'VICTIM', 'OF', 'THE', 'FORGED', 'CHECK', 'THEN', 'THE', 'YOUNG', 'MAN', 'WOULD', 'BE', 'ALLOWED', 'HIS', 'FREEDOM'] +6829-68769-0006-1864: hyp=['IF', 'THE', 'PROSECUTION', 'WERE', 'WITHDRAWN', 'AND', 'THE', 'CASE', 'SETTLED', 'WITH', 'THE', 'VICTIM', 'OF', 'THE', 'FORGED', 'CHECK', 'THEN', 'THE', 'YOUNG', 'MAN', 'WOULD', 'BE', 'ALLOWED', 'HIS', 'FREEDOM'] +6829-68769-0007-1865: ref=['BUT', 'UNDER', 'THE', 'CIRCUMSTANCES', 'I', 'DOUBT', 'IF', 'SUCH', 'AN', 'ARRANGEMENT', 'COULD', 'BE', 'MADE'] +6829-68769-0007-1865: hyp=['BUT', 'UNDER', 'THE', 'CIRCUMSTANCES', 'I', 'DOUBT', 'OF', 'SUCH', 'AN', 'ARRANGEMENT', 'COULD', 'BE', 'MADE'] +6829-68769-0008-1866: ref=['FAIRVIEW', 'WAS', 'TWELVE', 'MILES', 'AWAY', 'BUT', 'BY', 'TEN', "O'CLOCK", 'THEY', 'DREW', 'UP', 'AT', 'THE', 'COUNTY', 'JAIL'] +6829-68769-0008-1866: hyp=['FAIR', 'VIEWS', 'TWELVE', 'MILES', 'AWAY', 'BUT', 'BY', 'TEN', "O'CLOCK", 'THEY', 'DREW', 'UP', 'AT', 'THE', 'COUNTY', 'TRAIL'] +6829-68769-0009-1867: ref=['THEY', 'WERE', 'RECEIVED', 'IN', 'THE', 'LITTLE', 'OFFICE', 'BY', 'A', 'MAN', 'NAMED', 'MARKHAM', 'WHO', 'WAS', 'THE', 'JAILER'] +6829-68769-0009-1867: hyp=['THEY', 'WERE', 'RECEIVED', 'IN', 'THE', 'LITTLE', 'OFFICE', 'BY', 'A', 'MAN', 'NAMED', 'MARKHAM', 'WHO', 'WAS', 'THE', 'JAILER'] +6829-68769-0010-1868: ref=['WE', 'WISH', 'TO', 'TALK', 'WITH', 'HIM', 'ANSWERED', 'KENNETH', 'TALK'] +6829-68769-0010-1868: hyp=['WE', 'WISH', 'TO', 'TALK', 'WITH', 'HIM', 'ANSWERED', 'KENNETH', 'TALK'] +6829-68769-0011-1869: ref=["I'M", 'RUNNING', 'FOR', 'REPRESENTATIVE', 'ON', 'THE', 'REPUBLICAN', 'TICKET', 'SAID', 'KENNETH', 'QUIETLY'] +6829-68769-0011-1869: hyp=["I'M", 'RUNNING', 'FOR', 'REPRESENTATIVE', 'ON', 'THE', 'REPUBLICAN', 'TICKET', 'SAID', 'KENNETH', 'QUIETLY'] +6829-68769-0012-1870: ref=['OH', 'SAY', "THAT'S", 'DIFFERENT', 'OBSERVED', 'MARKHAM', 'ALTERING', 'HIS', 'DEMEANOR'] +6829-68769-0012-1870: hyp=["I'LL", 'SAY', "THAT'S", 'DIFFERENT', 'OBSERVED', 'MARKHAM', 'ALTERING', 'HIS', 'DEMEANOR'] +6829-68769-0013-1871: ref=['MAY', 'WE', 'SEE', 'GATES', 'AT', 'ONCE', 'ASKED', 'KENNETH'] +6829-68769-0013-1871: hyp=['MAYBE', 'SEA', 'GATES', 'AT', 'ONCE', 'ASKED', 'KENNETH'] +6829-68769-0014-1872: ref=['THEY', 'FOLLOWED', 'THE', 'JAILER', 'ALONG', 'A', 'SUCCESSION', 'OF', 'PASSAGES'] +6829-68769-0014-1872: hyp=['THEY', 'FOLLOWED', 'THE', 'JAILER', 'ALONG', 'A', 'SUCCESSION', 'OF', 'PASSAGES'] 
+6829-68769-0015-1873: ref=['SOMETIMES', "I'M", 'THAT', 'YEARNING', 'FOR', 'A', 'SMOKE', "I'M", 'NEARLY', 'CRAZY', 'AN', 'I', 'DUNNO', 'WHICH', 'IS', 'WORST', 'DYIN', 'ONE', 'WAY', 'OR', 'ANOTHER'] +6829-68769-0015-1873: hyp=['SOMETIMES', 'ON', 'THAT', 'YEARNIN', 'FUR', 'AS', 'SMOKE', "I'M", 'NEARLY', 'CRAZY', 'AND', 'I', 'DUNNO', 'WHICH', 'IS', 'WORSE', 'DYING', 'ONE', 'WAY', 'OR', 'THE', 'OTHER'] +6829-68769-0016-1874: ref=['HE', 'UNLOCKED', 'THE', 'DOOR', 'AND', 'CALLED', "HERE'S", 'VISITORS', 'TOM'] +6829-68769-0016-1874: hyp=['HE', 'UNLOCKED', 'THE', 'DOOR', 'AND', 'CALLED', "HERE'S", 'VISITORS', 'TOM'] +6829-68769-0017-1875: ref=['WORSE', 'TOM', 'WORSE', 'N', 'EVER', 'REPLIED', 'THE', 'JAILER', 'GLOOMILY'] +6829-68769-0017-1875: hyp=['WORSE', 'TOM', 'WORSE', 'THAN', 'EVER', 'REPLIED', 'THE', 'JAILER', 'GLOOMILY'] +6829-68769-0018-1876: ref=['MISS', 'DE', 'GRAF', 'SAID', 'KENNETH', 'NOTICING', 'THE', "BOY'S", 'FACE', 'CRITICALLY', 'AS', 'HE', 'STOOD', 'WHERE', 'THE', 'LIGHT', 'FROM', 'THE', 'PASSAGE', 'FELL', 'UPON', 'IT'] +6829-68769-0018-1876: hyp=['MISTER', 'GRAF', 'SAID', 'KENNETH', 'NOTICING', 'THE', "BOY'S", 'FACE', 'CRITICALLY', 'AS', 'HE', 'STOOD', 'WHERE', 'THE', 'LIGHT', 'FROM', 'THE', 'PASSAGE', 'FELL', 'UPON', 'IT'] +6829-68769-0019-1877: ref=['SORRY', 'WE', "HAVEN'T", 'ANY', 'RECEPTION', 'ROOM', 'IN', 'THE', 'JAIL'] +6829-68769-0019-1877: hyp=['SIR', 'WE', "HAVEN'T", 'ANY', 'RECEPTION', 'ROOM', 'IN', 'THE', 'JAIL'] +6829-68769-0020-1878: ref=['SIT', 'DOWN', 'PLEASE', 'SAID', 'GATES', 'IN', 'A', 'CHEERFUL', 'AND', 'PLEASANT', 'VOICE', "THERE'S", 'A', 'BENCH', 'HERE'] +6829-68769-0020-1878: hyp=['SIT', 'DOWN', 'PLEASE', 'SAID', 'GATES', 'IN', 'A', 'CHEERFUL', 'AND', 'PLEASANT', 'VOICE', "THERE'S", 'A', 'BENCH', 'HERE'] +6829-68769-0021-1879: ref=['A', 'FRESH', 'WHOLESOME', 'LOOKING', 'BOY', 'WAS', 'TOM', 'GATES', 'WITH', 'STEADY', 'GRAY', 'EYES', 'AN', 'INTELLIGENT', 'FOREHEAD', 'BUT', 'A', 'SENSITIVE', 'RATHER', 'WEAK', 'MOUTH'] +6829-68769-0021-1879: hyp=['A', 'FRESH', 'WHOLESOME', 'LOOKING', 'BOY', 'WAS', 'TOM', 'GATES', 'WAS', 'STEADY', 'GRAY', 'EYES', 'AN', 'INTELLIGENT', 'FOREHEAD', 'BUT', 'A', 'SENSITIVE', 'RATHER', 'WEAK', 'MOUTH'] +6829-68769-0022-1880: ref=['WE', 'HAVE', 'HEARD', 'SOMETHING', 'OF', 'YOUR', 'STORY', 'SAID', 'KENNETH', 'AND', 'ARE', 'INTERESTED', 'IN', 'IT'] +6829-68769-0022-1880: hyp=['WE', 'HAVE', 'HEARD', 'SOMETHING', 'OF', 'YOUR', 'STORY', 'SAID', 'KENNETH', 'AND', 'ARE', 'INTERESTED', 'IN', 'IT'] +6829-68769-0023-1881: ref=['I', "DIDN'T", 'STOP', 'TO', 'THINK', 'WHETHER', 'IT', 'WAS', 'FOOLISH', 'OR', 'NOT', 'I', 'DID', 'IT', 'AND', "I'M", 'GLAD', 'I', 'DID'] +6829-68769-0023-1881: hyp=['I', "DIDN'T", 'STOP', 'TO', 'THINK', 'WHETHER', 'IT', 'WAS', 'FOOLISH', 'OR', 'NOT', 'I', 'DID', 'IT', 'AND', "I'M", 'GLAD', 'I', 'DID', 'IT'] +6829-68769-0024-1882: ref=['OLD', 'WILL', 'IS', 'A', 'FINE', 'FELLOW', 'BUT', 'POOR', 'AND', 'HELPLESS', 'SINCE', 'MISSUS', 'ROGERS', 'HAD', 'HER', 'ACCIDENT'] +6829-68769-0024-1882: hyp=['OLD', 'WILL', 'IS', 'A', 'FINE', 'FELLOW', 'BUT', 'POOR', 'AND', 'HELPLESS', 'SINCE', 'MISSUS', 'ROGERS', 'HAD', 'HER', 'ACCIDENT'] +6829-68769-0025-1883: ref=['THEN', 'ROGERS', "WOULDN'T", 'DO', 'ANYTHING', 'BUT', 'LEAD', 'HER', 'AROUND', 'AND', 'WAIT', 'UPON', 'HER', 'AND', 'THE', 'PLACE', 'WENT', 'TO', 'RACK', 'AND', 'RUIN'] +6829-68769-0025-1883: hyp=['THEN', 'ROGERS', "WOULDN'T", 'DO', 'ANYTHING', 'BUT', 'LEAD', 'HER', 'AROUND', 'AND', 'WAIT', 'UPON', 'HER', 'AND', 'THE', 'PLACE', 'WENT', 'TO', 'RACK', 'AND', 'RUIN'] 
+6829-68769-0026-1884: ref=['HE', 'SPOKE', 'SIMPLY', 'BUT', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'NARROW', 'CELL', 'IN', 'FRONT', 'OF', 'THEM'] +6829-68769-0026-1884: hyp=['HE', 'SPOKE', 'SIMPLY', 'BUT', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'NARROW', 'CELL', 'IN', 'FRONT', 'OF', 'THEM'] +6829-68769-0027-1885: ref=['WHOSE', 'NAME', 'DID', 'YOU', 'SIGN', 'TO', 'THE', 'CHECK', 'ASKED', 'KENNETH'] +6829-68769-0027-1885: hyp=['WHOSE', 'NAME', 'DID', 'YOU', 'SIGN', 'TO', 'THE', 'CHECK', 'ASKED', 'KENNETH'] +6829-68769-0028-1886: ref=['HE', 'IS', 'SUPPOSED', 'TO', 'SIGN', 'ALL', 'THE', 'CHECKS', 'OF', 'THE', 'CONCERN'] +6829-68769-0028-1886: hyp=['HE', 'IS', 'SUPPOSED', 'TO', 'SIGN', 'ALL', 'THE', 'CHECKS', 'OF', 'THE', 'CONCERN'] +6829-68769-0029-1887: ref=["IT'S", 'A', 'STOCK', 'COMPANY', 'AND', 'RICH'] +6829-68769-0029-1887: hyp=["IT'S", 'A', 'STOCK', 'COMPANY', 'IN', 'RICH'] +6829-68769-0030-1888: ref=['I', 'WAS', 'BOOKKEEPER', 'SO', 'IT', 'WAS', 'EASY', 'TO', 'GET', 'A', 'BLANK', 'CHECK', 'AND', 'FORGE', 'THE', 'SIGNATURE'] +6829-68769-0030-1888: hyp=['I', 'WAS', 'BIT', 'KEEPER', 'SO', 'IT', 'WAS', 'EASY', 'TO', 'GET', 'A', 'BLANK', 'CHECK', 'AND', 'FORGE', 'THE', 'SIGNATURE'] +6829-68769-0031-1889: ref=['AS', 'REGARDS', 'MY', 'ROBBING', 'THE', 'COMPANY', "I'LL", 'SAY', 'THAT', 'I', 'SAVED', 'THEM', 'A', 'HEAVY', 'LOSS', 'ONE', 'DAY'] +6829-68769-0031-1889: hyp=['AS', 'REGARDS', 'MY', 'ROBBING', 'THE', 'COMPANY', "I'LL", 'SAY', 'THAT', 'I', 'SAVED', 'HIM', 'A', 'HEAVY', 'LOST', 'ONE', 'DAY'] +6829-68769-0032-1890: ref=['I', 'DISCOVERED', 'AND', 'PUT', 'OUT', 'A', 'FIRE', 'THAT', 'WOULD', 'HAVE', 'DESTROYED', 'THE', 'WHOLE', 'PLANT', 'BUT', 'MARSHALL', 'NEVER', 'EVEN', 'THANKED', 'ME'] +6829-68769-0032-1890: hyp=['I', 'DISCOVERED', 'AND', 'PUT', 'OUT', 'A', 'FIRE', 'THAT', 'WOULD', 'HAVE', 'DESTROYED', 'THE', 'WHOLE', 'PLANT', 'BUT', 'MARTIAL', 'NEVER', 'EVEN', 'THANKED', 'ME'] +6829-68769-0033-1891: ref=['IT', 'WAS', 'BETTER', 'FOR', 'HIM', 'TO', 'THINK', 'THE', 'GIRL', 'UNFEELING', 'THAN', 'TO', 'KNOW', 'THE', 'TRUTH'] +6829-68769-0033-1891: hyp=['IT', 'WAS', 'BETTER', 'FOR', 'HIM', 'TO', 'THINK', 'THE', 'GIRL', 'UNFEELING', 'THAN', 'TO', 'KNOW', 'THE', 'TRUTH'] +6829-68769-0034-1892: ref=["I'M", 'GOING', 'TO', 'SEE', 'MISTER', 'MARSHALL', 'SAID', 'KENNETH', 'AND', 'DISCOVER', 'WHAT', 'I', 'CAN', 'DO', 'TO', 'ASSIST', 'YOU', 'THANK', 'YOU', 'SIR'] +6829-68769-0034-1892: hyp=["I'M", 'GOING', 'TO', 'SEE', 'MISTER', 'MARSHAL', 'SAID', 'KENNETH', 'AND', 'DISCOVER', 'WHAT', 'I', 'CAN', 'DO', 'TO', 'ASSIST', 'YOU', 'THANK', 'YOU', 'SIR'] +6829-68769-0035-1893: ref=['IT', "WON'T", 'BE', 'MUCH', 'BUT', "I'M", 'GRATEFUL', 'TO', 'FIND', 'A', 'FRIEND'] +6829-68769-0035-1893: hyp=['IT', "WON'T", 'BE', 'MUCH', 'BUT', "I'M", 'GRATEFUL', 'TO', 'FIND', 'A', 'FRIEND'] +6829-68769-0036-1894: ref=['THEY', 'LEFT', 'HIM', 'THEN', 'FOR', 'THE', 'JAILER', 'ARRIVED', 'TO', 'UNLOCK', 'THE', 'DOOR', 'AND', 'ESCORT', 'THEM', 'TO', 'THE', 'OFFICE'] +6829-68769-0036-1894: hyp=['THEY', 'LEFT', 'HIM', 'THEN', 'FOR', 'THE', 'JAILER', 'ARRIVED', 'TO', 'UNLOCK', 'THE', 'DOOR', 'AND', 'ESCORT', 'THEM', 'TO', 'THE', 'OFFICE'] +6829-68769-0037-1895: ref=["I'VE", 'SEEN', 'LOTS', 'OF', 'THAT', 'KIND', 'IN', 'MY', 'DAY'] +6829-68769-0037-1895: hyp=["I'VE", 'SEEN', 'LOTS', 'OF', 'THAT', 'KIND', 'IN', 'MY', 'DAY'] +6829-68769-0038-1896: ref=['AND', 'IT', 'RUINS', 'A', "MAN'S", 'DISPOSITION'] +6829-68769-0038-1896: hyp=['AND', 'IT', 'RUINS', 'A', "MAN'S", 'DISPOSITION'] +6829-68769-0039-1897: ref=['HE', 'LOOKED', 'UP', 'RATHER', 
'UNGRACIOUSLY', 'BUT', 'MOTIONED', 'THEM', 'TO', 'BE', 'SEATED'] +6829-68769-0039-1897: hyp=['HE', 'LOOKED', 'UP', 'RATHER', 'UNGRACIOUSLY', 'BUT', 'MOTIONED', 'THEM', 'TO', 'BE', 'SEATED'] +6829-68769-0040-1898: ref=['SOME', 'GIRL', 'HAS', 'BEEN', 'HERE', 'TWICE', 'TO', 'INTERVIEW', 'MY', 'MEN', 'AND', 'I', 'HAVE', 'REFUSED', 'TO', 'ADMIT', 'HER'] +6829-68769-0040-1898: hyp=['SOME', 'GIRL', 'HAS', 'BEEN', 'IN', 'HERE', 'TWICE', 'TO', 'INTERVIEW', 'MY', 'MEN', 'AND', 'I', 'HAVE', 'REFUSED', 'TO', 'ADMIT', 'HER'] +6829-68769-0041-1899: ref=["I'M", 'NOT', 'ELECTIONEERING', 'JUST', 'NOW'] +6829-68769-0041-1899: hyp=["I'M", 'NOT', 'ELECTIONEERING', 'JUST', 'NOW'] +6829-68769-0042-1900: ref=['OH', 'WELL', 'SIR', 'WHAT', 'ABOUT', 'HIM'] +6829-68769-0042-1900: hyp=['OH', 'WELL', 'SIR', 'WHAT', 'ABOUT', 'EM'] +6829-68769-0043-1901: ref=['AND', 'HE', 'DESERVES', 'A', 'TERM', 'IN', "STATE'S", 'PRISON'] +6829-68769-0043-1901: hyp=['AND', 'HE', 'DESERVES', 'A', 'TERM', 'IN', "STATE'S", 'PRISON'] +6829-68769-0044-1902: ref=['IT', 'HAS', 'COST', 'ME', 'TWICE', 'SIXTY', 'DOLLARS', 'IN', 'ANNOYANCE'] +6829-68769-0044-1902: hyp=['IT', 'HAS', 'COST', 'ME', 'TWICE', 'SIXTY', 'DOLLARS', 'IN', 'ANNOYANCE'] +6829-68769-0045-1903: ref=["I'LL", 'PAY', 'ALL', 'THE', 'COSTS', 'BESIDES'] +6829-68769-0045-1903: hyp=["I'LL", 'PAY', 'ALL', 'THE', 'COST', 'BESIDES'] +6829-68769-0046-1904: ref=["YOU'RE", 'FOOLISH', 'WHY', 'SHOULD', 'YOU', 'DO', 'ALL', 'THIS'] +6829-68769-0046-1904: hyp=["YOU'RE", 'FOOLISH', 'WHY', 'SHOULD', 'YOU', 'DO', 'ALL', 'THIS'] +6829-68769-0047-1905: ref=['I', 'HAVE', 'MY', 'OWN', 'REASONS', 'MISTER', 'MARSHALL'] +6829-68769-0047-1905: hyp=['I', 'HAVE', 'MY', 'OWN', 'REASONS', 'MISTER', 'MARSHALL'] +6829-68769-0048-1906: ref=['GIVE', 'ME', 'A', 'CHECK', 'FOR', 'A', 'HUNDRED', 'AND', 'FIFTY', 'AND', "I'LL", 'TURN', 'OVER', 'TO', 'YOU', 'THE', 'FORGED', 'CHECK', 'AND', 'QUASH', 'FURTHER', 'PROCEEDINGS'] +6829-68769-0048-1906: hyp=['GIVE', 'ME', 'A', 'CHECK', 'FOR', 'A', 'HUNDRED', 'AND', 'FIFTY', 'AND', "I'LL", 'TURN', 'OVER', 'TO', 'YOU', 'THE', 'FORCH', 'CHECK', 'AND', 'CRASH', 'FURTHER', 'PROCEEDINGS'] +6829-68769-0049-1907: ref=['HE', 'DETESTED', 'THE', 'GRASPING', 'DISPOSITION', 'THAT', 'WOULD', 'ENDEAVOR', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'HIS', 'EVIDENT', 'DESIRE', 'TO', 'HELP', 'YOUNG', 'GATES'] +6829-68769-0049-1907: hyp=['HE', 'DETESTED', 'THE', 'GRASPING', 'DISPOSITION', 'THAT', 'WOULD', 'ENDEAVOR', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'HIS', 'EVIDENT', 'DESIRE', 'TO', 'HELP', 'YOUNG', 'GATES'] +6829-68769-0050-1908: ref=['BETH', 'UNEASY', 'AT', 'HIS', 'SILENCE', 'NUDGED', 'HIM'] +6829-68769-0050-1908: hyp=['BETH', 'UNEASY', 'AT', 'A', 'SILENCE', 'NUDGED', 'HIM'] +6829-68769-0051-1909: ref=['THERE', 'WAS', 'A', 'GRIM', 'SMILE', 'OF', 'AMUSEMENT', 'ON', 'HIS', 'SHREWD', 'FACE'] +6829-68769-0051-1909: hyp=['THERE', 'WAS', 'A', 'GRIM', 'SMILE', 'OF', 'AMUSEMENT', 'ON', 'HIS', 'SHREWD', 'FACE'] +6829-68769-0052-1910: ref=['HE', 'MIGHT', 'HAVE', 'HAD', 'THAT', 'FORGED', 'CHECK', 'FOR', 'THE', 'FACE', 'OF', 'IT', 'IF', "HE'D", 'BEEN', 'SHARP'] +6829-68769-0052-1910: hyp=['HE', 'MIGHT', 'HAVE', 'HAD', 'THAT', 'FORGED', 'CHECK', 'FOR', 'THE', 'FACE', 'OF', 'IT', 'IF', "HE'D", 'BEEN', 'SHARP'] +6829-68769-0053-1911: ref=['AND', 'TO', 'THINK', 'WE', 'CAN', 'SAVE', 'ALL', 'THAT', 'MISERY', 'AND', 'DESPAIR', 'BY', 'THE', 'PAYMENT', 'OF', 'A', 'HUNDRED', 'AND', 'FIFTY', 'DOLLARS'] +6829-68769-0053-1911: hyp=['AND', 'TO', 'THINK', 'WE', 'CAN', 'SAVE', 'ALL', 'THAT', 'MISERY', 'AND', 'DESPAIR', 'BY', 'THE', 
'PAYMENT', 'OF', 'A', 'HUNDRED', 'AND', 'FIFTY', 'DOLLARS'] +6829-68771-0000-1912: ref=['SO', 'TO', 'THE', 'SURPRISE', 'OF', 'THE', 'DEMOCRATIC', 'COMMITTEE', 'AND', 'ALL', 'HIS', 'FRIENDS', 'MISTER', 'HOPKINS', 'ANNOUNCED', 'THAT', 'HE', 'WOULD', 'OPPOSE', "FORBES'S", 'AGGRESSIVE', 'CAMPAIGN', 'WITH', 'AN', 'EQUAL', 'AGGRESSIVENESS', 'AND', 'SPEND', 'AS', 'MANY', 'DOLLARS', 'IN', 'DOING', 'SO', 'AS', 'MIGHT', 'BE', 'NECESSARY'] +6829-68771-0000-1912: hyp=['SO', 'TO', 'THE', 'SURPRISE', 'OF', 'THE', 'DEMOCRATIC', 'COMMITTEE', 'AND', 'ALL', 'HIS', 'FRIENDS', 'MISTER', 'HOPKINS', 'ANNOUNCED', 'THAT', 'HE', 'WOULD', 'OPPOSE', 'FORCE', 'AGGRESSIVE', 'CAMPAIGN', 'WITH', 'AN', 'EQUAL', 'AGGRESSIVENESS', 'AND', 'SPEND', 'AS', 'MANY', 'DOLLARS', 'IN', 'DOING', 'SO', 'AS', 'MIGHT', 'BE', 'NECESSARY'] +6829-68771-0001-1913: ref=['ONE', 'OF', 'MISTER', "HOPKINS'S", 'FIRST', 'TASKS', 'AFTER', 'CALLING', 'HIS', 'FAITHFUL', 'HENCHMEN', 'AROUND', 'HIM', 'WAS', 'TO', 'MAKE', 'A', 'CAREFUL', 'CANVASS', 'OF', 'THE', 'VOTERS', 'OF', 'HIS', 'DISTRICT', 'TO', 'SEE', 'WHAT', 'WAS', 'STILL', 'TO', 'BE', 'ACCOMPLISHED'] +6829-68771-0001-1913: hyp=['ONE', 'OF', 'MISTER', 'HOPKINS', 'FIRST', 'TASKS', 'AFTER', 'CALLING', 'HIS', 'FAITHFUL', 'HENCHMAN', 'AROUND', 'HIM', 'WAS', 'TO', 'MAKE', 'A', 'CAREFUL', 'CANVAS', 'OF', 'THE', 'VOTERS', 'OF', 'HIS', 'DISTRICT', 'TO', 'SEE', 'WHAT', 'WAS', 'STILL', 'TO', 'BE', 'ACCOMPLISHED'] +6829-68771-0002-1914: ref=['THE', 'WEAK', 'KNEED', 'CONTINGENCY', 'MUST', 'BE', 'STRENGTHENED', 'AND', 'FORTIFIED', 'AND', 'A', 'COUPLE', 'OF', 'HUNDRED', 'VOTES', 'IN', 'ONE', 'WAY', 'OR', 'ANOTHER', 'SECURED', 'FROM', 'THE', 'OPPOSITION'] +6829-68771-0002-1914: hyp=['THE', 'WEAK', 'NEED', 'CONTINGENCY', 'MUST', 'BE', 'STRENGTHENED', 'AND', 'FORTIFIED', 'AND', 'A', 'COUPLE', 'OF', 'HUNDRED', 'VOTES', 'IN', 'ONE', 'WAY', 'OR', 'THE', 'OTHER', 'SECURED', 'FROM', 'THE', 'OPPOSITION'] +6829-68771-0003-1915: ref=['THE', 'DEMOCRATIC', 'COMMITTEE', 'FIGURED', 'OUT', 'A', 'WAY', 'TO', 'DO', 'THIS'] +6829-68771-0003-1915: hyp=['THE', 'DEMOCRATIC', 'COMMITTEE', 'FIGURED', 'OUT', 'A', 'WAY', 'TO', 'DO', 'THIS'] +6829-68771-0004-1916: ref=['UNDER', 'ORDINARY', 'CONDITIONS', 'REYNOLDS', 'WAS', 'SURE', 'TO', 'BE', 'ELECTED', 'BUT', 'THE', 'COMMITTEE', 'PROPOSED', 'TO', 'SACRIFICE', 'HIM', 'IN', 'ORDER', 'TO', 'ELECT', 'HOPKINS'] +6829-68771-0004-1916: hyp=['UNDER', 'ORDINARY', 'CONDITIONS', 'REYNOLDS', 'WAS', 'SURE', 'TO', 'BE', 'ELECTED', 'BUT', 'THE', 'COMMITTEE', 'PROPOSED', 'TO', 'SACRIFICE', 'HIM', 'IN', 'ORDER', 'TO', 'ELECT', 'HOPKINS'] +6829-68771-0005-1917: ref=['THE', 'ONLY', 'THING', 'NECESSARY', 'WAS', 'TO', 'FIX', 'SETH', 'REYNOLDS', 'AND', 'THIS', 'HOPKINS', 'ARRANGED', 'PERSONALLY'] +6829-68771-0005-1917: hyp=['THE', 'ONLY', 'THING', 'NECESSARY', 'WAS', 'TO', 'FIX', 'SETH', 'REYNOLDS', 'AND', 'THIS', 'HOPKINS', 'ARRANGED', 'PERSONALLY'] +6829-68771-0006-1918: ref=['AND', 'THIS', 'WAS', 'WHY', 'KENNETH', 'AND', 'BETH', 'DISCOVERED', 'HIM', 'CONVERSING', 'WITH', 'THE', 'YOUNG', 'WOMAN', 'IN', 'THE', 'BUGGY'] +6829-68771-0006-1918: hyp=['AND', 'THIS', 'WAS', 'WHY', 'KENNETH', 'AND', 'BETH', 'DISCOVERED', 'HIM', 'CONVERSING', 'WITH', 'THE', 'YOUNG', 'WOMAN', 'IN', 'THE', 'BUGGY'] +6829-68771-0007-1919: ref=['THE', 'DESCRIPTION', 'SHE', 'GAVE', 'OF', 'THE', 'COMING', 'RECEPTION', 'TO', 'THE', "WOMAN'S", 'POLITICAL', 'LEAGUE', 'WAS', 'SO', 'HUMOROUS', 'AND', 'DIVERTING', 'THAT', 'THEY', 'WERE', 'BOTH', 'LAUGHING', 'HEARTILY', 'OVER', 'THE', 'THING', 'WHEN', 'THE', 'YOUNG', 'PEOPLE', 'PASSED', 
'THEM', 'AND', 'THUS', 'MISTER', 'HOPKINS', 'FAILED', 'TO', 'NOTICE', 'WHO', 'THE', 'OCCUPANTS', 'OF', 'THE', 'OTHER', 'VEHICLE', 'WERE'] +6829-68771-0007-1919: hyp=['THE', 'DESCRIPTION', 'SHE', 'GAVE', 'OF', 'THE', 'COMING', 'RECEPTION', 'TO', 'THE', "WOMEN'S", 'POLITICAL', 'LEAGUE', 'WAS', 'SO', 'HUMOROUS', 'AND', 'DIVERTING', 'THAT', 'THEY', 'WERE', 'BOTH', 'LAUGHING', 'HEARTILY', 'OVER', 'THE', 'THING', 'WHEN', 'THE', 'YOUNG', 'PEOPLE', 'PASSED', 'THEM', 'AND', 'THUS', 'MISTER', 'HOPKINS', 'FAILED', 'TO', 'NOTICE', 'WHO', 'THE', 'OCCUPANT', 'OF', 'THE', 'OTHER', 'VEHICLE', 'WERE'] +6829-68771-0008-1920: ref=['THESE', 'WOMEN', 'WERE', 'FLATTERED', 'BY', 'THE', 'ATTENTION', 'OF', 'THE', 'YOUNG', 'LADY', 'AND', 'HAD', 'PROMISED', 'TO', 'ASSIST', 'IN', 'ELECTING', 'MISTER', 'FORBES'] +6829-68771-0008-1920: hyp=['THESE', 'WOMEN', 'WERE', 'FLATTERED', 'BY', 'THE', 'ATTENTION', 'OF', 'THE', 'YOUNG', 'LADY', 'AND', 'HAD', 'PROMISED', 'TO', 'ASSIST', 'IN', 'ELECTING', 'MISTER', 'FORBES'] +6829-68771-0009-1921: ref=['LOUISE', 'HOPED', 'FOR', 'EXCELLENT', 'RESULTS', 'FROM', 'THIS', 'ORGANIZATION', 'AND', 'WISHED', 'THE', 'ENTERTAINMENT', 'TO', 'BE', 'SO', 'EFFECTIVE', 'IN', 'WINNING', 'THEIR', 'GOOD', 'WILL', 'THAT', 'THEY', 'WOULD', 'WORK', 'EARNESTLY', 'FOR', 'THE', 'CAUSE', 'IN', 'WHICH', 'THEY', 'WERE', 'ENLISTED'] +6829-68771-0009-1921: hyp=['LOUISE', 'HOPED', 'FOR', 'EXCELLENT', 'RESULTS', 'FROM', 'THIS', 'ORGANIZATION', 'AND', 'WISHED', 'THE', 'ENTERTAINMENT', 'TO', 'BE', 'SO', 'EFFECTIVE', 'IN', 'WINNING', 'THEIR', 'GOOD', 'WILL', 'THAT', 'THEY', 'WOULD', 'WORK', 'EARNESTLY', 'FOR', 'THE', 'CAUSE', 'IN', 'WHICH', 'THEY', 'WERE', 'ENLISTED'] +6829-68771-0010-1922: ref=['THE', 'FAIRVIEW', 'BAND', 'WAS', 'ENGAGED', 'TO', 'DISCOURSE', 'AS', 'MUCH', 'HARMONY', 'AS', 'IT', 'COULD', 'PRODUCE', 'AND', 'THE', 'RESOURCES', 'OF', 'THE', 'GREAT', 'HOUSE', 'WERE', 'TAXED', 'TO', 'ENTERTAIN', 'THE', 'GUESTS'] +6829-68771-0010-1922: hyp=['THE', 'FAIR', 'VIEW', 'BAND', 'WAS', 'ENGAGED', 'TO', 'DISCOURSE', 'AS', 'MUCH', 'HARMONY', 'AS', 'IT', 'COULD', 'PRODUCE', 'AND', 'THE', 'RESOURCES', 'OF', 'THE', 'GREAT', 'HOUSE', 'WERE', 'TAXED', 'TO', 'ENTERTAIN', 'THE', 'GUESTS'] +6829-68771-0011-1923: ref=['TABLES', 'WERE', 'SPREAD', 'ON', 'THE', 'LAWN', 'AND', 'A', 'DAINTY', 'BUT', 'SUBSTANTIAL', 'REPAST', 'WAS', 'TO', 'BE', 'SERVED'] +6829-68771-0011-1923: hyp=['TABLES', 'WERE', 'SPREAD', 'ON', 'THE', 'LAWN', 'AND', 'A', 'DAINTY', 'BUT', 'SUBSTANTIAL', 'REPAST', 'WAS', 'TO', 'BE', 'SERVED'] +6829-68771-0012-1924: ref=['THIS', 'WAS', 'THE', 'FIRST', 'OCCASION', 'WITHIN', 'A', 'GENERATION', 'WHEN', 'SUCH', 'AN', 'ENTERTAINMENT', 'HAD', 'BEEN', 'GIVEN', 'AT', 'ELMHURST', 'AND', 'THE', 'ONLY', 'ONE', 'WITHIN', 'THE', 'MEMORY', 'OF', 'MAN', 'WHERE', 'THE', 'NEIGHBORS', 'AND', 'COUNTRY', 'PEOPLE', 'HAD', 'BEEN', 'INVITED', 'GUESTS'] +6829-68771-0012-1924: hyp=['THIS', 'WAS', 'THE', 'FIRST', 'OCCASION', 'WITHIN', 'A', 'GENERATION', 'WHEN', 'SUCH', 'AN', 'ENTERTAINMENT', 'HAD', 'BEEN', 'GIVEN', 'AT', 'ELMHURST', 'AND', 'THE', 'ONLY', 'WHEN', 'WITHIN', 'THE', 'MEMORY', 'OF', 'MAN', 'WERE', 'THE', 'NEIGHBORS', 'AND', 'COUNTRY', 'PEOPLE', 'HAD', 'BEEN', 'THE', 'INVITED', 'GUESTS'] +6829-68771-0013-1925: ref=['THE', 'ATTENDANCE', 'WAS', 'UNEXPECTEDLY', 'LARGE', 'AND', 'THE', 'GIRLS', 'WERE', 'DELIGHTED', 'FORESEEING', 'GREAT', 'SUCCESS', 'FOR', 'THEIR', 'FETE'] +6829-68771-0013-1925: hyp=['THE', 'ATTENDANTS', 'WAS', 'UNEXPECTEDLY', 'LARGE', 'AND', 'THE', 'GIRLS', 'WERE', 'DELIGHTED', 'FORESEEING', 'GREAT', 'SUCCESS', 
'FOR', 'THEIR', 'FIGHT'] +6829-68771-0014-1926: ref=['WE', 'OUGHT', 'TO', 'HAVE', 'MORE', 'ATTENDANTS', 'BETH', 'SAID', 'LOUISE', 'APPROACHING', 'HER', 'COUSIN'] +6829-68771-0014-1926: hyp=['WE', 'OUGHT', 'TO', 'HAVE', 'MORE', 'ATTENDANCE', 'BETH', 'SAID', 'LOUISE', 'APPROACHING', 'HER', 'COUSIN'] +6829-68771-0015-1927: ref=["WON'T", 'YOU', 'RUN', 'INTO', 'THE', 'HOUSE', 'AND', 'SEE', 'IF', 'MARTHA', "CAN'T", 'SPARE', 'ONE', 'OR', 'TWO', 'MORE', 'MAIDS'] +6829-68771-0015-1927: hyp=["WON'T", 'YOU', 'RUN', 'INTO', 'THE', 'HOUSE', 'AND', 'SEE', 'IF', 'MARTHA', "CAN'T", 'SPARE', 'ONE', 'OR', 'TWO', 'MORE', 'MATES'] +6829-68771-0016-1928: ref=['SHE', 'WAS', 'VERY', 'FOND', 'OF', 'THE', 'YOUNG', 'LADIES', 'WHOM', 'SHE', 'HAD', 'KNOWN', 'WHEN', 'AUNT', 'JANE', 'WAS', 'THE', 'MISTRESS', 'HERE', 'AND', 'BETH', 'WAS', 'HER', 'ESPECIAL', 'FAVORITE'] +6829-68771-0016-1928: hyp=['SHE', 'WAS', 'VERY', 'FOND', 'OF', 'THE', 'YOUNG', 'LADIES', 'WHOM', 'SHE', 'HAD', 'KNOWN', 'WHEN', 'AUNT', 'JANE', 'WAS', 'THEIR', 'MISTRESS', 'HERE', 'AND', 'BETH', 'WAS', 'HER', 'SPECIAL', 'FAVORITE'] +6829-68771-0017-1929: ref=['THE', 'HOUSEKEEPER', 'LED', 'THE', 'WAY', 'AND', 'BETH', 'FOLLOWED'] +6829-68771-0017-1929: hyp=['THE', 'HOUSEKEEPER', 'LED', 'THE', 'WAY', 'IN', 'BETH', 'FOLLOWED'] +6829-68771-0018-1930: ref=['FOR', 'A', 'MOMENT', 'BETH', 'STOOD', 'STARING', 'WHILE', 'THE', 'NEW', 'MAID', 'REGARDED', 'HER', 'WITH', 'COMPOSURE', 'AND', 'A', 'SLIGHT', 'SMILE', 'UPON', 'HER', 'BEAUTIFUL', 'FACE'] +6829-68771-0018-1930: hyp=['FOR', 'A', 'MOMENT', 'BETH', 'STOOD', 'STARING', 'WHILE', 'THE', 'NEW', 'MAID', 'REGARDED', 'HER', 'WITH', 'COMPOSURE', 'AND', 'OF', 'SLIGHT', 'SMILE', 'UPON', 'HER', 'BEAUTIFUL', 'FACE'] +6829-68771-0019-1931: ref=['SHE', 'WAS', 'DRESSED', 'IN', 'THE', 'REGULATION', 'COSTUME', 'OF', 'THE', 'MAIDS', 'AT', 'ELMHURST', 'A', 'PLAIN', 'BLACK', 'GOWN', 'WITH', 'WHITE', 'APRON', 'AND', 'CAP'] +6829-68771-0019-1931: hyp=['SHE', 'WAS', 'DRESSED', 'IN', 'THE', 'REGULATION', 'COSTUME', 'OF', 'THE', 'MAIDS', 'AT', 'ELMHURST', 'A', 'PLAYING', 'BLACK', 'GOWN', 'WITH', 'A', 'WHITE', 'APRON', 'AND', 'CAP'] +6829-68771-0020-1932: ref=['THEN', 'SHE', 'GAVE', 'A', 'LITTLE', 'LAUGH', 'AND', 'REPLIED', 'NO', 'MISS', 'BETH', "I'M", 'ELIZABETH', 'PARSONS'] +6829-68771-0020-1932: hyp=['THEN', 'SHE', 'GAVE', 'A', 'LITTLE', 'LAUGH', 'AND', 'REPLIED', 'NO', 'MISS', 'BETH', "I'M", 'ELIZABETH', "PARSON'S"] +6829-68771-0021-1933: ref=['BUT', 'IT', "CAN'T", 'BE', 'PROTESTED', 'THE', 'GIRL'] +6829-68771-0021-1933: hyp=['BUT', 'IT', "CAN'T", 'BE', 'PROTESTED', 'THE', 'GIRL'] +6829-68771-0022-1934: ref=['I', 'ATTEND', 'TO', 'THE', 'HOUSEHOLD', 'MENDING', 'YOU', 'KNOW', 'AND', 'CARE', 'FOR', 'THE', 'LINEN'] +6829-68771-0022-1934: hyp=['I', 'ATTEND', 'TO', 'THE', 'HOUSEHOLD', 'MENDING', 'YOU', 'KNOW', 'AND', 'CARE', 'FOR', 'THE', 'LINEN'] +6829-68771-0023-1935: ref=['YOU', 'SPEAK', 'LIKE', 'AN', 'EDUCATED', 'PERSON', 'SAID', 'BETH', 'WONDERINGLY', 'WHERE', 'IS', 'YOUR', 'HOME'] +6829-68771-0023-1935: hyp=['YOU', 'SPEAK', 'LIKE', 'AN', 'EDUCATED', 'PERSON', 'SAID', 'BETH', 'WONDERINGLY', 'WHERE', 'IS', 'YOUR', 'HOME'] +6829-68771-0024-1936: ref=['FOR', 'THE', 'FIRST', 'TIME', 'THE', 'MAID', 'SEEMED', 'A', 'LITTLE', 'CONFUSED', 'AND', 'HER', 'GAZE', 'WANDERED', 'FROM', 'THE', 'FACE', 'OF', 'HER', 'VISITOR'] +6829-68771-0024-1936: hyp=['FOR', 'THE', 'FIRST', 'TIME', 'THE', 'MAID', 'SEEMED', 'A', 'LITTLE', 'CONFUSED', 'AND', 'HER', 'GAZE', 'WANDERED', 'FROM', 'THE', 'FACE', 'OF', 'HER', 'VISITOR'] +6829-68771-0025-1937: ref=['SHE', 
'SAT', 'DOWN', 'IN', 'A', 'ROCKING', 'CHAIR', 'AND', 'CLASPING', 'HER', 'HANDS', 'IN', 'HER', 'LAP', 'ROCKED', 'SLOWLY', 'BACK', 'AND', 'FORTH', "I'M", 'SORRY', 'SAID', 'BETH'] +6829-68771-0025-1937: hyp=['SHE', 'SAT', 'DOWN', 'IN', 'A', 'ROCKING', 'CHAIR', 'AND', 'CLASPING', 'HER', 'HANDS', 'IN', 'HER', 'LAP', 'ROCK', 'SLOWLY', 'BACK', 'AND', 'FORTH', "I'M", 'SORRY', 'SAID', 'BETH'] +6829-68771-0026-1938: ref=['ELIZA', 'PARSONS', 'SHOOK', 'HER', 'HEAD'] +6829-68771-0026-1938: hyp=['ELIZA', 'PARSON', 'SHOOK', 'HER', 'HEAD'] +6829-68771-0027-1939: ref=['THEY', 'THEY', 'EXCITE', 'ME', 'IN', 'SOME', 'WAY', 'AND', 'I', 'I', "CAN'T", 'BEAR', 'THEM', 'YOU', 'MUST', 'EXCUSE', 'ME'] +6829-68771-0027-1939: hyp=['FATE', 'THEY', 'EXCITE', 'ME', 'IN', 'SOME', 'WAY', 'AND', 'I', 'I', "CAN'T", 'BEAR', 'THEM', 'YOU', 'MUST', 'EXCUSE', 'ME'] +6829-68771-0028-1940: ref=['SHE', 'EVEN', 'SEEMED', 'MILDLY', 'AMUSED', 'AT', 'THE', 'ATTENTION', 'SHE', 'ATTRACTED'] +6829-68771-0028-1940: hyp=['SHE', 'EVEN', 'SEEMED', 'MILDLY', 'AMUSED', 'AT', 'THE', 'ATTENTION', 'SHE', 'ATTRACTED'] +6829-68771-0029-1941: ref=['BETH', 'WAS', 'A', 'BEAUTIFUL', 'GIRL', 'THE', 'HANDSOMEST', 'OF', 'THE', 'THREE', 'COUSINS', 'BY', 'FAR', 'YET', 'ELIZA', 'SURPASSED', 'HER', 'IN', 'NATURAL', 'CHARM', 'AND', 'SEEMED', 'WELL', 'AWARE', 'OF', 'THE', 'FACT'] +6829-68771-0029-1941: hyp=['BETH', 'WAS', 'A', 'BEAUTIFUL', 'GIRL', 'THE', 'HANDSOMEST', 'OF', 'THE', 'THREE', 'COUSINS', 'BY', 'FAR', 'YET', 'ELIZA', 'SURPASSED', 'HER', 'A', 'NATURAL', 'CHARM', 'AND', 'SEEMED', 'WELL', 'AWARE', 'OF', 'THE', 'FACT'] +6829-68771-0030-1942: ref=['HER', 'MANNER', 'WAS', 'NEITHER', 'INDEPENDENT', 'NOR', 'ASSERTIVE', 'BUT', 'RATHER', 'ONE', 'OF', 'WELL', 'BRED', 'COMPOSURE', 'AND', 'CALM', 'RELIANCE'] +6829-68771-0030-1942: hyp=['HER', 'MANNER', 'WAS', 'NEITHER', 'INDEPENDENT', 'NOR', 'ASSERTIVE', 'BUT', 'RATHER', 'ONE', 'OF', 'WELL', 'BRED', 'COMPOSURE', 'AND', 'CALM', 'RELIANCE'] +6829-68771-0031-1943: ref=['HER', 'EYES', 'WANDERED', 'TO', 'THE', "MAID'S", 'HANDS'] +6829-68771-0031-1943: hyp=['HER', 'EYES', 'WANDERED', 'TO', 'THE', "MAID'S", 'HANDS'] +6829-68771-0032-1944: ref=['HOWEVER', 'HER', 'FEATURES', 'AND', 'FORM', 'MIGHT', 'REPRESS', 'ANY', 'EVIDENCE', 'OF', 'NERVOUSNESS', 'THESE', 'HANDS', 'TOLD', 'A', 'DIFFERENT', 'STORY'] +6829-68771-0032-1944: hyp=['HOWEVER', 'HER', 'FEATURES', 'INFORM', 'MIGHT', 'REPRESS', 'ANY', 'EVIDENCE', 'OF', 'NERVOUSNESS', 'THESE', 'HANDS', 'TOLD', 'A', 'DIFFERENT', 'STORY'] +6829-68771-0033-1945: ref=['SHE', 'ROSE', 'QUICKLY', 'TO', 'HER', 'FEET', 'WITH', 'AN', 'IMPETUOUS', 'GESTURE', 'THAT', 'MADE', 'HER', 'VISITOR', 'CATCH', 'HER', 'BREATH'] +6829-68771-0033-1945: hyp=['SHE', 'ROSE', 'QUICKLY', 'TO', 'HER', 'FEET', 'WITH', 'AN', 'IMPETUOUS', 'GESTURE', 'THAT', 'MADE', 'HER', 'VISITOR', 'CATCH', 'HER', 'BREATH'] +6829-68771-0034-1946: ref=['I', 'WISH', 'I', 'KNEW', 'MYSELF', 'SHE', 'CRIED', 'FIERCELY'] +6829-68771-0034-1946: hyp=['I', 'WISH', 'I', 'KNEW', 'MYSELF', 'SHE', 'CRIED', 'FIERCELY'] +6829-68771-0035-1947: ref=['WILL', 'YOU', 'LEAVE', 'ME', 'ALONE', 'IN', 'MY', 'OWN', 'ROOM', 'OR', 'MUST', 'I', 'GO', 'AWAY', 'TO', 'ESCAPE', 'YOU'] +6829-68771-0035-1947: hyp=['WILL', 'YOU', 'LEAVE', 'ME', 'ALONE', 'IN', 'MY', 'OWN', 'ROOM', 'OR', 'MUST', 'I', 'GO', 'AWAY', 'TO', 'ESCAPE', 'YOU'] +6829-68771-0036-1948: ref=['ELIZA', 'CLOSED', 'THE', 'DOOR', 'BEHIND', 'HER', 'WITH', 'A', 'DECIDED', 'SLAM', 'AND', 'A', 'KEY', 'CLICKED', 'IN', 'THE', 'LOCK'] +6829-68771-0036-1948: hyp=['ELIZA', 'CLOSED', 'THE', 'DOOR', 'BEHIND', 
'HER', 'WITH', 'A', 'DECIDED', 'SLAM', 'AND', 'A', 'KEY', 'CLICKED', 'IN', 'THE', 'LOCK'] +6930-75918-0000-0: ref=['CONCORD', 'RETURNED', 'TO', 'ITS', 'PLACE', 'AMIDST', 'THE', 'TENTS'] +6930-75918-0000-0: hyp=['CONCORD', 'RETURNED', 'TO', 'ITS', 'PLACE', 'AMIDST', 'THE', 'TENTS'] +6930-75918-0001-1: ref=['THE', 'ENGLISH', 'FORWARDED', 'TO', 'THE', 'FRENCH', 'BASKETS', 'OF', 'FLOWERS', 'OF', 'WHICH', 'THEY', 'HAD', 'MADE', 'A', 'PLENTIFUL', 'PROVISION', 'TO', 'GREET', 'THE', 'ARRIVAL', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'THE', 'FRENCH', 'IN', 'RETURN', 'INVITED', 'THE', 'ENGLISH', 'TO', 'A', 'SUPPER', 'WHICH', 'WAS', 'TO', 'BE', 'GIVEN', 'THE', 'NEXT', 'DAY'] +6930-75918-0001-1: hyp=['THE', 'ENGLISH', 'FOOTED', 'TO', 'THE', 'FRENCH', 'BASKETS', 'OF', 'FLOWERS', 'OF', 'WHICH', 'THEY', 'HAD', 'MADE', 'A', 'PLENTIFUL', 'PROVISION', 'TO', 'GREET', 'THE', 'ARRIVAL', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'THE', 'FRENCH', 'IN', 'RETURN', 'INVITED', 'THE', 'ENGLISH', 'TO', 'A', 'SUPPER', 'WHICH', 'WAS', 'TO', 'BE', 'GIVEN', 'THE', 'NEXT', 'DAY'] +6930-75918-0002-2: ref=['CONGRATULATIONS', 'WERE', 'POURED', 'IN', 'UPON', 'THE', 'PRINCESS', 'EVERYWHERE', 'DURING', 'HER', 'JOURNEY'] +6930-75918-0002-2: hyp=['CONGRATULATIONS', 'WERE', 'POURED', 'IN', 'UPON', 'THE', 'PRINCESS', 'EVERYWHERE', 'DURING', 'HER', 'JOURNEY'] +6930-75918-0003-3: ref=['FROM', 'THE', 'RESPECT', 'PAID', 'HER', 'ON', 'ALL', 'SIDES', 'SHE', 'SEEMED', 'LIKE', 'A', 'QUEEN', 'AND', 'FROM', 'THE', 'ADORATION', 'WITH', 'WHICH', 'SHE', 'WAS', 'TREATED', 'BY', 'TWO', 'OR', 'THREE', 'SHE', 'APPEARED', 'AN', 'OBJECT', 'OF', 'WORSHIP', 'THE', 'QUEEN', 'MOTHER', 'GAVE', 'THE', 'FRENCH', 'THE', 'MOST', 'AFFECTIONATE', 'RECEPTION', 'FRANCE', 'WAS', 'HER', 'NATIVE', 'COUNTRY', 'AND', 'SHE', 'HAD', 'SUFFERED', 'TOO', 'MUCH', 'UNHAPPINESS', 'IN', 'ENGLAND', 'FOR', 'ENGLAND', 'TO', 'HAVE', 'MADE', 'HER', 'FORGET', 'FRANCE'] +6930-75918-0003-3: hyp=['FROM', 'THE', 'RESPECT', 'PAID', 'HER', 'ON', 'ALL', 'SIDES', 'SHE', 'SEEMED', 'LIKE', 'A', 'QUEEN', 'AND', 'FROM', 'THE', 'ADORATION', 'WITH', 'WHICH', 'SHE', 'WAS', 'TREATED', 'BY', 'TWO', 'OR', 'THREE', 'SHE', 'APPEARED', 'AN', 'OBJECT', 'OF', 'WORSHIP', 'THE', 'QUEEN', 'MOTHER', 'GAVE', 'THE', 'FRENCH', 'THE', 'MOST', 'AFFECTIONATE', 'RECEPTION', 'FRANCE', 'WAS', 'HER', 'NATIVE', 'COUNTRY', 'AND', 'SHE', 'HAD', 'SUFFERED', 'TOO', 'MUCH', 'UNHAPPINESS', 'IN', 'ENGLAND', 'FOR', 'ENGLAND', 'TO', 'HAVE', 'MADE', 'HER', 'FORGET', 'FRANCE'] +6930-75918-0004-4: ref=['SHE', 'TAUGHT', 'HER', 'DAUGHTER', 'THEN', 'BY', 'HER', 'OWN', 'AFFECTION', 'FOR', 'IT', 'THAT', 'LOVE', 'FOR', 'A', 'COUNTRY', 'WHERE', 'THEY', 'HAD', 'BOTH', 'BEEN', 'HOSPITABLY', 'RECEIVED', 'AND', 'WHERE', 'A', 'BRILLIANT', 'FUTURE', 'OPENED', 'BEFORE', 'THEM'] +6930-75918-0004-4: hyp=['SHE', 'TAUGHT', 'HER', 'DAUGHTER', 'THEN', 'BY', 'HER', 'OWN', 'AFFECTION', 'FOR', 'IT', 'THAT', 'LOVE', 'FOR', 'A', 'COUNTRY', 'WHERE', 'THEY', 'HAD', 'BOTH', 'BEEN', 'HOSPITABLY', 'RECEIVED', 'AND', 'WHERE', 'A', 'BRILLIANT', 'FUTURE', 'OPENED', 'FOR', 'THEM'] +6930-75918-0005-5: ref=['THE', 'COUNT', 'HAD', 'THROWN', 'HIMSELF', 'BACK', 'ON', 'HIS', 'SEAT', 'LEANING', 'HIS', 'SHOULDERS', 'AGAINST', 'THE', 'PARTITION', 'OF', 'THE', 'TENT', 'AND', 'REMAINED', 'THUS', 'HIS', 'FACE', 'BURIED', 'IN', 'HIS', 'HANDS', 'WITH', 'HEAVING', 'CHEST', 'AND', 'RESTLESS', 'LIMBS'] +6930-75918-0005-5: hyp=['THE', 'COUNT', 'HAD', 'THROWN', 'HIMSELF', 'BACK', 'ON', 'HIS', 'SEAT', 'LEANING', 'HIS', 'SHOULDERS', 'AGAINST', 'THE', 'PARTITION', 'OF', 'THE', 'TENT', 'AND', 
'REMAINED', 'THUS', 'HIS', 'FACE', 'BURIED', 'IN', 'HIS', 'HANDS', 'WITH', 'HEAVING', 'CHEST', 'AND', 'RESTLESS', 'LIMBS'] +6930-75918-0006-6: ref=['THIS', 'HAS', 'INDEED', 'BEEN', 'A', 'HARASSING', 'DAY', 'CONTINUED', 'THE', 'YOUNG', 'MAN', 'HIS', 'EYES', 'FIXED', 'UPON', 'HIS', 'FRIEND'] +6930-75918-0006-6: hyp=['THIS', 'HAS', 'INDEED', 'BEEN', 'A', 'HARASSING', 'DAY', 'CONTINUED', 'THE', 'YOUNG', 'MAN', 'HIS', 'EYES', 'FIXED', 'UPON', 'HIS', 'FRIEND'] +6930-75918-0007-7: ref=['YOU', 'WILL', 'BE', 'FRANK', 'WITH', 'ME', 'I', 'ALWAYS', 'AM'] +6930-75918-0007-7: hyp=['YOU', 'WILL', 'BE', 'FRANK', 'WITH', 'ME', 'I', 'ALWAYS', 'AM'] +6930-75918-0008-8: ref=['CAN', 'YOU', 'IMAGINE', 'WHY', 'BUCKINGHAM', 'HAS', 'BEEN', 'SO', 'VIOLENT', 'I', 'SUSPECT'] +6930-75918-0008-8: hyp=['CAN', 'YOU', 'IMAGINE', 'MY', 'BUCKINGHAM', 'HAS', 'BEEN', 'SO', 'VIOLENT', 'I', 'SUSPECT'] +6930-75918-0009-9: ref=['IT', 'IS', 'YOU', 'WHO', 'ARE', 'MISTAKEN', 'RAOUL', 'I', 'HAVE', 'READ', 'HIS', 'DISTRESS', 'IN', 'HIS', 'EYES', 'IN', 'HIS', 'EVERY', 'GESTURE', 'AND', 'ACTION', 'THE', 'WHOLE', 'DAY'] +6930-75918-0009-9: hyp=['IT', 'IS', 'YOU', 'WHO', 'ARE', 'MISTAKEN', 'RAOUL', 'I', 'HAVE', 'READ', 'HIS', 'DISTRESS', 'IN', 'HIS', 'EYES', 'IN', 'HIS', 'EVERY', 'GESTURE', 'AND', 'ACTION', 'THE', 'WHOLE', 'DAY'] +6930-75918-0010-10: ref=['I', 'CAN', 'PERCEIVE', 'LOVE', 'CLEARLY', 'ENOUGH'] +6930-75918-0010-10: hyp=['I', 'CAN', 'PERCEIVE', 'LOVE', 'CLEARLY', 'ENOUGH'] +6930-75918-0011-11: ref=['I', 'AM', 'CONVINCED', 'OF', 'WHAT', 'I', 'SAY', 'SAID', 'THE', 'COUNT'] +6930-75918-0011-11: hyp=['I', 'AM', 'CONVINCED', 'OF', 'WHAT', 'I', 'SAY', 'SAID', 'THE', 'COUNT'] +6930-75918-0012-12: ref=['IT', 'IS', 'ANNOYANCE', 'THEN'] +6930-75918-0012-12: hyp=['IT', 'IS', 'ANNOYANCE', 'THEN'] +6930-75918-0013-13: ref=['IN', 'THOSE', 'VERY', 'TERMS', 'I', 'EVEN', 'ADDED', 'MORE'] +6930-75918-0013-13: hyp=['IN', 'THOSE', 'VERY', 'TERMS', 'I', 'EVEN', 'ADDED', 'MORE'] +6930-75918-0014-14: ref=['BUT', 'CONTINUED', 'RAOUL', 'NOT', 'INTERRUPTED', 'BY', 'THIS', 'MOVEMENT', 'OF', 'HIS', 'FRIEND', 'HEAVEN', 'BE', 'PRAISED', 'THE', 'FRENCH', 'WHO', 'ARE', 'PRONOUNCED', 'TO', 'BE', 'THOUGHTLESS', 'AND', 'INDISCREET', 'RECKLESS', 'EVEN', 'ARE', 'CAPABLE', 'OF', 'BRINGING', 'A', 'CALM', 'AND', 'SOUND', 'JUDGMENT', 'TO', 'BEAR', 'ON', 'MATTERS', 'OF', 'SUCH', 'HIGH', 'IMPORTANCE'] +6930-75918-0014-14: hyp=['BUT', 'CONTINUED', 'RAOUL', 'NOT', 'INTERRUPTED', 'BY', 'THIS', 'MOVEMENT', 'OF', 'HIS', 'FRIEND', 'HEAVEN', 'BE', 'PRAISED', 'THE', 'FRENCH', 'WHO', 'ARE', 'PRONOUNCED', 'TO', 'BE', 'THOUGHTLESS', 'AND', 'INDISCREET', 'RECKLESS', 'EVEN', 'ARE', 'CAPABLE', 'OF', 'BRINGING', 'A', 'CALM', 'AND', 'SOUND', 'JUDGMENT', 'TO', 'BEAR', 'ON', 'MATTERS', 'OF', 'SUCH', 'HIGH', 'IMPORTANCE'] +6930-75918-0015-15: ref=['THUS', 'IT', 'IS', 'THAT', 'THE', 'HONOR', 'OF', 'THREE', 'IS', 'SAVED', 'OUR', "COUNTRY'S", 'OUR', "MASTER'S", 'AND', 'OUR', 'OWN'] +6930-75918-0015-15: hyp=['THUS', 'IT', 'IS', 'THAT', 'THE', 'HONOR', 'OF', 'THREE', 'IS', 'SAVED', 'OUR', 'COUNTRY', 'OUR', 'MASTERS', 'AND', 'OUR', 'OWN'] +6930-75918-0016-16: ref=['YES', 'I', 'NEED', 'REPOSE', 'MANY', 'THINGS', 'HAVE', 'AGITATED', 'ME', 'TO', 'DAY', 'BOTH', 'IN', 'MIND', 'AND', 'BODY', 'WHEN', 'YOU', 'RETURN', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'THE', 'SAME', 'MAN'] +6930-75918-0016-16: hyp=['YES', 'I', 'NEED', 'REPOSE', 'MANY', 'THINGS', 'HAVE', 'AGITATED', 'ME', 'TO', 'DAY', 'BOTH', 'IN', 'MIND', 'AND', 'BODY', 'WHEN', 'YOU', 'RETURN', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 
'LONGER', 'BE', 'THE', 'SAME', 'MAN'] +6930-75918-0017-17: ref=['BUT', 'IN', 'THIS', 'FRIENDLY', 'PRESSURE', 'RAOUL', 'COULD', 'DETECT', 'THE', 'NERVOUS', 'AGITATION', 'OF', 'A', 'GREAT', 'INTERNAL', 'CONFLICT'] +6930-75918-0017-17: hyp=['BUT', 'IN', 'THIS', 'FRIENDLY', 'PRESSURE', 'RAOUL', 'COULD', 'DETECT', 'THE', 'NERVOUS', 'AGITATION', 'OF', 'A', 'GREAT', 'INTERNAL', 'CONFLICT'] +6930-75918-0018-18: ref=['THE', 'NIGHT', 'WAS', 'CLEAR', 'STARLIT', 'AND', 'SPLENDID', 'THE', 'TEMPEST', 'HAD', 'PASSED', 'AWAY', 'AND', 'THE', 'SWEET', 'INFLUENCES', 'OF', 'THE', 'EVENING', 'HAD', 'RESTORED', 'LIFE', 'PEACE', 'AND', 'SECURITY', 'EVERYWHERE'] +6930-75918-0018-18: hyp=['THE', 'NIGHT', 'WAS', 'CLEAR', 'STARLIT', 'AND', 'SPLENDID', 'THE', 'TEMPEST', 'HAD', 'PASSED', 'AWAY', 'AND', 'THE', 'SWEET', 'INFLUENCES', 'OF', 'THE', 'EVENING', 'HAD', 'RESTORED', 'LIFE', 'PEACE', 'AND', 'SECURITY', 'EVERYWHERE'] +6930-75918-0019-19: ref=['UPON', 'THE', 'LARGE', 'SQUARE', 'IN', 'FRONT', 'OF', 'THE', 'HOTEL', 'THE', 'SHADOWS', 'OF', 'THE', 'TENTS', 'INTERSECTED', 'BY', 'THE', 'GOLDEN', 'MOONBEAMS', 'FORMED', 'AS', 'IT', 'WERE', 'A', 'HUGE', 'MOSAIC', 'OF', 'JET', 'AND', 'YELLOW', 'FLAGSTONES'] +6930-75918-0019-19: hyp=['UPON', 'THE', 'LARGE', 'SQUARE', 'IN', 'FRONT', 'OF', 'THE', 'HOTEL', 'THE', 'SHADOWS', 'OF', 'THE', 'TENTS', 'INTERSECTED', 'BY', 'THE', 'GOLDEN', 'MOONBEAMS', 'FORMED', 'AS', 'IT', 'WERE', 'A', 'HUGE', 'MOSAIC', 'OF', 'JET', 'AND', 'YELLOW', 'FLAGSTONES'] +6930-75918-0020-20: ref=['BRAGELONNE', 'WATCHED', 'FOR', 'SOME', 'TIME', 'THE', 'CONDUCT', 'OF', 'THE', 'TWO', 'LOVERS', 'LISTENED', 'TO', 'THE', 'LOUD', 'AND', 'UNCIVIL', 'SLUMBERS', 'OF', 'MANICAMP', 'WHO', 'SNORED', 'AS', 'IMPERIOUSLY', 'AS', 'THOUGH', 'HE', 'WAS', 'WEARING', 'HIS', 'BLUE', 'AND', 'GOLD', 'INSTEAD', 'OF', 'HIS', 'VIOLET', 'SUIT'] +6930-75918-0020-20: hyp=['BRAGGLIN', 'WATCHED', 'FOR', 'SOME', 'TIME', 'THE', 'CONDUCT', 'OF', 'THE', 'TWO', 'LOVERS', 'LISTENED', 'TO', 'THE', 'LOUD', 'AND', 'UNCIVIL', 'SLUMBERS', 'OF', 'MANICAMP', 'WHO', 'SNORED', 'AS', 'IMPERIOUSLY', 'AS', 'THOUGH', 'HE', 'WAS', 'WEARING', 'HIS', 'BLUE', 'AND', 'GOLD', 'INSTEAD', 'OF', 'HIS', 'VIOLET', 'SUIT'] +6930-76324-0000-21: ref=['GOLIATH', 'MAKES', 'ANOTHER', 'DISCOVERY'] +6930-76324-0000-21: hyp=['GOLIATH', 'MAKES', 'ANOTHER', 'DISCOVERY'] +6930-76324-0001-22: ref=['THEY', 'WERE', 'CERTAINLY', 'NO', 'NEARER', 'THE', 'SOLUTION', 'OF', 'THEIR', 'PROBLEM'] +6930-76324-0001-22: hyp=['THERE', 'WERE', 'CERTAINLY', 'NO', 'NEAR', 'THE', 'SOLUTION', 'OF', 'THEIR', 'PROBLEM'] +6930-76324-0002-23: ref=['THE', 'POOR', 'LITTLE', 'THINGS', 'CRIED', 'CYNTHIA', 'THINK', 'OF', 'THEM', 'HAVING', 'BEEN', 'TURNED', 'TO', 'THE', 'WALL', 'ALL', 'THESE', 'YEARS'] +6930-76324-0002-23: hyp=['THE', 'POOR', 'LITTLE', 'THINGS', 'CRIED', 'CYNTHIA', 'THINK', 'OF', 'THEM', 'HAVING', 'BEEN', 'TURNED', 'TO', 'THE', 'WALL', 'ALL', 'THESE', 'YEARS'] +6930-76324-0003-24: ref=['NOW', 'WHAT', 'WAS', 'THE', 'SENSE', 'OF', 'IT', 'TWO', 'INNOCENT', 'BABIES', 'LIKE', 'THAT'] +6930-76324-0003-24: hyp=['NOW', 'WHAT', 'IS', 'THE', 'SENSE', 'OF', 'IT', 'TOO', 'INNOCENT', 'BABIES', 'LIKE', 'THAT'] +6930-76324-0004-25: ref=['BUT', 'JOYCE', 'HAD', 'NOT', 'BEEN', 'LISTENING', 'ALL', 'AT', 'ONCE', 'SHE', 'PUT', 'DOWN', 'HER', 'CANDLE', 'ON', 'THE', 'TABLE', 'AND', 'FACED', 'HER', 'COMPANION'] +6930-76324-0004-25: hyp=['BUT', 'JOYCE', 'HAD', 'NOT', 'BEEN', 'LISTENING', 'ALL', 'AT', 'ONCE', 'SHE', 'PUT', 'DOWN', 'HER', 'CANDLE', 'ON', 'THE', 'TABLE', 'AND', 'FACED', 'HER', 'COMPANION'] 
+6930-76324-0005-26: ref=['THE', 'TWIN', 'BROTHER', 'DID', 'SOMETHING', 'SHE', "DIDN'T", 'LIKE', 'AND', 'SHE', 'TURNED', 'HIS', 'PICTURE', 'TO', 'THE', 'WALL'] +6930-76324-0005-26: hyp=['THE', 'TWIN', 'BROTHER', 'DID', 'SOMETHING', 'SHE', "DIDN'T", 'LIKE', 'AND', 'SHE', 'TURNED', 'HIS', 'PICTURE', 'TO', 'THE', 'WALL'] +6930-76324-0006-27: ref=['HERS', 'HAPPENED', 'TO', 'BE', 'IN', 'THE', 'SAME', 'FRAME', 'TOO', 'BUT', 'SHE', 'EVIDENTLY', "DIDN'T", 'CARE', 'ABOUT', 'THAT'] +6930-76324-0006-27: hyp=['HERS', 'HAPPENED', 'TO', 'BE', 'ON', 'THE', 'SAME', 'FRAME', 'TOO', 'BUT', 'SHE', 'EVIDENTLY', "DIDN'T", 'CARE', 'ABOUT', 'IT'] +6930-76324-0007-28: ref=['NOW', 'WHAT', 'HAVE', 'YOU', 'TO', 'SAY', 'CYNTHIA', 'SPRAGUE'] +6930-76324-0007-28: hyp=['NOW', 'WHAT', 'HAVE', 'YOU', 'TO', 'SAY', 'CYNTHIA', 'SPRAGUE'] +6930-76324-0008-29: ref=['I', 'THOUGHT', 'WE', 'WERE', 'STUMPED', 'AGAIN', 'WHEN', 'I', 'FIRST', 'SAW', 'THAT', 'PICTURE', 'BUT', "IT'S", 'BEEN', 'OF', 'SOME', 'USE', 'AFTER', 'ALL'] +6930-76324-0008-29: hyp=['I', 'THOUGHT', 'WE', 'WERE', 'STUMPED', 'AGAIN', 'WHEN', 'I', 'FIRST', 'SAW', 'THAT', 'PICTURE', 'BUT', 'IT', 'SPIN', 'OF', 'SOME', 'USE', 'AFTER', 'ALL'] +6930-76324-0009-30: ref=['DO', 'YOU', 'SUPPOSE', 'THE', 'MINIATURE', 'WAS', 'A', 'COPY', 'OF', 'THE', 'SAME', 'THING'] +6930-76324-0009-30: hyp=['DO', 'YOU', 'SUPPOSE', 'THE', 'MINIATURE', 'WAS', 'A', 'COPY', 'OF', 'THE', 'SAME', 'THING'] +6930-76324-0010-31: ref=['WHAT', 'IN', 'THE', 'WORLD', 'IS', 'THAT', 'QUERIED', 'JOYCE'] +6930-76324-0010-31: hyp=['ONE', 'IN', 'THE', 'WORLD', 'IS', 'IT', 'QUERIED', 'JOYCE'] +6930-76324-0011-32: ref=['THEY', 'WORRY', 'ME', 'TERRIBLY', 'AND', 'BESIDES', "I'D", 'LIKE', 'TO', 'SEE', 'WHAT', 'THIS', 'LOVELY', 'FURNITURE', 'LOOKS', 'LIKE', 'WITHOUT', 'SUCH', 'QUANTITIES', 'OF', 'DUST', 'ALL', 'OVER', 'IT', 'GOOD', 'SCHEME', 'CYN'] +6930-76324-0011-32: hyp=['MAY', 'WORRY', 'ME', 'TERRIBLY', 'EMBICIDES', "I'D", 'LIKE', 'TO', 'SEE', 'WHAT', 'THIS', 'LOVELY', 'FURNITURE', 'LOOKS', 'LIKE', 'WITHOUT', 'SUCH', 'QUANTITIES', 'OF', 'DUST', 'ALL', 'OVER', 'IT', 'GOOD', 'SCHEME', 'SIN'] +6930-76324-0012-33: ref=["WE'LL", 'COME', 'IN', 'HERE', 'THIS', 'AFTERNOON', 'WITH', 'OLD', 'CLOTHES', 'ON', 'AND', 'HAVE', 'A', 'REGULAR', 'HOUSE', 'CLEANING'] +6930-76324-0012-33: hyp=['WILL', 'COME', 'AND', 'HERE', 'THIS', 'AFTERNOON', 'WITH', 'OLD', 'CLOTHES', 'ON', 'AND', 'HALF', 'A', 'REGULAR', 'HOUSE', 'CLEANING'] +6930-76324-0013-34: ref=['IT', "CAN'T", 'HURT', 'ANYTHING', "I'M", 'SURE', 'FOR', 'WE', "WON'T", 'DISTURB', 'THINGS', 'AT', 'ALL'] +6930-76324-0013-34: hyp=['YOU', "CAN'T", 'HURT', 'ANYTHING', "I'M", 'SURE', 'FOR', 'WE', "WON'T", 'DISTURB', 'THINGS', 'AT', 'ALL'] +6930-76324-0014-35: ref=['THIS', 'THOUGHT', 'HOWEVER', 'DID', 'NOT', 'ENTER', 'THE', 'HEADS', 'OF', 'THE', 'ENTHUSIASTIC', 'PAIR'] +6930-76324-0014-35: hyp=['THIS', 'THOUGHT', 'HOWEVER', 'DID', 'NOT', 'ENTER', 'THE', 'HEADS', 'OF', 'THE', 'ENTHUSIASTIC', 'PAIR'] +6930-76324-0015-36: ref=['SMUGGLING', 'THE', 'HOUSE', 'CLEANING', 'PARAPHERNALIA', 'INTO', 'THE', 'CELLAR', 'WINDOW', 'UNOBSERVED', 'THAT', 'AFTERNOON', 'PROVED', 'NO', 'EASY', 'TASK', 'FOR', 'CYNTHIA', 'HAD', 'ADDED', 'A', 'WHISK', 'BROOM', 'AND', 'DUST', 'PAN', 'TO', 'THE', 'OUTFIT'] +6930-76324-0015-36: hyp=['SMUGGLING', 'THE', 'HOUSE', 'CLEANING', 'PAIR', 'FERNALIA', 'INTO', 'THE', 'CELLAR', 'WINDOW', 'UNOBSERVED', 'THAT', 'AFTERNOON', 'PROVED', 'NO', 'EASY', 'TASK', 'FOR', 'CYNTHIA', 'HAD', 'ADDED', 'A', 'WHISK', 'BROOM', 'AND', 'DUST', 'PAN', 'TO', 'THE', 'OUTFIT'] 
+6930-76324-0016-37: ref=['THE', 'LURE', 'PROVED', 'TOO', 'MUCH', 'FOR', 'HIM', 'AND', 'HE', 'CAME', 'SPORTING', 'AFTER', 'IT', 'AS', 'FRISKILY', 'AS', 'A', 'YOUNG', 'KITTEN', 'MUCH', 'TO', "CYNTHIA'S", 'DELIGHT', 'WHEN', 'SHE', 'CAUGHT', 'SIGHT', 'OF', 'HIM'] +6930-76324-0016-37: hyp=['THE', 'LURE', 'PROVED', 'TOO', 'MUCH', 'FOR', 'HIM', 'AND', 'HE', 'CAME', 'SPORTING', 'AFTER', 'IT', 'AS', 'FRISKLY', 'AS', 'A', 'YOUNG', 'KITTEN', 'MUCH', 'TO', "CYNTHIA'S", 'DELIGHT', 'WHEN', 'SHE', 'CAUGHT', 'SIGHT', 'OF', 'HIM'] +6930-76324-0017-38: ref=['OH', 'LET', 'HIM', 'COME', 'ALONG', 'SHE', 'URGED', 'I', 'DO', 'LOVE', 'TO', 'SEE', 'HIM', 'ABOUT', 'THAT', 'OLD', 'HOUSE'] +6930-76324-0017-38: hyp=['OH', 'LET', 'HIM', 'COME', 'ALONG', 'SHE', 'URGED', 'I', 'DO', 'LOVE', 'TO', 'SEE', 'HIM', 'ABOUT', 'THAT', 'OLD', 'HOUSE'] +6930-76324-0018-39: ref=['HE', 'MAKES', 'IT', 'SORT', 'OF', 'COZIER'] +6930-76324-0018-39: hyp=['HE', 'MAKES', 'IT', 'SORT', 'OF', 'COSIER'] +6930-76324-0019-40: ref=['NOW', "LET'S", 'DUST', 'THE', 'FURNITURE', 'AND', 'PICTURES'] +6930-76324-0019-40: hyp=['NOW', 'ITS', 'DUST', 'THE', 'FURNITURE', 'AND', 'PICTURES'] +6930-76324-0020-41: ref=['YET', 'LITTLE', 'AS', 'IT', 'WAS', 'IT', 'HAD', 'ALREADY', 'MADE', 'A', 'VAST', 'DIFFERENCE', 'IN', 'THE', 'ASPECT', 'OF', 'THE', 'ROOM'] +6930-76324-0020-41: hyp=['YET', 'LITTLE', 'AS', 'IT', 'WAS', 'IT', 'HAD', 'ALREADY', 'MADE', 'A', 'VAST', 'DIFFERENCE', 'IN', 'THE', 'ASPECT', 'OF', 'THE', 'ROOM'] +6930-76324-0021-42: ref=['SURFACE', 'DUST', 'AT', 'LEAST', 'HAD', 'BEEN', 'REMOVED', 'AND', 'THE', 'FINE', 'OLD', 'FURNITURE', 'GAVE', 'A', 'HINT', 'OF', 'ITS', 'REAL', 'ELEGANCE', 'AND', 'POLISH'] +6930-76324-0021-42: hyp=['SURFACE', 'DUS', 'AT', 'LEAST', 'HAD', 'BEEN', 'REMOVED', 'AND', 'THE', 'FINE', 'OLD', 'FURNITURE', 'GAVE', 'A', 'HINT', 'OF', 'ITS', 'REAL', 'ELEGANCE', 'AND', 'POLISH'] +6930-76324-0022-43: ref=['THEN', 'SHE', 'SUDDENLY', 'REMARKED'] +6930-76324-0022-43: hyp=['THEN', 'SHE', 'SUDDENLY', 'REMARKED'] +6930-76324-0023-44: ref=['AND', 'MY', 'POCKET', 'MONEY', 'IS', 'GETTING', 'LOW', 'AGAIN', 'AND', 'YOU', "HAVEN'T", 'ANY', 'LEFT', 'AS', 'USUAL'] +6930-76324-0023-44: hyp=['AND', 'MY', 'POCKET', 'MONEY', 'IS', 'GETTING', 'LOW', 'AGAIN', 'AND', 'YOU', "HAVEN'T", 'ANY', 'LEFT', 'AS', 'USUAL'] +6930-76324-0024-45: ref=['THEY', 'SAY', 'ILLUMINATION', 'BY', 'CANDLE', 'LIGHT', 'IS', 'THE', 'PRETTIEST', 'IN', 'THE', 'WORLD'] +6930-76324-0024-45: hyp=['THEY', 'SAY', 'ILLUMINATION', 'BY', 'CANDLE', 'LIGHT', 'IS', 'THE', 'PRETTIEST', 'IN', 'THE', 'WORLD'] +6930-76324-0025-46: ref=['WHY', "IT'S", 'GOLIATH', 'AS', 'USUAL', 'THEY', 'BOTH', 'CRIED', 'PEERING', 'IN'] +6930-76324-0025-46: hyp=['WHY', 'IT', 'GOLIATH', 'AS', 'USUAL', 'THEY', 'BOTH', 'CRIED', 'PEERING', 'IN'] +6930-76324-0026-47: ref=["ISN'T", 'HE', 'THE', 'GREATEST', 'FOR', 'GETTING', 'INTO', 'ODD', 'CORNERS'] +6930-76324-0026-47: hyp=["ISN'T", 'HE', 'THE', 'GREATEST', 'FOR', 'GETTING', 'INTO', 'ODD', 'CORNERS'] +6930-76324-0027-48: ref=['FORGETTING', 'ALL', 'THEIR', 'WEARINESS', 'THEY', 'SEIZED', 'THEIR', 'CANDLES', 'AND', 'SCURRIED', 'THROUGH', 'THE', 'HOUSE', 'FINDING', 'AN', 'OCCASIONAL', 'PAPER', 'TUCKED', 'AWAY', 'IN', 'SOME', 'ODD', 'CORNER'] +6930-76324-0027-48: hyp=['FORGETTING', 'ALL', 'THEIR', 'WEARINESS', 'THEY', 'SEIZED', 'THEIR', 'CANDLES', 'AND', 'SCURRIED', 'THROUGH', 'THE', 'HOUSE', 'FINDING', 'ON', 'OCCASIONAL', 'PAPER', 'TUCKED', 'AWAY', 'IN', 'SOME', 'ODD', 'CORNER'] +6930-76324-0028-49: ref=['WELL', "I'M", 'CONVINCED', 'THAT', 'THE', 'BOARDED', 'UP', 'HOUSE', 
'MYSTERY', 'HAPPENED', 'NOT', 'EARLIER', 'THAN', 'APRIL', 'SIXTEENTH', 'EIGHTEEN', 'SIXTY', 'ONE', 'AND', 'PROBABLY', 'NOT', 'MUCH', 'LATER'] +6930-76324-0028-49: hyp=['WELL', "I'M", 'CONVINCED', 'THAT', 'THE', 'BOARDED', 'UP', 'HOUSE', 'MYSTERY', 'HAPPENED', 'NOT', 'EARLIER', 'THAN', 'APRIL', 'SIXTEENTH', 'EIGHTEEN', 'SIXTY', 'ONE', 'AND', 'PROBABLY', 'NOT', 'MUCH', 'LATER'] +6930-81414-0000-50: ref=['NO', 'WORDS', 'WERE', 'SPOKEN', 'NO', 'LANGUAGE', 'WAS', 'UTTERED', 'SAVE', 'THAT', 'OF', 'WAILING', 'AND', 'HISSING', 'AND', 'THAT', 'SOMEHOW', 'WAS', 'INDISTINCT', 'AS', 'IF', 'IT', 'EXISTED', 'IN', 'FANCY', 'AND', 'NOT', 'IN', 'REALITY'] +6930-81414-0000-50: hyp=['NO', 'WORDS', 'WERE', 'SPOKEN', 'NO', 'LANGUAGE', 'WAS', 'UTTERED', 'SAVE', 'THAT', 'OF', 'WAILING', 'AND', 'HISSING', 'AND', 'THAT', 'SOMEHOW', 'WAS', 'INDISTINCT', 'AS', 'IF', 'IT', 'EXISTED', 'IN', 'FANCY', 'AND', 'NOT', 'IN', 'REALITY'] +6930-81414-0001-51: ref=['I', 'HEARD', 'A', 'NOISE', 'BEHIND', 'I', 'TURNED', 'AND', 'SAW', 'KAFFAR', 'HIS', 'BLACK', 'EYES', 'SHINING', 'WHILE', 'IN', 'HIS', 'HAND', 'HE', 'HELD', 'A', 'GLEAMING', 'KNIFE', 'HE', 'LIFTED', 'IT', 'ABOVE', 'HIS', 'HEAD', 'AS', 'IF', 'TO', 'STRIKE', 'BUT', 'I', 'HAD', 'THE', 'STRENGTH', 'OF', 'TEN', 'MEN', 'AND', 'I', 'HURLED', 'HIM', 'FROM', 'ME'] +6930-81414-0001-51: hyp=['I', 'HEARD', 'A', 'NOISE', 'BEHIND', 'I', 'TURNED', 'AND', 'SAW', 'KAFFIR', 'HIS', 'BLACK', 'EYES', 'SHINING', 'WHILE', 'IN', 'HIS', 'HAND', 'HE', 'HELD', 'A', 'GLEAMING', 'KNIFE', 'HE', 'LIFTED', 'IT', 'ABOVE', 'HIS', 'HEAD', 'AS', 'IF', 'TO', 'STRIKE', 'BUT', 'I', 'HAD', 'THE', 'STRENGTH', 'OF', 'TEN', 'MEN', 'AND', 'I', 'HURLED', 'HIM', 'FROM', 'ME'] +6930-81414-0002-52: ref=['ONWARD', 'SAID', 'A', 'DISTANT', 'VOICE'] +6930-81414-0002-52: hyp=['ONWARD', 'SAID', 'A', 'DISTANT', 'VOICE'] +6930-81414-0003-53: ref=['NO', 'SOUND', 'BROKE', 'THE', 'STILLNESS', 'OF', 'THE', 'NIGHT'] +6930-81414-0003-53: hyp=['NO', 'SOUND', 'BROKE', 'THE', 'STILLNESS', 'OF', 'THE', 'NIGHT'] +6930-81414-0004-54: ref=['THE', 'STORY', 'OF', 'ITS', 'EVIL', 'INFLUENCE', 'CAME', 'BACK', 'TO', 'ME', 'AND', 'IN', 'MY', 'BEWILDERED', 'CONDITION', 'I', 'WONDERED', 'WHETHER', 'THERE', 'WAS', 'NOT', 'SOME', 'TRUTH', 'IN', 'WHAT', 'HAD', 'BEEN', 'SAID'] +6930-81414-0004-54: hyp=['THE', 'STORY', 'OF', 'ITS', 'EVIL', 'INFLUENCE', 'CAME', 'BACK', 'TO', 'ME', 'AND', 'IN', 'MY', 'BEWILDERED', 'CONDITION', 'I', 'WONDERED', 'WHETHER', 'THERE', 'WAS', 'NOT', 'SOME', 'TRUTH', 'IN', 'WHAT', 'HAD', 'BEEN', 'SAID'] +6930-81414-0005-55: ref=['WHAT', 'WAS', 'THAT'] +6930-81414-0005-55: hyp=['WHAT', 'WAS', 'THAT'] +6930-81414-0006-56: ref=['WHAT', 'THEN', 'A', 'HUMAN', 'HAND', 'LARGE', 'AND', 'SHAPELY', 'APPEARED', 'DISTINCTLY', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'POND'] +6930-81414-0006-56: hyp=['WHAT', 'THEN', 'A', 'HUMAN', 'HAND', 'LARGE', 'AND', 'SHABBY', 'APPEARED', 'DISTINCTLY', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'POND'] +6930-81414-0007-57: ref=['NOTHING', 'MORE', 'NOT', 'EVEN', 'THE', 'WRIST', 'TO', 'WHICH', 'IT', 'MIGHT', 'BE', 'ATTACHED'] +6930-81414-0007-57: hyp=['NOTHING', 'MORE', 'NOT', 'EVEN', 'THE', 'WRIST', 'TO', 'WHICH', 'IT', 'MIGHT', 'BE', 'ATTACHED'] +6930-81414-0008-58: ref=['IT', 'DID', 'NOT', 'BECKON', 'OR', 'INDEED', 'MOVE', 'AT', 'ALL', 'IT', 'WAS', 'AS', 'STILL', 'AS', 'THE', 'HAND', 'OF', 'DEATH'] +6930-81414-0008-58: hyp=['IT', 'DID', 'NOT', 'BECKON', 'OR', 'INDEED', 'MOVE', 'AT', 'ALL', 'IT', 'WAS', 'AS', 'STILL', 'AS', 'THE', 'HAND', 'OF', 'DEATH'] +6930-81414-0009-59: ref=['I', 'AWOKE', 'TO', 
'CONSCIOUSNESS', 'FIGHTING', 'AT', 'FIRST', 'IT', 'SEEMED', 'AS', 'IF', 'I', 'WAS', 'FIGHTING', 'WITH', 'A', 'PHANTOM', 'BUT', 'GRADUALLY', 'MY', 'OPPONENT', 'BECAME', 'MORE', 'REAL', 'TO', 'ME', 'IT', 'WAS', 'KAFFAR'] +6930-81414-0009-59: hyp=['I', 'AWOKE', 'TO', 'CONSCIOUSNESS', 'FIGHTING', 'AT', 'FIRST', 'IT', 'SEEMED', 'AS', 'IF', 'I', 'WAS', 'FIGHTING', 'WITH', 'THE', 'PHANTOM', 'BUT', 'GRADUALLY', 'MY', 'OPPONENT', 'BECAME', 'MORE', 'REAL', 'TO', 'ME', 'IT', 'WAS', 'KAFFIR'] +6930-81414-0010-60: ref=['A', 'SOUND', 'OF', 'VOICES', 'A', 'FLASH', 'OF', 'LIGHT'] +6930-81414-0010-60: hyp=['A', 'SOUND', 'OF', 'VOICES', 'A', 'FLASH', 'OF', 'LIGHT'] +6930-81414-0011-61: ref=['A', 'FEELING', 'OF', 'FREEDOM', 'AND', 'I', 'WAS', 'AWAKE', 'WHERE'] +6930-81414-0011-61: hyp=['A', 'FEELING', 'OF', 'FREEDOM', 'AND', 'I', 'WAS', 'AWAKE', 'WHERE'] +6930-81414-0012-62: ref=['SAID', 'ANOTHER', 'VOICE', 'WHICH', 'I', 'RECOGNIZED', 'AS', "VOLTAIRE'S", 'KAFFAR'] +6930-81414-0012-62: hyp=['SAID', 'ANOTHER', 'VOICE', 'WHICH', 'I', 'RECOGNIZED', 'AS', "VOLTAIRE'S", 'KAFFIR'] +6930-81414-0013-63: ref=['I', 'HAD', 'SCARCELY', 'KNOWN', 'WHAT', 'I', 'HAD', 'BEEN', 'SAYING', 'OR', 'DOING', 'UP', 'TO', 'THIS', 'TIME', 'BUT', 'AS', 'HE', 'SPOKE', 'I', 'LOOKED', 'AT', 'MY', 'HAND'] +6930-81414-0013-63: hyp=['I', 'HAD', 'SCARCELY', 'KNOWN', 'WHEN', 'I', 'HAD', 'BEEN', 'SAYING', 'OR', 'DOING', 'UP', 'TO', 'THIS', 'TIME', 'BUT', 'AS', 'HE', 'SPOKE', 'I', 'LOOKED', 'AT', 'MY', 'HAND'] +6930-81414-0014-64: ref=['IN', 'THE', 'LIGHT', 'OF', 'THE', 'MOON', 'I', 'SAW', 'A', 'KNIFE', 'RED', 'WITH', 'BLOOD', 'AND', 'MY', 'HAND', 'TOO', 'WAS', 'ALSO', 'DISCOLOURED'] +6930-81414-0014-64: hyp=['IN', 'THE', 'LIGHT', 'OF', 'THE', 'MOON', 'I', 'SAW', 'A', 'KNIFE', 'RED', 'WITH', 'BLOOD', 'AND', 'MY', 'HAND', 'TOO', 'WAS', 'ALSO', 'DISCOLORED'] +6930-81414-0015-65: ref=['I', 'DO', 'NOT', 'KNOW', 'I', 'AM', 'DAZED', 'BEWILDERED'] +6930-81414-0015-65: hyp=['I', 'DO', 'NOT', 'KNOW', 'I', 'AM', 'DAZED', 'BEWILDERED'] +6930-81414-0016-66: ref=['BUT', 'THAT', 'IS', "KAFFAR'S", 'KNIFE'] +6930-81414-0016-66: hyp=['BUT', 'THAT', 'IS', "KAFFIR'S", 'KNIFE'] +6930-81414-0017-67: ref=['I', 'KNOW', 'HE', 'HAD', 'IT', 'THIS', 'VERY', 'EVENING'] +6930-81414-0017-67: hyp=['I', 'KNOW', 'HE', 'HAD', 'IT', 'THIS', 'VERY', 'EVENING'] +6930-81414-0018-68: ref=['I', 'REMEMBER', 'SAYING', 'HAVE', 'WE', 'BEEN', 'TOGETHER'] +6930-81414-0018-68: hyp=['I', 'REMEMBERED', 'SAYING', 'HAVE', 'WE', 'BEEN', 'TOGETHER'] +6930-81414-0019-69: ref=['VOLTAIRE', 'PICKED', 'UP', 'SOMETHING', 'FROM', 'THE', 'GROUND', 'AND', 'LOOKED', 'AT', 'IT'] +6930-81414-0019-69: hyp=['OLD', 'CHAIR', 'PICKED', 'UP', 'SOMETHING', 'FROM', 'THE', 'GROUND', 'AND', 'LOOKED', 'AT', 'IT'] +6930-81414-0020-70: ref=['I', 'SAY', 'YOU', 'DO', 'KNOW', 'WHAT', 'THIS', 'MEANS', 'AND', 'YOU', 'MUST', 'TELL', 'US'] +6930-81414-0020-70: hyp=['I', 'SAY', 'YOU', 'DO', 'KNOW', 'WHAT', 'THIS', 'MEANS', 'AND', 'YOU', 'MUST', 'TELL', 'US'] +6930-81414-0021-71: ref=['A', 'TERRIBLE', 'THOUGHT', 'FLASHED', 'INTO', 'MY', 'MIND'] +6930-81414-0021-71: hyp=['A', 'TERRIBLE', 'THOUGHT', 'FLASHED', 'INTO', 'MY', 'MIND'] +6930-81414-0022-72: ref=['I', 'HAD', 'AGAIN', 'BEEN', 'ACTING', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'THIS', "MAN'S", 'POWER'] +6930-81414-0022-72: hyp=['I', 'HAD', 'AGAIN', 'BEEN', 'ACTING', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'THIS', "MAN'S", 'POWER'] +6930-81414-0023-73: ref=['PERCHANCE', 'TOO', "KAFFAR'S", 'DEATH', 'MIGHT', 'SERVE', 'HIM', 'IN', 'GOOD', 'STEAD'] +6930-81414-0023-73: hyp=['PERCHANCE', 
'TO', "KAFFIR'S", 'DEATH', 'MIGHT', 'SERVE', 'HIM', 'IN', 'GOOD', 'STEAD'] +6930-81414-0024-74: ref=['MY', 'TONGUE', 'REFUSED', 'TO', 'ARTICULATE', 'MY', 'POWER', 'OF', 'SPEECH', 'LEFT', 'ME'] +6930-81414-0024-74: hyp=['MY', 'TONGUE', 'REFUSED', 'TO', 'ARTICULATE', 'MY', 'POWER', 'OF', 'SPEECH', 'LAUGHED', 'ME'] +6930-81414-0025-75: ref=['MY', 'POSITION', 'WAS', 'TOO', 'TERRIBLE'] +6930-81414-0025-75: hyp=['MY', 'POSITION', 'WAS', 'TOO', 'TERRIBLE'] +6930-81414-0026-76: ref=['MY', 'OVERWROUGHT', 'NERVES', 'YIELDED', 'AT', 'LAST'] +6930-81414-0026-76: hyp=['MY', 'OVERWROUGHT', 'NERVES', 'YIELDED', 'AT', 'LAST'] +6930-81414-0027-77: ref=['FOR', 'SOME', 'TIME', 'AFTER', 'THAT', 'I', 'REMEMBERED', 'NOTHING', 'DISTINCTLY'] +6930-81414-0027-77: hyp=['FOR', 'SOME', 'TIME', 'AFTER', 'THAT', 'I', 'REMEMBERED', 'NOTHING', 'DISTINCTLY'] +7021-79730-0000-1399: ref=['THE', 'THREE', 'MODES', 'OF', 'MANAGEMENT'] +7021-79730-0000-1399: hyp=['THE', 'THREE', 'MODES', 'OF', 'MANAGEMENT'] +7021-79730-0001-1400: ref=['TO', 'SUPPOSE', 'THAT', 'THE', 'OBJECT', 'OF', 'THIS', 'WORK', 'IS', 'TO', 'AID', 'IN', 'EFFECTING', 'SUCH', 'A', 'SUBSTITUTION', 'AS', 'THAT', 'IS', 'ENTIRELY', 'TO', 'MISTAKE', 'ITS', 'NATURE', 'AND', 'DESIGN'] +7021-79730-0001-1400: hyp=['TO', 'SUPPOSE', 'THAT', 'THE', 'OBJECT', 'OF', 'THIS', 'WORK', 'IS', 'TO', 'AID', 'IN', 'AFFECTING', 'SUCH', 'A', 'SUBSTITUTION', 'AS', 'THAT', 'IS', 'ENTIRELY', 'TO', 'MISTAKE', 'ITS', 'NATURE', 'AND', 'DESIGN'] +7021-79730-0002-1401: ref=['BY', 'REASON', 'AND', 'AFFECTION'] +7021-79730-0002-1401: hyp=['BY', 'REASON', 'AND', 'AFFECTION'] +7021-79730-0003-1402: ref=['AS', 'THE', 'CHAISE', 'DRIVES', 'AWAY', 'MARY', 'STANDS', 'BEWILDERED', 'AND', 'PERPLEXED', 'ON', 'THE', 'DOOR', 'STEP', 'HER', 'MIND', 'IN', 'A', 'TUMULT', 'OF', 'EXCITEMENT', 'IN', 'WHICH', 'HATRED', 'OF', 'THE', 'DOCTOR', 'DISTRUST', 'AND', 'SUSPICION', 'OF', 'HER', 'MOTHER', 'DISAPPOINTMENT', 'VEXATION', 'AND', 'ILL', 'HUMOR', 'SURGE', 'AND', 'SWELL', 'AMONG', 'THOSE', 'DELICATE', 'ORGANIZATIONS', 'ON', 'WHICH', 'THE', 'STRUCTURE', 'AND', 'DEVELOPMENT', 'OF', 'THE', 'SOUL', 'SO', 'CLOSELY', 'DEPEND', 'DOING', 'PERHAPS', 'AN', 'IRREPARABLE', 'INJURY'] +7021-79730-0003-1402: hyp=['AS', 'THE', 'CHASE', 'DRIVES', 'AWAY', 'MARY', 'STANDS', 'BEWILDERED', 'AND', 'PERPLEXED', 'ON', 'THE', 'DOORSTEP', 'HER', 'MIND', 'IN', 'A', 'TUMULT', 'OF', 'EXCITEMENT', 'IN', 'WHICH', 'HATRED', 'OF', 'THE', 'DOCTOR', 'DISTRUST', 'AND', 'SUSPICION', 'OF', 'HER', 'MOTHER', 'DISAPPOINTMENT', 'VEXATION', 'AND', 'ILL', 'HUMOUR', 'SURGE', 'AND', 'SWELL', 'AMONG', 'THOSE', 'DELICATE', 'ORGANIZATIONS', 'ON', 'WHICH', 'THE', 'STRUCTURE', 'AND', 'DEVELOPMENT', 'OF', 'THE', 'SOUL', 'SO', 'CLOSELY', 'DEPEND', 'DOING', 'PERHAPS', 'AN', 'IRREPARABLE', 'INJURY'] +7021-79730-0004-1403: ref=['THE', 'MOTHER', 'AS', 'SOON', 'AS', 'THE', 'CHAISE', 'IS', 'SO', 'FAR', 'TURNED', 'THAT', 'MARY', 'CAN', 'NO', 'LONGER', 'WATCH', 'THE', 'EXPRESSION', 'OF', 'HER', 'COUNTENANCE', 'GOES', 'AWAY', 'FROM', 'THE', 'DOOR', 'WITH', 'A', 'SMILE', 'OF', 'COMPLACENCY', 'AND', 'SATISFACTION', 'UPON', 'HER', 'FACE', 'AT', 'THE', 'INGENUITY', 'AND', 'SUCCESS', 'OF', 'HER', 'LITTLE', 'ARTIFICE'] +7021-79730-0004-1403: hyp=['THE', 'MOTHER', 'AS', 'SOON', 'AS', 'THE', 'CHASE', 'IS', 'SO', 'FAR', 'TURNED', 'THAT', 'MARY', 'CAN', 'NO', 'LONGER', 'WATCH', 'THE', 'EXPRESSION', 'OF', 'HER', 'COUNTENANCE', 'GOES', 'AWAY', 'FROM', 'THE', 'DOOR', 'WITH', 'A', 'SMILE', 'OF', 'COMPLACENCY', 'AND', 'SATISFACTION', 'ON', 'HER', 'FACE', 'AT', 'THE', 'INGENUITY', 'AND', 
'SUCCESS', 'OF', 'HER', 'LITTLE', 'ARTIFICE'] +7021-79730-0005-1404: ref=['SO', 'YOU', 'WILL', 'BE', 'A', 'GOOD', 'GIRL', 'I', 'KNOW', 'AND', 'NOT', 'MAKE', 'ANY', 'TROUBLE', 'BUT', 'WILL', 'STAY', 'AT', 'HOME', 'CONTENTEDLY', "WON'T", 'YOU'] +7021-79730-0005-1404: hyp=['SO', 'YOU', 'WILL', 'BE', 'A', 'GOOD', 'GIRL', 'I', 'KNOW', 'AND', 'NOT', 'MAKE', 'ANY', 'TROUBLE', 'BUT', 'WILL', 'STAY', 'AT', 'HOME', 'CONTENTEDLY', "WON'T", 'YOU'] +7021-79730-0006-1405: ref=['THE', 'MOTHER', 'IN', 'MANAGING', 'THE', 'CASE', 'IN', 'THIS', 'WAY', 'RELIES', 'PARTLY', 'ON', 'CONVINCING', 'THE', 'REASON', 'OF', 'THE', 'CHILD', 'AND', 'PARTLY', 'ON', 'AN', 'APPEAL', 'TO', 'HER', 'AFFECTION'] +7021-79730-0006-1405: hyp=['THE', 'MOTHER', 'IN', 'MANAGING', 'THE', 'CASE', 'IN', 'THIS', 'WAY', 'REALIZE', 'PARTLY', 'ON', 'CONVINCING', 'THE', 'REASON', 'OF', 'THE', 'CHILD', 'AND', 'PARTLY', 'ON', 'AN', 'APPEAL', 'TO', 'HER', 'AFFECTION'] +7021-79730-0007-1406: ref=['IF', 'YOU', 'SHOULD', 'NOT', 'BE', 'A', 'GOOD', 'GIRL', 'BUT', 'SHOULD', 'SHOW', 'SIGNS', 'OF', 'MAKING', 'US', 'ANY', 'TROUBLE', 'I', 'SHALL', 'HAVE', 'TO', 'SEND', 'YOU', 'OUT', 'SOMEWHERE', 'TO', 'THE', 'BACK', 'PART', 'OF', 'THE', 'HOUSE', 'UNTIL', 'WE', 'ARE', 'GONE'] +7021-79730-0007-1406: hyp=['IF', 'YOU', 'SHOULD', 'NOT', 'BE', 'A', 'GOOD', 'GIRL', 'BUT', 'SHOULD', 'SHOW', 'SIGNS', 'OF', 'MAKING', 'US', 'ANY', 'TROUBLE', 'I', 'SHALL', 'HAVE', 'TO', 'SEND', 'YOU', 'OUT', 'SOMEWHERE', 'TO', 'THE', 'BACK', 'PART', 'OF', 'THE', 'HOUSE', 'UNTIL', 'WE', 'ARE', 'GONE'] +7021-79730-0008-1407: ref=['BUT', 'THIS', 'LAST', 'SUPPOSITION', 'IS', 'ALMOST', 'ALWAYS', 'UNNECESSARY', 'FOR', 'IF', 'MARY', 'HAS', 'BEEN', 'HABITUALLY', 'MANAGED', 'ON', 'THIS', 'PRINCIPLE', 'SHE', 'WILL', 'NOT', 'MAKE', 'ANY', 'TROUBLE'] +7021-79730-0008-1407: hyp=['BUT', 'THIS', 'LAST', 'SUPPOSITION', 'IS', 'ALMOST', 'ALWAYS', 'UNNECESSARY', 'FOR', 'IF', 'MARY', 'HAS', 'BEEN', 'HABITUALLY', 'MANAGED', 'ON', 'THIS', 'PRINCIPLE', 'SHE', 'WILL', 'NOT', 'MAKE', 'ANY', 'TROUBLE'] +7021-79730-0009-1408: ref=['IT', 'IS', 'INDEED', 'TRUE', 'THAT', 'THE', 'IMPORTANCE', 'OF', 'TACT', 'AND', 'SKILL', 'IN', 'THE', 'TRAINING', 'OF', 'THE', 'YOUNG', 'AND', 'OF', 'CULTIVATING', 'THEIR', 'REASON', 'AND', 'SECURING', 'THEIR', 'AFFECTION', 'CAN', 'NOT', 'BE', 'OVERRATED'] +7021-79730-0009-1408: hyp=['IT', 'IS', 'INDEED', 'TRUE', 'THAT', 'THE', 'IMPORTANCE', 'OF', 'TACT', 'AND', 'SKILL', 'IN', 'THE', 'TRAINING', 'OF', 'THE', 'YOUNG', 'AND', 'OF', 'CULTIVATING', 'THEIR', 'REASON', 'AND', 'SECURING', 'THEIR', 'AFFECTION', 'CANNOT', 'BE', 'OVERRATED'] +7021-79740-0000-1384: ref=['TO', 'SUCH', 'PERSONS', 'THESE', 'INDIRECT', 'MODES', 'OF', 'TRAINING', 'CHILDREN', 'IN', 'HABITS', 'OF', 'SUBORDINATION', 'TO', 'THEIR', 'WILL', 'OR', 'RATHER', 'OF', 'YIELDING', 'TO', 'THEIR', 'INFLUENCE', 'ARE', 'SPECIALLY', 'USEFUL'] +7021-79740-0000-1384: hyp=['TO', 'SUCH', 'PERSONS', 'THESE', 'INDIRECT', 'MODES', 'OF', 'TRAINING', 'CHILDREN', 'IN', 'HABITS', 'OF', 'SUBORDINATION', 'TO', 'THEIR', 'WILL', 'OR', 'RATHER', 'OF', 'YIELDING', 'TO', 'THEIR', 'INFLUENCE', 'ARE', 'SPECIALLY', 'USEFUL'] +7021-79740-0001-1385: ref=['DELLA', 'HAD', 'A', 'YOUNG', 'SISTER', 'NAMED', 'MARIA', 'AND', 'A', 'COUSIN', 'WHOSE', 'NAME', 'WAS', 'JANE'] +7021-79740-0001-1385: hyp=['DELLA', 'HAD', 'A', 'YOUNG', 'SISTER', 'NAMED', 'MARIA', 'AND', 'A', 'COUSIN', 'WHOSE', 'NAME', 'WAS', 'JANE'] +7021-79740-0002-1386: ref=['NOW', 'DELIA', 'CONTRIVED', 'TO', 'OBTAIN', 'A', 'GREAT', 'INFLUENCE', 'AND', 'ASCENDENCY', 'OVER', 'THE', 'MINDS', 'OF', 
'THE', 'CHILDREN', 'BY', 'MEANS', 'OF', 'THESE', 'DOLLS'] +7021-79740-0002-1386: hyp=['NOW', 'GELIA', 'CONTRIVED', 'TO', 'OBTAIN', 'A', 'GREAT', 'INFLUENCE', 'AND', 'ASCENDANCY', 'OVER', 'THE', 'MINDS', 'OF', 'THE', 'CHILDREN', 'BY', 'MEANS', 'OF', 'THESE', 'DOLLS'] +7021-79740-0003-1387: ref=['TO', 'GIVE', 'AN', 'IDEA', 'OF', 'THESE', 'CONVERSATIONS', 'I', 'WILL', 'REPORT', 'ONE', 'OF', 'THEM', 'IN', 'FULL'] +7021-79740-0003-1387: hyp=['TO', 'GIVE', 'AN', 'IDEA', 'OF', 'THESE', 'CONVERSATIONS', 'I', 'WILL', 'REPORT', 'ONE', 'OF', 'THEM', 'IN', 'FULL'] +7021-79740-0004-1388: ref=['YOU', 'HAVE', 'COME', 'ANDELLA', 'ANDELLA', 'WAS', 'THE', 'NAME', 'OF', "JANE'S", 'DOLL', 'TO', 'MAKE', 'ROSALIE', 'A', 'VISIT'] +7021-79740-0004-1388: hyp=['YOU', 'HAVE', 'COME', 'AND', 'DELLA', 'AND', 'DELLA', 'WAS', 'THE', 'NAME', 'OF', 'JANE', 'STALL', 'TO', 'MAKE', 'ROSALIE', 'A', 'VISIT'] +7021-79740-0005-1389: ref=['I', 'AM', 'VERY', 'GLAD'] +7021-79740-0005-1389: hyp=['I', 'AM', 'VERY', 'GLAD'] +7021-79740-0006-1390: ref=['I', 'EXPECT', 'YOU', 'HAVE', 'BEEN', 'A', 'VERY', 'GOOD', 'GIRL', 'ANDELLA', 'SINCE', 'YOU', 'WERE', 'HERE', 'LAST'] +7021-79740-0006-1390: hyp=['I', 'EXPECT', 'YOU', 'HAVE', 'BEEN', 'A', 'VERY', 'GOOD', 'GIRL', 'ANDELLA', 'SINCE', 'YOU', 'WERE', 'HERE', 'LAST'] +7021-79740-0007-1391: ref=['THEN', 'TURNING', 'TO', 'JANE', 'SHE', 'ASKED', 'IN', 'A', 'SOMEWHAT', 'ALTERED', 'TONE', 'HAS', 'SHE', 'BEEN', 'A', 'GOOD', 'GIRL', 'JANE'] +7021-79740-0007-1391: hyp=['THEN', 'TURNING', 'TO', 'JANE', 'SHE', 'ASKED', 'IN', 'A', 'SOMEWHAT', 'ALTERED', 'TONE', 'HAS', 'SHE', 'BEEN', 'A', 'GOOD', 'GIRL', 'JANE'] +7021-79740-0008-1392: ref=['FOR', 'INSTANCE', 'ONE', 'DAY', 'THE', 'CHILDREN', 'HAD', 'BEEN', 'PLAYING', 'UPON', 'THE', 'PIAZZA', 'WITH', 'BLOCKS', 'AND', 'OTHER', 'PLAYTHINGS', 'AND', 'FINALLY', 'HAD', 'GONE', 'INTO', 'THE', 'HOUSE', 'LEAVING', 'ALL', 'THE', 'THINGS', 'ON', 'THE', 'FLOOR', 'OF', 'THE', 'PIAZZA', 'INSTEAD', 'OF', 'PUTTING', 'THEM', 'AWAY', 'IN', 'THEIR', 'PLACES', 'AS', 'THEY', 'OUGHT', 'TO', 'HAVE', 'DONE'] +7021-79740-0008-1392: hyp=['FOR', 'INSTANCE', 'ONE', 'DAY', 'THE', 'CHILDREN', 'HAD', 'BEEN', 'PLAYING', 'UPON', 'THE', 'PIAZZA', 'WITH', 'BLOCKS', 'AND', 'OTHER', 'PLAYTHINGS', 'AND', 'FINALLY', 'HAD', 'GONE', 'INTO', 'THE', 'HOUSE', 'LEAVING', 'ALL', 'THE', 'THINGS', 'ON', 'THE', 'FLOOR', 'OF', 'THE', 'PIAZZA', 'INSTEAD', 'OF', 'PUTTING', 'THEM', 'AWAY', 'IN', 'THEIR', 'PLACES', 'AS', 'THEY', 'OUGHT', 'TO', 'HAVE', 'DONE'] +7021-79740-0009-1393: ref=['THEY', 'WERE', 'NOW', 'PLAYING', 'WITH', 'THEIR', 'DOLLS', 'IN', 'THE', 'PARLOR'] +7021-79740-0009-1393: hyp=['THEY', 'WERE', 'NOW', 'PLAYING', 'WITH', 'THEIR', 'DOLLS', 'IN', 'THE', 'PARLOUR'] +7021-79740-0010-1394: ref=['DELIA', 'CAME', 'TO', 'THE', 'PARLOR', 'AND', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'MYSTERY', 'BECKONED', 'THE', 'CHILDREN', 'ASIDE', 'AND', 'SAID', 'TO', 'THEM', 'IN', 'A', 'WHISPER', 'LEAVE', 'ANDELLA', 'AND', 'ROSALIE', 'HERE', 'AND', "DON'T", 'SAY', 'A', 'WORD', 'TO', 'THEM'] +7021-79740-0010-1394: hyp=['DELHIA', 'CAME', 'TO', 'THE', 'PARLOUR', 'AND', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'MYSTERY', 'BECKONED', 'THE', 'CHILDREN', 'ASIDE', 'AND', 'SAID', 'TO', 'THEM', 'IN', 'A', 'WHISPER', 'LEAVE', 'AND', 'ELLA', 'AND', 'ROSALIE', 'HERE', 'AND', "DON'T", 'SAY', 'A', 'WORD', 'TO', 'THEM'] +7021-79740-0011-1395: ref=['SO', 'SAYING', 'SHE', 'LED', 'THE', 'WAY', 'ON', 'TIPTOE', 'FOLLOWED', 'BY', 'THE', 'CHILDREN', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'ROUND', 'BY', 'A', 'CIRCUITOUS', 'ROUTE', 'TO', 'THE', 
'PIAZZA', 'THERE'] +7021-79740-0011-1395: hyp=['SO', 'SAYING', 'SHE', 'LED', 'THE', 'WAY', 'ON', 'TIPTOE', 'FOLLOWED', 'BY', 'THE', 'CHILDREN', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'ROUND', 'BY', 'A', 'CIRCUITOUS', 'ROUTE', 'TO', 'THE', 'PIAZZA', 'THERE'] +7021-79740-0012-1396: ref=['SAID', 'SHE', 'POINTING', 'TO', 'THE', 'PLAYTHINGS', 'SEE'] +7021-79740-0012-1396: hyp=['SAID', 'SHE', 'POINTING', 'TO', 'THE', 'PLAYTHINGS', 'SEE'] +7021-79740-0013-1397: ref=['PUT', 'THESE', 'PLAYTHINGS', 'ALL', 'AWAY', 'QUICK', 'AND', 'CAREFULLY', 'AND', 'WE', 'WILL', 'NOT', 'LET', 'THEM', 'KNOW', 'ANY', 'THING', 'ABOUT', 'YOUR', 'LEAVING', 'THEM', 'OUT'] +7021-79740-0013-1397: hyp=['PUT', 'THESE', 'PLAYTHINGS', 'ALL', 'AWAY', 'QUICK', 'AND', 'CAREFULLY', 'AND', 'WE', 'WILL', 'NOT', 'LET', 'THEM', 'KNOW', 'ANYTHING', 'ABOUT', 'YOUR', 'LEAVING', 'THEM', 'OUT'] +7021-79740-0014-1398: ref=['AND', 'THIS', 'METHOD', 'OF', 'TREATING', 'THE', 'CASE', 'WAS', 'MUCH', 'MORE', 'EFFECTUAL', 'IN', 'MAKING', 'THEM', 'DISPOSED', 'TO', 'AVOID', 'COMMITTING', 'A', 'SIMILAR', 'FAULT', 'ANOTHER', 'TIME', 'THAN', 'ANY', 'DIRECT', 'REBUKES', 'OR', 'EXPRESSIONS', 'OF', 'DISPLEASURE', 'ADDRESSED', 'PERSONALLY', 'TO', 'THEM', 'WOULD', 'HAVE', 'BEEN'] +7021-79740-0014-1398: hyp=['AND', 'THIS', 'METHOD', 'OF', 'TREATING', 'THE', 'CASE', 'WAS', 'MUCH', 'MORE', 'EFFECTUAL', 'IN', 'MAKING', 'THEM', 'DISPOSED', 'TO', 'AVOID', 'COMMITTING', 'A', 'SIMILAR', 'FAULT', 'ANOTHER', 'TIME', 'THAN', 'ANY', 'DIRECT', 'REBUKES', 'OR', 'EXPRESSIONS', 'OF', 'DISPLEASURE', 'ADDRESSED', 'PERSONALLY', 'TO', 'THEM', 'WOULD', 'HAVE', 'BEEN'] +7021-79759-0000-1378: ref=['NATURE', 'OF', 'THE', 'EFFECT', 'PRODUCED', 'BY', 'EARLY', 'IMPRESSIONS'] +7021-79759-0000-1378: hyp=['NATURE', 'OF', 'THE', 'EFFECT', 'PRODUCED', 'BY', 'EARLY', 'IMPRESSIONS'] +7021-79759-0001-1379: ref=['THAT', 'IS', 'COMPARATIVELY', 'NOTHING'] +7021-79759-0001-1379: hyp=['THAT', 'IS', 'COMPARATIVELY', 'NOTHING'] +7021-79759-0002-1380: ref=['THEY', 'ARE', 'CHIEFLY', 'FORMED', 'FROM', 'COMBINATIONS', 'OF', 'THE', 'IMPRESSIONS', 'MADE', 'IN', 'CHILDHOOD'] +7021-79759-0002-1380: hyp=['THEY', 'ARE', 'CHIEFLY', 'FORMED', 'FROM', 'COMBINATIONS', 'OF', 'THE', 'IMPRESSIONS', 'MADE', 'IN', 'CHILDHOOD'] +7021-79759-0003-1381: ref=['VAST', 'IMPORTANCE', 'AND', 'INFLUENCE', 'OF', 'THIS', 'MENTAL', 'FURNISHING'] +7021-79759-0003-1381: hyp=['VAST', 'IMPORTANCE', 'AND', 'INFLUENCE', 'OF', 'THIS', 'MENTAL', 'FURNISHING'] +7021-79759-0004-1382: ref=['WITHOUT', 'GOING', 'TO', 'ANY', 'SUCH', 'EXTREME', 'AS', 'THIS', 'WE', 'CAN', 'EASILY', 'SEE', 'ON', 'REFLECTION', 'HOW', 'VAST', 'AN', 'INFLUENCE', 'ON', 'THE', 'IDEAS', 'AND', 'CONCEPTIONS', 'AS', 'WELL', 'AS', 'ON', 'THE', 'PRINCIPLES', 'OF', 'ACTION', 'IN', 'MATURE', 'YEARS', 'MUST', 'BE', 'EXERTED', 'BY', 'THE', 'NATURE', 'AND', 'CHARACTER', 'OF', 'THE', 'IMAGES', 'WHICH', 'THE', 'PERIOD', 'OF', 'INFANCY', 'AND', 'CHILDHOOD', 'IMPRESSES', 'UPON', 'THE', 'MIND'] +7021-79759-0004-1382: hyp=['WITHOUT', 'GOING', 'TO', 'ANY', 'SUCH', 'EXTREME', 'AS', 'THIS', 'WE', 'CAN', 'EASILY', 'SEE', 'ON', 'REFLECTION', 'HOW', 'VAST', 'AN', 'INFLUENCE', 'ON', 'THE', 'IDEAS', 'AND', 'CONCEPTIONS', 'AS', 'WELL', 'AS', 'ON', 'THE', 'PRINCIPLES', 'OF', 'ACTION', 'AND', 'MATURE', 'YEARS', 'MUST', 'BE', 'EXERTED', 'BY', 'THE', 'NATURE', 'AND', 'CHARACTER', 'OF', 'THE', 'IMAGES', 'WHICH', 'THE', 'PERIOD', 'OF', 'INFANCY', 'AND', 'CHILDHOOD', 'IMPRESS', 'UPON', 'THE', 'MIND'] +7021-79759-0005-1383: ref=['THE', 'PAIN', 'PRODUCED', 'BY', 'AN', 'ACT', 'OF', 'HASTY', 'AND', 'ANGRY', 
'VIOLENCE', 'TO', 'WHICH', 'A', 'FATHER', 'SUBJECTS', 'HIS', 'SON', 'MAY', 'SOON', 'PASS', 'AWAY', 'BUT', 'THE', 'MEMORY', 'OF', 'IT', 'DOES', 'NOT', 'PASS', 'AWAY', 'WITH', 'THE', 'PAIN'] +7021-79759-0005-1383: hyp=['THE', 'PAIN', 'PRODUCED', 'BY', 'AN', 'ACT', 'OF', 'HASTY', 'AND', 'ANGRY', 'VIOLENCE', 'TO', 'WHICH', 'A', 'FATHER', 'SUBJECTS', 'HIS', 'SON', 'MAY', 'SOON', 'PASS', 'AWAY', 'BUT', 'THE', 'MEMORY', 'OF', 'IT', 'DOES', 'NOT', 'PASS', 'AWAY', 'WITH', 'THE', 'PAIN'] +7021-85628-0000-1409: ref=['BUT', 'ANDERS', 'CARED', 'NOTHING', 'ABOUT', 'THAT'] +7021-85628-0000-1409: hyp=['BUT', 'ANDREWS', 'CARED', 'NOTHING', 'ABOUT', 'THAT'] +7021-85628-0001-1410: ref=['HE', 'MADE', 'A', 'BOW', 'SO', 'DEEP', 'THAT', 'HIS', 'BACK', 'CAME', 'NEAR', 'BREAKING', 'AND', 'HE', 'WAS', 'DUMBFOUNDED', 'I', 'CAN', 'TELL', 'YOU', 'WHEN', 'HE', 'SAW', 'IT', 'WAS', 'NOBODY', 'BUT', 'ANDERS'] +7021-85628-0001-1410: hyp=['HE', 'MADE', 'A', 'BOW', 'SO', 'DEEP', 'THAT', 'HIS', 'BACK', 'CAME', 'NEAR', 'BREAKING', 'AND', 'HE', 'WAS', 'DUMBFOUNDED', 'I', 'CAN', 'TELL', 'YOU', 'WHEN', 'HE', 'SAW', 'IT', 'WAS', 'NOBODY', 'BUT', "ANDREW'S"] +7021-85628-0002-1411: ref=['HE', 'WAS', 'SUCH', 'A', 'BIG', 'BOY', 'THAT', 'HE', 'WORE', 'HIGH', 'BOOTS', 'AND', 'CARRIED', 'A', 'JACK', 'KNIFE'] +7021-85628-0002-1411: hyp=['HE', 'WAS', 'SUCH', 'A', 'BIG', 'BOY', 'THAT', 'HE', 'WORE', 'HIGH', 'BOOTS', 'AND', 'CARRIED', 'A', 'JACK', 'KNIFE'] +7021-85628-0003-1412: ref=['NOW', 'THIS', 'KNIFE', 'WAS', 'A', 'SPLENDID', 'ONE', 'THOUGH', 'HALF', 'THE', 'BLADE', 'WAS', 'GONE', 'AND', 'THE', 'HANDLE', 'WAS', 'A', 'LITTLE', 'CRACKED', 'AND', 'ANDERS', 'KNEW', 'THAT', 'ONE', 'IS', 'ALMOST', 'A', 'MAN', 'AS', 'SOON', 'AS', 'ONE', 'HAS', 'A', 'JACK', 'KNIFE'] +7021-85628-0003-1412: hyp=['NOW', 'THIS', 'KNIFE', 'WAS', 'A', 'SPLENDID', 'ONE', 'THOUGH', 'HALF', 'THE', 'BLADE', 'WAS', 'GONE', 'AND', 'THE', 'HANDLE', 'WAS', 'A', 'LITTLE', 'CRACKED', 'AND', 'ANDREWS', 'KNEW', 'THAT', 'ONE', 'IS', 'ALMOST', 'A', 'MAN', 'AS', 'SOON', 'AS', 'ONE', 'HAS', 'A', 'JACKKNIFE'] +7021-85628-0004-1413: ref=['YES', 'WHY', 'NOT', 'THOUGHT', 'ANDERS'] +7021-85628-0004-1413: hyp=['YES', 'WHY', 'NOT', 'THOUGHT', 'ANDERS'] +7021-85628-0005-1414: ref=['SEEING', 'THAT', 'I', 'AM', 'SO', 'FINE', 'I', 'MAY', 'AS', 'WELL', 'GO', 'AND', 'VISIT', 'THE', 'KING'] +7021-85628-0005-1414: hyp=['SEEING', 'THAT', 'I', 'AM', 'SO', 'FINE', 'I', 'MAY', 'AS', 'WELL', 'GO', 'AND', 'VISIT', 'THE', 'KING'] +7021-85628-0006-1415: ref=['I', 'AM', 'GOING', 'TO', 'THE', 'COURT', 'BALL', 'ANSWERED', 'ANDERS'] +7021-85628-0006-1415: hyp=['I', 'AM', 'GOING', 'TO', 'THE', 'COURT', 'BALL', 'ANSWERED', 'ANDREWS'] +7021-85628-0007-1416: ref=['AND', 'SHE', 'TOOK', 'ANDERS', 'HAND', 'AND', 'WALKED', 'WITH', 'HIM', 'UP', 'THE', 'BROAD', 'MARBLE', 'STAIRS', 'WHERE', 'SOLDIERS', 'WERE', 'POSTED', 'AT', 'EVERY', 'THIRD', 'STEP', 'AND', 'THROUGH', 'THE', 'MAGNIFICENT', 'HALLS', 'WHERE', 'COURTIERS', 'IN', 'SILK', 'AND', 'VELVET', 'STOOD', 'BOWING', 'WHEREVER', 'HE', 'WENT'] +7021-85628-0007-1416: hyp=['AND', 'SHE', 'TOOK', "ANDREW'S", 'HAND', 'AND', 'WALKED', 'WITH', 'HIM', 'UP', 'THE', 'BROAD', 'MARBLE', 'STAIRS', 'WHERE', 'SOLDIERS', 'WERE', 'POSTED', 'AT', 'EVERY', 'THIRD', 'STEP', 'AND', 'THROUGH', 'THE', 'MAGNIFICENT', 'HALLS', 'WHERE', 'COURTIERS', 'IN', 'SILK', 'AND', 'VELVET', 'STOOD', 'BOWING', 'WHEREVER', 'HE', 'WENT'] +7021-85628-0008-1417: ref=['FOR', 'LIKE', 'AS', 'NOT', 'THEY', 'MUST', 'HAVE', 'THOUGHT', 'HIM', 'A', 'PRINCE', 'WHEN', 'THEY', 'SAW', 'HIS', 'FINE', 'CAP'] 
+7021-85628-0008-1417: hyp=['FOR', 'LIKE', 'AS', 'NOT', 'THEY', 'MUST', 'HAVE', 'THOUGHT', 'HIM', 'A', 'PRINCE', 'WHEN', 'THEY', 'SAW', 'HIS', 'FINE', 'CAP'] +7021-85628-0009-1418: ref=['AT', 'THE', 'FARTHER', 'END', 'OF', 'THE', 'LARGEST', 'HALL', 'A', 'TABLE', 'WAS', 'SET', 'WITH', 'GOLDEN', 'CUPS', 'AND', 'GOLDEN', 'PLATES', 'IN', 'LONG', 'ROWS'] +7021-85628-0009-1418: hyp=['AT', 'THE', 'FARTHER', 'END', 'OF', 'THE', 'LARGEST', 'HALL', 'A', 'TABLE', 'WAS', 'SET', 'WITH', 'GOLDEN', 'CUPS', 'AND', 'GOLDEN', 'PLATES', 'IN', 'LONG', 'ROWS'] +7021-85628-0010-1419: ref=['ON', 'HUGE', 'SILVER', 'PLATTERS', 'WERE', 'PYRAMIDS', 'OF', 'TARTS', 'AND', 'CAKES', 'AND', 'RED', 'WINE', 'SPARKLED', 'IN', 'GLITTERING', 'DECANTERS'] +7021-85628-0010-1419: hyp=['ON', 'HUGE', 'SILVER', 'PLATTERS', 'WERE', 'PYRAMIDS', 'OF', 'TARTS', 'AND', 'CAKES', 'AND', 'RED', 'WINE', 'SPARKLED', 'IN', 'GLITTERING', 'DECANTERS'] +7021-85628-0011-1420: ref=['THE', 'PRINCESS', 'SAT', 'DOWN', 'UNDER', 'A', 'BLUE', 'CANOPY', 'WITH', 'BOUQUETS', 'OF', 'ROSES', 'AND', 'SHE', 'LET', 'ANDERS', 'SIT', 'IN', 'A', 'GOLDEN', 'CHAIR', 'BY', 'HER', 'SIDE'] +7021-85628-0011-1420: hyp=['THE', 'PRINCESS', 'SAT', 'DOWN', 'UNDER', 'A', 'BLUE', 'CANOPY', 'WITH', 'BOUQUETS', 'OF', 'ROSES', 'AND', 'SHE', 'LET', 'ANDREW', 'SIT', 'IN', 'A', 'GOLDEN', 'CHAIR', 'BY', 'HER', 'SIDE'] +7021-85628-0012-1421: ref=['BUT', 'YOU', 'MUST', 'NOT', 'EAT', 'WITH', 'YOUR', 'CAP', 'ON', 'YOUR', 'HEAD', 'SHE', 'SAID', 'AND', 'WAS', 'GOING', 'TO', 'TAKE', 'IT', 'OFF'] +7021-85628-0012-1421: hyp=['BUT', 'YOU', 'MUST', 'NOT', 'EAT', 'WITH', 'YOUR', 'CAP', 'ON', 'YOUR', 'HEAD', 'SHE', 'SAID', 'AND', 'WAS', 'GOING', 'TO', 'TAKE', 'IT', 'OFF'] +7021-85628-0013-1422: ref=['THE', 'PRINCESS', 'CERTAINLY', 'WAS', 'BEAUTIFUL', 'AND', 'HE', 'WOULD', 'HAVE', 'DEARLY', 'LIKED', 'TO', 'BE', 'KISSED', 'BY', 'HER', 'BUT', 'THE', 'CAP', 'WHICH', 'HIS', 'MOTHER', 'HAD', 'MADE', 'HE', 'WOULD', 'NOT', 'GIVE', 'UP', 'ON', 'ANY', 'CONDITION'] +7021-85628-0013-1422: hyp=['THE', 'PRINCESS', 'CERTAINLY', 'WAS', 'BEAUTIFUL', 'AND', 'HE', 'WOULD', 'HAVE', 'DEARLY', 'LIKED', 'TO', 'BE', 'KISSED', 'BY', 'HER', 'BUT', 'THE', 'CAP', 'WHICH', 'HIS', 'MOTHER', 'HAD', 'MADE', 'HE', 'WOULD', 'NOT', 'GIVE', 'UP', 'ON', 'ANY', 'CONDITION'] +7021-85628-0014-1423: ref=['HE', 'ONLY', 'SHOOK', 'HIS', 'HEAD'] +7021-85628-0014-1423: hyp=['HE', 'ONLY', 'SHOOK', 'HIS', 'HEAD'] +7021-85628-0015-1424: ref=['WELL', 'BUT', 'NOW', 'SAID', 'THE', 'PRINCESS', 'AND', 'SHE', 'FILLED', 'HIS', 'POCKETS', 'WITH', 'CAKES', 'AND', 'PUT', 'HER', 'OWN', 'HEAVY', 'GOLD', 'CHAIN', 'AROUND', 'HIS', 'NECK', 'AND', 'BENT', 'DOWN', 'AND', 'KISSED', 'HIM'] +7021-85628-0015-1424: hyp=['WELL', 'BUT', 'NOW', 'SAID', 'THE', 'PRINCESS', 'AND', 'SHE', 'FILLED', 'HIS', 'POCKETS', 'WITH', 'CAKES', 'AND', 'PUT', 'HER', 'OWN', 'HEAVY', 'GOLD', 'CHAIN', 'AROUND', 'HIS', 'NECK', 'AND', 'BENT', 'DOWN', 'AND', 'KISSED', 'HIM'] +7021-85628-0016-1425: ref=['THAT', 'IS', 'A', 'VERY', 'FINE', 'CAP', 'YOU', 'HAVE', 'HE', 'SAID'] +7021-85628-0016-1425: hyp=['THAT', 'IS', 'A', 'VERY', 'FINE', 'CAP', 'YOU', 'HAVE', 'HE', 'SAID'] +7021-85628-0017-1426: ref=['SO', 'IT', 'IS', 'SAID', 'ANDERS'] +7021-85628-0017-1426: hyp=['SO', 'IT', 'IS', 'SAID', 'ANDREWS'] +7021-85628-0018-1427: ref=['AND', 'IT', 'IS', 'MADE', 'OF', "MOTHER'S", 'BEST', 'YARN', 'AND', 'SHE', 'KNITTED', 'IT', 'HERSELF', 'AND', 'EVERYBODY', 'WANTS', 'TO', 'GET', 'IT', 'AWAY', 'FROM', 'ME'] +7021-85628-0018-1427: hyp=['AND', 'IT', 'IS', 'MADE', 'OF', "MOTHER'S", 'BEST', 'YARN', 'AND', 
'SHE', 'KNITTED', 'IT', 'HERSELF', 'AND', 'EVERYBODY', 'WANTS', 'TO', 'GET', 'IT', 'AWAY', 'FROM', 'ME'] +7021-85628-0019-1428: ref=['WITH', 'ONE', 'JUMP', 'ANDERS', 'GOT', 'OUT', 'OF', 'HIS', 'CHAIR'] +7021-85628-0019-1428: hyp=['WITH', 'ONE', 'JUMP', 'ANDREWS', 'GOT', 'OUT', 'OF', 'HIS', 'CHAIR'] +7021-85628-0020-1429: ref=['HE', 'DARTED', 'LIKE', 'AN', 'ARROW', 'THROUGH', 'ALL', 'THE', 'HALLS', 'DOWN', 'ALL', 'THE', 'STAIRS', 'AND', 'ACROSS', 'THE', 'YARD'] +7021-85628-0020-1429: hyp=['HE', 'DARTED', 'LIKE', 'AN', 'ARROW', 'THROUGH', 'ALL', 'THE', 'HALLS', 'DOWN', 'ALL', 'THE', 'STAIRS', 'AND', 'ACROSS', 'THE', 'YARD'] +7021-85628-0021-1430: ref=['HE', 'STILL', 'HELD', 'ON', 'TO', 'IT', 'WITH', 'BOTH', 'HANDS', 'AS', 'HE', 'RUSHED', 'INTO', 'HIS', "MOTHER'S", 'COTTAGE'] +7021-85628-0021-1430: hyp=['HE', 'STILL', 'HELD', 'ON', 'TO', 'IT', 'WITH', 'BOTH', 'HANDS', 'AS', 'HE', 'RUSHED', 'INTO', 'HIS', "MOTHER'S", 'COTTAGE'] +7021-85628-0022-1431: ref=['AND', 'ALL', 'HIS', 'BROTHERS', 'AND', 'SISTERS', 'STOOD', 'ROUND', 'AND', 'LISTENED', 'WITH', 'THEIR', 'MOUTHS', 'OPEN'] +7021-85628-0022-1431: hyp=['AND', 'ALL', 'HIS', 'BROTHERS', 'AND', 'SISTERS', 'STOOD', 'ROUND', 'AND', 'LISTENED', 'WITH', 'THEIR', 'MOUTHS', 'OPEN'] +7021-85628-0023-1432: ref=['BUT', 'WHEN', 'HIS', 'BIG', 'BROTHER', 'HEARD', 'THAT', 'HE', 'HAD', 'REFUSED', 'TO', 'GIVE', 'HIS', 'CAP', 'FOR', 'A', "KING'S", 'GOLDEN', 'CROWN', 'HE', 'SAID', 'THAT', 'ANDERS', 'WAS', 'A', 'STUPID'] +7021-85628-0023-1432: hyp=['BUT', 'WHEN', 'HIS', 'BIG', 'BROTHER', 'HEARD', 'THAT', 'HE', 'HAD', 'REFUSED', 'TO', 'GIVE', 'HIS', 'CAP', 'FOR', 'A', "KING'S", 'GOLDEN', 'CROWN', 'HE', 'SAID', 'THAT', 'ANDERS', 'WAS', 'A', 'STUPID'] +7021-85628-0024-1433: ref=['ANDERS', 'FACE', 'GREW', 'RED'] +7021-85628-0024-1433: hyp=["ANDREW'S", 'FACE', 'GREW', 'RED'] +7021-85628-0025-1434: ref=['BUT', 'HIS', 'MOTHER', 'HUGGED', 'HIM', 'CLOSE'] +7021-85628-0025-1434: hyp=['BUT', 'HIS', 'MOTHER', 'HUGGED', 'HIM', 'CLOSE'] +7021-85628-0026-1435: ref=['NO', 'MY', 'LITTLE', 'SON', 'SHE', 'SAID'] +7021-85628-0026-1435: hyp=['NO', 'MY', 'LITTLE', 'FUN', 'SHE', 'SAID'] +7021-85628-0027-1436: ref=['IF', 'YOU', 'DRESSED', 'IN', 'SILK', 'AND', 'GOLD', 'FROM', 'TOP', 'TO', 'TOE', 'YOU', 'COULD', 'NOT', 'LOOK', 'ANY', 'NICER', 'THAN', 'IN', 'YOUR', 'LITTLE', 'RED', 'CAP'] +7021-85628-0027-1436: hyp=['IF', 'YOU', 'DRESSED', 'IN', 'SILK', 'AND', 'GOLD', 'FROM', 'TOP', 'TO', 'TOE', 'YOU', 'COULD', 'NOT', 'LOOK', 'ANY', 'NICER', 'THAN', 'IN', 'YOUR', 'LITTLE', 'RED', 'CAP'] +7127-75946-0000-467: ref=['AT', 'THE', 'CONCLUSION', 'OF', 'THE', 'BANQUET', 'WHICH', 'WAS', 'SERVED', 'AT', 'FIVE', "O'CLOCK", 'THE', 'KING', 'ENTERED', 'HIS', 'CABINET', 'WHERE', 'HIS', 'TAILORS', 'WERE', 'AWAITING', 'HIM', 'FOR', 'THE', 'PURPOSE', 'OF', 'TRYING', 'ON', 'THE', 'CELEBRATED', 'COSTUME', 'REPRESENTING', 'SPRING', 'WHICH', 'WAS', 'THE', 'RESULT', 'OF', 'SO', 'MUCH', 'IMAGINATION', 'AND', 'HAD', 'COST', 'SO', 'MANY', 'EFFORTS', 'OF', 'THOUGHT', 'TO', 'THE', 'DESIGNERS', 'AND', 'ORNAMENT', 'WORKERS', 'OF', 'THE', 'COURT'] +7127-75946-0000-467: hyp=['AT', 'THE', 'CONCLUSION', 'OF', 'THE', 'BANQUET', 'WHICH', 'WAS', 'SERVED', 'AT', 'FIVE', "O'CLOCK", 'THE', 'KING', 'ENTERED', 'HIS', 'CABINET', 'WHERE', 'HIS', 'TAILORS', 'WERE', 'AWAITING', 'HIM', 'FOR', 'THE', 'PURPOSE', 'OF', 'TRYING', 'ON', 'THE', 'CELEBRATED', 'COSTUME', 'REPRESENTING', 'SPRING', 'WHICH', 'WAS', 'THE', 'RESULT', 'OF', 'SO', 'MUCH', 'IMAGINATION', 'AND', 'HAD', 'COST', 'SO', 'MANY', 'EFFORTS', 'OF', 'THOUGHT', 'TO', 'THE', 
'DESIGNERS', 'AND', 'ORNAMENT', 'WORKERS', 'OF', 'THE', 'COURT'] +7127-75946-0001-468: ref=['AH', 'VERY', 'WELL'] +7127-75946-0001-468: hyp=['AH', 'VERY', 'WELL'] +7127-75946-0002-469: ref=['LET', 'HIM', 'COME', 'IN', 'THEN', 'SAID', 'THE', 'KING', 'AND', 'AS', 'IF', 'COLBERT', 'HAD', 'BEEN', 'LISTENING', 'AT', 'THE', 'DOOR', 'FOR', 'THE', 'PURPOSE', 'OF', 'KEEPING', 'HIMSELF', 'AU', 'COURANT', 'WITH', 'THE', 'CONVERSATION', 'HE', 'ENTERED', 'AS', 'SOON', 'AS', 'THE', 'KING', 'HAD', 'PRONOUNCED', 'HIS', 'NAME', 'TO', 'THE', 'TWO', 'COURTIERS'] +7127-75946-0002-469: hyp=['LET', 'HIM', 'COME', 'IN', 'THEN', 'SAID', 'THE', 'KING', 'AND', 'AS', 'IF', 'COLBERT', 'HAD', 'BEEN', 'LISTENING', 'AT', 'THE', 'DOOR', 'FOR', 'THE', 'PURPOSE', 'OF', 'KEEPING', 'HIMSELF', 'ACCURANT', 'WITH', 'THE', 'CONVERSATION', 'HE', 'ENTERED', 'AS', 'SOON', 'AS', 'THE', 'KING', 'HAD', 'PRONOUNCED', 'HIS', 'NAME', 'TO', 'THE', 'TWO', 'COURTIERS'] +7127-75946-0003-470: ref=['GENTLEMEN', 'TO', 'YOUR', 'POSTS', 'WHEREUPON', 'SAINT', 'AIGNAN', 'AND', 'VILLEROY', 'TOOK', 'THEIR', 'LEAVE'] +7127-75946-0003-470: hyp=['GENTLEMEN', 'TO', 'YOUR', 'POSTS', 'WHEREUPON', 'SAINT', 'ENG', 'YON', 'AND', 'VILLAY', 'TOOK', 'THEIR', 'LEAVE'] +7127-75946-0004-471: ref=['CERTAINLY', 'SIRE', 'BUT', 'I', 'MUST', 'HAVE', 'MONEY', 'TO', 'DO', 'THAT', 'WHAT'] +7127-75946-0004-471: hyp=['CERTAINLY', 'SIRE', 'BUT', 'I', 'MUST', 'HAVE', 'MONEY', 'TO', 'DO', 'THAT', 'WHAT'] +7127-75946-0005-472: ref=['WHAT', 'DO', 'YOU', 'MEAN', 'INQUIRED', 'LOUIS'] +7127-75946-0005-472: hyp=['WHAT', 'DO', 'YOU', 'MEAN', 'INQUIRED', 'LOUISE'] +7127-75946-0006-473: ref=['HE', 'HAS', 'GIVEN', 'THEM', 'WITH', 'TOO', 'MUCH', 'GRACE', 'NOT', 'TO', 'HAVE', 'OTHERS', 'STILL', 'TO', 'GIVE', 'IF', 'THEY', 'ARE', 'REQUIRED', 'WHICH', 'IS', 'THE', 'CASE', 'AT', 'THE', 'PRESENT', 'MOMENT'] +7127-75946-0006-473: hyp=['HE', 'HAS', 'GIVEN', 'THEM', 'WITH', 'TOO', 'MUCH', 'GRACE', 'NOT', 'TO', 'HAVE', 'OTHERS', 'STILL', 'TO', 'GIVE', 'IF', 'THEY', 'ARE', 'REQUIRED', 'WHICH', 'IS', 'THE', 'CASE', 'AT', 'THE', 'PRESENT', 'MOMENT'] +7127-75946-0007-474: ref=['IT', 'IS', 'NECESSARY', 'THEREFORE', 'THAT', 'HE', 'SHOULD', 'COMPLY', 'THE', 'KING', 'FROWNED'] +7127-75946-0007-474: hyp=['IT', 'IS', 'NECESSARY', 'THEREFORE', 'THAT', 'HE', 'SHOULD', 'COMPLY', 'THE', 'KING', 'FROWNED'] +7127-75946-0008-475: ref=['DOES', 'YOUR', 'MAJESTY', 'THEN', 'NO', 'LONGER', 'BELIEVE', 'THE', 'DISLOYAL', 'ATTEMPT'] +7127-75946-0008-475: hyp=['DOES', 'YOUR', 'MAJESTY', 'THEN', 'NO', 'LONGER', 'BELIEVE', 'THE', 'DISLOYAL', 'ATTEMPT'] +7127-75946-0009-476: ref=['NOT', 'AT', 'ALL', 'YOU', 'ARE', 'ON', 'THE', 'CONTRARY', 'MOST', 'AGREEABLE', 'TO', 'ME'] +7127-75946-0009-476: hyp=['NOT', 'AT', 'ALL', 'YOU', 'ARE', 'ON', 'THE', 'CONTRARY', 'MOST', 'AGREEABLE', 'TO', 'ME'] +7127-75946-0010-477: ref=['YOUR', "MAJESTY'S", 'PLAN', 'THEN', 'IN', 'THIS', 'AFFAIR', 'IS'] +7127-75946-0010-477: hyp=['YOUR', "MAJESTY'S", 'PLAN', 'THEN', 'IN', 'THIS', 'AFFAIR', 'IS'] +7127-75946-0011-478: ref=['YOU', 'WILL', 'TAKE', 'THEM', 'FROM', 'MY', 'PRIVATE', 'TREASURE'] +7127-75946-0011-478: hyp=['YOU', 'WILL', 'TAKE', 'THEM', 'FROM', 'MY', 'PRIVATE', 'TREASURE'] +7127-75946-0012-479: ref=['THE', 'NEWS', 'CIRCULATED', 'WITH', 'THE', 'RAPIDITY', 'OF', 'LIGHTNING', 'DURING', 'ITS', 'PROGRESS', 'IT', 'KINDLED', 'EVERY', 'VARIETY', 'OF', 'COQUETRY', 'DESIRE', 'AND', 'WILD', 'AMBITION'] +7127-75946-0012-479: hyp=['THE', 'NEWS', 'CIRCULATED', 'WITH', 'THE', 'RAPIDITY', 'OF', 'LIGHTNING', 'DURING', 'ITS', 'PROGRESS', 'IT', 'KINDLED', 
'EVERY', 'VARIETY', 'OF', 'COQUETRY', 'DESIRE', 'AND', 'WILD', 'AMBITION'] +7127-75946-0013-480: ref=['THE', 'KING', 'HAD', 'COMPLETED', 'HIS', 'TOILETTE', 'BY', 'NINE', "O'CLOCK", 'HE', 'APPEARED', 'IN', 'AN', 'OPEN', 'CARRIAGE', 'DECORATED', 'WITH', 'BRANCHES', 'OF', 'TREES', 'AND', 'FLOWERS'] +7127-75946-0013-480: hyp=['THE', 'KING', 'HAD', 'COMPLETED', 'HIS', 'TOILET', 'BY', 'NINE', "O'CLOCK", 'HE', 'APPEARED', 'IN', 'AN', 'OPEN', 'CARRIAGE', 'DECORATED', 'WITH', 'BRANCHES', 'OF', 'TREES', 'AND', 'FLOWERS'] +7127-75946-0014-481: ref=['THE', 'QUEENS', 'HAD', 'TAKEN', 'THEIR', 'SEATS', 'UPON', 'A', 'MAGNIFICENT', 'DIAS', 'OR', 'PLATFORM', 'ERECTED', 'UPON', 'THE', 'BORDERS', 'OF', 'THE', 'LAKE', 'IN', 'A', 'THEATER', 'OF', 'WONDERFUL', 'ELEGANCE', 'OF', 'CONSTRUCTION'] +7127-75946-0014-481: hyp=['THE', 'QUEENS', 'HAD', 'TAKEN', 'THEIR', 'SEATS', 'UPON', 'A', 'MAGNIFICENT', 'DAIS', 'OR', 'PLATFORM', 'ERECTED', 'UPON', 'THE', 'BORDERS', 'OF', 'THE', 'LAKE', 'IN', 'A', 'THEATRE', 'OF', 'WONDERFUL', 'ELEGANCE', 'OF', 'CONSTRUCTION'] +7127-75946-0015-482: ref=['SUDDENLY', 'FOR', 'THE', 'PURPOSE', 'OF', 'RESTORING', 'PEACE', 'AND', 'ORDER', 'SPRING', 'ACCOMPANIED', 'BY', 'HIS', 'WHOLE', 'COURT', 'MADE', 'HIS', 'APPEARANCE'] +7127-75946-0015-482: hyp=['SUDDENLY', 'FOR', 'THE', 'PURPOSE', 'OF', 'RESTORING', 'PEACE', 'AND', 'ORDER', 'SPRANG', 'ACCOMPANIED', 'BY', 'HIS', 'WHOLE', 'COURT', 'MADE', 'HIS', 'APPEARANCE'] +7127-75946-0016-483: ref=['THE', 'SEASONS', 'ALLIES', 'OF', 'SPRING', 'FOLLOWED', 'HIM', 'CLOSELY', 'TO', 'FORM', 'A', 'QUADRILLE', 'WHICH', 'AFTER', 'MANY', 'WORDS', 'OF', 'MORE', 'OR', 'LESS', 'FLATTERING', 'IMPORT', 'WAS', 'THE', 'COMMENCEMENT', 'OF', 'THE', 'DANCE'] +7127-75946-0016-483: hyp=['THE', 'SEASONS', 'ALLIES', 'OF', 'SPRING', 'FOLLOWED', 'HIM', 'CLOSELY', 'TO', 'FORM', 'A', 'QUADRILLE', 'WHICH', 'AFTER', 'MANY', 'WORDS', 'OF', 'MORE', 'OR', 'LESS', 'FLATTERING', 'IMPORT', 'WAS', 'THE', 'COMMENCEMENT', 'OF', 'THE', 'DANCE'] +7127-75946-0017-484: ref=['HIS', 'LEGS', 'THE', 'BEST', 'SHAPED', 'AT', 'COURT', 'WERE', 'DISPLAYED', 'TO', 'GREAT', 'ADVANTAGE', 'IN', 'FLESH', 'COLORED', 'SILKEN', 'HOSE', 'OF', 'SILK', 'SO', 'FINE', 'AND', 'SO', 'TRANSPARENT', 'THAT', 'IT', 'SEEMED', 'ALMOST', 'LIKE', 'FLESH', 'ITSELF'] +7127-75946-0017-484: hyp=['HIS', 'LEGS', 'THE', 'BEST', 'SHAPED', 'AT', 'COURT', 'WERE', 'DISPLAYED', 'TO', 'GREAT', 'ADVANTAGE', 'IN', 'FLESH', 'COLOURED', 'SILKEN', 'HOSE', 'A', 'SILK', 'SO', 'FINE', 'AND', 'SO', 'TRANSPARENT', 'THAT', 'IT', 'SEEMED', 'ALMOST', 'LIKE', 'FLESH', 'ITSELF'] +7127-75946-0018-485: ref=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'CARRIAGE', 'WHICH', 'RESEMBLED', 'THE', 'BUOYANT', 'MOVEMENTS', 'OF', 'AN', 'IMMORTAL', 'AND', 'HE', 'DID', 'NOT', 'DANCE', 'SO', 'MUCH', 'AS', 'SEEM', 'TO', 'SOAR', 'ALONG'] +7127-75946-0018-485: hyp=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'CARRIAGE', 'WHICH', 'RESEMBLED', 'THE', 'BUOYANT', 'MOVEMENTS', 'OF', 'AN', 'IMMORTAL', 'AND', 'HE', 'DID', 'NOT', 'DANCE', 'SO', 'MUCH', 'AS', 'SEEMED', 'TO', 'SOAR', 'ALONG'] +7127-75946-0019-486: ref=['YES', 'IT', 'IS', 'SUPPRESSED'] +7127-75946-0019-486: hyp=['YES', 'IT', 'IS', 'SUPPRESSED'] +7127-75946-0020-487: ref=['FAR', 'FROM', 'IT', 'SIRE', 'YOUR', 'MAJESTY', 'HAVING', 'GIVEN', 'NO', 'DIRECTIONS', 'ABOUT', 'IT', 'THE', 'MUSICIANS', 'HAVE', 'RETAINED', 'IT'] +7127-75946-0020-487: hyp=['FAR', 'FROM', 'IT', 'SIRE', 'YOUR', 'MAJESTY', 'HEAVEN', 'GIVEN', 'NO', 'DIRECTIONS', 'ABOUT', 'IT', 'THE', 'MUSICIANS', 'HAVE', 'RETAINED', 'IT'] +7127-75946-0021-488: 
ref=['YES', 'SIRE', 'AND', 'READY', 'DRESSED', 'FOR', 'THE', 'BALLET'] +7127-75946-0021-488: hyp=['YES', 'SIRE', 'AND', 'READY', 'DRESSED', 'FOR', 'THE', 'BALLET'] +7127-75946-0022-489: ref=['SIRE', 'HE', 'SAID', 'YOUR', "MAJESTY'S", 'MOST', 'DEVOTED', 'SERVANT', 'APPROACHES', 'TO', 'PERFORM', 'A', 'SERVICE', 'ON', 'THIS', 'OCCASION', 'WITH', 'SIMILAR', 'ZEAL', 'THAT', 'HE', 'HAS', 'ALREADY', 'SHOWN', 'ON', 'THE', 'FIELD', 'OF', 'BATTLE'] +7127-75946-0022-489: hyp=['SIRE', 'HE', 'SAID', 'YOUR', "MAJESTY'S", 'MOST', 'DEVOTED', 'SERVANT', 'APPROACHES', 'TO', 'PERFORM', 'A', 'SERVICE', 'ON', 'THIS', 'OCCASION', 'WITH', 'SIMILAR', 'ZEAL', 'THAT', 'HE', 'HAS', 'ALREADY', 'SHOWN', 'ON', 'THE', 'FIELD', 'OF', 'BATTLE'] +7127-75946-0023-490: ref=['THE', 'KING', 'SEEMED', 'ONLY', 'PLEASED', 'WITH', 'EVERY', 'ONE', 'PRESENT'] +7127-75946-0023-490: hyp=['THE', 'KING', 'SEEMED', 'ONLY', 'PLEASED', 'WITH', 'EVERY', 'ONE', 'PRESENT'] +7127-75946-0024-491: ref=['MONSIEUR', 'WAS', 'THE', 'ONLY', 'ONE', 'WHO', 'DID', 'NOT', 'UNDERSTAND', 'ANYTHING', 'ABOUT', 'THE', 'MATTER'] +7127-75946-0024-491: hyp=['MONSIEUR', 'WAS', 'THE', 'ONLY', 'ONE', 'WHO', 'DID', 'NOT', 'UNDERSTAND', 'ANYTHING', 'ABOUT', 'THE', 'MATTER'] +7127-75946-0025-492: ref=['THE', 'BALLET', 'BEGAN', 'THE', 'EFFECT', 'WAS', 'MORE', 'THAN', 'BEAUTIFUL'] +7127-75946-0025-492: hyp=['THE', 'BALLET', 'BEGAN', 'THE', 'EFFECT', 'WAS', 'MORE', 'THAN', 'BEAUTIFUL'] +7127-75946-0026-493: ref=['WHEN', 'THE', 'MUSIC', 'BY', 'ITS', 'BURSTS', 'OF', 'MELODY', 'CARRIED', 'AWAY', 'THESE', 'ILLUSTRIOUS', 'DANCERS', 'WHEN', 'THE', 'SIMPLE', 'UNTUTORED', 'PANTOMIME', 'OF', 'THAT', 'PERIOD', 'ONLY', 'THE', 'MORE', 'NATURAL', 'ON', 'ACCOUNT', 'OF', 'THE', 'VERY', 'INDIFFERENT', 'ACTING', 'OF', 'THE', 'AUGUST', 'ACTORS', 'HAD', 'REACHED', 'ITS', 'CULMINATING', 'POINT', 'OF', 'TRIUMPH', 'THE', 'THEATER', 'SHOOK', 'WITH', 'TUMULTUOUS', 'APPLAUSE'] +7127-75946-0026-493: hyp=['WHEN', 'THE', 'MUSIC', 'BY', 'ITS', 'BURSTS', 'OF', 'MELODY', 'CARRIED', 'AWAY', 'THESE', 'ILLUSTRIOUS', 'DANCERS', 'WHEN', 'THIS', 'SIMPLE', 'UNTUTORED', 'PANTOMIME', 'OF', 'THAT', 'PERIOD', 'ONLY', 'THE', 'MORE', 'NATURAL', 'ON', 'ACCOUNT', 'OF', 'THE', 'VERY', 'INDIFFERENT', 'ACTING', 'OF', 'THE', 'AUGUST', 'ACTORS', 'HAD', 'REACHED', 'ITS', 'CULMINATING', 'POINT', 'OF', 'TRIUMPH', 'THE', 'THEATRE', 'SHOOK', 'WITH', 'TUMULTUOUS', 'APPLAUSE'] +7127-75946-0027-494: ref=['DISDAINFUL', 'OF', 'A', 'SUCCESS', 'OF', 'WHICH', 'MADAME', 'SHOWED', 'NO', 'ACKNOWLEDGEMENT', 'HE', 'THOUGHT', 'OF', 'NOTHING', 'BUT', 'BOLDLY', 'REGAINING', 'THE', 'MARKED', 'PREFERENCE', 'OF', 'THE', 'PRINCESS'] +7127-75946-0027-494: hyp=['DISDAINFUL', 'OF', 'A', 'SUCCESS', 'OF', 'WHICH', 'MADAME', 'SHOWED', 'NO', 'ACKNOWLEDGMENT', 'HE', 'THOUGHT', 'OF', 'NOTHING', 'BUT', 'BOLDLY', 'REGAINING', 'THE', 'MARKET', 'PREFERENCE', 'OF', 'THE', 'PRINCESS'] +7127-75946-0028-495: ref=['BY', 'DEGREES', 'ALL', 'HIS', 'HAPPINESS', 'ALL', 'HIS', 'BRILLIANCY', 'SUBSIDED', 'INTO', 'REGRET', 'AND', 'UNEASINESS', 'SO', 'THAT', 'HIS', 'LIMBS', 'LOST', 'THEIR', 'POWER', 'HIS', 'ARMS', 'HUNG', 'HEAVILY', 'BY', 'HIS', 'SIDES', 'AND', 'HIS', 'HEAD', 'DROOPED', 'AS', 'THOUGH', 'HE', 'WAS', 'STUPEFIED'] +7127-75946-0028-495: hyp=['BY', 'DEGREES', 'ALL', 'HIS', 'HAPPINESS', 'ALL', 'HIS', 'BRILLIANCY', 'SUBSIDED', 'INTO', 'REGRET', 'AND', 'UNEASINESS', 'SO', 'THAT', 'HIS', 'LIMBS', 'LOST', 'THEIR', 'POWER', 'HIS', 'ARMS', 'HUNG', 'HEAVILY', 'BY', 'HIS', 'SIDES', 'AND', 'HIS', 'HEAD', 'DROOPED', 'AS', 'THOUGH', 'HE', 'WAS', 'STUPEFIED'] 
+7127-75946-0029-496: ref=['THE', 'KING', 'WHO', 'HAD', 'FROM', 'THIS', 'MOMENT', 'BECOME', 'IN', 'REALITY', 'THE', 'PRINCIPAL', 'DANCER', 'IN', 'THE', 'QUADRILLE', 'CAST', 'A', 'LOOK', 'UPON', 'HIS', 'VANQUISHED', 'RIVAL'] +7127-75946-0029-496: hyp=['THE', 'KING', 'WHO', 'HAD', 'FROM', 'THIS', 'MOMENT', 'BECOME', 'IN', 'REALITY', 'THE', 'PRINCIPAL', 'DANCER', 'IN', 'THE', 'QUADRILLE', 'CAST', 'A', 'LOOK', 'UPON', 'HIS', 'VANQUISHED', 'RIVAL'] +7127-75947-0000-426: ref=['EVERY', 'ONE', 'COULD', 'OBSERVE', 'HIS', 'AGITATION', 'AND', 'PROSTRATION', 'A', 'PROSTRATION', 'WHICH', 'WAS', 'INDEED', 'THE', 'MORE', 'REMARKABLE', 'SINCE', 'PEOPLE', 'WERE', 'NOT', 'ACCUSTOMED', 'TO', 'SEE', 'HIM', 'WITH', 'HIS', 'ARMS', 'HANGING', 'LISTLESSLY', 'BY', 'HIS', 'SIDE', 'HIS', 'HEAD', 'BEWILDERED', 'AND', 'HIS', 'EYES', 'WITH', 'ALL', 'THEIR', 'BRIGHT', 'INTELLIGENCE', 'BEDIMMED'] +7127-75947-0000-426: hyp=['EVERY', 'ONE', 'COULD', 'OBSERVE', 'HIS', 'AGITATION', 'AND', 'PROSTRATION', 'A', 'PROSTRATION', 'WHICH', 'WAS', 'INDEED', 'THE', 'MORE', 'REMARKABLE', 'SINCE', 'PEOPLE', 'WERE', 'NOT', 'ACCUSTOMED', 'TO', 'SEE', 'HIM', 'WITH', 'HIS', 'ARMS', 'HANGING', 'LISTLESSLY', 'BY', 'HIS', 'SIDE', 'HIS', 'HEAD', 'BEWILDERED', 'AND', 'HIS', 'EYES', 'WITH', 'ALL', 'THEIR', 'BRIGHT', 'INTELLIGENCE', 'BE', 'DIMMED'] +7127-75947-0001-427: ref=['UPON', 'THIS', 'MADAME', 'DEIGNED', 'TO', 'TURN', 'HER', 'EYES', 'LANGUISHINGLY', 'TOWARDS', 'THE', 'COMTE', 'OBSERVING'] +7127-75947-0001-427: hyp=['UPON', 'THIS', 'MADAME', 'DEIGNED', 'TO', 'TURN', 'HER', 'EYES', 'LANGUISHINGLY', 'TOWARDS', 'THE', 'COMTE', 'OBSERVING'] +7127-75947-0002-428: ref=['DO', 'YOU', 'THINK', 'SO', 'SHE', 'REPLIED', 'WITH', 'INDIFFERENCE'] +7127-75947-0002-428: hyp=['DO', 'YOU', 'THINK', 'SO', 'SHE', 'REPLIED', 'WITH', 'INDIFFERENCE'] +7127-75947-0003-429: ref=['YES', 'THE', 'CHARACTER', 'WHICH', 'YOUR', 'ROYAL', 'HIGHNESS', 'ASSUMED', 'IS', 'IN', 'PERFECT', 'HARMONY', 'WITH', 'YOUR', 'OWN'] +7127-75947-0003-429: hyp=['YES', 'THE', 'CHARACTER', 'WHICH', 'YOUR', 'ROYAL', 'HIGHNESS', 'ASSUMED', 'IS', 'IN', 'PERFECT', 'HARMONY', 'WITH', 'YOUR', 'OWN'] +7127-75947-0004-430: ref=['EXPLAIN', 'YOURSELF'] +7127-75947-0004-430: hyp=['EXPLAIN', 'YOURSELF'] +7127-75947-0005-431: ref=['I', 'ALLUDE', 'TO', 'THE', 'GODDESS'] +7127-75947-0005-431: hyp=['I', 'ALLUDE', 'TO', 'THE', 'GODDESS'] +7127-75947-0006-432: ref=['THE', 'PRINCESS', 'INQUIRED', 'NO'] +7127-75947-0006-432: hyp=['THE', 'PRINCESS', 'INQUIRED', 'NO'] +7127-75947-0007-433: ref=['SHE', 'THEN', 'ROSE', 'HUMMING', 'THE', 'AIR', 'TO', 'WHICH', 'SHE', 'WAS', 'PRESENTLY', 'GOING', 'TO', 'DANCE'] +7127-75947-0007-433: hyp=['SHE', 'THEN', 'ROSE', 'HUMMING', 'THE', 'AIR', 'TO', 'WHICH', 'SHE', 'WAS', 'PRESENTLY', 'GOING', 'TO', 'DANCE'] +7127-75947-0008-434: ref=['THE', 'ARROW', 'PIERCED', 'HIS', 'HEART', 'AND', 'WOUNDED', 'HIM', 'MORTALLY'] +7127-75947-0008-434: hyp=['THE', 'ARROW', 'PIERCED', 'HIS', 'HEART', 'AND', 'WOUNDED', 'HIM', 'MORTALLY'] +7127-75947-0009-435: ref=['A', 'QUARTER', 'OF', 'AN', 'HOUR', 'AFTERWARDS', 'HE', 'RETURNED', 'TO', 'THE', 'THEATER', 'BUT', 'IT', 'WILL', 'BE', 'READILY', 'BELIEVED', 'THAT', 'IT', 'WAS', 'ONLY', 'A', 'POWERFUL', 'EFFORT', 'OF', 'REASON', 'OVER', 'HIS', 'GREAT', 'EXCITEMENT', 'THAT', 'ENABLED', 'HIM', 'TO', 'GO', 'BACK', 'OR', 'PERHAPS', 'FOR', 'LOVE', 'IS', 'THUS', 'STRANGELY', 'CONSTITUTED', 'HE', 'FOUND', 'IT', 'IMPOSSIBLE', 'EVEN', 'TO', 'REMAIN', 'MUCH', 'LONGER', 'SEPARATED', 'FROM', 'THE', 'PRESENCE', 'OF', 'ONE', 'WHO', 'HAD', 'BROKEN', 'HIS', 'HEART'] 
+7127-75947-0009-435: hyp=['A', 'QUARTER', 'OF', 'AN', 'HOUR', 'AFTERWARDS', 'HE', 'RETURNED', 'TO', 'THE', 'THEATRE', 'BUT', 'IT', 'WILL', 'BE', 'READILY', 'BELIEVED', 'THAT', 'IT', 'WAS', 'ONLY', 'A', 'POWERFUL', 'EFFORT', 'OF', 'REASON', 'OVER', 'HIS', 'GREAT', 'EXCITEMENT', 'THAT', 'ENABLED', 'HIM', 'TO', 'GO', 'BACK', 'OR', 'PERHAPS', 'FOR', 'LOVE', 'IS', 'THUS', 'STRANGELY', 'CONSTITUTED', 'HE', 'FOUND', 'IT', 'IMPOSSIBLE', 'EVEN', 'TO', 'REMAIN', 'MUCH', 'LONGER', 'SEPARATED', 'FROM', 'THEIR', 'PRESENCE', 'OF', 'ONE', 'WHO', 'HAD', 'BROKEN', 'HIS', 'HEART'] +7127-75947-0010-436: ref=['WHEN', 'SHE', 'PERCEIVED', 'THE', 'YOUNG', 'MAN', 'SHE', 'ROSE', 'LIKE', 'A', 'WOMAN', 'SURPRISED', 'IN', 'THE', 'MIDST', 'OF', 'IDEAS', 'SHE', 'WAS', 'DESIROUS', 'OF', 'CONCEALING', 'FROM', 'HERSELF'] +7127-75947-0010-436: hyp=['WHEN', 'SHE', 'PERCEIVED', 'THE', 'YOUNG', 'MAN', 'SHE', 'ROSE', 'LIKE', 'A', 'WOMAN', 'SURPRISED', 'IN', 'THE', 'MIDST', 'OF', 'IDEAS', 'SHE', 'WAS', 'DESIROUS', 'OF', 'CONCEALING', 'FROM', 'HERSELF'] +7127-75947-0011-437: ref=['REMAIN', 'I', 'IMPLORE', 'YOU', 'THE', 'EVENING', 'IS', 'MOST', 'LOVELY'] +7127-75947-0011-437: hyp=['REMAIN', 'I', 'IMPLORE', 'YOU', 'THE', 'EVENING', 'IS', 'MOST', 'LOVELY'] +7127-75947-0012-438: ref=['INDEED', 'AH'] +7127-75947-0012-438: hyp=['INDEED', 'AH'] +7127-75947-0013-439: ref=['I', 'REMEMBER', 'NOW', 'AND', 'I', 'CONGRATULATE', 'MYSELF', 'DO', 'YOU', 'LOVE', 'ANY', 'ONE'] +7127-75947-0013-439: hyp=['I', 'REMEMBER', 'NOW', 'AND', 'I', 'CONGRATULATE', 'MYSELF', 'DO', 'YOU', 'LOVE', 'ANY', 'ONE'] +7127-75947-0014-440: ref=['FORGIVE', 'ME', 'I', 'HARDLY', 'KNOW', 'WHAT', 'I', 'AM', 'SAYING', 'A', 'THOUSAND', 'TIMES', 'FORGIVE', 'ME', 'MADAME', 'WAS', 'RIGHT', 'QUITE', 'RIGHT', 'THIS', 'BRUTAL', 'EXILE', 'HAS', 'COMPLETELY', 'TURNED', 'MY', 'BRAIN'] +7127-75947-0014-440: hyp=['FORGIVE', 'ME', 'I', 'HARDLY', 'KNOW', 'WHAT', 'I', 'AM', 'SAYING', 'A', 'THOUSAND', 'TIMES', 'FORGIVE', 'ME', 'MADAME', 'WAS', 'RIGHT', 'QUITE', 'RIGHT', 'THIS', 'BRUTAL', 'EXILE', 'HAS', 'COMPLETELY', 'TURNED', 'MY', 'BRAIN'] +7127-75947-0015-441: ref=['THERE', 'CANNOT', 'BE', 'A', 'DOUBT', 'HE', 'RECEIVED', 'YOU', 'KINDLY', 'FOR', 'IN', 'FACT', 'YOU', 'RETURNED', 'WITHOUT', 'HIS', 'PERMISSION'] +7127-75947-0015-441: hyp=['THERE', 'CANNOT', 'BE', 'A', 'DOUBT', 'HE', 'RECEIVED', 'YOU', 'KINDLY', 'FOR', 'IN', 'FACT', 'YOU', 'RETURNED', 'WITHOUT', 'HIS', 'PERMISSION'] +7127-75947-0016-442: ref=['OH', 'MADEMOISELLE', 'WHY', 'HAVE', 'I', 'NOT', 'A', 'DEVOTED', 'SISTER', 'OR', 'A', 'TRUE', 'FRIEND', 'SUCH', 'AS', 'YOURSELF'] +7127-75947-0016-442: hyp=['OH', 'MADEMOISELLE', 'WHY', 'HAVE', 'I', 'NOT', 'A', 'DEVOTED', 'SISTER', 'OR', 'A', 'TRUE', 'FRIEND', 'SUCH', 'AS', 'YOURSELF'] +7127-75947-0017-443: ref=['WHAT', 'ALREADY', 'HERE', 'THEY', 'SAID', 'TO', 'HER'] +7127-75947-0017-443: hyp=['WHAT', 'ALREADY', 'HERE', 'THEY', 'SAID', 'TO', 'HER'] +7127-75947-0018-444: ref=['I', 'HAVE', 'BEEN', 'HERE', 'THIS', 'QUARTER', 'OF', 'AN', 'HOUR', 'REPLIED', 'LA', 'VALLIERE'] +7127-75947-0018-444: hyp=['I', 'HAVE', 'BEEN', 'HERE', 'THIS', 'QUARTER', 'OF', 'AN', 'HOUR', 'REPLIED', 'LA', 'VALLIERS'] +7127-75947-0019-445: ref=['DID', 'NOT', 'THE', 'DANCING', 'AMUSE', 'YOU', 'NO'] +7127-75947-0019-445: hyp=['DID', 'NOT', 'THE', 'DANCING', 'AMUSE', 'YOU', 'NO'] +7127-75947-0020-446: ref=['NO', 'MORE', 'THAN', 'THE', 'DANCING'] +7127-75947-0020-446: hyp=['NO', 'MORE', 'THAN', 'THE', 'DANCING'] +7127-75947-0021-447: ref=['LA', 'VALLIERE', 'IS', 'QUITE', 'A', 'POETESS', 'SAID', 'TONNAY', 
'CHARENTE'] +7127-75947-0021-447: hyp=['LA', 'VALLIERS', 'IS', 'QUITE', 'A', 'POETES', 'SAID', 'TONY', 'CHARLET'] +7127-75947-0022-448: ref=['I', 'AM', 'A', 'WOMAN', 'AND', 'THERE', 'ARE', 'FEW', 'LIKE', 'ME', 'WHOEVER', 'LOVES', 'ME', 'FLATTERS', 'ME', 'WHOEVER', 'FLATTERS', 'ME', 'PLEASES', 'ME', 'AND', 'WHOEVER', 'PLEASES', 'WELL', 'SAID', 'MONTALAIS', 'YOU', 'DO', 'NOT', 'FINISH'] +7127-75947-0022-448: hyp=['I', 'AM', 'A', 'WOMAN', 'AND', 'THERE', 'ARE', 'FEW', 'LIKE', 'ME', 'WHOEVER', 'LOVES', 'ME', 'FLATTERS', 'ME', 'WHOEVER', 'FLATTERS', 'ME', 'PLEASES', 'ME', 'AND', 'WHOEVER', 'PLEASES', 'WELL', 'SAID', 'MONTALAIS', 'YOU', 'DO', 'NOT', 'FINISH'] +7127-75947-0023-449: ref=['IT', 'IS', 'TOO', 'DIFFICULT', 'REPLIED', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'LAUGHING', 'LOUDLY'] +7127-75947-0023-449: hyp=['IT', 'IS', 'TOO', 'DIFFICULT', 'REPLIED', 'MADEMOISELLE', 'DENISCHALANT', 'LAUGHING', 'LOUDLY'] +7127-75947-0024-450: ref=['LOOK', 'YONDER', 'DO', 'YOU', 'NOT', 'SEE', 'THE', 'MOON', 'SLOWLY', 'RISING', 'SILVERING', 'THE', 'TOPMOST', 'BRANCHES', 'OF', 'THE', 'CHESTNUTS', 'AND', 'THE', 'OAKS'] +7127-75947-0024-450: hyp=['LOOK', 'YONDER', 'DO', 'YOU', 'NOT', 'SEE', 'THE', 'MOON', 'SLOWLY', 'RISING', 'SILVERING', 'THE', 'TOPMOST', 'BRANCHES', 'OF', 'THE', 'CHESTNUTS', 'AND', 'THE', 'YOLKS'] +7127-75947-0025-451: ref=['EXQUISITE', 'SOFT', 'TURF', 'OF', 'THE', 'WOODS', 'THE', 'HAPPINESS', 'WHICH', 'YOUR', 'FRIENDSHIP', 'CONFERS', 'UPON', 'ME'] +7127-75947-0025-451: hyp=['EXQUISITE', 'SOFT', 'TURF', 'OF', 'THE', 'WOODS', 'THE', 'HAPPINESS', 'WHICH', 'YOUR', 'FRIENDSHIP', 'CONFERS', 'UPON', 'ME'] +7127-75947-0026-452: ref=['WELL', 'SAID', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'I', 'ALSO', 'THINK', 'A', 'GOOD', 'DEAL', 'BUT', 'I', 'TAKE', 'CARE'] +7127-75947-0026-452: hyp=['WELL', 'SAID', 'MADEMOISELLE', 'DENISCHALANT', 'I', 'ALSO', 'THINK', 'A', 'GOOD', 'DEAL', 'BUT', 'I', 'TAKE', 'CARE'] +7127-75947-0027-453: ref=['TO', 'SAY', 'NOTHING', 'SAID', 'MONTALAIS', 'SO', 'THAT', 'WHEN', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'THINKS', 'ATHENAIS', 'IS', 'THE', 'ONLY', 'ONE', 'WHO', 'KNOWS', 'IT'] +7127-75947-0027-453: hyp=['TO', 'SAY', 'NOTHING', 'SAID', 'MONTALAIS', 'SO', 'THAT', 'WHEN', 'MADEMOISELLE', 'DENISCHERANT', 'THINKS', 'ETHNE', 'IS', 'THE', 'ONLY', 'ONE', 'WHO', 'KNOWS', 'IT'] +7127-75947-0028-454: ref=['QUICK', 'QUICK', 'THEN', 'AMONG', 'THE', 'HIGH', 'REED', 'GRASS', 'SAID', 'MONTALAIS', 'STOOP', 'ATHENAIS', 'YOU', 'ARE', 'SO', 'TALL'] +7127-75947-0028-454: hyp=['QUICK', 'QUICK', 'THEN', 'AMONG', 'THE', 'HIGH', 'REED', 'GRASS', 'SAID', 'MONTALAIS', 'STOOP', 'ETHINE', 'YOU', 'ARE', 'SO', 'TALL'] +7127-75947-0029-455: ref=['THE', 'YOUNG', 'GIRLS', 'HAD', 'INDEED', 'MADE', 'THEMSELVES', 'SMALL', 'INDEED', 'INVISIBLE'] +7127-75947-0029-455: hyp=['THE', 'YOUNG', 'GIRLS', 'HAD', 'INDEED', 'MADE', 'THEMSELVES', 'SMALL', 'INDEED', 'INVISIBLE'] +7127-75947-0030-456: ref=['SHE', 'WAS', 'HERE', 'JUST', 'NOW', 'SAID', 'THE', 'COUNT'] +7127-75947-0030-456: hyp=['SHE', 'WAS', 'HERE', 'JUST', 'NOW', 'SAID', 'THE', 'COUNT'] +7127-75947-0031-457: ref=['YOU', 'ARE', 'POSITIVE', 'THEN'] +7127-75947-0031-457: hyp=['YOU', 'ARE', 'POSITIVE', 'THEN'] +7127-75947-0032-458: ref=['YES', 'BUT', 'PERHAPS', 'I', 'FRIGHTENED', 'HER', 'IN', 'WHAT', 'WAY'] +7127-75947-0032-458: hyp=['YES', 'BUT', 'PERHAPS', 'I', 'FRIGHTENED', 'HER', 'AND', 'WHAT', 'WAY'] +7127-75947-0033-459: ref=['HOW', 'IS', 'IT', 'LA', 'VALLIERE', 'SAID', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'THAT', 'THE', 'VICOMTE', 'DE', 
'BRAGELONNE', 'SPOKE', 'OF', 'YOU', 'AS', 'LOUISE'] +7127-75947-0033-459: hyp=['HOW', 'IS', 'IT', 'LA', 'VALLIERS', 'SAID', 'MADEMOISELLE', 'DENISCHANT', 'THAT', 'THE', 'VICOMTE', 'DE', 'BREG', 'ALONE', 'SPOKE', 'OF', 'YOU', 'AS', 'LOUISE'] +7127-75947-0034-460: ref=['IT', 'SEEMS', 'THE', 'KING', 'WILL', 'NOT', 'CONSENT', 'TO', 'IT'] +7127-75947-0034-460: hyp=['IT', 'SEEMS', 'THE', 'KING', 'WILL', 'NOT', 'CONSENT', 'TO', 'IT'] +7127-75947-0035-461: ref=['GOOD', 'GRACIOUS', 'HAS', 'THE', 'KING', 'ANY', 'RIGHT', 'TO', 'INTERFERE', 'IN', 'MATTERS', 'OF', 'THAT', 'KIND'] +7127-75947-0035-461: hyp=['GOOD', 'GRACIOUS', 'HAS', 'THE', 'KING', 'ANY', 'RIGHT', 'TO', 'INTERFERE', 'IN', 'MATTERS', 'OF', 'THAT', 'KIND'] +7127-75947-0036-462: ref=['I', 'GIVE', 'MY', 'CONSENT'] +7127-75947-0036-462: hyp=['I', 'GIVE', 'MY', 'CONSENT'] +7127-75947-0037-463: ref=['OH', 'I', 'AM', 'SPEAKING', 'SERIOUSLY', 'REPLIED', 'MONTALAIS', 'AND', 'MY', 'OPINION', 'IN', 'THIS', 'CASE', 'IS', 'QUITE', 'AS', 'GOOD', 'AS', 'THE', "KING'S", 'I', 'SUPPOSE', 'IS', 'IT', 'NOT', 'LOUISE'] +7127-75947-0037-463: hyp=['OH', 'I', 'AM', 'SPEAKING', 'SERIOUSLY', 'REPLIED', 'MONTALAIS', 'AND', 'MY', 'OPINION', 'IN', 'THIS', 'CASE', 'IS', 'QUITE', 'AS', 'GOOD', 'AS', 'THE', 'KING', 'AS', 'I', 'SUPPOSE', 'IS', 'IT', 'NOT', 'LOUISE'] +7127-75947-0038-464: ref=['LET', 'US', 'RUN', 'THEN', 'SAID', 'ALL', 'THREE', 'AND', 'GRACEFULLY', 'LIFTING', 'UP', 'THE', 'LONG', 'SKIRTS', 'OF', 'THEIR', 'SILK', 'DRESSES', 'THEY', 'LIGHTLY', 'RAN', 'ACROSS', 'THE', 'OPEN', 'SPACE', 'BETWEEN', 'THE', 'LAKE', 'AND', 'THE', 'THICKEST', 'COVERT', 'OF', 'THE', 'PARK'] +7127-75947-0038-464: hyp=['LET', 'US', 'RUN', 'THEN', 'SAID', 'ALL', 'THREE', 'AND', 'GRACEFULLY', 'LIFTING', 'UP', 'THE', 'LONG', 'SKIRTS', 'OF', 'THEIR', 'SILK', 'DRESSES', 'THEY', 'LIGHTLY', 'RAN', 'ACROSS', 'THE', 'OPEN', 'SPACE', 'BETWEEN', 'THE', 'LAKE', 'AND', 'THE', 'THICKEST', 'COVERT', 'OF', 'THE', 'PARK'] +7127-75947-0039-465: ref=['IN', 'FACT', 'THE', 'SOUND', 'OF', "MADAME'S", 'AND', 'THE', "QUEEN'S", 'CARRIAGES', 'COULD', 'BE', 'HEARD', 'IN', 'THE', 'DISTANCE', 'UPON', 'THE', 'HARD', 'DRY', 'GROUND', 'OF', 'THE', 'ROADS', 'FOLLOWED', 'BY', 'THE', 'MOUNTED', 'CAVALIERS'] +7127-75947-0039-465: hyp=['IN', 'FACT', 'THE', 'SOUND', 'OF', "MADAME'S", 'AND', 'THE', "QUEEN'S", 'CARRIAGES', 'COULD', 'BE', 'HEARD', 'IN', 'THE', 'DISTANCE', 'UPON', 'THE', 'HARD', 'DRY', 'GROUND', 'OF', 'THE', 'ROADS', 'FOLLOWED', 'BY', 'THE', 'MOUNTAIN', 'CAVALIERS'] +7127-75947-0040-466: ref=['IN', 'THIS', 'WAY', 'THE', 'FETE', 'OF', 'THE', 'WHOLE', 'COURT', 'WAS', 'A', 'FETE', 'ALSO', 'FOR', 'THE', 'MYSTERIOUS', 'INHABITANTS', 'OF', 'THE', 'FOREST', 'FOR', 'CERTAINLY', 'THE', 'DEER', 'IN', 'THE', 'BRAKE', 'THE', 'PHEASANT', 'ON', 'THE', 'BRANCH', 'THE', 'FOX', 'IN', 'ITS', 'HOLE', 'WERE', 'ALL', 'LISTENING'] +7127-75947-0040-466: hyp=['IN', 'THIS', 'WAY', 'THE', 'FETE', 'OF', 'THE', 'WHOLE', 'COURT', 'WAS', 'A', 'FETE', 'ALSO', 'FOR', 'THE', 'MYSTERIOUS', 'INHABITANTS', 'OF', 'THE', 'FOREST', 'FOR', 'CERTAINLY', 'THE', 'DEER', 'IN', 'THE', 'BRAKE', 'THE', 'PHEASANT', 'ON', 'THE', 'BRANCH', 'THE', 'FOX', 'IN', 'ITS', 'HOLE', 'WERE', 'ALL', 'LISTENING'] +7176-88083-0000-707: ref=['ALL', 'ABOUT', 'HIM', 'WAS', 'A', 'TUMULT', 'OF', 'BRIGHT', 'AND', 'BROKEN', 'COLOR', 'SCATTERED', 'IN', 'BROAD', 'SPLASHES'] +7176-88083-0000-707: hyp=['ALL', 'ABOUT', 'HIM', 'WAS', 'A', 'TUMULT', 'OF', 'BRIGHT', 'AND', 'BROKEN', 'COLOR', 'SCATTERED', 'AND', 'BROAD', 'SPLASHES'] +7176-88083-0001-708: ref=['THE', 'MERGANSER', 'HAD', 
'A', 'CRESTED', 'HEAD', 'OF', 'IRIDESCENT', 'GREEN', 'BLACK', 'A', 'BROAD', 'COLLAR', 'OF', 'LUSTROUS', 'WHITE', 'BLACK', 'BACK', 'BLACK', 'AND', 'WHITE', 'WINGS', 'WHITE', 'BELLY', 'SIDES', 'FINELY', 'PENCILLED', 'IN', 'BLACK', 'AND', 'WHITE', 'AND', 'A', 'BREAST', 'OF', 'RICH', 'CHESTNUT', 'RED', 'STREAKED', 'WITH', 'BLACK'] +7176-88083-0001-708: hyp=['THE', 'MERGANCER', 'HAD', 'A', 'CRUSTED', 'HEAD', 'OF', 'IRIDESCENT', 'GREEN', 'BLACK', 'A', 'BROAD', 'COLLAR', 'OF', 'LUSTROUS', 'WHITE', 'BLACK', 'BACK', 'BLACK', 'AND', 'WHITE', 'WINGS', 'WHITE', 'BELLY', 'SIDES', 'FINELY', 'PENCILLED', 'IN', 'BLACK', 'AND', 'WHITE', 'AND', 'A', 'BREAST', 'OF', 'RICH', 'CHESTNUT', 'RED', 'STREAKED', 'WITH', 'BLACK'] +7176-88083-0002-709: ref=['HIS', 'FEET', 'WERE', 'RED', 'HIS', 'LONG', 'NARROW', 'BEAK', 'WITH', 'ITS', 'SAW', 'TOOTHED', 'EDGES', 'AND', 'SHARP', 'HOOKED', 'TIP', 'WAS', 'BRIGHT', 'RED'] +7176-88083-0002-709: hyp=['HIS', 'FEET', 'WERE', 'RED', 'HIS', 'LONG', 'NARROW', 'BEAK', 'WITH', 'ITS', 'SALL', 'TOOTHED', 'EDGES', 'AND', 'SHARP', 'HOOKED', 'TIP', 'WAS', 'BRIGHT', 'RED'] +7176-88083-0003-710: ref=['BUT', 'HERE', 'HE', 'WAS', 'AT', 'A', 'TERRIBLE', 'DISADVANTAGE', 'AS', 'COMPARED', 'WITH', 'THE', 'OWLS', 'HAWKS', 'AND', 'EAGLES', 'HE', 'HAD', 'NO', 'RENDING', 'CLAWS'] +7176-88083-0003-710: hyp=['BUT', 'HERE', 'HE', 'WAS', 'AT', 'A', 'TERRIBLE', 'DISADVANTAGE', 'AS', 'COMPARED', 'WITH', 'THE', 'OWLS', 'HAWKS', 'AND', 'EAGLES', 'HE', 'HAD', 'NO', 'RENDING', 'CLAWS'] +7176-88083-0004-711: ref=['BUT', 'SUDDENLY', 'STRAIGHT', 'AND', 'SWIFT', 'AS', 'A', 'DIVING', 'CORMORANT', 'HE', 'SHOT', 'DOWN', 'INTO', 'THE', 'TORRENT', 'AND', 'DISAPPEARED', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0004-711: hyp=['BUT', 'SUDDENLY', 'STRAIGHT', 'AND', 'SWIFT', 'AS', 'A', 'DIVING', 'COMRADE', 'HE', 'SHOT', 'DOWN', 'INTO', 'THE', 'TORRENT', 'AND', 'DISAPPEARED', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0005-712: ref=['ONCE', 'FAIRLY', 'A', 'WING', 'HOWEVER', 'HE', 'WHEELED', 'AND', 'MADE', 'BACK', 'HURRIEDLY', 'FOR', 'HIS', 'PERCH'] +7176-88083-0005-712: hyp=['ONCE', 'FAIRLY', 'A', 'WING', 'HOWEVER', 'HE', 'WHEELED', 'AND', 'MADE', 'BACK', 'HURRIEDLY', 'FOR', 'HIS', 'PERCH'] +7176-88083-0006-713: ref=['IT', 'MIGHT', 'HAVE', 'SEEMED', 'THAT', 'A', 'TROUT', 'OF', 'THIS', 'SIZE', 'WAS', 'A', 'FAIRLY', 'SUBSTANTIAL', 'MEAL'] +7176-88083-0006-713: hyp=['IT', 'MIGHT', 'HAVE', 'SEEMED', 'THAT', 'A', 'TROUT', 'OF', 'THIS', 'SIZE', 'WAS', 'A', 'FAIRLY', 'SUBSTANTIAL', 'MEAL'] +7176-88083-0007-714: ref=['BUT', 'SUCH', 'WAS', 'HIS', 'KEENNESS', 'THAT', 'EVEN', 'WHILE', 'THE', 'WIDE', 'FLUKES', 'OF', 'HIS', 'ENGORGED', 'VICTIM', 'WERE', 'STILL', 'STICKING', 'OUT', 'AT', 'THE', 'CORNERS', 'OF', 'HIS', 'BEAK', 'HIS', 'FIERCE', 'RED', 'EYES', 'WERE', 'ONCE', 'MORE', 'PEERING', 'DOWNWARD', 'INTO', 'THE', 'TORRENT', 'IN', 'SEARCH', 'OF', 'FRESH', 'PREY'] +7176-88083-0007-714: hyp=['BUT', 'SUCH', 'WAS', 'HIS', 'KEENNESS', 'THAT', 'EVEN', 'WHILE', 'THE', 'WIDE', 'FLUKES', 'OF', 'HIS', 'ENGORGED', 'VICTIM', 'WERE', 'STILL', 'STICKING', 'OUT', 'AT', 'THE', 'CORNERS', 'OF', 'HIS', 'BEAK', 'HIS', 'FIERCE', 'RED', 'EYES', 'WERE', 'ONCE', 'MORE', 'PEERING', 'DOWNWARD', 'INTO', 'THE', 'TORRENT', 'IN', 'SEARCH', 'OF', 'FRESH', 'PREY'] +7176-88083-0008-715: ref=['IN', 'DESPAIR', 'HE', 'HURLED', 'HIMSELF', 'DOWNWARD', 'TOO', 'SOON'] +7176-88083-0008-715: hyp=['IN', 'DESPAIR', 'HE', 'HURLED', 'HIMSELF', 'DOWNWARD', 'TOO', 'SOON'] +7176-88083-0009-716: ref=['THE', 'GREAT', 'HAWK', 'FOLLOWED', 'HURRIEDLY', 'TO', 'RETRIEVE', 'HIS', 'PREY', 
'FROM', 'THE', 'GROUND'] +7176-88083-0009-716: hyp=['THE', 'GREAT', 'HAWK', 'FOWLED', 'HURRIEDLY', 'TO', 'RETRIEVE', 'HIS', 'PREY', 'FROM', 'THE', 'GROUND'] +7176-88083-0010-717: ref=['THE', 'CAT', 'GROWLED', 'SOFTLY', 'PICKED', 'UP', 'THE', 'PRIZE', 'IN', 'HER', 'JAWS', 'AND', 'TROTTED', 'INTO', 'THE', 'BUSHES', 'TO', 'DEVOUR', 'IT'] +7176-88083-0010-717: hyp=['THE', 'CAT', 'GROWLED', 'SOFTLY', 'PICKED', 'UP', 'THE', 'PRIZE', 'IN', 'HER', 'JAWS', 'AND', 'TROTTED', 'INTO', 'THE', 'BUSHES', 'TO', 'DEVOUR', 'IT'] +7176-88083-0011-718: ref=['IN', 'FACT', 'HE', 'HAD', 'JUST', 'FINISHED', 'IT', 'THE', 'LAST', 'OF', 'THE', "TROUT'S", 'TAIL', 'HAD', 'JUST', 'VANISHED', 'WITH', 'A', 'SPASM', 'DOWN', 'HIS', 'STRAINED', 'GULLET', 'WHEN', 'THE', 'BAFFLED', 'HAWK', 'CAUGHT', 'SIGHT', 'OF', 'HIM', 'AND', 'SWOOPED'] +7176-88083-0011-718: hyp=['IN', 'FACT', 'HE', 'HAD', 'JUST', 'FINISHED', 'IT', 'THE', 'LAST', 'OF', 'THE', "TROUT'S", 'TAIL', 'HAD', 'JUST', 'VANISHED', 'WITH', 'A', 'SPASM', 'DOWN', 'HIS', 'STRAINED', 'GULLET', 'WHEN', 'THE', 'BAFFLED', 'HAWK', 'CAUGHT', 'SIGHT', 'OF', 'HIM', 'AND', 'SWOOPED'] +7176-88083-0012-719: ref=['THE', 'HAWK', 'ALIGHTED', 'ON', 'THE', 'DEAD', 'BRANCH', 'AND', 'SAT', 'UPRIGHT', 'MOTIONLESS', 'AS', 'IF', 'SURPRISED'] +7176-88083-0012-719: hyp=['THE', 'HAWK', 'ALIGHTED', 'ON', 'THE', 'DEAD', 'BRANCH', 'AND', 'SAT', 'UPRIGHT', 'MOTIONLESS', 'AS', 'IF', 'SURPRISED'] +7176-88083-0013-720: ref=['LIKE', 'HIS', 'UNFORTUNATE', 'LITTLE', 'COUSIN', 'THE', 'TEAL', 'HE', 'TOO', 'HAD', 'FELT', 'THE', 'FEAR', 'OF', 'DEATH', 'SMITTEN', 'INTO', 'HIS', 'HEART', 'AND', 'WAS', 'HEADING', 'DESPERATELY', 'FOR', 'THE', 'REFUGE', 'OF', 'SOME', 'DARK', 'OVERHANGING', 'BANK', 'DEEP', 'FRINGED', 'WITH', 'WEEDS', 'WHERE', 'THE', 'DREADFUL', 'EYE', 'OF', 'THE', 'HAWK', 'SHOULD', 'NOT', 'DISCERN', 'HIM'] +7176-88083-0013-720: hyp=['LIKE', 'HIS', 'UNFORTUNATE', 'LITTLE', 'COUSIN', 'THE', 'TEAL', 'HE', 'TOO', 'HAD', 'FELT', 'THE', 'FEAR', 'OF', 'DEATH', 'SMITTEN', 'INTO', 'HIS', 'HEART', 'AND', 'WAS', 'HEADING', 'DESPERATELY', 'FOR', 'THE', 'REFUGE', 'OF', 'SOME', 'DARK', 'OVERHANGING', 'BANK', 'DEEP', 'FRINGED', 'WITH', 'WEEDS', 'WHERE', 'THE', 'DREADFUL', 'EYE', 'OF', 'THE', 'HAWK', 'SHOULD', 'NOT', 'DISCERN', 'HIM'] +7176-88083-0014-721: ref=['THE', 'HAWK', 'SAT', 'UPON', 'THE', 'BRANCH', 'AND', 'WATCHED', 'HIS', 'QUARRY', 'SWIMMING', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0014-721: hyp=['THE', 'HAWK', 'SAT', 'UPON', 'THE', 'BRANCH', 'AND', 'WATCHED', 'HIS', 'QUARRY', 'SWIMMING', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0015-722: ref=['ALMOST', 'INSTANTLY', 'HE', 'WAS', 'FORCED', 'TO', 'THE', 'TOP'] +7176-88083-0015-722: hyp=['ALMOST', 'INSTANTLY', 'HE', 'WAS', 'FORCED', 'TO', 'THE', 'TOP'] +7176-88083-0016-723: ref=['STRAIGHTWAY', 'THE', 'HAWK', 'GLIDED', 'FROM', 'HIS', 'PERCH', 'AND', 'DARTED', 'AFTER', 'HIM'] +7176-88083-0016-723: hyp=['STRAIGHTWAY', 'IN', 'THE', 'HAWK', 'GLIDED', 'FROM', 'HIS', 'PERCH', 'AND', 'DARTED', 'AFTER', 'HIM'] +7176-88083-0017-724: ref=['BUT', 'AT', 'THIS', 'POINT', 'IN', 'THE', 'RAPIDS', 'IT', 'WAS', 'IMPOSSIBLE', 'FOR', 'HIM', 'TO', 'STAY', 'DOWN'] +7176-88083-0017-724: hyp=['BUT', 'AT', 'THIS', 'POINT', 'IN', 'THE', 'RAPIDS', 'IT', 'WAS', 'IMPOSSIBLE', 'FOR', 'HIM', 'TO', 'STAY', 'DOWN'] +7176-88083-0018-725: ref=['BUT', 'THIS', 'FREQUENTER', 'OF', 'THE', 'HEIGHTS', 'OF', 'AIR', 'FOR', 'ALL', 'HIS', 'SAVAGE', 'VALOR', 'WAS', 'TROUBLED', 'AT', 'THE', 'LEAPING', 'WAVES', 'AND', 'THE', 'TOSSING', 'FOAM', 'OF', 'THESE', 'MAD', 'RAPIDS', 'HE', 'DID', 'NOT', 
'UNDERSTAND', 'THEM'] +7176-88083-0018-725: hyp=['BUT', 'THIS', 'FREQUENTER', 'OF', 'THE', 'HEIGHTS', 'OF', 'AIR', 'FOR', 'ALL', 'HIS', 'SAVAGE', 'VALOUR', 'WAS', 'TROUBLED', 'AT', 'THE', 'LEAPING', 'WAVES', 'AND', 'THE', 'TOSSING', 'FOAM', 'OF', 'THESE', 'MAD', 'RAPIDS', 'HE', 'DID', 'NOT', 'UNDERSTAND', 'THEM'] +7176-88083-0019-726: ref=['AS', 'HE', 'FLEW', 'HIS', 'DOWN', 'REACHING', 'CLUTCHING', 'TALONS', 'WERE', 'NOT', 'HALF', 'A', 'YARD', 'ABOVE', 'THE', "FUGITIVE'S", 'HEAD'] +7176-88083-0019-726: hyp=['AS', 'HE', 'FLEW', 'HIS', 'DOWNREACHING', 'CLUTCHING', 'TALONS', 'WERE', 'NOT', 'HALF', 'A', 'YARD', 'ABOVE', 'THE', "FUGITIVE'S", 'HEAD'] +7176-88083-0020-727: ref=['WHERE', 'THE', 'WAVES', 'FOR', 'AN', 'INSTANT', 'SANK', 'THEY', 'CAME', 'CLOSER', 'BUT', 'NOT', 'QUITE', 'WITHIN', 'GRASPING', 'REACH'] +7176-88083-0020-727: hyp=['WHERE', 'THE', 'WAVE', 'IS', 'FOR', 'AN', 'INSTANT', 'SANK', 'THEY', 'CAME', 'CLOSER', 'BUT', 'NOT', 'QUITE', 'WITHIN', 'GRASPING', 'REACH'] +7176-88083-0021-728: ref=['BUT', 'AS', 'BEFORE', 'THE', 'LEAPING', 'WAVES', 'OF', 'THE', 'RAPIDS', 'WERE', 'TOO', 'MUCH', 'FOR', 'HIS', 'PURSUER', 'AND', 'HE', 'WAS', 'ABLE', 'TO', 'FLAP', 'HIS', 'WAY', 'ONWARD', 'IN', 'A', 'CLOUD', 'OF', 'FOAM', 'WHILE', 'DOOM', 'HUNG', 'LOW', 'ABOVE', 'HIS', 'HEAD', 'YET', 'HESITATED', 'TO', 'STRIKE'] +7176-88083-0021-728: hyp=['BUT', 'AS', 'BEFORE', 'THE', 'LEAPING', 'WAVES', 'OF', 'THE', 'RAPIDS', 'WERE', 'TOO', 'MUCH', 'FOR', 'HIS', 'PURSUER', 'AND', 'HE', 'WAS', 'ABLE', 'TO', 'FLAP', 'HIS', 'WAY', 'ONWARD', 'IN', 'A', 'CLOUD', 'OF', 'FOAM', 'WHILE', 'DOOM', 'HUNG', 'LOW', 'ABOVE', 'HIS', 'HEAD', 'YET', 'HESITATED', 'TO', 'STRIKE'] +7176-88083-0022-729: ref=['THE', 'HAWK', 'EMBITTERED', 'BY', 'THE', 'LOSS', 'OF', 'HIS', 'FIRST', 'QUARRY', 'HAD', 'BECOME', 'AS', 'DOGGED', 'IN', 'PURSUIT', 'AS', 'A', 'WEASEL', 'NOT', 'TO', 'BE', 'SHAKEN', 'OFF', 'OR', 'EVADED', 'OR', 'DECEIVED'] +7176-88083-0022-729: hyp=['THE', 'HAWK', 'EMBITTERED', 'BY', 'THE', 'LOSS', 'OF', 'HIS', 'FIRST', 'QUARRY', 'HAD', 'BECOME', 'AS', 'DOGGED', 'IN', 'PURSUIT', 'AS', 'A', 'WEASEL', 'NOT', 'TO', 'BE', 'SHAKEN', 'OFF', 'OR', 'EVADED', 'OR', 'DECEIVED'] +7176-88083-0023-730: ref=['HE', 'HAD', 'A', 'LOT', 'OF', 'LINE', 'OUT', 'AND', 'THE', 'PLACE', 'WAS', 'NONE', 'TOO', 'FREE', 'FOR', 'A', 'LONG', 'CAST', 'BUT', 'HE', 'WAS', 'IMPATIENT', 'TO', 'DROP', 'HIS', 'FLIES', 'AGAIN', 'ON', 'THE', 'SPOT', 'WHERE', 'THE', 'BIG', 'FISH', 'WAS', 'FEEDING'] +7176-88083-0023-730: hyp=['HE', 'HAD', 'A', 'LOT', 'OF', 'LINE', 'OUT', 'AND', 'THE', 'PLACE', 'WAS', 'NONE', 'TOO', 'FREE', 'FOR', 'A', 'LONG', 'CAST', 'BUT', 'HE', 'WAS', 'IMPATIENT', 'TO', 'DROP', 'HIS', 'FLIES', 'AGAIN', 'ON', 'THE', 'SPOT', 'WHERE', 'THE', 'BIG', 'FISH', 'WAS', 'FEEDING'] +7176-88083-0024-731: ref=['THE', 'LAST', 'DROP', 'FLY', 'AS', 'LUCK', 'WOULD', 'HAVE', 'IT', 'CAUGHT', 'JUST', 'IN', 'THE', 'CORNER', 'OF', 'THE', "HAWK'S", 'ANGRILY', 'OPEN', 'BEAK', 'HOOKING', 'ITSELF', 'FIRMLY'] +7176-88083-0024-731: hyp=['THE', 'LAST', 'DROP', 'FLY', 'AS', 'LUCK', 'WOULD', 'HAVE', 'IT', 'CAUGHT', 'JUST', 'IN', 'THE', 'CORNER', 'OF', 'THE', "HAWK'S", 'ANGRILY', 'OPEN', 'BEAK', 'HOOKING', 'ITSELF', 'FIRMLY'] +7176-88083-0025-732: ref=['AT', 'THE', 'SUDDEN', 'SHARP', 'STING', 'OF', 'IT', 'THE', 'GREAT', 'BIRD', 'TURNED', 'HIS', 'HEAD', 'AND', 'NOTICED', 'FOR', 'THE', 'FIRST', 'TIME', 'THE', 'FISHERMAN', 'STANDING', 'ON', 'THE', 'BANK'] +7176-88083-0025-732: hyp=['AT', 'THE', 'SUDDEN', 'SHARP', 'STING', 'OF', 'IT', 'THE', 'GREAT', 'BIRD', 'TURNED', 'HIS', 'HEAD', 
'AND', 'NOTICED', 'FOR', 'THE', 'FIRST', 'TIME', 'THE', 'FISHERMAN', 'STANDING', 'ON', 'THE', 'BANK'] +7176-88083-0026-733: ref=['THE', 'DRAG', 'UPON', 'HIS', 'BEAK', 'AND', 'THE', 'LIGHT', 'CHECK', 'UPON', 'HIS', 'WINGS', 'WERE', 'INEXPLICABLE', 'TO', 'HIM', 'AND', 'APPALLING'] +7176-88083-0026-733: hyp=['THE', 'DRAG', 'UPON', 'HIS', 'BEAK', 'AND', 'THE', 'LIGHT', 'CHECK', 'UPON', 'HIS', 'WINGS', 'WERE', 'INEXPLICABLE', 'TO', 'HIM', 'AND', 'APPALLING'] +7176-88083-0027-734: ref=['THEN', 'THE', 'LEADER', 'PARTED', 'FROM', 'THE', 'LINE'] +7176-88083-0027-734: hyp=['THAN', 'THE', 'LEADER', 'PARTED', 'FROM', 'THE', 'LINE'] +7176-92135-0000-661: ref=['HE', 'IS', 'A', 'WELCOME', 'FIGURE', 'AT', 'THE', 'GARDEN', 'PARTIES', 'OF', 'THE', 'ELECT', 'WHO', 'ARE', 'ALWAYS', 'READY', 'TO', 'ENCOURAGE', 'HIM', 'BY', 'ACCEPTING', 'FREE', 'SEATS', 'FOR', 'HIS', 'PLAY', 'ACTOR', 'MANAGERS', 'NOD', 'TO', 'HIM', 'EDITORS', 'ALLOW', 'HIM', 'TO', 'CONTRIBUTE', 'WITHOUT', 'CHARGE', 'TO', 'A', 'SYMPOSIUM', 'ON', 'THE', 'PRICE', 'OF', 'GOLF', 'BALLS'] +7176-92135-0000-661: hyp=['HE', 'IS', 'A', 'WELCOME', 'FIGURE', 'AT', 'THE', 'GARDEN', 'PARTIES', 'OF', 'THE', 'ELECT', 'WHO', 'ARE', 'ALWAYS', 'READY', 'TO', 'ENCOURAGE', 'HIM', 'BY', 'ACCEPTING', 'FREE', 'SEATS', 'FOR', 'HIS', 'PLAY', 'ACTOR', 'MANAGERS', 'NOD', 'TO', 'HIM', 'EDITORS', 'ALLOW', 'HIM', 'TO', 'CONTRIBUTE', 'WITHOUT', 'CHARGE', 'TO', 'A', 'SUPPOSIUM', 'ON', 'THE', 'PRICE', 'OF', 'GOLF', 'BALLS'] +7176-92135-0001-662: ref=['IN', 'SHORT', 'HE', 'BECOMES', 'A', 'PROMINENT', 'FIGURE', 'IN', 'LONDON', 'SOCIETY', 'AND', 'IF', 'HE', 'IS', 'NOT', 'CAREFUL', 'SOMEBODY', 'WILL', 'SAY', 'SO'] +7176-92135-0001-662: hyp=['IN', 'SHORT', 'HE', 'BECOMES', 'A', 'PROMINENT', 'FIGURE', 'IN', 'LONDON', 'SOCIETY', 'AND', 'IF', 'HE', 'IS', 'NOT', 'CAREFUL', 'SOMEBODY', 'WILL', 'SAY', 'SO'] +7176-92135-0002-663: ref=['BUT', 'EVEN', 'THE', 'UNSUCCESSFUL', 'DRAMATIST', 'HAS', 'HIS', 'MOMENTS'] +7176-92135-0002-663: hyp=['BUT', 'EVEN', 'THE', 'UNSUCCESSFUL', 'DRAMATIST', 'HAS', 'HIS', 'MOMENTS'] +7176-92135-0003-664: ref=['YOUR', 'PLAY', 'MUST', 'BE', 'NOT', 'MERELY', 'A', 'GOOD', 'PLAY', 'BUT', 'A', 'SUCCESSFUL', 'ONE'] +7176-92135-0003-664: hyp=['YOU', 'ARE', 'PLAY', 'MUST', 'BE', 'NOT', 'MERELY', 'A', 'GOOD', 'PLAY', 'BUT', 'A', 'SUCCESSFUL', 'ONE'] +7176-92135-0004-665: ref=['FRANKLY', 'I', 'CANNOT', 'ALWAYS', 'SAY'] +7176-92135-0004-665: hyp=['FRANKLY', 'I', 'CANNOT', 'ALWAYS', 'SAY'] +7176-92135-0005-666: ref=['BUT', 'SUPPOSE', 'YOU', 'SAID', "I'M", 'FOND', 'OF', 'WRITING', 'MY', 'PEOPLE', 'ALWAYS', 'SAY', 'MY', 'LETTERS', 'HOME', 'ARE', 'GOOD', 'ENOUGH', 'FOR', 'PUNCH'] +7176-92135-0005-666: hyp=['BUT', 'SUPPOSE', 'YOU', 'SAID', "I'M", 'FOND', 'OF', 'WRITING', 'MY', 'PEOPLE', 'ALWAYS', 'SAY', 'MY', 'LETTERS', 'HOME', 'ARE', 'GOOD', 'ENOUGH', 'FOR', 'PUNCH'] +7176-92135-0006-667: ref=["I'VE", 'GOT', 'A', 'LITTLE', 'IDEA', 'FOR', 'A', 'PLAY', 'ABOUT', 'A', 'MAN', 'AND', 'A', 'WOMAN', 'AND', 'ANOTHER', 'WOMAN', 'AND', 'BUT', 'PERHAPS', "I'D", 'BETTER', 'KEEP', 'THE', 'PLOT', 'A', 'SECRET', 'FOR', 'THE', 'MOMENT'] +7176-92135-0006-667: hyp=["I'VE", 'GOT', 'A', 'LITTLE', 'IDEA', 'FOR', 'A', 'PLAY', 'ABOUT', 'A', 'MAN', 'AND', 'A', 'WOMAN', 'AND', 'ANOTHER', 'WOMAN', 'AND', 'BUT', 'PERHAPS', 'I', 'BETTER', 'KEEP', 'THE', 'PLOT', 'A', 'SECRET', 'FOR', 'THE', 'MOMENT'] +7176-92135-0007-668: ref=['ANYHOW', "IT'S", 'JOLLY', 'EXCITING', 'AND', 'I', 'CAN', 'DO', 'THE', 'DIALOGUE', 'ALL', 'RIGHT'] +7176-92135-0007-668: hyp=['ANYHOW', "IT'S", 'A', 'JOLLY', 'EXCITING', 'AND', 'I', 
'CAN', 'DO', 'THE', 'DIALOGUE', 'ALL', 'RIGHT'] +7176-92135-0008-669: ref=['LEND', 'ME', 'YOUR', 'EAR', 'FOR', 'TEN', 'MINUTES', 'AND', 'YOU', 'SHALL', 'LEARN', 'JUST', 'WHAT', 'STAGECRAFT', 'IS'] +7176-92135-0008-669: hyp=['LEND', 'ME', 'YOUR', 'EAR', 'FOR', 'TEN', 'MINUTES', 'AND', 'YOU', 'SHALL', 'LEARN', 'JUST', 'WHAT', 'STAGECRAFT', 'IS'] +7176-92135-0009-670: ref=['AND', 'I', 'SHOULD', 'BEGIN', 'WITH', 'A', 'SHORT', 'HOMILY', 'ON', 'SOLILOQUY'] +7176-92135-0009-670: hyp=['AND', 'I', 'SHOULD', 'BEGIN', 'WITH', 'A', 'SHORT', 'HUMMILY', 'ON', 'SOLILOQUY'] +7176-92135-0010-671: ref=['HAM', 'TO', 'BE', 'OR', 'NOT', 'TO', 'BE'] +7176-92135-0010-671: hyp=['HIM', 'TO', 'BE', 'OR', 'NOT', 'TO', 'BE'] +7176-92135-0011-672: ref=['NOW', 'THE', 'OBJECT', 'OF', 'THIS', 'SOLILOQUY', 'IS', 'PLAIN'] +7176-92135-0011-672: hyp=['NOW', 'THE', 'OBJECT', 'OF', 'THIS', 'SOLOQUIE', 'IS', 'PLAIN'] +7176-92135-0012-673: ref=['INDEED', 'IRRESOLUTION', 'BEING', 'THE', 'KEYNOTE', 'OF', "HAMLET'S", 'SOLILOQUY', 'A', 'CLEVER', 'PLAYER', 'COULD', 'TO', 'SOME', 'EXTENT', 'INDICATE', 'THE', 'WHOLE', 'THIRTY', 'LINES', 'BY', 'A', 'SILENT', 'WORKING', 'OF', 'THE', 'JAW', 'BUT', 'AT', 'THE', 'SAME', 'TIME', 'IT', 'WOULD', 'BE', 'IDLE', 'TO', 'DENY', 'THAT', 'HE', 'WOULD', 'MISS', 'THE', 'FINER', 'SHADES', 'OF', 'THE', "DRAMATIST'S", 'MEANING'] +7176-92135-0012-673: hyp=['INDEED', 'IRRESOLUTION', 'MEAN', 'THE', 'KEEN', 'OUT', 'OF', "HAMLET'S", 'SOLILOQUY', 'A', 'CLEVER', 'PLAYER', 'COULD', 'TO', 'SOME', 'EXTENT', 'INDICATE', 'THE', 'WHOLE', 'THIRTY', 'LINES', 'BY', 'A', 'SILAGE', 'WORKING', 'OF', 'THE', 'JOB', 'BUT', 'AT', 'THE', 'SAME', 'TIME', 'IT', 'WOULD', 'BE', 'IDLE', 'TO', 'DENY', 'THAT', 'HE', 'WOULD', 'MISS', 'THE', 'FINER', 'SHADES', 'OF', 'THE', "DRAMATIST'S", 'MEANING'] +7176-92135-0013-674: ref=['WE', 'MODERNS', 'HOWEVER', 'SEE', 'THE', 'ABSURDITY', 'OF', 'IT'] +7176-92135-0013-674: hyp=['WE', 'MODERNS', 'HOWEVER', 'SEE', 'THE', 'ABSURDITY', 'OF', 'IT'] +7176-92135-0014-675: ref=['IF', 'IT', 'BE', 'GRANTED', 'FIRST', 'THAT', 'THE', 'THOUGHTS', 'OF', 'A', 'CERTAIN', 'CHARACTER', 'SHOULD', 'BE', 'KNOWN', 'TO', 'THE', 'AUDIENCE', 'AND', 'SECONDLY', 'THAT', 'SOLILOQUY', 'OR', 'THE', 'HABIT', 'OF', 'THINKING', 'ALOUD', 'IS', 'IN', 'OPPOSITION', 'TO', 'MODERN', 'STAGE', 'TECHNIQUE', 'HOW', 'SHALL', 'A', 'SOLILOQUY', 'BE', 'AVOIDED', 'WITHOUT', 'DAMAGE', 'TO', 'THE', 'PLAY'] +7176-92135-0014-675: hyp=['IF', 'IT', 'BE', 'GRANTED', 'FIRST', 'THAT', 'THE', 'THOUGHTS', 'OF', 'A', 'CERTAIN', 'CHARACTER', 'SHOULD', 'BE', 'KNOWN', 'TO', 'THE', 'AUDIENCE', 'AND', 'SECONDLY', 'THAT', 'SOLILOQUY', 'OR', 'THE', 'HABIT', 'OF', 'THINKING', 'ALOUD', 'IS', 'IN', 'OPPOSITION', 'TO', 'MODERN', 'STAGE', 'TYPE', 'HALL', 'SHALL', 'A', 'SOLILOQUY', 'BE', 'AVOIDED', 'WITHOUT', 'DAMAGE', 'TO', 'THE', 'PLAY'] +7176-92135-0015-676: ref=['AND', 'SO', 'ON', 'TILL', 'YOU', 'GET', 'TO', 'THE', 'END', 'WHEN', 'OPHELIA', 'MIGHT', 'SAY', 'AH', 'YES', 'OR', 'SOMETHING', 'NON', 'COMMITTAL', 'OF', 'THAT', 'SORT'] +7176-92135-0015-676: hyp=['AND', 'SO', 'ON', 'TILL', 'YOU', 'GET', 'THE', 'END', 'ONE', 'OF', 'VILLIA', 'MIGHT', 'SAY', 'AH', 'YES', 'OR', 'SOMETHING', 'NON', 'COMMITTAL', 'OF', 'THAT', 'SORT'] +7176-92135-0016-677: ref=['THIS', 'WOULD', 'BE', 'AN', 'EASY', 'WAY', 'OF', 'DOING', 'IT', 'BUT', 'IT', 'WOULD', 'NOT', 'BE', 'THE', 'BEST', 'WAY', 'FOR', 'THE', 'REASON', 'THAT', 'IT', 'IS', 'TOO', 'EASY', 'TO', 'CALL', 'ATTENTION', 'TO', 'ITSELF'] +7176-92135-0016-677: hyp=['THIS', 'WOULD', 'BE', 'AN', 'EASY', 'WAY', 'OF', 'DOING', 'IT', 'BUT', 
'IT', 'WOULD', 'NOT', 'BE', 'THE', 'BEST', 'WAY', 'FOR', 'THE', 'REASON', 'THAT', 'IT', 'IS', 'TOO', 'EASY', 'TO', 'CALL', 'ATTENTION', 'TO', 'ITSELF'] +7176-92135-0017-678: ref=['IN', 'THE', 'OLD', 'BADLY', 'MADE', 'PLAY', 'IT', 'WAS', 'FREQUENTLY', 'NECESSARY', 'FOR', 'ONE', 'OF', 'THE', 'CHARACTERS', 'TO', 'TAKE', 'THE', 'AUDIENCE', 'INTO', 'HIS', 'CONFIDENCE'] +7176-92135-0017-678: hyp=['IN', 'THE', 'OLD', 'BADLY', 'MADE', 'PLAY', 'IT', 'WAS', 'FREQUENTLY', 'NECESSARY', 'FOR', 'ONE', 'OF', 'THE', 'CHARACTERS', 'TO', 'TAKE', 'THE', 'AUDIENCE', 'INTO', 'HIS', 'CONFIDENCE'] +7176-92135-0018-679: ref=['IN', 'THE', 'MODERN', 'WELL', 'CONSTRUCTED', 'PLAY', 'HE', 'SIMPLY', 'RINGS', 'UP', 'AN', 'IMAGINARY', 'CONFEDERATE', 'AND', 'TELLS', 'HIM', 'WHAT', 'HE', 'IS', 'GOING', 'TO', 'DO', 'COULD', 'ANYTHING', 'BE', 'MORE', 'NATURAL'] +7176-92135-0018-679: hyp=['IN', 'THE', 'MODERN', 'WELL', 'CONSTRUCTED', 'PLAY', 'HE', 'SIMPLY', 'RINGS', 'UP', 'AN', 'IMAGINARY', 'CONFEDERATE', 'AND', 'TELLS', 'HIM', 'WHAT', 'HE', 'IS', 'GOING', 'TO', 'DO', 'COULD', 'ANYTHING', 'BE', 'MORE', 'NATURAL'] +7176-92135-0019-680: ref=['I', 'WANT', 'DOUBLE', 'NINE', 'HAL', 'LO'] +7176-92135-0019-680: hyp=['I', 'WANT', 'DOUBLE', 'NINE', 'HELLO'] +7176-92135-0020-681: ref=['DOUBLE', 'NINE', 'TWO', 'THREE', 'ELSINORE', 'DOUBLE', 'NINE', 'YES', 'HALLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0020-681: hyp=['DOUBLED', 'NINE', 'TWO', 'THREE', 'ELZINORE', 'DOUBLE', 'NOT', 'YES', 'HELLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0021-682: ref=['I', 'SAY', "I'VE", 'BEEN', 'WONDERING', 'ABOUT', 'THIS', 'BUSINESS'] +7176-92135-0021-682: hyp=['I', 'SAY', "I'VE", 'BEEN', 'WANDERING', 'ABOUT', 'THIS', 'BUSINESS'] +7176-92135-0022-683: ref=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER', 'IN', 'THE', 'MIND', 'TO', 'SUFFER', 'THE', 'SLINGS', 'AND', 'ARROWS', 'WHAT', 'NO', 'HAMLET', 'SPEAKING'] +7176-92135-0022-683: hyp=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER', 'IN', 'THE', 'MIND', 'TO', 'SUFFER', 'THE', 'SLINGS', 'AND', 'ARROWS', 'WHAT', 'NO', 'HAMLET', 'SPEAKING'] +7176-92135-0023-684: ref=['YOU', 'GAVE', 'ME', 'DOUBLE', 'FIVE', 'I', 'WANT', 'DOUBLE', 'NINE', 'HALLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0023-684: hyp=['YOU', 'GAVE', 'ME', 'DOUBLE', 'FIVE', 'I', 'WANT', 'DOUBLE', 'NINE', 'HELLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0024-685: ref=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER'] +7176-92135-0024-685: hyp=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER'] +7176-92135-0025-686: ref=['IT', 'IS', 'TO', 'LET', 'HAMLET', 'IF', 'THAT', 'HAPPEN', 'TO', 'BE', 'THE', 'NAME', 'OF', 'YOUR', 'CHARACTER', 'ENTER', 'WITH', 'A', 'SMALL', 'DOG', 'PET', 'FALCON', 'MONGOOSE', 'TAME', 'BEAR', 'OR', 'WHATEVER', 'ANIMAL', 'IS', 'MOST', 'IN', 'KEEPING', 'WITH', 'THE', 'PART', 'AND', 'CONFIDE', 'IN', 'THIS', 'ANIMAL', 'SUCH', 'SORROWS', 'HOPES', 'OR', 'SECRET', 'HISTORY', 'AS', 'THE', 'AUDIENCE', 'HAS', 'GOT', 'TO', 'KNOW'] +7176-92135-0025-686: hyp=['IT', 'IS', 'TO', 'LET', 'HAMLET', 'IF', 'THAT', 'HAPPENED', 'TO', 'BE', 'THE', 'NAME', 'OF', 'YOUR', 'CHARACTER', 'INTO', 'A', 'SMALL', 'DOG', 'PET', 'FALCON', 'MONGOOSE', 'TAME', 'BEAR', 'ORDER', 'ANIMAL', 'IS', 'MOST', 'IN', 'KEEPING', 'WITH', 'THE', 'PART', 'AND', 'CONFIDE', 'IN', 
'THIS', 'ANIMAL', 'SUCH', 'SORROWS', 'HOPES', 'OR', 'SECRET', 'HISTORY', 'AS', 'THE', 'AUDIENCE', 'HAS', 'GOT', 'TO', 'KNOW'] +7176-92135-0026-687: ref=['ENTER', 'HAMLET', 'WITH', 'HIS', 'FAVOURITE', 'BOAR', 'HOUND'] +7176-92135-0026-687: hyp=['ENTER', 'HAMLET', 'WITH', 'HIS', 'FAVOURITE', 'BOARHOUND'] +7176-92135-0027-688: ref=['LADY', 'LARKSPUR', 'STARTS', 'SUDDENLY', 'AND', 'TURNS', 'TOWARDS', 'HIM'] +7176-92135-0027-688: hyp=['LADY', 'LARKSBURG', 'START', 'SUDDENLY', 'AND', 'TURNS', 'TOWARD', 'HIM'] +7176-92135-0028-689: ref=['LARKSPUR', 'BIT', 'ME', 'AGAIN', 'THIS', 'MORNING', 'FOR', 'THE', 'THIRD', 'TIME'] +7176-92135-0028-689: hyp=['LARKSBUR', 'BID', 'ME', 'AGAIN', 'THIS', 'MORNING', 'FOR', 'THE', 'THIRD', 'TIME'] +7176-92135-0029-690: ref=['I', 'WANT', 'TO', 'GET', 'AWAY', 'FROM', 'IT', 'ALL', 'SWOONS'] +7176-92135-0029-690: hyp=['I', 'WANT', 'TO', 'GET', 'AWAY', 'FROM', 'IT', 'ALL', 'SWOON'] +7176-92135-0030-691: ref=['ENTER', 'LORD', 'ARTHUR', 'FLUFFINOSE'] +7176-92135-0030-691: hyp=['ENTER', 'LORD', 'ARTHUR', "FLUFFINO'S"] +7176-92135-0031-692: ref=['AND', 'THERE', 'YOU', 'ARE', 'YOU', 'WILL', 'OF', 'COURSE', 'APPRECIATE', 'THAT', 'THE', 'UNFINISHED', 'SENTENCES', 'NOT', 'ONLY', 'SAVE', 'TIME', 'BUT', 'ALSO', 'MAKE', 'THE', 'MANOEUVRING', 'VERY', 'MUCH', 'MORE', 'NATURAL'] +7176-92135-0031-692: hyp=['AND', 'THERE', 'YOU', 'ARE', 'YOU', 'WILL', 'OF', 'COURSE', 'APPRECIATE', 'THAT', 'THE', 'UNFINISHANCES', 'NOT', 'ONLY', 'SAVE', 'TIME', 'BUT', 'ALSO', 'MAKE', 'THE', 'MANOEUVRING', 'VERY', 'MUCH', 'MORE', 'NATURAL'] +7176-92135-0032-693: ref=['HOW', 'YOU', 'MAY', 'BE', 'WONDERING', 'ARE', 'YOU', 'TO', 'BEGIN', 'YOUR', 'MASTERPIECE'] +7176-92135-0032-693: hyp=['HOW', 'YOU', 'MAY', 'BE', 'WONDERING', 'ARE', 'YE', 'TO', 'BEGIN', 'YOUR', 'MASTERPIECE'] +7176-92135-0033-694: ref=['RELAPSES', 'INTO', 'SILENCE', 'FOR', 'THE', 'REST', 'OF', 'THE', 'EVENING'] +7176-92135-0033-694: hyp=['RELAPSES', 'INTO', 'SILENCE', 'FOR', 'THE', 'REST', 'OF', 'THE', 'EVENING'] +7176-92135-0034-695: ref=['THE', 'DUCHESS', 'OF', 'SOUTHBRIDGE', 'TO', 'LORD', 'REGGIE', 'OH', 'REGGIE', 'WHAT', 'DID', 'YOU', 'SAY'] +7176-92135-0034-695: hyp=['THE', 'DUCHESS', 'OF', 'SOUTHBRIDGE', 'TWO', 'LORD', 'REGGIE', 'OH', 'REGGIE', 'WHAT', 'DID', 'YOU', 'SAY'] +7176-92135-0035-696: ref=['THEN', 'LORD', 'TUPPENY', 'WELL', 'WHAT', 'ABOUT', 'AUCTION'] +7176-92135-0035-696: hyp=['THEN', 'LORD', 'TOPPENNY', 'WELL', 'WHAT', 'ABOUT', 'AUCTION'] +7176-92135-0036-697: ref=['THE', 'CROWD', 'DRIFTS', 'OFF', 'LEAVING', 'THE', 'HERO', 'AND', 'HEROINE', 'ALONE', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'STAGE', 'AND', 'THEN', 'YOU', 'CAN', 'BEGIN'] +7176-92135-0036-697: hyp=['THE', 'CROWD', 'DRIFTS', 'OFF', 'LEAPING', 'THE', 'HERO', 'AND', 'HEROINE', 'ALONE', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'STAGE', 'AND', 'THEN', 'YOU', 'CAN', 'BEGIN'] +7176-92135-0037-698: ref=['THEN', 'IS', 'THE', 'TIME', 'TO', 'INTRODUCE', 'A', 'MEAL', 'ON', 'THE', 'STAGE'] +7176-92135-0037-698: hyp=['THEN', 'IS', 'THE', 'TIME', 'TO', 'INTRODUCE', 'A', 'MEAL', 'ON', 'THE', 'STAGE'] +7176-92135-0038-699: ref=['A', 'STAGE', 'MEAL', 'IS', 'POPULAR', 'BECAUSE', 'IT', 'PROVES', 'TO', 'THE', 'AUDIENCE', 'THAT', 'THE', 'ACTORS', 'EVEN', 'WHEN', 'CALLED', 'CHARLES', 'HAWTREY', 'OR', 'OWEN', 'NARES', 'ARE', 'REAL', 'PEOPLE', 'JUST', 'LIKE', 'YOU', 'AND', 'ME'] +7176-92135-0038-699: hyp=['A', 'SAGE', 'MEAL', 'IS', 'POPULAR', 'BECAUSE', 'IT', 'PROVED', 'TO', 'THE', 'AUDIENCE', 'THAT', 'THE', 'ACTORS', 'EVEN', 'WHEN', 'CALLED', 'CHARLES', 'HOLTREE', 'OR', 'OWENAIRS', 'ARE', 
'REAL', 'PEOPLE', 'JUST', 'LIKE', 'YOU', 'AND', 'ME'] +7176-92135-0039-700: ref=['TEA', 'PLEASE', 'MATTHEWS', 'BUTLER', 'IMPASSIVELY'] +7176-92135-0039-700: hyp=['T', 'PLEASE', 'MATTHEWS', 'BUTLER', 'IMPASSIVELY'] +7176-92135-0040-701: ref=['HOSTESS', 'REPLACES', 'LUMP', 'AND', 'INCLINES', 'EMPTY', 'TEAPOT', 'OVER', 'TRAY', 'FOR', 'A', 'MOMENT', 'THEN', 'HANDS', 'HIM', 'A', 'CUP', 'PAINTED', 'BROWN', 'INSIDE', 'THUS', 'DECEIVING', 'THE', 'GENTLEMAN', 'WITH', 'THE', 'TELESCOPE', 'IN', 'THE', 'UPPER', 'CIRCLE'] +7176-92135-0040-701: hyp=['HOSTESS', 'REPLACES', 'LUMP', 'AND', 'INCLINES', 'EMPTY', 'TEAPOT', 'OVER', 'TRAY', 'FOR', 'A', 'MOMENT', 'THEN', 'HANDSOME', 'A', 'CUP', 'PAINTED', 'BROWN', 'INSIDE', 'LUST', 'DECEIVING', 'THE', 'GENTLEMAN', 'WITH', 'THE', 'TELESCOPE', 'IN', 'THE', 'UPPER', 'CIRCLE'] +7176-92135-0041-702: ref=['RE', 'ENTER', 'BUTLER', 'AND', 'THREE', 'FOOTMEN', 'WHO', 'REMOVE', 'THE', 'TEA', 'THINGS', 'HOSTESS', 'TO', 'GUEST'] +7176-92135-0041-702: hyp=['RE', 'ENTER', 'BUTLER', 'AND', 'THREE', 'FOOTMEN', 'WHO', 'MOVED', 'THE', 'TEA', 'THINGS', 'HOSTESS', 'TWO', 'GUESTS'] +7176-92135-0042-703: ref=['IN', 'NOVELS', 'THE', 'HERO', 'HAS', 'OFTEN', 'PUSHED', 'HIS', 'MEALS', 'AWAY', 'UNTASTED', 'BUT', 'NO', 'STAGE', 'HERO', 'WOULD', 'DO', 'ANYTHING', 'SO', 'UNNATURAL', 'AS', 'THIS'] +7176-92135-0042-703: hyp=['AND', 'NOVELS', 'THE', 'HERO', 'HAS', 'OFTEN', 'PUSHED', 'HIS', 'MEALS', 'AWAY', 'UNTASTED', 'BUT', 'NO', 'STEED', 'HERO', 'WOULD', 'DO', 'ANYTHING', 'SO', 'UNNATURAL', 'AS', 'THIS'] +7176-92135-0043-704: ref=['TWO', 'BITES', 'ARE', 'MADE', 'AND', 'THE', 'BREAD', 'IS', 'CRUMBLED', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'EAGERNESS', 'INDEED', 'ONE', 'FEELS', 'THAT', 'IN', 'REAL', 'LIFE', 'THE', 'GUEST', 'WOULD', 'CLUTCH', 'HOLD', 'OF', 'THE', 'FOOTMAN', 'AND', 'SAY', 'HALF', 'A', 'MO', 'OLD', 'CHAP', 'I', "HAVEN'T", 'NEARLY', 'FINISHED', 'BUT', 'THE', 'ACTOR', 'IS', 'BETTER', 'SCHOOLED', 'THAN', 'THIS'] +7176-92135-0043-704: hyp=['TWO', 'WHITES', 'ARE', 'MADE', 'AND', 'THE', 'BREAD', 'IS', 'CRUMBLED', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'EAGERNESS', 'INDEED', 'ONE', 'FEELS', 'THAT', 'IN', 'REAL', 'LIFE', 'THE', 'GUEST', 'WOULD', 'CLUTCH', 'HOLD', 'OF', 'THE', 'FOOTMAN', 'AND', 'SAY', 'HALF', 'A', 'MOLD', 'CHAP', 'I', "HAVEN'T", 'NEARLY', 'FINISHED', 'BUT', 'THE', 'ACTOR', 'IS', 'BETTER', 'SCHOOLED', 'THAN', 'THIS'] +7176-92135-0044-705: ref=['BUT', 'IT', 'IS', 'THE', 'CIGARETTE', 'WHICH', 'CHIEFLY', 'HAS', 'BROUGHT', 'THE', 'MODERN', 'DRAMA', 'TO', 'ITS', 'PRESENT', 'STATE', 'OF', 'PERFECTION'] +7176-92135-0044-705: hyp=['BUT', 'IT', 'IS', 'A', 'CIGARETTE', 'WHICH', 'CHIEFLY', 'HAS', 'BROUGHT', 'THE', 'MODERN', 'DRAMA', 'TO', 'ITS', 'PRESENT', 'STATE', 'OF', 'PERFECTION'] +7176-92135-0045-706: ref=['LORD', 'JOHN', 'TAKING', 'OUT', 'GOLD', 'CIGARETTE', 'CASE', 'FROM', 'HIS', 'LEFT', 'HAND', 'UPPER', 'WAISTCOAT', 'POCKET'] +7176-92135-0045-706: hyp=['LORD', 'JOHN', 'TAKING', 'OUT', 'GOLD', 'SICK', 'RED', 'CASE', 'FROM', 'HIS', 'LEFT', 'HAND', 'UPPER', 'WAISTCOAT', 'POCKET'] +7729-102255-0000-261: ref=['THE', 'BOGUS', 'LEGISLATURE', 'NUMBERED', 'THIRTY', 'SIX', 'MEMBERS'] +7729-102255-0000-261: hyp=['THE', 'BOGUS', 'LEGISLATURE', 'NUMBERED', 'THIRTY', 'SIX', 'MEMBERS'] +7729-102255-0001-262: ref=['THIS', 'WAS', 'AT', 'THE', 'MARCH', 'ELECTION', 'EIGHTEEN', 'FIFTY', 'FIVE'] +7729-102255-0001-262: hyp=['THIS', 'WAS', 'AT', 'THE', 'MARCH', 'ELECTION', 'EIGHTEEN', 'FIFTY', 'FIVE'] +7729-102255-0002-263: ref=['THAT', "SUMMER'S", 'EMIGRATION', 'HOWEVER', 'BEING', 'MAINLY', 'FROM', 
'THE', 'FREE', 'STATES', 'GREATLY', 'CHANGED', 'THE', 'RELATIVE', 'STRENGTH', 'OF', 'THE', 'TWO', 'PARTIES'] +7729-102255-0002-263: hyp=['THAT', "SUMMER'S", 'EMIGRATION', 'HOWEVER', 'BEING', 'MAINLY', 'FROM', 'THE', 'FREE', 'STATES', 'GREATLY', 'CHANGED', 'THE', 'RELATIVE', 'STRENGTH', 'OF', 'THE', 'TWO', 'PARTIES'] +7729-102255-0003-264: ref=['FOR', 'GENERAL', 'SERVICE', 'THEREFORE', 'REQUIRING', 'NO', 'SPECIAL', 'EFFORT', 'THE', 'NUMERICAL', 'STRENGTH', 'OF', 'THE', 'FACTIONS', 'WAS', 'ABOUT', 'EQUAL', 'WHILE', 'ON', 'EXTRAORDINARY', 'OCCASIONS', 'THE', 'TWO', 'THOUSAND', 'BORDER', 'RUFFIAN', 'RESERVE', 'LYING', 'A', 'LITTLE', 'FARTHER', 'BACK', 'FROM', 'THE', 'STATE', 'LINE', 'COULD', 'AT', 'ANY', 'TIME', 'EASILY', 'TURN', 'THE', 'SCALE'] +7729-102255-0003-264: hyp=['FOR', 'GENERAL', 'SERVICE', 'THEREFORE', 'REQUIRING', 'NO', 'SPECIAL', 'EFFORT', 'THE', 'NUMERICAL', 'STRENGTH', 'OF', 'THE', 'FACTIONS', 'WAS', 'ABOUT', 'EQUAL', 'WHILE', 'ON', 'EXTRAORDINARY', 'OCCASIONS', 'THE', 'TWO', 'THOUSAND', 'BORDER', 'RUFFIAN', 'RESERVED', 'LYING', 'A', 'LITTLE', 'FARTHER', 'BACK', 'FROM', 'THE', 'STATE', 'LINE', 'COULD', 'AT', 'ANY', 'TIME', 'EASILY', 'TURN', 'THE', 'SCALE'] +7729-102255-0004-265: ref=['THE', 'FREE', 'STATE', 'MEN', 'HAD', 'ONLY', 'THEIR', 'CONVICTIONS', 'THEIR', 'INTELLIGENCE', 'THEIR', 'COURAGE', 'AND', 'THE', 'MORAL', 'SUPPORT', 'OF', 'THE', 'NORTH', 'THE', 'CONSPIRACY', 'HAD', 'ITS', 'SECRET', 'COMBINATION', 'THE', 'TERRITORIAL', 'OFFICIALS', 'THE', 'LEGISLATURE', 'THE', 'BOGUS', 'LAWS', 'THE', 'COURTS', 'THE', 'MILITIA', 'OFFICERS', 'THE', 'PRESIDENT', 'AND', 'THE', 'ARMY'] +7729-102255-0004-265: hyp=['THE', 'FREE', 'STATE', 'MEN', 'HAD', 'ONLY', 'THEIR', 'CONVICTIONS', 'THEIR', 'INTELLIGENCE', 'THEIR', 'COURAGE', 'AND', 'THE', 'MORAL', 'SUPPORT', 'OF', 'THE', 'NORTH', 'THE', 'CONSPIRACY', 'HAD', 'ITS', 'SECRET', 'COMBINATION', 'THE', 'TERRITORIAL', 'OFFICIALS', 'THE', 'LEGISLATURE', 'THE', 'BOGUS', 'LAWS', 'THE', 'COURTS', 'THE', 'MILITIA', 'OFFICERS', 'THE', 'PRESIDENT', 'AND', 'THE', 'ARMY'] +7729-102255-0005-266: ref=['THIS', 'WAS', 'A', 'FORMIDABLE', 'ARRAY', 'OF', 'ADVANTAGES', 'SLAVERY', 'WAS', 'PLAYING', 'WITH', 'LOADED', 'DICE'] +7729-102255-0005-266: hyp=['THIS', 'WAS', 'A', 'FORMIDABLE', 'ARRAY', 'OF', 'ADVANTAGES', 'SLAVERY', 'WAS', 'PLAYING', 'WITH', 'LOADED', 'DICE'] +7729-102255-0006-267: ref=['COMING', 'BY', 'WAY', 'OF', 'THE', 'MISSOURI', 'RIVER', 'TOWNS', 'HE', 'FELL', 'FIRST', 'AMONG', 'BORDER', 'RUFFIAN', 'COMPANIONSHIP', 'AND', 'INFLUENCES', 'AND', 'PERHAPS', 'HAVING', 'HIS', 'INCLINATIONS', 'ALREADY', 'MOLDED', 'BY', 'HIS', 'WASHINGTON', 'INSTRUCTIONS', 'HIS', 'EARLY', 'IMPRESSIONS', 'WERE', 'DECIDEDLY', 'ADVERSE', 'TO', 'THE', 'FREE', 'STATE', 'CAUSE'] +7729-102255-0006-267: hyp=['COMING', 'BY', 'WAY', 'OF', 'THE', 'MISSOURI', 'RIVER', 'TOWNS', 'HE', 'FELL', 'FIRST', 'AMONG', 'BORDER', 'RUFFIAN', 'COMPANIONSHIP', 'AND', 'INFLUENCES', 'AND', 'PERHAPS', 'HAVING', 'HIS', 'INCLINATIONS', 'ALREADY', 'MOULDED', 'BY', 'HIS', 'WASHINGTON', 'INSTRUCTIONS', 'HIS', 'EARLY', 'IMPRESSIONS', 'WERE', 'DECIDEDLY', 'ADVERSE', 'TO', 'THE', 'FREE', 'STATE', 'CAUSE'] +7729-102255-0007-268: ref=['HIS', 'RECEPTION', 'SPEECH', 'AT', 'WESTPORT', 'IN', 'WHICH', 'HE', 'MAINTAINED', 'THE', 'LEGALITY', 'OF', 'THE', 'LEGISLATURE', 'AND', 'HIS', 'DETERMINATION', 'TO', 'ENFORCE', 'THEIR', 'LAWS', 'DELIGHTED', 'HIS', 'PRO', 'SLAVERY', 'AUDITORS'] +7729-102255-0007-268: hyp=['HIS', 'RECEPTION', 'SPEECH', 'AT', 'WESTPORT', 'IN', 'WHICH', 'HE', 'MAINTAINED', 'THE', 'LEGALITY', 'OF', 
'THE', 'LEGISLATURE', 'AND', 'HIS', 'DETERMINATION', 'TO', 'ENFORCE', 'THEIR', 'LAWS', 'DELIGHTED', 'HIS', 'PRO', 'SLAVERY', 'AUDITORS'] +7729-102255-0008-269: ref=['ALL', 'THE', 'TERRITORIAL', 'DIGNITARIES', 'WERE', 'PRESENT', 'GOVERNOR', 'SHANNON', 'PRESIDED', 'JOHN', 'CALHOUN', 'THE', 'SURVEYOR', 'GENERAL', 'MADE', 'THE', 'PRINCIPAL', 'SPEECH', 'A', 'DENUNCIATION', 'OF', 'THE', 'ABOLITIONISTS', 'SUPPORTING', 'THE', 'TOPEKA', 'MOVEMENT', 'CHIEF', 'JUSTICE', 'LECOMPTE', 'DIGNIFIED', 'THE', 'OCCASION', 'WITH', 'APPROVING', 'REMARKS'] +7729-102255-0008-269: hyp=['ALL', 'THE', 'TERRITORIAL', 'DIGNITARIES', 'WERE', 'PRESENT', 'GOVERNOR', 'SHANNON', 'PRESIDED', 'JOHN', 'CALHOUN', 'THE', 'SURVEYOR', 'GENERAL', 'MADE', 'THE', 'PRINCIPAL', 'SPEECH', 'A', 'DENUNCIATION', 'OF', 'THE', 'ABOLITIONIST', 'SUPPORTING', 'THE', 'TOPECA', 'MOVEMENT', 'CHIEF', 'JUSTICE', 'LE', 'COMTE', 'DIGNIFIED', 'THE', 'OCCASION', 'WITH', 'APPROVING', 'REMARKS'] +7729-102255-0009-270: ref=['ALL', 'DISSENT', 'ALL', 'NON', 'COMPLIANCE', 'ALL', 'HESITATION', 'ALL', 'MERE', 'SILENCE', 'EVEN', 'WERE', 'IN', 'THEIR', 'STRONGHOLD', 'TOWNS', 'LIKE', 'LEAVENWORTH', 'BRANDED', 'AS', 'ABOLITIONISM', 'DECLARED', 'TO', 'BE', 'HOSTILITY', 'TO', 'THE', 'PUBLIC', 'WELFARE', 'AND', 'PUNISHED', 'WITH', 'PROSCRIPTION', 'PERSONAL', 'VIOLENCE', 'EXPULSION', 'AND', 'FREQUENTLY', 'DEATH'] +7729-102255-0009-270: hyp=['ALL', 'DESCENT', 'ALL', 'NON', 'COMPLIANCE', 'ALL', 'HESITATION', 'ALL', 'MERE', 'SILENCE', 'EVEN', 'WERE', 'IN', 'THEIR', 'STRONGHOLD', 'TOWNS', 'LIKE', 'LEVIN', 'WORTH', 'BRANDED', 'AS', 'ABOLITIONISM', 'DECLARED', 'TO', 'BE', 'HOSTILITY', 'TO', 'THE', 'PUBLIC', 'WELFARE', 'AND', 'PUNISHED', 'WITH', 'PROSCRIPTION', 'PERSONAL', 'VIOLENCE', 'EXPULSION', 'AND', 'FREQUENTLY', 'DEATH'] +7729-102255-0010-271: ref=['OF', 'THE', 'LYNCHINGS', 'THE', 'MOBS', 'AND', 'THE', 'MURDERS', 'IT', 'WOULD', 'BE', 'IMPOSSIBLE', 'EXCEPT', 'IN', 'A', 'VERY', 'EXTENDED', 'WORK', 'TO', 'NOTE', 'THE', 'FREQUENT', 'AND', 'ATROCIOUS', 'DETAILS'] +7729-102255-0010-271: hyp=['OF', 'THE', 'LUNCHINGS', 'THE', 'MOBS', 'AND', 'THE', 'MURDERS', 'IT', 'WOULD', 'BE', 'IMPOSSIBLE', 'EXCEPT', 'IN', 'A', 'VERY', 'EXTENDED', 'WORK', 'TO', 'NOTE', 'THE', 'FREQUENT', 'AND', 'ATROCIOUS', 'DETAILS'] +7729-102255-0011-272: ref=['THE', 'PRESENT', 'CHAPTERS', 'CAN', 'ONLY', 'TOUCH', 'UPON', 'THE', 'MORE', 'SALIENT', 'MOVEMENTS', 'OF', 'THE', 'CIVIL', 'WAR', 'IN', 'KANSAS', 'WHICH', 'HAPPILY', 'WERE', 'NOT', 'SANGUINARY', 'IF', 'HOWEVER', 'THE', 'INDIVIDUAL', 'AND', 'MORE', 'ISOLATED', 'CASES', 'OF', 'BLOODSHED', 'COULD', 'BE', 'DESCRIBED', 'THEY', 'WOULD', 'SHOW', 'A', 'STARTLING', 'AGGREGATE', 'OF', 'BARBARITY', 'AND', 'LOSS', 'OF', 'LIFE', 'FOR', "OPINION'S", 'SAKE'] +7729-102255-0011-272: hyp=['THE', 'PRESENT', 'CHAPTERS', 'CAN', 'ONLY', 'TOUCH', 'UPON', 'THE', 'MORE', 'SALIENT', 'MOVEMENTS', 'OF', 'THE', 'CIVIL', 'WAR', 'IN', 'KANSAS', 'WHICH', 'HAPPILY', 'ARE', 'NOT', 'SANGUINARY', 'IF', 'HOWEVER', 'THE', 'INDIVIDUAL', 'AND', 'MORE', 'ISOLATED', 'CASES', 'OF', 'BLOODSHED', 'COULD', 'BE', 'DESCRIBED', 'THEY', 'WOULD', 'SHOW', 'A', 'STARTLING', 'AGGREGATE', 'OF', 'BARBARITY', 'AND', 'A', 'LOSS', 'OF', 'LIFE', 'FOR', "OPINION'S", 'SAKE'] +7729-102255-0012-273: ref=['SEVERAL', 'HUNDRED', 'FREE', 'STATE', 'MEN', 'PROMPTLY', 'RESPONDED', 'TO', 'THE', 'SUMMONS'] +7729-102255-0012-273: hyp=['SEVERAL', 'HUNDRED', 'FREE', 'STATE', 'MEN', 'PROMPTLY', 'RESPONDED', 'TO', 'THE', 'SUMMONS'] +7729-102255-0013-274: ref=['IT', 'WAS', 'IN', 'FACT', 'THE', 'BEST', 'WEAPON', 'OF', 'ITS', 
'DAY'] +7729-102255-0013-274: hyp=['IT', 'WAS', 'IN', 'FACT', 'THE', 'BEST', 'WEAPON', 'OF', 'ITS', 'DAY'] +7729-102255-0014-275: ref=['THE', 'LEADERS', 'OF', 'THE', 'CONSPIRACY', 'BECAME', 'DISTRUSTFUL', 'OF', 'THEIR', 'POWER', 'TO', 'CRUSH', 'THE', 'TOWN'] +7729-102255-0014-275: hyp=['THE', 'LEADERS', 'OF', 'THE', 'CONSPIRACY', 'BECAME', 'DISTRUSTFUL', 'OF', 'THEIR', 'POWER', 'TO', 'CRUSH', 'THE', 'TOWN'] +7729-102255-0015-276: ref=['ONE', 'OF', 'HIS', 'MILITIA', 'GENERALS', 'SUGGESTED', 'THAT', 'THE', 'GOVERNOR', 'SHOULD', 'REQUIRE', 'THE', 'OUTLAWS', 'AT', 'LAWRENCE', 'AND', 'ELSEWHERE', 'TO', 'SURRENDER', 'THE', 'SHARPS', 'RIFLES', 'ANOTHER', 'WROTE', 'ASKING', 'HIM', 'TO', 'CALL', 'OUT', 'THE', 'GOVERNMENT', 'TROOPS', 'AT', 'FORT', 'LEAVENWORTH'] +7729-102255-0015-276: hyp=['ONE', 'OF', 'HIS', 'MILITIA', 'GENERALS', 'SUGGESTED', 'THAT', 'THE', 'GOVERNOR', 'SHOULD', 'REQUIRE', 'THE', 'OUTLAWS', 'AT', 'LAWRENCE', 'AND', 'ELSEWHERE', 'TO', 'SURRENDER', 'THE', "SHARP'S", 'RIFLES', 'ANOTHER', 'WROTE', 'ASKING', 'HIM', 'TO', 'CALL', 'OUT', 'THE', 'GOVERNMENT', 'TROOPS', 'AT', 'FORT', 'LEVINWORTH'] +7729-102255-0016-277: ref=['THE', 'GOVERNOR', 'ON', 'HIS', 'PART', 'BECOMING', 'DOUBTFUL', 'OF', 'THE', 'LEGALITY', 'OF', 'EMPLOYING', 'MISSOURI', 'MILITIA', 'TO', 'ENFORCE', 'KANSAS', 'LAWS', 'WAS', 'ALSO', 'EAGER', 'TO', 'SECURE', 'THE', 'HELP', 'OF', 'FEDERAL', 'TROOPS'] +7729-102255-0016-277: hyp=['THE', 'GOVERNOR', 'ON', 'HIS', 'PART', 'BECOMING', 'DOUBTFUL', 'OF', 'THE', 'LEGALITY', 'OF', 'EMPLOYING', 'MISSOURI', 'MILITIA', 'TO', 'ENFORCE', 'KANSAS', 'LAWS', 'WAS', 'ALSO', 'EAGER', 'TO', 'SECURE', 'THE', 'HELP', 'OF', 'FEDERAL', 'TROOPS'] +7729-102255-0017-278: ref=['SHERIFF', 'JONES', 'HAD', 'HIS', 'POCKETS', 'ALWAYS', 'FULL', 'OF', 'WRITS', 'ISSUED', 'IN', 'THE', 'SPIRIT', 'OF', 'PERSECUTION', 'BUT', 'WAS', 'OFTEN', 'BAFFLED', 'BY', 'THE', 'SHARP', 'WITS', 'AND', 'READY', 'RESOURCES', 'OF', 'THE', 'FREE', 'STATE', 'PEOPLE', 'AND', 'SOMETIMES', 'DEFIED', 'OUTRIGHT'] +7729-102255-0017-278: hyp=['SHERIFF', 'JONES', 'HAD', 'HIS', 'POCKETS', 'ALWAYS', 'FULL', 'OF', 'WRITS', 'ISSUED', 'IN', 'THE', 'SPIRIT', 'OF', 'PERSECUTION', 'BUT', 'WAS', 'OFTEN', 'BAFFLED', 'BY', 'THE', 'SHARP', 'WITS', 'AND', 'READY', 'RESOURCES', 'OF', 'THE', 'FREE', 'STATE', 'PEOPLE', 'AND', 'SOMETIMES', 'DEFIED', 'OUTRIGHT'] +7729-102255-0018-279: ref=['LITTLE', 'BY', 'LITTLE', 'HOWEVER', 'THE', 'LATTER', 'BECAME', 'HEMMED', 'AND', 'BOUND', 'IN', 'THE', 'MESHES', 'OF', 'THE', 'VARIOUS', 'DEVICES', 'AND', 'PROCEEDINGS', 'WHICH', 'THE', 'TERRITORIAL', 'OFFICIALS', 'EVOLVED', 'FROM', 'THE', 'BOGUS', 'LAWS'] +7729-102255-0018-279: hyp=['LITTLE', 'BY', 'LITTLE', 'HOWEVER', 'THE', 'LATTER', 'BECAME', 'HEMMED', 'AND', 'BOUND', 'IN', 'THE', 'MESHES', 'OF', 'THE', 'VARIOUS', 'DEVICES', 'AND', 'PROCEEDINGS', 'WHICH', 'THE', 'TERRITORIAL', 'OFFICIALS', 'EVOLVED', 'FROM', 'THE', 'VOGUS', 'LAWS'] +7729-102255-0019-280: ref=['TO', 'EMBARRASS', 'THIS', 'DAMAGING', 'EXPOSURE', 'JUDGE', 'LECOMPTE', 'ISSUED', 'A', 'WRIT', 'AGAINST', 'THE', 'EX', 'GOVERNOR', 'ON', 'A', 'FRIVOLOUS', 'CHARGE', 'OF', 'CONTEMPT'] +7729-102255-0019-280: hyp=['TO', 'EMBARRASS', 'THIS', 'DAMAGING', 'EXPOSURE', 'JUDGE', 'LECOMTE', 'ISSUED', 'A', 'WRIT', 'AGAINST', 'THE', 'EX', 'GOVERNOR', 'ON', 'A', 'FRIVOLOUS', 'CHARGE', 'OF', 'CONTEMPT'] +7729-102255-0020-281: ref=['THE', 'INCIDENT', 'WAS', 'NOT', 'VIOLENT', 'NOR', 'EVEN', 'DRAMATIC', 'NO', 'POSSE', 'WAS', 'SUMMONED', 'NO', 'FURTHER', 'EFFORT', 'MADE', 'AND', 'REEDER', 'FEARING', 'PERSONAL', 'VIOLENCE', 
'SOON', 'FLED', 'IN', 'DISGUISE'] +7729-102255-0020-281: hyp=['THE', 'INCIDENT', 'WAS', 'NOT', 'VIOLENT', 'NOR', 'EVEN', 'DRAMATIC', 'NO', 'POSSE', 'WAS', 'SUMMON', 'NO', 'FURTHER', 'EFFORT', 'MADE', 'AND', 'READER', 'FEARING', 'PERSONAL', 'VIOLENCE', 'SOON', 'FLED', 'IN', 'DISGUISE'] +7729-102255-0021-282: ref=['BUT', 'THE', 'AFFAIR', 'WAS', 'MAGNIFIED', 'AS', 'A', 'CROWNING', 'PROOF', 'THAT', 'THE', 'FREE', 'STATE', 'MEN', 'WERE', 'INSURRECTIONISTS', 'AND', 'OUTLAWS'] +7729-102255-0021-282: hyp=['BUT', 'THE', 'AFFAIR', 'WAS', 'MAGNIFIED', 'AS', 'A', 'CROWNING', 'PROOF', 'THAT', 'THE', 'FREE', 'STATE', 'MEN', 'WERE', 'INSURRECTIONISTS', 'AND', 'OUTLAWS'] +7729-102255-0022-283: ref=['FROM', 'THESE', 'AGAIN', 'SPRANG', 'BARRICADED', 'AND', 'FORTIFIED', 'DWELLINGS', 'CAMPS', 'AND', 'SCOUTING', 'PARTIES', 'FINALLY', 'CULMINATING', 'IN', 'ROVING', 'GUERRILLA', 'BANDS', 'HALF', 'PARTISAN', 'HALF', 'PREDATORY'] +7729-102255-0022-283: hyp=['FROM', 'THESE', 'AGAIN', 'SPRANG', 'BARRICADED', 'AND', 'FORTIFIED', 'DWELLINGS', 'CAMPS', 'AND', 'SCOUT', 'PARTIES', 'FINALLY', 'CULMINATING', 'AND', 'ROVING', 'GUERRILLA', 'VANS', 'HALF', 'PARTISAN', 'HALF', 'PREDATORY'] +7729-102255-0023-284: ref=['THEIR', 'DISTINCTIVE', 'CHARACTERS', 'HOWEVER', 'DISPLAY', 'ONE', 'BROAD', 'AND', 'UNFAILING', 'DIFFERENCE'] +7729-102255-0023-284: hyp=['THEIR', 'DISTINCTIVE', 'CHARACTERS', 'HOWEVER', 'DISPLAY', 'ONE', 'BROAD', 'AND', 'UNFAILING', 'DIFFERENCE'] +7729-102255-0024-285: ref=['THE', 'FREE', 'STATE', 'MEN', 'CLUNG', 'TO', 'THEIR', 'PRAIRIE', 'TOWNS', 'AND', 'PRAIRIE', 'RAVINES', 'WITH', 'ALL', 'THE', 'OBSTINACY', 'AND', 'COURAGE', 'OF', 'TRUE', 'DEFENDERS', 'OF', 'THEIR', 'HOMES', 'AND', 'FIRESIDES'] +7729-102255-0024-285: hyp=['THE', 'FREE', 'STATE', 'MEN', 'CLUNG', 'TO', 'THEIR', 'PRAIRIE', 'TOWNS', 'AND', 'PRAIRINES', 'WITH', 'ALL', 'THE', 'OBSTINACY', 'AND', 'COURAGE', 'OF', 'TRUE', 'DEFENDERS', 'OF', 'THEIR', 'HOMES', 'AND', 'FIRESIDES'] +7729-102255-0025-286: ref=['THEIR', 'ASSUMED', 'CHARACTER', 'CHANGED', 'WITH', 'THEIR', 'CHANGING', 'OPPORTUNITIES', 'OR', 'NECESSITIES'] +7729-102255-0025-286: hyp=['THERE', 'ASSUMED', 'CHARACTER', 'CHANGED', 'WITH', 'THEIR', 'CHANGING', 'OPPORTUNITIES', 'OR', 'NECESSITIES'] +7729-102255-0026-287: ref=['IN', 'THE', 'SHOOTING', 'OF', 'SHERIFF', 'JONES', 'IN', 'LAWRENCE', 'AND', 'IN', 'THE', 'REFUSAL', 'OF', 'EX', 'GOVERNOR', 'BEEDER', 'TO', 'ALLOW', 'THE', 'DEPUTY', 'MARSHAL', 'TO', 'ARREST', 'HIM', 'THEY', 'DISCOVERED', 'GRAVE', 'OFFENSES', 'AGAINST', 'THE', 'TERRITORIAL', 'AND', 'UNITED', 'STATES', 'LAWS'] +7729-102255-0026-287: hyp=['IN', 'THE', 'SHOOTING', 'OF', 'SHERIFF', 'JONES', 'IN', 'LAWRENCE', 'AND', 'IN', 'THE', 'REFUSAL', 'OF', 'EX', 'GOVERNOR', 'READER', 'TO', 'ALLOW', 'THE', 'DEPUTY', 'MARSHAL', 'TO', 'ARREST', 'HIM', 'THEY', 'DISCOVERED', 'GRAVE', 'OFFENCES', 'AGAINST', 'THE', 'TERRITORIAL', 'AND', 'THE', 'UNITED', 'STATES', 'LAWS'] +7729-102255-0027-288: ref=['FOOTNOTE', 'SUMNER', 'TO', 'SHANNON', 'MAY', 'TWELFTH', 'EIGHTEEN', 'FIFTY', 'SIX'] +7729-102255-0027-288: hyp=['FOOTNOTE', 'SUMNER', 'TO', 'SHANNON', 'MAY', 'TWELFTH', 'EIGHTEEN', 'FIFTY', 'SIX'] +7729-102255-0028-289: ref=['PRIVATE', 'PERSONS', 'WHO', 'HAD', 'LEASED', 'THE', 'FREE', 'STATE', 'HOTEL', 'VAINLY', 'BESOUGHT', 'THE', 'VARIOUS', 'AUTHORITIES', 'TO', 'PREVENT', 'THE', 'DESTRUCTION', 'OF', 'THEIR', 'PROPERTY'] +7729-102255-0028-289: hyp=['PRIVATE', 'PERSONS', 'WHO', 'HAD', 'LEAST', 'THE', 'FREE', 'STATE', 'HOTEL', 'VAINLY', 'BESOUGHT', 'THE', 'VARIOUS', 'AUTHORITIES', 'TO', 'PRESENT', 'THE', 
'DESTRUCTION', 'OF', 'THEIR', 'PROPERTY'] +7729-102255-0029-290: ref=['TEN', 'DAYS', 'WERE', 'CONSUMED', 'IN', 'THESE', 'NEGOTIATIONS', 'BUT', 'THE', 'SPIRIT', 'OF', 'VENGEANCE', 'REFUSED', 'TO', 'YIELD'] +7729-102255-0029-290: hyp=['TEN', 'DAYS', 'WERE', 'CONSUMED', 'IN', 'THESE', 'NEGOTIATIONS', 'BUT', 'THE', 'SPIRIT', 'OF', 'VENGEANCE', 'REFUSED', 'TO', 'YIELD'] +7729-102255-0030-291: ref=['HE', 'SUMMONED', 'HALF', 'A', 'DOZEN', 'CITIZENS', 'TO', 'JOIN', 'HIS', 'POSSE', 'WHO', 'FOLLOWED', 'OBEYED', 'AND', 'ASSISTED', 'HIM'] +7729-102255-0030-291: hyp=['HE', 'SUMMONED', 'HALF', 'A', 'DOZEN', 'CITIZENS', 'TO', 'JOIN', 'HIS', 'POSSE', 'WHO', 'FOLLOWED', 'OBEYED', 'AND', 'ASSISTED', 'HIM'] +7729-102255-0031-292: ref=['HE', 'CONTINUED', 'HIS', 'PRETENDED', 'SEARCH', 'AND', 'TO', 'GIVE', 'COLOR', 'TO', 'HIS', 'ERRAND', 'MADE', 'TWO', 'ARRESTS'] +7729-102255-0031-292: hyp=['HE', 'CONTINUED', 'HIS', 'PRETENDED', 'SEARCH', 'AND', 'TO', 'GIVE', 'COLOR', 'TO', 'HIS', 'ERRAND', 'MADE', 'TO', 'ARREST'] +7729-102255-0032-293: ref=['THE', 'FREE', 'STATE', 'HOTEL', 'A', 'STONE', 'BUILDING', 'IN', 'DIMENSIONS', 'FIFTY', 'BY', 'SEVENTY', 'FEET', 'THREE', 'STORIES', 'HIGH', 'AND', 'HANDSOMELY', 'FURNISHED', 'PREVIOUSLY', 'OCCUPIED', 'ONLY', 'FOR', 'LODGING', 'ROOMS', 'ON', 'THAT', 'DAY', 'FOR', 'THE', 'FIRST', 'TIME', 'OPENED', 'ITS', 'TABLE', 'ACCOMMODATIONS', 'TO', 'THE', 'PUBLIC', 'AND', 'PROVIDED', 'A', 'FREE', 'DINNER', 'IN', 'HONOR', 'OF', 'THE', 'OCCASION'] +7729-102255-0032-293: hyp=['THE', 'FREE', 'STATE', 'HOTEL', 'A', 'STONE', 'BUILDING', 'IN', 'DIMENSIONS', 'FIFTY', 'BY', 'SEVENTY', 'FEET', 'THREE', 'STORIES', 'HIGH', 'AND', 'HANDSOMELY', 'FURNISHED', 'PREVIOUSLY', 'OCCUPIED', 'ONLY', 'FOR', 'LODGING', 'ROOMS', 'ON', 'THAT', 'DAY', 'FOR', 'THE', 'FIRST', 'TIME', 'OPENED', 'ITS', 'TABLE', 'ACCOMMODATIONS', 'TO', 'THE', 'PUBLIC', 'AND', 'PROVIDED', 'A', 'FREE', 'DINNER', 'IN', 'HONOR', 'OF', 'THE', 'OCCASION'] +7729-102255-0033-294: ref=['AS', 'HE', 'HAD', 'PROMISED', 'TO', 'PROTECT', 'THE', 'HOTEL', 'THE', 'REASSURED', 'CITIZENS', 'BEGAN', 'TO', 'LAUGH', 'AT', 'THEIR', 'OWN', 'FEARS'] +7729-102255-0033-294: hyp=['AS', 'HE', 'HAD', 'PROMISED', 'TO', 'PROTECT', 'THE', 'HOTEL', 'THE', 'REASSURED', 'CITIZENS', 'BEGAN', 'TO', 'LAUGH', 'AT', 'THEIR', 'OWN', 'FEARS'] +7729-102255-0034-295: ref=['TO', 'THEIR', 'SORROW', 'THEY', 'WERE', 'SOON', 'UNDECEIVED'] +7729-102255-0034-295: hyp=['TO', 'THEIR', 'SORROW', 'THEY', 'WERE', 'SOON', 'UNDECEIVED'] +7729-102255-0035-296: ref=['THE', 'MILITARY', 'FORCE', 'PARTLY', 'RABBLE', 'PARTLY', 'ORGANIZED', 'HAD', 'MEANWHILE', 'MOVED', 'INTO', 'THE', 'TOWN'] +7729-102255-0035-296: hyp=['THE', 'MILITARY', 'FORCE', 'PARTLY', 'RABBLE', 'PARTLY', 'ORGANIZED', 'HEAD', 'MEANWHILE', 'MOVED', 'INTO', 'THE', 'TOWN'] +7729-102255-0036-297: ref=['HE', 'PLANTED', 'A', 'COMPANY', 'BEFORE', 'THE', 'HOTEL', 'AND', 'DEMANDED', 'A', 'SURRENDER', 'OF', 'THE', 'ARMS', 'BELONGING', 'TO', 'THE', 'FREE', 'STATE', 'MILITARY', 'COMPANIES'] +7729-102255-0036-297: hyp=['HE', 'PLANTED', 'ACCOMPANIED', 'BEFORE', 'THE', 'HOTEL', 'AND', 'DEMANDED', 'A', 'SURRENDER', 'OF', 'THE', 'ARMS', 'BELONGING', 'TO', 'THE', 'FREE', 'STATE', 'MILITARY', 'COMPANIES'] +7729-102255-0037-298: ref=['HALF', 'AN', 'HOUR', 'LATER', 'TURNING', 'A', 'DEAF', 'EAR', 'TO', 'ALL', 'REMONSTRANCE', 'HE', 'GAVE', 'THE', 'PROPRIETORS', 'UNTIL', 'FIVE', "O'CLOCK", 'TO', 'REMOVE', 'THEIR', 'FAMILIES', 'AND', 'PERSONAL', 'PROPERTY', 'FROM', 'THE', 'FREE', 'STATE', 'HOTEL'] +7729-102255-0037-298: hyp=['HALF', 'AN', 'HOUR', 
'LATER', 'TURNING', 'A', 'DEAF', 'EAR', 'TO', 'ALL', 'REMONSTRANCE', 'HE', 'GAVE', 'THE', 'PROPRIETORS', 'UNTIL', 'FIVE', "O'CLOCK", 'TO', 'REMOVE', 'THEIR', 'FAMILIES', 'AND', 'PERSONAL', 'PROPERTY', 'FROM', 'THE', 'FREE', 'STATE', 'HOTEL'] +7729-102255-0038-299: ref=['ATCHISON', 'WHO', 'HAD', 'BEEN', 'HARANGUING', 'THE', 'MOB', 'PLANTED', 'HIS', 'TWO', 'GUNS', 'BEFORE', 'THE', 'BUILDING', 'AND', 'TRAINED', 'THEM', 'UPON', 'IT'] +7729-102255-0038-299: hyp=['ADJUT', 'WHO', 'HAD', 'BEEN', 'HARANGUING', 'THE', 'MOB', 'PLANTED', 'HIS', 'TWO', 'GUNS', 'BEFORE', 'THE', 'BUILDING', 'AND', 'TRAINED', 'THEM', 'UPON', 'IT'] +7729-102255-0039-300: ref=['THE', 'INMATES', 'BEING', 'REMOVED', 'AT', 'THE', 'APPOINTED', 'HOUR', 'A', 'FEW', 'CANNON', 'BALLS', 'WERE', 'FIRED', 'THROUGH', 'THE', 'STONE', 'WALLS'] +7729-102255-0039-300: hyp=['THE', 'INMATES', 'BEING', 'REMOVED', 'AT', 'THE', 'APPOINTED', 'HOUR', 'A', 'FEW', 'CANNON', 'BALLS', 'WERE', 'FIRED', 'THROUGH', 'THE', 'STONE', 'WALLS'] +7729-102255-0040-301: ref=['IN', 'THIS', 'INCIDENT', 'CONTRASTING', 'THE', 'CREATIVE', 'AND', 'THE', 'DESTRUCTIVE', 'SPIRIT', 'OF', 'THE', 'FACTIONS', 'THE', 'EMIGRANT', 'AID', 'SOCIETY', 'OF', 'MASSACHUSETTS', 'FINDS', 'ITS', 'MOST', 'HONORABLE', 'AND', 'TRIUMPHANT', 'VINDICATION'] +7729-102255-0040-301: hyp=['IN', 'THIS', 'INCIDENT', 'CONTRASTING', 'THE', 'CREATIVE', 'AND', 'THE', 'DESTRUCTIVE', 'SPIRIT', 'OF', 'THE', 'FACTIONS', 'THE', 'IMMIGRANT', 'AIDS', 'SOCIETY', 'OF', 'MASSACHUSETTS', 'FINDS', 'ITS', 'MOST', 'HONORABLE', 'AND', 'TRIUMPHANT', 'VINDICATION'] +7729-102255-0041-302: ref=['THE', 'WHOLE', 'PROCEEDING', 'WAS', 'SO', 'CHILDISH', 'THE', 'MISERABLE', 'PLOT', 'SO', 'TRANSPARENT', 'THE', 'OUTRAGE', 'SO', 'GROSS', 'AS', 'TO', 'BRING', 'DISGUST', 'TO', 'THE', 'BETTER', 'CLASS', 'OF', 'BORDER', 'RUFFIANS', 'WHO', 'WERE', 'WITNESSES', 'AND', 'ACCESSORIES'] +7729-102255-0041-302: hyp=['THE', 'WHOLE', 'PROCEEDING', 'WAS', 'SO', 'CHILDISH', 'THE', 'MISERABLE', 'PLOT', 'SO', 'TRANSPARENT', 'THE', 'OUTRAGED', 'SO', 'GROSS', 'AS', 'TO', 'BRING', 'DISGUST', 'TO', 'THE', 'BETTER', 'CLASS', 'OF', 'BORDER', 'RUFFIANS', 'WHO', 'WERE', 'WITNESSES', 'AND', 'ACCESSORIES'] +7729-102255-0042-303: ref=['RELOCATED', 'FOOTNOTE', 'GOVERNOR', 'ROBINSON', 'BEING', 'ON', 'HIS', 'WAY', 'EAST', 'THE', 'STEAMBOAT', 'ON', 'WHICH', 'HE', 'WAS', 'TRAVELING', 'STOPPED', 'AT', 'LEXINGTON', 'MISSOURI'] +7729-102255-0042-303: hyp=['RE', 'LOCATED', 'FOOTNOTE', 'GOVERNOR', 'ROBINSON', 'BEING', 'ON', 'HIS', 'WAY', 'EAST', 'THE', 'STEAMBOAT', 'ON', 'WHICH', 'HE', 'WAS', 'TRAVELLING', 'STOPPED', 'AT', 'LEXINGTON', 'MISSOURI'] +7729-102255-0043-304: ref=['IN', 'A', 'FEW', 'DAYS', 'AN', 'OFFICER', 'CAME', 'WITH', 'A', 'REQUISITION', 'FROM', 'GOVERNOR', 'SHANNON', 'AND', 'TOOK', 'THE', 'PRISONER', 'BY', 'LAND', 'TO', 'WESTPORT', 'AND', 'AFTERWARDS', 'FROM', 'THERE', 'TO', 'KANSAS', 'CITY', 'AND', 'LEAVENWORTH'] +7729-102255-0043-304: hyp=['IN', 'A', 'FEW', 'DAYS', 'AN', 'OFFICER', 'CAME', 'WITH', 'A', 'REQUISITION', 'FROM', 'GOVERNOR', 'SHANNON', 'AND', 'TOOK', 'THE', 'PRISONER', 'BY', 'LANDA', 'WESTPORT', 'AND', 'AFTERWARDS', 'FROM', 'THERE', 'TO', 'KANSA', 'CITY', 'IN', 'LEVINWORTH'] +7729-102255-0044-305: ref=['HERE', 'HE', 'WAS', 'PLACED', 'IN', 'THE', 'CUSTODY', 'OF', 'CAPTAIN', 'MARTIN', 'OF', 'THE', 'KICKAPOO', 'RANGERS', 'WHO', 'PROVED', 'A', 'KIND', 'JAILER', 'AND', 'MATERIALLY', 'ASSISTED', 'IN', 'PROTECTING', 'HIM', 'FROM', 'THE', 'DANGEROUS', 'INTENTIONS', 'OF', 'THE', 'MOB', 'WHICH', 'AT', 'THAT', 'TIME', 'HELD', 'LEAVENWORTH', 
'UNDER', 'A', 'REIGN', 'OF', 'TERROR'] +7729-102255-0044-305: hyp=['HARRY', 'WAS', 'PLACED', 'IN', 'THE', 'CUSTODY', 'OF', 'CAPTAIN', 'MARTIN', 'OF', 'THE', 'KICKAPOO', 'RANGERS', 'WHO', 'PROVED', 'A', 'KIND', 'JAILER', 'AND', 'MATERIALLY', 'ASSISTED', 'IN', 'PROTECTING', 'HIM', 'FROM', 'THE', 'DANGEROUS', 'INTENTIONS', 'OF', 'THE', 'MOB', 'WHICH', 'AT', 'THAT', 'TIME', 'HELD', 'LEVIN', 'WORTH', 'UNDER', 'THE', 'REIGN', 'OF', 'TERROR'] +7729-102255-0045-306: ref=['CAPTAIN', 'MARTIN', 'SAID', 'I', 'SHALL', 'GIVE', 'YOU', 'A', 'PISTOL', 'TO', 'HELP', 'PROTECT', 'YOURSELF', 'IF', 'WORSE', 'COMES', 'TO', 'WORST'] +7729-102255-0045-306: hyp=['CAPTAIN', 'MARTIN', 'SAID', 'I', 'SHALL', 'GIVE', 'YOU', 'A', 'PISTOL', 'TO', 'HELP', 'PROTECT', 'YOURSELF', 'IF', 'WORSE', 'COMES', 'TO', 'WORST'] +7729-102255-0046-307: ref=['IN', 'THE', 'EARLY', 'MORNING', 'OF', 'THE', 'NEXT', 'DAY', 'MAY', 'TWENTY', 'NINTH', 'A', 'COMPANY', 'OF', 'DRAGOONS', 'WITH', 'ONE', 'EMPTY', 'SADDLE', 'CAME', 'DOWN', 'FROM', 'THE', 'FORT', 'AND', 'WHILE', 'THE', 'PRO', 'SLAVERY', 'MEN', 'STILL', 'SLEPT', 'THE', 'PRISONER', 'AND', 'HIS', 'ESCORT', 'WERE', 'ON', 'THEIR', 'WAY', 'ACROSS', 'THE', 'PRAIRIES', 'TO', 'LECOMPTON', 'IN', 'THE', 'CHARGE', 'OF', 'OFFICERS', 'OF', 'THE', 'UNITED', 'STATES', 'ARMY'] +7729-102255-0046-307: hyp=['IN', 'THE', 'EARLY', 'MORNING', 'OF', 'THE', 'NEXT', 'DAY', 'MAY', 'TWENTY', 'NINTH', 'A', 'COMPANY', 'OF', 'DRAGOONS', 'WITH', 'ONE', 'EMPTY', 'SADDLE', 'CAME', 'DOWN', 'FROM', 'THE', 'FORT', 'AND', 'WHILE', 'THE', 'PRO', 'SLAVERY', 'MEN', 'STILL', 'SLEPT', 'THE', 'PRISONER', 'AND', 'HIS', 'ESCORT', 'WERE', 'ON', 'THEIR', 'WAY', 'ACROSS', 'THE', 'PRAIRIES', 'TO', 'LECOMPTON', 'IN', 'THE', 'CHARGE', 'OF', 'OFFICERS', 'OF', 'THE', 'UNITED', 'STATES', 'ARMY'] +8224-274381-0000-1451: ref=['THOUGH', 'THROWN', 'INTO', 'PRISON', 'FOR', 'THIS', 'ENTERPRISE', 'AND', 'DETAINED', 'SOME', 'TIME', 'HE', 'WAS', 'NOT', 'DISCOURAGED', 'BUT', 'STILL', 'CONTINUED', 'BY', 'HIS', 'COUNTENANCE', 'AND', 'PROTECTION', 'TO', 'INFUSE', 'SPIRIT', 'INTO', 'THE', 'DISTRESSED', 'ROYALISTS'] +8224-274381-0000-1451: hyp=['THOUGH', 'THROWN', 'INTO', 'PRISON', 'FOR', 'THIS', 'ENTERPRISE', 'AND', 'DETAINED', 'SOME', 'TIME', 'HE', 'WAS', 'NOT', 'DISCOURAGED', 'BUT', 'STILL', 'CONTINUED', 'BY', 'HIS', 'COUNTENANCE', 'AND', 'PROTECTION', 'TO', 'INFUSE', 'SPIRIT', 'INTO', 'THE', 'DISTRESSED', 'ROYALISTS'] +8224-274381-0001-1452: ref=['AMONG', 'OTHER', 'PERSONS', 'OF', 'DISTINCTION', 'WHO', 'UNITED', 'THEMSELVES', 'TO', 'HIM', 'WAS', 'LORD', 'NAPIER', 'OF', 'MERCHISTON', 'SON', 'OF', 'THE', 'FAMOUS', 'INVENTOR', 'OF', 'THE', 'LOGARITHMS', 'THE', 'PERSON', 'TO', 'WHOM', 'THE', 'TITLE', 'OF', 'A', 'GREAT', 'MAN', 'IS', 'MORE', 'JUSTLY', 'DUE', 'THAN', 'TO', 'ANY', 'OTHER', 'WHOM', 'HIS', 'COUNTRY', 'EVER', 'PRODUCED'] +8224-274381-0001-1452: hyp=['AMONG', 'OTHER', 'PERSONS', 'OF', 'DISTINCTION', 'WHO', 'UNITED', 'THEMSELVES', 'TO', 'HIM', 'WAS', 'LORD', 'NAPIER', 'OF', 'MURCHISTON', 'SON', 'OF', 'THE', 'FAMOUS', 'INVENTOR', 'OF', 'THE', 'LOGARITHMS', 'THE', 'PERSON', 'TO', 'WHOM', 'THE', 'TITLE', 'OF', 'A', 'GREAT', 'MAN', 'IS', 'MORE', 'JUSTLY', 'DUE', 'THAN', 'TO', 'ANY', 'OTHER', 'WHOM', 'HIS', 'COUNTRY', 'EVER', 'PRODUCED'] +8224-274381-0002-1453: ref=['WHILE', 'THE', 'FORMER', 'FORETOLD', 'THAT', 'THE', 'SCOTTISH', 'COVENANTERS', 'WERE', 'SECRETLY', 'FORMING', 'A', 'UNION', 'WITH', 'THE', 'ENGLISH', 'PARLIAMENT', 'AND', 'INCULCATED', 'THE', 'NECESSITY', 'OF', 'PREVENTING', 'THEM', 'BY', 'SOME', 'VIGOROUS', 'UNDERTAKING', 'THE', 
'LATTER', 'STILL', 'INSISTED', 'THAT', 'EVERY', 'SUCH', 'ATTEMPT', 'WOULD', 'PRECIPITATE', 'THEM', 'INTO', 'MEASURES', 'TO', 'WHICH', 'OTHERWISE', 'THEY', 'WERE', 'NOT', 'PERHAPS', 'INCLINED'] +8224-274381-0002-1453: hyp=['WHILE', 'THE', 'FORMER', 'FORETOLD', 'THAT', 'THE', 'SCOTTISH', 'COVENANTERS', 'WERE', 'SECRETLY', 'FORMING', 'A', 'UNION', 'WITH', 'THE', 'ENGLISH', 'PARLIAMENT', 'AND', 'INCALCATED', 'THE', 'NECESSITY', 'OF', 'PREVENTING', 'THEM', 'BY', 'SOME', 'VIGOROUS', 'UNDERTAKING', 'THE', 'LATTER', 'STILL', 'INSISTED', 'THAT', 'EVERY', 'SUCH', 'ATTEMPT', 'WOULD', 'PRECIPITATE', 'THEM', 'INTO', 'MEASURES', 'TO', 'WHICH', 'OTHERWISE', 'THEY', 'WERE', 'NOT', 'PERHAPS', 'INCLINED'] +8224-274381-0003-1454: ref=['THE', "KING'S", 'EARS', 'WERE', 'NOW', 'OPEN', 'TO', "MONTROSE'S", 'COUNSELS', 'WHO', 'PROPOSED', 'NONE', 'BUT', 'THE', 'BOLDEST', 'AND', 'MOST', 'DARING', 'AGREEABLY', 'TO', 'THE', 'DESPERATE', 'STATE', 'OF', 'THE', 'ROYAL', 'CAUSE', 'IN', 'SCOTLAND'] +8224-274381-0003-1454: hyp=['THE', "KING'S", 'EARS', 'WERE', 'NOW', 'OPEN', 'TO', "MONTROSE'S", 'COUNCILS', 'WHO', 'PROPOSED', 'NONE', 'BUT', 'THE', 'BOLDEST', 'AND', 'MOST', 'DARING', 'AGREEABLY', 'TO', 'THE', 'DESPERATE', 'STATE', 'OF', 'THE', 'ROYAL', 'CAUSE', 'IN', 'SCOTLAND'] +8224-274381-0004-1455: ref=['FIVE', 'HUNDRED', 'MEN', 'MORE', 'WHO', 'HAD', 'BEEN', 'LEVIED', 'BY', 'THE', 'COVENANTERS', 'WERE', 'PERSUADED', 'TO', 'EMBRACE', 'THE', 'ROYAL', 'CAUSE', 'AND', 'WITH', 'THIS', 'COMBINED', 'FORCE', 'HE', 'HASTENED', 'TO', 'ATTACK', 'LORD', 'ELCHO', 'WHO', 'LAY', 'AT', 'PERTH', 'WITH', 'AN', 'ARMY', 'OF', 'SIX', 'THOUSAND', 'MEN', 'ASSEMBLED', 'UPON', 'THE', 'FIRST', 'NEWS', 'OF', 'THE', 'IRISH', 'INVASION'] +8224-274381-0004-1455: hyp=['FIVE', 'HUNDRED', 'MEN', 'MORE', 'WHO', 'HAD', 'BEEN', 'LEVIED', 'BY', 'THE', 'COVENANTERS', 'WERE', 'PERSUADED', 'TO', 'EMBRACE', 'THE', 'ROYAL', 'CAUSE', 'AND', 'WITH', 'THIS', 'COMBINED', 'FORCE', 'HE', 'HASTENED', 'TO', 'ATTACK', 'LORD', 'ELKOE', 'WHO', 'LAY', 'AT', 'PERTH', 'WITH', 'AN', 'ARMY', 'OF', 'SIX', 'THOUSAND', 'MEN', 'ASSEMBLED', 'UPON', 'THE', 'FIRST', 'NEWS', 'OF', 'THE', 'IRISH', 'INVASION'] +8224-274381-0005-1456: ref=['DREADING', 'THE', 'SUPERIOR', 'POWER', 'OF', 'ARGYLE', 'WHO', 'HAVING', 'JOINED', 'HIS', 'VASSALS', 'TO', 'A', 'FORCE', 'LEVIED', 'BY', 'THE', 'PUBLIC', 'WAS', 'APPROACHING', 'WITH', 'A', 'CONSIDERABLE', 'ARMY', 'MONTROSE', 'HASTENED', 'NORTHWARDS', 'IN', 'ORDER', 'TO', 'ROUSE', 'AGAIN', 'THE', 'MARQUIS', 'OF', 'HUNTLEY', 'AND', 'THE', 'GORDONS', 'WHO', 'HAVING', 'BEFORE', 'HASTILY', 'TAKEN', 'ARMS', 'HAD', 'BEEN', 'INSTANTLY', 'SUPPRESSED', 'BY', 'THE', 'COVENANTERS'] +8224-274381-0005-1456: hyp=['DREADING', 'THE', 'SUPERIOR', 'POWER', 'OF', 'ARGYLE', 'WHO', 'HAVING', 'JOINED', 'HIS', 'VASSALS', 'TO', 'A', 'FORCE', 'LEVIED', 'BY', 'THE', 'PUBLIC', 'WAS', 'APPROACHING', 'WITH', 'A', 'CONSIDERABLE', 'ARMY', 'MONTROSE', 'HASTENED', 'NORTHWARD', 'IN', 'ORDER', 'TO', 'ROUSE', 'AGAIN', 'THE', 'MARQUIS', 'OF', 'HUNTLY', 'AND', 'THE', 'GORDONS', 'WHO', 'HAVING', 'BEFORE', 'HASTILY', 'TAKEN', 'ARMS', 'HAD', 'BEEN', 'INSTANTLY', 'SUPPRESSED', 'BY', 'THE', 'COVENANTERS'] +8224-274381-0006-1457: ref=['THIS', "NOBLEMAN'S", 'CHARACTER', 'THOUGH', 'CELEBRATED', 'FOR', 'POLITICAL', 'COURAGE', 'AND', 'CONDUCT', 'WAS', 'VERY', 'LOW', 'FOR', 'MILITARY', 'PROWESS', 'AND', 'AFTER', 'SOME', 'SKIRMISHES', 'IN', 'WHICH', 'HE', 'WAS', 'WORSTED', 'HE', 'HERE', 'ALLOWED', 'MONTROSE', 'TO', 'ESCAPE', 'HIM'] +8224-274381-0006-1457: hyp=['THIS', "NOBLEMAN'S", 'CHARACTER', 'THOUGH', 
'CELEBRATED', 'FOR', 'POLITICAL', 'COURAGE', 'AND', 'CONDUCT', 'WAS', 'VERY', 'LOW', 'FOR', 'MILITARY', 'PROWESS', 'AND', 'AFTER', 'SOME', 'SKIRMISHES', 'IN', 'WHICH', 'HE', 'WAS', 'WORSTED', 'HE', 'HERE', 'ALLOWED', 'MONTROSE', 'TO', 'ESCAPE', 'HIM'] +8224-274381-0007-1458: ref=['BY', 'QUICK', 'MARCHES', 'THROUGH', 'THESE', 'INACCESSIBLE', 'MOUNTAINS', 'THAT', 'GENERAL', 'FREED', 'HIMSELF', 'FROM', 'THE', 'SUPERIOR', 'FORCES', 'OF', 'THE', 'COVENANTERS'] +8224-274381-0007-1458: hyp=['BY', 'QUICK', 'MARCHES', 'THROUGH', 'THESE', 'INACCESSIBLE', 'MOUNTAINS', 'THAT', 'GENERAL', 'FREED', 'HIMSELF', 'FROM', 'THE', 'SUPERIOR', 'FORCES', 'OF', 'THE', 'COVENANTERS'] +8224-274381-0008-1459: ref=['WITH', 'THESE', 'AND', 'SOME', 'REENFORCEMENTS', 'OF', 'THE', 'ATHOLEMEN', 'AND', 'MACDONALDS', 'WHOM', 'HE', 'HAD', 'RECALLED', 'MONTROSE', 'FELL', 'SUDDENLY', 'UPON', "ARGYLE'S", 'COUNTRY', 'AND', 'LET', 'LOOSE', 'UPON', 'IT', 'ALL', 'THE', 'RAGE', 'OF', 'WAR', 'CARRYING', 'OFF', 'THE', 'CATTLE', 'BURNING', 'THE', 'HOUSES', 'AND', 'PUTTING', 'THE', 'INHABITANTS', 'TO', 'THE', 'SWORD'] +8224-274381-0008-1459: hyp=['WITH', 'THESE', 'AND', 'SOME', 'REINFORCEMENTS', 'OF', 'THE', 'ETHEL', 'MEN', 'AND', 'MON', 'DONALDS', 'WHOM', 'HE', 'HAD', 'RECALLED', 'MONTROSE', 'FELL', 'SUDDENLY', 'UPON', "ARGYLE'S", 'COUNTRY', 'AND', 'LET', 'LOOSE', 'UPON', 'IT', 'ALL', 'THE', 'RAGE', 'OF', 'WAR', 'CARRYING', 'OFF', 'THE', 'CATTLE', 'BURNING', 'THE', 'HOUSES', 'AND', 'PUTTING', 'THE', 'INHABITANTS', 'TO', 'THE', 'SWORD'] +8224-274381-0009-1460: ref=['THIS', 'SEVERITY', 'BY', 'WHICH', 'MONTROSE', 'SULLIED', 'HIS', 'VICTORIES', 'WAS', 'THE', 'RESULT', 'OF', 'PRIVATE', 'ANIMOSITY', 'AGAINST', 'THE', 'CHIEFTAIN', 'AS', 'MUCH', 'AS', 'OF', 'ZEAL', 'FOR', 'THE', 'PUBLIC', 'CAUSE', 'ARGYLE', 'COLLECTING', 'THREE', 'THOUSAND', 'MEN', 'MARCHED', 'IN', 'QUEST', 'OF', 'THE', 'ENEMY', 'WHO', 'HAD', 'RETIRED', 'WITH', 'THEIR', 'PLUNDER', 'AND', 'HE', 'LAY', 'AT', 'INNERLOCHY', 'SUPPOSING', 'HIMSELF', 'STILL', 'AT', 'A', 'CONSIDERABLE', 'DISTANCE', 'FROM', 'THEM'] +8224-274381-0009-1460: hyp=['THIS', 'SEVERITY', 'BY', 'WHICH', 'MONTROSE', 'SULLIED', 'HIS', 'VICTORIES', 'WAS', 'THE', 'RESULT', 'OF', 'PRIVATE', 'ANIMOSITY', 'AGAINST', 'THE', 'CHIEFTAIN', 'AS', 'MUCH', 'AS', 'OF', 'ZEAL', 'FOR', 'THE', 'PUBLIC', 'CAUSE', 'ARGYLE', 'COLLECTING', 'THREE', 'THOUSAND', 'MEN', 'MARCHED', 'IN', 'QUEST', 'OF', 'THE', 'ENEMY', 'WHO', 'HAD', 'RETIRED', 'WITH', 'THEIR', 'PLUNDER', 'AND', 'HE', 'LAY', 'AT', 'INERLOCKY', 'SUPPOSING', 'HIMSELF', 'STILL', 'AT', 'A', 'CONSIDERABLE', 'DISTANCE', 'FROM', 'THEM'] +8224-274381-0010-1461: ref=['BY', 'A', 'QUICK', 'AND', 'UNEXPECTED', 'MARCH', 'MONTROSE', 'HASTENED', 'TO', 'INNERLOCHY', 'AND', 'PRESENTED', 'HIMSELF', 'IN', 'ORDER', 'OF', 'BATTLE', 'BEFORE', 'THE', 'SURPRISED', 'BUT', 'NOT', 'AFFRIGHTENED', 'COVENANTERS'] +8224-274381-0010-1461: hyp=['BY', 'A', 'QUICK', 'AND', 'UNEXPECTED', 'MARCH', 'MONTROSE', 'HASTENED', 'TO', 'IN', 'A', 'LOCKY', 'AND', 'PRESENTED', 'HIMSELF', 'IN', 'ORDER', 'OF', 'BATTLE', 'BEFORE', 'THE', 'SURPRISED', 'BUT', 'NOT', 'A', 'FRIGHTENED', 'COVENANTERS'] +8224-274381-0011-1462: ref=['HIS', 'CONDUCT', 'AND', 'PRESENCE', 'OF', 'MIND', 'IN', 'THIS', 'EMERGENCE', 'APPEARED', 'CONSPICUOUS'] +8224-274381-0011-1462: hyp=['HIS', 'CONDUCT', 'AND', 'PRESENCE', 'OF', 'MIND', 'IN', 'THIS', 'EMERGENCE', 'APPEARED', 'CONSPICUOUS'] +8224-274381-0012-1463: ref=['MONTROSE', 'WEAK', 'IN', 'CAVALRY', 'HERE', 'LINED', 'HIS', 'TROOPS', 'OF', 'HORSE', 'WITH', 'INFANTRY', 'AND', 'AFTER', 'PUTTING', 
'THE', "ENEMY'S", 'HORSE', 'TO', 'ROUT', 'FELL', 'WITH', 'UNITED', 'FORCE', 'UPON', 'THEIR', 'FOOT', 'WHO', 'WERE', 'ENTIRELY', 'CUT', 'IN', 'PIECES', 'THOUGH', 'WITH', 'THE', 'LOSS', 'OF', 'THE', 'GALLANT', 'LORD', 'GORDON', 'ON', 'THE', 'PART', 'OF', 'THE', 'ROYALISTS'] +8224-274381-0012-1463: hyp=['MONTROSE', 'WEAK', 'IN', 'CAVALRY', 'HERE', 'LINED', 'HIS', 'TROOPS', 'OF', 'HORSE', 'WITH', 'INFANTRY', 'AND', 'AFTER', 'PUTTING', 'THE', "ENEMY'S", 'HORSE', 'TO', 'ROUT', 'FELL', 'WITH', 'UNITED', 'FORCE', 'UPON', 'THEIR', 'FOOT', 'WHO', 'WERE', 'ENTIRELY', 'CUT', 'IN', 'PIECES', 'THOUGH', 'WITH', 'THE', 'LOSS', 'OF', 'THE', 'GALLANT', 'LORD', 'GORDON', 'ON', 'THE', 'PART', 'OF', 'THE', 'ROYALISTS'] +8224-274381-0013-1464: ref=['FROM', 'THE', 'SAME', 'MEN', 'NEW', 'REGIMENTS', 'AND', 'NEW', 'COMPANIES', 'WERE', 'FORMED', 'DIFFERENT', 'OFFICERS', 'APPOINTED', 'AND', 'THE', 'WHOLE', 'MILITARY', 'FORCE', 'PUT', 'INTO', 'SUCH', 'HANDS', 'AS', 'THE', 'INDEPENDENTS', 'COULD', 'RELY', 'ON'] +8224-274381-0013-1464: hyp=['FROM', 'THE', 'SAME', 'MEN', 'NEW', 'REGIMENTS', 'AND', 'NEW', 'COMPANIES', 'WERE', 'FORMED', 'DIFFERENT', 'OFFICERS', 'APPOINTED', 'AND', 'THE', 'WHOLE', 'MILITARY', 'FORCE', 'PUT', 'INTO', 'SUCH', 'HANDS', 'AS', 'THE', 'INDEPENDENTS', 'COULD', 'RELY', 'ON'] +8224-274381-0014-1465: ref=['BESIDES', 'MEMBERS', 'OF', 'PARLIAMENT', 'WHO', 'WERE', 'EXCLUDED', 'MANY', 'OFFICERS', 'UNWILLING', 'TO', 'SERVE', 'UNDER', 'THE', 'NEW', 'GENERALS', 'THREW', 'UP', 'THEIR', 'COMMISSIONS', 'AND', 'UNWARILY', 'FACILITATED', 'THE', 'PROJECT', 'OF', 'PUTTING', 'THE', 'ARMY', 'ENTIRELY', 'INTO', 'THE', 'HANDS', 'OF', 'THAT', 'FACTION'] +8224-274381-0014-1465: hyp=['BESIDES', 'MEMBERS', 'OF', 'PARLIAMENT', 'WHO', 'WERE', 'EXCLUDED', 'MANY', 'OFFICERS', 'UNWILLING', 'TO', 'SERVE', 'UNDER', 'THE', 'NEW', 'GENERALS', 'THREW', 'UP', 'THEIR', 'COMMISSIONS', 'AND', 'THEN', 'WARILY', 'FACILITATED', 'THE', 'PROJECT', 'OF', 'PUTTING', 'THE', 'ARMY', 'ENTIRELY', 'INTO', 'THE', 'HANDS', 'OF', 'THAT', 'FACTION'] +8224-274381-0015-1466: ref=['THOUGH', 'THE', 'DISCIPLINE', 'OF', 'THE', 'FORMER', 'PARLIAMENTARY', 'ARMY', 'WAS', 'NOT', 'CONTEMPTIBLE', 'A', 'MORE', 'EXACT', 'PLAN', 'WAS', 'INTRODUCED', 'AND', 'RIGOROUSLY', 'EXECUTED', 'BY', 'THESE', 'NEW', 'COMMANDERS'] +8224-274381-0015-1466: hyp=['THOUGH', 'THE', 'DISCIPLINE', 'OF', 'THE', 'FORMER', 'PARLIAMENTARY', 'ARMY', 'WAS', 'NOT', 'CONTEMPTIBLE', 'A', 'MORE', 'EXACT', 'PLAN', 'WAS', 'INTRODUCED', 'AND', 'RIGOROUSLY', 'EXECUTED', 'BY', 'THESE', 'NEW', 'COMMANDERS'] +8224-274381-0016-1467: ref=['VALOR', 'INDEED', 'WAS', 'VERY', 'GENERALLY', 'DIFFUSED', 'OVER', 'THE', 'ONE', 'PARTY', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'DURING', 'THIS', 'PERIOD', 'DISCIPLINE', 'ALSO', 'WAS', 'ATTAINED', 'BY', 'THE', 'FORCES', 'OF', 'THE', 'PARLIAMENT', 'BUT', 'THE', 'PERFECTION', 'OF', 'THE', 'MILITARY', 'ART', 'IN', 'CONCERTING', 'THE', 'GENERAL', 'PLANS', 'OF', 'ACTION', 'AND', 'THE', 'OPERATIONS', 'OF', 'THE', 'FIELD', 'SEEMS', 'STILL', 'ON', 'BOTH', 'SIDES', 'TO', 'HAVE', 'BEEN', 'IN', 'A', 'GREAT', 'MEASURE', 'WANTING'] +8224-274381-0016-1467: hyp=['VALOR', 'INDEED', 'WAS', 'VERY', 'GENERALLY', 'DIFFUSED', 'OVER', 'THE', 'ONE', 'PARTY', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'DURING', 'THIS', 'PERIOD', 'DISCIPLINE', 'ALSO', 'WAS', 'ATTAINED', 'BY', 'THE', 'FORCES', 'OF', 'THE', 'PARLIAMENT', 'BUT', 'THE', 'PERFECTION', 'OF', 'THE', 'MILITARY', 'ART', 'IN', 'CONCERTING', 'THE', 'GENERAL', 'PLANS', 'OF', 'ACTION', 'AND', 'THE', 'OPERATIONS', 'OF', 'THE', 'FIELD', 'SEEMS', 
'STILL', 'ON', 'BOTH', 'SIDES', 'TO', 'HAVE', 'BEEN', 'IN', 'A', 'GREAT', 'MEASURE', 'WANTING'] +8224-274381-0017-1468: ref=['HISTORIANS', 'AT', 'LEAST', 'PERHAPS', 'FROM', 'THEIR', 'OWN', 'IGNORANCE', 'AND', 'INEXPERIENCE', 'HAVE', 'NOT', 'REMARKED', 'ANY', 'THING', 'BUT', 'A', 'HEADLONG', 'IMPETUOUS', 'CONDUCT', 'EACH', 'PARTY', 'HURRYING', 'TO', 'A', 'BATTLE', 'WHERE', 'VALOR', 'AND', 'FORTUNE', 'CHIEFLY', 'DETERMINED', 'THE', 'SUCCESS'] +8224-274381-0017-1468: hyp=['HISTORIANS', 'AT', 'LEAST', 'PERHAPS', 'FROM', 'THEIR', 'OWN', 'IGNORANCE', 'AND', 'INEXPERIENCE', 'HAVE', 'NOT', 'REMARKED', 'ANY', 'THING', 'BUT', 'A', 'HEADLONG', 'IMPETUOUS', 'CONDUCT', 'EACH', 'PARTY', 'HURRYING', 'TO', 'A', 'BATTLE', 'WHERE', 'VALOR', 'AND', 'FORTUNE', 'CHIEFLY', 'DETERMINE', 'THE', 'SUCCESS'] +8224-274384-0000-1437: ref=['HE', 'PASSED', 'THROUGH', 'HENLEY', 'SAINT', 'ALBANS', 'AND', 'CAME', 'SO', 'NEAR', 'TO', 'LONDON', 'AS', 'HARROW', 'ON', 'THE', 'HILL'] +8224-274384-0000-1437: hyp=['HE', 'PASSED', 'THROUGH', 'HENLEY', 'SAINT', "ALBAN'S", 'AND', 'CAME', 'SO', 'NEAR', 'TO', 'LONDON', 'AS', 'HARROW', 'ON', 'THE', 'HILL'] +8224-274384-0001-1438: ref=['THE', 'SCOTTISH', 'GENERALS', 'AND', 'COMMISSIONERS', 'AFFECTED', 'GREAT', 'SURPRISE', 'ON', 'THE', 'APPEARANCE', 'OF', 'THE', 'KING', 'AND', 'THOUGH', 'THEY', 'PAID', 'HIM', 'ALL', 'THE', 'EXTERIOR', 'RESPECT', 'DUE', 'TO', 'HIS', 'DIGNITY', 'THEY', 'INSTANTLY', 'SET', 'A', 'GUARD', 'UPON', 'HIM', 'UNDER', 'COLOR', 'OF', 'PROTECTION', 'AND', 'MADE', 'HIM', 'IN', 'REALITY', 'A', 'PRISONER'] +8224-274384-0001-1438: hyp=['THE', 'SCOTTISH', 'GENERALS', 'AND', 'COMMISSIONERS', 'AFFECTED', 'GREAT', 'SURPRISE', 'ON', 'THE', 'APPEARANCE', 'OF', 'THE', 'KING', 'AND', 'THOUGH', 'THEY', 'PAID', 'HIM', 'ALL', 'THE', 'EXTERIOR', 'RESPECT', 'DUE', 'TO', 'HIS', 'DIGNITY', 'THEY', 'INSTANTLY', 'SET', 'A', 'GUARD', 'UPON', 'HIM', 'UNDER', 'COLOR', 'OF', 'PROTECTION', 'AND', 'MADE', 'HIM', 'IN', 'REALITY', 'A', 'PRISONER'] +8224-274384-0002-1439: ref=['THEY', 'INFORMED', 'THE', 'ENGLISH', 'PARLIAMENT', 'OF', 'THIS', 'UNEXPECTED', 'INCIDENT', 'AND', 'ASSURED', 'THEM', 'THAT', 'THEY', 'HAD', 'ENTERED', 'INTO', 'NO', 'PRIVATE', 'TREATY', 'WITH', 'THE', 'KING'] +8224-274384-0002-1439: hyp=['THEY', 'INFORMED', 'THE', 'ENGLISH', 'PARLIAMENT', 'OF', 'THIS', 'UNEXPECTED', 'INCIDENT', 'AND', 'ASSURED', 'THEM', 'THAT', 'THEY', 'HAD', 'ENTERED', 'INTO', 'NO', 'PRIVATE', 'TREATY', 'WITH', 'THE', 'KING'] +8224-274384-0003-1440: ref=['OR', 'HATH', 'HE', 'GIVEN', 'US', 'ANY', 'GIFT'] +8224-274384-0003-1440: hyp=['OR', 'HATH', 'HE', 'GIVEN', 'US', 'ANY', 'GIFT'] +8224-274384-0004-1441: ref=['AND', 'THE', 'MEN', 'OF', 'ISRAEL', 'ANSWERED', 'THE', 'MEN', 'OF', 'JUDAH', 'AND', 'SAID', 'WE', 'HAVE', 'TEN', 'PARTS', 'IN', 'THE', 'KING', 'AND', 'WE', 'HAVE', 'ALSO', 'MORE', 'RIGHT', 'IN', 'DAVID', 'THAN', 'YE', 'WHY', 'THEN', 'DID', 'YE', 'DESPISE', 'US', 'THAT', 'OUR', 'ADVICE', 'SHOULD', 'NOT', 'BE', 'FIRST', 'HAD', 'IN', 'BRINGING', 'BACK', 'OUR', 'KING'] +8224-274384-0004-1441: hyp=['AND', 'THE', 'MEN', 'OF', 'ISRAEL', 'ANSWERED', 'THE', 'MEN', 'OF', 'JUDAH', 'AND', 'SAID', 'WE', 'HAVE', 'TEN', 'PARTS', 'IN', 'THE', 'KING', 'AND', 'WE', 'HAVE', 'ALSO', 'MORE', 'RIGHT', 'IN', 'DAVID', 'THAN', 'YE', 'WHY', 'THEN', 'DID', 'YE', 'DESPISE', 'US', 'THAT', 'OUR', 'ADVICE', 'SHOULD', 'NOT', 'BE', 'FIRST', 'HAD', 'IN', 'BRINGING', 'BACK', 'OUR', 'KING'] +8224-274384-0005-1442: ref=['ANOTHER', 'PREACHER', 'AFTER', 'REPROACHING', 'HIM', 'TO', 'HIS', 'FACE', 'WITH', 'HIS', 'MISGOVERNMENT', 'ORDERED', 
'THIS', 'PSALM', 'TO', 'BE', 'SUNG'] +8224-274384-0005-1442: hyp=['ANOTHER', 'PREACHER', 'AFTER', 'REPROACHING', 'HIM', 'TO', 'HIS', 'FACE', 'WITH', 'HIS', 'MISGOVERNMENT', 'ORDERED', 'THIS', 'SUM', 'TO', 'BE', 'SUNG'] +8224-274384-0006-1443: ref=['THE', 'KING', 'STOOD', 'UP', 'AND', 'CALLED', 'FOR', 'THAT', 'PSALM', 'WHICH', 'BEGINS', 'WITH', 'THESE', 'WORDS'] +8224-274384-0006-1443: hyp=['THE', 'KING', 'STOOD', 'UP', 'AND', 'CALLED', 'FOR', 'THAT', 'PSALM', 'WHICH', 'BEGINS', 'WITH', 'THESE', 'WORDS'] +8224-274384-0007-1444: ref=['HAVE', 'MERCY', 'LORD', 'ON', 'ME', 'I', 'PRAY', 'FOR', 'MEN', 'WOULD', 'ME', 'DEVOUR'] +8224-274384-0007-1444: hyp=['HAVE', 'MERCY', 'LORD', 'ON', 'ME', 'I', 'PRAY', 'FOR', 'MEN', 'WITH', 'ME', 'DEVOUR'] +8224-274384-0008-1445: ref=['THE', 'GOOD', 'NATURED', 'AUDIENCE', 'IN', 'PITY', 'TO', 'FALLEN', 'MAJESTY', 'SHOWED', 'FOR', 'ONCE', 'GREATER', 'DEFERENCE', 'TO', 'THE', 'KING', 'THAN', 'TO', 'THE', 'MINISTER', 'AND', 'SUNG', 'THE', 'PSALM', 'WHICH', 'THE', 'FORMER', 'HAD', 'CALLED', 'FOR'] +8224-274384-0008-1445: hyp=['THE', 'GOOD', 'NATURED', 'AUDIENCE', 'IN', 'PITY', 'TO', 'FALL', 'AND', 'MAJESTY', 'SHOWED', 'FOR', 'ONCE', 'GREATER', 'DEFERENCE', 'TO', 'THE', 'KING', 'THAN', 'TO', 'THE', 'MINISTER', 'AND', 'SUNG', 'THE', 'PSALM', 'WHICH', 'THE', 'FORMER', 'HAD', 'CALLED', 'FOR'] +8224-274384-0009-1446: ref=['THE', 'PARLIAMENT', 'AND', 'THE', 'SCOTS', 'LAID', 'THEIR', 'PROPOSALS', 'BEFORE', 'THE', 'KING'] +8224-274384-0009-1446: hyp=['THE', 'PARLIAMENT', 'AND', 'THE', 'SCOTS', 'LAID', 'THEIR', 'PROPOSALS', 'BEFORE', 'THE', 'KING'] +8224-274384-0010-1447: ref=['BEFORE', 'THE', 'SETTLEMENT', 'OF', 'TERMS', 'THE', 'ADMINISTRATION', 'MUST', 'BE', 'POSSESSED', 'ENTIRELY', 'BY', 'THE', 'PARLIAMENTS', 'OF', 'BOTH', 'KINGDOMS', 'AND', 'HOW', 'INCOMPATIBLE', 'THAT', 'SCHEME', 'WITH', 'THE', 'LIBERTY', 'OF', 'THE', 'KING', 'IS', 'EASILY', 'IMAGINED'] +8224-274384-0010-1447: hyp=['BEFORE', 'THE', 'SETTLEMENT', 'OF', 'TERMS', 'THE', 'ADMINISTRATION', 'MUST', 'BE', 'POSSESSED', 'ENTIRELY', 'BY', 'THE', 'PARLIAMENTS', 'OF', 'BOTH', 'KINGDOMS', 'AND', 'HOW', 'INCOMPATIBLE', 'THAT', 'SCHEME', 'WITH', 'THE', 'LIBERTY', 'OF', 'THE', 'KING', 'IS', 'EASILY', 'IMAGINED'] +8224-274384-0011-1448: ref=['THE', 'ENGLISH', 'IT', 'IS', 'EVIDENT', 'HAD', 'THEY', 'NOT', 'BEEN', 'PREVIOUSLY', 'ASSURED', 'OF', 'RECEIVING', 'THE', 'KING', 'WOULD', 'NEVER', 'HAVE', 'PARTED', 'WITH', 'SO', 'CONSIDERABLE', 'A', 'SUM', 'AND', 'WHILE', 'THEY', 'WEAKENED', 'THEMSELVES', 'BY', 'THE', 'SAME', 'MEASURE', 'HAVE', 'STRENGTHENED', 'A', 'PEOPLE', 'WITH', 'WHOM', 'THEY', 'MUST', 'AFTERWARDS', 'HAVE', 'SO', 'MATERIAL', 'AN', 'INTEREST', 'TO', 'DISCUSS'] +8224-274384-0011-1448: hyp=['THE', 'ENGLISH', 'IT', 'IS', 'EVIDENT', 'HAD', 'THEY', 'NOT', 'BEEN', 'PREVIOUSLY', 'ASSURED', 'OF', 'RECEIVING', 'THE', 'KING', 'WOULD', 'NEVER', 'HAVE', 'PARTED', 'WITH', 'SO', 'CONSIDERABLE', 'A', 'SUM', 'AND', 'WHILE', 'THEY', 'WEAKENED', 'THEMSELVES', 'BY', 'THE', 'SAME', 'MEASURE', 'HAVE', 'STRENGTHENED', 'A', 'PEOPLE', 'WITH', 'WHOM', 'THEY', 'MUST', 'AFTERWARDS', 'HAVE', 'SO', 'MATERIAL', 'AN', 'INTEREST', 'TO', 'DISCUSS'] +8224-274384-0012-1449: ref=['IF', 'ANY', 'STILL', 'RETAINED', 'RANCOR', 'AGAINST', 'HIM', 'IN', 'HIS', 'PRESENT', 'CONDITION', 'THEY', 'PASSED', 'IN', 'SILENCE', 'WHILE', 'HIS', 'WELL', 'WISHERS', 'MORE', 'GENEROUS', 'THAN', 'PRUDENT', 'ACCOMPANIED', 'HIS', 'MARCH', 'WITH', 'TEARS', 'WITH', 'ACCLAMATIONS', 'AND', 'WITH', 'PRAYERS', 'FOR', 'HIS', 'SAFETY'] +8224-274384-0012-1449: hyp=['IF', 'ANY', 
'STILL', 'RETAINED', 'RANK', 'OR', 'AGAINST', 'HIM', 'IN', 'HIS', 'PRESENT', 'CONDITION', 'THEY', 'PASSED', 'IN', 'SILENCE', 'WHILE', 'HIS', 'WELL', 'WISHERS', 'MORE', 'GENEROUS', 'THAN', 'PRUDENT', 'ACCOMPANIED', 'HIS', 'MARCH', 'WITH', 'TEARS', 'WITH', 'ACCLAMATIONS', 'AND', 'WITH', 'PRAYERS', 'FOR', 'HIS', 'SAFETY'] +8224-274384-0013-1450: ref=['HIS', 'DEATH', 'IN', 'THIS', 'CONJUNCTURE', 'WAS', 'A', 'PUBLIC', 'MISFORTUNE'] +8224-274384-0013-1450: hyp=['HIS', 'DEATH', 'IN', 'THIS', 'CONJUNCTURE', 'WAS', 'A', 'PUBLIC', 'MISFORTUNE'] +8230-279154-0000-617: ref=['THE', 'ANALYSIS', 'OF', 'KNOWLEDGE', 'WILL', 'OCCUPY', 'US', 'UNTIL', 'THE', 'END', 'OF', 'THE', 'THIRTEENTH', 'LECTURE', 'AND', 'IS', 'THE', 'MOST', 'DIFFICULT', 'PART', 'OF', 'OUR', 'WHOLE', 'ENTERPRISE'] +8230-279154-0000-617: hyp=['THE', 'ANALYSIS', 'OF', 'KNOWLEDGE', 'WILL', 'OCCUPY', 'US', 'UNTIL', 'THE', 'END', 'OF', 'THE', 'THIRTEENTH', 'LECTURE', 'AND', 'IS', 'THE', 'MOST', 'DIFFICULT', 'PART', 'OF', 'OUR', 'WHOLE', 'ENTERPRISE'] +8230-279154-0001-618: ref=['WHAT', 'IS', 'CALLED', 'PERCEPTION', 'DIFFERS', 'FROM', 'SENSATION', 'BY', 'THE', 'FACT', 'THAT', 'THE', 'SENSATIONAL', 'INGREDIENTS', 'BRING', 'UP', 'HABITUAL', 'ASSOCIATES', 'IMAGES', 'AND', 'EXPECTATIONS', 'OF', 'THEIR', 'USUAL', 'CORRELATES', 'ALL', 'OF', 'WHICH', 'ARE', 'SUBJECTIVELY', 'INDISTINGUISHABLE', 'FROM', 'THE', 'SENSATION'] +8230-279154-0001-618: hyp=['WHAT', 'IS', 'CALLED', 'PERCEPTION', 'DIFFERS', 'FROM', 'SENSATION', 'BY', 'THE', 'FACT', 'THAT', 'THE', 'SENSATIONAL', 'INGREDIENTS', 'BRING', 'UP', 'HABITUAL', 'ASSOCIATES', 'IMAGES', 'AND', 'EXPECTATIONS', 'OF', 'THEIR', 'USUAL', 'COROLLETS', 'ALL', 'OF', 'WHICH', 'ARE', 'SUBJECTIVELY', 'INDISTINGUISHABLE', 'FROM', 'THE', 'SENSATION'] +8230-279154-0002-619: ref=['WHETHER', 'OR', 'NOT', 'THIS', 'PRINCIPLE', 'IS', 'LIABLE', 'TO', 'EXCEPTIONS', 'EVERYONE', 'WOULD', 'AGREE', 'THAT', 'IS', 'HAS', 'A', 'BROAD', 'MEASURE', 'OF', 'TRUTH', 'THOUGH', 'THE', 'WORD', 'EXACTLY', 'MIGHT', 'SEEM', 'AN', 'OVERSTATEMENT', 'AND', 'IT', 'MIGHT', 'SEEM', 'MORE', 'CORRECT', 'TO', 'SAY', 'THAT', 'IDEAS', 'APPROXIMATELY', 'REPRESENT', 'IMPRESSIONS'] +8230-279154-0002-619: hyp=['WHETHER', 'OR', 'NOT', 'THIS', 'PRINCIPLE', 'IS', 'LIABLE', 'TO', 'EXCEPTIONS', 'EVERY', 'ONE', 'WOULD', 'AGREE', 'THAT', 'IT', 'HAS', 'A', 'BROAD', 'MEASURE', 'OF', 'TRUTH', 'THOUGH', 'THE', 'WORD', 'EXACTLY', 'MIGHT', 'SEEM', 'AN', 'OVERSTATEMENT', 'AND', 'IT', 'MIGHT', 'SEEM', 'MORE', 'CORRECT', 'TO', 'SAY', 'THAT', 'IDEAS', 'APPROXIMATELY', 'REPRESENT', 'IMPRESSIONS'] +8230-279154-0003-620: ref=['AND', 'WHAT', 'SORT', 'OF', 'EVIDENCE', 'IS', 'LOGICALLY', 'POSSIBLE'] +8230-279154-0003-620: hyp=['AND', 'WHAT', 'SORT', 'OF', 'EVIDENCE', 'IS', 'LOGICALLY', 'POSSIBLE'] +8230-279154-0004-621: ref=['THERE', 'IS', 'NO', 'LOGICAL', 'IMPOSSIBILITY', 'IN', 'THE', 'HYPOTHESIS', 'THAT', 'THE', 'WORLD', 'SPRANG', 'INTO', 'BEING', 'FIVE', 'MINUTES', 'AGO', 'EXACTLY', 'AS', 'IT', 'THEN', 'WAS', 'WITH', 'A', 'POPULATION', 'THAT', 'REMEMBERED', 'A', 'WHOLLY', 'UNREAL', 'PAST'] +8230-279154-0004-621: hyp=['THERE', 'IS', 'NO', 'LOGICAL', 'IMPOSSIBILITY', 'IN', 'THE', 'HYPOTHESIS', 'THAT', 'THE', 'WORLD', 'SPRANG', 'INTO', 'BEING', 'FIVE', 'MINUTES', 'AGO', 'EXACTLY', 'AS', 'IT', 'THEN', 'WAS', 'WITH', 'THE', 'POPULATION', 'THAT', 'REMEMBERED', 'A', 'WHOLLY', 'UNREAL', 'PAST'] +8230-279154-0005-622: ref=['ALL', 'THAT', 'I', 'AM', 'DOING', 'IS', 'TO', 'USE', 'ITS', 'LOGICAL', 'TENABILITY', 'AS', 'A', 'HELP', 'IN', 'THE', 'ANALYSIS', 'OF', 'WHAT', 'OCCURS', 'WHEN', 
'WE', 'REMEMBER'] +8230-279154-0005-622: hyp=['ALL', 'THAT', 'I', 'AM', 'DOING', 'IS', 'TO', 'USE', 'ITS', 'LOGICAL', 'TENABILITY', 'AS', 'A', 'HELP', 'IN', 'THE', 'ANALYSIS', 'OF', 'WHAT', 'OCCURS', 'WHEN', 'WE', 'REMEMBER'] +8230-279154-0006-623: ref=['THE', 'BEHAVIOURIST', 'WHO', 'ATTEMPTS', 'TO', 'MAKE', 'PSYCHOLOGY', 'A', 'RECORD', 'OF', 'BEHAVIOUR', 'HAS', 'TO', 'TRUST', 'HIS', 'MEMORY', 'IN', 'MAKING', 'THE', 'RECORD'] +8230-279154-0006-623: hyp=['THE', 'BEHAVIOURIST', 'WHO', 'ATTEMPTS', 'TO', 'MAKE', 'PSYCHOLOGY', 'A', 'RECORD', 'OF', 'BEHAVIOR', 'HAS', 'TO', 'TRUST', 'HIS', 'MEMORY', 'IN', 'MAKING', 'THE', 'RECORD'] +8230-279154-0007-624: ref=['HABIT', 'IS', 'A', 'CONCEPT', 'INVOLVING', 'THE', 'OCCURRENCE', 'OF', 'SIMILAR', 'EVENTS', 'AT', 'DIFFERENT', 'TIMES', 'IF', 'THE', 'BEHAVIOURIST', 'FEELS', 'CONFIDENT', 'THAT', 'THERE', 'IS', 'SUCH', 'A', 'PHENOMENON', 'AS', 'HABIT', 'THAT', 'CAN', 'ONLY', 'BE', 'BECAUSE', 'HE', 'TRUSTS', 'HIS', 'MEMORY', 'WHEN', 'IT', 'ASSURES', 'HIM', 'THAT', 'THERE', 'HAVE', 'BEEN', 'OTHER', 'TIMES'] +8230-279154-0007-624: hyp=['HABIT', 'IS', 'A', 'CONCEPT', 'INVOLVING', 'THE', 'OCCURRENCE', 'OF', 'SIMILAR', 'EVENTS', 'AT', 'DIFFERENT', 'TIMES', 'IF', 'THE', 'BEHAVIORIST', 'FILLS', 'CONFIDENT', 'THAT', 'THERE', 'IS', 'SUCH', 'A', 'PHENOMENON', 'AS', 'HABIT', 'THAT', 'CAN', 'ONLY', 'BE', 'BECAUSE', 'HE', 'TRUSTS', 'HIS', 'MEMORY', 'WHEN', 'IT', 'ASSURES', 'HIM', 'THAT', 'THERE', 'HAVE', 'BEEN', 'OTHER', 'TIMES'] +8230-279154-0008-625: ref=['BUT', 'I', 'DO', 'NOT', 'THINK', 'SUCH', 'AN', 'INFERENCE', 'IS', 'WARRANTED'] +8230-279154-0008-625: hyp=['BUT', 'I', 'DO', 'NOT', 'THINK', 'SUCH', 'AN', 'EFFERENCE', 'IS', 'WARRANTED'] +8230-279154-0009-626: ref=['OUR', 'CONFIDENCE', 'OR', 'LACK', 'OF', 'CONFIDENCE', 'IN', 'THE', 'ACCURACY', 'OF', 'A', 'MEMORY', 'IMAGE', 'MUST', 'IN', 'FUNDAMENTAL', 'CASES', 'BE', 'BASED', 'UPON', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'IMAGE', 'ITSELF', 'SINCE', 'WE', 'CANNOT', 'EVOKE', 'THE', 'PAST', 'BODILY', 'AND', 'COMPARE', 'IT', 'WITH', 'THE', 'PRESENT', 'IMAGE'] +8230-279154-0009-626: hyp=['OUR', 'CONFIDENCE', 'OR', 'LACK', 'OF', 'CONFIDENCE', 'IN', 'THE', 'ACCURACY', 'OF', 'A', 'MEMORY', 'IMAGE', 'MUST', 'IN', 'FUNDAMENTAL', 'CASES', 'BE', 'BASED', 'UPON', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'IMAGE', 'ITSELF', 'SINCE', 'WE', 'CANNOT', 'EVOKE', 'THE', 'PAST', 'BODILY', 'AND', 'COMPARE', 'IT', 'WITH', 'THE', 'PRESENT', 'IMAGE'] +8230-279154-0010-627: ref=['WE', 'SOMETIMES', 'HAVE', 'IMAGES', 'THAT', 'ARE', 'BY', 'NO', 'MEANS', 'PECULIARLY', 'VAGUE', 'WHICH', 'YET', 'WE', 'DO', 'NOT', 'TRUST', 'FOR', 'EXAMPLE', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'FATIGUE', 'WE', 'MAY', 'SEE', 'A', "FRIEND'S", 'FACE', 'VIVIDLY', 'AND', 'CLEARLY', 'BUT', 'HORRIBLY', 'DISTORTED'] +8230-279154-0010-627: hyp=['WE', 'SOMETIMES', 'HAVE', 'IMAGES', 'THAT', 'ARE', 'BY', 'NO', 'MEANS', 'PECULIARLY', 'VAGUE', 'WHICH', 'YET', 'WE', 'DO', 'NOT', 'TRUST', 'FOR', 'EXAMPLE', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'FATIGUE', 'WE', 'MAY', 'SEE', 'A', "FRIEND'S", 'FACE', 'VIVIDLY', 'AND', 'CLEARLY', 'BUT', 'HORRIBLY', 'DISTORTED'] +8230-279154-0011-628: ref=['SOME', 'IMAGES', 'LIKE', 'SOME', 'SENSATIONS', 'FEEL', 'VERY', 'FAMILIAR', 'WHILE', 'OTHERS', 'FEEL', 'STRANGE'] +8230-279154-0011-628: hyp=['SOME', 'IMAGES', 'LIKE', 'SOME', 'SENSATIONS', 'FEEL', 'VERY', 'FAMILIAR', 'WHILE', 'OTHERS', 'FEEL', 'STRANGE'] +8230-279154-0012-629: ref=['FAMILIARITY', 'IS', 'A', 'FEELING', 'CAPABLE', 'OF', 'DEGREES'] +8230-279154-0012-629: hyp=['FAMILIARITY', 'IS', 'A', 'FILLING', 
'CAPABLE', 'OF', 'DEGREES'] +8230-279154-0013-630: ref=['IN', 'AN', 'IMAGE', 'OF', 'A', 'WELL', 'KNOWN', 'FACE', 'FOR', 'EXAMPLE', 'SOME', 'PARTS', 'MAY', 'FEEL', 'MORE', 'FAMILIAR', 'THAN', 'OTHERS', 'WHEN', 'THIS', 'HAPPENS', 'WE', 'HAVE', 'MORE', 'BELIEF', 'IN', 'THE', 'ACCURACY', 'OF', 'THE', 'FAMILIAR', 'PARTS', 'THAN', 'IN', 'THAT', 'OF', 'THE', 'UNFAMILIAR', 'PARTS'] +8230-279154-0013-630: hyp=['IN', 'AN', 'IMAGE', 'OF', 'A', 'WELL', 'KNOWN', 'FACE', 'FOR', 'EXAMPLE', 'SOME', 'PARTS', 'MAY', 'FEEL', 'MORE', 'FAMILIAR', 'THAN', 'OTHERS', 'WHEN', 'THIS', 'HAPPENS', 'WE', 'HAVE', 'MORE', 'BELIEF', 'IN', 'THE', 'ACCURACY', 'OF', 'THE', 'FAMILIAR', 'PARTS', 'THAN', 'IN', 'THAT', 'OF', 'THE', 'UNFAMILIAR', 'PARTS'] +8230-279154-0014-631: ref=['I', 'COME', 'NOW', 'TO', 'THE', 'OTHER', 'CHARACTERISTIC', 'WHICH', 'MEMORY', 'IMAGES', 'MUST', 'HAVE', 'IN', 'ORDER', 'TO', 'ACCOUNT', 'FOR', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0014-631: hyp=['I', 'COME', 'NOW', 'TO', 'THE', 'OTHER', 'CHARACTERISTIC', 'WHICH', 'MEMORY', 'IMAGES', 'MUST', 'HAVE', 'IN', 'ORDER', 'TO', 'ACCOUNT', 'FOR', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0015-632: ref=['THEY', 'MUST', 'HAVE', 'SOME', 'CHARACTERISTIC', 'WHICH', 'MAKES', 'US', 'REGARD', 'THEM', 'AS', 'REFERRING', 'TO', 'MORE', 'OR', 'LESS', 'REMOTE', 'PORTIONS', 'OF', 'THE', 'PAST'] +8230-279154-0015-632: hyp=['THEY', 'MUST', 'HAVE', 'SOME', 'CHARACTERISTIC', 'WHICH', 'MAKES', 'US', 'REGARD', 'THEM', 'AS', 'REFERRING', 'TO', 'MORE', 'OR', 'LESS', 'REMOTE', 'PORTIONS', 'OF', 'THE', 'PAST'] +8230-279154-0016-633: ref=['IN', 'ACTUAL', 'FACT', 'THERE', 'ARE', 'DOUBTLESS', 'VARIOUS', 'FACTORS', 'THAT', 'CONCUR', 'IN', 'GIVING', 'US', 'THE', 'FEELING', 'OF', 'GREATER', 'OR', 'LESS', 'REMOTENESS', 'IN', 'SOME', 'REMEMBERED', 'EVENT'] +8230-279154-0016-633: hyp=['IN', 'ACTUAL', 'FACT', 'THERE', 'ARE', 'DOUBTLESS', 'VARIOUS', 'FACTORS', 'THAT', 'CONCUR', 'IN', 'GIVING', 'US', 'THE', 'FEELING', 'OF', 'GREATER', 'OR', 'LESS', 'REMOTENESS', 'IN', 'SOME', 'REMEMBERED', 'EVENT'] +8230-279154-0017-634: ref=['THERE', 'MAY', 'BE', 'A', 'SPECIFIC', 'FEELING', 'WHICH', 'COULD', 'BE', 'CALLED', 'THE', 'FEELING', 'OF', 'PASTNESS', 'ESPECIALLY', 'WHERE', 'IMMEDIATE', 'MEMORY', 'IS', 'CONCERNED'] +8230-279154-0017-634: hyp=['THERE', 'MAY', 'BE', 'A', 'SPECIFIC', 'FEELING', 'WHICH', 'COULD', 'BE', 'CALLED', 'THE', 'FEELING', 'OF', 'PASTNESS', 'ESPECIALLY', 'WHERE', 'IMMEDIATE', 'MEMORY', 'IS', 'CONCERNED'] +8230-279154-0018-635: ref=['THERE', 'IS', 'OF', 'COURSE', 'A', 'DIFFERENCE', 'BETWEEN', 'KNOWING', 'THE', 'TEMPORAL', 'RELATION', 'OF', 'A', 'REMEMBERED', 'EVENT', 'TO', 'THE', 'PRESENT', 'AND', 'KNOWING', 'THE', 'TIME', 'ORDER', 'OF', 'TWO', 'REMEMBERED', 'EVENTS'] +8230-279154-0018-635: hyp=['THERE', 'IS', 'OF', 'COURSE', 'A', 'DIFFERENCE', 'BETWEEN', 'KNOWING', 'THE', 'TEMPORAL', 'RELATION', 'OF', 'A', 'REMEMBERED', 'EVENT', 'TO', 'THE', 'PRESENT', 'AND', 'KNOWING', 'THE', 'TIME', 'ORDER', 'OF', 'TWO', 'REMEMBERED', 'EVENTS'] +8230-279154-0019-636: ref=['IT', 'WOULD', 'SEEM', 'THAT', 'ONLY', 'RATHER', 'RECENT', 'EVENTS', 'CAN', 'BE', 'PLACED', 'AT', 'ALL', 'ACCURATELY', 'BY', 'MEANS', 'OF', 'FEELINGS', 'GIVING', 'THEIR', 'TEMPORAL', 'RELATION', 'TO', 'THE', 'PRESENT', 'BUT', 'IT', 'IS', 'CLEAR', 'THAT', 'SUCH', 'FEELINGS', 'MUST', 'PLAY', 'AN', 'ESSENTIAL', 'PART', 'IN', 'THE', 'PROCESS', 'OF', 'DATING', 'REMEMBERED', 'EVENTS'] +8230-279154-0019-636: hyp=['IT', 'WOULD', 'SEEM', 'THAT', 'ONLY', 'RATHER', 'RECENT', 'EVENTS', 'CAN', 'BE', 'PLACED', 
'AT', 'ALL', 'ACCURATELY', 'BY', 'MEANS', 'OF', 'FEELINGS', 'GIVING', 'THEIR', 'TEMPORAL', 'RELATION', 'TO', 'THE', 'PRESENT', 'BUT', 'IT', 'IS', 'CLEAR', 'THAT', 'SUCH', 'FEELINGS', 'MUST', 'PLAY', 'AN', 'ESSENTIAL', 'PART', 'IN', 'THE', 'PROCESS', 'OF', 'DATING', 'REMEMBERED', 'EVENTS'] +8230-279154-0020-637: ref=['IF', 'WE', 'HAD', 'RETAINED', 'THE', 'SUBJECT', 'OR', 'ACT', 'IN', 'KNOWLEDGE', 'THE', 'WHOLE', 'PROBLEM', 'OF', 'MEMORY', 'WOULD', 'HAVE', 'BEEN', 'COMPARATIVELY', 'SIMPLE'] +8230-279154-0020-637: hyp=['IF', 'WE', 'HAD', 'RETAINED', 'THE', 'SUBJECT', 'OR', 'ACT', 'IN', 'KNOWLEDGE', 'THE', 'WHOLE', 'PROBLEM', 'OF', 'MEMORY', 'WOULD', 'HAVE', 'BEEN', 'COMPARATIVELY', 'SIMPLE'] +8230-279154-0021-638: ref=['REMEMBERING', 'HAS', 'TO', 'BE', 'A', 'PRESENT', 'OCCURRENCE', 'IN', 'SOME', 'WAY', 'RESEMBLING', 'OR', 'RELATED', 'TO', 'WHAT', 'IS', 'REMEMBERED'] +8230-279154-0021-638: hyp=['REMEMBERING', 'HAS', 'TO', 'BE', 'A', 'PRESENT', 'OCCURRENCE', 'IN', 'SOME', 'WAY', 'RESEMBLING', 'OR', 'RELATED', 'TO', 'WHAT', 'IS', 'REMEMBERED'] +8230-279154-0022-639: ref=['SOME', 'POINTS', 'MAY', 'BE', 'TAKEN', 'AS', 'FIXED', 'AND', 'SUCH', 'AS', 'ANY', 'THEORY', 'OF', 'MEMORY', 'MUST', 'ARRIVE', 'AT'] +8230-279154-0022-639: hyp=['SOME', 'POINTS', 'MAY', 'BE', 'TAKEN', 'AS', 'FIXED', 'AND', 'SUCH', 'AS', 'ANY', 'THEORY', 'OF', 'MEMORY', 'MUST', 'ARRIVE', 'AT'] +8230-279154-0023-640: ref=['IN', 'THIS', 'CASE', 'AS', 'IN', 'MOST', 'OTHERS', 'WHAT', 'MAY', 'BE', 'TAKEN', 'AS', 'CERTAIN', 'IN', 'ADVANCE', 'IS', 'RATHER', 'VAGUE'] +8230-279154-0023-640: hyp=['IN', 'THIS', 'CASE', 'AS', 'IN', 'MOST', 'OTHERS', 'WHAT', 'MAY', 'BE', 'TAKEN', 'AS', 'CERTAIN', 'IN', 'ADVANCE', 'IS', 'RATHER', 'VAGUE'] +8230-279154-0024-641: ref=['THE', 'FIRST', 'OF', 'OUR', 'VAGUE', 'BUT', 'INDUBITABLE', 'DATA', 'IS', 'THAT', 'THERE', 'IS', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0024-641: hyp=['THE', 'FIRST', 'OF', 'OUR', 'VAGUE', 'BUT', 'INDUBITABLE', 'DATA', 'IS', 'THAT', 'THERE', 'IS', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0025-642: ref=['WE', 'MIGHT', 'PROVISIONALLY', 'THOUGH', 'PERHAPS', 'NOT', 'QUITE', 'CORRECTLY', 'DEFINE', 'MEMORY', 'AS', 'THAT', 'WAY', 'OF', 'KNOWING', 'ABOUT', 'THE', 'PAST', 'WHICH', 'HAS', 'NO', 'ANALOGUE', 'IN', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'FUTURE', 'SUCH', 'A', 'DEFINITION', 'WOULD', 'AT', 'LEAST', 'SERVE', 'TO', 'MARK', 'THE', 'PROBLEM', 'WITH', 'WHICH', 'WE', 'ARE', 'CONCERNED', 'THOUGH', 'SOME', 'EXPECTATIONS', 'MAY', 'DESERVE', 'TO', 'RANK', 'WITH', 'MEMORY', 'AS', 'REGARDS', 'IMMEDIACY'] +8230-279154-0025-642: hyp=['WE', 'MIGHT', 'PROVISIONALLY', 'THOUGH', 'PERHAPS', 'NOT', 'QUITE', 'CORRECTLY', 'DEFINE', 'MEMORY', 'AS', 'THAT', 'WAY', 'OF', 'KNOWING', 'ABOUT', 'THE', 'PAST', 'WHICH', 'HAS', 'NO', 'ANALOGUE', 'IN', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'FUTURE', 'SUCH', 'A', 'DEFINITION', 'WOULD', 'AT', 'LEAST', 'SERVE', 'TO', 'MARK', 'THE', 'PROBLEM', 'WITH', 'WHICH', 'WE', 'ARE', 'CONCERNED', 'THOUGH', 'SOME', 'EXPECTATIONS', 'MAY', 'DESERVE', 'TO', 'RANK', 'WITH', 'MEMORY', 'AS', 'REGARDS', 'IMMEDIACY'] +8230-279154-0026-643: ref=['THIS', 'DISTINCTION', 'IS', 'VITAL', 'TO', 'THE', 'UNDERSTANDING', 'OF', 'MEMORY', 'BUT', 'IT', 'IS', 'NOT', 'SO', 'EASY', 'TO', 'CARRY', 'OUT', 'IN', 'PRACTICE', 'AS', 'IT', 'IS', 'TO', 'DRAW', 'IN', 'THEORY'] +8230-279154-0026-643: hyp=['THIS', 'DISTINCTION', 'IS', 'VITAL', 'TO', 'THE', 'UNDERSTANDING', 'OF', 'MEMORY', 'BUT', 'IT', 'IS', 'NOT', 'SO', 'EASY', 'TO', 'CARRY', 'OUT', 'IN', 'PRACTICE', 'AS', 'IT', 'IS', 'TO', 'DRAW', 'IN', 
'THEORY'] +8230-279154-0027-644: ref=['A', 'GRAMOPHONE', 'BY', 'THE', 'HELP', 'OF', 'SUITABLE', 'RECORDS', 'MIGHT', 'RELATE', 'TO', 'US', 'THE', 'INCIDENTS', 'OF', 'ITS', 'PAST', 'AND', 'PEOPLE', 'ARE', 'NOT', 'SO', 'DIFFERENT', 'FROM', 'GRAMOPHONES', 'AS', 'THEY', 'LIKE', 'TO', 'BELIEVE'] +8230-279154-0027-644: hyp=['A', 'GRAMMON', 'BY', 'THE', 'HELP', 'OF', 'SUITABLE', 'RECORDS', 'MIGHT', 'RELATE', 'TO', 'US', 'THE', 'INCIDENTS', 'OF', 'ITS', 'PAST', 'AND', 'PEOPLE', 'ARE', 'NOT', 'SO', 'DIFFERENT', 'FROM', 'GRAMOPHONES', 'AS', 'THEY', 'LIKE', 'TO', 'BELIEVE'] +8230-279154-0028-645: ref=['I', 'CAN', 'SET', 'TO', 'WORK', 'NOW', 'TO', 'REMEMBER', 'THINGS', 'I', 'NEVER', 'REMEMBERED', 'BEFORE', 'SUCH', 'AS', 'WHAT', 'I', 'HAD', 'TO', 'EAT', 'FOR', 'BREAKFAST', 'THIS', 'MORNING', 'AND', 'IT', 'CAN', 'HARDLY', 'BE', 'WHOLLY', 'HABIT', 'THAT', 'ENABLES', 'ME', 'TO', 'DO', 'THIS'] +8230-279154-0028-645: hyp=['I', 'CAN', 'SET', 'TO', 'WORK', 'NOW', 'TO', 'REMEMBER', 'THINGS', 'I', 'NEVER', 'REMEMBERED', 'BEFORE', 'SUCH', 'AS', 'WHAT', 'I', 'HAD', 'TO', 'EAT', 'FOR', 'BREAKFAST', 'THIS', 'MORNING', 'AND', 'IT', 'CAN', 'HARDLY', 'BE', 'WHOLLY', 'HABIT', 'THAT', 'ENABLES', 'ME', 'TO', 'DO', 'THIS'] +8230-279154-0029-646: ref=['THE', 'FACT', 'THAT', 'A', 'MAN', 'CAN', 'RECITE', 'A', 'POEM', 'DOES', 'NOT', 'SHOW', 'THAT', 'HE', 'REMEMBERS', 'ANY', 'PREVIOUS', 'OCCASION', 'ON', 'WHICH', 'HE', 'HAS', 'RECITED', 'OR', 'READ', 'IT'] +8230-279154-0029-646: hyp=['THE', 'FACT', 'THAT', 'A', 'MAN', 'CAN', 'RECITE', 'A', 'POEM', 'DOES', 'NOT', 'SHOW', 'THAT', 'HE', 'REMEMBERS', 'ANY', 'PREVIOUS', 'OCCASION', 'ON', 'WHICH', 'HE', 'HAS', 'RECITED', 'OR', 'READ', 'IT'] +8230-279154-0030-647: ref=["SEMON'S", 'TWO', 'BOOKS', 'MENTIONED', 'IN', 'AN', 'EARLIER', 'LECTURE', 'DO', 'NOT', 'TOUCH', 'KNOWLEDGE', 'MEMORY', 'AT', 'ALL', 'CLOSELY'] +8230-279154-0030-647: hyp=['SIMMONS', 'TWO', 'BOOKS', 'MENTIONED', 'IN', 'AN', 'EARLIER', 'LECTURE', 'DO', 'NOT', 'TOUCH', 'KNOWLEDGE', 'MEMORY', 'AT', 'ALL', 'CLOSELY'] +8230-279154-0031-648: ref=['THEY', 'GIVE', 'LAWS', 'ACCORDING', 'TO', 'WHICH', 'IMAGES', 'OF', 'PAST', 'OCCURRENCES', 'COME', 'INTO', 'OUR', 'MINDS', 'BUT', 'DO', 'NOT', 'DISCUSS', 'OUR', 'BELIEF', 'THAT', 'THESE', 'IMAGES', 'REFER', 'TO', 'PAST', 'OCCURRENCES', 'WHICH', 'IS', 'WHAT', 'CONSTITUTES', 'KNOWLEDGE', 'MEMORY'] +8230-279154-0031-648: hyp=['THEY', 'GIVE', 'LAWS', 'ACCORDING', 'TO', 'WHICH', 'IMAGES', 'OF', 'PAST', 'OCCURRENCES', 'COME', 'INTO', 'OUR', 'MINDS', 'BUT', 'DO', 'NOT', 'DISCUSS', 'OUR', 'BELIEF', 'THAT', 'THESE', 'IMAGES', 'REFER', 'TO', 'PAST', 'OCCURRENCES', 'WHICH', 'IS', 'WHAT', 'CONSTITUTES', 'KNOWLEDGE', 'MEMORY'] +8230-279154-0032-649: ref=['IT', 'IS', 'THIS', 'THAT', 'IS', 'OF', 'INTEREST', 'TO', 'THEORY', 'OF', 'KNOWLEDGE'] +8230-279154-0032-649: hyp=['IT', 'IS', 'THIS', 'THAT', 'IS', 'OF', 'INTEREST', 'TO', 'THEORY', 'OF', 'KNOWLEDGE'] +8230-279154-0033-650: ref=['IT', 'IS', 'BY', 'NO', 'MEANS', 'ALWAYS', 'RELIABLE', 'ALMOST', 'EVERYBODY', 'HAS', 'AT', 'SOME', 'TIME', 'EXPERIENCED', 'THE', 'WELL', 'KNOWN', 'ILLUSION', 'THAT', 'ALL', 'THAT', 'IS', 'HAPPENING', 'NOW', 'HAPPENED', 'BEFORE', 'AT', 'SOME', 'TIME'] +8230-279154-0033-650: hyp=['IT', 'IS', 'BY', 'NO', 'MEANS', 'ALWAYS', 'RELIABLE', 'ALMOST', 'EVERYBODY', 'HAS', 'AT', 'SOME', 'TIME', 'EXPERIENCED', 'THE', 'WELL', 'KNOWN', 'ILLUSION', 'THAT', 'ALL', 'THAT', 'IS', 'HAPPENING', 'NOW', 'HAPPENED', 'BEFORE', 'AT', 'SOME', 'TIME'] +8230-279154-0034-651: ref=['WHENEVER', 'THE', 'SENSE', 'OF', 'FAMILIARITY', 'OCCURS', 'WITHOUT', 
'A', 'DEFINITE', 'OBJECT', 'IT', 'LEADS', 'US', 'TO', 'SEARCH', 'THE', 'ENVIRONMENT', 'UNTIL', 'WE', 'ARE', 'SATISFIED', 'THAT', 'WE', 'HAVE', 'FOUND', 'THE', 'APPROPRIATE', 'OBJECT', 'WHICH', 'LEADS', 'US', 'TO', 'THE', 'JUDGMENT', 'THIS', 'IS', 'FAMILIAR'] +8230-279154-0034-651: hyp=['WHENEVER', 'THE', 'SENSE', 'OF', 'FAMILIARITY', 'OCCURS', 'WITHOUT', 'A', 'DEFINITE', 'OBJECT', 'IT', 'LEAVES', 'US', 'TO', 'SEARCH', 'THE', 'ENVIRONMENT', 'UNTIL', 'WE', 'ARE', 'SATISFIED', 'THAT', 'WE', 'HAVE', 'FOUND', 'THE', 'APPROPRIATE', 'OBJECT', 'WHICH', 'LEADS', 'US', 'TO', 'THE', 'JUDGMENT', 'THIS', 'IS', 'FAMILIAR'] +8230-279154-0035-652: ref=['THUS', 'NO', 'KNOWLEDGE', 'AS', 'TO', 'THE', 'PAST', 'IS', 'TO', 'BE', 'DERIVED', 'FROM', 'THE', 'FEELING', 'OF', 'FAMILIARITY', 'ALONE'] +8230-279154-0035-652: hyp=['THUS', 'NO', 'KNOWLEDGE', 'AS', 'TO', 'THE', 'PAST', 'IS', 'TO', 'BE', 'DERIVED', 'FROM', 'THE', 'FEELING', 'OF', 'FAMILIARITY', 'ALONE'] +8230-279154-0036-653: ref=['A', 'FURTHER', 'STAGE', 'IS', 'RECOGNITION'] +8230-279154-0036-653: hyp=['A', 'FURTHER', 'STAGE', 'IS', 'RECOGNITION'] +8230-279154-0037-654: ref=['RECOGNITION', 'IN', 'THIS', 'SENSE', 'DOES', 'NOT', 'NECESSARILY', 'INVOLVE', 'MORE', 'THAN', 'A', 'HABIT', 'OF', 'ASSOCIATION', 'THE', 'KIND', 'OF', 'OBJECT', 'WE', 'ARE', 'SEEING', 'AT', 'THE', 'MOMENT', 'IS', 'ASSOCIATED', 'WITH', 'THE', 'WORD', 'CAT', 'OR', 'WITH', 'AN', 'AUDITORY', 'IMAGE', 'OF', 'PURRING', 'OR', 'WHATEVER', 'OTHER', 'CHARACTERISTIC', 'WE', 'MAY', 'HAPPEN', 'TO', 'RECOGNIZE', 'IN', 'THE', 'CAT', 'OF', 'THE', 'MOMENT'] +8230-279154-0037-654: hyp=['RECOGNITION', 'IN', 'THIS', 'SENSE', 'DOES', 'NOT', 'NECESSARILY', 'INVOLVE', 'MORE', 'THAN', 'A', 'HABIT', 'OF', 'ASSOCIATION', 'THE', 'KIND', 'OF', 'OBJECT', 'WE', 'ARE', 'SEEING', 'AT', 'THE', 'MOMENT', 'IS', 'ASSOCIATED', 'WITH', 'THE', 'WORD', 'CAT', 'OR', 'WITH', 'AN', 'AUDITORY', 'IMAGE', 'OF', 'PURRING', 'OR', 'WHATEVER', 'OTHER', 'CHARACTERISTIC', 'WE', 'MAY', 'HAPPEN', 'TO', 'RECOGNIZE', 'IN', 'THE', 'CAT', 'OF', 'THE', 'MOMENT'] +8230-279154-0038-655: ref=['WE', 'ARE', 'OF', 'COURSE', 'IN', 'FACT', 'ABLE', 'TO', 'JUDGE', 'WHEN', 'WE', 'RECOGNIZE', 'AN', 'OBJECT', 'THAT', 'WE', 'HAVE', 'SEEN', 'IT', 'BEFORE', 'BUT', 'THIS', 'JUDGMENT', 'IS', 'SOMETHING', 'OVER', 'AND', 'ABOVE', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'AND', 'MAY', 'VERY', 'PROBABLY', 'BE', 'IMPOSSIBLE', 'TO', 'ANIMALS', 'THAT', 'NEVERTHELESS', 'HAVE', 'THE', 'EXPERIENCE', 'OF', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'OF', 'THE', 'WORD'] +8230-279154-0038-655: hyp=['WE', 'ARE', 'OF', 'COURSE', 'IN', 'FACT', 'ABLE', 'TO', 'JUDGE', 'WHEN', 'WE', 'RECOGNIZE', 'AN', 'OBJECT', 'THAT', 'WE', 'HAVE', 'SEEN', 'IT', 'BEFORE', 'BUT', 'THIS', 'JUDGMENT', 'IS', 'SOMETHING', 'OVER', 'AND', 'ABOVE', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'AND', 'MAY', 'VERY', 'PROBABLY', 'BE', 'IMPOSSIBLE', 'TO', 'ANIMALS', 'THAT', 'NEVERTHELESS', 'HAVE', 'THE', 'EXPERIENCE', 'OF', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'OF', 'THE', 'WORD'] +8230-279154-0039-656: ref=['THIS', 'KNOWLEDGE', 'IS', 'MEMORY', 'IN', 'ONE', 'SENSE', 'THOUGH', 'IN', 'ANOTHER', 'IT', 'IS', 'NOT'] +8230-279154-0039-656: hyp=['THIS', 'KNOWLEDGE', 'IS', 'MEMORY', 'IN', 'ONE', 'SENSE', 'THOUGH', 'IN', 'ANOTHER', 'IT', 'IS', 'NOT'] +8230-279154-0040-657: ref=['THERE', 'ARE', 'HOWEVER', 'SEVERAL', 'POINTS', 'IN', 'WHICH', 'SUCH', 'AN', 'ACCOUNT', 'OF', 'RECOGNITION', 'IS', 'INADEQUATE', 'TO', 'BEGIN', 'WITH', 'IT', 'MIGHT', 'SEEM', 'AT', 'FIRST', 'SIGHT', 'MORE', 'CORRECT', 
'TO', 'DEFINE', 'RECOGNITION', 'AS', 'I', 'HAVE', 'SEEN', 'THIS', 'BEFORE', 'THAN', 'AS', 'THIS', 'HAS', 'EXISTED', 'BEFORE'] +8230-279154-0040-657: hyp=['THERE', 'ARE', 'HOWEVER', 'SEVERAL', 'POINTS', 'IN', 'WHICH', 'SUCH', 'AN', 'ACCOUNT', 'OF', 'RECOGNITION', 'IS', 'INADEQUATE', 'TO', 'BEGIN', 'WITH', 'IT', 'MIGHT', 'SEEM', 'AT', 'FIRST', 'SIGHT', 'MORE', 'CORRECT', 'TO', 'DEFINE', 'RECOGNITION', 'AS', 'I', 'HAVE', 'SEEN', 'THIS', 'BEFORE', 'THAN', 'AS', 'THIS', 'HAS', 'EXISTED', 'BEFORE'] +8230-279154-0041-658: ref=['THE', 'DEFINITION', 'OF', 'MY', 'EXPERIENCE', 'IS', 'DIFFICULT', 'BROADLY', 'SPEAKING', 'IT', 'IS', 'EVERYTHING', 'THAT', 'IS', 'CONNECTED', 'WITH', 'WHAT', 'I', 'AM', 'EXPERIENCING', 'NOW', 'BY', 'CERTAIN', 'LINKS', 'OF', 'WHICH', 'THE', 'VARIOUS', 'FORMS', 'OF', 'MEMORY', 'ARE', 'AMONG', 'THE', 'MOST', 'IMPORTANT'] +8230-279154-0041-658: hyp=['THE', 'DEFINITION', 'OF', 'MY', 'EXPERIENCE', 'IS', 'DIFFICULT', 'BROADLY', 'SPEAKING', 'IT', 'IS', 'EVERYTHING', 'THAT', 'IS', 'CONNECTED', 'WITH', 'WHAT', 'I', 'AM', 'EXPERIENCING', 'NOW', 'BY', 'CERTAIN', 'LINKS', 'OF', 'WHICH', 'THE', 'VARIOUS', 'FORMS', 'OF', 'MEMORY', 'ARE', 'AMONG', 'THE', 'MOST', 'IMPORTANT'] +8230-279154-0042-659: ref=['THUS', 'IF', 'I', 'RECOGNIZE', 'A', 'THING', 'THE', 'OCCASION', 'OF', 'ITS', 'PREVIOUS', 'EXISTENCE', 'IN', 'VIRTUE', 'OF', 'WHICH', 'I', 'RECOGNIZE', 'IT', 'FORMS', 'PART', 'OF', 'MY', 'EXPERIENCE', 'BY', 'DEFINITION', 'RECOGNITION', 'WILL', 'BE', 'ONE', 'OF', 'THE', 'MARKS', 'BY', 'WHICH', 'MY', 'EXPERIENCE', 'IS', 'SINGLED', 'OUT', 'FROM', 'THE', 'REST', 'OF', 'THE', 'WORLD'] +8230-279154-0042-659: hyp=['THUS', 'IF', 'I', 'RECOGNIZE', 'A', 'THING', 'THE', 'OCCASION', 'OF', 'ITS', 'PREVIOUS', 'EXISTENCE', 'IN', 'VIRTUE', 'OF', 'WHICH', 'I', 'RECOGNIZE', 'IT', 'FORMS', 'PART', 'OF', 'MY', 'EXPERIENCE', 'BY', 'DEFINITION', 'RECOGNITION', 'WILL', 'BE', 'ONE', 'OF', 'THE', 'MARKS', 'BY', 'WHICH', 'MY', 'EXPERIENCE', 'IS', 'SINGLED', 'OUT', 'FROM', 'THE', 'REST', 'OF', 'THE', 'WORLD'] +8230-279154-0043-660: ref=['OF', 'COURSE', 'THE', 'WORDS', 'THIS', 'HAS', 'EXISTED', 'BEFORE', 'ARE', 'A', 'VERY', 'INADEQUATE', 'TRANSLATION', 'OF', 'WHAT', 'ACTUALLY', 'HAPPENS', 'WHEN', 'WE', 'FORM', 'A', 'JUDGMENT', 'OF', 'RECOGNITION', 'BUT', 'THAT', 'IS', 'UNAVOIDABLE', 'WORDS', 'ARE', 'FRAMED', 'TO', 'EXPRESS', 'A', 'LEVEL', 'OF', 'THOUGHT', 'WHICH', 'IS', 'BY', 'NO', 'MEANS', 'PRIMITIVE', 'AND', 'ARE', 'QUITE', 'INCAPABLE', 'OF', 'EXPRESSING', 'SUCH', 'AN', 'ELEMENTARY', 'OCCURRENCE', 'AS', 'RECOGNITION'] +8230-279154-0043-660: hyp=['OF', 'COURSE', 'THE', 'WORDS', 'THIS', 'HAS', 'EXISTED', 'BEFORE', 'ARE', 'A', 'VERY', 'INADEQUATE', 'TRANSLATION', 'OF', 'WHAT', 'ACTUALLY', 'HAPPENS', 'WHEN', 'WE', 'FORM', 'A', 'JUDGMENT', 'OF', 'RECOGNITION', 'BUT', 'THAT', 'IS', 'UNAVOIDABLE', 'WORDS', 'ARE', 'FRAMED', 'TO', 'EXPRESS', 'A', 'LEVEL', 'OF', 'THOUGHT', 'WHICH', 'IS', 'BY', 'NO', 'MEANS', 'PRIMITIVE', 'AND', 'ARE', 'QUITE', 'INCAPABLE', 'OF', 'EXPRESSING', 'SUCH', 'AN', 'ELEMENTARY', 'OCCURRENCE', 'AS', 'RECOGNITION'] +8455-210777-0000-972: ref=['I', 'REMAINED', 'THERE', 'ALONE', 'FOR', 'MANY', 'HOURS', 'BUT', 'I', 'MUST', 'ACKNOWLEDGE', 'THAT', 'BEFORE', 'I', 'LEFT', 'THE', 'CHAMBERS', 'I', 'HAD', 'GRADUALLY', 'BROUGHT', 'MYSELF', 'TO', 'LOOK', 'AT', 'THE', 'MATTER', 'IN', 'ANOTHER', 'LIGHT'] +8455-210777-0000-972: hyp=['I', 'REMAINED', 'THERE', 'ALONE', 'FOR', 'MANY', 'HOURS', 'BUT', 'I', 'MUST', 'ACKNOWLEDGE', 'THAT', 'BEFORE', 'I', 'LEFT', 'THE', 'CHAMBERS', 'I', 'HAD', 'GRADUALLY', 'BROUGHT', 'MYSELF', 
'TO', 'LOOK', 'AT', 'THE', 'MATTER', 'IN', 'ANOTHER', 'LIGHT'] +8455-210777-0001-973: ref=['HAD', 'EVA', 'CRASWELLER', 'NOT', 'BEEN', 'GOOD', 'LOOKING', 'HAD', 'JACK', 'BEEN', 'STILL', 'AT', 'COLLEGE', 'HAD', 'SIR', 'KENNINGTON', 'OVAL', 'REMAINED', 'IN', 'ENGLAND', 'HAD', 'MISTER', 'BUNNIT', 'AND', 'THE', 'BAR', 'KEEPER', 'NOT', 'SUCCEEDED', 'IN', 'STOPPING', 'MY', 'CARRIAGE', 'ON', 'THE', 'HILL', 'SHOULD', 'I', 'HAVE', 'SUCCEEDED', 'IN', 'ARRANGING', 'FOR', 'THE', 'FINAL', 'DEPARTURE', 'OF', 'MY', 'OLD', 'FRIEND'] +8455-210777-0001-973: hyp=['HAD', 'EVER', 'CRUSSWELLER', 'NOT', 'BEEN', 'GOOD', 'LOOKING', 'HAD', 'JACK', 'BEEN', 'STILL', 'AT', 'COLLEGE', 'HAD', 'SIR', 'KENNINGTON', 'OVAL', 'REMAINED', 'IN', 'ENGLAND', 'HAD', 'MISTER', 'BUNNOT', 'AND', 'THE', 'BAR', 'KEEPER', 'NOT', 'SUCCEEDED', 'IN', 'STOPPING', 'MY', 'CARRIAGE', 'ON', 'THE', 'HILL', 'SHOULD', 'I', 'HAVE', 'SUCCEEDED', 'IN', 'A', 'RADIAN', 'FOR', 'THE', 'FINAL', 'DEPARTURE', 'OF', 'MY', 'OLD', 'FRIEND'] +8455-210777-0002-974: ref=['ON', 'ARRIVING', 'AT', 'HOME', 'AT', 'MY', 'OWN', 'RESIDENCE', 'I', 'FOUND', 'THAT', 'OUR', 'SALON', 'WAS', 'FILLED', 'WITH', 'A', 'BRILLIANT', 'COMPANY'] +8455-210777-0002-974: hyp=['ON', 'ARRIVING', 'AT', 'HOME', 'AT', 'MY', 'OWN', 'RESIDENCE', 'I', 'FOUND', 'THAT', 'OUR', 'SALON', 'WAS', 'FILLED', 'WITH', 'A', 'BRILLIANT', 'COMPANY'] +8455-210777-0003-975: ref=['AS', 'I', 'SPOKE', 'I', 'MADE', 'HIM', 'A', 'GRACIOUS', 'BOW', 'AND', 'I', 'THINK', 'I', 'SHOWED', 'HIM', 'BY', 'MY', 'MODE', 'OF', 'ADDRESS', 'THAT', 'I', 'DID', 'NOT', 'BEAR', 'ANY', 'GRUDGE', 'AS', 'TO', 'MY', 'INDIVIDUAL', 'SELF'] +8455-210777-0003-975: hyp=['AS', 'I', 'SPOKE', 'I', 'MADE', 'HIM', 'A', 'GRACIOUS', 'BOW', 'AND', 'I', 'THINK', 'I', 'SHOWED', 'HIM', 'BY', 'MY', 'MODE', 'OF', 'ADDRESS', 'THAT', 'I', 'DID', 'NOT', 'BEAR', 'ANY', 'GRUDGE', 'AS', 'TO', 'MY', 'INDIVIDUAL', 'SELF'] +8455-210777-0004-976: ref=['I', 'HAVE', 'COME', 'TO', 'YOUR', 'SHORES', 'MISTER', 'PRESIDENT', 'WITH', 'THE', 'PURPOSE', 'OF', 'SEEING', 'HOW', 'THINGS', 'ARE', 'PROGRESSING', 'IN', 'THIS', 'DISTANT', 'QUARTER', 'OF', 'THE', 'WORLD'] +8455-210777-0004-976: hyp=['I', 'HAVE', 'COME', 'TO', 'YOUR', 'SHORES', 'MISTER', 'PRESIDENT', 'WITH', 'THE', 'PURPOSE', 'OF', 'SEEING', 'HOW', 'THINGS', 'ARE', 'PROGRESSING', 'IN', 'THIS', 'DISTANT', 'QUARTER', 'OF', 'THE', 'WORLD'] +8455-210777-0005-977: ref=['WE', 'HAVE', 'OUR', 'LITTLE', 'STRUGGLES', 'HERE', 'AS', 'ELSEWHERE', 'AND', 'ALL', 'THINGS', 'CANNOT', 'BE', 'DONE', 'BY', 'ROSE', 'WATER'] +8455-210777-0005-977: hyp=['WE', 'HAVE', 'OUR', 'LITTLE', 'STRUGGLES', 'HERE', 'AS', 'ELSEWHERE', 'AND', 'ALL', 'THINGS', 'CANNOT', 'BE', 'DONE', 'BY', 'ROSE', 'WATER'] +8455-210777-0006-978: ref=['WE', 'ARE', 'QUITE', 'SATISFIED', 'NOW', 'CAPTAIN', 'BATTLEAX', 'SAID', 'MY', 'WIFE'] +8455-210777-0006-978: hyp=['WE', 'ARE', 'QUITE', 'SATISFIED', 'NOW', 'CAPTAIN', 'BATTLE', 'AXE', 'SAID', 'MY', 'WIFE'] +8455-210777-0007-979: ref=['QUITE', 'SATISFIED', 'SAID', 'EVA'] +8455-210777-0007-979: hyp=['QUITE', 'SATISFIED', 'SAID', 'EVA'] +8455-210777-0008-980: ref=['THE', 'LADIES', 'IN', 'COMPLIANCE', 'WITH', 'THAT', 'SOFTNESS', 'OF', 'HEART', 'WHICH', 'IS', 'THEIR', 'CHARACTERISTIC', 'ARE', 'ON', 'ONE', 'SIDE', 'AND', 'THE', 'MEN', 'BY', 'WHOM', 'THE', 'WORLD', 'HAS', 'TO', 'BE', 'MANAGED', 'ARE', 'ON', 'THE', 'OTHER'] +8455-210777-0008-980: hyp=['THE', 'LADIES', 'IN', 'COMPLIANCE', 'WITH', 'THAT', 'SOFTNESS', 'OF', 'HEART', 'WHICH', 'IS', 'THEIR', 'CHARACTERISTIC', 'ARE', 'ON', 'ONE', 'SIDE', 'AND', 'THE', 'MEN', 'BY', 
'WHOM', 'THE', 'WORLD', 'HAS', 'TO', 'BE', 'MANAGED', 'OR', 'ON', 'THE', 'OTHER'] +8455-210777-0009-981: ref=['NO', 'DOUBT', 'IN', 'PROCESS', 'OF', 'TIME', 'THE', 'LADIES', 'WILL', 'FOLLOW'] +8455-210777-0009-981: hyp=['NO', 'DOUBT', 'IN', 'PROCESS', 'OF', 'TIME', 'THE', 'LADIES', 'WILL', 'FOLLOW'] +8455-210777-0010-982: ref=['THEIR', 'MASTERS', 'SAID', 'MISSUS', 'NEVERBEND'] +8455-210777-0010-982: hyp=['THEIR', 'MASTER', 'SAID', 'MISSUS', 'NEVERBEND'] +8455-210777-0011-983: ref=['I', 'DID', 'NOT', 'MEAN', 'SAID', 'CAPTAIN', 'BATTLEAX', 'TO', 'TOUCH', 'UPON', 'PUBLIC', 'SUBJECTS', 'AT', 'SUCH', 'A', 'MOMENT', 'AS', 'THIS'] +8455-210777-0011-983: hyp=['I', 'DID', 'NOT', 'MEAN', 'SAID', 'CAPTAIN', 'BATTLEX', 'TO', 'TOUCH', 'UPON', 'PUBLIC', 'SUBJECTS', 'AT', 'SUCH', 'A', 'MOMENT', 'AS', 'THIS'] +8455-210777-0012-984: ref=['MISSUS', 'NEVERBEND', 'YOU', 'MUST', 'INDEED', 'BE', 'PROUD', 'OF', 'YOUR', 'SON'] +8455-210777-0012-984: hyp=['MISSUS', 'NEVERBEND', 'YOU', 'MUST', 'INDEED', 'BE', 'PROUD', 'OF', 'YOUR', 'SON'] +8455-210777-0013-985: ref=['JACK', 'HAD', 'BEEN', 'STANDING', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'ROOM', 'TALKING', 'TO', 'EVA', 'AND', 'WAS', 'NOW', 'REDUCED', 'TO', 'SILENCE', 'BY', 'HIS', 'PRAISES'] +8455-210777-0013-985: hyp=['JACK', 'HAD', 'BEEN', 'STANDING', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'ROOM', 'TALKING', 'TO', 'EVA', 'AND', 'WAS', 'NOW', 'REDUCED', 'TO', 'SILENCE', 'BY', 'HIS', 'PRAISES'] +8455-210777-0014-986: ref=['SIR', 'KENNINGTON', 'OVAL', 'IS', 'A', 'VERY', 'FINE', 'PLAYER', 'SAID', 'MY', 'WIFE'] +8455-210777-0014-986: hyp=['SIR', 'KENNINGTON', 'OVAL', 'IS', 'A', 'VERY', 'FINE', 'PLAYER', 'SAID', 'MY', 'WIFE'] +8455-210777-0015-987: ref=['I', 'AND', 'MY', 'WIFE', 'AND', 'SON', 'AND', 'THE', 'TWO', 'CRASWELLERS', 'AND', 'THREE', 'OR', 'FOUR', 'OTHERS', 'AGREED', 'TO', 'DINE', 'ON', 'BOARD', 'THE', 'SHIP', 'ON', 'THE', 'NEXT'] +8455-210777-0015-987: hyp=['I', 'AM', 'MY', 'WIFE', 'AND', 'SON', 'AND', 'THE', 'TWO', 'CRESTWELLERS', 'AND', 'THREE', 'OR', 'FOUR', 'OTHERS', 'AGREED', 'TO', 'DINE', 'ON', 'BOARD', 'THE', 'SHIP', 'ON', 'THE', 'NEXT'] +8455-210777-0016-988: ref=['THIS', 'I', 'FELT', 'WAS', 'PAID', 'TO', 'ME', 'AS', 'BEING', 'PRESIDENT', 'OF', 'THE', 'REPUBLIC', 'AND', 'I', 'ENDEAVOURED', 'TO', 'BEHAVE', 'MYSELF', 'WITH', 'SUCH', 'MINGLED', 'HUMILITY', 'AND', 'DIGNITY', 'AS', 'MIGHT', 'BEFIT', 'THE', 'OCCASION', 'BUT', 'I', 'COULD', 'NOT', 'BUT', 'FEEL', 'THAT', 'SOMETHING', 'WAS', 'WANTING', 'TO', 'THE', 'SIMPLICITY', 'OF', 'MY', 'ORDINARY', 'LIFE'] +8455-210777-0016-988: hyp=['THIS', 'I', 'FELT', 'WAS', 'PAID', 'TO', 'ME', 'AS', 'BEING', 'PRESIDENT', 'OF', 'THE', 'REPUBLIC', 'AND', 'I', 'ENDEAVOURED', 'TO', 'BEHAVE', 'MYSELF', 'WITH', 'SUCH', 'MINGLED', 'HUMILITY', 'AND', 'DIGNITY', 'AS', 'MIGHT', 'BE', 'FIT', 'THE', 'OCCASION', 'BUT', 'I', 'COULD', 'NOT', 'BUT', 'FEEL', 'THAT', 'SOMETHING', 'WAS', 'WANTING', 'TO', 'THE', 'SIMPLICITY', 'OF', 'MY', 'ORDINARY', 'LIFE'] +8455-210777-0017-989: ref=['MY', 'WIFE', 'ON', 'THE', 'SPUR', 'OF', 'THE', 'MOMENT', 'MANAGED', 'TO', 'GIVE', 'THE', 'GENTLEMEN', 'A', 'VERY', 'GOOD', 'DINNER'] +8455-210777-0017-989: hyp=['MY', 'WIFE', 'ON', 'THE', 'SPUR', 'OF', 'THE', 'MOMENT', 'MANAGED', 'TO', 'GIVE', 'THE', 'GENTLEMAN', 'A', 'VERY', 'GOOD', 'DINNER'] +8455-210777-0018-990: ref=['THIS', 'SHE', 'SAID', 'WAS', 'TRUE', 'HOSPITALITY', 'AND', 'I', 'AM', 'NOT', 'SURE', 'THAT', 'I', 'DID', 'NOT', 'AGREE', 'WITH', 'HER'] +8455-210777-0018-990: hyp=['THIS', 'SHE', 'SAID', 'WAS', 'TRUE', 'HOSPITALITY', 'AND', 'I', 
'AM', 'NOT', 'SURE', 'THAT', 'I', 'DID', 'NOT', 'AGREE', 'WITH', 'THERE'] +8455-210777-0019-991: ref=['THEN', 'THERE', 'WERE', 'THREE', 'OR', 'FOUR', 'LEADING', 'MEN', 'OF', 'THE', 'COMMUNITY', 'WITH', 'THEIR', 'WIVES', 'WHO', 'WERE', 'FOR', 'THE', 'MOST', 'PART', 'THE', 'FATHERS', 'AND', 'MOTHERS', 'OF', 'THE', 'YOUNG', 'LADIES'] +8455-210777-0019-991: hyp=['THEN', 'THERE', 'WERE', 'THREE', 'OR', 'FOUR', 'LEADING', 'MEN', 'OF', 'THE', 'COMMUNITY', 'WITH', 'THEIR', 'WIVES', 'WHO', 'WERE', 'FOR', 'THE', 'MOST', 'PART', 'THE', 'FATHERS', 'AND', 'MOTHERS', 'OF', 'THE', 'YOUNG', 'LADIES'] +8455-210777-0020-992: ref=['OH', 'YES', 'SAID', 'JACK', 'AND', "I'M", 'NOWHERE'] +8455-210777-0020-992: hyp=['OH', 'YES', 'SAID', 'JACK', 'AND', "I'M", 'NOWHERE'] +8455-210777-0021-993: ref=['BUT', 'I', 'MEAN', 'TO', 'HAVE', 'MY', 'INNINGS', 'BEFORE', 'LONG'] +8455-210777-0021-993: hyp=['BUT', 'I', 'MEAN', 'TO', 'HAVE', 'MY', 'INNINGS', 'BEFORE', 'LONG'] +8455-210777-0022-994: ref=['OF', 'WHAT', 'MISSUS', 'NEVERBEND', 'HAD', 'GONE', 'THROUGH', 'IN', 'PROVIDING', 'BIRDS', 'BEASTS', 'AND', 'FISHES', 'NOT', 'TO', 'TALK', 'OF', 'TARTS', 'AND', 'JELLIES', 'FOR', 'THE', 'DINNER', 'OF', 'THAT', 'DAY', 'NO', 'ONE', 'BUT', 'MYSELF', 'CAN', 'HAVE', 'ANY', 'IDEA', 'BUT', 'IT', 'MUST', 'BE', 'ADMITTED', 'THAT', 'SHE', 'ACCOMPLISHED', 'HER', 'TASK', 'WITH', 'THOROUGH', 'SUCCESS'] +8455-210777-0022-994: hyp=['OF', 'WHAT', 'MISSUS', 'NEVERBEND', 'HAD', 'GONE', 'THROUGH', 'IN', 'PROVIDING', 'BIRDS', 'BEASTS', 'AND', 'FISHES', 'NOT', 'TO', 'TALK', 'OF', 'TARTS', 'AND', 'JELLIES', 'FOR', 'THE', 'DINNER', 'OF', 'THAT', 'DAY', 'NO', 'ONE', 'BUT', 'MYSELF', 'CAN', 'HAVE', 'ANY', 'IDEA', 'BUT', 'IT', 'MUST', 'BE', 'ADMITTED', 'THAT', 'SHE', 'ACCOMPLISHED', 'HER', 'TASK', 'WITH', 'THOROUGH', 'SUCCESS'] +8455-210777-0023-995: ref=['WE', 'SAT', 'WITH', 'THE', 'OFFICERS', 'SOME', 'LITTLE', 'TIME', 'AFTER', 'DINNER', 'AND', 'THEN', 'WENT', 'ASHORE'] +8455-210777-0023-995: hyp=['WE', 'SAT', 'WITH', 'THE', 'OFFICER', 'SOME', 'LITTLE', 'TIME', 'AFTER', 'DINNER', 'AND', 'THEN', 'WENT', 'ASHORE'] +8455-210777-0024-996: ref=['HOW', 'MUCH', 'OF', 'EVIL', 'OF', 'REAL', 'ACCOMPLISHED', 'EVIL', 'HAD', 'THERE', 'NOT', 'OCCURRED', 'TO', 'ME', 'DURING', 'THE', 'LAST', 'FEW', 'DAYS'] +8455-210777-0024-996: hyp=['HOW', 'MUCH', 'OF', 'EVIL', 'OF', 'REAL', 'ACCOMPLISHED', 'EVIL', 'HAD', 'THERE', 'NOT', 'OCCURRED', 'TO', 'ME', 'DURING', 'THE', 'LAST', 'FEW', 'DAYS'] +8455-210777-0025-997: ref=['WHAT', 'COULD', 'I', 'DO', 'NOW', 'BUT', 'JUST', 'LAY', 'MYSELF', 'DOWN', 'AND', 'DIE'] +8455-210777-0025-997: hyp=['WHAT', 'COULD', 'I', 'DO', 'NOW', 'BUT', 'JUST', 'LAY', 'MYSELF', 'DOWN', 'AND', 'DIE'] +8455-210777-0026-998: ref=['AND', 'THE', 'DEATH', 'OF', 'WHICH', 'I', 'DREAMT', 'COULD', 'NOT', 'ALAS'] +8455-210777-0026-998: hyp=['AND', 'THE', 'DEATH', 'OF', 'WHICH', 'I', 'DREAMT', 'COULD', 'NOT', 'ALAS'] +8455-210777-0027-999: ref=['WHEN', 'THIS', 'CAPTAIN', 'SHOULD', 'HAVE', 'TAKEN', 'HIMSELF', 'AND', 'HIS', 'VESSEL', 'BACK', 'TO', 'ENGLAND', 'I', 'WOULD', 'RETIRE', 'TO', 'A', 'SMALL', 'FARM', 'WHICH', 'I', 'POSSESSED', 'AT', 'THE', 'FARTHEST', 'SIDE', 'OF', 'THE', 'ISLAND', 'AND', 'THERE', 'IN', 'SECLUSION', 'WOULD', 'I', 'END', 'MY', 'DAYS'] +8455-210777-0027-999: hyp=['WHEN', 'THIS', 'CAPTAIN', 'SHOULD', 'HAVE', 'TAKEN', 'HIMSELF', 'AND', 'HIS', 'VESSEL', 'BACK', 'TO', 'ENGLAND', 'I', 'WOULD', 'RETIRE', 'TO', 'A', 'SMALL', 'FARM', 'WHICH', 'I', 'POSSESSED', 'AT', 'THE', 'FURTHEST', 'SIDE', 'OF', 'THE', 'ISLAND', 'AND', 'THERE', 'IN', 'SECLUSION', 
'WHAT', 'I', 'END', 'MY', 'DAYS'] +8455-210777-0028-1000: ref=['JACK', 'WOULD', 'BECOME', "EVA'S", 'HAPPY', 'HUSBAND', 'AND', 'WOULD', 'REMAIN', 'AMIDST', 'THE', 'HURRIED', 'DUTIES', 'OF', 'THE', 'EAGER', 'WORLD'] +8455-210777-0028-1000: hyp=['JACK', 'WOULD', 'BECOME', "EVA'S", 'HAPPY', 'HUSBAND', 'AND', 'WOULD', 'REMAIN', 'AMIDST', 'THE', 'HURRIED', 'DUTIES', 'OF', 'THE', 'EAGER', 'WORLD'] +8455-210777-0029-1001: ref=['THINKING', 'OF', 'ALL', 'THIS', 'I', 'WENT', 'TO', 'SLEEP'] +8455-210777-0029-1001: hyp=['THINKING', 'OF', 'ALL', 'THIS', 'I', 'WENT', 'TO', 'SLEEP'] +8455-210777-0030-1002: ref=['MISTER', 'NEVERBEND', 'BEGAN', 'THE', 'CAPTAIN', 'AND', 'I', 'OBSERVED', 'THAT', 'UP', 'TO', 'THAT', 'MOMENT', 'HE', 'HAD', 'GENERALLY', 'ADDRESSED', 'ME', 'AS', 'PRESIDENT', 'IT', 'CANNOT', 'BE', 'DENIED', 'THAT', 'WE', 'HAVE', 'COME', 'HERE', 'ON', 'AN', 'UNPLEASANT', 'MISSION'] +8455-210777-0030-1002: hyp=['MISTER', 'NEVERBEND', 'BEGAN', 'THE', 'CAPTAIN', 'AND', 'I', 'OBSERVE', 'THAT', 'UP', 'TO', 'THAT', 'MOMENT', 'HE', 'HAD', 'GENERALLY', 'ADDRESSED', 'ME', 'AS', 'PRESIDENT', 'IT', 'CANNOT', 'BE', 'DENIED', 'THAT', 'WE', 'HAVE', 'COME', 'HERE', 'ON', 'AN', 'UNPLEASANT', 'MISSION'] +8455-210777-0031-1003: ref=['YOU', 'HAVE', 'RECEIVED', 'US', 'WITH', 'ALL', 'THAT', 'COURTESY', 'AND', 'HOSPITALITY', 'FOR', 'WHICH', 'YOUR', 'CHARACTER', 'IN', 'ENGLAND', 'STANDS', 'SO', 'HIGH'] +8455-210777-0031-1003: hyp=['YOU', 'HAVE', 'RECEIVED', 'US', 'WITH', 'ALL', 'THAT', 'COURTESY', 'AND', 'HOSPITALITY', 'FOR', 'WHICH', 'YOUR', 'CHARACTER', 'IN', 'ENGLAND', 'STAND', 'SO', 'HIGH'] +8455-210777-0032-1004: ref=['IT', 'IS', 'A', 'DUTY', 'SAID', 'I'] +8455-210777-0032-1004: hyp=['IT', 'IS', 'A', 'DUTY', 'SAID', 'I'] +8455-210777-0033-1005: ref=['BUT', 'YOUR', 'POWER', 'IS', 'SO', 'SUPERIOR', 'TO', 'ANY', 'THAT', 'I', 'CAN', 'ADVANCE', 'AS', 'TO', 'MAKE', 'US', 'HERE', 'FEEL', 'THAT', 'THERE', 'IS', 'NO', 'DISGRACE', 'IN', 'YIELDING', 'TO', 'IT'] +8455-210777-0033-1005: hyp=['BUT', 'YOUR', 'POWER', 'IS', 'SO', 'SUPERIOR', 'TO', 'ANY', 'THAT', 'I', 'CAN', 'ADVANCE', 'AS', 'TO', 'MAKE', 'US', 'HERE', 'FEEL', 'THAT', 'THERE', 'IS', 'NO', 'DISGRACE', 'IN', 'YIELDING', 'TO', 'IT'] +8455-210777-0034-1006: ref=['NOT', 'A', 'DOUBT', 'BUT', 'HAD', 'YOUR', 'FORCE', 'BEEN', 'ONLY', 'DOUBLE', 'OR', 'TREBLE', 'OUR', 'OWN', 'I', 'SHOULD', 'HAVE', 'FOUND', 'IT', 'MY', 'DUTY', 'TO', 'STRUGGLE', 'WITH', 'YOU'] +8455-210777-0034-1006: hyp=['NOT', 'A', 'DOUBT', 'BUT', 'HAD', 'YOUR', 'FORCE', 'BEEN', 'ONLY', 'DOUBLE', 'OR', 'TROUBLE', 'OUR', 'OWN', 'I', 'SHOULD', 'HAVE', 'FOUND', 'IT', 'MY', 'DUTY', 'TO', 'STRUGGLE', 'WITH', 'YOU'] +8455-210777-0035-1007: ref=['THAT', 'IS', 'ALL', 'QUITE', 'TRUE', 'MISTER', 'NEVERBEND', 'SAID', 'SIR', 'FERDINANDO', 'BROWN'] +8455-210777-0035-1007: hyp=['THAT', 'IS', 'ALL', 'QUITE', 'TRUE', 'MISTER', 'NEVERBEND', 'SAID', 'SIR', 'FERDINANDO', 'BROWN'] +8455-210777-0036-1008: ref=['I', 'CAN', 'AFFORD', 'TO', 'SMILE', 'BECAUSE', 'I', 'AM', 'ABSOLUTELY', 'POWERLESS', 'BEFORE', 'YOU', 'BUT', 'I', 'DO', 'NOT', 'THE', 'LESS', 'FEEL', 'THAT', 'IN', 'A', 'MATTER', 'IN', 'WHICH', 'THE', 'PROGRESS', 'OF', 'THE', 'WORLD', 'IS', 'CONCERNED', 'I', 'OR', 'RATHER', 'WE', 'HAVE', 'BEEN', 'PUT', 'DOWN', 'BY', 'BRUTE', 'FORCE'] +8455-210777-0036-1008: hyp=['I', 'CAN', 'AFFORD', 'TO', 'SMILE', 'BECAUSE', 'I', 'AM', 'ABSOLUTELY', 'POWERLESS', 'BEFORE', 'YOU', 'BUT', 'I', 'DO', 'NOT', 'THE', 'LESS', 'FEEL', 'THAT', 'IN', 'A', 'MATTER', 'OF', 'WHICH', 'THE', 'PROGRESS', 'OF', 'THE', 'WORLD', 'IS', 'CONCERNED', 'I', 'OR', 
'RATHER', 'WE', 'HAVE', 'BEEN', 'PUT', 'DOWN', 'BY', 'BRUTE', 'FORCE'] +8455-210777-0037-1009: ref=['YOU', 'HAVE', 'COME', 'TO', 'US', 'THREATENING', 'US', 'WITH', 'ABSOLUTE', 'DESTRUCTION'] +8455-210777-0037-1009: hyp=['YOU', 'HAVE', 'COME', 'TO', 'US', 'THREATENING', 'US', 'WITH', 'ABSOLUTE', 'DESTRUCTION'] +8455-210777-0038-1010: ref=['THEREFORE', 'I', 'FEEL', 'MYSELF', 'QUITE', 'ABLE', 'AS', 'PRESIDENT', 'OF', 'THIS', 'REPUBLIC', 'TO', 'RECEIVE', 'YOU', 'WITH', 'A', 'COURTESY', 'DUE', 'TO', 'THE', 'SERVANTS', 'OF', 'A', 'FRIENDLY', 'ALLY'] +8455-210777-0038-1010: hyp=['THEREFORE', 'I', 'FEEL', 'MYSELF', 'QUITE', 'ABLE', 'AS', 'PRESIDENT', 'OF', 'THIS', 'REPUBLIC', 'TO', 'RECEIVE', 'YOU', 'WITH', 'A', 'COURTESY', 'DUE', 'TO', 'THE', 'SERVANTS', 'OF', 'A', 'FRIENDLY', 'ALLY'] +8455-210777-0039-1011: ref=['I', 'CAN', 'ASSURE', 'YOU', 'HE', 'HAS', 'NOT', 'EVEN', 'ALLOWED', 'ME', 'TO', 'SEE', 'THE', 'TRIGGER', 'SINCE', 'I', 'HAVE', 'BEEN', 'ON', 'BOARD'] +8455-210777-0039-1011: hyp=['I', 'CAN', 'ASSURE', 'YOU', 'HE', 'HAS', 'NOT', 'EVEN', 'ALLOWED', 'ME', 'TO', 'SEE', 'THE', 'TRIGGER', 'SINCE', 'I', 'HAVE', 'BEEN', 'ON', 'BOARD'] +8455-210777-0040-1012: ref=['THEN', 'SAID', 'SIR', 'FERDINANDO', 'THERE', 'IS', 'NOTHING', 'FOR', 'IT', 'BUT', 'THAT', 'HE', 'MUST', 'TAKE', 'YOU', 'WITH', 'HIM'] +8455-210777-0040-1012: hyp=['THEN', 'SAID', 'SIR', 'FERDINANDO', 'THERE', 'IS', 'NOTHING', 'FOR', 'IT', 'BUT', 'THAT', 'WE', 'MUST', 'TAKE', 'YOU', 'WITH', 'HIM'] +8455-210777-0041-1013: ref=['THERE', 'CAME', 'UPON', 'ME', 'A', 'SUDDEN', 'SHOCK', 'WHEN', 'I', 'HEARD', 'THESE', 'WORDS', 'WHICH', 'EXCEEDED', 'ANYTHING', 'WHICH', 'I', 'HAD', 'YET', 'FELT'] +8455-210777-0041-1013: hyp=['THERE', 'CAME', 'UPON', 'ME', 'A', 'SUDDEN', 'SHOCK', 'WHEN', 'I', 'HEARD', 'THESE', 'WORDS', 'WHICH', 'EXCEEDED', 'ANYTHING', 'WHICH', 'I', 'HAD', 'YET', 'FELT'] +8455-210777-0042-1014: ref=['YOU', 'HEAR', 'WHAT', 'SIR', 'FERDINANDO', 'BROWN', 'HAS', 'SAID', 'REPLIED', 'CAPTAIN', 'BATTLEAX'] +8455-210777-0042-1014: hyp=['YOU', 'HEAR', 'WHAT', 'SIR', 'FERDINANDO', 'BROWN', 'HAS', 'SAID', 'REPLIED', 'CAPTAIN', 'BATTLEX'] +8455-210777-0043-1015: ref=['BUT', 'WHAT', 'IS', 'THE', 'DELICATE', 'MISSION', 'I', 'ASKED'] +8455-210777-0043-1015: hyp=['BUT', 'WHAT', 'IS', 'THE', 'DELICATE', 'MISSION', 'I', 'ASKED'] +8455-210777-0044-1016: ref=['I', 'WAS', 'TO', 'BE', 'TAKEN', 'AWAY', 'AND', 'CARRIED', 'TO', 'ENGLAND', 'OR', 'ELSEWHERE', 'OR', 'DROWNED', 'UPON', 'THE', 'VOYAGE', 'IT', 'MATTERED', 'NOT', 'WHICH'] +8455-210777-0044-1016: hyp=['I', 'WAS', 'TO', 'BE', 'TAKEN', 'AWAY', 'AND', 'CARRIED', 'TO', 'ENGLAND', 'OR', 'ELSEWHERE', 'OR', 'DROWNED', 'UPON', 'THE', 'VOYAGE', 'IT', 'MATTERED', 'NOT', 'WHICH'] +8455-210777-0045-1017: ref=['THEN', 'THE', 'REPUBLIC', 'OF', 'BRITANNULA', 'WAS', 'TO', 'BE', 'DECLARED', 'AS', 'NON', 'EXISTENT', 'AND', 'THE', 'BRITISH', 'FLAG', 'WAS', 'TO', 'BE', 'EXALTED', 'AND', 'A', 'BRITISH', 'GOVERNOR', 'INSTALLED', 'IN', 'THE', 'EXECUTIVE', 'CHAMBERS'] +8455-210777-0045-1017: hyp=['THEN', 'THE', 'REPUBLIC', 'OF', 'BRITAIN', 'YULA', 'WAS', 'TO', 'BE', 'DECLARED', 'AS', 'NON', 'EXISTENT', 'AND', 'THE', 'BRITISH', 'FLAG', 'WAS', 'TO', 'BE', 'EXALTED', 'AND', 'A', 'BRITISH', 'GOVERNOR', 'INSTALLED', 'IN', 'THE', 'EXECUTIVE', 'CHAMBERS'] +8455-210777-0046-1018: ref=['YOU', 'MAY', 'BE', 'QUITE', 'SURE', "IT'S", 'THERE', 'SAID', 'CAPTAIN', 'BATTLEAX', 'AND', 'THAT', 'I', 'CAN', 'SO', 'USE', 'IT', 'AS', 'TO', 'HALF', 'OBLITERATE', 'YOUR', 'TOWN', 'WITHIN', 'TWO', 'MINUTES', 'OF', 'MY', 'RETURN', 'ON', 'BOARD'] 
+8455-210777-0046-1018: hyp=['YOU', 'MAY', 'BE', 'QUITE', 'SURE', 'TO', 'THERE', 'SAID', 'CAPTAIN', 'BATTLE', 'AXE', 'AND', 'THAT', 'I', 'CAN', 'SO', 'USE', 'IT', 'AS', 'TO', 'HALF', 'OBLITERATE', 'YOUR', 'TOWN', 'WITHIN', 'TWO', 'MINUTES', 'OF', 'MY', 'RETURN', 'ON', 'BOARD'] +8455-210777-0047-1019: ref=['YOU', 'PROPOSE', 'TO', 'KIDNAP', 'ME', 'I', 'SAID'] +8455-210777-0047-1019: hyp=['YOU', 'PROPOSE', 'TO', 'KIDNAP', 'ME', 'I', 'SAID'] +8455-210777-0048-1020: ref=['WHAT', 'WOULD', 'BECOME', 'OF', 'YOUR', 'GUN', 'WERE', 'I', 'TO', 'KIDNAP', 'YOU'] +8455-210777-0048-1020: hyp=['WHAT', 'WILL', 'BECOME', 'OF', 'YOUR', 'GUN', 'WERE', 'I', 'TO', 'KIDNAP', 'YOU'] +8455-210777-0049-1021: ref=['LIEUTENANT', 'CROSSTREES', 'IS', 'A', 'VERY', 'GALLANT', 'OFFICER'] +8455-210777-0049-1021: hyp=['LIEUTENANT', 'CROSS', 'TREES', 'IS', 'A', 'VERY', 'GALLANT', 'OFFICER'] +8455-210777-0050-1022: ref=['ONE', 'OF', 'US', 'ALWAYS', 'REMAINS', 'ON', 'BOARD', 'WHILE', 'THE', 'OTHER', 'IS', 'ON', 'SHORE'] +8455-210777-0050-1022: hyp=['ONE', 'OF', 'US', 'ALWAYS', 'REMAINS', 'ON', 'BOARD', 'WHILE', 'THE', 'OTHER', 'IS', 'ON', 'SHORE'] +8455-210777-0051-1023: ref=['WHAT', 'WORLD', 'WIDE', 'INIQUITY', 'SUCH', 'A', 'SPEECH', 'AS', 'THAT', 'DISCLOSES', 'SAID', 'I', 'STILL', 'TURNING', 'MYSELF', 'TO', 'THE', 'CAPTAIN', 'FOR', 'THOUGH', 'I', 'WOULD', 'HAVE', 'CRUSHED', 'THEM', 'BOTH', 'BY', 'MY', 'WORDS', 'HAD', 'IT', 'BEEN', 'POSSIBLE', 'MY', 'DISLIKE', 'CENTRED', 'ITSELF', 'ON', 'SIR', 'FERDINANDO'] +8455-210777-0051-1023: hyp=['WHAT', 'WORLD', 'WIDE', 'INIQUITY', 'SUCH', 'A', 'SPEECH', 'AS', 'THAT', 'DISCLOSES', 'SAID', 'I', 'STILL', 'TURNING', 'MYSELF', 'TO', 'THE', 'CAPTAIN', 'FOR', 'THOUGH', 'I', 'WOULD', 'HAVE', 'CRUSHED', 'THEM', 'BOTH', 'BY', 'MY', 'WORDS', 'HAD', 'IT', 'BEEN', 'POSSIBLE', 'MY', 'DISLIKE', 'SENATE', 'ITSELF', 'ON', 'SIR', 'FERDINANDO'] +8455-210777-0052-1024: ref=['YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'SUGGEST', 'SAID', 'HE', 'THAT', 'THAT', 'IS', 'A', 'MATTER', 'OF', 'OPINION'] +8455-210777-0052-1024: hyp=['YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'SUGGEST', 'SAID', 'HE', 'THAT', 'THAT', 'IS', 'A', 'MATTER', 'OF', 'OPINION'] +8455-210777-0053-1025: ref=['WERE', 'I', 'TO', 'COMPLY', 'WITH', 'YOUR', 'ORDERS', 'WITHOUT', 'EXPRESSING', 'MY', 'OWN', 'OPINION', 'I', 'SHOULD', 'SEEM', 'TO', 'HAVE', 'DONE', 'SO', 'WILLINGLY', 'HEREAFTER'] +8455-210777-0053-1025: hyp=['WERE', 'I', 'TO', 'COMPLY', 'WITH', 'YOUR', 'ORDERS', 'WITHOUT', 'EXPRESSING', 'MY', 'OWN', 'OPINION', 'I', 'SHOULD', 'SEEM', 'TO', 'HAVE', 'DONE', 'SO', 'WILLINGLY', 'HEREAFTER'] +8455-210777-0054-1026: ref=['THE', 'LETTER', 'RAN', 'AS', 'FOLLOWS'] +8455-210777-0054-1026: hyp=['THE', 'LETTER', 'RAN', 'AS', 'FOLLOWS'] +8455-210777-0055-1027: ref=['SIR', 'I', 'HAVE', 'IT', 'IN', 'COMMAND', 'TO', 'INFORM', 'YOUR', 'EXCELLENCY', 'THAT', 'YOU', 'HAVE', 'BEEN', 'APPOINTED', 'GOVERNOR', 'OF', 'THE', 'CROWN', 'COLONY', 'WHICH', 'IS', 'CALLED', 'BRITANNULA'] +8455-210777-0055-1027: hyp=['SIR', 'I', 'HAVE', 'IT', 'IN', 'COMMAND', 'TO', 'INFORM', 'YOUR', 'EXCELLENCY', 'THAT', 'YOU', 'HAVE', 'BEEN', 'APPOINTED', 'GOVERNOR', 'OF', 'THE', 'CROWN', 'COLONY', 'WHICH', 'IS', 'CALLED', 'BRITAIN', 'ULLA'] +8455-210777-0056-1028: ref=['THE', 'PECULIAR', 'CIRCUMSTANCES', 'OF', 'THE', 'COLONY', 'ARE', 'WITHIN', 'YOUR', "EXCELLENCY'S", 'KNOWLEDGE'] +8455-210777-0056-1028: hyp=['THE', 'PECULIAR', 'CIRCUMSTANCES', 'OF', 'THE', 'COLONY', 'ARE', 'WITHIN', 'YOUR', "EXCELLENCY'S", 'KNOWLEDGE'] +8455-210777-0057-1029: ref=['BUT', 'IN', 'THEIR', 'SELECTION', 'OF', 'A', 
'CONSTITUTION', 'THE', 'BRITANNULISTS', 'HAVE', 'UNFORTUNATELY', 'ALLOWED', 'THEMSELVES', 'BUT', 'ONE', 'DELIBERATIVE', 'ASSEMBLY', 'AND', 'HENCE', 'HAVE', 'SPRUNG', 'THEIR', 'PRESENT', 'DIFFICULTIES'] +8455-210777-0057-1029: hyp=['BUT', 'IN', 'THEIR', 'SELECTION', 'OF', 'A', 'CONSTITUTION', 'THE', 'BRITAIN', 'UILESTS', 'HAVE', 'UNFORTUNATELY', 'ALLOWED', 'THEMSELVES', 'BUT', 'ONE', 'DELIBERATE', 'ASSEMBLY', 'AND', 'HENCE', 'HAS', 'SPRUNG', 'THEIR', 'PRESENT', 'DIFFICULTIES'] +8455-210777-0058-1030: ref=['IT', 'IS', 'FOUNDED', 'ON', 'THE', 'ACKNOWLEDGED', 'WEAKNESS', 'OF', 'THOSE', 'WHO', 'SURVIVE', 'THAT', 'PERIOD', 'OF', 'LIFE', 'AT', 'WHICH', 'MEN', 'CEASE', 'TO', 'WORK'] +8455-210777-0058-1030: hyp=['IT', 'IS', 'FOUNDED', 'ON', 'THE', 'ACKNOWLEDGED', 'WEAKNESS', 'OF', 'THOSE', 'WHO', 'SURVIVE', 'THAT', 'PERIOD', 'OF', 'LIFE', 'AT', 'WHICH', 'MEN', 'CEASE', 'TO', 'WORK'] +8455-210777-0059-1031: ref=['BUT', 'IT', 'IS', 'SURMISED', 'THAT', 'YOU', 'WILL', 'FIND', 'DIFFICULTIES', 'IN', 'THE', 'WAY', 'OF', 'YOUR', 'ENTERING', 'AT', 'ONCE', 'UPON', 'YOUR', 'GOVERNMENT'] +8455-210777-0059-1031: hyp=['BUT', 'IT', 'IS', 'SURMISED', 'THAT', 'YOU', 'WILL', 'FIND', 'DIFFICULTIES', 'IN', 'THE', 'WAY', 'OF', 'YOUR', 'ENTERING', 'AT', 'ONCE', 'UPON', 'YOUR', 'GOVERNOR'] +8455-210777-0060-1032: ref=['THE', 'JOHN', 'BRIGHT', 'IS', 'ARMED', 'WITH', 'A', 'WEAPON', 'OF', 'GREAT', 'POWER', 'AGAINST', 'WHICH', 'IT', 'IS', 'IMPOSSIBLE', 'THAT', 'THE', 'PEOPLE', 'OF', 'BRITANNULA', 'SHOULD', 'PREVAIL'] +8455-210777-0060-1032: hyp=['THE', 'JOHN', 'BRIGHT', 'HIS', 'ARM', 'WITH', 'A', 'WEAPON', 'OF', 'GREAT', 'POWER', 'AGAINST', 'WHICH', 'IT', 'IS', 'IMPOSSIBLE', 'THAT', 'THE', 'PEOPLE', 'OF', 'BRITAIN', 'EULO', 'SHOULD', 'PREVAIL'] +8455-210777-0061-1033: ref=['YOU', 'WILL', 'CARRY', 'OUT', 'WITH', 'YOU', 'ONE', 'HUNDRED', 'MEN', 'OF', 'THE', 'NORTH', 'NORTH', 'WEST', 'BIRMINGHAM', 'REGIMENT', 'WHICH', 'WILL', 'PROBABLY', 'SUFFICE', 'FOR', 'YOUR', 'OWN', 'SECURITY', 'AS', 'IT', 'IS', 'THOUGHT', 'THAT', 'IF', 'MISTER', 'NEVERBEND', 'BE', 'WITHDRAWN', 'THE', 'PEOPLE', 'WILL', 'REVERT', 'EASILY', 'TO', 'THEIR', 'OLD', 'HABITS', 'OF', 'OBEDIENCE'] +8455-210777-0061-1033: hyp=['YOU', 'WILL', 'CARRY', 'OUT', 'WITH', 'YOU', 'ONE', 'HUNDRED', 'MEN', 'OF', 'THE', 'NORTH', 'NORTH', 'WEST', 'BIRMINGHAM', 'REGIMENT', 'WHICH', 'WILL', 'PROBABLY', 'SUFFICE', 'FOR', 'YOUR', 'OWN', 'SECURITY', 'AS', 'IT', 'IS', 'THOUGHT', 'THAT', 'IF', 'MISTER', 'NEVERBEND', 'BE', 'WITHDRAWN', 'THE', 'PEOPLE', 'WILL', 'REVERT', 'EASILY', 'TO', 'THEIR', 'OLD', 'HABITS', 'OF', 'OBEDIENCE'] +8455-210777-0062-1034: ref=['WHEN', 'DO', 'YOU', 'INTEND', 'THAT', 'THE', 'JOHN', 'BRIGHT', 'SHALL', 'START'] +8455-210777-0062-1034: hyp=['WHEN', 'DO', 'YOU', 'INTEND', 'THAT', 'THAT', 'JOHN', 'BRIGHT', 'SHALL', 'START'] +8455-210777-0063-1035: ref=['TO', 'DAY', 'I', 'SHOUTED'] +8455-210777-0063-1035: hyp=['TO', 'DAY', 'I', 'SHOUTED'] +8455-210777-0064-1036: ref=['AND', 'I', 'HAVE', 'NO', 'ONE', 'READY', 'TO', 'WHOM', 'I', 'CAN', 'GIVE', 'UP', 'THE', 'ARCHIVES', 'OF', 'THE', 'GOVERNMENT'] +8455-210777-0064-1036: hyp=['AND', 'I', 'HAVE', 'NO', 'ONE', 'READY', 'TO', 'WHOM', 'I', 'CAN', 'GIVE', 'UP', 'THE', 'ARCHIVE', 'THE', 'GOVERNMENT'] +8455-210777-0065-1037: ref=['I', 'SHALL', 'BE', 'HAPPY', 'TO', 'TAKE', 'CHARGE', 'OF', 'THEM', 'SAID', 'SIR', 'FERDINANDO'] +8455-210777-0065-1037: hyp=['I', 'SHALL', 'BE', 'HAPPY', 'TO', 'TAKE', 'CHARGE', 'OF', 'THEM', 'SAID', 'SIR', 'FERDINANDO'] +8455-210777-0066-1038: ref=['THEY', 'OF', 'COURSE', 'MUST', 'ALL', 'BE', 
'ALTERED'] +8455-210777-0066-1038: hyp=['THEY', 'OF', 'COURSE', 'MUST', 'ALL', 'BE', 'ALTERED'] +8455-210777-0067-1039: ref=['OR', 'OF', 'THE', 'HABITS', 'OF', 'OUR', 'PEOPLE', 'IT', 'IS', 'QUITE', 'IMPOSSIBLE'] +8455-210777-0067-1039: hyp=['OR', 'OF', 'THE', 'HABITS', 'OF', 'OUR', 'PEOPLE', 'IT', 'IS', 'QUITE', 'IMPOSSIBLE'] +8455-210777-0068-1040: ref=['YOUR', 'POWER', 'IS', 'SUFFICIENT', 'I', 'SAID'] +8455-210777-0068-1040: hyp=['YOUR', 'POWER', 'IS', 'SUFFICIENT', 'I', 'SAID'] +8455-210777-0069-1041: ref=['IF', 'YOU', 'WILL', 'GIVE', 'US', 'YOUR', 'PROMISE', 'TO', 'MEET', 'CAPTAIN', 'BATTLEAX', 'HERE', 'AT', 'THIS', 'TIME', 'TO', 'MORROW', 'WE', 'WILL', 'STRETCH', 'A', 'POINT', 'AND', 'DELAY', 'THE', 'DEPARTURE', 'OF', 'THE', 'JOHN', 'BRIGHT', 'FOR', 'TWENTY', 'FOUR', 'HOURS'] +8455-210777-0069-1041: hyp=['IF', 'YOU', 'WILL', 'GIVE', 'US', 'YOUR', 'PROMISE', 'TO', 'MEET', 'CAPTAIN', 'ADELAX', 'HERE', 'AT', 'THIS', 'TIME', 'TO', 'MORROW', 'WE', 'WILL', 'STRETCH', 'A', 'POINT', 'AND', 'DELAY', 'THE', 'DEPARTURE', 'OF', 'THE', 'JOHN', 'BRIGHT', 'FOR', 'TWENTY', 'FOUR', 'HOURS'] +8455-210777-0070-1042: ref=['AND', 'THIS', 'PLAN', 'WAS', 'ADOPTED', 'TOO', 'IN', 'ORDER', 'TO', 'EXTRACT', 'FROM', 'ME', 'A', 'PROMISE', 'THAT', 'I', 'WOULD', 'DEPART', 'IN', 'PEACE'] +8455-210777-0070-1042: hyp=['AND', 'THIS', 'PLAN', 'WAS', 'ADOPTED', 'TOO', 'IN', 'ORDER', 'TO', 'EXTRACT', 'FROM', 'ME', 'A', 'PROMISE', 'THAT', 'I', 'WOULD', 'DEPART', 'IN', 'PEACE'] +8463-287645-0000-543: ref=['THIS', 'WAS', 'WHAT', 'DID', 'THE', 'MISCHIEF', 'SO', 'FAR', 'AS', 'THE', 'RUNNING', 'AWAY', 'WAS', 'CONCERNED'] +8463-287645-0000-543: hyp=['THIS', 'WAS', 'WHAT', 'DID', 'THE', 'MISCHIEF', 'SO', 'FAR', 'AS', 'THE', 'RUNNING', 'AWAY', 'WAS', 'CONCERNED'] +8463-287645-0001-544: ref=['IT', 'IS', 'HARDLY', 'NECESSARY', 'TO', 'SAY', 'MORE', 'OF', 'THEM', 'HERE'] +8463-287645-0001-544: hyp=['IT', 'IS', 'HARDLY', 'NECESSARY', 'TO', 'SAY', 'MORE', 'OF', 'THEM', 'HERE'] +8463-287645-0002-545: ref=['FROM', 'THE', 'MANNER', 'IN', 'WHICH', 'HE', 'EXPRESSED', 'HIMSELF', 'WITH', 'REGARD', 'TO', 'ROBERT', 'HOLLAN', 'NO', 'MAN', 'IN', 'THE', 'WHOLE', 'RANGE', 'OF', 'HIS', 'RECOLLECTIONS', 'WILL', 'BE', 'LONGER', 'REMEMBERED', 'THAN', 'HE', 'HIS', 'ENTHRALMENT', 'WHILE', 'UNDER', 'HOLLAN', 'WILL', 'HARDLY', 'EVER', 'BE', 'FORGOTTEN'] +8463-287645-0002-545: hyp=['FROM', 'THE', 'MANNER', 'IN', 'WHICH', 'HE', 'EXPRESSED', 'HIMSELF', 'WITH', 'REGARD', 'TO', 'ROBERT', 'HOLLAND', 'NO', 'MAN', 'IN', 'THE', 'WHOLE', 'RANGE', 'OF', 'HIS', 'RECOLLECTIONS', 'WILL', 'BE', 'LONGER', 'REMEMBERED', 'THAN', 'HE', 'HIS', 'ENTHRALLMENT', 'WHILE', 'UNDER', 'HOLLAND', 'WILL', 'HARDLY', 'EVER', 'BE', 'FORGOTTEN'] +8463-287645-0003-546: ref=['OF', 'THIS', 'PARTY', 'EDWARD', 'A', 'BOY', 'OF', 'SEVENTEEN', 'CALLED', 'FORTH', 'MUCH', 'SYMPATHY', 'HE', 'TOO', 'WAS', 'CLAIMED', 'BY', 'HOLLAN'] +8463-287645-0003-546: hyp=['OF', 'THIS', 'PARTY', 'EDWARD', 'A', 'BOY', 'OF', 'SEVENTEEN', 'CALLED', 'FORTH', 'MUCH', 'SYMPATHY', 'HE', 'TOO', 'WAS', 'CLAIMED', 'BY', 'HOLLAND'] +8463-287645-0004-547: ref=['JOHN', 'WESLEY', 'COMBASH', 'JACOB', 'TAYLOR', 'AND', 'THOMAS', 'EDWARD', 'SKINNER'] +8463-287645-0004-547: hyp=['JOHN', 'WESLEY', 'COMBATCH', 'JACOB', 'TAYLOR', 'AND', 'THOMAS', 'EDWARD', 'SKINNER'] +8463-287645-0005-548: ref=['A', 'FEW', 'YEARS', 'BACK', 'ONE', 'OF', 'THEIR', 'SLAVES', 'A', 'COACHMAN', 'WAS', 'KEPT', 'ON', 'THE', 'COACH', 'BOX', 'ONE', 'COLD', 'NIGHT', 'WHEN', 'THEY', 'WERE', 'OUT', 'AT', 'A', 'BALL', 'UNTIL', 'HE', 'BECAME', 'ALMOST', 'FROZEN', 'TO', 
'DEATH', 'IN', 'FACT', 'HE', 'DID', 'DIE', 'IN', 'THE', 'INFIRMARY', 'FROM', 'THE', 'EFFECTS', 'OF', 'THE', 'FROST', 'ABOUT', 'ONE', 'WEEK', 'AFTERWARDS'] +8463-287645-0005-548: hyp=['IF', 'YOU', 'YEARS', 'BACK', 'ONE', 'OF', 'THEIR', 'SLAVES', 'A', 'COACHMAN', 'WAS', 'KEPT', 'ON', 'THE', 'COACH', 'BOX', 'ONE', 'CALLED', 'NIGHT', 'WHEN', 'THEY', 'WERE', 'OUT', 'AT', 'A', 'BALL', 'UNTIL', 'HE', 'BECAME', 'ALMOST', 'FROZEN', 'TO', 'DEATH', 'IN', 'FACT', 'HE', 'DID', 'DIE', 'IN', 'THE', 'INFIRMARY', 'FROM', 'THE', 'EFFECTS', 'OF', 'THE', 'FROST', 'ABOUT', 'ONE', 'WEEK', 'AFTERWARDS'] +8463-287645-0006-549: ref=['THE', 'DOCTOR', 'WHO', 'ATTENDED', 'THE', 'INJURED', 'CREATURE', 'IN', 'THIS', 'CASE', 'WAS', 'SIMPLY', 'TOLD', 'THAT', 'SHE', 'SLIPPED', 'AND', 'FELL', 'DOWN', 'STAIRS', 'AS', 'SHE', 'WAS', 'COMING', 'DOWN'] +8463-287645-0006-549: hyp=['THE', 'DOCTOR', 'WHO', 'ATTENDED', 'THE', 'ANCIENT', 'CREATURE', 'IN', 'THIS', 'CASE', 'WAS', 'SIMPLY', 'TOLD', 'THAT', 'SHE', 'SLIPPED', 'AND', 'FELL', 'DOWN', 'THE', 'STAIRS', 'AS', 'SHE', 'WAS', 'COMING', 'DOWN'] +8463-287645-0007-550: ref=['ANOTHER', 'CASE', 'SAID', 'JOHN', 'WESLEY', 'WAS', 'A', 'LITTLE', 'GIRL', 'HALF', 'GROWN', 'WHO', 'WAS', 'WASHING', 'WINDOWS', 'UP', 'STAIRS', 'ONE', 'DAY', 'AND', 'UNLUCKILY', 'FELL', 'ASLEEP', 'IN', 'THE', 'WINDOW', 'AND', 'IN', 'THIS', 'POSITION', 'WAS', 'FOUND', 'BY', 'HER', 'MISTRESS', 'IN', 'A', 'RAGE', 'THE', 'MISTRESS', 'HIT', 'HER', 'A', 'HEAVY', 'SLAP', 'KNOCKED', 'HER', 'OUT', 'OF', 'THE', 'WINDOW', 'AND', 'SHE', 'FELL', 'TO', 'THE', 'PAVEMENT', 'AND', 'DIED', 'IN', 'A', 'FEW', 'HOURS', 'FROM', 'THE', 'EFFECTS', 'THEREOF'] +8463-287645-0007-550: hyp=['ANOTHER', 'CASE', 'SAID', 'JOHN', 'WESTLEY', 'WAS', 'A', 'LITTLE', 'GIRL', 'HALF', 'GROWN', 'WHO', 'WAS', 'WASHING', 'WINDOWS', 'UPSTAIRS', 'ONE', 'DAY', 'AND', 'UNLUCKILY', 'FELL', 'ASLEEP', 'IN', 'THE', 'WINDOW', 'AND', 'IN', 'THIS', 'POSITION', 'WAS', 'FOUND', 'BY', 'HER', 'MISTRESS', 'IN', 'A', 'RAGE', 'THE', 'MISTRESS', 'HID', 'HER', 'A', 'HEAVY', 'SLAP', 'KNOCKED', 'HER', 'OUT', 'OF', 'THE', 'WINDOW', 'AND', 'SHE', 'FELL', 'TO', 'THE', 'PAVEMENT', 'AND', 'DIED', 'IN', 'A', 'FEW', 'HOURS', 'FROM', 'THE', 'EFFECTS', 'THEREOF'] +8463-287645-0008-551: ref=['AS', 'USUAL', 'NOTHING', 'WAS', 'DONE', 'IN', 'THE', 'WAY', 'OF', 'PUNISHMENT'] +8463-287645-0008-551: hyp=['AS', 'USUAL', 'NOTHING', 'WAS', 'DONE', 'IN', 'THE', 'WAY', 'OF', 'PUNISHMENT'] +8463-287645-0009-552: ref=['I', 'NEVER', 'KNEW', 'OF', 'BUT', 'ONE', 'MAN', 'WHO', 'COULD', 'EVER', 'PLEASE', 'HIM'] +8463-287645-0009-552: hyp=['I', 'NEVER', 'KNEW', 'OF', 'BUT', 'ONE', 'MAN', 'WHO', 'COULD', 'EVER', 'PLEASE', 'HIM'] +8463-287645-0010-553: ref=['HE', 'WORKED', 'ME', 'VERY', 'HARD', 'HE', 'WANTED', 'TO', 'BE', 'BEATING', 'ME', 'ALL', 'THE', 'TIME'] +8463-287645-0010-553: hyp=['HE', 'WORKED', 'ME', 'VERY', 'HARD', 'HE', 'WANTED', 'TO', 'BE', 'BEATING', 'ME', 'ALL', 'THE', 'TIME'] +8463-287645-0011-554: ref=['SHE', 'WAS', 'A', 'LARGE', 'HOMELY', 'WOMAN', 'THEY', 'WERE', 'COMMON', 'WHITE', 'PEOPLE', 'WITH', 'NO', 'REPUTATION', 'IN', 'THE', 'COMMUNITY'] +8463-287645-0011-554: hyp=['SHE', 'WAS', 'A', 'LARGE', 'HOMELY', 'WOMAN', 'THEY', 'WERE', 'COMMON', 'WHITE', 'PEOPLE', 'WITH', 'NO', 'REPUTATION', 'IN', 'THE', 'COMMUNITY'] +8463-287645-0012-555: ref=['SUBSTANTIALLY', 'THIS', 'WAS', "JACOB'S", 'UNVARNISHED', 'DESCRIPTION', 'OF', 'HIS', 'MASTER', 'AND', 'MISTRESS'] +8463-287645-0012-555: hyp=['SUBSTANTIALLY', 'THIS', 'WAS', "JACOB'S", 'UNVARNISHED', 'DESCRIPTION', 'OF', 'HIS', 'MASTER', 'AND', 
'MISTRESS'] +8463-287645-0013-556: ref=['AS', 'TO', 'HIS', 'AGE', 'AND', 'ALSO', 'THE', 'NAME', 'OF', 'HIS', 'MASTER', "JACOB'S", 'STATEMENT', 'VARIED', 'SOMEWHAT', 'FROM', 'THE', 'ADVERTISEMENT'] +8463-287645-0013-556: hyp=['AS', 'TO', 'HIS', 'AGE', 'AND', 'ALSO', 'THE', 'NAME', 'OF', 'HIS', 'MASTER', "JACOB'S", 'STATEMENT', 'VARIED', 'SOMEWHAT', 'FROM', 'THE', 'ADVERTISEMENT'] +8463-287645-0014-557: ref=['OF', 'STARTING', 'I', "DIDN'T", 'KNOW', 'THE', 'WAY', 'TO', 'COME'] +8463-287645-0014-557: hyp=['OF', 'STARTING', 'I', "DIDN'T", 'KNOW', 'THE', 'WAY', 'TO', 'COME'] +8463-294825-0000-558: ref=["IT'S", 'ALMOST', 'BEYOND', 'CONJECTURE'] +8463-294825-0000-558: hyp=["IT'S", 'ALMOST', 'BEYOND', 'CONJECTURE'] +8463-294825-0001-559: ref=['THIS', 'REALITY', 'BEGINS', 'TO', 'EXPLAIN', 'THE', 'DARK', 'POWER', 'AND', 'OTHERWORLDLY', 'FASCINATION', 'OF', 'TWENTY', 'THOUSAND', 'LEAGUES', 'UNDER', 'THE', 'SEAS'] +8463-294825-0001-559: hyp=['THIS', 'REALITY', 'BEGINS', 'TO', 'EXPLAIN', 'THE', 'DARK', 'POWER', 'AND', 'OTHER', 'WORLDLY', 'FASCINATION', 'OF', 'TWENTY', 'THOUSAND', 'LEAGUES', 'UNDER', 'THE', 'SEAS'] +8463-294825-0002-560: ref=['FIRST', 'AS', 'A', 'PARIS', 'STOCKBROKER', 'LATER', 'AS', 'A', 'CELEBRATED', 'AUTHOR', 'AND', 'YACHTSMAN', 'HE', 'WENT', 'ON', 'FREQUENT', 'VOYAGES', 'TO', 'BRITAIN', 'AMERICA', 'THE', 'MEDITERRANEAN'] +8463-294825-0002-560: hyp=['FIRST', 'AS', 'A', 'PARIS', 'DOCKBROKER', 'LATER', 'AS', 'A', 'CELEBRATED', 'AUTHOR', 'AND', 'YACHTSMAN', 'HE', 'WENT', 'ON', 'FREQUENT', 'VOYAGES', 'TO', 'BRITAIN', 'AMERICA', 'THE', 'MEDITERRANEAN'] +8463-294825-0003-561: ref=['NEMO', 'BUILDS', 'A', 'FABULOUS', 'FUTURISTIC', 'SUBMARINE', 'THE', 'NAUTILUS', 'THEN', 'CONDUCTS', 'AN', 'UNDERWATER', 'CAMPAIGN', 'OF', 'VENGEANCE', 'AGAINST', 'HIS', 'IMPERIALIST', 'OPPRESSOR'] +8463-294825-0003-561: hyp=['NEMO', 'BUILDS', 'A', 'FABULOUS', 'FUTURESTIC', 'SUBMARINE', 'THE', 'NAUTILUS', 'THEN', 'CONDUCTS', 'AN', 'UNDERWATER', 'CAMPAIGN', 'OF', 'VENGEANCE', 'AGAINST', 'HIS', 'IMPERIALIST', 'OPPRESSOR'] +8463-294825-0004-562: ref=['IN', 'ALL', 'THE', 'NOVEL', 'HAD', 'A', 'DIFFICULT', 'GESTATION'] +8463-294825-0004-562: hyp=['IN', 'ALL', 'THE', 'NOVEL', 'HEAD', 'A', 'DIFFICULT', 'JUST', 'STATION'] +8463-294825-0005-563: ref=['OTHER', 'SUBTLETIES', 'OCCUR', 'INSIDE', 'EACH', 'EPISODE', 'THE', 'TEXTURES', 'SPARKLING', 'WITH', 'WIT', 'INFORMATION', 'AND', 'INSIGHT'] +8463-294825-0005-563: hyp=['OTHER', 'SUBTLETIES', 'OCCUR', 'INSIDE', 'EACH', 'EPISODE', 'THE', 'TEXTURES', 'SPARKLING', 'WITH', 'WIT', 'INFORMATION', 'AND', 'INSIGHT'] +8463-294825-0006-564: ref=['HIS', 'SPECIFICATIONS', 'FOR', 'AN', 'OPEN', 'SEA', 'SUBMARINE', 'AND', 'A', 'SELF', 'CONTAINED', 'DIVING', 'SUIT', 'WERE', 'DECADES', 'BEFORE', 'THEIR', 'TIME', 'YET', 'MODERN', 'TECHNOLOGY', 'BEARS', 'THEM', 'OUT', 'TRIUMPHANTLY'] +8463-294825-0006-564: hyp=['HIS', 'SPECIFICATIONS', 'FOR', 'AN', 'OPEN', 'SEA', 'SUBMARINE', 'AND', 'A', 'SELF', 'CONTAINING', 'DIVING', 'SUIT', 'WERE', 'DECADES', 'BEFORE', 'THEIR', 'TIME', 'YET', 'MODERN', 'TECHNOLOGY', 'BEARS', 'THEM', 'OUT', 'TRIUMPHANTLY'] +8463-294825-0007-565: ref=['EVEN', 'THE', 'SUPPORTING', 'CAST', 'IS', 'SHREWDLY', 'DRAWN', 'PROFESSOR', 'ARONNAX', 'THE', 'CAREER', 'SCIENTIST', 'CAUGHT', 'IN', 'AN', 'ETHICAL', 'CONFLICT', 'CONSEIL', 'THE', 'COMPULSIVE', 'CLASSIFIER', 'WHO', 'SUPPLIES', 'HUMOROUS', 'TAG', 'LINES', 'FOR', "VERNE'S", 'FAST', 'FACTS', 'THE', 'HARPOONER', 'NED', 'LAND', 'A', 'CREATURE', 'OF', 'CONSTANT', 'APPETITES', 'MAN', 'AS', 'HEROIC', 'ANIMAL'] +8463-294825-0007-565: 
hyp=['EVEN', 'THE', 'SUPPORTING', 'CAST', 'IS', 'SHREWDLY', 'DRAWN', 'PROFESSOR', 'ARONNAX', 'THE', 'CAREER', 'SCIENTIST', 'CAUGHT', 'IN', 'AN', 'ETHICAL', 'CONFLICT', 'CONSEIL', 'THE', 'COMPULSIVE', 'CLASSIFIER', 'WHO', 'SUPPLIES', 'HUMOROUS', 'TAG', 'LINES', 'FOR', 'VERNS', 'FAST', 'FACTS', 'THE', 'HARPOONER', 'NED', 'LAND', 'A', 'CREATURE', 'OF', 'CONSTANT', 'APPETITES', 'MAN', 'AS', 'HEROIC', 'ANIMAL'] +8463-294825-0008-566: ref=['BUT', 'MUCH', 'OF', 'THE', "NOVEL'S", 'BROODING', 'POWER', 'COMES', 'FROM', 'CAPTAIN', 'NEMO'] +8463-294825-0008-566: hyp=['BUT', 'MUCH', 'OF', 'THE', 'NOVELS', 'BROODING', 'POWER', 'COMES', 'FROM', 'CAPTAIN', 'NEMO'] +8463-294825-0009-567: ref=['THIS', 'COMPULSION', 'LEADS', 'NEMO', 'INTO', 'UGLY', 'CONTRADICTIONS', "HE'S", 'A', 'FIGHTER', 'FOR', 'FREEDOM', 'YET', 'ALL', 'WHO', 'BOARD', 'HIS', 'SHIP', 'ARE', 'IMPRISONED', 'THERE', 'FOR', 'GOOD', 'HE', 'WORKS', 'TO', 'SAVE', 'LIVES', 'BOTH', 'HUMAN', 'AND', 'ANIMAL', 'YET', 'HE', 'HIMSELF', 'CREATES', 'A', 'HOLOCAUST', 'HE', 'DETESTS', 'IMPERIALISM', 'YET', 'HE', 'LAYS', 'PERSONAL', 'CLAIM', 'TO', 'THE', 'SOUTH', 'POLE'] +8463-294825-0009-567: hyp=['THIS', 'COMPULSION', 'LEADS', 'NEMO', 'INTO', 'UGLY', 'CONTRADICTIONS', 'HE', 'IS', 'A', 'FIGHTER', 'FOR', 'FREEDOM', 'YET', 'ALL', 'WHO', 'BOARD', 'HIS', 'SHIP', 'ARE', 'IMPRISONED', 'THERE', 'FOR', 'GOOD', 'HE', 'WORKS', 'TO', 'SAVE', 'LIVES', 'BOTH', 'HUMAN', 'AND', 'ANIMAL', 'YET', 'HE', 'HIMSELF', 'CREATES', 'A', 'HOHLAST', 'HE', 'DETESTS', 'IMPERIALISM', 'YET', 'HE', 'LAYS', 'PERSONAL', 'CLAIM', 'TO', 'THE', 'SOUTH', 'POLE'] +8463-294825-0010-568: ref=['AND', 'IN', 'THIS', 'LAST', 'ACTION', 'HE', 'FALLS', 'INTO', 'THE', 'CLASSIC', 'SIN', 'OF', 'PRIDE'] +8463-294825-0010-568: hyp=['AND', 'IN', 'THIS', 'LAST', 'ACTION', 'HE', 'FALLS', 'INTO', 'THE', 'CLASSIC', 'SIN', 'OF', 'PRIDE'] +8463-294825-0011-569: ref=["HE'S", 'SWIFTLY', 'PUNISHED'] +8463-294825-0011-569: hyp=['HIS', 'SWIFTLY', 'PUNISHED'] +8463-294825-0012-570: ref=['THE', 'NAUTILUS', 'NEARLY', 'PERISHES', 'IN', 'THE', 'ANTARCTIC', 'AND', 'NEMO', 'SINKS', 'INTO', 'A', 'GROWING', 'DEPRESSION'] +8463-294825-0012-570: hyp=['THE', 'NAUTILUS', 'NEARLY', 'PERISHES', 'IN', 'THE', 'ANTARCTIC', 'AND', 'NEMO', 'SINKS', 'INTO', 'A', 'GROWING', 'DEPRESSION'] +8463-294825-0013-571: ref=['FOR', 'MANY', 'THEN', 'THIS', 'BOOK', 'HAS', 'BEEN', 'A', 'SOURCE', 'OF', 'FASCINATION', 'SURELY', 'ONE', 'OF', 'THE', 'MOST', 'INFLUENTIAL', 'NOVELS', 'EVER', 'WRITTEN', 'AN', 'INSPIRATION', 'FOR', 'SUCH', 'SCIENTISTS', 'AND', 'DISCOVERERS', 'AS', 'ENGINEER', 'SIMON', 'LAKE', 'OCEANOGRAPHER', 'WILLIAM', 'BEEBE', 'POLAR', 'TRAVELER', 'SIR', 'ERNEST', 'SHACKLETON'] +8463-294825-0013-571: hyp=['FOR', 'MANY', 'THEN', 'THIS', 'BOOK', 'HAS', 'BEEN', 'A', 'SOURCE', 'OF', 'FASCINATION', 'SURELY', 'ONE', 'OF', 'THE', 'MOST', 'INFLUENTIAL', 'NOVELS', 'EVER', 'WRITTEN', 'AND', 'INSPIRATION', 'FOR', 'SUCH', 'SCIENTISTS', 'AND', 'DISCOVERERS', 'AS', 'ENGINEERS', 'SIMON', 'LAKE', 'OCEANOGRAPHER', 'WILLIAM', 'B', 'POLLAR', 'TRAVELLERS', 'ARE', 'EARNEST', 'SHACKLETON'] +8463-294825-0014-572: ref=['FATHOM', 'SIX', 'FEET'] +8463-294825-0014-572: hyp=['FATHOM', 'SIX', 'FEET'] +8463-294825-0015-573: ref=['GRAM', 'ROUGHLY', 'ONE', 'TWENTY', 'EIGHTH', 'OF', 'AN', 'OUNCE'] +8463-294825-0015-573: hyp=['GRAHAM', 'ROUGHLY', 'WON', 'TWENTY', 'EIGHTH', 'OF', 'AN', 'OUNCE'] +8463-294825-0016-574: ref=['MILLIGRAM', 'ROUGHLY', 'ONE', 'TWENTY', 'EIGHT', 'THOUSAND', 'OF', 'AN', 'OUNCE'] +8463-294825-0016-574: hyp=['MILAGRAM', 'ROUGHLY', 'WON', 'TWENTY', 'EIGHT', 
'THOUSANDTH', 'OF', 'AN', 'OUNCE'] +8463-294825-0017-575: ref=['LITER', 'ROUGHLY', 'ONE', 'QUART'] +8463-294825-0017-575: hyp=['LEADER', 'ROUGHLY', 'WON', 'COURT'] +8463-294825-0018-576: ref=['METER', 'ROUGHLY', 'ONE', 'YARD', 'THREE', 'INCHES'] +8463-294825-0018-576: hyp=['METER', 'ROUGHLY', 'ONE', 'YARD', 'THREE', 'INCHES'] +8463-294825-0019-577: ref=['MILLIMETER', 'ROUGHLY', 'ONE', 'TWENTY', 'FIFTH', 'OF', 'AN', 'INCH'] +8463-294825-0019-577: hyp=['MILLIMETER', 'ROUGHLY', 'WON', 'TWENTY', 'FIFTH', 'OF', 'AN', 'INCH'] +8463-294828-0000-578: ref=['CHAPTER', 'THREE', 'AS', 'MASTER', 'WISHES'] +8463-294828-0000-578: hyp=['CHAPTER', 'THREE', 'AS', 'MASTER', 'WISHES'] +8463-294828-0001-579: ref=['THREE', 'SECONDS', 'BEFORE', 'THE', 'ARRIVAL', 'OF', 'J', 'B', "HOBSON'S", 'LETTER', 'I', 'NO', 'MORE', 'DREAMED', 'OF', 'CHASING', 'THE', 'UNICORN', 'THAN', 'OF', 'TRYING', 'FOR', 'THE', 'NORTHWEST', 'PASSAGE'] +8463-294828-0001-579: hyp=['THREE', 'SECONDS', 'BEFORE', 'THE', 'ARRIVAL', 'OF', 'J', 'B', "HOBSON'S", 'LETTER', 'I', 'KNOW', 'MORE', 'DREAMED', 'OF', 'CHASING', 'THE', 'UNICORN', 'THAN', 'OF', 'TRYING', 'FOR', 'THE', 'NORTH', 'WEST', 'PASSAGE'] +8463-294828-0002-580: ref=['EVEN', 'SO', 'I', 'HAD', 'JUST', 'RETURNED', 'FROM', 'AN', 'ARDUOUS', 'JOURNEY', 'EXHAUSTED', 'AND', 'BADLY', 'NEEDING', 'A', 'REST'] +8463-294828-0002-580: hyp=['EVEN', 'SO', 'I', 'HAD', 'JUST', 'RETURNED', 'FROM', 'AN', 'ARDUOUS', 'JOURNEY', 'EXHAUSTED', 'AND', 'BADLY', 'NEEDING', 'ARREST'] +8463-294828-0003-581: ref=['I', 'WANTED', 'NOTHING', 'MORE', 'THAN', 'TO', 'SEE', 'MY', 'COUNTRY', 'AGAIN', 'MY', 'FRIENDS', 'MY', 'MODEST', 'QUARTERS', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'MY', 'DEARLY', 'BELOVED', 'COLLECTIONS'] +8463-294828-0003-581: hyp=['I', 'WANTED', 'NOTHING', 'MORE', 'THAN', 'TO', 'SEE', 'MY', 'COUNTRY', 'AGAIN', 'MY', 'FRIENDS', 'MY', 'MODEST', 'QUARTERS', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'MY', 'DEARLY', 'BELOVED', 'COLLECTIONS'] +8463-294828-0004-582: ref=['BUT', 'NOW', 'NOTHING', 'COULD', 'HOLD', 'ME', 'BACK'] +8463-294828-0004-582: hyp=['BUT', 'NOW', 'NOTHING', 'COULD', 'HOLD', 'ME', 'BACK'] +8463-294828-0005-583: ref=['CONSEIL', 'WAS', 'MY', 'MANSERVANT'] +8463-294828-0005-583: hyp=['CONSEIL', 'WAS', 'MY', "MAN'S", 'SERVANT'] +8463-294828-0006-584: ref=['FROM', 'RUBBING', 'SHOULDERS', 'WITH', 'SCIENTISTS', 'IN', 'OUR', 'LITTLE', 'UNIVERSE', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'THE', 'BOY', 'HAD', 'COME', 'TO', 'KNOW', 'A', 'THING', 'OR', 'TWO'] +8463-294828-0006-584: hyp=['FROM', 'RUBBING', 'SHOULDERS', 'WITH', 'SCIENTISTS', 'IN', 'OUR', 'LITTLE', 'UNIVERSE', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'THE', 'BOY', 'HAD', 'COME', 'TO', 'KNOW', 'A', 'THING', 'OR', 'TWO'] +8463-294828-0007-585: ref=['CLASSIFYING', 'WAS', 'EVERYTHING', 'TO', 'HIM', 'SO', 'HE', 'KNEW', 'NOTHING', 'ELSE', 'WELL', 'VERSED', 'IN', 'THE', 'THEORY', 'OF', 'CLASSIFICATION', 'HE', 'WAS', 'POORLY', 'VERSED', 'IN', 'ITS', 'PRACTICAL', 'APPLICATION', 'AND', 'I', 'DOUBT', 'THAT', 'HE', 'COULD', 'TELL', 'A', 'SPERM', 'WHALE', 'FROM', 'A', 'BALEEN', 'WHALE'] +8463-294828-0007-585: hyp=['CLASSIFYING', 'WAS', 'EVERYTHING', 'TO', 'HIM', 'SO', 'HE', 'KNEW', 'NOTHING', 'ELSE', 'WILL', 'VERSED', 'IN', 'A', 'THEORY', 'OF', 'CLASSIFICATION', 'HE', 'WAS', 'POORLY', 'VERSED', 'IN', 'ITS', 'PRACTICAL', 'APPLICATION', 'AND', 'I', 'DOUBT', 'THAT', 'HE', 'COULD', 'TELL', 'A', 'SPERM', 'WHALE', 'FROM', 'A', 'BALEEN', 'WHALE'] +8463-294828-0008-586: ref=['AND', 'YET', 'WHAT', 'A', 'FINE', 'GALLANT', 'LAD'] +8463-294828-0008-586: hyp=['AND', 'YET', 
'WHAT', 'A', 'FINE', 'GALLANT', 'LAD'] +8463-294828-0009-587: ref=['NOT', 'ONCE', 'DID', 'HE', 'COMMENT', 'ON', 'THE', 'LENGTH', 'OR', 'THE', 'HARDSHIPS', 'OF', 'A', 'JOURNEY'] +8463-294828-0009-587: hyp=['NOT', 'ONCE', 'DID', 'HE', 'COMMENT', 'ON', 'THE', 'LENGTH', 'OR', 'THE', 'HARDSHIPS', 'OF', 'THE', 'JOURNEY'] +8463-294828-0010-588: ref=['NEVER', 'DID', 'HE', 'OBJECT', 'TO', 'BUCKLING', 'UP', 'HIS', 'SUITCASE', 'FOR', 'ANY', 'COUNTRY', 'WHATEVER', 'CHINA', 'OR', 'THE', 'CONGO', 'NO', 'MATTER', 'HOW', 'FAR', 'OFF', 'IT', 'WAS'] +8463-294828-0010-588: hyp=['NEVER', 'DID', 'HE', 'OBJECT', 'TO', 'BUCKLING', 'UP', 'HIS', 'SUIT', 'CASE', 'FOR', 'ANY', 'COUNTRY', 'WHATEVER', 'CHINA', 'OR', 'THE', 'CONGO', 'NO', 'MATTER', 'HOW', 'FAR', 'OFF', 'IT', 'WAS'] +8463-294828-0011-589: ref=['HE', 'WENT', 'HERE', 'THERE', 'AND', 'EVERYWHERE', 'IN', 'PERFECT', 'CONTENTMENT'] +8463-294828-0011-589: hyp=['HE', 'WENT', 'HERE', 'THERE', 'AND', 'EVERYWHERE', 'IN', 'PERFECT', 'CONTENTMENT'] +8463-294828-0012-590: ref=['PLEASE', 'FORGIVE', 'ME', 'FOR', 'THIS', 'UNDERHANDED', 'WAY', 'OF', 'ADMITTING', 'I', 'HAD', 'TURNED', 'FORTY'] +8463-294828-0012-590: hyp=['PLEASE', 'FORGIVE', 'ME', 'FOR', 'THIS', 'UNDERHANDED', 'WAY', 'OF', 'ADMITTING', 'THAT', 'I', 'HAD', 'TURNED', 'FORTY'] +8463-294828-0013-591: ref=['HE', 'WAS', 'A', 'FANATIC', 'ON', 'FORMALITY', 'AND', 'HE', 'ONLY', 'ADDRESSED', 'ME', 'IN', 'THE', 'THIRD', 'PERSON', 'TO', 'THE', 'POINT', 'WHERE', 'IT', 'GOT', 'TIRESOME'] +8463-294828-0013-591: hyp=['HE', 'WAS', 'A', 'FANATIC', 'ON', 'FORMALITY', 'AND', 'HE', 'ONLY', 'ADDRESSED', 'ME', 'IN', 'THE', 'THIRD', 'PERSON', 'TO', 'THE', 'POINT', 'WHERE', 'IT', 'GOT', 'TO', 'HYAHSOME'] +8463-294828-0014-592: ref=['THERE', 'WAS', 'GOOD', 'REASON', 'TO', 'STOP', 'AND', 'THINK', 'EVEN', 'FOR', 'THE', "WORLD'S", 'MOST', 'EMOTIONLESS', 'MAN'] +8463-294828-0014-592: hyp=['THERE', 'WAS', 'GOOD', 'REASON', 'TO', 'STOP', 'AND', 'THINK', 'EVEN', 'FOR', 'THE', "WORLD'S", 'MOST', 'EMOTIONLESS', 'MAN'] +8463-294828-0015-593: ref=['CONSEIL', 'I', 'CALLED', 'A', 'THIRD', 'TIME', 'CONSEIL', 'APPEARED'] +8463-294828-0015-593: hyp=['CONSEIL', 'I', 'CALLED', 'A', 'THIRD', 'TIME', 'CONSEIL', 'APPEARED'] +8463-294828-0016-594: ref=['DID', 'MASTER', 'SUMMON', 'ME', 'HE', 'SAID', 'ENTERING'] +8463-294828-0016-594: hyp=['DEAD', 'MASTER', 'SUMMON', 'ME', 'HE', 'SAID', 'ENTERING'] +8463-294828-0017-595: ref=['PACK', 'AS', 'MUCH', 'INTO', 'MY', 'TRUNK', 'AS', 'YOU', 'CAN', 'MY', 'TRAVELING', 'KIT', 'MY', 'SUITS', 'SHIRTS', 'AND', 'SOCKS', "DON'T", 'BOTHER', 'COUNTING', 'JUST', 'SQUEEZE', 'IT', 'ALL', 'IN', 'AND', 'HURRY'] +8463-294828-0017-595: hyp=['PACK', 'AS', 'MUCH', 'INTO', 'MY', 'TRUNK', 'AS', 'YOU', 'CAN', 'MY', 'TRAVELLING', 'KIT', 'MY', 'SUITS', 'SHIRTS', 'AND', 'SOCKS', "DON'T", 'BOTHER', 'COUNTING', 'JEST', 'SQUEEZE', 'IT', 'ALL', 'IN', 'AND', 'HURRY'] +8463-294828-0018-596: ref=["WE'LL", 'DEAL', 'WITH', 'THEM', 'LATER', 'WHAT'] +8463-294828-0018-596: hyp=["WE'LL", 'DEAL', 'WITH', 'THEM', 'LATER', 'WHAT'] +8463-294828-0019-597: ref=['ANYHOW', "WE'LL", 'LEAVE', 'INSTRUCTIONS', 'TO', 'SHIP', 'THE', 'WHOLE', 'MENAGERIE', 'TO', 'FRANCE'] +8463-294828-0019-597: hyp=['ANYHOW', "WE'LL", 'LIVE', 'INSTRUCTIONS', 'TO', 'SHIP', 'THE', 'WHOLE', 'MENAGERIE', 'TO', 'FRANCE'] +8463-294828-0020-598: ref=['YES', 'WE', 'ARE', 'CERTAINLY', 'I', 'REPLIED', 'EVASIVELY', 'BUT', 'AFTER', 'WE', 'MAKE', 'A', 'DETOUR'] +8463-294828-0020-598: hyp=['YES', 'WE', 'ARE', 'CERTAINLY', 'I', 'REPLIED', 'EVASIVELY', 'BUT', 'AFTER', 'WE', 'MAKE', 'A', 'DETOUR'] 
+8463-294828-0021-599: ref=['A', 'ROUTE', 'SLIGHTLY', 'LESS', 'DIRECT', "THAT'S", 'ALL'] +8463-294828-0021-599: hyp=['A', 'ROUT', 'SLIGHTLY', 'LESS', 'DIRECT', "THAT'S", 'ALL'] +8463-294828-0022-600: ref=["WE'RE", 'LEAVING', 'ON', 'THE', 'ABRAHAM', 'LINCOLN'] +8463-294828-0022-600: hyp=['WERE', 'LEAVING', 'ON', 'THE', 'ABRAHAM', 'LINCOLN'] +8463-294828-0023-601: ref=['YOU', 'SEE', 'MY', 'FRIEND', "IT'S", 'AN', 'ISSUE', 'OF', 'THE', 'MONSTER', 'THE', 'NOTORIOUS', 'NARWHALE'] +8463-294828-0023-601: hyp=['YOU', 'SEE', 'MY', 'FRIEND', "IT'S", 'AN', 'ISSUE', 'OF', 'THE', 'MONSTER', 'THE', 'NOTORIOUS', 'NARWHALE'] +8463-294828-0024-602: ref=['WE', "DON'T", 'KNOW', 'WHERE', 'IT', 'WILL', 'TAKE', 'US'] +8463-294828-0024-602: hyp=['WE', "DON'T", 'KNOW', 'WHERE', 'IT', 'WILL', 'TAKE', 'US'] +8463-294828-0025-603: ref=['BUT', "WE'RE", 'GOING', 'JUST', 'THE', 'SAME'] +8463-294828-0025-603: hyp=['BUT', "WE'RE", 'GOING', 'JUST', 'THE', 'SAME'] +8463-294828-0026-604: ref=['WE', 'HAVE', 'A', 'COMMANDER', "WHO'S", 'GAME', 'FOR', 'ANYTHING'] +8463-294828-0026-604: hyp=['WE', 'HAVE', 'A', 'COMMANDER', 'WHOSE', 'GAME', 'FOR', 'ANYTHING'] +8463-294828-0027-605: ref=['I', 'LEFT', 'INSTRUCTIONS', 'FOR', 'SHIPPING', 'MY', 'CONTAINERS', 'OF', 'STUFFED', 'ANIMALS', 'AND', 'DRIED', 'PLANTS', 'TO', 'PARIS', 'FRANCE'] +8463-294828-0027-605: hyp=['I', 'LEFT', 'INSTRUCTIONS', 'FOR', 'SHIPPING', 'MY', 'CONTAINERS', 'OF', 'STUFFED', 'ANIMALS', 'AND', 'DRIED', 'PLANTS', 'TO', 'PARIS', 'FRANCE'] +8463-294828-0028-606: ref=['I', 'OPENED', 'A', 'LINE', 'OF', 'CREDIT', 'SUFFICIENT', 'TO', 'COVER', 'THE', 'BABIRUSA', 'AND', 'CONSEIL', 'AT', 'MY', 'HEELS', 'I', 'JUMPED', 'INTO', 'A', 'CARRIAGE'] +8463-294828-0028-606: hyp=['I', 'OPENED', 'A', 'LINE', 'OF', 'CREDIT', 'SUFFICIENT', 'TO', 'COVER', 'THE', 'BABAROUSA', 'AND', 'CONSEIL', 'AT', 'MY', 'HEELS', 'I', 'JUMPED', 'INTO', 'A', 'CARRIAGE'] +8463-294828-0029-607: ref=['OUR', 'BAGGAGE', 'WAS', 'IMMEDIATELY', 'CARRIED', 'TO', 'THE', 'DECK', 'OF', 'THE', 'FRIGATE', 'I', 'RUSHED', 'ABOARD'] +8463-294828-0029-607: hyp=['OUR', 'BAGGAGE', 'WAS', 'IMMEDIATELY', 'CARRIED', 'TO', 'THE', 'DECK', 'OF', 'THE', 'FRIGATE', 'I', 'RUSHED', 'ABOARD'] +8463-294828-0030-608: ref=['I', 'ASKED', 'FOR', 'COMMANDER', 'FARRAGUT'] +8463-294828-0030-608: hyp=['I', 'ASKED', 'FOR', 'COMMANDER', 'FARRAGUT'] +8463-294828-0031-609: ref=['ONE', 'OF', 'THE', 'SAILORS', 'LED', 'ME', 'TO', 'THE', 'AFTERDECK', 'WHERE', 'I', 'STOOD', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'SMART', 'LOOKING', 'OFFICER', 'WHO', 'EXTENDED', 'HIS', 'HAND', 'TO', 'ME'] +8463-294828-0031-609: hyp=['ONE', 'OF', 'THE', 'SAILORS', 'LED', 'ME', 'TO', 'THE', 'AFTER', 'DECK', 'WHERE', 'I', 'STOOD', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'SMART', 'LOOKING', 'OFFICER', 'WHO', 'EXTENDED', 'HIS', 'HAND', 'TO', 'ME'] +8463-294828-0032-610: ref=['IN', 'PERSON', 'WELCOME', 'ABOARD', 'PROFESSOR', 'YOUR', 'CABIN', 'IS', 'WAITING', 'FOR', 'YOU'] +8463-294828-0032-610: hyp=['IN', 'PERSON', 'WELCOME', 'ABOARD', 'PROFESSOR', 'YOUR', 'CABIN', 'IS', 'WAITING', 'FOR', 'YOU'] +8463-294828-0033-611: ref=['I', 'WAS', 'WELL', 'SATISFIED', 'WITH', 'MY', 'CABIN', 'WHICH', 'WAS', 'LOCATED', 'IN', 'THE', 'STERN', 'AND', 'OPENED', 'INTO', 'THE', 'OFFICERS', 'MESS'] +8463-294828-0033-611: hyp=['I', 'WAS', 'WELL', 'SATISFIED', 'WITH', 'MY', 'CABIN', 'WHICH', 'WAS', 'LOCATED', 'IN', 'THE', 'STERN', 'AND', 'OPENED', 'INTO', 'THE', "OFFICER'S", 'MESS'] +8463-294828-0034-612: ref=["WE'LL", 'BE', 'QUITE', 'COMFORTABLE', 'HERE', 'I', 'TOLD', 'CONSEIL'] +8463-294828-0034-612: 
hyp=['WILL', 'BE', 'QUITE', 'COMFORTABLE', 'HERE', 'I', 'TOLD', 'CONSEIL'] +8463-294828-0035-613: ref=['AND', 'SO', 'IF', "I'D", 'BEEN', 'DELAYED', 'BY', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'OR', 'EVEN', 'LESS', 'THE', 'FRIGATE', 'WOULD', 'HAVE', 'GONE', 'WITHOUT', 'ME', 'AND', 'I', 'WOULD', 'HAVE', 'MISSED', 'OUT', 'ON', 'THIS', 'UNEARTHLY', 'EXTRAORDINARY', 'AND', 'INCONCEIVABLE', 'EXPEDITION', 'WHOSE', 'TRUE', 'STORY', 'MIGHT', 'WELL', 'MEET', 'WITH', 'SOME', 'SKEPTICISM'] +8463-294828-0035-613: hyp=['AND', 'SO', 'IF', 'I', 'HAD', 'BEEN', 'DELAYED', 'BY', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'OR', 'EVEN', 'LESS', 'THE', 'FRIGATE', 'WOULD', 'HAVE', 'GONE', 'WITHOUT', 'ME', 'AND', 'I', 'WOULD', 'HAVE', 'MISSED', 'OUT', 'ON', 'THIS', 'UNEARTHLY', 'EXTRAORDINARY', 'AND', 'INCONCEIVABLE', 'EXPEDITION', 'WHOSE', 'TRUE', 'STORY', 'MIGHT', 'WELL', 'MEET', 'WITH', 'SOME', 'SKEPTICISM'] +8463-294828-0036-614: ref=['THE', 'WHARVES', 'OF', 'BROOKLYN', 'AND', 'EVERY', 'PART', 'OF', 'NEW', 'YORK', 'BORDERING', 'THE', 'EAST', 'RIVER', 'WERE', 'CROWDED', 'WITH', 'CURIOSITY', 'SEEKERS'] +8463-294828-0036-614: hyp=['THE', 'WHARVES', 'OF', 'BROOKLYN', 'AND', 'EVERY', 'PART', 'OF', 'NEW', 'YORK', 'BORDERING', 'THE', 'EAST', 'RIVER', 'WERE', 'CROWDED', 'WITH', 'CURIOSITY', 'SEEKERS'] +8463-294828-0037-615: ref=['DEPARTING', 'FROM', 'FIVE', 'HUNDRED', 'THOUSAND', 'THROATS', 'THREE', 'CHEERS', 'BURST', 'FORTH', 'IN', 'SUCCESSION'] +8463-294828-0037-615: hyp=['DEPARTING', 'FROM', 'FIVE', 'HUNDRED', 'THOUSAND', 'THROATS', 'THREE', 'CHEERS', 'BURST', 'FORTH', 'IN', 'SUCCESSION'] +8463-294828-0038-616: ref=['THOUSANDS', 'OF', 'HANDKERCHIEFS', 'WERE', 'WAVING', 'ABOVE', 'THESE', 'TIGHTLY', 'PACKED', 'MASSES', 'HAILING', 'THE', 'ABRAHAM', 'LINCOLN', 'UNTIL', 'IT', 'REACHED', 'THE', 'WATERS', 'OF', 'THE', 'HUDSON', 'RIVER', 'AT', 'THE', 'TIP', 'OF', 'THE', 'LONG', 'PENINSULA', 'THAT', 'FORMS', 'NEW', 'YORK', 'CITY'] +8463-294828-0038-616: hyp=['THOUSANDS', 'OF', 'HANDKERCHIEFS', 'WERE', 'WAVING', 'ABOVE', 'THESE', 'TIGHTLY', 'PACKED', 'MASSES', 'HAILING', 'THE', 'ABRAHAM', 'LINCOLN', 'UNTIL', 'IT', 'REACHED', 'THE', 'WATERS', 'OF', 'THE', 'HUDSON', 'RIVER', 'AT', 'THE', 'TIP', 'OF', 'THE', 'LONG', 'PRONUNCILA', 'THAT', 'FORMS', 'NEW', 'YORK', 'CITY'] +8555-284447-0000-2299: ref=['THEN', 'HE', 'RUSHED', 'DOWN', 'STAIRS', 'INTO', 'THE', 'COURTYARD', 'SHOUTING', 'LOUDLY', 'FOR', 'HIS', 'SOLDIERS', 'AND', 'THREATENING', 'TO', 'PATCH', 'EVERYBODY', 'IN', 'HIS', 'DOMINIONS', 'IF', 'THE', 'SAILORMAN', 'WAS', 'NOT', 'RECAPTURED'] +8555-284447-0000-2299: hyp=['THEN', 'HE', 'RUSHED', 'DOWNSTAIRS', 'INTO', 'THE', 'COURTYARD', 'SHOUTING', 'LOUDLY', 'FOR', 'HIS', 'SOLDIERS', 'AND', 'THREATENING', 'TO', 'PATCH', 'EVERYBODY', 'IN', 'HIS', 'DOMINIONS', 'AT', 'THE', 'SAILORMAN', 'WAS', 'NOT', 'RECAPTURED'] +8555-284447-0001-2300: ref=['HOLD', 'HIM', 'FAST', 'MY', 'MEN', 'AND', 'AS', 'SOON', 'AS', "I'VE", 'HAD', 'MY', 'COFFEE', 'AND', 'OATMEAL', "I'LL", 'TAKE', 'HIM', 'TO', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'AND', 'PATCH', 'HIM'] +8555-284447-0001-2300: hyp=['HOLD', 'HIM', 'FAST', 'TO', 'MY', 'MEN', 'AND', 'AS', 'SOON', 'AS', "I'VE", 'HAD', 'MY', 'COFFEE', 'AN', 'OATMEAL', 'I', 'WILL', 'TAKE', 'HIM', 'TO', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'AND', 'PATCH', 'HIM'] +8555-284447-0002-2301: ref=['I', "WOULDN'T", 'MIND', 'A', 'CUP', 'O', 'COFFEE', 'MYSELF', 'SAID', "CAP'N", 'BILL', "I'VE", 'HAD', "CONSID'BLE", 'EXERCISE', 'THIS', 'MORNIN', 'AND', "I'M", 'ALL', 'READY', 'FOR', 'BREAKFAS'] +8555-284447-0002-2301: 
hyp=['I', "WOULDN'T", 'MIND', 'A', 'CUP', 'OF', 'COFFEE', 'MYSELF', 'SAID', "CAP'N", 'BILL', 'I', 'HAVE', 'HAD', 'CONSIDERABLE', 'EXERCISE', 'THIS', 'MORNING', 'AND', "I'M", 'ALREADY', 'FOR', 'BREAKFAST'] +8555-284447-0003-2302: ref=['BUT', "CAP'N", 'BILL', 'MADE', 'NO', 'SUCH', 'ATTEMPT', 'KNOWING', 'IT', 'WOULD', 'BE', 'USELESS'] +8555-284447-0003-2302: hyp=['BUT', "CAP'N", 'BILL', 'MADE', 'NO', 'SUCH', 'ATTEMPT', 'KNOWING', 'IT', 'WOULD', 'BE', 'USELESS'] +8555-284447-0004-2303: ref=['AS', 'SOON', 'AS', 'THEY', 'ENTERED', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'THE', 'BOOLOOROO', 'GAVE', 'A', 'YELL', 'OF', 'DISAPPOINTMENT'] +8555-284447-0004-2303: hyp=['AS', 'SOON', 'AS', 'THEY', 'ENTERED', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'THE', 'BOOLOOROO', 'GAVE', 'A', 'YELL', 'OF', 'DISAPPOINTMENT'] +8555-284447-0005-2304: ref=['THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'WAS', 'HIGH', 'AND', 'BIG', 'AND', 'AROUND', 'IT', 'RAN', 'ROWS', 'OF', 'BENCHES', 'FOR', 'THE', 'SPECTATORS', 'TO', 'SIT', 'UPON'] +8555-284447-0005-2304: hyp=['THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'WAS', 'HIGH', 'AND', 'BIG', 'AND', 'AROUND', 'IT', 'RAN', 'ROWS', 'OF', 'BENCHES', 'FOR', 'THE', 'SPECTATORS', 'TO', 'SIT', 'UPON'] +8555-284447-0006-2305: ref=['IN', 'ONE', 'PLACE', 'AT', 'THE', 'HEAD', 'OF', 'THE', 'ROOM', 'WAS', 'A', 'RAISED', 'PLATFORM', 'FOR', 'THE', 'ROYAL', 'FAMILY', 'WITH', 'ELEGANT', 'THRONE', 'CHAIRS', 'FOR', 'THE', 'KING', 'AND', 'QUEEN', 'AND', 'SIX', 'SMALLER', 'BUT', 'RICHLY', 'UPHOLSTERED', 'CHAIRS', 'FOR', 'THE', 'SNUBNOSED', 'PRINCESSES'] +8555-284447-0006-2305: hyp=['IN', 'ONE', 'PLACE', 'AT', 'THE', 'HEAD', 'OF', 'THE', 'ROOM', 'WAS', 'A', 'RAISED', 'PLATFORM', 'FOR', 'THE', 'ROYAL', 'FAMILY', 'WITH', 'ELEGANT', 'THRONE', 'CHAIRS', 'FOR', 'THE', 'KING', 'AND', 'QUEEN', 'AND', 'SIX', 'SMALLER', 'BUT', 'RICHLY', 'UPHOLSTERED', 'CHAIRS', 'FOR', 'THE', 'SNUBNOSED', 'PRINCESSES'] +8555-284447-0007-2306: ref=['THEREFORE', 'HER', 'MAJESTY', 'PAID', 'NO', 'ATTENTION', 'TO', 'ANYONE', 'AND', 'NO', 'ONE', 'PAID', 'ANY', 'ATTENTION', 'TO', 'HER'] +8555-284447-0007-2306: hyp=['THEREFORE', 'HER', 'MAJESTY', 'PAID', 'NO', 'ATTENTION', 'TO', 'ANY', 'ONE', 'AND', 'NO', 'ONE', 'PAID', 'ANY', 'ATTENTION', 'TO', 'HER'] +8555-284447-0008-2307: ref=['RICH', 'JEWELS', 'OF', 'BLUE', 'STONES', 'GLITTERED', 'UPON', 'THEIR', 'PERSONS', 'AND', 'THE', 'ROYAL', 'LADIES', 'WERE', 'FULLY', 'AS', 'GORGEOUS', 'AS', 'THEY', 'WERE', 'HAUGHTY', 'AND', 'OVERBEARING'] +8555-284447-0008-2307: hyp=['RICH', 'JEWELS', 'OF', 'BLUE', 'STONES', 'GLITTERED', 'UPON', 'THEIR', 'PERSONS', 'AND', 'THE', 'ROYAL', 'LADIES', 'WERE', 'FULLY', 'AS', 'CORGEOUS', 'AS', 'THEY', 'WERE', 'HALTING', 'AND', 'OVERBEARING'] +8555-284447-0009-2308: ref=['MORNIN', 'GIRLS', 'HOPE', 'YE', 'FEEL', 'AS', 'WELL', 'AS', 'YE', 'LOOK'] +8555-284447-0009-2308: hyp=['MORNING', 'GIRLS', 'O', 'BELL', 'AS', 'WELL', 'AS', 'YE', 'LOOK'] +8555-284447-0010-2309: ref=['CONTROL', 'YOURSELVES', 'MY', 'DEARS', 'REPLIED', 'THE', 'BOOLOOROO', 'THE', 'WORST', 'PUNISHMENT', 'I', 'KNOW', 'HOW', 'TO', 'INFLICT', 'ON', 'ANYONE', 'THIS', 'PRISONER', 'IS', 'ABOUT', 'TO', 'SUFFER', "YOU'LL", 'SEE', 'A', 'VERY', 'PRETTY', 'PATCHING', 'MY', 'ROYAL', 'DAUGHTERS'] +8555-284447-0010-2309: hyp=['CONTROL', 'YOURSELVES', 'MY', 'DEARS', 'REPLIED', 'THE', 'BOOLOOROO', 'THE', 'WORST', 'PUNISHMENT', 'I', 'KNOW', 'HOW', 'TO', 'INFLICT', 'ON', 'ANY', 'ONE', 'THIS', 'PRISONER', 'IS', 'ABOUT', 'TO', 'SUFFER', 'YOU', 'WILL', 'SEE', 'A', 'VERY', 'PRETTY', 'PATCHING', 'MY', 'ROYAL', 
'DAUGHTERS'] +8555-284447-0011-2310: ref=['SUPPOSE', "IT'S", 'A', 'FRIEND'] +8555-284447-0011-2310: hyp=['SUPPOSE', "IT'S", 'OF', 'BRAND'] +8555-284447-0012-2311: ref=['THE', 'CAPTAIN', 'SHOOK', 'HIS', 'HEAD'] +8555-284447-0012-2311: hyp=['THE', 'CAPTAIN', 'SHOOK', 'HIS', 'HEAD'] +8555-284447-0013-2312: ref=['WHY', 'YOU', 'SAID', 'TO', 'FETCH', 'THE', 'FIRST', 'LIVING', 'CREATURE', 'WE', 'MET', 'AND', 'THAT', 'WAS', 'THIS', 'BILLYGOAT', 'REPLIED', 'THE', 'CAPTAIN', 'PANTING', 'HARD', 'AS', 'HE', 'HELD', 'FAST', 'TO', 'ONE', 'OF', 'THE', "GOAT'S", 'HORNS'] +8555-284447-0013-2312: hyp=['WHY', 'YOU', 'SIT', 'TO', 'VEGET', 'THE', 'FIRST', 'LIVING', 'CREATURE', 'WE', 'MET', 'AND', 'THAT', 'WAS', 'THE', 'SPILLY', 'GOAT', 'REPLIED', 'THE', 'CAPTAIN', 'PANTING', 'HARD', 'AS', 'HE', 'HELD', 'FAST', 'TO', 'ONE', 'OF', 'THE', "GOAT'S", 'HORNS'] +8555-284447-0014-2313: ref=['THE', 'IDEA', 'OF', 'PATCHING', "CAP'N", 'BILL', 'TO', 'A', 'GOAT', 'WAS', 'VASTLY', 'AMUSING', 'TO', 'HIM', 'AND', 'THE', 'MORE', 'HE', 'THOUGHT', 'OF', 'IT', 'THE', 'MORE', 'HE', 'ROARED', 'WITH', 'LAUGHTER'] +8555-284447-0014-2313: hyp=['THE', 'IDEA', 'OF', 'PATCHING', "CAP'N", 'BILL', 'TO', 'A', 'GOAT', 'WAS', 'VASTLY', 'AMUSING', 'TO', 'HIM', 'AND', 'THE', 'MORE', 'HE', 'THOUGHT', 'OF', 'IT', 'THE', 'MORE', 'HE', 'ROARED', 'WITH', 'LAUGHTER'] +8555-284447-0015-2314: ref=['THEY', 'LOOK', 'SOMETHING', 'ALIKE', 'YOU', 'KNOW', 'SUGGESTED', 'THE', 'CAPTAIN', 'OF', 'THE', 'GUARDS', 'LOOKING', 'FROM', 'ONE', 'TO', 'THE', 'OTHER', 'DOUBTFULLY', 'AND', "THEY'RE", 'NEARLY', 'THE', 'SAME', 'SIZE', 'IF', 'YOU', 'STAND', 'THE', 'GOAT', 'ON', 'HIS', 'HIND', 'LEGS', "THEY'VE", 'BOTH', 'GOT', 'THE', 'SAME', 'STYLE', 'OF', 'WHISKERS', 'AND', "THEY'RE", 'BOTH', 'OF', 'EM', 'OBSTINATE', 'AND', 'DANGEROUS', 'SO', 'THEY', 'OUGHT', 'TO', 'MAKE', 'A', 'GOOD', 'PATCH', 'SPLENDID'] +8555-284447-0015-2314: hyp=['THEY', 'LOOK', 'SOMETHING', 'ALIKE', 'YOU', 'KNOW', 'SUGGESTED', 'THE', 'CAPTAIN', 'OF', 'THE', 'GUARDS', 'LOOKING', 'FROM', 'ONE', 'TO', 'THE', 'OTHER', 'DOUBTFULLY', 'AND', "THEY'RE", 'NEARLY', 'THE', 'SAME', 'SIZE', 'IF', 'HE', 'STAND', 'A', 'BOAT', 'ON', 'HIS', 'HIND', 'LEGS', "THEY'VE", 'BOTH', 'GOT', 'THE', 'SAME', 'STYLE', 'OF', 'WHISKERS', 'AND', "THEY'RE", 'BOTH', 'OF', 'THEM', 'OBSTINATE', 'AND', 'DANGEROUS', 'SO', 'THEY', 'OUGHT', 'TO', 'MAKE', 'A', 'GOOD', 'PATCH', 'SPLENDID'] +8555-284447-0016-2315: ref=['FINE', 'GLORIOUS'] +8555-284447-0016-2315: hyp=['FINE', 'GLORIOUS'] +8555-284447-0017-2316: ref=['WHEN', 'THIS', 'HAD', 'BEEN', 'ACCOMPLISHED', 'THE', 'BOOLOOROO', 'LEANED', 'OVER', 'TO', 'TRY', 'TO', 'DISCOVER', 'WHY', 'THE', 'FRAME', 'ROLLED', 'AWAY', 'SEEMINGLY', 'OF', 'ITS', 'OWN', 'ACCORD', 'AND', 'HE', 'WAS', 'THE', 'MORE', 'PUZZLED', 'BECAUSE', 'IT', 'HAD', 'NEVER', 'DONE', 'SUCH', 'A', 'THING', 'BEFORE'] +8555-284447-0017-2316: hyp=['WHEN', 'THIS', 'HAD', 'BEEN', 'ACCOMPLISHED', 'THE', 'BOOLOOROO', 'LEANED', 'OVER', 'TO', 'TRY', 'TO', 'DISCOVER', 'WHY', 'THE', 'FRAME', 'ROLLED', 'AWAY', 'SEEMINGLY', 'OF', 'ITS', 'OWN', 'ACCORD', 'AND', 'HE', 'WAS', 'THE', 'MORE', 'PUZZLED', 'BECAUSE', 'IT', 'HAD', 'NEVER', 'DONE', 'SUCH', 'A', 'THING', 'BEFORE'] +8555-284447-0018-2317: ref=['AT', 'ONCE', 'THE', 'GOAT', 'GAVE', 'A', 'LEAP', 'ESCAPED', 'FROM', 'THE', 'SOLDIERS', 'AND', 'WITH', 'BOWED', 'HEAD', 'RUSHED', 'UPON', 'THE', 'BOOLOOROO'] +8555-284447-0018-2317: hyp=['AT', 'ONCE', 'THE', 'GOAT', 'GAVE', 'A', 'LEAP', 'ESCAPE', 'FROM', 'THE', 'SOLDIERS', 'AND', 'WITH', 'BOWED', 'HEAD', 'RUSHED', 'UPON', 'THE', 'BOOLOOROO'] 
+8555-284447-0019-2318: ref=['BEFORE', 'ANY', 'COULD', 'STOP', 'HIM', 'HE', 'BUTTED', 'HIS', 'MAJESTY', 'SO', 'FURIOUSLY', 'THAT', 'THE', 'KING', 'SOARED', 'FAR', 'INTO', 'THE', 'AIR', 'AND', 'TUMBLED', 'IN', 'A', 'HEAP', 'AMONG', 'THE', 'BENCHES', 'WHERE', 'HE', 'LAY', 'MOANING', 'AND', 'GROANING'] +8555-284447-0019-2318: hyp=['BEFORE', 'ANY', 'GOOD', 'STOP', 'HIM', 'HE', 'BUTTED', 'HIS', 'MAJESTY', 'SO', 'FURIOUSLY', 'THAT', 'THE', 'KING', 'SOARED', 'FAR', 'INTO', 'THE', 'AIR', 'AND', 'TUMBLED', 'IN', 'A', 'HEAP', 'AMONG', 'THE', 'BENCHES', 'WHERE', 'HE', 'LAY', 'MOANING', 'AND', 'GROANING'] +8555-284447-0020-2319: ref=['THE', "GOAT'S", 'WARLIKE', 'SPIRIT', 'WAS', 'ROUSED', 'BY', 'THIS', 'SUCCESSFUL', 'ATTACK'] +8555-284447-0020-2319: hyp=['THE', 'GOATS', 'WORE', 'LIKE', 'SPIRIT', 'WAS', 'ROUSED', 'BY', 'THIS', 'SUCCESSFUL', 'ATTACK'] +8555-284447-0021-2320: ref=['THEN', 'THEY', 'SPED', 'IN', 'GREAT', 'HASTE', 'FOR', 'THE', 'DOOR', 'AND', 'THE', 'GOAT', 'GAVE', 'A', 'FINAL', 'BUTT', 'THAT', 'SENT', 'THE', 'ROW', 'OF', 'ROYAL', 'LADIES', 'ALL', 'DIVING', 'INTO', 'THE', 'CORRIDOR', 'IN', 'ANOTHER', 'TANGLE', 'WHEREUPON', 'THEY', 'SHRIEKED', 'IN', 'A', 'MANNER', 'THAT', 'TERRIFIED', 'EVERYONE', 'WITHIN', 'SOUND', 'OF', 'THEIR', 'VOICES'] +8555-284447-0021-2320: hyp=['THEN', 'THEY', 'SPED', 'IN', 'GREAT', 'HASTE', 'FOR', 'THE', 'DOOR', 'AND', 'THE', 'GOAT', 'GAVE', 'A', 'FINAL', 'BUT', 'THAT', 'SENT', 'A', 'ROW', 'OF', 'ROYAL', 'LADIES', 'ALL', 'DIVING', 'INTO', 'THE', 'CORRIDOR', 'IN', 'ANOTHER', 'TANGLE', 'WHEREUPON', 'THEY', 'SHRIEKED', 'IN', 'A', 'MANNER', 'THAT', 'TERRIFIED', 'EVERYONE', 'WITHIN', 'SOUND', 'OF', 'THEIR', 'VOICES'] +8555-284447-0022-2321: ref=['I', 'HAD', 'A', 'NOTION', 'IT', 'WAS', 'YOU', 'MATE', 'AS', 'SAVED', 'ME', 'FROM', 'THE', 'KNIFE'] +8555-284447-0022-2321: hyp=['I', 'HAD', 'A', 'NOTION', 'IT', 'WAS', 'YOU', 'MADE', 'TO', 'SEE', 'ME', 'FROM', 'THE', 'KNIFE'] +8555-284447-0023-2322: ref=['I', "COULDN'T", 'SHIVER', 'MUCH', 'BEIN', 'BOUND', 'SO', 'TIGHT', 'BUT', 'WHEN', "I'M", 'LOOSE', 'I', 'MEAN', 'TO', 'HAVE', 'JUS', 'ONE', 'GOOD', 'SHIVER', 'TO', 'RELIEVE', 'MY', "FEELIN'S"] +8555-284447-0023-2322: hyp=['I', "COULDN'", 'SHIVER', 'MUCH', 'BEING', 'BOUND', 'SO', 'TIGHT', 'BUT', 'WHEN', "I'M", 'LOOSE', 'I', 'MEAN', 'TO', 'HAVE', 'JUST', 'SWUNG', 'GOOD', 'SHIVER', 'TO', 'RELIEVE', 'MY', 'FEELINS'] +8555-284447-0024-2323: ref=['COME', 'AND', 'GET', 'THE', 'BOOLOOROO', 'SHE', 'SAID', 'GOING', 'TOWARD', 'THE', 'BENCHES'] +8555-284447-0024-2323: hyp=['COME', 'AND', 'GET', 'THE', 'BOOLOOROO', 'SHE', 'SAID', 'GOING', 'TOWARD', 'THE', 'BENCHES'] +8555-284449-0000-2324: ref=['SO', 'THEY', 'WERE', 'QUITE', 'WILLING', 'TO', 'OBEY', 'THE', 'ORDERS', 'OF', 'THEIR', 'GIRL', 'QUEEN', 'AND', 'IN', 'A', 'SHORT', 'TIME', 'THE', 'BLASTS', 'OF', 'TRUMPETS', 'AND', 'ROLL', 'OF', 'DRUMS', 'AND', 'CLASHING', 'OF', 'CYMBALS', 'TOLD', 'TROT', 'AND', "CAP'N", 'BILL', 'THAT', 'THE', 'BLUE', 'BANDS', 'HAD', 'ASSEMBLED', 'BEFORE', 'THE', 'PALACE'] +8555-284449-0000-2324: hyp=['SO', 'THEY', 'WERE', 'QUITE', 'WILLING', 'TO', 'OBEY', 'THE', 'ORDERS', 'OF', 'THEIR', 'GIRL', 'QUEEN', 'AND', 'IN', 'A', 'SHORT', 'TIME', 'THE', 'BLAST', 'OF', 'TRUMPETS', 'AND', 'ROLL', 'OF', 'DRUMS', 'AND', 'CLASHING', 'OF', 'CYMBALS', 'TOLD', 'TROT', 'AND', "CAP'N", 'BILL', 'THAT', 'THE', 'BLUE', 'BANDS', 'HAD', 'A', 'SIMPLED', 'BEFORE', 'THE', 'PALACE'] +8555-284449-0001-2325: ref=['THEN', 'THEY', 'ALL', 'MARCHED', 'OUT', 'A', 'LITTLE', 'WAY', 'INTO', 'THE', 'FIELDS', 'AND', 'FOUND', 'THAT', 'THE', 'ARMY', 'OF', 
'PINKIES', 'HAD', 'ALREADY', 'FORMED', 'AND', 'WAS', 'ADVANCING', 'STEADILY', 'TOWARD', 'THEM'] +8555-284449-0001-2325: hyp=['THEN', 'THEY', 'ALL', 'MARCHED', 'OUT', 'A', 'LITTLE', 'WAY', 'INTO', 'THE', 'FIELDS', 'AND', 'FOUND', 'THAT', 'THE', 'ARMY', 'OF', 'PINKIES', 'HAD', 'ALREADY', 'FORMED', 'AND', 'WAS', 'ADVANCING', 'STEADILY', 'TOWARD', 'THEM'] +8555-284449-0002-2326: ref=['AT', 'THE', 'HEAD', 'OF', 'THE', 'PINKIES', 'WERE', 'GHIP', 'GHISIZZLE', 'AND', 'BUTTON', 'BRIGHT', 'WHO', 'HAD', 'THE', 'PARROT', 'ON', 'HIS', 'SHOULDER', 'AND', 'THEY', 'WERE', 'SUPPORTED', 'BY', 'CAPTAIN', 'CORALIE', 'AND', 'CAPTAIN', 'TINTINT', 'AND', 'ROSALIE', 'THE', 'WITCH'] +8555-284449-0002-2326: hyp=['AT', 'THE', 'HEAD', 'OF', 'THE', 'PINKIES', 'WERE', 'GHIP', 'GHISIZZLE', 'AND', 'BUTTON', 'BRIGHT', 'WHO', 'HAD', 'THE', 'PARROT', 'ON', 'HIS', 'SHOULDER', 'AND', 'THEY', 'WERE', 'SUPPORTED', 'BY', 'CAPTAIN', 'CORLEY', 'AND', 'CAPTAIN', 'TINTANT', 'AND', 'ROSALIE', 'THE', 'WITCH'] +8555-284449-0003-2327: ref=['WHEN', 'THE', 'BLUESKINS', 'SAW', 'GHIP', 'GHISIZZLE', 'THEY', 'RAISED', 'ANOTHER', 'GREAT', 'SHOUT', 'FOR', 'HE', 'WAS', 'THE', 'FAVORITE', 'OF', 'THE', 'SOLDIERS', 'AND', 'VERY', 'POPULAR', 'WITH', 'ALL', 'THE', 'PEOPLE'] +8555-284449-0003-2327: hyp=['WHEN', 'THE', 'BLUESKIN', 'SAW', 'GHIP', 'GHISIZZLE', 'THEY', 'RAISED', 'ANOTHER', 'GREAT', 'SHOUT', 'FOR', 'HE', 'WAS', 'THE', 'FAVOURITE', 'OF', 'THE', 'SOLDIERS', 'AND', 'VERY', 'POPULAR', 'WITH', 'ALL', 'THE', 'PEOPLE'] +8555-284449-0004-2328: ref=['SINCE', 'LAST', 'THURSDAY', 'I', 'GHIP', 'GHISIZZLE', 'HAVE', 'BEEN', 'THE', 'LAWFUL', 'BOOLOOROO', 'OF', 'THE', 'BLUE', 'COUNTRY', 'BUT', 'NOW', 'THAT', 'YOU', 'ARE', 'CONQUERED', 'BY', 'QUEEN', 'TROT', 'I', 'SUPPOSE', 'I', 'AM', 'CONQUERED', 'TOO', 'AND', 'YOU', 'HAVE', 'NO', 'BOOLOOROO', 'AT', 'ALL'] +8555-284449-0004-2328: hyp=['SINCE', 'LAST', 'THURSDAY', 'I', 'GHISIZZLE', 'HAVE', 'BEEN', 'THE', 'LAWFUL', 'BOOLOOROO', 'OF', 'THE', 'BLUE', 'COUNTRY', 'BUT', 'NOW', 'THAT', 'YOU', 'ARE', 'CONQUERED', 'BY', 'QUEEN', 'TROT', 'I', 'SUPPOSE', 'I', 'AM', 'CONQUERED', 'TOO', 'AND', 'YOU', 'HAVE', 'NO', 'BOOLOOROO', 'AT', 'ALL'] +8555-284449-0005-2329: ref=['WHEN', 'HE', 'FINISHED', 'SHE', 'SAID', 'CHEERFULLY'] +8555-284449-0005-2329: hyp=['WHEN', 'HE', 'FINISHED', 'SHE', 'SAID', 'CHEERFULLY'] +8555-284449-0006-2330: ref=["DON'T", 'WORRY', 'SIZZLE', 'DEAR', "IT'LL", 'ALL', 'COME', 'RIGHT', 'PRETTY', 'SOON'] +8555-284449-0006-2330: hyp=["DON'T", 'WORRY', 'SIZZLE', 'DEAR', 'IT', 'ALL', 'COME', 'RIGHT', 'PRETTY', 'SOON'] +8555-284449-0007-2331: ref=['NOW', 'THEN', "LET'S", 'ENTER', 'THE', 'CITY', 'AN', 'ENJOY', 'THE', 'GRAND', 'FEAST', "THAT'S", 'BEING', 'COOKED', "I'M", 'NEARLY', 'STARVED', 'MYSELF', 'FOR', 'THIS', 'CONQUERIN', 'KINGDOMS', 'IS', 'HARD', 'WORK'] +8555-284449-0007-2331: hyp=['NOW', 'THEN', "LET'S", 'ENTER', 'THE', 'CITY', 'AND', 'ENJOY', 'THE', 'GREAT', 'FEAST', 'ITS', 'BEING', 'COOKED', "I'M", 'NEARLY', 'STARVED', 'MYSELF', 'FOR', 'THIS', 'CONQUERING', "KINGDOM'S", 'IS', 'HARD', 'WORK'] +8555-284449-0008-2332: ref=['THEN', 'SHE', 'GAVE', 'ROSALIE', 'BACK', 'HER', 'MAGIC', 'RING', 'THANKING', 'THE', 'KIND', 'WITCH', 'FOR', 'ALL', 'SHE', 'HAD', 'DONE', 'FOR', 'THEM'] +8555-284449-0008-2332: hyp=['THEN', 'SHE', 'GAVE', 'ROSALIE', 'BACK', 'HER', 'MAGIC', 'RING', 'THANKING', 'THE', 'KIND', 'WHICH', 'FOR', 'ALL', 'SHE', 'HAD', 'DONE', 'FOR', 'THEM'] +8555-284449-0009-2333: ref=['YOU', 'ARE', 'MATE', 'REPLIED', 'THE', 'SAILOR'] +8555-284449-0009-2333: hyp=['YOU', 'ARE', 'MATE', 'REPLIED', 'THE', 
'SAILOR'] +8555-284449-0010-2334: ref=['IT', 'WILL', 'BE', 'SUCH', 'A', 'SATISFACTION'] +8555-284449-0010-2334: hyp=['IT', 'WILL', 'BE', 'SUCH', 'A', 'SATISFACTION'] +8555-284449-0011-2335: ref=['THE', 'GUARDS', 'HAD', 'A', 'TERRIBLE', 'STRUGGLE', 'WITH', 'THE', 'GOAT', 'WHICH', 'WAS', 'LOOSE', 'IN', 'THE', 'ROOM', 'AND', 'STILL', 'WANTED', 'TO', 'FIGHT', 'BUT', 'FINALLY', 'THEY', 'SUBDUED', 'THE', 'ANIMAL', 'AND', 'THEN', 'THEY', 'TOOK', 'THE', 'BOOLOOROO', 'OUT', 'OF', 'THE', 'FRAME', 'HE', 'WAS', 'TIED', 'IN', 'AND', 'BROUGHT', 'BOTH', 'HIM', 'AND', 'THE', 'GOAT', 'BEFORE', 'QUEEN', 'TROT', 'WHO', 'AWAITED', 'THEM', 'IN', 'THE', 'THRONE', 'ROOM', 'OF', 'THE', 'PALACE'] +8555-284449-0011-2335: hyp=['THE', 'GUARDS', 'HAD', 'A', 'TERRIBLE', 'STRUGGLE', 'WITH', 'THE', 'GOAT', 'WHICH', 'WAS', 'LOOSE', 'IN', 'THE', 'ROOM', 'AND', 'STILL', 'WANTED', 'TO', 'FIGHT', 'BUT', 'FINALLY', 'THEY', 'SUBDUED', 'THE', 'ANIMAL', 'AND', 'THEN', 'THEY', 'TOOK', 'THE', 'BOOLOOROO', 'OUT', 'OF', 'THE', 'FRAME', 'WHOSE', 'TIED', 'IN', 'AND', 'BROUGHT', 'BOTH', 'HIM', 'AND', 'THE', 'GOAT', 'BEFORE', 'QUEEN', 'TROT', 'WHO', 'AWAITED', 'THEM', 'IN', 'THE', 'THRONE', 'ROOM', 'OF', 'THE', 'PALACE'] +8555-284449-0012-2336: ref=["I'LL", 'GLADLY', 'DO', 'THAT', 'PROMISED', 'THE', 'NEW', 'BOOLOOROO', 'AND', "I'LL", 'FEED', 'THE', 'HONORABLE', 'GOAT', 'ALL', 'THE', 'SHAVINGS', 'AND', 'LEATHER', 'AND', 'TIN', 'CANS', 'HE', 'CAN', 'EAT', 'BESIDES', 'THE', 'GRASS'] +8555-284449-0012-2336: hyp=['I', 'WILL', 'GLADLY', 'DO', 'THAT', 'PROMISED', 'THE', 'NEW', 'BOOLOOROO', 'AND', "I'LL", 'FEED', 'THE', 'HON', 'GO', 'TO', 'ALL', 'THE', 'SHAVINGS', 'AND', 'LEATHER', 'AND', 'TIN', 'CANS', 'HE', 'CAN', 'EAT', 'BESIDES', 'THE', 'GRASS'] +8555-284449-0013-2337: ref=['SCUSE', 'ME', 'SAID', 'TROT', 'I', 'NEGLECTED', 'TO', 'TELL', 'YOU', 'THAT', "YOU'RE", 'NOT', 'THE', 'BOOLOOROO', 'ANY', 'MORE'] +8555-284449-0013-2337: hyp=['EXCUSE', 'ME', 'SAID', 'TROT', 'I', 'NEGLECTED', 'TO', 'TELL', 'YOU', 'THAT', "YOU'RE", 'NOT', 'THE', 'BOOLOOROO', 'ANY', 'MORE'] +8555-284449-0014-2338: ref=['THE', 'FORMER', 'BOOLOOROO', 'GROANED'] +8555-284449-0014-2338: hyp=['THE', 'FORMER', 'BOOLOOROO', 'GROANED'] +8555-284449-0015-2339: ref=["I'LL", 'NOT', 'BE', 'WICKED', 'ANY', 'MORE', 'SIGHED', 'THE', 'OLD', 'BOOLOOROO', "I'LL", 'REFORM'] +8555-284449-0015-2339: hyp=["I'LL", 'NOW', 'BE', 'WICKED', 'ANY', 'MORE', 'SIGHED', 'THE', 'OLD', 'BOOLOOROO', "I'LL", 'REFORM'] +8555-284449-0016-2340: ref=['AS', 'A', 'PRIVATE', 'CITIZEN', 'I', 'SHALL', 'BE', 'A', 'MODEL', 'OF', 'DEPORTMENT', 'BECAUSE', 'IT', 'WOULD', 'BE', 'DANGEROUS', 'TO', 'BE', 'OTHERWISE'] +8555-284449-0016-2340: hyp=['AS', 'A', 'PRIVATE', 'CITIZEN', 'I', 'SHALL', 'BE', 'A', 'MODEL', 'OF', 'DEPORTMENT', 'BECAUSE', 'IT', 'WOULD', 'BE', 'DANGEROUS', 'TO', 'BE', 'OTHERWISE'] +8555-284449-0017-2341: ref=['WHEN', 'FIRST', 'THEY', 'ENTERED', 'THE', 'THRONE', 'ROOM', 'THEY', 'TRIED', 'TO', 'BE', 'AS', 'HAUGHTY', 'AND', 'SCORNFUL', 'AS', 'EVER', 'BUT', 'THE', 'BLUES', 'WHO', 'WERE', 'ASSEMBLED', 'THERE', 'ALL', 'LAUGHED', 'AT', 'THEM', 'AND', 'JEERED', 'THEM', 'FOR', 'THERE', 'WAS', 'NOT', 'A', 'SINGLE', 'PERSON', 'IN', 'ALL', 'THE', 'BLUE', 'COUNTRY', 'WHO', 'LOVED', 'THE', 'PRINCESSES', 'THE', 'LEAST', 'LITTLE', 'BIT'] +8555-284449-0017-2341: hyp=['WHEN', 'FIRST', 'THEY', 'ENTERED', 'THE', 'THRONE', 'ROOM', 'THEY', 'TRIED', 'TO', 'BE', 'AS', 'HAUGHTY', 'AND', 'SCORNFUL', 'AS', 'EVER', 'BUT', 'THE', 'BLUES', 'WHO', 'WERE', 'ASSEMBLED', 'THERE', 'ALL', 'LAUGHED', 'AT', 'THEM', 'AND', 'JEERED', 'THEM', 
'FOR', 'THERE', 'WAS', 'NOT', 'A', 'SINGLE', 'PERSON', 'IN', 'ALL', 'THE', 'BLUE', 'COUNTRY', 'WHO', 'LOVED', 'THE', 'PRINCESSES', 'THE', 'LEAST', 'LITTLE', 'BIT'] +8555-284449-0018-2342: ref=['SO', 'GHIP', 'GHISIZZLE', 'ORDERED', 'THE', 'CAPTAIN', 'TO', 'TAKE', 'A', 'FILE', 'OF', 'SOLDIERS', 'AND', 'ESCORT', 'THE', 'RAVING', 'BEAUTIES', 'TO', 'THEIR', 'NEW', 'HOME'] +8555-284449-0018-2342: hyp=['SO', 'GHIP', 'GHISIZZLE', 'ORDERED', 'THE', 'CAPTAIN', 'TO', 'TAKE', 'A', 'FILE', 'OF', 'SOLDIERS', 'AND', 'ESCORT', 'THE', 'RAVING', 'BEAUTIES', 'TO', 'THEIR', 'NEW', 'HOME'] +8555-284449-0019-2343: ref=['THAT', 'EVENING', 'TROT', 'GAVE', 'A', 'GRAND', 'BALL', 'IN', 'THE', 'PALACE', 'TO', 'WHICH', 'THE', 'MOST', 'IMPORTANT', 'OF', 'THE', 'PINKIES', 'AND', 'THE', 'BLUESKINS', 'WERE', 'INVITED'] +8555-284449-0019-2343: hyp=['THAT', 'EVENING', 'TROT', 'GAVE', 'A', 'GRAND', 'BALL', 'IN', 'THE', 'PALACE', 'TO', 'WHICH', 'THE', 'MOST', 'IMPORTANT', 'OF', 'THE', 'PINKIES', 'IN', 'THE', 'BLUESKINS', 'WERE', 'INVITED'] +8555-284449-0020-2344: ref=['THE', 'COMBINED', 'BANDS', 'OF', 'BOTH', 'THE', 'COUNTRIES', 'PLAYED', 'THE', 'MUSIC', 'AND', 'A', 'FINE', 'SUPPER', 'WAS', 'SERVED'] +8555-284449-0020-2344: hyp=['THE', 'COMBINED', 'BANDS', 'OF', 'BOTH', 'THE', 'COUNTRIES', 'PLAYED', 'THE', 'MUSIC', 'AND', 'A', 'FINE', 'SUPPER', 'WAS', 'SERVED'] +8555-292519-0000-2283: ref=['BRIGHTER', 'THAN', 'EARLY', "DAWN'S", 'MOST', 'BRILLIANT', 'DYE', 'ARE', 'BLOWN', 'CLEAR', 'BANDS', 'OF', 'COLOR', 'THROUGH', 'THE', 'SKY', 'THAT', 'SWIRL', 'AND', 'SWEEP', 'AND', 'MEET', 'TO', 'BREAK', 'AND', 'FOAM', 'LIKE', 'RAINBOW', 'VEILS', 'UPON', 'A', "BUBBLE'S", 'DOME'] +8555-292519-0000-2283: hyp=['BRIGHTER', 'THAN', 'EARLY', 'DAWNS', 'MOST', 'BRILLIANT', 'DYE', 'ARE', 'BLOWN', 'CLEAR', 'BANDS', 'OF', 'COLOUR', 'THROUGH', 'THE', 'SKY', 'THAT', 'SWIRL', 'AND', 'SWEEP', 'AND', 'MEET', 'TO', 'BREAK', 'AND', 'FOAM', 'LIKE', 'RAINBOW', 'VEILS', 'UPON', 'A', "BUBBLE'S", 'DOME'] +8555-292519-0001-2284: ref=['GUIDED', 'BY', 'YOU', 'HOW', 'WE', 'MIGHT', 'STROLL', 'TOWARDS', 'DEATH', 'OUR', 'ONLY', 'MUSIC', 'ONE', "ANOTHER'S", 'BREATH', 'THROUGH', 'GARDENS', 'INTIMATE', 'WITH', 'HOLLYHOCKS', 'WHERE', 'SILENT', 'POPPIES', 'BURN', 'BETWEEN', 'THE', 'ROCKS', 'BY', 'POOLS', 'WHERE', 'BIRCHES', 'BEND', 'TO', 'CONFIDANTS', 'ABOVE', 'GREEN', 'WATERS', 'SCUMMED', 'WITH', 'LILY', 'PLANTS'] +8555-292519-0001-2284: hyp=['GUIDED', 'BY', 'YOU', 'HOW', 'WE', 'MIGHT', 'STROLL', 'TOWARDS', 'DEATH', 'OUR', 'ONLY', 'MUSIC', 'ONE', "ANOTHER'S", 'BREATH', 'THROUGH', 'GARDENS', 'INTIMATE', 'WITH', 'HOLLYHOCKS', 'WHERE', 'SILENT', 'POPPIES', 'BURN', 'BETWEEN', 'THE', 'ROCKS', 'BY', 'POOLS', 'WHERE', 'BIRCHES', 'BEND', 'TO', 'CONFIDANTS', 'ABOVE', 'GREEN', 'WATERS', 'SCUMBED', 'WITH', 'A', 'LILY', 'PLANTS'] +8555-292519-0002-2285: ref=['VENICE'] +8555-292519-0002-2285: hyp=['VENICE'] +8555-292519-0003-2286: ref=['IN', 'A', 'SUNSET', 'GLOWING', 'OF', 'CRIMSON', 'AND', 'GOLD', 'SHE', 'LIES', 'THE', 'GLORY', 'OF', 'THE', 'WORLD', 'A', 'BEACHED', "KING'S", 'GALLEY', 'WHOSE', 'SAILS', 'ARE', 'FURLED', 'WHO', 'IS', 'HUNG', 'WITH', 'TAPESTRIES', 'RICH', 'AND', 'OLD'] +8555-292519-0003-2286: hyp=['IN', 'A', 'SUNSET', 'GLOWING', 'OF', 'CRIMSON', 'AND', 'GOLD', 'SHE', 'LIES', 'THE', 'GLORY', 'OF', 'THE', 'WORLD', 'A', 'BEECHED', "KING'S", 'GALLEY', 'WHO', 'SAILS', 'ARE', 'FURLED', 'WHO', 'IS', 'HUNG', 'WITH', 'TAPESTRIES', 'RICH', 'AND', 'OLD'] +8555-292519-0004-2287: ref=['THE', 'PITY', 'THAT', 'WE', 'MUST', 'COME', 'AND', 'GO'] +8555-292519-0004-2287: hyp=['THE', 'PITY', 
'THAT', 'WE', 'MUST', 'COME', 'AND', 'GO'] +8555-292519-0005-2288: ref=['WHILE', 'THE', 'OLD', 'GOLD', 'AND', 'THE', 'MARBLE', 'STAYS', 'FOREVER', 'GLEAMING', 'ITS', 'SOFT', 'STRONG', 'BLAZE', 'CALM', 'IN', 'THE', 'EARLY', 'EVENING', 'GLOW'] +8555-292519-0005-2288: hyp=['WHILE', 'THE', 'OLD', 'GOLD', 'AND', 'THE', 'MARBLE', 'STAYS', 'FOR', 'EVER', 'GLEAMING', 'ITS', 'SOFT', 'STRONG', 'BLAZE', 'CALM', 'IN', 'THE', 'EARLY', 'EVENING', 'GLOW'] +8555-292519-0006-2289: ref=['THE', 'PLEASANT', 'GRAVEYARD', 'OF', 'MY', 'SOUL', 'WITH', 'SENTIMENTAL', 'CYPRESS', 'TREES', 'AND', 'FLOWERS', 'IS', 'FILLED', 'THAT', 'I', 'MAY', 'STROLL', 'IN', 'MEDITATION', 'AT', 'MY', 'EASE'] +8555-292519-0006-2289: hyp=['THE', 'PLEASANT', 'GRAVEYARD', 'OF', 'MY', 'SOUL', 'WITH', 'SENTIMENTAL', 'CYPRESS', 'TREES', 'AND', 'FLOWERS', 'IS', 'FILLED', 'THAT', 'I', 'MAY', 'STROLL', 'IN', 'MEDITATION', 'AT', 'MY', 'EASE'] +8555-292519-0007-2290: ref=['IT', 'IS', 'MY', 'HEART', 'HUNG', 'IN', 'THE', 'SKY', 'AND', 'NO', 'CLOUDS', 'EVER', 'FLOAT', 'BETWEEN', 'THE', 'GRAVE', 'FLOWERS', 'AND', 'MY', 'HEART', 'ON', 'HIGH'] +8555-292519-0007-2290: hyp=['IT', 'IS', 'MY', 'HEART', 'HUNG', 'IN', 'THE', 'SKY', 'AND', 'NO', 'CLOUDS', 'EVER', 'FLOAT', 'BETWEEN', 'THE', 'GRAY', 'FLOWERS', 'AND', 'MY', 'HEART', 'ON', 'HIGH'] +8555-292519-0008-2291: ref=['OVER', 'THE', 'TRACK', 'LINED', 'CITY', 'STREET', 'THE', 'YOUNG', 'MEN', 'THE', 'GRINNING', 'MEN', 'PASS'] +8555-292519-0008-2291: hyp=['OVER', 'THE', 'TRACK', 'LINED', 'CITY', 'STREET', 'THE', 'YOUNG', 'MAN', 'THE', 'GRINNING', 'MEN', 'PASS'] +8555-292519-0009-2292: ref=['HO', 'YE', 'SAILS', 'THAT', 'SEEM', 'TO', 'WANDER', 'IN', 'DREAM', 'FILLED', 'MEADOWS', 'SAY', 'IS', 'THE', 'SHORE', 'WHERE', 'I', 'STAND', 'THE', 'ONLY', 'FIELD', 'OF', 'STRUGGLE', 'OR', 'ARE', 'YE', 'HIT', 'AND', 'BATTERED', 'OUT', 'THERE', 'BY', 'WAVES', 'AND', 'WIND', 'GUSTS', 'AS', 'YE', 'TACK', 'OVER', 'A', 'CLASHING', 'SEA', 'OF', 'WATERY', 'ECHOES'] +8555-292519-0009-2292: hyp=['HOME', 'YE', 'SAILS', 'THAT', 'SEEM', 'TO', 'WONDER', 'AND', 'DREAM', 'FILLED', 'MEADOWS', 'SAY', 'IS', 'THE', 'SHORE', 'WHERE', 'I', 'STAND', 'THE', 'ONLY', 'FIELD', 'OF', 'STRUGGLE', 'OR', 'ARE', 'YE', 'HIT', 'AND', 'BATTERED', 'OUT', 'THERE', 'BY', 'WAVES', 'AND', 'WIND', 'GUSTS', 'AS', 'YE', 'TACK', 'OVER', 'A', 'CLASHING', 'SEA', 'OF', 'WATERY', 'ECHOES'] +8555-292519-0010-2293: ref=['OLD', 'DANCES', 'ARE', 'SIMPLIFIED', 'OF', 'THEIR', 'YEARNING', 'BLEACHED', 'BY', 'TIME'] +8555-292519-0010-2293: hyp=['OLD', 'DANCES', 'ARE', 'SIMPLIFIED', 'OF', 'THEIR', 'YEARNING', 'BLEACHED', 'BY', 'TIME'] +8555-292519-0011-2294: ref=['HE', 'HAD', 'GOT', 'INTO', 'HER', 'COURTYARD'] +8555-292519-0011-2294: hyp=['HE', 'HAD', 'GOT', 'INTO', 'HER', 'COURTYARD'] +8555-292519-0012-2295: ref=['THROUGH', 'THE', 'BLACK', 'NIGHT', 'RAIN', 'HE', 'SANG', 'TO', 'HER', 'WINDOW', 'BARS'] +8555-292519-0012-2295: hyp=['THROUGH', 'THE', 'BLACK', 'NIGHT', 'RAIN', 'HE', 'SANG', 'TO', 'HER', 'WINDOW', 'BARS'] +8555-292519-0013-2296: ref=['THAT', 'WAS', 'BUT', 'RUSTLING', 'OF', 'DRIPPING', 'PLANTS', 'IN', 'THE', 'DARK'] +8555-292519-0013-2296: hyp=['THAT', 'WAS', 'BUT', 'RUSTLING', 'OF', 'TRIPPING', 'PLANTS', 'IN', 'THE', 'DARK'] +8555-292519-0014-2297: ref=['SHE', 'WAS', 'ALONE', 'THAT', 'NIGHT'] +8555-292519-0014-2297: hyp=['SHE', 'WAS', 'ALONE', 'THAT', 'NIGHT'] +8555-292519-0015-2298: ref=['HE', 'HAD', 'BROKEN', 'INTO', 'HER', 'COURTYARD'] +8555-292519-0015-2298: hyp=['HE', 'HAD', 'BROKEN', 'INTO', 'HER', 'COURTYARD'] +908-157963-0000-1321: ref=['TO', 'FADE', 'AWAY', 
'LIKE', 'MORNING', 'BEAUTY', 'FROM', 'HER', 'MORTAL', 'DAY', 'DOWN', 'BY', 'THE', 'RIVER', 'OF', 'ADONA', 'HER', 'SOFT', 'VOICE', 'IS', 'HEARD', 'AND', 'THUS', 'HER', 'GENTLE', 'LAMENTATION', 'FALLS', 'LIKE', 'MORNING', 'DEW'] +908-157963-0000-1321: hyp=['TO', 'FADE', 'AWAY', 'LIKE', 'MORNING', 'BEAUTY', 'FROM', 'HER', 'MORTAL', 'DAY', 'DOWN', 'BY', 'THE', 'RIVER', 'OF', 'ADONNA', 'HER', 'SOFT', 'VOICES', 'HEARD', 'AND', 'THUS', 'HER', 'GENTLE', 'LAMENTATION', 'FALLS', 'LIKE', 'MORNING', 'DEW'] +908-157963-0001-1322: ref=['O', 'LIFE', 'OF', 'THIS', 'OUR', 'SPRING'] +908-157963-0001-1322: hyp=['O', 'LIFE', 'OF', 'THIS', 'OUR', 'SPRING'] +908-157963-0002-1323: ref=['WHY', 'FADES', 'THE', 'LOTUS', 'OF', 'THE', 'WATER'] +908-157963-0002-1323: hyp=['WHY', 'FADES', 'THE', 'LOTUS', 'OF', 'THE', 'WATER'] +908-157963-0003-1324: ref=['WHY', 'FADE', 'THESE', 'CHILDREN', 'OF', 'THE', 'SPRING'] +908-157963-0003-1324: hyp=['WHY', 'FADE', 'THESE', 'CHILDREN', 'OF', 'THE', 'SPRING'] +908-157963-0004-1325: ref=['THEL', 'IS', 'LIKE', 'A', 'WATRY', 'BOW', 'AND', 'LIKE', 'A', 'PARTING', 'CLOUD', 'LIKE', 'A', 'REFLECTION', 'IN', 'A', 'GLASS', 'LIKE', 'SHADOWS', 'IN', 'THE', 'WATER', 'LIKE', 'DREAMS', 'OF', 'INFANTS', 'LIKE', 'A', 'SMILE', 'UPON', 'AN', 'INFANTS', 'FACE'] +908-157963-0004-1325: hyp=['FELL', 'IS', 'LIKE', 'A', 'WATERY', 'BOW', 'AND', 'LIKE', 'A', 'PARTING', 'CLOUD', 'LIKE', 'A', 'REFLECTION', 'IN', 'A', 'GLASS', 'LIKE', 'SHADOWS', 'IN', 'THE', 'WATER', 'LIKE', 'DREAMS', 'OF', 'INFANTS', 'LIKE', 'A', 'SMILE', 'UPON', 'AN', "INFANT'S", 'FACE'] +908-157963-0005-1326: ref=['LIKE', 'THE', 'DOVES', 'VOICE', 'LIKE', 'TRANSIENT', 'DAY', 'LIKE', 'MUSIC', 'IN', 'THE', 'AIR', 'AH'] +908-157963-0005-1326: hyp=['LIKE', 'THE', 'DOVES', 'BOYS', 'LIKE', 'TRANSIENT', 'DAY', 'LIKE', 'MUSIC', 'IN', 'THE', 'AIR', 'AH'] +908-157963-0006-1327: ref=['AND', 'GENTLE', 'SLEEP', 'THE', 'SLEEP', 'OF', 'DEATH', 'AND', 'GENTLY', 'HEAR', 'THE', 'VOICE', 'OF', 'HIM', 'THAT', 'WALKETH', 'IN', 'THE', 'GARDEN', 'IN', 'THE', 'EVENING', 'TIME'] +908-157963-0006-1327: hyp=['AND', 'GENTLE', 'SLEEP', 'THE', 'SLEEP', 'OF', 'DEATH', 'AND', 'GENTLY', 'HEAR', 'THE', 'VOICE', 'OF', 'HIM', 'THAT', 'WALKETH', 'IN', 'THE', 'GARDEN', 'IN', 'THE', 'EVENING', 'TIME'] +908-157963-0007-1328: ref=['THE', 'LILLY', 'OF', 'THE', 'VALLEY', 'BREATHING', 'IN', 'THE', 'HUMBLE', 'GRASS', 'ANSWERD', 'THE', 'LOVELY', 'MAID', 'AND', 'SAID', 'I', 'AM', 'A', 'WATRY', 'WEED', 'AND', 'I', 'AM', 'VERY', 'SMALL', 'AND', 'LOVE', 'TO', 'DWELL', 'IN', 'LOWLY', 'VALES', 'SO', 'WEAK', 'THE', 'GILDED', 'BUTTERFLY', 'SCARCE', 'PERCHES', 'ON', 'MY', 'HEAD', 'YET', 'I', 'AM', 'VISITED', 'FROM', 'HEAVEN', 'AND', 'HE', 'THAT', 'SMILES', 'ON', 'ALL', 'WALKS', 'IN', 'THE', 'VALLEY', 'AND', 'EACH', 'MORN', 'OVER', 'ME', 'SPREADS', 'HIS', 'HAND', 'SAYING', 'REJOICE', 'THOU', 'HUMBLE', 'GRASS', 'THOU', 'NEW', 'BORN', 'LILY', 'FLOWER'] +908-157963-0007-1328: hyp=['THE', 'LILY', 'OF', 'THE', 'VALLEY', 'BREATHING', 'IN', 'THE', 'HUMBLE', 'GRASS', 'ANSWERED', 'THE', 'LOVELY', 'MAIDEN', 'SAID', 'I', 'AM', 'A', 'WATCHERY', 'WEED', 'AND', 'I', 'AM', 'VERY', 'SMALL', 'AND', 'LOVE', 'TO', 'DWELL', 'IN', 'LOWLY', 'VALES', 'SO', 'WEAK', 'THE', 'GILDED', 'BUTTERFLY', 'SCARCE', 'PURCHASE', 'ON', 'MY', 'HEAD', 'YET', 'I', 'AM', 'VISITED', 'FROM', 'HEAVEN', 'AND', 'HE', 'THAT', 'SMILES', 'ON', 'ALL', 'WALKS', 'IN', 'THE', 'VALLEY', 'AND', 'EACH', 'MORN', 'OVER', 'ME', 'SPREADS', 'HIS', 'HAND', 'SAYING', 'REJOICE', 'THOU', 'HUMBLE', 'GRASS', 'THOU', 'NEWBORN', 'LILY', 'FLOWER'] 
+908-157963-0008-1329: ref=['THOU', 'GENTLE', 'MAID', 'OF', 'SILENT', 'VALLEYS', 'AND', 'OF', 'MODEST', 'BROOKS', 'FOR', 'THOU', 'SHALL', 'BE', 'CLOTHED', 'IN', 'LIGHT', 'AND', 'FED', 'WITH', 'MORNING', 'MANNA', 'TILL', 'SUMMERS', 'HEAT', 'MELTS', 'THEE', 'BESIDE', 'THE', 'FOUNTAINS', 'AND', 'THE', 'SPRINGS', 'TO', 'FLOURISH', 'IN', 'ETERNAL', 'VALES', 'THEY', 'WHY', 'SHOULD', 'THEL', 'COMPLAIN'] +908-157963-0008-1329: hyp=['THOU', 'GENTLE', 'MAID', 'OF', 'SILENT', 'VALLEYS', 'AND', 'OF', 'MODEST', 'BROOKS', 'FOR', 'THOU', 'SHALT', 'BE', 'CLOTHED', 'IN', 'LIGHT', 'AND', 'FED', 'WITH', 'MORNING', 'MANA', 'TILL', "SUMMER'S", 'HEAT', 'MELTS', 'THEE', 'BESIDE', 'THE', 'FOUNTAINS', 'AND', 'THE', 'SPRINGS', 'TO', 'FLOURISH', 'IN', 'ETERNAL', 'VALES', 'THEY', 'WHY', 'SHOULD', 'THOU', 'COMPLAIN'] +908-157963-0009-1330: ref=['WHY', 'SHOULD', 'THE', 'MISTRESS', 'OF', 'THE', 'VALES', 'OF', 'HAR', 'UTTER', 'A', 'SIGH'] +908-157963-0009-1330: hyp=['WHY', 'SHOULD', 'THE', 'MISTRESS', 'OF', 'THE', 'VEILS', 'OF', 'HAR', 'UTTER', 'A', 'SIGH'] +908-157963-0010-1331: ref=['SHE', 'CEASD', 'AND', 'SMILD', 'IN', 'TEARS', 'THEN', 'SAT', 'DOWN', 'IN', 'HER', 'SILVER', 'SHRINE'] +908-157963-0010-1331: hyp=['SHE', 'CEASED', 'AND', 'SMILED', 'IN', 'TEARS', 'THEN', 'SAT', 'DOWN', 'IN', 'HER', 'SILVER', 'SHRINE'] +908-157963-0011-1332: ref=['WHICH', 'THOU', 'DOST', 'SCATTER', 'ON', 'EVERY', 'LITTLE', 'BLADE', 'OF', 'GRASS', 'THAT', 'SPRINGS', 'REVIVES', 'THE', 'MILKED', 'COW', 'AND', 'TAMES', 'THE', 'FIRE', 'BREATHING', 'STEED'] +908-157963-0011-1332: hyp=['WHICH', 'THOU', 'DOST', 'SCATTER', 'ON', 'EVERY', 'LITTLE', 'BLADE', 'OF', 'GRASS', 'THAT', 'SPRINGS', 'REVIVES', 'THE', 'MILKED', 'COW', 'AND', 'TAMES', 'THE', 'FIRE', 'BREATHING', 'STEED'] +908-157963-0012-1333: ref=['BUT', 'THEL', 'IS', 'LIKE', 'A', 'FAINT', 'CLOUD', 'KINDLED', 'AT', 'THE', 'RISING', 'SUN', 'I', 'VANISH', 'FROM', 'MY', 'PEARLY', 'THRONE', 'AND', 'WHO', 'SHALL', 'FIND', 'MY', 'PLACE'] +908-157963-0012-1333: hyp=['BUT', 'THOU', 'IS', 'LIKE', 'A', 'FAINT', 'CLOUD', 'KINDLED', 'AT', 'THE', 'RISING', 'SUN', 'I', 'VANISH', 'FROM', 'MY', 'PEARLY', 'THRONE', 'AND', 'WHO', 'SHALL', 'FIND', 'MY', 'PLACE'] +908-157963-0013-1334: ref=['AND', 'WHY', 'IT', 'SCATTERS', 'ITS', 'BRIGHT', 'BEAUTY', 'THRO', 'THE', 'HUMID', 'AIR'] +908-157963-0013-1334: hyp=['AND', 'WYAT', 'SCATTERS', 'ITS', 'BRIGHT', 'BEAUTY', 'THROUGH', 'THE', 'HUMAN', 'AIR'] +908-157963-0014-1335: ref=['DESCEND', 'O', 'LITTLE', 'CLOUD', 'AND', 'HOVER', 'BEFORE', 'THE', 'EYES', 'OF', 'THEL'] +908-157963-0014-1335: hyp=['DESCEND', 'O', 'LITTLE', 'CLOUD', 'AND', 'HOVER', 'BEFORE', 'THE', 'EYES', 'OF', 'FELL'] +908-157963-0015-1336: ref=['O', 'LITTLE', 'CLOUD', 'THE', 'VIRGIN', 'SAID', 'I', 'CHARGE', 'THEE', 'TO', 'TELL', 'ME', 'WHY', 'THOU', 'COMPLAINEST', 'NOW', 'WHEN', 'IN', 'ONE', 'HOUR', 'THOU', 'FADE', 'AWAY', 'THEN', 'WE', 'SHALL', 'SEEK', 'THEE', 'BUT', 'NOT', 'FIND', 'AH', 'THEL', 'IS', 'LIKE', 'TO', 'THEE'] +908-157963-0015-1336: hyp=['O', 'LITTLE', 'CLOUD', 'THE', 'VIRGIN', 'SAID', 'I', 'CHARGE', 'THEE', 'TO', 'TELL', 'ME', 'WHY', 'THOU', 'COMPLAINEST', 'NOW', 'WHEN', 'IN', 'ONE', 'HOUR', 'THOU', 'FADE', 'AWAY', 'THEN', 'WE', 'SHALL', 'SEEK', 'THEE', 'BUT', 'NOT', 'FIND', 'AH', 'FELL', 'IS', 'LIKE', 'TO', 'THEE'] +908-157963-0016-1337: ref=['I', 'PASS', 'AWAY', 'YET', 'I', 'COMPLAIN', 'AND', 'NO', 'ONE', 'HEARS', 'MY', 'VOICE'] +908-157963-0016-1337: hyp=['I', 'PASS', 'AWAY', 'YET', 'I', 'COMPLAIN', 'AND', 'NO', 'ONE', 'HEARS', 'MY', 'VOICE'] +908-157963-0017-1338: ref=['THE', 'CLOUD', 
'THEN', 'SHEWD', 'HIS', 'GOLDEN', 'HEAD', 'AND', 'HIS', 'BRIGHT', 'FORM', "EMERG'D"] +908-157963-0017-1338: hyp=['THE', 'CLOUD', 'THEN', 'SHOWED', 'HIS', 'GOLDEN', 'HEAD', 'AND', 'HIS', 'BRIGHT', 'FORM', 'EMERGED'] +908-157963-0018-1339: ref=['AND', 'FEAREST', 'THOU', 'BECAUSE', 'I', 'VANISH', 'AND', 'AM', 'SEEN', 'NO', 'MORE'] +908-157963-0018-1339: hyp=['AND', "FEAR'ST", 'THOU', 'BECAUSE', 'I', 'VANISH', 'AND', 'AM', 'SEEN', 'NO', 'MORE'] +908-157963-0019-1340: ref=['IT', 'IS', 'TO', 'TENFOLD', 'LIFE', 'TO', 'LOVE', 'TO', 'PEACE', 'AND', 'RAPTURES', 'HOLY', 'UNSEEN', 'DESCENDING', 'WEIGH', 'MY', 'LIGHT', 'WINGS', 'UPON', 'BALMY', 'FLOWERS', 'AND', 'COURT', 'THE', 'FAIR', 'EYED', 'DEW', 'TO', 'TAKE', 'ME', 'TO', 'HER', 'SHINING', 'TENT', 'THE', 'WEEPING', 'VIRGIN', 'TREMBLING', 'KNEELS', 'BEFORE', 'THE', 'RISEN', 'SUN'] +908-157963-0019-1340: hyp=['IT', 'IS', 'TO', 'TENFOLD', 'LIFE', 'TO', 'LOVE', 'TO', 'PEACE', 'AND', 'RAPTURES', 'WHOLLY', 'UNSEEN', 'DESCENDING', 'WEIGH', 'MY', 'LIGHT', 'WINGS', 'UPON', 'BALMY', 'FLOWERS', 'AND', 'COURT', 'THE', 'FAIR', 'EYED', 'DEW', 'TO', 'TAKE', 'ME', 'TO', 'HER', 'SHINING', 'TENT', 'THE', 'WEEPING', 'VIRGIN', 'TREMBLING', 'KNEELS', 'BEFORE', 'THE', 'RISEN', 'SUN'] +908-157963-0020-1341: ref=['TILL', 'WE', 'ARISE', "LINK'D", 'IN', 'A', 'GOLDEN', 'BAND', 'AND', 'NEVER', 'PART', 'BUT', 'WALK', 'UNITED', 'BEARING', 'FOOD', 'TO', 'ALL', 'OUR', 'TENDER', 'FLOWERS'] +908-157963-0020-1341: hyp=['TILL', 'WE', 'ARISE', 'LINKED', 'IN', 'A', 'GOLDEN', 'BAND', 'AND', 'NEVER', 'PART', 'BUT', 'WALK', 'UNITED', 'BEARING', 'FOOD', 'TO', 'ALL', 'OUR', 'TENDER', 'FLOWERS'] +908-157963-0021-1342: ref=['LIVES', 'NOT', 'ALONE', 'NOR', 'OR', 'ITSELF', 'FEAR', 'NOT', 'AND', 'I', 'WILL', 'CALL', 'THE', 'WEAK', 'WORM', 'FROM', 'ITS', 'LOWLY', 'BED', 'AND', 'THOU', 'SHALT', 'HEAR', 'ITS', 'VOICE'] +908-157963-0021-1342: hyp=['LIVES', 'NOT', 'ALONE', 'NOR', 'OF', 'ITSELF', 'FEAR', 'NOT', 'AND', 'I', 'WILL', 'CALL', 'THE', 'WEAK', 'WORM', 'FROM', 'ITS', 'LOWLY', 'BED', 'AND', 'THOU', 'SHALT', 'HEAR', 'ITS', 'VOICE'] +908-157963-0022-1343: ref=['COME', 'FORTH', 'WORM', 'AND', 'THE', 'SILENT', 'VALLEY', 'TO', 'THY', 'PENSIVE', 'QUEEN'] +908-157963-0022-1343: hyp=['COME', 'FORTH', 'WORM', 'AND', 'THE', 'SILENT', 'VALLEY', 'TO', 'THY', 'PENSIVE', 'QUEEN'] +908-157963-0023-1344: ref=['THE', 'HELPLESS', 'WORM', 'AROSE', 'AND', 'SAT', 'UPON', 'THE', 'LILLYS', 'LEAF', 'AND', 'THE', 'BRIGHT', 'CLOUD', 'SAILD', 'ON', 'TO', 'FIND', 'HIS', 'PARTNER', 'IN', 'THE', 'VALE'] +908-157963-0023-1344: hyp=['THE', 'HELPLESS', 'WORM', 'AROSE', 'AND', 'SAT', 'UPON', 'THE', "LILY'S", 'LEAF', 'AND', 'THE', 'BRIGHT', 'CLOUD', 'SAILED', 'ON', 'TO', 'FIND', 'HIS', 'PARTNER', 'IN', 'THE', 'VALE'] +908-157963-0024-1345: ref=['IMAGE', 'OF', 'WEAKNESS', 'ART', 'THOU', 'BUT', 'A', 'WORM'] +908-157963-0024-1345: hyp=['IMAGE', 'OF', 'WEAKNESS', 'ART', 'THOU', 'BUT', 'A', 'WORM'] +908-157963-0025-1346: ref=['I', 'SEE', 'THEY', 'LAY', 'HELPLESS', 'AND', 'NAKED', 'WEEPING', 'AND', 'NONE', 'TO', 'ANSWER', 'NONE', 'TO', 'CHERISH', 'THEE', 'WITH', 'MOTHERS', 'SMILES'] +908-157963-0025-1346: hyp=['I', 'SEE', 'THEY', 'LAY', 'HELPLESS', 'AND', 'NAKED', 'WEEPING', 'AND', 'NONE', 'TO', 'ANSWER', 'NONE', 'TO', 'CHERISH', 'THEE', 'WITH', "MOTHER'S", 'SMILES'] +908-157963-0026-1347: ref=['AND', 'SAYS', 'THOU', 'MOTHER', 'OF', 'MY', 'CHILDREN', 'I', 'HAVE', 'LOVED', 'THEE', 'AND', 'I', 'HAVE', 'GIVEN', 'THEE', 'A', 'CROWN', 'THAT', 'NONE', 'CAN', 'TAKE', 'AWAY'] +908-157963-0026-1347: hyp=['AND', 'SAYS', 'THOU', 'MOTHER', 'OF', 
'MY', 'CHILDREN', 'I', 'HAVE', 'LOVED', 'THEE', 'AND', 'I', 'HAVE', 'GIVEN', 'THEE', 'A', 'CROWN', 'THAT', 'NONE', 'CAN', 'TAKE', 'AWAY'] +908-157963-0027-1348: ref=['AND', 'LAY', 'ME', 'DOWN', 'IN', 'THY', 'COLD', 'BED', 'AND', 'LEAVE', 'MY', 'SHINING', 'LOT'] +908-157963-0027-1348: hyp=['AND', 'LAY', 'ME', 'DOWN', 'IN', 'THY', 'COLD', 'BED', 'AND', 'LEAVE', 'MY', 'SHINING', 'LOT'] +908-157963-0028-1349: ref=['OR', 'AN', 'EYE', 'OF', 'GIFTS', 'AND', 'GRACES', 'SHOWRING', 'FRUITS', 'AND', 'COINED', 'GOLD'] +908-157963-0028-1349: hyp=['OR', 'AN', 'EYE', 'OF', 'GIFTS', 'AND', 'GRACES', 'SHOWERING', 'FRUITS', 'AND', 'COINED', 'GOLD'] +908-157963-0029-1350: ref=['WHY', 'A', 'TONGUE', "IMPRESS'D", 'WITH', 'HONEY', 'FROM', 'EVERY', 'WIND'] +908-157963-0029-1350: hyp=['WHY', 'A', 'TONGUE', 'IMPRESSED', 'WITH', 'HONEY', 'FROM', 'EVERY', 'WIND'] +908-157963-0030-1351: ref=['WHY', 'AN', 'EAR', 'A', 'WHIRLPOOL', 'FIERCE', 'TO', 'DRAW', 'CREATIONS', 'IN'] +908-157963-0030-1351: hyp=['WHY', 'AN', 'EAR', 'A', 'WHIRLPOOL', 'FIERCE', 'TO', 'DRAW', 'CREATIONS', 'IN'] +908-31957-0000-1352: ref=['ALL', 'IS', 'SAID', 'WITHOUT', 'A', 'WORD'] +908-31957-0000-1352: hyp=['ALL', 'IS', 'SAID', 'WITHOUT', 'A', 'WORD'] +908-31957-0001-1353: ref=['I', 'SIT', 'BENEATH', 'THY', 'LOOKS', 'AS', 'CHILDREN', 'DO', 'IN', 'THE', 'NOON', 'SUN', 'WITH', 'SOULS', 'THAT', 'TREMBLE', 'THROUGH', 'THEIR', 'HAPPY', 'EYELIDS', 'FROM', 'AN', 'UNAVERRED', 'YET', 'PRODIGAL', 'INWARD', 'JOY'] +908-31957-0001-1353: hyp=['I', 'SIT', 'BENEATH', 'THY', 'LOOKS', 'AS', 'CHILDREN', 'DO', 'IN', 'THE', 'NOON', 'SUN', 'WITH', 'SOULS', 'THAT', 'TREMBLE', 'THROUGH', 'THEIR', 'HAPPY', 'EYELIDS', 'FROM', 'AN', 'UNAVERRED', 'YET', 'CHRONICAL', 'INWARD', 'JOY'] +908-31957-0002-1354: ref=['I', 'DID', 'NOT', 'WRONG', 'MYSELF', 'SO', 'BUT', 'I', 'PLACED', 'A', 'WRONG', 'ON', 'THEE'] +908-31957-0002-1354: hyp=['I', 'DID', 'NOT', 'WRONG', 'MYSELF', 'SO', 'BUT', 'I', 'PLACED', 'A', 'WRONG', 'ON', 'THEE'] +908-31957-0003-1355: ref=['WHEN', 'CALLED', 'BEFORE', 'I', 'TOLD', 'HOW', 'HASTILY', 'I', 'DROPPED', 'MY', 'FLOWERS', 'OR', 'BRAKE', 'OFF', 'FROM', 'A', 'GAME'] +908-31957-0003-1355: hyp=['WHEN', 'CALLED', 'BEFORE', 'I', 'TOLD', 'HOW', 'HASTILY', 'I', 'DROPPED', 'MY', 'FLOWERS', 'OR', 'BREAK', 'OFF', 'FROM', 'A', 'GAME'] +908-31957-0004-1356: ref=['SHALL', 'I', 'NEVER', 'MISS', 'HOME', 'TALK', 'AND', 'BLESSING', 'AND', 'THE', 'COMMON', 'KISS', 'THAT', 'COMES', 'TO', 'EACH', 'IN', 'TURN', 'NOR', 'COUNT', 'IT', 'STRANGE', 'WHEN', 'I', 'LOOK', 'UP', 'TO', 'DROP', 'ON', 'A', 'NEW', 'RANGE', 'OF', 'WALLS', 'AND', 'FLOORS', 'ANOTHER', 'HOME', 'THAN', 'THIS'] +908-31957-0004-1356: hyp=['SHALL', 'I', 'NEVER', 'MISS', 'HOME', 'TALK', 'AND', 'BLESSING', 'AND', 'THE', 'COMMON', 'KISS', 'THAT', 'COMES', 'TO', 'EACH', 'IN', 'TURN', 'NOR', 'COUNT', 'IT', 'STRANGE', 'WHEN', 'I', 'LOOK', 'UP', 'TO', 'DROP', 'ON', 'A', 'NEW', 'RANGE', 'OF', 'WALLS', 'AND', 'FLOORS', 'ANOTHER', 'HOME', 'THAN', 'THIS'] +908-31957-0005-1357: ref=['ALAS', 'I', 'HAVE', 'GRIEVED', 'SO', 'I', 'AM', 'HARD', 'TO', 'LOVE'] +908-31957-0005-1357: hyp=['ALAS', 'I', 'HAVE', 'GRIEVED', 'SO', 'I', 'AM', 'HARD', 'TO', 'LOVE'] +908-31957-0006-1358: ref=['OPEN', 'THY', 'HEART', 'WIDE', 'AND', 'FOLD', 'WITHIN', 'THE', 'WET', 'WINGS', 'OF', 'THY', 'DOVE'] +908-31957-0006-1358: hyp=['OPEN', 'THY', 'HEART', 'WIDE', 'AND', 'FOLD', 'WITHIN', 'THE', 'WET', 'WINGS', 'OF', 'THY', 'DOVE'] +908-31957-0007-1359: ref=['COULD', 'IT', 'MEAN', 'TO', 'LAST', 'A', 'LOVE', 'SET', 'PENDULOUS', 'BETWEEN', 'SORROW', 'AND', 'SORROW'] 
+908-31957-0007-1359: hyp=['COULD', 'IT', 'MEAN', 'TO', 'LAST', 'A', 'LOVE', 'SET', 'PENDULOUS', 'BETWEEN', 'SORROW', 'AND', 'SORROW'] +908-31957-0008-1360: ref=['NAY', 'I', 'RATHER', 'THRILLED', 'DISTRUSTING', 'EVERY', 'LIGHT', 'THAT', 'SEEMED', 'TO', 'GILD', 'THE', 'ONWARD', 'PATH', 'AND', 'FEARED', 'TO', 'OVERLEAN', 'A', 'FINGER', 'EVEN'] +908-31957-0008-1360: hyp=['NAY', 'I', 'RATHER', 'THRILLED', 'DISTRUSTING', 'EVERY', 'LIGHT', 'THAT', 'SEEMED', 'TO', 'GILD', 'THE', 'ONWARD', 'PATH', 'IN', 'FEAR', 'TO', 'OVERLENE', 'A', 'FINGER', 'EVEN'] +908-31957-0009-1361: ref=['AND', 'THOUGH', 'I', 'HAVE', 'GROWN', 'SERENE', 'AND', 'STRONG', 'SINCE', 'THEN', 'I', 'THINK', 'THAT', 'GOD', 'HAS', 'WILLED', 'A', 'STILL', 'RENEWABLE', 'FEAR'] +908-31957-0009-1361: hyp=['AND', 'THOUGH', 'I', 'HAVE', 'GROWN', 'SERENE', 'AND', 'STRONG', 'SINCE', 'THEN', 'I', 'THINK', 'THAT', 'GOD', 'HAS', 'WILLED', 'A', 'STILL', 'RENEWABLE', 'FEAR'] +908-31957-0010-1362: ref=['O', 'LOVE', 'O', 'TROTH'] +908-31957-0010-1362: hyp=['O', 'LOVE', 'O', 'TROTH'] +908-31957-0011-1363: ref=['AND', 'LOVE', 'BE', 'FALSE'] +908-31957-0011-1363: hyp=['AND', 'LOVE', 'BE', 'FALSE'] +908-31957-0012-1364: ref=['IF', 'HE', 'TO', 'KEEP', 'ONE', 'OATH', 'MUST', 'LOSE', 'ONE', 'JOY', 'BY', 'HIS', "LIFE'S", 'STAR', 'FORETOLD'] +908-31957-0012-1364: hyp=['IF', 'HE', 'TO', 'KEEP', 'ONE', 'OATH', 'MUST', 'LOSE', 'ONE', 'JOY', 'BY', 'HIS', "LIFE'S", 'STAR', 'FORETOLD'] +908-31957-0013-1365: ref=['SLOW', 'TO', 'WORLD', 'GREETINGS', 'QUICK', 'WITH', 'ITS', 'O', 'LIST', 'WHEN', 'THE', 'ANGELS', 'SPEAK'] +908-31957-0013-1365: hyp=['SLOW', 'TO', 'WORLD', 'GREETINGS', 'QUICK', 'WITH', 'ITS', 'O', 'LIST', 'WHEN', 'THE', 'ANGEL', 'SPEAK'] +908-31957-0014-1366: ref=['A', 'RING', 'OF', 'AMETHYST', 'I', 'COULD', 'NOT', 'WEAR', 'HERE', 'PLAINER', 'TO', 'MY', 'SIGHT', 'THAN', 'THAT', 'FIRST', 'KISS'] +908-31957-0014-1366: hyp=['A', 'RING', 'OF', 'AMETHYST', 'I', 'COULD', 'NOT', 'WEAR', 'HERE', 'PLAINER', 'TO', 'MY', 'SIGHT', 'THAN', 'THAT', 'FIRST', 'KISS'] +908-31957-0015-1367: ref=['THAT', 'WAS', 'THE', 'CHRISM', 'OF', 'LOVE', 'WHICH', "LOVE'S", 'OWN', 'CROWN', 'WITH', 'SANCTIFYING', 'SWEETNESS', 'DID', 'PRECEDE', 'THE', 'THIRD', 'UPON', 'MY', 'LIPS', 'WAS', 'FOLDED', 'DOWN', 'IN', 'PERFECT', 'PURPLE', 'STATE', 'SINCE', 'WHEN', 'INDEED', 'I', 'HAVE', 'BEEN', 'PROUD', 'AND', 'SAID', 'MY', 'LOVE', 'MY', 'OWN'] +908-31957-0015-1367: hyp=['THAT', 'WAS', 'THE', 'CHRISM', 'OF', 'LOVE', 'WHICH', 'LOVES', 'OWN', 'CROWN', 'WITH', 'SANCTIFYING', 'SWEETNESS', 'DID', 'PROCEED', 'THE', 'THIRD', 'UPON', 'MY', 'LIPS', 'WAS', 'FOLDED', 'DOWN', 'IMPERFECT', 'PURPLE', 'STATE', 'SINCE', 'WHEN', 'INDEED', 'I', 'HAVE', 'BEEN', 'PROUD', 'AND', 'SAID', 'MY', 'LOVE', 'MY', 'OWN'] +908-31957-0016-1368: ref=['DEAREST', 'TEACH', 'ME', 'SO', 'TO', 'POUR', 'OUT', 'GRATITUDE', 'AS', 'THOU', 'DOST', 'GOOD'] +908-31957-0016-1368: hyp=['DEAREST', 'TEACH', 'ME', 'SO', 'TO', 'POUR', 'OUT', 'GRATITUDE', 'AS', 'THOU', 'DOST', 'GOOD'] +908-31957-0017-1369: ref=['MUSSULMANS', 'AND', 'GIAOURS', 'THROW', 'KERCHIEFS', 'AT', 'A', 'SMILE', 'AND', 'HAVE', 'NO', 'RUTH', 'FOR', 'ANY', 'WEEPING'] +908-31957-0017-1369: hyp=['MUSSULMANS', 'AND', 'GEY', 'ORDS', 'THROW', 'KERCHIEFS', 'AT', 'A', 'SMILE', 'AND', 'HAVE', 'NO', 'RUTH', 'FOR', 'ANY', 'WEEPING'] +908-31957-0018-1370: ref=['BUT', 'THOU', 'ART', 'NOT', 'SUCH', 'A', 'LOVER', 'MY', 'BELOVED'] +908-31957-0018-1370: hyp=['BUT', 'THOU', 'ART', 'NOT', 'SUCH', 'A', 'LOVER', 'MY', 'BELOVED'] +908-31957-0019-1371: ref=['THOU', 'CANST', 'WAIT', 'THROUGH', 
'SORROW', 'AND', 'SICKNESS', 'TO', 'BRING', 'SOULS', 'TO', 'TOUCH', 'AND', 'THINK', 'IT', 'SOON', 'WHEN', 'OTHERS', 'CRY', 'TOO', 'LATE'] +908-31957-0019-1371: hyp=['THOU', 'CANST', 'WAIT', 'THROUGH', 'SORROW', 'AND', 'SICKNESS', 'TO', 'BRING', 'SOULS', 'TO', 'TOUCH', 'AND', 'THINK', 'IT', 'SOON', 'WHEN', 'OTHERS', 'CRY', 'TOO', 'LATE'] +908-31957-0020-1372: ref=['I', 'THANK', 'ALL', 'WHO', 'HAVE', 'LOVED', 'ME', 'IN', 'THEIR', 'HEARTS', 'WITH', 'THANKS', 'AND', 'LOVE', 'FROM', 'MINE'] +908-31957-0020-1372: hyp=['I', 'THINK', 'ALL', 'WHO', 'HAVE', 'LOVED', 'ME', 'IN', 'THEIR', 'HEARTS', 'WITH', 'THANKS', 'AND', 'LOVE', 'FROM', 'MINE'] +908-31957-0021-1373: ref=['OH', 'TO', 'SHOOT', 'MY', "SOUL'S", 'FULL', 'MEANING', 'INTO', 'FUTURE', 'YEARS', 'THAT', 'THEY', 'SHOULD', 'LEND', 'IT', 'UTTERANCE', 'AND', 'SALUTE', 'LOVE', 'THAT', 'ENDURES', 'FROM', 'LIFE', 'THAT', 'DISAPPEARS'] +908-31957-0021-1373: hyp=['OH', 'TO', 'SHOOT', 'MY', "SOUL'S", 'FULL', 'MEANING', 'INTO', 'FUTURE', 'YEARS', 'THAT', 'THEY', 'SHOULD', 'LEND', 'IT', 'UTTERANCE', 'AND', 'SALUTE', 'LOVE', 'THAT', 'ENDURES', 'FROM', 'LIFE', 'THAT', 'DISAPPEARS'] +908-31957-0022-1374: ref=['THEN', 'I', 'LONG', 'TRIED', 'BY', 'NATURAL', 'ILLS', 'RECEIVED', 'THE', 'COMFORT', 'FAST', 'WHILE', 'BUDDING', 'AT', 'THY', 'SIGHT', 'MY', "PILGRIM'S", 'STAFF', 'GAVE', 'OUT', 'GREEN', 'LEAVES', 'WITH', 'MORNING', 'DEWS', 'IMPEARLED'] +908-31957-0022-1374: hyp=['THEN', 'I', 'LONG', 'TRIED', 'BY', 'NATURAL', 'ILLS', 'RECEIVED', 'THE', 'COMFORT', 'FAST', 'WHILE', 'BUDDING', 'AT', 'THY', 'SIGHT', 'MY', "PILGRIM'S", 'STAFF', 'GAVE', 'OUT', 'GREEN', 'LEAVES', 'WITH', 'MORNING', 'DEWS', 'IMPERILLED'] +908-31957-0023-1375: ref=['I', 'LOVE', 'THEE', 'FREELY', 'AS', 'MEN', 'STRIVE', 'FOR', 'RIGHT', 'I', 'LOVE', 'THEE', 'PURELY', 'AS', 'THEY', 'TURN', 'FROM', 'PRAISE'] +908-31957-0023-1375: hyp=['I', 'LOVE', 'THEE', 'FREELY', 'AS', 'MEN', 'STRIVE', 'FOR', 'RIGHT', 'I', 'LOVE', 'THEE', 'PURELY', 'AS', 'THEY', 'TURN', 'FROM', 'PREISE'] +908-31957-0024-1376: ref=['I', 'LOVE', 'THEE', 'WITH', 'THE', 'PASSION', 'PUT', 'TO', 'USE', 'IN', 'MY', 'OLD', 'GRIEFS', 'AND', 'WITH', 'MY', "CHILDHOOD'S", 'FAITH'] +908-31957-0024-1376: hyp=['I', 'LOVE', 'THEE', 'WITH', 'THE', 'PASSION', 'PUT', 'TO', 'USE', 'IN', 'MY', 'OLD', 'GREEDS', 'AND', 'WITH', 'MY', "CHILDHOOD'S", 'FAITH'] +908-31957-0025-1377: ref=['I', 'LOVE', 'THEE', 'WITH', 'A', 'LOVE', 'I', 'SEEMED', 'TO', 'LOSE', 'WITH', 'MY', 'LOST', 'SAINTS', 'I', 'LOVE', 'THEE', 'WITH', 'THE', 'BREATH', 'SMILES', 'TEARS', 'OF', 'ALL', 'MY', 'LIFE', 'AND', 'IF', 'GOD', 'CHOOSE', 'I', 'SHALL', 'BUT', 'LOVE', 'THEE', 'BETTER', 'AFTER', 'DEATH'] +908-31957-0025-1377: hyp=['I', 'LOVE', 'THEE', 'WITH', 'A', 'LOVE', 'I', 'SEEMED', 'TO', 'LOSE', 'WITH', 'MY', 'LOST', 'SAINTS', 'I', 'LOVE', 'THEE', 'WITH', 'THE', 'BREATH', 'SMILES', 'TEARS', 'OF', 'ALL', 'MY', 'LIFE', 'AND', 'IF', 'GOD', 'CHOOSE', 'I', 'SHALL', 'BUT', 'LOVE', 'THEE', 'BETTER', 'AFTER', 'DEATH'] diff --git a/log/greedy_search/recogs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt b/log/greedy_search/recogs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..96daade84e9af276067c6841cf1e3ee05187ef80 --- /dev/null +++ b/log/greedy_search/recogs-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt @@ -0,0 +1,5878 
@@ +1688-142285-0000-1948: ref=["THERE'S", 'IRON', 'THEY', 'SAY', 'IN', 'ALL', 'OUR', 'BLOOD', 'AND', 'A', 'GRAIN', 'OR', 'TWO', 'PERHAPS', 'IS', 'GOOD', 'BUT', 'HIS', 'HE', 'MAKES', 'ME', 'HARSHLY', 'FEEL', 'HAS', 'GOT', 'A', 'LITTLE', 'TOO', 'MUCH', 'OF', 'STEEL', 'ANON'] +1688-142285-0000-1948: hyp=["THERE'S", 'IRON', 'THEY', 'SAY', 'IN', 'ALL', 'OUR', 'BLOOD', 'AND', 'A', 'GRAIN', 'OR', 'TWO', 'PERHAPS', 'IS', 'GOOD', 'BUT', 'HIS', 'HE', 'MAKES', 'ME', 'HARSHLY', 'FEEL', 'HAS', 'GOT', 'A', 'LITTLE', 'TOO', 'MUCH', 'OF', 'STEEL', 'ANON'] +1688-142285-0001-1949: ref=['MARGARET', 'SAID', 'MISTER', 'HALE', 'AS', 'HE', 'RETURNED', 'FROM', 'SHOWING', 'HIS', 'GUEST', 'DOWNSTAIRS', 'I', 'COULD', 'NOT', 'HELP', 'WATCHING', 'YOUR', 'FACE', 'WITH', 'SOME', 'ANXIETY', 'WHEN', 'MISTER', 'THORNTON', 'MADE', 'HIS', 'CONFESSION', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY'] +1688-142285-0001-1949: hyp=['MARGARET', 'SAID', 'MISTER', 'HALE', 'AS', 'HE', 'RETURNED', 'FROM', 'SHOWING', 'HIS', 'GUEST', 'DOWNSTAIRS', 'I', 'COULD', 'NOT', 'HELP', 'WATCHING', 'YOUR', 'FACE', 'WITH', 'SOME', 'ANXIETY', 'WHEN', 'MISTER', 'THORNTON', 'MADE', 'HIS', 'CONFESSION', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY'] +1688-142285-0002-1950: ref=['YOU', "DON'T", 'MEAN', 'THAT', 'YOU', 'THOUGHT', 'ME', 'SO', 'SILLY'] +1688-142285-0002-1950: hyp=['YOU', "DON'T", 'MEAN', 'THAT', 'YOU', 'THOUGHT', 'ME', 'SO', 'SILLY'] +1688-142285-0003-1951: ref=['I', 'REALLY', 'LIKED', 'THAT', 'ACCOUNT', 'OF', 'HIMSELF', 'BETTER', 'THAN', 'ANYTHING', 'ELSE', 'HE', 'SAID'] +1688-142285-0003-1951: hyp=['I', 'REALLY', 'LIKE', 'THAT', 'ACCOUNT', 'OF', 'HIMSELF', 'BETTER', 'THAN', 'ANYTHING', 'ELSE', 'HE', 'SAID'] +1688-142285-0004-1952: ref=['HIS', 'STATEMENT', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY', 'WAS', 'THE', 'THING', 'I', 'LIKED', 'BEST', 'OF', 'ALL'] +1688-142285-0004-1952: hyp=['HIS', 'STATEMENT', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY', 'WAS', 'THE', 'THING', 'I', 'LIKE', 'BEST', 'OF', 'ALL'] +1688-142285-0005-1953: ref=['YOU', 'WHO', 'WERE', 'ALWAYS', 'ACCUSING', 'PEOPLE', 'OF', 'BEING', 'SHOPPY', 'AT', 'HELSTONE'] +1688-142285-0005-1953: hyp=['YOU', 'WHO', 'WERE', 'ALWAYS', 'ACCUSING', 'PEOPLE', 'OF', 'BEING', 'SHOPPY', 'AT', 'HELSTONE'] +1688-142285-0006-1954: ref=['I', "DON'T", 'THINK', 'MISTER', 'HALE', 'YOU', 'HAVE', 'DONE', 'QUITE', 'RIGHT', 'IN', 'INTRODUCING', 'SUCH', 'A', 'PERSON', 'TO', 'US', 'WITHOUT', 'TELLING', 'US', 'WHAT', 'HE', 'HAD', 'BEEN'] +1688-142285-0006-1954: hyp=['I', "DON'T", 'THINK', 'MISTER', 'HALE', 'YOU', 'HAVE', 'DONE', 'QUITE', 'RIGHT', 'HE', 'INTRODUCING', 'SUCH', 'A', 'PERSON', 'TO', 'US', 'WITHOUT', 'TELLING', 'US', 'WHAT', 'HE', 'HAD', 'BEEN'] +1688-142285-0007-1955: ref=['I', 'REALLY', 'WAS', 'VERY', 'MUCH', 'AFRAID', 'OF', 'SHOWING', 'HIM', 'HOW', 'MUCH', 'SHOCKED', 'I', 'WAS', 'AT', 'SOME', 'PARTS', 'OF', 'WHAT', 'HE', 'SAID'] +1688-142285-0007-1955: hyp=['I', 'REALLY', 'WAS', 'VERY', 'MUCH', 'AFRAID', 'OF', 'SHOWING', 'HIM', 'HOW', 'MUCH', 'SHOCKED', 'I', 'WAS', 'AT', 'SOME', 'PART', 'OF', 'WHAT', 'HE', 'SAID'] +1688-142285-0008-1956: ref=['HIS', 'FATHER', 'DYING', 'IN', 'MISERABLE', 'CIRCUMSTANCES'] +1688-142285-0008-1956: hyp=['HIS', 'FATHER', 'DYING', 'IN', 'MISERABLE', 'CIRCUMSTANCES'] +1688-142285-0009-1957: ref=['WHY', 'IT', 'MIGHT', 'HAVE', 'BEEN', 'IN', 'THE', 'WORKHOUSE'] +1688-142285-0009-1957: hyp=['WHY', 'IT', 'MIGHT', 'HAVE', 'BEEN', 'IN', 'THE', 'WORKHOUSE'] +1688-142285-0010-1958: ref=['HIS', 'FATHER', 'SPECULATED', 'WILDLY', 'FAILED', 'AND', 'THEN', 'KILLED', 'HIMSELF', 
'BECAUSE', 'HE', 'COULD', 'NOT', 'BEAR', 'THE', 'DISGRACE'] +1688-142285-0010-1958: hyp=['HIS', 'FATHER', 'SPECULATED', 'WILDLY', 'FAILED', 'AND', 'THEN', 'KILLED', 'HIMSELF', 'BECAUSE', 'HE', 'COULD', 'NOT', 'BEAR', 'THE', 'DISGRACE'] +1688-142285-0011-1959: ref=['ALL', 'HIS', 'FORMER', 'FRIENDS', 'SHRUNK', 'FROM', 'THE', 'DISCLOSURES', 'THAT', 'HAD', 'TO', 'BE', 'MADE', 'OF', 'HIS', 'DISHONEST', 'GAMBLING', 'WILD', 'HOPELESS', 'STRUGGLES', 'MADE', 'WITH', 'OTHER', "PEOPLE'S", 'MONEY', 'TO', 'REGAIN', 'HIS', 'OWN', 'MODERATE', 'PORTION', 'OF', 'WEALTH'] +1688-142285-0011-1959: hyp=['ALL', 'HIS', 'FORMER', 'FRIENDS', 'SHRUNK', 'FROM', 'THE', 'DISCLOSURES', 'THAT', 'HAD', 'TO', 'BE', 'MADE', 'OF', 'HIS', 'DISHONEST', 'GAMBLING', 'WILD', 'HOPELESS', 'STRUGGLES', 'MADE', 'WITH', 'OTHER', "PEOPLE'S", 'MONEY', 'TO', 'REGAIN', 'HIS', 'OWN', 'MODERATE', 'PORTION', 'OF', 'WEALTH'] +1688-142285-0012-1960: ref=['NO', 'ONE', 'CAME', 'FORWARDS', 'TO', 'HELP', 'THE', 'MOTHER', 'AND', 'THIS', 'BOY'] +1688-142285-0012-1960: hyp=['NO', 'ONE', 'CAME', 'FORWARDS', 'TO', 'HELP', 'THE', 'MOTHER', 'AND', 'THIS', 'BOY'] +1688-142285-0013-1961: ref=['AT', 'LEAST', 'NO', 'FRIEND', 'CAME', 'FORWARDS', 'IMMEDIATELY', 'AND', 'MISSUS', 'THORNTON', 'IS', 'NOT', 'ONE', 'I', 'FANCY', 'TO', 'WAIT', 'TILL', 'TARDY', 'KINDNESS', 'COMES', 'TO', 'FIND', 'HER', 'OUT'] +1688-142285-0013-1961: hyp=['AT', 'LEAST', 'NO', 'FRIEND', 'CAME', 'FORWARDS', 'IMMEDIATELY', 'AND', 'MISTER', 'THORNTON', 'IS', 'NOT', 'ONE', 'I', 'FANCY', 'TO', 'WAIT', 'TILL', 'TIDY', 'KINDNESS', 'COMES', 'TO', 'FIND', 'HER', 'OUT'] +1688-142285-0014-1962: ref=['SO', 'THEY', 'LEFT', 'MILTON'] +1688-142285-0014-1962: hyp=['SO', 'THEY', 'LEFT', 'MILTON'] +1688-142285-0015-1963: ref=['HOW', 'TAINTED', 'ASKED', 'HER', 'FATHER'] +1688-142285-0015-1963: hyp=['HOW', 'TAINTED', 'ASKED', 'HER', 'FATHER'] +1688-142285-0016-1964: ref=['OH', 'PAPA', 'BY', 'THAT', 'TESTING', 'EVERYTHING', 'BY', 'THE', 'STANDARD', 'OF', 'WEALTH'] +1688-142285-0016-1964: hyp=['OH', 'PAPA', 'BY', 'THAT', 'TESTING', 'EVERYTHING', 'BY', 'THE', 'STANDARD', 'OF', 'WEALTH'] +1688-142285-0017-1965: ref=['WHEN', 'HE', 'SPOKE', 'OF', 'THE', 'MECHANICAL', 'POWERS', 'HE', 'EVIDENTLY', 'LOOKED', 'UPON', 'THEM', 'ONLY', 'AS', 'NEW', 'WAYS', 'OF', 'EXTENDING', 'TRADE', 'AND', 'MAKING', 'MONEY'] +1688-142285-0017-1965: hyp=['WHEN', 'HE', 'SPOKE', 'OF', 'THE', 'MECHANICAL', 'POWERS', 'HE', 'EVIDENTLY', 'LOOKED', 'UPON', 'THEM', 'ONLY', 'AS', 'NEW', 'WAYS', 'OF', 'EXTENDING', 'TRADE', 'AND', 'MAKING', 'MONEY'] +1688-142285-0018-1966: ref=['AND', 'THE', 'POOR', 'MEN', 'AROUND', 'HIM', 'THEY', 'WERE', 'POOR', 'BECAUSE', 'THEY', 'WERE', 'VICIOUS', 'OUT', 'OF', 'THE', 'PALE', 'OF', 'HIS', 'SYMPATHIES', 'BECAUSE', 'THEY', 'HAD', 'NOT', 'HIS', 'IRON', 'NATURE', 'AND', 'THE', 'CAPABILITIES', 'THAT', 'IT', 'GIVES', 'HIM', 'FOR', 'BEING', 'RICH'] +1688-142285-0018-1966: hyp=['AND', 'THE', 'POOR', 'MEN', 'AROUND', 'HIM', 'THEY', 'WERE', 'POOR', 'BECAUSE', 'THEY', 'WERE', 'VICIOUS', 'OUT', 'OF', 'THE', 'PALE', 'OF', 'HIS', 'SYMPATHIES', 'BECAUSE', 'THEY', 'HAD', 'NOT', 'HIS', 'IRON', 'NATURE', 'AND', 'THE', 'CAPABILITIES', 'THAT', 'IT', 'GIVES', 'HIM', 'FOR', 'BEING', 'RICH'] +1688-142285-0019-1967: ref=['NOT', 'VICIOUS', 'HE', 'NEVER', 'SAID', 'THAT'] +1688-142285-0019-1967: hyp=['NOT', 'VICIOUS', 'HE', 'NEVER', 'SAID', 'THAT'] +1688-142285-0020-1968: ref=['IMPROVIDENT', 'AND', 'SELF', 'INDULGENT', 'WERE', 'HIS', 'WORDS'] +1688-142285-0020-1968: hyp=['IN', 'PROVIDENT', 'AND', 'SELF', 'INDULGENT', 'WERE', 'HIS', 
'WORDS'] +1688-142285-0021-1969: ref=['MARGARET', 'WAS', 'COLLECTING', 'HER', "MOTHER'S", 'WORKING', 'MATERIALS', 'AND', 'PREPARING', 'TO', 'GO', 'TO', 'BED'] +1688-142285-0021-1969: hyp=['MARGARET', 'WAS', 'COLLECTING', 'HER', "MOTHER'S", 'WORKING', 'MATERIALS', 'AND', 'PREPARING', 'TO', 'GO', 'TO', 'BED'] +1688-142285-0022-1970: ref=['JUST', 'AS', 'SHE', 'WAS', 'LEAVING', 'THE', 'ROOM', 'SHE', 'HESITATED', 'SHE', 'WAS', 'INCLINED', 'TO', 'MAKE', 'AN', 'ACKNOWLEDGMENT', 'WHICH', 'SHE', 'THOUGHT', 'WOULD', 'PLEASE', 'HER', 'FATHER', 'BUT', 'WHICH', 'TO', 'BE', 'FULL', 'AND', 'TRUE', 'MUST', 'INCLUDE', 'A', 'LITTLE', 'ANNOYANCE'] +1688-142285-0022-1970: hyp=['JUST', 'AS', 'SHE', 'WAS', 'LEAVING', 'THE', 'ROOM', 'SHE', 'HESITATED', 'SHE', 'WAS', 'INCLINED', 'TO', 'MAKE', 'AN', 'ACKNOWLEDGMENT', 'WHICH', 'SHE', 'THOUGHT', 'WOULD', 'PLEASE', 'HER', 'FATHER', 'BUT', 'WHICH', 'TO', 'BE', 'FULL', 'AND', 'TRUE', 'MUST', 'INCLUDE', 'A', 'LITTLE', 'ANNOYANCE'] +1688-142285-0023-1971: ref=['HOWEVER', 'OUT', 'IT', 'CAME'] +1688-142285-0023-1971: hyp=['HOWEVER', 'OUT', 'IT', 'CAME'] +1688-142285-0024-1972: ref=['PAPA', 'I', 'DO', 'THINK', 'MISTER', 'THORNTON', 'A', 'VERY', 'REMARKABLE', 'MAN', 'BUT', 'PERSONALLY', 'I', "DON'T", 'LIKE', 'HIM', 'AT', 'ALL'] +1688-142285-0024-1972: hyp=['PAPA', 'I', 'DO', 'THINK', 'MISTER', 'THORNTON', 'A', 'VERY', 'REMARKABLE', 'MAN', 'BUT', 'PERSONALLY', 'I', "DON'T", 'LIKE', 'HIM', 'AT', 'ALL'] +1688-142285-0025-1973: ref=['AND', 'I', 'DO', 'SAID', 'HER', 'FATHER', 'LAUGHING'] +1688-142285-0025-1973: hyp=['HELLO', 'I', 'DO', 'SAID', 'HER', 'FATHER', 'LAUGHING'] +1688-142285-0026-1974: ref=['PERSONALLY', 'AS', 'YOU', 'CALL', 'IT', 'AND', 'ALL'] +1688-142285-0026-1974: hyp=['PERSONALLY', 'AS', 'YOU', 'CALL', 'IT', 'AND', 'ALL'] +1688-142285-0027-1975: ref=['I', "DON'T", 'SET', 'HIM', 'UP', 'FOR', 'A', 'HERO', 'OR', 'ANYTHING', 'OF', 'THAT', 'KIND'] +1688-142285-0027-1975: hyp=['I', "DON'T", 'SET', 'HIM', 'UP', 'FOR', 'A', 'HERO', 'OR', 'ANYTHING', 'OF', 'THAT', 'KIND'] +1688-142285-0028-1976: ref=['BUT', 'GOOD', 'NIGHT', 'CHILD'] +1688-142285-0028-1976: hyp=['BUT', 'GOOD', 'NIGHT', 'CHILD'] +1688-142285-0029-1977: ref=['THERE', 'WERE', 'SEVERAL', 'OTHER', 'SIGNS', 'OF', 'SOMETHING', 'WRONG', 'ABOUT', 'MISSUS', 'HALE'] +1688-142285-0029-1977: hyp=['THERE', 'WERE', 'SEVERAL', 'OTHER', 'SIGNS', 'OF', 'SOMETHING', 'WRONG', 'ABOUT', 'MISSUS', 'HALE'] +1688-142285-0030-1978: ref=['SHE', 'AND', 'DIXON', 'HELD', 'MYSTERIOUS', 'CONSULTATIONS', 'IN', 'HER', 'BEDROOM', 'FROM', 'WHICH', 'DIXON', 'WOULD', 'COME', 'OUT', 'CRYING', 'AND', 'CROSS', 'AS', 'WAS', 'HER', 'CUSTOM', 'WHEN', 'ANY', 'DISTRESS', 'OF', 'HER', 'MISTRESS', 'CALLED', 'UPON', 'HER', 'SYMPATHY'] +1688-142285-0030-1978: hyp=['SHE', 'AND', 'DIXON', 'HELD', 'MYSTERIOUS', 'CONSULTATIONS', 'IN', 'HER', 'BEDROOM', 'FROM', 'WHICH', 'DIXON', 'WOULD', 'COME', 'OUT', 'CRYING', 'AND', 'CROSS', 'AS', 'WAS', 'ACCUSTOM', 'WHEN', 'ANY', 'DISTRESS', 'OF', 'HER', 'MISTRESS', 'CALLED', 'UPON', 'HER', 'SYMPATHY'] +1688-142285-0031-1979: ref=['ONCE', 'MARGARET', 'HAD', 'GONE', 'INTO', 'THE', 'CHAMBER', 'SOON', 'AFTER', 'DIXON', 'LEFT', 'IT', 'AND', 'FOUND', 'HER', 'MOTHER', 'ON', 'HER', 'KNEES', 'AND', 'AS', 'MARGARET', 'STOLE', 'OUT', 'SHE', 'CAUGHT', 'A', 'FEW', 'WORDS', 'WHICH', 'WERE', 'EVIDENTLY', 'A', 'PRAYER', 'FOR', 'STRENGTH', 'AND', 'PATIENCE', 'TO', 'ENDURE', 'SEVERE', 'BODILY', 'SUFFERING'] +1688-142285-0031-1979: hyp=['ONCE', 'MARGARET', 'HAD', 'GONE', 'INTO', 'THE', 'CHAMBER', 'SOON', 'AFTER', 'DIXON', 'LIFTED', 'AND', 
'FOUND', 'HER', 'MOTHER', 'ON', 'HER', 'KNEES', 'AND', 'AS', 'MARGARET', 'STOLE', 'OUT', 'SHE', 'CAUGHT', 'A', 'FEW', 'WORDS', 'WHICH', 'WERE', 'EVIDENTLY', 'A', 'PRAYER', 'FOR', 'STRENGTH', 'AND', 'PATIENCE', 'TO', 'INDUCE', 'SEVERE', 'BODILY', 'SUFFERING'] +1688-142285-0032-1980: ref=['BUT', 'THOUGH', 'SHE', 'RECEIVED', 'CARESSES', 'AND', 'FOND', 'WORDS', 'BACK', 'AGAIN', 'IN', 'SUCH', 'PROFUSION', 'AS', 'WOULD', 'HAVE', 'GLADDENED', 'HER', 'FORMERLY', 'YET', 'SHE', 'FELT', 'THAT', 'THERE', 'WAS', 'A', 'SECRET', 'WITHHELD', 'FROM', 'HER', 'AND', 'SHE', 'BELIEVED', 'IT', 'BORE', 'SERIOUS', 'REFERENCE', 'TO', 'HER', "MOTHER'S", 'HEALTH'] +1688-142285-0032-1980: hyp=['BUT', 'THOUGH', 'SHE', 'RECEIVED', 'CARESSES', 'AND', 'FOND', 'WORDS', 'BACK', 'AGAIN', 'IN', 'SUCH', 'PROFUSION', 'AS', 'WOULD', 'HAVE', 'GLADDENED', 'HER', 'FORMERLY', 'YET', 'SHE', 'FELT', 'THAT', 'THERE', 'WAS', 'A', 'SECRET', 'WITHHELD', 'FROM', 'HER', 'AND', 'SHE', 'BELIEVED', 'IT', 'BORE', 'SERIOUS', 'REFERENCE', 'TO', 'HER', "MOTHER'S", 'HEALTH'] +1688-142285-0033-1981: ref=['SHE', 'LAY', 'AWAKE', 'VERY', 'LONG', 'THIS', 'NIGHT', 'PLANNING', 'HOW', 'TO', 'LESSEN', 'THE', 'EVIL', 'INFLUENCE', 'OF', 'THEIR', 'MILTON', 'LIFE', 'ON', 'HER', 'MOTHER'] +1688-142285-0033-1981: hyp=['SHE', 'LAY', 'AWAKE', 'VERY', 'LONG', 'THIS', 'NIGHT', 'PLANNING', 'HOW', 'TO', 'LISTEN', 'THE', 'EVIL', 'INFLUENCE', 'OF', 'THEIR', 'MILTON', 'LIFE', 'ON', 'HER', 'MOTHER'] +1688-142285-0034-1982: ref=['A', 'SERVANT', 'TO', 'GIVE', 'DIXON', 'PERMANENT', 'ASSISTANCE', 'SHOULD', 'BE', 'GOT', 'IF', 'SHE', 'GAVE', 'UP', 'HER', 'WHOLE', 'TIME', 'TO', 'THE', 'SEARCH', 'AND', 'THEN', 'AT', 'ANY', 'RATE', 'HER', 'MOTHER', 'MIGHT', 'HAVE', 'ALL', 'THE', 'PERSONAL', 'ATTENTION', 'SHE', 'REQUIRED', 'AND', 'HAD', 'BEEN', 'ACCUSTOMED', 'TO', 'HER', 'WHOLE', 'LIFE'] +1688-142285-0034-1982: hyp=['A', 'SERVANT', 'GIVE', 'DIXON', 'PERMANENT', 'ASSISTANCE', 'SHOULD', 'BE', 'GOT', 'IF', 'SHE', 'GAVE', 'UP', 'THE', 'WHOLE', 'TIME', 'TO', 'THE', 'SEARCH', 'AND', 'THEN', 'AT', 'ANY', 'RATE', 'HER', 'MOTHER', 'MIGHT', 'HAVE', 'ALL', 'THE', 'PERSONAL', 'ATTENTIONS', 'SHE', 'REQUIRED', 'AND', 'HAD', 'BEEN', 'ACCUSTOMED', 'TO', 'HER', 'WHOLE', 'LIFE'] +1688-142285-0035-1983: ref=['VISITING', 'REGISTER', 'OFFICES', 'SEEING', 'ALL', 'MANNER', 'OF', 'UNLIKELY', 'PEOPLE', 'AND', 'VERY', 'FEW', 'IN', 'THE', 'LEAST', 'LIKELY', 'ABSORBED', "MARGARET'S", 'TIME', 'AND', 'THOUGHTS', 'FOR', 'SEVERAL', 'DAYS'] +1688-142285-0035-1983: hyp=['VISITING', 'REGISTER', 'OFFICERS', 'SEEING', 'ALL', 'MANNER', 'OF', 'UNLIKELY', 'PEOPLE', 'AND', 'VERY', 'FEW', 'IN', 'THE', 'LEAST', 'LIKELY', 'ABSORBED', "MARGARET'S", 'TIME', 'AND', 'THOUGHTS', 'FOR', 'SEVERAL', 'DAYS'] +1688-142285-0036-1984: ref=['ONE', 'AFTERNOON', 'SHE', 'MET', 'BESSY', 'HIGGINS', 'IN', 'THE', 'STREET', 'AND', 'STOPPED', 'TO', 'SPEAK', 'TO', 'HER'] +1688-142285-0036-1984: hyp=['ONE', 'AFTERNOON', 'SHE', 'MET', 'BESSY', 'HIGGINS', 'IN', 'THE', 'STREET', 'AND', 'STOPPED', 'TO', 'SPEAK', 'TO', 'HER'] +1688-142285-0037-1985: ref=['WELL', 'BESSY', 'HOW', 'ARE', 'YOU'] +1688-142285-0037-1985: hyp=['WELL', 'BUSY', 'HOW', 'ARE', 'YOU'] +1688-142285-0038-1986: ref=['BETTER', 'AND', 'NOT', 'BETTER', 'IF', 'YO', 'KNOW', 'WHAT', 'THAT', 'MEANS'] +1688-142285-0038-1986: hyp=['BETTER', 'AND', 'NOT', 'BETTER', 'IF', 'YOU', 'KNOW', 'WHAT', 'THAT', 'MEANS'] +1688-142285-0039-1987: ref=['NOT', 'EXACTLY', 'REPLIED', 'MARGARET', 'SMILING'] +1688-142285-0039-1987: hyp=['NOT', 'EXACTLY', 'REPLIED', 'MARGARET', 'SMILING'] +1688-142285-0040-1988: 
ref=["I'M", 'BETTER', 'IN', 'NOT', 'BEING', 'TORN', 'TO', 'PIECES', 'BY', 'COUGHING', "O'NIGHTS", 'BUT', "I'M", 'WEARY', 'AND', 'TIRED', 'O', 'MILTON', 'AND', 'LONGING', 'TO', 'GET', 'AWAY', 'TO', 'THE', 'LAND', 'O', 'BEULAH', 'AND', 'WHEN', 'I', 'THINK', "I'M", 'FARTHER', 'AND', 'FARTHER', 'OFF', 'MY', 'HEART', 'SINKS', 'AND', "I'M", 'NO', 'BETTER', "I'M", 'WORSE'] +1688-142285-0040-1988: hyp=["I'M", 'BETTER', 'IN', 'NOT', 'BEING', 'TAUGHT', 'TO', 'PIECES', 'BY', 'COUGHING', 'OR', 'NIGHTS', 'BUT', "I'M", 'WEARY', 'AND', 'TIRED', 'OF', 'MILTON', 'AND', 'LONGING', 'TO', 'GET', 'AWAY', 'TO', 'THE', 'LAND', 'OF', 'BOOLA', 'AND', 'WHEN', 'I', 'THINK', "I'M", 'FARTHER', 'AND', 'FARTHER', 'OFF', 'MY', 'HEART', 'SINKS', 'AND', "I'M", 'NO', 'BETTER', "I'M", 'WORSE'] +1688-142285-0041-1989: ref=['MARGARET', 'TURNED', 'ROUND', 'TO', 'WALK', 'ALONGSIDE', 'OF', 'THE', 'GIRL', 'IN', 'HER', 'FEEBLE', 'PROGRESS', 'HOMEWARD'] +1688-142285-0041-1989: hyp=['MARGARET', 'TURNED', 'ROUND', 'TO', 'WALK', 'LONG', 'SIDE', 'OF', 'THE', 'GIRL', 'IN', 'HER', 'FEEBLE', 'PROGRESS', 'HOMEWARD'] +1688-142285-0042-1990: ref=['BUT', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'SHE', 'DID', 'NOT', 'SPEAK'] +1688-142285-0042-1990: hyp=['BUT', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'SHE', 'DID', 'NOT', 'SPEAK'] +1688-142285-0043-1991: ref=['AT', 'LAST', 'SHE', 'SAID', 'IN', 'A', 'LOW', 'VOICE'] +1688-142285-0043-1991: hyp=['AT', 'LAST', 'SHE', 'SAID', 'IN', 'A', 'LOW', 'VOICE'] +1688-142285-0044-1992: ref=['BESSY', 'DO', 'YOU', 'WISH', 'TO', 'DIE'] +1688-142285-0044-1992: hyp=['BESSY', 'DO', 'YOU', 'WISH', 'TO', 'DIE'] +1688-142285-0045-1993: ref=['BESSY', 'WAS', 'SILENT', 'IN', 'HER', 'TURN', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'THEN', 'SHE', 'REPLIED'] +1688-142285-0045-1993: hyp=['BESSY', 'WAS', 'SILENT', 'IN', 'HER', 'TURN', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'THEN', 'SHE', 'REPLIED'] +1688-142285-0046-1994: ref=['NOUGHT', 'WORSE', 'THAN', 'MANY', 'OTHERS', 'I', 'RECKON'] +1688-142285-0046-1994: hyp=['NOT', 'WORSE', 'THAN', 'MANY', 'OTHERS', 'I', 'RECKON'] +1688-142285-0047-1995: ref=['BUT', 'WHAT', 'WAS', 'IT'] +1688-142285-0047-1995: hyp=['BUT', 'WHAT', 'WAS', 'IT'] +1688-142285-0048-1996: ref=['YOU', 'KNOW', "I'M", 'A', 'STRANGER', 'HERE', 'SO', 'PERHAPS', "I'M", 'NOT', 'SO', 'QUICK', 'AT', 'UNDERSTANDING', 'WHAT', 'YOU', 'MEAN', 'AS', 'IF', "I'D", 'LIVED', 'ALL', 'MY', 'LIFE', 'AT', 'MILTON'] +1688-142285-0048-1996: hyp=['YOU', 'KNOW', "I'M", 'A', 'STRANGER', 'HERE', 'SO', 'PERHAPS', "I'M", 'NOT', 'SO', 'QUICK', 'AT', 'UNDERSTANDING', 'WHAT', 'YOU', 'MEAN', 'AS', 'IF', "I'D", 'LIVED', 'ALL', 'MY', 'LIFE', 'IN', 'MILTON'] +1688-142285-0049-1997: ref=['I', 'HAD', 'FORGOTTEN', 'WHAT', 'I', 'SAID', 'FOR', 'THE', 'TIME', 'CONTINUED', 'MARGARET', 'QUIETLY'] +1688-142285-0049-1997: hyp=['I', 'HAD', 'FORGOTTEN', 'WHAT', 'I', 'SAID', 'FOR', 'THE', 'TIME', 'CONTINUED', 'MARGARET', 'QUIETLY'] +1688-142285-0050-1998: ref=['I', 'SHOULD', 'HAVE', 'THOUGHT', 'OF', 'IT', 'AGAIN', 'WHEN', 'I', 'WAS', 'LESS', 'BUSY', 'MAY', 'I', 'GO', 'WITH', 'YOU', 'NOW'] +1688-142285-0050-1998: hyp=['I', 'SHOULD', 'HAVE', 'THOUGHT', 'OF', 'IT', 'AGAIN', 'WHEN', 'I', 'WAS', 'LESS', 'BUSY', 'MAY', 'I', 'GO', 'WITH', 'YOU', 'NOW'] +1688-142285-0051-1999: ref=['THE', 'SHARPNESS', 'IN', 'HER', 'EYE', 'TURNED', 'TO', 'A', 'WISTFUL', 'LONGING', 'AS', 'SHE', 'MET', "MARGARET'S", 'SOFT', 'AND', 'FRIENDLY', 'GAZE'] +1688-142285-0051-1999: hyp=['THE', 'SHARPNESS', 'IN', 'HER', 'EYE', 'TURNED', 'TO', 'A', 'WISTFUL', 'LONGING', 'AS', 'SHE', 'MET', 'MARGARET', 'SOFT', 'AND', 
'FRIENDLY', 'GAZE'] +1688-142285-0052-2000: ref=['AS', 'THEY', 'TURNED', 'UP', 'INTO', 'A', 'SMALL', 'COURT', 'OPENING', 'OUT', 'OF', 'A', 'SQUALID', 'STREET', 'BESSY', 'SAID'] +1688-142285-0052-2000: hyp=['AS', 'THEY', 'TURNED', 'UP', 'INTO', 'A', 'SMALL', 'COURT', 'OPENING', 'OUT', 'INTO', 'A', 'SQUALID', 'STREET', 'BESSY', 'SAID'] +1688-142285-0053-2001: ref=["YO'LL", 'NOT', 'BE', 'DAUNTED', 'IF', "FATHER'S", 'AT', 'HOME', 'AND', 'SPEAKS', 'A', 'BIT', 'GRUFFISH', 'AT', 'FIRST'] +1688-142285-0053-2001: hyp=['YOU', 'WILL', 'NOT', 'BE', 'DAUNTED', 'IF', "FATHER'S", 'AT', 'HOME', 'AND', 'SPEAKS', 'A', 'BIT', 'GRUFFISH', 'AT', 'FIRST'] +1688-142285-0054-2002: ref=['BUT', 'NICHOLAS', 'WAS', 'NOT', 'AT', 'HOME', 'WHEN', 'THEY', 'ENTERED'] +1688-142285-0054-2002: hyp=['BUT', 'NICHOLAS', 'WAS', 'NOT', 'AT', 'HOME', 'WHEN', 'THEY', 'ENTERED'] +1688-142285-0055-2003: ref=['GASPED', 'BESSY', 'AT', 'LAST'] +1688-142285-0055-2003: hyp=['GASPED', 'BESSIE', 'AT', 'LAST'] +1688-142285-0056-2004: ref=['BESSY', 'TOOK', 'A', 'LONG', 'AND', 'FEVERISH', 'DRAUGHT', 'AND', 'THEN', 'FELL', 'BACK', 'AND', 'SHUT', 'HER', 'EYES'] +1688-142285-0056-2004: hyp=['BESSY', 'TOOK', 'A', 'LONG', 'AND', 'FEVERISH', 'DRAUGHT', 'AND', 'THEN', 'FELL', 'BACK', 'AND', 'SHUT', 'HER', 'EYES'] +1688-142285-0057-2005: ref=['MARGARET', 'BENT', 'OVER', 'AND', 'SAID', 'BESSY', "DON'T", 'BE', 'IMPATIENT', 'WITH', 'YOUR', 'LIFE', 'WHATEVER', 'IT', 'IS', 'OR', 'MAY', 'HAVE', 'BEEN'] +1688-142285-0057-2005: hyp=['MARGARET', 'BENT', 'OVER', 'AND', 'SAID', 'BESSY', "DON'T", 'BE', 'IMPATIENT', 'WITH', 'YOUR', 'LIFE', 'WHATEVER', 'IT', 'IS', 'OR', 'MAY', 'HAVE', 'BEEN'] +1688-142285-0058-2006: ref=['REMEMBER', 'WHO', 'GAVE', 'IT', 'YOU', 'AND', 'MADE', 'IT', 'WHAT', 'IT', 'IS'] +1688-142285-0058-2006: hyp=['REMEMBER', 'WHO', 'GAVE', 'IT', 'TO', 'YOU', 'AND', 'MADE', 'IT', 'WHAT', 'IT', 'IS'] +1688-142285-0059-2007: ref=['NOW', "I'LL", 'NOT', 'HAVE', 'MY', 'WENCH', 'PREACHED', 'TO'] +1688-142285-0059-2007: hyp=['NOW', "I'LL", 'NOT', 'HAVE', 'MY', 'WENCH', 'PREACH', 'TO'] +1688-142285-0060-2008: ref=['BUT', 'SURELY', 'SAID', 'MARGARET', 'FACING', 'ROUND', 'YOU', 'BELIEVE', 'IN', 'WHAT', 'I', 'SAID', 'THAT', 'GOD', 'GAVE', 'HER', 'LIFE', 'AND', 'ORDERED', 'WHAT', 'KIND', 'OF', 'LIFE', 'IT', 'WAS', 'TO', 'BE'] +1688-142285-0060-2008: hyp=['BUT', 'SURELY', 'SAID', 'MARGARET', 'FACING', 'ROUND', 'YOU', 'BELIEVE', 'IN', 'WHAT', 'I', 'SAID', 'THAT', 'GOD', 'GAVE', 'HER', 'LIFE', 'AND', 'ORDERED', 'WHAT', 'KIND', 'OF', 'LIFE', 'IT', 'WAS', 'TO', 'BE'] +1688-142285-0061-2009: ref=['I', 'BELIEVE', 'WHAT', 'I', 'SEE', 'AND', 'NO', 'MORE'] +1688-142285-0061-2009: hyp=['I', 'BELIEVE', 'WHAT', 'I', 'SEE', 'AND', 'NO', 'MORE'] +1688-142285-0062-2010: ref=["THAT'S", 'WHAT', 'I', 'BELIEVE', 'YOUNG', 'WOMAN'] +1688-142285-0062-2010: hyp=["THAT'S", 'WHAT', 'I', 'BELIEVE', 'YOUNG', 'WOMAN'] +1688-142285-0063-2011: ref=['I', "DON'T", 'BELIEVE', 'ALL', 'I', 'HEAR', 'NO', 'NOT', 'BY', 'A', 'BIG', 'DEAL'] +1688-142285-0063-2011: hyp=['I', "DON'T", 'BELIEVE', 'ALL', 'I', 'HEAR', 'NO', 'NOT', 'BY', 'A', 'BIG', 'DEAL'] +1688-142285-0064-2012: ref=['BUT', "HOO'S", 'COME', 'AT', 'LAST', 'AND', "HOO'S", 'WELCOME', 'AS', 'LONG', 'AS', "HOO'LL", 'KEEP', 'FROM', 'PREACHING', 'ON', 'WHAT', 'HOO', 'KNOWS', 'NOUGHT', 'ABOUT'] +1688-142285-0064-2012: hyp=['BUT', 'WHOSE', 'COME', 'AT', 'LAST', 'AND', "WHO'S", 'WELCOME', 'AS', 'LONG', 'AS', "HE'LL", 'KEEP', 'FROM', 'PREACHING', 'ON', 'WHAT', 'WHO', 'KNOWS', 'NOT', 'ABOUT'] +1688-142285-0065-2013: ref=["IT'S", 'SIMPLE', 'AND', 
'NOT', 'FAR', 'TO', 'FETCH', 'NOR', 'HARD', 'TO', 'WORK'] +1688-142285-0065-2013: hyp=["IT'S", 'SIMPLE', 'AND', 'NOT', 'FAR', 'TO', 'FETCH', 'NOR', 'HARD', 'TO', 'WORK'] +1688-142285-0066-2014: ref=['BUT', 'THE', 'GIRL', 'ONLY', 'PLEADED', 'THE', 'MORE', 'WITH', 'MARGARET'] +1688-142285-0066-2014: hyp=['BUT', 'THE', 'GIRL', 'ONLY', 'PLEADED', 'THE', 'MORE', 'WITH', 'MARGARET'] +1688-142285-0067-2015: ref=["DON'T", 'THINK', 'HARDLY', 'ON', 'HIM', "HE'S", 'A', 'GOOD', 'MAN', 'HE', 'IS'] +1688-142285-0067-2015: hyp=["DON'T", 'THINK', 'HARDLY', 'ON', 'HIM', "HE'S", 'A', 'GOOD', 'MAN', 'HE', 'IS'] +1688-142285-0068-2016: ref=['I', 'SOMETIMES', 'THINK', 'I', 'SHALL', 'BE', 'MOPED', 'WI', 'SORROW', 'EVEN', 'IN', 'THE', 'CITY', 'OF', 'GOD', 'IF', 'FATHER', 'IS', 'NOT', 'THERE'] +1688-142285-0068-2016: hyp=['I', 'SOMETIMES', 'THINK', 'I', 'SHALL', 'BE', 'MILKED', 'WITH', 'SORROW', 'EVEN', 'IN', 'THE', 'CITY', 'OF', 'GOD', 'IF', 'EITHER', 'IS', 'NOT', 'THERE'] +1688-142285-0069-2017: ref=['THE', 'FEVERISH', 'COLOUR', 'CAME', 'INTO', 'HER', 'CHEEK', 'AND', 'THE', 'FEVERISH', 'FLAME', 'INTO', 'HER', 'EYE'] +1688-142285-0069-2017: hyp=['THE', 'FEVERISH', 'COLOUR', 'CAME', 'INTO', 'A', 'CHEEKS', 'AND', 'THE', 'FEVERISH', 'FLAME', 'INTO', 'HER', 'EYE'] +1688-142285-0070-2018: ref=['BUT', 'YOU', 'WILL', 'BE', 'THERE', 'FATHER', 'YOU', 'SHALL', 'OH', 'MY', 'HEART'] +1688-142285-0070-2018: hyp=['BUT', "YOU'LL", 'BE', 'THEIR', 'FATHER', 'YOU', 'SHALL', 'O', 'MY', 'HEART'] +1688-142285-0071-2019: ref=['SHE', 'PUT', 'HER', 'HAND', 'TO', 'IT', 'AND', 'BECAME', 'GHASTLY', 'PALE'] +1688-142285-0071-2019: hyp=['SHE', 'PUT', 'HER', 'HAND', 'TO', 'IT', 'AND', 'BECAME', 'GHASTLY', 'PALE'] +1688-142285-0072-2020: ref=['MARGARET', 'HELD', 'HER', 'IN', 'HER', 'ARMS', 'AND', 'PUT', 'THE', 'WEARY', 'HEAD', 'TO', 'REST', 'UPON', 'HER', 'BOSOM'] +1688-142285-0072-2020: hyp=['MARGARET', 'HELD', 'HER', 'IN', 'HER', 'ARMS', 'AND', 'PUT', 'THE', 'WEARY', 'HEAD', 'TO', 'REST', 'UPON', 'HER', 'BOSOM'] +1688-142285-0073-2021: ref=['PRESENTLY', 'THE', 'SPASM', 'THAT', 'FORESHADOWED', 'DEATH', 'HAD', 'PASSED', 'AWAY', 'AND', 'BESSY', 'ROUSED', 'HERSELF', 'AND', 'SAID'] +1688-142285-0073-2021: hyp=['PRESENTLY', 'THE', 'SPASM', 'THAT', 'FORESHADOWED', 'DEATH', 'HAD', 'PASSED', 'AWAY', 'AND', 'BUSY', 'ROUSED', 'HERSELF', 'AND', 'SAID'] +1688-142285-0074-2022: ref=["I'LL", 'GO', 'TO', 'BED', "IT'S", 'BEST', 'PLACE', 'BUT', 'CATCHING', 'AT', "MARGARET'S", 'GOWN', "YO'LL", 'COME', 'AGAIN', 'I', 'KNOW', 'YO', 'WILL', 'BUT', 'JUST', 'SAY', 'IT'] +1688-142285-0074-2022: hyp=["I'LL", 'GO', 'TO', 'BED', "IT'S", 'BEST', 'PLACE', 'BUT', 'CATCHING', 'THAT', "MARGARET'S", 'GOWN', "YOU'LL", 'COME', 'AGAIN', 'I', 'KNOW', 'YOU', 'WILL', 'BUT', 'JUST', 'SAY', 'IT'] +1688-142285-0075-2023: ref=['I', 'WILL', 'COME', 'TO', 'MORROW', 'SAID', 'MARGARET'] +1688-142285-0075-2023: hyp=['OH', 'COME', 'TO', 'MORROW', 'SAID', 'MARGARET'] +1688-142285-0076-2024: ref=['MARGARET', 'WENT', 'AWAY', 'VERY', 'SAD', 'AND', 'THOUGHTFUL'] +1688-142285-0076-2024: hyp=['MARGARET', 'WENT', 'AWAY', 'VERY', 'SAD', 'AND', 'THOUGHTFUL'] +1688-142285-0077-2025: ref=['SHE', 'WAS', 'LATE', 'FOR', 'TEA', 'AT', 'HOME'] +1688-142285-0077-2025: hyp=['SHE', 'WAS', 'LATE', 'FOR', 'TEA', 'AT', 'HOME'] +1688-142285-0078-2026: ref=['HAVE', 'YOU', 'MET', 'WITH', 'A', 'SERVANT', 'DEAR'] +1688-142285-0078-2026: hyp=['HAVE', 'YOU', 'MET', 'WITH', 'A', 'SERVANT', 'DEAR'] +1688-142285-0079-2027: ref=['NO', 'MAMMA', 'THAT', 'ANNE', 'BUCKLEY', 'WOULD', 'NEVER', 'HAVE', 'DONE'] 
+1688-142285-0079-2027: hyp=['NO', 'MAMMA', 'THAT', 'ANNE', 'BUCKLEY', 'WOULD', 'NEVER', 'HAVE', 'DONE'] +1688-142285-0080-2028: ref=['SUPPOSE', 'I', 'TRY', 'SAID', 'MISTER', 'HALE'] +1688-142285-0080-2028: hyp=["S'POSE", 'I', 'TRY', 'SAID', 'MISTER', 'HALE'] +1688-142285-0081-2029: ref=['EVERYBODY', 'ELSE', 'HAS', 'HAD', 'THEIR', 'TURN', 'AT', 'THIS', 'GREAT', 'DIFFICULTY', 'NOW', 'LET', 'ME', 'TRY'] +1688-142285-0081-2029: hyp=['EVERYBODY', 'ELSE', 'HAS', 'HAD', 'THEY', 'TURN', 'UP', 'THIS', 'GREAT', 'DIFFICULTY', 'NOW', 'LET', 'ME', 'TRY'] +1688-142285-0082-2030: ref=['I', 'MAY', 'BE', 'THE', 'CINDERELLA', 'TO', 'PUT', 'ON', 'THE', 'SLIPPER', 'AFTER', 'ALL'] +1688-142285-0082-2030: hyp=['I', 'MAY', 'BE', 'THE', 'CINRILLA', 'TO', 'PUT', 'ON', 'THE', 'SLIPPER', 'AFTER', 'ALL'] +1688-142285-0083-2031: ref=['WHAT', 'WOULD', 'YOU', 'DO', 'PAPA', 'HOW', 'WOULD', 'YOU', 'SET', 'ABOUT', 'IT'] +1688-142285-0083-2031: hyp=['WHAT', 'WOULD', 'YOU', 'DO', 'PAPA', 'HOW', 'WOULD', 'YOU', 'SET', 'ABOUT', 'IT'] +1688-142285-0084-2032: ref=['WHY', 'I', 'WOULD', 'APPLY', 'TO', 'SOME', 'GOOD', 'HOUSE', 'MOTHER', 'TO', 'RECOMMEND', 'ME', 'ONE', 'KNOWN', 'TO', 'HERSELF', 'OR', 'HER', 'SERVANTS'] +1688-142285-0084-2032: hyp=['WHY', 'I', 'WOULD', 'APPLY', 'TO', 'SOME', 'GOOD', 'HOUSE', 'MOTHER', 'TO', 'RECOMMEND', 'ME', 'ONE', 'KNOWN', 'TO', 'HERSELF', 'OR', 'HER', 'SERVANTS'] +1688-142285-0085-2033: ref=['VERY', 'GOOD', 'BUT', 'WE', 'MUST', 'FIRST', 'CATCH', 'OUR', 'HOUSE', 'MOTHER'] +1688-142285-0085-2033: hyp=['VERY', 'GOOD', 'BUT', 'WE', 'MUST', 'FIRST', 'CATCH', 'OUR', 'HOUSE', 'MOTHER'] +1688-142285-0086-2034: ref=['THE', 'MOTHER', 'OF', 'WHOM', 'HE', 'SPOKE', 'TO', 'US', 'SAID', 'MARGARET'] +1688-142285-0086-2034: hyp=['THE', 'MOTHER', 'OF', 'WHOM', 'HE', 'SPOKE', 'TO', 'US', 'SAID', 'MARGARET'] +1688-142285-0087-2035: ref=['MISSUS', 'THORNTON', 'THE', 'ONLY', 'MOTHER', 'HE', 'HAS', 'I', 'BELIEVE', 'SAID', 'MISTER', 'HALE', 'QUIETLY'] +1688-142285-0087-2035: hyp=['MISTER', 'THORNTON', 'THE', 'ONLY', 'MOTHER', 'HE', 'HAS', 'I', 'BELIEVE', 'SAID', 'MISTER', 'HALE', 'QUIETLY'] +1688-142285-0088-2036: ref=['I', 'SHALL', 'LIKE', 'TO', 'SEE', 'HER', 'SHE', 'MUST', 'BE', 'AN', 'UNCOMMON', 'PERSON', 'HER', 'MOTHER', 'ADDED'] +1688-142285-0088-2036: hyp=['I', 'SHALL', 'LIKE', 'TO', 'SEE', 'HER', 'SHE', 'MUST', 'BE', 'AN', 'UNCOMMON', 'PERSON', 'HER', 'MOTHER', 'ADDED'] +1688-142285-0089-2037: ref=['PERHAPS', 'SHE', 'MAY', 'HAVE', 'A', 'RELATION', 'WHO', 'MIGHT', 'SUIT', 'US', 'AND', 'BE', 'GLAD', 'OF', 'OUR', 'PLACE'] +1688-142285-0089-2037: hyp=['PERHAPS', 'SHE', 'MAY', 'HAVE', 'A', 'RELATION', 'WHO', 'MIGHT', 'SUIT', 'US', 'AND', 'BE', 'GLAD', 'OF', 'OUR', 'PLACE'] +1688-142285-0090-2038: ref=['SHE', 'SOUNDED', 'TO', 'BE', 'SUCH', 'A', 'CAREFUL', 'ECONOMICAL', 'PERSON', 'THAT', 'I', 'SHOULD', 'LIKE', 'ANY', 'ONE', 'OUT', 'OF', 'THE', 'SAME', 'FAMILY'] +1688-142285-0090-2038: hyp=['SHE', 'SOUNDED', 'TO', 'BE', 'SUCH', 'A', 'CAREFUL', 'ECONOMICAL', 'PERSON', 'THAT', 'I', 'SHOULD', 'LIKE', 'ANY', 'ONE', 'OUT', 'OF', 'THE', 'SAME', 'FAMILY'] +1688-142285-0091-2039: ref=['MY', 'DEAR', 'SAID', 'MISTER', 'HALE', 'ALARMED', 'PRAY', "DON'T", 'GO', 'OFF', 'ON', 'THAT', 'IDEA'] +1688-142285-0091-2039: hyp=['MY', 'DEAR', 'SAID', 'MISTER', 'HALE', 'ALARMED', 'PRAY', "DON'T", 'GO', 'OFF', 'ON', 'THAT', 'IDEA'] +1688-142285-0092-2040: ref=['I', 'AM', 'SURE', 'AT', 'ANY', 'RATE', 'SHE', 'WOULD', 'NOT', 'LIKE', 'STRANGERS', 'TO', 'KNOW', 'ANYTHING', 'ABOUT', 'IT'] +1688-142285-0092-2040: hyp=['I', 'AM', 'SURE', 'AT', 'ANY', 
'RATE', 'SHE', 'WOULD', 'NOT', 'LIKE', 'STRANGERS', 'TO', 'KNOW', 'ANYTHING', 'ABOUT', 'IT'] +1688-142285-0093-2041: ref=['TAKE', 'NOTICE', 'THAT', 'IS', 'NOT', 'MY', 'KIND', 'OF', 'HAUGHTINESS', 'PAPA', 'IF', 'I', 'HAVE', 'ANY', 'AT', 'ALL', 'WHICH', 'I', "DON'T", 'AGREE', 'TO', 'THOUGH', "YOU'RE", 'ALWAYS', 'ACCUSING', 'ME', 'OF', 'IT'] +1688-142285-0093-2041: hyp=['TAKE', 'NOTICE', 'THAT', 'THIS', 'IS', 'NOT', 'MY', 'KIND', 'OF', 'FORTNESS', 'PAPA', 'IF', 'I', 'HAVE', 'ANY', 'AT', 'ALL', 'WHICH', 'I', "DON'T", 'AGREE', 'TO', 'THOUGH', 'YOU', 'ALWAYS', 'ACCUSING', 'ME', 'OF', 'IT'] +1688-142285-0094-2042: ref=['I', "DON'T", 'KNOW', 'POSITIVELY', 'THAT', 'IT', 'IS', 'HERS', 'EITHER', 'BUT', 'FROM', 'LITTLE', 'THINGS', 'I', 'HAVE', 'GATHERED', 'FROM', 'HIM', 'I', 'FANCY', 'SO'] +1688-142285-0094-2042: hyp=['I', "DON'T", 'KNOW', 'POSITIVELY', 'THAT', 'IT', 'IS', 'HERS', 'EITHER', 'BUT', 'FROM', 'LITTLE', 'THINGS', 'I', 'HAVE', 'GATHERED', 'FROM', 'HIM', 'I', 'FANCY', 'SO'] +1688-142285-0095-2043: ref=['THEY', 'CARED', 'TOO', 'LITTLE', 'TO', 'ASK', 'IN', 'WHAT', 'MANNER', 'HER', 'SON', 'HAD', 'SPOKEN', 'ABOUT', 'HER'] +1688-142285-0095-2043: hyp=['THEY', 'CARED', 'TOO', 'LITTLE', 'TO', 'ASK', 'IN', 'WHAT', 'MANNER', 'HER', 'SON', 'HAD', 'SPOKEN', 'ABOUT', 'HER'] +1998-15444-0000-2204: ref=['IF', 'CALLED', 'TO', 'A', 'CASE', 'SUPPOSED', 'OR', 'SUSPECTED', 'TO', 'BE', 'ONE', 'OF', 'POISONING', 'THE', 'MEDICAL', 'MAN', 'HAS', 'TWO', 'DUTIES', 'TO', 'PERFORM', 'TO', 'SAVE', 'THE', "PATIENT'S", 'LIFE', 'AND', 'TO', 'PLACE', 'HIMSELF', 'IN', 'A', 'POSITION', 'TO', 'GIVE', 'EVIDENCE', 'IF', 'CALLED', 'ON', 'TO', 'DO', 'SO'] +1998-15444-0000-2204: hyp=['IF', 'CALLED', 'TO', 'A', 'CASE', 'SUPPOSED', 'OF', 'SUSPECTED', 'TO', 'BE', 'ONE', 'OF', 'POISONING', 'THE', 'MEDICAL', 'MAN', 'HAS', 'TWO', 'DUTIES', 'TO', 'PERFORM', 'TO', 'SAVE', 'THE', "PATIENT'S", 'LIFE', 'AND', 'TO', 'PLACE', 'HIMSELF', 'IN', 'A', 'POSITION', 'TO', 'GIVE', 'EVIDENCE', 'OF', 'CALLED', 'UNTO'] +1998-15444-0001-2205: ref=['HE', 'SHOULD', 'MAKE', 'INQUIRIES', 'AS', 'TO', 'SYMPTOMS', 'AND', 'TIME', 'AT', 'WHICH', 'FOOD', 'OR', 'MEDICINE', 'WAS', 'LAST', 'TAKEN'] +1998-15444-0001-2205: hyp=['HE', 'SHOULD', 'MAKE', 'INQUIRIES', 'AS', 'TO', 'SYMPTOMS', 'AND', 'TIME', 'AT', 'WHICH', 'FOOD', 'OR', 'MEDICINE', 'WAS', 'LAST', 'TAKEN'] +1998-15444-0002-2206: ref=['HE', 'SHOULD', 'NOTICE', 'THE', 'POSITION', 'AND', 'TEMPERATURE', 'OF', 'THE', 'BODY', 'THE', 'CONDITION', 'OF', 'RIGOR', 'MORTIS', 'MARKS', 'OF', 'VIOLENCE', 'APPEARANCE', 'OF', 'LIPS', 'AND', 'MOUTH'] +1998-15444-0002-2206: hyp=['HE', 'SHOULD', 'NOTICE', 'THE', 'POSITION', 'AND', 'TEMPERATURE', 'OF', 'THE', 'BODY', 'THE', 'CONDITION', 'OF', 'RIGA', 'MORTARS', 'MARKS', 'OF', 'FIDANTS', 'APPEARANCE', 'OF', 'LIPS', 'AND', 'MOUTH'] +1998-15444-0003-2207: ref=['IN', 'MAKING', 'A', 'POST', 'MORTEM', 'EXAMINATION', 'THE', 'ALIMENTARY', 'CANAL', 'SHOULD', 'BE', 'REMOVED', 'AND', 'PRESERVED', 'FOR', 'FURTHER', 'INVESTIGATION'] +1998-15444-0003-2207: hyp=['IN', 'MAKING', 'A', 'POST', 'MODER', 'MAXIMMUNITION', 'THE', 'ELEMENTARY', 'CANAL', 'SHOULD', 'BE', 'REMOVED', 'AND', 'PRESERVED', 'FOR', 'FURTHER', 'INVESTIGATION'] +1998-15444-0004-2208: ref=['THE', 'GUT', 'AND', 'THE', 'GULLET', 'BEING', 'CUT', 'ACROSS', 'BETWEEN', 'THESE', 'LIGATURES', 'THE', 'STOMACH', 'MAY', 'BE', 'REMOVED', 'ENTIRE', 'WITHOUT', 'SPILLING', 'ITS', 'CONTENTS'] +1998-15444-0004-2208: hyp=['THE', 'GUT', 'IN', 'THE', 'COLLEGE', 'BEING', 'CUT', 'ACROSS', 'BETWEEN', 'THESE', 'LIGATURES', 'THE', 'STOMACH', 'MAY', 'BE', 
'REMOVED', 'AND', 'TIRED', 'WITHOUT', 'SPINNING', 'ITS', 'CONTENTS'] +1998-15444-0005-2209: ref=['IF', 'THE', 'MEDICAL', 'PRACTITIONER', 'IS', 'IN', 'DOUBT', 'ON', 'ANY', 'POINT', 'HE', 'SHOULD', 'OBTAIN', 'TECHNICAL', 'ASSISTANCE', 'FROM', 'SOMEONE', 'WHO', 'HAS', 'PAID', 'ATTENTION', 'TO', 'THE', 'SUBJECT'] +1998-15444-0005-2209: hyp=['IF', 'THE', 'MEDICA', 'PRACTITIONERS', 'ENDOWED', 'ON', 'ANY', 'POINT', 'HE', 'SHOULD', 'OBTAIN', 'TECHNICHAL', 'ASSISTANCE', 'FROM', 'SOME', 'ONE', 'WHO', 'HAS', 'PAID', 'ATTENTION', 'TO', 'THE', 'SUBJECT'] +1998-15444-0006-2210: ref=['IN', 'A', 'CASE', 'OF', 'ATTEMPTED', 'SUICIDE', 'BY', 'POISONING', 'IS', 'IT', 'THE', 'DUTY', 'OF', 'THE', 'DOCTOR', 'TO', 'INFORM', 'THE', 'POLICE'] +1998-15444-0006-2210: hyp=['IN', 'A', 'CASE', 'OF', 'ATTEMPTED', 'SUICIDE', 'FOR', 'POISONING', 'IS', 'IT', 'THE', 'DUTY', 'OF', 'THE', 'DOCTOR', 'TO', 'INFORM', 'THE', 'POLICE'] +1998-15444-0007-2211: ref=['THE', 'BEST', 'EMETIC', 'IS', 'THAT', 'WHICH', 'IS', 'AT', 'HAND'] +1998-15444-0007-2211: hyp=['THE', 'BEST', 'AMATIC', 'IS', 'THAT', 'WHICH', 'AT', 'HAND'] +1998-15444-0008-2212: ref=['THE', 'DOSE', 'FOR', 'AN', 'ADULT', 'IS', 'TEN', 'MINIMS'] +1998-15444-0008-2212: hyp=['THE', 'DUST', 'FOR', 'NO', 'DOUBT', 'IS', 'TEN', 'MINIMS'] +1998-15444-0009-2213: ref=['APOMORPHINE', 'IS', 'NOT', 'ALLIED', 'IN', 'PHYSIOLOGICAL', 'ACTION', 'TO', 'MORPHINE', 'AND', 'MAY', 'BE', 'GIVEN', 'IN', 'CASES', 'OF', 'NARCOTIC', 'POISONING'] +1998-15444-0009-2213: hyp=['EPIMORPHY', 'IS', 'NOT', 'ALIT', 'IN', 'PHYSIOLOGICAL', 'ACTION', 'TO', 'MORPHINE', 'AND', 'MAY', 'BE', 'GIVEN', 'IN', 'CASES', 'OF', 'NAUCOTIC', 'POISONING'] +1998-15444-0010-2214: ref=['TICKLING', 'THE', 'FAUCES', 'WITH', 'A', 'FEATHER', 'MAY', 'EXCITE', 'VOMITING'] +1998-15444-0010-2214: hyp=['TICKLING', 'THE', 'FORCES', 'WITH', 'THE', 'FEATHER', 'MAY', 'EXCITE', 'RHOMETTING'] +1998-15444-0011-2215: ref=['IN', 'USING', 'THE', 'ELASTIC', 'STOMACH', 'TUBE', 'SOME', 'FLUID', 'SHOULD', 'BE', 'INTRODUCED', 'INTO', 'THE', 'STOMACH', 'BEFORE', 'ATTEMPTING', 'TO', 'EMPTY', 'IT', 'OR', 'A', 'PORTION', 'OF', 'THE', 'MUCOUS', 'MEMBRANE', 'MAY', 'BE', 'SUCKED', 'INTO', 'THE', 'APERTURE'] +1998-15444-0011-2215: hyp=['IN', 'USING', 'THE', 'ELASTIC', 'STOMACH', 'TUBE', 'SOME', 'FLUID', 'SHOULD', 'BE', 'INTRODUCED', 'INTO', 'THE', 'STOMACH', 'BEFORE', 'ATTEMPTING', 'TO', 'EMPTY', 'IT', 'OR', 'A', 'PORTION', 'OF', 'THE', 'MUCOUS', 'MEMORANE', 'MAY', 'BE', 'SACKED', 'INTO', 'THE', 'APERTURE'] +1998-15444-0012-2216: ref=['THE', 'TUBE', 'SHOULD', 'BE', 'EXAMINED', 'TO', 'SEE', 'THAT', 'IT', 'IS', 'NOT', 'BROKEN', 'OR', 'CRACKED', 'AS', 'ACCIDENTS', 'HAVE', 'HAPPENED', 'FROM', 'NEGLECTING', 'THIS', 'PRECAUTION'] +1998-15444-0012-2216: hyp=['THE', 'TUBE', 'SHOULD', 'BE', 'EXAMINED', 'TO', 'SEE', 'THAT', 'IT', 'IS', 'NOT', 'BROKEN', 'OR', 'CRACKED', 'AS', 'ACCIDENTS', 'HAVE', 'HAPPENED', 'FROM', 'NEGLECTING', 'THIS', 'PRECAUTION'] +1998-15444-0013-2217: ref=['ANTIDOTES', 'ARE', 'USUALLY', 'GIVEN', 'HYPODERMICALLY', 'OR', 'IF', 'BY', 'MOUTH', 'IN', 'THE', 'FORM', 'OF', 'TABLETS'] +1998-15444-0013-2217: hyp=['AND', 'HE', 'DOES', 'A', 'USUALLY', 'GIVEN', 'HYPODERMICALLY', 'OR', 'IF', 'THE', 'MOUTH', 'AND', 'THE', 'FORM', 'OF', 'TABLETS'] +1998-15444-0014-2218: ref=['IN', 'THE', 'ABSENCE', 'OF', 'A', 'HYPODERMIC', 'SYRINGE', 'THE', 'REMEDY', 'MAY', 'BE', 'GIVEN', 'BY', 'THE', 'RECTUM'] +1998-15444-0014-2218: hyp=['IN', 'THE', 'ABSENCE', 'OF', 'THE', 'HYPODERMIC', 'SYRINGE', 'THE', 'REMEDY', 'MAY', 'BE', 'GIVEN', 'BY', 'THE', 'RECTUM'] 
+1998-15444-0015-2219: ref=['NOTICE', 'THE', 'SMELL', 'COLOUR', 'AND', 'GENERAL', 'APPEARANCE', 'OF', 'THE', 'MATTER', 'SUBMITTED', 'FOR', 'EXAMINATION'] +1998-15444-0015-2219: hyp=['NOTICE', 'THE', 'SMILE', 'COLOUR', 'AND', 'GENERAL', 'APPEARANCE', 'OF', 'THE', 'MATTER', 'SUBMITTED', 'FOR', 'EXAMINATION'] +1998-15444-0016-2220: ref=['FOR', 'THE', 'SEPARATION', 'OF', 'AN', 'ALKALOID', 'THE', 'FOLLOWING', 'IS', 'THE', 'PROCESS', 'OF', 'STAS', 'OTTO'] +1998-15444-0016-2220: hyp=['FOR', 'THE', 'SEPARATION', 'OF', 'AN', 'ACALIT', 'THE', 'FOLLOWING', 'IS', 'THE', 'PROCESS', 'OF', 'STATS', 'ARE', 'TO'] +1998-15444-0017-2221: ref=['THIS', 'PROCESS', 'IS', 'BASED', 'UPON', 'THE', 'PRINCIPLE', 'THAT', 'THE', 'SALTS', 'OF', 'THE', 'ALKALOIDS', 'ARE', 'SOLUBLE', 'IN', 'ALCOHOL', 'AND', 'WATER', 'AND', 'INSOLUBLE', 'IN', 'ETHER'] +1998-15444-0017-2221: hyp=['THIS', 'PROCESS', 'IS', 'BASED', 'UPON', 'THE', 'PRINCIPLE', 'THAT', 'THE', 'SOULS', 'OF', 'THE', 'ACCOLITES', 'ARE', 'SOLUBLE', 'IN', 'ACCULENT', 'WATER', 'AND', 'INSOLUBLE', 'IN', 'ETHER'] +1998-15444-0018-2222: ref=['THE', 'PURE', 'ALKALOIDS', 'WITH', 'THE', 'EXCEPTION', 'OF', 'MORPHINE', 'IN', 'ITS', 'CRYSTALLINE', 'FORM', 'ARE', 'SOLUBLE', 'IN', 'ETHER'] +1998-15444-0018-2222: hyp=['THE', 'PURE', 'AKELOITS', 'WAS', 'THE', 'EXCEPTION', 'OF', 'MORPHINE', 'IN', 'ITS', 'CRYSTALLINE', 'FORM', 'A', 'SOLUBLE', 'BENEATH', 'THEM'] +1998-15444-0019-2223: ref=['TWO', 'COOL', 'THE', 'MIXTURE', 'AND', 'FILTER', 'WASH', 'THE', 'RESIDUE', 'WITH', 'STRONG', 'ALCOHOL', 'AND', 'MIX', 'THE', 'FILTRATES'] +1998-15444-0019-2223: hyp=['TWO', 'UR', 'THE', 'MIXTURE', 'AND', 'FILTER', 'WASH', 'THE', 'RESIDUE', 'WITH', 'STRONG', 'ALCOHOL', 'AND', 'MIX', 'THE', 'FIR', 'TRADES'] +1998-15444-0020-2224: ref=['THE', 'RESIDUE', 'MAY', 'BE', 'SET', 'ASIDE', 'FOR', 'THE', 'DETECTION', 'OF', 'THE', 'METALLIC', 'POISONS', 'IF', 'SUSPECTED', 'EXPEL', 'THE', 'ALCOHOL', 'BY', 'CAREFUL', 'EVAPORATION'] +1998-15444-0020-2224: hyp=['THE', 'READY', 'YOU', 'MAY', 'BE', 'SAID', 'ASIDE', 'FOR', 'THE', 'DETECTION', 'OF', 'THE', 'METALLIC', 'POISONS', 'OF', 'SUSPECTED', 'EXPEL', 'THE', 'ACCOHOL', 'BY', 'CAREFUL', 'EVAPORATION'] +1998-15444-0021-2225: ref=['ON', 'THE', 'EVAPORATION', 'OF', 'THE', 'ALCOHOL', 'THE', 'RESINOUS', 'AND', 'FATTY', 'MATTERS', 'SEPARATE'] +1998-15444-0021-2225: hyp=['ON', 'THE', 'EVAPORATION', 'OF', 'THE', 'ALCOHOL', 'THE', 'VEZENOUS', 'AND', 'FATIMATUS', 'SEPARATE'] +1998-15444-0022-2226: ref=['EVAPORATE', 'THE', 'FILTRATE', 'TO', 'A', 'SYRUP', 'AND', 'EXTRACT', 'WITH', 'SUCCESSIVE', 'PORTIONS', 'OF', 'ABSOLUTE', 'ALCOHOL'] +1998-15444-0022-2226: hyp=['EVAPORATE', 'THE', 'FEDERATE', 'TO', 'A', 'CYRUP', 'AN', 'EXTRACT', 'WITH', 'SUCCESSIVE', 'PORTIONS', 'OF', 'ABSOLUTE', 'ALCOHOL'] +1998-15444-0023-2227: ref=['SEPARATE', 'THE', 'ETHEREAL', 'SOLUTION', 'AND', 'EVAPORATE'] +1998-15444-0023-2227: hyp=['SEPARATE', 'THE', 'ETHEREAL', 'SOLUTION', 'AND', 'EVAPORATE'] +1998-15444-0024-2228: ref=['FIVE', 'A', 'PART', 'OF', 'THIS', 'ETHEREAL', 'SOLUTION', 'IS', 'POURED', 'INTO', 'A', 'WATCH', 'GLASS', 'AND', 'ALLOWED', 'TO', 'EVAPORATE'] +1998-15444-0024-2228: hyp=['FIVE', 'A', 'PART', 'OF', 'THIS', 'ETHEREAL', 'SOLUTION', 'IS', 'PUT', 'INTO', 'WATCH', 'GLASS', 'AND', 'ALLOW', 'TO', 'EVAPORATE'] +1998-15444-0025-2229: ref=['TO', 'PURIFY', 'IT', 'ADD', 'A', 'SMALL', 'QUANTITY', 'OF', 'DILUTE', 'SULPHURIC', 'ACID', 'AND', 'AFTER', 'EVAPORATING', 'TO', 'THREE', 'QUARTERS', 'OF', 'ITS', 'BULK', 'ADD', 'A', 'SATURATED', 'SOLUTION', 'OF', 'CARBONATE', 'OF', 'POTASH', 'OR', 'SODA'] 
+1998-15444-0025-2229: hyp=['TO', 'PURIFY', 'IT', 'EDISM', 'A', 'QUANTITY', 'OF', 'DILUTE', 'SUFFER', 'ACID', 'AND', 'AFTER', 'EVAPORATING', 'TO', 'THREE', 'QUARTERS', 'OF', 'ITS', 'BARK', 'ADD', 'A', 'SITUATED', 'SOLUTION', 'OF', 'CARBONATE', 'OF', 'POTASH', 'OR', 'SODA'] +1998-15444-0026-2230: ref=['BOIL', 'THE', 'FINELY', 'DIVIDED', 'SUBSTANCE', 'WITH', 'ABOUT', 'ONE', 'EIGHTH', 'ITS', 'BULK', 'OF', 'PURE', 'HYDROCHLORIC', 'ACID', 'ADD', 'FROM', 'TIME', 'TO', 'TIME', 'POTASSIC', 'CHLORATE', 'UNTIL', 'THE', 'SOLIDS', 'ARE', 'REDUCED', 'TO', 'A', 'STRAW', 'YELLOW', 'FLUID'] +1998-15444-0026-2230: hyp=['BOY', 'THE', 'FINALLY', 'DIVIDEST', 'ABSTANCE', 'WITH', 'ABOUT', 'ONE', 'EIGHTHS', 'ITS', 'BARK', 'OF', 'PURE', 'HYDROCLOIC', 'ACID', 'ADD', 'FROM', 'TIME', 'TO', 'TIME', 'POTASSIC', 'LOW', 'RAGE', 'UNTIL', 'THE', 'SOLIDS', 'ARE', 'REDUCED', 'TO', 'A', 'STRAW', 'YELLOW', 'FLUID'] +1998-15444-0027-2231: ref=['THE', 'RESIDUE', 'OF', 'THE', 'MATERIAL', 'AFTER', 'DIGESTION', 'WITH', 'HYDROCHLORIC', 'ACID', 'AND', 'POTASSIUM', 'CHLORATE', 'MAY', 'HAVE', 'TO', 'BE', 'EXAMINED', 'FOR', 'SILVER', 'LEAD', 'AND', 'BARIUM'] +1998-15444-0027-2231: hyp=['THE', 'RESIDUE', 'OF', 'THE', 'MATERIAL', 'AFTER', 'DIGESTION', 'WAS', 'HYDROCLOGIC', 'ACID', 'AND', 'PROTESTING', 'CHLORODE', 'MAY', 'HAVE', 'TO', 'BE', 'EXAMINED', 'FOR', 'SILVER', 'LEAD', 'AND', 'BARIUM'] +1998-29454-0000-2157: ref=['A', 'THOUSAND', 'BLESSINGS', 'FROM', 'A', 'GRATEFUL', 'HEART'] +1998-29454-0000-2157: hyp=['A', 'THOUSAND', 'BLESSINGS', 'FROM', 'A', 'GRATEFUL', 'HEART'] +1998-29454-0001-2158: ref=['PERUSAL', 'SAID', 'THE', 'PAWNBROKER', "THAT'S", 'THE', 'WAY', 'TO', 'PERNOUNCE', 'IT'] +1998-29454-0001-2158: hyp=['PERUSAL', 'SAID', 'THE', 'POND', 'BROKER', "THAT'S", 'THE', 'WAY', 'TO', 'PRONOUNCE', 'IT'] +1998-29454-0002-2159: ref=['HIS', 'BOOKS', 'TOLD', 'HIM', 'THAT', 'TREASURE', 'IS', 'BEST', 'HIDDEN', 'UNDER', 'LOOSE', 'BOARDS', 'UNLESS', 'OF', 'COURSE', 'YOUR', 'HOUSE', 'HAS', 'A', 'SECRET', 'PANEL', 'WHICH', 'HIS', 'HAD', 'NOT'] +1998-29454-0002-2159: hyp=['HIS', 'BOOKS', 'TOLD', 'HIM', 'THE', 'TREASURES', 'BEST', 'HIDDEN', 'ON', 'A', 'LOOSE', 'BOARDS', 'AND', 'AS', 'OF', 'COURSE', 'YOUR', 'HOUSE', 'HAD', 'A', 'SECRET', 'PANNER', 'WHICH', 'HIS', 'HAD', 'NOT'] +1998-29454-0003-2160: ref=['HE', 'GOT', 'IT', 'UP', 'AND', 'PUSHED', 'HIS', 'TREASURES', 'AS', 'FAR', 'IN', 'AS', 'HE', 'COULD', 'ALONG', 'THE', 'ROUGH', 'CRUMBLY', 'SURFACE', 'OF', 'THE', 'LATH', 'AND', 'PLASTER'] +1998-29454-0003-2160: hyp=['HE', 'GOT', 'IT', 'UP', 'AND', 'PUSHED', 'HIS', 'TREASURES', 'AS', 'FAR', 'IN', 'AS', 'HE', 'COULD', 'ALONG', 'THE', 'ROUGH', 'CRAMBLY', 'SURFACE', 'OF', 'THE', 'LAS', 'AND', 'PLASTER'] +1998-29454-0004-2161: ref=['WHEN', 'DICKIE', 'CAME', 'DOWN', 'HIS', 'AUNT', 'SLIGHTLY', 'SLAPPED', 'HIM', 'AND', 'HE', 'TOOK', 'THE', 'HALFPENNY', 'AND', 'LIMPED', 'OFF', 'OBEDIENTLY'] +1998-29454-0004-2161: hyp=['WHEN', 'DICKIE', 'CAME', 'DOWN', 'HIS', 'AUNT', 'SAT', 'HE', 'SLEPT', 'HIM', 'AND', 'HE', 'TOOK', 'THE', 'HALFPENNY', 'AND', 'LIMPED', 'OFF', 'OBEDIENTLY'] +1998-29454-0005-2162: ref=['HE', 'HAD', 'NEVER', 'SEEN', 'ONE', 'BEFORE', 'AND', 'IT', 'INTERESTED', 'HIM', 'EXTREMELY'] +1998-29454-0005-2162: hyp=['HE', 'HAD', 'NEVER', 'SEEN', 'ONE', 'BEFORE', 'AND', 'IT', 'INTERESTED', 'HIM', 'EXTREMELY'] +1998-29454-0006-2163: ref=['HE', 'LOOKED', 'ABOUT', 'HIM', 'AND', 'KNEW', 'THAT', 'HE', 'DID', 'NOT', 'AT', 'ALL', 'KNOW', 'WHERE', 'HE', 'WAS'] +1998-29454-0006-2163: hyp=['HE', 'LOOKED', 'ABOUT', 'HIM', 'AND', 'KNEW', 'THAT', 'HE', 'DID', 'NOT', 
'AT', 'ALL', 'KNOW', 'WHERE', 'HE', 'WAS'] +1998-29454-0007-2164: ref=["WHAT'S", 'UP', 'MATEY', 'LOST', 'YOUR', 'WAY', 'DICKIE', 'EXPLAINED'] +1998-29454-0007-2164: hyp=["WHAT'S", 'THAT', 'MATE', 'ASKED', 'YOUR', 'WAY', 'DICKIE', 'EXPLAINED'] +1998-29454-0008-2165: ref=['WHEN', 'HE', 'SAID', 'AVE', 'I', 'BIN', 'ASLEEP'] +1998-29454-0008-2165: hyp=['WHEN', 'HE', 'SAID', 'HAVE', 'I', 'BEEN', 'ASLEEP'] +1998-29454-0009-2166: ref=['HERE', 'WE', 'ARE', 'SAID', 'THE', 'MAN'] +1998-29454-0009-2166: hyp=['HERE', 'WE', 'ARE', 'SAID', 'THE', 'MAN'] +1998-29454-0010-2167: ref=['NOT', 'EXACKLY', 'SAID', 'THE', 'MAN', 'BUT', "IT'S", 'ALL', 'RIGHT'] +1998-29454-0010-2167: hyp=['NOT', 'EXACTLY', 'SAID', 'THE', 'MAN', 'BUT', "IT'S", 'ALL', 'RIGHT'] +1998-29454-0011-2168: ref=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'MAN', 'ASKED', 'DICKIE', 'IF', 'HE', 'COULD', 'WALK', 'A', 'LITTLE', 'WAY', 'AND', 'WHEN', 'DICKIE', 'SAID', 'HE', 'COULD', 'THEY', 'SET', 'OUT', 'IN', 'THE', 'MOST', 'FRIENDLY', 'WAY', 'SIDE', 'BY', 'SIDE'] +1998-29454-0011-2168: hyp=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'MAN', 'ASKED', 'DICKIE', 'IF', 'HE', 'COULD', 'WALK', 'A', 'LITTLE', 'WAY', 'AND', 'WHEN', 'DICKIE', 'SAID', 'HE', 'COULD', 'THEY', 'SET', 'OUT', 'IN', 'THE', 'MOST', 'FRIENDLY', 'WAY', 'SIDE', 'BY', 'SIDE'] +1998-29454-0012-2169: ref=['AND', 'THE', 'TEA', 'AND', 'ALL', 'AN', 'THE', 'EGG'] +1998-29454-0012-2169: hyp=['AND', 'THE', 'TINEL', 'AND', 'THE', 'EGG'] +1998-29454-0013-2170: ref=['AND', 'THIS', 'IS', 'THE', 'PRETTIEST', 'PLACE', 'EVER', 'I', 'SEE'] +1998-29454-0013-2170: hyp=['AND', 'THIS', 'IS', 'THE', 'PRETTIEST', 'PLACE', 'EVER', 'I', 'SEE'] +1998-29454-0014-2171: ref=['I', 'SHALL', 'CATCH', 'IT', 'A', 'FAIR', 'TREAT', 'AS', 'IT', 'IS'] +1998-29454-0014-2171: hyp=['I', 'SHOULD', 'CATCH', 'IT', 'IF', 'HER', 'TREAT', 'AS', 'IT', 'IS'] +1998-29454-0015-2172: ref=['SHE', 'WAS', 'WAITIN', 'FOR', 'THE', 'WOOD', 'TO', 'BOIL', 'THE', 'KETTLE', 'WHEN', 'I', 'COME', 'OUT', 'MOTHER'] +1998-29454-0015-2172: hyp=['SHE', 'WAS', 'WAITING', 'FOR', 'THE', 'WOOD', 'TO', 'BOIL', 'THE', 'CATTLE', 'WHEN', 'TO', 'COME', 'OUT', 'MOTHER'] +1998-29454-0016-2173: ref=["AIN'T", 'BAD', 'WHEN', "SHE'S", 'IN', 'A', 'GOOD', 'TEMPER'] +1998-29454-0016-2173: hyp=['AND', 'BAD', 'WHEN', "SHE'S", 'IN', 'A', 'GOOD', 'TEMPER'] +1998-29454-0017-2174: ref=['THAT', "AIN'T", 'WHAT', "SHE'LL", 'BE', 'IN', 'WHEN', 'YOU', 'GETS', 'BACK'] +1998-29454-0017-2174: hyp=['THAT', 'ANNE', 'BUT', "YOU'LL", 'BE', 'IN', 'WHEN', 'YOU', 'GETS', 'BACK'] +1998-29454-0018-2175: ref=['I', 'GOT', 'TO', 'STICK', 'IT', 'SAID', 'DICKIE', 'SADLY', "I'D", 'BEST', 'BE', 'GETTING', 'HOME'] +1998-29454-0018-2175: hyp=['I', 'GOT', 'A', 'STICK', 'IT', 'SAID', 'DICKY', 'SADLY', "I'D", 'BEST', 'BE', 'GETTING', 'HOME'] +1998-29454-0019-2176: ref=['I', "WOULDN'T", 'GO', 'OME', 'NOT', 'IF', 'I', 'WAS', 'YOU', 'SAID', 'THE', 'MAN'] +1998-29454-0019-2176: hyp=['I', "WOULDN'T", 'GO', 'HOME', 'NOT', 'IF', 'I', 'WERE', 'USE', 'SAID', 'THE', 'MAN'] +1998-29454-0020-2177: ref=['NO', 'SAID', 'DICKIE', 'OH', 'NO', 'NO', 'I', 'NEVER'] +1998-29454-0020-2177: hyp=['NO', 'SAID', 'DICKIE', 'OH', 'NO', 'NO', 'I', 'NEVER'] +1998-29454-0021-2178: ref=['I', "AIN'T", 'IT', 'YER', 'HAVE', 'I', 'LIKE', 'WHAT', 'YER', 'AUNT', 'DO'] +1998-29454-0021-2178: hyp=['I', 'ENTERTA', 'HAVE', 'I', 'LIKE', 'WHAT', 'YOU', 'AREN', 'TO'] +1998-29454-0022-2179: ref=['WELL', "THAT'LL", 'SHOW', 'YOU', 'THE', 'SORT', 'OF', 'MAN', 'I', 'AM'] +1998-29454-0022-2179: hyp=['WELL', 'THAT', 'SHOW', 'YOU', 'A', 'SORT', 'OF', 'MEN', 'I', 'AM'] 
+1998-29454-0023-2180: ref=['THE', "MAN'S", 'MANNER', 'WAS', 'SO', 'KIND', 'AND', 'HEARTY', 'THE', 'WHOLE', 'ADVENTURE', 'WAS', 'SO', 'WONDERFUL', 'AND', 'NEW', 'IS', 'IT', 'COUNTRY', 'WHERE', 'YOU', 'GOING'] +1998-29454-0023-2180: hyp=['THE', "MAN'S", 'MANNER', 'WAS', 'SO', 'KIND', 'AND', 'HEARTY', 'THE', 'WHOLE', 'ADVENTUR', 'WAS', 'SO', 'WONDERFUL', 'AND', 'NEW', 'IS', 'IT', 'COUNTRY', 'WHERE', 'YOU', 'GOING'] +1998-29454-0024-2181: ref=['THE', 'SUN', 'SHOT', 'LONG', 'GOLDEN', 'BEAMS', 'THROUGH', 'THE', 'GAPS', 'IN', 'THE', 'HEDGE'] +1998-29454-0024-2181: hyp=['THE', 'SUN', 'HAD', 'LONG', 'GOLDEN', 'BEAMS', 'THROUGH', 'THE', 'GAPS', 'AND', 'THE', 'HEDGE'] +1998-29454-0025-2182: ref=['A', 'BIRD', 'PAUSED', 'IN', 'ITS', 'FLIGHT', 'ON', 'A', 'BRANCH', 'QUITE', 'CLOSE', 'AND', 'CLUNG', 'THERE', 'SWAYING'] +1998-29454-0025-2182: hyp=['A', 'BIRD', 'PASSED', 'IN', 'ITS', 'FLIGHT', 'ON', 'BRANCH', 'QUITE', 'CLOSE', 'AND', 'CLUNG', 'THEIR', 'SWAIN'] +1998-29454-0026-2183: ref=['HE', 'TOOK', 'OUT', 'OF', 'HIS', 'POCKET', 'A', 'NEW', 'ENVELOPE', 'A', 'NEW', 'SHEET', 'OF', 'PAPER', 'AND', 'A', 'NEW', 'PENCIL', 'READY', 'SHARPENED', 'BY', 'MACHINERY'] +1998-29454-0026-2183: hyp=['HE', 'TOOK', 'OUT', 'OF', 'HIS', 'POCKET', 'AND', 'YOUR', 'ENVELOPE', 'AND', 'YOU', 'SEED', 'OF', 'PAPER', 'AND', 'A', 'NEW', 'PENCIL', 'READY', 'SHARPENED', 'BY', 'MACHINERY'] +1998-29454-0027-2184: ref=['AN', 'I', 'ASKS', 'YOU', 'LET', 'ME', 'COME', 'ALONGER', 'YOU', 'GOT', 'THAT'] +1998-29454-0027-2184: hyp=['AND', 'I', 'ASK', 'YOU', 'LET', 'ME', 'COME', 'ALONG', 'OF', 'YOU', 'GOT', 'THAT'] +1998-29454-0028-2185: ref=['GET', 'IT', 'WROTE', 'DOWN', 'THEN', 'DONE'] +1998-29454-0028-2185: hyp=['GENISH', 'WROTE', 'DOWN', 'THEN', 'DONE'] +1998-29454-0029-2186: ref=['THEN', 'HE', 'FOLDED', 'IT', 'AND', 'PUT', 'IT', 'IN', 'HIS', 'POCKET'] +1998-29454-0029-2186: hyp=['THEN', 'HE', 'FOLDED', 'IT', 'AND', 'PUT', 'IT', 'IN', 'HIS', 'POCKET'] +1998-29454-0030-2187: ref=['NOW', "WE'RE", 'SQUARE', 'HE', 'SAID'] +1998-29454-0030-2187: hyp=['NOW', 'HE', 'IS', 'QUEER', 'HE', 'SAID'] +1998-29454-0031-2188: ref=['THEY', 'COULD', 'PUT', 'A', 'MAN', 'AWAY', 'FOR', 'LESS', 'THAN', 'THAT'] +1998-29454-0031-2188: hyp=['THEY', 'COULD', 'PUT', 'A', 'MEN', 'AWAY', 'FOR', 'LESS', 'THAN', 'THAT'] +1998-29454-0032-2189: ref=['I', 'SEE', 'THAT', 'THERE', 'IN', 'A', 'BOOK', 'SAID', 'DICKIE', 'CHARMED'] +1998-29454-0032-2189: hyp=['I', 'SEE', 'THAT', 'THEN', 'A', 'BOOK', 'SAID', 'DICK', 'HAD', 'CHARMED'] +1998-29454-0033-2190: ref=['HE', 'REWARD', 'THE', 'WAKE', 'THE', 'LAST', 'OF', 'THE', 'ENGLISH', 'AND', 'I', 'WUNNERED', 'WHAT', 'IT', 'STOOD', 'FOR'] +1998-29454-0033-2190: hyp=['HE', 'REWARD', 'THE', 'WAKE', 'THE', 'LAST', 'OF', 'THE', 'ENGLISH', 'AND', 'I', 'WANTED', 'WHAT', 'HAD', 'STOOD', 'FOR'] +1998-29454-0034-2191: ref=['WILD', 'ONES', "AIN'T", 'ALF', 'THE', 'SIZE', 'I', 'LAY'] +1998-29454-0034-2191: hyp=['WILD', 'ONES', 'AND', 'A', 'HALF', 'SIZE', 'I', 'LAY'] +1998-29454-0035-2192: ref=['ADVENTURES', 'I', 'SHOULD', 'THINK', 'SO'] +1998-29454-0035-2192: hyp=['ADVENTURES', 'I', 'SHOULD', 'THINK', 'SO'] +1998-29454-0036-2193: ref=['AH', 'SAID', 'DICKIE', 'AND', 'A', 'FULL', 'SILENCE', 'FELL', 'BETWEEN', 'THEM'] +1998-29454-0036-2193: hyp=['AH', 'SAID', 'DICKY', 'AND', 'A', 'FOOT', 'SILENCE', 'FELL', 'BETWEEN', 'THEM'] +1998-29454-0037-2194: ref=['THAT', 'WAS', 'CHARMING', 'BUT', 'IT', 'WAS', 'PLEASANT', 'TOO', 'TO', 'WASH', 'THE', 'MUD', 'OFF', 'ON', 'THE', 'WET', 'GRASS'] +1998-29454-0037-2194: hyp=['THAT', 'WAS', 'CHARMING', 'BUT', 'IT', 'WAS', 
'PLEASANT', 'TOO', 'TO', 'WASH', 'THE', 'MADAM', 'ON', 'THE', 'WET', 'GRASS'] +1998-29454-0038-2195: ref=['DICKIE', 'ALWAYS', 'REMEMBERED', 'THAT', 'MOMENT'] +1998-29454-0038-2195: hyp=['DICKY', 'ALWAYS', 'REMEMBERED', 'THAT', 'MOMENT'] +1998-29454-0039-2196: ref=['SO', 'YOU', 'SHALL', 'SAID', 'MISTER', 'BEALE', 'A', "REG'LER", 'WASH', 'ALL', 'OVER', 'THIS', 'VERY', 'NIGHT', 'I', 'ALWAYS', 'LIKE', 'A', 'WASH', 'MESELF'] +1998-29454-0039-2196: hyp=['SO', 'YOU', 'SHALL', 'SAID', 'MISTER', 'BEALE', 'A', 'REGULAR', 'WASH', 'ALL', 'OVER', 'THIS', 'VERY', 'NIGHT', 'I', 'ALWAYS', 'LIKE', 'A', 'WASH', 'MESELF'] +1998-29454-0040-2197: ref=['SOME', 'BLOKES', 'THINK', 'IT', 'PAYS', 'TO', 'BE', 'DIRTY', 'BUT', 'IT', "DON'T"] +1998-29454-0040-2197: hyp=['SOME', 'LOAST', 'THINK', 'IT', 'PAYS', 'TO', 'BE', 'DIRTY', 'BUT', 'IT', "DON'T"] +1998-29454-0041-2198: ref=['IF', "YOU'RE", 'CLEAN', 'THEY', 'SAY', 'HONEST', 'POVERTY', 'AN', 'IF', "YOU'RE", 'DIRTY', 'THEY', 'SAY', 'SERVE', 'YOU', 'RIGHT'] +1998-29454-0041-2198: hyp=['IF', 'YOU', 'CLEAN', 'THEY', 'SAY', 'HONEST', 'POVERTY', 'AND', 'IF', "YOU'RE", 'DIRTY', 'THEY', 'SAY', 'SERVE', 'YOU', 'RIGHT'] +1998-29454-0042-2199: ref=['YOU', 'ARE', 'GOOD', 'SAID', 'DICKIE', 'I', 'DO', 'LIKE', 'YOU'] +1998-29454-0042-2199: hyp=['YOU', 'ARE', 'GOOD', 'SAID', 'DICKIE', 'I', 'DO', 'LIKE', 'YOU'] +1998-29454-0043-2200: ref=['I', 'KNOW', 'YOU', 'WILL', 'SAID', 'DICKIE', 'WITH', 'ENTHUSIASM', 'I', 'KNOW', 'OW', 'GOOD', 'YOU', 'ARE'] +1998-29454-0043-2200: hyp=['I', 'KNOW', 'YOU', 'WILL', 'SAID', 'DICKIE', 'WITH', 'ENTHUSIASM', 'I', 'KNOW', 'HOW', 'GOOD', 'YOU', 'ARE'] +1998-29454-0044-2201: ref=['BLESS', 'ME', 'SAID', 'MISTER', 'BEALE', 'UNCOMFORTABLY', 'WELL', 'THERE'] +1998-29454-0044-2201: hyp=['BLESS', 'ME', 'SAID', 'MISTER', 'BEALE', 'UNCOMFORTABLY', 'WELL', 'THEN'] +1998-29454-0045-2202: ref=['STEP', 'OUT', 'SONNY', 'OR', "WE'LL", 'NEVER', 'GET', 'THERE', 'THIS', 'SIDE', 'CHRISTMAS'] +1998-29454-0045-2202: hyp=['SABATANI', 'OR', "WE'LL", 'NEVER', 'GET', 'THERE', 'THIS', 'OUT', 'OF', 'CHRISTMAS'] +1998-29454-0046-2203: ref=['WELL', "YOU'LL", 'KNOW', 'ALL', 'ABOUT', 'IT', 'PRESENTLY'] +1998-29454-0046-2203: hyp=['WELL', 'YOU', 'KNOW', 'ALL', 'ABOUT', 'IT', 'PRESENTLY'] +1998-29455-0000-2232: ref=['THE', 'SINGING', 'AND', 'LAUGHING', 'WENT', 'ON', 'LONG', 'AFTER', 'HE', 'HAD', 'FALLEN', 'ASLEEP', 'AND', 'IF', 'LATER', 'IN', 'THE', 'EVENING', 'THERE', 'WERE', 'LOUD', 'VOICED', 'ARGUMENTS', 'OR', 'QUARRELS', 'EVEN', 'DICKIE', 'DID', 'NOT', 'HEAR', 'THEM'] +1998-29455-0000-2232: hyp=['THE', 'SINGING', 'AND', 'LAUGHING', 'WENT', 'ON', 'LONG', 'AFTER', 'HE', 'HAD', 'FALLEN', 'ASLEEP', 'AND', 'IF', 'LATE', 'IN', 'THE', 'EVENING', 'THEY', 'WERE', 'LOUD', 'FOREST', 'ARGUMENTS', 'OR', 'QUARRELS', 'EVEN', 'DICKY', 'DID', 'NOT', 'HEAR', 'THEM'] +1998-29455-0001-2233: ref=["WHAT'S", 'ALL', 'THAT', 'THERE', 'DICKIE', 'ASKED', 'POINTING', 'TO', 'THE', 'ODD', 'KNOBBLY', 'BUNDLES', 'OF', 'ALL', 'SORTS', 'AND', 'SHAPES', 'TIED', 'ON', 'TO', 'THE', "PERAMBULATOR'S", 'FRONT'] +1998-29455-0001-2233: hyp=["WHAT'S", 'ON', 'THAT', 'THERE', 'DICKIE', 'ASKED', 'POINTING', 'TO', 'THE', 'OTT', 'NOBBY', 'BUNDLES', 'OF', 'ALL', 'SORTS', 'AND', 'SHAPES', 'TIED', 'ON', 'TO', 'THE', "PERAMBULATOR'S", 'FRONT'] +1998-29455-0002-2234: ref=['TELL', 'YER', 'WHAT', 'MATE', 'LOOKS', 'TO', 'ME', 'AS', 'IF', "I'D", 'TOOK', 'A', 'FANCY', 'TO', 'YOU'] +1998-29455-0002-2234: hyp=['TELL', 'YOU', 'WHAT', 'MADE', 'LOOKS', 'TO', 'ME', 'AS', 'IF', 'I', 'TOOK', 'A', 'FANCY', 'TO', 'YOU'] +1998-29455-0003-2235: 
ref=['SWELP', 'ME', 'HE', 'SAID', 'HELPLESSLY'] +1998-29455-0003-2235: hyp=['SWAP', 'ME', 'HE', 'SAID', 'HELPLESSLY'] +1998-29455-0004-2236: ref=['OH', 'LOOK', 'SAID', 'DICKIE', 'THE', 'FLOWERS'] +1998-29455-0004-2236: hyp=['O', 'LOOK', 'SAID', 'DICKY', 'THE', 'FLOWERS'] +1998-29455-0005-2237: ref=["THEY'RE", 'ONLY', 'WEEDS', 'SAID', 'BEALE'] +1998-29455-0005-2237: hyp=['THEY', 'ARE', 'ONLY', 'REEDS', 'SAID', 'BEALE'] +1998-29455-0006-2238: ref=['BUT', 'I', 'SHALL', 'HAVE', 'THEM', 'WHILE', "THEY'RE", 'ALIVE', 'SAID', 'DICKIE', 'AS', 'HE', 'HAD', 'SAID', 'TO', 'THE', 'PAWNBROKER', 'ABOUT', 'THE', 'MOONFLOWERS'] +1998-29455-0006-2238: hyp=['BUT', 'I', 'SHALL', 'HAVE', 'THEM', 'WHERE', 'THEY', 'ARE', 'ALIVE', 'SAID', 'DICKY', 'AS', 'HE', 'HAD', 'SAID', 'TO', 'THE', 'PAWNBROKER', 'BY', 'THE', 'MOONFLOWERS'] +1998-29455-0007-2239: ref=['HI', 'THERE', 'GOES', 'A', 'RABBIT'] +1998-29455-0007-2239: hyp=['AY', 'THERE', 'GOES', 'A', 'RABBIT'] +1998-29455-0008-2240: ref=['SEE', 'IM', 'CROST', 'THE', 'ROAD', 'THERE', 'SEE', 'HIM'] +1998-29455-0008-2240: hyp=['SEEM', 'QUEST', 'ROAD', 'THERE', 'SEE', 'EM'] +1998-29455-0009-2241: ref=['HOW', 'BEAUTIFUL', 'SAID', 'DICKIE', 'WRIGGLING', 'WITH', 'DELIGHT'] +1998-29455-0009-2241: hyp=['HOW', 'BEAUTIFUL', 'SAID', 'DICKY', 'WRIGGLING', 'WIS', 'DELIGHT'] +1998-29455-0010-2242: ref=['THIS', 'LIFE', 'OF', 'THE', 'RABBIT', 'AS', 'DESCRIBED', 'BY', 'MISTER', 'BEALE', 'WAS', 'THE', "CHILD'S", 'FIRST', 'GLIMPSE', 'OF', 'FREEDOM', "I'D", 'LIKE', 'TO', 'BE', 'A', 'RABBIT'] +1998-29455-0010-2242: hyp=['THIS', 'LIFE', 'OF', 'THE', 'RABBIT', 'AS', 'DESCRIBED', 'BY', 'MISTER', 'BEALE', 'WAS', 'THE', "CHILD'S", 'FIRST', 'GLIMPSE', 'OF', 'FREEDOM', "I'D", 'LIKE', 'TO', 'BE', 'A', 'RABBIT'] +1998-29455-0011-2243: ref=["OW'M", 'I', 'TO', 'WHEEL', 'THE', 'BLOOMIN', 'PRAM', 'IF', 'YOU', 'GOES', 'ON', 'LIKE', 'AS', 'IF', 'YOU', 'WAS', 'A', 'BAG', 'OF', 'EELS'] +1998-29455-0011-2243: hyp=['ALL', 'MY', 'TOWER', 'THE', 'ROOM', 'AND', 'PRAM', 'IF', 'YOU', 'GO', 'SON', 'LIKE', 'US', 'IF', 'YOU', 'WAS', 'A', 'BICK', 'OF', 'FIELDS'] +1998-29455-0012-2244: ref=['I', 'LIKE', 'YOU', 'NEXTER', 'MY', 'OWN', 'DADDY', 'AND', 'MISTER', 'BAXTER', 'NEXT', 'DOOR'] +1998-29455-0012-2244: hyp=['I', 'LIKE', 'YOU', 'NEXT', 'TO', 'MY', 'OWN', 'DIRTY', 'AND', 'MISTER', 'BEXT', 'THE', 'NEXT', 'DOOR'] +1998-29455-0013-2245: ref=["THAT'S", 'ALL', 'RIGHT', 'SAID', 'MISTER', 'BEALE', 'AWKWARDLY'] +1998-29455-0013-2245: hyp=["THAT'S", 'ALL', 'RIGHT', 'SAID', 'MISTER', 'BEALE', 'AWKWARDLY'] +1998-29455-0014-2246: ref=['DICKIE', 'QUICK', 'TO', 'IMITATE', 'TOUCHED', 'HIS'] +1998-29455-0014-2246: hyp=['DICKIE', 'QUICKLY', 'IMITATE', 'TOUCHED', 'HIS'] +1998-29455-0015-2247: ref=['POOR', 'LITTLE', 'MAN', 'SAID', 'THE', 'LADY', 'YOU', 'MISS', 'YOUR', 'MOTHER', "DON'T", 'YOU'] +1998-29455-0015-2247: hyp=['POOR', 'LITTLE', 'MAN', 'SAID', 'THE', 'LADY', 'YOU', 'MISS', 'YOUR', 'MOTHER', "DON'T", 'YOU'] +1998-29455-0016-2248: ref=['OH', 'WELL', 'DONE', 'LITTLE', 'UN', 'SAID', 'MISTER', 'BEALE', 'TO', 'HIMSELF'] +1998-29455-0016-2248: hyp=['OH', 'WELL', 'DONE', 'LITTLE', 'ONE', 'SAID', 'MISTER', 'BEE', 'TO', 'HIMSELF'] +1998-29455-0017-2249: ref=['THE', 'TWO', 'TRAVELLERS', 'WERE', 'LEFT', 'FACING', 'EACH', 'OTHER', 'THE', 'RICHER', 'BY', 'A', 'PENNY', 'AND', 'OH', 'WONDERFUL', 'GOOD', 'FORTUNE', 'A', 'WHOLE', 'HALF', 'CROWN'] +1998-29455-0017-2249: hyp=['THE', 'TWO', 'TRAVELLERS', 'WERE', 'LEFT', 'FACING', 'EACH', 'OTHER', 'THE', 'RICHER', 'BY', 'A', 'PENNY', 'AND', 'O', 'WONDERFUL', 'GOOD', 'FORTUNE', 'A', 'WHOLE', 
'HALF', 'CROWN'] +1998-29455-0018-2250: ref=['NO', 'I', 'NEVER', 'SAID', 'DICKIE', "ERE'S", 'THE', 'STEEVER'] +1998-29455-0018-2250: hyp=['NO', 'I', 'NEVER', 'SAID', 'DICKIE', 'YES', 'THE', 'STEPLE'] +1998-29455-0019-2251: ref=['YOU', 'STICK', 'TO', 'THAT', 'SAID', 'BEALE', 'RADIANT', 'WITH', 'DELIGHT', "YOU'RE", 'A', 'FAIR', 'MASTERPIECE', 'YOU', 'ARE', 'YOU', 'EARNED', 'IT', 'HONEST', 'IF', 'EVER', 'A', 'KID', 'DONE'] +1998-29455-0019-2251: hyp=['YOU', 'STICK', 'TO', 'THAT', 'SAID', 'BEER', 'RADIANT', 'WITH', 'DELIGHT', 'YOU', 'ARE', 'A', 'FAIR', 'MASTERPIECE', 'YOU', 'ARE', 'YOU', 'EARNED', 'IT', 'HONEST', 'IF', 'EVER', 'KIDNE'] +1998-29455-0020-2252: ref=['THEY', 'WENT', 'ON', 'UP', 'THE', 'HILL', 'AS', 'HAPPY', 'AS', 'ANY', 'ONE', 'NEED', 'WISH', 'TO', 'BE'] +1998-29455-0020-2252: hyp=['THEY', 'WENT', 'ON', 'UP', 'THE', 'HILL', 'AS', 'HAPPY', 'AS', 'ANY', 'ONE', 'NEED', 'WISH', 'TO', 'BE'] +1998-29455-0021-2253: ref=['PLEASE', 'DO', 'NOT', 'BE', 'TOO', 'SHOCKED'] +1998-29455-0021-2253: hyp=['PLEASE', "DON'T", 'BE', 'TOO', 'SHOCKED'] +1998-29455-0022-2254: ref=['REMEMBER', 'THAT', 'NEITHER', 'OF', 'THEM', 'KNEW', 'ANY', 'BETTER'] +1998-29455-0022-2254: hyp=['REMEMBER', 'THAT', 'NEITHER', 'OF', 'THEM', 'KNEW', 'ANY', 'BETTER'] +1998-29455-0023-2255: ref=['TO', 'THE', 'ELDER', 'TRAMP', 'LIES', 'AND', 'BEGGING', 'WERE', 'NATURAL', 'MEANS', 'OF', 'LIVELIHOOD'] +1998-29455-0023-2255: hyp=['TO', 'THE', 'OTHER', 'TRAMP', 'LIES', 'AND', 'PEGGING', 'WHEN', 'NATURAL', 'MEANS', 'OF', 'LIVELIHOOD'] +1998-29455-0024-2256: ref=['BUT', 'YOU', 'SAID', 'THE', 'BED', 'WITH', 'THE', 'GREEN', 'CURTAINS', 'URGED', 'DICKIE'] +1998-29455-0024-2256: hyp=['BUT', 'YOU', 'SAID', 'THE', 'BED', 'WAS', 'THE', 'GREEN', 'CURTAINS', 'URGED', 'DICKIE'] +1998-29455-0025-2257: ref=['WHICH', 'THIS', "AIN'T", 'NOT', 'BY', 'NO', 'MEANS'] +1998-29455-0025-2257: hyp=['WHICH', 'THIS', 'END', 'NOT', 'BY', 'NO', 'MEANS'] +1998-29455-0026-2258: ref=['THE', 'NIGHT', 'IS', 'FULL', 'OF', 'INTERESTING', 'LITTLE', 'SOUNDS', 'THAT', 'WILL', 'NOT', 'AT', 'FIRST', 'LET', 'YOU', 'SLEEP', 'THE', 'RUSTLE', 'OF', 'LITTLE', 'WILD', 'THINGS', 'IN', 'THE', 'HEDGES', 'THE', 'BARKING', 'OF', 'DOGS', 'IN', 'DISTANT', 'FARMS', 'THE', 'CHIRP', 'OF', 'CRICKETS', 'AND', 'THE', 'CROAKING', 'OF', 'FROGS'] +1998-29455-0026-2258: hyp=['THE', 'NIGHT', 'IS', 'FULL', 'OF', 'INTERESTING', 'LITTLE', 'SOUNDS', 'THAT', 'WILL', 'NOT', 'AT', 'FIRST', 'LET', 'YOU', 'SLEEP', 'THE', 'RUSTLE', 'OF', 'LITTLE', 'WHITE', 'THINGS', 'ON', 'THE', 'HATCHES', 'THE', 'BARKING', 'OF', 'DOGS', 'AND', 'DISTANT', 'FARMS', 'THE', 'CHIRP', 'OF', 'CRICKETS', 'AND', 'THE', 'CROAKING', 'OF', 'FROGS'] +1998-29455-0027-2259: ref=['THE', 'NEW', 'GAME', 'OF', 'BEGGING', 'AND', 'INVENTING', 'STORIES', 'TO', 'INTEREST', 'THE', 'PEOPLE', 'FROM', 'WHOM', 'IT', 'WAS', 'WORTH', 'WHILE', 'TO', 'BEG', 'WENT', 'ON', 'GAILY', 'DAY', 'BY', 'DAY', 'AND', 'WEEK', 'BY', 'WEEK', 'AND', 'DICKIE', 'BY', 'CONSTANT', 'PRACTICE', 'GREW', 'SO', 'CLEVER', 'AT', 'TAKING', 'HIS', 'PART', 'IN', 'THE', 'ACTING', 'THAT', 'MISTER', 'BEALE', 'WAS', 'QUITE', 'DAZED', 'WITH', 'ADMIRATION'] +1998-29455-0027-2259: hyp=['THE', 'NEW', 'GAME', 'OF', 'BEGGING', 'AND', 'INVENTING', 'STORIES', 'TO', 'INTEREST', 'THE', 'PEOPLE', 'FROM', 'WHOM', 'IT', 'WAS', 'WORSE', 'WIDE', 'TO', 'BEG', 'WENT', 'ON', 'GAILY', 'DAY', 'BY', 'DAY', 'AND', 'WEEK', 'BY', 'WEEK', 'AND', 'DICKIE', 'BY', 'CONSTANT', 'PRACTICE', 'GREW', 'SO', 'CLEVER', 'TAKING', 'HIS', 'PART', 'IN', 'THE', 'ACTING', 'THAT', 'MISTER', 'BEA', 'WAS', 'QUITE', 'DAZED', 
'WITH', 'ADMIRATION'] +1998-29455-0028-2260: ref=['BLESSED', 'IF', 'I', 'EVER', 'SEE', 'SUCH', 'A', 'NIPPER', 'HE', 'SAID', 'OVER', 'AND', 'OVER', 'AGAIN'] +1998-29455-0028-2260: hyp=['BLESSED', 'FOR', 'EVER', 'SEE', 'SUCH', 'A', 'NIBBER', 'HE', 'SAID', 'OVER', 'AND', 'OVER', 'AGAIN'] +1998-29455-0029-2261: ref=['CLEVER', 'AS', 'A', 'TRAINDAWG', 'E', 'IS', 'AN', 'ALL', 'OUTER', 'IS', 'OWN', 'EAD'] +1998-29455-0029-2261: hyp=['CLEVER', 'AS', 'A', 'TRAIN', 'DOG', 'IS', 'IN', 'OR', "OUTER'S", 'OWNETTE'] +1998-29455-0030-2262: ref=['I', "AIN'T", 'SURE', 'AS', 'I', "ADN'T", 'BETTER', 'STICK', 'TO', 'THE', 'ROAD', 'AND', 'KEEP', 'AWAY', 'FROM', 'OLD', 'ANDS', 'LIKE', 'YOU', 'JIM'] +1998-29455-0030-2262: hyp=['I', 'AM', 'SURE', 'AS', 'I', "HADN'T", 'BETTER', 'STICK', 'TO', 'THE', 'ROAD', 'AND', 'KEEP', 'AWAY', 'FROM', 'OLD', 'ENDS', 'LIKE', 'EUGEUM'] +1998-29455-0031-2263: ref=['I', 'OPE', "E'S", 'CLEVER', 'ENOUGH', 'TO', 'DO', 'WOT', "E'S", 'TOLD', 'KEEP', 'IS', 'MUG', 'SHUT', "THAT'S", 'ALL'] +1998-29455-0031-2263: hyp=['I', 'OPEUS', 'LOVE', 'ENOUGH', 'TO', 'DO', 'WHAT', 'HE', 'STOOTE', 'HE', 'WAS', 'MUCH', 'AT', "THAT'S", 'ALL'] +1998-29455-0032-2264: ref=['IF', "E'S", 'STRAIGHT', "E'LL", 'DO', 'FOR', 'ME', 'AND', 'IF', 'HE', "AIN'T", "I'LL", 'DO', 'FOR', 'IM', 'SEE'] +1998-29455-0032-2264: hyp=['IF', 'HE', 'STRAIGHT', "YOU'LL", 'DO', 'FOR', 'ME', 'AND', 'HE', 'AND', "I'LL", 'DO', 'FOR', 'HIM', 'SEE'] +1998-29455-0033-2265: ref=['SEE', 'THAT', 'BLOKE', 'JUST', 'NOW', 'SAID', 'MISTER', 'BEALE', 'YUSS', 'SAID', 'DICKIE'] +1998-29455-0033-2265: hyp=['SEE', 'THAT', 'LOGIS', 'NOW', 'SAID', 'MISTER', 'BEALE', 'YES', 'SAID', 'DICKIE'] +1998-29455-0034-2266: ref=['WELL', 'YOU', 'NEVER', 'SEE', 'IM'] +1998-29455-0034-2266: hyp=['WELL', 'YOU', 'NEVER', 'SEE', 'HIM'] +1998-29455-0035-2267: ref=['IF', 'ANY', 'ONE', 'ARSTS', 'YOU', 'IF', 'YOU', 'EVER', 'SEE', 'IM', 'YOU', 'NEVER', 'SET', 'EYES', 'ON', 'IM', 'IN', 'ALL', 'YOUR', 'BORN', 'NOT', 'TO', 'REMEMBER', 'IM'] +1998-29455-0035-2267: hyp=['IF', 'ANY', 'ONE', 'ASKS', 'YOU', 'IF', 'YOU', 'EVER', 'SEE', 'HIM', 'YOU', 'NEVER', 'SAID', 'EYES', 'ON', 'HIM', 'IN', 'ALL', "YOU'RE", 'BORN', 'NOT', 'TO', 'REMEMBER'] +1998-29455-0036-2268: ref=['DICKIE', 'WAS', 'FULL', 'OF', 'QUESTIONS', 'BUT', 'MISTER', 'BEALE', 'HAD', 'NO', 'ANSWERS', 'FOR', 'THEM'] +1998-29455-0036-2268: hyp=['DICKY', 'WAS', 'FULL', 'OF', 'QUESTIONS', 'BUT', 'MISTER', 'BEAUT', 'NO', 'ANSWERS', 'WERE', 'THEM'] +1998-29455-0037-2269: ref=['NOR', 'WAS', 'IT', 'SUNDAY', 'ON', 'WHICH', 'THEY', 'TOOK', 'A', 'REST', 'AND', 'WASHED', 'THEIR', 'SHIRTS', 'ACCORDING', 'TO', 'MISTER', "BEALE'S", 'RULE', 'OF', 'LIFE'] +1998-29455-0037-2269: hyp=['NOR', 'WAS', 'IT', 'SUNDAY', 'ON', 'WHICH', 'THEY', 'TOOK', 'A', 'REST', 'AND', 'WASHED', 'THEIR', 'SHIRTS', 'ACCORDING', 'TO', 'MISTER', "BEALE'S", 'RULE', 'OF', 'LIFE'] +1998-29455-0038-2270: ref=['THEY', 'DID', 'NOT', 'STAY', 'THERE', 'BUT', 'WALKED', 'OUT', 'ACROSS', 'THE', 'DOWNS', 'WHERE', 'THE', 'SKYLARKS', 'WERE', 'SINGING', 'AND', 'ON', 'A', 'DIP', 'OF', 'THE', 'DOWNS', 'CAME', 'UPON', 'GREAT', 'STONE', 'WALLS', 'AND', 'TOWERS', 'VERY', 'STRONG', 'AND', 'GRAY'] +1998-29455-0038-2270: hyp=['THEY', 'DID', 'NOT', 'STAY', 'THERE', 'BUT', 'WALKED', 'OUT', 'ACROSS', 'THE', 'DOWNS', 'WITH', 'THE', 'SKYLACKS', 'WAS', 'SINGING', 'AND', 'ON', 'A', 'DIP', 'OF', 'THE', 'DOWNS', 'CAME', 'UPON', 'GREAT', 'STONE', 'WARDS', 'AND', 'TOWERS', 'VERY', 'STRONG', 'AND', 'GRAY'] +1998-29455-0039-2271: ref=["WHAT'S", 'THAT', 'THERE', 'SAID', 'DICKIE'] +1998-29455-0039-2271: 
hyp=["WHAT'S", 'THAT', 'THERE', 'SAID', 'DICKY'] +2033-164914-0000-661: ref=['REPLIED', 'HE', 'OF', 'A', 'TRUTH', 'I', 'HEARD', 'HIM', 'NOT', 'AND', 'I', 'WOT', 'HIM', 'NOT', 'AND', 'FOLKS', 'ARE', 'ALL', 'SLEEPING'] +2033-164914-0000-661: hyp=['REPLIED', 'HE', 'OF', 'A', 'TRUTH', 'I', 'HEARD', 'HIM', 'NOT', 'AND', 'I', 'WOT', 'HIM', 'NOT', 'AND', 'FOLKS', 'ARE', 'ALL', 'SLEEPING'] +2033-164914-0001-662: ref=['BUT', 'SHE', 'SAID', 'WHOMSOEVER', 'THOU', 'SEEST', 'AWAKE', 'HE', 'IS', 'THE', 'RECITER'] +2033-164914-0001-662: hyp=['BUT', 'SHE', 'SAID', 'WHOMSOEVER', 'THOU', 'SEEST', 'AWAKE', 'HE', 'IS', 'THE', 'RESIDER'] +2033-164914-0002-663: ref=['THEN', 'SAID', 'THE', 'EUNUCH', 'ART', 'THOU', 'HE', 'WHO', 'REPEATED', 'POETRY', 'BUT', 'NOW', 'AND', 'MY', 'LADY', 'HEARD', 'HIM'] +2033-164914-0002-663: hyp=['THEN', 'SAID', 'THE', 'EUNUCH', 'ART', 'THOU', 'HE', 'WHO', 'REPEATED', 'POETRY', 'BUT', 'NOW', 'AND', 'MY', 'LADY', 'HEARD', 'HIM'] +2033-164914-0003-664: ref=['REJOINED', 'THE', 'EUNUCH', 'WHO', 'THEN', 'WAS', 'THE', 'RECITER', 'POINT', 'HIM', 'OUT', 'TO', 'ME'] +2033-164914-0003-664: hyp=['REJOINED', 'THE', 'EUNUCH', 'WHO', 'THEN', 'WAS', 'THE', 'RECITER', 'POINT', 'HIM', 'OUT', 'TO', 'ME'] +2033-164914-0004-665: ref=['BY', 'ALLAH', 'REPLIED', 'THE', 'FIREMAN', 'I', 'TELL', 'THEE', 'THE', 'TRUTH'] +2033-164914-0004-665: hyp=['BY', 'ALLAH', 'REPLIED', 'THE', 'FIREMAN', 'I', 'TELL', 'THEE', 'THE', 'TRUTH'] +2033-164914-0005-666: ref=['TELL', 'ME', 'WHAT', 'HAPPENED', 'QUOTH', 'ZAU', 'AL', 'MAKAN'] +2033-164914-0005-666: hyp=['TELL', 'ME', 'WHAT', 'HAPPENED', 'QUOTH', 'OWL', 'MAKAN'] +2033-164914-0006-667: ref=['WHAT', 'AILS', 'THEE', 'THEN', 'THAT', 'THOU', 'MUST', 'NEEDS', 'RECITE', 'VERSES', 'SEEING', 'THAT', 'WE', 'ARE', 'TIRED', 'OUT', 'WITH', 'WALKING', 'AND', 'WATCHING', 'AND', 'ALL', 'THE', 'FOLK', 'ARE', 'ASLEEP', 'FOR', 'THEY', 'REQUIRE', 'SLEEP', 'TO', 'REST', 'THEM', 'OF', 'THEIR', 'FATIGUE'] +2033-164914-0006-667: hyp=['WHAT', 'ELSE', 'THEE', 'THEN', 'THAT', 'THOU', 'MUST', 'NEEDS', 'RESIDE', 'VERSES', 'SEEING', 'THAT', 'WE', 'ARE', 'TIRED', 'OUT', 'WITH', 'WALKING', 'AND', 'WATCHING', 'AND', 'ALL', 'THE', 'FOLK', 'ARE', 'ASLEEP', 'FOR', 'THEY', 'REQUIRE', 'SLEEP', 'TO', 'REST', 'THEM', 'OF', 'THEIR', 'FATIGUE'] +2033-164914-0007-668: ref=['AND', 'HE', 'ALSO', 'IMPROVISED', 'THE', 'TWO', 'FOLLOWING', 'DISTICHS'] +2033-164914-0007-668: hyp=['AND', 'HE', 'ALSO', 'PROVISED', 'THE', 'TWO', 'FOLLOWING', 'DISTICHS'] +2033-164914-0008-669: ref=['WHEN', 'NUZHAT', 'AL', 'ZAMAN', 'HEARD', 'THE', 'FIRST', 'IMPROVISATION', 'SHE', 'CALLED', 'TO', 'MIND', 'HER', 'FATHER', 'AND', 'HER', 'MOTHER', 'AND', 'HER', 'BROTHER', 'AND', 'THEIR', 'WHILOME', 'HOME', 'THEN', 'SHE', 'WEPT', 'AND', 'CRIED', 'AT', 'THE', 'EUNUCH', 'AND', 'SAID', 'TO', 'HIM', 'WOE', 'TO', 'THEE'] +2033-164914-0008-669: hyp=['WHEN', 'UZHAT', 'AL', 'ZAMAN', 'HEARD', 'THE', 'FIRST', 'IMPROVISATION', 'SHE', 'CALLED', 'TO', 'MINE', 'HER', 'FATHER', 'AND', 'HER', 'MOTHER', 'AND', 'HER', 'BROTHER', 'AND', 'THEIR', 'WILL', 'ON', 'HOME', 'THEN', 'SHE', 'WEPT', 'AND', 'CRIED', 'TO', 'THE', 'EUNUCH', 'AND', 'SAID', 'TO', 'HIM', 'WOE', 'TO', 'THEE'] +2033-164914-0009-670: ref=['HE', 'WHO', 'RECITED', 'THE', 'FIRST', 'TIME', 'HATH', 'RECITED', 'A', 'SECOND', 'TIME', 'AND', 'I', 'HEARD', 'HIM', 'HARD', 'BY'] +2033-164914-0009-670: hyp=['HE', 'WHO', 'RECITED', 'THE', 'FIRST', 'TIME', 'HAD', 'RECITED', 'A', 'SECOND', 'TIME', 'AND', 'I', 'HEARD', 'HIM', 'HARD', 'BY'] +2033-164914-0010-671: ref=['BY', 'ALLAH', 'AN', 'THOU', 'FETCH', 'HIM', 
'NOT', 'TO', 'ME', 'I', 'WILL', 'ASSUREDLY', 'ROUSE', 'THE', 'CHAMBERLAIN', 'ON', 'THEE', 'AND', 'HE', 'SHALL', 'BEAT', 'THEE', 'AND', 'CAST', 'THEE', 'OUT'] +2033-164914-0010-671: hyp=['BY', 'ALLAH', 'AN', 'THOU', 'FETCH', 'HIM', 'NOT', 'TO', 'ME', 'I', 'WILL', 'ASSUREDLY', 'ROUSE', 'THE', 'CHAMBERLAIN', 'ON', 'THEE', 'AND', 'HE', 'SHALL', 'BEAT', 'THEE', 'AND', 'CAST', 'THEE', 'OUT'] +2033-164914-0011-672: ref=['BUT', 'TAKE', 'THESE', 'HUNDRED', 'DINERS', 'AND', 'GIVE', 'THEM', 'TO', 'THE', 'SINGER', 'AND', 'BRING', 'HIM', 'TO', 'ME', 'GENTLY', 'AND', 'DO', 'HIM', 'NO', 'HURT'] +2033-164914-0011-672: hyp=['BUT', 'TAKE', 'THESE', 'HUNDRED', 'DINNERS', 'AND', 'GIVE', 'THEM', 'TO', 'THE', 'SINGER', 'AND', 'BRING', 'HIM', 'TO', 'ME', 'GENTLY', 'AND', 'DO', 'HIM', 'NO', 'HURT'] +2033-164914-0012-673: ref=['RETURN', 'QUICKLY', 'AND', 'LINGER', 'NOT'] +2033-164914-0012-673: hyp=['RETURN', 'QUICKLY', 'AND', 'LINGER', 'NOT'] +2033-164914-0013-674: ref=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'THIRD', 'NIGHT'] +2033-164914-0013-674: hyp=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'THIRD', 'NIGHT'] +2033-164914-0014-675: ref=['BUT', 'THE', 'EUNUCH', 'SAID', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'TILL', 'THOU', 'SHOW', 'ME', 'WHO', 'IT', 'WAS', 'THAT', 'RECITED', 'THE', 'VERSES', 'FOR', 'I', 'DREAD', 'RETURNING', 'TO', 'MY', 'LADY', 'WITHOUT', 'HIM'] +2033-164914-0014-675: hyp=['BUT', 'THE', 'EUNUCH', 'SAID', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'TILL', 'THOU', 'SHOW', 'ME', 'WHO', 'IT', 'WAS', 'THAT', 'RECITED', 'THE', 'VERSES', 'FOR', 'I', 'DREAD', 'RETURNING', 'TO', 'MY', 'LADY', 'WITHOUT', 'HIM'] +2033-164914-0015-676: ref=['NOW', 'WHEN', 'THE', 'FIREMAN', 'HEARD', 'THESE', 'WORDS', 'HE', 'FEARED', 'FOR', 'ZAU', 'AL', 'MAKAN', 'AND', 'WEPT', 'WITH', 'EXCEEDING', 'WEEPING', 'AND', 'SAID', 'TO', 'THE', 'EUNUCH', 'BY', 'ALLAH', 'IT', 'WAS', 'NOT', 'I', 'AND', 'I', 'KNOW', 'HIM', 'NOT'] +2033-164914-0015-676: hyp=['NOW', 'WHEN', 'THE', 'FIREMAN', 'HEARD', 'THESE', 'WORDS', 'HE', 'FEARED', 'FOR', 'ZOAMA', 'KHAN', 'AND', 'WEPT', 'WITH', 'EXCEEDING', 'WEEPING', 'AND', 'SAID', 'TO', 'THE', 'EUNUCH', 'BY', 'ALLAH', 'IT', 'WAS', 'NOT', 'I', 'AND', 'I', 'KNOW', 'HIM', 'NOT'] +2033-164914-0016-677: ref=['SO', 'GO', 'THOU', 'TO', 'THY', 'STATION', 'AND', 'IF', 'THOU', 'AGAIN', 'MEET', 'ANY', 'ONE', 'AFTER', 'THIS', 'HOUR', 'RECITING', 'AUGHT', 'OF', 'POETRY', 'WHETHER', 'HE', 'BE', 'NEAR', 'OR', 'FAR', 'IT', 'WILL', 'BE', 'I', 'OR', 'SOME', 'ONE', 'I', 'KNOW', 'AND', 'THOU', 'SHALT', 'NOT', 'LEARN', 'OF', 'HIM', 'BUT', 'BY', 'ME'] +2033-164914-0016-677: hyp=['SO', 'GO', 'THOU', 'TO', 'THY', 'STATION', 'AND', 'IF', 'THOU', 'AGAIN', 'MEET', 'ANY', 'ONE', 'AFTER', 'THIS', 'HOUR', 'RECITING', 'AUGHT', 'OF', 'POETRY', 'WHETHER', 'HE', 'BE', 'NEAR', 'OR', 'FAR', 'IT', 'WILL', 'BE', 'I', 'OR', 'SOME', 'ONE', 'I', 'KNOW', 'AND', 'THOU', 'SHALT', 'NOT', 'LEARN', 'OF', 'HIM', 'BUT', 'BY', 'ME'] +2033-164914-0017-678: ref=['THEN', 'HE', 'KISSED', 'THE', "EUNUCH'S", 'HEAD', 'AND', 'SPAKE', 'HIM', 'FAIR', 'TILL', 'HE', 'WENT', 'AWAY', 'BUT', 'THE', 'CASTRATO', 'FETCHED', 'A', 'ROUND', 'AND', 'RETURNING', 'SECRETLY', 'CAME', 'AND', 'STOOD', 'BEHIND', 'THE', 'FIREMAN', 'FEARING', 'TO', 'GO', 'BACK', 'TO', 'HIS', 'MISTRESS', 'WITHOUT', 'TIDINGS'] +2033-164914-0017-678: hyp=['THEN', 'HE', 'KISSED', 'THE', "EUNUCH'S", 'HEAD', 'AND', 'SPAKE', 'HIM', 'FAIR', 'TILL', 'HE', 'WENT', 'AWAY', 'BUT', 'THE', 'GASTRATO', 'FETCHED', 'AROUND', 'AND', 'RETURNING', 'SECRETLY', 'CAME', 'AND', 'STOOD', 'BEHIND', 'THE', 'FIREMAN', 'FEARING', 'TO', 
'GO', 'BACK', 'TO', 'HIS', 'MISTRESS', 'WITHOUT', 'TIDINGS'] +2033-164914-0018-679: ref=['I', 'SAY', 'WHAT', 'MADE', 'MY', 'IGNOMY', "WHATE'ER", 'THE', 'BITTER', 'CUP', 'I', 'DRAIN', 'FAR', 'BE', 'FRO', 'ME', 'THAT', 'LAND', 'TO', 'FLEE', 'NOR', 'WILL', 'I', 'BOW', 'TO', 'THOSE', 'WHO', 'BLAME', 'AND', 'FOR', 'SUCH', 'LOVE', 'WOULD', 'DEAL', 'ME', 'SHAME'] +2033-164914-0018-679: hyp=['I', 'SAY', 'WHAT', 'MADE', 'MY', 'CHOMI', 'WHATEVER', 'THE', 'BEACHER', 'CUP', 'I', 'DRAIN', 'FAR', 'BE', 'FROM', 'ME', 'THY', 'LAND', 'TO', 'FLEE', 'NOR', 'WILL', 'I', 'BOW', 'TO', 'THOSE', 'WHO', 'BLAME', 'AND', 'FOR', 'SUCH', 'LOVE', 'WOULD', 'DEAL', 'ME', 'SHAME'] +2033-164914-0019-680: ref=['THEN', 'SAID', 'THE', 'EUNUCH', 'TO', 'ZAU', 'AL', 'MAKAN', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'LORD'] +2033-164914-0019-680: hyp=['THEN', 'SAID', 'THE', 'EUNUCH', 'TO', 'ZA', 'MAKAN', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'LORD'] +2033-164914-0020-681: ref=['O', 'MY', 'LORD', 'CONTINUED', 'THE', 'EUNUCH', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164914-0020-681: hyp=['O', 'MY', 'LORD', 'CONTINUED', 'THE', 'EUNUCH', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THAT', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164914-0021-682: ref=['WE', 'WILL', 'DO', 'THEE', 'NO', 'UPRIGHT', 'O', 'MY', 'SON', 'NOR', 'WRONG', 'THEE', 'IN', 'AUGHT', 'BUT', 'OUR', 'OBJECT', 'IS', 'THAT', 'THOU', 'BEND', 'THY', 'GRACIOUS', 'STEPS', 'WITH', 'ME', 'TO', 'MY', 'MISTRESS', 'TO', 'RECEIVE', 'HER', 'ANSWER', 'AND', 'RETURN', 'IN', 'WEAL', 'AND', 'SAFETY', 'AND', 'THOU', 'SHALT', 'HAVE', 'A', 'HANDSOME', 'PRESENT', 'AS', 'ONE', 'WHO', 'BRINGETH', 'GOOD', 'NEWS'] +2033-164914-0021-682: hyp=['WE', 'WILL', 'DO', 'THEE', 'NO', 'UPRIGHT', 'O', 'MY', 'SON', 'NOR', 'WRONG', 'THEE', 'IN', 'AUGHT', 'BUT', 'OUR', 'OBJECT', 'IS', 'THAT', 'THOU', 'BEND', 'THY', 'GRACIOUS', 'STEPS', 'WITH', 'ME', 'TO', 'MY', 'MISTRESS', 'TO', 'RECEIVE', 'HER', 'ANSWER', 'AND', 'RETURNING', 'WHEEL', 'AND', 'SAFETY', 'AND', 'THOU', 'SHALT', 'HAVE', 'A', 'HANDSOME', 'PRESENT', 'AS', 'ONE', 'WHO', 'BRINGETH', 'GOOD', 'NEWS'] +2033-164914-0022-683: ref=['THEN', 'THE', 'EUNUCH', 'WENT', 'OUT', 'TO', 'ZAU', 'AL', 'MAKAN', 'AND', 'SAID', 'TO', 'HIM', 'RECITE', 'WHAT', 'VERSES', 'THOU', 'KNOWEST', 'FOR', 'MY', 'LADY', 'IS', 'HERE', 'HARD', 'BY', 'LISTENING', 'TO', 'THEE', 'AND', 'AFTER', 'I', 'WILL', 'ASK', 'THEE', 'OF', 'THY', 'NAME', 'AND', 'THY', 'NATIVE', 'COUNTRY', 'AND', 'THY', 'CONDITION'] +2033-164914-0022-683: hyp=['THEN', 'THE', 'EUNUCH', 'WENT', 'OUT', 'TO', 'ZAO', 'MAKAN', 'AND', 'SAID', 'TO', 'HIM', 'RECITE', 'WHAT', 'VERSE', 'IS', 'THOU', 'KNOWEST', 'FOR', 'MY', "LADY'S", 'HEAR', 'HARD', 'BY', 'LISTENING', 'TO', 'THEE', 'AND', 'AFTER', 'I', 'WILL', 'ASK', 'THEE', 'OF', 'THY', 'NAME', 'AND', 'THINE', 'NATIVE', 'COUNTRY', 'AND', 'THY', 'CONDITION'] +2033-164915-0000-643: ref=['AND', 'ALSO', 'THESE'] +2033-164915-0000-643: hyp=['AND', 'ALSO', 'THESE'] +2033-164915-0001-644: ref=['THEN', 'SHE', 'THREW', 'HERSELF', 'UPON', 'HIM', 'AND', 'HE', 'GATHERED', 'HER', 'TO', 'HIS', 'BOSOM', 'AND', 'THE', 'TWAIN', 'FELL', 'DOWN', 'IN', 'A', 'FAINTING', 'FIT'] +2033-164915-0001-644: hyp=['THEN', 'SHE', 'THREW', 'HERSELF', 'UPON', 'HIM', 'AND', 'HE', 'GATHERED', 'HER', 'TO', 'HIS', 'BOSOM', 'AND', 'ITALY', 'FELL', 'DOWN', 'IN', 'A', 'FAINTING', 'FIT'] +2033-164915-0002-645: ref=['WHEN', 'THE', 'EUNUCH', 'SAW', 'THIS', 'CASE', 'HE', 'WONDERED', 'AT', 
'THEM', 'AND', 'THROWING', 'OVER', 'THEM', 'SOMEWHAT', 'TO', 'COVER', 'THEM', 'WAITED', 'TILL', 'THEY', 'SHOULD', 'RECOVER'] +2033-164915-0002-645: hyp=['WHEN', 'THE', 'EUNUCHS', 'SAW', 'THESE', 'CASE', 'HE', 'WONDERED', 'AT', 'THEM', 'AND', 'THROWING', 'OVER', 'THEM', 'SOMEWHAT', 'TO', 'COVER', 'THEM', 'WAITED', 'TILL', 'THEY', 'SHOULD', 'RECOVER'] +2033-164915-0003-646: ref=['AFTER', 'A', 'WHILE', 'THEY', 'CAME', 'TO', 'THEMSELVES', 'AND', 'NUZHAT', 'AL', 'ZAMAN', 'REJOICED', 'WITH', 'EXCEEDING', 'JOY', 'OPPRESSION', 'AND', 'DEPRESSION', 'LEFT', 'HER', 'AND', 'GLADNESS', 'TOOK', 'THE', 'MASTERY', 'OF', 'HER', 'AND', 'SHE', 'REPEATED', 'THESE', 'VERSES'] +2033-164915-0003-646: hyp=['AFTER', 'A', 'WHILE', 'THEY', 'CAME', 'TO', 'THEMSELVES', 'AND', 'USHART', 'AL', 'ZAMAN', 'REJOICED', 'WITH', 'EXCEEDING', 'JOY', 'OPPRESSION', 'AND', 'DEPRESSION', 'LAUGHTER', 'AND', 'GLADNESS', 'TOOK', 'THE', 'MASTERY', 'OF', 'HER', 'AND', 'SHE', 'REPEATED', 'THESE', 'VERSES'] +2033-164915-0004-647: ref=['ACCORDINGLY', 'SHE', 'TOLD', 'HIM', 'ALL', 'THAT', 'HAD', 'COME', 'TO', 'HER', 'SINCE', 'THEIR', 'SEPARATION', 'AT', 'THE', 'KHAN', 'AND', 'WHAT', 'HAD', 'HAPPENED', 'TO', 'HER', 'WITH', 'THE', 'BADAWI', 'HOW', 'THE', 'MERCHANT', 'HAD', 'BOUGHT', 'HER', 'OF', 'HIM', 'AND', 'HAD', 'TAKEN', 'HER', 'TO', 'HER', 'BROTHER', 'SHARRKAN', 'AND', 'HAD', 'SOLD', 'HER', 'TO', 'HIM', 'HOW', 'HE', 'HAD', 'FREED', 'HER', 'AT', 'THE', 'TIME', 'OF', 'BUYING', 'HOW', 'HE', 'HAD', 'MADE', 'A', 'MARRIAGE', 'CONTRACT', 'WITH', 'HER', 'AND', 'HAD', 'GONE', 'IN', 'TO', 'HER', 'AND', 'HOW', 'THE', 'KING', 'THEIR', 'SIRE', 'HAD', 'SENT', 'AND', 'ASKED', 'FOR', 'HER', 'FROM', 'SHARRKAN'] +2033-164915-0004-647: hyp=['ACCORDINGLY', 'SHE', 'TOLD', 'HIM', 'ALL', 'THAT', 'HAD', 'COME', 'TO', 'HER', 'SINCE', 'THEIR', 'SEPARATION', 'AT', 'THE', 'KHAN', 'AND', 'WHAT', 'HAD', 'HAPPENED', 'TO', 'HER', 'WITH', 'THE', 'BADAWI', 'HOW', 'THE', 'MERCHANT', 'HAD', 'BOUGHT', 'HER', 'OF', 'HIM', 'AND', 'HAD', 'TAKEN', 'HER', 'TO', 'HER', 'BROTHER', 'SHARKAN', 'AND', 'HAD', 'SOLD', 'HER', 'TO', 'HIM', 'HOW', 'HE', 'HAD', 'FREED', 'HER', 'AT', 'THE', 'TIME', 'OF', 'BUYING', 'HOW', 'HE', 'HAD', 'MADE', 'HER', 'MARRIAGE', 'CONTRACT', 'WITH', 'HER', 'AND', 'HAD', 'GONE', 'IN', 'TO', 'HER', 'AND', 'HOW', 'THE', 'KING', 'THEIR', 'SIRE', 'HAD', 'SENT', 'AND', 'ASKED', 'FOR', 'HER', 'FROM', 'SHARKAN'] +2033-164915-0005-648: ref=['BUT', 'NOW', 'GO', 'TO', 'THY', 'MASTER', 'AND', 'BRING', 'HIM', 'QUICKLY', 'TO', 'ME'] +2033-164915-0005-648: hyp=['BUT', 'NOW', 'GO', 'TO', 'THY', 'MASTER', 'AND', 'BRING', 'HIM', 'QUICKLY', 'TO', 'ME'] +2033-164915-0006-649: ref=['THE', 'CHAMBERLAIN', 'CALLED', 'THE', 'CASTRATO', 'AND', 'CHARGED', 'HIM', 'TO', 'DO', 'ACCORDINGLY', 'SO', 'HE', 'REPLIED', 'I', 'HEAR', 'AND', 'I', 'OBEY', 'AND', 'HE', 'TOOK', 'HIS', 'PAGES', 'WITH', 'HIM', 'AND', 'WENT', 'OUT', 'IN', 'SEARCH', 'OF', 'THE', 'STOKER', 'TILL', 'HE', 'FOUND', 'HIM', 'IN', 'THE', 'REAR', 'OF', 'THE', 'CARAVAN', 'GIRTHING', 'HIS', 'ASS', 'AND', 'PREPARING', 'FOR', 'FLIGHT'] +2033-164915-0006-649: hyp=['THE', 'CHAMBERLAIN', 'CALLED', 'THE', 'CASTRATO', 'AND', 'CHARGED', 'HIM', 'TO', 'DO', 'ACCORDINGLY', 'SO', 'HE', 'REPLIED', 'I', 'HEAR', 'AND', 'I', 'OBEY', 'AND', 'HE', 'TOOK', 'HIS', 'PAGES', 'WITH', 'HIM', 'AND', 'WENT', 'OUT', 'IN', 'SEARCH', 'OF', 'THE', 'STOCKER', 'TILL', 'HE', 'FOUND', 'HIM', 'IN', 'THE', 'REAR', 'OF', 'THE', 'CARAVAN', 'GIRDING', 'HIS', 'ASS', 'AND', 'PREPARING', 'FOR', 'FLIGHT'] +2033-164915-0007-650: ref=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 
'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'THE', 'STOKER', 'GIRTHED', 'HIS', 'ASS', 'FOR', 'FLIGHT', 'AND', 'BESPAKE', 'HIMSELF', 'SAYING', 'OH', 'WOULD', 'I', 'KNEW', 'WHAT', 'IS', 'BECOME', 'OF', 'HIM'] +2033-164915-0007-650: hyp=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'THE', 'STOCKER', 'GIRDED', 'HIS', 'EYES', 'FOR', 'FLIGHT', 'AND', 'BESPAKE', 'HIMSELF', 'SAYING', 'O', 'WOULD', 'I', 'KNEW', 'WHAT', 'IS', 'BECOME', 'OF', 'HIM'] +2033-164915-0008-651: ref=['I', 'BELIEVE', 'HE', 'HATH', 'DENOUNCED', 'ME', 'TO', 'THE', 'EUNUCH', 'HENCE', 'THESE', 'PAGES', 'ET', 'ABOUT', 'ME', 'AND', 'HE', 'HATH', 'MADE', 'ME', 'AN', 'ACCOMPLICE', 'IN', 'HIS', 'CRIME'] +2033-164915-0008-651: hyp=['I', 'BELIEVE', 'HE', 'HATH', 'DENOUNCED', 'ME', 'TO', 'THE', 'EUNUCH', 'HENCE', 'THESE', 'PAGES', 'AT', 'ABOUT', 'ME', 'AND', 'HE', 'HATH', 'MADE', 'ME', 'AN', 'ACCOMPLICE', 'IN', 'HIS', 'CRIME'] +2033-164915-0009-652: ref=['WHY', 'DIDST', 'THOU', 'SAY', 'I', 'NEVER', 'REPEATED', 'THESE', 'COUPLETS', 'NOR', 'DO', 'I', 'KNOW', 'WHO', 'REPEATED', 'THEM', 'WHEN', 'IT', 'WAS', 'THY', 'COMPANION'] +2033-164915-0009-652: hyp=['WHY', 'DIDST', 'THOU', 'SAY', 'I', 'NEVER', 'REPEATED', 'THESE', 'COUPLETS', 'NOR', 'DO', 'I', 'KNOW', 'WHO', 'REPEATED', 'THEM', 'WHEN', 'IT', 'WAS', 'THY', 'COMPANION'] +2033-164915-0010-653: ref=['BUT', 'NOW', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'BETWEEN', 'THIS', 'PLACE', 'AND', 'BAGHDAD', 'AND', 'WHAT', 'BETIDETH', 'THY', 'COMRADE', 'SHALL', 'BETIDE', 'THEE'] +2033-164915-0010-653: hyp=['BUT', 'NOW', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'BETWEEN', 'THIS', 'PLACE', 'AND', 'BAGDAD', 'AND', 'WHAT', 'BETIDETH', 'THY', 'COMRADE', 'SHALL', 'BETIDE', 'THEE'] +2033-164915-0011-654: ref=['TWAS', 'AS', 'I', 'FEARED', 'THE', 'COMING', 'ILLS', 'DISCERNING', 'BUT', 'UNTO', 'ALLAH', 'WE', 'ARE', 'ALL', 'RETURNING'] +2033-164915-0011-654: hyp=['TOWARDS', 'AS', 'I', 'FEARED', 'THE', 'CARMINALS', 'DISCERNING', 'BUT', 'ON', 'TO', 'ALLAH', 'WE', 'ARE', 'ALL', 'RETURNING'] +2033-164915-0012-655: ref=['THEN', 'THE', 'EUNUCH', 'CRIED', 'UPON', 'THE', 'PAGES', 'SAYING', 'TAKE', 'HIM', 'OFF', 'THE', 'ASS'] +2033-164915-0012-655: hyp=['THEN', 'THE', 'EUNUCH', 'CRIED', 'UPON', 'IN', 'THE', 'PAGES', 'SAYING', 'TAKE', 'HIM', 'OFF', 'THE', 'ASS'] +2033-164915-0013-656: ref=['AND', 'HE', 'ANSWERED', 'I', 'AM', 'THE', 'CHAMBERLAIN', 'OF', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'KING', 'SHARRKAN', 'SON', 'OF', 'OMAR', 'BIN', 'AL', "NU'UMAN", 'LORD', 'OF', 'BAGHDAD', 'AND', 'OF', 'THE', 'LAND', 'OF', 'KHORASAN', 'AND', 'I', 'BRING', 'TRIBUTE', 'AND', 'PRESENTS', 'FROM', 'HIM', 'TO', 'HIS', 'FATHER', 'IN', 'BAGHDAD'] +2033-164915-0013-656: hyp=['AND', 'HE', 'ANSWERED', 'I', 'AM', 'THE', 'CHAMBERLAIN', 'OF', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'KING', 'SHARKAN', 'SUNG', 'OVER', 'MARBIN', 'AL', 'NUMA', 'LORD', 'OF', 'WABDAD', 'AND', 'OF', 'THE', 'LAND', 'OF', 'KHORASAN', 'AND', 'I', 'BRING', 'TRIBUTE', 'AND', 'PRESENTS', 'FROM', 'HIM', 'TO', 'HIS', 'FATHER', 'IN', 'BAGHDAD'] +2033-164915-0014-657: ref=['SO', 'FARE', 'YE', 'FORWARDS', 'NO', 'HARM', 'SHALL', 'BEFAL', 'YOU', 'TILL', 'YOU', 'JOIN', 'HIS', 'GRAND', 'WAZIR', 'DANDAN'] +2033-164915-0014-657: hyp=['SOPHIA', 'FORWARDS', 'NO', 'HARM', 'SHALL', 'BEFALL', 'YOU', 'TILL', 'YOU', 'JOIN', 'HIS', 'GRAND', 'WAZIR', 'THAN', 'DAN'] +2033-164915-0015-658: ref=['THEN', 'HE', 'BADE', 'HIM', 'BE', 'SEATED', 'AND', 'QUESTIONED', 'HIM', 'AND', 'HE', 'REPLIED', 'THAT', 'HE', 'WAS', 'CHAMBERLAIN', 'TO', 'THE', 'EMIR', 'OF', 'DAMASCUS', 
'AND', 'WAS', 'BOUND', 'TO', 'KING', 'OMAR', 'WITH', 'PRESENTS', 'AND', 'THE', 'TRIBUTE', 'OF', 'SYRIA'] +2033-164915-0015-658: hyp=['THEN', 'HE', 'BADE', 'HIM', 'BE', 'SEATED', 'AND', 'QUESTIONED', 'HIM', 'AND', 'HE', 'REPLIED', 'THAT', 'HE', 'WAS', 'TREMBLING', 'TO', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'AND', 'WAS', 'BOUND', 'TO', 'KING', 'OMAR', 'WITH', 'PRESENTS', 'AND', 'THE', 'TRIBUTE', 'OF', 'SYRIA'] +2033-164915-0016-659: ref=['SO', 'IT', 'WAS', 'AGREED', 'THAT', 'WE', 'GO', 'TO', 'DAMASCUS', 'AND', 'FETCH', 'THENCE', 'THE', "KING'S", 'SON', 'SHARRKAN', 'AND', 'MAKE', 'HIM', 'SULTAN', 'OVER', 'HIS', "FATHER'S", 'REALM'] +2033-164915-0016-659: hyp=['SO', 'IT', 'WAS', 'AGREED', 'THAT', 'WE', 'GO', 'TO', 'DAMASCUS', 'AND', 'FETCH', 'THENCE', 'THE', "KING'S", 'SON', 'SHARKAN', 'AND', 'MAY', 'CAME', 'SULTAN', 'OVER', 'HIS', "FATHER'S", 'REALM'] +2033-164915-0017-660: ref=['AND', 'AMONGST', 'THEM', 'WERE', 'SOME', 'WHO', 'WOULD', 'HAVE', 'CHOSEN', 'THE', 'CADET', 'ZAU', 'AL', 'MAKAN', 'FOR', 'QUOTH', 'THEY', 'HIS', 'NAME', 'BE', 'LIGHT', 'OF', 'THE', 'PLACE', 'AND', 'HE', 'HATH', 'A', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'HIGHS', 'THE', 'DELIGHT', 'OF', 'THE', 'TIME', 'BUT', 'THEY', 'SET', 'OUT', 'FIVE', 'YEARS', 'AGO', 'FOR', 'AL', 'HIJAZ', 'AND', 'NONE', 'WOTTETH', 'WHAT', 'IS', 'BECOME', 'OF', 'THEM'] +2033-164915-0017-660: hyp=['AND', 'AMONGST', 'THEM', 'WERE', 'SOME', 'WHO', 'WOULD', 'HAVE', 'CHOSEN', 'THE', 'CADET', 'THOUA', 'MAKAN', 'FOR', 'QUOTH', 'THEY', 'HIS', 'NAME', 'BE', 'LIGHT', 'OF', 'THE', 'PLACE', 'AND', 'HE', 'HATH', 'A', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'HIES', 'THE', 'DELIGHT', 'OF', 'THE', 'TIME', 'BUT', 'THEY', 'SET', 'OUT', 'FIVE', 'YEARS', 'AGO', 'FOR', 'AL', 'KI', 'JAS', 'AND', 'NONE', 'WOTTETH', 'WHAT', 'IS', 'BECOME', 'OF', 'THEM'] +2033-164916-0000-684: ref=['SO', 'HE', 'TURNED', 'TO', 'THE', 'WAZIR', 'DANDAN', 'AND', 'SAID', 'TO', 'HIM', 'VERILY', 'YOUR', 'TALE', 'IS', 'A', 'WONDER', 'OF', 'WONDERS'] +2033-164916-0000-684: hyp=['SO', 'HE', 'TURNED', 'TO', 'THE', 'WAZIR', 'DANDAN', 'AND', 'SAID', 'TO', 'HIM', 'VERILY', 'YOUR', 'TALE', 'IS', 'A', 'WANDER', 'OF', 'WONDERS'] +2033-164916-0001-685: ref=['KNOW', 'O', 'CHIEF', 'WAZIR', 'THAT', 'HERE', 'WHERE', 'YOU', 'HAVE', 'ENCOUNTERED', 'ME', 'ALLAH', 'HATH', 'GIVEN', 'YOU', 'REST', 'FROM', 'FATIGUE', 'AND', 'BRINGETH', 'YOU', 'YOUR', 'DESIRE', 'AFTER', 'THE', 'EASIEST', 'OF', 'FASHIONS', 'FOR', 'THAT', 'HIS', 'ALMIGHTY', 'WILL', 'RESTORETH', 'TO', 'YOU', 'ZAU', 'AL', 'MAKAN', 'AND', 'HIS', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'WHEREBY', 'WE', 'WILL', 'SETTLE', 'THE', 'MATTER', 'AS', 'WE', 'EASILY', 'CAN'] +2033-164916-0001-685: hyp=['NO', 'O', 'CHIEF', 'WAZIR', 'THAT', 'HERE', 'WHERE', 'YOU', 'HAVE', 'ENCOUNTERED', 'ME', 'ALLAH', 'HATH', 'GIVEN', 'YOU', 'REST', 'FROM', 'FATIGUE', 'AND', 'BRINGETH', 'YOU', 'YOUR', 'DESIRE', 'AFTER', 'THE', 'EASIEST', 'OF', 'FASHIONS', 'FOR', 'LET', 'HIS', 'ALMIGHTY', 'WILL', 'RESTORE', 'IT', 'TO', 'YOU', 'THOU', 'A', 'MAKAN', 'AND', 'HIS', 'SISTER', 'NOSHAT', 'AL', 'ZAMAN', 'WHEREBY', 'WE', 'WILL', 'SETTLE', 'THE', 'MATTER', 'AS', 'WE', 'EASILY', 'CAN'] +2033-164916-0002-686: ref=['WHEN', 'THE', 'MINISTER', 'HEARD', 'THESE', 'WORDS', 'HE', 'REJOICED', 'WITH', 'GREAT', 'JOY', 'AND', 'SAID', 'O', 'CHAMBERLAIN', 'TELL', 'ME', 'THE', 'TALE', 'OF', 'THE', 'TWAIN', 'AND', 'WHAT', 'BEFEL', 'THEM', 'AND', 'THE', 'CAUSE', 'OF', 'THEIR', 'LONG', 'ABSENCE'] +2033-164916-0002-686: hyp=['WHEN', 'THE', 'MEANS', 'SIR', 'HEARD', 'THESE', 'WORDS', 'HE', 'REJOICED', 'WITH', 'GRAY', 'JOY', 'AND', 
'SAID', 'O', 'CHAMBERLAIN', 'TELL', 'ME', 'THE', 'TALE', 'OF', 'THE', 'TWAIN', 'AND', 'WHAT', 'BEFELL', 'THEM', 'AND', 'THE', 'CAUSE', 'OF', 'THEIR', 'LONG', 'ABSENCE'] +2033-164916-0003-687: ref=['ZAU', 'AL', 'MAKAN', 'BOWED', 'HIS', 'HEAD', 'AWHILE', 'AND', 'THEN', 'SAID', 'I', 'ACCEPT', 'THIS', 'POSITION', 'FOR', 'INDEED', 'THERE', 'WAS', 'NO', 'REFUSING', 'AND', 'HE', 'WAS', 'CERTIFIED', 'THAT', 'THE', 'CHAMBERLAIN', 'HAD', 'COUNSELLED', 'HIM', 'WELL', 'AND', 'WISELY', 'AND', 'SET', 'HIM', 'ON', 'THE', 'RIGHT', 'WAY'] +2033-164916-0003-687: hyp=['ZAO', 'MAKAN', 'BOWED', 'HIS', 'HEAD', 'A', 'WHILE', 'AND', 'THEN', 'SAID', 'I', 'ACCEPT', 'THE', 'POSITION', 'FOR', 'INDEED', 'THERE', 'WAS', 'NO', 'REFUSING', 'AND', 'HE', 'WAS', 'CERTIFIED', 'THAT', 'THE', 'CHAMBERLAIN', 'HAD', 'COUNSELLED', 'HIM', 'WELL', 'AND', 'WISELY', 'AND', 'SAT', 'HIM', 'ON', 'THE', 'RIGHT', 'WAY'] +2033-164916-0004-688: ref=['THEN', 'HE', 'ADDED', 'O', 'MY', 'UNCLE', 'HOW', 'SHALL', 'I', 'DO', 'WITH', 'MY', 'BROTHER', 'SHARRKAN'] +2033-164916-0004-688: hyp=['THEN', 'HE', 'ADDED', 'O', 'MY', 'UNCLE', 'HOW', 'SHALL', 'I', 'DO', 'WITH', 'MY', 'BROTHER', 'SHARKAN'] +2033-164916-0005-689: ref=['AFTER', 'AWHILE', 'THE', 'DUST', 'DISPERSED', 'AND', 'THERE', 'APPEARED', 'UNDER', 'IT', 'THE', 'ARMY', 'OF', 'BAGHDAD', 'AND', 'KHORASAN', 'A', 'CONQUERING', 'HOST', 'LIKE', 'THE', 'FULL', 'TIDE', 'SEA', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164916-0005-689: hyp=['AFTER', 'A', 'WHILE', 'THE', 'DUST', 'DISPERSED', 'AND', 'THERE', 'APPEARED', 'UNDER', 'IT', 'THE', 'ARMY', 'OF', 'BAGHDAD', 'AND', 'KHORASAN', 'A', 'CONQUERING', 'HOST', 'LIKE', 'THE', 'POOL', 'TIDE', 'SEA', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164916-0006-690: ref=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'EIGHTH', 'NIGHT'] +2033-164916-0006-690: hyp=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'EIGHTH', 'NIGHT'] +2033-164916-0007-691: ref=['AND', 'IN', 'IT', 'ALL', 'REJOICED', 'AT', 'THE', 'ACCESSION', 'OF', 'THE', 'LIGHT', 'OF', 'THE', 'PLACE'] +2033-164916-0007-691: hyp=['ANY', 'NEAT', 'OR', 'REJOICED', 'AT', 'THE', 'ACCESSION', 'OF', 'THE', 'LIGHT', 'OF', 'THE', 'PLACE'] +2033-164916-0008-692: ref=['LASTLY', 'THE', 'MINISTER', 'WENT', 'IN', 'AND', 'KISSED', 'THE', 'GROUND', 'BEFORE', 'ZAU', 'AL', 'MAKAN', 'WHO', 'ROSE', 'TO', 'MEET', 'HIM', 'SAYING', 'WELCOME', 'O', 'WAZIR', 'AND', 'SIRE', 'SANS', 'PEER'] +2033-164916-0008-692: hyp=['LASTLY', 'THE', 'MINISTER', 'WENT', 'IN', 'AND', 'KISSED', 'THE', 'GROUND', 'BEFORE', 'ZAO', 'MAKAN', 'WHO', 'ROSE', 'TO', 'MEET', 'HIM', 'SAYING', 'WELCOME', 'O', 'WAZIR', 'AND', 'SIRS', 'SONSPIER'] +2033-164916-0009-693: ref=['MOREOVER', 'THE', 'SULTAN', 'COMMANDED', 'HIS', 'WAZIR', 'DANDAN', 'CALL', 'A', 'TEN', 'DAYS', 'HALT', 'OF', 'THE', 'ARMY', 'THAT', 'HE', 'MIGHT', 'BE', 'PRIVATE', 'WITH', 'HIM', 'AND', 'LEARN', 'FROM', 'HIM', 'HOW', 'AND', 'WHEREFORE', 'HIS', 'FATHER', 'HAD', 'BEEN', 'SLAIN'] +2033-164916-0009-693: hyp=['MOREOVER', 'THE', 'SULTAN', 'COMMANDED', 'HIS', 'WAZIR', 'DAN', 'CALL', 'AT', 'TEN', 'DAYS', 'HALT', 'OF', 'THE', 'ARMY', 'THAT', 'HE', 'MIGHT', 'BE', 'PRIVATE', 'WITH', 'HIM', 'AND', 'LEARN', 'FROM', 'HIM', 'HOW', 'AND', 'WHEREFORE', 'HIS', 'FATHER', 'HAD', 'BEEN', 'SLAIN'] +2033-164916-0010-694: ref=['HE', 'THEN', 'REPAIRED', 'TO', 'THE', 'HEART', 'OF', 'THE', 'ENCAMPMENT', 'AND', 'ORDERED', 'THE', 'HOST', 'TO', 'HALT', 'TEN', 'DAYS'] 
+2033-164916-0010-694: hyp=['HE', 'THEN', 'REPAIRED', 'TO', 'THE', 'HEARTS', 'OF', 'THE', 'ENCAMPMENT', 'AND', 'ORDERED', 'THAT', 'THE', 'HOST', 'TO', 'HALT', 'TEN', 'DAYS'] +2414-128291-0000-2689: ref=['WHAT', 'HATH', 'HAPPENED', 'UNTO', 'ME'] +2414-128291-0000-2689: hyp=['WHAT', 'HAD', 'HAPPENED', 'TO', 'ME'] +2414-128291-0001-2690: ref=['HE', 'ASKED', 'HIMSELF', 'SOMETHING', 'WARM', 'AND', 'LIVING', 'QUICKENETH', 'ME', 'IT', 'MUST', 'BE', 'IN', 'THE', 'NEIGHBOURHOOD'] +2414-128291-0001-2690: hyp=['HE', 'ASKED', 'HIMSELF', 'SOMETHING', 'WARM', 'AND', 'LIVING', 'QUICKENED', 'ME', 'IT', 'MUST', 'BE', 'IN', 'THE', 'NEIGHBOURHOOD'] +2414-128291-0002-2691: ref=['WHEN', 'HOWEVER', 'ZARATHUSTRA', 'WAS', 'QUITE', 'NIGH', 'UNTO', 'THEM', 'THEN', 'DID', 'HE', 'HEAR', 'PLAINLY', 'THAT', 'A', 'HUMAN', 'VOICE', 'SPAKE', 'IN', 'THE', 'MIDST', 'OF', 'THE', 'KINE', 'AND', 'APPARENTLY', 'ALL', 'OF', 'THEM', 'HAD', 'TURNED', 'THEIR', 'HEADS', 'TOWARDS', 'THE', 'SPEAKER'] +2414-128291-0002-2691: hyp=['WHEN', 'HOWEVER', 'THE', 'TWO', 'STRAW', 'WAS', 'QUITE', 'NIGH', 'AND', 'TO', 'THEM', 'THEN', 'DID', 'HE', 'HEAR', 'PLAINLY', 'WITH', 'HUMAN', 'VOICE', 'PIKE', 'IN', 'THE', 'MIDST', 'OF', 'THE', 'KIND', 'AND', 'A', 'FRIENDLY', 'ALL', 'OF', 'THEM', 'HAD', 'TURNED', 'THEIR', 'HEADS', 'TOWARDS', 'THE', 'SPEAKER'] +2414-128291-0003-2692: ref=['WHAT', 'DO', 'I', 'HERE', 'SEEK'] +2414-128291-0003-2692: hyp=['FOR', 'DO', 'I', 'HERE', 'SEEK'] +2414-128291-0004-2693: ref=['ANSWERED', 'HE', 'THE', 'SAME', 'THAT', 'THOU', 'SEEKEST', 'THOU', 'MISCHIEF', 'MAKER', 'THAT', 'IS', 'TO', 'SAY', 'HAPPINESS', 'UPON', 'EARTH'] +2414-128291-0004-2693: hyp=['ANSWERED', 'HE', 'THE', 'SAME', 'THAT', 'THOU', 'SEEKEST', 'THOU', 'MISCHIEF', 'MAKER', 'THAT', 'IS', 'TO', 'SAY', 'HAPPINESS', 'UPON', 'EARTH'] +2414-128291-0005-2694: ref=['FOR', 'I', 'TELL', 'THEE', 'THAT', 'I', 'HAVE', 'ALREADY', 'TALKED', 'HALF', 'A', 'MORNING', 'UNTO', 'THEM', 'AND', 'JUST', 'NOW', 'WERE', 'THEY', 'ABOUT', 'TO', 'GIVE', 'ME', 'THEIR', 'ANSWER'] +2414-128291-0005-2694: hyp=['FOR', 'I', 'TELL', 'THEE', 'THAT', 'I', 'HAVE', 'ALREADY', 'TALKED', 'HALF', 'A', 'MORNING', 'UNTO', 'THEM', 'AND', 'JUST', 'NOW', 'WHERE', 'THEY', 'ABOUT', 'TO', 'GIVE', 'ME', 'THE', 'ANSWER'] +2414-128291-0006-2695: ref=['HE', 'WOULD', 'NOT', 'BE', 'RID', 'OF', 'HIS', 'AFFLICTION'] +2414-128291-0006-2695: hyp=['HE', 'WOULD', 'NOT', 'BE', 'RID', 'OF', 'HIS', 'AFFLICATION'] +2414-128291-0007-2696: ref=['WHO', 'HATH', 'NOT', 'AT', 'PRESENT', 'HIS', 'HEART', 'HIS', 'MOUTH', 'AND', 'HIS', 'EYES', 'FULL', 'OF', 'DISGUST'] +2414-128291-0007-2696: hyp=['WHO', 'HAD', 'NOT', 'AT', 'PRESENT', 'HIS', 'HEART', 'HIS', 'MOUTH', 'AND', 'HIS', 'EYES', 'FULL', 'OF', 'DISGUST'] +2414-128291-0008-2697: ref=['THOU', 'ALSO', 'THOU', 'ALSO'] +2414-128291-0008-2697: hyp=['THOU', 'ALSO', 'THOU', 'ALSO'] +2414-128291-0009-2698: ref=['BUT', 'BEHOLD', 'THESE', 'KINE'] +2414-128291-0009-2698: hyp=['BUT', 'BEHOLD', 'HIS', 'KIND'] +2414-128291-0010-2699: ref=['THE', 'KINE', 'HOWEVER', 'GAZED', 'AT', 'IT', 'ALL', 'AND', 'WONDERED'] +2414-128291-0010-2699: hyp=['DECLINE', 'HOWEVER', 'GAZED', 'AT', 'IT', 'ALL', 'AND', 'WONDERED'] +2414-128291-0011-2700: ref=['WANTON', 'AVIDITY', 'BILIOUS', 'ENVY', 'CAREWORN', 'REVENGE', 'POPULACE', 'PRIDE', 'ALL', 'THESE', 'STRUCK', 'MINE', 'EYE'] +2414-128291-0011-2700: hyp=['WARRENTON', 'ALGITTEE', 'BILIOUS', 'ENVY', 'CAREWORN', 'REVENGE', 'POPULOUS', 'PRIDE', 'ALL', 'THIS', 'STRUCK', 'MIGHT', 'EYE'] +2414-128291-0012-2701: ref=['IT', 'IS', 'NO', 'LONGER', 'TRUE', 'THAT', 'THE', 
'POOR', 'ARE', 'BLESSED'] +2414-128291-0012-2701: hyp=['IT', 'IS', 'NO', 'LONGER', 'TRUE', 'NEITHER', 'POOR', 'ARE', 'BLESSED'] +2414-128291-0013-2702: ref=['THE', 'KINGDOM', 'OF', 'HEAVEN', 'HOWEVER', 'IS', 'WITH', 'THE', 'KINE', 'AND', 'WHY', 'IS', 'IT', 'NOT', 'WITH', 'THE', 'RICH'] +2414-128291-0013-2702: hyp=['THE', 'KINGDOM', 'OF', 'HEAVEN', 'HOWEVER', 'IS', 'WITH', 'A', 'KIND', 'AND', 'WHY', 'IS', 'IT', 'NOT', 'WITH', 'A', 'RICH'] +2414-128291-0014-2703: ref=['WHY', 'DOST', 'THOU', 'TEMPT', 'ME'] +2414-128291-0014-2703: hyp=['WHY', 'THOSE', 'THOU', 'TEMPT', 'ME'] +2414-128291-0015-2704: ref=['ANSWERED', 'THE', 'OTHER'] +2414-128291-0015-2704: hyp=['ANSWERED', 'HER'] +2414-128291-0016-2705: ref=['THOU', 'KNOWEST', 'IT', 'THYSELF', 'BETTER', 'EVEN', 'THAN', 'I'] +2414-128291-0016-2705: hyp=['THOU', 'KNOWEST', 'IT', 'THYSELF', 'BETTER', 'EVEN', 'THAN', 'I'] +2414-128291-0017-2706: ref=['THUS', 'SPAKE', 'THE', 'PEACEFUL', 'ONE', 'AND', 'PUFFED', 'HIMSELF', 'AND', 'PERSPIRED', 'WITH', 'HIS', 'WORDS', 'SO', 'THAT', 'THE', 'KINE', 'WONDERED', 'ANEW'] +2414-128291-0017-2706: hyp=['DOES', 'SPEAK', 'THE', 'BEATHFUL', 'ONE', 'AND', 'PUFFED', 'HIMSELF', 'AND', 'POSPIRED', 'WITH', 'HIS', 'WORDS', 'FERNED', 'A', 'KIND', 'WOUNDED', 'I', 'KNEW'] +2414-128291-0018-2707: ref=['THOU', 'DOEST', 'VIOLENCE', 'TO', 'THYSELF', 'THOU', 'PREACHER', 'ON', 'THE', 'MOUNT', 'WHEN', 'THOU', 'USEST', 'SUCH', 'SEVERE', 'WORDS'] +2414-128291-0018-2707: hyp=['THOU', 'DOEST', 'VIOLENCE', 'TO', 'THYSELF', 'THOU', 'PREACHER', 'ON', 'THE', 'MOUNT', 'AND', 'THOU', 'USEST', 'SUCH', 'SAVIOUR', 'WORDS'] +2414-128291-0019-2708: ref=['THEY', 'ALSO', 'ABSTAIN', 'FROM', 'ALL', 'HEAVY', 'THOUGHTS', 'WHICH', 'INFLATE', 'THE', 'HEART'] +2414-128291-0019-2708: hyp=['THEY', 'ALSO', 'ABSTAINED', 'FROM', 'ALL', 'HEAVY', 'TORCH', 'WHICH', 'INFLATE', 'THE', 'HEART'] +2414-128291-0020-2709: ref=['WELL'] +2414-128291-0020-2709: hyp=['WELL'] +2414-128291-0021-2710: ref=['SAID', 'ZARATHUSTRA', 'THOU', 'SHOULDST', 'ALSO', 'SEE', 'MINE', 'ANIMALS', 'MINE', 'EAGLE', 'AND', 'MY', 'SERPENT', 'THEIR', 'LIKE', 'DO', 'NOT', 'AT', 'PRESENT', 'EXIST', 'ON', 'EARTH'] +2414-128291-0021-2710: hyp=['SAYS', 'THEREUSTRA', 'THOU', 'SHOULDEST', 'ALSO', 'SEE', 'MY', 'ANIMALS', 'MIGHT', 'EAGLE', 'AND', 'MY', 'SERPENT', 'THEIR', 'LIKE', 'DO', 'NOT', 'AT', 'PRESENT', 'EXIST', 'ON', 'EARTH'] +2414-128291-0022-2711: ref=['AND', 'TALK', 'TO', 'MINE', 'ANIMALS', 'OF', 'THE', 'HAPPINESS', 'OF', 'ANIMALS'] +2414-128291-0022-2711: hyp=['AND', 'TALK', 'TO', 'MINE', 'ANIMALS', 'OF', 'THE', 'HAPPINESS', 'OF', 'ANIMALS'] +2414-128291-0023-2712: ref=['NOW', 'HOWEVER', 'TAKE', 'LEAVE', 'AT', 'ONCE', 'OF', 'THY', 'KINE', 'THOU', 'STRANGE', 'ONE'] +2414-128291-0023-2712: hyp=['NOW', 'HOWEVER', 'TAKE', 'LEAVE', 'AT', 'ONCE', 'OF', 'THEIR', 'KIND', 'THOU', 'STRANGE', 'ONE'] +2414-128291-0024-2713: ref=['THOU', 'AMIABLE', 'ONE'] +2414-128291-0024-2713: hyp=['THOU', 'ADMIABLE', 'ONE'] +2414-128291-0025-2714: ref=['FOR', 'THEY', 'ARE', 'THY', 'WARMEST', 'FRIENDS', 'AND', 'PRECEPTORS'] +2414-128291-0025-2714: hyp=['FOR', 'THEY', 'ARE', 'DIVORITES', 'AND', 'PERCEPTIVES'] +2414-128291-0026-2715: ref=['THOU', 'EVIL', 'FLATTERER'] +2414-128291-0026-2715: hyp=['THOU', 'A', 'SLATTERER'] +2414-128292-0000-2618: ref=['WHITHER', 'HATH', 'MY', 'LONESOMENESS', 'GONE', 'SPAKE', 'HE'] +2414-128292-0000-2618: hyp=['WHITHER', 'HAD', 'MY', 'LONESOME', 'DISCOUR', 'SPAKE', 'HE'] +2414-128292-0001-2619: ref=['MY', 'SHADOW', 'CALLETH', 'ME'] +2414-128292-0001-2619: hyp=['MY', 'SHADOW', 'CALLETH', 'ME'] 
+2414-128292-0002-2620: ref=['WHAT', 'MATTER', 'ABOUT', 'MY', 'SHADOW'] +2414-128292-0002-2620: hyp=['WHAT', 'MATTER', 'ABOUT', 'MY', 'SHADOW'] +2414-128292-0003-2621: ref=['LET', 'IT', 'RUN', 'AFTER', 'ME', 'I', 'RUN', 'AWAY', 'FROM', 'IT'] +2414-128292-0003-2621: hyp=['LET', 'IT', 'RUN', 'AFTER', 'ME', 'I', 'RAN', 'AWAY', 'FROM', 'IT'] +2414-128292-0004-2622: ref=['THUS', 'SPAKE', 'ZARATHUSTRA', 'TO', 'HIS', 'HEART', 'AND', 'RAN', 'AWAY'] +2414-128292-0004-2622: hyp=['THUS', 'BIG', 'OR', 'TWO', 'STRIKE', 'TO', 'HIS', 'HEART', 'AND', 'RAN', 'AWAY'] +2414-128292-0005-2623: ref=['VERILY', 'MY', 'FOLLY', 'HATH', 'GROWN', 'BIG', 'IN', 'THE', 'MOUNTAINS'] +2414-128292-0005-2623: hyp=['VERILY', 'MY', 'FOLLY', 'HATH', 'GROWN', 'BIG', 'IN', 'THE', 'MOUNTAINS'] +2414-128292-0006-2624: ref=['NOW', 'DO', 'I', 'HEAR', 'SIX', 'OLD', 'FOOLS', 'LEGS', 'RATTLING', 'BEHIND', 'ONE', 'ANOTHER'] +2414-128292-0006-2624: hyp=['NOW', 'DO', 'I', 'HEAR', 'SIX', 'OLD', 'FOOTS', 'LEGS', 'RATTLING', 'BEHIND', 'ONE', 'ANOTHER'] +2414-128292-0007-2625: ref=['BUT', 'DOTH', 'ZARATHUSTRA', 'NEED', 'TO', 'BE', 'FRIGHTENED', 'BY', 'HIS', 'SHADOW'] +2414-128292-0007-2625: hyp=['BY', 'DIRTS', 'ARTISTRA', 'NEED', 'TO', 'BE', 'FRIGHTENED', 'BY', 'A', 'SHADOW'] +2414-128292-0008-2626: ref=['ALSO', 'METHINKETH', 'THAT', 'AFTER', 'ALL', 'IT', 'HATH', 'LONGER', 'LEGS', 'THAN', 'MINE'] +2414-128292-0008-2626: hyp=['ALSO', 'METHINK', 'IT', 'THAT', 'AFTER', 'ALL', 'IT', 'HAD', 'LONGER', 'LESS', 'THAN', 'MINE'] +2414-128292-0009-2627: ref=['FOR', 'WHEN', 'ZARATHUSTRA', 'SCRUTINISED', 'HIM', 'WITH', 'HIS', 'GLANCE', 'HE', 'WAS', 'FRIGHTENED', 'AS', 'BY', 'A', 'SUDDEN', 'APPARITION', 'SO', 'SLENDER', 'SWARTHY', 'HOLLOW', 'AND', 'WORN', 'OUT', 'DID', 'THIS', 'FOLLOWER', 'APPEAR'] +2414-128292-0009-2627: hyp=['FOR', 'WHEN', 'THEY', 'ARE', 'TOO', 'STRETS', 'CRIED', 'HIM', 'WITH', 'HIS', 'GLANCE', 'HE', 'WAS', 'FRIGHTENED', 'AS', 'BY', 'ASSERTED', 'APPARITION', 'SO', 'SLENDER', 'SWARTHY', 'HOLLOW', 'AND', 'WORN', 'OUT', 'WITH', 'HIS', 'FOLLOWER', 'APPEARED'] +2414-128292-0010-2628: ref=['ASKED', 'ZARATHUSTRA', 'VEHEMENTLY', 'WHAT', 'DOEST', 'THOU', 'HERE'] +2414-128292-0010-2628: hyp=['I', 'DECK', 'TO', 'ESTRAVA', 'IMAGED', 'WHAT', 'DO', 'IS', 'THOU', 'HERE'] +2414-128292-0011-2629: ref=['AND', 'WHY', 'CALLEST', 'THOU', 'THYSELF', 'MY', 'SHADOW'] +2414-128292-0011-2629: hyp=['AND', 'WHY', 'CALLEST', 'THOU', 'THYSELF', 'MY', 'SHADOW'] +2414-128292-0012-2630: ref=['THOU', 'ART', 'NOT', 'PLEASING', 'UNTO', 'ME'] +2414-128292-0012-2630: hyp=['THOU', 'ART', 'NOT', 'PLEASING', 'IN', 'TO', 'ME'] +2414-128292-0013-2631: ref=['MUST', 'I', 'EVER', 'BE', 'ON', 'THE', 'WAY'] +2414-128292-0013-2631: hyp=['MUST', 'I', 'EVER', 'BE', 'ON', 'THE', 'WAY'] +2414-128292-0014-2632: ref=['O', 'EARTH', 'THOU', 'HAST', 'BECOME', 'TOO', 'ROUND', 'FOR', 'ME'] +2414-128292-0014-2632: hyp=['O', 'ART', 'THOU', 'HAST', 'BECOME', 'TO', 'ROUND', 'FOR', 'ME'] +2414-128292-0015-2633: ref=['WHEN', 'THE', 'DEVIL', 'CASTETH', 'HIS', 'SKIN', 'DOTH', 'NOT', 'HIS', 'NAME', 'ALSO', 'FALL', 'AWAY', 'IT', 'IS', 'ALSO', 'SKIN'] +2414-128292-0015-2633: hyp=['WITH', 'A', 'DEVIL', 'CAST', 'AT', 'HIS', 'SKIN', 'DOTH', 'NOT', 'HIS', 'NAME', 'ALSO', 'FALL', 'AWAY', 'IT', 'IS', 'ALSO', 'SKINNED'] +2414-128292-0016-2634: ref=['THE', 'DEVIL', 'HIMSELF', 'IS', 'PERHAPS', 'SKIN'] +2414-128292-0016-2634: hyp=['THE', 'DEVIL', 'HIMSELF', 'IS', 'PERHAPS', 'KIN'] +2414-128292-0017-2635: ref=['SOMETIMES', 'I', 'MEANT', 'TO', 'LIE', 'AND', 'BEHOLD'] +2414-128292-0017-2635: hyp=['SOMETIMES', 'I', 
'MEANT', 'TO', 'LIE', 'AND', 'BEHOLD'] +2414-128292-0018-2636: ref=['THEN', 'ONLY', 'DID', 'I', 'HIT', 'THE', 'TRUTH'] +2414-128292-0018-2636: hyp=['THEN', 'ALLEY', 'DID', 'I', 'HATE', 'THE', 'TRUTH'] +2414-128292-0019-2637: ref=['HOW', 'HAVE', 'I', 'STILL', 'INCLINATION'] +2414-128292-0019-2637: hyp=['HOW', 'HAVE', 'I', 'STILL', 'INCLINATION'] +2414-128292-0020-2638: ref=['HAVE', 'I', 'STILL', 'A', 'GOAL'] +2414-128292-0020-2638: hyp=['ERE', 'I', 'STILL', 'A', 'GOLD'] +2414-128292-0021-2639: ref=['A', 'HAVEN', 'TOWARDS', 'WHICH', 'MY', 'SAIL', 'IS', 'SET'] +2414-128292-0021-2639: hyp=['A', 'HEROIND', 'DOOR', 'SPREAD', 'MY', 'SAILORS', 'SAID'] +2414-128292-0022-2640: ref=['FOR', 'IT', 'DO', 'I', 'ASK', 'AND', 'SEEK', 'AND', 'HAVE', 'SOUGHT', 'BUT', 'HAVE', 'NOT', 'FOUND', 'IT'] +2414-128292-0022-2640: hyp=['FOR', 'IT', 'TOO', 'I', 'ASK', 'AND', 'SEEK', 'AND', 'HATH', 'THOUGHT', 'IT', 'HATH', 'NOT', 'FOUND', 'IT'] +2414-128292-0023-2641: ref=['O', 'ETERNAL', 'EVERYWHERE', 'O', 'ETERNAL', 'NOWHERE', 'O', 'ETERNAL', 'IN', 'VAIN'] +2414-128292-0023-2641: hyp=['OR', 'ETERNAL', 'EVERYWHERE', 'WHO', 'HAD', 'TURNED', 'OUT', 'NOWHERE', 'WHO', 'HAD', 'TURNED', 'IN', 'VAIN'] +2414-128292-0024-2642: ref=['THOU', 'ART', 'MY', 'SHADOW'] +2414-128292-0024-2642: hyp=['THOU', 'ART', 'MY', 'SHADOW'] +2414-128292-0025-2643: ref=['SAID', 'HE', 'AT', 'LAST', 'SADLY'] +2414-128292-0025-2643: hyp=['SAID', 'HE', 'ASSALY'] +2414-128292-0026-2644: ref=['THY', 'DANGER', 'IS', 'NOT', 'SMALL', 'THOU', 'FREE', 'SPIRIT', 'AND', 'WANDERER'] +2414-128292-0026-2644: hyp=['THY', 'DANGER', 'IS', 'MUCH', 'SMALL', 'THOU', 'FREE', 'SPIRIT', 'AND', 'WONDER'] +2414-128292-0027-2645: ref=['THEY', 'SLEEP', 'QUIETLY', 'THEY', 'ENJOY', 'THEIR', 'NEW', 'SECURITY'] +2414-128292-0027-2645: hyp=['DESLEY', 'QUIETLY', 'THEY', 'ENJOYED', 'THEIR', 'NEW', 'SECURITY'] +2414-128292-0028-2646: ref=['BEWARE', 'LEST', 'IN', 'THE', 'END', 'A', 'NARROW', 'FAITH', 'CAPTURE', 'THEE', 'A', 'HARD', 'RIGOROUS', 'DELUSION'] +2414-128292-0028-2646: hyp=['BE', 'REALIZED', 'IN', 'THE', 'END', 'A', 'NARROW', 'FATE', 'CAPTURED', 'THE', 'A', 'HARD', 'RECKLESS', 'ILLUSION'] +2414-128292-0029-2647: ref=['FOR', 'NOW', 'EVERYTHING', 'THAT', 'IS', 'NARROW', 'AND', 'FIXED', 'SEDUCETH', 'AND', 'TEMPTETH', 'THEE'] +2414-128292-0029-2647: hyp=['FOR', 'NOW', 'EVERYTHING', 'THAT', 'IS', 'NARROW', 'AND', 'FIXED', 'SEDUCE', 'IT', 'AND', 'TEMPT', 'IT', 'THEE'] +2414-128292-0030-2648: ref=['THOU', 'HAST', 'LOST', 'THY', 'GOAL'] +2414-128292-0030-2648: hyp=['THOU', 'HAST', 'LOST', 'DAGGOOD'] +2414-128292-0031-2649: ref=['THOU', 'POOR', 'ROVER', 'AND', 'RAMBLER', 'THOU', 'TIRED', 'BUTTERFLY'] +2414-128292-0031-2649: hyp=['THOUGH', 'POOR', 'ROVER', 'AND', 'RAMBLER', 'THOU', 'TIRED', 'BUT', 'TO', 'FLY'] +2414-128292-0032-2650: ref=['WILT', 'THOU', 'HAVE', 'A', 'REST', 'AND', 'A', 'HOME', 'THIS', 'EVENING'] +2414-128292-0032-2650: hyp=['WILT', 'THOU', 'HAVE', 'ARREST', 'AND', 'A', 'HOME', 'THIS', 'EVENING'] +2414-159411-0000-2653: ref=['ONCE', 'UPON', 'A', 'TIME', 'A', 'BRAHMAN', 'WHO', 'WAS', 'WALKING', 'ALONG', 'THE', 'ROAD', 'CAME', 'UPON', 'AN', 'IRON', 'CAGE', 'IN', 'WHICH', 'A', 'GREAT', 'TIGER', 'HAD', 'BEEN', 'SHUT', 'UP', 'BY', 'THE', 'VILLAGERS', 'WHO', 'CAUGHT', 'HIM'] +2414-159411-0000-2653: hyp=['WHENCE', 'A', 'WINTER', 'TIME', 'A', 'BRAHMAN', 'WHO', 'WAS', 'WALKING', 'ALONG', 'THE', 'ROAD', 'CAME', 'UPON', 'AN', 'IRON', 'CAGE', 'IN', 'WHICH', 'A', 'GREAT', 'TIGER', 'ADMIRED', 'UP', 'BY', 'THE', 'VILLAGES', 'WHO', 'CAUGHT', 'HIM'] +2414-159411-0001-2654: ref=['THE', 
'BRAHMAN', 'ANSWERED', 'NO', 'I', 'WILL', 'NOT', 'FOR', 'IF', 'I', 'LET', 'YOU', 'OUT', 'OF', 'THE', 'CAGE', 'YOU', 'WILL', 'EAT', 'ME'] +2414-159411-0001-2654: hyp=['THE', 'BRAMIN', 'ANSWERED', 'NO', 'I', 'WILL', 'NOT', 'FOR', 'IF', 'I', 'LET', 'YOU', 'OUT', 'OF', 'THE', 'CAGE', 'YOU', 'WILL', 'EAT', 'ME'] +2414-159411-0002-2655: ref=['OH', 'FATHER', 'OF', 'MERCY', 'ANSWERED', 'THE', 'TIGER', 'IN', 'TRUTH', 'THAT', 'I', 'WILL', 'NOT'] +2414-159411-0002-2655: hyp=['OH', 'FATHER', 'OF', 'MERCY', 'ANSWERED', 'THE', 'TIGER', 'IN', 'TRUTH', 'THAT', 'I', 'WILL', 'NOT'] +2414-159411-0003-2656: ref=['I', 'WILL', 'NEVER', 'BE', 'SO', 'UNGRATEFUL', 'ONLY', 'LET', 'ME', 'OUT', 'THAT', 'I', 'MAY', 'DRINK', 'SOME', 'WATER', 'AND', 'RETURN'] +2414-159411-0003-2656: hyp=['I', 'WILL', 'NEVER', 'BE', 'SO', 'UNGRATEFUL', 'ONLY', 'LET', 'ME', 'OUT', 'THAT', 'I', 'MAY', 'DRINK', 'SOME', 'WATER', 'AND', 'RETURN'] +2414-159411-0004-2657: ref=['THEN', 'THE', 'BRAHMAN', 'TOOK', 'PITY', 'ON', 'HIM', 'AND', 'OPENED', 'THE', 'CAGE', 'DOOR', 'BUT', 'NO', 'SOONER', 'HAD', 'HE', 'DONE', 'SO', 'THAN', 'THE', 'TIGER', 'JUMPING', 'OUT', 'SAID', 'NOW', 'I', 'WILL', 'EAT', 'YOU', 'FIRST', 'AND', 'DRINK', 'THE', 'WATER', 'AFTERWARDS'] +2414-159411-0004-2657: hyp=['AND', 'IN', 'THE', 'BRAMMING', 'TOOK', 'PITY', 'ON', 'HIM', 'AND', 'OPENED', 'THE', 'CAGE', 'BUT', 'NO', 'SOONER', 'HAD', 'HE', 'TURNED', 'SO', 'THAN', 'THE', 'TIGER', 'JUMPING', 'OUT', 'SAID', 'NOW', 'I', 'WILL', 'EAT', 'YOU', 'FIRST', 'AND', 'DRINK', 'THE', 'WATER', 'AFTERWARDS'] +2414-159411-0005-2658: ref=['SO', 'THE', 'BRAHMAN', 'AND', 'THE', 'TIGER', 'WALKED', 'ON', 'TILL', 'THEY', 'CAME', 'TO', 'A', 'BANYAN', 'TREE', 'AND', 'THE', 'BRAHMAN', 'SAID', 'TO', 'IT', 'BANYAN', 'TREE', 'BANYAN', 'TREE', 'HEAR', 'AND', 'GIVE', 'JUDGMENT'] +2414-159411-0005-2658: hyp=['SO', 'THE', 'BRAMID', 'AND', 'THE', 'TIGER', 'WALKED', 'ON', 'TILL', 'THEY', 'CAME', 'TO', 'A', 'BENDONED', 'TREE', 'AND', 'THE', 'BRAMMEN', 'SAID', 'TO', 'IT', 'BANNY', 'TREE', 'BANDREE', 'HERE', 'AND', 'GIVE', 'GERMAN'] +2414-159411-0006-2659: ref=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JUDGMENT', 'ASKED', 'THE', 'BANYAN', 'TREE'] +2414-159411-0006-2659: hyp=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JUDGMENT', 'ASKED', 'THE', 'BEN', 'TREE'] +2414-159411-0007-2660: ref=['THIS', 'TIGER', 'SAID', 'THE', 'BRAHMAN', 'BEGGED', 'ME', 'TO', 'LET', 'HIM', 'OUT', 'OF', 'HIS', 'CAGE', 'TO', 'DRINK', 'A', 'LITTLE', 'WATER', 'AND', 'HE', 'PROMISED', 'NOT', 'TO', 'HURT', 'ME', 'IF', 'I', 'DID', 'SO', 'BUT', 'NOW', 'THAT', 'I', 'HAVE', 'LET', 'HIM', 'OUT', 'HE', 'WISHES', 'TO', 'EAT', 'ME'] +2414-159411-0007-2660: hyp=['THE', 'STAGER', 'SAID', 'DE', 'BRAMIN', 'BEG', 'ME', 'TO', 'LET', 'HIM', 'OUT', 'OF', 'HIS', 'CAGE', 'TO', 'DRINK', 'A', 'LITTLE', 'WATER', 'AND', 'HE', 'PROMISED', 'NOT', 'TO', 'HURT', 'ME', 'IF', 'I', 'DID', 'SO', 'BUT', 'NOW', 'THAT', 'I', 'HAVE', 'LEFT', 'HIM', 'OUT', 'HE', 'WISHES', 'TO', 'EAT', 'ME'] +2414-159411-0008-2661: ref=['IS', 'IT', 'JUST', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NO'] +2414-159411-0008-2661: hyp=["IT'S", 'A', 'JEALOUS', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NO'] +2414-159411-0009-2662: ref=['LET', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'ARE', 'AN', 'UNGRATEFUL', 'RACE'] +2414-159411-0009-2662: hyp=['LAID', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'ARE', 'IN', 'UNGRATEFUL', 'RACE'] +2414-159411-0010-2663: ref=['SIR', 'CAMEL', 'SIR', 'CAMEL', 'CRIED', 'THE', 'BRAHMAN', 'HEAR', 'AND', 'GIVE', 'JUDGMENT'] +2414-159411-0010-2663: hyp=['SIR', 'CAMELO', 
'SIR', 'CAMEO', 'CRIED', 'THE', 'BRAHMAN', 'HERE', 'AND', 'GIVE', 'GEOGNANT'] +2414-159411-0011-2664: ref=['AT', 'A', 'LITTLE', 'DISTANCE', 'THEY', 'FOUND', 'A', 'BULLOCK', 'LYING', 'BY', 'THE', 'ROADSIDE'] +2414-159411-0011-2664: hyp=['AT', 'A', 'LITTLE', 'DISTANCE', 'THEY', 'FOUND', 'A', 'BULLOCK', 'LYING', 'BY', 'THE', 'ROADSIDE'] +2414-159411-0012-2665: ref=['IS', 'IT', 'FAIR', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NOT'] +2414-159411-0012-2665: hyp=['IS', 'IT', 'FAIR', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NOT'] +2414-159411-0013-2666: ref=['LET', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'HAVE', 'NO', 'PITY'] +2414-159411-0013-2666: hyp=['LED', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'HAVE', 'NO', 'PITY'] +2414-159411-0014-2667: ref=['THREE', 'OUT', 'OF', 'THE', 'SIX', 'HAD', 'GIVEN', 'JUDGMENT', 'AGAINST', 'THE', 'BRAHMAN', 'BUT', 'STILL', 'HE', 'DID', 'NOT', 'LOSE', 'ALL', 'HOPE', 'AND', 'DETERMINED', 'TO', 'ASK', 'THE', 'OTHER', 'THREE'] +2414-159411-0014-2667: hyp=['THREE', 'OUT', 'OF', 'THE', 'SIX', 'AND', 'GIVEN', 'JUDGMENT', 'AGAINST', 'THE', 'BRAHMEN', 'WITH', 'STILL', 'HE', 'DID', 'NOT', 'LOSE', 'ALL', 'HOPE', 'AND', 'DETERMINED', 'TO', 'ASK', 'THE', 'OTHER', 'THREE'] +2414-159411-0015-2668: ref=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JUDGMENT', 'ASKED', 'THE', 'EAGLE'] +2414-159411-0015-2668: hyp=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JULIET', 'ASKED', 'THE', 'EAGLE'] +2414-159411-0016-2669: ref=['THE', 'BRAHMAN', 'STATED', 'THE', 'CASE', 'AND', 'THE', 'EAGLE', 'ANSWERED', 'WHENEVER', 'MEN', 'SEE', 'ME', 'THEY', 'TRY', 'TO', 'SHOOT', 'ME', 'THEY', 'CLIMB', 'THE', 'ROCKS', 'AND', 'STEAL', 'AWAY', 'MY', 'LITTLE', 'ONES'] +2414-159411-0016-2669: hyp=['THE', 'BRAMIN', 'SUITED', 'THE', 'CASE', 'AND', 'THE', 'EAGLE', 'ANSWERED', 'WHENEVER', 'MEN', 'SEE', 'ME', 'THEY', 'TRY', 'TO', 'SHOOT', 'ME', 'DECLINED', 'THE', 'ROCKS', 'AND', 'STEED', 'AWAY', 'MY', 'FEW', 'ONES'] +2414-159411-0017-2670: ref=['THEN', 'THE', 'TIGER', 'BEGAN', 'TO', 'ROAR', 'AND', 'SAID', 'THE', 'JUDGMENT', 'OF', 'ALL', 'IS', 'AGAINST', 'YOU', 'O', 'BRAHMAN'] +2414-159411-0017-2670: hyp=['IN', 'THE', 'TIGER', 'BEGAN', 'TO', 'ROAR', 'AND', 'SAID', 'JUDGMENT', 'OF', 'ALL', 'IS', 'AGAINST', 'YOU', 'O', 'BRAHMIN'] +2414-159411-0018-2671: ref=['AFTER', 'THIS', 'THEY', 'SAW', 'AN', 'ALLIGATOR', 'AND', 'THE', 'BRAHMAN', 'RELATED', 'THE', 'MATTER', 'TO', 'HIM', 'HOPING', 'FOR', 'A', 'MORE', 'FAVORABLE', 'VERDICT'] +2414-159411-0018-2671: hyp=['AFTER', 'THIS', 'THEY', 'SAW', 'AN', 'ALLIGATOR', 'AND', 'THE', 'BRAMMER', 'RELATED', 'THE', 'MATTER', 'TO', 'HIM', 'HOPING', 'FOR', 'A', 'MORE', 'FAVOURABLE', 'VERDICT'] +2414-159411-0019-2672: ref=['BUT', 'THE', 'ALLIGATOR', 'SAID', 'WHENEVER', 'I', 'PUT', 'MY', 'NOSE', 'OUT', 'OF', 'THE', 'WATER', 'MEN', 'TORMENT', 'ME', 'AND', 'TRY', 'TO', 'KILL', 'ME'] +2414-159411-0019-2672: hyp=['WITH', 'AN', 'ADDER', 'TO', 'SIT', 'WHENEVER', 'I', 'PUT', 'MY', 'NOSE', 'OUT', 'OF', 'THE', 'WATER', 'MEANTIME', 'AND', 'ME', 'AND', 'TRIED', 'TO', 'KILL', 'ME'] +2414-159411-0020-2673: ref=['THE', 'BRAHMAN', 'GAVE', 'HIMSELF', 'UP', 'AS', 'LOST', 'BUT', 'AGAIN', 'HE', 'PRAYED', 'THE', 'TIGER', 'TO', 'HAVE', 'PATIENCE', 'AND', 'LET', 'HIM', 'ASK', 'THE', 'OPINION', 'OF', 'THE', 'SIXTH', 'JUDGE'] +2414-159411-0020-2673: hyp=['NO', 'BROWN', 'MEN', 'GAVE', 'HIMSELF', 'UP', 'AS', 'LOST', 'BUT', 'AGAIN', 'HE', 'PRAYED', 'THE', 'TIGER', 'TO', 'HAVE', 'PATIENCE', 'AND', 'LET', 'HIM', 'ASK', 'THE', 'OPINION', 'OF', 'THE', 'SIX', 'JUDGE'] +2414-159411-0021-2674: ref=['NOW', 'THE', 
'SIXTH', 'WAS', 'A', 'JACKAL'] +2414-159411-0021-2674: hyp=['ON', 'THE', 'SIXTH', 'WAS', 'A', 'JACKAL'] +2414-159411-0022-2675: ref=['THE', 'BRAHMAN', 'TOLD', 'HIS', 'STORY', 'AND', 'SAID', 'TO', 'HIM', 'UNCLE', 'JACKAL', 'UNCLE', 'JACKAL', 'SAY', 'WHAT', 'IS', 'YOUR', 'JUDGMENT'] +2414-159411-0022-2675: hyp=['THE', 'GRANDMOTHER', 'TOLD', 'HIS', 'STORY', 'AND', 'SAID', 'TO', 'HIM', 'UNCLE', 'JACKAL', 'AND', 'WILL', 'JACKAL', 'SAY', 'WHAT', 'IS', 'YOUR', 'JUDGMENT'] +2414-159411-0023-2676: ref=['SHOW', 'ME', 'THE', 'PLACE'] +2414-159411-0023-2676: hyp=['SHOW', 'ME', 'THE', 'PACE'] +2414-159411-0024-2677: ref=['WHEN', 'THEY', 'GOT', 'THERE', 'THE', 'JACKAL', 'SAID', 'NOW', 'BRAHMAN', 'SHOW', 'ME', 'EXACTLY', 'WHERE', 'YOU', 'STOOD'] +2414-159411-0024-2677: hyp=['AND', 'THE', 'COURT', 'DEER', 'THE', 'JACKAL', 'SAID', 'NABRAMIN', 'SHOW', 'ME', 'EXACTLY', 'WHERE', 'YOU', 'STOOD'] +2414-159411-0025-2678: ref=['EXACTLY', 'THERE', 'WAS', 'IT', 'ASKED', 'THE', 'JACKAL'] +2414-159411-0025-2678: hyp=['EXACTLY', 'THERE', 'WAS', 'IT', 'ASKED', 'THE', 'JACK', 'WHO'] +2414-159411-0026-2679: ref=['EXACTLY', 'HERE', 'REPLIED', 'THE', 'BRAHMAN'] +2414-159411-0026-2679: hyp=['EXACTLY', 'HERE', 'REPLIED', 'THE', 'PROMIN'] +2414-159411-0027-2680: ref=['WHERE', 'WAS', 'THE', 'TIGER', 'THEN'] +2414-159411-0027-2680: hyp=['THERE', 'WAS', 'THE', 'TIGER', 'THEN'] +2414-159411-0028-2681: ref=['WHY', 'I', 'STOOD', 'SO', 'SAID', 'THE', 'TIGER', 'JUMPING', 'INTO', 'THE', 'CAGE', 'AND', 'MY', 'HEAD', 'WAS', 'ON', 'THIS', 'SIDE'] +2414-159411-0028-2681: hyp=['WHY', 'I', 'STOOD', 'SO', 'SAID', 'THE', 'TIGER', 'JUMPING', 'INTO', 'THE', 'CAGE', 'AND', 'MY', 'HEAD', 'WAS', 'ON', 'THIS', 'SIDE'] +2414-159411-0029-2682: ref=['VERY', 'GOOD', 'SAID', 'THE', 'JACKAL', 'BUT', 'I', 'CANNOT', 'JUDGE', 'WITHOUT', 'UNDERSTANDING', 'THE', 'WHOLE', 'MATTER', 'EXACTLY'] +2414-159411-0029-2682: hyp=['VERY', 'GOOD', 'SAID', 'TO', 'JACK', 'HOO', 'BUT', 'I', 'CANNOT', 'JUDGE', 'WITHOUT', 'UNDERSTANDING', 'THE', 'WHOLE', 'MATTER', 'EXACTLY'] +2414-159411-0030-2683: ref=['SHUT', 'AND', 'BOLTED', 'SAID', 'THE', 'BRAHMAN'] +2414-159411-0030-2683: hyp=['SHUT', 'AND', 'BOLTED', 'SAID', 'DEBRAMIN'] +2414-159411-0031-2684: ref=['THEN', 'SHUT', 'AND', 'BOLT', 'IT', 'SAID', 'THE', 'JACKAL'] +2414-159411-0031-2684: hyp=['THEN', 'SHET', 'AND', 'BOLD', 'IT', 'SAID', 'TO', 'JACK', 'HO'] +2414-159411-0032-2685: ref=['WHEN', 'THE', 'BRAHMAN', 'HAD', 'DONE', 'THIS', 'THE', 'JACKAL', 'SAID', 'OH', 'YOU', 'WICKED', 'AND', 'UNGRATEFUL', 'TIGER'] +2414-159411-0032-2685: hyp=['WHEN', 'THE', 'BRAHMAN', 'HAD', 'TURNED', 'THIS', 'THE', 'JACKAL', 'SAID', 'OH', 'YOU', 'WICKED', 'AND', 'UNGRATEFUL', 'TIRE'] +2414-159411-0033-2686: ref=['WHEN', 'THE', 'GOOD', 'BRAHMAN', 'OPENED', 'YOUR', 'CAGE', 'DOOR', 'IS', 'TO', 'EAT', 'HIM', 'THE', 'ONLY', 'RETURN', 'YOU', 'WOULD', 'MAKE'] +2414-159411-0033-2686: hyp=['WITH', 'A', 'GOOD', 'BRAM', 'IN', 'OPEN', 'YOU', 'CAME', 'TO', 'HER', 'IS', 'TO', 'EAT', 'HIM', 'THE', 'ONLY', 'RETURN', 'YOU', 'WOULD', 'MAKE'] +2414-159411-0034-2687: ref=['PROCEED', 'ON', 'YOUR', 'JOURNEY', 'FRIEND', 'BRAHMAN'] +2414-159411-0034-2687: hyp=['PROCEED', 'ON', 'YOUR', 'JOURNEY', 'FRIN'] +2414-159411-0035-2688: ref=['YOUR', 'ROAD', 'LIES', 'THAT', 'WAY', 'AND', 'MINE', 'THIS'] +2414-159411-0035-2688: hyp=['HE', 'RULED', 'LIVES', 'THAT', 'WAY', 'IN', 'MIND', 'THIS'] +2414-165385-0000-2651: ref=['THUS', 'ACCOMPLISHED', 'HE', 'EXCITED', 'THE', 'ADMIRATION', 'OF', 'EVERY', 'SILLY', 'COQUETTE', 'AND', 'THE', 'ENVY', 'OF', 'EVERY', 'FLUTTERING', 'COXCOMB', 
'BUT', 'BY', 'ALL', 'YOUNG', 'GENTLEMEN', 'AND', 'LADIES', 'OF', 'UNDERSTANDING', 'HE', 'WAS', 'HEARTILY', 'DESPISED', 'AS', 'A', 'MERE', 'CIVILIZED', 'MONKEY'] +2414-165385-0000-2651: hyp=['THUS', 'ACCOMPLISHED', 'EXCITED', 'ADMIRATION', 'OF', 'EVERY', 'SILLY', 'COCKET', 'AND', 'THE', 'ENVY', 'OF', 'EVERY', 'FACTIVE', 'ACCOUNT', 'BUT', 'BY', 'ALL', 'YOUNG', 'GENTLEMEN', 'AND', 'LADIES', 'OF', 'UNDERSTANDING', 'HE', 'WAS', 'HEARTILY', 'DESPISED', 'AS', 'A', 'MERE', 'CIVILIZED', 'MONKEY'] +2414-165385-0001-2652: ref=['THAT', 'HIS', 'SOUL', 'MIGHT', 'AFTERWARDS', 'OCCUPY', 'SUCH', 'A', 'STATION', 'AS', 'WOULD', 'BE', 'MOST', 'SUITABLE', 'TO', 'HIS', 'CHARACTER', 'IT', 'WAS', 'SENTENCED', 'TO', 'INHABIT', 'THE', 'BODY', 'OF', 'THAT', 'FINICAL', 'GRINNING', 'AND', 'MISCHIEVOUS', 'LITTLE', 'MIMICK', 'WITH', 'FOUR', 'LEGS', 'WHICH', 'YOU', 'NOW', 'BEHOLD', 'BEFORE', 'YOU'] +2414-165385-0001-2652: hyp=['THAT', 'HIS', 'SOUL', 'MIGHT', 'AFTERWARDS', 'OCCUPY', 'SUCH', 'A', 'STATION', 'AS', 'WOULD', 'BE', 'MOST', 'SUITABLE', 'TO', 'HIS', 'CHARACTER', 'IT', 'WAS', 'INTENSE', 'TO', 'INHABIT', 'A', 'BODY', 'OF', 'THAT', 'FINNICAL', 'GRINNING', 'AND', 'MISCHIEVOUS', 'LITTLE', 'MIMIC', 'WITH', 'FULL', 'LEGS', 'WHICH', 'YOU', 'NOW', 'BEHOLD', 'BEFORE', 'YOU'] +2609-156975-0000-2367: ref=['THEN', 'MOSES', 'WAS', 'AFRAID', 'AND', 'SAID', 'SURELY', 'THE', 'THING', 'IS', 'KNOWN'] +2609-156975-0000-2367: hyp=['THEN', 'MOSES', 'WAS', 'AFRAID', 'AND', 'SAID', 'SURELY', 'THE', 'THING', 'IS', 'KNOWN'] +2609-156975-0001-2368: ref=['HOLD', 'ON', 'HOLD', 'FAST', 'HOLD', 'OUT', 'PATIENCE', 'IS', 'GENIUS'] +2609-156975-0001-2368: hyp=['ERON', 'HER', 'FATS', 'ODOU', 'PATENTS', 'HIS', 'GENIUS'] +2609-156975-0002-2369: ref=['LET', 'US', 'HAVE', 'FAITH', 'THAT', 'RIGHT', 'MAKES', 'MIGHT', 'AND', 'IN', 'THAT', 'FAITH', 'LET', 'US', 'DARE', 'TO', 'DO', 'OUR', 'DUTY', 'AS', 'WE', 'UNDERSTAND', 'IT', 'LINCOLN'] +2609-156975-0002-2369: hyp=['LET', 'US', 'HAVE', 'FAITH', 'THAT', 'RIGHT', 'MATRON', 'MIGHT', 'AND', 'IN', 'THAT', 'FAITH', 'LET', 'STARED', 'TO', 'DO', 'OUR', 'DUTY', 'AS', 'WE', 'UNDERSTAND', 'IT', 'LINCOLN'] +2609-156975-0003-2370: ref=['THE', 'EGYPTIAN', 'BACKGROUND', 'OF', 'THE', 'BONDAGE'] +2609-156975-0003-2370: hyp=['THE', 'EGYPTIAN', 'BACKGROUND', 'OF', 'THE', 'BONDAGE'] +2609-156975-0004-2371: ref=['EVERY', 'ONE', 'WHO', 'IS', 'TURBULENT', 'HAS', 'BEEN', 'FOUND', 'BY', 'KING', 'MERNEPTAH', 'THE', 'TESTIMONY', 'OF', 'THE', 'OLDEST', 'BIBLICAL', 'NARRATIVES', 'REGARDING', 'THE', 'SOJOURN', 'OF', 'THE', 'HEBREWS', 'IN', 'EGYPT', 'IS', 'ALSO', 'IN', 'PERFECT', 'ACCORD', 'WITH', 'THE', 'PICTURE', 'WHICH', 'THE', 'CONTEMPORARY', 'EGYPTIAN', 'INSCRIPTIONS', 'GIVE', 'OF', 'THE', 'PERIOD'] +2609-156975-0004-2371: hyp=['EVERY', 'ONE', 'WHOSE', 'TURBOT', 'HAS', 'BEEN', 'FOUND', 'BY', 'KING', 'MARNET', 'PATH', 'DETACHEMONY', 'AS', 'THE', 'OLDEST', 'BAPLICO', 'NARRATIVE', 'REGARDING', 'THE', 'SOJOURN', 'OF', 'THE', 'HEBREWS', 'IN', 'EGYPT', 'IS', 'ALSO', 'IMPERFECT', 'ACCORD', 'WITH', 'THE', 'PICTURE', 'WHICH', 'THE', 'CONTEMPORARY', 'EGYPTIAN', 'SCRIPTIONS', 'GIVE', 'THIS', 'PERIOD'] +2609-156975-0005-2372: ref=['THE', 'ABSENCE', 'OF', 'DETAILED', 'REFERENCE', 'TO', 'THE', 'HEBREWS', 'IS', 'THEREFORE', 'PERFECTLY', 'NATURAL'] +2609-156975-0005-2372: hyp=['THE', 'ABSENCE', 'OF', 'THE', 'DETAILED', 'REFERENCES', 'THE', 'HEBREWS', 'IS', 'THEREFORE', 'PERFECTLY', 'NATURAL'] +2609-156975-0006-2373: ref=['IT', 'SEEMS', 'PROBABLE', 'THAT', 'NOT', 'ALL', 'BUT', 'ONLY', 'PART', 'OF', 'THE', 'TRIBES', 'WHICH', 'ULTIMATELY', 
'COALESCED', 'INTO', 'THE', 'HEBREW', 'NATION', 'FOUND', 'THEIR', 'WAY', 'TO', 'EGYPT'] +2609-156975-0006-2373: hyp=['IT', 'SEEMS', 'PROBABLE', 'THAT', 'NOT', 'ALL', 'BUT', 'ONLY', 'PART', 'OF', 'THAT', 'TRIBES', 'WHICH', 'ULTIMATE', 'COVETTES', 'INTO', 'THE', 'HEBREW', 'NATION', 'FOUND', 'THEIR', 'WAY', 'TO', 'EGYPT'] +2609-156975-0007-2374: ref=['THE', 'STORIES', 'REGARDING', 'JOSEPH', 'THE', 'TRADITIONAL', 'FATHER', 'OF', 'EPHRAIM', 'AND', 'MANASSEH', 'IMPLY', 'THAT', 'THESE', 'STRONG', 'CENTRAL', 'TRIBES', 'POSSIBLY', 'TOGETHER', 'WITH', 'THE', 'SOUTHERN', 'TRIBES', 'OF', 'BENJAMIN', 'AND', 'JUDAH', 'WERE', 'THE', 'CHIEF', 'ACTORS', 'IN', 'THIS', 'OPENING', 'SCENE', 'IN', "ISRAEL'S", 'HISTORY'] +2609-156975-0007-2374: hyp=['THE', 'STORIES', 'REGARDING', 'JOSEPH', 'THE', 'TRADITIONAL', 'FOUND', 'THEIR', 'ATRONE', 'AND', 'MANOT', 'SAY', 'INCLINE', 'THAT', 'THESE', 'STRONG', 'CENTRAL', 'TRIBES', 'POSSIBLY', 'TOGETHER', 'WITH', 'THE', 'SOUTHERN', 'TRINES', 'OF', 'BINTAMEN', 'AND', 'JUDAH', 'WHERE', 'THE', 'CHIEF', 'ACTORS', 'WHO', 'THAT', 'SOMETHING', 'SCENE', 'IN', "ISRAEL'S", 'HISTORY'] +2609-156975-0008-2375: ref=['THE', 'BIBLICAL', 'NARRATIVES', 'APPARENTLY', 'DISAGREE', 'REGARDING', 'THE', 'DURATION', 'OF', 'THE', 'SOJOURN', 'IN', 'EGYPT'] +2609-156975-0008-2375: hyp=['THE', 'BEVOCO', 'NARRATIVES', 'APPARENTLY', 'DISAGRATING', 'GUARDING', 'THE', 'DIRECTION', 'OF', 'THE', 'SAJOURN', 'IN', 'EGYPT'] +2609-156975-0009-2376: ref=['THE', 'LATER', 'TRADITIONS', 'TEND', 'TO', 'EXTEND', 'THE', 'PERIOD'] +2609-156975-0009-2376: hyp=['THE', 'LATER', 'JUDICINT', 'INTERESTING', 'THE', 'PERIOD'] +2609-156975-0010-2377: ref=['HERE', 'WERE', 'FOUND', 'SEVERAL', 'INSCRIPTIONS', 'BEARING', 'THE', 'EGYPTIAN', 'NAME', 'OF', 'THE', 'CITY', 'P', 'ATUM', 'HOUSE', 'OF', 'THE', 'GOD', 'ATUM'] +2609-156975-0010-2377: hyp=['YOU', 'WERE', 'FOUND', 'SHEVARIN', 'SCRIPTIONS', 'BEARING', 'THE', 'EGYPTIAN', 'NAME', 'OF', 'THE', 'CITY', 'PATUM', 'OUTSIDE', 'THE', 'GOD', 'ANTUM'] +2609-156975-0011-2378: ref=['A', 'CONTEMPORARY', 'INSCRIPTION', 'ALSO', 'STATES', 'THAT', 'HE', 'FOUNDED', 'NEAR', 'PITHUM', 'THE', 'HOUSE', 'OF', 'RAMSES', 'A', 'CITY', 'WITH', 'A', 'ROYAL', 'RESIDENCE', 'AND', 'TEMPLES'] +2609-156975-0011-2378: hyp=['A', 'CONTEMPORARY', 'INSCRIPTION', 'ONCE', 'ESTATES', 'THAT', 'HE', 'FOUND', 'A', 'NEAR', 'PITTHAM', 'THE', 'HOUSE', 'OF', 'RAMESES', 'A', 'CITY', 'WITH', 'THE', 'ROYAL', 'RESIDENCE', 'AND', 'SIMPLES'] +2609-156975-0012-2379: ref=['THAT', 'THE', 'HEBREWS', 'WERE', 'RESTIVE', 'UNDER', 'THIS', 'TYRANNY', 'WAS', 'NATURAL', 'INEVITABLE'] +2609-156975-0012-2379: hyp=['THAT', 'THE', 'HEBREWS', 'WERE', 'RENTS', 'OF', 'UNDER', 'THIS', 'SOON', 'WAS', 'NATURALLY', 'INEVITABLE'] +2609-156975-0013-2380: ref=['WAS', 'ANY', 'OTHER', 'PROCEDURE', 'TO', 'BE', 'EXPECTED', 'FROM', 'A', 'DESPOTIC', 'RULER', 'OF', 'THAT', 'LAND', 'AND', 'DAY'] +2609-156975-0013-2380: hyp=['WAS', 'ANY', 'OTHER', 'PROCEDURE', 'TO', 'BE', 'SPECTRE', 'FROM', 'IT', 'THAT', 'SPONNET', 'ROAR', 'OF', 'THAT', 'LAND', 'AND', 'DAY'] +2609-156975-0014-2381: ref=['THE', 'MAKING', 'OF', 'A', 'LOYAL', 'PATRIOT'] +2609-156975-0014-2381: hyp=['THE', 'MAKING', 'OF', 'THE', 'LOYAL', 'PATRIOT'] +2609-156975-0015-2382: ref=['THE', 'STORY', 'OF', 'MOSES', 'BIRTH', 'AND', 'EARLY', 'CHILDHOOD', 'IS', 'ONE', 'OF', 'THE', 'MOST', 'INTERESTING', 'CHAPTERS', 'IN', 'BIBLICAL', 'HISTORY'] +2609-156975-0015-2382: hyp=['THE', 'STORY', 'OF', 'MOSES', 'BERTH', 'AN', 'EARLY', 'CHILDHOOD', 'IS', 'ONE', 'OF', 'THE', 'MOST', 'INTERESTING', 'CHAPTERS', 'IN', 'BEPPOCO', 
'HISTORY'] +2609-156975-0016-2383: ref=['WAS', 'MOSES', 'JUSTIFIED', 'IN', 'RESISTING', 'THE', 'EGYPTIAN', 'TASKMASTER'] +2609-156975-0016-2383: hyp=['WISE', 'MOVES', "IT'S", 'JUST', 'FUN', 'AND', 'RESISTS', 'IN', 'THE', 'EGYPTIAN', 'TAX', 'MASTER'] +2609-156975-0017-2384: ref=['IS', 'PEONAGE', 'ALWAYS', 'DISASTROUS', 'NOT', 'ONLY', 'TO', 'ITS', 'VICTIMS', 'BUT', 'ALSO', 'TO', 'THE', 'GOVERNMENT', 'IMPOSING', 'IT'] +2609-156975-0017-2384: hyp=['HIS', 'PINION', 'ALWAYS', 'DISASTRATES', 'NOT', 'OWING', 'TO', 'ITS', 'VICTIMS', 'BUT', 'ALSO', 'TO', 'THE', 'GOVERNMENT', 'IMPOSING', 'IT'] +2609-156975-0018-2385: ref=['NATURALLY', 'HE', 'WENT', 'TO', 'THE', 'LAND', 'OF', 'MIDIAN'] +2609-156975-0018-2385: hyp=['NATURALLY', 'HE', 'WENT', 'TO', 'THE', 'LAND', 'OMIDIAN'] +2609-156975-0019-2386: ref=['THE', 'WILDERNESS', 'TO', 'THE', 'EAST', 'OF', 'EGYPT', 'HAD', 'FOR', 'CENTURIES', 'BEEN', 'THE', 'PLACE', 'OF', 'REFUGE', 'FOR', 'EGYPTIAN', 'FUGITIVES'] +2609-156975-0019-2386: hyp=['THE', 'WILDERNESS', 'TO', 'THE', 'EAST', 'OF', 'EGYPT', 'AND', 'FOR', 'CENTURIES', 'BEEN', 'THE', 'PLATE', 'OF', 'REFUGERY', 'EGYPTIAN', 'FUGITIVE'] +2609-156975-0020-2387: ref=['FROM', 'ABOUT', 'TWO', 'THOUSAND', 'B', 'C'] +2609-156975-0020-2387: hyp=['FROM', 'A', 'BOUT', 'TWO', 'THOUSAND', 'C'] +2609-156975-0021-2388: ref=['ON', 'THE', 'BORDERS', 'OF', 'THE', 'WILDERNESS', 'HE', 'FOUND', 'CERTAIN', 'BEDOUIN', 'HERDSMEN', 'WHO', 'RECEIVED', 'HIM', 'HOSPITABLY'] +2609-156975-0021-2388: hyp=['ON', 'THE', 'BORDERS', 'OF', 'THE', 'WIDERNESS', 'HE', 'FOUND', 'CERTAIN', 'BEDOING', 'HERDSMEN', 'WHO', 'RECEIVED', 'HIM', 'HOW', 'SPEEDABLY'] +2609-156975-0022-2389: ref=['THESE', 'SAND', 'WANDERERS', 'SENT', 'HIM', 'ON', 'FROM', 'TRIBE', 'TO', 'TRIBE', 'UNTIL', 'HE', 'REACHED', 'THE', 'LAND', 'OF', 'KEDEM', 'EAST', 'OF', 'THE', 'DEAD', 'SEA', 'WHERE', 'HE', 'REMAINED', 'FOR', 'A', 'YEAR', 'AND', 'A', 'HALF'] +2609-156975-0022-2389: hyp=['THESE', 'SEND', 'WONDERS', 'SENT', 'HIM', 'ON', 'FROM', 'TIME', 'TO', 'TRIUMPH', 'INTO', 'A', 'REACH', 'THE', 'LAND', 'OF', 'KIEDAM', 'EACH', 'OF', 'THE', 'DEAD', 'SEA', 'WHERE', 'HE', 'REMAINED', 'FOR', 'A', 'YEAR', 'AND', 'A', 'HALF'] +2609-156975-0023-2390: ref=['LATER', 'HE', 'FOUND', 'HIS', 'WAY', 'TO', 'THE', 'COURT', 'OF', 'ONE', 'OF', 'THE', 'LOCAL', 'KINGS', 'IN', 'CENTRAL', 'PALESTINE', 'WHERE', 'HE', 'MARRIED', 'AND', 'BECAME', 'IN', 'TIME', 'A', 'PROSPEROUS', 'LOCAL', 'PRINCE'] +2609-156975-0023-2390: hyp=['LATER', 'HE', 'FOUND', 'HIS', 'WAY', 'TO', 'THE', 'COURT', 'OF', 'ONE', 'OF', 'THE', 'LOCAL', 'KINGS', 'AND', 'CENTRAL', 'PALASTEIN', 'WHERE', 'HE', 'MARRIED', 'AND', 'BECAME', 'IN', 'THE', 'TIME', 'A', 'PROSPEROUS', 'LOCAL', 'PRINCE'] +2609-156975-0024-2391: ref=['THE', 'SCHOOL', 'OF', 'THE', 'WILDERNESS'] +2609-156975-0024-2391: hyp=['THE', 'SCHOOL', 'AND', 'THE', 'WEARINESS'] +2609-156975-0025-2392: ref=['THE', 'STORY', 'OF', 'MOSES', 'IS', 'IN', 'MANY', 'WAYS', 'CLOSELY', 'PARALLEL', 'TO', 'THAT', 'OF', 'SINUHIT'] +2609-156975-0025-2392: hyp=['THE', 'STORY', 'MOSES', 'IS', 'IN', 'MANY', 'WAYS', 'CLOSELY', 'PARALLEL', 'TO', 'THAT', 'AS', 'SOON', 'WIT'] +2609-156975-0026-2393: ref=['THE', 'PRIEST', 'OF', 'THE', 'SUB', 'TRIBE', 'OF', 'THE', 'KENITES', 'RECEIVED', 'HIM', 'INTO', 'HIS', 'HOME', 'AND', 'GAVE', 'HIM', 'HIS', 'DAUGHTER', 'IN', 'MARRIAGE'] +2609-156975-0026-2393: hyp=['THE', 'PRIEST', 'OF', 'THE', 'SUBTERRAB', 'OF', 'THE', 'KANITE', 'RECEIVED', 'HIM', 'INTO', 'HIS', 'HOME', 'AND', 'GAVE', 'HIM', 'HIS', 'DAUGHTER', 'IN', 'MARRIAGE'] +2609-156975-0027-2394: ref=['NOTE', 
'THE', 'CHARACTERISTIC', 'ORIENTAL', 'IDEA', 'OF', 'MARRIAGE'] +2609-156975-0027-2394: hyp=['NOTE', 'THE', 'CARE', 'OF', 'RIDICT', 'ORIENTAL', 'AND', 'GIVE', "MARY'S"] +2609-156975-0028-2395: ref=['HERE', 'MOSES', 'LEARNED', 'THE', 'LESSONS', 'THAT', 'WERE', 'ESSENTIAL', 'FOR', 'HIS', 'TRAINING', 'AS', 'THE', 'LEADER', 'AND', 'DELIVERER', 'OF', 'HIS', 'PEOPLE'] +2609-156975-0028-2395: hyp=['HERE', 'MOSES', 'LEARNS', 'THAT', 'LESSONS', 'THAT', 'WERE', 'ESSENTIAL', 'FOR', 'HIS', 'TRAINING', 'AS', 'A', 'LEADER', 'AND', 'DELIVER', 'OF', 'HIS', 'PEOPLE'] +2609-156975-0029-2396: ref=['AFTER', 'THE', 'CAPTURE', 'OF', 'JERICHO', 'CERTAIN', 'OF', 'THEM', 'WENT', 'UP', 'WITH', 'THE', 'SOUTHERN', 'TRIBES', 'TO', 'CONQUER', 'SOUTHERN', 'PALESTINE'] +2609-156975-0029-2396: hyp=['ANSWERED', 'THE', 'CAPTURE', 'OF', 'JERICHO', 'CERTAIN', 'OF', 'THEM', 'WENT', 'UP', 'WITH', 'A', 'SUDDEN', 'TRIUMPHS', 'WHO', 'CONCUR', 'SOUTHERN', 'PALESTINE'] +2609-156975-0030-2397: ref=['MANY', 'MODERN', 'SCHOLARS', 'DRAW', 'THE', 'CONCLUSION', 'FROM', 'THE', 'BIBLICAL', 'NARRATIVE', 'THAT', 'IT', 'WAS', 'FROM', 'THE', 'KENITES', 'THAT', 'MOSES', 'FIRST', 'LEARNED', 'OF', 'YAHWEH', 'OR', 'AS', 'THE', 'DISTINCTIVE', 'NAME', 'OF', "ISRAEL'S", 'GOD', 'WAS', 'TRANSLATED', 'BY', 'LATER', 'JEWISH', 'SCRIBES', 'JEHOVAH'] +2609-156975-0030-2397: hyp=['MANY', 'MODERN', 'SCHOLARS', 'DRAWING', 'THE', 'CONCLUSION', 'FROM', 'THE', 'BIBBICAL', 'NARRATIVE', 'THAT', 'IT', 'WAS', 'FROM', 'THE', 'KENITE', 'SNAT', 'MOSES', 'FIRST', 'LEARNED', 'OF', 'YANAWAY', 'OR', 'AS', 'THE', 'DISTINCTIVE', 'NAME', 'OF', "ISRAEL'S", 'GONE', 'WAS', 'TRANSGRATED', 'BY', 'LATER', 'TO', 'ITS', 'GRIMES', 'JEHOVAH'] +2609-156975-0031-2398: ref=['DO', 'THE', 'EARLIEST', 'HEBREW', 'TRADITIONS', 'IMPLY', 'THAT', 'THE', 'ANCESTORS', 'OF', 'THE', 'ISRAELITES', 'WERE', 'WORSHIPPERS', 'OF', 'JEHOVAH'] +2609-156975-0031-2398: hyp=['DO', 'THE', 'OIETY', 'BERTRADIZANCE', 'IMPLY', 'THAT', 'INCES', 'OF', 'THE', 'ISRAITS', 'WERE', 'WORSE', 'SUPPOSED', 'OF', 'JEHOVAH'] +2609-156975-0032-2399: ref=['THE', 'TITLE', 'OF', 'HIS', 'FATHER', 'IN', 'LAW', 'IMPLIES', 'THAT', 'THIS', 'PRIEST', 'MINISTERED', 'AT', 'SOME', 'WILDERNESS', 'SANCTUARY'] +2609-156975-0032-2399: hyp=['THE', 'TANOV', 'IS', 'FUND', 'DE', 'MAU', 'IMPLIES', 'AT', 'THIS', 'PRIEST', 'MINISTER', 'AT', 'SOME', 'LITERN', 'SANCTUARY'] +2609-156975-0033-2400: ref=['MOSES', 'IN', 'THE', 'HOME', 'OF', 'THE', 'MIDIAN', 'PRIEST', 'WAS', 'BROUGHT', 'INTO', 'DIRECT', 'AND', 'CONSTANT', 'CONTACT', 'WITH', 'THE', 'JEHOVAH', 'WORSHIP'] +2609-156975-0033-2400: hyp=['MOSES', 'IN', 'THE', 'HOME', 'OF', 'THE', 'MENDIAN', 'PRIEST', 'WAS', 'BROUGHT', 'INTO', 'DIRECT', 'AND', 'CONSTANT', 'CONTACT', 'WITH', 'THE', 'JEHOVAH', 'WORSHIP'] +2609-156975-0034-2401: ref=['THE', 'CRUEL', 'FATE', 'OF', 'HIS', 'PEOPLE', 'AND', 'THE', 'PAINFUL', 'EXPERIENCE', 'IN', 'EGYPT', 'THAT', 'HAD', 'DRIVEN', 'HIM', 'INTO', 'THE', 'WILDERNESS', 'PREPARED', 'HIS', 'MIND', 'TO', 'RECEIVE', 'THIS', 'TRAINING'] +2609-156975-0034-2401: hyp=['THE', 'CRUEL', 'FATE', 'OF', 'HIS', 'PEOPLE', 'IN', 'THE', 'PAINFUL', 'EXPERIENCE', 'IN', 'EGYPT', 'THAT', 'HAD', 'DRIVEN', 'HIM', 'INTO', 'THE', 'WILDERNESS', 'PREPARED', 'HIS', 'MIND', 'TO', 'RECEIVE', 'THIS', 'TRAINING'] +2609-156975-0035-2402: ref=['HIS', 'QUEST', 'WAS', 'FOR', 'A', 'JUST', 'AND', 'STRONG', 'GOD', 'ABLE', 'TO', 'DELIVER', 'THE', 'OPPRESSED'] +2609-156975-0035-2402: hyp=['HIS', 'FRENCH', 'WAS', 'FOR', 'JETS', 'AND', 'STRONG', 'GOD', 'ABLE', 'TO', 'DRIVER', 'THE', 'OPPRESSED'] +2609-156975-0036-2403: 
ref=['THE', 'WILDERNESS', 'WITH', 'ITS', 'LURKING', 'FOES', 'AND', 'THE', 'EVER', 'PRESENT', 'DREAD', 'OF', 'HUNGER', 'AND', 'THIRST', 'DEEPENED', 'HIS', 'SENSE', 'OF', 'NEED', 'AND', 'OF', 'DEPENDENCE', 'UPON', 'A', 'POWER', 'ABLE', 'TO', 'GUIDE', 'THE', 'DESTINIES', 'OF', 'MEN'] +2609-156975-0036-2403: hyp=['THE', 'WIDENANCE', 'WITH', 'ITS', 'LURKING', 'FOES', 'AND', 'THE', 'EVER', 'PRESENT', 'DREAD', 'OF', 'HUNGER', 'AND', 'THIRST', 'DEEP', 'IN', 'DESCENTS', 'OF', 'NEED', 'AND', 'OF', 'DEPENDENCE', 'UPON', 'THE', 'POWER', 'ABLE', 'TO', 'GOD', 'THE', "DEBT'S", 'NEEDS', 'OF', 'MEN'] +2609-156975-0037-2404: ref=['THE', 'PEASANTS', 'OF', 'THE', 'VAST', 'ANTOLIAN', 'PLAIN', 'IN', 'CENTRAL', 'ASIA', 'MINOR', 'STILL', 'CALL', 'EVERY', 'LIFE', 'GIVING', 'SPRING', 'GOD', 'HATH', 'GIVEN'] +2609-156975-0037-2404: hyp=['THE', 'PEASANTS', 'OF', 'THE', 'VATS', 'IN', 'TOWING', 'IN', 'PLAIN', 'OF', 'CENTRAL', 'AS', 'A', 'MINOR', 'SO', 'WILL', 'CALL', 'EVERY', 'LIFE', 'GIVEN', 'SPRING', 'GOD', 'HATH', 'GIVEN'] +2609-156975-0038-2405: ref=['THE', 'CONSTANT', 'NECESSITY', 'OF', 'MEETING', 'THE', 'DANGERS', 'OF', 'THE', 'WILDERNESS', 'AND', 'OF', 'DEFENDING', 'THE', 'FLOCKS', 'ENTRUSTED', 'TO', 'MOSES', 'CARE', 'DEVELOPED', 'HIS', 'COURAGE', 'AND', 'POWER', 'OF', 'LEADERSHIP', 'AND', 'ACTION'] +2609-156975-0038-2405: hyp=['THEY', "CAN'T", 'SENT', 'THE', 'NECESSITY', 'A', 'MEETING', 'THE', 'DANGERS', 'OF', 'THE', 'WIDERNESS', 'AND', 'THE', 'DEFENDING', 'THE', 'FLAUNT', 'AND', "TRITESYMOSA'S", 'CARE', 'DEVELOPED', 'HIS', 'COURAGE', 'AND', 'POWER', 'OF', 'LEGERSHIP', 'AND', 'ACTION'] +2609-157645-0000-2352: ref=['EVIDENTLY', 'THE', 'INTENTION', 'WAS', 'TO', 'MAKE', 'THINGS', 'PLEASANT', 'FOR', 'THE', 'ROYAL', 'FOE', 'OF', 'TOBACCO', 'DURING', 'HIS', 'VISIT'] +2609-157645-0000-2352: hyp=['EVIDENTLY', 'THE', 'INTENTION', 'WHICH', 'MADE', 'THINGS', 'PRESENT', 'FOR', 'THE', 'ROYAL', 'FOLK', 'TOBACCO', 'DURING', 'HIS', 'VISIT'] +2609-157645-0001-2353: ref=['THE', 'PROHIBITION', 'IN', 'THE', 'REGULATION', 'QUOTED', 'OF', 'SMOKING', 'IN', 'SAINT', "MARY'S", 'CHURCH', 'REFERRED', 'IT', 'MAY', 'BE', 'NOTED', 'TO', 'THE', 'ACT', 'WHICH', 'WAS', 'HELD', 'THEREIN'] +2609-157645-0001-2353: hyp=['THE', "PROB'S", 'THE', 'REGULATING', 'QUOTED', 'HER', 'SMOKING', 'AND', 'SAINT', "MARY'S", 'CHURCH', 'REFERRED', 'IT', 'MAY', 'BE', 'NOTED', 'TO', 'THE', 'ACT', 'WHICH', 'WAS', 'HELD', 'THEREIN'] +2609-157645-0002-2354: ref=['SOMETIMES', 'TOBACCO', 'WAS', 'USED', 'IN', 'CHURCH', 'FOR', 'DISINFECTING', 'OR', 'DEODORIZING', 'PURPOSES'] +2609-157645-0002-2354: hyp=['SOMETIMES', 'TOBACCO', 'IS', 'USED', 'IN', 'CHURCH', 'FOR', 'DISINFACT', 'AND', 'ORDEALIZING', 'PURPOSES'] +2609-157645-0003-2355: ref=['BLACKBURN', 'ARCHBISHOP', 'OF', 'YORK', 'WAS', 'A', 'GREAT', 'SMOKER'] +2609-157645-0003-2355: hyp=['BRACKBURN', 'ARCHBISHOP', 'OF', 'YORK', 'WAS', 'A', 'GREAT', 'SMOKER'] +2609-157645-0004-2356: ref=['ON', 'ONE', 'OCCASION', 'HE', 'WAS', 'AT', 'SAINT', "MARY'S", 'CHURCH', 'NOTTINGHAM', 'FOR', 'A', 'CONFIRMATION'] +2609-157645-0004-2356: hyp=['ON', 'ONE', 'OCCASION', 'HE', 'WAS', 'AT', 'SAINT', "MARY'S", 'CHURCH', 'NINETEEN', 'HAM', 'FOR', 'A', 'CONFIRMATON'] +2609-157645-0005-2357: ref=['ANOTHER', 'EIGHTEENTH', 'CENTURY', 'CLERICAL', 'WORTHY', 'THE', 'FAMOUS', 'DOCTOR', 'PARR', 'AN', 'INVETERATE', 'SMOKER', 'WAS', 'ACCUSTOMED', 'TO', 'DO', 'WHAT', 'MISTER', 'DISNEY', 'PREVENTED', 'ARCHBISHOP', 'BLACKBURN', 'FROM', 'DOING', 'HE', 'SMOKED', 'IN', 'HIS', 'VESTRY', 'AT', 'HATTON'] +2609-157645-0005-2357: hyp=['ANOTHER', 'EIGHTEENTH', 
'CENTURY', 'CLERICAL', 'WORTHY', 'THE', 'FAMOUS', 'DOCTRIPOS', 'AN', 'INVETERATE', 'SMOKER', 'WAS', 'ACCUSTOMED', 'TO', 'DO', 'AT', 'MIDSR', 'DYSNEY', 'PREVENTED', 'ARCHBISH', 'AT', 'BLACKBURN', 'FROM', 'DOING', 'HE', 'SMOKED', 'IN', 'HIS', 'VETCHERY', 'AT', 'HATTON'] +2609-157645-0006-2358: ref=['PARR', 'WAS', 'SUCH', 'A', 'CONTINUAL', 'SMOKER', 'THAT', 'ANYONE', 'WHO', 'CAME', 'INTO', 'HIS', 'COMPANY', 'IF', 'HE', 'HAD', 'NEVER', 'SMOKED', 'BEFORE', 'HAD', 'TO', 'LEARN', 'THE', 'USE', 'OF', 'A', 'PIPE', 'AS', 'A', 'MEANS', 'OF', 'SELF', 'DEFENCE'] +2609-157645-0006-2358: hyp=['POOR', 'WAS', 'SUCH', 'A', 'CONTINUOUS', 'MOKER', 'THAT', 'ANY', 'ONE', 'WHO', 'CAME', 'INTO', 'HIS', 'COMPANY', 'FEET', 'HAD', 'NEVER', 'SMOKED', 'BEFORE', 'AND', 'TO', 'LEARNED', 'THE', 'USE', 'OF', 'A', 'PIPE', 'AS', 'A', 'MEANS', 'OF', 'SELF', 'DEFENCE'] +2609-157645-0007-2359: ref=['ONE', 'SUNDAY', 'SAYS', 'MISTER', 'DITCHFIELD', 'HE', 'HAD', 'AN', 'EXTRA', 'PIPE', 'AND', 'JOSHUA', 'THE', 'CLERK', 'TOLD', 'HIM', 'THAT', 'THE', 'PEOPLE', 'WERE', 'GETTING', 'IMPATIENT'] +2609-157645-0007-2359: hyp=['ONE', 'SUNDAY', 'SAYS', 'MISTER', 'DIXFIELD', 'HE', 'HAD', 'IN', 'THAT', 'SIR', 'PIPE', 'AND', 'JAUNTS', 'HER', 'THE', 'CLERK', 'TOLD', 'HIM', 'THAT', 'THE', 'PEOPLE', 'WERE', 'GETTING', 'THEM', 'IMPATIENT'] +2609-157645-0008-2360: ref=['LET', 'THEM', 'SING', 'ANOTHER', 'PSALM', 'SAID', 'THE', 'CURATE'] +2609-157645-0008-2360: hyp=['THEM', 'TO', 'THEM', 'SING', 'AND', 'NEITHER', 'PSALMS', 'SAY', 'THAT', 'THE', 'CURATE'] +2609-157645-0009-2361: ref=['THEY', 'HAVE', 'SIR', 'REPLIED', 'THE', 'CLERK'] +2609-157645-0009-2361: hyp=['THEY', 'HAVE', 'SIR', 'REPLIED', 'THE', 'CLERK'] +2609-157645-0010-2362: ref=['THEN', 'LET', 'THEM', 'SING', 'THE', 'HUNDRED', 'AND', 'NINETEENTH', 'REPLIED', 'THE', 'CURATE'] +2609-157645-0010-2362: hyp=['THEN', 'LET', 'THEM', 'SING', 'THE', 'HUNDRED', 'AND', 'NINETEENTH', 'REPLIED', 'THE', 'CURATE'] +2609-157645-0011-2363: ref=['SIX', 'ARMS', 'THE', 'NEAREST', 'WITHIN', 'REACH', 'PRESENTED', 'WITH', 'AN', 'OBEDIENT', 'START', 'AS', 'MANY', 'TOBACCO', 'POUCHES', 'TO', 'THE', 'MAN', 'OF', 'OFFICE'] +2609-157645-0011-2363: hyp=['SIX', 'ARMS', 'THE', 'NURSE', 'WITHIN', 'REACH', 'PRESENTED', 'WITH', 'AN', 'OBEDIENT', 'START', 'AND', 'AS', 'MANY', 'TOBACCO', 'PIUCES', 'TO', 'THE', 'MAN', 'OF', 'OFFICE'] +2609-157645-0012-2364: ref=['DAVID', 'DEANS', 'HOWEVER', 'DID', 'NOT', 'AT', 'ALL', 'APPROVE', 'THIS', 'IRREVERENCE'] +2609-157645-0012-2364: hyp=['DAVID', 'DEAN', 'SAMUR', 'DID', 'NOT', 'AT', 'ALL', 'IMPROVE', 'THIS', 'IRREVERENCE'] +2609-157645-0013-2365: ref=['GOING', 'TO', 'CHURCH', 'AT', 'HAYES', 'IN', 'THOSE', 'DAYS', 'MUST', 'HAVE', 'BEEN', 'QUITE', 'AN', 'EXCITING', 'EXPERIENCE'] +2609-157645-0013-2365: hyp=['GO', 'INTO', 'CHURCH', 'AUNT', 'HAZE', 'AND', 'THUS', 'THE', 'DAYS', 'MISS', "I'VE", 'BEEN', 'ACQUAINT', 'AN', 'THESE', 'SIGNING', 'INSPIRANTS'] +2609-157645-0014-2366: ref=['WHEN', 'THESE', 'MEN', 'IN', 'THE', 'COURSE', 'OF', 'MY', 'REMONSTRANCE', 'FOUND', 'THAT', 'I', 'WAS', 'NOT', 'GOING', 'TO', 'CONTINUE', 'THE', 'CUSTOM', 'THEY', 'NO', 'LONGER', 'CARED', 'TO', 'BE', 'COMMUNICANTS'] +2609-157645-0014-2366: hyp=['WHEN', 'THESE', 'MEN', 'AND', 'THE', 'COURTS', 'OF', 'MY', 'REMONSTRANCE', 'FOUND', 'OUT', 'THAT', 'WAS', 'NOT', 'GOING', 'TO', 'CONTINUE', 'THE', 'CUSTOM', 'THEY', 'NO', 'LONGER', 'CARED', 'TO', 'BE', 'COMMUNICANTS'] +2609-169640-0000-2406: ref=['PROAS', 'IN', 'THAT', 'QUARTER', 'WERE', 'USUALLY', 'DISTRUSTED', 'BY', 'SHIPS', 'IT', 'IS', 'TRUE', 'BUT', 'THE', 'SEA', 'IS', 
'FULL', 'OF', 'THEM', 'AND', 'FAR', 'MORE', 'ARE', 'INNOCENT', 'THAN', 'ARE', 'GUILTY', 'OF', 'ANY', 'ACTS', 'OF', 'VIOLENCE'] +2609-169640-0000-2406: hyp=['PRATS', 'IN', 'THAT', 'QUARTER', 'WERE', 'USUALLY', 'DISTRUDGED', 'BY', 'THE', 'STIPS', 'AT', 'IS', 'TRUE', 'BUT', 'THE', 'SEA', 'FLORID', 'THEM', 'FOR', 'MORE', 'OR', 'INNOCENT', 'THAN', 'OUR', 'GUILTY', 'OF', 'ANY', 'ACT', 'OF', 'ONLENETS'] +2609-169640-0001-2407: ref=['AN', 'HOUR', 'AFTER', 'THE', 'SUN', 'HAD', 'SET', 'THE', 'WIND', 'FELL', 'TO', 'A', 'LIGHT', 'AIR', 'THAT', 'JUST', 'KEPT', 'STEERAGE', 'WAY', 'ON', 'THE', 'SHIP'] +2609-169640-0001-2407: hyp=['NOW', 'I', 'AFTER', 'THE', 'SUN', 'HAD', 'SET', 'THE', 'WIND', 'FELL', 'TO', 'AN', 'LIGHT', 'AIR', 'DID', 'ITS', 'KEPT', 'STEERING', 'ON', 'THE', 'SHIP'] +2609-169640-0002-2408: ref=['FORTUNATELY', 'THE', 'JOHN', 'WAS', 'NOT', 'ONLY', 'FAST', 'BUT', 'SHE', 'MINDED', 'HER', 'HELM', 'AS', 'A', 'LIGHT', 'FOOTED', 'GIRL', 'TURNS', 'IN', 'A', 'LIVELY', 'DANCE'] +2609-169640-0002-2408: hyp=['FORTUNATELY', 'THE', 'JOHN', 'WAS', 'NOT', 'ONLY', 'FAST', 'BUT', 'SEA', 'MINDED', 'HER', 'HAIL', 'AS', 'THE', 'LIGHT', 'FOOTED', 'GIRL', 'TURNED', 'IN', 'A', 'LOVELY', 'DANCE'] +2609-169640-0003-2409: ref=['I', 'NEVER', 'WAS', 'IN', 'A', 'BETTER', 'STEERING', 'SHIP', 'MOST', 'ESPECIALLY', 'IN', 'MODERATE', 'WEATHER'] +2609-169640-0003-2409: hyp=['AND', 'EVER', 'WAS', 'IN', 'A', 'BETTER', 'STEERING', 'SHIP', 'POSSES', 'SPENT', 'FOR', 'AND', 'MONDER', 'IT', 'WEATHER'] +2609-169640-0004-2410: ref=['MISTER', 'MARBLE', 'HE', 'I', 'DO', 'BELIEVE', 'WAS', 'FAIRLY', 'SNOOZING', 'ON', 'THE', 'HEN', 'COOPS', 'BEING', 'LIKE', 'THE', 'SAILS', 'AS', 'ONE', 'MIGHT', 'SAY', 'BARELY', 'ASLEEP'] +2609-169640-0004-2410: hyp=['MISTER', 'MARBLE', 'HE', 'OUGHT', 'TO', 'BELIEVE', 'WAS', "FAIRLY'S", 'NEWSING', 'ON', 'THE', 'HINCOUPS', 'BEING', 'LIKE', 'THE', 'SAILORS', 'AS', 'ONE', 'MIGHT', 'SAY', 'VARIOUSLY'] +2609-169640-0005-2411: ref=['AT', 'THAT', 'MOMENT', 'I', 'HEARD', 'A', 'NOISE', 'ONE', 'FAMILIAR', 'TO', 'SEAMEN', 'THAT', 'OF', 'AN', 'OAR', 'FALLING', 'IN', 'A', 'BOAT'] +2609-169640-0005-2411: hyp=['AT', 'THAT', 'MOMENT', 'I', 'HAD', 'A', 'NOISE', 'WHEN', 'FAMILIAR', 'TO', 'SEE', 'MEN', 'THAT', 'OF', 'AN', 'OAR', 'FOLLOWING', 'IN', 'THE', 'BOAT'] +2609-169640-0006-2412: ref=['I', 'SANG', 'OUT', 'SAIL', 'HO', 'AND', 'CLOSE', 'ABOARD'] +2609-169640-0006-2412: hyp=['AS', 'I', 'YET', 'SAIL', 'HO', 'AND', 'CLOSER', 'BROAD'] +2609-169640-0007-2413: ref=['HE', 'WAS', 'TOO', 'MUCH', 'OF', 'A', 'SEAMAN', 'TO', 'REQUIRE', 'A', 'SECOND', 'LOOK', 'IN', 'ORDER', 'TO', 'ASCERTAIN', 'WHAT', 'WAS', 'TO', 'BE', 'DONE'] +2609-169640-0007-2413: hyp=['HE', 'WAS', 'CHIMNETS', 'OF', 'A', 'SEAMAN', 'TO', 'REQUIRE', 'A', 'SECOND', 'LOOK', 'IN', 'ORDER', 'TO', 'ASSERT', 'BUT', 'WAS', 'TO', 'BE', 'DONE'] +2609-169640-0008-2414: ref=['ALTHOUGH', 'THEY', 'WENT', 'THREE', 'FEET', 'TO', 'OUR', 'TWO', 'THIS', 'GAVE', 'US', 'A', 'MOMENT', 'OF', 'BREATHING', 'TIME'] +2609-169640-0008-2414: hyp=['ON', 'THOSE', 'THEY', 'WENT', 'THREE', 'FEET', 'TO', 'OUR', 'TWO', 'THIS', 'GAVE', 'UP', 'SOME', 'MOMENT', 'OF', 'BREASING', 'TIME'] +2609-169640-0009-2415: ref=['AS', 'OUR', 'SHEETS', 'WERE', 'ALL', 'FLYING', 'FORWARD', 'AND', 'REMAINED', 'SO', 'FOR', 'A', 'FEW', 'MINUTES', 'IT', 'GAVE', 'ME', 'LEISURE', 'TO', 'LOOK', 'ABOUT'] +2609-169640-0009-2415: hyp=['AS', 'OUR', 'SEATS', 'WERE', 'OFF', 'LYING', 'FORWARD', 'AND', 'REMAINED', 'SO', 'FOR', 'A', 'FEW', 'MINUTES', 'IT', 'GAVE', 'ME', 'A', 'LEISURE', 'TO', 'LOOK', 'ABOUT'] +2609-169640-0010-2416: 
ref=['I', 'SOON', 'SAW', 'BOTH', 'PROAS', 'AND', 'GLAD', 'ENOUGH', 'WAS', 'I', 'TO', 'PERCEIVE', 'THAT', 'THEY', 'HAD', 'NOT', 'APPROACHED', 'MATERIALLY', 'NEARER'] +2609-169640-0010-2416: hyp=['I', 'SOON', 'SAW', 'BOTH', 'PROPS', 'IN', 'GRINDING', 'UP', 'WAS', 'I', 'TO', 'PERCEIVE', 'THAT', 'THEY', 'HAD', 'NOT', 'APPROACHED', 'MATERIALLY', 'NEAR'] +2609-169640-0011-2417: ref=['MISTER', 'KITE', 'OBSERVED', 'THIS', 'ALSO', 'AND', 'REMARKED', 'THAT', 'OUR', 'MOVEMENTS', 'HAD', 'BEEN', 'SO', 'PROMPT', 'AS', 'TO', 'TAKE', 'THE', 'RASCALS', 'ABACK'] +2609-169640-0011-2417: hyp=['MICHIG', 'ALSO', 'IN', 'REMARK', 'THAT', 'OUR', 'MOVEMENTS', 'HAD', 'BEEN', 'SO', 'PROMPT', 'AS', 'TO', 'TAKE', 'THE', 'RASCUOUS', 'ABACK'] +2609-169640-0012-2418: ref=['A', 'BREATHLESS', 'STILLNESS', 'SUCCEEDED'] +2609-169640-0012-2418: hyp=['A', 'BRENT', 'WITCH', 'STILLNESS', 'SUCCEEDED'] +2609-169640-0013-2419: ref=['THE', 'PROAS', 'DID', 'NOT', 'ALTER', 'THEIR', 'COURSE', 'BUT', 'NEARED', 'US', 'FAST'] +2609-169640-0013-2419: hyp=['THE', 'POETS', 'DID', 'NOT', 'ENTER', 'THE', 'COURSE', 'BUT', 'NEARED', 'ITS', 'FAST'] +2609-169640-0014-2420: ref=['I', 'HEARD', 'THE', 'RATTLING', 'OF', 'THE', 'BOARDING', 'PIKES', 'TOO', 'AS', 'THEY', 'WERE', 'CUT', 'ADRIFT', 'FROM', 'THE', 'SPANKER', 'BOOM', 'AND', 'FELL', 'UPON', 'THE', 'DECKS'] +2609-169640-0014-2420: hyp=['I', 'HEARD', 'THE', 'RIDING', 'OF', 'THE', 'BOARDING', 'PINES', 'TO', 'AS', 'THEY', 'WERE', 'CUT', 'ADRIFT', 'FROM', 'THE', 'SPANKER', 'BOOM', 'AND', 'FELL', 'UPON', 'THE', 'DECKS'] +2609-169640-0015-2421: ref=['KITE', 'WENT', 'AFT', 'AND', 'RETURNED', 'WITH', 'THREE', 'OR', 'FOUR', 'MUSKETS', 'AND', 'AS', 'MANY', 'PIKES'] +2609-169640-0015-2421: hyp=['KINDLING', 'APT', 'AND', 'RETURNED', 'WITH', 'THREE', 'OR', 'FOUR', 'MUSKETS', 'AND', 'AS', 'MANY', 'PIKES'] +2609-169640-0016-2422: ref=['THE', 'STILLNESS', 'THAT', 'REIGNED', 'ON', 'BOTH', 'SIDES', 'WAS', 'LIKE', 'THAT', 'OF', 'DEATH'] +2609-169640-0016-2422: hyp=['THE', 'STILLNESS', 'THAT', 'RANGED', 'ON', 'BOTH', 'SIDES', 'WAS', 'LIKE', 'THAT', 'OF', 'DEATH'] +2609-169640-0017-2423: ref=['THE', 'JOHN', 'BEHAVED', 'BEAUTIFULLY', 'AND', 'CAME', 'ROUND', 'LIKE', 'A', 'TOP'] +2609-169640-0017-2423: hyp=['THE', 'JOHN', 'BEHAVED', 'BEAUTIFULLY', 'HE', 'CAME', 'ROUND', 'LIKE', 'A', 'TAR'] +2609-169640-0018-2424: ref=['THE', 'QUESTION', 'WAS', 'NOW', 'WHETHER', 'WE', 'COULD', 'PASS', 'THEM', 'OR', 'NOT', 'BEFORE', 'THEY', 'GOT', 'NEAR', 'ENOUGH', 'TO', 'GRAPPLE'] +2609-169640-0018-2424: hyp=['THE', 'QUESTION', 'WAS', 'NOW', 'WHETHER', 'WE', 'COULD', 'PASS', 'THEM', 'OR', 'NOT', 'BEFORE', 'THEY', 'GOT', 'NEARING', 'UP', 'TO', 'GRANTPLE'] +2609-169640-0019-2425: ref=['THE', 'CAPTAIN', 'BEHAVED', 'PERFECTLY', 'WELL', 'IN', 'THIS', 'CRITICAL', 'INSTANT', 'COMMANDING', 'A', 'DEAD', 'SILENCE', 'AND', 'THE', 'CLOSEST', 'ATTENTION', 'TO', 'HIS', 'ORDERS'] +2609-169640-0019-2425: hyp=['THE', 'CAPTAIN', 'BEHAVED', 'PERFECTLY', 'AWAY', 'ON', 'ITS', 'CRITICAL', 'INSTANT', 'COMMANDING', 'A', 'DEAD', 'SILENCE', 'IN', 'THE', 'CLOSETS', 'INTENTION', 'TO', 'HIS', 'ORDERS'] +2609-169640-0020-2426: ref=['NOT', 'A', 'SOUL', 'ON', 'BOARD', 'THE', 'JOHN', 'WAS', 'HURT'] +2609-169640-0020-2426: hyp=['NOW', 'AND', 'SO', 'ON', 'BOARD', 'THE', 'JOHN', 'WAS', 'SHARP'] +2609-169640-0021-2427: ref=['ON', 'OUR', 'SIDE', 'WE', 'GAVE', 'THE', 'GENTLEMEN', 'THE', 'FOUR', 'SIXES', 'TWO', 'AT', 'THE', 'NEAREST', 'AND', 'TWO', 'AT', 'THE', 'STERN', 'MOST', 'PROA', 'WHICH', 'WAS', 'STILL', 'NEAR', 'A', "CABLE'S", 'LENGTH', 'DISTANT'] 
+2609-169640-0021-2427: hyp=['WHEN', 'OURSAND', 'WE', 'GAVE', 'THE', 'GENTLEMAN', 'THE', 'FOUR', 'SAXES', 'TO', 'AUNT', 'THE', 'NURSE', 'AND', 'TWO', 'AT', 'THE', 'STERNMOST', 'PRO', 'WHICHELE', 'NEAR', 'A', "CABLE'S", 'LENGTH', 'OF', 'ITS'] +2609-169640-0022-2428: ref=['THEY', 'WERE', 'LIKE', 'THE', 'YELLS', 'OF', 'FIENDS', 'IN', 'ANGUISH'] +2609-169640-0022-2428: hyp=['THEY', 'WERE', 'NIGHT', 'THE', 'YEARS', 'OF', 'FIEND', 'IN', 'ENGLISH'] +2609-169640-0023-2429: ref=['I', 'DOUBT', 'IF', 'WE', 'TOUCHED', 'A', 'MAN', 'IN', 'THE', 'NEAREST', 'PROA'] +2609-169640-0023-2429: hyp=['AND', 'OUT', 'IF', 'WE', 'TOUCHED', 'A', 'MAN', 'IN', 'THE', 'NEW', 'EXPERIOR'] +2609-169640-0024-2430: ref=['IN', 'THIS', 'STATE', 'THE', 'SHIP', 'PASSED', 'AHEAD', 'ALL', 'HER', 'CANVAS', 'BEING', 'FULL', 'LEAVING', 'THE', 'PROA', 'MOTIONLESS', 'IN', 'HER', 'WAKE'] +2609-169640-0024-2430: hyp=['IN', 'THAT', 'STATE', 'THE', 'SHIP', 'PASSED', 'AHEAD', 'OF', 'HER', 'CANVAS', 'BEEN', 'FOR', 'LEAVING', 'THE', 'PROTINENT', 'IN', 'HER', 'WAKE'] +3005-163389-0000-1108: ref=['THEY', 'SWARMED', 'UP', 'IN', 'FRONT', 'OF', "SHERBURN'S", 'PALINGS', 'AS', 'THICK', 'AS', 'THEY', 'COULD', 'JAM', 'TOGETHER', 'AND', 'YOU', "COULDN'T", 'HEAR', 'YOURSELF', 'THINK', 'FOR', 'THE', 'NOISE'] +3005-163389-0000-1108: hyp=['THEY', 'SWARMED', 'UP', 'IN', 'FRONT', 'A', "SHERBOURNE'S", 'PALINGS', 'AS', 'THICK', 'AS', 'THEY', 'COULD', 'JAM', 'TOGETHER', 'AND', 'YOU', "COULDN'T", 'HEAR', 'YOURSELF', 'THINK', 'FOR', 'THE', 'NOISE'] +3005-163389-0001-1109: ref=['SOME', 'SUNG', 'OUT', 'TEAR', 'DOWN', 'THE', 'FENCE', 'TEAR', 'DOWN', 'THE', 'FENCE'] +3005-163389-0001-1109: hyp=['SOME', 'SUNG', 'OUT', 'TEAR', 'DOWN', 'THE', 'FENCE', 'TEAR', 'DOWN', 'THE', 'FENCE'] +3005-163389-0002-1110: ref=['THE', 'STILLNESS', 'WAS', 'AWFUL', 'CREEPY', 'AND', 'UNCOMFORTABLE'] +3005-163389-0002-1110: hyp=['THE', 'STILLNESS', 'WAS', 'AWFUL', 'CREEPY', 'AND', 'UNCOMFORTABLE'] +3005-163389-0003-1111: ref=['SHERBURN', 'RUN', 'HIS', 'EYE', 'SLOW', 'ALONG', 'THE', 'CROWD', 'AND', 'WHEREVER', 'IT', 'STRUCK', 'THE', 'PEOPLE', 'TRIED', 'A', 'LITTLE', 'TO', 'OUT', 'GAZE', 'HIM', 'BUT', 'THEY', "COULDN'T", 'THEY', 'DROPPED', 'THEIR', 'EYES', 'AND', 'LOOKED', 'SNEAKY'] +3005-163389-0003-1111: hyp=['SHERBIN', 'RUN', 'HIS', 'EYES', 'SLOW', 'ALONG', 'THE', 'CROWD', 'AND', 'WHEREVER', 'IT', 'STRUCK', 'THE', 'PEOPLE', 'TRIED', 'A', 'LITTLE', 'TO', 'OUTGAZE', 'HIM', 'BUT', 'THEY', "COULDN'T", 'THEY', 'DROPPED', 'THEIR', 'EYES', 'AND', 'LOOKED', 'SNEAKY'] +3005-163389-0004-1112: ref=['THE', 'AVERAGE', "MAN'S", 'A', 'COWARD'] +3005-163389-0004-1112: hyp=['THE', 'AVERAGE', "MAN'S", 'A', 'COWARD'] +3005-163389-0005-1113: ref=['BECAUSE', "THEY'RE", 'AFRAID', 'THE', "MAN'S", 'FRIENDS', 'WILL', 'SHOOT', 'THEM', 'IN', 'THE', 'BACK', 'IN', 'THE', 'DARKAND', "IT'S", 'JUST', 'WHAT', 'THEY', 'WOULD', 'DO'] +3005-163389-0005-1113: hyp=['BECAUSE', "THEY'RE", 'AFRAID', 'THE', "MAN'S", 'FRIENDS', 'WILL', 'SHOOT', 'THEM', 'IN', 'THE', 'BACK', 'IN', 'THE', 'DARK', 'AND', 'IS', 'JUST', 'WHAT', 'THEY', 'WOULD', 'DO'] +3005-163389-0006-1114: ref=['SO', 'THEY', 'ALWAYS', 'ACQUIT', 'AND', 'THEN', 'A', 'MAN', 'GOES', 'IN', 'THE', 'NIGHT', 'WITH', 'A', 'HUNDRED', 'MASKED', 'COWARDS', 'AT', 'HIS', 'BACK', 'AND', 'LYNCHES', 'THE', 'RASCAL'] +3005-163389-0006-1114: hyp=['SO', 'THEY', 'ALWAYS', 'ACQUIT', 'AND', 'THEN', 'A', 'MAN', 'GOES', 'IN', 'THE', 'NIGHT', 'WITH', 'A', 'HUNDRED', 'MASSED', 'COWARDS', 'AT', 'HIS', 'BACK', 'AND', 'LYNCHES', 'THE', 'RASCAL'] +3005-163389-0007-1115: ref=['YOU', "DIDN'T", 'WANT', 
'TO', 'COME'] +3005-163389-0007-1115: hyp=['YOU', "DIDN'T", 'WANT', 'TO', 'COME'] +3005-163389-0008-1116: ref=['BUT', 'A', 'MOB', 'WITHOUT', 'ANY', 'MAN', 'AT', 'THE', 'HEAD', 'OF', 'IT', 'IS', 'BENEATH', 'PITIFULNESS'] +3005-163389-0008-1116: hyp=['BUT', 'A', 'MOB', 'WITHOUT', 'ANY', 'MAN', 'AT', 'THE', 'HEAD', 'OF', 'IT', 'IS', 'BENEATH', 'PITIFULNESS'] +3005-163389-0009-1117: ref=['NOW', 'LEAVE', 'AND', 'TAKE', 'YOUR', 'HALF', 'A', 'MAN', 'WITH', 'YOU', 'TOSSING', 'HIS', 'GUN', 'UP', 'ACROSS', 'HIS', 'LEFT', 'ARM', 'AND', 'COCKING', 'IT', 'WHEN', 'HE', 'SAYS', 'THIS'] +3005-163389-0009-1117: hyp=['NOW', 'LEAVE', 'AND', 'TAKE', 'YOUR', 'HALF', 'A', 'MAN', 'WITH', 'YOU', 'TAUSEN', 'HE', 'HAS', 'GUN', 'UP', 'ACROSS', 'HIS', 'LEFT', 'ARM', 'AND', 'COCKING', 'IT', 'WHEN', 'HE', 'SAYS', 'THIS'] +3005-163389-0010-1118: ref=['THE', 'CROWD', 'WASHED', 'BACK', 'SUDDEN', 'AND', 'THEN', 'BROKE', 'ALL', 'APART', 'AND', 'WENT', 'TEARING', 'OFF', 'EVERY', 'WHICH', 'WAY', 'AND', 'BUCK', 'HARKNESS', 'HE', 'HEELED', 'IT', 'AFTER', 'THEM', 'LOOKING', 'TOLERABLE', 'CHEAP'] +3005-163389-0010-1118: hyp=['THE', 'CROWD', 'WASHED', 'BACK', 'SUDDEN', 'AND', 'THEN', 'BROKE', 'ALL', 'APART', 'AND', 'WENT', 'TEARING', 'OFF', 'EVERY', 'WHICH', 'WAY', 'AND', 'BUCK', 'HARKNESS', 'HE', 'HEALED', 'IT', 'AFTER', 'THEM', 'LOOKING', 'TOLERABLE', 'CHEEK'] +3005-163389-0011-1119: ref=['YOU', "CAN'T", 'BE', 'TOO', 'CAREFUL'] +3005-163389-0011-1119: hyp=['HE', "CAN'T", 'BE', 'TOO', 'CAREFUL'] +3005-163389-0012-1120: ref=['THEY', 'ARGUED', 'AND', 'TRIED', 'TO', 'KEEP', 'HIM', 'OUT', 'BUT', 'HE', "WOULDN'T", 'LISTEN', 'AND', 'THE', 'WHOLE', 'SHOW', 'COME', 'TO', 'A', 'STANDSTILL'] +3005-163389-0012-1120: hyp=['THEY', 'ARGUED', 'AND', 'TRIED', 'TO', 'KEEP', 'HIM', 'OUT', 'BUT', 'HE', "WOULDN'T", 'LISTEN', 'AND', 'A', 'WHOLE', 'SHOW', 'COME', 'TO', 'A', 'FANSTILL'] +3005-163389-0013-1121: ref=['AND', 'ONE', 'OR', 'TWO', 'WOMEN', 'BEGUN', 'TO', 'SCREAM'] +3005-163389-0013-1121: hyp=['AND', 'ONE', 'OR', 'TWO', 'WOMEN', 'BEGAN', 'TO', 'SCREAM'] +3005-163389-0014-1122: ref=['SO', 'THEN', 'THE', 'RINGMASTER', 'HE', 'MADE', 'A', 'LITTLE', 'SPEECH', 'AND', 'SAID', 'HE', 'HOPED', 'THERE', "WOULDN'T", 'BE', 'NO', 'DISTURBANCE', 'AND', 'IF', 'THE', 'MAN', 'WOULD', 'PROMISE', 'HE', "WOULDN'T", 'MAKE', 'NO', 'MORE', 'TROUBLE', 'HE', 'WOULD', 'LET', 'HIM', 'RIDE', 'IF', 'HE', 'THOUGHT', 'HE', 'COULD', 'STAY', 'ON', 'THE', 'HORSE'] +3005-163389-0014-1122: hyp=['SO', 'THEN', 'A', 'RING', 'MASTER', 'HE', 'MADE', 'A', 'LITTLE', 'SPEECH', 'AND', 'SAID', 'HE', 'HOPED', 'THERE', "WOULDN'T", 'BE', 'NO', 'DISTURBANCE', 'AND', 'IF', 'THE', 'MAN', 'WOULD', 'PROMISE', 'HE', "WOULDN'T", 'MAKE', 'NO', 'MORE', 'TROUBLE', 'HE', 'WOULD', 'LET', 'HIM', 'RIDE', 'IF', 'HE', 'THOUGHT', 'HE', 'COULD', 'STAY', 'ON', 'THE', 'HORSE'] +3005-163389-0015-1123: ref=['IT', "WARN'T", 'FUNNY', 'TO', 'ME', 'THOUGH', 'I', 'WAS', 'ALL', 'OF', 'A', 'TREMBLE', 'TO', 'SEE', 'HIS', 'DANGER'] +3005-163389-0015-1123: hyp=['IT', "WARN'T", 'FUNNY', 'TO', 'ME', 'THOUGH', 'I', 'WAS', 'ALL', 'OF', 'A', 'TREMBLE', 'TO', 'SEE', 'HIS', 'DANGER'] +3005-163389-0016-1124: ref=['AND', 'THE', 'HORSE', 'A', 'GOING', 'LIKE', 'A', 'HOUSE', 'AFIRE', 'TOO'] +3005-163389-0016-1124: hyp=['AND', 'A', 'HORSE', 'A', 'GOING', 'LIKE', 'A', 'HOUSE', 'AFAR', 'TOO'] +3005-163389-0017-1125: ref=['HE', 'SHED', 'THEM', 'SO', 'THICK', 'THEY', 'KIND', 'OF', 'CLOGGED', 'UP', 'THE', 'AIR', 'AND', 'ALTOGETHER', 'HE', 'SHED', 'SEVENTEEN', 'SUITS'] +3005-163389-0017-1125: hyp=['HE', 'SHARED', 'THEM', 'SO', 'THICK', 
'THAT', 'KIND', 'OF', 'CLOGGED', 'UP', 'THE', 'AIR', 'AND', 'ALTOGETHER', 'HE', 'SHED', 'SEVENTEEN', 'SUITS'] +3005-163389-0018-1126: ref=['WHY', 'IT', 'WAS', 'ONE', 'OF', 'HIS', 'OWN', 'MEN'] +3005-163389-0018-1126: hyp=['WHY', 'IT', 'WAS', 'ONE', 'OF', 'HIS', 'OWN', 'MEN'] +3005-163390-0000-1185: ref=['ANDBUT', 'NEVER', 'MIND', 'THE', 'REST', 'OF', 'HIS', 'OUTFIT', 'IT', 'WAS', 'JUST', 'WILD', 'BUT', 'IT', 'WAS', 'AWFUL', 'FUNNY'] +3005-163390-0000-1185: hyp=['AND', 'BUT', 'NEVER', 'MIND', 'THE', 'REST', 'OF', 'HIS', 'OUTFIT', 'IT', 'WAS', 'JUST', 'WILD', 'BUT', 'IT', 'WAS', 'AWFUL', 'FUNNY'] +3005-163390-0001-1186: ref=['THE', 'PEOPLE', 'MOST', 'KILLED', 'THEMSELVES', 'LAUGHING', 'AND', 'WHEN', 'THE', 'KING', 'GOT', 'DONE', 'CAPERING', 'AND', 'CAPERED', 'OFF', 'BEHIND', 'THE', 'SCENES', 'THEY', 'ROARED', 'AND', 'CLAPPED', 'AND', 'STORMED', 'AND', 'HAW', 'HAWED', 'TILL', 'HE', 'COME', 'BACK', 'AND', 'DONE', 'IT', 'OVER', 'AGAIN', 'AND', 'AFTER', 'THAT', 'THEY', 'MADE', 'HIM', 'DO', 'IT', 'ANOTHER', 'TIME'] +3005-163390-0001-1186: hyp=['THE', 'PEOPLE', 'MOST', 'KILLED', 'THEMSELVES', 'LAUGHING', 'AND', 'WHEN', 'THE', 'KING', 'GOT', 'DONE', 'CAPERING', 'AND', 'CAPERED', 'OFF', 'BEHIND', 'THE', 'SCENES', 'THEY', 'ROARED', 'AND', 'CLAPPED', 'AND', 'STORMED', 'AND', 'HAWED', 'TILL', 'HE', 'COME', 'BACK', 'AND', 'DONE', 'IT', 'OVER', 'AGAIN', 'AND', 'AFTER', 'THAT', 'THEY', 'MADE', 'HIM', 'DO', 'IT', 'ANOTHER', 'TIME'] +3005-163390-0002-1187: ref=['TWENTY', 'PEOPLE', 'SINGS', 'OUT'] +3005-163390-0002-1187: hyp=['TWENTY', 'PEOPLE', 'SANGS', 'OUT'] +3005-163390-0003-1188: ref=['THE', 'DUKE', 'SAYS', 'YES'] +3005-163390-0003-1188: hyp=['THE', 'DUKE', 'SAYS', 'YES'] +3005-163390-0004-1189: ref=['EVERYBODY', 'SINGS', 'OUT', 'SOLD'] +3005-163390-0004-1189: hyp=['EVERYBODY', 'SINGS', 'OUT', 'SOLD'] +3005-163390-0005-1190: ref=['BUT', 'A', 'BIG', 'FINE', 'LOOKING', 'MAN', 'JUMPS', 'UP', 'ON', 'A', 'BENCH', 'AND', 'SHOUTS', 'HOLD', 'ON'] +3005-163390-0005-1190: hyp=['BUT', 'A', 'BIG', 'FINE', 'LOOKING', 'MAN', 'JUMPS', 'UP', 'ON', 'A', 'BENCH', 'AN', 'SHOUTS', 'HOLD', 'ON'] +3005-163390-0006-1191: ref=['JUST', 'A', 'WORD', 'GENTLEMEN', 'THEY', 'STOPPED', 'TO', 'LISTEN'] +3005-163390-0006-1191: hyp=['JUST', 'A', 'WORD', 'GENTLEMEN', 'THEY', 'STOPPED', 'TO', 'LISTEN'] +3005-163390-0007-1192: ref=['WHAT', 'WE', 'WANT', 'IS', 'TO', 'GO', 'OUT', 'OF', 'HERE', 'QUIET', 'AND', 'TALK', 'THIS', 'SHOW', 'UP', 'AND', 'SELL', 'THE', 'REST', 'OF', 'THE', 'TOWN'] +3005-163390-0007-1192: hyp=['WHAT', 'WE', 'WANT', 'IS', 'TO', 'GO', 'OUT', 'OF', 'HERE', 'QUIET', 'AND', 'TALK', 'TO', 'SHOW', 'UP', 'AND', 'SELL', 'THE', 'REST', 'O', 'THE', 'TOWN'] +3005-163390-0008-1193: ref=['YOU', 'BET', 'IT', 'IS', 'THE', 'JEDGE', 'IS', 'RIGHT', 'EVERYBODY', 'SINGS', 'OUT'] +3005-163390-0008-1193: hyp=['YOU', 'BET', 'IT', 'IS', 'THE', 'JUDGE', 'IS', 'RIGHT', 'EVERYBODY', 'SINGS', 'OUT'] +3005-163390-0009-1194: ref=['WE', 'STRUCK', 'THE', 'RAFT', 'AT', 'THE', 'SAME', 'TIME', 'AND', 'IN', 'LESS', 'THAN', 'TWO', 'SECONDS', 'WE', 'WAS', 'GLIDING', 'DOWN', 'STREAM', 'ALL', 'DARK', 'AND', 'STILL', 'AND', 'EDGING', 'TOWARDS', 'THE', 'MIDDLE', 'OF', 'THE', 'RIVER', 'NOBODY', 'SAYING', 'A', 'WORD'] +3005-163390-0009-1194: hyp=['WE', 'STRUCK', 'THE', 'RAFT', 'AT', 'THE', 'SAME', 'TIME', 'AND', 'IN', 'LESS', 'THAN', 'TWO', 'SECONDS', 'WE', 'WAS', 'GLIDING', 'DOWN', 'STREAM', 'ALL', 'DARK', 'AND', 'STILL', 'AND', 'EDGING', 'TOWARDS', 'THE', 'MIDDLE', 'OF', 'THE', 'RIVER', 'NOBODY', 'SAYING', 'A', 'WORD'] +3005-163390-0010-1195: ref=['WE', 
'NEVER', 'SHOWED', 'A', 'LIGHT', 'TILL', 'WE', 'WAS', 'ABOUT', 'TEN', 'MILE', 'BELOW', 'THE', 'VILLAGE'] +3005-163390-0010-1195: hyp=['WE', 'NEVER', 'SHOWED', 'A', 'LIGHT', 'TILL', 'WE', 'WAS', 'ABOUT', 'TEN', 'MILE', 'BELOW', 'THE', 'VILLAGE'] +3005-163390-0011-1196: ref=['GREENHORNS', 'FLATHEADS'] +3005-163390-0011-1196: hyp=['GREENHORNS', 'FLAT', 'HEADS'] +3005-163390-0012-1197: ref=['NO', 'I', 'SAYS', 'IT', "DON'T"] +3005-163390-0012-1197: hyp=['NO', 'I', 'SAY', 'IS', 'IT', "DON'T"] +3005-163390-0013-1198: ref=['WELL', 'IT', "DON'T", 'BECAUSE', "IT'S", 'IN', 'THE', 'BREED', 'I', 'RECKON', "THEY'RE", 'ALL', 'ALIKE'] +3005-163390-0013-1198: hyp=['WELL', 'IT', "DON'T", 'BECAUSE', "IT'S", 'IN', 'TO', 'BREATHE', 'I', 'RECKON', "THEY'RE", 'ALL', 'ALIKE'] +3005-163390-0014-1199: ref=['WELL', "THAT'S", 'WHAT', "I'M", 'A', 'SAYING', 'ALL', 'KINGS', 'IS', 'MOSTLY', 'RAPSCALLIONS', 'AS', 'FUR', 'AS', 'I', 'CAN', 'MAKE', 'OUT', 'IS', 'DAT', 'SO'] +3005-163390-0014-1199: hyp=['WELL', "THAT'S", 'WHAT', "I'M", 'A', 'SAYING', 'ALL', 'KINGS', 'IS', 'MOSTLY', 'RASCALIONS', 'AS', 'FUR', 'AS', 'I', 'KIN', 'MAKE', 'OUT', 'IS', 'DAT', 'SO'] +3005-163390-0015-1200: ref=['AND', 'LOOK', 'AT', 'CHARLES', 'SECOND', 'AND', 'LOUIS', 'FOURTEEN', 'AND', 'LOUIS', 'FIFTEEN', 'AND', 'JAMES', 'SECOND', 'AND', 'EDWARD', 'SECOND', 'AND', 'RICHARD', 'THIRD', 'AND', 'FORTY', 'MORE', 'BESIDES', 'ALL', 'THEM', 'SAXON', 'HEPTARCHIES', 'THAT', 'USED', 'TO', 'RIP', 'AROUND', 'SO', 'IN', 'OLD', 'TIMES', 'AND', 'RAISE', 'CAIN'] +3005-163390-0015-1200: hyp=['AND', 'LOOK', 'AT', 'CHARLES', 'SECOND', 'AND', 'LOUIS', 'FOURTEEN', 'AND', 'LOUIS', 'FIFTEEN', 'AND', 'JAMES', 'SECOND', 'AND', 'EDWARD', 'SECOND', 'AND', 'RICHARD', 'THIRD', 'AND', 'FORTY', 'MORE', 'BESIDES', 'ALL', 'THEM', 'SAXON', 'HEPTARCHIES', 'THAT', 'USED', 'TO', 'RIP', 'AROUND', 'SO', 'WHEN', 'OLD', 'TIMES', 'AND', 'RAISE', 'GAME'] +3005-163390-0016-1201: ref=['MY', 'YOU', 'OUGHT', 'TO', 'SEEN', 'OLD', 'HENRY', 'THE', 'EIGHT', 'WHEN', 'HE', 'WAS', 'IN', 'BLOOM', 'HE', 'WAS', 'A', 'BLOSSOM'] +3005-163390-0016-1201: hyp=['MY', 'YOU', 'OUGHT', 'TO', 'SEE', 'AN', 'OLD', 'HENRY', 'THE', 'EIGHT', 'WHEN', 'HE', 'WAS', 'IN', 'BLOOM', 'HE', 'WAS', 'A', 'BLOSSOM'] +3005-163390-0017-1202: ref=['RING', 'UP', 'FAIR', 'ROSAMUN'] +3005-163390-0017-1202: hyp=['RANG', 'UP', 'FAIR', 'ROSAMOND'] +3005-163390-0018-1203: ref=['WELL', 'HENRY', 'HE', 'TAKES', 'A', 'NOTION', 'HE', 'WANTS', 'TO', 'GET', 'UP', 'SOME', 'TROUBLE', 'WITH', 'THIS', 'COUNTRY'] +3005-163390-0018-1203: hyp=['WELL', 'HENRY', 'HE', 'TAKES', 'A', 'NOTION', 'HE', 'WANTS', 'TO', 'GET', 'UP', 'SOME', 'TROUBLE', 'WITH', 'THIS', 'COUNTRY'] +3005-163390-0019-1204: ref=["S'POSE", 'HE', 'OPENED', 'HIS', 'MOUTHWHAT', 'THEN'] +3005-163390-0019-1204: hyp=["S'POSE", 'HE', 'OPENED', 'HIS', 'MOUTH', 'WITHIN'] +3005-163390-0020-1205: ref=['ALL', 'I', 'SAY', 'IS', 'KINGS', 'IS', 'KINGS', 'AND', 'YOU', 'GOT', 'TO', 'MAKE', 'ALLOWANCES'] +3005-163390-0020-1205: hyp=['ALL', 'I', 'SAY', 'IS', 'KINGS', 'AS', 'KINGS', 'AN', 'YE', 'GOT', 'TO', 'MAKE', 'ALLOWANCES'] +3005-163390-0021-1206: ref=['TAKE', 'THEM', 'ALL', 'AROUND', "THEY'RE", 'A', 'MIGHTY', 'ORNERY', 'LOT', "IT'S", 'THE', 'WAY', "THEY'RE", 'RAISED'] +3005-163390-0021-1206: hyp=['TAKE', 'THEM', 'ALL', 'AROUND', "THEY'RE", 'A', 'MIGHTY', 'ORNERY', 'LOT', "IT'S", 'THE', 'WAY', "THEY'RE", 'RAISED'] +3005-163390-0022-1207: ref=['WELL', 'THEY', 'ALL', 'DO', 'JIM'] +3005-163390-0022-1207: hyp=['WELL', 'THEY', 'ALL', 'DO', 'JIM'] +3005-163390-0023-1208: ref=['NOW', 'DE', 'DUKE', "HE'S", 
'A', 'TOLERBLE', 'LIKELY', 'MAN', 'IN', 'SOME', 'WAYS'] +3005-163390-0023-1208: hyp=['NOW', 'TO', 'DO', "HE'S", 'A', 'TOLERABLE', 'LIKE', 'THE', 'MAN', 'IN', 'SOME', 'WAYS'] +3005-163390-0024-1209: ref=['THIS', "ONE'S", 'A', 'MIDDLING', 'HARD', 'LOT', 'FOR', 'A', 'DUKE'] +3005-163390-0024-1209: hyp=['THIS', "ONE'S", 'A', 'MIDDLIN', 'HARD', 'LOT', 'FOR', 'A', 'DUKE'] +3005-163390-0025-1210: ref=['WHEN', 'I', 'WAKED', 'UP', 'JUST', 'AT', 'DAYBREAK', 'HE', 'WAS', 'SITTING', 'THERE', 'WITH', 'HIS', 'HEAD', 'DOWN', 'BETWIXT', 'HIS', 'KNEES', 'MOANING', 'AND', 'MOURNING', 'TO', 'HIMSELF'] +3005-163390-0025-1210: hyp=['WHEN', 'I', 'WAKED', 'UP', 'JEST', 'AT', 'DAYBREAK', 'HE', 'WAS', 'SITTING', 'THERE', 'WITH', 'HIS', 'HEAD', 'DOWN', 'BETWIXT', 'HIS', 'KNEES', 'MOANING', 'AND', 'MOURNING', 'TO', 'HIMSELF'] +3005-163390-0026-1211: ref=['IT', "DON'T", 'SEEM', 'NATURAL', 'BUT', 'I', 'RECKON', "IT'S", 'SO'] +3005-163390-0026-1211: hyp=['IT', "DON'T", 'SEEM', 'NATURAL', 'BUT', 'I', 'RECKON', "IT'S", 'SO'] +3005-163390-0027-1212: ref=['HE', 'WAS', 'OFTEN', 'MOANING', 'AND', 'MOURNING', 'THAT', 'WAY', 'NIGHTS', 'WHEN', 'HE', 'JUDGED', 'I', 'WAS', 'ASLEEP', 'AND', 'SAYING', 'PO', 'LITTLE', 'LIZABETH'] +3005-163390-0027-1212: hyp=['HE', 'WAS', 'OFTEN', 'MOANING', 'AND', 'MOURNING', 'IN', 'THAT', 'WAY', 'NIGHTS', 'WHEN', 'HE', 'JUDGED', 'I', 'WAS', 'ASLEEP', 'AND', 'SAYING', 'POLIT', 'LISBETH'] +3005-163390-0028-1213: ref=['DOAN', 'YOU', 'HEAR', 'ME', 'SHET', 'DE', 'DO'] +3005-163390-0028-1213: hyp=["DON'T", 'YOU', 'HEAR', 'ME', 'SHUT', 'DEAD', 'DOUGH'] +3005-163390-0029-1214: ref=['I', 'LAY', 'I', 'MAKE', 'YOU', 'MINE'] +3005-163390-0029-1214: hyp=['I', 'LAY', 'I', 'MAKE', 'YOU', 'MINE'] +3005-163390-0030-1215: ref=['JIS', 'AS', 'LOUD', 'AS', 'I', 'COULD', 'YELL'] +3005-163390-0030-1215: hyp=['IT', 'IS', 'LOUD', 'AS', 'I', 'COULD', 'YELL'] +3005-163391-0000-1127: ref=['WHICH', 'WAS', 'SOUND', 'ENOUGH', 'JUDGMENT', 'BUT', 'YOU', 'TAKE', 'THE', 'AVERAGE', 'MAN', 'AND', 'HE', "WOULDN'T", 'WAIT', 'FOR', 'HIM', 'TO', 'HOWL'] +3005-163391-0000-1127: hyp=['WHICH', 'WAS', 'SOUND', 'ENOUGH', 'JUDGMENT', 'BUT', 'YOU', 'TAKE', 'THE', 'AVERAGE', 'MAN', 'AND', 'HE', "WOULDN'T", 'WAIT', 'FOR', 'HIM', 'TO', 'HOW'] +3005-163391-0001-1128: ref=['THE', "KING'S", 'DUDS', 'WAS', 'ALL', 'BLACK', 'AND', 'HE', 'DID', 'LOOK', 'REAL', 'SWELL', 'AND', 'STARCHY'] +3005-163391-0001-1128: hyp=['THE', 'KING', 'DEADS', 'WAS', 'ALL', 'BLACK', 'AND', 'HE', 'DID', 'LOOK', 'REAL', 'SWELLIN', 'STARCHY'] +3005-163391-0002-1129: ref=['WHY', 'BEFORE', 'HE', 'LOOKED', 'LIKE', 'THE', 'ORNERIEST', 'OLD', 'RIP', 'THAT', 'EVER', 'WAS', 'BUT', 'NOW', 'WHEN', "HE'D", 'TAKE', 'OFF', 'HIS', 'NEW', 'WHITE', 'BEAVER', 'AND', 'MAKE', 'A', 'BOW', 'AND', 'DO', 'A', 'SMILE', 'HE', 'LOOKED', 'THAT', 'GRAND', 'AND', 'GOOD', 'AND', 'PIOUS', 'THAT', "YOU'D", 'SAY', 'HE', 'HAD', 'WALKED', 'RIGHT', 'OUT', 'OF', 'THE', 'ARK', 'AND', 'MAYBE', 'WAS', 'OLD', 'LEVITICUS', 'HIMSELF'] +3005-163391-0002-1129: hyp=['WHY', 'BEFORE', 'HE', 'LOOKED', 'LIKE', 'THE', 'ORNEIST', 'OLD', 'RIP', 'THAT', 'EVER', 'WAS', 'BUT', 'NOW', 'WHEN', "HE'D", 'TAKE', 'OFF', 'HIS', 'NEW', 'WHITE', 'BEAVER', 'AND', 'MAKE', 'A', 'BOW', 'AND', 'DO', 'A', 'SMILE', 'HE', 'LOOKED', 'THAT', 'GRAND', 'AND', 'GOOD', 'AND', 'PIOUS', 'THAT', "YOU'D", 'SAY', 'HE', 'HAD', 'WALKED', 'RIGHT', 'OUT', 'OF', 'THE', 'ARK', 'AND', 'MAYBE', 'WAS', 'OLD', 'LEVIKUS', 'HIMSELF'] +3005-163391-0003-1130: ref=['JIM', 'CLEANED', 'UP', 'THE', 'CANOE', 'AND', 'I', 'GOT', 'MY', 'PADDLE', 'READY'] +3005-163391-0003-1130: 
hyp=['JIM', 'CLEANED', 'UP', 'THE', 'CANOE', 'AND', 'I', 'GOT', 'MY', 'PADDLE', 'READY'] +3005-163391-0004-1131: ref=['WHER', 'YOU', 'BOUND', 'FOR', 'YOUNG', 'MAN'] +3005-163391-0004-1131: hyp=['WERE', 'YOU', 'BOUND', 'FOR', 'YOUNG', 'MAN'] +3005-163391-0005-1132: ref=['GIT', 'ABOARD', 'SAYS', 'THE', 'KING'] +3005-163391-0005-1132: hyp=['GET', 'ABOARD', 'SAYS', 'THE', 'KING'] +3005-163391-0006-1133: ref=['I', 'DONE', 'SO', 'AND', 'THEN', 'WE', 'ALL', 'THREE', 'STARTED', 'ON', 'AGAIN'] +3005-163391-0006-1133: hyp=['I', 'DONE', 'SO', 'AND', 'THEY', 'WE', 'ALL', 'THREE', 'STARTED', 'ON', 'AGAIN'] +3005-163391-0007-1134: ref=['THE', 'YOUNG', 'CHAP', 'WAS', 'MIGHTY', 'THANKFUL', 'SAID', 'IT', 'WAS', 'TOUGH', 'WORK', 'TOTING', 'HIS', 'BAGGAGE', 'SUCH', 'WEATHER'] +3005-163391-0007-1134: hyp=['THE', 'YOUNG', 'CHAP', 'WAS', 'MIGHTY', 'THANKFUL', 'SAID', 'HE', 'WAS', 'TOUGH', 'WORK', 'TOATING', 'HIS', 'BAGGAGE', 'SUCH', 'WEATHER'] +3005-163391-0008-1135: ref=['HE', 'ASKED', 'THE', 'KING', 'WHERE', 'HE', 'WAS', 'GOING', 'AND', 'THE', 'KING', 'TOLD', 'HIM', "HE'D", 'COME', 'DOWN', 'THE', 'RIVER', 'AND', 'LANDED', 'AT', 'THE', 'OTHER', 'VILLAGE', 'THIS', 'MORNING', 'AND', 'NOW', 'HE', 'WAS', 'GOING', 'UP', 'A', 'FEW', 'MILE', 'TO', 'SEE', 'AN', 'OLD', 'FRIEND', 'ON', 'A', 'FARM', 'UP', 'THERE', 'THE', 'YOUNG', 'FELLOW', 'SAYS'] +3005-163391-0008-1135: hyp=['PIERRE', 'THE', 'KING', 'WHERE', 'HE', 'WAS', 'GOING', 'AND', 'THE', 'KING', 'TOLD', 'HIM', "HE'D", 'COME', 'DOWN', 'A', 'RIVER', 'AND', 'LAND', 'IT', 'AT', 'THE', 'OTHER', 'VILLAGE', 'THIS', 'MORNING', 'AND', 'NOW', 'HE', 'WAS', 'GOING', 'UP', 'A', 'FEW', 'MILES', 'TO', 'SEE', 'AN', 'OLD', 'FRIEND', 'ON', 'A', 'FARM', 'UP', 'THERE', 'THE', 'YOUNG', 'FELLOW', 'SAYS'] +3005-163391-0009-1136: ref=['BUT', 'THEN', 'I', 'SAYS', 'AGAIN', 'NO', 'I', 'RECKON', 'IT', "AIN'T", 'HIM', 'OR', 'ELSE', 'HE', "WOULDN'T", 'BE', 'PADDLING', 'UP', 'THE', 'RIVER', 'YOU', "AIN'T", 'HIM', 'ARE', 'YOU'] +3005-163391-0009-1136: hyp=['BUT', 'THEN', 'I', 'SAYS', 'AGAIN', 'NO', 'I', 'RECKON', 'IT', "AIN'T", 'HIM', 'OR', 'ELSE', 'HE', "WOULDN'T", 'BE', 'PADDLIN', 'UP', 'THE', 'RIVER', 'YOU', "AIN'T", 'HIM', 'ARE', 'YOU'] +3005-163391-0010-1137: ref=['NO', 'MY', "NAME'S", 'BLODGETT', 'ELEXANDER', 'BLODGETT', 'REVEREND', 'ELEXANDER', 'BLODGETT', 'I', "S'POSE", 'I', 'MUST', 'SAY', 'AS', "I'M", 'ONE', 'O', 'THE', "LORD'S", 'POOR', 'SERVANTS'] +3005-163391-0010-1137: hyp=['NO', 'MY', "NAME'S", 'BLADGE', 'IT', 'ALEXANDER', 'BLADGET', 'REVEREND', 'ALEXANDER', 'BLOTCHETT', 'I', "S'POSE", 'I', 'MUST', 'SAY', 'AS', "I'M", 'ONE', 'OF', 'THE', 'LARGE', 'POOR', 'SERVANTS'] +3005-163391-0011-1138: ref=['YOU', 'SEE', 'HE', 'WAS', 'PRETTY', 'OLD', 'AND', "GEORGE'S", "G'YIRLS", 'WAS', 'TOO', 'YOUNG', 'TO', 'BE', 'MUCH', 'COMPANY', 'FOR', 'HIM', 'EXCEPT', 'MARY', 'JANE', 'THE', 'RED', 'HEADED', 'ONE', 'AND', 'SO', 'HE', 'WAS', 'KINDER', 'LONESOME', 'AFTER', 'GEORGE', 'AND', 'HIS', 'WIFE', 'DIED', 'AND', "DIDN'T", 'SEEM', 'TO', 'CARE', 'MUCH', 'TO', 'LIVE'] +3005-163391-0011-1138: hyp=['YE', 'SEE', 'HE', 'WAS', 'PRETTY', 'OLD', 'AN', 'GEORGE', 'IS', 'GOOD', "EARL'S", 'WAS', 'TOO', 'YOUNG', 'TO', 'BE', 'MUCH', 'COMPANY', 'FOR', 'HIM', 'EXCEPT', 'MARY', 'JANE', 'THE', 'RED', 'HEADED', 'ONE', 'AND', 'SO', 'HE', 'WAS', 'KIND', 'OR', 'LONESOME', 'AFTER', 'GEORGE', 'AND', 'HIS', 'WIFE', 'DIED', 'AND', "DIDN'T", 'SEEM', 'TO', 'CARE', 'MUCH', 'TO', 'LIVE'] +3005-163391-0012-1139: ref=['TOO', 'BAD', 'TOO', 'BAD', 'HE', "COULDN'T", 'A', 'LIVED', 'TO', 'SEE', 'HIS', 'BROTHERS', 'POOR', 'SOUL'] 
+3005-163391-0012-1139: hyp=['DO', 'BAD', 'TOO', 'BAD', 'HE', "COULDN'T", 'HAVE', 'LIVED', 'TO', 'SEE', 'HIS', "BROTHER'S", 'POOR', 'SOUL'] +3005-163391-0013-1140: ref=["I'M", 'GOING', 'IN', 'A', 'SHIP', 'NEXT', 'WEDNESDAY', 'FOR', 'RYO', 'JANEERO', 'WHERE', 'MY', 'UNCLE', 'LIVES'] +3005-163391-0013-1140: hyp=["I'M", 'GOING', 'IN', 'A', 'SHIP', 'NEXT', 'WEDNESDAY', 'FER', 'RYEO', 'GENERO', 'WHERE', 'MY', 'UNCLE', 'IS'] +3005-163391-0014-1141: ref=['BUT', "IT'LL", 'BE', 'LOVELY', 'WISHT', 'I', 'WAS', 'A', 'GOING'] +3005-163391-0014-1141: hyp=['BUT', "IT'LL", 'BE', 'LOVELY', 'WISHED', 'I', 'WAS', 'A', 'GOIN'] +3005-163391-0015-1142: ref=['MARY', "JANE'S", 'NINETEEN', "SUSAN'S", 'FIFTEEN', 'AND', "JOANNA'S", 'ABOUT', "FOURTEENTHAT'S", 'THE', 'ONE', 'THAT', 'GIVES', 'HERSELF', 'TO', 'GOOD', 'WORKS', 'AND', 'HAS', 'A', 'HARE', 'LIP', 'POOR', 'THINGS'] +3005-163391-0015-1142: hyp=['MARY', "JANE'S", 'NINETEEN', "SUSAN'S", 'FIFTEEN', 'AND', "JOANNA'S", 'ABOUT', 'FOURTEEN', "THAT'S", 'THE', 'ONE', 'THAT', 'GIVES', 'HERSELF', 'TO', 'GOOD', 'WORKS', 'AND', 'HAS', 'A', 'HAIR', 'LIP', 'POOR', 'THINGS'] +3005-163391-0016-1143: ref=['WELL', 'THEY', 'COULD', 'BE', 'WORSE', 'OFF'] +3005-163391-0016-1143: hyp=['WELL', 'THEY', 'COULD', 'BE', 'WORSE', 'OFF'] +3005-163391-0017-1144: ref=['OLD', 'PETER', 'HAD', 'FRIENDS', 'AND', 'THEY', "AIN'T", 'GOING', 'TO', 'LET', 'THEM', 'COME', 'TO', 'NO', 'HARM'] +3005-163391-0017-1144: hyp=['O', 'PETER', 'HAD', 'FRIENDS', 'AND', 'THEY', "AIN'T", 'GOIN', 'TO', 'LET', 'THEM', 'COME', 'TO', 'NO', 'HARM'] +3005-163391-0018-1145: ref=['BLAMED', 'IF', 'HE', "DIDN'T", 'INQUIRE', 'ABOUT', 'EVERYBODY', 'AND', 'EVERYTHING', 'IN', 'THAT', 'BLESSED', 'TOWN', 'AND', 'ALL', 'ABOUT', 'THE', 'WILKSES', 'AND', 'ABOUT', "PETER'S", 'BUSINESSWHICH', 'WAS', 'A', 'TANNER', 'AND', 'ABOUT', "GEORGE'SWHICH", 'WAS', 'A', 'CARPENTER', 'AND', 'ABOUT', "HARVEY'SWHICH", 'WAS', 'A', 'DISSENTERING', 'MINISTER', 'AND', 'SO', 'ON', 'AND', 'SO', 'ON', 'THEN', 'HE', 'SAYS'] +3005-163391-0018-1145: hyp=['BLAMED', 'IF', 'HE', "DIDN'T", 'ACQUIRE', 'ABOUT', 'EVERYBODY', 'AND', 'EVERYTHING', 'AND', 'THAT', 'BLESSED', 'TOWN', 'AND', 'ALL', 'ABOUT', 'THE', 'WILTZES', 'AND', 'ABOUT', "PETER'S", 'BUSINESS', 'WHICH', 'WAS', 'A', 'TANNER', 'AND', 'ABOUT', "GEORGE'S", 'WHICH', 'WAS', 'A', 'CARPENTER', 'AND', 'ABOUT', 'HARVEST', 'WHICH', 'WAS', 'A', 'DISSENTERING', 'MINISTER', 'AND', 'SO', 'ON', 'AND', 'SO', 'ON', 'THEN', 'HE', 'SAYS'] +3005-163391-0019-1146: ref=['WHEN', "THEY'RE", 'DEEP', 'THEY', "WON'T", 'STOP', 'FOR', 'A', 'HAIL'] +3005-163391-0019-1146: hyp=['WHEN', 'HER', 'DEEP', 'THEY', "WON'T", 'STOP', 'FOR', 'A', 'HAIL'] +3005-163391-0020-1147: ref=['WAS', 'PETER', 'WILKS', 'WELL', 'OFF'] +3005-163391-0020-1147: hyp=['WAS', 'PETER', 'WILKES', 'WELL', 'OFF'] +3005-163391-0021-1148: ref=['WHEN', 'WE', 'STRUCK', 'THE', 'BOAT', 'SHE', 'WAS', 'ABOUT', 'DONE', 'LOADING', 'AND', 'PRETTY', 'SOON', 'SHE', 'GOT', 'OFF'] +3005-163391-0021-1148: hyp=['WHEN', 'WAS', 'DRAP', 'THE', 'BOAT', 'SHE', 'WAS', 'ABOUT', 'DONE', 'LOADING', 'AN', 'PRETTY', 'SOON', 'SHE', 'GOT', 'OFF'] +3005-163391-0022-1149: ref=['NOW', 'HUSTLE', 'BACK', 'RIGHT', 'OFF', 'AND', 'FETCH', 'THE', 'DUKE', 'UP', 'HERE', 'AND', 'THE', 'NEW', 'CARPET', 'BAGS'] +3005-163391-0022-1149: hyp=['NOW', 'HUSTLE', 'BACK', 'RIGHT', 'OFF', 'AND', 'FETCH', 'THE', 'DUKE', 'UP', 'HERE', 'AND', 'THE', 'NEW', 'CARPET', 'BAGS'] +3005-163391-0023-1150: ref=['SO', 'THEN', 'THEY', 'WAITED', 'FOR', 'A', 'STEAMBOAT'] +3005-163391-0023-1150: hyp=['SO', 'THEN', 'THEY', 'WAITED', 
'FOR', 'A', 'STEAMBOAT'] +3005-163391-0024-1151: ref=['BUT', 'THE', 'KING', 'WAS', "CA'M", 'HE', 'SAYS'] +3005-163391-0024-1151: hyp=['THAT', 'THE', 'KING', 'WAS', 'CALM', 'HE', 'SAYS'] +3005-163391-0025-1152: ref=['THEY', 'GIVE', 'A', 'GLANCE', 'AT', 'ONE', 'ANOTHER', 'AND', 'NODDED', 'THEIR', 'HEADS', 'AS', 'MUCH', 'AS', 'TO', 'SAY', 'WHAT', 'D', 'I', 'TELL', 'YOU'] +3005-163391-0025-1152: hyp=['THEY', 'GIVE', 'A', 'GLANCE', 'AT', 'ONE', 'ANOTHER', 'AND', 'NODDED', 'THEIR', 'HEADS', 'AS', 'MUCH', 'AS', 'TO', 'SAY', 'WOULD', 'DAT', 'TELL', 'YOU'] +3005-163391-0026-1153: ref=['THEN', 'ONE', 'OF', 'THEM', 'SAYS', 'KIND', 'OF', 'SOFT', 'AND', 'GENTLE'] +3005-163391-0026-1153: hyp=['THEN', 'ONE', 'OF', 'THEM', 'SAYS', 'KIND', 'O', 'SOFT', 'AND', 'GENTLE'] +3005-163399-0000-1154: ref=['PHELPS', 'WAS', 'ONE', 'OF', 'THESE', 'LITTLE', 'ONE', 'HORSE', 'COTTON', 'PLANTATIONS', 'AND', 'THEY', 'ALL', 'LOOK', 'ALIKE'] +3005-163399-0000-1154: hyp=['PHELPS', 'AS', 'WAS', 'ONE', 'OF', 'THESE', 'LITTLE', 'ONE', 'HORSE', 'COTTON', 'PLANTATIONS', 'AND', 'THEY', 'ALL', 'LOOK', 'ALIKE'] +3005-163399-0001-1155: ref=['I', 'WENT', 'AROUND', 'AND', 'CLUMB', 'OVER', 'THE', 'BACK', 'STILE', 'BY', 'THE', 'ASH', 'HOPPER', 'AND', 'STARTED', 'FOR', 'THE', 'KITCHEN'] +3005-163399-0001-1155: hyp=['I', 'WENT', 'AROUND', 'AND', 'CLIMB', 'OVER', 'THE', 'BACK', 'STILE', 'BY', 'THE', 'ASHHOPPER', 'AND', 'STARTED', 'FOR', 'THE', 'KITCHEN'] +3005-163399-0002-1156: ref=['I', 'OUT', 'WITH', 'A', "YES'M", 'BEFORE', 'I', 'THOUGHT'] +3005-163399-0002-1156: hyp=['AH', 'OUT', 'WITH', 'A', "YES'M", 'FOUR', 'I', 'THOUGHT'] +3005-163399-0003-1157: ref=['SO', 'THEN', 'SHE', 'STARTED', 'FOR', 'THE', 'HOUSE', 'LEADING', 'ME', 'BY', 'THE', 'HAND', 'AND', 'THE', 'CHILDREN', 'TAGGING', 'AFTER'] +3005-163399-0003-1157: hyp=['SO', 'THEN', 'SHE', 'STARTED', 'FOR', 'THE', 'HOUSE', 'LEADING', 'ME', 'BY', 'THE', 'HAND', 'AND', 'THE', 'CHILDREN', 'TAGGING', 'AFTER'] +3005-163399-0004-1158: ref=['WHEN', 'WE', 'GOT', 'THERE', 'SHE', 'SET', 'ME', 'DOWN', 'IN', 'A', 'SPLIT', 'BOTTOMED', 'CHAIR', 'AND', 'SET', 'HERSELF', 'DOWN', 'ON', 'A', 'LITTLE', 'LOW', 'STOOL', 'IN', 'FRONT', 'OF', 'ME', 'HOLDING', 'BOTH', 'OF', 'MY', 'HANDS', 'AND', 'SAYS'] +3005-163399-0004-1158: hyp=['WHEN', 'WE', 'GOT', 'THERE', 'SHE', 'SET', 'ME', 'DOWN', 'IN', 'A', 'SPLIT', 'BOTTOM', 'CHAIR', 'AND', 'SET', 'HERSELF', 'DOWN', 'ON', 'A', 'LITTLE', 'LOW', 'STOOL', 'IN', 'FRONT', 'OF', 'ME', 'HOLDING', 'BOTH', 'OF', 'MY', 'HANDS', 'AND', 'SAYS'] +3005-163399-0005-1159: ref=['WELL', "IT'S", 'LUCKY', 'BECAUSE', 'SOMETIMES', 'PEOPLE', 'DO', 'GET', 'HURT'] +3005-163399-0005-1159: hyp=['WELL', "IT'S", 'LUCKY', 'BECAUSE', 'SOMETIMES', 'PEOPLE', 'DO', 'GET', 'HURT'] +3005-163399-0006-1160: ref=['AND', 'I', 'THINK', 'HE', 'DIED', 'AFTERWARDS', 'HE', 'WAS', 'A', 'BAPTIST'] +3005-163399-0006-1160: hyp=['AND', 'I', 'THINK', 'HE', 'DIED', 'AFTERWARDS', 'HE', 'WAS', 'A', 'BAPTIST'] +3005-163399-0007-1161: ref=['YES', 'IT', 'WAS', 'MORTIFICATIONTHAT', 'WAS', 'IT'] +3005-163399-0007-1161: hyp=['YES', 'IT', 'WAS', 'MORTIFICATION', 'THAT', 'WAS', 'IT'] +3005-163399-0008-1162: ref=['YOUR', "UNCLE'S", 'BEEN', 'UP', 'TO', 'THE', 'TOWN', 'EVERY', 'DAY', 'TO', 'FETCH', 'YOU'] +3005-163399-0008-1162: hyp=['YOUR', "UNCLE'S", 'BEEN', 'UP', 'TO', 'THE', 'TOWN', 'EVERY', 'DAY', 'TO', 'FETCH', 'YOU'] +3005-163399-0009-1163: ref=['YOU', 'MUST', 'A', 'MET', 'HIM', 'ON', 'THE', 'ROAD', "DIDN'T", 'YOU', 'OLDISH', 'MAN', 'WITH', 'A'] +3005-163399-0009-1163: hyp=['YOU', 'MUST', 'IMMERED', 'HIM', 'ON', 'THE', 
'ROAD', "DIDN'T", 'YOU', 'OLDISH', 'MAN', 'WIDTH', 'A'] +3005-163399-0010-1164: ref=['WHY', 'CHILD', 'IT', 'LL', 'BE', 'STOLE'] +3005-163399-0010-1164: hyp=['WHY', 'CHILD', "IT'LL", 'BESTOW'] +3005-163399-0011-1165: ref=['IT', 'WAS', 'KINDER', 'THIN', 'ICE', 'BUT', 'I', 'SAYS'] +3005-163399-0011-1165: hyp=['IT', 'WAS', 'KINDER', 'THIN', 'ICE', 'BUT', 'I', 'SAYS'] +3005-163399-0012-1166: ref=['I', 'HAD', 'MY', 'MIND', 'ON', 'THE', 'CHILDREN', 'ALL', 'THE', 'TIME', 'I', 'WANTED', 'TO', 'GET', 'THEM', 'OUT', 'TO', 'ONE', 'SIDE', 'AND', 'PUMP', 'THEM', 'A', 'LITTLE', 'AND', 'FIND', 'OUT', 'WHO', 'I', 'WAS'] +3005-163399-0012-1166: hyp=['I', 'HAD', 'MY', 'MIND', 'ON', 'THE', 'CHILDREN', 'ALL', 'THE', 'TIME', 'I', 'WANTED', 'TO', 'GIT', 'THEM', 'OUT', 'TO', 'ONE', 'SIDE', 'AND', 'PUMP', 'THEM', 'A', 'LITTLE', 'AND', 'FIND', 'OUT', 'WHO', 'I', 'WAS'] +3005-163399-0013-1167: ref=['PRETTY', 'SOON', 'SHE', 'MADE', 'THE', 'COLD', 'CHILLS', 'STREAK', 'ALL', 'DOWN', 'MY', 'BACK', 'BECAUSE', 'SHE', 'SAYS'] +3005-163399-0013-1167: hyp=['BERTIE', 'SOON', 'SHE', 'MADE', 'THE', 'COLD', 'CHILL', 'STREAK', 'ALL', 'DOWN', 'MY', 'BACK', 'BECAUSE', 'SHE', 'SAYS'] +3005-163399-0014-1168: ref=['I', 'SEE', 'IT', "WARN'T", 'A', 'BIT', 'OF', 'USE', 'TO', 'TRY', 'TO', 'GO', 'AHEAD', "I'D", 'GOT', 'TO', 'THROW', 'UP', 'MY', 'HAND'] +3005-163399-0014-1168: hyp=['I', 'SEE', 'IT', "WARN'T", 'A', 'BIT', 'OF', 'USE', 'TO', 'TRY', 'TO', 'GO', 'AHEAD', "I'D", 'GOT', 'TO', 'THROW', 'UP', 'MY', 'HAND'] +3005-163399-0015-1169: ref=['SO', 'I', 'SAYS', 'TO', 'MYSELF', "HERE'S", 'ANOTHER', 'PLACE', 'WHERE', 'I', 'GOT', 'TO', 'RESK', 'THE', 'TRUTH'] +3005-163399-0015-1169: hyp=['SO', 'I', 'SAYS', 'TO', 'MYSELF', "HERE'S", 'ANOTHER', 'PLACE', 'WHERE', 'I', 'GOT', 'TO', 'REST', 'THE', 'TRUTH'] +3005-163399-0016-1170: ref=['I', 'OPENED', 'MY', 'MOUTH', 'TO', 'BEGIN', 'BUT', 'SHE', 'GRABBED', 'ME', 'AND', 'HUSTLED', 'ME', 'IN', 'BEHIND', 'THE', 'BED', 'AND', 'SAYS', 'HERE', 'HE', 'COMES'] +3005-163399-0016-1170: hyp=['I', 'OPENED', 'MY', 'MOUTH', 'TO', 'BEGIN', 'BUT', 'SHE', 'GRABBED', 'ME', 'AND', 'HUSTLED', 'ME', 'IN', 'BEHIND', 'THE', 'BED', 'AND', 'SAYS', 'HERE', 'HE', 'COMES'] +3005-163399-0017-1171: ref=['CHILDREN', "DON'T", 'YOU', 'SAY', 'A', 'WORD'] +3005-163399-0017-1171: hyp=['CHILDREN', "DON'T", 'YOU', 'SAY', 'A', 'WORD'] +3005-163399-0018-1172: ref=['I', 'SEE', 'I', 'WAS', 'IN', 'A', 'FIX', 'NOW'] +3005-163399-0018-1172: hyp=['I', 'SEE', 'I', 'WAS', 'IN', 'A', 'FIX', 'NOW'] +3005-163399-0019-1173: ref=['MISSUS', 'PHELPS', 'SHE', 'JUMPS', 'FOR', 'HIM', 'AND', 'SAYS'] +3005-163399-0019-1173: hyp=['MISSUS', 'PHILP', 'SHE', 'JUMPED', 'FOR', 'HIM', 'AND', 'SAYS'] +3005-163399-0020-1174: ref=['HAS', 'HE', 'COME', 'NO', 'SAYS', 'HER', 'HUSBAND'] +3005-163399-0020-1174: hyp=['AS', 'HE', 'COME', 'NO', 'SAYS', 'HER', 'HUSBAND'] +3005-163399-0021-1175: ref=['I', "CAN'T", 'IMAGINE', 'SAYS', 'THE', 'OLD', 'GENTLEMAN', 'AND', 'I', 'MUST', 'SAY', 'IT', 'MAKES', 'ME', 'DREADFUL', 'UNEASY'] +3005-163399-0021-1175: hyp=['I', "CAN'T", 'IMAGINE', 'SAYS', 'THE', 'OLD', 'GENTLEMAN', 'AND', 'I', 'MUST', 'SAY', 'IT', 'MAKES', 'ME', 'DREADFUL', 'UNEASY'] +3005-163399-0022-1176: ref=['UNEASY', 'SHE', 'SAYS', "I'M", 'READY', 'TO', 'GO', 'DISTRACTED'] +3005-163399-0022-1176: hyp=['UNEASY', 'SHE', 'SAYS', "I'M", 'READY', 'TO', 'GO', 'DISTRACTED'] +3005-163399-0023-1177: ref=['HE', 'MUST', 'A', 'COME', 'AND', "YOU'VE", 'MISSED', 'HIM', 'ALONG', 'THE', 'ROAD'] +3005-163399-0023-1177: hyp=['HE', 'MUST', 'HAVE', 'COME', 'AND', "YOU'VE", 'MISSED', 'HIM', 
'ALONG', 'THE', 'ROAD'] +3005-163399-0024-1178: ref=['OH', "DON'T", 'DISTRESS', 'ME', 'ANY', "MORE'N", "I'M", 'ALREADY', 'DISTRESSED'] +3005-163399-0024-1178: hyp=['OH', "DON'T", 'DISTRESS', 'ME', 'ANY', 'MORE', "I'M", 'ALREADY', 'DISTRESSED'] +3005-163399-0025-1179: ref=['WHY', 'SILAS', 'LOOK', 'YONDER', 'UP', 'THE', 'ROAD', "AIN'T", 'THAT', 'SOMEBODY', 'COMING'] +3005-163399-0025-1179: hyp=['WHY', 'SILAS', 'LOOK', 'YONDER', 'UP', 'THE', 'ROAD', "AIN'T", 'THAT', 'SOMEBODY', 'COMIN'] +3005-163399-0026-1180: ref=['THE', 'OLD', 'GENTLEMAN', 'STARED', 'AND', 'SAYS'] +3005-163399-0026-1180: hyp=['THE', 'OLD', 'GENTLEMAN', 'STARED', 'AND', 'SAYS'] +3005-163399-0027-1181: ref=['I', "HAIN'T", 'NO', 'IDEA', 'WHO', 'IS', 'IT'] +3005-163399-0027-1181: hyp=['I', "HAIN'T", 'NO', 'IDEA', 'WHO', 'IS', 'IT'] +3005-163399-0028-1182: ref=["IT'S", 'TOM', 'SAWYER'] +3005-163399-0028-1182: hyp=['IS', 'TOM', 'SAWYER'] +3005-163399-0029-1183: ref=['BEING', 'TOM', 'SAWYER', 'WAS', 'EASY', 'AND', 'COMFORTABLE', 'AND', 'IT', 'STAYED', 'EASY', 'AND', 'COMFORTABLE', 'TILL', 'BY', 'AND', 'BY', 'I', 'HEAR', 'A', 'STEAMBOAT', 'COUGHING', 'ALONG', 'DOWN', 'THE', 'RIVER'] +3005-163399-0029-1183: hyp=['BEING', 'TOM', 'SAWYER', 'WAS', 'EASY', 'AND', 'COMFORTABLE', 'AND', 'ITS', 'STEESEY', 'AND', 'COMFORTABLE', 'TILL', 'BY', 'AND', 'BY', 'I', 'HEAR', 'A', 'STEAMBOAT', 'COFFIN', 'ALONG', 'DOWN', 'THE', 'RIVER'] +3005-163399-0030-1184: ref=['THEN', 'I', 'SAYS', 'TO', 'MYSELF', "S'POSE", 'TOM', 'SAWYER', 'COMES', 'DOWN', 'ON', 'THAT', 'BOAT'] +3005-163399-0030-1184: hyp=['THEN', 'I', 'SAYS', 'TO', 'MYSELF', "S'POSE", 'TOM', 'SAWYER', 'COMES', 'DOWN', 'ON', 'THAT', 'BOAT'] +3080-5032-0000-312: ref=['BUT', 'I', 'AM', 'HUGELY', 'PLEASED', 'THAT', 'YOU', 'HAVE', 'SEEN', 'MY', 'LADY'] +3080-5032-0000-312: hyp=['BUT', 'I', 'AM', 'HUGELY', 'PLEASED', 'THAT', 'YOU', 'HAVE', 'SEEN', 'MY', 'LADY'] +3080-5032-0001-313: ref=['I', 'KNEW', 'YOU', 'COULD', 'NOT', 'CHOOSE', 'BUT', 'LIKE', 'HER', 'BUT', 'YET', 'LET', 'ME', 'TELL', 'YOU', 'YOU', 'HAVE', 'SEEN', 'BUT', 'THE', 'WORST', 'OF', 'HER'] +3080-5032-0001-313: hyp=['I', 'KNEW', 'YOU', 'COULD', 'NOT', 'CHOOSE', 'BUT', 'LIKE', 'HER', 'BUT', 'YET', 'LET', 'ME', 'TELL', 'YOU', 'YOU', 'HAVE', 'SEEN', 'BUT', 'THE', 'WORST', 'OF', 'HER'] +3080-5032-0002-314: ref=['HER', 'CONVERSATION', 'HAS', 'MORE', 'CHARMS', 'THAN', 'CAN', 'BE', 'IN', 'MERE', 'BEAUTY', 'AND', 'HER', 'HUMOUR', 'AND', 'DISPOSITION', 'WOULD', 'MAKE', 'A', 'DEFORMED', 'PERSON', 'APPEAR', 'LOVELY'] +3080-5032-0002-314: hyp=['HER', 'CONVERSATION', 'HAS', 'MORE', 'CHARMS', 'AND', 'CAN', 'BE', 'IN', 'MERE', 'BEAUTY', 'AND', 'A', 'HUMOUR', 'AND', 'DISPOSITION', 'WOULD', 'MAKE', 'A', 'DEFORMED', 'PERSON', 'APPEAR', 'LOVELY'] +3080-5032-0003-315: ref=['WHY', 'DID', 'YOU', 'NOT', 'SEND', 'ME', 'THAT', 'NEWS', 'AND', 'A', 'GARLAND'] +3080-5032-0003-315: hyp=['WHY', 'DID', 'YOU', 'NOT', 'SEND', 'ME', 'THAT', 'NEWS', 'AND', 'A', 'GARLAND'] +3080-5032-0004-316: ref=['WELL', 'THE', 'BEST', "ON'T", 'IS', 'I', 'HAVE', 'A', 'SQUIRE', 'NOW', 'THAT', 'IS', 'AS', 'GOOD', 'AS', 'A', 'KNIGHT'] +3080-5032-0004-316: hyp=['WHY', 'THE', 'BEST', 'ON', 'IT', 'IS', 'THAT', 'I', 'HAVE', 'A', 'SQUIRE', 'NOW', 'THAT', 'IS', 'AS', 'GOOD', 'AS', 'A', 'KNIGHT'] +3080-5032-0005-317: ref=['IN', 'EARNEST', 'WE', 'HAVE', 'HAD', 'SUCH', 'A', 'SKIRMISH', 'AND', 'UPON', 'SO', 'FOOLISH', 'AN', 'OCCASION', 'AS', 'I', 'CANNOT', 'TELL', 'WHICH', 'IS', 'STRANGEST'] +3080-5032-0005-317: hyp=['IN', 'EARNEST', 'WE', 'HAVE', 'HAD', 'SUCH', 'A', 'SKIRMISH', 'AND', 'UPON', 'SO', 
'FOOLISH', 'AN', 'OCCASION', 'AS', 'I', 'CANNOT', 'TELL', 'WHICH', 'YOUR', "STRANGER'S"] +3080-5032-0006-318: ref=['ALL', 'THE', 'PEOPLE', 'THAT', 'I', 'HAD', 'EVER', 'IN', 'MY', 'LIFE', 'REFUSED', 'WERE', 'BROUGHT', 'AGAIN', 'UPON', 'THE', 'STAGE', 'LIKE', 'RICHARD', 'THE', 'THREE', 'S', 'GHOSTS', 'TO', 'REPROACH', 'ME', 'WITHAL', 'AND', 'ALL', 'THE', 'KINDNESS', 'HIS', 'DISCOVERIES', 'COULD', 'MAKE', 'I', 'HAD', 'FOR', 'YOU', 'WAS', 'LAID', 'TO', 'MY', 'CHARGE'] +3080-5032-0006-318: hyp=['ALL', 'THE', 'PEOPLE', 'THAT', 'I', 'HAD', 'EVER', 'IN', 'MY', 'LIFE', 'REFUSED', 'WERE', 'BROUGHT', 'AGAIN', 'UPON', 'THE', 'STAGE', 'LIKE', 'RICHARD', 'THE', "THIRD'S", 'GHOSTS', 'TO', 'REPROACH', 'ME', 'WITH', 'ALRE', 'KINDNESS', 'HIS', 'DISCOVERIES', 'COULD', 'MAKE', 'I', 'HAD', 'FOR', 'YOU', 'WAS', 'LAID', 'TO', 'MY', 'CHARGE'] +3080-5032-0007-319: ref=['MY', 'BEST', 'QUALITIES', 'IF', 'I', 'HAVE', 'ANY', 'THAT', 'ARE', 'GOOD', 'SERVED', 'BUT', 'FOR', 'AGGRAVATIONS', 'OF', 'MY', 'FAULT', 'AND', 'I', 'WAS', 'ALLOWED', 'TO', 'HAVE', 'WIT', 'AND', 'UNDERSTANDING', 'AND', 'DISCRETION', 'IN', 'OTHER', 'THINGS', 'THAT', 'IT', 'MIGHT', 'APPEAR', 'I', 'HAD', 'NONE', 'IN', 'THIS'] +3080-5032-0007-319: hyp=['MY', 'BEST', 'QUALITIES', 'IF', 'I', 'HAVE', 'ANY', 'THAT', 'ARE', 'GOOD', 'SERVED', 'BUT', 'FOR', 'AGGRAVATIONS', 'OF', 'MY', 'FAULT', 'AND', 'I', 'WAS', 'ALLOWED', 'TO', 'HAVE', 'WIT', 'AND', 'UNDERSTANDING', 'AND', 'DISCRETION', 'IN', 'OTHER', 'THINGS', 'THAT', 'IT', 'MIGHT', 'APPEAR', 'I', 'HAD', 'NONE', 'IN', 'THIS'] +3080-5032-0008-320: ref=['TIS', 'A', 'STRANGE', 'CHANGE', 'AND', 'I', 'AM', 'VERY', 'SORRY', 'FOR', 'IT', 'BUT', "I'LL", 'SWEAR', 'I', 'KNOW', 'NOT', 'HOW', 'TO', 'HELP', 'IT'] +3080-5032-0008-320: hyp=['TIS', 'A', 'STRANGE', 'CHANGE', 'AND', 'I', 'AM', 'VERY', 'SORRY', 'FOR', 'IT', 'BUT', "I'LL", 'SWEAR', 'I', 'KNOW', 'NOT', 'HOW', 'TO', 'HELP', 'IT'] +3080-5032-0009-321: ref=['MISTER', 'FISH', 'IS', 'THE', 'SQUIRE', 'OF', 'DAMES', 'AND', 'HAS', 'SO', 'MANY', 'MISTRESSES', 'THAT', 'ANYBODY', 'MAY', 'PRETEND', 'A', 'SHARE', 'IN', 'HIM', 'AND', 'BE', 'BELIEVED', 'BUT', 'THOUGH', 'I', 'HAVE', 'THE', 'HONOUR', 'TO', 'BE', 'HIS', 'NEAR', 'NEIGHBOUR', 'TO', 'SPEAK', 'FREELY', 'I', 'CANNOT', 'BRAG', 'MUCH', 'THAT', 'HE', 'MAKES', 'ANY', 'COURT', 'TO', 'ME', 'AND', 'I', 'KNOW', 'NO', 'YOUNG', 'WOMAN', 'IN', 'THE', 'COUNTRY', 'THAT', 'HE', 'DOES', 'NOT', 'VISIT', 'OFTEN'] +3080-5032-0009-321: hyp=['MISTER', 'FISH', 'IS', 'A', 'SQUIRE', 'OF', 'DAMES', 'AND', 'HAS', 'SO', 'MANY', 'MISTRESSES', 'THAT', 'ANYBODY', 'MAY', 'PRETEND', 'TO', 'SHARE', 'IN', 'HIM', 'AND', 'BE', 'BELIEVED', 'THAT', 'THOUGH', 'I', 'HAVE', 'THE', 'HONOUR', 'TO', 'BE', 'HIS', 'NEAR', 'NEIGHBOUR', 'TO', 'SPEAK', 'FREELY', 'I', 'CANNOT', 'BRAG', 'MUCH', 'THAT', 'HE', 'MAKES', 'ANY', 'COURT', 'TO', 'ME', 'AND', 'I', 'KNOW', 'NO', 'YOUNG', 'WOMAN', 'IN', 'THE', 'COUNTRY', 'THAT', 'HE', 'DOES', 'NOT', 'VISIT', 'OFTEN'] +3080-5032-0010-322: ref=['I', 'THINK', 'MY', 'YOUNGEST', 'BROTHER', 'COMES', 'DOWN', 'WITH', 'HIM'] +3080-5032-0010-322: hyp=['I', 'THINK', 'MY', 'YOUNGEST', 'BROTHER', 'COMES', 'DOWN', 'WITH', 'HIM'] +3080-5032-0011-323: ref=['I', 'CAN', 'NO', 'SOONER', 'GIVE', 'YOU', 'SOME', 'LITTLE', 'HINTS', 'WHEREABOUTS', 'THEY', 'LIVE', 'BUT', 'YOU', 'KNOW', 'THEM', 'PRESENTLY', 'AND', 'I', 'MEANT', 'YOU', 'SHOULD', 'BE', 'BEHOLDING', 'TO', 'ME', 'FOR', 'YOUR', 'ACQUAINTANCE'] +3080-5032-0011-323: hyp=['I', 'CAN', 'NO', 'SOONER', 'GIVE', 'YOU', 'SOME', 'LITTLE', 'HINTS', 'WHEREABOUT', 'THEY', 'LIVE', 'BUT', 'YOU', 'KNOW', 
'THEM', 'PRESENTLY', 'AND', 'I', 'MEANT', 'YOU', 'SHOULD', 'BE', 'BEHOLDING', 'TO', 'ME', 'FOR', 'YOUR', 'ACQUAINTANCE'] +3080-5032-0012-324: ref=['BUT', 'IT', 'SEEMS', 'THIS', 'GENTLEMAN', 'IS', 'NOT', 'SO', 'EASY', 'ACCESS', 'BUT', 'YOU', 'MAY', 'ACKNOWLEDGE', 'SOMETHING', 'DUE', 'TO', 'ME', 'IF', 'I', 'INCLINE', 'HIM', 'TO', 'LOOK', 'GRACIOUSLY', 'UPON', 'YOU', 'AND', 'THEREFORE', 'THERE', 'IS', 'NOT', 'MUCH', 'HARM', 'DONE'] +3080-5032-0012-324: hyp=['BUT', 'IT', 'SEEMS', 'THIS', 'GENTLEMAN', 'IS', 'NOT', 'SO', 'EASY', 'AXIS', 'BUT', 'YOU', 'MAY', 'ACKNOWLEDGE', 'SOMETHING', 'DUE', 'TO', 'ME', 'IF', 'I', 'INCLINE', 'HIM', 'TO', 'LOOK', 'GRACIOUSLY', 'UPON', 'YOU', 'AND', 'THEREFORE', 'THERE', 'IS', 'NOT', 'MUCH', 'HARM', 'DONE'] +3080-5032-0013-325: ref=['I', 'HAVE', 'MISSED', 'FOUR', 'FITS', 'AND', 'HAD', 'BUT', 'FIVE', 'AND', 'HAVE', 'RECOVERED', 'SO', 'MUCH', 'STRENGTH', 'AS', 'MADE', 'ME', 'VENTURE', 'TO', 'MEET', 'YOUR', 'LETTER', 'ON', 'WEDNESDAY', 'A', 'MILE', 'FROM', 'HOME'] +3080-5032-0013-325: hyp=['I', 'HAVE', 'MISSED', 'FOUR', 'FITS', 'AND', 'HAVE', 'HAD', 'BUT', 'FIVE', 'AND', 'HAVE', 'RECOVERED', 'SO', 'MUCH', 'STRENGTH', 'AS', 'MADE', 'ME', 'VENTURE', 'TO', 'MEET', 'YOUR', 'LETTER', 'ON', 'WEDNESDAY', 'A', 'MILE', 'FROM', 'HOME'] +3080-5032-0014-326: ref=['BUT', 'BESIDES', 'I', 'CAN', 'GIVE', 'YOU', 'OTHERS'] +3080-5032-0014-326: hyp=['BUT', 'BESIDES', 'I', 'CAN', 'GIVE', 'YOU', 'OTHERS'] +3080-5032-0015-327: ref=['I', 'AM', 'HERE', 'MUCH', 'MORE', 'OUT', 'OF', "PEOPLE'S", 'WAY', 'THAN', 'IN', 'TOWN', 'WHERE', 'MY', 'AUNT', 'AND', 'SUCH', 'AS', 'PRETEND', 'AN', 'INTEREST', 'IN', 'ME', 'AND', 'A', 'POWER', 'OVER', 'ME', 'DO', 'SO', 'PERSECUTE', 'ME', 'WITH', 'THEIR', 'GOOD', 'NATURE', 'AND', 'TAKE', 'IT', 'SO', 'ILL', 'THAT', 'THEY', 'ARE', 'NOT', 'ACCEPTED', 'AS', 'I', 'WOULD', 'LIVE', 'IN', 'A', 'HOLLOW', 'TREE', 'TO', 'AVOID', 'THEM'] +3080-5032-0015-327: hyp=['I', 'AM', 'HERE', 'MUCH', 'MORE', 'OUT', 'OF', "PEOPLE'S", 'WAY', 'THAN', 'IN', 'TOWN', 'WHERE', 'MY', 'AUNTS', 'AND', 'SUCH', 'HAS', 'PRETEND', 'AND', 'INTEREST', 'IN', 'ME', 'AND', 'A', 'POWER', 'OVER', 'ME', 'DO', 'SO', 'PERSECUTE', 'ME', 'WITH', 'DEAR', 'GOOD', 'NATURE', 'AND', 'TAKE', 'IT', 'SO', 'ILL', 'THAT', 'THEY', 'ARE', 'NOT', 'ACCEPTED', 'AS', 'I', 'WOULD', 'LIVE', 'IN', 'A', 'HOLLOW', 'TREE', 'TO', 'AVOID', 'THEM'] +3080-5032-0016-328: ref=['YOU', 'WILL', 'THINK', 'HIM', 'ALTERED', 'AND', 'IF', 'IT', 'BE', 'POSSIBLE', 'MORE', 'MELANCHOLY', 'THAN', 'HE', 'WAS'] +3080-5032-0016-328: hyp=['YOU', 'WILL', 'THINK', 'HIM', 'ALTERED', 'AND', 'IF', 'IT', 'BE', 'POSSIBLE', 'MORE', 'MELANCHOLY', 'THAN', 'HE', 'WAS'] +3080-5032-0017-329: ref=['IF', 'MARRIAGE', 'AGREES', 'NO', 'BETTER', 'WITH', 'OTHER', 'PEOPLE', 'THAN', 'IT', 'DOES', 'WITH', 'HIM', 'I', 'SHALL', 'PRAY', 'THAT', 'ALL', 'MY', 'FRIENDS', 'MAY', 'SCAPE', 'IT'] +3080-5032-0017-329: hyp=['IF', 'MARRIAGE', 'AGREES', 'NO', 'BETTER', 'WHETHER', 'PEOPLE', 'THAN', 'IT', 'DOES', 'WITH', 'HIM', 'I', 'SHALL', 'PRAY', 'THAT', 'ALL', 'MY', 'FRIENDS', 'MAY', 'ESCAPE', 'IT'] +3080-5032-0018-330: ref=['WELL', 'IN', 'EARNEST', 'IF', 'I', 'WERE', 'A', 'PRINCE', 'THAT', 'LADY', 'SHOULD', 'BE', 'MY', 'MISTRESS', 'BUT', 'I', 'CAN', 'GIVE', 'NO', 'RULE', 'TO', 'ANY', 'ONE', 'ELSE', 'AND', 'PERHAPS', 'THOSE', 'THAT', 'ARE', 'IN', 'NO', 'DANGER', 'OF', 'LOSING', 'THEIR', 'HEARTS', 'TO', 'HER', 'MAY', 'BE', 'INFINITELY', 'TAKEN', 'WITH', 'ONE', 'I', 'SHOULD', 'NOT', 'VALUE', 'AT', 'ALL', 'FOR', 'SO', 'SAYS', 'THE', 'JUSTINIAN', 'WISE', 'PROVIDENCE', 'HAS', 'ORDAINED', 
'IT', 'THAT', 'BY', 'THEIR', 'DIFFERENT', 'HUMOURS', 'EVERYBODY', 'MIGHT', 'FIND', 'SOMETHING', 'TO', 'PLEASE', 'THEMSELVES', 'WITHAL', 'WITHOUT', 'ENVYING', 'THEIR', 'NEIGHBOURS'] +3080-5032-0018-330: hyp=['WELL', 'IN', 'HONEST', 'IF', 'I', 'WERE', 'A', 'PRINCE', 'THAT', 'LADY', 'SHOULD', 'BE', 'MY', 'MISTRESS', 'BUT', 'I', 'CAN', 'GIVE', 'NO', 'RULE', 'TO', 'ANY', 'ONE', 'ELSE', 'AND', 'PERHAPS', 'THOSE', 'THAT', 'ARE', 'IN', 'NO', 'DANGER', 'OF', 'LOSING', 'THEIR', 'HEARTS', 'TO', 'HER', 'MAY', 'BE', 'INFINITELY', 'TAKEN', 'WITH', 'ONE', 'I', 'SHOULD', 'NOT', 'VALUE', 'AT', 'ALL', 'FOR', 'SO', 'SAYS', 'THE', 'JUSTINIAN', 'WISE', 'PROVIDENCE', 'HAS', 'ORDAINED', 'IT', 'THAT', 'BY', 'THEIR', 'DIFFERENT', 'HUMOURS', 'EVERYBODY', 'MIGHT', 'FIND', 'SOMETHING', 'TO', 'PLEASE', 'THEMSELVES', 'WITHAL', 'WITHOUT', 'ENVYING', 'THEIR', 'NEIGHBORS'] +3080-5032-0019-331: ref=['THE', 'MATTER', 'IS', 'NOT', 'GREAT', 'FOR', 'I', 'CONFESS', 'I', 'DO', 'NATURALLY', 'HATE', 'THE', 'NOISE', 'AND', 'TALK', 'OF', 'THE', 'WORLD', 'AND', 'SHOULD', 'BE', 'BEST', 'PLEASED', 'NEVER', 'TO', 'BE', 'KNOWN', "IN'T", 'UPON', 'ANY', 'OCCASION', 'WHATSOEVER', 'YET', 'SINCE', 'IT', 'CAN', 'NEVER', 'BE', 'WHOLLY', 'AVOIDED', 'ONE', 'MUST', 'SATISFY', 'ONESELF', 'BY', 'DOING', 'NOTHING', 'THAT', 'ONE', 'NEED', 'CARE', 'WHO', 'KNOWS'] +3080-5032-0019-331: hyp=['THE', 'MATTER', 'IS', 'NOT', 'GREAT', 'FOR', 'I', 'CONFESS', 'I', 'DO', 'NATURALLY', 'HATE', 'THE', 'NOISE', 'AND', 'TALK', 'OF', 'THE', 'WORLD', 'AND', 'SHOULD', 'BE', 'BEST', 'PLEASED', 'NEVER', 'TO', 'BE', 'KNOWN', 'IN', 'UPON', 'ANY', 'OCCASION', 'WHATSOEVER', 'YET', 'SINCE', 'IT', 'CAN', 'NEVER', 'BE', 'WHOLLY', 'AVOIDED', 'ONE', 'MUST', 'SATISFY', 'ONESELF', 'BY', 'DOING', 'NOTHING', 'THAT', 'ONE', 'NEED', 'CARE', 'ONE', 'ELSE'] +3080-5032-0020-332: ref=['IF', 'I', 'HAD', 'A', 'PICTURE', 'THAT', 'WERE', 'FIT', 'FOR', 'YOU', 'YOU', 'SHOULD', 'HAVE', 'IT'] +3080-5032-0020-332: hyp=['YOU', 'BY', 'HEART', 'A', 'PICTURE', 'THAT', 'WERE', 'FIT', 'FOR', 'YOU', 'YOU', 'SHOULD', 'HAVE', 'IT'] +3080-5032-0021-333: ref=['HOW', 'CAN', 'YOU', 'TALK', 'OF', 'DEFYING', 'FORTUNE', 'NOBODY', 'LIVES', 'WITHOUT', 'IT', 'AND', 'THEREFORE', 'WHY', 'SHOULD', 'YOU', 'IMAGINE', 'YOU', 'COULD'] +3080-5032-0021-333: hyp=['HOW', 'CAN', 'YOU', 'TALK', 'OF', 'DEFYING', 'FORTUNE', 'NOBODY', 'LIVES', 'WITHOUT', 'IT', 'AND', 'THEREFORE', 'WHY', 'SHOULD', 'YOU', 'IMAGINE', 'YOU', 'COULD'] +3080-5032-0022-334: ref=['I', 'KNOW', 'NOT', 'HOW', 'MY', 'BROTHER', 'COMES', 'TO', 'BE', 'SO', 'WELL', 'INFORMED', 'AS', 'YOU', 'SAY', 'BUT', 'I', 'AM', 'CERTAIN', 'HE', 'KNOWS', 'THE', 'UTMOST', 'OF', 'THE', 'INJURIES', 'YOU', 'HAVE', 'RECEIVED', 'FROM', 'HER'] +3080-5032-0022-334: hyp=['I', 'KNOW', 'NOT', 'HOW', 'MY', 'BROTHER', 'COMES', 'TO', 'BE', 'SO', 'WELL', 'INFORMED', 'AS', 'YOU', 'SAY', 'BUT', 'I', 'AM', 'CERTAIN', 'HE', 'KNOWS', 'UTMOST', 'OF', 'THE', 'INJURIES', 'YOU', 'HAVE', 'RECEIVED', 'FROM', 'HER'] +3080-5032-0023-335: ref=['WE', 'HAVE', 'HAD', 'ANOTHER', 'DEBATE', 'BUT', 'MUCH', 'MORE', 'CALMLY'] +3080-5032-0023-335: hyp=['WE', 'HAVE', 'HAD', 'ANOTHER', 'DEBATE', 'BUT', 'MUCH', 'MORE', 'CALMLY'] +3080-5032-0024-336: ref=['AND', 'BESIDES', 'THERE', 'WAS', 'A', 'TIME', 'WHEN', 'WE', 'OURSELVES', 'WERE', 'INDIFFERENT', 'TO', 'ONE', 'ANOTHER', 'DID', 'I', 'DO', 'SO', 'THEN', 'OR', 'HAVE', 'I', 'LEARNED', 'IT', 'SINCE'] +3080-5032-0024-336: hyp=['THEN', 'BESIDES', 'THERE', 'WAS', 'A', 'TIME', 'WHEN', 'WE', 'OURSELVES', 'WERE', 'INDIFFERENT', 'TO', 'ONE', 'ANOTHER', 'DID', 'I', 'DO', 'SO', 
'THEN', 'OR', 'HAVE', 'I', 'LEARNED', 'IT', 'SINCE'] +3080-5032-0025-337: ref=['I', 'HAVE', 'BEEN', 'STUDYING', 'HOW', 'TOM', 'CHEEKE', 'MIGHT', 'COME', 'BY', 'HIS', 'INTELLIGENCE', 'AND', 'I', 'VERILY', 'BELIEVE', 'HE', 'HAS', 'IT', 'FROM', 'MY', 'COUSIN', 'PETERS'] +3080-5032-0025-337: hyp=['I', 'HAVE', 'BEEN', 'STUDYING', 'HOW', 'TOM', 'CHEEK', 'MIGHT', 'COME', 'BY', 'HIS', 'INTELLIGENCE', 'AND', 'I', 'VERY', 'BELIEVE', 'HE', 'HAS', 'IT', 'FROM', 'MY', 'COUSIN', 'PETERS'] +3080-5032-0026-338: ref=['HOW', 'KINDLY', 'DO', 'I', 'TAKE', 'THESE', 'CIVILITIES', 'OF', 'YOUR', "FATHER'S", 'IN', 'EARNEST', 'YOU', 'CANNOT', 'IMAGINE', 'HOW', 'HIS', 'LETTER', 'PLEASED', 'ME'] +3080-5032-0026-338: hyp=['HOW', 'KINDLY', 'DO', 'I', 'TAKE', 'THE', 'CIVILITIES', 'OF', 'YOUR', 'FATHERS', 'IN', 'EARNEST', 'YOU', 'CANNOT', 'IMAGINE', 'HOW', 'HIS', 'LETTER', 'PLEASED', 'ME'] +3080-5040-0000-278: ref=['WOULD', 'IT', 'WOULD', 'LEAVE', 'ME', 'AND', 'THEN', 'I', 'COULD', 'BELIEVE', 'I', 'SHALL', 'NOT', 'ALWAYS', 'HAVE', 'OCCASION', 'FOR', 'IT'] +3080-5040-0000-278: hyp=['WOULD', 'IT', 'WOULD', 'LEAVE', 'ME', 'AND', 'THEN', 'I', 'COULD', 'BELIEVE', 'I', 'SHALL', 'NOT', 'ALWAYS', 'HAVE', 'OCCASION', 'FOR', 'IT'] +3080-5040-0001-279: ref=['MY', 'POOR', 'LADY', 'VAVASOUR', 'IS', 'CARRIED', 'TO', 'THE', 'TOWER', 'AND', 'HER', 'GREAT', 'BELLY', 'COULD', 'NOT', 'EXCUSE', 'HER', 'BECAUSE', 'SHE', 'WAS', 'ACQUAINTED', 'BY', 'SOMEBODY', 'THAT', 'THERE', 'WAS', 'A', 'PLOT', 'AGAINST', 'THE', 'PROTECTOR', 'AND', 'DID', 'NOT', 'DISCOVER', 'IT'] +3080-5040-0001-279: hyp=['MY', 'POOR', 'LADY', 'VAVASOR', 'IS', 'CHARACTER', 'TOWER', 'AND', 'HER', 'GREAT', 'BELLY', 'COULD', 'NOT', 'EXCUSE', 'HER', 'BECAUSE', 'SHE', 'WAS', 'ACQUAINTED', 'BY', 'SOMEBODY', 'THAT', 'THERE', 'WAS', 'A', 'PLOT', 'AGAINST', 'THE', 'PROTECTOR', 'AND', 'DID', 'NOT', 'DISCOVER', 'IT'] +3080-5040-0002-280: ref=['SHE', 'HAS', 'TOLD', 'NOW', 'ALL', 'THAT', 'WAS', 'TOLD', 'HER', 'BUT', 'VOWS', 'SHE', 'WILL', 'NEVER', 'SAY', 'FROM', 'WHENCE', 'SHE', 'HAD', 'IT', 'WE', 'SHALL', 'SEE', 'WHETHER', 'HER', 'RESOLUTIONS', 'ARE', 'AS', 'UNALTERABLE', 'AS', 'THOSE', 'OF', 'MY', 'LADY', 'TALMASH'] +3080-5040-0002-280: hyp=['SHE', 'HAS', 'TOLD', 'NOW', 'ALL', 'THAT', 'WAS', 'TOLD', 'HER', 'BUT', 'VOWS', 'SHE', 'WILL', 'NEVER', 'SAY', 'FROM', 'WHENCE', 'SHE', 'HAD', 'IT', 'WE', 'SHALL', 'SEE', 'WHETHER', 'HER', 'RESOLUTIONS', 'ARE', 'AS', 'UNALTERABLE', 'AS', 'THOSE', 'OF', 'MY', 'LADY', 'THOMAS'] +3080-5040-0003-281: ref=['I', 'WONDER', 'HOW', 'SHE', 'BEHAVED', 'HERSELF', 'WHEN', 'SHE', 'WAS', 'MARRIED'] +3080-5040-0003-281: hyp=['I', 'WONDER', 'HOW', 'SHE', 'BEHAVED', 'HERSELF', 'WHEN', 'SHE', 'WAS', 'MARRIED'] +3080-5040-0004-282: ref=['I', 'NEVER', 'SAW', 'ANY', 'ONE', 'YET', 'THAT', 'DID', 'NOT', 'LOOK', 'SIMPLY', 'AND', 'OUT', 'OF', 'COUNTENANCE', 'NOR', 'EVER', 'KNEW', 'A', 'WEDDING', 'WELL', 'DESIGNED', 'BUT', 'ONE', 'AND', 'THAT', 'WAS', 'OF', 'TWO', 'PERSONS', 'WHO', 'HAD', 'TIME', 'ENOUGH', 'I', 'CONFESS', 'TO', 'CONTRIVE', 'IT', 'AND', 'NOBODY', 'TO', 'PLEASE', "IN'T", 'BUT', 'THEMSELVES'] +3080-5040-0004-282: hyp=['I', 'NEVER', 'SAW', 'ANY', 'ONE', 'YET', 'THAT', 'DID', 'NOT', 'LOOK', 'SIMPLY', 'AND', 'OUT', 'OF', 'COUNTENANCE', 'WHATEVER', 'KNEW', 'A', 'WEDDING', 'WELL', 'DESIGNED', 'BUT', 'ONE', 'AND', 'THAT', 'WAS', 'OF', 'TWO', 'PERSONS', 'WHO', 'HAD', 'TIME', 'ENOUGH', 'I', 'CONFESS', 'TO', 'CONTRIVE', 'IT', 'AND', 'NOBODY', 'TO', 'PLEASE', 'IN', 'BUT', 'THEMSELVES'] +3080-5040-0005-283: ref=['THE', 'TRUTH', 'IS', 'I', 'COULD', 'NOT', 'ENDURE', 'TO', 
'BE', 'MISSUS', 'BRIDE', 'IN', 'A', 'PUBLIC', 'WEDDING', 'TO', 'BE', 'MADE', 'THE', 'HAPPIEST', 'PERSON', 'ON', 'EARTH'] +3080-5040-0005-283: hyp=['THE', 'TRUTH', 'IS', 'I', 'COULD', 'NOT', 'ENDURE', 'TO', 'BE', 'MISSUS', 'BRIDE', 'IN', 'A', 'PUBLIC', 'WEDDING', 'TO', 'BE', 'MADE', 'THE', 'HAPPIEST', 'PERSON', 'ON', 'EARTH'] +3080-5040-0006-284: ref=['DO', 'NOT', 'TAKE', 'IT', 'ILL', 'FOR', 'I', 'WOULD', 'ENDURE', 'IT', 'IF', 'I', 'COULD', 'RATHER', 'THAN', 'FAIL', 'BUT', 'IN', 'EARNEST', 'I', 'DO', 'NOT', 'THINK', 'IT', 'WERE', 'POSSIBLE', 'FOR', 'ME'] +3080-5040-0006-284: hyp=['DO', 'NOT', 'TAKE', 'IT', 'ILL', 'FOR', 'I', 'WOULD', 'ENDURE', 'IT', 'IF', 'I', 'COULD', 'RATHER', 'THAN', 'FAIL', 'BUT', 'IN', 'EARNEST', 'I', 'DO', 'NOT', 'THINK', 'IT', 'WERE', 'POSSIBLE', 'FOR', 'ME'] +3080-5040-0007-285: ref=['YET', 'IN', 'EARNEST', 'YOUR', 'FATHER', 'WILL', 'NOT', 'FIND', 'MY', 'BROTHER', 'PEYTON', 'WANTING', 'IN', 'CIVILITY', 'THOUGH', 'HE', 'IS', 'NOT', 'A', 'MAN', 'OF', 'MUCH', 'COMPLIMENT', 'UNLESS', 'IT', 'BE', 'IN', 'HIS', 'LETTERS', 'TO', 'ME', 'NOR', 'AN', 'UNREASONABLE', 'PERSON', 'IN', 'ANYTHING', 'SO', 'HE', 'WILL', 'ALLOW', 'HIM', 'OUT', 'OF', 'HIS', 'KINDNESS', 'TO', 'HIS', 'WIFE', 'TO', 'SET', 'A', 'HIGHER', 'VALUE', 'UPON', 'HER', 'SISTER', 'THAN', 'SHE', 'DESERVES'] +3080-5040-0007-285: hyp=['YET', 'IN', 'EARNEST', 'YOUR', 'FATHER', 'WILL', 'NOT', 'FIND', 'MY', 'BROTHER', 'PEYTON', 'WANTING', 'IN', 'CIVILITY', 'THOUGH', 'HE', 'IS', 'NOT', 'A', 'MAN', 'OF', 'MUCH', 'COMPLIMENT', 'UNLESS', 'IT', 'BE', 'IN', 'HIS', 'LETTERS', 'TO', 'ME', 'NO', 'AN', 'UNREASONABLE', 'PERSON', 'IN', 'ANYTHING', 'SO', 'HE', 'WILL', 'ALLOW', 'HIM', 'OUT', 'OF', 'HIS', 'KINDNESS', 'TO', 'HIS', 'WIFE', 'TO', 'SET', 'A', 'HIGHER', 'VALUE', 'UPON', 'HER', 'SISTER', 'THAN', 'SHE', 'DESERVES'] +3080-5040-0008-286: ref=['MY', 'AUNT', 'TOLD', 'ME', 'NO', 'LONGER', 'AGONE', 'THAN', 'YESTERDAY', 'THAT', 'I', 'WAS', 'THE', 'MOST', 'WILFUL', 'WOMAN', 'THAT', 'EVER', 'SHE', 'KNEW', 'AND', 'HAD', 'AN', 'OBSTINACY', 'OF', 'SPIRIT', 'NOTHING', 'COULD', 'OVERCOME', 'TAKE', 'HEED'] +3080-5040-0008-286: hyp=['MY', 'AUNT', 'TOLD', 'ME', 'NO', 'LONGER', 'A', 'GOD', 'IN', 'YESTERDAY', 'THAT', 'I', 'WAS', 'THE', 'MOST', 'WILFUL', 'WOMAN', 'THAT', 'EVER', 'SHE', 'KNEW', 'AND', 'HAD', 'AN', 'OBSTINACY', 'OF', 'SPIRIT', 'NOTHING', 'COULD', 'OVERCOME', 'TAKE', 'HEED'] +3080-5040-0009-287: ref=['YOU', 'SEE', 'I', 'GIVE', 'YOU', 'FAIR', 'WARNING'] +3080-5040-0009-287: hyp=['YOU', 'SEE', 'I', 'GIVE', 'YOU', 'FAIR', 'WARNING'] +3080-5040-0010-288: ref=['BY', 'THE', 'NEXT', 'I', 'SHALL', 'BE', 'GONE', 'INTO', 'KENT', 'AND', 'MY', 'OTHER', 'JOURNEY', 'IS', 'LAID', 'ASIDE', 'WHICH', 'I', 'AM', 'NOT', 'DISPLEASED', 'AT', 'BECAUSE', 'IT', 'WOULD', 'HAVE', 'BROKEN', 'OUR', 'INTERCOURSE', 'VERY', 'MUCH'] +3080-5040-0010-288: hyp=['BY', 'THE', 'NEXT', 'I', 'SHALL', 'BE', 'GONE', 'INTO', 'KENT', 'AND', 'MY', 'OTHER', 'JOURNEY', 'IS', 'LAID', 'ASIDE', 'WHICH', 'I', 'AM', 'NOT', 'DISPLEASED', 'AT', 'BECAUSE', 'IT', 'WOULD', 'HAVE', 'BROKEN', 'OUR', 'INTERCOURSE', 'VERY', 'MUCH'] +3080-5040-0011-289: ref=['HERE', 'ARE', 'SOME', 'VERSES', 'OF', "COWLEY'S", 'TELL', 'ME', 'HOW', 'YOU', 'LIKE', 'THEM'] +3080-5040-0011-289: hyp=['HERE', 'ARE', 'SOME', 'VERSES', 'OF', 'COLLEASE', 'TELL', 'ME', 'HOW', 'YOU', 'LIKE', 'THEM'] +3080-5040-0012-290: ref=['I', 'TOLD', 'YOU', 'IN', 'MY', 'LAST', 'THAT', 'MY', 'SUFFOLK', 'JOURNEY', 'WAS', 'LAID', 'ASIDE', 'AND', 'THAT', 'INTO', 'KENT', 'HASTENED'] +3080-5040-0012-290: hyp=['I', 'TOLD', 'YOU', 'IN', 'MY', 
'LAST', 'THAT', 'MY', 'SUFFOLD', 'JOURNEY', 'WAS', 'LAID', 'ASIDE', 'AND', 'THAT', 'INTO', 'KENT', 'HASTENED'] +3080-5040-0013-291: ref=['IF', 'I', 'DROWN', 'BY', 'THE', 'WAY', 'THIS', 'WILL', 'BE', 'MY', 'LAST', 'LETTER', 'AND', 'LIKE', 'A', 'WILL', 'I', 'BEQUEATH', 'ALL', 'MY', 'KINDNESS', 'TO', 'YOU', 'IN', 'IT', 'WITH', 'A', 'CHARGE', 'NEVER', 'TO', 'BESTOW', 'IT', 'ALL', 'UPON', 'ANOTHER', 'MISTRESS', 'LEST', 'MY', 'GHOST', 'RISE', 'AGAIN', 'AND', 'HAUNT', 'YOU'] +3080-5040-0013-291: hyp=['IF', 'I', 'DROWN', 'BY', 'THE', 'WAY', 'THIS', 'WILL', 'BE', 'MY', 'LAST', 'LETTER', 'AND', 'LIKE', 'A', 'WILL', 'I', 'BEQUEATH', 'ALL', 'MY', 'KINDNESS', 'TO', 'YOU', 'IN', 'IT', 'WITH', 'A', 'CHARGE', 'NEVER', 'TO', 'BESTOW', 'IT', 'ALL', 'UPON', 'ANOTHER', 'MISTRESS', 'LEST', 'MY', 'GHOST', 'RISE', 'AGAIN', 'AND', 'HAUNT', 'YOU'] +3080-5040-0014-292: ref=['INDEED', 'I', 'LIKE', 'HIM', 'EXTREMELY', 'AND', 'HE', 'IS', 'COMMENDED', 'TO', 'ME', 'BY', 'PEOPLE', 'THAT', 'KNOW', 'HIM', 'VERY', 'WELL', 'AND', 'ARE', 'ABLE', 'TO', 'JUDGE', 'FOR', 'A', 'MOST', 'EXCELLENT', 'SERVANT', 'AND', 'FAITHFUL', 'AS', 'POSSIBLE'] +3080-5040-0014-292: hyp=['INDEED', 'I', 'LIKE', 'HIM', 'EXTREMELY', 'AND', 'HE', 'IS', 'COMMENDED', 'TO', 'ME', 'BY', 'PEOPLE', 'THAT', 'KNOW', 'HIM', 'VERY', 'WELL', 'AND', 'ARE', 'ABLE', 'TO', 'JUDGE', 'FOR', 'A', 'MOST', 'EXCELLENT', 'SERVANT', 'AND', 'FAITHFUL', 'AS', 'POSSIBLE'] +3080-5040-0015-293: ref=['BECAUSE', 'YOU', 'FIND', 'FAULT', 'WITH', 'MY', 'OTHER', 'LETTERS', 'THIS', 'IS', 'LIKE', 'TO', 'BE', 'SHORTER', 'THAN', 'THEY', 'I', 'DID', 'NOT', 'INTEND', 'IT', 'SO', 'THOUGH', 'I', 'CAN', 'ASSURE', 'YOU'] +3080-5040-0015-293: hyp=['BECAUSE', 'YOU', 'FIND', 'FAULT', 'WITH', 'MY', 'OTHER', 'LETTERS', 'THIS', 'IS', 'LIKE', 'TO', 'BE', 'SHORTER', 'THAN', 'THEY', 'I', 'DID', 'NOT', 'INTEND', 'IT', 'SO', 'THOUGH', 'I', 'CAN', 'ASSURE', 'YOU'] +3080-5040-0016-294: ref=['I', 'DO', 'NOT', 'FIND', 'IT', 'THOUGH', 'I', 'AM', 'TOLD', 'I', 'WAS', 'SO', 'EXTREMELY', 'WHEN', 'I', 'BELIEVED', 'YOU', 'LOVED', 'ME'] +3080-5040-0016-294: hyp=['I', 'DO', 'NOT', 'FIND', 'IT', 'THOUGH', 'I', 'AM', 'TOLD', 'I', 'WAS', 'SO', 'EXTREMELY', 'WHEN', 'I', 'BELIEVED', 'YOU', 'LOVE', 'ME'] +3080-5040-0017-295: ref=['BUT', 'I', 'AM', 'CALLED', 'UPON'] +3080-5040-0017-295: hyp=['BUT', 'I', 'AM', 'CALLED', 'UPON'] +3080-5040-0018-296: ref=['DIRECTED', 'FOR', 'YOUR', 'MASTER'] +3080-5040-0018-296: hyp=['DIRECTED', 'FOR', 'YOUR', 'MASTER'] +3080-5040-0019-297: ref=['I', 'SEE', 'YOU', 'CAN', 'CHIDE', 'WHEN', 'YOU', 'PLEASE', 'AND', 'WITH', 'AUTHORITY', 'BUT', 'I', 'DESERVE', 'IT', 'I', 'CONFESS', 'AND', 'ALL', 'I', 'CAN', 'SAY', 'FOR', 'MYSELF', 'IS', 'THAT', 'MY', 'FAULT', 'PROCEEDED', 'FROM', 'A', 'VERY', 'GOOD', 'PRINCIPLE', 'IN', 'ME'] +3080-5040-0019-297: hyp=['I', 'SEE', 'YOU', 'CAN', 'CHID', 'WHEN', 'YOU', 'PLEASE', 'AND', 'WITH', 'AUTHORITY', 'BUT', 'I', 'DESERVE', 'IT', 'I', 'CONFESS', 'AND', 'ALL', 'I', 'CAN', 'SAY', 'FOR', 'MYSELF', 'IS', 'THAT', 'MY', 'FAULT', 'PROCEEDED', 'FROM', 'A', 'VERY', 'GOOD', 'PRINCIPLE', 'IN', 'ME'] +3080-5040-0020-298: ref=['WE', 'DARE', 'NOT', 'LET', 'OUR', 'TONGUES', 'LIE', 'MORE', 'ON', 'ONE', 'SIDE', 'OF', 'OUR', 'MOUTHS', 'THAN', "T'OTHER", 'FOR', 'FEAR', 'OF', 'OVERTURNING', 'IT'] +3080-5040-0020-298: hyp=['WE', 'DARE', 'NOT', 'LET', 'OUR', 'TONGUES', 'LIE', 'MORE', 'ON', 'ONE', 'SIDE', 'OF', 'OUR', 'MOTHS', 'THAN', 'THE', 'OTHER', 'FOR', 'FEAR', 'OF', 'OVERTURNING', 'IT'] +3080-5040-0021-299: ref=['YOU', 'ARE', 'SATISFIED', 'I', 'HOPE', 'ERE', 'THIS', 'THAT', 'I', 
'SCAPED', 'DROWNING'] +3080-5040-0021-299: hyp=['YOU', 'ARE', 'SATISFIED', 'I', 'HOPE', 'AT', 'THIS', 'THAT', 'I', 'ESCAPED', 'DROWNING'] +3080-5040-0022-300: ref=['BUT', 'I', 'AM', 'TROUBLED', 'MUCH', 'YOU', 'SHOULD', 'MAKE', 'SO', 'ILL', 'A', 'JOURNEY', 'TO', 'SO', 'LITTLE', 'PURPOSE', 'INDEED', 'I', 'WRIT', 'BY', 'THE', 'FIRST', 'POST', 'AFTER', 'MY', 'ARRIVAL', 'HERE', 'AND', 'CANNOT', 'IMAGINE', 'HOW', 'YOU', 'CAME', 'TO', 'MISS', 'OF', 'MY', 'LETTERS'] +3080-5040-0022-300: hyp=['BUT', 'I', 'AM', 'TROUBLED', 'MUCH', 'YOU', 'SHOULD', 'MAKE', 'SO', 'ILL', 'A', 'JOURNEY', 'TO', 'SO', 'LITTLE', 'PURPOSE', 'INDEED', 'I', 'WRITE', 'BY', 'THE', 'FIRST', 'POST', 'AFTER', 'MY', 'ARRIVAL', 'HERE', 'AND', 'CANNOT', 'IMAGINE', 'HOW', 'YOU', 'CAME', 'TO', 'MISS', 'OF', 'MY', 'LETTERS'] +3080-5040-0023-301: ref=['HOW', 'WELCOME', 'YOU', 'WILL', 'BE', 'BUT', 'ALAS'] +3080-5040-0023-301: hyp=['OH', 'WELCOME', 'YOU', 'WILL', 'BE', 'BUT', 'ALAS'] +3080-5040-0024-302: ref=['FOR', 'MY', 'LIFE', 'I', 'CANNOT', 'BEAT', 'INTO', 'THEIR', 'HEADS', 'A', 'PASSION', 'THAT', 'MUST', 'BE', 'SUBJECT', 'TO', 'NO', 'DECAY', 'AN', 'EVEN', 'PERFECT', 'KINDNESS', 'THAT', 'MUST', 'LAST', 'PERPETUALLY', 'WITHOUT', 'THE', 'LEAST', 'INTERMISSION'] +3080-5040-0024-302: hyp=['FOR', 'MY', 'LIFE', 'I', 'CANNOT', 'BEAT', 'INTO', 'THEIR', 'HEADS', 'A', 'PASSION', 'THAT', 'MUST', 'BE', 'SUBJECT', 'TO', 'NO', 'DECAY', 'AND', 'EVEN', 'PERFECT', 'KINDNESS', 'THAT', 'MUST', 'LAST', 'PERPETUALLY', 'WITHOUT', 'THE', 'LEAST', 'INTERMISSION'] +3080-5040-0025-303: ref=['THEY', 'LAUGH', 'TO', 'HEAR', 'ME', 'SAY', 'THAT', 'ONE', 'UNKIND', 'WORD', 'WOULD', 'DESTROY', 'ALL', 'THE', 'SATISFACTION', 'OF', 'MY', 'LIFE', 'AND', 'THAT', 'I', 'SHOULD', 'EXPECT', 'OUR', 'KINDNESS', 'SHOULD', 'INCREASE', 'EVERY', 'DAY', 'IF', 'IT', 'WERE', 'POSSIBLE', 'BUT', 'NEVER', 'LESSEN'] +3080-5040-0025-303: hyp=['THEY', 'LAUGH', 'TO', 'HEAR', 'ME', 'SAY', 'THAT', 'ONE', 'UNKIND', 'WORD', 'WOULD', 'DESTROY', 'ALL', 'THE', 'SATISFACTION', 'OF', 'MY', 'LIFE', 'AND', 'THAT', 'I', 'SHOULD', 'EXPECT', 'OUR', 'KINDNESS', 'SHOULD', 'INCREASE', 'EVERY', 'DAY', 'IF', 'IT', 'WERE', 'POSSIBLE', 'BUT', 'NEVER', 'LESSEN'] +3080-5040-0026-304: ref=['WE', 'GO', 'ABROAD', 'ALL', 'DAY', 'AND', 'PLAY', 'ALL', 'NIGHT', 'AND', 'SAY', 'OUR', 'PRAYERS', 'WHEN', 'WE', 'HAVE', 'TIME'] +3080-5040-0026-304: hyp=['WE', 'GO', 'BROAD', 'ALL', 'DAY', 'AND', 'PLAY', 'ALL', 'NIGHT', 'AND', 'SEE', 'OUR', 'PRAY', 'AS', 'WHEN', 'WE', 'HAVE', 'TIME'] +3080-5040-0027-305: ref=['WELL', 'IN', 'SOBER', 'EARNEST', 'NOW', 'I', 'WOULD', 'NOT', 'LIVE', 'THUS', 'A', 'TWELVEMONTH', 'TO', 'GAIN', 'ALL', 'THAT', 'THE', 'KING', 'HAS', 'LOST', 'UNLESS', 'IT', 'WERE', 'TO', 'GIVE', 'IT', 'HIM', 'AGAIN'] +3080-5040-0027-305: hyp=['WHILE', 'IN', 'SOBER', 'EARNEST', 'NOW', 'I', 'WOULD', 'NOT', 'LIVE', 'THUS', 'AT', 'TWELVEMONTH', 'TO', 'GAIN', 'ALL', 'THAT', 'KING', 'HAS', 'LOST', 'UNLESS', 'IT', 'WERE', 'TO', 'GIVE', 'IT', 'HIM', 'AGAIN'] +3080-5040-0028-306: ref=['WILL', 'YOU', 'BE', 'SO', 'GOOD', 'NATURED'] +3080-5040-0028-306: hyp=['WILL', 'YOU', 'BE', 'SO', 'GOOD', 'NATURED'] +3080-5040-0029-307: ref=['HE', 'HAS', 'ONE', 'SON', 'AND', 'TIS', 'THE', 'FINEST', 'BOY', 'THAT', "E'ER", 'YOU', 'SAW', 'AND', 'HAS', 'A', 'NOBLE', 'SPIRIT', 'BUT', 'YET', 'STANDS', 'IN', 'THAT', 'AWE', 'OF', 'HIS', 'FATHER', 'THAT', 'ONE', 'WORD', 'FROM', 'HIM', 'IS', 'AS', 'MUCH', 'AS', 'TWENTY', 'WHIPPINGS'] +3080-5040-0029-307: hyp=['HE', 'HAS', 'ONE', 'SON', 'AND', 'TIS', 'THE', 'FINEST', 'BOY', 'THAT', 'ERE', 'YOU', 'SAW', 'AND', 
'HAS', 'A', 'NOBLE', 'SPIRIT', 'BUT', 'YET', 'STANDS', 'IN', 'THAT', 'AWE', 'OF', 'HIS', 'FATHER', 'THAT', 'ONE', 'WORD', 'FROM', 'HIM', 'IS', 'AS', 'MUCH', 'AS', 'TWENTY', 'WHIPPINGS'] +3080-5040-0030-308: ref=['YOU', 'MUST', 'GIVE', 'ME', 'LEAVE', 'TO', 'ENTERTAIN', 'YOU', 'THUS', 'WITH', 'DISCOURSES', 'OF', 'THE', 'FAMILY', 'FOR', 'I', 'CAN', 'TELL', 'YOU', 'NOTHING', 'ELSE', 'FROM', 'HENCE'] +3080-5040-0030-308: hyp=['YOU', 'MUST', 'GIVE', 'ME', 'LEAVE', 'TO', 'ENTERTAIN', 'YOURSELVES', 'WITH', 'DISCOURSES', 'OF', 'THE', 'FAMILY', 'FOR', 'I', 'CAN', 'TELL', 'YOU', 'NOTHING', 'ELSE', 'FROM', 'HENCE'] +3080-5040-0031-309: ref=['NOT', 'TO', 'KNOW', 'WHEN', 'YOU', 'WOULD', 'COME', 'HOME', 'I', 'CAN', 'ASSURE', 'YOU', 'NOR', 'FOR', 'ANY', 'OTHER', 'OCCASION', 'OF', 'MY', 'OWN', 'BUT', 'WITH', 'A', 'COUSIN', 'OF', 'MINE', 'THAT', 'HAD', 'LONG', 'DESIGNED', 'TO', 'MAKE', 'HERSELF', 'SPORT', 'WITH', 'HIM', 'AND', 'DID', 'NOT', 'MISS', 'OF', 'HER', 'AIM'] +3080-5040-0031-309: hyp=['NOT', 'TO', 'KNOW', 'WHEN', 'YOU', 'HAD', 'COME', 'HOME', 'I', 'CAN', 'ASSURE', 'YOU', 'NO', 'FOR', 'ANY', 'OTHER', 'CAPTAIN', 'OF', 'MY', 'OWN', 'BUT', 'WITH', 'A', 'COUSIN', 'OF', 'MINE', 'THAT', 'HAD', 'LONG', 'DESIGN', 'TO', 'MAKE', 'HERSELF', 'SPORT', 'WITH', 'HIM', 'AND', 'DID', 'NOT', 'MISS', 'OF', 'HER', 'AIM'] +3080-5040-0032-310: ref=['IN', 'MY', 'LIFE', 'I', 'NEVER', 'HEARD', 'SO', 'RIDICULOUS', 'A', 'DISCOURSE', 'AS', 'HE', 'MADE', 'US', 'AND', 'NO', 'OLD', 'WOMAN', 'WHO', 'PASSES', 'FOR', 'A', 'WITCH', 'COULD', 'HAVE', 'BEEN', 'MORE', 'PUZZLED', 'TO', 'SEEK', 'WHAT', 'TO', 'SAY', 'TO', 'REASONABLE', 'PEOPLE', 'THAN', 'HE', 'WAS'] +3080-5040-0032-310: hyp=['IN', 'MY', 'LIFE', 'I', 'NEVER', 'HEARD', 'SO', 'RIDICULOUS', 'A', 'DISCOURSE', 'AS', 'HE', 'MADE', 'US', 'AND', 'NO', 'OLD', 'WOMAN', 'WHO', 'PAUSES', 'FOR', 'A', 'WITCH', 'COULD', 'HAVE', 'BEEN', 'MORE', 'PUZZLED', 'TO', 'SEEK', 'WHAT', 'TO', 'SAY', 'TO', 'REASONABLE', 'PEOPLE', 'THAN', 'HE', 'WAS'] +3080-5040-0033-311: ref=['EVER', 'SINCE', 'THIS', 'ADVENTURE', 'I', 'HAVE', 'HAD', 'SO', 'GREAT', 'A', 'BELIEF', 'IN', 'ALL', 'THINGS', 'OF', 'THIS', 'NATURE', 'THAT', 'I', 'COULD', 'NOT', 'FORBEAR', 'LAYING', 'A', 'PEAS', 'COD', 'WITH', 'NINE', 'PEAS', "IN'T", 'UNDER', 'MY', 'DOOR', 'YESTERDAY', 'AND', 'WAS', 'INFORMED', 'BY', 'IT', 'THAT', 'MY', "HUSBAND'S", 'NAME', 'SHOULD', 'BE', 'THOMAS', 'HOW', 'DO', 'YOU', 'LIKE', 'THAT'] +3080-5040-0033-311: hyp=['EVER', 'SINCE', 'THIS', 'ADVENTURE', 'I', 'HAVE', 'HAD', 'SO', 'GREAT', 'A', 'BELIEF', 'IN', 'ALL', 'THINGS', 'FOR', 'THIS', 'NATURE', 'THAT', 'I', 'COULD', 'NOT', 'FORBEAR', 'LAYING', 'A', 'PEASE', 'CART', 'WITH', 'NINE', 'PEAS', 'INTO', 'UNDER', 'MY', 'DOOR', 'YESTERDAY', 'AND', 'WAS', 'INFORMED', 'BY', 'IT', 'THAT', 'MY', "HUSBAND'S", 'NAME', 'SHOULD', 'BE', 'THOMAS', 'HOW', 'DO', 'YOU', 'LIKE', 'THAT'] +3331-159605-0000-695: ref=['SHE', 'PULLED', 'HER', 'HAIR', 'DOWN', 'TURNED', 'HER', 'SKIRT', 'BACK', 'PUT', 'HER', 'FEET', 'ON', 'THE', 'FENDER', 'AND', 'TOOK', 'PUTTEL', 'INTO', 'HER', 'LAP', 'ALL', 'OF', 'WHICH', 'ARRANGEMENTS', 'SIGNIFIED', 'THAT', 'SOMETHING', 'VERY', 'IMPORTANT', 'HAD', 'GOT', 'TO', 'BE', 'THOUGHT', 'OVER', 'AND', 'SETTLED'] +3331-159605-0000-695: hyp=['SHE', 'PULLED', 'HER', 'HAIR', 'DOWN', 'TURNED', 'HIS', 'GOOD', 'BACK', 'PUT', 'HER', 'FEET', 'ON', 'THE', 'FENDER', 'AND', 'TOOK', 'POTTLE', 'INTO', 'HER', 'LAP', 'ALL', 'OF', 'WHICH', 'ARRANGEMENTS', 'SIGNIFIED', 'THAT', 'SOMETHING', 'VERY', 'IMPORTANT', 'HAD', 'GOT', 'TO', 'BE', 'THOUGHT', 'OVER', 'AND', 'SETTLED'] 
+3331-159605-0001-696: ref=['THE', 'MORE', 'PROPOSALS', 'THE', 'MORE', 'CREDIT'] +3331-159605-0001-696: hyp=['THE', 'MORE', 'PROPOSALS', 'THE', 'MORE', 'CREDIT'] +3331-159605-0002-697: ref=['I', 'VE', 'TRIED', 'IT', 'AND', 'LIKED', 'IT', 'AND', 'MAYBE', 'THIS', 'IS', 'THE', 'CONSEQUENCE', 'OF', 'THAT', "NIGHT'S", 'FUN'] +3331-159605-0002-697: hyp=["I'VE", 'TRIED', 'IT', 'AND', 'LIKED', 'IT', 'AND', 'MAYBE', 'THIS', 'IS', 'THE', 'CONSEQUENCE', 'OF', 'THAT', "NIGHT'S", 'FUN'] +3331-159605-0003-698: ref=['JUST', 'SUPPOSE', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'DOES', 'ASK', 'ME', 'AND', 'I', 'SAY', 'YES'] +3331-159605-0003-698: hyp=['JUST', 'SUPPOSE', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'DOES', 'ASK', 'ME', 'AND', 'I', 'SAY', 'YES'] +3331-159605-0004-699: ref=['WHAT', 'A', 'SPITEFUL', 'THING', 'I', 'AM'] +3331-159605-0004-699: hyp=['WHAT', 'A', 'SPITEFUL', 'THING', 'I', 'AM'] +3331-159605-0005-700: ref=['I', 'COULD', 'DO', 'SO', 'MUCH', 'FOR', 'ALL', 'AT', 'HOME', 'HOW', 'I', 'SHOULD', 'ENJOY', 'THAT'] +3331-159605-0005-700: hyp=['I', 'COULD', 'DO', 'SO', 'MUCH', 'FOR', 'ALL', 'AT', 'HOME', 'HOW', 'I', 'SHOULD', 'ENJOY', 'THAT'] +3331-159605-0006-701: ref=['LET', 'ME', 'SEE', 'HOW', 'CAN', 'I', 'BEGIN'] +3331-159605-0006-701: hyp=['THAT', 'MISS', 'C', 'HOW', 'CAN', 'I', 'BEGIN'] +3331-159605-0007-702: ref=['HE', 'HAS', 'KNOWN', 'HER', 'ALL', 'HER', 'LIFE', 'AND', 'HAS', 'A', 'GOOD', 'INFLUENCE', 'OVER', 'HER'] +3331-159605-0007-702: hyp=['HE', 'HAS', 'KNOWN', 'HER', 'ALL', 'HER', 'LIFE', 'AND', 'HAS', 'A', 'GOOD', 'INFLUENCE', 'OVER', 'HER'] +3331-159605-0008-703: ref=['NOW', 'AS', 'POLLY', 'WAS', 'BY', 'NO', 'MEANS', 'A', 'PERFECT', 'CREATURE', 'I', 'AM', 'FREE', 'TO', 'CONFESS', 'THAT', 'THE', 'OLD', 'TEMPTATION', 'ASSAILED', 'HER', 'MORE', 'THAN', 'ONCE', 'THAT', 'WEEK', 'FOR', 'WHEN', 'THE', 'FIRST', 'EXCITEMENT', 'OF', 'THE', 'DODGING', 'REFORM', 'HAD', 'SUBSIDED', 'SHE', 'MISSED', 'THE', 'PLEASANT', 'LITTLE', 'INTERVIEWS', 'THAT', 'USED', 'TO', 'PUT', 'A', 'CERTAIN', 'FLAVOR', 'OF', 'ROMANCE', 'INTO', 'HER', 'DULL', 'HARD', 'WORKING', 'DAYS'] +3331-159605-0008-703: hyp=['NOW', 'AS', 'POLLY', 'WAS', 'BY', 'NO', 'MEANS', 'A', 'PERFECT', 'CREATURE', 'I', 'AM', 'FREE', 'TO', 'CONFESS', 'THAT', 'THE', 'OLD', 'TEMPTATION', 'ASSAILED', 'HIM', 'MORE', 'THAN', 'ONCE', 'THE', 'WEEK', 'FOR', 'WHEN', 'THE', 'FIRST', 'EXCITEMENT', 'OF', 'THE', 'DODGING', 'REFORM', 'HAD', 'SUBSIDED', 'SHE', 'MISSED', 'THE', 'PLEASANT', 'LITTLE', 'INTERVIEWS', 'THAT', 'USED', 'TO', 'PUT', 'A', 'CERTAIN', 'FLAVOUR', 'OF', 'ROMANS', 'INTO', 'HER', 'DULL', 'HARD', 'WORKING', 'DAYS'] +3331-159605-0009-704: ref=['I', "DON'T", 'THINK', 'IT', 'WAS', 'HIS', 'WEALTH', 'ACCOMPLISHMENTS', 'OR', 'POSITION', 'THAT', 'MOST', 'ATTRACTED', 'POLLY', 'THOUGH', 'THESE', 'DOUBTLESS', 'POSSESSED', 'A', 'GREATER', 'INFLUENCE', 'THAN', 'SHE', 'SUSPECTED'] +3331-159605-0009-704: hyp=['I', "DON'T", 'THINK', 'IT', 'WAS', 'HIS', 'WEALTH', 'ACCOMPLISHMENTS', 'OPPOSITION', 'THAT', 'MOST', 'ATTRACTED', 'POLLY', 'THOUGH', 'THESE', 'DOUBTLESS', 'POSSESSED', 'A', 'GREATER', 'INFLUENCE', 'THAN', 'SHE', 'SUSPECTED'] +3331-159605-0010-705: ref=['IT', 'WAS', 'THAT', 'INDESCRIBABLE', 'SOMETHING', 'WHICH', 'WOMEN', 'ARE', 'QUICK', 'TO', 'SEE', 'AND', 'FEEL', 'IN', 'MEN', 'WHO', 'HAVE', 'BEEN', 'BLESSED', 'WITH', 'WISE', 'AND', 'GOOD', 'MOTHERS'] +3331-159605-0010-705: hyp=['IT', 'WAS', 'THAT', 'INDESCRIBABLE', 'SOMETHING', 'WHICH', 'WOMEN', 'ARE', 'QUICK', 'TO', 'SEE', 'AND', 'FEEL', 'IN', 'MEN', 'WHO', 'HAVE', 'BEEN', 'BLESSED', 'WISE', 'AND', 'GOOD', 
'MOTHERS'] +3331-159605-0011-706: ref=['THIS', 'HAD', 'AN', 'ESPECIAL', 'CHARM', 'TO', 'POLLY', 'FOR', 'SHE', 'SOON', 'FOUND', 'THAT', 'THIS', 'SIDE', 'OF', 'HIS', 'CHARACTER', 'WAS', 'NOT', 'SHOWN', 'TO', 'EVERY', 'ONE'] +3331-159605-0011-706: hyp=['THIS', 'HAD', 'AN', 'ESPECIAL', 'CHARM', 'TO', 'POLLY', 'FOR', 'SHE', 'SOON', 'FOUND', 'THAT', 'THIS', 'SIDE', 'OF', 'HIS', 'CHARACTER', 'WAS', 'NOT', 'SHOWN', 'TO', 'EVERYONE'] +3331-159605-0012-707: ref=['LATELY', 'THIS', 'HAD', 'CHANGED', 'ESPECIALLY', 'TOWARDS', 'POLLY', 'AND', 'IT', 'FLATTERED', 'HER', 'MORE', 'THAN', 'SHE', 'WOULD', 'CONFESS', 'EVEN', 'TO', 'HERSELF'] +3331-159605-0012-707: hyp=['PLATELY', 'THIS', 'HAD', 'CHANGED', 'ESPECIALLY', 'TOWARDS', 'POLLY', 'AND', 'IT', 'FLUTTERED', 'HER', 'MORE', 'THAN', 'SHE', 'WOULD', 'CONFESS', 'EVEN', 'TO', 'HERSELF'] +3331-159605-0013-708: ref=['AT', 'FIRST', 'SHE', 'TRIED', 'TO', 'THINK', 'SHE', 'COULD', 'BUT', 'UNFORTUNATELY', 'HEARTS', 'ARE', 'SO', 'CONTRARY', 'THAT', 'THEY', "WON'T", 'BE', 'OBEDIENT', 'TO', 'REASON', 'WILL', 'OR', 'EVEN', 'GRATITUDE'] +3331-159605-0013-708: hyp=['AT', 'FIRST', 'SHE', 'TRIED', 'TO', 'THINK', 'SHE', 'COULD', 'BUT', 'UNFORTUNATELY', 'HEARTS', 'ARE', 'SO', 'CONTRARY', 'THAT', 'THEY', "WON'T", 'BE', 'OBEDIENT', 'TO', 'REASON', 'WILL', 'OR', 'EVEN', 'CREDIT'] +3331-159605-0014-709: ref=['POLLY', 'FELT', 'A', 'VERY', 'CORDIAL', 'FRIENDSHIP', 'FOR', 'MISTER', 'SYDNEY', 'BUT', 'NOT', 'ONE', 'PARTICLE', 'OF', 'THE', 'LOVE', 'WHICH', 'IS', 'THE', 'ONLY', 'COIN', 'IN', 'WHICH', 'LOVE', 'CAN', 'BE', 'TRULY', 'PAID'] +3331-159605-0014-709: hyp=['POLLY', 'FELT', 'A', 'VERY', 'CORDIAL', 'FRIENDSHIP', 'FOR', 'MISTER', 'SIDNEY', 'BUT', 'NOT', 'ONE', 'PARTICLE', 'OF', 'THE', 'LAW', 'PITCHES', 'THE', 'ONLY', 'KIND', 'IN', 'WHICH', 'LOVE', 'CAN', 'BE', 'TRULY', 'PAID'] +3331-159605-0015-710: ref=['THIS', 'FINISHED', "POLLY'S", 'INDECISION', 'AND', 'AFTER', 'THAT', 'NIGHT', 'SHE', 'NEVER', 'ALLOWED', 'HERSELF', 'TO', 'DWELL', 'UPON', 'THE', 'PLEASANT', 'TEMPTATION', 'WHICH', 'CAME', 'IN', 'A', 'GUISE', 'PARTICULARLY', 'ATTRACTIVE', 'TO', 'A', 'YOUNG', 'GIRL', 'WITH', 'A', 'SPICE', 'OF', 'THE', 'OLD', 'EVE', 'IN', 'HER', 'COMPOSITION'] +3331-159605-0015-710: hyp=['THIS', 'FINISHED', "POLLY'S", 'INDECISION', 'AND', 'AFTER', 'THAT', 'NIGHT', 'SHE', 'NEVER', 'ALLOWED', 'HERSELF', 'TO', 'DWELL', 'UPON', 'THE', 'PLEASANT', 'TEMPTATION', 'WHICH', 'CAME', 'IN', 'A', 'GUISE', 'PARTICULARLY', 'ATTRACTIVE', 'TO', 'A', 'YOUNG', 'GIRL', 'WITH', 'A', 'SPIES', 'OF', 'THE', 'OLD', 'EVE', 'IN', 'HER', 'COMPOSITION'] +3331-159605-0016-711: ref=['WHEN', 'SATURDAY', 'CAME', 'POLLY', 'STARTED', 'AS', 'USUAL', 'FOR', 'A', 'VISIT', 'TO', 'BECKY', 'AND', 'BESS', 'BUT', 'COULD', "N'T", 'RESIST', 'STOPPING', 'AT', 'THE', 'SHAWS', 'TO', 'LEAVE', 'A', 'LITTLE', 'PARCEL', 'FOR', 'FAN', 'THOUGH', 'IT', 'WAS', 'CALLING', 'TIME'] +3331-159605-0016-711: hyp=['WHEN', 'SATAN', 'CAME', 'POLLY', 'STARTED', 'AS', 'USUAL', 'FOR', 'A', 'VISIT', 'TO', 'BACKY', 'AND', 'BESS', 'BUT', "COULDN'T", 'RESIST', 'STOPPING', 'AT', 'THE', 'SHORES', 'TO', 'LEAVE', 'A', 'LITTLE', 'PARCEL', 'FOR', 'FAN', 'THAT', 'WAS', 'CALLING', 'TIME'] +3331-159605-0017-712: ref=['A', 'FOOLISH', 'LITTLE', 'SPEECH', 'TO', 'MAKE', 'TO', 'A', 'DOG', 'BUT', 'YOU', 'SEE', 'POLLY', 'WAS', 'ONLY', 'A', 'TENDER', 'HEARTED', 'GIRL', 'TRYING', 'TO', 'DO', 'HER', 'DUTY'] +3331-159605-0017-712: hyp=['A', 'FOOLISH', 'LITTLE', 'SPEECH', 'TO', 'MAKE', 'TO', 'A', 'DARK', 'BUT', 'YOU', 'SEE', 'POLLY', 'WAS', 'ONLY', 'A', 'TENDER', 'HEARTED', 'GIRL', 'TRYING', 
'TO', 'HER', 'DUTY'] +3331-159605-0018-713: ref=['TAKE', 'HOLD', 'OF', 'MASTER', "CHARLEY'S", 'HAND', 'MISS', 'MAMIE', 'AND', 'WALK', 'PRETTY', 'LIKE', 'WILLY', 'AND', 'FLOSSY', 'SAID', 'THE', 'MAID'] +3331-159605-0018-713: hyp=['TAKE', 'HOLD', 'OF', 'MASSA', "CHARLIE'S", 'HAND', 'MISS', 'MAMMY', 'AND', 'WALK', 'PRETTY', 'LIKE', 'BILLY', 'AND', 'FLOSSIE', 'SAID', 'THE', 'MAID'] +3331-159605-0019-714: ref=['AT', 'A', 'STREET', 'CORNER', 'A', 'BLACK', 'EYED', 'SCHOOL', 'BOY', 'WAS', 'PARTING', 'FROM', 'A', 'ROSY', 'FACED', 'SCHOOL', 'GIRL', 'WHOSE', 'MUSIC', 'ROLL', 'HE', 'WAS', 'RELUCTANTLY', 'SURRENDERING'] +3331-159605-0019-714: hyp=['AT', 'A', 'STREET', 'CORNER', 'A', 'BLACK', 'EYED', 'SCHOOLBOY', 'WAS', 'PARTING', 'FROM', 'A', 'ROSY', 'FACED', 'SCHOOL', 'GIRL', 'WHOSE', 'MUSIC', 'ROLL', 'HE', 'WAS', 'RELUCTANTLY', 'SURRENDERING'] +3331-159605-0020-715: ref=['HOW', 'HE', 'GOT', 'THERE', 'WAS', 'NEVER', 'VERY', 'CLEAR', 'TO', 'POLLY', 'BUT', 'THERE', 'HE', 'WAS', 'FLUSHED', 'AND', 'A', 'LITTLE', 'OUT', 'OF', 'BREATH', 'BUT', 'LOOKING', 'SO', 'GLAD', 'TO', 'SEE', 'HER', 'THAT', 'SHE', 'HAD', "N'T", 'THE', 'HEART', 'TO', 'BE', 'STIFF', 'AND', 'COOL', 'AS', 'SHE', 'HAD', 'FULLY', 'INTENDED', 'TO', 'BE', 'WHEN', 'THEY', 'MET'] +3331-159605-0020-715: hyp=['HOW', 'HE', 'GOT', 'THERE', 'WAS', 'NEVER', 'VERY', 'CLEAR', 'TO', 'POLLY', 'BUT', 'THERE', 'HE', 'WAS', 'FLUSHED', 'AND', 'A', 'LITTLE', 'OUT', 'OF', 'BREATH', 'BUT', 'LOOKING', 'SO', 'GLAD', 'TO', 'SEE', 'HER', 'TILL', 'SHE', 'HAD', 'NOT', 'THE', 'HEART', 'TO', 'BE', 'STIFF', 'AND', 'COOL', 'AS', 'SHE', 'HAD', 'FULLY', 'INTENDED', 'TO', 'BE', 'WHEN', 'THEY', 'MET'] +3331-159605-0021-716: ref=['SHE', 'REALLY', 'COULD', "N'T", 'HELP', 'IT', 'IT', 'WAS', 'SO', 'PLEASANT', 'TO', 'SEE', 'HIM', 'AGAIN', 'JUST', 'WHEN', 'SHE', 'WAS', 'FEELING', 'SO', 'LONELY'] +3331-159605-0021-716: hyp=['SHE', 'REALLY', 'COULD', 'NOT', 'HELP', 'IT', 'IT', 'WAS', 'SO', 'PLEASANT', 'TO', 'SEE', 'HIM', 'AGAIN', 'JUST', 'WHEN', 'SHE', 'WAS', 'FEELING', 'SO', 'LONELY'] +3331-159605-0022-717: ref=['THAT', 'IS', 'THE', 'WAY', 'I', 'GET', 'TO', 'THE', 'ROTHS', 'ANSWERED', 'POLLY'] +3331-159605-0022-717: hyp=['THAT', 'IS', 'THE', 'WAY', 'I', 'GET', 'TO', 'THE', 'ROSS', 'ANSWERED', 'POLLY'] +3331-159605-0023-718: ref=['SHE', 'DID', 'NOT', 'MEAN', 'TO', 'TELL', 'BUT', 'HIS', 'FRANKNESS', 'WAS', 'SO', 'AGREEABLE', 'SHE', 'FORGOT', 'HERSELF'] +3331-159605-0023-718: hyp=['SHE', 'DID', 'NOT', 'MEAN', 'TO', 'TELL', 'BUT', 'HIS', 'FRANKNESS', 'WAS', 'TO', 'AGREEABLE', 'SHE', 'FORGOT', 'HERSELF'] +3331-159605-0024-719: ref=['BUT', 'I', 'KNOW', 'HER', 'BETTER', 'AND', 'I', 'ASSURE', 'YOU', 'THAT', 'SHE', 'DOES', 'IMPROVE', 'SHE', 'TRIES', 'TO', 'MEND', 'HER', 'FAULTS', 'THOUGH', 'SHE', "WON'T", 'OWN', 'IT', 'AND', 'WILL', 'SURPRISE', 'YOU', 'SOME', 'DAY', 'BY', 'THE', 'AMOUNT', 'OF', 'HEART', 'AND', 'SENSE', 'AND', 'GOODNESS', 'SHE', 'HAS', 'GOT'] +3331-159605-0024-719: hyp=['BUT', 'I', 'KNOW', 'HER', 'BETTER', 'AND', 'I', 'ASSURE', 'YOU', 'THAT', 'SHE', "DOESN'T", 'PROVE', 'SHE', 'TRIES', 'TO', 'MENTAL', 'FAULTS', 'THOUGH', 'SHE', "WON'T", 'OWN', 'IT', 'AND', 'WAS', 'SURPRISE', 'YOU', 'SOME', 'DAY', 'BY', 'THE', 'AMOUNT', 'OF', 'HEART', 'AND', 'SENSE', 'AND', 'GOODNESS', 'SHE', 'HAS', 'GOT'] +3331-159605-0025-720: ref=['THANK', 'YOU', 'NO'] +3331-159605-0025-720: hyp=['THANK', 'YOU', 'NO'] +3331-159605-0026-721: ref=['HOW', 'LOVELY', 'THE', 'PARK', 'LOOKS', 'SHE', 'SAID', 'IN', 'GREAT', 'CONFUSION'] +3331-159605-0026-721: hyp=['HOW', 'LOVELY', 'THE', 'PARK', 'LOOKS', 'SHE', 
'SAID', 'IN', 'GREAT', 'CONFUSION'] +3331-159605-0027-722: ref=['ASKED', 'THE', 'ARTFUL', 'YOUNG', 'MAN', 'LAYING', 'A', 'TRAP', 'INTO', 'WHICH', 'POLLY', 'IMMEDIATELY', 'FELL'] +3331-159605-0027-722: hyp=['ASKED', 'THE', 'ARTFUL', 'YOUNG', 'MAN', 'LAYING', 'A', 'TRAP', 'INTO', 'WHICH', 'POLLY', 'IMMEDIATELY', 'FELL'] +3331-159605-0028-723: ref=['HE', 'WAS', 'QUICKER', 'TO', 'TAKE', 'A', 'HINT', 'THAN', 'SHE', 'HAD', 'EXPECTED', 'AND', 'BEING', 'BOTH', 'PROUD', 'AND', 'GENEROUS', 'RESOLVED', 'TO', 'SETTLE', 'THE', 'MATTER', 'AT', 'ONCE', 'FOR', "POLLY'S", 'SAKE', 'AS', 'WELL', 'AS', 'HIS', 'OWN'] +3331-159605-0028-723: hyp=['HE', 'WAS', 'QUICKER', 'TO', 'TAKE', 'A', 'HINT', 'THAN', 'SHE', 'HAD', 'EXPECTED', 'AND', 'BEING', 'BOTH', 'PROUD', 'AND', 'GENEROUS', 'WE', 'SOFT', 'TO', 'SETTLE', 'THE', 'MATTER', 'AT', 'ONCE', 'FOR', "POLLY'S", 'SAKE', 'AS', 'WELL', 'AS', 'HIS', 'OWN'] +3331-159605-0029-724: ref=['SO', 'WHEN', 'SHE', 'MADE', 'HER', 'LAST', 'BRILLIANT', 'REMARK', 'HE', 'SAID', 'QUIETLY', 'WATCHING', 'HER', 'FACE', 'KEENLY', 'ALL', 'THE', 'WHILE', 'I', 'THOUGHT', 'SO', 'WELL', 'I', 'M', 'GOING', 'OUT', 'OF', 'TOWN', 'ON', 'BUSINESS', 'FOR', 'SEVERAL', 'WEEKS', 'SO', 'YOU', 'CAN', 'ENJOY', 'YOUR', 'LITTLE', 'BIT', 'OF', 'COUNTRY', 'WITHOUT', 'BEING', 'ANNOYED', 'BY', 'ME', 'ANNOYED'] +3331-159605-0029-724: hyp=['SO', 'WHEN', 'SHE', 'MADE', 'HER', 'LAST', 'POINT', 'REMARK', 'HE', 'SAID', 'QUIETLY', 'WATCHING', 'HER', 'FACE', 'KEENLY', 'ALL', 'THE', 'WHILE', 'I', 'THOUGHT', 'SO', 'WELL', "I'M", 'GOING', 'OUT', 'OF', 'TOWN', 'ON', 'BUSINESS', 'FOR', 'SEVERAL', 'WEEKS', 'SO', 'YOU', 'CAN', 'ENJOY', 'YOU', 'LITTLE', 'BIT', 'OF', 'COUNTRY', 'WITHOUT', 'BEING', 'ANNOYED', 'BY', 'ME', 'ANNOY', 'IT'] +3331-159605-0030-725: ref=['SHE', 'THOUGHT', 'SHE', 'HAD', 'A', 'GOOD', 'DEAL', 'OF', 'THE', 'COQUETTE', 'IN', 'HER', 'AND', 'I', 'VE', 'NO', 'DOUBT', 'THAT', 'WITH', 'TIME', 'AND', 'TRAINING', 'SHE', 'WOULD', 'HAVE', 'BECOME', 'A', 'VERY', 'DANGEROUS', 'LITTLE', 'PERSON', 'BUT', 'NOW', 'SHE', 'WAS', 'FAR', 'TOO', 'TRANSPARENT', 'AND', 'STRAIGHTFORWARD', 'BY', 'NATURE', 'EVEN', 'TO', 'TELL', 'A', 'WHITE', 'LIE', 'CLEVERLY'] +3331-159605-0030-725: hyp=['SHE', 'THOUGHT', 'SHE', 'HAD', 'A', 'GOOD', 'DEAL', 'OF', 'THE', 'COQUETTE', 'IN', 'HER', 'AND', "I'VE", 'NO', 'DOUBT', 'THAT', 'WITH', 'TIME', 'AND', 'TRAINING', 'SHE', 'WOULD', 'HAVE', 'BECOME', 'A', 'VERY', 'DANGEROUS', 'LITTLE', 'PERSON', 'BUT', 'NOW', 'SHE', 'WAS', 'FAR', 'TO', 'TRANSPARENT', 'AND', 'STRAIGHTFORWARD', 'BY', 'NATURE', 'EVEN', 'TO', 'TELL', 'A', 'WIDE', 'LIKE', 'LEVELLY'] +3331-159605-0031-726: ref=['HE', 'WAS', 'GONE', 'BEFORE', 'SHE', 'COULD', 'DO', 'ANYTHING', 'BUT', 'LOOK', 'UP', 'AT', 'HIM', 'WITH', 'A', 'REMORSEFUL', 'FACE', 'AND', 'SHE', 'WALKED', 'ON', 'FEELING', 'THAT', 'THE', 'FIRST', 'AND', 'PERHAPS', 'THE', 'ONLY', 'LOVER', 'SHE', 'WOULD', 'EVER', 'HAVE', 'HAD', 'READ', 'HIS', 'ANSWER', 'AND', 'ACCEPTED', 'IT', 'IN', 'SILENCE'] +3331-159605-0031-726: hyp=['HE', 'WAS', 'GONE', 'BEFORE', 'SHE', 'COULD', 'DO', 'ANYTHING', 'BUT', 'LOOK', 'UP', 'AT', 'HIM', 'WITH', 'A', 'REMORSEFUL', 'FACE', 'AND', 'SHE', 'WALKED', 'ON', 'FEELING', 'THAT', 'THE', 'FIRST', 'AND', 'PERHAPS', 'THE', 'ONLY', 'LOVER', 'SHE', 'WOULD', 'EVER', 'HAVE', 'HAD', 'READ', 'HIS', 'ANSWER', 'AND', 'ACCEPTED', 'IN', 'SILENCE'] +3331-159605-0032-727: ref=['POLLY', 'DID', 'NOT', 'RETURN', 'TO', 'HER', 'FAVORITE', 'WALK', 'TILL', 'SHE', 'LEARNED', 'FROM', 'MINNIE', 'THAT', 'UNCLE', 'HAD', 'REALLY', 'LEFT', 'TOWN', 'AND', 'THEN', 'SHE', 'FOUND', 'THAT', 
'HIS', 'FRIENDLY', 'COMPANY', 'AND', 'CONVERSATION', 'WAS', 'WHAT', 'HAD', 'MADE', 'THE', 'WAY', 'SO', 'PLEASANT', 'AFTER', 'ALL'] +3331-159605-0032-727: hyp=['POLLY', 'DID', 'NOT', 'RETURN', 'TO', 'HER', 'FAVOURITE', 'WALK', 'TILL', 'SHE', 'LEARNED', 'FOR', 'MINNIE', 'THAT', 'UNCLE', 'HAD', 'REALLY', 'LEFT', 'TOWN', 'AND', 'THEN', 'SHE', 'FOUND', 'THAT', 'HIS', 'FRIENDLY', 'COMPANY', 'AND', 'CONVERSATION', 'WAS', 'WHAT', 'HAD', 'MADE', 'THE', 'WAY', 'SO', 'PLEASANT', 'AFTER', 'ALL'] +3331-159605-0033-728: ref=['WAGGING', 'TO', 'AND', 'FRO', 'AS', 'USUAL', "WHAT'S", 'THE', 'NEWS', 'WITH', 'YOU'] +3331-159605-0033-728: hyp=['WORKING', 'TO', 'AND', 'FRO', 'AS', 'USUAL', "WHAT'S", 'THE', 'NEWS', 'WITH', 'YOU'] +3331-159605-0034-729: ref=['PERHAPS', 'SHE', 'LL', 'JILT', 'HIM'] +3331-159605-0034-729: hyp=['PERHAPS', "SHE'LL", 'CHILLED', 'HIM'] +3331-159605-0035-730: ref=['UTTERLY', 'DONE', 'WITH', 'AND', 'LAID', 'UPON', 'THE', 'SHELF'] +3331-159605-0035-730: hyp=['UTTERLY', 'DONE', 'WITH', 'AND', 'LAID', 'UPON', 'THE', 'SHELF'] +3331-159605-0036-731: ref=['MINNIE', 'SAID', 'THE', 'OTHER', 'DAY', 'SHE', 'WISHED', 'SHE', 'WAS', 'A', 'PIGEON', 'SO', 'SHE', 'COULD', 'PADDLE', 'IN', 'THE', 'PUDDLES', 'AND', 'NOT', 'FUSS', 'ABOUT', 'RUBBERS'] +3331-159605-0036-731: hyp=['MINNI', 'SAID', 'THE', 'OTHER', 'DAY', 'SHE', 'WISHED', 'SHE', 'WAS', 'A', 'PITCHEN', 'SO', 'SHE', 'COULD', 'PADDLE', 'IN', 'THE', 'BOTTLES', 'AND', 'NUT', 'FUSS', 'ABOUT', 'RUBBERS'] +3331-159605-0037-732: ref=['NOW', "DON'T", 'BE', 'AFFECTED', 'POLLY', 'BUT', 'JUST', 'TELL', 'ME', 'LIKE', 'A', 'DEAR', 'HAS', "N'T", 'HE', 'PROPOSED'] +3331-159605-0037-732: hyp=['NOW', "DON'T", 'BE', 'AFFECTED', 'POLLY', 'BUT', 'JUST', 'TELL', 'ME', 'LIKE', 'A', 'DEAR', 'HAS', 'NOT', 'HE', 'PROPOSED'] +3331-159605-0038-733: ref=["DON'T", 'YOU', 'THINK', 'HE', 'MEANS', 'TO'] +3331-159605-0038-733: hyp=["DON'T", 'YOU', 'THINK', 'HE', 'MEANS', 'TO'] +3331-159605-0039-734: ref=['TRULY', 'TRULY', 'FAN'] +3331-159605-0039-734: hyp=['TRULY', 'JULIE', 'FAN'] +3331-159605-0040-735: ref=['I', "DON'T", 'MEAN', 'TO', 'BE', 'PRYING', 'BUT', 'I', 'REALLY', 'THOUGHT', 'HE', 'DID'] +3331-159605-0040-735: hyp=['I', "DON'T", 'MEAN', 'TO', 'BE', 'PRYING', 'BUT', 'I', 'REALLY', 'THOUGHT', 'HE', 'DID'] +3331-159605-0041-736: ref=['WELL', 'I', 'ALWAYS', 'MEANT', 'TO', 'TRY', 'IT', 'IF', 'I', 'GOT', 'A', 'CHANCE', 'AND', 'I', 'HAVE'] +3331-159605-0041-736: hyp=['WELL', 'I', 'ALWAYS', 'MEANT', 'TO', 'TRY', 'IT', 'IF', 'I', 'GOT', 'A', 'CHANCE', 'AND', 'I', 'HAVE'] +3331-159605-0042-737: ref=['I', 'JUST', 'GAVE', 'HIM', 'A', 'HINT', 'AND', 'HE', 'TOOK', 'IT'] +3331-159605-0042-737: hyp=['I', 'JUST', 'GAVE', 'HIM', 'A', 'HINT', 'AND', 'HE', 'TOOK', 'IT'] +3331-159605-0043-738: ref=['HE', 'MEANT', 'TO', 'GO', 'AWAY', 'BEFORE', 'THAT', 'SO', "DON'T", 'THINK', 'HIS', 'HEART', 'IS', 'BROKEN', 'OR', 'MIND', 'WHAT', 'SILLY', 'TATTLERS', 'SAY'] +3331-159605-0043-738: hyp=['HE', 'MEANT', 'TO', 'GO', 'AWAY', 'BEFORE', 'THAT', 'SO', "DON'T", 'THINK', 'HIS', 'HEART', 'IS', 'BROKEN', 'O', 'MIND', 'WHAT', 'SIDI', 'TEDLERS', 'SAY'] +3331-159605-0044-739: ref=['HE', 'UNDERSTOOD', 'AND', 'BEING', 'A', 'GENTLEMAN', 'MADE', 'NO', 'FUSS'] +3331-159605-0044-739: hyp=['HE', 'UNDERSTOOD', 'AND', 'BEING', 'A', 'GENTLEMAN', 'MADE', 'NO', 'FUSS'] +3331-159605-0045-740: ref=['BUT', 'POLLY', 'IT', 'WOULD', 'HAVE', 'BEEN', 'A', 'GRAND', 'THING', 'FOR', 'YOU'] +3331-159605-0045-740: hyp=['BUT', 'POLLY', 'IT', 'WOULD', 'HAVE', 'BEEN', 'A', 'GRAND', 'THING', 'FOR', 'YOU'] +3331-159605-0046-741: ref=['I', 
'M', 'ODD', 'YOU', 'KNOW', 'AND', 'PREFER', 'TO', 'BE', 'AN', 'INDEPENDENT', 'SPINSTER', 'AND', 'TEACH', 'MUSIC', 'ALL', 'MY', 'DAYS'] +3331-159605-0046-741: hyp=['I', 'AM', 'AUGHT', 'YOU', 'KNOW', 'AND', 'PREFER', 'TO', 'BE', 'AN', 'INDEPENDENT', 'SPINSTER', 'AND', 'TEACH', 'MUSIC', 'ALL', 'MY', 'DAYS'] +3331-159609-0000-742: ref=['NEVER', 'MIND', 'WHAT', 'THE', 'BUSINESS', 'WAS', 'IT', 'SUFFICES', 'TO', 'SAY', 'THAT', 'IT', 'WAS', 'A', 'GOOD', 'BEGINNING', 'FOR', 'A', 'YOUNG', 'MAN', 'LIKE', 'TOM', 'WHO', 'HAVING', 'BEEN', 'BORN', 'AND', 'BRED', 'IN', 'THE', 'MOST', 'CONSERVATIVE', 'CLASS', 'OF', 'THE', 'MOST', 'CONCEITED', 'CITY', 'IN', 'NEW', 'ENGLAND', 'NEEDED', 'JUST', 'THE', 'HEALTHY', 'HEARTY', 'SOCIAL', 'INFLUENCES', 'OF', 'THE', 'WEST', 'TO', 'WIDEN', 'HIS', 'VIEWS', 'AND', 'MAKE', 'A', 'MAN', 'OF', 'HIM'] +3331-159609-0000-742: hyp=['NEVER', 'MIND', 'WHAT', 'THE', 'BUSINESS', 'WAS', 'ITS', 'SURFACES', 'TO', 'SAY', 'THAT', 'IT', 'WAS', 'A', 'GOOD', 'BEGINNING', 'FOR', 'A', 'YOUNG', 'MAN', 'LIKE', 'TOM', 'WHO', 'HAVING', 'BEEN', 'BORN', 'AND', 'BRED', 'IN', 'THE', 'MOST', 'CONSERVATIVE', 'GLASS', 'OF', 'THE', 'MOST', 'CONCEITED', 'CITY', 'IN', 'NEW', 'ENGLAND', 'NEEDED', 'JUST', 'THE', 'HEALTHY', 'HEARTY', 'SOCIAL', 'INFLUENCES', 'OF', 'THE', 'WEST', 'TO', 'WIDEN', 'HIS', 'VIEWS', 'AND', 'MAKE', 'A', 'MAN', 'OF', 'HIM'] +3331-159609-0001-743: ref=['FORTUNATELY', 'EVERY', 'ONE', 'WAS', 'SO', 'BUSY', 'WITH', 'THE', 'NECESSARY', 'PREPARATIONS', 'THAT', 'THERE', 'WAS', 'NO', 'TIME', 'FOR', 'ROMANCE', 'OF', 'ANY', 'SORT', 'AND', 'THE', 'FOUR', 'YOUNG', 'PEOPLE', 'WORKED', 'TOGETHER', 'AS', 'SOBERLY', 'AND', 'SENSIBLY', 'AS', 'IF', 'ALL', 'SORTS', 'OF', 'EMOTIONS', 'WERE', 'NOT', 'BOTTLED', 'UP', 'IN', 'THEIR', 'RESPECTIVE', 'HEARTS'] +3331-159609-0001-743: hyp=['FORTUNATELY', 'EVERY', 'ONE', 'WAS', 'SO', 'BUSY', 'WITH', 'THE', 'NECESSARY', 'PREPARATIONS', 'THAT', 'THERE', 'WAS', 'NO', 'TIME', 'FOR', 'ROMANS', 'OF', 'ANY', 'SORT', 'AND', 'THE', 'FOUR', 'YOUNG', 'PEOPLE', 'WORKED', 'TOGETHER', 'AS', 'SOBERLY', 'AND', 'SENSIBLY', 'AS', 'IF', 'ALL', 'SORTS', 'OF', 'EMOTIONS', 'WERE', 'NOT', 'BOTHERED', 'UP', 'IN', 'THEIR', 'RESPECTIVE', 'HEARTS'] +3331-159609-0002-744: ref=['PITY', 'THAT', 'THE', 'END', 'SHOULD', 'COME', 'SO', 'SOON', 'BUT', 'THE', 'HOUR', 'DID', 'ITS', 'WORK', 'AND', 'WENT', 'ITS', 'WAY', 'LEAVING', 'A', 'CLEARER', 'ATMOSPHERE', 'BEHIND', 'THOUGH', 'THE', 'YOUNG', 'FOLKS', 'DID', 'NOT', 'SEE', 'IT', 'THEN', 'FOR', 'THEIR', 'EYES', 'WERE', 'DIM', 'BECAUSE', 'OF', 'THE', 'PARTINGS', 'THAT', 'MUST', 'BE'] +3331-159609-0002-744: hyp=['PITY', 'THAT', 'THE', 'END', 'SHOULD', 'COME', 'SO', 'SOON', 'BUT', 'THE', 'HOUR', 'DID', 'ITS', 'WORK', 'AND', 'WHEN', 'ITS', 'WAY', 'LEAVING', 'A', 'CLEARER', 'ATMOSPHERE', 'BEHIND', 'THAT', 'THE', 'YOUNG', 'FOLKS', 'DID', 'NOT', 'SEE', 'IT', 'THEN', 'FOR', 'THEIR', 'EYES', 'WERE', 'DIM', 'BECAUSE', 'OF', 'THE', 'PARTINGS', 'THAT', 'MUST', 'BE'] +3331-159609-0003-745: ref=['IF', 'IT', 'HAD', 'NOT', 'BEEN', 'FOR', 'TWO', 'THINGS', 'I', 'FEAR', 'SHE', 'NEVER', 'WOULD', 'HAVE', 'STOOD', 'A', 'SUMMER', 'IN', 'TOWN', 'BUT', 'SYDNEY', 'OFTEN', 'CALLED', 'TILL', 'HIS', 'VACATION', 'CAME', 'AND', 'A', 'VOLUMINOUS', 'CORRESPONDENCE', 'WITH', 'POLLY', 'BEGUILED', 'THE', 'LONG', 'DAYS'] +3331-159609-0003-745: hyp=['IF', 'IT', 'HAD', 'NOT', 'BEEN', 'FOR', 'TWO', 'THINGS', 'I', 'FEAR', 'SHE', 'NEVER', 'WOULD', 'HAVE', 'STOOD', 'A', 'SUMMER', 'IN', 'TOWN', 'BUT', 'SYDNEY', 'OFTEN', 'CALLED', 'TO', 'HIS', 'VACATION', 'CAME', 'AND', 'A', 'VOLUMINOUS', 
'CORRESPONDENCE', 'WITH', 'POLLY', 'BEGUILD', 'THE', 'LONG', 'DAYS'] +3331-159609-0004-746: ref=['TOM', 'WROTE', 'ONCE', 'A', 'WEEK', 'TO', 'HIS', 'MOTHER', 'BUT', 'THE', 'LETTERS', 'WERE', 'SHORT', 'AND', 'NOT', 'VERY', 'SATISFACTORY', 'FOR', 'MEN', 'NEVER', 'DO', 'TELL', 'THE', 'INTERESTING', 'LITTLE', 'THINGS', 'THAT', 'WOMEN', 'BEST', 'LIKE', 'TO', 'HEAR'] +3331-159609-0004-746: hyp=['TUMULT', 'ONES', 'A', 'WEEK', 'TO', 'HIS', 'MOTHER', 'BUT', 'THEY', 'LET', 'US', 'WERE', 'SHORT', 'AND', 'NOT', 'VERY', 'SATISFACTORY', 'FOR', 'MEN', 'NEVER', 'DO', 'SO', 'THE', 'INTERESTING', 'LITTLE', 'THINGS', 'THAT', 'WOMEN', 'BEST', 'LIKE', 'TO', 'HEAR'] +3331-159609-0005-747: ref=['NO', 'I', 'M', 'ONLY', 'TIRED', 'HAD', 'A', 'GOOD', 'DEAL', 'TO', 'DO', 'LATELY', 'AND', 'THE', 'DULL', 'WEATHER', 'MAKES', 'ME', 'JUST', 'A', 'TRIFLE', 'BLUE'] +3331-159609-0005-747: hyp=['NO', 'I', 'AM', 'ONLY', 'TIRED', 'HAD', 'A', 'GOOD', 'DEAL', 'TO', 'DO', 'LATELY', 'AND', 'THE', 'DULL', 'WEATHER', 'MAKES', 'ME', 'CHOS', 'THE', 'TRAVEL', 'BLUE'] +3331-159609-0006-748: ref=['FORGIVE', 'ME', 'POLLY', 'BUT', 'I', "CAN'T", 'HELP', 'SAYING', 'IT', 'FOR', 'IT', 'IS', 'THERE', 'AND', 'I', 'WANT', 'TO', 'BE', 'AS', 'TRUE', 'TO', 'YOU', 'AS', 'YOU', 'WERE', 'TO', 'ME', 'IF', 'I', 'CAN'] +3331-159609-0006-748: hyp=['FORGIVE', 'ME', 'POLLY', 'BUT', 'I', "CAN'T", 'HELP', 'SAYING', 'IT', 'FOR', 'IT', 'IS', 'THERE', 'AND', 'I', 'WANT', 'TO', 'BE', 'AS', 'TRUE', 'TO', 'YOU', 'AS', 'YOU', 'WERE', 'TO', 'ME', 'IF', 'I', 'CAN'] +3331-159609-0007-749: ref=['I', 'TRY', 'NOT', 'TO', 'DECEIVE', 'MYSELF', 'BUT', 'IT', 'DOES', 'SEEM', 'AS', 'IF', 'THERE', 'WAS', 'A', 'CHANCE', 'OF', 'HAPPINESS', 'FOR', 'ME'] +3331-159609-0007-749: hyp=['I', 'TRIED', 'NOT', 'A', 'DECEIVE', 'MYSELF', 'BUT', 'IT', 'DOES', 'SEEM', 'AS', 'IF', 'THERE', 'WAS', 'A', 'CHANCE', 'OF', 'HAPPINESS', 'FOR', 'ME'] +3331-159609-0008-750: ref=['THANK', 'HEAVEN', 'FOR', 'THAT'] +3331-159609-0008-750: hyp=['THANK', 'HEAVEN', 'FOR', 'THAT'] +3331-159609-0009-751: ref=['CRIED', 'POLLY', 'WITH', 'THE', 'HEARTIEST', 'SATISFACTION', 'IN', 'HER', 'VOICE'] +3331-159609-0009-751: hyp=['CRIED', 'POLLY', 'WITH', 'THE', 'HARDIEST', 'SATISFACTION', 'IN', 'HER', 'VOICE'] +3331-159609-0010-752: ref=['POOR', 'POLLY', 'WAS', 'SO', 'TAKEN', 'BY', 'SURPRISE', 'THAT', 'SHE', 'HAD', 'NOT', 'A', 'WORD', 'TO', 'SAY'] +3331-159609-0010-752: hyp=['POOR', 'POLLY', 'WAS', 'SO', 'TAKEN', 'BY', 'SURPRISE', 'THAT', 'SHE', 'HAD', 'NOT', 'A', 'WORD', 'TO', 'SAY'] +3331-159609-0011-753: ref=['NONE', 'WERE', 'NEEDED', 'HER', 'TELLTALE', 'FACE', 'ANSWERED', 'FOR', 'HER', 'AS', 'WELL', 'AS', 'THE', 'IMPULSE', 'WHICH', 'MADE', 'HER', 'HIDE', 'HER', 'HEAD', 'IN', 'THE', 'SOFA', 'CUSHION', 'LIKE', 'A', 'FOOLISH', 'OSTRICH', 'WHEN', 'THE', 'HUNTERS', 'ARE', 'AFTER', 'IT'] +3331-159609-0011-753: hyp=['NONE', 'WERE', 'NEEDED', 'HOTEL', 'HER', 'FACE', 'ANSWERED', 'FOR', 'HER', 'AS', 'WELL', 'AS', 'THE', 'IMPULSE', 'WHICH', 'MADE', 'HER', 'HIDE', 'HER', 'HEAD', 'IN', 'THE', 'SOFA', 'CUSHION', 'LIKE', 'A', 'FOOLISH', 'OSTRICH', 'AND', 'THE', 'HANDS', 'ARE', 'AFTER', 'IT'] +3331-159609-0012-754: ref=['ONCE', 'OR', 'TWICE', 'BUT', 'SORT', 'OF', 'JOKINGLY', 'AND', 'I', 'THOUGHT', 'IT', 'WAS', 'ONLY', 'SOME', 'LITTLE', 'FLIRTATION'] +3331-159609-0012-754: hyp=['ONCE', 'OR', 'TWICE', 'BUT', 'SORT', 'OF', 'CHOKINGLY', 'AND', 'I', 'THOUGHT', 'IT', 'WAS', 'ONLY', 'SOME', 'LITTLE', 'FLIRTATION'] +3331-159609-0013-755: ref=['IT', 'WAS', 'SO', 'STUPID', 'OF', 'ME', 'NOT', 'TO', 'GUESS', 'BEFORE'] +3331-159609-0013-755: 
hyp=['IT', 'WAS', 'SO', 'STUPID', 'OF', 'ME', 'NOT', 'TO', 'GUESS', 'BEFORE'] +3331-159609-0014-756: ref=['IT', 'WAS', 'SO', 'TENDER', 'EARNEST', 'AND', 'DEFIANT', 'THAT', 'FANNY', 'FORGOT', 'THE', 'DEFENCE', 'OF', 'HER', 'OWN', 'LOVER', 'IN', 'ADMIRATION', 'OF', "POLLY'S", 'LOYALTY', 'TO', 'HERS', 'FOR', 'THIS', 'FAITHFUL', 'ALL', 'ABSORBING', 'LOVE', 'WAS', 'A', 'NEW', 'REVELATION', 'TO', 'FANNY', 'WHO', 'WAS', 'USED', 'TO', 'HEARING', 'HER', 'FRIENDS', 'BOAST', 'OF', 'TWO', 'OR', 'THREE', 'LOVERS', 'A', 'YEAR', 'AND', 'CALCULATE', 'THEIR', 'RESPECTIVE', 'VALUES', 'WITH', 'ALMOST', 'AS', 'MUCH', 'COOLNESS', 'AS', 'THE', 'YOUNG', 'MEN', 'DISCUSSED', 'THE', 'FORTUNES', 'OF', 'THE', 'GIRLS', 'THEY', 'WISHED', 'FOR', 'BUT', 'COULD', 'NOT', 'AFFORD', 'TO', 'MARRY'] +3331-159609-0014-756: hyp=['IT', 'WAS', 'HER', 'TENDER', 'EARNEST', 'AND', 'DEFIANT', 'THAT', 'FANNY', 'FORGOT', 'THE', 'DEFENCE', 'OF', 'HER', 'OWN', 'LOVE', 'IN', 'ADMIRATION', 'OF', "POLLY'S", 'LOYALTY', 'TO', 'HERS', 'FOR', 'THIS', 'FAITHFUL', 'ALL', 'ABSORBING', 'LOVE', 'WAS', 'A', 'NEW', 'REVELATION', 'TO', 'FANNY', 'WHO', 'WAS', 'USED', 'TO', 'HEARING', 'HER', 'FRIENDS', 'BOAST', 'OF', 'TWO', 'OR', 'THREE', 'LOVERS', 'A', 'YEAR', 'AND', 'CALCULATE', 'THEIR', 'RESPECTIVE', 'VALUES', 'WITH', 'ALMOST', 'AS', 'MUCH', 'COOLNESS', 'AS', 'THE', 'YOUNG', 'MEN', 'DISCUSSED', 'THE', 'FORTUNES', 'OF', 'THE', 'GIRLS', 'THEY', 'WISHED', 'FOR', 'BUT', 'COULD', 'NOT', 'AFFORD', 'TO', 'MARRY'] +3331-159609-0015-757: ref=['I', 'HOPE', 'MARIA', 'BAILEY', 'IS', 'ALL', 'HE', 'THINKS', 'HER', 'SHE', 'ADDED', 'SOFTLY', 'FOR', 'I', 'COULD', "N'T", 'BEAR', 'TO', 'HAVE', 'HIM', 'DISAPPOINTED', 'AGAIN'] +3331-159609-0015-757: hyp=['I', 'HOPE', 'MARIA', "BAILEY'S", 'ONLY', 'THINK', 'SIR', 'SHE', 'ADDED', 'SOFTLY', 'FOR', 'I', 'COULD', 'NOT', 'BEAR', 'TO', 'HAVE', 'HIM', 'DISAPPOINTED', 'AGAIN'] +3331-159609-0016-758: ref=['SAID', 'FANNY', 'TURNING', 'HOPEFUL', 'ALL', 'AT', 'ONCE'] +3331-159609-0016-758: hyp=['SAID', 'FANNY', 'TURNING', 'HOPEFUL', 'ALL', 'AT', 'ONCE'] +3331-159609-0017-759: ref=['SUPPOSE', 'I', 'SAY', 'A', 'WORD', 'TO', 'TOM', 'JUST', 'INQUIRE', 'AFTER', 'HIS', 'HEART', 'IN', 'A', 'GENERAL', 'WAY', 'YOU', 'KNOW', 'AND', 'GIVE', 'HIM', 'A', 'CHANCE', 'TO', 'TELL', 'ME', 'IF', 'THERE', 'IS', 'ANYTHING', 'TO', 'TELL'] +3331-159609-0017-759: hyp=['SUPPOSE', 'I', 'SAY', 'A', 'WORD', 'TO', 'TOM', 'JUST', 'INQUIRE', 'AFTER', 'HIS', 'HEART', 'IN', 'A', 'GENERAL', 'WAY', 'YOU', 'KNOW', 'AND', 'GIVE', 'HIM', 'A', 'CHANCE', 'TO', 'TELL', 'ME', 'IF', "THERE'S", 'ANYTHING', 'TO', 'TELL'] +3331-159609-0018-760: ref=['BEAR', 'IT', 'PEOPLE', 'ALWAYS', 'DO', 'BEAR', 'THINGS', 'SOMEHOW', 'ANSWERED', 'POLLY', 'LOOKING', 'AS', 'IF', 'SENTENCE', 'HAD', 'BEEN', 'PASSED', 'UPON', 'HER'] +3331-159609-0018-760: hyp=['BEAR', 'IT', 'PEOPLE', 'ALWAYS', 'DO', 'BARE', 'THINGS', 'SOMEHOW', 'ANSWERED', 'POLLY', 'LOOKING', 'AS', 'IF', 'SENTENCE', 'HAD', 'BEEN', 'PASSED', 'UPON', 'HER'] +3331-159609-0019-761: ref=['IT', 'WAS', 'A', 'VERY', 'DIFFERENT', 'WINTER', 'FROM', 'THE', 'LAST', 'FOR', 'BOTH', 'THE', 'GIRLS'] +3331-159609-0019-761: hyp=['IT', 'WAS', 'A', 'VERY', 'DIFFERENT', 'WINDOW', 'ON', 'THE', 'LAST', 'ABOVE', 'THE', 'GIRLS'] +3331-159609-0020-762: ref=['IF', 'FANNY', 'WANTED', 'TO', 'SHOW', 'HIM', 'WHAT', 'SHE', 'COULD', 'DO', 'TOWARD', 'MAKING', 'A', 'PLEASANT', 'HOME', 'SHE', 'CERTAINLY', 'SUCCEEDED', 'BETTER', 'THAN', 'SHE', 'SUSPECTED', 'FOR', 'IN', 'SPITE', 'OF', 'MANY', 'FAILURES', 'AND', 'DISCOURAGEMENTS', 'BEHIND', 'THE', 'SCENES', 'THE', 'LITTLE', 
'HOUSE', 'BECAME', 'A', 'MOST', 'ATTRACTIVE', 'PLACE', 'TO', 'MISTER', 'SYDNEY', 'AT', 'LEAST', 'FOR', 'HE', 'WAS', 'MORE', 'THE', 'HOUSE', 'FRIEND', 'THAN', 'EVER', 'AND', 'SEEMED', 'DETERMINED', 'TO', 'PROVE', 'THAT', 'CHANGE', 'OF', 'FORTUNE', 'MADE', 'NO', 'DIFFERENCE', 'TO', 'HIM'] +3331-159609-0020-762: hyp=['IF', 'ANY', 'WANTED', 'TO', 'SHOW', 'HIM', 'WHAT', 'SHE', 'COULD', 'DO', 'TOWARD', 'MAKING', 'A', 'PLEASANT', 'HOME', 'SHE', 'CERTAINLY', 'SUCCEEDED', 'BY', 'THEN', 'SHE', 'SUSPECTED', 'FOR', 'IN', 'SPITE', 'OF', 'MANY', 'FAILURES', 'AND', 'DISCOURAGEMENTS', 'BEHIND', 'THE', 'SCENES', 'THE', 'LITTLE', 'HOUSE', 'BECAME', 'A', 'MOST', 'ATTRACTIVE', 'PLACE', 'TO', 'MISTER', 'SIDNEY', 'AT', 'LEAST', 'FOR', 'HE', 'WAS', 'MORE', 'THE', 'HOUSE', 'FRIEND', 'THAN', 'EVER', 'AND', 'SEEMED', 'DETERMINED', 'TO', 'PROVE', 'THAT', 'CHANGE', 'OF', 'FORTUNE', 'MADE', 'NO', 'DIFFERENCE', 'TO', 'HIM'] +3331-159609-0021-763: ref=['SHE', 'KEPT', 'MUCH', 'AT', 'HOME', 'WHEN', 'THE', "DAY'S", 'WORK', 'WAS', 'DONE', 'FINDING', 'IT', 'PLEASANTER', 'TO', 'SIT', 'DREAMING', 'OVER', 'BOOK', 'OR', 'SEWING', 'ALONE', 'THAN', 'TO', 'EXERT', 'HERSELF', 'EVEN', 'TO', 'GO', 'TO', 'THE', 'SHAWS'] +3331-159609-0021-763: hyp=['SHE', 'KEPT', 'MUCH', 'AT', 'HOME', 'WHEN', 'THE', "DAY'S", 'WORK', 'WAS', 'DONE', 'FINDING', 'IT', 'PLEASANTER', 'TO', 'SIT', 'DREAMING', 'OF', 'A', 'BOOK', 'OR', 'SEWING', 'ALONE', 'THAN', 'TO', 'EXERT', 'HERSELF', 'EVEN', 'TO', 'GO', 'TO', 'THE', 'SHORES'] +3331-159609-0022-764: ref=['POLLY', 'WAS', 'NOT', 'AT', 'ALL', 'LIKE', 'HERSELF', 'THAT', 'WINTER', 'AND', 'THOSE', 'NEAREST', 'TO', 'HER', 'SAW', 'AND', 'WONDERED', 'AT', 'IT', 'MOST'] +3331-159609-0022-764: hyp=['POLLY', 'WAS', 'NOT', 'AT', 'ALL', 'LIKE', 'HERSELF', 'THAT', 'WINDOW', 'AND', 'THOSE', 'NEAREST', 'TO', 'HER', 'SAW', 'AND', 'WANDERED', 'AT', 'IT', 'MOST'] +3331-159609-0023-765: ref=['FOR', 'NED', 'WAS', 'SO', 'ABSORBED', 'IN', 'BUSINESS', 'THAT', 'HE', 'IGNORED', 'THE', 'WHOLE', 'BAILEY', 'QUESTION', 'AND', 'LEFT', 'THEM', 'IN', 'UTTER', 'DARKNESS'] +3331-159609-0023-765: hyp=['FOR', 'NED', 'WAS', 'SO', 'ABSORBED', 'IN', 'BUSINESS', 'THAT', 'HE', 'NURED', 'THE', 'WHOLE', 'BAILIQUE', 'QUESTION', 'AND', 'LEFT', 'THEM', 'IN', 'OTHER', 'DARKNESS'] +3331-159609-0024-766: ref=['FANNY', 'CAME', 'WALKING', 'IN', 'UPON', 'HER', 'ONE', 'DAY', 'LOOKING', 'AS', 'IF', 'SHE', 'BROUGHT', 'TIDINGS', 'OF', 'SUCH', 'GREAT', 'JOY', 'THAT', 'SHE', 'HARDLY', 'KNEW', 'HOW', 'TO', 'TELL', 'THEM'] +3331-159609-0024-766: hyp=['THEN', 'HE', 'CAME', 'WALKING', 'IN', 'UPON', 'HER', 'ONE', 'DAY', 'LOOKING', 'AS', 'IF', 'SHE', 'POURED', 'HIDINGS', 'OF', 'SUCH', 'GREAT', 'JOY', 'THAT', 'SHE', 'HARDLY', 'KNEW', 'HOW', 'TO', 'TELL', 'THEM'] +3331-159609-0025-767: ref=['BUT', 'IF', 'WORK', 'BASKETS', 'WERE', 'GIFTED', 'WITH', 'POWERS', 'OF', 'SPEECH', 'THEY', 'COULD', 'TELL', 'STORIES', 'MORE', 'TRUE', 'AND', 'TENDER', 'THAN', 'ANY', 'WE', 'READ'] +3331-159609-0025-767: hyp=['BUT', 'IF', 'WORK', 'BASKETS', 'WERE', 'GIFTED', 'WITH', 'POWERS', 'OF', 'SPEECH', 'THEY', 'COULD', 'TELL', 'STORIES', 'MORE', 'TRUE', 'AND', 'TENDER', 'THAN', 'ANY', 'REED'] +3528-168656-0000-864: ref=['SHE', 'HAD', 'EVEN', 'BEEN', 'IN', 'SOCIETY', 'BEFORE', 'THE', 'REVOLUTION'] +3528-168656-0000-864: hyp=['SHE', 'HAD', 'EVEN', 'BEEN', 'IN', 'SOCIETY', 'BEFORE', 'THE', 'REVOLUTION'] +3528-168656-0001-865: ref=['IT', 'WAS', 'HER', 'PLEASURE', 'AND', 'HER', 'VANITY', 'TO', 'DRAG', 'IN', 'THESE', 'NAMES', 'ON', 'EVERY', 'PRETEXT'] +3528-168656-0001-865: hyp=['IT', 'WAS', 'HER', 
'PLEASURE', 'AND', 'HER', 'VANITY', 'TO', 'DRAG', 'IN', 'THESE', 'NAMES', 'ON', 'EVERY', 'PRETEXT'] +3528-168656-0002-866: ref=['EVERY', 'YEAR', 'SHE', 'SOLEMNLY', 'RENEWED', 'HER', 'VOWS', 'AND', 'AT', 'THE', 'MOMENT', 'OF', 'TAKING', 'THE', 'OATH', 'SHE', 'SAID', 'TO', 'THE', 'PRIEST', 'MONSEIGNEUR', 'SAINT', 'FRANCOIS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'JULIEN', 'MONSEIGNEUR', 'SAINT', 'JULIEN', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'EUSEBIUS', 'MONSEIGNEUR', 'SAINT', 'EUSEBIUS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'PROCOPIUS', 'ET', 'CETERA', 'ET', 'CETERA'] +3528-168656-0002-866: hyp=['EVERY', 'YEAR', 'SHE', 'SOLEMNLY', 'RENEWED', 'HER', 'VOWS', 'AND', 'AT', 'THE', 'MOMENT', 'OF', 'TAKING', 'THE', 'OATH', 'SHE', 'SAID', 'TO', 'THE', 'PRIEST', 'MONSEIGNEUR', 'SAINT', 'FRANCOIS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAY', 'JULIEN', 'MONSEIGNEUR', 'SAINT', 'JULIAN', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'EUSCIBIUS', 'MONSIEUR', 'SAINT', 'EUSIBIUS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'PROCOPIAS', 'ET', 'CETERA', 'ET', 'CETERA'] +3528-168656-0003-867: ref=['AND', 'THE', 'SCHOOL', 'GIRLS', 'WOULD', 'BEGIN', 'TO', 'LAUGH', 'NOT', 'IN', 'THEIR', 'SLEEVES', 'BUT', 'UNDER', 'THEIR', 'VEILS', 'CHARMING', 'LITTLE', 'STIFLED', 'LAUGHS', 'WHICH', 'MADE', 'THE', 'VOCAL', 'MOTHERS', 'FROWN'] +3528-168656-0003-867: hyp=['AND', 'THE', 'SCHOOLGIRLS', 'WOULD', 'BEGIN', 'TO', 'LAUGH', 'NOT', 'IN', 'THEIR', 'SLEEVES', 'BUT', 'UNDER', 'THE', 'VEILS', 'CHARMING', 'LITTLE', 'STIFLED', 'LAUGHS', 'WHICH', 'MADE', 'THE', 'VOCAL', 'MOTHERS', 'FROWN'] +3528-168656-0004-868: ref=['IT', 'WAS', 'A', 'CENTURY', 'WHICH', 'SPOKE', 'THROUGH', 'HER', 'BUT', 'IT', 'WAS', 'THE', 'EIGHTEENTH', 'CENTURY'] +3528-168656-0004-868: hyp=['IT', 'WAS', 'A', 'CENTURY', 'WHICH', 'SPOKE', 'THROUGH', 'HER', 'BUT', 'IT', 'WAS', 'THE', 'EIGHTEENTH', 'CENTURY'] +3528-168656-0005-869: ref=['THE', 'RULE', 'OF', 'FONTEVRAULT', 'DID', 'NOT', 'FORBID', 'THIS'] +3528-168656-0005-869: hyp=['THE', 'RULE', 'OF', 'FONTREVAL', 'DID', 'NOT', 'FORBID', 'THIS'] +3528-168656-0006-870: ref=['SHE', 'WOULD', 'NOT', 'SHOW', 'THIS', 'OBJECT', 'TO', 'ANYONE'] +3528-168656-0006-870: hyp=['SHE', 'WOULD', 'NOT', 'SHOW', 'THE', 'SUBJECT', 'TO', 'ANY', 'ONE'] +3528-168656-0007-871: ref=['THUS', 'IT', 'FURNISHED', 'A', 'SUBJECT', 'OF', 'COMMENT', 'FOR', 'ALL', 'THOSE', 'WHO', 'WERE', 'UNOCCUPIED', 'OR', 'BORED', 'IN', 'THE', 'CONVENT'] +3528-168656-0007-871: hyp=['THUS', 'IT', 'FURNISHED', 'A', 'SUBJECT', 'OF', 'COMMENT', 'FOR', 'ALL', 'THOSE', 'WHO', 'WERE', 'UNOCCUPIED', 'OR', 'BORED', 'IN', 'THE', 'CONVENT'] +3528-168656-0008-872: ref=['SOME', 'UNIQUE', 'CHAPLET', 'SOME', 'AUTHENTIC', 'RELIC'] +3528-168656-0008-872: hyp=['SOME', 'UNIQUE', 'CHAPLET', 'SOME', 'AUTHENTIC', 'RELIC'] +3528-168656-0009-873: ref=['THEY', 'LOST', 'THEMSELVES', 'IN', 'CONJECTURES'] +3528-168656-0009-873: hyp=['THEY', 'LOST', 'THEMSELVES', 'IN', 'CONJECTURES'] +3528-168656-0010-874: ref=['WHEN', 'THE', 'POOR', 'OLD', 'WOMAN', 'DIED', 'THEY', 'RUSHED', 'TO', 'HER', 'CUPBOARD', 'MORE', 'HASTILY', 'THAN', 'WAS', 'FITTING', 'PERHAPS', 'AND', 'OPENED', 'IT'] +3528-168656-0010-874: hyp=['WHEN', 'THE', 'POOR', 'OLD', 'WOMAN', 'DIED', 'THEY', 'RUSHED', 'TO', 'HER', 'CUPBOARD', 'MORE', 'HASTILY', 'THAN', 'WAS', 'FITTING', 'PERHAPS', 'AND', 'OPENED', 'IT'] +3528-168656-0011-875: ref=['HE', 'IS', 'RESISTING', 'FLUTTERING', 'HIS', 'TINY', 'WINGS', 'AND', 'STILL', 'MAKING', 'AN', 'EFFORT', 'TO', 'FLY', 'BUT', 'THE', 'DANCER', 'IS', 'LAUGHING', 'WITH', 'A', 
'SATANICAL', 'AIR'] +3528-168656-0011-875: hyp=['HE', 'IS', 'RESISTING', 'FLUTTERING', 'HIS', 'TINY', 'WINGS', 'AND', 'STILL', 'MAKING', 'AN', 'EFFORT', 'TO', 'FLY', 'BUT', 'THE', 'DANCERS', 'LAUGHING', 'WITH', 'US', 'SATANICAL', 'AIR'] +3528-168656-0012-876: ref=['MORAL', 'LOVE', 'CONQUERED', 'BY', 'THE', 'COLIC'] +3528-168656-0012-876: hyp=['MORAL', 'LOVE', 'CONQUERED', 'BY', 'THE', 'COLIC'] +3528-168669-0000-877: ref=['THE', 'PRIORESS', 'RETURNED', 'AND', 'SEATED', 'HERSELF', 'ONCE', 'MORE', 'ON', 'HER', 'CHAIR'] +3528-168669-0000-877: hyp=['THE', 'PRIORS', 'RETURNED', 'AND', 'SEATED', 'HERSELF', 'ONCE', 'MORE', 'ON', 'HER', 'CHAIR'] +3528-168669-0001-878: ref=['WE', 'WILL', 'PRESENT', 'A', 'STENOGRAPHIC', 'REPORT', 'OF', 'THE', 'DIALOGUE', 'WHICH', 'THEN', 'ENSUED', 'TO', 'THE', 'BEST', 'OF', 'OUR', 'ABILITY'] +3528-168669-0001-878: hyp=['WE', 'WILL', 'PRESENT', 'A', 'SYNOGRAPHIC', 'REPORT', 'OF', 'THE', 'DIALOGUE', 'WHICH', 'THEN', 'ENSUED', 'TO', 'THE', 'BEST', 'OF', 'OUR', 'ABILITY'] +3528-168669-0002-879: ref=['FATHER', 'FAUVENT'] +3528-168669-0002-879: hyp=['FATHER', 'FERVENT'] +3528-168669-0003-880: ref=['REVEREND', 'MOTHER', 'DO', 'YOU', 'KNOW', 'THE', 'CHAPEL'] +3528-168669-0003-880: hyp=['REVEREND', 'MOTHER', 'DO', 'YOU', 'KNOW', 'THE', 'CHAPEL'] +3528-168669-0004-881: ref=['AND', 'YOU', 'HAVE', 'BEEN', 'IN', 'THE', 'CHOIR', 'IN', 'PURSUANCE', 'OF', 'YOUR', 'DUTIES', 'TWO', 'OR', 'THREE', 'TIMES'] +3528-168669-0004-881: hyp=['AND', 'YOU', 'HAVE', 'BEEN', 'IN', 'THE', 'CHOIR', 'IN', 'PURSUANCE', 'OF', 'YOUR', 'DUTIES', 'TWO', 'OR', 'THREE', 'TIMES'] +3528-168669-0005-882: ref=['THERE', 'IS', 'A', 'STONE', 'TO', 'BE', 'RAISED', 'HEAVY'] +3528-168669-0005-882: hyp=['THERE', 'IS', 'A', 'STONE', 'TO', 'BE', 'RAISED', 'HEAVY'] +3528-168669-0006-883: ref=['THE', 'SLAB', 'OF', 'THE', 'PAVEMENT', 'WHICH', 'IS', 'AT', 'THE', 'SIDE', 'OF', 'THE', 'ALTAR'] +3528-168669-0006-883: hyp=['THE', 'SLAB', 'OF', 'THE', 'PAVEMENT', 'WHICH', 'IS', 'AT', 'THE', 'SIDE', 'OF', 'THE', 'ALTAR'] +3528-168669-0007-884: ref=['THE', 'SLAB', 'WHICH', 'CLOSES', 'THE', 'VAULT', 'YES'] +3528-168669-0007-884: hyp=['THE', 'FLAP', 'WHICH', 'CLOSES', 'THE', 'VAULT', 'YES'] +3528-168669-0008-885: ref=['IT', 'WOULD', 'BE', 'A', 'GOOD', 'THING', 'TO', 'HAVE', 'TWO', 'MEN', 'FOR', 'IT'] +3528-168669-0008-885: hyp=['IT', 'WOULD', 'BE', 'A', 'GOOD', 'THING', 'TO', 'HAVE', 'TWO', 'MEN', 'FOR', 'IT'] +3528-168669-0009-886: ref=['A', 'WOMAN', 'IS', 'NEVER', 'A', 'MAN'] +3528-168669-0009-886: hyp=['A', 'WOMAN', 'IS', 'NEVER', 'A', 'MAN'] +3528-168669-0010-887: ref=['BECAUSE', 'DOM', 'MABILLON', 'GIVES', 'FOUR', 'HUNDRED', 'AND', 'SEVENTEEN', 'EPISTLES', 'OF', 'SAINT', 'BERNARD', 'WHILE', 'MERLONUS', 'HORSTIUS', 'ONLY', 'GIVES', 'THREE', 'HUNDRED', 'AND', 'SIXTY', 'SEVEN', 'I', 'DO', 'NOT', 'DESPISE', 'MERLONUS', 'HORSTIUS', 'NEITHER', 'DO', 'I'] +3528-168669-0010-887: hyp=['BECAUSE', 'DON', 'MARBULAN', 'GIVES', 'FOUR', 'HUNDRED', 'AND', 'SEVENTEEN', 'EPISTLES', 'OF', 'SAINT', 'BERNARD', 'WHILE', 'MERLUNUS', 'HORSE', 'ONLY', 'GIVES', 'THREE', 'HUNDRED', 'AND', 'SIXTY', 'SEVEN', 'I', 'DO', 'NOT', 'DESPISE', 'MERLINUS', 'HORSES', 'NEITHER', 'DO', 'I'] +3528-168669-0011-888: ref=['MERIT', 'CONSISTS', 'IN', 'WORKING', 'ACCORDING', 'TO', "ONE'S", 'STRENGTH', 'A', 'CLOISTER', 'IS', 'NOT', 'A', 'DOCK', 'YARD'] +3528-168669-0011-888: hyp=['MARRIAGE', 'CONSISTS', 'IN', 'WORKING', 'ACCORDING', 'TO', "ONE'S", 'STRENGTH', 'A', 'CLOISTER', 'IS', 'NOT', 'A', 'DOCKYARD'] +3528-168669-0012-889: ref=['AND', 'A', 'WOMAN', 'IS', 'NOT', 'A', 
'MAN', 'BUT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'ONE', 'THOUGH'] +3528-168669-0012-889: hyp=['ADD', 'A', 'WOMAN', 'IS', 'NOT', 'A', 'MAN', 'BUT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'ONE', 'THOUGH'] +3528-168669-0013-890: ref=['AND', 'CAN', 'YOU', 'GET', 'A', 'LEVER'] +3528-168669-0013-890: hyp=['AND', 'CAN', 'YOU', 'GET', 'A', 'LOVER'] +3528-168669-0014-891: ref=['THERE', 'IS', 'A', 'RING', 'IN', 'THE', 'STONE'] +3528-168669-0014-891: hyp=['THERE', 'IS', 'A', 'RING', 'IN', 'THE', 'STONE'] +3528-168669-0015-892: ref=['I', 'WILL', 'PUT', 'THE', 'LEVER', 'THROUGH', 'IT'] +3528-168669-0015-892: hyp=['I', 'WILL', 'PUT', 'THE', 'LOVER', 'THROUGH', 'IT'] +3528-168669-0016-893: ref=['THAT', 'IS', 'GOOD', 'REVEREND', 'MOTHER', 'I', 'WILL', 'OPEN', 'THE', 'VAULT'] +3528-168669-0016-893: hyp=['THAT', 'IS', 'GOOD', 'REVEREND', 'MOTHER', 'I', 'WILL', 'OPEN', 'THE', 'VAULT'] +3528-168669-0017-894: ref=['WILL', 'THAT', 'BE', 'ALL', 'NO'] +3528-168669-0017-894: hyp=['WILL', 'THAT', 'BE', 'ALL', 'NO'] +3528-168669-0018-895: ref=['GIVE', 'ME', 'YOUR', 'ORDERS', 'VERY', 'REVEREND', 'MOTHER'] +3528-168669-0018-895: hyp=['GIVE', 'ME', 'YOUR', 'ORDERS', 'VERY', 'REVEREND', 'MOTHER'] +3528-168669-0019-896: ref=['FAUVENT', 'WE', 'HAVE', 'CONFIDENCE', 'IN', 'YOU'] +3528-168669-0019-896: hyp=['FOR', 'THAT', 'WE', 'HAVE', 'CONFIDENCE', 'IN', 'YOU'] +3528-168669-0020-897: ref=['I', 'AM', 'HERE', 'TO', 'DO', 'ANYTHING', 'YOU', 'WISH'] +3528-168669-0020-897: hyp=['I', 'AM', 'HERE', 'TO', 'DO', 'ANYTHING', 'YOU', 'WISH'] +3528-168669-0021-898: ref=['AND', 'TO', 'HOLD', 'YOUR', 'PEACE', 'ABOUT', 'EVERYTHING', 'YES', 'REVEREND', 'MOTHER'] +3528-168669-0021-898: hyp=['AND', 'TO', 'HOLD', 'YOUR', 'PEACE', 'ABOUT', 'EVERYTHING', 'YES', 'ROBIN', 'MOTHER'] +3528-168669-0022-899: ref=['WHEN', 'THE', 'VAULT', 'IS', 'OPEN', 'I', 'WILL', 'CLOSE', 'IT', 'AGAIN'] +3528-168669-0022-899: hyp=['WHEN', 'THE', 'VOLT', 'IS', 'OPEN', 'I', 'WILL', 'CLOSE', 'IT', 'AGAIN'] +3528-168669-0023-900: ref=['BUT', 'BEFORE', 'THAT', 'WHAT', 'REVEREND', 'MOTHER'] +3528-168669-0023-900: hyp=['BUT', 'BEFORE', 'THAT', 'WHAT', 'REVEREND', 'MOTHER'] +3528-168669-0024-901: ref=['FATHER', 'FAUVENT', 'REVEREND', 'MOTHER'] +3528-168669-0024-901: hyp=['FATHER', 'FOR', 'REVERE', 'MOTHER'] +3528-168669-0025-902: ref=['YOU', 'KNOW', 'THAT', 'A', 'MOTHER', 'DIED', 'THIS', 'MORNING'] +3528-168669-0025-902: hyp=['YOU', 'KNOW', 'THAT', 'A', 'MOTHER', 'DIED', 'THIS', 'MORNING'] +3528-168669-0026-903: ref=['NO', 'DID', 'YOU', 'NOT', 'HEAR', 'THE', 'BELL'] +3528-168669-0026-903: hyp=['NO', 'DID', 'YOU', 'NOT', 'HEAR', 'THE', 'BELL'] +3528-168669-0027-904: ref=['NOTHING', 'CAN', 'BE', 'HEARD', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN', 'REALLY'] +3528-168669-0027-904: hyp=['NOTHING', 'CAN', 'BE', 'HEARD', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN', 'REALLY'] +3528-168669-0028-905: ref=['AND', 'THEN', 'THE', 'WIND', 'IS', 'NOT', 'BLOWING', 'IN', 'MY', 'DIRECTION', 'THIS', 'MORNING'] +3528-168669-0028-905: hyp=['AND', 'THEN', 'THE', 'WIND', 'IS', 'NOT', 'BLOWING', 'IN', 'MY', 'DIRECTION', 'THIS', 'MORNING'] +3528-168669-0029-906: ref=['IT', 'WAS', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0029-906: hyp=['IT', 'WAS', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0030-907: ref=['THREE', 'YEARS', 'AGO', 'MADAME', 'DE', 'BETHUNE', 'A', 'JANSENIST', 'TURNED', 'ORTHODOX', 'MERELY', 'FROM', 'HAVING', 'SEEN', 'MOTHER', 'CRUCIFIXION', 'AT', 'PRAYER', 'AH'] +3528-168669-0030-907: hyp=['THREE', 'YEARS', 'AGO', 'MADAME', 'DE', 'BESOON', 'A', 'GENT', 'ORTHODOX', 'MERELY', 'FROM', 
'HAVING', 'SEEN', 'MOTHER', 'CRUCIFIXION', 'AT', 'PRAYER', 'AH'] +3528-168669-0031-908: ref=['THE', 'MOTHERS', 'HAVE', 'TAKEN', 'HER', 'TO', 'THE', 'DEAD', 'ROOM', 'WHICH', 'OPENS', 'ON', 'THE', 'CHURCH', 'I', 'KNOW'] +3528-168669-0031-908: hyp=['THE', 'MOTHERS', 'HAVE', 'TAKEN', 'HER', 'TO', 'THE', 'DEAD', 'ROOM', 'WHICH', 'OPENS', 'ON', 'THE', 'CHURCH', 'I', 'KNOW'] +3528-168669-0032-909: ref=['A', 'FINE', 'SIGHT', 'IT', 'WOULD', 'BE', 'TO', 'SEE', 'A', 'MAN', 'ENTER', 'THE', 'DEAD', 'ROOM', 'MORE', 'OFTEN'] +3528-168669-0032-909: hyp=['A', 'FINE', 'SIGHT', 'IT', 'WOULD', 'BE', 'TO', 'SEE', 'A', 'MAN', 'ENTER', 'THE', 'DEAD', 'ROOM', 'MORE', 'OFTEN'] +3528-168669-0033-910: ref=['HEY', 'MORE', 'OFTEN'] +3528-168669-0033-910: hyp=['HEY', 'MORE', 'OFTEN'] +3528-168669-0034-911: ref=['WHAT', 'DO', 'YOU', 'SAY'] +3528-168669-0034-911: hyp=['WHAT', 'DO', 'YOU', 'SAY'] +3528-168669-0035-912: ref=['I', 'SAY', 'MORE', 'OFTEN', 'MORE', 'OFTEN', 'THAN', 'WHAT'] +3528-168669-0035-912: hyp=['I', 'SAY', 'MORE', 'OFTEN', 'MORE', 'OFTEN', 'THAN', 'WHAT'] +3528-168669-0036-913: ref=['REVEREND', 'MOTHER', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN', 'THAN', 'WHAT', 'I', 'SAID', 'MORE', 'OFTEN'] +3528-168669-0036-913: hyp=['REVEREND', 'MOTHER', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN', 'THAN', 'WHAT', 'I', 'SAID', 'MORE', 'OFTEN'] +3528-168669-0037-914: ref=['BUT', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN'] +3528-168669-0037-914: hyp=['BUT', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN'] +3528-168669-0038-915: ref=['AT', 'THAT', 'MOMENT', 'NINE', "O'CLOCK", 'STRUCK'] +3528-168669-0038-915: hyp=['AT', 'THAT', 'MOMENT', 'NINE', "O'CLOCK", 'STRUCK'] +3528-168669-0039-916: ref=['AT', 'NINE', "O'CLOCK", 'IN', 'THE', 'MORNING', 'AND', 'AT', 'ALL', 'HOURS', 'PRAISED', 'AND', 'ADORED', 'BE', 'THE', 'MOST', 'HOLY', 'SACRAMENT', 'OF', 'THE', 'ALTAR', 'SAID', 'THE', 'PRIORESS'] +3528-168669-0039-916: hyp=['AT', 'NINE', "O'CLOCK", 'IN', 'THE', 'MORNING', 'AND', 'AT', 'ALL', 'HOURS', 'PRAISED', 'AND', 'ENDURED', 'BE', 'THE', 'MOST', 'HOLY', 'SACRAMENT', 'OF', 'THE', 'ALTAR', 'SAID', 'THE', 'PRIEST'] +3528-168669-0040-917: ref=['IT', 'CUT', 'MORE', 'OFTEN', 'SHORT'] +3528-168669-0040-917: hyp=['IT', 'CUT', 'MORE', 'OFTEN', 'SHORT'] +3528-168669-0041-918: ref=['FAUCHELEVENT', 'MOPPED', 'HIS', 'FOREHEAD'] +3528-168669-0041-918: hyp=['FAUCHELEVENT', 'MOPPED', 'HIS', 'FOREHEAD'] +3528-168669-0042-919: ref=['IN', 'HER', 'LIFETIME', 'MOTHER', 'CRUCIFIXION', 'MADE', 'CONVERTS', 'AFTER', 'HER', 'DEATH', 'SHE', 'WILL', 'PERFORM', 'MIRACLES', 'SHE', 'WILL'] +3528-168669-0042-919: hyp=['IN', 'HER', 'LIFETIME', 'MOTHER', 'CRUCIFIXION', 'MADE', 'CONVERTS', 'AFTER', 'HER', 'DEATH', 'SHE', 'WILL', 'PERFORM', 'MIRACLES', 'SHE', 'WILL'] +3528-168669-0043-920: ref=['FATHER', 'FAUVENT', 'THE', 'COMMUNITY', 'HAS', 'BEEN', 'BLESSED', 'IN', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0043-920: hyp=['FATHER', 'FUVENT', 'THE', 'COMMUNITY', 'HAS', 'BEEN', 'BLESSED', 'IN', 'MOTHER', 'CURSE', 'FICTION'] +3528-168669-0044-921: ref=['SHE', 'RETAINED', 'HER', 'CONSCIOUSNESS', 'TO', 'THE', 'VERY', 'LAST', 'MOMENT'] +3528-168669-0044-921: hyp=['SHE', 'RETAINED', 'HER', 'CONSCIOUSNESS', 'TO', 'THE', 'VERY', 'LAST', 'MOMENT'] +3528-168669-0045-922: ref=['SHE', 'GAVE', 'US', 'HER', 'LAST', 'COMMANDS'] +3528-168669-0045-922: hyp=['SHE', 'GAVE', 'US', 'HER', 'LAST', 'COMMANDS'] +3528-168669-0046-923: ref=['IF', 'YOU', 'HAD', 'A', 'LITTLE', 'MORE', 'FAITH', 'AND', 'IF', 'YOU', 'COULD', 'HAVE', 'BEEN', 'IN', 'HER', 'CELL', 'SHE', 'WOULD', 'HAVE', 'CURED', 'YOUR', 
'LEG', 'MERELY', 'BY', 'TOUCHING', 'IT', 'SHE', 'SMILED'] +3528-168669-0046-923: hyp=['IF', 'YOU', 'HAD', 'A', 'LITTLE', 'MORE', 'FAITH', 'AND', 'IF', 'YOU', 'COULD', 'HAVE', 'BEEN', 'IN', 'HERSELF', 'SHE', 'WOULD', 'HAVE', 'CURED', 'YOUR', 'LEG', 'MERELY', 'BY', 'TOUCHING', 'IT', 'SHE', 'SMILED'] +3528-168669-0047-924: ref=['THERE', 'WAS', 'SOMETHING', 'OF', 'PARADISE', 'IN', 'THAT', 'DEATH'] +3528-168669-0047-924: hyp=['THERE', 'WAS', 'SOMETHING', 'OF', 'PARADISE', 'IN', 'THAT', 'DEATH'] +3528-168669-0048-925: ref=['FAUCHELEVENT', 'THOUGHT', 'THAT', 'IT', 'WAS', 'AN', 'ORISON', 'WHICH', 'SHE', 'WAS', 'FINISHING'] +3528-168669-0048-925: hyp=['FAUCHELEVENT', 'THOUGHT', 'THAT', 'IT', 'WAS', 'AN', 'ORISON', 'WHICH', 'SHE', 'WAS', 'FINISHING'] +3528-168669-0049-926: ref=['FAUCHELEVENT', 'HELD', 'HIS', 'PEACE', 'SHE', 'WENT', 'ON'] +3528-168669-0049-926: hyp=['FORCHELEVENT', 'HELD', 'HIS', 'PEACE', 'SHE', 'WENT', 'ON'] +3528-168669-0050-927: ref=['I', 'HAVE', 'CONSULTED', 'UPON', 'THIS', 'POINT', 'MANY', 'ECCLESIASTICS', 'LABORING', 'IN', 'OUR', 'LORD', 'WHO', 'OCCUPY', 'THEMSELVES', 'IN', 'THE', 'EXERCISES', 'OF', 'THE', 'CLERICAL', 'LIFE', 'AND', 'WHO', 'BEAR', 'WONDERFUL', 'FRUIT'] +3528-168669-0050-927: hyp=['I', 'HAVE', 'CONSULTED', 'UPON', 'THIS', 'POINT', 'MANY', 'ECCLESIASTICS', 'LABOURING', 'IN', 'OUR', 'LORD', 'WHO', 'OCCUPY', 'THEMSELVES', 'IN', 'THE', 'EXERCISES', 'OF', 'THE', 'CLERICAL', 'LIFE', 'AND', 'WHO', 'BEAR', 'WONDERFUL', 'FRUIT'] +3528-168669-0051-928: ref=['FORTUNATELY', 'THE', 'PRIORESS', 'COMPLETELY', 'ABSORBED', 'IN', 'HER', 'OWN', 'THOUGHTS', 'DID', 'NOT', 'HEAR', 'IT'] +3528-168669-0051-928: hyp=['FORTUNATELY', 'THE', 'PIRRUS', 'COMPLETELY', 'ABSORBED', 'IN', 'HER', 'OWN', 'THOUGHTS', 'DID', 'NOT', 'HEAR', 'IT'] +3528-168669-0052-929: ref=['SHE', 'CONTINUED', 'FATHER', 'FAUVENT'] +3528-168669-0052-929: hyp=['SHE', 'CONTINUED', 'FURTHER', 'PREVENT'] +3528-168669-0053-930: ref=['YES', 'REVEREND', 'MOTHER'] +3528-168669-0053-930: hyp=['YES', 'REVERE', 'MOTHER'] +3528-168669-0054-931: ref=['SAINT', 'TERENTIUS', 'BISHOP', 'OF', 'PORT', 'WHERE', 'THE', 'MOUTH', 'OF', 'THE', 'TIBER', 'EMPTIES', 'INTO', 'THE', 'SEA', 'REQUESTED', 'THAT', 'ON', 'HIS', 'TOMB', 'MIGHT', 'BE', 'ENGRAVED', 'THE', 'SIGN', 'WHICH', 'WAS', 'PLACED', 'ON', 'THE', 'GRAVES', 'OF', 'PARRICIDES', 'IN', 'THE', 'HOPE', 'THAT', 'PASSERS', 'BY', 'WOULD', 'SPIT', 'ON', 'HIS', 'TOMB', 'THIS', 'WAS', 'DONE'] +3528-168669-0054-931: hyp=['SAINT', 'TERENTIUS', 'BISHOP', 'OF', 'PORT', 'WEAR', 'THE', 'MOUTH', 'OF', 'THE', 'TYBER', 'EMPTIES', 'INTO', 'THE', 'SEA', 'REQUESTED', 'THAT', 'ON', 'HIS', 'TOMB', 'MIGHT', 'BE', 'ENGRAVED', 'THE', 'SIGN', 'WHICH', 'WAS', 'PLACED', 'ON', 'THE', 'GRAVES', 'OF', 'PARASITES', 'IN', 'THE', 'HOPE', 'THAT', 'PASSERS', 'BY', 'WOULD', 'SPIT', 'ON', 'HIS', 'TOMB', 'THIS', 'WAS', 'DONE'] +3528-168669-0055-932: ref=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'SO', 'BE', 'IT'] +3528-168669-0055-932: hyp=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'SO', 'BE', 'IT'] +3528-168669-0056-933: ref=['FOR', 'THAT', 'MATTER', 'NO', 'REVEREND', 'MOTHER'] +3528-168669-0056-933: hyp=['FOR', 'THAT', 'MATTER', 'NO', 'REVEREND', 'MOTHER'] +3528-168669-0057-934: ref=['FATHER', 'FAUVENT', 'MOTHER', 'CRUCIFIXION', 'WILL', 'BE', 'INTERRED', 'IN', 'THE', 'COFFIN', 'IN', 'WHICH', 'SHE', 'HAS', 'SLEPT', 'FOR', 'THE', 'LAST', 'TWENTY', 'YEARS', 'THAT', 'IS', 'JUST'] +3528-168669-0057-934: hyp=['FATHER', 'PREVENT', 'MOTHER', 'CRUCIFIXION', 'WILL', 'BE', 'INTERRED', 'IN', 'THE', 'COFFIN', 'IN', 'WHICH', 'SHE', 'HAS', 
'SLEPT', 'FOR', 'THE', 'LAST', 'TWENTY', 'YEARS', 'THAT', 'IS', 'JUST'] +3528-168669-0058-935: ref=['IT', 'IS', 'A', 'CONTINUATION', 'OF', 'HER', 'SLUMBER'] +3528-168669-0058-935: hyp=['IT', 'IS', 'A', 'CONTINUATION', 'OF', 'HER', 'SLUMBER'] +3528-168669-0059-936: ref=['SO', 'I', 'SHALL', 'HAVE', 'TO', 'NAIL', 'UP', 'THAT', 'COFFIN', 'YES'] +3528-168669-0059-936: hyp=['SO', 'I', 'SHALL', 'HAVE', 'TO', 'NAIL', 'UP', 'THAT', 'COFFIN', 'YES'] +3528-168669-0060-937: ref=['I', 'AM', 'AT', 'THE', 'ORDERS', 'OF', 'THE', 'VERY', 'REVEREND', 'COMMUNITY'] +3528-168669-0060-937: hyp=['I', 'AM', 'AT', 'THE', 'ORDERS', 'OF', 'THE', 'VERY', 'REVEREND', 'COMMUNITY'] +3528-168669-0061-938: ref=['THE', 'FOUR', 'MOTHER', 'PRECENTORS', 'WILL', 'ASSIST', 'YOU'] +3528-168669-0061-938: hyp=['THE', 'FOUR', 'MOTHER', 'PRESENTERS', 'WILL', 'ASSIST', 'YOU'] +3528-168669-0062-939: ref=['NO', 'IN', 'LOWERING', 'THE', 'COFFIN'] +3528-168669-0062-939: hyp=['NO', 'IN', 'LORING', 'THE', 'COFFIN'] +3528-168669-0063-940: ref=['WHERE', 'INTO', 'THE', 'VAULT'] +3528-168669-0063-940: hyp=['WHERE', 'INTO', 'THE', 'VAULT'] +3528-168669-0064-941: ref=['FAUCHELEVENT', 'STARTED', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR'] +3528-168669-0064-941: hyp=['FAUCHELEVENT', 'STARTED', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR'] +3528-168669-0065-942: ref=['UNDER', 'THE', 'ALTAR', 'BUT'] +3528-168669-0065-942: hyp=['UNDER', 'THE', 'ALTAR', 'BUT'] +3528-168669-0066-943: ref=['YOU', 'WILL', 'HAVE', 'AN', 'IRON', 'BAR', 'YES', 'BUT'] +3528-168669-0066-943: hyp=['YOU', 'WILL', 'HAVE', 'AN', 'IRON', 'BAR', 'YES', 'BUT'] +3528-168669-0067-944: ref=['YOU', 'WILL', 'RAISE', 'THE', 'STONE', 'WITH', 'THE', 'BAR', 'BY', 'MEANS', 'OF', 'THE', 'RING', 'BUT'] +3528-168669-0067-944: hyp=['YOU', 'WILL', 'RAISE', 'THE', 'STONE', 'WITH', 'THE', 'BAR', 'BY', 'MEANS', 'OF', 'THE', 'RING', 'BUT'] +3528-168669-0068-945: ref=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL', 'NOT', 'TO', 'GO', 'TO', 'PROFANE', 'EARTH', 'TO', 'REMAIN', 'THERE', 'IN', 'DEATH', 'WHERE', 'SHE', 'PRAYED', 'WHILE', 'LIVING', 'SUCH', 'WAS', 'THE', 'LAST', 'WISH', 'OF', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0068-945: hyp=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL', 'NOT', 'TO', 'GO', 'TO', 'PROFANE', 'EARTH', 'TO', 'REMAIN', 'THERE', 'IN', 'DEATH', 'WHERE', 'SHE', 'PRAYED', 'WHILE', 'LIVING', 'SUCH', 'WAS', 'THE', 'LAST', 'WISH', 'OF', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0069-946: ref=['SHE', 'ASKED', 'IT', 'OF', 'US', 'THAT', 'IS', 'TO', 'SAY', 'COMMANDED', 'US'] +3528-168669-0069-946: hyp=['SHE', 'ASKED', 'IT', 'OF', 'US', 'THAT', 'IS', 'TO', 'SAY', 'COMMANDED', 'US'] +3528-168669-0070-947: ref=['BUT', 'IT', 'IS', 'FORBIDDEN'] +3528-168669-0070-947: hyp=['BUT', 'IT', 'IS', 'FORBIDDEN'] +3528-168669-0071-948: ref=['OH', 'I', 'AM', 'A', 'STONE', 'IN', 'YOUR', 'WALLS'] +3528-168669-0071-948: hyp=['OH', 'I', 'AM', 'A', 'STONE', 'IN', 'YOUR', 'WALLS'] +3528-168669-0072-949: ref=['THINK', 'FATHER', 'FAUVENT', 'IF', 'SHE', 'WERE', 'TO', 'WORK', 'MIRACLES', 'HERE'] +3528-168669-0072-949: hyp=['THINK', 'FATHER', 'FRAVAIN', 'IF', 'SHE', 'WERE', 'TO', 'WORK', 'MIRACLES', 'HERE'] +3528-168669-0073-950: ref=['WHAT', 'A', 'GLORY', 'OF', 'GOD', 'FOR', 'THE', 'COMMUNITY', 'AND', 'MIRACLES', 'ISSUE', 'FROM', 'TOMBS'] +3528-168669-0073-950: hyp=['WHAT', 'A', 'GLORY', 'OF', 'GOD', 'FOR', 'THE', 'COMMUNITY', 'AND', 'MIRACLES', 'ISSUE', 'FROM', 
'TOMBS'] +3528-168669-0074-951: ref=['BUT', 'REVEREND', 'MOTHER', 'IF', 'THE', 'AGENT', 'OF', 'THE', 'SANITARY', 'COMMISSION'] +3528-168669-0074-951: hyp=['BUT', 'REVEREND', 'MOTHER', 'IF', 'THE', 'AGENTIVE', 'THE', 'SANITARY', 'COMMISSION'] +3528-168669-0075-952: ref=['BUT', 'THE', 'COMMISSARY', 'OF', 'POLICE'] +3528-168669-0075-952: hyp=['BUT', 'THE', 'COMMISSARY', 'OF', 'POLICE'] +3528-168669-0076-953: ref=['CHONODEMAIRE', 'ONE', 'OF', 'THE', 'SEVEN', 'GERMAN', 'KINGS', 'WHO', 'ENTERED', 'AMONG', 'THE', 'GAULS', 'UNDER', 'THE', 'EMPIRE', 'OF', 'CONSTANTIUS', 'EXPRESSLY', 'RECOGNIZED', 'THE', 'RIGHT', 'OF', 'NUNS', 'TO', 'BE', 'BURIED', 'IN', 'RELIGION', 'THAT', 'IS', 'TO', 'SAY', 'BENEATH', 'THE', 'ALTAR'] +3528-168669-0076-953: hyp=['SHADOW', 'DE', 'MAR', 'ONE', 'OF', 'THE', 'SEVEN', 'GERMAN', 'KINGS', 'WHO', 'ENTERED', 'AMONG', 'THE', 'GAULS', 'UNDER', 'THE', 'EMPIRE', 'OF', 'CONSTANTIUS', 'EXPRESSLY', 'RECOGNIZED', 'THE', 'RIGHT', 'OF', 'NUNS', 'TO', 'BE', 'BURIED', 'IN', 'RELIGION', 'THAT', 'IS', 'TO', 'SAY', 'BENEATH', 'THE', 'ALTAR'] +3528-168669-0077-954: ref=['THE', 'WORLD', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'THE', 'CROSS'] +3528-168669-0077-954: hyp=['THE', 'WORLD', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'THE', 'CROSS'] +3528-168669-0078-955: ref=['MARTIN', 'THE', 'ELEVENTH', 'GENERAL', 'OF', 'THE', 'CARTHUSIANS', 'GAVE', 'TO', 'HIS', 'ORDER', 'THIS', 'DEVICE', 'STAT', 'CRUX', 'DUM', 'VOLVITUR', 'ORBIS'] +3528-168669-0078-955: hyp=['MERTON', 'THE', 'ELEVENTH', 'GENERAL', 'OF', 'THE', 'CARTHUSIANS', 'GAVE', 'TO', 'HIS', 'ORDER', 'THIS', 'DEVICE', 'STAT', 'CREW', 'DOOMFUL', 'ORBIS'] +3528-168669-0079-956: ref=['THE', 'PRIORESS', 'WHO', 'WAS', 'USUALLY', 'SUBJECTED', 'TO', 'THE', 'BARRIER', 'OF', 'SILENCE', 'AND', 'WHOSE', 'RESERVOIR', 'WAS', 'OVERFULL', 'ROSE', 'AND', 'EXCLAIMED', 'WITH', 'THE', 'LOQUACITY', 'OF', 'A', 'DAM', 'WHICH', 'HAS', 'BROKEN', 'AWAY'] +3528-168669-0079-956: hyp=['THE', 'PIRRUS', 'WHO', 'WAS', 'USUALLY', 'SUBJECTED', 'TO', 'THE', 'BARRIER', 'OF', 'SILENCE', 'AND', 'WHOSE', 'RESERVOIR', 'WAS', 'OVER', 'FULL', 'ROSE', 'AND', 'EXCLAIMED', 'WITH', 'THE', 'LOQUACITY', 'OF', 'A', 'DAM', 'WHICH', 'HAS', 'BROKEN', 'AWAY'] +3528-168669-0080-957: ref=['I', 'HAVE', 'ON', 'MY', 'RIGHT', 'BENOIT', 'AND', 'ON', 'MY', 'LEFT', 'BERNARD', 'WHO', 'WAS', 'BERNARD'] +3528-168669-0080-957: hyp=['I', 'HAVE', 'ON', 'MY', 'RIGHT', 'BENOIS', 'AND', 'ALL', 'MY', 'LEFT', 'BERNARD', 'WHO', 'WAS', 'BERNARD'] +3528-168669-0081-958: ref=['THE', 'FIRST', 'ABBOT', 'OF', 'CLAIRVAUX'] +3528-168669-0081-958: hyp=['THE', 'FIRST', 'ABBOT', 'OF', 'CLERVAL'] +3528-168669-0082-959: ref=['HIS', 'ORDER', 'HAS', 'PRODUCED', 'FORTY', 'POPES', 'TWO', 'HUNDRED', 'CARDINALS', 'FIFTY', 'PATRIARCHS', 'SIXTEEN', 'HUNDRED', 'ARCHBISHOPS', 'FOUR', 'THOUSAND', 'SIX', 'HUNDRED', 'BISHOPS', 'FOUR', 'EMPERORS', 'TWELVE', 'EMPRESSES', 'FORTY', 'SIX', 'KINGS', 'FORTY', 'ONE', 'QUEENS', 'THREE', 'THOUSAND', 'SIX', 'HUNDRED', 'CANONIZED', 'SAINTS', 'AND', 'HAS', 'BEEN', 'IN', 'EXISTENCE', 'FOR', 'FOURTEEN', 'HUNDRED', 'YEARS'] +3528-168669-0082-959: hyp=['HIS', 'ORDER', 'HAS', 'PRODUCED', 'FORTY', 'POPES', 'TWO', 'HUNDRED', 'CARDINALS', 'FIFTY', 'PATRIARCHS', 'SIXTEEN', 'HUNDRED', 'ARCHBISHOPS', 'FOUR', 'THOUSAND', 'SIX', 'HUNDRED', 'BISHOPS', 'FOUR', 'EMPERORS', 'TWELVE', 'EMPRESSES', 'FORTY', 'SIX', 'KINGS', 'FORTY', 'ONE', 'QUEENS', 'THREE', 'THOUSAND', 'SIX', 'HUNDRED', 'CANNONIZED', 'SAINTS', 'AND', 'HAS', 'BEEN', 'IN', 'EXISTENCE', 'FOR', 'FOURTEEN', 'HUNDRED', 'YEARS'] 
+3528-168669-0083-960: ref=['ON', 'ONE', 'SIDE', 'SAINT', 'BERNARD', 'ON', 'THE', 'OTHER', 'THE', 'AGENT', 'OF', 'THE', 'SANITARY', 'DEPARTMENT'] +3528-168669-0083-960: hyp=['ON', 'ONE', 'SIDE', 'SAINT', 'BERNARD', 'ON', 'THE', 'OTHER', 'THE', 'AGENT', 'OF', 'THE', 'SENATORY', 'DEPARTMENT'] +3528-168669-0084-961: ref=['GOD', 'SUBORDINATED', 'TO', 'THE', 'COMMISSARY', 'OF', 'POLICE', 'SUCH', 'IS', 'THE', 'AGE', 'SILENCE', 'FAUVENT'] +3528-168669-0084-961: hyp=['GOD', 'SUBORDINATED', 'TO', 'THE', 'COMMISSARY', 'OF', 'POLICE', 'SUCH', 'AS', 'THE', 'AGE', 'SILENCE', 'FAVOT'] +3528-168669-0085-962: ref=['NO', 'ONE', 'DOUBTS', 'THE', 'RIGHT', 'OF', 'THE', 'MONASTERY', 'TO', 'SEPULTURE'] +3528-168669-0085-962: hyp=['NO', 'ONE', 'DOUBTS', 'THE', 'RIGHT', 'OF', 'THE', 'MONASTERY', 'TO', 'SEPULTURE'] +3528-168669-0086-963: ref=['ONLY', 'FANATICS', 'AND', 'THOSE', 'IN', 'ERROR', 'DENY', 'IT'] +3528-168669-0086-963: hyp=['ONLY', 'FANATICS', 'AND', 'THOSE', 'IN', 'ERROR', 'DENY', 'IT'] +3528-168669-0087-964: ref=['WE', 'LIVE', 'IN', 'TIMES', 'OF', 'TERRIBLE', 'CONFUSION'] +3528-168669-0087-964: hyp=['WE', 'LIVE', 'IN', 'TIMES', 'OF', 'TERRIBLE', 'CONFUSION'] +3528-168669-0088-965: ref=['WE', 'ARE', 'IGNORANT', 'AND', 'IMPIOUS'] +3528-168669-0088-965: hyp=['WE', 'ARE', 'IGNORANT', 'AND', 'IMPIOUS'] +3528-168669-0089-966: ref=['AND', 'THEN', 'RELIGION', 'IS', 'ATTACKED', 'WHY'] +3528-168669-0089-966: hyp=['AND', 'THEN', 'RELIGION', 'IS', 'ATTACKED', 'WHY'] +3528-168669-0090-967: ref=['BECAUSE', 'THERE', 'HAVE', 'BEEN', 'BAD', 'PRIESTS', 'BECAUSE', 'SAGITTAIRE', 'BISHOP', 'OF', 'GAP', 'WAS', 'THE', 'BROTHER', 'OF', 'SALONE', 'BISHOP', 'OF', 'EMBRUN', 'AND', 'BECAUSE', 'BOTH', 'OF', 'THEM', 'FOLLOWED', 'MOMMOL'] +3528-168669-0090-967: hyp=['BECAUSE', 'THERE', 'HAVE', 'BEEN', 'BAD', 'PRIESTS', 'BECAUSE', 'SAGOTARA', 'BISHOP', 'OF', 'GAP', 'WAS', 'A', 'BROTHER', 'OF', 'SALON', 'BISHOP', 'OF', 'AMBRON', 'AND', 'BECAUSE', 'BOTH', 'OF', 'THEM', 'FOLLOWED', 'MAMMA'] +3528-168669-0091-968: ref=['THEY', 'PERSECUTE', 'THE', 'SAINTS'] +3528-168669-0091-968: hyp=['THEY', 'PERSECUTE', 'THE', 'SAINTS'] +3528-168669-0092-969: ref=['THEY', 'SHUT', 'THEIR', 'EYES', 'TO', 'THE', 'TRUTH', 'DARKNESS', 'IS', 'THE', 'RULE'] +3528-168669-0092-969: hyp=['THEY', 'SHUT', 'THEIR', 'EYES', 'TO', 'THE', 'TRUTH', 'DARKNESS', 'IS', 'THE', 'RULE'] +3528-168669-0093-970: ref=['THE', 'MOST', 'FEROCIOUS', 'BEASTS', 'ARE', 'BEASTS', 'WHICH', 'ARE', 'BLIND'] +3528-168669-0093-970: hyp=['THE', 'MOST', 'FEROCIOUS', 'BEASTS', 'ARE', 'BEASTS', 'WHICH', 'ARE', 'BLIND'] +3528-168669-0094-971: ref=['OH', 'HOW', 'WICKED', 'PEOPLE', 'ARE'] +3528-168669-0094-971: hyp=['OH', 'HOW', 'WICKED', 'PEOPLE', 'ARE'] +3528-168669-0095-972: ref=['BY', 'ORDER', 'OF', 'THE', 'KING', 'SIGNIFIES', 'TO', 'DAY', 'BY', 'ORDER', 'OF', 'THE', 'REVOLUTION'] +3528-168669-0095-972: hyp=['BY', 'ORDER', 'OF', 'THE', 'KING', 'SIGNIFIES', 'TO', 'DAY', 'BY', 'ORDER', 'OF', 'THE', 'REVOLUTION'] +3528-168669-0096-973: ref=['ONE', 'NO', 'LONGER', 'KNOWS', 'WHAT', 'IS', 'DUE', 'TO', 'THE', 'LIVING', 'OR', 'TO', 'THE', 'DEAD', 'A', 'HOLY', 'DEATH', 'IS', 'PROHIBITED'] +3528-168669-0096-973: hyp=['ONE', 'NO', 'LONGER', 'KNOWS', 'WHAT', 'IS', 'DUE', 'TO', 'THE', 'LIVING', 'OR', 'TO', 'THE', 'DEAD', 'A', 'HOLY', 'DEATH', 'IS', 'PROHIBITED'] +3528-168669-0097-974: ref=['GAUTHIER', 'BISHOP', 'OF', 'CHALONS', 'HELD', 'HIS', 'OWN', 'IN', 'THIS', 'MATTER', 'AGAINST', 'OTHO', 'DUKE', 'OF', 'BURGUNDY'] +3528-168669-0097-974: hyp=['GATHIERRE', 'BISHOP', 'OF', 'CHALON', 'HELD', 'HIS', 'OWN', 
'IN', 'THIS', 'MATTER', 'AGAINST', 'OTHO', 'DUKE', 'OF', 'BURGUNDY'] +3528-168669-0098-975: ref=['THE', 'PRIORESS', 'TOOK', 'BREATH', 'THEN', 'TURNED', 'TO', 'FAUCHELEVENT'] +3528-168669-0098-975: hyp=['THE', 'PRIORS', 'TOOK', 'BREATH', 'AND', 'TURNED', 'TO', 'FAUCHELEVENT'] +3528-168669-0099-976: ref=['YOU', 'WILL', 'CLOSE', 'THE', 'COFFIN', 'THE', 'SISTERS', 'WILL', 'CARRY', 'IT', 'TO', 'THE', 'CHAPEL'] +3528-168669-0099-976: hyp=['YOU', 'WILL', 'CLOSE', 'THE', 'COFFIN', 'THE', 'SISTERS', 'WILL', 'CARRY', 'IT', 'TO', 'THE', 'CHAPEL'] +3528-168669-0100-977: ref=['THE', 'OFFICE', 'FOR', 'THE', 'DEAD', 'WILL', 'THEN', 'BE', 'SAID'] +3528-168669-0100-977: hyp=['THE', 'OFFICE', 'FOR', 'THE', 'DEAD', 'WILL', 'THEN', 'BE', 'SET'] +3528-168669-0101-978: ref=['BUT', 'SHE', 'WILL', 'HEAR', 'SHE', 'WILL', 'NOT', 'LISTEN'] +3528-168669-0101-978: hyp=['BUT', 'SHE', 'WILL', 'HEAR', 'SHE', 'WILL', 'NOT', 'LISTEN'] +3528-168669-0102-979: ref=['BESIDES', 'WHAT', 'THE', 'CLOISTER', 'KNOWS', 'THE', 'WORLD', 'LEARNS', 'NOT'] +3528-168669-0102-979: hyp=['BESIDES', 'WHAT', 'THE', 'CLOSER', 'KNOWS', 'THE', 'WORLD', 'LEARNS', 'NOT'] +3528-168669-0103-980: ref=['A', 'PAUSE', 'ENSUED'] +3528-168669-0103-980: hyp=['A', 'PAUSE', 'IN', 'SUIT'] +3528-168669-0104-981: ref=['YOU', 'WILL', 'REMOVE', 'YOUR', 'BELL'] +3528-168669-0104-981: hyp=['YOU', 'WILL', 'REMOVE', 'YOUR', 'BELT'] +3528-168669-0105-982: ref=['HAS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEAD', 'PAID', 'HIS', 'VISIT'] +3528-168669-0105-982: hyp=['HAS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEAD', 'PAID', 'HIS', 'VISIT'] +3528-168669-0106-983: ref=['HE', 'WILL', 'PAY', 'IT', 'AT', 'FOUR', "O'CLOCK", 'TO', 'DAY'] +3528-168669-0106-983: hyp=['HE', 'WILL', 'PAY', 'IT', 'AT', 'FOUR', "O'CLOCK", 'TO', 'DAY'] +3528-168669-0107-984: ref=['THE', 'PEAL', 'WHICH', 'ORDERS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEAD', 'TO', 'BE', 'SUMMONED', 'HAS', 'ALREADY', 'BEEN', 'RUNG'] +3528-168669-0107-984: hyp=['THE', 'PEAL', 'WHICH', 'ORDERS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEBT', 'TO', 'BE', 'SUMMONED', 'HAS', 'ALREADY', 'BEEN', 'RUN'] +3528-168669-0108-985: ref=['BUT', 'YOU', 'DO', 'NOT', 'UNDERSTAND', 'ANY', 'OF', 'THE', 'PEALS'] +3528-168669-0108-985: hyp=['BUT', 'YOU', 'DO', 'NOT', 'UNDERSTAND', 'ANY', 'OF', 'THE', 'PEALS'] +3528-168669-0109-986: ref=['THAT', 'IS', 'WELL', 'FATHER', 'FAUVENT'] +3528-168669-0109-986: hyp=['THAT', 'IS', 'WELL', 'FATHER', 'VENT'] +3528-168669-0110-987: ref=['WHERE', 'WILL', 'YOU', 'OBTAIN', 'IT'] +3528-168669-0110-987: hyp=['WHERE', 'WILL', 'YOU', 'OBTAIN', 'IT'] +3528-168669-0111-988: ref=['I', 'HAVE', 'MY', 'HEAP', 'OF', 'OLD', 'IRON', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN'] +3528-168669-0111-988: hyp=['I', 'HAVE', 'MY', 'HEAP', 'OF', 'OLD', 'IRON', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN'] +3528-168669-0112-989: ref=['REVEREND', 'MOTHER', 'WHAT'] +3528-168669-0112-989: hyp=['REVERE', 'MOTHER', 'WHAT'] +3528-168669-0113-990: ref=['IF', 'YOU', 'WERE', 'EVER', 'TO', 'HAVE', 'ANY', 'OTHER', 'JOBS', 'OF', 'THIS', 'SORT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'MAN', 'FOR', 'YOU', 'A', 'PERFECT', 'TURK'] +3528-168669-0113-990: hyp=['IF', 'YOU', 'WERE', 'EVER', 'TO', 'HAVE', 'ANY', 'OTHER', 'JOBS', 'OF', 'THIS', 'SORT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'MAN', 'FOR', 'YOU', 'A', 'PERFECT', 'TURK'] +3528-168669-0114-991: ref=['YOU', 'WILL', 'DO', 'IT', 'AS', 'SPEEDILY', 'AS', 'POSSIBLE'] +3528-168669-0114-991: hyp=['YOU', 'WILL', 'DO', 'IT', 'AS', 'SPEEDILY', 'AS', 'POSSIBLE'] +3528-168669-0115-992: ref=['I', 'CANNOT', 'WORK', 'VERY', 
'FAST', 'I', 'AM', 'INFIRM', 'THAT', 'IS', 'WHY', 'I', 'REQUIRE', 'AN', 'ASSISTANT', 'I', 'LIMP'] +3528-168669-0115-992: hyp=['I', 'CANNOT', 'WORK', 'VERY', 'FAST', 'I', 'AM', 'INFIRM', 'THAT', 'IS', 'WHY', 'I', 'REQUIRE', 'AN', 'ASSISTANT', 'I', 'LIMP'] +3528-168669-0116-993: ref=['EVERYTHING', 'MUST', 'HAVE', 'BEEN', 'COMPLETED', 'A', 'GOOD', 'QUARTER', 'OF', 'AN', 'HOUR', 'BEFORE', 'THAT'] +3528-168669-0116-993: hyp=['EVERYTHING', 'MUST', 'HAVE', 'BEEN', 'COMPLETED', 'A', 'GOOD', 'QUARTER', 'OF', 'AN', 'HOUR', 'BEFORE', 'THAT'] +3528-168669-0117-994: ref=['I', 'WILL', 'DO', 'ANYTHING', 'TO', 'PROVE', 'MY', 'ZEAL', 'TOWARDS', 'THE', 'COMMUNITY', 'THESE', 'ARE', 'MY', 'ORDERS', 'I', 'AM', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN'] +3528-168669-0117-994: hyp=['I', 'WILL', 'DO', 'ANYTHING', 'TO', 'PROVE', 'MY', 'ZEAL', 'TOWARDS', 'THE', 'COMMUNITY', 'THESE', 'ARE', 'MY', 'ORDERS', 'I', 'AM', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN'] +3528-168669-0118-995: ref=['AT', 'ELEVEN', "O'CLOCK", 'EXACTLY', 'I', 'AM', 'TO', 'BE', 'IN', 'THE', 'CHAPEL'] +3528-168669-0118-995: hyp=['AT', 'ELEVEN', "O'CLOCK", 'EXACTLY', 'I', 'AM', 'TO', 'BE', 'IN', 'THE', 'CHAPEL'] +3528-168669-0119-996: ref=['MOTHER', 'ASCENSION', 'WILL', 'BE', 'THERE', 'TWO', 'MEN', 'WOULD', 'BE', 'BETTER'] +3528-168669-0119-996: hyp=['MOTHER', 'ASCENSION', 'WILL', 'BE', 'THERE', 'TWO', 'MEN', 'WOULD', 'BE', 'BETTER'] +3528-168669-0120-997: ref=['HOWEVER', 'NEVER', 'MIND', 'I', 'SHALL', 'HAVE', 'MY', 'LEVER'] +3528-168669-0120-997: hyp=['HOWEVER', 'NEVER', 'MIND', 'I', 'SHALL', 'HAVE', 'MY', 'LOVER'] +3528-168669-0121-998: ref=['AFTER', 'WHICH', 'THERE', 'WILL', 'BE', 'NO', 'TRACE', 'OF', 'ANYTHING'] +3528-168669-0121-998: hyp=['AFTER', 'WHICH', 'THERE', 'WILL', 'BE', 'NO', 'TRACE', 'OF', 'ANYTHING'] +3528-168669-0122-999: ref=['THE', 'GOVERNMENT', 'WILL', 'HAVE', 'NO', 'SUSPICION'] +3528-168669-0122-999: hyp=['THE', 'GOVERNMENT', 'WILL', 'HAVE', 'NO', 'SUSPICION'] +3528-168669-0123-1000: ref=['THE', 'EMPTY', 'COFFIN', 'REMAINS', 'THIS', 'PRODUCED', 'A', 'PAUSE'] +3528-168669-0123-1000: hyp=['THE', 'EMPTY', 'COFFIN', 'REMAINS', 'THIS', 'PRODUCED', 'A', 'PULSE'] +3528-168669-0124-1001: ref=['WHAT', 'IS', 'TO', 'BE', 'DONE', 'WITH', 'THAT', 'COFFIN', 'FATHER', 'FAUVENT'] +3528-168669-0124-1001: hyp=['WHAT', 'IS', 'TO', 'BE', 'DONE', 'WITH', 'THAT', 'COFFIN', 'FATHER', 'PREVENT'] +3528-168669-0125-1002: ref=['IT', 'WILL', 'BE', 'GIVEN', 'TO', 'THE', 'EARTH', 'EMPTY'] +3528-168669-0125-1002: hyp=['IT', 'WILL', 'BE', 'GIVEN', 'TO', 'THE', 'EARTH', 'EMPTY'] +3528-168669-0126-1003: ref=['AH', 'THE', 'DE', 'EXCLAIMED', 'FAUCHELEVENT'] +3528-168669-0126-1003: hyp=['AH', 'LIDA', 'EXCLAIMED', 'FAUCHELEVENT'] +3528-168669-0127-1004: ref=['THE', 'VIL', 'STUCK', 'FAST', 'IN', 'HIS', 'THROAT'] +3528-168669-0127-1004: hyp=['THE', 'VILLE', 'STUCK', 'FAST', 'IN', 'HIS', 'THROAT'] +3528-168669-0128-1005: ref=['HE', 'MADE', 'HASTE', 'TO', 'IMPROVISE', 'AN', 'EXPEDIENT', 'TO', 'MAKE', 'HER', 'FORGET', 'THE', 'OATH'] +3528-168669-0128-1005: hyp=['HE', 'MADE', 'HASTE', 'TO', 'IMPROVISE', 'AN', 'EXPEDIENT', 'TO', 'MAKE', 'HER', 'FORGET', 'THE', 'OATH'] +3528-168669-0129-1006: ref=['I', 'WILL', 'PUT', 'EARTH', 'IN', 'THE', 'COFFIN', 'REVEREND', 'MOTHER', 'THAT', 'WILL', 'PRODUCE', 'THE', 'EFFECT', 'OF', 'A', 'CORPSE'] +3528-168669-0129-1006: hyp=['I', 'WILL', 'PUT', 'EARTH', 'IN', 'THE', 'COFFIN', 'REVERED', 'MOTHER', 'THAT', 'WILL', 'PRODUCE', 'THE', 'EFFECT', 'OF', 'A', 'CORPSE'] +3528-168669-0130-1007: ref=['I', 'WILL', 'MAKE', 'THAT', 'MY', 'SPECIAL', 'BUSINESS'] 
+3528-168669-0130-1007: hyp=['I', 'WILL', 'MAKE', 'THAT', 'MY', 'SPECIAL', 'BUSINESS'] +3538-142836-0000-1567: ref=['GENERAL', 'OBSERVATIONS', 'ON', 'PRESERVES', 'CONFECTIONARY', 'ICES', 'AND', 'DESSERT', 'DISHES'] +3538-142836-0000-1567: hyp=['JOE', 'OBSERVATIONS', 'ON', 'PRESERVES', 'CONFECTIONARY', 'EYESES', 'AND', 'DESSERT', 'DISHES'] +3538-142836-0001-1568: ref=['THE', 'EXPENSE', 'OF', 'PRESERVING', 'THEM', 'WITH', 'SUGAR', 'IS', 'A', 'SERIOUS', 'OBJECTION', 'FOR', 'EXCEPT', 'THE', 'SUGAR', 'IS', 'USED', 'IN', 'CONSIDERABLE', 'QUANTITIES', 'THE', 'SUCCESS', 'IS', 'VERY', 'UNCERTAIN'] +3538-142836-0001-1568: hyp=['THE', 'EXPENSE', 'OF', 'PRESERVING', 'THEM', 'WITH', 'SUGAR', 'IS', 'A', 'SERIOUS', 'OBJECTION', 'FOR', 'EXCEPT', 'A', 'SUGAR', 'IS', 'USED', 'IN', 'CONSIDERABLE', 'QUALITIES', 'THE', 'SUCCESS', 'IS', 'VERY', 'UNCERTAIN'] +3538-142836-0002-1569: ref=['FRUIT', 'GATHERED', 'IN', 'WET', 'OR', 'FOGGY', 'WEATHER', 'WILL', 'SOON', 'BE', 'MILDEWED', 'AND', 'BE', 'OF', 'NO', 'SERVICE', 'FOR', 'PRESERVES'] +3538-142836-0002-1569: hyp=['FRUIT', 'GATHERED', 'IN', 'WET', 'OR', 'FOGGY', 'WEATHER', 'WILL', 'SOON', 'BE', 'MILDED', 'AND', 'BE', 'OF', 'NO', 'SERVICE', 'FOR', 'PRESERVES'] +3538-142836-0003-1570: ref=['BUT', 'TO', 'DISTINGUISH', 'THESE', 'PROPERLY', 'REQUIRES', 'VERY', 'GREAT', 'ATTENTION', 'AND', 'CONSIDERABLE', 'EXPERIENCE'] +3538-142836-0003-1570: hyp=['BUT', 'TO', 'DISTINGUISH', 'HIS', 'PROPER', 'REQUIRES', 'VERY', 'GREAT', 'ATTENTION', 'AND', 'CONSIDERABLE', 'EXPERIENCE'] +3538-142836-0004-1571: ref=['IF', 'YOU', 'DIP', 'THE', 'FINGER', 'INTO', 'THE', 'SYRUP', 'AND', 'APPLY', 'IT', 'TO', 'THE', 'THUMB', 'THE', 'TENACITY', 'OF', 'THE', 'SYRUP', 'WILL', 'ON', 'SEPARATING', 'THE', 'FINGER', 'AND', 'THUMB', 'AFFORD', 'A', 'THREAD', 'WHICH', 'SHORTLY', 'BREAKS', 'THIS', 'IS', 'THE', 'LITTLE', 'THREAD'] +3538-142836-0004-1571: hyp=['IF', 'YOU', 'DIP', 'THE', 'FINGER', 'INTO', 'THE', 'SERF', 'AND', 'APPLY', 'TO', 'THE', 'THUMB', 'THE', 'TENACITY', 'OF', 'THE', 'SURF', 'WILL', 'ON', 'SEPARATING', 'THE', 'FINGER', 'AND', 'THUMB', 'AFFORD', 'A', 'THREAD', 'WHICH', 'SHORTLY', 'BREAKS', 'THIS', 'IS', 'THE', 'LITTLE', 'THREAD'] +3538-142836-0005-1572: ref=['LET', 'IT', 'BOIL', 'UP', 'AGAIN', 'THEN', 'TAKE', 'IT', 'OFF', 'AND', 'REMOVE', 'CAREFULLY', 'THE', 'SCUM', 'THAT', 'HAS', 'RISEN'] +3538-142836-0005-1572: hyp=['LET', 'IT', 'BOIL', 'UP', 'AGAIN', 'THEN', 'TAKE', 'IT', 'OFF', 'AND', 'REMOVE', 'CAREFULLY', 'THE', 'SCUM', 'THAT', 'HAS', 'RISEN'] +3538-142836-0006-1573: ref=['IT', 'IS', 'CONSIDERED', 'TO', 'BE', 'SUFFICIENTLY', 'BOILED', 'WHEN', 'SOME', 'TAKEN', 'UP', 'IN', 'A', 'SPOON', 'POURS', 'OUT', 'LIKE', 'OIL'] +3538-142836-0006-1573: hyp=['IT', 'IS', 'CONSIDERED', 'TO', 'BE', 'SUFFICIENTLY', 'BOILED', 'WHEN', 'SOME', 'TAKEN', 'UP', 'IN', 'A', 'SPOON', 'POURS', 'OUT', 'LIKE', 'OIL'] +3538-142836-0007-1574: ref=['BEFORE', 'SUGAR', 'WAS', 'IN', 'USE', 'HONEY', 'WAS', 'EMPLOYED', 'TO', 'PRESERVE', 'MANY', 'VEGETABLE', 'PRODUCTIONS', 'THOUGH', 'THIS', 'SUBSTANCE', 'HAS', 'NOW', 'GIVEN', 'WAY', 'TO', 'THE', 'JUICE', 'OF', 'THE', 'SUGAR', 'CANE'] +3538-142836-0007-1574: hyp=['BEFORE', 'SUGAR', 'WAS', 'IN', 'USE', 'HONEY', 'WAS', 'EMPLOYED', 'TO', 'PRESERVE', 'MANY', 'VEGETABLE', 'PRODUCTIONS', 'THOUGH', 'THIS', 'SUBSTANCE', 'IS', 'NOW', 'GIVEN', 'WAY', 'TO', 'THE', 'JUICE', 'OF', 'THE', 'SUGAR', 'CANE'] +3538-142836-0008-1575: ref=['FOURTEEN', 'NINETY', 'NINE'] +3538-142836-0008-1575: hyp=['FOURTEEN', 'NINETY', 'NINE'] +3538-142836-0009-1576: ref=['BOIL', 'THEM', 'UP', 'THREE', 
'DAYS', 'SUCCESSIVELY', 'SKIMMING', 'EACH', 'TIME', 'AND', 'THEY', 'WILL', 'THEN', 'BE', 'FINISHED', 'AND', 'IN', 'A', 'STATE', 'FIT', 'TO', 'BE', 'PUT', 'INTO', 'POTS', 'FOR', 'USE'] +3538-142836-0009-1576: hyp=['BOIL', 'THEM', 'UP', 'THREE', 'DAYS', 'SUCCESSIVELY', 'SKIMMING', 'EACH', 'TIME', 'AND', 'THEY', 'WILL', 'THEN', 'BE', 'FINISHED', 'AND', 'IN', 'A', 'STATE', 'FIT', 'TO', 'BE', 'PUT', 'INTO', 'POTS', 'FOR', 'USE'] +3538-142836-0010-1577: ref=['THE', 'REASON', 'WHY', 'THE', 'FRUIT', 'IS', 'EMPTIED', 'OUT', 'OF', 'THE', 'PRESERVING', 'PAN', 'INTO', 'AN', 'EARTHEN', 'PAN', 'IS', 'THAT', 'THE', 'ACID', 'OF', 'THE', 'FRUIT', 'ACTS', 'UPON', 'THE', 'COPPER', 'OF', 'WHICH', 'THE', 'PRESERVING', 'PANS', 'ARE', 'USUALLY', 'MADE'] +3538-142836-0010-1577: hyp=['THE', 'REASON', 'WHY', 'THE', 'FRUIT', 'IS', 'EMPTIED', 'OUT', 'OF', 'THE', 'PRESERVING', 'PAN', 'INTO', 'AN', 'EARTHEN', 'PAN', 'IS', 'THAT', 'THE', 'ACID', 'OF', 'THE', 'FRUIT', 'ACTS', 'UPON', 'THE', 'COPPER', 'OF', 'WHICH', 'THE', 'PRESERVING', 'PANS', 'ARE', 'USUALLY', 'MADE'] +3538-142836-0011-1578: ref=['FROM', 'THIS', 'EXAMPLE', 'THE', 'PROCESS', 'OF', 'PRESERVING', 'FRUITS', 'BY', 'SYRUP', 'WILL', 'BE', 'EASILY', 'COMPREHENDED'] +3538-142836-0011-1578: hyp=['FROM', 'THIS', 'EXAMPLE', 'THE', 'PROCESS', 'OF', 'PRESERVING', 'FRUITS', 'BY', 'SYRUP', 'WOULD', 'BE', 'EASILY', 'COMPREHENDED'] +3538-142836-0012-1579: ref=['THEY', 'SHOULD', 'BE', 'DRIED', 'IN', 'THE', 'STOVE', 'OR', 'OVEN', 'ON', 'A', 'SIEVE', 'AND', 'TURNED', 'EVERY', 'SIX', 'OR', 'EIGHT', 'HOURS', 'FRESH', 'POWDERED', 'SUGAR', 'BEING', 'SIFTED', 'OVER', 'THEM', 'EVERY', 'TIME', 'THEY', 'ARE', 'TURNED'] +3538-142836-0012-1579: hyp=['THIS', 'SHOULD', 'BE', 'DRIED', 'IN', 'THE', 'STOVE', 'OR', 'OVEN', 'ON', 'A', 'SEA', 'AND', 'TURNED', 'EVERY', 'SIX', 'OR', 'EIGHT', 'HOURS', 'FRESH', 'PATTERED', 'SUGAR', 'BEING', 'SIFTED', 'OVER', 'THEM', 'EVERY', 'TIME', 'THEY', 'ARE', 'TURNED'] +3538-142836-0013-1580: ref=['IN', 'THIS', 'WAY', 'IT', 'IS', 'ALSO', 'THAT', 'ORANGE', 'AND', 'LEMON', 'CHIPS', 'ARE', 'PRESERVED'] +3538-142836-0013-1580: hyp=['IN', 'THIS', 'WAY', 'IT', 'IS', 'ALSO', 'THAT', 'ORANGE', 'AND', 'LENNONSHIPS', 'ARE', 'PRESERVED'] +3538-142836-0014-1581: ref=['MARMALADES', 'JAMS', 'AND', 'FRUIT', 'PASTES', 'ARE', 'OF', 'THE', 'SAME', 'NATURE', 'AND', 'ARE', 'NOW', 'IN', 'VERY', 'GENERAL', 'REQUEST'] +3538-142836-0014-1581: hyp=['MARMALADES', 'JAMS', 'AND', 'FRUIT', 'PACE', 'ARE', 'OF', 'THE', 'SAME', 'NATURE', 'AND', 'ARE', 'NOW', 'IN', 'VERY', 'GENERAL', 'QUEST'] +3538-142836-0015-1582: ref=['MARMALADES', 'AND', 'JAMS', 'DIFFER', 'LITTLE', 'FROM', 'EACH', 'OTHER', 'THEY', 'ARE', 'PRESERVES', 'OF', 'A', 'HALF', 'LIQUID', 'CONSISTENCY', 'MADE', 'BY', 'BOILING', 'THE', 'PULP', 'OF', 'FRUITS', 'AND', 'SOMETIMES', 'PART', 'OF', 'THE', 'RINDS', 'WITH', 'SUGAR'] +3538-142836-0015-1582: hyp=['MARVELL', 'EATS', 'AND', "JAM'S", 'DIFFER', 'LITTLE', 'FROM', 'EACH', 'OTHER', 'THEIR', 'PRESERVES', 'OF', 'HALF', 'LIQUID', 'CONSISTENCY', 'MADE', 'BY', 'BOILING', 'THE', 'PULP', 'OF', 'FRUITS', 'AND', 'SOMETIMES', 'PART', 'OF', 'THE', 'RHINES', 'WITH', 'SUGAR'] +3538-142836-0016-1583: ref=['THAT', 'THEY', 'MAY', 'KEEP', 'IT', 'IS', 'NECESSARY', 'NOT', 'TO', 'BE', 'SPARING', 'OF', 'SUGAR', 'FIFTEEN', 'O', 'THREE'] +3538-142836-0016-1583: hyp=['THAT', 'THEY', 'MAY', 'KEEP', 'IT', 'IS', 'NECESSARY', 'NOT', 'TO', 'BE', 'SPARING', 'OF', 'SUGAR', 'FIFTEEN', 'O', 'THREE'] +3538-142836-0017-1584: ref=['IN', 'ALL', 'THE', 'OPERATIONS', 'FOR', 'PRESERVE', 'MAKING', 'WHEN', 'THE', 
'PRESERVING', 'PAN', 'IS', 'USED', 'IT', 'SHOULD', 'NOT', 'BE', 'PLACED', 'ON', 'THE', 'FIRE', 'BUT', 'ON', 'A', 'TRIVET', 'UNLESS', 'THE', 'JAM', 'IS', 'MADE', 'ON', 'A', 'HOT', 'PLATE', 'WHEN', 'THIS', 'IS', 'NOT', 'NECESSARY'] +3538-142836-0017-1584: hyp=['IN', 'ALL', 'THE', 'OPERATIONS', 'FOR', 'PRESERVE', 'MAKING', 'WHEN', 'THE', 'PRESERVING', 'PAN', 'IS', 'USED', 'IT', 'SHOULD', 'NOT', 'BE', 'PLACED', 'ON', 'THE', 'FIRE', 'BUT', 'ON', 'A', 'TRIBUT', 'UNLESS', 'THE', 'JAME', 'IS', 'MADE', 'ON', 'A', 'HOT', 'PLATE', 'WHEN', 'THIS', 'IS', 'NOT', 'NECESSARY'] +3538-142836-0018-1585: ref=['CONFECTIONARY', 'FIFTEEN', 'O', 'EIGHT'] +3538-142836-0018-1585: hyp=['CONFECTIONARY', 'FIFTEEN', 'O', 'EIGHT'] +3538-142836-0019-1586: ref=['IN', 'SPEAKING', 'OF', 'CONFECTIONARY', 'IT', 'SHOULD', 'BE', 'REMARKED', 'THAT', 'ALL', 'THE', 'VARIOUS', 'PREPARATIONS', 'ABOVE', 'NAMED', 'COME', 'STRICTLY', 'SPEAKING', 'UNDER', 'THAT', 'HEAD', 'FOR', 'THE', 'VARIOUS', 'FRUITS', 'FLOWERS', 'HERBS', 'ROOTS', 'AND', 'JUICES', 'WHICH', 'WHEN', 'BOILED', 'WITH', 'SUGAR', 'WERE', 'FORMERLY', 'EMPLOYED', 'IN', 'PHARMACY', 'AS', 'WELL', 'AS', 'FOR', 'SWEETMEATS', 'WERE', 'CALLED', 'CONFECTIONS', 'FROM', 'THE', 'LATIN', 'WORD', 'CONFICERE', 'TO', 'MAKE', 'UP', 'BUT', 'THE', 'TERM', 'CONFECTIONARY', 'EMBRACES', 'A', 'VERY', 'LARGE', 'CLASS', 'INDEED', 'OF', 'SWEET', 'FOOD', 'MANY', 'KINDS', 'OF', 'WHICH', 'SHOULD', 'NOT', 'BE', 'ATTEMPTED', 'IN', 'THE', 'ORDINARY', 'CUISINE'] +3538-142836-0019-1586: hyp=['IN', 'SPEAKING', 'OF', 'CONFECTIONARIES', 'SHOULD', 'BE', 'REMARKED', 'THAT', 'ALL', 'THE', 'VARIOUS', 'PREPARATIONS', 'ABOVE', 'NAMED', 'COME', 'STRICTLY', 'SPEAKING', 'UNDER', 'THAT', 'HEAD', 'FOR', 'THE', 'VARIOUS', 'FRUITS', 'FLOWERS', 'HERBS', 'OR', 'SAUCES', 'WHICH', 'ONE', 'BOILED', 'WITH', 'SUGAR', 'WERE', 'FORMERLY', 'EMPLOYED', 'IN', 'PHARMACY', 'AS', 'WELL', 'AS', 'FOR', 'SWEETMEATS', 'WERE', 'CALLED', 'CONFECTIONS', 'FROM', 'THE', 'LATIN', 'WORD', 'CONFUSE', 'TO', 'MAKE', 'UP', 'BUT', 'THE', 'TERM', 'CONFECTIONARY', 'EMBRACES', 'A', 'VERY', 'LARGE', 'CLASS', 'INDEED', 'OF', 'SWEET', 'FOOD', 'MANY', 'KINDS', 'OF', 'WHICH', 'SHOULD', 'NOT', 'BE', 'ATTEMPTED', 'IN', 'THE', 'ORDINARY', 'COSEINE'] +3538-142836-0020-1587: ref=['THE', 'THOUSAND', 'AND', 'ONE', 'ORNAMENTAL', 'DISHES', 'THAT', 'ADORN', 'THE', 'TABLES', 'OF', 'THE', 'WEALTHY', 'SHOULD', 'BE', 'PURCHASED', 'FROM', 'THE', 'CONFECTIONER', 'THEY', 'CANNOT', 'PROFITABLY', 'BE', 'MADE', 'AT', 'HOME'] +3538-142836-0020-1587: hyp=['A', 'THOUSAND', 'AND', 'ONE', 'ORNAMENTAL', 'DISHES', 'THAT', 'ADORN', 'THE', 'TABLES', 'OF', 'THE', 'WEALTHY', 'SHOULD', 'BE', 'PURCHASED', 'FROM', 'THE', 'CONFECTIONER', 'THEY', 'CANNOT', 'PROFITABLY', 'BE', 'MADE', 'AT', 'HOME'] +3538-142836-0021-1588: ref=['HOWEVER', 'AS', 'LATE', 'AS', 'THE', 'REIGNS', 'OF', 'OUR', 'TWO', 'LAST', 'GEORGES', 'FABULOUS', 'SUMS', 'WERE', 'OFTEN', 'EXPENDED', 'UPON', 'FANCIFUL', 'DESSERTS'] +3538-142836-0021-1588: hyp=['HOWEVER', 'AS', 'LATE', 'AS', 'THE', 'REIGN', 'OF', 'OUR', 'TWO', 'LAST', 'GEORGES', 'FABULOUS', 'SUMS', 'WERE', 'OFTEN', 'EXPENDED', 'UPON', 'FANCIFUL', 'DESERTS'] +3538-142836-0022-1589: ref=['THE', 'SHAPE', 'OF', 'THE', 'DISHES', 'VARIES', 'AT', 'DIFFERENT', 'PERIODS', 'THE', 'PREVAILING', 'FASHION', 'AT', 'PRESENT', 'BEING', 'OVAL', 'AND', 'CIRCULAR', 'DISHES', 'ON', 'STEMS'] +3538-142836-0022-1589: hyp=['THE', 'SHAPE', 'OF', 'THE', 'DISH', 'IS', 'VARIES', 'AT', 'DIFFERENT', 'PERIODS', 'THE', 'PREVAILING', 'FASHION', 'AT', 'PRESENT', 'BEING', 'OVAL', 'AND', 'CIRCULAR', 
'DISHES', 'ON', 'STEMS'] +3538-142836-0023-1590: ref=['ICES'] +3538-142836-0023-1590: hyp=['ISIS'] +3538-142836-0024-1591: ref=['AT', 'DESSERTS', 'OR', 'AT', 'SOME', 'EVENING', 'PARTIES', 'ICES', 'ARE', 'SCARCELY', 'TO', 'BE', 'DISPENSED', 'WITH'] +3538-142836-0024-1591: hyp=['I', 'DESERTS', 'OR', 'AT', 'SOME', 'EVENING', 'PARTIES', 'ICES', 'ARE', 'SCARCELY', 'DID', 'BE', 'DISPENSED', 'WITH'] +3538-142836-0025-1592: ref=['THE', 'SPADDLE', 'IS', 'GENERALLY', 'MADE', 'OF', 'COPPER', 'KEPT', 'BRIGHT', 'AND', 'CLEAN'] +3538-142836-0025-1592: hyp=['THE', 'SPATTLE', 'IS', 'GENERALLY', 'MADE', 'OF', 'COPPER', 'KEPT', 'BRIGHT', 'AND', 'CLEAN'] +3538-142836-0026-1593: ref=['THEY', 'SHOULD', 'BE', 'TAKEN', 'IMMEDIATELY', 'AFTER', 'THE', 'REPAST', 'OR', 'SOME', 'HOURS', 'AFTER', 'BECAUSE', 'THE', 'TAKING', 'THESE', 'SUBSTANCES', 'DURING', 'THE', 'PROCESS', 'OF', 'DIGESTION', 'IS', 'APT', 'TO', 'PROVOKE', 'INDISPOSITION'] +3538-142836-0026-1593: hyp=['THEY', 'SHOULD', 'BE', 'TAKEN', 'IMMEDIATELY', 'AFTER', 'THE', 'REPAST', 'OR', 'SOME', 'HOURS', 'AFTER', 'BECAUSE', 'THE', 'TAKING', 'OF', 'THESE', 'SUBSTANCES', 'DURING', 'THE', 'PROCESS', 'OF', 'DIGESTION', 'IS', 'APT', 'TO', 'PROVOKE', 'INDISPOSITION'] +3538-163619-0000-1500: ref=['THERE', 'WAS', 'ONCE', 'ON', 'A', 'TIME', 'A', 'WIDOWER', 'WHO', 'HAD', 'A', 'SON', 'AND', 'A', 'DAUGHTER', 'BY', 'HIS', 'FIRST', 'WIFE'] +3538-163619-0000-1500: hyp=['THERE', 'WAS', 'ONCE', 'ON', 'THE', 'TIME', 'A', 'WIDOWER', 'WHO', 'HAD', 'A', 'SUDDEN', 'AND', 'A', 'DAUGHTER', 'BY', 'HIS', 'FIRST', 'WIF'] +3538-163619-0001-1501: ref=['FROM', 'THE', 'VERY', 'DAY', 'THAT', 'THE', 'NEW', 'WIFE', 'CAME', 'INTO', 'THE', 'HOUSE', 'THERE', 'WAS', 'NO', 'PEACE', 'FOR', 'THE', "MAN'S", 'CHILDREN', 'AND', 'NOT', 'A', 'CORNER', 'TO', 'BE', 'FOUND', 'WHERE', 'THEY', 'COULD', 'GET', 'ANY', 'REST', 'SO', 'THE', 'BOY', 'THOUGHT', 'THAT', 'THE', 'BEST', 'THING', 'HE', 'COULD', 'DO', 'WAS', 'TO', 'GO', 'OUT', 'INTO', 'THE', 'WORLD', 'AND', 'TRY', 'TO', 'EARN', 'HIS', 'OWN', 'BREAD'] +3538-163619-0001-1501: hyp=['FROM', 'THE', 'VERY', 'DAY', 'THAT', 'THE', 'NEW', 'WIFE', 'CAME', 'INTO', 'THE', 'HOUSE', 'THERE', 'WAS', 'NO', 'PEACE', 'FOR', 'THE', "MAN'S", 'CHILDREN', 'AND', 'NOT', 'A', 'CORNER', 'TO', 'BE', 'FOUND', 'WHERE', 'THEY', 'COULD', 'GET', 'ANY', 'REST', 'SO', 'THE', 'BOY', 'THOUGHT', 'THAT', 'THE', 'BEST', 'THING', 'HE', 'COULD', 'DO', 'WAS', 'TO', 'GO', 'OUT', 'INTO', 'THE', 'WORLD', 'AND', 'TRY', 'TO', 'EARN', 'HIS', 'OWN', 'BREAD'] +3538-163619-0002-1502: ref=['BUT', 'HIS', 'SISTER', 'WHO', 'WAS', 'STILL', 'AT', 'HOME', 'FARED', 'WORSE', 'AND', 'WORSE'] +3538-163619-0002-1502: hyp=['BUT', 'HIS', 'SISTER', 'WHO', 'WAS', 'STILL', 'AT', 'HOME', 'FARED', 'WORSE', 'AND', 'WORSE'] +3538-163619-0003-1503: ref=['KISS', 'ME', 'GIRL', 'SAID', 'THE', 'HEAD'] +3538-163619-0003-1503: hyp=['KISS', 'ME', 'GO', 'SAID', 'THE', 'HEAD'] +3538-163619-0004-1504: ref=['WHEN', 'THE', 'KING', 'ENTERED', 'AND', 'SAW', 'IT', 'HE', 'STOOD', 'STILL', 'AS', 'IF', 'HE', 'WERE', 'IN', 'FETTERS', 'AND', 'COULD', 'NOT', 'STIR', 'FROM', 'THE', 'SPOT', 'FOR', 'THE', 'PICTURE', 'SEEMED', 'TO', 'HIM', 'SO', 'BEAUTIFUL'] +3538-163619-0004-1504: hyp=['WHEN', 'THE', 'KING', 'ENTERED', 'AND', 'SOUGHT', 'HE', 'STOOD', 'STILL', 'AS', 'IF', 'HE', 'WERE', 'IN', 'FETTERS', 'AND', 'COULD', 'NOT', 'STIR', 'FROM', 'THE', 'SPOT', 'FOR', 'THE', 'PICTURE', 'SEEMED', 'TO', 'HIM', 'SO', 'BEAUTIFUL'] +3538-163619-0005-1505: ref=['THE', 'YOUTH', 'PROMISED', 'TO', 'MAKE', 'ALL', 'THE', 'HASTE', 'HE', 'COULD', 'AND', 'SET', 'FORTH', 
'FROM', 'THE', "KING'S", 'PALACE'] +3538-163619-0005-1505: hyp=['THESE', 'PROMISED', 'TO', 'MAKE', 'ALL', 'THE', 'HASTE', 'HE', 'COULD', 'AND', 'SET', 'FORTH', 'FROM', 'THE', "KING'S", 'PALACE'] +3538-163619-0006-1506: ref=['AT', 'LAST', 'THEY', 'CAME', 'IN', 'SIGHT', 'OF', 'LAND'] +3538-163619-0006-1506: hyp=['AT', 'LAST', 'THEY', 'CAME', 'IN', 'SIGHT', 'OF', 'LAND'] +3538-163619-0007-1507: ref=['WELL', 'IF', 'MY', 'BROTHER', 'SAYS', 'SO', 'I', 'MUST', 'DO', 'IT', 'SAID', 'THE', "MAN'S", 'DAUGHTER', 'AND', 'SHE', 'FLUNG', 'HER', 'CASKET', 'INTO', 'THE', 'SEA'] +3538-163619-0007-1507: hyp=['WELL', 'IF', 'MY', 'BROTHER', 'SAYS', 'SO', 'I', 'MUST', 'DO', 'IT', 'SAID', 'THE', "MAN'S", 'DAUGHTER', 'AND', 'SHE', 'FLUNG', 'HER', 'CASKET', 'INTO', 'THE', 'SEA'] +3538-163619-0008-1508: ref=['WHAT', 'IS', 'MY', 'BROTHER', 'SAYING', 'ASKED', 'HIS', 'SISTER', 'AGAIN'] +3538-163619-0008-1508: hyp=['WHAT', 'IS', 'MY', 'BROTHER', 'SAYING', 'ASKED', 'HIS', 'SISTER', 'AGAIN'] +3538-163619-0009-1509: ref=['ON', 'THE', 'FIRST', 'THURSDAY', 'NIGHT', 'AFTER', 'THIS', 'A', 'BEAUTIFUL', 'MAIDEN', 'CAME', 'INTO', 'THE', 'KITCHEN', 'OF', 'THE', 'PALACE', 'AND', 'BEGGED', 'THE', 'KITCHEN', 'MAID', 'WHO', 'SLEPT', 'THERE', 'TO', 'LEND', 'HER', 'A', 'BRUSH'] +3538-163619-0009-1509: hyp=['ON', 'THE', 'FIRST', 'THURSDAY', 'NIGHT', 'AFTER', 'THIS', 'A', 'BEAUTIFUL', 'MAIDEN', 'CAME', 'INTO', 'THE', 'KITCHEN', 'OF', 'THE', 'PALACE', 'AND', 'BEGGED', 'THE', 'KITCHEN', 'MAID', 'WHO', 'SLEPT', 'THERE', 'TO', 'LEND', 'HER', 'A', 'BRUSH'] +3538-163619-0010-1510: ref=['SHE', 'BEGGED', 'VERY', 'PRETTILY', 'AND', 'GOT', 'IT', 'AND', 'THEN', 'SHE', 'BRUSHED', 'HER', 'HAIR', 'AND', 'THE', 'GOLD', 'DROPPED', 'FROM', 'IT'] +3538-163619-0010-1510: hyp=['SHE', 'BEGGED', 'VERY', 'PRETTILY', 'AND', 'GOT', 'IT', 'AND', 'THEN', 'SHE', 'BRUSHED', 'HER', 'HAIR', 'AND', 'THE', 'GOLD', 'DROPPED', 'FROM', 'IT'] +3538-163619-0011-1511: ref=['OUT', 'ON', 'THEE', 'UGLY', 'BUSHY', 'BRIDE', 'SLEEPING', 'SO', 'SOFT', 'BY', 'THE', 'YOUNG', "KING'S", 'SIDE', 'ON', 'SAND', 'AND', 'STONES', 'MY', 'BED', 'I', 'MAKE', 'AND', 'MY', 'BROTHER', 'SLEEPS', 'WITH', 'THE', 'COLD', 'SNAKE', 'UNPITIED', 'AND', 'UNWEPT'] +3538-163619-0011-1511: hyp=['OUT', 'ON', 'THE', 'UGLY', 'BUSHY', 'BRIDE', 'SLEEPING', 'SO', 'SOFT', 'BY', 'THE', 'YOUNG', "KING'S", 'SIDE', 'ON', 'SAND', 'AND', 'STONES', 'MY', 'BED', 'I', 'MAKE', 'AND', 'MY', 'BROTHER', 'SLEEPS', 'WITH', 'THE', 'COLD', 'SNAKE', 'UNPITIED', 'AND', 'UNWEPT'] +3538-163619-0012-1512: ref=['I', 'SHALL', 'COME', 'TWICE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN', 'SAID', 'SHE'] +3538-163619-0012-1512: hyp=['I', 'SHALL', 'COME', 'TWICE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN', 'SAID', 'SHE'] +3538-163619-0013-1513: ref=['THIS', 'TIME', 'ALSO', 'AS', 'BEFORE', 'SHE', 'BORROWED', 'A', 'BRUSH', 'AND', 'BRUSHED', 'HER', 'HAIR', 'WITH', 'IT', 'AND', 'THE', 'GOLD', 'DROPPED', 'DOWN', 'AS', 'SHE', 'DID', 'IT', 'AND', 'AGAIN', 'SHE', 'SENT', 'THE', 'DOG', 'OUT', 'THREE', 'TIMES', 'AND', 'WHEN', 'DAY', 'DAWNED', 'SHE', 'DEPARTED', 'BUT', 'AS', 'SHE', 'WAS', 'GOING', 'SHE', 'SAID', 'AS', 'SHE', 'HAD', 'SAID', 'BEFORE', 'I', 'SHALL', 'COME', 'ONCE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN'] +3538-163619-0013-1513: hyp=['THIS', 'TIME', 'ALSO', 'AS', 'BEFORE', 'SHE', 'BORROWED', 'A', 'BRUSH', 'AND', 'BRUSHED', 'HER', 'HAIR', 'WITH', 'IT', 'AND', 'THE', 'GOLD', 'DROPPED', 'DOWN', 'AS', 'SHE', 'DID', 'IT', 'AND', 'AGAIN', 'SHE', 'SENT', 'THE', 'DOG', 'OUT', 'THREE', 'TIMES', 'AND', 'WHEN', 'DAY', 'DAWNED', 'SHE', 'DEPARTED', 'BUT', 
'AS', 'SHE', 'WAS', 'GOING', 'SHE', 'SAID', 'AS', 'SHE', 'HAD', 'SAID', 'BEFORE', 'I', 'SHALL', 'COME', 'ONCE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN'] +3538-163619-0014-1514: ref=['NO', 'ONE', 'CAN', 'TELL', 'HOW', 'DELIGHTED', 'THE', 'KING', 'WAS', 'TO', 'GET', 'RID', 'OF', 'THAT', 'HIDEOUS', 'BUSHY', 'BRIDE', 'AND', 'GET', 'A', 'QUEEN', 'WHO', 'WAS', 'BRIGHT', 'AND', 'BEAUTIFUL', 'AS', 'DAY', 'ITSELF'] +3538-163619-0014-1514: hyp=['NO', 'ONE', 'CAN', 'TELL', 'HOW', 'DELIGHTED', 'THE', 'KING', 'WAS', 'TO', 'GET', 'RID', 'OF', 'THAT', 'HIDEOUS', 'BUSHY', 'BRIDE', 'AND', 'GET', 'A', 'QUEEN', 'WHO', 'WAS', 'BRIGHT', 'AND', 'BEAUTIFUL', 'AS', 'DAY', 'ITSELF'] +3538-163622-0000-1515: ref=['WILT', 'THOU', 'SERVE', 'ME', 'AND', 'WATCH', 'MY', 'SEVEN', 'FOALS', 'ASKED', 'THE', 'KING'] +3538-163622-0000-1515: hyp=['WILT', 'THOU', 'SERVE', 'ME', 'AND', 'WATCH', 'MY', 'SEVEN', 'FOOLS', 'ASKED', 'THE', 'KING'] +3538-163622-0001-1516: ref=['THE', 'YOUTH', 'THOUGHT', 'THAT', 'IT', 'WAS', 'VERY', 'EASY', 'WORK', 'TO', 'WATCH', 'THE', 'FOALS', 'AND', 'THAT', 'HE', 'COULD', 'DO', 'IT', 'WELL', 'ENOUGH'] +3538-163622-0001-1516: hyp=['THE', 'YOUTH', 'THOUGHT', 'IT', 'WAS', 'VERY', 'EASY', 'WORK', 'TO', 'WATCH', 'THE', 'FOLDS', 'AND', 'THAT', 'HE', 'COULD', 'DO', 'IT', 'WELL', 'ENOUGH'] +3538-163622-0002-1517: ref=['HAST', 'THOU', 'WATCHED', 'FAITHFULLY', 'AND', 'WELL', 'THE', 'WHOLE', 'DAY', 'LONG', 'SAID', 'THE', 'KING', 'WHEN', 'THE', 'LAD', 'CAME', 'INTO', 'HIS', 'PRESENCE', 'IN', 'THE', 'EVENING'] +3538-163622-0002-1517: hyp=['HAST', 'THOU', 'WATCHED', 'FAITHFULLY', 'AND', 'WELL', 'BEHOLDAY', 'LONG', 'SAID', 'THE', 'KING', 'WHEN', 'THE', 'LAD', 'CAME', 'INTO', 'HIS', 'PRESENCE', 'IN', 'THE', 'EVENING'] +3538-163622-0003-1518: ref=['YES', 'THAT', 'I', 'HAVE', 'SAID', 'THE', 'YOUTH'] +3538-163622-0003-1518: hyp=['YES', 'THAT', 'I', 'HAVE', 'SAID', 'THE', 'YOUTH'] +3538-163622-0004-1519: ref=['HE', 'HAD', 'GONE', 'OUT', 'ONCE', 'TO', 'SEEK', 'A', 'PLACE', 'HE', 'SAID', 'BUT', 'NEVER', 'WOULD', 'HE', 'DO', 'SUCH', 'A', 'THING', 'AGAIN'] +3538-163622-0004-1519: hyp=['HE', 'HAD', 'GONE', 'AT', 'ONCE', 'TO', 'SEEK', 'A', 'PLACE', 'HE', 'SAID', 'BUT', 'NEVER', 'WOULD', 'HE', 'DO', 'SUCH', 'A', 'THING', 'AGAIN'] +3538-163622-0005-1520: ref=['THEN', 'THE', 'KING', 'PROMISED', 'HIM', 'THE', 'SAME', 'PUNISHMENT', 'AND', 'THE', 'SAME', 'REWARD', 'THAT', 'HE', 'HAD', 'PROMISED', 'HIS', 'BROTHER'] +3538-163622-0005-1520: hyp=['THEN', 'THE', 'KING', 'PROMISED', 'HIM', 'THE', 'SAME', 'PUNISHMENT', 'AND', 'THE', 'SAME', 'REWARD', 'THAT', 'HE', 'HAD', 'PROMISED', 'HIS', 'BROTHER'] +3538-163622-0006-1521: ref=['WHEN', 'HE', 'HAD', 'RUN', 'AFTER', 'THE', 'FOALS', 'FOR', 'A', 'LONG', 'LONG', 'TIME', 'AND', 'WAS', 'HOT', 'AND', 'TIRED', 'HE', 'PASSED', 'BY', 'A', 'CLEFT', 'IN', 'THE', 'ROCK', 'WHERE', 'AN', 'OLD', 'WOMAN', 'WAS', 'SITTING', 'SPINNING', 'WITH', 'A', 'DISTAFF', 'AND', 'SHE', 'CALLED', 'TO', 'HIM'] +3538-163622-0006-1521: hyp=['WHEN', 'HE', 'HAD', 'RUN', 'AFTER', 'THE', 'FALLS', 'FOR', 'A', 'LONG', 'LONG', 'TIME', 'AND', 'WAS', 'HOT', 'AND', 'TIRED', 'HE', 'PASSED', 'BY', 'CLEF', 'IN', 'THE', 'ROCK', 'WHERE', 'AN', 'OLD', 'WOMAN', 'WAS', 'SITTING', 'SPINNING', 'WITH', 'THE', 'DISTANT', 'AND', 'SHE', 'CALLED', 'TO', 'HIM'] +3538-163622-0007-1522: ref=['COME', 'HITHER', 'COME', 'HITHER', 'MY', 'HANDSOME', 'SON', 'AND', 'LET', 'ME', 'COMB', 'YOUR', 'HAIR'] +3538-163622-0007-1522: hyp=['COMMANDER', 'COME', 'HITHER', 'MY', 'HANDSOME', 'SON', 'AND', 'LET', 'ME', 'CALM', 'YOUR', 'HAIR'] +3538-163622-0008-1523: 
ref=['THE', 'YOUTH', 'LIKED', 'THE', 'THOUGHT', 'OF', 'THIS', 'LET', 'THE', 'FOALS', 'RUN', 'WHERE', 'THEY', 'CHOSE', 'AND', 'SEATED', 'HIMSELF', 'IN', 'THE', 'CLEFT', 'OF', 'THE', 'ROCK', 'BY', 'THE', 'SIDE', 'OF', 'THE', 'OLD', 'HAG'] +3538-163622-0008-1523: hyp=['THE', 'YOUTH', 'LIKED', 'THE', 'THOUGHT', 'OF', 'THIS', 'LET', 'THE', 'FOLDS', 'RUM', 'WHERE', 'THEY', 'CHOSE', 'AND', 'SEATED', 'HIMSELF', 'IN', 'THE', 'CLEFT', 'OF', 'THE', 'ROCK', 'BY', 'THE', 'SIDE', 'OF', 'THE', 'OLD', 'HAG'] +3538-163622-0009-1524: ref=['SO', 'THERE', 'HE', 'SAT', 'WITH', 'HIS', 'HEAD', 'ON', 'HER', 'LAP', 'TAKING', 'HIS', 'EASE', 'THE', 'LIVELONG', 'DAY'] +3538-163622-0009-1524: hyp=['SO', 'THERE', 'HE', 'SAT', 'WITH', 'HIS', 'HEAD', 'ON', 'HER', 'LAP', 'TAKING', 'HIS', 'EASE', 'THE', 'LIVE', 'LONG', 'DAY'] +3538-163622-0010-1525: ref=['ON', 'THE', 'THIRD', 'DAY', 'CINDERLAD', 'WANTED', 'TO', 'SET', 'OUT'] +3538-163622-0010-1525: hyp=['ON', 'THE', 'THIRD', 'DAY', 'SAID', 'THE', 'LAD', 'WANTED', 'TO', 'SET', 'OUT'] +3538-163622-0011-1526: ref=['THE', 'TWO', 'BROTHERS', 'LAUGHED', 'AT', 'HIM', 'AND', 'HIS', 'FATHER', 'AND', 'MOTHER', 'BEGGED', 'HIM', 'NOT', 'TO', 'GO', 'BUT', 'ALL', 'TO', 'NO', 'PURPOSE', 'AND', 'CINDERLAD', 'SET', 'OUT', 'ON', 'HIS', 'WAY'] +3538-163622-0011-1526: hyp=['THE', 'TWO', 'BROTHERS', 'LAUGHED', 'AT', 'HIM', 'AND', 'HIS', 'FATHER', 'AND', 'MOTHER', 'BEGGED', 'HIM', 'NOT', 'TO', 'GO', 'BUT', 'ALL', 'TO', 'NO', 'PURPOSE', 'AND', 'SINDERLAD', 'SET', 'OUT', 'ON', 'HIS', 'WAY'] +3538-163622-0012-1527: ref=['I', 'AM', 'WALKING', 'ABOUT', 'IN', 'SEARCH', 'OF', 'A', 'PLACE', 'SAID', 'CINDERLAD'] +3538-163622-0012-1527: hyp=['I', 'AM', 'WALKING', 'ABOUT', 'IN', 'SEARCH', 'OF', 'A', 'PLACE', 'SAID', 'SINGLEAD'] +3538-163622-0013-1528: ref=['I', 'WOULD', 'MUCH', 'RATHER', 'HAVE', 'THE', 'PRINCESS', 'SAID', 'CINDERLAD'] +3538-163622-0013-1528: hyp=['I', 'WOULD', 'MUCH', 'RATHER', 'HAVE', 'THE', 'PRINCESS', 'SAID', 'CINDERLAD'] +3538-163622-0014-1529: ref=['AND', 'THUS', 'THEY', 'JOURNEYED', 'ONWARDS', 'A', 'LONG', 'LONG', 'WAY'] +3538-163622-0014-1529: hyp=['AND', 'THUS', 'THEY', 'JOURNEYED', 'ONWARDS', 'A', 'LONG', 'LONG', 'WAY'] +3538-163622-0015-1530: ref=['WHEN', 'THEY', 'HAD', 'GONE', 'THUS', 'FOR', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FOAL', 'AGAIN', 'ASKED', 'DOST', 'THOU', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0015-1530: hyp=['WHEN', 'THEY', 'HAD', 'GONE', 'THUS', 'FOR', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FOLIGAN', 'ASKED', 'DOST', 'THOU', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0016-1531: ref=['YES', 'NOW', 'I', 'SEE', 'SOMETHING', 'THAT', 'IS', 'WHITE', 'SAID', 'CINDERLAD'] +3538-163622-0016-1531: hyp=['YES', 'NOW', 'I', 'SEE', 'SOMETHING', 'THAT', 'IS', 'WHITE', 'SAID', 'CINDERLAD'] +3538-163622-0017-1532: ref=['IT', 'LOOKS', 'LIKE', 'THE', 'TRUNK', 'OF', 'A', 'GREAT', 'THICK', 'BIRCH', 'TREE'] +3538-163622-0017-1532: hyp=['IT', 'LOOKS', 'LIKE', 'THE', 'TRUNK', 'OF', 'A', 'GREAT', 'THICK', 'BIRCH', 'TREE'] +3538-163622-0018-1533: ref=['CINDERLAD', 'TRIED', 'BUT', 'COULD', 'NOT', 'DO', 'IT', 'SO', 'HE', 'HAD', 'TO', 'TAKE', 'A', 'DRAUGHT', 'FROM', 'THE', 'PITCHER', 'AND', 'THEN', 'ONE', 'MORE', 'AND', 'AFTER', 'THAT', 'STILL', 'ANOTHER', 'AND', 'THEN', 'HE', 'WAS', 'ABLE', 'TO', 'WIELD', 'THE', 'SWORD', 'WITH', 'PERFECT', 'EASE'] +3538-163622-0018-1533: hyp=['SIDNEYLOD', 'TRIED', 'BUT', 'COULD', 'NOT', 'DO', 'IT', 'SO', 'HE', 'HAD', 'TO', 'TAKE', 'A', 'DRAUGHT', 'FROM', 'THE', 'PITCHER', 'AND', 'THEN', 'ONE', 'MORE', 'AND', 'AFTER', 'THAT', 'STILL', 'ANOTHER', 'AND', 'THEN', 'HE', 
'WAS', 'ABLE', 'TO', 'WIELD', 'THE', 'SWORD', 'WITH', 'PERFECT', 'EASE'] +3538-163622-0019-1534: ref=['FOR', 'WE', 'ARE', 'BROTHERS', 'OF', 'THE', 'PRINCESS', 'WHOM', 'THOU', 'ART', 'TO', 'HAVE', 'WHEN', 'THOU', 'CANST', 'TELL', 'THE', 'KING', 'WHAT', 'WE', 'EAT', 'AND', 'DRINK', 'BUT', 'THERE', 'IS', 'A', 'MIGHTY', 'TROLL', 'WHO', 'HAS', 'CAST', 'A', 'SPELL', 'OVER', 'US'] +3538-163622-0019-1534: hyp=['FOR', 'WE', 'ARE', 'BROTHERS', 'OF', 'THE', 'PRINCESS', 'WHOM', 'THOU', 'ART', 'TO', 'HAVE', 'WHEN', 'THOU', 'CANST', 'TELL', 'THE', 'KING', 'WHAT', 'WE', 'EAT', 'AND', 'DRINK', 'BUT', 'THERE', 'IS', 'A', 'MIGHTY', 'TROLL', 'WHO', 'HAS', 'CAST', 'A', 'SPELL', 'OVER', 'US'] +3538-163622-0020-1535: ref=['WHEN', 'THEY', 'HAD', 'TRAVELLED', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FOAL', 'SAID', 'DOST', 'THOU', 'SEE', 'ANYTHING'] +3538-163622-0020-1535: hyp=['WHEN', 'THEY', 'HAD', 'TRAVELLED', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FALL', 'SAID', 'DOST', 'THOU', 'SEE', 'ANYTHING'] +3538-163622-0021-1536: ref=['AND', 'NOW', 'INQUIRED', 'THE', 'FOAL', 'SEEST', 'THOU', 'NOTHING', 'NOW'] +3538-163622-0021-1536: hyp=['AND', 'NOW', 'INQUIRED', 'THE', 'WHOLE', 'CEASE', 'DONE', 'NOTHING', 'NOW'] +3538-163622-0022-1537: ref=['NOW', 'THEN', 'SAID', 'THE', 'FOAL', 'DOST', 'THOU', 'NOT', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0022-1537: hyp=['NOW', 'THEN', 'SAID', 'THE', 'FOOL', 'DOST', 'THOU', 'NOT', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0023-1538: ref=['THAT', 'IS', 'A', 'RIVER', 'SAID', 'THE', 'FOAL', 'AND', 'WE', 'HAVE', 'TO', 'CROSS', 'IT'] +3538-163622-0023-1538: hyp=['THAT', 'IS', 'A', 'RIVER', 'SAID', 'THE', 'FOAL', 'AND', 'WE', 'HAVE', 'TO', 'CROSS', 'IT'] +3538-163622-0024-1539: ref=['I', 'HAVE', 'DONE', 'MY', 'BEST', 'REPLIED', 'CINDERLAD'] +3538-163622-0024-1539: hyp=['I', 'HAVE', 'DONE', 'MY', 'BEST', 'REPLIED', 'SIR', 'LAD'] +3538-163624-0000-1540: ref=['ONCE', 'UPON', 'A', 'TIME', 'THERE', 'WAS', 'A', 'KING', 'IN', 'THE', 'NORTH', 'WHO', 'HAD', 'WON', 'MANY', 'WARS', 'BUT', 'NOW', 'HE', 'WAS', 'OLD'] +3538-163624-0000-1540: hyp=['ONCE', 'UPON', 'A', 'TIME', 'THERE', 'WAS', 'A', 'KING', 'IN', 'THE', 'NORTH', 'WHO', 'HAD', 'WON', 'MANY', 'WARS', 'BUT', 'NOW', 'HE', 'WAS', 'OLD'] +3538-163624-0001-1541: ref=['THE', 'OLD', 'KING', 'WENT', 'OUT', 'AND', 'FOUGHT', 'BRAVELY', 'BUT', 'AT', 'LAST', 'HIS', 'SWORD', 'BROKE', 'AND', 'HE', 'WAS', 'WOUNDED', 'AND', 'HIS', 'MEN', 'FLED'] +3538-163624-0001-1541: hyp=['THE', 'OLD', 'KING', 'WENT', 'OUT', 'AND', 'THOUGHT', 'BRAVELY', 'BUT', 'AT', 'LAST', 'HIS', 'SWORD', 'BROKE', 'AND', 'HE', 'WAS', 'WOUNDED', 'AND', 'HIS', 'MEN', 'FLED'] +3538-163624-0002-1542: ref=['BUT', 'IN', 'THE', 'NIGHT', 'WHEN', 'THE', 'BATTLE', 'WAS', 'OVER', 'HIS', 'YOUNG', 'WIFE', 'CAME', 'OUT', 'AND', 'SEARCHED', 'FOR', 'HIM', 'AMONG', 'THE', 'SLAIN', 'AND', 'AT', 'LAST', 'SHE', 'FOUND', 'HIM', 'AND', 'ASKED', 'WHETHER', 'HE', 'MIGHT', 'BE', 'HEALED'] +3538-163624-0002-1542: hyp=['BUT', 'IN', 'THE', 'NIGHT', 'WHEN', 'THE', 'BATTLE', 'IS', 'OVER', 'HIS', 'YOUNG', 'WIFE', 'CAME', 'OUT', 'IN', 'SEARCHED', 'FOR', 'HIM', 'AMONG', 'THE', 'SLAIN', 'AND', 'AT', 'LAST', 'SHE', 'FOUND', 'HIM', 'AND', 'ASKED', 'WHETHER', 'HE', 'MIGHT', 'BE', 'HEALED'] +3538-163624-0003-1543: ref=['SO', 'HE', 'ASKED', 'THE', 'QUEEN', 'HOW', 'DO', 'YOU', 'KNOW', 'IN', 'THE', 'DARK', 'OF', 'NIGHT', 'WHETHER', 'THE', 'HOURS', 'ARE', 'WEARING', 'TO', 'THE', 'MORNING', 'AND', 'SHE', 'SAID'] +3538-163624-0003-1543: hyp=['SO', 'YES', 'THE', 'QUEEN', 'HOW', 'DO', 'YOU', 'KNOW', 'IN', 'THE', 'DARK', 'OF', 'NIGHT', 'WHETHER', 
'THE', 'HOURS', 'ARE', 'WEARING', 'TO', 'THE', 'MORNING', 'AND', 'SHE', 'SAID'] +3538-163624-0004-1544: ref=['THEN', 'THE', 'OLD', 'MAN', 'SAID', 'DRIVE', 'ALL', 'THE', 'HORSES', 'INTO', 'THE', 'RIVER', 'AND', 'CHOOSE', 'THE', 'ONE', 'THAT', 'SWIMS', 'ACROSS'] +3538-163624-0004-1544: hyp=['THEN', 'THE', 'OLD', 'MAN', 'SAID', 'DRIVE', 'ALL', 'THE', 'HORSES', 'INTO', 'THE', 'RIVER', 'AND', 'CHOOSE', 'THE', 'ONE', 'THAT', 'SWIMS', 'ACROSS'] +3538-163624-0005-1545: ref=['HE', 'IS', 'NO', 'BIGGER', 'THAN', 'OTHER', 'DRAGONS', 'SAID', 'THE', 'TUTOR', 'AND', 'IF', 'YOU', 'WERE', 'AS', 'BRAVE', 'AS', 'YOUR', 'FATHER', 'YOU', 'WOULD', 'NOT', 'FEAR', 'HIM'] +3538-163624-0005-1545: hyp=['HE', 'HAS', 'NO', 'BIGGER', 'THAN', 'OTHER', 'DRAGONS', 'SAID', 'THE', 'TUDOR', 'AND', 'IF', 'YOURS', 'BRAVE', 'AS', 'YOUR', 'FATHER', 'YOU', 'WOULD', 'NOT', 'FEAR', 'HIM'] +3538-163624-0006-1546: ref=['THEN', 'THE', 'PERSON', 'WHO', 'HAD', 'KILLED', 'OTTER', 'WENT', 'DOWN', 'AND', 'CAUGHT', 'THE', 'DWARF', 'WHO', 'OWNED', 'ALL', 'THE', 'TREASURE', 'AND', 'TOOK', 'IT', 'FROM', 'HIM'] +3538-163624-0006-1546: hyp=['THEN', 'THE', 'PERSON', 'WHO', 'HAD', 'KILLED', 'OTTER', 'WENT', 'DOWN', 'AND', 'CAUGHT', 'THE', 'DWARF', 'WHO', 'OWNED', 'ALL', 'THE', 'TREASURE', 'AND', 'TOOK', 'IT', 'FROM', 'HIM'] +3538-163624-0007-1547: ref=['ONLY', 'ONE', 'RING', 'WAS', 'LEFT', 'WHICH', 'THE', 'DWARF', 'WORE', 'AND', 'EVEN', 'THAT', 'WAS', 'TAKEN', 'FROM', 'HIM'] +3538-163624-0007-1547: hyp=['ONLY', 'ONE', 'RING', 'WAS', 'LEFT', 'WHICH', 'THE', 'DWARF', 'WORE', 'AND', 'EVEN', 'THAT', 'WAS', 'TAKEN', 'FROM', 'HIM'] +3538-163624-0008-1548: ref=['SO', 'REGIN', 'MADE', 'A', 'SWORD', 'AND', 'SIGURD', 'TRIED', 'IT', 'WITH', 'A', 'BLOW', 'ON', 'A', 'LUMP', 'OF', 'IRON', 'AND', 'THE', 'SWORD', 'BROKE'] +3538-163624-0008-1548: hyp=['SO', 'RIGAN', 'MADE', 'A', 'SWORD', 'AND', 'CIGAR', 'TRIED', 'IT', 'WITH', 'THE', 'BLOW', 'ON', 'A', 'LUMP', 'OF', 'IRON', 'AND', 'THE', 'SWORD', 'BROKE'] +3538-163624-0009-1549: ref=['THEN', 'SIGURD', 'WENT', 'TO', 'HIS', 'MOTHER', 'AND', 'ASKED', 'FOR', 'THE', 'BROKEN', 'PIECES', 'OF', 'HIS', "FATHER'S", 'BLADE', 'AND', 'GAVE', 'THEM', 'TO', 'REGIN'] +3538-163624-0009-1549: hyp=['THEN', 'CIGARET', 'WENT', 'TO', 'HIS', 'MOTHER', 'AND', 'ASKED', 'FOR', 'THE', 'BROKEN', 'PIECES', 'OF', 'HIS', "FATHER'S", 'BLADE', 'AND', 'GAVE', 'THEM', 'TO', 'RIGAN'] +3538-163624-0010-1550: ref=['SO', 'SIGURD', 'SAID', 'THAT', 'SWORD', 'WOULD', 'DO'] +3538-163624-0010-1550: hyp=['SO', 'CIGARET', 'SAID', 'THAT', 'SWORD', 'WOULD', 'DO'] +3538-163624-0011-1551: ref=['THEN', 'HE', 'SAW', 'THE', 'TRACK', 'WHICH', 'THE', 'DRAGON', 'MADE', 'WHEN', 'HE', 'WENT', 'TO', 'A', 'CLIFF', 'TO', 'DRINK', 'AND', 'THE', 'TRACK', 'WAS', 'AS', 'IF', 'A', 'GREAT', 'RIVER', 'HAD', 'ROLLED', 'ALONG', 'AND', 'LEFT', 'A', 'DEEP', 'VALLEY'] +3538-163624-0011-1551: hyp=['THEN', 'HE', 'SAW', 'THE', 'TRACK', 'WHICH', 'THE', 'DRAGON', 'HAD', 'MADE', 'WHEN', 'HE', 'WENT', 'TO', 'A', 'CLIFF', 'TO', 'DRINK', 'AND', 'THE', 'TRACK', 'WAS', 'AS', 'IF', 'A', 'GREAT', 'RIVER', 'HAD', 'ROLLED', 'ALONG', 'AND', 'LEFT', 'A', 'DEEP', 'VALLEY'] +3538-163624-0012-1552: ref=['BUT', 'SIGURD', 'WAITED', 'TILL', 'HALF', 'OF', 'HIM', 'HAD', 'CRAWLED', 'OVER', 'THE', 'PIT', 'AND', 'THEN', 'HE', 'THRUST', 'THE', 'SWORD', 'GRAM', 'RIGHT', 'INTO', 'HIS', 'VERY', 'HEART'] +3538-163624-0012-1552: hyp=['BUT', 'CIGARET', 'WAITED', 'TILL', 'HALF', 'OF', 'HIM', 'HAD', 'CRAWLED', 'OVER', 'THE', 'PIT', 'AND', 'THEN', 'HE', 'THRUST', 'THE', 'SWORD', 'GRAHAM', 'RIGHT', 'INTO', 'HIS', 'VERY', 
'HEART'] +3538-163624-0013-1553: ref=['SIGURD', 'SAID', 'I', 'WOULD', 'TOUCH', 'NONE', 'OF', 'IT', 'IF', 'BY', 'LOSING', 'IT', 'I', 'SHOULD', 'NEVER', 'DIE'] +3538-163624-0013-1553: hyp=['CIGARET', 'SAID', 'I', 'WOULD', 'TOUCH', 'NONE', 'OF', 'IT', 'IF', 'BY', 'LOSING', 'IT', 'I', 'SHOULD', 'NEVER', 'DIE'] +3538-163624-0014-1554: ref=['BUT', 'ALL', 'MEN', 'DIE', 'AND', 'NO', 'BRAVE', 'MAN', 'LETS', 'DEATH', 'FRIGHTEN', 'HIM', 'FROM', 'HIS', 'DESIRE'] +3538-163624-0014-1554: hyp=['BUT', 'ALL', 'MEN', 'DIE', 'AND', 'NO', 'BRAVE', 'MAN', "LET'S", 'DEATH', 'FRIGHTEN', 'HIM', 'FROM', 'HIS', 'DESIRE'] +3538-163624-0015-1555: ref=['DIE', 'THOU', 'FAFNIR', 'AND', 'THEN', 'FAFNIR', 'DIED'] +3538-163624-0015-1555: hyp=['GUY', 'THOU', 'FAFNER', 'AND', 'THEN', 'STAFFNER', 'DIED'] +3538-163624-0016-1556: ref=['THEN', 'SIGURD', 'RODE', 'BACK', 'AND', 'MET', 'REGIN', 'AND', 'REGIN', 'ASKED', 'HIM', 'TO', 'ROAST', "FAFNIR'S", 'HEART', 'AND', 'LET', 'HIM', 'TASTE', 'OF', 'IT'] +3538-163624-0016-1556: hyp=['THEN', 'CIGAR', 'RODE', 'BACK', 'AND', 'MET', 'RIGAN', 'AND', 'RIGAN', 'ASKED', 'HIM', 'TO', 'ROAST', "FAFNER'S", 'HEART', 'AND', 'LET', 'HIM', 'TASTE', 'OF', 'IT'] +3538-163624-0017-1557: ref=['SO', 'SIGURD', 'PUT', 'THE', 'HEART', 'OF', 'FAFNIR', 'ON', 'A', 'STAKE', 'AND', 'ROASTED', 'IT'] +3538-163624-0017-1557: hyp=['SO', 'SIR', 'GOOD', 'PUT', 'THE', 'HEART', 'OF', 'FAFFNER', 'ON', 'A', 'STAKE', 'AND', 'ROASTED', 'IT'] +3538-163624-0018-1558: ref=['THERE', 'IS', 'SIGURD', 'ROASTING', "FAFNIR'S", 'HEART', 'FOR', 'ANOTHER', 'WHEN', 'HE', 'SHOULD', 'TASTE', 'OF', 'IT', 'HIMSELF', 'AND', 'LEARN', 'ALL', 'WISDOM'] +3538-163624-0018-1558: hyp=["THERE'S", 'CIGARET', 'ROASTING', "FAFTENNER'S", 'HEART', 'FOR', 'ANOTHER', 'WHEN', 'HE', 'SHOULD', 'TASTE', 'OF', 'IT', 'HIMSELF', 'AND', 'LEARN', 'ALL', 'WISDOM'] +3538-163624-0019-1559: ref=['THAT', 'LET', 'HIM', 'DO', 'AND', 'THEN', 'RIDE', 'OVER', 'HINDFELL', 'TO', 'THE', 'PLACE', 'WHERE', 'BRYNHILD', 'SLEEPS'] +3538-163624-0019-1559: hyp=['THAT', 'LET', 'HIM', 'DO', 'THEN', 'RIDE', 'OVER', 'HINFELD', 'TO', 'THE', 'PLACE', 'WHERE', 'BRINEHILL', 'SLEEPS'] +3538-163624-0020-1560: ref=['THERE', 'MUST', 'SHE', 'SLEEP', 'TILL', 'THOU', 'COMEST', 'FOR', 'HER', 'WAKING', 'RISE', 'UP', 'AND', 'RIDE', 'FOR', 'NOW', 'SURE', 'SHE', 'WILL', 'SWEAR', 'THE', 'VOW', 'FEARLESS', 'OF', 'BREAKING'] +3538-163624-0020-1560: hyp=['THERE', 'MUST', 'SHE', 'SLEEP', 'TILL', 'THOU', 'COMES', 'FOR', 'HER', 'WAKING', 'RISE', 'UP', 'AND', 'RIDE', 'FOR', 'NOW', 'SURE', 'SHE', 'WILL', 'SWEAR', 'THE', 'VOW', 'FEARLESS', 'OF', 'BREAKING'] +3538-163624-0021-1561: ref=['THEN', 'HE', 'TOOK', 'THE', 'HELMET', 'OFF', 'THE', 'HEAD', 'OF', 'THE', 'SLEEPER', 'AND', 'BEHOLD', 'SHE', 'WAS', 'A', 'MOST', 'BEAUTIFUL', 'LADY'] +3538-163624-0021-1561: hyp=['THEN', 'HE', 'TOOK', 'THE', 'HELMET', 'OFF', 'THE', 'HEAD', 'OF', 'THE', 'SLEEPER', 'AND', 'BEHOLD', 'SHE', 'WAS', 'A', 'MOST', 'BEAUTIFUL', 'LADY'] +3538-163624-0022-1562: ref=['THEN', 'SIGURD', 'RODE', 'AWAY', 'AND', 'HE', 'CAME', 'TO', 'THE', 'HOUSE', 'OF', 'A', 'KING', 'WHO', 'HAD', 'A', 'FAIR', 'DAUGHTER'] +3538-163624-0022-1562: hyp=['THEN', 'CIGARET', 'RODE', 'AWAY', 'AND', 'HE', 'CAME', 'TO', 'THE', 'HOUSE', 'OF', 'A', 'KING', 'WHO', 'HAD', 'A', 'FAIR', 'DAUGHTER'] +3538-163624-0023-1563: ref=['THEN', "BRYNHILD'S", 'FATHER', 'TOLD', 'GUNNAR', 'THAT', 'SHE', 'WOULD', 'MARRY', 'NONE', 'BUT', 'HIM', 'WHO', 'COULD', 'RIDE', 'THE', 'FLAME', 'IN', 'FRONT', 'OF', 'HER', 'ENCHANTED', 'TOWER', 'AND', 'THITHER', 'THEY', 'RODE', 'AND', 'GUNNAR', 'SET', 
'HIS', 'HORSE', 'AT', 'THE', 'FLAME', 'BUT', 'HE', 'WOULD', 'NOT', 'FACE', 'IT'] +3538-163624-0023-1563: hyp=['WHEN', "BURNHIL'S", 'FATHER', 'TOLD', 'GUNNER', 'THAT', 'SHE', 'WOULD', 'MARRY', 'NONE', 'BUT', 'HIM', 'WHO', 'COULD', 'RIDE', 'THE', 'FLAME', 'IN', 'FRONT', 'OF', 'HER', 'ENCHANTED', 'TOWER', 'AND', 'THAT', 'AS', 'THEY', 'RODE', 'AND', 'GUTTER', 'SET', 'HIS', 'HORSE', 'AT', 'THE', 'FLAME', 'BUT', 'HE', 'WOULD', 'NOT', 'FACE', 'IT'] +3538-163624-0024-1564: ref=['FOR', 'ONE', 'DAY', 'WHEN', 'BRYNHILD', 'AND', 'GUDRUN', 'WERE', 'BATHING', 'BRYNHILD', 'WADED', 'FARTHEST', 'OUT', 'INTO', 'THE', 'RIVER', 'AND', 'SAID', 'SHE', 'DID', 'THAT', 'TO', 'SHOW', 'SHE', 'WAS', "GUIRUN'S", 'SUPERIOR'] +3538-163624-0024-1564: hyp=['FOR', 'ONE', 'DAY', 'WHEN', 'BURNEHELD', 'AND', 'GUNDRON', 'WERE', 'BATHING', 'BURNEHALD', 'WAITED', 'FARTHEST', 'OUT', 'INTO', 'THE', 'RIVER', 'AND', 'SAID', 'SHE', 'DID', 'THAT', 'TO', 'SHOW', 'SHE', 'WAS', 'GUNDRUE', 'SUPERIOR'] +3538-163624-0025-1565: ref=['FOR', 'HER', 'HUSBAND', 'SHE', 'SAID', 'HAD', 'RIDDEN', 'THROUGH', 'THE', 'FLAME', 'WHEN', 'NO', 'OTHER', 'MAN', 'DARED', 'FACE', 'IT'] +3538-163624-0025-1565: hyp=['FOR', 'HER', 'HUSBAND', 'SHE', 'SAID', 'HAD', 'RIDDEN', 'THROUGH', 'THE', 'FLAME', 'WHEN', 'NO', 'OTHER', 'MAN', 'DARED', 'FACE', 'IT'] +3538-163624-0026-1566: ref=['NOT', 'LONG', 'TO', 'WAIT', 'HE', 'SAID', 'TILL', 'THE', 'BITTER', 'SWORD', 'STANDS', 'FAST', 'IN', 'MY', 'HEART', 'AND', 'THOU', 'WILL', 'NOT', 'LIVE', 'LONG', 'WHEN', 'I', 'AM', 'DEAD'] +3538-163624-0026-1566: hyp=['NOT', 'LONG', 'TO', 'WAIT', 'HE', 'SAID', 'TILL', 'THE', 'BITTER', 'SWORD', 'STANDS', 'FAST', 'IN', 'MY', 'HEART', 'AND', 'THOU', 'WILT', 'NOT', 'LIVE', 'LONG', 'WHEN', 'I', 'AM', 'DEAD'] +367-130732-0000-1466: ref=['LOBSTERS', 'AND', 'LOBSTERS'] +367-130732-0000-1466: hyp=['LOBSTERS', 'AND', 'LOBSTERS'] +367-130732-0001-1467: ref=['WHEN', 'IS', 'A', 'LOBSTER', 'NOT', 'A', 'LOBSTER', 'WHEN', 'IT', 'IS', 'A', 'CRAYFISH'] +367-130732-0001-1467: hyp=['WHEN', 'IS', 'A', 'LOBSTER', 'NOT', 'A', 'LOBSTER', 'WHEN', 'IT', 'IS', 'A', 'CRAYFISH'] +367-130732-0002-1468: ref=['THIS', 'QUESTION', 'AND', 'ANSWER', 'MIGHT', 'WELL', 'GO', 'INTO', 'THE', 'PRIMER', 'OF', 'INFORMATION', 'FOR', 'THOSE', 'WHO', 'COME', 'TO', 'SAN', 'FRANCISCO', 'FROM', 'THE', 'EAST', 'FOR', 'WHAT', 'IS', 'CALLED', 'A', 'LOBSTER', 'IN', 'SAN', 'FRANCISCO', 'IS', 'NOT', 'A', 'LOBSTER', 'AT', 'ALL', 'BUT', 'A', 'CRAYFISH'] +367-130732-0002-1468: hyp=['THIS', 'QUESTION', 'IN', 'ANSWER', 'MIGHT', 'WELL', 'GO', 'INTO', 'THE', 'PRIMARY', 'OF', 'INFORMATION', 'FOR', 'LUCIKAM', 'THE', 'SENT', 'FRANCISCO', 'FROM', 'THE', 'EAST', 'FOR', 'WHAT', 'IS', 'CALLED', 'A', 'LOBSTER', 'IN', 'FRITZO', 'IS', 'NOT', 'A', 'LOBSURD', 'AT', 'ALL', 'BUT', 'A', 'CRAYFISH'] +367-130732-0003-1469: ref=['THE', 'PACIFIC', 'CRAYFISH', 'HOWEVER', 'SERVES', 'EVERY', 'PURPOSE', 'AND', 'WHILE', 'MANY', 'CONTEND', 'THAT', 'ITS', 'MEAT', 'IS', 'NOT', 'SO', 'DELICATE', 'IN', 'FLAVOR', 'AS', 'THAT', 'OF', 'ITS', 'EASTERN', 'COUSIN', 'THE', 'CALIFORNIAN', 'WILL', 'AS', 'STRENUOUSLY', 'INSIST', 'THAT', 'IT', 'IS', 'BETTER', 'BUT', 'OF', 'COURSE', 'SOMETHING', 'MUST', 'ALWAYS', 'BE', 'ALLOWED', 'FOR', 'THE', 'PATRIOTISM', 'OF', 'THE', 'CALIFORNIAN'] +367-130732-0003-1469: hyp=['THE', 'PACIFIC', 'CRAYFISHHORESERVES', 'EVERY', 'PURPOSE', 'AND', 'WHILE', 'MANY', 'CONTEND', 'THAT', 'ITS', 'MEAT', 'IS', 'NOT', 'SO', 'DELICATE', 'AND', 'FLARE', 'AS', 'THAT', 'OF', 'ITS', 'EASTERN', 'COUSIN', 'THE', 'CALIFORNIA', 'WALLA', 'STRENUOUSLY', 'INSISTS', 'AND', 'IT', 
'IS', 'BETTER', 'BUT', 'OF', 'COURSE', 'SOMETHING', 'MUST', 'ALWAYS', 'BE', 'ALLOWED', 'FOR', 'THE', 'PATRIOTISM', 'OF', 'THE', 'CALIFORNI'] +367-130732-0004-1470: ref=['A', 'BOOK', 'COULD', 'BE', 'WRITTEN', 'ABOUT', 'THIS', 'RESTAURANT', 'AND', 'THEN', 'ALL', 'WOULD', 'NOT', 'BE', 'TOLD', 'FOR', 'ALL', 'ITS', 'SECRETS', 'CAN', 'NEVER', 'BE', 'KNOWN'] +367-130732-0004-1470: hyp=['A', 'BOOK', 'COULD', 'BE', 'WRITTEN', 'ABOUT', 'THIS', 'RESTAURANT', 'AND', 'THEN', 'ALL', 'WOULD', 'NOT', 'BE', 'TOLD', 'FOR', 'ALL', 'ITS', 'SECRETS', 'CAN', 'NEVER', 'BE', 'KNOWN'] +367-130732-0005-1471: ref=['IT', 'WAS', 'HERE', 'THAT', 'MOST', 'MAGNIFICENT', 'DINNERS', 'WERE', 'ARRANGED', 'IT', 'WAS', 'HERE', 'THAT', 'EXTRAORDINARY', 'DISHES', 'WERE', 'CONCOCTED', 'BY', 'CHEFS', 'OF', 'WORLD', 'WIDE', 'FAME', 'IT', 'WAS', 'HERE', 'THAT', 'LOBSTER', 'A', 'LA', 'NEWBERG', 'REACHED', 'ITS', 'HIGHEST', 'PERFECTION', 'AND', 'THIS', 'IS', 'THE', 'RECIPE', 'THAT', 'WAS', 'FOLLOWED', 'WHEN', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', 'DELMONICO'] +367-130732-0005-1471: hyp=['IT', 'WAS', 'HERE', 'THAT', 'MOST', 'MAGNIFICENT', 'DINNERS', 'WERE', 'ARRANGED', 'IT', 'WAS', 'HERE', 'THAT', 'EXTRAORDINARY', 'DISHES', 'WERE', 'CALLED', 'COCTED', 'BY', 'CHEFTS', 'OF', 'WOOLWRIGHT', 'FAME', 'IT', 'WAS', 'HERE', 'THAT', 'LOBSTER', 'ALENUBERG', 'REACHED', 'ITS', 'HIGHEST', 'PERFECTION', 'AND', 'THIS', 'IS', 'THE', 'RECIPE', 'THAT', 'WAS', 'FOLLOW', 'WHEN', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', 'DEMONICO'] +367-130732-0006-1472: ref=['LOBSTER', 'A', 'LA', 'NEWBERG'] +367-130732-0006-1472: hyp=['LOBSTER', 'OLLA', 'NEWBERG'] +367-130732-0007-1473: ref=['ONE', 'POUND', 'OF', 'LOBSTER', 'MEAT', 'ONE', 'TEASPOONFUL', 'OF', 'BUTTER', 'ONE', 'HALF', 'PINT', 'OF', 'CREAM', 'YOLKS', 'OF', 'FOUR', 'EGGS', 'ONE', 'WINE', 'GLASS', 'OF', 'SHERRY', 'LOBSTER', 'FAT'] +367-130732-0007-1473: hyp=['ONE', 'POUND', 'OF', 'LOBS', 'TO', 'MEAT', 'ONE', 'TEASPOONFUL', 'OF', 'BUTTER', 'ONE', 'HALF', 'PINT', 'OF', 'CREAM', 'YOLKS', 'OF', 'FOUR', 'EGGS', 'ONE', 'WINE', 'GLASS', 'OF', 'SHERRY', 'LOBSTER', 'FAT'] +367-130732-0008-1474: ref=['PUT', 'THIS', 'IN', 'A', 'DOUBLE', 'BOILER', 'AND', 'LET', 'COOK', 'UNTIL', 'THICK', 'STIRRING', 'CONSTANTLY'] +367-130732-0008-1474: hyp=['PUT', 'THIS', 'IN', 'A', 'DOUBLE', 'WHIRLER', 'AND', 'LET', 'COOK', 'UNTIL', 'THICK', 'STIRRING', 'CONSTANTLY'] +367-130732-0009-1475: ref=['SERVE', 'IN', 'A', 'CHAFING', 'DISH', 'WITH', 'THIN', 'SLICES', 'OF', 'DRY', 'TOAST'] +367-130732-0009-1475: hyp=['SERVE', 'IN', 'A', 'CHAFING', 'DISH', 'WITH', 'FLITTON', 'SIZES', 'OF', 'DRY', 'TOAST'] +367-130732-0010-1476: ref=['KING', 'OF', 'SHELL', 'FISH'] +367-130732-0010-1476: hyp=['KING', 'OF', 'SHELLFISH'] +367-130732-0011-1477: ref=['ONE', 'HAS', 'TO', 'COME', 'TO', 'SAN', 'FRANCISCO', 'TO', 'PARTAKE', 'OF', 'THE', 'KING', 'OF', 'SHELL', 'FISH', 'THE', 'MAMMOTH', 'PACIFIC', 'CRAB'] +367-130732-0011-1477: hyp=['ONE', 'HAS', 'TO', 'COME', 'TO', 'SENT', 'FRANCISCO', 'TO', 'PARTAKE', 'OF', 'THE', 'KING', 'OF', 'SHELLFISH', 'THE', 'MAMMOTH', 'PACIFIC', 'CRAB'] +367-130732-0012-1478: ref=['I', 'SAY', 'COME', 'TO', 'SAN', 'FRANCISCO', 'ADVISEDLY', 'FOR', 'WHILE', 'THE', 'CRAB', 'IS', 'FOUND', 'ALL', 'ALONG', 'THE', 'COAST', 'IT', 'IS', 'PREPARED', 'NOWHERE', 'SO', 'DELICIOUSLY', 'AS', 'IN', 'SAN', 'FRANCISCO'] +367-130732-0012-1478: hyp=['I', 'SAY', 'COME', 'TO', 'SAN', 'FRANCISCO', 'ADVISEDLY', 'FOR', 'WHILE', 'THE', 'CRAB', 'IS', 'FOUND', 'ALL', 'ALONG', 'THE', 'COAST', 'IT', 'IS', 'PREPARED', 'NOWHERE', 'SO', 'DELICIOUSLY', 'AS', 'IN', 'SAN', 
'FRANCISCO'] +367-130732-0013-1479: ref=["GOBEY'S", 'PASSED', 'WITH', 'THE', 'FIRE', 'AND', 'THE', 'LITTLE', 'RESTAURANT', 'BEARING', 'HIS', 'NAME', 'AND', 'IN', 'CHARGE', 'OF', 'HIS', 'WIDOW', 'IN', 'UNION', 'SQUARE', 'AVENUE', 'HAS', 'NOT', 'ATTAINED', 'THE', 'FAME', 'OF', 'THE', 'OLD', 'PLACE'] +367-130732-0013-1479: hyp=["GOBY'S", 'PASS', 'WITH', 'THE', 'FIRE', 'AND', 'THE', 'LITTLE', 'RESTAURANT', 'BEARING', 'HIS', 'NAME', 'AND', 'IN', 'CHARGE', 'OF', 'HIS', 'WIDOW', 'IN', 'UNION', 'SQUARE', 'AVENUE', 'HAS', 'NOT', 'ATTAINED', 'THE', 'FAME', 'OF', 'THE', 'OLD', 'PLACE'] +367-130732-0014-1480: ref=['IT', 'IS', 'POSSIBLE', 'THAT', 'SHE', 'KNOWS', 'THE', 'SECRET', 'OF', 'PREPARING', 'CRAB', 'AS', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', "GOBEY'S", 'OF', 'BEFORE', 'THE', 'FIRE', 'BUT', 'HIS', 'PRESTIGE', 'DID', 'NOT', 'DESCEND', 'TO', 'HER'] +367-130732-0014-1480: hyp=['IT', 'IS', 'POSSIBLE', 'THAT', 'SHE', 'KNOWS', 'THE', 'SECRET', 'OF', 'PREPARING', 'CRAB', 'AS', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', 'GOBIES', 'OF', 'BEFORE', 'THE', 'FIRE', 'BUT', 'HIS', 'BESIEGE', 'DID', 'NOT', 'DESCEND', 'TO', 'HER'] +367-130732-0015-1481: ref=["GOBEY'S", 'CRAB', 'STEW'] +367-130732-0015-1481: hyp=['GOBIAS', 'CRABS', 'DO'] +367-130732-0016-1482: ref=['TAKE', 'THE', 'MEAT', 'OF', 'ONE', 'LARGE', 'CRAB', 'SCRAPING', 'OUT', 'ALL', 'OF', 'THE', 'FAT', 'FROM', 'THE', 'SHELL'] +367-130732-0016-1482: hyp=['TAKE', 'THE', 'MEAT', 'OF', 'ONE', 'LARGE', 'CRAB', 'SCRAPING', 'OUT', 'ALL', 'THE', 'BAT', 'FROM', 'THE', 'SHELL'] +367-130732-0017-1483: ref=['SOAK', 'THE', 'CRAB', 'MEAT', 'IN', 'THE', 'SHERRY', 'TWO', 'HOURS', 'BEFORE', 'COOKING'] +367-130732-0017-1483: hyp=['SOAK', 'THE', 'CRAB', 'ME', 'IN', 'THE', 'SHERRY', 'TWO', 'HOURS', 'BEFORE', 'COOKING'] +367-130732-0018-1484: ref=['CHOP', 'FINE', 'THE', 'ONION', 'SWEET', 'PEPPER', 'AND', 'TOMATO', 'WITH', 'THE', 'ROSEMARY'] +367-130732-0018-1484: hyp=['CHOP', 'FINE', 'THE', 'ONION', 'SWEEP', 'PEPPER', 'INTO', 'METAL', 'WITH', 'THE', 'ROSEMARY'] +367-130732-0019-1485: ref=['HEAT', 'THIS', 'IN', 'A', 'STEWPAN', 'AND', 'WHEN', 'SIMMERING', 'ADD', 'THE', 'SHERRY', 'AND', 'CRAB', 'MEAT', 'AND', 'LET', 'ALL', 'COOK', 'TOGETHER', 'WITH', 'A', 'SLOW', 'FIRE', 'FOR', 'EIGHT', 'MINUTES'] +367-130732-0019-1485: hyp=['HEATLESS', 'IN', 'A', 'STEWPENT', 'AND', 'WHEN', 'SIMMERING', 'ADD', 'THE', 'SHERRY', 'AND', 'CRAB', 'ME', 'AND', 'LET', 'ALL', 'COOK', 'TOGETHER', 'WITH', 'THE', 'SLOW', 'FIRE', 'FOR', 'EIGHT', 'MINUTES'] +367-130732-0020-1486: ref=['SERVE', 'IN', 'A', 'CHAFING', 'DISH', 'WITH', 'TOASTED', 'CRACKERS', 'OR', 'THIN', 'SLICES', 'OF', 'TOASTED', 'BREAD'] +367-130732-0020-1486: hyp=['SERVE', 'IN', 'THE', 'CHAFING', 'DISH', 'WITH', 'TOASTED', 'CRACKERS', 'OR', 'THIN', 'SLICES', 'OF', 'TOASTED', 'BREAD'] +367-130732-0021-1487: ref=['LOBSTER', 'IN', 'MINIATURE'] +367-130732-0021-1487: hyp=['LOBSTER', 'AND', 'MINIATURE'] +367-130732-0022-1488: ref=['SO', 'FAR', 'IT', 'HAS', 'BEEN', 'USED', 'MOSTLY', 'FOR', 'GARNISHMENT', 'OF', 'OTHER', 'DISHES', 'AND', 'IT', 'IS', 'ONLY', 'RECENTLY', 'THAT', 'THE', 'HOF', 'BRAU', 'HAS', 'BEEN', 'MAKING', 'A', 'SPECIALTY', 'OF', 'THEM'] +367-130732-0022-1488: hyp=['SO', 'FAR', 'IT', 'HAS', 'BEEN', 'USED', 'MOSTLY', 'FOR', 'GARNISHMENT', 'OF', 'OTHER', 'DISHES', 'AND', 'IT', 'IS', 'ONLY', 'RECENTLY', 'THAT', 'THE', 'WHOLE', 'BROW', 'HAS', 'BEEN', 'MAKING', 'ESPECIALTY', 'OF', 'THEM'] +367-130732-0023-1489: ref=['ALL', 'OF', 'THE', 'BETTER', 'CLASS', 'RESTAURANTS', 'HOWEVER', 'WILL', 'SERVE', 'THEM', 'IF', 'YOU', 'ORDER', 'THEM'] 
+367-130732-0023-1489: hyp=['ALL', 'THE', 'BETTER', 'CLASS', 'RESTAURANTS', 'HOWEVER', 'WILL', 'SERVE', 'THEM', 'IF', 'YOU', 'ORDER', 'THEM'] +367-130732-0024-1490: ref=['THIS', 'IS', 'THE', 'RECIPE', 'FOR', 'EIGHT', 'PEOPLE', 'AND', 'IT', 'IS', 'WELL', 'WORTH', 'TRYING', 'IF', 'YOU', 'ARE', 'GIVING', 'A', 'DINNER', 'OF', 'IMPORTANCE'] +367-130732-0024-1490: hyp=['THIS', 'IS', 'THE', 'RECIPE', 'FOR', 'EIGHT', 'PEOPLE', 'AND', 'IT', 'IS', 'WELL', 'IT', 'WORTH', 'TRYING', 'IF', 'YOU', 'ARE', 'GIVING', 'A', 'DINNER', 'OF', 'IMPORTANCE'] +367-130732-0025-1491: ref=['BISQUE', 'OF', 'CRAWFISH'] +367-130732-0025-1491: hyp=['DISK', 'OF', 'CRAWFISH'] +367-130732-0026-1492: ref=['TAKE', 'THIRTY', 'CRAWFISH', 'FROM', 'WHICH', 'REMOVE', 'THE', 'GUT', 'CONTAINING', 'THE', 'GALL', 'IN', 'THE', 'FOLLOWING', 'MANNER', 'TAKE', 'FIRM', 'HOLD', 'OF', 'THE', 'CRAWFISH', 'WITH', 'THE', 'LEFT', 'HAND', 'SO', 'AS', 'TO', 'AVOID', 'BEING', 'PINCHED', 'BY', 'ITS', 'CLAWS', 'WITH', 'THE', 'THUMB', 'AND', 'FOREFINGER', 'OF', 'THE', 'RIGHT', 'HAND', 'PINCH', 'THE', 'EXTREME', 'END', 'OF', 'THE', 'CENTRAL', 'FIN', 'OF', 'THE', 'TAIL', 'AND', 'WITH', 'A', 'SUDDEN', 'JERK', 'THE', 'GUT', 'WILL', 'BE', 'WITHDRAWN'] +367-130732-0026-1492: hyp=['TAKE', 'THIRTY', 'CRAWFISH', 'FROM', 'WHICH', 'REMOVE', 'THE', 'GUT', 'CONTAINING', 'THE', 'GALL', 'IN', 'THE', 'FOLLOWING', 'MANNER', 'TAKE', 'FIRM', 'HOLD', 'OF', 'THE', 'CISH', 'WITH', 'THE', 'LEFT', 'HAND', 'SO', 'AS', 'TO', 'AVOID', 'BEING', 'PINCHED', 'BY', 'ITS', 'CLOTH', 'WITH', 'THE', 'THUMB', 'AND', 'FOREFINGER', 'OF', 'THE', 'RIGHT', 'HAND', 'PINCH', 'THE', 'EXTREME', 'END', 'OF', 'THE', 'CENTRAL', 'FIN', 'OF', 'THE', 'TAIL', 'AND', 'WITH', 'A', 'SUDDEN', 'JERK', 'THE', 'GUT', 'WILL', 'BE', 'WITHDRAWN'] +367-130732-0027-1493: ref=['MINCE', 'OR', 'CUT', 'INTO', 'SMALL', 'DICE', 'A', 'CARROT', 'AN', 'ONION', 'ONE', 'HEAD', 'OF', 'CELERY', 'AND', 'A', 'FEW', 'PARSLEY', 'ROOTS', 'AND', 'TO', 'THESE', 'ADD', 'A', 'BAY', 'LEAF', 'A', 'SPRIG', 'OF', 'THYME', 'A', 'LITTLE', 'MINIONETTE', 'PEPPER', 'AND', 'TWO', 'OUNCES', 'OF', 'BUTTER'] +367-130732-0027-1493: hyp=['MINSER', 'CUT', 'INTO', 'SMALL', 'DICE', 'A', 'CARROT', 'AND', 'ONION', 'ONE', 'HEAD', 'OF', 'CELERY', 'AND', 'A', 'FEW', 'PARSLEY', 'ROOTS', 'AND', 'TO', 'THESE', 'AT', 'A', 'BAY', 'LEAF', 'A', 'SPRIG', 'OF', 'THYME', 'A', 'LITTLE', 'MINOR', 'NUT', 'PEPPER', 'AND', 'TWO', 'OUNCE', 'OF', 'BUTTER'] +367-130732-0028-1494: ref=['PUT', 'THESE', 'INGREDIENTS', 'INTO', 'A', 'STEWPAN', 'AND', 'FRY', 'THEM', 'TEN', 'MINUTES', 'THEN', 'THROW', 'IN', 'THE', 'CRAWFISH', 'AND', 'POUR', 'ON', 'THEM', 'HALF', 'A', 'BOTTLE', 'OF', 'FRENCH', 'WHITE', 'WINE'] +367-130732-0028-1494: hyp=['PUT', 'THESE', 'INGREDIENTS', 'INTO', 'A', 'STEWPAN', 'AND', 'FRY', 'THEM', 'TEN', 'MINUTES', 'THEN', 'THROW', 'IN', 'THE', 'CROPPISH', 'AND', 'POUR', 'ON', 'THEM', 'HALF', 'A', 'BOTTLE', 'OF', 'FRENCH', 'WHITE', 'WINE'] +367-130732-0029-1495: ref=['ALLOW', 'THIS', 'TO', 'BOIL', 'AND', 'THEN', 'ADD', 'A', 'QUART', 'OF', 'STRONG', 'CONSOMME', 'AND', 'LET', 'ALL', 'CONTINUE', 'BOILING', 'FOR', 'HALF', 'AN', 'HOUR'] +367-130732-0029-1495: hyp=['ALLOW', 'US', 'TO', 'BOIL', 'AND', 'THEN', 'ADD', 'A', 'QUART', 'OF', 'STRONG', 'CONSUM', 'AND', 'LET', 'ALL', 'CONTINUE', 'BOILING', 'FOR', 'HALF', 'AN', 'HOUR'] +367-130732-0030-1496: ref=['PICK', 'OUT', 'THE', 'CRAWFISH', 'AND', 'STRAIN', 'THE', 'BROTH', 'THROUGH', 'A', 'NAPKIN', 'BY', 'PRESSURE', 'INTO', 'A', 'BASIN', 'IN', 'ORDER', 'TO', 'EXTRACT', 'ALL', 'THE', 'ESSENCE', 'FROM', 'THE', 'VEGETABLES'] 
+367-130732-0030-1496: hyp=['PICK', 'OUT', 'THE', 'CRAWFISH', 'AND', 'STRAIN', 'THE', 'BROTH', 'THROUGH', 'A', 'NAPKIN', 'BY', 'PRESSURE', 'INTO', 'A', 'BASIN', 'IN', 'ORDER', 'TO', 'EXTRACT', 'ALL', 'THE', 'ESSENCE', 'FROM', 'THE', 'VEGETABLES'] +367-130732-0031-1497: ref=['PICK', 'THE', 'SHELLS', 'OFF', 'TWENTY', 'FIVE', 'OF', 'THE', 'CRAWFISH', 'TAILS', 'TRIM', 'THEM', 'NEATLY', 'AND', 'SET', 'THEM', 'ASIDE', 'UNTIL', 'WANTED'] +367-130732-0031-1497: hyp=['PICK', 'THE', 'SHELLS', 'OF', 'TWENTY', 'FIVE', 'OF', 'THE', 'CRAWFISH', 'TAILS', 'TRIM', 'THEM', 'NEATLY', 'AND', 'SET', 'THEM', 'ASIDE', 'UNTIL', 'WANTON'] +367-130732-0032-1498: ref=['RESERVE', 'SOME', 'OF', 'THE', 'SPAWN', 'ALSO', 'HALF', 'OF', 'THE', 'BODY', 'SHELLS', 'WITH', 'WHICH', 'TO', 'MAKE', 'THE', 'CRAWFISH', 'BUTTER', 'TO', 'FINISH', 'THE', 'SOUP'] +367-130732-0032-1498: hyp=['RESERVE', 'SOME', 'OF', 'THE', 'SPAWN', 'ALSO', 'HAPPEN', 'THE', 'BODY', 'SHELLS', 'WITH', 'WHICH', 'TO', 'MAKE', 'THE', 'COFFISH', 'BUTTER', 'TO', 'FINISH', 'THE', 'SOUP'] +367-130732-0033-1499: ref=['THIS', 'BUTTER', 'IS', 'MADE', 'AS', 'FOLLOWS', 'PLACE', 'THE', 'SHELLS', 'ON', 'A', 'BAKING', 'SHEET', 'IN', 'THE', 'OVEN', 'TO', 'DRY', 'LET', 'THE', 'SHELLS', 'COOL', 'AND', 'THEN', 'POUND', 'THEM', 'IN', 'A', 'MORTAR', 'WITH', 'A', 'LITTLE', 'LOBSTER', 'CORAL', 'AND', 'FOUR', 'OUNCES', 'OF', 'FRESH', 'BUTTER', 'THOROUGHLY', 'BRUISING', 'THE', 'WHOLE', 'TOGETHER', 'SO', 'AS', 'TO', 'MAKE', 'A', 'FINE', 'PASTE'] +367-130732-0033-1499: hyp=['THIS', 'BUTTER', 'IS', 'MADE', 'AS', 'FOLLOWS', 'PLACE', 'THE', 'SHELLS', 'IN', 'A', 'BAKING', 'SHEET', 'IN', 'THE', 'OVEN', 'TO', 'DRY', 'LET', 'THE', 'SHELLS', 'COOL', 'AND', 'THEN', 'POUND', 'THEM', 'IN', 'A', 'MORTAR', 'WITH', 'A', 'LITTLE', 'LOBSTER', 'COAL', 'AND', 'FOUR', 'OUNCES', 'OF', 'FRESH', 'BUTTER', 'THOROUGHLY', 'BRUISING', 'THE', 'WHOLE', 'TOGETHER', 'SO', 'AS', 'TO', 'MAKE', 'A', 'FINE', 'PASTE'] +367-293981-0000-1445: ref=['I', 'SWEAR', 'IT', 'ANSWERED', 'SANCHO'] +367-293981-0000-1445: hyp=['I', 'SWEAR', 'ANSWERED', 'SANCHO'] +367-293981-0001-1446: ref=['I', 'SAY', 'SO', 'CONTINUED', 'DON', 'QUIXOTE', 'BECAUSE', 'I', 'HATE', 'TAKING', 'AWAY', "ANYONE'S", 'GOOD', 'NAME'] +367-293981-0001-1446: hyp=['I', 'SAY', 'SO', 'CONTINUED', 'DON', 'QUIXOTE', 'BECAUSE', 'I', 'HATE', 'TAKING', 'AWAY', 'ANY', "ONE'S", 'GOOD', 'NAME'] +367-293981-0002-1447: ref=['I', 'SAY', 'REPLIED', 'SANCHO', 'THAT', 'I', 'SWEAR', 'TO', 'HOLD', 'MY', 'TONGUE', 'ABOUT', 'IT', 'TILL', 'THE', 'END', 'OF', 'YOUR', "WORSHIP'S", 'DAYS', 'AND', 'GOD', 'GRANT', 'I', 'MAY', 'BE', 'ABLE', 'TO', 'LET', 'IT', 'OUT', 'TOMORROW'] +367-293981-0002-1447: hyp=['I', 'SAY', 'REPLIED', 'SANCHO', 'THAT', 'I', 'SWEAR', 'TO', 'HOLD', 'MY', 'TONGUE', 'ABOUT', 'IT', 'TILL', 'THE', 'END', 'OF', 'YOUR', 'WORSHIP', 'STAYS', 'AND', 'GONE', 'GRANT', 'I', 'MAY', 'BE', 'ABLE', 'TO', 'LET', 'IT', 'OUT', 'TO', 'MORROW'] +367-293981-0003-1448: ref=['THOUGH', 'YOUR', 'WORSHIP', 'WAS', 'NOT', 'SO', 'BADLY', 'OFF', 'HAVING', 'IN', 'YOUR', 'ARMS', 'THAT', 'INCOMPARABLE', 'BEAUTY', 'YOU', 'SPOKE', 'OF', 'BUT', 'I', 'WHAT', 'DID', 'I', 'HAVE', 'EXCEPT', 'THE', 'HEAVIEST', 'WHACKS', 'I', 'THINK', 'I', 'HAD', 'IN', 'ALL', 'MY', 'LIFE'] +367-293981-0003-1448: hyp=['THOUGH', 'YOUR', 'WORSHIP', 'WAS', 'NOT', 'SO', 'BADLY', 'OFF', 'HAVING', 'IN', 'YOUR', 'ARMS', 'THE', 'INN', 'COMPARABLE', 'BEAUTY', 'YOU', 'SPOKE', 'OF', 'BUT', 'I', 'WHAT', 'DID', 'I', 'HAVE', 'EXCEPT', 'THE', 'HEAVIEST', 'WAX', 'THAT', 'I', 'THINK', 'I', 'HAD', 'IN', 'ALL', 'MY', 'LIFE'] 
+367-293981-0004-1449: ref=['UNLUCKY', 'ME', 'AND', 'THE', 'MOTHER', 'THAT', 'BORE', 'ME'] +367-293981-0004-1449: hyp=['UNLUCKY', 'ME', 'AND', 'THE', 'MOTHER', 'THAT', 'BORE', 'ME'] +367-293981-0005-1450: ref=["DIDN'T", 'I', 'SAY', 'SO', 'WORSE', 'LUCK', 'TO', 'MY', 'LINE', 'SAID', 'SANCHO'] +367-293981-0005-1450: hyp=["DIDN'T", 'I', 'SAY', 'SO', 'WORSE', 'LUCK', 'TO', 'MY', 'LINE', 'SAID', 'SANCHO'] +367-293981-0006-1451: ref=['IT', 'CANNOT', 'BE', 'THE', 'MOOR', 'ANSWERED', 'DON', 'QUIXOTE', 'FOR', 'THOSE', 'UNDER', 'ENCHANTMENT', 'DO', 'NOT', 'LET', 'THEMSELVES', 'BE', 'SEEN', 'BY', 'ANYONE'] +367-293981-0006-1451: hyp=['IT', 'CANNOT', 'BE', 'THE', 'MORE', 'ANSWERED', 'DON', 'QUIXOTE', 'FOR', 'THOSE', 'UNDER', 'ENCHANTMENT', 'DO', 'NOT', 'LET', 'THEMSELVES', 'BE', 'SEEN', 'BY', 'ANYONE'] +367-293981-0007-1452: ref=['IF', 'THEY', "DON'T", 'LET', 'THEMSELVES', 'BE', 'SEEN', 'THEY', 'LET', 'THEMSELVES', 'BE', 'FELT', 'SAID', 'SANCHO', 'IF', 'NOT', 'LET', 'MY', 'SHOULDERS', 'SPEAK', 'TO', 'THE', 'POINT'] +367-293981-0007-1452: hyp=['IF', 'THEY', 'DO', 'NOT', 'LET', 'THEMSELVES', 'BE', 'SEEN', 'THEY', 'LET', 'THEMSELVES', 'BE', 'FELT', 'SAID', 'SANCHO', 'IF', 'NOT', 'LET', 'MY', 'SHOULDER', 'SPEAK', 'TO', 'THE', 'POINT'] +367-293981-0008-1453: ref=['MINE', 'COULD', 'SPEAK', 'TOO', 'SAID', 'DON', 'QUIXOTE', 'BUT', 'THAT', 'IS', 'NOT', 'A', 'SUFFICIENT', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'WHAT', 'WE', 'SEE', 'IS', 'THE', 'ENCHANTED', 'MOOR'] +367-293981-0008-1453: hyp=['MIKE', 'HAD', 'SPEAK', 'TOO', 'SAID', 'DON', 'QUIXOTE', 'BUT', 'THAT', 'IS', 'NOT', 'A', 'SUSPICIENT', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'WHAT', 'WE', 'SEE', 'IS', 'THE', 'ENCHANTED', 'MOOR'] +367-293981-0009-1454: ref=['THE', 'OFFICER', 'TURNED', 'TO', 'HIM', 'AND', 'SAID', 'WELL', 'HOW', 'GOES', 'IT', 'GOOD', 'MAN'] +367-293981-0009-1454: hyp=['THE', 'OFFICERS', 'TURNED', 'ROOM', 'AND', 'SAID', 'WELL', 'HOW', 'GOES', 'A', 'GOOD', 'MAN'] +367-293981-0010-1455: ref=['SANCHO', 'GOT', 'UP', 'WITH', 'PAIN', 'ENOUGH', 'IN', 'HIS', 'BONES', 'AND', 'WENT', 'AFTER', 'THE', 'INNKEEPER', 'IN', 'THE', 'DARK', 'AND', 'MEETING', 'THE', 'OFFICER', 'WHO', 'WAS', 'LOOKING', 'TO', 'SEE', 'WHAT', 'HAD', 'BECOME', 'OF', 'HIS', 'ENEMY', 'HE', 'SAID', 'TO', 'HIM', 'SENOR', 'WHOEVER', 'YOU', 'ARE', 'DO', 'US', 'THE', 'FAVOUR', 'AND', 'KINDNESS', 'TO', 'GIVE', 'US', 'A', 'LITTLE', 'ROSEMARY', 'OIL', 'SALT', 'AND', 'WINE', 'FOR', 'IT', 'IS', 'WANTED', 'TO', 'CURE', 'ONE', 'OF', 'THE', 'BEST', 'KNIGHTS', 'ERRANT', 'ON', 'EARTH', 'WHO', 'LIES', 'ON', 'YONDER', 'BED', 'WOUNDED', 'BY', 'THE', 'HANDS', 'OF', 'THE', 'ENCHANTED', 'MOOR', 'THAT', 'IS', 'IN', 'THIS', 'INN'] +367-293981-0010-1455: hyp=['SANCHO', 'GOT', 'UP', 'WITH', 'PAIN', 'ENOUGH', 'IN', 'HIS', 'BONES', 'AND', 'WENT', 'OUT', 'TO', 'THE', 'INNKEEPER', 'IN', 'THE', 'DARK', 'IN', 'MEETING', 'THE', 'OFFICER', 'WHO', 'WAS', 'LOOKING', 'TO', 'SEE', 'WHAT', 'HAD', 'BECOME', 'OF', 'HIS', 'ENEMY', 'HE', 'SAID', 'TO', 'HIM', 'SIGNOR', 'WHOEVER', 'YOU', 'ARE', 'DO', 'US', 'THE', 'FAVOUR', 'AND', 'KINDNESS', 'TO', 'GIVE', 'US', 'A', 'LITTLE', 'ROSEMARY', 'OIL', 'SALT', 'AND', 'WHITE', 'FOR', 'IT', 'IS', 'WATER', 'TO', 'CURE', 'ONE', 'OF', 'OUR', 'BEST', 'KNIGHTS', 'ERRANT', 'ON', 'EARTH', 'WHO', 'LIES', 'ON', 'YONDER', 'BED', 'WOUNDED', 'BY', 'THE', 'HANDS', 'OF', 'THE', 'ENCHANTED', 'MOOR', 'THAT', 'IS', 'IN', 'THIS', 'INN'] +367-293981-0011-1456: ref=['TO', 'BE', 'BRIEF', 'HE', 'TOOK', 'THE', 'MATERIALS', 'OF', 'WHICH', 'HE', 'MADE', 'A', 'COMPOUND', 'MIXING', 'THEM', 'ALL', 'AND', 'BOILING', 
'THEM', 'A', 'GOOD', 'WHILE', 'UNTIL', 'IT', 'SEEMED', 'TO', 'HIM', 'THEY', 'HAD', 'COME', 'TO', 'PERFECTION'] +367-293981-0011-1456: hyp=['TO', 'BE', 'BRIEF', 'HE', 'TOOK', 'THE', 'MATERIORS', 'OF', 'WHICH', 'HE', 'MADE', 'A', 'COMPOUND', 'MIXING', 'THEM', 'ALL', 'BOILING', 'THEM', 'A', 'GOOD', 'WHILE', 'IT', 'UNTIL', 'IT', 'SEEMED', 'TO', 'HIM', 'THEY', 'HAD', 'COME', 'TO', 'PERFECTION'] +367-293981-0012-1457: ref=['SANCHO', 'PANZA', 'WHO', 'ALSO', 'REGARDED', 'THE', 'AMENDMENT', 'OF', 'HIS', 'MASTER', 'AS', 'MIRACULOUS', 'BEGGED', 'HIM', 'TO', 'GIVE', 'HIM', 'WHAT', 'WAS', 'LEFT', 'IN', 'THE', 'PIGSKIN', 'WHICH', 'WAS', 'NO', 'SMALL', 'QUANTITY'] +367-293981-0012-1457: hyp=['SANCHO', 'PANZA', 'WHO', 'ALSO', 'REGARDED', 'THE', 'AMENDMENT', 'OF', 'HIS', 'MASTER', 'AS', 'MIRACULOUS', 'BEGGED', 'HIM', 'TO', 'GIVE', 'HIM', 'WHAT', 'WAS', 'LEFT', 'IN', 'THE', 'PICTION', 'WHICH', 'WAS', 'NO', 'SMALL', 'QUANTITY'] +367-293981-0013-1458: ref=['DON', 'QUIXOTE', 'CONSENTED', 'AND', 'HE', 'TAKING', 'IT', 'WITH', 'BOTH', 'HANDS', 'IN', 'GOOD', 'FAITH', 'AND', 'WITH', 'A', 'BETTER', 'WILL', 'GULPED', 'DOWN', 'AND', 'DRAINED', 'OFF', 'VERY', 'LITTLE', 'LESS', 'THAN', 'HIS', 'MASTER'] +367-293981-0013-1458: hyp=['DON', 'QUIXOTE', 'CONSENTED', 'AND', 'HE', 'TAKING', 'IT', 'WITH', 'BOTH', 'HANDS', 'IN', 'GOOD', 'FAITH', 'AND', 'WITH', 'A', 'BETTER', 'WILL', 'GULPED', 'IT', 'DOWN', 'AND', 'DRAINED', 'OFF', 'VERY', 'LITTLE', 'LESS', 'THAN', 'HIS', 'MASTER'] +367-293981-0014-1459: ref=['IF', 'YOUR', 'WORSHIP', 'KNEW', 'THAT', 'RETURNED', 'SANCHO', 'WOE', 'BETIDE', 'ME', 'AND', 'ALL', 'MY', 'KINDRED', 'WHY', 'DID', 'YOU', 'LET', 'ME', 'TASTE', 'IT'] +367-293981-0014-1459: hyp=['IF', 'YOUR', 'WORSHIP', 'KNEW', 'THAT', 'RETURNED', 'SANCHO', 'WOE', 'BETIDE', 'ME', 'IN', 'ALL', 'MY', 'KINDRED', 'WHY', 'DID', 'YOU', 'LET', 'ME', 'TASTE', 'HIM'] +367-293981-0015-1460: ref=['SEARCH', 'YOUR', 'MEMORY', 'AND', 'IF', 'YOU', 'FIND', 'ANYTHING', 'OF', 'THIS', 'KIND', 'YOU', 'NEED', 'ONLY', 'TELL', 'ME', 'OF', 'IT', 'AND', 'I', 'PROMISE', 'YOU', 'BY', 'THE', 'ORDER', 'OF', 'KNIGHTHOOD', 'WHICH', 'I', 'HAVE', 'RECEIVED', 'TO', 'PROCURE', 'YOU', 'SATISFACTION', 'AND', 'REPARATION', 'TO', 'THE', 'UTMOST', 'OF', 'YOUR', 'DESIRE'] +367-293981-0015-1460: hyp=['SEARCH', 'YOUR', 'MEMORY', 'AND', 'IF', 'YOU', 'FIND', 'ANYTHING', 'OF', 'THIS', 'KIND', 'YOU', 'NEED', 'ONLY', 'TELL', 'ME', 'OF', 'IT', 'AND', 'I', 'PROMISE', 'YOU', 'BY', 'THE', 'ORDER', 'OF', 'KNIGHTHOOD', 'WHICH', 'I', 'HAVE', 'RECEIVED', 'TO', 'PROCURE', 'YOU', 'SATISFACTION', 'IN', 'REPARATION', 'TO', 'THE', 'UTMOST', 'OF', 'YOUR', 'DESIRE'] +367-293981-0016-1461: ref=['THEN', 'THIS', 'IS', 'AN', 'INN', 'SAID', 'DON', 'QUIXOTE'] +367-293981-0016-1461: hyp=['THEN', 'THIS', 'IS', 'AN', 'IN', 'SAID', 'DON', 'QUIXOTE'] +367-293981-0017-1462: ref=['AND', 'A', 'VERY', 'RESPECTABLE', 'ONE', 'SAID', 'THE', 'INNKEEPER'] +367-293981-0017-1462: hyp=['IN', 'A', 'VERY', 'RESPECTABLE', 'ONE', 'SAID', 'THE', 'INNKEEPER'] +367-293981-0018-1463: ref=['THE', 'CRIES', 'OF', 'THE', 'POOR', 'BLANKETED', 'WRETCH', 'WERE', 'SO', 'LOUD', 'THAT', 'THEY', 'REACHED', 'THE', 'EARS', 'OF', 'HIS', 'MASTER', 'WHO', 'HALTING', 'TO', 'LISTEN', 'ATTENTIVELY', 'WAS', 'PERSUADED', 'THAT', 'SOME', 'NEW', 'ADVENTURE', 'WAS', 'COMING', 'UNTIL', 'HE', 'CLEARLY', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'HIS', 'SQUIRE', 'WHO', 'UTTERED', 'THEM'] +367-293981-0018-1463: hyp=['THE', 'CRIES', 'OF', 'THE', 'POOR', 'BLANKET', 'WRETCH', 'WERE', 'SO', 'LOUD', 'THAT', 'THEY', 'REACHED', 'THE', 'EARS', 'OF', 'HIS', 
'MASTER', 'WHO', 'HALTING', 'TO', 'LISTEN', 'ATTENTIVELY', 'WAS', 'PERSUADED', 'THAT', 'SOME', 'NEW', 'ADVENTURE', 'WAS', 'COMING', 'UNTIL', 'HE', 'CLEARLY', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'HIS', 'SQUIRE', 'WHO', 'UTTERED', 'THEM'] +367-293981-0019-1464: ref=['HE', 'SAW', 'HIM', 'RISING', 'AND', 'FALLING', 'IN', 'THE', 'AIR', 'WITH', 'SUCH', 'GRACE', 'AND', 'NIMBLENESS', 'THAT', 'HAD', 'HIS', 'RAGE', 'ALLOWED', 'HIM', 'IT', 'IS', 'MY', 'BELIEF', 'HE', 'WOULD', 'HAVE', 'LAUGHED'] +367-293981-0019-1464: hyp=['HE', 'SAW', 'HIM', 'RISING', 'AND', 'FALLING', 'IN', 'THE', 'AIR', 'WITH', 'SUCH', 'GRACE', 'AND', 'NIMBLENESS', 'THAT', 'HAD', 'HIS', 'RAGE', 'ALLOWED', 'HIM', 'IT', 'IS', 'MY', 'BELIEF', 'HE', 'WOULD', 'HAVE', 'LAUGHED'] +367-293981-0020-1465: ref=['SANCHO', 'TOOK', 'IT', 'AND', 'AS', 'HE', 'WAS', 'RAISING', 'IT', 'TO', 'HIS', 'MOUTH', 'HE', 'WAS', 'STOPPED', 'BY', 'THE', 'CRIES', 'OF', 'HIS', 'MASTER', 'EXCLAIMING', 'SANCHO', 'MY', 'SON', 'DRINK', 'NOT', 'WATER', 'DRINK', 'IT', 'NOT', 'MY', 'SON', 'FOR', 'IT', 'WILL', 'KILL', 'THEE', 'SEE', 'HERE', 'I', 'HAVE', 'THE', 'BLESSED', 'BALSAM', 'AND', 'HE', 'HELD', 'UP', 'THE', 'FLASK', 'OF', 'LIQUOR', 'AND', 'WITH', 'DRINKING', 'TWO', 'DROPS', 'OF', 'IT', 'THOU', 'WILT', 'CERTAINLY', 'BE', 'RESTORED'] +367-293981-0020-1465: hyp=['SANCHO', 'TOOK', 'IT', 'AND', 'AS', 'HE', 'WAS', 'RAISING', 'IT', 'TO', 'HIS', 'MOUTH', 'HE', 'WAS', 'STOPPED', 'BY', 'THE', 'CRIES', 'OF', 'HIS', 'MASTER', 'EXCLAIMING', 'SANCHO', 'MY', 'SON', 'DRINK', 'NOT', 'WATER', 'DRINK', 'IT', 'OUT', 'MY', 'SON', 'FOR', 'IT', 'WILL', 'KILL', 'THEE', 'SEE', 'HERE', 'I', 'HAD', 'THE', 'BLESSED', 'BALSAM', 'AND', 'HE', 'HELD', 'UP', 'THE', 'FLASK', 'OF', 'LIQUOR', 'AND', 'WITH', 'DRINKING', 'TWO', 'DROPS', 'WHAT', 'THOU', 'WILT', 'CERTAINLY', 'BE', 'RESTORED'] +3764-168670-0000-1666: ref=['THE', 'STRIDES', 'OF', 'A', 'LAME', 'MAN', 'ARE', 'LIKE', 'THE', 'OGLING', 'GLANCES', 'OF', 'A', 'ONE', 'EYED', 'MAN', 'THEY', 'DO', 'NOT', 'REACH', 'THEIR', 'GOAL', 'VERY', 'PROMPTLY'] +3764-168670-0000-1666: hyp=['THE', 'STRIDES', 'OF', 'A', 'LAME', 'MAN', 'LIKE', 'THE', 'OGLING', 'GLANCES', 'OF', 'A', 'ONE', 'EYED', 'MAN', 'THEY', 'DO', 'NOT', 'REACH', 'THEIR', 'GOAL', 'VERY', 'PROMPTLY'] +3764-168670-0001-1667: ref=['COSETTE', 'HAD', 'WAKED', 'UP'] +3764-168670-0001-1667: hyp=['COSETTE', 'HAD', 'WAKED', 'UP'] +3764-168670-0002-1668: ref=['JEAN', 'VALJEAN', 'HAD', 'PLACED', 'HER', 'NEAR', 'THE', 'FIRE'] +3764-168670-0002-1668: hyp=['JEAN', 'VALJEAN', 'HAD', 'PLACED', 'HER', 'NEAR', 'THE', 'FIRE'] +3764-168670-0003-1669: ref=['YOU', 'WILL', 'WAIT', 'FOR', 'ME', 'AT', 'A', "LADY'S", 'HOUSE', 'I', 'SHALL', 'COME', 'TO', 'FETCH', 'YOU'] +3764-168670-0003-1669: hyp=['YOU', 'WILL', 'WAIT', 'FOR', 'ME', 'AT', 'A', "LADY'S", 'HOUSE', 'I', 'SHALL', 'COME', 'TO', 'FETCH', 'YOU'] +3764-168670-0004-1670: ref=['EVERYTHING', 'IS', 'ARRANGED', 'AND', 'NOTHING', 'IS', 'SAID', 'FAUCHELEVENT'] +3764-168670-0004-1670: hyp=['EVERYTHING', 'IS', 'ARRANGED', 'AND', 'NOTHING', 'IS', 'SAID', 'FAUCHELEVENT'] +3764-168670-0005-1671: ref=['I', 'HAVE', 'PERMISSION', 'TO', 'BRING', 'YOU', 'IN', 'BUT', 'BEFORE', 'BRINGING', 'YOU', 'IN', 'YOU', 'MUST', 'BE', 'GOT', 'OUT'] +3764-168670-0005-1671: hyp=['I', 'HAVE', 'PERMISSION', 'TO', 'BRING', 'YOU', 'IN', 'BUT', 'BEFORE', 'BRINGING', 'YOU', 'IN', 'YOU', 'MUST', 'BE', 'GOT', 'OUT'] +3764-168670-0006-1672: ref=["THAT'S", 'WHERE', 'THE', 'DIFFICULTY', 'LIES'] +3764-168670-0006-1672: hyp=["THAT'S", 'WHERE', 'THE', 'DIFFICULTY', 'LIES'] +3764-168670-0007-1673: ref=['IT', 
'IS', 'EASY', 'ENOUGH', 'WITH', 'THE', 'CHILD', 'YOU', 'WILL', 'CARRY', 'HER', 'OUT'] +3764-168670-0007-1673: hyp=['IT', 'IS', 'EASY', 'ENOUGH', 'WITH', 'THE', 'CHILD', 'YOU', 'WILL', 'CARRY', 'HER', 'OUT'] +3764-168670-0008-1674: ref=['AND', 'SHE', 'WILL', 'HOLD', 'HER', 'TONGUE', 'I', 'ANSWER', 'FOR', 'THAT'] +3764-168670-0008-1674: hyp=['AND', 'SHE', 'WILL', 'HOLD', 'HER', 'TONGUE', 'I', 'ANSWER', 'FOR', 'THAT'] +3764-168670-0009-1675: ref=['FAUCHELEVENT', 'GRUMBLED', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'JEAN', 'VALJEAN'] +3764-168670-0009-1675: hyp=['FOR', 'SCHLEVENT', 'GRUMBLED', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'JEAN', 'VALJEAN'] +3764-168670-0010-1676: ref=['YOU', 'UNDERSTAND', 'FATHER', 'MADELEINE', 'THE', 'GOVERNMENT', 'WILL', 'NOTICE', 'IT'] +3764-168670-0010-1676: hyp=['YOU', 'UNDERSTAND', 'FATHER', 'MADELEIN', 'THE', 'GOVERNMENT', 'WILL', 'NOTICE', 'IT'] +3764-168670-0011-1677: ref=['JEAN', 'VALJEAN', 'STARED', 'HIM', 'STRAIGHT', 'IN', 'THE', 'EYE', 'AND', 'THOUGHT', 'THAT', 'HE', 'WAS', 'RAVING'] +3764-168670-0011-1677: hyp=['JEAN', 'VALJEAN', 'STARED', 'HIM', 'STRAIGHT', 'IN', 'THE', 'EYE', 'AND', 'THOUGHT', 'THAT', 'HE', 'WAS', 'RAVING'] +3764-168670-0012-1678: ref=['FAUCHELEVENT', 'WENT', 'ON'] +3764-168670-0012-1678: hyp=['FOUCHELEVENT', 'WENT', 'ON'] +3764-168670-0013-1679: ref=['IT', 'IS', 'TO', 'MORROW', 'THAT', 'I', 'AM', 'TO', 'BRING', 'YOU', 'IN', 'THE', 'PRIORESS', 'EXPECTS', 'YOU'] +3764-168670-0013-1679: hyp=['IT', 'IS', 'TO', 'MORROW', 'THAT', 'I', 'AM', 'TO', 'BRING', 'YOU', 'IN', 'THE', 'PRIORS', 'EXPECTS', 'YOU'] +3764-168670-0014-1680: ref=['THEN', 'HE', 'EXPLAINED', 'TO', 'JEAN', 'VALJEAN', 'THAT', 'THIS', 'WAS', 'HIS', 'RECOMPENSE', 'FOR', 'A', 'SERVICE', 'WHICH', 'HE', 'FAUCHELEVENT', 'WAS', 'TO', 'RENDER', 'TO', 'THE', 'COMMUNITY'] +3764-168670-0014-1680: hyp=['THEN', 'HE', 'EXPLAINED', 'TO', 'JEAN', 'VALJEAN', 'THAT', 'THIS', 'WAS', 'HIS', 'RECOMPENSE', 'FOR', 'A', 'SERVICE', 'WHICH', 'HE', 'THRAUCHELEVENT', 'WAS', 'SURRENDER', 'TO', 'THE', 'COMMUNITY'] +3764-168670-0015-1681: ref=['THAT', 'THE', 'NUN', 'WHO', 'HAD', 'DIED', 'THAT', 'MORNING', 'HAD', 'REQUESTED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'COFFIN', 'WHICH', 'HAD', 'SERVED', 'HER', 'FOR', 'A', 'BED', 'AND', 'INTERRED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL'] +3764-168670-0015-1681: hyp=['THAT', 'THE', 'NUN', 'WHO', 'HAD', 'DIED', 'THAT', 'MORNING', 'HAD', 'REQUESTED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'COFFIN', 'WHICH', 'HAD', 'SERVED', 'HER', 'FOR', 'A', 'BED', 'AND', 'INTERRED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL'] +3764-168670-0016-1682: ref=['THAT', 'THE', 'PRIORESS', 'AND', 'THE', 'VOCAL', 'MOTHERS', 'INTENDED', 'TO', 'FULFIL', 'THE', 'WISH', 'OF', 'THE', 'DECEASED'] +3764-168670-0016-1682: hyp=['THAT', 'THE', 'PRIORS', 'AND', 'THE', 'VOCAL', 'MOTHERS', 'INTENDED', 'TO', 'FULFIL', 'THE', 'WISH', 'OF', 'THE', 'DECEASED'] +3764-168670-0017-1683: ref=['THAT', 'HE', 'FAUCHELEVENT', 'WAS', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN', 'IN', 'THE', 'CELL', 'RAISE', 'THE', 'STONE', 'IN', 'THE', 'CHAPEL', 'AND', 'LOWER', 'THE', 'CORPSE', 'INTO', 'THE', 'VAULT'] +3764-168670-0017-1683: hyp=['THAT', 'HE', 'FOR', 'SCHLEVENT', 'WAS', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN', 'IN', 'THE', 'CELL', 'RAISE', 'THE', 'STONE', 'IN', 'THE', 'CHAPEL', 'AND', 'BLOW', 'THE', 'CORPSE', 'INTO', 'THE', 'VAULT'] +3764-168670-0018-1684: ref=['AND', 'THEN', 'THAT', 'THERE', 'WAS', 'ANOTHER', 'THE', 'EMPTY', 'COFFIN'] +3764-168670-0018-1684: hyp=['AND', 'THEN', 
'THAT', 'THERE', 'WAS', 'ANOTHER', 'THE', 'EMPTY', 'COFFIN'] +3764-168670-0019-1685: ref=['WHAT', 'IS', 'THAT', 'EMPTY', 'COFFIN'] +3764-168670-0019-1685: hyp=['WHAT', 'IS', 'THAT', 'EMPTY', 'COFFIN'] +3764-168670-0020-1686: ref=['ASKED', 'JEAN', 'VALJEAN', 'FAUCHELEVENT', 'REPLIED'] +3764-168670-0020-1686: hyp=['ASKED', 'JEAN', 'VALJEAN', 'FAUCHELEVENT', 'REPLIED'] +3764-168670-0021-1687: ref=['WHAT', 'COFFIN', 'WHAT', 'ADMINISTRATION'] +3764-168670-0021-1687: hyp=['WHAT', 'COFFIN', 'WHAT', 'ADMINISTRATION'] +3764-168670-0022-1688: ref=['FAUCHELEVENT', 'WHO', 'WAS', 'SEATED', 'SPRANG', 'UP', 'AS', 'THOUGH', 'A', 'BOMB', 'HAD', 'BURST', 'UNDER', 'HIS', 'CHAIR', 'YOU'] +3764-168670-0022-1688: hyp=['SO', 'SLAVENT', 'WHO', 'WAS', 'SEATED', 'SPRANG', 'UP', 'AS', 'THOUGH', 'A', 'BOMB', 'HAD', 'BURST', 'UNDER', 'HIS', 'CHAIR', 'YOU'] +3764-168670-0023-1689: ref=['YOU', 'KNOW', 'FAUCHELEVENT', 'WHAT', 'YOU', 'HAVE', 'SAID', 'MOTHER', 'CRUCIFIXION', 'IS', 'DEAD'] +3764-168670-0023-1689: hyp=['YOU', 'KNOW', 'FOURCHELEVENT', 'WHAT', 'YOU', 'HAVE', 'SAID', 'MOTHER', 'CRUCIFIXION', 'IS', 'DEAD'] +3764-168670-0024-1690: ref=['AND', 'I', 'ADD', 'AND', 'FATHER', 'MADELEINE', 'IS', 'BURIED', 'AH'] +3764-168670-0024-1690: hyp=['AND', 'I', 'ADD', 'AND', 'FATHER', 'MADELEIN', 'IS', 'BURIED', 'AH'] +3764-168670-0025-1691: ref=['YOU', 'ARE', 'NOT', 'LIKE', 'OTHER', 'MEN', 'FATHER', 'MADELEINE'] +3764-168670-0025-1691: hyp=['YOU', 'ARE', 'NOT', 'LIKE', 'OTHER', 'MEN', 'FATHER', 'MADELEINE'] +3764-168670-0026-1692: ref=['THIS', 'OFFERS', 'THE', 'MEANS', 'BUT', 'GIVE', 'ME', 'SOME', 'INFORMATION', 'IN', 'THE', 'FIRST', 'PLACE'] +3764-168670-0026-1692: hyp=['THIS', 'OFFERS', 'THE', 'MEANS', 'BUT', 'GIVE', 'ME', 'SOME', 'INFORMATION', 'IN', 'THE', 'FIRST', 'PLACE'] +3764-168670-0027-1693: ref=['HOW', 'LONG', 'IS', 'THE', 'COFFIN', 'SIX', 'FEET'] +3764-168670-0027-1693: hyp=['HOW', 'LONG', 'IS', 'THE', 'COFFIN', 'SIX', 'FEET'] +3764-168670-0028-1694: ref=['IT', 'IS', 'A', 'CHAMBER', 'ON', 'THE', 'GROUND', 'FLOOR', 'WHICH', 'HAS', 'A', 'GRATED', 'WINDOW', 'OPENING', 'ON', 'THE', 'GARDEN', 'WHICH', 'IS', 'CLOSED', 'ON', 'THE', 'OUTSIDE', 'BY', 'A', 'SHUTTER', 'AND', 'TWO', 'DOORS', 'ONE', 'LEADS', 'INTO', 'THE', 'CONVENT', 'THE', 'OTHER', 'INTO', 'THE', 'CHURCH', 'WHAT', 'CHURCH'] +3764-168670-0028-1694: hyp=['IT', 'IS', 'A', 'CHAMBER', 'ON', 'THE', 'GROUND', 'FLOOR', 'WHICH', 'HAS', 'A', 'GRATED', 'WINDOW', 'OPENING', 'ON', 'THE', 'GARDEN', 'WHICH', 'IS', 'CLOSED', 'ON', 'THE', 'OUTSIDE', 'BY', 'A', 'SHUTTER', 'AND', 'TWO', 'DOORS', 'ONE', 'LEADS', 'INTO', 'THE', 'CONVENT', 'THE', 'OTHER', 'INTO', 'THE', 'CHURCH', 'A', 'WATCH'] +3764-168670-0029-1695: ref=['THE', 'CHURCH', 'IN', 'THE', 'STREET', 'THE', 'CHURCH', 'WHICH', 'ANY', 'ONE', 'CAN', 'ENTER'] +3764-168670-0029-1695: hyp=['THE', 'CHURCH', 'IN', 'THE', 'STREET', 'AT', 'THE', 'CHURCH', 'WHICH', 'ANY', 'ONE', 'CAN', 'ENTER'] +3764-168670-0030-1696: ref=['HAVE', 'YOU', 'THE', 'KEYS', 'TO', 'THOSE', 'TWO', 'DOORS'] +3764-168670-0030-1696: hyp=['HAVE', 'YOU', 'THE', 'KEYS', 'TO', 'THOSE', 'TWO', 'DOORS'] +3764-168670-0031-1697: ref=['NO', 'I', 'HAVE', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CONVENT', 'THE', 'PORTER', 'HAS', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CHURCH'] +3764-168670-0031-1697: hyp=['NO', 'I', 'HAVE', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CONVENT', 'THE', 'PORTER', 'HAS', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 
'THE', 'CHURCH'] +3764-168670-0032-1698: ref=['ONLY', 'TO', 'ALLOW', 'THE', "UNDERTAKER'S", 'MEN', 'TO', 'ENTER', 'WHEN', 'THEY', 'COME', 'TO', 'GET', 'THE', 'COFFIN'] +3764-168670-0032-1698: hyp=['ONLY', 'TO', 'ALLOW', 'THE', "UNDERTAKER'S", 'MEN', 'TO', 'ENTER', 'WHEN', 'THEY', 'COME', 'TO', 'GET', 'THE', 'COFFIN'] +3764-168670-0033-1699: ref=['WHO', 'NAILS', 'UP', 'THE', 'COFFIN', 'I', 'DO'] +3764-168670-0033-1699: hyp=['WHO', 'NAILS', 'UP', 'THE', 'COFFIN', 'I', 'DO'] +3764-168670-0034-1700: ref=['WHO', 'SPREADS', 'THE', 'PALL', 'OVER', 'IT'] +3764-168670-0034-1700: hyp=['WHO', 'SPREADS', 'THE', 'POOL', 'OVER', 'IT'] +3764-168670-0035-1701: ref=['NOT', 'ANOTHER', 'MAN', 'EXCEPT', 'THE', 'POLICE', 'DOCTOR', 'CAN', 'ENTER', 'THE', 'DEAD', 'ROOM', 'THAT', 'IS', 'EVEN', 'WRITTEN', 'ON', 'THE', 'WALL'] +3764-168670-0035-1701: hyp=['NOT', 'ANOTHER', 'MAN', 'EXCEPT', 'THE', 'POLICE', 'DOCTOR', 'CAN', 'ENTER', 'THE', 'DEADROOM', 'THAT', 'IS', 'EVEN', 'WRITTEN', 'ON', 'THE', 'WALL'] +3764-168670-0036-1702: ref=['COULD', 'YOU', 'HIDE', 'ME', 'IN', 'THAT', 'ROOM', 'TO', 'NIGHT', 'WHEN', 'EVERY', 'ONE', 'IS', 'ASLEEP'] +3764-168670-0036-1702: hyp=['COULD', 'YOU', 'HIDE', 'ME', 'IN', 'THAT', 'ROOM', 'TO', 'NIGHT', 'WHEN', 'EVERY', 'ONE', 'IS', 'ASLEEP'] +3764-168670-0037-1703: ref=['ABOUT', 'THREE', "O'CLOCK", 'IN', 'THE', 'AFTERNOON'] +3764-168670-0037-1703: hyp=['ABOUT', 'THREE', "O'CLOCK", 'IN', 'THE', 'AFTERNOON'] +3764-168670-0038-1704: ref=['I', 'SHALL', 'BE', 'HUNGRY', 'I', 'WILL', 'BRING', 'YOU', 'SOMETHING'] +3764-168670-0038-1704: hyp=['I', 'SHALL', 'BE', 'HUNGRY', 'I', 'WILL', 'BRING', 'YOU', 'SOMETHING'] +3764-168670-0039-1705: ref=['YOU', 'CAN', 'COME', 'AND', 'NAIL', 'ME', 'UP', 'IN', 'THE', 'COFFIN', 'AT', 'TWO', "O'CLOCK"] +3764-168670-0039-1705: hyp=['YOU', 'CAN', 'COME', 'AND', 'NAIL', 'ME', 'UP', 'IN', 'THE', 'COFFIN', 'AT', 'TWO', "O'CLOCK"] +3764-168670-0040-1706: ref=['FAUCHELEVENT', 'RECOILED', 'AND', 'CRACKED', 'HIS', 'FINGER', 'JOINTS', 'BUT', 'THAT', 'IS', 'IMPOSSIBLE'] +3764-168670-0040-1706: hyp=['FUCHELEVENT', 'RECOILED', 'AND', 'CRACKED', 'HIS', 'FINGER', 'JOINTS', 'BUT', 'THAT', 'IS', 'IMPOSSIBLE'] +3764-168670-0041-1707: ref=['BAH', 'IMPOSSIBLE', 'TO', 'TAKE', 'A', 'HAMMER', 'AND', 'DRIVE', 'SOME', 'NAILS', 'IN', 'A', 'PLANK'] +3764-168670-0041-1707: hyp=['BAH', 'IMPOSSIBLE', 'TO', 'TAKE', 'A', 'HAMMER', 'AND', 'DRIVE', 'SOME', 'NAILS', 'IN', 'A', 'PLANK'] +3764-168670-0042-1708: ref=['JEAN', 'VALJEAN', 'HAD', 'BEEN', 'IN', 'WORSE', 'STRAITS', 'THAN', 'THIS'] +3764-168670-0042-1708: hyp=['JEAN', 'VALJEAN', 'HAD', 'BEEN', 'IN', 'WORSE', 'STRAITS', 'THAN', 'THIS'] +3764-168670-0043-1709: ref=['ANY', 'MAN', 'WHO', 'HAS', 'BEEN', 'A', 'PRISONER', 'UNDERSTANDS', 'HOW', 'TO', 'CONTRACT', 'HIMSELF', 'TO', 'FIT', 'THE', 'DIAMETER', 'OF', 'THE', 'ESCAPE'] +3764-168670-0043-1709: hyp=['ANY', 'MAN', 'WHO', 'HAS', 'BEEN', 'A', 'PRISONER', 'UNDERSTANDS', 'HOW', 'TO', 'CONTRACT', 'HIMSELF', 'TO', 'FIT', 'THE', 'DIAMETER', 'OF', 'THE', 'ESCAPE'] +3764-168670-0044-1710: ref=['WHAT', 'DOES', 'NOT', 'A', 'MAN', 'UNDERGO', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'CURE'] +3764-168670-0044-1710: hyp=['WHAT', 'DOES', 'NOT', 'A', 'MAN', 'UNDERGO', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'CURE'] +3764-168670-0045-1711: ref=['TO', 'HAVE', 'HIMSELF', 'NAILED', 'UP', 'IN', 'A', 'CASE', 'AND', 'CARRIED', 'OFF', 'LIKE', 'A', 'BALE', 'OF', 'GOODS', 'TO', 'LIVE', 'FOR', 'A', 'LONG', 'TIME', 'IN', 'A', 'BOX', 'TO', 'FIND', 'AIR', 'WHERE', 'THERE', 'IS', 'NONE', 'TO', 'ECONOMIZE', 'HIS', 'BREATH', 'FOR', 'HOURS', 
'TO', 'KNOW', 'HOW', 'TO', 'STIFLE', 'WITHOUT', 'DYING', 'THIS', 'WAS', 'ONE', 'OF', 'JEAN', "VALJEAN'S", 'GLOOMY', 'TALENTS'] +3764-168670-0045-1711: hyp=['TO', 'HAVE', 'HIMSELF', 'NAILED', 'UP', 'IN', 'A', 'CASE', 'AND', 'CARRIED', 'OFF', 'LIKE', 'A', 'BAIL', 'OF', 'GOODS', 'TO', 'LIVE', 'FOR', 'A', 'LONG', 'TIME', 'IN', 'A', 'BOX', 'TO', 'FIND', 'AIR', 'WHERE', 'THERE', 'IS', 'NONE', 'TO', 'ECONOMIZE', 'HIS', 'BREATH', 'FOR', 'HOURS', 'TO', 'KNOW', 'HOW', 'TO', 'STIFLE', 'WITHOUT', 'DYING', 'THIS', 'WAS', 'ONE', 'OF', 'JEAN', "VALJEAN'S", 'GLOOMY', 'TALENTS'] +3764-168670-0046-1712: ref=['YOU', 'SURELY', 'MUST', 'HAVE', 'A', 'GIMLET', 'YOU', 'WILL', 'MAKE', 'A', 'FEW', 'HOLES', 'HERE', 'AND', 'THERE', 'AROUND', 'MY', 'MOUTH', 'AND', 'YOU', 'WILL', 'NAIL', 'THE', 'TOP', 'PLANK', 'ON', 'LOOSELY', 'GOOD', 'AND', 'WHAT', 'IF', 'YOU', 'SHOULD', 'HAPPEN', 'TO', 'COUGH', 'OR', 'TO', 'SNEEZE'] +3764-168670-0046-1712: hyp=['YOU', 'SURELY', 'MUST', 'HAVE', 'A', 'GIMLET', 'YOU', 'WILL', 'MAKE', 'A', 'FEW', 'HOLES', 'HERE', 'AND', 'THERE', 'AROUND', 'MY', 'MOUTH', 'AND', 'YOU', 'WILL', 'NAIL', 'THE', 'TOP', 'PLANCORN', 'LOOSELY', 'GOOD', 'AND', 'WHAT', 'IF', 'YOU', 'SHOULD', 'HAPPEN', 'TO', 'COUGH', 'OR', 'TO', 'SNEEZE'] +3764-168670-0047-1713: ref=['A', 'MAN', 'WHO', 'IS', 'MAKING', 'HIS', 'ESCAPE', 'DOES', 'NOT', 'COUGH', 'OR', 'SNEEZE'] +3764-168670-0047-1713: hyp=['A', 'MAN', 'WHO', 'IS', 'MAKING', 'HIS', 'ESCAPE', 'DOES', 'NOT', 'COUGH', 'OR', 'SNEEZE'] +3764-168670-0048-1714: ref=['WHO', 'IS', 'THERE', 'WHO', 'HAS', 'NOT', 'SAID', 'TO', 'A', 'CAT', 'DO', 'COME', 'IN'] +3764-168670-0048-1714: hyp=['WHO', 'IS', 'THERE', 'WHO', 'HAS', 'NOT', 'SAID', 'TO', 'A', 'CAT', 'DO', 'COME', 'IN'] +3764-168670-0049-1715: ref=['THE', 'OVER', 'PRUDENT', 'CATS', 'AS', 'THEY', 'ARE', 'AND', 'BECAUSE', 'THEY', 'ARE', 'CATS', 'SOMETIMES', 'INCUR', 'MORE', 'DANGER', 'THAN', 'THE', 'AUDACIOUS'] +3764-168670-0049-1715: hyp=['THE', 'OVER', 'PRUDENT', 'COUNTS', 'AS', 'THEY', 'ARE', 'AND', 'BECAUSE', 'THEY', 'ARE', 'CATS', 'SOMETIMES', 'INCUR', 'MORE', 'DANGER', 'THAN', 'THE', 'AUDACIOUS'] +3764-168670-0050-1716: ref=['BUT', 'JEAN', "VALJEAN'S", 'COOLNESS', 'PREVAILED', 'OVER', 'HIM', 'IN', 'SPITE', 'OF', 'HIMSELF', 'HE', 'GRUMBLED'] +3764-168670-0050-1716: hyp=['BUT', 'JEAN', "VALJEAN'S", 'COOLNESS', 'PREVAILED', 'OVER', 'HIM', 'IN', 'SPITE', 'OF', 'HIMSELF', 'HE', 'GRUMBLED'] +3764-168670-0051-1717: ref=['IF', 'YOU', 'ARE', 'SURE', 'OF', 'COMING', 'OUT', 'OF', 'THE', 'COFFIN', 'ALL', 'RIGHT', 'I', 'AM', 'SURE', 'OF', 'GETTING', 'YOU', 'OUT', 'OF', 'THE', 'GRAVE'] +3764-168670-0051-1717: hyp=['IF', 'YOU', 'ARE', 'SURE', 'OF', 'COMING', 'OUT', 'OF', 'THE', 'COFFIN', 'ALL', 'RIGHT', 'I', 'AM', 'SURE', 'OF', 'GETTING', 'OUT', 'OF', 'THE', 'GRAVE'] +3764-168670-0052-1718: ref=['AN', 'OLD', 'FELLOW', 'OF', 'THE', 'OLD', 'SCHOOL', 'THE', 'GRAVE', 'DIGGER', 'PUTS', 'THE', 'CORPSES', 'IN', 'THE', 'GRAVE', 'AND', 'I', 'PUT', 'THE', 'GRAVE', 'DIGGER', 'IN', 'MY', 'POCKET'] +3764-168670-0052-1718: hyp=['AN', 'OLD', 'FELLOW', 'OF', 'THE', 'OLD', 'SCHOOL', 'THE', 'GRAVE', 'DIGGER', 'PUTS', 'THE', 'CORPSES', 'IN', 'THE', 'GRAVE', 'AND', 'I', 'PUT', 'THE', 'GRAVE', 'DIGGER', 'IN', 'MY', 'POCKET'] +3764-168670-0053-1719: ref=['I', 'SHALL', 'FOLLOW', 'THAT', 'IS', 'MY', 'BUSINESS'] +3764-168670-0053-1719: hyp=['I', 'SHALL', 'FOLLOW', 'THAT', 'IS', 'MY', 'BUSINESS'] +3764-168670-0054-1720: ref=['THE', 'HEARSE', 'HALTS', 'THE', "UNDERTAKER'S", 'MEN', 'KNOT', 'A', 'ROPE', 'AROUND', 'YOUR', 'COFFIN', 'AND', 'LOWER', 'YOU', 'DOWN'] 
+3764-168670-0054-1720: hyp=['THE', 'HOUSE', 'HALTS', 'THE', 'UNDERTAKERS', 'MEN', 'NOT', 'A', 'ROPE', 'AROUND', 'YOUR', 'COFFIN', 'AND', 'LOWER', 'YOU', 'DOWN'] +3764-168670-0055-1721: ref=['THE', 'PRIEST', 'SAYS', 'THE', 'PRAYERS', 'MAKES', 'THE', 'SIGN', 'OF', 'THE', 'CROSS', 'SPRINKLES', 'THE', 'HOLY', 'WATER', 'AND', 'TAKES', 'HIS', 'DEPARTURE'] +3764-168670-0055-1721: hyp=['THE', 'PRIESTS', 'AS', 'THE', 'PRAYERS', 'MAKES', 'THE', 'SIGN', 'OF', 'THE', 'CROSS', 'SPRINKLES', 'THE', 'HOLY', 'WATER', 'AND', 'TAKES', 'HIS', 'DEPARTURE'] +3764-168670-0056-1722: ref=['ONE', 'OF', 'TWO', 'THINGS', 'WILL', 'HAPPEN', 'HE', 'WILL', 'EITHER', 'BE', 'SOBER', 'OR', 'HE', 'WILL', 'NOT', 'BE', 'SOBER'] +3764-168670-0056-1722: hyp=['ONE', 'OF', 'TWO', 'THINGS', 'WILL', 'HAPPEN', 'HE', 'WILL', 'EITHER', 'BE', 'SOBER', 'OR', 'HE', 'WILL', 'NOT', 'BE', 'SOBER'] +3764-168670-0057-1723: ref=['THAT', 'IS', 'SETTLED', 'FATHER', 'FAUCHELEVENT', 'ALL', 'WILL', 'GO', 'WELL'] +3764-168670-0057-1723: hyp=['THAT', 'IS', 'SETTLED', 'FATHER', 'FAUCHELEVENT', 'ALL', 'WILL', 'GO', 'WELL'] +3764-168671-0000-1724: ref=['ON', 'THE', 'FOLLOWING', 'DAY', 'AS', 'THE', 'SUN', 'WAS', 'DECLINING', 'THE', 'VERY', 'RARE', 'PASSERS', 'BY', 'ON', 'THE', 'BOULEVARD', 'DU', 'MAINE', 'PULLED', 'OFF', 'THEIR', 'HATS', 'TO', 'AN', 'OLD', 'FASHIONED', 'HEARSE', 'ORNAMENTED', 'WITH', 'SKULLS', 'CROSS', 'BONES', 'AND', 'TEARS'] +3764-168671-0000-1724: hyp=['ON', 'THE', 'FOLLOWING', 'DAY', 'AS', 'THE', 'SUN', 'WAS', 'DECLINING', 'THE', 'VERY', 'RARE', 'PASSES', 'BY', 'ON', 'THE', 'BOULEVARD', 'DUMEN', 'PULLED', 'OFF', 'THEIR', 'HATS', 'TO', 'AN', 'OLD', 'FASHIONED', 'HEARSE', 'ORNAMENTED', 'WITH', 'SKULLS', 'CROSS', 'BONES', 'AND', 'TEARS'] +3764-168671-0001-1725: ref=['THIS', 'HEARSE', 'CONTAINED', 'A', 'COFFIN', 'COVERED', 'WITH', 'A', 'WHITE', 'CLOTH', 'OVER', 'WHICH', 'SPREAD', 'A', 'LARGE', 'BLACK', 'CROSS', 'LIKE', 'A', 'HUGE', 'CORPSE', 'WITH', 'DROOPING', 'ARMS'] +3764-168671-0001-1725: hyp=['THIS', 'HEARSE', 'CONTAINED', 'A', 'COFFIN', 'COVERED', 'WITH', 'A', 'WHITE', 'CLOTH', 'OVER', 'WHICH', 'SPREAD', 'A', 'LARGE', 'BLACK', 'CROSS', 'LIKE', 'A', 'HUGE', 'CORPSE', 'WITH', 'DROOPING', 'ARMS'] +3764-168671-0002-1726: ref=['A', 'MOURNING', 'COACH', 'IN', 'WHICH', 'COULD', 'BE', 'SEEN', 'A', 'PRIEST', 'IN', 'HIS', 'SURPLICE', 'AND', 'A', 'CHOIR', 'BOY', 'IN', 'HIS', 'RED', 'CAP', 'FOLLOWED'] +3764-168671-0002-1726: hyp=['THE', 'MORNING', 'COACH', 'IN', 'WHICH', 'COULD', 'BE', 'SEEN', 'A', 'PRIEST', 'IN', 'HIS', 'SURPLICE', 'AND', 'A', 'CHOIR', 'BOY', 'IN', 'HIS', 'RED', 'CAP', 'FOLLOWED'] +3764-168671-0003-1727: ref=['BEHIND', 'IT', 'CAME', 'AN', 'OLD', 'MAN', 'IN', 'THE', 'GARMENTS', 'OF', 'A', 'LABORER', 'WHO', 'LIMPED', 'ALONG'] +3764-168671-0003-1727: hyp=['BEHIND', 'IT', 'CAME', 'AN', 'OLD', 'MAN', 'IN', 'THE', 'GARMENTS', 'OF', 'A', 'LABORER', 'WHO', 'LIMPED', 'ALONG'] +3764-168671-0004-1728: ref=['THE', 'GRAVE', 'DIGGERS', 'BEING', 'THUS', 'BOUND', 'TO', 'SERVICE', 'IN', 'THE', 'EVENING', 'IN', 'SUMMER', 'AND', 'AT', 'NIGHT', 'IN', 'WINTER', 'IN', 'THIS', 'CEMETERY', 'THEY', 'WERE', 'SUBJECTED', 'TO', 'A', 'SPECIAL', 'DISCIPLINE'] +3764-168671-0004-1728: hyp=['THE', 'GRAVE', 'DIGGERS', 'BEING', 'THUS', 'BOUND', 'TO', 'SERVICE', 'IN', 'THE', 'EVENING', 'IN', 'SUMMER', 'AND', 'AT', 'NIGHT', 'IN', 'WINTER', 'IN', 'THIS', 'CEMETERY', 'THEY', 'WERE', 'SUBJECTED', 'TO', 'A', 'SPECIAL', 'DISCIPLINE'] +3764-168671-0005-1729: ref=['THESE', 'GATES', 'THEREFORE', 'SWUNG', 'INEXORABLY', 'ON', 'THEIR', 'HINGES', 'AT', 'THE', 'INSTANT', 
'WHEN', 'THE', 'SUN', 'DISAPPEARED', 'BEHIND', 'THE', 'DOME', 'OF', 'THE', 'INVALIDES'] +3764-168671-0005-1729: hyp=['THESE', 'GATES', 'THEREFORE', 'SWUNG', 'INEXORABLY', 'ON', 'THEIR', 'HINGES', 'AT', 'THE', 'INSTANT', 'WHEN', 'THE', 'SUN', 'DISAPPEARED', 'BEHIND', 'THE', 'DOME', 'OF', 'THE', 'INVALID'] +3764-168671-0006-1730: ref=['DAMPNESS', 'WAS', 'INVADING', 'IT', 'THE', 'FLOWERS', 'WERE', 'DESERTING', 'IT'] +3764-168671-0006-1730: hyp=['DAMPNESS', 'WAS', 'INVADING', 'IT', 'THE', 'FLOWERS', 'WERE', 'DESERTING', 'IT'] +3764-168671-0007-1731: ref=['THE', 'BOURGEOIS', 'DID', 'NOT', 'CARE', 'MUCH', 'ABOUT', 'BEING', 'BURIED', 'IN', 'THE', 'VAUGIRARD', 'IT', 'HINTED', 'AT', 'POVERTY', 'PERE', 'LACHAISE', 'IF', 'YOU', 'PLEASE'] +3764-168671-0007-1731: hyp=['THE', 'BOURGEOIS', 'DID', 'NOT', 'CARE', 'MUCH', 'ABOUT', 'BEING', 'BURIED', 'IN', 'THE', 'VIGOR', 'IT', 'HINTED', 'AT', 'POVERTY', 'BAT', 'LACHES', 'IF', 'YOU', 'PLEASE'] +3764-168671-0008-1732: ref=['TO', 'BE', 'BURIED', 'IN', 'PERE', 'LACHAISE', 'IS', 'EQUIVALENT', 'TO', 'HAVING', 'FURNITURE', 'OF', 'MAHOGANY', 'IT', 'IS', 'RECOGNIZED', 'AS', 'ELEGANT'] +3764-168671-0008-1732: hyp=['TO', 'BE', 'BURIED', 'IN', 'PETERS', 'IS', 'EQUIVALENT', 'TO', 'HAVING', 'FURNITURE', 'OF', 'MAHOGANY', 'IT', 'IS', 'RECOGNIZED', 'AS', 'ELEGANT'] +3764-168671-0009-1733: ref=['THE', 'INTERMENT', 'OF', 'MOTHER', 'CRUCIFIXION', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'THE', 'EXIT', 'OF', 'COSETTE', 'THE', 'INTRODUCTION', 'OF', 'JEAN', 'VALJEAN', 'TO', 'THE', 'DEAD', 'ROOM', 'ALL', 'HAD', 'BEEN', 'EXECUTED', 'WITHOUT', 'DIFFICULTY', 'AND', 'THERE', 'HAD', 'BEEN', 'NO', 'HITCH', 'LET', 'US', 'REMARK', 'IN', 'PASSING', 'THAT', 'THE', 'BURIAL', 'OF', 'MOTHER', 'CRUCIFIXION', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CONVENT', 'IS', 'A', 'PERFECTLY', 'VENIAL', 'OFFENCE', 'IN', 'OUR', 'SIGHT'] +3764-168671-0009-1733: hyp=['THE', 'INTERMENT', 'OF', 'MOTHER', 'CRUCIFIXION', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'THE', 'EXIT', 'OF', 'COSETTE', 'THE', 'INTRODUCTION', 'OF', 'JEAN', 'VALJEAN', 'INTO', 'THE', 'DEAD', 'ROOM', 'ALL', 'HAD', 'BEEN', 'EXECUTED', 'WITHOUT', 'DIFFICULTY', 'AND', 'THERE', 'HAD', 'BEEN', 'NO', 'HITCH', 'LET', 'US', 'REMARK', 'IN', 'PASSING', 'THAT', 'THE', 'BURIAL', 'OF', 'MOTHER', 'CRUCIFIXION', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CONVENT', 'IS', 'A', 'PERFECTLY', 'VENIAL', 'OFFENCE', 'IN', 'OUR', 'SIGHT'] +3764-168671-0010-1734: ref=['IT', 'IS', 'ONE', 'OF', 'THE', 'FAULTS', 'WHICH', 'RESEMBLE', 'A', 'DUTY'] +3764-168671-0010-1734: hyp=['IT', 'IS', 'ONE', 'OF', 'THE', 'FAULTS', 'WHICH', 'RESEMBLE', 'A', 'DUTY'] +3764-168671-0011-1735: ref=['THE', 'NUNS', 'HAD', 'COMMITTED', 'IT', 'NOT', 'ONLY', 'WITHOUT', 'DIFFICULTY', 'BUT', 'EVEN', 'WITH', 'THE', 'APPLAUSE', 'OF', 'THEIR', 'OWN', 'CONSCIENCES'] +3764-168671-0011-1735: hyp=['THE', 'NUNS', 'HAD', 'COMMITTED', 'IT', 'NOT', 'ONLY', 'WITHOUT', 'DIFFICULTY', 'BUT', 'EVEN', 'WITH', 'THE', 'APPLAUSE', 'OF', 'THEIR', 'OWN', 'CONSCIENCES'] +3764-168671-0012-1736: ref=['IN', 'THE', 'CLOISTER', 'WHAT', 'IS', 'CALLED', 'THE', 'GOVERNMENT', 'IS', 'ONLY', 'AN', 'INTERMEDDLING', 'WITH', 'AUTHORITY', 'AN', 'INTERFERENCE', 'WHICH', 'IS', 'ALWAYS', 'QUESTIONABLE'] +3764-168671-0012-1736: hyp=['IN', 'THE', 'CLOISTER', 'WHAT', 'IS', 'CALLED', 'THE', 'GOVERNMENT', 'IS', 'ONLY', 'AN', 'INTERMEDDLING', 'WITH', 'AUTHORITY', 'AN', 'INTERFERENCE', 'WHICH', 'IS', 'ALWAYS', 'QUESTIONABLE'] +3764-168671-0013-1737: ref=['MAKE', 'AS', 'MANY', 'LAWS', 'AS', 'YOU', 'PLEASE', 'MEN', 'BUT', 'KEEP', 'THEM', 
'FOR', 'YOURSELVES'] +3764-168671-0013-1737: hyp=['MAKE', 'AS', 'MANY', 'LAWS', 'AS', 'YOU', 'PLEASE', 'MEN', 'BUT', 'KEEP', 'THEM', 'FOR', 'YOURSELVES'] +3764-168671-0014-1738: ref=['A', 'PRINCE', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'PRINCIPLE'] +3764-168671-0014-1738: hyp=['A', 'PRINCE', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'PRINCIPLE'] +3764-168671-0015-1739: ref=['FAUCHELEVENT', 'LIMPED', 'ALONG', 'BEHIND', 'THE', 'HEARSE', 'IN', 'A', 'VERY', 'CONTENTED', 'FRAME', 'OF', 'MIND'] +3764-168671-0015-1739: hyp=['FLUCHELEVENT', 'LIMPED', 'ALONG', 'BEHIND', 'THE', 'HEARSE', 'IN', 'A', 'VERY', 'CONTENTED', 'FRAME', 'OF', 'MIND'] +3764-168671-0016-1740: ref=['JEAN', "VALJEAN'S", 'COMPOSURE', 'WAS', 'ONE', 'OF', 'THOSE', 'POWERFUL', 'TRANQUILLITIES', 'WHICH', 'ARE', 'CONTAGIOUS'] +3764-168671-0016-1740: hyp=['JEAN', "VALJEAN'S", 'COMPOSURE', 'WAS', 'ONE', 'OF', 'THOSE', 'POWERFUL', 'TRANQUILLITIES', 'WHICH', 'ARE', 'CONTAGIOUS'] +3764-168671-0017-1741: ref=['WHAT', 'REMAINED', 'TO', 'BE', 'DONE', 'WAS', 'A', 'MERE', 'NOTHING'] +3764-168671-0017-1741: hyp=['WHAT', 'REMAINED', 'TO', 'BE', 'DONE', 'WAS', 'A', 'MERE', 'NOTHING'] +3764-168671-0018-1742: ref=['HE', 'PLAYED', 'WITH', 'FATHER', 'MESTIENNE'] +3764-168671-0018-1742: hyp=['HE', 'PLAYED', 'WITH', 'FATHER', 'MESTINE'] +3764-168671-0019-1743: ref=['HE', 'DID', 'WHAT', 'HE', 'LIKED', 'WITH', 'HIM', 'HE', 'MADE', 'HIM', 'DANCE', 'ACCORDING', 'TO', 'HIS', 'WHIM'] +3764-168671-0019-1743: hyp=['HE', 'DID', 'WHAT', 'HE', 'LIKED', 'WITH', 'HIM', 'HE', 'MADE', 'HIM', 'DANCE', 'ACCORDING', 'TO', 'HIS', 'WHIM'] +3764-168671-0020-1744: ref=['THE', 'PERMISSION', 'FOR', 'INTERMENT', 'MUST', 'BE', 'EXHIBITED'] +3764-168671-0020-1744: hyp=['THE', 'PERMISSION', 'FOR', 'INTERMENT', 'MUST', 'BE', 'EXHIBITED'] +3764-168671-0021-1745: ref=['HE', 'WAS', 'A', 'SORT', 'OF', 'LABORING', 'MAN', 'WHO', 'WORE', 'A', 'WAISTCOAT', 'WITH', 'LARGE', 'POCKETS', 'AND', 'CARRIED', 'A', 'MATTOCK', 'UNDER', 'HIS', 'ARM'] +3764-168671-0021-1745: hyp=['HE', 'WAS', 'A', 'SORT', 'OF', 'LABORING', 'MAN', 'WHO', 'WORE', 'A', 'WAISTCOAT', 'WITH', 'LARGE', 'POCKETS', 'AND', 'CARRIED', 'A', 'MATTOCK', 'UNDER', 'HIS', 'ARM'] +3764-168671-0022-1746: ref=['THE', 'MAN', 'REPLIED', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0022-1746: hyp=['THE', 'MAN', 'REPLIED', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0023-1747: ref=['THE', 'GRAVE', 'DIGGER', 'YES'] +3764-168671-0023-1747: hyp=['THE', 'GRAVE', 'DIGGER', 'YES'] +3764-168671-0024-1748: ref=['YOU', 'I'] +3764-168671-0024-1748: hyp=['YOU', 'I'] +3764-168671-0025-1749: ref=['FATHER', 'MESTIENNE', 'IS', 'THE', 'GRAVE', 'DIGGER', 'HE', 'WAS'] +3764-168671-0025-1749: hyp=['FATHER', 'MISS', 'CHANN', 'IS', 'THE', 'GRAVE', 'DIGGER', 'HE', 'WAS'] +3764-168671-0026-1750: ref=['FAUCHELEVENT', 'HAD', 'EXPECTED', 'ANYTHING', 'BUT', 'THIS', 'THAT', 'A', 'GRAVE', 'DIGGER', 'COULD', 'DIE'] +3764-168671-0026-1750: hyp=['FUSSION', 'OF', 'WHAT', 'HAD', 'EXPECTED', 'ANYTHING', 'BUT', 'THIS', 'THAT', 'A', 'GRAVE', 'DIGGER', 'COULD', 'DIE'] +3764-168671-0027-1751: ref=['IT', 'IS', 'TRUE', 'NEVERTHELESS', 'THAT', 'GRAVE', 'DIGGERS', 'DO', 'DIE', 'THEMSELVES'] +3764-168671-0027-1751: hyp=['IT', 'IS', 'TRUE', 'NEVERTHELESS', 'THAT', 'GRAVE', 'DIGGERS', 'DO', 'DIE', 'THEMSELVES'] +3764-168671-0028-1752: ref=['HE', 'HAD', 'HARDLY', 'THE', 'STRENGTH', 'TO', 'STAMMER'] +3764-168671-0028-1752: hyp=['HE', 'HAD', 'HARDLY', 'THE', 'STRENGTH', 'TO', 'STAMMER'] +3764-168671-0029-1753: ref=['BUT', 'HE', 'PERSISTED', 'FEEBLY', 'FATHER', 'MESTIENNE', 
'IS', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0029-1753: hyp=['BUT', 'HE', 'PERSISTED', 'FEEBLY', 'FATHER', 'MISSED', 'HERE', 'IS', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0030-1754: ref=['DO', 'YOU', 'KNOW', 'WHO', 'LITTLE', 'FATHER', 'LENOIR', 'IS', 'HE', 'IS', 'A', 'JUG', 'OF', 'RED', 'WINE'] +3764-168671-0030-1754: hyp=['DO', 'YOU', 'KNOW', 'WHO', 'LITTLE', 'FATHERLAND', 'WARRITZ', 'HE', 'IS', 'A', 'JUG', 'OF', 'RED', 'WINE'] +3764-168671-0031-1755: ref=['BUT', 'YOU', 'ARE', 'A', 'JOLLY', 'FELLOW', 'TOO'] +3764-168671-0031-1755: hyp=['BUT', "YOU'RE", 'A', 'JOLLY', 'FELLOW', 'TOO'] +3764-168671-0032-1756: ref=['ARE', 'YOU', 'NOT', 'COMRADE', "WE'LL", 'GO', 'AND', 'HAVE', 'A', 'DRINK', 'TOGETHER', 'PRESENTLY'] +3764-168671-0032-1756: hyp=['ARE', 'YOU', 'NOT', 'COMRADE', "WE'LL", 'GO', 'AND', 'HAVE', 'A', 'DRINK', 'TOGETHER', 'PRESENTLY'] +3764-168671-0033-1757: ref=['THE', 'MAN', 'REPLIED'] +3764-168671-0033-1757: hyp=['THE', 'MAN', 'REPLIED'] +3764-168671-0034-1758: ref=['HE', 'LIMPED', 'MORE', 'OUT', 'OF', 'ANXIETY', 'THAN', 'FROM', 'INFIRMITY'] +3764-168671-0034-1758: hyp=['HE', 'LIMPED', 'MORE', 'OUT', 'OF', 'ANXIETY', 'THAN', 'FROM', 'INFIRMITY'] +3764-168671-0035-1759: ref=['THE', 'GRAVE', 'DIGGER', 'WALKED', 'ON', 'IN', 'FRONT', 'OF', 'HIM'] +3764-168671-0035-1759: hyp=['THE', 'GRAVE', 'DIGGER', 'WALKED', 'ON', 'IN', 'FRONT', 'OF', 'HIM'] +3764-168671-0036-1760: ref=['FAUCHELEVENT', 'PASSED', 'THE', 'UNEXPECTED', 'GRIBIER', 'ONCE', 'MORE', 'IN', 'REVIEW'] +3764-168671-0036-1760: hyp=['FAUCHELEVENT', 'PASSED', 'THE', 'UNEXPECTED', 'CLAVIER', 'ONCE', 'MORE', 'IN', 'REVIEW'] +3764-168671-0037-1761: ref=['FAUCHELEVENT', 'WHO', 'WAS', 'ILLITERATE', 'BUT', 'VERY', 'SHARP', 'UNDERSTOOD', 'THAT', 'HE', 'HAD', 'TO', 'DEAL', 'WITH', 'A', 'FORMIDABLE', 'SPECIES', 'OF', 'MAN', 'WITH', 'A', 'FINE', 'TALKER', 'HE', 'MUTTERED'] +3764-168671-0037-1761: hyp=['FASHIONEVENT', 'WHO', 'WAS', 'ILLITERATE', 'BUT', 'VERY', 'SHARP', 'UNDERSTOOD', 'THAT', 'HE', 'HAD', 'TO', 'DEAL', 'WITH', 'A', 'FORMIDABLE', 'SPECIES', 'OF', 'MAN', 'WITH', 'A', 'FINE', 'TALKER', 'HE', 'MUTTERED'] +3764-168671-0038-1762: ref=['SO', 'FATHER', 'MESTIENNE', 'IS', 'DEAD'] +3764-168671-0038-1762: hyp=['MISS', 'OH', 'FATHER', 'MESS', 'TIEN', 'IS', 'DEAD'] +3764-168671-0039-1763: ref=['THE', 'MAN', 'REPLIED', 'COMPLETELY'] +3764-168671-0039-1763: hyp=['THE', 'MAN', 'REPLIED', 'COMPLETELY'] +3764-168671-0040-1764: ref=['THE', 'GOOD', 'GOD', 'CONSULTED', 'HIS', 'NOTE', 'BOOK', 'WHICH', 'SHOWS', 'WHEN', 'THE', 'TIME', 'IS', 'UP', 'IT', 'WAS', 'FATHER', "MESTIENNE'S", 'TURN', 'FATHER', 'MESTIENNE', 'DIED'] +3764-168671-0040-1764: hyp=['THE', 'GOOD', 'GOD', 'CONSULTED', 'HIS', 'NOTEBOOK', 'WHICH', 'SHARES', 'WHEN', 'THE', 'TIME', 'IS', 'UP', 'IT', 'WAS', 'FATHER', "MESTINE'S", 'TURN', 'FATHER', 'MESS', 'HE', 'HAD', 'DIED'] +3764-168671-0041-1765: ref=['STAMMERED', 'FAUCHELEVENT', 'IT', 'IS', 'MADE'] +3764-168671-0041-1765: hyp=['STAMMERED', 'FAUCHELEVENT', 'IT', 'IS', 'MADE'] +3764-168671-0042-1766: ref=['YOU', 'ARE', 'A', 'PEASANT', 'I', 'AM', 'A', 'PARISIAN'] +3764-168671-0042-1766: hyp=['YOU', 'ARE', 'A', 'PEASANT', 'I', 'AM', 'A', 'PARISIAN'] +3764-168671-0043-1767: ref=['FAUCHELEVENT', 'THOUGHT', 'I', 'AM', 'LOST'] +3764-168671-0043-1767: hyp=['FRESHEN', 'THOUGHT', 'I', 'AM', 'LOST'] +3764-168671-0044-1768: ref=['THEY', 'WERE', 'ONLY', 'A', 'FEW', 'TURNS', 'OF', 'THE', 'WHEEL', 'DISTANT', 'FROM', 'THE', 'SMALL', 'ALLEY', 'LEADING', 'TO', 'THE', 'NUNS', 'CORNER'] +3764-168671-0044-1768: hyp=['THEY', 'WERE', 'ONLY', 'A', 'FEW', 
'TURNS', 'OF', 'THE', 'WHEEL', 'DISTANT', 'FROM', 'THE', 'SMALL', 'ALLEY', 'LEADING', 'TO', 'THE', "NUN'S", 'CORNER'] +3764-168671-0045-1769: ref=['AND', 'HE', 'ADDED', 'WITH', 'THE', 'SATISFACTION', 'OF', 'A', 'SERIOUS', 'MAN', 'WHO', 'IS', 'TURNING', 'A', 'PHRASE', 'WELL'] +3764-168671-0045-1769: hyp=['AND', 'HE', 'ADDED', 'WITH', 'THE', 'SATISFACTION', 'OF', 'A', 'SERIOUS', 'MAN', 'WHO', 'IS', 'TURNING', 'A', 'PHRASE', 'WELL'] +3764-168671-0046-1770: ref=['FORTUNATELY', 'THE', 'SOIL', 'WHICH', 'WAS', 'LIGHT', 'AND', 'WET', 'WITH', 'THE', 'WINTER', 'RAINS', 'CLOGGED', 'THE', 'WHEELS', 'AND', 'RETARDED', 'ITS', 'SPEED'] +3764-168671-0046-1770: hyp=['FORTUNATELY', 'THE', 'SOIL', 'WHICH', 'WAS', 'LIGHT', 'AND', 'WET', 'WITH', 'THE', 'WINTER', 'RAINS', 'CLOGGED', 'THE', 'WHEELS', 'AND', 'RETARDED', 'ITS', 'SPEED'] +3764-168671-0047-1771: ref=['MY', 'FATHER', 'WAS', 'A', 'PORTER', 'AT', 'THE', 'PRYTANEUM', 'TOWN', 'HALL'] +3764-168671-0047-1771: hyp=['MY', 'FATHER', 'WAS', 'A', 'PORTER', 'AT', 'THE', 'BRITTANNIUM', 'TOWN', 'HALL'] +3764-168671-0048-1772: ref=['BUT', 'HE', 'HAD', 'REVERSES', 'HE', 'HAD', 'LOSSES', 'ON', 'CHANGE', 'I', 'WAS', 'OBLIGED', 'TO', 'RENOUNCE', 'THE', 'PROFESSION', 'OF', 'AUTHOR', 'BUT', 'I', 'AM', 'STILL', 'A', 'PUBLIC', 'WRITER'] +3764-168671-0048-1772: hyp=['BUT', 'HE', 'HAD', 'REVERSES', 'HE', 'HAD', 'LOSSES', 'UNCHANGED', 'I', 'WAS', 'OBLIGED', 'TO', 'RENOUNCE', 'THE', 'PROFESSION', 'OF', 'AUTHOR', 'BUT', 'I', 'AM', 'STILL', 'A', 'PUBLIC', 'WRITER'] +3764-168671-0049-1773: ref=['SO', 'YOU', 'ARE', 'NOT', 'A', 'GRAVE', 'DIGGER', 'THEN'] +3764-168671-0049-1773: hyp=['SIR', 'YOU', 'ARE', 'NOT', 'A', 'GRAVE', 'DIGGER', 'THEN'] +3764-168671-0050-1774: ref=['RETURNED', 'FAUCHELEVENT', 'CLUTCHING', 'AT', 'THIS', 'BRANCH', 'FEEBLE', 'AS', 'IT', 'WAS'] +3764-168671-0050-1774: hyp=['RETURNED', 'FAUCHELEVENT', 'CLUTCHING', 'AT', 'THIS', 'BRANCH', 'FEEBLE', 'AS', 'IT', 'WAS'] +3764-168671-0051-1775: ref=['HERE', 'A', 'REMARK', 'BECOMES', 'NECESSARY'] +3764-168671-0051-1775: hyp=['HERE', 'A', 'REMARK', 'BECOMES', 'NECESSARY'] +3764-168671-0052-1776: ref=['FAUCHELEVENT', 'WHATEVER', 'HIS', 'ANGUISH', 'OFFERED', 'A', 'DRINK', 'BUT', 'HE', 'DID', 'NOT', 'EXPLAIN', 'HIMSELF', 'ON', 'ONE', 'POINT', 'WHO', 'WAS', 'TO', 'PAY'] +3764-168671-0052-1776: hyp=['A', 'FISHE', 'WHATEVER', 'HIS', 'ANGUISH', 'OFFERED', 'A', 'DRINK', 'BUT', 'HE', 'DID', 'NOT', 'EXPLAIN', 'HIMSELF', 'ON', 'ONE', 'POINT', 'WHO', 'WAS', 'TO', 'PAY'] +3764-168671-0053-1777: ref=['THE', 'GRAVE', 'DIGGER', 'WENT', 'ON', 'WITH', 'A', 'SUPERIOR', 'SMILE'] +3764-168671-0053-1777: hyp=['THE', 'GRAVE', 'DIGGER', 'WENT', 'ON', 'WITH', 'THE', 'SUPERIOR', 'SMILE'] +3764-168671-0054-1778: ref=['ONE', 'MUST', 'EAT'] +3764-168671-0054-1778: hyp=['ONE', 'MUST', 'EAT'] +3997-180294-0000-1800: ref=['THE', 'DUKE', 'COMES', 'EVERY', 'MORNING', 'THEY', 'WILL', 'TELL', 'HIM', 'WHEN', 'HE', 'COMES', 'THAT', 'I', 'AM', 'ASLEEP', 'AND', 'PERHAPS', 'HE', 'WILL', 'WAIT', 'UNTIL', 'I', 'WAKE'] +3997-180294-0000-1800: hyp=['THE', 'DUKE', 'COMES', 'EVERY', 'MORNING', 'THEY', 'WILL', 'TELL', 'HIM', 'WHEN', 'HE', 'COMES', 'THAT', 'I', 'AM', 'ASLEEP', 'AND', 'PERHAPS', 'HE', 'WILL', 'WAIT', 'UNTIL', 'I', 'AWAKE'] +3997-180294-0001-1801: ref=['YES', 'BUT', 'IF', 'I', 'SHOULD', 'ALREADY', 'ASK', 'FOR', 'SOMETHING', 'WHAT'] +3997-180294-0001-1801: hyp=['YES', 'BUT', 'IF', 'I', 'SHOULD', 'ALREADY', 'ASK', 'FOR', 'SOMETHING', 'WHAT'] +3997-180294-0002-1802: ref=['WELL', 'DO', 'IT', 'FOR', 'ME', 'FOR', 'I', 'SWEAR', 'TO', 'YOU', 'THAT', 'I', "DON'T", 
'LOVE', 'YOU', 'AS', 'THE', 'OTHERS', 'HAVE', 'LOVED', 'YOU'] +3997-180294-0002-1802: hyp=['WELL', 'DO', 'IT', 'FOR', 'ME', 'FOR', 'I', 'SWEAR', 'TO', 'YOU', 'THY', "DON'T", 'LOVE', 'YOU', 'AS', 'THE', 'OTHERS', 'HAVE', 'LOVED', 'YOU'] +3997-180294-0003-1803: ref=['THERE', 'ARE', 'BOLTS', 'ON', 'THE', 'DOOR', 'WRETCH'] +3997-180294-0003-1803: hyp=['THERE', 'ARE', 'BOLTS', 'IN', 'THE', 'DOOR', 'WRETCH'] +3997-180294-0004-1804: ref=['I', "DON'T", 'KNOW', 'HOW', 'IT', 'IS', 'BUT', 'IT', 'SEEMS', 'TO', 'ME', 'AS', 'IF', 'I', 'DO'] +3997-180294-0004-1804: hyp=['I', "DON'T", 'KNOW', 'HOW', 'IT', 'IS', 'BUT', 'IT', 'SEEMS', 'TO', 'ME', 'AS', 'IF', 'I', 'DO'] +3997-180294-0005-1805: ref=['NOW', 'GO', 'I', "CAN'T", 'KEEP', 'MY', 'EYES', 'OPEN'] +3997-180294-0005-1805: hyp=['THOU', 'GO', 'I', "CAN'T", 'KEEP', 'MY', 'EYES', 'OPEN'] +3997-180294-0006-1806: ref=['IT', 'SEEMED', 'TO', 'ME', 'AS', 'IF', 'THIS', 'SLEEPING', 'CITY', 'BELONGED', 'TO', 'ME', 'I', 'SEARCHED', 'MY', 'MEMORY', 'FOR', 'THE', 'NAMES', 'OF', 'THOSE', 'WHOSE', 'HAPPINESS', 'I', 'HAD', 'ONCE', 'ENVIED', 'AND', 'I', 'COULD', 'NOT', 'RECALL', 'ONE', 'WITHOUT', 'FINDING', 'MYSELF', 'THE', 'HAPPIER'] +3997-180294-0006-1806: hyp=['IT', 'SEEMS', 'TO', 'ME', 'AS', 'IF', 'THIS', 'SLEEPING', 'CITY', 'BELONGS', 'TO', 'ME', 'I', 'SEARCHED', 'MY', 'MEMORY', 'FOR', 'THE', 'NAMES', 'OF', 'THOSE', 'WHOSE', 'HAPPINESS', 'I', 'HAD', 'ONCE', 'ENVIED', 'AND', 'I', 'COULD', 'NOT', 'RECALL', 'ONE', 'WITHOUT', 'FINDING', 'MYSELF', 'THE', 'HAPPIER'] +3997-180294-0007-1807: ref=['EDUCATION', 'FAMILY', 'FEELING', 'THE', 'SENSE', 'OF', 'DUTY', 'THE', 'FAMILY', 'ARE', 'STRONG', 'SENTINELS', 'BUT', 'THERE', 'ARE', 'NO', 'SENTINELS', 'SO', 'VIGILANT', 'AS', 'NOT', 'TO', 'BE', 'DECEIVED', 'BY', 'A', 'GIRL', 'OF', 'SIXTEEN', 'TO', 'WHOM', 'NATURE', 'BY', 'THE', 'VOICE', 'OF', 'THE', 'MAN', 'SHE', 'LOVES', 'GIVES', 'THE', 'FIRST', 'COUNSELS', 'OF', 'LOVE', 'ALL', 'THE', 'MORE', 'ARDENT', 'BECAUSE', 'THEY', 'SEEM', 'SO', 'PURE'] +3997-180294-0007-1807: hyp=['EDUCATION', 'FAMILY', 'FEELING', 'THE', 'SENSE', 'OF', 'DUTY', 'THE', 'FAMILY', 'ARE', 'STRONG', 'SENTINELS', 'BUT', 'THERE', 'ARE', 'NO', 'SENTINELS', 'SO', 'VIGILANT', 'AS', 'NOT', 'TO', 'BE', 'DECEIVED', 'BY', 'A', 'GIRL', 'OF', 'SIXTEEN', 'TO', 'WHOM', 'NATURE', 'BY', 'THE', 'VOICE', 'OF', 'THE', 'MAN', 'SHE', 'LOVES', 'GIVES', 'THE', 'FIRST', 'COUNCIL', 'OF', 'LOVE', 'ALL', 'THE', 'MORE', 'ARDENTS', 'BECAUSE', 'THEY', 'SEEM', 'SO', 'PURE'] +3997-180294-0008-1808: ref=['THE', 'MORE', 'A', 'GIRL', 'BELIEVES', 'IN', 'GOODNESS', 'THE', 'MORE', 'EASILY', 'WILL', 'SHE', 'GIVE', 'WAY', 'IF', 'NOT', 'TO', 'HER', 'LOVER', 'AT', 'LEAST', 'TO', 'LOVE', 'FOR', 'BEING', 'WITHOUT', 'MISTRUST', 'SHE', 'IS', 'WITHOUT', 'FORCE', 'AND', 'TO', 'WIN', 'HER', 'LOVE', 'IS', 'A', 'TRIUMPH', 'THAT', 'CAN', 'BE', 'GAINED', 'BY', 'ANY', 'YOUNG', 'MAN', 'OF', 'FIVE', 'AND', 'TWENTY', 'SEE', 'HOW', 'YOUNG', 'GIRLS', 'ARE', 'WATCHED', 'AND', 'GUARDED'] +3997-180294-0008-1808: hyp=['THE', 'MORE', 'GIRL', 'BELIEVES', 'IN', 'GOODNESS', 'THE', 'MORE', 'IS', 'WE', 'WILL', 'SHE', 'GIVE', 'WAY', 'IF', 'NOT', 'TO', 'HER', 'LOVER', 'AT', 'LEAST', 'TO', 'LOVE', 'FOR', 'BEING', 'WITHOUT', 'MISTRUST', 'SHE', 'IS', 'WITHOUT', 'FORCE', 'AND', 'TO', 'WIN', 'HER', 'LOVE', 'AS', 'A', 'TRIUMPH', 'THAT', 'CAN', 'BE', 'GAINED', 'BY', 'ANY', 'YOUNG', 'MAN', 'OF', 'FIVE', 'AND', 'TWENTY', 'SEE', 'HOW', 'YOUNG', 'GIRLS', 'ARE', 'WATCHED', 'AND', 'GUARDED'] +3997-180294-0009-1809: ref=['THEN', 'HOW', 'SURELY', 'MUST', 'THEY', 'DESIRE', 'THE', 'WORLD', 
'WHICH', 'IS', 'HIDDEN', 'FROM', 'THEM', 'HOW', 'SURELY', 'MUST', 'THEY', 'FIND', 'IT', 'TEMPTING', 'HOW', 'SURELY', 'MUST', 'THEY', 'LISTEN', 'TO', 'THE', 'FIRST', 'VOICE', 'WHICH', 'COMES', 'TO', 'TELL', 'ITS', 'SECRETS', 'THROUGH', 'THEIR', 'BARS', 'AND', 'BLESS', 'THE', 'HAND', 'WHICH', 'IS', 'THE', 'FIRST', 'TO', 'RAISE', 'A', 'CORNER', 'OF', 'THE', 'MYSTERIOUS', 'VEIL'] +3997-180294-0009-1809: hyp=['THEN', 'HOW', 'SURELY', 'MUST', 'THEY', 'DESIRE', 'THE', 'WORLD', 'WHICH', 'IS', 'HIDDEN', 'FROM', 'THEM', 'HOSTUALLY', 'MUST', 'THEY', 'FIND', 'IT', 'TEMPTING', 'HOW', 'SURELY', 'MUST', 'THEY', 'LISTEN', 'TO', 'THE', 'FIRST', 'VOICE', 'WHICH', 'COMES', 'TO', 'TELL', 'ITS', 'SECRETS', 'THROUGH', 'THEIR', 'BARS', 'AND', 'BLESS', 'THE', 'HAND', 'WHICH', 'IS', 'THE', 'FIRST', 'TO', 'RAISE', 'A', 'CORNER', 'OF', 'THE', 'MYSTERY', 'VEIL'] +3997-180294-0010-1810: ref=['WITH', 'THEM', 'THE', 'BODY', 'HAS', 'WORN', 'OUT', 'THE', 'SOUL', 'THE', 'SENSES', 'HAVE', 'BURNED', 'UP', 'THE', 'HEART', 'DISSIPATION', 'HAS', 'BLUNTED', 'THE', 'FEELINGS'] +3997-180294-0010-1810: hyp=['WITH', 'THEM', 'THE', 'BODY', 'HAS', 'WORN', 'OUT', 'THE', 'SOUL', 'THE', 'SENSES', 'HALF', 'BURNED', 'UP', 'THE', 'HEART', 'DISSIPATION', 'HAS', 'BLUNTED', 'THE', 'FEELINGS'] +3997-180294-0011-1811: ref=['THEY', 'LOVE', 'BY', 'PROFESSION', 'AND', 'NOT', 'BY', 'INSTINCT'] +3997-180294-0011-1811: hyp=['THEY', 'LOVE', 'BY', 'PROFESSION', 'AND', 'NOT', 'BY', 'INSTINCT'] +3997-180294-0012-1812: ref=['WHEN', 'A', 'CREATURE', 'WHO', 'HAS', 'ALL', 'HER', 'PAST', 'TO', 'REPROACH', 'HERSELF', 'WITH', 'IS', 'TAKEN', 'ALL', 'AT', 'ONCE', 'BY', 'A', 'PROFOUND', 'SINCERE', 'IRRESISTIBLE', 'LOVE', 'OF', 'WHICH', 'SHE', 'HAD', 'NEVER', 'FELT', 'HERSELF', 'CAPABLE', 'WHEN', 'SHE', 'HAS', 'CONFESSED', 'HER', 'LOVE', 'HOW', 'ABSOLUTELY', 'THE', 'MAN', 'WHOM', 'SHE', 'LOVES', 'DOMINATES', 'HER'] +3997-180294-0012-1812: hyp=['WHEN', 'A', 'CREATURE', 'WHO', 'HAS', 'ALL', 'HER', 'PAST', 'TO', 'REPROACH', 'HERSELF', 'WITH', 'IS', 'TAKEN', 'ALL', 'AT', 'ONCE', 'BY', 'A', 'PROFOUND', 'SINCERE', 'IRRESISTIBLE', 'LOVE', 'OF', 'WHICH', 'SHE', 'HAD', 'NEVER', 'FELT', 'HERSELF', 'CAPABLE', 'WHEN', 'SHE', 'HAS', 'CONFESSED', 'HER', 'LOVE', 'HOW', 'ABSOLUTELY', 'THE', 'MAN', 'WHOM', 'SHE', 'LOVES', 'DOMINATES', 'HER'] +3997-180294-0013-1813: ref=['THEY', 'KNOW', 'NOT', 'WHAT', 'PROOF', 'TO', 'GIVE'] +3997-180294-0013-1813: hyp=['THEY', 'KNOW', 'NOT', 'WHAT', 'PROOF', 'TO', 'GIVE'] +3997-180294-0014-1814: ref=['IN', 'ORDER', 'TO', 'DISTURB', 'THE', 'LABOURERS', 'IN', 'THE', 'FIELD', 'WAS', 'ONE', 'DAY', 'DEVOURED', 'BY', 'A', 'WOLF', 'BECAUSE', 'THOSE', 'WHOM', 'HE', 'HAD', 'SO', 'OFTEN', 'DECEIVED', 'NO', 'LONGER', 'BELIEVED', 'IN', 'HIS', 'CRIES', 'FOR', 'HELP'] +3997-180294-0014-1814: hyp=['IN', 'ORDER', 'TO', 'DISTURB', 'THE', 'LABORERS', 'IN', 'THE', 'FIELDS', 'WAS', 'ONE', 'DAY', 'DEVOURED', 'BY', 'A', 'WOLF', 'BECAUSE', 'THOSE', 'WHOM', 'HE', 'HAD', 'SO', 'OFTEN', 'DECEIVED', 'NO', 'LONGER', 'BELIEVED', 'IN', 'HIS', 'CRIES', 'FOR', 'HELP'] +3997-180294-0015-1815: ref=['IT', 'IS', 'THE', 'SAME', 'WITH', 'THESE', 'UNHAPPY', 'WOMEN', 'WHEN', 'THEY', 'LOVE', 'SERIOUSLY'] +3997-180294-0015-1815: hyp=['THIS', 'IS', 'THE', 'SAME', 'WITH', 'THESE', 'UNHAPPY', 'WOMEN', 'WHEN', 'THEY', 'LOVE', 'SERIOUSLY'] +3997-180294-0016-1816: ref=['BUT', 'WHEN', 'THE', 'MAN', 'WHO', 'INSPIRES', 'THIS', 'REDEEMING', 'LOVE', 'IS', 'GREAT', 'ENOUGH', 'IN', 'SOUL', 'TO', 'RECEIVE', 'IT', 'WITHOUT', 'REMEMBERING', 'THE', 'PAST', 'WHEN', 'HE', 'GIVES', 'HIMSELF', 'UP', 'TO', 'IT', 
'WHEN', 'IN', 'SHORT', 'HE', 'LOVES', 'AS', 'HE', 'IS', 'LOVED', 'THIS', 'MAN', 'DRAINS', 'AT', 'ONE', 'DRAUGHT', 'ALL', 'EARTHLY', 'EMOTIONS', 'AND', 'AFTER', 'SUCH', 'A', 'LOVE', 'HIS', 'HEART', 'WILL', 'BE', 'CLOSED', 'TO', 'EVERY', 'OTHER'] +3997-180294-0016-1816: hyp=['BUT', 'WHEN', 'THE', 'MAN', 'WHO', 'INSPIRES', 'THIS', 'REDEEMING', 'LOVE', 'IS', 'GREAT', 'ENOUGH', 'IN', 'SOUL', 'TO', 'RECEIVE', 'IT', 'WITHOUT', 'REMEMBERING', 'THE', 'PAST', 'WHEN', 'HE', 'GIVES', 'HIMSELF', 'UP', 'TO', 'IT', 'WHEN', 'IN', 'SHORT', 'HE', 'LOVES', 'AS', 'HE', 'IS', 'LOVED', 'THIS', 'MAN', 'DREAMS', 'AT', 'ONE', 'DROUGHT', 'ALL', 'EARTHLY', 'EMOTIONS', 'AND', 'AFTER', 'SUCH', 'A', 'LOVE', 'HIS', 'HEART', 'WILL', 'BE', 'CLOSED', 'TO', 'EVERY', 'OTHER'] +3997-180294-0017-1817: ref=['BUT', 'TO', 'RETURN', 'TO', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'LIAISON'] +3997-180294-0017-1817: hyp=['BUT', 'TO', 'RETURN', 'TO', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'LEAR', 'SONG'] +3997-180294-0018-1818: ref=['WHEN', 'I', 'REACHED', 'HOME', 'I', 'WAS', 'IN', 'A', 'STATE', 'OF', 'MAD', 'GAIETY'] +3997-180294-0018-1818: hyp=['WHEN', 'I', 'REACHED', 'HOME', 'I', 'WAS', 'IN', 'A', 'STATE', 'OF', 'MAD', 'GAIETY'] +3997-180294-0019-1819: ref=['THE', 'WOMAN', 'BECOMES', 'THE', "MAN'S", 'MISTRESS', 'AND', 'LOVES', 'HIM'] +3997-180294-0019-1819: hyp=['THE', 'WOMAN', 'BECOMES', 'THE', "MAN'S", 'MISTRESS', 'AND', 'LOVES', 'HIM'] +3997-180294-0020-1820: ref=['HOW', 'WHY'] +3997-180294-0020-1820: hyp=['HOW', 'WHY'] +3997-180294-0021-1821: ref=['MY', 'WHOLE', 'BEING', 'WAS', 'EXALTED', 'INTO', 'JOY', 'AT', 'THE', 'MEMORY', 'OF', 'THE', 'WORDS', 'WE', 'HAD', 'EXCHANGED', 'DURING', 'THAT', 'FIRST', 'NIGHT'] +3997-180294-0021-1821: hyp=['MY', 'WHOLE', 'BEING', 'WAS', 'EXALTED', 'INTO', 'JOY', 'AT', 'THE', 'MEMORY', 'OF', 'THE', 'WORDS', 'WE', 'HAD', 'EXCHANGED', 'DURING', 'THAT', 'FIRST', 'NIGHT'] +3997-180294-0022-1822: ref=['HERE', 'ARE', 'MY', 'ORDERS', 'TO', 'NIGHT', 'AT', 'THE', 'VAUDEVILLE'] +3997-180294-0022-1822: hyp=['HERE', 'ARE', 'MY', 'ORDERS', 'TO', 'NIGHT', 'AT', 'A', 'VAUDEVILLE'] +3997-180294-0023-1823: ref=['COME', 'DURING', 'THE', 'THIRD', "ENTR'ACTE"] +3997-180294-0023-1823: hyp=['COME', 'DURING', 'THE', 'THIRD', 'AND', 'TRACT'] +3997-180294-0024-1824: ref=['THE', 'BOXES', 'FILLED', 'ONE', 'AFTER', 'ANOTHER'] +3997-180294-0024-1824: hyp=['THE', 'BOXES', 'FILLED', 'ONE', 'AFTER', 'ANOTHER'] +3997-180294-0025-1825: ref=['ONLY', 'ONE', 'REMAINED', 'EMPTY', 'THE', 'STAGE', 'BOX'] +3997-180294-0025-1825: hyp=['ONLY', 'ONE', 'REMAINS', 'EMPTY', 'THE', 'STAGE', 'BOX'] +3997-180294-0026-1826: ref=['AT', 'THE', 'BEGINNING', 'OF', 'THE', 'THIRD', 'ACT', 'I', 'HEARD', 'THE', 'DOOR', 'OF', 'THE', 'BOX', 'ON', 'WHICH', 'MY', 'EYES', 'HAD', 'BEEN', 'ALMOST', 'CONSTANTLY', 'FIXED', 'OPEN', 'AND', 'MARGUERITE', 'APPEARED'] +3997-180294-0026-1826: hyp=['AT', 'THE', 'BEGINNING', 'OF', 'THE', 'THIRD', 'ACT', 'I', 'HEARD', 'THE', 'DOOR', 'OF', 'THE', 'BOX', 'ON', 'WHICH', 'MY', 'EYES', 'HAD', 'BEEN', 'ALMOST', 'CONSTANTLY', 'FIXED', 'OPEN', 'AND', 'MARGUERITE', 'APPEARED'] +3997-180294-0027-1827: ref=['DID', 'SHE', 'LOVE', 'ME', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'THE', 'MORE', 'BEAUTIFUL', 'SHE', 'LOOKED', 'THE', 'HAPPIER', 'I', 'SHOULD', 'BE'] +3997-180294-0027-1827: hyp=['THAT', 'SHE', 'LOVED', 'ME', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'THE', 'MORE', 'BEAUTIFUL', 'SHE', 'LOOKS', 'THE', 'HAPPIER', 'I', 'SHOULD', 'BE'] +3997-180294-0028-1828: ref=['WHAT', 'IS', 'THE', 'MATTER', 'WITH', 'YOU', 'TO', 'NIGHT', 'SAID', 'MARGUERITE', 'RISING', 
'AND', 'COMING', 'TO', 'THE', 'BACK', 'OF', 'THE', 'BOX', 'AND', 'KISSING', 'ME', 'ON', 'THE', 'FOREHEAD'] +3997-180294-0028-1828: hyp=['WHAT', 'IS', 'THE', 'MATTER', 'WITH', 'YOU', 'TO', 'NIGHT', 'SAID', 'MARGUERITE', 'RISING', 'AND', 'COMING', 'TO', 'THE', 'BACK', 'OF', 'THE', 'BOX', 'AND', 'KISSING', 'ME', 'ON', 'THE', 'FOREHEAD'] +3997-180294-0029-1829: ref=['YOU', 'SHOULD', 'GO', 'TO', 'BED', 'SHE', 'REPLIED', 'WITH', 'THAT', 'IRONICAL', 'AIR', 'WHICH', 'WENT', 'SO', 'WELL', 'WITH', 'HER', 'DELICATE', 'AND', 'WITTY', 'FACE'] +3997-180294-0029-1829: hyp=['YOU', 'SHOULD', 'GO', 'TO', 'BED', 'SHE', 'REPLIED', 'WITH', 'THAT', 'IRONIC', 'ERROR', 'WHICH', 'WENT', 'SO', 'WELL', 'WITH', 'HER', 'DELICATE', 'AND', 'WITTY', 'FACE'] +3997-180294-0030-1830: ref=['WHERE', 'AT', 'HOME'] +3997-180294-0030-1830: hyp=['WHERE', 'AT', 'HOME'] +3997-180294-0031-1831: ref=['YOU', 'STILL', 'LOVE', 'ME', 'CAN', 'YOU', 'ASK'] +3997-180294-0031-1831: hyp=['YOU', 'STILL', 'LOVE', 'ME', 'CAN', 'YOU', 'ASK'] +3997-180294-0032-1832: ref=['BECAUSE', 'YOU', "DON'T", 'LIKE', 'SEEING', 'HIM'] +3997-180294-0032-1832: hyp=['BECAUSE', 'YOU', "DON'T", 'LIKE', 'SEEING', 'HIM'] +3997-180294-0033-1833: ref=['NONETHELESS', 'I', 'WAS', 'VERY', 'UNHAPPY', 'ALL', 'THE', 'REST', 'OF', 'THE', 'EVENING', 'AND', 'WENT', 'AWAY', 'VERY', 'SADLY', 'AFTER', 'HAVING', 'SEEN', 'PRUDENCE', 'THE', 'COUNT', 'AND', 'MARGUERITE', 'GET', 'INTO', 'THE', 'CARRIAGE', 'WHICH', 'WAS', 'WAITING', 'FOR', 'THEM', 'AT', 'THE', 'DOOR'] +3997-180294-0033-1833: hyp=['NONE', 'THE', 'LESS', 'I', 'WAS', 'VERY', 'UNHAPPY', 'ALL', 'THE', 'REST', 'OF', 'THE', 'EVENING', 'AND', 'WENT', 'AWAY', 'VERY', 'SADLY', 'AFTER', 'HAVING', 'SEEN', 'PRUDENCE', 'THE', 'COUNT', 'AND', 'MARGUERITE', 'GAINED', 'TO', 'THE', 'CARRIAGE', 'WHICH', 'WAS', 'WINNING', 'FOR', 'THEM', 'AT', 'THE', 'DOOR'] +3997-180297-0000-1834: ref=['I', 'HAVE', 'NOT', 'COME', 'TO', 'HINDER', 'YOU', 'FROM', 'LEAVING', 'PARIS'] +3997-180297-0000-1834: hyp=['I', 'HAVE', 'NOT', 'COME', 'TO', 'HINDER', 'YOU', 'FROM', 'LEAVING', 'PARIS'] +3997-180297-0001-1835: ref=['YOU', 'IN', 'THE', 'WAY', 'MARGUERITE', 'BUT', 'HOW'] +3997-180297-0001-1835: hyp=['YOU', 'IN', 'THE', 'WAY', 'MARGUERITE', 'BUT', 'HOW'] +3997-180297-0002-1836: ref=['WELL', 'YOU', 'MIGHT', 'HAVE', 'HAD', 'A', 'WOMAN', 'HERE', 'SAID', 'PRUDENCE', 'AND', 'IT', 'WOULD', 'HARDLY', 'HAVE', 'BEEN', 'AMUSING', 'FOR', 'HER', 'TO', 'SEE', 'TWO', 'MORE', 'ARRIVE'] +3997-180297-0002-1836: hyp=['WELL', 'YOU', 'MIGHT', 'HAVE', 'HAD', 'A', 'WOMAN', 'HERE', 'SAID', 'PRUDENCE', 'AND', 'IT', 'WOULD', 'HARDLY', 'HAVE', 'BEEN', 'AMUSING', 'FOR', 'HER', 'TO', 'SEE', 'TWO', 'MORE', 'ARRIVE'] +3997-180297-0003-1837: ref=['DURING', 'THIS', 'REMARK', 'MARGUERITE', 'LOOKED', 'AT', 'ME', 'ATTENTIVELY'] +3997-180297-0003-1837: hyp=['DURING', 'THIS', 'REMARK', 'MARGUERITE', 'LOOKED', 'AT', 'ME', 'ATTENTIVELY'] +3997-180297-0004-1838: ref=['MY', 'DEAR', 'PRUDENCE', 'I', 'ANSWERED', 'YOU', 'DO', 'NOT', 'KNOW', 'WHAT', 'YOU', 'ARE', 'SAYING'] +3997-180297-0004-1838: hyp=['MY', 'DEAR', 'PRUDENCE', 'I', 'ANSWERED', 'YOU', 'DO', 'NOT', 'KNOW', 'WHAT', 'YOU', 'ARE', 'SAYING'] +3997-180297-0005-1839: ref=['YES', 'BUT', 'BESIDES', 'NOT', 'WISHING', 'TO', 'PUT', 'YOU', 'OUT', 'I', 'WAS', 'SURE', 'THAT', 'IF', 'YOU', 'CAME', 'AS', 'FAR', 'AS', 'MY', 'DOOR', 'YOU', 'WOULD', 'WANT', 'TO', 'COME', 'UP', 'AND', 'AS', 'I', 'COULD', 'NOT', 'LET', 'YOU', 'I', 'DID', 'NOT', 'WISH', 'TO', 'LET', 'YOU', 'GO', 'AWAY', 'BLAMING', 'ME', 'FOR', 'SAYING', 'NO'] +3997-180297-0005-1839: hyp=['YES', 
'BUT', 'BESIDES', 'NOT', 'WISHING', 'TO', 'PUT', 'YOU', 'OUT', 'I', 'WAS', 'SURE', 'THAT', 'IF', 'YOU', 'CAME', 'AS', 'FAR', 'AS', 'MY', 'DOOR', 'YOU', 'WOULD', 'WANT', 'TO', 'COME', 'UP', 'AND', 'AS', 'I', 'COULD', 'NOT', 'LET', 'YOU', 'I', 'DID', 'NOT', 'WISH', 'TO', 'LET', 'YOU', 'GO', 'AWAY', 'BLAMING', 'ME', 'FOR', 'SAYING', 'NO'] +3997-180297-0006-1840: ref=['BECAUSE', 'I', 'AM', 'WATCHED', 'AND', 'THE', 'LEAST', 'SUSPICION', 'MIGHT', 'DO', 'ME', 'THE', 'GREATEST', 'HARM'] +3997-180297-0006-1840: hyp=['BECAUSE', 'I', 'AM', 'WATCHED', 'AND', 'THE', 'LEAST', 'SUSPICION', 'MIGHT', 'TO', 'ME', 'THE', 'GREATEST', 'HARM'] +3997-180297-0007-1841: ref=['IS', 'THAT', 'REALLY', 'THE', 'ONLY', 'REASON'] +3997-180297-0007-1841: hyp=['IS', 'THAT', 'REALLY', 'THE', 'ONLY', 'REASON'] +3997-180297-0008-1842: ref=['IF', 'THERE', 'WERE', 'ANY', 'OTHER', 'I', 'WOULD', 'TELL', 'YOU', 'FOR', 'WE', 'ARE', 'NOT', 'TO', 'HAVE', 'ANY', 'SECRETS', 'FROM', 'ONE', 'ANOTHER', 'NOW'] +3997-180297-0008-1842: hyp=['IF', 'THERE', 'WERE', 'ANY', 'OTHER', 'I', 'WOULD', 'TELL', 'YOU', 'FOR', 'WE', 'ARE', 'NOT', 'TO', 'HAVE', 'ANY', 'SECRETS', 'FROM', 'ONE', 'ANOTHER', 'NOW'] +3997-180297-0009-1843: ref=['HONESTLY', 'DO', 'YOU', 'CARE', 'FOR', 'ME', 'A', 'LITTLE', 'A', 'GREAT', 'DEAL'] +3997-180297-0009-1843: hyp=['ON', 'THE', 'STREET', 'DO', 'YOU', 'CARE', 'FOR', 'ME', 'A', 'LITTLE', 'A', 'GREAT', 'DEAL'] +3997-180297-0010-1844: ref=['I', 'FANCIED', 'FOR', 'A', 'MOMENT', 'THAT', 'I', 'MIGHT', 'GIVE', 'MYSELF', 'THAT', 'HAPPINESS', 'FOR', 'SIX', 'MONTHS', 'YOU', 'WOULD', 'NOT', 'HAVE', 'IT', 'YOU', 'INSISTED', 'ON', 'KNOWING', 'THE', 'MEANS'] +3997-180297-0010-1844: hyp=['I', 'FANCIED', 'FOR', 'A', 'MOMENT', 'THAT', 'IT', 'MIGHT', 'GIVE', 'MYSELF', 'THAT', 'HAPPINESS', 'FOR', 'SIX', 'MONTHS', 'YOU', 'WILL', 'NOT', 'HAVE', 'IT', 'YOU', 'INSISTED', 'ON', 'KNOWING', 'THE', 'MEANS'] +3997-180297-0011-1845: ref=['WELL', 'GOOD', 'HEAVENS', 'THE', 'MEANS', 'WERE', 'EASY', 'ENOUGH', 'TO', 'GUESS'] +3997-180297-0011-1845: hyp=['WELL', 'GOOD', 'HEAVENS', 'THE', 'MEANS', 'WERE', 'EASY', 'ENOUGH', 'TO', 'GUESS'] +3997-180297-0012-1846: ref=['I', 'LISTENED', 'AND', 'I', 'GAZED', 'AT', 'MARGUERITE', 'WITH', 'ADMIRATION'] +3997-180297-0012-1846: hyp=['I', 'LISTENED', 'AND', 'I', 'GAZED', 'AT', 'MARGUERITE', 'WITH', 'ADMIRATION'] +3997-180297-0013-1847: ref=['WHEN', 'I', 'THOUGHT', 'THAT', 'THIS', 'MARVELLOUS', 'CREATURE', 'WHOSE', 'FEET', 'I', 'HAD', 'ONCE', 'LONGED', 'TO', 'KISS', 'WAS', 'WILLING', 'TO', 'LET', 'ME', 'TAKE', 'MY', 'PLACE', 'IN', 'HER', 'THOUGHTS', 'MY', 'PART', 'IN', 'HER', 'LIFE', 'AND', 'THAT', 'I', 'WAS', 'NOT', 'YET', 'CONTENT', 'WITH', 'WHAT', 'SHE', 'GAVE', 'ME', 'I', 'ASKED', 'IF', "MAN'S", 'DESIRE', 'HAS', 'INDEED', 'LIMITS', 'WHEN', 'SATISFIED', 'AS', 'PROMPTLY', 'AS', 'MINE', 'HAD', 'BEEN', 'IT', 'REACHED', 'AFTER', 'SOMETHING', 'FURTHER'] +3997-180297-0013-1847: hyp=['WHEN', 'THEY', 'THOUGHT', 'THAT', 'THIS', 'MARVELLOUS', 'CREATURE', 'WHOSE', 'FEET', 'I', 'HAD', 'ONCE', 'LONGED', 'TO', 'KISS', 'WAS', 'WILLING', 'TO', 'LET', 'ME', 'TAKE', 'MY', 'PLACE', 'IN', 'HER', 'THOUGHTS', 'BY', 'PARTS', 'IN', 'HER', 'LIFE', 'AND', 'THAT', 'I', 'WAS', 'NOT', 'YET', 'CONTENT', 'WITH', 'WHAT', 'SHE', 'GAVE', 'ME', 'I', 'ASKED', 'IF', "MEN'S", 'DESIRE', 'HAD', 'INDEED', 'LIMITS', 'WHEN', 'SATISFIED', 'AS', 'PROMPTLY', 'AS', 'MINE', 'HAD', 'BEEN', 'IT', 'REACHED', 'AFTER', 'SOMETHING', 'FURTHER'] +3997-180297-0014-1848: ref=['TRULY', 'SHE', 'CONTINUED', 'WE', 'POOR', 'CREATURES', 'OF', 'CHANCE', 'HAVE', 'FANTASTIC', 
'DESIRES', 'AND', 'INCONCEIVABLE', 'LOVES'] +3997-180297-0014-1848: hyp=['TRULY', 'SHE', 'CONTINUED', 'WE', 'POOR', 'CREATURES', 'OF', 'CHANCE', 'HAVE', 'FANTASTIC', 'DESIRE', 'AND', 'INCONCEIVABLE', 'LOVES'] +3997-180297-0015-1849: ref=['WE', 'ARE', 'NOT', 'ALLOWED', 'TO', 'HAVE', 'HEARTS', 'UNDER', 'PENALTY', 'OF', 'BEING', 'HOOTED', 'DOWN', 'AND', 'OF', 'RUINING', 'OUR', 'CREDIT'] +3997-180297-0015-1849: hyp=['WE', 'ARE', 'NOT', 'ALLOWED', 'TO', 'HAVE', 'HEARTS', 'UNDER', 'PENALTY', 'OF', 'BEING', 'HOOTED', 'DOWN', 'AND', 'OF', 'RUINING', 'OUR', 'CREDIT'] +3997-180297-0016-1850: ref=['WE', 'NO', 'LONGER', 'BELONG', 'TO', 'OURSELVES'] +3997-180297-0016-1850: hyp=['WE', 'NO', 'LONGER', 'BELONG', 'TO', 'OURSELVES'] +3997-180297-0017-1851: ref=['WE', 'STAND', 'FIRST', 'IN', 'THEIR', 'SELF', 'ESTEEM', 'LAST', 'IN', 'THEIR', 'ESTEEM'] +3997-180297-0017-1851: hyp=['WE', 'STAND', 'FIRST', 'IN', 'THEIR', 'SELF', 'ESTEEM', 'LAST', 'IN', 'THEIR', 'ESTEEM'] +3997-180297-0018-1852: ref=['NEVER', 'DO', 'THEY', 'GIVE', 'YOU', 'ADVICE', 'WHICH', 'IS', 'NOT', 'LUCRATIVE'] +3997-180297-0018-1852: hyp=['NEVER', 'DID', 'HE', 'GIVE', 'YOU', 'ADVICE', 'WHICH', 'IS', 'NOT', 'LOOK', 'ATTENTIVE'] +3997-180297-0019-1853: ref=['IT', 'MEANS', 'LITTLE', 'ENOUGH', 'TO', 'THEM', 'THAT', 'WE', 'SHOULD', 'HAVE', 'TEN', 'LOVERS', 'EXTRA', 'AS', 'LONG', 'AS', 'THEY', 'GET', 'DRESSES', 'OR', 'A', 'BRACELET', 'OUT', 'OF', 'THEM', 'AND', 'THAT', 'THEY', 'CAN', 'DRIVE', 'IN', 'OUR', 'CARRIAGE', 'FROM', 'TIME', 'TO', 'TIME', 'OR', 'COME', 'TO', 'OUR', 'BOX', 'AT', 'THE', 'THEATRE'] +3997-180297-0019-1853: hyp=['IT', 'MEANS', 'LITTLE', 'ENOUGH', 'TO', 'THEM', 'THAT', 'WE', 'SHOULD', 'HAVE', 'TEN', 'LOVERS', 'EXTRA', 'AS', 'LONG', 'AS', 'THEY', 'GET', 'DRESSES', 'OR', 'A', 'BRACELET', 'OUT', 'OF', 'THEM', 'AND', 'THAT', 'THEY', 'CAN', 'DRIVE', 'AND', 'ARE', 'CARRIAGE', 'FROM', 'TIME', 'TO', 'TIME', 'OR', 'COME', 'TO', 'OUR', 'BOX', 'AT', 'THE', 'FUTURE'] +3997-180297-0020-1854: ref=['SUCH', 'A', 'MAN', 'I', 'FOUND', 'IN', 'THE', 'DUKE', 'BUT', 'THE', 'DUKE', 'IS', 'OLD', 'AND', 'OLD', 'AGE', 'NEITHER', 'PROTECTS', 'NOR', 'CONSOLES'] +3997-180297-0020-1854: hyp=['SUCH', 'A', 'MAN', 'I', 'FOUND', 'IN', 'THE', 'DUKE', 'BUT', 'THE', 'DUKE', 'IS', 'OLD', 'AND', 'OLD', 'AGE', 'NEITHER', 'PROTECTS', 'NOR', 'CONSOLES'] +3997-180297-0021-1855: ref=['I', 'THOUGHT', 'I', 'COULD', 'ACCEPT', 'THE', 'LIFE', 'WHICH', 'HE', 'OFFERED', 'ME', 'BUT', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +3997-180297-0021-1855: hyp=['I', 'THOUGHT', 'I', 'COULD', 'ACCEPT', 'THE', 'LIFE', 'WHICH', 'HE', 'OFFERED', 'ME', 'OR', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +3997-180297-0022-1856: ref=['WHAT', 'I', 'LOVED', 'IN', 'YOU', 'WAS', 'NOT', 'THE', 'MAN', 'WHO', 'WAS', 'BUT', 'THE', 'MAN', 'WHO', 'WAS', 'GOING', 'TO', 'BE'] +3997-180297-0022-1856: hyp=['WHAT', 'I', 'LOVED', 'IN', 'YOU', 'WAS', 'NOT', 'THE', 'MAN', 'WHO', 'WAS', 'BUT', 'THE', 'MAN', 'WHO', 'WAS', 'GOING', 'TO', 'BE'] +3997-180297-0023-1857: ref=['MARGUERITE', 'TIRED', 'OUT', 'WITH', 'THIS', 'LONG', 'CONFESSION', 'THREW', 'HERSELF', 'BACK', 'ON', 'THE', 'SOFA', 'AND', 'TO', 'STIFLE', 'A', 'SLIGHT', 'COUGH', 'PUT', 'UP', 'HER', 'HANDKERCHIEF', 'TO', 'HER', 'LIPS', 'AND', 'FROM', 'THAT', 'TO', 'HER', 'EYES'] +3997-180297-0023-1857: hyp=['MARGUERITE', 'HIRED', 'OUT', 'WITH', 'THIS', 'LONG', 'CONFESSION', 'THREW', 'HERSELF', 'BACK', 'ON', 'THE', 'SOFA', 'AND', 'TO', 'STIFLE', 'A', 'SLIGHT', 'COUGH', 'PUT', 'UP', 'HER', 'HANDKERCHIEF', 'TO', 'HER', 'LIPS', 'AND', 'FROM', 'THAT', 'TO', 'HER', 'EYES'] 
+3997-180297-0024-1858: ref=['MARGUERITE', 'DO', 'WITH', 'ME', 'AS', 'YOU', 'WILL', 'I', 'AM', 'YOUR', 'SLAVE', 'YOUR', 'DOG', 'BUT', 'IN', 'THE', 'NAME', 'OF', 'HEAVEN', 'TEAR', 'UP', 'THE', 'LETTER', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'AND', 'DO', 'NOT', 'MAKE', 'ME', 'LEAVE', 'YOU', 'TO', 'MORROW', 'IT', 'WOULD', 'KILL', 'ME'] +3997-180297-0024-1858: hyp=['MARGUERITE', 'DO', 'WITH', 'ME', 'AS', 'YOU', 'WILL', 'I', 'AM', 'YOUR', 'SLAVE', 'YOUR', 'DOG', 'BUT', 'IN', 'THE', 'NAME', 'OF', 'HEAVEN', 'TEAR', 'UP', 'THE', 'LETTER', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'AND', 'DO', 'NOT', 'MAKE', 'ME', 'LEAVE', 'YOU', 'TO', 'MORROW', 'IT', 'WOULD', 'KILL', 'ME'] +3997-180297-0025-1859: ref=['MARGUERITE', 'DREW', 'THE', 'LETTER', 'FROM', 'HER', 'BOSOM', 'AND', 'HANDING', 'IT', 'TO', 'ME', 'WITH', 'A', 'SMILE', 'OF', 'INFINITE', 'SWEETNESS', 'SAID'] +3997-180297-0025-1859: hyp=['MARGUERITE', 'DREW', 'THE', 'LETTER', 'FROM', 'HER', 'BOSOM', 'AND', 'HANDING', 'IT', 'TO', 'ME', 'WITH', 'A', 'SMILE', 'OF', 'INFINITE', 'SWEETNESS', 'SAID'] +3997-180297-0026-1860: ref=['HERE', 'IT', 'IS', 'I', 'HAVE', 'BROUGHT', 'IT', 'BACK'] +3997-180297-0026-1860: hyp=['HERE', 'IT', 'IS', 'I', 'HAVE', 'BROUGHT', 'IT', 'BACK'] +3997-180297-0027-1861: ref=['I', 'TORE', 'THE', 'LETTER', 'INTO', 'FRAGMENTS', 'AND', 'KISSED', 'WITH', 'TEARS', 'THE', 'HAND', 'THAT', 'GAVE', 'IT', 'TO', 'ME'] +3997-180297-0027-1861: hyp=['I', 'TORE', 'THE', 'LETTER', 'INTO', 'FRAGMENTS', 'AND', 'KISSED', 'WITH', 'TEARS', 'THE', 'HAND', 'THAT', 'I', 'GAVE', 'IT', 'TO', 'ME'] +3997-180297-0028-1862: ref=['LOOK', 'HERE', 'PRUDENCE', 'DO', 'YOU', 'KNOW', 'WHAT', 'HE', 'WANTS', 'SAID', 'MARGUERITE'] +3997-180297-0028-1862: hyp=['LOOK', 'HERE', 'PRUDENCE', 'DO', 'YOU', 'KNOW', 'WHAT', 'HE', 'WANTS', 'SAID', 'MARGUERITE'] +3997-180297-0029-1863: ref=['HE', 'WANTS', 'YOU', 'TO', 'FORGIVE', 'HIM'] +3997-180297-0029-1863: hyp=['HE', 'WANTS', 'YOU', 'TO', 'FORGIVE', 'HIM'] +3997-180297-0030-1864: ref=['ONE', 'HAS', 'TO', 'BUT', 'HE', 'WANTS', 'MORE', 'THAN', 'THAT', 'WHAT', 'THEN'] +3997-180297-0030-1864: hyp=['ONE', 'HAS', 'TWO', 'BUT', 'HE', 'ONCE', 'MORE', 'THAN', 'THAT', 'WHAT', 'THEN'] +3997-180297-0031-1865: ref=['I', 'EMBRACED', 'MARGUERITE', 'UNTIL', 'SHE', 'WAS', 'ALMOST', 'STIFLED'] +3997-180297-0031-1865: hyp=['I', 'EMBRACED', 'MARGUERITE', 'UNTIL', 'SHE', 'WAS', 'ALMOST', 'STIFLED'] +3997-182399-0000-1779: ref=['OL', 'MISTAH', 'BUZZARD', 'GRINNED'] +3997-182399-0000-1779: hyp=['ALL', 'MISTER', 'BUZZARD', 'GRINNED'] +3997-182399-0001-1780: ref=['THIS', 'SOUNDED', 'LIKE', 'ANOTHER', 'STORY'] +3997-182399-0001-1780: hyp=['THIS', 'SOUNDED', 'LIKE', 'ANOTHER', 'STORY'] +3997-182399-0002-1781: ref=['HE', 'WAS', 'CURIOUS', 'ABOUT', 'THAT', 'BLACK', 'HEADED', 'COUSIN', 'OF', 'OL', 'MISTAH', 'BUZZARD', 'VERY', 'CURIOUS', 'INDEED'] +3997-182399-0002-1781: hyp=['HE', 'WAS', 'CURIOUS', 'ABOUT', 'THAT', 'BLACK', 'HEADED', 'COUSIN', 'OF', 'ALL', 'MISTER', 'BUZZARD', 'VERY', 'CURIOUS', 'INDEED'] +3997-182399-0003-1782: ref=['ANYWAY', 'HE', 'WOULD', 'FIND', 'OUT'] +3997-182399-0003-1782: hyp=['ANYWAY', 'HE', 'WOULD', 'FIND', 'OUT'] +3997-182399-0004-1783: ref=['PLEASE', 'MISTER', 'BUZZARD', 'PLEASE', 'TELL', 'US', 'THE', 'STORY', 'HE', 'BEGGED'] +3997-182399-0004-1783: hyp=['PLEASE', 'MISTER', 'BUZZARD', 'PLEASE', 'TELL', 'US', 'THE', 'STORY', 'HE', 'BEGGED'] +3997-182399-0005-1784: ref=['NOW', 'OL', 'MISTAH', 'BUZZARD', 'IS', 'NATURALLY', 'GOOD', 'NATURED', 'AND', 'ACCOMMODATING', 'AND', 'WHEN', 'PETER', 'BEGGED', 'SO', 'HARD', 'HE', 'JUST', "COULDN'T", 
'FIND', 'IT', 'IN', 'HIS', 'HEART', 'TO', 'REFUSE'] +3997-182399-0005-1784: hyp=['NOW', 'ALL', 'MISTER', 'BUZZARD', 'IS', 'NATURALLY', 'GOOD', 'NATURED', 'AND', 'ACCOMMODATING', 'AND', 'WHEN', 'PETER', 'BAG', 'SO', 'HARD', 'HE', 'JUST', "COULDN'T", 'FIND', 'IT', 'IN', 'HIS', 'HEART', 'TO', 'REFUSE'] +3997-182399-0006-1785: ref=['WAY', 'BACK', 'IN', 'THE', 'DAYS', 'WHEN', 'GRANDPAP', 'BUZZARD', 'HAD', 'HIS', 'LIL', 'FALLING', 'OUT', 'WITH', 'OL', 'KING', 'EAGLE', 'AND', 'DONE', 'FLY', 'SO', 'HIGH', 'HE', "SCO'TCH", 'THE', 'FEATHERS', 'OFFEN', 'HIS', 'HAID', 'HE', 'HAD', 'A', 'COUSIN', 'DID', 'GRANDPAP', 'BUZZARD', 'AND', 'THIS', 'COUSIN', 'WAS', 'JES', 'NATURALLY', 'LAZY', 'AND', 'NO', 'COUNT'] +3997-182399-0006-1785: hyp=['WAY', 'BACK', 'IN', 'THE', 'DAYS', 'WHEN', 'GRANDPAPAZZARD', 'HAD', 'HIS', 'LITTLE', 'FALLING', 'OUT', 'WITH', 'OLD', 'KING', 'EAGLE', 'AND', 'DON', 'FLIES', 'SO', 'HIGH', 'HE', 'SCORCHED', 'THE', 'FEATHERS', 'OFTEN', 'HIS', 'HEAD', 'HE', 'HAD', 'A', 'COUSIN', 'DID', 'GRANDPAP', 'BUZZARD', 'AND', 'THIS', 'COUSIN', 'WAS', 'JUST', 'NATURALLY', 'LAZY', 'AND', 'NO', 'COUNT'] +3997-182399-0007-1786: ref=['LIKE', 'MOST', 'NO', 'COUNT', 'PEOPLE', 'HE', 'USED', 'TO', 'MAKE', 'A', 'REGULAR', 'NUISANCE', 'OF', 'HISSELF', 'POKING', 'HIS', 'NOSE', 'INTO', "EV'YBODY'S", 'BUSINESS', 'AND', 'NEVER', 'TENDING', 'TO', 'HIS', 'OWN'] +3997-182399-0007-1786: hyp=['LIKE', 'MOST', 'NO', 'COUNT', 'PEOPLE', 'HE', 'USED', 'TO', 'MAKE', 'A', 'REGULAR', 'NUISANCE', 'OF', 'HIMSELF', 'POKING', 'HIS', 'NOSE', 'INTO', 'EVERY', "BODY'S", 'BUSINESS', 'AND', 'NEVER', 'TENDING', 'TO', 'HIS', 'OWN'] +3997-182399-0008-1787: ref=["WASN'T", 'ANYTHING', 'GOING', 'ON', 'THAT', 'THIS', 'TRIFLING', 'MEMBER', 'OF', 'THE', 'BUZZARD', "FAM'LY", "DIDN'T", 'FIND', 'OUT', 'ABOUT', 'AND', 'MEDDLE', 'IN', 'HE', 'COULD', 'ASK', 'MO', 'QUESTIONS', 'THAN', 'PETER', 'RABBIT', 'CAN', 'AN', 'ANYBODY', 'THAT', 'CAN', 'DO', 'THAT', 'HAS', 'GOT', 'TO', 'ASK', 'A', 'LOT'] +3997-182399-0008-1787: hyp=["WASN'T", 'ANYTHING', 'GOING', 'ON', 'THAT', 'THIS', 'TRIFLING', 'MEMBER', 'OF', 'THE', 'BUZZARD', 'FAMILY', "DIDN'T", 'FIND', 'OUT', 'ABOUT', 'A', 'MEDDLE', 'IN', 'HE', 'COULD', 'ASK', 'MORE', 'QUESTIONS', 'THAN', 'PETER', 'RABBIT', 'KENN', 'AND', 'ANYBODY', 'THAT', 'CAN', 'DO', 'THAT', 'HAS', 'GOT', 'TO', 'ASK', 'A', 'LOT'] +3997-182399-0009-1788: ref=['EVERYBODY', 'LOOKED', 'AT', 'PETER', 'AND', 'LAUGHED'] +3997-182399-0009-1788: hyp=['EVERYBODY', 'LOOKED', 'AT', 'PETER', 'AND', 'LAUGHED'] +3997-182399-0010-1789: ref=['SO', 'WE', 'UNS', 'SIT', 'ON', 'THE', 'CHIMNEY', 'TOPS', 'WHENEVER', 'OL', 'JACK', 'FROST', 'GETS', 'TO', 'STRAYING', 'DOWN', 'WHERE', 'HE', 'HAVE', 'NO', 'BUSINESS'] +3997-182399-0010-1789: hyp=['SO', 'WE', 'UNS', 'SET', 'ON', 'THE', 'CHIMNEY', 'TOPS', 'WHENEVER', 'OLD', 'JACK', 'FROST', 'GETS', 'TO', 'STRAIN', 'DOWN', 'WHERE', 'HE', 'HAVE', 'NO', 'BUSINESS'] +3997-182399-0011-1790: ref=['ONE', 'DAY', 'THIS', 'NO', 'COUNT', 'TRIFLING', 'COUSIN', 'OF', 'GRANDPAP', 'BUZZARD', 'GET', 'COLD', 'IN', 'HIS', 'FEET'] +3997-182399-0011-1790: hyp=['ONE', 'DAY', "THERE'S", 'NO', 'COUNT', 'TRIFLING', 'COUSIN', 'OF', 'GRANDPAP', 'BAZARD', 'GET', 'COLD', 'IN', 'HIS', 'FEET'] +3997-182399-0012-1791: ref=['IT', 'WAS', 'ON', 'A', 'LIL', 'OL', 'HOUSE', 'A', 'LIL', 'OL', 'TUMBLE', 'DOWN', 'HOUSE'] +3997-182399-0012-1791: hyp=['IT', 'WAS', 'ON', 'THE', 'LITTLE', 'OLD', 'HOUSE', 'A', 'LITTLE', 'OLD', 'TUMBLE', 'DOWN', 'HOUSE'] +3997-182399-0013-1792: ref=['WHY', 'HE', 'JES', 'STRETCH', 'HIS', 'FOOL', 'HAID', 'AS', 'FAR', 'DOWN', 'THAT', 
'CHIMNEY', 'AS', 'HE', 'CAN', 'AN', 'LISTEN', 'AN', 'LISTEN'] +3997-182399-0013-1792: hyp=['WHY', 'HE', 'JUST', 'STRETCH', 'HIS', 'FULL', 'HEAD', 'AS', 'FAR', 'DOWN', 'THE', 'CHIMNEY', 'AS', 'HE', 'CAN', 'AND', 'LISTEN', 'AND', 'LISTEN'] +3997-182399-0014-1793: ref=['BUT', 'HE', "DON'T", 'MIND', 'THAT'] +3997-182399-0014-1793: hyp=['BUT', 'HE', "DON'T", 'MIND', 'THAT'] +3997-182399-0015-1794: ref=['WILL', "YO'", 'ALLS', 'PLEASE', 'SPEAK', 'A', 'LIL', 'LOUDER', 'HE', 'HOLLER', 'DOWN', 'THE', 'CHIMNEY', 'JES', 'LIKE', 'THAT'] +3997-182399-0015-1794: hyp=['OH', 'YOU', 'ALL', 'PLEASE', 'SPEAK', 'A', 'LOW', 'LOUDER', 'HE', 'HOLLERED', 'DOWN', 'THE', 'CHIMNEY', 'JUST', 'LIKE', 'THAT'] +3997-182399-0016-1795: ref=['YES', 'SAH', 'SHE', "SHO'LY", 'WAS', 'PLUMB', 'SCARED'] +3997-182399-0016-1795: hyp=['YES', 'SAH', 'SHE', 'SURELY', 'WAS', 'PLUM', 'SCARED'] +3997-182399-0017-1796: ref=['THEY', 'LIKE', 'TO', 'CHOKE', 'THAT', 'NO', 'COUNT', 'BUZZARD', 'TO', 'DEATH'] +3997-182399-0017-1796: hyp=['THEY', 'LIKED', 'TO', 'CHOKE', 'THAT', 'NO', 'COMPOSER', 'TO', 'DEATH'] +3997-182399-0018-1797: ref=['WHEN', 'HE', 'GET', 'HOME', 'HE', 'TRY', 'AN', 'TRY', 'TO', 'BRUSH', 'THAT', 'SOOT', 'OFF', 'BUT', 'IT', 'DONE', 'GET', 'INTO', 'THE', 'SKIN', 'AN', 'IT', 'STAY', 'THERE'] +3997-182399-0018-1797: hyp=['WHEN', 'HE', 'GET', 'HOME', "HE'D", 'TRY', 'AND', 'TRIES', 'TO', 'BRUSH', 'US', 'SOOT', 'OFF', 'BUT', 'IT', 'DONE', 'GET', 'INTO', 'THE', 'SKIN', 'AND', "IT'S", 'STAY', 'THERE'] +3997-182399-0019-1798: ref=['A', 'LITTLE', 'SIGH', 'OF', 'SATISFACTION', 'WENT', 'AROUND', 'THE', 'CIRCLE', 'OF', 'LISTENERS'] +3997-182399-0019-1798: hyp=['A', 'LITTLE', 'SIGH', 'OF', 'SATISFACTION', 'WENT', 'ROUND', 'THE', 'CIRCLE', 'OF', 'LISTENERS'] +3997-182399-0020-1799: ref=['IT', 'WAS', 'JUST', 'AS', 'GOOD', 'AS', 'ONE', 'OF', 'GRANDFATHER', "FROG'S"] +3997-182399-0020-1799: hyp=['IT', 'WAS', 'JUST', 'AS', 'GOOD', 'AS', 'ONE', 'OF', 'GRANDFATHER', "FROG'S"] +4198-12259-0000-203: ref=['DRAW', 'REACH', 'FILL', 'MIX', 'GIVE', 'IT', 'ME', 'WITHOUT', 'WATER'] +4198-12259-0000-203: hyp=['DRAW', 'REACH', 'FILL', 'MIX', 'GIVE', 'IT', 'ME', 'WITHOUT', 'WATER'] +4198-12259-0001-204: ref=['SO', 'MY', 'FRIEND', 'SO', 'WHIP', 'ME', 'OFF', 'THIS', 'GLASS', 'NEATLY', 'BRING', 'ME', 'HITHER', 'SOME', 'CLARET', 'A', 'FULL', 'WEEPING', 'GLASS', 'TILL', 'IT', 'RUN', 'OVER'] +4198-12259-0001-204: hyp=['SO', 'MY', 'FRIEND', 'SO', 'WHIP', 'ME', 'OFF', 'THIS', 'GLASS', 'NEATLY', 'BRING', 'ME', 'HITHER', 'SOME', 'CLARE', 'IT', 'A', 'FULL', 'WEEPING', 'GLASS', 'TILL', 'IT', 'RUN', 'OVER'] +4198-12259-0002-205: ref=['A', 'CESSATION', 'AND', 'TRUCE', 'WITH', 'THIRST'] +4198-12259-0002-205: hyp=['A', 'CESSATION', 'AND', 'TRUCE', 'WITH', 'THIRST'] +4198-12259-0003-206: ref=['YOU', 'HAVE', 'CATCHED', 'A', 'COLD', 'GAMMER', 'YEA', 'FORSOOTH', 'SIR'] +4198-12259-0003-206: hyp=['YOU', 'HAVE', 'CAST', 'A', 'COLD', 'GAMMER', 'YEA', 'FORSOOTH', 'SIR'] +4198-12259-0004-207: ref=['BY', 'THE', 'BELLY', 'OF', 'SANCT', 'BUFF', 'LET', 'US', 'TALK', 'OF', 'OUR', 'DRINK', 'I', 'NEVER', 'DRINK', 'BUT', 'AT', 'MY', 'HOURS', 'LIKE', 'THE', "POPE'S", 'MULE'] +4198-12259-0004-207: hyp=['BY', 'THE', 'BELLY', 'OF', 'SAINT', 'BUFF', 'LET', 'US', 'TALK', 'OF', 'OUR', 'DRINK', 'I', 'NEVER', 'DRINK', 'BUT', 'AT', 'MY', 'HOURS', 'LIKE', 'THE', "POPE'S", 'MULE'] +4198-12259-0005-208: ref=['WHICH', 'WAS', 'FIRST', 'THIRST', 'OR', 'DRINKING'] +4198-12259-0005-208: hyp=['WHICH', 'WAS', 'FIRST', 'THOSE', 'TWO', 'DRINKING'] +4198-12259-0006-209: ref=['WHAT', 'IT', 'SEEMS', 'I', 'DO', 'NOT', 
'DRINK', 'BUT', 'BY', 'AN', 'ATTORNEY'] +4198-12259-0006-209: hyp=['WHAT', 'IT', 'SEEMS', 'I', 'DO', 'NOT', 'DRINK', 'BUT', 'BUY', 'AN', 'ATTORNEY'] +4198-12259-0007-210: ref=['DRINK', 'ALWAYS', 'AND', 'YOU', 'SHALL', 'NEVER', 'DIE'] +4198-12259-0007-210: hyp=['DRINK', 'ALWAYS', 'AND', 'YOU', 'SHALL', 'NEVER', 'DIE'] +4198-12259-0008-211: ref=['IF', 'I', 'DRINK', 'NOT', 'I', 'AM', 'A', 'GROUND', 'DRY', 'GRAVELLED', 'AND', 'SPENT', 'I', 'AM', 'STARK', 'DEAD', 'WITHOUT', 'DRINK', 'AND', 'MY', 'SOUL', 'READY', 'TO', 'FLY', 'INTO', 'SOME', 'MARSH', 'AMONGST', 'FROGS', 'THE', 'SOUL', 'NEVER', 'DWELLS', 'IN', 'A', 'DRY', 'PLACE', 'DROUTH', 'KILLS', 'IT'] +4198-12259-0008-211: hyp=['IF', 'I', 'DRINK', 'NOT', 'I', 'AM', 'A', 'GROUND', 'DRY', 'GRAVELLED', 'AND', 'SPENT', 'I', 'AM', 'STARK', 'DEAD', 'WITHOUT', 'DRINK', 'AND', 'MY', 'SOUL', 'READY', 'TO', 'FLY', 'INTO', 'SOME', 'MARS', 'A', "MONTH'S", 'FROGS', 'THE', 'SOUL', 'NEVER', 'DWELLS', 'IN', 'A', 'DRY', 'PLACE', 'DROUGHT', 'KILLET', 'IT'] +4198-12259-0009-212: ref=['HE', 'DRINKS', 'IN', 'VAIN', 'THAT', 'FEELS', 'NOT', 'THE', 'PLEASURE', 'OF', 'IT'] +4198-12259-0009-212: hyp=['HE', 'DRINK', 'SO', 'VAIN', 'THAT', 'FILLS', 'NOT', 'THE', 'PLEASURE', 'OF', 'IT'] +4198-12259-0010-213: ref=['IT', 'IS', 'ENOUGH', 'TO', 'BREAK', 'BOTH', 'GIRDS', 'AND', 'PETREL'] +4198-12259-0010-213: hyp=['IT', 'IS', 'ENOUGH', 'TO', 'BREAK', 'BOTH', 'GORGE', 'AND', 'PETAL'] +4198-12259-0011-214: ref=['WHAT', 'DIFFERENCE', 'IS', 'THERE', 'BETWEEN', 'A', 'BOTTLE', 'AND', 'A', 'FLAGON'] +4198-12259-0011-214: hyp=['WHAT', 'DIFFERENCE', 'IS', 'THERE', 'BETWEEN', 'A', 'BOTTLE', 'AND', 'A', 'FLAGON'] +4198-12259-0012-215: ref=['BRAVELY', 'AND', 'WELL', 'PLAYED', 'UPON', 'THE', 'WORDS'] +4198-12259-0012-215: hyp=['BRAVELY', 'AND', 'WELL', 'PLAYED', 'UPON', 'THE', 'WORDS'] +4198-12259-0013-216: ref=['OUR', 'FATHERS', 'DRANK', 'LUSTILY', 'AND', 'EMPTIED', 'THEIR', 'CANS'] +4198-12259-0013-216: hyp=['OUR', 'FATHERS', 'DRANK', 'LUSTILY', 'AND', 'EMPTIED', 'THEIR', 'CANES'] +4198-12259-0014-217: ref=['WELL', 'CACKED', 'WELL', 'SUNG'] +4198-12259-0014-217: hyp=['WELL', 'CAGLE', 'WELL', 'SUNG'] +4198-12259-0015-218: ref=['COME', 'LET', 'US', 'DRINK', 'WILL', 'YOU', 'SEND', 'NOTHING', 'TO', 'THE', 'RIVER'] +4198-12259-0015-218: hyp=['COME', 'LET', 'US', 'DRINK', 'WILL', 'YOU', 'SEND', 'NOTHING', 'TO', 'THE', 'RIVER'] +4198-12259-0016-219: ref=['I', 'DRINK', 'NO', 'MORE', 'THAN', 'A', 'SPONGE'] +4198-12259-0016-219: hyp=['I', 'DRINK', 'NO', 'MORE', 'THAN', 'HIS', 'SPINES'] +4198-12259-0017-220: ref=['I', 'DRINK', 'LIKE', 'A', 'TEMPLAR', 'KNIGHT'] +4198-12259-0017-220: hyp=['I', 'DRINK', 'LIKE', 'A', 'TEMPLAR', 'NIGHT'] +4198-12259-0018-221: ref=['AND', 'I', 'TANQUAM', 'SPONSUS'] +4198-12259-0018-221: hyp=['AND', 'I', 'TANK', 'QUON', 'SPONSES'] +4198-12259-0019-222: ref=['AND', 'I', 'SICUT', 'TERRA', 'SINE', 'AQUA'] +4198-12259-0019-222: hyp=['AND', 'I', 'CICEROSINAQUA'] +4198-12259-0020-223: ref=['GIVE', 'ME', 'A', 'SYNONYMON', 'FOR', 'A', 'GAMMON', 'OF', 'BACON'] +4198-12259-0020-223: hyp=['GIVE', 'ME', 'A', 'SINNING', 'FOR', 'A', 'GAMIN', 'OF', 'BACON'] +4198-12259-0021-224: ref=['IT', 'IS', 'THE', 'COMPULSORY', 'OF', 'DRINKERS', 'IT', 'IS', 'A', 'PULLEY'] +4198-12259-0021-224: hyp=['IT', 'IS', 'THE', 'COMPULSORY', 'OF', 'DRAKERS', 'IT', 'IS', 'A', 'PULLEY'] +4198-12259-0022-225: ref=['A', 'LITTLE', 'RAIN', 'ALLAYS', 'A', 'GREAT', 'DEAL', 'OF', 'WIND', 'LONG', 'TIPPLING', 'BREAKS', 'THE', 'THUNDER'] +4198-12259-0022-225: hyp=['A', 'LITTLE', 'RAIN', 'ALLAYS', 'A', 'GREAT', 'DEAL', 
'OF', 'WIND', 'LONG', 'TIPPLING', 'BREAKS', 'THE', 'THUNDER'] +4198-12259-0023-226: ref=['BUT', 'IF', 'THERE', 'CAME', 'SUCH', 'LIQUOR', 'FROM', 'MY', 'BALLOCK', 'WOULD', 'YOU', 'NOT', 'WILLINGLY', 'THEREAFTER', 'SUCK', 'THE', 'UDDER', 'WHENCE', 'IT', 'ISSUED'] +4198-12259-0023-226: hyp=['BUT', 'IF', 'THERE', 'CAME', 'SUCH', 'LIQUOR', 'FOR', 'MY', 'BALLOCK', 'WERE', 'YOU', 'NOT', 'WILLINGLY', 'THEREAFTER', 'SUCK', 'THE', 'UTTER', 'WHENCE', 'IT', 'ISSUED'] +4198-12259-0024-227: ref=['HERE', 'PAGE', 'FILL'] +4198-12259-0024-227: hyp=['HERE', 'PAGE', 'FILL'] +4198-12259-0025-228: ref=['I', 'APPEAL', 'FROM', 'THIRST', 'AND', 'DISCLAIM', 'ITS', 'JURISDICTION'] +4198-12259-0025-228: hyp=['I', 'APPEAL', 'FROM', 'THIRST', 'AND', 'DISCLAIM', 'ITS', 'JURISDICTION'] +4198-12259-0026-229: ref=['I', 'WAS', 'WONT', 'HERETOFORE', 'TO', 'DRINK', 'OUT', 'ALL', 'BUT', 'NOW', 'I', 'LEAVE', 'NOTHING'] +4198-12259-0026-229: hyp=['I', 'WAS', 'WONT', 'HERE', 'TO', 'FOR', 'TO', 'DRINK', 'OUT', 'ALL', 'BUT', 'NOW', 'I', 'LEAVE', 'NOTHING'] +4198-12259-0027-230: ref=['HEYDAY', 'HERE', 'ARE', 'TRIPES', 'FIT', 'FOR', 'OUR', 'SPORT', 'AND', 'IN', 'EARNEST', 'EXCELLENT', 'GODEBILLIOS', 'OF', 'THE', 'DUN', 'OX', 'YOU', 'KNOW', 'WITH', 'THE', 'BLACK', 'STREAK'] +4198-12259-0027-230: hyp=['HAY', 'THEE', 'HERE', 'A', "TRIPE'S", 'FIT', 'FOR', 'OUR', 'SPORT', 'AND', 'IN', 'EARNEST', 'EXCELLENT', 'GO', 'TO', 'BEHOLS', 'OF', 'THE', 'DUN', 'AX', 'YOU', 'KNOW', 'WITH', 'THE', 'BLACK', 'STREET'] +4198-12259-0028-231: ref=['O', 'FOR', "GOD'S", 'SAKE', 'LET', 'US', 'LASH', 'THEM', 'SOUNDLY', 'YET', 'THRIFTILY'] +4198-12259-0028-231: hyp=['OH', 'FOR', "GOD'S", 'SAKE', 'LET', 'US', 'LAST', 'THEM', 'SOUNDLY', 'YET', 'THRIFTILY'] +4198-12259-0029-232: ref=['SPARROWS', 'WILL', 'NOT', 'EAT', 'UNLESS', 'YOU', 'BOB', 'THEM', 'ON', 'THE', 'TAIL', 'NOR', 'CAN', 'I', 'DRINK', 'IF', 'I', 'BE', 'NOT', 'FAIRLY', 'SPOKE', 'TO'] +4198-12259-0029-232: hyp=['SPARROWS', 'WHEN', 'I', 'EAT', 'UNLESS', 'YOU', 'BOB', 'THEM', 'ON', 'THE', 'TAIL', 'NOR', 'CAN', 'I', 'DRINK', 'IF', 'I', 'BE', 'NOT', 'FAIRLY', 'SPOKE', 'TO'] +4198-12259-0030-233: ref=['HO', 'THIS', 'WILL', 'BANG', 'IT', 'SOUNDLY'] +4198-12259-0030-233: hyp=['OH', 'THIS', 'WAS', "BENITT'S", 'ONLY'] +4198-12259-0031-234: ref=['BUT', 'THIS', 'SHALL', 'BANISH', 'IT', 'UTTERLY'] +4198-12259-0031-234: hyp=['BUT', 'THIS', 'I', 'BANISH', 'THE', 'UTTERLY'] +4198-12259-0032-235: ref=['LET', 'US', 'WIND', 'OUR', 'HORNS', 'BY', 'THE', 'SOUND', 'OF', 'FLAGONS', 'AND', 'BOTTLES', 'AND', 'CRY', 'ALOUD', 'THAT', 'WHOEVER', 'HATH', 'LOST', 'HIS', 'THIRST', 'COME', 'NOT', 'HITHER', 'TO', 'SEEK', 'IT'] +4198-12259-0032-235: hyp=['LET', 'US', 'WIND', 'OUR', 'HORNS', 'BY', 'THE', 'SOUND', 'OF', 'FLAGONS', 'AND', 'BIDLES', 'AND', 'CRY', 'ALOUD', 'THERE', 'WHOEVER', 'HATH', 'LOST', 'HIS', 'THIRST', 'COME', 'NIGH', 'HITHER', 'TO', 'SEEK', 'IT'] +4198-12259-0033-236: ref=['THE', 'GREAT', 'GOD', 'MADE', 'THE', 'PLANETS', 'AND', 'WE', 'MAKE', 'THE', 'PLATTERS', 'NEAT'] +4198-12259-0033-236: hyp=['THE', 'GREAT', 'GOD', 'MADE', 'THE', 'PLANETS', 'AND', 'WE', 'MAKE', 'THE', 'PLATTERS', 'NEAT'] +4198-12259-0034-237: ref=['APPETITE', 'COMES', 'WITH', 'EATING', 'SAYS', 'ANGESTON', 'BUT', 'THE', 'THIRST', 'GOES', 'AWAY', 'WITH', 'DRINKING'] +4198-12259-0034-237: hyp=['APPETITE', 'COMBED', 'WITH', 'EATING', 'SAYS', 'ANGER', 'SN', 'BUT', 'THE', 'THIRST', 'GOES', 'AWAY', 'WITH', 'DRINKING'] +4198-12259-0035-238: ref=['I', 'HAVE', 'A', 'REMEDY', 'AGAINST', 'THIRST', 'QUITE', 'CONTRARY', 'TO', 'THAT', 'WHICH', 'IS', 'GOOD', 
'AGAINST', 'THE', 'BITING', 'OF', 'A', 'MAD', 'DOG'] +4198-12259-0035-238: hyp=['I', 'HAVE', 'A', 'REMEDY', 'AGAINST', 'THIRST', 'QUITE', 'CONTRARY', 'TO', 'THAT', 'WHICH', 'IS', 'GOOD', 'AGAINST', 'ABIDING', 'OF', 'A', 'MAN', 'DOLE'] +4198-12259-0036-239: ref=['WHITE', 'WINE', 'HERE', 'WINE', 'BOYS'] +4198-12259-0036-239: hyp=['WHY', 'HERE', 'WHY', 'BOYS'] +4198-12259-0037-240: ref=['O', 'LACHRYMA', 'CHRISTI', 'IT', 'IS', 'OF', 'THE', 'BEST', 'GRAPE'] +4198-12259-0037-240: hyp=['O', 'LACK', 'ROOM', 'I', 'CHRISTI', 'IT', 'IS', 'OF', 'THE', 'BEST', 'GRAPE'] +4198-12259-0038-241: ref=["I'FAITH", 'PURE', 'GREEK', 'GREEK', 'O', 'THE', 'FINE', 'WHITE', 'WINE'] +4198-12259-0038-241: hyp=['I', 'FAITH', 'PURE', 'GREEK', 'GREEK', 'O', 'THE', 'FINE', 'WHITE', 'WINE'] +4198-12259-0039-242: ref=['THERE', 'IS', 'NO', 'ENCHANTMENT', 'NOR', 'CHARM', 'THERE', 'EVERY', 'ONE', 'OF', 'YOU', 'HATH', 'SEEN', 'IT'] +4198-12259-0039-242: hyp=['THERE', 'IS', 'NO', 'ENCHANTMENT', 'NOR', 'CHARM', 'THERE', 'EVERY', 'ONE', 'OF', 'YOU', 'HATH', 'SEEN', 'IT'] +4198-12259-0040-243: ref=['MY', 'PRENTICESHIP', 'IS', 'OUT', 'I', 'AM', 'A', 'FREE', 'MAN', 'AT', 'THIS', 'TRADE'] +4198-12259-0040-243: hyp=['MY', 'PREDICUP', 'IS', 'OUT', "I'M", 'A', 'FREE', 'MAN', 'AT', 'THIS', 'TRADE'] +4198-12259-0041-244: ref=['I', 'SHOULD', 'SAY', 'MASTER', 'PAST'] +4198-12259-0041-244: hyp=['AS', 'YOU', 'SEE', 'MASTER', 'PASS'] +4198-12259-0042-245: ref=['O', 'THE', 'DRINKERS', 'THOSE', 'THAT', 'ARE', 'A', 'DRY', 'O', 'POOR', 'THIRSTY', 'SOULS'] +4198-12259-0042-245: hyp=['OH', 'THE', 'DRAKE', 'IS', 'THOSE', 'THAT', 'ARE', 'DRY', 'O', 'PORT', 'THIRSTY', 'SOULS'] +4198-12259-0043-246: ref=['CLEAR', 'OFF', 'NEAT', 'SUPERNACULUM'] +4198-12259-0043-246: hyp=['CLEAR', 'OFF', 'MEAT', 'SUPERNACULUM'] +4198-12281-0000-187: ref=['ALTHOUGH', 'THE', 'PLAGUE', 'WAS', 'THERE', 'IN', 'THE', 'MOST', 'PART', 'OF', 'ALL', 'THE', 'HOUSES', 'THEY', 'NEVERTHELESS', 'ENTERED', 'EVERYWHERE', 'THEN', 'PLUNDERED', 'AND', 'CARRIED', 'AWAY', 'ALL', 'THAT', 'WAS', 'WITHIN', 'AND', 'YET', 'FOR', 'ALL', 'THIS', 'NOT', 'ONE', 'OF', 'THEM', 'TOOK', 'ANY', 'HURT', 'WHICH', 'IS', 'A', 'MOST', 'WONDERFUL', 'CASE'] +4198-12281-0000-187: hyp=['ALTHOUGH', 'THE', 'PLAGUE', 'WAS', 'THERE', 'IN', 'THE', 'MOST', 'PART', 'OF', 'ALL', 'THE', 'HOUSES', 'THEY', 'NEVERTHELESS', 'ENTERED', 'EVERYWHERE', 'THEN', 'PLUNDERED', 'AND', 'CARRIED', 'AWAY', 'ALL', 'THAT', 'WAS', 'WITHIN', 'AND', 'YET', 'FOR', 'ALL', 'THIS', 'NOT', 'ONE', 'OF', 'THEM', 'TOOK', 'ANY', 'HURT', 'WHICH', 'IS', 'A', 'MOST', 'WONDERFUL', 'CASE'] +4198-12281-0001-188: ref=['I', 'BESEECH', 'YOU', 'THINK', 'UPON', 'IT'] +4198-12281-0001-188: hyp=['I', 'BESEECH', 'YOU', 'THINK', 'UPON', 'IT'] +4198-12281-0002-189: ref=['NEVERTHELESS', 'AT', 'ALL', 'ADVENTURES', 'THEY', 'RANG', 'THE', 'BELLS', 'AD', 'CAPITULUM', 'CAPITULANTES'] +4198-12281-0002-189: hyp=['NEVERTHELESS', 'AT', 'ALL', 'VENTURES', 'THEY', 'RANG', 'THE', 'BELLS', 'AT', 'CAPITULUM', 'CAPITULAT', 'DAYS'] +4198-12281-0003-190: ref=['BY', 'THE', 'VIRTUE', 'OF', 'GOD', 'WHY', 'DO', 'NOT', 'YOU', 'SING', 'PANNIERS', 'FAREWELL', 'VINTAGE', 'IS', 'DONE'] +4198-12281-0003-190: hyp=['BY', 'THE', 'VIRTUE', 'OF', 'GOD', 'WHY', 'DO', 'NOT', 'YOU', 'SING', 'PANNIERS', 'FAREWELL', 'VENTAGE', 'IS', 'NONE'] +4198-12281-0004-191: ref=['BY', 'THE', 'BELLY', 'OF', 'SANCT', 'JAMES', 'WHAT', 'SHALL', 'WE', 'POOR', 'DEVILS', 'DRINK', 'THE', 'WHILE'] +4198-12281-0004-191: hyp=['BY', 'THE', 'BELLY', 'OF', 'SAINT', 'JAMES', 'WHICH', 'SHALL', 'WE', 'POOR', 'DEVILS', 'DRINK', 'THE', 
'WHILE'] +4198-12281-0005-192: ref=['LORD', 'GOD', 'DA', 'MIHI', 'POTUM'] +4198-12281-0005-192: hyp=['LORD', 'GOD', 'DALMY', 'HE', 'POT', 'EM'] +4198-12281-0006-193: ref=['LET', 'HIM', 'BE', 'CARRIED', 'TO', 'PRISON', 'FOR', 'TROUBLING', 'THE', 'DIVINE', 'SERVICE'] +4198-12281-0006-193: hyp=['LET', 'HIM', 'BE', 'CARRIED', 'THE', 'PRISON', 'FOR', 'TROUBLING', 'THE', 'DIVINE', 'SERVICE'] +4198-12281-0007-194: ref=['WHEREFORE', 'IS', 'IT', 'THAT', 'OUR', 'DEVOTIONS', 'WERE', 'INSTITUTED', 'TO', 'BE', 'SHORT', 'IN', 'THE', 'TIME', 'OF', 'HARVEST', 'AND', 'VINTAGE', 'AND', 'LONG', 'IN', 'THE', 'ADVENT', 'AND', 'ALL', 'THE', 'WINTER'] +4198-12281-0007-194: hyp=['WHEREFORE', 'IS', 'IT', 'THAT', 'OUR', 'DEVOTIONS', 'WERE', 'INSTITUTED', 'TO', 'BE', 'SHORT', 'IN', 'THE', 'TIME', 'OF', 'HARVEST', 'AND', 'VINTAGE', 'AND', 'LONG', 'IN', 'ADVENT', 'IN', 'ALL', 'THE', 'WINTER'] +4198-12281-0008-195: ref=['HARK', 'YOU', 'MY', 'MASTERS', 'YOU', 'THAT', 'LOVE', 'THE', 'WINE', "COP'S", 'BODY', 'FOLLOW', 'ME', 'FOR', 'SANCT', 'ANTHONY', 'BURN', 'ME', 'AS', 'FREELY', 'AS', 'A', 'FAGGOT', 'IF', 'THEY', 'GET', 'LEAVE', 'TO', 'TASTE', 'ONE', 'DROP', 'OF', 'THE', 'LIQUOR', 'THAT', 'WILL', 'NOT', 'NOW', 'COME', 'AND', 'FIGHT', 'FOR', 'RELIEF', 'OF', 'THE', 'VINE'] +4198-12281-0008-195: hyp=['ARE', 'YOU', 'MY', 'MASTERS', 'YOU', 'THAT', 'LOVE', 'THEM', 'WHY', "COP'S", 'BODY', 'FOLLOW', 'ME', 'FOR', 'SAINT', 'ANTHONY', 'BURN', 'ME', 'AS', 'FREELY', 'AS', 'A', 'FAGGOT', 'THEY', 'GET', 'LEAVE', 'TO', 'TASTE', 'ONE', 'DROP', 'OF', 'THE', 'LIQUOR', 'THAT', 'WOULD', 'NOT', 'NOW', 'COME', 'AND', 'FIGHT', 'FOR', 'RELIEF', 'OF', 'THE', 'VINE'] +4198-12281-0009-196: ref=['TO', 'OTHERS', 'AGAIN', 'HE', 'UNJOINTED', 'THE', 'SPONDYLES', 'OR', 'KNUCKLES', 'OF', 'THE', 'NECK', 'DISFIGURED', 'THEIR', 'CHAPS', 'GASHED', 'THEIR', 'FACES', 'MADE', 'THEIR', 'CHEEKS', 'HANG', 'FLAPPING', 'ON', 'THEIR', 'CHIN', 'AND', 'SO', 'SWINGED', 'AND', 'BALAMMED', 'THEM', 'THAT', 'THEY', 'FELL', 'DOWN', 'BEFORE', 'HIM', 'LIKE', 'HAY', 'BEFORE', 'A', 'MOWER'] +4198-12281-0009-196: hyp=['TO', 'OTHERS', 'AGAIN', 'HE', 'UNJOINTED', 'THE', 'SPONGEALS', 'OR', 'KNUCKLES', 'OF', 'THE', 'NECK', 'THIS', 'FIGURED', 'THEIR', 'CHAPS', 'GASH', 'THEIR', 'FACES', 'MADE', 'THEIR', 'CHEEKS', 'HANG', 'FLAPPING', 'ON', 'THEIR', 'CHIN', 'AND', 'SO', 'SWINGED', 'AND', 'BELLAMED', 'THEM', 'THAT', 'THEY', 'FELL', 'DOWN', 'BEFORE', 'HIM', 'LIKE', 'HAY', 'BEFORE', 'HIM', 'OVER'] +4198-12281-0010-197: ref=['TO', 'SOME', 'WITH', 'A', 'SMART', 'SOUSE', 'ON', 'THE', 'EPIGASTER', 'HE', 'WOULD', 'MAKE', 'THEIR', 'MIDRIFF', 'SWAG', 'THEN', 'REDOUBLING', 'THE', 'BLOW', 'GAVE', 'THEM', 'SUCH', 'A', 'HOMEPUSH', 'ON', 'THE', 'NAVEL', 'THAT', 'HE', 'MADE', 'THEIR', 'PUDDINGS', 'TO', 'GUSH', 'OUT'] +4198-12281-0010-197: hyp=['TO', 'SOME', 'WOULD', 'THEY', 'SMART', 'SOUS', 'ON', 'THEIR', 'EPIGASTER', 'HE', 'WOULD', 'MAKE', 'THEIR', 'MIDRIFTS', 'WAG', 'THEN', 'REDOUBLING', 'THE', 'BLOW', 'GAVE', 'THEM', 'SUCH', 'A', 'HOME', 'PUSH', 'ON', 'THE', 'NAVEL', 'THAT', 'HE', 'MADE', 'THEIR', 'PUDDINGS', 'TO', 'GUSH', 'OUT'] +4198-12281-0011-198: ref=['BELIEVE', 'THAT', 'IT', 'WAS', 'THE', 'MOST', 'HORRIBLE', 'SPECTACLE', 'THAT', 'EVER', 'ONE', 'SAW'] +4198-12281-0011-198: hyp=['BELIEVE', 'THEN', 'IT', 'WAS', 'THE', 'MOST', 'HORRIBLE', 'SPECTACLE', 'THAT', 'EVER', 'ONE', 'SAW'] +4198-12281-0012-199: ref=['O', 'THE', 'HOLY', 'LADY', 'NYTOUCH', 'SAID', 'ONE', 'THE', 'GOOD', 'SANCTESS', 'O', 'OUR', 'LADY', 'OF', 'SUCCOURS', 'SAID', 'ANOTHER', 'HELP', 'HELP'] +4198-12281-0012-199: hyp=['ALL', 'THE', 
'HOLY', 'LADY', 'KNIGHTSAGE', 'SAID', 'ONE', 'THE', 'GOOD', 'SANCTUS', 'O', 'OUR', 'LADY', 'OFURUS', 'SAID', 'ANOTHER', 'HELP', 'HELP'] +4198-12281-0013-200: ref=['SOME', 'DIED', 'WITHOUT', 'SPEAKING', 'OTHERS', 'SPOKE', 'WITHOUT', 'DYING', 'SOME', 'DIED', 'IN', 'SPEAKING', 'OTHERS', 'SPOKE', 'IN', 'DYING'] +4198-12281-0013-200: hyp=['SOME', 'DIED', 'WITHOUT', 'SPEAKING', 'OTHERS', 'SPOKE', 'WITHOUT', 'DYING', 'SOME', 'DIED', 'IN', 'SPEAKING', 'OTHERS', 'SPOKE', 'AND', 'DYING'] +4198-12281-0014-201: ref=['CAN', 'YOU', 'TELL', 'WITH', 'WHAT', 'INSTRUMENTS', 'THEY', 'DID', 'IT'] +4198-12281-0014-201: hyp=['CAN', 'YOU', 'TELL', 'WITH', 'WHAT', 'INSTRUMENTS', 'THEY', 'DID', 'IT'] +4198-12281-0015-202: ref=['IN', 'THE', 'MEANTIME', 'FRIAR', 'JOHN', 'WITH', 'HIS', 'FORMIDABLE', 'BATON', 'OF', 'THE', 'CROSS', 'GOT', 'TO', 'THE', 'BREACH', 'WHICH', 'THE', 'ENEMIES', 'HAD', 'MADE', 'AND', 'THERE', 'STOOD', 'TO', 'SNATCH', 'UP', 'THOSE', 'THAT', 'ENDEAVOURED', 'TO', 'ESCAPE'] +4198-12281-0015-202: hyp=['IN', 'THE', 'MEANTIME', 'FRY', 'JOHN', 'WITH', 'HIS', 'FORMIDABLE', 'BATON', 'OF', 'THE', 'CROSS', 'GOT', 'TO', 'THE', 'BREACH', 'WHICH', 'THE', 'ENEMIES', 'HAD', 'MADE', 'AND', 'THERE', 'STOOD', 'TO', 'SNATCH', 'UP', 'THOSE', 'THAT', 'ENDEAVOURED', 'TO', 'ESCAPE'] +4198-61336-0000-247: ref=['IT', 'IS', 'SIGNIFICANT', 'TO', 'NOTE', 'IN', 'THIS', 'CONNECTION', 'THAT', 'THE', 'NEW', 'KING', 'WAS', 'AN', 'UNSWERVING', 'ADHERENT', 'OF', 'THE', 'CULT', 'OF', 'ASHUR', 'BY', 'THE', 'ADHERENTS', 'OF', 'WHICH', 'HE', 'WAS', 'PROBABLY', 'STRONGLY', 'SUPPORTED'] +4198-61336-0000-247: hyp=['IT', 'IS', 'SIGNIFICANT', 'TO', 'NOTE', 'IN', 'THIS', 'CONNECTION', 'THAT', 'THE', 'NEW', 'KING', 'WAS', 'AN', 'UNSWERVING', 'ADHERENT', 'OF', 'THE', 'CULT', 'OF', 'ASHER', 'BY', 'THE', 'ADHERENCE', 'OF', 'WHICH', 'HE', 'WAS', 'PROBABLY', 'STRONGLY', 'SUPPORTED'] +4198-61336-0001-248: ref=['AT', 'THE', 'BEGINNING', 'OF', 'HIS', 'REIGN', 'THERE', 'WAS', 'MUCH', 'SOCIAL', 'DISCONTENT', 'AND', 'SUFFERING'] +4198-61336-0001-248: hyp=['AT', 'THE', 'BEGINNING', 'OF', 'HIS', 'REIGN', 'THERE', 'WAS', 'MUCH', 'SOCIAL', 'DISCONTENT', 'AND', 'SUFFERING'] +4198-61336-0002-249: ref=['WELL', 'MIGHT', 'SHARDURIS', 'EXCLAIM', 'IN', 'THE', 'WORDS', 'OF', 'THE', 'PROPHET', 'WHERE', 'IS', 'THE', 'KING', 'OF', 'ARPAD'] +4198-61336-0002-249: hyp=['WELL', 'MIGHT', 'YOURIS', 'EXCLAIM', 'IN', 'THE', 'WORDS', 'OF', 'THE', 'PROPHET', 'WHERE', 'IS', 'THE', 'KING', 'OF', 'ARPET'] +4198-61336-0003-250: ref=['TIGLATH', 'PILESER', 'HOWEVER', 'CROSSED', 'THE', 'EUPHRATES', 'AND', 'MOVING', 'NORTHWARD', 'DELIVERED', 'AN', 'UNEXPECTED', 'ATTACK', 'ON', 'THE', 'URARTIAN', 'ARMY', 'IN', 'QUMMUKH'] +4198-61336-0003-250: hyp=['TIGG', 'LAUGHED', 'BELLEZER', 'HOWEVER', 'CROSSED', 'THE', 'EUPHADIS', 'AND', 'MOVING', 'NORTHWARD', 'DELIVERED', 'AN', 'UNEXPECTED', 'ATTACK', 'ON', 'THE', 'GERGIAN', 'ARMY', 'AND', 'KUMAK'] +4198-61336-0004-251: ref=['A', 'FIERCE', 'BATTLE', 'ENSUED', 'AND', 'ONE', 'OF', 'ITS', 'DRAMATIC', 'INCIDENTS', 'WAS', 'A', 'SINGLE', 'COMBAT', 'BETWEEN', 'THE', 'RIVAL', 'KINGS'] +4198-61336-0004-251: hyp=['A', 'FIERCE', 'BATTLE', 'ENSUED', 'AND', 'ONE', 'OF', 'HIS', 'DRAMATIC', 'INCIDENTS', 'WAS', 'A', 'SINGLE', 'COMBAT', 'BETWEEN', 'THE', 'RIVAL', 'KINGS'] +4198-61336-0005-252: ref=['AN', 'ATTEMPT', 'WAS', 'MADE', 'TO', 'CAPTURE', 'KING', 'SHARDURIS', 'WHO', 'LEAPT', 'FROM', 'HIS', 'CHARIOT', 'AND', 'MADE', 'HASTY', 'ESCAPE', 'ON', 'HORSEBACK', 'HOTLY', 'PURSUED', 'IN', 'THE', 'GATHERING', 'DARKNESS', 'BY', 'AN', 'ASSYRIAN', 'CONTINGENT', 'OF', 
'CAVALRY'] +4198-61336-0005-252: hyp=['AN', 'ATTEMPT', 'WAS', 'MADE', 'TO', 'CAPTURE', 'KING', 'CHARS', 'WHO', 'LEAPT', 'FROM', 'HIS', 'CHARIOT', 'AND', 'MADE', 'HASTY', 'ESCAPE', 'ON', 'HORSEBACK', 'HOTLY', 'PURSUED', 'IN', 'THE', 'GATHERING', 'DARKNESS', 'BY', 'AN', 'ASSYRIAN', 'CONTENDENT', 'OF', 'CAVALRY'] +4198-61336-0006-253: ref=['DESPITE', 'THE', 'BLOW', 'DEALT', 'AGAINST', 'URARTU', 'ASSYRIA', 'DID', 'NOT', 'IMMEDIATELY', 'REGAIN', 'POSSESSION', 'OF', 'NORTH', 'SYRIA'] +4198-61336-0006-253: hyp=['DESPITE', 'THE', 'BLOW', 'DEALT', 'AGAINST', 'YOU', 'ARE', 'TO', 'ASSYRIA', 'DID', 'NOT', 'IMMEDIATELY', 'REGAIN', 'POSSESSION', 'OF', 'NORTH', 'SYRIA'] +4198-61336-0007-254: ref=['THE', 'SHIFTY', 'MATI', 'ILU', 'EITHER', 'CHERISHED', 'THE', 'HOPE', 'THAT', 'SHARDURIS', 'WOULD', 'RECOVER', 'STRENGTH', 'AND', 'AGAIN', 'INVADE', 'NORTH', 'SYRIA', 'OR', 'THAT', 'HE', 'MIGHT', 'HIMSELF', 'ESTABLISH', 'AN', 'EMPIRE', 'IN', 'THAT', 'REGION'] +4198-61336-0007-254: hyp=['THE', 'SHIFTY', 'MANTIL', 'EITHER', 'CHERISH', 'THE', 'HOPE', 'THAT', 'SHALL', 'DORIS', 'WOULD', 'RECOVER', 'STRENGTH', 'AND', 'AGAIN', 'INVADE', 'NORTH', 'SYRIA', 'OR', 'THAT', 'HE', 'MIGHT', 'HIMSELF', 'ESTABLISH', 'AN', 'EMPIRE', 'IN', 'THAT', 'REGION'] +4198-61336-0008-255: ref=['TIGLATH', 'PILESER', 'HAD', 'THEREFORE', 'TO', 'MARCH', 'WESTWARD', 'AGAIN'] +4198-61336-0008-255: hyp=['TIG', 'LASS', 'BELIEU', 'HAD', 'THEREFORE', 'TO', 'MARCH', 'WESTWARD', 'AGAIN'] +4198-61336-0009-256: ref=['FOR', 'THREE', 'YEARS', 'HE', 'CONDUCTED', 'VIGOROUS', 'CAMPAIGNS', 'IN', 'THE', 'WESTERN', 'LAND', 'WHERE', 'HE', 'MET', 'WITH', 'VIGOROUS', 'RESISTANCE'] +4198-61336-0009-256: hyp=['FOR', 'THREE', 'YEARS', 'HE', 'CONDUCTED', 'VIGOROUS', 'CAMPAIGNS', 'IN', 'THE', 'WESTERN', 'LAND', 'WHERE', 'HE', 'MET', 'WITH', 'VIGOROUS', 'RESISTANCE'] +4198-61336-0010-257: ref=['ARPAD', 'WAS', 'CAPTURED', 'AND', 'MATI', 'ILU', 'DEPOSED', 'AND', 'PROBABLY', 'PUT', 'TO', 'DEATH'] +4198-61336-0010-257: hyp=['OUR', 'PAD', 'WAS', 'CAPTURED', 'AND', 'MEANT', 'TO', 'ILL', 'YOU', 'DEPOSED', 'AND', 'PROBABLY', 'PUT', 'TO', 'DEATH'] +4198-61336-0011-258: ref=['ONCE', 'AGAIN', 'THE', 'HEBREWS', 'CAME', 'INTO', 'CONTACT', 'WITH', 'ASSYRIA'] +4198-61336-0011-258: hyp=['ONCE', 'AGAIN', 'THE', 'HEBREWS', 'CAME', 'INTO', 'CONTACT', 'WITH', 'THE', 'ZERIA'] +4198-61336-0012-259: ref=['ITS', 'FALL', 'MAY', 'NOT', 'HAVE', 'BEEN', 'UNCONNECTED', 'WITH', 'THE', 'TREND', 'OF', 'EVENTS', 'IN', 'ASSYRIA', 'DURING', 'THE', 'CLOSING', 'YEARS', 'OF', 'THE', 'MIDDLE', 'EMPIRE'] +4198-61336-0012-259: hyp=["IT'S", 'FOR', 'ME', 'NOT', 'HAVE', 'BEEN', 'UNCONNECTED', 'WITH', 'THE', 'TREND', 'OF', 'EVENTS', 'IN', 'A', 'SYRIA', 'DURING', 'THE', 'CLOSING', 'YEARS', 'OF', 'THE', 'MIDDLE', 'EMPIRE'] +4198-61336-0013-260: ref=['JEHOASH', 'THE', 'GRANDSON', 'OF', 'JEHU', 'HAD', 'ACHIEVED', 'SUCCESSES', 'IN', 'CONFLICT', 'WITH', 'DAMASCUS'] +4198-61336-0013-260: hyp=['JOESH', 'THE', 'GRANDSON', 'OF', 'JEHU', 'HAD', 'ACHIEVED', 'SUCCESSES', 'IN', 'CONFLICT', 'WITH', 'DAMASCUS'] +4198-61336-0014-261: ref=['SIX', 'MONTHS', 'AFTERWARDS', 'HE', 'WAS', 'ASSASSINATED', 'BY', 'SHALLUM'] +4198-61336-0014-261: hyp=['SIX', 'MONTHS', 'AFTERWARD', 'HE', 'WAS', 'ASSASSINATED', 'BY', 'CHARLEM'] +4198-61336-0015-262: ref=['THIS', 'USURPER', 'HELD', 'SWAY', 'AT', 'SAMARIA', 'FOR', 'ONLY', 'A', 'MONTH'] +4198-61336-0015-262: hyp=['THIS', 'USURPER', 'HELDS', 'WEIGH', 'AT', 'SAMARIA', 'FOR', 'ONLY', 'A', 'MONTH'] +4198-61336-0016-263: ref=['NO', 'RESISTANCE', 'WAS', 'POSSIBLE', 'ON', 'THE', 'PART', 'OF', 'MENAHEM', 
'THE', 'USURPER', 'WHO', 'WAS', 'PROBABLY', 'READY', 'TO', 'WELCOME', 'THE', 'ASSYRIAN', 'CONQUEROR', 'SO', 'THAT', 'BY', 'ARRANGING', 'AN', 'ALLIANCE', 'HE', 'MIGHT', 'SECURE', 'HIS', 'OWN', 'POSITION'] +4198-61336-0016-263: hyp=['NO', 'RESISTANCE', 'WAS', 'POSSIBLE', 'ON', 'THE', 'PART', 'OF', 'MANY', 'HIM', 'THE', 'USURPER', 'WHO', 'WAS', 'PROBABLY', 'READY', 'TO', 'WELCOME', 'THE', 'ASSYRIAN', 'CONQUEROR', 'SO', 'THAT', 'BY', 'ARRANGING', 'AN', 'ALLIANCE', 'HE', 'MIGHT', 'SECURE', 'HIS', 'OWN', 'POSITION'] +4198-61336-0017-264: ref=['TIGLATH', 'PILESER', 'NEXT', 'OPERATED', 'AGAINST', 'THE', 'MEDIAN', 'AND', 'OTHER', 'HILL', 'TRIBES', 'IN', 'THE', 'NORTH', 'EAST'] +4198-61336-0017-264: hyp=['TAKE', 'THAT', 'PLEASURE', 'NEXT', 'OPERATED', 'AGAINST', 'THE', 'MEDIAN', 'AND', 'OTHER', 'HILL', 'TRIBES', 'IN', 'THE', 'NORTHEAST'] +4198-61336-0018-265: ref=['HE', 'OVERTHREW', 'BUILDINGS', 'DESTROYED', 'ORCHARDS', 'AND', 'TRANSPORTED', 'TO', 'NINEVEH', 'THOSE', 'OF', 'THE', 'INHABITANTS', 'HE', 'HAD', 'NOT', 'PUT', 'TO', 'THE', 'SWORD', 'WITH', 'ALL', 'THE', 'LIVE', 'STOCK', 'HE', 'COULD', 'LAY', 'HANDS', 'ON'] +4198-61336-0018-265: hyp=['HE', 'OVERTHREW', 'BUILDINGS', 'DESTROYED', 'ORCHARDS', 'AND', 'TRANSPORTED', 'TO', 'NINEVEH', 'THOSE', 'OF', 'THE', 'INHABITANTS', 'HE', 'HAD', 'NOT', 'PUT', 'TO', 'THIS', 'WOOD', 'WITH', 'ALL', 'THE', 'LIVE', 'STOCK', 'HE', 'COULD', 'LAY', 'HANDS', 'ON'] +4198-61336-0019-266: ref=['THUS', 'WAS', 'URARTU', 'CRIPPLED', 'AND', 'HUMILIATED', 'IT', 'NEVER', 'REGAINED', 'ITS', 'FORMER', 'PRESTIGE', 'AMONG', 'THE', 'NORTHERN', 'STATES'] +4198-61336-0019-266: hyp=['THUS', 'WAS', 'HERE', 'TO', 'CRIPPLED', 'AND', 'HUMILIATED', 'IT', 'NEVER', 'REGAINED', 'ITS', 'FORM', 'OF', 'PRESTIGE', 'AMONG', 'THE', 'NORTHERN', 'STATES'] +4198-61336-0020-267: ref=['IN', 'THE', 'FOLLOWING', 'YEAR', 'TIGLATH', 'PILESER', 'RETURNED', 'TO', 'SYRIA'] +4198-61336-0020-267: hyp=['IN', 'THE', 'FOLLOWING', 'YEAR', 'TIGLASS', 'BELIEVER', 'RETURNED', 'TO', 'SYRIA'] +4198-61336-0021-268: ref=['MENAHEM', 'KING', 'OF', 'ISRAEL', 'HAD', 'DIED', 'AND', 'WAS', 'SUCCEEDED', 'BY', 'HIS', 'SON', 'PEKAHIAH'] +4198-61336-0021-268: hyp=['MANY', 'KING', 'OF', 'ISRAEL', 'HAD', 'DIED', 'AND', 'WAS', 'SUCCEEDED', 'BY', 'HIS', 'SON', 'PEKAHIA'] +4198-61336-0022-269: ref=['JUDAH', 'HAD', 'TAKEN', 'ADVANTAGE', 'OF', 'THE', 'DISTURBED', 'CONDITIONS', 'IN', 'ISRAEL', 'TO', 'ASSERT', 'ITS', 'INDEPENDENCE'] +4198-61336-0022-269: hyp=['JULIA', 'HAD', 'TAKEN', 'ADVANTAGE', 'OF', 'THE', 'DISTURBED', 'CONDITIONS', 'IN', 'ISRAEL', 'TO', 'ASSERT', 'ITS', 'INDEPENDENCE'] +4198-61336-0023-270: ref=['HE', 'CONDEMNED', 'ISRAEL', 'FOR', 'ITS', 'IDOLATRIES', 'AND', 'CRIED'] +4198-61336-0023-270: hyp=['HE', 'CONDEMNED', 'ISRAEL', 'FOR', 'ITS', 'IDOLATRIES', 'AND', 'CRIED'] +4198-61336-0024-271: ref=['FOR', 'THUS', 'SAITH', 'THE', 'LORD', 'UNTO', 'THE', 'HOUSE', 'OF', 'ISRAEL', 'SEEK', 'YE', 'ME', 'AND', 'YE', 'SHALL', 'LIVE', 'HAVE', 'YE', 'OFFERED', 'UNTO', 'ME', 'SACRIFICES', 'AND', 'OFFERINGS', 'IN', 'THE', 'WILDERNESS', 'FORTY', 'YEARS', 'O', 'HOUSE', 'OF', 'ISRAEL'] +4198-61336-0024-271: hyp=['FOR', 'THIS', 'SAITH', 'THE', 'LORD', 'UNTO', 'THE', 'HOUSE', 'OF', 'ISRAEL', 'SEEK', 'YE', 'ME', 'TO', 'LIVE', 'HAVE', 'YE', 'OFFERED', 'UNTO', 'ME', 'SACRIFICES', 'AND', 'OFFERINGS', 'IN', 'THE', 'WILDERNESS', 'FORTY', 'YEARS', 'O', 'HOUSE', 'OF', 'ISRAEL'] +4198-61336-0025-272: ref=['THE', 'REMNANT', 'OF', 'THE', 'PHILISTINES', 'SHALL', 'PERISH'] +4198-61336-0025-272: hyp=['THE', 'REMNANT', 'OF', 'THE', 'PHILISTINES', 'SHALL', 
'PERISH'] +4198-61336-0026-273: ref=['ISRAEL', 'WAS', 'ALSO', 'DEALT', 'WITH'] +4198-61336-0026-273: hyp=['ISRAEL', 'WAS', 'ALSO', 'DEALT', 'WITH'] +4198-61336-0027-274: ref=['HE', 'SWEPT', 'THROUGH', 'ISRAEL', 'LIKE', 'A', 'HURRICANE'] +4198-61336-0027-274: hyp=['HE', 'SWEPT', 'THROUGH', 'ISRAEL', 'LIKE', 'A', 'HURRICANE'] +4198-61336-0028-275: ref=['THE', 'PHILISTINES', 'AND', 'THE', 'ARABIANS', 'OF', 'THE', 'DESERT', 'WERE', 'ALSO', 'SUBDUED'] +4198-61336-0028-275: hyp=['THE', 'FAIRLY', 'STEAMS', 'AND', 'ARABIANS', 'OF', 'THE', 'DESERT', 'WERE', 'ALSO', 'SUBDUED'] +4198-61336-0029-276: ref=['HE', 'INVADED', 'BABYLONIA'] +4198-61336-0029-276: hyp=['HE', 'INVADED', 'BABYLONIA'] +4198-61336-0030-277: ref=['UKINZER', 'TOOK', 'REFUGE', 'IN', 'HIS', 'CAPITAL', 'SHAPIA', 'WHICH', 'HELD', 'OUT', 'SUCCESSFULLY', 'ALTHOUGH', 'THE', 'SURROUNDING', 'COUNTRY', 'WAS', 'RAVAGED', 'AND', 'DESPOILED'] +4198-61336-0030-277: hyp=['A', 'KINDRED', 'TOOK', 'REFUGE', 'IN', 'HIS', 'CAPITAL', 'SHAPIA', 'WHICH', 'HELD', 'OUT', 'SUCCESSFULLY', 'ALTHOUGH', 'THE', 'SURROUNDING', 'COUNTRY', 'WAS', 'RAVAGED', 'AND', 'DESPOILED'] +4294-14317-0000-1866: ref=['AS', 'I', 'THOUGHT', 'THAT', 'THIS', 'WAS', 'DUE', 'TO', 'SOME', 'FAULT', 'IN', 'THE', 'EARTH', 'I', 'WANTED', 'TO', 'MAKE', 'THESE', 'FIRST', 'EXPERIMENTS', 'BEFORE', 'I', 'UNDERTOOK', 'MY', 'PERSEUS'] +4294-14317-0000-1866: hyp=['AS', 'I', 'THOUGHT', 'THAT', 'THIS', 'WAS', 'DUE', 'TO', 'SOME', 'FAULT', 'IN', 'THE', 'EARTH', 'I', 'WANTED', 'TO', 'MAKE', 'THESE', 'FIRST', 'EXPERIMENTS', 'BEFORE', 'I', 'UNDERTOOK', 'MY', 'PERSEUS'] +4294-14317-0001-1867: ref=['WHEN', 'I', 'SAW', 'THAT', 'THIS', 'BUST', 'CAME', 'OUT', 'SHARP', 'AND', 'CLEAN', 'I', 'SET', 'AT', 'ONCE', 'TO', 'CONSTRUCT', 'A', 'LITTLE', 'FURNACE', 'IN', 'THE', 'WORKSHOP', 'ERECTED', 'FOR', 'ME', 'BY', 'THE', 'DUKE', 'AFTER', 'MY', 'OWN', 'PLANS', 'AND', 'DESIGN', 'IN', 'THE', 'HOUSE', 'WHICH', 'THE', 'DUKE', 'HAD', 'GIVEN', 'ME'] +4294-14317-0001-1867: hyp=['WHEN', 'I', 'SAW', 'THIS', 'BEST', 'CAME', 'OUT', 'SHARP', 'AND', 'CLEAN', 'I', 'SAID', 'AT', 'ONCE', 'TO', 'CONSTRUCT', 'A', 'LITTLE', 'FURNACE', 'IN', 'THE', 'WORKSHOP', 'ERECTED', 'FOR', 'ME', 'BY', 'THE', 'DUKE', 'AFTER', 'MY', 'OWN', 'PLANS', 'AND', 'DESIGN', 'IN', 'THE', 'HOUSE', 'WHICH', 'THE', 'DUKE', 'HAD', 'GIVEN', 'ME'] +4294-14317-0002-1868: ref=['IT', 'WAS', 'AN', 'EXTREMELY', 'DIFFICULT', 'TASK', 'AND', 'I', 'WAS', 'ANXIOUS', 'TO', 'OBSERVE', 'ALL', 'THE', 'NICETIES', 'OF', 'ART', 'WHICH', 'I', 'HAD', 'LEARNED', 'SO', 'AS', 'NOT', 'TO', 'LAPSE', 'INTO', 'SOME', 'ERROR'] +4294-14317-0002-1868: hyp=['IT', 'WAS', 'AN', 'EXTREMELY', 'DIFFICULT', 'TASK', 'AND', 'I', 'WAS', 'ANXIOUS', 'TO', 'OBSERVE', 'ALL', 'THE', 'NICETIES', 'OF', 'ART', 'WHICH', 'I', 'HAD', 'LEARNED', 'SO', 'AS', 'NOT', 'TO', 'LAPSE', 'INTO', 'SOME', 'ERROR'] +4294-14317-0003-1869: ref=['I', 'IN', 'MY', 'TURN', 'FEEL', 'THE', 'SAME', 'DESIRE', 'AND', 'HOPE', 'TO', 'PLAY', 'MY', 'PART', 'LIKE', 'THEM', 'THEREFORE', 'MY', 'LORD', 'GIVE', 'ME', 'THE', 'LEAVE', 'TO', 'GO'] +4294-14317-0003-1869: hyp=['I', 'IN', 'MY', 'TURN', 'FEEL', 'THE', 'SAME', 'DESIRE', 'AND', 'HOPE', 'TO', 'PLAY', 'MY', 'PART', 'LIKE', 'THEM', 'THEREFORE', 'MY', 'LORD', 'GIVE', 'ME', 'THE', 'LEAVE', 'TO', 'GO'] +4294-14317-0004-1870: ref=['BUT', 'BEWARE', 'OF', 'LETTING', 'BANDINELLO', 'QUIT', 'YOU', 'RATHER', 'BESTOW', 'UPON', 'HIM', 'ALWAYS', 'MORE', 'THAN', 'HE', 'DEMANDS', 'FOR', 'IF', 'HE', 'GOES', 'INTO', 'FOREIGN', 'PARTS', 'HIS', 'IGNORANCE', 'IS', 'SO', 'PRESUMPTUOUS', 'THAT', 'HE', 'IS', 
'JUST', 'THE', 'MAN', 'TO', 'DISGRACE', 'OUR', 'MOST', 'ILLUSTRIOUS', 'SCHOOL'] +4294-14317-0004-1870: hyp=['BUT', 'BEWARE', 'OF', 'LETTING', 'BEND', 'NELLO', 'QUIT', 'YOU', 'RATHER', 'BESTOW', 'UPON', 'HIM', 'ALWAYS', 'MORE', 'THAN', 'HE', 'DEMANDS', 'FOR', 'IF', 'HE', 'GOES', 'INTO', 'FOREIGN', 'PARTS', 'HIS', 'IGNORANCE', 'IS', 'SO', 'PRESUMPTUOUS', 'THAT', 'HE', 'IS', 'JUST', 'THE', 'MAN', 'TO', 'DISGRACE', 'OUR', 'MOST', 'ILLUSTRIOUS', 'SCHOOL'] +4294-14317-0005-1871: ref=['I', 'ASK', 'NO', 'FURTHER', 'REWARD', 'FOR', 'MY', 'LABOURS', 'UP', 'TO', 'THIS', 'TIME', 'THAN', 'THE', 'GRACIOUS', 'FAVOUR', 'OF', 'YOUR', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0005-1871: hyp=['I', 'ASKED', 'NO', 'FURTHER', 'REWARD', 'FOR', 'MY', 'LABOURS', 'UP', 'TO', 'THIS', 'TIME', 'THAN', 'THE', 'GRACIOUS', 'FAVOUR', 'OF', 'YOUR', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0006-1872: ref=['THEN', 'I', 'THANKED', 'HIM', 'AND', 'SAID', 'I', 'HAD', 'NO', 'GREATER', 'DESIRE', 'THAN', 'TO', 'SHOW', 'THOSE', 'ENVIOUS', 'FOLK', 'THAT', 'I', 'HAD', 'IT', 'IN', 'ME', 'TO', 'EXECUTE', 'THE', 'PROMISED', 'WORK'] +4294-14317-0006-1872: hyp=['THEN', 'I', 'THANKED', 'HIM', 'AND', 'SAID', 'I', 'HAD', 'NO', 'GREATER', 'DESIRE', 'THAN', 'TO', 'SHOW', 'THOSE', 'ENVIOUS', 'FOLK', 'THAT', 'I', 'HAD', 'IT', 'IN', 'ME', 'TO', 'EXECUTE', 'THE', 'PROMISED', 'WORK'] +4294-14317-0007-1873: ref=['I', 'HAD', 'BETTER', 'LOOK', 'TO', 'MY', 'CONDUCT', 'FOR', 'IT', 'HAD', 'COME', 'TO', 'HIS', 'EARS', 'THAT', 'I', 'RELIED', 'UPON', 'HIS', 'FAVOUR', 'TO', 'TAKE', 'IN', 'FIRST', 'ONE', 'MAN', 'AND', 'THEN', 'ANOTHER'] +4294-14317-0007-1873: hyp=['I', 'HAD', 'BETTER', 'LOOK', 'TO', 'MY', 'CONDUCT', 'FOR', 'IT', 'HAS', 'COME', 'TO', 'HIS', 'EARS', 'THAT', 'I', 'RELIED', 'UPON', 'HIS', 'FAVOUR', 'TO', 'TAKE', 'IN', 'FIRST', 'ONE', 'MAN', 'AND', 'THEN', 'ANOTHER'] +4294-14317-0008-1874: ref=['I', 'BEGGED', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY', 'TO', 'NAME', 'A', 'SINGLE', 'PERSON', 'WHOM', 'I', 'HAD', 'EVER', 'TAKEN', 'IN'] +4294-14317-0008-1874: hyp=['I', 'BEGGED', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY', 'TO', 'NAME', 'A', 'SINGLE', 'PERSON', 'WHY', 'HAD', 'EVER', 'TAKEN', 'IN'] +4294-14317-0009-1875: ref=['I', 'SAID', 'MY', 'LORD', 'I', 'THANK', 'YOU', 'AND', 'BEG', 'YOU', 'TO', 'CONDESCEND', 'SO', 'FAR', 'AS', 'TO', 'LISTEN', 'TO', 'FOUR', 'WORDS', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'LENT', 'ME', 'A', 'PAIR', 'OF', 'OLD', 'SCALES', 'TWO', 'ANVILS', 'AND', 'THREE', 'LITTLE', 'HAMMERS', 'WHICH', 'ARTICLES', 'I', 'BEGGED', 'HIS', 'WORKMAN', 'GIORGIO', 'DA', 'CORTONA', 'FIFTEEN', 'DAYS', 'AGO', 'TO', 'FETCH', 'BACK'] +4294-14317-0009-1875: hyp=['I', 'SAID', 'MY', 'LORD', 'I', 'THANK', 'YOU', 'AND', 'BEG', 'YOU', 'TO', 'CONDESCEND', 'SO', 'FAR', 'AS', 'TO', 'LISTEN', 'TO', 'FOUR', 'WORDS', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'LENT', 'ME', 'A', 'PAIR', 'OF', 'OLD', 'SCALES', 'TWO', 'ANVILS', 'AND', 'THREE', 'LITTLE', 'HAMMERS', 'WHICH', 'ARTICLES', 'I', 'BEGGED', 'HIS', 'WORKMEN', 'GEORGIO', 'DE', 'CORTEANA', 'FIFTEEN', 'DAYS', 'AGO', 'TO', 'FETCH', 'BACK'] +4294-14317-0010-1876: ref=['GIORGIO', 'CAME', 'FOR', 'THEM', 'HIMSELF'] +4294-14317-0010-1876: hyp=['YOUR', 'JOE', 'CAME', 'FOR', 'THEM', 'HIS', 'HEALTH'] +4294-14317-0011-1877: ref=['I', 'HOPE', 'TO', 'PROVE', 'ON', 'WHAT', 'ACCOUNT', 'THAT', 'SCOUNDREL', 'TRIES', 'TO', 'BRING', 'ME', 'INTO', 'DISGRACE'] +4294-14317-0011-1877: hyp=['I', 'HOPE', 'TO', 'PROVE', 'ON', 'WHAT', 'ACCOUNT', 'THAT', 'SCOUNDREL', 'TRIES', 'TO', 'BRING', 'ME', 'INTO', 'DISGRACE'] 
+4294-14317-0012-1878: ref=['WHEN', 'HE', 'HAD', 'HEARD', 'THIS', 'SPEECH', 'THE', 'DUKE', 'ROSE', 'UP', 'IN', 'ANGER', 'AND', 'SENT', 'FOR', 'BERNARDONE', 'WHO', 'WAS', 'FORCED', 'TO', 'TAKE', 'FLIGHT', 'AS', 'FAR', 'AS', 'VENICE', 'HE', 'AND', 'ANTONIO', 'LANDI', 'WITH', 'HIM'] +4294-14317-0012-1878: hyp=['WHEN', 'HE', 'HAD', 'HEARD', 'THIS', 'SPEECH', 'THE', 'DUKE', 'ROSE', 'UP', 'IN', 'ANGER', 'AND', 'SENT', 'FOR', 'BERNARDONE', 'WHO', 'WAS', 'FORCED', 'TO', 'TAKE', 'FLIGHT', 'AS', 'FAR', 'AS', 'VENICE', 'HE', 'AND', 'ANTONIA', 'LANDIE', 'WITH', 'HIM'] +4294-14317-0013-1879: ref=['YOU', 'HAD', 'BETTER', 'PUT', 'THIS', 'TO', 'THE', 'PROOF', 'AND', 'I', 'WILL', 'GO', 'AT', 'ONCE', 'TO', 'THE', 'BARGELLO'] +4294-14317-0013-1879: hyp=['YOU', 'HAD', 'BETTER', 'PUT', 'THIS', 'TO', 'THE', 'PROOF', 'AND', 'I', 'WILL', 'GO', 'AT', 'ONCE', 'TO', 'THE', 'BARGIENLO'] +4294-14317-0014-1880: ref=['I', 'AM', 'WILLING', 'TO', 'ENTER', 'INTO', 'COMPETITION', 'WITH', 'THE', 'ANCIENTS', 'AND', 'FEEL', 'ABLE', 'TO', 'SURPASS', 'THEM', 'FOR', 'SINCE', 'THOSE', 'EARLY', 'DAYS', 'IN', 'WHICH', 'I', 'MADE', 'THE', 'MEDALS', 'OF', 'POPE', 'CLEMENT', 'I', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'THAT', 'I', 'CAN', 'NOW', 'PRODUCE', 'FAR', 'BETTER', 'PIECES', 'OF', 'THE', 'KIND', 'I', 'THINK', 'I', 'CAN', 'ALSO', 'OUTDO', 'THE', 'COINS', 'I', 'STRUCK', 'FOR', 'DUKE', 'ALESSANDRO', 'WHICH', 'ARE', 'STILL', 'HELD', 'IN', 'HIGH', 'ESTEEM', 'IN', 'LIKE', 'MANNER', 'I', 'COULD', 'MAKE', 'FOR', 'YOU', 'LARGE', 'PIECES', 'OF', 'GOLD', 'AND', 'SILVER', 'PLATE', 'AS', 'I', 'DID', 'SO', 'OFTEN', 'FOR', 'THAT', 'NOBLE', 'MONARCH', 'KING', 'FRANCIS', 'OF', 'FRANCE', 'THANKS', 'TO', 'THE', 'GREAT', 'CONVENIENCES', 'HE', 'ALLOWED', 'ME', 'WITHOUT', 'EVER', 'LOSING', 'TIME', 'FOR', 'THE', 'EXECUTION', 'OF', 'COLOSSAL', 'STATUES', 'OR', 'OTHER', 'WORKS', 'OF', 'THE', 'SCULPTORS', 'CRAFT'] +4294-14317-0014-1880: hyp=['I', 'AM', 'WILLING', 'TO', 'ENTER', 'INTO', 'COMPETITION', 'WITH', 'THE', 'ANCIENTS', 'AND', 'FEEL', 'ABLE', 'TO', 'SURPASS', 'THEM', 'FOR', 'SINCE', 'THOSE', 'EARLY', 'DAYS', 'IN', 'WHICH', 'I', 'MADE', 'THE', 'METALS', 'OF', 'POPE', 'CLEMENT', 'I', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'THAT', 'I', 'CAN', 'NOW', 'PRODUCE', 'FAR', 'BETTER', 'PIECES', 'OF', 'THE', 'KIND', 'I', 'THINK', 'I', 'CAN', 'ALSO', 'OUTDO', 'THE', 'COINS', 'I', 'STRUCK', 'FOR', 'DUKE', 'ALISANDRO', 'WHICH', 'IS', 'STILL', 'HELD', 'IN', 'HIGH', 'ESTEEM', 'IN', 'LIKE', 'MANNER', 'I', 'COULD', 'MAKE', 'FOR', 'YOU', 'LARGE', 'PIECES', 'OF', 'GOLD', 'AND', 'SILVER', 'PLATE', 'AS', 'I', 'DID', 'SO', 'OFTEN', 'FOR', 'THAT', 'NOBLE', 'MONARCH', 'KING', 'FRANCIS', 'OF', 'FRANCE', 'THANKS', 'TO', 'THE', 'GREAT', 'CONVENIENCES', 'HE', 'ALLOWED', 'ME', 'WITHOUT', 'EVER', 'LOSING', 'TIME', 'FOR', 'THE', 'EXECUTION', 'OF', 'COLOSSAL', 'STATUES', 'OR', 'OTHER', 'WORKS', 'OF', 'THE', "SCULPTOR'S", 'CRAFT'] +4294-14317-0015-1881: ref=['AFTER', 'SEVERAL', 'MONTHS', 'WERE', 'WASTED', 'AND', 'PIERO', 'WOULD', 'NEITHER', 'WORK', 'NOR', 'PUT', 'MEN', 'TO', 'WORK', 'UPON', 'THE', 'PIECE', 'I', 'MADE', 'HIM', 'GIVE', 'IT', 'BACK'] +4294-14317-0015-1881: hyp=['AFTER', 'SEVERAL', 'MONTHS', 'WERE', 'WASTED', 'AND', 'PIERO', 'WOULD', 'NEITHER', 'WORK', 'NOR', 'PUT', 'MEN', 'TO', 'WORK', 'UPON', 'THE', 'PIECE', 'I', 'MADE', 'HIM', 'GIVE', 'IT', 'BACK'] +4294-14317-0016-1882: ref=['AMONG', 'ARTISTS', 'CERTAIN', 'ENRAGED', 'SCULPTORS', 'LAUGHED', 'AT', 'ME', 'AND', 'CALLED', 'ME', 'THE', 'NEW', 'SCULPTOR'] +4294-14317-0016-1882: hyp=['AMONG', 'ARTISTS', 'CERTAIN', 'ENRAGE', 
'SCULPTORS', 'LAUGHED', 'AT', 'ME', 'AND', 'CALLED', 'ME', 'THE', 'NEW', 'SCULPTOR'] +4294-14317-0017-1883: ref=['NOW', 'I', 'HOPE', 'TO', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'AN', 'OLD', 'SCULPTOR', 'IF', 'GOD', 'SHALL', 'GRANT', 'ME', 'THE', 'BOON', 'OF', 'FINISHING', 'MY', 'PERSEUS', 'FOR', 'THAT', 'NOBLE', 'PIAZZA', 'OF', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0017-1883: hyp=['NOW', 'I', 'HOPE', 'TO', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'AN', 'OLD', 'SCULPTOR', 'IF', 'GOD', 'SHALL', 'GRANT', 'ME', 'THE', 'BOON', 'OF', 'FINISHING', 'MY', 'PERSEUS', 'FOR', 'THAT', 'NOBLE', 'PIAZZA', 'OF', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0018-1884: ref=['HAVING', 'THIS', 'EXCELLENT', 'RESOLVE', 'IN', 'HEART', 'I', 'REACHED', 'MY', 'HOME'] +4294-14317-0018-1884: hyp=['HAVING', 'THIS', 'EXCELLENT', 'RESOLVE', 'IN', 'HEART', 'I', 'REACHED', 'MY', 'HOME'] +4294-32859-0000-1942: ref=['WYLDER', 'WAS', 'RATHER', 'SURLY', 'AFTER', 'THE', 'LADIES', 'HAD', 'FLOATED', 'AWAY', 'FROM', 'THE', 'SCENE', 'AND', 'HE', 'DRANK', 'HIS', 'LIQUOR', 'DOGGEDLY'] +4294-32859-0000-1942: hyp=['WYLDER', 'WAS', 'RATHER', 'SURLY', 'AFTER', 'THE', 'LADIES', 'HAD', 'FLOATED', 'AWAY', 'FROM', 'THE', 'SCENE', 'AND', 'HE', 'DRANK', 'HIS', 'LIQUOR', 'DOGGEDLY'] +4294-32859-0001-1943: ref=['IT', 'WAS', 'HIS', 'FANCY', 'I', 'SUPPOSE', 'TO', 'REVIVE', 'CERTAIN', 'SENTIMENTAL', 'RELATIONS', 'WHICH', 'HAD', 'IT', 'MAY', 'BE', 'ONCE', 'EXISTED', 'BETWEEN', 'HIM', 'AND', 'MISS', 'LAKE', 'AND', 'HE', 'WAS', 'A', 'PERSON', 'OF', 'THAT', 'COMBATIVE', 'TEMPERAMENT', 'THAT', 'MAGNIFIES', 'AN', 'OBJECT', 'IN', 'PROPORTION', 'AS', 'ITS', 'PURSUIT', 'IS', 'THWARTED'] +4294-32859-0001-1943: hyp=['IT', 'WAS', 'HIS', 'FANCY', 'I', 'SUPPOSE', 'TO', 'REVIVE', 'CERTAIN', 'SENTIMENTAL', 'RELATIONS', 'WHICH', 'HAD', 'IT', 'MAY', 'BE', 'ONCE', 'EXISTED', 'BETWEEN', 'HIM', 'AND', 'MISS', 'LAKE', 'AND', 'HE', 'WAS', 'A', 'PERSON', 'OF', 'THAT', 'COMBATIVE', 'TEMPERAMENT', 'THAT', 'MAGNIFIES', 'AN', 'OBJECT', 'IN', 'PROPORTION', 'AS', 'ITS', 'PURSUIT', 'IS', 'THWARTED'] +4294-32859-0002-1944: ref=['THE', 'STORY', 'OF', 'FRIDOLIN', 'AND', "RETZCH'S", 'PRETTY', 'OUTLINES'] +4294-32859-0002-1944: hyp=['THE', 'STORY', 'OF', 'FRIEDLIN', 'AND', 'WRETCH', 'IS', 'PRETTY', 'OUTLINES'] +4294-32859-0003-1945: ref=['SIT', 'DOWN', 'BESIDE', 'ME', 'AND', "I'LL", 'TELL', 'YOU', 'THE', 'STORY'] +4294-32859-0003-1945: hyp=['SIT', 'DOWN', 'BESIDE', 'ME', 'AND', "I'LL", 'TELL', 'YOU', 'THE', 'STORY'] +4294-32859-0004-1946: ref=['HE', 'ASSISTED', 'AT', 'IT', 'BUT', 'TOOK', 'NO', 'PART', 'AND', 'IN', 'FACT', 'WAS', 'LISTENING', 'TO', 'THAT', 'OTHER', 'CONVERSATION', 'WHICH', 'SOUNDED', 'WITH', 'ITS', 'PLEASANT', 'GABBLE', 'AND', 'LAUGHTER', 'LIKE', 'A', 'LITTLE', 'MUSICAL', 'TINKLE', 'OF', 'BELLS', 'IN', 'THE', 'DISTANCE'] +4294-32859-0004-1946: hyp=['HE', 'ASSISTED', 'AT', 'IT', 'BUT', 'TOOK', 'NO', 'PART', 'AND', 'IN', 'FACT', 'WAS', 'LISTENING', 'TO', 'THAT', 'OTHER', 'CONVERSATION', 'WHICH', 'SOUNDED', 'WITH', 'ITS', 'PLEASANT', 'GABBLE', 'AND', 'LAUGHTER', 'LIKE', 'A', 'LITTLE', 'MUSICAL', 'TINKLE', 'OF', 'BELLS', 'IN', 'THE', 'DISTANCE'] +4294-32859-0005-1947: ref=['BUT', 'HONEST', 'MARK', 'FORGOT', 'THAT', 'YOUNG', 'LADIES', 'DO', 'NOT', 'ALWAYS', 'COME', 'OUT', 'QUITE', 'ALONE', 'AND', 'JUMP', 'UNASSISTED', 'INTO', 'THEIR', 'VEHICLES'] +4294-32859-0005-1947: hyp=['BUT', 'HONEST', 'MARK', 'FORGOT', 'THAT', 'YOUNG', 'LADIES', 'DO', 'NOT', 'ALWAYS', 'COME', 'OUT', 'QUITE', 'ALONE', 'AND', 'JUMP', 'UNASSISTED', 'INTO', 'THEIR', 'VEHICLES'] 
+4294-35475-0000-1885: ref=['BUT', 'THE', 'MIDDLE', 'SON', 'WAS', 'LITTLE', 'AND', 'LORN', 'HE', 'WAS', 'NEITHER', 'DARK', 'NOR', 'FAIR', 'HE', 'WAS', 'NEITHER', 'HANDSOME', 'NOR', 'STRONG'] +4294-35475-0000-1885: hyp=['BUT', 'THE', 'MIDDLE', 'SUN', 'WAS', 'LITTLE', 'AND', 'LORN', 'HE', 'WAS', 'NEITHER', 'DARK', 'NOR', 'FAIR', 'HE', 'WAS', 'NEITHER', 'HANDSOME', 'NOR', 'STRONG'] +4294-35475-0001-1886: ref=['THROWING', 'HIMSELF', 'ON', 'HIS', 'KNEES', 'BEFORE', 'THE', 'KING', 'HE', 'CRIED', 'OH', 'ROYAL', 'SIRE', 'BESTOW', 'UPON', 'ME', 'ALSO', 'A', 'SWORD', 'AND', 'A', 'STEED', 'THAT', 'I', 'MAY', 'UP', 'AND', 'AWAY', 'TO', 'FOLLOW', 'MY', 'BRETHREN'] +4294-35475-0001-1886: hyp=['ROWING', 'HIMSELF', 'ON', 'HIS', 'KNEES', 'BEFORE', 'THE', 'KING', 'HE', 'CRIED', 'O', 'ROYAL', 'SIRE', 'BESTOW', 'UPON', 'ME', 'ALSO', 'A', 'SWORD', 'AND', 'A', 'STEED', 'THAT', 'I', 'MAY', 'UP', 'AND', 'WAIT', 'TO', 'FOLLOW', 'MY', 'BRETHREN'] +4294-35475-0002-1887: ref=['BUT', 'THE', 'KING', 'LAUGHED', 'HIM', 'TO', 'SCORN', 'THOU', 'A', 'SWORD', 'HE', 'QUOTH'] +4294-35475-0002-1887: hyp=['BUT', 'THE', 'KING', 'LAUGHED', 'HIM', 'TO', 'SCORN', 'THOU', 'A', 'SWORD', 'HE', 'QUOTH'] +4294-35475-0003-1888: ref=['IN', 'SOOTH', 'THOU', 'SHALT', 'HAVE', 'ONE', 'BUT', 'IT', 'SHALL', 'BE', 'ONE', 'BEFITTING', 'THY', 'MAIDEN', 'SIZE', 'AND', 'COURAGE', 'IF', 'SO', 'SMALL', 'A', 'WEAPON', 'CAN', 'BE', 'FOUND', 'IN', 'ALL', 'MY', 'KINGDOM'] +4294-35475-0003-1888: hyp=['IN', 'SOOTH', 'THOU', 'SHALT', 'HAVE', 'ONE', 'BUT', 'IT', 'SHALL', 'BE', 'ONE', 'BEFITTING', 'THY', 'MAIDEN', 'SIGHS', 'AND', 'COURAGE', 'IF', 'SO', 'SMALL', 'A', 'WEAPON', 'CAN', 'BE', 'FOUND', 'IN', 'ALL', 'MY', 'KINGDOM'] +4294-35475-0004-1889: ref=['FORTHWITH', 'THE', 'GRINNING', 'JESTER', 'BEGAN', 'SHRIEKING', 'WITH', 'LAUGHTER', 'SO', 'THAT', 'THE', 'BELLS', 'UPON', 'HIS', 'MOTLEY', 'CAP', 'WERE', 'ALL', 'SET', 'A', 'JANGLING'] +4294-35475-0004-1889: hyp=['FORTHWITH', 'THE', 'GRINNING', 'JESTER', 'BEGAN', 'SHRIEKING', 'WITH', 'LAUGHTER', 'SO', 'THAT', 'THE', 'BELLS', 'UPON', 'HIS', 'MOTLEY', 'CAP', 'WERE', 'ALL', 'SET', 'A', 'JANGLING'] +4294-35475-0005-1890: ref=['I', 'DID', 'BUT', 'LAUGH', 'TO', 'THINK', 'THE', 'SWORD', 'OF', 'ETHELRIED', 'HAD', 'BEEN', 'SO', 'QUICKLY', 'FOUND', 'RESPONDED', 'THE', 'JESTER', 'AND', 'HE', 'POINTED', 'TO', 'THE', 'SCISSORS', 'HANGING', 'FROM', 'THE', "TAILOR'S", 'GIRDLE'] +4294-35475-0005-1890: hyp=['I', 'DID', 'BUT', 'LAUGH', 'TO', 'THINK', 'THE', 'SWORD', 'OF', 'EFFLARIDE', 'HAD', 'BEEN', 'SO', 'QUICKLY', 'FOUND', 'RESPONDED', 'THE', 'JESTER', 'AND', 'HE', 'POINTED', 'TO', 'THE', 'SCISSORS', 'HANGING', 'FROM', 'THE', "TAILOR'S", 'GIRDLE'] +4294-35475-0006-1891: ref=['ONE', 'NIGHT', 'AS', 'HE', 'LAY', 'IN', 'A', 'DEEP', 'FOREST', 'TOO', 'UNHAPPY', 'TO', 'SLEEP', 'HE', 'HEARD', 'A', 'NOISE', 'NEAR', 'AT', 'HAND', 'IN', 'THE', 'BUSHES'] +4294-35475-0006-1891: hyp=['ONE', 'NIGHT', 'AS', 'HE', 'LAY', 'IN', 'A', 'DEEP', 'FOREST', 'TWO', 'UNHAPPY', 'TO', 'SLEEP', 'HE', 'HEARD', 'A', 'NOISE', 'NEAR', 'AT', 'HAND', 'IN', 'THE', 'BUSHES'] +4294-35475-0007-1892: ref=['THOU', 'SHALT', 'HAVE', 'THY', 'LIBERTY', 'HE', 'CRIED', 'EVEN', 'THOUGH', 'THOU', 'SHOULDST', 'REND', 'ME', 'IN', 'PIECES', 'THE', 'MOMENT', 'THOU', 'ART', 'FREE'] +4294-35475-0007-1892: hyp=['THOU', 'SHALT', 'HAVE', 'THY', 'LIBERTY', 'HE', 'CRIED', 'EVEN', 'THOUGH', 'THOU', 'SHOULDST', 'RUN', 'ME', 'IN', 'PIECES', 'THE', 'MOMENT', 'THOU', 'ART', 'FREE'] +4294-35475-0008-1893: ref=['IT', 'HAD', 'SUDDENLY', 'DISAPPEARED', 'AND', 'IN', 'ITS', 'PLACE', 'STOOD', 'A', 
'BEAUTIFUL', 'FAIRY', 'WITH', 'FILMY', 'WINGS', 'WHICH', 'SHONE', 'LIKE', 'RAINBOWS', 'IN', 'THE', 'MOONLIGHT'] +4294-35475-0008-1893: hyp=['HE', 'HAD', 'HITTED', 'SUDDENLY', 'DISAPPEARED', 'AND', 'IN', 'ITS', 'PLACE', 'STOOD', 'A', 'BEAUTIFUL', 'FAIRY', 'WITH', 'FILMY', 'WINGS', 'WHICH', 'SHONE', 'LIKE', 'RAINBOWS', 'IN', 'THE', 'MOONLIGHT'] +4294-35475-0009-1894: ref=['AT', 'THIS', 'MOMENT', 'THERE', 'WAS', 'A', 'DISTANT', 'RUMBLING', 'AS', 'OF', 'THUNDER', 'TIS', 'THE', 'OGRE', 'CRIED', 'THE', 'FAIRY', 'WE', 'MUST', 'HASTEN'] +4294-35475-0009-1894: hyp=['AT', 'THIS', 'MOMENT', 'THERE', 'WAS', 'A', 'DISTANT', 'RUMBLING', 'AS', 'OF', 'THUNDER', 'TIS', 'THE', 'OGRE', 'CRIED', 'THE', 'FAIRY', 'WE', 'MUST', 'HASTEN'] +4294-35475-0010-1895: ref=['SCISSORS', 'GROW', 'A', "GIANT'S", 'HEIGHT', 'AND', 'SAVE', 'US', 'FROM', 'THE', "OGRE'S", 'MIGHT'] +4294-35475-0010-1895: hyp=['SCISSORS', 'GROW', 'A', "GIANT'S", 'HEIGHT', 'AND', 'SAVE', 'US', 'FROM', 'THE', 'OGRES', 'MIGHT'] +4294-35475-0011-1896: ref=['HE', 'COULD', 'SEE', 'THE', 'OGRE', 'STANDING', 'POWERLESS', 'TO', 'HURT', 'HIM', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'CHASM', 'AND', 'GNASHING', 'HIS', 'TEETH', 'EACH', 'ONE', 'OF', 'WHICH', 'WAS', 'AS', 'BIG', 'AS', 'A', 'MILLSTON'] +4294-35475-0011-1896: hyp=['HE', 'COULD', 'SEE', 'THE', 'OGRE', 'STANDING', 'POWERLESS', 'TO', 'HURT', 'HIM', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'CHASM', 'AND', 'GNASHING', 'HIS', 'TEETH', 'EACH', 'ONE', 'OF', 'WHICH', 'WAS', 'AS', 'BIG', 'AS', 'A', 'MILLSTONE'] +4294-35475-0012-1897: ref=['THE', 'SIGHT', 'WAS', 'SO', 'TERRIBLE', 'THAT', 'HE', 'TURNED', 'ON', 'HIS', 'HEEL', 'AND', 'FLED', 'AWAY', 'AS', 'FAST', 'AS', 'HIS', 'FEET', 'COULD', 'CARRY', 'HIM'] +4294-35475-0012-1897: hyp=['THE', 'SIGHT', 'WAS', 'SO', 'TERRIBLE', 'THAT', 'HE', 'TURNED', 'ON', 'HIS', 'HEEL', 'AND', 'FLED', 'AWAY', 'AS', 'FAST', 'AS', 'HIS', 'FEET', 'COULD', 'CARRY', 'HIM'] +4294-35475-0013-1898: ref=['THOU', 'SHALT', 'NOT', 'BE', 'LEFT', 'A', 'PRISONER', 'IN', 'THIS', 'DISMAL', 'SPOT', 'WHILE', 'I', 'HAVE', 'THE', 'POWER', 'TO', 'HELP', 'THEE'] +4294-35475-0013-1898: hyp=['THOU', 'SHALT', 'NOT', 'BE', 'LEFT', 'A', 'PRISONER', 'IN', 'THIS', 'DISMAL', 'SPOT', 'WHILE', 'I', 'HAVE', 'THE', 'POWER', 'TO', 'HELP', 'THEE'] +4294-35475-0014-1899: ref=['HE', 'LIFTED', 'THE', 'SCISSORS', 'AND', 'WITH', 'ONE', 'STROKE', 'DESTROYED', 'THE', 'WEB', 'AND', 'GAVE', 'THE', 'FLY', 'ITS', 'FREEDOM'] +4294-35475-0014-1899: hyp=['HE', 'LIFTED', 'THE', 'SCISSORS', 'AND', 'WITH', 'ONE', 'STROKE', 'DESTROYED', 'THE', 'WEB', 'AND', 'GAVE', 'THE', 'FLY', 'TO', 'READ', 'THEM'] +4294-35475-0015-1900: ref=['A', 'FAINT', 'GLIMMER', 'OF', 'LIGHT', 'ON', 'THE', 'OPPOSITE', 'WALL', 'SHOWS', 'ME', 'THE', 'KEYHOLE'] +4294-35475-0015-1900: hyp=['A', 'FAINT', 'GLIMMER', 'OF', 'LIGHT', 'ON', 'THE', 'OPPOSITE', 'WALL', 'SHOWS', 'ME', 'THE', 'KEYHOLE'] +4294-35475-0016-1901: ref=['THE', 'PRINCE', 'SPENT', 'ALL', 'THE', 'FOLLOWING', 'TIME', 'UNTIL', 'MIDNIGHT', 'TRYING', 'TO', 'THINK', 'OF', 'A', 'SUITABLE', 'VERSE', 'TO', 'SAY', 'TO', 'THE', 'SCISSORS'] +4294-35475-0016-1901: hyp=['THE', 'PRINCE', 'SPENT', 'ALL', 'THE', 'FOLLOWING', 'TIME', 'UNTIL', 'MIDNIGHT', 'TRYING', 'TO', 'THINK', 'OF', 'A', 'SUITABLE', 'VERSE', 'TO', 'SAY', 'TO', 'THE', 'SCISSORS'] +4294-35475-0017-1902: ref=['AS', 'HE', 'UTTERED', 'THE', 'WORDS', 'THE', 'SCISSORS', 'LEAPED', 'OUT', 'OF', 'HIS', 'HAND', 'AND', 'BEGAN', 'TO', 'CUT', 'THROUGH', 'THE', 'WOODEN', 'SHUTTERS', 'AS', 'EASILY', 'AS', 'THROUGH', 'A', 'CHEESE'] +4294-35475-0017-1902: 
hyp=['AS', 'HE', 'UTTERED', 'THE', 'WORDS', 'THE', 'SCISSORS', 'LEAPED', 'OUT', 'OF', 'HIS', 'HAND', 'AND', 'BEGAN', 'TO', 'CUT', 'THROUGH', 'THE', 'WOODEN', 'SHUTTERS', 'AS', 'EASILY', 'AS', 'THROUGH', 'ITS', 'CHEESE'] +4294-35475-0018-1903: ref=['IN', 'A', 'VERY', 'SHORT', 'TIME', 'THE', 'PRINCE', 'HAD', 'CRAWLED', 'THROUGH', 'THE', 'OPENING'] +4294-35475-0018-1903: hyp=['IN', 'THE', 'VERY', 'SHORT', 'TIME', 'THE', 'PRINCE', 'HAD', 'CRAWLED', 'THROUGH', 'THE', 'OPENING'] +4294-35475-0019-1904: ref=['WHILE', 'HE', 'STOOD', 'LOOKING', 'AROUND', 'HIM', 'IN', 'BEWILDERMENT', 'A', 'FIREFLY', 'ALIGHTED', 'ON', 'HIS', 'ARM', 'FLASHING', 'ITS', 'LITTLE', 'LANTERN', 'IN', 'THE', "PRINCE'S", 'FACE', 'IT', 'CRIED', 'THIS', 'WAY', 'MY', 'FRIEND', 'THE', 'FLY', 'SENT', 'ME', 'TO', 'GUIDE', 'YOU', 'TO', 'A', 'PLACE', 'OF', 'SAFETY'] +4294-35475-0019-1904: hyp=['WHILE', 'HE', 'STOOD', 'LOOKING', 'AROUND', 'HIM', 'IN', 'BEWILDERMENT', 'A', 'FIREFLY', 'ALIGHTED', 'ON', 'HIS', 'HEART', 'FLASHING', 'ITS', 'LITTLE', 'LANTERN', 'IN', 'THE', "PRINCE'S", 'FACE', 'IT', 'CRIED', 'THIS', 'WAY', 'MY', 'FRIEND', 'THE', 'FLY', 'SENT', 'ME', 'TO', 'GUIDE', 'YOU', 'TO', 'A', 'PLACE', 'OF', 'SAFETY'] +4294-35475-0020-1905: ref=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'ME', 'CRIED', 'THE', 'POOR', 'PEASANT'] +4294-35475-0020-1905: hyp=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'ME', 'CRIED', 'THE', 'POOR', 'PEASANT'] +4294-35475-0021-1906: ref=['MY', 'GRAIN', 'MUST', 'FALL', 'AND', 'ROT', 'IN', 'THE', 'FIELD', 'FROM', 'OVERRIPENESS', 'BECAUSE', 'I', 'HAVE', 'NOT', 'THE', 'STRENGTH', 'TO', 'RISE', 'AND', 'HARVEST', 'IT', 'THEN', 'INDEED', 'MUST', 'WE', 'ALL', 'STARVE'] +4294-35475-0021-1906: hyp=['MY', 'GRAIN', 'MUST', 'FALL', 'IN', 'ROT', 'IN', 'THE', 'FIELD', 'FROM', 'OVER', 'RIPENESS', 'BECAUSE', 'I', 'HAVE', 'NOT', 'THE', 'STRENGTH', 'TO', 'RISE', 'IN', 'HARVEST', 'IT', 'THEN', 'INDEED', 'MUST', 'WE', 'ALL', 'STARVE'] +4294-35475-0022-1907: ref=['THE', 'GRANDAME', 'WHOM', 'HE', 'SUPPLIED', 'WITH', 'FAGOTS', 'THE', 'MERCHANT', 'WHOM', 'HE', 'RESCUED', 'FROM', 'ROBBERS', 'THE', "KING'S", 'COUNCILLOR', 'TO', 'WHOM', 'HE', 'GAVE', 'AID', 'ALL', 'BECAME', 'HIS', 'FRIENDS', 'UP', 'AND', 'DOWN', 'THE', 'LAND', 'TO', 'BEGGAR', 'OR', 'LORD', 'HOMELESS', 'WANDERER', 'OR', 'HIGH', 'BORN', 'DAME', 'HE', 'GLADLY', 'GAVE', 'UNSELFISH', 'SERVICE', 'ALL', 'UNSOUGHT', 'AND', 'SUCH', 'AS', 'HE', 'HELPED', 'STRAIGHTWAY', 'BECAME', 'HIS', 'FRIENDS'] +4294-35475-0022-1907: hyp=['THE', 'GRAND', 'DAME', 'WHOM', 'HE', 'SUPPLIED', 'WITH', 'FAGOTS', 'THE', 'MERCHANT', 'WHOM', 'HE', 'RESCUED', 'FROM', 'ROBBERS', 'THE', "KING'S", 'COUNSELLOR', 'TO', 'WHOM', 'HE', 'GAVE', 'AID', 'ALL', 'BECAME', 'HIS', 'FRIENDS', 'UP', 'AND', 'DOWN', 'THE', 'LAND', 'TO', 'BEGGAR', 'O', 'LORD', 'HOMELESS', 'WANDERER', 'HIGH', 'BORN', 'DAME', 'HE', 'GLADLY', 'GAVE', 'UNSELFISH', 'SERVICE', 'ALL', 'UNSOUGHT', 'AND', 'SUCH', 'AS', 'HE', 'HELPED', 'STRAIGHTWAY', 'BECAME', 'HIS', 'FRIENDS'] +4294-35475-0023-1908: ref=['TO', 'HIM', 'WHO', 'COULD', 'BRING', 'HER', 'BACK', 'TO', 'HER', "FATHER'S", 'CASTLE', 'SHOULD', 'BE', 'GIVEN', 'THE', 'THRONE', 'AND', 'KINGDOM', 'AS', 'WELL', 'AS', 'THE', 'PRINCESS', 'HERSELF', 'SO', 'FROM', 'FAR', 'AND', 'NEAR', 'INDEED', 'FROM', 'ALMOST', 'EVERY', 'COUNTRY', 'UNDER', 'THE', 'SUN', 'CAME', 'KNIGHTS', 'AND', 'PRINCES', 'TO', 'FIGHT', 'THE', 'OGRE'] +4294-35475-0023-1908: hyp=['TO', 'HIM', 'WHO', 'COULD', 'BRING', 'HER', 'BACK', 'TO', 'HER', "FATHER'S", 'CASTLE', 'SHOULD', 'BE', 'GIVEN', 'THE', 'THRONE', 'AND', 'KINGDOM', 'AS', 'WELL', 'AS', 
'THE', 'PRINCESS', 'HERSELF', 'SO', 'FROM', 'FAR', 'AND', 'NEAR', 'INDEED', 'FROM', 'ALMOST', 'EVERY', 'COUNTRY', 'UNDER', 'THE', 'SUN', 'CAME', 'NIGHTS', 'AND', 'PRINCES', 'TO', 'FIGHT', 'THE', 'OGRE'] +4294-35475-0024-1909: ref=['AMONG', 'THOSE', 'WHO', 'DREW', 'BACK', 'WERE', "ETHELRIED'S", 'BROTHERS', 'THE', 'THREE', 'THAT', 'WERE', 'DARK', 'AND', 'THE', 'THREE', 'THAT', 'WERE', 'FAIR'] +4294-35475-0024-1909: hyp=['AMONG', 'THOSE', 'WHO', 'DREW', 'BACK', 'WHERE', "ETHELRE'S", 'BROTHERS', 'THE', 'THREE', 'THAT', 'WERE', 'DARK', 'AND', 'THE', 'THREE', 'THAT', 'WERE', 'FAIR'] +4294-35475-0025-1910: ref=['BUT', 'ETHELRIED', 'HEEDED', 'NOT', 'THEIR', 'TAUNTS'] +4294-35475-0025-1910: hyp=['BUT', 'ETHEL', 'READ', 'HEATED', 'NOT', 'THEIR', 'TAUNTS'] +4294-35475-0026-1911: ref=['SO', 'THEY', 'ALL', 'CRIED', 'OUT', 'LONG', 'AND', 'LOUD', 'LONG', 'LIVE', 'THE', 'PRINCE', 'PRINCE', 'CISEAUX'] +4294-35475-0026-1911: hyp=['SO', 'THEY', 'ALL', 'CRIED', 'OUT', 'LONG', 'AND', 'LOUD', 'LONG', 'LIVE', 'THE', 'PRINCE', 'PRINCESO'] +4294-9934-0000-1912: ref=['HE', 'FELT', 'WHAT', 'THE', 'EARTH', 'MAY', 'POSSIBLY', 'FEEL', 'AT', 'THE', 'MOMENT', 'WHEN', 'IT', 'IS', 'TORN', 'OPEN', 'WITH', 'THE', 'IRON', 'IN', 'ORDER', 'THAT', 'GRAIN', 'MAY', 'BE', 'DEPOSITED', 'WITHIN', 'IT', 'IT', 'FEELS', 'ONLY', 'THE', 'WOUND', 'THE', 'QUIVER', 'OF', 'THE', 'GERM', 'AND', 'THE', 'JOY', 'OF', 'THE', 'FRUIT', 'ONLY', 'ARRIVE', 'LATER'] +4294-9934-0000-1912: hyp=['HE', 'FELT', 'WITH', 'THE', 'EARTH', 'MAY', 'POSSIBLY', 'FEEL', 'AT', 'THE', 'MOMENT', 'WHEN', 'IT', 'IS', 'TORN', 'OPEN', 'WITH', 'THE', 'IRON', 'IN', 'ORDER', 'THAT', 'GRAIN', 'MAY', 'BE', 'DEPOSITED', 'WITHIN', 'IT', 'IT', 'FEELS', 'ONLY', 'THE', 'WOUND', 'THE', 'QUIVER', 'OF', 'THE', 'GERM', 'THE', 'JOY', 'OF', 'THE', 'FRUIT', 'ONLY', 'ARRIVED', 'LATER'] +4294-9934-0001-1913: ref=['HE', 'HAD', 'BUT', 'JUST', 'ACQUIRED', 'A', 'FAITH', 'MUST', 'HE', 'THEN', 'REJECT', 'IT', 'ALREADY'] +4294-9934-0001-1913: hyp=["HE'D", 'BUT', 'JUST', 'ACQUIRED', 'A', 'FAITH', 'MUST', 'HE', 'THEN', 'REJECTED', 'ALREADY'] +4294-9934-0002-1914: ref=['HE', 'AFFIRMED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'HE', 'DECLARED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'DOUBT', 'AND', 'HE', 'BEGAN', 'TO', 'DOUBT', 'IN', 'SPITE', 'OF', 'HIMSELF'] +4294-9934-0002-1914: hyp=['HE', 'AFFIRMED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'HE', 'DECLARED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'DOUBT', 'AND', 'HE', 'BEGAN', 'TO', 'DOUBT', 'IN', 'SPITE', 'OF', 'HIMSELF'] +4294-9934-0003-1915: ref=['TO', 'STAND', 'BETWEEN', 'TWO', 'RELIGIONS', 'FROM', 'ONE', 'OF', 'WHICH', 'YOU', 'HAVE', 'NOT', 'AS', 'YET', 'EMERGED', 'AND', 'ANOTHER', 'INTO', 'WHICH', 'YOU', 'HAVE', 'NOT', 'YET', 'ENTERED', 'IS', 'INTOLERABLE', 'AND', 'TWILIGHT', 'IS', 'PLEASING', 'ONLY', 'TO', 'BAT', 'LIKE', 'SOULS'] +4294-9934-0003-1915: hyp=['TO', 'STAND', 'BETWEEN', 'TWO', 'RELIGIONS', 'FROM', 'ONE', 'OF', 'WHICH', 'YOU', 'HAVE', 'NOT', 'AS', 'YET', 'EMERGED', 'IN', 'ANOTHER', 'INTO', 'WHICH', 'YOU', 'HAVE', 'NOT', 'YET', 'ENTERED', 'IS', 'INTOLERABLE', 'AND', 'TWILIGHT', 'IS', 'PLEASING', 'ONLY', 'TO', 'BAT', 'LIKE', 'SOULS'] +4294-9934-0004-1916: ref=['MARIUS', 'WAS', 'CLEAR', 'EYED', 'AND', 'HE', 'REQUIRED', 'THE', 'TRUE', 'LIGHT'] +4294-9934-0004-1916: hyp=['MARIUS', 'WAS', 'CLEAR', 'EYED', 'AND', 'HE', 'REQUIRED', 'THE', 'TRUE', 'LIGHT'] +4294-9934-0005-1917: ref=['THE', 'HALF', 'LIGHTS', 'OF', 'DOUBT', 'PAINED', 'HIM'] +4294-9934-0005-1917: hyp=['THE', 'HALF', 'LIGHTS', 'OF', 'DOUBT', 
'PAINED', 'HIM'] +4294-9934-0006-1918: ref=['WHATEVER', 'MAY', 'HAVE', 'BEEN', 'HIS', 'DESIRE', 'TO', 'REMAIN', 'WHERE', 'HE', 'WAS', 'HE', 'COULD', 'NOT', 'HALT', 'THERE', 'HE', 'WAS', 'IRRESISTIBLY', 'CONSTRAINED', 'TO', 'CONTINUE', 'TO', 'ADVANCE', 'TO', 'EXAMINE', 'TO', 'THINK', 'TO', 'MARCH', 'FURTHER'] +4294-9934-0006-1918: hyp=['WHATEVER', 'MAY', 'HAVE', 'BEEN', 'HIS', 'DESIRE', 'TO', 'REMAIN', 'WHERE', 'HE', 'WAS', 'HE', 'COULD', 'NOT', 'HALT', 'THERE', 'HE', 'WAS', 'IRRESISTIBLY', 'CONSTRAINED', 'TO', 'CONTINUE', 'TO', 'ADVANCE', 'TO', 'EXAMINE', 'TO', 'THINK', 'TO', 'MARCH', 'FURTHER'] +4294-9934-0007-1919: ref=['HE', 'FEARED', 'AFTER', 'HAVING', 'TAKEN', 'SO', 'MANY', 'STEPS', 'WHICH', 'HAD', 'BROUGHT', 'HIM', 'NEARER', 'TO', 'HIS', 'FATHER', 'TO', 'NOW', 'TAKE', 'A', 'STEP', 'WHICH', 'SHOULD', 'ESTRANGE', 'HIM', 'FROM', 'THAT', 'FATHER'] +4294-9934-0007-1919: hyp=['HE', 'FEARED', 'AFTER', 'HAVING', 'TAKEN', 'SO', 'MANY', 'STEPS', 'WHICH', 'HAD', 'BROUGHT', 'HIM', 'NEARER', 'TO', 'HIS', 'FATHER', 'TO', 'NOW', 'TAKE', 'A', 'STEP', 'WHICH', 'SHOULD', 'ESTRANGE', 'HIM', 'FROM', 'THAT', 'FATHER'] +4294-9934-0008-1920: ref=['HIS', 'DISCOMFORT', 'WAS', 'AUGMENTED', 'BY', 'ALL', 'THE', 'REFLECTIONS', 'WHICH', 'OCCURRED', 'TO', 'HIM'] +4294-9934-0008-1920: hyp=['HIS', 'DISCOMFORT', 'WAS', 'AUGMENTED', 'BY', 'ALL', 'THE', 'REFLECTIONS', 'WHICH', 'OCCURRED', 'TO', 'HIM'] +4294-9934-0009-1921: ref=['IN', 'THE', 'TROUBLED', 'STATE', 'OF', 'HIS', 'CONSCIENCE', 'HE', 'NO', 'LONGER', 'THOUGHT', 'OF', 'CERTAIN', 'SERIOUS', 'SIDES', 'OF', 'EXISTENCE'] +4294-9934-0009-1921: hyp=['IN', 'THE', 'TROUBLED', 'STATE', 'OF', 'HIS', 'CONSCIENCE', 'HE', 'NO', 'LONGER', 'THOUGHT', 'OF', 'CERTAIN', 'SERIOUS', 'SIDES', 'OF', 'EXISTENCE'] +4294-9934-0010-1922: ref=['THEY', 'SOON', 'ELBOWED', 'HIM', 'ABRUPTLY'] +4294-9934-0010-1922: hyp=['THEY', 'SOON', 'ELBOWED', 'HIM', 'ABRUPTLY'] +4294-9934-0011-1923: ref=['REQUEST', 'COURFEYRAC', 'TO', 'COME', 'AND', 'TALK', 'WITH', 'ME', 'SAID', 'MARIUS'] +4294-9934-0011-1923: hyp=['REQUEST', 'HER', 'FORACT', 'TO', 'COME', 'AND', 'TALK', 'WITH', 'ME', 'SAID', 'MARIUS'] +4294-9934-0012-1924: ref=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'YOU', 'SAID', 'COURFEYRAC'] +4294-9934-0012-1924: hyp=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'YOU', 'SAID', 'CURFYRAC'] +4294-9934-0013-1925: ref=['WHAT', 'ARE', 'YOU', 'GOING', 'TO', 'DO', 'I', 'DO', 'NOT', 'KNOW'] +4294-9934-0013-1925: hyp=['WHAT', 'ARE', 'YOU', 'GOING', 'TO', 'DO', 'I', 'DO', 'NOT', 'KNOW'] +4294-9934-0014-1926: ref=['SILVER', 'GOLD', 'HERE', 'IT', 'IS'] +4294-9934-0014-1926: hyp=['SILVER', 'GOLD', 'HERE', 'IT', 'IS'] +4294-9934-0015-1927: ref=['YOU', 'WILL', 'THEN', 'HAVE', 'ONLY', 'A', 'PAIR', 'OF', 'TROUSERS', 'A', 'WAISTCOAT', 'A', 'HAT', 'AND', 'A', 'COAT', 'AND', 'MY', 'BOOTS'] +4294-9934-0015-1927: hyp=['YOU', 'WILL', 'THEN', 'HAVE', 'ONLY', 'A', 'PAIR', 'OF', 'TROUSERS', 'A', 'WAISTCOAT', 'A', 'HAT', 'AND', 'A', 'COAT', 'AND', 'MY', 'BOOTS'] +4294-9934-0016-1928: ref=['THAT', 'WILL', 'BE', 'ENOUGH'] +4294-9934-0016-1928: hyp=['THAT', 'WILL', 'BE', 'ENOUGH'] +4294-9934-0017-1929: ref=['NO', 'IT', 'IS', 'NOT', 'GOOD', 'WHAT', 'WILL', 'YOU', 'DO', 'AFTER', 'THAT'] +4294-9934-0017-1929: hyp=['NO', 'IT', 'IS', 'NOT', 'GOOD', 'WHAT', 'WILL', 'YOU', 'DO', 'AFTER', 'THAT'] +4294-9934-0018-1930: ref=['DO', 'YOU', 'KNOW', 'GERMAN', 'NO'] +4294-9934-0018-1930: hyp=['DO', 'YOU', 'KNOW', 'GERMAN', 'NO'] +4294-9934-0019-1931: ref=['IT', 'IS', 'BADLY', 'PAID', 'WORK', 'BUT', 'ONE', 'CAN', 'LIVE', 'BY', 'IT'] +4294-9934-0019-1931: 
hyp=['IT', 'IS', 'BADLY', 'PAID', 'WORK', 'BUT', 'ONE', 'CAN', 'LIVE', 'BY', 'IT'] +4294-9934-0020-1932: ref=['THE', 'CLOTHES', 'DEALER', 'WAS', 'SENT', 'FOR'] +4294-9934-0020-1932: hyp=['THE', 'CLOTHES', 'DEALER', 'WAS', 'SENT', 'FOR'] +4294-9934-0021-1933: ref=['HE', 'PAID', 'TWENTY', 'FRANCS', 'FOR', 'THE', 'CAST', 'OFF', 'GARMENTS', 'THEY', 'WENT', 'TO', 'THE', "WATCHMAKER'S"] +4294-9934-0021-1933: hyp=['HE', 'PAID', 'TWENTY', 'FRANCS', 'FOR', 'THE', 'CAST', 'OFF', 'GARMENTS', 'THEY', 'WENT', 'TO', 'THE', 'WATCHMAKERS'] +4294-9934-0022-1934: ref=['HE', 'BOUGHT', 'THE', 'WATCH', 'FOR', 'FORTY', 'FIVE', 'FRANCS'] +4294-9934-0022-1934: hyp=['HE', 'BOUGHT', 'THE', 'WATCH', 'FOR', 'FORTY', 'FIVE', 'FRANCS'] +4294-9934-0023-1935: ref=['HELLO', 'I', 'HAD', 'FORGOTTEN', 'THAT', 'SAID', 'MARIUS'] +4294-9934-0023-1935: hyp=['HALLO', 'I', 'HAD', 'FORGOTTEN', 'THAT', 'SAID', 'MARIUS'] +4294-9934-0024-1936: ref=['THE', 'LANDLORD', 'PRESENTED', 'HIS', 'BILL', 'WHICH', 'HAD', 'TO', 'BE', 'PAID', 'ON', 'THE', 'SPOT'] +4294-9934-0024-1936: hyp=['THE', 'LANDLORD', 'PRESENTED', 'HIS', 'BILL', 'WHICH', 'HAD', 'TO', 'BE', 'PAID', 'ON', 'THE', 'SPOT'] +4294-9934-0025-1937: ref=['I', 'HAVE', 'TEN', 'FRANCS', 'LEFT', 'SAID', 'MARIUS'] +4294-9934-0025-1937: hyp=['I', 'HAVE', 'TEN', 'FRANCS', 'LEFT', 'SAID', 'MARIUS'] +4294-9934-0026-1938: ref=['THAT', 'WILL', 'BE', 'SWALLOWING', 'A', 'TONGUE', 'VERY', 'FAST', 'OR', 'A', 'HUNDRED', 'SOUS', 'VERY', 'SLOWLY'] +4294-9934-0026-1938: hyp=['THAT', 'WILL', 'BE', 'SWALLOWING', 'A', 'TONGUE', 'VERY', 'FAST', 'OR', 'A', 'HUNDRED', 'SOUS', 'VERY', 'SLOWLY'] +4294-9934-0027-1939: ref=['ONE', 'MORNING', 'ON', 'HIS', 'RETURN', 'FROM', 'THE', 'LAW', 'SCHOOL', 'MARIUS', 'FOUND', 'A', 'LETTER', 'FROM', 'HIS', 'AUNT', 'AND', 'THE', 'SIXTY', 'PISTOLES', 'THAT', 'IS', 'TO', 'SAY', 'SIX', 'HUNDRED', 'FRANCS', 'IN', 'GOLD', 'IN', 'A', 'SEALED', 'BOX'] +4294-9934-0027-1939: hyp=['ONE', 'MORNING', 'ON', 'HIS', 'RETURN', 'FROM', 'THE', 'LAST', 'SCHOOL', 'MARIUS', 'FOUND', 'A', 'LETTER', 'FROM', 'HIS', 'AUNT', 'AND', 'THE', 'SIXTY', 'PISTOL', 'THAT', 'IS', 'TO', 'SAY', 'SIX', 'HUNDRED', 'FRANCS', 'IN', 'GOLD', 'AND', 'A', 'SEALED', 'BOX'] +4294-9934-0028-1940: ref=['MARIUS', 'SENT', 'BACK', 'THE', 'THIRTY', 'LOUIS', 'TO', 'HIS', 'AUNT', 'WITH', 'A', 'RESPECTFUL', 'LETTER', 'IN', 'WHICH', 'HE', 'STATED', 'THAT', 'HE', 'HAD', 'SUFFICIENT', 'MEANS', 'OF', 'SUBSISTENCE', 'AND', 'THAT', 'HE', 'SHOULD', 'BE', 'ABLE', 'THENCEFORTH', 'TO', 'SUPPLY', 'ALL', 'HIS', 'NEEDS'] +4294-9934-0028-1940: hyp=['MARIUS', 'SENT', 'BACK', 'THE', 'THIRTY', 'LOUIS', 'TO', 'HIS', 'AUNT', 'WITH', 'THE', 'RESPECTFUL', 'LETTER', 'IN', 'WHICH', 'SHE', 'STATED', 'THAT', 'HE', 'HAD', 'SUSPICION', 'MEANS', 'OF', 'SUBSISTENCE', 'AND', 'THAT', 'HE', 'SHOULD', 'BE', 'ABLE', 'THENCEFORTH', 'TO', 'SUPPLY', 'ALL', 'HIS', 'NEEDS'] +4294-9934-0029-1941: ref=['AT', 'THAT', 'MOMENT', 'HE', 'HAD', 'THREE', 'FRANCS', 'LEFT'] +4294-9934-0029-1941: hyp=['AT', 'THAT', 'MOMENT', 'HE', 'HAD', 'THREE', 'FRANCS', 'LEFT'] +4350-10919-0000-2716: ref=['HE', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'NO', 'GOOD', 'TALKING', 'TO', 'THE', 'OLD', 'MAN', 'AND', 'THAT', 'THE', 'PRINCIPAL', 'PERSON', 'IN', 'THE', 'HOUSE', 'WAS', 'THE', 'MOTHER'] +4350-10919-0000-2716: hyp=['HE', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'NO', 'GOOD', 'TALKING', 'TO', 'THE', 'OLD', 'MAN', 'AND', 'THAT', 'THE', 'PRINCIPAL', 'PERSON', 'IN', 'THE', 'HOUSE', 'WAS', 'THE', 'MOTHER'] +4350-10919-0001-2717: ref=['BEFORE', 'HER', 'HE', 'DECIDED', 'TO', 'SCATTER', 'HIS', 'PEARLS'] 
+4350-10919-0001-2717: hyp=['BEFORE', 'HER', 'HE', 'DECIDED', 'TO', 'SCATTER', 'HIS', 'PEARLS'] +4350-10919-0002-2718: ref=['THE', 'PRINCESS', 'WAS', 'DISTRACTED', 'AND', 'DID', 'NOT', 'KNOW', 'WHAT', 'TO', 'DO', 'SHE', 'FELT', 'SHE', 'HAD', 'SINNED', 'AGAINST', 'KITTY'] +4350-10919-0002-2718: hyp=['THE', 'PRINCESS', 'WAS', 'DISTRACTED', 'AND', 'DID', 'NOT', 'KNOW', 'WHAT', 'TO', 'DO', 'SHE', 'FELT', 'SHE', 'HAD', 'SENT', 'AGAINST', 'KITTY'] +4350-10919-0003-2719: ref=['WELL', 'DOCTOR', 'DECIDE', 'OUR', 'FATE', 'SAID', 'THE', 'PRINCESS', 'TELL', 'ME', 'EVERYTHING'] +4350-10919-0003-2719: hyp=['WELL', 'DOCTOR', 'DECIDE', 'OUR', 'FATE', 'SAID', 'THE', 'PRINCESS', 'TELL', 'ME', 'EVERYTHING'] +4350-10919-0004-2720: ref=['IS', 'THERE', 'HOPE', 'SHE', 'MEANT', 'TO', 'SAY', 'BUT', 'HER', 'LIPS', 'QUIVERED', 'AND', 'SHE', 'COULD', 'NOT', 'UTTER', 'THE', 'QUESTION', 'WELL', 'DOCTOR'] +4350-10919-0004-2720: hyp=['IS', 'THERE', 'HOPE', 'SHE', 'MEANT', 'TO', 'SAY', 'BUT', 'HER', 'LIPS', 'QUIVERED', 'AND', 'SHE', 'COULD', 'NOT', 'UTTER', 'THE', 'QUESTION', 'WELL', 'DOCTOR'] +4350-10919-0005-2721: ref=['AS', 'YOU', 'PLEASE', 'THE', 'PRINCESS', 'WENT', 'OUT', 'WITH', 'A', 'SIGH'] +4350-10919-0005-2721: hyp=['AS', 'YOU', 'PLEASE', 'THE', 'PRINCESS', 'WENT', 'OUT', 'WITH', 'A', 'SIGH'] +4350-10919-0006-2722: ref=['THE', 'FAMILY', 'DOCTOR', 'RESPECTFULLY', 'CEASED', 'IN', 'THE', 'MIDDLE', 'OF', 'HIS', 'OBSERVATIONS'] +4350-10919-0006-2722: hyp=['THE', 'FAMILY', 'DOCTOR', 'RESPECTFULLY', 'CEASED', 'IN', 'THE', 'MIDDLE', 'OF', 'HIS', 'OBSERVATIONS'] +4350-10919-0007-2723: ref=['AND', 'THERE', 'ARE', 'INDICATIONS', 'MALNUTRITION', 'NERVOUS', 'EXCITABILITY', 'AND', 'SO', 'ON'] +4350-10919-0007-2723: hyp=['AND', 'THERE', 'ARE', 'INDICATIONS', 'MALLETRICIAN', 'NERVOUS', 'EXCITABILITY', 'AND', 'SO', 'ON'] +4350-10919-0008-2724: ref=['THE', 'QUESTION', 'STANDS', 'THUS', 'IN', 'PRESENCE', 'OF', 'INDICATIONS', 'OF', 'TUBERCULOUS', 'PROCESS', 'WHAT', 'IS', 'TO', 'BE', 'DONE', 'TO', 'MAINTAIN', 'NUTRITION'] +4350-10919-0008-2724: hyp=['THE', 'QUESTION', 'SENDS', 'THUS', 'IN', 'PRESENCE', 'OF', 'INDICATIONS', 'OF', "TIBERICAN'S", 'PROCESS', 'WHAT', 'IS', 'TO', 'BE', 'DONE', 'TO', 'MAINTAIN', 'UTRITION'] +4350-10919-0009-2725: ref=['YES', "THAT'S", 'AN', 'UNDERSTOOD', 'THING', 'RESPONDED', 'THE', 'CELEBRATED', 'PHYSICIAN', 'AGAIN', 'GLANCING', 'AT', 'HIS', 'WATCH'] +4350-10919-0009-2725: hyp=['YES', 'I', 'CAN', 'UNDERSTOOD', 'THING', 'RESPONDED', 'THE', 'CELEBRATED', 'PHYSICIAN', 'AGAIN', 'GLANCING', 'AT', 'HIS', 'WATCH'] +4350-10919-0010-2726: ref=['BEG', 'PARDON', 'IS', 'THE', 'YAUSKY', 'BRIDGE', 'DONE', 'YET', 'OR', 'SHALL', 'I', 'HAVE', 'TO', 'DRIVE', 'AROUND'] +4350-10919-0010-2726: hyp=['BEG', 'PARDON', 'IS', 'THE', 'YSKEEPER', 'STANDARD', 'OR', 'SHALL', 'I', 'HAVE', 'TO', 'DRIVE', 'HER', 'ON'] +4350-10919-0011-2727: ref=['HE', 'ASKED', 'AH', 'IT', 'IS'] +4350-10919-0011-2727: hyp=['HE', 'ASKED', 'AH', 'IT', 'IS'] +4350-10919-0012-2728: ref=['OH', 'WELL', 'THEN', 'I', 'CAN', 'DO', 'IT', 'IN', 'TWENTY', 'MINUTES'] +4350-10919-0012-2728: hyp=['OH', 'WELL', 'THEN', 'I', 'CAN', 'DO', 'IT', 'IN', 'TWENTY', 'MINUTES'] +4350-10919-0013-2729: ref=['AND', 'HOW', 'ABOUT', 'A', 'TOUR', 'ABROAD', 'ASKED', 'THE', 'FAMILY', 'DOCTOR'] +4350-10919-0013-2729: hyp=['AND', 'HOW', 'ABOUT', 'IT', 'TO', 'HER', 'BROAD', 'ASKED', 'THE', 'FELLOW', 'DOCTOR'] +4350-10919-0014-2730: ref=['WHAT', 'IS', 'WANTED', 'IS', 'MEANS', 'OF', 'IMPROVING', 'NUTRITION', 'AND', 'NOT', 'FOR', 'LOWERING', 'IT'] +4350-10919-0014-2730: hyp=['WHAT', 'IS', 
'WANTED', 'IS', 'THE', 'MEANS', 'OF', 'IMPROVING', 'UTRITION', 'AND', 'NOT', 'FOR', 'LOWERING', 'IT'] +4350-10919-0015-2731: ref=['THE', 'FAMILY', 'DOCTOR', 'LISTENED', 'ATTENTIVELY', 'AND', 'RESPECTFULLY'] +4350-10919-0015-2731: hyp=['THE', 'FAMILY', 'DOCTOR', 'LISTENED', 'ATTENTIVELY', 'AND', 'RESPECTFULLY'] +4350-10919-0016-2732: ref=['BUT', 'IN', 'FAVOR', 'OF', 'FOREIGN', 'TRAVEL', 'I', 'WOULD', 'URGE', 'THE', 'CHANGE', 'OF', 'HABITS', 'THE', 'REMOVAL', 'FROM', 'CONDITIONS', 'CALLING', 'UP', 'REMINISCENCES'] +4350-10919-0016-2732: hyp=['BUT', 'IN', 'FAVOUR', 'OF', 'FOREIGN', 'TRAVEL', 'I', 'WOULD', 'URGE', 'THE', 'CHANGE', 'OF', 'HABITS', 'THE', 'REMOVAL', 'FROM', 'CONDITIONS', 'CALLING', 'UP', 'REMINISCENCES'] +4350-10919-0017-2733: ref=['AND', 'THEN', 'THE', 'MOTHER', 'WISHES', 'IT', 'HE', 'ADDED'] +4350-10919-0017-2733: hyp=['AND', 'THEN', 'THE', 'MOTHER', 'WISHES', 'IT', 'HE', 'ADDED'] +4350-10919-0018-2734: ref=['AH', 'WELL', 'IN', 'THAT', 'CASE', 'TO', 'BE', 'SURE', 'LET', 'THEM', 'GO', 'ONLY', 'THOSE', 'GERMAN', 'QUACKS', 'ARE', 'MISCHIEVOUS'] +4350-10919-0018-2734: hyp=['AH', 'WELL', 'IN', 'THAT', 'CASE', 'TO', 'BE', 'SURE', 'LET', 'THEM', 'GO', 'ONLY', 'THOSE', 'GERMAN', 'QUACKS', 'ARE', 'MISCHIEVOUS'] +4350-10919-0019-2735: ref=['OH', "TIME'S", 'UP', 'ALREADY', 'AND', 'HE', 'WENT', 'TO', 'THE', 'DOOR'] +4350-10919-0019-2735: hyp=['O', 'TIMES', 'UP', 'ALREADY', 'AND', 'HE', 'WENT', 'TO', 'THE', 'DOOR'] +4350-10919-0020-2736: ref=['THE', 'CELEBRATED', 'DOCTOR', 'ANNOUNCED', 'TO', 'THE', 'PRINCESS', 'A', 'FEELING', 'OF', 'WHAT', 'WAS', 'DUE', 'FROM', 'HIM', 'DICTATED', 'HIS', 'DOING', 'SO', 'THAT', 'HE', 'OUGHT', 'TO', 'SEE', 'THE', 'PATIENT', 'ONCE', 'MORE'] +4350-10919-0020-2736: hyp=['THE', 'CELEBRATED', 'DOCTOR', 'ANNOUNCED', 'TO', 'THE', 'PRINCESS', 'A', 'FEELING', 'OF', 'WHAT', 'WAS', 'DUE', 'FROM', 'HIM', 'DICTATED', 'HIS', 'DOING', 'SO', 'THAT', 'HE', 'OUGHT', 'TO', 'SEE', 'THE', 'PATIENT', 'ONCE', 'MORE'] +4350-10919-0021-2737: ref=['OH', 'NO', 'ONLY', 'A', 'FEW', 'DETAILS', 'PRINCESS', 'COME', 'THIS', 'WAY'] +4350-10919-0021-2737: hyp=['OH', 'NO', 'ONLY', 'A', 'FEW', 'DETAILS', 'PRINCESS', 'COME', 'THIS', 'WAY'] +4350-10919-0022-2738: ref=['AND', 'THE', 'MOTHER', 'ACCOMPANIED', 'BY', 'THE', 'DOCTOR', 'WENT', 'INTO', 'THE', 'DRAWING', 'ROOM', 'TO', 'KITTY'] +4350-10919-0022-2738: hyp=['AND', 'THE', 'MOTHER', 'ACCOMPANIED', 'BY', 'THE', 'DOCTOR', 'WENT', 'INTO', 'THE', 'DRAWING', 'ROOM', 'TO', 'KITTY'] +4350-10919-0023-2739: ref=['WHEN', 'THE', 'DOCTOR', 'CAME', 'IN', 'SHE', 'FLUSHED', 'CRIMSON', 'AND', 'HER', 'EYES', 'FILLED', 'WITH', 'TEARS'] +4350-10919-0023-2739: hyp=['WHEN', 'THE', 'DOCTOR', 'CAME', 'IN', 'SHE', 'FLUSHED', 'CRIMSON', 'AND', 'HER', 'EYES', 'FILLED', 'WITH', 'TEARS'] +4350-10919-0024-2740: ref=['SHE', 'ANSWERED', 'HIM', 'AND', 'ALL', 'AT', 'ONCE', 'GOT', 'UP', 'FURIOUS'] +4350-10919-0024-2740: hyp=['SHE', 'ANSWERED', 'HIM', 'AND', 'ALL', 'AT', 'ONCE', 'GOT', 'UP', 'FURIOUS'] +4350-10919-0025-2741: ref=['EXCUSE', 'ME', 'DOCTOR', 'BUT', 'THERE', 'IS', 'REALLY', 'NO', 'OBJECT', 'IN', 'THIS'] +4350-10919-0025-2741: hyp=['EXCUSE', 'ME', 'DOCTOR', 'BUT', 'THERE', 'IS', 'REALLY', 'NO', 'OBJECT', 'IN', 'THIS'] +4350-10919-0026-2742: ref=['THIS', 'IS', 'THE', 'THIRD', 'TIME', "YOU'VE", 'ASKED', 'ME', 'THE', 'SAME', 'THING'] +4350-10919-0026-2742: hyp=['THIS', 'IS', 'THE', 'THIRD', 'TIME', "YOU'VE", 'ASKED', 'ME', 'THE', 'SAME', 'THING'] +4350-10919-0027-2743: ref=['THE', 'CELEBRATED', 'DOCTOR', 'DID', 'NOT', 'TAKE', 'OFFENSE'] +4350-10919-0027-2743: 
hyp=['THE', 'CELEBRATED', 'DOCTOR', 'DID', 'NOT', 'TAKE', 'OFFENCE'] +4350-10919-0028-2744: ref=['NERVOUS', 'IRRITABILITY', 'HE', 'SAID', 'TO', 'THE', 'PRINCESS', 'WHEN', 'KITTY', 'HAD', 'LEFT', 'THE', 'ROOM', 'HOWEVER', 'I', 'HAD', 'FINISHED'] +4350-10919-0028-2744: hyp=['NERVOUS', 'IRRITABILITY', 'HE', 'SAID', 'TO', 'THE', 'PRINCESS', 'WHEN', 'KATY', 'HAD', 'LEFT', 'THE', 'ROOM', 'HOWEVER', 'I', 'HAD', 'FINISHED'] +4350-10919-0029-2745: ref=['AND', 'THE', 'DOCTOR', 'BEGAN', 'SCIENTIFICALLY', 'EXPLAINING', 'TO', 'THE', 'PRINCESS', 'AS', 'AN', 'EXCEPTIONALLY', 'INTELLIGENT', 'WOMAN', 'THE', 'CONDITION', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'AND', 'CONCLUDED', 'BY', 'INSISTING', 'ON', 'THE', 'DRINKING', 'OF', 'THE', 'WATERS', 'WHICH', 'WERE', 'CERTAINLY', 'HARMLESS'] +4350-10919-0029-2745: hyp=['AND', 'THE', 'DOCTOR', 'BEGAN', 'SCIENTIFICALLY', 'EXPLAINING', 'TO', 'THE', 'PRINCESS', 'AS', 'AN', 'EXCEPTIONALLY', 'INTELLIGENT', 'WOMAN', 'THE', 'CONDITION', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'AND', 'CONCLUDED', 'BY', 'INSISTING', 'ON', 'THE', 'DRINKING', 'OF', 'THE', 'WATERS', 'WHICH', 'WERE', 'CERTAINLY', 'HARMLESS'] +4350-10919-0030-2746: ref=['AT', 'THE', 'QUESTION', 'SHOULD', 'THEY', 'GO', 'ABROAD', 'THE', 'DOCTOR', 'PLUNGED', 'INTO', 'DEEP', 'MEDITATION', 'AS', 'THOUGH', 'RESOLVING', 'A', 'WEIGHTY', 'PROBLEM'] +4350-10919-0030-2746: hyp=['BUT', 'THE', 'QUESTION', 'SHOULD', 'THEY', 'GO', 'ABROAD', 'THE', 'DOCTOR', 'PLUNGED', 'INTO', 'DEEP', 'MEDITATION', 'AS', 'THOUGH', 'RESOLVING', 'A', 'WEIGHTY', 'PROBLEM'] +4350-10919-0031-2747: ref=['FINALLY', 'HIS', 'DECISION', 'WAS', 'PRONOUNCED', 'THEY', 'WERE', 'TO', 'GO', 'ABROAD', 'BUT', 'TO', 'PUT', 'NO', 'FAITH', 'IN', 'FOREIGN', 'QUACKS', 'AND', 'TO', 'APPLY', 'TO', 'HIM', 'IN', 'ANY', 'NEED'] +4350-10919-0031-2747: hyp=['FINALLY', 'HIS', 'DECISION', 'WAS', 'PRONOUNCED', 'THEY', 'WERE', 'TO', 'GO', 'ABROAD', 'BUT', 'TO', 'PUT', 'NO', 'FAITH', 'IN', 'FOREIGN', 'QUACKS', 'AND', 'TO', 'APPLY', 'TO', 'HIM', 'IN', 'ANY', 'NEED'] +4350-10919-0032-2748: ref=['IT', 'SEEMED', 'AS', 'THOUGH', 'SOME', 'PIECE', 'OF', 'GOOD', 'FORTUNE', 'HAD', 'COME', 'TO', 'PASS', 'AFTER', 'THE', 'DOCTOR', 'HAD', 'GONE'] +4350-10919-0032-2748: hyp=['IT', 'SEEMED', 'AS', 'THOUGH', 'SOME', 'PIECE', 'OF', 'GOOD', 'FORTUNE', 'HAD', 'COME', 'TO', 'PASS', 'AFTER', 'THE', 'DOCTOR', 'HAD', 'GONE'] +4350-10919-0033-2749: ref=['THE', 'MOTHER', 'WAS', 'MUCH', 'MORE', 'CHEERFUL', 'WHEN', 'SHE', 'WENT', 'BACK', 'TO', 'HER', 'DAUGHTER', 'AND', 'KITTY', 'PRETENDED', 'TO', 'BE', 'MORE', 'CHEERFUL'] +4350-10919-0033-2749: hyp=['THE', 'MOTHER', 'WAS', 'MUCH', 'MORE', 'CHEERFUL', 'WHEN', 'SHE', 'WENT', 'BACK', 'TO', 'HER', 'DAUGHTER', 'AND', 'KITTY', 'PRETENDED', 'TO', 'BE', 'MORE', 'CHEERFUL'] +4350-9170-0000-2750: ref=['EDUCATED', 'PEOPLE', 'OF', 'THE', 'UPPER', 'CLASSES', 'ARE', 'TRYING', 'TO', 'STIFLE', 'THE', 'EVER', 'GROWING', 'SENSE', 'OF', 'THE', 'NECESSITY', 'OF', 'TRANSFORMING', 'THE', 'EXISTING', 'SOCIAL', 'ORDER'] +4350-9170-0000-2750: hyp=['EDUCATED', 'PEOPLE', 'OF', 'THE', 'UPPER', 'CLASSES', 'ARE', 'TRYING', 'TO', 'STIFLE', 'THE', 'EVERGREWING', 'SENSE', 'OF', 'THE', 'NECESSITY', 'OF', 'TRANSFORMING', 'THE', 'EXISTING', 'SOCIAL', 'ORDER'] +4350-9170-0001-2751: ref=['THIS', 'IS', 'ABSOLUTELY', 'INCORRECT'] +4350-9170-0001-2751: hyp=['MISSUS', 'ABSOLUTELY', 'AND', 'CORRECT'] +4350-9170-0002-2752: ref=['IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IT', 'IS', 'SUPPOSED', 'THAT', 'SINCE', 'THE', 'AIM', 'OF', 'LIFE', 'IS', 'FOUND', 'IN', 'GROUPS', 'OF', 'INDIVIDUALS', 
'INDIVIDUALS', 'WILL', 'VOLUNTARILY', 'SACRIFICE', 'THEIR', 'OWN', 'INTERESTS', 'FOR', 'THE', 'INTERESTS', 'OF', 'THE', 'GROUP'] +4350-9170-0002-2752: hyp=['IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IT', 'IS', 'SUPPOSED', 'SINCE', 'THE', 'AIM', 'OF', 'LIFE', 'IS', 'FOUND', 'IN', 'GROUPS', 'OF', 'INDIVIDUALS', 'INDIVIDUALS', 'WHO', 'VOLUNTARILY', 'SACRIFICE', 'THEIR', 'OWN', 'INTEREST', 'FOR', 'THE', 'INTEREST', 'OF', 'THE', 'GROUP'] +4350-9170-0003-2753: ref=['THE', 'CHAMPIONS', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'USUALLY', 'TRY', 'TO', 'CONNECT', 'THE', 'IDEA', 'OF', 'AUTHORITY', 'THAT', 'IS', 'OF', 'VIOLENCE', 'WITH', 'THE', 'IDEA', 'OF', 'MORAL', 'INFLUENCE', 'BUT', 'THIS', 'CONNECTION', 'IS', 'QUITE', 'IMPOSSIBLE'] +4350-9170-0003-2753: hyp=['THE', 'CHAMPIONS', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'USUALLY', 'TRY', 'TO', 'CONNECT', 'THE', 'IDEA', 'OF', 'AUTHORITY', 'THAT', 'IS', 'OF', 'VIOLENCE', 'WITH', 'THE', 'IDEA', 'OF', 'MORAL', 'INFLUENCE', 'BUT', 'THIS', 'CONNECTION', 'IS', 'QUITE', 'IMPOSSIBLE'] +4350-9170-0004-2754: ref=['THE', 'MAN', 'WHO', 'IS', 'CONTROLLED', 'BY', 'MORAL', 'INFLUENCE', 'ACTS', 'IN', 'ACCORDANCE', 'WITH', 'HIS', 'OWN', 'DESIRES'] +4350-9170-0004-2754: hyp=['THE', 'MAN', 'WHO', 'HAS', 'CONTROLLED', 'BY', 'MORAL', 'INFLUENCE', 'ACTS', 'IN', 'ACCORDANCE', 'WITH', 'HIS', 'OWN', 'DESIRES'] +4350-9170-0005-2755: ref=['THE', 'BASIS', 'OF', 'AUTHORITY', 'IS', 'BODILY', 'VIOLENCE'] +4350-9170-0005-2755: hyp=['THE', 'BASIS', 'OF', 'AUTHORITY', 'IS', 'BODILY', 'VIOLENCE'] +4350-9170-0006-2756: ref=['THE', 'POSSIBILITY', 'OF', 'APPLYING', 'BODILY', 'VIOLENCE', 'TO', 'PEOPLE', 'IS', 'PROVIDED', 'ABOVE', 'ALL', 'BY', 'AN', 'ORGANIZATION', 'OF', 'ARMED', 'MEN', 'TRAINED', 'TO', 'ACT', 'IN', 'UNISON', 'IN', 'SUBMISSION', 'TO', 'ONE', 'WILL'] +4350-9170-0006-2756: hyp=['THE', 'POSSIBILITY', 'OF', 'APPLYING', 'BODILY', 'VIOLENCE', 'THE', 'PEOPLE', 'IS', 'PROVIDED', 'ABOVE', 'ALL', 'BY', 'AN', 'ORGANIZATION', 'OF', 'ARMED', 'MEN', 'TRAINED', 'TO', 'ACT', 'IN', 'UNISON', 'AND', 'SUBMISSION', 'TO', 'ONE', 'WILL'] +4350-9170-0007-2757: ref=['THESE', 'BANDS', 'OF', 'ARMED', 'MEN', 'SUBMISSIVE', 'TO', 'A', 'SINGLE', 'WILL', 'ARE', 'WHAT', 'CONSTITUTE', 'THE', 'ARMY'] +4350-9170-0007-2757: hyp=['THESE', 'BANDS', 'OF', 'ARMED', 'MEN', 'SUBMISSIVE', 'TO', 'A', 'SINGLE', 'WILL', 'ARE', 'WHAT', 'CONSTITUTE', 'THE', 'ARMY'] +4350-9170-0008-2758: ref=['THE', 'ARMY', 'HAS', 'ALWAYS', 'BEEN', 'AND', 'STILL', 'IS', 'THE', 'BASIS', 'OF', 'POWER'] +4350-9170-0008-2758: hyp=['THE', 'ARMY', 'HAS', 'ALWAYS', 'BEEN', 'AND', 'STILL', 'IS', 'THE', 'BASIS', 'OF', 'POWER'] +4350-9170-0009-2759: ref=['POWER', 'IS', 'ALWAYS', 'IN', 'THE', 'HANDS', 'OF', 'THOSE', 'WHO', 'CONTROL', 'THE', 'ARMY', 'AND', 'ALL', 'MEN', 'IN', 'POWER', 'FROM', 'THE', 'ROMAN', 'CAESARS', 'TO', 'THE', 'RUSSIAN', 'AND', 'GERMAN', 'EMPERORS', 'TAKE', 'MORE', 'INTEREST', 'IN', 'THEIR', 'ARMY', 'THAN', 'IN', 'ANYTHING', 'AND', 'COURT', 'POPULARITY', 'IN', 'THE', 'ARMY', 'KNOWING', 'THAT', 'IF', 'THAT', 'IS', 'ON', 'THEIR', 'SIDE', 'THEIR', 'POWER', 'IS', 'SECURE'] +4350-9170-0009-2759: hyp=['POWER', 'IS', 'ALWAYS', 'IN', 'THE', 'HANDS', 'OF', 'THOSE', 'WHO', 'CONTROL', 'THE', 'ARMY', 'AND', 'ALL', 'MEN', 'IN', 'POWER', 'FROM', 'THE', 'ROMAN', 'CAESARS', 'TO', 'THE', 'RUSSIAN', 'AND', 'GERMAN', 'EMPERORS', 'TAKE', 'MORE', 'INTEREST', 'IN', 'THEIR', 'ARMY', 'THAN', 'IN', 'ANYTHING', 'IN', 'COURT', 'POPULARITY', 'IN', 'THE', 'ARMY', 'KNOWING', 'THAT', 'IF', 'THAT', 'IS', 'ON', 'THEIR', 'SIDE', 
'THEIR', 'POWER', 'IS', 'SECURE'] +4350-9170-0010-2760: ref=['INDEED', 'IT', 'COULD', 'NOT', 'BE', 'OTHERWISE'] +4350-9170-0010-2760: hyp=['INDEED', 'IT', 'COULD', 'NOT', 'BE', 'OTHERWISE'] +4350-9170-0011-2761: ref=['ONLY', 'UNDER', 'THOSE', 'CONDITIONS', 'COULD', 'THE', 'SOCIAL', 'ORGANIZATION', 'BE', 'JUSTIFIED'] +4350-9170-0011-2761: hyp=['ONLY', 'UNDER', 'THOSE', 'CONDITIONS', 'COULD', 'THE', 'SOCIAL', 'ORGANIZATION', 'BE', 'JUSTIFIED'] +4350-9170-0012-2762: ref=['BUT', 'SINCE', 'THIS', 'IS', 'NOT', 'THE', 'CASE', 'AND', 'ON', 'THE', 'CONTRARY', 'MEN', 'IN', 'POWER', 'ARE', 'ALWAYS', 'FAR', 'FROM', 'BEING', 'SAINTS', 'THROUGH', 'THE', 'VERY', 'FACT', 'OF', 'THEIR', 'POSSESSION', 'OF', 'POWER', 'THE', 'SOCIAL', 'ORGANIZATION', 'BASED', 'ON', 'POWER', 'HAS', 'NO', 'JUSTIFICATION'] +4350-9170-0012-2762: hyp=['BUT', 'SINCE', 'THIS', 'IS', 'NOT', 'THE', 'CASE', 'AND', 'ON', 'THE', 'CONTRARY', 'MEN', 'AND', 'POWER', 'ALWAYS', 'FAR', 'FROM', 'BEING', 'SAINTS', 'THROUGH', 'THE', 'VERY', 'FACT', 'OF', 'THEIR', 'POSSESSION', 'OF', 'POWER', 'THE', 'SOCIAL', 'ORGANIZATION', 'BASED', 'ON', 'POWER', 'HAS', 'NO', 'JUSTIFICATION'] +4350-9170-0013-2763: ref=['EVEN', 'IF', 'THERE', 'WAS', 'ONCE', 'A', 'TIME', 'WHEN', 'OWING', 'TO', 'THE', 'LOW', 'STANDARD', 'OF', 'MORALS', 'AND', 'THE', 'DISPOSITION', 'OF', 'MEN', 'TO', 'VIOLENCE', 'THE', 'EXISTENCE', 'OF', 'AN', 'AUTHORITY', 'TO', 'RESTRAIN', 'SUCH', 'VIOLENCE', 'WAS', 'AN', 'ADVANTAGE', 'BECAUSE', 'THE', 'VIOLENCE', 'OF', 'GOVERNMENT', 'WAS', 'LESS', 'THAN', 'THE', 'VIOLENCE', 'OF', 'INDIVIDUALS', 'ONE', 'CANNOT', 'BUT', 'SEE', 'THAT', 'THIS', 'ADVANTAGE', 'COULD', 'NOT', 'BE', 'LASTING'] +4350-9170-0013-2763: hyp=['EVEN', 'IF', 'THERE', 'WAS', 'ONCE', 'WHEN', 'OWING', 'TO', 'THE', 'LOW', 'STANDARDS', 'OF', 'MORALS', 'ON', 'THE', 'DISPOSITION', 'OF', 'MEN', 'TO', 'VIOLENCE', 'THE', 'EXISTENCE', 'OF', 'AN', 'AUTHORITY', 'TO', 'RESTRAIN', 'SUCH', 'VIOLENCE', 'WAS', 'AN', 'ADVANTAGE', 'BECAUSE', 'THE', 'VIOLENCE', 'OF', 'THE', 'GOVERNMENT', 'WAS', 'LESS', 'THAN', 'THE', 'VIOLENCE', 'OF', 'INDIVIDUALS', 'ONE', 'CANNOT', 'BUT', 'SEE', 'THAT', 'THIS', 'ADVANTAGE', 'COULD', 'NOT', 'BE', 'LASTING'] +4350-9170-0014-2764: ref=['BETWEEN', 'THE', 'MEMBERS', 'OF', 'ONE', 'STATE', 'SUBJECT', 'TO', 'A', 'SINGLE', 'AUTHORITY', 'THE', 'STRIFE', 'BETWEEN', 'INDIVIDUALS', 'SEEMS', 'STILL', 'LESS', 'AND', 'THE', 'LIFE', 'OF', 'THE', 'STATE', 'SEEMS', 'EVEN', 'MORE', 'SECURE'] +4350-9170-0014-2764: hyp=['BETWEEN', 'THE', 'MEMBERS', 'OF', 'ONE', 'STATE', 'SUBJECT', 'TO', 'A', 'SINGLE', 'AUTHORITY', 'THE', 'STRIPE', 'BETWEEN', 'THE', 'INDIVIDUALS', 'SEEMED', 'STILL', 'LESS', 'AND', 'A', 'LIFE', 'OF', 'THE', 'STATE', 'SEEMS', 'EVEN', 'MORE', 'SECURE'] +4350-9170-0015-2765: ref=['IT', 'WAS', 'PRODUCED', 'ON', 'ONE', 'HAND', 'BY', 'THE', 'NATURAL', 'GROWTH', 'OF', 'POPULATION', 'AND', 'ON', 'THE', 'OTHER', 'BY', 'STRUGGLE', 'AND', 'CONQUEST'] +4350-9170-0015-2765: hyp=['IT', 'WAS', 'PRODUCED', 'ON', 'ONE', 'HAND', 'BY', 'THE', 'NATURAL', 'GROWTH', 'OF', 'POPULATION', 'AND', 'ON', 'THE', 'OTHER', 'BY', 'STRUGGLE', 'AND', 'CONQUEST'] +4350-9170-0016-2766: ref=['AFTER', 'CONQUEST', 'THE', 'POWER', 'OF', 'THE', 'EMPEROR', 'PUTS', 'AN', 'END', 'TO', 'INTERNAL', 'DISSENSIONS', 'AND', 'SO', 'THE', 'STATE', 'CONCEPTION', 'OF', 'LIFE', 'JUSTIFIES', 'ITSELF'] +4350-9170-0016-2766: hyp=['AFTER', 'CONQUEST', 'THE', 'POWER', 'OF', 'THE', 'EMPEROR', 'PUTS', 'AN', 'END', 'TO', 'INTERNAL', 'DISSENSIONS', 'AND', 'SO', 'THE', 'STATE', 'CONCEPTION', 'OF', 'LIFE', 'JUSTIFIES', 'ITSELF'] 
+4350-9170-0017-2767: ref=['BUT', 'THIS', 'JUSTIFICATION', 'IS', 'NEVER', 'MORE', 'THAN', 'TEMPORARY'] +4350-9170-0017-2767: hyp=['BUT', 'THIS', 'JUSTIFICATION', 'IS', 'NEVER', 'MORE', 'THAN', 'TEMPORARY'] +4350-9170-0018-2768: ref=['INTERNAL', 'DISSENSIONS', 'DISAPPEAR', 'ONLY', 'IN', 'PROPORTION', 'TO', 'THE', 'DEGREE', 'OF', 'OPPRESSION', 'EXERTED', 'BY', 'THE', 'AUTHORITY', 'OVER', 'THE', 'DISSENTIENT', 'INDIVIDUALS'] +4350-9170-0018-2768: hyp=['AND', 'HERALD', 'ASCENSIONS', 'DISAPPEAR', 'ONLY', 'IN', 'PROPORTION', 'TO', 'THE', 'DEGREE', 'OF', 'OPPRESSION', 'EXERTED', 'BY', 'THE', 'AUTHORITY', 'OVER', 'THE', 'DYSINTHIAN', 'INDIVIDUALS'] +4350-9170-0019-2769: ref=['GOVERNMENT', 'AUTHORITY', 'EVEN', 'IF', 'IT', 'DOES', 'SUPPRESS', 'PRIVATE', 'VIOLENCE', 'ALWAYS', 'INTRODUCES', 'INTO', 'THE', 'LIFE', 'OF', 'MEN', 'FRESH', 'FORMS', 'OF', 'VIOLENCE', 'WHICH', 'TEND', 'TO', 'BECOME', 'GREATER', 'AND', 'GREATER', 'IN', 'PROPORTION', 'TO', 'THE', 'DURATION', 'AND', 'STRENGTH', 'OF', 'THE', 'GOVERNMENT'] +4350-9170-0019-2769: hyp=['GOVERN', 'AUTHORITY', 'EVEN', 'IF', 'IT', 'DOES', 'SUPPRESS', 'PRIVATE', 'VIOLENCE', 'ALWAYS', 'INTRODUCES', 'INTO', 'THE', 'LIFE', 'OF', 'MEN', 'FRESH', 'FORMS', 'OF', 'VIOLENCE', 'WHICH', 'TEND', 'TO', 'BECOME', 'GREATER', 'AND', 'GREATER', 'IN', 'PROPORTION', 'TO', 'THE', 'DURATION', 'AND', 'STRENGTH', 'OF', 'THE', 'GOVERNMENT'] +4350-9170-0020-2770: ref=['AND', 'THEREFORE', 'THE', 'OPPRESSION', 'OF', 'THE', 'OPPRESSED', 'ALWAYS', 'GOES', 'ON', 'GROWING', 'UP', 'TO', 'THE', 'FURTHEST', 'LIMIT', 'BEYOND', 'WHICH', 'IT', 'CANNOT', 'GO', 'WITHOUT', 'KILLING', 'THE', 'GOOSE', 'WITH', 'THE', 'GOLDEN', 'EGGS'] +4350-9170-0020-2770: hyp=['AND', 'THEREFORE', 'THE', 'OPPRESSION', 'OF', 'THE', 'OPPRESS', 'ALWAYS', 'GOES', 'ON', 'GROWING', 'UP', 'TO', 'THE', 'FURTHEST', 'LIMIT', 'BEYOND', 'WHICH', 'IT', 'CANNOT', 'GO', 'WITHOUT', 'KILLING', 'THE', 'GOOSE', 'WITH', 'THE', 'GOLD', 'NICE'] +4350-9170-0021-2771: ref=['THE', 'MOST', 'CONVINCING', 'EXAMPLE', 'OF', 'THIS', 'IS', 'TO', 'BE', 'FOUND', 'IN', 'THE', 'CONDITION', 'OF', 'THE', 'WORKING', 'CLASSES', 'OF', 'OUR', 'EPOCH', 'WHO', 'ARE', 'IN', 'REALITY', 'NO', 'BETTER', 'THAN', 'THE', 'SLAVES', 'OF', 'ANCIENT', 'TIMES', 'SUBDUED', 'BY', 'CONQUEST'] +4350-9170-0021-2771: hyp=['THE', 'MOST', 'CONVINCING', 'EXAMPLE', 'OF', 'THIS', 'IS', 'TO', 'BE', 'FOUND', 'IN', 'THE', 'CONDITION', 'OF', 'THE', 'WORKING', 'CLASSES', 'OF', 'OUR', 'EPOCH', 'WHO', 'ARE', 'IN', 'REALITY', 'NO', 'BETTER', 'THAN', 'THE', 'SLAVES', 'OF', 'ANCIENT', 'TIMES', 'SUBDUED', 'BY', 'CONQUEST'] +4350-9170-0022-2772: ref=['SO', 'IT', 'HAS', 'ALWAYS', 'BEEN'] +4350-9170-0022-2772: hyp=['SO', 'IT', 'IS', 'ALWAYS', 'THEN'] +4350-9170-0023-2773: ref=['FOOTNOTE', 'THE', 'FACT', 'THAT', 'IN', 'AMERICA', 'THE', 'ABUSES', 'OF', 'AUTHORITY', 'EXIST', 'IN', 'SPITE', 'OF', 'THE', 'SMALL', 'NUMBER', 'OF', 'THEIR', 'TROOPS', 'NOT', 'ONLY', 'FAILS', 'TO', 'DISPROVE', 'THIS', 'POSITION', 'BUT', 'POSITIVELY', 'CONFIRMS', 'IT'] +4350-9170-0023-2773: hyp=['FOOTNOTE', 'THE', 'FACT', 'THAT', 'IN', 'AMERICA', 'THE', 'ABUSES', 'OF', 'AUTHORITY', 'EXIST', 'IN', 'SPITE', 'OF', 'THE', 'SMALL', 'NUMBER', 'OF', 'THEIR', 'TRUCE', 'NOT', 'ONLY', 'FAILS', 'TO', 'DISPROVE', 'THIS', 'POSITION', 'BUT', 'POSITIVELY', 'CONFIRMS', 'IT'] +4350-9170-0024-2774: ref=['THE', 'UPPER', 'CLASSES', 'KNOW', 'THAT', 'AN', 'ARMY', 'OF', 'FIFTY', 'THOUSAND', 'WILL', 'SOON', 'BE', 'INSUFFICIENT', 'AND', 'NO', 'LONGER', 'RELYING', 'ON', "PINKERTON'S", 'MEN', 'THEY', 'FEEL', 'THAT', 'THE', 'SECURITY', 'OF', 
'THEIR', 'POSITION', 'DEPENDS', 'ON', 'THE', 'INCREASED', 'STRENGTH', 'OF', 'THE', 'ARMY'] +4350-9170-0024-2774: hyp=['THE', 'UPPER', 'CLASSES', 'KNOW', 'THAT', 'AN', 'ARMY', 'OF', 'FIFTY', 'THOUSAND', 'WILL', 'SOON', 'BE', 'INSUFFICIENT', 'AND', 'NO', 'LONGER', 'RELYING', 'ON', "PINKERTON'S", 'MEN', 'THEY', 'FEEL', 'THAT', 'SECURITY', 'OF', 'THEIR', 'POSITION', 'DEPENDS', 'ON', 'THE', 'INCREASED', 'STRENGTH', 'OF', 'THE', 'ARMY'] +4350-9170-0025-2775: ref=['THE', 'REASON', 'TO', 'WHICH', 'HE', 'GAVE', 'EXPRESSION', 'IS', 'ESSENTIALLY', 'THE', 'SAME', 'AS', 'THAT', 'WHICH', 'MADE', 'THE', 'FRENCH', 'KINGS', 'AND', 'THE', 'POPES', 'ENGAGE', 'SWISS', 'AND', 'SCOTCH', 'GUARDS', 'AND', 'MAKES', 'THE', 'RUSSIAN', 'AUTHORITIES', 'OF', 'TO', 'DAY', 'SO', 'CAREFULLY', 'DISTRIBUTE', 'THE', 'RECRUITS', 'SO', 'THAT', 'THE', 'REGIMENTS', 'FROM', 'THE', 'FRONTIERS', 'ARE', 'STATIONED', 'IN', 'CENTRAL', 'DISTRICTS', 'AND', 'THE', 'REGIMENTS', 'FROM', 'THE', 'CENTER', 'ARE', 'STATIONED', 'ON', 'THE', 'FRONTIERS'] +4350-9170-0025-2775: hyp=['THE', 'REASON', 'TO', 'WHICH', 'HE', 'GAVE', 'EXPRESSION', 'IS', 'ESSENTIALLY', 'THE', 'SAME', 'AS', 'THAT', 'WHICH', 'MADE', 'THE', 'FRENCH', 'KINGS', 'AND', 'THE', 'POPES', 'ENGAGE', 'SWISS', 'AND', 'SCOTCH', 'GUARDS', 'AND', 'MAKES', 'THE', 'RUSSIAN', 'AUTHORITIES', 'OF', 'TO', 'DAY', 'SO', 'CAREFULLY', 'DISTRIBUTE', 'THE', 'RECRUITS', 'SO', 'THAT', 'THE', 'REGIMENTS', 'FROM', 'THE', 'FRONTIER', 'THEIR', 'STATIONED', 'IN', 'CENTRAL', 'DISTRICTS', 'AND', 'THE', 'REGIMENTS', 'FROM', 'THE', 'CENTRE', 'ARE', 'STATIONED', 'ON', 'THE', 'FRONTIERS'] +4350-9170-0026-2776: ref=['THE', 'MEANING', 'OF', "CAPRIVI'S", 'SPEECH', 'PUT', 'INTO', 'PLAIN', 'LANGUAGE', 'IS', 'THAT', 'FUNDS', 'ARE', 'NEEDED', 'NOT', 'TO', 'RESIST', 'FOREIGN', 'FOES', 'BUT', 'TO', 'BUY', 'UNDER', 'OFFICERS', 'TO', 'BE', 'READY', 'TO', 'ACT', 'AGAINST', 'THE', 'ENSLAVED', 'TOILING', 'MASSES'] +4350-9170-0026-2776: hyp=['THE', 'MEANING', 'OF', 'THE', 'PREVIOUS', 'SPEECH', 'PUT', 'INTO', 'PLAIN', 'LANGUAGE', 'IS', 'THAT', 'FUNDS', 'ARE', 'NEEDED', 'NOT', 'TO', 'RESIST', 'FOREIGN', 'FOES', 'BUT', 'TO', 'BUY', 'UNDER', 'OFFICERS', 'TO', 'BE', 'READY', 'TO', 'ACT', 'AGAINST', 'THE', 'ENSLAVED', 'TOILING', 'MASSES'] +4350-9170-0027-2777: ref=['AND', 'THIS', 'ABNORMAL', 'ORDER', 'OF', 'THINGS', 'IS', 'MAINTAINED', 'BY', 'THE', 'ARMY'] +4350-9170-0027-2777: hyp=['AND', 'THIS', 'ABNORMAL', 'ORDER', 'OF', 'THANKS', 'IS', 'MAINTAINED', 'BY', 'THE', 'ARMY'] +4350-9170-0028-2778: ref=['BUT', 'THERE', 'IS', 'NOT', 'ONLY', 'ONE', 'GOVERNMENT', 'THERE', 'ARE', 'OTHER', 'GOVERNMENTS', 'EXPLOITING', 'THEIR', 'SUBJECTS', 'BY', 'VIOLENCE', 'IN', 'THE', 'SAME', 'WAY', 'AND', 'ALWAYS', 'READY', 'TO', 'POUNCE', 'DOWN', 'ON', 'ANY', 'OTHER', 'GOVERNMENT', 'AND', 'CARRY', 'OFF', 'THE', 'FRUITS', 'OF', 'THE', 'TOIL', 'OF', 'ITS', 'ENSLAVED', 'SUBJECTS'] +4350-9170-0028-2778: hyp=['BUT', 'THERE', 'IS', 'NOT', 'ONLY', 'ONE', 'GOVERNMENT', 'THERE', 'ARE', 'OTHER', 'GOVERNMENTS', 'EXPLODING', 'THEIR', 'SUBJECTS', 'BY', 'VIOLENCE', 'IN', 'THE', 'SAME', 'WAY', 'AND', 'ARE', 'ALWAYS', 'READY', 'TO', 'POUNCE', 'DOWN', 'ON', 'ANY', 'OTHER', 'GOVERNMENT', 'AND', 'CARRY', 'OFF', 'THE', 'FRUITS', 'OF', 'THE', 'TOIL', 'OF', 'ITS', 'ENSLAVE', 'SUBJECTS'] +4350-9170-0029-2779: ref=['AND', 'SO', 'EVERY', 'GOVERNMENT', 'NEEDS', 'AN', 'ARMY', 'ALSO', 'TO', 'PROTECT', 'ITS', 'BOOTY', 'FROM', 'ITS', 'NEIGHBOR', 'BRIGANDS'] +4350-9170-0029-2779: hyp=['AND', 'SO', 'EVERY', 'GOVERNMENT', 'NEEDS', 'AN', 'ARMY', 'ALSO', 'TO', 'PROTECT', 'ITS', 'BOOTY', 
'FROM', 'ITS', 'NEIGHBOR', 'BRIGANDS'] +4350-9170-0030-2780: ref=['THIS', 'INCREASE', 'IS', 'CONTAGIOUS', 'AS', 'MONTESQUIEU', 'POINTED', 'OUT', 'ONE', 'HUNDRED', 'FIFTY', 'YEARS', 'AGO'] +4350-9170-0030-2780: hyp=['THIS', 'INCREASES', 'CONTAGIOUS', 'AS', 'MONTESQUIEU', 'POINTED', 'OUT', 'A', 'HUNDRED', 'AND', 'FIFTY', 'YEARS', 'AGO'] +4350-9170-0031-2781: ref=['EVERY', 'INCREASE', 'IN', 'THE', 'ARMY', 'OF', 'ONE', 'STATE', 'WITH', 'THE', 'AIM', 'OF', 'SELF', 'DEFENSE', 'AGAINST', 'ITS', 'SUBJECTS', 'BECOMES', 'A', 'SOURCE', 'OF', 'DANGER', 'FOR', 'NEIGHBORING', 'STATES', 'AND', 'CALLS', 'FOR', 'A', 'SIMILAR', 'INCREASE', 'IN', 'THEIR', 'ARMIES'] +4350-9170-0031-2781: hyp=['EVERY', 'INCREASE', 'IN', 'THE', 'ARMY', 'OF', 'ONE', 'STATE', 'WITH', 'THE', 'AIM', 'OF', 'SELF', 'DEFENCE', 'AGAINST', 'ITS', 'SUBJECTS', 'BECOMES', 'A', 'SORT', 'OF', 'DANGER', 'FOR', 'NEIGHBORING', 'STATES', 'AND', 'CALLS', 'FOR', 'A', 'SIMILAR', 'INCREASE', 'IN', 'THEIR', 'ARMIES'] +4350-9170-0032-2782: ref=['THE', 'DESPOTISM', 'OF', 'A', 'GOVERNMENT', 'ALWAYS', 'INCREASES', 'WITH', 'THE', 'STRENGTH', 'OF', 'THE', 'ARMY', 'AND', 'ITS', 'EXTERNAL', 'SUCCESSES', 'AND', 'THE', 'AGGRESSIVENESS', 'OF', 'A', 'GOVERNMENT', 'INCREASES', 'WITH', 'ITS', 'INTERNAL', 'DESPOTISM'] +4350-9170-0032-2782: hyp=['THE', 'DESPOTISM', 'OF', 'THE', 'GOVERNMENT', 'ALWAYS', 'INCREASES', 'WITH', 'THE', 'STRENGTH', 'OF', 'THE', 'ARMY', 'AND', 'ITS', 'EXTERNAL', 'SUCCESSES', 'AND', 'THE', 'AGGRESSIVENESS', 'OF', 'A', 'GOVERNMENT', 'INCREASES', 'WITH', 'ITS', 'INTERNAL', 'DESPOTISM'] +4350-9170-0033-2783: ref=['THE', 'RIVALRY', 'OF', 'THE', 'EUROPEAN', 'STATES', 'IN', 'CONSTANTLY', 'INCREASING', 'THEIR', 'FORCES', 'HAS', 'REDUCED', 'THEM', 'TO', 'THE', 'NECESSITY', 'OF', 'HAVING', 'RECOURSE', 'TO', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'SINCE', 'BY', 'THAT', 'MEANS', 'THE', 'GREATEST', 'POSSIBLE', 'NUMBER', 'OF', 'SOLDIERS', 'IS', 'OBTAINED', 'AT', 'THE', 'LEAST', 'POSSIBLE', 'EXPENSE'] +4350-9170-0033-2783: hyp=['THE', 'RIVALRY', 'OF', 'THE', 'EUROPEAN', 'STATES', 'AND', 'CONSTANTLY', 'INCREASING', 'THEIR', 'FORCES', 'HAS', 'REDUCED', 'THEM', 'TO', 'THE', 'NECESSITY', 'OF', 'HAVING', 'RECOURSE', 'TO', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'SINCE', 'BY', 'THAT', 'MEANS', 'THE', 'GREATEST', 'POSSIBLE', 'NUMBER', 'OF', 'SOLDIERS', 'IS', 'OBTAINED', 'AT', 'THE', 'LEAST', 'POSSIBLE', 'EXPENSE'] +4350-9170-0034-2784: ref=['AND', 'BY', 'THIS', 'MEANS', 'ALL', 'CITIZENS', 'ARE', 'UNDER', 'ARMS', 'TO', 'SUPPORT', 'THE', 'INIQUITIES', 'PRACTICED', 'UPON', 'THEM', 'ALL', 'CITIZENS', 'HAVE', 'BECOME', 'THEIR', 'OWN', 'OPPRESSORS'] +4350-9170-0034-2784: hyp=['AND', 'BY', 'THIS', 'MEANS', 'ALL', 'CITIZENS', 'ARE', 'UNDER', 'ARMS', 'TO', 'SUPPORT', 'THE', 'INIQUITIES', 'PRACTISED', 'UPON', 'THEM', 'ALSO', 'CITIZENS', 'HAVE', 'BECOME', 'THEIR', 'OWN', 'IMPRESSORS'] +4350-9170-0035-2785: ref=['THIS', 'INCONSISTENCY', 'HAS', 'BECOME', 'OBVIOUS', 'IN', 'UNIVERSAL', 'MILITARY', 'SERVICE'] +4350-9170-0035-2785: hyp=['THIS', 'INCONSISTENCY', 'HAS', 'BECOME', 'OBVIOUS', 'AND', 'UNIVERSAL', 'MILITARY', 'SERVICE'] +4350-9170-0036-2786: ref=['IN', 'FACT', 'THE', 'WHOLE', 'SIGNIFICANCE', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'CONSISTS', 'IN', "MAN'S", 'RECOGNITION', 'OF', 'THE', 'BARBARITY', 'OF', 'STRIFE', 'BETWEEN', 'INDIVIDUALS', 'AND', 'THE', 'TRANSITORINESS', 'OF', 'PERSONAL', 'LIFE', 'ITSELF', 'AND', 'THE', 'TRANSFERENCE', 'OF', 'THE', 'AIM', 'OF', 'LIFE', 'TO', 'GROUPS', 'OF', 'PERSONS'] +4350-9170-0036-2786: hyp=['IN', 'FACT', 'THE', 'WHOLE', 
'SIGNIFICANCE', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'CONSISTS', 'IN', "MAN'S", 'RECOGNITION', 'OF', 'THE', 'BARBARITY', 'OF', 'STRIFE', 'BETWEEN', 'INDIVIDUALS', 'AND', 'THE', 'TRANSITORINESS', 'OF', 'PERSONAL', 'LIFE', 'ITSELF', 'AND', 'THE', 'TRANSFERENCE', 'OF', 'THE', 'AIM', 'OF', 'LIFE', 'THE', 'GROUPS', 'OF', 'PERSONS'] +4350-9170-0037-2787: ref=['BUT', 'WITH', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'IT', 'COMES', 'TO', 'PASS', 'THAT', 'MEN', 'AFTER', 'MAKING', 'EVERY', 'SACRIFICE', 'TO', 'GET', 'RID', 'OF', 'THE', 'CRUELTY', 'OF', 'STRIFE', 'AND', 'THE', 'INSECURITY', 'OF', 'EXISTENCE', 'ARE', 'CALLED', 'UPON', 'TO', 'FACE', 'ALL', 'THE', 'PERILS', 'THEY', 'HAD', 'MEANT', 'TO', 'AVOID'] +4350-9170-0037-2787: hyp=['BUT', 'WITH', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'IT', 'COMES', 'TO', 'PASS', 'THAT', 'MEN', 'AFTER', 'MAKING', 'EVERY', 'SACRIFICE', 'TO', 'GET', 'RID', 'OF', 'THE', 'CRUELTY', 'OF', 'STRIFE', 'AND', 'THE', 'INSECURITY', 'OF', 'EXISTENCE', 'ARE', 'CALLED', 'UPON', 'TO', 'FACE', 'ALL', 'THE', 'PERILS', 'THEY', 'HAD', 'MEANT', 'TO', 'AVOID'] +4350-9170-0038-2788: ref=['BUT', 'INSTEAD', 'OF', 'DOING', 'THAT', 'THEY', 'EXPOSE', 'THE', 'INDIVIDUALS', 'TO', 'THE', 'SAME', 'NECESSITY', 'OF', 'STRIFE', 'SUBSTITUTING', 'STRIFE', 'WITH', 'INDIVIDUALS', 'OF', 'OTHER', 'STATES', 'FOR', 'STRIFE', 'WITH', 'NEIGHBORS'] +4350-9170-0038-2788: hyp=['BUT', 'INSTEAD', 'OF', 'DOING', 'THAT', 'THEY', 'EXPOSED', 'THE', 'INDIVIDUALS', 'TO', 'THE', 'SAME', 'NECESSITY', 'OF', 'STRIFE', 'SUBSTITUTING', 'STRIFE', 'WITH', 'INDIVIDUALS', 'OF', 'OTHER', 'STATES', 'FOR', 'STRIFE', 'WITH', 'NEIGHBORS'] +4350-9170-0039-2789: ref=['THE', 'TAXES', 'RAISED', 'FROM', 'THE', 'PEOPLE', 'FOR', 'WAR', 'PREPARATIONS', 'ABSORB', 'THE', 'GREATER', 'PART', 'OF', 'THE', 'PRODUCE', 'OF', 'LABOR', 'WHICH', 'THE', 'ARMY', 'OUGHT', 'TO', 'DEFEND'] +4350-9170-0039-2789: hyp=['THE', 'TAXES', 'RAISED', 'FROM', 'THE', 'PEOPLE', 'FOR', 'WAR', 'PREPARATIONS', 'ABSORB', 'THE', 'GREATER', 'PART', 'OF', 'THE', 'PRODUCE', 'OF', 'LABOR', 'WHICH', 'THE', 'ARMY', 'OUGHT', 'TO', 'DEFEND'] +4350-9170-0040-2790: ref=['THE', 'DANGER', 'OF', 'WAR', 'EVER', 'READY', 'TO', 'BREAK', 'OUT', 'RENDERS', 'ALL', 'REFORMS', 'OF', 'LIFE', 'SOCIAL', 'LIFE', 'VAIN', 'AND', 'FRUITLESS'] +4350-9170-0040-2790: hyp=['THE', 'DANGER', 'OF', 'WAR', 'EVER', 'READY', 'TO', 'BREAK', 'OUT', 'RENDERS', 'ALL', 'REFORMS', 'OF', 'LIFE', 'SOCIAL', 'LIFE', 'VAIN', 'AND', 'FRUITLESS'] +4350-9170-0041-2791: ref=['BUT', 'THE', 'FATAL', 'SIGNIFICANCE', 'OF', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'AS', 'THE', 'MANIFESTATION', 'OF', 'THE', 'CONTRADICTION', 'INHERENT', 'IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IS', 'NOT', 'ONLY', 'APPARENT', 'IN', 'THAT'] +4350-9170-0041-2791: hyp=['BUT', 'THE', 'FIELD', 'SIGNIFICANCE', 'OF', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'AS', 'THE', 'MANIFESTATION', 'OF', 'THE', 'CONTRADICTION', 'INHERENT', 'IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IS', 'NOT', 'ONLY', 'APPARENT', 'IN', 'THAT'] +4350-9170-0042-2792: ref=['GOVERNMENTS', 'ASSERT', 'THAT', 'ARMIES', 'ARE', 'NEEDED', 'ABOVE', 'ALL', 'FOR', 'EXTERNAL', 'DEFENSE', 'BUT', 'THAT', 'IS', 'NOT', 'TRUE'] +4350-9170-0042-2792: hyp=['GOVERNMENTS', 'ASSERT', 'THAT', 'ARMIES', 'ARE', 'NEEDED', 'ABOVE', 'ALL', 'FOR', 'EXTERNAL', 'DEFENCE', 'BUT', 'THAT', 'IS', 'NOT', 'TRUE'] +4350-9170-0043-2793: ref=['THEY', 'ARE', 'NEEDED', 'PRINCIPALLY', 'AGAINST', 'THEIR', 'SUBJECTS', 'AND', 'EVERY', 'MAN', 'UNDER', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'BECOMES', 'AN', 'ACCOMPLICE', 
'IN', 'ALL', 'THE', 'ACTS', 'OF', 'VIOLENCE', 'OF', 'THE', 'GOVERNMENT', 'AGAINST', 'THE', 'CITIZENS', 'WITHOUT', 'ANY', 'CHOICE', 'OF', 'HIS', 'OWN'] +4350-9170-0043-2793: hyp=['THERE', 'NEEDED', 'PRINCIPALLY', 'AGAINST', 'THEIR', 'SUBJECTS', 'AND', 'EVERY', 'MAN', 'UNDER', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'BECOMES', 'AN', 'ACCOMPLICE', 'IN', 'ALL', 'THAT', 'ACTS', 'OF', 'VIOLENCE', 'OF', 'THE', 'GOVERNMENT', 'AGAINST', 'THE', 'CITIZENS', 'WITHOUT', 'ANY', 'CHOICE', 'OF', 'HIS', 'OWN'] +4350-9170-0044-2794: ref=['AND', 'FOR', 'THE', 'SAKE', 'OF', 'WHAT', 'AM', 'I', 'MAKING', 'THEM'] +4350-9170-0044-2794: hyp=['AND', 'FOR', 'THE', 'SAKE', 'OF', 'WHAT', 'I', 'MAKING', 'EM'] +4350-9170-0045-2795: ref=['I', 'AM', 'EXPECTED', 'FOR', 'THE', 'SAKE', 'OF', 'THE', 'STATE', 'TO', 'MAKE', 'THESE', 'SACRIFICES', 'TO', 'RENOUNCE', 'EVERYTHING', 'THAT', 'CAN', 'BE', 'PRECIOUS', 'TO', 'MAN', 'PEACE', 'FAMILY', 'SECURITY', 'AND', 'HUMAN', 'DIGNITY'] +4350-9170-0045-2795: hyp=['I', 'AM', 'EXPECTED', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'STATE', 'TO', 'MAKE', 'THESE', 'SACRIFICES', 'TO', 'RENOUNCE', 'EVERYTHING', 'THAT', 'CAN', 'BE', 'PRECIOUS', 'TO', 'MAN', 'PEACE', 'FAMILY', 'SECURITY', 'AND', 'HUMAN', 'DIGNITY'] +4350-9170-0046-2796: ref=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'SAY', 'WE', 'SHOULD', 'BE', 'EXPOSED', 'TO', 'THE', 'ATTACKS', 'OF', 'EVIL', 'DISPOSED', 'PERSONS', 'IN', 'OUR', 'OWN', 'COUNTRY'] +4350-9170-0046-2796: hyp=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'SAY', 'WE', 'SHOULD', 'BE', 'EXPOSED', 'TO', 'THE', 'ATTACKS', 'OF', 'EVIL', 'DISPOSED', 'PERSONS', 'IN', 'OUR', 'OWN', 'COUNTRY'] +4350-9170-0047-2797: ref=['WE', 'KNOW', 'NOW', 'THAT', 'THREATS', 'AND', 'PUNISHMENTS', 'CANNOT', 'DIMINISH', 'THEIR', 'NUMBER', 'THAT', 'THAT', 'CAN', 'ONLY', 'BE', 'DONE', 'BY', 'CHANGE', 'OF', 'ENVIRONMENT', 'AND', 'MORAL', 'INFLUENCE'] +4350-9170-0047-2797: hyp=['WE', 'NOW', 'KNOW', 'THAT', 'THREATS', 'AND', 'PUNISHMENTS', 'CANNOT', 'DIMINISH', 'THEIR', 'NUMBER', 'THAT', 'THAT', 'CAN', 'ONLY', 'BE', 'DONE', 'BY', 'CHANGE', 'OF', 'ENVIRONMENT', 'AND', 'MORAL', 'INFLUENCE'] +4350-9170-0048-2798: ref=['SO', 'THAT', 'THE', 'JUSTIFICATION', 'OF', 'STATE', 'VIOLENCE', 'ON', 'THE', 'GROUND', 'OF', 'THE', 'PROTECTION', 'IT', 'GIVES', 'US', 'FROM', 'EVIL', 'DISPOSED', 'PERSONS', 'EVEN', 'IF', 'IT', 'HAD', 'SOME', 'FOUNDATION', 'THREE', 'OR', 'FOUR', 'CENTURIES', 'AGO', 'HAS', 'NONE', 'WHATEVER', 'NOW'] +4350-9170-0048-2798: hyp=['SO', 'THAT', 'THE', 'JUSTIFICATION', 'OF', 'STATE', 'VIOLENCE', 'ON', 'THE', 'GROUND', 'OF', 'THE', 'PROTECTION', 'IT', 'GIVES', 'US', 'FROM', 'EVIL', 'DISPOSE', 'PERSONS', 'EVEN', 'IF', 'IT', 'HAD', 'SOME', 'FOUNDATION', 'THREE', 'OR', 'FOUR', 'CENTURIES', 'AGO', 'HAS', 'NONE', 'WHATEVER', 'NOW'] +4350-9170-0049-2799: ref=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'TELL', 'US', 'WE', 'SHOULD', 'NOT', 'HAVE', 'ANY', 'RELIGION', 'EDUCATION', 'CULTURE', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'SO', 'ON'] +4350-9170-0049-2799: hyp=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'TELL', 'US', 'WE', 'SHOULD', 'NOT', 'HAVE', 'ANY', 'RELIGION', 'EDUCATION', 'CULTURE', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'SO', 'ON'] +4350-9170-0050-2800: ref=['WITHOUT', 'THE', 'STATE', 'MEN', 'WOULD', 'NOT', 'HAVE', 'BEEN', 'ABLE', 'TO', 'FORM', 'THE', 'SOCIAL', 'INSTITUTIONS', 'NEEDED', 'FOR', 'DOING', 'ANY', 'THING'] +4350-9170-0050-2800: hyp=['WITHOUT', 'THE', 'STATE', 'MEN', 'WOULD', 'NOT', 'HAVE', 'BEEN', 'ABLE', 'TO', 'FORM', 'THE', 'SOCIAL', 'INSTITUTIONS', 'NEEDED', 'FOR', 'DOING', 'ANYTHING'] 
+4350-9170-0051-2801: ref=['THIS', 'ARGUMENT', 'TOO', 'WAS', 'WELL', 'FOUNDED', 'ONLY', 'SOME', 'CENTURIES', 'AGO'] +4350-9170-0051-2801: hyp=['THIS', 'ARGUMENT', 'TOO', 'WAS', 'WELL', 'FOUNDED', 'ONLY', 'SOME', 'CENTURIES', 'AGO'] +4350-9170-0052-2802: ref=['THE', 'GREAT', 'EXTENSION', 'OF', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'INTERCHANGE', 'OF', 'IDEAS', 'HAS', 'MADE', 'MEN', 'COMPLETELY', 'ABLE', 'TO', 'DISPENSE', 'WITH', 'STATE', 'AID', 'IN', 'FORMING', 'SOCIETIES', 'ASSOCIATIONS', 'CORPORATIONS', 'AND', 'CONGRESSES', 'FOR', 'SCIENTIFIC', 'ECONOMIC', 'AND', 'POLITICAL', 'OBJECTS'] +4350-9170-0052-2802: hyp=['THE', 'GREAT', 'EXTENSION', 'OF', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'INTERCHANGE', 'OF', 'IDEAS', 'HAS', 'MADE', 'MEN', 'COMPLETELY', 'ABLE', 'TO', 'DISPENSE', 'WITH', 'STATE', 'AID', 'IN', 'FORMING', 'SOCIETIES', 'ASSOCIATIONS', 'CORPORATIONS', 'AND', 'CONGRESSES', 'FOR', 'SCIENTIFIC', 'AGONIC', 'AND', 'POLITICAL', 'OBJECTS'] +4350-9170-0053-2803: ref=['WITHOUT', 'GOVERNMENTS', 'NATIONS', 'WOULD', 'BE', 'ENSLAVED', 'BY', 'THEIR', 'NEIGHBORS'] +4350-9170-0053-2803: hyp=['WITHOUT', "GOVERNMENT'S", 'NATIONS', 'WOULD', 'BE', 'ENSLAVED', 'BY', 'THEIR', 'NEIGHBORS'] +4350-9170-0054-2804: ref=['THE', 'GOVERNMENT', 'THEY', 'TELL', 'US', 'WITH', 'ITS', 'ARMY', 'IS', 'NECESSARY', 'TO', 'DEFEND', 'US', 'FROM', 'NEIGHBORING', 'STATES', 'WHO', 'MIGHT', 'ENSLAVE', 'US'] +4350-9170-0054-2804: hyp=['THE', 'GOVERNMENT', 'THEY', 'TELL', 'US', 'WITH', 'ITS', 'ARMY', 'IS', 'NECESSARY', 'TO', 'DEFEND', 'US', 'FROM', 'NEIGHBORING', 'STATES', 'WHO', 'MIGHT', 'ENSLAVE', 'US'] +4350-9170-0055-2805: ref=['AND', 'IF', 'DEFENSE', 'AGAINST', 'BARBAROUS', 'NATIONS', 'IS', 'MEANT', 'ONE', 'THOUSANDTH', 'PART', 'OF', 'THE', 'TROOPS', 'NOW', 'UNDER', 'ARMS', 'WOULD', 'BE', 'AMPLY', 'SUFFICIENT', 'FOR', 'THAT', 'PURPOSE'] +4350-9170-0055-2805: hyp=['AND', 'IF', 'DEFENCE', 'AGAINST', 'BARBAROUS', 'NATIONS', 'IS', 'MEANT', 'ONE', 'THOUSAND', 'PART', 'OF', 'THE', 'TROOPS', 'NOW', 'UNDER', 'ARMS', 'WOULD', 'BE', 'AMPLY', 'SUFFICIENT', 'FOR', 'THAT', 'PURPOSE'] +4350-9170-0056-2806: ref=['THE', 'POWER', 'OF', 'THE', 'STATE', 'FAR', 'FROM', 'BEING', 'A', 'SECURITY', 'AGAINST', 'THE', 'ATTACKS', 'OF', 'OUR', 'NEIGHBORS', 'EXPOSES', 'US', 'ON', 'THE', 'CONTRARY', 'TO', 'MUCH', 'GREATER', 'DANGER', 'OF', 'SUCH', 'ATTACKS'] +4350-9170-0056-2806: hyp=['THE', 'POWER', 'OF', 'THE', 'STATE', 'FAR', 'FROM', 'BEING', 'A', 'SECURITY', 'AGAINST', 'THE', 'ATTACKS', 'OF', 'OUR', 'NEIGHBORS', 'EXPOSES', 'US', 'ON', 'THE', 'CONTRARY', 'TO', 'MUCH', 'GREATER', 'DANGER', 'OF', 'SUCH', 'ATTACKS'] +4350-9170-0057-2807: ref=['EVEN', 'LOOKING', 'AT', 'IT', 'PRACTICALLY', 'WEIGHING', 'THAT', 'IS', 'TO', 'SAY', 'ALL', 'THE', 'BURDENS', 'LAID', 'ON', 'HIM', 'BY', 'THE', 'STATE', 'NO', 'MAN', 'CAN', 'FAIL', 'TO', 'SEE', 'THAT', 'FOR', 'HIM', 'PERSONALLY', 'TO', 'COMPLY', 'WITH', 'STATE', 'DEMANDS', 'AND', 'SERVE', 'IN', 'THE', 'ARMY', 'WOULD', 'IN', 'THE', 'MAJORITY', 'OF', 'CASES', 'BE', 'MORE', 'DISADVANTAGEOUS', 'THAN', 'TO', 'REFUSE', 'TO', 'DO', 'SO'] +4350-9170-0057-2807: hyp=['EVEN', 'LOOKING', 'AT', 'IT', 'PRACTICALLY', 'WEIGHING', 'THAT', 'IS', 'TO', 'SAY', 'ALL', 'THE', 'BIRDS', 'LAID', 'ON', 'HIM', 'BY', 'THE', 'STATES', 'NO', 'MAN', 'CAN', 'FAIL', 'TO', 'SEE', 'THAT', 'FOR', 'HIM', 'PERSONALLY', 'TO', 'COMPLY', 'WITH', 'THE', 'STATE', 'DEMANDS', 'AND', 'SERVE', 'IN', 'THE', 'ARMY', 'WOULD', 'IN', 'THE', 'MAJORITY', 'OF', 'CASES', 'BE', 'MORE', 'DISADVANTAGEOUS', 'THAN', 'TO', 'REFUSE', 'TO', 'DO', 'SO'] +4350-9170-0058-2808: 
ref=['TO', 'RESIST', 'WOULD', 'NEED', 'INDEPENDENT', 'THOUGHT', 'AND', 'EFFORT', 'OF', 'WHICH', 'EVERY', 'MAN', 'IS', 'NOT', 'CAPABLE'] +4350-9170-0058-2808: hyp=['TO', 'RESIST', 'WOULD', 'NEED', 'INDEPENDENT', 'THOUGHT', 'AND', 'EFFORT', 'OF', 'WHICH', 'EVERY', 'MAN', 'IS', 'NOT', 'CAPABLE'] +4350-9170-0059-2809: ref=['SO', 'MUCH', 'FOR', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'OF', 'BOTH', 'LINES', 'OF', 'CONDUCT', 'FOR', 'A', 'MAN', 'OF', 'THE', 'WEALTHY', 'CLASSES', 'AN', 'OPPRESSOR'] +4350-9170-0059-2809: hyp=['SO', 'MUCH', 'FOR', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'OF', 'BOTH', 'LINES', 'OF', 'CONDUCT', 'FOR', 'A', 'MAN', 'OF', 'THE', 'WEALTHY', 'CLASS', 'AN', 'OPPRESSOR'] +4350-9170-0060-2810: ref=['FOR', 'A', 'MAN', 'OF', 'THE', 'POOR', 'WORKING', 'CLASS', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'WILL', 'BE', 'THE', 'SAME', 'BUT', 'WITH', 'A', 'GREAT', 'INCREASE', 'OF', 'DISADVANTAGES'] +4350-9170-0060-2810: hyp=['FOR', 'A', 'MAN', 'OF', 'THE', 'POOR', 'WORKING', 'CLASS', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'WILL', 'BE', 'THE', 'SAME', 'BUT', 'WITH', 'A', 'GREAT', 'INCREASE', 'OF', 'DISADVANTAGES'] +4852-28311-0000-2098: ref=['SAY', 'YOU', 'KNOW', 'SUMTHIN'] +4852-28311-0000-2098: hyp=['SAY', 'YOU', 'KNOW', 'SOMETHING'] +4852-28311-0001-2099: ref=['CHRIS', 'LOOKED', 'FROM', 'A', 'NICKEL', 'PLATED', 'FLASHLIGHT', 'TO', 'A', 'CAR', 'JACK', 'AND', 'SPARK', 'PLUG'] +4852-28311-0001-2099: hyp=['CHRIS', 'LOOKED', 'FROM', 'A', 'NICKEL', 'PLATED', 'FLASH', 'LIKE', 'TO', 'A', 'CAR', 'JACK', 'AND', 'SPARK', 'PLUG'] +4852-28311-0002-2100: ref=['KNOW', 'WHO', 'NEEDS', 'A', 'JOB', 'BAD', "THAT'S", 'JAKEY', 'HARRIS'] +4852-28311-0002-2100: hyp=['NO', 'ONE', 'NEEDS', 'A', 'JOB', 'BAND', "THAT'S", 'JI', 'HARRIS'] +4852-28311-0003-2101: ref=['O', 'K', 'HE', 'SAID'] +4852-28311-0003-2101: hyp=['O', 'K', 'HE', 'SAID'] +4852-28311-0004-2102: ref=['ONLY', 'WHY', "DIDN'T", 'YOU', 'ASK', 'HIM', 'YOURSELF'] +4852-28311-0004-2102: hyp=['ONLY', 'WHY', "DIDN'T", 'YOU', 'ASK', 'HIM', 'YOURSELF'] +4852-28311-0005-2103: ref=['MIKE', 'BECAME', 'UNEASY', 'AND', 'FISHED', 'AN', 'ELASTIC', 'BAND', 'OUT', 'OF', 'HIS', 'POCKET', 'MADE', 'A', 'FLICK', 'OF', 'PAPER', 'AND', 'SENT', 'IT', 'SOARING', 'OUT', 'INTO', 'M', 'STREET'] +4852-28311-0005-2103: hyp=['MIKE', 'BECAME', 'UNEASY', 'AND', 'FISHED', 'AND', 'MOLASTIC', 'BAND', 'OUT', 'OF', 'HIS', 'POCKET', 'MADE', 'A', 'FLICK', 'OF', 'PAPER', 'AND', 'SENT', 'IT', 'SOARING', 'OUT', 'IN', 'EM', 'STREET'] +4852-28311-0006-2104: ref=['WELL', 'HE', 'ADMITTED', 'I', 'DID'] +4852-28311-0006-2104: hyp=['WELL', 'HE', 'ADMITTED', 'I', 'DID'] +4852-28311-0007-2105: ref=['CHRIS', 'ASKED', 'AND', 'FOR', 'THE', 'FIRST', 'TIME', 'THAT', 'DAY', 'THE', 'HEAVY', 'WEIGHT', 'HE', 'CARRIED', 'WITHIN', 'HIM', 'LIFTED', 'AND', 'LIGHTENED', 'A', 'LITTLE'] +4852-28311-0007-2105: hyp=['CHRIS', 'ASKED', 'HIM', 'FOR', 'THE', 'FIRST', 'TIME', 'THAT', 'DAY', 'THAT', 'HEAVY', 'WEIGHT', 'HE', 'CARRIED', 'WITHIN', 'HIM', 'LIFTED', 'AND', 'LIGHTENED', 'A', 'LITTLE'] +4852-28311-0008-2106: ref=['THINK', 'HE', 'REALLY', 'NEEDS', 'IT', 'HE', 'PURSUED'] +4852-28311-0008-2106: hyp=['THEY', 'CAME', 'REALLY', 'NEEDS', 'IT', 'HE', 'PURSUED'] +4852-28311-0009-2107: ref=['HE', 'WOULD', 'HAVE', 'LIKED', 'TO', 'GET', 'THE', 'JOB', 'FOR', 'JAKEY', 'WHO', 'NEEDED', 'IT', 'BUT', 'SOMEHOW', 'THE', 'TASK', 'OF', 'FACING', 'MISTER', 'WICKER', 'ESPECIALLY', 'NOW', 'THAT', 'THE', 'LIGHT', 'WAS', 'GOING', 'AND', 'DUSK', 'EDGING', 'INTO', 'THE', 'STREETS', 'WAS', 'NOT', 'WHAT', 'CHRIS', 'HAD', 
'INTENDED', 'FOR', 'ENDING', 'THE', 'AFTERNOON'] +4852-28311-0009-2107: hyp=['HE', 'WOULD', 'HAVE', 'LIKED', 'TO', 'GET', 'THE', 'JOB', 'FOR', 'JAKIE', 'WHO', 'NEEDED', 'IT', 'BUT', 'SOMEHOW', 'THE', 'TASK', 'OF', 'FACING', 'MISTER', 'WICKER', 'ESPECIALLY', 'NOW', 'THAT', 'THE', 'LIGHT', 'WAS', 'GOING', 'AND', 'DUSK', 'EDGED', 'INTO', 'THE', 'STREETS', 'WAS', 'NOT', 'WHAT', 'CHRISTEN', 'TENDED', 'FOR', 'ENDING', 'THE', 'AFTERNOON'] +4852-28311-0010-2108: ref=["MIKE'S", 'EXPRESSION', 'CHANGED', 'AT', 'ONCE', 'TO', 'ONE', 'OF', 'TRIUMPH', 'BUT', 'CHRIS', 'WAS', 'ONLY', 'PARTLY', 'ENCOURAGED'] +4852-28311-0010-2108: hyp=["MIKE'S", 'EXPRESSION', 'CHANGED', 'AT', 'WHAT', 'ONCE', 'TO', 'ONE', 'OF', 'TRIUMPH', 'BUT', 'CHRIS', 'WAS', 'ONLY', 'PARSLY', 'ENCOURAGED'] +4852-28311-0011-2109: ref=['BETCHA', "AREN'T", 'GOIN', 'AFTER', 'ALL', 'CHRIS', 'TURNED', 'ON', 'HIM'] +4852-28311-0011-2109: hyp=['BUT', "YOU'RE", 'A', 'GOIN', 'AFTER', 'ALL', 'THIS', 'TURNED', 'ON', 'HIM'] +4852-28311-0012-2110: ref=['MIKE', 'WAS', 'STANDING', 'ON', 'THE', 'CORNER'] +4852-28311-0012-2110: hyp=['MIKE', 'WAS', 'STANDING', 'ON', 'THE', 'CORNER'] +4852-28311-0013-2111: ref=['AW', 'SHUCKS'] +4852-28311-0013-2111: hyp=['AH', 'SHOCKS'] +4852-28311-0014-2112: ref=['CHRIS', 'STARTED', 'OFF', 'ONCE', 'MORE', 'PASSING', 'THE', 'BLEAK', 'LITTLE', 'VICTORIAN', 'CHURCH', 'PERCHED', 'ON', 'THE', 'HILL', 'ABOVE', 'MISTER', "WICKER'S", 'HOUSE'] +4852-28311-0014-2112: hyp=['CHRIS', 'STARTED', 'OFF', 'ONCE', 'MORE', 'PASSING', 'A', 'BLEAK', 'LITTLE', 'VICTORIAN', 'CHURCH', 'PERCHED', 'ON', 'THE', 'HILL', 'ABOVE', 'MISTER', "WICKER'S", 'HOUSE'] +4852-28311-0015-2113: ref=['AN', 'EMPTY', 'LOT', 'CUT', 'INTO', 'BY', 'CHURCH', 'LANE', 'GAVE', 'A', 'LOOK', 'OF', 'ISOLATION', 'TO', 'THE', 'L', 'SHAPED', 'BRICK', 'BUILDING', 'THAT', 'SERVED', 'MISTER', 'WICKER', 'AS', 'BOTH', 'HOUSE', 'AND', 'PLACE', 'OF', 'BUSINESS'] +4852-28311-0015-2113: hyp=['AN', 'EMPTY', 'LOT', 'CUT', 'IN', 'INTO', 'BY', 'CHURCH', 'LANE', 'GAVE', 'A', 'LOOK', 'OF', 'ISOLATION', 'TO', 'THE', 'ELE', 'SHAPED', 'BRICK', 'BUILDING', 'THAT', 'SERVED', 'MISTER', "WICKER'S", 'BOTH', 'HOUSE', 'AND', 'PLACE', 'OF', 'BUSINESS'] +4852-28311-0016-2114: ref=['THE', 'LONGER', 'WING', 'TOWARD', 'THE', 'BACK', 'HAD', 'A', 'BACK', 'DOOR', 'THAT', 'OPENED', 'ONTO', 'WATER', 'STREET', 'THE', 'SPACE', 'BETWEEN', 'THE', 'HOUSE', 'AND', 'WISCONSIN', 'AVENUE', 'HAD', 'BEEN', 'MADE', 'INTO', 'A', 'NEAT', 'OBLONG', 'FLOWER', 'GARDEN', 'FENCED', 'OFF', 'FROM', 'THE', 'SIDEWALK', 'BY', 'BOX', 'SHRUBS', 'AND', 'A', 'WHITE', 'PICKET', 'FENCE'] +4852-28311-0016-2114: hyp=['NO', 'LONGER', 'WINGED', 'TOWARD', 'THE', 'BACK', 'GOT', 'A', 'BACK', 'DOOR', 'THAT', 'OPENED', 'ON', 'A', 'WATER', 'STREET', 'THE', 'SPACE', 'BETWEEN', 'THE', 'HOUSE', 'AND', 'MISS', 'CONSIN', 'ATTIGUE', 'HAD', 'BEEN', 'MADE', 'INTO', 'A', 'NEAT', 'OBLONG', 'FLOWER', 'GARDEN', 'FENCED', 'OFF', 'FROM', 'THE', 'SIDEWALK', 'BY', 'BOX', 'SHRUGS', 'AND', 'A', 'WHITE', 'PICKET', 'FENCE'] +4852-28311-0017-2115: ref=['A', 'LIVID', 'YELLOW', 'STAINED', 'THE', 'HORIZON', 'BEYOND', 'THE', 'FACTORIES', 'AND', 'GRAY', 'CLOUDS', 'LOWERED', 'AND', 'TUMBLED', 'ABOVE'] +4852-28311-0017-2115: hyp=['A', 'LIVID', 'YELLOW', 'STAINED', 'THE', 'HORIZON', 'BEYOND', 'THE', 'FACTORIES', 'AND', 'GLAY', 'CLOUDS', 'LOWERED', 'AND', 'TUMBLED', 'ABOVE'] +4852-28311-0018-2116: ref=['THE', 'AIR', 'WAS', 'GROWING', 'CHILL', 'AND', 'CHRIS', 'DECIDED', 'TO', 'FINISH', 'HIS', 'JOB'] +4852-28311-0018-2116: hyp=['THE', 'AIR', 'WAS', 'GROWING', 'CHILL', 'AND', 'CHRIS', 
'DECIDED', 'TO', 'FINISH', 'THE', 'JOB'] +4852-28311-0019-2117: ref=['ALL', 'AT', 'ONCE', 'HE', 'WONDERED', 'HOW', 'HIS', 'MOTHER', 'WAS', 'AND', 'EVERYTHING', 'IN', 'HIM', 'PINCHED', 'AND', 'TIGHTENED', 'ITSELF'] +4852-28311-0019-2117: hyp=['ALL', 'AT', 'ONCE', 'YOU', 'WONDERED', 'HOW', 'HIS', 'MOTHER', 'WAS', 'AND', 'EVERYTHING', 'IN', 'HIM', 'PINCHED', 'AND', 'TIGHTENED', 'ITSELF'] +4852-28311-0020-2118: ref=['AT', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'HE', 'REACHED', 'THE', 'HOUSE'] +4852-28311-0020-2118: hyp=['AT', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'HE', 'REACHED', 'THE', 'HOUSE'] +4852-28311-0021-2119: ref=['THERE', 'WERE', 'THREE', 'THINGS', 'THAT', 'ALWAYS', 'CAUGHT', 'HIS', 'EYE', 'AMID', 'THE', 'LITTER', 'OF', 'DUSTY', 'PIECES'] +4852-28311-0021-2119: hyp=['THERE', 'WERE', 'THREE', 'THINGS', 'THAT', 'ALWAYS', 'CAUGHT', 'HIS', 'EYE', 'AMID', 'THE', 'LITTER', 'OF', 'DUSTY', 'PIECES'] +4852-28311-0022-2120: ref=['ON', 'THE', 'LEFT', 'THE', 'COIL', 'OF', 'ROPE', 'IN', 'THE', 'CENTER', 'THE', 'MODEL', 'OF', 'A', 'SAILING', 'SHIP', 'IN', 'A', 'GREEN', 'GLASS', 'BOTTLE', 'AND', 'ON', 'THE', 'RIGHT', 'THE', 'WOODEN', 'STATUE', 'OF', 'A', 'NEGRO', 'BOY', 'IN', 'BAGGY', 'TROUSERS', 'TURKISH', 'JACKET', 'AND', 'WHITE', 'TURBAN'] +4852-28311-0022-2120: hyp=['ON', 'THE', 'LEFT', 'THE', 'COIL', 'OF', 'ROPE', 'IN', 'THE', 'CENTER', 'OF', 'THE', 'MODEL', 'OF', 'A', 'SAILING', 'SHIP', 'IN', 'A', 'GREEN', 'GLASS', 'BOTTLE', 'AND', 'ON', 'THE', 'RIGHT', 'THE', 'WOODEN', 'STATUE', 'OF', 'A', 'NEGRO', 'BOY', 'IN', 'BAGGY', 'TROUSERS', 'TURKISH', 'JACKET', 'AND', 'WHITE', 'TURBAN'] +4852-28311-0023-2121: ref=['BUT', 'THE', 'NAME', 'STILL', 'SHOWED', 'AT', 'THE', 'PROW', 'AND', 'MANY', 'A', 'TIME', 'CHRIS', 'SAFE', 'AT', 'HOME', 'IN', 'BED', 'HAD', 'SAILED', 'IMAGINARY', 'VOYAGES', 'IN', 'THE', 'MIRABELLE'] +4852-28311-0023-2121: hyp=['BUT', 'THE', 'NAME', 'STILL', 'SHOWED', 'AT', 'THE', 'PROW', 'AND', 'MANY', 'A', 'TIME', 'CHRIS', 'SAFE', 'AT', 'HOME', 'IN', 'BED', 'HAD', 'SAILED', 'IMAGINARY', 'VOYAGES', 'IN', 'THE', 'MIRABELLE'] +4852-28311-0024-2122: ref=['HE', 'HAD', 'NEVER', 'SEEN', 'ANYONE', 'GO', 'INTO', 'MISTER', "WICKER'S", 'SHOP', 'NOW', 'HE', 'THOUGHT', 'OF', 'IT'] +4852-28311-0024-2122: hyp=["HE'D", 'NEVER', 'SEEN', 'ANYONE', 'GO', 'INTO', 'MISTER', "WICKER'S", 'SHOP', 'NOW', 'HE', 'THOUGHT', 'OF', 'IT'] +4852-28311-0025-2123: ref=['HOW', 'THEN', 'DID', 'HE', 'LIVE', 'AND', 'WHAT', 'DID', 'HE', 'EVER', 'SELL'] +4852-28311-0025-2123: hyp=['HOW', 'THEN', 'DID', 'HE', 'LIVE', 'AND', 'WHAT', 'DID', 'HE', 'EVER', 'SELL'] +4852-28311-0026-2124: ref=['A', 'SUDDEN', 'CAR', 'HORN', 'WOKE', 'HIM', 'FROM', 'HIS', 'DREAM'] +4852-28311-0026-2124: hyp=['A', 'SUDDEN', 'CAR', 'HORN', 'WALKING', 'FROM', 'THE', 'STREAM'] +4852-28312-0000-2125: ref=['OF', 'THE', 'MANY', 'TIMES', 'HE', 'HAD', 'EXAMINED', 'MISTER', "WICKER'S", 'WINDOW', 'AND', 'PORED', 'OVER', 'THE', 'ROPE', 'THE', 'SHIP', 'AND', 'THE', 'NUBIAN', 'BOY', 'HE', 'HAD', 'NEVER', 'GONE', 'INTO', 'MISTER', "WICKER'S", 'SHOP'] +4852-28312-0000-2125: hyp=['OF', 'THE', 'MANY', 'TIMES', 'YOU', 'EXAMINED', 'MISTER', "WICKER'S", 'WINDOW', 'AND', 'POURED', 'OVER', 'THE', 'ROPE', 'TO', 'SHIP', 'AND', 'THE', 'NUBIAN', 'BOY', 'HE', 'HAD', 'NEVER', 'GONE', 'INTO', 'MISTER', "WICKER'S", 'SHOP'] +4852-28312-0001-2126: ref=['SO', 'NOW', 'ALONE', 'UNTIL', 'SOMEONE', 'SHOULD', 'ANSWER', 'THE', 'BELL', 'HE', 'LOOKED', 'EAGERLY', 'IF', 'UNEASILY', 'AROUND', 'HIM'] +4852-28312-0001-2126: hyp=['SO', 'NOW', 'ALONE', 'UNTIL', 'SOME', 'ONE', 'SHOULD', 'ANSWER', 'THE', 
'BELL', 'THEY', 'LOOKED', 'EAGERLY', 'IF', 'UNEASILY', 'AROUND', 'HIM'] +4852-28312-0002-2127: ref=['WHAT', 'WITH', 'THE', 'ONE', 'WINDOW', 'AND', 'THE', 'LOWERING', 'DAY', 'OUTSIDE', 'THE', 'LONG', 'NARROW', 'SHOP', 'WAS', 'SOMBER'] +4852-28312-0002-2127: hyp=['WHAT', 'WITH', 'THE', 'ONE', 'WINDOW', 'AND', 'THE', 'LOWERING', 'DAY', 'OUTSIDE', 'THE', 'LONG', 'NARROW', 'SHOP', 'WAS', 'SOMBER'] +4852-28312-0003-2128: ref=['HEAVY', 'HAND', 'HEWN', 'BEAMS', 'CROSSED', 'IT', 'FROM', 'ONE', 'SIDE', 'TO', 'THE', 'OTHER'] +4852-28312-0003-2128: hyp=['HEAVY', 'HAND', 'YOU', 'AND', 'BEAMS', 'CROSSED', 'IT', 'FROM', 'ONE', 'SIDE', 'TO', 'THE', 'OTHER'] +4852-28312-0004-2129: ref=['MISTER', "WICKER'S", 'BACK', 'BEING', 'TOWARD', 'THE', 'SOURCE', 'OF', 'LIGHT', 'CHRIS', 'COULD', 'NOT', 'SEE', 'HIS', 'FACE'] +4852-28312-0004-2129: hyp=['MISS', 'JOKERS', 'BACK', 'BEING', 'TOWARD', 'THE', 'SOURCE', 'OF', 'LIGHT', 'CHRIS', 'COULD', 'NOT', 'SEE', 'HIS', 'FACE'] +4852-28312-0005-2130: ref=['THE', 'DOUBLE', 'FANS', 'OF', 'MINUTE', 'WRINKLES', 'BREAKING', 'FROM', 'EYE', 'CORNER', 'TO', 'TEMPLE', 'AND', 'JOINING', 'WITH', 'THOSE', 'OVER', 'THE', 'CHEEKBONES', 'WERE', 'DRAWN', 'INTO', 'THE', 'HORIZONTAL', 'LINES', 'ACROSS', 'THE', 'DOMED', 'FOREHEAD'] +4852-28312-0005-2130: hyp=['THE', 'DOUBLE', 'FANS', 'A', 'MINUTE', 'WRINKLES', 'BREAKING', 'FROM', 'EYE', 'CORNER', 'TO', 'TEMPLE', 'AND', 'JOINING', 'WITH', 'THOSE', 'OVER', 'THE', 'CHEAP', 'BONES', 'WERE', 'DRAWN', 'INTO', 'THE', 'HORIZONTAL', 'LINES', 'ACROSS', 'THE', 'DOMED', 'FOREHEAD'] +4852-28312-0006-2131: ref=['LITTLE', 'TUFTS', 'OF', 'WHITE', 'FUZZ', 'ABOVE', 'THE', 'EARS', 'WERE', 'ALL', 'THAT', 'REMAINED', 'OF', 'THE', "ANTIQUARIAN'S", 'HAIR', 'BUT', 'WHAT', 'DREW', 'AND', 'HELD', "CHRIS'S", 'GAZE', 'WERE', 'THE', 'OLD', "MAN'S", 'EYES'] +4852-28312-0006-2131: hyp=['LITTLE', 'TUFTS', 'OF', 'WHITE', 'FUZ', 'ABOVE', 'THE', 'EARS', 'WERE', 'ALL', 'THAT', 'REMAINED', 'OF', 'THE', "ANTIQUARIAN'S", 'HAIR', 'BUT', 'WHAT', 'DREW', 'AND', 'HELD', "CHRIS'S", 'GAZE', 'WITH', 'THE', 'OLD', "MAN'S", 'EYES'] +4852-28312-0007-2132: ref=['CHRIS', 'BLINKED', 'AND', 'LOOKED', 'AGAIN', 'YES', 'THEY', 'WERE', 'STILL', 'THERE'] +4852-28312-0007-2132: hyp=['CRISP', 'BINKED', 'AND', 'LOOKED', 'AGAIN', 'YES', 'THEY', 'WERE', 'STILL', 'THERE'] +4852-28312-0008-2133: ref=['CHRIS', 'SWALLOWED', 'AND', 'HIS', 'VOICE', 'CAME', 'BACK', 'TO', 'HIM'] +4852-28312-0008-2133: hyp=['GRIS', 'SWALLOW', 'AND', 'HIS', 'VOICE', 'CAME', 'BACK', 'TO', 'HIM'] +4852-28312-0009-2134: ref=['YES', 'SIR', 'HE', 'SAID'] +4852-28312-0009-2134: hyp=['YES', 'SIR', 'HE', 'SAID'] +4852-28312-0010-2135: ref=['I', 'SAW', 'YOUR', 'SIGN', 'AND', 'I', 'KNOW', 'A', 'BOY', 'WHO', 'NEEDS', 'THE', 'JOB'] +4852-28312-0010-2135: hyp=['I', 'SAW', 'YOUR', 'SIGN', 'AND', 'I', 'KNOW', 'A', 'BOY', 'WHO', 'NEEDS', 'THE', 'JOB'] +4852-28312-0011-2136: ref=["HE'S", 'A', 'SCHOOLMATE', 'OF', 'MINE'] +4852-28312-0011-2136: hyp=["HE'S", 'A', 'SCHOOLMATE', 'OF', 'MINE'] +4852-28312-0012-2137: ref=['JAKEY', 'HARRIS', 'HIS', 'NAME', 'IS', 'AND', 'HE', 'REALLY', 'NEEDS', 'THE', 'JOB'] +4852-28312-0012-2137: hyp=['JAGGIE', "ARIST'S", 'NAME', "ISN'T", 'HE', 'REALLY', 'NEEDS', 'THE', 'JOB'] +4852-28312-0013-2138: ref=['I', 'I', 'JUST', 'WONDERED', 'IF', 'THE', 'PLACE', 'WAS', 'STILL', 'OPEN'] +4852-28312-0013-2138: hyp=['I', 'I', 'JUST', 'WANTED', 'IF', 'THE', 'PLACE', 'WAS', 'STILL', 'OPEN'] +4852-28312-0014-2139: ref=['WHAT', 'HE', 'SAW', 'WAS', 'A', 'FRESH', 'CHEEKED', 'LAD', 'TALL', 'FOR', 'THIRTEEN', 'STURDY', 'WITH', 'SINCERITY', 
'AND', 'GOOD', 'HUMOR', 'IN', 'HIS', 'FACE', 'AND', 'SOMETHING', 'SENSITIVE', 'AND', 'APPEALING', 'ABOUT', 'HIS', 'EYES'] +4852-28312-0014-2139: hyp=['WHAT', 'HE', 'SAW', 'WAS', 'A', 'FRESH', 'CHEEKED', 'LAD', 'TALL', 'FOR', 'THIRTEEN', 'STURDY', 'WITH', 'SINCERITY', 'AND', 'GOOD', 'HUMOUR', 'IN', 'HIS', 'FACE', 'AND', 'SOMETHING', 'SCENTED', 'AND', 'APPEALING', 'ABOUT', 'HIS', 'EYES'] +4852-28312-0015-2140: ref=['HE', 'GUESSED', 'THERE', 'MUST', 'BE', 'A', 'LIVELY', 'FIRE', 'IN', 'THAT', 'ROOM', 'BEYOND'] +4852-28312-0015-2140: hyp=['HE', 'GUESSED', 'THERE', 'MUST', 'BE', 'A', 'LIVELY', 'FIRE', 'IN', 'THAT', 'ROOM', 'BEYOND'] +4852-28312-0016-2141: ref=['WOULD', 'THAT', 'INTERFERE', 'WITH', "JAKEY'S", 'GETTING', 'THE', 'JOB', 'SIR'] +4852-28312-0016-2141: hyp=['WOULD', 'THAT', 'INFERE', 'WITH', 'JAKI', 'GIGGS', 'GETTING', 'THE', 'JOB', 'SIR'] +4852-28312-0017-2142: ref=['BUT', 'EVEN', 'AS', 'HE', 'SLOWLY', 'TURNED', 'THE', 'THOUGHT', 'PIERCED', 'HIS', 'MIND', 'WHY', 'HAD', 'HE', 'NOT', 'SEEN', 'THE', 'REFLECTION', 'OF', 'THE', 'HEADLIGHTS', 'OF', 'THE', 'CARS', 'MOVING', 'UP', 'AROUND', 'THE', 'CORNER', 'OF', 'WATER', 'STREET', 'AND', 'UP', 'THE', 'HILL', 'TOWARD', 'THE', 'TRAFFIC', 'SIGNALS'] +4852-28312-0017-2142: hyp=['BUT', 'EVEN', 'AS', 'HE', 'SLOWLY', 'TURNED', 'THE', 'THOUGHT', 'PIERCED', 'HIS', 'MIND', 'WHY', 'HE', 'NOT', 'SEEN', 'THE', 'REFLECTION', 'OF', 'THE', 'HEADLIGHTS', 'OF', 'THE', 'CARS', 'MOVING', 'UP', 'AROUND', 'THE', 'CORRIE', 'WALL', 'UNDER', 'STREET', 'NOT', 'THE', 'HILL', 'TOWARD', 'THE', 'LIFE', 'SIGNALS'] +4852-28312-0018-2143: ref=['THE', 'ROOM', 'SEEMED', 'OVERLY', 'STILL'] +4852-28312-0018-2143: hyp=['THE', 'ROME', 'SEEMED', 'OVERLY', 'STILL'] +4852-28312-0019-2144: ref=['THEN', 'IN', 'THAT', 'SECOND', 'HE', 'TURNED', 'AND', 'FACED', 'ABOUT'] +4852-28312-0019-2144: hyp=['THEN', 'IN', 'THAT', 'SECOND', 'HE', 'TURNED', 'AND', 'FACED', 'ABOUT'] +4852-28312-0020-2145: ref=['THE', 'WIDE', 'BOW', 'WINDOW', 'WAS', 'THERE', 'BEFORE', 'HIM', 'THE', 'THREE', 'OBJECTS', 'HE', 'LIKED', 'BEST', 'SHOWING', 'FROSTY', 'IN', 'THE', 'MOONLIGHT', 'THAT', 'POURED', 'IN', 'FROM', 'ACROSS', 'THE', 'WATER'] +4852-28312-0020-2145: hyp=['THE', 'WIDE', 'BOW', 'WIND', 'THAT', 'WAS', 'THERE', 'BEFORE', 'HIM', 'THE', 'THREE', 'OBJECTS', 'HE', 'LIKED', 'BEST', 'SHOWING', 'FROSTY', 'IN', 'THE', 'MOONLIGHT', 'THAT', 'POURED', 'IN', 'FROM', 'ACROSS', 'THE', 'WATER'] +4852-28312-0021-2146: ref=['ACROSS', 'THE', 'WATER', 'WHERE', 'WAS', 'THE', 'FREEWAY'] +4852-28312-0021-2146: hyp=['ACROSS', 'THE', 'WATER', 'WHERE', 'IS', 'THE', 'FREE', 'WAY'] +4852-28312-0022-2147: ref=['IT', 'WAS', 'NO', 'LONGER', 'THERE', 'NOR', 'WERE', 'THE', 'HIGH', 'WALLS', 'AND', 'SMOKESTACKS', 'OF', 'FACTORIES', 'TO', 'BE', 'SEEN'] +4852-28312-0022-2147: hyp=['IT', 'WAS', 'NO', 'LONGER', 'THERE', 'NOR', 'WERE', 'THE', 'HIGH', 'WALLS', 'AND', 'SMOKESTACKS', 'OF', 'FACTORIES', 'TO', 'BE', 'SEEN'] +4852-28312-0023-2148: ref=['THE', 'WAREHOUSES', 'WERE', 'STILL', 'THERE'] +4852-28312-0023-2148: hyp=['THE', 'WAREHOUSES', 'WERE', 'STILL', 'THERE'] +4852-28312-0024-2149: ref=['FLABBERGASTED', 'AND', 'BREATHLESS', 'CHRIS', 'WAS', 'UNAWARE', 'THAT', 'HE', 'HAD', 'MOVED', 'CLOSER', 'TO', 'PEER', 'OUT', 'THE', 'WINDOW', 'IN', 'EVERY', 'DIRECTION'] +4852-28312-0024-2149: hyp=['FLAVAGASTED', 'AND', 'BREATHLESS', 'CHRIS', 'WAS', 'UNAWARE', 'THAT', 'HE', 'HAD', 'MOVED', 'CLOSER', 'TO', 'PEER', 'OUT', 'THE', 'WINDOW', 'IN', 'EVERY', 'DIRECTION'] +4852-28312-0025-2150: ref=['NO', 'ELECTRIC', 'SIGNS', 'NO', 'LAMPLIT', 'STREETS'] 
+4852-28312-0025-2150: hyp=['NO', 'ELECTRIC', 'SIGNS', 'NO', 'LAMPLIT', 'STREETS'] +4852-28312-0026-2151: ref=['WHERE', 'THE', "PEOPLE'S", 'DRUGSTORE', 'HAD', 'STOOD', 'BUT', 'A', 'HALF', 'HOUR', 'BEFORE', 'ROSE', 'THE', 'ROOFS', 'OF', 'WHAT', 'WAS', 'EVIDENTLY', 'AN', 'INN'] +4852-28312-0026-2151: hyp=['WHERE', 'THE', "PEOPLE'S", 'DRUG', 'STORE', 'IT', 'STOOD', 'BUT', 'HALF', 'AN', 'HOUR', 'BEFORE', 'ROSE', 'THE', 'ROOFS', 'OF', 'WHAT', 'WAS', 'EVIDENTLY', 'AN', 'INN'] +4852-28312-0027-2152: ref=['A', 'COURTYARD', 'WAS', 'SPARSELY', 'LIT', 'BY', 'A', 'FLARING', 'TORCH', 'OR', 'TWO', 'SHOWING', 'A', 'SWINGING', 'SIGN', 'HUNG', 'ON', 'A', 'POST'] +4852-28312-0027-2152: hyp=['A', 'COURTYARD', 'WAS', 'FIRSTLY', 'LIT', 'BY', 'A', 'FLARING', 'TORTURE', 'TO', 'SHOWING', 'A', 'SWINGING', 'SIGN', 'HUNG', 'ON', 'THE', 'POST'] +4852-28312-0028-2153: ref=['THE', 'POST', 'WAS', 'PLANTED', 'AT', 'THE', 'EDGE', 'OF', 'WHAT', 'WAS', 'NOW', 'A', 'BROAD', 'AND', 'MUDDY', 'ROAD'] +4852-28312-0028-2153: hyp=['THE', 'POST', 'IS', 'BLOODED', 'AT', 'THE', 'EDGE', 'OF', 'IT', 'WAS', 'NOW', 'A', 'BROAD', 'AND', 'MONEY', 'ROAD'] +4852-28312-0029-2154: ref=['A', 'COACH', 'WITH', 'ITS', 'TOP', 'PILED', 'HIGH', 'WITH', 'LUGGAGE', 'STAMPED', 'TO', 'A', 'HALT', 'BESIDE', 'THE', 'FLAGGED', 'COURTYARD'] +4852-28312-0029-2154: hyp=['A', 'COACH', 'WHEN', 'THEY', 'STOPPED', 'PILED', 'HIGH', 'WITH', 'LUGGAGE', 'STAMPED', 'OR', 'HALT', 'BESIDE', 'THE', 'FLAGGED', 'COURTYARD'] +4852-28312-0030-2155: ref=['THEY', 'MOVED', 'INTO', 'THE', 'INN', 'THE', 'COACH', 'RATTLED', 'OFF', 'TO', 'THE', 'STABLE'] +4852-28312-0030-2155: hyp=['THEY', 'MOVED', 'INTO', 'THE', 'INN', 'THE', 'COACH', 'RATTLED', 'OFF', 'TO', 'THE', 'STABLE'] +4852-28312-0031-2156: ref=['MY', 'WINDOW', 'HAS', 'A', 'POWER', 'FOR', 'THOSE', 'FEW', 'WHO', 'ARE', 'TO', 'SEE'] +4852-28312-0031-2156: hyp=['MY', 'WINDOW', 'AS', 'A', 'POWER', 'FOR', 'THOSE', 'FEW', 'WHO', 'ARE', 'TO', 'SEE'] +4852-28319-0000-2070: ref=['THE', 'LEARNING', 'OF', 'MAGIC', 'WAS', 'BY', 'NO', 'MEANS', 'EASY'] +4852-28319-0000-2070: hyp=['THE', 'LEARNING', 'OF', 'MAGIC', 'WAS', 'BY', 'NO', 'MEANS', 'EASY'] +4852-28319-0001-2071: ref=['HE', 'HAD', 'TOLD', 'HIS', 'MASTER', 'AT', 'ONCE', 'ABOUT', 'SIMON', 'GOSLER', 'HIS', 'HORDE', 'OF', 'MONEY', 'AND', 'HIS', 'HIDING', 'PLACES', 'FOR', 'IT'] +4852-28319-0001-2071: hyp=['HE', 'TOLD', 'HIS', 'MASTER', 'AT', 'ONCE', 'HE', 'GOT', 'SIMON', 'GOSPIR', 'HIS', 'HOARD', 'OF', 'MONEY', 'IN', 'HIS', 'HIDING', 'PLACES', 'FOR', 'IT'] +4852-28319-0002-2072: ref=['CHRIS', 'THEREFORE', 'THREW', 'HIMSELF', 'INTO', 'ALL', 'THE', 'PRELIMINARIES', 'OF', 'HIS', 'TASK'] +4852-28319-0002-2072: hyp=['CHRIS', 'THEREFORE', 'THREW', 'HIMSELF', 'AND', 'ALL', 'THE', 'PROLIMINARIES', 'OF', 'HIS', 'TASK'] +4852-28319-0003-2073: ref=['ONE', 'AFTERNOON', 'WHEN', 'HE', 'RETURNED', 'AFTER', 'A', 'REST', 'TO', 'MISTER', "WICKER'S", 'STUDY', 'HE', 'SAW', 'THAT', 'THERE', 'WAS', 'SOMETHING', 'NEW', 'IN', 'THE', 'ROOM', 'A', 'BOWL', 'WITH', 'A', 'GOLDFISH', 'IN', 'IT', 'STOOD', 'ON', 'THE', 'TABLE', 'BUT', 'MISTER', 'WICKER', 'WAS', 'NOT', 'TO', 'BE', 'SEEN'] +4852-28319-0003-2073: hyp=['ONE', 'AFTERNOON', 'WHEN', 'HE', 'HAD', 'RETURNED', 'AFTER', 'A', 'REST', 'TO', 'MISTER', "WICKER'S", 'STUDY', 'HE', 'SAW', 'THAT', 'THERE', 'WAS', 'SOMETHING', 'NEW', 'IN', 'THE', 'ROOM', 'A', 'BULL', 'WITH', 'A', 'GOLD', 'FISH', 'IN', 'IT', 'STOOD', 'ON', 'THE', 'TABLE', 'BUT', 'MISTER', 'WICKER', 'WAS', 'NOT', 'TO', 'BE', 'SEEN'] +4852-28319-0004-2074: ref=['WHAT', 'SHALL', 'I', 'DO', 'FIRST'] 
+4852-28319-0004-2074: hyp=['WHAT', 'SHOULD', 'I', 'DO', 'FIRST'] +4852-28319-0005-2075: ref=['HOW', 'YOU', 'HAVE', 'IMPROVED', 'MY', 'BOY', 'HE', 'EXCLAIMED', 'IT', 'IS', 'NOW', 'TIME', 'FOR', 'YOU', 'TO', 'TRY', 'AND', 'THIS', 'IS', 'AS', 'GOOD', 'A', 'CHANGE', 'AS', 'ANY'] +4852-28319-0005-2075: hyp=['HOW', 'YOU', 'OFTEN', 'PROVED', 'MY', 'BOY', 'IT', 'EXCLAIMED', 'IS', 'NOW', 'TIME', 'FOR', 'YOU', 'TO', 'TRY', 'MISSUS', 'GOT', 'A', 'CHANGE', 'AS', 'ANY'] +4852-28319-0006-2076: ref=['SUPPOSE', 'I', 'CHANGE', 'AND', "CAN'T", 'CHANGE', 'BACK'] +4852-28319-0006-2076: hyp=['SUPPOSE', 'I', 'CHANGE', 'AND', 'CATCH', 'ITS', 'BACK'] +4852-28319-0007-2077: ref=['MISTER', 'WICKER', 'WAITED', 'PATIENTLY', 'BESIDE', 'HIM', 'FOR', 'A', 'FEW', 'MOMENTS', 'FOR', 'CHRIS', 'TO', 'GET', 'UP', 'HIS', 'COURAGE'] +4852-28319-0007-2077: hyp=['MISTER', 'WICKER', 'WAITED', 'PATIENTLY', 'BESIDE', 'HIM', 'FOR', 'A', 'FEW', 'MOMENTS', 'FOR', 'CHRIS', 'TO', 'GET', 'UP', 'HIS', 'COURAGE'] +4852-28319-0008-2078: ref=['THEN', 'AS', 'NOTHING', 'HAPPENED', 'WITH', 'A', 'VOICE', 'LIKE', 'A', 'WHIP', 'MISTER', 'WICKER', 'SAID', 'START', 'AT', 'ONCE'] +4852-28319-0008-2078: hyp=['THAT', 'IS', 'NOTHING', 'HAPPENED', 'WITH', 'A', 'VOICE', 'LIKE', 'A', 'WHIP', 'MISTER', 'WICKER', 'SAID', 'STARTED', 'ONCE'] +4852-28319-0009-2079: ref=['THE', 'SENSATION', 'SPREAD', 'FASTER', 'AND', 'FASTER'] +4852-28319-0009-2079: hyp=['THE', 'SENSATION', 'SPREAD', 'FASTER', 'AND', 'FASTER'] +4852-28319-0010-2080: ref=['HIS', 'HEAD', 'SWAM', 'AND', 'HE', 'FELT', 'FAINT', 'AND', 'A', 'LITTLE', 'SICK', 'BUT', 'HE', 'PERSISTED', 'THROUGH', 'THE', 'FINAL', 'WORDS'] +4852-28319-0010-2080: hyp=['HIS', 'HEAD', 'SWAM', 'AND', 'HE', 'FELT', 'FAINT', 'IN', 'A', 'LITTLE', 'SICK', 'BUT', 'HE', 'PERSISTED', 'THROUGH', 'THE', 'FINAL', 'WORDS'] +4852-28319-0011-2081: ref=['HE', 'THOUGHT', 'NOT', 'WITHOUT', 'A', 'FEELING', 'OF', 'PRIDE', 'AND', 'COMMENCED', 'EXPERIMENTING', 'WITH', 'HIS', 'TAIL', 'AND', 'FINS', 'WITH', 'SUCH', 'ENTHUSIASM', 'AND', 'DELIGHT', 'THAT', 'SOME', 'LITTLE', 'TIME', 'ELAPSED', 'BEFORE', 'MISTER', "WICKER'S", 'VOICE', 'BOOMED', 'CLOSE', 'BY'] +4852-28319-0011-2081: hyp=['HE', 'THOUGHT', 'NOW', 'WITHOUT', 'A', 'FEELING', 'OF', 'PRIDE', 'AND', 'COMMENCED', 'THE', 'EXPERIMENTING', 'WITH', 'HIS', 'TAIL', 'AND', 'FINS', 'WITH', 'SUCH', 'ENTHUSIASM', 'AND', 'DELIGHT', 'THAT', 'SOME', 'LITTLE', 'TIME', 'ELAPSED', 'BEFORE', 'MISTER', "WICKER'S", 'VOICE', 'BOOM', 'BUT', 'OAST', 'BY'] +4852-28319-0012-2082: ref=['SEVENTY', 'FOUR', 'BOOK', 'ONE', 'THE', 'RETURN'] +4852-28319-0012-2082: hyp=['SEVENTY', 'FOUR', 'BOOK', 'ONE', 'THE', 'RETURN'] +4852-28319-0013-2083: ref=['THE', "FIGURE'S", 'SHOES', 'CARVED', 'IN', 'SOME', 'EASTERN', 'STYLE', 'HAD', 'CURVED', 'UP', 'POINTING', 'TOES'] +4852-28319-0013-2083: hyp=['THE', "FIGURE'S", 'SHOES', 'CARVED', 'IN', 'SOME', 'EASTERN', 'STYLE', 'HAD', 'CURVED', 'UP', 'POINTING', 'TOES'] +4852-28319-0014-2084: ref=['THEN', 'ALL', 'AT', 'ONCE', 'THE', 'IDEA', 'CAME', 'TO', 'CHRIS'] +4852-28319-0014-2084: hyp=['THEN', 'ALL', 'AT', 'ONCE', 'THE', 'IDEA', 'CAME', 'TO', 'CHRIS'] +4852-28319-0015-2085: ref=['IF', 'HE', 'WAS', 'TO', 'BE', 'A', 'MAGICIAN', 'COULD', 'HE', 'MAKE', 'THIS', 'BOY', 'COME', 'TO', 'LIFE'] +4852-28319-0015-2085: hyp=['IF', 'HE', 'WAS', 'TO', 'BE', 'A', 'MAGICIAN', 'COULD', 'HE', 'MAKE', 'THIS', 'BOY', 'COME', 'TO', 'LIFE'] +4852-28319-0016-2086: ref=['HE', 'SQUATTED', 'ON', 'HIS', 'HAUNCHES', 'EXAMINING', 'THE', 'CARVED', 'WOODEN', 'FIGURE', 'ATTENTIVELY', 'AND', 'FELT', 'CONVINCED', 'THAT', 
'ONCE', 'ALIVE', 'THE', 'BOY', 'WOULD', 'BE', 'AN', 'IDEAL', 'AND', 'HAPPY', 'COMPANION'] +4852-28319-0016-2086: hyp=['IT', 'SQUATTED', 'ON', 'HIS', 'HAUNCHES', 'EXAMINED', 'THE', 'CARVED', 'WOOD', 'AND', 'FIGURE', 'ATTENTIVELY', 'AND', 'FELT', 'CONVINCED', 'THAT', 'ONCE', 'ALIVE', 'THE', 'BOY', 'WOULD', 'BE', 'AN', 'IDEAL', 'AND', 'HAPPY', 'COMPANION'] +4852-28319-0017-2087: ref=['BUT', 'HOW', 'DID', 'ONE', 'CHANGE', 'INANIMATE', 'TO', 'ANIMATE'] +4852-28319-0017-2087: hyp=['BUT', 'HOW', 'DID', 'ONE', 'A', 'CHANGE', 'INANIMATE', 'TO', 'ENEMY'] +4852-28319-0018-2088: ref=['CHRIS', 'GOT', 'UP', 'AND', 'STOLE', 'BACK', 'TO', 'MISTER', "WICKER'S", 'DOOR'] +4852-28319-0018-2088: hyp=['CHRIS', 'GOT', 'UP', 'AND', 'STOLE', 'BACK', 'TO', 'MISTER', "WICKER'S", 'DOOR'] +4852-28319-0019-2089: ref=['HE', 'HEARD', 'THE', 'MAGICIAN', 'GOING', 'UP', 'THE', 'SPIRAL', 'STAIRCASE', 'TO', 'HIS', 'ROOM', 'ABOVE', 'AND', 'AFTER', 'CHANGING', 'HIMSELF', 'TO', 'A', 'MOUSE', 'TO', 'SLIP', 'UNDER', 'THE', 'DOOR', 'AND', 'SEE', 'THAT', 'THE', 'ROOM', 'WAS', 'REALLY', 'EMPTY', 'CHRIS', 'RESUMED', 'HIS', 'PROPER', 'SHAPE', 'AND', 'OPENED', 'THE', 'DOORS', 'OF', 'THE', 'CUPBOARD', 'AT', 'THE', 'FAR', 'END', 'OF', 'THE', 'ROOM'] +4852-28319-0019-2089: hyp=['HE', 'HEARD', 'THAT', 'MAGICIAN', 'GOING', 'UP', 'THE', 'SPIRAL', 'STAIRCASE', 'TO', 'HIS', 'ROOM', 'ABOVE', 'AND', 'AFTER', 'CHANGING', 'HIMSELF', 'TO', 'A', 'MOUSE', 'TO', 'SLIP', 'UNDER', 'THE', 'DOOR', 'AND', 'SEE', 'THAT', 'THE', 'ROOM', 'WAS', 'REELING', 'EMPTY', 'MISTER', "JAMES'S", 'PROPER', 'SHAPE', 'AND', 'OPENED', 'THE', 'DOORS', 'OF', 'THE', 'CUPBOARD', 'AT', 'THE', 'FAR', 'END', 'OF', 'THE', 'ROOM'] +4852-28319-0020-2090: ref=['THE', 'AFTERNOON', 'RAINY', 'BEFORE', 'INCREASED', 'IN', 'STORM'] +4852-28319-0020-2090: hyp=['THE', 'AFTERNOON', 'RAINING', 'BEFORE', 'INCREASED', 'IN', 'STORM'] +4852-28319-0021-2091: ref=['DUSK', 'CAME', 'TWO', 'HOURS', 'BEFORE', 'ITS', 'TIME', 'THUNDER', 'SNARLED', 'IN', 'THE', 'SKY'] +4852-28319-0021-2091: hyp=['THUS', 'CAME', 'TWO', 'HOURS', 'BEFORE', 'ITS', 'TIME', 'THUNDER', 'SNARLS', 'IN', 'THE', 'SKY'] +4852-28319-0022-2092: ref=['CERTAIN', 'ELEMENTS', 'WERE', 'TO', 'BE', 'MIXED', 'AND', 'POURED', 'AT', 'THE', 'PROPER', 'TIME'] +4852-28319-0022-2092: hyp=['CERTAIN', 'ELEMENTS', 'WERE', 'TO', 'BE', 'MIXED', 'AND', 'POURED', 'AT', 'THE', 'PROPER', 'TIME'] +4852-28319-0023-2093: ref=['MISTER', 'WICKER', 'BEGAN', 'MOVING', 'ABOUT', 'UPSTAIRS', 'THE', 'FLOORBOARDS', 'CREAKED', 'AND', 'STILL', 'CHRIS', 'COULD', 'NOT', 'LEAVE', 'UNTIL', 'THE', 'POTION', 'FUMED', 'AND', 'GLOWED'] +4852-28319-0023-2093: hyp=['MISTER', 'WICKER', 'BEGAN', 'MOVING', 'ABOUT', 'UPSTAIRS', 'THE', 'FLOOR', 'BOARDS', 'CREAKED', 'AND', 'STILL', 'CHRIS', 'COULD', 'NOT', 'LEAVE', 'UNTIL', 'THE', 'FOCIN', 'FUMED', 'AND', 'GLOWED'] +4852-28319-0024-2094: ref=['WITH', 'INFINITE', 'CAUTION', 'CHRIS', 'CLOSED', 'THE', 'DOOR', 'SILENTLY', 'BEHIND', 'HIM', 'AND', 'RUNNING', 'LIGHTLY', 'FORWARD', 'REACHED', 'THE', 'FIGURE', 'OF', 'THE', 'NEGRO', 'BOY'] +4852-28319-0024-2094: hyp=['WITH', 'INFINITE', 'CAUTION', 'CHRIS', 'CLOSED', 'THE', 'DOOR', 'SILENTLY', 'BEHIND', 'HIM', 'AND', 'RUNNING', 'LATE', 'BEFORE', 'REACHED', 'THE', 'FIGURE', 'AT', 'THE', 'NEGRO', 'BOY'] +4852-28319-0025-2095: ref=['IT', 'WAS', 'AS', 'IF', 'THE', 'STIFFNESS', 'MELTED'] +4852-28319-0025-2095: hyp=['IT', 'WAS', 'AS', 'IF', 'THE', 'STIFFNESS', 'MELTED'] +4852-28319-0026-2096: ref=['UNDER', 'HIS', 'EYES', 'THE', 'WOODEN', 'FOLDS', 'OF', 'CLOTH', 'BECAME', 'RICH', 'SILK', 'EMBROIDERY', 
'GLEAMED', 'IN', 'ITS', 'REALITY', 'UPON', 'THE', 'COAT', 'AND', 'OH', 'THE', 'FACE'] +4852-28319-0026-2096: hyp=['UNDER', 'HIS', 'EYES', 'WOODEN', 'FOLDS', 'OF', 'CLOTH', 'BECAME', 'RICH', 'SILK', 'EMBROIDERY', 'GLEAMED', 'IN', 'ITS', 'REALITY', 'UPON', 'THE', 'COAT', 'AND', 'OH', 'THE', 'FACE'] +4852-28319-0027-2097: ref=['THE', 'WOODEN', 'GRIN', 'LOOSENED', 'THE', 'LARGE', 'EYES', 'TURNED', 'THE', 'HAND', 'HOLDING', 'THE', 'HARD', 'BOUQUET', 'OF', 'CARVED', 'FLOWERS', 'MOVED', 'AND', 'LET', 'THE', 'BOUQUET', 'FALL'] +4852-28319-0027-2097: hyp=['THE', 'WOODEN', 'GRIN', 'LOOSENED', 'THE', 'LARGE', 'EYES', 'TURNED', 'THE', 'HAND', 'HOLDING', 'THE', 'HARD', 'BOUQUET', 'OF', 'CARVED', 'FLOWERS', 'MOVED', 'THE', 'BOUQUET', 'FALL'] +4852-28330-0000-2044: ref=['THEY', 'WENT', 'DOWN', 'TO', 'THEIR', 'QUARTERS', 'FIRST'] +4852-28330-0000-2044: hyp=['THEY', 'WENT', 'DOWN', 'TO', 'THEIR', 'QUARTERS', 'FIRST'] +4852-28330-0001-2045: ref=['GUESS', 'MISTER', 'FINNEY', 'WENT', 'TO', 'HIS', 'QUARTERS', 'I', "DON'T", 'REMEMBER', 'SEEING', 'HIM', 'CROSS', 'THE', 'DECK', 'OR', 'COME', 'OVER', 'THAT', 'WAY', 'AT', 'ALL'] +4852-28330-0001-2045: hyp=['GUESS', 'MISTER', 'FINNEY', 'WENT', 'TO', 'HIS', 'QUARTERS', 'I', "DON'T", 'REMEMBER', 'SEEING', 'HIM', 'CROSS', 'THE', 'DECK', 'OR', 'COME', 'OVER', 'THAT', 'WAY', 'AT', 'ALL'] +4852-28330-0002-2046: ref=['NEXT', 'NED', 'CILLEY', 'WAS', 'RELIEVED', 'AT', 'THE', 'HELM', 'BY', 'ELBERT', 'JONES', 'WHO', 'TOOK', 'OVER', 'NED', 'WENT', 'ON', 'DOWN'] +4852-28330-0002-2046: hyp=['NEXT', 'NED', 'SILLY', 'WAS', 'RELIEVED', 'TO', 'THE', 'HOME', 'BY', 'HILBER', 'JONES', 'WHO', 'TOOK', 'OVER', 'NED', 'WENT', 'ON', 'DOWN'] +4852-28330-0003-2047: ref=['IT', 'LOOKS', 'TO', 'ME', 'AS', 'IF', 'IT', 'COULD', 'HAVE', 'BEEN', 'ONE', 'OF', 'SEVERAL', 'PEOPLE', 'AND', "I'LL", 'BE', 'SWITCHED', 'IF', 'I', 'KNOW', 'WHO', "I'LL", 'KEEP', 'MY', 'EYES', 'OPEN'] +4852-28330-0003-2047: hyp=['IT', 'LOOKS', 'TO', 'ME', 'AS', 'IF', 'IT', 'COULD', 'BE', 'ONE', 'OF', 'SEVERAL', 'PEOPLE', 'AND', "I'LL", 'BE', 'SWITCHED', 'IF', 'I', 'KNOW', 'WHO', 'LOOK', 'GIVE', 'MY', 'EYES', 'UP', 'AND'] +4852-28330-0004-2048: ref=['THE', 'MIRABELLE', 'WAS', 'NEARING', 'TAHITI'] +4852-28330-0004-2048: hyp=['THE', 'MIRABELLE', 'WAS', 'NEARING', 'TEDI'] +4852-28330-0005-2049: ref=["WE'VE", 'WATER', 'AND', 'FRESH', 'STORES', 'TO', 'TAKE', 'ON', 'THERE'] +4852-28330-0005-2049: hyp=['WE', 'WATER', 'AND', 'FRESH', 'STALLS', 'TO', 'TAKE', 'ON', 'THERE'] +4852-28330-0006-2050: ref=['CHRIS', 'LOST', 'NO', 'TIME', 'AS', 'SOON', 'AS', 'HE', 'COULD', 'DO', 'IT', 'WITHOUT', 'BEING', 'NOTICED', 'IN', 'HURRYING', 'DOWN', 'TO', 'HIS', 'CABIN'] +4852-28330-0006-2050: hyp=['CHRIS', 'LOST', 'NO', 'TIME', 'AS', 'SOON', 'AS', 'HE', 'COULD', 'DO', 'IT', 'WITHOUT', 'BEING', 'NOTICED', 'AND', 'HURRYING', 'DOWN', 'TO', 'HIS', 'CABIN'] +4852-28330-0007-2051: ref=['CERTAINLY', 'MY', 'BOY', 'BOOMED', 'OUT', 'THE', 'CAPTAIN', 'HIS', 'BLUE', 'EYES', 'ABRUPTLY', 'KEEN', 'AND', 'PENETRATING'] +4852-28330-0007-2051: hyp=['CERTAINLY', 'MY', 'BOY', 'BOOMED', 'OUT', 'THE', 'CAPTAIN', 'AS', 'BLUE', 'EYES', 'ABRUPTLY', 'KEEN', 'AND', 'PENETRATING'] +4852-28330-0008-2052: ref=['MISTER', 'FINNEY', 'WILL', 'BE', 'SOME', 'TIME', 'ON', 'DECK', 'WE', 'CANNOT', 'BE', 'OVERHEARD', 'IN', 'HERE'] +4852-28330-0008-2052: hyp=['MISTER', 'FINNELL', 'WILL', 'BE', 'SOME', 'TIME', 'ON', 'DECK', 'WE', 'CANNOT', 'BE', 'OWN', 'HEARD', 'IN', 'HERE'] +4852-28330-0009-2053: ref=['HIS', 'FACE', 'FROZE', 'WITH', 'NERVOUSNESS', 'THAT', 'THIS', 'MIGHT', 'NOT', 'DO', 'AS', 
'AN', 'ANSWER', 'AND', 'HE', 'STOOD', 'STIFF', 'AND', 'STILL', 'BEFORE', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0009-2053: hyp=['HIS', 'FACE', 'ROSE', 'WITH', 'NERVOUSNESS', 'THAT', 'THIS', 'MIGHT', 'DO', 'AS', 'AN', 'ANSWER', 'AND', 'HE', 'STOOD', 'STIFF', 'AND', 'STILL', 'BEFORE', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0010-2054: ref=['THE', 'CAPTAIN', 'SAT', 'FORWARD', 'IN', 'HIS', 'CHAIR', 'LOOKING', 'AT', 'HIM', 'FOR', 'A', 'LONG', 'MOMENT', 'CONSIDERING'] +4852-28330-0010-2054: hyp=['THE', 'CAPTAIN', 'SAT', 'FORWARD', 'IN', 'HIS', 'CHAIR', 'LOOKING', 'AT', 'HIM', 'FOR', 'A', 'LONG', 'MOMENT', 'CONSIDERING'] +4852-28330-0011-2055: ref=['THEN', 'HE', 'SAID', 'WELL', 'I', 'DO', 'NOT', 'CARE', 'FOR', 'IT', 'I', 'CANNOT', 'SAY', 'I', 'DO'] +4852-28330-0011-2055: hyp=['THEN', 'HE', 'SAID', 'WELL', 'I', 'DO', 'NOT', 'CARE', 'FOR', 'IT', 'I', 'CANNOT', 'SAY', 'THAT', 'DO'] +4852-28330-0012-2056: ref=['THIS', 'SHIP', 'IS', 'MORE', 'TO', 'ME', 'THAN', 'WIFE', 'OR', 'MOTHER', 'OR', 'FAMILY'] +4852-28330-0012-2056: hyp=['THE', 'SHIP', 'IS', 'MORE', 'TO', 'ME', 'THAN', 'MY', 'FOREMOTHER', 'OR', 'FAMILY'] +4852-28330-0013-2057: ref=['HE', 'PAUSED', 'FINGERING', 'HIS', 'LOWER', 'LIP', 'AND', 'LOOKING', 'SIDEWAYS', 'IN', 'A', 'REFLECTIVE', 'FASHION', 'AT', 'CHRIS', 'STANDING', 'BEFORE', 'HIM'] +4852-28330-0013-2057: hyp=['AND', 'PAUSED', 'FINGERING', 'HIS', 'LOWER', 'LIP', 'AND', 'LOOKING', 'SIDEWAYS', 'INTO', 'REFLECTIVE', 'FASHION', 'AT', 'CHRIS', 'STANDING', 'BEFORE', 'HIM'] +4852-28330-0014-2058: ref=['WE', 'SHALL', 'SAY', 'NO', 'MORE', 'BUT', 'I', 'TRUST', 'YOU', 'UNDERSTAND', 'THE', 'RESPONSIBILITY', 'YOU', 'HAVE'] +4852-28330-0014-2058: hyp=['WE', 'SHALL', 'SAY', 'NO', 'MORE', 'BUT', 'I', 'TRUST', 'YOU', 'UNDERSTAND', 'THE', 'RESPONSIBILITY', 'YOU', 'HAVE'] +4852-28330-0015-2059: ref=['THIS', 'SHIP', 'ITS', 'CARGO', 'AND', 'ITS', 'MEN', 'WILL', 'BE', 'IN', 'YOUR', 'HANDS'] +4852-28330-0015-2059: hyp=['THE', 'SHIP', 'ITS', 'CARGO', 'IN', 'ITS', 'MEN', 'WILL', 'BE', 'IN', 'YOUR', 'HANDS'] +4852-28330-0016-2060: ref=['YES', 'SIR', 'I', 'THINK', 'I', 'CAN', 'DO', 'IT', 'SAFELY', 'OR', 'I', 'SHOULD', 'NOT', 'TRY', 'SIR'] +4852-28330-0016-2060: hyp=['YES', 'SIR', 'I', 'THINK', 'I', 'CAN', 'DO', 'IT', 'SAFELY', 'OR', 'I', 'SHOULD', 'NOT', 'TRY', 'SIR'] +4852-28330-0017-2061: ref=['CAPTAIN', "BLIZZARD'S", 'ROUND', 'PINK', 'FACE', 'CREASED', 'IN', 'HIS', 'WINNING', 'SMILE'] +4852-28330-0017-2061: hyp=['CAPTAIN', "BLIZZARD'S", 'ROUND', 'PINK', 'FACED', 'CREASED', 'IN', 'ITS', 'WINNING', 'SMILE'] +4852-28330-0018-2062: ref=['HE', 'THEN', 'WENT', 'ON', 'TO', 'DESCRIBE', 'WHAT', 'ELSE', 'WAS', 'TO', 'FOLLOW', 'THE', 'COVERING', 'OF', 'THE', 'SHIP', 'WITH', 'LEAVES', 'TO', 'MAKE', 'IT', 'BLEND', 'WITH', 'ITS', 'SURROUNDINGS'] +4852-28330-0018-2062: hyp=['HE', 'THEN', 'WENT', 'ON', 'TO', 'DESCRIBE', 'WHAT', 'ELSE', 'WAS', 'TO', 'FOLLOW', 'THE', 'COVERING', 'OF', 'THE', 'SHIP', 'WITH', 'LEAVES', 'TO', 'MAKE', 'IT', 'BLEND', 'WITH', 'ITS', 'SURROUNDINGS'] +4852-28330-0019-2063: ref=['CAMOUFLAGE', 'WAS', 'NOT', 'A', 'WORD', 'THE', 'CAPTAIN', 'OR', 'ANYONE', 'ELSE', 'OF', 'HIS', 'TIME', 'YET', 'UNDERSTOOD'] +4852-28330-0019-2063: hyp=['THE', 'CAMEL', 'FLASH', 'WAS', 'NOT', 'A', 'WORD', 'THE', 'CAPTAIN', 'OR', 'ANY', 'ONE', 'ELSE', 'OF', 'HIS', 'TIME', 'HE', 'HAD', 'UNDERSTOOD'] +4852-28330-0020-2064: ref=['WHAT', 'CAN', 'BE', 'SAID', 'DURING', 'THAT', 'TIME', 'SIR', 'CHRIS', 'THOUGHT', 'TO', 'ASK'] +4852-28330-0020-2064: hyp=['WHAT', 'CAN', 'BE', 'SAID', 'DURING', 'THAT', 'TIME', 'SIR', 'CHRISTO', 'TO', 'ASK'] 
+4852-28330-0021-2065: ref=['I', 'AM', 'SOMEWHAT', 'SKILLED', 'IN', 'MEDICAMENTS', 'I', 'HAVE', 'TO', 'BE', 'AS', 'CAPTAIN', 'OF', 'A', 'SHIP', 'AND', 'THE', 'CREW', 'KNOW', 'IT'] +4852-28330-0021-2065: hyp=['I', 'AM', 'SOMEWHAT', 'SKILLED', 'AT', 'MEDICAMENTS', 'I', 'HAVE', 'TO', 'BE', 'AS', 'A', 'CAPTAIN', 'OF', 'SHIP', 'AND', 'THE', 'CREW', 'KNOW', 'IT'] +4852-28330-0022-2066: ref=['I', 'SHALL', 'SAY', 'THAT', 'YOU', 'ARE', 'IN', 'MY', 'OWN', 'CABIN', 'SO', 'THAT', 'I', 'CAN', 'CARE', 'FOR', 'YOU'] +4852-28330-0022-2066: hyp=['I', 'SHALL', 'SAY', 'THAT', 'YOU', 'ARE', 'IN', 'MY', 'OWN', 'CABIN', 'SO', 'THAT', 'I', 'CAN', 'CARE', 'FOR', 'YOU'] +4852-28330-0023-2067: ref=['NOT', 'SINCE', 'HE', 'HAD', 'LEFT', 'MISTER', 'WICKER', 'HAD', 'CHRIS', 'FELT', 'SUCH', 'CONFIDENCE', 'AS', 'HE', 'DID', 'IN', 'THE', 'WORDS', 'AND', 'ACTIONS', 'OF', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0023-2067: hyp=['NOT', 'SINCE', 'HE', 'HAD', 'LEFT', 'MISTER', 'WICKER', 'AND', 'CHRIS', 'FELT', 'SUCH', 'CONFIDENCE', 'AS', 'HE', 'DID', 'IN', 'THE', 'WORDS', 'AND', 'ACTIONS', 'OF', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0024-2068: ref=['HE', 'KNEW', 'NOW', 'THAT', 'HIS', 'ABSENCE', 'FOR', 'AS', 'LONG', 'AS', 'HE', 'HAD', 'TO', 'BE', 'AWAY', 'WOULD', 'BE', 'COVERED', 'UP', 'AND', 'SATISFACTORILY', 'ACCOUNTED', 'FOR'] +4852-28330-0024-2068: hyp=['HE', 'KNEW', 'NOW', 'THAT', 'HIS', 'ABSENCE', 'FOR', 'AS', 'LONG', 'AS', 'HE', 'HAD', 'HAD', 'TO', 'BE', 'AWAY', 'WOULD', 'BE', 'COVERED', 'UP', 'AND', 'SATISFACTORILY', 'ACCOUNTED', 'FOR'] +4852-28330-0025-2069: ref=['THEIR', 'CONVERSATION', 'HAD', 'TAKEN', 'SOME', 'LITTLE', 'WHILE'] +4852-28330-0025-2069: hyp=['THEIR', 'CONVERSATION', 'HAD', 'TAKEN', 'SOME', 'OF', 'THE', 'WHILE'] +533-1066-0000-796: ref=['WHEN', 'CHURCHYARDS', 'YAWN'] +533-1066-0000-796: hyp=['WHEN', 'CHURCHYARDS', 'YAWN'] +533-1066-0001-797: ref=['I', 'KNEW', 'WELL', 'ENOUGH', 'THAT', 'HE', 'MIGHT', 'BE', 'CARRIED', 'THOUSANDS', 'OF', 'MILES', 'IN', 'THE', 'BOX', 'CAR', 'LOCKED', 'IN', 'PERHAPS', 'WITHOUT', 'WATER', 'OR', 'FOOD'] +533-1066-0001-797: hyp=['I', 'KNEW', 'WELL', 'ENOUGH', 'THAT', 'HE', 'MIGHT', 'BE', 'CARRIED', 'THOUSAND', 'OF', 'MILES', 'IN', 'THE', 'BOX', 'CAR', 'LOCKED', 'IN', 'PERHAPS', 'WITHOUT', 'WATER', 'OR', 'FOLD'] +533-1066-0002-798: ref=['I', 'AM', 'SURE', 'I', 'KISSED', 'LIDDY', 'AND', 'I', 'HAVE', 'HAD', 'TERRIBLE', 'MOMENTS', 'SINCE', 'WHEN', 'I', 'SEEM', 'TO', 'REMEMBER', 'KISSING', 'MISTER', 'JAMIESON', 'TOO', 'IN', 'THE', 'EXCITEMENT'] +533-1066-0002-798: hyp=['I', 'AM', 'SURE', 'I', 'GUESS', 'LIVY', 'AND', "I'VE", 'HAD', 'SEVERAL', 'MOMENTS', 'SINCE', 'WHEN', 'I', 'SEEMED', 'TO', 'REMEMBER', 'KISSING', 'MISTER', 'JAMIESON', 'TOO', 'IN', 'THE', 'EXCITEMENT'] +533-1066-0003-799: ref=['FORTUNATELY', 'WARNER', 'AND', 'THE', 'DETECTIVES', 'WERE', 'KEEPING', 'BACHELOR', 'HALL', 'IN', 'THE', 'LODGE'] +533-1066-0003-799: hyp=['FORTUNATELY', 'WARNER', 'UNDER', 'THE', 'TETE', 'WERE', 'KEEPING', 'BACHELOR', 'HALL', 'IN', 'THE', 'LODGE'] +533-1066-0004-800: ref=['OUT', 'OF', 'DEFERENCE', 'TO', 'LIDDY', 'THEY', 'WASHED', 'THEIR', 'DISHES', 'ONCE', 'A', 'DAY', 'AND', 'THEY', 'CONCOCTED', 'QUEER', 'MESSES', 'ACCORDING', 'TO', 'THEIR', 'SEVERAL', 'ABILITIES'] +533-1066-0004-800: hyp=['OUT', 'OF', 'THEIR', 'FIRST', 'LIVY', 'THEY', 'WASHED', 'HER', 'DISHES', 'ONCE', 'TO', 'DAY', 'AND', 'THEY', 'CONCLUDED', 'QUEER', 'MASSES', 'ACCORDING', 'TO', 'THEIR', 'SEVERAL', 'ABILITIES'] +533-1066-0005-801: ref=['MISS', 'INNES', 'HE', 'SAID', 'STOPPING', 'ME', 'AS', 'I', 'WAS', 'ABOUT', 'TO', 'GO', 'TO', 'MY', 
'ROOM', 'UP', 'STAIRS', 'HOW', 'ARE', 'YOUR', 'NERVES', 'TONIGHT'] +533-1066-0005-801: hyp=['MISS', 'EANS', 'HE', 'SAID', 'STOPPING', 'ME', 'AS', 'I', 'WAS', 'ABOUT', 'TO', 'GO', 'TO', 'MY', 'ROOM', 'UP', 'STAIRS', 'HOW', 'ARE', 'YOUR', 'NERVES', 'TO', 'NIGHT'] +533-1066-0006-802: ref=['I', 'HAVE', 'NONE', 'I', 'SAID', 'HAPPILY'] +533-1066-0006-802: hyp=['I', 'HAVE', 'NONE', 'I', 'SAID', 'HAPPILY'] +533-1066-0007-803: ref=['I', 'MEAN', 'HE', 'PERSISTED', 'DO', 'YOU', 'FEEL', 'AS', 'THOUGH', 'YOU', 'COULD', 'GO', 'THROUGH', 'WITH', 'SOMETHING', 'RATHER', 'UNUSUAL'] +533-1066-0007-803: hyp=['I', 'MEAN', 'HE', 'PERSISTED', 'DO', 'YOU', 'FEEL', 'AS', 'THOUGH', 'YOU', 'COULD', 'GO', 'THROUGH', 'WITH', 'SOMETHING', 'RATHER', 'UNUSUAL'] +533-1066-0008-804: ref=['THE', 'MOST', 'UNUSUAL', 'THING', 'I', 'CAN', 'THINK', 'OF', 'WOULD', 'BE', 'A', 'PEACEFUL', 'NIGHT'] +533-1066-0008-804: hyp=['THE', 'MOST', 'UNUSUAL', 'THING', 'I', 'CAN', 'THINK', 'OF', 'WOULD', 'BE', 'A', 'PEACEFUL', 'NIGHT'] +533-1066-0009-805: ref=['SOMETHING', 'IS', 'GOING', 'TO', 'OCCUR', 'HE', 'SAID'] +533-1066-0009-805: hyp=['SOMETHING', 'IS', 'GOING', 'TO', 'OCCUR', 'HE', 'SAID'] +533-1066-0010-806: ref=['PUT', 'ON', 'HEAVY', 'SHOES', 'AND', 'SOME', 'OLD', 'DARK', 'CLOTHES', 'AND', 'MAKE', 'UP', 'YOUR', 'MIND', 'NOT', 'TO', 'BE', 'SURPRISED', 'AT', 'ANYTHING'] +533-1066-0010-806: hyp=['PUT', 'ON', 'HEAVY', 'SHOES', 'AND', 'SOME', 'ALL', 'DARK', 'CLOTHES', 'AND', 'MAKE', 'UP', 'YOUR', 'MIND', 'NOT', 'BE', 'SURPRISED', 'AT', 'ANYTHING'] +533-1066-0011-807: ref=['LIDDY', 'WAS', 'SLEEPING', 'THE', 'SLEEP', 'OF', 'THE', 'JUST', 'WHEN', 'I', 'WENT', 'UP', 'STAIRS', 'AND', 'I', 'HUNTED', 'OUT', 'MY', 'THINGS', 'CAUTIOUSLY'] +533-1066-0011-807: hyp=['LINING', 'WAS', 'SLEEPING', 'SLEEP', 'OF', 'THE', 'JUST', 'WHEN', 'I', 'WENT', 'UPSTAIRS', 'AND', 'I', 'HUNTED', 'OUT', 'MY', 'THINGS', 'CAUTIOUSLY'] +533-1066-0012-808: ref=['THEY', 'WERE', 'TALKING', 'CONFIDENTIALLY', 'TOGETHER', 'BUT', 'WHEN', 'I', 'CAME', 'DOWN', 'THEY', 'CEASED'] +533-1066-0012-808: hyp=['DO', 'YOU', 'TALKING', 'TO', 'FILLIENTLY', 'TOGETHER', 'BUT', 'WHEN', 'I', 'CAME', 'DOWN', 'THEY', 'CEASED'] +533-1066-0013-809: ref=['THERE', 'WERE', 'A', 'FEW', 'PREPARATIONS', 'TO', 'BE', 'MADE', 'THE', 'LOCKS', 'TO', 'BE', 'GONE', 'OVER', 'WINTERS', 'TO', 'BE', 'INSTRUCTED', 'AS', 'TO', 'RENEWED', 'VIGILANCE', 'AND', 'THEN', 'AFTER', 'EXTINGUISHING', 'THE', 'HALL', 'LIGHT', 'WE', 'CREPT', 'IN', 'THE', 'DARKNESS', 'THROUGH', 'THE', 'FRONT', 'DOOR', 'AND', 'INTO', 'THE', 'NIGHT'] +533-1066-0013-809: hyp=['THEY', 'WERE', 'A', 'FEW', 'PREPARATIONS', 'TO', 'BE', 'MADE', 'LOGS', 'TO', 'BE', 'GONE', 'OVER', 'WINTERSPIN', 'INSTRUCTIVE', 'AS', 'TO', 'RENEWED', 'VISIONS', 'AND', 'THEN', 'AFTER', 'EXTINGUISHING', 'THE', 'WHOLE', 'LIGHT', 'WE', 'CREPT', 'IN', 'THE', 'DARKNESS', 'THROUGH', 'THE', 'FRONT', 'DOOR', 'AND', 'INTO', 'THE', 'NIGHT'] +533-1066-0014-810: ref=['I', 'ASKED', 'NO', 'QUESTIONS'] +533-1066-0014-810: hyp=['I', 'ASKED', 'NO', 'QUESTIONS'] +533-1066-0015-811: ref=['ONCE', 'ONLY', 'SOMEBODY', 'SPOKE', 'AND', 'THEN', 'IT', 'WAS', 'AN', 'EMPHATIC', 'BIT', 'OF', 'PROFANITY', 'FROM', 'DOCTOR', 'STEWART', 'WHEN', 'HE', 'RAN', 'INTO', 'A', 'WIRE', 'FENCE'] +533-1066-0015-811: hyp=['WAS', 'ONLY', 'SOMEBODY', 'SPOKE', 'AND', 'THEN', 'IT', 'WAS', 'AN', 'EMPHATIC', 'WID', 'OF', 'PROFANITY', 'FROM', 'DOCTOR', 'STEWART', 'WHEN', 'HE', 'RAN', 'INTO', 'A', 'WIRE', 'FENCE'] +533-1066-0016-812: ref=['I', 'HARDLY', 'KNOW', 'WHAT', 'I', 'EXPECTED'] +533-1066-0016-812: hyp=['I', 'ARE', 'TO', 
'KNOW', 'WHAT', 'I', 'EXPECTED'] +533-1066-0017-813: ref=['THE', 'DOCTOR', 'WAS', 'PUFFING', 'SOMEWHAT', 'WHEN', 'WE', 'FINALLY', 'CAME', 'TO', 'A', 'HALT'] +533-1066-0017-813: hyp=['THE', 'DOCTOR', 'WAS', 'PUFFING', 'SOMEWHAT', 'WHEN', 'WE', 'FINALLY', 'CAME', 'TO', 'A', 'HALT'] +533-1066-0018-814: ref=['I', 'CONFESS', 'THAT', 'JUST', 'AT', 'THAT', 'MINUTE', 'EVEN', 'SUNNYSIDE', 'SEEMED', 'A', 'CHEERFUL', 'SPOT'] +533-1066-0018-814: hyp=['I', 'CONFESS', 'THAT', 'JUST', 'AT', 'THAT', 'MINUTE', 'EVEN', 'SUNNYSIDE', 'SEEMED', 'A', 'CHEERFUL', 'SPOT'] +533-1066-0019-815: ref=['IN', 'SPITE', 'OF', 'MYSELF', 'I', 'DREW', 'MY', 'BREATH', 'IN', 'SHARPLY'] +533-1066-0019-815: hyp=['IN', 'SPITE', 'OF', 'MYSELF', 'I', 'DREW', 'MY', 'BREATH', 'IN', 'SHARPLY'] +533-1066-0020-816: ref=['IT', 'WAS', 'ALEX', 'ARMED', 'WITH', 'TWO', 'LONG', 'HANDLED', 'SPADES'] +533-1066-0020-816: hyp=['IT', 'WAS', 'ALEX', 'ARMED', 'WITH', 'TWO', 'LONG', 'HANDLED', 'SPADES'] +533-1066-0021-817: ref=['THE', 'DOCTOR', 'KEPT', 'A', 'KEEN', 'LOOKOUT', 'BUT', 'NO', 'ONE', 'APPEARED'] +533-1066-0021-817: hyp=['DOCTOR', 'KEPT', 'A', 'KIN', 'LOOK', 'OUT', 'BUT', 'NO', 'ONE', 'APPEARED'] +533-1066-0022-818: ref=["THERE'S", 'ONE', 'THING', 'SURE', "I'LL", 'NOT', 'BE', 'SUSPECTED', 'OF', 'COMPLICITY'] +533-1066-0022-818: hyp=["THERE'S", 'ONE', 'THING', 'SURE', "I'LL", 'NOT', 'BE', 'SUSPECTED', 'OF', 'COMPLICITY'] +533-1066-0023-819: ref=['A', 'DOCTOR', 'IS', 'GENERALLY', 'SUPPOSED', 'TO', 'BE', 'HANDIER', 'AT', 'BURYING', 'FOLKS', 'THAN', 'AT', 'DIGGING', 'THEM', 'UP'] +533-1066-0023-819: hyp=['DOCTOR', 'IS', 'GENERALLY', 'SUPPOSED', 'TO', 'BE', 'A', 'HANDIER', 'AT', 'BEARING', 'FOLKS', 'THAN', 'A', 'TIGING', 'THEM', 'UP'] +533-1066-0024-820: ref=['I', 'HELD', 'ON', 'TO', 'HIM', 'FRANTICALLY', 'AND', 'SOMEHOW', 'I', 'GOT', 'THERE', 'AND', 'LOOKED', 'DOWN'] +533-1066-0024-820: hyp=['I', 'HELD', 'ON', 'TO', 'HIM', 'FRANTICALLY', 'AND', 'SOMEHOW', 'I', 'GOT', 'THERE', 'AND', 'LOOKED', 'DOWN'] +533-131556-0000-821: ref=['BUT', 'HOW', 'AM', 'I', 'TO', 'GET', 'OVER', 'THE', 'TEN', 'OR', 'TWELVE', 'DAYS', 'THAT', 'MUST', 'YET', 'ELAPSE', 'BEFORE', 'THEY', 'GO'] +533-131556-0000-821: hyp=['BUT', 'HOW', 'AM', 'I', 'TO', 'HER', 'OVER', 'THE', 'TEN', 'OR', 'TWELVE', 'DAYS', 'THAT', 'MUST', 'YET', 'ELAPSE', 'BEFORE', 'THEY', 'GO'] +533-131556-0001-822: ref=['FOR', 'NONE', 'COULD', 'INJURE', 'ME', 'AS', 'HE', 'HAS', 'DONE', 'OH'] +533-131556-0001-822: hyp=['FOR', 'NONE', 'COULD', 'ENDURE', 'ME', 'AS', 'HE', 'HAS', 'DONE', 'OH'] +533-131556-0002-823: ref=['THE', 'WORD', 'STARES', 'ME', 'IN', 'THE', 'FACE', 'LIKE', 'A', 'GUILTY', 'CONFESSION', 'BUT', 'IT', 'IS', 'TRUE', 'I', 'HATE', 'HIM', 'I', 'HATE', 'HIM'] +533-131556-0002-823: hyp=['THE', 'WORDS', 'TEARS', 'ME', 'IN', 'THE', 'FACE', 'LIKE', 'A', 'GUILTY', 'CONFESSION', 'BUT', 'IT', 'IS', 'TRUE', 'I', 'HATE', 'HIM', 'I', 'HATE', 'HIM'] +533-131556-0003-824: ref=['I', 'SOMETIMES', 'THINK', 'I', 'OUGHT', 'TO', 'GIVE', 'HIM', 'CREDIT', 'FOR', 'THE', 'GOOD', 'FEELING', 'HE', 'SIMULATES', 'SO', 'WELL', 'AND', 'THEN', 'AGAIN', 'I', 'THINK', 'IT', 'IS', 'MY', 'DUTY', 'TO', 'SUSPECT', 'HIM', 'UNDER', 'THE', 'PECULIAR', 'CIRCUMSTANCES', 'IN', 'WHICH', 'I', 'AM', 'PLACED'] +533-131556-0003-824: hyp=['I', 'SOMETIMES', 'THINK', 'I', 'OUGHT', 'TO', 'GIVE', 'HIM', 'CREDIT', 'FOR', 'THE', 'GOOD', 'FEELING', 'SIMILATE', 'SO', 'WELL', 'AND', 'THEN', 'AGAIN', 'I', 'THINK', 'IT', 'IS', 'MY', 'DUTY', 'TO', 'SUSPECT', 'HIM', 'UNDER', 'THE', 'PECULIAR', 'CIRCUMSTANCES', 'IN', 'WHICH', 'I', 'AM', 'PLACED'] 
+533-131556-0004-825: ref=['I', 'HAVE', 'DONE', 'WELL', 'TO', 'RECORD', 'THEM', 'SO', 'MINUTELY'] +533-131556-0004-825: hyp=['I', 'HAVE', 'DONE', 'WELL', 'TO', 'RECORD', 'HIM', 'SOMINUTELY'] +533-131556-0005-826: ref=['THEY', 'HAD', 'BETAKEN', 'THEMSELVES', 'TO', 'THEIR', 'WORK', 'I', 'LESS', 'TO', 'DIVERT', 'MY', 'MIND', 'THAN', 'TO', 'DEPRECATE', 'CONVERSATION', 'HAD', 'PROVIDED', 'MYSELF', 'WITH', 'A', 'BOOK'] +533-131556-0005-826: hyp=['THEY', 'HAVE', 'HAD', 'TAKEN', 'THEMSELVES', 'TO', 'THEIR', 'WORK', 'I', 'LESS', 'DIVERT', 'MY', 'MIND', 'THAN', 'TO', 'DEPRECATE', 'CONVERSATION', 'AT', 'REVOLT', 'A', 'BOOK'] +533-131556-0006-827: ref=['I', 'AM', 'TOO', 'WELL', 'ACQUAINTED', 'WITH', 'YOUR', 'CHARACTER', 'AND', 'CONDUCT', 'TO', 'FEEL', 'ANY', 'REAL', 'FRIENDSHIP', 'FOR', 'YOU', 'AND', 'AS', 'I', 'AM', 'WITHOUT', 'YOUR', 'TALENT', 'FOR', 'DISSIMULATION', 'I', 'CANNOT', 'ASSUME', 'THE', 'APPEARANCE', 'OF', 'IT'] +533-131556-0006-827: hyp=['I', 'AM', 'TOO', 'WELL', 'ACQUAINTED', 'WITH', 'THEIR', 'CHARACTER', 'AND', 'CONDUCT', 'TO', 'FEEL', 'ANY', 'REAL', 'FRIENDSHIP', 'FOR', 'YOU', 'AND', 'AS', 'I', 'AM', 'WITHOUT', 'YOUR', 'TALENT', 'FOR', 'THE', 'SIMULATION', 'I', 'CANNOT', 'ASSUME', 'THE', 'APPEARANCE', 'OF', 'IT'] +533-131556-0007-828: ref=['UPON', 'PERUSING', 'THIS', 'SHE', 'TURNED', 'SCARLET', 'AND', 'BIT', 'HER', 'LIP'] +533-131556-0007-828: hyp=['UP', 'AND', 'PERUSING', 'THIS', 'SHE', 'TURNED', 'SCARLET', 'AND', 'BIT', 'HER', 'LIP'] +533-131556-0008-829: ref=['YOU', 'MAY', 'GO', 'MILICENT', 'AND', "SHE'LL", 'FOLLOW', 'IN', 'A', 'WHILE', 'MILICENT', 'WENT'] +533-131556-0008-829: hyp=['YOU', 'MAY', 'GO', 'MILLICINE', 'AND', "SHE'LL", 'FOLLOWING', 'A', 'WHILE', 'MELLICENT', 'WENT'] +533-131556-0009-830: ref=['WILL', 'YOU', 'OBLIGE', 'ME', 'HELEN', 'CONTINUED', 'SHE'] +533-131556-0009-830: hyp=['OLY', 'OBLIGED', 'ME', 'ALLAN', 'CONTINUED', 'SHE'] +533-131556-0010-831: ref=['AH', 'YOU', 'ARE', 'SUSPICIOUS'] +533-131556-0010-831: hyp=['HA', 'YOU', 'ARE', 'SUSPICIOUS'] +533-131556-0011-832: ref=['IF', 'I', 'WERE', 'SUSPICIOUS', 'I', 'REPLIED', 'I', 'SHOULD', 'HAVE', 'DISCOVERED', 'YOUR', 'INFAMY', 'LONG', 'BEFORE'] +533-131556-0011-832: hyp=['IF', 'I', 'WERE', 'SUSPICIOUS', 'I', 'REPLIED', 'I', 'SHOULD', 'HAVE', 'DISCOVERED', 'YOUR', 'INFAMYLON', 'BEFORE'] +533-131556-0012-833: ref=['I', 'ENJOY', 'A', 'MOONLIGHT', 'RAMBLE', 'AS', 'WELL', 'AS', 'YOU', 'I', 'ANSWERED', 'STEADILY', 'FIXING', 'MY', 'EYES', 'UPON', 'HER', 'AND', 'THE', 'SHRUBBERY', 'HAPPENS', 'TO', 'BE', 'ONE', 'OF', 'MY', 'FAVOURITE', 'RESORTS'] +533-131556-0012-833: hyp=['I', 'ENJOY', 'A', 'MONTH', 'RAMBLE', 'AS', 'WELL', 'AS', 'YOU', 'I', 'ANSWERED', 'STEADILY', 'FIXING', 'MY', 'EYES', 'UP', 'AND', 'EARTH', 'AND', 'SHRABBERY', 'HAPPENS', 'TO', 'BE', 'ONE', 'OF', 'MY', 'FAVORITE', 'RESORTS'] +533-131556-0013-834: ref=['SHE', 'COLOURED', 'AGAIN', 'EXCESSIVELY', 'AND', 'REMAINED', 'SILENT', 'PRESSING', 'HER', 'FINGER', 'AGAINST', 'HER', 'TEETH', 'AND', 'GAZING', 'INTO', 'THE', 'FIRE'] +533-131556-0013-834: hyp=['SHE', 'COLOURED', 'BEGAN', 'EXCESSIVELY', 'AND', 'REMAINED', 'SILENT', 'RAISING', 'HER', 'FINGER', 'AGAINST', 'HER', 'CHEEKS', 'AND', 'GAZING', 'INTO', 'THE', 'FIRE'] +533-131556-0014-835: ref=['I', 'WATCHED', 'HER', 'A', 'FEW', 'MOMENTS', 'WITH', 'A', 'FEELING', 'OF', 'MALEVOLENT', 'GRATIFICATION', 'THEN', 'MOVING', 'TOWARDS', 'THE', 'DOOR', 'I', 'CALMLY', 'ASKED', 'IF', 'SHE', 'HAD', 'ANYTHING', 'MORE', 'TO', 'SAY'] +533-131556-0014-835: hyp=['I', 'WATCH', 'FOR', 'A', 'FEW', 'MOMENTS', 'TO', 'THE', 'FEELING', 'OF', 
'MALEVOLENT', 'GRATIFICATION', 'THEN', 'MOVING', 'TOWARDS', 'THE', 'DOOR', 'I', 'CALMLY', 'ASKED', 'IF', 'SHE', 'HAD', 'ANYTHING', 'MORE', 'TO', 'SAY'] +533-131556-0015-836: ref=['YES', 'YES'] +533-131556-0015-836: hyp=['YES', 'YES'] +533-131556-0016-837: ref=['SUPPOSE', 'I', 'DO'] +533-131556-0016-837: hyp=['SUPPOSE', 'I', 'DO'] +533-131556-0017-838: ref=['SHE', 'PAUSED', 'IN', 'EVIDENT', 'DISCONCERTION', 'AND', 'PERPLEXITY', 'MINGLED', 'WITH', 'ANGER', 'SHE', 'DARED', 'NOT', 'SHOW'] +533-131556-0017-838: hyp=['SHE', 'PAUSED', 'IN', 'EVIDENT', 'DISCONCERTION', 'AND', 'PERPLEXITY', 'MINGLED', 'WITH', 'ANGER', 'SHE', 'DARED', 'NOT', 'SHOW'] +533-131556-0018-839: ref=['I', 'CANNOT', 'RENOUNCE', 'WHAT', 'IS', 'DEARER', 'THAN', 'LIFE', 'SHE', 'MUTTERED', 'IN', 'A', 'LOW', 'HURRIED', 'TONE'] +533-131556-0018-839: hyp=['I', 'CANNOT', 'RENOUNCE', 'WHAT', 'IS', 'DEARER', 'THAN', 'LIFE', 'SHE', 'MUTTERED', 'IN', 'A', 'LOW', 'HURRIED', 'TONE'] +533-131556-0019-840: ref=['IF', 'YOU', 'ARE', 'GENEROUS', 'HERE', 'IS', 'A', 'FITTING', 'OPPORTUNITY', 'FOR', 'THE', 'EXERCISE', 'OF', 'YOUR', 'MAGNANIMITY', 'IF', 'YOU', 'ARE', 'PROUD', 'HERE', 'AM', 'I', 'YOUR', 'RIVAL', 'READY', 'TO', 'ACKNOWLEDGE', 'MYSELF', 'YOUR', 'DEBTOR', 'FOR', 'AN', 'ACT', 'OF', 'THE', 'MOST', 'NOBLE', 'FORBEARANCE'] +533-131556-0019-840: hyp=['IF', 'YOU', 'ARE', 'GENERALS', 'HERE', 'IS', 'A', 'FEELING', 'OPPORTUNITY', 'FOR', 'THE', 'EXERCISE', 'OF', 'YOUR', 'MAGNANIMITY', 'IF', 'YOU', 'ARE', 'PROUD', 'HEAR', 'AM', 'I', 'YOUR', 'RIVAL', 'RETICOSE', 'MYSELF', 'YOUR', 'DAUGHTER', 'FOR', 'ACT', 'OF', 'MOST', 'NOBLE', 'FORBEARANCE'] +533-131556-0020-841: ref=['I', 'SHALL', 'NOT', 'TELL', 'HIM'] +533-131556-0020-841: hyp=['I', 'SHALL', 'NOT', 'TELL', 'HIM'] +533-131556-0021-842: ref=['GIVE', 'ME', 'NO', 'THANKS', 'IT', 'IS', 'NOT', 'FOR', 'YOUR', 'SAKE', 'THAT', 'I', 'REFRAIN'] +533-131556-0021-842: hyp=['GIVE', 'ME', 'NO', 'THANKS', 'IT', 'IS', 'NOT', 'FOR', 'YOUR', 'SAKE', 'THAT', 'I', 'REFRAIN'] +533-131556-0022-843: ref=['AND', 'MILICENT', 'WILL', 'YOU', 'TELL', 'HER'] +533-131556-0022-843: hyp=['AND', 'MILLISON', 'WILL', 'IT', 'TELL', 'HER'] +533-131556-0023-844: ref=['I', 'WOULD', 'NOT', 'FOR', 'MUCH', 'THAT', 'SHE', 'SHOULD', 'KNOW', 'THE', 'INFAMY', 'AND', 'DISGRACE', 'OF', 'HER', 'RELATION'] +533-131556-0023-844: hyp=['I', 'WILL', 'NOT', 'FOR', 'MUCH', 'THAT', 'YOU', 'SHOULD', 'NOT', 'EVEN', 'IN', 'DISGRACE', 'OF', 'HER', 'RELATION'] +533-131556-0024-845: ref=['YOU', 'USE', 'HARD', 'WORDS', 'MISSUS', 'HUNTINGDON', 'BUT', 'I', 'CAN', 'PARDON', 'YOU'] +533-131556-0024-845: hyp=['YOU', 'USE', 'OUR', 'WORDS', 'MISSUS', 'HUNTINGDON', 'BUT', 'I', 'CAN', 'PARDON', 'YOU'] +533-131556-0025-846: ref=['HOW', 'DARE', 'YOU', 'MENTION', 'HIS', 'NAME', 'TO', 'ME'] +533-131556-0025-846: hyp=['HOW', 'DARE', 'YOU', 'MENTION', 'HIS', 'NAME', 'TO', 'ME'] +533-131562-0000-847: ref=['IT', 'SEEMS', 'VERY', 'INTERESTING', 'LOVE', 'SAID', 'HE', 'LIFTING', 'HIS', 'HEAD', 'AND', 'TURNING', 'TO', 'WHERE', 'I', 'STOOD', 'WRINGING', 'MY', 'HANDS', 'IN', 'SILENT', 'RAGE', 'AND', 'ANGUISH', 'BUT', "IT'S", 'RATHER', 'LONG', "I'LL", 'LOOK', 'AT', 'IT', 'SOME', 'OTHER', 'TIME', 'AND', 'MEANWHILE', "I'LL", 'TROUBLE', 'YOU', 'FOR', 'YOUR', 'KEYS', 'MY', 'DEAR', 'WHAT', 'KEYS'] +533-131562-0000-847: hyp=['IT', 'SEEMS', 'VERY', 'INTERESTING', 'LOVE', 'SAID', 'HE', 'LIFTING', 'HIS', 'HEAD', 'AND', 'SHIRTING', 'TO', 'HER', 'EYES', 'TOO', 'WRINGING', 'MY', 'HAND', 'IN', 'SILENT', 'RATES', 'AND', 'ANGUISH', 'BUT', "IT'S", 'RATHER', 'LONG', 'I', 'LOOK', 'AT', 'IT', 
'SOME', 'OTHER', 'TIME', 'AND', 'MEANWHILE', "I'LL", 'TROUBLE', 'YOU', 'FOR', 'YOUR', 'CASE', 'MY', 'DEAR', 'WHAT', 'CASE'] +533-131562-0001-848: ref=['THE', 'KEYS', 'OF', 'YOUR', 'CABINET', 'DESK', 'DRAWERS', 'AND', 'WHATEVER', 'ELSE', 'YOU', 'POSSESS', 'SAID', 'HE', 'RISING', 'AND', 'HOLDING', 'OUT', 'HIS', 'HAND'] +533-131562-0001-848: hyp=['IT', 'A', 'KISS', 'OF', 'YOUR', 'CABINET', 'DESK', 'DRAWER', 'AND', 'WHATEVER', 'ELSE', 'YOU', 'POSSESS', 'SAID', 'HE', 'RISING', 'AND', 'HOLDING', 'OUT', 'HIS', 'HAND'] +533-131562-0002-849: ref=['THE', 'KEY', 'OF', 'MY', 'DESK', 'IN', 'FACT', 'WAS', 'AT', 'THAT', 'MOMENT', 'IN', 'THE', 'LOCK', 'AND', 'THE', 'OTHERS', 'WERE', 'ATTACHED', 'TO', 'IT'] +533-131562-0002-849: hyp=['THE', 'KEY', 'OF', 'MY', 'VES', 'IN', 'FACT', 'WAS', 'AT', 'THAT', 'MOMENT', 'IN', 'LOVE', 'AND', 'THE', 'OTHERS', 'WERE', 'ATTACHED', 'TO', 'IT'] +533-131562-0003-850: ref=['NOW', 'THEN', 'SNEERED', 'HE', 'WE', 'MUST', 'HAVE', 'A', 'CONFISCATION', 'OF', 'PROPERTY'] +533-131562-0003-850: hyp=['NOW', 'THEN', 'SNEERED', 'HE', 'WE', 'MUST', 'HAVE', 'A', 'CONFISCATION', 'OF', 'PROPERTY'] +533-131562-0004-851: ref=['AND', 'PUTTING', 'THE', 'KEYS', 'INTO', 'HIS', 'POCKET', 'HE', 'WALKED', 'INTO', 'THE', 'LIBRARY'] +533-131562-0004-851: hyp=['AND', 'PUT', 'IN', 'THE', 'KEYS', 'INTO', 'HIS', 'POCKET', 'HE', 'WALKED', 'INTO', 'THE', 'LIBRARY'] +533-131562-0005-852: ref=['THAT', 'AND', 'ALL', 'REPLIED', 'THE', 'MASTER', 'AND', 'THE', 'THINGS', 'WERE', 'CLEARED', 'AWAY'] +533-131562-0005-852: hyp=['THAT', 'AND', 'ALL', 'REPLIED', 'THE', 'MERCER', 'AND', 'THE', 'THINGS', 'WERE', 'CLEARED', 'AWAY'] +533-131562-0006-853: ref=['MISTER', 'HUNTINGDON', 'THEN', 'WENT', 'UP', 'STAIRS'] +533-131562-0006-853: hyp=['MISTER', 'HUNTINGDON', 'THEN', 'WENT', 'UPSTAIRS'] +533-131562-0007-854: ref=['MUTTERED', 'HE', 'STARTING', 'BACK', "SHE'S", 'THE', 'VERY', 'DEVIL', 'FOR', 'SPITE'] +533-131562-0007-854: hyp=['MUTTERED', 'HE', 'STARTING', 'BACK', "SHE'S", 'VERY', 'DEVIL', 'FOR', 'SPITE'] +533-131562-0008-855: ref=['I', "DIDN'T", 'SAY', "I'D", 'BROKEN', 'IT', 'DID', 'I', 'RETURNED', 'HE'] +533-131562-0008-855: hyp=['I', 'THEN', 'SAY', "I'VE", 'BROKEN', 'IT', 'DID', 'I', 'RETURNED', 'HE'] +533-131562-0009-856: ref=['I', 'SHALL', 'PUT', 'YOU', 'UPON', 'A', 'SMALL', 'MONTHLY', 'ALLOWANCE', 'IN', 'FUTURE', 'FOR', 'YOUR', 'OWN', 'PRIVATE', 'EXPENSES', 'AND', 'YOU', "NEEDN'T", 'TROUBLE', 'YOURSELF', 'ANY', 'MORE', 'ABOUT', 'MY', 'CONCERNS', 'I', 'SHALL', 'LOOK', 'OUT', 'FOR', 'A', 'STEWARD', 'MY', 'DEAR', 'I', "WON'T", 'EXPOSE', 'YOU', 'TO', 'THE', 'TEMPTATION'] +533-131562-0009-856: hyp=['I', 'SHALL', 'PUT', 'YOU', 'UP', 'IN', 'A', 'SMALL', 'MOUTHLY', 'ALLOW', 'US', 'IN', 'FUTURE', 'FOR', 'YOUR', 'OWN', 'PRIVATE', 'EXPENSES', 'AND', 'YOU', "NEEDN'T", 'TROUBLE', 'YOURSELF', 'ANY', 'MORE', 'ABOUT', 'MY', 'CONCERNS', 'I', 'SHALL', 'LOOK', 'OUT', 'FOR', 'A', 'STEWARD', 'MY', 'DEAR', 'I', "WON'T", 'EXPOSE', 'YOU', 'TO', 'TEMPTATION'] +533-131562-0010-857: ref=['AND', 'AS', 'FOR', 'THE', 'HOUSEHOLD', 'MATTERS', 'MISSUS', 'GREAVES', 'MUST', 'BE', 'VERY', 'PARTICULAR', 'IN', 'KEEPING', 'HER', 'ACCOUNTS', 'WE', 'MUST', 'GO', 'UPON', 'AN', 'ENTIRELY', 'NEW', 'PLAN'] +533-131562-0010-857: hyp=['AND', 'AS', 'FOR', 'THE', 'HOUSE', 'OF', 'MATTERS', 'MISSUS', 'GREEBS', 'MUST', 'BE', 'VERY', 'PARTICULAR', 'IN', 'KEEPING', 'HER', 'ACCOUNTS', 'WE', 'MUST', 'GO', 'UP', 'IN', 'AN', 'ENCHANTING', 'NEW', 'PLAN'] +533-131562-0011-858: ref=['WHAT', 'GREAT', 'DISCOVERY', 'HAVE', 'YOU', 'MADE', 'NOW', 'MISTER', 'HUNTINGDON'] 
+533-131562-0011-858: hyp=['WHAT', 'GREAT', 'DISCOVERY', 'HAVE', 'YOU', 'MADE', 'NOW', 'MISTER', 'HONEYMAN'] +533-131562-0012-859: ref=['HAVE', 'I', 'ATTEMPTED', 'TO', 'DEFRAUD', 'YOU'] +533-131562-0012-859: hyp=['EVER', 'ATTENDED', 'TO', 'DEFRAUD', 'YOU'] +533-131562-0013-860: ref=['NOT', 'IN', 'MONEY', 'MATTERS', 'EXACTLY', 'IT', 'SEEMS', 'BUT', "IT'S", 'BEST', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'TEMPTATION'] +533-131562-0013-860: hyp=['NOT', 'IN', 'MONEY', 'MATTERS', 'EXACTLY', 'IT', 'SEEMS', 'BUT', "IT'S", 'BEST', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'TEMPTATION'] +533-131562-0014-861: ref=['HERE', 'BENSON', 'ENTERED', 'WITH', 'THE', 'CANDLES', 'AND', 'THERE', 'FOLLOWED', 'A', 'BRIEF', 'INTERVAL', 'OF', 'SILENCE', 'I', 'SITTING', 'STILL', 'IN', 'MY', 'CHAIR', 'AND', 'HE', 'STANDING', 'WITH', 'HIS', 'BACK', 'TO', 'THE', 'FIRE', 'SILENTLY', 'TRIUMPHING', 'IN', 'MY', 'DESPAIR'] +533-131562-0014-861: hyp=['HERE', 'BESIN', 'ENTERED', 'THE', 'CANDLES', 'AND', 'THERE', 'FELL', 'THE', 'BRIEF', 'INTERVAL', 'OF', 'SILENCE', 'I', 'SITTING', 'STEALING', 'MY', 'CHAIR', 'AND', 'HE', 'STANDING', 'WITH', 'HIS', 'BACK', 'TO', 'THE', 'FIRE', 'SILENTLY', 'TRIUMPHING', 'IN', 'MY', 'DESPAIR'] +533-131562-0015-862: ref=['I', 'KNOW', 'THAT', 'DAY', 'AFTER', 'DAY', 'SUCH', 'FEELINGS', 'WILL', 'RETURN', 'UPON', 'ME'] +533-131562-0015-862: hyp=['I', 'KNOW', 'THAT', 'DAY', 'AFTER', 'DAY', 'SUCH', 'FEELINGS', 'TO', 'RETURN', 'UP', 'ON', 'ME'] +533-131562-0016-863: ref=['I', 'TRY', 'TO', 'LOOK', 'TO', 'HIM', 'AND', 'RAISE', 'MY', 'HEART', 'TO', 'HEAVEN', 'BUT', 'IT', 'WILL', 'CLEAVE', 'TO', 'THE', 'DUST'] +533-131562-0016-863: hyp=['I', 'TRIED', 'TO', 'LOOK', 'TO', 'HIM', 'AND', 'RAISE', 'MY', 'HEART', 'TO', 'HEAVEN', 'BUT', 'IT', 'WILL', 'CLIFF', 'TO', 'THE', 'DUST'] +533-131564-0000-768: ref=['VAIN', 'HOPE', 'I', 'FEAR'] +533-131564-0000-768: hyp=['VAIN', 'HOPE', 'I', 'FEAR'] +533-131564-0001-769: ref=['MISTER', 'AND', 'MISSUS', 'HATTERSLEY', 'HAVE', 'BEEN', 'STAYING', 'AT', 'THE', 'GROVE', 'A', 'FORTNIGHT', 'AND', 'AS', 'MISTER', 'HARGRAVE', 'IS', 'STILL', 'ABSENT', 'AND', 'THE', 'WEATHER', 'WAS', 'REMARKABLY', 'FINE', 'I', 'NEVER', 'PASSED', 'A', 'DAY', 'WITHOUT', 'SEEING', 'MY', 'TWO', 'FRIENDS', 'MILICENT', 'AND', 'ESTHER', 'EITHER', 'THERE', 'OR', 'HERE'] +533-131564-0001-769: hyp=['MISTER', 'AND', 'MISSUS', 'HAUGHTERSLEY', 'HAVE', 'BEEN', 'SEEING', 'IT', 'TO', 'GROW', 'BEFORE', 'NIGHT', 'AND', 'AS', 'MISSUS', 'HARGRAVE', 'IS', 'STILL', 'ABSENT', 'AND', 'THE', 'WEATHER', 'WAS', 'REMARKABLY', 'FINE', 'AND', 'REPAST', 'A', 'DAY', 'WITHOUT', 'SEEING', 'MY', 'TWO', 'FRIENDS', 'MILLSON', 'AND', 'ESTHER', 'EITHER', 'THERE', 'OR', 'HERE'] +533-131564-0002-770: ref=['NO', 'UNLESS', 'YOU', 'CAN', 'TELL', 'ME', 'WHEN', 'TO', 'EXPECT', 'HIM', 'HOME'] +533-131564-0002-770: hyp=['NO', 'UNLESS', 'YOU', 'CAN', 'TELL', 'ME', 'WHEN', 'TO', 'EXPECT', 'HIM', 'HOME'] +533-131564-0003-771: ref=['I', "CAN'T", 'YOU', "DON'T", 'WANT', 'HIM', 'DO', 'YOU'] +533-131564-0003-771: hyp=['I', "CAN'T", 'EVEN', 'ONE', 'WANTS', 'HIM', 'DO', 'YOU'] +533-131564-0004-772: ref=['IT', 'IS', 'A', 'RESOLUTION', 'YOU', 'OUGHT', 'TO', 'HAVE', 'FORMED', 'LONG', 'AGO'] +533-131564-0004-772: hyp=['IT', 'IS', 'A', 'RESOLUTION', 'YOU', 'ARE', 'REFORMED', 'LONG', 'AGO'] +533-131564-0005-773: ref=['WE', 'ALL', 'HAVE', 'A', 'BIT', 'OF', 'A', 'LIKING', 'FOR', 'HIM', 'AT', 'THE', 'BOTTOM', 'OF', 'OUR', 'HEARTS', 'THOUGH', 'WE', "CAN'T", 'RESPECT', 'HIM'] +533-131564-0005-773: hyp=['WE', 'ALL', 'HAVE', 'A', 'BIT', 'OF', 'A', 'LIKING', 
'FOR', 'HIM', 'AT', 'THE', 'BOTTOM', 'OF', 'OUR', 'HEART', 'THOUGH', 'IT', "CAN'T", 'RESPECT', 'HIM'] +533-131564-0006-774: ref=['NO', "I'D", 'RATHER', 'BE', 'LIKE', 'MYSELF', 'BAD', 'AS', 'I', 'AM'] +533-131564-0006-774: hyp=['NO', "I'D", 'RATHER', 'BE', 'LIKE', 'MYSELF', 'WHETHER', 'I', 'AM'] +533-131564-0007-775: ref=['NEVER', 'MIND', 'MY', 'PLAIN', 'SPEAKING', 'SAID', 'I', 'IT', 'IS', 'FROM', 'THE', 'BEST', 'OF', 'MOTIVES'] +533-131564-0007-775: hyp=['NEVER', 'MIND', 'MY', 'PLAIN', 'SPEAKING', 'SAID', 'I', 'IT', 'IS', 'FROM', 'THE', 'BEST', 'OF', 'MOTIVES'] +533-131564-0008-776: ref=['BUT', 'TELL', 'ME', 'SHOULD', 'YOU', 'WISH', 'YOUR', 'SONS', 'TO', 'BE', 'LIKE', 'MISTER', 'HUNTINGDON', 'OR', 'EVEN', 'LIKE', 'YOURSELF'] +533-131564-0008-776: hyp=['BUT', 'TELL', 'ME', 'SHOULD', 'YOU', 'WISH', 'YOUR', 'SONS', 'TO', 'BE', 'LIKE', 'MISTER', 'HUNTINGDON', 'OR', 'EVEN', 'LIKE', 'YOURSELF'] +533-131564-0009-777: ref=['OH', 'NO', 'I', "COULDN'T", 'STAND', 'THAT'] +533-131564-0009-777: hyp=['OH', 'NO', 'I', "COULDN'T", 'STAND', 'THAT'] +533-131564-0010-778: ref=['FIRE', 'AND', 'FURY'] +533-131564-0010-778: hyp=['FORE', 'AND', 'FURY'] +533-131564-0011-779: ref=['NOW', "DON'T", 'BURST', 'INTO', 'A', 'TEMPEST', 'AT', 'THAT'] +533-131564-0011-779: hyp=['NOW', "DON'T", 'FORCE', 'INTO', 'A', 'TEMPEST', 'AT', 'THAT'] +533-131564-0012-780: ref=['BUT', 'HANG', 'IT', "THAT'S", 'NOT', 'MY', 'FAULT'] +533-131564-0012-780: hyp=['BUT', 'HANG', 'IT', "THAT'S", 'NOT', 'MY', 'FAULT'] +533-131564-0013-781: ref=['NOT', 'YEARS', 'FOR', "SHE'S", 'ONLY', 'FIVE', 'AND', 'TWENTY'] +533-131564-0013-781: hyp=['NOT', 'EARS', 'FOR', "SHE'S", 'ONLY', 'FIVE', 'AND', 'TWENTY'] +533-131564-0014-782: ref=['WHAT', 'WOULD', 'YOU', 'MAKE', 'OF', 'ME', 'AND', 'THE', 'CHILDREN', 'TO', 'BE', 'SURE', 'THAT', 'WORRY', 'HER', 'TO', 'DEATH', 'BETWEEN', 'THEM'] +533-131564-0014-782: hyp=['WHAT', 'DID', 'YOU', 'MAKE', 'OF', 'ME', 'AND', 'THE', 'CHILDREN', 'TO', 'BE', 'SURE', 'THAT', 'WERE', 'HE', 'HURT', 'DEATH', 'BETWEEN', 'THEM'] +533-131564-0015-783: ref=['I', 'KNOW', 'THEY', 'ARE', 'BLESS', 'THEM'] +533-131564-0015-783: hyp=['I', 'KNOW', 'THEY', 'ARE', 'BLESS', 'THEM'] +533-131564-0016-784: ref=['HE', 'FOLLOWED', 'ME', 'INTO', 'THE', 'LIBRARY'] +533-131564-0016-784: hyp=['IF', 'ALL', 'OF', 'ME', 'INTO', 'THE', 'LIBRARY'] +533-131564-0017-785: ref=['I', 'SOUGHT', 'OUT', 'AND', 'PUT', 'INTO', 'HIS', 'HANDS', 'TWO', 'OF', "MILICENT'S", 'LETTERS', 'ONE', 'DATED', 'FROM', 'LONDON', 'AND', 'WRITTEN', 'DURING', 'ONE', 'OF', 'HIS', 'WILDEST', 'SEASONS', 'OF', 'RECKLESS', 'DISSIPATION', 'THE', 'OTHER', 'IN', 'THE', 'COUNTRY', 'DURING', 'A', 'LUCID', 'INTERVAL'] +533-131564-0017-785: hyp=['I', 'SET', 'OUT', 'AND', 'PUT', 'INTO', 'HIS', 'HANDS', 'TWO', 'OF', "MILLSON'S", 'LETTERS', 'ONE', 'DID', 'IT', 'FROM', 'LONDON', 'AND', 'WRITTEN', 'DURING', 'ONE', 'OF', 'HIS', 'WALLACE', 'SEASONS', 'OF', 'RECKLESS', 'DISSIPATION', 'THE', 'OTHER', 'IN', 'THE', 'COUNTRY', 'DURING', 'A', 'LUCID', 'INTERVAL'] +533-131564-0018-786: ref=['THE', 'FORMER', 'WAS', 'FULL', 'OF', 'TROUBLE', 'AND', 'ANGUISH', 'NOT', 'ACCUSING', 'HIM', 'BUT', 'DEEPLY', 'REGRETTING', 'HIS', 'CONNECTION', 'WITH', 'HIS', 'PROFLIGATE', 'COMPANIONS', 'ABUSING', 'MISTER', 'GRIMSBY', 'AND', 'OTHERS', 'INSINUATING', 'BITTER', 'THINGS', 'AGAINST', 'MISTER', 'HUNTINGDON', 'AND', 'MOST', 'INGENIOUSLY', 'THROWING', 'THE', 'BLAME', 'OF', 'HER', "HUSBAND'S", 'MISCONDUCT', 'ON', 'TO', 'OTHER', "MEN'S", 'SHOULDERS'] +533-131564-0018-786: hyp=['THE', 'FORMER', 'WAS', 'FULL', 'OF', 'TROUBLE', 'AND', 
'ANGUISH', 'NOT', 'ACCUSING', 'HIM', 'BUT', 'DEEPLY', 'REGRETTING', 'HIS', 'CONNECTION', 'WITH', 'HIS', 'PROFLIGATE', 'COMPANIONS', 'ABUSING', 'MISTER', 'GRIMSBY', 'AND', 'OTHERS', 'INSINUATING', 'BITTER', 'THINGS', 'AGAINST', 'MISTER', 'HUNTON', 'AND', 'MOST', 'INGENUOUSLY', 'THROWING', 'THE', 'BLAME', 'OF', 'HER', "HUSBAND'S", 'MISCONDUCT', 'ON', 'THE', 'OTHER', "MAN'S", 'SHOULDERS'] +533-131564-0019-787: ref=["I'VE", 'BEEN', 'A', 'CURSED', 'RASCAL', 'GOD', 'KNOWS', 'SAID', 'HE', 'AS', 'HE', 'GAVE', 'IT', 'A', 'HEARTY', 'SQUEEZE', 'BUT', 'YOU', 'SEE', 'IF', 'I', "DON'T", 'MAKE', 'AMENDS', 'FOR', 'IT', 'D', 'N', 'ME', 'IF', 'I', "DON'T"] +533-131564-0019-787: hyp=["I'VE", 'BEEN', 'ACCURSED', 'RASCAL', 'GOD', 'KNOWS', 'SAID', 'HE', 'AS', 'HE', 'GAVE', 'IT', 'AN', 'EARTHLY', 'SQUEEZE', 'BUT', 'YOU', 'SEE', 'IF', 'I', "DON'T", 'MAKE', 'AMENDS', 'FOR', 'IT', 'THEN', 'ME', 'IF', 'I', "DON'T"] +533-131564-0020-788: ref=['IF', 'YOU', 'INTEND', 'TO', 'REFORM', 'INVOKE', "GOD'S", 'BLESSING', 'HIS', 'MERCY', 'AND', 'HIS', 'AID', 'NOT', 'HIS', 'CURSE'] +533-131564-0020-788: hyp=['IF', 'YOU', 'INTEND', 'TO', 'REFORM', 'INVOKE', "GOD'S", 'BLESSING', 'IS', 'MERCY', 'IN', 'THIS', 'APE', 'NOT', 'DISCOURSE'] +533-131564-0021-789: ref=['GOD', 'HELP', 'ME', 'THEN', 'FOR', "I'M", 'SURE', 'I', 'NEED', 'IT'] +533-131564-0021-789: hyp=['GOD', 'HELP', 'ME', 'THEN', 'FOR', 'I', 'AM', 'SURE', 'I', 'NEED', 'IT'] +533-131564-0022-790: ref=["WHERE'S", 'MILICENT'] +533-131564-0022-790: hyp=["WHERE'S", 'MILLICENT'] +533-131564-0023-791: ref=['NAY', 'NOT', 'I', 'SAID', 'HE', 'TURNING', 'HER', 'ROUND', 'AND', 'PUSHING', 'HER', 'TOWARDS', 'ME'] +533-131564-0023-791: hyp=['NAY', 'NOT', 'I', 'SAID', 'HE', 'TURNING', 'ROUND', 'AND', 'PUSHING', 'TOWARDS', 'ME'] +533-131564-0024-792: ref=['MILICENT', 'FLEW', 'TO', 'THANK', 'ME', 'OVERFLOWING', 'WITH', 'GRATITUDE'] +533-131564-0024-792: hyp=['MILLISON', 'FLUD', 'TO', 'THANK', 'ME', 'OVERWHELMING', 'ITS', 'GRATITUDE'] +533-131564-0025-793: ref=['CRIED', 'SHE', 'I', "COULDN'T", 'HAVE', 'INFLUENCED', 'HIM', "I'M", 'SURE', 'BY', 'ANYTHING', 'THAT', 'I', 'COULD', 'HAVE', 'SAID'] +533-131564-0025-793: hyp=['CRIED', 'SHE', 'I', "COULDN'T", 'HAVE', 'EVILISED', 'HIM', "I'M", 'SURE', 'BY', 'ANYTHING', 'THAT', 'I', 'COULD', 'HAVE', 'SAID'] +533-131564-0026-794: ref=['YOU', 'NEVER', 'TRIED', 'ME', 'MILLY', 'SAID', 'HE'] +533-131564-0026-794: hyp=['YOU', 'NEVER', 'TRIED', 'ME', 'MERELY', 'SAID', 'HE'] +533-131564-0027-795: ref=['AFTER', 'THAT', 'THEY', 'WILL', 'REPAIR', 'TO', 'THEIR', 'COUNTRY', 'HOME'] +533-131564-0027-795: hyp=['AFTER', 'THAT', 'THEY', 'WILL', 'REPAIR', 'TO', 'THEIR', 'COUNTRY', 'HOME'] +5442-32873-0000-1365: ref=['CAPTAIN', 'LAKE', 'DID', 'NOT', 'LOOK', 'AT', 'ALL', 'LIKE', 'A', 'LONDON', 'DANDY', 'NOW'] +5442-32873-0000-1365: hyp=['CAPTAIN', 'LAKE', 'DID', 'NOT', 'LOOK', 'AT', 'ON', 'LIKE', 'A', 'LONDON', 'DANDY', 'NOW'] +5442-32873-0001-1366: ref=['THERE', 'WAS', 'A', 'VERY', 'NATURAL', 'SAVAGERY', 'AND', 'DEJECTION', 'THERE', 'AND', 'A', 'WILD', 'LEER', 'IN', 'HIS', 'YELLOW', 'EYES', 'RACHEL', 'SAT', 'DOWN'] +5442-32873-0001-1366: hyp=['THERE', 'WAS', 'A', 'VERY', 'NATURAL', 'SAVAGERY', 'AND', 'DEJECTION', 'THEN', 'AND', 'A', 'WIND', "URINA'S", 'YELLOW', 'EYES', 'RACHEL', 'SAT', 'DOWN'] +5442-32873-0002-1367: ref=['A', 'SLAVE', 'ONLY', 'THINK', 'A', 'SLAVE'] +5442-32873-0002-1367: hyp=['A', 'SLAVE', 'ONLY', 'THINK', 'A', 'SLAVE'] +5442-32873-0003-1368: ref=['OH', 'FRIGHTFUL', 'FRIGHTFUL', 'IS', 'IT', 'A', 'DREAM'] +5442-32873-0003-1368: hyp=['OH', 'FRIGHTFUL', 
'FRIGHTFUL', 'IS', 'IT', 'A', 'DREAM'] +5442-32873-0004-1369: ref=['OH', 'FRIGHTFUL', 'FRIGHTFUL'] +5442-32873-0004-1369: hyp=['ALL', 'FRIGHTFUL', 'CRIED', 'FAWN'] +5442-32873-0005-1370: ref=['STANLEY', 'STANLEY', 'IT', 'WOULD', 'BE', 'MERCY', 'TO', 'KILL', 'ME', 'SHE', 'BROKE', 'OUT', 'AGAIN'] +5442-32873-0005-1370: hyp=['STANLEY', 'STANLEY', 'IT', 'WOULD', 'BE', 'MERCY', 'TO', 'KILL', 'ME', 'SHE', 'BROKE', 'HER', 'AGAIN'] +5442-32873-0006-1371: ref=['BRIGHT', 'AND', 'NATTY', 'WERE', 'THE', 'CHINTZ', 'CURTAINS', 'AND', 'THE', 'LITTLE', 'TOILET', 'SET', 'OUT', 'NOT', 'INELEGANTLY', 'AND', 'HER', 'PET', 'PIPING', 'GOLDFINCH', 'ASLEEP', 'ON', 'HIS', 'PERCH', 'WITH', 'HIS', 'BIT', 'OF', 'SUGAR', 'BETWEEN', 'THE', 'WIRES', 'OF', 'HIS', 'CAGE', 'HER', 'PILLOW', 'SO', 'WHITE', 'AND', 'UNPRESSED', 'WITH', 'ITS', 'LITTLE', 'EDGING', 'OF', 'LACE'] +5442-32873-0006-1371: hyp=['BRIGHT', 'AND', 'NATTY', 'WITH', 'A', 'CHIN', 'CURTAINS', 'AND', 'THE', 'LITTLE', 'TOILET', 'SET', 'OUT', 'NOT', 'IN', 'ELEGANTLY', 'AND', 'HER', 'BED', 'PIPING', 'GOLDFINCH', 'ASLEEP', 'ON', 'HIS', 'PERCH', 'WITH', 'HIS', 'BIT', 'OF', 'SUGAR', 'BETWEEN', 'THE', 'WISE', 'OF', 'HIS', 'CAGE', 'HER', 'PILLOW', 'SO', 'WHITE', 'AND', 'UNPRESSED', 'WITH', 'ITS', 'LITTLE', 'EDGING', 'OF', 'LACE'] +5442-32873-0007-1372: ref=['WHEN', 'HE', 'CAME', 'BACK', 'TO', 'THE', 'DRAWING', 'ROOM', 'A', 'TOILET', 'BOTTLE', 'OF', 'EAU', 'DE', 'COLOGNE', 'IN', 'HIS', 'HAND', 'WITH', 'HER', 'LACE', 'HANDKERCHIEF', 'HE', 'BATHED', 'HER', 'TEMPLES', 'AND', 'FOREHEAD'] +5442-32873-0007-1372: hyp=['WHEN', 'HE', 'CAME', 'BACK', 'TO', 'THE', 'DRAWING', 'ROOM', 'I', 'TOLD', 'IT', 'WHAT', 'HE', 'OF', 'OVERLUME', 'IN', 'HIS', 'HAND', 'WITH', 'HER', 'LACE', 'HANDKERCHIEF', 'HE', 'BATHED', 'HER', 'TEMPLE', 'AND', 'FOREHEAD'] +5442-32873-0008-1373: ref=['THERE', 'WAS', 'NOTHING', 'VERY', 'BROTHERLY', 'IN', 'HIS', 'LOOK', 'AS', 'HE', 'PEERED', 'INTO', 'HER', 'PALE', 'SHARP', 'FEATURES', 'DURING', 'THE', 'PROCESS'] +5442-32873-0008-1373: hyp=['THERE', 'WAS', 'NOTHING', 'VERY', 'BROTHERLY', 'IN', 'HIS', 'LOOK', 'AS', 'HE', 'PEERED', 'INTO', 'A', 'PALE', 'SHARP', 'FEATURES', 'DURING', 'THE', 'PROCESS'] +5442-32873-0009-1374: ref=['THERE', "DON'T", 'MIND', 'ME', 'SHE', 'SAID', 'SHARPLY', 'AND', 'GETTING', 'UP', 'SHE', 'LOOKED', 'DOWN', 'AT', 'HER', 'DRESS', 'AND', 'THIN', 'SHOES', 'AND', 'SEEMING', 'TO', 'RECOLLECT', 'HERSELF', 'SHE', 'TOOK', 'THE', 'CANDLE', 'HE', 'HAD', 'JUST', 'SET', 'DOWN', 'AND', 'WENT', 'SWIFTLY', 'TO', 'HER', 'ROOM'] +5442-32873-0009-1374: hyp=['THERE', "DON'T", 'MIND', 'ME', 'SHE', 'SAID', 'SHARPLY', 'AND', 'GETTING', 'UP', 'SHE', 'LOOKED', 'DOWN', 'AT', 'HER', 'DRESS', 'AND', 'THIN', 'SHOES', 'AND', 'SEEMING', 'TO', 'RECOLLECT', 'HERSELF', 'SHE', 'TOOK', 'THE', 'CANDLE', 'HE', 'HAD', 'JUST', 'SAT', 'DOWN', 'AND', 'WHEN', 'SWIFTLY', 'TO', 'HER', 'ROOM'] +5442-32873-0010-1375: ref=['AND', 'SHE', 'THREW', 'BACK', 'HER', 'VEIL', 'AND', 'GOING', 'HURRIEDLY', 'TO', 'THE', 'TOILET', 'MECHANICALLY', 'SURVEYED', 'HERSELF', 'IN', 'THE', 'GLASS'] +5442-32873-0010-1375: hyp=['AND', 'SHE', 'THREW', 'BACK', 'HER', 'VEAL', 'AND', 'GOING', 'HURRIEDLY', 'TO', 'THE', 'DOLIGHT', 'MECHANICALLY', 'SURVEYED', 'HERSELF', 'FROM', 'THE', 'GLANCE'] +5442-32873-0011-1376: ref=['RACHEL', 'LAKE', 'RACHEL', 'LAKE', 'WHAT', 'ARE', 'YOU', 'NOW'] +5442-32873-0011-1376: hyp=['ORIGINALLY', 'LATER', 'LAKE', 'WHAT', 'ARE', 'YOU', 'NOW'] +5442-32873-0012-1377: ref=["I'LL", 'STAY', 'HERE', 'THAT', 'IS', 'IN', 'THE', 'DRAWING', 'ROOM', 'SHE', 'ANSWERED', 'AND', 'THE', 'FACE', 
'WAS', 'WITHDRAWN'] +5442-32873-0012-1377: hyp=["I'LL", 'STAY', 'HERE', 'THAT', 'IS', 'IN', 'THE', 'DRAWING', 'ROOM', 'SHE', 'ANSWERED', 'AND', 'THE', 'FACE', 'WAS', 'WITHDRAWN'] +5442-32873-0013-1378: ref=['HE', 'SLACKENED', 'HIS', 'PACE', 'AND', 'TAPPED', 'SHARPLY', 'AT', 'THE', 'LITTLE', 'WINDOW', 'OF', 'THAT', 'MODEST', 'POST', 'OFFICE', 'AT', 'WHICH', 'THE', 'YOUNG', 'LADIES', 'IN', 'THE', 'PONY', 'CARRIAGE', 'HAD', 'PULLED', 'UP', 'THE', 'DAY', 'BEFORE', 'AND', 'WITHIN', 'WHICH', 'LUKE', 'WAGGOT', 'WAS', 'WONT', 'TO', 'SLEEP', 'IN', 'A', 'SORT', 'OF', 'WOODEN', 'BOX', 'THAT', 'FOLDED', 'UP', 'AND', 'APPEARED', 'TO', 'BE', 'A', 'CHEST', 'OF', 'DRAWERS', 'ALL', 'DAY'] +5442-32873-0013-1378: hyp=['HIS', 'CLACKENED', 'HIS', 'FACE', 'AND', 'TAP', 'SHARPLY', 'AT', 'THE', 'LITTLE', 'WINDOW', 'OF', 'THE', 'MODEST', 'POST', 'OFFICE', 'AT', 'WHICH', 'THE', 'YOUNG', 'LADIES', 'IN', 'THE', 'PONY', 'CARRIAGE', 'HAD', 'PULLED', 'UP', 'THE', 'DAY', 'BEFORE', 'AND', 'WITHIN', 'WHICH', 'LUKE', 'WAGGET', 'WAS', 'WONT', 'TO', 'SLEEP', 'IN', 'A', 'SORT', 'OF', 'WOODEN', 'BOX', 'THAT', 'FOLDED', 'UP', 'AND', 'APPEARED', 'TO', 'BE', 'A', 'CHEST', 'OF', 'DRAWERS', 'ALL', 'DAY'] +5442-32873-0014-1379: ref=['LUKE', 'TOOK', 'CARE', 'OF', 'MISTER', "LARKIN'S", 'DOGS', 'AND', 'GROOMED', 'MISTER', "WYLDER'S", 'HORSE', 'AND', 'CLEANED', 'UP', 'HIS', 'DOG', 'CART', 'FOR', 'MARK', 'BEING', 'CLOSE', 'ABOUT', 'MONEY', 'AND', 'FINDING', 'THAT', 'THE', 'THING', 'WAS', 'TO', 'BE', 'DONE', 'MORE', 'CHEAPLY', 'THAT', 'WAY', 'PUT', 'UP', 'HIS', 'HORSE', 'AND', 'DOG', 'CART', 'IN', 'THE', 'POST', 'OFFICE', 'PREMISES', 'AND', 'SO', 'EVADED', 'THE', 'LIVERY', 'CHARGES', 'OF', 'THE', 'BRANDON', 'ARMS'] +5442-32873-0014-1379: hyp=['LOOK', 'TO', 'CARE', 'OF', 'MISTER', 'LARKINS', 'DOG', 'AND', 'GROOMED', 'MISTER', "WILDER'S", 'HORSE', 'AND', 'CLEANED', 'UP', 'HIS', 'DOOR', 'CART', 'FOR', 'MARK', 'BEING', 'CLOSE', 'ABOUT', 'MONEY', 'AND', 'FINDING', 'THAT', 'THE', 'THING', 'WAS', 'TO', 'BE', 'DONE', 'MORE', 'CHEAPLY', 'THAT', 'WAY', 'PUT', 'UP', 'HIS', 'HORSE', 'AND', 'DOG', 'CART', 'IN', 'THE', 'POST', 'OF', 'HIS', 'PREMISES', 'AND', 'SO', 'EVADED', 'THE', 'LIVERY', 'CHARGES', 'OF', 'THE', 'BRANDON', 'ARMS'] +5442-32873-0015-1380: ref=['BUT', 'LUKE', 'WAS', 'NOT', 'THERE', 'AND', 'CAPTAIN', 'LAKE', 'RECOLLECTING', 'HIS', 'HABITS', 'AND', 'HIS', 'HAUNT', 'HURRIED', 'ON', 'TO', 'THE', 'SILVER', 'LION', 'WHICH', 'HAS', 'ITS', 'GABLE', 'TOWARDS', 'THE', 'COMMON', 'ONLY', 'ABOUT', 'A', 'HUNDRED', 'STEPS', 'AWAY', 'FOR', 'DISTANCES', 'ARE', 'NOT', 'GREAT', 'IN', 'GYLINGDEN'] +5442-32873-0015-1380: hyp=['THE', 'LUKE', 'WAS', 'KNOWN', 'THERE', 'AND', 'CAPTAIN', 'LAKE', 'RECOLLECTING', 'HIS', 'HABITS', 'AND', 'HIS', 'HAUNT', 'HURRIED', 'ON', 'TO', 'THE', 'SILVER', 'LION', 'WHICH', 'HAS', 'ITS', 'CABLE', 'TOWARDS', 'A', 'COMMON', 'ONLY', 'ABOUT', 'A', 'HUNDRED', 'STEPS', 'AWAY', 'FOR', 'DISTANCES', 'ARE', 'NOT', 'GREAT', 'IN', 'GYLINGDEN'] +5442-32873-0016-1381: ref=['HERE', 'WERE', 'THE', 'FLOW', 'OF', 'SOUL', 'AND', 'OF', 'STOUT', 'LONG', 'PIPES', 'LONG', 'YARNS', 'AND', 'TOLERABLY', 'LONG', 'CREDITS', 'AND', 'THE', 'HUMBLE', 'SCAPEGRACES', 'OF', 'THE', 'TOWN', 'RESORTED', 'THITHER', 'FOR', 'THE', 'PLEASURES', 'OF', 'A', 'CLUB', 'LIFE', 'AND', 'OFTEN', 'REVELLED', 'DEEP', 'INTO', 'THE', 'SMALL', 'HOURS', 'OF', 'THE', 'MORNING'] +5442-32873-0016-1381: hyp=['HERE', 'WERE', 'THE', 'FLOW', 'OF', 'SOUL', 'AND', 'OF', 'STOUT', 'LONG', 'PIPES', 'LONG', 'YARNS', 'AND', 'TOLERABLY', 'LONG', 'CREDITS', 'AND', 'THE', 'HUMBLE', 'SKIPPED', 'BRACES', 
'OF', 'THE', 'TOWN', 'RESORTED', 'THITHER', 'FOR', 'THE', 'PLEASURES', 'OF', 'A', 'CLUB', 'LIFE', 'AND', 'OFTEN', 'REVELLED', 'DEEP', 'INTO', 'THE', 'SMALL', 'HOURS', 'OF', 'THE', 'MORNING'] +5442-32873-0017-1382: ref=['LOSE', 'NO', 'TIME', 'AND', "I'LL", 'GIVE', 'YOU', 'HALF', 'A', 'CROWN'] +5442-32873-0017-1382: hyp=['LOSE', 'NO', 'TIME', 'WHEN', "I'LL", 'GIVE', 'YOU', 'HALF', 'A', 'CROWN'] +5442-32873-0018-1383: ref=['LUKE', 'STUCK', 'ON', 'HIS', 'GREASY', 'WIDEAWAKE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'MORE', 'THE', 'DOG', 'CART', 'WAS', 'TRUNDLED', 'OUT', 'INTO', 'THE', 'LANE', 'AND', 'THE', 'HORSE', 'HARNESSED', 'WENT', 'BETWEEN', 'THE', 'SHAFTS', 'WITH', 'THAT', 'WONDERFUL', 'CHEERFULNESS', 'WITH', 'WHICH', 'THEY', 'BEAR', 'TO', 'BE', 'CALLED', 'UP', 'UNDER', 'STARTLING', 'CIRCUMSTANCES', 'AT', 'UNSEASONABLE', 'HOURS'] +5442-32873-0018-1383: hyp=['LUKE', 'STUCK', 'ON', 'HIS', 'GREASY', 'WIDE', 'AWAKE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'MORE', 'THE', 'DOOR', 'CART', 'WAS', 'TUMBLED', 'OUT', 'INTO', 'THE', 'LANE', 'AND', 'THE', 'HORSE', 'HARNESSED', 'WENT', 'BETWEEN', 'THE', 'SHAFTS', 'WITH', 'THAT', 'WONDERFUL', 'CHEERFULNESS', 'WITH', 'WHICH', 'THEY', 'BEARED', 'TO', 'BE', 'CALLED', 'UP', 'AND', 'THE', 'STARTLING', 'CIRCUMSTANCES', 'THAT', 'UNSEASONABLE', 'HOURS'] +5442-32873-0019-1384: ref=['IF', 'I', 'THOUGHT', "YOU'D", 'FAIL', 'ME', 'NOW', 'TAMAR', 'I', 'SHOULD', 'NEVER', 'COME', 'BACK', 'GOOD', 'NIGHT', 'TAMAR'] +5442-32873-0019-1384: hyp=['IF', 'I', 'THOUGHT', "YOU'D", 'FILL', 'ME', 'NOW', 'TO', 'MORROW', 'I', 'SHOULD', 'NEVER', 'COME', 'BACK', 'GOOD', 'NIGHT', 'TO', 'MORROW'] +5442-41168-0000-1385: ref=['THE', 'ACT', 'SAID', 'THAT', 'IN', 'CASE', 'OF', 'DIFFERENCE', 'OF', 'OPINION', 'THERE', 'MUST', 'BE', 'A', 'BALLOT'] +5442-41168-0000-1385: hyp=['THE', 'ACT', 'SAID', 'THAT', 'IN', 'CASE', 'OF', 'DIFFERENCE', 'OF', 'OPINION', 'THERE', 'MUST', 'BE', 'A', 'BALLOT'] +5442-41168-0001-1386: ref=['HE', 'WENT', 'UP', 'TO', 'THE', 'TABLE', 'AND', 'STRIKING', 'IT', 'WITH', 'HIS', 'FINGER', 'RING', 'HE', 'SHOUTED', 'LOUDLY', 'A', 'BALLOT'] +5442-41168-0001-1386: hyp=['HE', 'WENT', 'UP', 'TO', 'THE', 'TABLE', 'AND', 'STRIKING', 'IT', 'WITH', 'HIS', 'FINGERING', 'HE', 'SHOUTED', 'LOUDLY', 'A', 'BALLOT'] +5442-41168-0002-1387: ref=['HE', 'WAS', 'SHOUTING', 'FOR', 'THE', 'VERY', 'COURSE', 'SERGEY', 'IVANOVITCH', 'HAD', 'PROPOSED', 'BUT', 'IT', 'WAS', 'EVIDENT', 'THAT', 'HE', 'HATED', 'HIM', 'AND', 'ALL', 'HIS', 'PARTY', 'AND', 'THIS', 'FEELING', 'OF', 'HATRED', 'SPREAD', 'THROUGH', 'THE', 'WHOLE', 'PARTY', 'AND', 'ROUSED', 'IN', 'OPPOSITION', 'TO', 'IT', 'THE', 'SAME', 'VINDICTIVENESS', 'THOUGH', 'IN', 'A', 'MORE', 'SEEMLY', 'FORM', 'ON', 'THE', 'OTHER', 'SIDE'] +5442-41168-0002-1387: hyp=['HE', 'WAS', 'SHOUTING', 'FOR', 'THE', 'VERY', 'COARSE', 'SURGY', 'IVANOVITCH', 'HAD', 'PROPOSED', 'BUT', 'IT', 'WAS', 'EVIDENT', 'THAT', 'HE', 'HATED', 'HIM', 'AND', 'ALL', 'HIS', 'PARTY', 'AND', 'THIS', 'FEELING', 'OF', 'HATRED', 'SPREAD', 'THROUGH', 'THE', 'WHOLE', 'PARTY', 'AND', 'RALPHED', 'IN', 'OUR', 'POSITION', 'TO', 'IT', 'THE', 'SAME', 'VINDICTIVENESS', 'THOUGH', 'IN', 'A', 'MORE', 'SEEMLY', 'FORM', 'ON', 'THE', 'OTHER', 'SIDE'] +5442-41168-0003-1388: ref=['SHOUTS', 'WERE', 'RAISED', 'AND', 'FOR', 'A', 'MOMENT', 'ALL', 'WAS', 'CONFUSION', 'SO', 'THAT', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'HAD', 'TO', 'CALL', 'FOR', 'ORDER', 'A', 'BALLOT'] +5442-41168-0003-1388: hyp=['SHOUTS', 'WERE', 'RAISED', 'AND', 'FOR', 'A', 'MOMENT', 'ALL', 'WAS', 'CONFUSION', 'SO', 'THAT', 'THE', 'MARSHAL', 
'OF', 'THE', 'PROVINCE', 'HAD', 'TO', 'CALL', 'FOR', 'OTTO', 'A', 'BALLOT'] +5442-41168-0004-1389: ref=['WE', 'SHED', 'OUR', 'BLOOD', 'FOR', 'OUR', 'COUNTRY'] +5442-41168-0004-1389: hyp=['WE', 'SHUT', 'OUR', 'BLOOD', 'FOR', 'OUR', 'COUNTRY'] +5442-41168-0005-1390: ref=['THE', 'CONFIDENCE', 'OF', 'THE', 'MONARCH', 'NO', 'CHECKING', 'THE', 'ACCOUNTS', 'OF', 'THE', 'MARSHAL', "HE'S", 'NOT', 'A', 'CASHIER', 'BUT', "THAT'S", 'NOT', 'THE', 'POINT'] +5442-41168-0005-1390: hyp=['THE', 'CONFIDENCE', 'OF', 'THE', 'MONARCH', 'NO', 'COOKING', 'THE', 'ACCOUNTS', 'OF', 'THE', 'MARTIAN', 'IS', 'NOT', 'A', 'CASHIER', 'BUT', "THAT'S", 'NOT', 'THE', 'POINT'] +5442-41168-0006-1391: ref=['VOTES', 'PLEASE', 'BEASTLY'] +5442-41168-0006-1391: hyp=['VOTES', 'PLEASE', 'PIECE'] +5442-41168-0007-1392: ref=['THEY', 'EXPRESSED', 'THE', 'MOST', 'IMPLACABLE', 'HATRED'] +5442-41168-0007-1392: hyp=['THEY', 'EXPRESSED', 'THE', 'MOST', 'IMPLACABLE', 'HATRED'] +5442-41168-0008-1393: ref=['LEVIN', 'DID', 'NOT', 'IN', 'THE', 'LEAST', 'UNDERSTAND', 'WHAT', 'WAS', 'THE', 'MATTER', 'AND', 'HE', 'MARVELED', 'AT', 'THE', 'PASSION', 'WITH', 'WHICH', 'IT', 'WAS', 'DISPUTED', 'WHETHER', 'OR', 'NOT', 'THE', 'DECISION', 'ABOUT', 'FLEROV', 'SHOULD', 'BE', 'PUT', 'TO', 'THE', 'VOTE'] +5442-41168-0008-1393: hyp=['LEVIN', 'DID', 'NOT', 'IN', 'THE', 'LEAST', 'UNDERSTAND', 'WHAT', 'WAS', 'THE', 'MATTER', 'AND', 'HE', 'MARVELLED', 'AT', 'THE', 'PASSION', 'WITH', 'WHICH', 'IT', 'WAS', 'DISPUTED', 'WHETHER', 'OR', 'NOT', 'THE', 'DECISION', 'ABOUT', 'FLAREOV', 'SHOULD', 'BE', 'PUT', 'TO', 'THE', 'VOTE'] +5442-41168-0009-1394: ref=['HE', 'FORGOT', 'AS', 'SERGEY', 'IVANOVITCH', 'EXPLAINED', 'TO', 'HIM', 'AFTERWARDS', 'THIS', 'SYLLOGISM', 'THAT', 'IT', 'WAS', 'NECESSARY', 'FOR', 'THE', 'PUBLIC', 'GOOD', 'TO', 'GET', 'RID', 'OF', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'THAT', 'TO', 'GET', 'RID', 'OF', 'THE', 'MARSHAL', 'IT', 'WAS', 'NECESSARY', 'TO', 'HAVE', 'A', 'MAJORITY', 'OF', 'VOTES', 'THAT', 'TO', 'GET', 'A', 'MAJORITY', 'OF', 'VOTES', 'IT', 'WAS', 'NECESSARY', 'TO', 'SECURE', "FLEROV'S", 'RIGHT', 'TO', 'VOTE', 'THAT', 'TO', 'SECURE', 'THE', 'RECOGNITION', 'OF', "FLEROV'S", 'RIGHT', 'TO', 'VOTE', 'THEY', 'MUST', 'DECIDE', 'ON', 'THE', 'INTERPRETATION', 'TO', 'BE', 'PUT', 'ON', 'THE', 'ACT'] +5442-41168-0009-1394: hyp=['HE', 'FORGOT', 'AS', 'SO', 'GEVINOVITCH', 'EXPLAINED', 'TO', 'HIM', 'AFTERWARDS', 'THIS', 'SILLIGIOUS', 'EM', 'THAT', 'IT', 'WAS', 'NECESSARY', 'FOR', 'THE', 'PUBLIC', 'GOOD', 'TO', 'GET', 'RID', 'OF', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'THAT', 'TO', 'GET', 'HER', 'TO', 'THE', 'MARSHAL', 'IT', 'WAS', 'NECESSARY', 'TO', 'HAVE', 'A', 'MAJORITY', 'OF', 'VOTES', 'THAT', 'TOGETHER', 'MAJORITY', 'OF', 'VOTES', 'IT', 'WAS', 'NECESSARY', 'TO', 'SECURE', "FLIROV'S", 'RIGHT', 'TO', 'VOTE', 'THAT', 'TO', 'SECURED', 'THE', 'RECOGNITION', 'OF', "FLIROV'S", 'RIGHT', 'TO', 'VOTE', 'THEY', 'MUST', 'DECIDE', 'ON', 'THE', 'INTERPRETATION', 'TO', 'BE', 'PUT', 'ON', 'THE', 'ACT'] +5442-41168-0010-1395: ref=['BUT', 'LEVIN', 'FORGOT', 'ALL', 'THAT', 'AND', 'IT', 'WAS', 'PAINFUL', 'TO', 'HIM', 'TO', 'SEE', 'ALL', 'THESE', 'EXCELLENT', 'PERSONS', 'FOR', 'WHOM', 'HE', 'HAD', 'A', 'RESPECT', 'IN', 'SUCH', 'AN', 'UNPLEASANT', 'AND', 'VICIOUS', 'STATE', 'OF', 'EXCITEMENT'] +5442-41168-0010-1395: hyp=['BUT', 'LEVIN', 'FORGOT', 'ALL', 'THAT', 'AND', 'IT', 'WAS', 'PAINFUL', 'TO', 'HIM', 'TO', 'SEE', 'ALL', 'THESE', 'EXCELLENT', 'PERSONS', 'FOR', 'WHOM', 'HE', 'HAD', 'A', 'RESPECT', 'IN', 'SUCH', 'AN', 'UNPLEASANT', 'AND', 'VICIOUS', 'STATE', 'OF', 
'EXCITEMENT'] +5442-41168-0011-1396: ref=['TO', 'ESCAPE', 'FROM', 'THIS', 'PAINFUL', 'FEELING', 'HE', 'WENT', 'AWAY', 'INTO', 'THE', 'OTHER', 'ROOM', 'WHERE', 'THERE', 'WAS', 'NOBODY', 'EXCEPT', 'THE', 'WAITERS', 'AT', 'THE', 'REFRESHMENT', 'BAR'] +5442-41168-0011-1396: hyp=['TO', 'US', 'GIVE', 'FROM', 'THIS', 'PAINFUL', 'FEELING', 'HE', 'WENT', 'AWAY', 'INTO', 'THE', 'OTHER', 'ROOM', 'WHERE', 'THERE', 'WAS', 'NOBODY', 'EXCEPT', 'THE', 'WAITERS', 'AT', 'THE', 'FRESHMENT', 'BAR'] +5442-41168-0012-1397: ref=['HE', 'PARTICULARLY', 'LIKED', 'THE', 'WAY', 'ONE', 'GRAY', 'WHISKERED', 'WAITER', 'WHO', 'SHOWED', 'HIS', 'SCORN', 'FOR', 'THE', 'OTHER', 'YOUNGER', 'ONES', 'AND', 'WAS', 'JEERED', 'AT', 'BY', 'THEM', 'WAS', 'TEACHING', 'THEM', 'HOW', 'TO', 'FOLD', 'UP', 'NAPKINS', 'PROPERLY'] +5442-41168-0012-1397: hyp=['HE', 'PARTICULARLY', 'LIKED', 'THE', 'WAY', 'ONE', 'GREY', 'WAS', 'GOOD', 'WAITER', 'WHO', 'SHOWED', 'US', 'GONE', 'FOR', 'THE', 'OTHER', 'YOUNGER', 'ONES', 'AND', 'WAS', 'JOURED', 'AT', 'BY', 'THEM', 'WAS', 'TEACHING', 'THEM', 'HOW', 'TO', 'FOLD', 'UP', 'NAPKINS', 'PROPERLY'] +5442-41168-0013-1398: ref=['LEVIN', 'ADVANCED', 'BUT', 'UTTERLY', 'FORGETTING', 'WHAT', 'HE', 'WAS', 'TO', 'DO', 'AND', 'MUCH', 'EMBARRASSED', 'HE', 'TURNED', 'TO', 'SERGEY', 'IVANOVITCH', 'WITH', 'THE', 'QUESTION', 'WHERE', 'AM', 'I', 'TO', 'PUT', 'IT'] +5442-41168-0013-1398: hyp=['LEVIN', 'ADVANCED', 'BUT', 'UTTERLY', 'FORGETTING', 'WHAT', 'HE', 'WAS', 'TO', 'DO', 'AND', 'MUCH', 'EMBARRASSED', 'HE', 'TURNED', 'TO', 'SERGEY', 'IVANOVITCH', 'WITH', 'THE', 'QUESTION', 'WHERE', 'AM', 'I', 'TO', 'PUT', 'IT'] +5442-41168-0014-1399: ref=['SERGEY', 'IVANOVITCH', 'FROWNED'] +5442-41168-0014-1399: hyp=['SO', 'YOU', 'IVANOVITCH', 'GROUND'] +5442-41168-0015-1400: ref=['THAT', 'IS', 'A', 'MATTER', 'FOR', 'EACH', "MAN'S", 'OWN', 'DECISION', 'HE', 'SAID', 'SEVERELY'] +5442-41168-0015-1400: hyp=['THAT', 'IS', 'A', 'MATTER', 'FOR', 'EACH', "MAN'S", 'OWN', 'DECISION', 'HE', 'SAID', 'SEVERELY'] +5442-41168-0016-1401: ref=['HAVING', 'PUT', 'IT', 'IN', 'HE', 'RECOLLECTED', 'THAT', 'HE', 'OUGHT', 'TO', 'HAVE', 'THRUST', 'HIS', 'LEFT', 'HAND', 'TOO', 'AND', 'SO', 'HE', 'THRUST', 'IT', 'IN', 'THOUGH', 'TOO', 'LATE', 'AND', 'STILL', 'MORE', 'OVERCOME', 'WITH', 'CONFUSION', 'HE', 'BEAT', 'A', 'HASTY', 'RETREAT', 'INTO', 'THE', 'BACKGROUND'] +5442-41168-0016-1401: hyp=['HAVING', 'PUT', 'IT', 'IN', 'HE', 'RECOLLECTED', 'THAT', 'HE', 'OUGHT', 'HAVE', 'THRUST', 'HIS', 'LEFT', 'HAND', 'TOO', 'AND', 'SO', 'HE', 'THRUST', 'IT', 'THOUGH', 'TOO', 'LATE', 'AND', 'STILL', 'MORE', 'OVERCOME', 'WITH', 'CONFUSION', 'HE', 'BEAT', 'A', 'HASTY', 'RETREAT', 'INTO', 'THE', 'BACKGROUND'] +5442-41168-0017-1402: ref=['A', 'HUNDRED', 'AND', 'TWENTY', 'SIX', 'FOR', 'ADMISSION', 'NINETY', 'EIGHT', 'AGAINST'] +5442-41168-0017-1402: hyp=['A', 'HUNDRED', 'AND', 'TWENTY', 'SIX', 'FOR', 'ADMISSION', 'NINETY', 'EIGHT', 'AGAINST'] +5442-41168-0018-1403: ref=['SANG', 'OUT', 'THE', 'VOICE', 'OF', 'THE', 'SECRETARY', 'WHO', 'COULD', 'NOT', 'PRONOUNCE', 'THE', 'LETTER', 'R'] +5442-41168-0018-1403: hyp=['SANG', 'ALL', 'THE', 'VOICE', 'OF', 'THE', 'SECRETARY', 'WHO', 'COULD', 'NOT', 'PRONOUNCE', 'A', 'LETTER', 'R'] +5442-41168-0019-1404: ref=['THEN', 'THERE', 'WAS', 'A', 'LAUGH', 'A', 'BUTTON', 'AND', 'TWO', 'NUTS', 'WERE', 'FOUND', 'IN', 'THE', 'BOX'] +5442-41168-0019-1404: hyp=['THEN', 'THERE', 'WAS', 'A', 'LAUGH', 'AT', 'BOTTOM', 'AND', 'TWO', 'KNOTS', 'WERE', 'FOUND', 'IN', 'THE', 'BOX'] +5442-41168-0020-1405: ref=['BUT', 'THE', 'OLD', 'PARTY', 'DID', 'NOT', 
'CONSIDER', 'THEMSELVES', 'CONQUERED'] +5442-41168-0020-1405: hyp=['BUT', 'THE', 'OLD', 'PARTY', 'DID', 'NOT', 'CONSIDER', 'THEMSELVES', 'CONQUERED'] +5442-41168-0021-1406: ref=['IN', 'REPLY', 'SNETKOV', 'SPOKE', 'OF', 'THE', 'TRUST', 'THE', 'NOBLEMEN', 'OF', 'THE', 'PROVINCE', 'HAD', 'PLACED', 'IN', 'HIM', 'THE', 'AFFECTION', 'THEY', 'HAD', 'SHOWN', 'HIM', 'WHICH', 'HE', 'DID', 'NOT', 'DESERVE', 'AS', 'HIS', 'ONLY', 'MERIT', 'HAD', 'BEEN', 'HIS', 'ATTACHMENT', 'TO', 'THE', 'NOBILITY', 'TO', 'WHOM', 'HE', 'HAD', 'DEVOTED', 'TWELVE', 'YEARS', 'OF', 'SERVICE'] +5442-41168-0021-1406: hyp=['IN', 'THE', 'PLACE', 'NED', 'GOFF', 'SPOKE', 'OF', 'THE', 'TRUST', 'AND', 'NOBLEMEN', 'OF', 'THE', 'PROVINCE', 'HAD', 'PLACED', 'IN', 'HIM', 'THE', 'AFFECTANT', 'THEY', 'HAD', 'SHOWN', 'HIM', 'WHICH', 'HE', 'DID', 'NOT', 'DESERVE', 'AS', 'HIS', 'ONLY', 'MERIT', 'HAD', 'BEEN', 'HIS', 'ATTACHMENT', 'TO', 'THE', 'NOBILITY', 'TO', 'WHOM', 'HE', 'HAD', 'DEVOTED', 'TWELVE', 'YEARS', 'OF', 'SERVICE'] +5442-41168-0022-1407: ref=['THIS', 'EXPRESSION', 'IN', 'THE', "MARSHAL'S", 'FACE', 'WAS', 'PARTICULARLY', 'TOUCHING', 'TO', 'LEVIN', 'BECAUSE', 'ONLY', 'THE', 'DAY', 'BEFORE', 'HE', 'HAD', 'BEEN', 'AT', 'HIS', 'HOUSE', 'ABOUT', 'HIS', 'TRUSTEE', 'BUSINESS', 'AND', 'HAD', 'SEEN', 'HIM', 'IN', 'ALL', 'HIS', 'GRANDEUR', 'A', 'KIND', 'HEARTED', 'FATHERLY', 'MAN'] +5442-41168-0022-1407: hyp=['THIS', 'EXPRESSION', 'IN', 'THE', "MARSHAL'S", 'FACE', 'WAS', 'PARTICULARLY', 'TOUCHING', 'TO', 'LEVIN', 'BECAUSE', 'ONLY', 'THE', 'DAY', 'BEFORE', 'HE', 'HAD', 'BEEN', 'AT', 'HIS', 'HOUSE', 'ABOUT', 'HIS', 'TRUSTY', 'BUSINESS', 'AND', 'HAD', 'SEEN', 'HIM', 'IN', 'ALL', 'HIS', 'GRANDEUR', 'A', 'KIND', 'HEARTED', 'FATHERLY', 'MAN'] +5442-41168-0023-1408: ref=['IF', 'THERE', 'ARE', 'MEN', 'YOUNGER', 'AND', 'MORE', 'DESERVING', 'THAN', 'I', 'LET', 'THEM', 'SERVE'] +5442-41168-0023-1408: hyp=['IF', 'THERE', 'ARE', 'MEN', 'YOUNGER', 'AND', 'MORE', 'DESERVING', 'THAN', 'I', 'LET', 'THEM', 'SERVE'] +5442-41168-0024-1409: ref=['AND', 'THE', 'MARSHAL', 'DISAPPEARED', 'THROUGH', 'A', 'SIDE', 'DOOR'] +5442-41168-0024-1409: hyp=['AND', 'THE', 'MARSHAL', 'DISAPPEARED', 'THROUGH', 'A', 'SIDE', 'DOOR'] +5442-41168-0025-1410: ref=['THEY', 'WERE', 'TO', 'PROCEED', 'IMMEDIATELY', 'TO', 'THE', 'ELECTION'] +5442-41168-0025-1410: hyp=['THERE', 'WERE', 'TO', 'PROCEED', 'IMMEDIATELY', 'TO', 'THE', 'ELECTION'] +5442-41168-0026-1411: ref=['TWO', 'NOBLE', 'GENTLEMEN', 'WHO', 'HAD', 'A', 'WEAKNESS', 'FOR', 'STRONG', 'DRINK', 'HAD', 'BEEN', 'MADE', 'DRUNK', 'BY', 'THE', 'PARTISANS', 'OF', 'SNETKOV', 'AND', 'A', 'THIRD', 'HAD', 'BEEN', 'ROBBED', 'OF', 'HIS', 'UNIFORM'] +5442-41168-0026-1411: hyp=['DO', 'NOBLE', 'GENTLEMEN', 'WHO', 'HAD', 'A', 'WEAKNESS', 'FOR', 'STRONG', 'DRINK', 'HAD', 'BEEN', 'MADE', 'DRUNK', 'BY', 'THE', 'PARTISANS', 'OF', 'SNACKOV', 'AND', 'THE', 'THIRD', 'HAD', 'BEEN', 'ROBBED', 'OF', 'HIS', 'UNIFORM'] +5442-41168-0027-1412: ref=['ON', 'LEARNING', 'THIS', 'THE', 'NEW', 'PARTY', 'HAD', 'MADE', 'HASTE', 'DURING', 'THE', 'DISPUTE', 'ABOUT', 'FLEROV', 'TO', 'SEND', 'SOME', 'OF', 'THEIR', 'MEN', 'IN', 'A', 'SLEDGE', 'TO', 'CLOTHE', 'THE', 'STRIPPED', 'GENTLEMAN', 'AND', 'TO', 'BRING', 'ALONG', 'ONE', 'OF', 'THE', 'INTOXICATED', 'TO', 'THE', 'MEETING'] +5442-41168-0027-1412: hyp=['ON', 'LEARNING', 'THIS', 'THE', 'NEW', 'PARTY', 'HAD', 'MADE', 'HASTE', 'DURING', 'THE', 'DISPUTABLE', 'FLIROFF', 'TO', 'SEND', 'SOME', 'OF', 'THEIR', 'MEN', 'IN', 'A', 'SLEDGE', 'TO', 'CLOTHE', 'THE', 'STRIPPED', 'GENTLEMEN', 'AND', 'TO', 'BRING', 'ALONG', 'ONE', 'OF', 
'THE', 'INTOXICATED', 'TO', 'THE', 'MEETING'] +5442-41169-0000-1413: ref=['LEVIN', 'DID', 'NOT', 'CARE', 'TO', 'EAT', 'AND', 'HE', 'WAS', 'NOT', 'SMOKING', 'HE', 'DID', 'NOT', 'WANT', 'TO', 'JOIN', 'HIS', 'OWN', 'FRIENDS', 'THAT', 'IS', 'SERGEY', 'IVANOVITCH', 'STEPAN', 'ARKADYEVITCH', 'SVIAZHSKY', 'AND', 'THE', 'REST', 'BECAUSE', 'VRONSKY', 'IN', 'HIS', "EQUERRY'S", 'UNIFORM', 'WAS', 'STANDING', 'WITH', 'THEM', 'IN', 'EAGER', 'CONVERSATION'] +5442-41169-0000-1413: hyp=['LEVIN', 'DID', 'NOT', 'CARE', 'TO', 'EAT', 'AND', 'HE', 'WAS', 'NOT', 'SMOKING', 'HE', 'DID', 'NOT', 'WANT', 'TO', 'JOIN', 'HIS', 'OWN', 'FRIENDS', 'THAT', 'IS', 'SOJI', 'IVANOVITCH', 'STEPAN', 'ARKADYEVITCH', 'SVIAZHSKY', 'AND', 'THE', 'REST', 'BECAUSE', 'VRONSKY', 'IN', 'AN', "EQUEROR'S", 'UNIFORM', 'WAS', 'STANDING', 'WITH', 'THEM', 'IN', 'EAGER', 'CONVERSATION'] +5442-41169-0001-1414: ref=['HE', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'SAT', 'DOWN', 'SCANNING', 'THE', 'GROUPS', 'AND', 'LISTENING', 'TO', 'WHAT', 'WAS', 'BEING', 'SAID', 'AROUND', 'HIM'] +5442-41169-0001-1414: hyp=['HE', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'SAT', 'DOWN', 'SCANNING', 'THE', 'GROUPS', 'AND', 'LISTENING', 'TO', 'WHAT', 'WAS', 'BEING', 'SAID', 'AROUND', 'HIM'] +5442-41169-0002-1415: ref=["HE'S", 'SUCH', 'A', 'BLACKGUARD'] +5442-41169-0002-1415: hyp=['IS', 'SUCH', 'A', 'BLANKARD'] +5442-41169-0003-1416: ref=['I', 'HAVE', 'TOLD', 'HIM', 'SO', 'BUT', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'ONLY', 'THINK', 'OF', 'IT'] +5442-41169-0003-1416: hyp=['I', 'HAVE', 'TOLD', 'HIM', 'SO', 'BUT', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'ONLY', 'THINK', 'OF', 'IT'] +5442-41169-0004-1417: ref=['THESE', 'PERSONS', 'WERE', 'UNMISTAKABLY', 'SEEKING', 'A', 'PLACE', 'WHERE', 'THEY', 'COULD', 'TALK', 'WITHOUT', 'BEING', 'OVERHEARD'] +5442-41169-0004-1417: hyp=['THESE', 'PERSONS', 'WERE', 'UNMISTAKABLY', 'SEEKING', 'A', 'PLACE', 'WHERE', 'THEY', 'COULD', 'TALK', 'WITHOUT', 'BEING', 'OVERHEARD'] +5442-41169-0005-1418: ref=['SHALL', 'WE', 'GO', 'ON', 'YOUR', 'EXCELLENCY', 'FINE', 'CHAMPAGNE'] +5442-41169-0005-1418: hyp=['SHALL', 'WE', 'GO', 'ON', 'YOUR', 'EXCELLENCY', 'FINE', 'CHAMPAGNE'] +5442-41169-0006-1419: ref=['LAST', 'YEAR', 'AT', 'OUR', 'DISTRICT', 'MARSHAL', 'NIKOLAY', "IVANOVITCH'S"] +5442-41169-0006-1419: hyp=['LOST', 'YOUR', 'OTHER', 'DISTRICT', 'MARTIAL', 'NIKOLA', "IVANOVITCH'S"] +5442-41169-0007-1420: ref=['OH', 'STILL', 'JUST', 'THE', 'SAME', 'ALWAYS', 'AT', 'A', 'LOSS', 'THE', 'LANDOWNER', 'ANSWERED', 'WITH', 'A', 'RESIGNED', 'SMILE', 'BUT', 'WITH', 'AN', 'EXPRESSION', 'OF', 'SERENITY', 'AND', 'CONVICTION', 'THAT', 'SO', 'IT', 'MUST', 'BE'] +5442-41169-0007-1420: hyp=['OH', 'STILL', 'JUST', 'THE', 'SAME', 'ALWAYS', 'AT', 'A', 'LOSS', 'THE', 'LANDOWNER', 'ANSWERED', 'WITH', 'A', 'RESIGNED', 'SMILE', 'BUT', 'WITH', 'AN', 'EXPRESSION', 'OF', 'SERENITY', 'AND', 'CONVICTION', 'THAT', 'SO', 'IT', 'MUST', 'BE'] +5442-41169-0008-1421: ref=['WHY', 'WHAT', 'IS', 'THERE', 'TO', 'UNDERSTAND'] +5442-41169-0008-1421: hyp=['WHY', 'WHAT', 'IS', 'THAT', 'TO', 'UNDERSTAND'] +5442-41169-0009-1422: ref=["THERE'S", 'NO', 'MEANING', 'IN', 'IT', 'AT', 'ALL'] +5442-41169-0009-1422: hyp=['THERE', 'IS', 'NO', 'MEANING', 'IN', 'IT', 'AT', 'ALL'] +5442-41169-0010-1423: ref=['THEN', 'TOO', 'ONE', 'MUST', 'KEEP', 'UP', 'CONNECTIONS'] +5442-41169-0010-1423: hyp=['THEN', 'TOO', 'ONE', 'MUST', 'KEEP', 'UP', 'CONNECTIONS'] +5442-41169-0011-1424: ref=["IT'S", 'A', 'MORAL', 'OBLIGATION', 'OF', 'A', 'SORT'] +5442-41169-0011-1424: hyp=["IT'S", 'A', 'MORTAL', 'OBLIGATION', 'OF', 'A', 'SORT'] 
+5442-41169-0012-1425: ref=['AND', 'THEN', 'TO', 'TELL', 'THE', 'TRUTH', "THERE'S", "ONE'S", 'OWN', 'INTERESTS'] +5442-41169-0012-1425: hyp=['AND', 'THEN', 'TO', 'TELL', 'THE', 'TRUTH', "THERE'S", "ONE'S", 'OWN', 'INTEREST'] +5442-41169-0013-1426: ref=["THEY'RE", 'PROPRIETORS', 'OF', 'A', 'SORT', 'BUT', "WE'RE", 'THE', 'LANDOWNERS'] +5442-41169-0013-1426: hyp=['THEIR', 'PROPRIETORS', 'OF', 'A', 'SORT', 'BUT', 'WE', 'ARE', 'THE', 'LANDOWNERS'] +5442-41169-0014-1427: ref=['THAT', 'IT', 'MAY', 'BE', 'BUT', 'STILL', 'IT', 'OUGHT', 'TO', 'BE', 'TREATED', 'A', 'LITTLE', 'MORE', 'RESPECTFULLY'] +5442-41169-0014-1427: hyp=['THAT', 'IT', 'MAY', 'BE', 'BUT', 'STILL', 'IT', 'OUGHT', 'TO', 'BE', 'TREATED', 'A', 'LITTLE', 'MORE', 'RESPECTFULLY'] +5442-41169-0015-1428: ref=['IF', "WE'RE", 'LAYING', 'OUT', 'A', 'GARDEN', 'PLANNING', 'ONE', 'BEFORE', 'THE', 'HOUSE', 'YOU', 'KNOW', 'AND', 'THERE', "YOU'VE", 'A', 'TREE', "THAT'S", 'STOOD', 'FOR', 'CENTURIES', 'IN', 'THE', 'VERY', 'SPOT', 'OLD', 'AND', 'GNARLED', 'IT', 'MAY', 'BE', 'AND', 'YET', 'YOU', "DON'T", 'CUT', 'DOWN', 'THE', 'OLD', 'FELLOW', 'TO', 'MAKE', 'ROOM', 'FOR', 'THE', 'FLOWERBEDS', 'BUT', 'LAY', 'OUT', 'YOUR', 'BEDS', 'SO', 'AS', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'THE', 'TREE'] +5442-41169-0015-1428: hyp=['IF', 'WE', 'ARE', 'LAYING', 'OUT', 'A', 'GARDEN', 'PLANNING', 'ONE', 'BEFORE', 'THE', 'HOUSE', 'YOU', 'KNOW', 'AND', 'THERE', 'YOU', 'HAVE', 'A', 'TREE', 'THAT', 'STOOD', 'IN', 'CENTURIES', 'IN', 'THE', 'VERY', 'SPOT', 'OLD', 'AND', 'KNOLLED', 'IT', 'MAY', 'BE', 'AND', 'YET', 'YOU', "DON'T", 'CUT', 'DOWN', 'THE', 'OLD', 'FELLOW', 'TO', 'MAKE', 'ROOM', 'FOR', 'THE', 'FLOWER', 'BEDS', 'BUT', 'LAY', 'OUT', 'YOUR', 'BEDS', 'SO', 'AS', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'THE', 'TREE'] +5442-41169-0016-1429: ref=['WELL', 'AND', 'HOW', 'IS', 'YOUR', 'LAND', 'DOING'] +5442-41169-0016-1429: hyp=['WELL', 'AND', 'HOW', 'IS', 'YOUR', 'LAND', 'DOING'] +5442-41169-0017-1430: ref=['BUT', "ONE'S", 'WORK', 'IS', 'THROWN', 'IN', 'FOR', 'NOTHING'] +5442-41169-0017-1430: hyp=['BUT', "ONE'S", 'WORK', 'IS', 'THROWN', 'IN', 'FOR', 'NOTHING'] +5442-41169-0018-1431: ref=['OH', 'WELL', 'ONE', 'DOES', 'IT', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +5442-41169-0018-1431: hyp=['OH', 'WELL', 'ONE', 'DOES', 'IT', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +5442-41169-0019-1432: ref=['AND', "WHAT'S", 'MORE', 'THE', 'LANDOWNER', 'WENT', 'ON', 'LEANING', 'HIS', 'ELBOWS', 'ON', 'THE', 'WINDOW', 'AND', 'CHATTING', 'ON', 'MY', 'SON', 'I', 'MUST', 'TELL', 'YOU', 'HAS', 'NO', 'TASTE', 'FOR', 'IT'] +5442-41169-0019-1432: hyp=['AND', 'ONCE', 'MORE', 'THE', 'LANDOWNER', 'WENT', 'ON', 'LEANING', 'HIS', 'ELBOWS', 'ON', 'THE', 'WINDOW', 'AND', 'CHATTING', 'ON', 'MY', 'SON', 'I', 'MUST', 'TELL', 'YOU', 'HAS', 'NO', 'TASTE', 'FOR', 'IT'] +5442-41169-0020-1433: ref=['SO', "THERE'LL", 'BE', 'NO', 'ONE', 'TO', 'KEEP', 'IT', 'UP', 'AND', 'YET', 'ONE', 'DOES', 'IT'] +5442-41169-0020-1433: hyp=['SO', 'THERE', 'WILL', 'BE', 'NO', 'ONE', 'TO', 'KEEP', 'IT', 'UP', 'AND', 'YET', 'ONE', 'DOES', 'IT'] +5442-41169-0021-1434: ref=['WE', 'WALKED', 'ABOUT', 'THE', 'FIELDS', 'AND', 'THE', 'GARDEN', 'NO', 'SAID', 'HE', 'STEPAN', 'VASSILIEVITCH', "EVERYTHING'S", 'WELL', 'LOOKED', 'AFTER', 'BUT', 'YOUR', "GARDEN'S", 'NEGLECTED'] +5442-41169-0021-1434: hyp=['WE', 'WALKED', 'ABOUT', 'THE', 'FIELDS', 'ON', 'THE', 'GARDEN', 'NO', 'SAID', 'HE', 'STEP', 'ON', 'MISS', 'LEVITCH', "EVERYTHING'S", 'WELL', 'LOOKED', 'AFTER', 'BUT', 'YOUR', 'GARDENS', 'NEGLECTED'] +5442-41169-0022-1435: ref=['TO', 'MY', 'THINKING', "I'D", 'CUT', 'DOWN', 
'THAT', 'LIME', 'TREE'] +5442-41169-0022-1435: hyp=['TO', 'MY', 'THINKING', "I'VE", 'CUT', 'DOWN', 'THE', 'LIMETERY'] +5442-41169-0023-1436: ref=['HERE', "YOU'VE", 'THOUSANDS', 'OF', 'LIMES', 'AND', 'EACH', 'WOULD', 'MAKE', 'TWO', 'GOOD', 'BUNDLES', 'OF', 'BARK'] +5442-41169-0023-1436: hyp=['HERE', 'YOUR', 'THOUSANDS', 'OF', 'LIMES', 'AND', 'EACH', 'WOULD', 'MAKE', 'TOO', 'GOOD', 'BUNDLES', 'OF', 'BULK'] +5442-41169-0024-1437: ref=["YOU'RE", 'MARRIED', "I'VE", 'HEARD', 'SAID', 'THE', 'LANDOWNER'] +5442-41169-0024-1437: hyp=["YOU'RE", 'MARRIED', 'I', 'HEARD', 'SAID', 'THE', 'LANDOWNER'] +5442-41169-0025-1438: ref=['YES', "IT'S", 'RATHER', 'STRANGE', 'HE', 'WENT', 'ON'] +5442-41169-0025-1438: hyp=['YES', "IT'S", 'ALL', 'THE', 'STRANGE', 'HE', 'WENT', 'ON'] +5442-41169-0026-1439: ref=['THE', 'LANDOWNER', 'CHUCKLED', 'UNDER', 'HIS', 'WHITE', 'MUSTACHES'] +5442-41169-0026-1439: hyp=['THE', 'LANDLORD', 'CHLED', 'UNDER', 'HIS', 'WHITE', 'MOUSTACHES'] +5442-41169-0027-1440: ref=['WHY', "DON'T", 'WE', 'CUT', 'DOWN', 'OUR', 'PARKS', 'FOR', 'TIMBER'] +5442-41169-0027-1440: hyp=['WHY', "DON'T", 'WE', 'GOT', 'DOWN', 'OUR', 'BOX', 'FOR', 'TIMBOO'] +5442-41169-0028-1441: ref=['SAID', 'LEVIN', 'RETURNING', 'TO', 'A', 'THOUGHT', 'THAT', 'HAD', 'STRUCK', 'HIM'] +5442-41169-0028-1441: hyp=['SAID', 'LEVIN', 'RETURNING', 'TO', 'A', 'THOUGHT', 'THAT', 'HAD', 'STRUCK', 'HIM'] +5442-41169-0029-1442: ref=["THERE'S", 'A', 'CLASS', 'INSTINCT', 'TOO', 'OF', 'WHAT', 'ONE', 'OUGHT', 'AND', "OUGHTN'T", 'TO', 'DO'] +5442-41169-0029-1442: hyp=["THERE'S", 'A', 'CLASS', 'INSTINCT', 'TOO', 'OF', 'WHAT', 'ONE', 'OUGHT', 'AND', 'OUGHT', 'NOT', 'TO', 'DO'] +5442-41169-0030-1443: ref=["THERE'S", 'THE', 'PEASANTS', 'TOO', 'I', 'WONDER', 'AT', 'THEM', 'SOMETIMES', 'ANY', 'GOOD', 'PEASANT', 'TRIES', 'TO', 'TAKE', 'ALL', 'THE', 'LAND', 'HE', 'CAN'] +5442-41169-0030-1443: hyp=["THERE'S", 'THE', 'PEASANTS', 'TOO', 'I', 'WONDER', 'AT', 'THEM', 'SOMETIMES', 'ANY', 'GOOD', 'PEASANT', 'TRIES', 'TO', 'TAKE', 'ALL', 'THE', 'LAND', 'HE', 'CAN'] +5442-41169-0031-1444: ref=['WITHOUT', 'A', 'RETURN', 'TOO', 'AT', 'A', 'SIMPLE', 'LOSS'] +5442-41169-0031-1444: hyp=['WITHOUT', 'A', 'RETURN', 'TO', 'ADD', 'A', 'SIMPLE', 'LAWS'] +5484-24317-0000-571: ref=['WHEN', 'HE', 'CAME', 'FROM', 'THE', 'BATH', 'PROCLUS', 'VISITED', 'HIM', 'AGAIN'] +5484-24317-0000-571: hyp=['WHEN', 'HE', 'CAME', 'FROM', 'THE', 'BATH', 'PROCKLESS', 'VISITED', 'HIM', 'AGAIN'] +5484-24317-0001-572: ref=['BUT', 'HERMON', 'WAS', 'NOT', 'IN', 'THE', 'MOOD', 'TO', 'SHARE', 'A', 'JOYOUS', 'REVEL', 'AND', 'HE', 'FRANKLY', 'SAID', 'SO', 'ALTHOUGH', 'IMMEDIATELY', 'AFTER', 'HIS', 'RETURN', 'HE', 'HAD', 'ACCEPTED', 'THE', 'INVITATION', 'TO', 'THE', 'FESTIVAL', 'WHICH', 'THE', 'WHOLE', 'FELLOWSHIP', 'OF', 'ARTISTS', 'WOULD', 'GIVE', 'THE', 'FOLLOWING', 'DAY', 'IN', 'HONOUR', 'OF', 'THE', 'SEVENTIETH', 'BIRTHDAY', 'OF', 'THE', 'OLD', 'SCULPTOR', 'EUPHRANOR'] +5484-24317-0001-572: hyp=['BUT', 'HERMAN', 'WAS', 'NOT', 'IN', 'THE', 'MOOD', 'TO', 'SHARE', 'A', 'JOYOUS', 'REVEL', 'AND', 'HE', 'FRANKLY', 'SAID', 'SO', 'ALTHOUGH', 'IMMEDIATELY', 'AFTER', 'HIS', 'RETURN', 'HE', 'HAD', 'ACCEPTED', 'THE', 'INVITATION', 'TO', 'THE', 'FESTIVAL', 'WHICH', 'THE', 'WHOLE', 'FELLOWSHIP', 'OF', 'ARTISTS', 'WOULD', 'GIVE', 'THE', 'FOLLOWING', 'DAY', 'AND', 'HONOR', 'OF', 'THE', 'SEVENTEENTH', 'BIRTHDAY', 'OF', 'THE', 'OLD', 'SCULPTOR', 'EUPHRANER'] +5484-24317-0002-573: ref=['SHE', 'WOULD', 'APPEAR', 'HERSELF', 'AT', 'DESSERT', 'AND', 'THE', 'BANQUET', 'MUST', 'THEREFORE', 'BEGIN', 'AT', 'AN', 'UNUSUALLY', 
'EARLY', 'HOUR'] +5484-24317-0002-573: hyp=['SHE', 'WOULD', 'APPEAR', 'HERSELF', 'A', 'DESSERT', 'AND', 'THE', 'BANQUET', 'MUST', 'THEREFORE', 'BEGIN', 'AT', 'AN', 'UNUSUALLY', 'EARLY', 'HOUR'] +5484-24317-0003-574: ref=['SO', 'THE', 'ARTIST', 'FOUND', 'HIMSELF', 'OBLIGED', 'TO', 'RELINQUISH', 'HIS', 'OPPOSITION'] +5484-24317-0003-574: hyp=['SO', 'THE', 'ARTIST', 'FOUND', 'HIMSELF', 'OBLIGED', 'TO', 'RELINQUISH', 'HIS', 'OPPOSITION'] +5484-24317-0004-575: ref=['THE', 'BANQUET', 'WAS', 'TO', 'BEGIN', 'IN', 'A', 'FEW', 'HOURS', 'YET', 'HE', 'COULD', 'NOT', 'LET', 'THE', 'DAY', 'PASS', 'WITHOUT', 'SEEING', 'DAPHNE', 'AND', 'TELLING', 'HER', 'THE', 'WORDS', 'OF', 'THE', 'ORACLE'] +5484-24317-0004-575: hyp=['THE', 'BANQUET', 'WAS', 'TO', 'BEGIN', 'IN', 'A', 'FEW', 'HOURS', 'YET', 'HE', 'COULD', 'NOT', 'LET', 'THE', 'DAY', 'PASS', 'WITHOUT', 'SEEING', 'DAPHNE', 'AND', 'TELLING', 'HER', 'THE', 'WORDS', 'OF', 'THE', 'ORACLE'] +5484-24317-0005-576: ref=['HE', 'LONGED', 'WITH', 'ARDENT', 'YEARNING', 'FOR', 'THE', 'SOUND', 'OF', 'HER', 'VOICE', 'AND', 'STILL', 'MORE', 'TO', 'UNBURDEN', 'HIS', 'SORELY', 'TROUBLED', 'SOUL', 'TO', 'HER'] +5484-24317-0005-576: hyp=['HE', 'LONGED', 'WITH', 'ARDENT', 'YEARNING', 'FOR', 'THE', 'SOUND', 'OF', 'HER', 'VOICE', 'AND', 'STILL', 'MORE', 'TO', 'UNBURDEN', 'HIS', 'SORELY', 'TROUBLED', 'SOUL', 'TO', 'HER'] +5484-24317-0006-577: ref=['SINCE', 'HIS', 'RETURN', 'FROM', 'THE', 'ORACLE', 'THE', 'FEAR', 'THAT', 'THE', 'RESCUED', 'DEMETER', 'MIGHT', 'YET', 'BE', 'THE', 'WORK', 'OF', 'MYRTILUS', 'HAD', 'AGAIN', 'MASTERED', 'HIM'] +5484-24317-0006-577: hyp=['SINCE', 'HIS', 'RETURN', 'FROM', 'THE', 'ORACLE', 'THE', 'FEAR', 'THAT', 'THE', 'RESCUE', 'DEMETER', 'MIGHT', 'YET', 'BE', 'THE', 'WORK', 'OF', 'MERTULIST', 'HAD', 'AGAIN', 'MASTERED', 'HIM'] +5484-24317-0007-578: ref=['THE', 'APPROVAL', 'AS', 'WELL', 'AS', 'THE', 'DOUBTS', 'WHICH', 'IT', 'AROUSED', 'IN', 'OTHERS', 'STRENGTHENED', 'HIS', 'OPINION', 'ALTHOUGH', 'EVEN', 'NOW', 'HE', 'COULD', 'NOT', 'SUCCEED', 'IN', 'BRINGING', 'IT', 'INTO', 'HARMONY', 'WITH', 'THE', 'FACTS'] +5484-24317-0007-578: hyp=['THE', 'APPROVAL', 'AS', 'WELL', 'AS', 'A', 'DOUBT', 'WHICH', 'HAD', 'ARISED', 'IN', 'OTHERS', 'STRENGTHENED', 'HIS', 'OPINION', 'ALTHOUGH', 'EVEN', 'NOW', 'HE', 'COULD', 'NOT', 'SUCCEED', 'IN', 'BRINGING', 'IT', 'INTO', 'HARMONY', 'WITH', 'THE', 'FACTS'] +5484-24317-0008-579: ref=['THEN', 'HE', 'WENT', 'DIRECTLY', 'TO', 'THE', 'NEIGHBOURING', 'PALACE', 'THE', 'QUEEN', 'MIGHT', 'HAVE', 'APPEARED', 'ALREADY', 'AND', 'IT', 'WOULD', 'NOT', 'DO', 'TO', 'KEEP', 'HER', 'WAITING'] +5484-24317-0008-579: hyp=['THEN', 'HE', 'WENT', 'DIRECTLY', 'TO', 'THE', 'NEIGHBORING', 'PALACE', 'THE', 'QUEEN', 'MIGHT', 'HAVE', 'APPEARED', 'ALREADY', 'AND', 'IT', 'WOULD', 'NOT', 'DO', 'TO', 'KEEP', 'HER', 'WAITING'] +5484-24317-0009-580: ref=['HITHERTO', 'THE', 'MERCHANT', 'HAD', 'BEEN', 'INDUCED', 'IT', 'IS', 'TRUE', 'TO', 'ADVANCE', 'LARGE', 'SUMS', 'OF', 'MONEY', 'TO', 'THE', 'QUEEN', 'BUT', 'THE', 'LOYAL', 'DEVOTION', 'WHICH', 'HE', 'SHOWED', 'TO', 'HER', 'ROYAL', 'HUSBAND', 'HAD', 'RENDERED', 'IT', 'IMPOSSIBLE', 'TO', 'GIVE', 'HIM', 'EVEN', 'A', 'HINT', 'OF', 'THE', 'CONSPIRACY'] +5484-24317-0009-580: hyp=['HITHERTO', 'THE', 'MERCHANT', 'HAD', 'BEEN', 'INDUCED', 'IT', 'IS', 'TRUE', 'TO', 'ADVANCE', 'LARGE', 'SUMS', 'OF', 'MONEY', 'TO', 'THE', 'QUEEN', 'BUT', 'THE', 'LOYAL', 'DEVOTION', 'WHICH', 'HE', 'SHOWED', 'TO', 'HER', 'ROYAL', 'HUSBAND', 'HAD', 'RENDERED', 'AN', 'IMPOSSIBLE', 'TO', 'GIVE', 'HIM', 'EVEN', 'A', 'HINT', 'OF', 'THE', 
'CONSPIRACY'] +5484-24317-0010-581: ref=['WHEN', 'HERMON', 'ENTERED', 'THE', 'RESIDENCE', 'OF', 'THE', 'GRAMMATEUS', 'IN', 'THE', 'PALACE', 'THE', 'GUESTS', 'HAD', 'ALREADY', 'ASSEMBLED'] +5484-24317-0010-581: hyp=['WHEN', 'HERMANN', 'ENTERED', 'THE', 'RESIDENCE', 'OF', 'THE', 'GRAMMATIUS', 'IN', 'THE', 'PALACE', 'THEY', 'GUESTS', 'HAD', 'ALREADY', 'ASSEMBLED'] +5484-24317-0011-582: ref=['THE', 'PLACE', 'BY', "HERMON'S", 'SIDE', 'WHICH', 'ALTHEA', 'HAD', 'CHOSEN', 'FOR', 'HERSELF', 'WOULD', 'THEN', 'BE', 'GIVEN', 'UP', 'TO', 'ARSINOE'] +5484-24317-0011-582: hyp=['THEY', 'PLACED', 'BY', "HARMONT'S", 'SIDE', 'WHICH', 'ALTHIE', 'HAD', 'CHOSEN', 'FOR', 'HERSELF', 'WOULD', 'THEN', 'BE', 'GIVEN', 'UP', 'TO', 'ARSENO'] +5484-24317-0012-583: ref=['TRUE', 'AN', 'INTERESTING', 'CONVERSATION', 'STILL', 'HAD', 'POWER', 'TO', 'CHARM', 'HIM', 'BUT', 'OFTEN', 'DURING', 'ITS', 'CONTINUANCE', 'THE', 'FULL', 'CONSCIOUSNESS', 'OF', 'HIS', 'MISFORTUNE', 'FORCED', 'ITSELF', 'UPON', 'HIS', 'MIND', 'FOR', 'THE', 'MAJORITY', 'OF', 'THE', 'SUBJECTS', 'DISCUSSED', 'BY', 'THE', 'ARTISTS', 'CAME', 'TO', 'THEM', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'SIGHT', 'AND', 'REFERRED', 'TO', 'NEW', 'CREATIONS', 'OF', 'ARCHITECTURE', 'SCULPTURE', 'AND', 'PAINTING', 'FROM', 'WHOSE', 'ENJOYMENT', 'HIS', 'BLINDNESS', 'DEBARRED', 'HIM'] +5484-24317-0012-583: hyp=['TRUE', 'AN', 'INTERESTING', 'CONVERSATION', 'STILL', 'HAD', 'POWER', 'TO', 'CHARM', 'HIM', 'BUT', 'OFTEN', 'DURING', 'ITS', 'CONTINUANCE', 'THE', 'FULL', 'CONSCIOUSNESS', 'OF', 'HIS', 'MISFORTUNE', 'FORCED', 'ITSELF', 'UPON', 'HIS', 'MIND', 'FOR', 'THE', 'MAJORITY', 'OF', 'THE', 'SUBJECTS', 'DISCUSSED', 'BY', 'THE', 'ARTISTS', 'CAME', 'TO', 'THEM', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'SIGHT', 'AND', 'REFERRED', 'TO', 'NEW', 'CREATIONS', 'OF', 'ARCHITECTURE', 'SCULPTURE', 'AND', 'PAINTING', 'FROM', 'WHOSE', 'ENJOYMENT', 'HIS', 'BLINDNESS', 'DEBARRED', 'HIM'] +5484-24317-0013-584: ref=['A', 'STRANGER', 'OUT', 'OF', 'HIS', 'OWN', 'SPHERE', 'HE', 'FELT', 'CHILLED', 'AMONG', 'THESE', 'CLOSELY', 'UNITED', 'MEN', 'AND', 'WOMEN', 'TO', 'WHOM', 'NO', 'TIE', 'BOUND', 'HIM', 'SAVE', 'THE', 'PRESENCE', 'OF', 'THE', 'SAME', 'HOST'] +5484-24317-0013-584: hyp=['A', 'STRANGER', 'OUT', 'OF', 'HIS', 'OWN', 'SPHERE', 'HE', 'FELL', 'CHILLED', 'AMONG', 'THESE', 'CLOSELY', 'UNITED', 'MEN', 'AND', 'WOMEN', 'TO', 'WHOM', 'NO', 'TIE', 'BOUND', 'HIM', 'SAVE', 'THE', 'PRESENCE', 'OF', 'THE', 'SAME', 'HOST'] +5484-24317-0014-585: ref=['CRATES', 'HAD', 'REALLY', 'BEEN', 'INVITED', 'IN', 'ORDER', 'TO', 'WIN', 'HIM', 'OVER', 'TO', 'THE', "QUEEN'S", 'CAUSE', 'BUT', 'CHARMING', 'FAIR', 'HAIRED', 'NICO', 'HAD', 'BEEN', 'COMMISSIONED', 'BY', 'THE', 'CONSPIRATORS', 'TO', 'PERSUADE', 'HIM', 'TO', 'SING', "ARSINOE'S", 'PRAISES', 'AMONG', 'HIS', 'PROFESSIONAL', 'ASSOCIATES'] +5484-24317-0014-585: hyp=['CREATES', 'HAD', 'REALLY', 'BEEN', 'INVITED', 'IN', 'ORDER', 'TO', 'WIN', 'HIM', 'OVER', 'TO', 'THE', "QUEEN'S", 'CAUSE', 'BUT', 'CHARMING', 'FAIR', 'HAIRED', 'NACO', 'HAD', 'BEEN', 'COMMISSIONED', 'BY', 'THE', 'CONSPIRATORS', 'TO', 'PERSUADE', 'HIM', 'TO', 'SING', "ARSENO'S", 'PRAISES', 'AMONG', 'HIS', 'PROFESSIONAL', 'ASSOCIATES'] +5484-24317-0015-586: ref=['HIS', 'SON', 'HAD', 'BEEN', 'THIS', 'ROYAL', "DAME'S", 'FIRST', 'HUSBAND', 'AND', 'SHE', 'HAD', 'DESERTED', 'HIM', 'TO', 'MARRY', 'LYSIMACHUS', 'THE', 'AGED', 'KING', 'OF', 'THRACE'] +5484-24317-0015-586: hyp=['HIS', 'SON', 'HAD', 'BEEN', 'THE', 'ROYAL', 'JAMES', 'FIRST', 'HUSBAND', 'AND', 'SHE', 'HAD', 'DESERTED', 'HIM', 'TO', 'MARRY', 'LISAKETH', 'THE', 
'AGED', 'KING', 'OF', 'THRACE'] +5484-24317-0016-587: ref=['THE', "KING'S", 'SISTER', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'CRIED', 'HERMON', 'INCREDULOUSLY'] +5484-24317-0016-587: hyp=['THE', "KING'S", 'SISTER', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'CRIED', 'HARMON', 'INCREDULOUSLY'] +5484-24317-0017-588: ref=['WE', 'WOMEN', 'ARE', 'ONLY', 'AS', 'OLD', 'AS', 'WE', 'LOOK', 'AND', 'THE', 'LEECHES', 'AND', 'TIRING', 'WOMEN', 'OF', 'THIS', 'BEAUTY', 'OF', 'FORTY', 'PRACTISE', 'ARTS', 'WHICH', 'GIVE', 'HER', 'THE', 'APPEARANCE', 'OF', 'TWENTY', 'FIVE', 'YET', 'PERHAPS', 'THE', 'KING', 'VALUES', 'HER', 'INTELLECT', 'MORE', 'THAN', 'HER', 'PERSON', 'AND', 'THE', 'WISDOM', 'OF', 'A', 'HUNDRED', 'SERPENTS', 'IS', 'CERTAINLY', 'UNITED', 'IN', 'THIS', "WOMAN'S", 'HEAD'] +5484-24317-0017-588: hyp=['WE', 'WOMEN', 'ARE', 'EARLIEST', 'OLD', 'AS', 'WE', 'LOOK', 'AND', 'THE', 'LEECH', 'HAS', 'ENTIRE', 'WOMAN', 'OF', 'THIS', 'BEAUTY', 'OF', 'FORTY', 'PRACTISE', 'ARTS', 'WHICH', 'GIVE', 'HER', 'THE', 'APPEARANCE', 'OF', 'TWENTY', 'FIVE', 'YET', 'PERHAPS', 'THE', 'KING', 'VALUES', 'HER', 'INTELLECT', 'MORE', 'THAN', 'HER', 'PERSON', 'AND', 'THE', 'WISDOM', 'OF', 'A', 'HUNDRED', 'SERPENTS', 'IS', 'CERTAINLY', 'UNITED', 'IN', 'THIS', "WOMAN'S", 'HEAD'] +5484-24317-0018-589: ref=['THE', 'THREE', 'MOST', 'TRUSTWORTHY', 'ONES', 'ARE', 'HERE', 'AMYNTAS', 'THE', 'LEECH', 'CHRYSIPPUS', 'AND', 'THE', 'ADMIRABLE', 'PROCLUS'] +5484-24317-0018-589: hyp=['THE', 'THREE', 'MOST', 'TRUSTWORTHY', 'ONES', 'I', 'HEAR', 'I', 'MEANTUS', 'THE', 'LIEGE', 'CHRYSIPPUS', 'IN', 'THE', 'ADMIRABLE', 'PROCLIS'] +5484-24317-0019-590: ref=['LET', 'US', 'HOPE', 'THAT', 'YOU', 'WILL', 'MAKE', 'THIS', 'THREE', 'LEAVED', 'CLOVER', 'THE', 'LUCK', 'PROMISING', 'FOUR', 'LEAVED', 'ONE'] +5484-24317-0019-590: hyp=['LET', 'US', 'HOPE', 'THAT', 'YOU', 'WILL', 'MAKE', 'THIS', 'THREE', 'LEAVED', 'CLOVER', 'THE', 'LUCK', 'PROMISING', 'FOOL', 'LEAVE', 'TO', 'ONE'] +5484-24317-0020-591: ref=['YOUR', 'UNCLE', 'TOO', 'HAS', 'OFTEN', 'WITH', 'PRAISEWORTHY', 'GENEROSITY', 'HELPED', 'ARSINOE', 'IN', 'MANY', 'AN', 'EMBARRASSMENT'] +5484-24317-0020-591: hyp=['YOUR', 'UNCLE', 'TOO', 'HAS', 'OFTEN', 'WITH', 'PRAISE', 'WORTHY', 'GENEROSITY', 'HELPED', 'ALSO', 'IN', 'MANY', 'AN', 'EMBARRASSMENT'] +5484-24317-0021-592: ref=['HOW', 'LONG', 'HE', 'KEPT', 'YOU', 'WAITING', 'FOR', 'THE', 'FIRST', 'WORD', 'CONCERNING', 'A', 'WORK', 'WHICH', 'JUSTLY', 'TRANSPORTED', 'THE', 'WHOLE', 'CITY', 'WITH', 'DELIGHT'] +5484-24317-0021-592: hyp=['HOW', 'LONG', 'HE', 'KEPT', 'YOU', 'WAITING', 'FROM', 'THE', 'FIRST', 'WORD', 'CONCERNING', 'A', 'WORK', 'WHICH', 'JUSTLY', 'TRANSPORTED', 'THE', 'WHOLE', 'CITY', 'WITH', 'DELIGHT'] +5484-24317-0022-593: ref=['WHEN', 'HE', 'DID', 'FINALLY', 'SUMMON', 'YOU', 'HE', 'SAID', 'THINGS', 'WHICH', 'MUST', 'HAVE', 'WOUNDED', 'YOU'] +5484-24317-0022-593: hyp=['WHEN', 'HE', 'DID', 'FINALLY', 'SUMMON', 'YOU', 'HE', 'SAID', 'THINGS', 'WHICH', 'MUST', 'HAVE', 'WOUNDED', 'YOU'] +5484-24317-0023-594: ref=['THAT', 'IS', 'GOING', 'TOO', 'FAR', 'REPLIED', 'HERMON'] +5484-24317-0023-594: hyp=['THAT', 'IS', 'GOING', 'TOO', 'FAR', 'REPLIED', 'HERMAN'] +5484-24317-0024-595: ref=['HE', 'WINKED', 'AT', 'HER', 'AND', 'MADE', 'A', 'SIGNIFICANT', 'GESTURE', 'AS', 'HE', 'SPOKE', 'AND', 'THEN', 'INFORMED', 'THE', 'BLIND', 'ARTIST', 'HOW', 'GRACIOUSLY', 'ARSINOE', 'HAD', 'REMEMBERED', 'HIM', 'WHEN', 'SHE', 'HEARD', 'OF', 'THE', 'REMEDY', 'BY', 'WHOSE', 'AID', 'MANY', 'A', 'WONDERFUL', 'CURE', 'OF', 'BLIND', 'EYES', 'HAD', 'BEEN', 'MADE', 'IN', 'RHODES'] 
+5484-24317-0024-595: hyp=['HE', 'WAITED', 'HER', 'AND', 'MADE', 'A', 'SIGNIFICANT', 'GESTURE', 'AS', 'HE', 'SPOKE', 'AND', 'THEN', 'INFORMED', 'THE', 'BLIND', 'ARTIST', 'HOW', 'GRACIOUSLY', 'ARSENO', 'HAD', 'REMEMBERED', 'HIM', 'WHEN', 'SHE', 'HEARD', 'OF', 'THE', 'REMEDY', 'BY', 'WHOSE', 'AID', 'MANY', 'A', 'WONDERFUL', 'CURE', 'OF', 'BLIND', 'EYE', 'HAD', 'BEEN', 'MADE', 'IN', 'ROADS'] +5484-24317-0025-596: ref=['THE', 'ROYAL', 'LADY', 'HAD', 'INQUIRED', 'ABOUT', 'HIM', 'AND', 'HIS', 'SUFFERINGS', 'WITH', 'ALMOST', 'SISTERLY', 'INTEREST', 'AND', 'ALTHEA', 'EAGERLY', 'CONFIRMED', 'THE', 'STATEMENT'] +5484-24317-0025-596: hyp=['THE', 'ROYAL', 'LADY', 'HAD', 'INQUIRED', 'ABOUT', 'HIM', 'AND', 'HIS', 'SUFFERINGS', 'WITH', 'ALMOST', 'SISTERLY', 'INTEREST', 'AND', 'ALTHIA', 'EAGERLY', 'CONFIRMED', 'THE', 'STATEMENT'] +5484-24317-0026-597: ref=['HERMON', 'LISTENED', 'TO', 'THE', 'PAIR', 'IN', 'SILENCE'] +5484-24317-0026-597: hyp=['HERMA', 'LISTENED', 'TO', 'THE', 'PARENT', 'SILENCE'] +5484-24317-0027-598: ref=['THE', 'RHODIAN', 'WAS', 'JUST', 'BEGINNING', 'TO', 'PRAISE', 'ARSINOE', 'ALSO', 'AS', 'A', 'SPECIAL', 'FRIEND', 'AND', 'CONNOISSEUR', 'OF', 'THE', "SCULPTOR'S", 'ART', 'WHEN', 'CRATES', "HERMON'S", 'FELLOW', 'STUDENT', 'ASKED', 'THE', 'BLIND', 'ARTIST', 'IN', 'BEHALF', 'OF', 'HIS', 'BEAUTIFUL', 'COMPANION', 'WHY', 'HIS', 'DEMETER', 'WAS', 'PLACED', 'UPON', 'A', 'PEDESTAL', 'WHICH', 'TO', 'OTHERS', 'AS', 'WELL', 'AS', 'HIMSELF', 'SEEMED', 'TOO', 'HIGH', 'FOR', 'THE', 'SIZE', 'OF', 'THE', 'STATUE'] +5484-24317-0027-598: hyp=['THE', 'RADIAN', 'WAS', 'JUST', 'BEGINNING', 'TO', 'PRAISE', 'ARSENO', 'ALSO', 'AS', 'A', 'SPECIAL', 'FRIEND', 'AND', 'CONNOISSEUR', 'OF', 'THE', 'SCULPTURES', 'ART', 'WHEN', 'CRATES', "HERMANN'S", 'FELLOW', 'STUDENT', 'ASKED', 'THE', 'BLIND', 'ARTIST', 'IN', 'BEHALF', 'OF', 'HIS', 'BEAUTIFUL', 'COMPANION', 'WHY', 'HIS', 'DEMETER', 'WAS', 'PLACED', 'UPON', 'A', 'PEDESTAL', 'WITCH', 'TO', 'OTHERS', 'AS', 'WELL', 'AS', 'HIMSELF', 'SEEMED', 'TOO', 'HIGH', 'FOR', 'THE', 'SIZE', 'OF', 'THE', 'STATUE'] +5484-24317-0028-599: ref=['YET', 'WHAT', 'MATTERED', 'IT', 'EVEN', 'IF', 'THESE', 'MISERABLE', 'PEOPLE', 'CONSIDERED', 'THEMSELVES', 'DECEIVED', 'AND', 'POINTED', 'THE', 'FINGER', 'OF', 'SCORN', 'AT', 'HIM'] +5484-24317-0028-599: hyp=['YET', 'WHAT', 'MATTERED', 'IT', 'EVEN', 'IF', 'THESE', 'MISERABLE', 'PEOPLE', 'CONSIDERED', 'THEMSELVES', 'DECEIVED', 'AND', 'POINTED', 'THE', 'FINGER', 'OF', 'SCORN', 'AT', 'HIM'] +5484-24317-0029-600: ref=['A', 'WOMAN', 'WHO', 'YEARNS', 'FOR', 'THE', 'REGARD', 'OF', 'ALL', 'MEN', 'AND', 'MAKES', 'LOVE', 'A', 'TOY', 'EASILY', 'LESSENS', 'THE', 'DEMANDS', 'SHE', 'IMPOSES', 'UPON', 'INDIVIDUALS'] +5484-24317-0029-600: hyp=['A', 'WOMAN', 'WHO', 'URNS', 'FOR', 'THE', 'REGARD', 'OF', 'ALL', 'MEN', 'AND', 'MAKES', 'LOVE', 'A', 'TOY', 'EASILY', 'LESSENS', 'THE', 'DEMAND', 'SHE', 'IMPOSES', 'UPON', 'INDIVIDUALS'] +5484-24317-0030-601: ref=['ONLY', 'EVEN', 'THOUGH', 'LOVE', 'HAS', 'WHOLLY', 'DISAPPEARED', 'SHE', 'STILL', 'CLAIMS', 'CONSIDERATION', 'AND', 'ALTHEA', 'DID', 'NOT', 'WISH', 'TO', 'LOSE', "HERMON'S", 'REGARD'] +5484-24317-0030-601: hyp=['ONLY', 'EVEN', 'THOUGH', 'LOVE', 'HAS', 'WHOLLY', 'DISAPPEARED', 'SHE', 'STILL', 'CLAIMS', 'CONSIDERATION', 'AND', 'ALTHEA', 'DID', 'NOT', 'WISH', 'TO', 'LOSE', "HARMON'S", 'REGARD'] +5484-24317-0031-602: ref=['HOW', 'INDIFFERENT', 'YOU', 'LOOK', 'BUT', 'I', 'TELL', 'YOU', 'HER', 'DEEP', 'BLUE', 'EYES', 'FLASHED', 'AS', 'SHE', 'SPOKE', 'THAT', 'SO', 'LONG', 'AS', 'YOU', 'WERE', 'STILL', 'A', 'GENUINE', 
'CREATING', 'ARTIST', 'THE', 'CASE', 'WAS', 'DIFFERENT'] +5484-24317-0031-602: hyp=['HOW', 'INDIFFERENT', 'YOU', 'LOOK', 'BUT', 'I', 'TELL', 'YOU', 'HER', 'DEEP', 'BLUE', 'EYES', 'FLASHED', 'AS', 'SHE', 'SPOKE', 'THAT', 'SO', 'LONG', 'AS', 'YOU', 'WAS', 'STILL', 'A', 'GENUINE', 'CREATING', 'ARTIST', 'THE', 'CASE', 'WAS', 'DIFFERENT'] +5484-24317-0032-603: ref=['THOUGH', 'SO', 'LOUD', 'A', 'DENIAL', 'IS', 'WRITTEN', 'ON', 'YOUR', 'FACE', 'I', 'PERSIST', 'IN', 'MY', 'CONVICTION', 'AND', 'THAT', 'NO', 'IDLE', 'DELUSION', 'ENSNARES', 'ME', 'I', 'CAN', 'PROVE'] +5484-24317-0032-603: hyp=['THOUGH', 'SO', 'LOUD', 'A', 'DENIAL', 'IS', 'WRITTEN', 'ON', 'YOUR', 'FACE', 'I', 'PERSIST', 'IN', 'MY', 'CONVICTION', 'AND', 'THAT', 'NO', 'IDLE', 'DELUSION', 'AND', 'SNAS', 'ME', 'I', 'CAN', 'PROVE'] +5484-24317-0033-604: ref=['IT', 'WAS', 'NAY', 'IT', 'COULD', 'HAVE', 'BEEN', 'NOTHING', 'ELSE', 'THAT', 'VERY', 'SPIDER'] +5484-24317-0033-604: hyp=['IT', 'WAS', 'NAY', 'IT', 'COULD', 'HAVE', 'BEEN', 'NOTHING', 'ELSE', 'THAT', 'VERY', 'SPIDER'] +5484-24318-0000-605: ref=['NOT', 'A', 'SOUND', 'IF', 'YOU', 'VALUE', 'YOUR', 'LIVES'] +5484-24318-0000-605: hyp=['NOT', 'A', 'SOUND', 'IF', 'YOU', 'VALUE', 'YOUR', 'LIVES'] +5484-24318-0001-606: ref=['TO', 'OFFER', 'RESISTANCE', 'WOULD', 'HAVE', 'BEEN', 'MADNESS', 'FOR', 'EVEN', 'HERMON', 'PERCEIVED', 'BY', 'THE', 'LOUD', 'CLANKING', 'OF', 'WEAPONS', 'AROUND', 'THEM', 'THE', 'GREATLY', 'SUPERIOR', 'POWER', 'OF', 'THE', 'ENEMY', 'AND', 'THEY', 'WERE', 'ACTING', 'BY', 'THE', 'ORDERS', 'OF', 'THE', 'KING', 'TO', 'THE', 'PRISON', 'NEAR', 'THE', 'PLACE', 'OF', 'EXECUTION'] +5484-24318-0001-606: hyp=['TO', 'OFFER', 'RESISTANCE', 'WOULD', 'HAVE', 'BEEN', 'MADNESS', 'FOR', 'EVEN', 'HERMON', 'PERCEIVED', 'BY', 'THE', 'LOUD', 'CLANKING', 'OF', 'WEAPONS', 'ROUND', 'THEM', 'THEY', 'GREATLY', 'SUPERIOR', 'POWER', 'OF', 'THE', 'ENEMY', 'AND', 'THEY', 'WERE', 'ACTING', 'BY', 'THE', 'ORDERS', 'OF', 'THE', 'KING', 'TO', 'THE', 'PRISON', 'NEAR', 'THE', 'PLACE', 'OF', 'EXECUTION'] +5484-24318-0002-607: ref=['WAS', 'HE', 'TO', 'BE', 'LED', 'TO', 'THE', "EXECUTIONER'S", 'BLOCK'] +5484-24318-0002-607: hyp=['WAS', 'HE', 'TO', 'BE', 'LED', 'TO', 'THE', "EXECUTIONER'S", 'BLOCK'] +5484-24318-0003-608: ref=['WHAT', 'PLEASURE', 'HAD', 'LIFE', 'TO', 'OFFER', 'HIM', 'THE', 'BLIND', 'MAN', 'WHO', 'WAS', 'ALREADY', 'DEAD', 'TO', 'HIS', 'ART'] +5484-24318-0003-608: hyp=['WHAT', 'PLEASURE', 'HAD', 'LIFE', 'TO', 'OFFER', 'HIM', 'THE', 'BLIND', 'MAN', 'WHO', 'WAS', 'ALREADY', 'DEAD', 'TO', 'HIS', 'ART'] +5484-24318-0004-609: ref=['OUGHT', 'HE', 'NOT', 'TO', 'GREET', 'THIS', 'SUDDEN', 'END', 'AS', 'A', 'BOON', 'FROM', 'THE', 'IMMORTALS'] +5484-24318-0004-609: hyp=['OUGHT', 'HE', 'NOT', 'TO', 'GREET', 'HIS', 'SUDDEN', 'END', 'AS', 'THE', 'BOOM', 'FROM', 'THE', 'IMMORTALS'] +5484-24318-0005-610: ref=['DID', 'IT', 'NOT', 'SPARE', 'HIM', 'A', 'HUMILIATION', 'AS', 'GREAT', 'AND', 'PAINFUL', 'AS', 'COULD', 'BE', 'IMAGINED'] +5484-24318-0005-610: hyp=['DID', 'IT', 'NOT', 'SPARE', 'HIM', 'A', 'HUMILIATION', 'AS', 'GREAT', 'AND', 'PAINFUL', 'AS', 'COULD', 'BE', 'IMAGINED'] +5484-24318-0006-611: ref=['WHATEVER', 'MIGHT', 'AWAIT', 'HIM', 'HE', 'DESIRED', 'NO', 'BETTER', 'FATE'] +5484-24318-0006-611: hyp=['WHATEVER', 'MIGHT', 'AWAIT', 'HIM', 'HE', 'DESIRED', 'NO', 'BETTER', 'FATE'] +5484-24318-0007-612: ref=['IF', 'HE', 'HAD', 'PASSED', 'INTO', 'ANNIHILATION', 'HE', 'HERMON', 'WISHED', 'TO', 'FOLLOW', 'HIM', 'THITHER', 'AND', 'ANNIHILATION', 'CERTAINLY', 'MEANT', 'REDEMPTION', 'FROM', 'PAIN', 'AND', 'MISERY'] 
+5484-24318-0007-612: hyp=['IF', 'HE', 'HAD', 'PASSED', 'INTO', 'ANNIHILATION', 'HE', 'HERMAN', 'WISHED', 'TO', 'FOLLOW', 'HIM', 'THITHER', 'AND', 'ANNIHILATION', 'CERTAINLY', 'MEANT', 'REDEMPTION', 'FROM', 'PAIN', 'AND', 'MISERY'] +5484-24318-0008-613: ref=['BUT', 'IF', 'HE', 'WERE', 'DESTINED', 'TO', 'MEET', 'HIS', 'MYRTILUS', 'AND', 'HIS', 'MOTHER', 'IN', 'THE', 'WORLD', 'BEYOND', 'THE', 'GRAVE', 'WHAT', 'HAD', 'HE', 'NOT', 'TO', 'TELL', 'THEM', 'HOW', 'SURE', 'HE', 'WAS', 'OF', 'FINDING', 'A', 'JOYFUL', 'RECEPTION', 'THERE', 'FROM', 'BOTH'] +5484-24318-0008-613: hyp=['BUT', 'IF', 'HE', 'WERE', 'DESTINED', 'TO', 'MEET', 'HIS', 'BURTULAS', 'AND', 'HIS', 'MOTHER', 'IN', 'THE', 'WORLD', 'BEYOND', 'THE', 'GRAVE', 'WHAT', 'HAD', 'HE', 'NOT', 'TO', 'TELL', 'THEM', 'HOW', 'SURE', 'HE', 'WAS', 'A', 'FINDING', 'A', 'JOYFUL', 'RECEPTION', 'THERE', 'FROM', 'BOTH'] +5484-24318-0009-614: ref=['THE', 'POWER', 'WHICH', 'DELIVERED', 'HIM', 'OVER', 'TO', 'DEATH', 'JUST', 'AT', 'THAT', 'MOMENT', 'WAS', 'NOT', 'NEMESIS', 'NO', 'IT', 'WAS', 'A', 'KINDLY', 'DEITY'] +5484-24318-0009-614: hyp=['THE', 'POWER', 'WHICH', 'DELIVERED', 'HIM', 'OVER', 'TO', 'DEATH', 'JUST', 'AT', 'THAT', 'MOMENT', 'WAS', 'NOT', 'NEMESIS', 'NO', 'IT', 'WAS', 'A', 'KINDLY', 'DEITY'] +5484-24318-0010-615: ref=['YET', 'IT', 'WAS', 'NO', 'ILLUSION', 'THAT', 'DECEIVED', 'HIM'] +5484-24318-0010-615: hyp=['YET', 'IT', 'WAS', 'NO', 'ILLUSION', 'THAT', 'DECEIVED', 'HIM'] +5484-24318-0011-616: ref=['AGAIN', 'HE', 'HEARD', 'THE', 'BELOVED', 'VOICE', 'AND', 'THIS', 'TIME', 'IT', 'ADDRESSED', 'NOT', 'ONLY', 'HIM', 'BUT', 'WITH', 'THE', 'UTMOST', 'HASTE', 'THE', 'COMMANDER', 'OF', 'THE', 'SOLDIERS'] +5484-24318-0011-616: hyp=['AGAIN', 'HE', 'HEARD', 'THE', 'BELOVED', 'VOICE', 'AND', 'THIS', 'TIME', 'IT', 'ADDRESSED', 'NOT', 'ONLY', 'HIM', 'BUT', 'WITH', 'THE', 'UTMOST', 'HASTE', 'THE', 'COMMANDER', 'OF', 'THE', 'SOLDIERS'] +5484-24318-0012-617: ref=['SOMETIMES', 'WITH', 'TOUCHING', 'ENTREATY', 'SOMETIMES', 'WITH', 'IMPERIOUS', 'COMMAND', 'SHE', 'PROTESTED', 'AFTER', 'GIVING', 'HIM', 'HER', 'NAME', 'THAT', 'THIS', 'MATTER', 'COULD', 'BE', 'NOTHING', 'BUT', 'AN', 'UNFORTUNATE', 'MISTAKE'] +5484-24318-0012-617: hyp=['SOMETIMES', 'WITH', 'THE', 'TOUCHING', 'ENTREATY', 'SOMETIMES', 'WITH', 'IMPERIOUS', 'COMMAND', 'SHE', 'PROTESTED', 'AFTER', 'GIVING', 'HIM', 'HER', 'NAME', 'THAT', 'THIS', 'MATTER', 'COULD', 'BE', 'NOTHING', 'BUT', 'AN', 'UNFORTUNATE', 'MISTAKE'] +5484-24318-0013-618: ref=['LASTLY', 'WITH', 'EARNEST', 'WARMTH', 'SHE', 'BESOUGHT', 'HIM', 'BEFORE', 'TAKING', 'THE', 'PRISONERS', 'AWAY', 'TO', 'PERMIT', 'HER', 'TO', 'SPEAK', 'TO', 'THE', 'COMMANDING', 'GENERAL', 'PHILIPPUS', 'HER', "FATHER'S", 'GUEST', 'WHO', 'SHE', 'WAS', 'CERTAIN', 'WAS', 'IN', 'THE', 'PALACE'] +5484-24318-0013-618: hyp=['LASTLY', 'WITH', 'EARNEST', 'WARMTH', 'SHE', 'BESOUGHT', 'HIM', 'BEFORE', 'TAKING', 'THE', 'PRISONERS', 'AWAY', 'TO', 'PERMIT', 'HER', 'TO', 'SPEAK', 'TO', 'THE', 'COMMANDING', 'GENERAL', 'PHILIPPUS', 'HER', "FATHER'S", 'GUEST', 'WHO', 'SHE', 'WAS', 'CERTAIN', 'WAS', 'IN', 'THE', 'PALACE'] +5484-24318-0014-619: ref=['CRIED', 'HERMON', 'IN', 'GRATEFUL', 'AGITATION', 'BUT', 'SHE', 'WOULD', 'NOT', 'LISTEN', 'TO', 'HIM', 'AND', 'FOLLOWED', 'THE', 'SOLDIER', 'WHOM', 'THE', 'CAPTAIN', 'DETAILED', 'TO', 'GUIDE', 'HER', 'INTO', 'THE', 'PALACE'] +5484-24318-0014-619: hyp=['CRIED', 'HERMAND', 'IN', 'GRATEFUL', 'AGITATION', 'BUT', 'SHE', 'WOULD', 'NOT', 'LISTEN', 'TO', 'HIM', 'AND', 'FOLLOW', 'THE', 'SOLDIER', 'WHOM', 'THE', 'CAPTAIN', 'DETAILED', 'TO', 'GUIDE', 
'HER', 'INTO', 'THE', 'PALACE'] +5484-24318-0015-620: ref=['TO', 'MORROW', 'YOU', 'SHALL', 'CONFESS', 'TO', 'ME', 'WHO', 'TREACHEROUSLY', 'DIRECTED', 'YOU', 'TO', 'THIS', 'DANGEROUS', 'PATH'] +5484-24318-0015-620: hyp=['TO', 'MORROW', 'YOU', 'SHALL', 'CONFESS', 'TO', 'ME', 'WHO', 'TREACHEROUSLY', 'DIRECTED', 'YOU', 'TO', 'THIS', 'DANGEROUS', 'PATH'] +5484-24318-0016-621: ref=['DAPHNE', 'AGAIN', 'PLEADED', 'FOR', 'THE', 'LIBERATION', 'OF', 'THE', 'PRISONERS', 'BUT', 'PHILIPPUS', 'SILENCED', 'HER', 'WITH', 'THE', 'GRAVE', 'EXCLAMATION', 'THE', 'ORDER', 'OF', 'THE', 'KING'] +5484-24318-0016-621: hyp=['DAPHNE', 'AGAIN', 'PLEADED', 'FOR', 'THE', 'LIBERATION', 'OF', 'THE', 'PRISONERS', 'BUT', "PHILIP'S", 'SILENCE', 'CHARRED', 'WITH', 'A', 'GRAVE', 'EXCLAMATION', 'THE', 'ORDER', 'OF', 'THE', 'KING'] +5484-24318-0017-622: ref=['AS', 'SOON', 'AS', 'THE', 'CAPTIVE', 'ARTIST', 'WAS', 'ALONE', 'WITH', 'THE', 'WOMAN', 'HE', 'LOVED', 'HE', 'CLASPED', 'HER', 'HAND', 'POURING', 'FORTH', 'INCOHERENT', 'WORDS', 'OF', 'THE', 'MOST', 'ARDENT', 'GRATITUDE', 'AND', 'WHEN', 'HE', 'FELT', 'HER', 'WARMLY', 'RETURN', 'THE', 'PRESSURE', 'HE', 'COULD', 'NOT', 'RESTRAIN', 'THE', 'DESIRE', 'TO', 'CLASP', 'HER', 'TO', 'HIS', 'HEART'] +5484-24318-0017-622: hyp=['AS', 'SOON', 'AS', 'THE', 'CAPTIVE', 'ARTIST', 'WAS', 'ALONE', 'WITH', 'A', 'WOMAN', 'HE', 'LOVED', 'HE', 'CLASPED', 'HER', 'HAND', 'POURING', 'FORTH', 'INCOHERENT', 'WORDS', 'OF', 'THE', 'MOST', 'ARDENT', 'GRATITUDE', 'AND', 'WHEN', 'HE', 'FELT', 'HER', 'WARMLY', 'RETURN', 'THE', 'PRESSURE', 'HE', 'COULD', 'NOT', 'RESTRAIN', 'THE', 'DESIRE', 'TO', 'CLASP', 'HER', 'TO', 'HIS', 'HEART'] +5484-24318-0018-623: ref=['IN', 'SPITE', 'OF', 'HIS', 'DEEP', 'MENTAL', 'DISTRESS', 'HE', 'COULD', 'HAVE', 'SHOUTED', 'ALOUD', 'IN', 'HIS', 'DELIGHT', 'AND', 'GRATITUDE'] +5484-24318-0018-623: hyp=['IN', 'SPITE', 'OF', 'HIS', 'DEEP', 'MENTAL', 'DISTRESS', 'HE', 'COULD', 'HAVE', 'SHOUTED', 'ALOUD', 'IN', 'HIS', 'DELIGHT', 'AND', 'GRATITUDE'] +5484-24318-0019-624: ref=['HE', 'MIGHT', 'NOW', 'HAVE', 'BEEN', 'PERMITTED', 'TO', 'BIND', 'FOREVER', 'TO', 'HIS', 'LIFE', 'THE', 'WOMAN', 'WHO', 'HAD', 'JUST', 'RESCUED', 'HIM', 'FROM', 'THE', 'GREATEST', 'DANGER', 'BUT', 'THE', 'CONFESSION', 'HE', 'MUST', 'MAKE', 'TO', 'HIS', 'FELLOW', 'ARTISTS', 'IN', 'THE', 'PALAESTRA', 'THE', 'FOLLOWING', 'MORNING', 'STILL', 'SEALED', 'HIS', 'LIPS', 'YET', 'IN', 'THIS', 'HOUR', 'HE', 'FELT', 'THAT', 'HE', 'WAS', 'UNITED', 'TO', 'HER', 'AND', 'OUGHT', 'NOT', 'TO', 'CONCEAL', 'WHAT', 'AWAITED', 'HIM', 'SO', 'OBEYING', 'A', 'STRONG', 'IMPULSE', 'HE', 'EXCLAIMED', 'YOU', 'KNOW', 'THAT', 'I', 'LOVE', 'YOU'] +5484-24318-0019-624: hyp=['HE', 'MIGHT', 'NOW', 'HAVE', 'BEEN', 'PERMITTED', 'TO', 'FIND', 'FOREVER', 'TO', 'HIS', 'LIFE', 'THE', 'WOMAN', 'WHO', 'HAD', 'JUST', 'RESCUED', 'HIM', 'FROM', 'THE', 'GREATEST', 'DANGER', 'BUT', 'THE', 'CONFESSION', 'HE', 'MUST', 'MAKE', 'TO', 'HIS', 'FELLOW', 'ARTISTS', 'IN', 'THE', 'PELUSTER', 'THE', 'FOLLOWING', 'MORNING', 'STILL', 'SEALED', 'HIS', 'LIPS', 'YET', 'IN', 'THIS', 'HOUR', 'HE', 'FELT', 'THAT', 'HE', 'WAS', 'UNITED', 'TO', 'HER', 'AND', 'OUGHT', 'NOT', 'TO', 'CONCEAL', 'WHAT', 'AWAITED', 'HIM', 'SO', 'OBEYING', 'A', 'STRONG', 'IMPULSE', 'HE', 'EXCLAIMED', 'YOU', 'KNOW', 'THAT', 'I', 'LOVE', 'YOU'] +5484-24318-0020-625: ref=['I', 'LOVE', 'YOU', 'AND', 'HAVE', 'LOVED', 'YOU', 'ALWAYS'] +5484-24318-0020-625: hyp=['I', 'LOVE', 'YOU', 'AND', 'HAVE', 'LOVED', 'YOU', 'ALWAYS'] +5484-24318-0021-626: ref=['DAPHNE', 'EXCLAIMED', 'TENDERLY', 'WHAT', 'MORE', 'IS', 'NEEDED'] 
+5484-24318-0021-626: hyp=['DAPHNEY', 'EXCLAIMED', 'TENDERLY', 'WHAT', 'MORE', "IT'S", 'NEEDED'] +5484-24318-0022-627: ref=['BUT', 'HERMON', 'WITH', 'DROOPING', 'HEAD', 'MURMURED', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'WHAT', 'I', 'AM', 'NOW'] +5484-24318-0022-627: hyp=['BUT', 'HERMAN', 'WITH', 'DROOPING', 'HEAD', 'MURMURED', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'WHAT', 'I', 'AM', 'NOW'] +5484-24318-0023-628: ref=['THEN', 'DAPHNE', 'RAISED', 'HER', 'FACE', 'TO', 'HIS', 'ASKING', 'SO', 'THE', 'DEMETER', 'IS', 'THE', 'WORK', 'OF', 'MYRTILUS'] +5484-24318-0023-628: hyp=['THEN', 'JAPLY', 'RAISED', 'HER', 'FACE', 'TO', 'HIS', 'ASKING', 'SO', 'THE', 'DEMETER', 'IS', 'THE', 'WORK', 'OF', 'MYRTULAS'] +5484-24318-0024-629: ref=['WHAT', 'A', 'TERRIBLE', 'ORDEAL', 'AGAIN', 'AWAITS', 'YOU'] +5484-24318-0024-629: hyp=['WHAT', 'A', 'TERRIBLE', 'ORDEAL', 'AGAIN', 'AWAITS', 'YOU'] +5484-24318-0025-630: ref=['AND', 'I', 'FOOL', 'BLINDED', 'ALSO', 'IN', 'MIND', 'COULD', 'BE', 'VEXED', 'WITH', 'YOU', 'FOR', 'IT'] +5484-24318-0025-630: hyp=['AND', 'I', 'FOOL', 'BLINDED', 'ALL', 'SOR', 'IN', 'MIND', 'COULD', 'BE', 'VEXED', 'WITH', 'YOU', 'FOR', 'IT'] +5484-24318-0026-631: ref=['BRING', 'THIS', 'BEFORE', 'YOUR', 'MIND', 'AND', 'EVERYTHING', 'ELSE', 'THAT', 'YOU', 'MUST', 'ACCEPT', 'WITH', 'IT', 'IF', 'YOU', 'CONSENT', 'WHEN', 'THE', 'TIME', 'ARRIVES', 'TO', 'BECOME', 'MINE', 'CONCEAL', 'AND', 'PALLIATE', 'NOTHING'] +5484-24318-0026-631: hyp=['BRING', 'THIS', 'BEFORE', 'YOUR', 'MIND', 'AND', 'EVERYTHING', 'ELSE', 'THAT', 'YOU', 'MUST', 'ACCEPT', 'WITH', 'IT', 'IF', 'YOU', 'CONSENT', 'WITH', 'THE', 'TIME', 'ARRIVES', 'TO', 'BECOME', 'MINE', 'CONCEAL', 'IMPALIATE', 'NOTHING'] +5484-24318-0027-632: ref=['SO', 'ARCHIAS', 'INTENDED', 'TO', 'LEAVE', 'THE', 'CITY', 'ON', 'ONE', 'OF', 'HIS', 'OWN', 'SHIPS', 'THAT', 'VERY', 'DAY'] +5484-24318-0027-632: hyp=['SORCAS', 'INTENDED', 'TO', 'LEAVE', 'THE', 'CITY', 'ON', 'ONE', 'OF', 'HIS', 'OWN', 'SHIPS', 'THAT', 'VERY', 'DAY'] +5484-24318-0028-633: ref=['HE', 'HIMSELF', 'ON', 'THE', 'WAY', 'TO', 'EXPOSE', 'HIMSELF', 'TO', 'THE', 'MALICE', 'AND', 'MOCKERY', 'OF', 'THE', 'WHOLE', 'CITY'] +5484-24318-0028-633: hyp=['HE', 'HIMSELF', 'ON', 'THE', 'WAY', 'TO', 'EXPOSE', 'HIMSELF', 'TO', 'THE', 'MALICE', 'AND', 'MOCKERY', 'OF', 'THE', 'WHOLE', 'CITY'] +5484-24318-0029-634: ref=['HIS', 'HEART', 'CONTRACTED', 'PAINFULLY', 'AND', 'HIS', 'SOLICITUDE', 'ABOUT', 'HIS', "UNCLE'S", 'FATE', 'INCREASED', 'WHEN', 'PHILIPPUS', 'INFORMED', 'HIM', 'THAT', 'THE', 'CONSPIRATORS', 'HAD', 'BEEN', 'ARRESTED', 'AT', 'THE', 'BANQUET', 'AND', 'HEADED', 'BY', 'AMYNTAS', 'THE', 'RHODIAN', 'CHRYSIPPUS', 'AND', 'PROCLUS', 'HAD', 'PERISHED', 'BY', 'THE', "EXECUTIONER'S", 'SWORD', 'AT', 'SUNRISE'] +5484-24318-0029-634: hyp=['HIS', 'HEART', 'CONTRACTED', 'PAINFULLY', 'AND', 'HIS', 'SOLICITUDE', 'ABOUT', 'HIS', "UNCLE'S", 'FATE', 'INCREASED', 'WHEN', 'PHILIPUS', 'INFORMED', 'HIM', 'THAT', 'THE', 'CONSPIRATORS', 'HAD', 'BEEN', 'ARRESTED', 'AT', 'THE', 'BANQUET', 'AND', 'HEADED', 'BY', 'A', 'MEANTES', 'THE', 'RODIAN', 'CHRYSIPPUS', 'AND', 'PROCLIS', 'HAD', 'PERISHED', 'BY', 'THE', "EXECUTIONER'S", 'SWORD', 'AT', 'SUNRISE'] +5484-24318-0030-635: ref=['BESIDES', 'HE', 'KNEW', 'THAT', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'WOULD', 'NOT', 'PART', 'FROM', 'HIM', 'WITHOUT', 'GRANTING', 'HIM', 'ONE', 'LAST', 'WORD'] +5484-24318-0030-635: hyp=['BESIDES', 'HE', 'KNEW', 'THAT', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'WOULD', 'NOT', 'PART', 'FROM', 'HIM', 'WITHOUT', 'GRANTING', 'HIM', 'ONE', 
'LAST', 'WORD'] +5484-24318-0031-636: ref=['ON', 'THE', 'WAY', 'HIS', 'HEART', 'THROBBED', 'ALMOST', 'TO', 'BURSTING'] +5484-24318-0031-636: hyp=['ON', 'THE', 'WAY', 'HIS', 'HARD', 'THROPPED', 'ALMOST', 'TO', 'BURSTING'] +5484-24318-0032-637: ref=['EVEN', "DAPHNE'S", 'IMAGE', 'AND', 'WHAT', 'THREATENED', 'HER', 'FATHER', 'AND', 'HER', 'WITH', 'HIM', 'RECEDED', 'FAR', 'INTO', 'THE', 'BACKGROUND'] +5484-24318-0032-637: hyp=['EVEN', 'AFTER', 'THESE', 'IMAGE', 'AND', 'WHAT', 'THREATENED', 'HER', 'FATHER', 'AND', 'HER', 'WITH', 'HIM', 'WAS', 'SEATED', 'FAR', 'INTO', 'THE', 'BACKGROUND'] +5484-24318-0033-638: ref=['HE', 'WAS', 'APPEARING', 'BEFORE', 'HIS', 'COMPANIONS', 'ONLY', 'TO', 'GIVE', 'TRUTH', 'ITS', 'JUST', 'DUE'] +5484-24318-0033-638: hyp=['HE', 'WAS', 'APPEARING', 'BEFORE', 'HIS', 'COMPANIONS', 'ONLY', 'TO', 'GIVE', 'TRUTH', 'ITS', 'JUST', 'DUE'] +5484-24318-0034-639: ref=['THE', 'EGYPTIAN', 'OBEYED', 'AND', 'HIS', 'MASTER', 'CROSSED', 'THE', 'WIDE', 'SPACE', 'STREWN', 'WITH', 'SAND', 'AND', 'APPROACHED', 'THE', 'STAGE', 'WHICH', 'HAD', 'BEEN', 'ERECTED', 'FOR', 'THE', 'FESTAL', 'PERFORMANCES', 'EVEN', 'HAD', 'HIS', 'EYES', 'RETAINED', 'THE', 'POWER', 'OF', 'SIGHT', 'HIS', 'BLOOD', 'WAS', 'COURSING', 'SO', 'WILDLY', 'THROUGH', 'HIS', 'VEINS', 'THAT', 'HE', 'MIGHT', 'PERHAPS', 'HAVE', 'BEEN', 'UNABLE', 'TO', 'DISTINGUISH', 'THE', 'STATUES', 'AROUND', 'HIM', 'AND', 'THE', 'THOUSANDS', 'OF', 'SPECTATORS', 'WHO', 'CROWDED', 'CLOSELY', 'TOGETHER', 'RICHLY', 'GARLANDED', 'THEIR', 'CHEEKS', 'GLOWING', 'WITH', 'ENTHUSIASM', 'SURROUNDED', 'THE', 'ARENA', 'HERMON'] +5484-24318-0034-639: hyp=['THE', 'EGYPTIAN', 'OBEY', 'AND', 'HIS', 'MASTER', 'CROSSED', 'THE', 'WIDE', 'SPACE', 'STREWN', 'WITH', 'SAND', 'AND', 'APPROACHED', 'THE', 'STAGE', 'WHICH', 'HAD', 'BEEN', 'ERECTED', 'FOR', 'THE', 'FEAST', 'HELL', 'PERFORMANCES', 'EVEN', 'HAD', 'HIS', 'EYES', 'RETAINED', 'THE', 'POWER', 'OF', 'SIGHT', 'HIS', 'BLOOD', 'WAS', 'CURSING', 'SO', 'WIDELY', 'THROUGH', 'HIS', 'VEINS', 'THAT', 'HE', 'MIGHT', 'PERHAPS', 'HAVE', 'BEEN', 'UNABLE', 'TO', 'DISTINGUISH', 'THE', 'STATUES', 'AROUND', 'HIM', 'AND', 'THE', 'THOUSANDS', 'OF', 'SPECTATORS', 'WHO', 'CROWDED', 'CLOSELY', 'TOGETHER', 'RICHLY', 'GARLANDED', 'THEIR', 'CHIEFS', 'GLOWING', 'WITH', 'ENTHUSIASM', 'SURROUNDED', 'THE', 'ARENA', 'HERMAN'] +5484-24318-0035-640: ref=['SHOUTED', 'HIS', 'FRIEND', 'SOTELES', 'IN', 'JOYFUL', 'SURPRISE', 'IN', 'THE', 'MIDST', 'OF', 'THIS', 'PAINFUL', 'WALK', 'HERMON'] +5484-24318-0035-640: hyp=['SHOUTED', 'HIS', 'FRIEND', 'SORTILESS', 'AND', 'JOYFUL', 'SURPRISE', 'IN', 'THE', 'MIDST', 'OF', 'HIS', 'PAINFUL', 'WALK', 'HAREMON'] +5484-24318-0036-641: ref=['EVEN', 'WHILE', 'HE', 'BELIEVED', 'HIMSELF', 'TO', 'BE', 'THE', 'CREATOR', 'OF', 'THE', 'DEMETER', 'HE', 'HAD', 'BEEN', 'SERIOUSLY', 'TROUBLED', 'BY', 'THE', 'PRAISE', 'OF', 'SO', 'MANY', 'CRITICS', 'BECAUSE', 'IT', 'HAD', 'EXPOSED', 'HIM', 'TO', 'THE', 'SUSPICION', 'OF', 'HAVING', 'BECOME', 'FAITHLESS', 'TO', 'HIS', 'ART', 'AND', 'HIS', 'NATURE'] +5484-24318-0036-641: hyp=['EVEN', 'WHILE', 'HE', 'BELIEVED', 'HIMSELF', 'TO', 'BE', 'THE', 'CREATOR', 'OF', 'THE', 'DEMETER', 'HE', 'HAD', 'BEEN', 'SERIOUSLY', 'TROUBLED', 'BY', 'THE', 'PRAISE', 'OF', 'SO', 'MANY', 'CRITICS', 'BECAUSE', 'IT', 'HAD', 'EXPOSED', 'HIM', 'TO', 'THE', 'SUSPICION', 'OF', 'HAVING', 'BECOME', 'FAITHLESS', 'TO', 'HIS', 'ART', 'AND', 'HIS', 'NATURE'] +5484-24318-0037-642: ref=['HONOUR', 'TO', 'MYRTILUS', 'AND', 'HIS', 'ART', 'BUT', 'HE', 'TRUSTED', 'THIS', 'NOBLE', 'FESTAL', 'ASSEMBLAGE', 'WOULD', 'PARDON', 
'THE', 'UNINTENTIONAL', 'DECEPTION', 'AND', 'AID', 'HIS', 'PRAYER', 'FOR', 'RECOVERY'] +5484-24318-0037-642: hyp=['HONOUR', 'TO', 'MYRTULAS', 'AND', 'HIS', 'ART', 'BUT', 'HE', 'TRUSTED', 'THE', 'SNOWBLE', 'FEAST', 'ELL', 'ASSEMBLAGE', 'WOULD', 'PARDON', 'THE', 'UNINTENTIONAL', 'DECEPTION', 'AND', 'AID', 'HIS', 'PRAYER', 'FOR', 'RECOVERY'] +5764-299665-0000-405: ref=['AFTERWARD', 'IT', 'WAS', 'SUPPOSED', 'THAT', 'HE', 'WAS', 'SATISFIED', 'WITH', 'THE', 'BLOOD', 'OF', 'OXEN', 'LAMBS', 'AND', 'DOVES', 'AND', 'THAT', 'IN', 'EXCHANGE', 'FOR', 'OR', 'ON', 'ACCOUNT', 'OF', 'THESE', 'SACRIFICES', 'THIS', 'GOD', 'GAVE', 'RAIN', 'SUNSHINE', 'AND', 'HARVEST'] +5764-299665-0000-405: hyp=['AFTERWARD', 'IT', 'WAS', 'SUPPOSED', 'THAT', 'HE', 'WAS', 'SATISFIED', 'WITH', 'THE', 'BLOOD', 'OF', 'OXEN', 'LAMPS', 'AND', 'DOVES', 'AND', 'THAT', 'IN', 'EXCHANGE', 'FOR', 'OR', 'IN', 'ACCOUNT', 'OF', 'THESE', 'SACRIFICES', 'THESE', 'GOD', 'GAVE', 'REIGN', 'SUNSHINE', 'AND', 'HARVEST'] +5764-299665-0001-406: ref=['WHETHER', 'HE', 'WAS', 'THE', 'CREATOR', 'OF', 'YOURSELF', 'AND', 'MYSELF'] +5764-299665-0001-406: hyp=['WHETHER', 'HE', 'WAS', 'THE', 'CREATOR', 'OF', 'YOUR', 'SELF', 'AND', 'MYSELF'] +5764-299665-0002-407: ref=['WHETHER', 'ANY', 'PRAYER', 'WAS', 'EVER', 'ANSWERED'] +5764-299665-0002-407: hyp=['WEATHER', 'AND', 'A', 'PRAYER', 'WAS', 'EVER', 'ANSWERED'] +5764-299665-0003-408: ref=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'INTELLECTUALLY', 'INFERIOR'] +5764-299665-0003-408: hyp=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'INTELLECTUAL', 'INFERIOR'] +5764-299665-0004-409: ref=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'DEFORMED', 'AND', 'HELPLESS', 'WHY', 'DID', 'HE', 'CREATE', 'THE', 'CRIMINAL', 'THE', 'IDIOTIC', 'THE', 'INSANE'] +5764-299665-0004-409: hyp=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'DEFORMED', 'AND', 'HELPLESS', 'WHY', 'DID', 'HE', 'CREATE', 'THE', 'CRIMINAL', 'THE', 'IDIOTIC', 'THE', 'INSANE'] +5764-299665-0005-410: ref=['ARE', 'THE', 'FAILURES', 'UNDER', 'OBLIGATION', 'TO', 'THEIR', 'CREATOR'] +5764-299665-0005-410: hyp=['ARE', 'THE', 'FAILURES', 'AND', 'THE', 'OBLIGATION', 'TO', 'THEIR', 'CREATOR'] +5764-299665-0006-411: ref=['IS', 'HE', 'RESPONSIBLE', 'FOR', 'ALL', 'THE', 'WARS', 'THAT', 'HAVE', 'BEEN', 'WAGED', 'FOR', 'ALL', 'THE', 'INNOCENT', 'BLOOD', 'THAT', 'HAS', 'BEEN', 'SHED'] +5764-299665-0006-411: hyp=['HIS', 'IRRESPONSIBLE', 'FOR', 'ALL', 'THE', 'WALLS', 'THAT', 'HAVE', 'BEEN', 'WAGED', 'FOR', 'ALL', 'THE', 'INNOCENT', 'BLOOD', 'THAT', 'HAS', 'BEEN', 'SHED'] +5764-299665-0007-412: ref=['IS', 'HE', 'RESPONSIBLE', 'FOR', 'THE', 'CENTURIES', 'OF', 'SLAVERY', 'FOR', 'THE', 'BACKS', 'THAT', 'HAVE', 'BEEN', 'SCARRED', 'WITH', 'THE', 'LASH', 'FOR', 'THE', 'BABES', 'THAT', 'HAVE', 'BEEN', 'SOLD', 'FROM', 'THE', 'BREASTS', 'OF', 'MOTHERS', 'FOR', 'THE', 'FAMILIES', 'THAT', 'HAVE', 'BEEN', 'SEPARATED', 'AND', 'DESTROYED'] +5764-299665-0007-412: hyp=['IF', 'YOU', 'RESPONSIBLE', 'FOR', 'THE', 'CENTURIES', 'OF', 'SLAVERY', 'FOR', 'THE', 'BACKS', 'THAT', 'HAVE', 'BEEN', 'SCARRED', 'WITH', 'A', 'LASH', 'FOR', 'THE', 'BABE', 'THAT', 'HAVE', 'BEEN', 'SOLD', 'FROM', 'THE', 'BREASTS', 'OF', 'MOTHERS', 'FOR', 'THE', 'FAMILIES', 'THAT', 'HAVE', 'BEEN', 'SEPARATED', 'AND', 'DESTROYED'] +5764-299665-0008-413: ref=['IS', 'THIS', 'GOD', 'RESPONSIBLE', 'FOR', 'RELIGIOUS', 'PERSECUTION', 'FOR', 'THE', 'INQUISITION', 'FOR', 'THE', 'THUMB', 'SCREW', 'AND', 'RACK', 'AND', 'FOR', 'ALL', 'THE', 'INSTRUMENTS', 'OF', 'TORTURE'] +5764-299665-0008-413: hyp=['IS', 'THESE', 'GOT', 'RESPONSIBLE', 'FOR', 'RELIGIOUS', 'PERSECUTION', 
'FOR', 'THE', 'INQUISITION', 'FOR', 'THE', "TENTH'S", 'CREW', 'AND', 'RACK', 'AND', 'FOR', 'ALL', 'THE', 'INSTRUMENTS', 'OF', 'TORTURE'] +5764-299665-0009-414: ref=['DID', 'THIS', 'GOD', 'ALLOW', 'THE', 'CRUEL', 'AND', 'VILE', 'TO', 'DESTROY', 'THE', 'BRAVE', 'AND', 'VIRTUOUS'] +5764-299665-0009-414: hyp=['THESE', 'GOT', 'THE', 'LOAD', 'THE', 'CRUEL', 'AND', 'VILE', 'TO', 'DESTROY', 'THE', 'BRAVE', 'AND', 'VIRTUOUS'] +5764-299665-0010-415: ref=['DID', 'HE', 'ALLOW', 'TYRANTS', 'TO', 'SHED', 'THE', 'BLOOD', 'OF', 'PATRIOTS'] +5764-299665-0010-415: hyp=['DID', 'HE', 'ALONE', 'TYRANTS', 'TO', 'SHED', 'A', 'BLOOD', 'OF', 'PATRIOTS'] +5764-299665-0011-416: ref=['CAN', 'WE', 'CONCEIVE', 'OF', 'A', 'DEVIL', 'BASE', 'ENOUGH', 'TO', 'PREFER', 'HIS', 'ENEMIES', 'TO', 'HIS', 'FRIENDS'] +5764-299665-0011-416: hyp=['CAN', 'WE', 'CONCEIVE', 'OF', 'A', 'DEVIL', 'BASE', 'ENOUGH', 'TO', 'PREFER', 'HIS', 'ENEMIES', 'TO', 'HIS', 'FRIENDS'] +5764-299665-0012-417: ref=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'THE', 'WILD', 'BEASTS', 'THAT', 'DEVOUR', 'HUMAN', 'BEINGS', 'FOR', 'THE', 'FANGED', 'SERPENTS', 'WHOSE', 'BITE', 'IS', 'DEATH'] +5764-299665-0012-417: hyp=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'THE', 'WILD', 'BEASTS', 'THAT', 'THE', 'FOUR', 'HUMAN', 'BEINGS', 'FOR', 'THE', 'FACT', 'SERPENTS', 'WHOSE', 'BITE', 'ITS', 'DEATH'] +5764-299665-0013-418: ref=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'A', 'WORLD', 'WHERE', 'LIFE', 'FEEDS', 'ON', 'LIFE'] +5764-299665-0013-418: hyp=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'A', 'WORLD', 'WILL', 'LIE', 'FEATS', 'ON', 'LIFE'] +5764-299665-0014-419: ref=['DID', 'INFINITE', 'WISDOM', 'INTENTIONALLY', 'PRODUCE', 'THE', 'MICROSCOPIC', 'BEASTS', 'THAT', 'FEED', 'UPON', 'THE', 'OPTIC', 'NERVE', 'THINK', 'OF', 'BLINDING', 'A', 'MAN', 'TO', 'SATISFY', 'THE', 'APPETITE', 'OF', 'A', 'MICROBE'] +5764-299665-0014-419: hyp=['DID', 'INFINITE', 'WISDOM', 'INTENTIONALLY', 'PRODUCED', 'THE', 'MICROSCOPIC', 'BEASTS', 'THAT', 'FEED', 'UPON', 'THE', 'OPTIC', 'NURSE', 'THINK', 'OF', 'BLINDING', 'A', 'MAN', 'TO', 'SATISFY', 'THE', 'APPETITE', 'OF', 'A', 'MICROBE'] +5764-299665-0015-420: ref=['FEAR', 'BUILDS', 'THE', 'ALTAR', 'AND', 'OFFERS', 'THE', 'SACRIFICE'] +5764-299665-0015-420: hyp=['FEAR', 'BIDS', 'THE', 'ALTAR', 'AND', 'OFFERS', 'THE', 'SACRIFICE'] +5764-299665-0016-421: ref=['FEAR', 'ERECTS', 'THE', 'CATHEDRAL', 'AND', 'BOWS', 'THE', 'HEAD', 'OF', 'MAN', 'IN', 'WORSHIP'] +5764-299665-0016-421: hyp=['FEAR', 'ERECTS', 'THE', 'CATEURAL', 'AND', 'BOWS', 'THE', 'HEAD', 'OF', 'MAN', 'IN', 'WORSHIP'] +5764-299665-0017-422: ref=['LIPS', 'RELIGIOUS', 'AND', 'FEARFUL', 'TREMBLINGLY', 'REPEAT', 'THIS', 'PASSAGE', 'THOUGH', 'HE', 'SLAY', 'ME', 'YET', 'WILL', 'I', 'TRUST', 'HIM'] +5764-299665-0017-422: hyp=['LITS', 'RELIGIOUS', 'AND', 'FEARFUL', 'TREMBLINGLY', 'REPEAT', 'THIS', 'PASSAGE', 'THOUGH', 'HE', 'SLAY', 'ME', 'YET', 'WE', 'LIKE', 'TRUST', 'HIM'] +5764-299665-0018-423: ref=['CAN', 'WE', 'SAY', 'THAT', 'HE', 'CARED', 'FOR', 'THE', 'CHILDREN', 'OF', 'MEN'] +5764-299665-0018-423: hyp=['CAN', 'WE', 'SAY', 'THAT', 'HE', 'CARED', 'FOR', 'THE', 'CHILDREN', 'OF', 'MEN'] +5764-299665-0019-424: ref=['CAN', 'WE', 'SAY', 'THAT', 'HIS', 'MERCY', 'ENDURETH', 'FOREVER'] +5764-299665-0019-424: hyp=['CAN', 'WE', 'SAY', 'THAT', 'HIS', 'MERCY', 'AND', 'DURE', 'FOR', 'EVER'] +5764-299665-0020-425: ref=['DO', 'WE', 'PROVE', 'HIS', 'GOODNESS', 'BY', 'SHOWING', 'THAT', 'HE', 'HAS', 'OPENED', 'THE', 'EARTH', 'AND', 'SWALLOWED', 'THOUSANDS', 'OF', 'HIS', 'HELPLESS', 'CHILDREN', 'OR', 'THAT', 'WITH', 'THE', 
'VOLCANOES', 'HE', 'HAS', 'OVERWHELMED', 'THEM', 'WITH', 'RIVERS', 'OF', 'FIRE'] +5764-299665-0020-425: hyp=['THE', 'REPROVE', 'HIS', 'GOODNESS', 'BY', 'SHOWING', 'THAT', 'HE', 'HAS', 'OPENED', 'THE', 'EARTH', 'AND', 'SWALLOWED', 'THOUSAND', 'OF', 'HIS', 'HELPLESS', 'CHILDREN', 'ALL', 'THAT', 'WIT', 'THE', 'VOLCANOES', 'HE', 'HAS', 'OVERWHELMED', 'THEM', 'WITH', 'RIVERS', 'OF', 'FIRE'] +5764-299665-0021-426: ref=['WAS', 'THERE', 'GOODNESS', 'WAS', 'THERE', 'WISDOM', 'IN', 'THIS'] +5764-299665-0021-426: hyp=['WAS', 'THERE', 'GOODNESS', 'WAS', 'THEIR', 'WISDOM', 'IN', 'THIS'] +5764-299665-0022-427: ref=['OUGHT', 'THE', 'SUPERIOR', 'RACES', 'TO', 'THANK', 'GOD', 'THAT', 'THEY', 'ARE', 'NOT', 'THE', 'INFERIOR'] +5764-299665-0022-427: hyp=['ALL', 'DISAPPEAR', 'RAYS', 'TWO', 'THANK', 'GOT', 'THAT', 'THEY', 'ARE', 'NOT', 'THE', 'INFERIOR'] +5764-299665-0023-428: ref=['MOST', 'PEOPLE', 'CLING', 'TO', 'THE', 'SUPERNATURAL'] +5764-299665-0023-428: hyp=['MOST', 'PEOPLE', 'CLINK', 'TO', 'THE', 'SUPERNATURAL'] +5764-299665-0024-429: ref=['IF', 'THEY', 'GIVE', 'UP', 'ONE', 'GOD', 'THEY', 'IMAGINE', 'ANOTHER'] +5764-299665-0024-429: hyp=['IF', 'THEY', 'GIVE', 'UP', 'ONE', 'GOD', 'THEY', 'IMAGINE', 'ANOTHER'] +5764-299665-0025-430: ref=['WHAT', 'IS', 'THIS', 'POWER'] +5764-299665-0025-430: hyp=['WHAT', 'IS', 'THIS', 'POWER'] +5764-299665-0026-431: ref=['MAN', 'ADVANCES', 'AND', 'NECESSARILY', 'ADVANCES', 'THROUGH', 'EXPERIENCE'] +5764-299665-0026-431: hyp=['MAN', 'ADVANCES', 'AND', 'NECESSARILY', 'ADVANCES', 'TO', 'EXPERIENCE'] +5764-299665-0027-432: ref=['A', 'MAN', 'WISHING', 'TO', 'GO', 'TO', 'A', 'CERTAIN', 'PLACE', 'COMES', 'TO', 'WHERE', 'THE', 'ROAD', 'DIVIDES'] +5764-299665-0027-432: hyp=['A', 'MAN', 'WISHING', 'TO', 'GO', 'TO', 'A', 'CERTAIN', 'PLACE', 'COME', 'TO', 'WHERE', 'THE', 'RULE', 'DIVIDES'] +5764-299665-0028-433: ref=['HE', 'HAS', 'TRIED', 'THAT', 'ROAD', 'AND', 'KNOWS', 'THAT', 'IT', 'IS', 'THE', 'WRONG', 'ROAD'] +5764-299665-0028-433: hyp=['HE', 'HAS', 'TRIED', 'THAT', 'ROAD', 'AND', 'KNOWS', 'THAT', 'IT', 'IS', 'THE', 'WRONG', 'ROAD'] +5764-299665-0029-434: ref=['A', 'CHILD', 'CHARMED', 'BY', 'THE', 'BEAUTY', 'OF', 'THE', 'FLAME', 'GRASPS', 'IT', 'WITH', 'ITS', 'DIMPLED', 'HAND'] +5764-299665-0029-434: hyp=['A', 'CHILD', 'SHONE', 'BY', 'THE', 'BEAUTY', 'OF', 'THE', 'FLAME', 'GRASPED', 'IT', 'WITH', 'HIS', 'DIMPLED', 'HAND'] +5764-299665-0030-435: ref=['THE', 'POWER', 'THAT', 'WORKS', 'FOR', 'RIGHTEOUSNESS', 'HAS', 'TAUGHT', 'THE', 'CHILD', 'A', 'LESSON'] +5764-299665-0030-435: hyp=['THE', 'POWER', 'THAT', 'WORK', 'FOR', 'RIGHTEOUSNESS', 'HAD', 'TAUGHT', 'THE', 'CHILD', 'A', 'LESSON'] +5764-299665-0031-436: ref=['IT', 'IS', 'A', 'RESULT'] +5764-299665-0031-436: hyp=['IT', 'IS', 'A', 'RESULT'] +5764-299665-0032-437: ref=['IT', 'IS', 'INSISTED', 'BY', 'THESE', 'THEOLOGIANS', 'AND', 'BY', 'MANY', 'OF', 'THE', 'SO', 'CALLED', 'PHILOSOPHERS', 'THAT', 'THIS', 'MORAL', 'SENSE', 'THIS', 'SENSE', 'OF', 'DUTY', 'OF', 'OBLIGATION', 'WAS', 'IMPORTED', 'AND', 'THAT', 'CONSCIENCE', 'IS', 'AN', 'EXOTIC'] +5764-299665-0032-437: hyp=['IT', 'IS', 'INSISTED', 'BY', 'THESE', 'THEOLOGIANS', 'AND', 'BY', 'MANY', 'OF', 'THE', 'SOUL', 'CALLED', 'PHILOSOPHERS', 'THAT', 'THIS', 'MORAL', 'SENSE', 'THIS', 'SENSE', 'OF', 'DUTY', 'OF', 'OBLIGATION', 'WAS', 'IMPORTED', 'AND', 'THAT', 'CONSCIENCE', 'IS', 'AN', 'EXOTIC'] +5764-299665-0033-438: ref=['WE', 'LIVE', 'TOGETHER', 'IN', 'FAMILIES', 'TRIBES', 'AND', 'NATIONS'] +5764-299665-0033-438: hyp=['REALLY', 'TOGETHER', 'IN', 'FAMILIES', 'TRIBES', 'AND', 'NATIONS'] 
+5764-299665-0034-439: ref=['THEY', 'ARE', 'PRAISED', 'ADMIRED', 'AND', 'RESPECTED'] +5764-299665-0034-439: hyp=['THEY', 'ARE', 'PRAISED', 'ADMIRED', 'AND', 'RESPECTED'] +5764-299665-0035-440: ref=['THEY', 'ARE', 'REGARDED', 'AS', 'GOOD', 'THAT', 'IS', 'TO', 'SAY', 'AS', 'MORAL'] +5764-299665-0035-440: hyp=['THEY', 'ARE', 'REGARDED', 'AS', 'GOOD', 'THAT', 'IS', 'TO', 'SAY', 'AS', 'MORAL'] +5764-299665-0036-441: ref=['THE', 'MEMBERS', 'WHO', 'ADD', 'TO', 'THE', 'MISERY', 'OF', 'THE', 'FAMILY', 'THE', 'TRIBE', 'OR', 'THE', 'NATION', 'ARE', 'CONSIDERED', 'BAD', 'MEMBERS'] +5764-299665-0036-441: hyp=['THE', 'MEMBERS', 'WHO', 'ADD', 'TO', 'THE', 'MISERY', 'OF', 'THE', 'FAMILY', 'THE', 'TRIBE', 'OF', 'THE', 'NATION', 'ARE', 'CONSIDERED', 'BAD', 'MEMBERS'] +5764-299665-0037-442: ref=['THE', 'GREATEST', 'OF', 'HUMAN', 'BEINGS', 'HAS', 'SAID', 'CONSCIENCE', 'IS', 'BORN', 'OF', 'LOVE'] +5764-299665-0037-442: hyp=['THE', 'GREATEST', 'OF', 'HUMAN', 'BEINGS', 'HAD', 'SAID', 'CONSCIENCE', 'IS', 'BORN', 'OF', 'LOVE'] +5764-299665-0038-443: ref=['AS', 'PEOPLE', 'ADVANCE', 'THE', 'REMOTE', 'CONSEQUENCES', 'ARE', 'PERCEIVED'] +5764-299665-0038-443: hyp=['AS', 'PEOPLE', 'ADVANCE', 'THE', 'REMOTE', 'CONSEQUENCES', 'ARE', 'PERCEIVED'] +5764-299665-0039-444: ref=['THE', 'IMAGINATION', 'IS', 'CULTIVATED'] +5764-299665-0039-444: hyp=['THE', 'IMAGINATION', 'IS', 'CULTIVATED'] +5764-299665-0040-445: ref=['A', 'MAN', 'PUTS', 'HIMSELF', 'IN', 'THE', 'PLACE', 'OF', 'ANOTHER'] +5764-299665-0040-445: hyp=['A', 'MAN', 'BUT', 'HIMSELF', 'IN', 'THE', 'PLACE', 'OF', 'ANOTHER'] +5764-299665-0041-446: ref=['THE', 'SENSE', 'OF', 'DUTY', 'BECOMES', 'STRONGER', 'MORE', 'IMPERATIVE'] +5764-299665-0041-446: hyp=['THE', 'SENSE', 'OF', 'DUTY', 'BECOMES', 'STRONGER', 'MORE', 'IMPERATIVE'] +5764-299665-0042-447: ref=['MAN', 'JUDGES', 'HIMSELF'] +5764-299665-0042-447: hyp=['MAN', 'JUDGES', 'HIMSELF'] +5764-299665-0043-448: ref=['IN', 'ALL', 'THIS', 'THERE', 'IS', 'NOTHING', 'SUPERNATURAL'] +5764-299665-0043-448: hyp=['IN', 'ALL', 'THIS', 'THERE', 'IS', 'NOTHING', 'SUPERNATURAL'] +5764-299665-0044-449: ref=['MAN', 'HAS', 'DECEIVED', 'HIMSELF'] +5764-299665-0044-449: hyp=['MAN', 'HAS', 'DECEIVED', 'HIMSELF'] +5764-299665-0045-450: ref=['HAS', 'CHRISTIANITY', 'DONE', 'GOOD'] +5764-299665-0045-450: hyp=['HISTORY', 'STUNNITY', 'DONE', 'GOOD'] +5764-299665-0046-451: ref=['WHEN', 'THE', 'CHURCH', 'HAD', 'CONTROL', 'WERE', 'MEN', 'MADE', 'BETTER', 'AND', 'HAPPIER'] +5764-299665-0046-451: hyp=['WHEN', 'THE', 'CHURCH', 'HAD', 'CONTROL', 'WERE', 'MEN', 'MADE', 'BETTER', 'AND', 'HAPPIER'] +5764-299665-0047-452: ref=['WHAT', 'HAS', 'RELIGION', 'DONE', 'FOR', 'HUNGARY', 'OR', 'AUSTRIA'] +5764-299665-0047-452: hyp=['WHAT', 'HAS', 'RELIGION', 'DONE', 'FOR', 'HUNGARY', 'O', 'AUSTRIA'] +5764-299665-0048-453: ref=['COULD', 'THESE', 'COUNTRIES', 'HAVE', 'BEEN', 'WORSE', 'WITHOUT', 'RELIGION'] +5764-299665-0048-453: hyp=['GOOD', 'THESE', 'COUNTRIES', 'HAVE', 'BEEN', 'WORSE', 'WITHOUT', 'RELIGION'] +5764-299665-0049-454: ref=['COULD', 'THEY', 'HAVE', 'BEEN', 'WORSE', 'HAD', 'THEY', 'HAD', 'ANY', 'OTHER', 'RELIGION', 'THAN', 'CHRISTIANITY'] +5764-299665-0049-454: hyp=['COULD', 'THEY', 'HAVE', 'BEEN', 'WORSE', 'HAD', 'THEY', 'HAD', 'ANY', 'OTHER', 'RELIGION', 'THAN', 'CHRISTIANITY'] +5764-299665-0050-455: ref=['WHAT', 'DID', 'CHRISTIANITY', 'DO', 'FOR', 'THEM'] +5764-299665-0050-455: hyp=['WHAT', 'DID', 'CHRISTIANITY', 'DO', 'FAULT', 'THEM'] +5764-299665-0051-456: ref=['THEY', 'HATED', 'PLEASURE'] +5764-299665-0051-456: hyp=['THEY', 'HATED', 'PLEASURE'] 
+5764-299665-0052-457: ref=['THEY', 'MUFFLED', 'ALL', 'THE', 'BELLS', 'OF', 'GLADNESS'] +5764-299665-0052-457: hyp=['THEY', 'MUFFLED', 'ALL', 'THE', 'BELLS', 'OF', 'GLADNESS'] +5764-299665-0053-458: ref=['THE', 'RELIGION', 'OF', 'THE', 'PURITAN', 'WAS', 'AN', 'UNADULTERATED', 'CURSE'] +5764-299665-0053-458: hyp=['DURING', 'RELIGION', 'OF', 'THE', 'PURITAN', 'WAS', 'AN', 'ADULTERATED', 'CURSE'] +5764-299665-0054-459: ref=['THE', 'PURITAN', 'BELIEVED', 'THE', 'BIBLE', 'TO', 'BE', 'THE', 'WORD', 'OF', 'GOD', 'AND', 'THIS', 'BELIEF', 'HAS', 'ALWAYS', 'MADE', 'THOSE', 'WHO', 'HELD', 'IT', 'CRUEL', 'AND', 'WRETCHED'] +5764-299665-0054-459: hyp=['THE', 'PURITAN', 'BELIEF', 'THE', 'BIBLE', 'TO', 'BE', 'THE', 'WORLD', 'OF', 'GOD', 'AND', 'THIS', 'BELIEF', 'HAS', 'ALWAYS', 'MADE', 'THOSE', 'WHO', 'HELD', 'IT', 'CRUEL', 'AND', 'WRETCHED'] +5764-299665-0055-460: ref=['LET', 'ME', 'REFER', 'TO', 'JUST', 'ONE', 'FACT', 'SHOWING', 'THE', 'INFLUENCE', 'OF', 'A', 'BELIEF', 'IN', 'THE', 'BIBLE', 'ON', 'HUMAN', 'BEINGS'] +5764-299665-0055-460: hyp=['LET', 'ME', 'REFER', 'TO', 'JUST', 'ONE', 'FACT', 'SHOWING', 'THE', 'INFLUENCE', 'OF', 'A', 'BELIEF', 'IN', 'THE', 'BIBLE', 'ON', 'HUMAN', 'BEINGS'] +5764-299665-0056-461: ref=['THE', 'QUEEN', 'RECEIVED', 'THE', 'BIBLE', 'KISSED', 'IT', 'AND', 'PLEDGED', 'HERSELF', 'TO', 'DILIGENTLY', 'READ', 'THEREIN'] +5764-299665-0056-461: hyp=['THE', 'QUEEN', 'RECEIVED', 'THE', 'BIBLE', 'KISSED', 'IT', 'AND', 'PLEDGED', 'HERSELF', 'TO', 'DILIGENTLY', 'READ', 'THEREIN'] +5764-299665-0057-462: ref=['IN', 'OTHER', 'WORDS', 'IT', 'WAS', 'JUST', 'AS', 'FIENDISH', 'JUST', 'AS', 'INFAMOUS', 'AS', 'THE', 'CATHOLIC', 'SPIRIT'] +5764-299665-0057-462: hyp=['IN', 'OTHER', 'WORDS', 'IT', 'WAS', 'JUST', 'AS', 'FIENDISH', 'JUST', 'AS', 'IN', 'FAMOUS', 'AS', 'THE', 'CATTLE', 'EXPERIMENT'] +5764-299665-0058-463: ref=['HAS', 'THE', 'BIBLE', 'MADE', 'THE', 'PEOPLE', 'OF', 'GEORGIA', 'KIND', 'AND', 'MERCIFUL'] +5764-299665-0058-463: hyp=['HAS', 'THE', 'VARIABLE', 'MADE', 'THE', 'PEOPLE', 'OF', 'GEORGE', 'A', 'KIND', 'AND', 'MERCIFUL'] +5764-299665-0059-464: ref=['RELIGION', 'HAS', 'BEEN', 'TRIED', 'AND', 'IN', 'ALL', 'COUNTRIES', 'IN', 'ALL', 'TIMES', 'HAS', 'FAILED'] +5764-299665-0059-464: hyp=['WHO', 'LEGION', 'HAVE', 'BEEN', 'TRIED', 'AND', 'IN', 'ALL', 'COUNTRIES', 'IN', 'ALL', 'TIMES', 'BEST', 'FAILED'] +5764-299665-0060-465: ref=['RELIGION', 'HAS', 'ALWAYS', 'BEEN', 'THE', 'ENEMY', 'OF', 'SCIENCE', 'OF', 'INVESTIGATION', 'AND', 'THOUGHT'] +5764-299665-0060-465: hyp=['RELIGION', 'HATH', 'ALWAYS', 'BEEN', 'THE', 'ENEMY', 'OF', 'SCIENCE', 'OF', 'INVESTIGATION', 'AND', 'THOUGHT'] +5764-299665-0061-466: ref=['RELIGION', 'HAS', 'NEVER', 'MADE', 'MAN', 'FREE'] +5764-299665-0061-466: hyp=['RELIGIONISTS', 'NEVER', 'MADE', 'MEN', 'FREE'] +5764-299665-0062-467: ref=['IT', 'HAS', 'NEVER', 'MADE', 'MAN', 'MORAL', 'TEMPERATE', 'INDUSTRIOUS', 'AND', 'HONEST'] +5764-299665-0062-467: hyp=['HE', 'JUST', 'NEVER', 'MADE', 'MAN', 'MORAL', 'TEMPERATE', 'INDUSTRIOUS', 'AND', 'HONEST'] +5764-299665-0063-468: ref=['ARE', 'CHRISTIANS', 'MORE', 'TEMPERATE', 'NEARER', 'VIRTUOUS', 'NEARER', 'HONEST', 'THAN', 'SAVAGES'] +5764-299665-0063-468: hyp=['AH', 'CHRISTIAN', 'SMALL', 'TEMPERATE', 'NEARER', 'VIRTUOUS', 'NEARER', 'HONEST', 'THAN', 'SAVAGES'] +5764-299665-0064-469: ref=['CAN', 'WE', 'CURE', 'DISEASE', 'BY', 'SUPPLICATION'] +5764-299665-0064-469: hyp=['CAN', 'WE', 'CURE', 'DISEASE', 'BY', 'SUPPLICATION'] +5764-299665-0065-470: ref=['CAN', 'WE', 'RECEIVE', 'VIRTUE', 'OR', 'HONOR', 'AS', 'ALMS'] 
+5764-299665-0065-470: hyp=['CAN', 'WE', 'RECEIVE', 'VIRTUE', 'OR', 'HANNER', 'AS', 'ALMS'] +5764-299665-0066-471: ref=['RELIGION', 'RESTS', 'ON', 'THE', 'IDEA', 'THAT', 'NATURE', 'HAS', 'A', 'MASTER', 'AND', 'THAT', 'THIS', 'MASTER', 'WILL', 'LISTEN', 'TO', 'PRAYER', 'THAT', 'THIS', 'MASTER', 'PUNISHES', 'AND', 'REWARDS', 'THAT', 'HE', 'LOVES', 'PRAISE', 'AND', 'FLATTERY', 'AND', 'HATES', 'THE', 'BRAVE', 'AND', 'FREE'] +5764-299665-0066-471: hyp=['RELIGION', 'RESTS', 'ON', 'THE', 'IDEA', 'THAT', 'NATURE', 'HAS', 'A', 'MASTER', 'AND', 'THAT', 'THIS', 'MASTER', 'WILL', 'LISTEN', 'TO', 'PRAYER', 'THAT', 'HIS', 'MASTER', 'PUNISHES', 'AND', 'REWARDS', 'THAT', 'HE', 'LOVES', 'PRAISE', 'AND', 'FLATTERY', 'AND', 'HATES', 'THE', 'BRAVE', 'AND', 'FREE'] +5764-299665-0067-472: ref=['WE', 'MUST', 'HAVE', 'CORNER', 'STONES'] +5764-299665-0067-472: hyp=['WE', 'MUST', 'HAVE', 'CORN', 'THE', 'STONES'] +5764-299665-0068-473: ref=['THE', 'STRUCTURE', 'MUST', 'HAVE', 'A', 'BASEMENT'] +5764-299665-0068-473: hyp=['THE', 'STRUCTURE', 'MUST', 'HAVE', 'ABASEMENT'] +5764-299665-0069-474: ref=['IF', 'WE', 'BUILD', 'WE', 'MUST', 'BEGIN', 'AT', 'THE', 'BOTTOM'] +5764-299665-0069-474: hyp=['IF', 'WE', 'BUILT', 'WE', 'MUST', 'BEGIN', 'AT', 'THE', 'BOTTOM'] +5764-299665-0070-475: ref=['I', 'HAVE', 'A', 'THEORY', 'AND', 'I', 'HAVE', 'FOUR', 'CORNER', 'STONES'] +5764-299665-0070-475: hyp=['I', 'HAVE', 'A', 'THEORY', 'AND', 'I', 'HAVE', 'FOUR', 'CORNESTONES'] +5764-299665-0071-476: ref=['THE', 'FIRST', 'STONE', 'IS', 'THAT', 'MATTER', 'SUBSTANCE', 'CANNOT', 'BE', 'DESTROYED', 'CANNOT', 'BE', 'ANNIHILATED'] +5764-299665-0071-476: hyp=['THE', 'FIRST', 'STONE', 'EAST', 'AT', 'MATHA', 'SUBSTANCE', 'CANNOT', 'BE', 'DESTROYED', 'CANNOT', 'BE', 'ANNIHILATED'] +5764-299665-0072-477: ref=['IF', 'THESE', 'CORNER', 'STONES', 'ARE', 'FACTS', 'IT', 'FOLLOWS', 'AS', 'A', 'NECESSITY', 'THAT', 'MATTER', 'AND', 'FORCE', 'ARE', 'FROM', 'AND', 'TO', 'ETERNITY', 'THAT', 'THEY', 'CAN', 'NEITHER', 'BE', 'INCREASED', 'NOR', 'DIMINISHED'] +5764-299665-0072-477: hyp=['IF', 'THESE', 'CORN', 'THE', 'STONES', 'ARE', 'FACTS', 'IT', 'FOLLOWS', 'AS', 'A', 'NECESSITY', 'THAT', 'MATTER', 'AND', 'FORCE', 'ARE', 'FROM', 'END', 'TO', 'ETERNITY', 'THAT', 'THEY', 'CAN', 'NEITHER', 'BE', 'INCREASED', 'NOR', 'DIMINISHED'] +5764-299665-0073-478: ref=['IT', 'FOLLOWS', 'THAT', 'NOTHING', 'HAS', 'BEEN', 'OR', 'CAN', 'BE', 'CREATED', 'THAT', 'THERE', 'NEVER', 'HAS', 'BEEN', 'OR', 'CAN', 'BE', 'A', 'CREATOR'] +5764-299665-0073-478: hyp=['IT', 'FOLLOWS', 'THAT', 'NOTHING', 'HATH', 'BEEN', 'OR', 'CAN', 'BE', 'CREATED', 'THAT', 'THERE', 'NEVER', 'HAS', 'BEEN', 'OR', 'CAN', 'BE', 'A', 'CREATOR'] +5764-299665-0074-479: ref=['IT', 'FOLLOWS', 'THAT', 'THERE', 'COULD', 'NOT', 'HAVE', 'BEEN', 'ANY', 'INTELLIGENCE', 'ANY', 'DESIGN', 'BACK', 'OF', 'MATTER', 'AND', 'FORCE'] +5764-299665-0074-479: hyp=['IT', 'FOLLOWED', 'THAT', 'THERE', 'COULD', 'NOT', 'HAVE', 'BEEN', 'ANY', 'INTELLIGENCE', 'AND', 'A', 'DESIGN', 'BACK', 'OF', 'MATTER', 'AND', 'FORCE'] +5764-299665-0075-480: ref=['I', 'SAY', 'WHAT', 'I', 'THINK'] +5764-299665-0075-480: hyp=['I', 'SAY', 'WHAT', 'I', 'THINK'] +5764-299665-0076-481: ref=['EVERY', 'EVENT', 'HAS', 'PARENTS'] +5764-299665-0076-481: hyp=['EVERY', 'EVENT', 'HAS', 'PARENTS'] +5764-299665-0077-482: ref=['THAT', 'WHICH', 'HAS', 'NOT', 'HAPPENED', 'COULD', 'NOT'] +5764-299665-0077-482: hyp=['THAT', 'WHICH', 'HATH', 'NOT', 'HAPPENED', 'COULD', 'NOT'] +5764-299665-0078-483: ref=['IN', 'THE', 'INFINITE', 'CHAIN', 'THERE', 'IS', 'AND', 'THERE', 'CAN', 'BE', 'NO', 
'BROKEN', 'NO', 'MISSING', 'LINK'] +5764-299665-0078-483: hyp=['IN', 'THE', 'INFINITE', 'CHANGE', 'WRITHS', 'AND', 'THERE', 'CAN', 'BE', 'NO', 'BROKEN', 'NO', 'MISSING', 'LINK'] +5764-299665-0079-484: ref=['WE', 'NOW', 'KNOW', 'THAT', 'OUR', 'FIRST', 'PARENTS', 'WERE', 'NOT', 'FOREIGNERS'] +5764-299665-0079-484: hyp=['WE', 'NOW', 'KNOW', 'THAT', 'OUR', 'FIRST', 'PARENTS', 'WERE', 'NOT', 'FOREIGNERS'] +5764-299665-0080-485: ref=['WE', 'NOW', 'KNOW', 'IF', 'WE', 'KNOW', 'ANYTHING', 'THAT', 'THE', 'UNIVERSE', 'IS', 'NATURAL', 'AND', 'THAT', 'MEN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'NATURALLY', 'PRODUCED'] +5764-299665-0080-485: hyp=['WE', 'NOW', 'KNOW', 'IF', 'WE', 'KNOW', 'ANYTHING', 'THAT', 'THE', 'UNIVERSE', 'IS', 'NATURAL', 'AND', 'THAT', 'MAN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'NATURALLY', 'PRODUCED'] +5764-299665-0081-486: ref=['WE', 'KNOW', 'THE', 'PATHS', 'THAT', 'LIFE', 'HAS', 'TRAVELED'] +5764-299665-0081-486: hyp=['WE', 'KNOW', 'THE', 'PATHS', 'THAT', 'LIFE', 'HAS', 'TRAVELLED'] +5764-299665-0082-487: ref=['WE', 'KNOW', 'THE', 'FOOTSTEPS', 'OF', 'ADVANCE', 'THEY', 'HAVE', 'BEEN', 'TRACED'] +5764-299665-0082-487: hyp=['WE', 'KNOW', 'THE', 'FOOTSTEPS', 'OF', 'ADVANCE', 'THEY', 'HAVE', 'BEEN', 'TRACED'] +5764-299665-0083-488: ref=['FOR', 'THOUSANDS', 'OF', 'YEARS', 'MEN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'TRYING', 'TO', 'REFORM', 'THE', 'WORLD'] +5764-299665-0083-488: hyp=['FOUR', 'THOUSANDS', 'OF', 'YEARS', 'MEN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'TRYING', 'TO', 'REFORM', 'THE', 'WORLD'] +5764-299665-0084-489: ref=['WHY', 'HAVE', 'THE', 'REFORMERS', 'FAILED'] +5764-299665-0084-489: hyp=['WHY', 'HAVE', 'REFORMERS', 'FAME'] +5764-299665-0085-490: ref=['THEY', 'DEPEND', 'ON', 'THE', 'LORD', 'ON', 'LUCK', 'AND', 'CHARITY'] +5764-299665-0085-490: hyp=['THEY', 'DEPEND', 'ON', 'THE', 'LOT', 'UNLUCK', 'AND', 'CHARITY'] +5764-299665-0086-491: ref=['THEY', 'LIVE', 'BY', 'FRAUD', 'AND', 'VIOLENCE', 'AND', 'BEQUEATH', 'THEIR', 'VICES', 'TO', 'THEIR', 'CHILDREN'] +5764-299665-0086-491: hyp=['THEY', 'LEAVE', 'THY', 'FRAUD', 'AND', 'VIOLENCE', 'AND', 'BEQUEATH', 'THEIR', 'VICES', 'TO', 'THEIR', 'CHILDREN'] +5764-299665-0087-492: ref=['FAILURE', 'SEEMS', 'TO', 'BE', 'THE', 'TRADEMARK', 'OF', 'NATURE', 'WHY'] +5764-299665-0087-492: hyp=['FAILURE', 'SEEMS', 'TO', 'BE', 'THE', 'TRADEMARK', 'OF', 'NATURE', 'WHY'] +5764-299665-0088-493: ref=['NATURE', 'PRODUCES', 'WITHOUT', 'PURPOSE', 'SUSTAINS', 'WITHOUT', 'INTENTION', 'AND', 'DESTROYS', 'WITHOUT', 'THOUGHT'] +5764-299665-0088-493: hyp=['NATURE', 'PRODUED', 'WITHOUT', 'PURPOSE', 'SUSTAINS', 'WITHOUT', 'INTENTION', 'AND', 'DESTROYS', 'WITHOUT', 'THOUGHT'] +5764-299665-0089-494: ref=['MUST', 'THE', 'WORLD', 'FOREVER', 'REMAIN', 'THE', 'VICTIM', 'OF', 'IGNORANT', 'PASSION'] +5764-299665-0089-494: hyp=['MISTER', 'WORLD', 'FOR', 'EVER', 'REMAINED', 'A', 'VICTIM', 'OF', 'IGNORANT', 'PASSION'] +5764-299665-0090-495: ref=['WHY', 'SHOULD', 'MEN', 'AND', 'WOMEN', 'HAVE', 'CHILDREN', 'THAT', 'THEY', 'CANNOT', 'TAKE', 'CARE', 'OF', 'CHILDREN', 'THAT', 'ARE', 'BURDENS', 'AND', 'CURSES', 'WHY'] +5764-299665-0090-495: hyp=['WHY', 'SHOULD', 'MEN', 'AND', 'WOMEN', 'HAVE', 'CHILDREN', 'THAT', 'THEY', 'CANNOT', 'TAKE', 'CARE', 'OF', 'CHILDREN', 'THAT', 'ARE', 'BURGLAR', 'AND', 'CURSES', 'WHY'] +5764-299665-0091-496: ref=['PASSION', 'IS', 'AND', 'ALWAYS', 'HAS', 'BEEN', 'DEAF'] +5764-299665-0091-496: hyp=['PASSION', 'IS', 'AND', 'ALWAYS', 'HAS', 'BEEN', 'DEAF'] +5764-299665-0092-497: ref=['LAW', 'CAN', 'PUNISH', 'BUT', 'IT', 'CAN', 'NEITHER', 'REFORM', 'CRIMINALS', 'NOR', 
'PREVENT', 'CRIME'] +5764-299665-0092-497: hyp=['LAW', 'CAN', 'PUNISH', 'BUT', 'IT', 'CAN', 'NEITHER', 'REFORM', 'CRIMINALS', 'NOR', 'PREVENT', 'CRIME'] +5764-299665-0093-498: ref=['THIS', 'CANNOT', 'BE', 'DONE', 'BY', 'TALK', 'OR', 'EXAMPLE'] +5764-299665-0093-498: hyp=['THESE', 'CANNOT', 'BE', 'DONE', 'BY', 'TALK', 'OR', 'EXAMPLE'] +5764-299665-0094-499: ref=['THIS', 'IS', 'THE', 'SOLUTION', 'OF', 'THE', 'WHOLE', 'QUESTION'] +5764-299665-0094-499: hyp=['THIS', 'IS', 'THE', 'SOLUTION', 'OF', 'THE', 'WHOLE', 'QUESTION'] +5764-299665-0095-500: ref=['THIS', 'FREES', 'WOMAN'] +5764-299665-0095-500: hyp=['THIS', 'FREEZE', 'WOMEN'] +5764-299665-0096-501: ref=['POVERTY', 'AND', 'CRIME', 'WILL', 'BE', 'CHILDLESS'] +5764-299665-0096-501: hyp=['POVERTY', 'AND', 'CRIME', 'WILL', 'BE', 'CHIMELESS'] +5764-299665-0097-502: ref=['IT', 'IS', 'FAR', 'BETTER', 'TO', 'BE', 'FREE', 'TO', 'LEAVE', 'THE', 'FORTS', 'AND', 'BARRICADES', 'OF', 'FEAR', 'TO', 'STAND', 'ERECT', 'AND', 'FACE', 'THE', 'FUTURE', 'WITH', 'A', 'SMILE'] +5764-299665-0097-502: hyp=['IT', 'IS', 'FAR', 'BETTER', 'TO', 'BE', 'FREE', 'TO', 'LEAVE', 'THE', 'FAULTS', 'AND', 'BARRICADES', 'OF', 'FEAR', 'TO', 'STAND', 'ERECT', 'AND', 'FAITH', 'THE', 'FUTURE', 'WE', 'DESP', 'MINE'] +6070-63485-0000-2599: ref=["THEY'RE", 'DONE', 'FOR', 'SAID', 'THE', 'SCHOOLMASTER', 'IN', 'A', 'LOW', 'KEY', 'TO', 'THE', 'CHOUETTE', 'OUT', 'WITH', 'YOUR', 'VITRIOL', 'AND', 'MIND', 'YOUR', 'EYE'] +6070-63485-0000-2599: hyp=['THEIR', 'DUNFAR', 'SAID', 'THE', 'SCHOOLMASTER', 'IN', 'A', 'LOW', 'KEY', 'TO', 'THE', 'SWEAT', 'OUT', 'WITH', 'OUR', 'VITRIOL', 'AND', 'MIND', 'YOUR', 'EYE'] +6070-63485-0001-2600: ref=['THE', 'TWO', 'MONSTERS', 'TOOK', 'OFF', 'THEIR', 'SHOES', 'AND', 'MOVED', 'STEALTHILY', 'ALONG', 'KEEPING', 'IN', 'THE', 'SHADOWS', 'OF', 'THE', 'HOUSES'] +6070-63485-0001-2600: hyp=['THE', 'TWO', 'MONSTERS', 'TOOK', 'OFF', 'THEIR', 'SHOES', 'AND', 'MOVED', 'STEALTHILY', 'ALONG', 'KEEPING', 'IN', 'THE', 'SHADOWS', 'OF', 'THE', 'HOUSES'] +6070-63485-0002-2601: ref=['BY', 'MEANS', 'OF', 'THIS', 'STRATAGEM', 'THEY', 'FOLLOWED', 'SO', 'CLOSELY', 'THAT', 'ALTHOUGH', 'WITHIN', 'A', 'FEW', 'STEPS', 'OF', 'SARAH', 'AND', 'TOM', 'THEY', 'DID', 'NOT', 'HEAR', 'THEM'] +6070-63485-0002-2601: hyp=['BY', 'MEANS', 'OF', 'THIS', 'STRATAGEM', 'THEY', 'FOLLOWED', 'SO', 'CLOSELY', 'THAT', 'ALTHOUGH', 'WITHIN', 'A', 'FEW', 'STEPS', 'OF', 'SEREN', 'TOM', 'THEY', 'DID', 'NOT', 'HEAR', 'THEM'] +6070-63485-0003-2602: ref=['SARAH', 'AND', 'HER', 'BROTHER', 'HAVING', 'AGAIN', 'PASSED', 'BY', 'THE', 'TAPIS', 'FRANC', 'ARRIVED', 'CLOSE', 'TO', 'THE', 'DILAPIDATED', 'HOUSE', 'WHICH', 'WAS', 'PARTLY', 'IN', 'RUINS', 'AND', 'ITS', 'OPENED', 'CELLARS', 'FORMED', 'A', 'KIND', 'OF', 'GULF', 'ALONG', 'WHICH', 'THE', 'STREET', 'RAN', 'IN', 'THAT', 'DIRECTION'] +6070-63485-0003-2602: hyp=['SARAH', 'AND', 'HER', 'BROTHER', 'HAVING', 'AGAIN', 'PASSED', 'BY', 'THE', 'TAPPY', 'FROG', 'ARRIVED', 'CLOSE', 'TO', 'THE', 'DILAPIDATED', 'HOUSE', 'WHICH', 'WAS', 'PARTLY', 'IN', 'RUINS', 'AND', 'ITS', 'OPEN', 'CELLARS', 'FORMED', 'A', 'KIND', 'OF', 'GULF', 'ALONG', 'WHICH', 'THE', 'STREET', 'RAN', 'IN', 'THAT', 'DIRECTION'] +6070-63485-0004-2603: ref=['IN', 'AN', 'INSTANT', 'THE', 'SCHOOLMASTER', 'WITH', 'A', 'LEAP', 'RESEMBLING', 'IN', 'STRENGTH', 'AND', 'AGILITY', 'THE', 'SPRING', 'OF', 'A', 'TIGER', 'SEIZED', 'SEYTON', 'WITH', 'ONE', 'HAND', 'BY', 'THE', 'THROAT', 'AND', 'EXCLAIMED', 'YOUR', 'MONEY', 'OR', 'I', 'WILL', 'FLING', 'YOU', 'INTO', 'THIS', 'HOLE'] +6070-63485-0004-2603: hyp=['IN', 'AN', 'INSTANT', 
'THE', 'SCHOOLMASTER', 'WITH', 'A', 'LEAP', 'RESEMBLING', 'IN', 'STRENGTH', 'AND', 'AGILITY', 'THE', 'SPRING', 'OF', 'A', 'TIGER', 'SEIZED', 'SEATING', 'WITH', 'ONE', 'HAND', 'BY', 'THE', 'THROAT', 'AND', 'EXCLAIMED', 'YOUR', 'MONEY', 'OR', 'I', 'WILL', 'FLING', 'YOU', 'INTO', 'THIS', 'HALL'] +6070-63485-0005-2604: ref=['NO', 'SAID', 'THE', 'OLD', 'BRUTE', 'GRUMBLINGLY', 'NO', 'NOT', 'ONE', 'RING', 'WHAT', 'A', 'SHAME'] +6070-63485-0005-2604: hyp=['NO', 'SAID', 'THE', 'OLD', 'BRUTE', 'TREMBLINGLY', 'NO', 'NOT', 'ONE', 'RING', 'WHAT', 'A', 'SHAME'] +6070-63485-0006-2605: ref=['TOM', 'SEYTON', 'DID', 'NOT', 'LOSE', 'HIS', 'PRESENCE', 'OF', 'MIND', 'DURING', 'THIS', 'SCENE', 'RAPIDLY', 'AND', 'UNEXPECTEDLY', 'AS', 'IT', 'HAD', 'OCCURRED'] +6070-63485-0006-2605: hyp=['TOM', 'SEYTON', 'DID', 'NOT', 'LOSE', 'HIS', 'PRESENCE', 'OF', 'MIND', 'DURING', 'THIS', 'SCENE', 'RAPIDLY', 'AND', 'UNEXPECTEDLY', 'AS', 'IT', 'HAD', 'OCCURRED'] +6070-63485-0007-2606: ref=['OH', 'AH', 'TO', 'LAY', 'A', 'TRAP', 'TO', 'CATCH', 'US', 'REPLIED', 'THE', 'THIEF'] +6070-63485-0007-2606: hyp=['U', 'AH', 'TO', 'LAY', 'A', 'TRAP', 'TO', 'CATCH', 'US', 'REPLIED', 'THE', 'THIEF'] +6070-63485-0008-2607: ref=['THEN', 'ADDRESSING', 'THOMAS', 'SEYTON', 'YOU', 'KNOW', 'THE', 'PLAIN', 'OF', 'SAINT', 'DENIS'] +6070-63485-0008-2607: hyp=['THEN', 'ADDRESSING', 'THOMAS', 'SETTON', 'YOU', 'KNOW', 'THE', 'PLANE', 'OF', 'SAINT', 'DENIS'] +6070-63485-0009-2608: ref=['DID', 'YOU', 'SEE', 'IN', 'THE', 'CABARET', 'WE', 'HAVE', 'JUST', 'LEFT', 'FOR', 'I', 'KNOW', 'YOU', 'AGAIN', 'THE', 'MAN', 'WHOM', 'THE', 'CHARCOAL', 'MAN', 'CAME', 'TO', 'SEEK'] +6070-63485-0009-2608: hyp=['DID', 'YOU', 'SEE', 'IN', 'THE', 'CABARET', 'WE', 'HAD', 'JUST', 'LEFT', 'FOR', 'I', 'KNOW', 'YOU', 'AGAIN', 'THE', 'MAN', 'WHOM', 'THE', 'CHARCOAL', 'MAN', 'CAME', 'TO', 'SEEK'] +6070-63485-0010-2609: ref=['CRIED', 'THE', 'SCHOOLMASTER', 'A', 'THOUSAND', 'FRANCS', 'AND', "I'LL", 'KILL', 'HIM'] +6070-63485-0010-2609: hyp=['CRIED', 'THE', 'SCHOOLMASTER', 'A', 'THOUSAND', 'FRANCS', 'AND', "I'LL", 'KILL', 'HIM'] +6070-63485-0011-2610: ref=['WRETCH', 'I', 'DO', 'NOT', 'SEEK', 'HIS', 'LIFE', 'REPLIED', 'SARAH', 'TO', 'THE', 'SCHOOLMASTER'] +6070-63485-0011-2610: hyp=['THATCH', 'I', 'DO', 'NOT', 'SEE', 'HIS', 'LIFE', 'REPLIED', 'SARAH', 'TO', 'THE', 'SCHOOLMASTER'] +6070-63485-0012-2611: ref=["LET'S", 'GO', 'AND', 'MEET', 'HIM'] +6070-63485-0012-2611: hyp=["LET'S", 'GO', 'AND', 'MEET', 'HIM'] +6070-63485-0013-2612: ref=['OLD', 'BOY', 'IT', 'WILL', 'PAY', 'FOR', 'LOOKING', 'AFTER'] +6070-63485-0013-2612: hyp=['OLD', 'BOY', 'IT', 'WILL', 'PAY', 'FOR', 'LOOKING', 'AFTER'] +6070-63485-0014-2613: ref=['WELL', 'MY', 'WIFE', 'SHALL', 'BE', 'THERE', 'SAID', 'THE', 'SCHOOLMASTER', 'YOU', 'WILL', 'TELL', 'HER', 'WHAT', 'YOU', 'WANT', 'AND', 'I', 'SHALL', 'SEE'] +6070-63485-0014-2613: hyp=['WELL', 'MY', 'WIFE', 'SHALL', 'BE', 'THERE', 'SAID', 'THE', 'SCHOOLMASTER', 'YOU', 'WILL', 'TELL', 'HER', 'WHAT', 'YOU', 'WANT', 'AND', 'I', 'SHALL', 'SEE'] +6070-63485-0015-2614: ref=['IN', 'THE', 'PLAIN', 'OF', 'SAINT', 'DENIS'] +6070-63485-0015-2614: hyp=['IN', 'THE', 'PLANE', 'OF', 'SAINT', 'DENY'] +6070-63485-0016-2615: ref=['BETWEEN', 'SAINT', 'OUEN', 'AND', 'THE', 'ROAD', 'OF', 'LA', 'REVOLTE', 'AT', 'THE', 'END', 'OF', 'THE', 'ROAD', 'AGREED'] +6070-63485-0016-2615: hyp=['BETWEEN', 'SAINT', 'LAUIS', 'AND', 'THE', 'ROAD', 'OF', 'LA', 'REVOLT', 'AT', 'THE', 'END', 'OF', 'THE', 'ROAD', 'AGREED'] +6070-63485-0017-2616: ref=['HE', 'HAD', 'FORGOTTEN', 'THE', 'ADDRESS', 'OF', 'THE', 'SELF', 
'STYLED', 'FAN', 'PAINTER'] +6070-63485-0017-2616: hyp=['HE', 'HAD', 'FORGOTTEN', 'THE', 'ADDRESS', 'OF', 'THE', 'SELF', 'STYLED', 'PAMP', 'PAINTER'] +6070-63485-0018-2617: ref=['THE', 'FIACRE', 'STARTED'] +6070-63485-0018-2617: hyp=['THE', 'FIATHIS', 'STARTED'] +6070-86744-0000-2569: ref=['FRANZ', 'WHO', 'SEEMED', 'ATTRACTED', 'BY', 'SOME', 'INVISIBLE', 'INFLUENCE', 'TOWARDS', 'THE', 'COUNT', 'IN', 'WHICH', 'TERROR', 'WAS', 'STRANGELY', 'MINGLED', 'FELT', 'AN', 'EXTREME', 'RELUCTANCE', 'TO', 'PERMIT', 'HIS', 'FRIEND', 'TO', 'BE', 'EXPOSED', 'ALONE', 'TO', 'THE', 'SINGULAR', 'FASCINATION', 'THAT', 'THIS', 'MYSTERIOUS', 'PERSONAGE', 'SEEMED', 'TO', 'EXERCISE', 'OVER', 'HIM', 'AND', 'THEREFORE', 'MADE', 'NO', 'OBJECTION', 'TO', "ALBERT'S", 'REQUEST', 'BUT', 'AT', 'ONCE', 'ACCOMPANIED', 'HIM', 'TO', 'THE', 'DESIRED', 'SPOT', 'AND', 'AFTER', 'A', 'SHORT', 'DELAY', 'THE', 'COUNT', 'JOINED', 'THEM', 'IN', 'THE', 'SALON'] +6070-86744-0000-2569: hyp=['FRANCE', 'WHO', 'SEEMED', 'ATTRACTED', 'BY', 'SOME', 'INVISIBLE', 'INFLUENCE', 'TO', 'WHICH', 'THE', 'COUNT', 'IN', 'WHICH', 'TERROR', 'WAS', 'STRANGELY', 'MINGLED', 'FELT', 'AN', 'EXTREME', 'RELUCTANCE', 'TO', 'PERMIT', 'HIS', 'FRIEND', 'TO', 'BE', 'EXPOSED', 'ALONE', 'TO', 'THE', 'SINGULAR', 'FASCINATION', 'THAT', 'THIS', 'MYSTERIOUS', 'PERSONAGE', 'SEEMED', 'TO', 'EXERCISE', 'OVER', 'HIM', 'AND', 'THEREFORE', 'MADE', 'NO', 'OBJECTION', 'TO', "ALBERT'S", 'REQUEST', 'BUT', 'AT', 'ONCE', 'ACCOMPANIED', 'HIM', 'TO', 'THE', 'DESIRED', 'SPOT', 'AND', 'AFTER', 'A', 'SHORT', 'DELAY', 'THE', 'COUNT', 'JOINED', 'THEM', 'IN', 'THE', 'SALON'] +6070-86744-0001-2570: ref=['MY', 'VERY', 'GOOD', 'FRIEND', 'AND', 'EXCELLENT', 'NEIGHBOR', 'REPLIED', 'THE', 'COUNT', 'WITH', 'A', 'SMILE', 'YOU', 'REALLY', 'EXAGGERATE', 'MY', 'TRIFLING', 'EXERTIONS'] +6070-86744-0001-2570: hyp=['MY', 'VERY', 'GOOD', 'FRIEND', 'AN', 'EXCELLENT', 'NEIGHBOR', 'REPLIED', 'THE', 'COUNT', 'WITH', 'A', 'SMILE', 'YOU', 'REALLY', 'EXAGGERATE', 'MY', 'TRIFLING', 'EXERTIONS'] +6070-86744-0002-2571: ref=['MY', 'FATHER', 'THE', 'COMTE', 'DE', 'MORCERF', 'ALTHOUGH', 'OF', 'SPANISH', 'ORIGIN', 'POSSESSES', 'CONSIDERABLE', 'INFLUENCE', 'BOTH', 'AT', 'THE', 'COURT', 'OF', 'FRANCE', 'AND', 'MADRID', 'AND', 'I', 'UNHESITATINGLY', 'PLACE', 'THE', 'BEST', 'SERVICES', 'OF', 'MYSELF', 'AND', 'ALL', 'TO', 'WHOM', 'MY', 'LIFE', 'IS', 'DEAR', 'AT', 'YOUR', 'DISPOSAL'] +6070-86744-0002-2571: hyp=['MY', 'FATHER', 'THE', 'COMTE', 'DE', 'MORCERF', 'ALTHOUGH', 'A', 'SPANISH', 'ORIGIN', 'POSSESSES', 'CONSIDERABLE', 'INFLUENCE', 'BOTH', 'AT', 'THE', 'COURT', 'OF', 'FRANCE', 'AND', 'MADRID', 'AND', 'I', 'UNHESITATINGLY', 'PLACED', 'THE', 'BEST', 'SERVICES', 'OF', 'MYSELF', 'AND', 'ALL', 'TO', 'WHOM', 'MY', 'LIFE', 'IS', 'DEAR', 'AT', 'YOUR', 'DISPOSAL'] +6070-86744-0003-2572: ref=['I', 'CAN', 'SCARCELY', 'CREDIT', 'IT'] +6070-86744-0003-2572: hyp=['I', 'CAN', 'SCARCELY', 'CREDIT', 'IT'] +6070-86744-0004-2573: ref=['THEN', 'IT', 'IS', 'SETTLED', 'SAID', 'THE', 'COUNT', 'AND', 'I', 'GIVE', 'YOU', 'MY', 'SOLEMN', 'ASSURANCE', 'THAT', 'I', 'ONLY', 'WAITED', 'AN', 'OPPORTUNITY', 'LIKE', 'THE', 'PRESENT', 'TO', 'REALIZE', 'PLANS', 'THAT', 'I', 'HAVE', 'LONG', 'MEDITATED'] +6070-86744-0004-2573: hyp=['THEN', 'IT', 'IS', 'SETTLED', 'SAID', 'THE', 'COUNT', 'AND', 'I', 'GIVE', 'YOU', 'MY', 'SOLEMN', 'ASSURANCE', 'THAT', 'I', 'ONLY', 'WAITED', 'IN', 'A', 'PETULITY', 'LIKE', 'THE', 'PRESENT', 'TO', 'REALIZE', 'PLANS', 'THAT', 'I', 'HAVE', 'LONG', 'MEDITATED'] +6070-86744-0005-2574: ref=['SHALL', 'WE', 'MAKE', 'A', 'POSITIVE', 
'APPOINTMENT', 'FOR', 'A', 'PARTICULAR', 'DAY', 'AND', 'HOUR', 'INQUIRED', 'THE', 'COUNT', 'ONLY', 'LET', 'ME', 'WARN', 'YOU', 'THAT', 'I', 'AM', 'PROVERBIAL', 'FOR', 'MY', 'PUNCTILIOUS', 'EXACTITUDE', 'IN', 'KEEPING', 'MY', 'ENGAGEMENTS', 'DAY', 'FOR', 'DAY', 'HOUR', 'FOR', 'HOUR', 'SAID', 'ALBERT', 'THAT', 'WILL', 'SUIT', 'ME', 'TO', 'A', 'DOT'] +6070-86744-0005-2574: hyp=['SHOW', 'A', 'MAKE', 'A', 'POSITIVE', 'APPOINTMENT', 'FOR', 'A', 'PARTICULAR', 'DAY', 'AND', 'HOUR', 'INQUIRED', 'THE', 'COUNT', 'ONLY', 'LET', 'ME', 'WARN', 'YOU', 'THAT', 'I', 'AM', 'PROVERBIAL', 'FOR', 'MY', 'PUNCTILIOUS', 'EXACTITUDE', 'IN', 'KEEPING', 'MY', 'ENGAGEMENTS', 'DAY', 'FOR', 'DAY', 'HOUR', 'FOR', 'HOUR', 'SAID', 'ALBERT', 'THAT', 'WILL', 'SUIT', 'ME', 'TO', 'A', 'DOT'] +6070-86744-0006-2575: ref=['SO', 'BE', 'IT', 'THEN', 'REPLIED', 'THE', 'COUNT', 'AND', 'EXTENDING', 'HIS', 'HAND', 'TOWARDS', 'A', 'CALENDAR', 'SUSPENDED', 'NEAR', 'THE', 'CHIMNEY', 'PIECE', 'HE', 'SAID', 'TO', 'DAY', 'IS', 'THE', 'TWENTY', 'FIRST', 'OF', 'FEBRUARY', 'AND', 'DRAWING', 'OUT', 'HIS', 'WATCH', 'ADDED', 'IT', 'IS', 'EXACTLY', 'HALF', 'PAST', 'TEN', "O'CLOCK", 'NOW', 'PROMISE', 'ME', 'TO', 'REMEMBER', 'THIS', 'AND', 'EXPECT', 'ME', 'THE', 'TWENTY', 'FIRST', 'OF', 'MAY', 'AT', 'THE', 'SAME', 'HOUR', 'IN', 'THE', 'FORENOON'] +6070-86744-0006-2575: hyp=['SO', 'BE', 'IT', 'THEN', 'REPLIED', 'THE', 'COUNT', 'AND', 'EXTENDING', 'HIS', 'HAND', 'TOWARDS', 'THE', 'CALENDER', 'SUSPENDED', 'NEAR', 'THE', 'CHIMNEY', 'PIECE', 'HE', 'SAID', 'TO', 'DAY', 'IS', 'THE', 'TWENTY', 'FIRST', 'OF', 'FEBRUARY', 'AND', 'DRAWING', 'OUT', 'HIS', 'WATCH', 'ADDED', 'IT', 'IS', 'EXACTLY', 'HALF', 'PAST', 'TEN', "O'CLOCK", 'NOW', 'PROMISE', 'ME', 'TO', 'REMEMBER', 'THIS', 'AND', 'EXPECT', 'ME', 'THE', 'TWENTY', 'FIRST', 'OF', 'MAY', 'AT', 'THE', 'SAME', 'HOUR', 'IN', 'THE', 'FORENOON'] +6070-86744-0007-2576: ref=['I', 'RESIDE', 'IN', 'MY', "FATHER'S", 'HOUSE', 'BUT', 'OCCUPY', 'A', 'PAVILION', 'AT', 'THE', 'FARTHER', 'SIDE', 'OF', 'THE', 'COURT', 'YARD', 'ENTIRELY', 'SEPARATED', 'FROM', 'THE', 'MAIN', 'BUILDING'] +6070-86744-0007-2576: hyp=['I', 'RESIDE', 'IN', 'MY', "FATHER'S", 'HOUSE', 'BUT', 'OCCUPY', 'A', 'PAVILION', 'AT', 'THE', 'FARTHER', 'SIDE', 'OF', 'THE', 'COURTYARD', 'AND', 'TIRELESS', 'SEPARATED', 'FROM', 'THE', 'MAIN', 'BUILDING'] +6070-86744-0008-2577: ref=['NOW', 'THEN', 'SAID', 'THE', 'COUNT', 'RETURNING', 'HIS', 'TABLETS', 'TO', 'HIS', 'POCKET', 'MAKE', 'YOURSELF', 'PERFECTLY', 'EASY', 'THE', 'HAND', 'OF', 'YOUR', 'TIME', 'PIECE', 'WILL', 'NOT', 'BE', 'MORE', 'ACCURATE', 'IN', 'MARKING', 'THE', 'TIME', 'THAN', 'MYSELF'] +6070-86744-0008-2577: hyp=['NOW', 'THEN', 'SAID', 'THE', 'COUNT', 'RETURNING', 'HIS', 'TABLETS', 'TO', 'HIS', 'POCKET', 'MAKE', 'YOURSELF', 'PERFECTLY', 'EASY', 'THE', 'HAND', 'OF', 'YOUR', 'TIME', 'PEACE', 'WILL', 'NOT', 'BE', 'MORE', 'ACCURATE', 'IN', 'MARKING', 'THE', 'TIME', 'THAN', 'MYSELF'] +6070-86744-0009-2578: ref=['THAT', 'DEPENDS', 'WHEN', 'DO', 'YOU', 'LEAVE'] +6070-86744-0009-2578: hyp=['THAT', 'DEPENDS', 'WHEN', "D'YE", 'LEAVE'] +6070-86744-0010-2579: ref=['FOR', 'FRANCE', 'NO', 'FOR', 'VENICE', 'I', 'SHALL', 'REMAIN', 'IN', 'ITALY', 'FOR', 'ANOTHER', 'YEAR', 'OR', 'TWO'] +6070-86744-0010-2579: hyp=['FOR', 'FRANCE', 'NO', 'FOR', 'VENICE', 'I', 'SHALL', 'REMAIN', 'IN', 'ITALY', 'FOR', 'ANOTHER', 'YEAR', 'OR', 'TWO'] +6070-86744-0011-2580: ref=['THEN', 'WE', 'SHALL', 'NOT', 'MEET', 'IN', 'PARIS'] +6070-86744-0011-2580: hyp=['THEN', 'WE', 'SHALL', 'NOT', 'MEET', 'IN', 'PARIS'] +6070-86744-0012-2581: ref=['I', 
'FEAR', 'I', 'SHALL', 'NOT', 'HAVE', 'THAT', 'HONOR'] +6070-86744-0012-2581: hyp=['I', 'FEAR', 'I', 'SHALL', 'NOT', 'HAVE', 'THAT', 'HONOR'] +6070-86744-0013-2582: ref=['WELL', 'SINCE', 'WE', 'MUST', 'PART', 'SAID', 'THE', 'COUNT', 'HOLDING', 'OUT', 'A', 'HAND', 'TO', 'EACH', 'OF', 'THE', 'YOUNG', 'MEN', 'ALLOW', 'ME', 'TO', 'WISH', 'YOU', 'BOTH', 'A', 'SAFE', 'AND', 'PLEASANT', 'JOURNEY'] +6070-86744-0013-2582: hyp=['WELL', 'SINCE', 'WE', 'MUST', 'PART', 'SAID', 'THE', 'COUNT', 'HOLDING', 'OUT', 'A', 'HAND', 'TO', 'EACH', 'OF', 'THE', 'YOUNG', 'MEN', 'ALLOW', 'ME', 'TO', 'WISH', 'YOU', 'BOTH', 'AS', 'SAFE', 'AND', 'PLEASANT', 'JOURNEY'] +6070-86744-0014-2583: ref=['WHAT', 'IS', 'THE', 'MATTER', 'ASKED', 'ALBERT', 'OF', 'FRANZ', 'WHEN', 'THEY', 'HAD', 'RETURNED', 'TO', 'THEIR', 'OWN', 'APARTMENTS', 'YOU', 'SEEM', 'MORE', 'THAN', 'COMMONLY', 'THOUGHTFUL'] +6070-86744-0014-2583: hyp=['WHAT', 'IS', 'THE', 'MATTER', 'ASKED', 'ALBERT', 'OF', 'FRANZ', 'WHEN', 'THEY', 'HAD', 'RETURNED', 'TO', 'THEIR', 'OWN', 'APARTMENTS', 'YOU', 'SEE', 'MORE', 'THAN', 'COMMONLY', 'THOUGHTFUL'] +6070-86744-0015-2584: ref=['I', 'WILL', 'CONFESS', 'TO', 'YOU', 'ALBERT', 'REPLIED', 'FRANZ', 'THE', 'COUNT', 'IS', 'A', 'VERY', 'SINGULAR', 'PERSON', 'AND', 'THE', 'APPOINTMENT', 'YOU', 'HAVE', 'MADE', 'TO', 'MEET', 'HIM', 'IN', 'PARIS', 'FILLS', 'ME', 'WITH', 'A', 'THOUSAND', 'APPREHENSIONS'] +6070-86744-0015-2584: hyp=['I', 'WILL', 'CONSIST', 'TO', 'YOU', 'ALBERT', 'REPLIED', 'FRANZ', 'THE', 'COUNT', 'IS', 'A', 'VERY', 'SINGULAR', 'PERSON', 'AND', 'THE', 'APPOINTMENT', 'YOU', 'HAVE', 'MADE', 'TO', 'MEET', 'HIM', 'IN', 'PARIS', 'FILLS', 'ME', 'WITH', 'A', 'THOUSAND', 'APPREHENSIONS'] +6070-86744-0016-2585: ref=['DID', 'YOU', 'EVER', 'MEET', 'HIM', 'PREVIOUSLY', 'TO', 'COMING', 'HITHER'] +6070-86744-0016-2585: hyp=['DID', 'YOU', 'EVER', 'MEET', 'HIM', 'PREVIOUSLY', 'TO', 'COMING', 'HITHER'] +6070-86744-0017-2586: ref=['UPON', 'MY', 'HONOR', 'THEN', 'LISTEN', 'TO', 'ME'] +6070-86744-0017-2586: hyp=['UPON', 'MY', 'HONOUR', 'THEN', 'LISTEN', 'TO', 'ME'] +6070-86744-0018-2587: ref=['HE', 'DWELT', 'WITH', 'CONSIDERABLE', 'FORCE', 'AND', 'ENERGY', 'ON', 'THE', 'ALMOST', 'MAGICAL', 'HOSPITALITY', 'HE', 'HAD', 'RECEIVED', 'FROM', 'THE', 'COUNT', 'AND', 'THE', 'MAGNIFICENCE', 'OF', 'HIS', 'ENTERTAINMENT', 'IN', 'THE', 'GROTTO', 'OF', 'THE', 'THOUSAND', 'AND', 'ONE', 'NIGHTS', 'HE', 'RECOUNTED', 'WITH', 'CIRCUMSTANTIAL', 'EXACTITUDE', 'ALL', 'THE', 'PARTICULARS', 'OF', 'THE', 'SUPPER', 'THE', 'HASHISH', 'THE', 'STATUES', 'THE', 'DREAM', 'AND', 'HOW', 'AT', 'HIS', 'AWAKENING', 'THERE', 'REMAINED', 'NO', 'PROOF', 'OR', 'TRACE', 'OF', 'ALL', 'THESE', 'EVENTS', 'SAVE', 'THE', 'SMALL', 'YACHT', 'SEEN', 'IN', 'THE', 'DISTANT', 'HORIZON', 'DRIVING', 'UNDER', 'FULL', 'SAIL', 'TOWARD', 'PORTO', 'VECCHIO'] +6070-86744-0018-2587: hyp=['HE', 'DWELT', 'WITH', 'CONSIDERABLE', 'FORCE', 'AND', 'ENERGY', 'ON', 'THE', 'ALMOST', 'MAGICAL', 'HOSPITALITY', 'HE', 'HAD', 'RECEIVED', 'FROM', 'THE', 'COUNT', 'AND', 'THE', 'MAGNIFICENCE', 'OF', 'HIS', 'ENTERTAINMENT', 'IN', 'THE', 'DRATO', 'OF', 'THE', 'THOUSAND', 'AND', 'ONE', 'NIGHTS', 'HE', 'RECOUNTED', 'WITH', 'CIRCUMSTANTIAL', 'EXACTITUDE', 'ALL', 'THE', 'PARTICULARS', 'OF', 'THE', 'SUPPER', 'THE', 'HASHISH', 'THE', 'STATUES', 'THE', 'DREAM', 'AND', 'HOW', 'AT', 'HIS', 'AWAKENING', 'THERE', 'REMAINED', 'NO', 'PROOF', 'OF', 'TRACE', 'OF', 'ALL', 'THESE', 'EVENTS', 'SAVE', 'THE', 'SMALL', 'YACHT', 'SEEN', 'IN', 'THE', 'DISTANT', 'HORIZON', 'DRIVING', 'UNDER', 'FULL', 'SAIL', 'TOWARD', 'PORTO', 'VECCHIO'] 
+6070-86744-0019-2588: ref=['THEN', 'HE', 'DETAILED', 'THE', 'CONVERSATION', 'OVERHEARD', 'BY', 'HIM', 'AT', 'THE', 'COLOSSEUM', 'BETWEEN', 'THE', 'COUNT', 'AND', 'VAMPA', 'IN', 'WHICH', 'THE', 'COUNT', 'HAD', 'PROMISED', 'TO', 'OBTAIN', 'THE', 'RELEASE', 'OF', 'THE', 'BANDIT', 'PEPPINO', 'AN', 'ENGAGEMENT', 'WHICH', 'AS', 'OUR', 'READERS', 'ARE', 'AWARE', 'HE', 'MOST', 'FAITHFULLY', 'FULFILLED'] +6070-86744-0019-2588: hyp=['THEN', 'HE', 'DETAILED', 'THE', 'CONVERSATION', 'OVERHEARD', 'BY', 'HIM', 'AT', 'THE', 'COLISEUM', 'BETWEEN', 'THE', 'COUNT', 'AND', 'VAMPA', 'IN', 'WHICH', 'THE', 'COUNT', 'HAD', 'PROMISED', 'TO', 'OBTAIN', 'THE', 'RELEASE', 'OF', 'THE', 'BANDIT', 'PEPPINO', 'AND', 'ENGAGEMENT', 'WHICH', 'AS', 'OUR', 'READERS', 'ARE', 'AWARE', 'HE', 'MOST', 'FAITHFULLY', 'FULFILLED'] +6070-86744-0020-2589: ref=['BUT', 'SAID', 'FRANZ', 'THE', 'CORSICAN', 'BANDITS', 'THAT', 'WERE', 'AMONG', 'THE', 'CREW', 'OF', 'HIS', 'VESSEL'] +6070-86744-0020-2589: hyp=['BUT', 'SAID', 'FRANZ', 'THE', 'CORSICIAN', 'BANDITS', 'THAT', 'WERE', 'AMONG', 'THE', 'CREW', 'OF', 'HIS', 'VESSEL'] +6070-86744-0021-2590: ref=['WHY', 'REALLY', 'THE', 'THING', 'SEEMS', 'TO', 'ME', 'SIMPLE', 'ENOUGH'] +6070-86744-0021-2590: hyp=['WHY', 'REALLY', 'THE', 'THING', 'SEEMS', 'TO', 'ME', 'SIMPLE', 'ENOUGH'] +6070-86744-0022-2591: ref=['TALKING', 'OF', 'COUNTRIES', 'REPLIED', 'FRANZ', 'OF', 'WHAT', 'COUNTRY', 'IS', 'THE', 'COUNT', 'WHAT', 'IS', 'HIS', 'NATIVE', 'TONGUE', 'WHENCE', 'DOES', 'HE', 'DERIVE', 'HIS', 'IMMENSE', 'FORTUNE', 'AND', 'WHAT', 'WERE', 'THOSE', 'EVENTS', 'OF', 'HIS', 'EARLY', 'LIFE', 'A', 'LIFE', 'AS', 'MARVELLOUS', 'AS', 'UNKNOWN', 'THAT', 'HAVE', 'TINCTURED', 'HIS', 'SUCCEEDING', 'YEARS', 'WITH', 'SO', 'DARK', 'AND', 'GLOOMY', 'A', 'MISANTHROPY'] +6070-86744-0022-2591: hyp=['TALKING', 'OF', 'COUNTRIES', 'REPLIED', 'FRANZ', 'OF', 'WHAT', 'COUNTRIES', 'THE', 'COUNT', 'WHAT', 'IS', 'HIS', 'NATIVE', 'DONG', 'WHENCE', 'DOES', 'HE', 'DERIVE', 'HIS', 'IMMENSE', 'FORTUNE', 'AND', 'WHAT', 'WERE', 'THOSE', 'EVENTS', 'OF', 'HIS', 'EARLY', 'LIFE', 'A', 'LIFE', 'AS', 'MARVELLOUS', 'AS', 'UNKNOWN', 'THAT', 'HAVE', 'TINTED', 'HIS', 'SUCCEEDING', 'YEARS', 'WITH', 'SORE', 'DARK', 'AND', 'BLOOMY', 'AND', 'MISANTHROPY'] +6070-86744-0023-2592: ref=['CERTAINLY', 'THESE', 'ARE', 'QUESTIONS', 'THAT', 'IN', 'YOUR', 'PLACE', 'I', 'SHOULD', 'LIKE', 'TO', 'HAVE', 'ANSWERED'] +6070-86744-0023-2592: hyp=['CERTAINLY', 'THESE', 'ARE', 'QUESTIONS', 'THAT', 'IN', 'YOUR', 'PLACE', 'I', 'SHOULD', 'LIKE', 'TO', 'HAVE', 'ANSWERED'] +6070-86744-0024-2593: ref=['MY', 'DEAR', 'FRANZ', 'REPLIED', 'ALBERT', 'WHEN', 'UPON', 'RECEIPT', 'OF', 'MY', 'LETTER', 'YOU', 'FOUND', 'THE', 'NECESSITY', 'OF', 'ASKING', 'THE', "COUNT'S", 'ASSISTANCE', 'YOU', 'PROMPTLY', 'WENT', 'TO', 'HIM', 'SAYING', 'MY', 'FRIEND', 'ALBERT', 'DE', 'MORCERF', 'IS', 'IN', 'DANGER', 'HELP', 'ME', 'TO', 'DELIVER', 'HIM'] +6070-86744-0024-2593: hyp=['MY', 'DEAR', 'FRANCE', 'REPLIED', 'ALBERT', 'WHEN', 'UPON', 'RECEIPT', 'OF', 'MY', 'LETTER', 'YOU', 'FOUND', 'THE', 'NECESSITY', 'OF', 'ASKING', 'THE', "COUNT'S", 'ASSISTANCE', 'YOU', 'PROMPTLY', 'WENT', 'TO', 'HIM', 'SAYING', 'MY', 'FRIEND', 'ALBERT', 'DE', 'MORCERF', 'IS', 'IN', 'DANGER', 'HELP', 'ME', 'TO', 'DELIVER', 'HIM'] +6070-86744-0025-2594: ref=['WHAT', 'ARE', 'HIS', 'MEANS', 'OF', 'EXISTENCE', 'WHAT', 'IS', 'HIS', 'BIRTHPLACE', 'OF', 'WHAT', 'COUNTRY', 'IS', 'HE', 'A', 'NATIVE'] +6070-86744-0025-2594: hyp=['WHAT', 'ARE', 'HIS', 'MEANS', 'OF', 'EXISTENCE', 'WHAT', 'IS', 'HIS', 'BOTH', 'PLEASE', 'OF', 'WHAT', 'COUNTRIES', 
'HE', 'A', 'NATIVE'] +6070-86744-0026-2595: ref=['I', 'CONFESS', 'HE', 'ASKED', 'ME', 'NONE', 'NO', 'HE', 'MERELY', 'CAME', 'AND', 'FREED', 'ME', 'FROM', 'THE', 'HANDS', 'OF', 'SIGNOR', 'VAMPA', 'WHERE', 'I', 'CAN', 'ASSURE', 'YOU', 'IN', 'SPITE', 'OF', 'ALL', 'MY', 'OUTWARD', 'APPEARANCE', 'OF', 'EASE', 'AND', 'UNCONCERN', 'I', 'DID', 'NOT', 'VERY', 'PARTICULARLY', 'CARE', 'TO', 'REMAIN'] +6070-86744-0026-2595: hyp=['I', 'CONFESS', 'HE', 'ASKED', 'ME', 'NONE', 'NO', 'HE', 'MERELY', 'CAME', 'AND', 'FREED', 'ME', 'FROM', 'THE', 'HANDS', 'OF', 'SENOR', 'VAMPA', 'WHERE', 'I', 'CAN', 'ASSURE', 'YOU', 'IN', 'SPITE', 'OF', 'ALL', 'MY', 'OUTWARD', 'APPEARANCE', 'OF', 'EASE', 'AND', 'UNCONCERN', 'I', 'DID', 'NOT', 'VERY', 'PARTICULARLY', 'CARE', 'TO', 'REMAIN'] +6070-86744-0027-2596: ref=['AND', 'THIS', 'TIME', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'CONTRARY', 'TO', 'THE', 'USUAL', 'STATE', 'OF', 'AFFAIRS', 'IN', 'DISCUSSIONS', 'BETWEEN', 'THE', 'YOUNG', 'MEN', 'THE', 'EFFECTIVE', 'ARGUMENTS', 'WERE', 'ALL', 'ON', "ALBERT'S", 'SIDE'] +6070-86744-0027-2596: hyp=['AND', 'THIS', 'TIME', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'CONTRARY', 'TO', 'THE', 'USUAL', 'STATE', 'OF', 'AFFAIRS', 'IN', 'DISCUSSIONS', 'BETWEEN', 'THE', 'YOUNG', 'MEN', 'THE', 'EFFECTIVE', 'ARGUMENTS', 'WERE', 'ALL', 'ON', "ALBERT'S", 'SIDE'] +6070-86744-0028-2597: ref=['WELL', 'SAID', 'FRANZ', 'WITH', 'A', 'SIGH', 'DO', 'AS', 'YOU', 'PLEASE', 'MY', 'DEAR', 'VISCOUNT', 'FOR', 'YOUR', 'ARGUMENTS', 'ARE', 'BEYOND', 'MY', 'POWERS', 'OF', 'REFUTATION'] +6070-86744-0028-2597: hyp=['WELL', 'SAID', 'FRANZ', 'WITH', 'A', 'SIGH', 'DO', 'AS', 'YOU', 'PLEASE', 'MY', 'DEAR', 'VISCOUNT', 'FOR', 'YOUR', 'ARGUMENTS', 'ARE', 'BEYOND', 'MY', 'POWERS', 'OF', 'REFUTATION'] +6070-86744-0029-2598: ref=['AND', 'NOW', 'MY', 'DEAR', 'FRANZ', 'LET', 'US', 'TALK', 'OF', 'SOMETHING', 'ELSE'] +6070-86744-0029-2598: hyp=['AND', 'NOW', 'MY', 'DEAR', 'FRANZ', 'LET', 'US', 'TALK', 'OF', 'SOMETHING', 'ELSE'] +6070-86745-0000-2549: ref=['THEN', 'SHOULD', 'ANYTHING', 'APPEAR', 'TO', 'MERIT', 'A', 'MORE', 'MINUTE', 'EXAMINATION', 'ALBERT', 'DE', 'MORCERF', 'COULD', 'FOLLOW', 'UP', 'HIS', 'RESEARCHES', 'BY', 'MEANS', 'OF', 'A', 'SMALL', 'GATE', 'SIMILAR', 'TO', 'THAT', 'CLOSE', 'TO', 'THE', "CONCIERGE'S", 'DOOR', 'AND', 'WHICH', 'MERITS', 'A', 'PARTICULAR', 'DESCRIPTION'] +6070-86745-0000-2549: hyp=['THEN', 'SHOULD', 'ANYTHING', 'APPEAR', 'TO', 'MERIT', 'A', 'MORE', 'MINUTE', 'EXAMINATION', "I'LL", 'BEAR', 'THE', 'MORCERF', 'COULD', 'FOLLOW', 'UP', 'HIS', 'RESEARCHES', 'BY', 'MEANS', 'OF', 'A', 'SMALL', 'GATE', 'SIMILAR', 'TO', 'THAT', 'CLOSE', 'TO', 'THE', "CONCIERGE'S", 'DOOR', 'AND', 'WHICH', 'MERITS', 'ARE', 'PARTICULAR', 'DESCRIPTION'] +6070-86745-0001-2550: ref=['SHRUBS', 'AND', 'CREEPING', 'PLANTS', 'COVERED', 'THE', 'WINDOWS', 'AND', 'HID', 'FROM', 'THE', 'GARDEN', 'AND', 'COURT', 'THESE', 'TWO', 'APARTMENTS', 'THE', 'ONLY', 'ROOMS', 'INTO', 'WHICH', 'AS', 'THEY', 'WERE', 'ON', 'THE', 'GROUND', 'FLOOR', 'THE', 'PRYING', 'EYES', 'OF', 'THE', 'CURIOUS', 'COULD', 'PENETRATE'] +6070-86745-0001-2550: hyp=['SHRUBS', 'AND', 'CREEPING', 'PLANTS', 'COVERED', 'THE', 'WINDOWS', 'AND', 'HID', 'FROM', 'THE', 'GARDEN', 'AND', 'COURT', 'THESE', 'TWO', 'APARTMENTS', 'THE', 'ONLY', 'ROOMS', 'INTO', 'WHICH', 'AS', 'THEY', 'WERE', 'ON', 'THE', 'GROUND', 'FLOOR', 'THE', 'PRYING', 'EYES', 'OF', 'THE', 'CURIOUS', 'COULD', 'PENETRATE'] +6070-86745-0002-2551: ref=['AT', 'A', 'QUARTER', 'TO', 'TEN', 'A', 'VALET', 'ENTERED', 'HE', 'COMPOSED', 'WITH', 'A', 'LITTLE', 'GROOM', 
'NAMED', 'JOHN', 'AND', 'WHO', 'ONLY', 'SPOKE', 'ENGLISH', 'ALL', "ALBERT'S", 'ESTABLISHMENT', 'ALTHOUGH', 'THE', 'COOK', 'OF', 'THE', 'HOTEL', 'WAS', 'ALWAYS', 'AT', 'HIS', 'SERVICE', 'AND', 'ON', 'GREAT', 'OCCASIONS', 'THE', "COUNT'S", 'CHASSEUR', 'ALSO'] +6070-86745-0002-2551: hyp=['AT', 'A', 'QUARTER', 'TO', 'TEN', 'THE', 'VALLED', 'ENTERED', 'HE', 'COMPOSED', 'WITH', 'A', 'LITTLE', 'ROOM', 'NAMED', 'JOHN', 'AND', 'WHO', 'ONLY', 'SPOKE', 'ENGLISH', 'ALL', "ALBERT'S", 'ESTABLISHMENT', 'ALTHOUGH', 'THE', 'COOK', 'OF', 'THE', 'HOTEL', 'WAS', 'ALWAYS', 'AT', 'HIS', 'SERVICE', 'AND', 'ON', 'GREAT', 'OCCASIONS', 'THE', 'COUNT', 'CHASSEUR', 'ALSO'] +6070-86745-0003-2552: ref=['WAIT', 'THEN', 'DURING', 'THE', 'DAY', 'TELL', 'ROSA', 'THAT', 'WHEN', 'I', 'LEAVE', 'THE', 'OPERA', 'I', 'WILL', 'SUP', 'WITH', 'HER', 'AS', 'SHE', 'WISHES'] +6070-86745-0003-2552: hyp=['WAIT', 'THEN', 'DURING', 'THE', 'DAY', 'TELL', 'ROSA', 'THAT', 'WHEN', 'I', 'LEAVE', 'THE', 'OPERA', 'I', 'WILL', 'SUP', 'WITH', 'HER', 'AS', 'SHE', 'WISHES'] +6070-86745-0004-2553: ref=['VERY', 'WELL', 'AT', 'HALF', 'PAST', 'TEN'] +6070-86745-0004-2553: hyp=['VERY', 'WELL', 'AT', 'HALF', 'PAST', 'TEN'] +6070-86745-0005-2554: ref=['IS', 'THE', 'COUNTESS', 'UP', 'YET'] +6070-86745-0005-2554: hyp=['IS', 'THE', 'COUNTESS', 'UP', 'YET'] +6070-86745-0006-2555: ref=['THE', 'VALET', 'LEFT', 'THE', 'ROOM'] +6070-86745-0006-2555: hyp=['THE', 'VALET', 'LEFT', 'THE', 'ROOM'] +6070-86745-0007-2556: ref=['GOOD', 'MORNING', 'LUCIEN', 'GOOD', 'MORNING', 'SAID', 'ALBERT', 'YOUR', 'PUNCTUALITY', 'REALLY', 'ALARMS', 'ME'] +6070-86745-0007-2556: hyp=['GOOD', 'MORNING', 'MISS', 'YOUNG', 'GOOD', 'MORNING', 'SAID', 'ALBERT', 'YOUR', 'PUNCTUALITY', 'REALLY', 'ALARMS', 'ME'] +6070-86745-0008-2557: ref=['YOU', 'WHOM', 'I', 'EXPECTED', 'LAST', 'YOU', 'ARRIVE', 'AT', 'FIVE', 'MINUTES', 'TO', 'TEN', 'WHEN', 'THE', 'TIME', 'FIXED', 'WAS', 'HALF', 'PAST'] +6070-86745-0008-2557: hyp=['YOU', 'WHOM', 'I', 'EXPECTED', 'LAST', 'YOU', 'ARRIVE', 'AT', 'FIVE', 'MINUTES', 'TO', 'TEN', 'WHEN', 'THE', 'TIME', 'FIXED', 'WAS', 'HALF', 'PAST'] +6070-86745-0009-2558: ref=['NO', 'NO', 'MY', 'DEAR', 'FELLOW', 'DO', 'NOT', 'CONFOUND', 'OUR', 'PLANS'] +6070-86745-0009-2558: hyp=['NO', 'NO', 'MY', 'DEAR', 'FELLOW', 'DO', 'NOT', 'CONFOUND', 'OUR', 'PLANS'] +6070-86745-0010-2559: ref=['YES', 'HE', 'HAS', 'NOT', 'MUCH', 'TO', 'COMPLAIN', 'OF', 'BOURGES', 'IS', 'THE', 'CAPITAL', 'OF', 'CHARLES', 'SEVEN'] +6070-86745-0010-2559: hyp=['YES', 'HE', 'HAS', 'NOT', 'MUCH', 'TO', 'COMPLAIN', 'OF', 'BOURGE', 'IS', 'THE', 'CAPITAL', 'OF', 'CHARLES', 'THE', 'SEVENTH'] +6070-86745-0011-2560: ref=['IT', 'IS', 'FOR', 'THAT', 'REASON', 'YOU', 'SEE', 'ME', 'SO', 'EARLY'] +6070-86745-0011-2560: hyp=['IT', 'IS', 'FOR', 'THAT', 'REASON', 'YOU', 'SEE', 'ME', 'SO', 'EARLY'] +6070-86745-0012-2561: ref=['I', 'RETURNED', 'HOME', 'AT', 'DAYBREAK', 'AND', 'STROVE', 'TO', 'SLEEP', 'BUT', 'MY', 'HEAD', 'ACHED', 'AND', 'I', 'GOT', 'UP', 'TO', 'HAVE', 'A', 'RIDE', 'FOR', 'AN', 'HOUR'] +6070-86745-0012-2561: hyp=['I', 'RETURNED', 'HOME', 'AT', 'DAYBREAK', 'AND', 'STROVE', 'TO', 'SLEEP', 'BUT', 'MY', 'HEAD', 'ACHED', 'AND', 'I', 'GOT', 'UP', 'TO', 'HAVE', 'A', 'RIDE', 'FOR', 'AN', 'HOUR'] +6070-86745-0013-2562: ref=['PESTE', 'I', 'WILL', 'DO', 'NOTHING', 'OF', 'THE', 'KIND', 'THE', 'MOMENT', 'THEY', 'COME', 'FROM', 'GOVERNMENT', 'YOU', 'WOULD', 'FIND', 'THEM', 'EXECRABLE'] +6070-86745-0013-2562: hyp=['PESTS', 'I', 'WILL', 'DO', 'NOTHING', 'OF', 'THE', 'KIND', 'THE', 'MOMENT', 'THEY', 'COME', 'FROM', 'GOVERNMENT', 'YOU', 
'WOULD', 'FIND', 'THEM', 'EXECRABLE'] +6070-86745-0014-2563: ref=['BESIDES', 'THAT', 'DOES', 'NOT', 'CONCERN', 'THE', 'HOME', 'BUT', 'THE', 'FINANCIAL', 'DEPARTMENT'] +6070-86745-0014-2563: hyp=['BESIDES', 'THAT', 'DOES', 'NOT', 'CONCERN', 'THE', 'HOME', 'BUT', 'THE', 'FINANCIAL', 'DEPARTMENT'] +6070-86745-0015-2564: ref=['ABOUT', 'WHAT', 'ABOUT', 'THE', 'PAPERS'] +6070-86745-0015-2564: hyp=['ABOUT', 'WHAT', 'ABOUT', 'THE', 'PAPERS'] +6070-86745-0016-2565: ref=['IN', 'THE', 'ENTIRE', 'POLITICAL', 'WORLD', 'OF', 'WHICH', 'YOU', 'ARE', 'ONE', 'OF', 'THE', 'LEADERS'] +6070-86745-0016-2565: hyp=['IN', 'THE', 'ENTIRE', 'POLITICAL', 'WORLD', 'OF', 'WHICH', 'YOU', 'ARE', 'ONE', 'OF', 'THE', 'LEADERS'] +6070-86745-0017-2566: ref=['THEY', 'SAY', 'THAT', 'IT', 'IS', 'QUITE', 'FAIR', 'AND', 'THAT', 'SOWING', 'SO', 'MUCH', 'RED', 'YOU', 'OUGHT', 'TO', 'REAP', 'A', 'LITTLE', 'BLUE'] +6070-86745-0017-2566: hyp=['THEY', 'SAY', 'THAT', 'IT', 'IS', 'QUITE', 'FAIR', 'AND', 'THAT', 'SOWING', 'SO', 'MUCH', 'RED', 'YOU', 'OUGHT', 'TO', 'REAP', 'A', 'LITTLE', 'BLUE'] +6070-86745-0018-2567: ref=['COME', 'COME', 'THAT', 'IS', 'NOT', 'BAD', 'SAID', 'LUCIEN'] +6070-86745-0018-2567: hyp=['COME', 'COME', 'THAT', 'IS', 'NOT', 'BAD', 'SAID', 'LUCIAN'] +6070-86745-0019-2568: ref=['WITH', 'YOUR', 'TALENTS', 'YOU', 'WOULD', 'MAKE', 'YOUR', 'FORTUNE', 'IN', 'THREE', 'OR', 'FOUR', 'YEARS'] +6070-86745-0019-2568: hyp=['WITH', 'THE', 'OTALONS', 'HE', 'WOULD', 'MAKE', 'YOUR', 'FORTUNE', 'IN', 'THREE', 'OR', 'FOUR', 'YEARS'] +6128-63240-0000-503: ref=['THE', 'GENTLEMAN', 'HAD', 'NOT', 'EVEN', 'NEEDED', 'TO', 'SIT', 'DOWN', 'TO', 'BECOME', 'INTERESTED', 'APPARENTLY', 'HE', 'HAD', 'TAKEN', 'UP', 'THE', 'VOLUME', 'FROM', 'A', 'TABLE', 'AS', 'SOON', 'AS', 'HE', 'CAME', 'IN', 'AND', 'STANDING', 'THERE', 'AFTER', 'A', 'SINGLE', 'GLANCE', 'ROUND', 'THE', 'APARTMENT', 'HAD', 'LOST', 'HIMSELF', 'IN', 'ITS', 'PAGES'] +6128-63240-0000-503: hyp=['THE', 'GENTLEMAN', 'HAD', 'NOT', 'EVEN', 'NEEDED', 'TO', 'SIT', 'DOWN', 'TO', 'BECOME', 'INTERESTED', 'APPARENTLY', 'HE', 'HAD', 'TAKEN', 'UP', 'THE', 'VOLUME', 'FROM', 'A', 'TABLE', 'AS', 'SOON', 'AS', 'HE', 'CAME', 'IN', 'AND', 'STANDING', 'THERE', 'AFTER', 'A', 'SINGLE', 'GLANCE', 'ROUND', 'THE', 'APARTMENT', 'HAD', 'LOST', 'HIMSELF', 'IN', 'HIS', 'PAGES'] +6128-63240-0001-504: ref=['THAT', 'HAS', 'AN', 'UNFLATTERING', 'SOUND', 'FOR', 'ME', 'SAID', 'THE', 'YOUNG', 'MAN'] +6128-63240-0001-504: hyp=['THAT', 'HAS', 'AN', 'UNFLATTERING', 'SOUND', 'FOR', 'ME', 'SAID', 'THE', 'YOUNG', 'MAN'] +6128-63240-0002-505: ref=['SHE', 'IS', 'WILLING', 'TO', 'RISK', 'THAT'] +6128-63240-0002-505: hyp=['SHE', 'IS', 'WILLING', 'TO', 'RISK', 'THAT'] +6128-63240-0003-506: ref=['JUST', 'AS', 'I', 'AM', 'THE', 'VISITOR', 'INQUIRED', 'PRESENTING', 'HIMSELF', 'WITH', 'RATHER', 'A', 'WORK', 'A', 'DAY', 'ASPECT'] +6128-63240-0003-506: hyp=['JUST', 'AS', 'I', 'AM', 'THE', 'VISITOR', 'INQUIRED', 'PRESENTING', 'HIMSELF', 'WITH', 'RATHER', 'A', 'WORKADAY', 'ASPECT'] +6128-63240-0004-507: ref=['HE', 'WAS', 'TALL', 'AND', 'LEAN', 'AND', 'DRESSED', 'THROUGHOUT', 'IN', 'BLACK', 'HIS', 'SHIRT', 'COLLAR', 'WAS', 'LOW', 'AND', 'WIDE', 'AND', 'THE', 'TRIANGLE', 'OF', 'LINEN', 'A', 'LITTLE', 'CRUMPLED', 'EXHIBITED', 'BY', 'THE', 'OPENING', 'OF', 'HIS', 'WAISTCOAT', 'WAS', 'ADORNED', 'BY', 'A', 'PIN', 'CONTAINING', 'A', 'SMALL', 'RED', 'STONE'] +6128-63240-0004-507: hyp=['HE', 'WAS', 'TALL', 'AND', 'LEAN', 'AND', 'DRESSED', 'THROUGHOUT', 'IN', 'BLACK', 'HIS', 'SHIRT', 'COLLAR', 'WAS', 'LOW', 'AND', 'WIDE', 'AND', 'THE', 'TRIANGLE', 
'OF', 'LINEN', 'A', 'LITTLE', 'CRAMPLED', 'EXHIBITED', 'BY', 'THE', 'OPENING', 'OF', 'HIS', 'WAISTCOAT', 'WAS', 'ADORNED', 'BY', 'A', 'PIN', 'CONTAINING', 'A', 'SMALL', 'RED', 'STONE'] +6128-63240-0005-508: ref=['IN', 'SPITE', 'OF', 'THIS', 'DECORATION', 'THE', 'YOUNG', 'MAN', 'LOOKED', 'POOR', 'AS', 'POOR', 'AS', 'A', 'YOUNG', 'MAN', 'COULD', 'LOOK', 'WHO', 'HAD', 'SUCH', 'A', 'FINE', 'HEAD', 'AND', 'SUCH', 'MAGNIFICENT', 'EYES'] +6128-63240-0005-508: hyp=['IN', 'SPITE', 'OF', 'THIS', 'DECORATION', 'THE', 'YOUNG', 'MAN', 'LOOKED', 'POOR', 'AS', 'FAR', 'AS', 'A', 'YOUNG', 'MAN', 'COULD', 'LOOK', 'WHO', 'HAD', 'SUCH', 'A', 'FINE', 'HAIR', 'AND', 'SUCH', 'MAGNIFICENT', 'EYES'] +6128-63240-0006-509: ref=['THOSE', 'OF', 'BASIL', 'RANSOM', 'WERE', 'DARK', 'DEEP', 'AND', 'GLOWING', 'HIS', 'HEAD', 'HAD', 'A', 'CHARACTER', 'OF', 'ELEVATION', 'WHICH', 'FAIRLY', 'ADDED', 'TO', 'HIS', 'STATURE', 'IT', 'WAS', 'A', 'HEAD', 'TO', 'BE', 'SEEN', 'ABOVE', 'THE', 'LEVEL', 'OF', 'A', 'CROWD', 'ON', 'SOME', 'JUDICIAL', 'BENCH', 'OR', 'POLITICAL', 'PLATFORM', 'OR', 'EVEN', 'ON', 'A', 'BRONZE', 'MEDAL'] +6128-63240-0006-509: hyp=['THOSE', 'OF', 'BASIL', 'RANSOM', 'WENT', 'DARK', 'DEEP', 'AND', 'GLOWING', 'HIS', 'HEAD', 'HAD', 'A', 'CHARACTER', 'OF', 'ELEVATION', 'WHICH', 'FAIRLY', 'ADDED', 'TO', 'HIS', 'STATUE', 'IT', 'WAS', 'A', 'HEAD', 'TO', 'BE', 'SEEN', 'ABOVE', 'THE', 'LEVEL', 'OF', 'A', 'CROWD', 'ON', 'SOME', 'JUDICIAL', 'BENCH', 'OR', 'POLITICAL', 'PLATFORM', 'OR', 'EVEN', 'ON', 'A', 'BRONZE', 'MEDDLE'] +6128-63240-0007-510: ref=['THESE', 'THINGS', 'THE', 'EYES', 'ESPECIALLY', 'WITH', 'THEIR', 'SMOULDERING', 'FIRE', 'MIGHT', 'HAVE', 'INDICATED', 'THAT', 'HE', 'WAS', 'TO', 'BE', 'A', 'GREAT', 'AMERICAN', 'STATESMAN', 'OR', 'ON', 'THE', 'OTHER', 'HAND', 'THEY', 'MIGHT', 'SIMPLY', 'HAVE', 'PROVED', 'THAT', 'HE', 'CAME', 'FROM', 'CAROLINA', 'OR', 'ALABAMA'] +6128-63240-0007-510: hyp=['THESE', 'THINGS', 'THE', 'EYES', 'ESPECIALLY', 'WITH', 'THEIR', 'SMOULDERING', 'FIRE', 'MIGHT', 'HAVE', 'INDICATED', 'THAT', 'HE', 'WAS', 'TO', 'BE', 'GREAT', 'AMERICAN', 'STATESMAN', 'OR', 'ON', 'THE', 'OTHER', 'HAND', 'THERE', 'MIGHT', 'SIMPLY', 'HAVE', 'PROVED', 'THAT', 'HE', 'CAME', 'FROM', 'CAROLINA', 'OR', 'ALADAMA'] +6128-63240-0008-511: ref=['AND', 'YET', 'THE', 'READER', 'WHO', 'LIKES', 'A', 'COMPLETE', 'IMAGE', 'WHO', 'DESIRES', 'TO', 'READ', 'WITH', 'THE', 'SENSES', 'AS', 'WELL', 'AS', 'WITH', 'THE', 'REASON', 'IS', 'ENTREATED', 'NOT', 'TO', 'FORGET', 'THAT', 'HE', 'PROLONGED', 'HIS', 'CONSONANTS', 'AND', 'SWALLOWED', 'HIS', 'VOWELS', 'THAT', 'HE', 'WAS', 'GUILTY', 'OF', 'ELISIONS', 'AND', 'INTERPOLATIONS', 'WHICH', 'WERE', 'EQUALLY', 'UNEXPECTED', 'AND', 'THAT', 'HIS', 'DISCOURSE', 'WAS', 'PERVADED', 'BY', 'SOMETHING', 'SULTRY', 'AND', 'VAST', 'SOMETHING', 'ALMOST', 'AFRICAN', 'IN', 'ITS', 'RICH', 'BASKING', 'TONE', 'SOMETHING', 'THAT', 'SUGGESTED', 'THE', 'TEEMING', 'EXPANSE', 'OF', 'THE', 'COTTON', 'FIELD'] +6128-63240-0008-511: hyp=['AND', 'YET', 'THE', 'READER', 'WHO', 'LIKES', 'A', 'COMPLETE', 'IMAGE', 'WHO', 'DESIRES', 'TO', 'READ', 'WITH', 'THE', 'SENSES', 'AS', 'WELL', 'AS', 'WITH', 'THE', 'REASON', 'IS', 'ENTREATED', 'NOT', 'TO', 'FORGET', 'THAT', 'HE', 'PROLONGED', 'HIS', 'COUNTENANCE', 'AND', 'SWALLOWED', 'HIS', 'VOWELS', 'THAT', 'HE', 'WAS', 'GUILTY', 'VILLAGE', 'AND', 'INTERPOLATIONS', 'WHICH', 'WERE', 'EQUALLY', 'INEXPECTED', 'AND', 'THAT', 'HIS', 'DISCOURSE', 'WAS', 'PERVADED', 'BY', 'SOMETHING', 'SULTRY', 'AND', 'VAST', 'SOMETHING', 'ALMOST', 'AFRICAN', 'IN', 'ITS', 'RICH', 'BASKING', 'TONE', 
'SOMETHING', 'THAT', 'SUGGESTED', 'THE', 'TEEMING', 'EXPANSE', 'OF', 'THE', 'COTTON', 'FIELD'] +6128-63240-0009-512: ref=['AND', 'HE', 'TOOK', 'UP', 'HIS', 'HAT', 'VAGUELY', 'A', 'SOFT', 'BLACK', 'HAT', 'WITH', 'A', 'LOW', 'CROWN', 'AND', 'AN', 'IMMENSE', 'STRAIGHT', 'BRIM'] +6128-63240-0009-512: hyp=['AND', 'HE', 'TOOK', 'UP', 'HIS', 'HAT', 'VAGUELY', 'A', 'SOFT', 'BLACK', 'HAT', 'WITH', 'A', 'LOW', 'CROWN', 'AND', 'AN', 'IMMENSE', 'STRAIGHT', 'BRIM'] +6128-63240-0010-513: ref=['WELL', 'SO', 'IT', 'IS', 'THEY', 'ARE', 'ALL', 'WITCHES', 'AND', 'WIZARDS', 'MEDIUMS', 'AND', 'SPIRIT', 'RAPPERS', 'AND', 'ROARING', 'RADICALS'] +6128-63240-0010-513: hyp=['WELL', 'SO', 'IT', 'IS', 'THERE', 'ARE', 'ALL', 'WITCHES', 'AND', 'WIZARDS', 'MEDIUMS', 'AND', 'SPIRIT', 'WRAPPERS', 'AND', 'ROWING', 'RADICALS'] +6128-63240-0011-514: ref=['IF', 'YOU', 'ARE', 'GOING', 'TO', 'DINE', 'WITH', 'HER', 'YOU', 'HAD', 'BETTER', 'KNOW', 'IT', 'OH', 'MURDER'] +6128-63240-0011-514: hyp=['IF', 'YOU', 'ARE', 'GOING', 'TO', 'DINE', 'WITH', 'HER', 'YOU', 'HAD', 'BETTER', 'KNOW', 'IT', 'OH', 'MURDER'] +6128-63240-0012-515: ref=['HE', 'LOOKED', 'AT', 'MISSUS', 'LUNA', 'WITH', 'INTELLIGENT', 'INCREDULITY'] +6128-63240-0012-515: hyp=['HE', 'LIFTED', 'MISSUS', 'LEWINA', 'WITH', 'INTELLIGENT', 'INCREDULITY'] +6128-63240-0013-516: ref=['SHE', 'WAS', 'ATTRACTIVE', 'AND', 'IMPERTINENT', 'ESPECIALLY', 'THE', 'LATTER'] +6128-63240-0013-516: hyp=['SHE', 'WAS', 'ATTRACTIVE', 'AND', 'IMPERTINENT', 'ESPECIALLY', 'THE', 'LATTER'] +6128-63240-0014-517: ref=['HAVE', 'YOU', 'BEEN', 'IN', 'EUROPE'] +6128-63240-0014-517: hyp=['HAVE', 'YOU', 'BEEN', 'IN', 'EUROPE'] +6128-63240-0015-518: ref=['NO', 'I', "HAVEN'T", 'BEEN', 'ANYWHERE'] +6128-63240-0015-518: hyp=['NO', 'I', "HAVEN'T", 'BEEN', 'ANYWHERE'] +6128-63240-0016-519: ref=['SHE', 'HATES', 'IT', 'SHE', 'WOULD', 'LIKE', 'TO', 'ABOLISH', 'IT'] +6128-63240-0016-519: hyp=['SHE', 'HATES', 'IT', 'SHE', 'WOULD', 'LIKE', 'TO', 'ABOLISH', 'IT'] +6128-63240-0017-520: ref=['THIS', 'LAST', 'REMARK', 'HE', 'MADE', 'AT', 'A', 'VENTURE', 'FOR', 'HE', 'HAD', 'NATURALLY', 'NOT', 'DEVOTED', 'ANY', 'SUPPOSITION', 'WHATEVER', 'TO', 'MISSUS', 'LUNA'] +6128-63240-0017-520: hyp=['THIS', 'LAST', 'REMARK', 'HE', 'MADE', 'THAT', 'ADVENTURE', 'FOR', 'HE', 'HAD', 'NATURALLY', 'NOT', 'DEVOTED', 'ANY', 'SUPPOSITION', 'WHATEVER', 'TO', 'MISSUS', 'LENA'] +6128-63240-0018-521: ref=['ARE', 'YOU', 'VERY', 'AMBITIOUS', 'YOU', 'LOOK', 'AS', 'IF', 'YOU', 'WERE'] +6128-63240-0018-521: hyp=['ARE', 'YOU', 'VERY', 'AMBITIOUS', 'YOU', 'LOOK', 'AS', 'IF', 'YOU', 'WERE'] +6128-63240-0019-522: ref=['AND', 'MISSUS', 'LUNA', 'ADDED', 'THAT', 'NOW', 'SHE', 'WAS', 'BACK', 'SHE', "DIDN'T", 'KNOW', 'WHAT', 'SHE', 'SHOULD', 'DO'] +6128-63240-0019-522: hyp=['AND', 'MISSUS', 'LENA', 'ADDED', 'THAT', 'NOW', 'SHE', 'WAS', 'BACK', 'SHE', "DIDN'T", 'KNOW', 'WHAT', 'SHE', 'SHOULD', 'DO'] +6128-63240-0020-523: ref=['ONE', "DIDN'T", 'EVEN', 'KNOW', 'WHAT', 'ONE', 'HAD', 'COME', 'BACK', 'FOR'] +6128-63240-0020-523: hyp=['ONE', "DIDN'T", 'EVEN', 'THERE', 'WHAT', 'ONE', 'HAD', 'COME', 'BACK', 'FOR'] +6128-63240-0021-524: ref=['BESIDES', 'OLIVE', "DIDN'T", 'WANT', 'HER', 'IN', 'BOSTON', 'AND', "DIDN'T", 'GO', 'THROUGH', 'THE', 'FORM', 'OF', 'SAYING', 'SO'] +6128-63240-0021-524: hyp=['BESIDES', 'OLIVE', "DIDN'T", 'WANT', 'HER', 'IN', 'BOSTON', 'AND', "DIDN'T", 'GO', 'THROUGH', 'THE', 'FORM', 'OF', 'SAYING', 'SO'] +6128-63240-0022-525: ref=['THAT', 'WAS', 'ONE', 'COMFORT', 'WITH', 'OLIVE', 'SHE', 'NEVER', 'WENT', 'THROUGH', 'ANY', 'FORMS'] 
+6128-63240-0022-525: hyp=['THAT', 'WAS', 'ONE', 'COMFORT', 'WITH', 'ALIVE', 'SHE', 'NEVER', 'WENT', 'THROUGH', 'ANY', 'FORMS'] +6128-63240-0023-526: ref=['SHE', 'STOOD', 'THERE', 'LOOKING', 'CONSCIOUSLY', 'AND', 'RATHER', 'SERIOUSLY', 'AT', 'MISTER', 'RANSOM', 'A', 'SMILE', 'OF', 'EXCEEDING', 'FAINTNESS', 'PLAYED', 'ABOUT', 'HER', 'LIPS', 'IT', 'WAS', 'JUST', 'PERCEPTIBLE', 'ENOUGH', 'TO', 'LIGHT', 'UP', 'THE', 'NATIVE', 'GRAVITY', 'OF', 'HER', 'FACE'] +6128-63240-0023-526: hyp=['SHE', 'STOOD', 'THERE', 'LOOKING', 'CONSCIOUSLY', 'AND', 'RATHER', 'SERIOUSLY', 'AND', 'MISTER', 'RANSOM', 'A', 'SMILE', 'OF', 'EXCEEDING', 'FAINTNESS', 'PLAYED', 'ABOUT', 'HER', 'LIPS', 'IT', 'WAS', 'JUST', 'PERCEPTIBLE', 'ENOUGH', 'TO', 'LIGHT', 'UP', 'THE', 'NATIVE', 'GRAVITY', 'OF', 'HER', 'FACE'] +6128-63240-0024-527: ref=['HER', 'VOICE', 'WAS', 'LOW', 'AND', 'AGREEABLE', 'A', 'CULTIVATED', 'VOICE', 'AND', 'SHE', 'EXTENDED', 'A', 'SLENDER', 'WHITE', 'HAND', 'TO', 'HER', 'VISITOR', 'WHO', 'REMARKED', 'WITH', 'SOME', 'SOLEMNITY', 'HE', 'FELT', 'A', 'CERTAIN', 'GUILT', 'OF', 'PARTICIPATION', 'IN', 'MISSUS', "LUNA'S", 'INDISCRETION', 'THAT', 'HE', 'WAS', 'INTENSELY', 'HAPPY', 'TO', 'MAKE', 'HER', 'ACQUAINTANCE'] +6128-63240-0024-527: hyp=['HER', 'VOICE', 'WAS', 'LOW', 'AND', 'AGREEABLE', 'A', 'CULTIVATED', 'VOICE', 'AND', 'SHE', 'EXTENDED', 'A', 'SLENDER', 'WHITE', 'HAND', 'TO', 'HER', 'VISITOR', 'HER', 'REMARKED', 'WITH', 'SOME', 'SOLEMNITY', 'HE', 'FELT', 'A', 'CERTAIN', 'GUILT', 'OF', 'PARTICIPATION', 'IN', 'MISSUS', "LUNA'S", 'INDISCRETION', 'THAT', 'HE', 'WAS', 'INTENSELY', 'HAPPY', 'TO', 'MAKE', 'HER', 'ACQUAINTANCE'] +6128-63240-0025-528: ref=['HE', 'OBSERVED', 'THAT', 'MISS', "CHANCELLOR'S", 'HAND', 'WAS', 'AT', 'ONCE', 'COLD', 'AND', 'LIMP', 'SHE', 'MERELY', 'PLACED', 'IT', 'IN', 'HIS', 'WITHOUT', 'EXERTING', 'THE', 'SMALLEST', 'PRESSURE'] +6128-63240-0025-528: hyp=['HE', 'OBSERVED', 'THAT', 'MISS', "CHANCELLOR'S", 'HAND', 'WAS', 'AT', 'ONCE', 'CALLED', 'IN', 'LIMP', 'SHE', 'MERELY', 'PLACED', 'IT', 'IN', 'HIS', 'WITHOUT', 'EXERTING', 'THE', 'SMALLEST', 'PRESSURE'] +6128-63240-0026-529: ref=['I', 'SHALL', 'BE', 'BACK', 'VERY', 'LATE', 'WE', 'ARE', 'GOING', 'TO', 'A', 'THEATRE', 'PARTY', "THAT'S", 'WHY', 'WE', 'DINE', 'SO', 'EARLY'] +6128-63240-0026-529: hyp=['I', 'SHALL', 'BE', 'BACK', 'VERY', 'LATE', 'WILL', "DON'T", 'YOU', 'THE', 'PARTY', "THAT'S", 'WHY', 'WE', 'DINE', 'SO', 'EARLY'] +6128-63240-0027-530: ref=['MISSUS', "LUNA'S", 'FAMILIARITY', 'EXTENDED', 'EVEN', 'TO', 'HER', 'SISTER', 'SHE', 'REMARKED', 'TO', 'MISS', 'CHANCELLOR', 'THAT', 'SHE', 'LOOKED', 'AS', 'IF', 'SHE', 'WERE', 'GOT', 'UP', 'FOR', 'A', 'SEA', 'VOYAGE'] +6128-63240-0027-530: hyp=['MISSUS', "LEANY'S", 'FAMILIARITY', 'EXTENDED', 'EVEN', 'TO', 'HER', 'SISTER', 'SHE', 'REMARKED', 'TO', 'MISS', 'CHANCELLOR', 'THAT', 'SHE', 'LOOKED', 'AS', 'IF', 'SHE', 'WERE', 'GOT', 'UP', 'FOR', 'A', 'SEA', 'VOYAGE'] +6128-63241-0000-557: ref=['POOR', 'RANSOM', 'ANNOUNCED', 'THIS', 'FACT', 'TO', 'HIMSELF', 'AS', 'IF', 'HE', 'HAD', 'MADE', 'A', 'GREAT', 'DISCOVERY', 'BUT', 'IN', 'REALITY', 'HE', 'HAD', 'NEVER', 'BEEN', 'SO', 'BOEOTIAN', 'AS', 'AT', 'THAT', 'MOMENT'] +6128-63241-0000-557: hyp=['POOR', 'RANSOM', 'ANNOUNCED', 'THIS', 'THAT', 'TO', 'HIMSELF', 'AS', 'IF', 'HE', 'HAD', 'MADE', 'A', 'GREAT', 'DISCOVERY', 'BUT', 'IN', 'REALITY', 'HE', 'HAD', 'NEVER', 'BEEN', 'SO', 'BE', 'OCHIAN', 'AS', 'AT', 'THAT', 'MOMENT'] +6128-63241-0001-558: ref=['THE', 'WOMEN', 'HE', 'HAD', 'HITHERTO', 'KNOWN', 'HAD', 'BEEN', 'MAINLY', 'OF', 'HIS', 'OWN', 'SOFT', 
'CLIME', 'AND', 'IT', 'WAS', 'NOT', 'OFTEN', 'THEY', 'EXHIBITED', 'THE', 'TENDENCY', 'HE', 'DETECTED', 'AND', 'CURSORILY', 'DEPLORED', 'IN', 'MISSUS', "LUNA'S", 'SISTER'] +6128-63241-0001-558: hyp=['THE', 'WOMEN', 'HE', 'HAD', 'HITHERTO', 'KNOWN', 'HAD', 'BEEN', 'MAINLY', 'OF', 'HIS', 'OWN', 'SOFT', 'CLIMB', 'AND', 'IT', 'WAS', 'NOT', 'OFTEN', 'THEY', 'EXHIBITED', 'THE', 'TENDENCY', 'HE', 'DETECTED', 'AND', 'CURSORILY', 'DEPLORED', 'IN', 'MISSUS', "LUNA'S", 'SISTER'] +6128-63241-0002-559: ref=['RANSOM', 'WAS', 'PLEASED', 'WITH', 'THE', 'VISION', 'OF', 'THAT', 'REMEDY', 'IT', 'MUST', 'BE', 'REPEATED', 'THAT', 'HE', 'WAS', 'VERY', 'PROVINCIAL'] +6128-63241-0002-559: hyp=['RANSOM', 'WAS', 'PLEASED', 'WITH', 'THE', 'VISION', 'OF', 'THAT', 'REMEDY', 'IT', 'MUST', 'BE', 'REPEATED', 'THAT', 'HE', 'WAS', 'VERY', 'PROVINCIAL'] +6128-63241-0003-560: ref=['HE', 'WAS', 'SORRY', 'FOR', 'HER', 'BUT', 'HE', 'SAW', 'IN', 'A', 'FLASH', 'THAT', 'NO', 'ONE', 'COULD', 'HELP', 'HER', 'THAT', 'WAS', 'WHAT', 'MADE', 'HER', 'TRAGIC'] +6128-63241-0003-560: hyp=['HE', 'WAS', 'SORRY', 'FOR', 'HER', 'BUT', 'HIS', 'SORROW', 'IN', 'A', 'FLASH', 'THAT', 'NO', 'ONE', 'COULD', 'HELP', 'HER', 'THAT', 'WAS', 'WHAT', 'MADE', 'HER', 'TRAGIC'] +6128-63241-0004-561: ref=['SHE', 'COULD', 'NOT', 'DEFEND', 'HERSELF', 'AGAINST', 'A', 'RICH', 'ADMIRATION', 'A', 'KIND', 'OF', 'TENDERNESS', 'OF', 'ENVY', 'OF', 'ANY', 'ONE', 'WHO', 'HAD', 'BEEN', 'SO', 'HAPPY', 'AS', 'TO', 'HAVE', 'THAT', 'OPPORTUNITY'] +6128-63241-0004-561: hyp=['SHE', 'COULD', 'NOT', 'DEFEND', 'HERSELF', 'AGAINST', 'A', 'RICH', 'ADMIRATION', 'A', 'KIND', 'OF', 'TENDERNESS', 'OF', 'ENVY', 'OF', 'ANY', 'ONE', 'WHO', 'HAD', 'BEEN', 'SO', 'HAPPY', 'AS', 'TO', 'HAVE', 'THAT', 'OPPORTUNITY'] +6128-63241-0005-562: ref=['HIS', 'FAMILY', 'WAS', 'RUINED', 'THEY', 'HAD', 'LOST', 'THEIR', 'SLAVES', 'THEIR', 'PROPERTY', 'THEIR', 'FRIENDS', 'AND', 'RELATIONS', 'THEIR', 'HOME', 'HAD', 'TASTED', 'OF', 'ALL', 'THE', 'CRUELTY', 'OF', 'DEFEAT'] +6128-63241-0005-562: hyp=['HIS', 'FAMILY', 'WAS', 'RUINED', 'THEY', 'HAD', 'LOST', 'THEIR', 'SLAVES', 'THEIR', 'PROPERTY', 'THE', 'FRIENDS', 'AND', 'RELATIONS', 'THE', 'HOME', 'HAD', 'TASTED', 'OF', 'ALL', 'THE', 'CRUELTY', 'OF', 'DEFEAT'] +6128-63241-0006-563: ref=['THE', 'STATE', 'OF', 'MISSISSIPPI', 'SEEMED', 'TO', 'HIM', 'THE', 'STATE', 'OF', 'DESPAIR', 'SO', 'HE', 'SURRENDERED', 'THE', 'REMNANTS', 'OF', 'HIS', 'PATRIMONY', 'TO', 'HIS', 'MOTHER', 'AND', 'SISTERS', 'AND', 'AT', 'NEARLY', 'THIRTY', 'YEARS', 'OF', 'AGE', 'ALIGHTED', 'FOR', 'THE', 'FIRST', 'TIME', 'IN', 'NEW', 'YORK', 'IN', 'THE', 'COSTUME', 'OF', 'HIS', 'PROVINCE', 'WITH', 'FIFTY', 'DOLLARS', 'IN', 'HIS', 'POCKET', 'AND', 'A', 'GNAWING', 'HUNGER', 'IN', 'HIS', 'HEART'] +6128-63241-0006-563: hyp=['THE', 'STATE', 'OF', 'MISSISSIPPI', 'SEEM', 'TO', 'HIM', 'THE', 'STATE', 'OF', 'DESPAIR', 'SO', 'HE', 'SURRENDERED', 'THE', 'REMNANTS', 'OF', 'HIS', 'PATRIMONY', 'TO', 'HIS', 'MOTHER', 'AND', 'SISTERS', 'AND', 'AT', 'NEARLY', 'THIRTY', 'YEARS', 'OF', 'AGE', 'DELIGHTED', 'FOR', 'THE', 'FIRST', 'TIME', 'IN', 'NEW', 'YORK', 'IN', 'THE', 'COSTUME', 'OF', 'HIS', 'PROVINCE', 'WITH', 'FIFTY', 'DOLLARS', 'IN', 'HIS', 'POCKET', 'AND', 'A', 'GNARRING', 'HUNGER', 'IN', 'HIS', 'HEART'] +6128-63241-0007-564: ref=['IT', 'WAS', 'IN', 'THE', 'FEMALE', 'LINE', 'AS', 'BASIL', 'RANSOM', 'HAD', 'WRITTEN', 'IN', 'ANSWERING', 'HER', 'LETTER', 'WITH', 'A', 'GOOD', 'DEAL', 'OF', 'FORM', 'AND', 'FLOURISH', 'HE', 'SPOKE', 'AS', 'IF', 'THEY', 'HAD', 'BEEN', 'ROYAL', 'HOUSES'] +6128-63241-0007-564: hyp=['IT', 
'WAS', 'IN', 'THE', 'FEMALE', 'LINE', 'AS', 'BALES', 'HAD', 'RANSOM', 'HAD', 'WRITTEN', 'IN', 'ANSWERING', 'HER', 'LETTER', 'WITH', 'A', 'GOOD', 'DEAL', 'OF', 'FORM', 'AND', 'FLOURISH', 'HE', 'SPOKE', 'AS', 'IF', 'THEY', 'HAD', 'BEEN', 'ROYAL', 'HOUSES'] +6128-63241-0008-565: ref=['IF', 'IT', 'HAD', 'BEEN', 'POSSIBLE', 'TO', 'SEND', 'MISSUS', 'RANSOM', 'MONEY', 'OR', 'EVEN', 'CLOTHES', 'SHE', 'WOULD', 'HAVE', 'LIKED', 'THAT', 'BUT', 'SHE', 'HAD', 'NO', 'MEANS', 'OF', 'ASCERTAINING', 'HOW', 'SUCH', 'AN', 'OFFERING', 'WOULD', 'BE', 'TAKEN'] +6128-63241-0008-565: hyp=['IF', 'IT', 'HAD', 'BEEN', 'POSSIBLE', 'TO', 'SEND', 'MISSUS', 'RANSOM', 'MONEY', 'OR', 'EVEN', 'CLOTHES', 'SHE', 'WOULD', 'HAVE', 'LIKED', 'THAT', 'BUT', 'SHE', 'HAD', 'NO', 'MEANS', 'OF', 'ASSERTING', 'HER', 'SUCH', 'AN', 'OFFERING', 'WOULD', 'BE', 'TAKEN'] +6128-63241-0009-566: ref=['OLIVE', 'HAD', 'A', 'FEAR', 'OF', 'EVERYTHING', 'BUT', 'HER', 'GREATEST', 'FEAR', 'WAS', 'OF', 'BEING', 'AFRAID'] +6128-63241-0009-566: hyp=['OLIV', 'HAD', 'A', 'FEAR', 'OF', 'EVERYTHING', 'BUT', 'HER', 'GREATEST', 'FEAR', 'WAS', 'OF', 'BEING', 'AFRAID'] +6128-63241-0010-567: ref=['SHE', 'HAD', 'ERECTED', 'IT', 'INTO', 'A', 'SORT', 'OF', 'RULE', 'OF', 'CONDUCT', 'THAT', 'WHENEVER', 'SHE', 'SAW', 'A', 'RISK', 'SHE', 'WAS', 'TO', 'TAKE', 'IT', 'AND', 'SHE', 'HAD', 'FREQUENT', 'HUMILIATIONS', 'AT', 'FINDING', 'HERSELF', 'SAFE', 'AFTER', 'ALL'] +6128-63241-0010-567: hyp=['SHE', 'HAD', 'ERECTED', 'IT', 'INTO', 'A', 'SORT', 'OF', 'RULE', 'OF', 'CONDUCT', 'THAT', 'WHENEVER', 'SHE', 'SAW', 'A', 'RISK', 'SHE', 'WAS', 'TO', 'TAKE', 'IT', 'AND', 'SHE', 'HAD', 'FREQUENT', 'HUMILIATIONS', 'AT', 'FINDING', 'HERSELF', 'SAVED', 'AFTER', 'ALL'] +6128-63241-0011-568: ref=['SHE', 'WAS', 'PERFECTLY', 'SAFE', 'AFTER', 'WRITING', 'TO', 'BASIL', 'RANSOM', 'AND', 'INDEED', 'IT', 'WAS', 'DIFFICULT', 'TO', 'SEE', 'WHAT', 'HE', 'COULD', 'HAVE', 'DONE', 'TO', 'HER', 'EXCEPT', 'THANK', 'HER', 'HE', 'WAS', 'ONLY', 'EXCEPTIONALLY', 'SUPERLATIVE', 'FOR', 'HER', 'LETTER', 'AND', 'ASSURE', 'HER', 'THAT', 'HE', 'WOULD', 'COME', 'AND', 'SEE', 'HER', 'THE', 'FIRST', 'TIME', 'HIS', 'BUSINESS', 'HE', 'WAS', 'BEGINNING', 'TO', 'GET', 'A', 'LITTLE', 'SHOULD', 'TAKE', 'HIM', 'TO', 'BOSTON'] +6128-63241-0011-568: hyp=['SHE', 'WAS', 'PERFECTLY', 'SAFE', 'AFTER', 'WRITING', 'TO', 'BASIL', 'RANSOM', 'AND', 'INDEED', 'IT', 'WAS', 'DIFFICULT', 'TO', 'SEE', 'WHAT', 'HE', 'COULD', 'HAVE', 'DONE', 'TO', 'HER', 'EXCEPT', 'THANK', 'HER', 'HE', 'WAS', 'ONLY', 'EXCEPTIONALLY', 'SUPERLATIVE', 'FOR', 'HER', 'LETTER', 'AND', 'ASSURE', 'HER', 'THAT', 'HE', 'WOULD', 'COME', 'AND', 'SEE', 'HER', 'THE', 'FIRST', 'TIME', 'HIS', 'BUSINESS', 'HE', 'WAS', 'BEGINNING', 'TO', 'GET', 'A', 'LITTLE', 'SHOULD', 'TAKE', 'HIM', 'TO', 'BOSTON'] +6128-63241-0012-569: ref=['HE', 'WAS', 'TOO', 'SIMPLE', 'TOO', 'MISSISSIPPIAN', 'FOR', 'THAT', 'SHE', 'WAS', 'ALMOST', 'DISAPPOINTED'] +6128-63241-0012-569: hyp=['HE', 'WAS', 'TOO', 'SIMPLE', 'TOO', 'MISSISSIPPIAN', 'FOR', 'THAT', 'SHE', 'WAS', 'ALMOST', 'DISAPPOINTED'] +6128-63241-0013-570: ref=['OF', 'ALL', 'THINGS', 'IN', 'THE', 'WORLD', 'CONTENTION', 'WAS', 'MOST', 'SWEET', 'TO', 'HER', 'THOUGH', 'WHY', 'IT', 'IS', 'HARD', 'TO', 'IMAGINE', 'FOR', 'IT', 'ALWAYS', 'COST', 'HER', 'TEARS', 'HEADACHES', 'A', 'DAY', 'OR', 'TWO', 'IN', 'BED', 'ACUTE', 'EMOTION', 'AND', 'IT', 'WAS', 'VERY', 'POSSIBLE', 'BASIL', 'RANSOM', 'WOULD', 'NOT', 'CARE', 'TO', 'CONTEND'] +6128-63241-0013-570: hyp=['OF', 'ALL', 'THINGS', 'IN', 'THE', 'WORLD', 'CONTENTION', 'WAS', 'MOST', 'SWEET', 'TO', 'HER', 
'THOUGH', 'WHY', 'IT', 'IS', 'HARD', 'TO', 'IMAGINE', 'FOR', 'IT', 'ALWAYS', 'COST', 'HER', 'TEARS', 'HEADACHES', 'A', 'DAY', 'OR', 'TWO', 'IN', 'BED', 'ACUTEATION', 'AND', 'IT', 'WAS', 'VERY', 'POSSIBLE', 'BASER', 'RANSOM', 'WOULD', 'NOT', 'CARE', 'TO', 'COMPEND'] +6128-63244-0000-531: ref=['MISS', 'CHANCELLOR', 'HERSELF', 'HAD', 'THOUGHT', 'SO', 'MUCH', 'ON', 'THE', 'VITAL', 'SUBJECT', 'WOULD', 'NOT', 'SHE', 'MAKE', 'A', 'FEW', 'REMARKS', 'AND', 'GIVE', 'THEM', 'SOME', 'OF', 'HER', 'EXPERIENCES'] +6128-63244-0000-531: hyp=['MISS', 'CHANCELLOR', 'HERSELF', 'HAD', 'THOUGHT', 'SO', 'MUCH', 'ON', 'THE', 'VITAL', 'SUBJECT', 'WOULD', 'NOT', 'SHE', 'MAKE', 'A', 'FEW', 'REMARKS', 'AND', 'GIVE', 'THEM', 'SOME', 'OF', 'HER', 'EXPERIENCES'] +6128-63244-0001-532: ref=['HOW', 'DID', 'THE', 'LADIES', 'ON', 'BEACON', 'STREET', 'FEEL', 'ABOUT', 'THE', 'BALLOT'] +6128-63244-0001-532: hyp=['HOW', 'DID', 'THE', 'LADIES', 'AND', 'BEACON', 'STREET', 'FEEL', 'ABOUT', 'THE', 'BURIT'] +6128-63244-0002-533: ref=['PERHAPS', 'SHE', 'COULD', 'SPEAK', 'FOR', 'THEM', 'MORE', 'THAN', 'FOR', 'SOME', 'OTHERS'] +6128-63244-0002-533: hyp=['THERE', 'SHE', 'COULD', 'SPEAK', 'FOR', 'THEM', 'MORE', 'THAN', 'FOR', 'SOME', 'OTHERS'] +6128-63244-0003-534: ref=['WITH', 'HER', 'IMMENSE', 'SYMPATHY', 'FOR', 'REFORM', 'SHE', 'FOUND', 'HERSELF', 'SO', 'OFTEN', 'WISHING', 'THAT', 'REFORMERS', 'WERE', 'A', 'LITTLE', 'DIFFERENT'] +6128-63244-0003-534: hyp=['WITH', 'HER', 'MOST', 'SYMPATHY', 'FOR', 'REFORM', 'SHE', 'FOUND', 'HERSELF', 'SO', 'OFTEN', 'WISHING', 'THAT', 'WE', 'FELL', 'IN', 'AS', 'WHERE', 'A', 'LITTLE', 'DIFFERENT'] +6128-63244-0004-535: ref=['OLIVE', 'HATED', 'TO', 'HEAR', 'THAT', 'FINE', 'AVENUE', 'TALKED', 'ABOUT', 'AS', 'IF', 'IT', 'WERE', 'SUCH', 'A', 'REMARKABLE', 'PLACE', 'AND', 'TO', 'LIVE', 'THERE', 'WERE', 'A', 'PROOF', 'OF', 'WORLDLY', 'GLORY'] +6128-63244-0004-535: hyp=['I', 'HATED', 'DEER', 'THAT', 'FINE', 'AVENUE', 'TALKED', 'ABOUT', 'AS', 'IF', 'IT', 'WERE', 'SUCH', 'A', 'REMARKABLE', 'PLACE', 'AND', 'TO', 'LIVE', 'THERE', 'WHERE', 'A', 'PROOF', 'OF', 'WORLDLY', 'GLORY'] +6128-63244-0005-536: ref=['ALL', 'SORTS', 'OF', 'INFERIOR', 'PEOPLE', 'LIVED', 'THERE', 'AND', 'SO', 'BRILLIANT', 'A', 'WOMAN', 'AS', 'MISSUS', 'FARRINDER', 'WHO', 'LIVED', 'AT', 'ROXBURY', 'OUGHT', 'NOT', 'TO', 'MIX', 'THINGS', 'UP'] +6128-63244-0005-536: hyp=['ALL', 'SORTS', 'HAVE', 'CONTRAY', 'YOUR', 'PEOPLE', 'LIVE', 'THERE', 'AND', 'SO', 'BRILLIANT', 'A', 'WOMAN', 'AS', 'MISSUS', 'FARRENDER', 'WHO', 'LIVED', 'AT', 'ROXBURY', 'OUGHT', 'NOT', 'TO', 'MIX', 'THINGS', 'UP'] +6128-63244-0006-537: ref=['SHE', 'KNEW', 'HER', 'PLACE', 'IN', 'THE', 'BOSTON', 'HIERARCHY', 'AND', 'IT', 'WAS', 'NOT', 'WHAT', 'MISSUS', 'FARRINDER', 'SUPPOSED', 'SO', 'THAT', 'THERE', 'WAS', 'A', 'WANT', 'OF', 'PERSPECTIVE', 'IN', 'TALKING', 'TO', 'HER', 'AS', 'IF', 'SHE', 'HAD', 'BEEN', 'A', 'REPRESENTATIVE', 'OF', 'THE', 'ARISTOCRACY'] +6128-63244-0006-537: hyp=['SHE', 'KNEW', 'HER', 'PLACE', 'IN', 'THE', 'BOSTON', 'HALLWAKE', 'AND', 'IT', 'WAS', 'NOT', 'WHAT', 'MISSUS', 'FARRINGERS', 'SUPPOSED', 'SELL', 'HIM', 'THERE', 'WAS', 'A', 'WANT', 'OF', 'PERSPECTIVE', 'IN', 'TALKING', 'TO', 'HER', 'AS', 'IF', 'SHE', 'HAD', 'BEEN', 'I', 'REPRESENTATIVE', 'OF', 'THE', 'ARISTOCRACY'] +6128-63244-0007-538: ref=['SHE', 'WISHED', 'TO', 'WORK', 'IN', 'ANOTHER', 'FIELD', 'SHE', 'HAD', 'LONG', 'BEEN', 'PREOCCUPIED', 'WITH', 'THE', 'ROMANCE', 'OF', 'THE', 'PEOPLE'] +6128-63244-0007-538: hyp=['SHE', 'WISHED', 'TO', 'WORK', 'IN', 'ANOTHER', 'FIELD', 'SHE', 'HAD', 'LONG', 'BEEN', 
'PREOCCUPIED', 'WITH', 'THE', 'ROMANCE', 'OF', 'A', 'PEOPLE'] +6128-63244-0008-539: ref=['THIS', 'MIGHT', 'SEEM', 'ONE', 'OF', 'THE', 'MOST', 'ACCESSIBLE', 'OF', 'PLEASURES', 'BUT', 'IN', 'POINT', 'OF', 'FACT', 'SHE', 'HAD', 'NOT', 'FOUND', 'IT', 'SO'] +6128-63244-0008-539: hyp=['THIS', 'MIGHT', 'SEEM', 'ONE', 'OF', 'THE', 'MOST', 'ACCESSIBLE', 'OF', 'PLEASURES', 'BUT', 'IN', 'POINT', 'OF', 'FACT', 'SHE', 'HAD', 'NOT', 'FOUND', 'IT', 'SO'] +6128-63244-0009-540: ref=['CHARLIE', 'WAS', 'A', 'YOUNG', 'MAN', 'IN', 'A', 'WHITE', 'OVERCOAT', 'AND', 'A', 'PAPER', 'COLLAR', 'IT', 'WAS', 'FOR', 'HIM', 'IN', 'THE', 'LAST', 'ANALYSIS', 'THAT', 'THEY', 'CARED', 'MUCH', 'THE', 'MOST'] +6128-63244-0009-540: hyp=['CHARLIE', 'WAS', 'A', 'YOUNG', 'MAN', 'IN', 'A', 'WIDE', 'OVERCOAT', 'AND', 'A', 'PAPER', 'COLLAR', 'IT', 'WAS', 'BOUHAIR', 'IN', 'THE', 'LAST', 'OF', 'NICES', 'THAT', 'THE', 'CARED', 'MUCH', 'THE', 'MOST'] +6128-63244-0010-541: ref=['OLIVE', 'CHANCELLOR', 'WONDERED', 'HOW', 'MISSUS', 'FARRINDER', 'WOULD', 'TREAT', 'THAT', 'BRANCH', 'OF', 'THE', 'QUESTION'] +6128-63244-0010-541: hyp=['ALL', 'THE', 'CHANCELLOR', 'WONDERED', 'HOW', 'MISSUS', 'GREYNDER', 'WOULD', 'TREAT', 'THEIR', 'BRANCH', 'AT', 'THE', 'QUESTION'] +6128-63244-0011-542: ref=['IF', 'IT', 'BE', 'NECESSARY', 'WE', 'ARE', 'PREPARED', 'TO', 'TAKE', 'CERTAIN', 'STEPS', 'TO', 'CONCILIATE', 'THE', 'SHRINKING'] +6128-63244-0011-542: hyp=['IT', 'HAD', 'BEEN', 'NECESSARY', 'WE', 'ARE', 'PREPARED', 'TO', 'TAKE', 'CERTAIN', 'STEPS', 'TO', 'CONCILIATE', 'THE', 'SHRINKING'] +6128-63244-0012-543: ref=['OUR', 'MOVEMENT', 'IS', 'FOR', 'ALL', 'IT', 'APPEALS', 'TO', 'THE', 'MOST', 'DELICATE', 'LADIES'] +6128-63244-0012-543: hyp=["I'LL", 'MOVEMENT', 'IS', 'FULL', 'IT', 'APPEALS', 'TO', 'THE', 'MOST', 'DELICATE', 'LADIES'] +6128-63244-0013-544: ref=['RAISE', 'THE', 'STANDARD', 'AMONG', 'THEM', 'AND', 'BRING', 'ME', 'A', 'THOUSAND', 'NAMES'] +6128-63244-0013-544: hyp=['THAT', 'IS', 'THE', 'STANDARD', 'AMONG', 'THEM', 'AND', 'BRING', 'ME', 'YOUR', 'THOUSAND', 'NAMES'] +6128-63244-0014-545: ref=['I', 'LOOK', 'AFTER', 'THE', 'DETAILS', 'AS', 'WELL', 'AS', 'THE', 'BIG', 'CURRENTS', 'MISSUS', 'FARRINDER', 'ADDED', 'IN', 'A', 'TONE', 'AS', 'EXPLANATORY', 'AS', 'COULD', 'BE', 'EXPECTED', 'OF', 'SUCH', 'A', 'WOMAN', 'AND', 'WITH', 'A', 'SMILE', 'OF', 'WHICH', 'THE', 'SWEETNESS', 'WAS', 'THRILLING', 'TO', 'HER', 'LISTENER'] +6128-63244-0014-545: hyp=['I', 'LOOK', 'AFTER', 'THE', 'DETAILS', 'AS', 'WELL', 'AS', 'THE', 'BIG', 'CURRANTS', 'MISSUS', 'VERUNDER', 'ADDED', 'IN', 'A', 'TONE', 'AS', 'EXPLANATORY', 'AS', 'COULD', 'BE', 'EXPECTED', 'OF', 'SUCH', 'A', 'WOMAN', 'AND', 'WITH', 'A', 'SMILE', 'OF', 'WHICH', 'THE', 'SWEETNESS', 'WAS', 'THRILLING', 'TO', 'HER', 'LISTENER'] +6128-63244-0015-546: ref=['SAID', 'OLIVE', 'CHANCELLOR', 'WITH', 'A', 'FACE', 'WHICH', 'SEEMED', 'TO', 'PLEAD', 'FOR', 'A', 'REMISSION', 'OF', 'RESPONSIBILITY'] +6128-63244-0015-546: hyp=['SAID', 'OLD', 'CHANCELLOR', 'WITH', 'A', 'FACE', 'WHICH', 'SEEMED', 'TO', 'PLEAD', 'FOR', 'A', "REMISSIONER'S", 'RESPONSIBILITY'] +6128-63244-0016-547: ref=['I', 'WANT', 'TO', 'BE', 'NEAR', 'TO', 'THEM', 'TO', 'HELP', 'THEM'] +6128-63244-0016-547: hyp=['HOW', 'WARNED', 'TO', 'BE', 'NEAR', 'TO', 'THEM', 'TO', 'HELP', 'THEM'] +6128-63244-0017-548: ref=['IT', 'WAS', 'ONE', 'THING', 'TO', 'CHOOSE', 'FOR', 'HERSELF', 'BUT', 'NOW', 'THE', 'GREAT', 'REPRESENTATIVE', 'OF', 'THE', 'ENFRANCHISEMENT', 'OF', 'THEIR', 'SEX', 'FROM', 'EVERY', 'FORM', 'OF', 'BONDAGE', 'HAD', 'CHOSEN', 'FOR', 'HER'] +6128-63244-0017-548: 
hyp=['IT', 'WAS', 'ONE', 'THING', 'TO', 'CHOOSE', 'TO', 'HERSELF', 'BUT', 'NOW', 'THE', 'GREAT', 'REPRESENTATIVE', 'OF', 'THE', 'ENCRONTISEMENT', 'OF', 'THEIR', 'SEX', 'FROM', 'EVERY', 'FORM', 'OF', 'BANDAGE', 'HAD', 'CHOSEN', 'FOR', 'HER'] +6128-63244-0018-549: ref=['THE', 'UNHAPPINESS', 'OF', 'WOMEN'] +6128-63244-0018-549: hyp=['THE', 'UNHAPPINESS', 'OF', 'WOMEN'] +6128-63244-0019-550: ref=['THEY', 'WERE', 'HER', 'SISTERS', 'THEY', 'WERE', 'HER', 'OWN', 'AND', 'THE', 'DAY', 'OF', 'THEIR', 'DELIVERY', 'HAD', 'DAWNED'] +6128-63244-0019-550: hyp=['THEY', 'WERE', 'HER', 'SISTERS', 'THERE', 'WERE', 'HER', 'OWN', 'AND', 'THE', 'DAY', 'OF', 'THEIR', 'DELIVERY', 'HAD', 'DAWNED'] +6128-63244-0020-551: ref=['THIS', 'WAS', 'THE', 'ONLY', 'SACRED', 'CAUSE', 'THIS', 'WAS', 'THE', 'GREAT', 'THE', 'JUST', 'REVOLUTION', 'IT', 'MUST', 'TRIUMPH', 'IT', 'MUST', 'SWEEP', 'EVERYTHING', 'BEFORE', 'IT', 'IT', 'MUST', 'EXACT', 'FROM', 'THE', 'OTHER', 'THE', 'BRUTAL', 'BLOOD', 'STAINED', 'RAVENING', 'RACE', 'THE', 'LAST', 'PARTICLE', 'OF', 'EXPIATION'] +6128-63244-0020-551: hyp=['THIS', 'WAS', 'THE', 'ONLY', 'SACRED', 'CAUSE', 'THIS', 'WAS', 'THE', 'GREAT', 'THE', 'DESTRULIAN', 'IT', 'WAS', 'TRIUMPH', 'IT', 'WAS', 'SWEEP', 'EVERYTHING', 'BEFORE', 'IT', 'IT', 'MUST', 'EXACT', 'FROM', 'THE', 'OTHER', 'THE', 'BRUTAL', 'BLOOD', 'STAINED', 'RAVENING', 'RACE', 'THE', 'LOST', 'PARTICLE', 'OF', 'EXPLANATION'] +6128-63244-0021-552: ref=['THEY', 'WOULD', 'BE', 'NAMES', 'OF', 'WOMEN', 'WEAK', 'INSULTED', 'PERSECUTED', 'BUT', 'DEVOTED', 'IN', 'EVERY', 'PULSE', 'OF', 'THEIR', 'BEING', 'TO', 'THE', 'CAUSE', 'AND', 'ASKING', 'NO', 'BETTER', 'FATE', 'THAN', 'TO', 'DIE', 'FOR', 'IT'] +6128-63244-0021-552: hyp=['THERE', 'HAD', 'BEEN', 'NAMES', 'OF', 'WOMEN', 'WEAK', 'INSULTED', 'PERSECUTED', 'BUT', 'DEVOTED', 'IN', 'EVERY', 'PART', 'OF', 'THEIR', 'BEING', 'TO', 'THE', 'CAUSE', 'AND', 'ASKING', 'NO', 'BETTER', 'FATE', 'THAN', 'TO', 'DIE', 'FOR', 'IT'] +6128-63244-0022-553: ref=['IT', 'WAS', 'NOT', 'CLEAR', 'TO', 'THIS', 'INTERESTING', 'GIRL', 'IN', 'WHAT', 'MANNER', 'SUCH', 'A', 'SACRIFICE', 'AS', 'THIS', 'LAST', 'WOULD', 'BE', 'REQUIRED', 'OF', 'HER', 'BUT', 'SHE', 'SAW', 'THE', 'MATTER', 'THROUGH', 'A', 'KIND', 'OF', 'SUNRISE', 'MIST', 'OF', 'EMOTION', 'WHICH', 'MADE', 'DANGER', 'AS', 'ROSY', 'AS', 'SUCCESS'] +6128-63244-0022-553: hyp=['IT', 'WILL', 'NOT', 'CLEAR', 'TO', 'THIS', 'INTERESTING', 'GIRL', 'IN', 'WHAT', 'MANNER', 'SUCH', 'A', 'SACRIFICE', 'AS', 'THIS', 'LAST', 'WOULD', 'BE', 'REQUIRED', 'OF', 'HER', 'BUT', 'SHE', 'SOLD', 'A', 'MATTER', 'THROUGH', 'A', 'KIND', 'OF', 'SUNRISE', 'MISTAGINATION', 'WHICH', 'MADE', 'DANGER', 'AS', 'ROSY', 'IS', 'SUCCESS'] +6128-63244-0023-554: ref=['WHEN', 'MISS', 'BIRDSEYE', 'APPROACHED', 'IT', 'TRANSFIGURED', 'HER', 'FAMILIAR', 'HER', 'COMICAL', 'SHAPE', 'AND', 'MADE', 'THE', 'POOR', 'LITTLE', 'HUMANITARY', 'HACK', 'SEEM', 'ALREADY', 'A', 'MARTYR'] +6128-63244-0023-554: hyp=['WHEN', 'MISS', "BIRD'S", 'EYED', 'APPROACHED', 'IT', 'TRANSFIGURED', 'HER', 'FAMILIAR', 'HER', 'COMICAL', 'SHAPE', 'AND', 'MADE', 'THE', 'POOR', 'LITTLE', 'HUMANITY', 'HACK', 'SIMPLE', 'ALREADY', 'A', 'MARTYR'] +6128-63244-0024-555: ref=['OLIVE', 'CHANCELLOR', 'LOOKED', 'AT', 'HER', 'WITH', 'LOVE', 'REMEMBERED', 'THAT', 'SHE', 'HAD', 'NEVER', 'IN', 'HER', 'LONG', 'UNREWARDED', 'WEARY', 'LIFE', 'HAD', 'A', 'THOUGHT', 'OR', 'AN', 'IMPULSE', 'FOR', 'HERSELF'] +6128-63244-0024-555: hyp=['I', 'LEAVE', 'CHANCELLOR', 'LOOKED', 'AT', 'HER', 'WITH', 'LOVE', 'REMEMBERED', 'THAT', 'SHE', 'HAD', 'NEVER', 'IN', 'HER', 'LONG', 
'AND', 'REWARDED', 'WEARY', 'LIFE', 'HAD', 'A', 'THOUGHT', 'OF', 'AN', 'IMPULSE', 'FOR', 'HERSELF'] +6128-63244-0025-556: ref=['SHE', 'HAD', 'BEEN', 'CONSUMED', 'BY', 'THE', 'PASSION', 'OF', 'SYMPATHY', 'IT', 'HAD', 'CRUMPLED', 'HER', 'INTO', 'AS', 'MANY', 'CREASES', 'AS', 'AN', 'OLD', 'GLAZED', 'DISTENDED', 'GLOVE'] +6128-63244-0025-556: hyp=['SHE', 'HAD', 'BEEN', 'CONSUMED', 'BY', 'THE', 'PASSION', 'OF', 'SYMPATHY', 'IT', 'HAD', 'CRUMBLED', 'HER', 'INTO', 'AS', 'MANY', 'CREASES', 'AS', 'AN', 'OLD', 'GLAZED', 'DISTENDED', 'GLOVE'] +6432-63722-0000-2431: ref=['BUT', 'SCUSE', 'ME', "DIDN'T", 'YO', 'FIGGER', 'ON', 'DOIN', 'SOME', 'DETECTIN', 'AN', 'GIVE', 'UP', 'FISHIN'] +6432-63722-0000-2431: hyp=['PECUSE', 'ME', 'THEN', "YOU'LL", 'FORGON', 'DOIN', 'SOME', 'DETECTIVE', 'AND', 'GIVEN', 'UP', "FISHIN'"] +6432-63722-0001-2432: ref=['AND', 'SHAG', 'WITH', 'THE', 'FREEDOM', 'OF', 'AN', 'OLD', 'SERVANT', 'STOOD', 'LOOKING', 'AT', 'HIS', 'MASTER', 'AS', 'IF', 'NOT', 'QUITE', 'UNDERSTANDING', 'THE', 'NEW', 'TWIST', 'THE', 'AFFAIRS', 'HAD', 'TAKEN'] +6432-63722-0001-2432: hyp=['AND', 'SHAG', 'WITH', 'THE', 'FREEDOM', 'OF', 'AN', 'OLD', 'SERVANT', 'STOOD', 'LOOKING', 'AT', 'HIS', 'MASTER', 'AS', 'IF', 'NOT', 'QUITE', 'UNDERSTANDING', 'THE', 'NEW', 'TWIST', 'THE', 'AFFAIRS', 'HAD', 'TAKEN'] +6432-63722-0002-2433: ref=["I'M", 'GOING', 'OFF', 'FISHING', 'I', 'MAY', 'NOT', 'CATCH', 'ANYTHING', 'I', 'MAY', 'NOT', 'WANT', 'TO', 'AFTER', 'I', 'GET', 'THERE'] +6432-63722-0002-2433: hyp=["I'M", 'GOING', 'OUR', 'FISHIN', 'I', 'MAY', 'NOT', 'CATCH', 'ANYTHING', 'I', 'MAY', 'NOT', 'WANT', 'TO', 'AFTER', 'I', 'GET', 'THERE'] +6432-63722-0003-2434: ref=['GET', 'READY', 'SHAG', 'YES', 'SAH', 'COLONEL'] +6432-63722-0003-2434: hyp=['GET', 'READY', 'SHAGG', 'YES', 'I', 'CAN'] +6432-63722-0004-2435: ref=['AND', 'HAVING', 'PUT', 'HIMSELF', 'IN', 'A', 'FAIR', 'WAY', 'AS', 'HE', 'HOPED', 'TO', 'SOLVE', 'SOME', 'OF', 'THE', 'PROBLEMS', 'CONNECTED', 'WITH', 'THE', 'DARCY', 'CASE', 'COLONEL', 'ASHLEY', 'WENT', 'DOWN', 'TO', 'POLICE', 'HEADQUARTERS', 'TO', 'LEARN', 'MORE', 'FACTS', 'IN', 'CONNECTION', 'WITH', 'THE', 'MURDER', 'OF', 'THE', 'EAST', 'INDIAN'] +6432-63722-0004-2435: hyp=['AND', 'HAVING', 'PUT', 'HIMSELF', 'IN', 'A', 'FAIR', 'WAY', 'AS', 'HE', 'HOPED', 'TO', 'SOLVE', 'SOME', 'OF', 'THE', 'PROBLEMS', 'CONNECTED', 'WITH', 'THE', 'DARCY', 'CASE', 'COLONEL', 'HASHY', 'WENT', 'DOWN', 'TO', 'POLICE', 'HEADQUARTERS', 'TO', 'LEARN', 'MORE', 'FACTS', 'IN', 'THE', 'CONNECTION', 'WITH', 'THE', 'MURDER', 'OF', 'THE', 'EAST', 'INDIAN'] +6432-63722-0005-2436: ref=['PINKUS', 'AND', 'DONOVAN', "HAVEN'T", 'THEY', 'CARROLL', 'YEP'] +6432-63722-0005-2436: hyp=['PINKIS', 'AND', 'DONOVAN', "HAVEN'T", 'THEY', 'CAROL', 'HEIP'] +6432-63722-0006-2437: ref=['CARROLL', 'WAS', 'TOO', 'MUCH', 'ENGAGED', 'IN', 'WATCHING', 'THE', 'BLUE', 'SMOKE', 'CURL', 'LAZILY', 'UPWARD', 'FROM', 'HIS', 'CIGAR', 'JUST', 'THEN', 'TO', 'SAY', 'MORE'] +6432-63722-0006-2437: hyp=['GAL', 'WAS', 'TOO', 'MUCH', 'ENGAGED', 'IN', 'WATCHING', 'THE', 'BLUE', 'SMOKE', 'CURL', 'LAZILY', 'UPWARD', 'FROM', 'HIS', 'CIGAR', 'JUST', 'THEN', 'TO', 'SAY', 'MORE'] +6432-63722-0007-2438: ref=['ARE', 'YOU', 'GOING', 'TO', 'WORK', 'ON', 'THAT', 'CASE', 'COLONEL'] +6432-63722-0007-2438: hyp=['ARE', 'YOU', 'GOING', 'TO', 'WORK', 'ON', 'THAT', 'CASE', 'COLONEL'] +6432-63722-0008-2439: ref=['BUT', 'HE', "HADN'T", 'ANY', 'MORE', 'TO', 'DO', 'WITH', 'IT', 'COLONEL', 'THAN', 'THAT', 'CAT'] +6432-63722-0008-2439: hyp=['BUT', 'HE', "HADN'T", 'ANY', 'MORE', 'TO', 'DO', 'WITH', 'IT', 
'COLONEL', 'THAN', 'THAT', 'CAT'] +6432-63722-0009-2440: ref=['PERHAPS', 'NOT', 'ADMITTED', 'COLONEL', 'ASHLEY'] +6432-63722-0009-2440: hyp=['PERHAPS', 'NOT', 'ADMITTED', 'COLONEL', 'ASHLEY'] +6432-63722-0010-2441: ref=["WE'VE", 'GOT', 'OUR', 'MAN', 'AND', "THAT'S", 'ALL', 'WE', 'WANT'] +6432-63722-0010-2441: hyp=["WE'VE", 'GOT', 'OUR', 'MAN', 'AND', "THAT'S", 'ALL', 'WE', 'WANT'] +6432-63722-0011-2442: ref=["YOU'RE", 'ON', 'THE', 'DARCY', 'CASE', 'THEY', 'TELL', 'ME', 'IN', 'A', 'WAY', 'YES'] +6432-63722-0011-2442: hyp=["YOU'RE", 'ON', 'THE', 'DARCY', 'CASE', 'THEY', 'TELL', 'ME', 'IN', 'A', 'WAY', 'YES'] +6432-63722-0012-2443: ref=["I'M", 'WORKING', 'IN', 'THE', 'INTERESTS', 'OF', 'THE', 'YOUNG', 'MAN'] +6432-63722-0012-2443: hyp=["I'M", 'WORKING', 'IN', 'THE', 'INTEREST', 'OF', 'THE', 'YOUNG', 'MAN'] +6432-63722-0013-2444: ref=["IT'S", 'JUST', 'ONE', 'OF', 'THEM', 'COINCIDENCES', 'LIKE'] +6432-63722-0013-2444: hyp=["IT'S", 'JUST', 'ONE', 'OF', 'THEM', 'COINCIDENCES', 'LIKE'] +6432-63722-0014-2445: ref=['BUSTED', 'HIS', 'HEAD', 'IN', 'WITH', 'A', 'HEAVY', 'CANDLESTICK', 'ONE', 'OF', 'A', 'PAIR'] +6432-63722-0014-2445: hyp=['BUSTED', 'HIS', 'HEAD', 'IN', 'WITH', 'A', 'HEAVY', 'CANDLESTICK', 'ONE', 'OF', 'A', 'PAIR'] +6432-63722-0015-2446: ref=['GAD', 'EXCLAIMED', 'THE', 'COLONEL'] +6432-63722-0015-2446: hyp=['GAD', 'EXPLAINED', 'THE', 'COLONEL'] +6432-63722-0016-2447: ref=['THE', 'VERY', 'PAIR', 'I', 'WAS', 'GOING', 'TO', 'BUY'] +6432-63722-0016-2447: hyp=['THE', 'VERY', 'PAIR', 'I', 'WAS', 'GOING', 'TO', 'BUY'] +6432-63722-0017-2448: ref=['LOOK', 'HERE', 'COLONEL', 'DO', 'YOU', 'KNOW', 'ANYTHING', 'ABOUT', 'THIS'] +6432-63722-0017-2448: hyp=['LOOK', 'HERE', 'COLONEL', 'DO', 'YOU', 'KNOW', 'ANYTHING', 'ABOUT', 'THIS'] +6432-63722-0018-2449: ref=['AND', 'THE', "DETECTIVE'S", 'PROFESSIONAL', 'INSTINCTS', 'GOT', 'THE', 'UPPER', 'HAND', 'OF', 'HIS', 'FRIENDLINESS', 'NOT', 'THE', 'LEAST', 'IN', 'THE', 'WORLD', 'NOT', 'AS', 'MUCH', 'AS', 'YOU', 'DO', 'WAS', 'THE', 'COOL', 'ANSWER'] +6432-63722-0018-2449: hyp=['AND', 'THE', "DETECTIVE'S", 'PROFESSIONAL', 'INSTINCTS', 'GOT', 'THE', 'UPPER', 'HAND', 'OF', 'HIS', 'FRIENDLINESS', 'NOT', 'THE', 'LEAST', 'IN', 'THE', 'WORLD', 'NOT', 'AS', 'MUCH', 'AS', 'YOU', 'DO', 'WAS', 'THE', 'COOL', 'ANSWER'] +6432-63722-0019-2450: ref=['I', 'HAPPENED', 'TO', 'SEE', 'THOSE', 'CANDLESTICKS', 'IN', 'THE', 'WINDOW', 'OF', 'SINGA', "PHUT'S", 'SHOP', 'THE', 'OTHER', 'DAY', 'AND', 'I', 'MADE', 'UP', 'MY', 'MIND', 'TO', 'BUY', 'THEM', 'WHEN', 'I', 'HAD', 'A', 'CHANCE'] +6432-63722-0019-2450: hyp=['I', 'HAPPENED', 'TO', 'SEE', 'THOSE', 'CANDLESTICKS', 'IN', 'THE', 'WINDOW', 'OF', 'SINGA', 'PHUT', 'SHOP', 'THE', 'OTHER', 'DAY', 'AND', 'I', 'MADE', 'UP', 'MY', 'MIND', 'TO', 'BUY', 'THEM', 'WHEN', 'I', 'HAD', 'A', 'CHANCE'] +6432-63722-0020-2451: ref=['NOW', "I'M", 'AFRAID', 'I', "WON'T", 'BUT', 'HOW', 'DID', 'IT', 'HAPPEN'] +6432-63722-0020-2451: hyp=['NOW', "I'M", 'AFRAID', 'I', "WON'T", 'BUT', 'HOW', 'DID', 'IT', 'HAPPEN'] +6432-63722-0021-2452: ref=['PHUT', 'I', "DON'T", 'KNOW', 'WHETHER', "THAT'S", 'HIS', 'FIRST', 'OR', 'HIS', 'LAST', 'NAME', 'ANYHOW', 'HE', 'HAD', 'A', 'PARTNER', 'NAMED', 'SHERE', 'ALI'] +6432-63722-0021-2452: hyp=['FAT', 'I', "DON'T", 'KNOW', 'WHETHER', "THAT'S", 'HIS', 'FIRST', 'OR', 'HIS', 'LAST', 'NAME', 'ANYHOW', 'HE', 'HAD', 'A', 'PARTNER', 'NAMED', 'SHEAR', 'ALI'] +6432-63722-0022-2453: ref=['ANYHOW', 'HE', 'AND', 'PHUT', "DIDN'T", 'GET', 'ALONG', 'VERY', 'WELL', 'IT', 'SEEMS'] +6432-63722-0022-2453: hyp=['ANYHOW', 'HE', 'INFORT', "DIDN'T", 
'GET', 'ALONG', 'VERY', 'WELL', 'IT', 'SEEMS'] +6432-63722-0023-2454: ref=['NEIGHBORS', 'OFTEN', 'HEARD', 'EM', 'SCRAPPIN', 'A', 'LOT', 'AND', 'THIS', 'AFTERNOON', 'THEY', 'WENT', 'AT', 'IT', 'AGAIN', 'HOT', 'AND', 'HEAVY'] +6432-63722-0023-2454: hyp=['LABORS', 'OFTEN', 'HEARD', 'HIM', 'SCRAP', 'IN', 'A', 'LOT', 'AND', 'THIS', 'AFTERNOON', 'THEY', 'WENT', 'AT', 'IT', 'AGAIN', 'AT', 'HOT', 'AND', 'HEAVY'] +6432-63722-0024-2455: ref=['TOWARD', 'DARK', 'A', 'MAN', 'WENT', 'IN', 'TO', 'BUY', 'A', 'LAMP'] +6432-63722-0024-2455: hyp=['TO', 'OUR', 'DARK', 'A', 'MAN', 'WENT', 'IN', 'TO', 'BUY', 'A', 'LAMP'] +6432-63722-0025-2456: ref=['HE', 'FOUND', 'THE', 'PLACE', 'WITHOUT', 'A', 'LIGHT', 'IN', 'IT', 'STUMBLED', 'OVER', 'SOMETHING', 'ON', 'THE', 'FLOOR', 'AND', 'THERE', 'WAS', "ALI'S", 'BODY', 'WITH', 'THE', 'HEAD', 'BUSTED', 'IN', 'AND', 'THIS', 'HEAVY', 'CANDLESTICK', 'NEAR', 'IT'] +6432-63722-0025-2456: hyp=['HE', 'FOUND', 'THE', 'PLACE', 'WITHOUT', 'A', 'LIGHT', 'IN', 'IT', 'STUMBLED', 'OVER', 'SOMETHING', 'ON', 'THE', 'FLOOR', 'AND', 'THERE', 'WAS', "ALI'S", 'BODY', 'WITH', 'THE', 'HEAD', 'BUSTED', 'IN', 'AND', 'THIS', 'HEAVY', 'CANDLESTICK', 'NEAR', 'IT'] +6432-63722-0026-2457: ref=['SURE', 'HELD', 'SO', 'TIGHT', 'WE', 'COULD', 'HARDLY', 'GET', 'IT', 'OUT'] +6432-63722-0026-2457: hyp=['SURE', 'HELD', 'SO', 'TIGHT', 'WE', 'COULD', 'HARDLY', 'GET', 'IT', 'OUT'] +6432-63722-0027-2458: ref=['MAYBE', 'THE', 'FIGHT', 'WAS', 'ABOUT', 'WHO', 'OWNED', 'THE', 'WATCH', 'FOR', 'THE', 'DAGOS', 'TALKED', 'IN', 'THEIR', 'FOREIGN', 'LINGO', 'AND', 'NONE', 'OF', 'THE', 'NEIGHBORS', 'COULD', 'TELL', 'WHAT', 'THEY', 'WERE', 'SAYIN', 'I', 'SEE'] +6432-63722-0027-2458: hyp=['MAYBE', 'THE', 'FIGHT', 'WAS', 'ABOUT', 'WHO', 'ON', 'THE', 'WATCH', 'FOR', 'THE', 'DAGGERS', 'TALKED', 'IN', 'THEIR', 'FOREIGN', 'LINGO', 'AND', 'NONE', 'OF', 'THE', 'NEIGHBORS', 'COULD', 'TELL', 'WHAT', 'THEY', 'WERE', 'SAYING', 'I', 'SEE'] +6432-63722-0028-2459: ref=['AND', 'THE', 'WATCH', 'HAVE', 'YOU', 'IT', 'YES', "IT'S", 'HERE'] +6432-63722-0028-2459: hyp=['AND', 'THE', 'WATCH', 'HAVE', 'YOU', 'IT', 'YES', "IT'S", 'HERE'] +6432-63722-0029-2460: ref=["THAT'S", 'THE', 'WATCH', 'ANNOUNCED', 'THE', 'HEADQUARTERS', 'DETECTIVE', 'REACHING', 'IN', 'FOR', 'IT', 'GOING', 'YET', 'SEE'] +6432-63722-0029-2460: hyp=["THAT'S", 'THE', 'WATCH', 'ANNOUNCED', 'THE', 'HEADQUARTERS', 'DETECTIVE', 'REACHING', 'IN', 'FOR', 'IT', 'GOING', 'IN', 'SEE'] +6432-63722-0030-2461: ref=["YOU'RE", 'NOT', 'AS', 'SQUEAMISH', 'AS', 'ALL', 'THAT', 'ARE', 'YOU', 'JUST', 'BECAUSE', 'IT', 'WAS', 'IN', 'A', 'DEAD', "MAN'S", 'HAND', 'AND', 'IN', 'A', "WOMAN'S"] +6432-63722-0030-2461: hyp=["YOU'RE", 'NOT', 'A', 'SCREAMISH', 'AS', 'ALL', 'THAT', 'ARE', 'YOU', 'JUST', 'BECAUSE', 'IT', 'WAS', 'IN', 'A', 'DEAD', "MAN'S", 'HANDS', 'AND', 'A', "WOMAN'S"] +6432-63722-0031-2462: ref=['AND', "DONOVAN'S", 'VOICE', 'WAS', 'PLAINLY', 'SKEPTICAL'] +6432-63722-0031-2462: hyp=['AND', "DOLOMAN'S", 'VOICE', 'WAS', 'PLAINLY', 'SCEPTICAL'] +6432-63722-0032-2463: ref=['YES', 'IT', 'MAY', 'HAVE', 'SOME', 'ROUGH', 'EDGES', 'ON', 'IT'] +6432-63722-0032-2463: hyp=['YES', 'IT', 'MAY', 'HAVE', 'SOME', 'ROUGH', 'EDGES', 'ON', 'IT'] +6432-63722-0033-2464: ref=['AND', "I'VE", 'READ', 'ENOUGH', 'ABOUT', 'GERMS', 'TO', 'KNOW', 'THE', 'DANGER', "I'D", 'ADVISE', 'YOU', 'TO', 'BE', 'CAREFUL'] +6432-63722-0033-2464: hyp=['AND', "I'VE", 'READ', 'ENOUGH', 'ABOUT', 'GERMS', 'TO', 'KNOW', 'THE', 'DANGER', "I'D", 'ADVISE', 'YOU', 'TO', 'BE', 'CAREFUL'] +6432-63722-0034-2465: ref=['IF', 'YOU', "DON'T", 'MIND', 'I', 
'SHOULD', 'LIKE', 'TO', 'EXAMINE', 'THIS', 'A', 'BIT'] +6432-63722-0034-2465: hyp=['IF', 'YOU', "DON'T", 'MIND', 'I', 'SHOULD', 'LIKE', 'TO', 'EXAMINE', 'THIS', 'A', 'BIT'] +6432-63722-0035-2466: ref=['BEFORE', 'THE', 'BIG', 'WIND', 'IN', 'IRELAND', 'SUGGESTED', 'THONG', 'WITH', 'A', 'NOD', 'AT', 'HIS', 'IRISH', 'COMPATRIOT', 'SLIGHTLY', 'LAUGHED', 'THE', 'COLONEL'] +6432-63722-0035-2466: hyp=['BEFORE', 'THE', 'BIG', 'WIND', 'IN', 'IRELAND', 'SUGGESTED', 'THONG', 'WITH', 'A', 'NOD', 'OF', 'HIS', 'IRISH', 'COMPATRIOT', "SLIGHTLY'LL", 'HAVE', 'THE', 'COLONEL'] +6432-63722-0036-2467: ref=["THAT'S", 'RIGHT', 'AGREED', 'THE', 'COLONEL', 'AS', 'HE', 'CONTINUED', 'TO', 'MOVE', 'HIS', 'MAGNIFYING', 'GLASS', 'OVER', 'THE', 'SURFACE', 'OF', 'THE', 'STILL', 'TICKING', 'WATCH'] +6432-63722-0036-2467: hyp=["THAT'S", 'RIGHT', 'AGREED', 'THE', 'COLONEL', 'AS', 'HE', 'CONTINUED', 'TO', 'MOVE', 'HIS', 'MAGNIFYING', 'GLASS', 'OVER', 'THE', 'SURFACE', 'OF', 'THE', 'STILL', 'TICKING', 'WATCH'] +6432-63722-0037-2468: ref=['AND', 'A', 'CLOSE', 'OBSERVER', 'MIGHT', 'HAVE', 'OBSERVED', 'THAT', 'HE', 'DID', 'NOT', 'TOUCH', 'HIS', 'BARE', 'FINGERS', 'TO', 'THE', 'TIMEPIECE', 'BUT', 'POKED', 'IT', 'ABOUT', 'AND', 'TOUCHED', 'IT', 'HERE', 'AND', 'THERE', 'WITH', 'THE', 'END', 'OF', 'A', 'LEADPENCIL'] +6432-63722-0037-2468: hyp=['IN', 'THE', 'CLOSE', 'OBSERVER', 'MIGHT', 'HAVE', 'OBSERVED', 'THAT', 'HE', 'DID', 'NOT', 'TOUCH', 'HIS', 'BARE', 'FINGERS', 'TO', 'THE', 'TIMEPIECE', 'BUT', 'POKED', 'IT', 'ABOUT', 'AND', 'TOUCHED', 'IT', 'HERE', 'AND', 'THERE', 'WITH', 'THE', 'END', 'OF', 'A', 'LEAD', 'PENCIL'] +6432-63722-0038-2469: ref=['AND', 'DONOVAN', 'TAKE', 'A', "FRIEND'S", 'ADVICE', 'AND', "DON'T", 'BE', 'TOO', 'FREE', 'WITH', 'THAT', 'WATCH', 'TOO', 'FREE', 'WITH', 'IT'] +6432-63722-0038-2469: hyp=['AND', 'DONOON', 'TAKE', 'HER', "FRIEND'S", 'ADVICE', 'AND', "DON'T", 'BE', 'TOO', 'FREE', 'WITH', 'THAT', 'WATCH', 'TOO', 'FREE', 'WITH', 'IT'] +6432-63722-0039-2470: ref=['ASKED', 'THE', 'SURPRISED', 'DETECTIVE', 'YES'] +6432-63722-0039-2470: hyp=['AS', 'THE', 'SURPRISE', 'DETECTIVE', 'YES'] +6432-63722-0040-2471: ref=["DON'T", 'SCRATCH', 'YOURSELF', 'ON', 'IT', 'WHATEVER', 'YOU', 'DO', 'WHY', 'NOT'] +6432-63722-0040-2471: hyp=["DON'T", 'SCRATCH', 'YOURSELF', 'ON', 'IT', 'WHATEVER', 'YOU', 'DO', 'WHY', 'NOT'] +6432-63722-0041-2472: ref=['SIMPLY', 'BECAUSE', 'THIS', 'WATCH'] +6432-63722-0041-2472: hyp=['SIMPLY', 'BECAUSE', 'THIS', 'WATCH'] +6432-63722-0042-2473: ref=['SOME', 'ONE', 'OUT', 'HERE', 'TO', 'SEE', 'YOU'] +6432-63722-0042-2473: hyp=['SOME', 'ONE', 'OUT', 'HER', 'TO', 'SEE', 'YOU'] +6432-63722-0043-2474: ref=['ALL', 'RIGHT', 'BE', 'THERE', 'IN', 'A', 'SECOND'] +6432-63722-0043-2474: hyp=['ALL', 'RIGHT', 'BE', 'THERE', 'IN', 'A', 'SECOND'] +6432-63722-0044-2475: ref=['SINGA', 'PHUT', 'WAS', 'THE', 'PANTING', 'ANSWER'] +6432-63722-0044-2475: hyp=['SHANGHAT', 'WAS', 'THE', 'PANTING', 'ANSWER'] +6432-63722-0045-2476: ref=['I', 'WANT', 'TO', 'TALK', 'OVER', "DARCY'S", 'CASE', 'WITH', 'YOU', 'THE', 'COLONEL', 'HAD', 'SAID', 'AND', 'THE', 'TWO', 'HAD', 'TALKED', 'HAD', 'THOUGHT', 'HAD', 'TALKED', 'AGAIN', 'AND', 'NOW', 'WERE', 'SILENT', 'FOR', 'A', 'TIME'] +6432-63722-0045-2476: hyp=['I', 'WANT', 'TO', 'TALK', 'OVER', "DARCY'S", 'CASE', 'WITH', 'YOU', 'THE', 'COLONEL', 'HAD', 'SAID', 'AND', 'THE', 'TWO', 'HAD', 'TALKED', 'HAD', 'THOUGHT', 'HAD', 'TALKED', 'AGAIN', 'AND', 'NOW', 'WERE', 'SILENT', 'FOR', 'A', 'TIME'] +6432-63722-0046-2477: ref=['WHAT', 'ARE', 'THE', 'CHANCES', 'OF', 'GETTING', 'HIM', 'OFF', 'LEGALLY', 
'IF', 'WE', 'GO', 'AT', 'IT', 'FROM', 'A', 'NEGATIVE', 'STANDPOINT', 'ASKED', 'THE', 'COLONEL'] +6432-63722-0046-2477: hyp=['WHAT', 'ARE', 'THE', 'CHANCES', 'OF', 'GETTING', 'HIM', 'OFF', 'LEGALLY', 'IF', 'WE', 'GO', 'AT', 'IT', 'FROM', 'A', 'NEGATIVE', 'STANDPOINT', 'ASKED', 'THE', 'COLONEL'] +6432-63722-0047-2478: ref=['RATHER', 'A', 'HYPOTHETICAL', 'QUESTION', 'COLONEL', 'BUT', 'I', 'SHOULD', 'SAY', 'IT', 'MIGHT', 'BE', 'A', 'FIFTY', 'FIFTY', 'PROPOSITION'] +6432-63722-0047-2478: hyp=['RATHER', 'A', 'HYPOTHETICAL', 'QUESTION', 'COLONEL', 'BUT', 'I', 'SHOULD', 'SAY', 'IT', 'MIGHT', 'BE', 'A', 'FIFTY', 'FIFTY', 'PROPOSITION'] +6432-63722-0048-2479: ref=['AT', 'BEST', 'HE', 'WOULD', 'GET', 'OFF', 'WITH', 'A', 'SCOTCH', 'VERDICT', 'OF', 'NOT', 'PROVEN', 'BUT', 'HE', "DOESN'T", 'WANT', 'THAT', 'NOR', 'DO', 'I'] +6432-63722-0048-2479: hyp=['AT', 'BEST', 'HE', 'WOULD', 'GET', 'OFF', 'WITH', 'A', 'SCOTCH', 'VERDICT', 'OF', 'NOT', 'PROVING', 'BUT', 'HE', "DOESN'T", 'WANT', 'THAT', 'NOR', 'DO', 'I'] +6432-63722-0049-2480: ref=['AND', 'YOU', 'I', "DON'T", 'WANT', 'IT', 'EITHER'] +6432-63722-0049-2480: hyp=['AND', 'YOU', 'I', "DON'T", 'WANT', 'IT', 'EITHER'] +6432-63722-0050-2481: ref=['BUT', 'I', 'WANT', 'TO', 'KNOW', 'JUST', 'WHERE', 'WE', 'STAND', 'NOW', 'I', 'KNOW'] +6432-63722-0050-2481: hyp=['BUT', 'I', 'WANT', 'TO', 'KNOW', 'JUST', 'WHERE', 'WE', 'STAND', 'NOW', 'I', 'KNOW'] +6432-63722-0051-2482: ref=['BUT', 'I', 'NEED', 'TO', 'DO', 'A', 'LITTLE', 'MORE', 'SMOKING', 'OUT', 'FIRST', 'NOW', 'I', 'WANT', 'TO', 'THINK'] +6432-63722-0051-2482: hyp=['BUT', 'I', 'NEED', 'TO', 'DO', 'A', 'LITTLE', 'MORE', 'SMOKING', 'OUT', 'FIRST', 'NOW', 'I', 'WANT', 'TO', 'THINK'] +6432-63722-0052-2483: ref=['IF', "YOU'LL", 'EXCUSE', 'ME', "I'LL", 'PRETEND', "I'M", 'FISHING', 'AND', 'I', 'MAY', 'CATCH', 'SOMETHING'] +6432-63722-0052-2483: hyp=['IF', "YOU'LL", 'EXCUSE', 'ME', "I'LL", 'PRETEND', "I'M", 'FISHING', 'AND', 'I', 'MAY', 'CATCH', 'SOMETHING'] +6432-63722-0053-2484: ref=['IN', 'FACT', 'I', 'HAVE', 'A', 'FEELING', 'THAT', "I'LL", 'LAND', 'MY', 'FISH'] +6432-63722-0053-2484: hyp=['IN', 'FACT', 'I', 'HAVE', 'A', 'FEELING', 'THAT', 'I', 'LAND', 'MY', 'FISH'] +6432-63722-0054-2485: ref=["I'D", 'RECOMMEND', 'HIM', 'TO', 'YOU', 'INSTEAD', 'OF', 'BLACKSTONE', 'THANKS', 'LAUGHED', 'KENNETH'] +6432-63722-0054-2485: hyp=['I', 'RECOMMEND', 'HIM', 'TO', 'YOU', 'INSTEAD', 'OF', 'BLACKSTONE', 'THANKS', 'LAP', 'KENNETH'] +6432-63722-0055-2486: ref=['WHAT', 'IS', 'IT', 'PERHAPS', 'I', 'CAN', 'HELP', 'YOU'] +6432-63722-0055-2486: hyp=['WHAT', 'IS', 'IT', 'PERHAPS', 'I', 'CAN', 'HELP', 'YOU'] +6432-63722-0056-2487: ref=['THE', 'OLD', 'ADAGE', 'OF', 'TWO', 'HEADS', 'YOU', 'KNOW'] +6432-63722-0056-2487: hyp=['THE', 'OLD', 'ADAGE', 'OF', 'TWO', 'HEADS', 'YOU', 'KNOW'] +6432-63722-0057-2488: ref=['YES', 'IT', 'STILL', 'HOLDS', 'GOOD'] +6432-63722-0057-2488: hyp=['YES', 'IT', 'STILL', 'HOLDS', 'GOOD'] +6432-63722-0058-2489: ref=['NO', 'ALIMONY', 'REPEATED', 'THE', 'COLONEL', 'PUZZLED', 'YES', 'JUST', 'THAT'] +6432-63722-0058-2489: hyp=['NO', 'ALIMONY', 'REPLIED', 'THE', 'COLONEL', 'PUZZLED', 'YES', 'JUST', 'THAT'] +6432-63722-0059-2490: ref=['AND', "THERE'S", 'NO', 'REASON', 'YOU', "SHOULDN'T", 'KNOW'] +6432-63722-0059-2490: hyp=['AND', "THERE'S", 'NO', 'REASON', 'YOU', "SHOULDN'T", 'KNOW'] +6432-63723-0000-2491: ref=['CHUCKLED', 'THE', 'COLONEL', 'AS', 'HE', 'SKILFULLY', 'PLAYED', 'THE', 'LUCKLESS', 'TROUT', 'NOW', 'STRUGGLING', 'TO', 'GET', 'LOOSE', 'FROM', 'THE', 'HOOK'] +6432-63723-0000-2491: hyp=['CHUCKLED', 'THE', 
'COLONEL', 'AS', 'HE', 'SKILFULLY', 'PLAYED', 'THE', 'LUCKLESS', 'TROUT', 'NOW', 'STRUGGLING', 'TO', 'GET', 'LOOSE', 'FROM', 'THE', 'HOOK'] +6432-63723-0001-2492: ref=['AND', 'WHEN', 'THE', 'FISH', 'WAS', 'LANDED', 'PANTING', 'ON', 'THE', 'GRASS', 'AND', 'SHAG', 'HAD', 'BEEN', 'ROUSED', 'FROM', 'HIS', 'SLUMBER', 'TO', 'SLIP', 'THE', 'NOW', 'LIMP', 'FISH', 'INTO', 'THE', 'CREEL', 'COLONEL', 'ASHLEY', 'GAVE', 'A', 'SIGH', 'OF', 'RELIEF', 'AND', 'REMARKED', 'I', 'THINK', 'I', 'SEE', 'IT', 'NOW'] +6432-63723-0001-2492: hyp=['AND', 'WHEN', 'THE', 'FISH', 'WAS', 'LANDED', 'PANTING', 'ON', 'THE', 'GRASS', 'AND', 'SHAG', 'HAD', 'BEEN', 'ROUSED', 'FROM', 'HIS', 'SLUMBER', 'TO', 'SLIP', 'A', 'NOW', 'LIMP', 'FISH', 'INTO', 'THE', 'CREO', 'COLONEL', 'ASHLEY', 'GAVE', 'A', 'SIGH', 'OF', 'RELIEF', 'AND', 'REMARKED', 'I', 'THINK', 'I', 'SEE', 'IT', 'NOW'] +6432-63723-0002-2493: ref=['THE', 'REASON', 'SHE', 'ASKED', 'NO', 'ALIMONY', 'INQUIRED', 'KENNETH'] +6432-63723-0002-2493: hyp=['THE', 'REASON', 'SHE', 'ASKED', 'NO', 'ALIMONY', 'INQUIRED', 'KENNETH'] +6432-63723-0003-2494: ref=['NO', 'I', "WASN'T", 'THINKING', 'OF', 'THAT'] +6432-63723-0003-2494: hyp=['NO', 'I', "WASN'T", 'THINKING', 'OF', 'THAT'] +6432-63723-0004-2495: ref=['HOWEVER', "DON'T", 'THINK', "I'M", 'NOT', 'INTERESTED', 'IN', 'YOUR', 'CASE', "I'VE", 'FISHED', 'ENOUGH', 'FOR', 'TO', 'DAY'] +6432-63723-0004-2495: hyp=['HOWEVER', "DON'T", 'THINK', "I'M", 'NOT', 'INTERESTED', 'IN', 'YOUR', 'CASE', "I'VE", 'FINISHED', 'ENOUGH', 'FOR', 'TO', 'DAY'] +6432-63723-0005-2496: ref=['WELL', 'I', "DON'T", 'KNOW', 'THAT', 'YOU', 'CAN'] +6432-63723-0005-2496: hyp=['WELL', 'I', "DON'T", 'KNOW', 'THAT', 'YOU', 'CAN'] +6432-63723-0006-2497: ref=['IT', "ISN'T", 'GENERALLY', 'KNOWN', 'WENT', 'ON', 'THE', 'LAWYER', 'THAT', 'THE', 'HOTEL', "KEEPER'S", 'WIFE', 'HAS', 'LEFT', 'HIM'] +6432-63723-0006-2497: hyp=['IT', 'IS', 'IN', 'GENERALLY', 'KNOWN', 'WENT', 'ON', 'THE', 'LAWYER', 'THAT', 'THE', 'HOTEL', "KEEPER'S", 'WIFE', 'HAS', 'LEFT', 'HIM'] +6432-63723-0007-2498: ref=['IT', 'WAS', 'ONE', 'OF', 'WHAT', 'AT', 'FIRST', 'MIGHT', 'BE', 'CALLED', 'REFINED', 'CRUELTY', 'ON', 'HER', "HUSBAND'S", 'PART', 'DEGENERATING', 'GRADUALLY', 'INTO', 'THAT', 'OF', 'THE', 'BASER', 'SORT'] +6432-63723-0007-2498: hyp=['IT', 'WAS', 'ONE', 'OF', 'WHAT', 'AT', 'FIRST', 'MIGHT', 'BE', 'CALLED', 'REFINED', 'CRUELTY', 'ON', 'HER', "HUSBAND'S", 'PART', 'DEGENERATING', 'GRADUALLY', 'INTO', 'THAT', 'OF', 'A', 'BASER', 'SORT'] +6432-63723-0008-2499: ref=['YOU', "DON'T", 'MEAN', 'THAT', 'LARCH', 'STRUCK', 'HER', 'THAT', 'THERE', 'WAS', 'PHYSICAL', 'ABUSE', 'DO', 'YOU', 'ASKED', 'THE', 'COLONEL', "THAT'S", 'WHAT', 'HE', 'DID'] +6432-63723-0008-2499: hyp=['YOU', "DON'T", 'MEAN', 'THAT', 'LARGE', 'STRUCK', 'HER', 'THAT', 'THERE', 'WAS', 'PHYSICAL', 'ABUSE', 'DO', 'YOU', 'ASKED', 'THE', 'COLONEL', "THAT'S", 'WHAT', 'HE', 'DID'] +6432-63723-0009-2500: ref=['THE', 'COLONEL', 'DID', 'NOT', 'DISCLOSE', 'THE', 'FACT', 'THAT', 'IT', 'WAS', 'NO', 'NEWS', 'TO', 'HIM'] +6432-63723-0009-2500: hyp=['THE', 'COLONEL', 'DID', 'NOT', 'DISCLOSE', 'THE', 'FACT', 'THAT', 'IT', 'WAS', 'NO', 'NEWS', 'TO', 'HIM'] +6432-63723-0010-2501: ref=['AARON', "GRAFTON'S", 'STATEMENT', 'WAS', 'BEING', 'UNEXPECTEDLY', 'CONFIRMED'] +6432-63723-0010-2501: hyp=['AARON', "GRAFTON'S", 'STATEMENT', 'WAS', 'BEING', 'UNEXPECTEDLY', 'CONFIRMED'] +6432-63723-0011-2502: ref=['HE', 'REMEMBERED', 'THAT', 'CYNTHIA', 'AND', 'GRAFTON', 'HAD', 'ONCE', 'BEEN', 'IN', 'LOVE', 'WITH', 'EACH', 'OTHER'] +6432-63723-0011-2502: hyp=['HE', 'REMEMBERED', 
'THAT', 'CYNTHIA', 'AND', 'GRAFTON', 'HAD', 'ONCE', 'BEEN', 'IN', 'LOVE', 'WITH', 'EACH', 'OTHER'] +6432-63723-0012-2503: ref=['SHE', 'SAID', 'HE', 'HAD', 'STRUCK', 'HER', 'MORE', 'THAN', 'ONCE', 'AND', 'SHE', 'COULD', 'STAND', 'IT', 'NO', 'LONGER'] +6432-63723-0012-2503: hyp=['SHE', 'SAID', 'HE', 'HAD', 'STRUCK', 'HER', 'MORE', 'THAN', 'ONCE', 'AND', 'SHE', 'COULD', 'STAND', 'IT', 'NO', 'LONGER'] +6432-63723-0013-2504: ref=['BECAUSE', 'LARCH', 'MADE', 'NO', 'DEFENSE'] +6432-63723-0013-2504: hyp=['BECAUSE', 'LARGE', 'MADE', 'NO', 'DEFENCE'] +6432-63723-0014-2505: ref=['LARCH', 'BY', 'REFUSING', 'TO', 'APPEAR', 'PRACTICALLY', 'ADMITTED', 'THE', 'CHARGES', 'AGAINST', 'HIM', 'AND', 'DID', 'NOT', 'OPPOSE', 'THE', 'SEPARATION'] +6432-63723-0014-2505: hyp=['LARGE', 'BY', 'REFUSING', 'TO', 'APPEAR', 'PRACTICALLY', 'ADMITTED', 'THE', 'CHARGES', 'AGAINST', 'HIM', 'AND', 'DID', 'NOT', 'OPPOSE', 'THE', 'SEPARATION'] +6432-63723-0015-2506: ref=['SO', 'I', 'HAD', 'TO', 'LET', 'HER', 'HAVE', 'HER', 'WAY', 'AND', 'WE', 'DID', 'NOT', 'ASK', 'THE', 'COURT', 'FOR', 'MONEY', 'THOUGH', 'I', 'HAD', 'NO', 'SUCH', 'SQUEAMISH', 'FEELINGS', 'WHEN', 'IT', 'CAME', 'TO', 'MY', 'COUNSEL', 'FEE'] +6432-63723-0015-2506: hyp=['SO', 'I', 'HAD', 'TO', 'LET', 'HER', 'HAVE', 'HER', 'WAY', 'AND', 'WE', 'DID', 'NOT', 'ASK', 'THE', 'COURT', 'FOR', 'MONEY', 'THOUGH', 'I', 'HAD', 'NO', 'SUCH', 'SQUEAMISH', 'FEELINGS', 'WHEN', 'IT', 'CAME', 'TO', 'MY', 'COUNCIL', 'FEET'] +6432-63723-0016-2507: ref=['NO', 'BUT', 'HE', 'WILL', 'OR', "I'LL", 'SUE', 'HIM', 'AND', 'GET', 'JUDGMENT', 'OH', "HE'LL", 'PAY', 'ALL', 'RIGHT'] +6432-63723-0016-2507: hyp=['NO', 'BUT', 'HE', 'WILL', 'OR', 'ELSE', 'SUE', 'EM', 'AND', 'GET', 'JUDGMENT', 'OH', "HE'LL", 'PAY', 'ALL', 'RIGHT'] +6432-63723-0017-2508: ref=['AND', 'IT', 'TAKES', 'ALL', 'SORTS', 'OF', 'PERSONS', 'TO', 'MAKE', 'IT', 'UP'] +6432-63723-0017-2508: hyp=['AND', 'IT', 'TAKES', 'ALL', 'SORTS', 'OF', 'PERSONS', 'TO', 'MAKE', 'IT', 'UP'] +6432-63723-0018-2509: ref=['STILL', 'I', 'WOULD', 'LIKE', 'TO', 'KNOW'] +6432-63723-0018-2509: hyp=['STILL', 'I', 'WOULD', 'LIKE', 'TO', 'KNOW'] +6432-63723-0019-2510: ref=['THE', 'MURDER', 'OF', 'MISSUS', 'DARCY', 'HAD', 'SOME', 'TIME', 'AGO', 'BEEN', 'SHIFTED', 'OFF', 'THE', 'FRONT', 'PAGE', 'THOUGH', 'IT', 'WOULD', 'GET', 'BACK', 'THERE', 'WHEN', 'THE', 'YOUNG', 'JEWELER', 'WAS', 'TRIED'] +6432-63723-0019-2510: hyp=['THE', 'MURDERER', 'OF', 'MISSUS', 'DARCY', 'HAD', 'SOME', 'TIME', 'AGO', 'BEEN', 'SHIFTED', 'OFF', 'THE', 'FRONT', 'PAGE', 'THOUGH', 'IT', 'WOULD', 'GET', 'BACK', 'THERE', 'WHEN', 'THE', 'YOUNG', 'JEWELLER', 'WAS', 'TRIED'] +6432-63723-0020-2511: ref=['IT', 'HAD', 'A', 'DOUBLE', 'REPUTATION', 'SO', 'TO', 'SPEAK'] +6432-63723-0020-2511: hyp=['IT', 'HAD', 'A', 'DOUBLE', 'REPUTATION', 'SO', 'TO', 'SPEAK'] +6432-63723-0021-2512: ref=['GRAVE', 'AND', 'EVEN', 'REVEREND', 'CONVENTIONS', 'ASSEMBLED', 'IN', 'ITS', 'BALLROOM', 'AND', 'POLITICIANS', 'OF', 'THE', 'UPPER', 'IF', 'NOT', 'BETTER', 'CLASS', 'WERE', 'FREQUENTLY', 'SEEN', 'IN', 'ITS', 'DINING', 'ROOM', 'OR', 'CAFE'] +6432-63723-0021-2512: hyp=['GRAVE', 'AND', 'EVEN', 'REVEREND', 'THE', 'CONVENTIONS', 'ASSEMBLED', 'IN', 'ITS', 'BALL', 'ROOM', 'IN', 'POLITICIANS', 'OF', 'THE', 'UPPER', 'IF', 'NOT', 'BETTER', 'CLASS', 'WERE', 'FREQUENTLY', 'SEEN', 'IN', 'ITS', 'DINING', 'ROOM', 'OR', 'CAFE'] +6432-63723-0022-2513: ref=['LARCH', 'HIMSELF', 'WAS', 'A', 'PECULIAR', 'CHARACTER'] +6432-63723-0022-2513: hyp=['LARGE', 'HIMSELF', 'WAS', 'A', 'PECULIAR', 'CHARACTER'] +6432-63723-0023-2514: ref=['IN', 'A', 
'SMALLER', 'PLACE', 'HE', 'WOULD', 'HAVE', 'BEEN', 'CALLED', 'A', 'SALOON', 'KEEPER'] +6432-63723-0023-2514: hyp=['IN', 'A', 'SMALLER', 'PLACE', 'HE', 'WOULD', 'HAVE', 'BEEN', 'CALLED', 'A', 'SALOON', 'KEEPER'] +6432-63723-0024-2515: ref=['AND', 'IT', 'WAS', 'THIS', 'MAN', 'RICH', 'IT', 'WAS', 'SAID', 'HANDSOME', 'CERTAINLY', 'THAT', 'CYNTHIA', 'RATCHFORD', 'HAD', 'MARRIED'] +6432-63723-0024-2515: hyp=['AND', 'IT', 'WAS', 'THIS', 'MAN', 'RICH', 'OVER', 'SAID', 'HANDSOME', 'CERTAINLY', 'THAT', 'CENTIA', 'RETFORD', 'HAD', 'MARRIED'] +6432-63723-0025-2516: ref=['TO', 'THIS', 'WAS', 'THE', 'ANSWER', 'WHISPERED', 'MONEY'] +6432-63723-0025-2516: hyp=['TO', 'THIS', 'WAS', 'THE', 'ANSWER', 'WHISPERED', 'MONEY'] +6432-63723-0026-2517: ref=['AND', 'IN', 'A', 'WAY', 'IT', 'WAS', 'TRUE'] +6432-63723-0026-2517: hyp=['AND', 'IN', 'A', 'WAY', 'IT', 'WAS', 'TRUE'] +6432-63723-0027-2518: ref=['SHE', 'ALSO', 'SAW', 'AN', 'OPPORTUNITY', 'OF', 'PAYING', 'OLD', 'DEBTS', 'AND', 'REAPING', 'SOME', 'REVENGES'] +6432-63723-0027-2518: hyp=['SHE', 'ALSO', 'SAW', 'AN', 'OPPORTUNITY', 'OF', 'PAYING', 'OLD', 'DEBTS', 'AND', 'REAPING', 'SOME', 'REVENGES'] +6432-63723-0028-2519: ref=['AFTER', 'THE', 'MARRIAGE', 'WHICH', 'WAS', 'A', 'BRILLIANT', 'AND', 'GAY', 'ONE', 'IF', 'NOT', 'HAPPY', 'THE', 'LARCH', 'HOTEL', 'IT', 'COULD', 'HARDLY', 'BE', 'CALLED', 'A', 'HOME', 'BECAME', 'THE', 'SCENE', 'OF', 'MANY', 'FESTIVE', 'OCCASIONS'] +6432-63723-0028-2519: hyp=['AFTER', 'THE', 'MARRIAGE', 'WHICH', 'WAS', 'A', 'BRILLIANT', 'AND', 'GAY', 'ONE', 'IF', 'NOT', 'HAPPY', 'THE', 'LARGE', 'HOTEL', 'IT', 'COULD', 'HARDLY', 'BE', 'CALLED', 'HOME', 'BECAME', 'THE', 'SCENE', 'OF', 'MANY', 'FESTIVATIONS'] +6432-63723-0029-2520: ref=['THEN', 'IT', 'WAS', 'SAID', 'OF', 'LARCH', 'THAT', 'SOON', 'AFTER', 'THE', 'ECHOES', 'OF', 'THE', 'WEDDING', 'CHIMES', 'HAD', 'DIED', 'AWAY', 'HE', 'HAD', 'BEGUN', 'TO', 'TREAT', 'HIS', 'WIFE', 'WITH', 'REFINED', 'CRUELTY', 'THAT', 'HIDDEN', 'AWAY', 'FROM', 'THE', 'PUBLIC', 'UNDERNEATH', 'HIS', 'HABITUAL', 'MANNER', 'THERE', 'WAS', 'THE', 'RAWNESS', 'OF', 'THE', 'BRUTE'] +6432-63723-0029-2520: hyp=['THEN', 'IT', 'WAS', 'SAID', 'OF', 'LARGE', 'THAT', 'SOON', 'AFTER', 'THE', 'ECHOES', 'OF', 'THE', 'WEDDING', 'CHIMES', 'HAD', 'DIED', 'AWAY', 'HE', 'HAD', 'BEGUN', 'TO', 'TREAT', 'HIS', 'WIFE', 'FOR', 'THE', 'REFINED', 'CRUELTY', 'THAT', 'HIDDEN', 'AWAY', 'FROM', 'THE', 'PUBLIC', 'UNDERNEATH', 'HIS', 'HABITUAL', 'MANNER', 'THERE', 'WAS', 'THE', 'RAWNESS', 'OF', 'THE', 'BRUTE'] +6432-63723-0030-2521: ref=['BUT', 'IT', 'WAS', 'NOTICED', 'THAT', 'THE', 'OLDER', 'AND', 'MORE', 'CONSERVATIVE', 'FAMILIES', 'WERE', 'LESS', 'OFTEN', 'REPRESENTED', 'AND', 'WHEN', 'THEY', 'WERE', 'IT', 'WAS', 'BY', 'SOME', 'OF', 'THE', 'YOUNGER', 'MEMBERS', 'WHOSE', 'REPUTATIONS', 'WERE', 'ALREADY', 'SMIRCHED', 'OR', 'WHO', 'HAD', 'NOT', 'YET', 'ACQUIRED', 'ANY', 'AND', 'WERE', 'WILLING', 'TO', 'TAKE', 'A', 'CHANCE'] +6432-63723-0030-2521: hyp=['BUT', 'IT', 'WAS', 'NOTICED', 'THAT', 'THE', 'OLDER', 'AND', 'MORE', 'CONSERVATIVE', 'FAMILIES', 'WERE', 'LESS', 'OFTEN', 'REPRESENTED', 'AND', 'WHEN', 'THEY', 'WERE', 'IT', 'WAS', 'BY', 'SOME', 'OF', 'THE', 'YOUNGER', 'MEMBERS', 'WHOSE', 'REPUTATIONS', 'WERE', 'ALREADY', 'SMARCHED', 'OR', 'WHO', 'HAD', 'NOT', 'YET', 'ACQUIRED', 'ANY', 'AND', 'WERE', 'WILLING', 'TO', 'TAKE', 'A', 'CHANCE'] +6432-63723-0031-2522: ref=['IT', "WOULDN'T", 'DO', 'YOU', 'KNOW', 'AFTER', 'THAT', 'STORY', 'CAME', 'OUT', 'FOR', 'ME', 'AND', 'THE', 'VICE', 'CHANCELLOR', 'WHO', 'SAT', 'IN', 'THE', 'CASE', 'AS', 'WELL', 'AS', 
'OTHER', 'JUDGES', 'AND', 'MEMBERS', 'OF', 'THE', 'BAR', 'TO', 'BE', 'SEEN', 'THERE', 'KENNETH', 'EXPLAINED', 'TO', 'THE', 'COLONEL'] +6432-63723-0031-2522: hyp=['IT', "WOULDN'T", 'DO', 'YOU', 'KNOW', 'AFTER', 'THAT', 'STORY', 'CAME', 'OUT', 'FOR', 'ME', 'IN', 'THE', 'VICE', 'CHANCELLOR', 'WHO', 'SAT', 'IN', 'A', 'CASE', 'AS', 'WELL', 'AS', 'OTHER', 'JUDGES', 'AND', 'MEMBERS', 'OF', 'THE', 'BAR', 'TO', 'BE', 'SEEN', 'THERE', 'KENNETH', 'EXPLAINED', 'TO', 'THE', 'COLONEL'] +6432-63723-0032-2523: ref=['MEANWHILE', 'COLONEL', 'ASHLEY', 'WAS', 'A', 'VERY', 'BUSY', 'MAN', 'AND', 'TO', 'NO', 'ONE', 'DID', 'HE', 'TELL', 'VERY', 'MUCH', 'ABOUT', 'HIS', 'ACTIVITIES', 'HE', 'SAW', 'DARCY', 'FREQUENTLY', 'AT', 'THE', 'JAIL', 'AND', 'TO', 'THAT', 'YOUNG', "MAN'S", 'PLEADINGS', 'THAT', 'SOMETHING', 'BE', 'DONE', 'ALWAYS', 'RETURNED', 'THE', 'ANSWER'] +6432-63723-0032-2523: hyp=['MEANWHILE', 'COLONEL', 'ASHLEY', 'WAS', 'A', 'VERY', 'BUSY', 'MAN', 'AND', 'TO', 'NO', 'ONE', 'DID', 'HE', 'TELL', 'VERY', 'MUCH', 'ABOUT', 'HIS', 'ACTIVITIES', 'HE', 'SAW', 'DARCY', 'FREQUENTLY', 'AT', 'THE', 'JAIL', 'AND', 'TO', 'THAT', 'YOUNG', "MAN'S", 'PLEADINGS', 'THAT', 'SOMETHING', 'TO', 'BE', 'DONE', 'ALWAYS', 'RETURNED', 'THE', 'ANSWER'] +6432-63723-0033-2524: ref=["DON'T", 'WORRY', 'IT', 'WILL', 'COME', 'OUT', 'ALL', 'RIGHT'] +6432-63723-0033-2524: hyp=['DON', 'WORRY', 'IT', 'WILL', 'COME', 'OUT', 'ALL', 'RIGHT'] +6432-63723-0034-2525: ref=["I'M", 'GOING', 'TO', 'RECTIFY', 'THEM', 'BUT', 'IT', 'WILL', 'TAKE', 'TIME'] +6432-63723-0034-2525: hyp=["I'M", 'GOING', 'DIRECTIFY', 'THEM', 'BUT', 'IT', 'WILL', 'TAKE', 'TIME'] +6432-63723-0035-2526: ref=["IT'S", 'HARD', 'FOR', 'MISS', 'MASON', 'TOO', 'ALTHOUGH', "SHE'S", 'BEARING', 'UP', 'LIKE', 'A', 'MAJOR'] +6432-63723-0035-2526: hyp=["IT'S", 'HARD', 'FOR', 'MISS', 'MASON', 'TOO', 'ALTHOUGH', "SHE'S", 'BEARING', 'UP', 'LIKE', 'A', 'MAJOR'] +6432-63723-0036-2527: ref=['SO', 'KING', 'GOT', 'BAIL', 'WHO', 'PUT', 'IT', 'UP'] +6432-63723-0036-2527: hyp=['SO', 'KING', 'GOD', 'BAIL', 'WHO', 'PUT', 'IT', 'UP'] +6432-63723-0037-2528: ref=['IT', 'WAS', 'HIGH', 'LARCH'] +6432-63723-0037-2528: hyp=['IT', 'WAS', 'TIME', 'LARCH'] +6432-63723-0038-2529: ref=['THEY', 'TOOK', 'HARRY', 'AWAY', 'A', 'WHILE', 'AGO'] +6432-63723-0038-2529: hyp=['THEY', 'TOOK', 'HARRY', 'AWAY'] +6432-63723-0039-2530: ref=['BUT', 'HIS', 'ARE', 'PRETTY', 'UNCERTAIN', 'SHOES', 'TO', 'BE', 'IN', 'JUST', 'THE', 'SAME'] +6432-63723-0039-2530: hyp=['BUT', 'HIS', 'ARE', 'PRETTY', 'UNCERTAIN', 'SHOES', 'TO', 'BE', 'IN', 'JUST', 'THE', 'SAME'] +6432-63723-0040-2531: ref=['ONLY', 'THAT', 'I', 'DARCY', 'HESITATED', 'AND', 'GREW', 'RED'] +6432-63723-0040-2531: hyp=['ONLY', 'THAT', 'I', 'DARCY', 'HESITATED', 'AND', 'GREW', 'RED'] +6432-63723-0041-2532: ref=['GOOD', 'EVENING', 'COLONEL', 'HE', 'CALLED', 'GENIALLY', 'WILL', 'YOU', 'JOIN', 'ME', 'IN', 'A', 'WELSH', 'RABBIT'] +6432-63723-0041-2532: hyp=['GOOD', 'EVENING', 'COLONEL', 'HE', 'CALLED', 'GENIALLY', 'WHERE', 'YOU', 'JOIN', 'ME', 'IN', 'A', 'WELL', 'RABBIT'] +6432-63723-0042-2533: ref=['THANK', 'YOU', 'NO'] +6432-63723-0042-2533: hyp=['THANK', 'YOU', 'NO'] +6432-63723-0043-2534: ref=["I'M", 'AFRAID', 'MY', 'DIGESTION', "ISN'T", 'QUITE', 'UP', 'TO', 'THAT', 'AS', "I'VE", 'HAD', 'TO', 'CUT', 'OUT', 'MY', 'FISHING', 'OF', 'LATE'] +6432-63723-0043-2534: hyp=["I'M", 'AFRAID', 'MY', 'DIAD', "ISN'T", 'QUITE', 'UP', 'TO', 'THAT', 'AS', "I'VE", 'HAD', 'TO', 'CUT', 'OUT', 'MY', 'FISHING', 'OF', 'LATE'] +6432-63723-0044-2535: ref=['NOW', 'AS', 'TO', 'CERTAIN', 'MATTERS', 'IN', 
'THE', 'STORE', 'ON', 'THE', 'MORNING', 'OF', 'THE', 'MURDER'] +6432-63723-0044-2535: hyp=['NOW', 'AS', 'TO', 'CERTAIN', 'MATTERS', 'IN', 'THE', 'STORE', 'ON', 'THE', 'MORNING', 'OF', 'THE', 'MURDER'] +6432-63723-0045-2536: ref=['THE', 'STOPPED', 'CLOCKS', 'FOR', 'INSTANCE', 'HAVE', 'YOU', 'ANY', 'THEORY'] +6432-63723-0045-2536: hyp=['THEY', 'STOPPED', 'CLOCKS', 'FOR', 'INSTANCE', 'HAVE', 'YOU', 'ANY', 'THEORY'] +6432-63723-0046-2537: ref=['THERE', 'WERE', 'THREE', 'OF', 'THEM', 'THE', 'CENTER', 'FIGURE', 'BEING', 'THAT', 'OF', 'HARRY', 'KING', 'AND', 'HE', 'WAS', 'VERY', 'MUCH', 'INTOXICATED'] +6432-63723-0046-2537: hyp=['THERE', 'WERE', 'THREE', 'OF', 'THEM', 'THE', 'CENTRE', 'FIGURE', 'BEING', 'THAT', 'OF', 'HARRY', 'KING', 'AND', 'HE', 'WAS', 'VERY', 'MUCH', 'INTOXICATED'] +6432-63723-0047-2538: ref=['THAT', 'IS', 'NOT', 'ALWAYS', 'BUT', 'SOMETIMES', 'IT', 'HAPPENED', 'TO', 'BE', 'SO', 'NOW'] +6432-63723-0047-2538: hyp=['THAT', 'IS', 'NOT', 'ALWAYS', 'BUT', 'SOMETIMES', 'IT', 'HAPPENED', 'TO', 'BE', 'SO', 'NOW'] +6432-63723-0048-2539: ref=['I', 'BEG', 'YOUR', 'PARDON', 'HE', 'SAID', 'IN', 'THE', 'CULTURED', 'TONES', 'HE', 'KNEW', 'SO', 'WELL', 'HOW', 'TO', 'USE', 'YET', 'OF', 'WHICH', 'HE', 'MADE', 'SO', 'LITTLE', 'USE', 'OF', 'LATE'] +6432-63723-0048-2539: hyp=['I', 'BEG', 'YOUR', 'PARDON', 'HE', 'SAID', 'IN', 'THE', 'CULTURED', 'TONES', 'HE', 'KNEW', 'SO', 'WELL', 'HOW', 'TO', 'USE', 'YET', 'OF', 'WHICH', 'HE', 'MADE', 'SO', 'LITTLE', 'USE', 'OF', 'LATE'] +6432-63723-0049-2540: ref=['I', 'SAID', 'WHERE', 'HAVE', 'YOU', 'BEEN', 'REMARKED', 'THE', 'OTHER', "WE'VE", 'MISSED', 'YOU'] +6432-63723-0049-2540: hyp=['I', 'SAID', 'WHERE', 'HAVE', 'YOU', 'BEEN', 'REMARKED', 'THE', 'OTHER', "WE'VE", 'MISSED', 'YOU'] +6432-63723-0050-2541: ref=['I', 'SAID', 'I', 'WAS', 'GOLFING', 'HE', 'WENT', 'ON', 'EXCEEDINGLY', 'DISTINCTLY', 'THOUGH', 'WITH', 'AN', 'EFFORT'] +6432-63723-0050-2541: hyp=['I', 'SAID', 'I', 'WAS', 'GOLFING', 'HE', 'WENT', 'ON', 'EXCEEDINGLY', 'DISTINCTLY', 'THOUGH', 'WITH', 'AN', 'EFFORT'] +6432-63723-0051-2542: ref=['WHY', 'POLONIUS', 'SOME', 'ONE', 'ASKED'] +6432-63723-0051-2542: hyp=['WHY', 'POLONIUS', 'SOME', 'ONE', 'ASKED'] +6432-63723-0052-2543: ref=['BECAUSE', 'DEAR', 'FRIEND', 'REPLIED', 'KING', 'SOFTLY', 'HE', 'SOMEWHAT', 'RESEMBLES', 'A', 'CERTAIN', 'PERSON', 'HERE', 'WHO', 'TALKS', 'TOO', 'MUCH', 'BUT', 'WHO', 'IS', 'NOT', 'SO', 'WISE', 'AS', 'HE', 'THINKS'] +6432-63723-0052-2543: hyp=['BECAUSE', 'DEAR', 'FRIEND', 'REPLIED', 'KING', 'SOFTLY', 'HE', 'SOMEWHAT', 'RESEMBLES', 'A', 'CERTAIN', 'PERSON', 'HERE', 'WHO', 'TALKS', 'TOO', 'MUCH', 'BUT', 'WHO', 'IS', 'NOT', 'SO', 'WISE', 'AS', 'HE', 'THINKS'] +6432-63723-0053-2544: ref=['THERE', 'WAS', 'A', 'RATTLE', 'OF', 'COINS', 'ON', 'THE', 'MAHOGANY', 'BAR', 'AS', 'KING', 'SOUGHT', 'TO', 'DISENTANGLE', 'A', 'SINGLE', 'BILL', 'FROM', 'THE', 'WADDED', 'UP', 'CURRENCY', 'IN', 'HIS', 'POCKET'] +6432-63723-0053-2544: hyp=['THERE', 'WAS', 'A', 'RATTLE', 'OF', 'COIN', 'DOWN', 'THE', 'MAHOGANY', 'BARS', 'KING', 'SOUGHT', 'TO', 'DISENTANGLE', 'A', 'SINGLE', 'BILL', 'FROM', 'THE', 'WATERED', 'UP', 'CURRENCY', 'IN', 'HIS', 'POCKET'] +6432-63723-0054-2545: ref=["IT'S", "IT'S", 'AN', 'ODD', 'COIN', 'AN', 'OLD', 'ROMAN', 'ONE', 'THAT', 'MISSUS', 'DARCY', 'HAD', 'IN', 'HER', 'PRIVATE', 'COLLECTION', 'KEPT', 'IN', 'THE', 'JEWELRY', 'STORE', 'SAFE', 'WAS', 'THE', 'WHISPERED', 'ANSWER'] +6432-63723-0054-2545: hyp=["IT'S", "IT'S", 'AN', 'ODD', 'COIN', 'AN', 'OLD', 'ROMAN', 'ONE', 'THAT', 'MISSUS', 'DARCY', 'HAD', 'IN', 'HER', 'PRIVATE', 
'COLLECTION', 'KEPT', 'IN', 'THE', 'JEWELRY', 'STORE', 'SAFE', 'WAS', 'THE', 'WHISPERED', 'ANSWER'] +6432-63723-0055-2546: ref=['I', 'WENT', 'OVER', 'THEM', 'THE', 'OTHER', 'DAY', 'AND', 'NOTICED', 'SOME', 'WERE', 'MISSING', 'THOUGH', 'I', 'SAW', 'THEM', 'ALL', 'WHEN', 'I', 'PAID', 'A', 'VISIT', 'TO', 'HER', 'JUST', 'A', 'SHORT', 'TIME', 'BEFORE', 'SHE', 'WAS', 'KILLED'] +6432-63723-0055-2546: hyp=['I', 'WENT', 'OVER', 'THEM', 'NEAR', 'THE', 'DAY', 'AND', 'NOTICED', 'SOME', 'WERE', 'MISSING', 'THOUGH', 'I', 'SAW', 'THEM', 'ALL', 'WHEN', 'I', 'PAID', 'A', 'VISIT', 'TO', 'HER', 'JUST', 'A', 'SHORT', 'TIME', 'BEFORE', 'SHE', 'WAS', 'KILLED'] +6432-63723-0056-2547: ref=['THAT', 'WAS', 'HERS', 'WENT', 'ON', 'THE', 'JEWELER'] +6432-63723-0056-2547: hyp=['THAT', 'WAS', 'HERS', 'WENT', 'ON', 'THE', 'JUROR'] +6432-63723-0057-2548: ref=['NOW', 'HARRY', 'KING', 'HAS', 'IT', 'EXCLAIMED', 'COLONEL', 'ASHLEY'] +6432-63723-0057-2548: hyp=['NOW', 'HARRY', 'KING', 'HAS', 'IT', 'EXCLAIMED', 'COLONEL', 'ASHLEY'] +6938-70848-0000-1216: ref=['EVEN', 'THE', 'SUN', 'CAME', 'OUT', 'PALE', 'AND', 'WATERY', 'AT', 'NOON'] +6938-70848-0000-1216: hyp=['EVEN', 'THE', 'SUN', 'CAME', 'OUT', 'PALE', 'AND', 'WATERY', 'AT', 'NOON'] +6938-70848-0001-1217: ref=['THE', 'COLDS', 'AND', 'RHEUMATISM', 'OF', 'THE', 'RAINY', 'MONTHS', 'VANISHED'] +6938-70848-0001-1217: hyp=['THE', 'GOLDS', 'AND', 'RHEUMATISM', 'OF', 'THE', 'REINY', 'MONTHS', 'VANISHED'] +6938-70848-0002-1218: ref=['ASKED', 'A', 'WORKER', 'LAST', 'SUNDAY', 'YOU', 'DID', 'IT', 'WHEN', 'THE', 'YUNKERS'] +6938-70848-0002-1218: hyp=['AS', 'TO', 'WORKER', 'LAST', 'SUNDAY', 'YOU', 'DID', 'IT', 'WHEN', 'THE', 'YUNKERS'] +6938-70848-0003-1219: ref=['WELL', "DIDN'T", 'THEY', 'SHOOT', 'US', 'ONE', 'MAN', 'EXHIBITED', 'HIS', 'ARM', 'IN', 'A', 'SLING'] +6938-70848-0003-1219: hyp=['WELL', "DIDN'T", 'ISSUED', 'US', 'ONE', 'MAN', 'EXHIBITED', 'HIS', 'ARM', 'IN', 'A', 'SLING'] +6938-70848-0004-1220: ref=["HAVEN'T", 'I', 'GOT', 'SOMETHING', 'TO', 'REMEMBER', 'THEM', 'BY', 'THE', 'DEVILS'] +6938-70848-0004-1220: hyp=["HAVEN'T", 'I', 'GOT', 'SOMETHING', 'TO', 'REMEMBER', 'THEM', 'BY', 'THE', 'DEVILS'] +6938-70848-0005-1221: ref=['WHO', 'ARE', 'YOU', 'TO', 'DESTROY', 'THE', 'LEGAL', 'GOVERNMENT', 'WHO', 'IS', 'LENIN', 'A', 'GERMAN'] +6938-70848-0005-1221: hyp=['WHO', 'ARE', 'YOU', 'TO', 'DESTROY', 'THE', 'LEGAL', 'GOVERNMENT', 'WITH', 'LENNING', 'A', 'GERMAN'] +6938-70848-0006-1222: ref=['WHO', 'ARE', 'YOU', 'A', 'COUNTER', 'REVOLUTIONIST', 'A', 'PROVOCATOR', 'THEY', 'BELLOWED', 'AT', 'HIM'] +6938-70848-0006-1222: hyp=['WHO', 'ARE', 'YOU', 'A', 'COUNTER', 'REVOLUTIONIST', 'A', 'PROVOCATOR', 'THEY', 'BELOVED', 'AT', 'HIM'] +6938-70848-0007-1223: ref=['YOU', 'CALL', 'YOURSELVES', 'THE', 'PEOPLE', 'OF', 'RUSSIA', 'BUT', "YOU'RE", 'NOT', 'THE', 'PEOPLE', 'OF', 'RUSSIA'] +6938-70848-0007-1223: hyp=['YOU', 'CALL', 'YOURSELVES', 'THE', 'PEOPLE', 'OF', 'RACHEL', 'BUT', 'YOU', 'ARE', 'NOT', 'THE', 'PEOPLE', 'OF', 'RATIA'] +6938-70848-0008-1224: ref=['THE', 'PEASANTS', 'ARE', 'THE', 'PEOPLE', 'OF', 'RUSSIA', 'WAIT', 'UNTIL', 'THE', 'PEASANTS'] +6938-70848-0008-1224: hyp=['TO', 'PIECE', 'AND', 'OTHER', 'PEOPLE', 'OF', 'RATIA', 'WAIT', 'UNTIL', 'THE', 'PEASANTS'] +6938-70848-0009-1225: ref=['WE', 'KNOW', 'WHAT', 'THE', 'PEASANTS', 'WILL', 'SAY', "AREN'T", 'THEY', 'WORKINGMEN', 'LIKE', 'OURSELVES'] +6938-70848-0009-1225: hyp=['WE', 'KNOW', 'WHAT', 'THE', 'PEASANTS', 'WILL', 'SAY', "AREN'T", 'THEY', 'WORKING', 'MAN', 'LIKE', 'OURSELVES'] +6938-70848-0010-1226: ref=['THESE', 'MEN', 'ESPECIALLY', 
'WELCOMED', 'THE', 'CALL', 'TO', 'A', 'CONGRESS', 'OF', 'PEASANTS'] +6938-70848-0010-1226: hyp=['THIS', 'MAN', 'HAS', 'SPECIALLY', 'WELCOMED', 'TO', 'CALL', 'TO', 'A', 'CONGRESS', 'OF', 'PEASANTS'] +6938-70848-0011-1227: ref=['THESE', 'LAST', 'WERE', 'THE', 'YOUNG', 'GENERATION', 'WHO', 'HAD', 'BEEN', 'SERVING', 'IN', 'THE', 'ARMY'] +6938-70848-0011-1227: hyp=['THIS', 'LAST', 'WED', 'THE', 'YOUNG', 'GENERATION', 'WHO', 'HAD', 'BEEN', 'SERVING', 'IN', 'THE', 'ARMY'] +6938-70848-0012-1228: ref=['WHEREUPON', 'THE', 'OLD', 'EXECUTIVE', 'COMMITTEE', 'LEFT', 'THE', 'HALL'] +6938-70848-0012-1228: hyp=['WHEREUPON', 'THE', 'OLD', 'EXECUTED', 'COMMITTEE', 'LEFT', 'THE', 'HALL'] +6938-70848-0013-1229: ref=['DOWN', 'WITH', 'HIM', 'THEY', 'SHRIEKED'] +6938-70848-0013-1229: hyp=['DOWN', 'WITH', 'HIM', 'THEY', 'SHRIEKED'] +6938-70848-0014-1230: ref=['FEARFUL', 'TUMULT', 'CRIES', 'DOWN', 'WITH', 'THE', 'BOLSHEVIKI'] +6938-70848-0014-1230: hyp=['FEARFUL', 'TUMULT', 'CRIES', 'DOWN', 'WITH', 'THE', 'BALL', 'CHEVIKI'] +6938-70848-0015-1231: ref=['UPON', 'MY', 'RETURN', 'I', 'VISITED', 'SMOLNY', 'NO', 'SUCH', 'ACCUSATION', 'WAS', 'MADE', 'AGAINST', 'ME', 'THERE', 'AFTER', 'A', 'BRIEF', 'CONVERSATION', 'I', 'LEFT', 'AND', "THAT'S", 'ALL', 'LET', 'ANY', 'ONE', 'PRESENT', 'MAKE', 'SUCH', 'AN', 'ACCUSATION'] +6938-70848-0015-1231: hyp=['UPON', 'MY', 'RETURN', 'I', 'VISITED', 'MOLLY', 'NO', 'SUCH', 'ACCUSATION', 'WAS', 'MADE', 'AGAINST', 'ME', 'THERE', 'AFTER', 'A', 'BRIEF', 'CONVERSATION', 'I', 'LEFT', 'AND', 'THAT', 'SOUL', 'LATINUE', 'IN', 'PRESENT', 'MAKE', 'SUCH', 'AN', 'ACCUSATION'] +6938-70848-0016-1232: ref=['MEANWHILE', 'THE', 'QUESTION', 'OF', 'THE', 'STATUS', 'OF', 'THE', 'EXECUTIVE', 'COMMITTEE', 'WAS', 'AGITATING', 'ALL', 'MINDS'] +6938-70848-0016-1232: hyp=['MEANWHILE', 'THE', 'QUESTION', 'OF', 'THE', 'STRATUS', 'OF', 'THE', 'EXECUTORY', 'COMMITTEE', 'WAS', 'AGITATING', 'ALL', 'MINDS'] +6938-70848-0017-1233: ref=['BY', 'DECLARING', 'THE', 'ASSEMBLY', 'EXTRAORDINARY', 'CONFERENCE', 'IT', 'HAD', 'BEEN', 'PLANNED', 'TO', 'BLOCK', 'THE', 'REELECTION', 'OF', 'THE', 'EXECUTIVE', 'COMMITTEE'] +6938-70848-0017-1233: hyp=['BY', 'DECLARING', 'THEIR', 'ASSEMBLY', 'EXTRAORDINARILY', 'CONFERENCE', 'IT', 'HAD', 'BEEN', 'PLANNED', 'TO', 'PLUCK', 'THIRD', 'RELECTION', 'OF', 'THE', 'EXECUTED', 'COMMITTEE'] +6938-70848-0018-1234: ref=['BUT', 'THIS', 'WORKED', 'BOTH', 'WAYS', 'THE', 'LEFT', 'SOCIALIST', 'REVOLUTIONISTS', 'DECIDED', 'THAT', 'IF', 'THE', 'CONGRESS', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'EXECUTIVE', 'COMMITTEE', 'THEN', 'THE', 'EXECUTIVE', 'COMMITTEE', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'CONGRESS'] +6938-70848-0018-1234: hyp=['BUT', 'THIS', 'WORK', 'BOTH', 'WAGE', 'THE', 'LAD', 'SOCIALLY', 'REVOLUTIONIST', 'DECIDED', 'THAT', 'IF', 'THE', 'CONGRESS', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'EXUDY', 'COMMITTEE', 'TEN', 'TO', 'EXECUTE', 'COMMITTEE', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'CONGRESS'] +6938-70848-0019-1235: ref=['ON', 'THE', 'TWENTY', 'SEVENTH', 'OCCURRED', 'THE', 'DEBATE', 'ON', 'THE', 'LAND', 'QUESTION', 'WHICH', 'REVEALED', 'THE', 'DIFFERENCES', 'BETWEEN', 'THE', 'AGRARIAN', 'PROGRAMME', 'OF', 'THE', 'BOLSHEVIKI', 'AND', 'THE', 'LEFT', 'SOCIALIST', 'REVOLUTIONARIES'] +6938-70848-0019-1235: hyp=['ON', 'THE', 'TWENTY', 'SEVENTH', 'OCCURRED', 'THE', 'DEBATE', 'ON', 'THE', 'LAND', 'QUESTION', 'WHICH', 'REVIL', 'TO', 'DIFFERENCES', 'BETWEEN', 'THE', 'INGREDIAN', 'PROGRAM', 'OF', 'THE', 'BULGEHEVIKI', 'AND', 'THE', 'LEFT', 'SOCIALIST', 'REVOLUTIONARIES'] +6938-70848-0020-1236: ref=['THE', 
'CONSTITUENT', 'ASSEMBLY', 'WILL', 'NOT', 'DARE', 'TO', 'BREAK', 'WITH', 'THE', 'WILL', 'OF', 'THE', 'PEOPLE'] +6938-70848-0020-1236: hyp=['THE', 'CONSTITUENT', 'ASSEMBLY', 'WILL', 'NOT', 'DARE', 'TO', 'BREAK', 'WITH', 'THE', 'WILL', 'OF', 'THE', 'PEOPLE'] +6938-70848-0021-1237: ref=['FOLLOWED', 'HIM', 'LENIN', 'LISTENED', 'TO', 'NOW', 'WITH', 'ABSORBING', 'INTENSITY'] +6938-70848-0021-1237: hyp=['FOLLOWED', 'HIM', 'LENIN', 'LISTENED', 'TO', 'NOW', 'WITH', 'ABSORBING', 'INTENSITY'] +6938-70848-0022-1238: ref=['THE', 'FIRST', 'STAGE', 'WAS', 'THE', 'CRUSHING', 'OF', 'AUTOCRACY', 'AND', 'THE', 'CRUSHING', 'OF', 'THE', 'POWER', 'OF', 'THE', 'INDUSTRIAL', 'CAPITALISTS', 'AND', 'LAND', 'OWNERS', 'WHOSE', 'INTERESTS', 'ARE', 'CLOSELY', 'RELATED'] +6938-70848-0022-1238: hyp=['THE', 'FIRST', 'AGE', 'WAS', 'A', 'CRUSHING', 'OF', 'AUTOCRACY', 'AND', 'THE', 'CRASHING', 'OF', 'THE', 'POWER', 'OF', 'THE', 'INDUSTRIAL', 'CAPITALIST', 'AND', 'THE', 'LANDOWNERS', 'WHOSE', 'INTERESTS', 'ARE', 'CLOTHESILY'] +6938-70848-0023-1239: ref=['THE', 'DUMAS', 'AND', 'ZEMSTVOS', 'WERE', 'DROPPED'] +6938-70848-0023-1239: hyp=['DID', 'YOU', 'MESS', 'AND', 'THEM', 'STOOLS', 'WERE', 'DROPPED'] +6938-70848-0024-1240: ref=['HE', 'KNEW', 'THAT', 'AN', 'AGREEMENT', 'WITH', 'THE', 'BOLSHEVIKI', 'WAS', 'BEING', 'DISCUSSED', 'BUT', 'HE', 'DID', 'NOT', 'KNOW', 'THAT', 'IT', 'HAD', 'BEEN', 'CONCLUDED'] +6938-70848-0024-1240: hyp=['HE', 'KNEW', 'THAT', 'AN', 'AGREEMENT', 'WITH', 'THE', 'BOLSHEVIKI', 'WAS', 'BEING', 'DISCUSSED', 'BUT', 'HE', 'DID', 'NOT', 'KNOW', 'THAT', 'IT', 'HAD', 'BEEN', 'CONCLUDED'] +6938-70848-0025-1241: ref=['HE', 'SPOKE', 'TO', 'THE', 'RUMP', 'CONVENTION'] +6938-70848-0025-1241: hyp=['HE', 'SPOKE', 'TO', 'THE', 'RUM', 'CONVENTION'] +6938-70848-0026-1242: ref=['THE', 'VILLAGES', 'WILL', 'SAVE', 'US', 'IN', 'THE', 'END'] +6938-70848-0026-1242: hyp=['THE', 'RELIGIOUS', 'WILL', 'SAVE', 'US', 'IN', 'THE', 'END'] +6938-70848-0027-1243: ref=['BUT', 'THE', 'PRESENT', 'MOVEMENT', 'IS', 'INTERNATIONAL', 'AND', 'THAT', 'IS', 'WHY', 'IT', 'IS', 'INVINCIBLE'] +6938-70848-0027-1243: hyp=['BUT', 'THE', 'PRESENT', 'MOMENT', 'IS', 'INTERNATIONAL', 'AND', 'THAT', 'IS', 'WHY', 'IT', 'IS', 'INVINCIBLE'] +6938-70848-0028-1244: ref=['THE', 'WILL', 'OF', 'MILLIONS', 'OF', 'WORKERS', 'IS', 'NOW', 'CONCENTRATED', 'IN', 'THIS', 'HALL'] +6938-70848-0028-1244: hyp=['THE', 'WHEEL', 'OF', 'MILLIONS', 'OF', 'WORKERS', 'IS', 'SOME', 'CONCENTRATED', 'IN', 'THE', 'HALL'] +6938-70848-0029-1245: ref=['A', 'NEW', 'HUMANITY', 'WILL', 'BE', 'BORN', 'OF', 'THIS', 'WAR'] +6938-70848-0029-1245: hyp=['A', 'NEW', 'HUMANITY', 'WILL', 'BE', 'BORN', 'OF', 'THIS', 'WAR'] +6938-70848-0030-1246: ref=['I', 'GREET', 'YOU', 'WITH', 'THE', 'CHRISTENING', 'OF', 'A', 'NEW', 'RUSSIAN', 'LIFE', 'AND', 'FREEDOM'] +6938-70848-0030-1246: hyp=['I', 'GREET', 'YOU', 'WITH', 'THE', 'CHRISTIANING', 'OF', 'A', 'NEW', 'RUSSIAN', 'LIFE', 'AND', 'FREEDOM'] +7018-75788-0000-135: ref=['THEN', 'I', 'TOOK', 'UP', 'A', 'GREAT', 'STONE', 'FROM', 'AMONG', 'THE', 'TREES', 'AND', 'COMING', 'UP', 'TO', 'HIM', 'SMOTE', 'HIM', 'THEREWITH', 'ON', 'THE', 'HEAD', 'WITH', 'ALL', 'MY', 'MIGHT', 'AND', 'CRUSHED', 'IN', 'HIS', 'SKULL', 'AS', 'HE', 'LAY', 'DEAD', 'DRUNK'] +7018-75788-0000-135: hyp=['THEN', 'I', 'TOOK', 'UP', 'A', 'GREAT', 'STONE', 'FROM', 'AMONG', 'THE', 'TREES', 'AND', 'COMING', 'UP', 'TO', 'HIM', 'SMOTE', 'HIM', 'THEREWITH', 'ON', 'THE', 'HEAD', 'WITH', 'ALL', 'MY', 'MIGHT', 'AND', 'CRUSHED', 'IN', 'HIS', 'SKULL', 'AS', 'HE', 'LAY', 'DEAD', 'DRUNK'] +7018-75788-0001-136: 
ref=['BEHOLD', 'A', 'SHIP', 'WAS', 'MAKING', 'FOR', 'THE', 'ISLAND', 'THROUGH', 'THE', 'DASHING', 'SEA', 'AND', 'CLASHING', 'WAVES'] +7018-75788-0001-136: hyp=['BEHOLD', 'A', 'SHIP', 'WAS', 'MAKING', 'FOR', 'THE', 'ISLAND', 'THROUGH', 'THE', 'DASHING', 'SEA', 'AND', 'CLASHING', 'WAVES'] +7018-75788-0002-137: ref=['HEARING', 'THIS', 'I', 'WAS', 'SORE', 'TROUBLED', 'REMEMBERING', 'WHAT', 'I', 'HAD', 'BEFORE', 'SUFFERED', 'FROM', 'THE', 'APE', 'KIND'] +7018-75788-0002-137: hyp=['HEARING', 'THIS', 'I', 'WAS', 'SORE', 'TROUBLED', 'REMEMBERING', 'WHAT', 'I', 'HAD', 'BEFORE', 'SUFFERED', 'FROM', 'THE', 'APE', 'KIND'] +7018-75788-0003-138: ref=['UPON', 'THIS', 'HE', 'BROUGHT', 'ME', 'A', 'COTTON', 'BAG', 'AND', 'GIVING', 'IT', 'TO', 'ME', 'SAID', 'TAKE', 'THIS', 'BAG', 'AND', 'FILL', 'IT', 'WITH', 'PEBBLES', 'FROM', 'THE', 'BEACH', 'AND', 'GO', 'FORTH', 'WITH', 'A', 'COMPANY', 'OF', 'THE', 'TOWNSFOLK', 'TO', 'WHOM', 'I', 'WILL', 'GIVE', 'A', 'CHARGE', 'RESPECTING', 'THEE'] +7018-75788-0003-138: hyp=['UPON', 'THIS', 'HE', 'BROUGHT', 'ME', 'A', 'COTTON', 'BAG', 'AND', 'GIVEN', 'IT', 'TO', 'HIM', 'HE', 'SAID', 'TAKE', 'THIS', 'BAG', 'AND', 'FILL', 'IT', 'WITH', 'PEBBLES', 'FROM', 'THE', 'BEACH', 'AND', 'GO', 'FORTH', 'WITH', 'A', 'COMPANY', 'OF', 'THE', 'TOWNSFOLK', 'TO', 'WHOM', 'I', 'WILL', 'GIVE', 'A', 'CHARGE', 'RESPECTING', 'THEE'] +7018-75788-0004-139: ref=['DO', 'AS', 'THEY', 'DO', 'AND', 'BELIKE', 'THOU', 'SHALT', 'GAIN', 'WHAT', 'MAY', 'FURTHER', 'THY', 'RETURN', 'VOYAGE', 'TO', 'THY', 'NATIVE', 'LAND'] +7018-75788-0004-139: hyp=['DO', 'AS', 'THEY', 'DO', 'AND', 'BE', 'LIKE', 'THOU', 'SHALT', 'GAIN', 'WHAT', 'MAY', 'FURTHER', 'THY', 'RETURN', 'VOYAGE', 'TO', 'THY', 'NATIVE', 'LAND'] +7018-75788-0005-140: ref=['THEN', 'HE', 'CARRIED', 'ME', 'TO', 'THE', 'BEACH', 'WHERE', 'I', 'FILLED', 'MY', 'BAG', 'WITH', 'PEBBLES', 'LARGE', 'AND', 'SMALL', 'AND', 'PRESENTLY', 'WE', 'SAW', 'A', 'COMPANY', 'OF', 'FOLK', 'ISSUE', 'FROM', 'THE', 'TOWN', 'EACH', 'BEARING', 'A', 'BAG', 'LIKE', 'MINE', 'FILLED', 'WITH', 'PEBBLES'] +7018-75788-0005-140: hyp=['THEN', 'HE', 'CARRIED', 'ME', 'TO', 'THE', 'BEACH', 'WHERE', 'I', 'FILLED', 'MY', 'BAG', 'AND', 'WITH', 'PEBBLES', 'LARGE', 'AND', 'SMALL', 'AND', 'PRESENTLY', 'WE', 'SAW', 'A', 'COMPANY', 'OF', 'FOLK', 'ISSUED', 'FROM', 'THE', 'TOWN', 'EACH', 'BEARING', 'A', 'BAG', 'LIKE', 'MINE', 'FILLED', 'WITH', 'PEBBLES'] +7018-75788-0006-141: ref=['TO', 'THESE', 'HE', 'COMMITTED', 'ME', 'COMMENDING', 'ME', 'TO', 'THEIR', 'CARE', 'AND', 'SAYING', 'THIS', 'MAN', 'IS', 'A', 'STRANGER', 'SO', 'TAKE', 'HIM', 'WITH', 'YOU', 'AND', 'TEACH', 'HIM', 'HOW', 'TO', 'GATHER', 'THAT', 'HE', 'MAY', 'GET', 'HIS', 'DAILY', 'BREAD', 'AND', 'YOU', 'WILL', 'EARN', 'YOUR', 'REWARD', 'AND', 'RECOMPENSE', 'IN', 'HEAVEN'] +7018-75788-0006-141: hyp=['TO', 'THESE', 'HE', 'COMMITTED', 'ME', 'COMMENDING', 'ME', 'TO', 'THEIR', 'CARE', 'AND', 'SAYING', 'THIS', 'MAN', 'IS', 'A', 'STRANGER', 'SO', 'TAKE', 'HIM', 'WITH', 'YOU', 'AND', 'TEACH', 'HIM', 'HOW', 'TO', 'GATHER', 'THAT', 'HE', 'MAY', 'GET', 'HIS', 'DAILY', 'BREAD', 'AND', 'YOU', 'WILL', 'EARN', 'YOUR', 'REWARD', 'AND', 'RECOMPENSE', 'IN', 'HEAVEN'] +7018-75788-0007-142: ref=['NOW', 'SLEEPING', 'UNDER', 'THESE', 'TREES', 'WERE', 'MANY', 'APES', 'WHICH', 'WHEN', 'THEY', 'SAW', 'US', 'ROSE', 'AND', 'FLED', 'FROM', 'US', 'AND', 'SWARMED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'WHEREUPON', 'MY', 'COMPANIONS', 'BEGAN', 'TO', 'PELT', 'THEM', 'WITH', 'WHAT', 'THEY', 'HAD', 'IN', 'THEIR', 'BAGS', 'AND', 'THE', 'APES', 'FELL', 'TO', 'PLUCKING', 'OF', 'THE', 
'FRUIT', 'OF', 'THE', 'TREES', 'AND', 'CASTING', 'THEM', 'AT', 'THE', 'FOLK'] +7018-75788-0007-142: hyp=['NOW', 'SLEEPING', 'UNDER', 'THESE', 'TREES', 'WERE', 'MANY', 'IPES', 'WHICH', 'WHEN', 'THEY', 'SAW', 'US', 'ROSE', 'AND', 'FLED', 'FROM', 'US', 'AND', 'SWARMED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'WHEREUPON', 'MY', 'COMPANIONS', 'BEGAN', 'TO', 'PELT', 'THEM', 'WITH', 'WHAT', 'THEY', 'HAD', 'IN', 'THEIR', 'BAGS', 'AND', 'THE', 'APES', 'FELL', 'TO', 'PLUCKING', 'OF', 'THE', 'FRUIT', 'OF', 'THE', 'TREES', 'AND', 'CASTING', 'THEM', 'AT', 'THE', 'FOLK'] +7018-75788-0008-143: ref=['WE', 'WEIGHED', 'ANCHOR', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'SAYING', 'HER', 'PERMITTED', 'SAY'] +7018-75788-0008-143: hyp=['WE', 'WEIGHED', 'ANCHOR', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'SAYING', 'HER', 'PERMITTED', 'SAY'] +7018-75788-0009-144: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'FIFTY', 'NINTH', 'NIGHT'] +7018-75788-0009-144: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'FIFTY', 'NINTH', 'NIGHT'] +7018-75788-0010-145: ref=['AND', 'CEASED', 'NOT', 'SAILING', 'TILL', 'WE', 'ARRIVED', 'SAFELY', 'AT', 'BASSORAH'] +7018-75788-0010-145: hyp=['AND', 'CEASED', 'NOT', 'SAILING', 'TILL', 'WE', 'ARRIVED', 'SAFELY', 'AT', 'PESSORAH'] +7018-75788-0011-146: ref=['THERE', 'I', 'ABODE', 'A', 'LITTLE', 'AND', 'THEN', 'WENT', 'ON', 'TO', 'BAGHDAD', 'WHERE', 'I', 'ENTERED', 'MY', 'QUARTER', 'AND', 'FOUND', 'MY', 'HOUSE', 'AND', 'FOREGATHERED', 'WITH', 'MY', 'FAMILY', 'AND', 'SALUTED', 'MY', 'FRIENDS', 'WHO', 'GAVE', 'ME', 'JOY', 'OF', 'MY', 'SAFE', 'RETURN', 'AND', 'I', 'LAID', 'UP', 'ALL', 'MY', 'GOODS', 'AND', 'VALUABLES', 'IN', 'MY', 'STOREHOUSES'] +7018-75788-0011-146: hyp=['THERE', 'I', 'ABODE', 'A', 'LITTLE', 'AND', 'THEN', 'WENT', 'ON', 'TO', 'BAGDAD', 'WHERE', 'I', 'ENTERED', 'MY', 'QUARTER', 'AND', 'FOUND', 'MY', 'HOUSE', 'AND', 'FORGATHERED', 'WITH', 'MY', 'FAMILY', 'AND', 'SALUTED', 'MY', 'FRIENDS', 'WHO', 'GAVE', 'ME', 'JOY', 'OF', 'MY', 'SAFE', 'RETURN', 'AND', 'I', 'LAID', 'UP', 'ALL', 'MY', 'GOODS', 'AND', 'VALUABLES', 'IN', 'MY', 'STOREHOUSES'] +7018-75788-0012-147: ref=['AFTER', 'WHICH', 'I', 'RETURNED', 'TO', 'MY', 'OLD', 'MERRY', 'WAY', 'OF', 'LIFE', 'AND', 'FORGOT', 'ALL', 'I', 'HAD', 'SUFFERED', 'IN', 'THE', 'GREAT', 'PROFIT', 'AND', 'GAIN', 'I', 'HAD', 'MADE'] +7018-75788-0012-147: hyp=['AFTER', 'WHICH', 'I', 'RETURNED', 'TO', 'MY', 'OLD', 'MERRY', 'WAY', 'OF', 'LIFE', 'AND', 'FORGOT', 'ALL', 'I', 'HAD', 'SUFFERED', 'IN', 'THE', 'GREAT', 'PROFIT', 'AND', 'GAIN', 'I', 'HAD', 'MADE'] +7018-75788-0013-148: ref=['NEXT', 'MORNING', 'AS', 'SOON', 'AS', 'IT', 'WAS', 'LIGHT', 'HE', 'PRAYED', 'THE', 'DAWN', 'PRAYER', 'AND', 'AFTER', 'BLESSING', 'MOHAMMED', 'THE', 'CREAM', 'OF', 'ALL', 'CREATURES', 'BETOOK', 'HIMSELF', 'TO', 'THE', 'HOUSE', 'OF', 'SINDBAD', 'THE', 'SEAMAN', 'AND', 'WISHED', 'HIM', 'A', 'GOOD', 'DAY'] +7018-75788-0013-148: hyp=['NEXT', 'MORNING', 'AS', 'SOON', 'AS', 'IT', 'WAS', 'LIGHT', 'HE', 'PRAYED', 'THE', 'DAWN', 'PRAYER', 'AND', 'AFTER', 'BLESSING', 'MOHAMMED', 'THE', 'CREAM', 'OF', 'ALL', 'CREATURES', 'BETOOK', 'HIMSELF', 'TO', 'THE', 'HOUSE', 'OF', 'SINBAD', 'THE', 'SEAMAN', 'AND', 'WISHED', 'HIM', 'A', 'GOOD', 'DAY'] +7018-75788-0014-149: ref=['HERE', 'I', 'FOUND', 'A', 'GREAT', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'FULL', 'OF', 'MERCHANTS', 'AND', 'NOTABLES', 'WHO', 'HAD', 'WITH', 'THEM', 'GOODS', 'OF', 'PRICE', 'SO', 'I', 'EMBARKED', 'MY', 'BALES', 'THEREIN'] 
+7018-75788-0014-149: hyp=['HERE', 'I', 'FOUND', 'A', 'GREAT', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'FULL', 'OF', 'MERCHANTS', 'AND', 'NOTABLES', 'WHO', 'HAD', 'WITH', 'THEM', 'GOODS', 'OF', 'PRICE', 'SO', 'I', 'EMBARKED', 'MY', 'BALES', 'THEREIN'] +7018-75788-0015-150: ref=['HAPLY', 'AMONGST', 'YOU', 'IS', 'ONE', 'RIGHTEOUS', 'WHOSE', 'PRAYERS', 'THE', 'LORD', 'WILL', 'ACCEPT'] +7018-75788-0015-150: hyp=['HAPPILY', 'AMONGST', 'YOU', 'IS', 'ONE', 'RIGHTEOUS', 'WHOSE', 'PRAYERS', 'THE', 'LORD', 'WILL', 'ACCEPT'] +7018-75788-0016-151: ref=['PRESENTLY', 'THE', 'SHIP', 'STRUCK', 'THE', 'MOUNTAIN', 'AND', 'BROKE', 'UP', 'AND', 'ALL', 'AND', 'EVERYTHING', 'ON', 'BOARD', 'OF', 'HER', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7018-75788-0016-151: hyp=['PRESENTLY', 'THE', 'SHIP', 'STRUCK', 'THE', 'MOUNTAIN', 'AND', 'BROKE', 'UP', 'AND', 'ALL', 'THEN', 'EVERYTHING', 'ON', 'BOARD', 'OF', 'HER', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7018-75788-0017-152: ref=['BUT', 'IT', 'BURNETH', 'IN', 'THEIR', 'BELLIES', 'SO', 'THEY', 'CAST', 'IT', 'UP', 'AGAIN', 'AND', 'IT', 'CONGEALETH', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'WATER', 'WHEREBY', 'ITS', 'COLOR', 'AND', 'QUANTITIES', 'ARE', 'CHANGED', 'AND', 'AT', 'LAST', 'THE', 'WAVES', 'CAST', 'IT', 'ASHORE', 'AND', 'THE', 'TRAVELLERS', 'AND', 'MERCHANTS', 'WHO', 'KNOW', 'IT', 'COLLECT', 'IT', 'AND', 'SELL', 'IT'] +7018-75788-0017-152: hyp=['BUT', 'AT', 'BERNNETH', 'IN', 'THEIR', 'BELLIES', 'SO', 'THEY', 'CAST', 'IT', 'UP', 'AGAIN', 'AND', 'IT', 'CONCEALETH', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'WATER', 'WHEREBY', 'ITS', 'COLOR', 'AND', 'QUANTITIES', 'ARE', 'CHANGED', 'AND', 'AT', 'LAST', 'THE', 'WAVES', 'CAST', 'IT', 'ASHORE', 'AND', 'THE', 'TRAVELLERS', 'AND', 'MERCHANTS', 'WHO', 'KNOW', 'IT', 'COLLECTED', 'AND', 'SELL', 'IT'] +7018-75788-0018-153: ref=['EACH', 'THAT', 'DIED', 'WE', 'WASHED', 'AND', 'SHROUDED', 'IN', 'SOME', 'OF', 'THE', 'CLOTHES', 'AND', 'LINEN', 'CAST', 'ASHORE', 'BY', 'THE', 'TIDES', 'AND', 'AFTER', 'A', 'LITTLE', 'THE', 'REST', 'OF', 'MY', 'FELLOWS', 'PERISHED', 'ONE', 'BY', 'ONE', 'TILL', 'I', 'HAD', 'BURIED', 'THE', 'LAST', 'OF', 'THE', 'PARTY', 'AND', 'ABODE', 'ALONE', 'ON', 'THE', 'ISLAND', 'WITH', 'BUT', 'A', 'LITTLE', 'PROVISION', 'LEFT', 'I', 'WHO', 'WAS', 'WONT', 'TO', 'HAVE', 'SO', 'MUCH'] +7018-75788-0018-153: hyp=['EACH', 'THAT', 'DIED', 'WE', 'WASHED', 'AND', 'SHROUDED', 'IN', 'SOME', 'OF', 'THE', 'CLOTHES', 'AND', 'LINEN', 'CAST', 'ASHORE', 'BY', 'THE', 'TIDES', 'AND', 'AFTER', 'LITTLE', 'THE', 'REST', 'OF', 'MY', 'FELLOWS', 'PERISHED', 'ONE', 'BY', 'ONE', 'TILL', 'I', 'HAD', 'BURIED', 'THE', 'LAST', 'OF', 'THE', 'PARTY', 'AND', 'ABODE', 'ALONE', 'ON', 'THE', 'ISLAND', 'WITH', 'BUT', 'A', 'LITTLE', 'PROVISION', 'LEFT', 'I', 'WHO', 'WAS', 'WONT', 'TO', 'HAVE', 'SO', 'MUCH'] +7018-75788-0019-154: ref=['BUT', 'THERE', 'IS', 'MAJESTY', 'AND', 'THERE', 'IS', 'NO', 'MIGHT', 'SAVE', 'IN', 'ALLAH', 'THE', 'GLORIOUS', 'THE', 'GREAT'] +7018-75788-0019-154: hyp=['BUT', 'THERE', 'IS', 'MAJESTY', 'AND', 'THERE', 'IS', 'NO', 'MIGHT', 'SAVE', 'IN', 'ALLAH', 'THE', 'GLORIOUS', 'THE', 'GREAT'] +7018-75789-0000-155: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'FIRST', 'NIGHT'] +7018-75789-0000-155: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'FIRST', 'NIGHT'] +7018-75789-0001-156: ref=['THEN', 'SIGHING', 'FOR', 'MYSELF', 'I', 'SET', 'TO', 'WORK', 'COLLECTING', 'A', 'NUMBER', 'OF', 'PIECES', 'OF', 'CHINESE', 'AND', 'COMORIN', 'ALOES', 'WOOD', 'AND', 'I', 'BOUND', 'THEM', 'TOGETHER', 'WITH', 
'ROPES', 'FROM', 'THE', 'WRECKAGE', 'THEN', 'I', 'CHOSE', 'OUT', 'FROM', 'THE', 'BROKEN', 'UP', 'SHIPS', 'STRAIGHT', 'PLANKS', 'OF', 'EVEN', 'SIZE', 'AND', 'FIXED', 'THEM', 'FIRMLY', 'UPON', 'THE', 'ALOES', 'WOOD', 'MAKING', 'ME', 'A', 'BOAT', 'RAFT', 'A', 'LITTLE', 'NARROWER', 'THAN', 'THE', 'CHANNEL', 'OF', 'THE', 'STREAM', 'AND', 'I', 'TIED', 'IT', 'TIGHTLY', 'AND', 'FIRMLY', 'AS', 'THOUGH', 'IT', 'WERE', 'NAILED'] +7018-75789-0001-156: hyp=['THEN', 'SIGNED', 'FOR', 'MYSELF', 'I', 'SET', 'TO', 'WORK', 'COLLECTING', 'A', 'NUMBER', 'OF', 'PIECES', 'OF', 'CHINESE', 'AND', 'CORMOR', 'AND', 'ALOES', 'WOOD', 'AND', 'I', 'BOUND', 'THEM', 'TOGETHER', 'WITH', 'ROPES', 'FROM', 'THE', 'WRECKAGE', 'THEN', 'I', 'CHOSE', 'OUT', 'FROM', 'THE', 'BROKEN', 'UP', 'SHIP', 'STRAIGHT', 'PLANKS', 'OF', 'EVEN', 'SIZE', 'AND', 'FIXED', 'THEM', 'FIRMLY', 'UPON', 'THE', "ALLO'S", 'WOOD', 'MAKING', 'ME', 'A', 'BOAT', 'RAFT', 'A', 'LITTLE', 'NARROWER', 'THAN', 'THE', 'CHANNEL', 'OF', 'THE', 'STREAM', 'AND', 'I', 'TIED', 'IT', 'TIGHTLY', 'AND', 'FIRMLY', 'AS', 'THOUGH', 'IT', 'WERE', 'NAILED'] +7018-75789-0002-157: ref=['LAND', 'AFTER', 'LAND', 'SHALT', 'THOU', 'SEEK', 'AND', 'FIND', 'BUT', 'NO', 'OTHER', 'LIFE', 'ON', 'THY', 'WISH', 'SHALL', 'WAIT', 'FRET', 'NOT', 'THY', 'SOUL', 'IN', 'THY', 'THOUGHTS', 'O', 'NIGHT', 'ALL', 'WOES', 'SHALL', 'END', 'OR', 'SOONER', 'OR', 'LATE'] +7018-75789-0002-157: hyp=['LAND', 'AFTER', 'LAND', 'SHALT', 'THOU', 'SEE', 'CONFINED', 'BUT', 'NO', 'OTHER', 'LIFE', 'ON', 'THY', 'WISH', 'SHALL', 'WAIT', 'FRED', 'NOT', 'THY', 'SOUL', 'IN', 'THY', 'THOUGHTS', 'A', 'KNIGHT', 'OR', 'WOES', 'SHALL', 'END', 'OR', 'SOONER', 'OR', 'LATE'] +7018-75789-0003-158: ref=['I', 'ROWED', 'MY', 'CONVEYANCE', 'INTO', 'THE', 'PLACE', 'WHICH', 'WAS', 'INTENSELY', 'DARK', 'AND', 'THE', 'CURRENT', 'CARRIED', 'THE', 'RAFT', 'WITH', 'IT', 'DOWN', 'THE', 'UNDERGROUND', 'CHANNEL'] +7018-75789-0003-158: hyp=['I', 'RIDE', 'MY', 'CONVEYANCE', 'INTO', 'THE', 'PLACE', 'WHICH', 'WAS', 'INTENSELY', 'DARK', 'AND', 'THE', 'CURRENT', 'CARRIED', 'ME', 'THE', 'RAFT', 'WITH', 'IT', 'DOWN', 'THE', 'UNDERGROUND', 'CHANNEL'] +7018-75789-0004-159: ref=['AND', 'I', 'THREW', 'MYSELF', 'DOWN', 'UPON', 'MY', 'FACE', 'ON', 'THE', 'RAFT', 'BY', 'REASON', 'OF', 'THE', 'NARROWNESS', 'OF', 'THE', 'CHANNEL', 'WHILST', 'THE', 'STREAM', 'CEASED', 'NOT', 'TO', 'CARRY', 'ME', 'ALONG', 'KNOWING', 'NOT', 'NIGHT', 'FROM', 'DAY', 'FOR', 'THE', 'EXCESS', 'OF', 'THE', 'GLOOM', 'WHICH', 'ENCOMPASSED', 'ME', 'ABOUT', 'AND', 'MY', 'TERROR', 'AND', 'CONCERN', 'FOR', 'MYSELF', 'LEST', 'I', 'SHOULD', 'PERISH'] +7018-75789-0004-159: hyp=['AND', 'I', 'THREW', 'MYSELF', 'DOWN', 'UPON', 'MY', 'FACE', 'ON', 'THE', 'RAFT', 'BY', 'REASON', 'OF', 'THE', 'NARROWNESS', 'OF', 'THE', 'CHANNEL', 'WHILST', 'THE', 'STREAM', 'CEASED', 'NOT', 'TO', 'CARRY', 'ME', 'ALONG', 'KNOWING', 'NOT', 'NIGHT', 'FROM', 'DAY', 'FOR', 'THE', 'EXCESS', 'OF', 'THE', 'GLOOM', 'WHICH', 'ENCOMPASSED', 'ME', 'ABOUT', 'IN', 'MY', 'TERROR', 'AND', 'CONCERN', 'FOR', 'MYSELF', 'LEST', 'I', 'SHOULD', 'PERISH'] +7018-75789-0005-160: ref=['WHEN', 'I', 'AWOKE', 'AT', 'LAST', 'I', 'FOUND', 'MYSELF', 'IN', 'THE', 'LIGHT', 'OF', 'HEAVEN', 'AND', 'OPENING', 'MY', 'EYES', 'I', 'SAW', 'MYSELF', 'IN', 'A', 'BROAD', 'STREAM', 'AND', 'THE', 'RAFT', 'MOORED', 'TO', 'AN', 'ISLAND', 'IN', 'THE', 'MIDST', 'OF', 'A', 'NUMBER', 'OF', 'INDIANS', 'AND', 'ABYSSINIANS'] +7018-75789-0005-160: hyp=['WHEN', 'I', 'AWOKE', 'AT', 'LAST', 'I', 'FOUND', 'MYSELF', 'IN', 'THE', 'LIGHT', 'OF', 'HEAVEN', 'AND', 'OPENING', 'MY', 
'EYES', 'I', 'SAW', 'MYSELF', 'IN', 'A', 'BROAD', 'STREAM', 'AND', 'THE', 'RAFT', 'MOORED', 'TO', 'AN', 'ISLAND', 'IN', 'THE', 'MIDST', 'OF', 'A', 'NUMBER', 'OF', 'INDIANS', 'AND', 'ABYSSINIANS'] +7018-75789-0006-161: ref=['BUT', 'I', 'WAS', 'DELIGHTED', 'AT', 'MY', 'ESCAPE', 'FROM', 'THE', 'RIVER'] +7018-75789-0006-161: hyp=['BUT', 'I', 'WAS', 'DELIGHTED', 'AT', 'MY', 'ESCAPE', 'FROM', 'THE', 'RIVER'] +7018-75789-0007-162: ref=['WHEN', 'THEY', 'SAW', 'I', 'UNDERSTOOD', 'THEM', 'NOT', 'AND', 'MADE', 'THEM', 'NO', 'ANSWER', 'ONE', 'OF', 'THEM', 'CAME', 'FORWARD', 'AND', 'SAID', 'TO', 'ME', 'IN', 'ARABIC', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'BROTHER'] +7018-75789-0007-162: hyp=['WHEN', 'THEY', 'SAW', 'I', 'UNDERSTOOD', 'THEM', 'NIGHT', 'AND', 'MADE', 'THEM', 'NO', 'ANSWER', 'ONE', 'OF', 'THEM', 'CAME', 'FORWARD', 'AND', 'SAID', 'TO', 'ME', 'IN', 'ARABIC', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'BROTHER'] +7018-75789-0008-163: ref=['O', 'MY', 'BROTHER', 'ANSWERED', 'HE', 'WE', 'ARE', 'HUSBANDMEN', 'AND', 'TILLERS', 'OF', 'THE', 'SOIL', 'WHO', 'CAME', 'OUT', 'TO', 'WATER', 'OUR', 'FIELDS', 'AND', 'PLANTATIONS', 'AND', 'FINDING', 'THEE', 'ASLEEP', 'ON', 'THIS', 'RAFT', 'LAID', 'HOLD', 'OF', 'IT', 'AND', 'MADE', 'IT', 'FAST', 'BY', 'US', 'AGAINST', 'THOU', 'SHOULDST', 'AWAKE', 'AT', 'THY', 'LEISURE'] +7018-75789-0008-163: hyp=['O', 'MY', 'BROTHER', 'ANSWERED', 'HE', 'WE', 'ARE', 'HUSBANDMEN', 'AND', 'TELLERS', 'OF', 'THE', 'SOIL', 'WHO', 'CAME', 'OUT', 'TO', 'WATER', 'OUR', 'FIELDS', 'IN', 'PLANTATIONS', 'AND', 'FINDING', 'THEE', 'ASLEEP', 'ON', 'THIS', 'RAFT', 'LAID', 'HOLD', 'OF', 'IT', 'AND', 'MADE', 'IT', 'FAST', 'BY', 'US', 'AGAINST', 'THOU', 'SHOULDST', 'AWAKE', 'AT', 'THY', 'LEISURE'] +7018-75789-0009-164: ref=['I', 'ANSWERED', 'FOR', "ALLAH'S", 'SAKE', 'O', 'MY', 'LORD', 'ERE', 'I', 'SPEAK', 'GIVE', 'ME', 'SOMEWHAT', 'TO', 'EAT', 'FOR', 'I', 'AM', 'STARVING', 'AND', 'AFTER', 'ASK', 'ME', 'WHAT', 'THOU', 'WILT'] +7018-75789-0009-164: hyp=['I', 'ANSWERED', 'FOR', "ALLAH'S", 'SAKE', 'AND', 'MY', 'LORD', 'ERE', 'I', 'SPEAK', 'GIVE', 'ME', 'SOMEWHAT', 'TO', 'EAT', 'FOR', 'I', 'AM', 'STARVING', 'AND', 'AFTER', 'ASK', 'ME', 'WHAT', 'THOU', 'WILT'] +7018-75789-0010-165: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'SECOND', 'NIGHT'] +7018-75789-0010-165: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'SECOND', 'NIGHT'] +7018-75789-0011-166: ref=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'SINDBAD', 'THE', 'SEAMAN', 'CONTINUED', 'WHEN', 'I', 'LANDED', 'AND', 'FOUND', 'MYSELF', 'AMONGST', 'THE', 'INDIANS', 'AND', 'ABYSSINIANS', 'AND', 'HAD', 'TAKEN', 'SOME', 'REST', 'THEY', 'CONSULTED', 'AMONG', 'THEMSELVES', 'AND', 'SAID', 'TO', 'ONE', 'ANOTHER', 'THERE', 'IS', 'NO', 'HELP', 'FOR', 'IT', 'BUT', 'WE', 'CARRY', 'HIM', 'WITH', 'US', 'AND', 'PRESENT', 'HIM', 'TO', 'OUR', 'KING', 'THAT', 'HE', 'MAY', 'ACQUAINT', 'HIM', 'WITH', 'HIS', 'ADVENTURES'] +7018-75789-0011-166: hyp=['SHE', 'SAID', 'IT', 'HATH', 'RAGED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'SINDBAD', 'THE', 'SEAMAN', 'CONTINUED', 'WHEN', 'I', 'LANDED', 'AND', 'FOUND', 'MYSELF', 'AMONGST', 'THE', 'INDIANS', 'AND', 'ABYSSINIANS', 'AND', 'HAD', 'TAKEN', 'SOME', 'REST', 'THEY', 'CONSULTED', 'AMONG', 'THEMSELVES', 'AND', 'SAID', 'TO', 'ONE', 'ANOTHER', 'THERE', 'IS', 'NO', 'HELP', 'FOR', 'IT', 'BUT', 'WE', 'CARRY', 'HIM', 'WITH', 'US', 'AND', 'PRESENT', 'HIM', 'TO', 'OUR', 'KING', 'THAT', 'HE', 'MAY', 'ACQUAINT', 'HIM', 'WITH', 'HIS', 'ADVENTURES'] 
+7018-75789-0012-167: ref=['SO', 'I', 'CONSORTED', 'WITH', 'THE', 'CHIEF', 'OF', 'THE', 'ISLANDERS', 'AND', 'THEY', 'PAID', 'ME', 'THE', 'UTMOST', 'RESPECT'] +7018-75789-0012-167: hyp=['SO', 'I', 'CONSORTED', 'WITH', 'THE', 'CHIEF', 'OF', 'THE', 'ISLANDERS', 'AND', 'THEY', 'PAID', 'ME', 'THE', 'UTMOST', 'RESPECT'] +7018-75789-0013-168: ref=['SO', 'I', 'ROSE', 'WITHOUT', 'STAY', 'OR', 'DELAY', 'AND', 'KISSED', 'THE', "KING'S", 'HAND', 'AND', 'ACQUAINTED', 'HIM', 'WITH', 'MY', 'LONGING', 'TO', 'SET', 'OUT', 'WITH', 'THE', 'MERCHANTS', 'FOR', 'THAT', 'I', 'PINED', 'AFTER', 'MY', 'PEOPLE', 'AND', 'MINE', 'OWN', 'LAND'] +7018-75789-0013-168: hyp=['SO', 'I', 'ROSE', 'WITHOUT', 'STAY', 'OR', 'DELAY', 'AND', 'KISSED', 'THE', "KING'S", 'HAND', 'AND', 'ACQUAINTED', 'HIM', 'WITH', 'MY', 'LONGING', 'TO', 'SET', 'OUT', 'WITH', 'THE', 'MERCHANTS', 'FOR', 'THAT', 'I', 'PINED', 'AFTER', 'MY', 'PEOPLE', 'AND', 'MY', 'OWN', 'LAND'] +7018-75789-0014-169: ref=['QUOTH', 'HE', 'THOU', 'ART', 'THINE', 'OWN', 'MASTER', 'YET', 'IF', 'IT', 'BE', 'THY', 'WILL', 'TO', 'ABIDE', 'WITH', 'US', 'ON', 'OUR', 'HEAD', 'AND', 'EYES', 'BE', 'IT', 'FOR', 'THOU', 'GLADDENEST', 'US', 'WITH', 'THY', 'COMPANY'] +7018-75789-0014-169: hyp=['QUOTH', 'HE', 'THOU', 'ART', 'THINE', 'OWN', 'MASTER', 'YET', 'IF', 'IT', 'BE', 'THY', 'WILL', 'TO', 'ABIDE', 'WITH', 'US', 'ON', 'OUR', 'HEAD', 'AND', 'EYES', 'BE', 'IT', 'FOR', 'THOU', 'GLADNESSED', 'US', 'WITH', 'THY', 'COMPANY'] +7018-75789-0015-170: ref=['BY', 'ALLAH', 'O', 'MY', 'LORD', 'ANSWERED', 'I', 'THOU', 'HAST', 'INDEED', 'OVERWHELMED', 'ME', 'WITH', 'THY', 'FAVOURS', 'AND', 'WELL', 'DOINGS', 'BUT', 'I', 'WEARY', 'FOR', 'A', 'SIGHT', 'OF', 'MY', 'FRIENDS', 'AND', 'FAMILY', 'AND', 'NATIVE', 'COUNTRY'] +7018-75789-0015-170: hyp=['BY', 'ALLAH', 'ARE', 'MY', 'LORD', 'ANSWERED', 'I', 'THOU', 'HAST', 'INDEED', 'OVERWHELMED', 'ME', 'WITH', 'THY', 'FAVOURS', 'AND', 'WELL', 'DOINGS', 'BUT', 'I', 'WEARY', 'FOR', 'A', 'SIGHT', 'OF', 'MY', 'FRIENDS', 'AND', 'FAMILY', 'AND', 'NATIVE', 'COUNTRY'] +7018-75789-0016-171: ref=['THEN', 'I', 'TOOK', 'LEAVE', 'OF', 'HIM', 'AND', 'OF', 'ALL', 'MY', 'INTIMATES', 'AND', 'ACQUAINTANCES', 'IN', 'THE', 'ISLAND', 'AND', 'EMBARKED', 'WITH', 'THE', 'MERCHANTS', 'AFORESAID'] +7018-75789-0016-171: hyp=['THEN', 'I', 'TOOK', 'LEAVE', 'OF', 'HIM', 'AND', 'OF', 'ALL', 'MY', 'INTIMATES', 'AND', 'ACQUAINTANCES', 'IN', 'THE', 'ISLAND', 'AND', 'EMBARKED', 'WITH', 'THE', 'MERCHANTS', 'AFOR', 'SAID'] +7018-75789-0017-172: ref=['HE', 'ASKED', 'ME', 'WHENCE', 'THEY', 'CAME', 'AND', 'I', 'SAID', 'TO', 'HIM', 'BY', 'ALLAH', 'O', 'COMMANDER', 'OF', 'THE', 'FAITHFUL', 'I', 'KNOW', 'NOT', 'THE', 'NAME', 'OF', 'THE', 'CITY', 'NOR', 'THE', 'WAY', 'THITHER'] +7018-75789-0017-172: hyp=['HE', 'ASKED', 'ME', 'WHENCE', 'THEY', 'CAME', 'AND', 'I', 'SAID', 'TO', 'HIM', 'BY', 'ALLAH', 'O', 'COMMANDER', 'OF', 'THE', 'FAITHFUL', 'I', 'KNOW', 'NOT', 'THE', 'NAME', 'OF', 'THE', 'CITY', 'NOR', 'THE', 'WAY', 'THITHER'] +7018-75789-0018-173: ref=['FOR', 'STATE', 'PROCESSIONS', 'A', 'THRONE', 'IS', 'SET', 'FOR', 'HIM', 'UPON', 'A', 'HUGE', 'ELEPHANT', 'ELEVEN', 'CUBITS', 'HIGH', 'AND', 'UPON', 'THIS', 'HE', 'SITTETH', 'HAVING', 'HIS', 'GREAT', 'LORDS', 'AND', 'OFFICERS', 'AND', 'GUESTS', 'STANDING', 'IN', 'TWO', 'RANKS', 'ON', 'HIS', 'RIGHT', 'HAND', 'AND', 'ON', 'HIS', 'LEFT'] +7018-75789-0018-173: hyp=['FOR', 'STATE', 'PROCESSIONS', 'ARE', 'THRONE', 'IS', 'SAID', 'FOR', 'HIM', 'UPON', 'A', 'HUGE', 'ELEPHANT', 'ELEVEN', 'CUBITS', 'HIGH', 'AND', 'UPON', 'THIS', 'HE', 'SITTETH', 'HAVING', 'HIS', 
'GREAT', 'LORDS', 'AND', 'OFFICERS', 'AND', 'GUESTS', 'STANDING', 'IN', 'TWO', 'RANKS', 'ON', 'HIS', 'RIGHT', 'HAND', 'AND', 'ON', 'HIS', 'LEFT'] +7018-75789-0019-174: ref=['HIS', 'LETTER', 'HATH', 'SHOWN', 'ME', 'THIS', 'AND', 'AS', 'FOR', 'THE', 'MIGHTINESS', 'OF', 'HIS', 'DOMINION', 'THOU', 'HAST', 'TOLD', 'US', 'WHAT', 'THOU', 'HAST', 'EYE', 'WITNESSED'] +7018-75789-0019-174: hyp=['HIS', 'LETTER', 'HATH', 'SHOWN', 'ME', 'THIS', 'AND', 'AS', 'FOR', 'THE', 'MIGHTINESS', 'OF', 'HIS', 'DOMINION', 'THOU', 'HAST', 'TOLD', 'US', 'WHAT', 'THOU', 'HAST', 'DIE', 'WITNESSED'] +7018-75789-0020-175: ref=['PRESENTLY', 'MY', 'FRIENDS', 'CAME', 'TO', 'ME', 'AND', 'I', 'DISTRIBUTED', 'PRESENTS', 'AMONG', 'MY', 'FAMILY', 'AND', 'GAVE', 'ALMS', 'AND', 'LARGESSE', 'AFTER', 'WHICH', 'I', 'YIELDED', 'MYSELF', 'TO', 'JOYANCE', 'AND', 'ENJOYMENT', 'MIRTH', 'AND', 'MERRY', 'MAKING', 'AND', 'FORGOT', 'ALL', 'THAT', 'I', 'HAD', 'SUFFERED'] +7018-75789-0020-175: hyp=['PRESENTLY', 'MY', 'FRIENDS', 'CAME', 'TO', 'ME', 'AND', 'I', 'DISTRIBUTED', 'PRESENTS', 'AMONG', 'MY', 'FAMILY', 'AND', 'GAVE', 'ARMS', 'IN', 'LARGESSE', 'AFTER', 'WHICH', 'I', 'YIELDED', 'MYSELF', 'TO', 'JOYANCE', 'AND', 'ENJOYMENT', 'MIRTH', 'AND', 'MERRYMAKING', 'AND', 'FORGOT', 'ALL', 'THAT', 'I', 'HAD', 'SUFFERED'] +7018-75789-0021-176: ref=['SUCH', 'THEN', 'O', 'MY', 'BROTHERS', 'IS', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFEL', 'ME', 'IN', 'MY', 'SIXTH', 'VOYAGE', 'AND', 'TO', 'MORROW', 'INSHALLAH'] +7018-75789-0021-176: hyp=['SUCH', 'THEN', 'ARE', 'MY', 'BROTHERS', 'IS', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFELL', 'ME', 'IN', 'MY', 'SIXTH', 'VOYAGE', 'AND', 'TO', 'MORROW', 'INSHALLAH'] +7018-75789-0022-177: ref=['I', 'WILL', 'TELL', 'YOU', 'THE', 'STORY', 'OF', 'MY', 'SEVENTH', 'AND', 'LAST', 'VOYAGE', 'WHICH', 'IS', 'STILL', 'MORE', 'WONDROUS', 'AND', 'MARVELLOUS', 'THAN', 'THAT', 'OF', 'THE', 'FIRST', 'SIX'] +7018-75789-0022-177: hyp=['I', 'WILL', 'TELL', 'YOU', 'THE', 'STORY', 'OF', 'MY', 'SEVENTH', 'AND', 'LAST', 'VOYAGE', 'WHICH', 'IS', 'STILL', 'MORE', 'WONDROUS', 'AND', 'MARVELLOUS', 'THAN', 'THAT', 'OF', 'THE', 'FIRST', 'SIX'] +7018-75789-0023-178: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'THIRD', 'NIGHT'] +7018-75789-0023-178: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'THIRD', 'NIGHT'] +7018-75789-0024-179: ref=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'SINDBAD', 'THE', 'SEAMAN', 'HAD', 'RELATED', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFEL', 'HIM', 'IN', 'HIS', 'SIXTH', 'VOYAGE', 'AND', 'ALL', 'THE', 'COMPANY', 'HAD', 'DISPERSED', 'SINDBAD', 'THE', 'LANDSMAN', 'WENT', 'HOME', 'AND', 'SLEPT', 'AS', 'OF', 'WONT'] +7018-75789-0024-179: hyp=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'SINDBAD', 'THE', 'SEAMEN', 'HAD', 'RELIGHTED', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFELL', 'HIM', 'IN', 'HIS', 'SIXTH', 'VOYAGE', 'AND', 'ALL', 'THE', 'COMPANY', 'HAD', 'DISPERSED', 'SINDBAD', 'THE', 'LANDSMAN', 'WENT', 'HOME', 'AND', 'SLEPT', 'AS', 'OF', 'WANT'] +7018-75789-0025-180: ref=['THE', 'SEVENTH', 'VOYAGE', 'OF', 'SINDBAD', 'THE', 'SEAMAN'] +7018-75789-0025-180: hyp=['THE', 'SEVENTH', 'VOYAGE', 'OF', 'SINBAD', 'THE', 'SEAMAN'] +7018-75789-0026-181: ref=['KNOW', 'O', 'COMPANY', 'THAT', 'AFTER', 'MY', 'RETURN', 'FROM', 'MY', 'SIXTH', 'VOYAGE', 'WHICH', 'BROUGHT', 'ME', 'ABUNDANT', 'PROFIT', 'I', 'RESUMED', 'MY', 'FORMER', 'LIFE', 'IN', 'ALL', 'POSSIBLE', 'JOYANCE', 'AND', 'ENJOYMENT', 'AND', 'MIRTH', 'AND', 
'MAKING', 'MERRY', 'DAY', 'AND', 'NIGHT', 'AND', 'I', 'TARRIED', 'SOME', 'TIME', 'IN', 'THIS', 'SOLACE', 'AND', 'SATISFACTION', 'TILL', 'MY', 'SOUL', 'BEGAN', 'ONCE', 'MORE', 'TO', 'LONG', 'TO', 'SAIL', 'THE', 'SEAS', 'AND', 'SEE', 'FOREIGN', 'COUNTRIES', 'AND', 'COMPANY', 'WITH', 'MERCHANTS', 'AND', 'HEAR', 'NEW', 'THINGS'] +7018-75789-0026-181: hyp=['NO', 'O', 'COMPANY', 'THAT', 'AFTER', 'MY', 'RETURN', 'FROM', 'MY', 'SIXTH', 'VOYAGE', 'WHICH', 'BROUGHT', 'ME', 'ABUNDANT', 'PROPHET', 'I', 'RESUMED', 'MY', 'FORMER', 'LIFE', 'AND', 'ALL', 'POSSIBLE', 'JOYANCE', 'AND', 'ENJOYMENT', 'AND', 'MIRTH', 'AND', 'MAKING', 'MERRY', 'DAY', 'AND', 'NIGHT', 'AND', 'I', 'TARRIED', 'SOME', 'TIME', 'IN', 'THIS', 'SOLACE', 'AND', 'SATISFACTION', 'TILL', 'MY', 'SOUL', 'BEGAN', 'ONCE', 'MORE', 'TO', 'LONG', 'TO', 'SAIL', 'THE', 'SEAS', 'AND', 'SEE', 'FOREIGN', 'COUNTRIES', 'AND', 'COMPANY', 'WITH', 'MERCHANTS', 'AND', 'HERE', 'NEW', 'THINGS'] +7018-75789-0027-182: ref=['SO', 'HAVING', 'MADE', 'UP', 'MY', 'MIND', 'I', 'PACKED', 'UP', 'IN', 'BALES', 'A', 'QUANTITY', 'OF', 'PRECIOUS', 'STUFFS', 'SUITED', 'FOR', 'SEA', 'TRADE', 'AND', 'REPAIRED', 'WITH', 'THEM', 'FROM', 'BAGHDAD', 'CITY', 'TO', 'BASSORAH', 'TOWN', 'WHERE', 'I', 'FOUND', 'A', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'IN', 'HER', 'A', 'COMPANY', 'OF', 'CONSIDERABLE', 'MERCHANTS'] +7018-75789-0027-182: hyp=['SO', 'HAVING', 'MADE', 'UP', 'MY', 'MIND', 'I', 'PACKED', 'UP', 'IN', 'BALES', 'A', 'QUANTITY', 'OF', 'PRECIOUS', 'STUFFS', 'SUITED', 'FOR', 'SEA', 'TRADE', 'AND', 'REPAIRED', 'WITH', 'THEM', 'FROM', 'BAGDAD', 'CITY', 'TO', 'BASSORA', 'TOWN', 'WHERE', 'I', 'FOUND', 'A', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'IN', 'HER', 'OUR', 'COMPANY', 'OF', 'CONSIDERABLE', 'MERCHANTS'] +7018-75789-0028-183: ref=['BUT', 'THE', 'CAPTAIN', 'AROSE', 'AND', 'TIGHTENING', 'HIS', 'GIRDLE', 'TUCKED', 'UP', 'HIS', 'SKIRTS', 'AND', 'AFTER', 'TAKING', 'REFUGE', 'WITH', 'ALLAH', 'FROM', 'SATAN', 'THE', 'STONED', 'CLOMB', 'TO', 'THE', 'MAST', 'HEAD', 'WHENCE', 'HE', 'LOOKED', 'OUT', 'RIGHT', 'AND', 'LEFT', 'AND', 'GAZING', 'AT', 'THE', 'PASSENGERS', 'AND', 'CREW', 'FELL', 'TO', 'BUFFETING', 'HIS', 'FACE', 'AND', 'PLUCKING', 'OUT', 'HIS', 'BEARD'] +7018-75789-0028-183: hyp=['BUT', 'THE', 'CAPTAIN', 'AROSE', 'AND', 'TIGHTENING', 'HIS', 'GIRDLE', 'TUCKED', 'UP', 'HIS', 'SKIRTS', 'AND', 'AFTER', 'TAKING', 'REFUGE', 'WITH', 'ALLAH', 'FROM', 'SATAN', 'THE', 'STONE', 'CLIMBED', 'TO', 'THE', 'MAST', 'HEAD', 'WHENCE', 'HE', 'LOOKED', 'OUT', 'RIGHT', 'AND', 'LEFT', 'AND', 'GAZING', 'AT', 'THE', 'PASSENGERS', 'AND', 'CREW', 'FELL', 'TO', 'BUFFET', 'IN', 'HIS', 'FACE', 'AND', 'PLUCKING', 'OUT', 'HIS', 'BEARD'] +7018-75789-0029-184: ref=['THIS', 'HE', 'SET', 'IN', 'A', 'SAUCER', 'WETTED', 'WITH', 'A', 'LITTLE', 'WATER', 'AND', 'AFTER', 'WAITING', 'A', 'SHORT', 'TIME', 'SMELT', 'AND', 'TASTED', 'IT', 'AND', 'THEN', 'HE', 'TOOK', 'OUT', 'OF', 'THE', 'CHEST', 'A', 'BOOKLET', 'WHEREIN', 'HE', 'READ', 'AWHILE', 'AND', 'SAID', 'WEEPING', 'KNOW', 'O', 'YE', 'PASSENGERS', 'THAT', 'IN', 'THIS', 'BOOK', 'IS', 'A', 'MARVELLOUS', 'MATTER', 'DENOTING', 'THAT', 'WHOSO', 'COMETH', 'HITHER', 'SHALL', 'SURELY', 'DIE', 'WITHOUT', 'HOPE', 'OF', 'ESCAPE', 'FOR', 'THAT', 'THIS', 'OCEAN', 'IS', 'CALLED', 'THE', 'SEA', 'OF', 'THE', 'CLIME', 'OF', 'THE', 'KING', 'WHEREIN', 'IS', 'THE', 'SEPULCHRE', 'OF', 'OUR', 'LORD', 'SOLOMON', 'SON', 'OF', 'DAVID', 'ON', 'BOTH', 'BE', 'PEACE'] +7018-75789-0029-184: hyp=['THIS', 'HE', 'SAID', 'IN', 'A', 'SAUCER', 'WETTED', 'WITH', 'A', 'LITTLE', 'WATER', 'AND', 'AFTER', 
'WAITING', 'A', 'SHORT', 'TIME', 'SMELT', 'AND', 'TASTED', 'IT', 'AND', 'THEN', 'HE', 'TOOK', 'OUT', 'OF', 'THE', 'CHEST', 'A', 'BOOKLET', 'WHEREIN', 'HE', 'READ', 'AWHILE', 'AND', 'SAID', 'WEEPING', 'NO', 'ARE', 'YE', 'PASSENGERS', 'THAT', 'IN', 'THIS', 'BOOK', 'IS', 'A', 'MARVELLOUS', 'MATTER', 'DENOTING', 'THAT', 'WHOSO', 'COME', 'THITHER', 'SHALL', 'SURELY', 'DIE', 'WITHOUT', 'HOPE', 'OF', 'ESCAPE', 'FOR', 'THAT', 'THIS', 'OCEAN', 'IS', 'CALLED', 'THE', 'SEA', 'OF', 'THE', 'CLIME', 'OF', 'THE', 'KING', 'WHEREIN', 'IS', 'A', 'SEPULCHRE', 'OF', 'OUR', 'LORD', 'SOLOMON', 'SON', 'OF', 'DAVID', 'ON', 'BOTH', 'BE', 'PEACE'] +7018-75789-0030-185: ref=['A', 'SECOND', 'FISH', 'MADE', 'ITS', 'APPEARANCE', 'THAN', 'WHICH', 'WE', 'HAD', 'SEEN', 'NAUGHT', 'MORE', 'MONSTROUS'] +7018-75789-0030-185: hyp=['A', 'SECOND', 'FISH', 'MADE', 'ITS', 'APPEARANCE', 'AND', 'WHICH', 'WE', 'HAD', 'SEEN', 'NOUGHT', 'MORE', 'MONSTROUS'] +7018-75789-0031-186: ref=['WHEN', 'SUDDENLY', 'A', 'VIOLENT', 'SQUALL', 'OF', 'WIND', 'AROSE', 'AND', 'SMOTE', 'THE', 'SHIP', 'WHICH', 'ROSE', 'OUT', 'OF', 'THE', 'WATER', 'AND', 'SETTLED', 'UPON', 'A', 'GREAT', 'REEF', 'THE', 'HAUNT', 'OF', 'SEA', 'MONSTERS', 'WHERE', 'IT', 'BROKE', 'UP', 'AND', 'FELL', 'ASUNDER', 'INTO', 'PLANKS', 'AND', 'ALL', 'AND', 'EVERYTHING', 'ON', 'BOARD', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7018-75789-0031-186: hyp=['WHEN', 'SUDDENLY', 'A', 'VIOLENT', 'SQUALL', 'OF', 'WIND', 'AROSE', 'AND', 'SMOTE', 'THE', 'SHIP', 'WHICH', 'ROSE', 'OUT', 'OF', 'THE', 'WATER', 'AND', 'SETTLED', 'UPON', 'A', 'GREAT', 'REEF', 'THE', 'HAUNT', 'OF', 'SEA', 'MONSTERS', 'WHERE', 'IT', 'BROKE', 'UP', 'AND', 'FELL', 'ASUNDER', 'INTO', 'PLANKS', 'AND', 'ALL', 'AND', 'EVERYTHING', 'ON', 'BOARD', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7105-2330-0000-2310: ref=['UNFORTUNATELY', 'THERE', 'COULD', 'BE', 'NO', 'DOUBT', 'OR', 'MISCONCEPTION', 'AS', 'TO', "PLATTERBAFF'S", 'GUILT'] +7105-2330-0000-2310: hyp=['UNFORTUNATELY', 'THERE', 'COULD', 'BE', 'NO', 'DOUBT', 'OR', 'MISCONCEPTION', 'AS', 'THE', "PLATTERBUFF'S", 'GUILT'] +7105-2330-0001-2311: ref=['HE', 'HAD', 'NOT', 'ONLY', 'PLEADED', 'GUILTY', 'BUT', 'HAD', 'EXPRESSED', 'HIS', 'INTENTION', 'OF', 'REPEATING', 'HIS', 'ESCAPADE', 'IN', 'OTHER', 'DIRECTIONS', 'AS', 'SOON', 'AS', 'CIRCUMSTANCES', 'PERMITTED', 'THROUGHOUT', 'THE', 'TRIAL', 'HE', 'WAS', 'BUSY', 'EXAMINING', 'A', 'SMALL', 'MODEL', 'OF', 'THE', 'FREE', 'TRADE', 'HALL', 'IN', 'MANCHESTER'] +7105-2330-0001-2311: hyp=['HE', 'HAD', 'NOT', 'ONLY', 'PLEADED', 'GUILTY', 'BUT', 'HAD', 'EXPRESSED', 'HIS', 'INTENTION', 'OF', 'REPEATING', 'HIS', 'ESCAPADE', 'IN', 'OTHER', 'DIRECTIONS', 'AS', 'SOON', 'AS', 'CIRCUMSTANCES', 'PERMITTED', 'THROUGHOUT', 'THE', 'TRIAL', 'HE', 'WAS', 'BUSY', 'EXAMINING', 'A', 'SMALL', 'MODEL', 'OF', 'THE', 'FREE', 'TRADE', 'HALL', 'IN', 'MANCHESTER'] +7105-2330-0002-2312: ref=['THE', 'JURY', 'COULD', 'NOT', 'POSSIBLY', 'FIND', 'THAT', 'THE', 'PRISONER', 'HAD', 'NOT', 'DELIBERATELY', 'AND', 'INTENTIONALLY', 'BLOWN', 'UP', 'THE', 'ALBERT', 'HALL', 'THE', 'QUESTION', 'WAS', 'COULD', 'THEY', 'FIND', 'ANY', 'EXTENUATING', 'CIRCUMSTANCES', 'WHICH', 'WOULD', 'PERMIT', 'OF', 'AN', 'ACQUITTAL'] +7105-2330-0002-2312: hyp=['VIRTUARY', 'COULD', 'NOT', 'POSSIBLY', 'FIND', 'THAT', 'THE', 'PRISONER', 'HAD', 'NOT', 'DELIBERATELY', 'AND', 'INTENTIONALLY', 'BLOWN', 'UP', 'WE', 'ALBERT', 'HALL', 'THE', 'QUESTION', 'WAS', 'COULD', 'THEY', 'FIND', 'ANY', 'EXTENUATING', 'CIRCUMSTANCES', 'WHICH', 'WOULD', 'PERMIT', 'OF', 'AN', 'ACQUITTAL'] +7105-2330-0003-2313: ref=['OF', 
'COURSE', 'ANY', 'SENTENCE', 'WHICH', 'THE', 'LAW', 'MIGHT', 'FEEL', 'COMPELLED', 'TO', 'INFLICT', 'WOULD', 'BE', 'FOLLOWED', 'BY', 'AN', 'IMMEDIATE', 'PARDON', 'BUT', 'IT', 'WAS', 'HIGHLY', 'DESIRABLE', 'FROM', 'THE', "GOVERNMENT'S", 'POINT', 'OF', 'VIEW', 'THAT', 'THE', 'NECESSITY', 'FOR', 'SUCH', 'AN', 'EXERCISE', 'OF', 'CLEMENCY', 'SHOULD', 'NOT', 'ARISE'] +7105-2330-0003-2313: hyp=['OF', 'COURSE', 'ANY', 'SENTENCE', 'WHICH', 'THE', 'LAW', 'MIGHT', 'FILL', 'COMPELLED', 'TO', 'INFLICT', 'WOULD', 'BE', 'FOLLOWED', 'BY', 'AN', 'IMMEDIATE', 'PARDON', 'BUT', 'IT', 'WAS', 'HIGHLY', 'DESIRABLE', 'FROM', 'THE', 'GOVERNMENT', 'SPITE', 'OF', 'VIEW', 'THAT', 'THE', 'NECESSITY', 'FOR', 'SUCH', 'AN', 'EXERCISE', 'OF', 'CLEMENCY', 'SHOULD', 'NOT', 'ARISE'] +7105-2330-0004-2314: ref=['A', 'HEADLONG', 'PARDON', 'ON', 'THE', 'EVE', 'OF', 'A', 'BYE', 'ELECTION', 'WITH', 'THREATS', 'OF', 'A', 'HEAVY', 'VOTING', 'DEFECTION', 'IF', 'IT', 'WERE', 'WITHHELD', 'OR', 'EVEN', 'DELAYED', 'WOULD', 'NOT', 'NECESSARILY', 'BE', 'A', 'SURRENDER', 'BUT', 'IT', 'WOULD', 'LOOK', 'LIKE', 'ONE'] +7105-2330-0004-2314: hyp=['I', 'HAD', 'LONG', 'PARDON', 'AND', 'THE', 'EVE', 'OF', 'A', 'BI', 'ELECTION', 'WITH', 'THREATS', 'OF', 'A', 'HEAVY', 'VOTING', 'DEFECTION', 'IF', 'IT', 'WERE', 'WITHHELD', 'OR', 'EVEN', 'DELAYED', 'WOULD', 'NOT', 'NECESSARILY', 'BE', 'A', 'SURRENDER', 'BUT', 'IT', 'WOULD', 'LOOK', 'LIKE', 'ONE'] +7105-2330-0005-2315: ref=['HENCE', 'THE', 'ANXIETY', 'IN', 'THE', 'CROWDED', 'COURT', 'AND', 'IN', 'THE', 'LITTLE', 'GROUPS', 'GATHERED', 'ROUND', 'THE', 'TAPE', 'MACHINES', 'IN', 'WHITEHALL', 'AND', 'DOWNING', 'STREET', 'AND', 'OTHER', 'AFFECTED', 'CENTRES'] +7105-2330-0005-2315: hyp=['HENCE', 'THE', 'ANCIDE', 'IN', 'THE', 'CROWDED', 'COURT', 'AND', 'IN', 'THE', 'LITTLE', 'GROUPS', 'GATHERED', 'ROUND', 'THE', 'TAPE', 'MACHINES', 'IN', 'WHITEHALL', 'AND', 'DAWNING', 'STREET', 'AND', 'OTHER', 'AFFECTED', 'CENTRES'] +7105-2330-0006-2316: ref=['THE', 'JURY', 'RETURNED', 'FROM', 'CONSIDERING', 'THEIR', 'VERDICT', 'THERE', 'WAS', 'A', 'FLUTTER', 'AN', 'EXCITED', 'MURMUR', 'A', 'DEATHLIKE', 'HUSH'] +7105-2330-0006-2316: hyp=['THE', 'JURY', 'TURN', 'FROM', 'CONSIDERING', 'THEIR', 'VERDICT', 'THERE', 'WAS', 'A', 'FLUTTER', 'AN', 'EXCITED', 'MURMUR', 'A', 'DEATHLIKE', 'HUSH'] +7105-2330-0007-2317: ref=['THE', 'FOREMAN', 'DELIVERED', 'HIS', 'MESSAGE'] +7105-2330-0007-2317: hyp=['THEREFORE', 'MAN', 'DELIVERED', 'HIS', 'MESSAGE'] +7105-2330-0008-2318: ref=['THE', 'JURY', 'FIND', 'THE', 'PRISONER', 'GUILTY', 'OF', 'BLOWING', 'UP', 'THE', 'ALBERT', 'HALL'] +7105-2330-0008-2318: hyp=['THE', 'JURY', 'FIND', 'THE', 'PRISONER', 'GUILTY', 'OF', 'BLOWING', 'UP', 'THE', 'ALBERT', 'HALL'] +7105-2330-0009-2319: ref=['THE', 'JURY', 'WISH', 'TO', 'ADD', 'A', 'RIDER', 'DRAWING', 'ATTENTION', 'TO', 'THE', 'FACT', 'THAT', 'A', 'BY', 'ELECTION', 'IS', 'PENDING', 'IN', 'THE', 'PARLIAMENTARY', 'DIVISION', 'OF', 'NEMESIS', 'ON', 'HAND'] +7105-2330-0009-2319: hyp=['THEY', 'JERRY', 'WISH', 'TO', 'ADD', 'A', 'RIDER', 'DRAWING', 'ATTENTION', 'TO', 'THE', 'FACT', 'THAT', 'A', 'BY', 'ELECTION', 'EXPENDING', 'IN', 'THE', 'PARLIAMENTARY', 'DIVISION', 'OF', 'NEMESIS', 'ON', 'HAND'] +7105-2330-0010-2320: ref=['AND', 'MAY', 'THE', 'LORD', 'HAVE', 'MERCY', 'ON', 'THE', 'POLL', 'A', 'JUNIOR', 'COUNSEL', 'EXCLAIMED', 'IRREVERENTLY'] +7105-2330-0010-2320: hyp=['AND', 'MADE', 'THE', 'LARD', 'HAVE', 'MERCY', 'ON', 'THE', 'POLE', 'A', 'JUNIOR', 'COUNCIL', 'EXCLAIMED', 'IRREVERENTLY'] +7105-2330-0011-2321: ref=['FIFTEEN', 'HUNDRED', 'SAID', 'THE', 'PRIME', 
'MINISTER', 'WITH', 'A', 'SHUDDER', "IT'S", 'TOO', 'HORRIBLE', 'TO', 'THINK', 'OF'] +7105-2330-0011-2321: hyp=['FIFTEEN', 'HUNDRED', 'SAID', 'A', 'PRIME', 'MINISTER', 'WITH', 'A', 'SHUDDER', "IT'S", 'TOO', 'HORRIBLE', 'TO', 'THINK', 'OF'] +7105-2330-0012-2322: ref=['OUR', 'MAJORITY', 'LAST', 'TIME', 'WAS', 'ONLY', 'A', 'THOUSAND', 'AND', 'SEVEN'] +7105-2330-0012-2322: hyp=['OUR', 'MAJORITY', 'LAST', 'TIME', 'WAS', 'ONLY', 'A', 'THOUSAND', 'AND', 'SEVEN'] +7105-2330-0013-2323: ref=['SEVEN', 'THIRTY', 'AMENDED', 'THE', 'PRIME', 'MINISTER', 'WE', 'MUST', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PRECIPITANCY'] +7105-2330-0013-2323: hyp=['SEVEN', 'THIRTY', 'AMENDED', 'THE', 'PRIME', 'MINISTER', 'WE', 'MUST', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PRECIPITANCY'] +7105-2330-0014-2324: ref=['NOT', 'LATER', 'THAN', 'SEVEN', 'THIRTY', 'THEN', 'SAID', 'THE', 'CHIEF', 'ORGANISER', 'I', 'HAVE', 'PROMISED', 'THE', 'AGENT', 'DOWN', 'THERE', 'THAT', 'HE', 'SHALL', 'BE', 'ABLE', 'TO', 'DISPLAY', 'POSTERS', 'ANNOUNCING', 'PLATTERBAFF', 'IS', 'OUT', 'BEFORE', 'THE', 'POLL', 'OPENS'] +7105-2330-0014-2324: hyp=['NOT', 'LATER', 'THEN', 'SEVEN', 'THIRTY', 'THEN', 'SAID', 'THE', 'CHIEF', 'ORGANIZER', 'I', 'HAVE', 'PROMISED', 'THE', 'AGENT', 'DOWN', 'THERE', 'THAT', 'HE', 'SHALL', 'BE', 'ABLE', 'TO', 'DISPLAY', 'POSTERS', 'ANNOUNCING', 'PLATTER', 'BAFF', 'IS', 'OUT', 'BEHELD', 'A', 'POLE', 'OPENS'] +7105-2330-0015-2325: ref=['HE', 'SAID', 'IT', 'WAS', 'OUR', 'ONLY', 'CHANCE', 'OF', 'GETTING', 'A', 'TELEGRAM', 'RADPROP', 'IS', 'IN', 'TO', 'NIGHT'] +7105-2330-0015-2325: hyp=['HE', 'SAID', 'IT', 'WAS', 'HER', 'ONLY', 'CHANCE', 'OF', 'GETTING', 'A', 'TELEGRAM', 'RED', 'RAPIS', 'IN', 'TO', 'NIGHT'] +7105-2330-0016-2326: ref=['DESPITE', 'THE', 'EARLINESS', 'OF', 'THE', 'HOUR', 'A', 'SMALL', 'CROWD', 'HAD', 'GATHERED', 'IN', 'THE', 'STREET', 'OUTSIDE', 'AND', 'THE', 'HORRIBLE', 'MENACING', 'TRELAWNEY', 'REFRAIN', 'OF', 'THE', 'FIFTEEN', 'HUNDRED', 'VOTING', 'MEN', 'CAME', 'IN', 'A', 'STEADY', 'MONOTONOUS', 'CHANT'] +7105-2330-0016-2326: hyp=['THIS', 'SPEED', 'THE', 'EARLINESS', 'OF', 'THE', 'HOUR', 'A', 'SMALL', 'CROWD', 'HAD', 'GATHERED', 'IN', 'THE', 'STREET', 'OUTSIDE', 'AND', 'THE', 'HORRIBLE', 'MENACING', 'TREE', 'LONGER', 'REFRAIN', 'OF', 'THE', 'FIFTEEN', 'HUNDRED', 'VOTING', 'MEN', 'CAME', 'IN', 'A', 'STEADY', 'MONOTONOUS', 'CHANT'] +7105-2330-0017-2327: ref=['HE', 'EXCLAIMED', "WON'T", 'GO'] +7105-2330-0017-2327: hyp=['HE', 'EXCLAIMED', "WON'T", 'GO'] +7105-2330-0018-2328: ref=['HE', 'SAYS', 'HE', 'NEVER', 'HAS', 'LEFT', 'PRISON', 'WITHOUT', 'A', 'BRASS', 'BAND', 'TO', 'PLAY', 'HIM', 'OUT', 'AND', "HE'S", 'NOT', 'GOING', 'TO', 'GO', 'WITHOUT', 'ONE', 'NOW'] +7105-2330-0018-2328: hyp=['HE', 'SAYS', 'HE', 'NEVER', 'HAS', 'LEFT', 'PRISON', 'WITHOUT', 'A', 'BREASTPAND', 'TO', 'PLAY', 'HIM', 'OUT', 'AND', 'HE', 'SNUG', 'GOING', 'TO', 'GO', 'WITHOUT', 'ONE', 'NOW'] +7105-2330-0019-2329: ref=['SAID', 'THE', 'PRIME', 'MINISTER', 'WE', 'CAN', 'HARDLY', 'BE', 'SUPPOSED', 'TO', 'SUPPLY', 'A', 'RELEASED', 'PRISONER', 'WITH', 'A', 'BRASS', 'BAND', 'HOW', 'ON', 'EARTH', 'COULD', 'WE', 'DEFEND', 'IT', 'ON', 'THE', 'ESTIMATES'] +7105-2330-0019-2329: hyp=['SAID', 'A', 'PRIME', 'MINISTER', 'WE', 'CAN', 'HARDLY', 'BE', 'SUPPOSED', 'TO', 'SUPPLY', 'A', 'RELIEF', 'PRISONER', 'WITH', 'A', 'BRASS', 'BAND', 'HOW', 'ON', 'EARTH', 'COULD', 'WE', 'DEFEND', 'IT', 'UNDEST'] +7105-2330-0020-2330: ref=['ANYWAY', 'HE', "WON'T", 'GO', 'UNLESS', 'HE', 'HAS', 'A', 'BAND'] +7105-2330-0020-2330: hyp=['AN', 'AWAY', 'HE', "WON'T", 'GO', 'UNLESS', 'HE', 
'HAS', 'A', 'BAND'] +7105-2330-0021-2331: ref=['POLL', 'OPENS', 'IN', 'FIVE', 'MINUTES'] +7105-2330-0021-2331: hyp=['PAUL', 'OPENS', 'IN', 'FIVE', 'MINUTES'] +7105-2330-0022-2332: ref=['IS', 'PLATTERBAFF', 'OUT', 'YET'] +7105-2330-0022-2332: hyp=['IS', 'FLATHER', 'BATH', 'OUT', 'YET'] +7105-2330-0023-2333: ref=['IN', "HEAVEN'S", 'NAME', 'WHY'] +7105-2330-0023-2333: hyp=['IN', "HEAVEN'S", 'NAME', 'WHY'] +7105-2330-0024-2334: ref=['THE', 'CHIEF', 'ORGANISER', 'RANG', 'OFF'] +7105-2330-0024-2334: hyp=['THE', 'CHIEF', 'ORGANIZER', 'RANG', 'OFF'] +7105-2330-0025-2335: ref=['THIS', 'IS', 'NOT', 'A', 'MOMENT', 'FOR', 'STANDING', 'ON', 'DIGNITY', 'HE', 'OBSERVED', 'BLUNTLY', 'MUSICIANS', 'MUST', 'BE', 'SUPPLIED', 'AT', 'ONCE'] +7105-2330-0025-2335: hyp=['THIS', 'IS', 'NOT', 'A', 'MOMENT', 'FOR', 'STANDING', 'ON', 'DIGNITY', 'HE', 'OBSERVED', 'BLUNTLY', 'MY', 'SICIENTS', 'MUST', 'BE', 'SUPPLIED', 'AT', 'ONCE'] +7105-2330-0026-2336: ref=["CAN'T", 'YOU', 'GET', 'A', 'STRIKE', 'PERMIT', 'ASKED', 'THE', 'ORGANISER'] +7105-2330-0026-2336: hyp=["CAN'T", 'YOU', 'GET', 'A', 'STRIKE', 'PERMIT', 'ASKED', 'THE', 'ORGANIZER'] +7105-2330-0027-2337: ref=["I'LL", 'TRY', 'SAID', 'THE', 'HOME', 'SECRETARY', 'AND', 'WENT', 'TO', 'THE', 'TELEPHONE'] +7105-2330-0027-2337: hyp=["I'LL", 'TRY', 'SAID', 'THE', 'HOME', 'SECRETARY', 'AND', 'WENT', 'TO', 'THE', 'TELEPHONE'] +7105-2330-0028-2338: ref=['EIGHT', "O'CLOCK", 'STRUCK', 'THE', 'CROWD', 'OUTSIDE', 'CHANTED', 'WITH', 'AN', 'INCREASING', 'VOLUME', 'OF', 'SOUND', 'WILL', 'VOTE', 'THE', 'OTHER', 'WAY'] +7105-2330-0028-2338: hyp=['EIGHT', "O'CLOCK", 'STRUCK', 'THE', 'CROWD', 'OUTSIDE', 'CHANTED', 'WITH', 'AN', 'INCREASING', 'VOLUME', 'OF', 'SOUND', 'WITHOUT', 'THE', 'OTHER', 'WAY'] +7105-2330-0029-2339: ref=['A', 'TELEGRAM', 'WAS', 'BROUGHT', 'IN'] +7105-2330-0029-2339: hyp=['I', 'TELEGRAM', 'WAS', 'BROUGHT', 'IN'] +7105-2330-0030-2340: ref=['IT', 'WAS', 'FROM', 'THE', 'CENTRAL', 'COMMITTEE', 'ROOMS', 'AT', 'NEMESIS'] +7105-2330-0030-2340: hyp=['IT', 'WAS', 'FROM', 'THE', 'CENTRAL', 'COME', 'INTO', 'ROOMS', 'AT', 'NEMESIS'] +7105-2330-0031-2341: ref=['WITHOUT', 'A', 'BAND', 'HE', 'WOULD', 'NOT', 'GO', 'AND', 'THEY', 'HAD', 'NO', 'BAND'] +7105-2330-0031-2341: hyp=['WITHOUT', 'A', 'BAND', 'HE', 'WOULD', 'NOT', 'GO', 'AND', 'THEY', 'HAD', 'NO', 'BAND'] +7105-2330-0032-2342: ref=['A', 'QUARTER', 'PAST', 'TEN', 'HALF', 'PAST'] +7105-2330-0032-2342: hyp=['ACQUIRED', 'THEIR', 'PAST', 'TEN', 'HALF', 'PAST'] +7105-2330-0033-2343: ref=['HAVE', 'YOU', 'ANY', 'BAND', 'INSTRUMENTS', 'OF', 'AN', 'EASY', 'NATURE', 'TO', 'PLAY'] +7105-2330-0033-2343: hyp=['HAVE', 'YOU', 'ANY', 'BAND', 'INSTRUMENTS', 'OF', 'AN', 'EASY', 'NATURE', 'TO', 'PLAY'] +7105-2330-0034-2344: ref=['DEMANDED', 'THE', 'CHIEF', 'ORGANISER', 'OF', 'THE', 'PRISON', 'GOVERNOR', 'DRUMS', 'CYMBALS', 'THOSE', 'SORT', 'OF', 'THINGS'] +7105-2330-0034-2344: hyp=['DEMANDED', 'THE', 'CHIEF', 'ORGANIZER', 'OF', 'THE', 'PRISON', 'GOVERNOR', 'DRUMS', 'SYMBOLS', 'THOSE', 'SORT', 'OF', 'THINGS'] +7105-2330-0035-2345: ref=['THE', 'WARDERS', 'HAVE', 'A', 'PRIVATE', 'BAND', 'OF', 'THEIR', 'OWN', 'SAID', 'THE', 'GOVERNOR', 'BUT', 'OF', 'COURSE', 'I', "COULDN'T", 'ALLOW', 'THE', 'MEN', 'THEMSELVES'] +7105-2330-0035-2345: hyp=['THOUGH', 'OURS', 'HAVE', 'A', 'PRIVATE', 'BAND', 'OF', 'THEIR', 'OWN', 'SAID', 'THE', 'GOVERNOR', 'BUT', 'OF', 'COURSE', 'I', "COULDN'T", 'ALLOW', 'THEM', 'IN', 'THEMSELVES'] +7105-2330-0036-2346: ref=['LEND', 'US', 'THE', 'INSTRUMENTS', 'SAID', 'THE', 'CHIEF', 'ORGANISER'] +7105-2330-0036-2346: hyp=['LEND', 'US', 
'THE', 'INSTRUMENTS', 'SAID', 'THE', 'CHIEF', 'ORGANIZER'] +7105-2330-0037-2347: ref=['THE', 'POPULAR', 'SONG', 'OF', 'THE', 'MOMENT', 'REPLIED', 'THE', 'AGITATOR', 'AFTER', 'A', "MOMENT'S", 'REFLECTION'] +7105-2330-0037-2347: hyp=['THEIR', 'POPULAR', 'SONG', 'OF', 'THE', 'MOMENT', 'REPLIED', 'THE', 'AGITATOR', 'AFTER', 'A', "MOMENT'S", 'REFLECTION'] +7105-2330-0038-2348: ref=['IT', 'WAS', 'A', 'TUNE', 'THEY', 'HAD', 'ALL', 'HEARD', 'HUNDREDS', 'OF', 'TIMES', 'SO', 'THERE', 'WAS', 'NO', 'DIFFICULTY', 'IN', 'TURNING', 'OUT', 'A', 'PASSABLE', 'IMITATION', 'OF', 'IT', 'TO', 'THE', 'IMPROVISED', 'STRAINS', 'OF', 'I', "DIDN'T", 'WANT', 'TO', 'DO', 'IT', 'THE', 'PRISONER', 'STRODE', 'FORTH', 'TO', 'FREEDOM'] +7105-2330-0038-2348: hyp=['IT', 'WAS', 'A', 'TUNE', 'THEY', 'HAD', 'ALL', 'HEARD', 'HUNDREDS', 'OF', 'TIMES', 'SO', 'THERE', 'WAS', 'NO', 'DIFFICULTY', 'IN', 'TURNING', 'OUT', 'A', 'PASSABLE', 'IMITATION', 'OF', 'IT', 'TO', 'THE', 'IMPROVISED', 'TRAINS', 'OF', 'I', "DON'T", 'WANT', 'TO', 'DO', 'IT', 'THE', 'PRISONERS', 'STROLLED', 'FORTH', 'TO', 'FREEDOM'] +7105-2330-0039-2349: ref=['THE', 'WORD', 'OF', 'THE', 'SONG', 'HAD', 'REFERENCE', 'IT', 'WAS', 'UNDERSTOOD', 'TO', 'THE', 'INCARCERATING', 'GOVERNMENT', 'AND', 'NOT', 'TO', 'THE', 'DESTROYER', 'OF', 'THE', 'ALBERT', 'HALL'] +7105-2330-0039-2349: hyp=['THE', 'WORD', 'OF', 'THE', 'SONG', 'HAD', 'REFERENCE', 'IT', 'WAS', 'UNDERSTOOD', 'THAT', 'THE', 'INCARCERATING', 'GOVERNMENT', 'AND', 'NOT', 'TO', 'THE', 'DESTROYER', 'OF', 'THE', 'ALBERT', 'HALL'] +7105-2330-0040-2350: ref=['THE', 'SEAT', 'WAS', 'LOST', 'AFTER', 'ALL', 'BY', 'A', 'NARROW', 'MAJORITY'] +7105-2330-0040-2350: hyp=['THIS', 'SEAT', 'WAS', 'LOST', 'AFTER', 'ALL', 'BY', 'A', 'NARROW', 'MATURITY'] +7105-2330-0041-2351: ref=['THE', 'LOCAL', 'TRADE', 'UNIONISTS', 'TOOK', 'OFFENCE', 'AT', 'THE', 'FACT', 'OF', 'CABINET', 'MINISTERS', 'HAVING', 'PERSONALLY', 'ACTED', 'AS', 'STRIKE', 'BREAKERS', 'AND', 'EVEN', 'THE', 'RELEASE', 'OF', 'PLATTERBAFF', 'FAILED', 'TO', 'PACIFY', 'THEM'] +7105-2330-0041-2351: hyp=['THE', 'LOCAL', 'TRADE', 'UNIONISTS', 'TOOK', 'OFFENCE', 'AT', 'THE', 'FACT', 'OF', 'CABINET', 'MINISTERS', 'HAVING', 'PERSONALLY', 'ACTED', 'AS', 'STRIKE', 'BREAKERS', 'AND', 'EVEN', 'THE', 'RELEASE', 'OF', 'PLATTERBUFF', 'FAILED', 'TO', 'PACIFY', 'THEM'] +7105-2340-0000-2272: ref=['WITH', 'THAT', 'NOTORIOUS', 'FAILING', 'OF', 'HIS', 'HE', 'WAS', 'NOT', 'THE', 'SORT', 'OF', 'PERSON', 'ONE', 'WANTED', 'IN', "ONE'S", 'HOUSE'] +7105-2340-0000-2272: hyp=['WITH', 'THAT', 'NOTORIOUS', 'FAILING', 'OF', 'HIS', 'HE', 'WAS', 'NOT', 'A', 'SORT', 'OF', 'PERSON', 'ONE', 'WANTED', 'IN', "ONE'S", 'HOUSE'] +7105-2340-0001-2273: ref=['WELL', 'THE', 'FAILING', 'STILL', 'EXISTS', "DOESN'T", 'IT', 'SAID', 'HER', 'HUSBAND', 'OR', 'DO', 'YOU', 'SUPPOSE', 'A', 'REFORM', 'OF', 'CHARACTER', 'IS', 'ENTAILED', 'ALONG', 'WITH', 'THE', 'ESTATE'] +7105-2340-0001-2273: hyp=['WELL', 'THE', 'FAILING', 'STILL', 'EXISTS', 'DOESNATE', 'SAID', 'THE', 'HUSBAND', 'OR', 'DO', 'YOU', 'SUPPOSE', 'A', 'REFORM', 'OF', 'CHARACTER', 'IS', 'ENTAILED', 'ALONG', 'WITH', 'THE', 'ESTATE'] +7105-2340-0002-2274: ref=['BESIDES', 'CYNICISM', 'APART', 'HIS', 'BEING', 'RICH', 'WILL', 'MAKE', 'A', 'DIFFERENCE', 'IN', 'THE', 'WAY', 'PEOPLE', 'WILL', 'LOOK', 'AT', 'HIS', 'FAILING'] +7105-2340-0002-2274: hyp=['BESIDES', 'CYNICISM', 'APART', 'IS', 'VERY', 'RICH', 'WE', 'MAKE', 'A', 'DIFFERENCE', 'IN', 'THE', 'WAY', 'PEOPLE', 'WILL', 'LOOK', 'AT', 'HIS', 'FEELING'] +7105-2340-0003-2275: ref=['WHEN', 'A', 'MAN', 'IS', 'ABSOLUTELY', 'WEALTHY', 
'NOT', 'MERELY', 'WELL', 'TO', 'DO', 'ALL', 'SUSPICION', 'OF', 'SORDID', 'MOTIVE', 'NATURALLY', 'DISAPPEARS', 'THE', 'THING', 'BECOMES', 'MERELY', 'A', 'TIRESOME', 'MALADY'] +7105-2340-0003-2275: hyp=['WHEN', 'A', 'MAN', 'IS', 'ABSOLUTELY', 'WEALTHY', 'NOT', 'MERELY', 'WELL', 'TO', 'DO', 'ALL', 'SUSPICION', 'OF', 'SARDID', 'MOTIVE', 'NATURAL', 'DISAPPEARS', 'THE', 'THING', 'BECOMES', 'MERELY', 'A', 'PARASSOME', 'MALADY'] +7105-2340-0004-2276: ref=['WILFRID', 'PIGEONCOTE', 'HAD', 'SUDDENLY', 'BECOME', 'HEIR', 'TO', 'HIS', 'UNCLE', 'SIR', 'WILFRID', 'PIGEONCOTE', 'ON', 'THE', 'DEATH', 'OF', 'HIS', 'COUSIN', 'MAJOR', 'WILFRID', 'PIGEONCOTE', 'WHO', 'HAD', 'SUCCUMBED', 'TO', 'THE', 'AFTER', 'EFFECTS', 'OF', 'A', 'POLO', 'ACCIDENT'] +7105-2340-0004-2276: hyp=['WILFRIED', 'DIGEON', 'CODE', 'HAD', 'SUDDENLY', 'BECOME', 'HEIR', 'TO', 'HIS', 'UNCLE', 'SIR', 'WILL', 'FID', 'PIGEON', 'COAT', 'UNDER', 'DEATH', 'OF', 'HIS', 'COUSIN', 'MAJOR', 'WILFRID', 'PIGEONCOTE', 'WHO', 'HAD', 'SUCCUMBED', 'THE', 'AFTER', 'EFFECTS', 'OF', 'APOLLO', 'ACCIDENT'] +7105-2340-0005-2277: ref=['A', 'WILFRID', 'PIGEONCOTE', 'HAD', 'COVERED', 'HIMSELF', 'WITH', 'HONOURS', 'IN', 'THE', 'COURSE', 'OF', "MARLBOROUGH'S", 'CAMPAIGNS', 'AND', 'THE', 'NAME', 'WILFRID', 'HAD', 'BEEN', 'A', 'BAPTISMAL', 'WEAKNESS', 'IN', 'THE', 'FAMILY', 'EVER', 'SINCE', 'THE', 'NEW', 'HEIR', 'TO', 'THE', 'FAMILY', 'DIGNITY', 'AND', 'ESTATES', 'WAS', 'A', 'YOUNG', 'MAN', 'OF', 'ABOUT', 'FIVE', 'AND', 'TWENTY', 'WHO', 'WAS', 'KNOWN', 'MORE', 'BY', 'REPUTATION', 'THAN', 'BY', 'PERSON', 'TO', 'A', 'WIDE', 'CIRCLE', 'OF', 'COUSINS', 'AND', 'KINSFOLK'] +7105-2340-0005-2277: hyp=['OUR', 'WILFRED', 'FEATURE', 'HAD', 'COVERED', 'HIMSELF', 'WITH', 'HONORS', 'IN', 'THE', 'COURSE', 'OF', "MARLBOROUGH'S", 'CAMPAIGNS', 'AND', 'THE', 'NAME', 'WILFRID', 'HAD', 'BEEN', 'A', 'BABYSMAL', 'WEAKNESS', 'IN', 'THE', 'FAMILY', 'EVER', 'SINCE', 'THE', 'NEW', 'HEIR', 'TO', 'THE', 'FAMILY', 'DIGNITY', 'AND', 'ESTATES', 'WAS', 'A', 'YOUNG', 'MAN', 'OF', 'ABOUT', 'FIVE', 'AND', 'TWENTY', 'WHO', 'WAS', 'KNOWN', 'MORE', 'BY', 'REPETITION', 'THAN', 'BY', 'PERSON', 'TO', 'AVOID', 'CIRCLE', 'OF', 'COUSINS', 'AND', 'KINSFOLK'] +7105-2340-0006-2278: ref=['AND', 'THE', 'REPUTATION', 'WAS', 'AN', 'UNPLEASANT', 'ONE'] +7105-2340-0006-2278: hyp=['AND', 'THE', 'REPUTATION', 'WAS', 'AN', 'UNPLEASANT', 'ONE'] +7105-2340-0007-2279: ref=['FROM', 'HIS', 'LATE', 'SCHOOLDAYS', 'ONWARD', 'HE', 'HAD', 'BEEN', 'POSSESSED', 'BY', 'AN', 'ACUTE', 'AND', 'OBSTINATE', 'FORM', 'OF', 'KLEPTOMANIA', 'HE', 'HAD', 'THE', 'ACQUISITIVE', 'INSTINCT', 'OF', 'THE', 'COLLECTOR', 'WITHOUT', 'ANY', 'OF', 'THE', "COLLECTOR'S", 'DISCRIMINATION'] +7105-2340-0007-2279: hyp=['FROM', 'HIS', 'LATE', 'SCHOOL', 'DAYS', 'ONWARD', 'HE', 'HAD', 'BEEN', 'POSSESSED', 'BY', 'AN', 'ACUTE', 'AND', 'OBSTINATE', 'FORM', 'OF', 'CLUBTOMANIA', 'HE', 'HAD', 'THE', 'ACQUISITIVE', 'INSTINCT', 'OF', 'THE', 'COLLECTOR', 'WITHOUT', 'ANY', 'OF', 'THE', "COLLECTOR'S", 'DISCRIMINATION'] +7105-2340-0008-2280: ref=['THE', 'SEARCH', 'USUALLY', 'PRODUCED', 'A', 'LARGE', 'AND', 'VARIED', 'YIELD', 'THIS', 'IS', 'FUNNY', 'SAID', 'PETER', 'PIGEONCOTE', 'TO', 'HIS', 'WIFE', 'SOME', 'HALF', 'HOUR', 'AFTER', 'THEIR', 'CONVERSATION', "HERE'S", 'A', 'TELEGRAM', 'FROM', 'WILFRID', 'SAYING', "HE'S", 'PASSING', 'THROUGH', 'HERE', 'IN', 'HIS', 'MOTOR', 'AND', 'WOULD', 'LIKE', 'TO', 'STOP', 'AND', 'PAY', 'US', 'HIS', 'RESPECTS'] +7105-2340-0008-2280: hyp=['THIS', 'SEARCH', 'USUALLY', 'PRODUCED', 'A', 'LARGE', 'AND', 'VARIED', 'YIELD', 'THIS', 'IS', 'FUNNY', 
'SAID', 'PETER', 'PIGEON', 'BOAT', 'TO', 'HIS', 'WIFE', "I'M", 'HALF', 'OUR', 'AFTER', 'THEIR', 'CONVERSATION', "HERE'S", 'A', 'TELEGRAM', 'FROM', 'WILFRID', 'SAYING', "HE'S", 'PASSING', 'THROUGH', 'HERE', 'IN', 'HIS', 'MOTTAR', 'AND', 'WOULD', 'LIKE', 'TO', 'STOP', 'AND', 'PAY', 'US', 'HIS', 'RESPECTS'] +7105-2340-0009-2281: ref=['SIGNED', 'WILFRID', 'PIGEONCOTE'] +7105-2340-0009-2281: hyp=['SIGN', 'WILFRED', 'PIGEON', 'COAT'] +7105-2340-0010-2282: ref=['I', 'SUPPOSE', "HE'S", 'BRINGING', 'US', 'A', 'PRESENT', 'FOR', 'THE', 'SILVER', 'WEDDING', 'GOOD', 'GRACIOUS'] +7105-2340-0010-2282: hyp=['I', 'SUPPOSE', 'IS', 'BRINGING', 'US', 'A', 'PRESENT', 'FURTHER', 'SILVER', 'WEDDING', 'GOOD', 'GRACIOUS'] +7105-2340-0011-2283: ref=['THE', 'TALK', 'FLITTED', 'NERVOUSLY', 'AND', 'HURRIEDLY', 'FROM', 'ONE', 'IMPERSONAL', 'TOPIC', 'TO', 'ANOTHER'] +7105-2340-0011-2283: hyp=['THE', 'TALK', 'FLITTED', 'NERVOUSLY', 'AND', 'HURRIEDLY', 'FROM', 'ONE', 'IMPERSONAL', 'TOPIC', 'TO', 'ANOTHER'] +7105-2340-0012-2284: ref=['IN', 'THE', 'DRAWING', 'ROOM', 'AFTER', 'DINNER', 'THEIR', 'NERVOUSNESS', 'AND', 'AWKWARDNESS', 'INCREASED'] +7105-2340-0012-2284: hyp=['IN', 'THE', 'DRAWING', 'ROOM', 'AFTER', 'DINNER', 'THEIR', 'NERVOUSNESS', 'AND', 'AWKWARDNESS', 'INCREASED'] +7105-2340-0013-2285: ref=['OH', 'WE', "HAVEN'T", 'SHOWN', 'YOU', 'THE', 'SILVER', 'WEDDING', 'PRESENTS', 'SAID', 'MISSUS', 'PETER', 'SUDDENLY', 'AS', 'THOUGH', 'STRUCK', 'BY', 'A', 'BRILLIANT', 'IDEA', 'FOR', 'ENTERTAINING', 'THE', 'GUEST', 'HERE', 'THEY', 'ALL', 'ARE'] +7105-2340-0013-2285: hyp=['OH', 'WE', "HAVEN'T", 'SHOWN', 'YOU', 'THE', 'SILVER', 'WEDDING', 'PRESENTS', 'SAID', 'MISSUS', 'PETER', 'SUDDENLY', 'AS', 'THOSE', 'STRUCK', 'BY', 'A', 'BRILLIANT', 'IDEA', 'OF', 'HER', 'ENTERTAINING', 'THE', 'GUEST', 'HERE', 'THEY', 'ALL', 'ARE'] +7105-2340-0014-2286: ref=['SUCH', 'NICE', 'USEFUL', 'GIFTS', 'A', 'FEW', 'DUPLICATES', 'OF', 'COURSE'] +7105-2340-0014-2286: hyp=['SUCH', 'NICE', 'YEARS', 'FORGIVES', 'A', 'FEW', 'DIPPLICATES', 'OF', 'COURSE'] +7105-2340-0015-2287: ref=['SEVEN', 'CREAM', 'JUGS', 'PUT', 'IN', 'PETER'] +7105-2340-0015-2287: hyp=['SEVEN', 'CREAM', 'JUGS', 'PUT', 'IN', 'PETER'] +7105-2340-0016-2288: ref=['WE', 'FEEL', 'THAT', 'WE', 'MUST', 'LIVE', 'ON', 'CREAM', 'FOR', 'THE', 'REST', 'OF', 'OUR', 'LIVES'] +7105-2340-0016-2288: hyp=['WE', 'FEEL', 'THAT', 'WE', 'MUST', 'LIVE', 'UNCLEAN', 'FOR', 'THE', 'REST', 'OF', 'OUR', 'LIVES'] +7105-2340-0017-2289: ref=['OF', 'COURSE', 'SOME', 'OF', 'THEM', 'CAN', 'BE', 'CHANGED'] +7105-2340-0017-2289: hyp=['OF', 'COURSE', 'SOME', 'OF', 'THEM', 'CAN', 'BE', 'CHANGED'] +7105-2340-0018-2290: ref=['I', 'PUT', 'IT', 'DOWN', 'BY', 'THE', 'CLARET', 'JUG', 'SAID', 'WILFRID', 'BUSY', 'WITH', 'ANOTHER', 'OBJECT'] +7105-2340-0018-2290: hyp=['I', 'PUT', 'IT', 'DOWN', 'BY', 'THE', 'CLARA', 'JUG', 'SAID', 'WILFRID', 'BUSY', 'WITH', 'ANOTHER', 'OBJECT'] +7105-2340-0019-2291: ref=['VIGILANCE', 'WAS', 'NOT', 'COMPLETELY', 'CROWNED', 'WITH', 'A', 'SENSE', 'OF', 'VICTORY'] +7105-2340-0019-2291: hyp=['VICHILLENZ', 'WAS', 'NOT', 'COMPLETELY', 'CROWNED', 'WITH', 'A', 'SENSE', 'OF', 'VICTORY'] +7105-2340-0020-2292: ref=['AFTER', 'THEY', 'HAD', 'SAID', 'GOOD', 'NIGHT', 'TO', 'THEIR', 'VISITOR', 'MISSUS', 'PETER', 'EXPRESSED', 'HER', 'CONVICTION', 'THAT', 'HE', 'HAD', 'TAKEN', 'SOMETHING'] +7105-2340-0020-2292: hyp=['AFTER', 'THEY', 'HAD', 'SAID', 'GOOD', 'NIGHT', 'TO', 'THEIR', 'VISITOR', 'MISSUS', 'PETER', 'EXPRESSED', 'HER', 'CONVICTION', 'THAT', 'HE', 'HAD', 'TAKEN', 'SOMETHING'] +7105-2340-0021-2293: ref=['HOW', 
'ON', 'EARTH', 'ARE', 'WE', 'TO', 'KNOW', 'SAID', 'PETER', 'THE', 'MEAN', 'PIG', "HASN'T", 'BROUGHT', 'US', 'A', 'PRESENT', 'AND', "I'M", 'HANGED', 'IF', 'HE', 'SHALL', 'CARRY', 'ONE', 'OFF'] +7105-2340-0021-2293: hyp=['HOW', 'ON', 'EARTH', 'ARE', 'WE', 'TO', 'KNOW', 'SAID', 'PETER', 'THE', 'MEAN', 'PIG', "HASN'T", 'BROUGHT', 'US', 'A', 'PRESENT', 'AND', "I'M", 'HANGED', 'IF', 'HE', 'SHALL', 'CARRY', 'ONE', 'OFF'] +7105-2340-0022-2294: ref=["IT'S", 'THE', 'ONLY', 'THING', 'TO', 'DO'] +7105-2340-0022-2294: hyp=['IS', 'THE', 'ONLY', 'THING', 'TO', 'DO'] +7105-2340-0023-2295: ref=['WILFRID', 'WAS', 'LATE', 'IN', 'COMING', 'DOWN', 'TO', 'BREAKFAST', 'AND', 'HIS', 'MANNER', 'SHOWED', 'PLAINLY', 'THAT', 'SOMETHING', 'WAS', 'AMISS'] +7105-2340-0023-2295: hyp=['WILFRED', 'WAS', 'LAID', 'IN', 'COMING', 'DOWN', 'TO', 'BREAKFAST', 'AND', 'HIS', 'MANNERS', 'SHOWED', 'PLAINLY', 'THAT', 'SOMETHING', 'WAS', 'AMISS'] +7105-2340-0024-2296: ref=["IT'S", 'AN', 'UNPLEASANT', 'THING', 'TO', 'HAVE', 'TO', 'SAY', 'HE', 'BLURTED', 'OUT', 'PRESENTLY', 'BUT', "I'M", 'AFRAID', 'YOU', 'MUST', 'HAVE', 'A', 'THIEF', 'AMONG', 'YOUR', 'SERVANTS', "SOMETHING'S", 'BEEN', 'TAKEN', 'OUT', 'OF', 'MY', 'PORTMANTEAU'] +7105-2340-0024-2296: hyp=['YES', 'AND', 'AN', 'UNPLEASANT', 'THING', 'TO', 'HAVE', 'TO', 'SAY', 'HE', 'BLURTED', 'OUT', 'PRESENTLY', 'BUT', "I'M", 'AFRAID', 'YOU', 'MUST', 'HAVE', 'A', 'THIEF', 'AMONG', 'YOUR', 'SERVANTS', "SOMETHING'S", 'BEEN', 'TAKEN', 'OUT', 'OF', 'MY', 'PART', 'MANTLE'] +7105-2340-0025-2297: ref=['IT', 'WAS', 'A', 'LITTLE', 'PRESENT', 'FROM', 'MY', 'MOTHER', 'AND', 'MYSELF', 'FOR', 'YOUR', 'SILVER', 'WEDDING'] +7105-2340-0025-2297: hyp=['IT', 'WAS', 'A', 'LITTLE', 'PRESENT', 'FOR', 'MY', 'MOTHER', 'AND', 'MYSELF', 'FOR', 'YOUR', 'SILVER', 'WEDDING'] +7105-2340-0026-2298: ref=['I', 'SHOULD', 'HAVE', 'GIVEN', 'IT', 'TO', 'YOU', 'LAST', 'NIGHT', 'AFTER', 'DINNER', 'ONLY', 'IT', 'HAPPENED', 'TO', 'BE', 'A', 'CREAM', 'JUG', 'AND', 'YOU', 'SEEMED', 'ANNOYED', 'AT', 'HAVING', 'SO', 'MANY', 'DUPLICATES', 'SO', 'I', 'FELT', 'RATHER', 'AWKWARD', 'ABOUT', 'GIVING', 'YOU', 'ANOTHER'] +7105-2340-0026-2298: hyp=['I', 'SHOULD', 'HAVE', 'GIVEN', 'IT', 'TO', 'YOU', 'LAST', 'NIGHT', 'AFTER', 'DINNER', 'ON', 'IT', 'HAPPENED', 'TO', 'BE', 'A', 'QUEEN', 'JUG', 'AND', 'YOU', 'SEEMED', 'ANNOYED', 'AT', 'HAVING', 'SO', 'MANY', 'DUPLICATES', 'SO', 'I', 'FELT', 'RATHER', 'AWKWARD', 'OF', 'A', 'GIVING', 'YOU', 'ANOTHER'] +7105-2340-0027-2299: ref=['THE', 'SNATCHER', 'HAD', 'BEEN', 'AN', 'ORPHAN', 'THESE', 'MANY', 'YEARS'] +7105-2340-0027-2299: hyp=['THIS', 'NATURE', 'HAD', 'BEEN', 'AN', 'ORPHAN', 'THIS', 'MANY', 'YEARS'] +7105-2340-0028-2300: ref=['LADY', 'ERNESTINE', 'PIGEONCOTE', 'HIS', 'MOTHER', 'MOVED', 'IN', 'CIRCLES', 'WHICH', 'WERE', 'ENTIRELY', 'BEYOND', 'THEIR', 'COMPASS', 'OR', 'AMBITIONS', 'AND', 'THE', 'SON', 'WOULD', 'PROBABLY', 'ONE', 'DAY', 'BE', 'AN', 'AMBASSADOR'] +7105-2340-0028-2300: hyp=['LADY', 'ERNESTON', 'BEEJON', 'KOTE', 'HIS', 'MOTHER', 'MOVED', 'IN', 'CIRCLES', 'WHICH', 'WERE', 'ENTIRELY', 'BEYOND', 'THEIR', 'COMPASS', 'OR', 'AMBITIONS', 'AND', 'THE', 'SON', 'WOULD', 'PROBABLY', 'ONE', 'DAY', 'BE', 'AN', 'AMBASSADOR'] +7105-2340-0029-2301: ref=['HUSBAND', 'AND', 'WIFE', 'LOOKED', 'BLANKLY', 'AND', 'DESPERATELY', 'AT', 'ONE', 'ANOTHER'] +7105-2340-0029-2301: hyp=['HUSBAND', 'AND', 'WIFE', 'LOOKED', 'BLANKLY', 'AND', 'DESPERATELY', 'AT', 'ONE', 'ANOTHER'] +7105-2340-0030-2302: ref=['IT', 'WAS', 'MISSUS', 'PETER', 'WHO', 'ARRIVED', 'FIRST', 'AT', 'AN', 'INSPIRATION', 'HOW', 'DREADFUL', 'TO', 
'THINK', 'THERE', 'ARE', 'THIEVES', 'IN', 'THE', 'HOUSE', 'WE', 'KEEP', 'THE', 'DRAWING', 'ROOM', 'LOCKED', 'UP', 'AT', 'NIGHT', 'OF', 'COURSE', 'BUT', 'ANYTHING', 'MIGHT', 'BE', 'CARRIED', 'OFF', 'WHILE', 'WE', 'ARE', 'AT', 'BREAKFAST'] +7105-2340-0030-2302: hyp=['IT', 'WAS', 'MISSUS', 'PETER', 'WHO', 'ARRIVED', 'FIRST', 'AT', 'AN', 'INSPIRATION', 'HOW', 'DREADFUL', 'THE', 'THING', 'THERE', 'ARE', 'THIEVES', 'IN', 'THE', 'HOUSE', 'WE', 'GIVE', 'THE', 'DRAWING', 'ROOM', 'LOCKED', 'UP', 'AT', 'NIGHT', 'OF', 'COURSE', 'BUT', 'ANYTHING', 'MIGHT', 'BE', 'CARRIED', 'OFF', 'WHILE', 'WE', 'WERE', 'AT', 'BREAKFAST'] +7105-2340-0031-2303: ref=['SHE', 'ROSE', 'AND', 'WENT', 'OUT', 'HURRIEDLY', 'AS', 'THOUGH', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'THE', 'DRAWING', 'ROOM', 'WAS', 'NOT', 'BEING', 'STRIPPED', 'OF', 'ITS', 'SILVERWARE', 'AND', 'RETURNED', 'A', 'MOMENT', 'LATER', 'BEARING', 'A', 'CREAM', 'JUG', 'IN', 'HER', 'HANDS'] +7105-2340-0031-2303: hyp=['SHE', 'ROSE', 'AND', 'WENT', 'OUT', 'HURRIEDLY', 'AS', 'THOUGH', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'THE', 'DRAWING', 'ROOM', 'WAS', 'NOT', 'BEING', 'STRIPPED', 'OF', 'ITS', 'SILVERWARE', 'AND', 'RETURNED', 'A', 'MOMENT', 'LATER', 'BEARING', 'A', 'CREAM', 'CHUG', 'IN', 'HER', 'HANDS'] +7105-2340-0032-2304: ref=['THE', 'PIGEONCOTES', 'HAD', 'TURNED', 'PALER', 'THAN', 'EVER', 'MISSUS', 'PETER', 'HAD', 'A', 'FINAL', 'INSPIRATION'] +7105-2340-0032-2304: hyp=['THE', 'PIGEON', 'COATS', 'HAD', 'TURNED', 'PALER', 'THAN', 'EVER', 'MISSUS', 'PETER', 'HAD', 'A', 'FINAL', 'INSPIRATION'] +7105-2340-0033-2305: ref=['PETER', 'DASHED', 'OUT', 'OF', 'THE', 'ROOM', 'WITH', 'GLAD', 'RELIEF', 'HE', 'HAD', 'LIVED', 'SO', 'LONG', 'DURING', 'THE', 'LAST', 'FEW', 'MINUTES', 'THAT', 'A', 'GOLDEN', 'WEDDING', 'SEEMED', 'WITHIN', 'MEASURABLE', 'DISTANCE'] +7105-2340-0033-2305: hyp=['PETER', 'DASHED', 'OUT', 'OF', 'THE', 'ROOM', 'WITH', 'GLAD', 'RELIEF', 'HE', 'HAD', 'LIVED', 'SO', 'LONG', 'DURING', 'THE', 'LAST', 'FEW', 'MINUTES', 'THAT', 'A', 'GOLDEN', 'WEDDING', 'SEEMED', 'WITHIN', 'MEASURABLE', 'DISTANCE'] +7105-2340-0034-2306: ref=['MISSUS', 'PETER', 'TURNED', 'TO', 'HER', 'GUEST', 'WITH', 'CONFIDENTIAL', 'COYNESS'] +7105-2340-0034-2306: hyp=['MISSUS', 'PETER', 'TURNED', 'TO', 'HER', 'GUEST', 'WITH', 'CONFIDENTIAL', 'KINDNESS'] +7105-2340-0035-2307: ref=["PETER'S", 'LITTLE', 'WEAKNESS', 'IT', 'RUNS', 'IN', 'THE', 'FAMILY', 'GOOD', 'LORD'] +7105-2340-0035-2307: hyp=["PETER'S", 'LITTLE', 'WEAKNESS', 'A', 'TRANSCENDI', 'FAMILY', 'GOOD', 'LORD'] +7105-2340-0036-2308: ref=['DO', 'YOU', 'MEAN', 'TO', 'SAY', "HE'S", 'A', 'KLEPTOMANIAC', 'LIKE', 'COUSIN', 'SNATCHER'] +7105-2340-0036-2308: hyp=['DO', 'YOU', 'MEAN', 'TO', 'SAY', "HE'S", 'A', 'CLEPTOMANIA', 'LIKE', 'COUSIN', 'SNATCHER'] +7105-2340-0037-2309: ref=['BRAVE', 'LITTLE', 'WOMAN', 'SAID', 'PETER', 'WITH', 'A', 'GASP', 'OF', 'RELIEF', 'I', 'COULD', 'NEVER', 'HAVE', 'DONE', 'IT'] +7105-2340-0037-2309: hyp=['BRAVE', 'LITTLE', 'WOMAN', 'SAID', 'PETER', 'WITH', 'A', 'GASP', 'OF', 'RELIEF', 'I', 'COULD', 'NEVER', 'HAVE', 'DONE', 'IT'] +7902-96591-0000-0: ref=['I', 'AM', 'FROM', 'THE', 'CUTTER', 'LYING', 'OFF', 'THE', 'COAST'] +7902-96591-0000-0: hyp=["I'M", 'FROM', 'THE', 'CUTTER', 'LYING', 'OFF', 'THE', 'COAST'] +7902-96591-0001-1: ref=["DON'T", 'CRY', 'HE', 'SAID', 'I', 'WAS', 'OBLIGED', 'TO', 'COME'] +7902-96591-0001-1: hyp=["DON'T", 'CRY', 'HE', 'SAID', 'I', 'WAS', 'OBLIGED', 'TO', 'COME'] +7902-96591-0002-2: ref=['AND', 'AND', 'YOU', 'HAVE', 'NOT', 'FOUND', 'OUT', 'ANYTHING', 'CAME', 'IN', 'QUICK', 'FRIGHTENED', 'TONES'] 
+7902-96591-0002-2: hyp=['AND', 'AND', 'YOU', 'HAVE', 'NOT', 'FOUND', 'OUT', 'ANYTHING', 'CAME', 'IN', 'QUICK', 'FRIGHTENED', 'TONES'] +7902-96591-0003-3: ref=['I', 'WISH', 'YOU', 'WOULD', 'BELIEVE', 'ME', 'THAT', 'I', 'AM', 'IN', 'AS', 'GREAT', 'TROUBLE', 'ABOUT', 'IT', 'AS', 'YOU', 'ARE'] +7902-96591-0003-3: hyp=['I', 'WISH', 'YOU', 'WOULD', 'BELIEVE', 'ME', 'THAT', 'I', 'AM', 'IN', 'AS', 'GREAT', 'TROUBLE', 'ABOUT', 'IT', 'AS', 'YOU', 'ARE'] +7902-96591-0004-4: ref=['THAT', 'MY', 'FATHER', 'SIR', 'RISDON', 'GRAEME', 'HAS', 'SMUGGLED', 'GOODS', 'HERE'] +7902-96591-0004-4: hyp=['THAT', 'MY', 'FATHER', 'SIR', 'RISDON', 'GRAHAM', 'SMUGGLED', 'GOODS', 'HERE'] +7902-96591-0005-5: ref=['HE', 'COULD', 'NOT', 'HELP', 'IT', 'HE', 'HATES', 'THE', 'SMUGGLERS', 'YOU', 'SHALL', 'NOT', 'TELL'] +7902-96591-0005-5: hyp=['HE', 'COULD', 'NOT', 'HELP', 'IT', 'HE', 'HATE', 'THIS', 'MOTHERS', 'YOU', 'SHALL', 'NOT', 'TELL'] +7902-96591-0006-6: ref=['PRAY', 'PRAY', 'SAY', 'YOU', 'WILL', 'NOT', 'ARCHY', 'WAS', 'SILENT'] +7902-96591-0006-6: hyp=['PRAY', 'PRAY', 'SAY', 'YOU', 'WILL', 'NOT', 'ARCHIE', 'WAS', 'SILENT'] +7902-96591-0007-7: ref=['THEN', 'AS', 'ARCHY', 'STOOD', 'IN', 'THE', 'DARK', 'LITERALLY', 'AGHAST', 'WITH', 'ASTONISHMENT', 'HE', 'HEARD', 'THE', 'FAINT', 'RUSTLING', 'ONCE', 'MORE', 'AND', 'AGAIN', 'ALL', 'WAS', 'SILENT'] +7902-96591-0007-7: hyp=['THEN', 'AS', 'ARCHIE', 'STOOD', 'IN', 'THE', 'DARK', 'LITERALLY', 'AGHAST', 'WITH', 'ASTONISHMENT', 'HE', 'HEARD', 'THE', 'FAINT', 'RUSTLING', 'ONCE', 'MORE', 'AND', 'AGAIN', 'ALL', 'WAS', 'SILENT'] +7902-96591-0008-8: ref=['HE', 'LAUGHED', 'BUT', 'IT', 'WAS', 'A', 'CURIOUS', 'KIND', 'OF', 'LAUGH', 'FULL', 'OF', 'VEXATION', 'INJURED', 'AMOUR', 'PROPRE', 'AS', 'THE', 'FRENCH', 'CALL', 'OUR', 'LOVE', 'OF', 'OUR', 'OWN', 'DIGNITY', 'OF', 'WHICH', 'ARCHIBALD', 'RAYSTOKE', 'IN', 'THE', 'FULL', 'FLUSH', 'OF', 'HIS', 'YOUNG', 'BELIEF', 'IN', 'HIS', 'IMPORTANCE', 'AS', 'A', 'BRITISH', 'OFFICER', 'HAD', 'A', 'PRETTY', 'GOOD', 'STOCK'] +7902-96591-0008-8: hyp=['HE', 'LAUGHED', 'BUT', 'IT', 'WAS', 'A', 'CURIOUS', 'KIND', 'OF', 'LAUGH', 'FULL', 'OF', 'VEXATION', 'INJURED', 'AMOPRA', 'AS', 'THE', 'FRENCH', 'CALLER', 'LOVE', 'OF', 'HER', 'OWN', 'DIGNITY', 'OF', 'WHICH', 'ARQUEBALD', 'RAYSTROKE', 'IN', 'THE', 'FULL', 'FLUSH', 'OF', 'HIS', 'YOUNG', 'BELIEF', 'IN', 'HIS', 'IMPORTANCE', 'AS', 'A', 'BRITISH', 'OFFICER', 'HAD', 'A', 'PRETTY', 'GOOD', 'STOCK'] +7902-96591-0009-9: ref=['IT', 'ALL', 'COMES', 'OF', 'DRESSING', 'UP', 'IN', 'THIS', 'STUPID', 'WAY', 'LIKE', 'A', 'ROUGH', 'FISHER', 'LAD'] +7902-96591-0009-9: hyp=['AND', 'ALL', 'COMES', 'OF', 'DRESSING', 'UP', 'IN', 'THE', 'STUPID', 'WAY', 'LIKE', 'A', 'ROUGH', 'FISHER', 'LAD'] +7902-96591-0010-10: ref=['COLD', 'WATER', 'CAME', 'ON', 'THIS', 'IDEA', 'DIRECTLY', 'AS', 'HE', 'RECALLED', 'THE', 'FACT', 'THAT', 'THE', 'DARKNESS', 'WAS', 'INTENSE', 'AND', 'CELIA', 'COULD', 'NOT', 'HAVE', 'SEEN', 'HIM'] +7902-96591-0010-10: hyp=['COLD', 'WATER', 'CAME', 'ON', 'THIS', 'IDEA', 'DIRECTLY', 'AS', 'HE', 'RECALLED', 'THE', 'FACT', 'THAT', 'THE', 'DARKNESS', 'WAS', 'INTENSE', 'AND', 'CELIA', 'COULD', 'NOT', 'HAVE', 'SEEN', 'HIM'] +7902-96591-0011-11: ref=["I'LL", 'SOON', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'NOT', 'GOING', 'TO', 'BE', 'PLAYED', 'WITH'] +7902-96591-0011-11: hyp=["I'LL", 'SOON', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'NOT', 'GOING', 'TO', 'BE', 'PLAYED', 'WITH'] +7902-96591-0012-12: ref=['FOR', 'IT', 'SUDDENLY', 'OCCURRED', 'TO', 'HIM', 'THAT', 'HE', 'WAS', 'NOT', 'ONLY', 'A', 'PRISONER', 'BUT', 'A', 'PRISONER', 'IN', 
'THE', 'POWER', 'OF', 'A', 'VERY', 'RECKLESS', 'SET', 'OF', 'PEOPLE', 'WHO', 'WOULD', 'STOP', 'AT', 'NOTHING'] +7902-96591-0012-12: hyp=['FOR', 'IT', 'SUDDENLY', 'OCCURRED', 'TO', 'HIM', 'THAT', 'HE', 'WAS', 'NOT', 'ONLY', 'A', 'PRISONER', 'BUT', 'A', 'PRISONER', 'IN', 'THE', 'POWER', 'OF', 'A', 'VERY', 'RECKLESS', 'SET', 'OF', 'PEOPLE', 'AND', 'WOULD', 'STOP', 'AT', 'NOTHING'] +7902-96591-0013-13: ref=['NO', 'HE', 'THOUGHT', 'TO', 'HIMSELF', 'I', "DON'T", 'BELIEVE', 'THEY', 'WOULD', 'KILL', 'ME', 'BUT', 'THEY', 'WOULD', 'KNOCK', 'ME', 'ABOUT'] +7902-96591-0013-13: hyp=['NO', 'HE', 'THOUGHT', 'TO', 'HIMSELF', 'I', "DON'T", 'BELIEVE', 'THEY', 'WOULD', 'KILL', 'ME', 'BUT', 'THEY', 'WOULD', 'KNOCK', 'ME', 'ABOUT'] +7902-96591-0014-14: ref=['THE', 'KICK', 'HE', 'HAD', 'RECEIVED', 'WAS', 'A', 'FORETASTE', 'OF', 'WHAT', 'HE', 'MIGHT', 'EXPECT', 'AND', 'AFTER', 'A', 'LITTLE', 'CONSIDERATION', 'HE', 'CAME', 'TO', 'THE', 'CONCLUSION', 'THAT', 'HIS', 'DUTY', 'WAS', 'TO', 'ESCAPE', 'AND', 'GET', 'BACK', 'TO', 'THE', 'CUTTER', 'AS', 'QUICKLY', 'AS', 'HE', 'COULD'] +7902-96591-0014-14: hyp=['THE', 'KICKY', 'HAD', 'RECEIVED', 'WAS', 'A', 'FORETASTE', 'OF', 'WHAT', 'HE', 'MIGHT', 'EXPECT', 'AND', 'AFTER', 'A', 'LITTLE', 'CONSIDERATION', 'HE', 'CAME', 'TO', 'THE', 'CONCLUSION', 'THAT', 'HIS', 'DUTY', 'WAS', 'TO', 'ESCAPE', 'AND', 'GET', 'BACK', 'TO', 'THE', 'CUTTER', 'AS', 'QUICKLY', 'AS', 'HE', 'COULD'] +7902-96591-0015-15: ref=['TO', 'DO', 'THIS', 'HE', 'MUST', 'SCHEME', 'LIE', 'HID', 'TILL', 'MORNING', 'THEN', 'MAKE', 'FOR', 'THE', 'NEAREST', 'POINT', 'AND', 'SIGNAL', 'FOR', 'HELP', 'UNLESS', 'A', "BOAT'S", 'CREW', 'WERE', 'ALREADY', 'SEARCHING', 'FOR', 'HIM', 'HOW', 'TO', 'ESCAPE'] +7902-96591-0015-15: hyp=['TO', 'DO', 'THIS', 'HE', 'MUST', 'SCHEME', 'LIE', 'HID', 'TILL', 'MORNING', 'THAN', 'MAKE', 'FOR', 'THE', 'NEAREST', 'POINT', 'A', 'SIGNAL', 'FOR', 'HELP', 'UNLESS', 'A', "BOAT'S", 'CREW', 'WERE', 'ALREADY', 'SURGING', 'FOR', 'HIM', 'HOW', 'TO', 'ESCAPE'] +7902-96591-0016-16: ref=['THE', 'WINDOW', 'WAS', 'BARRED', 'BUT', 'HE', 'WENT', 'TO', 'IT', 'AND', 'TRIED', 'THE', 'BARS', 'ONE', 'BY', 'ONE', 'TO', 'FIND', 'THEM', 'ALL', 'SOLIDLY', 'FITTED', 'INTO', 'THE', 'STONE', 'SILL'] +7902-96591-0016-16: hyp=['THE', 'WINDOW', 'WAS', 'BARRED', 'BUT', 'HE', 'WENT', 'TO', 'IT', 'AND', 'TRIED', 'THE', 'BARS', 'ONE', 'BY', 'ONE', 'TO', 'FIND', 'THEM', 'ALL', 'SOLIDLY', 'FITTED', 'INTO', 'THE', 'STONE', 'SILL'] +7902-96591-0017-17: ref=['NEXT', 'MOMENT', 'AS', 'HE', 'FELT', 'HIS', 'WAY', 'ABOUT', 'HIS', 'HAND', 'TOUCHED', 'AN', 'OLD', 'FASHIONED', 'MARBLE', 'MANTELPIECE', 'FIREPLACE', 'CHIMNEY'] +7902-96591-0017-17: hyp=['NEXT', 'MOMENT', 'AS', 'HE', 'FELT', 'HIS', 'WAY', 'ABOUT', 'HIS', 'HAND', 'TOUCHED', 'AN', 'OLD', 'FASHIONED', 'MARBLE', 'MANTELPIECE', 'FIREPLACE', 'CHIMNEY'] +7902-96591-0018-18: ref=['YES', 'IF', 'OTHER', 'WAYS', 'FAILED', 'HE', 'COULD', 'ESCAPE', 'UP', 'THE', 'CHIMNEY'] +7902-96591-0018-18: hyp=['YES', 'IF', 'OTHERWAYS', 'FAILED', 'HE', 'COULD', 'ESCAPE', 'UP', 'THE', 'CHIMNEY'] +7902-96591-0019-19: ref=['NO', 'THAT', 'WAS', 'TOO', 'BAD', 'HE', 'COULD', 'NOT', 'DO', 'THAT'] +7902-96591-0019-19: hyp=['NO', 'THAT', 'WAS', 'TOO', 'BAD', 'HE', 'COULD', 'NOT', 'DO', 'THAT'] +7902-96591-0020-20: ref=['SYMPATHY', 'AND', 'PITY', 'FOR', 'THE', 'DWELLERS', 'IN', 'THE', 'HOZE', 'WERE', 'COMPLETELY', 'GONE', 'NOW', 'AND', 'HE', 'SET', 'HIS', 'TEETH', 'FAST', 'AND', 'MENTALLY', 'CALLED', 'HIMSELF', 'A', 'WEAK', 'IDIOT', 'FOR', 'EVER', 'THINKING', 'ABOUT', 'SUCH', 'PEOPLE'] +7902-96591-0020-20: 
hyp=['SYMPATHY', 'AND', 'PITY', 'FOR', 'THE', 'DWELLERS', 'IN', 'THE', 'HOSE', 'WERE', 'COMPLETELY', 'GONE', 'NOW', 'AND', 'HE', 'SET', 'HIS', 'TEETH', 'FAST', 'AND', 'MENTALLY', 'CALLED', 'HIMSELF', 'A', 'WEEK', 'IDIOT', 'FOR', 'EVER', 'THINKING', 'ABOUT', 'SUCH', 'PEOPLE'] +7902-96591-0021-21: ref=['A', 'NARROW', 'TABLE', 'AGAINST', 'THE', 'WALL', 'IN', 'TWO', 'PLACES'] +7902-96591-0021-21: hyp=['A', 'NARROW', 'TABLE', 'AGAINST', 'THE', 'WALL', 'IN', 'TWO', 'PLACES'] +7902-96591-0022-22: ref=['HE', 'WENT', 'AND', 'TRIED', 'TO', 'FORCE', 'HIS', 'HEAD', 'THROUGH', 'RECALLING', 'AS', 'HE', 'DID', 'THAT', 'WHERE', 'A', "PERSON'S", 'HEAD', 'WOULD', 'GO', 'THE', 'REST', 'OF', 'THE', 'BODY', 'WOULD', 'PASS'] +7902-96591-0022-22: hyp=['HE', 'WENT', 'AND', 'TRIED', 'TO', 'FORCE', 'HIS', 'HEAD', 'THROUGH', 'RECALLING', 'AS', 'HE', 'DID', 'THAT', 'WHERE', 'A', "PERSON'S", 'HEAD', 'WOULD', 'GO', 'THE', 'REST', 'OF', 'THE', 'BODY', 'WOULD', 'PASS'] +7902-96591-0023-23: ref=['BUT', 'THERE', 'WAS', 'NO', 'CHANCE', 'FOR', 'HIS', 'BODY', 'THERE', 'THE', 'HEAD', 'WOULD', 'NOT', 'GO', 'FIRST'] +7902-96591-0023-23: hyp=['BUT', 'THERE', 'WAS', 'NO', 'CHANCE', 'FOR', 'HIS', 'BODY', 'THERE', 'THE', 'HEAD', 'WOULD', 'NOT', 'GO', 'FIRST'] +7902-96591-0024-24: ref=['A', 'FELLOW', 'WHO', 'WAS', 'SHUT', 'UP', 'IN', 'PRISON', 'FOR', 'LIFE', 'MIGHT', 'DO', 'IT', 'HE', 'SAID', 'BUT', 'NOT', 'IN', 'A', 'CASE', 'LIKE', 'THIS'] +7902-96591-0024-24: hyp=['A', 'FELLOW', 'WHO', 'WAS', 'SHUT', 'UP', 'IN', 'PRISONED', 'FOR', 'LIFE', 'MIGHT', 'DO', 'IT', 'HE', 'SAID', 'BUT', 'NOT', 'IN', 'A', 'CASE', 'LIKE', 'THIS'] +7902-96592-0000-25: ref=['SURE', "YOU'VE", 'LOOKED', 'ROUND', 'EVERYWHERE', 'BOY', 'YES', 'FATHER', 'QUITE'] +7902-96592-0000-25: hyp=['SURE', 'YOU', 'LOOK', 'ROUND', 'EVERYWHERE', 'BOY', 'YES', 'FATHER', 'QUITE'] +7902-96592-0001-26: ref=["I'M", 'GOING', 'HOME', 'TO', 'BREAKFAST'] +7902-96592-0001-26: hyp=["I'M", 'GOING', 'HOME', 'TO', 'BREAKFAST'] +7902-96592-0002-27: ref=['SHALL', 'I', 'COME', 'TOO', 'FATHER', 'NO'] +7902-96592-0002-27: hyp=['SHALL', 'I', 'COME', 'TO', 'FATHER', 'NO'] +7902-96592-0003-28: ref=['STOP', 'HERE', 'TILL', 'SIR', 'RISDON', 'COMES', 'DOWN', 'AND', 'TELL', 'HIM', "I'M", 'VERY', 'SORRY', 'THAT', 'WE', 'SHOULD', 'HAVE', 'CLEARED', 'OUT', 'LAST', 'NIGHT', 'ONLY', 'A', 'BORN', 'FOOL', 'SAW', 'JERRY', "NANDY'S", 'LOBSTER', 'BOAT', 'COMING', 'INTO', 'THE', 'COVE', 'AND', 'CAME', 'RUNNING', 'TO', 'SAY', 'IT', 'WAS', 'A', 'PARTY', 'FROM', 'THE', 'CUTTER', 'YES', 'FATHER'] +7902-96592-0003-28: hyp=['STOP', 'HERE', 'TILL', 'SIR', 'RISDON', 'COMES', 'DOWN', 'AND', 'TELL', 'HIM', "I'M", 'VERY', 'SORRY', 'THAT', 'WE', 'SHOULD', 'HAVE', 'CLEARED', 'OUT', 'LAST', 'NIGHT', 'ONLY', 'A', 'BORN', 'FOOL', 'SAW', 'JERRY', "ANDY'S", 'LOBSTERBOAT', 'COMING', 'INTO', 'THE', 'COVE', 'AND', 'CAME', 'RUNNING', 'TO', 'SAY', 'IT', 'WAS', 'A', 'PARTY', 'FROM', 'THE', 'CUTTER', 'YES', 'FATHER'] +7902-96592-0004-29: ref=['TELL', 'HIM', 'NOT', 'TO', 'BE', 'UNEASY', 'TIS', 'ALL', 'RIGHT', 'AND', "I'LL", 'HAVE', 'EVERYTHING', 'CLEAR', 'AWAY', 'TO', 'NIGHT'] +7902-96592-0004-29: hyp=['TELL', 'HIM', 'NOT', 'TO', 'BE', 'UNEASY', 'TIS', 'ALL', 'RIGHT', 'AND', "I'LL", 'HAVE', 'EVERYTHING', 'CLEAR', 'AWAY', 'TO', 'NIGHT'] +7902-96592-0005-30: ref=['THE', 'DULL', 'SOUND', 'OF', 'DEPARTING', 'STEPS', 'AND', 'A', 'LOW', 'WHISTLING', 'SOUND', 'COMING', 'DOWN', 'THROUGH', 'THE', 'SKYLIGHT', 'WINDOW', 'INTO', 'THE', 'CABIN', 'WHERE', 'ARCHY', 'RAYSTOKE', 'LAY', 'WITH', 'HIS', 'HEAVY', 'EYELIDS', 'PRESSED', 'DOWN', 'BY', 'SLEEP'] 
+7902-96592-0005-30: hyp=['THE', 'DULL', 'SOUND', 'OF', 'DEPARTING', 'STEPS', 'AND', 'A', 'LOW', 'WHISTLING', 'SOUND', 'COMING', 'DOWN', 'THROUGH', 'THE', 'SKYLIGHT', 'WINDOW', 'INTO', 'THE', 'CABIN', 'WHERE', 'ARCHIE', 'RAY', 'STROKE', 'LAY', 'WITH', 'HIS', 'HEAVY', 'EYELIDS', 'PRESSED', 'DOWN', 'BY', 'SLEEP'] +7902-96592-0006-31: ref=['WHAT', 'A', 'QUEER', 'DREAM', 'HE', 'THOUGHT', 'TO', 'HIMSELF'] +7902-96592-0006-31: hyp=['WHAT', 'A', 'QUEER', 'DREAM', 'HE', 'THOUGHT', 'TO', 'HIMSELF'] +7902-96592-0007-32: ref=['BUT', 'HOW', 'QUEER', 'FOR', 'MISTER', 'GURR', 'TO', 'BE', 'TALKING', 'LIKE', 'THAT', 'TO', 'ANDREW', 'TEAL', 'THE', 'BOY', 'WHO', 'HELPED', 'THE', 'COOK'] +7902-96592-0007-32: hyp=['BUT', 'HOW', 'QUEER', 'FOR', 'MISTER', 'GORE', 'TO', 'BE', 'TALKING', 'LIKE', 'THAT', 'TOIL', 'THE', 'BOY', 'WHO', 'HELPS', 'THE', 'COOK'] +7902-96592-0008-33: ref=['AND', 'WHY', 'DID', 'ANDY', 'CALL', 'MISTER', 'GURR', 'FATHER'] +7902-96592-0008-33: hyp=['AND', 'WHY', 'DID', 'ANDY', 'CALL', 'MISTER', 'GRAF'] +7902-96592-0009-34: ref=['THERE', 'WAS', 'AN', 'INTERVAL', 'OF', 'THINKING', 'OVER', 'THIS', 'KNOTTY', 'QUESTION', 'DURING', 'WHICH', 'THE', 'LOW', 'WHISTLING', 'WENT', 'ON'] +7902-96592-0009-34: hyp=['THERE', 'WAS', 'AN', 'INTERVAL', 'OF', 'THINKING', 'OVER', 'THIS', 'NAUGHTY', 'QUESTION', 'DURING', 'WHICH', 'THE', 'LOW', 'WHISTLING', 'WENT', 'ON'] +7902-96592-0010-35: ref=['AND', "I'M", 'HUNGRY', 'TOO', 'TIME', 'I', 'WAS', 'UP', 'I', 'SUPPOSE'] +7902-96592-0010-35: hyp=['AND', "I'M", 'HUNGRY', 'TOO', 'TIE', 'WAS', 'UP', 'I', 'SUPPOSE'] +7902-96592-0011-36: ref=['NO', 'HE', 'WAS', 'NOT', 'DREAMING', 'FOR', 'HE', 'WAS', 'LOOKING', 'OUT', 'ON', 'THE', 'SEA', 'OVER', 'WHICH', 'A', 'FAINT', 'MIST', 'HUNG', 'LIKE', 'WREATHS', 'OF', 'SMOKE'] +7902-96592-0011-36: hyp=['NO', 'HE', 'WAS', 'NOT', 'DREAMING', 'FOR', 'HE', 'WAS', 'LOOKING', 'OUT', 'ON', 'THE', 'SEA', 'OVER', 'WHICH', 'A', 'FAINT', 'MIST', 'HUNG', 'LIKE', 'WREATHS', 'OF', 'SMOKE'] +7902-96592-0012-37: ref=['WHAT', 'DID', 'THEY', 'SAY', 'FALSE', 'ALARM', 'TELL', 'SIR', 'RISDON', 'THEY', 'WOULD', 'CLEAR', 'ALL', 'AWAY', 'TO', 'NIGHT', 'SEE', 'IF', 'ANYTHING', 'HAD', 'BEEN', 'LEFT', 'ABOUT', 'LOBSTER', 'BOAT'] +7902-96592-0012-37: hyp=['WHAT', 'DID', 'THEY', 'SAY', 'FALSE', 'ALARM', 'TELL', 'SERVANTS', 'AND', 'THEY', 'WOULD', 'CLEAR', 'ALL', 'AWAY', 'TO', 'NIGHT', 'SEE', 'IF', 'ANYTHING', 'HAD', 'BEEN', 'LEFT', 'ABOUT', 'LOBSTER', 'WROTE'] +7902-96592-0013-38: ref=['ONCE', 'OUT', 'OF', 'THAT', 'ROOM', 'HE', 'COULD', 'RAN', 'AND', 'BY', 'DAYLIGHT', 'THE', 'SMUGGLERS', 'DARE', 'NOT', 'HUNT', 'HIM', 'DOWN'] +7902-96592-0013-38: hyp=['ONCE', 'OUT', 'OF', 'THAT', 'ROOM', 'HE', 'COULD', 'RAN', 'AND', 'BY', 'DAYLIGHT', 'THE', 'SMUGGERS', 'DARE', 'NOT', 'HUNT', 'HIM', 'DOWN'] +7902-96592-0014-39: ref=['OH', 'THOSE', 'BARS', 'HE', 'MENTALLY', 'EXCLAIMED', 'AND', 'HE', 'WAS', 'ADVANCING', 'TOWARD', 'THEM', 'WHEN', 'JUST', 'AS', 'HE', 'DREW', 'NEAR', 'THERE', 'WAS', 'A', 'RUSTLING', 'NOISE', 'UNDER', 'THE', 'WINDOW', 'A', 'COUPLE', 'OF', 'HANDS', 'SEIZED', 'THE', 'BARS', 'THERE', 'WAS', 'A', 'SCRATCHING', 'OF', 'BOOT', 'TOES', 'AGAINST', 'STONE', 'WORK', 'AND', "RAM'S", 'FACE', 'APPEARED', 'TO', 'GAZE', 'INTO', 'THE', 'ROOM', 'BY', 'INTENTION', 'BUT', 'INTO', 'THE', 'ASTONISHED', 'COUNTENANCE', 'OF', 'THE', 'YOUNG', 'MIDSHIPMAN', 'INSTEAD'] +7902-96592-0014-39: hyp=['OH', 'THOSE', 'BARS', 'HE', 'MENTALLY', 'EXCLAIMED', 'AND', 'HE', 'WAS', 'ADVANCING', 'TOWARDS', 'THEM', 'WHEN', 'JUST', 'AS', 'HE', 'DREW', 'NEAR', 'THERE', 'WAS', 'A', 'RUSTLING', 
'NOISE', 'UNDER', 'THE', 'WINDOW', 'A', 'COUPLE', 'OF', 'HANDS', 'SEIZED', 'THE', 'BARS', 'THERE', 'WAS', 'A', 'SCRATCHING', 'OF', 'BOOT', 'TOES', 'AGAINST', 'STONE', 'WORK', 'AND', "RAM'S", 'FACE', 'APPEARED', 'TO', 'GAZE', 'INTO', 'THE', 'ROOM', 'BY', 'INTENTION', 'BUT', 'INTO', 'THE', 'ASTONISHED', 'COUNTENANCE', 'OF', 'THE', 'YOUNG', 'MITCHIPMAN', 'INSTEAD'] +7902-96592-0015-40: ref=['RAM', 'WAS', 'THE', 'FIRST', 'TO', 'RECOVER', 'FROM', 'HIS', 'SURPRISE'] +7902-96592-0015-40: hyp=['ROOM', 'WAS', 'THE', 'FIRST', 'TO', 'RECOVER', 'FROM', 'HIS', 'SURPRISE'] +7902-96592-0016-41: ref=['HULLO', 'HE', 'SAID', 'WHO', 'ARE', 'YOU'] +7902-96592-0016-41: hyp=['HULLO', 'HE', 'SAID', 'WHO', 'ARE', 'YOU'] +7902-96592-0017-42: ref=['GO', 'ROUND', 'AND', 'OPEN', 'THE', 'DOOR', 'I', 'WAS', 'SHUT', 'IN', 'LAST', 'NIGHT', 'BY', 'MISTAKE'] +7902-96592-0017-42: hyp=['GO', 'ROUND', 'AND', 'OPEN', 'THE', 'DOOR', 'I', 'WAS', 'SHUT', 'IN', 'LAST', 'NIGHT', 'BY', 'MISTAKE'] +7902-96592-0018-43: ref=['I', 'SAW', 'YOU', 'LAST', 'NIGHT', 'AND', 'WONDERED', 'WHOSE', 'BOY', 'YOU', 'WAS'] +7902-96592-0018-43: hyp=['I', 'SAW', 'YOU', 'LAST', 'NIGHT', 'AND', 'WONDERED', 'WHOSE', 'BOY', 'YOU', 'WAS'] +7902-96592-0019-44: ref=['IT', 'WAS', 'YOU', 'FATHER', 'KICKED', 'FOR', 'SHIRKING', 'AND', 'MY', 'WELL', 'I', 'HARDLY', 'KNOWED', 'YOU'] +7902-96592-0019-44: hyp=['IT', 'WAS', 'YOUR', 'FATHER', 'KICKED', 'FOR', 'SHIRKING', 'AND', 'MY', 'WELL', 'I', 'HARDLY', 'KNOWED', 'YOU'] +7902-96592-0020-45: ref=['NONSENSE'] +7902-96592-0020-45: hyp=['NONSENSE'] +7902-96592-0021-46: ref=["WON'T", 'DO', 'SAID', 'RAM', 'GRINNING'] +7902-96592-0021-46: hyp=["WON'T", 'DO', 'SAID', 'RAM', 'GRINNIE'] +7902-96592-0022-47: ref=['THINK', 'I', "DON'T", 'KNOW', 'YOU', 'MISTER', 'ORFICER'] +7902-96592-0022-47: hyp=['THINK', 'I', "DON'T", 'KNOW', 'YOU', 'MISTER', 'ORFICER'] +7902-96592-0023-48: ref=["WON'T", 'DO', 'SAID', 'RAM', 'QUICKLY', 'I', 'KNOW', 'YOU'] +7902-96592-0023-48: hyp=["WON'T", 'DO', 'SAID', 'RUM', 'QUICKLY', 'I', 'KNOW', 'YOU'] +7902-96592-0024-49: ref=['BEEN', 'PLAYING', 'THE', 'SPY', "THAT'S", 'WHAT', "YOU'VE", 'BEEN', 'DOING', 'WHO', 'LOCKED', 'YOU', 'IN'] +7902-96592-0024-49: hyp=['THEN', 'PLAYING', 'THE', 'SPY', "THAT'S", 'WHAT', "YOU'VE", 'BEEN', 'DOING', 'WHO', 'LOCKED', 'YOU', 'IN'] +7902-96592-0025-50: ref=['ARCHY', 'STEPPED', 'BACK', 'TO', 'THE', 'DOOR', 'LISTENING', 'BUT', 'THERE', 'WAS', 'NOT', 'A', 'SOUND'] +7902-96592-0025-50: hyp=['ARCHIE', 'STEPPED', 'BACK', 'TO', 'THE', 'DOOR', 'LISTENING', 'BUT', 'THERE', 'WAS', 'NOT', 'A', 'SOUND'] +7902-96592-0026-51: ref=['HE', 'HAS', 'GONE', 'TO', 'GIVE', 'THE', 'ALARM', 'THOUGHT', 'THE', 'PRISONER', 'AND', 'HE', 'LOOKED', 'EXCITEDLY', 'ROUND', 'FOR', 'A', 'WAY', 'OF', 'ESCAPE'] +7902-96592-0026-51: hyp=['HE', 'HAS', 'GONE', 'TO', 'GIVE', 'THE', 'ALARM', 'THOUGHT', 'THE', 'PRISONER', 'AND', 'HE', 'LOOKED', 'EXCITEDLY', 'ROUND', 'FOR', 'A', 'WAY', 'OF', 'ESCAPE'] +7902-96592-0027-52: ref=['NOTHING', 'BUT', 'THE', 'CHIMNEY', 'PRESENTED', 'ITSELF'] +7902-96592-0027-52: hyp=['NOTHING', 'BUT', 'THE', 'CHIMNEY', 'PRESENTED', 'ITSELF'] +7902-96592-0028-53: ref=['A', 'HAPPY', 'INSPIRATION', 'HAD', 'COME', 'AND', 'PLACING', 'ONE', 'HAND', 'UPON', 'HIS', 'BREAST', 'HE', 'THRUST', 'IN', 'THE', 'OTHER', 'GAVE', 'A', 'TUG', 'AND', 'DREW', 'OUT', 'HIS', 'LITTLE', 'CURVED', 'DIRK', 'GLANCED', 'AT', 'THE', 'EDGE', 'RAN', 'TO', 'THE', 'WINDOW', 'AND', 'BEGAN', 'TO', 'CUT', 'AT', 'ONE', 'OF', 'THE', 'BARS', 'LABOUR', 'IN', 'VAIN'] +7902-96592-0028-53: hyp=['A', 'HAPPY', 'INSPIRATION', 'HAD', 
'COME', 'AND', 'PLACING', 'ONE', 'HAND', 'UPON', 'HIS', 'CHEST', 'HE', 'THRUST', 'IN', 'THE', 'OTHER', 'GAVE', 'A', 'TUG', 'AND', 'DREW', 'OUT', 'HIS', 'LITTLE', 'CURVED', 'DIRK', 'GLANCED', 'AT', 'THE', 'EDGE', 'RAN', 'TO', 'THE', 'WINDOW', 'AND', 'BEGAN', 'TO', 'CUT', 'AT', 'ONE', 'OF', 'THE', 'BARS', 'LABOR', 'IN', 'VAIN'] +7902-96592-0029-54: ref=['HE', 'DIVIDED', 'THE', 'PAINT', 'AND', 'PRODUCED', 'A', 'FEW', 'SQUEAKS', 'AND', 'GRATING', 'SOUNDS', 'AS', 'HE', 'REALISED', 'THAT', 'THE', 'ATTEMPT', 'WAS', 'MADNESS'] +7902-96592-0029-54: hyp=['HE', 'DIVIDED', 'THE', 'PAIN', 'AND', 'PRODUCED', 'A', 'FEW', 'SQUEAKS', 'IN', 'GRATING', 'SOUNDS', 'AS', 'HE', 'REALIZED', 'THAT', 'THE', 'ATTEMPT', 'WAS', 'MADNESS'] +7902-96592-0030-55: ref=['THE', 'RESULT', 'WAS', 'NOT', 'VERY', 'SATISFACTORY', 'BUT', 'SUFFICIENTLY', 'SO', 'TO', 'MAKE', 'HIM', 'ESSAY', 'THE', 'BAR', 'OF', 'THE', 'WINDOW', 'ONCE', 'MORE', 'PRODUCING', 'A', 'GRATING', 'EAR', 'ASSAILING', 'SOUND', 'AS', 'HE', 'FOUND', 'THAT', 'NOW', 'HE', 'DID', 'MAKE', 'A', 'LITTLE', 'IMPRESSION', 'SO', 'LITTLE', 'THOUGH', 'THAT', 'THE', 'PROBABILITY', 'WAS', 'IF', 'HE', 'KEPT', 'ON', 'WORKING', 'WELL', 'FOR', 'TWENTY', 'FOUR', 'HOURS', 'HE', 'WOULD', 'NOT', 'GET', 'THROUGH'] +7902-96592-0030-55: hyp=['THE', 'RESULT', 'WAS', 'NOT', 'VERY', 'SATISFACTORY', 'BUT', 'SUFFICIENTLY', 'SO', 'TO', 'MAKE', 'HIM', 'ESSAY', 'THE', 'BAR', 'OF', 'THE', 'WINDOW', 'ONCE', 'MORE', 'PRODUCING', 'A', 'GRATING', 'IRRESCELLING', 'SOUND', 'AS', 'HE', 'FOUND', 'THAT', 'NOW', 'HE', 'DID', 'MAKE', 'A', 'LITTLE', 'IMPRESSION', 'SO', 'LITTLE', 'THOUGH', 'THAT', 'THE', 'PROBABILITY', 'WAS', 'IF', 'HE', 'KEPT', 'ON', 'WORKING', 'WELL', 'FOR', 'TWENTY', 'FOUR', 'HOURS', 'HE', 'WOULD', 'NOT', 'GET', 'THROUGH'] +7902-96592-0031-56: ref=['BUT', 'AT', 'THE', 'END', 'OF', 'FIVE', 'MINUTES', 'HE', 'STOPPED', 'AND', 'THRUST', 'BACK', 'THE', 'DIRK', 'INTO', 'ITS', 'SHEATH'] +7902-96592-0031-56: hyp=['BUT', 'AT', 'THE', 'END', 'OF', 'FIVE', 'MINUTES', 'HE', 'STOPPED', 'AND', 'THRUST', 'BACK', 'THE', 'DARK', 'INTO', 'ITS', 'SHEATH'] +7902-96592-0032-57: ref=['NO', 'I', "CAN'T", 'PART', 'WITH', 'THAT', 'HA', 'HA', 'HA', 'LAUGHED', 'THE', 'BOY', 'JEERINGLY'] +7902-96592-0032-57: hyp=['NO', 'I', "CAN'T", 'PART', 'WITH', 'THAT', 'HA', 'HA', 'LAUGHED', 'THE', 'BOY', 'JEERINGLY'] +7902-96592-0033-58: ref=['BUT', "I'LL", 'YES', "I'LL", 'GIVE', 'YOU', 'A', 'GUINEA', 'IF', 'YOU', 'WILL', 'LET', 'ME', 'OUT'] +7902-96592-0033-58: hyp=['BLOW', 'YES', "I'LL", 'GIVE', 'YOU', 'A', 'GUINEA', 'IF', 'YOU', 'WILL', 'LET', 'ME', 'OUT'] +7902-96592-0034-59: ref=['GUINEA', 'SAID', 'THE', 'BOY', 'THINK', "I'D", 'DO', 'IT', 'FOR', 'A', 'GUINEA', 'WELL', 'THEN', 'TWO'] +7902-96592-0034-59: hyp=['GUINEAS', 'OF', 'THE', 'BOY', 'THINK', "I'LL", 'DO', 'IT', 'FOR', 'A', 'GUINEA', 'WELL', 'THEN', 'TOO'] +7902-96592-0035-60: ref=['BE', 'QUICK', "THERE'S", 'A', 'GOOD', 'FELLOW', 'I', 'WANT', 'TO', 'GET', 'AWAY', 'AT', 'ONCE'] +7902-96592-0035-60: hyp=['BE', 'QUICK', "THERE'S", 'A', 'GOOD', 'FELLOW', 'I', 'WANT', 'TO', 'GET', 'AWAY', 'AT', 'ONCE'] +7902-96592-0036-61: ref=['NOT', 'YOU', 'ONLY', 'A', 'SHAM'] +7902-96592-0036-61: hyp=['NOT', 'YOU', 'ONLY', 'A', 'SHAM'] +7902-96592-0037-62: ref=['WHY', 'YOUR', 'CLOTHES', "DON'T", 'FIT', 'YOU', 'AND', 'YOUR', "CAP'S", 'PUT', 'ON', 'ALL', 'SKEW', 'REW'] +7902-96592-0037-62: hyp=['WHY', "YOU'RE", 'CLOTHES', "DON'T", 'FIT', 'YOU', 'AND', 'YOUR', 'CAPS', 'PUT', 'ON', 'ALL', 'SKEERO'] +7902-96592-0038-63: ref=['NEVER', 'MIND', 'ABOUT', 'THAT', 'LET', 'ME', 'OUT', 'OF', 'THIS', 
'PLACE'] +7902-96592-0038-63: hyp=['NEVER', 'MIND', 'ABOUT', 'THAT', 'LET', 'ME', 'OUT', 'OF', 'THIS', 'PLACE'] +7902-96592-0039-64: ref=['I', 'TOLD', 'YOU', 'A', 'FISHER', 'BOY', 'CRIED', 'ARCHY', 'IMPATIENTLY', 'BUT', 'TRYING', 'NOT', 'TO', 'OFFEND', 'HIS', 'VISITOR', 'WHO', 'POSSESSED', 'THE', 'POWER', 'OF', 'CONFERRING', 'FREEDOM', 'BY', 'SPEAKING', 'SHARPLY'] +7902-96592-0039-64: hyp=['I', 'TOLD', 'YOU', 'A', 'FISHER', 'BOY', 'CRIED', 'ARCHIE', 'IMPATIENTLY', 'BUT', 'TRYING', 'NOT', 'TO', 'OFFEND', 'HIS', 'VISITOR', 'WHO', 'POSSESSED', 'THE', 'POWER', 'OF', 'CONFERRING', 'FREEDOM', 'BY', 'SPEAKING', 'SHARPLY'] +7902-96592-0040-65: ref=['NOT', 'YOU', 'LOOK', 'LIKE', 'A', 'WILD', 'BEAST', 'IN', 'A', 'CAGE', 'LIKE', 'A', 'MONKEY', 'YOU', 'INSOLENT'] +7902-96592-0040-65: hyp=['NOT', 'YOU', 'LOOK', 'LIKE', 'A', 'WILD', 'BEAST', 'IN', 'A', 'CAGE', 'LIKE', 'A', 'MONKEY', 'YOU', 'INSOLENT'] +7902-96592-0041-66: ref=['ARCHY', 'CHECKED', 'HIMSELF', 'AND', 'THE', 'BOY', 'LAUGHED'] +7902-96592-0041-66: hyp=['ARCHIE', 'CHECKED', 'HIMSELF', 'IN', 'THE', 'BOY', 'LAUGHED'] +7902-96592-0042-67: ref=['IT', 'WAS', 'YOUR', 'TURN', 'YESTERDAY', "IT'S", 'MINE', 'TO', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0042-67: hyp=['IT', 'WAS', 'YOUR', 'TURN', 'YESTERDAY', "IT'S", 'MINE', 'TO', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0043-68: ref=['YOU', 'LAUGHED', 'AND', 'FLEERED', 'AT', 'ME', 'WHEN', 'I', 'WAS', 'ON', 'THE', "CUTTER'S", 'DECK'] +7902-96592-0043-68: hyp=['YOU', 'LAUGHED', 'AND', 'FLEERED', 'AT', 'ME', 'WHEN', 'I', 'WAS', 'ON', 'THE', "CUTTER'S", 'DECK'] +7902-96592-0044-69: ref=['I', 'SAY', 'YOU', 'DO', 'LOOK', 'A', 'RUM', 'UN', 'JUST', 'LIKE', 'A', 'BIG', 'MONKEY', 'IN', 'A', 'SHOW'] +7902-96592-0044-69: hyp=['I', 'SAY', 'YOU', 'DO', 'LOOK', 'LIKE', 'A', 'ROMAN', 'JUST', 'LIKE', 'A', 'BIG', 'MONKEY', 'IN', 'A', 'SHOW'] +7902-96592-0045-70: ref=['RAM', 'SHOWED', 'HIS', 'WHITE', 'TEETH', 'AS', 'HE', 'BURST', 'OUT', 'WITH', 'A', 'LONG', 'LOW', 'FIT', 'OF', 'LAUGHTER'] +7902-96592-0045-70: hyp=['RAM', 'SHOWED', 'HIS', 'WHITE', 'TEETH', 'AS', 'HE', 'BURST', 'OUT', 'WITH', 'A', 'LONG', 'LOW', 'FIT', 'OF', 'LAUGHTER'] +7902-96592-0046-71: ref=['YOU', "ROPE'S", 'END', 'ME', 'HE', 'SAID'] +7902-96592-0046-71: hyp=['EUPS', 'AND', 'ME', 'HE', 'SAID'] +7902-96592-0047-72: ref=['WHY', 'I', 'COULD', 'TIE', 'YOU', 'UP', 'IN', 'A', 'KNOT', 'AND', 'HEAVE', 'YOU', 'OFF', 'THE', 'CLIFF', 'ANY', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0047-72: hyp=['WHY', 'I', 'COULD', 'TIE', 'YOU', 'UP', 'IN', 'A', 'KNOT', 'AND', 'HEAVE', 'YOU', 'OFF', 'THE', 'CLIFF', 'ANY', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0048-73: ref=['BIT', 'OF', 'A', 'MIDDY', 'FED', 'ON', 'SALT', 'TACK', 'AND', 'WEEVILLY', 'BISCUIT', 'TALK', 'OF', 'GIVING', 'ME', "ROPE'S", 'END'] +7902-96592-0048-73: hyp=['BIT', 'OF', 'A', 'MIDDY', 'FED', 'ON', 'A', 'SALT', 'TACK', 'AND', 'WEEBLY', 'BISCUIT', 'TALK', 'OF', 'GIVING', 'ME', 'ROPES', 'AND'] +7902-96592-0049-74: ref=['ONCE', 'MORE', 'WILL', 'YOU', 'COME', 'AND', 'LET', 'ME', 'OUT', 'NO'] +7902-96592-0049-74: hyp=['ONCE', 'MORE', 'WILL', 'YOU', 'COME', 'AND', 'LET', 'ME', 'OUT', 'NO'] +7902-96592-0050-75: ref=['TO', 'HIS', 'ASTONISHMENT', 'THE', 'BOY', 'DID', 'NOT', 'FLINCH', 'BUT', 'THRUST', 'HIS', 'OWN', 'ARMS', 'THROUGH', 'PLACING', 'THEM', 'ABOUT', 'THE', "MIDDY'S", 'WAIST', 'CLENCHING', 'HIS', 'HANDS', 'BEHIND', 'AND', 'UTTERING', 'A', 'SHARP', 'WHISTLE'] +7902-96592-0050-75: hyp=['TO', 'HIS', 'ASTONISHMENT', 'THE', 'BOY', 'DID', 'NOT', 'FLINCH', 'BUT', 'THRUST', 'HIS', 'OWN', 'ARMS', 'THROUGH', 
'REPLACING', 'THEM', 'ABOUT', 'THE', "MIDDY'S", 'WAIST', 'CLENCHING', 'HIS', 'HAND', 'BEHIND', 'AND', 'UTTERING', 'A', 'SHARP', 'WHISTLE'] +7902-96594-0000-76: ref=['SEEMED', 'IN', 'GOOD', 'SPIRITS', 'LAST', 'NIGHT', 'MISTER', 'GURR', 'EH'] +7902-96594-0000-76: hyp=['SEEMING', 'EXPERIENCE', 'LAST', 'NIGHT', 'MISTER', 'GURR', 'HE'] +7902-96594-0001-77: ref=['YES', 'SIR', 'BUT', 'HE', 'MAY', 'TURN', 'UP', 'ON', 'THE', 'CLIFF', 'AT', 'ANY', 'MOMENT'] +7902-96594-0001-77: hyp=['YES', 'SIR', 'BUT', 'HE', 'MAY', 'TURN', 'UPON', 'THE', 'CLIFF', 'AT', 'ANY', 'MOMENT'] +7902-96594-0002-78: ref=['YES', 'MEN', 'QUITE', 'READY', 'YES', 'SIR'] +7902-96594-0002-78: hyp=['YES', 'MEN', 'QUITE', 'READY', 'YES', 'SIR'] +7902-96594-0003-79: ref=["THAT'S", 'RIGHT', 'OF', 'COURSE', 'WELL', 'ARMED'] +7902-96594-0003-79: hyp=["IT'S", 'RIGHT', 'OF', 'COURSE', 'WELL', 'ARMED'] +7902-96594-0004-80: ref=['SOON', 'AS', 'THE', 'SIGNAL', 'COMES', 'WE', 'SHALL', 'PUSH', 'OFF'] +7902-96594-0004-80: hyp=['SOON', 'AS', 'THE', 'SIGNAL', 'COMES', 'WE', 'SHALL', 'PUSH', 'OFF'] +7902-96594-0005-81: ref=['AWKWARD', 'BIT', 'O', 'COUNTRY', 'SIR', 'SIX', 'MILES', 'ROW', 'BEFORE', 'YOU', 'CAN', 'FIND', 'A', 'PLACE', 'TO', 'LAND'] +7902-96594-0005-81: hyp=['OF', 'HER', 'BIT', 'OF', 'COUNTRY', 'SIR', 'SIX', 'MILES', 'ROW', 'FOR', 'YOU', 'CAN', 'FIND', 'A', 'PLACE', 'TO', 'LAND'] +7902-96594-0006-82: ref=['SO', 'SHALL', 'WE', 'YET', 'SIR'] +7902-96594-0006-82: hyp=['SO', 'SHALL', 'WE', 'GET', 'SIR'] +7902-96594-0007-83: ref=['YOU', "DON'T", 'THINK', 'MISTER', 'GURR', 'THAT', 'THEY', 'WOULD', 'DARE', 'TO', 'INJURE', 'HIM', 'IF', 'HE', 'WAS', 'SO', 'UNLUCKY', 'AS', 'TO', 'BE', 'CAUGHT'] +7902-96594-0007-83: hyp=['YOU', "DON'T", 'THINK', 'MISTER', 'GIRL', 'THAT', 'THEY', 'WOULD', 'DARE', 'TO', 'INJURE', 'HIM', 'IF', 'HE', 'WAS', 'SO', 'UNLUCKY', 'AS', 'TO', 'BE', 'CAUGHT'] +7902-96594-0008-84: ref=['WELL', 'SIR', 'SAID', 'THE', 'MASTER', 'HESITATING', 'SMUGGLERS', 'ARE', 'SMUGGLERS'] +7902-96594-0008-84: hyp=['WELL', 'SIR', 'SAID', 'THE', 'MASTER', 'HESITATING', 'SMUGGLERS', 'OR', 'SMUGGLERS'] +7902-96594-0009-85: ref=['CERTAINLY', 'SIR', 'SMUGGLERS', 'ARE', 'SMUGGLERS', 'INDEED'] +7902-96594-0009-85: hyp=['CERTAINLY', 'SIR', 'SMUGGLERS', 'ARE', 'SMUGGLERS', 'INDEED'] +7902-96594-0010-86: ref=['BEG', 'PARDON', 'SIR', "DIDN'T", 'MEAN', 'ANY', 'HARM'] +7902-96594-0010-86: hyp=['THEY', 'PARDON', 'SIR', "DIDN'T", 'MEAN', 'ANY', 'HARM'] +7902-96594-0011-87: ref=["I'M", 'GETTING', 'VERY', 'ANXIOUS', 'ABOUT', 'MISTER', 'RAYSTOKE', 'START', 'AT', 'ONCE', 'SIR'] +7902-96594-0011-87: hyp=["I'M", 'GETTING', 'VERY', 'ANXIOUS', 'ABOUT', 'MISTER', 'RAYSTROKE', 'START', 'AT', 'ONCE', 'SIR'] +7902-96594-0012-88: ref=['NO', 'WAIT', 'ANOTHER', 'HALF', 'HOUR'] +7902-96594-0012-88: hyp=['NO', 'WHERE', 'ANOTHER', 'HALF', 'HOUR'] +7902-96594-0013-89: ref=['VERY', 'ILL', 'ADVISED', 'THING', 'TO', 'DO'] +7902-96594-0013-89: hyp=['VERY', 'ILL', 'ADVICE', 'THING', 'TO', 'DO'] +7902-96594-0014-90: ref=['THEN', 'I', 'MUST', 'REQUEST', 'THAT', 'YOU', 'WILL', 'NOT', 'MAKE', 'IT', 'AGAIN', 'VERY', 'TRUE'] +7902-96594-0014-90: hyp=['THAT', 'I', 'MUST', 'REQUEST', 'THAT', 'YOU', 'WILL', 'NOT', 'MAKE', 'IT', 'AGAIN', 'VERY', 'TRUE'] +7902-96594-0015-91: ref=['AWK', 'WARD', 'MISTER', 'GURR', 'AWKWARD'] +7902-96594-0015-91: hyp=['AWKWARD', 'MISTER', 'GARR', 'AWKWARD'] +7902-96594-0016-92: ref=['YES', 'SIR', 'OF', 'COURSE'] +7902-96594-0016-92: hyp=['YES', 'SIR', 'OF', 'COURSE'] +7902-96594-0017-93: ref=['SAY', 'AWK', 'WARD', 'IN', 'FUTURE', 'NOT', "AWK'ARD"] 
+7902-96594-0017-93: hyp=['SAY', 'AWKWARD', 'IN', 'THE', 'FUTURE', 'NOT', 'UPWARD'] +7902-96594-0018-94: ref=['I', 'MEAN', 'ALL', 'ALONE', 'BY', 'MYSELF', 'SIR'] +7902-96594-0018-94: hyp=['I', 'MEAN', 'ALL', 'ALONE', 'BY', 'MYSELF', 'SIR'] +7902-96594-0019-95: ref=['WHAT', 'FOR', 'THERE', "AREN'T", 'A', 'PUBLIC', 'HOUSE', 'FOR', 'TEN', 'MILES', "DIDN'T", 'MEAN', 'THAT'] +7902-96594-0019-95: hyp=['WHAT', 'FOR', 'THERE', "AREN'T", 'A', 'PUBLIC', 'HOUSE', 'FOR', 'TEN', 'MILES', "DIDN'T", 'MEAN', 'THAT'] +7902-96594-0020-96: ref=['THEN', 'WHAT', 'DID', 'YOU', 'MEAN', 'SPEAK', 'OUT', 'AND', "DON'T", 'DO', 'THE', 'DOUBLE', 'SHUFFLE', 'ALL', 'OVER', 'MY', 'CLEAN', 'DECK', 'NO', 'SIR'] +7902-96594-0020-96: hyp=['THEN', 'WHAT', 'DID', 'JULIA', 'SPEAK', 'OUT', 'AND', "DON'T", 'DO', 'THE', 'DOUBLE', 'SHOVEL', 'ALL', 'OVER', 'MY', 'CLEAN', 'DECK', 'NO', 'SIR'] +7902-96594-0021-97: ref=['HOPPING', 'ABOUT', 'LIKE', 'A', 'CAT', 'ON', 'HOT', 'BRICKS'] +7902-96594-0021-97: hyp=['HAVING', 'ABOUT', 'THE', 'GUQUET', 'ON', 'HOT', 'BRICKS'] +7902-96594-0022-98: ref=['NOW', 'THEN', 'WHY', 'DO', 'YOU', 'WANT', 'TO', 'GO', 'ASHORE'] +7902-96594-0022-98: hyp=['NOW', 'THEN', 'WHY', 'DO', 'YOU', 'WANT', 'TO', 'GO', 'SHORE'] +7902-96594-0023-99: ref=['BEG', 'PARDON', "DIDN'T", 'MEAN', 'NOWT', 'SIR', 'SAID', 'THE', 'SAILOR', 'TOUCHING', 'HIS', 'FORELOCK'] +7902-96594-0023-99: hyp=['THEY', 'PARDON', "DIDN'T", 'MEAN', 'THAT', 'SIR', 'SAID', 'THE', 'SAILOR', 'TOUCHING', 'HIS', 'FORELOCK'] +7902-96594-0024-100: ref=['YES', 'SIR', 'SAID', 'THE', 'MAN', 'HUMBLY', 'SHALL', 'I', 'GO', 'AT', 'ONCE', 'SIR'] +7902-96594-0024-100: hyp=['YES', 'SIR', 'SAID', 'THE', 'MADAMELY', 'SHALL', 'I', 'GO', 'AT', 'ONCE', 'SIR'] +7902-96594-0025-101: ref=['NO', 'WAIT'] +7902-96594-0025-101: hyp=['NO', 'WAIT'] +7902-96594-0026-102: ref=['KEEP', 'A', 'SHARP', 'LOOK', 'OUT', 'ON', 'THE', 'CLIFF', 'TO', 'SEE', 'IF', 'MISTER', 'RAYSTOKE', 'IS', 'MAKING', 'SIGNALS', 'FOR', 'A', 'BOAT'] +7902-96594-0026-102: hyp=['HE', 'WAS', 'SHARP', 'LOOK', 'OUT', 'ON', 'THE', 'CLIFF', 'AS', 'EVEN', 'MISTER', 'RACE', 'JOKE', 'IS', 'MAKING', 'SIGNALS', 'FOR', 'A', 'BOAT'] +7902-96594-0027-103: ref=['HE', 'SWUNG', 'ROUND', 'WALKED', 'AFT', 'AND', 'BEGAN', 'SWEEPING', 'THE', 'SHORE', 'AGAIN', 'WITH', 'HIS', 'GLASS', 'WHILE', 'THE', 'MASTER', 'AND', 'DICK', 'EXCHANGED', 'GLANCES', 'WHICH', 'MEANT', 'A', 'GREAT', 'DEAL'] +7902-96594-0027-103: hyp=['HE', 'SWUNG', 'ROUND', 'WALKED', 'AFT', 'AND', 'BEGAN', 'SWEEPING', 'ASHORE', 'AGAIN', 'WITH', 'HIS', 'GLASS', 'WHILE', 'THE', 'MASTER', 'AND', 'DICK', 'EXCHANGED', 'GLANCES', 'WHICH', 'MEANT', 'A', 'GREAT', 'DEAL'] +7902-96594-0028-104: ref=['AT', 'LAST', 'THE', 'LITTLE', 'LIEUTENANT', 'COULD', 'BEAR', 'THE', 'ANXIETY', 'NO', 'LONGER'] +7902-96594-0028-104: hyp=['AT', 'LAST', 'THE', 'LITTLE', 'TANNIC', 'COULD', 'BEAR', 'THE', 'ANXIETY', 'NO', 'LONGER'] +7902-96594-0029-105: ref=['PIPE', 'AWAY', 'THE', 'MEN', 'TO', 'THAT', 'BOAT', 'THERE', 'HE', 'SAID', 'AND', 'AS', 'THE', 'CREW', 'SPRANG', 'IN'] +7902-96594-0029-105: hyp=['PEP', 'AWAY', 'THE', 'MEN', 'TO', 'THAT', 'BOAT', 'THERE', 'HE', 'SAID', 'AND', 'AS', 'THE', 'CREW', 'SPRANG', 'IN'] +7902-96594-0030-106: ref=['NOW', 'MISTER', 'GURR', 'HE', 'SAID', "I'M", 'ONLY', 'GOING', 'TO', 'SAY', 'ONE', 'THING', 'TO', 'YOU', 'IN', 'THE', 'WAY', 'OF', 'INSTRUCTIONS', 'YES', 'SIR'] +7902-96594-0030-106: hyp=['NOW', 'MISTER', 'GURG', 'HE', 'SAID', "I'M", 'ONLY', 'GOING', 'TO', 'SAY', 'ONE', 'THING', 'TO', 'YOU', 'IN', 'THE', 'WAY', 'OF', 'INSTRUCTIONS', 'YES', 'SIR'] 
+7902-96594-0031-107: ref=['BEG', 'PARDON', 'SIR', 'SAID', 'THE', 'MASTER', 'DEPRECATINGLY'] +7902-96594-0031-107: hyp=['BEG', 'PARDON', 'SIR', 'SAID', 'THE', 'MASTER', 'DEPRECATINGLY'] +7902-96594-0032-108: ref=['STEADY', 'MY', 'LADS', 'STEADY', 'CRIED', 'THE', 'MASTER', 'KEEP', 'STROKE', 'AND', 'THEN', 'HE', 'BEGAN', 'TO', 'MAKE', 'PLANS', 'AS', 'TO', 'HIS', 'FIRST', 'PROCEEDINGS', 'ON', 'GETTING', 'ASHORE'] +7902-96594-0032-108: hyp=['STEADY', 'MY', 'LAD', 'STEADY', 'CRIED', 'THE', 'MASTER', 'KEEP', 'STROKE', 'AND', 'THEN', 'HE', 'BEGAN', 'TO', 'MAKE', 'PLANS', 'AS', 'TO', 'HIS', 'FIRST', 'PROCEEDINGS', "I'M", 'GETTING', 'ASHORE'] +7902-96595-0000-109: ref=['SAY', 'MESTER', 'GURR', 'SAID', 'DICK', 'AFTER', 'ONE', 'OF', 'THESE', 'SEARCHES', 'HE', "WOULDN'T", 'RUN', 'AWAY', 'WHAT'] +7902-96595-0000-109: hyp=['SAY', 'MISTER', 'GIRK', 'SAID', 'DICK', 'AFTER', 'ONE', 'OF', 'THESE', 'SEARCHES', 'HE', "WOULDN'T", 'RUN', 'AWAY', 'WHAT'] +7902-96595-0001-110: ref=['MISTER', 'RAYSTOKE', 'SIR', "DON'T", 'BE', 'A', 'FOOL'] +7902-96595-0001-110: hyp=['MISTER', 'RAYSTOKE', 'SIR', "DON'T", 'BE', 'A', 'FOOL'] +7902-96595-0002-111: ref=['WHAT', 'CHUCKED', 'HIM', 'OFF', 'YONDER'] +7902-96595-0002-111: hyp=['WHAT', 'TECHTAMORPHYANDER'] +7902-96595-0003-112: ref=['GURR', 'GLANCED', 'ROUND', 'TO', 'SEE', 'IF', 'THE', 'MEN', 'WERE', 'LOOKING', 'AND', 'THEN', 'SAID', 'RATHER', 'HUSKILY', 'BUT', 'KINDLY'] +7902-96595-0003-112: hyp=['GER', 'GLANCED', 'ROUND', 'TO', 'SEE', 'IF', 'THE', 'MEN', 'WERE', 'LOOKING', 'AND', 'THEN', 'SAID', 'WHETHER', 'HUSKILY', 'BE', 'KINDLY'] +7902-96595-0004-113: ref=['AH', 'EJACULATED', 'DICK', 'SADLY'] +7902-96595-0004-113: hyp=['AH', 'EJACULATED', 'DICK', 'SADLY'] +7902-96595-0005-114: ref=['SAY', 'MESTER', 'GURR', 'SIR', 'WHICH', 'THANKFUL', 'I', 'AM', 'TO', 'YOU', 'FOR', 'SPEAKING', 'SO', 'BUT', 'YOU', "DON'T", 'REALLY', 'THINK', 'AS', 'HE', 'HAS', 'COME', 'TO', 'HARM'] +7902-96595-0005-114: hyp=['SAY', 'MISTER', 'GURSER', 'WHICH', 'THANKFUL', 'I', 'AM', 'FOR', 'YOU', 'FOR', 'SPEAKING', 'SO', 'BUT', 'YOU', "DON'T", 'THINK', 'AS', 'HE', 'HAS', 'COME', 'TO', 'HARM'] +7902-96595-0006-115: ref=['I', 'HOPE', 'NOT', 'DICK', 'I', 'HOPE', 'NOT', 'BUT', 'SMUGGLERS', "DON'T", 'STAND', 'AT', 'ANYTHING', 'SOMETIMES'] +7902-96595-0006-115: hyp=['I', 'HOPE', 'NOT', 'DICK', 'I', 'HOPE', 'NOT', 'BUT', 'SMOKE', 'WAS', "DON'T", 'STAND', 'AT', 'ANYTHING', 'SOMETIMES'] +7902-96595-0007-116: ref=['I', 'DO', 'ASSURE', 'YOU', "THERE'S", 'NOTHING', 'HERE', 'BUT', 'WHAT', 'YOU', 'MAY', 'SEE'] +7902-96595-0007-116: hyp=['I', 'DO', 'ASSURE', 'YOU', "THERE'S", 'NOTHING', 'HERE', 'BUT', 'WHAT', 'YOU', 'MAY', 'SEE'] +7902-96595-0008-117: ref=['IF', "YOU'D", 'LET', 'ME', 'FINISH', "YOU'D", 'KNOW', 'SAID', 'GURR', 'GRUFFLY', 'ONE', 'OF', 'OUR', 'BOYS', 'IS', 'MISSING', 'SEEN', 'HIM', 'UP', 'HERE'] +7902-96595-0008-117: hyp=['IF', 'YOU', 'LET', 'ME', 'FINISH', "YOU'D", 'KNOW', 'SAID', 'GURG', 'ROUGHLY', 'ONE', 'OF', 'OUR', 'BOYS', 'IS', 'MISSING', 'SEEN', 'EM', 'UP', 'HERE'] +7902-96595-0009-118: ref=['BOY', 'BOUT', 'SEVENTEEN', 'WITH', 'A', 'RED', 'CAP', 'NO', 'SIR', 'INDEED', "I'VE", 'NOT'] +7902-96595-0009-118: hyp=['BOY', 'ABOUT', 'SEVENTEEN', 'WITH', 'A', 'RED', 'CAP', 'NO', 'SIR', 'INDEED', 'OF', 'NONE'] +7902-96595-0010-119: ref=["DON'T", 'KNOW', 'AS', 'HE', 'HAS', 'BEEN', 'SEEN', 'ABOUT', 'HERE', 'DO', 'YOU', 'SAID', 'GURR', 'LOOKING', 'AT', 'HER', 'SEARCHINGLY', 'NO', 'SIR'] +7902-96595-0010-119: hyp=["DON'T", 'KNOW', 'AS', 'HE', 'HAS', 'BEEN', 'SEEN', 'ABOUT', 'HERE', 'DO', 'YOU', 'SAID', 'GIRL', 
'LOOKING', 'AT', 'HER', 'SEARCHINGLY', 'NO', 'SIR'] +7902-96595-0011-120: ref=['IF', 'SHE', 'KNEW', 'EVIL', 'HAD', 'COME', 'TO', 'THE', 'POOR', 'LAD', 'HER', 'FACE', 'WOULD', 'TELL', 'TALES', 'LIKE', 'PRINT'] +7902-96595-0011-120: hyp=['IF', 'SHE', 'KNEW', 'EVIL', 'HAD', 'COME', 'TO', 'THE', 'POOR', 'LAD', 'HER', 'FACE', 'WOULD', 'TELL', 'TALES', 'LIKE', 'PRINT'] +7902-96595-0012-121: ref=['I', 'SAID', 'A', 'LAD', 'BOUT', 'SEVENTEEN', 'IN', 'A', 'RED', 'CAP', 'LIKE', 'YOURS', 'SAID', 'GURR', 'VERY', 'SHORTLY'] +7902-96595-0012-121: hyp=['I', 'SAID', 'A', 'LAD', 'ABOUT', 'SEVENTEEN', 'AND', 'A', 'RED', 'CATHOLIC', 'YOURS', 'SAID', 'GREW', 'VERY', 'SHORTLY'] +7902-96595-0013-122: ref=['THE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'AND', 'STARED', 'AS', 'IF', 'HE', "DIDN'T", 'HALF', 'UNDERSTAND', 'THE', 'DRIFT', 'OF', 'WHAT', 'WAS', 'SAID'] +7902-96595-0013-122: hyp=['THE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'AND', 'STARED', 'AS', 'IF', 'HE', "DIDN'T", 'HALF', 'UNDERSTAND', 'THE', 'DRIFT', 'OF', 'OGA', 'SAID'] +7902-96595-0014-123: ref=['HERE', 'MY', 'LAD', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0014-123: hyp=['HERE', 'MILAD', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0015-124: ref=['EH', 'I', 'SAY', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0015-124: hyp=['THEY', 'I', 'SAY', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0016-125: ref=['GURR', 'TURNED', 'AWAY', 'IMPATIENTLY', 'AGAIN', 'AND', 'SIGNING', 'TO', 'HIS', 'MEN', 'TO', 'FOLLOW', 'THEY', 'ALL', 'BEGAN', 'TO', 'TRAMP', 'UP', 'THE', 'STEEP', 'TRACK', 'LEADING', 'TOWARD', 'THE', 'HOZE', 'WITH', 'THE', 'RABBITS', 'SCUTTLING', 'AWAY', 'AMONG', 'THE', 'FURZE', 'AND', 'SHOWING', 'THEIR', 'WHITE', 'COTTONY', 'TAILS', 'FOR', 'A', 'MOMENT', 'AS', 'THEY', 'DARTED', 'DOWN', 'INTO', 'THEIR', 'HOLES'] +7902-96595-0016-125: hyp=['GERT', 'TURNED', 'AWAY', 'IMPATIENTLY', 'AGAIN', 'AND', 'SIGNING', 'TO', 'HIS', 'MEN', 'TO', 'FOLLOW', 'THEY', 'ALL', 'BEGAN', 'TO', 'TRAMP', 'UP', 'THE', 'STEEP', 'TRACK', 'LEADING', 'TOWARD', 'THE', 'HOSE', 'WITH', 'THE', 'RABBIT', 'SCUTTLING', 'AWAY', 'AMONG', 'THE', 'FIRS', 'AND', 'SHOWING', 'THEIR', 'WHITE', 'COTTONY', 'TAILS', 'FOR', 'A', 'MOMENT', 'AS', 'THEY', 'DARTED', 'DOWN', 'INTO', 'THEIR', 'HOLES'] +7902-96595-0017-126: ref=['I', 'DUNNO', 'MUTTERED', 'DICK', 'AND', 'A', 'MAN', "CAN'T", 'BE', 'SURE'] +7902-96595-0017-126: hyp=['I', 'DUNNO', 'MUTTERED', 'DICK', 'AND', 'A', 'MEN', "CAN'T", 'BE', 'SURE'] +7902-96595-0018-127: ref=['GURR', 'SALUTED', 'AND', 'STATED', 'HIS', 'BUSINESS', 'WHILE', 'THE', 'BARONET', 'WHO', 'HAD', 'TURNED', 'SALLOWER', 'AND', 'MORE', 'CAREWORN', 'THAN', 'HIS', 'LOT', 'DREW', 'A', 'BREATH', 'FULL', 'OF', 'RELIEF', 'ONE', 'OF', 'YOUR', 'SHIP', 'BOYS', 'HE', 'SAID'] +7902-96595-0018-127: hyp=['DUR', 'SALUTED', 'INSTEAD', 'OF', 'HIS', 'BUSINESS', 'WHILE', 'THE', 'BARONET', 'WHO', 'HAD', 'TURNED', 'SALARY', 'AND', 'MORE', 'CARE', 'MORE', 'THAN', 'HIS', 'LOT', 'DREW', 'A', 'BREATH', 'OF', 'FULL', 'OF', 'RELIEF', 'ONE', 'OF', 'YOUR', 'VOYS', 'HE', 'SAID'] +7902-96595-0019-128: ref=['A', 'LAD', 'LOOKING', 'LIKE', 'A', 'COMMON', 'SAILOR', 'AND', 'WEARING', 'A', 'RED', 'CAP', 'NO', 'SAID', 'SIR', 'RISDON'] +7902-96595-0019-128: hyp=['A', 'LAD', 'LOOKING', 'LIKE', 'A', 'COMMON', 'SAILOR', 'AND', 'WEARING', 'A', 'RED', 'CAP', 'NO', 'SAID', 'SIR', 'RISDON'] +7902-96595-0020-129: ref=['I', 'HAVE', 'SEEN', 'NO', 'ONE', 'ANSWERING', 'TO', 'THE', 'DESCRIPTION', 'HERE'] +7902-96595-0020-129: hyp=['I', 'HAVE', 'SEEN', 'NO', 'ONE', 'ANSWERING', 'TO', 'THE', 'DESCRIPTION', 'HERE'] +7902-96595-0021-130: ref=['BEG', 'PARDON', 
'SIR', 'BUT', 'CAN', 'YOU', 'AS', 'A', 'GENTLEMAN', 'ASSURE', 'ME', 'THAT', 'HE', 'IS', 'NOT', 'HERE', 'CERTAINLY', 'SAID', 'SIR', 'RISDON'] +7902-96595-0021-130: hyp=['BIG', 'PARTICER', 'BECAUSE', 'YOU', 'AS', 'GENTLEMEN', 'ASSURE', 'ME', 'THAT', 'HE', 'IS', 'NOT', 'HERE', 'CERTAINLY', 'SAID', 'SIR', 'RISDON'] +7902-96595-0022-131: ref=['SURELY', 'CRIED', 'SIR', 'RISDON', 'EXCITEDLY'] +7902-96595-0022-131: hyp=['SURELY', 'CRIED', 'SIR', 'RISDON', 'EXCITEDLY'] +7902-96595-0023-132: ref=['SIR', 'RISDON', 'WAS', 'SILENT'] +7902-96595-0023-132: hyp=['SIR', 'RICHARD', 'WAS', 'SILENT'] +7902-96595-0024-133: ref=['LADY', 'GRAEME', 'LOOKED', 'GHASTLY'] +7902-96595-0024-133: hyp=['LADY', 'GRAHAM', 'LOOKED', 'GHASTLY'] +7902-96595-0025-134: ref=['YOU', 'DO', 'NOT', 'KNOW', 'NO'] +7902-96595-0025-134: hyp=['YOU', 'DO', 'NOT', 'KNOW', 'NO'] +7975-280057-0000-1008: ref=['THESE', 'HATREDS', 'WERE', 'SOON', 'TO', 'MAKE', 'TROUBLE', 'FOR', 'ME', 'OF', 'WHICH', 'I', 'HAD', 'NEVER', 'DREAMED'] +7975-280057-0000-1008: hyp=['THESE', 'HATREDS', 'WERE', 'SOON', 'TO', 'MAKE', 'TROUBLE', 'FOR', 'ME', 'OF', 'WHICH', 'I', 'HAD', 'NEVER', 'DREAMED'] +7975-280057-0001-1009: ref=['HENRY', 'WASHINGTON', 'YOUNGER', 'MY', 'FATHER', 'REPRESENTED', 'JACKSON', 'COUNTY', 'THREE', 'TIMES', 'IN', 'THE', 'LEGISLATURE', 'AND', 'WAS', 'ALSO', 'JUDGE', 'OF', 'THE', 'COUNTY', 'COURT'] +7975-280057-0001-1009: hyp=['HENRY', 'WASHINGTON', 'YOUNGER', 'MY', 'FATHER', 'REPRESENTED', 'JACKSON', 'COUNTY', 'THREE', 'TIMES', 'IN', 'THE', 'LEGISLATURE', 'AND', 'WAS', 'ALSO', 'A', 'JUDGE', 'OF', 'THE', 'COUNTY', 'COURT'] +7975-280057-0002-1010: ref=['MY', 'MOTHER', 'WHO', 'WAS', 'BURSHEBA', 'FRISTOE', 'OF', 'INDEPENDENCE', 'WAS', 'THE', 'DAUGHTER', 'OF', 'RICHARD', 'FRISTOE', 'WHO', 'FOUGHT', 'UNDER', 'GENERAL', 'ANDREW', 'JACKSON', 'AT', 'NEW', 'ORLEANS', 'JACKSON', 'COUNTY', 'HAVING', 'BEEN', 'SO', 'NAMED', 'AT', 'MY', 'GRANDFATHER', "FRISTOE'S", 'INSISTENCE'] +7975-280057-0002-1010: hyp=['MY', 'MOTHER', 'WHO', 'WAS', 'PERCEIVED', 'HER', 'FOR', 'STOVE', 'OF', 'INDEPENDENCE', 'WAS', 'A', 'DAUGHTER', 'OF', 'RICHARD', 'CRISTO', 'WHO', 'FOUGHT', 'UNDER', 'GENERAL', 'ANDREW', 'JACKSON', 'AT', 'NEW', 'ORLEANS', 'JACKSON', 'COUNTY', 'HAVING', 'BEEN', 'SO', 'NAMED', 'IN', 'MY', 'GRANDFATHER', 'FRUSTES', 'INSISTENCE'] +7975-280057-0003-1011: ref=['I', 'CANNOT', 'REMEMBER', 'WHEN', 'I', 'DID', 'NOT', 'KNOW', 'HOW', 'TO', 'SHOOT'] +7975-280057-0003-1011: hyp=['I', 'CANNOT', 'REMEMBER', 'WHEN', 'I', 'DID', 'NOT', 'KNOW', 'HOW', 'TO', 'SHOOT'] +7975-280057-0004-1012: ref=['MY', 'BROTHER', 'JAMES', 'WAS', 'BORN', 'JANUARY', 'FIFTEENTH', 'EIGHTEEN', 'FORTY', 'EIGHT', 'JOHN', 'IN', 'EIGHTEEN', 'FIFTY', 'ONE', 'AND', 'ROBERT', 'IN', 'DECEMBER', 'EIGHTEEN', 'FIFTY', 'THREE'] +7975-280057-0004-1012: hyp=['MY', 'BROTHER', 'JAMES', 'WAS', 'BORN', 'JANUARY', 'FIFTEENTH', 'EIGHTEEN', 'FORTY', 'EIGHT', 'JOHN', 'IN', 'EIGHTEEN', 'FIFTY', 'ONE', 'AND', 'ROBERT', 'IN', 'DECEMBER', 'EIGHTEEN', 'FIFTY', 'THREE'] +7975-280057-0005-1013: ref=['MY', 'ELDEST', 'BROTHER', 'RICHARD', 'DIED', 'IN', 'EIGHTEEN', 'SIXTY'] +7975-280057-0005-1013: hyp=['MY', 'ELDEST', 'BROTHER', 'RICHARD', 'DIED', 'IN', 'EIGHTEEN', 'SIXTY'] +7975-280057-0006-1014: ref=['MY', 'FATHER', 'WAS', 'IN', 'THE', 'EMPLOY', 'OF', 'THE', 'UNITED', 'STATES', 'GOVERNMENT', 'AND', 'HAD', 'THE', 'MAIL', 'CONTRACT', 'FOR', 'FIVE', 'HUNDRED', 'MILES'] +7975-280057-0006-1014: hyp=['MY', 'FATHER', 'WAS', 'IN', 'THE', 'EMPLOY', 'OF', 'THE', 'UNITED', 'STATES', 'GOVERNMENT', 'AND', 'HAD', 'THE', 'MALE', 'CONTRACT', 
'FOR', 'FIVE', 'HUNDRED', 'MILES'] +7975-280057-0007-1015: ref=['HE', 'HAD', 'STARTED', 'BACK', 'TO', 'HARRISONVILLE', 'IN', 'A', 'BUGGY', 'BUT', 'WAS', 'WAYLAID', 'ONE', 'MILE', 'SOUTH', 'OF', 'WESTPORT', 'A', 'SUBURB', 'OF', 'KANSAS', 'CITY', 'AND', 'BRUTALLY', 'MURDERED', 'FALLING', 'OUT', 'OF', 'HIS', 'BUGGY', 'INTO', 'THE', 'ROAD', 'WITH', 'THREE', 'MORTAL', 'BULLET', 'WOUNDS'] +7975-280057-0007-1015: hyp=['HE', 'HAD', 'STARTED', 'BACK', 'TO', 'HARRISONVILLE', 'IN', 'A', 'BUGGY', 'BUT', 'WAS', 'WAYLAID', 'ONE', 'MILE', 'SOUTH', 'OF', 'WESTBURT', 'A', 'SUBURB', 'OF', 'KANSA', 'CITY', 'AND', 'BRUTALLY', 'MURDERED', 'FALLING', 'OUT', 'OF', 'HIS', 'BUGGY', 'INTO', 'THE', 'ROAD', 'WITH', 'THREE', 'MORTAL', 'BULLET', 'WOUNDS'] +7975-280057-0008-1016: ref=['MISSUS', 'WASHINGTON', 'WELLS', 'AND', 'HER', 'SON', 'SAMUEL', 'ON', 'THE', 'ROAD', 'HOME', 'FROM', 'KANSAS', 'CITY', 'TO', "LEE'S", 'SUMMIT', 'RECOGNIZED', 'THE', 'BODY', 'AS', 'THAT', 'OF', 'MY', 'FATHER'] +7975-280057-0008-1016: hyp=['MISS', 'WASHINGTON', 'WALES', 'AND', 'HER', 'SON', 'SAMUEL', 'ON', 'THE', 'ROAD', 'HOME', 'FROM', 'KANSA', 'CITY', 'TO', 'LEE', 'SOMEWHAT', 'RECOGNIZED', 'THE', 'BODY', 'AS', 'THAT', 'OF', 'MY', 'FATHER'] +7975-280057-0009-1017: ref=['MISSUS', 'WELLS', 'STAYED', 'TO', 'GUARD', 'THE', 'REMAINS', 'WHILE', 'HER', 'SON', 'CARRIED', 'THE', 'NEWS', 'OF', 'THE', 'MURDER', 'TO', 'COLONEL', 'PEABODY', 'OF', 'THE', 'FEDERAL', 'COMMAND', 'WHO', 'WAS', 'THEN', 'IN', 'CAMP', 'AT', 'KANSAS', 'CITY'] +7975-280057-0009-1017: hyp=['MISS', 'WELL', 'STAYED', 'TO', 'GUARD', 'THE', 'REMAINS', 'WHILE', 'HER', 'SOON', 'CARRIED', 'THE', 'NEWS', 'OF', 'THE', 'MURDER', 'TO', 'COLONEL', 'PEABODY', 'OF', 'THE', 'FEDERAL', 'COMMAND', 'WHO', 'WAS', 'THEN', 'ENCAMP', 'AT', 'KANSAS', 'CITY'] +7975-280057-0010-1018: ref=['MISSUS', 'MC', 'CORKLE', 'JUMPED', 'FROM', 'THE', 'WINDOW', 'OF', 'THE', 'HOUSE', 'AND', 'ESCAPED'] +7975-280057-0010-1018: hyp=['MISS', 'MC', 'CORKEL', 'JUMPED', 'FROM', 'THE', 'WINDOW', 'OF', 'THE', 'HOUSE', 'AND', 'ESCAPED'] +7975-280057-0011-1019: ref=['AS', 'THE', 'RAIDERS', 'LEFT', 'ONE', 'OF', 'THEM', 'SHOUTED'] +7975-280057-0011-1019: hyp=['AS', 'THE', 'RAIDERS', 'LIVED', 'ONE', 'OF', 'THEM', 'SHOUTED'] +7975-280057-0012-1020: ref=['NOW', 'OLD', 'LADY', 'CALL', 'ON', 'YOUR', 'PROTECTORS', 'WHY', "DON'T", 'YOU', 'CALL', 'ON', 'COLE', 'YOUNGER', 'NOW'] +7975-280057-0012-1020: hyp=['NOW', 'LADY', 'CALL', 'ON', 'YOUR', 'PROTECTORS', 'WHY', "DON'T", 'YOU', 'CALL', 'ON', 'CO', 'YOUNGER', 'NOW'] +7975-280057-0013-1021: ref=['EVERY', 'KNOT', 'REPRESENTED', 'A', 'HUMAN', 'LIFE'] +7975-280057-0013-1021: hyp=['EVERY', 'KNOT', 'REPRESENTED', 'A', 'HUMAN', 'LIFE'] +7975-280057-0014-1022: ref=['BUT', 'SHE', 'FAILED', 'TO', 'FIND', 'THE', 'COMFORT', 'SHE', 'SOUGHT', 'FOR', 'ANNOYANCES', 'CONTINUED', 'IN', 'A', 'MORE', 'AGGRAVATED', 'FORM'] +7975-280057-0014-1022: hyp=['BUT', 'SHE', 'FAILED', 'TO', 'FIND', 'THE', 'COMFORT', 'SHE', 'SOUGHT', 'FOR', 'ANNOYANCES', 'CONTINUED', 'IN', 'A', 'MORE', 'AGGRAVATED', 'FORM'] +7975-280057-0015-1023: ref=['TWO', 'MONTHS', 'AFTER', 'THIS', 'INCIDENT', 'THE', 'SAME', 'PERSECUTORS', 'AGAIN', 'ENTERED', 'OUR', 'HOME', 'IN', 'THE', 'DEAD', 'OF', 'THE', 'NIGHT', 'AND', 'AT', 'THE', 'POINT', 'OF', 'A', 'PISTOL', 'TRIED', 'TO', 'FORCE', 'MY', 'MOTHER', 'TO', 'SET', 'FIRE', 'TO', 'HER', 'OWN', 'HOME'] +7975-280057-0015-1023: hyp=['TWO', 'MONTHS', 'AFTER', 'THIS', 'INCIDENT', 'THE', 'SAME', 'PERSECUTORS', 'AGAIN', 'ENTERED', 'OUR', 'HOME', 'IN', 'THE', 'DAY', 'OF', 'THE', 'NIGHT', 'AND', 'AT', 'THE', 
'POINT', 'OF', 'A', 'PISTOL', 'TRIED', 'TO', 'FORCE', 'MY', 'MOTHER', 'TO', 'SET', 'FIRE', 'TO', 'HER', 'OWN', 'HOME'] +7975-280057-0016-1024: ref=['I', 'HAVE', 'ALWAYS', 'FELT', 'THAT', 'THE', 'EXPOSURE', 'TO', 'WHICH', 'SHE', 'WAS', 'SUBJECTED', 'ON', 'THIS', 'CRUEL', 'JOURNEY', 'TOO', 'HARD', 'EVEN', 'FOR', 'A', 'MAN', 'TO', 'TAKE', 'WAS', 'THE', 'DIRECT', 'CAUSE', 'OF', 'HER', 'DEATH'] +7975-280057-0016-1024: hyp=['I', 'HAVE', 'ALWAYS', 'FELT', 'THAT', 'THE', 'EXPOSURE', 'TO', 'WHICH', 'SHE', 'WAS', 'SUBJECTED', 'ON', 'THIS', 'CRUEL', 'JOURNEY', 'TOO', 'HARD', 'EVEN', 'FOR', 'A', 'MAN', 'TO', 'TAKE', 'WAS', 'A', 'DIRECT', 'CAUSE', 'OF', 'HER', 'DEATH'] +7975-280057-0017-1025: ref=['FROM', 'HARRISONVILLE', 'SHE', 'WENT', 'TO', 'WAVERLY', 'WHERE', 'SHE', 'WAS', 'HOUNDED', 'CONTINUALLY'] +7975-280057-0017-1025: hyp=['FROM', 'HARRISON', 'BILL', 'SHE', 'WENT', 'TO', 'WAVERLEY', 'WHERE', 'SHE', 'WAS', 'HOUNDY', 'CONTINUALLY'] +7975-280057-0018-1026: ref=['ONE', 'OF', 'THE', 'CONDITIONS', 'UPON', 'WHICH', 'HER', 'LIFE', 'WAS', 'SPARED', 'WAS', 'THAT', 'SHE', 'WOULD', 'REPORT', 'AT', 'LEXINGTON', 'WEEKLY'] +7975-280057-0018-1026: hyp=['ONE', 'OF', 'THE', 'CONDITIONS', 'UPON', 'WHICH', 'HER', 'LIFE', 'WAS', 'SPARED', 'WAS', 'THAT', 'SHE', 'WOULD', 'REPORT', 'AT', 'LESSINGTON', 'WEAKLY'] +7975-280057-0019-1027: ref=['ONE', 'OF', 'MY', 'OLD', 'SCHOOL', 'TEACHERS', 'WHOM', 'I', 'HAVE', 'NEVER', 'SEEN', 'SINCE', 'THE', 'SPRING', 'OR', 'SUMMER', 'OF', 'EIGHTEEN', 'SIXTY', 'TWO', 'IS', 'STEPHEN', 'B', 'ELKINS', 'SENATOR', 'FROM', 'WEST', 'VIRGINIA'] +7975-280057-0019-1027: hyp=['ONE', 'OF', 'MY', 'OLD', 'SCHOOL', 'TEACHERS', 'WHOM', 'I', 'HAVE', 'NEVER', 'SEEN', 'SINCE', 'THE', 'SPRING', 'OF', 'SUMMER', 'OF', 'EIGHTEEN', 'SIXTY', 'TWO', 'IS', 'STEPHEN', 'B', 'ELKINS', 'SENATOR', 'FROM', 'WEST', 'VIRGINIA'] +7975-280057-0020-1028: ref=['WHEN', 'I', 'WAS', 'TAKEN', 'PRISONER', 'I', 'EXPECTED', 'TO', 'BE', 'SHOT', 'WITHOUT', 'CEREMONY'] +7975-280057-0020-1028: hyp=['WHEN', 'I', 'WAS', 'TAKEN', 'PRISONER', 'I', 'EXPECTED', 'TO', 'BE', 'SHOT', 'WITHOUT', 'CEREMONY'] +7975-280063-0000-1058: ref=['WE', 'TOOK', 'THE', 'OATH', 'PERHAPS', 'THREE', 'HUNDRED', 'OF', 'US', 'DOWN', 'ON', 'LUTHER', "MASON'S", 'FARM', 'A', 'FEW', 'MILES', 'FROM', 'WHERE', 'I', 'NOW', 'WRITE', 'WHERE', 'COLONEL', 'HAYS', 'HAD', 'ENCAMPED', 'AFTER', 'INDEPENDENCE'] +7975-280063-0000-1058: hyp=['WE', 'TOOK', 'THE', 'OATH', 'PERHAPS', 'THREE', 'HUNDRED', 'OF', 'US', 'DOWN', 'ON', 'LUTHER', "MASON'S", 'FARM', 'A', 'FEW', 'MILES', 'FROM', 'WHERE', 'I', 'NOW', 'RIGHT', 'WHERE', 'COLONEL', 'HAYES', 'HAD', 'ENCAMPED', 'AFTER', 'INDEPENDENCE'] +7975-280063-0001-1059: ref=['BOONE', 'MUIR', 'AND', 'MYSELF', 'MET', 'COFFEE', 'AND', 'THE', 'REST', 'BELOW', 'ROSE', 'HILL', 'ON', 'GRAND', 'RIVER'] +7975-280063-0001-1059: hyp=['WHOM', 'YOU', 'AND', 'MYSELF', 'MAKE', 'COFFEE', 'AND', 'THE', 'REST', 'BELOW', 'ROSE', 'HILL', 'ON', 'GRAND', 'RIVER'] +7975-280063-0002-1060: ref=['ACCORDINGLY', 'I', 'WAS', 'SHORTLY', 'AWAKENED', 'TO', 'ACCOMPANY', 'HIM', 'TO', 'LONE', 'JACK', 'WHERE', 'HE', 'WOULD', 'PERSONALLY', 'MAKE', 'KNOWN', 'THE', 'SITUATION', 'TO', 'THE', 'OTHER', 'COLONELS'] +7975-280063-0002-1060: hyp=['ACCORDINGLY', 'I', 'WAS', 'SHORTLY', 'AWAKENED', 'TO', 'ACCOMPANY', 'HIM', 'THE', 'LONG', 'JACK', 'WHERE', 'HE', 'WOULD', 'PERSONALLY', 'MAKE', 'KNOWN', 'THE', 'SITUATION', 'TO', 'THE', 'OTHER', 'COLONELS'] +7975-280063-0003-1061: ref=['FOSTER', 'HAD', 'NEARLY', 'ONE', 'THOUSAND', 'CAVALRYMEN', 'AND', 'TWO', 'PIECES', 'OF', "RABB'S", 'INDIANA', 
'BATTERY', 'THAT', 'HAD', 'ALREADY', 'MADE', 'FOR', 'ITSELF', 'A', 'NAME', 'FOR', 'HARD', 'FIGHTING'] +7975-280063-0003-1061: hyp=['FOSTER', 'HAD', 'NEARLY', 'ONE', 'THOUSAND', 'CAVERN', 'AND', 'TWO', 'PIECES', 'OF', 'RABS', 'INDIANA', 'BATTERY', 'THAT', 'HAD', 'ALREADY', 'MADE', 'FOR', 'ITSELF', 'A', 'NAME', 'FOR', 'HARD', 'FIGHTING'] +7975-280063-0004-1062: ref=['COME', 'IN', 'COLONEL', 'HAYS', 'EXCLAIMED', 'COLONEL', 'COCKRELL'] +7975-280063-0004-1062: hyp=['COME', 'IN', 'COLONEL', 'HAYES', 'EXCLAIMED', 'COLONEL', 'COCKROL'] +7975-280063-0005-1063: ref=['I', 'THINK', "HE'LL", 'BE', 'RATHER', 'TOUGH', 'MEAT', 'FOR', 'BREAKFAST', 'I', 'REPLIED', 'HE', 'MIGHT', 'BE', 'ALL', 'RIGHT', 'FOR', 'DINNER'] +7975-280063-0005-1063: hyp=['I', 'THINK', "HE'LL", 'BE', 'RATHER', 'TO', 'HAVE', 'ME', 'FOR', 'BREAKFAST', 'I', 'REPLIED', 'HE', 'MIGHT', 'BE', 'ALL', 'RIPER', 'DINNER'] +7975-280063-0006-1064: ref=['JACKMAN', 'WITH', 'A', 'PARTY', 'OF', 'THIRTY', 'SEASONED', 'MEN', 'CHARGED', 'THE', 'INDIANA', 'GUNS', 'AND', 'CAPTURED', 'THEM', 'BUT', 'MAJOR', 'FOSTER', 'LED', 'A', 'GALLANT', 'CHARGE', 'AGAINST', 'THE', 'INVADERS', 'AND', 'RECAPTURED', 'THE', 'PIECES'] +7975-280063-0006-1064: hyp=['JACKMEN', 'WITH', 'A', 'PARTY', 'OF', 'THIRTY', 'SEASONED', 'MEN', 'CHARGED', 'THE', 'INDIANA', 'GUNS', 'AND', 'CAPTURED', 'THEM', 'BUT', 'MAJOR', 'FOXTER', 'LIT', 'A', 'GALLANT', 'CHARGE', 'AGAINST', 'THE', 'INVADERS', 'AND', 'RECAPTURED', 'THE', 'PIECES'] +7975-280063-0007-1065: ref=['WE', 'WERE', 'OUT', 'OF', 'AMMUNITION', 'AND', 'WERE', 'HELPLESS', 'HAD', 'THE', 'FIGHT', 'BEEN', 'PRESSED'] +7975-280063-0007-1065: hyp=['WE', 'WERE', 'OUT', 'OF', 'AMMUNITION', 'AND', 'WERE', 'HELPLESS', 'HAD', 'THE', 'FIGHT', 'BEEN', 'PRESSED'] +7975-280063-0008-1066: ref=['THEY', 'DID', 'MARK', 'MY', 'CLOTHES', 'IN', 'ONE', 'OR', 'TWO', 'PLACES', 'HOWEVER'] +7975-280063-0008-1066: hyp=['THEY', 'DID', 'MARK', 'MY', 'CLOTHES', 'IN', 'ONE', 'OR', 'TWO', 'PLACES', 'HOWEVER'] +7975-280063-0009-1067: ref=['MAJOR', 'FOSTER', 'IN', 'A', 'LETTER', 'TO', 'JUDGE', 'GEORGE', 'M', 'BENNETT', 'OF', 'MINNEAPOLIS', 'SAID'] +7975-280063-0009-1067: hyp=['MEASURE', 'FOSTER', 'IN', 'A', 'LETTER', 'TO', 'JOE', 'GEORGIUM', 'BENNET', 'OF', 'MINNEAPOLIS', 'SAID'] +7975-280063-0010-1068: ref=['I', 'WAS', 'TOLD', 'BY', 'SOME', 'OF', 'OUR', 'MEN', 'FROM', 'THE', 'WESTERN', 'BORDER', 'OF', 'THE', 'STATE', 'THAT', 'THEY', 'RECOGNIZED', 'THE', 'DARING', 'YOUNG', 'RIDER', 'AS', 'COLE', 'YOUNGER'] +7975-280063-0010-1068: hyp=['I', 'WAS', 'TOLD', 'BY', 'SOME', 'OF', 'OUR', 'MEN', 'FROM', 'THE', 'WESTERN', 'BORDER', 'OF', 'THE', 'STATE', 'THAT', 'THEY', 'RECOGNIZED', 'A', 'DARING', 'OWN', "WRITER'S", 'COAL', 'YOUNGER'] +7975-280063-0011-1069: ref=['ABOUT', 'NINE', 'THIRTY', 'A', 'M', 'I', 'WAS', 'SHOT', 'DOWN'] +7975-280063-0011-1069: hyp=['ABOUT', 'NINE', 'THIRTY', 'A', 'M', 'I', 'WAS', 'SHOT', 'DOWN'] +7975-280063-0012-1070: ref=['THE', 'WOUNDED', 'OF', 'BOTH', 'FORCES', 'WERE', 'GATHERED', 'UP', 'AND', 'WERE', 'PLACED', 'IN', 'HOUSES'] +7975-280063-0012-1070: hyp=['THE', 'WOUNDED', 'OF', 'BOTH', 'FORCES', 'WERE', 'GATHERED', 'UP', 'AND', 'WERE', 'PLACED', 'IN', 'HOUSES'] +7975-280076-0000-1029: ref=['ALTHOUGH', 'EVERY', 'BOOK', 'PURPORTING', 'TO', 'NARRATE', 'THE', 'LIVES', 'OF', 'THE', 'YOUNGER', 'BROTHERS', 'HAS', 'TOLD', 'OF', 'THE', 'LIBERTY', 'ROBBERY', 'AND', 'IMPLIED', 'THAT', 'WE', 'HAD', 'A', 'PART', 'IN', 'IT', 'THE', 'YOUNGERS', 'WERE', 'NOT', 'SUSPECTED', 'AT', 'THAT', 'TIME', 'NOR', 'FOR', 'A', 'LONG', 'TIME', 'AFTERWARD'] +7975-280076-0000-1029: 
hyp=['ALTHOUGH', 'EVERY', 'BOOK', 'PORPORTING', 'TO', 'THEIR', 'EIGHTH', 'LIVES', 'OF', 'THE', 'YOUNGER', 'BROTHERS', 'HAS', 'TOLD', 'THAT', 'THE', 'LIBERTY', 'ROBBERY', 'AND', 'IMPLIED', 'THAT', 'WE', 'HAD', 'A', 'PART', 'IN', 'IT', 'THE', 'YOUNGERS', 'WERE', 'NOT', 'SUSPECTED', 'AT', 'THAT', 'TIME', 'NOR', 'FOR', 'A', 'LONG', 'TIME', 'AFTERWARD'] +7975-280076-0001-1030: ref=['IT', 'WAS', 'CLAIMED', 'BY', 'PEOPLE', 'OF', 'LIBERTY', 'THAT', 'THEY', 'POSITIVELY', 'RECOGNIZED', 'AMONG', 'THE', 'ROBBERS', 'OLL', 'SHEPHERD', 'RED', 'MONKERS', 'AND', 'BUD', 'PENCE', 'WHO', 'HAD', 'SEEN', 'SERVICE', 'WITH', 'QUANTRELL'] +7975-280076-0001-1030: hyp=['IT', 'WAS', 'CLAIMED', 'BY', 'PEOPLE', 'OF', 'LIBERTY', 'THAT', 'THEY', 'POSIT', 'TILL', 'WE', 'RECOGNIZED', 'AMONG', 'THE', 'ROBBERS', 'ALL', 'SHEPARD', 'RED', 'MOCKERS', 'AND', 'BUD', 'PANTS', 'WHO', 'HAD', 'SEEN', 'SERVICE', 'WITH', 'QUANTRAIL'] +7975-280076-0002-1031: ref=['THIS', 'RAID', 'WAS', 'ACCOMPANIED', 'BY', 'BLOODSHED', 'JUDGE', 'MC', 'LAIN', 'THE', 'BANKER', 'BEING', 'SHOT', 'THOUGH', 'NOT', 'FATALLY'] +7975-280076-0002-1031: hyp=['THIS', 'RAY', 'WAS', 'ACCOMPANIED', 'BY', 'BLOCHHEAD', 'JUDGE', 'MC', 'LANE', 'THE', 'BANKER', 'BEING', 'SHOT', 'THOUGH', 'NOT', 'FATALLY'] +7975-280076-0003-1032: ref=['NO', 'WARRANT', 'WAS', 'ISSUED', 'FOR', 'THE', 'YOUNGERS', 'BUT', 'SUBSEQUENT', 'HISTORIANS', 'HAVE', 'INFERENTIALLY', 'AT', 'LEAST', 'ACCUSED', 'US', 'OF', 'TAKING', 'PART', 'BUT', 'AS', 'I', 'SAID', 'BEFORE', 'THERE', 'IS', 'NO', 'TRUTH', 'IN', 'THE', 'ACCUSATION'] +7975-280076-0003-1032: hyp=['THOUGH', 'WARRANT', 'WAS', 'ISSUED', 'FOR', 'THE', 'YOUNGERS', 'BUT', 'SUBSEQUENT', 'HISTORIANS', 'HAVE', 'INFERENTIALLY', 'AT', 'LEAST', 'ACCUSED', 'US', 'OF', 'TAKING', 'PART', 'BUT', 'AS', 'I', 'SAID', 'BEFORE', 'THERE', 'IS', 'NO', 'TRUTH', 'IN', 'THE', 'ACCUSATION'] +7975-280076-0004-1033: ref=['JUNE', 'THIRD', 'EIGHTEEN', 'SEVENTY', 'ONE', 'OBOCOCK', 'BROTHERS', 'BANK', 'AT', 'CORYDON', 'IOWA', 'WAS', 'ROBBED', 'OF', 'FORTY', 'THOUSAND', 'DOLLARS', 'BY', 'SEVEN', 'MEN', 'IN', 'BROAD', 'DAYLIGHT'] +7975-280076-0004-1033: hyp=['JUNE', 'THIRD', 'EIGHTEEN', 'SEVENTY', 'ONE', 'OBEK', "BROTHER'S", 'BANK', 'AT', 'CROYD', 'AND', 'IOWA', 'WAS', 'ROBBED', 'OF', 'FORTY', 'THOUSAND', 'DOLLARS', 'BY', 'SEVEN', 'MEN', 'IN', 'BROAD', 'DAYLIGHT'] +7975-280076-0005-1034: ref=['IT', 'WAS', 'CHARGED', 'THAT', 'ARTHUR', 'MC', 'COY', 'OR', 'A', 'C', 'MC', 'COY', 'AND', 'MYSELF', 'HAD', 'BEEN', 'PARTICIPANTS', 'IN', 'THE', "GAD'S", 'HILL', 'AFFAIR', 'AND', 'THE', 'TWO', 'STAGE', 'ROBBERIES'] +7975-280076-0005-1034: hyp=['IT', 'WAS', 'CHARGE', 'THAT', 'AWFUL', 'MAC', 'COY', 'OR', 'A', 'SEMICA', 'MYSELF', 'HAD', 'BEEN', 'PARTICIPANTS', 'IN', 'THE', "GAD'S", 'HILL', 'AFFAIR', 'AND', 'THE', 'TWO', 'STAGE', 'ROBBERS'] +7975-280076-0006-1035: ref=['THE', 'PARTS', 'OF', 'THIS', 'LETTER', 'NOW', 'RELEVANT', 'ARE', 'AS', 'FOLLOWS'] +7975-280076-0006-1035: hyp=['THE', 'PART', 'OF', 'THIS', 'LETTER', 'NOW', 'ELEVANT', 'OUR', 'AS', 'FOLLOWS'] +7975-280076-0007-1036: ref=['YOU', 'MAY', 'USE', 'THIS', 'LETTER', 'IN', 'YOUR', 'OWN', 'WAY'] +7975-280076-0007-1036: hyp=['YOU', 'MAY', 'USE', 'THIS', 'LETTER', 'IN', 'YOUR', 'OWN', 'WAY'] +7975-280076-0008-1037: ref=['I', 'WILL', 'GIVE', 'YOU', 'THIS', 'OUTLINE', 'AND', 'SKETCH', 'OF', 'MY', 'WHEREABOUTS', 'AND', 'ACTIONS', 'AT', 'THE', 'TIME', 'OF', 'CERTAIN', 'ROBBERIES', 'WITH', 'WHICH', 'I', 'AM', 'CHARGED'] +7975-280076-0008-1037: hyp=['I', 'WILL', 'GIVE', 'YOU', 'THIS', 'OUTLINE', 'AND', 'SKETCH', 'OF', 'MY', 'WHEREABOUTS', 
'AND', 'ACTIONS', 'AT', 'THE', 'TIME', 'OF', 'CERTAIN', 'ROBBERS', 'WITH', 'WHICH', 'I', 'AM', 'CHARGED'] +7975-280076-0009-1038: ref=['AT', 'THE', 'TIME', 'OF', 'THE', 'GALLATIN', 'BANK', 'ROBBERY', 'I', 'WAS', 'GATHERING', 'CATTLE', 'IN', 'ELLIS', 'COUNTY', 'TEXAS', 'CATTLE', 'THAT', 'I', 'BOUGHT', 'FROM', 'PLEAS', 'TAYLOR', 'AND', 'RECTOR'] +7975-280076-0009-1038: hyp=['AT', 'THE', 'TIME', 'OF', 'THE', 'YELLED', 'AND', 'BANK', 'ROBBERY', 'I', 'WAS', 'GATHERING', 'CATTLE', 'AND', 'ELLIS', 'COUNTY', 'TEXAS', 'CATTLET', 'ABOUT', 'FROM', 'PLAYERS', 'TAILOR', 'AND', 'RECTOR'] +7975-280076-0010-1039: ref=['THIS', 'CAN', 'BE', 'PROVED', 'BY', 'BOTH', 'OF', 'THEM', 'ALSO', 'BY', 'SHERIFF', 'BARKLEY', 'AND', 'FIFTY', 'OTHER', 'RESPECTABLE', 'MEN', 'OF', 'THAT', 'COUNTY'] +7975-280076-0010-1039: hyp=['THIS', 'CAN', 'BE', 'PROVED', 'BY', 'BOTH', 'OF', 'THEM', 'ALSO', 'BY', 'HYR', 'PARKLEY', 'AND', 'FIFTY', 'OTHER', 'RESPECTABLE', 'MEN', 'OF', 'THAT', 'COUNTY'] +7975-280076-0011-1040: ref=['I', 'BROUGHT', 'THE', 'CATTLE', 'TO', 'KANSAS', 'THAT', 'FALL', 'AND', 'REMAINED', 'IN', 'SAINT', 'CLAIR', 'COUNTY', 'UNTIL', 'FEBRUARY'] +7975-280076-0011-1040: hyp=['ABRUPT', 'THE', 'CATTLE', 'THE', 'KANSAS', 'SET', 'FALL', 'AND', 'REMAINED', 'IN', 'SAINT', 'CLAIR', 'COUNTY', 'UNTIL', 'FEBRUARY'] +7975-280076-0012-1041: ref=['I', 'THEN', 'WENT', 'TO', 'ARKANSAS', 'AND', 'RETURNED', 'TO', 'SAINT', 'CLAIR', 'COUNTY', 'ABOUT', 'THE', 'FIRST', 'OF', 'MAY'] +7975-280076-0012-1041: hyp=['AND', 'THEN', 'WENT', 'TO', 'OUR', 'CONCERN', 'RETURNED', 'TO', 'SAINT', 'CLAIR', 'COUNTY', 'ABOUT', 'THE', 'FIRST', 'OF', 'MAY'] +7975-280076-0013-1042: ref=['I', 'WENT', 'TO', 'KANSAS', 'WHERE', 'OUR', 'CATTLE', 'WERE', 'IN', 'WOODSON', 'COUNTY', 'AT', 'COLONEL', "RIDGE'S"] +7975-280076-0013-1042: hyp=['AND', 'WENT', 'TO', 'KANSAS', 'WHERE', 'KETTLE', 'WERE', 'IN', 'WOODS', 'AND', 'COUNTY', 'AT', 'COLONEL', 'RIDGES'] +7975-280076-0014-1043: ref=['DURING', 'THE', 'SUMMER', 'I', 'WAS', 'EITHER', 'IN', 'SAINT', 'CLAIR', 'JACKSON', 'OR', 'KANSAS', 'BUT', 'AS', 'THERE', 'WAS', 'NO', 'ROBBERY', 'COMMITTED', 'THAT', 'SUMMER', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'WHERE', 'I', 'WAS'] +7975-280076-0014-1043: hyp=['DURING', 'THE', 'SUMMER', 'I', 'WAS', 'EITHER', 'IN', 'SAINT', 'CLAIR', "JACK'S", 'UNDER', 'KANSAS', 'BUT', 'AS', 'THERE', 'WAS', 'NO', 'ROBBERY', 'COMMITTED', 'THAT', 'SUMMER', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'WHERE', 'I', 'WAS'] +7975-280076-0015-1044: ref=['I', 'WENT', 'THROUGH', 'INDEPENDENCE', 'AND', 'FROM', 'THERE', 'TO', 'ACE', "WEBB'S"] +7975-280076-0015-1044: hyp=['AND', 'WENT', 'THROUGH', 'INDEPENDENCE', 'AND', 'FROM', 'THERE', 'TO', 'ACE', 'WHIPS'] +7975-280076-0016-1045: ref=['THERE', 'I', 'TOOK', 'DINNER', 'AND', 'THEN', 'WENT', 'TO', 'DOCTOR', 'L', 'W', "TWYMAN'S"] +7975-280076-0016-1045: hyp=['THERE', 'I', 'TOOK', 'DINNER', 'AND', 'THEN', 'WENT', 'TO', 'DOCTOR', 'L', 'W', 'TWEMINS'] +7975-280076-0017-1046: ref=['OUR', 'BUSINESS', 'THERE', 'WAS', 'TO', 'SEE', 'E', 'P', 'WEST', 'HE', 'WAS', 'NOT', 'AT', 'HOME', 'BUT', 'THE', 'FAMILY', 'WILL', 'REMEMBER', 'THAT', 'WE', 'WERE', 'THERE'] +7975-280076-0017-1046: hyp=['OUR', 'BUSINESS', 'THERE', 'WAS', 'TO', 'SEE', 'E', 'WEST', 'HE', 'WAS', 'NOT', 'AT', 'HOME', 'BUT', 'THE', 'FAMILY', 'WILL', 'REMEMBER', 'THAT', 'WE', 'WERE', 'THERE'] +7975-280076-0018-1047: ref=['WE', 'CROSSED', 'ON', 'THE', 'BRIDGE', 'STAYED', 'IN', 'THE', 'CITY', 'ALL', 'NIGHT', 'AND', 'THE', 'NEXT', 'MORNING', 'WE', 'RODE', 'UP', 'THROUGH', 'THE', 'CITY'] +7975-280076-0018-1047: hyp=['WE', 'CROSSED', 
'ON', 'THE', 'BRIDGE', 'STATING', 'THE', 'CITY', 'ALL', 'NIGHT', 'AND', 'THE', 'NEXT', 'MORNING', 'WE', 'RODE', 'UP', 'TO', 'THE', 'CITY']
+7975-280076-0019-1048: ref=['I', 'MET', 'SEVERAL', 'OF', 'MY', 'FRIENDS', 'AMONG', 'THEM', 'WAS', 'BOB', 'HUDSPETH']
+7975-280076-0019-1048: hyp=['AMID', 'SEVERAL', 'OF', 'MY', 'FRIENDS', 'AMONG', 'THEM', 'WAS', 'BOB', 'HUSBITH']
+7975-280076-0020-1049: ref=['WE', 'WERE', 'NOT', 'ON', 'GOOD', 'TERMS', 'AT', 'THE', 'TIME', 'NOR', 'HAVE', 'WE', 'BEEN', 'FOR', 'SEVERAL', 'YEARS']
+7975-280076-0020-1049: hyp=['WE', 'WERE', 'NOT', 'ON', 'THE', 'TERMS', 'AT', 'THE', 'TIME', 'NOR', 'HAVE', 'WE', 'BEEN', 'FOR', 'SEVERAL', 'YEARS']
+7975-280076-0021-1050: ref=['POOR', 'JOHN', 'HE', 'HAS', 'BEEN', 'HUNTED', 'DOWN', 'AND', 'SHOT', 'LIKE', 'A', 'WILD', 'BEAST', 'AND', 'NEVER', 'WAS', 'A', 'BOY', 'MORE', 'INNOCENT']
+7975-280076-0021-1050: hyp=['POOR', 'JOHN', 'HE', 'HAS', 'BEEN', 'HUNTED', 'DOWN', 'AND', 'SHOT', 'LIKE', 'A', 'WILD', 'BEAST', 'AND', 'NEVER', 'WAS', 'A', 'BOY', 'MORE', 'INNOCENT']
+7975-280076-0022-1051: ref=['DOCTOR', 'L', 'LEWIS', 'WAS', 'HIS', 'PHYSICIAN']
+7975-280076-0022-1051: hyp=['DOCTOR', 'L', 'LOOSE', 'WAS', 'HIS', 'PHYSICIAN']
+7975-280076-0023-1052: ref=['THERE', 'WERE', 'FIFTY', 'OR', 'A', 'HUNDRED', 'PERSONS', 'THERE', 'WHO', 'WILL', 'TESTIFY', 'IN', 'ANY', 'COURT', 'THAT', 'JOHN', 'AND', 'I', 'WERE', 'THERE']
+7975-280076-0023-1052: hyp=['THERE', 'WERE', 'FIFTY', 'OR', 'A', 'HUNDRED', 'PERSONS', 'THERE', 'WHO', 'WILL', 'TESTIFY', 'IN', 'ANY', 'COURT', 'THAT', 'JOHN', 'AND', 'I', 'WERE', 'THERE']
+7975-280076-0024-1053: ref=['HELVIN', 'FICKLE', 'AND', 'WIFE', 'OF', 'GREENTON', 'VALLEY', 'WERE', 'ATTENDING', 'THE', 'SPRINGS', 'AT', 'THAT', 'TIME', 'AND', 'EITHER', 'OF', 'THEM', 'WILL', 'TESTIFY', 'TO', 'THE', 'ABOVE', 'FOR', 'JOHN', 'AND', 'I', 'SAT', 'IN', 'FRONT', 'OF', 'MISTER', 'SMITH', 'WHILE', 'HE', 'WAS', 'PREACHING', 'AND', 'WAS', 'IN', 'HIS', 'COMPANY', 'FOR', 'A', 'FEW', 'MOMENTS', 'TOGETHER', 'WITH', 'HIS', 'WIFE', 'AND', 'MISTER', 'AND', 'MISSUS', 'FICKLE', 'AFTER', 'SERVICE']
+7975-280076-0024-1053: hyp=['HELD', 'AND', 'FICKLE', 'AND', 'WIFE', 'OF', 'GREENTON', 'VALLEY', 'WERE', 'ATTENDING', 'THE', 'SPRINGS', 'AT', 'THAT', 'TIME', 'AND', 'EITHER', 'OF', 'THEM', 'WILL', 'TESTIFY', 'TO', 'THE', 'ABOVE', 'FOR', 'JOHN', 'AND', 'I', 'SET', 'IN', 'FRONT', 'OF', 'MISTER', 'SMITH', 'WHILE', 'HE', 'WAS', 'PREACHING', 'AND', 'WAS', 'IN', 'HIS', 'COMPANY', 'FOR', 'A', 'FEW', 'MOMENTS', 'TOGETHER', 'WITH', 'HIS', 'WIFE', 'AND', 'MISTER', 'AND', 'MISS', 'FICKLE', 'AFTER', 'THE', 'SERVICE']
+7975-280076-0025-1054: ref=['ABOUT', 'THE', 'LAST', 'OF', 'DECEMBER', 'EIGHTEEN', 'SEVENTY', 'THREE', 'I', 'ARRIVED', 'IN', 'CARROLL', 'PARISH', 'LOUISIANA']
+7975-280076-0025-1054: hyp=['ABOUT', 'THE', 'LAST', 'OF', 'DECEMBER', 'EIGHTEEN', 'SEVENTY', 'THREE', 'I', 'ARRIVED', 'IN', 'CAROL', 'PARRISH', 'LOUISIANA']
+7975-280076-0026-1055: ref=['I', 'STAYED', 'THERE', 'UNTIL', 'THE', 'EIGHTH', 'OF', 'FEBRUARY', 'EIGHTEEN', 'SEVENTY', 'FOUR']
+7975-280076-0026-1055: hyp=['I', 'STAYED', 'THERE', 'UNTIL', 'THE', 'EIGHTH', 'OF', 'FEBRUARY', 'EIGHTEEN', 'SEVENTY', 'FOUR']
+7975-280076-0027-1056: ref=['I', 'HAD', 'NOT', 'HEARD', 'OF', 'THAT', 'WHEN', 'I', 'WROTE', 'THE', 'LETTER', 'OF', 'EIGHTEEN', 'SEVENTY', 'FOUR', 'AND', 'TO', 'CORRECT', 'ANY', 'MISAPPREHENSION', 'THAT', 'MIGHT', 'BE', 'CREATED', 'BY', 'OMITTING', 'IT', 'I', 'WILL', 'SAY', 'THAT', 'AT', 'THAT', 'TIME', 'I', 'WAS', 'AT', 'NEOSHO', 'KANSAS', 'WITH', 'A', 'DROVE', 'OF', 'CATTLE', 'WHICH', 'I', 'SOLD', 'TO', 'MAJOR', 'RAY']
+7975-280076-0027-1056: hyp=['I', 'HAD', 'NOT', 'HEARD', 'OF', 'THAT', 'WHEN', 'I', 'WROTE', 'THE', 'LETTER', 'OF', 'EIGHTEEN', 'SEVENTY', 'FOUR', 'AND', 'TO', 'CORRECT', 'ANY', 'MISAPPREHENSION', 'THAT', 'MIGHT', 'BE', 'CREATED', 'BY', 'OMITTING', 'IT', 'I', 'WILL', 'SAY', 'THAT', 'AT', 'THE', 'TIME', 'I', 'WAS', 'AT', 'NEOSHIL', 'OF', 'KANSAS', 'WITH', 'A', 'DROVE', 'OF', 'CATTLE', 'WHICH', 'I', 'SOLD', 'TO', 'MAJOR', 'RAY']
+7975-280076-0028-1057: ref=['IT', 'WAS', 'IMMEDIATELY', 'FOLLOWING', 'THE', 'ROCK', 'ISLAND', 'ROBBERY', 'AT', 'ADAIR', 'IOWA', 'THAT', 'THERE', 'FIRST', 'APPEARED', 'A', 'DELIBERATE', 'ENLISTMENT', 'OF', 'SOME', 'LOCAL', 'PAPERS', 'IN', 'MISSOURI', 'TO', 'CONNECT', 'US', 'WITH', 'THIS', 'ROBBERY']
+7975-280076-0028-1057: hyp=['IT', 'WAS', 'IMMEDIATELY', 'FOLLOWING', 'THE', 'ROCK', 'ISLAND', 'ROBBERY', 'AT', 'EIGHT', 'ER', 'IOWA', 'THAT', 'THEIR', 'FIRST', 'APPEARED', 'A', 'DELIBERATE', 'ENLISTMENT', 'OF', 'SOME', 'LOCAL', 'PAPERS', 'IN', 'MISSOURI', 'TO', 'CONNECT', 'US', 'WITH', 'THIS', 'ROBBERY']
+7975-280084-0000-1090: ref=['I', 'URGED', 'ON', 'THE', 'BOYS', 'THAT', 'WHATEVER', 'HAPPENED', 'WE', 'SHOULD', 'NOT', 'SHOOT', 'ANY', 'ONE']
+7975-280084-0000-1090: hyp=['I', 'URGED', 'ON', 'THE', 'BOYS', 'AT', 'WHATEVER', 'HAPPEN', 'WE', 'SHOULD', 'NOT', 'SHOOT', 'ANY', 'ONE']
+7975-280084-0001-1091: ref=['WHEN', 'MILLER', 'AND', 'I', 'CROSSED', 'THE', 'BRIDGE', 'THE', 'THREE', 'WERE', 'ON', 'SOME', 'DRY', 'GOODS', 'BOXES', 'AT', 'THE', 'CORNER', 'NEAR', 'THE', 'BANK', 'AND', 'AS', 'SOON', 'AS', 'THEY', 'SAW', 'US', 'WENT', 'RIGHT', 'INTO', 'THE', 'BANK', 'INSTEAD', 'OF', 'WAITING', 'FOR', 'US', 'TO', 'GET', 'THERE']
+7975-280084-0001-1091: hyp=['WHEN', 'MILRON', 'I', 'CROSSED', 'THE', 'BRIDGE', 'THE', 'THREE', 'WERE', 'ON', 'SOME', 'DRY', 'GOOD', 'BOXES', 'AT', 'THE', 'CORNER', 'NEAR', 'THE', 'BANK', 'AND', 'AS', 'SOON', 'AS', 'THEY', 'SAW', 'US', 'WENT', 'RIGHT', 'INTO', 'THE', 'BANK', 'INSTEAD', 'OF', 'WAITING', 'FOR', 'US', 'TO', 'GET', 'THERE']
+7975-280084-0002-1092: ref=['WHEN', 'WE', 'CAME', 'UP', 'I', 'TOLD', 'MILLER', 'TO', 'SHUT', 'THE', 'BANK', 'DOOR', 'WHICH', 'THEY', 'HAD', 'LEFT', 'OPEN', 'IN', 'THEIR', 'HURRY']
+7975-280084-0002-1092: hyp=['WHEN', 'WE', 'CAME', 'UP', 'I', 'TOO', 'MILLER', 'TO', 'SHUT', 'THE', 'BANK', 'DOOR', 'WHICH', 'THEY', 'HAD', 'LEFT', 'OPEN', 'IN', 'THEIR', 'HURRY']
+7975-280084-0003-1093: ref=['J', 'S', 'ALLEN', 'WHOSE', 'HARDWARE', 'STORE', 'WAS', 'NEAR', 'TRIED', 'TO', 'GO', 'INTO', 'THE', 'BANK', 'BUT', 'MILLER', 'ORDERED', 'HIM', 'AWAY', 'AND', 'HE', 'RAN', 'AROUND', 'THE', 'CORNER', 'SHOUTING']
+7975-280084-0003-1093: hyp=['J', 'SALEN', 'WHOSE', 'HARDWORSTOE', 'WAS', 'NEAR', 'TRIED', 'TO', 'GO', 'INTO', 'THE', 'BANK', 'BUT', 'MILLER', 'ORDERED', 'HIM', 'AWAY', 'AND', 'HE', 'RAN', 'ROUND', 'THE', 'CORNER', 'SHOUTING']
+7975-280084-0004-1094: ref=['GET', 'YOUR', 'GUNS', 'BOYS', "THEY'RE", 'ROBBING', 'THE', 'BANK']
+7975-280084-0004-1094: hyp=['GET', 'YOUR', 'GUNS', 'BOYS', "THEY'RE", 'ROBBING', 'THE', 'BANK']
+7975-280084-0005-1095: ref=['AND', 'I', 'CALLED', 'TO', 'HIM', 'TO', 'GET', 'INSIDE', 'AT', 'THE', 'SAME', 'TIME', 'FIRING', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'AIR', 'AS', 'A', 'SIGNAL', 'TO', 'THE', 'THREE', 'BOYS', 'AT', 'THE', 'BRIDGE', 'THAT', 'WE', 'HAD', 'BEEN', 'DISCOVERED']
+7975-280084-0005-1095: hyp=['AND', 'I', 'CALLED', 'TO', 'HIM', 'TO', 'GET', 'INSIDE', 'AT', 'THE', 'SAME', 'TIME', 'FIRING', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'AIR', 'AS', 'A', 'SIGNAL', 'TO', 'THE', 'THREE', 'BOYS', 'AT', 'THE', 'BRIDGE', 'THAT', 'WE', 'HAD', 'BEEN', 'DISCOVERED']
+7975-280084-0006-1096: ref=['ALMOST', 'AT', 'THIS', 'INSTANT', 'I', 'HEARD', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'BANK']
+7975-280084-0006-1096: hyp=['ALMOST', 'AT', 'THIS', 'INSTANT', 'I', 'HEARD', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'BANK']
+7975-280084-0007-1097: ref=['CHADWELL', 'WOODS', 'AND', 'JIM', 'RODE', 'UP', 'AND', 'JOINED', 'US', 'SHOUTING', 'TO', 'PEOPLE', 'IN', 'THE', 'STREET', 'TO', 'GET', 'INSIDE', 'AND', 'FIRING', 'THEIR', 'PISTOLS', 'TO', 'EMPHASIZE', 'THEIR', 'COMMANDS']
+7975-280084-0007-1097: hyp=['SAID', 'WELL', 'WOODS', 'AND', 'JIM', 'RODE', 'UP', 'AND', 'JARS', 'SHOUTING', 'TO', 'THE', 'PEOPLE', 'IN', 'THE', 'STREET', 'TO', 'GET', 'INSIDE', 'AND', 'FIRING', 'THEIR', 'PISTOLS', 'TO', 'EMPHASIZE', 'THEIR', 'COMMANDS']
+7975-280084-0008-1098: ref=['IF', 'ANY', 'OF', 'OUR', 'PARTY', 'SHOT', 'HIM', 'IT', 'MUST', 'HAVE', 'BEEN', 'WOODS']
+7975-280084-0008-1098: hyp=['IF', 'ANY', 'OF', 'OUR', 'PARTY', 'SHOT', 'HIM', 'IT', 'MUST', 'HAVE', 'BEEN', 'WOODS']
+7975-280084-0009-1099: ref=['MEANTIME', 'THE', 'STREET', 'WAS', 'GETTING', 'UNCOMFORTABLY', 'HOT']
+7975-280084-0009-1099: hyp=['MEANTIME', 'THE', 'STREET', 'WAS', 'GETTING', 'UNCOMFORTABLY', 'HOT']
+7975-280084-0010-1100: ref=['EVERY', 'TIME', 'I', 'SAW', 'ANY', 'ONE', 'WITH', 'A', 'BEAD', 'ON', 'ME', 'I', 'WOULD', 'DROP', 'OFF', 'MY', 'HORSE', 'AND', 'TRY', 'TO', 'DRIVE', 'THE', 'SHOOTER', 'INSIDE', 'BUT', 'I', 'COULD', 'NOT', 'SEE', 'IN', 'EVERY', 'DIRECTION']
+7975-280084-0010-1100: hyp=['EVERY', 'TIME', 'I', 'SAW', 'ANY', 'ONE', 'WITH', 'A', 'BEAD', 'ON', 'ME', 'I', 'WOULD', 'DROP', 'OFF', 'MY', 'HORSE', 'AND', 'TROUT', 'TO', 'DRIVE', 'THE', 'SHEETTER', 'INSIDE', 'BUT', 'I', 'COULD', 'NOT', 'SEE', 'IN', 'EVERY', 'DIRECTION']
+7975-280084-0011-1101: ref=['DOCTOR', 'WHEELER', 'WHO', 'HAD', 'GONE', 'UPSTAIRS', 'IN', 'THE', 'HOTEL', 'SHOT', 'MILLER', 'AND', 'HE', 'LAY', 'DYING', 'IN', 'THE', 'STREET']
+7975-280084-0011-1101: hyp=['DOCTOR', 'WHALER', 'WHO', 'HAD', 'GONE', 'UPSTAIRS', 'IN', 'THE', 'HOTEL', 'SHOT', 'MILLER', 'AND', 'HE', 'LAY', 'DYING', 'IN', 'THE', 'STREET']
+7975-280084-0012-1102: ref=['CHANGING', 'HIS', 'PISTOL', 'TO', 'HIS', 'LEFT', 'HAND', 'BOB', 'RAN', 'OUT', 'AND', 'MOUNTED', "MILLER'S", 'MARE']
+7975-280084-0012-1102: hyp=['CHANGING', 'HIS', 'PISTOL', 'TO', 'HIS', 'LEFT', 'HAND', 'BOB', 'RAN', 'OUT', 'AND', 'MOUNTED', "MILLER'S", 'MAYOR']
+7975-280084-0013-1103: ref=['WHAT', 'KEPT', 'YOU', 'SO', 'LONG', 'I', 'ASKED', 'PITTS']
+7975-280084-0013-1103: hyp=['BUT', 'KEPT', 'YOU', 'SO', 'LONG', 'AS', 'PITT']
+7975-280084-0014-1104: ref=['AS', 'TO', 'THE', 'REST', 'OF', 'THE', 'AFFAIR', 'INSIDE', 'THE', 'BANK', 'I', 'TAKE', 'THE', 'ACCOUNT', 'OF', 'A', 'NORTHFIELD', 'NARRATOR']
+7975-280084-0014-1104: hyp=['AS', 'TO', 'THE', 'REST', 'OF', 'THE', 'AFFAIR', 'INSIDE', 'THE', 'BANK', 'I', 'TAKE', 'THE', 'ACCOUNT', 'OF', 'A', 'NORTH', 'FIELD', 'NARRATOR']
+7975-280084-0015-1105: ref=["WHERE'S", 'THE', 'MONEY', 'OUTSIDE', 'THE', 'SAFE', 'BOB', 'ASKED']
+7975-280084-0015-1105: hyp=["WHERE'S", 'THE', 'MONEY', 'OUTSIDE', 'TO', 'SAFE', 'BOB', 'ASKED']
+7975-280084-0016-1106: ref=['THE', 'SHUTTERS', 'WERE', 'CLOSED', 'AND', 'THIS', 'CAUSED', 'BUNKER', 'AN', "INSTANT'S", 'DELAY', 'THAT', 'WAS', 'ALMOST', 'FATAL', 'PITTS', 'CHASED', 'HIM', 'WITH', 'A', 'BULLET']
+7975-280084-0016-1106: hyp=['THE', 'SHOUTERS', 'WERE', 'CLOSED', 'AND', 'THIS', 'CAUSED', 'BUNKER', 'AN', 'INSTANCE', 'DELAY', 'THAT', 'WAS', 'ALMOST', 'FATAL', 'FITZ', 'CHASED', 'HIM', 'WITH', 'A', 'BULLET']
+7975-280084-0017-1107: ref=['THE', 'FIRST', 'ONE', 'MISSED', 'HIM', 'BUT', 'THE', 'SECOND', 'WENT', 'THROUGH', 'HIS', 'RIGHT', 'SHOULDER']
+7975-280084-0017-1107: hyp=['THE', 'FIRST', 'ONE', 'MISSED', 'HIM', 'BUT', 'THE', 'SECOND', 'WENT', 'THROUGH', 'HIS', 'RIGHT', 'SHOULDER']
+7975-280085-0000-1071: ref=['THAT', 'NIGHT', 'IT', 'STARTED', 'TO', 'RAIN', 'AND', 'WE', 'WORE', 'OUT', 'OUR', 'HORSES']
+7975-280085-0000-1071: hyp=['THAT', 'NIGHT', 'IT', 'STARTED', 'TO', 'RAIN', 'AND', 'WE', 'WORE', 'OUT', 'OUR', 'HORSES']
+7975-280085-0001-1072: ref=['FRIDAY', 'WE', 'MOVED', 'TOWARD', 'WATERVILLE', 'AND', 'FRIDAY', 'NIGHT', 'WE', 'CAMPED', 'BETWEEN', 'ELYSIAN', 'AND', 'GERMAN', 'LAKE']
+7975-280085-0001-1072: hyp=['FRIDAY', 'WE', 'MOVED', 'TOWARD', 'WATERVILLE', 'AND', 'FRIDAY', 'NIGHT', 'WE', "CAN'T", 'BETWEEN', 'ELYSIAN', 'AND', 'GERMAN', 'LAKE']
+7975-280085-0002-1073: ref=["BOB'S", 'SHATTERED', 'ELBOW', 'WAS', 'REQUIRING', 'FREQUENT', 'ATTENTION', 'AND', 'THAT', 'NIGHT', 'WE', 'MADE', 'ONLY', 'NINE', 'MILES', 'AND', 'MONDAY', 'MONDAY', 'NIGHT', 'AND', 'TUESDAY', 'WE', 'SPENT', 'IN', 'A', 'DESERTED', 'FARM', 'HOUSE', 'CLOSE', 'TO', 'MANKATO']
+7975-280085-0002-1073: hyp=['BOB', 'SHUTTERED', 'ELBOWS', 'REQUIRING', 'FREQUENT', 'ATTENTION', 'AND', 'THAT', 'NIGHT', 'WE', 'MADE', 'ONLY', 'NINE', 'MILES', 'AND', 'MONDAY', 'MONDAY', 'NIGHT', 'IN', 'TUESDAY', 'WE', 'SPENT', 'IN', 'A', 'DESERTED', 'FARM', 'HOUSE', 'CLOSE', 'TO', 'MAIN', 'CATO']
+7975-280085-0003-1074: ref=['THAT', 'DAY', 'A', 'MAN', 'NAMED', 'DUNNING', 'DISCOVERED', 'US', 'AND', 'WE', 'TOOK', 'HIM', 'PRISONER']
+7975-280085-0003-1074: hyp=['THAT', 'THEY', 'A', 'MAN', 'NAMED', 'DUNNING', 'DISCOVERED', 'US', 'AND', 'WE', 'TOOK', 'HIM', 'PRISONER']
+7975-280085-0004-1075: ref=['FINALLY', 'WE', 'ADMINISTERED', 'TO', 'HIM', 'AN', 'OATH', 'NOT', 'TO', 'BETRAY', 'OUR', 'WHEREABOUTS', 'UNTIL', 'WE', 'HAD', 'TIME', 'TO', 'MAKE', 'OUR', 'ESCAPE', 'AND', 'HE', 'AGREED', 'NOT', 'TO']
+7975-280085-0004-1075: hyp=['FINALLY', 'WE', 'ADMINISTERED', 'TO', 'HIM', 'AN', 'OATH', 'NOT', 'TO', 'BETRAY', 'OUR', 'WHEREABOUTS', 'UNTIL', 'WE', 'HAD', 'TIME', 'TO', 'MAKE', 'OUR', 'ESCAPE', 'AND', 'HE', 'AGREED', 'NOT', 'TO']
+7975-280085-0005-1076: ref=['NO', 'SOONER', 'HOWEVER', 'WAS', 'HE', 'RELEASED', 'THAN', 'HE', 'MADE', 'POSTHASTE', 'INTO', 'MANKATO', 'TO', 'ANNOUNCE', 'OUR', 'PRESENCE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'ANOTHER', 'POSSE', 'WAS', 'LOOKING', 'FOR', 'US']
+7975-280085-0005-1076: hyp=['NO', 'SOONER', 'HOWEVER', 'WAS', 'HE', 'RELEASED', 'THAN', 'HE', 'MADE', 'POST', 'TASTE', 'INTO', 'MANCATEO', 'TO', 'ANNOUNCE', 'OUR', 'PRESENCE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'ANOTHER', 'POSSE', 'WAS', 'LOOKING', 'FOR', 'US']
+7975-280085-0006-1077: ref=['THE', 'WHISTLE', 'ON', 'THE', 'OIL', 'MILL', 'BLEW', 'AND', 'WE', 'FEARED', 'THAT', 'IT', 'WAS', 'A', 'SIGNAL', 'THAT', 'HAD', 'BEEN', 'AGREED', 'UPON', 'TO', 'ALARM', 'THE', 'TOWN', 'IN', 'CASE', 'WE', 'WERE', 'OBSERVED', 'BUT', 'WE', 'WERE', 'NOT', 'MOLESTED']
+7975-280085-0006-1077: hyp=['THE', 'WHISTLE', 'ON', 'THE', 'OARMEIL', 'BLUE', 'AND', 'WE', 'FEARED', 'THAT', 'IT', 'WAS', 'A', 'SIGNAL', 'THAT', 'HAD', 'BEEN', 'AGREED', 'UPON', 'TO', 'ALARM', 'THE', 'TOWN', 'IN', 'CASE', 'WE', 'WERE', 'OBSERVED', 'BUT', 'WE', 'WERE', 'NOT', 'MOLESTED']
+7975-280085-0007-1078: ref=['HE', 'HAD', 'TO', 'SLEEP', 'WITH', 'IT', 'PILLOWED', 'ON', 'MY', 'BREAST', 'JIM', 'BEING', 'ALSO', 'CRIPPLED', 'WITH', 'A', 'WOUND', 'IN', 'HIS', 'SHOULDER', 'AND', 'WE', 'COULD', 'NOT', 'GET', 'MUCH', 'SLEEP']
+7975-280085-0007-1078: hyp=['HE', 'HAD', 'TO', 'SLEEP', 'WITH', 'IT', 'PILLOWED', 'ON', 'MY', 'BREAST', 'JIM', 'BEING', 'ALSO', 'CRIPPLED', 'WITH', 'A', 'WOUND', 'IN', 'HIS', 'SHOULDER', 'AND', 'WE', 'COULD', 'NOT', 'GET', 'MUCH', 'SLEEP']
+7975-280085-0008-1079: ref=['BUT', 'THEY', 'SOON', 'AFTER', 'GOT', 'CLOSE', 'ENOUGH', 'SO', 'THAT', 'ONE', 'OF', 'THEM', 'BROKE', 'MY', 'WALKING', 'STICK', 'WITH', 'A', 'SHOT']
+7975-280085-0008-1079: hyp=['BUT', 'THEY', 'SOON', 'AFTER', 'GOT', 'CLOSE', 'ENOUGH', 'SO', 'THAT', 'ONE', 'OF', 'THEM', 'BROKE', 'MY', 'WALKING', 'STICK', 'WITH', 'A', 'SHOT']
+7975-280085-0009-1080: ref=['WE', 'WERE', 'IN', 'SIGHT', 'OF', 'OUR', 'LONG', 'SOUGHT', 'HORSES', 'WHEN', 'THEY', 'CUT', 'US', 'OFF', 'FROM', 'THE', 'ANIMALS', 'AND', 'OUR', 'LAST', 'HOPE', 'WAS', 'GONE']
+7975-280085-0009-1080: hyp=['WE', 'WERE', 'INSIDE', 'OF', 'OUR', 'LONG', 'SOUGHT', 'HORSES', 'WHEN', 'THEY', 'CUT', 'US', 'OFF', 'FROM', 'THE', 'ANIMALS', 'AND', 'OUR', 'LAST', 'HOPE', 'WAS', 'GONE']
+7975-280085-0010-1081: ref=['SIX', 'STEPPED', 'TO', 'THE', 'FRONT', 'SHERIFF', 'GLISPIN', 'COLONEL', 'T', 'L', 'VOUGHT', 'B', 'M', 'RICE', 'G', 'A', 'BRADFORD', 'C', 'A', 'POMEROY', 'AND', 'S', 'J', 'SEVERSON']
+7975-280085-0010-1081: hyp=['SIX', 'STEPS', 'OF', 'THE', 'FRONT', 'SHERIFF', 'LISPIN', 'COLONEL', 'T', 'L', 'VAULT', 'B', 'M', 'RICE', 'G', 'A', 'BRADFORD', 'C', 'A', 'POMERALI', 'AND', 'S', 'VERSON']
+7975-280085-0011-1082: ref=['FORMING', 'IN', 'LINE', 'FOUR', 'PACES', 'APART', 'HE', 'ORDERED', 'THEM', 'TO', 'ADVANCE', 'RAPIDLY', 'AND', 'CONCENTRATE', 'THE', 'FIRE', 'OF', 'THE', 'WHOLE', 'LINE', 'THE', 'INSTANT', 'THE', 'ROBBERS', 'WERE', 'DISCOVERED']
+7975-280085-0011-1082: hyp=['FORMING', 'A', 'LINE', 'FOUR', 'PACES', 'APART', 'HE', 'ORDERED', 'THEM', 'TO', 'ADVANCE', 'RAPIDLY', 'AND', 'CONCENTRATE', 'THE', 'FIRE', 'OF', 'THE', 'WHOLE', 'LINE', 'THE', 'INSTANT', 'THE', 'ROBBERS', 'WERE', 'DISCOVERED']
+7975-280085-0012-1083: ref=['MAKE', 'FOR', 'THE', 'HORSES', 'I', 'SAID', 'EVERY', 'MAN', 'FOR', 'HIMSELF']
+7975-280085-0012-1083: hyp=['MAKE', 'FOR', 'THE', 'HORSES', 'I', 'SAID', 'EVERY', 'MAN', 'FOR', 'HIMSELF']
+7975-280085-0013-1084: ref=['THERE', 'IS', 'NO', 'USE', 'STOPPING', 'TO', 'PICK', 'UP', 'A', 'COMRADE', 'HERE', 'FOR', 'WE', "CAN'T", 'GET', 'HIM', 'THROUGH', 'THE', 'LINE', 'JUST', 'CHARGE', 'THEM', 'AND', 'MAKE', 'IT', 'IF', 'WE', 'CAN']
+7975-280085-0013-1084: hyp=["THERE'S", 'NO', 'USE', 'STOPPING', 'TO', 'PICK', 'UP', 'A', 'COMRADE', 'HERE', 'FOR', 'WE', "CAN'T", 'GET', 'HIM', 'THROUGH', 'THE', 'LINE', 'JUST', 'SHARZAN', 'AND', 'MAKE', 'IT', 'IF', 'WE', 'CAN']
+7975-280085-0014-1085: ref=['I', 'GOT', 'UP', 'AS', 'THE', 'SIGNAL', 'FOR', 'THE', 'CHARGE', 'AND', 'WE', 'FIRED', 'ONE', 'VOLLEY']
+7975-280085-0014-1085: hyp=['I', 'GOT', 'UP', 'AS', 'A', 'SIGNAL', 'FOR', 'THE', 'CHARGE', 'AND', 'WE', 'FIRED', 'ONE', 'VOLLEY']
+7975-280085-0015-1086: ref=['ONE', 'OF', 'THE', 'FELLOWS', 'IN', 'THE', 'OUTER', 'LINE', 'NOT', 'BRAVE', 'ENOUGH', 'HIMSELF', 'TO', 'JOIN', 'THE', 'VOLUNTEERS', 'WHO', 'HAD', 'COME', 'IN', 'TO', 'BEAT', 'US', 'OUT', 'WAS', 'NOT', 'DISPOSED', 'TO', 'BELIEVE', 'IN', 'THE', 'SURRENDER', 'AND', 'HAD', 'HIS', 'GUN', 'LEVELLED', 'ON', 'BOB', 'IN', 'SPITE', 'OF', 'THE', 'HANDKERCHIEF', 'WHICH', 'WAS', 'WAVING', 'AS', 'A', 'FLAG', 'OF', 'TRUCE']
+7975-280085-0015-1086: hyp=['ONE', 'OF', 'THE', 'FELLOWS', 'IN', 'THE', 'OUTER', 'LINE', 'NOT', 'BRAVE', 'ENOUGH', 'HIMSELF', 'TO', 'JOIN', 'THE', 'VOLUNTEERS', 'WHO', 'HAD', 'COME', 'IN', 'TO', 'BE', 'DISOUT', 'WAS', 'NOT', 'DISPOSED', 'TO', 'BELIEVE', 'IN', 'THE', 'SURRENDER', 'AND', 'HAD', 'HIS', 'GUN', 'LEVELLED', 'ON', 'BOB', 'IN', 'SPITE', 'OF', 'THE', 'HANDKERCHIEF', 'WHICH', 'WAS', 'WAVING', 'AS', 'A', 'FLAG', 'OF', 'TRUCE']
+7975-280085-0016-1087: ref=['SHERIFF', 'GLISPIN', 'OF', 'WATONWAN', 'COUNTY', 'WHO', 'WAS', 'TAKING', "BOB'S", 'PISTOL', 'FROM', 'HIM', 'WAS', 'ALSO', 'SHOUTING', 'TO', 'THE', 'FELLOW']
+7975-280085-0016-1087: hyp=['SHERIFF', 'GLISBON', 'OF', 'WATERWIN', 'COUNTY', 'WHO', 'WAS', 'TAKING', "BOB'S", 'PISTOL', 'FROM', 'HIM', 'WAS', 'ALSO', 'SHOUTING', 'TO', 'THE', 'FELLOW']
+7975-280085-0017-1088: ref=['INCLUDING', 'THOSE', 'RECEIVED', 'IN', 'AND', 'ON', 'THE', 'WAY', 'FROM', 'NORTHFIELD', 'I', 'HAD', 'ELEVEN', 'WOUNDS']
+7975-280085-0017-1088: hyp=['INCLUDING', 'THOSE', 'RECEIVED', 'IN', 'AND', 'ON', 'THE', 'WAY', 'FROM', 'NORTH', 'FIELD', 'I', 'HAD', 'ELEVEN', 'WINDS']
+7975-280085-0018-1089: ref=['AND', 'SHERIFF', "GLISPIN'S", 'ORDER', 'NOT', 'TO', 'SHOOT', 'WAS', 'THE', 'BEGINNING', 'OF', 'THE', 'PROTECTORATE', 'THAT', 'MINNESOTA', 'PEOPLE', 'ESTABLISHED', 'OVER', 'US']
+7975-280085-0018-1089: hyp=['IN', 'SHERIFF', "GLISBON'S", 'ORDER', 'NOT', 'TO', 'SHOOT', 'WAS', 'THE', 'BEGINNING', 'OF', 'THE', 'PROTECTOR', 'THAT', 'MINNESOTA', 'PEOPLE', 'ESTABLISHED', 'OVER', 'US']
+8131-117016-0000-1303: ref=['CAPTAIN', 'MURDOCH']
+8131-117016-0000-1303: hyp=['CAPTAIN', 'MURDOCK']
+8131-117016-0001-1304: ref=['BUT', 'MARSPORT', 'HAD', 'FLOURISHED', 'ENOUGH', 'TO', 'KILL', 'IT', 'OFF']
+8131-117016-0001-1304: hyp=['BUT', 'MARSPORT', 'HAD', 'FLOURISHED', 'ENOUGH', 'TO', 'KILL', 'IT', 'OFF']
+8131-117016-0002-1305: ref=['SOME', 'OF', 'MARS', 'LAWS', 'DATED', 'FROM', 'THE', 'TIME', 'WHEN', 'LAW', 'ENFORCEMENT', 'HAD', 'BEEN', 'HAMPERED', 'BY', 'LACK', 'OF', 'MEN', 'RATHER', 'THAN', 'BY', 'THE', 'TYPE', 'OF', 'MEN']
+8131-117016-0002-1305: hyp=['SOME', 'OF', 'MARS', 'LAWS', 'DATED', 'FROM', 'THE', 'TIME', 'WHEN', 'LAWN', 'FORCEMENT', 'HAD', 'BEEN', 'HAMPERED', 'BY', 'LACK', 'OF', 'MEN', 'RATHER', 'THAN', 'BY', 'THE', 'TYPE', 'OF', 'MEN']
+8131-117016-0003-1306: ref=['THE', 'STONEWALL', 'GANG', 'NUMBERED', 'PERHAPS', 'FIVE', 'HUNDRED']
+8131-117016-0003-1306: hyp=['THE', 'STONE', 'WALL', 'GANG', 'NUMBERED', 'PERHAPS', 'FIVE', 'HUNDRED']
+8131-117016-0004-1307: ref=['EVEN', 'DERELICTS', 'AND', 'FAILURES', 'HAD', 'TO', 'EAT', 'THERE', 'WERE', 'STORES', 'AND', 'SHOPS', 'THROUGHOUT', 'THE', 'DISTRICT', 'WHICH', 'EKED', 'OUT', 'SOME', 'KIND', 'OF', 'A', 'MARGINAL', 'LIVING']
+8131-117016-0004-1307: hyp=['EVEN', 'DEAR', 'ALEXE', 'AND', 'FAILURES', 'HAD', 'TO', 'EAT', 'THERE', 'WERE', 'STORIES', 'AND', 'SHOPS', 'THROUGHOUT', 'THE', 'DISTRICT', 'WHICH', 'EKED', 'OUT', 'SOME', 'KIND', 'OF', 'A', 'MARGINAL', 'LIVING']
+8131-117016-0005-1308: ref=['THEY', 'WERE', 'SAFE', 'FROM', 'PROTECTION', 'RACKETEERS', 'THERE', 'NONE', 'BOTHERED', 'TO', 'COME', 'SO', 'FAR', 'OUT']
+8131-117016-0005-1308: hyp=['THEY', 'WERE', 'SAFE', 'FROM', 'PROTECTION', 'RACKETERS', 'THERE', 'NONE', 'BOTHERED', 'TO', 'COME', 'SO', 'FAR', 'OUT']
+8131-117016-0006-1309: ref=['THE', 'SHOPKEEPERS', 'AND', 'SOME', 'OF', 'THE', 'LESS', 'UNFORTUNATE', 'PEOPLE', 'THERE', 'HAD', 'PROTESTED', 'LOUD', 'ENOUGH', 'TO', 'REACH', 'CLEAR', 'BACK', 'TO', 'EARTH']
+8131-117016-0006-1309: hyp=['THE', 'SHOPKEEPERS', 'AND', 'SOME', 'OF', 'THE', 'LESS', 'UNFORTUNATE', 'PEOPLE', 'THERE', 'HAD', 'PROTESTED', 'LOUD', 'ENOUGH', 'TO', 'REACH', 'CLEAR', 'BACK', 'TO', 'EARTH']
+8131-117016-0007-1310: ref=['CAPTAIN', 'MURDOCH', 'WAS', 'AN', 'UNKNOWN', 'FACTOR', 'AND', 'NOW', 'WAS', 'ASKING', 'FOR', 'MORE', 'MEN']
+8131-117016-0007-1310: hyp=['CAPTAIN', 'MURDOCK', 'WAS', 'AN', 'UNKNOWN', 'FACTOR', 'AND', 'NOW', 'WAS', 'ASKING', 'FOR', 'MORE', 'MEN']
+8131-117016-0008-1311: ref=['THE', 'PRESSURE', 'WAS', 'ENOUGH', 'TO', 'GET', 'THEM', 'FOR', 'HIM']
+8131-117016-0008-1311: hyp=['THE', 'PRESSURE', 'WAS', 'ENOUGH', 'TO', 'GET', 'THEM', 'FOR', 'HIM']
+8131-117016-0009-1312: ref=['GORDON', 'REPORTED', 'FOR', 'WORK', 'WITH', 'A', 'SENSE', 'OF', 'THE', 'BOTTOM', 'FALLING', 'OUT', 'MIXED', 'WITH', 'A', 'VAGUE', 'RELIEF']
+8131-117016-0009-1312: hyp=['GORDON', 'REPORTED', 'FOR', 'WORK', 'WITH', 'A', 'SENSE', 'OF', 'THE', 'BOTTOM', 'FALLING', 'OUT', 'MIXED', 'WITH', 'A', 'VAGUE', 'RELIEF']
+8131-117016-0010-1313: ref=["I'VE", 'GOT', 'A', 'FREE', 'HAND', 'AND', "WE'RE", 'GOING', 'TO', 'RUN', 'THIS', 'THE', 'WAY', 'WE', 'WOULD', 'ON', 'EARTH']
+8131-117016-0010-1313: hyp=["I'VE", 'GOT', 'A', 'FREE', 'HAND', 'AND', "WE'RE", 'GOING', 'TO', 'RUN', 'THIS', 'THE', 'WAY', 'WE', 'WOULD', 'ON', 'EARTH']
+8131-117016-0011-1314: ref=['YOUR', 'JOB', 'IS', 'TO', 'PROTECT', 'THE', 'CITIZENS', 'HERE', 'AND', 'THAT', 'MEANS', 'EVERYONE', 'NOT', 'BREAKING', 'THE', 'LAWS', 'WHETHER', 'YOU', 'FEEL', 'LIKE', 'IT', 'OR', 'NOT', 'NO', 'GRAFT']
+8131-117016-0011-1314: hyp=['YOUR', 'JOB', 'IS', 'TO', 'PROTECT', 'THE', 'CITIZENS', 'HERE', 'AND', 'THAT', 'MEANS', 'EVERY', 'ONE', 'NOT', 'BREAKING', 'THE', 'LAWS', 'WHETHER', 'YOU', 'FEEL', 'LIKE', 'IT', 'OR', 'NOT', 'NO', 'GRAFT']
+8131-117016-0012-1315: ref=['THE', 'FIRST', 'MAN', 'MAKING', 'A', 'SHAKEDOWN', 'WILL', 'GET', 'THE', 'SAME', 'TREATMENT', "WE'RE", 'GOING', 'TO', 'USE', 'ON', 'THE', 'STONEWALL', 'BOYS', "YOU'LL", 'GET', 'DOUBLE', 'PAY', 'HERE', 'AND', 'YOU', 'CAN', 'LIVE', 'ON', 'IT']
+8131-117016-0012-1315: hyp=['THE', 'FIRST', 'MAN', 'MAKING', 'A', 'SHAKE', 'DOWN', 'WILL', 'GET', 'THE', 'SAME', 'TREATMENT', "WE'RE", 'GOING', 'TO', 'USE', 'ON', 'THE', 'STONE', 'WALL', 'BOYS', "YOU'LL", 'GET', 'DOUBLE', 'PAY', 'HERE', 'AND', 'YOU', 'CAN', 'LIVE', 'ON', 'IT']
+8131-117016-0013-1316: ref=['HE', 'PICKED', 'OUT', 'FIVE', 'OF', 'THE', 'MEN', 'INCLUDING', 'GORDON', 'YOU', 'FIVE', 'WILL', 'COME', 'WITH', 'ME']
+8131-117016-0013-1316: hyp=['HE', 'PICKED', 'OUT', 'FIVE', 'OF', 'THE', 'MEN', 'INCLUDING', 'GORDON', 'YOU', 'FIVE', 'WILL', 'COME', 'WITH', 'ME']
+8131-117016-0014-1317: ref=['THE', 'REST', 'OF', 'YOU', 'CAN', 'TEAM', 'UP', 'ANY', 'WAY', 'YOU', 'WANT', 'TONIGHT', 'PICK', 'ANY', 'ROUTE', "THAT'S", 'OPEN', 'OKAY', 'MEN', "LET'S", 'GO']
+8131-117016-0014-1317: hyp=['THE', 'REST', 'OF', 'YOU', 'CAN', 'TEAM', 'UP', 'ANY', 'WAY', 'YOU', 'WANT', 'TO', 'NIGHT', 'PICK', 'ANY', 'ROW', 'OF', 'THIS', 'OPEN', 'O', 'CAME', 'IN', "LET'S", 'GO']
+8131-117016-0015-1318: ref=['BRUCE', 'GORDON', 'GRINNED', 'SLOWLY', 'AS', 'HE', 'SWUNG', 'THE', 'STICK', 'AND', "MURDOCH'S", 'EYES', 'FELL', 'ON', 'HIM', 'EARTH', 'COP']
+8131-117016-0015-1318: hyp=['BORDON', 'GRINNED', 'SLOWLY', 'AS', 'HE', 'SWUNG', 'THE', 'STICK', 'AND', "MARDOCK'S", 'EYES', 'FELL', 'ON', 'HIM', 'EARTHCOP']
+8131-117016-0016-1319: ref=['TWO', 'YEARS', 'GORDON', 'ADMITTED']
+8131-117016-0016-1319: hyp=['TWO', 'YEARS', 'GORDON', 'ADMITTED']
+8131-117016-0017-1320: ref=['FOR', 'A', 'SECOND', 'GORDON', 'CURSED', 'HIMSELF']
+8131-117016-0017-1320: hyp=['FOR', 'A', 'SECOND', 'GORDON', 'CURSED', 'HIMSELF']
+8131-117016-0018-1321: ref=['HE', 'BEGAN', 'WONDERING', 'ABOUT', 'SECURITY', 'THEN']
+8131-117016-0018-1321: hyp=['HE', 'BEGAN', 'WONDERING', 'ABOUT', 'SECURITY', 'THEN']
+8131-117016-0019-1322: ref=['NOBODY', 'HAD', 'TRIED', 'TO', 'GET', 'IN', 'TOUCH', 'WITH', 'HIM']
+8131-117016-0019-1322: hyp=['NOBODY', 'HAD', 'TRIED', 'TO', 'GET', 'IN', 'TOUCH', 'WITH', 'HIM']
+8131-117016-0020-1323: ref=['THERE', 'WAS', 'A', 'CRUDE', 'LIGHTING', 'SYSTEM', 'HERE', 'PUT', 'UP', 'BY', 'THE', 'CITIZENS', 'AT', 'THE', 'FRONT', 'OF', 'EACH', 'BUILDING', 'A', 'DIM', 'PHOSPHOR', 'BULB', 'GLOWED', 'WHEN', 'DARKNESS', 'FELL', 'THEY', 'WOULD', 'HAVE', 'NOTHING', 'ELSE', 'TO', 'SEE', 'BY']
+8131-117016-0020-1323: hyp=['THERE', 'WAS', 'A', 'CRUDE', 'LIGHTING', 'SYSTEM', 'HERE', 'PUT', 'UP', 'BY', 'THE', 'CITIZENS', 'AT', 'THE', 'FRONT', 'OF', 'EACH', 'BUILDING', 'A', 'DIM', 'PHOSPHO', 'BOB', 'GLOWED', 'WHEN', 'DARKNESS', 'FELL', 'THEY', 'WOULD', 'HAVE', 'NOTHING', 'ELSE', 'TO', 'SEE', 'BY']
+8131-117016-0021-1324: ref=['MOVING', 'IN', 'TWO', 'GROUPS', 'OF', 'THREES', 'AT', 'OPPOSITE', 'SIDES', 'OF', 'THE', 'STREET', 'THEY', 'BEGAN', 'THEIR', 'BEAT']
+8131-117016-0021-1324: hyp=['MOVING', 'IN', 'TWO', 'GROUPS', 'OF', 'THREES', 'IT', 'OPPOSITE', 'SIDES', 'OF', 'THE', 'STREET', 'THEY', 'BEGAN', 'THEIR', 'BEAT']
+8131-117016-0022-1325: ref=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'SAVE', 'THE', 'CITIZEN', 'WHO', 'WAS', 'DYING', 'FROM', 'LACK', 'OF', 'AIR']
+8131-117016-0022-1325: hyp=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'SAVE', 'THE', 'CITIZEN', 'WHO', 'WAS', 'DYING', 'FROM', 'LACK', 'OF', 'AIR']
+8131-117016-0023-1326: ref=['GORDON', 'FELT', 'THE', 'SOLID', 'PLEASURE', 'OF', 'THE', 'FINELY', 'TURNED', 'CLUB', 'IN', 'HIS', 'HANDS']
+8131-117016-0023-1326: hyp=['GORDON', 'FELT', 'THE', 'SOLID', 'PLEASURE', 'OF', 'THE', 'FINELY', 'TURNED', 'CLUB', 'IN', 'HIS', 'HANDS']
+8131-117016-0024-1327: ref=["GORDON'S", 'EYES', 'POPPED', 'AT', 'THAT']
+8131-117016-0024-1327: hyp=["GORDON'S", 'EYES', 'POPPED', 'AT', 'THAT']
+8131-117016-0025-1328: ref=['HE', 'SWALLOWED', 'THE', 'SENTIMENT', 'HIS', 'OWN', 'CLUB', 'WAS', 'MOVING', 'NOW']
+8131-117016-0025-1328: hyp=['HE', 'SWALLOWED', 'THE', 'SENTIMENT', 'HIS', 'OWN', 'CLUB', 'WAS', 'MOVING', 'NOW']
+8131-117016-0026-1329: ref=['THE', 'OTHER', 'FOUR', 'COPS', 'HAD', 'COME', 'IN', 'RELUCTANTLY']
+8131-117016-0026-1329: hyp=['THE', 'OTHER', 'FOUR', 'COPS', 'HAD', 'COME', 'IN', 'RELUCTANTLY']
+8131-117016-0027-1330: ref=['HE', 'BROUGHT', 'HIM', 'TO', 'THE', 'GROUND', 'WITH', 'A', 'SINGLE', 'BLOW', 'ACROSS', 'THE', 'KIDNEYS']
+8131-117016-0027-1330: hyp=['HE', 'BROUGHT', 'HIM', 'TO', 'THE', 'GROUND', 'WITH', 'A', 'SINGLE', 'BLOW', 'ACROSS', 'THE', 'KIDNEYS']
+8131-117016-0028-1331: ref=['THEY', 'ROUNDED', 'UP', 'THE', 'MEN', 'OF', 'THE', 'GANG', 'AND', 'ONE', 'OF', 'THE', 'COPS', 'STARTED', 'OFF']
+8131-117016-0028-1331: hyp=['THEY', 'ROUNDED', 'UP', 'THE', 'MEN', 'OF', 'THE', 'GANG', 'AND', 'ONE', 'OF', 'THE', 'CUPS', 'STARTED', 'OFF']
+8131-117016-0029-1332: ref=['TO', 'FIND', 'A', 'PHONE', 'AND', 'CALL', 'THE', 'WAGON']
+8131-117016-0029-1332: hyp=['TO', 'FIND', 'A', 'PHONE', 'AND', 'CALL', 'THE', 'WAGON']
+8131-117016-0030-1333: ref=["WE'RE", 'NOT', 'USING', 'WAGONS', 'MURDOCH', 'TOLD', 'HIM', 'LINE', 'THEM', 'UP']
+8131-117016-0030-1333: hyp=['WERE', 'NOT', 'USING', 'WAGONS', 'MURDOCK', 'TOLD', 'HIM', 'LIE', 'THEM', 'UP']
+8131-117016-0031-1334: ref=['IF', 'THEY', 'TRIED', 'TO', 'RUN', 'THEY', 'WERE', 'HIT', 'FROM', 'BEHIND', 'IF', 'THEY', 'STOOD', 'STILL', 'THEY', 'WERE', 'CLUBBED', 'CAREFULLY']
+8131-117016-0031-1334: hyp=['IF', 'THEY', 'TRIED', 'TO', 'RUN', 'THEY', 'WERE', 'HIT', 'FROM', 'BEHIND', 'IF', 'THEY', 'STOOD', 'STILL', 'THEY', 'WERE', 'CLUBBED', 'CAREFULLY']
+8131-117016-0032-1335: ref=['MURDOCH', 'INDICATED', 'ONE', 'WHO', 'STOOD', 'WITH', 'HIS', 'SHOULDERS', 'SHAKING', 'AND', 'TEARS', 'RUNNING', 'DOWN', 'HIS', 'CHEEKS']
+8131-117016-0032-1335: hyp=['MURDOCK', 'INDICATED', 'ONE', 'WHO', 'STOOD', 'WITH', 'HIS', 'SHOULDER', 'SHAKING', 'AND', 'TEARS', 'RUNNING', 'DOWN', 'HIS', 'CHEEKS']
+8131-117016-0033-1336: ref=['THE', "CAPTAIN'S", 'FACE', 'WAS', 'AS', 'SICK', 'AS', 'GORDON', 'FELT']
+8131-117016-0033-1336: hyp=['THE', "CAPTAIN'S", 'FACE', 'WAS', 'AS', 'SICK', 'AS', "GORDON'S", 'FELT']
+8131-117016-0034-1337: ref=['I', 'WANT', 'THE', 'NAME', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'GANG', 'YOU', 'CAN', 'REMEMBER', 'HE', 'TOLD', 'THE', 'MAN']
+8131-117016-0034-1337: hyp=['I', 'WANT', 'THE', 'NAME', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'GANG', 'YOU', 'CAN', 'REMEMBER', 'HE', 'TOLD', 'THE', 'MAN']
+8131-117016-0035-1338: ref=['COLONEL', "THEY'D", 'KILL', 'ME', 'I', "DON'T", 'KNOW']
+8131-117016-0035-1338: hyp=['COLONEL', "THEY'D", 'KILL', 'ME', 'I', "DON'T", 'KNOW']
+8131-117016-0036-1339: ref=['MURDOCH', 'TOOK', 'HIS', 'NOD', 'AS', 'EVIDENCE', 'ENOUGH', 'AND', 'TURNED', 'TO', 'THE', 'WRETCHED', 'TOUGHS']
+8131-117016-0036-1339: hyp=['MURDOCK', 'TOOK', 'HIS', 'NOD', 'AS', 'EVIDENCE', 'ENOUGH', 'AND', 'TURNED', 'TO', 'THE', 'WRETCHED', 'TUFTS']
+8131-117016-0037-1340: ref=['IF', 'HE', 'SHOULD', 'TURN', 'UP', 'DEAD', "I'LL", 'KNOW', 'YOU', 'BOYS', 'ARE', 'RESPONSIBLE', 'AND', "I'LL", 'FIND', 'YOU']
+8131-117016-0037-1340: hyp=['IF', 'HE', 'SHOULD', 'TURN', 'UP', 'DEAD', "I'LL", 'KNOW', 'YOU', 'BOYS', 'ARE', 'RESPONSIBLE', 'AND', "I'LL", 'FIND', 'YOU']
+8131-117016-0038-1341: ref=['TROUBLE', 'BEGAN', 'BREWING', 'SHORTLY', 'AFTER', 'THOUGH']
+8131-117016-0038-1341: hyp=['TROUBLE', 'BEGAN', 'BREWING', 'SHORTLY', 'AFTER', 'THOUGH']
+8131-117016-0039-1342: ref=['MURDOCH', 'SENT', 'ONE', 'OF', 'THE', 'MEN', 'TO', 'PICK', 'UP', 'A', 'SECOND', 'SQUAD', 'OF', 'SIX', 'AND', 'THEN', 'A', 'THIRD']
+8131-117016-0039-1342: hyp=['MARDACK', 'SENT', 'ONE', 'OF', 'THE', 'MEN', 'TO', 'PICK', 'UP', 'A', 'SECOND', 'SQUAD', 'OF', 'SIX', 'AND', 'THEN', 'A', 'THIRD']
+8131-117016-0040-1343: ref=['IN', 'THE', 'THIRD', 'ONE', 'BRUCE', 'GORDON', 'SPOTTED', 'ONE', 'OF', 'THE', 'MEN', "WHO'D", 'BEEN', 'BEATEN', 'BEFORE']
+8131-117016-0040-1343: hyp=['AND', 'THE', 'THIRD', 'ONE', 'BRUSH', 'GORDON', 'SPOTTED', 'ONE', 'OF', 'THE', 'MEN', 'WHO', 'HAD', 'BEEN', 'BEATEN', 'BEFORE']
+8131-117016-0041-1344: ref=['GET', 'A', 'STRETCHER', 'AND', 'TAKE', 'HIM', 'WHEREVER', 'HE', 'BELONGS', 'HE', 'ORDERED']
+8131-117016-0041-1344: hyp=['GET', 'A', 'STRETCHER', 'AND', 'TAKE', 'HIM', 'WHEREVER', 'HE', 'BELONGS', 'HE', 'ORDERED']
+8131-117016-0042-1345: ref=['BUT', 'THE', 'CAPTAIN', 'STIRRED', 'FINALLY', 'SIGHING']
+8131-117016-0042-1345: hyp=['BUT', 'THE', 'CAPTAIN', 'STIRRED', 'FINALLY', 'SIGHING']
+8131-117016-0043-1346: ref=['NO', 'THE', 'COPS', "THEY'RE", 'GIVING', 'ME', "WE'RE", 'COVERED', 'GORDON']
+8131-117016-0043-1346: hyp=['NO', 'THE', 'COPS', 'ARE', 'GIVING', 'ME', "WE'RE", 'COVERED', 'GORDON']
+8131-117016-0044-1347: ref=['BUT', 'THE', 'STONEWALL', 'GANG', 'IS', 'BACKING', 'WAYNE']
+8131-117016-0044-1347: hyp=['BUT', 'THE', 'STERN', 'WALL', 'GANG', 'IS', 'BACKING', 'WANE']
+8131-117016-0045-1348: ref=['BUT', "IT'S", 'GOING', 'TO', 'BE', 'TOUGH', 'ON', 'THEM']
+8131-117016-0045-1348: hyp=['BUT', "IT'S", 'GOING', 'TO', 'BE', 'TOUGH', 'ON', 'THEM']
+8131-117016-0046-1349: ref=['BRUCE', 'GORDON', 'GRIMACED', "I'VE", 'GOT', 'A', 'YELLOW', 'TICKET', 'FROM', 'SECURITY']
+8131-117016-0046-1349: hyp=['BRUCE', 'GORDON', 'GRIMACED', "I'VE", 'GOT', 'A', 'YELLOW', 'TICKET', 'FROM', 'SECURITY']
+8131-117016-0047-1350: ref=['MURDOCH', 'BLINKED', 'HE', 'DROPPED', 'HIS', 'EYES', 'SLOWLY']
+8131-117016-0047-1350: hyp=['MARDOCK', 'BLINKED', 'HE', 'DROPPED', 'HIS', 'EYES', 'SLOWLY']
+8131-117016-0048-1351: ref=['WHAT', 'MAKES', 'YOU', 'THINK', 'WAYNE', 'WILL', 'BE', 'RE', 'ELECTED']
+8131-117016-0048-1351: hyp=['WHAT', 'MAKES', 'YOU', 'THINK', 'WAIN', 'WILL', 'BE', 'REELECTED']
+8131-117016-0049-1352: ref=['NOBODY', 'WANTS', 'HIM', 'EXCEPT', 'A', 'GANG', 'OF', 'CROOKS', 'AND', 'THOSE', 'IN', 'POWER']
+8131-117016-0049-1352: hyp=['NOBODY', 'WANTS', 'HIM', 'EXCEPT', 'A', 'GANG', 'OF', 'CROOKS', 'AND', 'THOSE', 'IN', 'POWER']
+8131-117016-0050-1353: ref=['EVER', 'SEE', 'A', 'MARTIAN', 'ELECTION']
+8131-117016-0050-1353: hyp=['EVER', 'SEE', 'A', 'MARTIAN', 'ELECTION']
+8131-117016-0051-1354: ref=['NO', "YOU'RE", 'A', 'FIRSTER', 'HE', "CAN'T", 'LOSE']
+8131-117016-0051-1354: hyp=['NO', "YOU'RE", 'A', 'FIRST', 'TER', 'HE', "CAN'T", 'LOSE']
+8131-117016-0052-1355: ref=['AND', 'THEN', 'HELL', 'IS', 'GOING', 'TO', 'POP', 'AND', 'THIS', 'WHOLE', 'PLANET', 'MAY', 'BE', 'BLOWN', 'WIDE', 'OPEN']
+8131-117016-0052-1355: hyp=['AND', 'THEN', 'HELL', 'IS', 'GOING', 'TO', 'POP', 'IN', 'THIS', 'WHOLE', 'PLANET', 'MAY', 'BE', 'BLOWN', 'WIDE', 'OPEN']
+8131-117016-0053-1356: ref=['IT', 'FITTED', 'WITH', 'THE', 'DIRE', 'PREDICTIONS', 'OF', 'SECURITY', 'AND', 'WITH', 'THE', 'SPYING', 'GORDON', 'WAS', 'GOING', 'TO', 'DO', 'ACCORDING', 'TO', 'THEM']
+8131-117016-0053-1356: hyp=['IT', 'FITTED', 'WITH', 'THE', 'DIRE', 'PREDICTIONS', 'OF', 'SECURITY', 'AND', 'WITH', 'THE', 'SPYING', 'GORDON', 'WAS', 'GOING', 'TO', 'DO', 'ACCORDING', 'TO', 'THEM']
+8131-117016-0054-1357: ref=['HE', 'WAS', 'GETTING', 'EVEN', 'FATTER', 'NOW', 'THAT', 'HE', 'WAS', 'EATING', 'BETTER', 'FOOD', 'FROM', 'THE', 'FAIR', 'RESTAURANT', 'AROUND', 'THE', 'CORNER']
+8131-117016-0054-1357: hyp=['HE', 'WAS', 'GETTING', 'EVEN', 'FATTER', 'NOW', 'THAT', 'HE', 'WAS', 'EATING', 'BETTER', 'FOOD', 'FROM', 'THE', 'FAIR', 'RESTAURANT', 'AROUND', 'THE', 'CORNER']
+8131-117016-0055-1358: ref=['COST', 'EM', 'MORE', 'BUT', "THEY'D", 'BE', 'RESPECTABLE']
+8131-117016-0055-1358: hyp=['COSTUME', 'MORE', 'BUT', "THEY'D", 'BE', 'RESPECTABLE']
+8131-117016-0056-1359: ref=['BECAUSE', 'IZZY', 'IS', 'ALWAYS', 'HONEST', 'ACCORDING', 'TO', 'HOW', 'HE', 'SEES', 'IT']
+8131-117016-0056-1359: hyp=['BECAUSE', 'IZZIE', 'IS', 'ALWAYS', 'HONEST', 'ACCORDING', 'TO', 'HOW', 'HE', 'SEES', 'IT']
+8131-117016-0057-1360: ref=['BUT', 'YOU', 'GOT', 'EARTH', 'IDEAS', 'OF', 'THE', 'STUFF', 'LIKE', 'I', 'HAD', 'ONCE']
+8131-117016-0057-1360: hyp=['BUT', 'YOU', 'GOT', 'EARTH', 'IDEAS', 'OF', 'THE', 'STUFF', 'LIKE', 'I', 'HAD', 'ONCE']
+8131-117016-0058-1361: ref=['THE', 'GROUPS', 'GREW', 'MORE', 'EXPERIENCED', 'AND', 'MURDOCH', 'WAS', 'TRAINING', 'A', 'NEW', 'SQUAD', 'EVERY', 'NIGHT']
+8131-117016-0058-1361: hyp=['THE', 'GROUPS', 'GREW', 'MORE', 'EXPERIENCED', 'AND', 'MURDOCK', 'WAS', 'TRAINING', 'A', 'NEW', 'SQUAD', 'EVERY', 'NIGHT']
+8131-117016-0059-1362: ref=['IT', "WASN'T", 'EXACTLY', 'LEGAL', 'BUT', 'NOTHING', 'WAS', 'HERE']
+8131-117016-0059-1362: hyp=['IT', 'WAS', 'AN', 'EXACTLY', 'LEGAL', 'BUT', 'NOTHING', 'WAS', 'HERE']
+8131-117016-0060-1363: ref=['THIS', 'COULD', 'LEAD', 'TO', 'ABUSES', 'AS', "HE'D", 'SEEN', 'ON', 'EARTH']
+8131-117016-0060-1363: hyp=['THIS', 'COULD', 'LEAD', 'TO', 'ABUSES', 'AS', "HE'D", 'SEEN', 'ON', 'EARTH']
+8131-117016-0061-1364: ref=['BUT', 'THERE', 'PROBABLY', "WOULDN'T", 'BE', 'TIME', 'FOR', 'IT', 'IF', 'MAYOR', 'WAYNE', 'WAS', 'RE', 'ELECTED']
+8131-117016-0061-1364: hyp=['BUT', 'THERE', 'PROBABLY', "WOULDN'T", 'BE', 'TIME', 'FOR', 'IF', 'MAYOR', 'WAIN', 'WAS', 'RE', 'ELECTED']
+8131-117017-0000-1270: ref=['IT', 'WAS', 'NIGHT', 'OUTSIDE', 'AND', 'THE', 'PHOSPHOR', 'BULBS', 'AT', 'THE', 'CORNERS', 'GLOWED', 'DIMLY', 'GIVING', 'HIM', 'BARELY', 'ENOUGH', 'LIGHT', 'BY', 'WHICH', 'TO', 'LOCATE', 'THE', 'WAY', 'TO', 'THE', 'EXTEMPORIZED', 'PRECINCT', 'HOUSE']
+8131-117017-0000-1270: hyp=['IT', 'WAS', 'NIGHT', 'OUTSIDE', 'AND', 'THE', 'PHOSPHO', 'BOBS', 'AT', 'THE', 'CORNERS', 'GLOWED', 'DIMLY', 'GIVING', 'HIM', 'BARELY', 'ENOUGH', 'LIGHT', 'BY', 'WHICH', 'TO', 'LOCATE', 'THE', 'WAY', 'TO', 'THE', 'EXTEMPORIZED', 'PRECINCT', 'HOUSE']
+8131-117017-0001-1271: ref=['IT', 'HAD', 'PROBABLY', 'BEEN', 'YEARS', 'SINCE', 'ANY', 'HAD', 'DARED', 'RISK', 'IT', 'AFTER', 'THE', 'SUN', 'WENT', 'DOWN']
+8131-117017-0001-1271: hyp=['IT', 'HAD', 'PROBABLY', 'BEEN', 'YEARS', 'SINCE', 'ANY', 'HAD', 'DARED', 'RISK', 'IT', 'AFTER', 'THE', 'SUN', 'WENT', 'DOWN']
+8131-117017-0002-1272: ref=['AND', 'THE', 'SLOW', 'DOUBTFUL', 'RESPECT', 'ON', 'THE', 'FACES', 'OF', 'THE', 'CITIZENS', 'AS', 'THEY', 'NODDED', 'TO', 'HIM', 'WAS', 'EVEN', 'MORE', 'PROOF', 'THAT', "HALEY'S", 'SYSTEM', 'WAS', 'WORKING']
+8131-117017-0002-1272: hyp=['AND', 'THE', 'SLOW', 'DOUBTFUL', 'RESPECT', 'ON', 'THE', 'FACES', 'OF', 'THE', 'CITIZENS', 'AS', 'THEY', 'NODDED', 'TO', 'HIM', 'WAS', 'EVEN', 'MORE', 'PROOF', 'THAT', 'HAYE', 'SYSTEM', 'WAS', 'WORKING']
+8131-117017-0003-1273: ref=['GORDON', 'HIT', 'THE', 'SIGNAL', 'SWITCH', 'AND', 'THE', 'MARSPEAKER', 'LET', 'OUT', 'A', 'SHRILL', 'WHISTLE']
+8131-117017-0003-1273: hyp=['GORDON', 'HIT', 'THE', 'SIGNAL', 'SWITCH', 'AND', 'THE', 'MARKEE', 'LED', 'OUT', 'A', 'SHRILL', 'WHISTLE']
+8131-117017-0004-1274: ref=['GUNS', 'SUDDENLY', 'SEEMED', 'TO', 'BE', 'FLOURISHING', 'EVERYWHERE']
+8131-117017-0004-1274: hyp=['GUN', 'SUDDENLY', 'SEEMED', 'TO', 'BE', 'FLOURISHING', 'EVERYWHERE']
+8131-117017-0005-1275: ref=['YOU', "CAN'T", 'DO', 'IT', 'TO', 'ME']
+8131-117017-0005-1275: hyp=['YOU', "CAN'T", 'DO', 'IT', 'TO', 'ME']
+8131-117017-0006-1276: ref=["I'M", 'REFORMED', "I'M", 'GOING', 'STRAIGHT']
+8131-117017-0006-1276: hyp=["I'M", 'REFORMED', "I'M", 'GOING', 'STRAIGHT']
+8131-117017-0007-1277: ref=['YOU', 'DAMNED', 'COPS', "CAN'T", "O'NEILL", 'WAS', 'BLUBBERING']
+8131-117017-0007-1277: hyp=['YOU', 'DAMNED', 'COPSE', "CAN'T", "O'NEIA", 'WAS', 'BLUBBERING']
+8131-117017-0008-1278: ref=['ONE', 'LOOK', 'WAS', 'ENOUGH', 'THE', 'WORK', 'PAPERS', 'HAD', 'THE', 'TELLTALE', 'OVER', 'THICKENING', 'OF', 'THE', 'SIGNATURE', 'THAT', 'HAD', 'SHOWED', 'UP', 'ON', 'OTHER', 'PAPERS', 'OBVIOUSLY', 'FORGERIES']
+8131-117017-0008-1278: hyp=['ONE', 'LOOK', 'WAS', 'ENOUGH', 'THE', 'WORK', 'PAPERS', 'HAD', 'THE', 'TELL', 'TALE', 'OVER', 'THICKENING', 'OF', 'THE', 'SIGNATURE', 'THEY', 'HAD', 'SHOWED', 'UP', 'ON', 'OTHER', 'PAPERS', 'OBVIOUSLY', 'FORGERIES']
+8131-117017-0009-1279: ref=['SOME', 'TURNED', 'AWAY', 'AS', 'GORDON', 'AND', 'THE', 'OTHER', 'COP', 'WENT', 'TO', 'WORK', 'BUT', 'MOST', 'OF', 'THEM', "WEREN'T", 'SQUEAMISH']
+8131-117017-0009-1279: hyp=['SOME', 'TURNED', 'AWAY', 'AS', 'GORDON', 'AND', 'THE', 'OTHER', 'COP', 'WENT', 'TO', 'WORK', 'BUT', 'MOST', 'OF', 'THEM', "WEREN'T", 'SQUEAMISH']
+8131-117017-0010-1280: ref=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'TWO', 'PICKED', 'UP', 'THEIR', 'WHIMPERING', 'CAPTIVE']
+8131-117017-0010-1280: hyp=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'TWO', 'PICKED', 'UP', 'THEIR', 'WHIMPERING', 'CAPTIVE']
+8131-117017-0011-1281: ref=['JENKINS', 'THE', 'OTHER', 'COP', 'HAD', 'BEEN', 'HOLDING', 'THE', 'WALLET']
+8131-117017-0011-1281: hyp=['JENKINS', 'THE', 'OTHER', 'COP', 'HAD', 'BEEN', 'HOLDING', 'THE', 'WALLET']
+8131-117017-0012-1282: ref=['MUST', 'OF', 'BEEN', 'MAKING', 'A', 'BIG', 'CONTACT', 'IN', 'SOMETHING', 'FIFTY', 'FIFTY']
+8131-117017-0012-1282: hyp=['MUST', 'HAVE', 'BEEN', 'MAKING', 'A', 'BIG', 'CONTACT', 'IN', 'SOMETHING', 'FIFTY', 'FIFTY']
+8131-117017-0013-1283: ref=['THERE', 'MUST', 'HAVE', 'BEEN', 'OVER', 'TWO', 'THOUSAND', 'CREDITS', 'IN', 'THE', 'WALLET']
+8131-117017-0013-1283: hyp=['THERE', 'MUST', 'HAVE', 'BEEN', 'OVER', 'TWO', 'THOUSAND', 'CREDITS', 'IN', 'THE', 'WALLET']
+8131-117017-0014-1284: ref=['WHEN', 'GORDON', 'AND', 'JENKINS', 'CAME', 'BACK', 'MURDOCH', 'TOSSED', 'THE', 'MONEY', 'TO', 'THEM', 'SPLIT', 'IT']
+8131-117017-0014-1284: hyp=['WHEN', 'GORDON', 'AND', 'JENKINS', 'CAME', 'BACK', 'MARDOCK', 'TOSSED', 'THE', 'MONEY', 'TO', 'THEM', 'SPLIT', 'IT']
+8131-117017-0015-1285: ref=['WHATEVER', 'COMES', 'TO', 'HAND', "GOV'NOR"]
+8131-117017-0015-1285: hyp=['WHATEVER', 'COMES', 'TO', 'HAND', 'GOVERNOR']
+8131-117017-0016-1286: ref=['LIKE', 'THIS', 'SOCIAL', 'CALL', 'GORDON', 'ASKED', 'HIM']
+8131-117017-0016-1286: hyp=['LIKE', 'THIS', 'SOCIAL', 'CALL', 'GORDON', 'ASKED', 'HIM']
+8131-117017-0017-1287: ref=['THE', 'LITTLE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'HIS', 'ANCIENT', 'EIGHTEEN', 'YEAR', 'OLD', 'FACE', 'TURNING', 'SOBER', 'NOPE']
+8131-117017-0017-1287: hyp=['THE', 'LITTLE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'HIS', 'ANCIENT', 'EIGHTEEN', 'YEAR', 'OLD', 'FACE', 'TURNING', 'SOBER', 'NOTE']
+8131-117017-0018-1288: ref=['YOU', 'OWE', 'ME', 'SOME', 'BILLS', "GOV'NOR"]
+8131-117017-0018-1288: hyp=['YOU', 'OWE', 'ME', 'SOME', 'BILLS', "GUV'NER"]
+8131-117017-0019-1289: ref=['ELEVEN', 'HUNDRED', 'FIFTY', 'CREDITS']
+8131-117017-0019-1289: hyp=['ELEVEN', 'HUNDRED', 'FIFTY', 'CREDITS']
+8131-117017-0020-1290: ref=['YOU', "DIDN'T", 'PAY', 'UP', 'YOUR', 'PLEDGE', 'TO', 'THE', 'CAMPAIGN', 'FUND', 'SO', 'I', 'HADDA', 'FILL', 'IN']
+8131-117017-0020-1290: hyp=['YOU', "DIDN'T", 'PAY', 'UP', 'YOUR', 'PLEDGE', 'TO', 'THE', 'CAPTAIN', 'FUND', 'SO', 'I', 'HAD', 'A', 'FILL', 'IN']
+8131-117017-0021-1291: ref=['A', 'THOUSAND', 'INTEREST', 'AT', 'TEN', 'PER', 'CENT', 'A', 'WEEK', 'STANDARD', 'RIGHT']
+8131-117017-0021-1291: hyp=['A', 'THOUSAND', 'INTERESTS', 'AT', 'TEN', 'PER', 'CENT', 'A', 'WEEK', 'STANDARD', 'RIGHT']
+8131-117017-0022-1292: ref=['GORDON', 'HAD', 'HEARD', 'OF', 'THE', 'FRIENDLY', 'INTEREST', 'CHARGED', 'ON', 'THE', 'SIDE', 'HERE', 'BUT', 'HE', 'SHOOK', 'HIS', 'HEAD', 'WRONG', 'IZZY']
+8131-117017-0022-1292: hyp=['GORDON', 'HAD', 'HEARD', 'OF', 'THE', 'FRIENDLY', 'INTEREST', 'CHARGED', 'ON', 'THE', 'SIDE', 'HERE', 'BUT', 'HE', 'SHOOK', 'HIS', 'HEAD', 'WRONG', 'IS', 'HE']
+8131-117017-0023-1293: ref=['HUH', 'IZZY', 'TURNED', 'IT', 'OVER', 'AND', 'SHOOK', 'HIS', 'HEAD']
+8131-117017-0023-1293: hyp=['HER', 'AS', 'HE', 'TURNED', 'IT', 'OVER', 'AND', 'SHOOK', 'HIS', 'HEAD']
+8131-117017-0024-1294: ref=['NOW', 'SHOW', 'ME', 'WHERE', 'I', 'SIGNED', 'ANY', 'AGREEMENT', 'SAYING', "I'D", 'PAY', 'YOU', 'BACK']
+8131-117017-0024-1294: hyp=['NOW', 'SHOW', 'ME', 'WHERE', 'I', 'SIGNED', 'ANY', 'AGREEMENT', 'SAYING', "I'D", 'PAY', 'YOU', 'BACK']
+8131-117017-0025-1295: ref=['FOR', 'A', 'SECOND', "IZZY'S", 'FACE', 'WENT', 'BLANK', 'THEN', 'HE', 'CHUCKLED']
+8131-117017-0025-1295: hyp=['FOR', 'A', 'SECOND', 'IS', 'HIS', 'FACE', 'WENT', 'BLANK', 'THEN', 'HE', 'CHUCKLED']
+8131-117017-0026-1296: ref=['HE', 'PULLED', 'OUT', 'THE', 'BILLS', 'AND', 'HANDED', 'THEM', 'OVER']
+8131-117017-0026-1296: hyp=['HE', 'POURED', 'OUT', 'THE', 'BILLS', 'AND', 'HANDED', 'THEM', 'OVER']
+8131-117017-0027-1297: ref=['THANKS', 'IZZY', 'THANKS', 'YOURSELF']
+8131-117017-0027-1297: hyp=['THANKS', 'IS', 'HE', 'THANKS', 'YOURSELF']
+8131-117017-0028-1298: ref=['THE', 'KID', 'POCKETED', 'THE', 'MONEY', 'CHEERFULLY', 'NODDING']
+8131-117017-0028-1298: hyp=['THE', 'KID', 'POCKETED', 'THE', 'MONEY', 'CHEERFULLY', 'NODDING']
+8131-117017-0029-1299: ref=['THE', 'LITTLE', 'GUY', 'KNEW', 'MARS', 'AS', 'FEW', 'OTHERS', 'DID', 'APPARENTLY', 'FROM', 'ALL', 'SIDES']
+8131-117017-0029-1299: hyp=['THE', 'LITTLE', 'GUY', 'KNEW', 'MARS', 'AS', 'FEW', 'OTHERS', 'DID', 'APPARENTLY', 'FROM', 'ALL', 'SIDES']
+8131-117017-0030-1300: ref=['AND', 'IF', 'ANY', 'OF', 'THE', 'OTHER', 'COPS', 'HAD', 'PRIVATE', 'RACKETS', 'OF', 'THEIR', 'OWN', 'IZZY', 'WAS', 'UNDOUBTEDLY', 'THE', 'MAN', 'TO', 'FIND', 'IT', 'OUT', 'AND', 'USE', 'THE', 'INFORMATION', 'WITH', 'A', 'BEAT', 'SUCH', 'AS', 'THAT', 'EVEN', 'GOING', 'HALVES', 'AND', 'WITH', 'ALL', 'THE', 'GRAFT', 'TO', 'THE', 'UPPER', 'BRACKETS', "HE'D", 'STILL', 'BE', 'ABLE', 'TO', 'MAKE', 'HIS', 'PILE', 'IN', 'A', 'MATTER', 'OF', 'MONTHS']
+8131-117017-0030-1300: hyp=['AND', 'IF', 'ANY', 'OF', 'THE', 'OTHER', 'COPS', 'HAD', 'PRIVATE', 'RACKETS', 'OF', 'THEIR', 'OWN', 'IZZIE', 'WAS', 'UNDOUBTEDLY', 'THE', 'MAN', 'TO', 'FIND', 'IT', 'OUT', 'AND', 'USED', 'THE', 'INFORMATION', 'WITH', 'A', 'BEAT', 'SUCH', 'AS', 'THAT', 'EVEN', 'GOING', 'HALVES', 'AND', 'WITH', 'ALL', 'THE', 'GRAFT', 'OF', 'THE', 'UPPER', 'BRACKETS', "HE'D", 'STILL', 'BE', 'ABLE', 'TO', 'MAKE', 'HIS', 'PILE', 'IN', 'A', 'MATTER', 'OF', 'MONTHS']
+8131-117017-0031-1301: ref=['THE', 'CAPTAIN', 'LOOKED', 'COMPLETELY', 'BEATEN', 'AS', 'HE', 'CAME', 'INTO', 'THE', 'ROOM', 'AND', 'DROPPED', 'ONTO', 'THE', 'BENCH']
+8131-117017-0031-1301: hyp=['THE', 'CAPTAIN', 'LOOKED', 'COMPLETELY', 'BEATEN', 'AS', 'HE', 'CAME', 'INTO', 'THE', 'ROOM', 'AND', 'DROPPED', 'INTO', 'THE', 'BENCH']
+8131-117017-0032-1302: ref=['GO', 'ON', 'ACCEPT', 'DAMN', 'IT']
+8131-117017-0032-1302: hyp=['GO', 'ON', 'EXCEPT', 'DEMON']
+8131-117029-0000-1247: ref=['THERE', 'WAS', 'A', 'MAN', 'COMING', 'FROM', 'EARTH', 'ON', 'A', 'SECOND', 'SHIP', 'WHO', 'WOULD', 'SEE', 'HIM']
+8131-117029-0000-1247: hyp=['THERE', 'WAS', 'A', 'MAN', 'COMING', 'FROM', 'EARTH', 'ON', 'A', 'SECOND', 'SHIP', 'WHO', 'WOULD', 'SEE', 'HIM']
+8131-117029-0001-1248: ref=['THE', 'LITTLE', 'PUBLISHER', 'WAS', 'BACK', 'AT', 'THE', 'CRUSADER', 'AGAIN']
+8131-117029-0001-1248: hyp=['THE', 'LITTLE', 'PUBLISHER', 'WAS', 'BACK', 'AT', 'THE', 'CRUSADER', 'AGAIN']
+8131-117029-0002-1249: ref=['ONLY', 'GORDON', 'AND', 'SHEILA', 'WERE', 'LEFT']
+8131-117029-0002-1249: hyp=['ONLY', 'GORDON', 'AND', 'SHEILA', 'WERE', 'LEFT']
+8131-117029-0003-1250: ref=['CREDIT', 'HAD', 'BEEN', 'ESTABLISHED', 'AGAIN', 'AND', 'THE', 'BUSINESSES', 'WERE', 'OPEN']
+8131-117029-0003-1250: hyp=['CREDIT', 'HAD', 'BEEN', 'ESTABLISHED', 'AGAIN', 'AND', 'THE', 'BUSINESSES', 'WERE', 'OPEN']
+8131-117029-0004-1251: ref=['GORDON', 'CAME', 'TO', 'A', 'ROW', 'OF', 'TEMPORARY', 'BUBBLES', 'INDIVIDUAL', 'DWELLINGS', 'BUILT', 'LIKE', 'THE', 'DOME', 'BUT', 'OPAQUE', 'FOR', 'PRIVACY']
+8131-117029-0004-1251: hyp=['GORDON', 'CAME', 'TO', 'A', 'ROW', 'OF', 'TEMPORARY', 'BUBBLES', 'INDIVIDUAL', 'DWELLINGS', 'BUILT', 'LIKE', 'THE', 'DOME', 'BUT', 'OPAQUE', 'FOR', 'PRIVACY']
+8131-117029-0005-1252: ref=['THEY', 'HAD', 'BEEN', 'LUCKY']
+8131-117029-0005-1252: hyp=['THEY', 'HAD', 'BEEN', 'LUCKY']
+8131-117029-0006-1253: ref=["SCHULBERG'S", 'VOLUNTEERS', 'WERE', 'OFFICIAL', 'NOW']
+8131-117029-0006-1253: hyp=["SHELBERG'S", 'VOLUNTEERS', 'WERE', 'OFFICIAL', 'NOW']
+8131-117029-0007-1254: ref=['FATS', 'PLACE', 'WAS', 'STILL', 'OPEN', 'THOUGH', 'THE', 'CROOKED', 'TABLES', 'HAD', 'BEEN', 'REMOVED', 'GORDON', 'DROPPED', 'TO', 'A', 'STOOL', 'SLIPPING', 'OFF', 'HIS', 'HELMET']
+8131-117029-0007-1254: hyp=['FAT', 'PLACE', 'WAS', 'STILL', 'OPEN', 'THOUGH', 'THE', 'CROOKED', 'TABLES', 'HAD', 'BEEN', 'REMOVED', 'GORDON', 'DROPPED', 'TO', 'A', 'STOOL', 'SLIPPING', 'OFF', 'HIS', 'HELMET']
+8131-117029-0008-1255: ref=['HE', 'REACHED', 'AUTOMATICALLY', 'FOR', 'THE', 'GLASS', 'OF', 'ETHER', 'NEEDLED', 'BEER']
+8131-117029-0008-1255: hyp=['HE', 'REACHED', 'AUTOMATICALLY', 'FOR', 'THE', 'GLASS', 'OF', 'ETHER', 'NEEDLED', 'BEER']
+8131-117029-0009-1256: ref=['THOUGHT', "YOU'D", 'BE', 'IN', 'THE', 'CHIPS']
+8131-117029-0009-1256: hyp=['THOUGHT', "YOU'D", 'BE', 'IN', 'THE', 'CHIPS']
+8131-117029-0010-1257: ref=["THAT'S", 'MARS', 'GORDON', 'ECHOED', 'THE', "OTHER'S", 'COMMENT', 'WHY', "DON'T", 'YOU', 'PULL', 'OFF', 'THE', 'PLANET', 'FATS', 'YOU', 'COULD', 'GO', 'BACK', 'TO', 'EARTH', "I'D", 'GUESS', 'THE', 'OTHER', 'NODDED']
+8131-117029-0010-1257: hyp=["THAT'S", 'MARS', 'GORDON', 'ACCORD', 'OTHERS', 'COMMENT', 'WHY', "DON'T", 'YOU', 'PULL', 'OFF', 'THE', 'PLANET', 'FATS', 'YOU', 'COULD', 'GO', 'BACK', 'TO', 'EARTH', "I'D", 'GUESS', 'THE', 'OTHER', 'NODDED']
+8131-117029-0011-1258: ref=['GUESS', 'A', 'MAN', 'GETS', 'USED', 'TO', 'ANYTHING', 'HELL', 'MAYBE', 'I', 'CAN', 'HIRE', 'SOME', 'BUMS', 'TO', 'SIT', 'AROUND', 'AND', 'WHOOP', 'IT', 'UP', 'WHEN', 'THE', 'SHIPS', 'COME', 'IN', 'AND', 'BILL', 'THIS', 'AS', 'A', 'REAL', 'OLD', 'MARTIAN', 'DEN', 'OF', 'SIN']
+8131-117029-0011-1258: hyp=['GUESSIMIAN', 'MAN', 'GETS', 'USED', 'TO', 'ANYTHING', 'HELL', 'MAYBE', 'I', 'CAN', 'HIRE', 'SOME', 'BUMS', 'TO', 'SIT', 'AROUND', 'AND', 'WHOOP', 'IT', 'UP', 'WHEN', 'THE', 'SHIPS', 'COME', 'IN', 'AND', 'BUILD', 'THIS', 'AS', 'A', 'REAL', 'OLD', 'MARTIAN', 'DEN', 'OF', 'SIN']
+8131-117029-0012-1259: ref=['THERE', 'WAS', 'A', 'GRIN', 'ON', 'THE', "OTHER'S", 'FACE']
+8131-117029-0012-1259: hyp=['THERE', 'WAS', 'A', 'GRIN', 'ON', 'THE', "OTHER'S", 'FACE']
+8131-117029-0013-1260: ref=['FINALLY', 'GOT', 'OUR', 'ORDERS', 'FOR', 'YOU', "IT'S", 'MERCURY']
+8131-117029-0013-1260: hyp=['FINALLY', 'GOT', 'OUR', 'ORDERS', 'FOR', 'YOU', "IT'S", 'MERCURY']
+8131-117029-0014-1261: ref=['WE', 'SENT', 'TWENTY', 'OTHERS', 'THE', 'SAME', 'WAY', 'AND', 'THEY', 'FAILED']
+8131-117029-0014-1261: hyp=['WE', 'SENT', 'TWENTY', 'OTHERS', 'THE', 'SAME', 'WAY', 'AND', 'THEY', 'FAILED']
+8131-117029-0015-1262: ref=["LET'S", 'SAY', "YOU'VE", 'SHIFTED', 'SOME', 'OF', 'THE', 'MISERY', 'AROUND', 'A', 'BIT', 'AND', 'GIVEN', 'THEM', 'A', 'CHANCE', 'TO', 'DO', 'BETTER']
+8131-117029-0015-1262: hyp=["LET'S", 'SAVE', 'SHIFTED', 'SOME', 'OF', 'THE', 'MISERY', 'AROUND', 'A', 'BIT', 'AND', 'GIVEN', 'THEM', 'A', 'CHANCE', 'TO', 'DO', 'BETTER']
+8131-117029-0016-1263: ref=['YOU', "CAN'T", 'STAY', 'HERE']
+8131-117029-0016-1263: hyp=['YOU', "CAN'T", 'STAY', 'HERE']
+8131-117029-0017-1264: ref=["THERE'S", 'A', 'ROCKET', 'WAITING', 'TO', 'TRANSSHIP', 'YOU', 'TO', 'THE', 'MOON', 'ON', 'THE', 'WAY', 'TO', 'MERCURY', 'RIGHT', 'NOW', 'GORDON', 'SIGHED']
+8131-117029-0017-1264: hyp=["THERE'S", 'A', 'ROCKET', 'WAITING', 'TO', 'TRANSHIP', 'YOU', 'TO', 'THE', 'MOON', 'ON', 'THE', 'WAY', 'TO', 'MERCURY', 'RIGHT', 'NOW', 'GORDON', 'SIGHED']
+8131-117029-0018-1265: ref=['AND', "I'VE", 'PAID', 'HER', 'THE', 'PAY', 'WE', 'OWE', 'YOU', 'FROM', 'THE', 'TIME', 'YOU', 'BEGAN', 'USING', 'YOUR', 'BADGE', "SHE'S", 'OUT', 'SHOPPING']
+8131-117029-0018-1265: hyp=['AND', 'I', 'PAID', 'HER', 'THE', 'PAY', 'WE', 'OWE', 'YOU', 'FROM', 'THE', 'TIME', 'YOU', 'BEGIN', 'USING', 'YOUR', 'BADGE', "SHE'S", 'OUT', 'SHOPPING']
+8131-117029-0019-1266: ref=['BUT', 'HIS', 'OLD', 'EYES', 'WERE', 'GLINTING']
+8131-117029-0019-1266: hyp=['BUT', 'HIS', 'OLD', 'EYES', 'WERE', 'GLINTING']
+8131-117029-0020-1267: ref=['DID', 'YOU', 'THINK', "WE'D", 'LET', 'YOU', 'GO', 'WITHOUT', 'SEEING', 'YOU', 'OFF', 'COBBER', 'HE', 'ASKED']
+8131-117029-0020-1267: hyp=['DID', 'YOU', 'THINK', "WE'D", 'LET', 'YOU', 'GO', 'WITHOUT', 'SEEING', 'YOU', 'OFF', 'COPPER', 'HE', 'ASKED']
+8131-117029-0021-1268: ref=['I', 'I', 'OH', 'DRAT', 'IT', "I'M", 'GETTING', 'OLD', 'IZZY', 'YOU', 'TELL', 'HIM']
+8131-117029-0021-1268: hyp=['I', 'I', 'OH', 'DREAD', 'IT', "I'M", 'GETTING', 'OLD', 'IS', 'HE', 'YOU', 'TELL', 'HIM']
+8131-117029-0022-1269: ref=['HE', 'GRABBED', "GORDON'S", 'HAND', 'AND', 'WADDLED', 'DOWN', 'THE', 'LANDING', 'PLANK', 'IZZY', 'SHOOK', 'HIS', 'HEAD']
+8131-117029-0022-1269: hyp=['HE', 'GRABBED', "GORDON'S", 'HAND', 'AND', 'WADDLED', 'DOWN', 'THE', 'LANDING', 'PLANK', 'IS', 'HE', 'SHOOK', 'HIS', 'HEAD']
+8188-269288-0000-2881: ref=['ANNIE', 'COLCHESTER', 'HAD', 'BEGUN', 'TO', 'MAKE', 'FRIENDS', 'WITH', 'LESLIE']
+8188-269288-0000-2881: hyp=['ANY', 'COLCHESTER', 'HAD', 'BEGUN', 'TO', 'MAKE', 'FRIENDS', 'WITH', 'LIZZLE']
+8188-269288-0001-2882: ref=['LESLIE', 'DETERMINED', 'TO', 'TRY', 'FOR', 'HONORS', 'IN', 'ENGLISH', 'LANGUAGE', 'AND', 'LITERATURE']
+8188-269288-0001-2882: hyp=['LESLIE', 'DETERMINED', 'TO', 'TRIFLE', 'HONORS', 'IN', 'ENGLISH', 'LANGUAGE', 'AND', 'LITERATURE']
+8188-269288-0002-2883: ref=['HER', 'TASTES', 'ALL', 'LAY', 'IN', 'THIS', 'DIRECTION', 'HER', 'IDEA', 'BEING', 'BY', 'AND', 'BY', 'TO', 'FOLLOW', 'HER', "MOTHER'S", 'PROFESSION', 'OF', 'JOURNALISM', 'FOR', 'WHICH', 'SHE', 'ALREADY', 'SHOWED', 'CONSIDERABLE', 'APTITUDE']
+8188-269288-0002-2883: hyp=['HER', 'TASTES', 'ALL', 'LAY', 'IN', 'THIS', 'DIRECTION', 'HER', 'IDEA', 'BEING', 'BY', 'AND', 'BY', 'TO', 'FOLLOW', 'HER', "MOTHER'S", 'PROFICIENT', 'OF', 'JOURNALISM', 'FOR', 'WHICH', 'SHE', 'ALWAYS', 'SHOWED', 'CONSIDERABLE', 'APTITUDE']
+8188-269288-0003-2884: ref=['SHE', 'HAD', 'NO', 'IDEA', 'OF', 'ALLOWING', 'HERSELF', 'TO', 'BREAK', 'DOWN']
+8188-269288-0003-2884: hyp=['SHE', 'HAD', 'NO', 'IDEA', 'OF', 'ALLOWING', 'HERSELF', 'TO', 'BREAK', 'DOWN']
+8188-269288-0004-2885: ref=['WHAT', 'DO', 'YOU', 'MEAN', 'REPLIED', 'LESLIE']
+8188-269288-0004-2885: hyp=['WHAT', 'DO', 'YOU', 'MEAN', 'REPLIED', 'LISLEY']
+8188-269288-0005-2886: ref=['WHY', 'YOU', 'WILL', 'BE', 'PARTING', 'FROM', 'ME', 'YOU', 'KNOW']
+8188-269288-0005-2886: hyp=['WHY', 'YOU', 'WILL', 'BE', 'PARTING', 'FROM', 'ME', 'YOU', 'KNOW']
+8188-269288-0006-2887: ref=['I', "WON'T", 'BE', 'THE', 'CONSTANT', 'WORRY', 'AND', 'PLAGUE', 'OF', 'YOUR', 'LIFE']
+8188-269288-0006-2887: hyp=['I', "WON'T", 'BE', 'THE', 'CONSTANT', 'WORRY', 'AND', 'PLAGUE', 'OF', 'YOUR', 'LIFE']
+8188-269288-0007-2888: ref=['IT', 'IS', 'THIS', 'IF', 'BY', 'ANY', 'CHANCE', 'YOU', "DON'T", 'LEAVE', 'SAINT', "WODE'S", 'ANNIE', 'I', 'HOPE', 'YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'BE', 'YOUR', 'ROOMFELLOW', 'AGAIN', 'NEXT', 'TERM']
+8188-269288-0007-2888: hyp=['THIS', 'IS', 'THIS', 'IF', 'BY', 'ANY', 'CHANCE', 'YOU', "DON'T", 'LEAVE', 'SAINT', 'WORDS', 'ANNIE', 'I', 'HOPE', 'YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'BE', 'YOUR', 'ROOM', 'FELLOW', 'AGAIN', 'NEXT', 'TERM']
+8188-269288-0008-2889: ref=['SAID', 'ANNIE', 'A', 'FLASH', 'OF', 'LIGHT', 'COMING', 'INTO', 'HER', 'EYES', 'AND', 'THEN', 'LEAVING', 'THEM']
+8188-269288-0008-2889: hyp=['SAID', 'ANNIE', 'A', 'FLASH', 'OF', 'LIGHT', 'COMING', 'INTO', 'HER', 'EYES', 'AND', 'THEN', 'LEAVING', 'THEM']
+8188-269288-0009-2890: ref=['BUT', 'SHE', 'ADDED', 'ABRUPTLY', 'YOU', 'SPEAK', 'OF', 'SOMETHING', 'WHICH', 'MUST', 'NOT', 'TAKE', 'PLACE']
+8188-269288-0009-2890: hyp=['BUT', 'SHE', 'ADDED', 'ABRUPTLY', 'YOU', 'SPEAK', 'OF', 'SOMETHING', 'WHICH', 'MUST', 'NOT', 'TAKE', 'PLACE']
+8188-269288-0010-2891: ref=['I', 'MUST', 'PASS', 'IN', 'HONORS', 'IF', 'I', "DON'T", 'I', 'SHALL', 'DIE']
+8188-269288-0010-2891: hyp=['I', 'MUST', 'PASS', 'AN', 'HONOUR', 'IF', 'I', "DON'T", 'I', 'SHALL', 'DIE']
+8188-269288-0011-2892: ref=['A', 'FEW', 'MOMENTS', 'LATER', 'THERE', 'CAME', 'A', 'TAP', 'AT', 'THE', 'DOOR']
+8188-269288-0011-2892: hyp=['A', 'FEW', 'MOMENTS', 'LATER', 'DICK', 'CAME', 'A', 'TAP', 'AT', 'THE', 'DOOR']
+8188-269288-0012-2893: ref=['LESLIE', 'OPENED', 'THE', 'DOOR']
+8188-269288-0012-2893: hyp=['LESLIE', 'OPENED', 'THE', 'DOOR']
+8188-269288-0013-2894: ref=['JANE', 'HERIOT', 'STOOD', 'WITHOUT']
+8188-269288-0013-2894: hyp=['JANE', 'HARRIET', 'STOOD', 'WITHOUT']
+8188-269288-0014-2895: ref=['THESE', 'LETTERS', 'HAVE', 'JUST', 'COME', 'FOR', 'YOU', 'AND', 'ANNIE', 'COLCHESTER', 'SHE', 'SAID', 'AND', 'AS', 'I', 'WAS', 'COMING', 'UPSTAIRS', 'I', 'THOUGHT', 'I', 'WOULD', 'LEAVE', 'THEM', 'WITH', 'YOU']
+8188-269288-0014-2895: hyp=['THESE', 'LETTERS', 'HAVE', 'JUST', 'COME', 'FOR', 'YOU', 'IN', 'ANY', 'COLCHESTER', 'SHE', 'SAID', 'AND', 'AS', 'I', 'WAS', 'COMING', 'UPSTAIRS', 'I', 'THOUGHT', 'I', 'WOULD', 'LEAVE', 'THEM', 'WITH', 'YOU']
+8188-269288-0015-2896: ref=['LESLIE', 'THANKED', 'HER', 'AND', 'EAGERLY', 'GRASPED', 'THE', 'LITTLE', 'PARCEL']
+8188-269288-0015-2896: hyp=['LISLEY', 'THANKED', 'HER', 'AND', 'EAGERLY', 'GRASPED', 'THE', 'LITTLE', 'PARCEL']
+8188-269288-0016-2897: ref=['HER', 'EYES', 'SHONE', 'WITH', 'PLEASURE', 'AT', 'THE', 'ANTICIPATION', 'OF', 'THE', 'DELIGHTFUL', 'TIME', 'SHE', 'WOULD', 'HAVE', 'REVELING', 'IN', 'THE', 'HOME', 'NEWS', 'THE', 'OTHER', 'LETTER', 'WAS', 'DIRECTED', 'TO', 'ANNIE', 'COLCHESTER']
+8188-269288-0016-2897: hyp=['HER', 'EYES', 'SHONE', 'WITH', 'PLEASURE', 'AT', 'THE', 'ANTICIPATION', 'OF', 'THE', 'DELIGHTFUL', 'TIME', 'SHE', 'WOULD', 'HAVE', 'RIVELING', 'IN', 'THE', 'HOME', 'NEWS', 'THE', 'OTHER', 'LETTER', 'WAS', 'DIRECTED', 'TO', 'ANY', 'COLCHESTER']
+8188-269288-0017-2898: ref=['HERE', 'IS', 'A', 'LETTER', 'FOR', 'YOU', 'ANNIE', 'CRIED', 'LESLIE']
+8188-269288-0017-2898: hyp=['HERE', 'IS', 'A', 'LETTER', 'FOR', 'YOU', 'ANY', 'CRIED', 'LIZZIE']
+8188-269288-0018-2899: ref=['HER', 'FACE', 'GREW', 'SUDDENLY', 'WHITE', 'AS', 'DEATH', 'WHAT', 'IS', 'IT', 'DEAR']
+8188-269288-0018-2899: hyp=['HER', 'FACE', 'GREW', 'SUDDENLY', 'WHITE', 'AS', 'DEATH', 'WHAT', 'IS', 'IT', 'DEAR']
+8188-269288-0019-2900: ref=['I', 'HAVE', 'BEEN', 'STARVING', 'OR', 'RATHER', 'I', 'HAVE', 'BEEN', 'THIRSTING']
+8188-269288-0019-2900: hyp=['I', 'HAVE', 'BEEN', 'STARLING', 'OR', 'RATHER', 'I', 'HAVE', 'BEEN', 'THIRSTING']
+8188-269288-0020-2901: ref=['WELL', 'READ', 'IT', 'IN', 'PEACE', 'SAID', 'LESLIE', 'I', "WON'T", 'DISTURB', 'YOU']
+8188-269288-0020-2901: hyp=['WELL', 'READ', 'IT', 'IN', 'PEACE', 'SAID', 'LIDNESLEY', 'I', "WON'T", 'DISTURB', 'YOU']
+8188-269288-0021-2902: ref=['I', 'AM', 'TRULY', 'GLAD', 'IT', 'HAS', 'COME']
+8188-269288-0021-2902: hyp=['I', 'AM', 'TRULY', 'GLAD', 'IT', 'HAS', 'COME']
+8188-269288-0022-2903: ref=['LESLIE', 'SEATED', 'HERSELF', 'WITH', 'HER', 'BACK', 'TO', 'HER', 'COMPANION', 'AND', 'OPENED', 'HER', 'OWN', 'LETTERS']
+8188-269288-0022-2903: hyp=['LISSLY', 'SEATED', 'HERSELF', 'WITH', 'HER', 'BACK', 'TO', 'HER', 'COMPANION', 'AND', 'OPENED', 'HER', 'ON', 'LETTERS']
+8188-269288-0023-2904: ref=["DON'T", 'NOTICE', 'ME', 'REPLIED', 'ANNIE']
+8188-269288-0023-2904: hyp=["DON'T", 'NOTICE', 'ME', 'REPLIED', 'ANY']
+8188-269288-0024-2905: ref=['I', 'MUST', 'GO', 'INTO', 'THE', 'GROUNDS', 'THE', 'AIR', 'IS', 'STIFLING']
+8188-269288-0024-2905: hyp=['I', 'MUST', 'GO', 'INTO', 'THE', 'GROUND', 'THE', 'AIR', 'IS', 'STIFLING']
+8188-269288-0025-2906: ref=['BUT', 'THEY', 'ARE', 'JUST', 'SHUTTING', 'UP']
+8188-269288-0025-2906: hyp=['BUT', 'THEY', 'HAD', 'JUST', 'SHUTTING', 'UP']
+8188-269288-0026-2907: ref=['I', 'SHALL', 'GO', 'I', 'KNOW', 'A', 'WAY']
+8188-269288-0026-2907: hyp=['I', 'SHALL', 'GO', 'I', 'KNOW', 'A', 'WAY']
+8188-269288-0027-2908: ref=['JUST', 'AFTER', 'MIDNIGHT', 'SHE', 'ROSE', 'WITH', 'A', 'SIGH', 'TO', 'PREPARE', 'FOR', 'BED']
+8188-269288-0027-2908: hyp=['JUST', 'AFTER', 'MIDNIGHT', 'SHE', 'ROSE', 'WITH', 'A', 'SIGH', 'TO', 'PREPARE', 'FOR', 'BED']
+8188-269288-0028-2909: ref=['SHE', 'LOOKED', 'ROUND', 'THE', 'ROOM']
+8188-269288-0028-2909: hyp=['SHE', 'LOOKED', 'ROUND', 'THE', 'ROOM']
+8188-269288-0029-2910: ref=['NOW', 'I', 'REMEMBER', 'SHE', 'GOT', 'A', 'LETTER', 'WHICH', 'UPSET', 'HER', 'VERY', 'MUCH', 'AND', 'WENT', 'OUT']
+8188-269288-0029-2910: hyp=['NOW', 'I', 'REMEMBER', 'SHE', 'GOT', 'A', 'LETTER', 'WHICH', 'UPSET', 'HER', 'VERY', 'MUCH', 'AND', 'WENT', 'OUT']
+8188-269288-0030-2911: ref=['LESLIE', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'FLUNG', 'IT', 'OPEN', 'SHE', 'PUT', 'HER', 'HEAD', 'OUT', 'AND', 'TRIED', 'TO', 'PEER', 'INTO', 'THE', 'DARKNESS', 'BUT', 'THE', 'MOON', 'HAD', 'ALREADY', 'SET', 'AND', 'SHE', 'COULD', 'NOT', 'SEE', 'MORE', 'THAN', 'A', 'COUPLE', 'OF', 'YARDS', 'IN', 'FRONT', 'OF', 'HER']
+8188-269288-0030-2911: hyp=['LISLEY', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'FLUNG', 'IT', 'OPEN', 'SHE', 'PUT', 'HER', 'HEAD', 'OUT', 'AND', 'TRIED', 'TO', 'PEER', 'INTO', 'THE', 'DARKNESS', 'BUT', 'THE', 'MOON', 'HAD', 'ALREADY', 'SET', 'AND', 'SHE', 'COULD', 'NOT', 'SEE', 'MORE', 'THAN', 'A', 'COUPLE', 'OF', 'YARDS', 'IN', 'FRONT', 'OF', 'HER']
+8188-269288-0031-2912: ref=['SHE', 'IS', 'A', 'VERY', 'QUEER', 'ERRATIC', 'CREATURE', 'AND', 'THAT', 'LETTER', 'THERE', 'WAS', 'BAD', 'NEWS', 'IN', 'THAT', 'LETTER']
+8188-269288-0031-2912: hyp=['SHE', 'IS', 'A', 'VERY', 'QUEER', 'THE', 'RATT', 'CREATURE', 'AND', 'THAT', 'LETTER', 'THERE', 'WAS', 'BAD', 'NEWS', 'IN', 'THAT', 'LETTER']
+8188-269288-0032-2913: ref=['WHAT', 'CAN', 'SHE', 'BE', 'DOING', 'OUT', 'BY', 'HERSELF']
+8188-269288-0032-2913: hyp=['WHAT', "CAN'T", 'YOU', 'DOING', 'OUT', 'BY', 'HERSELF']
+8188-269288-0033-2914: ref=['LESLIE', 'LEFT', 'THE', 'ROOM', 'BUT', 'SHE', 'HAD', 'SCARCELY', 'GONE', 'A', 'DOZEN', 'PACES', 'DOWN', 'THE', 'CORRIDOR', 'BEFORE', 'SHE', 'MET', 'ANNIE', 'RETURNING']
+8188-269288-0033-2914: hyp=['THIS', 'LILY', 'LEFT', 'THE', 'ROOM', 'BUT', 'SHE', 'HAD', 'SCARCELY', 'GONE', 'A', 'DOZEN', 'PLACES', 'DOWN', 'THE', 'CORRIDOR', 'BEFORE', 'SHE', 'MET', 'ANY', 'RETURNING']
+8188-269288-0034-2915: ref=["ANNIE'S", 'EYES', 'WERE', 'VERY', 'BRIGHT', 'HER', 'CHEEKS', 'WERE', 'NO', 'LONGER', 'PALE', 'AND', 'THERE', 'WAS', 'A', 'BRILLIANT', 'COLOR', 'IN', 'THEM']
+8188-269288-0034-2915: hyp=['ANY', 'EYES', 'WERE', 'VERY', 'BRIGHT', 'HER', 'CHEEKS', 'WERE', 'NO', 'LONGER', 'PALE', 'AND', 'THERE', 'WAS', 'A', 'BRILLIANT', 'COLOUR', 'IN', 'THEM']
+8188-269288-0035-2916: ref=['SHE', 'DID', 'NOT', 'TAKE', 'THE', 'LEAST', 'NOTICE', 'OF', 'LESLIE', 'BUT', 'GOING', 'INTO', 'THE', 'ROOM', 'SHUT', 'THE', 'DOOR']
+8188-269288-0035-2916: hyp=['SHE', 'DID', 'NOT', 'TAKE', 'THE', 'LEAST', 'NOTICE', 'OF', 'PLEASING', 'BUT', 'GOING', 'INTO', 'THE', 'ROOM', 'SHUT', 'THE', 'DOOR']
+8188-269288-0036-2917: ref=["DON'T", 'BEGIN', 'SAID', 'ANNIE']
+8188-269288-0036-2917: hyp=["DON'T", 'BEGIN', 'SAID', 'ANNIE']
+8188-269288-0037-2918: ref=["DON'T", 'BEGIN', 'WHAT', 'DO', 'YOU', 'MEAN']
+8188-269288-0037-2918: hyp=["DON'T", 'BEGIN', 'WHAT', 'DO', 'YOU', 'MEAN']
+8188-269288-0038-2919: ref=['I', 'MEAN', 'THAT', 'I', "DON'T", 'WANT', 'YOU', 'TO', 'BEGIN', 'TO', 'ASK', 'QUESTIONS']
+8188-269288-0038-2919: hyp=['I', 'MEAN', 'THAT', 'I', "DON'T", 'WANT', 'YOU', 'TO', 'BEGIN', 'TO', 'ASK', 'QUESTIONS']
+8188-269288-0039-2920: ref=['I', 'WALKED', 'UP', 'AND', 'DOWN', 'AS', 'FAST', 'AS', 'EVER', 'I', 'COULD', 'OUTSIDE', 'IN', 'ORDER', 'TO', 'MAKE', 'MYSELF', 'SLEEPY']
+8188-269288-0039-2920: hyp=['I', 'WALKED', 'UP', 'AND', 'DOWN', 'AS', 'FAST', 'AS', 'EVER', 'I', 'COULD', 'OUTSIDE', 'IN', 'ORDER', 'TO', 'MAKE', 'MYSELF', 'SLEEPY']
+8188-269288-0040-2921: ref=["DON'T", 'TALK', 'TO', 'ME', 'LESLIE', "DON'T", 'SAY', 'A', 'SINGLE', 'WORD']
+8188-269288-0040-2921: hyp=["THEY'RE", 'TALK', 'TO', 'ME', 'LISLEY', "DON'T", 'SAY', 'A', 'SINGLE', 'WORD']
+8188-269288-0041-2922: ref=['I', 'SHALL', 'GO', 'OFF', 'TO', 'SLEEP', 'THAT', 'IS', 'ALL', 'I', 'CARE', 'FOR']
+8188-269288-0041-2922: hyp=['I', 'SHALL', 'GO', 'OFF', 'TO', 'SLEEP', 'THAT', 'IS', 'ALL', 'I', 'CARE', 'FOR']
+8188-269288-0042-2923: ref=["DON'T", 'SAID', 'ANNIE']
+8188-269288-0042-2923: hyp=["DON'T", 'SAID', 'ANNIE']
+8188-269288-0043-2924: ref=['NOW', 'DRINK', 'THIS', 'AT', 'ONCE', 'SHE', 'SAID', 'IN', 'A', 'VOICE', 'OF', 'AUTHORITY', 'IF', 'YOU', 'REALLY', 'WISH', 'TO', 'SLEEP']
+8188-269288-0043-2924: hyp=['NOW', 'DRINK', 'THIS', 'AT', 'ONCE', 'SHE', 'SAID', 'IN', 'A', 'VOICE', 'OF', 'AUTHORITY', 'IF', 'YOU', 'REALLY', 'WISH', 'TO', 'SLEEP']
+8188-269288-0044-2925: ref=['ANNIE', 'STARED', 'VACANTLY', 'AT', 'THE', 'COCOA', 'THEN', 'SHE', 'UTTERED', 'A', 'LAUGH']
+8188-269288-0044-2925: hyp=['ANY', 'STEERED', 'VACANTLY', 'AT', 'THE', 'CUCKOO', 'DID', 'SHE', 'UTTERED', 'A', 'LAUGH']
+8188-269288-0045-2926: ref=['DRINK', 'THAT', 'SHE', 'SAID']
+8188-269288-0045-2926: hyp=['DRINK', 'THAT', 'SHE', 'SAID']
+8188-269288-0046-2927: ref=['DO', 'YOU', 'WANT', 'TO', 'KILL', 'ME', "DON'T", 'TALK', 'ANY', 'MORE']
+8188-269288-0046-2927: hyp=['DO', 'YOU', 'WANT', 'TO', 'KILL', 'ME', "DON'T", 'TALK', 'ANY', 'MORE']
+8188-269288-0047-2928: ref=['I', 'AM', 'SLEEPY', 'I', 'SHALL', 'SLEEP']
+8188-269288-0047-2928: hyp=['I', 'AM', 'SLEEPY', 'I', 'SHALL', 'SLEEP']
+8188-269288-0048-2929: ref=['SHE', 'GOT', 'INTO', 'BED', 'AS', 'SHE', 'SPOKE', 'AND', 'WRAPPED', 'THE', 'CLOTHES', 'TIGHTLY', 'ROUND', 'HER']
+8188-269288-0048-2929: hyp=['SHE', 'GOT', 'INTO', 'BED', 'AS', 'SHE', 'SPOKE', 'AND', 'WRAPPED', 'THE', 'CLOTHES', 'TIGHTLY', 'ROUND', 'HER']
+8188-269288-0049-2930: ref=["CAN'T", 'YOU', 'MANAGE', 'WITH', 'A', 'CANDLE', 'JUST', 'FOR', 'ONCE']
+8188-269288-0049-2930: hyp=['CAN', 'YOU', 'MANAGE', 'WITH', 'A', 'CANDLE', 'JUST', 'FOR', 'ONCE']
+8188-269288-0050-2931: ref=['CERTAINLY', 'SAID', 'LESLIE']
+8188-269288-0050-2931: hyp=['CERTAINLY', 'CITIZELY']
+8188-269288-0051-2932: ref=['SHE', 'TURNED', 'OFF', 'THE', 'LIGHT', 'AND', 'LIT', 'A', 'CANDLE', 'WHICH', 'SHE', 'PUT', 'BEHIND', 'HER', 'SCREEN', 'THEN', 'PREPARED', 'TO', 'GET', 'INTO', 'BED']
+8188-269288-0051-2932: hyp=['SHE', 'TURNED', 'OFF', 'THE', 'LIGHT', 'AND', 'LET', 'HER', 'CANDLE', 'WOULD', 'SHE', 'PUT', 'BEHIND', 'HER', 'SCREEN', 'THEN', 'PREPARED', 'TO', 'GET', 'INTO', 'BED']
+8188-269288-0052-2933: ref=["ANNIE'S", 'MANNER', 'WAS', 'VERY', 'MYSTERIOUS']
+8188-269288-0052-2933: hyp=["ANY'S", 'MANNER', 'WAS', 'VERY', 'MYSTERIOUS']
+8188-269288-0053-2934: ref=['ANNIE', 'DID', 'NOT', 'MEAN', 'TO', 'CONFIDE', 'IN', 'ANYONE', 'THAT', 'NIGHT', 'AND', 'THE', 'KINDEST', 'THING', 'WAS', 'TO', 'LEAVE', 'HER', 'ALONE']
+8188-269288-0053-2934: hyp=['AND', 'HE', 'DID', 'NOT', 'MEAN', 'TO', 'CONFINE', 'IN', 'ANY', 'ONE', 'THAT', 'NIGHT', 'AND', 'THE', 'KINDEST', 'THING', 'WAS', 'TO', 'LEAVE', 'HER', 'ALONE']
+8188-269288-0054-2935: ref=['TIRED', 'OUT', 'LESLIE', 'HERSELF', 'DROPPED', 'ASLEEP']
+8188-269288-0054-2935: hyp=['TIE', 'IT', 'OUT', 'LESLIE', 'HERSELF', 'DROPPED', 'ASLEEP']
+8188-269288-0055-2936: ref=['ANNIE', 'IS', 'THAT', 'YOU', 'SHE', 'CALLED', 'OUT']
+8188-269288-0055-2936: hyp=['ANY', 'IS', 'THAT', 'YOU', 'SHE', 'CALLED', 'OUT']
+8188-269288-0056-2937: ref=['THERE', 'WAS', 'NO', 'REPLY', 'BUT', 'THE', 'SOUND', 'OF', 'HURRYING', 'STEPS', 'CAME', 'QUICKER', 'AND', 'QUICKER', 'NOW', 'AND', 'THEN', 'THEY', 'WERE', 'INTERRUPTED', 'BY', 'A', 'GROAN']
+8188-269288-0056-2937: hyp=['THERE', 'WAS', 'NO', 'REPLY', 'BUT', 'THE', 'SOUND', 'OF', 'HURRYING', 'STEPS', 'CAME', 'QUICKER', 'AND', 'QUICKER', 'NOW', 'AND', 'THEN', 'THEIR', 'INTERRUPTED', 'BY', 'A', 'GROAN']
+8188-269288-0057-2938: ref=['OH', 'THIS', 'WILL', 'KILL', 'ME', 'MY', 'HEART', 'WILL', 'BREAK', 'THIS', 'WILL', 'KILL', 'ME']
+8188-269288-0057-2938: hyp=['OH', 'THIS', 'WILL', 'KILL', 'ME', 'MY', 'HEART', 'WILL', 'BREAK', 'THIS', 'WILL', 'KILL', 'ME']
+8188-269290-0000-2823: ref=['THE', 'GUILD', 'OF', 'SAINT', 'ELIZABETH']
+8188-269290-0000-2823: hyp=['THE', 'GUILD', 'OF', 'SAINT', 'ELIZABETH']
+8188-269290-0001-2824: ref=['IMMEDIATELY', 'AFTER', 'DINNER', 'THAT', 'EVENING', 'LESLIE', 'RAN', 'UP', 'TO', 'HER', 'ROOM', 'TO', 'MAKE', 'PREPARATIONS', 'FOR', 'HER', 'VISIT', 'TO', 'EAST', 'HALL']
+8188-269290-0001-2824: hyp=['IMMEDIATELY', 'AFTER', 'DINNER', 'THAT', 'EVENING', 'LISLEY', 'RAN', 'UP', 'TO', 'HER', 'ROOM', 'TO', 'MAKE', 'PREPARATIONS', 'FOR', 'HER', 'VISIT', 'TO', 'EAST', 'HALL']
+8188-269290-0002-2825: ref=["I'M", 'NOT', 'COMING', 'SAID', 'ANNIE']
+8188-269290-0002-2825: hyp=["I'M", 'NOT', 'COMING', 'SAID', 'ANNIE']
+8188-269290-0003-2826: ref=['EVERY', 'STUDENT', 'IS', 'TO', 'BE', 'IN', 'EAST', 'HALL', 'AT', 'HALF', 'PAST', 'EIGHT']
+8188-269290-0003-2826: hyp=['EVERY', 'STUDENT', 'IS', 'TO', 'BE', 'AN', 'EAST', 'HALL', 'AT', 'HALF', 'PAST', 'EIGHT']
+8188-269290-0004-2827: ref=['IT', "DOESN'T", 'MATTER', 'REPLIED', 'ANNIE', 'WHETHER', 'IT', 'IS', 'AN', 'ORDER', 'OR', 'NOT', "I'M", 'NOT', 'COMING', 'SAY', 'NOTHING', 'ABOUT', 'ME', 'PLEASE']
+8188-269290-0004-2827: hyp=['IT', 'DOES', 'MATTER', 'REPLIED', 'ANNIE', 'WHITHER', 'IT', 'IS', 'AN', 'ORDER', 'OR', 'NOT', 'I', 'AM', 'NOT', 'COMING', 'SAY', 'NOTHING', 'ABOUT', 'ME', 'PLEASE']
+8188-269290-0005-2828: ref=['IT', 'BURNED', 'AS', 'IF', 'WITH', 'FEVER']
+8188-269290-0005-2828: hyp=['IT', 'BURNED', 'AS', 'IF', 'WITH', 'FEVER']
+8188-269290-0006-2829: ref=['YOU', "DON'T", 'KNOW', 'WHAT', 'A', 'TRIAL', 'IT', 'IS', 'FOR', 'ME', 'TO', 'HAVE', 'YOU', 'HERE']
+8188-269290-0006-2829: hyp=['YOU', "DON'T", 'KNOW', 'WHAT', 'A', 'TRIAL', 'IT', 'IS', 'FOR', 'ME', 'TO', 'HAVE', 'YOU', 'HERE']
+8188-269290-0007-2830: ref=['I', 'WANT', 'TO', 'BE', 'ALONE', 'GO']
+8188-269290-0007-2830: hyp=['I', 'WANT', 'TO', 'BE', 'ALONE', 'GO']
+8188-269290-0008-2831: ref=['I', 'KNOW', 'YOU', "DON'T", 'QUITE', 'MEAN', 'WHAT', 'YOU', 'SAY', 'SAID', 'LESLIE', 'BUT', 'OF', 'COURSE', 'IF', 'YOU', 'REALLY', 'WISH', 'ME']
+8188-269290-0008-2831: hyp=['I', 'KNOW', 'YOU', "DON'T", 'QUITE', 'MEAN', 'WHAT', 'YOU', 'SAY', 'SAID', 'LIZZIE', 'BUT', 'OF', 'COURSE', 'IF', 'YOU', 'REALLY', 'WISH', 'ME']
+8188-269290-0009-2832: ref=['YOU', 'FRET', 'ME', 'BEYOND', 'ENDURANCE']
+8188-269290-0009-2832: hyp=['YOU', 'FRET', 'ME', 'BEYOND', 'ENDURANCE']
+8188-269290-0010-2833: ref=['WRAPPING', 'A', 'PRETTY', 'BLUE', 'SHAWL', 'ROUND', 'HER', 'HEAD', 'AND', 'SHOULDERS', 'SHE', 'TURNED', 'TO', 'ANNIE']
+8188-269290-0010-2833: hyp=['WRAPPING', 'A', 'PRETTY', 'BLUE', 'SHAWL', 'AROUND', 'A', 'HIDDEN', 'SHOULDERS', 'SHE', 'TURNED', 'TO', 'ANNIE']
+8188-269290-0011-2834: ref=['LESLIE', 'WAS', 'JUST', 'CLOSING', 'THE', 'DOOR', 'BEHIND', 'HER', 'WHEN', 'ANNIE', 'CALLED', 'AFTER', 'HER']
+8188-269290-0011-2834: hyp=['LESLIE', 'WAS', 'JUST', 'CLOSING', 'THE', 'DOOR', 'BEHIND', 'HER', 'WHEN', 'NY', 'CALLED', 'AFTER', 'HER']
+8188-269290-0012-2835: ref=['I', 'TOOK', 'IT', 'OUT', 'SAID', 'LESLIE', 'TOOK', 'IT', 'OUT']
+8188-269290-0012-2835: hyp=['I', 'TOOK', 'IT', 'OUT', 'SAID', 'LISLEY', 'TOOK', 'IT', 'OUT']
+8188-269290-0013-2836: ref=['HAVE', 'THE', 'GOODNESS', 'TO', 'FIND', 'IT', 'AND', 'PUT', 'IT', 'BACK']
+8188-269290-0013-2836: hyp=['HAVE', 'THE', 'GOODNESS', 'TO', 'FIND', 'IT', 'AND', 'PUT', 'IT', 'BACK']
+8188-269290-0014-2837: ref=['BUT', "DON'T", 'LOCK', 'ME', 'OUT', 'PLEASE', 'ANNIE']
+8188-269290-0014-2837: hyp=['BUT', "DON'T", 'LOCK', 'ME', 'OUT', 'PLEASE', 'ANY']
+8188-269290-0015-2838: ref=['OH', 'I', "WON'T", 'LOCK', 'YOU', 'OUT', 'SHE', 'SAID', 'BUT', 'I', 'MUST', 'HAVE', 'THE', 'KEY']
+8188-269290-0015-2838: hyp=['OH', 'I', "WON'T", 'LOCK', 'YOU', 'ABOUT', 'SHE', 'SAID', 'BUT', 'I', 'MUST', 'HAVE', 'THE', 'KEY']
+8188-269290-0016-2839: ref=['JANE', "HERIOT'S", 'VOICE', 'WAS', 'HEARD', 'IN', 'THE', 'PASSAGE']
+8188-269290-0016-2839: hyp=['JANE', "HEARIT'S", 'VOICE', 'WAS', 'HEARD', 'IN', 'THE', 'PASSAGE']
+8188-269290-0017-2840: ref=['AS', 'SHE', 'WALKED', 'DOWN', 'THE', 'CORRIDOR', 'SHE', 'HEARD', 'IT', 'BEING', 'TURNED', 'IN', 'THE', 'LOCK']
+8188-269290-0017-2840: hyp=['AS', 'SHE', 'WALKED', 'DOWN', 'THE', 'CORRIDOR', 'SHE', 'HEARD', 'IT', 'BEING', 'TURNED', 'TO', 'THE', 'LOCK']
+8188-269290-0018-2841: ref=['WHAT', 'CAN', 'THIS', 'MEAN', 'SHE', 'SAID', 'TO', 'HERSELF']
+8188-269290-0018-2841: hyp=['WHAT', 'CAN', 'THIS', 'MEAN', 'SHE', 'SAID', 'TO', 'HERSELF']
+8188-269290-0019-2842: ref=['OH', 'I', "WON'T", 'PRESS', 'YOU', 'REPLIED', 'JANE']
+8188-269290-0019-2842: hyp=['OH', 'I', "WON'T", 'PRESS', 'YOU', 'REPLIED', 'JANE']
+8188-269290-0020-2843: ref=['OH', 'I', 'SHALL', 'NEVER', 'DO', 'THAT', 'REPLIED', 'LESLIE']
+8188-269290-0020-2843: hyp=['OH', 'I', 'SHALL', 'NEVER', 'DO', 'THAT', 'REPLIED', 'LISLEY']
+8188-269290-0021-2844: ref=['YOU', 'SEE', 'ALL', 'THE', 'GIRLS', 'EXCEPT', 'EILEEN', 'AND', 'MARJORIE', 'LAUGH', 'AT', 'HER', 'AND', 'THAT', 'SEEMS', 'TO', 'ME', 'TO', 'MAKE', 'HER', 'WORSE']
+8188-269290-0021-2844: hyp=['YOU', 'SEE', 'ALL', 'THE', 'GIRLS', 'EXCEPT', 'EILEEN', 'AND', 'MARJORIE', 'LAUGH', 'AT', 'HER', 'AND', 'THAT', 'SEEMS', 'TO', 'ME', 'TO', 'MAKE', 'HER', 'WORSE']
+8188-269290-0022-2845: ref=['SOME', 'DAY', 'JANE', 'YOU', 'MUST', 'SEE', 'HER']
+8188-269290-0022-2845: hyp=['SOME', 'DAY', 'JANE', 'YOU', 'MUST', 'SEE', 'HER']
+8188-269290-0023-2846: ref=['IF', 'YOU', 'ARE', 'IN', 'LONDON', 'DURING', 'THE', 'SUMMER', 'YOU', 'MUST', 'COME', 'AND', 'PAY', 'US', 'A', 'VISIT', 'WILL', 'YOU']
+8188-269290-0023-2846: hyp=['IF', 'YOU', 'IN', 'LONDON', 'DURING', 'THE', 'SUMMER', 'YOU', 'MUST', 'COME', 'IN', 'PAIR', 'FOR', 'VISIT', 'WILL', 'YOU']
+8188-269290-0024-2847: ref=['THAT', 'IS', 'IF', 'YOU', 'CARE', 'TO', 'CONFIDE', 'IN', 'ME']
+8188-269290-0024-2847: hyp=['THAT', 'IS', 'IF', 'YOU', 'CARE', 'TO', 'CONFIDE', 'IN', 'ME']
+8188-269290-0025-2848: ref=['I', 'BELIEVE', 'POOR', 'ANNIE', 'IS', 'DREADFULLY', 'UNHAPPY']
+8188-269290-0025-2848: hyp=['I', 'BELIEVE', 'POOR', 'ANNIE', 'IS', 'DREADFULLY', 'UNHAPPY']
+8188-269290-0026-2849: ref=["THAT'S", 'JUST', 'IT', 'JANE', 'THAT', 'IS', 'WHAT', 'FRIGHTENS', 'ME', 'SHE', 'REFUSES', 'TO', 'COME']
+8188-269290-0026-2849: hyp=["THAT'S", 'JUST', 'A', 'CHAIN', 'THAT', 'IS', 'WHAT', 'FRIGHTENS', 'ME', 'SHE', 'REFUSES', 'TO', 'COME']
+8188-269290-0027-2850: ref=['REFUSES', 'TO', 'COME', 'SHE', 'CRIED']
+8188-269290-0027-2850: hyp=['REFUSES', 'TO', 'COME', 'SHE', 'CRIED']
+8188-269290-0028-2851: ref=['SHE', 'WILL', 'GET', 'INTO', 'AN', 'AWFUL', 'SCRAPE']
+8188-269290-0028-2851: hyp=["SHE'LL", 'GET', 'IN', 'HER', 'AWFUL', 'SCRAPE']
+8188-269290-0029-2852: ref=['I', 'AM', 'SURE', 'SHE', 'IS', 'ILL', 'SHE', 'WORKS', 'TOO', 'HARD', 'AND', 'SHE', 'BUT', 'THERE', 'I', "DON'T", 'KNOW', 'THAT', 'I', 'OUGHT', 'TO', 'SAY', 'ANY', 'MORE']
+8188-269290-0029-2852: hyp=['I', 'AM', 'SURE', 'SHE', 'IS', 'ILL', 'SHE', 'WORKS', 'TOO', 'HARD', 'AND', 'SHE', 'BUT', 'THERE', 'I', "DON'T", 'KNOW', 'THAT', 'I', 'OUGHT', 'TO', 'SAY', 'ANY', 'MORE']
+8188-269290-0030-2853: ref=["I'LL", 'WAIT', 'FOR', 'YOU', 'HERE', 'SAID', 'LESLIE']
+8188-269290-0030-2853: hyp=["I'LL", 'WAIT', 'FOR', 'YOU', 'HERE', 'SAID', 'LISLEY']
+8188-269290-0031-2854: ref=['DO', 'COME', 'ANNIE', 'DO']
+8188-269290-0031-2854: hyp=['DO', 'COME', 'ANY', 'DO']
+8188-269290-0032-2855: ref=['SCARCELY', 'LIKELY', 'REPLIED', 'LESLIE', 'SHE', 'TOLD', 'ME', 'SHE', 'WAS', 'DETERMINED', 'NOT', 'TO', 'COME', 'TO', 'THE', 'MEETING']
+8188-269290-0032-2855: hyp=['SCARCELY', 'LIKELY', 'REPLIED', 'LESLIE', 'SHE', 'TOLD', 'ME', 'SHE', 'WAS', 'DETERMINED', 'NOT', 'TO', 'COME', 'TO', 'THE', 'MEETING']
+8188-269290-0033-2856: ref=['BUT', 'MARJORIE', 'AND', 'EILEEN', 'HAD', 'ALREADY', 'DEPARTED', 'AND', 'LESLIE', 'AND', 'JANE', 'FOUND', 'THEMSELVES', 'AMONG', 'THE', 'LAST', 'STUDENTS', 'TO', 'ARRIVE', 'AT', 'THE', 'GREAT', 'EAST', 'HALL']
+8188-269290-0033-2856: hyp=['BUT', 'MARJORIE', 'AND', 'IDLEEN', 'HAD', 'ALREADY', 'DEPARTED', 'AND', 'LISLEY', 'AND', 'JANE', 'FOUND', 'THEMSELVES', 'AMONG', 'THE', 'LAST', 'STUDENTS', 'TO', 'ARRIVE', 'AT', 'THE', 'GREAT', 'EAST', 'HALL']
+8188-269290-0034-2857: ref=['MISS', 'LAUDERDALE', 'WAS', 'STANDING', 'WITH', 'THE', 'OTHER', 'TUTORS', 'AND', 'PRINCIPALS', 'OF', 'THE', 'DIFFERENT', 'HALLS', 'ON', 'A', 'RAISED', 'PLATFORM']
+8188-269290-0034-2857: hyp=['MISS', 'LAUDER', 'DALE', 'WAS', 'STANDING', 'WITH', 'THE', 'OTHER', 'TUTORS', 'AND', 'PRINCIPLES', 'OF', 'THE', 'DIFFERENT', 'HALLS', 'ARE', 'A', 'RAISED', 'PLATFORM']
+8188-269290-0035-2858:
ref=['THEN', 'A', 'ROLL', 'CALL', 'WAS', 'GONE', 'THROUGH', 'BY', 'ONE', 'OF', 'THE', 'TUTORS', 'THE', 'ONLY', 'ABSENTEE', 'WAS', 'ANNIE', 'COLCHESTER'] +8188-269290-0035-2858: hyp=['THEN', 'A', 'ROCKLE', 'WAS', 'GONE', 'THROUGH', 'BY', 'ONE', 'OF', 'THE', 'TUTORS', 'THE', 'ONLY', 'EBSENTEE', 'WAS', 'ANY', 'COLCHESTER'] +8188-269290-0036-2859: ref=['THE', 'PHYSICAL', 'PART', 'OF', 'YOUR', 'TRAINING', 'AND', 'ALSO', 'THE', 'MENTAL', 'PART', 'ARE', 'ABUNDANTLY', 'SUPPLIED', 'IN', 'THIS', 'GREAT', 'HOUSE', 'OF', 'LEARNING', 'SHE', 'CONTINUED', 'BUT', 'THE', 'SPIRITUAL', 'PART', 'IT', 'SEEMS', 'TO', 'ME', 'OUGHT', 'NOW', 'TO', 'BE', 'STRENGTHENED'] +8188-269290-0036-2859: hyp=['THE', 'PHYSICAL', 'PART', 'OF', 'THE', 'ORTRAINING', 'AND', 'ALSO', 'THE', 'MENTAL', 'PART', 'ARE', 'ABUNDANTLY', 'SUPPLIED', 'IN', 'THIS', 'GREAT', 'HOUSE', 'OF', 'LEARNING', 'SHE', 'CONTINUED', 'BUT', 'THE', 'SPIRITUAL', 'PART', 'IT', 'SEEMS', 'TO', 'ME', 'OUGHT', 'NOW', 'TO', 'BE', 'STRENGTHENED'] +8188-269290-0037-2860: ref=['HEAR', 'HEAR', 'AND', 'ONCE', 'AGAIN', 'HEAR'] +8188-269290-0037-2860: hyp=['HEAR', 'HERE', 'AND', 'ONCE', 'AGAIN', 'HAIR'] +8188-269290-0038-2861: ref=['SHE', 'UTTERED', 'HER', 'STRANGE', 'REMARK', 'STANDING', 'UP'] +8188-269290-0038-2861: hyp=['SHE', 'UTTERED', 'A', 'STRANGE', 'REMARK', 'STANDING', 'UP'] +8188-269290-0039-2862: ref=['MARJORIE', 'AND', 'EILEEN', 'WERE', 'CLOSE', 'TO', 'HER'] +8188-269290-0039-2862: hyp=['MARJORIE', 'AND', 'ILINE', 'WERE', 'CLOSE', 'TO', 'HER'] +8188-269290-0040-2863: ref=['I', 'WILL', 'TALK', 'WITH', 'YOU', 'BELLE', 'ACHESON', 'PRESENTLY', 'SHE', 'SAID'] +8188-269290-0040-2863: hyp=['I', 'WILL', 'TALK', 'WITH', 'YOU', 'BELL', 'ARTISON', 'PRESENTLY', 'SHE', 'SAID'] +8188-269290-0041-2864: ref=['THE', 'NAMES', 'OF', 'PROPOSED', 'MEMBERS', 'ARE', 'TO', 'BE', 'SUBMITTED', 'TO', 'ME', 'BEFORE', 'THIS', 'DAY', 'WEEK'] +8188-269290-0041-2864: hyp=['THE', 'NAMES', 'OF', 'THE', 'PROPOSED', 'MEMBERS', 'ARE', 'TO', 'BE', 'SUBMITTED', 'TO', 'ME', 'BEFORE', 'THIS', 'DAY', 'WEEK'] +8188-269290-0042-2865: ref=['AM', 'I', 'MY', "BROTHER'S", 'KEEPER'] +8188-269290-0042-2865: hyp=['AM', 'I', 'MY', "BROTHER'S", 'KEEPER'] +8188-269290-0043-2866: ref=['YOU', 'ASK', 'SHE', 'CONTINUED'] +8188-269290-0043-2866: hyp=['YOU', 'ASK', 'SHE', 'CONTINUED'] +8188-269290-0044-2867: ref=['GOD', 'ANSWERS', 'TO', 'EACH', 'OF', 'YOU', 'YOU', 'ARE'] +8188-269290-0044-2867: hyp=['GOD', 'AUTHEST', 'EACH', 'OF', 'YOU', 'YOU', 'ARE'] +8188-269290-0045-2868: ref=['THE', 'WORLD', 'SAYS', 'NO', 'I', 'AM', 'NOT', 'BUT', 'GOD', 'SAYS', 'YES', 'YOU', 'ARE'] +8188-269290-0045-2868: hyp=['THE', 'WORLD', 'TASTE', 'NO', 'I', 'AM', 'NOT', 'BUT', 'GOD', 'SAKES', 'YES', 'YOU', 'ARE'] +8188-269290-0046-2869: ref=['ALL', 'MEN', 'ARE', 'YOUR', 'BROTHERS'] +8188-269290-0046-2869: hyp=['ALL', 'MEN', 'ARE', 'BROTHERS'] +8188-269290-0047-2870: ref=['FOR', 'ALL', 'WHO', 'SIN', 'ALL', 'WHO', 'SUFFER', 'YOU', 'ARE', 'TO', 'A', 'CERTAIN', 'EXTENT', 'RESPONSIBLE'] +8188-269290-0047-2870: hyp=['FOR', 'ALL', 'WHO', 'SIN', 'ALL', 'WHO', 'SUFFER', 'YOU', 'ARE', 'TO', 'EXERT', 'AN', 'EXTENT', 'RESPONSIBLE'] +8188-269290-0048-2871: ref=['AFTER', 'THE', 'ADDRESS', 'THE', 'GIRLS', 'THEMSELVES', 'WERE', 'ENCOURAGED', 'TO', 'SPEAK', 'AND', 'A', 'VERY', 'ANIMATED', 'DISCUSSION', 'FOLLOWED'] +8188-269290-0048-2871: hyp=['AFTER', 'THE', 'ADDRESS', 'THE', 'GIRLS', 'THEMSELVES', 'WERE', 'ENCOURAGED', 'TO', 'SPEAK', 'AND', 'A', 'VERY', 'ANIMATED', 'DISCUSSION', 'FOLLOWED'] +8188-269290-0049-2872: ref=['IT', 'WAS', 'PAST', 'TEN', "O'CLOCK", 
'WHEN', 'SHE', 'LEFT', 'THE', 'HALL'] +8188-269290-0049-2872: hyp=['IT', 'WAS', 'PAST', 'TEN', "O'CLOCK", 'WHEN', 'SHE', 'LEFT', 'THE', 'HALL'] +8188-269290-0050-2873: ref=['JUST', 'AS', 'SHE', 'WAS', 'DOING', 'SO', 'MISS', 'FRERE', 'CAME', 'UP'] +8188-269290-0050-2873: hyp=['JUST', 'AS', 'SHE', 'WAS', 'DOING', 'SO', 'WAS', 'FRERE', 'CAME', 'UP'] +8188-269290-0051-2874: ref=['ANNIE', 'COLCHESTER', 'IS', 'YOUR', 'ROOMFELLOW', 'IS', 'SHE', 'NOT', 'SHE', 'SAID'] +8188-269290-0051-2874: hyp=['ANY', 'COLCHISED', 'AS', 'YOUR', 'ROOM', 'FELLOW', 'IS', 'SHE', 'NOT', 'SHE', 'SAID'] +8188-269290-0052-2875: ref=['I', 'SEE', 'BY', 'YOUR', 'FACE', 'MISS', 'GILROY', 'THAT', 'YOU', 'ARE', 'DISTRESSED', 'ABOUT', 'SOMETHING', 'ARE', 'YOU', 'KEEPING', 'ANYTHING', 'BACK'] +8188-269290-0052-2875: hyp=['I', 'SEE', 'BY', 'YOUR', 'FACE', 'MY', 'SCALE', 'ROY', 'THAT', 'YOU', 'ARE', 'DISTRESSED', 'ABOUT', 'SOMETHING', 'ARE', 'KEEPING', 'ANYTHING', 'BACK'] +8188-269290-0053-2876: ref=['I', 'AM', 'AFRAID', 'I', 'AM', 'REPLIED', 'LESLIE', 'DISTRESS', 'NOW', 'IN', 'HER', 'TONE'] +8188-269290-0053-2876: hyp=['I', 'AM', 'AFRAID', 'I', 'AM', 'REPLIED', 'LESLIE', 'DISTRESSED', 'NOW', 'IN', 'HER', 'TONE'] +8188-269290-0054-2877: ref=['I', 'MUST', 'SEE', 'HER', 'MYSELF', 'EARLY', 'IN', 'THE', 'MORNING', 'AND', 'I', 'AM', 'QUITE', 'SURE', 'THAT', 'NOTHING', 'WILL', 'SATISFY', 'MISS', 'LAUDERDALE', 'EXCEPT', 'A', 'VERY', 'AMPLE', 'APOLOGY', 'AND', 'A', 'FULL', 'EXPLANATION', 'OF', 'THE', 'REASON', 'WHY', 'SHE', 'ABSENTED', 'HERSELF'] +8188-269290-0054-2877: hyp=['I', 'MUST', 'SEE', 'HER', 'MYSELF', 'EARLY', 'IN', 'THE', 'MORNING', 'AND', 'I', 'AM', 'QUITE', 'SURE', 'THAT', 'NOTHING', 'WILL', 'SATISFY', 'MISS', 'LAURDELL', 'EXCEPT', 'A', 'VERY', 'AMPLE', 'APOLOGY', 'AND', 'A', 'FULL', 'EXPLANATION', 'OF', 'THE', 'REASON', 'WHY', 'SHE', 'ABSENTED', 'HERSELF'] +8188-269290-0055-2878: ref=['EXCUSES', 'MAKE', 'NO', 'DIFFERENCE'] +8188-269290-0055-2878: hyp=['EXCUSES', 'MAKE', 'NO', 'DIFFERENCE'] +8188-269290-0056-2879: ref=['THE', 'GIRL', 'WHO', 'BREAKS', 'THE', 'RULES', 'HAS', 'TO', 'BE', 'PUNISHED'] +8188-269290-0056-2879: hyp=['THE', 'GIRL', 'WHO', 'BREAKS', 'THE', 'RULES', 'HAVE', 'TO', 'BE', 'PUNISHED'] +8188-269290-0057-2880: ref=['I', 'WILL', 'TELL', 'HER'] +8188-269290-0057-2880: hyp=['I', 'WILL', 'TELL', 'HER'] +8188-274364-0000-2811: ref=['THE', 'COMMONS', 'ALSO', 'VOTED', 'THAT', 'THE', 'NEW', 'CREATED', 'PEERS', 'OUGHT', 'TO', 'HAVE', 'NO', 'VOICE', 'IN', 'THIS', 'TRIAL', 'BECAUSE', 'THE', 'ACCUSATION', 'BEING', 'AGREED', 'TO', 'WHILE', 'THEY', 'WERE', 'COMMONERS', 'THEIR', 'CONSENT', 'TO', 'IT', 'WAS', 'IMPLIED', 'WITH', 'THAT', 'OF', 'ALL', 'THE', 'COMMONS', 'OF', 'ENGLAND'] +8188-274364-0000-2811: hyp=['THE', 'COMMONS', 'ALSO', 'VOTED', 'THAT', 'THE', 'NEW', 'CREATED', 'PEERS', 'OUGHT', 'TO', 'HAVE', 'NO', 'VOICE', 'IN', 'THIS', 'TRIAL', 'BECAUSE', 'THE', 'ACCUSATION', 'BEING', 'AGREED', 'TO', 'WHILE', 'THEY', 'WERE', 'COMMONERS', 'THEIR', 'CONSENT', 'TO', 'IT', 'WAS', 'IMPLIED', 'WITH', 'THAT', 'OF', 'ALL', 'THE', 'COMMONS', 'OF', 'ENGLAND'] +8188-274364-0001-2812: ref=['IN', 'THE', 'GOVERNMENT', 'OF', 'IRELAND', 'HIS', 'ADMINISTRATION', 'HAD', 'BEEN', 'EQUALLY', 'PROMOTIVE', 'OF', 'HIS', "MASTER'S", 'INTEREST', 'AND', 'THAT', 'OF', 'THE', 'SUBJECTS', 'COMMITTED', 'TO', 'HIS', 'CARE'] +8188-274364-0001-2812: hyp=['THE', 'GOVERNMENT', 'OF', 'IRELAND', 'HIS', 'ADMINISTRATION', 'HAD', 'BEEN', 'EQUALLY', 'PROMOTED', 'OF', 'HIS', "MASTER'S", 'INTEREST', 'AND', 'THAT', 'OF', 'THE', 'SUBJECTS', 'COMMITTED', 'TO', 'HIS', 
'CARE'] +8188-274364-0002-2813: ref=['THE', 'CASE', 'OF', 'LORD', 'MOUNTNORRIS', 'OF', 'ALL', 'THOSE', 'WHICH', 'WERE', 'COLLECTED', 'WITH', 'SO', 'MUCH', 'INDUSTRY', 'IS', 'THE', 'MOST', 'FLAGRANT', 'AND', 'THE', 'LEAST', 'EXCUSABLE'] +8188-274364-0002-2813: hyp=['THE', 'CASE', 'OF', 'LORD', 'MONTORIS', 'OF', 'ALL', 'THOSE', 'WHICH', 'WERE', 'COLLECTED', 'WITH', 'SO', 'ACT', 'INDUSTRY', 'IS', 'THE', 'MOST', 'FLAGRANT', 'AND', 'THE', 'LEAST', 'EXCUSABLE'] +8188-274364-0003-2814: ref=['THE', 'COURT', 'WHICH', 'CONSISTED', 'OF', 'THE', 'CHIEF', 'OFFICERS', 'OF', 'THE', 'ARMY', 'FOUND', 'THE', 'CRIME', 'TO', 'BE', 'CAPITAL', 'AND', 'CONDEMNED', 'THAT', 'NOBLEMAN', 'TO', 'LOSE', 'HIS', 'HEAD'] +8188-274364-0003-2814: hyp=['THE', 'COURT', 'WHICH', 'CONSISTED', 'OF', 'THE', 'CHEAP', 'OFFICIALS', 'OF', 'THE', 'ARMY', 'FOUND', 'THE', 'CROWN', 'TO', 'BE', 'CAPITAL', 'AND', 'CONDEMNED', 'THAT', 'NOBLEMAN', 'TO', 'LOSE', 'HIS', 'HEAD'] +8188-274364-0004-2815: ref=['WHERE', 'THE', 'TOKEN', 'BY', 'WHICH', 'I', 'SHOULD', 'DISCOVER', 'IT'] +8188-274364-0004-2815: hyp=['WITH', 'A', 'TOKEN', 'BY', 'WHICH', 'I', 'SHALL', 'DISCOVER', 'IT'] +8188-274364-0005-2816: ref=['IT', 'IS', 'NOW', 'FULL', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'YEARS', 'SINCE', 'TREASONS', 'WERE', 'DEFINED', 'AND', 'SO', 'LONG', 'HAS', 'IT', 'BEEN', 'SINCE', 'ANY', 'MAN', 'WAS', 'TOUCHED', 'TO', 'THIS', 'EXTENT', 'UPON', 'THIS', 'CRIME', 'BEFORE', 'MYSELF'] +8188-274364-0005-2816: hyp=['IT', 'IS', 'NOW', 'A', 'FULL', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'YEARS', 'SINCE', 'TREASONS', 'WERE', 'DEFINED', 'AND', 'SO', 'LONG', 'HAS', 'IT', 'BEEN', 'SINCE', 'ANY', 'MAN', 'WAS', 'TOUCHED', 'TO', 'THIS', 'EXTENT', 'UPON', 'THIS', 'CRIME', 'FOR', 'MYSELF'] +8188-274364-0006-2817: ref=['LET', 'US', 'NOT', 'TO', 'OUR', 'OWN', 'DESTRUCTION', 'AWAKE', 'THOSE', 'SLEEPING', 'LIONS', 'BY', 'RATTLING', 'UP', 'A', 'COMPANY', 'OF', 'OLD', 'RECORDS', 'WHICH', 'HAVE', 'LAIN', 'FOR', 'SO', 'MANY', 'AGES', 'BY', 'THE', 'WALL', 'FORGOTTEN', 'AND', 'NEGLECTED'] +8188-274364-0006-2817: hyp=['LET', 'US', 'NOT', 'TO', 'HER', 'UNDESTRUCTION', 'AWAKE', 'THOSE', 'KEEPING', 'LIONS', 'BY', 'RATTLING', 'UP', 'A', 'COMPANY', 'OF', 'ALL', 'RICARDS', 'WHICH', 'HAVE', 'LAIN', 'FOR', 'SO', 'MANY', 'AGES', 'BY', 'THE', 'WAR', 'FORGOTTEN', 'AND', 'NEGLECTED'] +8188-274364-0007-2818: ref=['HOWEVER', 'THESE', 'GENTLEMEN', 'AT', 'THE', 'BAR', 'SAY', 'THEY', 'SPEAK', 'FOR', 'THE', 'COMMONWEALTH', 'AND', 'THEY', 'BELIEVE', 'SO', 'YET', 'UNDER', 'FAVOR', 'IT', 'IS', 'I', 'WHO', 'IN', 'THIS', 'PARTICULAR', 'SPEAK', 'FOR', 'THE', 'COMMONWEALTH'] +8188-274364-0007-2818: hyp=['HERBID', 'THESE', 'GENTLEMEN', 'AT', 'THE', 'BAR', 'SO', 'THEY', 'SPEAK', 'FOR', 'THE', 'CORNWEALTH', 'AND', 'THEY', 'BELIEVE', 'SO', 'YET', 'UNDER', 'FAVOUR', 'IT', 'IS', 'I', 'WHO', 'IN', 'THIS', 'PARTICULAR', 'SPEAK', 'FOR', 'THE', 'CORNWEALTH'] +8188-274364-0008-2819: ref=['MY', 'LORDS', 'I', 'HAVE', 'NOW', 'TROUBLED', 'YOUR', 'LORDSHIPS', 'A', 'GREAT', 'DEAL', 'LONGER', 'THAN', 'I', 'SHOULD', 'HAVE', 'DONE'] +8188-274364-0008-2819: hyp=['MY', 'LORDS', 'I', 'HAVE', 'NOW', 'TROUBLED', 'YOUR', 'LORDSHIPS', 'A', 'GREAT', 'DEAL', 'LONGER', 'THAN', 'I', 'SHOULD', 'HAVE', 'DONE'] +8188-274364-0009-2820: ref=['YOUNG', 'VANE', 'FALLING', 'UPON', 'THIS', 'PAPER', 'OF', 'NOTES', 'DEEMED', 'THE', 'MATTER', 'OF', 'THE', 'UTMOST', 'IMPORTANCE', 'AND', 'IMMEDIATELY', 'COMMUNICATED', 'IT', 'TO', 'PYM', 'WHO', 'NOW', 'PRODUCED', 'THE', 'PAPER', 'BEFORE', 'THE', 'HOUSE', 'OF', 'COMMONS'] +8188-274364-0009-2820: hyp=['YOUNG', 'VANE', 
'FALLING', 'UPON', 'THIS', 'PAPER', 'OF', 'NOTES', 'DEEMED', 'THE', 'MATTER', 'OF', 'THE', 'UTMOST', 'IMPORTANCE', 'AND', 'IMMEDIATELY', 'COMMUNICATED', 'IT', 'TO', 'POEM', 'WHO', 'NOW', 'PRODUCED', 'THE', 'PAPER', 'BEFORE', 'THE', 'HOUSE', 'OF', 'COMMONS'] +8188-274364-0010-2821: ref=['THE', 'KING', 'PROPOSES', 'THIS', 'DIFFICULTY', 'BUT', 'HOW', 'CAN', 'I', 'UNDERTAKE', 'OFFENSIVE', 'WAR', 'IF', 'I', 'HAVE', 'NO', 'MORE', 'MONEY'] +8188-274364-0010-2821: hyp=['THE', 'KING', 'PROPOSES', 'THIS', 'DIFFICULTY', 'BUT', 'HOW', 'CAN', 'I', 'UNDERTAKE', 'OFFENSIVE', 'WAR', 'IF', 'I', 'HAVE', 'NO', 'MORE', 'MONEY'] +8188-274364-0011-2822: ref=['YOUR', 'MAJESTY', 'HAVING', 'TRIED', 'THE', 'AFFECTIONS', 'OF', 'YOUR', 'PEOPLE', 'YOU', 'ARE', 'ABSOLVED', 'AND', 'LOOSE', 'FROM', 'ALL', 'RULES', 'OF', 'GOVERNMENT', 'AND', 'MAY', 'DO', 'WHAT', 'POWER', 'WILL', 'ADMIT'] +8188-274364-0011-2822: hyp=['YOUR', 'MAJESTY', 'HAVING', 'TRIED', 'THE', 'AFFECTIONS', 'OF', 'YOUR', 'PEOPLE', 'YOU', 'ABSOLVED', 'AND', 'LOOSE', 'FROM', 'ALL', 'RULES', 'OF', 'GOVERNMENT', 'AND', 'MAY', 'DO', 'WHAT', 'POWER', 'WILL', 'ADMIT'] +8280-266249-0000-339: ref=['OLD', 'MISTER', 'DINSMORE', 'HAD', 'ACCEPTED', 'A', 'PRESSING', 'INVITATION', 'FROM', 'HIS', 'GRANDDAUGHTER', 'AND', 'HER', 'HUSBAND', 'TO', 'JOIN', 'THE', 'PARTY', 'AND', 'WITH', 'THE', 'ADDITION', 'OF', 'SERVANTS', 'IT', 'WAS', 'A', 'LARGE', 'ONE'] +8280-266249-0000-339: hyp=['OLD', 'MISTER', 'DINSMORE', 'HAD', 'ACCEPTED', 'OPPRESSING', 'INVITATION', 'FROM', 'HIS', 'GRANDDAUGHTER', 'AND', 'HER', 'HUSBAND', 'TO', 'JOIN', 'THE', 'PARTY', 'AND', 'WITH', 'THE', 'ADDITION', 'OF', 'SERVANTS', 'IT', 'WAS', 'A', 'LARGE', 'ONE'] +8280-266249-0001-340: ref=['AS', 'THEY', 'WERE', 'IN', 'NO', 'HASTE', 'AND', 'THE', 'CONFINEMENT', 'OF', 'A', 'RAILROAD', 'CAR', 'WOULD', 'BE', 'VERY', 'IRKSOME', 'TO', 'THE', 'YOUNGER', 'CHILDREN', 'IT', 'HAD', 'BEEN', 'DECIDED', 'TO', 'MAKE', 'THE', 'JOURNEY', 'BY', 'WATER'] +8280-266249-0001-340: hyp=['AS', 'THEY', 'WERE', 'IN', 'NO', 'HASTE', 'AND', 'THE', 'CONFINEMENT', 'OF', 'A', 'RAILROAD', 'CAR', 'WILL', 'BE', 'VERY', 'IRKSOME', 'TO', 'THE', 'YOUNGER', 'CHILDREN', 'IT', 'HAD', 'BEEN', 'DECIDED', 'TO', 'MAKE', 'THE', 'JOURNEY', 'BY', 'WATER'] +8280-266249-0002-341: ref=['THERE', 'WERE', 'NO', 'SAD', 'LEAVE', 'TAKINGS', 'TO', 'MAR', 'THEIR', 'PLEASURE', 'THE', 'CHILDREN', 'WERE', 'IN', 'WILD', 'SPIRITS', 'AND', 'ALL', 'SEEMED', 'CHEERFUL', 'AND', 'HAPPY', 'AS', 'THEY', 'SAT', 'OR', 'STOOD', 'UPON', 'THE', 'DECK', 'WATCHING', 'THE', 'RECEDING', 'SHORE', 'AS', 'THE', 'VESSEL', 'STEAMED', 'OUT', 'OF', 'THE', 'HARBOR'] +8280-266249-0002-341: hyp=['THERE', 'WERE', 'NO', 'SAD', 'LEAVE', 'TAKINGS', 'TO', 'MAR', 'THEIR', 'PLEASURE', 'THE', 'CHILDREN', 'WERE', 'IN', 'WILD', 'SPIRITS', 'AND', 'ALL', 'SEEMED', 'CHEERFUL', 'AND', 'HAPPY', 'AS', 'THEY', 'SAT', 'OR', 'STOOD', 'UPON', 'THE', 'DECK', 'WATCHING', 'THE', 'RECEDING', 'SHORE', 'AS', 'THE', 'VESSEL', 'STEAMED', 'OUT', 'OF', 'THE', 'HARBOUR'] +8280-266249-0003-342: ref=['AT', 'LENGTH', 'THE', 'LAND', 'HAD', 'QUITE', 'DISAPPEARED', 'NOTHING', 'COULD', 'BE', 'SEEN', 'BUT', 'THE', 'SKY', 'OVERHEAD', 'AND', 'A', 'VAST', 'EXPANSE', 'OF', 'WATER', 'ALL', 'AROUND', 'AND', 'THE', 'PASSENGERS', 'FOUND', 'LEISURE', 'TO', 'TURN', 'THEIR', 'ATTENTION', 'UPON', 'EACH', 'OTHER'] +8280-266249-0003-342: hyp=['AT', 'LENGTH', 'THE', 'LAND', 'HAD', 'QUITE', 'DISAPPEARED', 'NOTHING', 'COULD', 'BE', 'SEEN', 'BUT', 'THE', 'SKY', 'OVERHEAD', 'AND', 'A', 'VAST', 'EXPANSE', 'OF', 'WATER', 'ALL', 'ROUND', 'AND', 'THE', 
'PASSENGERS', 'FOUND', 'LEISURE', 'TO', 'TURN', 'THEIR', 'ATTENTION', 'UPON', 'EACH', 'OTHER'] +8280-266249-0004-343: ref=['THERE', 'ARE', 'SOME', 'NICE', 'LOOKING', 'PEOPLE', 'ON', 'BOARD', 'REMARKED', 'MISTER', 'TRAVILLA', 'IN', 'AN', 'UNDERTONE', 'TO', 'HIS', 'WIFE'] +8280-266249-0004-343: hyp=['THERE', 'ARE', 'SOME', 'NICE', 'LOOKING', 'PEOPLE', 'ON', 'BOARD', 'REMARKED', 'MISTER', 'TRAVILLA', 'IN', 'AN', 'UNDERTONE', 'TO', 'HIS', 'WIFE'] +8280-266249-0005-344: ref=['BESIDE', 'OURSELVES', 'ADDED', 'COUSIN', 'RONALD', 'LAUGHING'] +8280-266249-0005-344: hyp=['BESIDES', 'OURSELVES', 'ADDED', 'COUSIN', 'RANALD', 'LAUGHING'] +8280-266249-0006-345: ref=['YES', 'SHE', 'ANSWERED', 'THAT', 'LITTLE', 'GROUP', 'YONDER', 'A', 'YOUNG', 'MINISTER', 'AND', 'HIS', 'WIFE', 'AND', 'CHILD', 'I', 'SUPPOSE'] +8280-266249-0006-345: hyp=['YES', 'SHE', 'ANSWERED', 'THAT', 'LITTLE', 'GROUP', 'YONDER', 'A', 'YOUNG', 'MINISTER', 'AND', 'HIS', 'WIFE', 'AND', 'CHILD', 'I', 'SUPPOSE'] +8280-266249-0007-346: ref=['AND', 'WHAT', 'A', 'DEAR', 'LITTLE', 'FELLOW', 'HE', 'IS', 'JUST', 'ABOUT', 'THE', 'AGE', 'OF', 'OUR', 'HAROLD', 'I', 'SHOULD', 'JUDGE'] +8280-266249-0007-346: hyp=['AND', 'WHEN', 'A', 'DEAR', 'LITTLE', 'FELLOW', 'HE', 'IS', 'JUST', 'ABOUT', 'THE', 'AGE', 'OF', 'OUR', 'HERALD', 'I', 'SHOULD', 'JUDGE'] +8280-266249-0008-347: ref=['DO', 'YOU', 'SON', 'WAS', 'THE', 'SMILING', 'REJOINDER'] +8280-266249-0008-347: hyp=['DO', 'YOU', 'SON', 'WAS', 'THE', 'SMILING', 'REJOINDER'] +8280-266249-0009-348: ref=['HE', 'CERTAINLY', 'LOOKS', 'LIKE', 'A', 'VERY', 'NICE', 'LITTLE', 'BOY'] +8280-266249-0009-348: hyp=['HE', 'CERTAINLY', 'LOOKS', 'LIKE', 'A', 'VERY', 'NICE', 'LITTLE', 'BOY'] +8280-266249-0010-349: ref=['SUPPOSE', 'YOU', 'AND', 'HE', 'SHAKE', 'HANDS', 'FRANK'] +8280-266249-0010-349: hyp=['SUPPOSE', 'YOU', 'AND', 'HE', 'SHAKE', 'HANDS', 'FRANK'] +8280-266249-0011-350: ref=['I', 'DO', 'INDEED', 'THOUGH', 'PROBABLY', 'COMPARATIVELY', 'FEW', 'ARE', 'AWARE', 'THAT', 'TOBACCO', 'IS', 'THE', 'CAUSE', 'OF', 'THEIR', 'AILMENTS'] +8280-266249-0011-350: hyp=['I', 'DO', 'INDEED', 'THE', 'PROBABLY', 'COMPARATIVELY', 'FEW', 'ARE', 'AWARE', 'THAT', 'TOBACCO', 'IS', 'THE', 'CAUSE', 'OF', 'THEIR', 'AILMENTS'] +8280-266249-0012-351: ref=['DOUBTLESS', 'THAT', 'IS', 'THE', 'CASE', 'REMARKED', 'MISTER', 'DINSMORE'] +8280-266249-0012-351: hyp=['DOUBTLESS', 'THAT', 'IS', 'THE', 'CASE', 'REMARKED', 'MISTER', 'DINSMORE'] +8280-266249-0013-352: ref=['WITH', 'ALL', 'MY', 'HEART', 'IF', 'YOU', 'WILL', 'STEP', 'INTO', 'THE', "GENTLEMEN'S", 'CABIN', 'WHERE', "THERE'S", 'A', 'LIGHT'] +8280-266249-0013-352: hyp=['WITH', 'ALL', 'MY', 'HEART', 'IF', 'YOU', 'WILL', 'STEP', 'INTO', 'THE', "GENTLEMAN'S", 'CABIN', 'WHERE', "THERE'S", 'A', 'LIGHT'] +8280-266249-0014-353: ref=['HE', 'LED', 'THE', 'WAY', 'THE', 'OTHERS', 'ALL', 'FOLLOWING', 'AND', 'TAKING', 'OUT', 'A', 'SLIP', 'OF', 'PAPER', 'READ', 'FROM', 'IT', 'IN', 'A', 'DISTINCT', 'TONE', 'LOUD', 'ENOUGH', 'TO', 'BE', 'HEARD', 'BY', 'THOSE', 'ABOUT', 'HIM', 'WITHOUT', 'DISTURBING', 'THE', 'OTHER', 'PASSENGERS'] +8280-266249-0014-353: hyp=['HE', 'LED', 'THE', 'WAY', 'THE', 'OTHERS', 'ALL', 'FOLLOWING', 'AND', 'TAKING', 'OUT', 'A', 'SLIP', 'OF', 'PAPER', 'READ', 'FROM', 'IT', 'IN', 'A', 'DISTINCT', 'TONE', 'LOUD', 'ENOUGH', 'TO', 'BE', 'HEARD', 'BY', 'THOSE', 'ALL', 'ABOUT', 'HIM', 'WITHOUT', 'DISTURBING', 'THE', 'OTHER', 'PASSENGERS'] +8280-266249-0015-354: ref=['ONE', 'DROP', 'OF', 'NICOTINE', 'EXTRACT', 'OF', 'TOBACCO', 'PLACED', 'ON', 'THE', 'TONGUE', 'OF', 'A', 'DOG', 'WILL', 'KILL', 'HIM', 
'IN', 'A', 'MINUTE', 'THE', 'HUNDREDTH', 'PART', 'OF', 'A', 'GRAIN', 'PICKED', 'UNDER', 'THE', 'SKIN', 'OF', 'A', "MAN'S", 'ARM', 'WILL', 'PRODUCE', 'NAUSEA', 'AND', 'FAINTING'] +8280-266249-0015-354: hyp=['ONE', 'DROP', 'OF', 'NICOTINE', 'EXTRACTED', 'TOBACCO', 'PLACED', 'ON', 'THE', 'TONGUE', 'OF', 'THE', 'DOG', 'WILL', 'KILL', 'HIM', 'IN', 'A', 'MINUTE', 'THE', 'HUNDREDTH', 'PART', 'OF', 'A', 'GRAIN', 'PRICKED', 'UNDER', 'THE', 'SKIN', 'OF', 'A', "MAN'S", 'ARM', 'WILL', 'PRODUCE', 'NAUSEA', 'AND', 'FAINTING'] +8280-266249-0016-355: ref=['THE', 'HALF', 'DOZEN', 'CIGARS', 'WHICH', 'MOST', 'SMOKERS', 'USE', 'A', 'DAY', 'CONTAIN', 'SIX', 'OR', 'SEVEN', 'GRAINS', 'ENOUGH', 'IF', 'CONCENTRATED', 'AND', 'ABSORBED', 'TO', 'KILL', 'THREE', 'MEN', 'AND', 'A', 'POUND', 'OF', 'TOBACCO', 'ACCORDING', 'TO', 'ITS', 'QUALITY', 'CONTAINS', 'FROM', 'ONE', 'QUARTER', 'TO', 'ONE', 'AND', 'A', 'QUARTER', 'OUNCES'] +8280-266249-0016-355: hyp=['THE', 'HALF', 'DOZEN', 'CIGARS', 'WHICH', 'MOST', 'SMOKERS', 'USED', 'A', 'DAY', 'CONTAIN', 'SIX', 'OR', 'SEVEN', 'GRAINS', 'ENOUGH', 'IF', 'CONCENTRATED', 'AND', 'ABSORBED', 'TO', 'KILL', 'THREE', 'MEN', 'AND', 'A', 'POUND', 'OR', 'TOBACCO', 'ACCORDING', 'TO', 'ITS', 'QUALITY', 'CONTAINS', 'FROM', 'ONE', 'QUARTER', 'TO', 'ONE', 'AND', 'A', 'QUARTER', 'OUNCES'] +8280-266249-0017-356: ref=['IS', 'IT', 'STRANGE', 'THEN', 'THAT', 'SMOKERS', 'AND', 'CHEWERS', 'HAVE', 'A', 'THOUSAND', 'AILMENTS'] +8280-266249-0017-356: hyp=['IS', 'IT', 'STRANGE', 'THEN', 'THAT', 'SMOKERS', 'AND', 'SHOERS', 'HAVE', 'A', 'THOUSAND', 'AILMENTS'] +8280-266249-0018-357: ref=['THAT', 'THE', 'FRENCH', 'POLYTECHNIC', 'INSTITUTE', 'HAD', 'TO', 'PROHIBIT', 'ITS', 'USE', 'ON', 'ACCOUNT', 'OF', 'ITS', 'EFFECTS', 'ON', 'THE', 'MIND'] +8280-266249-0018-357: hyp=['THAT', 'THE', 'FRENCH', 'POLYTECHNICHER', 'INSTITUTE', 'HAD', 'TO', 'PROHIBIT', 'ITS', 'USE', 'ON', 'ACCOUNT', 'OF', 'ITS', 'EFFECTS', 'UPON', 'THE', 'MIND'] +8280-266249-0019-358: ref=['NOTICE', 'THE', 'MULTITUDE', 'OF', 'SUDDEN', 'DEATHS', 'AND', 'SEE', 'HOW', 'MANY', 'ARE', 'SMOKERS', 'AND', 'CHEWERS'] +8280-266249-0019-358: hyp=['NOTICE', 'THE', 'MULTITUDE', 'OF', 'SUDDEN', 'DEATHS', 'AND', 'SEE', 'HOW', 'MANY', 'OUR', 'SMOKERS', 'AND', 'CHEWERS'] +8280-266249-0020-359: ref=['IN', 'A', 'SMALL', 'COUNTRY', 'TOWN', 'SEVEN', 'OF', 'THESE', 'MYSTERIOUS', 'PROVIDENCES', 'OCCURRED', 'WITHIN', 'THE', 'CIRCUIT', 'OF', 'A', 'MILE', 'ALL', 'DIRECTLY', 'TRACEABLE', 'TO', 'TOBACCO', 'AND', 'ANY', 'PHYSICIAN', 'ON', 'A', 'FEW', 'MOMENTS', 'REFLECTION', 'CAN', 'MATCH', 'THIS', 'FACT', 'BY', 'HIS', 'OWN', 'OBSERVATION'] +8280-266249-0020-359: hyp=['IN', 'A', 'SMALL', 'COUNTRY', 'TOWN', 'SEVEN', 'OF', 'THESE', 'MYSTERIOUS', 'PROVIDENCES', 'OCCURRED', 'WITHIN', 'THE', 'CIRCUIT', 'OF', 'A', 'MILE', 'ALL', 'DIRECTLY', 'TRACEABLE', 'TO', 'TOBACCO', 'AND', 'ANY', 'PHYSICIAN', 'ON', 'A', 'FEW', 'MOMENTS', 'REFLECTION', 'CAN', 'MATCH', 'THIS', 'FACT', 'BY', 'HIS', 'OWN', 'OBSERVATION'] +8280-266249-0021-360: ref=['AND', 'THEN', 'SUCH', 'POWERFUL', 'ACIDS', 'PRODUCE', 'INTENSE', 'IRRITATION', 'AND', 'THIRST', 'THIRST', 'WHICH', 'WATER', 'DOES', 'NOT', 'QUENCH'] +8280-266249-0021-360: hyp=['AND', 'THEN', 'SUCH', 'POWERFUL', 'ACIDS', 'PRODUCE', 'INTENSE', 'IRRITATION', 'AND', 'THIRST', 'THIRST', 'WHICH', 'WATER', 'DOES', 'NOT', 'QUENCH'] +8280-266249-0022-361: ref=['HENCE', 'A', 'RESORT', 'TO', 'CIDER', 'AND', 'BEER'] +8280-266249-0022-361: hyp=['HENCE', 'A', 'RESORT', 'TO', 'CIDER', 'AND', 'BEER'] +8280-266249-0023-362: ref=['NO', 'SIR', 'WHAT', 'KNOW', 'YE', 'NOT', 
'THAT', 'YOUR', 'BODY', 'IS', 'THE', 'TEMPLE', 'OF', 'THE', 'HOLY', 'GHOST', 'WHICH', 'IS', 'IN', 'YOU', 'WHICH', 'YE', 'HAVE', 'OF', 'GOD', 'AND', 'YE', 'ARE', 'NOT', 'YOUR', 'OWN'] +8280-266249-0023-362: hyp=['NO', 'SIR', 'WHAT', 'KNOW', 'YE', 'NOT', 'THAT', 'YOUR', 'BODY', 'IS', 'THE', 'TEMPLE', 'OF', 'THE', 'HOLY', 'GHOST', 'WHICH', 'IS', 'IN', 'YOU', 'WHICH', 'YE', 'HAVE', 'OF', 'GOD', 'AND', 'YE', 'ARE', 'NOT', 'YOUR', 'OWN'] +8280-266249-0024-363: ref=['FOR', 'YE', 'ARE', 'BOUGHT', 'WITH', 'A', 'PRICE', 'THEREFORE', 'GLORIFY', 'GOD', 'IN', 'YOUR', 'BODY', 'AND', 'IN', 'YOUR', 'SPIRIT', 'WHICH', 'ARE', "GOD'S"] +8280-266249-0024-363: hyp=['FOR', 'YOU', 'ARE', 'BOUGHT', 'WITH', 'A', 'PRICE', 'THEREFORE', 'GLORIFY', 'GOD', 'IN', 'YOUR', 'BODY', 'AND', 'IN', 'YOUR', 'SPIRIT', 'WHICH', 'ARE', 'GODS'] +8280-266249-0025-364: ref=['WE', 'CERTAINLY', 'HAVE', 'NO', 'RIGHT', 'TO', 'INJURE', 'OUR', 'BODIES', 'EITHER', 'BY', 'NEGLECT', 'OR', 'SELF', 'INDULGENCE'] +8280-266249-0025-364: hyp=['WE', 'CERTAINLY', 'HAVE', 'NO', 'RIGHT', 'TO', 'INJURE', 'OUR', 'BODIES', 'EITHER', 'BY', 'NEGLECT', 'OR', 'SELF', 'INDULGENCE'] +8280-266249-0026-365: ref=['AND', 'AGAIN', 'I', 'BESEECH', 'YOU', 'THEREFORE', 'BRETHREN', 'BY', 'THE', 'MERCIES', 'OF', 'GOD', 'THAT', 'YE', 'PRESENT', 'YOUR', 'BODIES', 'A', 'LIVING', 'SACRIFICE', 'HOLY', 'ACCEPTABLE', 'UNTO', 'GOD', 'WHICH', 'IS', 'YOUR', 'REASONABLE', 'SERVICE'] +8280-266249-0026-365: hyp=['AND', 'AGAIN', 'I', 'BESEECH', 'YOU', 'THEREFORE', 'BRETHREN', 'BY', 'THE', 'MERCIES', 'OF', 'GOD', 'THAT', 'YE', 'PRESENT', 'YOUR', 'BODIES', 'A', 'LIVING', 'SACRIFICE', 'HOLY', 'ACCEPTABLE', 'UNTO', 'GOD', 'WHICH', 'IS', 'YOUR', 'REASONABLE', 'SERVICE'] +8280-266249-0027-366: ref=['IT', 'MUST', 'REQUIRE', 'A', 'GOOD', 'DEAL', 'OF', 'RESOLUTION', 'FOR', 'ONE', 'WHO', 'HAS', 'BECOME', 'FOND', 'OF', 'THE', 'INDULGENCE', 'TO', 'GIVE', 'IT', 'UP', 'REMARKED', 'MISTER', 'DALY'] +8280-266249-0027-366: hyp=['IT', 'MUST', 'REQUIRE', 'A', 'GOOD', 'DEAL', 'OF', 'RESOLUTION', 'FOR', 'ONE', 'WHO', 'HAS', 'BECOME', 'FOND', 'OF', 'THE', 'INDULGENCE', 'TO', 'GIVE', 'IT', 'UP', 'REMARKED', 'MISTER', 'DALY'] +8280-266249-0028-367: ref=['NO', 'DOUBT', 'NO', 'DOUBT', 'RETURNED', 'MISTER', 'LILBURN', 'BUT', 'IF', 'THY', 'RIGHT', 'EYE', 'OFFEND', 'THEE', 'PLUCK', 'IT', 'OUT', 'AND', 'CAST', 'IT', 'FROM', 'THEE', 'FOR', 'IT', 'IS', 'PROFITABLE', 'FOR', 'THEE', 'THAT', 'ONE', 'OF', 'THY', 'MEMBERS', 'SHOULD', 'PERISH', 'AND', 'NOT', 'THAT', 'THY', 'WHOLE', 'BODY', 'SHOULD', 'BE', 'CAST', 'INTO', 'HELL'] +8280-266249-0028-367: hyp=['NO', 'DOUBT', 'NO', 'DOUBT', 'RETURNED', 'MISTER', 'LILBOURNE', 'BUT', 'IF', 'THY', 'RIGHT', 'I', 'OFFENDLY', 'PLUCK', 'IT', 'UP', 'AND', 'CAST', 'IT', 'FROM', 'ME', 'FOR', 'IT', 'IS', 'PROFITABLE', 'FOR', 'THEE', 'THAT', 'ONE', 'OF', 'THY', 'MEMBERS', 'SHOULD', 'PERISH', 'AND', 'NOT', 'THAT', 'THY', 'WHOLE', 'BODY', 'SHOULD', 'BE', 'CAST', 'INTO', 'HELL'] +8280-266249-0029-368: ref=['THERE', 'WAS', 'A', 'PAUSE', 'BROKEN', 'BY', 'YOUNG', 'HORACE', 'WHO', 'HAD', 'BEEN', 'WATCHING', 'A', 'GROUP', 'OF', 'MEN', 'GATHERED', 'ABOUT', 'A', 'TABLE', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'ROOM'] +8280-266249-0029-368: hyp=['THERE', 'WAS', 'A', 'PAUSE', 'BROKEN', 'BY', 'YOUNG', 'HORACE', 'WHO', 'HAD', 'BEEN', 'WATCHING', 'A', 'GROUP', 'OF', 'MEN', 'GATHERED', 'ABOUT', 'A', 'TABLE', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'ROOM'] +8280-266249-0030-369: ref=['THEY', 'ARE', 'GAMBLING', 'YONDER', 'AND', "I'M", 'AFRAID', 'THAT', 'YOUNG', 'FELLOW', 'IS', 'BEING', 'BADLY', 
'FLEECED', 'BY', 'THAT', 'MIDDLE', 'AGED', 'MAN', 'OPPOSITE'] +8280-266249-0030-369: hyp=['THEY', 'ARE', 'GAMBLING', 'YONDER', 'AND', "I'M", 'AFRAID', 'THAT', 'YOUNG', 'FELLOW', 'IS', 'BEING', 'BADLY', 'FLEECED', 'BY', 'THE', 'MIDDLE', 'AGED', 'MAN', 'OPPOSITE'] +8280-266249-0031-370: ref=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'WERE', 'AT', 'ONCE', 'TURNED', 'IN', 'THAT', 'DIRECTION'] +8280-266249-0031-370: hyp=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'WERE', 'AT', 'ONCE', 'TURNED', 'IN', 'THAT', 'DIRECTION'] +8280-266249-0032-371: ref=['NO', 'SIR', 'HE', 'IS', 'NOT', 'HERE'] +8280-266249-0032-371: hyp=['NO', 'SIR', 'HE', 'IS', 'NOT', 'HERE'] +8280-266249-0033-372: ref=['AND', 'THE', 'DOOR', 'WAS', 'SLAMMED', 'VIOLENTLY', 'TO'] +8280-266249-0033-372: hyp=['AT', 'THE', 'DOOR', 'WAS', 'SLAMMED', 'VIOLENTLY', 'TOO'] +8280-266249-0034-373: ref=['NOW', 'THE', 'VOICE', 'CAME', 'FROM', 'THE', 'SKYLIGHT', 'OVERHEAD', 'APPARENTLY', 'AND', 'WITH', 'A', 'FIERCE', 'IMPRECATION', 'THE', 'IRATE', 'GAMESTER', 'RUSHED', 'UPON', 'DECK', 'AND', 'RAN', 'HITHER', 'AND', 'THITHER', 'IN', 'SEARCH', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0034-373: hyp=['NOW', 'THE', 'VOICE', 'CAME', 'FROM', 'THE', 'SKYLIGHT', 'OVERHEAD', 'APPARENTLY', 'AND', 'WITH', 'A', 'FIERCE', 'IMPRECATION', 'THE', 'IRATE', 'GAMESTER', 'RUSHED', 'UPON', 'DECK', 'AND', 'RAN', 'HITHER', 'AND', 'THITHER', 'IN', 'SEARCH', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0035-374: ref=['HIS', 'VICTIM', 'WHO', 'HAD', 'BEEN', 'LOOKING', 'ON', 'DURING', 'THE', 'LITTLE', 'SCENE', 'AND', 'LISTENING', 'TO', 'THE', 'MYSTERIOUS', 'VOICE', 'IN', 'SILENT', 'WIDE', 'EYED', 'WONDER', 'AND', 'FEAR', 'NOW', 'ROSE', 'HASTILY', 'HIS', 'FACE', 'DEATHLY', 'PALE', 'WITH', 'TREMBLING', 'HANDS', 'GATHERED', 'UP', 'THE', 'MONEY', 'HE', 'HAD', 'STAKED', 'AND', 'HURRYING', 'INTO', 'HIS', 'STATE', 'ROOM', 'LOCKED', 'HIMSELF', 'IN'] +8280-266249-0035-374: hyp=['HIS', 'VICTIM', 'WHO', 'HAD', 'BEEN', 'LOOKING', 'ON', 'DURING', 'THE', 'LITTLE', 'SCENE', 'AND', 'LISTENING', 'TO', 'THE', 'MYSTERIOUS', 'VOICE', 'AND', 'SILENT', 'WIDE', 'EYED', 'WONDER', 'AND', 'FEAR', 'NOW', 'ROSE', 'HASTILY', 'HIS', 'FACE', 'DEATHLY', 'PALE', 'WITH', 'TREMBLING', 'HANDS', 'GATHERED', 'UP', 'THE', 'MONEY', 'HE', 'HAD', 'STAKED', 'AND', 'HURRYING', 'TO', 'HIS', 'STATEROOM', 'LOCKED', 'HIMSELF', 'IN'] +8280-266249-0036-375: ref=['WHAT', 'DOES', 'IT', 'MEAN', 'CRIED', 'ONE'] +8280-266249-0036-375: hyp=['WHAT', 'DOES', 'IT', 'MEAN', 'CRIED', 'ONE'] +8280-266249-0037-376: ref=['A', 'VENTRILOQUIST', 'ABOARD', 'OF', 'COURSE', 'RETURNED', 'ANOTHER', "LET'S", 'FOLLOW', 'AND', 'SEE', 'THE', 'FUN'] +8280-266249-0037-376: hyp=['A', 'VENTILOQUE', 'QUESTERED', 'BOARD', 'OF', 'COURSE', 'RETURNED', 'ANOTHER', "LET'S", 'FOLLOW', 'AND', 'SEE', 'THE', 'FUN'] +8280-266249-0038-377: ref=['I', 'WONDER', 'WHICH', 'OF', 'US', 'IT', 'IS', 'REMARKED', 'THE', 'FIRST', 'LOOKING', 'HARD', 'AT', 'OUR', 'PARTY', 'I', "DON'T", 'KNOW', 'BUT', 'COME', 'ON'] +8280-266249-0038-377: hyp=['I', 'WONDER', 'WHICH', 'OF', 'US', 'IT', 'IS', 'REMARKED', 'THE', 'FIRST', 'LOOKING', 'HARD', 'AT', 'OUR', 'PARTY', 'I', "DON'T", 'KNOW', 'BUT', 'COME', 'ON'] +8280-266249-0039-378: ref=['THAT', 'FELLOW', 'NICK', 'WARD', 'IS', 'A', 'NOTED', 'BLACKLEG', 'AND', 'RUFFIAN', 'HAD', 'HIS', 'NOSE', 'BROKEN', 'IN', 'A', 'FIGHT', 'AND', 'IS', 'SENSITIVE', 'ON', 'THE', 'SUBJECT', 'WAS', 'CHEATING', 'OF', 'COURSE'] +8280-266249-0039-378: hyp=['THAT', 'FELLOW', 'NICK', 'WARD', 'IS', 'A', 'NOTED', 'BLACK', 'LAG', 'AND', 'RUFFIAN', 'HAD', 'HIS', 'NOSE', 
'BROKEN', 'IN', 'A', 'FIGHT', 'AND', 'IS', 'SENSITIVE', 'ON', 'THE', 'SUBJECT', 'WAS', 'CHEATING', 'OF', 'COURSE'] +8280-266249-0040-379: ref=['WHO', 'ASKED', 'THE', 'MATE', "I'VE", 'SEEN', 'NONE', 'UP', 'HERE', 'THOUGH', 'THERE', 'ARE', 'SOME', 'IN', 'THE', 'STEERAGE'] +8280-266249-0040-379: hyp=['WHO', 'ASKED', 'THE', 'MATE', "I'VE", 'SEEN', 'NO', 'NAP', 'HERE', 'THOUGH', 'THERE', 'ARE', 'SOME', 'IN', 'THE', 'STEERAGE'] +8280-266249-0041-380: ref=['THEY', 'HEARD', 'HIM', 'IN', 'SILENCE', 'WITH', 'A', 'COOL', 'PHLEGMATIC', 'INDIFFERENCE', 'MOST', 'EXASPERATING', 'TO', 'ONE', 'IN', 'HIS', 'PRESENT', 'MOOD'] +8280-266249-0041-380: hyp=['THEY', 'HEARD', 'HIM', 'IN', 'SILENCE', 'WITH', 'A', 'COOL', 'PHLEGMATIC', 'INDIFFERENCE', 'MOST', 'EXASPERATING', 'TO', 'ONE', 'IN', 'HIS', 'PRESENT', 'MOOD'] +8280-266249-0042-381: ref=['A', 'MAN', 'OF', 'GIANT', 'SIZE', 'AND', 'HERCULEAN', 'STRENGTH', 'HAD', 'LAID', 'ASIDE', 'HIS', 'PIPE', 'AND', 'SLOWLY', 'RISING', 'TO', 'HIS', 'FEET', 'SEIZED', 'THE', 'SCOUNDREL', 'IN', 'HIS', 'POWERFUL', 'GRASP'] +8280-266249-0042-381: hyp=['A', 'MAN', 'OF', 'GIANT', 'SIZE', 'AND', 'HERCULEAN', 'STRENGTH', 'HAD', 'LAID', 'ASIDE', 'HIS', 'PIPE', 'AND', 'SLOWLY', 'RISING', 'TO', 'HIS', 'FEET', 'SEIZED', 'THE', 'SCOUNDREL', 'IN', 'HIS', 'POWERFUL', 'GRASP'] +8280-266249-0043-382: ref=['LET', 'ME', 'GO', 'YELLED', 'WARD', 'MAKING', 'A', 'DESPERATE', 'EFFORT', 'TO', 'FREE', 'HIS', 'ARMS'] +8280-266249-0043-382: hyp=['LET', 'ME', 'GO', 'YELLED', 'WARD', 'MAKING', 'A', 'DESPERATE', 'EFFORT', 'TO', 'FREE', 'HIS', 'ARMS'] +8280-266249-0044-383: ref=['I', 'DINKS', 'NO', 'I', 'DINKS', 'I', 'DEACH', 'YOU', 'VON', 'LESSON', 'RETURNED', 'HIS', 'CAPTOR', 'NOT', 'RELAXING', 'HIS', 'GRASP', 'IN', 'THE', 'LEAST'] +8280-266249-0044-383: hyp=['I', 'THINK', 'NO', 'I', 'THINK', 'I', 'DID', 'YOU', 'FOR', 'MESSUM', 'RETURNED', 'HIS', 'CAPTOR', 'NOT', 'RELAXING', 'HIS', 'GRASP', 'IN', 'THE', 'LEAST'] +8280-266249-0045-384: ref=['THE', 'GERMAN', 'RELEASED', 'HIS', 'PRISONER', 'AND', 'THE', 'LATTER', 'SLUNK', 'AWAY', 'WITH', 'MUTTERED', 'THREATS', 'AND', 'IMPRECATIONS', 'UPON', 'THE', 'HEAD', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0045-384: hyp=['THE', 'GERMAN', 'RELEASED', 'HIS', 'PRISONER', 'AND', 'THE', 'LATTER', 'SUNK', 'AWAY', 'WITH', 'MUTTERED', 'THREATS', 'AND', 'IMPRECATIONS', 'UPON', 'THE', 'HEAD', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0046-385: ref=['MISTER', 'LILBURN', 'AND', 'MISTER', 'DALY', 'EACH', 'AT', 'A', 'DIFFERENT', 'TIME', 'SOUGHT', 'OUT', 'THE', 'YOUNG', 'MAN', "WARD'S", 'INTENDED', 'VICTIM', 'AND', 'TRIED', 'TO', 'INFLUENCE', 'HIM', 'FOR', 'GOOD'] +8280-266249-0046-385: hyp=['MISTER', 'LILLBURN', 'AND', 'MISTER', 'DALY', 'EACH', 'HAD', 'A', 'DIFFERENT', 'TIME', 'SOUGHT', 'OUT', 'THE', 'YOUNG', 'MAN', 'WORDS', 'INTENDED', 'VICTIM', 'AND', 'TRIED', 'TO', 'INFLUENCE', 'HIM', 'FOR', 'GOOD'] +8280-266249-0047-386: ref=['YET', 'THERE', 'WAS', 'GAMBLING', 'AGAIN', 'THE', 'SECOND', 'NIGHT', 'BETWEEN', 'WARD', 'AND', 'SEVERAL', 'OTHERS', 'OF', 'HIS', 'PROFESSION'] +8280-266249-0047-386: hyp=['YET', 'THERE', 'WAS', 'GAMBLING', 'AGAIN', 'THE', 'SECOND', 'NIGHT', 'BETWEEN', 'WARD', 'AND', 'SEVERAL', 'OTHERS', 'OF', 'HIS', 'PROFESSIONS'] +8280-266249-0048-387: ref=['THEY', 'KEPT', 'IT', 'UP', 'TILL', 'AFTER', 'MIDNIGHT'] +8280-266249-0048-387: hyp=['THEY', 'KEPT', 'IT', 'UP', 'TILL', 'AFTER', 'MIDNIGHT'] +8280-266249-0049-388: ref=['THEN', 'MISTER', 'LILBURN', 'WAKING', 'FROM', 'HIS', 'FIRST', 'SLEEP', 'IN', 'A', 'STATEROOM', 'NEAR', 'BY', 'THOUGHT', 'HE', 'WOULD', 'BREAK', 'IT', 
'UP', 'ONCE', 'MORE'] +8280-266249-0049-388: hyp=['THEN', 'MISTER', 'LILLO', 'WAKING', 'FROM', 'HIS', 'FIRST', 'SLEEP', 'IN', 'A', 'STATEROOM', 'NEAR', 'BY', 'THOUGHT', 'HE', 'WOULD', 'BREAK', 'IT', 'UP', 'ONCE', 'MORE'] +8280-266249-0050-389: ref=['AN', 'INTENSE', 'VOICELESS', 'EXCITEMENT', 'POSSESSED', 'THE', 'PLAYERS', 'FOR', 'THE', 'GAME', 'WAS', 'A', 'CLOSE', 'ONE', 'AND', 'THE', 'STAKES', 'WERE', 'VERY', 'HEAVY'] +8280-266249-0050-389: hyp=['AN', 'INTENSE', 'VOICELESS', 'EXCITEMENT', 'POSSESSED', 'THE', 'PLAYERS', 'FOR', 'THE', 'GAME', 'WAS', 'A', 'CLOSE', 'ONE', 'AND', 'THE', 'STAKES', 'WERE', 'VERY', 'HEAVY'] +8280-266249-0051-390: ref=['THEY', 'BENT', 'EAGERLY', 'OVER', 'THE', 'BOARD', 'EACH', 'WATCHING', 'WITH', 'FEVERISH', 'ANXIETY', 'HIS', "COMPANION'S", 'MOVEMENTS', 'EACH', 'CASTING', 'NOW', 'AND', 'AGAIN', 'A', 'GLOATING', 'EYE', 'UPON', 'THE', 'HEAP', 'OF', 'GOLD', 'AND', 'GREENBACKS', 'THAT', 'LAY', 'BETWEEN', 'THEM', 'AND', 'AT', 'TIMES', 'HALF', 'STRETCHING', 'OUT', 'HIS', 'HAND', 'TO', 'CLUTCH', 'IT'] +8280-266249-0051-390: hyp=["THEY'VE", 'BEEN', 'EAGERLY', 'OVER', 'THE', 'BOARD', 'EACH', 'WATCHING', 'WITH', 'FEVERISH', 'ANXIETY', 'HIS', "COMPANION'S", 'MOVEMENTS', 'EACH', 'CASTING', 'NOW', 'AND', 'AGAIN', 'A', 'GLOATING', 'EYE', 'UPON', 'THE', 'HEAP', 'OF', 'GOLD', 'AND', 'GREENBACKS', 'THAT', 'LAY', 'BETWEEN', 'THEM', 'AND', 'AT', 'TIMES', 'HALF', 'STRETCHING', 'OUT', 'HIS', 'HAND', 'TO', 'CLUTCH', 'IT'] +8280-266249-0052-391: ref=['A', 'DEEP', 'GROAN', 'STARTLED', 'THEM', 'AND', 'THEY', 'SPRANG', 'TO', 'THEIR', 'FEET', 'PALE', 'AND', 'TREMBLING', 'WITH', 'SUDDEN', 'TERROR', 'EACH', 'HOLDING', 'HIS', 'BREATH', 'AND', 'STRAINING', 'HIS', 'EAR', 'TO', 'CATCH', 'A', 'REPETITION', 'OF', 'THE', 'DREAD', 'SOUND'] +8280-266249-0052-391: hyp=['A', 'DEEP', 'GROAN', 'STARTLED', 'THEM', 'AND', 'THEY', 'SPRANG', 'TO', 'THEIR', 'FEET', 'PALE', 'AND', 'TREMBLING', 'WITH', 'SUDDEN', 'TERROR', 'EACH', 'HOLDING', 'HIS', 'BREATH', 'AND', 'STRAINING', 'HIS', 'EAR', 'TO', 'CATCH', 'A', 'REPETITION', 'OF', 'THE', 'DREAD', 'SOUND'] +8280-266249-0053-392: ref=['BUT', 'ALL', 'WAS', 'SILENT', 'AND', 'AFTER', 'A', 'MOMENT', 'OF', 'ANXIOUS', 'WAITING', 'THEY', 'SAT', 'DOWN', 'TO', 'THEIR', 'GAME', 'AGAIN', 'TRYING', 'TO', 'CONCEAL', 'AND', 'SHAKE', 'OFF', 'THEIR', 'FEARS', 'WITH', 'A', 'FORCED', 'UNNATURAL', 'LAUGH'] +8280-266249-0053-392: hyp=['BUT', 'ALWAYS', 'SILENT', 'AND', 'AFTER', 'A', 'MOMENT', 'OF', 'ANXIOUS', 'WAITING', 'THEY', 'SAT', 'DOWN', 'TO', 'THEIR', 'GAME', 'AGAIN', 'TRYING', 'TO', 'CONCEAL', 'AND', 'SHAKE', 'OFF', 'THEIR', 'FEARS', 'TO', 'THE', 'FORCED', 'UNNATURAL', 'LAUGH'] +8280-266249-0054-393: ref=['IT', 'CAME', 'FROM', 'UNDER', 'THE', 'TABLE', 'GASPED', 'WARD', 'LOOK', "WHAT'S", 'THERE', 'LOOK', 'YOURSELF'] +8280-266249-0054-393: hyp=['IT', 'CAME', 'FROM', 'UNDER', 'THE', 'TABLE', 'GASPED', 'WARREN', 'LOOK', "WHAT'S", 'THERE', 'LOOK', 'TO', 'YOURSELF'] +8280-266249-0055-394: ref=['WHAT', 'CAN', 'IT', 'HAVE', 'BEEN', 'THEY', 'ASKED', 'EACH', 'OTHER'] +8280-266249-0055-394: hyp=['WHAT', 'CAN', 'IT', 'HAVE', 'BEEN', 'THEY', 'ASKED', 'EACH', 'OTHER'] +8280-266249-0056-395: ref=['OH', 'NONSENSE', 'WHAT', 'FOOLS', 'WE', 'ARE'] +8280-266249-0056-395: hyp=['OH', 'NONSENSE', 'WHAT', 'FOOLS', 'WE', 'ARE'] +8280-266249-0057-396: ref=['IT', 'WAS', 'THE', 'LAST', 'GAME', 'OF', 'CARDS', 'FOR', 'THAT', 'TRIP'] +8280-266249-0057-396: hyp=['IT', 'WAS', 'THE', 'LAST', 'GAME', 'OF', 'CARDS', 'FOR', 'THAT', 'TRIP'] +8280-266249-0058-397: ref=['THE', 'CAPTAIN', 'COMING', 'IN', 'SHORTLY', 
'AFTER', 'THE', 'SUDDEN', 'FLIGHT', 'OF', 'THE', 'GAMBLERS', 'TOOK', 'CHARGE', 'OF', 'THE', 'MONEY', 'AND', 'THE', 'NEXT', 'DAY', 'RESTORED', 'IT', 'TO', 'THE', 'OWNERS'] +8280-266249-0058-397: hyp=['THE', 'CAPTAIN', 'COMING', 'IN', 'SHORTLY', 'AFTER', 'THE', 'SUDDEN', 'FLIGHT', 'OF', 'THE', 'GAMBLERS', 'TOOK', 'CHARGE', 'OF', 'THE', 'MONEY', 'AND', 'THE', 'NEXT', 'DAY', 'RESTORED', 'IT', 'TO', 'THE', 'OWNERS'] +8280-266249-0059-398: ref=['TO', "ELSIE'S", 'OBSERVANT', 'EYES', 'IT', 'PRESENTLY', 'BECAME', 'EVIDENT', 'THAT', 'THE', 'DALYS', 'WERE', 'IN', 'VERY', 'STRAITENED', 'CIRCUMSTANCES'] +8280-266249-0059-398: hyp=['TO', "ELSIE'S", 'OBSERVANT', 'EYES', 'IT', 'PRESENTLY', 'BECAME', 'EVIDENT', 'THAT', 'THE', 'DAILIES', 'RAN', 'VERY', 'STRAIGHT', 'IN', 'CIRCUMSTANCES'] +8280-266249-0060-399: ref=['OH', 'HOW', 'KIND', 'HOW', 'VERY', 'KIND', 'MISSUS', 'DALY', 'SAID', 'WITH', 'TEARS', 'OF', 'JOY', 'AND', 'GRATITUDE', 'WE', 'HAVE', 'HARDLY', 'KNOWN', 'HOW', 'WE', 'SHOULD', 'MEET', 'THE', 'MOST', 'NECESSARY', 'EXPENSES', 'OF', 'THIS', 'TRIP', 'BUT', 'HAVE', 'BEEN', 'TRYING', 'TO', 'CAST', 'OUR', 'CARE', 'UPON', 'THE', 'LORD', 'ASKING', 'HIM', 'TO', 'PROVIDE'] +8280-266249-0060-399: hyp=['OH', 'HOW', 'KIND', 'HOW', 'VERY', 'KIND', 'MISSUS', 'DALEY', 'SAID', 'WITH', 'TEARS', 'OF', 'JOY', 'AND', 'GRATITUDE', 'WE', 'HAVE', 'HARDLY', 'KNOWN', 'HOW', 'WE', 'SHOULD', 'MEET', 'THE', 'MOST', 'NECESSARY', 'EXPENSES', 'OF', 'THIS', 'TRIP', 'BUT', 'HAVE', 'BEEN', 'TRYING', 'TO', 'CAST', 'OUR', 'CARE', 'UPON', 'THE', 'LORD', 'ASKING', 'HIM', 'TO', 'PROVIDE'] +8280-266249-0061-400: ref=['AND', 'HOW', 'WONDERFULLY', 'HE', 'HAS', 'ANSWERED', 'OUR', 'PETITIONS'] +8280-266249-0061-400: hyp=['AND', 'HOW', 'WONDERFULLY', 'HE', 'HAS', 'ANSWERED', 'OUR', 'PETITIONS'] +8280-266249-0062-401: ref=['ELSIE', 'ANSWERED', 'PRESSING', 'HER', 'HAND', 'AFFECTIONATELY', 'ART', 'WE', 'NOT', 'SISTERS', 'IN', 'CHRIST'] +8280-266249-0062-401: hyp=['ELSIE', 'ANSWERED', 'PRESSING', 'HER', 'HAND', 'AFFECTIONATELY', 'ARE', 'WE', 'NOT', 'SISTERS', 'IN', 'CHRIST'] +8280-266249-0063-402: ref=['YE', 'ARE', 'ALL', 'THE', 'CHILDREN', 'OF', 'GOD', 'BY', 'FAITH', 'IN', 'CHRIST', 'JESUS'] +8280-266249-0063-402: hyp=['YE', 'ARE', 'ALL', 'THE', 'CHILDREN', 'OF', 'GOD', 'BY', 'FAITH', 'IN', 'CHRIST', 'JESUS'] +8280-266249-0064-403: ref=['YE', 'ARE', 'ALL', 'ONE', 'IN', 'CHRIST', 'JESUS'] +8280-266249-0064-403: hyp=['YEAR', 'ALL', 'ONE', 'AND', 'CHRIST', 'JESUS'] +8280-266249-0065-404: ref=['WE', 'FEEL', 'MY', 'HUSBAND', 'AND', 'I', 'THAT', 'WE', 'ARE', 'ONLY', 'THE', 'STEWARDS', 'OF', 'HIS', 'BOUNTY', 'AND', 'THAT', 'BECAUSE', 'HE', 'HAS', 'SAID', 'INASMUCH', 'AS', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ONE', 'OF', 'THE', 'LEAST', 'OF', 'THESE', 'MY', 'BRETHREN', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ME', 'IT', 'IS', 'THE', 'GREATEST', 'PRIVILEGE', 'AND', 'DELIGHT', 'TO', 'DO', 'ANYTHING', 'FOR', 'HIS', 'PEOPLE'] +8280-266249-0065-404: hyp=['WE', 'SEE', 'ON', 'MY', 'HUSBAND', 'AND', 'I', 'THAT', 'WE', 'ARE', 'ONLY', 'THE', 'STEWARDS', 'OF', 'HIS', 'BOUNTY', 'AND', 'BECAUSE', 'HE', 'HAS', 'SAID', 'INASMUCH', 'AS', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ONE', 'OF', 'THE', 'LEAST', 'OF', 'THESE', 'MY', 'BRETHREN', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ME', 'IT', 'IS', 'THE', 'GREATEST', 'PRIVILEGE', 'AND', 'DELIGHT', 'TO', 'DO', 'ANYTHING', 'FOR', 'HIS', 'PEOPLE'] +8461-258277-0000-1649: ref=['WHEN', 'IT', 'WAS', 'THE', 'SEVEN', 'HUNDRED', 'AND', 'EIGHTEENTH', 'NIGHT'] +8461-258277-0000-1649: hyp=['WHEN', 'IT', 'WAS', 'THE', 'SEVEN', 'HUNDRED', 'AND', 
'EIGHTEENTH', 'NIGHT'] +8461-258277-0001-1650: ref=['BUT', 'HE', 'ANSWERED', 'NEEDS', 'MUST', 'I', 'HAVE', 'ZAYNAB', 'ALSO', 'NOW', 'SUDDENLY', 'THERE', 'CAME', 'A', 'RAP', 'AT', 'THE', 'DOOR', 'AND', 'THE', 'MAID', 'SAID', 'WHO', 'IS', 'AT', 'THE', 'DOOR'] +8461-258277-0001-1650: hyp=['BUT', 'HE', 'ANSWERED', 'NEEDS', 'MICE', 'THY', 'HALVES', 'THINE', 'APPLES', 'SAY', 'NOW', 'CERTAINLY', 'THERE', 'CAME', 'A', 'RAP', 'AT', 'THE', 'DOOR', 'AND', 'THE', 'MAID', 'SAID', 'WHO', 'IS', 'AT', 'THE', 'DOOR'] +8461-258277-0002-1651: ref=['THE', 'KNOCKER', 'REPLIED', 'KAMAR', 'DAUGHTER', 'OF', 'AZARIAH', 'THE', 'JEW', 'SAY', 'ME', 'IS', 'ALI', 'OF', 'CAIRO', 'WITH', 'YOU'] +8461-258277-0002-1651: hyp=['THE', 'KNOCKER', 'REPLIED', 'COME', 'ALL', 'DAUGHTER', 'VASSARIAH', 'THE', 'JEW', 'SAY', 'ME', 'IS', 'ALI', 'OF', 'CAIRO', 'WITH', 'YOU'] +8461-258277-0003-1652: ref=['REPLIED', 'THE', "BROKER'S", 'DAUGHTER', 'O', 'THOU', 'DAUGHTER', 'OF', 'A', 'DOG'] +8461-258277-0003-1652: hyp=['REPLIED', 'THE', "BROKER'S", 'DAUGHTER', 'O', 'THOU', 'DAUGHTER', 'OF', 'A', 'DOG'] +8461-258277-0004-1653: ref=['AND', 'HAVING', 'THUS', 'ISLAMISED', 'SHE', 'ASKED', 'HIM', 'DO', 'MEN', 'IN', 'THE', 'FAITH', 'OF', 'AL', 'ISLAM', 'GIVE', 'MARRIAGE', 'PORTIONS', 'TO', 'WOMEN', 'OR', 'DO', 'WOMEN', 'DOWER', 'MEN'] +8461-258277-0004-1653: hyp=['ON', 'HAVING', 'THUS', 'ISLAMIZED', 'SHE', 'ASKED', 'HIM', 'TWO', 'MEN', 'IN', 'THE', 'FAITH', 'OF', 'ALI', 'SLAM', 'GAVE', 'MARRIAGE', 'PORTIONS', 'TO', 'WOMEN', 'OR', 'TWO', 'WOMEN', 'TO', 'OUR', 'MEN'] +8461-258277-0005-1654: ref=['AND', 'SHE', 'THREW', 'DOWN', 'THE', "JEW'S", 'HEAD', 'BEFORE', 'HIM'] +8461-258277-0005-1654: hyp=['AND', 'SHE', 'THREW', 'DOWN', 'THE', "JEW'S", 'HEAD', 'BEFORE', 'HIM'] +8461-258277-0006-1655: ref=['NOW', 'THE', 'CAUSE', 'OF', 'HER', 'SLAYING', 'HER', 'SIRE', 'WAS', 'AS', 'FOLLOWS'] +8461-258277-0006-1655: hyp=['NOW', 'THE', 'CAUSE', 'OF', 'HER', 'SLAYING', 'HER', 'SIRE', 'WAS', 'AS', 'FOLLOWS'] +8461-258277-0007-1656: ref=['THEN', 'HE', 'SET', 'OUT', 'REJOICING', 'TO', 'RETURN', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTY'] +8461-258277-0007-1656: hyp=['THEN', 'HE', 'SAT', 'OUT', 'REJOICING', 'TO', 'RETURN', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTE'] +8461-258277-0008-1657: ref=['SO', 'HE', 'ATE', 'AND', 'FELL', 'DOWN', 'SENSELESS', 'FOR', 'THE', 'SWEETMEATS', 'WERE', 'DRUGGED', 'WITH', 'BHANG', 'WHEREUPON', 'THE', 'KAZI', 'BUNDLED', 'HIM', 'INTO', 'THE', 'SACK', 'AND', 'MADE', 'OFF', 'WITH', 'HIM', 'CHARGER', 'AND', 'CHEST', 'AND', 'ALL', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTY'] +8461-258277-0008-1657: hyp=['SO', 'HE', 'ATE', 'AND', 'FELL', 'DOWN', 'SENSELESS', 'FOR', 'THE', 'SWEETMEATS', 'WERE', 'DRUGGED', 'WITH', 'BANG', 'WHEREUPON', 'THE', 'KAZI', 'BUNDLED', 'HIM', 'INTO', 'THE', 'SACK', 'AND', 'MADE', 'OFF', 'WITH', 'HIM', 'CHARGER', 'AND', 'CHEST', 'AND', 'ALL', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTE'] +8461-258277-0009-1658: ref=['PRESENTLY', 'HASAN', 'SHUMAN', 'CAME', 'OUT', 'OF', 'A', 'CLOSET', 'AND', 'SAID', 'TO', 'HIM', 'HAST', 'THOU', 'GOTTEN', 'THE', 'GEAR', 'O', 'ALI'] +8461-258277-0009-1658: hyp=['PRESENTLY', 'HER', 'SANCHUMAIN', 'CAME', 'OUT', 'OF', 'A', 'CLOTH', 'AND', 'SAID', 'TO', 'HIM', 'HAST', 'THOU', 'GOTTEN', 'AGAIN', 'O', 'ALI'] +8461-258277-0010-1659: ref=['SO', 'HE', 'TOLD', 'HIM', 'WHAT', 'HAD', 'BEFALLEN', 'HIM', 'AND', 'ADDED', 'IF', 'I', 'KNOW', 'WHITHER', 'THE', 'RASCAL', 'IS', 'GONE', 'AND', 'WHERE', 'TO', 'FIND', 'THE', 'KNAVE', 'I', 'WOULD', 'PAY', 'HIM', 'OUT'] +8461-258277-0010-1659: hyp=['SO', 'HE', 
'TOLD', 'THEM', 'WHAT', 'HAD', 'BEFALLEN', 'HIM', 'AND', 'ADDED', 'IF', 'I', 'KNOW', 'WHETHER', 'THE', 'RASCAL', 'IS', 'GONE', 'AND', 'WHERE', 'TO', 'FIND', 'THE', 'KNAVE', 'I', 'WOULD', 'PAY', 'HIM', 'OUT'] +8461-258277-0011-1660: ref=['KNOWEST', 'THOU', 'WHITHER', 'HE', 'WENT'] +8461-258277-0011-1660: hyp=['KNOWEST', 'THOU', 'WHITHER', 'HE', 'WENT'] +8461-258277-0012-1661: ref=['ANSWERED', 'HASAN', 'I', 'KNOW', 'WHERE', 'HE', 'IS', 'AND', 'OPENING', 'THE', 'DOOR', 'OF', 'THE', 'CLOSET', 'SHOWED', 'HIM', 'THE', 'SWEETMEAT', 'SELLER', 'WITHIN', 'DRUGGED', 'AND', 'SENSELESS'] +8461-258277-0012-1661: hyp=['ANSWERED', 'HASAN', 'I', 'KNOW', 'WHERE', 'HE', 'IS', 'AND', 'OPENING', 'THE', 'DOOR', 'OF', 'THE', 'CLOSET', 'SHOWED', 'HIM', 'THE', 'SWEETMEAT', 'CELLAR', 'WITHIN', 'DRUGGED', 'AND', 'SENSELESS'] +8461-258277-0013-1662: ref=['SO', 'I', 'WENT', 'ROUND', 'ABOUT', 'THE', 'HIGHWAYS', 'OF', 'THE', 'CITY', 'TILL', 'I', 'MET', 'A', 'SWEETMEAT', 'SELLER', 'AND', 'BUYING', 'HIS', 'CLOTHES', 'AND', 'STOCK', 'IN', 'TRADE', 'AND', 'GEAR', 'FOR', 'TEN', 'DINARS', 'DID', 'WHAT', 'WAS', 'DONE'] +8461-258277-0013-1662: hyp=['SO', 'I', 'WENT', 'ROUND', 'ABOUT', 'THE', 'HIGHWAYS', 'OF', 'THE', 'CITY', 'TILL', 'I', 'MET', 'A', 'SWEETMEAT', 'CELLAR', 'AND', 'BUYING', 'HIS', 'CLOTHES', 'AND', 'STOCK', 'IN', 'TRADE', 'AND', 'GEAR', 'FOR', 'TEN', 'DINARS', 'DID', 'WHAT', 'WAS', 'DONE'] +8461-258277-0014-1663: ref=['QUOTH', 'AL', 'RASHID', 'WHOSE', 'HEAD', 'IS', 'THIS'] +8461-258277-0014-1663: hyp=['QUOTH', 'A', 'RASCHID', 'WHOSE', 'HEAD', 'IS', 'THIS'] +8461-258277-0015-1664: ref=['SO', 'ALI', 'RELATED', 'TO', 'HIM', 'ALL', 'THAT', 'HAD', 'PASSED', 'FROM', 'FIRST', 'TO', 'LAST', 'AND', 'THE', 'CALIPH', 'SAID', 'I', 'HAD', 'NOT', 'THOUGHT', 'THOU', 'WOULDST', 'KILL', 'HIM', 'FOR', 'THAT', 'HE', 'WAS', 'A', 'SORCERER'] +8461-258277-0015-1664: hyp=['SO', 'I', 'RELATED', 'TO', 'HIM', 'ALL', 'THAT', 'PASSED', 'FROM', 'FIRST', 'LAST', 'AND', 'THE', 'CALIPH', 'SAID', 'I', 'HATE', 'NOT', 'THOUGHT', 'THOU', 'WOULDST', 'KILL', 'HIM', 'FOR', 'THAT', 'HE', 'WAS', 'A', 'SORCERER'] +8461-258277-0016-1665: ref=['HE', 'REPLIED', 'I', 'HAVE', 'FORTY', 'LADS', 'BUT', 'THEY', 'ARE', 'IN', 'CAIRO'] +8461-258277-0016-1665: hyp=['HE', 'REPLIED', 'I', 'HAVE', 'FORTY', 'LADS', 'BUT', 'THEY', 'ARE', 'IN', 'CAIRO'] +8461-278226-0000-1633: ref=['AND', 'LAURA', 'HAD', 'HER', 'OWN', 'PET', 'PLANS'] +8461-278226-0000-1633: hyp=['AND', 'LAURA', 'HAD', 'HER', 'OWN', 'PET', 'PLANS'] +8461-278226-0001-1634: ref=['SHE', 'MEANT', 'TO', 'BE', 'SCRUPULOUSLY', 'CONSCIENTIOUS', 'IN', 'THE', 'ADMINISTRATION', 'OF', 'HER', 'TALENTS', 'AND', 'SOMETIMES', 'AT', 'CHURCH', 'ON', 'A', 'SUNDAY', 'WHEN', 'THE', 'SERMON', 'WAS', 'PARTICULARLY', 'AWAKENING', 'SHE', 'MENTALLY', 'DEBATED', 'THE', 'SERIOUS', 'QUESTION', 'AS', 'TO', 'WHETHER', 'NEW', 'BONNETS', 'AND', 'A', 'PAIR', 'OF', "JOUVIN'S", 'GLOVES', 'DAILY', 'WERE', 'NOT', 'SINFUL', 'BUT', 'I', 'THINK', 'SHE', 'DECIDED', 'THAT', 'THE', 'NEW', 'BONNETS', 'AND', 'GLOVES', 'WERE', 'ON', 'THE', 'WHOLE', 'A', 'PARDONABLE', 'WEAKNESS', 'AS', 'BEING', 'GOOD', 'FOR', 'TRADE'] +8461-278226-0001-1634: hyp=['SHE', 'MEANT', 'TO', 'BE', 'SCRUPULOUSLY', 'CONSCIENTIOUS', 'IN', 'THE', 'ADMINISTRATION', 'OF', 'A', 'TALENT', 'AND', 'SOMETIMES', 'AT', 'CHURCH', 'ON', 'A', 'SUNDAY', 'WHEN', 'THE', 'SIMON', 'WAS', 'PARTICULARLY', 'AWAKENING', 'SHE', 'MENTALLY', 'DEBATED', 'A', 'SERIOUS', 'QUESTION', 'AS', 'TO', 'WHETHER', 'NEW', 'BONNET', 'AND', 'A', 'PAIR', 'OF', 'ZO', 'BOUNDS', 'GLOVES', 'DAILY', 'WERE', 'NOT', 'SINFUL', 
'BUT', 'I', 'THINK', 'SHE', 'DECIDED', 'THAT', 'THE', 'NEW', 'BONNETS', 'AND', 'GLOVES', 'WERE', 'ON', 'THE', 'WHOLE', 'A', 'PIONABLE', 'WEAKNESS', 'AS', 'BEING', 'GOOD', 'FOR', 'TRADE'] +8461-278226-0002-1635: ref=['ONE', 'MORNING', 'LAURA', 'TOLD', 'HER', 'HUSBAND', 'WITH', 'A', 'GAY', 'LAUGH', 'THAT', 'SHE', 'WAS', 'GOING', 'TO', 'VICTIMIZE', 'HIM', 'BUT', 'HE', 'WAS', 'TO', 'PROMISE', 'TO', 'BE', 'PATIENT', 'AND', 'BEAR', 'WITH', 'HER', 'FOR', 'ONCE', 'IN', 'A', 'WAY'] +8461-278226-0002-1635: hyp=['ONE', 'MORNING', 'LAURA', 'TOLD', 'HER', 'HUSBAND', 'WITH', 'A', 'GAY', 'LAUGH', 'THAT', 'SHE', 'WAS', 'GOING', 'TO', 'VICTIMIZE', 'HIM', 'BUT', 'HE', 'WAS', 'TO', 'PROMISE', 'TO', 'BE', 'PATIENT', 'AND', 'BEAR', 'WITH', 'HER', 'FOR', 'ONCE', 'IN', 'A', 'WAY'] +8461-278226-0003-1636: ref=['I', 'WANT', 'TO', 'SEE', 'ALL', 'THE', 'PICTURES', 'THE', 'MODERN', 'PICTURES', 'ESPECIALLY'] +8461-278226-0003-1636: hyp=['I', 'WANT', 'TO', 'SEE', 'ALL', 'THE', 'PICTURES', 'THE', 'MODERN', 'PICTURES', 'ESPECIALLY'] +8461-278226-0004-1637: ref=['I', 'REMEMBER', 'ALL', 'THE', 'RUBENSES', 'AT', 'THE', 'LOUVRE', 'FOR', 'I', 'SAW', 'THEM', 'THREE', 'YEARS', 'AGO', 'WHEN', 'I', 'WAS', 'STAYING', 'IN', 'PARIS', 'WITH', 'GRANDPAPA'] +8461-278226-0004-1637: hyp=['I', 'REMEMBER', 'ALL', 'THE', 'RUBEN', 'SAYS', 'THAT', 'THE', 'LOUVRE', 'FOR', 'I', 'SAW', 'THEM', 'FOR', 'YEARS', 'AGO', 'WHEN', 'I', 'WAS', 'STAYING', 'IN', 'PARIS', 'WITH', 'GRANDPAPA'] +8461-278226-0005-1638: ref=['SHE', 'RETURNED', 'IN', 'A', 'LITTLE', 'MORE', 'THAN', 'TEN', 'MINUTES', 'IN', 'THE', 'FRESHEST', 'TOILETTE', 'ALL', 'PALE', 'SHIMMERING', 'BLUE', 'LIKE', 'THE', 'SPRING', 'SKY', 'WITH', 'PEARL', 'GREY', 'GLOVES', 'AND', 'BOOTS', 'AND', 'PARASOL', 'AND', 'A', 'BONNET', 'THAT', 'SEEMED', 'MADE', 'OF', 'AZURE', 'BUTTERFLIES'] +8461-278226-0005-1638: hyp=['SHE', 'RETURNED', 'IN', 'A', 'LITTLE', 'MORE', 'THAN', 'TEN', 'MINUTES', 'IN', 'THE', 'FRESHEST', 'TOILETTE', 'ALL', 'PALE', 'SHIMMERING', 'BLUE', 'LIKE', 'THE', 'SPRING', 'SKY', 'WITH', 'PEAR', 'GRAY', 'GLOVES', 'AND', 'BOOTS', 'AND', 'PARASOL', 'AND', 'A', 'BONNET', 'THAT', 'SEEMED', 'MADE', 'TO', 'USURE', 'BUTTERFLIES'] +8461-278226-0006-1639: ref=['IT', 'WAS', 'DRAWING', 'TOWARDS', 'THE', 'CLOSE', 'OF', 'THIS', 'DELIGHTFUL', 'HONEYMOON', 'TOUR', 'AND', 'IT', 'WAS', 'A', 'BRIGHT', 'SUNSHINY', 'MORNING', 'EARLY', 'IN', 'FEBRUARY', 'BUT', 'FEBRUARY', 'IN', 'PARIS', 'IS', 'SOMETIMES', 'BETTER', 'THAN', 'APRIL', 'IN', 'LONDON'] +8461-278226-0006-1639: hyp=['HE', 'WAS', 'DRAWING', 'TOWARDS', 'THE', 'CLOSE', 'OF', 'THIS', 'DELIGHTFUL', 'HONEYMOON', 'TOUR', 'AND', 'IT', 'WAS', 'A', 'BRIGHT', 'SUNSHINY', 'MORNING', 'EARLY', 'IN', 'FEBRUARY', 'BUT', 'FEBRUARY', 'IN', 'PARIS', 'IS', 'SOMETIMES', 'BETTER', 'THAN', 'APRIL', 'IN', 'LONDON'] +8461-278226-0007-1640: ref=['BUT', 'SHE', 'FIXED', 'UPON', 'A', 'PICTURE', 'WHICH', 'SHE', 'SAID', 'SHE', 'PREFERRED', 'TO', 'ANYTHING', 'SHE', 'HAD', 'SEEN', 'IN', 'THE', 'GALLERY'] +8461-278226-0007-1640: hyp=['BUT', 'SHE', 'FIXED', 'UPON', 'A', 'PICTURE', 'WHICH', 'SHE', 'SAID', 'SHE', 'PREFERRED', 'TO', 'ANYTHING', 'SHE', 'HAD', 'SEEN', 'IN', 'THE', 'GALLERY'] +8461-278226-0008-1641: ref=['PHILIP', 'JOCELYN', 'WAS', 'EXAMINING', 'SOME', 'PICTURES', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'ROOM', 'WHEN', 'HIS', 'WIFE', 'MADE', 'THIS', 'DISCOVERY'] +8461-278226-0008-1641: hyp=['PHILIP', 'JOSCELYN', 'WAS', 'EXAMINING', 'SOME', 'PICTURES', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'ROOM', 'WHEN', 'HIS', 'WIFE', 'MADE', 'THE', 'DISCOVERY'] 
+8461-278226-0009-1642: ref=['HOW', 'I', 'WISH', 'YOU', 'COULD', 'GET', 'ME', 'A', 'COPY', 'OF', 'THAT', 'PICTURE', 'PHILIP', 'LAURA', 'SAID', 'ENTREATINGLY'] +8461-278226-0009-1642: hyp=['HOW', 'I', 'WISH', 'YOU', 'COULD', 'GET', 'ME', 'A', 'COPY', 'OF', 'THAT', 'PICTURE', 'FILLIP', 'LAURA', 'SAID', 'ENTREATINGLY'] +8461-278226-0010-1643: ref=['I', 'SHOULD', 'SO', 'LIKE', 'ONE', 'TO', 'HANG', 'IN', 'MY', 'MORNING', 'ROOM', 'AT', "JOCELYN'S", 'ROCK'] +8461-278226-0010-1643: hyp=['I', 'SHOULD', 'SO', 'LIKE', 'ONE', 'TO', 'HANG', 'IN', 'MY', 'MORNING', 'ROOM', 'A', 'JOSCELYN', 'STRUCK'] +8461-278226-0011-1644: ref=['SHE', 'TURNED', 'TO', 'THE', 'FRENCH', 'ARTIST', 'PRESENTLY', 'AND', 'ASKED', 'HIM', 'WHERE', 'THE', 'ELDER', 'MISTER', 'KERSTALL', 'LIVED', 'AND', 'IF', 'THERE', 'WAS', 'ANY', 'POSSIBILITY', 'OF', 'SEEING', 'HIM'] +8461-278226-0011-1644: hyp=['SHE', 'TURNED', 'TO', 'THE', 'FRENCH', 'ARD', 'THIS', 'PRESENTLY', 'AND', 'ASKED', 'HIM', 'WHERE', 'THE', 'ELDER', 'MISTER', 'CURSON', 'LIVED', 'AND', 'IF', 'THERE', 'WAS', 'ANY', 'POSSIBILITY', 'OF', 'SEEING', 'HIM'] +8461-278226-0012-1645: ref=['THEY', 'HAVE', 'SAID', 'THAT', 'HE', 'IS', 'EVEN', 'A', 'LITTLE', 'IMBECILE', 'THAT', 'HE', 'DOES', 'NOT', 'REMEMBER', 'HIMSELF', 'OF', 'THE', 'MOST', 'COMMON', 'EVENTS', 'OF', 'HIS', 'LIFE'] +8461-278226-0012-1645: hyp=['THEY', 'HAVE', 'SAID', 'THAT', 'HE', 'IS', 'EVEN', 'A', 'LITTLE', 'IMBECILE', 'THAT', 'HE', 'DOES', 'NOT', 'REMEMBER', 'HIMSELF', 'OF', 'THE', 'MOST', 'COMMON', 'EVENTS', 'OF', 'HIS', 'LIFE'] +8461-278226-0013-1646: ref=['BUT', 'THERE', 'ARE', 'SOME', 'OTHERS', 'WHO', 'SAY', 'THAT', 'HIS', 'MEMORY', 'HAS', 'NOT', 'ALTOGETHER', 'FAILED', 'AND', 'THAT', 'HE', 'IS', 'STILL', 'ENOUGH', 'HARSHLY', 'CRITICAL', 'TOWARDS', 'THE', 'WORKS', 'OF', 'OTHERS'] +8461-278226-0013-1646: hyp=['BUT', 'THERE', 'ARE', 'SOME', 'OTHERS', 'WHO', 'SAY', 'THAT', 'HIS', 'MEMORY', 'HAS', 'NOT', 'ALTOGETHER', 'FAILED', 'AND', 'THAT', 'HE', 'STILL', 'ENOUGH', 'HARSHLY', 'CRITICAL', 'TOWARD', 'THE', 'WORKS', 'OF', 'OTHERS'] +8461-278226-0014-1647: ref=['I', "DON'T", 'THINK', 'YOU', 'WILL', 'HAVE', 'ANY', 'DIFFICULTY', 'IN', 'FINDING', 'THE', 'HOUSE'] +8461-278226-0014-1647: hyp=['I', "DON'T", 'THINK', 'YOU', 'WILL', 'HAVE', 'ANY', 'DIFFICULTY', 'IN', 'FINDING', 'THE', 'HOUSE'] +8461-278226-0015-1648: ref=['YOU', 'WILL', 'BE', 'DOING', 'ME', 'SUCH', 'A', 'FAVOUR', 'PHILIP', 'IF', "YOU'LL", 'SAY', 'YES'] +8461-278226-0015-1648: hyp=['YOU', 'WILL', 'BETRAY', 'ME', 'SUCH', 'A', 'FAVOUR', 'FELLOW', 'IF', 'YOU', 'SAY', 'YES'] +8461-281231-0000-1594: ref=['HIS', 'FOLLOWERS', 'RUSHED', 'FORWARD', 'TO', 'WHERE', 'HE', 'LAY', 'AND', 'THEIR', 'UNITED', 'FORCE', 'COMPELLING', 'THE', 'BLACK', 'KNIGHT', 'TO', 'PAUSE', 'THEY', 'DRAGGED', 'THEIR', 'WOUNDED', 'LEADER', 'WITHIN', 'THE', 'WALLS'] +8461-281231-0000-1594: hyp=['HIS', 'FOLLOWERS', 'RUSH', 'FORWARD', 'WHERE', 'HE', 'LAY', 'AND', 'THEIR', 'UNITED', 'FORCE', 'COMPELLING', 'THE', 'BLACK', 'NIGHT', 'TO', 'PAUSE', 'THEY', 'DRAGGED', 'THE', 'WOUNDED', 'LEADER', 'WITHIN', 'THE', 'WALLS'] +8461-281231-0001-1595: ref=['IT', 'WAS', 'ON', 'THEIR', 'JOURNEY', 'TO', 'THAT', 'TOWN', 'THAT', 'THEY', 'WERE', 'OVERTAKEN', 'ON', 'THE', 'ROAD', 'BY', 'CEDRIC', 'AND', 'HIS', 'PARTY', 'IN', 'WHOSE', 'COMPANY', 'THEY', 'WERE', 'AFTERWARDS', 'CARRIED', 'CAPTIVE', 'TO', 'THE', 'CASTLE', 'OF', 'TORQUILSTONE'] +8461-281231-0001-1595: hyp=['IT', 'WAS', 'ON', 'THEIR', 'JOURNEY', 'TO', 'THAT', 'TOWN', 'THAT', 'THEY', 'WERE', 'OVERTAKEN', 'ON', 'THE', 'ROAD', 'BY', 'SADRIC', 'AND', 'HIS', 
'PARTY', 'IN', 'WHOSE', 'COMPANY', 'THEY', 'WERE', 'AFTERWARDS', 'CARRIED', 'CAPTIVE', 'TO', 'THE', 'COUNCIL', 'OF', 'TORCLESTONE'] +8461-281231-0002-1596: ref=['AS', 'HE', 'LAY', 'UPON', 'HIS', 'BED', 'RACKED', 'WITH', 'PAIN', 'AND', 'MENTAL', 'AGONY', 'AND', 'FILLED', 'WITH', 'THE', 'FEAR', 'OF', 'RAPIDLY', 'APPROACHING', 'DEATH', 'HE', 'HEARD', 'A', 'VOICE', 'ADDRESS', 'HIM'] +8461-281231-0002-1596: hyp=['I', 'SEE', 'LAY', 'UPON', 'HIS', 'BED', 'RAPPED', 'WITH', 'PAIN', 'AND', 'MANTLE', 'AGONY', 'AND', 'FILLED', 'WITH', 'THE', 'FEAR', 'OF', 'RAPIDLY', 'APPROACHING', 'DEATH', 'HE', 'HEARD', 'A', 'VOICE', 'ADDRESS', 'HIM'] +8461-281231-0003-1597: ref=['WHAT', 'ART', 'THOU', 'HE', 'EXCLAIMED', 'IN', 'TERROR'] +8461-281231-0003-1597: hyp=['WHAT', 'ART', 'THOU', 'HE', 'EXCLAIMED', 'IN', 'TERROR'] +8461-281231-0004-1598: ref=['LEAVE', 'ME', 'AND', 'SEEK', 'THE', 'SAXON', 'WITCH', 'ULRICA', 'WHO', 'WAS', 'MY', 'TEMPTRESS', 'LET', 'HER', 'AS', 'WELL', 'AS', 'I', 'TASTE', 'THE', 'TORTURES', 'WHICH', 'ANTICIPATE', 'HELL'] +8461-281231-0004-1598: hyp=['LEAVE', 'ME', 'AND', 'SEEK', 'THE', 'SAXON', 'WHICH', 'OREKA', 'WHO', 'WAS', 'MY', 'TEMPTRESS', 'LET', 'HER', 'AS', 'WELL', 'AS', 'I', 'TASTE', 'THE', 'TORTURES', 'WHICH', 'ANTICIPATE', 'HELL'] +8461-281231-0005-1599: ref=['EXCLAIMED', 'THE', 'NORMAN', 'HO'] +8461-281231-0005-1599: hyp=['EXCLAIMED', 'THE', 'NORMAN', 'OH'] +8461-281231-0006-1600: ref=['REMEMBEREST', 'THOU', 'THE', 'MAGAZINE', 'OF', 'FUEL', 'THAT', 'IS', 'STORED', 'BENEATH', 'THESE', 'APARTMENTS', 'WOMAN'] +8461-281231-0006-1600: hyp=['REMEMBER', 'AS', 'THOU', 'THE', 'MAGAZINE', 'OF', 'FUEL', 'THAT', 'IS', 'STOLE', 'BENEATH', 'THESE', 'APARTMENTS', 'WOMAN'] +8461-281231-0007-1601: ref=['THEY', 'ARE', 'FAST', 'RISING', 'AT', 'LEAST', 'SAID', 'ULRICA', 'AND', 'A', 'SIGNAL', 'SHALL', 'SOON', 'WAVE', 'TO', 'WARN', 'THE', 'BESIEGERS', 'TO', 'PRESS', 'HARD', 'UPON', 'THOSE', 'WHO', 'WOULD', 'EXTINGUISH', 'THEM'] +8461-281231-0007-1601: hyp=['THEY', 'ARE', 'FAST', 'RISING', 'AT', 'LEAST', 'SAID', 'A', 'RIKA', 'AND', 'A', 'SIGNAL', 'SHALL', 'SOON', 'WAVE', 'TOWARD', 'THE', 'BESIEGERS', 'TO', 'PRESS', 'HARD', 'UPON', 'THOSE', 'WHO', 'WOULD', 'EXTINGUISH', 'THEM'] +8461-281231-0008-1602: ref=['MEANWHILE', 'THE', 'BLACK', 'KNIGHT', 'HAD', 'LED', 'HIS', 'FORCES', 'AGAIN', 'TO', 'THE', 'ATTACK', 'AND', 'SO', 'VIGOROUS', 'WAS', 'THEIR', 'ASSAULT', 'THAT', 'BEFORE', 'LONG', 'THE', 'GATE', 'OF', 'THE', 'CASTLE', 'ALONE', 'SEPARATED', 'THEM', 'FROM', 'THOSE', 'WITHIN'] +8461-281231-0008-1602: hyp=['MEANWHILE', 'THE', 'BLACK', 'KNIGHT', 'HAD', 'LED', 'HIS', 'FORCES', 'AGAIN', 'TO', 'THE', 'ATTACK', 'AND', 'SO', 'VIGOROUS', 'WAS', 'THEIR', 'ASSAULT', 'THAT', 'BEFORE', 'LONG', 'THE', 'GATE', 'OF', 'THE', 'CASTLE', 'ALONE', 'SEPARATED', 'THEM', 'FROM', 'THOSE', 'WITHIN'] +8461-281231-0009-1603: ref=['THE', 'DEFENDERS', 'FINDING', 'THE', 'CASTLE', 'TO', 'BE', 'ON', 'FIRE', 'NOW', 'DETERMINED', 'TO', 'SELL', 'THEIR', 'LIVES', 'AS', 'DEARLY', 'AS', 'THEY', 'COULD', 'AND', 'HEADED', 'BY', 'DE', 'BRACY', 'THEY', 'THREW', 'OPEN', 'THE', 'GATE', 'AND', 'WERE', 'AT', 'ONCE', 'INVOLVED', 'IN', 'A', 'TERRIFIC', 'CONFLICT', 'WITH', 'THOSE', 'OUTSIDE'] +8461-281231-0009-1603: hyp=['THE', 'DEFENDERS', 'FIND', 'IN', 'THE', 'CASTLE', 'TO', 'BE', 'ON', 'FIRE', 'NOW', 'DETERMINED', 'TO', 'SELL', 'THEIR', 'LIVES', 'AS', 'DAILY', 'AS', 'THEY', 'COULD', 'AND', 'HEADED', 'BY', 'THE', 'BRAZY', 'THEY', 'THREW', 'OPEN', 'THE', 'GATE', 'AND', 'WERE', 'AT', 'ONCE', 'INVOLVED', 'IN', 'A', 'TERRIFIC', 'CONFLICT', 'WITH', 'THOSE', 
'OUTSIDE'] +8461-281231-0010-1604: ref=['THE', 'BLACK', 'KNIGHT', 'WITH', 'PORTENTOUS', 'STRENGTH', 'FORCED', 'HIS', 'WAY', 'INWARD', 'IN', 'DESPITE', 'OF', 'DE', 'BRACY', 'AND', 'HIS', 'FOLLOWERS'] +8461-281231-0010-1604: hyp=['THE', 'BLACK', 'NIGHT', 'WITH', 'POTENTAL', 'STRENGTH', 'FORCES', 'AWAY', 'IN', 'WOOD', 'IN', 'DESPITE', 'OF', 'THE', 'BRACY', 'AND', 'HIS', 'FOLLOWERS'] +8461-281231-0011-1605: ref=['TWO', 'OF', 'THE', 'FOREMOST', 'INSTANTLY', 'FELL', 'AND', 'THE', 'REST', 'GAVE', 'WAY', 'NOTWITHSTANDING', 'ALL', 'THEIR', 'LEADERS', 'EFFORTS', 'TO', 'STOP', 'THEM'] +8461-281231-0011-1605: hyp=['TWO', 'OF', 'THE', 'FOREMOST', 'INSTANTLY', 'FELL', 'AND', 'THE', 'REST', 'GAVE', 'WAY', 'NOTWITHSTANDING', 'ALL', 'THE', "LEADER'S", 'EFFORTS', 'TO', 'STOP', 'THEM'] +8461-281231-0012-1606: ref=['THE', 'BLACK', 'KNIGHT', 'WAS', 'SOON', 'ENGAGED', 'IN', 'DESPERATE', 'COMBAT', 'WITH', 'THE', 'NORMAN', 'CHIEF', 'AND', 'THE', 'VAULTED', 'ROOF', 'OF', 'THE', 'HALL', 'RUNG', 'WITH', 'THEIR', 'FURIOUS', 'BLOWS'] +8461-281231-0012-1606: hyp=['THE', 'BLACK', 'NIGHT', 'WAS', 'SOON', 'ENGAGED', 'IN', 'DESPERATE', 'COMBAT', 'WITH', 'THE', 'NORMAN', 'CHIEF', 'AND', 'DEVOTED', 'ROOF', 'OF', 'THE', 'HALL', 'RUNG', 'WITH', 'A', 'FURIOUS', 'BLOWS'] +8461-281231-0013-1607: ref=['AT', 'LENGTH', 'DE', 'BRACY', 'FELL'] +8461-281231-0013-1607: hyp=['AT', 'LENGTH', 'THE', 'BRACEY', 'FELL'] +8461-281231-0014-1608: ref=['TELL', 'ME', 'THY', 'NAME', 'OR', 'WORK', 'THY', 'PLEASURE', 'ON', 'ME'] +8461-281231-0014-1608: hyp=['TELL', 'ME', 'THY', 'NAME', 'OR', 'WORK', 'THY', 'PLEASURE', 'ON', 'ME'] +8461-281231-0015-1609: ref=['YET', 'FIRST', 'LET', 'ME', 'SAY', 'SAID', 'DE', 'BRACY', 'WHAT', 'IT', 'IMPORTS', 'THEE', 'TO', 'KNOW'] +8461-281231-0015-1609: hyp=['YET', 'FIRST', 'LET', 'ME', 'SAY', 'SAID', 'DEBRACY', 'WHAT', 'DID', 'IMPORTS', 'THEE', 'TO', 'KNOW'] +8461-281231-0016-1610: ref=['EXCLAIMED', 'THE', 'BLACK', 'KNIGHT', 'PRISONER', 'AND', 'PERISH'] +8461-281231-0016-1610: hyp=['EXCLAIMED', 'THE', 'BLACK', 'KNIGHT', 'PRISONER', 'AND', 'PARISH'] +8461-281231-0017-1611: ref=['THE', 'LIFE', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'CASTLE', 'SHALL', 'ANSWER', 'IT', 'IF', 'A', 'HAIR', 'OF', 'HIS', 'HEAD', 'BE', 'SINGED', 'SHOW', 'ME', 'HIS', 'CHAMBER'] +8461-281231-0017-1611: hyp=['THE', 'LIFE', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'CASTLE', "SHE'LL", 'ANSWER', 'IT', 'IF', 'A', 'HAIR', 'OF', 'HIS', 'HEAD', 'BE', 'SINGED', 'SHOW', 'ME', 'HIS', 'CHAMBER'] +8461-281231-0018-1612: ref=['RAISING', 'THE', 'WOUNDED', 'MAN', 'WITH', 'EASE', 'THE', 'BLACK', 'KNIGHT', 'RUSHED', 'WITH', 'HIM', 'TO', 'THE', 'POSTERN', 'GATE', 'AND', 'HAVING', 'THERE', 'DELIVERED', 'HIS', 'BURDEN', 'TO', 'THE', 'CARE', 'OF', 'TWO', 'YEOMEN', 'HE', 'AGAIN', 'ENTERED', 'THE', 'CASTLE', 'TO', 'ASSIST', 'IN', 'THE', 'RESCUE', 'OF', 'THE', 'OTHER', 'PRISONERS'] +8461-281231-0018-1612: hyp=['RAISING', 'THE', 'WOUNDED', 'MAN', 'WITH', 'THESE', 'THE', 'BLACK', 'KNIGHT', 'RUSHED', 'WITH', 'THEM', 'TO', 'THE', 'PASTING', 'GATE', 'AND', 'HAVING', 'THERE', 'DELIVERED', 'HIS', 'BURDEN', 'TO', 'THE', 'CARE', 'OF', 'TWO', 'YEOMAN', 'HE', 'AGAIN', 'ENTERED', 'THE', 'CASTLE', 'TO', 'ASSIST', 'IN', 'THE', 'RESCUE', 'OF', 'THAT', 'A', 'PRISONERS'] +8461-281231-0019-1613: ref=['BUT', 'IN', 'OTHER', 'PARTS', 'THE', 'BESIEGERS', 'PURSUED', 'THE', 'DEFENDERS', 'OF', 'THE', 'CASTLE', 'FROM', 'CHAMBER', 'TO', 'CHAMBER', 'AND', 'SATIATED', 'IN', 'THEIR', 'BLOOD', 'THE', 'VENGEANCE', 'WHICH', 'HAD', 'LONG', 'ANIMATED', 'THEM', 'AGAINST', 'THE', 'SOLDIERS', 'OF', 'THE', 'TYRANT', 
'FRONT', 'DE', 'BOEUF'] +8461-281231-0019-1613: hyp=['BUT', 'IN', 'OTHER', 'PARTS', 'THE', 'BESIEGERS', 'PURSUED', 'THE', 'DEFENDERS', 'OF', 'THE', 'CASTLE', 'FROM', 'CHAMBER', 'TO', 'CHAMBER', 'AND', 'SATIATED', 'IN', 'THE', 'BLOOD', 'THE', 'VENGEANCE', 'WHICH', 'HAD', 'LONG', 'ANIMATED', 'THEM', 'AGAINST', 'THE', 'SOLDIERS', 'OF', 'THE', 'TYRANT', 'FRONT', 'DE', 'BOEUF'] +8461-281231-0020-1614: ref=['AS', 'THE', 'FIRE', 'COMMENCED', 'TO', 'SPREAD', 'RAPIDLY', 'THROUGH', 'ALL', 'PARTS', 'OF', 'THE', 'CASTLE', 'ULRICA', 'APPEARED', 'ON', 'ONE', 'OF', 'THE', 'TURRETS'] +8461-281231-0020-1614: hyp=['AS', 'THE', 'FIRE', 'COMMANDS', 'TO', 'SPREAD', 'RAPIDLY', 'THROUGH', 'ALL', 'PARTS', 'OF', 'THE', 'CASTLE', 'OR', 'RICA', 'APPEARED', 'ON', 'ONE', 'OF', 'THE', 'TURRETS'] +8461-281231-0021-1615: ref=['BEFORE', 'LONG', 'THE', 'TOWERING', 'FLAMES', 'HAD', 'SURMOUNTED', 'EVERY', 'OBSTRUCTION', 'AND', 'ROSE', 'TO', 'THE', 'EVENING', 'SKIES', 'ONE', 'HUGE', 'AND', 'BURNING', 'BEACON', 'SEEN', 'FAR', 'AND', 'WIDE', 'THROUGH', 'THE', 'ADJACENT', 'COUNTRY', 'TOWER', 'AFTER', 'TOWER', 'CRASHED', 'DOWN', 'WITH', 'BLAZING', 'ROOF', 'AND', 'RAFTER'] +8461-281231-0021-1615: hyp=['BEFORE', 'LONG', 'THE', 'TOWERING', 'FLAMES', 'HAD', 'SURMOUNTED', 'EVERY', 'OBSTRUCTION', 'AND', 'ROSE', 'TO', 'THE', 'EVENING', 'SKIES', 'WHEN', 'HUGE', 'AND', 'BURNING', 'BEACON', 'SEEMED', 'FAR', 'AND', 'WIDE', 'THROUGH', 'THE', 'ADJACENT', 'COUNTRY', 'TOWERED', 'AFTER', 'TOWER', 'CRASHED', 'DOWN', 'WITH', 'BLAZING', 'ROOF', 'AND', 'RAFTER'] +8461-281231-0022-1616: ref=['AT', 'LENGTH', 'WITH', 'A', 'TERRIFIC', 'CRASH', 'THE', 'WHOLE', 'TURRET', 'GAVE', 'WAY', 'AND', 'SHE', 'PERISHED', 'IN', 'THE', 'FLAMES', 'WHICH', 'HAD', 'CONSUMED', 'HER', 'TYRANT'] +8461-281231-0022-1616: hyp=['AT', 'LENGTH', 'WITH', 'A', 'TERRIFIC', 'CRASH', 'THE', 'WHOLE', 'TORR', 'GAVE', 'WAY', 'AND', 'SHE', 'PERISHED', 'IN', 'FLAMES', 'WHICH', 'I', 'CONSUMED', 'HER', 'TYRANT'] +8461-281231-0023-1617: ref=['WHEN', 'THE', 'OUTLAWS', 'HAD', 'DIVIDED', 'THE', 'SPOILS', 'WHICH', 'THEY', 'HAD', 'TAKEN', 'FROM', 'THE', 'CASTLE', 'OF', 'TORQUILSTONE', 'CEDRIC', 'PREPARED', 'TO', 'TAKE', 'HIS', 'DEPARTURE'] +8461-281231-0023-1617: hyp=['WHEN', 'THE', 'OUTLAWS', 'HAD', 'DIVIDED', 'THE', 'SPOILS', 'WHICH', 'THEY', 'HAD', 'TAKEN', 'FROM', 'THE', 'CASTLE', 'OF', 'TORKILSTONE', 'CEDRIC', 'PREPARED', 'TO', 'TAKE', 'HIS', 'DEPARTURE'] +8461-281231-0024-1618: ref=['HE', 'LEFT', 'THE', 'GALLANT', 'BAND', 'OF', 'FORESTERS', 'SORROWING', 'DEEPLY', 'FOR', 'HIS', 'LOST', 'FRIEND', 'THE', 'LORD', 'OF', 'CONINGSBURGH', 'AND', 'HE', 'AND', 'HIS', 'FOLLOWERS', 'HAD', 'SCARCE', 'DEPARTED', 'WHEN', 'A', 'PROCESSION', 'MOVED', 'SLOWLY', 'FROM', 'UNDER', 'THE', 'GREENWOOD', 'BRANCHES', 'IN', 'THE', 'DIRECTION', 'WHICH', 'HE', 'HAD', 'TAKEN', 'IN', 'THE', 'CENTRE', 'OF', 'WHICH', 'WAS', 'THE', 'CAR', 'IN', 'WHICH', 'THE', 'BODY', 'OF', 'ATHELSTANE', 'WAS', 'LAID'] +8461-281231-0024-1618: hyp=['HE', 'LEFT', 'THE', 'GALLANT', 'BAND', 'OF', 'FORESTERS', 'SORROWING', 'DEEPLY', 'FOR', 'HIS', 'LOST', 'FRIEND', 'THE', 'LORD', 'OF', 'CONNINGSBURG', 'AND', 'HE', 'AND', 'HIS', 'FOLLOWERS', 'HAD', 'SCARCE', 'DEPARTED', 'WHEN', 'A', 'PROCESSION', 'MOVED', 'SLOWLY', 'FROM', 'UNDER', 'THE', 'GREENWOOD', 'BRANCHES', 'IN', 'THE', 'DIRECTION', 'WHICH', 'HE', 'HAD', 'TAKEN', 'IN', 'THE', 'CENTRE', 'OF', 'WHICH', 'WAS', 'THE', 'CAR', 'IN', 'WHICH', 'THE', 'BODY', 'OF', 'ADDSTEIN', 'WAS', 'LAID'] +8461-281231-0025-1619: ref=['DE', 'BRACY', 'BOWED', 'LOW', 'AND', 'IN', 'SILENCE', 'THREW', 'HIMSELF', 
'UPON', 'A', 'HORSE', 'AND', 'GALLOPED', 'OFF', 'THROUGH', 'THE', 'WOOD'] +8461-281231-0025-1619: hyp=['DEBRACY', 'BOWED', 'LOW', 'AND', 'IN', 'SILENCE', 'THREW', 'HIMSELF', 'UPON', 'A', 'HORSE', 'AND', 'GALLOPED', 'OFF', 'THROUGH', 'THE', 'WOOD'] +8461-281231-0026-1620: ref=['HERE', 'IS', 'A', 'BUGLE', 'WHICH', 'AN', 'ENGLISH', 'YEOMAN', 'HAS', 'ONCE', 'WORN', 'I', 'PRAY', 'YOU', 'TO', 'KEEP', 'IT', 'AS', 'A', 'MEMORIAL', 'OF', 'YOUR', 'GALLANT', 'BEARING'] +8461-281231-0026-1620: hyp=['HERE', 'IS', 'A', 'BUGLE', 'WHICH', 'AN', 'ENGLISH', 'YEOMAN', 'HAS', 'ONCE', 'WORN', 'I', 'PRAY', 'YOU', 'TO', 'KEEP', 'IT', 'AS', 'A', 'MEMORIAL', 'OF', 'YOUR', 'GALLANT', 'BEARING'] +8461-281231-0027-1621: ref=['SO', 'SAYING', 'HE', 'MOUNTED', 'HIS', 'STRONG', 'WAR', 'HORSE', 'AND', 'RODE', 'OFF', 'THROUGH', 'THE', 'FOREST'] +8461-281231-0027-1621: hyp=['SO', 'SAYING', 'HE', 'MOUNTED', 'HIS', 'STRONG', 'WAR', 'HORSE', 'AND', 'RODE', 'OFF', 'THROUGH', 'THE', 'FOREST'] +8461-281231-0028-1622: ref=['DURING', 'ALL', 'THIS', 'TIME', 'ISAAC', 'OF', 'YORK', 'SAT', 'MOURNFULLY', 'APART', 'GRIEVING', 'FOR', 'THE', 'LOSS', 'OF', 'HIS', 'DEARLY', 'LOVED', 'DAUGHTER', 'REBECCA'] +8461-281231-0028-1622: hyp=['DURING', 'ALL', 'THIS', 'TIME', 'ISAAC', 'OF', 'YORK', 'SAT', 'MOURNFULLY', 'APART', 'GRIEVING', 'FOR', 'THE', 'LOSS', 'OF', 'HIS', 'STEELY', 'LOVED', 'DAUGHTER', 'REBECCA'] +8461-281231-0029-1623: ref=['AND', 'WITH', 'THIS', 'EPISTLE', 'THE', 'UNHAPPY', 'OLD', 'MAN', 'SET', 'OUT', 'TO', 'PROCURE', 'HIS', "DAUGHTER'S", 'LIBERATION'] +8461-281231-0029-1623: hyp=['AND', 'WITH', 'THIS', 'EPISTLE', 'THEN', 'HAPPY', 'OLD', 'MAN', 'SET', 'OUT', 'TO', 'PROCURE', 'HIS', "DAUGHTER'S", 'LIBERATION'] +8461-281231-0030-1624: ref=['THE', 'TEMPLAR', 'IS', 'FLED', 'SAID', 'DE', 'BRACY', 'IN', 'ANSWER', 'TO', 'THE', "PRINCE'S", 'EAGER', 'QUESTIONS', 'FRONT', 'DE', 'BOEUF', 'YOU', 'WILL', 'NEVER', 'SEE', 'MORE', 'AND', 'HE', 'ADDED', 'IN', 'A', 'LOW', 'AND', 'EMPHATIC', 'TONE', 'RICHARD', 'IS', 'IN', 'ENGLAND', 'I', 'HAVE', 'SEEN', 'HIM', 'AND', 'SPOKEN', 'WITH', 'HIM'] +8461-281231-0030-1624: hyp=['THE', 'TEMPLAR', 'IS', 'FLED', 'SAID', 'THE', 'BRACY', 'IN', 'ANSWER', 'TO', 'THE', "PRINCE'S", 'EAGER', 'QUESTIONS', 'FROM', 'THE', 'BIRTH', 'YOU', 'WILL', 'NEVER', 'SEE', 'MORE', 'AND', 'HE', 'ADDED', 'IN', 'A', 'LOW', 'AND', 'EMPHATIC', 'TONE', 'RICHARD', 'IS', 'AN', 'ENGLAND', 'I', 'HAVE', 'SEEN', 'HIM', 'AND', 'SPOKEN', 'WITH', 'HIM'] +8461-281231-0031-1625: ref=['HE', 'APPEALED', 'TO', 'DE', 'BRACY', 'TO', 'ASSIST', 'HIM', 'IN', 'THIS', 'PROJECT', 'AND', 'BECAME', 'AT', 'ONCE', 'DEEPLY', 'SUSPICIOUS', 'OF', 'THE', "KNIGHT'S", 'LOYALTY', 'TOWARDS', 'HIM', 'WHEN', 'HE', 'DECLINED', 'TO', 'LIFT', 'HAND', 'AGAINST', 'THE', 'MAN', 'WHO', 'HAD', 'SPARED', 'HIS', 'OWN', 'LIFE'] +8461-281231-0031-1625: hyp=['HE', 'APPEALED', 'TO', 'THE', 'BRACELE', 'TO', 'ASSIST', 'HIM', 'IN', 'HIS', 'PROJECT', 'AND', 'BECAME', 'AT', 'ONCE', 'DEEPLY', 'SUSPICIOUS', 'OF', 'THE', "NIGHT'S", 'LOYALTY', 'TOWARDS', 'HIM', 'WHEN', 'HE', 'DECLINED', 'TO', 'LIFT', 'HAND', 'AGAINST', 'THE', 'MAN', 'WHO', 'HAD', 'SPARED', 'HIS', 'OWN', 'LIFE'] +8461-281231-0032-1626: ref=['BEFORE', 'REACHING', 'HIS', 'DESTINATION', 'HE', 'WAS', 'TOLD', 'THAT', 'LUCAS', 'DE', 'BEAUMANOIR', 'THE', 'GRAND', 'MASTER', 'OF', 'THE', 'ORDER', 'OF', 'THE', 'TEMPLARS', 'WAS', 'THEN', 'ON', 'VISIT', 'TO', 'THE', 'PRECEPTORY'] +8461-281231-0032-1626: hyp=['BEFORE', 'REACHING', 'ITS', 'DESTINATION', 'HE', 'WAS', 'TOLD', 'THAT', 'LUCAS', 'THE', 'BOURMANOIR', 'THE', 'GRAND', 'MASTER', 'OF', 
'THE', 'ORDER', 'OF', 'THE', 'TEMPLARS', 'WAS', 'THEN', 'ON', 'VISIT', 'TO', 'THEIR', 'PERCEPTORY'] +8461-281231-0033-1627: ref=['HE', 'HAD', 'NOT', 'UNTIL', 'THEN', 'BEEN', 'INFORMED', 'OF', 'THE', 'PRESENCE', 'OF', 'THE', 'JEWISH', 'MAIDEN', 'IN', 'THE', 'ABODE', 'OF', 'THE', 'TEMPLARS', 'AND', 'GREAT', 'WAS', 'HIS', 'FURY', 'AND', 'INDIGNATION', 'ON', 'LEARNING', 'THAT', 'SHE', 'WAS', 'AMONGST', 'THEM'] +8461-281231-0033-1627: hyp=['HE', 'HAD', 'NOT', 'UNTIL', 'THEN', 'BEEN', 'INFORMED', 'TO', 'THE', 'PRESENCE', 'OF', 'THE', 'JEWISH', 'MAIDEN', 'IN', 'THE', 'ABODE', 'OF', 'THE', 'TEMPLARS', 'AND', 'GREAT', 'WAS', 'HIS', 'FURY', 'AND', 'INDIGNATION', 'OF', 'LEARNING', 'THAT', 'SHE', 'WAS', 'AMONGST', 'THEM'] +8461-281231-0034-1628: ref=['POOR', 'ISAAC', 'WAS', 'HURRIED', 'OFF', 'ACCORDINGLY', 'AND', 'EXPELLED', 'FROM', 'THE', 'PRECEPTORY', 'ALL', 'HIS', 'ENTREATIES', 'AND', 'EVEN', 'HIS', 'OFFERS', 'UNHEARD', 'AND', 'DISREGARDED'] +8461-281231-0034-1628: hyp=['POOR', 'ISAAC', 'WAS', 'HURRIED', 'OFF', 'ACCORDINGLY', 'AND', 'EXPELLED', 'FROM', 'THE', 'PRECEPTORY', 'ALL', 'HIS', 'ENTREATIES', 'AND', 'EVEN', 'HIS', 'OFFICE', 'UNHEARD', 'AND', 'DISREGARDED'] +8461-281231-0035-1629: ref=['THE', 'ASSURANCE', 'THAT', 'SHE', 'POSSESSED', 'SOME', 'FRIEND', 'IN', 'THIS', 'AWFUL', 'ASSEMBLY', 'GAVE', 'HER', 'COURAGE', 'TO', 'LOOK', 'AROUND', 'AND', 'TO', 'MARK', 'INTO', 'WHOSE', 'PRESENCE', 'SHE', 'HAD', 'BEEN', 'CONDUCTED'] +8461-281231-0035-1629: hyp=['THE', 'ASSURANCE', 'THAT', 'SHE', 'POSSESSED', 'SOME', 'FRIEND', 'AND', 'HIS', 'AWFUL', 'ASSEMBLY', 'GAVE', 'A', 'COURAGE', 'TO', 'LOOK', 'ROUND', 'AND', 'TO', 'MARK', 'INTO', 'WHOSE', 'PRESENCE', 'SHE', 'HAD', 'BEEN', 'CONDUCTED'] +8461-281231-0036-1630: ref=['SHE', 'GAZED', 'ACCORDINGLY', 'UPON', 'A', 'SCENE', 'WHICH', 'MIGHT', 'WELL', 'HAVE', 'STRUCK', 'TERROR', 'INTO', 'A', 'BOLDER', 'HEART', 'THAN', 'HERS'] +8461-281231-0036-1630: hyp=['SHE', 'GAZED', 'ACCORDINGLY', 'UPON', 'A', 'SCENE', 'WHICH', 'MIGHT', 'WELL', 'HAVE', 'STRUCK', 'TERROR', 'INTO', 'A', 'BOLDER', 'HEART', 'THAN', 'HERS'] +8461-281231-0037-1631: ref=['AT', 'HIS', 'FEET', 'WAS', 'PLACED', 'A', 'TABLE', 'OCCUPIED', 'BY', 'TWO', 'SCRIBES', 'WHOSE', 'DUTY', 'IT', 'WAS', 'TO', 'RECORD', 'THE', 'PROCEEDINGS', 'OF', 'THE', 'DAY'] +8461-281231-0037-1631: hyp=['AT', 'HIS', 'FEET', 'WAS', 'PLACED', 'THE', 'TABLE', 'OCCUPIED', 'BY', 'TWO', 'SCRIBES', 'WHOSE', 'DUTY', 'WAS', 'TO', 'RECORD', 'THE', 'PROCEEDINGS', 'OF', 'THE', 'DAY'] +8461-281231-0038-1632: ref=['THE', 'PRECEPTORS', 'OF', 'WHOM', 'THERE', 'WERE', 'FOUR', 'PRESENT', 'OCCUPIED', 'SEATS', 'BEHIND', 'THEIR', 'SUPERIORS', 'AND', 'BEHIND', 'THEM', 'STOOD', 'THE', 'ESQUIRES', 'OF', 'THE', 'ORDER', 'ROBED', 'IN', 'WHITE'] +8461-281231-0038-1632: hyp=['THE', 'PRECEPTORS', 'OF', 'WHOM', 'THEY', 'WERE', 'FOUR', 'PRESENT', 'OCCUPIED', 'SEATS', 'BEHIND', 'THE', 'SUPERIORS', 'AND', 'BEHIND', 'THEM', 'STOOD', 'THE', 'ESQUIRES', 'OF', 'THE', 'ORDER', 'ROPED', 'IN', 'WHITE'] diff --git a/log/greedy_search/wer-summary-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt b/log/greedy_search/wer-summary-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..2a86ed6e882e8fe0ce0e174690f23ebabbfd483b --- /dev/null +++ 
b/log/greedy_search/wer-summary-test-clean-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt @@ -0,0 +1,2 @@ +settings WER +greedy_search 3.94 diff --git a/log/greedy_search/wer-summary-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt b/log/greedy_search/wer-summary-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..71a943baa57387e87cf4a4091905896eeeddb7e5 --- /dev/null +++ b/log/greedy_search/wer-summary-test-other-greedy_search-epoch-30-avg-9-streaming-chunk-size-32-context-2-max-sym-per-frame-1-use-averaged-model.txt @@ -0,0 +1,2 @@ +settings WER +greedy_search 9.79 diff --git a/log/log-train-2023-02-05-17-58-35-0 b/log/log-train-2023-02-05-17-58-35-0 new file mode 100644 index 0000000000000000000000000000000000000000..a8e8df3fce38ee4d2be657e3eed35f419e963b82 --- /dev/null +++ b/log/log-train-2023-02-05-17-58-35-0 @@ -0,0 +1,25165 @@ +2023-02-05 17:58:35,365 INFO [train.py:973] (0/4) Training started +2023-02-05 17:58:35,371 INFO [train.py:983] (0/4) Device: cuda:0 +2023-02-05 17:58:35,412 INFO [train.py:992] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n07', 'IP address': '10.1.7.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 
'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-05 17:58:35,412 INFO [train.py:994] (0/4) About to create model +2023-02-05 17:58:36,048 INFO [zipformer.py:402] (0/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-05 17:58:36,065 INFO [train.py:998] (0/4) Number of model parameters: 20697573 +2023-02-05 17:58:51,140 INFO [train.py:1013] (0/4) Using DDP +2023-02-05 17:58:51,426 INFO [asr_datamodule.py:420] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-05 17:58:52,644 INFO [asr_datamodule.py:224] (0/4) Enable MUSAN +2023-02-05 17:58:52,645 INFO [asr_datamodule.py:225] (0/4) About to get Musan cuts +2023-02-05 17:58:54,428 INFO [asr_datamodule.py:249] (0/4) Enable SpecAugment +2023-02-05 17:58:54,428 INFO [asr_datamodule.py:250] (0/4) Time warp factor: 80 +2023-02-05 17:58:54,428 INFO [asr_datamodule.py:260] (0/4) Num frame mask: 10 +2023-02-05 17:58:54,428 INFO [asr_datamodule.py:273] (0/4) About to create train dataset +2023-02-05 17:58:54,428 INFO [asr_datamodule.py:300] (0/4) Using DynamicBucketingSampler. +2023-02-05 17:58:54,448 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:58:57,424 INFO [asr_datamodule.py:316] (0/4) About to create train dataloader +2023-02-05 17:58:57,424 INFO [asr_datamodule.py:430] (0/4) About to get dev-clean cuts +2023-02-05 17:58:57,425 INFO [asr_datamodule.py:437] (0/4) About to get dev-other cuts +2023-02-05 17:58:57,426 INFO [asr_datamodule.py:347] (0/4) About to create dev dataset +2023-02-05 17:58:57,789 INFO [asr_datamodule.py:364] (0/4) About to create dev dataloader +2023-02-05 17:59:07,110 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:59:11,984 INFO [train.py:901] (0/4) Epoch 1, batch 0, loss[loss=7.062, simple_loss=6.39, pruned_loss=6.707, over 7704.00 frames. ], tot_loss[loss=7.062, simple_loss=6.39, pruned_loss=6.707, over 7704.00 frames. ], batch size: 18, lr: 2.50e-02, grad_scale: 2.0 +2023-02-05 17:59:11,985 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 17:59:24,177 INFO [train.py:935] (0/4) Epoch 1, validation: loss=6.888, simple_loss=6.229, pruned_loss=6.575, over 944034.00 frames. +2023-02-05 17:59:24,178 INFO [train.py:936] (0/4) Maximum memory allocated so far is 5748MB +2023-02-05 17:59:28,621 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=45.43 vs. limit=5.0 +2023-02-05 17:59:37,492 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=28.71 vs. limit=5.0 +2023-02-05 17:59:37,730 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 17:59:40,349 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=6.92 vs. limit=2.0 +2023-02-05 17:59:54,276 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=80.10 vs. limit=5.0 +2023-02-05 17:59:55,490 INFO [train.py:901] (0/4) Epoch 1, batch 50, loss[loss=1.445, simple_loss=1.28, pruned_loss=1.472, over 8763.00 frames. ], tot_loss[loss=2.165, simple_loss=1.956, pruned_loss=2.001, over 359569.46 frames. 
], batch size: 30, lr: 2.75e-02, grad_scale: 0.25 +2023-02-05 17:59:56,133 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:06,234 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=22.09 vs. limit=2.0 +2023-02-05 18:00:11,291 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 18:00:13,725 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:00:28,699 INFO [train.py:901] (0/4) Epoch 1, batch 100, loss[loss=1.19, simple_loss=1.018, pruned_loss=1.361, over 8101.00 frames. ], tot_loss[loss=1.649, simple_loss=1.468, pruned_loss=1.625, over 639424.52 frames. ], batch size: 23, lr: 3.00e-02, grad_scale: 0.0625 +2023-02-05 18:00:28,820 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:32,361 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 18:00:32,814 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.087e+01 6.689e+01 1.862e+02 6.030e+02 6.185e+04, threshold=3.723e+02, percent-clipped=0.0 +2023-02-05 18:01:00,488 INFO [train.py:901] (0/4) Epoch 1, batch 150, loss[loss=1.051, simple_loss=0.8963, pruned_loss=1.124, over 8474.00 frames. ], tot_loss[loss=1.41, simple_loss=1.239, pruned_loss=1.441, over 856513.68 frames. ], batch size: 25, lr: 3.25e-02, grad_scale: 0.0625 +2023-02-05 18:01:02,597 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=40.01 vs. limit=5.0 +2023-02-05 18:01:18,442 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=86.14 vs. limit=5.0 +2023-02-05 18:01:27,981 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=3.24 vs. limit=2.0 +2023-02-05 18:01:34,596 INFO [train.py:901] (0/4) Epoch 1, batch 200, loss[loss=1, simple_loss=0.8467, pruned_loss=1.031, over 8478.00 frames. ], tot_loss[loss=1.27, simple_loss=1.106, pruned_loss=1.306, over 1025749.67 frames. ], batch size: 25, lr: 3.50e-02, grad_scale: 0.125 +2023-02-05 18:01:37,992 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.848e+01 5.119e+01 6.630e+01 8.708e+01 3.236e+02, threshold=1.326e+02, percent-clipped=1.0 +2023-02-05 18:01:45,609 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.52 vs. limit=2.0 +2023-02-05 18:01:50,414 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=17.46 vs. limit=5.0 +2023-02-05 18:02:05,438 INFO [train.py:901] (0/4) Epoch 1, batch 250, loss[loss=0.8579, simple_loss=0.7197, pruned_loss=0.861, over 7421.00 frames. ], tot_loss[loss=1.182, simple_loss=1.021, pruned_loss=1.209, over 1155566.62 frames. ], batch size: 17, lr: 3.75e-02, grad_scale: 0.125 +2023-02-05 18:02:14,823 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 18:02:22,944 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 18:02:23,769 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=11.14 vs. limit=5.0 +2023-02-05 18:02:37,913 INFO [train.py:901] (0/4) Epoch 1, batch 300, loss[loss=0.9402, simple_loss=0.7819, pruned_loss=0.9226, over 8249.00 frames. 
], tot_loss[loss=1.132, simple_loss=0.9702, pruned_loss=1.146, over 1266587.67 frames. ], batch size: 24, lr: 4.00e-02, grad_scale: 0.25 +2023-02-05 18:02:42,325 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=306.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:02:42,690 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.041e+01 5.570e+01 7.201e+01 9.677e+01 1.807e+02, threshold=1.440e+02, percent-clipped=6.0 +2023-02-05 18:02:46,523 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.09 vs. limit=2.0 +2023-02-05 18:02:47,403 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=314.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:03:10,258 INFO [train.py:901] (0/4) Epoch 1, batch 350, loss[loss=0.9084, simple_loss=0.7508, pruned_loss=0.8663, over 7651.00 frames. ], tot_loss[loss=1.089, simple_loss=0.9264, pruned_loss=1.089, over 1341372.47 frames. ], batch size: 19, lr: 4.25e-02, grad_scale: 0.25 +2023-02-05 18:03:30,777 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=5.41 vs. limit=5.0 +2023-02-05 18:03:42,314 INFO [train.py:901] (0/4) Epoch 1, batch 400, loss[loss=0.9906, simple_loss=0.8123, pruned_loss=0.9259, over 8327.00 frames. ], tot_loss[loss=1.061, simple_loss=0.8956, pruned_loss=1.047, over 1406491.97 frames. ], batch size: 26, lr: 4.50e-02, grad_scale: 0.5 +2023-02-05 18:03:44,609 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=405.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:03:45,466 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 4.847e+01 5.714e+01 6.661e+01 8.261e+01 1.252e+02, threshold=1.332e+02, percent-clipped=0.0 +2023-02-05 18:03:55,280 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=421.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:04:11,512 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=445.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:04:15,517 INFO [train.py:901] (0/4) Epoch 1, batch 450, loss[loss=0.9387, simple_loss=0.766, pruned_loss=0.8553, over 8087.00 frames. ], tot_loss[loss=1.035, simple_loss=0.8675, pruned_loss=1.006, over 1448980.14 frames. ], batch size: 21, lr: 4.75e-02, grad_scale: 0.5 +2023-02-05 18:04:18,234 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6355, 5.6725, 5.6731, 5.6728, 5.6732, 5.6732, 5.6708, 5.6731], + device='cuda:0'), covar=tensor([0.0025, 0.0032, 0.0029, 0.0036, 0.0031, 0.0039, 0.0033, 0.0036], + device='cuda:0'), in_proj_covar=tensor([0.0012, 0.0013, 0.0013, 0.0014, 0.0012, 0.0013, 0.0014, 0.0013], + device='cuda:0'), out_proj_covar=tensor([8.7666e-06, 9.0568e-06, 9.0095e-06, 8.9489e-06, 9.1031e-06, 8.8468e-06, + 8.8892e-06, 8.9351e-06], device='cuda:0') +2023-02-05 18:04:25,496 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3666, 2.3279, 4.4231, 4.4585, 4.5501, 3.8236, 3.8460, 4.3119], + device='cuda:0'), covar=tensor([0.0142, 0.0450, 0.0182, 0.0136, 0.0102, 0.0175, 0.0308, 0.0158], + device='cuda:0'), in_proj_covar=tensor([0.0014, 0.0015, 0.0015, 0.0015, 0.0015, 0.0015, 0.0016, 0.0014], + device='cuda:0'), out_proj_covar=tensor([9.2644e-06, 9.7546e-06, 9.5960e-06, 9.5450e-06, 9.3455e-06, 9.5027e-06, + 9.6717e-06, 9.3750e-06], device='cuda:0') +2023-02-05 18:04:45,729 INFO [train.py:901] (0/4) Epoch 1, batch 500, loss[loss=1.024, simple_loss=0.8321, pruned_loss=0.9099, over 8239.00 frames. 
], tot_loss[loss=1.016, simple_loss=0.8458, pruned_loss=0.9703, over 1484599.60 frames. ], batch size: 24, lr: 4.99e-02, grad_scale: 1.0 +2023-02-05 18:04:49,472 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.283e+01 6.268e+01 7.626e+01 9.977e+01 2.238e+02, threshold=1.525e+02, percent-clipped=10.0 +2023-02-05 18:05:04,099 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=10.81 vs. limit=5.0 +2023-02-05 18:05:16,930 INFO [train.py:901] (0/4) Epoch 1, batch 550, loss[loss=0.8317, simple_loss=0.6842, pruned_loss=0.6948, over 7420.00 frames. ], tot_loss[loss=1, simple_loss=0.8295, pruned_loss=0.9346, over 1512549.36 frames. ], batch size: 17, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:22,218 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=560.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:33,864 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=580.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:39,251 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=586.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:47,854 INFO [train.py:901] (0/4) Epoch 1, batch 600, loss[loss=0.933, simple_loss=0.774, pruned_loss=0.7431, over 8364.00 frames. ], tot_loss[loss=0.9849, simple_loss=0.8158, pruned_loss=0.8952, over 1534391.37 frames. ], batch size: 26, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:51,149 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 5.986e+01 8.101e+01 1.064e+02 1.512e+02 3.340e+02, threshold=2.128e+02, percent-clipped=22.0 +2023-02-05 18:05:51,940 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=608.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:57,480 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 18:06:15,545 INFO [train.py:901] (0/4) Epoch 1, batch 650, loss[loss=0.7697, simple_loss=0.6433, pruned_loss=0.5881, over 7413.00 frames. ], tot_loss[loss=0.9651, simple_loss=0.8001, pruned_loss=0.8506, over 1553972.71 frames. 
], batch size: 17, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:16,667 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6573, 3.2249, 4.2544, 3.3797, 3.9081, 4.0795, 4.1592, 4.2451], + device='cuda:0'), covar=tensor([0.3645, 0.1044, 0.0146, 0.1674, 0.1096, 0.0480, 0.0314, 0.0163], + device='cuda:0'), in_proj_covar=tensor([0.0015, 0.0015, 0.0015, 0.0015, 0.0014, 0.0014, 0.0014, 0.0014], + device='cuda:0'), out_proj_covar=tensor([1.0089e-05, 1.0593e-05, 9.6553e-06, 1.0564e-05, 9.8548e-06, 9.6059e-06, + 9.4800e-06, 9.3276e-06], device='cuda:0') +2023-02-05 18:06:18,948 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0317, 0.9921, 1.2374, 1.4060, 1.0335, 1.0047, 1.2737, 1.1100], + device='cuda:0'), covar=tensor([0.6608, 0.9232, 0.5610, 0.3609, 0.4855, 0.6556, 0.4504, 0.5955], + device='cuda:0'), in_proj_covar=tensor([0.0043, 0.0048, 0.0040, 0.0036, 0.0039, 0.0050, 0.0039, 0.0043], + device='cuda:0'), out_proj_covar=tensor([2.8100e-05, 3.2251e-05, 2.9394e-05, 2.2395e-05, 2.5689e-05, 2.9535e-05, + 2.6128e-05, 2.7634e-05], device='cuda:0') +2023-02-05 18:06:20,640 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=658.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:31,062 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=677.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:44,419 INFO [train.py:901] (0/4) Epoch 1, batch 700, loss[loss=0.8819, simple_loss=0.7296, pruned_loss=0.6744, over 8236.00 frames. ], tot_loss[loss=0.9394, simple_loss=0.7808, pruned_loss=0.8021, over 1567608.44 frames. ], batch size: 22, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:45,058 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=702.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:48,203 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 2.109e+02 3.132e+02 4.412e+02 1.990e+03, threshold=6.264e+02, percent-clipped=73.0 +2023-02-05 18:07:14,466 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=749.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:07:15,374 INFO [train.py:901] (0/4) Epoch 1, batch 750, loss[loss=0.6826, simple_loss=0.5745, pruned_loss=0.493, over 7263.00 frames. ], tot_loss[loss=0.9109, simple_loss=0.7595, pruned_loss=0.7538, over 1579447.87 frames. ], batch size: 16, lr: 4.97e-02, grad_scale: 1.0 +2023-02-05 18:07:25,628 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 18:07:26,836 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=773.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:07:32,315 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 18:07:43,634 INFO [train.py:901] (0/4) Epoch 1, batch 800, loss[loss=0.7957, simple_loss=0.6825, pruned_loss=0.5425, over 8486.00 frames. ], tot_loss[loss=0.8849, simple_loss=0.7407, pruned_loss=0.7095, over 1589739.26 frames. ], batch size: 28, lr: 4.97e-02, grad_scale: 2.0 +2023-02-05 18:07:46,609 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.528e+02 3.354e+02 4.455e+02 1.086e+03, threshold=6.708e+02, percent-clipped=4.0 +2023-02-05 18:07:46,898 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.03 vs. 
limit=2.0 +2023-02-05 18:07:51,294 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=816.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:05,152 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=841.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:11,179 INFO [train.py:901] (0/4) Epoch 1, batch 850, loss[loss=0.8285, simple_loss=0.7108, pruned_loss=0.5569, over 8454.00 frames. ], tot_loss[loss=0.8578, simple_loss=0.7211, pruned_loss=0.6672, over 1597597.96 frames. ], batch size: 27, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:22,413 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=864.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:22,881 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=865.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:42,859 INFO [train.py:901] (0/4) Epoch 1, batch 900, loss[loss=0.6883, simple_loss=0.5921, pruned_loss=0.4541, over 7689.00 frames. ], tot_loss[loss=0.8311, simple_loss=0.7016, pruned_loss=0.628, over 1599772.90 frames. ], batch size: 18, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:46,413 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 3.070e+02 3.818e+02 4.702e+02 7.623e+02, threshold=7.636e+02, percent-clipped=5.0 +2023-02-05 18:08:55,002 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1197, 0.7853, 1.2201, 1.6118, 1.0989, 1.0041, 1.3250, 1.3545], + device='cuda:0'), covar=tensor([1.7977, 2.5836, 1.4440, 0.9846, 1.5354, 1.5254, 1.7958, 1.9836], + device='cuda:0'), in_proj_covar=tensor([0.0090, 0.0087, 0.0081, 0.0075, 0.0081, 0.0089, 0.0091, 0.0095], + device='cuda:0'), out_proj_covar=tensor([5.9339e-05, 6.2556e-05, 5.7150e-05, 4.4411e-05, 5.6099e-05, 5.6563e-05, + 6.1271e-05, 6.3668e-05], device='cuda:0') +2023-02-05 18:08:55,935 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=924.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:58,993 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=930.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:10,100 INFO [train.py:901] (0/4) Epoch 1, batch 950, loss[loss=0.7954, simple_loss=0.6831, pruned_loss=0.5202, over 8488.00 frames. ], tot_loss[loss=0.8075, simple_loss=0.6847, pruned_loss=0.5933, over 1600496.14 frames. ], batch size: 29, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:09:10,749 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=952.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:16,849 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.15 vs. limit=2.0 +2023-02-05 18:09:18,365 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-05 18:09:26,439 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 18:09:37,675 INFO [train.py:901] (0/4) Epoch 1, batch 1000, loss[loss=0.7615, simple_loss=0.6534, pruned_loss=0.4934, over 8560.00 frames. ], tot_loss[loss=0.7857, simple_loss=0.6689, pruned_loss=0.5628, over 1600277.08 frames. 
], batch size: 31, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:09:40,947 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 3.215e+02 4.159e+02 4.799e+02 1.770e+03, threshold=8.319e+02, percent-clipped=6.0 +2023-02-05 18:09:49,495 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.82 vs. limit=2.0 +2023-02-05 18:09:52,900 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1029.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:09:53,918 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 18:09:59,173 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1039.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:59,970 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 18:10:02,610 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1045.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:05,083 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 18:10:05,584 INFO [train.py:901] (0/4) Epoch 1, batch 1050, loss[loss=0.7116, simple_loss=0.6135, pruned_loss=0.4525, over 8238.00 frames. ], tot_loss[loss=0.7675, simple_loss=0.6559, pruned_loss=0.5365, over 1602544.88 frames. ], batch size: 22, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:10:07,185 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1054.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:14,060 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1067.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:33,037 INFO [train.py:901] (0/4) Epoch 1, batch 1100, loss[loss=0.7241, simple_loss=0.6334, pruned_loss=0.4451, over 8350.00 frames. ], tot_loss[loss=0.7482, simple_loss=0.6421, pruned_loss=0.511, over 1607440.84 frames. ], batch size: 24, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:10:36,085 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.463e+02 4.480e+02 5.452e+02 1.232e+03, threshold=8.959e+02, percent-clipped=3.0 +2023-02-05 18:10:43,721 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1120.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:56,853 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1145.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:59,912 INFO [train.py:901] (0/4) Epoch 1, batch 1150, loss[loss=0.6159, simple_loss=0.5411, pruned_loss=0.3729, over 8025.00 frames. ], tot_loss[loss=0.7292, simple_loss=0.6287, pruned_loss=0.4871, over 1608944.87 frames. ], batch size: 22, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:11:01,596 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 18:11:11,751 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1171.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:11:27,730 INFO [train.py:901] (0/4) Epoch 1, batch 1200, loss[loss=0.7309, simple_loss=0.6416, pruned_loss=0.4403, over 8489.00 frames. ], tot_loss[loss=0.7181, simple_loss=0.6218, pruned_loss=0.4697, over 1610304.18 frames. 
], batch size: 29, lr: 4.93e-02, grad_scale: 4.0 +2023-02-05 18:11:30,970 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.424e+02 4.173e+02 5.178e+02 8.029e+02, threshold=8.346e+02, percent-clipped=0.0 +2023-02-05 18:11:32,131 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1209.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:11:56,781 INFO [train.py:901] (0/4) Epoch 1, batch 1250, loss[loss=0.6768, simple_loss=0.5954, pruned_loss=0.4037, over 8067.00 frames. ], tot_loss[loss=0.7047, simple_loss=0.6128, pruned_loss=0.4522, over 1614248.95 frames. ], batch size: 21, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:21,152 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1295.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:24,264 INFO [train.py:901] (0/4) Epoch 1, batch 1300, loss[loss=0.6242, simple_loss=0.5567, pruned_loss=0.3627, over 8128.00 frames. ], tot_loss[loss=0.6926, simple_loss=0.6043, pruned_loss=0.437, over 1613115.47 frames. ], batch size: 22, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:24,443 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1301.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:27,427 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 3.917e+02 4.747e+02 6.152e+02 9.080e+02, threshold=9.493e+02, percent-clipped=1.0 +2023-02-05 18:12:34,702 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1320.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:36,281 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1323.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:36,743 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1324.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:37,927 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1326.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:51,928 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1348.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:53,375 INFO [train.py:901] (0/4) Epoch 1, batch 1350, loss[loss=0.6582, simple_loss=0.5873, pruned_loss=0.3804, over 8238.00 frames. ], tot_loss[loss=0.6793, simple_loss=0.5948, pruned_loss=0.4219, over 1611014.27 frames. ], batch size: 22, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:22,443 INFO [train.py:901] (0/4) Epoch 1, batch 1400, loss[loss=0.5712, simple_loss=0.5087, pruned_loss=0.3294, over 7676.00 frames. ], tot_loss[loss=0.6715, simple_loss=0.5899, pruned_loss=0.4111, over 1618952.52 frames. 
], batch size: 18, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:25,824 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.466e+02 4.520e+02 5.912e+02 1.396e+03, threshold=9.040e+02, percent-clipped=6.0 +2023-02-05 18:13:39,765 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9694, 0.9354, 1.0524, 1.2808, 0.8859, 0.7541, 0.7290, 1.2804], + device='cuda:0'), covar=tensor([0.9348, 0.9147, 0.7403, 0.3867, 0.9678, 1.0891, 0.9906, 0.7617], + device='cuda:0'), in_proj_covar=tensor([0.0139, 0.0132, 0.0124, 0.0102, 0.0155, 0.0151, 0.0146, 0.0142], + device='cuda:0'), out_proj_covar=tensor([9.5277e-05, 9.4346e-05, 8.9994e-05, 6.2316e-05, 1.0979e-04, 1.0418e-04, + 1.0368e-04, 9.9812e-05], device='cuda:0') +2023-02-05 18:13:50,937 INFO [train.py:901] (0/4) Epoch 1, batch 1450, loss[loss=0.5333, simple_loss=0.5039, pruned_loss=0.2818, over 8094.00 frames. ], tot_loss[loss=0.6617, simple_loss=0.5835, pruned_loss=0.3995, over 1615644.80 frames. ], batch size: 21, lr: 4.90e-02, grad_scale: 4.0 +2023-02-05 18:13:51,607 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1452.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:13:54,972 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 18:14:21,306 INFO [train.py:901] (0/4) Epoch 1, batch 1500, loss[loss=0.6424, simple_loss=0.5767, pruned_loss=0.3638, over 8516.00 frames. ], tot_loss[loss=0.6531, simple_loss=0.5784, pruned_loss=0.3891, over 1621508.33 frames. ], batch size: 26, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:14:24,736 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.084e+02 4.059e+02 4.884e+02 5.820e+02 1.191e+03, threshold=9.769e+02, percent-clipped=4.0 +2023-02-05 18:14:29,246 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1515.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:14:50,473 INFO [train.py:901] (0/4) Epoch 1, batch 1550, loss[loss=0.6685, simple_loss=0.5763, pruned_loss=0.3956, over 7576.00 frames. ], tot_loss[loss=0.6469, simple_loss=0.5743, pruned_loss=0.3814, over 1620758.52 frames. ], batch size: 73, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:14:54,640 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-05 18:15:08,618 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1580.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:10,869 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1584.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:20,753 INFO [train.py:901] (0/4) Epoch 1, batch 1600, loss[loss=0.5118, simple_loss=0.4812, pruned_loss=0.272, over 7547.00 frames. ], tot_loss[loss=0.6388, simple_loss=0.5685, pruned_loss=0.3731, over 1621122.73 frames. ], batch size: 18, lr: 4.88e-02, grad_scale: 8.0 +2023-02-05 18:15:23,969 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1605.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:24,967 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.844e+02 4.893e+02 6.465e+02 8.597e+02 2.177e+03, threshold=1.293e+03, percent-clipped=12.0 +2023-02-05 18:15:32,188 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. 
limit=2.0 +2023-02-05 18:15:37,780 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1629.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:38,281 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1630.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:50,679 INFO [train.py:901] (0/4) Epoch 1, batch 1650, loss[loss=0.6551, simple_loss=0.5707, pruned_loss=0.3795, over 6814.00 frames. ], tot_loss[loss=0.6306, simple_loss=0.5626, pruned_loss=0.365, over 1614061.40 frames. ], batch size: 71, lr: 4.87e-02, grad_scale: 8.0 +2023-02-05 18:16:21,954 INFO [train.py:901] (0/4) Epoch 1, batch 1700, loss[loss=0.5567, simple_loss=0.5041, pruned_loss=0.3084, over 7698.00 frames. ], tot_loss[loss=0.6194, simple_loss=0.5553, pruned_loss=0.3548, over 1609010.73 frames. ], batch size: 18, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:16:25,350 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.633e+02 4.287e+02 5.230e+02 6.455e+02 2.107e+03, threshold=1.046e+03, percent-clipped=2.0 +2023-02-05 18:16:49,137 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4064, 2.5674, 1.5729, 2.3622, 2.3275, 2.1960, 2.0051, 2.7086], + device='cuda:0'), covar=tensor([0.3527, 0.3549, 0.4976, 0.3033, 0.4125, 0.4216, 0.3629, 0.2976], + device='cuda:0'), in_proj_covar=tensor([0.0118, 0.0108, 0.0094, 0.0100, 0.0136, 0.0113, 0.0099, 0.0114], + device='cuda:0'), out_proj_covar=tensor([8.9427e-05, 7.7024e-05, 7.0985e-05, 7.5784e-05, 9.8553e-05, 8.2951e-05, + 7.6344e-05, 8.3422e-05], device='cuda:0') +2023-02-05 18:16:51,249 INFO [train.py:901] (0/4) Epoch 1, batch 1750, loss[loss=0.6061, simple_loss=0.5627, pruned_loss=0.3264, over 8558.00 frames. ], tot_loss[loss=0.6146, simple_loss=0.5525, pruned_loss=0.3493, over 1613329.64 frames. ], batch size: 31, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:17:18,058 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1796.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:17:21,113 INFO [train.py:901] (0/4) Epoch 1, batch 1800, loss[loss=0.5207, simple_loss=0.479, pruned_loss=0.2827, over 7800.00 frames. ], tot_loss[loss=0.6089, simple_loss=0.5491, pruned_loss=0.3434, over 1610477.81 frames. ], batch size: 19, lr: 4.85e-02, grad_scale: 8.0 +2023-02-05 18:17:24,722 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.688e+02 4.554e+02 5.596e+02 6.733e+02 1.418e+03, threshold=1.119e+03, percent-clipped=4.0 +2023-02-05 18:17:52,117 INFO [train.py:901] (0/4) Epoch 1, batch 1850, loss[loss=0.5165, simple_loss=0.4981, pruned_loss=0.2669, over 8496.00 frames. ], tot_loss[loss=0.6049, simple_loss=0.5472, pruned_loss=0.3387, over 1617370.65 frames. ], batch size: 26, lr: 4.84e-02, grad_scale: 8.0 +2023-02-05 18:17:55,051 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1856.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:06,750 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1875.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:13,284 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1886.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:18:14,328 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:21,902 INFO [train.py:901] (0/4) Epoch 1, batch 1900, loss[loss=0.5806, simple_loss=0.5413, pruned_loss=0.3104, over 8046.00 frames. 
], tot_loss[loss=0.5965, simple_loss=0.5426, pruned_loss=0.3312, over 1616765.15 frames. ], batch size: 22, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:25,476 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.326e+02 4.483e+02 5.242e+02 7.443e+02 2.270e+03, threshold=1.048e+03, percent-clipped=7.0 +2023-02-05 18:18:27,923 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1911.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:27,934 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1911.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:18:37,725 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:45,004 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 18:18:47,529 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2294, 1.7132, 3.1471, 2.2438, 3.0025, 2.8859, 2.8379, 2.8852], + device='cuda:0'), covar=tensor([0.0199, 0.1875, 0.0246, 0.0525, 0.0236, 0.0229, 0.0289, 0.0322], + device='cuda:0'), in_proj_covar=tensor([0.0053, 0.0148, 0.0070, 0.0088, 0.0071, 0.0070, 0.0079, 0.0088], + device='cuda:0'), out_proj_covar=tensor([3.1692e-05, 9.3882e-05, 4.2070e-05, 5.8846e-05, 4.0672e-05, 3.9936e-05, + 4.6738e-05, 5.2428e-05], device='cuda:0') +2023-02-05 18:18:52,617 INFO [train.py:901] (0/4) Epoch 1, batch 1950, loss[loss=0.6157, simple_loss=0.5609, pruned_loss=0.3358, over 8607.00 frames. ], tot_loss[loss=0.5902, simple_loss=0.5384, pruned_loss=0.3257, over 1614127.86 frames. ], batch size: 31, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:55,546 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 18:19:05,701 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:19:11,330 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 18:19:13,569 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 18:19:22,194 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-2000.pt +2023-02-05 18:19:23,718 INFO [train.py:901] (0/4) Epoch 1, batch 2000, loss[loss=0.5396, simple_loss=0.5064, pruned_loss=0.2864, over 8025.00 frames. ], tot_loss[loss=0.5858, simple_loss=0.5364, pruned_loss=0.3213, over 1617582.26 frames. ], batch size: 22, lr: 4.82e-02, grad_scale: 8.0 +2023-02-05 18:19:27,549 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.734e+02 4.600e+02 5.655e+02 7.771e+02 1.691e+03, threshold=1.131e+03, percent-clipped=5.0 +2023-02-05 18:19:50,353 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2043.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:19:56,664 INFO [train.py:901] (0/4) Epoch 1, batch 2050, loss[loss=0.6043, simple_loss=0.5602, pruned_loss=0.3242, over 8343.00 frames. ], tot_loss[loss=0.5768, simple_loss=0.5311, pruned_loss=0.3141, over 1620229.34 frames. 
], batch size: 26, lr: 4.81e-02, grad_scale: 8.0 +2023-02-05 18:20:03,236 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7805, 1.8276, 1.8462, 1.9054, 1.5613, 1.7554, 0.6695, 1.0874], + device='cuda:0'), covar=tensor([0.1339, 0.0623, 0.0811, 0.0911, 0.1394, 0.0983, 0.2971, 0.1474], + device='cuda:0'), in_proj_covar=tensor([0.0090, 0.0073, 0.0077, 0.0083, 0.0098, 0.0074, 0.0120, 0.0094], + device='cuda:0'), out_proj_covar=tensor([6.1794e-05, 4.9907e-05, 5.0106e-05, 5.7602e-05, 7.0531e-05, 4.6409e-05, + 8.3969e-05, 6.6210e-05], device='cuda:0') +2023-02-05 18:20:21,043 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2088.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:20:29,073 INFO [train.py:901] (0/4) Epoch 1, batch 2100, loss[loss=0.5244, simple_loss=0.4981, pruned_loss=0.2754, over 7913.00 frames. ], tot_loss[loss=0.5691, simple_loss=0.5268, pruned_loss=0.308, over 1620915.09 frames. ], batch size: 20, lr: 4.80e-02, grad_scale: 16.0 +2023-02-05 18:20:32,718 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.532e+02 4.654e+02 5.875e+02 8.240e+02 2.515e+03, threshold=1.175e+03, percent-clipped=11.0 +2023-02-05 18:21:01,649 INFO [train.py:901] (0/4) Epoch 1, batch 2150, loss[loss=0.5392, simple_loss=0.4938, pruned_loss=0.2923, over 7541.00 frames. ], tot_loss[loss=0.5616, simple_loss=0.5232, pruned_loss=0.3017, over 1623217.31 frames. ], batch size: 18, lr: 4.79e-02, grad_scale: 16.0 +2023-02-05 18:21:11,749 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2167.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:21:29,901 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2192.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:21:35,019 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:35,571 INFO [train.py:901] (0/4) Epoch 1, batch 2200, loss[loss=0.4796, simple_loss=0.4641, pruned_loss=0.2476, over 7821.00 frames. ], tot_loss[loss=0.5524, simple_loss=0.5178, pruned_loss=0.2949, over 1622082.51 frames. ], batch size: 20, lr: 4.78e-02, grad_scale: 16.0 +2023-02-05 18:21:39,334 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.979e+02 3.885e+02 5.100e+02 6.280e+02 1.293e+03, threshold=1.020e+03, percent-clipped=3.0 +2023-02-05 18:21:40,788 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5004, 1.3296, 4.3680, 2.4653, 4.0451, 3.7920, 3.6528, 3.7748], + device='cuda:0'), covar=tensor([0.0179, 0.3444, 0.0191, 0.1034, 0.0279, 0.0231, 0.0346, 0.0371], + device='cuda:0'), in_proj_covar=tensor([0.0061, 0.0174, 0.0077, 0.0097, 0.0083, 0.0079, 0.0085, 0.0096], + device='cuda:0'), out_proj_covar=tensor([3.8018e-05, 1.0834e-04, 4.7521e-05, 6.5599e-05, 4.6665e-05, 4.3482e-05, + 4.9092e-05, 5.6088e-05], device='cuda:0') +2023-02-05 18:21:46,984 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:55,781 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:22:07,870 INFO [train.py:901] (0/4) Epoch 1, batch 2250, loss[loss=0.6515, simple_loss=0.583, pruned_loss=0.36, over 7097.00 frames. ], tot_loss[loss=0.5446, simple_loss=0.5132, pruned_loss=0.2891, over 1617345.32 frames. 
], batch size: 72, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:41,023 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2299.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:22:42,096 INFO [train.py:901] (0/4) Epoch 1, batch 2300, loss[loss=0.4982, simple_loss=0.473, pruned_loss=0.2617, over 7551.00 frames. ], tot_loss[loss=0.5405, simple_loss=0.511, pruned_loss=0.2858, over 1618424.39 frames. ], batch size: 18, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:45,954 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.442e+02 5.272e+02 6.513e+02 7.975e+02 1.884e+03, threshold=1.303e+03, percent-clipped=9.0 +2023-02-05 18:22:51,203 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2315.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:22:56,953 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2324.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:23:03,184 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2334.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:09,675 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2344.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:23:12,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2347.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:14,709 INFO [train.py:901] (0/4) Epoch 1, batch 2350, loss[loss=0.4615, simple_loss=0.4622, pruned_loss=0.2304, over 8029.00 frames. ], tot_loss[loss=0.5343, simple_loss=0.5075, pruned_loss=0.2812, over 1613236.33 frames. ], batch size: 22, lr: 4.76e-02, grad_scale: 16.0 +2023-02-05 18:23:19,261 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2358.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:23:26,044 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2369.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:23:41,127 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 18:23:46,436 INFO [train.py:901] (0/4) Epoch 1, batch 2400, loss[loss=0.4752, simple_loss=0.4801, pruned_loss=0.2352, over 8324.00 frames. ], tot_loss[loss=0.5297, simple_loss=0.5057, pruned_loss=0.2774, over 1618649.05 frames. 
], batch size: 26, lr: 4.75e-02, grad_scale: 16.0 +2023-02-05 18:23:50,349 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.591e+02 4.467e+02 5.905e+02 7.151e+02 1.301e+03, threshold=1.181e+03, percent-clipped=0.0 +2023-02-05 18:24:13,820 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1515, 1.1506, 1.0279, 1.1471, 0.8239, 0.8154, 0.9169, 1.1843], + device='cuda:0'), covar=tensor([0.2211, 0.1995, 0.2143, 0.0939, 0.3205, 0.3088, 0.2719, 0.2139], + device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0190, 0.0170, 0.0135, 0.0242, 0.0215, 0.0241, 0.0199], + device='cuda:0'), out_proj_covar=tensor([1.4572e-04, 1.4395e-04, 1.3575e-04, 9.5700e-05, 1.7650e-04, 1.5924e-04, + 1.7827e-04, 1.5342e-04], device='cuda:0') +2023-02-05 18:24:15,213 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3516, 1.9242, 2.9591, 1.4109, 2.0315, 2.0076, 1.2200, 2.0083], + device='cuda:0'), covar=tensor([0.2459, 0.2253, 0.0331, 0.1819, 0.1667, 0.2409, 0.2489, 0.1920], + device='cuda:0'), in_proj_covar=tensor([0.0105, 0.0110, 0.0060, 0.0092, 0.0127, 0.0124, 0.0110, 0.0133], + device='cuda:0'), out_proj_covar=tensor([7.3969e-05, 7.5969e-05, 3.8170e-05, 6.1394e-05, 8.3709e-05, 9.0299e-05, + 7.1550e-05, 8.9489e-05], device='cuda:0') +2023-02-05 18:24:20,800 INFO [train.py:901] (0/4) Epoch 1, batch 2450, loss[loss=0.5398, simple_loss=0.5212, pruned_loss=0.2792, over 8456.00 frames. ], tot_loss[loss=0.5269, simple_loss=0.5035, pruned_loss=0.2756, over 1613909.64 frames. ], batch size: 29, lr: 4.74e-02, grad_scale: 16.0 +2023-02-05 18:24:52,759 INFO [train.py:901] (0/4) Epoch 1, batch 2500, loss[loss=0.5914, simple_loss=0.5514, pruned_loss=0.3157, over 8488.00 frames. ], tot_loss[loss=0.5255, simple_loss=0.5032, pruned_loss=0.2742, over 1616523.58 frames. ], batch size: 28, lr: 4.73e-02, grad_scale: 16.0 +2023-02-05 18:24:56,549 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.099e+02 5.238e+02 6.448e+02 8.237e+02 1.660e+03, threshold=1.290e+03, percent-clipped=6.0 +2023-02-05 18:25:19,774 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.01 vs. limit=2.0 +2023-02-05 18:25:25,600 INFO [train.py:901] (0/4) Epoch 1, batch 2550, loss[loss=0.5509, simple_loss=0.5325, pruned_loss=0.2847, over 8475.00 frames. ], tot_loss[loss=0.5264, simple_loss=0.504, pruned_loss=0.2747, over 1618999.91 frames. ], batch size: 25, lr: 4.72e-02, grad_scale: 16.0 +2023-02-05 18:25:38,474 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2571.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:25:51,084 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2590.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:25:54,852 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2596.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:25:57,891 INFO [train.py:901] (0/4) Epoch 1, batch 2600, loss[loss=0.5284, simple_loss=0.5172, pruned_loss=0.2698, over 8244.00 frames. ], tot_loss[loss=0.5194, simple_loss=0.4999, pruned_loss=0.2696, over 1616207.34 frames. 
], batch size: 22, lr: 4.71e-02, grad_scale: 16.0 +2023-02-05 18:25:59,371 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2603.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:26:01,603 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.388e+02 4.352e+02 5.534e+02 7.344e+02 1.370e+03, threshold=1.107e+03, percent-clipped=3.0 +2023-02-05 18:26:06,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2615.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:26:08,422 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 18:26:15,228 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2628.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:26:31,161 INFO [train.py:901] (0/4) Epoch 1, batch 2650, loss[loss=0.4195, simple_loss=0.4358, pruned_loss=0.2016, over 8355.00 frames. ], tot_loss[loss=0.5147, simple_loss=0.4978, pruned_loss=0.2659, over 1613494.11 frames. ], batch size: 24, lr: 4.70e-02, grad_scale: 16.0 +2023-02-05 18:26:37,067 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5339, 1.9879, 4.0508, 1.5052, 2.5483, 2.4491, 1.4861, 2.4751], + device='cuda:0'), covar=tensor([0.2093, 0.2260, 0.0176, 0.1712, 0.1719, 0.2300, 0.2137, 0.1856], + device='cuda:0'), in_proj_covar=tensor([0.0111, 0.0112, 0.0065, 0.0101, 0.0137, 0.0140, 0.0118, 0.0149], + device='cuda:0'), out_proj_covar=tensor([7.9182e-05, 7.7887e-05, 4.0440e-05, 6.8655e-05, 9.1238e-05, 1.0226e-04, + 7.7374e-05, 9.9452e-05], device='cuda:0') +2023-02-05 18:27:03,817 INFO [train.py:901] (0/4) Epoch 1, batch 2700, loss[loss=0.47, simple_loss=0.48, pruned_loss=0.23, over 8345.00 frames. ], tot_loss[loss=0.5117, simple_loss=0.4954, pruned_loss=0.2641, over 1616729.86 frames. ], batch size: 26, lr: 4.69e-02, grad_scale: 16.0 +2023-02-05 18:27:04,575 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2702.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:27:05,219 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2703.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:27:08,312 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.214e+02 4.351e+02 5.311e+02 6.408e+02 1.471e+03, threshold=1.062e+03, percent-clipped=4.0 +2023-02-05 18:27:37,287 INFO [train.py:901] (0/4) Epoch 1, batch 2750, loss[loss=0.489, simple_loss=0.4891, pruned_loss=0.2445, over 8025.00 frames. ], tot_loss[loss=0.5066, simple_loss=0.4935, pruned_loss=0.26, over 1620489.35 frames. ], batch size: 22, lr: 4.68e-02, grad_scale: 16.0 +2023-02-05 18:28:05,773 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5588, 1.7939, 2.2013, 1.5346, 1.6040, 1.9216, 1.0421, 1.3819], + device='cuda:0'), covar=tensor([0.1407, 0.0615, 0.0449, 0.0917, 0.0879, 0.1040, 0.1960, 0.1275], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0089, 0.0081, 0.0090, 0.0098, 0.0084, 0.0140, 0.0124], + device='cuda:0'), out_proj_covar=tensor([7.6570e-05, 6.3812e-05, 5.5964e-05, 6.5291e-05, 7.0947e-05, 5.8194e-05, + 1.0517e-04, 9.4265e-05], device='cuda:0') +2023-02-05 18:28:11,566 INFO [train.py:901] (0/4) Epoch 1, batch 2800, loss[loss=0.4946, simple_loss=0.4879, pruned_loss=0.2507, over 8026.00 frames. ], tot_loss[loss=0.502, simple_loss=0.4904, pruned_loss=0.2569, over 1614532.32 frames. 
], batch size: 22, lr: 4.67e-02, grad_scale: 16.0 +2023-02-05 18:28:15,257 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.916e+02 4.898e+02 6.530e+02 2.276e+03, threshold=9.797e+02, percent-clipped=2.0 +2023-02-05 18:28:21,889 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2817.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:28:38,508 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2842.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:28:44,178 INFO [train.py:901] (0/4) Epoch 1, batch 2850, loss[loss=0.4538, simple_loss=0.4536, pruned_loss=0.227, over 7976.00 frames. ], tot_loss[loss=0.5012, simple_loss=0.4899, pruned_loss=0.2563, over 1611591.54 frames. ], batch size: 21, lr: 4.66e-02, grad_scale: 16.0 +2023-02-05 18:29:07,282 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.01 vs. limit=2.0 +2023-02-05 18:29:18,794 INFO [train.py:901] (0/4) Epoch 1, batch 2900, loss[loss=0.4998, simple_loss=0.4958, pruned_loss=0.2519, over 8589.00 frames. ], tot_loss[loss=0.5007, simple_loss=0.4898, pruned_loss=0.2558, over 1612376.32 frames. ], batch size: 31, lr: 4.65e-02, grad_scale: 16.0 +2023-02-05 18:29:22,677 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.417e+02 4.413e+02 5.664e+02 7.338e+02 1.737e+03, threshold=1.133e+03, percent-clipped=8.0 +2023-02-05 18:29:38,018 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.26 vs. limit=2.0 +2023-02-05 18:29:48,934 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 18:29:52,162 INFO [train.py:901] (0/4) Epoch 1, batch 2950, loss[loss=0.4712, simple_loss=0.4682, pruned_loss=0.2371, over 7941.00 frames. ], tot_loss[loss=0.4998, simple_loss=0.4899, pruned_loss=0.2549, over 1608077.38 frames. ], batch size: 20, lr: 4.64e-02, grad_scale: 16.0 +2023-02-05 18:29:54,898 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2955.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:30:21,053 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7667, 1.7280, 5.3666, 2.6356, 5.0881, 4.7589, 4.7229, 4.6750], + device='cuda:0'), covar=tensor([0.0155, 0.3371, 0.0132, 0.1222, 0.0197, 0.0171, 0.0323, 0.0312], + device='cuda:0'), in_proj_covar=tensor([0.0078, 0.0240, 0.0099, 0.0135, 0.0115, 0.0114, 0.0121, 0.0133], + device='cuda:0'), out_proj_covar=tensor([4.9940e-05, 1.4391e-04, 6.4020e-05, 8.9970e-05, 6.7817e-05, 6.6619e-05, + 7.4881e-05, 8.3152e-05], device='cuda:0') +2023-02-05 18:30:25,906 INFO [train.py:901] (0/4) Epoch 1, batch 3000, loss[loss=0.4612, simple_loss=0.4704, pruned_loss=0.226, over 8529.00 frames. ], tot_loss[loss=0.4998, simple_loss=0.49, pruned_loss=0.2549, over 1614305.12 frames. ], batch size: 26, lr: 4.63e-02, grad_scale: 16.0 +2023-02-05 18:30:25,906 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 18:30:40,786 INFO [train.py:935] (0/4) Epoch 1, validation: loss=0.4518, simple_loss=0.5106, pruned_loss=0.1966, over 944034.00 frames. 
+2023-02-05 18:30:40,787 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6257MB +2023-02-05 18:30:44,887 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.692e+02 4.264e+02 5.642e+02 7.781e+02 1.743e+03, threshold=1.128e+03, percent-clipped=6.0 +2023-02-05 18:31:07,307 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3037.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:31:13,910 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3047.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:31:16,514 INFO [train.py:901] (0/4) Epoch 1, batch 3050, loss[loss=0.4799, simple_loss=0.4873, pruned_loss=0.2363, over 8591.00 frames. ], tot_loss[loss=0.4965, simple_loss=0.4882, pruned_loss=0.2524, over 1617089.16 frames. ], batch size: 31, lr: 4.62e-02, grad_scale: 16.0 +2023-02-05 18:31:29,826 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-05 18:31:30,849 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3073.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:31:41,202 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.72 vs. limit=5.0 +2023-02-05 18:31:47,473 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3098.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:31:49,290 INFO [train.py:901] (0/4) Epoch 1, batch 3100, loss[loss=0.4634, simple_loss=0.477, pruned_loss=0.2249, over 8503.00 frames. ], tot_loss[loss=0.4965, simple_loss=0.4874, pruned_loss=0.2528, over 1617438.56 frames. ], batch size: 26, lr: 4.61e-02, grad_scale: 16.0 +2023-02-05 18:31:53,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.570e+02 4.257e+02 6.045e+02 8.311e+02 2.838e+03, threshold=1.209e+03, percent-clipped=13.0 +2023-02-05 18:32:24,771 INFO [train.py:901] (0/4) Epoch 1, batch 3150, loss[loss=0.4393, simple_loss=0.4555, pruned_loss=0.2116, over 7932.00 frames. ], tot_loss[loss=0.4936, simple_loss=0.4852, pruned_loss=0.251, over 1614670.30 frames. ], batch size: 20, lr: 4.60e-02, grad_scale: 16.0 +2023-02-05 18:32:32,211 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3162.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:32:44,602 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7271, 4.0188, 3.4627, 1.1603, 3.3083, 3.6053, 3.4815, 2.9845], + device='cuda:0'), covar=tensor([0.1047, 0.0555, 0.0771, 0.4295, 0.0550, 0.0498, 0.1192, 0.0702], + device='cuda:0'), in_proj_covar=tensor([0.0167, 0.0123, 0.0146, 0.0208, 0.0113, 0.0094, 0.0148, 0.0109], + device='cuda:0'), out_proj_covar=tensor([1.2544e-04, 1.0251e-04, 9.8361e-05, 1.4224e-04, 7.6705e-05, 6.7290e-05, + 1.1470e-04, 7.4880e-05], device='cuda:0') +2023-02-05 18:32:47,630 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:32:57,074 INFO [train.py:901] (0/4) Epoch 1, batch 3200, loss[loss=0.4357, simple_loss=0.4253, pruned_loss=0.223, over 7427.00 frames. ], tot_loss[loss=0.4917, simple_loss=0.4837, pruned_loss=0.2499, over 1612254.38 frames. 
], batch size: 17, lr: 4.59e-02, grad_scale: 16.0 +2023-02-05 18:33:00,919 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 4.232e+02 5.266e+02 6.948e+02 2.778e+03, threshold=1.053e+03, percent-clipped=2.0 +2023-02-05 18:33:32,105 INFO [train.py:901] (0/4) Epoch 1, batch 3250, loss[loss=0.567, simple_loss=0.53, pruned_loss=0.302, over 8640.00 frames. ], tot_loss[loss=0.491, simple_loss=0.483, pruned_loss=0.2495, over 1616665.62 frames. ], batch size: 49, lr: 4.58e-02, grad_scale: 16.0 +2023-02-05 18:34:04,403 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3299.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:34:05,629 INFO [train.py:901] (0/4) Epoch 1, batch 3300, loss[loss=0.5655, simple_loss=0.5436, pruned_loss=0.2937, over 8231.00 frames. ], tot_loss[loss=0.4874, simple_loss=0.4815, pruned_loss=0.2467, over 1616771.27 frames. ], batch size: 22, lr: 4.57e-02, grad_scale: 16.0 +2023-02-05 18:34:05,826 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3301.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:34:08,942 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3306.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:34:09,426 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 4.334e+02 5.638e+02 7.160e+02 2.697e+03, threshold=1.128e+03, percent-clipped=10.0 +2023-02-05 18:34:39,417 INFO [train.py:901] (0/4) Epoch 1, batch 3350, loss[loss=0.4091, simple_loss=0.4264, pruned_loss=0.1959, over 8083.00 frames. ], tot_loss[loss=0.4834, simple_loss=0.4791, pruned_loss=0.2439, over 1614974.48 frames. ], batch size: 21, lr: 4.56e-02, grad_scale: 16.0 +2023-02-05 18:34:58,220 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.09 vs. limit=2.0 +2023-02-05 18:35:01,938 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3381.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:02,004 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.2610, 1.9015, 4.7349, 2.8178, 4.6803, 4.1182, 4.3726, 4.2091], + device='cuda:0'), covar=tensor([0.0101, 0.3281, 0.0182, 0.0875, 0.0189, 0.0218, 0.0250, 0.0281], + device='cuda:0'), in_proj_covar=tensor([0.0083, 0.0255, 0.0112, 0.0144, 0.0124, 0.0124, 0.0122, 0.0139], + device='cuda:0'), out_proj_covar=tensor([5.1228e-05, 1.5082e-04, 7.3966e-05, 9.7457e-05, 7.5781e-05, 7.5110e-05, + 7.7015e-05, 8.9081e-05], device='cuda:0') +2023-02-05 18:35:14,980 INFO [train.py:901] (0/4) Epoch 1, batch 3400, loss[loss=0.516, simple_loss=0.5062, pruned_loss=0.2629, over 8250.00 frames. ], tot_loss[loss=0.483, simple_loss=0.4794, pruned_loss=0.2433, over 1616190.23 frames. 
], batch size: 24, lr: 4.55e-02, grad_scale: 16.0 +2023-02-05 18:35:19,028 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.486e+02 3.960e+02 5.068e+02 6.311e+02 1.481e+03, threshold=1.014e+03, percent-clipped=3.0 +2023-02-05 18:35:23,770 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:26,534 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3418.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:35:43,733 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3443.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:48,755 INFO [train.py:901] (0/4) Epoch 1, batch 3450, loss[loss=0.4862, simple_loss=0.4957, pruned_loss=0.2383, over 8367.00 frames. ], tot_loss[loss=0.4802, simple_loss=0.4775, pruned_loss=0.2414, over 1616706.84 frames. ], batch size: 24, lr: 4.54e-02, grad_scale: 16.0 +2023-02-05 18:35:54,281 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3239, 1.6848, 1.5670, 1.4659, 1.1597, 1.6066, 0.4619, 1.1602], + device='cuda:0'), covar=tensor([0.0905, 0.0694, 0.0621, 0.0668, 0.0961, 0.0786, 0.2077, 0.0964], + device='cuda:0'), in_proj_covar=tensor([0.0124, 0.0117, 0.0101, 0.0106, 0.0117, 0.0095, 0.0159, 0.0123], + device='cuda:0'), out_proj_covar=tensor([8.8045e-05, 8.9513e-05, 7.1986e-05, 7.5796e-05, 8.7248e-05, 6.6607e-05, + 1.1928e-04, 9.6599e-05], device='cuda:0') +2023-02-05 18:36:21,020 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:36:24,209 INFO [train.py:901] (0/4) Epoch 1, batch 3500, loss[loss=0.4607, simple_loss=0.4774, pruned_loss=0.222, over 8464.00 frames. ], tot_loss[loss=0.482, simple_loss=0.479, pruned_loss=0.2425, over 1615300.44 frames. ], batch size: 29, lr: 4.53e-02, grad_scale: 16.0 +2023-02-05 18:36:28,199 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.405e+02 5.773e+02 7.537e+02 2.537e+03, threshold=1.155e+03, percent-clipped=7.0 +2023-02-05 18:36:36,243 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 18:36:57,810 INFO [train.py:901] (0/4) Epoch 1, batch 3550, loss[loss=0.461, simple_loss=0.4805, pruned_loss=0.2208, over 8108.00 frames. ], tot_loss[loss=0.4801, simple_loss=0.478, pruned_loss=0.2411, over 1613094.82 frames. ], batch size: 23, lr: 4.51e-02, grad_scale: 16.0 +2023-02-05 18:37:02,066 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3557.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:37:07,145 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3564.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:37:19,178 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3582.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:37:33,297 INFO [train.py:901] (0/4) Epoch 1, batch 3600, loss[loss=0.4815, simple_loss=0.4953, pruned_loss=0.2338, over 8330.00 frames. ], tot_loss[loss=0.4845, simple_loss=0.48, pruned_loss=0.2445, over 1614792.57 frames. 
], batch size: 25, lr: 4.50e-02, grad_scale: 16.0 +2023-02-05 18:37:37,961 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.853e+02 4.660e+02 6.337e+02 8.772e+02 4.832e+03, threshold=1.267e+03, percent-clipped=11.0 +2023-02-05 18:38:05,407 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0926, 1.1974, 2.1081, 0.3202, 1.7848, 1.8492, 1.1716, 2.2755], + device='cuda:0'), covar=tensor([0.0855, 0.0514, 0.0390, 0.1395, 0.0772, 0.0699, 0.1183, 0.0308], + device='cuda:0'), in_proj_covar=tensor([0.0090, 0.0074, 0.0067, 0.0091, 0.0072, 0.0077, 0.0096, 0.0071], + device='cuda:0'), out_proj_covar=tensor([6.3558e-05, 4.9461e-05, 4.6247e-05, 6.8877e-05, 5.2372e-05, 5.3235e-05, + 6.8093e-05, 4.6859e-05], device='cuda:0') +2023-02-05 18:38:06,587 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3650.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:38:07,048 INFO [train.py:901] (0/4) Epoch 1, batch 3650, loss[loss=0.6025, simple_loss=0.5565, pruned_loss=0.3242, over 8594.00 frames. ], tot_loss[loss=0.482, simple_loss=0.4789, pruned_loss=0.2426, over 1613797.08 frames. ], batch size: 31, lr: 4.49e-02, grad_scale: 16.0 +2023-02-05 18:38:16,493 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.03 vs. limit=2.0 +2023-02-05 18:38:19,603 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3670.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:38:34,672 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1946, 2.1223, 1.6334, 2.6164, 1.5693, 1.3238, 1.7619, 2.0992], + device='cuda:0'), covar=tensor([0.1251, 0.1667, 0.1792, 0.0327, 0.2461, 0.2392, 0.2461, 0.1427], + device='cuda:0'), in_proj_covar=tensor([0.0242, 0.0247, 0.0236, 0.0153, 0.0301, 0.0288, 0.0322, 0.0243], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0003, 0.0002], + device='cuda:0') +2023-02-05 18:38:36,797 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3694.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:38:37,526 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3695.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:38:40,425 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 18:38:41,126 INFO [train.py:901] (0/4) Epoch 1, batch 3700, loss[loss=0.5595, simple_loss=0.5394, pruned_loss=0.2898, over 8588.00 frames. ], tot_loss[loss=0.4825, simple_loss=0.4788, pruned_loss=0.2431, over 1604826.50 frames. ], batch size: 31, lr: 4.48e-02, grad_scale: 16.0 +2023-02-05 18:38:45,133 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.178e+02 4.586e+02 6.278e+02 1.050e+03 3.437e+03, threshold=1.256e+03, percent-clipped=14.0 +2023-02-05 18:39:17,444 INFO [train.py:901] (0/4) Epoch 1, batch 3750, loss[loss=0.5267, simple_loss=0.5113, pruned_loss=0.271, over 8437.00 frames. ], tot_loss[loss=0.4804, simple_loss=0.4774, pruned_loss=0.2417, over 1606863.20 frames. 
], batch size: 27, lr: 4.47e-02, grad_scale: 16.0 +2023-02-05 18:39:18,333 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3752.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:39:27,111 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3765.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:39:35,221 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3777.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:39:51,672 INFO [train.py:901] (0/4) Epoch 1, batch 3800, loss[loss=0.4774, simple_loss=0.4727, pruned_loss=0.241, over 8247.00 frames. ], tot_loss[loss=0.4753, simple_loss=0.4742, pruned_loss=0.2382, over 1611789.36 frames. ], batch size: 22, lr: 4.46e-02, grad_scale: 16.0 +2023-02-05 18:39:55,874 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.457e+02 5.389e+02 6.979e+02 9.091e+02 1.609e+03, threshold=1.396e+03, percent-clipped=5.0 +2023-02-05 18:40:27,880 INFO [train.py:901] (0/4) Epoch 1, batch 3850, loss[loss=0.5702, simple_loss=0.5309, pruned_loss=0.3048, over 8185.00 frames. ], tot_loss[loss=0.474, simple_loss=0.4736, pruned_loss=0.2372, over 1612659.99 frames. ], batch size: 23, lr: 4.45e-02, grad_scale: 16.0 +2023-02-05 18:40:36,209 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 18:40:36,795 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.2050, 0.8129, 1.0516, 0.1129, 0.6936, 0.5957, 0.1069, 0.8981], + device='cuda:0'), covar=tensor([0.0816, 0.0510, 0.0373, 0.1089, 0.0517, 0.0761, 0.1010, 0.0518], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0077, 0.0069, 0.0098, 0.0075, 0.0085, 0.0098, 0.0077], + device='cuda:0'), out_proj_covar=tensor([6.9988e-05, 5.1770e-05, 4.6694e-05, 7.6608e-05, 5.5707e-05, 5.9700e-05, + 7.1457e-05, 5.0921e-05], device='cuda:0') +2023-02-05 18:40:46,552 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 18:40:54,062 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8669, 1.6169, 3.2878, 1.5805, 2.1716, 3.7561, 3.3002, 3.0735], + device='cuda:0'), covar=tensor([0.2086, 0.2295, 0.0283, 0.2380, 0.1092, 0.0144, 0.0244, 0.0390], + device='cuda:0'), in_proj_covar=tensor([0.0216, 0.0238, 0.0133, 0.0223, 0.0172, 0.0096, 0.0098, 0.0136], + device='cuda:0'), out_proj_covar=tensor([1.6694e-04, 1.7885e-04, 1.1270e-04, 1.6122e-04, 1.4635e-04, 7.6774e-05, + 8.6735e-05, 1.0823e-04], device='cuda:0') +2023-02-05 18:40:57,951 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3390, 1.7681, 2.0464, 1.6266, 1.6451, 2.1143, 0.7323, 1.0158], + device='cuda:0'), covar=tensor([0.0963, 0.0631, 0.0450, 0.0584, 0.0641, 0.0319, 0.1783, 0.1020], + device='cuda:0'), in_proj_covar=tensor([0.0129, 0.0124, 0.0109, 0.0116, 0.0122, 0.0094, 0.0160, 0.0129], + device='cuda:0'), out_proj_covar=tensor([9.3903e-05, 9.7055e-05, 7.9660e-05, 8.4725e-05, 9.3800e-05, 6.5627e-05, + 1.2388e-04, 1.0316e-04], device='cuda:0') +2023-02-05 18:41:00,999 INFO [train.py:901] (0/4) Epoch 1, batch 3900, loss[loss=0.4413, simple_loss=0.4576, pruned_loss=0.2125, over 8643.00 frames. ], tot_loss[loss=0.4736, simple_loss=0.4736, pruned_loss=0.2368, over 1612737.06 frames. 
], batch size: 34, lr: 4.44e-02, grad_scale: 16.0 +2023-02-05 18:41:02,503 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7080, 2.3612, 2.1325, 1.9834, 2.1350, 1.9553, 2.6409, 3.0145], + device='cuda:0'), covar=tensor([0.1830, 0.2305, 0.2203, 0.2209, 0.1790, 0.2222, 0.1825, 0.1163], + device='cuda:0'), in_proj_covar=tensor([0.0252, 0.0260, 0.0235, 0.0248, 0.0259, 0.0236, 0.0252, 0.0238], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:0') +2023-02-05 18:41:04,999 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.102e+02 5.552e+02 7.100e+02 9.321e+02 1.906e+03, threshold=1.420e+03, percent-clipped=2.0 +2023-02-05 18:41:05,709 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3908.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:41:29,964 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:41:35,351 INFO [train.py:901] (0/4) Epoch 1, batch 3950, loss[loss=0.4402, simple_loss=0.4633, pruned_loss=0.2085, over 8333.00 frames. ], tot_loss[loss=0.4726, simple_loss=0.4733, pruned_loss=0.236, over 1616131.68 frames. ], batch size: 25, lr: 4.43e-02, grad_scale: 16.0 +2023-02-05 18:42:06,739 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4195, 1.6432, 2.2104, 0.5386, 2.0235, 1.5277, 0.7526, 1.8640], + device='cuda:0'), covar=tensor([0.0855, 0.0835, 0.0376, 0.1439, 0.0696, 0.1116, 0.1366, 0.0438], + device='cuda:0'), in_proj_covar=tensor([0.0105, 0.0082, 0.0071, 0.0105, 0.0078, 0.0087, 0.0104, 0.0076], + device='cuda:0'), out_proj_covar=tensor([7.4751e-05, 5.5733e-05, 4.9549e-05, 8.0619e-05, 5.8522e-05, 6.1885e-05, + 7.5669e-05, 5.1059e-05], device='cuda:0') +2023-02-05 18:42:09,367 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-4000.pt +2023-02-05 18:42:10,929 INFO [train.py:901] (0/4) Epoch 1, batch 4000, loss[loss=0.4369, simple_loss=0.4345, pruned_loss=0.2197, over 7648.00 frames. ], tot_loss[loss=0.467, simple_loss=0.4688, pruned_loss=0.2326, over 1611562.40 frames. ], batch size: 19, lr: 4.42e-02, grad_scale: 8.0 +2023-02-05 18:42:15,527 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.262e+02 4.572e+02 5.687e+02 7.371e+02 1.820e+03, threshold=1.137e+03, percent-clipped=4.0 +2023-02-05 18:42:24,588 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4021.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:25,860 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4023.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:31,864 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. limit=5.0 +2023-02-05 18:42:36,390 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4038.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:42:42,722 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4046.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:42:46,104 INFO [train.py:901] (0/4) Epoch 1, batch 4050, loss[loss=0.5192, simple_loss=0.5149, pruned_loss=0.2618, over 8340.00 frames. ], tot_loss[loss=0.469, simple_loss=0.4701, pruned_loss=0.2339, over 1606781.63 frames. 
], batch size: 26, lr: 4.41e-02, grad_scale: 8.0 +2023-02-05 18:43:22,349 INFO [train.py:901] (0/4) Epoch 1, batch 4100, loss[loss=0.4278, simple_loss=0.4517, pruned_loss=0.202, over 8324.00 frames. ], tot_loss[loss=0.4675, simple_loss=0.4694, pruned_loss=0.2328, over 1612152.66 frames. ], batch size: 25, lr: 4.40e-02, grad_scale: 8.0 +2023-02-05 18:43:26,892 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.479e+02 4.889e+02 6.474e+02 8.616e+02 2.054e+03, threshold=1.295e+03, percent-clipped=5.0 +2023-02-05 18:43:45,111 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3257, 1.2939, 1.5679, 0.1864, 1.2148, 1.0211, 0.2659, 1.2170], + device='cuda:0'), covar=tensor([0.0813, 0.0444, 0.0390, 0.1362, 0.0709, 0.0736, 0.1246, 0.0369], + device='cuda:0'), in_proj_covar=tensor([0.0109, 0.0083, 0.0073, 0.0107, 0.0078, 0.0092, 0.0105, 0.0077], + device='cuda:0'), out_proj_covar=tensor([7.6747e-05, 5.7009e-05, 5.1024e-05, 8.2382e-05, 6.0186e-05, 6.5240e-05, + 7.6998e-05, 5.1658e-05], device='cuda:0') +2023-02-05 18:43:56,550 INFO [train.py:901] (0/4) Epoch 1, batch 4150, loss[loss=0.3667, simple_loss=0.3865, pruned_loss=0.1735, over 7202.00 frames. ], tot_loss[loss=0.4642, simple_loss=0.4666, pruned_loss=0.2309, over 1606846.75 frames. ], batch size: 16, lr: 4.39e-02, grad_scale: 8.0 +2023-02-05 18:43:58,171 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4153.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:44:02,272 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4159.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:44:20,632 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 18:44:33,534 INFO [train.py:901] (0/4) Epoch 1, batch 4200, loss[loss=0.4916, simple_loss=0.4858, pruned_loss=0.2487, over 8433.00 frames. ], tot_loss[loss=0.4601, simple_loss=0.4648, pruned_loss=0.2277, over 1609930.40 frames. ], batch size: 27, lr: 4.38e-02, grad_scale: 8.0 +2023-02-05 18:44:38,311 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 4.057e+02 5.109e+02 6.409e+02 1.525e+03, threshold=1.022e+03, percent-clipped=2.0 +2023-02-05 18:44:44,323 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 18:45:04,973 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 18:45:07,077 INFO [train.py:901] (0/4) Epoch 1, batch 4250, loss[loss=0.3845, simple_loss=0.4233, pruned_loss=0.1728, over 8249.00 frames. ], tot_loss[loss=0.4586, simple_loss=0.4636, pruned_loss=0.2268, over 1613154.67 frames. ], batch size: 22, lr: 4.36e-02, grad_scale: 8.0 +2023-02-05 18:45:26,611 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4279.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:45:33,325 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4288.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:45:42,872 INFO [train.py:901] (0/4) Epoch 1, batch 4300, loss[loss=0.4075, simple_loss=0.4108, pruned_loss=0.202, over 7937.00 frames. ], tot_loss[loss=0.4578, simple_loss=0.4628, pruned_loss=0.2264, over 1612447.37 frames. 
], batch size: 20, lr: 4.35e-02, grad_scale: 8.0 +2023-02-05 18:45:45,691 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4304.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:45:47,006 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4306.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:45:48,894 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.647e+02 4.666e+02 6.207e+02 8.078e+02 1.600e+03, threshold=1.241e+03, percent-clipped=6.0 +2023-02-05 18:46:00,776 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2962, 1.2385, 0.8557, 1.3607, 1.0532, 0.9045, 1.2194, 1.5871], + device='cuda:0'), covar=tensor([0.0950, 0.0870, 0.1881, 0.0583, 0.1354, 0.1266, 0.1294, 0.0673], + device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0175, 0.0274, 0.0183, 0.0246, 0.0208, 0.0282, 0.0221], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 18:46:18,302 INFO [train.py:901] (0/4) Epoch 1, batch 4350, loss[loss=0.4512, simple_loss=0.4806, pruned_loss=0.2109, over 8471.00 frames. ], tot_loss[loss=0.4557, simple_loss=0.4613, pruned_loss=0.225, over 1609397.11 frames. ], batch size: 25, lr: 4.34e-02, grad_scale: 8.0 +2023-02-05 18:46:37,371 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 18:46:52,941 INFO [train.py:901] (0/4) Epoch 1, batch 4400, loss[loss=0.3878, simple_loss=0.4153, pruned_loss=0.1802, over 8204.00 frames. ], tot_loss[loss=0.4546, simple_loss=0.4607, pruned_loss=0.2243, over 1608001.59 frames. ], batch size: 23, lr: 4.33e-02, grad_scale: 8.0 +2023-02-05 18:46:54,521 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4403.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:46:57,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 4.338e+02 5.789e+02 7.262e+02 1.136e+03, threshold=1.158e+03, percent-clipped=0.0 +2023-02-05 18:46:58,921 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4409.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:47:03,214 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=5.05 vs. limit=5.0 +2023-02-05 18:47:18,593 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4434.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:47:21,209 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 18:47:28,090 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2193, 0.8903, 2.6798, 0.1928, 2.0033, 0.8544, 0.7245, 1.4920], + device='cuda:0'), covar=tensor([0.0669, 0.0560, 0.0190, 0.1216, 0.0733, 0.0702, 0.0971, 0.0327], + device='cuda:0'), in_proj_covar=tensor([0.0121, 0.0093, 0.0075, 0.0117, 0.0086, 0.0107, 0.0113, 0.0085], + device='cuda:0'), out_proj_covar=tensor([8.6563e-05, 6.3993e-05, 5.3238e-05, 9.2655e-05, 6.7449e-05, 7.6536e-05, + 8.4121e-05, 5.9770e-05], device='cuda:0') +2023-02-05 18:47:29,976 INFO [train.py:901] (0/4) Epoch 1, batch 4450, loss[loss=0.3652, simple_loss=0.3851, pruned_loss=0.1726, over 7542.00 frames. ], tot_loss[loss=0.4533, simple_loss=0.4595, pruned_loss=0.2236, over 1605207.56 frames. ], batch size: 18, lr: 4.32e-02, grad_scale: 8.0 +2023-02-05 18:47:54,978 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. 
limit=2.0 +2023-02-05 18:48:04,127 INFO [train.py:901] (0/4) Epoch 1, batch 4500, loss[loss=0.4801, simple_loss=0.4771, pruned_loss=0.2415, over 8531.00 frames. ], tot_loss[loss=0.4541, simple_loss=0.4597, pruned_loss=0.2243, over 1606196.85 frames. ], batch size: 31, lr: 4.31e-02, grad_scale: 8.0 +2023-02-05 18:48:05,591 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4503.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:48:05,701 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8032, 1.7629, 1.2806, 1.4362, 2.1669, 1.5316, 1.6289, 2.1279], + device='cuda:0'), covar=tensor([0.2097, 0.2641, 0.2976, 0.2723, 0.1735, 0.2616, 0.1951, 0.1526], + device='cuda:0'), in_proj_covar=tensor([0.0264, 0.0277, 0.0258, 0.0264, 0.0278, 0.0248, 0.0260, 0.0256], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 18:48:09,055 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.251e+02 4.383e+02 5.863e+02 8.313e+02 2.632e+03, threshold=1.173e+03, percent-clipped=9.0 +2023-02-05 18:48:09,187 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.5859, 3.8076, 3.2516, 1.4490, 3.1287, 3.2683, 3.3297, 2.7650], + device='cuda:0'), covar=tensor([0.0860, 0.0524, 0.0828, 0.3210, 0.0469, 0.0430, 0.1093, 0.0660], + device='cuda:0'), in_proj_covar=tensor([0.0218, 0.0167, 0.0189, 0.0242, 0.0146, 0.0114, 0.0186, 0.0127], + device='cuda:0'), out_proj_covar=tensor([1.6849e-04, 1.3041e-04, 1.2596e-04, 1.6317e-04, 9.7094e-05, 8.2199e-05, + 1.4205e-04, 8.9655e-05], device='cuda:0') +2023-02-05 18:48:15,347 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 18:48:21,817 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6260, 2.2166, 4.3664, 1.2865, 2.6704, 2.3717, 1.3721, 2.7461], + device='cuda:0'), covar=tensor([0.1124, 0.1406, 0.0153, 0.1394, 0.1087, 0.1780, 0.1253, 0.1049], + device='cuda:0'), in_proj_covar=tensor([0.0201, 0.0210, 0.0155, 0.0219, 0.0248, 0.0282, 0.0216, 0.0244], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 18:48:36,302 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4101, 1.3335, 3.6236, 1.5141, 1.9236, 4.7925, 4.3185, 4.2702], + device='cuda:0'), covar=tensor([0.1516, 0.2196, 0.0251, 0.2264, 0.1061, 0.0161, 0.0301, 0.0453], + device='cuda:0'), in_proj_covar=tensor([0.0228, 0.0253, 0.0135, 0.0243, 0.0180, 0.0105, 0.0106, 0.0148], + device='cuda:0'), out_proj_covar=tensor([1.8055e-04, 1.9671e-04, 1.2068e-04, 1.8267e-04, 1.6191e-04, 8.6699e-05, + 9.6650e-05, 1.2486e-04], device='cuda:0') +2023-02-05 18:48:41,814 INFO [train.py:901] (0/4) Epoch 1, batch 4550, loss[loss=0.415, simple_loss=0.447, pruned_loss=0.1915, over 8102.00 frames. ], tot_loss[loss=0.452, simple_loss=0.4589, pruned_loss=0.2226, over 1608097.08 frames. ], batch size: 23, lr: 4.30e-02, grad_scale: 8.0 +2023-02-05 18:49:16,709 INFO [train.py:901] (0/4) Epoch 1, batch 4600, loss[loss=0.4111, simple_loss=0.4143, pruned_loss=0.204, over 7720.00 frames. ], tot_loss[loss=0.4514, simple_loss=0.4586, pruned_loss=0.2221, over 1611980.98 frames. 
], batch size: 18, lr: 4.29e-02, grad_scale: 8.0 +2023-02-05 18:49:21,483 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.983e+02 5.037e+02 6.922e+02 1.236e+03, threshold=1.007e+03, percent-clipped=2.0 +2023-02-05 18:49:28,456 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4618.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:49:51,595 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4650.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:49:52,226 INFO [train.py:901] (0/4) Epoch 1, batch 4650, loss[loss=0.4288, simple_loss=0.46, pruned_loss=0.1988, over 8508.00 frames. ], tot_loss[loss=0.4495, simple_loss=0.4576, pruned_loss=0.2207, over 1613569.24 frames. ], batch size: 28, lr: 4.28e-02, grad_scale: 8.0 +2023-02-05 18:49:59,117 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4659.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:50:16,202 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4684.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:50:27,581 INFO [train.py:901] (0/4) Epoch 1, batch 4700, loss[loss=0.4419, simple_loss=0.4607, pruned_loss=0.2116, over 8615.00 frames. ], tot_loss[loss=0.4473, simple_loss=0.4567, pruned_loss=0.219, over 1619311.53 frames. ], batch size: 39, lr: 4.27e-02, grad_scale: 8.0 +2023-02-05 18:50:28,850 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.84 vs. limit=5.0 +2023-02-05 18:50:32,369 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.254e+02 4.576e+02 5.443e+02 6.674e+02 1.320e+03, threshold=1.089e+03, percent-clipped=4.0 +2023-02-05 18:51:01,879 INFO [train.py:901] (0/4) Epoch 1, batch 4750, loss[loss=0.4264, simple_loss=0.4264, pruned_loss=0.2131, over 5971.00 frames. ], tot_loss[loss=0.4457, simple_loss=0.4549, pruned_loss=0.2183, over 1614325.18 frames. ], batch size: 13, lr: 4.26e-02, grad_scale: 8.0 +2023-02-05 18:51:12,256 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4765.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:51:21,690 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 18:51:23,828 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 18:51:37,812 INFO [train.py:901] (0/4) Epoch 1, batch 4800, loss[loss=0.4493, simple_loss=0.4646, pruned_loss=0.217, over 8361.00 frames. ], tot_loss[loss=0.4472, simple_loss=0.4566, pruned_loss=0.2189, over 1618432.35 frames. ], batch size: 24, lr: 4.25e-02, grad_scale: 8.0 +2023-02-05 18:51:42,624 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.690e+02 4.367e+02 5.327e+02 7.244e+02 1.939e+03, threshold=1.065e+03, percent-clipped=6.0 +2023-02-05 18:52:11,418 INFO [train.py:901] (0/4) Epoch 1, batch 4850, loss[loss=0.424, simple_loss=0.4456, pruned_loss=0.2012, over 7937.00 frames. ], tot_loss[loss=0.4467, simple_loss=0.4556, pruned_loss=0.2189, over 1616809.59 frames. ], batch size: 20, lr: 4.24e-02, grad_scale: 8.0 +2023-02-05 18:52:13,498 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-05 18:52:27,463 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4874.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:52:45,941 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3854, 1.1471, 2.6509, 1.3243, 1.8694, 2.8699, 2.7431, 2.5068], + device='cuda:0'), covar=tensor([0.1878, 0.2287, 0.0341, 0.2397, 0.0918, 0.0288, 0.0289, 0.0488], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0248, 0.0141, 0.0243, 0.0181, 0.0108, 0.0104, 0.0156], + device='cuda:0'), out_proj_covar=tensor([1.8623e-04, 1.9680e-04, 1.2941e-04, 1.8674e-04, 1.6287e-04, 9.3641e-05, + 9.6509e-05, 1.3366e-04], device='cuda:0') +2023-02-05 18:52:47,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4899.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:52:48,555 INFO [train.py:901] (0/4) Epoch 1, batch 4900, loss[loss=0.4095, simple_loss=0.432, pruned_loss=0.1935, over 8287.00 frames. ], tot_loss[loss=0.4443, simple_loss=0.4537, pruned_loss=0.2174, over 1618347.91 frames. ], batch size: 23, lr: 4.23e-02, grad_scale: 8.0 +2023-02-05 18:52:53,381 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.332e+02 4.394e+02 5.447e+02 6.722e+02 1.310e+03, threshold=1.089e+03, percent-clipped=5.0 +2023-02-05 18:53:10,532 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5884, 4.8142, 4.0046, 2.3452, 3.9756, 3.9544, 4.3404, 3.3757], + device='cuda:0'), covar=tensor([0.0573, 0.0230, 0.0583, 0.2295, 0.0364, 0.0414, 0.0703, 0.0464], + device='cuda:0'), in_proj_covar=tensor([0.0221, 0.0163, 0.0186, 0.0239, 0.0146, 0.0118, 0.0191, 0.0122], + device='cuda:0'), out_proj_covar=tensor([1.7003e-04, 1.2340e-04, 1.2410e-04, 1.6094e-04, 9.8682e-05, 8.6213e-05, + 1.4129e-04, 8.6516e-05], device='cuda:0') +2023-02-05 18:53:22,701 INFO [train.py:901] (0/4) Epoch 1, batch 4950, loss[loss=0.5399, simple_loss=0.5225, pruned_loss=0.2787, over 8190.00 frames. ], tot_loss[loss=0.4413, simple_loss=0.4521, pruned_loss=0.2153, over 1618538.24 frames. ], batch size: 23, lr: 4.21e-02, grad_scale: 8.0 +2023-02-05 18:53:34,365 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0297, 2.0216, 1.5298, 1.5385, 2.0017, 1.4922, 1.8841, 2.4754], + device='cuda:0'), covar=tensor([0.1867, 0.2430, 0.2787, 0.2512, 0.1857, 0.2510, 0.1875, 0.1274], + device='cuda:0'), in_proj_covar=tensor([0.0271, 0.0284, 0.0273, 0.0273, 0.0281, 0.0255, 0.0267, 0.0261], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 18:53:47,766 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.04 vs. limit=2.0 +2023-02-05 18:53:59,098 INFO [train.py:901] (0/4) Epoch 1, batch 5000, loss[loss=0.5241, simple_loss=0.51, pruned_loss=0.2691, over 8102.00 frames. ], tot_loss[loss=0.4392, simple_loss=0.4504, pruned_loss=0.2141, over 1614843.72 frames. 
], batch size: 23, lr: 4.20e-02, grad_scale: 8.0 +2023-02-05 18:54:04,631 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.658e+02 4.358e+02 5.438e+02 7.182e+02 1.797e+03, threshold=1.088e+03, percent-clipped=3.0 +2023-02-05 18:54:13,624 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5021.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:54:30,611 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5046.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:54:33,880 INFO [train.py:901] (0/4) Epoch 1, batch 5050, loss[loss=0.44, simple_loss=0.4351, pruned_loss=0.2225, over 7818.00 frames. ], tot_loss[loss=0.4385, simple_loss=0.4497, pruned_loss=0.2137, over 1614259.70 frames. ], batch size: 20, lr: 4.19e-02, grad_scale: 8.0 +2023-02-05 18:54:50,671 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 18:55:08,927 INFO [train.py:901] (0/4) Epoch 1, batch 5100, loss[loss=0.5021, simple_loss=0.4604, pruned_loss=0.2719, over 7431.00 frames. ], tot_loss[loss=0.4388, simple_loss=0.4498, pruned_loss=0.2139, over 1611802.45 frames. ], batch size: 17, lr: 4.18e-02, grad_scale: 8.0 +2023-02-05 18:55:13,606 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.507e+02 4.431e+02 5.257e+02 6.582e+02 1.311e+03, threshold=1.051e+03, percent-clipped=2.0 +2023-02-05 18:55:45,845 INFO [train.py:901] (0/4) Epoch 1, batch 5150, loss[loss=0.5332, simple_loss=0.5025, pruned_loss=0.282, over 6772.00 frames. ], tot_loss[loss=0.4385, simple_loss=0.4492, pruned_loss=0.2139, over 1605162.64 frames. ], batch size: 72, lr: 4.17e-02, grad_scale: 8.0 +2023-02-05 18:55:58,062 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.92 vs. limit=2.0 +2023-02-05 18:56:19,014 INFO [train.py:901] (0/4) Epoch 1, batch 5200, loss[loss=0.4029, simple_loss=0.4351, pruned_loss=0.1853, over 8298.00 frames. ], tot_loss[loss=0.4399, simple_loss=0.4503, pruned_loss=0.2148, over 1609685.69 frames. ], batch size: 23, lr: 4.16e-02, grad_scale: 8.0 +2023-02-05 18:56:23,457 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.91 vs. limit=2.0 +2023-02-05 18:56:23,573 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.039e+02 3.937e+02 5.264e+02 6.479e+02 1.558e+03, threshold=1.053e+03, percent-clipped=7.0 +2023-02-05 18:56:51,652 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 18:56:55,106 INFO [train.py:901] (0/4) Epoch 1, batch 5250, loss[loss=0.4481, simple_loss=0.4551, pruned_loss=0.2205, over 8471.00 frames. ], tot_loss[loss=0.4386, simple_loss=0.4495, pruned_loss=0.2139, over 1608978.18 frames. ], batch size: 25, lr: 4.15e-02, grad_scale: 8.0 +2023-02-05 18:57:28,853 INFO [train.py:901] (0/4) Epoch 1, batch 5300, loss[loss=0.4632, simple_loss=0.475, pruned_loss=0.2257, over 8142.00 frames. ], tot_loss[loss=0.4391, simple_loss=0.4497, pruned_loss=0.2143, over 1609366.74 frames. ], batch size: 22, lr: 4.14e-02, grad_scale: 8.0 +2023-02-05 18:57:33,642 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.076e+02 4.278e+02 4.955e+02 6.641e+02 1.586e+03, threshold=9.909e+02, percent-clipped=4.0 +2023-02-05 18:58:04,345 INFO [train.py:901] (0/4) Epoch 1, batch 5350, loss[loss=0.4458, simple_loss=0.4611, pruned_loss=0.2152, over 8340.00 frames. 
], tot_loss[loss=0.44, simple_loss=0.4505, pruned_loss=0.2148, over 1606720.97 frames. ], batch size: 26, lr: 4.13e-02, grad_scale: 8.0 +2023-02-05 18:58:39,814 INFO [train.py:901] (0/4) Epoch 1, batch 5400, loss[loss=0.5075, simple_loss=0.5052, pruned_loss=0.2549, over 8460.00 frames. ], tot_loss[loss=0.4376, simple_loss=0.4487, pruned_loss=0.2133, over 1609503.05 frames. ], batch size: 27, lr: 4.12e-02, grad_scale: 8.0 +2023-02-05 18:58:44,289 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.977e+02 4.515e+02 5.788e+02 7.308e+02 1.362e+03, threshold=1.158e+03, percent-clipped=5.0 +2023-02-05 18:58:55,202 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.5575, 3.8126, 3.2925, 1.3045, 3.0900, 3.2863, 3.3840, 2.9365], + device='cuda:0'), covar=tensor([0.0891, 0.0514, 0.0698, 0.3310, 0.0479, 0.0423, 0.1051, 0.0413], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0174, 0.0198, 0.0260, 0.0159, 0.0129, 0.0196, 0.0119], + device='cuda:0'), out_proj_covar=tensor([1.8102e-04, 1.2747e-04, 1.3337e-04, 1.7181e-04, 1.0701e-04, 9.4842e-05, + 1.4412e-04, 8.4881e-05], device='cuda:0') +2023-02-05 18:59:02,359 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-05 18:59:13,402 INFO [train.py:901] (0/4) Epoch 1, batch 5450, loss[loss=0.4618, simple_loss=0.4716, pruned_loss=0.226, over 8362.00 frames. ], tot_loss[loss=0.4366, simple_loss=0.4481, pruned_loss=0.2125, over 1610779.66 frames. ], batch size: 24, lr: 4.11e-02, grad_scale: 8.0 +2023-02-05 18:59:15,715 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8992, 2.6305, 1.2720, 2.3480, 2.4193, 1.9101, 1.4424, 2.8317], + device='cuda:0'), covar=tensor([0.2098, 0.0912, 0.1947, 0.1071, 0.1381, 0.1440, 0.2681, 0.1106], + device='cuda:0'), in_proj_covar=tensor([0.0283, 0.0198, 0.0318, 0.0222, 0.0282, 0.0242, 0.0306, 0.0260], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 18:59:40,660 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6373, 2.3750, 3.1723, 3.5214, 2.4714, 1.6304, 2.6761, 2.7430], + device='cuda:0'), covar=tensor([0.1863, 0.1110, 0.0380, 0.0351, 0.0825, 0.1055, 0.0760, 0.1067], + device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0150, 0.0103, 0.0120, 0.0157, 0.0170, 0.0175, 0.0180], + device='cuda:0'), out_proj_covar=tensor([1.4447e-04, 9.0765e-05, 6.1179e-05, 6.9521e-05, 8.9759e-05, 1.0079e-04, + 1.0105e-04, 1.0140e-04], device='cuda:0') +2023-02-05 18:59:41,787 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 18:59:49,950 INFO [train.py:901] (0/4) Epoch 1, batch 5500, loss[loss=0.3973, simple_loss=0.4166, pruned_loss=0.189, over 7538.00 frames. ], tot_loss[loss=0.4322, simple_loss=0.4454, pruned_loss=0.2095, over 1606800.07 frames. ], batch size: 18, lr: 4.10e-02, grad_scale: 8.0 +2023-02-05 18:59:54,515 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.397e+02 4.451e+02 5.295e+02 6.340e+02 1.239e+03, threshold=1.059e+03, percent-clipped=2.0 +2023-02-05 19:00:20,584 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. limit=2.0 +2023-02-05 19:00:23,629 INFO [train.py:901] (0/4) Epoch 1, batch 5550, loss[loss=0.3576, simple_loss=0.3754, pruned_loss=0.1698, over 7702.00 frames. ], tot_loss[loss=0.4335, simple_loss=0.4463, pruned_loss=0.2103, over 1609009.21 frames. 
], batch size: 18, lr: 4.09e-02, grad_scale: 8.0 +2023-02-05 19:00:38,976 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9490, 2.0214, 1.0804, 2.1668, 1.7620, 1.5244, 1.4376, 2.2409], + device='cuda:0'), covar=tensor([0.1208, 0.0878, 0.2006, 0.0686, 0.1222, 0.1342, 0.1932, 0.0776], + device='cuda:0'), in_proj_covar=tensor([0.0279, 0.0194, 0.0308, 0.0220, 0.0280, 0.0238, 0.0299, 0.0252], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 19:00:59,679 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1385, 0.9560, 2.0319, 0.2554, 1.4314, 0.9218, 0.4554, 1.5493], + device='cuda:0'), covar=tensor([0.0604, 0.0353, 0.0274, 0.0912, 0.0441, 0.0731, 0.0932, 0.0324], + device='cuda:0'), in_proj_covar=tensor([0.0137, 0.0100, 0.0080, 0.0133, 0.0097, 0.0146, 0.0136, 0.0106], + device='cuda:0'), out_proj_covar=tensor([9.8080e-05, 7.1079e-05, 5.9889e-05, 1.0724e-04, 7.9129e-05, 1.0947e-04, + 1.0497e-04, 7.5110e-05], device='cuda:0') +2023-02-05 19:01:00,921 INFO [train.py:901] (0/4) Epoch 1, batch 5600, loss[loss=0.5119, simple_loss=0.5017, pruned_loss=0.2611, over 8479.00 frames. ], tot_loss[loss=0.435, simple_loss=0.4486, pruned_loss=0.2107, over 1613718.90 frames. ], batch size: 49, lr: 4.08e-02, grad_scale: 8.0 +2023-02-05 19:01:05,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 3.916e+02 5.301e+02 6.582e+02 1.340e+03, threshold=1.060e+03, percent-clipped=3.0 +2023-02-05 19:01:13,361 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9036, 1.4048, 1.3805, 0.0715, 1.0511, 0.9622, 0.2635, 1.4158], + device='cuda:0'), covar=tensor([0.0672, 0.0292, 0.0340, 0.1088, 0.0647, 0.0784, 0.1071, 0.0301], + device='cuda:0'), in_proj_covar=tensor([0.0140, 0.0104, 0.0083, 0.0136, 0.0101, 0.0149, 0.0141, 0.0108], + device='cuda:0'), out_proj_covar=tensor([1.0051e-04, 7.3682e-05, 6.1927e-05, 1.0937e-04, 8.2345e-05, 1.1145e-04, + 1.0865e-04, 7.6480e-05], device='cuda:0') +2023-02-05 19:01:34,544 INFO [train.py:901] (0/4) Epoch 1, batch 5650, loss[loss=0.4862, simple_loss=0.4811, pruned_loss=0.2456, over 8453.00 frames. ], tot_loss[loss=0.4354, simple_loss=0.4486, pruned_loss=0.211, over 1613895.18 frames. ], batch size: 27, lr: 4.07e-02, grad_scale: 8.0 +2023-02-05 19:01:45,699 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 19:01:45,821 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5668.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:02:09,329 INFO [train.py:901] (0/4) Epoch 1, batch 5700, loss[loss=0.4094, simple_loss=0.4269, pruned_loss=0.1959, over 8680.00 frames. ], tot_loss[loss=0.4358, simple_loss=0.4483, pruned_loss=0.2117, over 1613234.54 frames. ], batch size: 49, lr: 4.06e-02, grad_scale: 8.0 +2023-02-05 19:02:15,263 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.140e+02 4.740e+02 5.744e+02 8.008e+02 1.790e+03, threshold=1.149e+03, percent-clipped=10.0 +2023-02-05 19:02:44,479 INFO [train.py:901] (0/4) Epoch 1, batch 5750, loss[loss=0.4475, simple_loss=0.4546, pruned_loss=0.2202, over 8607.00 frames. ], tot_loss[loss=0.4356, simple_loss=0.4489, pruned_loss=0.2111, over 1617568.36 frames. ], batch size: 31, lr: 4.05e-02, grad_scale: 8.0 +2023-02-05 19:02:51,404 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-05 19:02:59,828 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5773.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:03:09,173 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 19:03:18,269 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0276, 2.1333, 1.8130, 2.6818, 1.8048, 1.6720, 2.0197, 2.0499], + device='cuda:0'), covar=tensor([0.0952, 0.1269, 0.1276, 0.0274, 0.1800, 0.1659, 0.1620, 0.1247], + device='cuda:0'), in_proj_covar=tensor([0.0287, 0.0314, 0.0299, 0.0177, 0.0352, 0.0336, 0.0380, 0.0289], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 19:03:18,566 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.59 vs. limit=5.0 +2023-02-05 19:03:19,619 INFO [train.py:901] (0/4) Epoch 1, batch 5800, loss[loss=0.4281, simple_loss=0.4452, pruned_loss=0.2055, over 8249.00 frames. ], tot_loss[loss=0.4339, simple_loss=0.4486, pruned_loss=0.2096, over 1618185.41 frames. ], batch size: 24, lr: 4.04e-02, grad_scale: 8.0 +2023-02-05 19:03:24,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.671e+02 4.595e+02 5.667e+02 1.405e+03, threshold=9.190e+02, percent-clipped=2.0 +2023-02-05 19:03:57,239 INFO [train.py:901] (0/4) Epoch 1, batch 5850, loss[loss=0.4218, simple_loss=0.4454, pruned_loss=0.1991, over 8746.00 frames. ], tot_loss[loss=0.431, simple_loss=0.4466, pruned_loss=0.2077, over 1616855.27 frames. ], batch size: 30, lr: 4.03e-02, grad_scale: 8.0 +2023-02-05 19:04:15,187 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:04:32,485 INFO [train.py:901] (0/4) Epoch 1, batch 5900, loss[loss=0.4443, simple_loss=0.4598, pruned_loss=0.2144, over 8532.00 frames. ], tot_loss[loss=0.4304, simple_loss=0.4458, pruned_loss=0.2076, over 1613178.72 frames. ], batch size: 28, lr: 4.02e-02, grad_scale: 8.0 +2023-02-05 19:04:37,231 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 3.095e+02 4.155e+02 5.559e+02 6.668e+02 2.372e+03, threshold=1.112e+03, percent-clipped=6.0 +2023-02-05 19:05:09,341 INFO [train.py:901] (0/4) Epoch 1, batch 5950, loss[loss=0.4307, simple_loss=0.4447, pruned_loss=0.2084, over 8089.00 frames. ], tot_loss[loss=0.4308, simple_loss=0.4456, pruned_loss=0.208, over 1612027.49 frames. 
], batch size: 21, lr: 4.01e-02, grad_scale: 8.0 +2023-02-05 19:05:09,579 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5813, 2.0377, 3.2875, 1.1752, 2.4486, 2.0964, 1.5914, 2.1132], + device='cuda:0'), covar=tensor([0.1193, 0.1376, 0.0287, 0.1482, 0.1150, 0.1499, 0.1113, 0.1217], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0270, 0.0234, 0.0285, 0.0337, 0.0328, 0.0279, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 19:05:25,872 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3268, 1.4055, 2.1769, 2.0504, 1.7555, 1.2848, 1.3817, 1.7009], + device='cuda:0'), covar=tensor([0.1642, 0.0853, 0.0287, 0.0314, 0.0500, 0.0738, 0.0677, 0.0589], + device='cuda:0'), in_proj_covar=tensor([0.0267, 0.0170, 0.0118, 0.0139, 0.0178, 0.0182, 0.0189, 0.0209], + device='cuda:0'), out_proj_covar=tensor([1.5956e-04, 1.0466e-04, 7.1609e-05, 8.2486e-05, 1.0184e-04, 1.0822e-04, + 1.0891e-04, 1.1885e-04], device='cuda:0') +2023-02-05 19:05:42,943 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-6000.pt +2023-02-05 19:05:44,547 INFO [train.py:901] (0/4) Epoch 1, batch 6000, loss[loss=0.3618, simple_loss=0.3928, pruned_loss=0.1653, over 7648.00 frames. ], tot_loss[loss=0.4289, simple_loss=0.4444, pruned_loss=0.2068, over 1612615.76 frames. ], batch size: 19, lr: 4.00e-02, grad_scale: 16.0 +2023-02-05 19:05:44,547 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 19:06:02,002 INFO [train.py:935] (0/4) Epoch 1, validation: loss=0.3351, simple_loss=0.4011, pruned_loss=0.1346, over 944034.00 frames. +2023-02-05 19:06:02,003 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6432MB +2023-02-05 19:06:06,797 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 3.694e+02 4.999e+02 6.330e+02 1.596e+03, threshold=9.998e+02, percent-clipped=5.0 +2023-02-05 19:06:06,995 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:06:09,468 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6012.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:06:35,740 INFO [train.py:901] (0/4) Epoch 1, batch 6050, loss[loss=0.4445, simple_loss=0.4363, pruned_loss=0.2264, over 7791.00 frames. ], tot_loss[loss=0.4323, simple_loss=0.4456, pruned_loss=0.2096, over 1611324.39 frames. ], batch size: 19, lr: 3.99e-02, grad_scale: 8.0 +2023-02-05 19:06:42,855 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:12,055 INFO [train.py:901] (0/4) Epoch 1, batch 6100, loss[loss=0.4321, simple_loss=0.4487, pruned_loss=0.2077, over 7244.00 frames. ], tot_loss[loss=0.4295, simple_loss=0.4438, pruned_loss=0.2076, over 1609404.35 frames. ], batch size: 16, lr: 3.98e-02, grad_scale: 8.0 +2023-02-05 19:07:17,503 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.508e+02 4.942e+02 6.048e+02 7.564e+02 1.774e+03, threshold=1.210e+03, percent-clipped=15.0 +2023-02-05 19:07:23,140 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6117.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:29,001 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-05 19:07:29,786 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6127.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:07:45,974 INFO [train.py:901] (0/4) Epoch 1, batch 6150, loss[loss=0.455, simple_loss=0.4494, pruned_loss=0.2303, over 7185.00 frames. ], tot_loss[loss=0.4285, simple_loss=0.4426, pruned_loss=0.2072, over 1605306.05 frames. ], batch size: 71, lr: 3.97e-02, grad_scale: 8.0 +2023-02-05 19:07:47,369 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6153.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:03,891 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4694, 4.7264, 4.0296, 1.7956, 3.9306, 3.9635, 4.3250, 3.6023], + device='cuda:0'), covar=tensor([0.0800, 0.0328, 0.0615, 0.3348, 0.0365, 0.0505, 0.0745, 0.0444], + device='cuda:0'), in_proj_covar=tensor([0.0244, 0.0179, 0.0209, 0.0264, 0.0164, 0.0131, 0.0199, 0.0123], + device='cuda:0'), out_proj_covar=tensor([1.8648e-04, 1.2841e-04, 1.3574e-04, 1.7370e-04, 1.0699e-04, 9.3245e-05, + 1.4131e-04, 8.7974e-05], device='cuda:0') +2023-02-05 19:08:12,159 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6188.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:22,924 INFO [train.py:901] (0/4) Epoch 1, batch 6200, loss[loss=0.3855, simple_loss=0.4225, pruned_loss=0.1743, over 8462.00 frames. ], tot_loss[loss=0.4293, simple_loss=0.4429, pruned_loss=0.2078, over 1603102.77 frames. ], batch size: 29, lr: 3.96e-02, grad_scale: 8.0 +2023-02-05 19:08:28,568 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.743e+02 4.155e+02 5.130e+02 7.106e+02 1.864e+03, threshold=1.026e+03, percent-clipped=2.0 +2023-02-05 19:08:36,379 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:37,158 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:42,653 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6229.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:44,631 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:57,419 INFO [train.py:901] (0/4) Epoch 1, batch 6250, loss[loss=0.4364, simple_loss=0.4514, pruned_loss=0.2107, over 8587.00 frames. ], tot_loss[loss=0.4298, simple_loss=0.4443, pruned_loss=0.2076, over 1609182.34 frames. ], batch size: 39, lr: 3.95e-02, grad_scale: 8.0 +2023-02-05 19:09:20,013 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:09:22,612 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7410, 5.9164, 4.9264, 1.6712, 4.7188, 5.0888, 5.2375, 4.7758], + device='cuda:0'), covar=tensor([0.0653, 0.0271, 0.0686, 0.4229, 0.0360, 0.0485, 0.1045, 0.0428], + device='cuda:0'), in_proj_covar=tensor([0.0250, 0.0178, 0.0207, 0.0267, 0.0163, 0.0134, 0.0194, 0.0122], + device='cuda:0'), out_proj_covar=tensor([1.9024e-04, 1.2727e-04, 1.3407e-04, 1.7438e-04, 1.0577e-04, 9.5388e-05, + 1.3715e-04, 8.8490e-05], device='cuda:0') +2023-02-05 19:09:32,692 INFO [train.py:901] (0/4) Epoch 1, batch 6300, loss[loss=0.3905, simple_loss=0.4293, pruned_loss=0.1758, over 8337.00 frames. ], tot_loss[loss=0.4264, simple_loss=0.4419, pruned_loss=0.2055, over 1608993.98 frames. 
], batch size: 26, lr: 3.94e-02, grad_scale: 8.0 +2023-02-05 19:09:38,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.783e+02 4.352e+02 5.159e+02 6.362e+02 1.735e+03, threshold=1.032e+03, percent-clipped=4.0 +2023-02-05 19:09:44,350 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.85 vs. limit=2.0 +2023-02-05 19:09:56,805 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6335.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:07,345 INFO [train.py:901] (0/4) Epoch 1, batch 6350, loss[loss=0.3989, simple_loss=0.4283, pruned_loss=0.1848, over 8254.00 frames. ], tot_loss[loss=0.4274, simple_loss=0.4421, pruned_loss=0.2064, over 1610644.43 frames. ], batch size: 24, lr: 3.93e-02, grad_scale: 8.0 +2023-02-05 19:10:08,102 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6352.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:28,913 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6383.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:10:40,801 INFO [train.py:901] (0/4) Epoch 1, batch 6400, loss[loss=0.3611, simple_loss=0.3889, pruned_loss=0.1666, over 7695.00 frames. ], tot_loss[loss=0.4264, simple_loss=0.4412, pruned_loss=0.2058, over 1606558.23 frames. ], batch size: 18, lr: 3.92e-02, grad_scale: 8.0 +2023-02-05 19:10:43,626 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6405.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:45,790 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6408.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:10:46,251 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.785e+02 4.017e+02 4.991e+02 6.603e+02 1.156e+03, threshold=9.981e+02, percent-clipped=3.0 +2023-02-05 19:10:55,638 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-05 19:11:03,510 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4909, 1.5541, 2.5008, 1.7151, 1.5284, 1.7994, 0.5310, 1.3901], + device='cuda:0'), covar=tensor([0.0794, 0.0502, 0.0335, 0.0443, 0.0567, 0.0559, 0.1513, 0.0840], + device='cuda:0'), in_proj_covar=tensor([0.0134, 0.0106, 0.0097, 0.0126, 0.0117, 0.0084, 0.0165, 0.0140], + device='cuda:0'), out_proj_covar=tensor([1.1179e-04, 9.6465e-05, 7.9493e-05, 1.0016e-04, 1.0407e-04, 7.0098e-05, + 1.3672e-04, 1.1981e-04], device='cuda:0') +2023-02-05 19:11:16,785 INFO [train.py:901] (0/4) Epoch 1, batch 6450, loss[loss=0.5041, simple_loss=0.4796, pruned_loss=0.2643, over 7932.00 frames. ], tot_loss[loss=0.4244, simple_loss=0.4395, pruned_loss=0.2046, over 1604209.70 frames. 
], batch size: 20, lr: 3.91e-02, grad_scale: 8.0 +2023-02-05 19:11:27,803 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6467.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:36,481 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:39,740 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6485.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:41,897 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:47,710 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:50,294 INFO [train.py:901] (0/4) Epoch 1, batch 6500, loss[loss=0.4135, simple_loss=0.443, pruned_loss=0.1921, over 8081.00 frames. ], tot_loss[loss=0.4226, simple_loss=0.4387, pruned_loss=0.2033, over 1602788.12 frames. ], batch size: 21, lr: 3.90e-02, grad_scale: 8.0 +2023-02-05 19:11:55,446 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 4.204e+02 5.270e+02 6.161e+02 1.286e+03, threshold=1.054e+03, percent-clipped=6.0 +2023-02-05 19:11:58,508 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6513.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:03,286 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6520.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:11,142 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6532.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:25,065 INFO [train.py:901] (0/4) Epoch 1, batch 6550, loss[loss=0.3743, simple_loss=0.4125, pruned_loss=0.1681, over 8296.00 frames. ], tot_loss[loss=0.4229, simple_loss=0.4397, pruned_loss=0.203, over 1602634.64 frames. ], batch size: 23, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:12:35,937 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6565.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:37,958 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 19:12:41,463 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6573.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:53,791 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:57,648 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 19:13:00,396 INFO [train.py:901] (0/4) Epoch 1, batch 6600, loss[loss=0.3982, simple_loss=0.4209, pruned_loss=0.1877, over 8085.00 frames. ], tot_loss[loss=0.4207, simple_loss=0.4377, pruned_loss=0.2019, over 1603069.72 frames. 
], batch size: 21, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:13:05,684 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.999e+02 4.035e+02 4.985e+02 6.404e+02 1.328e+03, threshold=9.970e+02, percent-clipped=3.0 +2023-02-05 19:13:07,912 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:10,624 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:13,646 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. limit=2.0 +2023-02-05 19:13:18,642 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6628.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,561 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,598 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:34,162 INFO [train.py:901] (0/4) Epoch 1, batch 6650, loss[loss=0.4454, simple_loss=0.454, pruned_loss=0.2184, over 8253.00 frames. ], tot_loss[loss=0.4189, simple_loss=0.4367, pruned_loss=0.2005, over 1608372.91 frames. ], batch size: 22, lr: 3.88e-02, grad_scale: 8.0 +2023-02-05 19:13:43,591 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8948, 1.7504, 4.0044, 1.6373, 2.3474, 5.1310, 4.2041, 4.4923], + device='cuda:0'), covar=tensor([0.1746, 0.1977, 0.0246, 0.2241, 0.0913, 0.0157, 0.0261, 0.0351], + device='cuda:0'), in_proj_covar=tensor([0.0237, 0.0251, 0.0159, 0.0241, 0.0188, 0.0120, 0.0120, 0.0172], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 19:13:56,271 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:01,301 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:09,932 INFO [train.py:901] (0/4) Epoch 1, batch 6700, loss[loss=0.4743, simple_loss=0.4821, pruned_loss=0.2332, over 8449.00 frames. ], tot_loss[loss=0.4197, simple_loss=0.437, pruned_loss=0.2012, over 1610222.87 frames. ], batch size: 27, lr: 3.87e-02, grad_scale: 8.0 +2023-02-05 19:14:15,399 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.351e+02 4.140e+02 4.960e+02 6.260e+02 1.494e+03, threshold=9.921e+02, percent-clipped=3.0 +2023-02-05 19:14:25,020 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6723.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:38,636 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6743.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:42,137 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:43,751 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 19:14:44,015 INFO [train.py:901] (0/4) Epoch 1, batch 6750, loss[loss=0.3832, simple_loss=0.3894, pruned_loss=0.1885, over 7309.00 frames. ], tot_loss[loss=0.4187, simple_loss=0.4365, pruned_loss=0.2004, over 1608266.37 frames. 
], batch size: 16, lr: 3.86e-02, grad_scale: 8.0 +2023-02-05 19:14:45,643 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4388, 1.7946, 3.5202, 1.0551, 2.5121, 1.9931, 1.5893, 2.1658], + device='cuda:0'), covar=tensor([0.1153, 0.1651, 0.0348, 0.1658, 0.1135, 0.1810, 0.1110, 0.1480], + device='cuda:0'), in_proj_covar=tensor([0.0282, 0.0284, 0.0273, 0.0316, 0.0365, 0.0356, 0.0300, 0.0346], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 19:14:48,810 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6347, 3.8241, 3.1547, 1.3503, 3.1217, 3.0226, 3.3661, 2.4482], + device='cuda:0'), covar=tensor([0.1022, 0.0514, 0.0949, 0.4196, 0.0563, 0.0648, 0.1132, 0.0829], + device='cuda:0'), in_proj_covar=tensor([0.0252, 0.0184, 0.0217, 0.0278, 0.0169, 0.0134, 0.0203, 0.0127], + device='cuda:0'), out_proj_covar=tensor([1.8891e-04, 1.3284e-04, 1.4250e-04, 1.8164e-04, 1.1198e-04, 9.5753e-05, + 1.3944e-04, 9.4776e-05], device='cuda:0') +2023-02-05 19:15:00,928 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:14,377 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 19:15:19,964 INFO [train.py:901] (0/4) Epoch 1, batch 6800, loss[loss=0.4147, simple_loss=0.4435, pruned_loss=0.1929, over 8486.00 frames. ], tot_loss[loss=0.4207, simple_loss=0.4386, pruned_loss=0.2014, over 1611996.73 frames. ], batch size: 29, lr: 3.85e-02, grad_scale: 8.0 +2023-02-05 19:15:20,157 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6801.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:25,328 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 4.226e+02 5.434e+02 7.341e+02 1.725e+03, threshold=1.087e+03, percent-clipped=4.0 +2023-02-05 19:15:35,594 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6824.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:39,015 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6829.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:54,377 INFO [train.py:901] (0/4) Epoch 1, batch 6850, loss[loss=0.5122, simple_loss=0.5001, pruned_loss=0.2622, over 7150.00 frames. ], tot_loss[loss=0.4194, simple_loss=0.4376, pruned_loss=0.2006, over 1607690.62 frames. ], batch size: 71, lr: 3.84e-02, grad_scale: 8.0 +2023-02-05 19:16:04,825 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 19:16:05,246 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. 
limit=2.0 +2023-02-05 19:16:06,405 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:11,624 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8521, 2.1756, 1.2429, 2.0167, 1.7302, 1.1333, 1.4547, 2.2377], + device='cuda:0'), covar=tensor([0.1286, 0.0695, 0.1576, 0.0860, 0.1350, 0.1802, 0.1925, 0.0752], + device='cuda:0'), in_proj_covar=tensor([0.0323, 0.0226, 0.0346, 0.0269, 0.0319, 0.0289, 0.0341, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002], + device='cuda:0') +2023-02-05 19:16:23,433 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6893.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:25,435 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6896.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:16:29,307 INFO [train.py:901] (0/4) Epoch 1, batch 6900, loss[loss=0.4649, simple_loss=0.4763, pruned_loss=0.2268, over 8500.00 frames. ], tot_loss[loss=0.4213, simple_loss=0.4396, pruned_loss=0.2015, over 1612465.38 frames. ], batch size: 26, lr: 3.83e-02, grad_scale: 8.0 +2023-02-05 19:16:31,384 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6903.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:35,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.796e+02 4.754e+02 6.076e+02 1.448e+03, threshold=9.507e+02, percent-clipped=2.0 +2023-02-05 19:16:48,741 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6927.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:49,406 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:54,163 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7537, 2.7013, 4.5688, 1.1692, 3.0280, 2.3035, 1.9873, 2.5218], + device='cuda:0'), covar=tensor([0.1180, 0.1301, 0.0196, 0.1756, 0.1162, 0.1670, 0.1070, 0.1419], + device='cuda:0'), in_proj_covar=tensor([0.0294, 0.0296, 0.0273, 0.0324, 0.0378, 0.0353, 0.0306, 0.0359], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 19:16:54,871 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6936.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:56,878 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6939.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,414 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,459 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:03,372 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 19:17:05,068 INFO [train.py:901] (0/4) Epoch 1, batch 6950, loss[loss=0.4751, simple_loss=0.4732, pruned_loss=0.2385, over 7522.00 frames. ], tot_loss[loss=0.4231, simple_loss=0.4407, pruned_loss=0.2028, over 1610127.32 frames. ], batch size: 71, lr: 3.82e-02, grad_scale: 8.0 +2023-02-05 19:17:11,185 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-05 19:17:11,610 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.90 vs. limit=2.0 +2023-02-05 19:17:12,128 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6961.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:17,891 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:21,911 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.5587, 0.9968, 5.2835, 2.4663, 4.7035, 4.6266, 4.8982, 4.6401], + device='cuda:0'), covar=tensor([0.0190, 0.3879, 0.0175, 0.1101, 0.0736, 0.0199, 0.0214, 0.0257], + device='cuda:0'), in_proj_covar=tensor([0.0147, 0.0331, 0.0176, 0.0205, 0.0217, 0.0183, 0.0166, 0.0197], + device='cuda:0'), out_proj_covar=tensor([9.2478e-05, 1.8279e-04, 1.0824e-04, 1.3202e-04, 1.2577e-04, 1.0947e-04, + 1.0024e-04, 1.2553e-04], device='cuda:0') +2023-02-05 19:17:32,918 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6991.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:38,574 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6999.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:39,832 INFO [train.py:901] (0/4) Epoch 1, batch 7000, loss[loss=0.4256, simple_loss=0.4553, pruned_loss=0.198, over 8586.00 frames. ], tot_loss[loss=0.4223, simple_loss=0.4407, pruned_loss=0.2019, over 1616195.53 frames. ], batch size: 31, lr: 3.81e-02, grad_scale: 8.0 +2023-02-05 19:17:45,245 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.380e+02 4.090e+02 4.918e+02 6.048e+02 1.151e+03, threshold=9.836e+02, percent-clipped=6.0 +2023-02-05 19:17:57,730 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:16,022 INFO [train.py:901] (0/4) Epoch 1, batch 7050, loss[loss=0.4699, simple_loss=0.4837, pruned_loss=0.228, over 8187.00 frames. ], tot_loss[loss=0.4195, simple_loss=0.4384, pruned_loss=0.2003, over 1611155.01 frames. ], batch size: 23, lr: 3.80e-02, grad_scale: 8.0 +2023-02-05 19:18:26,085 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7066.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:18:32,282 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0699, 1.5155, 1.9857, 0.2340, 1.5557, 1.3517, 0.3954, 1.7320], + device='cuda:0'), covar=tensor([0.0528, 0.0249, 0.0208, 0.0836, 0.0381, 0.0583, 0.0796, 0.0246], + device='cuda:0'), in_proj_covar=tensor([0.0154, 0.0124, 0.0100, 0.0165, 0.0111, 0.0180, 0.0166, 0.0125], + device='cuda:0'), out_proj_covar=tensor([1.1376e-04, 9.1073e-05, 7.9094e-05, 1.2731e-04, 9.1348e-05, 1.4211e-04, + 1.2867e-04, 9.4121e-05], device='cuda:0') +2023-02-05 19:18:34,326 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0649, 1.6803, 3.0750, 3.0811, 2.2046, 1.5750, 1.5116, 2.0499], + device='cuda:0'), covar=tensor([0.1495, 0.1149, 0.0220, 0.0221, 0.0565, 0.0691, 0.0789, 0.0892], + device='cuda:0'), in_proj_covar=tensor([0.0329, 0.0227, 0.0150, 0.0172, 0.0226, 0.0227, 0.0238, 0.0271], + device='cuda:0'), out_proj_covar=tensor([1.9455e-04, 1.4054e-04, 9.0445e-05, 9.9180e-05, 1.2754e-04, 1.3444e-04, + 1.3773e-04, 1.5255e-04], device='cuda:0') +2023-02-05 19:18:44,483 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. 
limit=2.0 +2023-02-05 19:18:50,234 INFO [train.py:901] (0/4) Epoch 1, batch 7100, loss[loss=0.3659, simple_loss=0.3956, pruned_loss=0.1681, over 7821.00 frames. ], tot_loss[loss=0.4165, simple_loss=0.4363, pruned_loss=0.1983, over 1612547.62 frames. ], batch size: 19, lr: 3.79e-02, grad_scale: 8.0 +2023-02-05 19:18:53,876 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:55,758 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.791e+02 4.613e+02 6.150e+02 1.722e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 19:19:10,381 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9308, 0.9049, 1.8784, 0.9814, 1.0455, 1.6903, 0.2971, 0.9017], + device='cuda:0'), covar=tensor([0.0635, 0.0419, 0.0277, 0.0398, 0.0458, 0.0299, 0.1122, 0.0523], + device='cuda:0'), in_proj_covar=tensor([0.0130, 0.0107, 0.0100, 0.0125, 0.0115, 0.0075, 0.0156, 0.0129], + device='cuda:0'), out_proj_covar=tensor([1.0967e-04, 9.7546e-05, 8.4730e-05, 1.0243e-04, 1.0441e-04, 6.5917e-05, + 1.3322e-04, 1.1224e-04], device='cuda:0') +2023-02-05 19:19:18,813 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6566, 3.8603, 3.3118, 1.5576, 3.1499, 3.1784, 3.4983, 2.8510], + device='cuda:0'), covar=tensor([0.0811, 0.0463, 0.0703, 0.3179, 0.0428, 0.0563, 0.0781, 0.0455], + device='cuda:0'), in_proj_covar=tensor([0.0258, 0.0186, 0.0218, 0.0286, 0.0172, 0.0136, 0.0205, 0.0132], + device='cuda:0'), out_proj_covar=tensor([1.9254e-04, 1.3197e-04, 1.4281e-04, 1.8513e-04, 1.1163e-04, 9.7055e-05, + 1.3945e-04, 9.4535e-05], device='cuda:0') +2023-02-05 19:19:25,906 INFO [train.py:901] (0/4) Epoch 1, batch 7150, loss[loss=0.4762, simple_loss=0.4599, pruned_loss=0.2462, over 7965.00 frames. ], tot_loss[loss=0.414, simple_loss=0.4343, pruned_loss=0.1968, over 1613381.68 frames. ], batch size: 21, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:19:46,604 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7181.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:56,061 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7195.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,480 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,950 INFO [train.py:901] (0/4) Epoch 1, batch 7200, loss[loss=0.3936, simple_loss=0.4048, pruned_loss=0.1912, over 7413.00 frames. ], tot_loss[loss=0.4164, simple_loss=0.4359, pruned_loss=0.1984, over 1613380.46 frames. 
], batch size: 17, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:20:05,331 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.325e+02 4.231e+02 5.262e+02 7.053e+02 1.685e+03, threshold=1.052e+03, percent-clipped=7.0 +2023-02-05 19:20:13,032 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:16,297 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:25,377 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0395, 1.2869, 2.9631, 1.0306, 1.8174, 3.1365, 3.0391, 2.8847], + device='cuda:0'), covar=tensor([0.2116, 0.1924, 0.0411, 0.2567, 0.0991, 0.0304, 0.0340, 0.0422], + device='cuda:0'), in_proj_covar=tensor([0.0245, 0.0260, 0.0165, 0.0258, 0.0188, 0.0125, 0.0124, 0.0182], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 19:20:25,920 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7240.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:20:33,011 INFO [train.py:901] (0/4) Epoch 1, batch 7250, loss[loss=0.4558, simple_loss=0.4802, pruned_loss=0.2157, over 8198.00 frames. ], tot_loss[loss=0.4168, simple_loss=0.4365, pruned_loss=0.1985, over 1615588.43 frames. ], batch size: 23, lr: 3.77e-02, grad_scale: 8.0 +2023-02-05 19:20:48,466 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:03,980 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:09,002 INFO [train.py:901] (0/4) Epoch 1, batch 7300, loss[loss=0.4508, simple_loss=0.4696, pruned_loss=0.216, over 8475.00 frames. ], tot_loss[loss=0.4165, simple_loss=0.437, pruned_loss=0.198, over 1620166.23 frames. ], batch size: 29, lr: 3.76e-02, grad_scale: 8.0 +2023-02-05 19:21:14,313 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.413e+02 4.263e+02 5.448e+02 6.514e+02 1.215e+03, threshold=1.090e+03, percent-clipped=2.0 +2023-02-05 19:21:18,594 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7976, 2.0325, 1.9002, 2.7830, 1.2163, 1.2482, 1.7216, 1.8015], + device='cuda:0'), covar=tensor([0.1266, 0.1592, 0.1321, 0.0328, 0.2252, 0.2223, 0.2239, 0.1276], + device='cuda:0'), in_proj_covar=tensor([0.0309, 0.0323, 0.0305, 0.0201, 0.0348, 0.0339, 0.0389, 0.0293], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0004, 0.0003, 0.0002, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 19:21:42,633 INFO [train.py:901] (0/4) Epoch 1, batch 7350, loss[loss=0.3431, simple_loss=0.3691, pruned_loss=0.1586, over 5963.00 frames. ], tot_loss[loss=0.4169, simple_loss=0.4373, pruned_loss=0.1983, over 1620572.95 frames. ], batch size: 13, lr: 3.75e-02, grad_scale: 8.0 +2023-02-05 19:21:45,525 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7355.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:21:50,123 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7362.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:51,639 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 19:21:56,009 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-05 19:22:08,446 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7386.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:09,144 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7387.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:18,307 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 19:22:18,974 INFO [train.py:901] (0/4) Epoch 1, batch 7400, loss[loss=0.4382, simple_loss=0.4547, pruned_loss=0.2109, over 8491.00 frames. ], tot_loss[loss=0.4165, simple_loss=0.4368, pruned_loss=0.1981, over 1621267.96 frames. ], batch size: 39, lr: 3.74e-02, grad_scale: 8.0 +2023-02-05 19:22:24,409 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.824e+02 4.270e+02 5.603e+02 6.704e+02 2.452e+03, threshold=1.121e+03, percent-clipped=4.0 +2023-02-05 19:22:25,146 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7410.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:22:35,057 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:50,635 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9335, 4.0905, 3.4389, 1.5691, 3.3844, 3.3989, 3.5883, 3.1695], + device='cuda:0'), covar=tensor([0.1128, 0.0684, 0.1034, 0.4210, 0.0493, 0.0728, 0.2004, 0.0478], + device='cuda:0'), in_proj_covar=tensor([0.0266, 0.0182, 0.0224, 0.0292, 0.0173, 0.0137, 0.0208, 0.0132], + device='cuda:0'), out_proj_covar=tensor([1.9750e-04, 1.2763e-04, 1.4680e-04, 1.8740e-04, 1.1254e-04, 9.9242e-05, + 1.4099e-04, 9.3385e-05], device='cuda:0') +2023-02-05 19:22:52,553 INFO [train.py:901] (0/4) Epoch 1, batch 7450, loss[loss=0.4049, simple_loss=0.4349, pruned_loss=0.1874, over 8040.00 frames. ], tot_loss[loss=0.4154, simple_loss=0.4361, pruned_loss=0.1973, over 1618394.38 frames. ], batch size: 22, lr: 3.73e-02, grad_scale: 8.0 +2023-02-05 19:22:56,049 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 19:23:27,518 INFO [train.py:901] (0/4) Epoch 1, batch 7500, loss[loss=0.3906, simple_loss=0.4072, pruned_loss=0.187, over 5948.00 frames. ], tot_loss[loss=0.4129, simple_loss=0.434, pruned_loss=0.1959, over 1616524.45 frames. 
], batch size: 13, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:23:34,187 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 4.060e+02 5.044e+02 6.934e+02 1.457e+03, threshold=1.009e+03, percent-clipped=3.0 +2023-02-05 19:23:45,042 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7525.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:23:45,145 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7525.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:23:48,427 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2832, 4.4540, 3.7449, 1.6352, 3.6743, 3.3560, 4.0164, 3.1112], + device='cuda:0'), covar=tensor([0.0690, 0.0357, 0.0685, 0.3265, 0.0395, 0.0623, 0.0862, 0.0471], + device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0182, 0.0220, 0.0287, 0.0177, 0.0139, 0.0206, 0.0135], + device='cuda:0'), out_proj_covar=tensor([1.9864e-04, 1.2872e-04, 1.4452e-04, 1.8522e-04, 1.1414e-04, 1.0013e-04, + 1.4026e-04, 9.5095e-05], device='cuda:0') +2023-02-05 19:23:55,048 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3587, 1.9918, 1.6215, 1.4165, 2.1009, 1.7546, 2.1164, 2.2855], + device='cuda:0'), covar=tensor([0.1409, 0.1793, 0.2524, 0.2148, 0.1232, 0.1955, 0.1257, 0.1045], + device='cuda:0'), in_proj_covar=tensor([0.0271, 0.0285, 0.0297, 0.0280, 0.0264, 0.0259, 0.0257, 0.0248], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:0') +2023-02-05 19:24:02,231 INFO [train.py:901] (0/4) Epoch 1, batch 7550, loss[loss=0.396, simple_loss=0.4421, pruned_loss=0.1749, over 8201.00 frames. ], tot_loss[loss=0.4111, simple_loss=0.4328, pruned_loss=0.1947, over 1618273.81 frames. ], batch size: 23, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:24:09,368 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 19:24:36,299 INFO [train.py:901] (0/4) Epoch 1, batch 7600, loss[loss=0.4142, simple_loss=0.4428, pruned_loss=0.1928, over 8103.00 frames. ], tot_loss[loss=0.4121, simple_loss=0.4331, pruned_loss=0.1955, over 1619382.07 frames. 
], batch size: 23, lr: 3.71e-02, grad_scale: 8.0 +2023-02-05 19:24:41,739 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.765e+02 4.361e+02 5.460e+02 6.853e+02 1.164e+03, threshold=1.092e+03, percent-clipped=2.0 +2023-02-05 19:24:43,964 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7611.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:25:03,356 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7636.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:25:03,859 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7637.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:05,940 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7640.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,275 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,358 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:10,528 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8434, 5.9329, 4.9393, 1.7881, 4.8092, 5.2988, 5.4040, 4.8630], + device='cuda:0'), covar=tensor([0.0547, 0.0447, 0.0485, 0.3080, 0.0295, 0.0223, 0.0957, 0.0239], + device='cuda:0'), in_proj_covar=tensor([0.0263, 0.0178, 0.0219, 0.0281, 0.0178, 0.0135, 0.0209, 0.0133], + device='cuda:0'), out_proj_covar=tensor([1.9373e-04, 1.2532e-04, 1.4315e-04, 1.8169e-04, 1.1455e-04, 9.7686e-05, + 1.4215e-04, 9.5243e-05], device='cuda:0') +2023-02-05 19:25:13,186 INFO [train.py:901] (0/4) Epoch 1, batch 7650, loss[loss=0.3901, simple_loss=0.4256, pruned_loss=0.1773, over 8106.00 frames. ], tot_loss[loss=0.4097, simple_loss=0.4318, pruned_loss=0.1938, over 1617717.52 frames. ], batch size: 23, lr: 3.70e-02, grad_scale: 8.0 +2023-02-05 19:25:23,881 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7667.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:46,144 INFO [train.py:901] (0/4) Epoch 1, batch 7700, loss[loss=0.3548, simple_loss=0.3769, pruned_loss=0.1664, over 7536.00 frames. ], tot_loss[loss=0.4088, simple_loss=0.431, pruned_loss=0.1933, over 1619301.74 frames. ], batch size: 18, lr: 3.69e-02, grad_scale: 8.0 +2023-02-05 19:25:51,306 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.517e+02 4.083e+02 4.742e+02 6.161e+02 2.101e+03, threshold=9.483e+02, percent-clipped=6.0 +2023-02-05 19:25:52,867 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9765, 2.2388, 2.5784, 1.4834, 1.4755, 2.3191, 0.5874, 1.7914], + device='cuda:0'), covar=tensor([0.0480, 0.0396, 0.0278, 0.0393, 0.0580, 0.0494, 0.1368, 0.0532], + device='cuda:0'), in_proj_covar=tensor([0.0133, 0.0113, 0.0101, 0.0143, 0.0118, 0.0085, 0.0167, 0.0135], + device='cuda:0'), out_proj_covar=tensor([1.1308e-04, 1.0516e-04, 8.6391e-05, 1.1905e-04, 1.0963e-04, 7.6359e-05, + 1.4444e-04, 1.1963e-04], device='cuda:0') +2023-02-05 19:26:07,593 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 19:26:21,658 INFO [train.py:901] (0/4) Epoch 1, batch 7750, loss[loss=0.3355, simple_loss=0.3778, pruned_loss=0.1466, over 7210.00 frames. ], tot_loss[loss=0.4095, simple_loss=0.4314, pruned_loss=0.1938, over 1613678.07 frames. 
], batch size: 16, lr: 3.68e-02, grad_scale: 8.0 +2023-02-05 19:26:23,165 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7752.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:29,120 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:32,510 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0450, 2.1794, 1.7701, 2.8547, 1.3023, 1.0900, 1.7199, 2.3640], + device='cuda:0'), covar=tensor([0.1294, 0.1402, 0.1451, 0.0378, 0.2300, 0.2405, 0.2306, 0.1005], + device='cuda:0'), in_proj_covar=tensor([0.0299, 0.0323, 0.0306, 0.0198, 0.0342, 0.0330, 0.0386, 0.0290], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0004, 0.0003, 0.0002, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 19:26:34,413 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7769.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:42,667 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7781.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:26:56,374 INFO [train.py:901] (0/4) Epoch 1, batch 7800, loss[loss=0.3816, simple_loss=0.3928, pruned_loss=0.1852, over 7650.00 frames. ], tot_loss[loss=0.4092, simple_loss=0.4312, pruned_loss=0.1936, over 1615879.31 frames. ], batch size: 19, lr: 3.67e-02, grad_scale: 8.0 +2023-02-05 19:26:59,817 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7806.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:27:01,642 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.720e+02 4.585e+02 5.523e+02 1.290e+03, threshold=9.170e+02, percent-clipped=3.0 +2023-02-05 19:27:09,242 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:29,715 INFO [train.py:901] (0/4) Epoch 1, batch 7850, loss[loss=0.3606, simple_loss=0.3854, pruned_loss=0.1679, over 7686.00 frames. ], tot_loss[loss=0.4104, simple_loss=0.4316, pruned_loss=0.1946, over 1613779.23 frames. 
], batch size: 18, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:27:52,020 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7884.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:59,877 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7896.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:00,498 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8920, 2.1595, 1.9881, 2.8863, 1.3694, 1.3482, 1.8499, 2.2002], + device='cuda:0'), covar=tensor([0.1156, 0.1200, 0.1206, 0.0313, 0.2147, 0.2028, 0.2009, 0.1254], + device='cuda:0'), in_proj_covar=tensor([0.0301, 0.0328, 0.0315, 0.0200, 0.0341, 0.0341, 0.0388, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0002, 0.0004, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 19:28:01,773 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2377, 1.0795, 3.2088, 1.1920, 2.6632, 2.6740, 2.6845, 2.8027], + device='cuda:0'), covar=tensor([0.0303, 0.2672, 0.0316, 0.1348, 0.0870, 0.0368, 0.0333, 0.0411], + device='cuda:0'), in_proj_covar=tensor([0.0161, 0.0343, 0.0186, 0.0211, 0.0241, 0.0205, 0.0181, 0.0204], + device='cuda:0'), out_proj_covar=tensor([9.8768e-05, 1.8720e-04, 1.1075e-04, 1.3309e-04, 1.3541e-04, 1.1884e-04, + 1.0693e-04, 1.2485e-04], device='cuda:0') +2023-02-05 19:28:03,027 INFO [train.py:901] (0/4) Epoch 1, batch 7900, loss[loss=0.4486, simple_loss=0.46, pruned_loss=0.2186, over 7204.00 frames. ], tot_loss[loss=0.4098, simple_loss=0.4316, pruned_loss=0.1941, over 1613338.94 frames. ], batch size: 71, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:28:08,440 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.732e+02 4.923e+02 6.190e+02 1.863e+03, threshold=9.845e+02, percent-clipped=5.0 +2023-02-05 19:28:16,371 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:28,389 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-05 19:28:35,814 INFO [train.py:901] (0/4) Epoch 1, batch 7950, loss[loss=0.4244, simple_loss=0.4465, pruned_loss=0.2012, over 8525.00 frames. ], tot_loss[loss=0.4094, simple_loss=0.4319, pruned_loss=0.1934, over 1616138.65 frames. ], batch size: 28, lr: 3.65e-02, grad_scale: 8.0 +2023-02-05 19:28:53,533 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 19:28:59,117 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7986.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:07,277 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9336, 1.9678, 3.4120, 1.3462, 2.6411, 2.3853, 1.8825, 2.5455], + device='cuda:0'), covar=tensor([0.0887, 0.1256, 0.0261, 0.1521, 0.0896, 0.1096, 0.0804, 0.1132], + device='cuda:0'), in_proj_covar=tensor([0.0323, 0.0320, 0.0314, 0.0345, 0.0402, 0.0371, 0.0321, 0.0382], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 19:29:08,526 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-8000.pt +2023-02-05 19:29:10,060 INFO [train.py:901] (0/4) Epoch 1, batch 8000, loss[loss=0.4953, simple_loss=0.492, pruned_loss=0.2493, over 8279.00 frames. ], tot_loss[loss=0.4093, simple_loss=0.432, pruned_loss=0.1933, over 1616236.33 frames. 
], batch size: 23, lr: 3.64e-02, grad_scale: 8.0 +2023-02-05 19:29:15,109 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:15,550 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.650e+02 3.959e+02 4.934e+02 6.403e+02 1.426e+03, threshold=9.868e+02, percent-clipped=4.0 +2023-02-05 19:29:20,569 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.03 vs. limit=2.0 +2023-02-05 19:29:31,421 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8033.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:35,305 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3756, 2.0224, 1.8063, 1.4791, 2.8661, 2.0530, 3.0467, 3.1157], + device='cuda:0'), covar=tensor([0.1237, 0.2588, 0.2834, 0.2503, 0.1107, 0.2344, 0.1162, 0.1068], + device='cuda:0'), in_proj_covar=tensor([0.0263, 0.0287, 0.0302, 0.0282, 0.0260, 0.0262, 0.0259, 0.0248], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:0') +2023-02-05 19:29:42,078 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-05 19:29:43,013 INFO [train.py:901] (0/4) Epoch 1, batch 8050, loss[loss=0.3777, simple_loss=0.3897, pruned_loss=0.1829, over 7250.00 frames. ], tot_loss[loss=0.4075, simple_loss=0.43, pruned_loss=0.1925, over 1609721.87 frames. ], batch size: 16, lr: 3.63e-02, grad_scale: 16.0 +2023-02-05 19:30:05,327 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-1.pt +2023-02-05 19:30:17,284 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 19:30:20,858 INFO [train.py:901] (0/4) Epoch 2, batch 0, loss[loss=0.4617, simple_loss=0.4599, pruned_loss=0.2317, over 8598.00 frames. ], tot_loss[loss=0.4617, simple_loss=0.4599, pruned_loss=0.2317, over 8598.00 frames. ], batch size: 31, lr: 3.56e-02, grad_scale: 8.0 +2023-02-05 19:30:20,859 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 19:30:32,400 INFO [train.py:935] (0/4) Epoch 2, validation: loss=0.3107, simple_loss=0.3861, pruned_loss=0.1176, over 944034.00 frames. +2023-02-05 19:30:32,401 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6469MB +2023-02-05 19:30:41,565 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.11 vs. limit=2.0 +2023-02-05 19:30:44,064 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8101.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:46,609 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-05 19:30:46,668 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:49,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 3.846e+02 4.676e+02 6.027e+02 1.450e+03, threshold=9.352e+02, percent-clipped=5.0 +2023-02-05 19:31:04,191 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0978, 1.4299, 1.3720, 0.3850, 1.2847, 1.1131, 0.2176, 1.4017], + device='cuda:0'), covar=tensor([0.0244, 0.0134, 0.0236, 0.0477, 0.0223, 0.0548, 0.0615, 0.0154], + device='cuda:0'), in_proj_covar=tensor([0.0163, 0.0123, 0.0102, 0.0169, 0.0118, 0.0195, 0.0172, 0.0131], + device='cuda:0'), out_proj_covar=tensor([1.1774e-04, 8.8088e-05, 8.2302e-05, 1.2591e-04, 9.3992e-05, 1.5349e-04, + 1.3041e-04, 9.5800e-05], device='cuda:0') +2023-02-05 19:31:06,756 INFO [train.py:901] (0/4) Epoch 2, batch 50, loss[loss=0.455, simple_loss=0.4591, pruned_loss=0.2254, over 8237.00 frames. ], tot_loss[loss=0.4048, simple_loss=0.4289, pruned_loss=0.1903, over 362283.81 frames. ], batch size: 22, lr: 3.55e-02, grad_scale: 8.0 +2023-02-05 19:31:11,118 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8140.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:12,615 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.91 vs. limit=5.0 +2023-02-05 19:31:20,789 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 19:31:23,162 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=5.26 vs. limit=5.0 +2023-02-05 19:31:28,325 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:29,161 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8165.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:41,604 INFO [train.py:901] (0/4) Epoch 2, batch 100, loss[loss=0.3335, simple_loss=0.3771, pruned_loss=0.145, over 8191.00 frames. ], tot_loss[loss=0.4035, simple_loss=0.4278, pruned_loss=0.1897, over 639893.19 frames. ], batch size: 23, lr: 3.54e-02, grad_scale: 8.0 +2023-02-05 19:31:44,284 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 19:31:59,433 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.246e+02 4.943e+02 6.491e+02 9.375e+02, threshold=9.885e+02, percent-clipped=1.0 +2023-02-05 19:32:06,341 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:15,466 INFO [train.py:901] (0/4) Epoch 2, batch 150, loss[loss=0.4276, simple_loss=0.4586, pruned_loss=0.1983, over 8341.00 frames. ], tot_loss[loss=0.4066, simple_loss=0.431, pruned_loss=0.1911, over 860906.30 frames. ], batch size: 26, lr: 3.53e-02, grad_scale: 8.0 +2023-02-05 19:32:47,318 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. 
limit=2.0 +2023-02-05 19:32:47,798 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,390 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8283.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,933 INFO [train.py:901] (0/4) Epoch 2, batch 200, loss[loss=0.376, simple_loss=0.4003, pruned_loss=0.1758, over 7811.00 frames. ], tot_loss[loss=0.4036, simple_loss=0.4282, pruned_loss=0.1895, over 1026502.68 frames. ], batch size: 20, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:33:08,598 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.581e+02 3.727e+02 4.975e+02 6.903e+02 1.681e+03, threshold=9.950e+02, percent-clipped=7.0 +2023-02-05 19:33:24,841 INFO [train.py:901] (0/4) Epoch 2, batch 250, loss[loss=0.3636, simple_loss=0.394, pruned_loss=0.1666, over 7934.00 frames. ], tot_loss[loss=0.4063, simple_loss=0.4299, pruned_loss=0.1914, over 1155217.99 frames. ], batch size: 20, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:33:36,305 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 19:33:40,664 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:46,005 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 19:33:58,438 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:59,606 INFO [train.py:901] (0/4) Epoch 2, batch 300, loss[loss=0.3976, simple_loss=0.4396, pruned_loss=0.1778, over 8596.00 frames. ], tot_loss[loss=0.4023, simple_loss=0.4271, pruned_loss=0.1888, over 1258728.42 frames. ], batch size: 34, lr: 3.51e-02, grad_scale: 8.0 +2023-02-05 19:34:18,661 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 4.043e+02 4.737e+02 5.583e+02 9.957e+02, threshold=9.474e+02, percent-clipped=1.0 +2023-02-05 19:34:35,499 INFO [train.py:901] (0/4) Epoch 2, batch 350, loss[loss=0.407, simple_loss=0.4234, pruned_loss=0.1953, over 8609.00 frames. ], tot_loss[loss=0.4034, simple_loss=0.4279, pruned_loss=0.1894, over 1340989.84 frames. ], batch size: 34, lr: 3.50e-02, grad_scale: 8.0 +2023-02-05 19:34:43,511 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6134, 3.3278, 1.8558, 2.1950, 2.1146, 1.9995, 1.7630, 3.0018], + device='cuda:0'), covar=tensor([0.1974, 0.0503, 0.1078, 0.1339, 0.1399, 0.1211, 0.1982, 0.1202], + device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0224, 0.0344, 0.0295, 0.0336, 0.0300, 0.0346, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 19:35:03,547 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:09,448 INFO [train.py:901] (0/4) Epoch 2, batch 400, loss[loss=0.3779, simple_loss=0.4045, pruned_loss=0.1757, over 7792.00 frames. ], tot_loss[loss=0.4036, simple_loss=0.4278, pruned_loss=0.1897, over 1401292.78 frames. 
], batch size: 19, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:20,905 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:27,438 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.339e+02 4.887e+02 6.099e+02 1.134e+03, threshold=9.773e+02, percent-clipped=6.0 +2023-02-05 19:35:43,489 INFO [train.py:901] (0/4) Epoch 2, batch 450, loss[loss=0.3683, simple_loss=0.4114, pruned_loss=0.1626, over 8292.00 frames. ], tot_loss[loss=0.4026, simple_loss=0.4271, pruned_loss=0.1891, over 1449852.68 frames. ], batch size: 23, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:44,349 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:01,871 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:17,995 INFO [train.py:901] (0/4) Epoch 2, batch 500, loss[loss=0.3658, simple_loss=0.4024, pruned_loss=0.1645, over 8237.00 frames. ], tot_loss[loss=0.401, simple_loss=0.4264, pruned_loss=0.1878, over 1487848.81 frames. ], batch size: 22, lr: 3.48e-02, grad_scale: 8.0 +2023-02-05 19:36:36,148 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.361e+02 3.910e+02 4.803e+02 5.619e+02 9.699e+02, threshold=9.605e+02, percent-clipped=0.0 +2023-02-05 19:36:47,548 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8627.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:52,666 INFO [train.py:901] (0/4) Epoch 2, batch 550, loss[loss=0.3543, simple_loss=0.3886, pruned_loss=0.16, over 7796.00 frames. ], tot_loss[loss=0.3973, simple_loss=0.4236, pruned_loss=0.1855, over 1516448.79 frames. ], batch size: 20, lr: 3.47e-02, grad_scale: 8.0 +2023-02-05 19:37:26,521 INFO [train.py:901] (0/4) Epoch 2, batch 600, loss[loss=0.4646, simple_loss=0.4721, pruned_loss=0.2286, over 8610.00 frames. ], tot_loss[loss=0.3976, simple_loss=0.4241, pruned_loss=0.1856, over 1537952.71 frames. ], batch size: 39, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:37:43,312 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.752e+02 3.934e+02 5.073e+02 6.758e+02 1.500e+03, threshold=1.015e+03, percent-clipped=5.0 +2023-02-05 19:37:44,745 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 19:37:52,109 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4957, 2.2759, 1.2520, 1.9372, 2.0923, 1.4050, 1.2713, 2.2036], + device='cuda:0'), covar=tensor([0.1390, 0.0671, 0.1392, 0.0833, 0.0884, 0.1280, 0.1452, 0.0765], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0239, 0.0352, 0.0298, 0.0337, 0.0314, 0.0346, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 19:37:59,728 INFO [train.py:901] (0/4) Epoch 2, batch 650, loss[loss=0.4464, simple_loss=0.4488, pruned_loss=0.222, over 8511.00 frames. ], tot_loss[loss=0.3958, simple_loss=0.422, pruned_loss=0.1848, over 1553681.48 frames. 
], batch size: 26, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:38:05,396 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:31,188 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:35,550 INFO [train.py:901] (0/4) Epoch 2, batch 700, loss[loss=0.4667, simple_loss=0.4625, pruned_loss=0.2354, over 7968.00 frames. ], tot_loss[loss=0.3954, simple_loss=0.4218, pruned_loss=0.1845, over 1567560.93 frames. ], batch size: 21, lr: 3.45e-02, grad_scale: 8.0 +2023-02-05 19:38:53,121 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.421e+02 3.759e+02 4.676e+02 6.060e+02 1.461e+03, threshold=9.352e+02, percent-clipped=1.0 +2023-02-05 19:38:53,977 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6504, 1.3122, 2.9010, 1.0436, 1.9629, 3.3357, 2.9457, 2.8201], + device='cuda:0'), covar=tensor([0.1369, 0.1890, 0.0431, 0.2504, 0.0839, 0.0256, 0.0364, 0.0475], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0253, 0.0171, 0.0249, 0.0186, 0.0137, 0.0133, 0.0193], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 19:39:09,181 INFO [train.py:901] (0/4) Epoch 2, batch 750, loss[loss=0.3341, simple_loss=0.3748, pruned_loss=0.1466, over 7979.00 frames. ], tot_loss[loss=0.3961, simple_loss=0.4225, pruned_loss=0.1848, over 1582762.58 frames. ], batch size: 21, lr: 3.44e-02, grad_scale: 8.0 +2023-02-05 19:39:22,679 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 19:39:26,420 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 19:39:35,552 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 19:39:44,334 INFO [train.py:901] (0/4) Epoch 2, batch 800, loss[loss=0.4221, simple_loss=0.4507, pruned_loss=0.1967, over 8447.00 frames. ], tot_loss[loss=0.3989, simple_loss=0.4247, pruned_loss=0.1866, over 1594996.16 frames. ], batch size: 24, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:02,289 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 4.043e+02 5.225e+02 6.708e+02 1.302e+03, threshold=1.045e+03, percent-clipped=9.0 +2023-02-05 19:40:18,497 INFO [train.py:901] (0/4) Epoch 2, batch 850, loss[loss=0.3637, simple_loss=0.3781, pruned_loss=0.1746, over 7713.00 frames. ], tot_loss[loss=0.3986, simple_loss=0.4245, pruned_loss=0.1864, over 1600417.35 frames. 
], batch size: 18, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:26,053 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8945.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:40:49,494 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0701, 1.7942, 1.8302, 0.2684, 1.6793, 1.0991, 0.2411, 1.6166], + device='cuda:0'), covar=tensor([0.0343, 0.0131, 0.0170, 0.0662, 0.0250, 0.0530, 0.0614, 0.0191], + device='cuda:0'), in_proj_covar=tensor([0.0166, 0.0126, 0.0106, 0.0171, 0.0121, 0.0204, 0.0176, 0.0145], + device='cuda:0'), out_proj_covar=tensor([1.1747e-04, 8.9364e-05, 8.1633e-05, 1.2431e-04, 9.4079e-05, 1.5639e-04, + 1.3026e-04, 1.0699e-04], device='cuda:0') +2023-02-05 19:40:52,660 INFO [train.py:901] (0/4) Epoch 2, batch 900, loss[loss=0.4604, simple_loss=0.4814, pruned_loss=0.2197, over 8507.00 frames. ], tot_loss[loss=0.3956, simple_loss=0.423, pruned_loss=0.1841, over 1607216.89 frames. ], batch size: 26, lr: 3.42e-02, grad_scale: 8.0 +2023-02-05 19:41:03,966 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8998.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:08,143 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9004.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:12,013 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.317e+02 3.660e+02 4.402e+02 6.333e+02 1.420e+03, threshold=8.805e+02, percent-clipped=4.0 +2023-02-05 19:41:15,319 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0 +2023-02-05 19:41:21,870 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9023.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:27,241 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9031.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:29,119 INFO [train.py:901] (0/4) Epoch 2, batch 950, loss[loss=0.3412, simple_loss=0.3939, pruned_loss=0.1442, over 8356.00 frames. ], tot_loss[loss=0.3959, simple_loss=0.4233, pruned_loss=0.1843, over 1613082.77 frames. ], batch size: 24, lr: 3.41e-02, grad_scale: 8.0 +2023-02-05 19:41:57,094 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 19:41:58,730 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9408, 2.2247, 3.5548, 1.2256, 2.7238, 2.2597, 1.9785, 2.3487], + device='cuda:0'), covar=tensor([0.0807, 0.1062, 0.0301, 0.1475, 0.0885, 0.1348, 0.0714, 0.1240], + device='cuda:0'), in_proj_covar=tensor([0.0330, 0.0325, 0.0334, 0.0369, 0.0427, 0.0396, 0.0339, 0.0411], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 19:42:04,019 INFO [train.py:901] (0/4) Epoch 2, batch 1000, loss[loss=0.3647, simple_loss=0.4108, pruned_loss=0.1593, over 8197.00 frames. ], tot_loss[loss=0.394, simple_loss=0.4222, pruned_loss=0.1829, over 1613542.44 frames. 
], batch size: 23, lr: 3.40e-02, grad_scale: 8.0 +2023-02-05 19:42:22,620 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.505e+02 3.676e+02 4.681e+02 5.718e+02 9.745e+02, threshold=9.362e+02, percent-clipped=2.0 +2023-02-05 19:42:30,648 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9122.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:42:31,268 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 19:42:39,157 INFO [train.py:901] (0/4) Epoch 2, batch 1050, loss[loss=0.4084, simple_loss=0.429, pruned_loss=0.1939, over 8034.00 frames. ], tot_loss[loss=0.3939, simple_loss=0.4221, pruned_loss=0.1828, over 1614320.46 frames. ], batch size: 22, lr: 3.40e-02, grad_scale: 8.0 +2023-02-05 19:42:43,232 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 19:43:12,153 INFO [train.py:901] (0/4) Epoch 2, batch 1100, loss[loss=0.4099, simple_loss=0.4377, pruned_loss=0.1911, over 8494.00 frames. ], tot_loss[loss=0.3943, simple_loss=0.4219, pruned_loss=0.1834, over 1614089.15 frames. ], batch size: 28, lr: 3.39e-02, grad_scale: 8.0 +2023-02-05 19:43:30,062 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.895e+02 4.986e+02 6.293e+02 1.172e+03, threshold=9.973e+02, percent-clipped=2.0 +2023-02-05 19:43:39,797 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0 +2023-02-05 19:43:47,492 INFO [train.py:901] (0/4) Epoch 2, batch 1150, loss[loss=0.3914, simple_loss=0.4024, pruned_loss=0.1902, over 7543.00 frames. ], tot_loss[loss=0.3931, simple_loss=0.4206, pruned_loss=0.1828, over 1612810.51 frames. ], batch size: 18, lr: 3.38e-02, grad_scale: 8.0 +2023-02-05 19:43:49,769 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9237.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:43:51,007 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 19:44:22,137 INFO [train.py:901] (0/4) Epoch 2, batch 1200, loss[loss=0.3766, simple_loss=0.4046, pruned_loss=0.1743, over 7805.00 frames. ], tot_loss[loss=0.3919, simple_loss=0.4203, pruned_loss=0.1818, over 1612087.57 frames. ], batch size: 20, lr: 3.38e-02, grad_scale: 8.0 +2023-02-05 19:44:25,529 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9289.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:44:41,025 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.810e+02 4.160e+02 4.885e+02 6.720e+02 4.965e+03, threshold=9.769e+02, percent-clipped=5.0 +2023-02-05 19:44:56,723 INFO [train.py:901] (0/4) Epoch 2, batch 1250, loss[loss=0.4183, simple_loss=0.4435, pruned_loss=0.1966, over 8143.00 frames. ], tot_loss[loss=0.3937, simple_loss=0.4212, pruned_loss=0.1831, over 1612584.14 frames. ], batch size: 22, lr: 3.37e-02, grad_scale: 4.0 +2023-02-05 19:45:07,486 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9348.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:25,822 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9375.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:31,822 INFO [train.py:901] (0/4) Epoch 2, batch 1300, loss[loss=0.3319, simple_loss=0.3835, pruned_loss=0.1402, over 8122.00 frames. 
], tot_loss[loss=0.3904, simple_loss=0.4187, pruned_loss=0.181, over 1609873.95 frames. ], batch size: 22, lr: 3.36e-02, grad_scale: 4.0 +2023-02-05 19:45:45,732 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9404.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:50,302 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.161e+02 4.162e+02 5.656e+02 7.688e+02 2.529e+03, threshold=1.131e+03, percent-clipped=11.0 +2023-02-05 19:46:05,022 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:06,286 INFO [train.py:901] (0/4) Epoch 2, batch 1350, loss[loss=0.3632, simple_loss=0.3869, pruned_loss=0.1697, over 7290.00 frames. ], tot_loss[loss=0.388, simple_loss=0.4172, pruned_loss=0.1794, over 1613935.16 frames. ], batch size: 16, lr: 3.36e-02, grad_scale: 4.0 +2023-02-05 19:46:27,283 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:41,356 INFO [train.py:901] (0/4) Epoch 2, batch 1400, loss[loss=0.3931, simple_loss=0.4285, pruned_loss=0.1789, over 8607.00 frames. ], tot_loss[loss=0.3884, simple_loss=0.418, pruned_loss=0.1794, over 1616846.02 frames. ], batch size: 39, lr: 3.35e-02, grad_scale: 4.0 +2023-02-05 19:46:45,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9490.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:47,577 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9493.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:59,478 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.192e+02 3.889e+02 4.981e+02 6.326e+02 1.555e+03, threshold=9.962e+02, percent-clipped=1.0 +2023-02-05 19:47:04,243 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9518.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:47:15,060 INFO [train.py:901] (0/4) Epoch 2, batch 1450, loss[loss=0.3909, simple_loss=0.417, pruned_loss=0.1824, over 7975.00 frames. ], tot_loss[loss=0.3894, simple_loss=0.4186, pruned_loss=0.1801, over 1615037.99 frames. ], batch size: 21, lr: 3.34e-02, grad_scale: 4.0 +2023-02-05 19:47:19,054 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 19:47:27,883 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 19:47:39,011 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6152, 1.8507, 1.7021, 2.4627, 1.2572, 1.2036, 1.7140, 1.8874], + device='cuda:0'), covar=tensor([0.1265, 0.1156, 0.1391, 0.0489, 0.1700, 0.2197, 0.1361, 0.0978], + device='cuda:0'), in_proj_covar=tensor([0.0313, 0.0338, 0.0324, 0.0210, 0.0340, 0.0352, 0.0394, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 19:47:49,250 INFO [train.py:901] (0/4) Epoch 2, batch 1500, loss[loss=0.4647, simple_loss=0.4807, pruned_loss=0.2243, over 8460.00 frames. ], tot_loss[loss=0.3909, simple_loss=0.4205, pruned_loss=0.1807, over 1617453.94 frames. 
], batch size: 27, lr: 3.33e-02, grad_scale: 4.0 +2023-02-05 19:47:59,299 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8075, 2.3278, 1.5052, 2.3467, 2.0978, 1.5722, 1.7994, 2.3122], + device='cuda:0'), covar=tensor([0.1493, 0.0693, 0.1188, 0.0716, 0.0876, 0.1097, 0.1523, 0.0749], + device='cuda:0'), in_proj_covar=tensor([0.0371, 0.0248, 0.0361, 0.0302, 0.0346, 0.0316, 0.0372, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 19:48:01,348 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:07,896 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.496e+02 4.006e+02 4.905e+02 6.157e+02 1.300e+03, threshold=9.811e+02, percent-clipped=3.0 +2023-02-05 19:48:23,388 INFO [train.py:901] (0/4) Epoch 2, batch 1550, loss[loss=0.3957, simple_loss=0.4356, pruned_loss=0.1779, over 8314.00 frames. ], tot_loss[loss=0.3891, simple_loss=0.4187, pruned_loss=0.1798, over 1617764.47 frames. ], batch size: 25, lr: 3.33e-02, grad_scale: 4.0 +2023-02-05 19:48:41,689 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9660.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:52,351 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9676.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:57,577 INFO [train.py:901] (0/4) Epoch 2, batch 1600, loss[loss=0.3433, simple_loss=0.3757, pruned_loss=0.1554, over 7829.00 frames. ], tot_loss[loss=0.3889, simple_loss=0.4184, pruned_loss=0.1797, over 1617059.82 frames. ], batch size: 20, lr: 3.32e-02, grad_scale: 8.0 +2023-02-05 19:48:58,385 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9685.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:03,003 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3256, 4.5789, 3.8996, 1.9318, 3.8049, 3.8520, 4.1465, 3.6138], + device='cuda:0'), covar=tensor([0.0945, 0.0402, 0.0922, 0.3471, 0.0446, 0.0483, 0.0964, 0.0388], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0199, 0.0242, 0.0318, 0.0204, 0.0157, 0.0225, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 19:49:17,084 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.192e+02 5.177e+02 6.492e+02 1.266e+03, threshold=1.035e+03, percent-clipped=2.0 +2023-02-05 19:49:22,879 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9719.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:30,586 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. limit=2.0 +2023-02-05 19:49:33,628 INFO [train.py:901] (0/4) Epoch 2, batch 1650, loss[loss=0.4427, simple_loss=0.4632, pruned_loss=0.2111, over 8104.00 frames. ], tot_loss[loss=0.3864, simple_loss=0.4173, pruned_loss=0.1778, over 1621287.47 frames. 
], batch size: 23, lr: 3.31e-02, grad_scale: 8.0 +2023-02-05 19:49:40,256 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9744.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:41,549 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9746.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:44,161 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0361, 1.8534, 1.8624, 0.2962, 1.6876, 1.4862, 0.2594, 1.8626], + device='cuda:0'), covar=tensor([0.0217, 0.0069, 0.0133, 0.0393, 0.0172, 0.0296, 0.0482, 0.0095], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0121, 0.0108, 0.0179, 0.0123, 0.0208, 0.0179, 0.0146], + device='cuda:0'), out_proj_covar=tensor([1.1905e-04, 8.4022e-05, 8.1694e-05, 1.2763e-04, 9.3386e-05, 1.5451e-04, + 1.2926e-04, 1.0367e-04], device='cuda:0') +2023-02-05 19:49:48,284 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3724, 1.5378, 2.3465, 1.0807, 1.8421, 1.5708, 1.4050, 1.6764], + device='cuda:0'), covar=tensor([0.0949, 0.1130, 0.0315, 0.1444, 0.0739, 0.1373, 0.0914, 0.0814], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0335, 0.0346, 0.0373, 0.0430, 0.0399, 0.0343, 0.0415], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 19:49:51,536 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:54,946 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6786, 2.5724, 4.6603, 1.0999, 2.7540, 2.1286, 2.0714, 2.5321], + device='cuda:0'), covar=tensor([0.1092, 0.1142, 0.0327, 0.1686, 0.1159, 0.1572, 0.0817, 0.1476], + device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0336, 0.0348, 0.0375, 0.0431, 0.0401, 0.0345, 0.0417], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 19:49:58,952 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9771.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:50:02,237 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:50:07,334 INFO [train.py:901] (0/4) Epoch 2, batch 1700, loss[loss=0.4642, simple_loss=0.4681, pruned_loss=0.2301, over 8105.00 frames. ], tot_loss[loss=0.3875, simple_loss=0.4179, pruned_loss=0.1786, over 1620812.41 frames. ], batch size: 23, lr: 3.31e-02, grad_scale: 8.0 +2023-02-05 19:50:26,240 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.640e+02 4.068e+02 5.098e+02 6.535e+02 1.207e+03, threshold=1.020e+03, percent-clipped=5.0 +2023-02-05 19:50:42,246 INFO [train.py:901] (0/4) Epoch 2, batch 1750, loss[loss=0.3617, simple_loss=0.4125, pruned_loss=0.1555, over 8470.00 frames. ], tot_loss[loss=0.3875, simple_loss=0.4172, pruned_loss=0.179, over 1610571.39 frames. 
], batch size: 29, lr: 3.30e-02, grad_scale: 8.0 +2023-02-05 19:50:43,029 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7360, 2.4767, 1.3887, 2.1561, 2.1177, 1.2262, 1.5044, 2.4718], + device='cuda:0'), covar=tensor([0.1484, 0.0602, 0.1428, 0.0797, 0.1045, 0.1451, 0.1696, 0.0822], + device='cuda:0'), in_proj_covar=tensor([0.0373, 0.0246, 0.0365, 0.0299, 0.0351, 0.0312, 0.0378, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 19:51:16,311 INFO [train.py:901] (0/4) Epoch 2, batch 1800, loss[loss=0.3725, simple_loss=0.3824, pruned_loss=0.1813, over 7435.00 frames. ], tot_loss[loss=0.3867, simple_loss=0.4162, pruned_loss=0.1786, over 1604682.25 frames. ], batch size: 17, lr: 3.29e-02, grad_scale: 8.0 +2023-02-05 19:51:21,255 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:51:34,074 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.365e+02 4.111e+02 5.198e+02 6.626e+02 1.120e+03, threshold=1.040e+03, percent-clipped=3.0 +2023-02-05 19:51:49,945 INFO [train.py:901] (0/4) Epoch 2, batch 1850, loss[loss=0.4174, simple_loss=0.4369, pruned_loss=0.199, over 8136.00 frames. ], tot_loss[loss=0.3863, simple_loss=0.4156, pruned_loss=0.1785, over 1605015.13 frames. ], batch size: 22, lr: 3.29e-02, grad_scale: 8.0 +2023-02-05 19:51:58,649 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9946.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:52:24,360 INFO [train.py:901] (0/4) Epoch 2, batch 1900, loss[loss=0.3523, simple_loss=0.3846, pruned_loss=0.16, over 8089.00 frames. ], tot_loss[loss=0.3854, simple_loss=0.4155, pruned_loss=0.1777, over 1609531.42 frames. ], batch size: 21, lr: 3.28e-02, grad_scale: 8.0 +2023-02-05 19:52:35,047 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-10000.pt +2023-02-05 19:52:43,411 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1488, 2.0399, 1.8119, 0.3585, 1.9216, 1.2850, 0.3281, 1.9903], + device='cuda:0'), covar=tensor([0.0220, 0.0059, 0.0166, 0.0270, 0.0134, 0.0369, 0.0375, 0.0084], + device='cuda:0'), in_proj_covar=tensor([0.0177, 0.0125, 0.0111, 0.0168, 0.0125, 0.0217, 0.0181, 0.0152], + device='cuda:0'), out_proj_covar=tensor([1.2202e-04, 8.6516e-05, 8.1972e-05, 1.1786e-04, 9.3166e-05, 1.5942e-04, + 1.2926e-04, 1.0817e-04], device='cuda:0') +2023-02-05 19:52:43,808 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.513e+02 4.327e+02 5.785e+02 1.080e+03, threshold=8.653e+02, percent-clipped=1.0 +2023-02-05 19:52:48,221 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-02-05 19:52:49,913 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:52:50,742 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2099, 1.9521, 1.7660, 0.2105, 1.6491, 1.2285, 0.2370, 2.0189], + device='cuda:0'), covar=tensor([0.0198, 0.0065, 0.0147, 0.0283, 0.0150, 0.0418, 0.0391, 0.0075], + device='cuda:0'), in_proj_covar=tensor([0.0176, 0.0126, 0.0109, 0.0167, 0.0125, 0.0216, 0.0180, 0.0152], + device='cuda:0'), out_proj_covar=tensor([1.2150e-04, 8.6732e-05, 8.0194e-05, 1.1712e-04, 9.3715e-05, 1.5911e-04, + 1.2816e-04, 1.0794e-04], device='cuda:0') +2023-02-05 19:52:54,616 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 19:52:59,200 INFO [train.py:901] (0/4) Epoch 2, batch 1950, loss[loss=0.3697, simple_loss=0.4148, pruned_loss=0.1623, over 8350.00 frames. ], tot_loss[loss=0.3849, simple_loss=0.415, pruned_loss=0.1774, over 1610990.95 frames. ], batch size: 24, lr: 3.27e-02, grad_scale: 8.0 +2023-02-05 19:53:06,860 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 19:53:15,918 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10057.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:19,405 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:25,592 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 19:53:33,050 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10080.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:53:35,478 INFO [train.py:901] (0/4) Epoch 2, batch 2000, loss[loss=0.4506, simple_loss=0.4647, pruned_loss=0.2182, over 8470.00 frames. ], tot_loss[loss=0.3874, simple_loss=0.4175, pruned_loss=0.1787, over 1618961.14 frames. ], batch size: 25, lr: 3.27e-02, grad_scale: 8.0 +2023-02-05 19:53:46,356 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2256, 1.4479, 2.2726, 0.9468, 1.9841, 1.4743, 1.3113, 1.6697], + device='cuda:0'), covar=tensor([0.1302, 0.1354, 0.0417, 0.1941, 0.0783, 0.1741, 0.1130, 0.1010], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0335, 0.0352, 0.0387, 0.0435, 0.0401, 0.0344, 0.0424], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 19:53:50,427 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:55,730 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.648e+02 4.167e+02 5.413e+02 6.926e+02 6.671e+03, threshold=1.083e+03, percent-clipped=14.0 +2023-02-05 19:54:10,563 INFO [train.py:901] (0/4) Epoch 2, batch 2050, loss[loss=0.3765, simple_loss=0.4255, pruned_loss=0.1638, over 8352.00 frames. ], tot_loss[loss=0.3871, simple_loss=0.4166, pruned_loss=0.1788, over 1617347.19 frames. 
], batch size: 24, lr: 3.26e-02, grad_scale: 4.0 +2023-02-05 19:54:11,450 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10135.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:19,453 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:36,879 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:45,485 INFO [train.py:901] (0/4) Epoch 2, batch 2100, loss[loss=0.3905, simple_loss=0.4436, pruned_loss=0.1687, over 8505.00 frames. ], tot_loss[loss=0.3852, simple_loss=0.4156, pruned_loss=0.1774, over 1617483.95 frames. ], batch size: 26, lr: 3.25e-02, grad_scale: 4.0 +2023-02-05 19:55:06,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.788e+02 4.646e+02 5.840e+02 1.328e+03, threshold=9.292e+02, percent-clipped=3.0 +2023-02-05 19:55:11,270 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:55:20,261 INFO [train.py:901] (0/4) Epoch 2, batch 2150, loss[loss=0.3699, simple_loss=0.4072, pruned_loss=0.1663, over 8598.00 frames. ], tot_loss[loss=0.3834, simple_loss=0.4145, pruned_loss=0.1761, over 1615640.36 frames. ], batch size: 48, lr: 3.25e-02, grad_scale: 4.0 +2023-02-05 19:55:50,778 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1019, 2.6442, 3.8800, 4.2853, 2.8756, 2.2310, 1.9659, 2.5310], + device='cuda:0'), covar=tensor([0.0713, 0.0763, 0.0145, 0.0177, 0.0398, 0.0384, 0.0568, 0.0709], + device='cuda:0'), in_proj_covar=tensor([0.0445, 0.0354, 0.0255, 0.0296, 0.0375, 0.0330, 0.0351, 0.0398], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 19:55:53,983 INFO [train.py:901] (0/4) Epoch 2, batch 2200, loss[loss=0.3833, simple_loss=0.4156, pruned_loss=0.1755, over 8046.00 frames. ], tot_loss[loss=0.3851, simple_loss=0.4154, pruned_loss=0.1774, over 1613885.37 frames. ], batch size: 22, lr: 3.24e-02, grad_scale: 4.0 +2023-02-05 19:56:06,177 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7714, 1.9815, 3.6078, 1.1542, 2.7014, 2.2367, 1.7619, 2.0035], + device='cuda:0'), covar=tensor([0.0904, 0.1215, 0.0281, 0.1697, 0.0850, 0.1280, 0.0871, 0.1359], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0345, 0.0366, 0.0395, 0.0445, 0.0406, 0.0356, 0.0433], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 19:56:14,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.803e+02 4.971e+02 6.310e+02 1.458e+03, threshold=9.942e+02, percent-clipped=6.0 +2023-02-05 19:56:18,156 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10317.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:56:29,263 INFO [train.py:901] (0/4) Epoch 2, batch 2250, loss[loss=0.386, simple_loss=0.4246, pruned_loss=0.1737, over 8453.00 frames. ], tot_loss[loss=0.3872, simple_loss=0.4171, pruned_loss=0.1787, over 1615145.18 frames. 
], batch size: 27, lr: 3.24e-02, grad_scale: 4.0 +2023-02-05 19:56:34,609 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10342.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:03,211 INFO [train.py:901] (0/4) Epoch 2, batch 2300, loss[loss=0.4557, simple_loss=0.4648, pruned_loss=0.2234, over 7128.00 frames. ], tot_loss[loss=0.3858, simple_loss=0.4157, pruned_loss=0.1779, over 1605823.81 frames. ], batch size: 72, lr: 3.23e-02, grad_scale: 4.0 +2023-02-05 19:57:08,297 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10391.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:15,020 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10401.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:23,809 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.389e+02 3.989e+02 5.161e+02 7.086e+02 1.471e+03, threshold=1.032e+03, percent-clipped=7.0 +2023-02-05 19:57:25,921 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10416.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:31,788 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10424.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:57:33,881 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:39,139 INFO [train.py:901] (0/4) Epoch 2, batch 2350, loss[loss=0.3116, simple_loss=0.3526, pruned_loss=0.1353, over 7687.00 frames. ], tot_loss[loss=0.3845, simple_loss=0.4148, pruned_loss=0.1771, over 1606883.75 frames. ], batch size: 18, lr: 3.22e-02, grad_scale: 4.0 +2023-02-05 19:58:05,149 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10472.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:07,801 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:12,897 INFO [train.py:901] (0/4) Epoch 2, batch 2400, loss[loss=0.3516, simple_loss=0.3849, pruned_loss=0.1591, over 7719.00 frames. ], tot_loss[loss=0.3832, simple_loss=0.414, pruned_loss=0.1762, over 1607972.56 frames. ], batch size: 18, lr: 3.22e-02, grad_scale: 8.0 +2023-02-05 19:58:24,664 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:32,501 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.364e+02 3.956e+02 5.047e+02 6.263e+02 1.564e+03, threshold=1.009e+03, percent-clipped=2.0 +2023-02-05 19:58:34,724 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10516.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:47,330 INFO [train.py:901] (0/4) Epoch 2, batch 2450, loss[loss=0.3721, simple_loss=0.4156, pruned_loss=0.1643, over 8336.00 frames. ], tot_loss[loss=0.3854, simple_loss=0.4157, pruned_loss=0.1776, over 1611519.61 frames. ], batch size: 26, lr: 3.21e-02, grad_scale: 8.0 +2023-02-05 19:58:50,867 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10539.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:58:53,810 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.29 vs. limit=5.0 +2023-02-05 19:59:22,252 INFO [train.py:901] (0/4) Epoch 2, batch 2500, loss[loss=0.3596, simple_loss=0.3976, pruned_loss=0.1608, over 8511.00 frames. 
], tot_loss[loss=0.3827, simple_loss=0.4136, pruned_loss=0.1759, over 1613363.48 frames. ], batch size: 28, lr: 3.20e-02, grad_scale: 8.0 +2023-02-05 19:59:42,162 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.520e+02 3.522e+02 4.438e+02 6.473e+02 1.354e+03, threshold=8.876e+02, percent-clipped=4.0 +2023-02-05 19:59:43,168 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 19:59:55,951 INFO [train.py:901] (0/4) Epoch 2, batch 2550, loss[loss=0.4213, simple_loss=0.4369, pruned_loss=0.2029, over 8484.00 frames. ], tot_loss[loss=0.3835, simple_loss=0.414, pruned_loss=0.1765, over 1612684.25 frames. ], batch size: 26, lr: 3.20e-02, grad_scale: 8.0 +2023-02-05 20:00:31,355 INFO [train.py:901] (0/4) Epoch 2, batch 2600, loss[loss=0.4055, simple_loss=0.4125, pruned_loss=0.1993, over 7806.00 frames. ], tot_loss[loss=0.3838, simple_loss=0.4144, pruned_loss=0.1766, over 1610797.26 frames. ], batch size: 19, lr: 3.19e-02, grad_scale: 8.0 +2023-02-05 20:00:50,499 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 4.188e+02 4.914e+02 6.333e+02 1.432e+03, threshold=9.828e+02, percent-clipped=6.0 +2023-02-05 20:01:05,101 INFO [train.py:901] (0/4) Epoch 2, batch 2650, loss[loss=0.3706, simple_loss=0.4113, pruned_loss=0.1649, over 8543.00 frames. ], tot_loss[loss=0.3836, simple_loss=0.4148, pruned_loss=0.1762, over 1614975.55 frames. ], batch size: 49, lr: 3.19e-02, grad_scale: 8.0 +2023-02-05 20:01:24,204 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10762.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:30,872 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10771.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:31,714 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10772.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:40,306 INFO [train.py:901] (0/4) Epoch 2, batch 2700, loss[loss=0.3779, simple_loss=0.3991, pruned_loss=0.1784, over 7798.00 frames. ], tot_loss[loss=0.3836, simple_loss=0.4145, pruned_loss=0.1763, over 1616409.37 frames. ], batch size: 19, lr: 3.18e-02, grad_scale: 8.0 +2023-02-05 20:01:46,654 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10792.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:48,795 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10795.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:01:50,102 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10797.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:02:01,038 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.290e+02 4.005e+02 5.458e+02 7.000e+02 2.619e+03, threshold=1.092e+03, percent-clipped=7.0 +2023-02-05 20:02:03,290 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10816.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:02:06,079 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10820.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:02:15,182 INFO [train.py:901] (0/4) Epoch 2, batch 2750, loss[loss=0.3862, simple_loss=0.4029, pruned_loss=0.1848, over 7975.00 frames. ], tot_loss[loss=0.3825, simple_loss=0.4136, pruned_loss=0.1757, over 1613564.91 frames. 
], batch size: 21, lr: 3.17e-02, grad_scale: 8.0 +2023-02-05 20:02:49,765 INFO [train.py:901] (0/4) Epoch 2, batch 2800, loss[loss=0.3605, simple_loss=0.3614, pruned_loss=0.1798, over 6798.00 frames. ], tot_loss[loss=0.3809, simple_loss=0.4124, pruned_loss=0.1747, over 1607136.97 frames. ], batch size: 15, lr: 3.17e-02, grad_scale: 8.0 +2023-02-05 20:02:51,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10886.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:03:03,313 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10903.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:03:10,630 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.367e+02 3.535e+02 4.531e+02 6.001e+02 1.335e+03, threshold=9.062e+02, percent-clipped=2.0 +2023-02-05 20:03:23,111 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10931.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:03:25,018 INFO [train.py:901] (0/4) Epoch 2, batch 2850, loss[loss=0.4083, simple_loss=0.4299, pruned_loss=0.1934, over 8677.00 frames. ], tot_loss[loss=0.3795, simple_loss=0.4118, pruned_loss=0.1736, over 1612744.78 frames. ], batch size: 39, lr: 3.16e-02, grad_scale: 8.0 +2023-02-05 20:03:59,104 INFO [train.py:901] (0/4) Epoch 2, batch 2900, loss[loss=0.4482, simple_loss=0.4656, pruned_loss=0.2154, over 8416.00 frames. ], tot_loss[loss=0.3782, simple_loss=0.4112, pruned_loss=0.1726, over 1614932.58 frames. ], batch size: 29, lr: 3.16e-02, grad_scale: 8.0 +2023-02-05 20:04:19,452 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.551e+02 4.216e+02 5.196e+02 6.845e+02 2.226e+03, threshold=1.039e+03, percent-clipped=10.0 +2023-02-05 20:04:34,447 INFO [train.py:901] (0/4) Epoch 2, batch 2950, loss[loss=0.5481, simple_loss=0.522, pruned_loss=0.2871, over 8490.00 frames. ], tot_loss[loss=0.3792, simple_loss=0.4114, pruned_loss=0.1735, over 1613129.81 frames. ], batch size: 29, lr: 3.15e-02, grad_scale: 8.0 +2023-02-05 20:04:39,270 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 20:04:50,967 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7488, 2.0629, 1.8769, 2.8261, 1.4984, 1.2294, 1.7354, 2.1865], + device='cuda:0'), covar=tensor([0.1364, 0.1513, 0.1615, 0.0424, 0.2073, 0.2560, 0.2243, 0.1175], + device='cuda:0'), in_proj_covar=tensor([0.0301, 0.0322, 0.0307, 0.0207, 0.0317, 0.0323, 0.0368, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:0') +2023-02-05 20:05:08,646 INFO [train.py:901] (0/4) Epoch 2, batch 3000, loss[loss=0.4348, simple_loss=0.4418, pruned_loss=0.2139, over 6811.00 frames. ], tot_loss[loss=0.3799, simple_loss=0.4118, pruned_loss=0.174, over 1612671.16 frames. ], batch size: 72, lr: 3.14e-02, grad_scale: 8.0 +2023-02-05 20:05:08,646 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 20:05:24,857 INFO [train.py:935] (0/4) Epoch 2, validation: loss=0.2878, simple_loss=0.369, pruned_loss=0.1033, over 944034.00 frames. 
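A reading note on the recurring `train.py:901` entries: `loss` is not independent of the other two figures. In the pruned-transducer recipes the total is a weighted sum of `simple_loss` (the cheap linear-joiner loss used to derive the pruning bounds) and `pruned_loss` (the full transducer loss evaluated on the pruned lattice), and every entry in this log is consistent with a simple-loss scale of 0.5. A minimal sketch under that assumption (batches inside the very first warmup phase can use a different pruned-loss scale):

```python
# Sketch: how `loss` in the train.py:901 lines decomposes.
# Assumption, consistent with every entry in this log:
#   loss = 0.5 * simple_loss + pruned_loss
SIMPLE_LOSS_SCALE = 0.5

def total_loss(simple_loss: float, pruned_loss: float) -> float:
    return SIMPLE_LOSS_SCALE * simple_loss + pruned_loss

# Spot-checks against lines from this log:
assert abs(total_loss(0.4591, 0.2254) - 0.455) < 1e-3   # epoch 2, batch 50
assert abs(total_loss(0.4278, 0.1897) - 0.4036) < 1e-3  # batch 400 tot_loss
assert abs(total_loss(0.4081, 0.1692) - 0.3732) < 1e-3  # batch 4050 tot_loss
```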
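The `optim.py:369` entries summarize recently observed gradient norms as five quantiles (min, 25%, median, 75%, max) next to the active clipping threshold; in every entry the threshold is exactly `Clipping_scale` times the median (e.g. 2.0 × 4.676e+02 = 9.352e+02), and `percent-clipped` is the share of recent batches whose norm exceeded it. A rough sketch of that bookkeeping, assuming a fixed-length norm history; the class and its names are illustrative, not icefall's actual optimizer internals:

```python
from collections import deque
import torch

class MedianGradClipper:
    """Illustrative: clip at clipping_scale * median of recent grad
    norms and report the quartile statistics seen in this log."""

    def __init__(self, clipping_scale: float = 2.0, history: int = 128):
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=history)
        self.clipped = deque(maxlen=history)

    def step(self, parameters) -> None:
        params = [p for p in parameters if p.grad is not None]
        if not params:
            return
        norm = torch.norm(torch.stack([p.grad.norm() for p in params])).item()
        self.norms.append(norm)
        q = torch.quantile(torch.tensor(list(self.norms)),
                           torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        threshold = self.clipping_scale * q[2].item()  # scale * median
        self.clipped.append(norm > threshold)
        if norm > threshold:
            for p in params:  # rescale so the global norm hits the threshold
                p.grad.mul_(threshold / norm)
        pct = 100.0 * sum(self.clipped) / len(self.clipped)
        print(f"grad-norm quartiles {q.tolist()}, "
              f"threshold={threshold:.3e}, percent-clipped={pct:.1f}")
```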
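The `train.py:1067` WARNING entries come from the recipe's duration filter: every cut excluded in this log is either under roughly one second or over roughly 25 seconds, i.e. too short to supervise usefully or long enough to risk out-of-memory errors at this batch size. A sketch of that filter over a lhotse `CutSet`; the exact bounds here are an assumption read off the excluded durations, so consult the recipe's `remove_short_and_long_utt` for the real ones:

```python
from lhotse import CutSet

MIN_SECS, MAX_SECS = 1.0, 25.0  # assumed bounds, inferred from this log

def remove_short_and_long_utt(cuts: CutSet) -> CutSet:
    def keep(c) -> bool:
        if MIN_SECS <= c.duration <= MAX_SECS:
            return True
        # Emits warnings of the same shape as the ones in this log.
        print(f"Exclude cut with ID {c.id} from training. "
              f"Duration: {c.duration}")
        return False

    return cuts.filter(keep)
```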
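The `zipformer.py:1185` entries track per-stack layer dropping: each encoder stack has its own warmup window measured in batch counts (`warmup_begin`/`warmup_end`), and this deep into training almost every batch keeps all layers (`num_to_drop=0, layers_to_drop=set()`), with an occasional single layer dropped. A toy model of what those fields describe, with made-up probabilities; it is not icefall's actual schedule:

```python
import random

def pick_layers_to_drop(batch_count: float, warmup_begin: float,
                        warmup_end: float, num_layers: int,
                        warmup_prob: float = 0.5,
                        base_prob: float = 0.025) -> set:
    """Toy schedule: drop layers aggressively inside the warmup window,
    then keep a small residual probability (hence the rare
    num_to_drop=1 entries late in this log). Probabilities are
    illustrative only."""
    if batch_count < warmup_end:
        frac = (warmup_end - batch_count) / (warmup_end - warmup_begin)
        p = base_prob + warmup_prob * max(0.0, min(1.0, frac))
    else:
        p = base_prob
    return {i for i in range(num_layers) if random.random() < p}
```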
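The `scaling.py:679` entries compare a whiteness statistic of some activation's channel covariance against a limit (2.0 for the grouped checks, 5.0 for the single-group ones); values near 1.0 mean the covariance spectrum is nearly flat, and a corrective penalty only applies past the limit. One plausible such statistic is sketched below; it is not necessarily the module's exact formula:

```python
import torch

def whiteness_metric(x: torch.Tensor) -> float:
    """One plausible whiteness statistic, not necessarily icefall's.
    x: (num_frames, num_channels) activations. Returns
    mean(eig**2) / mean(eig)**2 over the eigenvalues of the channel
    covariance: 1.0 for a flat ("white") spectrum, larger as the
    spectrum spreads."""
    x = x - x.mean(dim=0)
    cov = (x.T @ x) / x.shape[0]
    eigs = torch.linalg.eigvalsh(cov)  # real eigenvalues, ascending
    return float((eigs ** 2).mean() / eigs.mean() ** 2)
```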
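The `zipformer.py:2431` dumps are attention diagnostics: a tensor of entropy values for the attention weights alongside running covariance statistics of the input/output projections. The entropy part is easy to reproduce; a generic sketch for weights of shape `(num_heads, tgt_len, src_len)` whose rows sum to one:

```python
import torch

def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
    """Generic reconstruction, not the module's exact code. Returns the
    mean per-head entropy: near 0 for sharply peaked attention, near
    log(src_len) when a head attends almost uniformly."""
    eps = 1.0e-20
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (num_heads, tgt_len)
    return ent.mean(dim=-1)                         # (num_heads,)
```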
+2023-02-05 20:05:24,859 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6484MB +2023-02-05 20:05:27,734 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0461, 1.1556, 3.2353, 1.0529, 2.7103, 2.7882, 2.8298, 2.8756], + device='cuda:0'), covar=tensor([0.0365, 0.2540, 0.0321, 0.1420, 0.0912, 0.0374, 0.0364, 0.0454], + device='cuda:0'), in_proj_covar=tensor([0.0178, 0.0357, 0.0220, 0.0249, 0.0296, 0.0231, 0.0219, 0.0249], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:05:39,940 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0678, 1.2233, 1.4210, 1.0779, 0.8939, 1.4108, 0.1025, 0.7151], + device='cuda:0'), covar=tensor([0.0820, 0.0461, 0.0259, 0.0529, 0.0674, 0.0298, 0.1464, 0.0668], + device='cuda:0'), in_proj_covar=tensor([0.0122, 0.0098, 0.0085, 0.0135, 0.0119, 0.0083, 0.0159, 0.0126], + device='cuda:0'), out_proj_covar=tensor([1.1463e-04, 1.0033e-04, 8.0509e-05, 1.2561e-04, 1.1919e-04, 7.8357e-05, + 1.4852e-04, 1.2342e-04], device='cuda:0') +2023-02-05 20:05:40,474 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:05:45,161 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.542e+02 3.795e+02 4.955e+02 6.193e+02 1.384e+03, threshold=9.910e+02, percent-clipped=4.0 +2023-02-05 20:06:00,081 INFO [train.py:901] (0/4) Epoch 2, batch 3050, loss[loss=0.3314, simple_loss=0.3756, pruned_loss=0.1436, over 7653.00 frames. ], tot_loss[loss=0.3806, simple_loss=0.4121, pruned_loss=0.1745, over 1609645.38 frames. ], batch size: 19, lr: 3.14e-02, grad_scale: 8.0 +2023-02-05 20:06:01,558 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:05,881 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11142.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:14,855 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5141, 2.1666, 3.3301, 3.0916, 2.8870, 2.1865, 1.5081, 2.3187], + device='cuda:0'), covar=tensor([0.0645, 0.0786, 0.0141, 0.0184, 0.0277, 0.0307, 0.0532, 0.0568], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0382, 0.0270, 0.0310, 0.0399, 0.0343, 0.0367, 0.0411], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 20:06:23,841 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 20:06:24,312 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:35,407 INFO [train.py:901] (0/4) Epoch 2, batch 3100, loss[loss=0.3631, simple_loss=0.3995, pruned_loss=0.1634, over 7974.00 frames. ], tot_loss[loss=0.3789, simple_loss=0.4115, pruned_loss=0.1732, over 1613396.45 frames. ], batch size: 21, lr: 3.13e-02, grad_scale: 8.0 +2023-02-05 20:06:37,612 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11187.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:40,548 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. 
limit=2.0 +2023-02-05 20:06:40,860 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:55,382 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:55,866 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.930e+02 4.987e+02 6.652e+02 1.229e+03, threshold=9.974e+02, percent-clipped=5.0 +2023-02-05 20:07:01,724 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:07:10,341 INFO [train.py:901] (0/4) Epoch 2, batch 3150, loss[loss=0.3331, simple_loss=0.3754, pruned_loss=0.1454, over 8356.00 frames. ], tot_loss[loss=0.3779, simple_loss=0.4103, pruned_loss=0.1728, over 1610006.04 frames. ], batch size: 24, lr: 3.13e-02, grad_scale: 8.0 +2023-02-05 20:07:20,131 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11247.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:07:22,964 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11251.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:07:46,072 INFO [train.py:901] (0/4) Epoch 2, batch 3200, loss[loss=0.3618, simple_loss=0.4088, pruned_loss=0.1574, over 8362.00 frames. ], tot_loss[loss=0.3777, simple_loss=0.4103, pruned_loss=0.1725, over 1612463.87 frames. ], batch size: 24, lr: 3.12e-02, grad_scale: 8.0 +2023-02-05 20:08:06,196 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 3.889e+02 4.508e+02 6.050e+02 1.565e+03, threshold=9.016e+02, percent-clipped=4.0 +2023-02-05 20:08:21,236 INFO [train.py:901] (0/4) Epoch 2, batch 3250, loss[loss=0.3397, simple_loss=0.3831, pruned_loss=0.1482, over 8240.00 frames. ], tot_loss[loss=0.3783, simple_loss=0.4113, pruned_loss=0.1726, over 1615898.45 frames. ], batch size: 22, lr: 3.11e-02, grad_scale: 8.0 +2023-02-05 20:08:39,994 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11362.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:08:55,024 INFO [train.py:901] (0/4) Epoch 2, batch 3300, loss[loss=0.3985, simple_loss=0.4346, pruned_loss=0.1812, over 8478.00 frames. ], tot_loss[loss=0.38, simple_loss=0.4127, pruned_loss=0.1737, over 1617516.86 frames. ], batch size: 29, lr: 3.11e-02, grad_scale: 8.0 +2023-02-05 20:09:16,005 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.686e+02 3.650e+02 4.417e+02 5.589e+02 1.513e+03, threshold=8.834e+02, percent-clipped=8.0 +2023-02-05 20:09:30,178 INFO [train.py:901] (0/4) Epoch 2, batch 3350, loss[loss=0.3563, simple_loss=0.3862, pruned_loss=0.1632, over 7807.00 frames. ], tot_loss[loss=0.3784, simple_loss=0.412, pruned_loss=0.1723, over 1622531.82 frames. ], batch size: 20, lr: 3.10e-02, grad_scale: 8.0 +2023-02-05 20:09:40,002 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. 
limit=2.0 +2023-02-05 20:09:53,262 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4803, 2.4170, 2.7264, 1.0770, 3.0497, 1.9687, 0.9517, 1.7091], + device='cuda:0'), covar=tensor([0.0245, 0.0088, 0.0336, 0.0240, 0.0127, 0.0271, 0.0423, 0.0178], + device='cuda:0'), in_proj_covar=tensor([0.0191, 0.0124, 0.0124, 0.0182, 0.0131, 0.0230, 0.0196, 0.0168], + device='cuda:0'), out_proj_covar=tensor([1.2343e-04, 8.0534e-05, 8.5780e-05, 1.1887e-04, 9.0968e-05, 1.6021e-04, + 1.3265e-04, 1.1235e-04], device='cuda:0') +2023-02-05 20:10:00,582 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11477.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:05,260 INFO [train.py:901] (0/4) Epoch 2, batch 3400, loss[loss=0.3609, simple_loss=0.3932, pruned_loss=0.1643, over 7920.00 frames. ], tot_loss[loss=0.3768, simple_loss=0.4112, pruned_loss=0.1712, over 1621409.78 frames. ], batch size: 20, lr: 3.10e-02, grad_scale: 8.0 +2023-02-05 20:10:11,476 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8461, 1.6136, 5.7223, 2.4485, 5.1284, 4.8378, 5.2747, 5.3173], + device='cuda:0'), covar=tensor([0.0254, 0.2696, 0.0150, 0.0979, 0.0594, 0.0206, 0.0176, 0.0227], + device='cuda:0'), in_proj_covar=tensor([0.0179, 0.0363, 0.0221, 0.0250, 0.0308, 0.0243, 0.0224, 0.0255], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:10:17,702 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11502.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:21,203 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11507.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:25,777 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.120e+02 3.730e+02 4.591e+02 5.662e+02 1.223e+03, threshold=9.181e+02, percent-clipped=5.0 +2023-02-05 20:10:39,490 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11532.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:40,652 INFO [train.py:901] (0/4) Epoch 2, batch 3450, loss[loss=0.4436, simple_loss=0.4613, pruned_loss=0.213, over 8467.00 frames. ], tot_loss[loss=0.3781, simple_loss=0.4119, pruned_loss=0.1721, over 1619113.85 frames. ], batch size: 25, lr: 3.09e-02, grad_scale: 8.0 +2023-02-05 20:10:42,064 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:51,630 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11550.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:11:13,179 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-05 20:11:15,647 INFO [train.py:901] (0/4) Epoch 2, batch 3500, loss[loss=0.346, simple_loss=0.3965, pruned_loss=0.1478, over 8509.00 frames. ], tot_loss[loss=0.376, simple_loss=0.4105, pruned_loss=0.1708, over 1615781.24 frames. 
], batch size: 26, lr: 3.09e-02, grad_scale: 8.0 +2023-02-05 20:11:18,536 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2075, 1.8277, 3.1596, 2.7386, 2.4353, 1.9169, 1.4116, 1.7452], + device='cuda:0'), covar=tensor([0.0752, 0.0825, 0.0129, 0.0212, 0.0340, 0.0368, 0.0528, 0.0652], + device='cuda:0'), in_proj_covar=tensor([0.0471, 0.0387, 0.0282, 0.0311, 0.0413, 0.0355, 0.0373, 0.0408], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 20:11:35,936 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 4.071e+02 4.877e+02 6.297e+02 1.257e+03, threshold=9.753e+02, percent-clipped=3.0 +2023-02-05 20:11:39,541 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11618.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:11:40,716 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 20:11:50,745 INFO [train.py:901] (0/4) Epoch 2, batch 3550, loss[loss=0.3703, simple_loss=0.4003, pruned_loss=0.1702, over 8075.00 frames. ], tot_loss[loss=0.3753, simple_loss=0.4101, pruned_loss=0.1703, over 1613573.65 frames. ], batch size: 21, lr: 3.08e-02, grad_scale: 8.0 +2023-02-05 20:11:57,562 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11643.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:12:02,962 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:04,344 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:11,698 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7643, 1.5586, 2.4304, 2.0885, 2.1015, 1.4760, 1.2852, 1.5560], + device='cuda:0'), covar=tensor([0.0649, 0.0544, 0.0133, 0.0173, 0.0220, 0.0343, 0.0453, 0.0404], + device='cuda:0'), in_proj_covar=tensor([0.0476, 0.0387, 0.0284, 0.0313, 0.0412, 0.0353, 0.0372, 0.0413], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 20:12:14,505 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 20:12:25,617 INFO [train.py:901] (0/4) Epoch 2, batch 3600, loss[loss=0.3544, simple_loss=0.3804, pruned_loss=0.1642, over 7231.00 frames. ], tot_loss[loss=0.3774, simple_loss=0.4115, pruned_loss=0.1716, over 1614394.58 frames. ], batch size: 16, lr: 3.08e-02, grad_scale: 8.0 +2023-02-05 20:12:29,814 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7656, 1.1803, 3.2026, 1.1876, 2.0682, 3.4983, 3.3609, 3.0813], + device='cuda:0'), covar=tensor([0.1123, 0.1801, 0.0398, 0.1954, 0.0893, 0.0240, 0.0248, 0.0511], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0267, 0.0177, 0.0252, 0.0202, 0.0150, 0.0146, 0.0217], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 20:12:38,541 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-02-05 20:12:45,376 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.676e+02 3.688e+02 4.691e+02 6.662e+02 1.491e+03, threshold=9.383e+02, percent-clipped=3.0 +2023-02-05 20:12:48,303 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11717.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:59,433 INFO [train.py:901] (0/4) Epoch 2, batch 3650, loss[loss=0.4306, simple_loss=0.4519, pruned_loss=0.2047, over 8663.00 frames. ], tot_loss[loss=0.3768, simple_loss=0.4108, pruned_loss=0.1714, over 1612002.77 frames. ], batch size: 34, lr: 3.07e-02, grad_scale: 8.0 +2023-02-05 20:13:07,671 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 20:13:33,833 INFO [train.py:901] (0/4) Epoch 2, batch 3700, loss[loss=0.3875, simple_loss=0.4147, pruned_loss=0.1802, over 8284.00 frames. ], tot_loss[loss=0.3761, simple_loss=0.41, pruned_loss=0.1711, over 1612519.33 frames. ], batch size: 23, lr: 3.06e-02, grad_scale: 8.0 +2023-02-05 20:13:44,419 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:13:53,763 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.910e+02 4.224e+02 5.211e+02 6.213e+02 2.304e+03, threshold=1.042e+03, percent-clipped=10.0 +2023-02-05 20:14:08,520 INFO [train.py:901] (0/4) Epoch 2, batch 3750, loss[loss=0.4344, simple_loss=0.4451, pruned_loss=0.2118, over 8341.00 frames. ], tot_loss[loss=0.3742, simple_loss=0.4087, pruned_loss=0.1699, over 1612558.19 frames. ], batch size: 49, lr: 3.06e-02, grad_scale: 8.0 +2023-02-05 20:14:08,622 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11834.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:28,580 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11864.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:29,992 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6657, 1.9331, 3.5976, 1.1910, 2.4342, 1.8834, 1.6604, 2.0237], + device='cuda:0'), covar=tensor([0.0921, 0.1204, 0.0334, 0.1606, 0.0877, 0.1305, 0.0916, 0.1275], + device='cuda:0'), in_proj_covar=tensor([0.0364, 0.0349, 0.0380, 0.0408, 0.0457, 0.0414, 0.0371, 0.0460], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 20:14:42,002 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 20:14:43,036 INFO [train.py:901] (0/4) Epoch 2, batch 3800, loss[loss=0.3917, simple_loss=0.418, pruned_loss=0.1827, over 7647.00 frames. ], tot_loss[loss=0.3755, simple_loss=0.4096, pruned_loss=0.1707, over 1615938.77 frames. 
], batch size: 19, lr: 3.05e-02, grad_scale: 8.0 +2023-02-05 20:14:49,683 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11894.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:58,752 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:02,632 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.750e+02 4.056e+02 4.773e+02 6.198e+02 1.391e+03, threshold=9.546e+02, percent-clipped=3.0 +2023-02-05 20:15:16,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:17,491 INFO [train.py:901] (0/4) Epoch 2, batch 3850, loss[loss=0.3915, simple_loss=0.4246, pruned_loss=0.1792, over 8357.00 frames. ], tot_loss[loss=0.373, simple_loss=0.4079, pruned_loss=0.1691, over 1621345.70 frames. ], batch size: 24, lr: 3.05e-02, grad_scale: 8.0 +2023-02-05 20:15:20,310 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11938.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:39,723 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11966.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:47,063 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 20:15:51,654 INFO [train.py:901] (0/4) Epoch 2, batch 3900, loss[loss=0.3289, simple_loss=0.374, pruned_loss=0.1418, over 7778.00 frames. ], tot_loss[loss=0.3734, simple_loss=0.4081, pruned_loss=0.1693, over 1619684.08 frames. ], batch size: 19, lr: 3.04e-02, grad_scale: 8.0 +2023-02-05 20:15:52,513 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7568, 2.0593, 1.9535, 2.7044, 1.1974, 1.2514, 1.7002, 2.1205], + device='cuda:0'), covar=tensor([0.1518, 0.1899, 0.1404, 0.0438, 0.2371, 0.2538, 0.2244, 0.1364], + device='cuda:0'), in_proj_covar=tensor([0.0323, 0.0342, 0.0329, 0.0214, 0.0337, 0.0338, 0.0373, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:16:01,112 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11997.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:03,786 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-12000.pt +2023-02-05 20:16:06,073 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12002.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:10,872 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12009.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:13,175 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.113e+02 3.926e+02 4.686e+02 5.678e+02 1.222e+03, threshold=9.373e+02, percent-clipped=4.0 +2023-02-05 20:16:20,221 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9630, 1.7023, 1.2848, 1.2523, 1.7191, 1.4474, 1.5936, 1.5684], + device='cuda:0'), covar=tensor([0.0818, 0.1493, 0.2202, 0.1774, 0.0727, 0.1673, 0.0946, 0.0812], + device='cuda:0'), in_proj_covar=tensor([0.0226, 0.0260, 0.0286, 0.0253, 0.0224, 0.0247, 0.0222, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:16:28,141 INFO [train.py:901] (0/4) Epoch 2, batch 3950, loss[loss=0.3896, 
simple_loss=0.426, pruned_loss=0.1766, over 8690.00 frames. ], tot_loss[loss=0.375, simple_loss=0.4089, pruned_loss=0.1705, over 1616696.45 frames. ], batch size: 34, lr: 3.04e-02, grad_scale: 8.0 +2023-02-05 20:16:46,991 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:02,473 INFO [train.py:901] (0/4) Epoch 2, batch 4000, loss[loss=0.3703, simple_loss=0.4063, pruned_loss=0.1671, over 8460.00 frames. ], tot_loss[loss=0.3724, simple_loss=0.4067, pruned_loss=0.169, over 1610200.88 frames. ], batch size: 29, lr: 3.03e-02, grad_scale: 8.0 +2023-02-05 20:17:09,203 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12094.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:22,644 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12112.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:23,112 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.955e+02 4.453e+02 5.904e+02 7.845e+02 2.502e+03, threshold=1.181e+03, percent-clipped=13.0 +2023-02-05 20:17:36,866 INFO [train.py:901] (0/4) Epoch 2, batch 4050, loss[loss=0.3806, simple_loss=0.4221, pruned_loss=0.1696, over 8322.00 frames. ], tot_loss[loss=0.3732, simple_loss=0.4081, pruned_loss=0.1692, over 1614643.44 frames. ], batch size: 25, lr: 3.03e-02, grad_scale: 16.0 +2023-02-05 20:17:46,315 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0626, 3.7796, 2.3623, 2.6711, 2.4946, 2.4790, 2.7830, 2.5737], + device='cuda:0'), covar=tensor([0.1776, 0.0426, 0.0953, 0.1064, 0.1144, 0.0996, 0.1338, 0.1254], + device='cuda:0'), in_proj_covar=tensor([0.0372, 0.0251, 0.0352, 0.0313, 0.0366, 0.0321, 0.0358, 0.0329], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 20:17:48,188 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9965, 1.0422, 1.0604, 0.8912, 0.6842, 0.9900, 0.1500, 0.6318], + device='cuda:0'), covar=tensor([0.0747, 0.0707, 0.0486, 0.0757, 0.1069, 0.0452, 0.2092, 0.1121], + device='cuda:0'), in_proj_covar=tensor([0.0116, 0.0102, 0.0090, 0.0143, 0.0123, 0.0085, 0.0157, 0.0125], + device='cuda:0'), out_proj_covar=tensor([1.1421e-04, 1.0727e-04, 8.9514e-05, 1.3675e-04, 1.2681e-04, 8.4791e-05, + 1.5188e-04, 1.2796e-04], device='cuda:0') +2023-02-05 20:18:06,022 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:06,225 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.79 vs. limit=2.0 +2023-02-05 20:18:07,290 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12178.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:11,165 INFO [train.py:901] (0/4) Epoch 2, batch 4100, loss[loss=0.3238, simple_loss=0.3689, pruned_loss=0.1393, over 7806.00 frames. ], tot_loss[loss=0.3729, simple_loss=0.4074, pruned_loss=0.1692, over 1614059.59 frames. 
], batch size: 19, lr: 3.02e-02, grad_scale: 16.0 +2023-02-05 20:18:27,596 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12208.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:30,899 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.728e+02 4.672e+02 5.863e+02 2.072e+03, threshold=9.344e+02, percent-clipped=1.0 +2023-02-05 20:18:47,040 INFO [train.py:901] (0/4) Epoch 2, batch 4150, loss[loss=0.3979, simple_loss=0.4167, pruned_loss=0.1895, over 8564.00 frames. ], tot_loss[loss=0.3719, simple_loss=0.4067, pruned_loss=0.1686, over 1612815.74 frames. ], batch size: 31, lr: 3.02e-02, grad_scale: 16.0 +2023-02-05 20:19:08,901 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12265.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:20,420 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12282.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:21,722 INFO [train.py:901] (0/4) Epoch 2, batch 4200, loss[loss=0.3241, simple_loss=0.366, pruned_loss=0.1412, over 8132.00 frames. ], tot_loss[loss=0.3732, simple_loss=0.408, pruned_loss=0.1692, over 1614372.38 frames. ], batch size: 22, lr: 3.01e-02, grad_scale: 16.0 +2023-02-05 20:19:25,930 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12290.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:28,617 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:40,048 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12310.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:42,058 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.324e+02 3.573e+02 4.694e+02 5.833e+02 1.413e+03, threshold=9.388e+02, percent-clipped=6.0 +2023-02-05 20:19:43,524 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 20:19:49,252 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12323.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:57,052 INFO [train.py:901] (0/4) Epoch 2, batch 4250, loss[loss=0.3528, simple_loss=0.3956, pruned_loss=0.155, over 8201.00 frames. ], tot_loss[loss=0.3741, simple_loss=0.4088, pruned_loss=0.1697, over 1611205.27 frames. ], batch size: 23, lr: 3.01e-02, grad_scale: 16.0 +2023-02-05 20:20:00,212 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 20:20:06,019 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:06,650 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 20:20:20,971 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12368.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:31,220 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-05 20:20:32,224 INFO [train.py:901] (0/4) Epoch 2, batch 4300, loss[loss=0.3655, simple_loss=0.4131, pruned_loss=0.159, over 8348.00 frames. ], tot_loss[loss=0.3727, simple_loss=0.4076, pruned_loss=0.1689, over 1609601.53 frames. 
], batch size: 26, lr: 3.00e-02, grad_scale: 16.0 +2023-02-05 20:20:38,551 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12393.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:41,197 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12397.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:53,213 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.244e+02 3.864e+02 4.648e+02 5.983e+02 1.525e+03, threshold=9.296e+02, percent-clipped=6.0 +2023-02-05 20:21:00,840 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:05,798 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:06,899 INFO [train.py:901] (0/4) Epoch 2, batch 4350, loss[loss=0.4486, simple_loss=0.4523, pruned_loss=0.2224, over 8345.00 frames. ], tot_loss[loss=0.3732, simple_loss=0.4086, pruned_loss=0.1689, over 1615219.28 frames. ], batch size: 25, lr: 2.99e-02, grad_scale: 8.0 +2023-02-05 20:21:09,738 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12438.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:23,500 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12457.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:26,031 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12461.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:27,749 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 20:21:28,045 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:37,547 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9319, 4.2014, 3.6592, 1.7917, 3.5663, 3.4660, 3.7527, 3.0309], + device='cuda:0'), covar=tensor([0.0849, 0.0402, 0.0782, 0.3533, 0.0487, 0.0517, 0.0904, 0.0630], + device='cuda:0'), in_proj_covar=tensor([0.0319, 0.0217, 0.0254, 0.0341, 0.0231, 0.0181, 0.0238, 0.0168], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:21:38,137 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 20:21:42,134 INFO [train.py:901] (0/4) Epoch 2, batch 4400, loss[loss=0.347, simple_loss=0.3845, pruned_loss=0.1548, over 7802.00 frames. ], tot_loss[loss=0.3726, simple_loss=0.4082, pruned_loss=0.1685, over 1616415.41 frames. ], batch size: 20, lr: 2.99e-02, grad_scale: 8.0 +2023-02-05 20:22:02,392 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.494e+02 4.041e+02 4.964e+02 6.742e+02 1.213e+03, threshold=9.928e+02, percent-clipped=4.0 +2023-02-05 20:22:16,721 INFO [train.py:901] (0/4) Epoch 2, batch 4450, loss[loss=0.3578, simple_loss=0.4107, pruned_loss=0.1525, over 8507.00 frames. ], tot_loss[loss=0.3713, simple_loss=0.4076, pruned_loss=0.1675, over 1616637.45 frames. ], batch size: 28, lr: 2.98e-02, grad_scale: 8.0 +2023-02-05 20:22:17,400 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-05 20:22:27,259 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:29,906 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12553.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:44,461 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8545, 2.3552, 2.7091, 1.7560, 1.5438, 2.6713, 0.4478, 1.7382], + device='cuda:0'), covar=tensor([0.1350, 0.1099, 0.0492, 0.0787, 0.1487, 0.0323, 0.2597, 0.1027], + device='cuda:0'), in_proj_covar=tensor([0.0120, 0.0099, 0.0090, 0.0149, 0.0130, 0.0082, 0.0161, 0.0123], + device='cuda:0'), out_proj_covar=tensor([1.2038e-04, 1.0631e-04, 9.2233e-05, 1.4435e-04, 1.3459e-04, 8.3272e-05, + 1.5650e-04, 1.2923e-04], device='cuda:0') +2023-02-05 20:22:45,076 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12574.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:49,129 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:52,403 INFO [train.py:901] (0/4) Epoch 2, batch 4500, loss[loss=0.3433, simple_loss=0.3728, pruned_loss=0.1569, over 7796.00 frames. ], tot_loss[loss=0.3715, simple_loss=0.4082, pruned_loss=0.1673, over 1615691.68 frames. ], batch size: 19, lr: 2.98e-02, grad_scale: 8.0 +2023-02-05 20:22:52,541 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8658, 1.1474, 5.6198, 2.3376, 5.0398, 4.8295, 5.3162, 5.2288], + device='cuda:0'), covar=tensor([0.0227, 0.3337, 0.0183, 0.1311, 0.0678, 0.0271, 0.0221, 0.0248], + device='cuda:0'), in_proj_covar=tensor([0.0182, 0.0376, 0.0234, 0.0268, 0.0323, 0.0258, 0.0241, 0.0261], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:23:06,034 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12604.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:12,542 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 20:23:13,219 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.312e+02 4.309e+02 5.092e+02 6.256e+02 1.421e+03, threshold=1.018e+03, percent-clipped=5.0 +2023-02-05 20:23:18,779 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. limit=2.0 +2023-02-05 20:23:27,081 INFO [train.py:901] (0/4) Epoch 2, batch 4550, loss[loss=0.3634, simple_loss=0.3845, pruned_loss=0.1711, over 7534.00 frames. ], tot_loss[loss=0.3703, simple_loss=0.4072, pruned_loss=0.1668, over 1614997.52 frames. ], batch size: 18, lr: 2.97e-02, grad_scale: 8.0 +2023-02-05 20:23:40,657 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:43,559 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0 +2023-02-05 20:23:57,693 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12678.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:59,752 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:01,601 INFO [train.py:901] (0/4) Epoch 2, batch 4600, loss[loss=0.4312, simple_loss=0.4478, pruned_loss=0.2073, over 8475.00 frames. 
], tot_loss[loss=0.372, simple_loss=0.4078, pruned_loss=0.1681, over 1613704.52 frames. ], batch size: 49, lr: 2.97e-02, grad_scale: 8.0 +2023-02-05 20:24:17,913 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:23,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 3.817e+02 4.647e+02 5.826e+02 1.354e+03, threshold=9.293e+02, percent-clipped=3.0 +2023-02-05 20:24:25,430 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12717.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:26,767 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2179, 1.5786, 1.8903, 1.5102, 1.0029, 1.9735, 0.2560, 0.9874], + device='cuda:0'), covar=tensor([0.1036, 0.0689, 0.0653, 0.0701, 0.1056, 0.0464, 0.2268, 0.0946], + device='cuda:0'), in_proj_covar=tensor([0.0110, 0.0096, 0.0086, 0.0145, 0.0121, 0.0081, 0.0154, 0.0120], + device='cuda:0'), out_proj_covar=tensor([1.1134e-04, 1.0328e-04, 8.8480e-05, 1.4119e-04, 1.2655e-04, 8.2358e-05, + 1.5142e-04, 1.2675e-04], device='cuda:0') +2023-02-05 20:24:37,083 INFO [train.py:901] (0/4) Epoch 2, batch 4650, loss[loss=0.3435, simple_loss=0.3916, pruned_loss=0.1477, over 8128.00 frames. ], tot_loss[loss=0.3731, simple_loss=0.4087, pruned_loss=0.1688, over 1617402.42 frames. ], batch size: 22, lr: 2.96e-02, grad_scale: 8.0 +2023-02-05 20:24:42,614 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:52,923 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0 +2023-02-05 20:25:08,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4572, 2.4756, 1.3374, 2.1336, 2.1265, 1.1442, 1.6881, 2.0888], + device='cuda:0'), covar=tensor([0.1558, 0.0480, 0.1738, 0.0892, 0.1182, 0.1732, 0.1518, 0.0838], + device='cuda:0'), in_proj_covar=tensor([0.0368, 0.0249, 0.0359, 0.0309, 0.0360, 0.0323, 0.0358, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 20:25:09,733 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:11,643 INFO [train.py:901] (0/4) Epoch 2, batch 4700, loss[loss=0.4061, simple_loss=0.4392, pruned_loss=0.1865, over 8308.00 frames. ], tot_loss[loss=0.3756, simple_loss=0.4104, pruned_loss=0.1704, over 1617183.72 frames. ], batch size: 49, lr: 2.96e-02, grad_scale: 8.0 +2023-02-05 20:25:28,064 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12808.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:29,569 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12809.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:32,793 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.540e+02 4.122e+02 5.358e+02 6.927e+02 1.344e+03, threshold=1.072e+03, percent-clipped=8.0 +2023-02-05 20:25:47,179 INFO [train.py:901] (0/4) Epoch 2, batch 4750, loss[loss=0.4072, simple_loss=0.4357, pruned_loss=0.1894, over 8097.00 frames. ], tot_loss[loss=0.3739, simple_loss=0.4083, pruned_loss=0.1697, over 1614787.54 frames. 
], batch size: 23, lr: 2.95e-02, grad_scale: 8.0 +2023-02-05 20:25:47,402 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12834.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:26:09,928 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2196, 4.0311, 2.4821, 3.1473, 3.5450, 2.3095, 2.5758, 3.1878], + device='cuda:0'), covar=tensor([0.1346, 0.0461, 0.1036, 0.0724, 0.0605, 0.1089, 0.1172, 0.0790], + device='cuda:0'), in_proj_covar=tensor([0.0376, 0.0251, 0.0363, 0.0318, 0.0373, 0.0326, 0.0371, 0.0332], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 20:26:18,690 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 20:26:20,739 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 20:26:22,726 INFO [train.py:901] (0/4) Epoch 2, batch 4800, loss[loss=0.4267, simple_loss=0.444, pruned_loss=0.2047, over 8199.00 frames. ], tot_loss[loss=0.3731, simple_loss=0.4082, pruned_loss=0.169, over 1612854.36 frames. ], batch size: 23, lr: 2.95e-02, grad_scale: 8.0 +2023-02-05 20:26:36,103 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1286, 1.5002, 4.1889, 2.1281, 3.6102, 3.4336, 3.7083, 3.7147], + device='cuda:0'), covar=tensor([0.0305, 0.3374, 0.0237, 0.1480, 0.0839, 0.0417, 0.0354, 0.0386], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0387, 0.0238, 0.0275, 0.0332, 0.0264, 0.0250, 0.0268], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:26:43,398 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.314e+02 3.678e+02 4.471e+02 5.888e+02 1.234e+03, threshold=8.941e+02, percent-clipped=3.0 +2023-02-05 20:26:49,680 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12923.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:26:57,686 INFO [train.py:901] (0/4) Epoch 2, batch 4850, loss[loss=0.4242, simple_loss=0.4375, pruned_loss=0.2055, over 8358.00 frames. ], tot_loss[loss=0.3707, simple_loss=0.4066, pruned_loss=0.1674, over 1610731.86 frames. ], batch size: 24, lr: 2.94e-02, grad_scale: 8.0 +2023-02-05 20:27:12,744 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 20:27:13,738 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-05 20:27:25,497 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9109, 2.2977, 2.1154, 1.9106, 2.2873, 2.2236, 2.6985, 2.3688], + device='cuda:0'), covar=tensor([0.0600, 0.0986, 0.1338, 0.1283, 0.0697, 0.1160, 0.0688, 0.0575], + device='cuda:0'), in_proj_covar=tensor([0.0223, 0.0255, 0.0276, 0.0248, 0.0225, 0.0243, 0.0214, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:27:32,103 INFO [train.py:901] (0/4) Epoch 2, batch 4900, loss[loss=0.4049, simple_loss=0.4232, pruned_loss=0.1933, over 7690.00 frames. ], tot_loss[loss=0.3704, simple_loss=0.4061, pruned_loss=0.1674, over 1610313.74 frames. 
], batch size: 18, lr: 2.94e-02, grad_scale: 8.0 +2023-02-05 20:27:53,284 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 4.170e+02 5.532e+02 7.452e+02 1.588e+03, threshold=1.106e+03, percent-clipped=9.0 +2023-02-05 20:28:06,715 INFO [train.py:901] (0/4) Epoch 2, batch 4950, loss[loss=0.373, simple_loss=0.3897, pruned_loss=0.1782, over 7274.00 frames. ], tot_loss[loss=0.3716, simple_loss=0.4066, pruned_loss=0.1683, over 1610575.29 frames. ], batch size: 16, lr: 2.93e-02, grad_scale: 8.0 +2023-02-05 20:28:41,854 INFO [train.py:901] (0/4) Epoch 2, batch 5000, loss[loss=0.2924, simple_loss=0.3592, pruned_loss=0.1128, over 8612.00 frames. ], tot_loss[loss=0.3715, simple_loss=0.4067, pruned_loss=0.1682, over 1609350.38 frames. ], batch size: 39, lr: 2.93e-02, grad_scale: 8.0 +2023-02-05 20:29:02,462 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.193e+02 4.113e+02 5.050e+02 6.511e+02 1.788e+03, threshold=1.010e+03, percent-clipped=5.0 +2023-02-05 20:29:05,924 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7914, 1.8728, 3.2703, 1.1002, 2.3638, 1.8729, 1.6126, 2.0113], + device='cuda:0'), covar=tensor([0.0780, 0.1166, 0.0307, 0.1689, 0.0897, 0.1319, 0.0800, 0.1215], + device='cuda:0'), in_proj_covar=tensor([0.0385, 0.0368, 0.0407, 0.0443, 0.0491, 0.0432, 0.0383, 0.0491], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 20:29:09,710 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=13125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:15,847 INFO [train.py:901] (0/4) Epoch 2, batch 5050, loss[loss=0.3176, simple_loss=0.3674, pruned_loss=0.1339, over 8468.00 frames. ], tot_loss[loss=0.3706, simple_loss=0.4061, pruned_loss=0.1675, over 1611000.11 frames. ], batch size: 49, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:29:47,469 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13179.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:47,963 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 20:29:50,615 INFO [train.py:901] (0/4) Epoch 2, batch 5100, loss[loss=0.3599, simple_loss=0.3968, pruned_loss=0.1615, over 8185.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.4056, pruned_loss=0.1667, over 1610247.05 frames. ], batch size: 23, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:30:04,619 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13204.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:08,423 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8305, 2.6706, 1.9738, 3.0443, 1.4531, 1.2380, 1.7472, 2.4174], + device='cuda:0'), covar=tensor([0.1281, 0.0998, 0.1579, 0.0401, 0.1750, 0.2094, 0.1947, 0.0919], + device='cuda:0'), in_proj_covar=tensor([0.0314, 0.0337, 0.0327, 0.0231, 0.0320, 0.0334, 0.0373, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0005, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:30:11,532 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.930e+02 4.883e+02 5.892e+02 1.355e+03, threshold=9.766e+02, percent-clipped=3.0 +2023-02-05 20:30:24,591 INFO [train.py:901] (0/4) Epoch 2, batch 5150, loss[loss=0.3651, simple_loss=0.4141, pruned_loss=0.158, over 8199.00 frames. ], tot_loss[loss=0.3701, simple_loss=0.4063, pruned_loss=0.167, over 1608583.38 frames. 
], batch size: 23, lr: 2.91e-02, grad_scale: 4.0 +2023-02-05 20:30:28,694 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=13240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:58,999 INFO [train.py:901] (0/4) Epoch 2, batch 5200, loss[loss=0.3956, simple_loss=0.4315, pruned_loss=0.1798, over 8500.00 frames. ], tot_loss[loss=0.3694, simple_loss=0.4057, pruned_loss=0.1665, over 1607894.94 frames. ], batch size: 29, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:02,045 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.64 vs. limit=5.0 +2023-02-05 20:31:03,170 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0407, 1.2510, 4.2910, 1.7413, 3.7251, 3.6479, 3.6564, 3.7757], + device='cuda:0'), covar=tensor([0.0402, 0.3022, 0.0262, 0.1464, 0.0844, 0.0379, 0.0340, 0.0381], + device='cuda:0'), in_proj_covar=tensor([0.0192, 0.0380, 0.0241, 0.0272, 0.0332, 0.0260, 0.0242, 0.0264], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:31:05,932 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2994, 2.4133, 1.5157, 2.0807, 1.8979, 1.3053, 1.4889, 2.2304], + device='cuda:0'), covar=tensor([0.1233, 0.0410, 0.1015, 0.0636, 0.0798, 0.1116, 0.1212, 0.0658], + device='cuda:0'), in_proj_covar=tensor([0.0379, 0.0249, 0.0367, 0.0317, 0.0366, 0.0336, 0.0375, 0.0342], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 20:31:20,904 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 4.339e+02 5.206e+02 6.705e+02 1.063e+03, threshold=1.041e+03, percent-clipped=3.0 +2023-02-05 20:31:26,493 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2695, 1.4839, 1.5822, 1.2620, 1.0390, 1.5130, 0.1484, 0.5467], + device='cuda:0'), covar=tensor([0.0976, 0.0665, 0.0451, 0.0649, 0.1222, 0.0473, 0.1915, 0.1342], + device='cuda:0'), in_proj_covar=tensor([0.0112, 0.0094, 0.0079, 0.0135, 0.0128, 0.0085, 0.0149, 0.0121], + device='cuda:0'), out_proj_covar=tensor([1.1541e-04, 1.0473e-04, 8.3619e-05, 1.3648e-04, 1.3472e-04, 8.8883e-05, + 1.5138e-04, 1.2869e-04], device='cuda:0') +2023-02-05 20:31:33,608 INFO [train.py:901] (0/4) Epoch 2, batch 5250, loss[loss=0.3381, simple_loss=0.3961, pruned_loss=0.1401, over 8027.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.4056, pruned_loss=0.1667, over 1605268.55 frames. ], batch size: 22, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:42,975 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 20:32:07,576 INFO [train.py:901] (0/4) Epoch 2, batch 5300, loss[loss=0.3385, simple_loss=0.3808, pruned_loss=0.1482, over 8241.00 frames. ], tot_loss[loss=0.3681, simple_loss=0.4047, pruned_loss=0.1658, over 1610285.32 frames. ], batch size: 22, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:32:29,093 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.093e+02 3.821e+02 4.884e+02 6.417e+02 1.823e+03, threshold=9.767e+02, percent-clipped=6.0 +2023-02-05 20:32:42,514 INFO [train.py:901] (0/4) Epoch 2, batch 5350, loss[loss=0.332, simple_loss=0.3764, pruned_loss=0.1438, over 7522.00 frames. ], tot_loss[loss=0.3711, simple_loss=0.4068, pruned_loss=0.1677, over 1613575.61 frames. 
], batch size: 18, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:33:16,587 INFO [train.py:901] (0/4) Epoch 2, batch 5400, loss[loss=0.2785, simple_loss=0.3217, pruned_loss=0.1176, over 7418.00 frames. ], tot_loss[loss=0.3704, simple_loss=0.406, pruned_loss=0.1674, over 1615662.08 frames. ], batch size: 17, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:33:24,787 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6220, 3.7055, 3.2094, 1.6233, 3.0731, 3.0956, 3.3051, 2.7875], + device='cuda:0'), covar=tensor([0.1184, 0.0609, 0.1012, 0.4253, 0.0717, 0.0738, 0.1124, 0.0663], + device='cuda:0'), in_proj_covar=tensor([0.0314, 0.0214, 0.0255, 0.0337, 0.0231, 0.0177, 0.0237, 0.0159], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:33:24,907 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:38,016 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.355e+02 3.820e+02 4.559e+02 5.766e+02 1.205e+03, threshold=9.119e+02, percent-clipped=6.0 +2023-02-05 20:33:41,737 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4448, 1.7906, 3.5030, 0.8976, 2.1670, 1.5632, 1.3611, 1.6985], + device='cuda:0'), covar=tensor([0.1290, 0.1499, 0.0379, 0.2224, 0.1311, 0.2026, 0.1280, 0.1804], + device='cuda:0'), in_proj_covar=tensor([0.0399, 0.0380, 0.0412, 0.0446, 0.0506, 0.0441, 0.0396, 0.0502], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 20:33:43,022 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:51,323 INFO [train.py:901] (0/4) Epoch 2, batch 5450, loss[loss=0.4097, simple_loss=0.4338, pruned_loss=0.1928, over 8656.00 frames. ], tot_loss[loss=0.3706, simple_loss=0.4064, pruned_loss=0.1674, over 1613040.40 frames. ], batch size: 34, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:34:25,970 INFO [train.py:901] (0/4) Epoch 2, batch 5500, loss[loss=0.4009, simple_loss=0.4423, pruned_loss=0.1797, over 8247.00 frames. ], tot_loss[loss=0.3706, simple_loss=0.4062, pruned_loss=0.1675, over 1611775.44 frames. ], batch size: 24, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:34:28,057 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 20:34:40,937 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2759, 1.5602, 1.9620, 1.2553, 0.9590, 1.7532, 0.3123, 0.9729], + device='cuda:0'), covar=tensor([0.1326, 0.0807, 0.0515, 0.1048, 0.1579, 0.0638, 0.2625, 0.1386], + device='cuda:0'), in_proj_covar=tensor([0.0107, 0.0092, 0.0081, 0.0133, 0.0132, 0.0081, 0.0147, 0.0117], + device='cuda:0'), out_proj_covar=tensor([1.1187e-04, 1.0181e-04, 8.5565e-05, 1.3545e-04, 1.3969e-04, 8.5669e-05, + 1.5146e-04, 1.2653e-04], device='cuda:0') +2023-02-05 20:34:46,540 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.726e+02 4.817e+02 6.308e+02 1.682e+03, threshold=9.635e+02, percent-clipped=6.0 +2023-02-05 20:34:59,984 INFO [train.py:901] (0/4) Epoch 2, batch 5550, loss[loss=0.3676, simple_loss=0.3922, pruned_loss=0.1715, over 7922.00 frames. ], tot_loss[loss=0.3685, simple_loss=0.4049, pruned_loss=0.166, over 1612453.12 frames. 
], batch size: 20, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:35:01,428 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2448, 1.4280, 1.7115, 1.3099, 1.0604, 1.7149, 0.4985, 0.9613], + device='cuda:0'), covar=tensor([0.1094, 0.1423, 0.0421, 0.0952, 0.1208, 0.0436, 0.2888, 0.1589], + device='cuda:0'), in_proj_covar=tensor([0.0113, 0.0095, 0.0083, 0.0139, 0.0136, 0.0084, 0.0154, 0.0124], + device='cuda:0'), out_proj_covar=tensor([1.1757e-04, 1.0561e-04, 8.8889e-05, 1.4163e-04, 1.4379e-04, 8.9130e-05, + 1.5758e-04, 1.3318e-04], device='cuda:0') +2023-02-05 20:35:35,316 INFO [train.py:901] (0/4) Epoch 2, batch 5600, loss[loss=0.3874, simple_loss=0.4305, pruned_loss=0.1722, over 8117.00 frames. ], tot_loss[loss=0.3686, simple_loss=0.4055, pruned_loss=0.1659, over 1612455.31 frames. ], batch size: 23, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:35:55,773 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 3.833e+02 4.619e+02 6.071e+02 1.383e+03, threshold=9.238e+02, percent-clipped=5.0 +2023-02-05 20:35:58,636 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5427, 1.8753, 1.4875, 1.3588, 1.8986, 1.7175, 1.8182, 2.0753], + device='cuda:0'), covar=tensor([0.1089, 0.1492, 0.2168, 0.1986, 0.0913, 0.1613, 0.1079, 0.0720], + device='cuda:0'), in_proj_covar=tensor([0.0222, 0.0253, 0.0279, 0.0249, 0.0225, 0.0242, 0.0215, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:36:08,566 INFO [train.py:901] (0/4) Epoch 2, batch 5650, loss[loss=0.4257, simple_loss=0.4557, pruned_loss=0.1978, over 8497.00 frames. ], tot_loss[loss=0.3675, simple_loss=0.4049, pruned_loss=0.165, over 1615543.72 frames. ], batch size: 26, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:36:23,365 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=13755.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:36:34,187 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 20:36:43,564 INFO [train.py:901] (0/4) Epoch 2, batch 5700, loss[loss=0.3413, simple_loss=0.4017, pruned_loss=0.1404, over 8571.00 frames. ], tot_loss[loss=0.3679, simple_loss=0.4057, pruned_loss=0.1651, over 1617233.49 frames. ], batch size: 34, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:01,016 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9227, 2.5146, 2.1373, 2.9762, 1.5649, 1.3056, 2.0211, 2.1799], + device='cuda:0'), covar=tensor([0.1257, 0.1266, 0.1349, 0.0429, 0.1771, 0.2256, 0.1694, 0.1368], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0328, 0.0318, 0.0231, 0.0308, 0.0327, 0.0367, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0005, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:37:05,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.337e+02 4.261e+02 5.123e+02 6.631e+02 2.352e+03, threshold=1.025e+03, percent-clipped=5.0 +2023-02-05 20:37:18,916 INFO [train.py:901] (0/4) Epoch 2, batch 5750, loss[loss=0.3244, simple_loss=0.3823, pruned_loss=0.1332, over 8241.00 frames. ], tot_loss[loss=0.3693, simple_loss=0.4061, pruned_loss=0.1663, over 1615399.99 frames. ], batch size: 22, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:38,963 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-05 20:37:54,468 INFO [train.py:901] (0/4) Epoch 2, batch 5800, loss[loss=0.3548, simple_loss=0.3964, pruned_loss=0.1566, over 8137.00 frames. ], tot_loss[loss=0.3667, simple_loss=0.404, pruned_loss=0.1647, over 1613711.81 frames. ], batch size: 22, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:02,404 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.07 vs. limit=5.0 +2023-02-05 20:38:15,740 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.601e+02 3.784e+02 4.729e+02 6.225e+02 2.390e+03, threshold=9.458e+02, percent-clipped=5.0 +2023-02-05 20:38:29,064 INFO [train.py:901] (0/4) Epoch 2, batch 5850, loss[loss=0.3933, simple_loss=0.4201, pruned_loss=0.1832, over 7923.00 frames. ], tot_loss[loss=0.3671, simple_loss=0.4043, pruned_loss=0.165, over 1616278.07 frames. ], batch size: 20, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:39,342 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6685, 2.3928, 1.6818, 2.2351, 2.0453, 1.3622, 1.7360, 2.3449], + device='cuda:0'), covar=tensor([0.1113, 0.0383, 0.0974, 0.0666, 0.0783, 0.1277, 0.0979, 0.0615], + device='cuda:0'), in_proj_covar=tensor([0.0375, 0.0245, 0.0359, 0.0318, 0.0362, 0.0333, 0.0356, 0.0331], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 20:39:03,917 INFO [train.py:901] (0/4) Epoch 2, batch 5900, loss[loss=0.4424, simple_loss=0.4408, pruned_loss=0.222, over 7293.00 frames. ], tot_loss[loss=0.367, simple_loss=0.404, pruned_loss=0.165, over 1609435.04 frames. ], batch size: 72, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:39:15,923 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-14000.pt +2023-02-05 20:39:27,076 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.452e+02 3.946e+02 4.724e+02 6.297e+02 1.551e+03, threshold=9.448e+02, percent-clipped=7.0 +2023-02-05 20:39:40,162 INFO [train.py:901] (0/4) Epoch 2, batch 5950, loss[loss=0.4412, simple_loss=0.4528, pruned_loss=0.2148, over 7492.00 frames. ], tot_loss[loss=0.3669, simple_loss=0.4041, pruned_loss=0.1648, over 1610885.59 frames. ], batch size: 71, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,633 INFO [train.py:901] (0/4) Epoch 2, batch 6000, loss[loss=0.2961, simple_loss=0.338, pruned_loss=0.1271, over 7436.00 frames. ], tot_loss[loss=0.364, simple_loss=0.4011, pruned_loss=0.1634, over 1607719.44 frames. ], batch size: 17, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,634 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 20:40:27,827 INFO [train.py:935] (0/4) Epoch 2, validation: loss=0.2758, simple_loss=0.3606, pruned_loss=0.0955, over 944034.00 frames. 
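The `train.py:926/935` entries just above show the periodic validation pass that is interleaved with training (triggered here at batch 6000): the model is evaluated on the whole validation set and a frame-weighted average loss is reported. Below is a minimal sketch of what such a pass typically looks like; the names `model`, `valid_dl`, and `compute_loss` are assumptions for illustration, not the actual icefall code.

```python
import torch

def validate(model, valid_dl, compute_loss, device="cuda:0"):
    """Minimal sketch of a validation pass like the one logged above.

    Accumulates frame-weighted losses over the validation set, matching the
    "over 944034.00 frames" style of reporting. `compute_loss` is a
    hypothetical helper returning (per-frame average loss, num_frames)
    for one batch.
    """
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_dl:
            loss, num_frames = compute_loss(model, batch, device)
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()  # resume training mode afterwards
    return tot_loss / tot_frames
```

Weighting by frame count means long and short cuts contribute proportionally, which is why the log reports totals "over N frames" rather than per-batch averages.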
+2023-02-05 20:40:27,828 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6484MB +2023-02-05 20:40:32,051 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:38,735 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14099.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:49,504 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 3.733e+02 4.780e+02 6.772e+02 2.203e+03, threshold=9.561e+02, percent-clipped=10.0 +2023-02-05 20:40:53,745 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:02,696 INFO [train.py:901] (0/4) Epoch 2, batch 6050, loss[loss=0.3269, simple_loss=0.3628, pruned_loss=0.1455, over 7569.00 frames. ], tot_loss[loss=0.3635, simple_loss=0.4006, pruned_loss=0.1632, over 1605917.21 frames. ], batch size: 18, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:05,424 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14138.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:41:25,413 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4669, 2.6702, 1.5314, 2.0932, 2.0091, 1.2944, 1.9910, 2.0655], + device='cuda:0'), covar=tensor([0.1217, 0.0335, 0.1115, 0.0705, 0.0867, 0.1146, 0.0927, 0.0772], + device='cuda:0'), in_proj_covar=tensor([0.0370, 0.0250, 0.0360, 0.0320, 0.0358, 0.0327, 0.0349, 0.0335], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 20:41:29,380 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6110, 3.0805, 2.5522, 3.8594, 1.9601, 1.4959, 2.1454, 2.7582], + device='cuda:0'), covar=tensor([0.1050, 0.1341, 0.1417, 0.0293, 0.1768, 0.2287, 0.2211, 0.1123], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0339, 0.0324, 0.0234, 0.0313, 0.0335, 0.0369, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0003, 0.0004, 0.0005, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:41:33,311 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4620, 2.1220, 3.3062, 0.9159, 2.1461, 1.5169, 1.4961, 1.8047], + device='cuda:0'), covar=tensor([0.1152, 0.1257, 0.0459, 0.2241, 0.1242, 0.1983, 0.1099, 0.1769], + device='cuda:0'), in_proj_covar=tensor([0.0390, 0.0373, 0.0418, 0.0442, 0.0493, 0.0437, 0.0394, 0.0485], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 20:41:37,173 INFO [train.py:901] (0/4) Epoch 2, batch 6100, loss[loss=0.3469, simple_loss=0.3946, pruned_loss=0.1496, over 8470.00 frames. ], tot_loss[loss=0.3644, simple_loss=0.401, pruned_loss=0.1639, over 1608330.75 frames. 
], batch size: 25, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:40,655 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.1124, 5.2580, 4.4812, 2.0616, 4.3949, 4.5809, 4.8540, 4.1174], + device='cuda:0'), covar=tensor([0.0525, 0.0233, 0.0676, 0.3387, 0.0340, 0.0430, 0.0526, 0.0459], + device='cuda:0'), in_proj_covar=tensor([0.0316, 0.0213, 0.0254, 0.0339, 0.0225, 0.0183, 0.0233, 0.0160], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:41:51,630 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9317, 3.8509, 2.1156, 2.0625, 2.5413, 1.7722, 2.2088, 2.4726], + device='cuda:0'), covar=tensor([0.1491, 0.0222, 0.0978, 0.1179, 0.0873, 0.1013, 0.1330, 0.0953], + device='cuda:0'), in_proj_covar=tensor([0.0372, 0.0249, 0.0358, 0.0322, 0.0356, 0.0327, 0.0355, 0.0333], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 20:41:58,388 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14214.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:58,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 3.920e+02 4.920e+02 6.492e+02 2.677e+03, threshold=9.840e+02, percent-clipped=6.0 +2023-02-05 20:42:07,558 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3212, 1.5617, 2.0475, 1.3156, 0.9524, 1.7995, 0.2355, 0.8641], + device='cuda:0'), covar=tensor([0.2206, 0.1099, 0.0493, 0.1090, 0.1612, 0.0516, 0.3100, 0.1310], + device='cuda:0'), in_proj_covar=tensor([0.0113, 0.0099, 0.0085, 0.0143, 0.0143, 0.0086, 0.0161, 0.0123], + device='cuda:0'), out_proj_covar=tensor([1.2028e-04, 1.1350e-04, 9.1997e-05, 1.4874e-04, 1.5364e-04, 9.4816e-05, + 1.6764e-04, 1.3480e-04], device='cuda:0') +2023-02-05 20:42:08,080 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 20:42:11,469 INFO [train.py:901] (0/4) Epoch 2, batch 6150, loss[loss=0.3848, simple_loss=0.4236, pruned_loss=0.1731, over 8425.00 frames. ], tot_loss[loss=0.3625, simple_loss=0.4004, pruned_loss=0.1623, over 1611098.71 frames. ], batch size: 29, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:42:46,424 INFO [train.py:901] (0/4) Epoch 2, batch 6200, loss[loss=0.3236, simple_loss=0.3722, pruned_loss=0.1375, over 8036.00 frames. ], tot_loss[loss=0.3633, simple_loss=0.4006, pruned_loss=0.163, over 1609464.69 frames. ], batch size: 22, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:42:56,106 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5391, 1.2937, 1.3233, 1.2259, 1.4569, 1.4067, 1.2519, 1.2952], + device='cuda:0'), covar=tensor([0.0924, 0.1475, 0.2043, 0.1696, 0.0741, 0.1574, 0.1005, 0.0765], + device='cuda:0'), in_proj_covar=tensor([0.0218, 0.0246, 0.0276, 0.0243, 0.0217, 0.0241, 0.0210, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:43:08,134 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 3.453e+02 4.846e+02 6.394e+02 2.249e+03, threshold=9.691e+02, percent-clipped=6.0 +2023-02-05 20:43:15,404 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.90 vs. limit=2.0 +2023-02-05 20:43:21,530 INFO [train.py:901] (0/4) Epoch 2, batch 6250, loss[loss=0.4214, simple_loss=0.4499, pruned_loss=0.1964, over 8665.00 frames. 
], tot_loss[loss=0.3634, simple_loss=0.4006, pruned_loss=0.1631, over 1605998.93 frames. ], batch size: 34, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:43:55,855 INFO [train.py:901] (0/4) Epoch 2, batch 6300, loss[loss=0.3613, simple_loss=0.4044, pruned_loss=0.1591, over 8245.00 frames. ], tot_loss[loss=0.3647, simple_loss=0.4016, pruned_loss=0.1639, over 1611159.72 frames. ], batch size: 24, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:17,496 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.643e+02 3.823e+02 4.655e+02 5.877e+02 1.568e+03, threshold=9.309e+02, percent-clipped=4.0 +2023-02-05 20:44:28,377 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14431.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:30,244 INFO [train.py:901] (0/4) Epoch 2, batch 6350, loss[loss=0.3815, simple_loss=0.4183, pruned_loss=0.1723, over 8246.00 frames. ], tot_loss[loss=0.3656, simple_loss=0.4022, pruned_loss=0.1645, over 1610297.35 frames. ], batch size: 24, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:30,312 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14434.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:39,683 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5613, 1.8445, 3.2184, 1.0084, 2.3168, 1.6090, 1.5280, 1.9820], + device='cuda:0'), covar=tensor([0.0892, 0.1140, 0.0268, 0.1804, 0.0897, 0.1546, 0.0847, 0.1239], + device='cuda:0'), in_proj_covar=tensor([0.0407, 0.0382, 0.0432, 0.0456, 0.0510, 0.0450, 0.0402, 0.0507], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 20:44:51,460 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14465.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:54,880 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:03,089 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14482.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:45:04,274 INFO [train.py:901] (0/4) Epoch 2, batch 6400, loss[loss=0.3266, simple_loss=0.3781, pruned_loss=0.1376, over 8506.00 frames. ], tot_loss[loss=0.3642, simple_loss=0.4011, pruned_loss=0.1636, over 1612929.84 frames. 
], batch size: 28, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:08,457 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7873, 3.4258, 2.4943, 4.0503, 1.7635, 2.0100, 2.3725, 3.4763], + device='cuda:0'), covar=tensor([0.0879, 0.1001, 0.1186, 0.0222, 0.1768, 0.1829, 0.2034, 0.0752], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0336, 0.0324, 0.0234, 0.0316, 0.0333, 0.0368, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0003, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:0') +2023-02-05 20:45:12,409 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14495.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:19,137 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14505.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:25,558 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.974e+02 5.065e+02 7.362e+02 1.328e+03, threshold=1.013e+03, percent-clipped=8.0 +2023-02-05 20:45:38,722 INFO [train.py:901] (0/4) Epoch 2, batch 6450, loss[loss=0.4155, simple_loss=0.4376, pruned_loss=0.1967, over 8134.00 frames. ], tot_loss[loss=0.3652, simple_loss=0.4019, pruned_loss=0.1642, over 1612635.77 frames. ], batch size: 22, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:48,966 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:10,585 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14580.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:13,173 INFO [train.py:901] (0/4) Epoch 2, batch 6500, loss[loss=0.306, simple_loss=0.3747, pruned_loss=0.1187, over 8367.00 frames. ], tot_loss[loss=0.3631, simple_loss=0.4007, pruned_loss=0.1628, over 1608384.69 frames. ], batch size: 24, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:46:22,643 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14597.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:46:35,358 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.999e+02 5.009e+02 6.288e+02 1.522e+03, threshold=1.002e+03, percent-clipped=8.0 +2023-02-05 20:46:48,444 INFO [train.py:901] (0/4) Epoch 2, batch 6550, loss[loss=0.3785, simple_loss=0.42, pruned_loss=0.1685, over 8635.00 frames. ], tot_loss[loss=0.367, simple_loss=0.4039, pruned_loss=0.1651, over 1612768.45 frames. ], batch size: 34, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:47:16,641 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 20:47:23,564 INFO [train.py:901] (0/4) Epoch 2, batch 6600, loss[loss=0.3322, simple_loss=0.3905, pruned_loss=0.1369, over 8141.00 frames. ], tot_loss[loss=0.3624, simple_loss=0.4003, pruned_loss=0.1622, over 1611987.65 frames. 
], batch size: 22, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:47:32,650 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3222, 2.0195, 1.9410, 0.4932, 1.9503, 1.3927, 0.3328, 1.7422], + device='cuda:0'), covar=tensor([0.0150, 0.0072, 0.0081, 0.0179, 0.0104, 0.0252, 0.0267, 0.0089], + device='cuda:0'), in_proj_covar=tensor([0.0194, 0.0137, 0.0124, 0.0186, 0.0135, 0.0250, 0.0199, 0.0179], + device='cuda:0'), out_proj_covar=tensor([1.1092e-04, 7.7753e-05, 7.3749e-05, 1.0455e-04, 8.1603e-05, 1.5364e-04, + 1.1549e-04, 1.0381e-04], device='cuda:0') +2023-02-05 20:47:36,575 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:47:45,898 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.289e+02 3.681e+02 4.457e+02 5.556e+02 1.208e+03, threshold=8.913e+02, percent-clipped=4.0 +2023-02-05 20:47:58,960 INFO [train.py:901] (0/4) Epoch 2, batch 6650, loss[loss=0.3344, simple_loss=0.39, pruned_loss=0.1394, over 8353.00 frames. ], tot_loss[loss=0.3626, simple_loss=0.4008, pruned_loss=0.1622, over 1616256.76 frames. ], batch size: 24, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:16,415 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14758.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:25,755 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-05 20:48:28,656 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14775.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:34,798 INFO [train.py:901] (0/4) Epoch 2, batch 6700, loss[loss=0.3493, simple_loss=0.4021, pruned_loss=0.1482, over 8528.00 frames. ], tot_loss[loss=0.3595, simple_loss=0.3981, pruned_loss=0.1604, over 1610648.67 frames. ], batch size: 28, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:50,211 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:56,685 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.056e+02 3.873e+02 4.634e+02 6.203e+02 1.536e+03, threshold=9.268e+02, percent-clipped=6.0 +2023-02-05 20:49:07,160 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14830.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:10,368 INFO [train.py:901] (0/4) Epoch 2, batch 6750, loss[loss=0.3697, simple_loss=0.402, pruned_loss=0.1687, over 8032.00 frames. ], tot_loss[loss=0.3608, simple_loss=0.3995, pruned_loss=0.161, over 1616820.84 frames. 
], batch size: 22, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:11,940 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14836.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:21,258 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14849.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:24,100 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14853.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:49:29,662 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14861.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:41,505 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14878.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:49:45,993 INFO [train.py:901] (0/4) Epoch 2, batch 6800, loss[loss=0.388, simple_loss=0.411, pruned_loss=0.1825, over 8624.00 frames. ], tot_loss[loss=0.3631, simple_loss=0.4015, pruned_loss=0.1623, over 1617587.88 frames. ], batch size: 31, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:50,353 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14890.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:51,039 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5827, 3.8847, 2.3483, 2.6313, 2.8837, 1.9339, 2.1622, 2.7417], + device='cuda:0'), covar=tensor([0.1458, 0.0461, 0.0953, 0.0834, 0.0798, 0.1182, 0.1424, 0.1120], + device='cuda:0'), in_proj_covar=tensor([0.0382, 0.0247, 0.0366, 0.0317, 0.0354, 0.0338, 0.0359, 0.0332], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 20:49:54,312 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 20:49:54,456 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9033, 0.9213, 4.0311, 1.5576, 3.4192, 3.3147, 3.5077, 3.5878], + device='cuda:0'), covar=tensor([0.0361, 0.3442, 0.0334, 0.1858, 0.0968, 0.0450, 0.0404, 0.0422], + device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0381, 0.0245, 0.0280, 0.0338, 0.0267, 0.0260, 0.0275], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:50:07,691 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.663e+02 4.715e+02 6.092e+02 1.805e+03, threshold=9.431e+02, percent-clipped=7.0 +2023-02-05 20:50:21,330 INFO [train.py:901] (0/4) Epoch 2, batch 6850, loss[loss=0.312, simple_loss=0.3833, pruned_loss=0.1204, over 8474.00 frames. ], tot_loss[loss=0.3636, simple_loss=0.402, pruned_loss=0.1626, over 1613195.98 frames. ], batch size: 25, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:50:42,744 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14964.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:50:45,353 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 20:50:57,108 INFO [train.py:901] (0/4) Epoch 2, batch 6900, loss[loss=0.3758, simple_loss=0.4174, pruned_loss=0.1671, over 8466.00 frames. ], tot_loss[loss=0.3638, simple_loss=0.4024, pruned_loss=0.1626, over 1615612.76 frames. 
], batch size: 27, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:51:19,286 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.011e+02 4.191e+02 5.097e+02 7.005e+02 1.700e+03, threshold=1.019e+03, percent-clipped=5.0 +2023-02-05 20:51:20,411 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 20:51:30,020 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4509, 1.7702, 2.0734, 1.6215, 1.0669, 1.8655, 0.4272, 1.2116], + device='cuda:0'), covar=tensor([0.1515, 0.1303, 0.0687, 0.1253, 0.2607, 0.0777, 0.3898, 0.1541], + device='cuda:0'), in_proj_covar=tensor([0.0107, 0.0093, 0.0079, 0.0139, 0.0140, 0.0082, 0.0156, 0.0112], + device='cuda:0'), out_proj_covar=tensor([1.1922e-04, 1.1052e-04, 8.8005e-05, 1.4862e-04, 1.5254e-04, 9.3523e-05, + 1.6629e-04, 1.2750e-04], device='cuda:0') +2023-02-05 20:51:32,598 INFO [train.py:901] (0/4) Epoch 2, batch 6950, loss[loss=0.3712, simple_loss=0.4227, pruned_loss=0.1598, over 8591.00 frames. ], tot_loss[loss=0.3615, simple_loss=0.4004, pruned_loss=0.1614, over 1612843.20 frames. ], batch size: 34, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:51:46,956 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.5422, 1.6314, 5.4633, 2.1864, 4.8075, 4.5030, 4.8514, 4.7878], + device='cuda:0'), covar=tensor([0.0290, 0.2972, 0.0168, 0.1485, 0.0824, 0.0318, 0.0358, 0.0382], + device='cuda:0'), in_proj_covar=tensor([0.0208, 0.0382, 0.0244, 0.0282, 0.0341, 0.0277, 0.0261, 0.0279], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 20:51:56,478 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 20:52:05,177 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0691, 1.7345, 1.7469, 1.1704, 1.0007, 1.5230, 0.1739, 0.8051], + device='cuda:0'), covar=tensor([0.1985, 0.0935, 0.0512, 0.1106, 0.2180, 0.0536, 0.3502, 0.1464], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0092, 0.0078, 0.0136, 0.0136, 0.0080, 0.0152, 0.0111], + device='cuda:0'), out_proj_covar=tensor([1.1993e-04, 1.0899e-04, 8.6352e-05, 1.4546e-04, 1.4939e-04, 9.1325e-05, + 1.6171e-04, 1.2568e-04], device='cuda:0') +2023-02-05 20:52:08,415 INFO [train.py:901] (0/4) Epoch 2, batch 7000, loss[loss=0.3752, simple_loss=0.4115, pruned_loss=0.1695, over 8083.00 frames. ], tot_loss[loss=0.3597, simple_loss=0.3994, pruned_loss=0.1601, over 1612175.97 frames. ], batch size: 21, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:52:21,512 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15102.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:52:30,569 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.233e+02 3.928e+02 4.810e+02 5.818e+02 1.410e+03, threshold=9.621e+02, percent-clipped=1.0 +2023-02-05 20:52:44,346 INFO [train.py:901] (0/4) Epoch 2, batch 7050, loss[loss=0.3451, simple_loss=0.3924, pruned_loss=0.1489, over 8457.00 frames. ], tot_loss[loss=0.3613, simple_loss=0.4006, pruned_loss=0.161, over 1618706.84 frames. 
], batch size: 25, lr: 2.75e-02, grad_scale: 16.0 +2023-02-05 20:52:52,913 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:10,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15171.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:18,777 INFO [train.py:901] (0/4) Epoch 2, batch 7100, loss[loss=0.4176, simple_loss=0.4181, pruned_loss=0.2086, over 7112.00 frames. ], tot_loss[loss=0.3598, simple_loss=0.3992, pruned_loss=0.1602, over 1614933.91 frames. ], batch size: 72, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:53:39,773 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15213.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:41,002 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.718e+02 4.413e+02 5.855e+02 1.165e+03, threshold=8.826e+02, percent-clipped=3.0 +2023-02-05 20:53:42,524 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15217.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:44,504 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:52,746 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.17 vs. limit=5.0 +2023-02-05 20:53:53,627 INFO [train.py:901] (0/4) Epoch 2, batch 7150, loss[loss=0.3743, simple_loss=0.4102, pruned_loss=0.1692, over 8372.00 frames. ], tot_loss[loss=0.36, simple_loss=0.3993, pruned_loss=0.1604, over 1613879.62 frames. ], batch size: 49, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:54:02,063 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:54:19,457 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-05 20:54:29,193 INFO [train.py:901] (0/4) Epoch 2, batch 7200, loss[loss=0.3727, simple_loss=0.4085, pruned_loss=0.1685, over 8314.00 frames. ], tot_loss[loss=0.3583, simple_loss=0.3986, pruned_loss=0.159, over 1616505.41 frames. 
], batch size: 25, lr: 2.73e-02, grad_scale: 16.0 +2023-02-05 20:54:30,791 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5016, 1.7930, 1.9726, 0.3640, 2.0419, 1.3590, 0.4534, 1.7980], + device='cuda:0'), covar=tensor([0.0107, 0.0057, 0.0058, 0.0164, 0.0083, 0.0197, 0.0188, 0.0062], + device='cuda:0'), in_proj_covar=tensor([0.0197, 0.0132, 0.0120, 0.0182, 0.0135, 0.0246, 0.0198, 0.0171], + device='cuda:0'), out_proj_covar=tensor([1.1097e-04, 7.3825e-05, 6.9222e-05, 9.9501e-05, 7.9556e-05, 1.4948e-04, + 1.1297e-04, 9.7030e-05], device='cuda:0') +2023-02-05 20:54:31,471 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4264, 1.8582, 1.4614, 1.9039, 1.7247, 1.1248, 1.4738, 1.9578], + device='cuda:0'), covar=tensor([0.1219, 0.0577, 0.1198, 0.0742, 0.0879, 0.1352, 0.1134, 0.0693], + device='cuda:0'), in_proj_covar=tensor([0.0378, 0.0248, 0.0354, 0.0326, 0.0351, 0.0329, 0.0355, 0.0327], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:0') +2023-02-05 20:54:51,171 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.528e+02 3.704e+02 4.905e+02 6.625e+02 1.855e+03, threshold=9.809e+02, percent-clipped=12.0 +2023-02-05 20:55:02,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8382, 1.3562, 3.2603, 1.2586, 2.1162, 3.6586, 3.4061, 3.1068], + device='cuda:0'), covar=tensor([0.1202, 0.1776, 0.0362, 0.1950, 0.0817, 0.0320, 0.0394, 0.0591], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0261, 0.0190, 0.0254, 0.0195, 0.0164, 0.0154, 0.0236], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 20:55:04,904 INFO [train.py:901] (0/4) Epoch 2, batch 7250, loss[loss=0.3679, simple_loss=0.4167, pruned_loss=0.1595, over 8107.00 frames. ], tot_loss[loss=0.3577, simple_loss=0.3984, pruned_loss=0.1585, over 1612649.29 frames. ], batch size: 23, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:55:39,921 INFO [train.py:901] (0/4) Epoch 2, batch 7300, loss[loss=0.3749, simple_loss=0.4286, pruned_loss=0.1606, over 8531.00 frames. ], tot_loss[loss=0.3564, simple_loss=0.3971, pruned_loss=0.1578, over 1610605.17 frames. ], batch size: 28, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:56:02,319 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.194e+02 3.434e+02 4.292e+02 5.923e+02 1.449e+03, threshold=8.584e+02, percent-clipped=5.0 +2023-02-05 20:56:14,885 INFO [train.py:901] (0/4) Epoch 2, batch 7350, loss[loss=0.3683, simple_loss=0.409, pruned_loss=0.1638, over 8327.00 frames. ], tot_loss[loss=0.356, simple_loss=0.3969, pruned_loss=0.1576, over 1609209.28 frames. ], batch size: 25, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:42,772 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15473.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:56:43,901 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 20:56:49,847 INFO [train.py:901] (0/4) Epoch 2, batch 7400, loss[loss=0.3849, simple_loss=0.4107, pruned_loss=0.1796, over 8082.00 frames. ], tot_loss[loss=0.356, simple_loss=0.3964, pruned_loss=0.1577, over 1612713.48 frames. 
], batch size: 21, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:59,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:01,981 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 20:57:11,815 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.940e+02 4.956e+02 6.362e+02 1.377e+03, threshold=9.912e+02, percent-clipped=7.0 +2023-02-05 20:57:24,681 INFO [train.py:901] (0/4) Epoch 2, batch 7450, loss[loss=0.4035, simple_loss=0.4347, pruned_loss=0.1862, over 8458.00 frames. ], tot_loss[loss=0.357, simple_loss=0.397, pruned_loss=0.1585, over 1612476.92 frames. ], batch size: 27, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:57:40,458 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15557.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:41,773 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 20:57:59,035 INFO [train.py:901] (0/4) Epoch 2, batch 7500, loss[loss=0.298, simple_loss=0.3564, pruned_loss=0.1198, over 7811.00 frames. ], tot_loss[loss=0.3585, simple_loss=0.3975, pruned_loss=0.1597, over 1609419.37 frames. ], batch size: 20, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:58:21,359 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.662e+02 4.519e+02 5.678e+02 1.466e+03, threshold=9.038e+02, percent-clipped=6.0 +2023-02-05 20:58:28,532 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 20:58:30,929 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4768, 1.5070, 1.4240, 1.3368, 2.0018, 1.6272, 1.7345, 1.7904], + device='cuda:0'), covar=tensor([0.0727, 0.1431, 0.2040, 0.1575, 0.0674, 0.1454, 0.0936, 0.0685], + device='cuda:0'), in_proj_covar=tensor([0.0209, 0.0238, 0.0273, 0.0237, 0.0208, 0.0237, 0.0201, 0.0200], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 20:58:33,546 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0215, 1.4623, 1.3661, 1.2369, 1.6192, 1.4161, 1.3445, 1.5041], + device='cuda:0'), covar=tensor([0.0798, 0.1510, 0.2192, 0.1740, 0.0803, 0.1720, 0.1099, 0.0721], + device='cuda:0'), in_proj_covar=tensor([0.0209, 0.0238, 0.0273, 0.0237, 0.0208, 0.0237, 0.0201, 0.0200], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 20:58:34,053 INFO [train.py:901] (0/4) Epoch 2, batch 7550, loss[loss=0.3682, simple_loss=0.4054, pruned_loss=0.1655, over 8258.00 frames. ], tot_loss[loss=0.3585, simple_loss=0.3979, pruned_loss=0.1595, over 1613029.15 frames. ], batch size: 24, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:58:46,908 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6113, 2.0679, 3.4561, 3.0244, 2.7398, 1.9607, 1.5001, 1.7049], + device='cuda:0'), covar=tensor([0.0665, 0.0877, 0.0158, 0.0288, 0.0361, 0.0404, 0.0520, 0.0793], + device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0456, 0.0355, 0.0390, 0.0505, 0.0422, 0.0446, 0.0462], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 20:58:53,883 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.37 vs. 
limit=5.0 +2023-02-05 20:59:00,904 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15672.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:59:03,264 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 20:59:05,743 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0 +2023-02-05 20:59:08,580 INFO [train.py:901] (0/4) Epoch 2, batch 7600, loss[loss=0.3588, simple_loss=0.4096, pruned_loss=0.154, over 8110.00 frames. ], tot_loss[loss=0.3587, simple_loss=0.398, pruned_loss=0.1597, over 1613868.71 frames. ], batch size: 23, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 20:59:31,059 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.634e+02 4.473e+02 6.191e+02 1.516e+03, threshold=8.946e+02, percent-clipped=5.0 +2023-02-05 20:59:43,082 INFO [train.py:901] (0/4) Epoch 2, batch 7650, loss[loss=0.383, simple_loss=0.4193, pruned_loss=0.1733, over 8140.00 frames. ], tot_loss[loss=0.359, simple_loss=0.3981, pruned_loss=0.16, over 1615595.19 frames. ], batch size: 22, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 21:00:19,420 INFO [train.py:901] (0/4) Epoch 2, batch 7700, loss[loss=0.3729, simple_loss=0.4096, pruned_loss=0.1682, over 7813.00 frames. ], tot_loss[loss=0.3563, simple_loss=0.3963, pruned_loss=0.1581, over 1614904.28 frames. ], batch size: 20, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:00:41,047 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 3.880e+02 4.902e+02 6.175e+02 1.322e+03, threshold=9.805e+02, percent-clipped=4.0 +2023-02-05 21:00:41,248 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0086, 1.2489, 2.2078, 0.9567, 1.8893, 2.4591, 2.2409, 2.0975], + device='cuda:0'), covar=tensor([0.1306, 0.1309, 0.0494, 0.1920, 0.0563, 0.0342, 0.0383, 0.0653], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0261, 0.0190, 0.0258, 0.0196, 0.0165, 0.0156, 0.0236], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:00:47,292 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6500, 1.4325, 2.9429, 1.1822, 2.0746, 3.2182, 3.0561, 2.6591], + device='cuda:0'), covar=tensor([0.1124, 0.1564, 0.0465, 0.2051, 0.0774, 0.0333, 0.0333, 0.0689], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0259, 0.0189, 0.0256, 0.0195, 0.0164, 0.0155, 0.0236], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:00:51,193 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 21:00:53,924 INFO [train.py:901] (0/4) Epoch 2, batch 7750, loss[loss=0.3544, simple_loss=0.4007, pruned_loss=0.1541, over 8480.00 frames. ], tot_loss[loss=0.3557, simple_loss=0.3963, pruned_loss=0.1576, over 1618998.12 frames. ], batch size: 49, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:28,171 INFO [train.py:901] (0/4) Epoch 2, batch 7800, loss[loss=0.3725, simple_loss=0.4137, pruned_loss=0.1657, over 8281.00 frames. ], tot_loss[loss=0.3546, simple_loss=0.3961, pruned_loss=0.1565, over 1619600.78 frames. 
], batch size: 23, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:40,331 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:01:48,105 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=5.39 vs. limit=5.0 +2023-02-05 21:01:50,923 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.569e+02 4.742e+02 5.990e+02 9.896e+02, threshold=9.484e+02, percent-clipped=1.0 +2023-02-05 21:01:59,049 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:02,909 INFO [train.py:901] (0/4) Epoch 2, batch 7850, loss[loss=0.3284, simple_loss=0.3943, pruned_loss=0.1313, over 8237.00 frames. ], tot_loss[loss=0.3537, simple_loss=0.3953, pruned_loss=0.1561, over 1620196.86 frames. ], batch size: 24, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:15,666 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15953.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:36,219 INFO [train.py:901] (0/4) Epoch 2, batch 7900, loss[loss=0.426, simple_loss=0.4572, pruned_loss=0.1974, over 8522.00 frames. ], tot_loss[loss=0.3531, simple_loss=0.395, pruned_loss=0.1556, over 1619086.00 frames. ], batch size: 28, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:46,886 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-16000.pt +2023-02-05 21:02:58,246 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.808e+02 4.602e+02 5.936e+02 1.299e+03, threshold=9.205e+02, percent-clipped=9.0 +2023-02-05 21:02:58,743 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-05 21:03:00,385 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16019.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:03:10,216 INFO [train.py:901] (0/4) Epoch 2, batch 7950, loss[loss=0.401, simple_loss=0.4265, pruned_loss=0.1877, over 8468.00 frames. ], tot_loss[loss=0.3533, simple_loss=0.3948, pruned_loss=0.1559, over 1619919.16 frames. ], batch size: 25, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:03:43,334 INFO [train.py:901] (0/4) Epoch 2, batch 8000, loss[loss=0.3664, simple_loss=0.4103, pruned_loss=0.1612, over 8245.00 frames. ], tot_loss[loss=0.3528, simple_loss=0.3942, pruned_loss=0.1557, over 1619693.94 frames. ], batch size: 22, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:03:56,059 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16103.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:04:04,544 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.336e+02 4.123e+02 4.991e+02 6.647e+02 1.461e+03, threshold=9.983e+02, percent-clipped=10.0 +2023-02-05 21:04:16,514 INFO [train.py:901] (0/4) Epoch 2, batch 8050, loss[loss=0.2755, simple_loss=0.3284, pruned_loss=0.1113, over 7565.00 frames. ], tot_loss[loss=0.3513, simple_loss=0.3924, pruned_loss=0.1551, over 1607052.78 frames. ], batch size: 18, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:04:18,269 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. 
limit=2.0 +2023-02-05 21:04:39,777 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-2.pt +2023-02-05 21:04:51,775 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 21:04:55,113 INFO [train.py:901] (0/4) Epoch 3, batch 0, loss[loss=0.4537, simple_loss=0.451, pruned_loss=0.2281, over 7700.00 frames. ], tot_loss[loss=0.4537, simple_loss=0.451, pruned_loss=0.2281, over 7700.00 frames. ], batch size: 18, lr: 2.53e-02, grad_scale: 8.0 +2023-02-05 21:04:55,114 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 21:05:06,956 INFO [train.py:935] (0/4) Epoch 3, validation: loss=0.2731, simple_loss=0.3579, pruned_loss=0.09417, over 944034.00 frames. +2023-02-05 21:05:06,957 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6524MB +2023-02-05 21:05:07,108 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:05:23,572 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 21:05:42,768 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.402e+02 4.065e+02 5.070e+02 6.931e+02 1.670e+03, threshold=1.014e+03, percent-clipped=5.0 +2023-02-05 21:05:42,788 INFO [train.py:901] (0/4) Epoch 3, batch 50, loss[loss=0.3241, simple_loss=0.3541, pruned_loss=0.1471, over 7237.00 frames. ], tot_loss[loss=0.3493, simple_loss=0.3917, pruned_loss=0.1534, over 362656.40 frames. ], batch size: 16, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:05:58,801 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 21:06:03,002 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:06:18,211 INFO [train.py:901] (0/4) Epoch 3, batch 100, loss[loss=0.3182, simple_loss=0.3761, pruned_loss=0.1302, over 8227.00 frames. ], tot_loss[loss=0.3539, simple_loss=0.3962, pruned_loss=0.1558, over 640706.42 frames. ], batch size: 22, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:06:18,956 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 21:06:53,425 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.291e+02 3.520e+02 4.471e+02 5.811e+02 1.196e+03, threshold=8.942e+02, percent-clipped=3.0 +2023-02-05 21:06:53,445 INFO [train.py:901] (0/4) Epoch 3, batch 150, loss[loss=0.4067, simple_loss=0.423, pruned_loss=0.1952, over 6573.00 frames. ], tot_loss[loss=0.35, simple_loss=0.3928, pruned_loss=0.1536, over 851914.58 frames. ], batch size: 71, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:06:59,311 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. 
limit=2.0 +2023-02-05 21:07:23,134 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6260, 1.3665, 5.5045, 2.3636, 4.7608, 4.6793, 5.1159, 5.1143], + device='cuda:0'), covar=tensor([0.0264, 0.3291, 0.0206, 0.1306, 0.0916, 0.0298, 0.0344, 0.0348], + device='cuda:0'), in_proj_covar=tensor([0.0210, 0.0398, 0.0257, 0.0297, 0.0362, 0.0285, 0.0279, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 21:07:23,178 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16360.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:24,938 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16363.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:27,430 INFO [train.py:901] (0/4) Epoch 3, batch 200, loss[loss=0.3467, simple_loss=0.3715, pruned_loss=0.161, over 7799.00 frames. ], tot_loss[loss=0.3504, simple_loss=0.3927, pruned_loss=0.154, over 1022134.96 frames. ], batch size: 19, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:42,144 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16389.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:01,475 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 3.609e+02 4.419e+02 5.456e+02 1.161e+03, threshold=8.837e+02, percent-clipped=3.0 +2023-02-05 21:08:01,496 INFO [train.py:901] (0/4) Epoch 3, batch 250, loss[loss=0.4508, simple_loss=0.4758, pruned_loss=0.2129, over 8458.00 frames. ], tot_loss[loss=0.3521, simple_loss=0.3939, pruned_loss=0.1552, over 1154664.74 frames. ], batch size: 49, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:13,913 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 21:08:22,564 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 21:08:22,623 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:35,585 INFO [train.py:901] (0/4) Epoch 3, batch 300, loss[loss=0.38, simple_loss=0.4247, pruned_loss=0.1676, over 8762.00 frames. ], tot_loss[loss=0.3515, simple_loss=0.3936, pruned_loss=0.1546, over 1258738.43 frames. ], batch size: 30, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:43,567 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16478.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:43,709 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0 +2023-02-05 21:08:47,101 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.92 vs. limit=2.0 +2023-02-05 21:09:05,161 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16511.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:09,093 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.982e+02 3.752e+02 4.774e+02 5.919e+02 1.248e+03, threshold=9.549e+02, percent-clipped=6.0 +2023-02-05 21:09:09,112 INFO [train.py:901] (0/4) Epoch 3, batch 350, loss[loss=0.4263, simple_loss=0.4422, pruned_loss=0.2052, over 6992.00 frames. ], tot_loss[loss=0.3533, simple_loss=0.3955, pruned_loss=0.1556, over 1338637.28 frames. 
], batch size: 71, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:09:40,930 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16562.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:44,009 INFO [train.py:901] (0/4) Epoch 3, batch 400, loss[loss=0.3643, simple_loss=0.3996, pruned_loss=0.1645, over 7971.00 frames. ], tot_loss[loss=0.3533, simple_loss=0.3956, pruned_loss=0.1555, over 1402353.36 frames. ], batch size: 21, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:18,098 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:18,544 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.210e+02 3.588e+02 4.493e+02 6.059e+02 1.047e+03, threshold=8.987e+02, percent-clipped=2.0 +2023-02-05 21:10:18,565 INFO [train.py:901] (0/4) Epoch 3, batch 450, loss[loss=0.383, simple_loss=0.4205, pruned_loss=0.1727, over 8362.00 frames. ], tot_loss[loss=0.3505, simple_loss=0.3936, pruned_loss=0.1537, over 1447752.95 frames. ], batch size: 24, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:24,813 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16626.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:35,577 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16641.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:53,050 INFO [train.py:901] (0/4) Epoch 3, batch 500, loss[loss=0.3529, simple_loss=0.4078, pruned_loss=0.149, over 8467.00 frames. ], tot_loss[loss=0.349, simple_loss=0.3922, pruned_loss=0.1529, over 1483851.80 frames. ], batch size: 25, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:11:20,368 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 21:11:27,935 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 3.547e+02 4.664e+02 6.145e+02 2.246e+03, threshold=9.327e+02, percent-clipped=7.0 +2023-02-05 21:11:27,955 INFO [train.py:901] (0/4) Epoch 3, batch 550, loss[loss=0.3542, simple_loss=0.4108, pruned_loss=0.1488, over 8471.00 frames. ], tot_loss[loss=0.347, simple_loss=0.3906, pruned_loss=0.1518, over 1513257.59 frames. ], batch size: 25, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:11:38,640 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16733.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:39,433 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16734.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:56,568 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16759.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:01,692 INFO [train.py:901] (0/4) Epoch 3, batch 600, loss[loss=0.455, simple_loss=0.4561, pruned_loss=0.2269, over 7069.00 frames. ], tot_loss[loss=0.3487, simple_loss=0.392, pruned_loss=0.1527, over 1540310.20 frames. ], batch size: 72, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:16,351 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 21:12:36,648 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.715e+02 4.834e+02 5.984e+02 1.404e+03, threshold=9.668e+02, percent-clipped=7.0 +2023-02-05 21:12:36,669 INFO [train.py:901] (0/4) Epoch 3, batch 650, loss[loss=0.3727, simple_loss=0.3952, pruned_loss=0.1751, over 8623.00 frames. 
], tot_loss[loss=0.3483, simple_loss=0.3916, pruned_loss=0.1526, over 1557241.73 frames. ], batch size: 49, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:37,556 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16818.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:41,599 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7867, 2.1409, 4.6000, 1.0538, 2.9641, 2.5049, 1.7703, 2.5923], + device='cuda:0'), covar=tensor([0.1146, 0.1443, 0.0410, 0.2235, 0.1062, 0.1457, 0.0963, 0.1777], + device='cuda:0'), in_proj_covar=tensor([0.0422, 0.0395, 0.0458, 0.0482, 0.0527, 0.0460, 0.0423, 0.0526], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 21:12:54,023 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:57,233 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:10,251 INFO [train.py:901] (0/4) Epoch 3, batch 700, loss[loss=0.3531, simple_loss=0.3846, pruned_loss=0.1608, over 7653.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.392, pruned_loss=0.1534, over 1570103.25 frames. ], batch size: 19, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:20,396 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16882.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:23,027 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16886.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:13:38,462 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:40,030 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.34 vs. limit=5.0 +2023-02-05 21:13:44,831 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.086e+02 3.932e+02 4.613e+02 6.231e+02 2.383e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 21:13:44,858 INFO [train.py:901] (0/4) Epoch 3, batch 750, loss[loss=0.3964, simple_loss=0.4105, pruned_loss=0.1911, over 7927.00 frames. ], tot_loss[loss=0.3479, simple_loss=0.3918, pruned_loss=0.152, over 1584968.59 frames. ], batch size: 20, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:49,801 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16924.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:59,055 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 21:14:07,690 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 21:14:19,203 INFO [train.py:901] (0/4) Epoch 3, batch 800, loss[loss=0.3039, simple_loss=0.3586, pruned_loss=0.1246, over 7650.00 frames. ], tot_loss[loss=0.3471, simple_loss=0.3909, pruned_loss=0.1516, over 1588217.69 frames. 
], batch size: 19, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:14:41,391 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1368, 2.5643, 1.9853, 3.0293, 1.1974, 1.4964, 1.8450, 2.5178], + device='cuda:0'), covar=tensor([0.1058, 0.1168, 0.1610, 0.0483, 0.2410, 0.2534, 0.1971, 0.0929], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0324, 0.0313, 0.0230, 0.0299, 0.0325, 0.0347, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:14:53,628 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.211e+02 3.452e+02 4.368e+02 5.287e+02 1.393e+03, threshold=8.735e+02, percent-clipped=4.0 +2023-02-05 21:14:53,648 INFO [train.py:901] (0/4) Epoch 3, batch 850, loss[loss=0.364, simple_loss=0.4117, pruned_loss=0.1582, over 8345.00 frames. ], tot_loss[loss=0.3486, simple_loss=0.3917, pruned_loss=0.1527, over 1594767.47 frames. ], batch size: 26, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:28,355 INFO [train.py:901] (0/4) Epoch 3, batch 900, loss[loss=0.3168, simple_loss=0.3588, pruned_loss=0.1374, over 7782.00 frames. ], tot_loss[loss=0.3482, simple_loss=0.391, pruned_loss=0.1527, over 1591881.55 frames. ], batch size: 19, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:53,772 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17104.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:02,287 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.375e+02 3.695e+02 4.540e+02 5.760e+02 9.795e+02, threshold=9.080e+02, percent-clipped=3.0 +2023-02-05 21:16:02,307 INFO [train.py:901] (0/4) Epoch 3, batch 950, loss[loss=0.3601, simple_loss=0.4027, pruned_loss=0.1588, over 8104.00 frames. ], tot_loss[loss=0.3461, simple_loss=0.3897, pruned_loss=0.1513, over 1595890.33 frames. ], batch size: 23, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:16:10,477 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17129.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:25,713 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 21:16:36,863 INFO [train.py:901] (0/4) Epoch 3, batch 1000, loss[loss=0.3499, simple_loss=0.3921, pruned_loss=0.1539, over 8253.00 frames. ], tot_loss[loss=0.3469, simple_loss=0.3906, pruned_loss=0.1516, over 1603200.33 frames. ], batch size: 22, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:16:57,950 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 21:17:03,576 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:17:10,139 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 4.093e+02 4.952e+02 6.088e+02 1.030e+03, threshold=9.904e+02, percent-clipped=7.0 +2023-02-05 21:17:10,160 INFO [train.py:901] (0/4) Epoch 3, batch 1050, loss[loss=0.4067, simple_loss=0.4451, pruned_loss=0.1842, over 8038.00 frames. ], tot_loss[loss=0.3496, simple_loss=0.3927, pruned_loss=0.1533, over 1606727.87 frames. ], batch size: 22, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:10,172 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-05 21:17:19,690 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17230.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:17:45,189 INFO [train.py:901] (0/4) Epoch 3, batch 1100, loss[loss=0.331, simple_loss=0.3816, pruned_loss=0.1402, over 8470.00 frames. ], tot_loss[loss=0.349, simple_loss=0.3919, pruned_loss=0.153, over 1608258.27 frames. ], batch size: 25, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:45,924 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:17:51,895 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1335, 1.2448, 3.2244, 1.0166, 2.7462, 2.7617, 2.9172, 2.8884], + device='cuda:0'), covar=tensor([0.0347, 0.2464, 0.0380, 0.1682, 0.1011, 0.0483, 0.0363, 0.0447], + device='cuda:0'), in_proj_covar=tensor([0.0224, 0.0391, 0.0270, 0.0303, 0.0363, 0.0289, 0.0280, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 21:18:15,933 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8029, 1.4226, 3.0550, 1.2460, 2.2232, 3.4211, 3.2834, 2.9827], + device='cuda:0'), covar=tensor([0.1064, 0.1334, 0.0366, 0.1710, 0.0628, 0.0258, 0.0263, 0.0450], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0260, 0.0197, 0.0259, 0.0197, 0.0161, 0.0157, 0.0234], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:18:19,090 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.272e+02 3.840e+02 4.434e+02 5.714e+02 1.415e+03, threshold=8.869e+02, percent-clipped=3.0 +2023-02-05 21:18:19,110 INFO [train.py:901] (0/4) Epoch 3, batch 1150, loss[loss=0.316, simple_loss=0.3801, pruned_loss=0.126, over 8132.00 frames. ], tot_loss[loss=0.3473, simple_loss=0.3907, pruned_loss=0.1519, over 1608442.53 frames. ], batch size: 22, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:18:22,456 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 21:18:24,020 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3373, 2.2060, 3.2087, 0.6977, 3.0966, 2.1502, 1.2395, 1.7025], + device='cuda:0'), covar=tensor([0.0206, 0.0066, 0.0050, 0.0207, 0.0069, 0.0173, 0.0239, 0.0115], + device='cuda:0'), in_proj_covar=tensor([0.0210, 0.0144, 0.0122, 0.0192, 0.0133, 0.0255, 0.0205, 0.0177], + device='cuda:0'), out_proj_covar=tensor([1.1081e-04, 7.4997e-05, 6.3813e-05, 9.7844e-05, 7.0855e-05, 1.4360e-04, + 1.0958e-04, 9.3967e-05], device='cuda:0') +2023-02-05 21:18:38,623 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17345.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:18:52,830 INFO [train.py:901] (0/4) Epoch 3, batch 1200, loss[loss=0.3208, simple_loss=0.3766, pruned_loss=0.1325, over 8458.00 frames. ], tot_loss[loss=0.3461, simple_loss=0.3901, pruned_loss=0.151, over 1615496.71 frames. ], batch size: 29, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:18:55,915 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.35 vs. 
limit=5.0 +2023-02-05 21:19:02,185 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17380.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:04,920 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17383.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:09,646 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5245, 1.2244, 3.1402, 1.2544, 2.1610, 3.3505, 3.2296, 2.7500], + device='cuda:0'), covar=tensor([0.1306, 0.1749, 0.0421, 0.1986, 0.0792, 0.0362, 0.0344, 0.0704], + device='cuda:0'), in_proj_covar=tensor([0.0225, 0.0258, 0.0194, 0.0253, 0.0196, 0.0161, 0.0159, 0.0233], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:19:17,661 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17401.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:19:28,365 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 3.772e+02 4.989e+02 5.905e+02 9.785e+02, threshold=9.978e+02, percent-clipped=4.0 +2023-02-05 21:19:28,385 INFO [train.py:901] (0/4) Epoch 3, batch 1250, loss[loss=0.3069, simple_loss=0.351, pruned_loss=0.1314, over 7657.00 frames. ], tot_loss[loss=0.3465, simple_loss=0.3903, pruned_loss=0.1514, over 1617222.57 frames. ], batch size: 19, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:19:37,226 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4995, 4.6918, 4.0927, 1.8221, 3.9802, 3.9345, 4.2407, 3.5382], + device='cuda:0'), covar=tensor([0.0793, 0.0502, 0.0931, 0.3568, 0.0483, 0.0572, 0.1066, 0.0609], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0228, 0.0275, 0.0356, 0.0249, 0.0199, 0.0252, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:20:02,612 INFO [train.py:901] (0/4) Epoch 3, batch 1300, loss[loss=0.294, simple_loss=0.3557, pruned_loss=0.1162, over 8246.00 frames. ], tot_loss[loss=0.3476, simple_loss=0.3915, pruned_loss=0.1518, over 1618466.87 frames. ], batch size: 24, lr: 2.44e-02, grad_scale: 8.0 +2023-02-05 21:20:37,546 INFO [train.py:901] (0/4) Epoch 3, batch 1350, loss[loss=0.3196, simple_loss=0.368, pruned_loss=0.1356, over 8082.00 frames. ], tot_loss[loss=0.3491, simple_loss=0.3922, pruned_loss=0.153, over 1619607.84 frames. ], batch size: 21, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:20:37,719 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5689, 1.6124, 3.0017, 1.2883, 2.1269, 3.3297, 2.9770, 2.9658], + device='cuda:0'), covar=tensor([0.1075, 0.1199, 0.0317, 0.1769, 0.0658, 0.0256, 0.0380, 0.0418], + device='cuda:0'), in_proj_covar=tensor([0.0228, 0.0260, 0.0195, 0.0256, 0.0199, 0.0164, 0.0163, 0.0233], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:20:38,226 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 4.258e+02 5.812e+02 8.345e+02 8.746e+03, threshold=1.162e+03, percent-clipped=16.0 +2023-02-05 21:21:00,229 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17551.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:21:11,044 INFO [train.py:901] (0/4) Epoch 3, batch 1400, loss[loss=0.3351, simple_loss=0.3913, pruned_loss=0.1395, over 8290.00 frames. ], tot_loss[loss=0.3455, simple_loss=0.3897, pruned_loss=0.1507, over 1621414.23 frames. 
], batch size: 23, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:21:20,762 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0 +2023-02-05 21:21:34,781 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17601.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:21:46,916 INFO [train.py:901] (0/4) Epoch 3, batch 1450, loss[loss=0.2766, simple_loss=0.3427, pruned_loss=0.1052, over 8079.00 frames. ], tot_loss[loss=0.3432, simple_loss=0.3874, pruned_loss=0.1495, over 1613176.40 frames. ], batch size: 21, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:21:47,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.138e+02 3.309e+02 4.161e+02 5.035e+02 1.114e+03, threshold=8.322e+02, percent-clipped=0.0 +2023-02-05 21:21:48,927 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 21:21:53,220 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17626.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:22:02,523 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17639.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:19,026 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17664.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,425 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17666.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,936 INFO [train.py:901] (0/4) Epoch 3, batch 1500, loss[loss=0.3279, simple_loss=0.3739, pruned_loss=0.1409, over 8098.00 frames. ], tot_loss[loss=0.3424, simple_loss=0.3868, pruned_loss=0.149, over 1611290.37 frames. ], batch size: 23, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,188 INFO [train.py:901] (0/4) Epoch 3, batch 1550, loss[loss=0.3629, simple_loss=0.4171, pruned_loss=0.1544, over 8494.00 frames. ], tot_loss[loss=0.342, simple_loss=0.3866, pruned_loss=0.1487, over 1613429.75 frames. ], batch size: 28, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,832 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.415e+02 3.678e+02 4.620e+02 5.892e+02 1.697e+03, threshold=9.239e+02, percent-clipped=9.0 +2023-02-05 21:22:56,993 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:01,058 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17724.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:16,063 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17745.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:23:17,954 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:29,331 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 21:23:30,930 INFO [train.py:901] (0/4) Epoch 3, batch 1600, loss[loss=0.41, simple_loss=0.413, pruned_loss=0.2035, over 6399.00 frames. ], tot_loss[loss=0.3426, simple_loss=0.3868, pruned_loss=0.1492, over 1612864.19 frames. 
], batch size: 14, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:23:43,907 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3133, 2.3309, 1.6209, 1.4471, 1.9807, 1.7617, 2.3830, 1.9943], + device='cuda:0'), covar=tensor([0.0747, 0.1373, 0.2248, 0.1791, 0.0887, 0.1804, 0.1066, 0.0731], + device='cuda:0'), in_proj_covar=tensor([0.0200, 0.0227, 0.0268, 0.0232, 0.0199, 0.0229, 0.0193, 0.0193], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:23:58,114 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4374, 0.9849, 1.0999, 0.8917, 1.2365, 1.0498, 1.1312, 1.0494], + device='cuda:0'), covar=tensor([0.0886, 0.2043, 0.2979, 0.2027, 0.0850, 0.2297, 0.1089, 0.0900], + device='cuda:0'), in_proj_covar=tensor([0.0199, 0.0227, 0.0267, 0.0231, 0.0198, 0.0227, 0.0193, 0.0193], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:24:05,145 INFO [train.py:901] (0/4) Epoch 3, batch 1650, loss[loss=0.3309, simple_loss=0.3914, pruned_loss=0.1351, over 8532.00 frames. ], tot_loss[loss=0.3429, simple_loss=0.388, pruned_loss=0.1489, over 1616559.77 frames. ], batch size: 28, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:05,803 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.309e+02 4.132e+02 5.477e+02 8.650e+02, threshold=8.264e+02, percent-clipped=0.0 +2023-02-05 21:24:16,288 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.21 vs. limit=5.0 +2023-02-05 21:24:20,689 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17839.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:24:32,058 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1792, 1.3170, 1.4575, 1.2691, 0.9007, 1.5110, 0.1046, 0.9395], + device='cuda:0'), covar=tensor([0.1946, 0.1704, 0.0892, 0.1447, 0.2493, 0.1119, 0.3805, 0.1565], + device='cuda:0'), in_proj_covar=tensor([0.0110, 0.0102, 0.0084, 0.0145, 0.0148, 0.0082, 0.0148, 0.0111], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:24:35,255 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17860.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:24:39,698 INFO [train.py:901] (0/4) Epoch 3, batch 1700, loss[loss=0.3653, simple_loss=0.4171, pruned_loss=0.1568, over 8354.00 frames. ], tot_loss[loss=0.3436, simple_loss=0.3886, pruned_loss=0.1493, over 1618028.74 frames. ], batch size: 24, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:13,891 INFO [train.py:901] (0/4) Epoch 3, batch 1750, loss[loss=0.35, simple_loss=0.3836, pruned_loss=0.1582, over 7653.00 frames. ], tot_loss[loss=0.3452, simple_loss=0.3899, pruned_loss=0.1503, over 1618172.02 frames. 
], batch size: 19, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:14,600 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.998e+02 5.161e+02 6.686e+02 1.470e+03, threshold=1.032e+03, percent-clipped=12.0 +2023-02-05 21:25:17,629 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17922.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:35,775 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17947.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:40,544 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-05 21:25:48,886 INFO [train.py:901] (0/4) Epoch 3, batch 1800, loss[loss=0.3535, simple_loss=0.3999, pruned_loss=0.1535, over 8493.00 frames. ], tot_loss[loss=0.3439, simple_loss=0.3892, pruned_loss=0.1493, over 1619395.29 frames. ], batch size: 26, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:25:57,029 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17978.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:12,788 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-18000.pt +2023-02-05 21:26:25,063 INFO [train.py:901] (0/4) Epoch 3, batch 1850, loss[loss=0.3104, simple_loss=0.3569, pruned_loss=0.1319, over 7523.00 frames. ], tot_loss[loss=0.3428, simple_loss=0.3878, pruned_loss=0.1489, over 1616642.55 frames. ], batch size: 18, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:26:25,635 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.564e+02 4.327e+02 5.819e+02 2.228e+03, threshold=8.654e+02, percent-clipped=8.0 +2023-02-05 21:26:55,926 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18062.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:59,430 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5731, 1.9014, 1.8669, 1.3948, 1.1607, 1.9262, 0.3532, 1.1461], + device='cuda:0'), covar=tensor([0.3744, 0.1890, 0.1063, 0.1727, 0.3174, 0.0953, 0.3814, 0.1720], + device='cuda:0'), in_proj_covar=tensor([0.0110, 0.0102, 0.0082, 0.0143, 0.0152, 0.0080, 0.0144, 0.0111], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:26:59,913 INFO [train.py:901] (0/4) Epoch 3, batch 1900, loss[loss=0.3471, simple_loss=0.3796, pruned_loss=0.1573, over 7544.00 frames. ], tot_loss[loss=0.3439, simple_loss=0.3884, pruned_loss=0.1497, over 1620341.25 frames. ], batch size: 18, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:27:17,419 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18092.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:19,599 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:24,176 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-05 21:27:29,725 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6781, 3.5922, 3.2275, 1.6135, 3.1996, 3.0346, 3.4476, 2.9051], + device='cuda:0'), covar=tensor([0.0860, 0.0647, 0.0982, 0.4327, 0.0657, 0.0803, 0.1085, 0.0723], + device='cuda:0'), in_proj_covar=tensor([0.0337, 0.0232, 0.0279, 0.0364, 0.0246, 0.0208, 0.0261, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:27:34,369 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18116.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:27:34,803 INFO [train.py:901] (0/4) Epoch 3, batch 1950, loss[loss=0.2911, simple_loss=0.3588, pruned_loss=0.1117, over 8457.00 frames. ], tot_loss[loss=0.3431, simple_loss=0.3877, pruned_loss=0.1492, over 1618936.25 frames. ], batch size: 27, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:27:35,478 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.131e+02 3.385e+02 4.094e+02 5.586e+02 1.173e+03, threshold=8.188e+02, percent-clipped=3.0 +2023-02-05 21:27:36,199 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 21:27:37,028 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:43,951 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 21:27:51,192 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18141.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:27:55,031 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 21:28:09,105 INFO [train.py:901] (0/4) Epoch 3, batch 2000, loss[loss=0.4038, simple_loss=0.4334, pruned_loss=0.1871, over 7820.00 frames. ], tot_loss[loss=0.3458, simple_loss=0.3896, pruned_loss=0.151, over 1617348.09 frames. ], batch size: 20, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:16,362 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18177.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:27,330 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:27,997 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18193.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:38,119 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:41,703 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 21:28:44,786 INFO [train.py:901] (0/4) Epoch 3, batch 2050, loss[loss=0.353, simple_loss=0.3928, pruned_loss=0.1566, over 8588.00 frames. ], tot_loss[loss=0.3458, simple_loss=0.3897, pruned_loss=0.1509, over 1618209.88 frames. 
], batch size: 39, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:46,128 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.474e+02 3.817e+02 4.995e+02 6.129e+02 1.664e+03, threshold=9.991e+02, percent-clipped=7.0 +2023-02-05 21:29:18,698 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7523, 2.0717, 1.9036, 2.6378, 1.1084, 1.2986, 1.6505, 2.0438], + device='cuda:0'), covar=tensor([0.1414, 0.1219, 0.1593, 0.0601, 0.2081, 0.2558, 0.2190, 0.1207], + device='cuda:0'), in_proj_covar=tensor([0.0309, 0.0324, 0.0323, 0.0238, 0.0294, 0.0337, 0.0349, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:29:19,884 INFO [train.py:901] (0/4) Epoch 3, batch 2100, loss[loss=0.2864, simple_loss=0.3401, pruned_loss=0.1163, over 8248.00 frames. ], tot_loss[loss=0.3454, simple_loss=0.3893, pruned_loss=0.1508, over 1617057.84 frames. ], batch size: 22, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:20,111 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6020, 1.9229, 1.8172, 0.5665, 1.8013, 1.3388, 0.3820, 1.8354], + device='cuda:0'), covar=tensor([0.0114, 0.0063, 0.0073, 0.0149, 0.0091, 0.0261, 0.0211, 0.0062], + device='cuda:0'), in_proj_covar=tensor([0.0213, 0.0149, 0.0126, 0.0190, 0.0145, 0.0265, 0.0209, 0.0177], + device='cuda:0'), out_proj_covar=tensor([1.0889e-04, 7.6253e-05, 6.3287e-05, 9.3655e-05, 7.6118e-05, 1.4493e-04, + 1.0853e-04, 9.1966e-05], device='cuda:0') +2023-02-05 21:29:55,191 INFO [train.py:901] (0/4) Epoch 3, batch 2150, loss[loss=0.352, simple_loss=0.3978, pruned_loss=0.153, over 8678.00 frames. ], tot_loss[loss=0.3454, simple_loss=0.3897, pruned_loss=0.1506, over 1622871.08 frames. ], batch size: 34, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,884 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.297e+02 3.744e+02 4.718e+02 5.936e+02 1.452e+03, threshold=9.436e+02, percent-clipped=4.0 +2023-02-05 21:29:59,195 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18322.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:30:31,065 INFO [train.py:901] (0/4) Epoch 3, batch 2200, loss[loss=0.3742, simple_loss=0.3894, pruned_loss=0.1795, over 7270.00 frames. ], tot_loss[loss=0.3449, simple_loss=0.3891, pruned_loss=0.1504, over 1615656.11 frames. ], batch size: 16, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:31:06,828 INFO [train.py:901] (0/4) Epoch 3, batch 2250, loss[loss=0.3334, simple_loss=0.3854, pruned_loss=0.1407, over 8106.00 frames. ], tot_loss[loss=0.3443, simple_loss=0.3888, pruned_loss=0.1499, over 1609868.06 frames. 
], batch size: 23, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:07,494 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.379e+02 3.424e+02 4.222e+02 5.561e+02 1.530e+03, threshold=8.445e+02, percent-clipped=2.0 +2023-02-05 21:31:18,175 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18433.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:21,446 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18437.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:23,565 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2481, 1.5414, 1.4717, 1.2452, 0.8776, 1.5331, 0.1051, 0.9026], + device='cuda:0'), covar=tensor([0.2286, 0.1869, 0.1023, 0.1661, 0.3774, 0.0847, 0.4215, 0.1865], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0100, 0.0080, 0.0145, 0.0155, 0.0078, 0.0147, 0.0109], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:31:33,898 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 21:31:36,370 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18458.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:38,299 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7810, 3.8835, 3.3909, 1.7277, 3.3315, 3.1341, 3.4229, 2.8775], + device='cuda:0'), covar=tensor([0.0839, 0.0487, 0.0858, 0.4110, 0.0627, 0.0804, 0.1169, 0.0714], + device='cuda:0'), in_proj_covar=tensor([0.0342, 0.0236, 0.0275, 0.0361, 0.0249, 0.0207, 0.0261, 0.0191], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:31:39,788 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:42,290 INFO [train.py:901] (0/4) Epoch 3, batch 2300, loss[loss=0.3422, simple_loss=0.3814, pruned_loss=0.1515, over 7801.00 frames. ], tot_loss[loss=0.3435, simple_loss=0.3883, pruned_loss=0.1494, over 1609300.85 frames. ], batch size: 19, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:56,792 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:08,041 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-05 21:32:10,464 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18508.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:17,112 INFO [train.py:901] (0/4) Epoch 3, batch 2350, loss[loss=0.4341, simple_loss=0.4471, pruned_loss=0.2106, over 6869.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3861, pruned_loss=0.1476, over 1607880.88 frames. 
], batch size: 71, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:32:17,761 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.449e+02 3.759e+02 4.661e+02 5.652e+02 9.227e+02, threshold=9.323e+02, percent-clipped=1.0 +2023-02-05 21:32:23,358 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18526.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:32:30,480 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:31,154 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18537.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:51,323 INFO [train.py:901] (0/4) Epoch 3, batch 2400, loss[loss=0.3074, simple_loss=0.3528, pruned_loss=0.131, over 7716.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3863, pruned_loss=0.1474, over 1612651.47 frames. ], batch size: 18, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:33:05,132 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2143, 1.8991, 3.0585, 2.6542, 2.3280, 1.7633, 1.2255, 1.2368], + device='cuda:0'), covar=tensor([0.0844, 0.0929, 0.0173, 0.0334, 0.0398, 0.0514, 0.0793, 0.0795], + device='cuda:0'), in_proj_covar=tensor([0.0575, 0.0494, 0.0398, 0.0446, 0.0544, 0.0463, 0.0482, 0.0488], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:33:24,846 INFO [train.py:901] (0/4) Epoch 3, batch 2450, loss[loss=0.3176, simple_loss=0.3822, pruned_loss=0.1265, over 8366.00 frames. ], tot_loss[loss=0.3411, simple_loss=0.3865, pruned_loss=0.1478, over 1611766.70 frames. ], batch size: 24, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:33:25,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 3.618e+02 4.763e+02 6.456e+02 1.024e+03, threshold=9.527e+02, percent-clipped=2.0 +2023-02-05 21:33:33,903 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3219, 2.4212, 4.3562, 3.5032, 3.0929, 2.5064, 1.6536, 1.9901], + device='cuda:0'), covar=tensor([0.0806, 0.1232, 0.0205, 0.0391, 0.0526, 0.0448, 0.0667, 0.1091], + device='cuda:0'), in_proj_covar=tensor([0.0572, 0.0498, 0.0396, 0.0450, 0.0545, 0.0467, 0.0489, 0.0491], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:33:49,161 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:49,834 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18652.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:59,424 INFO [train.py:901] (0/4) Epoch 3, batch 2500, loss[loss=0.3575, simple_loss=0.3918, pruned_loss=0.1616, over 7801.00 frames. ], tot_loss[loss=0.3385, simple_loss=0.3843, pruned_loss=0.1463, over 1607269.88 frames. ], batch size: 20, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:17,683 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:18,337 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18693.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:33,854 INFO [train.py:901] (0/4) Epoch 3, batch 2550, loss[loss=0.3567, simple_loss=0.4027, pruned_loss=0.1554, over 8028.00 frames. 
], tot_loss[loss=0.3407, simple_loss=0.386, pruned_loss=0.1477, over 1604578.75 frames. ], batch size: 22, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:34,503 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.889e+02 4.529e+02 5.619e+02 1.309e+03, threshold=9.058e+02, percent-clipped=5.0 +2023-02-05 21:34:34,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:35:08,023 INFO [train.py:901] (0/4) Epoch 3, batch 2600, loss[loss=0.3124, simple_loss=0.3555, pruned_loss=0.1347, over 7810.00 frames. ], tot_loss[loss=0.3387, simple_loss=0.3843, pruned_loss=0.1466, over 1604994.76 frames. ], batch size: 20, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:37,086 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4748, 1.7355, 1.5641, 1.3463, 1.7113, 1.4381, 1.8387, 1.8975], + device='cuda:0'), covar=tensor([0.0762, 0.1557, 0.1991, 0.1776, 0.0814, 0.1772, 0.1010, 0.0664], + device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0228, 0.0267, 0.0228, 0.0192, 0.0228, 0.0188, 0.0193], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:35:44,462 INFO [train.py:901] (0/4) Epoch 3, batch 2650, loss[loss=0.2872, simple_loss=0.3508, pruned_loss=0.1118, over 8535.00 frames. ], tot_loss[loss=0.3387, simple_loss=0.3838, pruned_loss=0.1468, over 1604664.53 frames. ], batch size: 28, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:45,139 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.426e+02 4.272e+02 5.708e+02 1.020e+03, threshold=8.544e+02, percent-clipped=5.0 +2023-02-05 21:36:08,378 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18852.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:19,355 INFO [train.py:901] (0/4) Epoch 3, batch 2700, loss[loss=0.3409, simple_loss=0.4051, pruned_loss=0.1383, over 8035.00 frames. ], tot_loss[loss=0.3377, simple_loss=0.3841, pruned_loss=0.1457, over 1608225.04 frames. ], batch size: 22, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:19,548 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9053, 2.2956, 1.8969, 2.7502, 1.6345, 1.5556, 2.0485, 2.4170], + device='cuda:0'), covar=tensor([0.1028, 0.1652, 0.1407, 0.0561, 0.1697, 0.2104, 0.1874, 0.1197], + device='cuda:0'), in_proj_covar=tensor([0.0297, 0.0313, 0.0310, 0.0229, 0.0288, 0.0318, 0.0336, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:36:21,492 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18870.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:36:47,168 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:47,838 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18908.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:53,561 INFO [train.py:901] (0/4) Epoch 3, batch 2750, loss[loss=0.351, simple_loss=0.3991, pruned_loss=0.1514, over 8023.00 frames. ], tot_loss[loss=0.3399, simple_loss=0.3859, pruned_loss=0.1469, over 1613040.93 frames. 
], batch size: 22, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:54,224 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.973e+02 3.360e+02 4.052e+02 5.079e+02 9.265e+02, threshold=8.105e+02, percent-clipped=2.0 +2023-02-05 21:36:59,752 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1223, 2.6029, 2.2650, 2.8061, 1.5042, 1.4904, 2.3074, 2.6638], + device='cuda:0'), covar=tensor([0.0919, 0.0979, 0.1178, 0.0531, 0.1481, 0.2003, 0.1390, 0.0730], + device='cuda:0'), in_proj_covar=tensor([0.0293, 0.0311, 0.0310, 0.0230, 0.0290, 0.0315, 0.0336, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:37:00,356 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5522, 4.6126, 4.1322, 2.0308, 3.9879, 4.1640, 4.2993, 3.6435], + device='cuda:0'), covar=tensor([0.0806, 0.0441, 0.0799, 0.3807, 0.0609, 0.0476, 0.1249, 0.0547], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0237, 0.0278, 0.0359, 0.0257, 0.0211, 0.0267, 0.0192], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:37:04,999 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:05,666 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18933.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:15,744 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:28,078 INFO [train.py:901] (0/4) Epoch 3, batch 2800, loss[loss=0.3, simple_loss=0.3642, pruned_loss=0.1179, over 8558.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3872, pruned_loss=0.1481, over 1614664.82 frames. ], batch size: 31, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:37:28,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:41,131 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18985.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:37:58,125 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4349, 2.0408, 3.2856, 2.7659, 2.5804, 1.9377, 1.3699, 1.3040], + device='cuda:0'), covar=tensor([0.0876, 0.1031, 0.0187, 0.0381, 0.0437, 0.0492, 0.0608, 0.1043], + device='cuda:0'), in_proj_covar=tensor([0.0576, 0.0502, 0.0399, 0.0450, 0.0555, 0.0464, 0.0487, 0.0494], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:38:03,343 INFO [train.py:901] (0/4) Epoch 3, batch 2850, loss[loss=0.368, simple_loss=0.4031, pruned_loss=0.1664, over 8080.00 frames. ], tot_loss[loss=0.3391, simple_loss=0.3855, pruned_loss=0.1464, over 1617109.99 frames. ], batch size: 21, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:38:03,922 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 3.511e+02 4.402e+02 5.555e+02 1.104e+03, threshold=8.804e+02, percent-clipped=5.0 +2023-02-05 21:38:09,004 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-05 21:38:15,996 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19036.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:38:37,623 INFO [train.py:901] (0/4) Epoch 3, batch 2900, loss[loss=0.3148, simple_loss=0.3788, pruned_loss=0.1254, over 8287.00 frames. ], tot_loss[loss=0.3395, simple_loss=0.3854, pruned_loss=0.1468, over 1617208.14 frames. ], batch size: 23, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:39:02,485 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 21:39:11,757 INFO [train.py:901] (0/4) Epoch 3, batch 2950, loss[loss=0.3596, simple_loss=0.4135, pruned_loss=0.1528, over 8336.00 frames. ], tot_loss[loss=0.3392, simple_loss=0.385, pruned_loss=0.1467, over 1613614.93 frames. ], batch size: 25, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:12,413 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.613e+02 4.498e+02 5.900e+02 1.326e+03, threshold=8.996e+02, percent-clipped=8.0 +2023-02-05 21:39:32,497 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19147.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:39:35,225 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:39:46,217 INFO [train.py:901] (0/4) Epoch 3, batch 3000, loss[loss=0.3924, simple_loss=0.4131, pruned_loss=0.1858, over 7694.00 frames. ], tot_loss[loss=0.3399, simple_loss=0.3856, pruned_loss=0.1471, over 1611090.29 frames. ], batch size: 18, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:46,218 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 21:39:58,669 INFO [train.py:935] (0/4) Epoch 3, validation: loss=0.2584, simple_loss=0.3473, pruned_loss=0.08481, over 944034.00 frames. +2023-02-05 21:39:58,670 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6524MB +2023-02-05 21:40:14,606 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7719, 3.8725, 2.1579, 2.5623, 2.4401, 1.7490, 2.2170, 2.5550], + device='cuda:0'), covar=tensor([0.1332, 0.0258, 0.0749, 0.0724, 0.0823, 0.1041, 0.0977, 0.1000], + device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0245, 0.0333, 0.0307, 0.0338, 0.0315, 0.0348, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 21:40:33,768 INFO [train.py:901] (0/4) Epoch 3, batch 3050, loss[loss=0.3563, simple_loss=0.4075, pruned_loss=0.1525, over 8192.00 frames. ], tot_loss[loss=0.3417, simple_loss=0.3863, pruned_loss=0.1485, over 1611893.32 frames. 
], batch size: 23, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:40:34,455 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.016e+02 3.526e+02 4.458e+02 6.217e+02 1.354e+03, threshold=8.917e+02, percent-clipped=3.0 +2023-02-05 21:40:38,077 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:40:50,779 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19241.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:40:55,425 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19248.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:07,574 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19266.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:41:08,009 INFO [train.py:901] (0/4) Epoch 3, batch 3100, loss[loss=0.3498, simple_loss=0.3832, pruned_loss=0.1582, over 8074.00 frames. ], tot_loss[loss=0.339, simple_loss=0.3847, pruned_loss=0.1466, over 1614650.83 frames. ], batch size: 21, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:41:26,053 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19292.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:43,833 INFO [train.py:901] (0/4) Epoch 3, batch 3150, loss[loss=0.3331, simple_loss=0.3533, pruned_loss=0.1564, over 7235.00 frames. ], tot_loss[loss=0.3394, simple_loss=0.3851, pruned_loss=0.1468, over 1613701.47 frames. ], batch size: 16, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:41:44,473 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.556e+02 3.507e+02 4.387e+02 6.193e+02 1.521e+03, threshold=8.773e+02, percent-clipped=4.0 +2023-02-05 21:42:17,837 INFO [train.py:901] (0/4) Epoch 3, batch 3200, loss[loss=0.2825, simple_loss=0.3428, pruned_loss=0.1111, over 8033.00 frames. ], tot_loss[loss=0.3383, simple_loss=0.3848, pruned_loss=0.1459, over 1617136.90 frames. ], batch size: 22, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:45,865 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:45,907 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:49,228 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19412.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:53,526 INFO [train.py:901] (0/4) Epoch 3, batch 3250, loss[loss=0.3304, simple_loss=0.3822, pruned_loss=0.1393, over 8321.00 frames. ], tot_loss[loss=0.3385, simple_loss=0.3852, pruned_loss=0.1459, over 1619313.89 frames. ], batch size: 49, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:54,129 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 3.440e+02 4.583e+02 5.736e+02 1.373e+03, threshold=9.167e+02, percent-clipped=8.0 +2023-02-05 21:43:03,719 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:43:26,830 INFO [train.py:901] (0/4) Epoch 3, batch 3300, loss[loss=0.3875, simple_loss=0.4134, pruned_loss=0.1807, over 6726.00 frames. ], tot_loss[loss=0.3413, simple_loss=0.3871, pruned_loss=0.1477, over 1617063.58 frames. 
], batch size: 71, lr: 2.32e-02, grad_scale: 8.0 +2023-02-05 21:43:43,377 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19491.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:44:00,734 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-05 21:44:01,037 INFO [train.py:901] (0/4) Epoch 3, batch 3350, loss[loss=0.3865, simple_loss=0.4317, pruned_loss=0.1707, over 8639.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3868, pruned_loss=0.1472, over 1618690.29 frames. ], batch size: 34, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:44:01,702 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 3.690e+02 4.650e+02 5.581e+02 1.223e+03, threshold=9.300e+02, percent-clipped=5.0 +2023-02-05 21:44:24,038 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 21:44:35,821 INFO [train.py:901] (0/4) Epoch 3, batch 3400, loss[loss=0.3384, simple_loss=0.39, pruned_loss=0.1434, over 8355.00 frames. ], tot_loss[loss=0.3412, simple_loss=0.3873, pruned_loss=0.1475, over 1615240.43 frames. ], batch size: 26, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:02,599 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19606.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:45:09,806 INFO [train.py:901] (0/4) Epoch 3, batch 3450, loss[loss=0.3363, simple_loss=0.3915, pruned_loss=0.1405, over 8451.00 frames. ], tot_loss[loss=0.3394, simple_loss=0.3854, pruned_loss=0.1467, over 1613493.45 frames. ], batch size: 25, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:10,432 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.543e+02 3.801e+02 4.733e+02 6.108e+02 1.526e+03, threshold=9.466e+02, percent-clipped=4.0 +2023-02-05 21:45:11,267 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6036, 1.0765, 4.6786, 1.9609, 4.0396, 3.8717, 4.1653, 4.1985], + device='cuda:0'), covar=tensor([0.0299, 0.3507, 0.0237, 0.1653, 0.0791, 0.0438, 0.0346, 0.0334], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0421, 0.0292, 0.0322, 0.0395, 0.0315, 0.0307, 0.0331], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:0') +2023-02-05 21:45:23,378 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19636.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:30,355 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-05 21:45:42,886 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19663.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:45,373 INFO [train.py:901] (0/4) Epoch 3, batch 3500, loss[loss=0.3611, simple_loss=0.413, pruned_loss=0.1546, over 8394.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3842, pruned_loss=0.1452, over 1613266.22 frames. ], batch size: 49, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:45:58,037 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-05 21:45:59,527 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:03,405 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5226, 2.5952, 1.7209, 2.2255, 2.1392, 1.2836, 2.0682, 2.1167], + device='cuda:0'), covar=tensor([0.1276, 0.0416, 0.1028, 0.0631, 0.0636, 0.1238, 0.0944, 0.0789], + device='cuda:0'), in_proj_covar=tensor([0.0364, 0.0247, 0.0337, 0.0312, 0.0339, 0.0319, 0.0353, 0.0319], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 21:46:19,291 INFO [train.py:901] (0/4) Epoch 3, batch 3550, loss[loss=0.2735, simple_loss=0.3207, pruned_loss=0.1131, over 7690.00 frames. ], tot_loss[loss=0.3346, simple_loss=0.3817, pruned_loss=0.1438, over 1607915.18 frames. ], batch size: 18, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:46:19,968 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.944e+02 3.514e+02 4.193e+02 5.166e+02 1.109e+03, threshold=8.387e+02, percent-clipped=2.0 +2023-02-05 21:46:29,856 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 21:46:46,351 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19756.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:54,364 INFO [train.py:901] (0/4) Epoch 3, batch 3600, loss[loss=0.3706, simple_loss=0.4176, pruned_loss=0.1619, over 8508.00 frames. ], tot_loss[loss=0.3338, simple_loss=0.3817, pruned_loss=0.143, over 1615124.16 frames. ], batch size: 28, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:47:28,240 INFO [train.py:901] (0/4) Epoch 3, batch 3650, loss[loss=0.312, simple_loss=0.3581, pruned_loss=0.133, over 8505.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3829, pruned_loss=0.1439, over 1616146.97 frames. ], batch size: 49, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:47:28,896 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 3.610e+02 4.497e+02 5.952e+02 1.837e+03, threshold=8.994e+02, percent-clipped=7.0 +2023-02-05 21:47:58,730 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 21:47:59,604 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19862.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:48:02,764 INFO [train.py:901] (0/4) Epoch 3, batch 3700, loss[loss=0.273, simple_loss=0.3399, pruned_loss=0.1031, over 7797.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3831, pruned_loss=0.1439, over 1613187.81 frames. ], batch size: 19, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:05,662 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19871.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:48:17,450 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19887.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:48:37,846 INFO [train.py:901] (0/4) Epoch 3, batch 3750, loss[loss=0.372, simple_loss=0.4219, pruned_loss=0.1611, over 8453.00 frames. ], tot_loss[loss=0.3357, simple_loss=0.3835, pruned_loss=0.1439, over 1612387.94 frames. 
], batch size: 25, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:38,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 3.342e+02 4.116e+02 5.480e+02 1.463e+03, threshold=8.233e+02, percent-clipped=1.0 +2023-02-05 21:48:40,669 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5471, 1.9875, 3.4030, 2.6525, 2.7495, 2.0363, 1.3438, 1.3948], + device='cuda:0'), covar=tensor([0.0979, 0.1248, 0.0242, 0.0523, 0.0542, 0.0572, 0.0718, 0.1249], + device='cuda:0'), in_proj_covar=tensor([0.0601, 0.0513, 0.0421, 0.0466, 0.0574, 0.0482, 0.0501, 0.0504], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:48:59,472 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0984, 1.1520, 1.1666, 0.9712, 0.7444, 1.1820, 0.0138, 0.8291], + device='cuda:0'), covar=tensor([0.2766, 0.1849, 0.1128, 0.1880, 0.4790, 0.0900, 0.6254, 0.2177], + device='cuda:0'), in_proj_covar=tensor([0.0111, 0.0095, 0.0081, 0.0149, 0.0169, 0.0077, 0.0154, 0.0108], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:49:12,035 INFO [train.py:901] (0/4) Epoch 3, batch 3800, loss[loss=0.3861, simple_loss=0.4055, pruned_loss=0.1833, over 7807.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3833, pruned_loss=0.1437, over 1613163.82 frames. ], batch size: 20, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:49:20,772 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19980.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:49:34,901 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-20000.pt +2023-02-05 21:49:48,450 INFO [train.py:901] (0/4) Epoch 3, batch 3850, loss[loss=0.3538, simple_loss=0.4015, pruned_loss=0.153, over 8234.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.3822, pruned_loss=0.1434, over 1613536.42 frames. ], batch size: 22, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:49:49,085 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.536e+02 4.444e+02 5.257e+02 1.055e+03, threshold=8.889e+02, percent-clipped=4.0 +2023-02-05 21:49:54,826 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-05 21:50:01,902 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 21:50:02,709 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20038.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:22,587 INFO [train.py:901] (0/4) Epoch 3, batch 3900, loss[loss=0.3107, simple_loss=0.3617, pruned_loss=0.1299, over 7658.00 frames. ], tot_loss[loss=0.3343, simple_loss=0.3821, pruned_loss=0.1433, over 1615929.68 frames. ], batch size: 19, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:41,522 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:50,422 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20107.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:56,834 INFO [train.py:901] (0/4) Epoch 3, batch 3950, loss[loss=0.2628, simple_loss=0.3246, pruned_loss=0.1005, over 7419.00 frames. ], tot_loss[loss=0.3355, simple_loss=0.3834, pruned_loss=0.1438, over 1613368.25 frames. 
], batch size: 17, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:57,395 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.492e+02 4.461e+02 6.032e+02 1.371e+03, threshold=8.922e+02, percent-clipped=4.0 +2023-02-05 21:51:05,035 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:06,967 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20130.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:21,454 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20152.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:31,210 INFO [train.py:901] (0/4) Epoch 3, batch 4000, loss[loss=0.3075, simple_loss=0.3577, pruned_loss=0.1287, over 8246.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3822, pruned_loss=0.1431, over 1615080.91 frames. ], batch size: 22, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:51:40,492 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 21:51:49,473 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0533, 2.2501, 3.9586, 3.4064, 2.8217, 2.2756, 1.5105, 2.0097], + device='cuda:0'), covar=tensor([0.0794, 0.1263, 0.0229, 0.0389, 0.0576, 0.0462, 0.0648, 0.1014], + device='cuda:0'), in_proj_covar=tensor([0.0592, 0.0507, 0.0423, 0.0463, 0.0573, 0.0479, 0.0498, 0.0496], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:52:05,183 INFO [train.py:901] (0/4) Epoch 3, batch 4050, loss[loss=0.3195, simple_loss=0.3771, pruned_loss=0.1309, over 8665.00 frames. ], tot_loss[loss=0.3328, simple_loss=0.381, pruned_loss=0.1423, over 1615710.55 frames. ], batch size: 39, lr: 2.28e-02, grad_scale: 16.0 +2023-02-05 21:52:05,854 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.226e+02 3.505e+02 4.242e+02 5.307e+02 1.364e+03, threshold=8.485e+02, percent-clipped=4.0 +2023-02-05 21:52:09,346 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20222.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:52:40,361 INFO [train.py:901] (0/4) Epoch 3, batch 4100, loss[loss=0.274, simple_loss=0.3359, pruned_loss=0.106, over 7924.00 frames. ], tot_loss[loss=0.3336, simple_loss=0.3819, pruned_loss=0.1426, over 1619653.86 frames. ], batch size: 20, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:52:41,917 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4912, 1.6783, 1.4673, 1.2721, 1.7533, 1.4904, 1.7373, 1.7715], + device='cuda:0'), covar=tensor([0.0806, 0.1570, 0.2396, 0.1878, 0.0889, 0.1978, 0.1011, 0.0763], + device='cuda:0'), in_proj_covar=tensor([0.0186, 0.0224, 0.0262, 0.0224, 0.0189, 0.0227, 0.0185, 0.0187], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 21:53:14,418 INFO [train.py:901] (0/4) Epoch 3, batch 4150, loss[loss=0.3206, simple_loss=0.3836, pruned_loss=0.1288, over 8528.00 frames. ], tot_loss[loss=0.3328, simple_loss=0.381, pruned_loss=0.1423, over 1620107.08 frames. 
], batch size: 26, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:53:15,798 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.167e+02 3.849e+02 4.660e+02 5.932e+02 1.097e+03, threshold=9.320e+02, percent-clipped=6.0 +2023-02-05 21:53:20,125 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2871, 2.4332, 1.2020, 1.6970, 1.8562, 1.1070, 1.3985, 2.0348], + device='cuda:0'), covar=tensor([0.1590, 0.0540, 0.1604, 0.0981, 0.1025, 0.1627, 0.1680, 0.1032], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0239, 0.0329, 0.0311, 0.0330, 0.0311, 0.0353, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 21:53:38,374 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20351.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:53:38,578 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-05 21:53:49,995 INFO [train.py:901] (0/4) Epoch 3, batch 4200, loss[loss=0.3221, simple_loss=0.3742, pruned_loss=0.135, over 7940.00 frames. ], tot_loss[loss=0.3351, simple_loss=0.3823, pruned_loss=0.1439, over 1618662.79 frames. ], batch size: 20, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:53:55,443 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 21:53:56,306 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20376.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:00,087 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:14,970 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7329, 3.7693, 3.3116, 1.3696, 3.2327, 3.1098, 3.4657, 2.8575], + device='cuda:0'), covar=tensor([0.0831, 0.0623, 0.0902, 0.4574, 0.0720, 0.0717, 0.1066, 0.0733], + device='cuda:0'), in_proj_covar=tensor([0.0342, 0.0250, 0.0273, 0.0361, 0.0260, 0.0210, 0.0256, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 21:54:16,275 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20406.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:16,805 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 21:54:24,056 INFO [train.py:901] (0/4) Epoch 3, batch 4250, loss[loss=0.365, simple_loss=0.4088, pruned_loss=0.1606, over 8248.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.3827, pruned_loss=0.1442, over 1618425.15 frames. ], batch size: 24, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:54:24,349 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.78 vs. limit=5.0 +2023-02-05 21:54:25,369 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.360e+02 3.627e+02 5.036e+02 6.332e+02 1.636e+03, threshold=1.007e+03, percent-clipped=4.0 +2023-02-05 21:54:37,012 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20436.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:47,724 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20451.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:59,257 INFO [train.py:901] (0/4) Epoch 3, batch 4300, loss[loss=0.3123, simple_loss=0.3731, pruned_loss=0.1258, over 7414.00 frames. 
], tot_loss[loss=0.3365, simple_loss=0.3839, pruned_loss=0.1446, over 1620443.16 frames. ], batch size: 17, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:04,854 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20474.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:20,169 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:33,525 INFO [train.py:901] (0/4) Epoch 3, batch 4350, loss[loss=0.3034, simple_loss=0.3639, pruned_loss=0.1214, over 8086.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.3831, pruned_loss=0.144, over 1620603.00 frames. ], batch size: 21, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:34,898 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.452e+02 4.356e+02 5.638e+02 1.577e+03, threshold=8.711e+02, percent-clipped=2.0 +2023-02-05 21:55:46,526 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 21:56:06,879 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:06,985 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:07,509 INFO [train.py:901] (0/4) Epoch 3, batch 4400, loss[loss=0.3903, simple_loss=0.4217, pruned_loss=0.1794, over 8106.00 frames. ], tot_loss[loss=0.3377, simple_loss=0.3843, pruned_loss=0.1456, over 1622953.66 frames. ], batch size: 23, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:24,132 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20589.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:28,185 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 21:56:36,777 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:44,146 INFO [train.py:901] (0/4) Epoch 3, batch 4450, loss[loss=0.3192, simple_loss=0.379, pruned_loss=0.1297, over 8588.00 frames. ], tot_loss[loss=0.334, simple_loss=0.382, pruned_loss=0.143, over 1621409.89 frames. ], batch size: 31, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:45,429 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.404e+02 4.420e+02 6.069e+02 1.310e+03, threshold=8.839e+02, percent-clipped=8.0 +2023-02-05 21:57:08,019 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5361, 1.8293, 2.0445, 0.7778, 2.0370, 1.4571, 0.5183, 1.6509], + device='cuda:0'), covar=tensor([0.0136, 0.0094, 0.0054, 0.0164, 0.0102, 0.0258, 0.0223, 0.0082], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0163, 0.0136, 0.0212, 0.0161, 0.0282, 0.0221, 0.0189], + device='cuda:0'), out_proj_covar=tensor([1.0942e-04, 7.8161e-05, 6.3258e-05, 9.7395e-05, 7.8224e-05, 1.4447e-04, + 1.0759e-04, 8.9885e-05], device='cuda:0') +2023-02-05 21:57:18,491 INFO [train.py:901] (0/4) Epoch 3, batch 4500, loss[loss=0.2586, simple_loss=0.3108, pruned_loss=0.1032, over 7429.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3816, pruned_loss=0.1434, over 1617915.47 frames. ], batch size: 17, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:20,852 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-05 21:57:27,443 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:57:46,056 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2231, 2.1962, 3.9224, 3.6570, 3.3069, 2.5533, 1.6643, 1.8116], + device='cuda:0'), covar=tensor([0.1011, 0.1479, 0.0257, 0.0407, 0.0539, 0.0503, 0.0719, 0.1265], + device='cuda:0'), in_proj_covar=tensor([0.0599, 0.0514, 0.0432, 0.0468, 0.0585, 0.0476, 0.0497, 0.0502], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 21:57:53,171 INFO [train.py:901] (0/4) Epoch 3, batch 4550, loss[loss=0.3024, simple_loss=0.3623, pruned_loss=0.1213, over 7959.00 frames. ], tot_loss[loss=0.3333, simple_loss=0.3807, pruned_loss=0.143, over 1611694.69 frames. ], batch size: 21, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:54,484 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.207e+02 3.483e+02 4.570e+02 6.300e+02 1.347e+03, threshold=9.139e+02, percent-clipped=2.0 +2023-02-05 21:58:04,580 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-05 21:58:14,820 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20750.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:17,079 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:26,235 INFO [train.py:901] (0/4) Epoch 3, batch 4600, loss[loss=0.3057, simple_loss=0.3528, pruned_loss=0.1293, over 7656.00 frames. ], tot_loss[loss=0.3325, simple_loss=0.3795, pruned_loss=0.1427, over 1608116.46 frames. ], batch size: 19, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:58:34,559 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:34,761 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.35 vs. limit=5.0 +2023-02-05 21:58:35,789 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20780.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:00,498 INFO [train.py:901] (0/4) Epoch 3, batch 4650, loss[loss=0.4144, simple_loss=0.4451, pruned_loss=0.1918, over 8355.00 frames. ], tot_loss[loss=0.3321, simple_loss=0.3796, pruned_loss=0.1423, over 1611159.86 frames. ], batch size: 24, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:02,528 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 3.299e+02 4.239e+02 5.426e+02 9.400e+02, threshold=8.478e+02, percent-clipped=1.0 +2023-02-05 21:59:04,813 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20822.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:21,386 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20845.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:22,733 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20847.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:34,627 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20865.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:35,790 INFO [train.py:901] (0/4) Epoch 3, batch 4700, loss[loss=0.3037, simple_loss=0.3446, pruned_loss=0.1314, over 7775.00 frames. 
], tot_loss[loss=0.3302, simple_loss=0.378, pruned_loss=0.1412, over 1608846.63 frames. ], batch size: 19, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:37,902 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20870.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:54,881 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20895.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:08,985 INFO [train.py:901] (0/4) Epoch 3, batch 4750, loss[loss=0.3189, simple_loss=0.3751, pruned_loss=0.1313, over 8290.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3798, pruned_loss=0.1426, over 1609423.85 frames. ], batch size: 23, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 22:00:10,303 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.634e+02 4.432e+02 5.821e+02 1.296e+03, threshold=8.863e+02, percent-clipped=5.0 +2023-02-05 22:00:19,876 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0557, 2.2683, 3.0987, 0.9597, 3.0529, 1.7758, 1.3422, 2.0763], + device='cuda:0'), covar=tensor([0.0153, 0.0079, 0.0062, 0.0216, 0.0103, 0.0220, 0.0228, 0.0096], + device='cuda:0'), in_proj_covar=tensor([0.0227, 0.0163, 0.0133, 0.0207, 0.0155, 0.0273, 0.0222, 0.0191], + device='cuda:0'), out_proj_covar=tensor([1.0768e-04, 7.7221e-05, 6.1022e-05, 9.4978e-05, 7.4801e-05, 1.3865e-04, + 1.0773e-04, 8.9446e-05], device='cuda:0') +2023-02-05 22:00:22,600 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20937.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:24,394 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 22:00:26,463 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 22:00:32,638 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:41,446 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:44,559 INFO [train.py:901] (0/4) Epoch 3, batch 4800, loss[loss=0.4079, simple_loss=0.4404, pruned_loss=0.1877, over 8428.00 frames. ], tot_loss[loss=0.3327, simple_loss=0.3808, pruned_loss=0.1423, over 1616770.68 frames. ], batch size: 50, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:18,122 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 22:01:18,802 INFO [train.py:901] (0/4) Epoch 3, batch 4850, loss[loss=0.3291, simple_loss=0.3678, pruned_loss=0.1452, over 7790.00 frames. ], tot_loss[loss=0.3333, simple_loss=0.3804, pruned_loss=0.1431, over 1613110.82 frames. ], batch size: 19, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:20,193 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.459e+02 3.687e+02 4.412e+02 5.668e+02 1.155e+03, threshold=8.825e+02, percent-clipped=6.0 +2023-02-05 22:01:51,976 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21065.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:01:53,059 INFO [train.py:901] (0/4) Epoch 3, batch 4900, loss[loss=0.4749, simple_loss=0.4821, pruned_loss=0.2338, over 8650.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.3802, pruned_loss=0.143, over 1612699.39 frames. 
], batch size: 39, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:55,916 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:13,394 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3376, 2.0563, 4.2516, 3.2472, 3.1394, 1.9341, 1.5644, 1.8831], + device='cuda:0'), covar=tensor([0.1669, 0.1852, 0.0267, 0.0561, 0.0706, 0.0909, 0.1059, 0.1549], + device='cuda:0'), in_proj_covar=tensor([0.0603, 0.0520, 0.0434, 0.0487, 0.0589, 0.0488, 0.0501, 0.0513], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 22:02:27,709 INFO [train.py:901] (0/4) Epoch 3, batch 4950, loss[loss=0.3488, simple_loss=0.3895, pruned_loss=0.1541, over 8503.00 frames. ], tot_loss[loss=0.3342, simple_loss=0.3813, pruned_loss=0.1436, over 1617424.20 frames. ], batch size: 28, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:02:29,096 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 3.569e+02 4.502e+02 6.229e+02 1.133e+03, threshold=9.004e+02, percent-clipped=2.0 +2023-02-05 22:02:30,680 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:41,067 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-05 22:02:41,336 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:48,071 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:51,449 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:01,832 INFO [train.py:901] (0/4) Epoch 3, batch 5000, loss[loss=0.2949, simple_loss=0.3362, pruned_loss=0.1268, over 7648.00 frames. ], tot_loss[loss=0.332, simple_loss=0.3799, pruned_loss=0.142, over 1615717.98 frames. ], batch size: 19, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:08,652 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:37,219 INFO [train.py:901] (0/4) Epoch 3, batch 5050, loss[loss=0.3719, simple_loss=0.409, pruned_loss=0.1674, over 8465.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3783, pruned_loss=0.1412, over 1614223.11 frames. ], batch size: 25, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:38,541 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.043e+02 3.325e+02 4.224e+02 5.254e+02 1.187e+03, threshold=8.447e+02, percent-clipped=3.0 +2023-02-05 22:03:57,070 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 22:04:11,248 INFO [train.py:901] (0/4) Epoch 3, batch 5100, loss[loss=0.365, simple_loss=0.4055, pruned_loss=0.1623, over 8139.00 frames. ], tot_loss[loss=0.3306, simple_loss=0.3781, pruned_loss=0.1415, over 1610940.88 frames. ], batch size: 22, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:46,363 INFO [train.py:901] (0/4) Epoch 3, batch 5150, loss[loss=0.3586, simple_loss=0.4017, pruned_loss=0.1578, over 8023.00 frames. ], tot_loss[loss=0.3298, simple_loss=0.3777, pruned_loss=0.141, over 1610101.22 frames. 
], batch size: 22, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:47,676 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.142e+02 3.453e+02 4.061e+02 5.332e+02 1.278e+03, threshold=8.122e+02, percent-clipped=4.0 +2023-02-05 22:04:49,993 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21321.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:04:52,011 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5440, 1.1044, 4.5684, 1.5840, 4.0212, 3.6541, 4.0934, 3.9296], + device='cuda:0'), covar=tensor([0.0276, 0.3381, 0.0241, 0.1779, 0.0945, 0.0510, 0.0368, 0.0518], + device='cuda:0'), in_proj_covar=tensor([0.0253, 0.0433, 0.0303, 0.0339, 0.0408, 0.0328, 0.0318, 0.0349], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 22:04:56,028 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21330.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,479 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,544 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:19,999 INFO [train.py:901] (0/4) Epoch 3, batch 5200, loss[loss=0.3179, simple_loss=0.3802, pruned_loss=0.1278, over 8191.00 frames. ], tot_loss[loss=0.3305, simple_loss=0.3785, pruned_loss=0.1412, over 1613569.23 frames. ], batch size: 23, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:52,823 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:54,863 INFO [train.py:901] (0/4) Epoch 3, batch 5250, loss[loss=0.3554, simple_loss=0.3932, pruned_loss=0.1588, over 7192.00 frames. ], tot_loss[loss=0.3295, simple_loss=0.3776, pruned_loss=0.1407, over 1612597.97 frames. ], batch size: 73, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:54,881 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 22:05:56,261 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 3.353e+02 4.281e+02 5.765e+02 2.364e+03, threshold=8.563e+02, percent-clipped=11.0 +2023-02-05 22:06:09,281 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2395, 1.7422, 1.7068, 0.5538, 1.6177, 1.2379, 0.3291, 1.5566], + device='cuda:0'), covar=tensor([0.0124, 0.0063, 0.0066, 0.0136, 0.0089, 0.0250, 0.0196, 0.0068], + device='cuda:0'), in_proj_covar=tensor([0.0223, 0.0159, 0.0134, 0.0207, 0.0161, 0.0279, 0.0220, 0.0196], + device='cuda:0'), out_proj_covar=tensor([1.0445e-04, 7.4113e-05, 6.0828e-05, 9.3271e-05, 7.6606e-05, 1.3987e-04, + 1.0548e-04, 9.1584e-05], device='cuda:0') +2023-02-05 22:06:30,396 INFO [train.py:901] (0/4) Epoch 3, batch 5300, loss[loss=0.305, simple_loss=0.3598, pruned_loss=0.125, over 8476.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3785, pruned_loss=0.1407, over 1611161.23 frames. ], batch size: 27, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:06:39,471 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:04,794 INFO [train.py:901] (0/4) Epoch 3, batch 5350, loss[loss=0.3836, simple_loss=0.428, pruned_loss=0.1696, over 8360.00 frames. 
], tot_loss[loss=0.3297, simple_loss=0.3787, pruned_loss=0.1404, over 1608918.48 frames. ], batch size: 24, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:07:06,076 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.338e+02 4.128e+02 5.460e+02 1.129e+03, threshold=8.255e+02, percent-clipped=3.0 +2023-02-05 22:07:13,627 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21529.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:40,163 INFO [train.py:901] (0/4) Epoch 3, batch 5400, loss[loss=0.3547, simple_loss=0.4009, pruned_loss=0.1542, over 8504.00 frames. ], tot_loss[loss=0.3311, simple_loss=0.3796, pruned_loss=0.1413, over 1610468.20 frames. ], batch size: 26, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:07:59,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21595.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:12,090 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21613.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:14,745 INFO [train.py:901] (0/4) Epoch 3, batch 5450, loss[loss=0.3037, simple_loss=0.361, pruned_loss=0.1232, over 8575.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.3814, pruned_loss=0.1424, over 1608564.85 frames. ], batch size: 31, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:16,068 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 3.746e+02 4.366e+02 5.874e+02 2.172e+03, threshold=8.732e+02, percent-clipped=6.0 +2023-02-05 22:08:24,271 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21631.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:27,714 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5677, 4.7096, 3.9644, 1.7249, 3.9527, 4.0251, 4.2625, 3.3511], + device='cuda:0'), covar=tensor([0.0815, 0.0496, 0.1010, 0.4538, 0.0676, 0.0709, 0.1170, 0.0761], + device='cuda:0'), in_proj_covar=tensor([0.0367, 0.0245, 0.0290, 0.0377, 0.0272, 0.0216, 0.0271, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 22:08:41,812 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 22:08:42,203 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.81 vs. limit=5.0 +2023-02-05 22:08:49,813 INFO [train.py:901] (0/4) Epoch 3, batch 5500, loss[loss=0.3636, simple_loss=0.4025, pruned_loss=0.1624, over 8082.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3786, pruned_loss=0.1398, over 1609545.50 frames. 
], batch size: 21, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:55,219 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21674.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:05,879 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21690.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:09,270 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3684, 1.5966, 1.3328, 1.9937, 0.8595, 1.1580, 1.3635, 1.6061], + device='cuda:0'), covar=tensor([0.1262, 0.1210, 0.1567, 0.0563, 0.1602, 0.2124, 0.1361, 0.0967], + device='cuda:0'), in_proj_covar=tensor([0.0306, 0.0307, 0.0303, 0.0229, 0.0287, 0.0313, 0.0329, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 22:09:23,536 INFO [train.py:901] (0/4) Epoch 3, batch 5550, loss[loss=0.4018, simple_loss=0.4361, pruned_loss=0.1837, over 8483.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3788, pruned_loss=0.1409, over 1607655.08 frames. ], batch size: 28, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:09:24,903 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 3.296e+02 4.063e+02 5.206e+02 8.291e+02, threshold=8.125e+02, percent-clipped=0.0 +2023-02-05 22:09:29,325 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-05 22:09:58,420 INFO [train.py:901] (0/4) Epoch 3, batch 5600, loss[loss=0.2959, simple_loss=0.3519, pruned_loss=0.12, over 7924.00 frames. ], tot_loss[loss=0.3307, simple_loss=0.3789, pruned_loss=0.1412, over 1607770.58 frames. ], batch size: 20, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:08,483 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:11,824 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21785.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:14,437 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21789.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:25,392 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:28,816 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21810.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:33,420 INFO [train.py:901] (0/4) Epoch 3, batch 5650, loss[loss=0.3192, simple_loss=0.3486, pruned_loss=0.1449, over 7174.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3785, pruned_loss=0.141, over 1605950.60 frames. ], batch size: 16, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:34,793 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.984e+02 3.614e+02 4.526e+02 5.980e+02 8.654e+02, threshold=9.051e+02, percent-clipped=4.0 +2023-02-05 22:10:45,255 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 22:10:56,856 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21851.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:07,103 INFO [train.py:901] (0/4) Epoch 3, batch 5700, loss[loss=0.3408, simple_loss=0.4057, pruned_loss=0.138, over 8248.00 frames. ], tot_loss[loss=0.3286, simple_loss=0.3774, pruned_loss=0.1399, over 1609493.90 frames. 
], batch size: 24, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:07,924 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:13,376 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:42,901 INFO [train.py:901] (0/4) Epoch 3, batch 5750, loss[loss=0.2747, simple_loss=0.3404, pruned_loss=0.1045, over 8134.00 frames. ], tot_loss[loss=0.3286, simple_loss=0.3777, pruned_loss=0.1398, over 1614714.06 frames. ], batch size: 22, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:44,222 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.339e+02 3.657e+02 4.422e+02 5.345e+02 1.248e+03, threshold=8.845e+02, percent-clipped=3.0 +2023-02-05 22:11:49,705 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 22:12:10,243 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21957.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:16,963 INFO [train.py:901] (0/4) Epoch 3, batch 5800, loss[loss=0.4449, simple_loss=0.4483, pruned_loss=0.2207, over 8603.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3787, pruned_loss=0.1409, over 1614662.97 frames. ], batch size: 34, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:22,428 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:31,002 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21988.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:39,154 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-22000.pt +2023-02-05 22:12:52,159 INFO [train.py:901] (0/4) Epoch 3, batch 5850, loss[loss=0.3503, simple_loss=0.3905, pruned_loss=0.1551, over 8361.00 frames. ], tot_loss[loss=0.3289, simple_loss=0.3775, pruned_loss=0.1401, over 1612924.94 frames. ], batch size: 24, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:53,404 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.662e+02 4.461e+02 5.594e+02 1.608e+03, threshold=8.923e+02, percent-clipped=8.0 +2023-02-05 22:13:01,004 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4570, 1.8548, 2.9147, 1.1450, 2.2163, 1.7806, 1.6235, 1.7716], + device='cuda:0'), covar=tensor([0.1237, 0.1421, 0.0509, 0.2402, 0.0962, 0.1806, 0.1110, 0.1505], + device='cuda:0'), in_proj_covar=tensor([0.0443, 0.0417, 0.0487, 0.0505, 0.0548, 0.0485, 0.0430, 0.0557], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 22:13:02,468 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.86 vs. limit=5.0 +2023-02-05 22:13:11,805 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:22,187 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:25,934 INFO [train.py:901] (0/4) Epoch 3, batch 5900, loss[loss=0.3156, simple_loss=0.3708, pruned_loss=0.1302, over 8078.00 frames. ], tot_loss[loss=0.3282, simple_loss=0.3766, pruned_loss=0.1399, over 1612308.15 frames. 
], batch size: 21, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:13:28,807 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:30,185 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22072.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:30,260 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7814, 1.6120, 2.4373, 2.0519, 2.0908, 1.6348, 1.2937, 0.8434], + device='cuda:0'), covar=tensor([0.1026, 0.0968, 0.0262, 0.0419, 0.0421, 0.0580, 0.0690, 0.1004], + device='cuda:0'), in_proj_covar=tensor([0.0618, 0.0533, 0.0446, 0.0489, 0.0601, 0.0502, 0.0511, 0.0517], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 22:13:39,564 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22086.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:42,233 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:00,337 INFO [train.py:901] (0/4) Epoch 3, batch 5950, loss[loss=0.3717, simple_loss=0.4024, pruned_loss=0.1704, over 8586.00 frames. ], tot_loss[loss=0.3313, simple_loss=0.3787, pruned_loss=0.1419, over 1612177.27 frames. ], batch size: 34, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:02,407 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.045e+02 3.353e+02 4.485e+02 5.691e+02 1.558e+03, threshold=8.970e+02, percent-clipped=6.0 +2023-02-05 22:14:07,162 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:22,762 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=5.06 vs. limit=5.0 +2023-02-05 22:14:23,935 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22148.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:36,950 INFO [train.py:901] (0/4) Epoch 3, batch 6000, loss[loss=0.3477, simple_loss=0.3963, pruned_loss=0.1496, over 8651.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3776, pruned_loss=0.1403, over 1616010.43 frames. ], batch size: 34, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:36,951 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 22:14:49,939 INFO [train.py:935] (0/4) Epoch 3, validation: loss=0.2472, simple_loss=0.3383, pruned_loss=0.07805, over 944034.00 frames. +2023-02-05 22:14:49,940 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-05 22:15:08,352 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22194.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:21,649 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:25,121 INFO [train.py:901] (0/4) Epoch 3, batch 6050, loss[loss=0.3312, simple_loss=0.3671, pruned_loss=0.1476, over 7438.00 frames. ], tot_loss[loss=0.3276, simple_loss=0.3765, pruned_loss=0.1393, over 1613516.11 frames. 
+2023-02-05 22:15:26,479 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.554e+02 3.417e+02 4.364e+02 5.364e+02 3.571e+03, threshold=8.727e+02, percent-clipped=6.0
+2023-02-05 22:15:36,164 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22233.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:15:40,912 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22240.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:15:51,054 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22255.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:15:59,776 INFO [train.py:901] (0/4) Epoch 3, batch 6100, loss[loss=0.3145, simple_loss=0.3781, pruned_loss=0.1255, over 8366.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3787, pruned_loss=0.1408, over 1613262.52 frames. ], batch size: 24, lr: 2.18e-02, grad_scale: 16.0
+2023-02-05 22:16:06,254 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.63 vs. limit=5.0
+2023-02-05 22:16:18,457 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-05 22:16:35,129 INFO [train.py:901] (0/4) Epoch 3, batch 6150, loss[loss=0.3369, simple_loss=0.3843, pruned_loss=0.1447, over 8605.00 frames. ], tot_loss[loss=0.3313, simple_loss=0.3796, pruned_loss=0.1415, over 1614343.25 frames. ], batch size: 31, lr: 2.18e-02, grad_scale: 16.0
+2023-02-05 22:16:36,460 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.615e+02 4.380e+02 5.688e+02 1.525e+03, threshold=8.759e+02, percent-clipped=2.0
+2023-02-05 22:16:41,830 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:16:42,024 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0
+2023-02-05 22:16:42,471 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22328.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:16:45,071 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22332.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:16:54,506 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22346.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:16:59,256 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22353.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:17:08,711 INFO [train.py:901] (0/4) Epoch 3, batch 6200, loss[loss=0.2861, simple_loss=0.3463, pruned_loss=0.113, over 7818.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3786, pruned_loss=0.1398, over 1619605.70 frames. ], batch size: 20, lr: 2.18e-02, grad_scale: 16.0
+2023-02-05 22:17:11,711 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22371.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:17:32,833 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.53 vs. limit=5.0
+2023-02-05 22:17:34,503 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:17:44,418 INFO [train.py:901] (0/4) Epoch 3, batch 6250, loss[loss=0.3894, simple_loss=0.4194, pruned_loss=0.1797, over 8692.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.3785, pruned_loss=0.1399, over 1617287.10 frames. ], batch size: 39, lr: 2.17e-02, grad_scale: 16.0
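Each `[optim.py:369]` line above reports quartiles of recent gradient norms, the clipping threshold that was in force, and the share of steps that got clipped; with `Clipping_scale=2.0` the threshold sits at roughly twice a central statistic of recent norms. Below is a simplified sketch of this style of adaptive clipping. The exact statistic icefall's optimizer tracks may differ, so treat the rolling-median rule as an assumption for illustration.

```python
# Sketch of adaptive gradient clipping driven by recent grad-norm
# statistics, in the spirit of the [optim.py:369] lines above.
# The choice of statistic (median of a rolling window) is an assumption.
from collections import deque
import torch

class AdaptiveClipper:
    def __init__(self, clipping_scale: float = 2.0, window: int = 100):
        self.clipping_scale = clipping_scale
        self.history: deque = deque(maxlen=window)

    def clip_(self, model: torch.nn.Module) -> float:
        """Clip gradients in place; returns the threshold used."""
        norms = sorted(self.history) or [float("inf")]
        median = norms[len(norms) // 2]
        threshold = self.clipping_scale * median  # e.g. ~8.8e+02 in the log
        # clip_grad_norm_ returns the pre-clipping total norm, which we
        # record so future thresholds adapt to the training trajectory.
        total = torch.nn.utils.clip_grad_norm_(model.parameters(), threshold)
        self.history.append(float(total))
        return threshold
```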
+2023-02-05 22:17:45,750 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.506e+02 4.308e+02 5.585e+02 1.214e+03, threshold=8.617e+02, percent-clipped=6.0
+2023-02-05 22:18:05,739 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22447.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:18:19,135 INFO [train.py:901] (0/4) Epoch 3, batch 6300, loss[loss=0.2676, simple_loss=0.3239, pruned_loss=0.1057, over 7533.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.3763, pruned_loss=0.1386, over 1615797.14 frames. ], batch size: 18, lr: 2.17e-02, grad_scale: 16.0
+2023-02-05 22:18:35,761 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:18:39,286 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22496.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:18:54,107 INFO [train.py:901] (0/4) Epoch 3, batch 6350, loss[loss=0.3102, simple_loss=0.3462, pruned_loss=0.1371, over 7719.00 frames. ], tot_loss[loss=0.3241, simple_loss=0.374, pruned_loss=0.1371, over 1612309.80 frames. ], batch size: 18, lr: 2.17e-02, grad_scale: 16.0
+2023-02-05 22:18:55,440 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.104e+02 3.537e+02 4.368e+02 5.315e+02 1.494e+03, threshold=8.736e+02, percent-clipped=5.0
+2023-02-05 22:18:57,026 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22521.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:19:08,836 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22538.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:19:28,745 INFO [train.py:901] (0/4) Epoch 3, batch 6400, loss[loss=0.3211, simple_loss=0.3773, pruned_loss=0.1325, over 8032.00 frames. ], tot_loss[loss=0.3235, simple_loss=0.3737, pruned_loss=0.1366, over 1613056.39 frames. ], batch size: 22, lr: 2.17e-02, grad_scale: 16.0
+2023-02-05 22:19:35,377 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22577.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:19:39,427 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:19:46,067 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6890, 1.5130, 2.7421, 1.2271, 2.0669, 2.9387, 2.7548, 2.5396],
+       device='cuda:0'), covar=tensor([0.1112, 0.1338, 0.0453, 0.1958, 0.0647, 0.0328, 0.0404, 0.0651],
+       device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0263, 0.0200, 0.0260, 0.0209, 0.0179, 0.0176, 0.0249],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:19:50,039 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22599.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:19:55,815 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:19:56,553 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22608.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:20:03,194 INFO [train.py:901] (0/4) Epoch 3, batch 6450, loss[loss=0.3196, simple_loss=0.3744, pruned_loss=0.1324, over 7810.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.3729, pruned_loss=0.1362, over 1611506.05 frames. ], batch size: 19, lr: 2.16e-02, grad_scale: 16.0
+2023-02-05 22:20:04,473 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 3.557e+02 4.436e+02 5.729e+02 1.082e+03, threshold=8.871e+02, percent-clipped=7.0
+2023-02-05 22:20:28,474 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22653.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:20:31,157 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22657.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:20:35,791 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3784, 1.9320, 1.9245, 0.8834, 1.9187, 1.4694, 0.3692, 1.7694],
+       device='cuda:0'), covar=tensor([0.0118, 0.0066, 0.0063, 0.0133, 0.0074, 0.0218, 0.0195, 0.0057],
+       device='cuda:0'), in_proj_covar=tensor([0.0240, 0.0166, 0.0141, 0.0217, 0.0161, 0.0290, 0.0228, 0.0193],
+       device='cuda:0'), out_proj_covar=tensor([1.0969e-04, 7.4442e-05, 6.3151e-05, 9.6748e-05, 7.5123e-05, 1.4177e-04,
+       1.0452e-04, 8.5482e-05], device='cuda:0')
+2023-02-05 22:20:37,604 INFO [train.py:901] (0/4) Epoch 3, batch 6500, loss[loss=0.3155, simple_loss=0.3846, pruned_loss=0.1232, over 8495.00 frames. ], tot_loss[loss=0.3197, simple_loss=0.3705, pruned_loss=0.1345, over 1609619.31 frames. ], batch size: 26, lr: 2.16e-02, grad_scale: 16.0
+2023-02-05 22:20:55,236 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22692.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:21:02,654 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22703.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:21:09,871 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22714.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:21:11,765 INFO [train.py:901] (0/4) Epoch 3, batch 6550, loss[loss=0.3055, simple_loss=0.3577, pruned_loss=0.1267, over 7160.00 frames. ], tot_loss[loss=0.3215, simple_loss=0.3717, pruned_loss=0.1356, over 1606092.13 frames. ], batch size: 16, lr: 2.16e-02, grad_scale: 16.0
+2023-02-05 22:21:13,168 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.155e+02 3.258e+02 3.883e+02 5.357e+02 1.264e+03, threshold=7.766e+02, percent-clipped=3.0
+2023-02-05 22:21:19,296 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22728.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:21:28,103 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0793, 1.4926, 1.3887, 1.1790, 1.5698, 1.4011, 1.4133, 1.5612],
+       device='cuda:0'), covar=tensor([0.0737, 0.1385, 0.1993, 0.1607, 0.0691, 0.1682, 0.0925, 0.0608],
+       device='cuda:0'), in_proj_covar=tensor([0.0180, 0.0214, 0.0252, 0.0211, 0.0177, 0.0215, 0.0180, 0.0177],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-05 22:21:28,621 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-05 22:21:32,840 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22747.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:21:47,206 INFO [train.py:901] (0/4) Epoch 3, batch 6600, loss[loss=0.3766, simple_loss=0.4256, pruned_loss=0.1638, over 8628.00 frames. ], tot_loss[loss=0.3226, simple_loss=0.3733, pruned_loss=0.136, over 1610926.45 frames. ], batch size: 39, lr: 2.16e-02, grad_scale: 8.0
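The recurring `[zipformer.py:1185]` lines track, per encoder stack, a warmup window measured in batches (`warmup_begin`/`warmup_end`) and which layers were stochastically dropped on this step; once training is well past warmup the log consistently shows `num_to_drop=0, layers_to_drop=set()`. The following is a hedged sketch of batch-count-dependent layer dropping. The linear decay schedule and `max_drop_prob` are illustrative assumptions, not the zipformer's exact rule.

```python
# Illustrative sketch of warmup-dependent stochastic layer dropping,
# mirroring the [zipformer.py:1185] lines above. The linear schedule
# is an assumption; the actual zipformer rule may differ.
import random

def layers_to_drop(batch_count: float, warmup_begin: float,
                   warmup_end: float, num_layers: int,
                   max_drop_prob: float = 0.5) -> set:
    if batch_count >= warmup_end:
        return set()  # fully warmed up: keep every layer
    # Drop probability decays linearly over the warmup window.
    span = warmup_end - warmup_begin
    progress = min(max(batch_count - warmup_begin, 0.0) / span, 1.0)
    p = max_drop_prob * (1.0 - progress)
    return {i for i in range(num_layers) if random.random() < p}

# Example matching a log line: well past warmup, nothing is dropped.
print(layers_to_drop(batch_count=22653.0, warmup_begin=2000.0,
                     warmup_end=2666.7, num_layers=12))  # -> set()
```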
+2023-02-05 22:21:48,605 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-05 22:22:19,046 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22812.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:22:22,356 INFO [train.py:901] (0/4) Epoch 3, batch 6650, loss[loss=0.3483, simple_loss=0.3998, pruned_loss=0.1484, over 8251.00 frames. ], tot_loss[loss=0.3229, simple_loss=0.3737, pruned_loss=0.1361, over 1612192.63 frames. ], batch size: 24, lr: 2.16e-02, grad_scale: 8.0
+2023-02-05 22:22:24,343 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.373e+02 3.456e+02 4.169e+02 5.335e+02 9.931e+02, threshold=8.339e+02, percent-clipped=8.0
+2023-02-05 22:22:29,339 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6453, 2.4789, 1.5603, 2.7904, 1.1958, 1.3565, 1.9536, 2.4454],
+       device='cuda:0'), covar=tensor([0.1576, 0.1099, 0.2681, 0.0621, 0.2094, 0.2416, 0.1696, 0.0973],
+       device='cuda:0'), in_proj_covar=tensor([0.0306, 0.0304, 0.0305, 0.0230, 0.0281, 0.0313, 0.0317, 0.0299],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005],
+       device='cuda:0')
+2023-02-05 22:22:40,069 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22843.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:22:53,754 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22862.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:22:54,512 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:22:57,082 INFO [train.py:901] (0/4) Epoch 3, batch 6700, loss[loss=0.2136, simple_loss=0.277, pruned_loss=0.07506, over 7696.00 frames. ], tot_loss[loss=0.3221, simple_loss=0.3731, pruned_loss=0.1356, over 1611078.95 frames. ], batch size: 18, lr: 2.15e-02, grad_scale: 8.0
+2023-02-05 22:23:09,422 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.08 vs. limit=5.0
+2023-02-05 22:23:12,647 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22888.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:23:26,904 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:23:32,715 INFO [train.py:901] (0/4) Epoch 3, batch 6750, loss[loss=0.3216, simple_loss=0.3677, pruned_loss=0.1377, over 8361.00 frames. ], tot_loss[loss=0.3224, simple_loss=0.3734, pruned_loss=0.1357, over 1613719.84 frames. ], batch size: 24, lr: 2.15e-02, grad_scale: 8.0
+2023-02-05 22:23:34,754 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.597e+02 4.402e+02 5.483e+02 1.400e+03, threshold=8.804e+02, percent-clipped=7.0
+2023-02-05 22:23:44,512 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22934.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:23:53,616 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22948.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:24:05,893 INFO [train.py:901] (0/4) Epoch 3, batch 6800, loss[loss=0.2587, simple_loss=0.3123, pruned_loss=0.1025, over 7563.00 frames. ], tot_loss[loss=0.3242, simple_loss=0.3742, pruned_loss=0.1371, over 1613471.03 frames. ], batch size: 18, lr: 2.15e-02, grad_scale: 8.0
+2023-02-05 22:24:05,903 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-05 22:24:08,779 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22970.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:24:10,813 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:24:26,130 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22995.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:24:30,232 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23001.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:24:41,422 INFO [train.py:901] (0/4) Epoch 3, batch 6850, loss[loss=0.3351, simple_loss=0.3787, pruned_loss=0.1458, over 7195.00 frames. ], tot_loss[loss=0.3238, simple_loss=0.3737, pruned_loss=0.137, over 1611676.81 frames. ], batch size: 16, lr: 2.15e-02, grad_scale: 8.0
+2023-02-05 22:24:43,433 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.425e+02 4.505e+02 5.413e+02 1.323e+03, threshold=9.011e+02, percent-clipped=6.0
+2023-02-05 22:24:54,853 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-05 22:25:13,998 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2555, 1.3244, 2.2657, 0.9882, 2.1648, 2.4181, 2.3100, 2.0918],
+       device='cuda:0'), covar=tensor([0.1081, 0.1209, 0.0466, 0.2020, 0.0489, 0.0380, 0.0463, 0.0729],
+       device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0261, 0.0201, 0.0259, 0.0202, 0.0178, 0.0177, 0.0246],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:25:15,259 INFO [train.py:901] (0/4) Epoch 3, batch 6900, loss[loss=0.3852, simple_loss=0.4073, pruned_loss=0.1816, over 7223.00 frames. ], tot_loss[loss=0.3247, simple_loss=0.3742, pruned_loss=0.1376, over 1610321.23 frames. ], batch size: 71, lr: 2.14e-02, grad_scale: 8.0
+2023-02-05 22:25:50,208 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23116.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:25:50,692 INFO [train.py:901] (0/4) Epoch 3, batch 6950, loss[loss=0.2973, simple_loss=0.3472, pruned_loss=0.1237, over 8131.00 frames. ], tot_loss[loss=0.3245, simple_loss=0.3742, pruned_loss=0.1375, over 1611235.71 frames. ], batch size: 22, lr: 2.14e-02, grad_scale: 8.0
+2023-02-05 22:25:51,604 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23118.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:25:52,729 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.525e+02 4.440e+02 6.025e+02 1.140e+03, threshold=8.880e+02, percent-clipped=3.0
+2023-02-05 22:25:57,641 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23126.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:26:02,130 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-05 22:26:09,856 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23143.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:26:18,630 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23156.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:26:26,189 INFO [train.py:901] (0/4) Epoch 3, batch 7000, loss[loss=0.2801, simple_loss=0.3288, pruned_loss=0.1157, over 7409.00 frames. ], tot_loss[loss=0.3252, simple_loss=0.3744, pruned_loss=0.138, over 1608966.12 frames. ], batch size: 17, lr: 2.14e-02, grad_scale: 8.0
+2023-02-05 22:26:39,913 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:26:44,867 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4405, 1.7011, 1.5095, 1.2899, 1.5000, 1.5875, 1.7522, 1.6831],
+       device='cuda:0'), covar=tensor([0.0641, 0.1346, 0.2013, 0.1646, 0.0842, 0.1612, 0.0919, 0.0660],
+       device='cuda:0'), in_proj_covar=tensor([0.0176, 0.0211, 0.0248, 0.0211, 0.0176, 0.0214, 0.0174, 0.0176],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-05 22:27:01,198 INFO [train.py:901] (0/4) Epoch 3, batch 7050, loss[loss=0.3613, simple_loss=0.4046, pruned_loss=0.159, over 8027.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3756, pruned_loss=0.1393, over 1609383.15 frames. ], batch size: 22, lr: 2.14e-02, grad_scale: 8.0
+2023-02-05 22:27:03,862 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 3.682e+02 4.488e+02 5.424e+02 1.788e+03, threshold=8.977e+02, percent-clipped=6.0
+2023-02-05 22:27:14,101 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23235.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:27:18,797 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0484, 1.3704, 4.1633, 1.6823, 3.5601, 3.4002, 3.6961, 3.6518],
+       device='cuda:0'), covar=tensor([0.0309, 0.2983, 0.0299, 0.1666, 0.0965, 0.0500, 0.0369, 0.0395],
+       device='cuda:0'), in_proj_covar=tensor([0.0263, 0.0429, 0.0321, 0.0340, 0.0405, 0.0334, 0.0316, 0.0356],
+       device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001],
+       device='cuda:0')
+2023-02-05 22:27:22,745 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:27:36,329 INFO [train.py:901] (0/4) Epoch 3, batch 7100, loss[loss=0.356, simple_loss=0.3992, pruned_loss=0.1565, over 7812.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.3755, pruned_loss=0.139, over 1611144.61 frames. ], batch size: 20, lr: 2.14e-02, grad_scale: 8.0
+2023-02-05 22:27:39,094 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23271.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:27:59,141 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:28:08,841 INFO [train.py:901] (0/4) Epoch 3, batch 7150, loss[loss=0.3096, simple_loss=0.3631, pruned_loss=0.1281, over 8329.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3757, pruned_loss=0.1383, over 1614416.04 frames. ], batch size: 26, lr: 2.13e-02, grad_scale: 8.0
+2023-02-05 22:28:10,863 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.845e+02 4.572e+02 5.960e+02 1.048e+03, threshold=9.143e+02, percent-clipped=2.0
+2023-02-05 22:28:17,308 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=5.70 vs. limit=5.0
+2023-02-05 22:28:42,756 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9731, 1.7701, 4.1289, 1.7757, 2.4888, 4.6594, 4.3779, 4.1198],
+       device='cuda:0'), covar=tensor([0.1174, 0.1320, 0.0239, 0.1755, 0.0696, 0.0153, 0.0275, 0.0482],
+       device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0262, 0.0207, 0.0263, 0.0205, 0.0182, 0.0184, 0.0254],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:28:43,292 INFO [train.py:901] (0/4) Epoch 3, batch 7200, loss[loss=0.4062, simple_loss=0.4434, pruned_loss=0.1845, over 8642.00 frames. ], tot_loss[loss=0.328, simple_loss=0.377, pruned_loss=0.1395, over 1610593.85 frames. ], batch size: 34, lr: 2.13e-02, grad_scale: 8.0
+2023-02-05 22:28:47,562 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:29:04,751 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23397.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:29:17,769 INFO [train.py:901] (0/4) Epoch 3, batch 7250, loss[loss=0.2848, simple_loss=0.3422, pruned_loss=0.1137, over 7802.00 frames. ], tot_loss[loss=0.3275, simple_loss=0.3762, pruned_loss=0.1393, over 1612642.50 frames. ], batch size: 20, lr: 2.13e-02, grad_scale: 4.0
+2023-02-05 22:29:20,312 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.518e+02 3.505e+02 4.323e+02 5.847e+02 9.851e+02, threshold=8.646e+02, percent-clipped=2.0
+2023-02-05 22:29:31,973 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1097, 1.4659, 1.5326, 1.0923, 0.8541, 1.5357, 0.0766, 1.0872],
+       device='cuda:0'), covar=tensor([0.3674, 0.2366, 0.1538, 0.2764, 0.5916, 0.0918, 0.6310, 0.2371],
+       device='cuda:0'), in_proj_covar=tensor([0.0120, 0.0115, 0.0086, 0.0159, 0.0183, 0.0080, 0.0150, 0.0118],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:29:52,889 INFO [train.py:901] (0/4) Epoch 3, batch 7300, loss[loss=0.3058, simple_loss=0.3609, pruned_loss=0.1253, over 7647.00 frames. ], tot_loss[loss=0.3272, simple_loss=0.3766, pruned_loss=0.1389, over 1614949.50 frames. ], batch size: 19, lr: 2.13e-02, grad_scale: 4.0
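The `[scaling.py:679]` lines report a whitening diagnostic per activation group: a metric compared against a limit (e.g. `metric=5.70 vs. limit=5.0`), measuring how far the channel covariance of the activations is from white (isotropic). One plausible formulation of such a metric, offered as an assumption rather than icefall's exact formula, is the ratio of the mean squared covariance eigenvalue to the squared mean eigenvalue, which equals 1 for perfectly white features and grows as the spectrum becomes lopsided.

```python
# Sketch of a whitening diagnostic in the spirit of the
# [scaling.py:679] lines. The exact metric in icefall may differ;
# the eigenvalue ratio below is an assumption for illustration.
import torch

def whitening_metric(x: torch.Tensor, num_groups: int = 1) -> float:
    """x: (num_frames, num_channels). Returns >= 1; 1 means white."""
    num_channels = x.shape[1]
    group = num_channels // num_groups
    metrics = []
    for g in range(num_groups):
        xg = x[:, g * group:(g + 1) * group]
        xg = xg - xg.mean(dim=0, keepdim=True)
        cov = (xg.T @ xg) / xg.shape[0]
        eigs = torch.linalg.eigvalsh(cov)
        # mean(eig^2) / mean(eig)^2 == 1 iff all eigenvalues are equal.
        metrics.append((eigs.pow(2).mean() / eigs.mean().pow(2)).item())
    return max(metrics)

print(whitening_metric(torch.randn(1000, 256)))  # close to 1 for white noise
```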
+2023-02-05 22:29:55,064 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23470.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:30:08,356 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9275, 1.0721, 4.0974, 1.5217, 3.3290, 3.3240, 3.5770, 3.5305],
+       device='cuda:0'), covar=tensor([0.0510, 0.3765, 0.0370, 0.2059, 0.1355, 0.0581, 0.0492, 0.0644],
+       device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0439, 0.0321, 0.0349, 0.0417, 0.0346, 0.0320, 0.0368],
+       device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-05 22:30:14,526 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2865, 4.3563, 3.7790, 1.6434, 3.7415, 3.6135, 3.9253, 3.3669],
+       device='cuda:0'), covar=tensor([0.0780, 0.0516, 0.0839, 0.4435, 0.0634, 0.0744, 0.1193, 0.0645],
+       device='cuda:0'), in_proj_covar=tensor([0.0363, 0.0249, 0.0276, 0.0365, 0.0269, 0.0223, 0.0267, 0.0208],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:0')
+2023-02-05 22:30:20,012 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7924, 2.0901, 1.9681, 1.8133, 1.8764, 1.9930, 2.3616, 2.1558],
+       device='cuda:0'), covar=tensor([0.0529, 0.0941, 0.1452, 0.1284, 0.0660, 0.1199, 0.0660, 0.0473],
+       device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0208, 0.0243, 0.0210, 0.0171, 0.0211, 0.0174, 0.0174],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-05 22:30:21,347 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23506.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:30:28,682 INFO [train.py:901] (0/4) Epoch 3, batch 7350, loss[loss=0.2958, simple_loss=0.3528, pruned_loss=0.1194, over 7800.00 frames. ], tot_loss[loss=0.3269, simple_loss=0.3759, pruned_loss=0.139, over 1609445.33 frames. ], batch size: 19, lr: 2.12e-02, grad_scale: 4.0
+2023-02-05 22:30:31,447 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.204e+02 3.295e+02 4.174e+02 5.897e+02 1.266e+03, threshold=8.348e+02, percent-clipped=6.0
+2023-02-05 22:30:35,744 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23527.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:30:45,652 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-05 22:30:52,309 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23552.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:30:56,337 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:31:03,072 INFO [train.py:901] (0/4) Epoch 3, batch 7400, loss[loss=0.3948, simple_loss=0.4388, pruned_loss=0.1754, over 8475.00 frames. ], tot_loss[loss=0.3265, simple_loss=0.3763, pruned_loss=0.1384, over 1610812.34 frames. ], batch size: 29, lr: 2.12e-02, grad_scale: 4.0
+2023-02-05 22:31:03,934 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9834, 1.2110, 1.8014, 0.8654, 1.3695, 1.1525, 1.0605, 1.2062],
+       device='cuda:0'), covar=tensor([0.0935, 0.1120, 0.0385, 0.1800, 0.0776, 0.1423, 0.0901, 0.1153],
+       device='cuda:0'), in_proj_covar=tensor([0.0444, 0.0413, 0.0490, 0.0502, 0.0550, 0.0488, 0.0435, 0.0556],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003],
+       device='cuda:0')
+2023-02-05 22:31:05,768 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-05 22:31:11,874 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23579.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:31:14,796 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:31:16,198 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23585.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:31:20,319 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23591.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:31:38,776 INFO [train.py:901] (0/4) Epoch 3, batch 7450, loss[loss=0.4289, simple_loss=0.4597, pruned_loss=0.1991, over 8679.00 frames. ], tot_loss[loss=0.3282, simple_loss=0.3778, pruned_loss=0.1393, over 1615403.75 frames. ], batch size: 34, lr: 2.12e-02, grad_scale: 4.0
+2023-02-05 22:31:41,492 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.560e+02 4.542e+02 5.434e+02 8.209e+02, threshold=9.083e+02, percent-clipped=0.0
+2023-02-05 22:31:44,191 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-05 22:32:03,634 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7352, 1.5944, 3.3662, 1.3889, 2.1618, 3.5989, 3.3202, 3.1055],
+       device='cuda:0'), covar=tensor([0.1041, 0.1292, 0.0295, 0.1728, 0.0655, 0.0234, 0.0313, 0.0547],
+       device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0266, 0.0210, 0.0264, 0.0206, 0.0184, 0.0187, 0.0262],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+       device='cuda:0')
+2023-02-05 22:32:11,862 INFO [train.py:901] (0/4) Epoch 3, batch 7500, loss[loss=0.3516, simple_loss=0.3939, pruned_loss=0.1546, over 7140.00 frames. ], tot_loss[loss=0.3283, simple_loss=0.3777, pruned_loss=0.1395, over 1610814.49 frames. ], batch size: 71, lr: 2.12e-02, grad_scale: 4.0
+2023-02-05 22:32:23,281 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4814, 1.8962, 2.1085, 1.6791, 1.0343, 1.9602, 0.4422, 1.4896],
+       device='cuda:0'), covar=tensor([0.3600, 0.1861, 0.1289, 0.2284, 0.6641, 0.1115, 0.5974, 0.2020],
+       device='cuda:0'), in_proj_covar=tensor([0.0122, 0.0115, 0.0084, 0.0159, 0.0187, 0.0082, 0.0147, 0.0119],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:32:31,221 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23694.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:32:39,257 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23706.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:32:47,008 INFO [train.py:901] (0/4) Epoch 3, batch 7550, loss[loss=0.2173, simple_loss=0.2897, pruned_loss=0.07251, over 7255.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.376, pruned_loss=0.1382, over 1608829.91 frames. ], batch size: 16, lr: 2.12e-02, grad_scale: 4.0
+2023-02-05 22:32:49,786 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 3.573e+02 4.120e+02 5.568e+02 9.909e+02, threshold=8.240e+02, percent-clipped=1.0
+2023-02-05 22:33:21,013 INFO [train.py:901] (0/4) Epoch 3, batch 7600, loss[loss=0.3067, simple_loss=0.3799, pruned_loss=0.1168, over 8033.00 frames. ], tot_loss[loss=0.3246, simple_loss=0.3746, pruned_loss=0.1373, over 1608575.87 frames. ], batch size: 22, lr: 2.11e-02, grad_scale: 8.0
+2023-02-05 22:33:55,888 INFO [train.py:901] (0/4) Epoch 3, batch 7650, loss[loss=0.3031, simple_loss=0.3455, pruned_loss=0.1303, over 8082.00 frames. ], tot_loss[loss=0.3244, simple_loss=0.3741, pruned_loss=0.1374, over 1608970.87 frames. ], batch size: 21, lr: 2.11e-02, grad_scale: 8.0
+2023-02-05 22:33:58,400 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.349e+02 3.333e+02 4.379e+02 5.791e+02 1.321e+03, threshold=8.759e+02, percent-clipped=7.0
+2023-02-05 22:34:12,405 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-05 22:34:12,868 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23841.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:34:19,390 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:34:29,028 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0
+2023-02-05 22:34:30,201 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23866.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:34:30,651 INFO [train.py:901] (0/4) Epoch 3, batch 7700, loss[loss=0.2973, simple_loss=0.362, pruned_loss=0.1163, over 8367.00 frames. ], tot_loss[loss=0.325, simple_loss=0.3752, pruned_loss=0.1374, over 1614386.11 frames. ], batch size: 24, lr: 2.11e-02, grad_scale: 8.0
+2023-02-05 22:34:50,583 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-05 22:34:50,859 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-05 22:35:04,780 INFO [train.py:901] (0/4) Epoch 3, batch 7750, loss[loss=0.3087, simple_loss=0.3657, pruned_loss=0.1258, over 8324.00 frames. ], tot_loss[loss=0.3259, simple_loss=0.3759, pruned_loss=0.138, over 1615756.04 frames. ], batch size: 25, lr: 2.11e-02, grad_scale: 8.0
+2023-02-05 22:35:08,099 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.458e+02 4.167e+02 5.729e+02 1.393e+03, threshold=8.335e+02, percent-clipped=8.0
+2023-02-05 22:35:27,638 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23950.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:35:37,132 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23962.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:35:39,025 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23965.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:35:40,191 INFO [train.py:901] (0/4) Epoch 3, batch 7800, loss[loss=0.36, simple_loss=0.4064, pruned_loss=0.1567, over 8346.00 frames. ], tot_loss[loss=0.326, simple_loss=0.3761, pruned_loss=0.138, over 1617299.08 frames. ], batch size: 26, lr: 2.11e-02, grad_scale: 8.0
+2023-02-05 22:35:45,535 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23975.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:35:53,464 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23987.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:36:02,019 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-24000.pt
+2023-02-05 22:36:14,019 INFO [train.py:901] (0/4) Epoch 3, batch 7850, loss[loss=0.3165, simple_loss=0.3635, pruned_loss=0.1347, over 7809.00 frames. ], tot_loss[loss=0.3268, simple_loss=0.3771, pruned_loss=0.1383, over 1619500.14 frames. ], batch size: 20, lr: 2.10e-02, grad_scale: 8.0
+2023-02-05 22:36:16,543 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.230e+02 3.608e+02 4.565e+02 5.801e+02 1.089e+03, threshold=9.129e+02, percent-clipped=5.0
+2023-02-05 22:36:20,032 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7694, 6.0109, 4.9336, 2.0742, 5.1526, 5.3677, 5.4148, 4.7990],
+       device='cuda:0'), covar=tensor([0.0714, 0.0300, 0.0883, 0.4578, 0.0508, 0.0444, 0.1082, 0.0561],
+       device='cuda:0'), in_proj_covar=tensor([0.0359, 0.0246, 0.0286, 0.0369, 0.0273, 0.0230, 0.0270, 0.0214],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001],
+       device='cuda:0')
+2023-02-05 22:36:39,258 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24055.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:36:47,372 INFO [train.py:901] (0/4) Epoch 3, batch 7900, loss[loss=0.3326, simple_loss=0.3799, pruned_loss=0.1427, over 8249.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3778, pruned_loss=0.1383, over 1622018.17 frames. ], batch size: 22, lr: 2.10e-02, grad_scale: 8.0
+2023-02-05 22:37:20,421 INFO [train.py:901] (0/4) Epoch 3, batch 7950, loss[loss=0.2908, simple_loss=0.3342, pruned_loss=0.1237, over 7698.00 frames. ], tot_loss[loss=0.3235, simple_loss=0.3745, pruned_loss=0.1363, over 1615384.22 frames. ], batch size: 18, lr: 2.10e-02, grad_scale: 8.0
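The `checkpoint-22000.pt` and `checkpoint-24000.pt` saves above suggest a checkpoint is written every 2,000 batches, with an additional `epoch-N.pt` at each epoch boundary (visible a little further down, before Epoch 4 starts). A minimal sketch of that cadence follows; the interval and filenames are read off the log, while the helper names and the exact payload saved are assumptions.

```python
# Sketch of the checkpoint cadence implied by checkpoint-22000.pt /
# checkpoint-24000.pt and epoch-3.pt. Paths follow the log; the
# helpers themselves are hypothetical.
from pathlib import Path
import torch

EXP_DIR = Path("pruned_transducer_stateless7_streaming/exp/v1")
SAVE_EVERY_N_BATCHES = 2000  # inferred from the 22000 -> 24000 step

def maybe_save(model: torch.nn.Module, batch_count: int) -> None:
    """Write a rolling checkpoint on every N-batch boundary."""
    if batch_count % SAVE_EVERY_N_BATCHES == 0:
        path = EXP_DIR / f"checkpoint-{batch_count}.pt"
        torch.save({"model": model.state_dict(), "batch": batch_count}, path)

def save_epoch(model: torch.nn.Module, epoch: int) -> None:
    """Write the end-of-epoch checkpoint (e.g. epoch-3.pt)."""
    torch.save({"model": model.state_dict()}, EXP_DIR / f"epoch-{epoch}.pt")
```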
+2023-02-05 22:37:23,174 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.080e+02 3.295e+02 4.369e+02 5.897e+02 2.335e+03, threshold=8.738e+02, percent-clipped=5.0
+2023-02-05 22:37:27,563 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5680, 2.0764, 3.3508, 2.8514, 2.7424, 2.1303, 1.5816, 1.3469],
+       device='cuda:0'), covar=tensor([0.0936, 0.1195, 0.0252, 0.0502, 0.0539, 0.0580, 0.0695, 0.1253],
+       device='cuda:0'), in_proj_covar=tensor([0.0632, 0.0548, 0.0464, 0.0517, 0.0628, 0.0509, 0.0523, 0.0536],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:37:54,063 INFO [train.py:901] (0/4) Epoch 3, batch 8000, loss[loss=0.3349, simple_loss=0.3809, pruned_loss=0.1445, over 8402.00 frames. ], tot_loss[loss=0.3243, simple_loss=0.3749, pruned_loss=0.1369, over 1615800.44 frames. ], batch size: 48, lr: 2.10e-02, grad_scale: 8.0
+2023-02-05 22:38:25,220 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-05 22:38:27,961 INFO [train.py:901] (0/4) Epoch 3, batch 8050, loss[loss=0.2415, simple_loss=0.3127, pruned_loss=0.08514, over 7227.00 frames. ], tot_loss[loss=0.3237, simple_loss=0.3729, pruned_loss=0.1373, over 1588716.38 frames. ], batch size: 16, lr: 2.09e-02, grad_scale: 8.0
+2023-02-05 22:38:30,260 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8917, 1.3145, 1.4046, 1.1363, 1.4161, 1.2950, 1.5673, 1.5470],
+       device='cuda:0'), covar=tensor([0.0727, 0.1386, 0.2040, 0.1672, 0.0716, 0.1727, 0.0892, 0.0634],
+       device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0207, 0.0247, 0.0209, 0.0168, 0.0216, 0.0175, 0.0175],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-05 22:38:30,756 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.328e+02 4.149e+02 5.404e+02 3.135e+03, threshold=8.298e+02, percent-clipped=6.0
+2023-02-05 22:38:31,020 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24221.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:38:46,182 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8088, 1.6619, 2.3114, 1.9633, 2.0403, 1.5609, 1.2367, 1.2161],
+       device='cuda:0'), covar=tensor([0.0788, 0.0865, 0.0223, 0.0356, 0.0365, 0.0468, 0.0592, 0.0785],
+       device='cuda:0'), in_proj_covar=tensor([0.0629, 0.0548, 0.0467, 0.0515, 0.0621, 0.0510, 0.0523, 0.0527],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:38:48,136 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24246.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:38:51,288 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-3.pt
+2023-02-05 22:39:03,748 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-05 22:39:07,719 INFO [train.py:901] (0/4) Epoch 4, batch 0, loss[loss=0.3673, simple_loss=0.3982, pruned_loss=0.1682, over 8457.00 frames. ], tot_loss[loss=0.3673, simple_loss=0.3982, pruned_loss=0.1682, over 8457.00 frames. ], batch size: 27, lr: 1.96e-02, grad_scale: 8.0
+2023-02-05 22:39:07,720 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-05 22:39:18,718 INFO [train.py:935] (0/4) Epoch 4, validation: loss=0.2476, simple_loss=0.3384, pruned_loss=0.07836, over 944034.00 frames.
+2023-02-05 22:39:18,719 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB
+2023-02-05 22:39:24,332 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1191, 3.0969, 2.7853, 1.4889, 2.7779, 2.7009, 2.8788, 2.3916],
+       device='cuda:0'), covar=tensor([0.1262, 0.0799, 0.1173, 0.4282, 0.0897, 0.0936, 0.1489, 0.0996],
+       device='cuda:0'), in_proj_covar=tensor([0.0364, 0.0248, 0.0291, 0.0374, 0.0277, 0.0234, 0.0271, 0.0216],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001],
+       device='cuda:0')
+2023-02-05 22:39:34,133 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-05 22:39:52,980 INFO [train.py:901] (0/4) Epoch 4, batch 50, loss[loss=0.3377, simple_loss=0.3911, pruned_loss=0.1421, over 8453.00 frames. ], tot_loss[loss=0.3251, simple_loss=0.3754, pruned_loss=0.1374, over 367592.82 frames. ], batch size: 27, lr: 1.96e-02, grad_scale: 8.0
+2023-02-05 22:40:07,593 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.017e+02 3.527e+02 4.250e+02 5.116e+02 9.987e+02, threshold=8.500e+02, percent-clipped=2.0
+2023-02-05 22:40:09,015 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-05 22:40:27,955 INFO [train.py:901] (0/4) Epoch 4, batch 100, loss[loss=0.3389, simple_loss=0.3785, pruned_loss=0.1497, over 8295.00 frames. ], tot_loss[loss=0.324, simple_loss=0.3743, pruned_loss=0.1369, over 644855.47 frames. ], batch size: 23, lr: 1.95e-02, grad_scale: 8.0
+2023-02-05 22:40:31,335 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-05 22:40:43,199 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.04 vs. limit=2.0
+2023-02-05 22:40:55,695 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-05 22:41:01,465 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24399.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:41:02,234 INFO [train.py:901] (0/4) Epoch 4, batch 150, loss[loss=0.3173, simple_loss=0.3686, pruned_loss=0.133, over 8411.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3706, pruned_loss=0.1325, over 863117.43 frames. ], batch size: 49, lr: 1.95e-02, grad_scale: 8.0
+2023-02-05 22:41:16,207 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.54 vs. limit=5.0
+2023-02-05 22:41:17,163 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.490e+02 4.203e+02 5.614e+02 1.653e+03, threshold=8.406e+02, percent-clipped=4.0
+2023-02-05 22:41:37,211 INFO [train.py:901] (0/4) Epoch 4, batch 200, loss[loss=0.3254, simple_loss=0.3791, pruned_loss=0.1359, over 8252.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3713, pruned_loss=0.1325, over 1032763.20 frames. ], batch size: 24, lr: 1.95e-02, grad_scale: 8.0
+2023-02-05 22:41:57,240 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0
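At each 2,000-batch boundary and at the start of every epoch (as in the `Epoch 4, batch 0` entry just above), the trainer pauses to compute a validation loss over a fixed held-out set (944034.00 frames each time) and then logs peak GPU memory. Below is a hedged sketch of that pattern; the memory call is standard PyTorch, while the loss-aggregation details and function names are assumptions.

```python
# Sketch of the periodic validation pass behind the "Computing
# validation loss" / "Maximum memory allocated" lines above.
# Aggregation details are assumptions; the memory query is the
# standard torch.cuda API.
import logging
import torch

@torch.no_grad()
def validate(model, valid_loader, compute_loss) -> float:
    logging.info("Computing validation loss")
    model.eval()
    total_loss, total_frames = 0.0, 0.0
    for batch in valid_loader:
        # compute_loss is assumed to return (summed loss, frame count).
        loss, num_frames = compute_loss(model, batch)
        total_loss += float(loss)
        total_frames += float(num_frames)
    model.train()
    avg = total_loss / max(total_frames, 1.0)
    logging.info("validation: loss=%.4f, over %.2f frames.", avg, total_frames)
    logging.info("Maximum memory allocated so far is %dMB",
                 torch.cuda.max_memory_allocated() // (1024 * 1024))
    return avg
```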
+2023-02-05 22:41:59,067 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5581, 2.0940, 2.2378, 0.9749, 2.1834, 1.3985, 0.6081, 1.8219],
+       device='cuda:0'), covar=tensor([0.0184, 0.0076, 0.0059, 0.0159, 0.0081, 0.0278, 0.0220, 0.0080],
+       device='cuda:0'), in_proj_covar=tensor([0.0258, 0.0177, 0.0142, 0.0226, 0.0170, 0.0307, 0.0243, 0.0210],
+       device='cuda:0'), out_proj_covar=tensor([1.1334e-04, 7.6250e-05, 6.1314e-05, 9.6080e-05, 7.5836e-05, 1.4412e-04,
+       1.0749e-04, 8.9656e-05], device='cuda:0')
+2023-02-05 22:42:11,042 INFO [train.py:901] (0/4) Epoch 4, batch 250, loss[loss=0.325, simple_loss=0.3714, pruned_loss=0.1393, over 8030.00 frames. ], tot_loss[loss=0.3187, simple_loss=0.3716, pruned_loss=0.1329, over 1163169.78 frames. ], batch size: 22, lr: 1.95e-02, grad_scale: 8.0
+2023-02-05 22:42:12,466 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.5531, 1.0495, 3.9488, 1.6302, 2.8833, 3.2192, 3.6047, 3.5480],
+       device='cuda:0'), covar=tensor([0.1092, 0.5429, 0.0939, 0.2880, 0.2578, 0.1353, 0.0946, 0.1073],
+       device='cuda:0'), in_proj_covar=tensor([0.0280, 0.0446, 0.0336, 0.0362, 0.0428, 0.0361, 0.0343, 0.0387],
+       device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-05 22:42:13,782 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2070, 1.1928, 4.3547, 1.6017, 3.7197, 3.6281, 3.9455, 3.8387],
+       device='cuda:0'), covar=tensor([0.0392, 0.3380, 0.0285, 0.2024, 0.0972, 0.0518, 0.0438, 0.0475],
+       device='cuda:0'), in_proj_covar=tensor([0.0281, 0.0447, 0.0337, 0.0363, 0.0429, 0.0361, 0.0344, 0.0388],
+       device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-05 22:42:20,364 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=24514.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:42:23,593 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-05 22:42:24,846 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.156e+02 3.531e+02 4.434e+02 5.277e+02 1.190e+03, threshold=8.868e+02, percent-clipped=4.0
+2023-02-05 22:42:31,609 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-05 22:42:45,996 INFO [train.py:901] (0/4) Epoch 4, batch 300, loss[loss=0.2939, simple_loss=0.3527, pruned_loss=0.1176, over 8497.00 frames. ], tot_loss[loss=0.3211, simple_loss=0.3734, pruned_loss=0.1344, over 1263971.15 frames. ], batch size: 28, lr: 1.95e-02, grad_scale: 8.0
+2023-02-05 22:42:56,998 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24565.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:43:12,330 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:43:21,665 INFO [train.py:901] (0/4) Epoch 4, batch 350, loss[loss=0.3505, simple_loss=0.4044, pruned_loss=0.1483, over 8593.00 frames. ], tot_loss[loss=0.3191, simple_loss=0.3716, pruned_loss=0.1333, over 1341984.07 frames. ], batch size: 39, lr: 1.94e-02, grad_scale: 8.0
+2023-02-05 22:43:31,764 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8254, 2.0964, 2.7630, 1.0461, 2.3917, 1.8490, 1.5559, 1.9397],
+       device='cuda:0'), covar=tensor([0.0194, 0.0082, 0.0068, 0.0184, 0.0128, 0.0193, 0.0180, 0.0105],
+       device='cuda:0'), in_proj_covar=tensor([0.0259, 0.0178, 0.0146, 0.0229, 0.0172, 0.0306, 0.0242, 0.0210],
+       device='cuda:0'), out_proj_covar=tensor([1.1364e-04, 7.6424e-05, 6.2785e-05, 9.6814e-05, 7.6290e-05, 1.4319e-04,
+       1.0660e-04, 8.9545e-05], device='cuda:0')
+2023-02-05 22:43:34,094 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0
+2023-02-05 22:43:35,592 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 3.300e+02 4.421e+02 5.071e+02 1.044e+03, threshold=8.841e+02, percent-clipped=4.0
+2023-02-05 22:43:52,427 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0
+2023-02-05 22:43:56,478 INFO [train.py:901] (0/4) Epoch 4, batch 400, loss[loss=0.3377, simple_loss=0.3797, pruned_loss=0.1479, over 8293.00 frames. ], tot_loss[loss=0.3206, simple_loss=0.3735, pruned_loss=0.1338, over 1407851.75 frames. ], batch size: 23, lr: 1.94e-02, grad_scale: 8.0
+2023-02-05 22:44:04,954 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-02-05 22:44:13,450 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-05 22:44:30,023 INFO [train.py:901] (0/4) Epoch 4, batch 450, loss[loss=0.2978, simple_loss=0.3653, pruned_loss=0.1152, over 8193.00 frames. ], tot_loss[loss=0.3234, simple_loss=0.376, pruned_loss=0.1354, over 1461069.57 frames. ], batch size: 23, lr: 1.94e-02, grad_scale: 8.0
+2023-02-05 22:44:44,799 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.414e+02 4.548e+02 5.600e+02 1.007e+03, threshold=9.096e+02, percent-clipped=5.0
+2023-02-05 22:45:04,960 INFO [train.py:901] (0/4) Epoch 4, batch 500, loss[loss=0.2939, simple_loss=0.3684, pruned_loss=0.1097, over 8325.00 frames. ], tot_loss[loss=0.3214, simple_loss=0.3744, pruned_loss=0.1342, over 1495997.26 frames. ], batch size: 25, lr: 1.94e-02, grad_scale: 8.0
+2023-02-05 22:45:19,893 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24770.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:45:28,211 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24783.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:45:36,833 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:45:40,170 INFO [train.py:901] (0/4) Epoch 4, batch 550, loss[loss=0.3532, simple_loss=0.3948, pruned_loss=0.1558, over 8812.00 frames. ], tot_loss[loss=0.3206, simple_loss=0.3736, pruned_loss=0.1338, over 1524840.04 frames. ], batch size: 40, lr: 1.94e-02, grad_scale: 8.0
+2023-02-05 22:45:53,858 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 3.369e+02 4.426e+02 5.591e+02 8.767e+02, threshold=8.852e+02, percent-clipped=0.0
+2023-02-05 22:46:13,961 INFO [train.py:901] (0/4) Epoch 4, batch 600, loss[loss=0.291, simple_loss=0.3544, pruned_loss=0.1138, over 8099.00 frames. ], tot_loss[loss=0.3188, simple_loss=0.3722, pruned_loss=0.1327, over 1546316.06 frames. ], batch size: 23, lr: 1.93e-02, grad_scale: 8.0
+2023-02-05 22:46:24,931 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24866.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:46:28,939 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-05 22:46:39,222 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2621, 1.8871, 2.1576, 0.8741, 2.0984, 1.5968, 0.5306, 1.6773],
+       device='cuda:0'), covar=tensor([0.0150, 0.0068, 0.0053, 0.0152, 0.0096, 0.0211, 0.0212, 0.0074],
+       device='cuda:0'), in_proj_covar=tensor([0.0252, 0.0175, 0.0140, 0.0219, 0.0165, 0.0297, 0.0236, 0.0207],
+       device='cuda:0'), out_proj_covar=tensor([1.0948e-04, 7.5440e-05, 5.9494e-05, 9.1786e-05, 7.2555e-05, 1.3915e-04,
+       1.0301e-04, 8.7562e-05], device='cuda:0')
+2023-02-05 22:46:49,144 INFO [train.py:901] (0/4) Epoch 4, batch 650, loss[loss=0.2878, simple_loss=0.3236, pruned_loss=0.126, over 7687.00 frames. ], tot_loss[loss=0.3196, simple_loss=0.3727, pruned_loss=0.1333, over 1564631.71 frames. ], batch size: 18, lr: 1.93e-02, grad_scale: 8.0
+2023-02-05 22:46:55,178 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:47:03,752 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 3.310e+02 4.230e+02 5.108e+02 1.167e+03, threshold=8.459e+02, percent-clipped=4.0
+2023-02-05 22:47:10,601 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24931.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:47:24,034 INFO [train.py:901] (0/4) Epoch 4, batch 700, loss[loss=0.2965, simple_loss=0.3641, pruned_loss=0.1145, over 8464.00 frames. ], tot_loss[loss=0.3185, simple_loss=0.3719, pruned_loss=0.1325, over 1577429.77 frames. ], batch size: 27, lr: 1.93e-02, grad_scale: 8.0
+2023-02-05 22:47:59,262 INFO [train.py:901] (0/4) Epoch 4, batch 750, loss[loss=0.305, simple_loss=0.3654, pruned_loss=0.1223, over 8293.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3707, pruned_loss=0.1326, over 1581041.11 frames. ], batch size: 23, lr: 1.93e-02, grad_scale: 8.0
+2023-02-05 22:48:00,335 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.55 vs. limit=5.0
+2023-02-05 22:48:08,255 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25013.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:48:13,344 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.175e+02 4.108e+02 5.247e+02 1.235e+03, threshold=8.217e+02, percent-clipped=4.0
+2023-02-05 22:48:14,041 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-05 22:48:15,510 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25024.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:48:22,610 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-05 22:48:30,660 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:48:33,224 INFO [train.py:901] (0/4) Epoch 4, batch 800, loss[loss=0.27, simple_loss=0.3284, pruned_loss=0.1058, over 7714.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3702, pruned_loss=0.1327, over 1583146.38 frames. ], batch size: 18, lr: 1.93e-02, grad_scale: 8.0
+2023-02-05 22:48:33,437 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9792, 2.3497, 3.8180, 2.8640, 2.8362, 2.2904, 1.3509, 1.6749],
+       device='cuda:0'), covar=tensor([0.0948, 0.1243, 0.0260, 0.0670, 0.0759, 0.0590, 0.0801, 0.1370],
+       device='cuda:0'), in_proj_covar=tensor([0.0655, 0.0563, 0.0488, 0.0538, 0.0663, 0.0525, 0.0539, 0.0550],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-05 22:48:38,091 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9197, 1.6453, 4.3244, 1.7699, 2.1911, 5.0347, 4.7732, 4.4122],
+       device='cuda:0'), covar=tensor([0.1322, 0.1477, 0.0290, 0.1929, 0.1055, 0.0221, 0.0295, 0.0511],
+       device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0254, 0.0204, 0.0261, 0.0206, 0.0182, 0.0185, 0.0259],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+       device='cuda:0')
+2023-02-05 22:49:06,963 INFO [train.py:901] (0/4) Epoch 4, batch 850, loss[loss=0.3446, simple_loss=0.3653, pruned_loss=0.162, over 8092.00 frames. ], tot_loss[loss=0.3187, simple_loss=0.3705, pruned_loss=0.1334, over 1589347.34 frames. ], batch size: 21, lr: 1.93e-02, grad_scale: 8.0
+2023-02-05 22:49:22,452 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 3.301e+02 4.277e+02 5.478e+02 1.022e+03, threshold=8.554e+02, percent-clipped=4.0
+2023-02-05 22:49:26,598 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25127.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:49:42,457 INFO [train.py:901] (0/4) Epoch 4, batch 900, loss[loss=0.3222, simple_loss=0.3745, pruned_loss=0.1349, over 8102.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3705, pruned_loss=0.1325, over 1597738.03 frames. ], batch size: 23, lr: 1.92e-02, grad_scale: 8.0
+2023-02-05 22:50:16,689 INFO [train.py:901] (0/4) Epoch 4, batch 950, loss[loss=0.3155, simple_loss=0.3571, pruned_loss=0.1369, over 7251.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3715, pruned_loss=0.1323, over 1606324.43 frames. ], batch size: 16, lr: 1.92e-02, grad_scale: 8.0
+2023-02-05 22:50:23,518 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25210.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:50:30,882 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.215e+02 3.501e+02 4.488e+02 5.717e+02 1.063e+03, threshold=8.976e+02, percent-clipped=5.0
+2023-02-05 22:50:40,875 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-05 22:50:46,559 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25242.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:50:51,546 INFO [train.py:901] (0/4) Epoch 4, batch 1000, loss[loss=0.3769, simple_loss=0.4167, pruned_loss=0.1686, over 8290.00 frames. ], tot_loss[loss=0.3192, simple_loss=0.3725, pruned_loss=0.133, over 1613793.48 frames. ], batch size: 23, lr: 1.92e-02, grad_scale: 8.0
+2023-02-05 22:51:12,233 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25280.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:51:13,323 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-05 22:51:25,837 INFO [train.py:901] (0/4) Epoch 4, batch 1050, loss[loss=0.3176, simple_loss=0.3755, pruned_loss=0.1298, over 8468.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3709, pruned_loss=0.1318, over 1612656.85 frames. ], batch size: 25, lr: 1.92e-02, grad_scale: 8.0
+2023-02-05 22:51:26,411 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-05 22:51:27,154 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:51:28,970 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25305.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:51:39,506 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 3.519e+02 4.399e+02 5.664e+02 1.146e+03, threshold=8.797e+02, percent-clipped=2.0
+2023-02-05 22:51:42,387 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25325.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:51:43,774 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:51:58,864 INFO [train.py:901] (0/4) Epoch 4, batch 1100, loss[loss=0.3309, simple_loss=0.3796, pruned_loss=0.1411, over 8460.00 frames. ], tot_loss[loss=0.3158, simple_loss=0.3693, pruned_loss=0.1312, over 1611694.60 frames. ], batch size: 27, lr: 1.92e-02, grad_scale: 8.0
+2023-02-05 22:52:04,504 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25357.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:52:34,677 INFO [train.py:901] (0/4) Epoch 4, batch 1150, loss[loss=0.3249, simple_loss=0.3665, pruned_loss=0.1417, over 7213.00 frames. ], tot_loss[loss=0.3144, simple_loss=0.3681, pruned_loss=0.1304, over 1612523.99 frames. ], batch size: 16, lr: 1.91e-02, grad_scale: 16.0
+2023-02-05 22:52:37,396 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-05 22:52:49,212 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.195e+02 3.278e+02 3.972e+02 4.649e+02 8.065e+02, threshold=7.944e+02, percent-clipped=0.0
+2023-02-05 22:53:06,097 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25446.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 22:53:08,498 INFO [train.py:901] (0/4) Epoch 4, batch 1200, loss[loss=0.2994, simple_loss=0.3591, pruned_loss=0.1199, over 8453.00 frames. ], tot_loss[loss=0.3151, simple_loss=0.369, pruned_loss=0.1306, over 1617751.36 frames. ], batch size: 29, lr: 1.91e-02, grad_scale: 16.0
+2023-02-05 22:53:23,223 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25472.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:53:42,023 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25498.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 22:53:43,174 INFO [train.py:901] (0/4) Epoch 4, batch 1250, loss[loss=0.2502, simple_loss=0.3012, pruned_loss=0.09958, over 7542.00 frames. ], tot_loss[loss=0.3145, simple_loss=0.368, pruned_loss=0.1305, over 1616771.68 frames. ], batch size: 18, lr: 1.91e-02, grad_scale: 16.0
], batch size: 18, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:57,802 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 3.538e+02 4.328e+02 6.105e+02 1.271e+03, threshold=8.657e+02, percent-clipped=4.0 +2023-02-05 22:53:59,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25523.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:18,013 INFO [train.py:901] (0/4) Epoch 4, batch 1300, loss[loss=0.3413, simple_loss=0.3943, pruned_loss=0.1441, over 8247.00 frames. ], tot_loss[loss=0.314, simple_loss=0.3678, pruned_loss=0.1301, over 1614779.22 frames. ], batch size: 24, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:32,102 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. limit=2.0 +2023-02-05 22:54:39,383 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25581.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:46,394 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 22:54:53,274 INFO [train.py:901] (0/4) Epoch 4, batch 1350, loss[loss=0.3429, simple_loss=0.393, pruned_loss=0.1463, over 8238.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3675, pruned_loss=0.1291, over 1619590.90 frames. ], batch size: 24, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:57,489 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:55:08,863 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.283e+02 4.098e+02 5.393e+02 1.175e+03, threshold=8.196e+02, percent-clipped=3.0 +2023-02-05 22:55:15,332 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3476, 1.5124, 1.2830, 1.9548, 0.7640, 1.0924, 1.1820, 1.5125], + device='cuda:0'), covar=tensor([0.1167, 0.1237, 0.1945, 0.0679, 0.1915, 0.2594, 0.1538, 0.1263], + device='cuda:0'), in_proj_covar=tensor([0.0286, 0.0297, 0.0313, 0.0223, 0.0278, 0.0306, 0.0314, 0.0293], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 22:55:28,882 INFO [train.py:901] (0/4) Epoch 4, batch 1400, loss[loss=0.272, simple_loss=0.3268, pruned_loss=0.1086, over 7970.00 frames. ], tot_loss[loss=0.3146, simple_loss=0.3683, pruned_loss=0.1304, over 1615165.97 frames. ], batch size: 21, lr: 1.91e-02, grad_scale: 8.0 +2023-02-05 22:55:29,831 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5009, 2.0023, 2.0884, 0.5888, 2.1540, 1.3092, 0.5473, 1.7190], + device='cuda:0'), covar=tensor([0.0167, 0.0080, 0.0096, 0.0188, 0.0104, 0.0315, 0.0234, 0.0083], + device='cuda:0'), in_proj_covar=tensor([0.0256, 0.0184, 0.0146, 0.0226, 0.0169, 0.0301, 0.0252, 0.0211], + device='cuda:0'), out_proj_covar=tensor([1.0935e-04, 7.7768e-05, 6.0936e-05, 9.3158e-05, 7.2259e-05, 1.3835e-04, + 1.0928e-04, 8.8565e-05], device='cuda:0') +2023-02-05 22:55:43,751 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 22:56:03,149 INFO [train.py:901] (0/4) Epoch 4, batch 1450, loss[loss=0.305, simple_loss=0.3505, pruned_loss=0.1297, over 8090.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3665, pruned_loss=0.1296, over 1612101.62 frames. ], batch size: 21, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:05,833 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-05 22:56:18,895 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.243e+02 3.964e+02 4.847e+02 1.034e+03, threshold=7.929e+02, percent-clipped=2.0 +2023-02-05 22:56:23,169 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:56:38,593 INFO [train.py:901] (0/4) Epoch 4, batch 1500, loss[loss=0.2856, simple_loss=0.3415, pruned_loss=0.1149, over 8127.00 frames. ], tot_loss[loss=0.3128, simple_loss=0.3668, pruned_loss=0.1294, over 1611400.37 frames. ], batch size: 22, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:40,766 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:57:02,622 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2865, 4.2025, 3.7787, 1.5774, 3.8059, 3.6188, 4.0514, 3.3121], + device='cuda:0'), covar=tensor([0.0949, 0.0543, 0.0869, 0.4639, 0.0662, 0.0741, 0.1052, 0.0789], + device='cuda:0'), in_proj_covar=tensor([0.0380, 0.0254, 0.0300, 0.0381, 0.0294, 0.0236, 0.0280, 0.0226], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 22:57:05,978 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25790.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:57:12,690 INFO [train.py:901] (0/4) Epoch 4, batch 1550, loss[loss=0.2926, simple_loss=0.3539, pruned_loss=0.1157, over 8136.00 frames. ], tot_loss[loss=0.3126, simple_loss=0.3666, pruned_loss=0.1292, over 1614361.90 frames. ], batch size: 22, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:57:27,004 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 3.100e+02 3.836e+02 5.066e+02 1.009e+03, threshold=7.672e+02, percent-clipped=5.0 +2023-02-05 22:57:46,730 INFO [train.py:901] (0/4) Epoch 4, batch 1600, loss[loss=0.3302, simple_loss=0.3963, pruned_loss=0.1321, over 8569.00 frames. ], tot_loss[loss=0.3143, simple_loss=0.3675, pruned_loss=0.1306, over 1610026.07 frames. ], batch size: 31, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:04,744 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25876.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:58:21,015 INFO [train.py:901] (0/4) Epoch 4, batch 1650, loss[loss=0.3737, simple_loss=0.4187, pruned_loss=0.1643, over 8433.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.3689, pruned_loss=0.1315, over 1611805.57 frames. ], batch size: 29, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:24,577 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25905.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:58:35,944 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.823e+02 4.768e+02 5.766e+02 1.707e+03, threshold=9.535e+02, percent-clipped=9.0 +2023-02-05 22:58:56,110 INFO [train.py:901] (0/4) Epoch 4, batch 1700, loss[loss=0.3093, simple_loss=0.3551, pruned_loss=0.1317, over 7792.00 frames. ], tot_loss[loss=0.3183, simple_loss=0.3704, pruned_loss=0.1331, over 1612277.88 frames. ], batch size: 19, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:59:14,216 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.57 vs. 
limit=5.0 +2023-02-05 22:59:31,297 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-26000.pt +2023-02-05 22:59:32,226 INFO [train.py:901] (0/4) Epoch 4, batch 1750, loss[loss=0.2731, simple_loss=0.3326, pruned_loss=0.1068, over 8248.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3694, pruned_loss=0.132, over 1616882.45 frames. ], batch size: 24, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 22:59:47,004 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.187e+02 3.816e+02 4.801e+02 8.317e+02, threshold=7.632e+02, percent-clipped=0.0 +2023-02-05 23:00:06,096 INFO [train.py:901] (0/4) Epoch 4, batch 1800, loss[loss=0.3253, simple_loss=0.3695, pruned_loss=0.1405, over 8132.00 frames. ], tot_loss[loss=0.3156, simple_loss=0.369, pruned_loss=0.1311, over 1618350.92 frames. ], batch size: 22, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:30,841 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0520, 2.5365, 1.8587, 2.9989, 1.5039, 1.3295, 1.8502, 2.5553], + device='cuda:0'), covar=tensor([0.1004, 0.1280, 0.1591, 0.0429, 0.1823, 0.2532, 0.1678, 0.0922], + device='cuda:0'), in_proj_covar=tensor([0.0290, 0.0289, 0.0301, 0.0222, 0.0270, 0.0304, 0.0311, 0.0280], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:0') +2023-02-05 23:00:41,281 INFO [train.py:901] (0/4) Epoch 4, batch 1850, loss[loss=0.3001, simple_loss=0.3569, pruned_loss=0.1216, over 8105.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.3693, pruned_loss=0.1312, over 1618219.34 frames. ], batch size: 23, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:55,431 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:00:56,607 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.260e+02 3.379e+02 4.261e+02 5.084e+02 1.608e+03, threshold=8.521e+02, percent-clipped=6.0 +2023-02-05 23:01:15,405 INFO [train.py:901] (0/4) Epoch 4, batch 1900, loss[loss=0.3249, simple_loss=0.3867, pruned_loss=0.1315, over 8623.00 frames. ], tot_loss[loss=0.3155, simple_loss=0.3693, pruned_loss=0.1308, over 1621379.56 frames. ], batch size: 34, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:16,829 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1415, 2.5932, 1.9609, 3.0690, 1.5528, 1.4913, 1.7932, 2.6959], + device='cuda:0'), covar=tensor([0.1213, 0.1358, 0.1746, 0.0459, 0.1965, 0.2565, 0.2038, 0.0996], + device='cuda:0'), in_proj_covar=tensor([0.0291, 0.0291, 0.0302, 0.0225, 0.0272, 0.0305, 0.0312, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:01:22,866 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26161.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:01:36,364 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-05 23:01:40,595 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:01:40,657 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26186.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:01:41,086 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-05 23:01:49,747 INFO [train.py:901] (0/4) Epoch 4, batch 1950, loss[loss=0.359, simple_loss=0.389, pruned_loss=0.1646, over 8489.00 frames. ], tot_loss[loss=0.3165, simple_loss=0.3701, pruned_loss=0.1314, over 1622916.16 frames. ], batch size: 49, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:52,432 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 23:02:04,041 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26220.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:02:05,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.303e+02 3.684e+02 4.572e+02 6.046e+02 1.247e+03, threshold=9.144e+02, percent-clipped=2.0 +2023-02-05 23:02:10,411 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 23:02:24,334 INFO [train.py:901] (0/4) Epoch 4, batch 2000, loss[loss=0.3458, simple_loss=0.3958, pruned_loss=0.1479, over 8194.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.371, pruned_loss=0.1326, over 1621107.20 frames. ], batch size: 23, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:02:26,518 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.42 vs. limit=5.0 +2023-02-05 23:02:34,556 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6775, 1.2771, 3.8819, 1.3362, 3.3618, 3.2491, 3.4527, 3.3649], + device='cuda:0'), covar=tensor([0.0455, 0.2875, 0.0344, 0.2015, 0.0920, 0.0518, 0.0451, 0.0574], + device='cuda:0'), in_proj_covar=tensor([0.0278, 0.0444, 0.0339, 0.0361, 0.0429, 0.0361, 0.0342, 0.0383], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:02:36,646 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:02:59,488 INFO [train.py:901] (0/4) Epoch 4, batch 2050, loss[loss=0.2966, simple_loss=0.3385, pruned_loss=0.1273, over 7424.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3701, pruned_loss=0.132, over 1618498.10 frames. ], batch size: 17, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:03:14,379 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.223e+02 3.433e+02 4.198e+02 5.260e+02 1.263e+03, threshold=8.396e+02, percent-clipped=5.0 +2023-02-05 23:03:24,275 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26335.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:03:34,742 INFO [train.py:901] (0/4) Epoch 4, batch 2100, loss[loss=0.2446, simple_loss=0.3033, pruned_loss=0.0929, over 7234.00 frames. ], tot_loss[loss=0.3143, simple_loss=0.368, pruned_loss=0.1303, over 1615312.77 frames. ], batch size: 16, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:08,222 INFO [train.py:901] (0/4) Epoch 4, batch 2150, loss[loss=0.3357, simple_loss=0.3989, pruned_loss=0.1362, over 8595.00 frames. ], tot_loss[loss=0.3141, simple_loss=0.368, pruned_loss=0.1301, over 1616672.10 frames. 
], batch size: 31, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:24,393 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.431e+02 3.407e+02 4.210e+02 5.616e+02 1.521e+03, threshold=8.419e+02, percent-clipped=4.0 +2023-02-05 23:04:31,149 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:04:43,606 INFO [train.py:901] (0/4) Epoch 4, batch 2200, loss[loss=0.3557, simple_loss=0.3941, pruned_loss=0.1587, over 8580.00 frames. ], tot_loss[loss=0.3127, simple_loss=0.3668, pruned_loss=0.1293, over 1617289.32 frames. ], batch size: 39, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:50,514 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6304, 1.9575, 3.1544, 1.1533, 2.3820, 1.8697, 1.5873, 1.9369], + device='cuda:0'), covar=tensor([0.1136, 0.1265, 0.0429, 0.2578, 0.0964, 0.1774, 0.1116, 0.1564], + device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0414, 0.0506, 0.0508, 0.0550, 0.0488, 0.0437, 0.0559], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 23:04:53,172 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:18,116 INFO [train.py:901] (0/4) Epoch 4, batch 2250, loss[loss=0.3253, simple_loss=0.3827, pruned_loss=0.134, over 8360.00 frames. ], tot_loss[loss=0.3094, simple_loss=0.3643, pruned_loss=0.1273, over 1614718.16 frames. ], batch size: 24, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:05:33,091 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.188e+02 3.857e+02 4.748e+02 9.287e+02, threshold=7.714e+02, percent-clipped=1.0 +2023-02-05 23:05:35,873 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4297, 1.2661, 4.6050, 1.8053, 3.8995, 3.8235, 4.1846, 3.9713], + device='cuda:0'), covar=tensor([0.0339, 0.3407, 0.0243, 0.1962, 0.0931, 0.0480, 0.0344, 0.0474], + device='cuda:0'), in_proj_covar=tensor([0.0277, 0.0446, 0.0345, 0.0361, 0.0432, 0.0360, 0.0347, 0.0385], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:05:38,937 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26530.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:52,824 INFO [train.py:901] (0/4) Epoch 4, batch 2300, loss[loss=0.2722, simple_loss=0.3418, pruned_loss=0.1013, over 7813.00 frames. ], tot_loss[loss=0.3104, simple_loss=0.365, pruned_loss=0.1279, over 1615504.07 frames. ], batch size: 20, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:12,944 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:21,879 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26591.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:06:27,671 INFO [train.py:901] (0/4) Epoch 4, batch 2350, loss[loss=0.3024, simple_loss=0.3515, pruned_loss=0.1267, over 7285.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3673, pruned_loss=0.1292, over 1620412.48 frames. 
], batch size: 16, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:35,933 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:38,730 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26616.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:06:42,411 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.505e+02 4.841e+02 5.770e+02 1.247e+03, threshold=9.683e+02, percent-clipped=6.0 +2023-02-05 23:06:58,649 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26645.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:59,875 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5035, 2.8720, 1.6894, 2.0651, 2.3428, 1.5219, 2.1080, 2.1419], + device='cuda:0'), covar=tensor([0.1260, 0.0356, 0.0921, 0.0683, 0.0555, 0.1126, 0.0813, 0.0744], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0241, 0.0316, 0.0311, 0.0322, 0.0311, 0.0336, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 23:07:01,511 INFO [train.py:901] (0/4) Epoch 4, batch 2400, loss[loss=0.3966, simple_loss=0.4268, pruned_loss=0.1832, over 8241.00 frames. ], tot_loss[loss=0.3146, simple_loss=0.3686, pruned_loss=0.1303, over 1618613.13 frames. ], batch size: 24, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:14,820 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-05 23:07:37,137 INFO [train.py:901] (0/4) Epoch 4, batch 2450, loss[loss=0.3619, simple_loss=0.4017, pruned_loss=0.161, over 8499.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.3693, pruned_loss=0.1305, over 1618243.62 frames. ], batch size: 39, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:42,981 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-05 23:07:51,855 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.211e+02 4.300e+02 5.616e+02 1.854e+03, threshold=8.599e+02, percent-clipped=7.0 +2023-02-05 23:07:55,328 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26727.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:10,589 INFO [train.py:901] (0/4) Epoch 4, batch 2500, loss[loss=0.2539, simple_loss=0.3197, pruned_loss=0.09401, over 7970.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3676, pruned_loss=0.1291, over 1619258.18 frames. ], batch size: 21, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:08:29,254 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:45,264 INFO [train.py:901] (0/4) Epoch 4, batch 2550, loss[loss=0.2797, simple_loss=0.3386, pruned_loss=0.1103, over 7540.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3666, pruned_loss=0.1287, over 1617811.07 frames. 
], batch size: 18, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:09:01,302 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.235e+02 3.301e+02 4.146e+02 5.074e+02 1.055e+03, threshold=8.293e+02, percent-clipped=2.0 +2023-02-05 23:09:06,365 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4898, 1.1496, 1.4224, 1.0948, 1.0749, 1.2668, 1.1689, 1.3382], + device='cuda:0'), covar=tensor([0.0728, 0.1606, 0.2429, 0.1728, 0.0719, 0.2049, 0.0906, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0161, 0.0199, 0.0240, 0.0201, 0.0160, 0.0203, 0.0166, 0.0166], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0007, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:09:10,581 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26835.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:20,517 INFO [train.py:901] (0/4) Epoch 4, batch 2600, loss[loss=0.3068, simple_loss=0.36, pruned_loss=0.1268, over 8285.00 frames. ], tot_loss[loss=0.3126, simple_loss=0.3667, pruned_loss=0.1292, over 1616102.24 frames. ], batch size: 23, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:27,742 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26860.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:41,431 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5057, 2.0096, 2.2150, 0.7425, 2.3092, 1.5434, 0.6662, 1.8290], + device='cuda:0'), covar=tensor([0.0191, 0.0089, 0.0080, 0.0172, 0.0112, 0.0285, 0.0250, 0.0078], + device='cuda:0'), in_proj_covar=tensor([0.0261, 0.0187, 0.0154, 0.0227, 0.0175, 0.0309, 0.0251, 0.0210], + device='cuda:0'), out_proj_covar=tensor([1.0952e-04, 7.7415e-05, 6.2289e-05, 9.2108e-05, 7.4025e-05, 1.3876e-04, + 1.0655e-04, 8.5655e-05], device='cuda:0') +2023-02-05 23:09:50,377 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:56,440 INFO [train.py:901] (0/4) Epoch 4, batch 2650, loss[loss=0.3063, simple_loss=0.3759, pruned_loss=0.1184, over 8731.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3678, pruned_loss=0.129, over 1621998.69 frames. ], batch size: 34, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:57,292 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:12,343 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 3.245e+02 3.916e+02 5.024e+02 1.006e+03, threshold=7.831e+02, percent-clipped=3.0 +2023-02-05 23:10:12,526 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26922.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:10:15,347 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26926.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:32,094 INFO [train.py:901] (0/4) Epoch 4, batch 2700, loss[loss=0.2947, simple_loss=0.3628, pruned_loss=0.1134, over 8446.00 frames. ], tot_loss[loss=0.3139, simple_loss=0.3681, pruned_loss=0.1298, over 1619090.98 frames. 
], batch size: 27, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:10:44,338 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26968.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:54,311 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26983.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:06,043 INFO [train.py:901] (0/4) Epoch 4, batch 2750, loss[loss=0.303, simple_loss=0.3504, pruned_loss=0.1278, over 7977.00 frames. ], tot_loss[loss=0.3145, simple_loss=0.3683, pruned_loss=0.1304, over 1618508.38 frames. ], batch size: 21, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:07,092 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-05 23:11:12,373 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:21,393 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 3.589e+02 4.354e+02 5.460e+02 1.197e+03, threshold=8.707e+02, percent-clipped=9.0 +2023-02-05 23:11:40,850 INFO [train.py:901] (0/4) Epoch 4, batch 2800, loss[loss=0.3826, simple_loss=0.4091, pruned_loss=0.178, over 7107.00 frames. ], tot_loss[loss=0.3147, simple_loss=0.3684, pruned_loss=0.1305, over 1616026.55 frames. ], batch size: 71, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:53,133 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5818, 1.9279, 3.2338, 2.7568, 2.6156, 2.0218, 1.3821, 1.2701], + device='cuda:0'), covar=tensor([0.1136, 0.1528, 0.0310, 0.0596, 0.0660, 0.0629, 0.0833, 0.1459], + device='cuda:0'), in_proj_covar=tensor([0.0667, 0.0596, 0.0498, 0.0559, 0.0678, 0.0543, 0.0551, 0.0561], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:12:14,849 INFO [train.py:901] (0/4) Epoch 4, batch 2850, loss[loss=0.3709, simple_loss=0.4142, pruned_loss=0.1638, over 8196.00 frames. ], tot_loss[loss=0.3161, simple_loss=0.3693, pruned_loss=0.1314, over 1614834.72 frames. ], batch size: 23, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:12:30,251 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 3.374e+02 4.464e+02 5.831e+02 1.992e+03, threshold=8.927e+02, percent-clipped=6.0 +2023-02-05 23:12:47,561 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:12:49,278 INFO [train.py:901] (0/4) Epoch 4, batch 2900, loss[loss=0.2792, simple_loss=0.3381, pruned_loss=0.1102, over 7976.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.37, pruned_loss=0.1323, over 1614193.18 frames. ], batch size: 21, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:00,613 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27166.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:05,273 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:11,802 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 23:13:24,193 INFO [train.py:901] (0/4) Epoch 4, batch 2950, loss[loss=0.2551, simple_loss=0.3314, pruned_loss=0.08938, over 8112.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3706, pruned_loss=0.1324, over 1615551.55 frames. 
], batch size: 23, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:38,737 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 3.092e+02 3.649e+02 5.055e+02 1.216e+03, threshold=7.299e+02, percent-clipped=3.0 +2023-02-05 23:13:58,840 INFO [train.py:901] (0/4) Epoch 4, batch 3000, loss[loss=0.4059, simple_loss=0.4328, pruned_loss=0.1894, over 8529.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3678, pruned_loss=0.1297, over 1616361.02 frames. ], batch size: 49, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:58,840 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 23:14:05,912 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6665, 1.8439, 1.4963, 2.1924, 1.3166, 1.2971, 1.5652, 1.8906], + device='cuda:0'), covar=tensor([0.1049, 0.1164, 0.1676, 0.0681, 0.1626, 0.2223, 0.1424, 0.1068], + device='cuda:0'), in_proj_covar=tensor([0.0283, 0.0288, 0.0306, 0.0225, 0.0268, 0.0295, 0.0307, 0.0276], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:14:11,268 INFO [train.py:935] (0/4) Epoch 4, validation: loss=0.2374, simple_loss=0.3304, pruned_loss=0.07225, over 944034.00 frames. +2023-02-05 23:14:11,268 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-05 23:14:23,004 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27266.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:14:31,389 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 23:14:45,719 INFO [train.py:901] (0/4) Epoch 4, batch 3050, loss[loss=0.2767, simple_loss=0.3388, pruned_loss=0.1074, over 8247.00 frames. ], tot_loss[loss=0.3135, simple_loss=0.3679, pruned_loss=0.1295, over 1621353.84 frames. ], batch size: 22, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:14:54,658 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27312.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:15:01,945 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.415e+02 4.317e+02 5.768e+02 1.933e+03, threshold=8.634e+02, percent-clipped=10.0 +2023-02-05 23:15:06,198 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4678, 1.8171, 3.1506, 1.0839, 2.2279, 1.6757, 1.5508, 1.7476], + device='cuda:0'), covar=tensor([0.1442, 0.1728, 0.0566, 0.2851, 0.1263, 0.2268, 0.1239, 0.2044], + device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0422, 0.0503, 0.0510, 0.0561, 0.0492, 0.0432, 0.0566], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 23:15:20,639 INFO [train.py:901] (0/4) Epoch 4, batch 3100, loss[loss=0.3292, simple_loss=0.3819, pruned_loss=0.1383, over 8606.00 frames. ], tot_loss[loss=0.3145, simple_loss=0.3684, pruned_loss=0.1303, over 1616901.27 frames. ], batch size: 31, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:15:41,818 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27381.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:15:54,967 INFO [train.py:901] (0/4) Epoch 4, batch 3150, loss[loss=0.2584, simple_loss=0.317, pruned_loss=0.09993, over 7815.00 frames. ], tot_loss[loss=0.3144, simple_loss=0.3682, pruned_loss=0.1303, over 1615903.92 frames. 
], batch size: 20, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:16:09,488 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 3.237e+02 4.041e+02 5.193e+02 1.210e+03, threshold=8.082e+02, percent-clipped=3.0 +2023-02-05 23:16:13,654 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:16:29,608 INFO [train.py:901] (0/4) Epoch 4, batch 3200, loss[loss=0.3078, simple_loss=0.3478, pruned_loss=0.1339, over 7808.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3672, pruned_loss=0.1293, over 1612742.18 frames. ], batch size: 19, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:17:03,119 INFO [train.py:901] (0/4) Epoch 4, batch 3250, loss[loss=0.3763, simple_loss=0.4247, pruned_loss=0.164, over 8130.00 frames. ], tot_loss[loss=0.3135, simple_loss=0.3676, pruned_loss=0.1297, over 1616194.49 frames. ], batch size: 22, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:17:10,576 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:17:18,427 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 3.449e+02 4.059e+02 4.930e+02 7.939e+02, threshold=8.117e+02, percent-clipped=0.0 +2023-02-05 23:17:19,284 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3957, 2.5138, 1.5702, 2.2140, 1.9043, 1.3722, 1.6648, 1.9869], + device='cuda:0'), covar=tensor([0.1005, 0.0293, 0.0809, 0.0506, 0.0588, 0.1013, 0.0922, 0.0616], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0239, 0.0308, 0.0299, 0.0322, 0.0307, 0.0332, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 23:17:26,726 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3188, 1.6952, 1.6367, 0.6866, 1.6602, 1.2813, 0.2747, 1.6125], + device='cuda:0'), covar=tensor([0.0152, 0.0088, 0.0090, 0.0144, 0.0112, 0.0277, 0.0250, 0.0064], + device='cuda:0'), in_proj_covar=tensor([0.0266, 0.0194, 0.0159, 0.0230, 0.0183, 0.0313, 0.0253, 0.0213], + device='cuda:0'), out_proj_covar=tensor([1.1027e-04, 7.9632e-05, 6.3276e-05, 9.1527e-05, 7.6609e-05, 1.3842e-04, + 1.0639e-04, 8.5385e-05], device='cuda:0') +2023-02-05 23:17:37,483 INFO [train.py:901] (0/4) Epoch 4, batch 3300, loss[loss=0.3615, simple_loss=0.4156, pruned_loss=0.1537, over 8523.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3665, pruned_loss=0.1288, over 1616911.88 frames. ], batch size: 34, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:12,408 INFO [train.py:901] (0/4) Epoch 4, batch 3350, loss[loss=0.2725, simple_loss=0.3387, pruned_loss=0.1031, over 8307.00 frames. ], tot_loss[loss=0.3119, simple_loss=0.3662, pruned_loss=0.1289, over 1611583.10 frames. ], batch size: 23, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:28,385 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 3.326e+02 4.176e+02 5.439e+02 1.733e+03, threshold=8.353e+02, percent-clipped=9.0 +2023-02-05 23:18:30,472 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27625.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:18:38,342 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27637.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:18:46,882 INFO [train.py:901] (0/4) Epoch 4, batch 3400, loss[loss=0.3356, simple_loss=0.3869, pruned_loss=0.1421, over 8297.00 frames. 
], tot_loss[loss=0.3136, simple_loss=0.3673, pruned_loss=0.13, over 1609260.94 frames. ], batch size: 23, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:18:54,513 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9143, 2.4480, 2.8274, 1.0878, 2.7001, 1.9329, 1.3935, 1.9507], + device='cuda:0'), covar=tensor([0.0254, 0.0104, 0.0120, 0.0182, 0.0109, 0.0236, 0.0258, 0.0115], + device='cuda:0'), in_proj_covar=tensor([0.0267, 0.0195, 0.0157, 0.0227, 0.0181, 0.0312, 0.0254, 0.0216], + device='cuda:0'), out_proj_covar=tensor([1.1066e-04, 8.0368e-05, 6.2371e-05, 9.0077e-05, 7.5706e-05, 1.3771e-04, + 1.0684e-04, 8.6755e-05], device='cuda:0') +2023-02-05 23:18:55,787 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27662.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:18:56,358 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6851, 2.5665, 1.8230, 3.0679, 1.1870, 1.4164, 1.5608, 2.5319], + device='cuda:0'), covar=tensor([0.1477, 0.1206, 0.2052, 0.0567, 0.2045, 0.2622, 0.1765, 0.0997], + device='cuda:0'), in_proj_covar=tensor([0.0279, 0.0292, 0.0308, 0.0226, 0.0270, 0.0297, 0.0309, 0.0278], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:19:10,310 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27683.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:21,465 INFO [train.py:901] (0/4) Epoch 4, batch 3450, loss[loss=0.3061, simple_loss=0.3745, pruned_loss=0.1189, over 8325.00 frames. ], tot_loss[loss=0.3124, simple_loss=0.3666, pruned_loss=0.1292, over 1612601.63 frames. ], batch size: 25, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:19:26,937 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27708.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:36,056 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.499e+02 3.357e+02 4.072e+02 5.275e+02 9.264e+02, threshold=8.144e+02, percent-clipped=1.0 +2023-02-05 23:19:43,534 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27732.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:19:55,901 INFO [train.py:901] (0/4) Epoch 4, batch 3500, loss[loss=0.3862, simple_loss=0.4084, pruned_loss=0.182, over 8234.00 frames. ], tot_loss[loss=0.3126, simple_loss=0.3666, pruned_loss=0.1293, over 1614711.39 frames. ], batch size: 49, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:10,685 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 23:20:31,241 INFO [train.py:901] (0/4) Epoch 4, batch 3550, loss[loss=0.2853, simple_loss=0.3465, pruned_loss=0.112, over 8630.00 frames. ], tot_loss[loss=0.3111, simple_loss=0.3657, pruned_loss=0.1283, over 1616239.32 frames. ], batch size: 49, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:46,086 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.262e+02 3.955e+02 5.254e+02 1.114e+03, threshold=7.909e+02, percent-clipped=8.0 +2023-02-05 23:21:05,474 INFO [train.py:901] (0/4) Epoch 4, batch 3600, loss[loss=0.2809, simple_loss=0.3463, pruned_loss=0.1078, over 8470.00 frames. ], tot_loss[loss=0.3123, simple_loss=0.3664, pruned_loss=0.1291, over 1616814.71 frames. 
], batch size: 25, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:27,347 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27881.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:39,931 INFO [train.py:901] (0/4) Epoch 4, batch 3650, loss[loss=0.2788, simple_loss=0.3504, pruned_loss=0.1036, over 8300.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3658, pruned_loss=0.1283, over 1614084.76 frames. ], batch size: 23, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:44,903 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27906.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:56,103 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.998e+02 3.334e+02 3.945e+02 4.811e+02 1.062e+03, threshold=7.891e+02, percent-clipped=4.0 +2023-02-05 23:22:13,486 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:22:14,780 INFO [train.py:901] (0/4) Epoch 4, batch 3700, loss[loss=0.2702, simple_loss=0.3302, pruned_loss=0.1051, over 7706.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.3664, pruned_loss=0.1287, over 1617046.20 frames. ], batch size: 18, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:22:49,711 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-28000.pt +2023-02-05 23:22:50,647 INFO [train.py:901] (0/4) Epoch 4, batch 3750, loss[loss=0.3347, simple_loss=0.3815, pruned_loss=0.1439, over 7970.00 frames. ], tot_loss[loss=0.3115, simple_loss=0.3659, pruned_loss=0.1285, over 1612499.94 frames. ], batch size: 21, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:05,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.245e+02 3.553e+02 4.442e+02 6.055e+02 1.985e+03, threshold=8.883e+02, percent-clipped=11.0 +2023-02-05 23:23:09,663 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0 +2023-02-05 23:23:23,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5961, 1.9060, 2.0850, 1.7413, 1.0349, 1.9822, 0.1406, 1.2312], + device='cuda:0'), covar=tensor([0.4073, 0.2906, 0.1033, 0.2284, 0.7932, 0.1123, 0.6958, 0.2606], + device='cuda:0'), in_proj_covar=tensor([0.0125, 0.0111, 0.0084, 0.0160, 0.0194, 0.0081, 0.0150, 0.0119], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:23:25,244 INFO [train.py:901] (0/4) Epoch 4, batch 3800, loss[loss=0.3356, simple_loss=0.3623, pruned_loss=0.1544, over 7421.00 frames. ], tot_loss[loss=0.3119, simple_loss=0.3659, pruned_loss=0.129, over 1611927.58 frames. 
], batch size: 17, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:25,396 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5007, 1.3975, 4.6866, 1.7064, 4.0227, 3.9025, 4.1040, 4.0623], + device='cuda:0'), covar=tensor([0.0415, 0.3507, 0.0304, 0.2079, 0.1030, 0.0566, 0.0471, 0.0476], + device='cuda:0'), in_proj_covar=tensor([0.0285, 0.0459, 0.0355, 0.0370, 0.0442, 0.0367, 0.0357, 0.0393], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:23:26,807 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6079, 1.9839, 2.2158, 0.7120, 2.2467, 1.4556, 0.5694, 1.7851], + device='cuda:0'), covar=tensor([0.0158, 0.0081, 0.0077, 0.0184, 0.0128, 0.0259, 0.0236, 0.0078], + device='cuda:0'), in_proj_covar=tensor([0.0264, 0.0197, 0.0156, 0.0230, 0.0185, 0.0315, 0.0255, 0.0217], + device='cuda:0'), out_proj_covar=tensor([1.0842e-04, 8.0473e-05, 6.0952e-05, 9.1517e-05, 7.6514e-05, 1.3804e-04, + 1.0640e-04, 8.6654e-05], device='cuda:0') +2023-02-05 23:23:42,794 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28076.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:24:00,297 INFO [train.py:901] (0/4) Epoch 4, batch 3850, loss[loss=0.3176, simple_loss=0.3857, pruned_loss=0.1248, over 8337.00 frames. ], tot_loss[loss=0.3117, simple_loss=0.3659, pruned_loss=0.1287, over 1617595.34 frames. ], batch size: 25, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:24:15,217 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 3.238e+02 4.124e+02 5.182e+02 9.210e+02, threshold=8.247e+02, percent-clipped=1.0 +2023-02-05 23:24:17,316 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 23:24:34,697 INFO [train.py:901] (0/4) Epoch 4, batch 3900, loss[loss=0.2892, simple_loss=0.3477, pruned_loss=0.1153, over 7664.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3662, pruned_loss=0.1283, over 1620926.04 frames. ], batch size: 19, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:24:43,612 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3529, 1.4548, 1.5653, 0.8970, 1.4600, 1.1248, 0.6096, 1.4045], + device='cuda:0'), covar=tensor([0.0115, 0.0071, 0.0045, 0.0105, 0.0074, 0.0194, 0.0190, 0.0054], + device='cuda:0'), in_proj_covar=tensor([0.0263, 0.0190, 0.0155, 0.0226, 0.0184, 0.0311, 0.0250, 0.0216], + device='cuda:0'), out_proj_covar=tensor([1.0746e-04, 7.6893e-05, 6.0238e-05, 8.9438e-05, 7.5971e-05, 1.3585e-04, + 1.0406e-04, 8.6044e-05], device='cuda:0') +2023-02-05 23:25:02,700 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28191.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:25:08,703 INFO [train.py:901] (0/4) Epoch 4, batch 3950, loss[loss=0.3672, simple_loss=0.4144, pruned_loss=0.16, over 8467.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3668, pruned_loss=0.1291, over 1618451.83 frames. ], batch size: 39, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:24,839 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.357e+02 4.080e+02 5.453e+02 1.389e+03, threshold=8.161e+02, percent-clipped=8.0 +2023-02-05 23:25:41,184 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:25:43,098 INFO [train.py:901] (0/4) Epoch 4, batch 4000, loss[loss=0.2897, simple_loss=0.3771, pruned_loss=0.1011, over 8356.00 frames. 
], tot_loss[loss=0.312, simple_loss=0.3668, pruned_loss=0.1286, over 1617842.15 frames. ], batch size: 24, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:59,930 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28273.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:26:01,392 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4423, 2.0972, 3.5039, 1.0903, 2.4148, 1.6049, 1.6335, 2.0645], + device='cuda:0'), covar=tensor([0.1296, 0.1392, 0.0589, 0.2557, 0.1176, 0.2108, 0.1146, 0.1848], + device='cuda:0'), in_proj_covar=tensor([0.0460, 0.0432, 0.0514, 0.0513, 0.0577, 0.0498, 0.0439, 0.0575], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 23:26:17,610 INFO [train.py:901] (0/4) Epoch 4, batch 4050, loss[loss=0.3006, simple_loss=0.3538, pruned_loss=0.1238, over 8337.00 frames. ], tot_loss[loss=0.3122, simple_loss=0.3669, pruned_loss=0.1287, over 1617992.21 frames. ], batch size: 25, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:26:21,154 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:26:34,450 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.482e+02 4.201e+02 5.400e+02 1.078e+03, threshold=8.403e+02, percent-clipped=4.0 +2023-02-05 23:26:47,080 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4737, 2.0205, 2.2293, 1.0143, 2.2212, 1.3579, 0.7557, 1.7190], + device='cuda:0'), covar=tensor([0.0203, 0.0085, 0.0076, 0.0168, 0.0131, 0.0280, 0.0241, 0.0089], + device='cuda:0'), in_proj_covar=tensor([0.0266, 0.0194, 0.0155, 0.0231, 0.0188, 0.0313, 0.0253, 0.0220], + device='cuda:0'), out_proj_covar=tensor([1.0823e-04, 7.8485e-05, 5.9572e-05, 9.1054e-05, 7.7504e-05, 1.3668e-04, + 1.0471e-04, 8.6932e-05], device='cuda:0') +2023-02-05 23:26:50,717 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 23:26:52,366 INFO [train.py:901] (0/4) Epoch 4, batch 4100, loss[loss=0.3202, simple_loss=0.3731, pruned_loss=0.1336, over 8142.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.3656, pruned_loss=0.1278, over 1614006.99 frames. ], batch size: 22, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:27:27,460 INFO [train.py:901] (0/4) Epoch 4, batch 4150, loss[loss=0.3814, simple_loss=0.4106, pruned_loss=0.1761, over 8512.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.3655, pruned_loss=0.1278, over 1615798.49 frames. ], batch size: 28, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:27:43,618 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.222e+02 3.372e+02 4.170e+02 5.520e+02 1.384e+03, threshold=8.341e+02, percent-clipped=6.0 +2023-02-05 23:27:56,827 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-05 23:28:00,676 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28447.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:28:02,476 INFO [train.py:901] (0/4) Epoch 4, batch 4200, loss[loss=0.3266, simple_loss=0.3884, pruned_loss=0.1324, over 8337.00 frames. ], tot_loss[loss=0.3092, simple_loss=0.3643, pruned_loss=0.127, over 1616100.94 frames. ], batch size: 25, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:07,668 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-05 23:28:17,496 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28472.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:28:29,058 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 23:28:36,444 INFO [train.py:901] (0/4) Epoch 4, batch 4250, loss[loss=0.2934, simple_loss=0.3427, pruned_loss=0.122, over 7521.00 frames. ], tot_loss[loss=0.3092, simple_loss=0.3643, pruned_loss=0.127, over 1615086.64 frames. ], batch size: 18, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:39,186 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28504.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:43,326 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:51,869 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 3.170e+02 4.105e+02 5.662e+02 1.430e+03, threshold=8.210e+02, percent-clipped=9.0 +2023-02-05 23:29:10,383 INFO [train.py:901] (0/4) Epoch 4, batch 4300, loss[loss=0.2867, simple_loss=0.3537, pruned_loss=0.1098, over 8468.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3634, pruned_loss=0.1261, over 1613910.68 frames. ], batch size: 25, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:38,451 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:29:45,350 INFO [train.py:901] (0/4) Epoch 4, batch 4350, loss[loss=0.2889, simple_loss=0.3574, pruned_loss=0.1102, over 8142.00 frames. ], tot_loss[loss=0.3081, simple_loss=0.3634, pruned_loss=0.1264, over 1612337.83 frames. ], batch size: 22, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:57,577 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28617.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:29:58,774 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 23:30:01,437 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 3.285e+02 3.917e+02 4.771e+02 1.131e+03, threshold=7.833e+02, percent-clipped=1.0 +2023-02-05 23:30:19,069 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28649.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:30:19,683 INFO [train.py:901] (0/4) Epoch 4, batch 4400, loss[loss=0.251, simple_loss=0.3165, pruned_loss=0.09274, over 7434.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3634, pruned_loss=0.126, over 1611560.17 frames. ], batch size: 17, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:30:41,082 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 23:30:54,264 INFO [train.py:901] (0/4) Epoch 4, batch 4450, loss[loss=0.2675, simple_loss=0.325, pruned_loss=0.105, over 7432.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3627, pruned_loss=0.1255, over 1610616.17 frames. 
], batch size: 17, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:30:58,473 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:31:10,733 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.181e+02 3.229e+02 4.056e+02 4.786e+02 8.259e+02, threshold=8.113e+02, percent-clipped=1.0 +2023-02-05 23:31:17,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28732.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:31:30,204 INFO [train.py:901] (0/4) Epoch 4, batch 4500, loss[loss=0.2801, simple_loss=0.3556, pruned_loss=0.1023, over 8341.00 frames. ], tot_loss[loss=0.3061, simple_loss=0.3621, pruned_loss=0.125, over 1608527.23 frames. ], batch size: 26, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:31:36,222 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 23:31:39,887 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28764.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:05,576 INFO [train.py:901] (0/4) Epoch 4, batch 4550, loss[loss=0.2733, simple_loss=0.3225, pruned_loss=0.112, over 7554.00 frames. ], tot_loss[loss=0.3079, simple_loss=0.3639, pruned_loss=0.126, over 1610987.38 frames. ], batch size: 18, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:21,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.133e+02 4.046e+02 5.517e+02 1.256e+03, threshold=8.093e+02, percent-clipped=3.0 +2023-02-05 23:32:39,606 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:40,909 INFO [train.py:901] (0/4) Epoch 4, batch 4600, loss[loss=0.2966, simple_loss=0.3686, pruned_loss=0.1122, over 8483.00 frames. ], tot_loss[loss=0.3084, simple_loss=0.3643, pruned_loss=0.1263, over 1612354.50 frames. ], batch size: 49, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:43,584 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28854.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:00,429 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28879.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:14,780 INFO [train.py:901] (0/4) Epoch 4, batch 4650, loss[loss=0.3836, simple_loss=0.4145, pruned_loss=0.1764, over 8504.00 frames. ], tot_loss[loss=0.31, simple_loss=0.365, pruned_loss=0.1275, over 1613898.47 frames. ], batch size: 29, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:19,736 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 23:33:30,678 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 3.425e+02 4.570e+02 5.631e+02 1.457e+03, threshold=9.141e+02, percent-clipped=7.0 +2023-02-05 23:33:49,349 INFO [train.py:901] (0/4) Epoch 4, batch 4700, loss[loss=0.2565, simple_loss=0.3183, pruned_loss=0.09733, over 7722.00 frames. ], tot_loss[loss=0.3081, simple_loss=0.3635, pruned_loss=0.1264, over 1612728.98 frames. 
], batch size: 18, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:58,481 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:59,185 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28963.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:03,890 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:15,883 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:16,546 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28988.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:34:24,504 INFO [train.py:901] (0/4) Epoch 4, batch 4750, loss[loss=0.3347, simple_loss=0.3946, pruned_loss=0.1374, over 8490.00 frames. ], tot_loss[loss=0.3069, simple_loss=0.3627, pruned_loss=0.1256, over 1613512.34 frames. ], batch size: 29, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:34:33,275 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29013.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:34:37,313 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5661, 1.6819, 1.5341, 1.3523, 1.3608, 1.4241, 1.7800, 1.7875], + device='cuda:0'), covar=tensor([0.0593, 0.1308, 0.1908, 0.1458, 0.0739, 0.1678, 0.0850, 0.0585], + device='cuda:0'), in_proj_covar=tensor([0.0153, 0.0197, 0.0233, 0.0197, 0.0153, 0.0202, 0.0161, 0.0165], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:34:38,681 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:38,759 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:40,432 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.078e+02 3.145e+02 3.754e+02 5.040e+02 8.107e+02, threshold=7.508e+02, percent-clipped=0.0 +2023-02-05 23:34:40,462 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 23:34:42,471 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 23:34:50,947 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 23:34:56,217 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:59,307 INFO [train.py:901] (0/4) Epoch 4, batch 4800, loss[loss=0.2793, simple_loss=0.351, pruned_loss=0.1038, over 8576.00 frames. ], tot_loss[loss=0.3075, simple_loss=0.3631, pruned_loss=0.1259, over 1613814.70 frames. ], batch size: 31, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:03,001 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-05 23:35:34,006 INFO [train.py:901] (0/4) Epoch 4, batch 4850, loss[loss=0.3369, simple_loss=0.3653, pruned_loss=0.1542, over 7944.00 frames. ], tot_loss[loss=0.3072, simple_loss=0.3623, pruned_loss=0.126, over 1611373.96 frames. 
], batch size: 20, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:34,020 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 23:35:49,596 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.099e+02 3.374e+02 4.405e+02 6.016e+02 1.134e+03, threshold=8.810e+02, percent-clipped=7.0 +2023-02-05 23:35:58,046 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.66 vs. limit=5.0 +2023-02-05 23:36:08,349 INFO [train.py:901] (0/4) Epoch 4, batch 4900, loss[loss=0.2455, simple_loss=0.304, pruned_loss=0.09349, over 7772.00 frames. ], tot_loss[loss=0.3062, simple_loss=0.3613, pruned_loss=0.1255, over 1609289.51 frames. ], batch size: 19, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:42,074 INFO [train.py:901] (0/4) Epoch 4, batch 4950, loss[loss=0.3406, simple_loss=0.3946, pruned_loss=0.1433, over 8462.00 frames. ], tot_loss[loss=0.306, simple_loss=0.361, pruned_loss=0.1255, over 1610026.94 frames. ], batch size: 29, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:47,410 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.4409, 5.3391, 4.6955, 1.9240, 4.8184, 4.9138, 5.0505, 4.5008], + device='cuda:0'), covar=tensor([0.0700, 0.0447, 0.0839, 0.4685, 0.0719, 0.0478, 0.1045, 0.0618], + device='cuda:0'), in_proj_covar=tensor([0.0375, 0.0277, 0.0299, 0.0387, 0.0300, 0.0245, 0.0292, 0.0226], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 23:36:56,263 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:36:58,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.208e+02 3.912e+02 5.596e+02 9.849e+02, threshold=7.824e+02, percent-clipped=2.0 +2023-02-05 23:36:58,863 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:00,350 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:04,475 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6631, 1.9886, 2.1444, 1.7021, 1.0422, 2.2600, 0.2750, 1.0849], + device='cuda:0'), covar=tensor([0.5545, 0.1711, 0.0924, 0.2730, 0.5932, 0.0861, 0.5641, 0.2687], + device='cuda:0'), in_proj_covar=tensor([0.0124, 0.0107, 0.0080, 0.0151, 0.0191, 0.0082, 0.0137, 0.0117], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:37:10,461 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-05 23:37:12,973 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29244.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:16,649 INFO [train.py:901] (0/4) Epoch 4, batch 5000, loss[loss=0.3165, simple_loss=0.356, pruned_loss=0.1385, over 7910.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3618, pruned_loss=0.1264, over 1609269.24 frames. 
], batch size: 20, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:37:16,868 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29250.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:19,513 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29254.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:51,646 INFO [train.py:901] (0/4) Epoch 4, batch 5050, loss[loss=0.2465, simple_loss=0.3197, pruned_loss=0.08669, over 7922.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.361, pruned_loss=0.1249, over 1609537.78 frames. ], batch size: 20, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:37:54,558 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2179, 2.4015, 1.4451, 1.9303, 1.8350, 1.2412, 1.6479, 1.9481], + device='cuda:0'), covar=tensor([0.1225, 0.0259, 0.0967, 0.0503, 0.0629, 0.1180, 0.0944, 0.0708], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0237, 0.0302, 0.0301, 0.0327, 0.0309, 0.0333, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 23:38:07,699 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 3.436e+02 4.072e+02 5.001e+02 1.022e+03, threshold=8.144e+02, percent-clipped=3.0 +2023-02-05 23:38:14,940 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 23:38:18,448 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29338.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:19,212 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0894, 2.5158, 2.9463, 1.1252, 3.0883, 2.0042, 1.4286, 1.7091], + device='cuda:0'), covar=tensor([0.0222, 0.0127, 0.0112, 0.0200, 0.0102, 0.0253, 0.0273, 0.0129], + device='cuda:0'), in_proj_covar=tensor([0.0280, 0.0202, 0.0160, 0.0236, 0.0190, 0.0323, 0.0262, 0.0219], + device='cuda:0'), out_proj_covar=tensor([1.1226e-04, 8.0749e-05, 6.1088e-05, 9.1787e-05, 7.7058e-05, 1.3893e-04, + 1.0667e-04, 8.5441e-05], device='cuda:0') +2023-02-05 23:38:19,602 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 23:38:26,651 INFO [train.py:901] (0/4) Epoch 4, batch 5100, loss[loss=0.3484, simple_loss=0.3936, pruned_loss=0.1516, over 8100.00 frames. ], tot_loss[loss=0.3071, simple_loss=0.3628, pruned_loss=0.1258, over 1615660.73 frames. ], batch size: 23, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:36,213 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29364.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:41,414 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3289, 1.6883, 1.9078, 0.7758, 1.9234, 1.2910, 0.5604, 1.4474], + device='cuda:0'), covar=tensor([0.0141, 0.0070, 0.0049, 0.0130, 0.0082, 0.0212, 0.0204, 0.0077], + device='cuda:0'), in_proj_covar=tensor([0.0280, 0.0202, 0.0160, 0.0239, 0.0190, 0.0326, 0.0262, 0.0221], + device='cuda:0'), out_proj_covar=tensor([1.1203e-04, 8.0644e-05, 6.1241e-05, 9.2880e-05, 7.6989e-05, 1.4033e-04, + 1.0639e-04, 8.6588e-05], device='cuda:0') +2023-02-05 23:39:00,598 INFO [train.py:901] (0/4) Epoch 4, batch 5150, loss[loss=0.3314, simple_loss=0.3725, pruned_loss=0.1452, over 6828.00 frames. ], tot_loss[loss=0.308, simple_loss=0.3634, pruned_loss=0.1263, over 1614853.61 frames. 
], batch size: 71, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:16,232 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.145e+02 3.888e+02 4.871e+02 1.199e+03, threshold=7.777e+02, percent-clipped=1.0 +2023-02-05 23:39:35,362 INFO [train.py:901] (0/4) Epoch 4, batch 5200, loss[loss=0.3225, simple_loss=0.3745, pruned_loss=0.1353, over 8502.00 frames. ], tot_loss[loss=0.3094, simple_loss=0.3643, pruned_loss=0.1272, over 1613141.07 frames. ], batch size: 28, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:54,966 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29479.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:39:54,998 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6970, 1.9172, 2.0809, 1.5793, 1.0548, 2.1937, 0.4423, 1.0965], + device='cuda:0'), covar=tensor([0.3128, 0.2053, 0.0976, 0.3089, 0.7750, 0.1021, 0.5831, 0.2812], + device='cuda:0'), in_proj_covar=tensor([0.0118, 0.0109, 0.0081, 0.0154, 0.0191, 0.0080, 0.0140, 0.0116], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:40:02,249 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2978, 1.1498, 1.1609, 1.0261, 1.1377, 1.1098, 1.1656, 1.1035], + device='cuda:0'), covar=tensor([0.0763, 0.1488, 0.1956, 0.1533, 0.0679, 0.1794, 0.0892, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0155, 0.0197, 0.0234, 0.0199, 0.0155, 0.0202, 0.0164, 0.0166], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:40:09,486 INFO [train.py:901] (0/4) Epoch 4, batch 5250, loss[loss=0.2901, simple_loss=0.3374, pruned_loss=0.1214, over 7245.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3643, pruned_loss=0.1269, over 1613665.35 frames. ], batch size: 16, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:12,206 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 23:40:25,985 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.346e+02 3.507e+02 4.371e+02 5.555e+02 1.318e+03, threshold=8.742e+02, percent-clipped=11.0 +2023-02-05 23:40:26,903 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4993, 1.8341, 3.5017, 1.0654, 2.5472, 1.6179, 1.5247, 2.0476], + device='cuda:0'), covar=tensor([0.1461, 0.1804, 0.0661, 0.2928, 0.1298, 0.2402, 0.1361, 0.2290], + device='cuda:0'), in_proj_covar=tensor([0.0455, 0.0429, 0.0505, 0.0515, 0.0559, 0.0498, 0.0435, 0.0575], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 23:40:43,421 INFO [train.py:901] (0/4) Epoch 4, batch 5300, loss[loss=0.4092, simple_loss=0.4268, pruned_loss=0.1958, over 6552.00 frames. ], tot_loss[loss=0.3089, simple_loss=0.3643, pruned_loss=0.1268, over 1616223.94 frames. 
], batch size: 71, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:57,669 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29569.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:14,739 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29594.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:17,287 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29598.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:18,729 INFO [train.py:901] (0/4) Epoch 4, batch 5350, loss[loss=0.2818, simple_loss=0.3392, pruned_loss=0.1122, over 7982.00 frames. ], tot_loss[loss=0.3081, simple_loss=0.3636, pruned_loss=0.1263, over 1613358.45 frames. ], batch size: 21, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:41:32,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29619.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:35,507 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 3.127e+02 4.006e+02 4.952e+02 2.682e+03, threshold=8.012e+02, percent-clipped=7.0 +2023-02-05 23:41:53,620 INFO [train.py:901] (0/4) Epoch 4, batch 5400, loss[loss=0.2987, simple_loss=0.3743, pruned_loss=0.1115, over 8491.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3617, pruned_loss=0.1247, over 1617169.82 frames. ], batch size: 28, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:14,106 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-05 23:42:14,533 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:28,607 INFO [train.py:901] (0/4) Epoch 4, batch 5450, loss[loss=0.333, simple_loss=0.3902, pruned_loss=0.1379, over 8584.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3614, pruned_loss=0.1245, over 1614789.75 frames. ], batch size: 31, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:36,132 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9868, 1.5088, 1.4506, 1.2642, 1.3427, 1.3965, 1.5717, 1.4903], + device='cuda:0'), covar=tensor([0.0598, 0.1130, 0.1643, 0.1365, 0.0653, 0.1505, 0.0755, 0.0588], + device='cuda:0'), in_proj_covar=tensor([0.0153, 0.0196, 0.0232, 0.0197, 0.0153, 0.0200, 0.0162, 0.0165], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:42:37,304 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29713.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:44,142 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 23:42:44,948 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.180e+02 3.089e+02 4.007e+02 5.016e+02 9.074e+02, threshold=8.014e+02, percent-clipped=4.0 +2023-02-05 23:42:52,681 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29735.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:58,016 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 23:43:02,823 INFO [train.py:901] (0/4) Epoch 4, batch 5500, loss[loss=0.3213, simple_loss=0.3892, pruned_loss=0.1267, over 8596.00 frames. ], tot_loss[loss=0.3051, simple_loss=0.3614, pruned_loss=0.1244, over 1615553.36 frames. 
], batch size: 34, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:10,325 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29760.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:32,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3936, 1.2990, 2.7359, 1.1693, 2.0711, 3.0441, 2.7930, 2.5387], + device='cuda:0'), covar=tensor([0.1241, 0.1658, 0.0446, 0.2169, 0.0707, 0.0320, 0.0493, 0.0796], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0266, 0.0216, 0.0264, 0.0217, 0.0190, 0.0199, 0.0264], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 23:43:33,851 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6632, 3.5911, 3.3121, 1.5812, 3.2448, 3.1120, 3.3757, 2.7308], + device='cuda:0'), covar=tensor([0.1090, 0.0747, 0.0959, 0.4717, 0.0852, 0.1049, 0.1411, 0.1151], + device='cuda:0'), in_proj_covar=tensor([0.0374, 0.0269, 0.0295, 0.0390, 0.0296, 0.0246, 0.0289, 0.0221], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-05 23:43:37,890 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1577, 1.7733, 4.3511, 1.7603, 2.4059, 5.0174, 4.5254, 4.4064], + device='cuda:0'), covar=tensor([0.1216, 0.1489, 0.0273, 0.1986, 0.0760, 0.0174, 0.0337, 0.0572], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0266, 0.0215, 0.0264, 0.0216, 0.0189, 0.0198, 0.0263], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-05 23:43:38,486 INFO [train.py:901] (0/4) Epoch 4, batch 5550, loss[loss=0.3341, simple_loss=0.3972, pruned_loss=0.1355, over 8193.00 frames. ], tot_loss[loss=0.3074, simple_loss=0.3634, pruned_loss=0.1257, over 1614138.36 frames. ], batch size: 23, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:48,539 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0874, 1.0774, 1.0676, 0.9129, 0.7622, 1.2364, 0.0154, 0.8423], + device='cuda:0'), covar=tensor([0.3639, 0.2512, 0.1293, 0.2383, 0.6249, 0.0960, 0.5758, 0.2888], + device='cuda:0'), in_proj_covar=tensor([0.0119, 0.0111, 0.0082, 0.0157, 0.0191, 0.0080, 0.0141, 0.0120], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:43:51,762 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:54,237 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.211e+02 3.931e+02 4.808e+02 9.688e+02, threshold=7.861e+02, percent-clipped=2.0 +2023-02-05 23:44:12,162 INFO [train.py:901] (0/4) Epoch 4, batch 5600, loss[loss=0.3443, simple_loss=0.3978, pruned_loss=0.1454, over 8508.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3631, pruned_loss=0.1258, over 1609835.25 frames. ], batch size: 26, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:46,052 INFO [train.py:901] (0/4) Epoch 4, batch 5650, loss[loss=0.3182, simple_loss=0.3779, pruned_loss=0.1292, over 8247.00 frames. ], tot_loss[loss=0.3069, simple_loss=0.3625, pruned_loss=0.1257, over 1608019.41 frames. 
], batch size: 24, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:55,342 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29913.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:03,286 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.236e+02 4.025e+02 5.119e+02 8.732e+02, threshold=8.050e+02, percent-clipped=2.0 +2023-02-05 23:45:03,319 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 23:45:20,770 INFO [train.py:901] (0/4) Epoch 4, batch 5700, loss[loss=0.3122, simple_loss=0.371, pruned_loss=0.1267, over 8508.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3615, pruned_loss=0.1245, over 1606744.42 frames. ], batch size: 26, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:45:34,465 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:52,043 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29994.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:56,066 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-30000.pt +2023-02-05 23:45:56,992 INFO [train.py:901] (0/4) Epoch 4, batch 5750, loss[loss=0.3066, simple_loss=0.3613, pruned_loss=0.1259, over 8025.00 frames. ], tot_loss[loss=0.3026, simple_loss=0.3597, pruned_loss=0.1227, over 1611533.63 frames. ], batch size: 22, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:46:00,485 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8412, 2.2929, 3.7306, 3.0056, 2.9227, 2.1620, 1.5890, 1.6710], + device='cuda:0'), covar=tensor([0.1301, 0.1926, 0.0377, 0.0785, 0.0849, 0.0809, 0.0972, 0.1863], + device='cuda:0'), in_proj_covar=tensor([0.0694, 0.0624, 0.0531, 0.0599, 0.0714, 0.0583, 0.0570, 0.0588], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:46:07,156 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 23:46:13,263 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.352e+02 3.278e+02 4.024e+02 4.787e+02 1.009e+03, threshold=8.047e+02, percent-clipped=4.0 +2023-02-05 23:46:13,352 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:16,813 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30028.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:32,303 INFO [train.py:901] (0/4) Epoch 4, batch 5800, loss[loss=0.355, simple_loss=0.4069, pruned_loss=0.1516, over 8346.00 frames. ], tot_loss[loss=0.303, simple_loss=0.3597, pruned_loss=0.1231, over 1614513.76 frames. ], batch size: 26, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:47:06,537 INFO [train.py:901] (0/4) Epoch 4, batch 5850, loss[loss=0.2573, simple_loss=0.3072, pruned_loss=0.1037, over 7286.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3602, pruned_loss=0.1235, over 1618933.58 frames. 
], batch size: 16, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:23,103 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 3.427e+02 4.657e+02 5.932e+02 9.223e+02, threshold=9.314e+02, percent-clipped=4.0 +2023-02-05 23:47:33,290 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30139.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:35,876 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:41,683 INFO [train.py:901] (0/4) Epoch 4, batch 5900, loss[loss=0.2966, simple_loss=0.3677, pruned_loss=0.1127, over 8321.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.362, pruned_loss=0.1246, over 1618104.11 frames. ], batch size: 25, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:51,359 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:16,092 INFO [train.py:901] (0/4) Epoch 4, batch 5950, loss[loss=0.2778, simple_loss=0.341, pruned_loss=0.1073, over 7645.00 frames. ], tot_loss[loss=0.3057, simple_loss=0.3617, pruned_loss=0.1248, over 1614044.72 frames. ], batch size: 19, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:24,327 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1138, 1.8683, 2.2458, 2.0956, 1.7212, 2.1944, 1.2727, 1.8671], + device='cuda:0'), covar=tensor([0.2693, 0.3093, 0.1126, 0.1780, 0.4172, 0.0873, 0.4422, 0.2377], + device='cuda:0'), in_proj_covar=tensor([0.0124, 0.0119, 0.0085, 0.0160, 0.0199, 0.0084, 0.0148, 0.0124], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:48:32,441 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.869e+02 3.143e+02 3.968e+02 4.977e+02 1.070e+03, threshold=7.937e+02, percent-clipped=1.0 +2023-02-05 23:48:35,337 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30227.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:50,184 INFO [train.py:901] (0/4) Epoch 4, batch 6000, loss[loss=0.3083, simple_loss=0.3682, pruned_loss=0.1242, over 8354.00 frames. ], tot_loss[loss=0.3067, simple_loss=0.3628, pruned_loss=0.1253, over 1612815.58 frames. ], batch size: 24, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:50,185 INFO [train.py:926] (0/4) Computing validation loss +2023-02-05 23:49:02,861 INFO [train.py:935] (0/4) Epoch 4, validation: loss=0.2338, simple_loss=0.3275, pruned_loss=0.07005, over 944034.00 frames. 
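[Editorial note on the recurring `WARNING [train.py:1067] Exclude cut with ID ... from training. Duration: ...` lines in this log: they come from a duration filter applied to the training cuts before batching. A minimal sketch of such a filter is below, consistent with the durations excluded here (cuts shorter than 1 s or longer than 20 s) and assuming lhotse-style cut objects with `.id` and `.duration`; the exact code in the icefall recipe may differ by version.]

```python
import logging

def remove_short_and_long_utt(c) -> bool:
    # Keep only utterances whose duration lies in [1, 20] seconds;
    # anything outside that range is logged and dropped from training,
    # producing the WARNING lines seen throughout this log.
    if c.duration < 1.0 or c.duration > 20.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# Applied lazily to the training cuts, e.g.:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```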
+2023-02-05 23:49:02,862 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-05 23:49:08,402 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6531, 1.4270, 3.7782, 1.5369, 3.2242, 3.1636, 3.3043, 3.1876], + device='cuda:0'), covar=tensor([0.0468, 0.3195, 0.0387, 0.2228, 0.1141, 0.0753, 0.0506, 0.0644], + device='cuda:0'), in_proj_covar=tensor([0.0299, 0.0461, 0.0365, 0.0377, 0.0450, 0.0380, 0.0365, 0.0407], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:49:09,870 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2188, 2.3754, 1.8387, 1.7191, 1.8446, 2.0343, 2.3304, 2.0614], + device='cuda:0'), covar=tensor([0.0648, 0.1135, 0.1762, 0.1443, 0.0715, 0.1406, 0.0788, 0.0617], + device='cuda:0'), in_proj_covar=tensor([0.0153, 0.0196, 0.0233, 0.0198, 0.0154, 0.0201, 0.0162, 0.0165], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0006, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-05 23:49:17,528 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 23:49:22,561 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:25,895 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:33,988 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9375, 2.0262, 3.7599, 3.2443, 3.1783, 2.3103, 1.4354, 1.4645], + device='cuda:0'), covar=tensor([0.1474, 0.2364, 0.0452, 0.0767, 0.0891, 0.0851, 0.1072, 0.2186], + device='cuda:0'), in_proj_covar=tensor([0.0698, 0.0635, 0.0535, 0.0597, 0.0718, 0.0592, 0.0572, 0.0586], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:49:37,706 INFO [train.py:901] (0/4) Epoch 4, batch 6050, loss[loss=0.3414, simple_loss=0.3919, pruned_loss=0.1455, over 8641.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3626, pruned_loss=0.1255, over 1614866.73 frames. ], batch size: 49, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:49:44,064 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30309.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:47,537 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 23:49:53,937 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 3.338e+02 3.992e+02 4.649e+02 1.183e+03, threshold=7.984e+02, percent-clipped=3.0 +2023-02-05 23:50:12,474 INFO [train.py:901] (0/4) Epoch 4, batch 6100, loss[loss=0.316, simple_loss=0.3737, pruned_loss=0.1292, over 8252.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3612, pruned_loss=0.1246, over 1615354.31 frames. ], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:50:14,283 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.11 vs. limit=5.0 +2023-02-05 23:50:32,435 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30378.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:39,276 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-05 23:50:44,144 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30395.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:47,443 INFO [train.py:901] (0/4) Epoch 4, batch 6150, loss[loss=0.4465, simple_loss=0.469, pruned_loss=0.212, over 8612.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3615, pruned_loss=0.1245, over 1614864.13 frames. ], batch size: 31, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:51:02,482 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30420.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:05,079 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.511e+02 4.267e+02 5.249e+02 1.089e+03, threshold=8.535e+02, percent-clipped=6.0 +2023-02-05 23:51:23,142 INFO [train.py:901] (0/4) Epoch 4, batch 6200, loss[loss=0.2494, simple_loss=0.3037, pruned_loss=0.09759, over 6785.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3613, pruned_loss=0.1246, over 1612385.12 frames. ], batch size: 15, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:51:48,270 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30487.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:57,466 INFO [train.py:901] (0/4) Epoch 4, batch 6250, loss[loss=0.3117, simple_loss=0.3726, pruned_loss=0.1254, over 8463.00 frames. ], tot_loss[loss=0.3061, simple_loss=0.362, pruned_loss=0.1251, over 1613262.07 frames. ], batch size: 27, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:14,465 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 3.291e+02 3.933e+02 5.014e+02 1.132e+03, threshold=7.866e+02, percent-clipped=4.0 +2023-02-05 23:52:22,898 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:32,695 INFO [train.py:901] (0/4) Epoch 4, batch 6300, loss[loss=0.263, simple_loss=0.3372, pruned_loss=0.09444, over 8256.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.36, pruned_loss=0.1231, over 1614806.39 frames. ], batch size: 24, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:39,494 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:47,316 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30571.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:06,881 INFO [train.py:901] (0/4) Epoch 4, batch 6350, loss[loss=0.2902, simple_loss=0.361, pruned_loss=0.1097, over 8287.00 frames. ], tot_loss[loss=0.3043, simple_loss=0.3611, pruned_loss=0.1237, over 1612068.30 frames. 
], batch size: 23, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:08,352 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:23,785 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.165e+02 3.849e+02 5.077e+02 1.430e+03, threshold=7.697e+02, percent-clipped=4.0 +2023-02-05 23:53:37,224 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0631, 1.4379, 4.2225, 1.5519, 3.7072, 3.5535, 3.8524, 3.7271], + device='cuda:0'), covar=tensor([0.0410, 0.3214, 0.0451, 0.2344, 0.1067, 0.0687, 0.0402, 0.0531], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0469, 0.0380, 0.0386, 0.0458, 0.0377, 0.0369, 0.0414], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:53:42,473 INFO [train.py:901] (0/4) Epoch 4, batch 6400, loss[loss=0.2577, simple_loss=0.3255, pruned_loss=0.09494, over 8280.00 frames. ], tot_loss[loss=0.3048, simple_loss=0.3615, pruned_loss=0.124, over 1611170.28 frames. ], batch size: 23, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:58,068 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30673.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:03,301 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-05 23:54:07,687 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30686.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:16,882 INFO [train.py:901] (0/4) Epoch 4, batch 6450, loss[loss=0.315, simple_loss=0.3587, pruned_loss=0.1356, over 8129.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3616, pruned_loss=0.1244, over 1611140.23 frames. ], batch size: 22, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:54:31,537 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30722.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:31,608 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5897, 1.5107, 4.6427, 1.9021, 4.1016, 3.9122, 4.3395, 4.1329], + device='cuda:0'), covar=tensor([0.0282, 0.3219, 0.0271, 0.2054, 0.0772, 0.0493, 0.0320, 0.0405], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0471, 0.0379, 0.0391, 0.0459, 0.0380, 0.0372, 0.0417], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:54:32,829 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.022e+02 3.987e+02 5.645e+02 1.412e+03, threshold=7.975e+02, percent-clipped=10.0 +2023-02-05 23:54:50,957 INFO [train.py:901] (0/4) Epoch 4, batch 6500, loss[loss=0.3154, simple_loss=0.3839, pruned_loss=0.1234, over 8614.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3604, pruned_loss=0.1233, over 1612054.22 frames. ], batch size: 34, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:55:07,036 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. 
limit=2.0 +2023-02-05 23:55:18,158 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5469, 1.8089, 1.9685, 1.5834, 1.0496, 2.0109, 0.3069, 1.1835], + device='cuda:0'), covar=tensor([0.2794, 0.2103, 0.0849, 0.2077, 0.6414, 0.1009, 0.5643, 0.2478], + device='cuda:0'), in_proj_covar=tensor([0.0116, 0.0114, 0.0079, 0.0159, 0.0199, 0.0081, 0.0142, 0.0119], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-05 23:55:25,836 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-05 23:55:26,309 INFO [train.py:901] (0/4) Epoch 4, batch 6550, loss[loss=0.2461, simple_loss=0.3135, pruned_loss=0.08931, over 7696.00 frames. ], tot_loss[loss=0.3042, simple_loss=0.3608, pruned_loss=0.1237, over 1611403.59 frames. ], batch size: 18, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:55:42,637 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.539e+02 4.251e+02 5.114e+02 1.135e+03, threshold=8.501e+02, percent-clipped=1.0 +2023-02-05 23:55:50,036 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 23:55:51,478 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30837.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:00,638 INFO [train.py:901] (0/4) Epoch 4, batch 6600, loss[loss=0.2403, simple_loss=0.299, pruned_loss=0.09078, over 7639.00 frames. ], tot_loss[loss=0.3034, simple_loss=0.3603, pruned_loss=0.1233, over 1612642.97 frames. ], batch size: 19, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:06,240 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30858.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:08,701 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:56:24,220 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30883.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:35,342 INFO [train.py:901] (0/4) Epoch 4, batch 6650, loss[loss=0.3238, simple_loss=0.3893, pruned_loss=0.1291, over 8504.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3595, pruned_loss=0.1226, over 1614059.96 frames. ], batch size: 26, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:50,093 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:51,878 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 3.362e+02 4.352e+02 5.461e+02 1.446e+03, threshold=8.703e+02, percent-clipped=3.0 +2023-02-05 23:57:04,046 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30942.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:04,650 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30943.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:09,171 INFO [train.py:901] (0/4) Epoch 4, batch 6700, loss[loss=0.3947, simple_loss=0.4185, pruned_loss=0.1855, over 6638.00 frames. ], tot_loss[loss=0.3032, simple_loss=0.3601, pruned_loss=0.1232, over 1616616.53 frames. 
], batch size: 71, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:10,674 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30952.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:21,614 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:45,172 INFO [train.py:901] (0/4) Epoch 4, batch 6750, loss[loss=0.3021, simple_loss=0.3646, pruned_loss=0.1198, over 8746.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3596, pruned_loss=0.1225, over 1617548.30 frames. ], batch size: 34, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:56,385 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31017.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:00,816 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.225e+02 3.317e+02 4.136e+02 5.252e+02 1.678e+03, threshold=8.272e+02, percent-clipped=4.0 +2023-02-05 23:58:04,848 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31029.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:58:18,930 INFO [train.py:901] (0/4) Epoch 4, batch 6800, loss[loss=0.3303, simple_loss=0.3919, pruned_loss=0.1344, over 8248.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3604, pruned_loss=0.1232, over 1617399.65 frames. ], batch size: 24, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:58:19,588 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 23:58:31,183 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0424, 1.7162, 1.2882, 1.7485, 1.3975, 1.0676, 1.3105, 1.5854], + device='cuda:0'), covar=tensor([0.0849, 0.0343, 0.0838, 0.0340, 0.0535, 0.1049, 0.0604, 0.0501], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0239, 0.0303, 0.0298, 0.0321, 0.0311, 0.0330, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-05 23:58:48,707 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31093.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:53,203 INFO [train.py:901] (0/4) Epoch 4, batch 6850, loss[loss=0.2826, simple_loss=0.3617, pruned_loss=0.1018, over 8283.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3599, pruned_loss=0.1218, over 1620628.71 frames. ], batch size: 23, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:59:06,887 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:10,000 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 23:59:10,569 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.178e+02 3.797e+02 5.313e+02 1.260e+03, threshold=7.594e+02, percent-clipped=4.0 +2023-02-05 23:59:16,192 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31132.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:28,758 INFO [train.py:901] (0/4) Epoch 4, batch 6900, loss[loss=0.2892, simple_loss=0.3467, pruned_loss=0.1158, over 8088.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3608, pruned_loss=0.1224, over 1621711.78 frames. 
], batch size: 21, lr: 1.73e-02, grad_scale: 8.0 +2023-02-05 23:59:38,460 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2979, 1.2949, 4.4097, 1.6041, 3.6919, 3.5998, 3.9352, 3.8428], + device='cuda:0'), covar=tensor([0.0409, 0.3464, 0.0297, 0.2313, 0.1107, 0.0651, 0.0416, 0.0519], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0461, 0.0368, 0.0388, 0.0454, 0.0378, 0.0373, 0.0414], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-05 23:59:57,134 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 00:00:03,443 INFO [train.py:901] (0/4) Epoch 4, batch 6950, loss[loss=0.3355, simple_loss=0.3856, pruned_loss=0.1427, over 8368.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3597, pruned_loss=0.1219, over 1617548.64 frames. ], batch size: 24, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:18,138 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 00:00:20,068 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 3.425e+02 4.122e+02 5.302e+02 9.579e+02, threshold=8.244e+02, percent-clipped=6.0 +2023-02-06 00:00:38,131 INFO [train.py:901] (0/4) Epoch 4, batch 7000, loss[loss=0.3087, simple_loss=0.3562, pruned_loss=0.1306, over 7530.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3609, pruned_loss=0.1229, over 1620785.18 frames. ], batch size: 18, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:48,749 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:03,269 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:09,176 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:11,662 INFO [train.py:901] (0/4) Epoch 4, batch 7050, loss[loss=0.2749, simple_loss=0.3394, pruned_loss=0.1052, over 7928.00 frames. ], tot_loss[loss=0.3047, simple_loss=0.3615, pruned_loss=0.1239, over 1618198.47 frames. ], batch size: 20, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:28,375 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 3.221e+02 3.873e+02 5.326e+02 1.178e+03, threshold=7.746e+02, percent-clipped=8.0 +2023-02-06 00:01:47,439 INFO [train.py:901] (0/4) Epoch 4, batch 7100, loss[loss=0.4096, simple_loss=0.424, pruned_loss=0.1975, over 7110.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.3619, pruned_loss=0.1245, over 1615484.52 frames. ], batch size: 71, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:48,712 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 00:02:02,608 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31373.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:02:08,038 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:13,363 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:21,165 INFO [train.py:901] (0/4) Epoch 4, batch 7150, loss[loss=0.2455, simple_loss=0.3062, pruned_loss=0.09238, over 7936.00 frames. 
], tot_loss[loss=0.307, simple_loss=0.3626, pruned_loss=0.1256, over 1611946.96 frames. ], batch size: 20, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:02:22,726 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:28,563 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:29,903 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:37,019 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.187e+02 3.955e+02 5.000e+02 8.847e+02, threshold=7.910e+02, percent-clipped=2.0 +2023-02-06 00:02:39,209 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:56,011 INFO [train.py:901] (0/4) Epoch 4, batch 7200, loss[loss=0.2884, simple_loss=0.3326, pruned_loss=0.1221, over 7554.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3619, pruned_loss=0.1246, over 1613235.57 frames. ], batch size: 18, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:03:22,241 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31488.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:03:30,967 INFO [train.py:901] (0/4) Epoch 4, batch 7250, loss[loss=0.2765, simple_loss=0.3216, pruned_loss=0.1157, over 7533.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.3592, pruned_loss=0.1233, over 1607921.39 frames. ], batch size: 18, lr: 1.73e-02, grad_scale: 16.0 +2023-02-06 00:03:37,358 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31509.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:03:43,637 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.63 vs. limit=5.0 +2023-02-06 00:03:47,340 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.150e+02 3.858e+02 4.938e+02 9.845e+02, threshold=7.715e+02, percent-clipped=4.0 +2023-02-06 00:03:52,988 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4393, 1.2956, 1.4054, 1.0733, 1.1373, 1.3553, 1.2431, 1.2357], + device='cuda:0'), covar=tensor([0.0693, 0.1247, 0.1849, 0.1537, 0.0559, 0.1484, 0.0760, 0.0577], + device='cuda:0'), in_proj_covar=tensor([0.0147, 0.0190, 0.0229, 0.0192, 0.0145, 0.0193, 0.0157, 0.0158], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:04:05,055 INFO [train.py:901] (0/4) Epoch 4, batch 7300, loss[loss=0.3451, simple_loss=0.4035, pruned_loss=0.1433, over 8470.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3598, pruned_loss=0.1237, over 1608998.87 frames. ], batch size: 25, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:33,536 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 00:04:40,541 INFO [train.py:901] (0/4) Epoch 4, batch 7350, loss[loss=0.2934, simple_loss=0.3599, pruned_loss=0.1135, over 8344.00 frames. ], tot_loss[loss=0.3026, simple_loss=0.3593, pruned_loss=0.123, over 1611329.52 frames. 
], batch size: 26, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:57,283 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.182e+02 2.774e+02 3.613e+02 4.483e+02 1.102e+03, threshold=7.227e+02, percent-clipped=2.0 +2023-02-06 00:04:59,980 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 00:05:05,400 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:14,568 INFO [train.py:901] (0/4) Epoch 4, batch 7400, loss[loss=0.2602, simple_loss=0.3278, pruned_loss=0.09625, over 8245.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.3589, pruned_loss=0.1224, over 1611937.39 frames. ], batch size: 24, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:19,258 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 00:05:20,106 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:21,981 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:26,687 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:37,005 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:43,497 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:48,576 INFO [train.py:901] (0/4) Epoch 4, batch 7450, loss[loss=0.3087, simple_loss=0.3471, pruned_loss=0.1351, over 7708.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3576, pruned_loss=0.1216, over 1608188.68 frames. ], batch size: 18, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:58,000 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 00:06:04,225 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:05,434 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.933e+02 3.216e+02 3.933e+02 5.503e+02 1.387e+03, threshold=7.866e+02, percent-clipped=9.0 +2023-02-06 00:06:19,833 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31744.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:06:21,846 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4252, 1.6096, 1.6029, 1.2649, 1.4646, 1.5708, 1.8734, 1.9692], + device='cuda:0'), covar=tensor([0.0610, 0.1245, 0.1806, 0.1551, 0.0672, 0.1561, 0.0714, 0.0513], + device='cuda:0'), in_proj_covar=tensor([0.0147, 0.0190, 0.0232, 0.0194, 0.0145, 0.0196, 0.0156, 0.0158], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:06:23,707 INFO [train.py:901] (0/4) Epoch 4, batch 7500, loss[loss=0.3585, simple_loss=0.4044, pruned_loss=0.1563, over 8326.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3572, pruned_loss=0.1212, over 1606300.84 frames. 
], batch size: 25, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:06:36,772 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31769.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:06:37,944 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:47,164 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-06 00:06:57,684 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0301, 1.6396, 2.3137, 2.0851, 2.1439, 1.7721, 1.3688, 0.7144], + device='cuda:0'), covar=tensor([0.1291, 0.1413, 0.0397, 0.0727, 0.0571, 0.0803, 0.0864, 0.1362], + device='cuda:0'), in_proj_covar=tensor([0.0708, 0.0632, 0.0533, 0.0609, 0.0724, 0.0592, 0.0575, 0.0588], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:06:58,254 INFO [train.py:901] (0/4) Epoch 4, batch 7550, loss[loss=0.2999, simple_loss=0.3377, pruned_loss=0.1311, over 7427.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3575, pruned_loss=0.1217, over 1609979.53 frames. ], batch size: 17, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:02,941 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:16,189 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.990e+02 2.842e+02 3.963e+02 5.244e+02 1.193e+03, threshold=7.926e+02, percent-clipped=8.0 +2023-02-06 00:07:27,216 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:27,983 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4160, 1.6810, 1.4523, 1.2420, 1.4045, 1.4044, 1.9245, 1.9223], + device='cuda:0'), covar=tensor([0.0605, 0.1210, 0.1884, 0.1598, 0.0676, 0.1599, 0.0730, 0.0540], + device='cuda:0'), in_proj_covar=tensor([0.0147, 0.0189, 0.0233, 0.0194, 0.0144, 0.0196, 0.0156, 0.0158], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:07:34,061 INFO [train.py:901] (0/4) Epoch 4, batch 7600, loss[loss=0.2361, simple_loss=0.3164, pruned_loss=0.07787, over 7923.00 frames. ], tot_loss[loss=0.2986, simple_loss=0.3559, pruned_loss=0.1207, over 1605227.83 frames. 
], batch size: 20, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:36,149 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31853.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:07:40,185 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6059, 1.8505, 2.0597, 1.4698, 0.8905, 1.9868, 0.2979, 1.1846], + device='cuda:0'), covar=tensor([0.2884, 0.2039, 0.1591, 0.3040, 0.7070, 0.0981, 0.6173, 0.2478], + device='cuda:0'), in_proj_covar=tensor([0.0122, 0.0113, 0.0083, 0.0165, 0.0205, 0.0083, 0.0151, 0.0121], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:07:58,616 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:59,322 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3338, 1.5441, 1.5717, 1.3111, 0.8402, 1.5621, 0.1098, 1.0016], + device='cuda:0'), covar=tensor([0.4332, 0.2317, 0.1522, 0.2816, 0.7206, 0.1062, 0.6892, 0.2773], + device='cuda:0'), in_proj_covar=tensor([0.0124, 0.0113, 0.0083, 0.0165, 0.0205, 0.0083, 0.0151, 0.0122], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:08:05,400 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:07,993 INFO [train.py:901] (0/4) Epoch 4, batch 7650, loss[loss=0.3173, simple_loss=0.3755, pruned_loss=0.1296, over 8316.00 frames. ], tot_loss[loss=0.2995, simple_loss=0.3569, pruned_loss=0.1211, over 1609339.35 frames. ], batch size: 25, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:12,222 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8184, 2.1485, 1.6695, 2.7283, 1.3394, 1.3940, 1.6392, 2.2235], + device='cuda:0'), covar=tensor([0.1022, 0.1059, 0.1708, 0.0550, 0.1544, 0.2277, 0.1571, 0.1066], + device='cuda:0'), in_proj_covar=tensor([0.0272, 0.0275, 0.0298, 0.0224, 0.0260, 0.0286, 0.0297, 0.0268], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:08:26,147 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.122e+02 3.181e+02 3.860e+02 4.828e+02 9.649e+02, threshold=7.720e+02, percent-clipped=2.0 +2023-02-06 00:08:31,494 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:43,636 INFO [train.py:901] (0/4) Epoch 4, batch 7700, loss[loss=0.2545, simple_loss=0.3209, pruned_loss=0.0941, over 8238.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3574, pruned_loss=0.1216, over 1609162.55 frames. ], batch size: 22, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:43,815 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8910, 1.3675, 4.3508, 1.8095, 2.3919, 4.9155, 4.7682, 4.3951], + device='cuda:0'), covar=tensor([0.1239, 0.1529, 0.0238, 0.1807, 0.0757, 0.0211, 0.0306, 0.0465], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0271, 0.0225, 0.0268, 0.0224, 0.0202, 0.0214, 0.0274], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:08:51,019 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. 
limit=2.0 +2023-02-06 00:08:55,989 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:56,026 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31968.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:09:06,798 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 00:09:10,169 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1945, 1.5573, 1.6101, 1.2533, 1.4522, 1.5347, 1.9865, 1.6940], + device='cuda:0'), covar=tensor([0.0675, 0.1337, 0.1857, 0.1569, 0.0665, 0.1539, 0.0782, 0.0612], + device='cuda:0'), in_proj_covar=tensor([0.0148, 0.0185, 0.0228, 0.0191, 0.0142, 0.0193, 0.0155, 0.0159], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:09:17,589 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-32000.pt +2023-02-06 00:09:18,512 INFO [train.py:901] (0/4) Epoch 4, batch 7750, loss[loss=0.3394, simple_loss=0.4039, pruned_loss=0.1374, over 8293.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3583, pruned_loss=0.1222, over 1608694.05 frames. ], batch size: 23, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:09:35,964 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.165e+02 3.163e+02 3.927e+02 5.355e+02 1.239e+03, threshold=7.853e+02, percent-clipped=4.0 +2023-02-06 00:09:53,600 INFO [train.py:901] (0/4) Epoch 4, batch 7800, loss[loss=0.2989, simple_loss=0.3561, pruned_loss=0.1208, over 8249.00 frames. ], tot_loss[loss=0.3022, simple_loss=0.3586, pruned_loss=0.123, over 1605021.79 frames. ], batch size: 22, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:05,162 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:10:27,386 INFO [train.py:901] (0/4) Epoch 4, batch 7850, loss[loss=0.2995, simple_loss=0.3409, pruned_loss=0.129, over 6354.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3575, pruned_loss=0.1221, over 1602723.11 frames. ], batch size: 14, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:43,942 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.460e+02 3.521e+02 4.480e+02 6.179e+02 1.308e+03, threshold=8.960e+02, percent-clipped=13.0 +2023-02-06 00:10:55,637 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:00,792 INFO [train.py:901] (0/4) Epoch 4, batch 7900, loss[loss=0.3711, simple_loss=0.3858, pruned_loss=0.1782, over 7276.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3583, pruned_loss=0.1233, over 1601882.01 frames. ], batch size: 16, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:00,856 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:12,554 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:14,001 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.90 vs. 
limit=5.0 +2023-02-06 00:11:21,557 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:24,168 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:34,457 INFO [train.py:901] (0/4) Epoch 4, batch 7950, loss[loss=0.2956, simple_loss=0.3508, pruned_loss=0.1202, over 8528.00 frames. ], tot_loss[loss=0.3049, simple_loss=0.3603, pruned_loss=0.1247, over 1605015.58 frames. ], batch size: 28, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:51,250 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32224.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:11:51,630 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.248e+02 3.536e+02 4.226e+02 5.315e+02 1.259e+03, threshold=8.452e+02, percent-clipped=4.0 +2023-02-06 00:11:59,171 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7603, 1.9479, 5.8694, 1.9557, 5.1158, 4.8650, 5.4010, 5.3730], + device='cuda:0'), covar=tensor([0.0335, 0.3257, 0.0214, 0.2195, 0.0779, 0.0529, 0.0296, 0.0349], + device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0466, 0.0377, 0.0384, 0.0447, 0.0375, 0.0367, 0.0411], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 00:12:01,783 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:08,285 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32249.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:12:08,757 INFO [train.py:901] (0/4) Epoch 4, batch 8000, loss[loss=0.2556, simple_loss=0.3283, pruned_loss=0.09139, over 8744.00 frames. ], tot_loss[loss=0.3037, simple_loss=0.36, pruned_loss=0.1237, over 1606347.02 frames. ], batch size: 30, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:12:19,080 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:26,788 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:42,459 INFO [train.py:901] (0/4) Epoch 4, batch 8050, loss[loss=0.2462, simple_loss=0.3065, pruned_loss=0.09295, over 7165.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3592, pruned_loss=0.1244, over 1590082.76 frames. 
], batch size: 16, lr: 1.70e-02, grad_scale: 8.0 +2023-02-06 00:12:42,636 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:42,668 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7340, 1.7908, 2.0293, 1.6231, 1.0636, 1.8402, 0.2974, 1.2260], + device='cuda:0'), covar=tensor([0.3833, 0.3243, 0.1391, 0.2300, 0.8030, 0.1833, 0.7732, 0.2743], + device='cuda:0'), in_proj_covar=tensor([0.0122, 0.0109, 0.0078, 0.0161, 0.0200, 0.0081, 0.0145, 0.0118], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:12:50,703 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:52,791 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9626, 2.6623, 3.9635, 3.1077, 3.1709, 2.5424, 1.6079, 1.9799], + device='cuda:0'), covar=tensor([0.1333, 0.1619, 0.0352, 0.0839, 0.0843, 0.0752, 0.0911, 0.1691], + device='cuda:0'), in_proj_covar=tensor([0.0695, 0.0627, 0.0521, 0.0595, 0.0707, 0.0581, 0.0567, 0.0579], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:12:58,915 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.697e+02 3.496e+02 4.220e+02 5.135e+02 1.064e+03, threshold=8.441e+02, percent-clipped=2.0 +2023-02-06 00:13:01,780 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3931, 1.9107, 3.0141, 2.3918, 2.3779, 1.8851, 1.3897, 1.0392], + device='cuda:0'), covar=tensor([0.1517, 0.1691, 0.0369, 0.0782, 0.0796, 0.0948, 0.1061, 0.1695], + device='cuda:0'), in_proj_covar=tensor([0.0700, 0.0632, 0.0526, 0.0596, 0.0710, 0.0585, 0.0570, 0.0583], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:13:04,772 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-4.pt +2023-02-06 00:13:16,071 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 00:13:19,657 INFO [train.py:901] (0/4) Epoch 5, batch 0, loss[loss=0.4153, simple_loss=0.4495, pruned_loss=0.1905, over 8458.00 frames. ], tot_loss[loss=0.4153, simple_loss=0.4495, pruned_loss=0.1905, over 8458.00 frames. ], batch size: 27, lr: 1.59e-02, grad_scale: 8.0 +2023-02-06 00:13:19,658 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 00:13:31,621 INFO [train.py:935] (0/4) Epoch 5, validation: loss=0.2309, simple_loss=0.3254, pruned_loss=0.06822, over 944034.00 frames. +2023-02-06 00:13:31,622 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 00:13:46,425 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 00:13:46,594 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:13:53,860 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.57 vs. limit=5.0 +2023-02-06 00:14:06,998 INFO [train.py:901] (0/4) Epoch 5, batch 50, loss[loss=0.3059, simple_loss=0.3523, pruned_loss=0.1297, over 8241.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.3563, pruned_loss=0.1201, over 365399.09 frames. 
], batch size: 22, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:13,361 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:20,370 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6540, 1.9924, 3.1353, 1.1934, 2.3018, 1.8863, 1.6972, 1.7335], + device='cuda:0'), covar=tensor([0.1340, 0.1640, 0.0609, 0.2953, 0.1271, 0.2270, 0.1316, 0.2203], + device='cuda:0'), in_proj_covar=tensor([0.0466, 0.0440, 0.0524, 0.0532, 0.0576, 0.0508, 0.0446, 0.0583], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:14:22,903 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 00:14:36,522 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.148e+02 3.721e+02 4.839e+02 1.477e+03, threshold=7.442e+02, percent-clipped=1.0 +2023-02-06 00:14:38,037 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:41,778 INFO [train.py:901] (0/4) Epoch 5, batch 100, loss[loss=0.3938, simple_loss=0.4234, pruned_loss=0.1821, over 8660.00 frames. ], tot_loss[loss=0.3001, simple_loss=0.3576, pruned_loss=0.1213, over 638641.78 frames. ], batch size: 34, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:44,562 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:45,031 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 00:14:56,153 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5165, 1.2311, 2.6781, 1.1923, 2.0366, 2.9502, 2.8075, 2.5317], + device='cuda:0'), covar=tensor([0.1130, 0.1459, 0.0456, 0.1934, 0.0722, 0.0344, 0.0453, 0.0736], + device='cuda:0'), in_proj_covar=tensor([0.0226, 0.0262, 0.0218, 0.0258, 0.0222, 0.0194, 0.0209, 0.0266], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:15:02,113 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:11,551 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 00:15:15,808 INFO [train.py:901] (0/4) Epoch 5, batch 150, loss[loss=0.2184, simple_loss=0.2833, pruned_loss=0.07677, over 7426.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3585, pruned_loss=0.1205, over 858566.01 frames. ], batch size: 17, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:43,036 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:45,468 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.082e+02 3.007e+02 3.818e+02 4.644e+02 8.323e+02, threshold=7.636e+02, percent-clipped=1.0 +2023-02-06 00:15:50,800 INFO [train.py:901] (0/4) Epoch 5, batch 200, loss[loss=0.2902, simple_loss=0.3384, pruned_loss=0.121, over 7806.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3558, pruned_loss=0.1184, over 1026726.09 frames. 
], batch size: 20, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:59,813 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:01,170 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2677, 1.8752, 1.9148, 0.7981, 1.8921, 1.2869, 0.4482, 1.7097], + device='cuda:0'), covar=tensor([0.0188, 0.0092, 0.0072, 0.0177, 0.0133, 0.0318, 0.0262, 0.0078], + device='cuda:0'), in_proj_covar=tensor([0.0292, 0.0205, 0.0165, 0.0252, 0.0200, 0.0329, 0.0269, 0.0234], + device='cuda:0'), out_proj_covar=tensor([1.1103e-04, 7.7410e-05, 6.0617e-05, 9.2566e-05, 7.6672e-05, 1.3401e-04, + 1.0360e-04, 8.7436e-05], device='cuda:0') +2023-02-06 00:16:06,402 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:23,707 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:24,862 INFO [train.py:901] (0/4) Epoch 5, batch 250, loss[loss=0.3542, simple_loss=0.4055, pruned_loss=0.1515, over 8325.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3557, pruned_loss=0.1187, over 1159360.17 frames. ], batch size: 25, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:16:36,290 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 00:16:45,136 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:46,300 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 00:16:54,491 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 3.241e+02 4.131e+02 4.869e+02 1.219e+03, threshold=8.263e+02, percent-clipped=9.0 +2023-02-06 00:17:00,682 INFO [train.py:901] (0/4) Epoch 5, batch 300, loss[loss=0.2698, simple_loss=0.3406, pruned_loss=0.09953, over 8353.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.356, pruned_loss=0.1191, over 1262036.48 frames. ], batch size: 24, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:03,006 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:10,880 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:27,573 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32673.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:34,188 INFO [train.py:901] (0/4) Epoch 5, batch 350, loss[loss=0.2951, simple_loss=0.3531, pruned_loss=0.1185, over 8032.00 frames. ], tot_loss[loss=0.2986, simple_loss=0.357, pruned_loss=0.1201, over 1340403.59 frames. 
], batch size: 22, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:34,409 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:51,720 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:18:02,407 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9913, 2.5408, 3.3233, 0.9400, 3.1330, 2.0839, 1.3615, 1.7331], + device='cuda:0'), covar=tensor([0.0282, 0.0104, 0.0068, 0.0257, 0.0113, 0.0261, 0.0325, 0.0164], + device='cuda:0'), in_proj_covar=tensor([0.0288, 0.0201, 0.0165, 0.0248, 0.0197, 0.0328, 0.0267, 0.0230], + device='cuda:0'), out_proj_covar=tensor([1.0905e-04, 7.5591e-05, 5.9840e-05, 9.0840e-05, 7.4895e-05, 1.3290e-04, + 1.0252e-04, 8.5651e-05], device='cuda:0') +2023-02-06 00:18:04,027 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.189e+02 4.031e+02 4.810e+02 8.158e+02, threshold=8.062e+02, percent-clipped=0.0 +2023-02-06 00:18:09,328 INFO [train.py:901] (0/4) Epoch 5, batch 400, loss[loss=0.3751, simple_loss=0.4078, pruned_loss=0.1712, over 8336.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.359, pruned_loss=0.1213, over 1402671.96 frames. ], batch size: 26, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:18:19,554 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1291, 1.1250, 3.2027, 1.0064, 2.7436, 2.6801, 2.8969, 2.7973], + device='cuda:0'), covar=tensor([0.0493, 0.3265, 0.0625, 0.2459, 0.1346, 0.0852, 0.0580, 0.0723], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0475, 0.0386, 0.0395, 0.0460, 0.0380, 0.0380, 0.0420], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 00:18:43,766 INFO [train.py:901] (0/4) Epoch 5, batch 450, loss[loss=0.3098, simple_loss=0.3707, pruned_loss=0.1245, over 7981.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.3566, pruned_loss=0.1199, over 1448320.50 frames. ], batch size: 21, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:19:00,937 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-06 00:19:02,061 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0783, 1.2015, 1.1659, 0.1503, 1.1716, 0.9464, 0.1156, 1.0764], + device='cuda:0'), covar=tensor([0.0142, 0.0098, 0.0082, 0.0195, 0.0102, 0.0320, 0.0249, 0.0097], + device='cuda:0'), in_proj_covar=tensor([0.0287, 0.0204, 0.0163, 0.0247, 0.0199, 0.0326, 0.0268, 0.0229], + device='cuda:0'), out_proj_covar=tensor([1.0851e-04, 7.6543e-05, 5.8974e-05, 9.0359e-05, 7.5521e-05, 1.3192e-04, + 1.0272e-04, 8.5108e-05], device='cuda:0') +2023-02-06 00:19:12,444 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.263e+02 3.122e+02 4.068e+02 4.898e+02 9.897e+02, threshold=8.137e+02, percent-clipped=5.0 +2023-02-06 00:19:17,681 INFO [train.py:901] (0/4) Epoch 5, batch 500, loss[loss=0.3228, simple_loss=0.3749, pruned_loss=0.1353, over 8289.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.3559, pruned_loss=0.1191, over 1484139.13 frames. 
], batch size: 23, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:19:21,935 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4182, 1.7883, 1.7768, 1.5934, 0.8554, 1.8095, 0.2034, 1.2030], + device='cuda:0'), covar=tensor([0.4197, 0.2235, 0.1329, 0.2265, 0.8307, 0.1158, 0.5854, 0.2101], + device='cuda:0'), in_proj_covar=tensor([0.0125, 0.0112, 0.0079, 0.0162, 0.0208, 0.0078, 0.0143, 0.0118], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:19:52,903 INFO [train.py:901] (0/4) Epoch 5, batch 550, loss[loss=0.2543, simple_loss=0.3345, pruned_loss=0.08704, over 8506.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.3553, pruned_loss=0.1185, over 1515478.39 frames. ], batch size: 26, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:19:55,207 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3838, 1.6757, 1.6021, 1.3526, 0.8573, 1.6143, 0.2008, 1.0529], + device='cuda:0'), covar=tensor([0.3693, 0.1466, 0.1275, 0.1946, 0.5978, 0.1037, 0.4862, 0.1923], + device='cuda:0'), in_proj_covar=tensor([0.0126, 0.0113, 0.0079, 0.0164, 0.0211, 0.0079, 0.0145, 0.0119], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:20:21,235 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.184e+02 3.133e+02 3.697e+02 5.126e+02 1.321e+03, threshold=7.393e+02, percent-clipped=4.0 +2023-02-06 00:20:23,468 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6416, 1.3990, 2.7297, 1.0319, 2.0610, 3.0365, 2.8424, 2.4805], + device='cuda:0'), covar=tensor([0.0938, 0.1325, 0.0487, 0.2177, 0.0700, 0.0298, 0.0508, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0263, 0.0223, 0.0261, 0.0224, 0.0195, 0.0215, 0.0272], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:20:26,722 INFO [train.py:901] (0/4) Epoch 5, batch 600, loss[loss=0.3224, simple_loss=0.3778, pruned_loss=0.1335, over 8456.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3542, pruned_loss=0.1175, over 1537674.72 frames. ], batch size: 27, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:20:50,096 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 00:21:02,155 INFO [train.py:901] (0/4) Epoch 5, batch 650, loss[loss=0.3425, simple_loss=0.4029, pruned_loss=0.141, over 8476.00 frames. ], tot_loss[loss=0.2924, simple_loss=0.3529, pruned_loss=0.116, over 1556110.21 frames. 
], batch size: 25, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:21:02,982 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:21:18,950 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6584, 1.5835, 3.4038, 1.1370, 2.2479, 3.9628, 3.5509, 3.3453], + device='cuda:0'), covar=tensor([0.1157, 0.1305, 0.0349, 0.2071, 0.0765, 0.0190, 0.0367, 0.0574], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0261, 0.0221, 0.0260, 0.0225, 0.0193, 0.0212, 0.0270], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:21:28,368 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9747, 2.1947, 1.9770, 2.5951, 1.7902, 1.8552, 1.9860, 2.3821], + device='cuda:0'), covar=tensor([0.0983, 0.1141, 0.1237, 0.0670, 0.1338, 0.1548, 0.1245, 0.0937], + device='cuda:0'), in_proj_covar=tensor([0.0274, 0.0274, 0.0294, 0.0220, 0.0255, 0.0285, 0.0291, 0.0269], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:21:30,782 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 3.090e+02 3.854e+02 5.024e+02 8.355e+02, threshold=7.708e+02, percent-clipped=4.0 +2023-02-06 00:21:35,148 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 00:21:36,137 INFO [train.py:901] (0/4) Epoch 5, batch 700, loss[loss=0.2894, simple_loss=0.357, pruned_loss=0.1108, over 8109.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3531, pruned_loss=0.1162, over 1566106.90 frames. ], batch size: 23, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:11,097 INFO [train.py:901] (0/4) Epoch 5, batch 750, loss[loss=0.3142, simple_loss=0.3806, pruned_loss=0.1239, over 8358.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.3549, pruned_loss=0.1174, over 1577127.12 frames. ], batch size: 24, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:14,419 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:22:36,874 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 00:22:40,967 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 3.306e+02 4.079e+02 5.042e+02 1.499e+03, threshold=8.159e+02, percent-clipped=7.0 +2023-02-06 00:22:45,500 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 00:22:46,149 INFO [train.py:901] (0/4) Epoch 5, batch 800, loss[loss=0.3045, simple_loss=0.3544, pruned_loss=0.1273, over 8024.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.3546, pruned_loss=0.1174, over 1585408.01 frames. ], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:13,714 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:23:19,949 INFO [train.py:901] (0/4) Epoch 5, batch 850, loss[loss=0.3298, simple_loss=0.385, pruned_loss=0.1373, over 8144.00 frames. ], tot_loss[loss=0.2957, simple_loss=0.3551, pruned_loss=0.1182, over 1594443.75 frames. 
], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:49,971 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.888e+02 3.855e+02 5.468e+02 1.103e+03, threshold=7.709e+02, percent-clipped=2.0 +2023-02-06 00:23:56,030 INFO [train.py:901] (0/4) Epoch 5, batch 900, loss[loss=0.2942, simple_loss=0.3618, pruned_loss=0.1133, over 8120.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3532, pruned_loss=0.1173, over 1594201.81 frames. ], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:24:29,744 INFO [train.py:901] (0/4) Epoch 5, batch 950, loss[loss=0.3055, simple_loss=0.3676, pruned_loss=0.1217, over 8463.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3537, pruned_loss=0.117, over 1601273.49 frames. ], batch size: 25, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:01,022 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.004e+02 3.759e+02 4.642e+02 8.675e+02, threshold=7.519e+02, percent-clipped=2.0 +2023-02-06 00:25:03,061 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:25:05,099 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 00:25:06,446 INFO [train.py:901] (0/4) Epoch 5, batch 1000, loss[loss=0.2645, simple_loss=0.3336, pruned_loss=0.09771, over 8187.00 frames. ], tot_loss[loss=0.2923, simple_loss=0.3526, pruned_loss=0.116, over 1605136.59 frames. ], batch size: 23, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:34,765 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 00:25:40,441 INFO [train.py:901] (0/4) Epoch 5, batch 1050, loss[loss=0.3066, simple_loss=0.3678, pruned_loss=0.1227, over 8312.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3514, pruned_loss=0.1162, over 1599833.65 frames. ], batch size: 25, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:40,475 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 00:25:52,226 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 00:26:08,785 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.885e+02 3.252e+02 3.786e+02 4.850e+02 9.380e+02, threshold=7.572e+02, percent-clipped=3.0 +2023-02-06 00:26:13,582 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:14,846 INFO [train.py:901] (0/4) Epoch 5, batch 1100, loss[loss=0.3108, simple_loss=0.3815, pruned_loss=0.1201, over 8626.00 frames. ], tot_loss[loss=0.2912, simple_loss=0.3511, pruned_loss=0.1157, over 1605711.58 frames. 
], batch size: 49, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:22,995 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:29,012 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7434, 1.9723, 2.1484, 1.7005, 1.1214, 2.1261, 0.2904, 1.1122], + device='cuda:0'), covar=tensor([0.2660, 0.2359, 0.0847, 0.2379, 0.7015, 0.0790, 0.6195, 0.2987], + device='cuda:0'), in_proj_covar=tensor([0.0124, 0.0116, 0.0079, 0.0162, 0.0208, 0.0080, 0.0144, 0.0121], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:26:50,043 INFO [train.py:901] (0/4) Epoch 5, batch 1150, loss[loss=0.2968, simple_loss=0.3659, pruned_loss=0.1139, over 8740.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3518, pruned_loss=0.1159, over 1610559.06 frames. ], batch size: 30, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:54,912 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:02,059 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 00:27:13,077 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:18,265 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 3.101e+02 4.052e+02 5.357e+02 1.331e+03, threshold=8.105e+02, percent-clipped=11.0 +2023-02-06 00:27:23,603 INFO [train.py:901] (0/4) Epoch 5, batch 1200, loss[loss=0.31, simple_loss=0.3543, pruned_loss=0.1329, over 8633.00 frames. ], tot_loss[loss=0.2918, simple_loss=0.3518, pruned_loss=0.1159, over 1607940.29 frames. ], batch size: 49, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:27:32,543 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:53,514 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.5417, 5.5118, 4.9208, 1.6677, 4.8321, 5.1883, 5.2531, 4.6131], + device='cuda:0'), covar=tensor([0.0724, 0.0550, 0.0893, 0.5394, 0.0699, 0.0909, 0.1200, 0.0964], + device='cuda:0'), in_proj_covar=tensor([0.0375, 0.0281, 0.0303, 0.0393, 0.0301, 0.0262, 0.0286, 0.0232], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-06 00:28:00,128 INFO [train.py:901] (0/4) Epoch 5, batch 1250, loss[loss=0.2388, simple_loss=0.3073, pruned_loss=0.08514, over 8083.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.3527, pruned_loss=0.1167, over 1610974.76 frames. 
], batch size: 21, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:15,585 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8119, 3.7305, 3.4057, 1.6806, 3.3242, 3.3137, 3.4436, 2.8755], + device='cuda:0'), covar=tensor([0.0941, 0.0719, 0.0997, 0.4265, 0.0809, 0.0935, 0.1316, 0.0948], + device='cuda:0'), in_proj_covar=tensor([0.0371, 0.0277, 0.0300, 0.0390, 0.0296, 0.0260, 0.0287, 0.0231], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-06 00:28:29,031 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.057e+02 3.737e+02 5.343e+02 1.068e+03, threshold=7.474e+02, percent-clipped=1.0 +2023-02-06 00:28:34,050 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:28:34,562 INFO [train.py:901] (0/4) Epoch 5, batch 1300, loss[loss=0.268, simple_loss=0.3267, pruned_loss=0.1047, over 8092.00 frames. ], tot_loss[loss=0.2937, simple_loss=0.3532, pruned_loss=0.1172, over 1608499.17 frames. ], batch size: 21, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:29:10,156 INFO [train.py:901] (0/4) Epoch 5, batch 1350, loss[loss=0.346, simple_loss=0.3938, pruned_loss=0.1491, over 8295.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3554, pruned_loss=0.1188, over 1610390.12 frames. ], batch size: 23, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:29:18,147 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:21,565 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:30,025 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-06 00:29:38,302 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:39,358 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 3.141e+02 3.942e+02 4.566e+02 9.800e+02, threshold=7.885e+02, percent-clipped=1.0 +2023-02-06 00:29:44,201 INFO [train.py:901] (0/4) Epoch 5, batch 1400, loss[loss=0.2868, simple_loss=0.3561, pruned_loss=0.1087, over 8456.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.3555, pruned_loss=0.1183, over 1609557.40 frames. ], batch size: 25, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:29:52,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7776, 2.5588, 2.9134, 2.3438, 1.5031, 2.8243, 0.7253, 1.7397], + device='cuda:0'), covar=tensor([0.3008, 0.2926, 0.0892, 0.2181, 0.6134, 0.0808, 0.5784, 0.2377], + device='cuda:0'), in_proj_covar=tensor([0.0129, 0.0117, 0.0081, 0.0167, 0.0212, 0.0081, 0.0148, 0.0125], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:30:18,167 INFO [train.py:901] (0/4) Epoch 5, batch 1450, loss[loss=0.3151, simple_loss=0.3775, pruned_loss=0.1264, over 8515.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3566, pruned_loss=0.1187, over 1614651.82 frames. ], batch size: 26, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:32,288 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 00:30:32,501 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:49,119 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.073e+02 3.068e+02 3.705e+02 5.190e+02 1.303e+03, threshold=7.410e+02, percent-clipped=4.0 +2023-02-06 00:30:50,045 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:54,036 INFO [train.py:901] (0/4) Epoch 5, batch 1500, loss[loss=0.3347, simple_loss=0.3854, pruned_loss=0.142, over 7820.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3545, pruned_loss=0.1172, over 1614103.97 frames. ], batch size: 20, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:54,786 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:09,215 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 00:31:27,613 INFO [train.py:901] (0/4) Epoch 5, batch 1550, loss[loss=0.2238, simple_loss=0.3003, pruned_loss=0.07369, over 8083.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.3555, pruned_loss=0.1178, over 1619801.46 frames. ], batch size: 21, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:31:31,093 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:48,411 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:58,126 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.067e+02 3.419e+02 4.027e+02 4.998e+02 8.696e+02, threshold=8.054e+02, percent-clipped=2.0 +2023-02-06 00:32:03,152 INFO [train.py:901] (0/4) Epoch 5, batch 1600, loss[loss=0.2891, simple_loss=0.3609, pruned_loss=0.1087, over 8291.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.3557, pruned_loss=0.1183, over 1619440.90 frames. 
], batch size: 23, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:32:12,809 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6945, 1.4344, 2.9812, 1.3401, 2.2543, 3.2168, 3.1325, 2.8110], + device='cuda:0'), covar=tensor([0.1086, 0.1387, 0.0431, 0.1988, 0.0695, 0.0308, 0.0413, 0.0631], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0266, 0.0216, 0.0260, 0.0221, 0.0195, 0.0220, 0.0270], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:32:14,803 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:20,115 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:30,687 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8965, 1.4698, 3.4234, 1.4746, 2.3191, 3.8362, 3.6120, 3.2998], + device='cuda:0'), covar=tensor([0.1058, 0.1498, 0.0307, 0.1862, 0.0790, 0.0284, 0.0354, 0.0551], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0266, 0.0217, 0.0259, 0.0222, 0.0194, 0.0220, 0.0272], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:32:37,058 INFO [train.py:901] (0/4) Epoch 5, batch 1650, loss[loss=0.274, simple_loss=0.3245, pruned_loss=0.1118, over 7425.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.3528, pruned_loss=0.1162, over 1618541.71 frames. ], batch size: 17, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:32:48,559 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-34000.pt +2023-02-06 00:33:07,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.926e+02 3.722e+02 4.611e+02 9.053e+02, threshold=7.444e+02, percent-clipped=4.0 +2023-02-06 00:33:11,841 INFO [train.py:901] (0/4) Epoch 5, batch 1700, loss[loss=0.2791, simple_loss=0.3539, pruned_loss=0.1022, over 8499.00 frames. ], tot_loss[loss=0.293, simple_loss=0.353, pruned_loss=0.1165, over 1616792.15 frames. ], batch size: 26, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:17,258 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:33,629 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:39,804 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7136, 5.6482, 4.9920, 2.2702, 5.0965, 5.3082, 5.3030, 4.8643], + device='cuda:0'), covar=tensor([0.0610, 0.0486, 0.0716, 0.4263, 0.0543, 0.0534, 0.1057, 0.0471], + device='cuda:0'), in_proj_covar=tensor([0.0392, 0.0294, 0.0311, 0.0405, 0.0313, 0.0273, 0.0302, 0.0244], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-06 00:33:44,837 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 00:33:47,846 INFO [train.py:901] (0/4) Epoch 5, batch 1750, loss[loss=0.2853, simple_loss=0.3507, pruned_loss=0.11, over 8352.00 frames. ], tot_loss[loss=0.2923, simple_loss=0.3527, pruned_loss=0.116, over 1621559.54 frames. 
], batch size: 24, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:34:16,516 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.177e+02 3.118e+02 3.687e+02 4.787e+02 9.448e+02, threshold=7.373e+02, percent-clipped=7.0 +2023-02-06 00:34:21,845 INFO [train.py:901] (0/4) Epoch 5, batch 1800, loss[loss=0.2978, simple_loss=0.366, pruned_loss=0.1148, over 8290.00 frames. ], tot_loss[loss=0.292, simple_loss=0.3522, pruned_loss=0.1159, over 1615987.31 frames. ], batch size: 23, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:34:24,330 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 00:34:36,703 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:43,245 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:57,289 INFO [train.py:901] (0/4) Epoch 5, batch 1850, loss[loss=0.2516, simple_loss=0.3317, pruned_loss=0.08574, over 8480.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3526, pruned_loss=0.1158, over 1617669.14 frames. ], batch size: 27, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:35:12,354 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34205.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:26,207 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.489e+02 4.150e+02 5.670e+02 1.027e+03, threshold=8.299e+02, percent-clipped=7.0 +2023-02-06 00:35:29,068 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34230.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:30,895 INFO [train.py:901] (0/4) Epoch 5, batch 1900, loss[loss=0.2885, simple_loss=0.3584, pruned_loss=0.1093, over 8342.00 frames. ], tot_loss[loss=0.292, simple_loss=0.3526, pruned_loss=0.1157, over 1617969.96 frames. ], batch size: 26, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:00,402 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 00:36:05,944 INFO [train.py:901] (0/4) Epoch 5, batch 1950, loss[loss=0.3154, simple_loss=0.3721, pruned_loss=0.1293, over 8029.00 frames. ], tot_loss[loss=0.2932, simple_loss=0.3535, pruned_loss=0.1165, over 1616680.35 frames. ], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:09,866 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 00:36:18,691 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:36:23,389 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 00:36:35,385 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.967e+02 3.945e+02 4.927e+02 1.257e+03, threshold=7.890e+02, percent-clipped=2.0 +2023-02-06 00:36:40,164 INFO [train.py:901] (0/4) Epoch 5, batch 2000, loss[loss=0.2833, simple_loss=0.3446, pruned_loss=0.111, over 8103.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.353, pruned_loss=0.1161, over 1615777.45 frames. ], batch size: 23, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:42,291 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 00:37:14,372 INFO [train.py:901] (0/4) Epoch 5, batch 2050, loss[loss=0.2918, simple_loss=0.3385, pruned_loss=0.1226, over 7695.00 frames. ], tot_loss[loss=0.2945, simple_loss=0.3544, pruned_loss=0.1173, over 1612140.44 frames. ], batch size: 18, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:37:31,116 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:33,923 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:38,675 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:45,186 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 3.245e+02 4.066e+02 4.898e+02 1.293e+03, threshold=8.132e+02, percent-clipped=4.0 +2023-02-06 00:37:49,849 INFO [train.py:901] (0/4) Epoch 5, batch 2100, loss[loss=0.2668, simple_loss=0.3449, pruned_loss=0.09438, over 8632.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.3542, pruned_loss=0.1178, over 1610322.14 frames. ], batch size: 34, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:37:51,412 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:55,819 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 00:38:05,709 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 00:38:23,172 INFO [train.py:901] (0/4) Epoch 5, batch 2150, loss[loss=0.3113, simple_loss=0.381, pruned_loss=0.1207, over 8314.00 frames. ], tot_loss[loss=0.2923, simple_loss=0.3521, pruned_loss=0.1163, over 1610359.80 frames. ], batch size: 26, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:38:30,787 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3184, 1.9555, 3.0697, 2.6226, 2.5367, 1.9607, 1.3853, 1.1021], + device='cuda:0'), covar=tensor([0.1554, 0.1760, 0.0394, 0.0785, 0.0717, 0.0901, 0.0961, 0.1797], + device='cuda:0'), in_proj_covar=tensor([0.0723, 0.0660, 0.0563, 0.0643, 0.0749, 0.0614, 0.0590, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:38:39,929 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:46,882 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 00:38:50,630 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:53,807 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.929e+02 3.753e+02 4.663e+02 1.529e+03, threshold=7.506e+02, percent-clipped=2.0 +2023-02-06 00:38:59,147 INFO [train.py:901] (0/4) Epoch 5, batch 2200, loss[loss=0.307, simple_loss=0.3545, pruned_loss=0.1297, over 8235.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3508, pruned_loss=0.1151, over 1611631.80 frames. 
], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:39:16,174 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6474, 3.7949, 2.0066, 2.4012, 2.6449, 1.6385, 2.2327, 2.8971], + device='cuda:0'), covar=tensor([0.1461, 0.0249, 0.1010, 0.0773, 0.0649, 0.1146, 0.1019, 0.0815], + device='cuda:0'), in_proj_covar=tensor([0.0363, 0.0253, 0.0323, 0.0315, 0.0326, 0.0314, 0.0346, 0.0317], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 00:39:32,422 INFO [train.py:901] (0/4) Epoch 5, batch 2250, loss[loss=0.2695, simple_loss=0.349, pruned_loss=0.09494, over 8448.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3516, pruned_loss=0.1161, over 1609791.05 frames. ], batch size: 27, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:39:37,138 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1990, 1.0532, 3.2728, 0.9272, 2.8626, 2.7452, 2.9084, 2.8265], + device='cuda:0'), covar=tensor([0.0475, 0.3287, 0.0513, 0.2559, 0.1185, 0.0715, 0.0577, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0323, 0.0488, 0.0399, 0.0410, 0.0481, 0.0397, 0.0400, 0.0441], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 00:39:52,216 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:39:59,486 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:02,640 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.996e+02 3.688e+02 4.883e+02 6.349e+02 4.437e+03, threshold=9.766e+02, percent-clipped=16.0 +2023-02-06 00:40:07,917 INFO [train.py:901] (0/4) Epoch 5, batch 2300, loss[loss=0.3418, simple_loss=0.3891, pruned_loss=0.1473, over 8344.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3535, pruned_loss=0.1174, over 1612962.01 frames. ], batch size: 26, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:40:12,715 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34640.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:16,079 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:34,977 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:42,256 INFO [train.py:901] (0/4) Epoch 5, batch 2350, loss[loss=0.2899, simple_loss=0.3599, pruned_loss=0.11, over 8518.00 frames. ], tot_loss[loss=0.2945, simple_loss=0.3541, pruned_loss=0.1175, over 1614244.30 frames. ], batch size: 49, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:40:51,694 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:41:11,438 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 3.189e+02 4.018e+02 4.942e+02 1.178e+03, threshold=8.036e+02, percent-clipped=1.0 +2023-02-06 00:41:12,928 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34728.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:41:16,110 INFO [train.py:901] (0/4) Epoch 5, batch 2400, loss[loss=0.3336, simple_loss=0.3913, pruned_loss=0.138, over 8572.00 frames. 
], tot_loss[loss=0.2943, simple_loss=0.3533, pruned_loss=0.1176, over 1610855.19 frames. ], batch size: 31, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:41:47,413 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34777.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:41:51,214 INFO [train.py:901] (0/4) Epoch 5, batch 2450, loss[loss=0.2915, simple_loss=0.3427, pruned_loss=0.1201, over 8047.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3537, pruned_loss=0.1174, over 1613821.26 frames. ], batch size: 20, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:41:51,353 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7122, 1.4497, 5.7844, 2.1956, 5.0334, 4.8367, 5.2238, 5.1915], + device='cuda:0'), covar=tensor([0.0371, 0.3848, 0.0222, 0.2263, 0.0885, 0.0492, 0.0417, 0.0438], + device='cuda:0'), in_proj_covar=tensor([0.0316, 0.0488, 0.0400, 0.0406, 0.0474, 0.0395, 0.0396, 0.0436], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 00:41:55,099 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 00:42:04,191 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:42:19,453 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.991e+02 3.791e+02 4.954e+02 1.109e+03, threshold=7.583e+02, percent-clipped=3.0 +2023-02-06 00:42:24,144 INFO [train.py:901] (0/4) Epoch 5, batch 2500, loss[loss=0.1958, simple_loss=0.2628, pruned_loss=0.06436, over 7694.00 frames. ], tot_loss[loss=0.2934, simple_loss=0.3527, pruned_loss=0.117, over 1612122.88 frames. ], batch size: 18, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:42:43,923 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0784, 1.1301, 1.1030, 0.9011, 0.7296, 1.1468, 0.0160, 0.9630], + device='cuda:0'), covar=tensor([0.3352, 0.2381, 0.1421, 0.2476, 0.6424, 0.1113, 0.5518, 0.2263], + device='cuda:0'), in_proj_covar=tensor([0.0126, 0.0123, 0.0080, 0.0164, 0.0207, 0.0081, 0.0147, 0.0120], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:42:55,972 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:42:59,771 INFO [train.py:901] (0/4) Epoch 5, batch 2550, loss[loss=0.2348, simple_loss=0.3088, pruned_loss=0.08041, over 8234.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3536, pruned_loss=0.1178, over 1611398.45 frames. ], batch size: 22, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:43:13,472 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:43:29,141 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 2.947e+02 3.618e+02 4.736e+02 1.253e+03, threshold=7.237e+02, percent-clipped=4.0 +2023-02-06 00:43:33,913 INFO [train.py:901] (0/4) Epoch 5, batch 2600, loss[loss=0.3123, simple_loss=0.3718, pruned_loss=0.1264, over 7803.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.3555, pruned_loss=0.1193, over 1612276.61 frames. 
], batch size: 20, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:43:49,004 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:00,259 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.5460, 3.5575, 3.1771, 2.0397, 3.2128, 3.1154, 3.2867, 2.7988], + device='cuda:0'), covar=tensor([0.0909, 0.0680, 0.0950, 0.3514, 0.0733, 0.0829, 0.1186, 0.0804], + device='cuda:0'), in_proj_covar=tensor([0.0391, 0.0281, 0.0310, 0.0400, 0.0304, 0.0269, 0.0292, 0.0237], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-06 00:44:09,533 INFO [train.py:901] (0/4) Epoch 5, batch 2650, loss[loss=0.3211, simple_loss=0.3834, pruned_loss=0.1294, over 8544.00 frames. ], tot_loss[loss=0.2954, simple_loss=0.3544, pruned_loss=0.1182, over 1611975.68 frames. ], batch size: 39, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:44:10,272 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:13,017 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:33,030 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 00:44:39,302 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.922e+02 3.827e+02 4.980e+02 8.274e+02, threshold=7.654e+02, percent-clipped=5.0 +2023-02-06 00:44:43,764 INFO [train.py:901] (0/4) Epoch 5, batch 2700, loss[loss=0.3251, simple_loss=0.3811, pruned_loss=0.1345, over 8125.00 frames. ], tot_loss[loss=0.2945, simple_loss=0.3538, pruned_loss=0.1176, over 1612460.22 frames. ], batch size: 22, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:45:09,474 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:10,729 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35072.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:45:18,332 INFO [train.py:901] (0/4) Epoch 5, batch 2750, loss[loss=0.347, simple_loss=0.3993, pruned_loss=0.1474, over 8491.00 frames. ], tot_loss[loss=0.2934, simple_loss=0.353, pruned_loss=0.1168, over 1613582.54 frames. ], batch size: 28, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:45:29,743 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:32,506 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:37,935 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:48,687 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.004e+02 3.039e+02 3.659e+02 5.251e+02 1.248e+03, threshold=7.317e+02, percent-clipped=8.0 +2023-02-06 00:45:53,667 INFO [train.py:901] (0/4) Epoch 5, batch 2800, loss[loss=0.3071, simple_loss=0.36, pruned_loss=0.1271, over 7798.00 frames. ], tot_loss[loss=0.295, simple_loss=0.3549, pruned_loss=0.1176, over 1618443.51 frames. ], batch size: 20, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:46:27,512 INFO [train.py:901] (0/4) Epoch 5, batch 2850, loss[loss=0.3283, simple_loss=0.3766, pruned_loss=0.14, over 8649.00 frames. 
], tot_loss[loss=0.2958, simple_loss=0.3551, pruned_loss=0.1183, over 1620896.80 frames. ], batch size: 39, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:46:30,504 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35187.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:46:58,368 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.990e+02 3.598e+02 4.675e+02 1.498e+03, threshold=7.197e+02, percent-clipped=4.0 +2023-02-06 00:47:03,662 INFO [train.py:901] (0/4) Epoch 5, batch 2900, loss[loss=0.3421, simple_loss=0.3809, pruned_loss=0.1516, over 7916.00 frames. ], tot_loss[loss=0.2969, simple_loss=0.3562, pruned_loss=0.1188, over 1621784.31 frames. ], batch size: 20, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:47:16,753 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.05 vs. limit=5.0 +2023-02-06 00:47:36,540 INFO [train.py:901] (0/4) Epoch 5, batch 2950, loss[loss=0.3347, simple_loss=0.3926, pruned_loss=0.1383, over 8570.00 frames. ], tot_loss[loss=0.2956, simple_loss=0.3554, pruned_loss=0.1179, over 1622430.94 frames. ], batch size: 39, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:47:41,876 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 00:48:06,831 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 3.185e+02 3.825e+02 4.988e+02 1.295e+03, threshold=7.649e+02, percent-clipped=4.0 +2023-02-06 00:48:07,075 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:12,107 INFO [train.py:901] (0/4) Epoch 5, batch 3000, loss[loss=0.2249, simple_loss=0.2931, pruned_loss=0.07832, over 7648.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.3556, pruned_loss=0.1184, over 1621384.04 frames. ], batch size: 19, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:48:12,108 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 00:48:25,509 INFO [train.py:935] (0/4) Epoch 5, validation: loss=0.2228, simple_loss=0.319, pruned_loss=0.0633, over 944034.00 frames. +2023-02-06 00:48:25,510 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 00:48:39,249 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:41,982 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:44,687 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:47,325 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35363.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:48:59,229 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:01,090 INFO [train.py:901] (0/4) Epoch 5, batch 3050, loss[loss=0.2717, simple_loss=0.3412, pruned_loss=0.1011, over 7964.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.3552, pruned_loss=0.1179, over 1620152.36 frames. 
], batch size: 21, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:49:01,941 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:07,184 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:20,088 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5897, 2.2652, 4.5210, 1.1839, 2.8129, 2.0281, 1.5625, 2.4734], + device='cuda:0'), covar=tensor([0.1573, 0.1778, 0.0696, 0.3123, 0.1470, 0.2469, 0.1466, 0.2453], + device='cuda:0'), in_proj_covar=tensor([0.0473, 0.0448, 0.0531, 0.0535, 0.0577, 0.0515, 0.0455, 0.0589], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:49:29,664 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.238e+02 3.010e+02 3.735e+02 4.816e+02 9.592e+02, threshold=7.471e+02, percent-clipped=3.0 +2023-02-06 00:49:34,188 INFO [train.py:901] (0/4) Epoch 5, batch 3100, loss[loss=0.2633, simple_loss=0.3239, pruned_loss=0.1013, over 7799.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.354, pruned_loss=0.1171, over 1619599.19 frames. ], batch size: 19, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:49:41,030 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35443.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:49:48,035 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35454.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:59,629 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35468.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:50:09,986 INFO [train.py:901] (0/4) Epoch 5, batch 3150, loss[loss=0.2885, simple_loss=0.3604, pruned_loss=0.1083, over 8503.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.354, pruned_loss=0.1166, over 1620899.80 frames. ], batch size: 26, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:50:10,784 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0414, 2.8307, 2.7009, 1.4816, 2.7357, 2.6685, 2.7976, 2.4106], + device='cuda:0'), covar=tensor([0.1176, 0.0879, 0.1091, 0.4552, 0.0988, 0.1212, 0.1293, 0.1132], + device='cuda:0'), in_proj_covar=tensor([0.0392, 0.0283, 0.0318, 0.0407, 0.0310, 0.0273, 0.0297, 0.0242], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-06 00:50:39,622 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.108e+02 3.249e+02 4.087e+02 5.030e+02 9.472e+02, threshold=8.174e+02, percent-clipped=3.0 +2023-02-06 00:50:44,418 INFO [train.py:901] (0/4) Epoch 5, batch 3200, loss[loss=0.2616, simple_loss=0.3261, pruned_loss=0.09851, over 8255.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3541, pruned_loss=0.1171, over 1617383.52 frames. ], batch size: 22, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:50:54,830 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:51:09,455 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35569.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:51:20,160 INFO [train.py:901] (0/4) Epoch 5, batch 3250, loss[loss=0.3049, simple_loss=0.3716, pruned_loss=0.1191, over 8451.00 frames. 
], tot_loss[loss=0.2957, simple_loss=0.3559, pruned_loss=0.1177, over 1621355.46 frames. ], batch size: 27, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:51:26,857 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 00:51:50,492 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.378e+02 4.149e+02 5.121e+02 1.146e+03, threshold=8.298e+02, percent-clipped=3.0 +2023-02-06 00:51:55,280 INFO [train.py:901] (0/4) Epoch 5, batch 3300, loss[loss=0.2462, simple_loss=0.3096, pruned_loss=0.09136, over 7698.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.3529, pruned_loss=0.1158, over 1617429.18 frames. ], batch size: 18, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:52:30,152 INFO [train.py:901] (0/4) Epoch 5, batch 3350, loss[loss=0.3105, simple_loss=0.3556, pruned_loss=0.1327, over 7812.00 frames. ], tot_loss[loss=0.2929, simple_loss=0.3533, pruned_loss=0.1162, over 1621556.89 frames. ], batch size: 20, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:52:47,810 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35707.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:53:01,460 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.966e+02 3.555e+02 4.125e+02 4.946e+02 1.065e+03, threshold=8.250e+02, percent-clipped=5.0 +2023-02-06 00:53:06,222 INFO [train.py:901] (0/4) Epoch 5, batch 3400, loss[loss=0.2238, simple_loss=0.2903, pruned_loss=0.07865, over 8244.00 frames. ], tot_loss[loss=0.2926, simple_loss=0.353, pruned_loss=0.1161, over 1617918.54 frames. ], batch size: 22, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:53:08,383 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:53:20,098 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6538, 2.1730, 4.3325, 1.1794, 2.9043, 2.2805, 1.5192, 2.3613], + device='cuda:0'), covar=tensor([0.1517, 0.2066, 0.0669, 0.3303, 0.1395, 0.2233, 0.1542, 0.2638], + device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0443, 0.0523, 0.0531, 0.0573, 0.0512, 0.0446, 0.0590], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:53:39,717 INFO [train.py:901] (0/4) Epoch 5, batch 3450, loss[loss=0.2525, simple_loss=0.3159, pruned_loss=0.09453, over 8090.00 frames. ], tot_loss[loss=0.293, simple_loss=0.3533, pruned_loss=0.1164, over 1614052.11 frames. ], batch size: 21, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:53:43,879 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:53:45,410 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-06 00:54:01,268 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1864, 1.8306, 3.0702, 1.5940, 2.4386, 3.4502, 3.2495, 2.9742], + device='cuda:0'), covar=tensor([0.0829, 0.1208, 0.0490, 0.1669, 0.0742, 0.0235, 0.0398, 0.0578], + device='cuda:0'), in_proj_covar=tensor([0.0227, 0.0262, 0.0218, 0.0259, 0.0216, 0.0195, 0.0224, 0.0268], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 00:54:07,864 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35822.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:54:09,943 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:10,372 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.123e+02 3.051e+02 3.738e+02 4.571e+02 6.690e+02, threshold=7.475e+02, percent-clipped=0.0 +2023-02-06 00:54:12,575 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35829.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:15,092 INFO [train.py:901] (0/4) Epoch 5, batch 3500, loss[loss=0.2876, simple_loss=0.3592, pruned_loss=0.1081, over 8095.00 frames. ], tot_loss[loss=0.2945, simple_loss=0.3547, pruned_loss=0.1172, over 1616912.45 frames. ], batch size: 23, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:54:27,016 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35850.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:27,674 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:40,698 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 00:54:40,871 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6805, 2.9911, 2.4954, 3.9891, 2.0588, 2.0929, 2.3158, 3.2330], + device='cuda:0'), covar=tensor([0.1023, 0.1270, 0.1379, 0.0320, 0.1782, 0.2023, 0.2081, 0.1158], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0267, 0.0287, 0.0223, 0.0252, 0.0277, 0.0290, 0.0264], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 00:54:48,819 INFO [train.py:901] (0/4) Epoch 5, batch 3550, loss[loss=0.2502, simple_loss=0.3061, pruned_loss=0.09713, over 7528.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3541, pruned_loss=0.117, over 1614014.35 frames. ], batch size: 18, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:54:54,912 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:55:16,695 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 00:55:19,528 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.232e+02 3.318e+02 3.882e+02 4.908e+02 1.221e+03, threshold=7.763e+02, percent-clipped=6.0 +2023-02-06 00:55:23,985 INFO [train.py:901] (0/4) Epoch 5, batch 3600, loss[loss=0.3276, simple_loss=0.376, pruned_loss=0.1396, over 8098.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3541, pruned_loss=0.1175, over 1613236.35 frames. ], batch size: 23, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:55:57,695 INFO [train.py:901] (0/4) Epoch 5, batch 3650, loss[loss=0.2961, simple_loss=0.3553, pruned_loss=0.1185, over 8791.00 frames. 
], tot_loss[loss=0.2946, simple_loss=0.3542, pruned_loss=0.1175, over 1611813.30 frames. ], batch size: 30, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:56:08,724 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4695, 2.1479, 3.0199, 2.5849, 2.6316, 1.9935, 1.5275, 1.6436], + device='cuda:0'), covar=tensor([0.1321, 0.1526, 0.0386, 0.0765, 0.0660, 0.0866, 0.0915, 0.1479], + device='cuda:0'), in_proj_covar=tensor([0.0730, 0.0668, 0.0566, 0.0651, 0.0759, 0.0626, 0.0601, 0.0621], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 00:56:09,201 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-36000.pt +2023-02-06 00:56:15,053 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:56:27,680 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.237e+02 3.345e+02 4.197e+02 5.280e+02 9.599e+02, threshold=8.394e+02, percent-clipped=10.0 +2023-02-06 00:56:28,834 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.27 vs. limit=5.0 +2023-02-06 00:56:32,338 INFO [train.py:901] (0/4) Epoch 5, batch 3700, loss[loss=0.2847, simple_loss=0.3498, pruned_loss=0.1098, over 8444.00 frames. ], tot_loss[loss=0.2933, simple_loss=0.3533, pruned_loss=0.1166, over 1609001.93 frames. ], batch size: 27, lr: 1.50e-02, grad_scale: 16.0 +2023-02-06 00:56:40,798 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 00:57:04,797 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36078.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:57:07,920 INFO [train.py:901] (0/4) Epoch 5, batch 3750, loss[loss=0.2431, simple_loss=0.3151, pruned_loss=0.08553, over 7808.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3539, pruned_loss=0.117, over 1611516.32 frames. ], batch size: 20, lr: 1.50e-02, grad_scale: 16.0 +2023-02-06 00:57:14,130 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5154, 1.9997, 3.9330, 1.1196, 2.4432, 1.6187, 1.6447, 2.2756], + device='cuda:0'), covar=tensor([0.2149, 0.2549, 0.0721, 0.4170, 0.1877, 0.3286, 0.2182, 0.2807], + device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0449, 0.0521, 0.0538, 0.0574, 0.0521, 0.0453, 0.0586], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 00:57:21,376 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36103.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:57:24,147 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:37,183 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 3.033e+02 3.704e+02 4.599e+02 1.470e+03, threshold=7.408e+02, percent-clipped=9.0 +2023-02-06 00:57:40,766 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:41,236 INFO [train.py:901] (0/4) Epoch 5, batch 3800, loss[loss=0.2868, simple_loss=0.3464, pruned_loss=0.1136, over 8610.00 frames. ], tot_loss[loss=0.2932, simple_loss=0.3535, pruned_loss=0.1165, over 1610505.64 frames. 
], batch size: 31, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:57:41,310 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:05,376 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2060, 1.3605, 2.2554, 0.9569, 2.2362, 2.4638, 2.4022, 2.0710], + device='cuda:0'), covar=tensor([0.1020, 0.1026, 0.0474, 0.1923, 0.0487, 0.0384, 0.0543, 0.0761], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0267, 0.0224, 0.0262, 0.0222, 0.0199, 0.0231, 0.0271], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 00:58:09,320 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:15,710 INFO [train.py:901] (0/4) Epoch 5, batch 3850, loss[loss=0.2654, simple_loss=0.3204, pruned_loss=0.1052, over 7558.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3539, pruned_loss=0.1176, over 1606480.57 frames. ], batch size: 18, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:58:27,209 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:41,068 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 00:58:45,745 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 3.284e+02 4.097e+02 5.243e+02 1.380e+03, threshold=8.194e+02, percent-clipped=10.0 +2023-02-06 00:58:49,706 INFO [train.py:901] (0/4) Epoch 5, batch 3900, loss[loss=0.252, simple_loss=0.3297, pruned_loss=0.08714, over 8299.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3532, pruned_loss=0.1162, over 1610584.43 frames. ], batch size: 23, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:58:59,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:02,912 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8098, 1.3912, 5.8426, 2.0995, 5.1742, 4.9633, 5.3428, 5.3194], + device='cuda:0'), covar=tensor([0.0320, 0.3786, 0.0170, 0.2165, 0.0704, 0.0436, 0.0304, 0.0345], + device='cuda:0'), in_proj_covar=tensor([0.0336, 0.0498, 0.0415, 0.0420, 0.0491, 0.0407, 0.0398, 0.0451], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 00:59:09,628 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:24,818 INFO [train.py:901] (0/4) Epoch 5, batch 3950, loss[loss=0.2956, simple_loss=0.3581, pruned_loss=0.1165, over 8558.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3499, pruned_loss=0.1145, over 1606296.37 frames. 
], batch size: 31, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:59:28,389 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:28,413 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:54,576 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.894e+02 2.959e+02 3.540e+02 4.519e+02 1.633e+03, threshold=7.079e+02, percent-clipped=6.0 +2023-02-06 00:59:58,379 INFO [train.py:901] (0/4) Epoch 5, batch 4000, loss[loss=0.2699, simple_loss=0.3232, pruned_loss=0.1083, over 7807.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.3512, pruned_loss=0.1157, over 1611216.18 frames. ], batch size: 20, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:00:22,602 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6906, 5.7973, 4.8943, 2.4581, 4.9799, 5.3557, 5.3407, 4.8632], + device='cuda:0'), covar=tensor([0.0735, 0.0422, 0.0873, 0.4118, 0.0608, 0.0726, 0.1097, 0.0659], + device='cuda:0'), in_proj_covar=tensor([0.0387, 0.0280, 0.0314, 0.0399, 0.0312, 0.0270, 0.0298, 0.0241], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:0') +2023-02-06 01:00:31,743 INFO [train.py:901] (0/4) Epoch 5, batch 4050, loss[loss=0.2543, simple_loss=0.3301, pruned_loss=0.08919, over 8288.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.352, pruned_loss=0.1162, over 1616164.57 frames. ], batch size: 23, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:03,217 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 3.036e+02 3.577e+02 4.439e+02 7.437e+02, threshold=7.154e+02, percent-clipped=1.0 +2023-02-06 01:01:07,958 INFO [train.py:901] (0/4) Epoch 5, batch 4100, loss[loss=0.2468, simple_loss=0.3061, pruned_loss=0.09373, over 7783.00 frames. ], tot_loss[loss=0.2901, simple_loss=0.3505, pruned_loss=0.1149, over 1616473.55 frames. ], batch size: 19, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:41,908 INFO [train.py:901] (0/4) Epoch 5, batch 4150, loss[loss=0.3142, simple_loss=0.3783, pruned_loss=0.125, over 8503.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.3509, pruned_loss=0.1147, over 1620716.74 frames. ], batch size: 28, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:56,697 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:13,616 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.751e+02 3.740e+02 4.679e+02 9.033e+02, threshold=7.480e+02, percent-clipped=3.0 +2023-02-06 01:02:15,198 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:17,751 INFO [train.py:901] (0/4) Epoch 5, batch 4200, loss[loss=0.2929, simple_loss=0.365, pruned_loss=0.1104, over 8638.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3499, pruned_loss=0.114, over 1617021.24 frames. ], batch size: 34, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:02:24,845 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:25,690 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36544.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:43,322 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 01:02:43,535 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36569.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:52,975 INFO [train.py:901] (0/4) Epoch 5, batch 4250, loss[loss=0.2801, simple_loss=0.3512, pruned_loss=0.1045, over 7969.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3506, pruned_loss=0.1145, over 1616698.65 frames. ], batch size: 21, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:03:05,722 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 01:03:20,914 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:24,291 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:24,819 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.959e+02 3.120e+02 3.802e+02 4.654e+02 9.583e+02, threshold=7.605e+02, percent-clipped=3.0 +2023-02-06 01:03:27,126 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:28,984 INFO [train.py:901] (0/4) Epoch 5, batch 4300, loss[loss=0.3352, simple_loss=0.3933, pruned_loss=0.1386, over 8616.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3502, pruned_loss=0.1144, over 1610724.12 frames. ], batch size: 39, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:03:47,191 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:04:05,007 INFO [train.py:901] (0/4) Epoch 5, batch 4350, loss[loss=0.2603, simple_loss=0.3323, pruned_loss=0.09418, over 7650.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3498, pruned_loss=0.1137, over 1611752.67 frames. ], batch size: 19, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:04:28,892 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36718.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:04:34,838 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.000e+02 3.265e+02 4.032e+02 4.973e+02 1.053e+03, threshold=8.064e+02, percent-clipped=5.0 +2023-02-06 01:04:36,272 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 01:04:38,962 INFO [train.py:901] (0/4) Epoch 5, batch 4400, loss[loss=0.2821, simple_loss=0.3512, pruned_loss=0.1065, over 8446.00 frames. ], tot_loss[loss=0.2901, simple_loss=0.351, pruned_loss=0.1146, over 1610354.11 frames. ], batch size: 27, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:05:15,305 INFO [train.py:901] (0/4) Epoch 5, batch 4450, loss[loss=0.2948, simple_loss=0.3619, pruned_loss=0.1139, over 8316.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3514, pruned_loss=0.115, over 1612627.32 frames. ], batch size: 25, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:05:18,095 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-06 01:05:43,210 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36823.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:05:45,712 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 3.029e+02 3.648e+02 4.687e+02 9.435e+02, threshold=7.296e+02, percent-clipped=4.0 +2023-02-06 01:05:49,707 INFO [train.py:901] (0/4) Epoch 5, batch 4500, loss[loss=0.3179, simple_loss=0.3669, pruned_loss=0.1344, over 8682.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.3489, pruned_loss=0.1139, over 1609490.54 frames. ], batch size: 39, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:06:15,008 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 01:06:25,784 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 01:06:26,116 INFO [train.py:901] (0/4) Epoch 5, batch 4550, loss[loss=0.2288, simple_loss=0.2922, pruned_loss=0.08267, over 7539.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.349, pruned_loss=0.1138, over 1606410.36 frames. ], batch size: 18, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:06:47,949 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36914.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:06:56,558 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 3.174e+02 3.779e+02 4.790e+02 8.988e+02, threshold=7.559e+02, percent-clipped=4.0 +2023-02-06 01:06:58,101 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:00,568 INFO [train.py:901] (0/4) Epoch 5, batch 4600, loss[loss=0.3309, simple_loss=0.3745, pruned_loss=0.1437, over 6719.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3493, pruned_loss=0.1141, over 1609144.86 frames. ], batch size: 71, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:07:04,720 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:23,544 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:25,626 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36970.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:29,033 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:30,326 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:35,634 INFO [train.py:901] (0/4) Epoch 5, batch 4650, loss[loss=0.2597, simple_loss=0.3285, pruned_loss=0.09549, over 7537.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3482, pruned_loss=0.1138, over 1609596.61 frames. 
], batch size: 18, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:07:53,422 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3058, 1.5353, 1.2765, 1.9301, 0.8285, 1.2181, 1.2620, 1.4419], + device='cuda:0'), covar=tensor([0.1233, 0.1209, 0.1642, 0.0623, 0.1559, 0.2002, 0.1155, 0.1030], + device='cuda:0'), in_proj_covar=tensor([0.0275, 0.0269, 0.0294, 0.0227, 0.0259, 0.0291, 0.0289, 0.0266], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:08:02,858 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:06,068 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 3.207e+02 3.974e+02 5.163e+02 9.904e+02, threshold=7.949e+02, percent-clipped=4.0 +2023-02-06 01:08:10,729 INFO [train.py:901] (0/4) Epoch 5, batch 4700, loss[loss=0.2312, simple_loss=0.2985, pruned_loss=0.08198, over 7812.00 frames. ], tot_loss[loss=0.2871, simple_loss=0.3472, pruned_loss=0.1135, over 1603049.21 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:08:16,882 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6412, 1.1515, 1.3785, 1.0578, 1.0221, 1.2083, 1.3157, 1.2841], + device='cuda:0'), covar=tensor([0.0613, 0.1427, 0.1864, 0.1596, 0.0637, 0.1679, 0.0797, 0.0590], + device='cuda:0'), in_proj_covar=tensor([0.0138, 0.0181, 0.0222, 0.0183, 0.0132, 0.0191, 0.0147, 0.0156], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 01:08:29,882 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37062.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:08:43,582 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:44,112 INFO [train.py:901] (0/4) Epoch 5, batch 4750, loss[loss=0.2275, simple_loss=0.301, pruned_loss=0.07695, over 8234.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.3484, pruned_loss=0.1141, over 1606065.66 frames. ], batch size: 22, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:08:45,690 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:49,119 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:09:08,545 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2080, 3.1101, 2.9177, 1.5452, 2.8767, 2.8234, 2.9328, 2.5916], + device='cuda:0'), covar=tensor([0.1208, 0.0766, 0.1087, 0.4497, 0.1036, 0.0975, 0.1349, 0.0960], + device='cuda:0'), in_proj_covar=tensor([0.0399, 0.0290, 0.0324, 0.0406, 0.0323, 0.0270, 0.0305, 0.0247], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:09:15,976 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 3.010e+02 3.846e+02 4.879e+02 1.523e+03, threshold=7.692e+02, percent-clipped=5.0 +2023-02-06 01:09:17,396 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 01:09:20,173 INFO [train.py:901] (0/4) Epoch 5, batch 4800, loss[loss=0.2837, simple_loss=0.3549, pruned_loss=0.1062, over 8361.00 frames. 
], tot_loss[loss=0.2877, simple_loss=0.3482, pruned_loss=0.1135, over 1610309.96 frames. ], batch size: 24, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:09:20,179 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 01:09:44,350 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37167.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:09:51,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37177.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:09:51,920 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2380, 1.3277, 2.1241, 1.0883, 2.0886, 2.2879, 2.2824, 1.9662], + device='cuda:0'), covar=tensor([0.1049, 0.1143, 0.0612, 0.1871, 0.0598, 0.0412, 0.0563, 0.0800], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0270, 0.0225, 0.0262, 0.0225, 0.0198, 0.0231, 0.0273], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 01:09:55,225 INFO [train.py:901] (0/4) Epoch 5, batch 4850, loss[loss=0.2646, simple_loss=0.3226, pruned_loss=0.1033, over 7970.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3481, pruned_loss=0.1132, over 1609988.45 frames. ], batch size: 21, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:10:10,661 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 01:10:12,992 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.01 vs. limit=2.0 +2023-02-06 01:10:27,459 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.956e+02 3.581e+02 4.871e+02 1.087e+03, threshold=7.163e+02, percent-clipped=6.0 +2023-02-06 01:10:31,481 INFO [train.py:901] (0/4) Epoch 5, batch 4900, loss[loss=0.2668, simple_loss=0.3447, pruned_loss=0.09444, over 8448.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.3483, pruned_loss=0.113, over 1612761.51 frames. ], batch size: 27, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:10:59,372 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:05,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37282.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:11:06,080 INFO [train.py:901] (0/4) Epoch 5, batch 4950, loss[loss=0.2733, simple_loss=0.3419, pruned_loss=0.1024, over 8470.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3475, pruned_loss=0.112, over 1611177.46 frames. 
], batch size: 28, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:11:08,182 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:17,614 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1775, 1.0983, 1.1350, 1.0937, 0.7728, 1.1597, 0.0280, 0.8678], + device='cuda:0'), covar=tensor([0.3440, 0.2357, 0.1179, 0.2065, 0.6705, 0.1062, 0.5514, 0.2055], + device='cuda:0'), in_proj_covar=tensor([0.0129, 0.0124, 0.0078, 0.0173, 0.0209, 0.0082, 0.0150, 0.0126], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:11:31,041 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:35,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 3.052e+02 3.616e+02 4.696e+02 1.143e+03, threshold=7.231e+02, percent-clipped=5.0 +2023-02-06 01:11:40,307 INFO [train.py:901] (0/4) Epoch 5, batch 5000, loss[loss=0.2695, simple_loss=0.3245, pruned_loss=0.1073, over 7201.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3474, pruned_loss=0.1123, over 1608064.85 frames. ], batch size: 16, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:11:43,908 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:45,908 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:49,228 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:55,335 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7729, 2.9550, 3.0444, 1.9314, 1.3914, 3.1781, 0.4620, 1.6937], + device='cuda:0'), covar=tensor([0.2442, 0.1252, 0.0960, 0.3851, 0.6393, 0.0539, 0.7023, 0.3071], + device='cuda:0'), in_proj_covar=tensor([0.0130, 0.0125, 0.0078, 0.0174, 0.0209, 0.0082, 0.0150, 0.0126], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:12:01,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:02,610 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2286, 1.4495, 1.2316, 1.8703, 0.9248, 1.0694, 1.2225, 1.4208], + device='cuda:0'), covar=tensor([0.1201, 0.1209, 0.1556, 0.0746, 0.1739, 0.2390, 0.1397, 0.1057], + device='cuda:0'), in_proj_covar=tensor([0.0273, 0.0267, 0.0291, 0.0229, 0.0259, 0.0289, 0.0294, 0.0265], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:12:03,189 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:03,304 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:05,923 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:13,775 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:15,009 INFO [train.py:901] (0/4) 
Epoch 5, batch 5050, loss[loss=0.2849, simple_loss=0.3565, pruned_loss=0.1066, over 8323.00 frames. ], tot_loss[loss=0.2871, simple_loss=0.349, pruned_loss=0.1126, over 1615362.93 frames. ], batch size: 25, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:12:18,395 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:44,032 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7859, 1.8657, 2.1764, 1.6042, 0.9401, 2.1842, 0.3874, 1.0659], + device='cuda:0'), covar=tensor([0.2991, 0.2543, 0.0860, 0.2560, 0.6281, 0.0687, 0.5661, 0.2547], + device='cuda:0'), in_proj_covar=tensor([0.0133, 0.0127, 0.0079, 0.0175, 0.0211, 0.0083, 0.0151, 0.0128], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:12:44,482 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 3.496e+02 4.122e+02 5.072e+02 9.522e+02, threshold=8.245e+02, percent-clipped=6.0 +2023-02-06 01:12:48,495 INFO [train.py:901] (0/4) Epoch 5, batch 5100, loss[loss=0.2302, simple_loss=0.2937, pruned_loss=0.08336, over 7438.00 frames. ], tot_loss[loss=0.2866, simple_loss=0.3487, pruned_loss=0.1122, over 1612339.75 frames. ], batch size: 17, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:12:48,506 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 01:12:48,711 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37433.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:12:49,914 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:53,990 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1100, 1.0823, 1.1098, 1.1127, 0.7794, 1.2253, 0.0347, 0.8986], + device='cuda:0'), covar=tensor([0.3177, 0.2677, 0.1176, 0.1999, 0.6322, 0.0859, 0.5069, 0.2162], + device='cuda:0'), in_proj_covar=tensor([0.0132, 0.0127, 0.0079, 0.0174, 0.0210, 0.0083, 0.0151, 0.0127], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:13:06,528 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37458.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:13:15,615 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6922, 1.2012, 3.8233, 1.3591, 3.2757, 3.2103, 3.4242, 3.3869], + device='cuda:0'), covar=tensor([0.0480, 0.3517, 0.0425, 0.2638, 0.1109, 0.0646, 0.0530, 0.0599], + device='cuda:0'), in_proj_covar=tensor([0.0336, 0.0497, 0.0417, 0.0421, 0.0487, 0.0411, 0.0404, 0.0453], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 01:13:16,275 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8620, 1.5181, 3.0598, 1.2526, 2.2276, 3.2931, 3.2014, 2.7866], + device='cuda:0'), covar=tensor([0.0958, 0.1401, 0.0406, 0.1997, 0.0673, 0.0275, 0.0518, 0.0716], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0277, 0.0228, 0.0266, 0.0227, 0.0200, 0.0233, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 01:13:22,242 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 
01:13:23,387 INFO [train.py:901] (0/4) Epoch 5, batch 5150, loss[loss=0.2796, simple_loss=0.3442, pruned_loss=0.1075, over 8027.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.3501, pruned_loss=0.1133, over 1609955.77 frames. ], batch size: 22, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:13:28,193 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 01:13:33,446 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7256, 3.1723, 2.4446, 4.0805, 2.0552, 2.1512, 2.3036, 3.1756], + device='cuda:0'), covar=tensor([0.0859, 0.1030, 0.1305, 0.0256, 0.1451, 0.1917, 0.1623, 0.1037], + device='cuda:0'), in_proj_covar=tensor([0.0274, 0.0265, 0.0292, 0.0227, 0.0255, 0.0291, 0.0291, 0.0264], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:13:53,498 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.138e+02 3.879e+02 5.454e+02 1.167e+03, threshold=7.757e+02, percent-clipped=4.0 +2023-02-06 01:13:57,560 INFO [train.py:901] (0/4) Epoch 5, batch 5200, loss[loss=0.2794, simple_loss=0.3418, pruned_loss=0.1085, over 8247.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.3489, pruned_loss=0.1128, over 1610070.55 frames. ], batch size: 24, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:14:01,122 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37538.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:14:19,384 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37563.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:14:33,081 INFO [train.py:901] (0/4) Epoch 5, batch 5250, loss[loss=0.2854, simple_loss=0.3319, pruned_loss=0.1194, over 7707.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3489, pruned_loss=0.1128, over 1610278.22 frames. ], batch size: 18, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:14:45,217 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 01:14:57,683 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9531, 2.4649, 1.8912, 3.0641, 1.5804, 1.8072, 1.8789, 2.5215], + device='cuda:0'), covar=tensor([0.1102, 0.1121, 0.1388, 0.0394, 0.1463, 0.1876, 0.1687, 0.1083], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0260, 0.0284, 0.0220, 0.0250, 0.0281, 0.0287, 0.0259], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:15:03,493 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 3.205e+02 3.781e+02 5.298e+02 9.083e+02, threshold=7.562e+02, percent-clipped=4.0 +2023-02-06 01:15:05,547 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:07,536 INFO [train.py:901] (0/4) Epoch 5, batch 5300, loss[loss=0.337, simple_loss=0.387, pruned_loss=0.1434, over 8598.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.3493, pruned_loss=0.1134, over 1607796.66 frames. 
], batch size: 34, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:15:15,186 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:32,580 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:42,448 INFO [train.py:901] (0/4) Epoch 5, batch 5350, loss[loss=0.2381, simple_loss=0.3079, pruned_loss=0.08416, over 7715.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3493, pruned_loss=0.1134, over 1610320.70 frames. ], batch size: 18, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:15:47,815 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:05,060 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37716.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:11,801 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:12,347 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 3.472e+02 4.206e+02 5.536e+02 1.524e+03, threshold=8.412e+02, percent-clipped=7.0 +2023-02-06 01:16:17,026 INFO [train.py:901] (0/4) Epoch 5, batch 5400, loss[loss=0.2908, simple_loss=0.353, pruned_loss=0.1143, over 8037.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.349, pruned_loss=0.1137, over 1609855.06 frames. ], batch size: 22, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:16:19,889 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:25,206 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:36,759 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:50,837 INFO [train.py:901] (0/4) Epoch 5, batch 5450, loss[loss=0.2717, simple_loss=0.3464, pruned_loss=0.09857, over 8255.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3501, pruned_loss=0.1143, over 1608398.29 frames. ], batch size: 24, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:17:22,465 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 2.936e+02 3.882e+02 5.021e+02 1.156e+03, threshold=7.764e+02, percent-clipped=3.0 +2023-02-06 01:17:23,290 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9336, 2.3741, 4.8327, 1.3257, 3.1261, 2.5816, 1.9373, 2.5685], + device='cuda:0'), covar=tensor([0.1227, 0.1620, 0.0528, 0.2892, 0.1334, 0.1887, 0.1239, 0.2331], + device='cuda:0'), in_proj_covar=tensor([0.0462, 0.0439, 0.0521, 0.0530, 0.0577, 0.0507, 0.0442, 0.0586], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 01:17:26,495 INFO [train.py:901] (0/4) Epoch 5, batch 5500, loss[loss=0.2948, simple_loss=0.3545, pruned_loss=0.1176, over 7972.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3501, pruned_loss=0.1145, over 1609222.38 frames. ], batch size: 21, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:17:30,479 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 01:17:31,993 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:18:00,735 INFO [train.py:901] (0/4) Epoch 5, batch 5550, loss[loss=0.2516, simple_loss=0.3144, pruned_loss=0.09438, over 7815.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.3492, pruned_loss=0.1137, over 1605586.76 frames. ], batch size: 20, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:13,690 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4322, 1.9340, 2.1371, 0.9370, 2.1242, 1.3455, 0.5072, 1.7164], + device='cuda:0'), covar=tensor([0.0244, 0.0104, 0.0063, 0.0190, 0.0127, 0.0345, 0.0340, 0.0092], + device='cuda:0'), in_proj_covar=tensor([0.0313, 0.0225, 0.0183, 0.0267, 0.0215, 0.0362, 0.0284, 0.0258], + device='cuda:0'), out_proj_covar=tensor([1.1242e-04, 7.8658e-05, 6.2861e-05, 9.3047e-05, 7.7154e-05, 1.3821e-04, + 1.0211e-04, 9.0575e-05], device='cuda:0') +2023-02-06 01:18:19,135 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1349, 2.0122, 1.9559, 1.8361, 1.7189, 1.7770, 2.7393, 2.4545], + device='cuda:0'), covar=tensor([0.0784, 0.1891, 0.2809, 0.1872, 0.0768, 0.2324, 0.0820, 0.0727], + device='cuda:0'), in_proj_covar=tensor([0.0136, 0.0180, 0.0219, 0.0181, 0.0131, 0.0191, 0.0146, 0.0154], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 01:18:32,126 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 3.124e+02 3.941e+02 5.093e+02 9.977e+02, threshold=7.882e+02, percent-clipped=4.0 +2023-02-06 01:18:36,192 INFO [train.py:901] (0/4) Epoch 5, batch 5600, loss[loss=0.321, simple_loss=0.3768, pruned_loss=0.1326, over 8615.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.3491, pruned_loss=0.1136, over 1607697.60 frames. ], batch size: 31, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:53,364 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 01:19:10,750 INFO [train.py:901] (0/4) Epoch 5, batch 5650, loss[loss=0.3041, simple_loss=0.364, pruned_loss=0.1221, over 8340.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.3495, pruned_loss=0.1137, over 1607209.59 frames. ], batch size: 25, lr: 1.47e-02, grad_scale: 4.0 +2023-02-06 01:19:20,215 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37997.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:22,188 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-38000.pt +2023-02-06 01:19:23,901 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:34,405 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 01:19:41,416 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:42,548 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 3.005e+02 3.717e+02 4.758e+02 1.120e+03, threshold=7.434e+02, percent-clipped=3.0 +2023-02-06 01:19:45,906 INFO [train.py:901] (0/4) Epoch 5, batch 5700, loss[loss=0.246, simple_loss=0.318, pruned_loss=0.08707, over 8517.00 frames. ], tot_loss[loss=0.2875, simple_loss=0.3489, pruned_loss=0.1131, over 1611830.00 frames. 
], batch size: 26, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:20,747 INFO [train.py:901] (0/4) Epoch 5, batch 5750, loss[loss=0.2016, simple_loss=0.2717, pruned_loss=0.06571, over 7426.00 frames. ], tot_loss[loss=0.2899, simple_loss=0.3509, pruned_loss=0.1144, over 1610963.26 frames. ], batch size: 17, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:27,208 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.17 vs. limit=5.0 +2023-02-06 01:20:30,247 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:37,193 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 01:20:46,788 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:50,577 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.053e+02 3.876e+02 4.925e+02 1.023e+03, threshold=7.752e+02, percent-clipped=4.0 +2023-02-06 01:20:54,530 INFO [train.py:901] (0/4) Epoch 5, batch 5800, loss[loss=0.2537, simple_loss=0.323, pruned_loss=0.09223, over 7804.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3504, pruned_loss=0.1141, over 1610615.09 frames. ], batch size: 20, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:21:03,327 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6319, 1.5626, 3.0408, 1.1990, 2.1526, 3.2439, 3.3117, 2.8220], + device='cuda:0'), covar=tensor([0.1075, 0.1289, 0.0373, 0.1883, 0.0707, 0.0298, 0.0401, 0.0679], + device='cuda:0'), in_proj_covar=tensor([0.0237, 0.0269, 0.0225, 0.0263, 0.0225, 0.0201, 0.0232, 0.0277], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 01:21:29,412 INFO [train.py:901] (0/4) Epoch 5, batch 5850, loss[loss=0.2997, simple_loss=0.3562, pruned_loss=0.1216, over 7820.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3502, pruned_loss=0.1144, over 1607733.63 frames. ], batch size: 20, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:01,285 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.005e+02 3.016e+02 3.759e+02 4.889e+02 1.185e+03, threshold=7.518e+02, percent-clipped=2.0 +2023-02-06 01:22:04,817 INFO [train.py:901] (0/4) Epoch 5, batch 5900, loss[loss=0.2752, simple_loss=0.3479, pruned_loss=0.1013, over 8239.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3495, pruned_loss=0.114, over 1607358.88 frames. ], batch size: 22, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:25,340 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4191, 1.4899, 1.5173, 1.2494, 1.2254, 1.4883, 1.6724, 1.4987], + device='cuda:0'), covar=tensor([0.0552, 0.1283, 0.1941, 0.1489, 0.0649, 0.1616, 0.0821, 0.0610], + device='cuda:0'), in_proj_covar=tensor([0.0134, 0.0177, 0.0217, 0.0181, 0.0129, 0.0187, 0.0142, 0.0152], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 01:22:40,895 INFO [train.py:901] (0/4) Epoch 5, batch 5950, loss[loss=0.2814, simple_loss=0.3435, pruned_loss=0.1096, over 8143.00 frames. ], tot_loss[loss=0.2896, simple_loss=0.3501, pruned_loss=0.1146, over 1609008.09 frames. 
], batch size: 22, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:55,068 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1218, 3.0982, 2.8611, 1.4614, 2.7639, 2.7802, 2.9637, 2.6033], + device='cuda:0'), covar=tensor([0.1405, 0.0832, 0.1152, 0.4894, 0.1112, 0.1069, 0.1402, 0.1116], + device='cuda:0'), in_proj_covar=tensor([0.0403, 0.0291, 0.0320, 0.0409, 0.0320, 0.0272, 0.0305, 0.0252], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:23:12,021 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.191e+02 3.790e+02 5.332e+02 1.075e+03, threshold=7.580e+02, percent-clipped=7.0 +2023-02-06 01:23:15,479 INFO [train.py:901] (0/4) Epoch 5, batch 6000, loss[loss=0.2557, simple_loss=0.3228, pruned_loss=0.09426, over 8491.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3495, pruned_loss=0.1142, over 1609157.44 frames. ], batch size: 29, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:23:15,479 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 01:23:28,273 INFO [train.py:935] (0/4) Epoch 5, validation: loss=0.2196, simple_loss=0.3162, pruned_loss=0.06146, over 944034.00 frames. +2023-02-06 01:23:28,274 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 01:23:33,797 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:23:55,755 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5810, 1.9679, 1.6517, 2.4197, 1.0188, 1.3277, 1.5402, 1.7996], + device='cuda:0'), covar=tensor([0.1221, 0.1218, 0.1611, 0.0628, 0.1728, 0.2226, 0.1553, 0.1229], + device='cuda:0'), in_proj_covar=tensor([0.0271, 0.0256, 0.0278, 0.0218, 0.0248, 0.0281, 0.0291, 0.0263], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:23:58,478 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38378.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:24:01,709 INFO [train.py:901] (0/4) Epoch 5, batch 6050, loss[loss=0.3499, simple_loss=0.4051, pruned_loss=0.1474, over 8461.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3505, pruned_loss=0.1154, over 1613333.87 frames. ], batch size: 25, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:12,375 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8028, 2.0862, 2.2153, 1.8206, 1.0848, 2.1843, 0.3499, 1.2866], + device='cuda:0'), covar=tensor([0.3023, 0.2036, 0.1095, 0.2398, 0.6939, 0.0845, 0.5734, 0.2658], + device='cuda:0'), in_proj_covar=tensor([0.0128, 0.0121, 0.0082, 0.0167, 0.0211, 0.0080, 0.0140, 0.0124], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:24:26,789 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.76 vs. limit=5.0 +2023-02-06 01:24:33,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 3.108e+02 3.868e+02 4.827e+02 8.119e+02, threshold=7.737e+02, percent-clipped=1.0 +2023-02-06 01:24:37,063 INFO [train.py:901] (0/4) Epoch 5, batch 6100, loss[loss=0.2493, simple_loss=0.3049, pruned_loss=0.09688, over 7662.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3497, pruned_loss=0.1149, over 1610014.82 frames. 
], batch size: 19, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:53,384 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:25:09,667 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 01:25:10,931 INFO [train.py:901] (0/4) Epoch 5, batch 6150, loss[loss=0.2735, simple_loss=0.3412, pruned_loss=0.1029, over 8246.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3498, pruned_loss=0.1145, over 1612078.57 frames. ], batch size: 24, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:25:42,283 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 3.089e+02 4.008e+02 5.119e+02 1.011e+03, threshold=8.016e+02, percent-clipped=7.0 +2023-02-06 01:25:45,624 INFO [train.py:901] (0/4) Epoch 5, batch 6200, loss[loss=0.2893, simple_loss=0.3305, pruned_loss=0.1241, over 7439.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3492, pruned_loss=0.114, over 1611390.20 frames. ], batch size: 17, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:25:50,627 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 01:26:03,318 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 01:26:20,108 INFO [train.py:901] (0/4) Epoch 5, batch 6250, loss[loss=0.2627, simple_loss=0.3369, pruned_loss=0.09424, over 8286.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3498, pruned_loss=0.1141, over 1616438.96 frames. ], batch size: 23, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:26:51,168 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.239e+02 3.994e+02 4.997e+02 1.061e+03, threshold=7.988e+02, percent-clipped=3.0 +2023-02-06 01:26:54,590 INFO [train.py:901] (0/4) Epoch 5, batch 6300, loss[loss=0.2704, simple_loss=0.3433, pruned_loss=0.09881, over 8116.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3494, pruned_loss=0.1141, over 1616701.92 frames. ], batch size: 23, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:29,604 INFO [train.py:901] (0/4) Epoch 5, batch 6350, loss[loss=0.3467, simple_loss=0.387, pruned_loss=0.1532, over 8467.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3485, pruned_loss=0.1136, over 1611495.89 frames. 
], batch size: 29, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:32,386 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2373, 4.2706, 3.8279, 1.7257, 3.6631, 3.7564, 4.0257, 3.4014], + device='cuda:0'), covar=tensor([0.0806, 0.0573, 0.0893, 0.4898, 0.0840, 0.0885, 0.1028, 0.0729], + device='cuda:0'), in_proj_covar=tensor([0.0389, 0.0293, 0.0318, 0.0400, 0.0309, 0.0271, 0.0301, 0.0250], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:27:43,123 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3335, 1.8453, 2.9392, 2.4090, 2.5850, 1.8808, 1.4679, 1.3397], + device='cuda:0'), covar=tensor([0.1587, 0.2064, 0.0499, 0.0993, 0.0790, 0.1079, 0.1027, 0.1955], + device='cuda:0'), in_proj_covar=tensor([0.0755, 0.0684, 0.0580, 0.0663, 0.0781, 0.0640, 0.0611, 0.0635], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:27:49,908 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38712.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:27:56,415 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:00,326 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.862e+02 3.826e+02 4.732e+02 1.596e+03, threshold=7.652e+02, percent-clipped=5.0 +2023-02-06 01:28:03,610 INFO [train.py:901] (0/4) Epoch 5, batch 6400, loss[loss=0.326, simple_loss=0.3705, pruned_loss=0.1407, over 8101.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3499, pruned_loss=0.1141, over 1616275.33 frames. ], batch size: 23, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:06,523 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:17,088 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7513, 3.7119, 3.4210, 1.7011, 3.4125, 3.1918, 3.4899, 3.0867], + device='cuda:0'), covar=tensor([0.1163, 0.0741, 0.1073, 0.4946, 0.0829, 0.1285, 0.1394, 0.1186], + device='cuda:0'), in_proj_covar=tensor([0.0400, 0.0296, 0.0323, 0.0407, 0.0310, 0.0273, 0.0305, 0.0254], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:28:32,976 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8879, 1.4962, 4.2905, 1.9228, 3.1409, 3.3854, 3.7982, 3.8065], + device='cuda:0'), covar=tensor([0.1112, 0.5551, 0.0871, 0.3312, 0.2247, 0.1275, 0.1050, 0.1132], + device='cuda:0'), in_proj_covar=tensor([0.0338, 0.0488, 0.0424, 0.0429, 0.0484, 0.0405, 0.0405, 0.0450], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 01:28:36,397 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1095, 2.2668, 2.8437, 0.9678, 2.8974, 1.8920, 1.3231, 1.6507], + device='cuda:0'), covar=tensor([0.0252, 0.0170, 0.0104, 0.0288, 0.0136, 0.0312, 0.0367, 0.0187], + device='cuda:0'), in_proj_covar=tensor([0.0318, 0.0227, 0.0192, 0.0274, 0.0220, 0.0365, 0.0286, 0.0264], + device='cuda:0'), out_proj_covar=tensor([1.1291e-04, 7.8632e-05, 6.5704e-05, 9.4632e-05, 7.7646e-05, 1.3749e-04, + 1.0192e-04, 9.2356e-05], device='cuda:0') +2023-02-06 01:28:38,890 INFO [train.py:901] (0/4) Epoch 5, batch 6450, loss[loss=0.354, simple_loss=0.3898, 
pruned_loss=0.1591, over 6522.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3496, pruned_loss=0.1138, over 1615813.87 frames. ], batch size: 71, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:57,430 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9164, 2.2573, 3.0168, 0.9735, 2.8648, 1.7876, 1.4327, 1.5850], + device='cuda:0'), covar=tensor([0.0274, 0.0132, 0.0066, 0.0267, 0.0125, 0.0317, 0.0354, 0.0184], + device='cuda:0'), in_proj_covar=tensor([0.0318, 0.0225, 0.0191, 0.0273, 0.0220, 0.0363, 0.0286, 0.0263], + device='cuda:0'), out_proj_covar=tensor([1.1293e-04, 7.7755e-05, 6.5417e-05, 9.4386e-05, 7.7793e-05, 1.3670e-04, + 1.0163e-04, 9.2006e-05], device='cuda:0') +2023-02-06 01:29:09,816 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.470e+02 3.536e+02 4.141e+02 5.010e+02 9.096e+02, threshold=8.281e+02, percent-clipped=4.0 +2023-02-06 01:29:13,125 INFO [train.py:901] (0/4) Epoch 5, batch 6500, loss[loss=0.3105, simple_loss=0.3697, pruned_loss=0.1256, over 8338.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3508, pruned_loss=0.1141, over 1617265.24 frames. ], batch size: 26, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:29:16,020 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:18,630 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:30,093 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 01:29:48,032 INFO [train.py:901] (0/4) Epoch 5, batch 6550, loss[loss=0.3278, simple_loss=0.3856, pruned_loss=0.135, over 8670.00 frames. ], tot_loss[loss=0.2899, simple_loss=0.3513, pruned_loss=0.1143, over 1618501.39 frames. ], batch size: 34, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:04,623 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5109, 2.4317, 1.6742, 2.0409, 2.0461, 1.5163, 1.8059, 1.9534], + device='cuda:0'), covar=tensor([0.0889, 0.0283, 0.0754, 0.0428, 0.0497, 0.0998, 0.0660, 0.0550], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0236, 0.0306, 0.0299, 0.0317, 0.0312, 0.0336, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 01:30:19,276 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.239e+02 3.737e+02 4.952e+02 1.438e+03, threshold=7.474e+02, percent-clipped=4.0 +2023-02-06 01:30:22,026 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 01:30:22,684 INFO [train.py:901] (0/4) Epoch 5, batch 6600, loss[loss=0.3183, simple_loss=0.3785, pruned_loss=0.129, over 8444.00 frames. ], tot_loss[loss=0.2889, simple_loss=0.3503, pruned_loss=0.1137, over 1618295.89 frames. ], batch size: 27, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:39,809 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 01:30:58,105 INFO [train.py:901] (0/4) Epoch 5, batch 6650, loss[loss=0.2709, simple_loss=0.3401, pruned_loss=0.1008, over 8537.00 frames. ], tot_loss[loss=0.2891, simple_loss=0.3509, pruned_loss=0.1137, over 1622616.36 frames. ], batch size: 31, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:31:15,369 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. 
limit=2.0 +2023-02-06 01:31:29,865 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.068e+02 3.660e+02 4.252e+02 1.265e+03, threshold=7.321e+02, percent-clipped=3.0 +2023-02-06 01:31:33,329 INFO [train.py:901] (0/4) Epoch 5, batch 6700, loss[loss=0.2811, simple_loss=0.3504, pruned_loss=0.1059, over 8481.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3496, pruned_loss=0.113, over 1614677.29 frames. ], batch size: 29, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:32:08,107 INFO [train.py:901] (0/4) Epoch 5, batch 6750, loss[loss=0.2429, simple_loss=0.3232, pruned_loss=0.0813, over 8248.00 frames. ], tot_loss[loss=0.2875, simple_loss=0.3496, pruned_loss=0.1127, over 1613902.71 frames. ], batch size: 24, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:32:15,826 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:33,078 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39118.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:40,327 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.802e+02 3.437e+02 4.300e+02 8.945e+02, threshold=6.874e+02, percent-clipped=2.0 +2023-02-06 01:32:43,708 INFO [train.py:901] (0/4) Epoch 5, batch 6800, loss[loss=0.2848, simple_loss=0.3502, pruned_loss=0.1097, over 8103.00 frames. ], tot_loss[loss=0.2863, simple_loss=0.349, pruned_loss=0.1118, over 1617324.07 frames. ], batch size: 23, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:32:54,739 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 01:33:17,611 INFO [train.py:901] (0/4) Epoch 5, batch 6850, loss[loss=0.3284, simple_loss=0.3818, pruned_loss=0.1375, over 8198.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3506, pruned_loss=0.1134, over 1613815.18 frames. ], batch size: 23, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:33:19,090 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:31,216 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:38,544 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2151, 1.3752, 4.0636, 1.7892, 2.1655, 4.7506, 4.7383, 4.1162], + device='cuda:0'), covar=tensor([0.1132, 0.1703, 0.0313, 0.2034, 0.0988, 0.0245, 0.0332, 0.0559], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0266, 0.0224, 0.0262, 0.0224, 0.0199, 0.0231, 0.0272], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 01:33:43,525 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 01:33:48,726 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 3.284e+02 3.960e+02 5.468e+02 1.321e+03, threshold=7.919e+02, percent-clipped=11.0 +2023-02-06 01:33:52,161 INFO [train.py:901] (0/4) Epoch 5, batch 6900, loss[loss=0.3312, simple_loss=0.3717, pruned_loss=0.1453, over 7001.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3507, pruned_loss=0.1134, over 1610795.27 frames. ], batch size: 71, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:26,649 INFO [train.py:901] (0/4) Epoch 5, batch 6950, loss[loss=0.3896, simple_loss=0.4381, pruned_loss=0.1706, over 8579.00 frames. 
], tot_loss[loss=0.2905, simple_loss=0.3524, pruned_loss=0.1143, over 1614561.65 frames. ], batch size: 31, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:38,143 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:39,400 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:49,519 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 01:34:58,305 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.231e+02 3.801e+02 5.196e+02 1.038e+03, threshold=7.603e+02, percent-clipped=4.0 +2023-02-06 01:35:01,671 INFO [train.py:901] (0/4) Epoch 5, batch 7000, loss[loss=0.2679, simple_loss=0.33, pruned_loss=0.1029, over 7928.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3513, pruned_loss=0.1141, over 1610269.58 frames. ], batch size: 20, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:02,173 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 01:35:35,811 INFO [train.py:901] (0/4) Epoch 5, batch 7050, loss[loss=0.2745, simple_loss=0.3225, pruned_loss=0.1133, over 8083.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.351, pruned_loss=0.1142, over 1612065.11 frames. ], batch size: 21, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:40,647 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8098, 2.7643, 1.7584, 4.2186, 1.8889, 1.5491, 2.4057, 3.0322], + device='cuda:0'), covar=tensor([0.2272, 0.1749, 0.3343, 0.0340, 0.2376, 0.3144, 0.2149, 0.1233], + device='cuda:0'), in_proj_covar=tensor([0.0276, 0.0254, 0.0289, 0.0229, 0.0248, 0.0282, 0.0287, 0.0257], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:36:06,894 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 2.867e+02 3.538e+02 4.706e+02 1.662e+03, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 01:36:10,286 INFO [train.py:901] (0/4) Epoch 5, batch 7100, loss[loss=0.2728, simple_loss=0.3451, pruned_loss=0.1002, over 8038.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3493, pruned_loss=0.1127, over 1613557.58 frames. ], batch size: 22, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:36:44,807 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:36:46,011 INFO [train.py:901] (0/4) Epoch 5, batch 7150, loss[loss=0.2899, simple_loss=0.3606, pruned_loss=0.1096, over 8100.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3478, pruned_loss=0.1118, over 1608936.50 frames. ], batch size: 23, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:17,186 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.977e+02 2.912e+02 3.907e+02 4.774e+02 1.202e+03, threshold=7.813e+02, percent-clipped=7.0 +2023-02-06 01:37:20,758 INFO [train.py:901] (0/4) Epoch 5, batch 7200, loss[loss=0.2471, simple_loss=0.2999, pruned_loss=0.09717, over 7284.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.3483, pruned_loss=0.1126, over 1608925.88 frames. 
], batch size: 16, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:30,581 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:37,481 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:51,898 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 01:37:55,143 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:56,305 INFO [train.py:901] (0/4) Epoch 5, batch 7250, loss[loss=0.3129, simple_loss=0.3756, pruned_loss=0.1251, over 8324.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3465, pruned_loss=0.1112, over 1607004.08 frames. ], batch size: 25, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:27,208 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 2.862e+02 3.679e+02 5.056e+02 1.142e+03, threshold=7.358e+02, percent-clipped=8.0 +2023-02-06 01:38:30,508 INFO [train.py:901] (0/4) Epoch 5, batch 7300, loss[loss=0.3115, simple_loss=0.3777, pruned_loss=0.1227, over 7969.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3474, pruned_loss=0.1121, over 1606903.06 frames. ], batch size: 21, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:39,559 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:38:44,000 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 01:38:47,105 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39657.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:38:50,501 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39662.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:39:05,555 INFO [train.py:901] (0/4) Epoch 5, batch 7350, loss[loss=0.2789, simple_loss=0.3506, pruned_loss=0.1036, over 8450.00 frames. ], tot_loss[loss=0.2851, simple_loss=0.3471, pruned_loss=0.1115, over 1606969.25 frames. ], batch size: 27, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:07,072 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5453, 1.7903, 3.1392, 2.6698, 2.7206, 1.9169, 1.4536, 1.3726], + device='cuda:0'), covar=tensor([0.1751, 0.2503, 0.0476, 0.1076, 0.0962, 0.1155, 0.1276, 0.2177], + device='cuda:0'), in_proj_covar=tensor([0.0755, 0.0687, 0.0584, 0.0671, 0.0773, 0.0644, 0.0609, 0.0633], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:39:33,436 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 01:39:36,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.846e+02 3.982e+02 4.999e+02 1.878e+03, threshold=7.964e+02, percent-clipped=11.0 +2023-02-06 01:39:39,621 INFO [train.py:901] (0/4) Epoch 5, batch 7400, loss[loss=0.2671, simple_loss=0.3215, pruned_loss=0.1064, over 7210.00 frames. ], tot_loss[loss=0.2852, simple_loss=0.3475, pruned_loss=0.1114, over 1613349.51 frames. ], batch size: 16, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:52,996 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-06 01:39:59,142 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:13,744 INFO [train.py:901] (0/4) Epoch 5, batch 7450, loss[loss=0.2479, simple_loss=0.3235, pruned_loss=0.08615, over 8297.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.3496, pruned_loss=0.1133, over 1612849.78 frames. ], batch size: 23, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:15,949 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:32,192 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 01:40:33,685 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4671, 1.0375, 1.4656, 1.0976, 1.5650, 1.2789, 1.8750, 1.9653], + device='cuda:0'), covar=tensor([0.0683, 0.2141, 0.2953, 0.2027, 0.0690, 0.2546, 0.0920, 0.0660], + device='cuda:0'), in_proj_covar=tensor([0.0132, 0.0177, 0.0216, 0.0180, 0.0128, 0.0186, 0.0142, 0.0150], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 01:40:43,630 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:45,620 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 3.116e+02 3.779e+02 4.440e+02 1.107e+03, threshold=7.558e+02, percent-clipped=3.0 +2023-02-06 01:40:48,852 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0 +2023-02-06 01:40:49,055 INFO [train.py:901] (0/4) Epoch 5, batch 7500, loss[loss=0.2252, simple_loss=0.3037, pruned_loss=0.07338, over 8236.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.348, pruned_loss=0.1121, over 1610977.45 frames. ], batch size: 22, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:49,193 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:22,987 INFO [train.py:901] (0/4) Epoch 5, batch 7550, loss[loss=0.2328, simple_loss=0.3127, pruned_loss=0.07646, over 8299.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.3466, pruned_loss=0.1114, over 1610144.58 frames. ], batch size: 23, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:41:43,308 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1155, 2.7455, 3.2460, 1.3653, 3.2818, 2.0815, 1.5710, 1.8332], + device='cuda:0'), covar=tensor([0.0259, 0.0115, 0.0081, 0.0244, 0.0177, 0.0321, 0.0313, 0.0203], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0222, 0.0191, 0.0273, 0.0218, 0.0364, 0.0284, 0.0261], + device='cuda:0'), out_proj_covar=tensor([1.0851e-04, 7.5473e-05, 6.4940e-05, 9.3373e-05, 7.6239e-05, 1.3584e-04, + 9.9626e-05, 8.9550e-05], device='cuda:0') +2023-02-06 01:41:48,005 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:54,548 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.144e+02 3.978e+02 5.379e+02 1.554e+03, threshold=7.957e+02, percent-clipped=6.0 +2023-02-06 01:41:57,850 INFO [train.py:901] (0/4) Epoch 5, batch 7600, loss[loss=0.3022, simple_loss=0.374, pruned_loss=0.1152, over 8339.00 frames. ], tot_loss[loss=0.283, simple_loss=0.3452, pruned_loss=0.1104, over 1609177.41 frames. 
], batch size: 25, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:42:02,463 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:04,534 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39943.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:07,192 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9687, 1.5550, 5.9435, 2.1461, 5.2383, 5.0767, 5.5265, 5.4532], + device='cuda:0'), covar=tensor([0.0338, 0.3813, 0.0250, 0.2505, 0.0942, 0.0499, 0.0346, 0.0418], + device='cuda:0'), in_proj_covar=tensor([0.0335, 0.0488, 0.0430, 0.0424, 0.0492, 0.0412, 0.0398, 0.0452], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 01:42:32,755 INFO [train.py:901] (0/4) Epoch 5, batch 7650, loss[loss=0.2612, simple_loss=0.3428, pruned_loss=0.08985, over 8480.00 frames. ], tot_loss[loss=0.2837, simple_loss=0.3458, pruned_loss=0.1108, over 1609997.83 frames. ], batch size: 29, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:42:44,239 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-40000.pt +2023-02-06 01:42:45,878 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40001.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:42:54,832 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9377, 1.5833, 3.3962, 1.3987, 2.1247, 3.8754, 3.8018, 3.2629], + device='cuda:0'), covar=tensor([0.1037, 0.1344, 0.0328, 0.1832, 0.0802, 0.0213, 0.0373, 0.0588], + device='cuda:0'), in_proj_covar=tensor([0.0236, 0.0263, 0.0223, 0.0263, 0.0222, 0.0201, 0.0235, 0.0276], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 01:42:57,013 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:05,596 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.281e+02 3.028e+02 3.689e+02 4.703e+02 1.290e+03, threshold=7.379e+02, percent-clipped=1.0 +2023-02-06 01:43:08,884 INFO [train.py:901] (0/4) Epoch 5, batch 7700, loss[loss=0.3285, simple_loss=0.3718, pruned_loss=0.1426, over 8336.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3461, pruned_loss=0.1111, over 1607260.69 frames. ], batch size: 26, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:15,453 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:44,285 INFO [train.py:901] (0/4) Epoch 5, batch 7750, loss[loss=0.2579, simple_loss=0.3268, pruned_loss=0.09446, over 8031.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3467, pruned_loss=0.1115, over 1606652.45 frames. ], batch size: 22, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:44,301 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 01:43:47,496 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. 
limit=5.0 +2023-02-06 01:44:07,444 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40116.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:44:15,427 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.050e+02 3.016e+02 3.638e+02 4.428e+02 8.911e+02, threshold=7.276e+02, percent-clipped=8.0 +2023-02-06 01:44:16,242 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:18,946 INFO [train.py:901] (0/4) Epoch 5, batch 7800, loss[loss=0.2494, simple_loss=0.3312, pruned_loss=0.08381, over 8287.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3458, pruned_loss=0.1113, over 1606891.16 frames. ], batch size: 23, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:44:38,906 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4870, 1.9798, 1.9327, 0.8727, 2.0363, 1.3182, 0.3968, 1.6236], + device='cuda:0'), covar=tensor([0.0152, 0.0090, 0.0077, 0.0165, 0.0098, 0.0313, 0.0282, 0.0091], + device='cuda:0'), in_proj_covar=tensor([0.0312, 0.0225, 0.0192, 0.0275, 0.0220, 0.0367, 0.0291, 0.0265], + device='cuda:0'), out_proj_covar=tensor([1.0823e-04, 7.6175e-05, 6.4895e-05, 9.3683e-05, 7.6646e-05, 1.3668e-04, + 1.0178e-04, 9.1007e-05], device='cuda:0') +2023-02-06 01:44:50,158 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:54,273 INFO [train.py:901] (0/4) Epoch 5, batch 7850, loss[loss=0.2509, simple_loss=0.3178, pruned_loss=0.09196, over 8247.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.347, pruned_loss=0.1123, over 1611884.50 frames. ], batch size: 24, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:45:03,223 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:14,682 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:20,135 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:24,750 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.315e+02 3.285e+02 3.978e+02 4.753e+02 1.108e+03, threshold=7.955e+02, percent-clipped=4.0 +2023-02-06 01:45:28,290 INFO [train.py:901] (0/4) Epoch 5, batch 7900, loss[loss=0.3973, simple_loss=0.4292, pruned_loss=0.1827, over 7049.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3472, pruned_loss=0.1117, over 1609958.52 frames. ], batch size: 71, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:45:30,521 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40236.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:35,953 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:02,038 INFO [train.py:901] (0/4) Epoch 5, batch 7950, loss[loss=0.2317, simple_loss=0.302, pruned_loss=0.08071, over 7684.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3459, pruned_loss=0.1113, over 1607069.51 frames. 
], batch size: 18, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:07,572 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:09,034 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:33,074 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.942e+02 3.003e+02 3.931e+02 4.743e+02 9.937e+02, threshold=7.862e+02, percent-clipped=4.0 +2023-02-06 01:46:36,404 INFO [train.py:901] (0/4) Epoch 5, batch 8000, loss[loss=0.3276, simple_loss=0.3869, pruned_loss=0.1341, over 8338.00 frames. ], tot_loss[loss=0.2862, simple_loss=0.3478, pruned_loss=0.1123, over 1613490.56 frames. ], batch size: 26, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:46,588 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:47:03,164 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40372.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:47:10,480 INFO [train.py:901] (0/4) Epoch 5, batch 8050, loss[loss=0.2133, simple_loss=0.2724, pruned_loss=0.07705, over 7539.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3468, pruned_loss=0.1119, over 1606116.83 frames. ], batch size: 18, lr: 1.42e-02, grad_scale: 8.0 +2023-02-06 01:47:20,239 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40397.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:47:23,690 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3556, 2.3314, 2.0770, 1.2816, 2.0408, 1.9752, 2.1481, 1.8854], + device='cuda:0'), covar=tensor([0.1088, 0.0819, 0.1038, 0.3436, 0.0967, 0.1117, 0.1275, 0.0873], + device='cuda:0'), in_proj_covar=tensor([0.0404, 0.0284, 0.0323, 0.0412, 0.0312, 0.0277, 0.0303, 0.0250], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:47:33,786 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-5.pt +2023-02-06 01:47:44,680 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 01:47:48,104 INFO [train.py:901] (0/4) Epoch 6, batch 0, loss[loss=0.2632, simple_loss=0.3334, pruned_loss=0.09648, over 8242.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3334, pruned_loss=0.09648, over 8242.00 frames. ], batch size: 22, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:47:48,104 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 01:47:59,053 INFO [train.py:935] (0/4) Epoch 6, validation: loss=0.2203, simple_loss=0.3165, pruned_loss=0.06206, over 944034.00 frames. +2023-02-06 01:47:59,054 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 01:48:07,794 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.052e+02 3.992e+02 5.098e+02 1.227e+03, threshold=7.983e+02, percent-clipped=7.0 +2023-02-06 01:48:13,423 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-06 01:48:14,983 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3225, 1.4860, 1.2407, 1.9851, 0.8418, 1.1170, 1.2594, 1.4996], + device='cuda:0'), covar=tensor([0.1397, 0.1209, 0.1948, 0.0742, 0.1569, 0.2299, 0.1240, 0.1089], + device='cuda:0'), in_proj_covar=tensor([0.0278, 0.0256, 0.0287, 0.0226, 0.0250, 0.0281, 0.0282, 0.0259], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:48:17,292 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 01:48:34,118 INFO [train.py:901] (0/4) Epoch 6, batch 50, loss[loss=0.2515, simple_loss=0.3118, pruned_loss=0.09554, over 7794.00 frames. ], tot_loss[loss=0.2876, simple_loss=0.3507, pruned_loss=0.1122, over 369705.46 frames. ], batch size: 19, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:48:48,504 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 01:48:57,358 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:04,769 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:08,809 INFO [train.py:901] (0/4) Epoch 6, batch 100, loss[loss=0.3516, simple_loss=0.3967, pruned_loss=0.1532, over 8342.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.3529, pruned_loss=0.1138, over 651350.97 frames. ], batch size: 26, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:49:13,092 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 01:49:15,329 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:17,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.877e+02 3.627e+02 4.294e+02 7.601e+02, threshold=7.253e+02, percent-clipped=0.0 +2023-02-06 01:49:27,173 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0064, 1.4043, 4.2457, 1.5328, 3.6707, 3.5632, 3.8114, 3.6652], + device='cuda:0'), covar=tensor([0.0480, 0.3145, 0.0357, 0.2332, 0.1018, 0.0612, 0.0456, 0.0567], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0490, 0.0436, 0.0431, 0.0499, 0.0418, 0.0407, 0.0458], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 01:49:31,507 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:37,659 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:39,059 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6968, 5.7743, 5.0029, 2.0083, 5.0483, 5.3438, 5.3048, 4.9598], + device='cuda:0'), covar=tensor([0.0612, 0.0430, 0.0776, 0.4748, 0.0672, 0.0756, 0.1116, 0.0670], + device='cuda:0'), in_proj_covar=tensor([0.0403, 0.0288, 0.0324, 0.0414, 0.0316, 0.0283, 0.0304, 0.0254], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:49:44,253 INFO [train.py:901] (0/4) Epoch 6, batch 150, loss[loss=0.2317, simple_loss=0.3204, pruned_loss=0.07151, over 8332.00 frames. 
], tot_loss[loss=0.2919, simple_loss=0.3546, pruned_loss=0.1146, over 868955.13 frames. ], batch size: 25, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:49:49,733 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:54,327 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:04,510 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4237, 1.5942, 1.3477, 1.8680, 0.9184, 1.1951, 1.3318, 1.6129], + device='cuda:0'), covar=tensor([0.1165, 0.0971, 0.1558, 0.0700, 0.1394, 0.2115, 0.1237, 0.0827], + device='cuda:0'), in_proj_covar=tensor([0.0273, 0.0254, 0.0281, 0.0227, 0.0247, 0.0280, 0.0279, 0.0257], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:50:19,258 INFO [train.py:901] (0/4) Epoch 6, batch 200, loss[loss=0.2827, simple_loss=0.3517, pruned_loss=0.1069, over 8742.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3491, pruned_loss=0.111, over 1041131.89 frames. ], batch size: 30, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:28,762 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 3.079e+02 3.898e+02 5.213e+02 9.157e+02, threshold=7.795e+02, percent-clipped=3.0 +2023-02-06 01:50:32,275 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:54,066 INFO [train.py:901] (0/4) Epoch 6, batch 250, loss[loss=0.2988, simple_loss=0.3688, pruned_loss=0.1144, over 8335.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3482, pruned_loss=0.1105, over 1166216.85 frames. ], batch size: 26, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:56,282 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:58,382 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:59,877 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.02 vs. limit=5.0 +2023-02-06 01:51:04,085 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 01:51:12,275 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 01:51:13,021 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:15,100 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:29,252 INFO [train.py:901] (0/4) Epoch 6, batch 300, loss[loss=0.2247, simple_loss=0.2798, pruned_loss=0.08482, over 7516.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3476, pruned_loss=0.1105, over 1265930.38 frames. 
], batch size: 18, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:51:38,588 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 3.025e+02 3.729e+02 4.724e+02 9.863e+02, threshold=7.458e+02, percent-clipped=3.0 +2023-02-06 01:51:52,720 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:03,714 INFO [train.py:901] (0/4) Epoch 6, batch 350, loss[loss=0.2468, simple_loss=0.3, pruned_loss=0.09678, over 7689.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.3467, pruned_loss=0.1101, over 1341678.59 frames. ], batch size: 18, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:17,247 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5507, 1.9432, 1.9601, 0.9001, 2.0161, 1.4210, 0.3587, 1.6846], + device='cuda:0'), covar=tensor([0.0181, 0.0115, 0.0095, 0.0165, 0.0130, 0.0334, 0.0296, 0.0094], + device='cuda:0'), in_proj_covar=tensor([0.0324, 0.0234, 0.0195, 0.0282, 0.0228, 0.0374, 0.0300, 0.0274], + device='cuda:0'), out_proj_covar=tensor([1.1224e-04, 7.8652e-05, 6.5579e-05, 9.5827e-05, 7.8811e-05, 1.3856e-04, + 1.0426e-04, 9.3391e-05], device='cuda:0') +2023-02-06 01:52:32,424 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:38,236 INFO [train.py:901] (0/4) Epoch 6, batch 400, loss[loss=0.2342, simple_loss=0.3039, pruned_loss=0.0822, over 7791.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3471, pruned_loss=0.1104, over 1405916.21 frames. ], batch size: 19, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:46,905 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.939e+02 3.080e+02 3.801e+02 5.022e+02 1.220e+03, threshold=7.601e+02, percent-clipped=4.0 +2023-02-06 01:53:04,898 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40854.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:12,732 INFO [train.py:901] (0/4) Epoch 6, batch 450, loss[loss=0.236, simple_loss=0.3103, pruned_loss=0.0808, over 8085.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3471, pruned_loss=0.1103, over 1451608.41 frames. ], batch size: 21, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:24,456 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40883.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:47,833 INFO [train.py:901] (0/4) Epoch 6, batch 500, loss[loss=0.2995, simple_loss=0.3558, pruned_loss=0.1216, over 8627.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3463, pruned_loss=0.1092, over 1492191.42 frames. 
], batch size: 34, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:49,255 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:56,741 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40928.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:57,239 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.058e+02 3.738e+02 5.288e+02 8.550e+02, threshold=7.476e+02, percent-clipped=3.0 +2023-02-06 01:54:12,294 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:13,635 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:22,945 INFO [train.py:901] (0/4) Epoch 6, batch 550, loss[loss=0.2453, simple_loss=0.3088, pruned_loss=0.09088, over 7693.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3439, pruned_loss=0.1072, over 1522784.68 frames. ], batch size: 18, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:54:25,204 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:30,610 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:49,826 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:55,069 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:56,475 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:57,037 INFO [train.py:901] (0/4) Epoch 6, batch 600, loss[loss=0.3228, simple_loss=0.3714, pruned_loss=0.1371, over 8548.00 frames. ], tot_loss[loss=0.2788, simple_loss=0.3434, pruned_loss=0.1071, over 1544432.91 frames. ], batch size: 31, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:06,096 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.839e+02 3.515e+02 4.292e+02 8.268e+02, threshold=7.031e+02, percent-clipped=4.0 +2023-02-06 01:55:06,982 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41030.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:09,481 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 01:55:29,303 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:29,383 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:31,087 INFO [train.py:901] (0/4) Epoch 6, batch 650, loss[loss=0.2793, simple_loss=0.3515, pruned_loss=0.1036, over 8257.00 frames. ], tot_loss[loss=0.2812, simple_loss=0.3452, pruned_loss=0.1086, over 1562462.51 frames. ], batch size: 24, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:43,234 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.11 vs. 
limit=5.0 +2023-02-06 01:55:46,207 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:05,824 INFO [train.py:901] (0/4) Epoch 6, batch 700, loss[loss=0.2509, simple_loss=0.3152, pruned_loss=0.09332, over 7773.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3462, pruned_loss=0.1098, over 1574631.49 frames. ], batch size: 19, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:56:08,623 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,230 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,741 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.998e+02 3.776e+02 4.654e+02 1.221e+03, threshold=7.553e+02, percent-clipped=4.0 +2023-02-06 01:56:40,070 INFO [train.py:901] (0/4) Epoch 6, batch 750, loss[loss=0.3432, simple_loss=0.388, pruned_loss=0.1492, over 8478.00 frames. ], tot_loss[loss=0.2836, simple_loss=0.3469, pruned_loss=0.1102, over 1585084.56 frames. ], batch size: 29, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:56:52,792 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 01:57:00,960 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 01:57:15,161 INFO [train.py:901] (0/4) Epoch 6, batch 800, loss[loss=0.2118, simple_loss=0.2949, pruned_loss=0.06435, over 8190.00 frames. ], tot_loss[loss=0.2836, simple_loss=0.3473, pruned_loss=0.1099, over 1598082.01 frames. ], batch size: 23, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:57:21,525 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:22,746 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:24,006 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.937e+02 3.578e+02 4.897e+02 8.076e+02, threshold=7.157e+02, percent-clipped=3.0 +2023-02-06 01:57:38,348 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:40,899 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7462, 4.6994, 4.1348, 1.7778, 4.1295, 4.1648, 4.2970, 3.7573], + device='cuda:0'), covar=tensor([0.0642, 0.0504, 0.0841, 0.4648, 0.0693, 0.0799, 0.1302, 0.0653], + device='cuda:0'), in_proj_covar=tensor([0.0404, 0.0286, 0.0319, 0.0407, 0.0310, 0.0280, 0.0301, 0.0256], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:57:46,851 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:49,530 INFO [train.py:901] (0/4) Epoch 6, batch 850, loss[loss=0.2537, simple_loss=0.3339, pruned_loss=0.0868, over 8038.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.3468, pruned_loss=0.11, over 1601819.87 frames. ], batch size: 22, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:57:59,689 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. 
limit=2.0 +2023-02-06 01:58:08,939 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3456, 2.0507, 3.1063, 2.5114, 2.5568, 2.0269, 1.5590, 1.3440], + device='cuda:0'), covar=tensor([0.2111, 0.2038, 0.0492, 0.1175, 0.1037, 0.1193, 0.1193, 0.2226], + device='cuda:0'), in_proj_covar=tensor([0.0770, 0.0702, 0.0601, 0.0695, 0.0797, 0.0660, 0.0624, 0.0650], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 01:58:23,892 INFO [train.py:901] (0/4) Epoch 6, batch 900, loss[loss=0.2911, simple_loss=0.3569, pruned_loss=0.1126, over 8498.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.3458, pruned_loss=0.1088, over 1608520.88 frames. ], batch size: 26, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:58:28,819 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.1782, 1.6075, 5.2624, 2.0909, 4.6217, 4.5509, 4.8685, 4.8638], + device='cuda:0'), covar=tensor([0.0377, 0.3549, 0.0373, 0.2419, 0.0970, 0.0614, 0.0407, 0.0395], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0510, 0.0449, 0.0441, 0.0511, 0.0434, 0.0420, 0.0474], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 01:58:33,478 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.871e+02 3.405e+02 4.321e+02 1.147e+03, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 01:58:42,519 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:53,863 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:58,557 INFO [train.py:901] (0/4) Epoch 6, batch 950, loss[loss=0.2345, simple_loss=0.3057, pruned_loss=0.08165, over 7538.00 frames. ], tot_loss[loss=0.2813, simple_loss=0.3453, pruned_loss=0.1087, over 1609500.06 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:06,373 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:11,053 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:21,436 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 01:59:26,984 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:28,455 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:32,977 INFO [train.py:901] (0/4) Epoch 6, batch 1000, loss[loss=0.2594, simple_loss=0.3281, pruned_loss=0.09532, over 8239.00 frames. ], tot_loss[loss=0.2801, simple_loss=0.3442, pruned_loss=0.1081, over 1612013.98 frames. 
], batch size: 22, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:33,844 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6972, 1.9711, 1.6390, 2.3806, 1.2315, 1.3292, 1.7219, 2.0560], + device='cuda:0'), covar=tensor([0.1081, 0.1083, 0.1510, 0.0643, 0.1487, 0.2104, 0.1222, 0.0741], + device='cuda:0'), in_proj_covar=tensor([0.0277, 0.0253, 0.0284, 0.0227, 0.0250, 0.0282, 0.0287, 0.0261], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 01:59:41,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.042e+02 3.293e+02 3.921e+02 5.074e+02 1.211e+03, threshold=7.843e+02, percent-clipped=6.0 +2023-02-06 01:59:55,293 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 02:00:06,232 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:07,521 INFO [train.py:901] (0/4) Epoch 6, batch 1050, loss[loss=0.2292, simple_loss=0.2974, pruned_loss=0.08049, over 7680.00 frames. ], tot_loss[loss=0.2805, simple_loss=0.3447, pruned_loss=0.1082, over 1616931.01 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:08,220 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 02:00:13,132 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:19,810 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1115, 1.5881, 1.5195, 1.3075, 1.4614, 1.6444, 2.1275, 2.0691], + device='cuda:0'), covar=tensor([0.0619, 0.1821, 0.2734, 0.2018, 0.0760, 0.2179, 0.0849, 0.0682], + device='cuda:0'), in_proj_covar=tensor([0.0131, 0.0176, 0.0219, 0.0183, 0.0130, 0.0188, 0.0140, 0.0150], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:00:42,284 INFO [train.py:901] (0/4) Epoch 6, batch 1100, loss[loss=0.2401, simple_loss=0.3255, pruned_loss=0.07737, over 8029.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3442, pruned_loss=0.1079, over 1616731.58 frames. ], batch size: 22, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:46,701 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:51,106 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.937e+02 3.488e+02 4.376e+02 9.981e+02, threshold=6.976e+02, percent-clipped=3.0 +2023-02-06 02:01:16,052 INFO [train.py:901] (0/4) Epoch 6, batch 1150, loss[loss=0.2666, simple_loss=0.3312, pruned_loss=0.101, over 8034.00 frames. ], tot_loss[loss=0.2797, simple_loss=0.3438, pruned_loss=0.1077, over 1615185.08 frames. ], batch size: 22, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:18,801 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-06 02:01:25,439 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:38,055 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:50,378 INFO [train.py:901] (0/4) Epoch 6, batch 1200, loss[loss=0.2501, simple_loss=0.3242, pruned_loss=0.08797, over 8511.00 frames. ], tot_loss[loss=0.2797, simple_loss=0.3437, pruned_loss=0.1078, over 1616054.06 frames. ], batch size: 26, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:55,784 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:58,418 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5689, 1.8577, 2.0709, 1.6986, 1.0311, 2.1005, 0.2934, 1.2221], + device='cuda:0'), covar=tensor([0.2160, 0.1630, 0.0685, 0.1598, 0.5111, 0.0607, 0.4223, 0.1976], + device='cuda:0'), in_proj_covar=tensor([0.0129, 0.0129, 0.0080, 0.0173, 0.0209, 0.0081, 0.0145, 0.0133], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:02:00,249 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.965e+02 3.060e+02 3.864e+02 4.910e+02 1.275e+03, threshold=7.729e+02, percent-clipped=9.0 +2023-02-06 02:02:03,207 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:19,543 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:24,691 INFO [train.py:901] (0/4) Epoch 6, batch 1250, loss[loss=0.3735, simple_loss=0.3982, pruned_loss=0.1744, over 6745.00 frames. ], tot_loss[loss=0.2798, simple_loss=0.3435, pruned_loss=0.1081, over 1615773.61 frames. ], batch size: 72, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:02:29,662 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:59,807 INFO [train.py:901] (0/4) Epoch 6, batch 1300, loss[loss=0.2071, simple_loss=0.2722, pruned_loss=0.07099, over 7704.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3425, pruned_loss=0.107, over 1618146.11 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:08,608 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.137e+02 4.028e+02 4.813e+02 9.668e+02, threshold=8.056e+02, percent-clipped=5.0 +2023-02-06 02:03:09,507 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:27,418 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:34,647 INFO [train.py:901] (0/4) Epoch 6, batch 1350, loss[loss=0.2497, simple_loss=0.3113, pruned_loss=0.09404, over 7808.00 frames. ], tot_loss[loss=0.2791, simple_loss=0.3432, pruned_loss=0.1075, over 1617453.79 frames. 
], batch size: 20, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:42,954 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:00,405 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:09,625 INFO [train.py:901] (0/4) Epoch 6, batch 1400, loss[loss=0.2454, simple_loss=0.2984, pruned_loss=0.0962, over 7535.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3426, pruned_loss=0.107, over 1621581.02 frames. ], batch size: 18, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:18,120 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.090e+02 3.079e+02 3.704e+02 4.589e+02 8.838e+02, threshold=7.407e+02, percent-clipped=2.0 +2023-02-06 02:04:22,369 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:39,964 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:44,589 INFO [train.py:901] (0/4) Epoch 6, batch 1450, loss[loss=0.374, simple_loss=0.3985, pruned_loss=0.1748, over 6532.00 frames. ], tot_loss[loss=0.2785, simple_loss=0.3431, pruned_loss=0.107, over 1620639.77 frames. ], batch size: 71, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:47,875 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 02:05:10,758 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1794, 1.2558, 4.2256, 1.6158, 3.7325, 3.5545, 3.8739, 3.7989], + device='cuda:0'), covar=tensor([0.0405, 0.3851, 0.0449, 0.2480, 0.1141, 0.0755, 0.0443, 0.0499], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0500, 0.0443, 0.0436, 0.0500, 0.0419, 0.0410, 0.0463], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 02:05:18,593 INFO [train.py:901] (0/4) Epoch 6, batch 1500, loss[loss=0.3303, simple_loss=0.3946, pruned_loss=0.133, over 8030.00 frames. ], tot_loss[loss=0.2806, simple_loss=0.3446, pruned_loss=0.1083, over 1617204.39 frames. ], batch size: 22, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:24,645 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:05:27,836 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 2.922e+02 3.542e+02 4.432e+02 1.007e+03, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 02:05:53,224 INFO [train.py:901] (0/4) Epoch 6, batch 1550, loss[loss=0.3163, simple_loss=0.375, pruned_loss=0.1288, over 8281.00 frames. ], tot_loss[loss=0.2813, simple_loss=0.3453, pruned_loss=0.1087, over 1617143.25 frames. ], batch size: 23, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:53,373 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41966.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:06:16,688 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-42000.pt +2023-02-06 02:06:28,441 INFO [train.py:901] (0/4) Epoch 6, batch 1600, loss[loss=0.2985, simple_loss=0.379, pruned_loss=0.109, over 8672.00 frames. ], tot_loss[loss=0.2796, simple_loss=0.3438, pruned_loss=0.1078, over 1616791.20 frames. 
], batch size: 34, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:06:28,511 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:06:37,881 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.135e+02 3.132e+02 3.836e+02 5.392e+02 3.005e+03, threshold=7.672e+02, percent-clipped=11.0 +2023-02-06 02:06:47,626 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6348, 1.9243, 2.1073, 1.6343, 0.9353, 2.2926, 0.2856, 1.0899], + device='cuda:0'), covar=tensor([0.3380, 0.1998, 0.1003, 0.2383, 0.6790, 0.0548, 0.5109, 0.2498], + device='cuda:0'), in_proj_covar=tensor([0.0132, 0.0132, 0.0082, 0.0181, 0.0216, 0.0082, 0.0147, 0.0136], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:06:50,468 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 02:07:03,786 INFO [train.py:901] (0/4) Epoch 6, batch 1650, loss[loss=0.2855, simple_loss=0.344, pruned_loss=0.1136, over 8523.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.343, pruned_loss=0.1072, over 1619350.28 frames. ], batch size: 39, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:27,173 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5715, 3.6996, 2.2665, 2.3421, 2.9129, 1.7604, 2.2361, 2.7138], + device='cuda:0'), covar=tensor([0.1431, 0.0281, 0.0796, 0.0737, 0.0505, 0.1157, 0.0965, 0.0799], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0233, 0.0310, 0.0293, 0.0313, 0.0307, 0.0334, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:07:39,088 INFO [train.py:901] (0/4) Epoch 6, batch 1700, loss[loss=0.3145, simple_loss=0.3681, pruned_loss=0.1305, over 8199.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3418, pruned_loss=0.1062, over 1615784.97 frames. ], batch size: 23, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:44,497 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 02:07:47,895 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.826e+02 3.670e+02 4.452e+02 1.049e+03, threshold=7.339e+02, percent-clipped=2.0 +2023-02-06 02:07:49,305 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:08:14,471 INFO [train.py:901] (0/4) Epoch 6, batch 1750, loss[loss=0.2436, simple_loss=0.3056, pruned_loss=0.09086, over 8145.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3402, pruned_loss=0.1058, over 1615729.90 frames. 
], batch size: 22, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:42,599 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3382, 2.2236, 1.7866, 1.6799, 1.4386, 1.8775, 2.3619, 2.0836], + device='cuda:0'), covar=tensor([0.0454, 0.1055, 0.1612, 0.1315, 0.0643, 0.1484, 0.0629, 0.0527], + device='cuda:0'), in_proj_covar=tensor([0.0128, 0.0174, 0.0213, 0.0178, 0.0127, 0.0183, 0.0139, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:08:44,743 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7408, 1.6507, 1.9681, 1.8743, 1.1122, 2.2936, 0.6116, 1.2073], + device='cuda:0'), covar=tensor([0.2859, 0.2993, 0.1202, 0.1671, 0.5956, 0.0550, 0.5144, 0.2907], + device='cuda:0'), in_proj_covar=tensor([0.0132, 0.0133, 0.0084, 0.0178, 0.0218, 0.0082, 0.0144, 0.0136], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:08:49,394 INFO [train.py:901] (0/4) Epoch 6, batch 1800, loss[loss=0.3048, simple_loss=0.3655, pruned_loss=0.1221, over 8562.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3405, pruned_loss=0.1059, over 1615151.71 frames. ], batch size: 39, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:59,177 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 3.032e+02 3.540e+02 4.353e+02 2.015e+03, threshold=7.080e+02, percent-clipped=5.0 +2023-02-06 02:09:24,925 INFO [train.py:901] (0/4) Epoch 6, batch 1850, loss[loss=0.2625, simple_loss=0.3367, pruned_loss=0.09416, over 8580.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.341, pruned_loss=0.1058, over 1616050.20 frames. ], batch size: 31, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:09:26,411 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:09:47,994 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0184, 1.6995, 6.0087, 2.0398, 5.3374, 5.1578, 5.7362, 5.6104], + device='cuda:0'), covar=tensor([0.0415, 0.3722, 0.0287, 0.2457, 0.0979, 0.0522, 0.0397, 0.0396], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0510, 0.0445, 0.0442, 0.0505, 0.0424, 0.0415, 0.0468], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 02:09:55,345 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42310.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:09:59,278 INFO [train.py:901] (0/4) Epoch 6, batch 1900, loss[loss=0.2574, simple_loss=0.3317, pruned_loss=0.09157, over 8345.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3391, pruned_loss=0.1047, over 1613625.31 frames. 
], batch size: 26, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:10:08,776 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.992e+02 2.715e+02 3.297e+02 4.142e+02 7.213e+02, threshold=6.594e+02, percent-clipped=2.0 +2023-02-06 02:10:19,490 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2950, 1.7788, 2.9720, 2.3238, 2.4432, 1.8816, 1.4750, 1.2067], + device='cuda:0'), covar=tensor([0.2061, 0.2508, 0.0541, 0.1252, 0.1055, 0.1382, 0.1331, 0.2228], + device='cuda:0'), in_proj_covar=tensor([0.0775, 0.0710, 0.0618, 0.0708, 0.0804, 0.0662, 0.0620, 0.0655], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:10:23,931 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 02:10:34,060 INFO [train.py:901] (0/4) Epoch 6, batch 1950, loss[loss=0.2577, simple_loss=0.3346, pruned_loss=0.09041, over 8475.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3397, pruned_loss=0.1046, over 1614331.14 frames. ], batch size: 29, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:10:36,640 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 02:10:37,662 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 02:10:46,201 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:48,975 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42387.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:56,190 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 02:10:56,312 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:06,394 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:08,998 INFO [train.py:901] (0/4) Epoch 6, batch 2000, loss[loss=0.3458, simple_loss=0.3865, pruned_loss=0.1525, over 8471.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3398, pruned_loss=0.1049, over 1616522.53 frames. ], batch size: 27, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:11:15,164 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42425.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:11:18,300 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.766e+02 3.581e+02 4.303e+02 8.011e+02, threshold=7.162e+02, percent-clipped=3.0 +2023-02-06 02:11:43,879 INFO [train.py:901] (0/4) Epoch 6, batch 2050, loss[loss=0.2163, simple_loss=0.2935, pruned_loss=0.06959, over 7537.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.339, pruned_loss=0.1042, over 1616203.85 frames. 
], batch size: 18, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:11:49,370 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0939, 1.7786, 1.8374, 1.5532, 1.4287, 1.7197, 2.4566, 1.7469], + device='cuda:0'), covar=tensor([0.0496, 0.1219, 0.1687, 0.1425, 0.0683, 0.1530, 0.0664, 0.0602], + device='cuda:0'), in_proj_covar=tensor([0.0129, 0.0174, 0.0213, 0.0180, 0.0126, 0.0185, 0.0139, 0.0151], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:12:17,264 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0355, 1.7325, 1.3564, 1.7873, 1.4355, 1.1618, 1.4529, 1.5795], + device='cuda:0'), covar=tensor([0.0749, 0.0334, 0.0854, 0.0390, 0.0500, 0.1026, 0.0608, 0.0515], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0234, 0.0312, 0.0297, 0.0313, 0.0306, 0.0338, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:12:17,698 INFO [train.py:901] (0/4) Epoch 6, batch 2100, loss[loss=0.3192, simple_loss=0.3554, pruned_loss=0.1415, over 7817.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3403, pruned_loss=0.1046, over 1619852.57 frames. ], batch size: 20, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:23,857 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:12:27,687 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.916e+02 3.481e+02 4.572e+02 1.310e+03, threshold=6.962e+02, percent-clipped=2.0 +2023-02-06 02:12:30,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1990, 1.0311, 1.0964, 0.9809, 0.7462, 0.9773, 1.0200, 0.9083], + device='cuda:0'), covar=tensor([0.0491, 0.0894, 0.1257, 0.1032, 0.0500, 0.1074, 0.0565, 0.0441], + device='cuda:0'), in_proj_covar=tensor([0.0127, 0.0173, 0.0211, 0.0178, 0.0125, 0.0183, 0.0137, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:12:52,377 INFO [train.py:901] (0/4) Epoch 6, batch 2150, loss[loss=0.2793, simple_loss=0.3283, pruned_loss=0.1152, over 7425.00 frames. ], tot_loss[loss=0.2772, simple_loss=0.3421, pruned_loss=0.1062, over 1619879.13 frames. ], batch size: 17, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:13:24,198 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-06 02:13:27,027 INFO [train.py:901] (0/4) Epoch 6, batch 2200, loss[loss=0.2427, simple_loss=0.3094, pruned_loss=0.08796, over 8241.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3424, pruned_loss=0.1063, over 1620569.66 frames. 
], batch size: 22, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:13:36,143 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 3.020e+02 3.729e+02 5.072e+02 1.122e+03, threshold=7.459e+02, percent-clipped=5.0 +2023-02-06 02:13:43,115 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:13:47,048 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5016, 1.7536, 2.0148, 0.7886, 2.1019, 1.3282, 0.5252, 1.7144], + device='cuda:0'), covar=tensor([0.0241, 0.0133, 0.0104, 0.0255, 0.0217, 0.0399, 0.0336, 0.0133], + device='cuda:0'), in_proj_covar=tensor([0.0331, 0.0239, 0.0211, 0.0297, 0.0234, 0.0388, 0.0305, 0.0277], + device='cuda:0'), out_proj_covar=tensor([1.1269e-04, 7.9082e-05, 6.9848e-05, 9.9620e-05, 7.9360e-05, 1.4094e-04, + 1.0432e-04, 9.3080e-05], device='cuda:0') +2023-02-06 02:13:59,699 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:00,825 INFO [train.py:901] (0/4) Epoch 6, batch 2250, loss[loss=0.2419, simple_loss=0.3264, pruned_loss=0.07865, over 8472.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3417, pruned_loss=0.1057, over 1619743.31 frames. ], batch size: 29, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:01,616 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:11,874 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42681.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:14:29,255 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42706.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:14:29,877 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6565, 2.9919, 2.4327, 4.0258, 1.7981, 1.9057, 2.3112, 3.3331], + device='cuda:0'), covar=tensor([0.0863, 0.1041, 0.1138, 0.0263, 0.1592, 0.1875, 0.1450, 0.0819], + device='cuda:0'), in_proj_covar=tensor([0.0272, 0.0252, 0.0283, 0.0226, 0.0251, 0.0281, 0.0284, 0.0258], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 02:14:35,831 INFO [train.py:901] (0/4) Epoch 6, batch 2300, loss[loss=0.2352, simple_loss=0.3104, pruned_loss=0.07994, over 8137.00 frames. ], tot_loss[loss=0.2776, simple_loss=0.342, pruned_loss=0.1067, over 1618630.19 frames. ], batch size: 22, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:45,246 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.977e+02 3.532e+02 4.435e+02 7.362e+02, threshold=7.063e+02, percent-clipped=0.0 +2023-02-06 02:14:53,234 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:15:11,251 INFO [train.py:901] (0/4) Epoch 6, batch 2350, loss[loss=0.2946, simple_loss=0.3607, pruned_loss=0.1142, over 8351.00 frames. ], tot_loss[loss=0.2791, simple_loss=0.3425, pruned_loss=0.1078, over 1612400.27 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:46,588 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.88 vs. limit=5.0 +2023-02-06 02:15:46,903 INFO [train.py:901] (0/4) Epoch 6, batch 2400, loss[loss=0.2592, simple_loss=0.3286, pruned_loss=0.09491, over 8461.00 frames. 
], tot_loss[loss=0.2803, simple_loss=0.3436, pruned_loss=0.1085, over 1618063.01 frames. ], batch size: 25, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:56,305 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.099e+02 3.712e+02 4.452e+02 1.076e+03, threshold=7.425e+02, percent-clipped=4.0 +2023-02-06 02:16:08,777 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.09 vs. limit=5.0 +2023-02-06 02:16:14,310 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:20,849 INFO [train.py:901] (0/4) Epoch 6, batch 2450, loss[loss=0.1987, simple_loss=0.2716, pruned_loss=0.06286, over 7196.00 frames. ], tot_loss[loss=0.2813, simple_loss=0.3445, pruned_loss=0.1091, over 1618204.23 frames. ], batch size: 16, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:16:22,280 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:29,129 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:42,202 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:45,021 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5754, 1.9898, 1.9859, 1.8964, 1.6498, 1.9927, 2.2053, 2.1214], + device='cuda:0'), covar=tensor([0.0663, 0.0976, 0.1348, 0.1121, 0.0597, 0.1196, 0.0767, 0.0449], + device='cuda:0'), in_proj_covar=tensor([0.0129, 0.0174, 0.0215, 0.0180, 0.0125, 0.0183, 0.0139, 0.0150], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:16:54,609 INFO [train.py:901] (0/4) Epoch 6, batch 2500, loss[loss=0.3022, simple_loss=0.3666, pruned_loss=0.1189, over 8194.00 frames. ], tot_loss[loss=0.2812, simple_loss=0.3446, pruned_loss=0.1089, over 1615965.04 frames. ], batch size: 23, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:05,200 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 3.094e+02 4.004e+02 4.995e+02 1.056e+03, threshold=8.009e+02, percent-clipped=4.0 +2023-02-06 02:17:29,434 INFO [train.py:901] (0/4) Epoch 6, batch 2550, loss[loss=0.3162, simple_loss=0.3528, pruned_loss=0.1398, over 7200.00 frames. ], tot_loss[loss=0.2818, simple_loss=0.345, pruned_loss=0.1093, over 1614860.09 frames. ], batch size: 16, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:41,648 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:00,621 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9190, 1.6196, 3.3995, 1.4226, 2.1865, 3.8692, 3.7919, 3.2807], + device='cuda:0'), covar=tensor([0.1044, 0.1352, 0.0331, 0.1915, 0.0839, 0.0204, 0.0333, 0.0564], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0267, 0.0225, 0.0264, 0.0236, 0.0209, 0.0248, 0.0279], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 02:18:01,213 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:04,437 INFO [train.py:901] (0/4) Epoch 6, batch 2600, loss[loss=0.2595, simple_loss=0.3338, pruned_loss=0.09264, over 8363.00 frames. 
], tot_loss[loss=0.2826, simple_loss=0.3459, pruned_loss=0.1097, over 1619095.47 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:13,988 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.052e+02 3.779e+02 5.019e+02 1.784e+03, threshold=7.558e+02, percent-clipped=4.0 +2023-02-06 02:18:39,586 INFO [train.py:901] (0/4) Epoch 6, batch 2650, loss[loss=0.2607, simple_loss=0.332, pruned_loss=0.09469, over 8290.00 frames. ], tot_loss[loss=0.2805, simple_loss=0.3439, pruned_loss=0.1085, over 1614809.38 frames. ], batch size: 23, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:47,158 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9290, 1.9425, 2.4125, 1.7376, 0.9884, 2.2847, 0.3862, 1.1534], + device='cuda:0'), covar=tensor([0.3423, 0.4111, 0.0850, 0.3180, 0.7648, 0.0783, 0.5793, 0.2702], + device='cuda:0'), in_proj_covar=tensor([0.0133, 0.0131, 0.0081, 0.0179, 0.0217, 0.0082, 0.0141, 0.0133], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:19:05,101 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0206, 0.9230, 1.0097, 0.9780, 0.6986, 1.1037, 0.0363, 0.6237], + device='cuda:0'), covar=tensor([0.2764, 0.2234, 0.0948, 0.1892, 0.5193, 0.0749, 0.4521, 0.2616], + device='cuda:0'), in_proj_covar=tensor([0.0134, 0.0133, 0.0083, 0.0182, 0.0219, 0.0084, 0.0144, 0.0136], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:19:11,128 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:14,338 INFO [train.py:901] (0/4) Epoch 6, batch 2700, loss[loss=0.2761, simple_loss=0.3432, pruned_loss=0.1045, over 8541.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3421, pruned_loss=0.1076, over 1615302.11 frames. ], batch size: 39, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:19:15,193 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4635, 1.8339, 3.1043, 1.1276, 2.1951, 1.7641, 1.5253, 1.8672], + device='cuda:0'), covar=tensor([0.1597, 0.2008, 0.0678, 0.3515, 0.1476, 0.2644, 0.1531, 0.2276], + device='cuda:0'), in_proj_covar=tensor([0.0472, 0.0462, 0.0529, 0.0542, 0.0594, 0.0528, 0.0449, 0.0601], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 02:19:20,968 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:23,436 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.935e+02 3.532e+02 4.548e+02 1.003e+03, threshold=7.064e+02, percent-clipped=2.0 +2023-02-06 02:19:28,345 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:29,792 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 02:19:47,952 INFO [train.py:901] (0/4) Epoch 6, batch 2750, loss[loss=0.2474, simple_loss=0.3175, pruned_loss=0.08862, over 8354.00 frames. ], tot_loss[loss=0.2768, simple_loss=0.3408, pruned_loss=0.1064, over 1616808.14 frames. 
], batch size: 24, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:19:57,318 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9309, 2.4721, 3.1015, 1.0556, 3.1539, 2.0049, 1.4369, 1.6874], + device='cuda:0'), covar=tensor([0.0340, 0.0164, 0.0127, 0.0303, 0.0186, 0.0331, 0.0354, 0.0222], + device='cuda:0'), in_proj_covar=tensor([0.0321, 0.0235, 0.0206, 0.0288, 0.0227, 0.0381, 0.0298, 0.0273], + device='cuda:0'), out_proj_covar=tensor([1.0839e-04, 7.7382e-05, 6.8384e-05, 9.5718e-05, 7.6279e-05, 1.3795e-04, + 1.0173e-04, 9.1606e-05], device='cuda:0') +2023-02-06 02:20:02,613 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6292, 2.3869, 4.3296, 1.2245, 3.0935, 2.0358, 1.9847, 2.3089], + device='cuda:0'), covar=tensor([0.1821, 0.2031, 0.0721, 0.3637, 0.1571, 0.2723, 0.1576, 0.2791], + device='cuda:0'), in_proj_covar=tensor([0.0469, 0.0460, 0.0526, 0.0535, 0.0589, 0.0523, 0.0444, 0.0590], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 02:20:22,673 INFO [train.py:901] (0/4) Epoch 6, batch 2800, loss[loss=0.2837, simple_loss=0.328, pruned_loss=0.1197, over 7533.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3405, pruned_loss=0.1057, over 1618529.87 frames. ], batch size: 18, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:20:26,177 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:32,056 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.696e+02 3.315e+02 4.271e+02 8.534e+02, threshold=6.630e+02, percent-clipped=4.0 +2023-02-06 02:20:39,129 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:40,378 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:55,847 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:56,925 INFO [train.py:901] (0/4) Epoch 6, batch 2850, loss[loss=0.2284, simple_loss=0.3193, pruned_loss=0.06875, over 8337.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3401, pruned_loss=0.1052, over 1621510.32 frames. ], batch size: 25, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:11,140 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6425, 1.8658, 1.4584, 2.3485, 1.0911, 1.2292, 1.5310, 1.9931], + device='cuda:0'), covar=tensor([0.1058, 0.1228, 0.1549, 0.0607, 0.1494, 0.2193, 0.1356, 0.0920], + device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0254, 0.0282, 0.0229, 0.0247, 0.0284, 0.0286, 0.0254], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 02:21:31,687 INFO [train.py:901] (0/4) Epoch 6, batch 2900, loss[loss=0.2787, simple_loss=0.3484, pruned_loss=0.1046, over 8357.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.3403, pruned_loss=0.1052, over 1616712.09 frames. 
], batch size: 24, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:41,574 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.996e+02 3.885e+02 4.976e+02 9.964e+02, threshold=7.771e+02, percent-clipped=9.0 +2023-02-06 02:21:46,036 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:00,458 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:01,688 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 02:22:07,135 INFO [train.py:901] (0/4) Epoch 6, batch 2950, loss[loss=0.2696, simple_loss=0.3362, pruned_loss=0.1015, over 8032.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3407, pruned_loss=0.1056, over 1615371.12 frames. ], batch size: 22, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:17,903 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:35,337 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:41,770 INFO [train.py:901] (0/4) Epoch 6, batch 3000, loss[loss=0.2497, simple_loss=0.3171, pruned_loss=0.09119, over 8074.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3407, pruned_loss=0.106, over 1609830.71 frames. ], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:41,771 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 02:22:53,883 INFO [train.py:935] (0/4) Epoch 6, validation: loss=0.2158, simple_loss=0.3124, pruned_loss=0.05962, over 944034.00 frames. +2023-02-06 02:22:53,884 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 02:23:03,877 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.288e+02 4.080e+02 5.339e+02 1.082e+03, threshold=8.161e+02, percent-clipped=5.0 +2023-02-06 02:23:28,759 INFO [train.py:901] (0/4) Epoch 6, batch 3050, loss[loss=0.3791, simple_loss=0.4012, pruned_loss=0.1784, over 7107.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3414, pruned_loss=0.1059, over 1614774.46 frames. ], batch size: 71, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:03,332 INFO [train.py:901] (0/4) Epoch 6, batch 3100, loss[loss=0.2849, simple_loss=0.3492, pruned_loss=0.1103, over 7801.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3405, pruned_loss=0.1055, over 1611799.90 frames. 
], batch size: 19, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:12,750 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.871e+02 3.509e+02 4.582e+02 1.148e+03, threshold=7.017e+02, percent-clipped=4.0 +2023-02-06 02:24:14,172 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:24:21,729 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4382, 1.8674, 2.0053, 1.0802, 2.1610, 1.2127, 0.5502, 1.7197], + device='cuda:0'), covar=tensor([0.0276, 0.0129, 0.0095, 0.0231, 0.0132, 0.0429, 0.0369, 0.0119], + device='cuda:0'), in_proj_covar=tensor([0.0327, 0.0242, 0.0208, 0.0297, 0.0237, 0.0390, 0.0303, 0.0277], + device='cuda:0'), out_proj_covar=tensor([1.1037e-04, 7.9632e-05, 6.8545e-05, 9.8509e-05, 7.9991e-05, 1.4146e-04, + 1.0310e-04, 9.2497e-05], device='cuda:0') +2023-02-06 02:24:38,265 INFO [train.py:901] (0/4) Epoch 6, batch 3150, loss[loss=0.3142, simple_loss=0.3739, pruned_loss=0.1273, over 7465.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3393, pruned_loss=0.1048, over 1607313.62 frames. ], batch size: 75, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:57,055 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:10,974 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:13,470 INFO [train.py:901] (0/4) Epoch 6, batch 3200, loss[loss=0.2916, simple_loss=0.3481, pruned_loss=0.1175, over 8528.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3416, pruned_loss=0.1062, over 1610452.38 frames. ], batch size: 49, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:25:14,372 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:23,586 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.828e+02 3.409e+02 4.222e+02 1.719e+03, threshold=6.818e+02, percent-clipped=4.0 +2023-02-06 02:25:28,577 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43637.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:38,332 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 02:25:49,155 INFO [train.py:901] (0/4) Epoch 6, batch 3250, loss[loss=0.336, simple_loss=0.3667, pruned_loss=0.1527, over 8096.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3406, pruned_loss=0.1055, over 1611179.78 frames. ], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:25:49,999 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1771, 1.1575, 2.0276, 1.0739, 1.8823, 2.2433, 2.2241, 1.9142], + device='cuda:0'), covar=tensor([0.0889, 0.1111, 0.0537, 0.1631, 0.0657, 0.0358, 0.0494, 0.0686], + device='cuda:0'), in_proj_covar=tensor([0.0240, 0.0272, 0.0225, 0.0264, 0.0239, 0.0212, 0.0250, 0.0283], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 02:26:23,412 INFO [train.py:901] (0/4) Epoch 6, batch 3300, loss[loss=0.2921, simple_loss=0.3505, pruned_loss=0.1168, over 6596.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.3404, pruned_loss=0.1053, over 1613151.50 frames. 
], batch size: 71, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:26:33,006 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.968e+02 3.670e+02 5.054e+02 9.057e+02, threshold=7.341e+02, percent-clipped=6.0 +2023-02-06 02:26:38,183 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 02:26:58,037 INFO [train.py:901] (0/4) Epoch 6, batch 3350, loss[loss=0.2681, simple_loss=0.345, pruned_loss=0.09556, over 8452.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3395, pruned_loss=0.1049, over 1612868.75 frames. ], batch size: 27, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:27:06,833 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3096, 2.3845, 1.6227, 1.9871, 1.9550, 1.2955, 1.7173, 1.9318], + device='cuda:0'), covar=tensor([0.1121, 0.0309, 0.0890, 0.0509, 0.0680, 0.1241, 0.0782, 0.0786], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0232, 0.0311, 0.0302, 0.0317, 0.0312, 0.0335, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:27:25,482 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:27:31,888 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0 +2023-02-06 02:27:33,330 INFO [train.py:901] (0/4) Epoch 6, batch 3400, loss[loss=0.3146, simple_loss=0.3661, pruned_loss=0.1315, over 8236.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3406, pruned_loss=0.106, over 1615643.29 frames. ], batch size: 22, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:27:42,440 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.693e+02 3.397e+02 4.441e+02 9.371e+02, threshold=6.793e+02, percent-clipped=2.0 +2023-02-06 02:28:07,549 INFO [train.py:901] (0/4) Epoch 6, batch 3450, loss[loss=0.2935, simple_loss=0.3608, pruned_loss=0.1131, over 8467.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3404, pruned_loss=0.106, over 1614912.31 frames. 
], batch size: 25, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:08,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9508, 1.4791, 3.3841, 1.4357, 2.4042, 3.7611, 3.8083, 3.1470], + device='cuda:0'), covar=tensor([0.1075, 0.1563, 0.0357, 0.1968, 0.0748, 0.0257, 0.0329, 0.0601], + device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0272, 0.0224, 0.0266, 0.0237, 0.0213, 0.0249, 0.0283], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 02:28:09,127 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2027, 2.1791, 1.5656, 1.9159, 1.7984, 1.2436, 1.5871, 1.7593], + device='cuda:0'), covar=tensor([0.1046, 0.0294, 0.0827, 0.0450, 0.0570, 0.1065, 0.0858, 0.0678], + device='cuda:0'), in_proj_covar=tensor([0.0342, 0.0233, 0.0307, 0.0302, 0.0311, 0.0309, 0.0333, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:28:14,344 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:17,160 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9438, 2.1962, 2.2850, 1.7176, 1.1166, 2.3415, 0.4302, 1.2748], + device='cuda:0'), covar=tensor([0.2904, 0.1779, 0.1167, 0.2263, 0.5401, 0.0945, 0.4638, 0.2373], + device='cuda:0'), in_proj_covar=tensor([0.0134, 0.0131, 0.0086, 0.0178, 0.0218, 0.0081, 0.0137, 0.0133], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:28:42,237 INFO [train.py:901] (0/4) Epoch 6, batch 3500, loss[loss=0.2999, simple_loss=0.3643, pruned_loss=0.1177, over 8453.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3398, pruned_loss=0.1052, over 1617154.22 frames. ], batch size: 27, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:50,513 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:52,405 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 3.111e+02 3.775e+02 4.956e+02 7.195e+02, threshold=7.550e+02, percent-clipped=1.0 +2023-02-06 02:28:56,821 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 02:28:59,182 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 02:29:16,407 INFO [train.py:901] (0/4) Epoch 6, batch 3550, loss[loss=0.2285, simple_loss=0.2955, pruned_loss=0.08077, over 7699.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.34, pruned_loss=0.1051, over 1616426.91 frames. ], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:29:24,728 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:34,242 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:40,255 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-44000.pt +2023-02-06 02:29:52,632 INFO [train.py:901] (0/4) Epoch 6, batch 3600, loss[loss=0.3068, simple_loss=0.3589, pruned_loss=0.1274, over 8293.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3408, pruned_loss=0.106, over 1615581.02 frames. 
], batch size: 23, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:30:02,263 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.983e+02 3.632e+02 4.470e+02 1.452e+03, threshold=7.265e+02, percent-clipped=1.0 +2023-02-06 02:30:27,003 INFO [train.py:901] (0/4) Epoch 6, batch 3650, loss[loss=0.3142, simple_loss=0.3732, pruned_loss=0.1276, over 8465.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3399, pruned_loss=0.1054, over 1612008.79 frames. ], batch size: 25, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:00,588 INFO [train.py:901] (0/4) Epoch 6, batch 3700, loss[loss=0.1887, simple_loss=0.2718, pruned_loss=0.05279, over 7438.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.339, pruned_loss=0.1044, over 1606344.73 frames. ], batch size: 17, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:01,281 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 02:31:11,218 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 3.019e+02 3.651e+02 4.413e+02 8.839e+02, threshold=7.303e+02, percent-clipped=3.0 +2023-02-06 02:31:23,982 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:31:35,822 INFO [train.py:901] (0/4) Epoch 6, batch 3750, loss[loss=0.2475, simple_loss=0.3053, pruned_loss=0.09483, over 7722.00 frames. ], tot_loss[loss=0.2749, simple_loss=0.3395, pruned_loss=0.1052, over 1608384.73 frames. ], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:09,333 INFO [train.py:901] (0/4) Epoch 6, batch 3800, loss[loss=0.3018, simple_loss=0.3323, pruned_loss=0.1357, over 7786.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3397, pruned_loss=0.1056, over 1605988.65 frames. ], batch size: 19, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:19,582 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.008e+02 3.761e+02 4.930e+02 1.044e+03, threshold=7.521e+02, percent-clipped=7.0 +2023-02-06 02:32:32,410 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:37,978 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6738, 4.6327, 4.1217, 1.8709, 4.0986, 4.1082, 4.2738, 3.8730], + device='cuda:0'), covar=tensor([0.0688, 0.0595, 0.0966, 0.4783, 0.0731, 0.0941, 0.1407, 0.0764], + device='cuda:0'), in_proj_covar=tensor([0.0398, 0.0298, 0.0329, 0.0411, 0.0316, 0.0289, 0.0317, 0.0262], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:32:44,230 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:45,453 INFO [train.py:901] (0/4) Epoch 6, batch 3850, loss[loss=0.3492, simple_loss=0.4012, pruned_loss=0.1485, over 8557.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.3409, pruned_loss=0.1063, over 1607078.66 frames. ], batch size: 49, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:48,938 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:49,759 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:02,818 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-06 02:33:20,469 INFO [train.py:901] (0/4) Epoch 6, batch 3900, loss[loss=0.2656, simple_loss=0.3357, pruned_loss=0.09772, over 7935.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3413, pruned_loss=0.1063, over 1610011.87 frames. ], batch size: 20, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:33:23,928 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:30,570 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 2.909e+02 3.535e+02 4.398e+02 8.405e+02, threshold=7.069e+02, percent-clipped=2.0 +2023-02-06 02:33:30,740 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0982, 2.3986, 1.7132, 2.8827, 1.3871, 1.3937, 2.0385, 2.4279], + device='cuda:0'), covar=tensor([0.0987, 0.0892, 0.1532, 0.0445, 0.1459, 0.2049, 0.1251, 0.0873], + device='cuda:0'), in_proj_covar=tensor([0.0273, 0.0251, 0.0285, 0.0225, 0.0244, 0.0279, 0.0289, 0.0258], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 02:33:56,223 INFO [train.py:901] (0/4) Epoch 6, batch 3950, loss[loss=0.2314, simple_loss=0.2953, pruned_loss=0.08375, over 7701.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3408, pruned_loss=0.1059, over 1612411.39 frames. ], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:09,756 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:34:17,206 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3087, 1.9495, 3.1678, 2.4278, 2.6979, 2.0215, 1.5555, 1.3869], + device='cuda:0'), covar=tensor([0.2222, 0.2469, 0.0555, 0.1447, 0.1147, 0.1324, 0.1243, 0.2666], + device='cuda:0'), in_proj_covar=tensor([0.0796, 0.0729, 0.0626, 0.0719, 0.0813, 0.0668, 0.0636, 0.0666], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:34:30,790 INFO [train.py:901] (0/4) Epoch 6, batch 4000, loss[loss=0.2361, simple_loss=0.3184, pruned_loss=0.07689, over 8247.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3411, pruned_loss=0.1057, over 1615017.92 frames. ], batch size: 24, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:40,323 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.014e+02 2.805e+02 3.702e+02 4.857e+02 8.487e+02, threshold=7.405e+02, percent-clipped=7.0 +2023-02-06 02:34:43,276 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4453, 1.2444, 1.3503, 1.1638, 0.8556, 1.1879, 1.1671, 1.0359], + device='cuda:0'), covar=tensor([0.0588, 0.1332, 0.1781, 0.1454, 0.0610, 0.1601, 0.0753, 0.0650], + device='cuda:0'), in_proj_covar=tensor([0.0125, 0.0173, 0.0214, 0.0178, 0.0122, 0.0182, 0.0137, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:34:44,595 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:05,725 INFO [train.py:901] (0/4) Epoch 6, batch 4050, loss[loss=0.2842, simple_loss=0.3623, pruned_loss=0.103, over 8539.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3406, pruned_loss=0.1056, over 1614442.80 frames. 
], batch size: 28, lr: 1.27e-02, grad_scale: 16.0 +2023-02-06 02:35:06,074 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-06 02:35:06,869 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-06 02:35:41,190 INFO [train.py:901] (0/4) Epoch 6, batch 4100, loss[loss=0.2293, simple_loss=0.3002, pruned_loss=0.07923, over 8084.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.3398, pruned_loss=0.1054, over 1612651.71 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:35:44,032 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:50,486 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.131e+02 3.987e+02 5.314e+02 1.327e+03, threshold=7.973e+02, percent-clipped=4.0 +2023-02-06 02:36:00,537 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:00,598 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:14,412 INFO [train.py:901] (0/4) Epoch 6, batch 4150, loss[loss=0.2621, simple_loss=0.3333, pruned_loss=0.09546, over 8297.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3393, pruned_loss=0.105, over 1612012.16 frames. ], batch size: 23, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:15,774 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:49,812 INFO [train.py:901] (0/4) Epoch 6, batch 4200, loss[loss=0.2643, simple_loss=0.3313, pruned_loss=0.09864, over 8247.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.3377, pruned_loss=0.1039, over 1609759.40 frames. ], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:58,994 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 2.806e+02 3.559e+02 4.787e+02 1.284e+03, threshold=7.119e+02, percent-clipped=4.0 +2023-02-06 02:37:05,653 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 02:37:07,964 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:23,784 INFO [train.py:901] (0/4) Epoch 6, batch 4250, loss[loss=0.3014, simple_loss=0.3612, pruned_loss=0.1208, over 8365.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3371, pruned_loss=0.103, over 1607110.89 frames. ], batch size: 24, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:37:24,678 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:29,276 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 02:37:41,577 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:58,483 INFO [train.py:901] (0/4) Epoch 6, batch 4300, loss[loss=0.2505, simple_loss=0.3202, pruned_loss=0.09039, over 7921.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3367, pruned_loss=0.1025, over 1606185.83 frames. 
], batch size: 20, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:38:00,029 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:38:05,900 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 02:38:08,670 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.771e+02 3.321e+02 4.102e+02 9.930e+02, threshold=6.641e+02, percent-clipped=2.0 +2023-02-06 02:38:28,323 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-06 02:38:31,335 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8500, 3.8957, 2.4172, 2.6669, 3.2088, 1.9976, 2.5030, 3.1630], + device='cuda:0'), covar=tensor([0.1432, 0.0269, 0.0882, 0.0743, 0.0620, 0.1179, 0.0934, 0.0865], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0233, 0.0308, 0.0304, 0.0316, 0.0312, 0.0331, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:38:33,200 INFO [train.py:901] (0/4) Epoch 6, batch 4350, loss[loss=0.2643, simple_loss=0.3323, pruned_loss=0.0982, over 8078.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3375, pruned_loss=0.1041, over 1605811.37 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:00,034 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 02:39:06,572 INFO [train.py:901] (0/4) Epoch 6, batch 4400, loss[loss=0.2568, simple_loss=0.3163, pruned_loss=0.09866, over 6795.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.3382, pruned_loss=0.1048, over 1604701.43 frames. ], batch size: 15, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:16,200 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1749, 1.8086, 2.9245, 2.3469, 2.4467, 1.8040, 1.3612, 1.1289], + device='cuda:0'), covar=tensor([0.2333, 0.2553, 0.0581, 0.1268, 0.1001, 0.1440, 0.1474, 0.2429], + device='cuda:0'), in_proj_covar=tensor([0.0783, 0.0724, 0.0619, 0.0716, 0.0806, 0.0658, 0.0629, 0.0657], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:39:17,273 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 3.434e+02 4.206e+02 5.183e+02 1.151e+03, threshold=8.413e+02, percent-clipped=11.0 +2023-02-06 02:39:40,219 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 02:39:42,260 INFO [train.py:901] (0/4) Epoch 6, batch 4450, loss[loss=0.2512, simple_loss=0.3098, pruned_loss=0.09634, over 7534.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3379, pruned_loss=0.1052, over 1600141.13 frames. ], batch size: 18, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:58,549 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:13,885 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:16,361 INFO [train.py:901] (0/4) Epoch 6, batch 4500, loss[loss=0.3435, simple_loss=0.3793, pruned_loss=0.1539, over 6619.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.339, pruned_loss=0.1058, over 1602132.24 frames. 
], batch size: 71, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:40:26,436 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.100e+02 3.740e+02 5.266e+02 1.703e+03, threshold=7.479e+02, percent-clipped=4.0 +2023-02-06 02:40:31,783 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 02:40:44,433 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 02:40:52,034 INFO [train.py:901] (0/4) Epoch 6, batch 4550, loss[loss=0.3362, simple_loss=0.3842, pruned_loss=0.1441, over 8576.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3404, pruned_loss=0.1068, over 1602687.53 frames. ], batch size: 39, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:16,644 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2744, 1.3253, 4.5471, 1.7268, 3.8007, 3.7285, 4.0324, 3.8828], + device='cuda:0'), covar=tensor([0.0488, 0.3633, 0.0355, 0.2419, 0.1159, 0.0737, 0.0472, 0.0571], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0501, 0.0457, 0.0440, 0.0512, 0.0421, 0.0423, 0.0480], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 02:41:18,792 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:27,057 INFO [train.py:901] (0/4) Epoch 6, batch 4600, loss[loss=0.1941, simple_loss=0.2703, pruned_loss=0.05899, over 7438.00 frames. ], tot_loss[loss=0.2777, simple_loss=0.3411, pruned_loss=0.1071, over 1605287.37 frames. ], batch size: 17, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:34,812 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:36,654 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.977e+02 3.732e+02 4.434e+02 1.135e+03, threshold=7.465e+02, percent-clipped=1.0 +2023-02-06 02:42:02,750 INFO [train.py:901] (0/4) Epoch 6, batch 4650, loss[loss=0.3227, simple_loss=0.3815, pruned_loss=0.1319, over 8125.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3412, pruned_loss=0.1068, over 1606369.91 frames. ], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:08,367 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45074.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:25,260 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:36,888 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 02:42:36,979 INFO [train.py:901] (0/4) Epoch 6, batch 4700, loss[loss=0.2263, simple_loss=0.3069, pruned_loss=0.07279, over 8091.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.3399, pruned_loss=0.1055, over 1609163.82 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:46,395 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.041e+02 3.187e+02 3.833e+02 4.569e+02 1.251e+03, threshold=7.667e+02, percent-clipped=2.0 +2023-02-06 02:42:49,953 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-06 02:43:11,110 INFO [train.py:901] (0/4) Epoch 6, batch 4750, loss[loss=0.2993, simple_loss=0.3541, pruned_loss=0.1222, over 8148.00 frames. 
], tot_loss[loss=0.2768, simple_loss=0.3409, pruned_loss=0.1064, over 1610131.85 frames. ], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:30,409 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 02:43:31,798 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 02:43:45,096 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6437, 1.9420, 2.3025, 1.2881, 2.4718, 1.4752, 0.7486, 1.8958], + device='cuda:0'), covar=tensor([0.0297, 0.0140, 0.0104, 0.0212, 0.0120, 0.0415, 0.0338, 0.0111], + device='cuda:0'), in_proj_covar=tensor([0.0330, 0.0237, 0.0205, 0.0297, 0.0236, 0.0381, 0.0302, 0.0280], + device='cuda:0'), out_proj_covar=tensor([1.1020e-04, 7.6743e-05, 6.6699e-05, 9.7493e-05, 7.8530e-05, 1.3569e-04, + 1.0132e-04, 9.2449e-05], device='cuda:0') +2023-02-06 02:43:46,229 INFO [train.py:901] (0/4) Epoch 6, batch 4800, loss[loss=0.1996, simple_loss=0.2659, pruned_loss=0.06663, over 7691.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.342, pruned_loss=0.1072, over 1612904.21 frames. ], batch size: 18, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:50,544 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1774, 1.1960, 1.1921, 1.1342, 0.8506, 1.2346, 0.1009, 1.0448], + device='cuda:0'), covar=tensor([0.3323, 0.2161, 0.1009, 0.2118, 0.5674, 0.0902, 0.4811, 0.2017], + device='cuda:0'), in_proj_covar=tensor([0.0137, 0.0136, 0.0088, 0.0184, 0.0226, 0.0084, 0.0146, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:43:55,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 3.021e+02 3.501e+02 4.623e+02 8.497e+02, threshold=7.001e+02, percent-clipped=1.0 +2023-02-06 02:44:16,239 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:20,163 INFO [train.py:901] (0/4) Epoch 6, batch 4850, loss[loss=0.2341, simple_loss=0.2843, pruned_loss=0.09195, over 5957.00 frames. ], tot_loss[loss=0.2778, simple_loss=0.3418, pruned_loss=0.1069, over 1614193.04 frames. ], batch size: 13, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:44:20,857 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 02:44:22,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3746, 1.7338, 1.8680, 1.0743, 2.0438, 1.2740, 0.3945, 1.5940], + device='cuda:0'), covar=tensor([0.0249, 0.0141, 0.0111, 0.0187, 0.0113, 0.0422, 0.0364, 0.0115], + device='cuda:0'), in_proj_covar=tensor([0.0329, 0.0239, 0.0206, 0.0297, 0.0236, 0.0382, 0.0304, 0.0283], + device='cuda:0'), out_proj_covar=tensor([1.0989e-04, 7.7520e-05, 6.6989e-05, 9.7550e-05, 7.8243e-05, 1.3595e-04, + 1.0183e-04, 9.3360e-05], device='cuda:0') +2023-02-06 02:44:28,390 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2630, 1.7286, 1.6569, 1.8033, 1.7918, 1.7934, 2.4504, 1.7094], + device='cuda:0'), covar=tensor([0.0460, 0.1297, 0.1784, 0.1271, 0.0534, 0.1542, 0.0658, 0.0612], + device='cuda:0'), in_proj_covar=tensor([0.0126, 0.0175, 0.0216, 0.0178, 0.0123, 0.0183, 0.0138, 0.0151], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:44:32,436 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:35,152 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45285.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:50,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:56,003 INFO [train.py:901] (0/4) Epoch 6, batch 4900, loss[loss=0.2694, simple_loss=0.3352, pruned_loss=0.1018, over 8232.00 frames. ], tot_loss[loss=0.2772, simple_loss=0.3413, pruned_loss=0.1066, over 1606783.56 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:05,436 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.896e+02 3.521e+02 4.501e+02 9.960e+02, threshold=7.042e+02, percent-clipped=7.0 +2023-02-06 02:45:13,764 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-06 02:45:30,238 INFO [train.py:901] (0/4) Epoch 6, batch 4950, loss[loss=0.2802, simple_loss=0.3375, pruned_loss=0.1114, over 7924.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3413, pruned_loss=0.1062, over 1609655.29 frames. ], batch size: 20, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:30,381 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45366.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:46:05,717 INFO [train.py:901] (0/4) Epoch 6, batch 5000, loss[loss=0.2889, simple_loss=0.3597, pruned_loss=0.1091, over 8354.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3416, pruned_loss=0.1066, over 1614451.08 frames. 
], batch size: 24, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:07,204 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:08,576 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3390, 2.2648, 1.6723, 2.1644, 1.7280, 1.3887, 1.6624, 1.7869], + device='cuda:0'), covar=tensor([0.1048, 0.0284, 0.0873, 0.0381, 0.0723, 0.1202, 0.0715, 0.0608], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0228, 0.0305, 0.0297, 0.0314, 0.0311, 0.0329, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:46:15,101 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.255e+02 4.005e+02 4.887e+02 1.315e+03, threshold=8.009e+02, percent-clipped=7.0 +2023-02-06 02:46:24,042 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:40,013 INFO [train.py:901] (0/4) Epoch 6, batch 5050, loss[loss=0.2134, simple_loss=0.3045, pruned_loss=0.06114, over 8446.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3397, pruned_loss=0.1054, over 1612926.12 frames. ], batch size: 25, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:49,880 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 02:46:58,882 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 02:47:14,058 INFO [train.py:901] (0/4) Epoch 6, batch 5100, loss[loss=0.2544, simple_loss=0.3317, pruned_loss=0.0886, over 8481.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3404, pruned_loss=0.1055, over 1615791.85 frames. ], batch size: 25, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:47:24,712 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.842e+02 3.419e+02 4.219e+02 7.828e+02, threshold=6.837e+02, percent-clipped=0.0 +2023-02-06 02:47:26,958 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:43,433 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:49,305 INFO [train.py:901] (0/4) Epoch 6, batch 5150, loss[loss=0.317, simple_loss=0.3772, pruned_loss=0.1284, over 8327.00 frames. ], tot_loss[loss=0.275, simple_loss=0.3398, pruned_loss=0.1051, over 1615610.54 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:48:23,748 INFO [train.py:901] (0/4) Epoch 6, batch 5200, loss[loss=0.2689, simple_loss=0.3366, pruned_loss=0.1006, over 8595.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3377, pruned_loss=0.1038, over 1611925.13 frames. 
], batch size: 31, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:48:34,003 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.204e+02 4.015e+02 4.654e+02 8.708e+02, threshold=8.029e+02, percent-clipped=4.0 +2023-02-06 02:48:38,460 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7229, 1.9602, 2.2337, 1.6370, 1.1249, 2.2124, 0.5444, 1.3733], + device='cuda:0'), covar=tensor([0.2637, 0.1758, 0.0623, 0.2557, 0.5717, 0.0490, 0.4592, 0.2215], + device='cuda:0'), in_proj_covar=tensor([0.0138, 0.0137, 0.0088, 0.0184, 0.0228, 0.0083, 0.0148, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:48:57,683 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 02:48:59,759 INFO [train.py:901] (0/4) Epoch 6, batch 5250, loss[loss=0.224, simple_loss=0.3016, pruned_loss=0.07321, over 8145.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3383, pruned_loss=0.1045, over 1611501.00 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:49:30,199 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45710.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:49:34,102 INFO [train.py:901] (0/4) Epoch 6, batch 5300, loss[loss=0.3018, simple_loss=0.3624, pruned_loss=0.1206, over 8232.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3393, pruned_loss=0.1057, over 1607537.75 frames. ], batch size: 24, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:49:36,937 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:49:43,557 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.935e+02 3.437e+02 4.667e+02 1.283e+03, threshold=6.874e+02, percent-clipped=3.0 +2023-02-06 02:50:09,986 INFO [train.py:901] (0/4) Epoch 6, batch 5350, loss[loss=0.2919, simple_loss=0.36, pruned_loss=0.1119, over 8030.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3408, pruned_loss=0.106, over 1615833.63 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:25,436 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:27,326 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45792.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:42,756 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:42,775 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:43,910 INFO [train.py:901] (0/4) Epoch 6, batch 5400, loss[loss=0.307, simple_loss=0.3586, pruned_loss=0.1277, over 8343.00 frames. ], tot_loss[loss=0.2768, simple_loss=0.3415, pruned_loss=0.1061, over 1619171.06 frames. 
], batch size: 26, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:49,971 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45825.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:50:53,700 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.842e+02 3.609e+02 4.644e+02 1.367e+03, threshold=7.218e+02, percent-clipped=2.0 +2023-02-06 02:50:59,021 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:00,972 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0159, 1.6646, 3.3382, 1.5024, 2.3198, 3.7987, 3.7311, 3.3311], + device='cuda:0'), covar=tensor([0.0991, 0.1293, 0.0399, 0.1781, 0.0757, 0.0254, 0.0385, 0.0535], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0266, 0.0229, 0.0267, 0.0236, 0.0216, 0.0251, 0.0276], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 02:51:17,302 INFO [train.py:901] (0/4) Epoch 6, batch 5450, loss[loss=0.2383, simple_loss=0.2971, pruned_loss=0.08977, over 7240.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3399, pruned_loss=0.1048, over 1616925.24 frames. ], batch size: 16, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:51:25,559 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:47,555 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 02:51:52,367 INFO [train.py:901] (0/4) Epoch 6, batch 5500, loss[loss=0.3354, simple_loss=0.3825, pruned_loss=0.1442, over 8538.00 frames. ], tot_loss[loss=0.275, simple_loss=0.3402, pruned_loss=0.1049, over 1612231.88 frames. ], batch size: 31, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:52:03,083 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.821e+02 3.418e+02 4.385e+02 9.516e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 02:52:05,380 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4765, 1.8048, 3.3043, 1.1451, 2.3504, 1.8859, 1.4742, 2.0124], + device='cuda:0'), covar=tensor([0.1459, 0.1885, 0.0606, 0.3161, 0.1262, 0.2307, 0.1543, 0.1844], + device='cuda:0'), in_proj_covar=tensor([0.0466, 0.0458, 0.0524, 0.0540, 0.0583, 0.0530, 0.0443, 0.0581], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 02:52:27,011 INFO [train.py:901] (0/4) Epoch 6, batch 5550, loss[loss=0.2341, simple_loss=0.2988, pruned_loss=0.08469, over 7927.00 frames. ], tot_loss[loss=0.274, simple_loss=0.3389, pruned_loss=0.1045, over 1609140.33 frames. ], batch size: 20, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:52:51,788 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-46000.pt +2023-02-06 02:53:03,509 INFO [train.py:901] (0/4) Epoch 6, batch 5600, loss[loss=0.2411, simple_loss=0.3184, pruned_loss=0.08196, over 8026.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3387, pruned_loss=0.1039, over 1612167.82 frames. 
], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:04,342 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3146, 2.0938, 3.8205, 2.0382, 2.3930, 4.3186, 4.2157, 3.8478], + device='cuda:0'), covar=tensor([0.0959, 0.1188, 0.0407, 0.1683, 0.0941, 0.0259, 0.0364, 0.0509], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0264, 0.0226, 0.0264, 0.0236, 0.0212, 0.0248, 0.0272], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:0') +2023-02-06 02:53:13,360 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.809e+02 3.495e+02 4.670e+02 1.291e+03, threshold=6.989e+02, percent-clipped=6.0 +2023-02-06 02:53:25,613 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3040, 2.1321, 1.6670, 1.9317, 1.8111, 1.3061, 1.6315, 1.7806], + device='cuda:0'), covar=tensor([0.1153, 0.0330, 0.0935, 0.0502, 0.0572, 0.1226, 0.0796, 0.0755], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0237, 0.0314, 0.0307, 0.0315, 0.0318, 0.0341, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 02:53:35,998 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46064.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:53:37,297 INFO [train.py:901] (0/4) Epoch 6, batch 5650, loss[loss=0.2829, simple_loss=0.3454, pruned_loss=0.1102, over 8611.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.3382, pruned_loss=0.1032, over 1614021.93 frames. ], batch size: 34, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:47,403 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46081.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:53:51,162 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 02:54:04,722 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46106.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:54:12,521 INFO [train.py:901] (0/4) Epoch 6, batch 5700, loss[loss=0.2795, simple_loss=0.3418, pruned_loss=0.1086, over 8685.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3383, pruned_loss=0.1037, over 1612739.93 frames. ], batch size: 34, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:22,533 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.829e+02 3.489e+02 4.392e+02 1.030e+03, threshold=6.978e+02, percent-clipped=3.0 +2023-02-06 02:54:25,965 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:54:32,807 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7818, 2.2232, 3.9637, 2.9355, 3.1738, 2.3534, 1.7966, 1.4973], + device='cuda:0'), covar=tensor([0.2191, 0.2693, 0.0561, 0.1558, 0.1249, 0.1189, 0.1117, 0.2923], + device='cuda:0'), in_proj_covar=tensor([0.0800, 0.0736, 0.0626, 0.0725, 0.0830, 0.0676, 0.0640, 0.0676], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:54:46,332 INFO [train.py:901] (0/4) Epoch 6, batch 5750, loss[loss=0.2719, simple_loss=0.3342, pruned_loss=0.1048, over 8281.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.338, pruned_loss=0.1037, over 1613010.64 frames. 
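
The `zipformer.py:1185` records trace stochastic layer skipping: each encoder stack has a warmup window measured in batches, and on some batches a whole layer is dropped at random (`num_to_drop=1, layers_to_drop={0}`). A small residual drop rate clearly persists long after warmup, since `batch_count` here is around 45 700 against windows ending at 4 000 yet layers are still occasionally dropped. A sketch of that idea; the probabilities and the linear annealing are guesses for illustration:

```python
# Illustrative only: initial/final probabilities and the schedule are assumed.
import random

def pick_layers_to_drop(batch_count: float, warmup_begin: float,
                        warmup_end: float, num_layers: int,
                        initial_p: float = 0.5, final_p: float = 0.05) -> set:
    if batch_count <= warmup_begin:
        p = initial_p
    elif batch_count >= warmup_end:
        p = final_p  # small residual rate: layers still drop occasionally
    else:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = initial_p + frac * (final_p - initial_p)
    return {i for i in range(num_layers) if random.random() < p}

# e.g. layers_to_drop = pick_layers_to_drop(45720.0, 1333.3, 2000.0, 12)
```
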
], batch size: 23, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:50,064 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-06 02:54:53,644 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 02:54:55,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:04,383 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.89 vs. limit=2.0 +2023-02-06 02:55:21,188 INFO [train.py:901] (0/4) Epoch 6, batch 5800, loss[loss=0.374, simple_loss=0.4203, pruned_loss=0.1639, over 7049.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3372, pruned_loss=0.1027, over 1613782.50 frames. ], batch size: 72, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:55:24,805 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:32,618 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.949e+02 3.358e+02 4.338e+02 9.471e+02, threshold=6.717e+02, percent-clipped=1.0 +2023-02-06 02:55:45,808 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:55,778 INFO [train.py:901] (0/4) Epoch 6, batch 5850, loss[loss=0.2316, simple_loss=0.2944, pruned_loss=0.08441, over 7780.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3374, pruned_loss=0.1026, over 1612204.49 frames. ], batch size: 19, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:14,970 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:56:29,466 INFO [train.py:901] (0/4) Epoch 6, batch 5900, loss[loss=0.3047, simple_loss=0.362, pruned_loss=0.1237, over 8365.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3359, pruned_loss=0.1021, over 1609434.37 frames. ], batch size: 49, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:39,473 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 3.022e+02 3.849e+02 5.141e+02 8.536e+02, threshold=7.697e+02, percent-clipped=7.0 +2023-02-06 02:56:43,676 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:57:04,654 INFO [train.py:901] (0/4) Epoch 6, batch 5950, loss[loss=0.1972, simple_loss=0.2684, pruned_loss=0.063, over 7438.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3353, pruned_loss=0.1014, over 1611971.31 frames. ], batch size: 17, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:38,429 INFO [train.py:901] (0/4) Epoch 6, batch 6000, loss[loss=0.2308, simple_loss=0.3049, pruned_loss=0.07837, over 8074.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3351, pruned_loss=0.1017, over 1611500.87 frames. ], batch size: 21, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:38,430 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 02:57:50,763 INFO [train.py:935] (0/4) Epoch 6, validation: loss=0.2127, simple_loss=0.3094, pruned_loss=0.05799, over 944034.00 frames. 
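
Periodically the recipe pauses training to run the whole dev set ("Computing validation loss"); the frame count is the same 944034.00 in both validation records in this log, which is what a sweep over a fixed dev set would produce each time. A minimal sketch, with `compute_loss` standing in for the recipe's own per-batch loss helper:

```python
# Sketch; `compute_loss` is a stand-in for the recipe's per-batch helper,
# assumed to return (summed loss over the batch, number of frames).
import torch

def compute_validation_loss(model, valid_dl, compute_loss) -> float:
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_dl:
            loss_sum, num_frames = compute_loss(model, batch)
            tot_loss += float(loss_sum)
            tot_frames += float(num_frames)
    model.train()
    return tot_loss / tot_frames  # per-frame loss, as reported in the log
```
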
+2023-02-06 02:57:50,764 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 02:57:55,753 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2931, 4.3170, 3.8438, 1.9515, 3.7462, 3.9235, 4.0652, 3.3457], + device='cuda:0'), covar=tensor([0.0929, 0.0610, 0.1127, 0.5007, 0.0828, 0.0744, 0.1284, 0.0887], + device='cuda:0'), in_proj_covar=tensor([0.0414, 0.0313, 0.0341, 0.0431, 0.0332, 0.0305, 0.0327, 0.0274], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 02:58:01,257 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.683e+02 3.226e+02 4.100e+02 1.140e+03, threshold=6.453e+02, percent-clipped=1.0 +2023-02-06 02:58:04,332 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:58:21,754 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46460.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:58:25,654 INFO [train.py:901] (0/4) Epoch 6, batch 6050, loss[loss=0.2144, simple_loss=0.299, pruned_loss=0.06489, over 8234.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3342, pruned_loss=0.1009, over 1607945.22 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:58:56,091 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:59:02,111 INFO [train.py:901] (0/4) Epoch 6, batch 6100, loss[loss=0.2924, simple_loss=0.3403, pruned_loss=0.1222, over 7534.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3351, pruned_loss=0.1017, over 1606521.38 frames. ], batch size: 18, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:59:12,641 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 3.046e+02 3.657e+02 4.398e+02 9.620e+02, threshold=7.315e+02, percent-clipped=4.0 +2023-02-06 02:59:13,543 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:59:17,679 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4796, 1.4411, 1.5814, 1.3226, 1.1755, 1.4056, 1.7626, 1.5459], + device='cuda:0'), covar=tensor([0.0490, 0.1254, 0.1679, 0.1369, 0.0604, 0.1488, 0.0694, 0.0581], + device='cuda:0'), in_proj_covar=tensor([0.0122, 0.0171, 0.0211, 0.0173, 0.0121, 0.0180, 0.0134, 0.0147], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 02:59:24,580 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 02:59:37,759 INFO [train.py:901] (0/4) Epoch 6, batch 6150, loss[loss=0.3216, simple_loss=0.3679, pruned_loss=0.1377, over 8339.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.336, pruned_loss=0.1023, over 1608445.60 frames. 
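
The `attn_weights_entropy` dumps are health checks on the self-attention heads: each value is, roughly, the Shannon entropy of one head's attention distribution, so near-zero entries flag heads that have collapsed onto a single key while large entries flag near-uniform heads. A self-contained sketch of the quantity being summarized (shapes are illustrative):

```python
import torch

def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
    # attn: (num_heads, num_queries, num_keys); each row sums to 1.
    p = attn.clamp(min=1e-20)
    entropy = -(p * p.log()).sum(dim=-1)  # (num_heads, num_queries)
    return entropy.mean(dim=-1)           # one scalar per head

attn = torch.softmax(torch.randn(8, 16, 16), dim=-1)
print(attn_weights_entropy(attn))  # low value = peaky head, high = diffuse
```
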
], batch size: 26, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:59:43,344 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7742, 1.7185, 5.8813, 2.0591, 5.2638, 4.8781, 5.4610, 5.3367], + device='cuda:0'), covar=tensor([0.0492, 0.3819, 0.0311, 0.2589, 0.0945, 0.0641, 0.0421, 0.0497], + device='cuda:0'), in_proj_covar=tensor([0.0366, 0.0517, 0.0468, 0.0445, 0.0518, 0.0428, 0.0437, 0.0481], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 02:59:56,241 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:11,856 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46613.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:13,815 INFO [train.py:901] (0/4) Epoch 6, batch 6200, loss[loss=0.2753, simple_loss=0.337, pruned_loss=0.1068, over 8258.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3383, pruned_loss=0.1036, over 1612940.81 frames. ], batch size: 24, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:00:14,710 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:16,761 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:24,151 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.053e+02 3.051e+02 3.861e+02 4.926e+02 1.016e+03, threshold=7.722e+02, percent-clipped=3.0 +2023-02-06 03:00:28,935 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:48,690 INFO [train.py:901] (0/4) Epoch 6, batch 6250, loss[loss=0.2089, simple_loss=0.2788, pruned_loss=0.06946, over 7524.00 frames. ], tot_loss[loss=0.2705, simple_loss=0.3361, pruned_loss=0.1024, over 1609402.09 frames. ], batch size: 18, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:01:08,350 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6979, 1.9951, 1.5969, 2.4730, 1.1712, 1.2381, 1.6623, 2.0660], + device='cuda:0'), covar=tensor([0.1117, 0.1131, 0.1556, 0.0683, 0.1480, 0.1987, 0.1386, 0.1001], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0251, 0.0282, 0.0228, 0.0248, 0.0278, 0.0283, 0.0248], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 03:01:22,870 INFO [train.py:901] (0/4) Epoch 6, batch 6300, loss[loss=0.2819, simple_loss=0.3462, pruned_loss=0.1087, over 8335.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3377, pruned_loss=0.1029, over 1612958.78 frames. ], batch size: 26, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:01:34,428 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.734e+02 3.399e+02 4.377e+02 1.449e+03, threshold=6.797e+02, percent-clipped=4.0 +2023-02-06 03:01:49,390 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:01:57,778 INFO [train.py:901] (0/4) Epoch 6, batch 6350, loss[loss=0.2701, simple_loss=0.3517, pruned_loss=0.09423, over 8360.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3372, pruned_loss=0.1026, over 1615053.54 frames. 
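
The three numbers inside every `loss[...]` record are tied together by the pruned-transducer objective: the total is the pruned RNN-T loss plus a scaled "simple" (smoothed) loss. A scale of 0.5 reproduces the records in this log exactly; for example, batch 6350 just above reports loss=0.2701 from simple_loss=0.3517 and pruned_loss=0.09423:

```python
SIMPLE_LOSS_SCALE = 0.5  # consistent with every loss[...] record in this log

def combined_loss(simple_loss: float, pruned_loss: float) -> float:
    return SIMPLE_LOSS_SCALE * simple_loss + pruned_loss

print(round(combined_loss(0.3517, 0.09423), 4))  # 0.2701, matching batch 6350
```
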
], batch size: 24, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:02:32,254 INFO [train.py:901] (0/4) Epoch 6, batch 6400, loss[loss=0.2995, simple_loss=0.3635, pruned_loss=0.1178, over 8656.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3386, pruned_loss=0.1037, over 1614250.61 frames. ], batch size: 34, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:02:41,187 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:02:43,100 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.744e+02 3.578e+02 4.396e+02 9.504e+02, threshold=7.157e+02, percent-clipped=5.0 +2023-02-06 03:03:07,338 INFO [train.py:901] (0/4) Epoch 6, batch 6450, loss[loss=0.325, simple_loss=0.3678, pruned_loss=0.1411, over 8478.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3396, pruned_loss=0.1039, over 1618391.63 frames. ], batch size: 25, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:09,170 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 03:03:41,568 INFO [train.py:901] (0/4) Epoch 6, batch 6500, loss[loss=0.2561, simple_loss=0.333, pruned_loss=0.08965, over 8100.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3397, pruned_loss=0.1047, over 1618794.99 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:51,600 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.994e+02 3.001e+02 3.759e+02 4.377e+02 1.086e+03, threshold=7.517e+02, percent-clipped=1.0 +2023-02-06 03:04:09,436 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:14,546 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46964.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:15,766 INFO [train.py:901] (0/4) Epoch 6, batch 6550, loss[loss=0.3429, simple_loss=0.3776, pruned_loss=0.1541, over 7172.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3413, pruned_loss=0.1056, over 1617123.89 frames. ], batch size: 72, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:37,514 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 03:04:45,791 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:50,972 INFO [train.py:901] (0/4) Epoch 6, batch 6600, loss[loss=0.2392, simple_loss=0.3243, pruned_loss=0.07703, over 8107.00 frames. ], tot_loss[loss=0.2749, simple_loss=0.3403, pruned_loss=0.1048, over 1614722.06 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:56,464 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 03:05:01,161 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.927e+02 3.687e+02 4.772e+02 1.123e+03, threshold=7.374e+02, percent-clipped=4.0 +2023-02-06 03:05:03,290 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:14,290 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 03:05:15,514 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. 
limit=2.0 +2023-02-06 03:05:25,532 INFO [train.py:901] (0/4) Epoch 6, batch 6650, loss[loss=0.2881, simple_loss=0.3469, pruned_loss=0.1146, over 7967.00 frames. ], tot_loss[loss=0.2757, simple_loss=0.3407, pruned_loss=0.1054, over 1613463.26 frames. ], batch size: 21, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:05:30,595 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:35,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:45,855 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 03:06:00,869 INFO [train.py:901] (0/4) Epoch 6, batch 6700, loss[loss=0.2561, simple_loss=0.3247, pruned_loss=0.0937, over 8098.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3406, pruned_loss=0.1053, over 1616493.70 frames. ], batch size: 21, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:12,492 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.777e+02 3.640e+02 4.922e+02 1.093e+03, threshold=7.281e+02, percent-clipped=6.0 +2023-02-06 03:06:34,847 INFO [train.py:901] (0/4) Epoch 6, batch 6750, loss[loss=0.2407, simple_loss=0.3288, pruned_loss=0.07627, over 7825.00 frames. ], tot_loss[loss=0.2746, simple_loss=0.3398, pruned_loss=0.1047, over 1617070.06 frames. ], batch size: 20, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:38,953 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:10,701 INFO [train.py:901] (0/4) Epoch 6, batch 6800, loss[loss=0.2781, simple_loss=0.3484, pruned_loss=0.1039, over 8102.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3389, pruned_loss=0.1043, over 1616068.64 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:12,684 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 03:07:21,197 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.942e+02 3.591e+02 4.804e+02 1.528e+03, threshold=7.182e+02, percent-clipped=7.0 +2023-02-06 03:07:40,021 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:44,574 INFO [train.py:901] (0/4) Epoch 6, batch 6850, loss[loss=0.2895, simple_loss=0.3324, pruned_loss=0.1233, over 7658.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3397, pruned_loss=0.1048, over 1615678.04 frames. ], batch size: 19, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:49,205 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.25 vs. limit=5.0 +2023-02-06 03:07:58,968 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:00,934 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 03:08:19,247 INFO [train.py:901] (0/4) Epoch 6, batch 6900, loss[loss=0.232, simple_loss=0.2992, pruned_loss=0.08242, over 7431.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3409, pruned_loss=0.1061, over 1613408.59 frames. 
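
The `scaling.py:679` Whitening lines compare a per-group feature covariance against a scaled identity: the metric is 1.0 for perfectly "white" activations and grows with anisotropy, and the layer only intervenes when it crosses `limit`. Below is one way to compute such a metric, shown to illustrate the idea; it is not claimed to be icefall's exact formula:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    # x: (num_frames, num_channels); channels split into equal groups.
    n, c = x.shape
    d = c // num_groups
    g = x.reshape(n, num_groups, d).transpose(0, 1)    # (groups, n, d)
    g = g - g.mean(dim=1, keepdim=True)
    cov = g.transpose(1, 2) @ g / n                    # (groups, d, d)
    mean_sq_eig = (cov * cov).sum(dim=(1, 2)) / d      # trace(C^2) / d
    sq_mean_eig = torch.diagonal(cov, dim1=1, dim2=2).mean(dim=1) ** 2
    return (mean_sq_eig / sq_mean_eig).mean()          # 1.0 when white

print(whitening_metric(torch.randn(4000, 96), num_groups=8))  # close to 1.0
```
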
], batch size: 17, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:08:28,203 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:30,614 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 2.873e+02 3.537e+02 4.379e+02 9.664e+02, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 03:08:32,864 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:44,849 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:49,769 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:54,249 INFO [train.py:901] (0/4) Epoch 6, batch 6950, loss[loss=0.2541, simple_loss=0.3294, pruned_loss=0.08938, over 8087.00 frames. ], tot_loss[loss=0.2761, simple_loss=0.3408, pruned_loss=0.1057, over 1613600.15 frames. ], batch size: 21, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:09:00,533 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3616, 1.7550, 1.7012, 0.9878, 1.7448, 1.3360, 0.3077, 1.5881], + device='cuda:0'), covar=tensor([0.0184, 0.0131, 0.0125, 0.0162, 0.0153, 0.0397, 0.0326, 0.0097], + device='cuda:0'), in_proj_covar=tensor([0.0333, 0.0250, 0.0207, 0.0300, 0.0243, 0.0390, 0.0309, 0.0285], + device='cuda:0'), out_proj_covar=tensor([1.0959e-04, 8.0680e-05, 6.5955e-05, 9.6708e-05, 7.9365e-05, 1.3656e-04, + 1.0196e-04, 9.2720e-05], device='cuda:0') +2023-02-06 03:09:09,773 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 03:09:15,140 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:09:28,488 INFO [train.py:901] (0/4) Epoch 6, batch 7000, loss[loss=0.2677, simple_loss=0.3431, pruned_loss=0.0961, over 8109.00 frames. ], tot_loss[loss=0.2734, simple_loss=0.3386, pruned_loss=0.1041, over 1615698.67 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:09:39,928 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.860e+02 2.784e+02 3.553e+02 4.437e+02 1.281e+03, threshold=7.106e+02, percent-clipped=4.0 +2023-02-06 03:10:03,570 INFO [train.py:901] (0/4) Epoch 6, batch 7050, loss[loss=0.2502, simple_loss=0.3283, pruned_loss=0.08607, over 8447.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3381, pruned_loss=0.1036, over 1618056.65 frames. ], batch size: 27, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:10:37,637 INFO [train.py:901] (0/4) Epoch 6, batch 7100, loss[loss=0.2708, simple_loss=0.3462, pruned_loss=0.09772, over 8461.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3383, pruned_loss=0.1034, over 1619802.39 frames. ], batch size: 27, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:10:48,827 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.154e+02 3.207e+02 3.842e+02 5.073e+02 1.424e+03, threshold=7.684e+02, percent-clipped=2.0 +2023-02-06 03:10:56,276 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:12,596 INFO [train.py:901] (0/4) Epoch 6, batch 7150, loss[loss=0.2067, simple_loss=0.3015, pruned_loss=0.05596, over 8315.00 frames. 
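
`grad_scale` is the automatic-mixed-precision loss scale: it is halved whenever a backward pass produces inf/nan gradients (16.0 dropped to 8.0 near batch 5450, and 8.0 to 4.0 near batch 6650 before recovering) and grown again after a run of clean steps. A minimal usage sketch of `torch.cuda.amp.GradScaler`; the recipe may tune the growth policy, since the recovery above is faster than the scaler's default `growth_interval`:

```python
import torch

scaler = torch.cuda.amp.GradScaler(init_scale=16.0)

def train_step(model, optimizer, loss_fn, features, targets):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = loss_fn(model(features), targets)
    scaler.scale(loss).backward()  # backprop through the scaled loss
    scaler.step(optimizer)         # unscales; skips the step on inf/nan
    scaler.update()                # shrinks or grows the scale as needed
    return loss.detach()
```
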
], tot_loss[loss=0.2724, simple_loss=0.3383, pruned_loss=0.1032, over 1621530.55 frames. ], batch size: 25, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:14,075 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:22,813 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.22 vs. limit=5.0 +2023-02-06 03:11:37,932 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:46,767 INFO [train.py:901] (0/4) Epoch 6, batch 7200, loss[loss=0.2746, simple_loss=0.3394, pruned_loss=0.1048, over 8080.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3387, pruned_loss=0.1033, over 1621992.54 frames. ], batch size: 21, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:49,876 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.50 vs. limit=5.0 +2023-02-06 03:11:57,784 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.983e+02 3.737e+02 4.630e+02 8.445e+02, threshold=7.473e+02, percent-clipped=4.0 +2023-02-06 03:12:22,017 INFO [train.py:901] (0/4) Epoch 6, batch 7250, loss[loss=0.2599, simple_loss=0.3393, pruned_loss=0.09022, over 8504.00 frames. ], tot_loss[loss=0.2714, simple_loss=0.3382, pruned_loss=0.1023, over 1623778.66 frames. ], batch size: 28, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:56,472 INFO [train.py:901] (0/4) Epoch 6, batch 7300, loss[loss=0.2376, simple_loss=0.3053, pruned_loss=0.08489, over 8086.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.3387, pruned_loss=0.1029, over 1622126.58 frames. ], batch size: 21, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:57,892 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:07,200 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.071e+02 3.696e+02 4.839e+02 1.031e+03, threshold=7.393e+02, percent-clipped=2.0 +2023-02-06 03:13:13,254 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:23,397 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:30,044 INFO [train.py:901] (0/4) Epoch 6, batch 7350, loss[loss=0.2672, simple_loss=0.3354, pruned_loss=0.0995, over 7808.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3384, pruned_loss=0.1027, over 1619024.09 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:13:48,783 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 03:13:59,269 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:05,860 INFO [train.py:901] (0/4) Epoch 6, batch 7400, loss[loss=0.2295, simple_loss=0.3054, pruned_loss=0.0768, over 7807.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.339, pruned_loss=0.1034, over 1618648.16 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:08,028 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-06 03:14:13,529 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47827.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:14:17,407 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 3.124e+02 3.904e+02 4.877e+02 9.892e+02, threshold=7.808e+02, percent-clipped=5.0 +2023-02-06 03:14:33,710 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:40,236 INFO [train.py:901] (0/4) Epoch 6, batch 7450, loss[loss=0.2253, simple_loss=0.2922, pruned_loss=0.07917, over 7189.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3379, pruned_loss=0.1032, over 1612440.01 frames. ], batch size: 16, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:46,244 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 03:15:15,310 INFO [train.py:901] (0/4) Epoch 6, batch 7500, loss[loss=0.2541, simple_loss=0.3403, pruned_loss=0.08389, over 8106.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3372, pruned_loss=0.1031, over 1610517.88 frames. ], batch size: 23, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:25,968 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.098e+02 3.102e+02 3.706e+02 4.699e+02 1.511e+03, threshold=7.412e+02, percent-clipped=9.0 +2023-02-06 03:15:49,279 INFO [train.py:901] (0/4) Epoch 6, batch 7550, loss[loss=0.2665, simple_loss=0.3383, pruned_loss=0.0973, over 8473.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3369, pruned_loss=0.1025, over 1610457.69 frames. ], batch size: 27, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:54,919 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:11,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:12,443 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-48000.pt +2023-02-06 03:16:24,750 INFO [train.py:901] (0/4) Epoch 6, batch 7600, loss[loss=0.3191, simple_loss=0.3718, pruned_loss=0.1332, over 7291.00 frames. ], tot_loss[loss=0.2729, simple_loss=0.3383, pruned_loss=0.1037, over 1613651.97 frames. ], batch size: 71, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:16:32,919 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.95 vs. limit=5.0 +2023-02-06 03:16:37,192 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.897e+02 3.536e+02 4.611e+02 2.294e+03, threshold=7.072e+02, percent-clipped=5.0 +2023-02-06 03:17:01,515 INFO [train.py:901] (0/4) Epoch 6, batch 7650, loss[loss=0.2396, simple_loss=0.3142, pruned_loss=0.08254, over 8129.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3381, pruned_loss=0.1036, over 1612781.74 frames. 
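
Besides the per-epoch `epoch-N.pt` files, `checkpoint.py` writes a rolling `checkpoint-<batch>.pt`; 46000 and 48000 both appear in this log, so the interval of 2 000 batches is inferred from the filenames. A sketch of that bookkeeping; the contents of the saved dict are a plausible minimum, not the recipe's full checkpoint:

```python
import logging
from pathlib import Path

import torch

def maybe_save_checkpoint(model, optimizer, batch_idx_train: int,
                          exp_dir: Path, save_every_n: int = 2000) -> None:
    if batch_idx_train == 0 or batch_idx_train % save_every_n != 0:
        return
    path = exp_dir / f"checkpoint-{batch_idx_train}.pt"
    logging.info(f"Saving checkpoint to {path}")
    torch.save(
        {
            "model": model.state_dict(),          # minimum plausible contents
            "optimizer": optimizer.state_dict(),
            "batch_idx_train": batch_idx_train,
        },
        path,
    )
```
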
], batch size: 22, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:17,561 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48090.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:24,159 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:32,445 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:34,944 INFO [train.py:901] (0/4) Epoch 6, batch 7700, loss[loss=0.3035, simple_loss=0.3618, pruned_loss=0.1226, over 8384.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3365, pruned_loss=0.103, over 1606514.98 frames. ], batch size: 49, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:46,047 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.821e+02 3.617e+02 4.667e+02 9.808e+02, threshold=7.234e+02, percent-clipped=3.0 +2023-02-06 03:17:50,869 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:57,332 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 03:17:59,321 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:10,114 INFO [train.py:901] (0/4) Epoch 6, batch 7750, loss[loss=0.2668, simple_loss=0.3268, pruned_loss=0.1034, over 7803.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3364, pruned_loss=0.1029, over 1606680.93 frames. ], batch size: 19, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:13,410 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48171.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:18:43,285 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:43,803 INFO [train.py:901] (0/4) Epoch 6, batch 7800, loss[loss=0.2958, simple_loss=0.3613, pruned_loss=0.1152, over 8786.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3371, pruned_loss=0.1032, over 1609728.45 frames. 
], batch size: 30, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:53,435 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48230.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:18:54,592 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 3.053e+02 3.731e+02 4.789e+02 1.133e+03, threshold=7.462e+02, percent-clipped=3.0 +2023-02-06 03:19:05,483 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3707, 2.6810, 1.6154, 2.0742, 2.0730, 1.3962, 1.8389, 2.1850], + device='cuda:0'), covar=tensor([0.1248, 0.0351, 0.1148, 0.0625, 0.0714, 0.1316, 0.1040, 0.0693], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0229, 0.0314, 0.0301, 0.0312, 0.0314, 0.0338, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 03:19:10,040 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5959, 2.3908, 4.2812, 1.2125, 2.6374, 1.7911, 1.8581, 2.1960], + device='cuda:0'), covar=tensor([0.1696, 0.2038, 0.0780, 0.3704, 0.1787, 0.2978, 0.1622, 0.3014], + device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0466, 0.0528, 0.0547, 0.0592, 0.0530, 0.0447, 0.0588], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 03:19:16,568 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:19:17,081 INFO [train.py:901] (0/4) Epoch 6, batch 7850, loss[loss=0.2822, simple_loss=0.349, pruned_loss=0.1078, over 8735.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.337, pruned_loss=0.1033, over 1608638.08 frames. ], batch size: 30, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:19:30,611 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48286.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:19:35,338 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2935, 1.6530, 1.6407, 1.4488, 1.0501, 1.3908, 1.6776, 1.7592], + device='cuda:0'), covar=tensor([0.0486, 0.1130, 0.1653, 0.1266, 0.0604, 0.1446, 0.0709, 0.0534], + device='cuda:0'), in_proj_covar=tensor([0.0119, 0.0166, 0.0208, 0.0169, 0.0118, 0.0174, 0.0129, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 03:19:51,006 INFO [train.py:901] (0/4) Epoch 6, batch 7900, loss[loss=0.2904, simple_loss=0.3555, pruned_loss=0.1126, over 8522.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3365, pruned_loss=0.1028, over 1611999.03 frames. 
], batch size: 26, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:01,872 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.927e+02 3.494e+02 4.326e+02 7.205e+02, threshold=6.988e+02, percent-clipped=0.0 +2023-02-06 03:20:09,333 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:10,797 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6641, 2.0439, 2.2664, 0.9644, 2.3874, 1.6441, 0.7134, 1.8950], + device='cuda:0'), covar=tensor([0.0312, 0.0147, 0.0137, 0.0290, 0.0160, 0.0367, 0.0421, 0.0152], + device='cuda:0'), in_proj_covar=tensor([0.0339, 0.0248, 0.0202, 0.0302, 0.0240, 0.0388, 0.0314, 0.0288], + device='cuda:0'), out_proj_covar=tensor([1.1131e-04, 7.9301e-05, 6.3516e-05, 9.6137e-05, 7.7835e-05, 1.3544e-04, + 1.0276e-04, 9.3068e-05], device='cuda:0') +2023-02-06 03:20:25,105 INFO [train.py:901] (0/4) Epoch 6, batch 7950, loss[loss=0.225, simple_loss=0.2956, pruned_loss=0.07721, over 7814.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3373, pruned_loss=0.1031, over 1612877.20 frames. ], batch size: 20, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:55,091 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6213, 4.6592, 4.1135, 1.6717, 4.0020, 4.1267, 4.2356, 3.7302], + device='cuda:0'), covar=tensor([0.0819, 0.0604, 0.1176, 0.5280, 0.0846, 0.0728, 0.1321, 0.0812], + device='cuda:0'), in_proj_covar=tensor([0.0397, 0.0312, 0.0330, 0.0413, 0.0322, 0.0301, 0.0312, 0.0265], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:20:58,700 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:59,246 INFO [train.py:901] (0/4) Epoch 6, batch 8000, loss[loss=0.297, simple_loss=0.3396, pruned_loss=0.1272, over 7254.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3377, pruned_loss=0.1037, over 1609146.94 frames. ], batch size: 16, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:10,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.174e+02 2.873e+02 3.488e+02 4.217e+02 8.104e+02, threshold=6.977e+02, percent-clipped=2.0 +2023-02-06 03:21:11,761 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:16,739 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:22,407 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-06 03:21:33,916 INFO [train.py:901] (0/4) Epoch 6, batch 8050, loss[loss=0.2376, simple_loss=0.3024, pruned_loss=0.08641, over 7538.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3353, pruned_loss=0.1032, over 1585261.79 frames. 
], batch size: 18, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:37,653 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:54,558 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48496.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:56,889 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-6.pt +2023-02-06 03:22:07,495 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 03:22:10,944 INFO [train.py:901] (0/4) Epoch 7, batch 0, loss[loss=0.239, simple_loss=0.3063, pruned_loss=0.08582, over 7809.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3063, pruned_loss=0.08582, over 7809.00 frames. ], batch size: 19, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:10,945 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 03:22:22,759 INFO [train.py:935] (0/4) Epoch 7, validation: loss=0.2113, simple_loss=0.3091, pruned_loss=0.05678, over 944034.00 frames. +2023-02-06 03:22:22,760 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 03:22:37,619 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:38,082 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 03:22:41,832 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 03:22:45,433 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.727e+02 3.570e+02 4.321e+02 1.428e+03, threshold=7.140e+02, percent-clipped=5.0 +2023-02-06 03:22:46,419 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5294, 2.3259, 4.5193, 1.2988, 2.9451, 2.1243, 1.6920, 2.8667], + device='cuda:0'), covar=tensor([0.1516, 0.1785, 0.0564, 0.3259, 0.1461, 0.2365, 0.1481, 0.1959], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0474, 0.0530, 0.0554, 0.0595, 0.0538, 0.0452, 0.0596], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 03:22:53,309 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48542.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:22:55,835 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:57,635 INFO [train.py:901] (0/4) Epoch 7, batch 50, loss[loss=0.3093, simple_loss=0.3709, pruned_loss=0.1238, over 8325.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3354, pruned_loss=0.1005, over 365673.56 frames. ], batch size: 26, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:57,793 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:23:09,795 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48567.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:23:12,925 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
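
The learning rate follows icefall's Eden schedule, which decays in both the batch index and the epoch; that is why lr drops abruptly from 1.21e-02 to 1.14e-02 here as epoch 7 starts, with no matching jump in the batch count. With the defaults below (base lr 0.05, lr_batches 5000, lr_epochs 3.5, epochs counted from zero) the formula reproduces the logged values; those defaults are assumptions checked against this log, not read from the training command:

```python
def eden_lr(base_lr: float, batch: int, epoch: int,
            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
    batch_factor = ((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25
    epoch_factor = ((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25
    return base_lr * batch_factor * epoch_factor

print(f"{eden_lr(0.05, 48400, 5):.2e}")  # ~1.21e-02, end of epoch 6
print(f"{eden_lr(0.05, 48520, 6):.2e}")  # ~1.14e-02, start of epoch 7
```
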
Duration: 0.9409375 +2023-02-06 03:23:14,279 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48574.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:23:22,854 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4835, 1.3626, 4.3203, 2.1321, 2.4082, 4.9274, 4.8792, 4.3951], + device='cuda:0'), covar=tensor([0.0998, 0.1465, 0.0227, 0.1710, 0.0888, 0.0228, 0.0320, 0.0553], + device='cuda:0'), in_proj_covar=tensor([0.0240, 0.0273, 0.0227, 0.0269, 0.0236, 0.0211, 0.0264, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 03:23:25,142 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.76 vs. limit=5.0 +2023-02-06 03:23:31,387 INFO [train.py:901] (0/4) Epoch 7, batch 100, loss[loss=0.2491, simple_loss=0.3244, pruned_loss=0.08685, over 7932.00 frames. ], tot_loss[loss=0.2705, simple_loss=0.3373, pruned_loss=0.1018, over 644102.63 frames. ], batch size: 20, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:23:34,997 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 03:23:54,579 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 2.936e+02 3.434e+02 4.642e+02 8.961e+02, threshold=6.868e+02, percent-clipped=3.0 +2023-02-06 03:24:06,736 INFO [train.py:901] (0/4) Epoch 7, batch 150, loss[loss=0.2529, simple_loss=0.3264, pruned_loss=0.08969, over 8653.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3393, pruned_loss=0.104, over 860109.51 frames. ], batch size: 34, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:24:08,457 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.65 vs. limit=5.0 +2023-02-06 03:24:09,542 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0897, 2.5829, 3.4182, 0.9914, 3.2929, 2.1220, 1.5824, 2.0057], + device='cuda:0'), covar=tensor([0.0359, 0.0167, 0.0106, 0.0349, 0.0193, 0.0361, 0.0394, 0.0209], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0251, 0.0206, 0.0306, 0.0240, 0.0390, 0.0315, 0.0289], + device='cuda:0'), out_proj_covar=tensor([1.1297e-04, 8.0335e-05, 6.4819e-05, 9.7474e-05, 7.7261e-05, 1.3538e-04, + 1.0264e-04, 9.3195e-05], device='cuda:0') +2023-02-06 03:24:31,946 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:24:34,067 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48689.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:24:40,629 INFO [train.py:901] (0/4) Epoch 7, batch 200, loss[loss=0.2925, simple_loss=0.3468, pruned_loss=0.1191, over 8139.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3382, pruned_loss=0.103, over 1026522.52 frames. ], batch size: 22, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:03,500 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.629e+02 3.306e+02 4.274e+02 1.004e+03, threshold=6.612e+02, percent-clipped=3.0 +2023-02-06 03:25:15,507 INFO [train.py:901] (0/4) Epoch 7, batch 250, loss[loss=0.2814, simple_loss=0.3586, pruned_loss=0.1021, over 8499.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3372, pruned_loss=0.1023, over 1156735.90 frames. 
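
The `_sp0.9` and `_sp1.1` suffixes on excluded cut IDs show that the training set is 3-way speed-perturbed: each recording also appears at 0.9x and 1.1x speed, which is why a cut that is fine at native speed (e.g. 497-129325-0061-9566 at about 1.07 s) can fall under the 1 s floor in its 1.1x copy. A sketch with lhotse, whose `perturb_speed` appends exactly these ID suffixes; the factors match the suffixes in the log:

```python
from lhotse import CutSet

def three_way_speed_perturb(cuts: CutSet) -> CutSet:
    # Appends 0.9x and 1.1x copies; their IDs gain _sp0.9 / _sp1.1 suffixes.
    return cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1)
```
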
], batch size: 26, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:21,474 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2883, 1.1909, 2.2593, 1.1588, 2.0086, 2.4215, 2.4884, 2.0547], + device='cuda:0'), covar=tensor([0.0960, 0.1184, 0.0497, 0.1842, 0.0623, 0.0392, 0.0509, 0.0797], + device='cuda:0'), in_proj_covar=tensor([0.0237, 0.0271, 0.0226, 0.0267, 0.0235, 0.0211, 0.0263, 0.0277], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 03:25:22,702 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48759.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:26,760 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 03:25:35,604 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 03:25:41,107 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:50,547 INFO [train.py:901] (0/4) Epoch 7, batch 300, loss[loss=0.2415, simple_loss=0.3284, pruned_loss=0.07728, over 8466.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3381, pruned_loss=0.1027, over 1259952.67 frames. ], batch size: 25, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:52,221 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:54,978 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:12,367 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:13,533 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.973e+02 3.476e+02 4.340e+02 1.124e+03, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 03:26:18,388 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:25,097 INFO [train.py:901] (0/4) Epoch 7, batch 350, loss[loss=0.2518, simple_loss=0.3035, pruned_loss=0.1001, over 7440.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3368, pruned_loss=0.1017, over 1337161.43 frames. ], batch size: 17, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:26:30,603 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:43,218 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:27:00,371 INFO [train.py:901] (0/4) Epoch 7, batch 400, loss[loss=0.2512, simple_loss=0.3187, pruned_loss=0.09185, over 7796.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3353, pruned_loss=0.1008, over 1397314.46 frames. 
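
The `tot_loss[...]` aggregates behave like an exponentially decaying sum rather than a plain epoch average: at the start of epoch 7 the frame count climbs (365673 -> 644102 -> 860109 -> ... above) and then saturates around 1.6 million, roughly 200 times the typical batch size in frames. A sketch of a tracker with that behaviour; the decay constant of 1/200 is inferred from where the counts level off, not taken from the code:

```python
class DecayingLossTracker:
    def __init__(self, reset_interval: int = 200):  # inferred from this log
        self.decay = 1.0 - 1.0 / reset_interval
        self.loss_sum = 0.0
        self.frames = 0.0

    def update(self, batch_loss_sum: float, batch_frames: float) -> None:
        # Old statistics decay; the newest batch enters at full weight.
        self.loss_sum = self.loss_sum * self.decay + batch_loss_sum
        self.frames = self.frames * self.decay + batch_frames

    @property
    def loss(self) -> float:
        return self.loss_sum / self.frames  # per-frame value, as in the log
```
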
], batch size: 19, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:27:01,276 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:27:09,181 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5519, 1.3910, 2.8455, 1.1690, 2.0443, 3.0587, 2.9937, 2.6734], + device='cuda:0'), covar=tensor([0.1000, 0.1337, 0.0417, 0.1961, 0.0767, 0.0283, 0.0487, 0.0636], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0275, 0.0227, 0.0270, 0.0236, 0.0211, 0.0264, 0.0279], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 03:27:22,458 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.855e+02 2.734e+02 3.619e+02 4.506e+02 1.679e+03, threshold=7.237e+02, percent-clipped=8.0 +2023-02-06 03:27:32,152 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48945.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:27:34,584 INFO [train.py:901] (0/4) Epoch 7, batch 450, loss[loss=0.2992, simple_loss=0.3714, pruned_loss=0.1135, over 8105.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3366, pruned_loss=0.1018, over 1444237.09 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:27:49,638 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48970.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:28:10,027 INFO [train.py:901] (0/4) Epoch 7, batch 500, loss[loss=0.3104, simple_loss=0.3667, pruned_loss=0.1271, over 8345.00 frames. ], tot_loss[loss=0.2694, simple_loss=0.3361, pruned_loss=0.1013, over 1481673.06 frames. ], batch size: 24, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:28:32,340 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.572e+02 3.184e+02 4.227e+02 8.649e+02, threshold=6.369e+02, percent-clipped=1.0 +2023-02-06 03:28:33,279 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4225, 1.8454, 3.1405, 1.1727, 2.1679, 1.8434, 1.4285, 1.9612], + device='cuda:0'), covar=tensor([0.1581, 0.1822, 0.0633, 0.3359, 0.1417, 0.2558, 0.1664, 0.2136], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0473, 0.0526, 0.0547, 0.0590, 0.0532, 0.0454, 0.0593], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 03:28:43,901 INFO [train.py:901] (0/4) Epoch 7, batch 550, loss[loss=0.2648, simple_loss=0.3286, pruned_loss=0.1005, over 8204.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3343, pruned_loss=0.1004, over 1507269.18 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:28:50,288 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:07,253 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:19,500 INFO [train.py:901] (0/4) Epoch 7, batch 600, loss[loss=0.3168, simple_loss=0.3625, pruned_loss=0.1355, over 6672.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3336, pruned_loss=0.1003, over 1529732.99 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:29:31,445 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 03:29:41,659 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49130.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:42,821 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.845e+02 3.510e+02 4.694e+02 1.227e+03, threshold=7.020e+02, percent-clipped=5.0 +2023-02-06 03:29:54,594 INFO [train.py:901] (0/4) Epoch 7, batch 650, loss[loss=0.3089, simple_loss=0.374, pruned_loss=0.1219, over 8449.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3326, pruned_loss=0.09919, over 1549585.73 frames. ], batch size: 27, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:29:58,943 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:59,702 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:15,517 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6066, 4.5378, 4.1288, 1.7576, 4.1333, 4.0504, 4.2674, 3.8420], + device='cuda:0'), covar=tensor([0.0697, 0.0573, 0.0825, 0.4284, 0.0687, 0.0707, 0.0951, 0.0613], + device='cuda:0'), in_proj_covar=tensor([0.0412, 0.0318, 0.0339, 0.0424, 0.0326, 0.0307, 0.0313, 0.0268], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:30:17,657 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:18,958 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:28,558 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9941, 1.2697, 4.2816, 1.5750, 3.6676, 3.5755, 3.7554, 3.6471], + device='cuda:0'), covar=tensor([0.0475, 0.3809, 0.0376, 0.2670, 0.1050, 0.0713, 0.0507, 0.0569], + device='cuda:0'), in_proj_covar=tensor([0.0371, 0.0515, 0.0458, 0.0454, 0.0518, 0.0428, 0.0432, 0.0487], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 03:30:29,824 INFO [train.py:901] (0/4) Epoch 7, batch 700, loss[loss=0.2302, simple_loss=0.2919, pruned_loss=0.08424, over 7548.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3321, pruned_loss=0.09898, over 1561147.44 frames. ], batch size: 18, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:30:30,704 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:54,554 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 2.876e+02 3.436e+02 4.276e+02 6.994e+02, threshold=6.873e+02, percent-clipped=0.0 +2023-02-06 03:31:03,863 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 03:31:06,140 INFO [train.py:901] (0/4) Epoch 7, batch 750, loss[loss=0.2664, simple_loss=0.3291, pruned_loss=0.1018, over 7968.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3335, pruned_loss=0.09989, over 1573067.80 frames. ], batch size: 21, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:31:06,455 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. 
limit=5.0 +2023-02-06 03:31:09,786 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:18,082 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 03:31:26,430 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 03:31:40,949 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:41,433 INFO [train.py:901] (0/4) Epoch 7, batch 800, loss[loss=0.2944, simple_loss=0.3563, pruned_loss=0.1162, over 8320.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.3333, pruned_loss=0.09955, over 1584927.33 frames. ], batch size: 26, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:31:52,694 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:59,464 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4850, 4.5476, 4.0034, 2.0435, 4.0059, 4.1472, 4.2469, 3.8123], + device='cuda:0'), covar=tensor([0.0919, 0.0500, 0.0908, 0.4776, 0.0821, 0.0838, 0.1070, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0413, 0.0319, 0.0346, 0.0430, 0.0331, 0.0312, 0.0318, 0.0270], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:32:05,537 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.832e+02 3.318e+02 4.162e+02 1.224e+03, threshold=6.636e+02, percent-clipped=6.0 +2023-02-06 03:32:17,939 INFO [train.py:901] (0/4) Epoch 7, batch 850, loss[loss=0.2707, simple_loss=0.3476, pruned_loss=0.09687, over 8514.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3336, pruned_loss=0.09976, over 1593862.55 frames. ], batch size: 28, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:32:45,200 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3113, 1.7321, 2.7939, 1.1412, 1.8664, 1.6355, 1.4359, 1.6930], + device='cuda:0'), covar=tensor([0.1377, 0.1539, 0.0600, 0.2737, 0.1254, 0.2220, 0.1400, 0.1769], + device='cuda:0'), in_proj_covar=tensor([0.0471, 0.0471, 0.0529, 0.0545, 0.0590, 0.0527, 0.0450, 0.0593], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 03:32:52,553 INFO [train.py:901] (0/4) Epoch 7, batch 900, loss[loss=0.2512, simple_loss=0.3243, pruned_loss=0.08904, over 7970.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3344, pruned_loss=0.1002, over 1597202.88 frames. ], batch size: 21, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:33:17,134 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.812e+02 3.278e+02 4.578e+02 1.649e+03, threshold=6.556e+02, percent-clipped=8.0 +2023-02-06 03:33:21,981 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:33:28,081 INFO [train.py:901] (0/4) Epoch 7, batch 950, loss[loss=0.2276, simple_loss=0.2981, pruned_loss=0.07853, over 7541.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.335, pruned_loss=0.1006, over 1600050.18 frames. 
], batch size: 18, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:33:28,868 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49450.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:33:50,514 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 03:33:50,653 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4644, 1.0568, 4.6146, 1.6247, 3.9917, 3.7495, 4.1979, 4.0400], + device='cuda:0'), covar=tensor([0.0371, 0.4280, 0.0384, 0.2969, 0.1072, 0.0704, 0.0436, 0.0497], + device='cuda:0'), in_proj_covar=tensor([0.0369, 0.0516, 0.0464, 0.0460, 0.0522, 0.0432, 0.0438, 0.0489], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 03:34:03,650 INFO [train.py:901] (0/4) Epoch 7, batch 1000, loss[loss=0.2512, simple_loss=0.3197, pruned_loss=0.09139, over 7924.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3332, pruned_loss=0.09885, over 1604639.86 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:34:24,199 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 03:34:27,690 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.091e+02 3.599e+02 4.515e+02 1.445e+03, threshold=7.198e+02, percent-clipped=7.0 +2023-02-06 03:34:35,910 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 03:34:38,740 INFO [train.py:901] (0/4) Epoch 7, batch 1050, loss[loss=0.2808, simple_loss=0.336, pruned_loss=0.1128, over 7927.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3344, pruned_loss=0.09951, over 1610682.75 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:34:43,039 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:34:54,535 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49571.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:00,737 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:13,191 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2115, 1.5963, 3.2945, 1.3825, 2.2487, 3.6816, 3.6297, 3.1194], + device='cuda:0'), covar=tensor([0.0842, 0.1391, 0.0388, 0.1902, 0.0833, 0.0231, 0.0400, 0.0589], + device='cuda:0'), in_proj_covar=tensor([0.0238, 0.0274, 0.0230, 0.0270, 0.0233, 0.0213, 0.0266, 0.0277], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 03:35:13,238 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:14,556 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:15,146 INFO [train.py:901] (0/4) Epoch 7, batch 1100, loss[loss=0.3226, simple_loss=0.3754, pruned_loss=0.1349, over 7045.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3349, pruned_loss=0.09982, over 1610794.15 frames. 
], batch size: 72, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:35:38,352 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.978e+02 2.766e+02 3.386e+02 4.310e+02 6.415e+02, threshold=6.771e+02, percent-clipped=0.0 +2023-02-06 03:35:46,770 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 03:35:49,452 INFO [train.py:901] (0/4) Epoch 7, batch 1150, loss[loss=0.2595, simple_loss=0.3288, pruned_loss=0.09506, over 8445.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3346, pruned_loss=0.09988, over 1614689.72 frames. ], batch size: 29, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:36:23,378 INFO [train.py:901] (0/4) Epoch 7, batch 1200, loss[loss=0.2573, simple_loss=0.3338, pruned_loss=0.09037, over 8633.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.335, pruned_loss=0.1008, over 1610406.32 frames. ], batch size: 34, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:36:27,559 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8230, 1.4609, 1.4526, 1.2899, 1.0319, 1.2855, 1.4207, 1.3492], + device='cuda:0'), covar=tensor([0.0551, 0.1259, 0.1735, 0.1376, 0.0650, 0.1488, 0.0756, 0.0586], + device='cuda:0'), in_proj_covar=tensor([0.0118, 0.0167, 0.0206, 0.0170, 0.0117, 0.0175, 0.0129, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 03:36:33,700 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:36:47,165 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.915e+02 3.820e+02 5.048e+02 1.193e+03, threshold=7.640e+02, percent-clipped=11.0 +2023-02-06 03:36:57,971 INFO [train.py:901] (0/4) Epoch 7, batch 1250, loss[loss=0.2769, simple_loss=0.3324, pruned_loss=0.1107, over 7538.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3353, pruned_loss=0.1012, over 1613515.69 frames. ], batch size: 18, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:37:22,193 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:37:22,269 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:37:29,683 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49794.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:37:32,814 INFO [train.py:901] (0/4) Epoch 7, batch 1300, loss[loss=0.2973, simple_loss=0.3572, pruned_loss=0.1187, over 8132.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3349, pruned_loss=0.1002, over 1616519.38 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:37:57,661 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.650e+02 3.390e+02 4.402e+02 9.600e+02, threshold=6.781e+02, percent-clipped=3.0 +2023-02-06 03:38:08,125 INFO [train.py:901] (0/4) Epoch 7, batch 1350, loss[loss=0.2587, simple_loss=0.3272, pruned_loss=0.0951, over 8025.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3358, pruned_loss=0.1009, over 1618054.59 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:38:42,144 INFO [train.py:901] (0/4) Epoch 7, batch 1400, loss[loss=0.2765, simple_loss=0.3484, pruned_loss=0.1023, over 8286.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3361, pruned_loss=0.101, over 1616250.59 frames. 
], batch size: 23, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:38:42,332 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:38:49,798 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49909.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:38:53,179 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 03:39:07,157 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 2.921e+02 3.790e+02 4.996e+02 8.997e+02, threshold=7.579e+02, percent-clipped=6.0 +2023-02-06 03:39:11,355 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 03:39:18,097 INFO [train.py:901] (0/4) Epoch 7, batch 1450, loss[loss=0.2393, simple_loss=0.301, pruned_loss=0.08884, over 7258.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3351, pruned_loss=0.1006, over 1617733.32 frames. ], batch size: 16, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:39:26,259 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3101, 1.5495, 4.4363, 1.8882, 2.3785, 5.1012, 4.9041, 4.4654], + device='cuda:0'), covar=tensor([0.1009, 0.1531, 0.0260, 0.1919, 0.0915, 0.0156, 0.0304, 0.0451], + device='cuda:0'), in_proj_covar=tensor([0.0238, 0.0275, 0.0231, 0.0272, 0.0236, 0.0215, 0.0269, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 03:39:31,675 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:39:41,668 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.45 vs. limit=5.0 +2023-02-06 03:39:49,073 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:39:52,393 INFO [train.py:901] (0/4) Epoch 7, batch 1500, loss[loss=0.2594, simple_loss=0.3348, pruned_loss=0.09198, over 8344.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3345, pruned_loss=0.09999, over 1615232.47 frames. ], batch size: 25, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:39:53,213 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-50000.pt +2023-02-06 03:40:07,868 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 03:40:16,579 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.823e+02 3.555e+02 4.038e+02 9.229e+02, threshold=7.110e+02, percent-clipped=3.0 +2023-02-06 03:40:27,903 INFO [train.py:901] (0/4) Epoch 7, batch 1550, loss[loss=0.3368, simple_loss=0.3925, pruned_loss=0.1406, over 8504.00 frames. ], tot_loss[loss=0.267, simple_loss=0.334, pruned_loss=0.1, over 1610298.12 frames. 
], batch size: 26, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:40:57,943 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5216, 1.7083, 1.9363, 1.6301, 1.0284, 1.8787, 0.2065, 1.1562], + device='cuda:0'), covar=tensor([0.3115, 0.1985, 0.0883, 0.1931, 0.6052, 0.0579, 0.4434, 0.2336], + device='cuda:0'), in_proj_covar=tensor([0.0141, 0.0134, 0.0082, 0.0186, 0.0225, 0.0084, 0.0144, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:41:02,316 INFO [train.py:901] (0/4) Epoch 7, batch 1600, loss[loss=0.3186, simple_loss=0.3712, pruned_loss=0.133, over 8081.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.334, pruned_loss=0.1002, over 1606781.55 frames. ], batch size: 21, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:22,664 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:25,922 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.849e+02 3.464e+02 4.418e+02 7.019e+02, threshold=6.928e+02, percent-clipped=0.0 +2023-02-06 03:41:35,372 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:35,569 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 03:41:37,165 INFO [train.py:901] (0/4) Epoch 7, batch 1650, loss[loss=0.218, simple_loss=0.2922, pruned_loss=0.0719, over 7794.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3338, pruned_loss=0.09987, over 1608911.84 frames. ], batch size: 19, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:40,091 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9526, 2.7200, 3.1553, 1.0945, 3.1726, 1.9486, 1.5164, 2.0188], + device='cuda:0'), covar=tensor([0.0416, 0.0150, 0.0110, 0.0362, 0.0147, 0.0389, 0.0413, 0.0217], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0258, 0.0215, 0.0311, 0.0249, 0.0403, 0.0315, 0.0292], + device='cuda:0'), out_proj_covar=tensor([1.1342e-04, 8.1425e-05, 6.7136e-05, 9.8328e-05, 7.9676e-05, 1.3926e-04, + 1.0242e-04, 9.3340e-05], device='cuda:0') +2023-02-06 03:41:41,427 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:48,662 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50165.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:41:59,244 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:42:05,842 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50190.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:42:11,953 INFO [train.py:901] (0/4) Epoch 7, batch 1700, loss[loss=0.2291, simple_loss=0.3015, pruned_loss=0.07831, over 8083.00 frames. ], tot_loss[loss=0.266, simple_loss=0.333, pruned_loss=0.0995, over 1607717.39 frames. 
], batch size: 21, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:42:22,978 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50215.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:42:35,242 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.886e+02 3.481e+02 4.608e+02 1.233e+03, threshold=6.962e+02, percent-clipped=3.0 +2023-02-06 03:42:41,484 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9063, 2.4454, 4.7890, 1.3456, 3.1385, 2.2195, 2.0093, 2.7117], + device='cuda:0'), covar=tensor([0.1310, 0.1646, 0.0477, 0.2869, 0.1054, 0.2133, 0.1168, 0.1941], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0472, 0.0530, 0.0550, 0.0590, 0.0534, 0.0451, 0.0592], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 03:42:42,135 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:42:46,679 INFO [train.py:901] (0/4) Epoch 7, batch 1750, loss[loss=0.3058, simple_loss=0.3727, pruned_loss=0.1194, over 8105.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3336, pruned_loss=0.09969, over 1609107.18 frames. ], batch size: 23, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:43:21,551 INFO [train.py:901] (0/4) Epoch 7, batch 1800, loss[loss=0.3123, simple_loss=0.3609, pruned_loss=0.1318, over 8123.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3335, pruned_loss=0.09977, over 1607572.18 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:43:44,791 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.922e+02 3.562e+02 4.379e+02 1.030e+03, threshold=7.125e+02, percent-clipped=4.0 +2023-02-06 03:43:56,163 INFO [train.py:901] (0/4) Epoch 7, batch 1850, loss[loss=0.2528, simple_loss=0.3226, pruned_loss=0.0915, over 7800.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3318, pruned_loss=0.09869, over 1605144.41 frames. ], batch size: 19, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:44:23,780 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 03:44:28,232 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4298, 1.4792, 1.6323, 1.2666, 0.9286, 1.6907, 0.0838, 0.9870], + device='cuda:0'), covar=tensor([0.3321, 0.2010, 0.0926, 0.2176, 0.5201, 0.0616, 0.4158, 0.2231], + device='cuda:0'), in_proj_covar=tensor([0.0143, 0.0138, 0.0082, 0.0188, 0.0226, 0.0086, 0.0145, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:44:30,595 INFO [train.py:901] (0/4) Epoch 7, batch 1900, loss[loss=0.2537, simple_loss=0.3254, pruned_loss=0.09098, over 8601.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.333, pruned_loss=0.0994, over 1604849.89 frames. ], batch size: 34, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:44:43,989 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 03:44:53,920 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.786e+02 3.637e+02 4.614e+02 8.948e+02, threshold=7.273e+02, percent-clipped=3.0 +2023-02-06 03:44:56,019 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-06 03:45:04,592 INFO [train.py:901] (0/4) Epoch 7, batch 1950, loss[loss=0.2894, simple_loss=0.335, pruned_loss=0.1219, over 7715.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3349, pruned_loss=0.1004, over 1610462.00 frames. ], batch size: 18, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:45:15,309 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 03:45:33,289 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:39,200 INFO [train.py:901] (0/4) Epoch 7, batch 2000, loss[loss=0.2586, simple_loss=0.3307, pruned_loss=0.09326, over 8583.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3355, pruned_loss=0.1004, over 1615003.14 frames. ], batch size: 34, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:45:39,410 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50499.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:56,773 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:46:03,226 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.800e+02 3.583e+02 4.591e+02 1.075e+03, threshold=7.166e+02, percent-clipped=7.0 +2023-02-06 03:46:13,948 INFO [train.py:901] (0/4) Epoch 7, batch 2050, loss[loss=0.1986, simple_loss=0.2692, pruned_loss=0.06401, over 7545.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3355, pruned_loss=0.1, over 1619515.89 frames. ], batch size: 18, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:46:19,889 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7501, 5.7721, 5.0614, 2.1434, 5.2280, 5.3996, 5.3205, 5.1106], + device='cuda:0'), covar=tensor([0.0489, 0.0372, 0.0823, 0.4518, 0.0638, 0.0506, 0.0948, 0.0432], + device='cuda:0'), in_proj_covar=tensor([0.0418, 0.0314, 0.0344, 0.0430, 0.0333, 0.0317, 0.0322, 0.0275], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:46:20,540 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50559.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:46:23,780 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:46:47,297 INFO [train.py:901] (0/4) Epoch 7, batch 2100, loss[loss=0.3224, simple_loss=0.3723, pruned_loss=0.1363, over 8440.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3357, pruned_loss=0.1006, over 1619653.74 frames. ], batch size: 27, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:46:51,696 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:47:10,012 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.80 vs. limit=5.0 +2023-02-06 03:47:11,032 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 3.066e+02 3.697e+02 4.610e+02 1.063e+03, threshold=7.394e+02, percent-clipped=3.0 +2023-02-06 03:47:22,377 INFO [train.py:901] (0/4) Epoch 7, batch 2150, loss[loss=0.3087, simple_loss=0.3677, pruned_loss=0.1249, over 8416.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3344, pruned_loss=0.09954, over 1619953.06 frames. 
], batch size: 49, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:47:40,232 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50674.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:47:56,606 INFO [train.py:901] (0/4) Epoch 7, batch 2200, loss[loss=0.256, simple_loss=0.3264, pruned_loss=0.09279, over 8247.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3348, pruned_loss=0.09995, over 1618977.28 frames. ], batch size: 24, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:48:07,076 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0 +2023-02-06 03:48:20,831 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.939e+02 3.492e+02 4.230e+02 8.261e+02, threshold=6.983e+02, percent-clipped=2.0 +2023-02-06 03:48:31,245 INFO [train.py:901] (0/4) Epoch 7, batch 2250, loss[loss=0.2997, simple_loss=0.3636, pruned_loss=0.1179, over 8204.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3353, pruned_loss=0.1002, over 1619856.62 frames. ], batch size: 23, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:48:53,491 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 03:49:05,384 INFO [train.py:901] (0/4) Epoch 7, batch 2300, loss[loss=0.2819, simple_loss=0.3383, pruned_loss=0.1128, over 8460.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3361, pruned_loss=0.1011, over 1617232.39 frames. ], batch size: 48, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:49:06,296 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50800.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:49:23,395 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50826.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:49:28,611 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.140e+02 4.073e+02 5.620e+02 1.608e+03, threshold=8.146e+02, percent-clipped=16.0 +2023-02-06 03:49:30,326 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 03:49:39,826 INFO [train.py:901] (0/4) Epoch 7, batch 2350, loss[loss=0.2285, simple_loss=0.3005, pruned_loss=0.07823, over 7968.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3363, pruned_loss=0.1018, over 1620645.71 frames. ], batch size: 21, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:49:47,967 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:50:04,996 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:50:14,222 INFO [train.py:901] (0/4) Epoch 7, batch 2400, loss[loss=0.2765, simple_loss=0.3478, pruned_loss=0.1026, over 8195.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3345, pruned_loss=0.1006, over 1619742.81 frames. 
], batch size: 23, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:50:20,197 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50908.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:50:32,392 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7577, 3.6728, 3.3735, 1.7068, 3.2810, 3.2639, 3.4186, 2.8638], + device='cuda:0'), covar=tensor([0.0987, 0.0794, 0.1172, 0.4862, 0.0892, 0.0990, 0.1264, 0.0948], + device='cuda:0'), in_proj_covar=tensor([0.0417, 0.0316, 0.0350, 0.0436, 0.0337, 0.0313, 0.0320, 0.0275], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:50:35,192 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50930.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:50:36,987 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.900e+02 3.414e+02 4.196e+02 7.276e+02, threshold=6.828e+02, percent-clipped=0.0 +2023-02-06 03:50:47,541 INFO [train.py:901] (0/4) Epoch 7, batch 2450, loss[loss=0.2364, simple_loss=0.3217, pruned_loss=0.07553, over 8294.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3339, pruned_loss=0.1002, over 1615387.36 frames. ], batch size: 23, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:50:52,346 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50955.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:50:59,071 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50964.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:51:22,800 INFO [train.py:901] (0/4) Epoch 7, batch 2500, loss[loss=0.2998, simple_loss=0.3652, pruned_loss=0.1172, over 8338.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3336, pruned_loss=0.09974, over 1617057.76 frames. ], batch size: 26, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:51:39,931 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51023.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:51:46,387 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.634e+02 3.421e+02 4.023e+02 8.503e+02, threshold=6.842e+02, percent-clipped=1.0 +2023-02-06 03:51:56,889 INFO [train.py:901] (0/4) Epoch 7, batch 2550, loss[loss=0.2649, simple_loss=0.3331, pruned_loss=0.09833, over 8032.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.3334, pruned_loss=0.09949, over 1615723.52 frames. ], batch size: 22, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:52:16,208 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2148, 3.0668, 2.9057, 1.3996, 2.8034, 2.8625, 2.9370, 2.6176], + device='cuda:0'), covar=tensor([0.1184, 0.0860, 0.1240, 0.4544, 0.1046, 0.1104, 0.1489, 0.1202], + device='cuda:0'), in_proj_covar=tensor([0.0404, 0.0308, 0.0336, 0.0422, 0.0326, 0.0305, 0.0315, 0.0269], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:52:18,133 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:52:31,078 INFO [train.py:901] (0/4) Epoch 7, batch 2600, loss[loss=0.3184, simple_loss=0.3783, pruned_loss=0.1293, over 8510.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3341, pruned_loss=0.1002, over 1618397.44 frames. 
], batch size: 26, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:52:54,868 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.187e+02 3.140e+02 3.874e+02 4.757e+02 8.436e+02, threshold=7.747e+02, percent-clipped=5.0 +2023-02-06 03:53:02,023 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51144.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:53:05,173 INFO [train.py:901] (0/4) Epoch 7, batch 2650, loss[loss=0.288, simple_loss=0.3544, pruned_loss=0.1108, over 8249.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3341, pruned_loss=0.1002, over 1615963.33 frames. ], batch size: 24, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:53:19,234 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51170.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:53:32,640 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51190.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:53:39,227 INFO [train.py:901] (0/4) Epoch 7, batch 2700, loss[loss=0.2744, simple_loss=0.3237, pruned_loss=0.1126, over 7529.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3345, pruned_loss=0.1001, over 1613806.37 frames. ], batch size: 18, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:53:55,361 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4402, 1.7810, 1.8314, 0.9987, 1.9124, 1.2819, 0.5562, 1.5863], + device='cuda:0'), covar=tensor([0.0257, 0.0133, 0.0103, 0.0212, 0.0153, 0.0348, 0.0310, 0.0114], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0252, 0.0215, 0.0308, 0.0251, 0.0392, 0.0309, 0.0288], + device='cuda:0'), out_proj_covar=tensor([1.1117e-04, 7.9022e-05, 6.6944e-05, 9.6476e-05, 7.9891e-05, 1.3440e-04, + 9.9883e-05, 9.1433e-05], device='cuda:0') +2023-02-06 03:54:02,460 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 3.046e+02 3.584e+02 4.560e+02 9.753e+02, threshold=7.169e+02, percent-clipped=4.0 +2023-02-06 03:54:14,245 INFO [train.py:901] (0/4) Epoch 7, batch 2750, loss[loss=0.3025, simple_loss=0.346, pruned_loss=0.1295, over 7145.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3353, pruned_loss=0.1008, over 1616643.17 frames. ], batch size: 71, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:54:21,068 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:54:34,380 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:54:36,584 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.09 vs. limit=5.0 +2023-02-06 03:54:38,278 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51285.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:54:47,532 INFO [train.py:901] (0/4) Epoch 7, batch 2800, loss[loss=0.2911, simple_loss=0.3398, pruned_loss=0.1212, over 8336.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3352, pruned_loss=0.1012, over 1612470.58 frames. 
], batch size: 26, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:54:51,134 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51304.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:54:53,733 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:55:11,784 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.811e+02 3.563e+02 4.674e+02 6.809e+02, threshold=7.126e+02, percent-clipped=0.0 +2023-02-06 03:55:22,689 INFO [train.py:901] (0/4) Epoch 7, batch 2850, loss[loss=0.3165, simple_loss=0.3689, pruned_loss=0.132, over 6924.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3352, pruned_loss=0.1006, over 1618337.56 frames. ], batch size: 71, lr: 1.10e-02, grad_scale: 16.0 +2023-02-06 03:55:48,728 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4897, 2.0620, 3.3633, 2.4679, 2.6822, 2.0658, 1.6336, 1.3766], + device='cuda:0'), covar=tensor([0.2611, 0.2721, 0.0620, 0.1865, 0.1539, 0.1613, 0.1490, 0.3120], + device='cuda:0'), in_proj_covar=tensor([0.0817, 0.0756, 0.0657, 0.0753, 0.0843, 0.0698, 0.0656, 0.0689], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 03:55:57,257 INFO [train.py:901] (0/4) Epoch 7, batch 2900, loss[loss=0.263, simple_loss=0.3188, pruned_loss=0.1036, over 7441.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3343, pruned_loss=0.09945, over 1617790.22 frames. ], batch size: 17, lr: 1.10e-02, grad_scale: 16.0 +2023-02-06 03:55:58,208 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51400.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:04,949 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:13,550 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:14,126 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:19,633 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 03:56:20,271 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.833e+02 3.577e+02 4.732e+02 1.075e+03, threshold=7.153e+02, percent-clipped=9.0 +2023-02-06 03:56:31,120 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2351, 1.6177, 1.6172, 1.4040, 1.1878, 1.6002, 1.9210, 1.6871], + device='cuda:0'), covar=tensor([0.0526, 0.1154, 0.1732, 0.1383, 0.0637, 0.1453, 0.0639, 0.0572], + device='cuda:0'), in_proj_covar=tensor([0.0118, 0.0166, 0.0208, 0.0173, 0.0117, 0.0174, 0.0127, 0.0147], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 03:56:32,317 INFO [train.py:901] (0/4) Epoch 7, batch 2950, loss[loss=0.234, simple_loss=0.2941, pruned_loss=0.08696, over 7221.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3343, pruned_loss=0.09966, over 1615373.16 frames. ], batch size: 16, lr: 1.10e-02, grad_scale: 16.0 +2023-02-06 03:57:06,418 INFO [train.py:901] (0/4) Epoch 7, batch 3000, loss[loss=0.2744, simple_loss=0.3473, pruned_loss=0.1008, over 8363.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3346, pruned_loss=0.09978, over 1617183.85 frames. 
], batch size: 24, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:57:06,419 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 03:57:21,702 INFO [train.py:935] (0/4) Epoch 7, validation: loss=0.2071, simple_loss=0.305, pruned_loss=0.05459, over 944034.00 frames. +2023-02-06 03:57:21,703 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 03:57:31,180 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:32,555 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51515.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:45,147 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.842e+02 3.422e+02 4.197e+02 1.269e+03, threshold=6.844e+02, percent-clipped=2.0 +2023-02-06 03:57:45,235 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51534.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:57:48,652 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:49,344 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51540.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:50,068 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51541.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:57:55,346 INFO [train.py:901] (0/4) Epoch 7, batch 3050, loss[loss=0.2536, simple_loss=0.3277, pruned_loss=0.08976, over 8330.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3347, pruned_loss=0.09968, over 1614059.56 frames. ], batch size: 25, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:58:06,941 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51566.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:58:22,439 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0 +2023-02-06 03:58:29,894 INFO [train.py:901] (0/4) Epoch 7, batch 3100, loss[loss=0.2652, simple_loss=0.3367, pruned_loss=0.0968, over 8514.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3352, pruned_loss=0.1004, over 1614444.75 frames. ], batch size: 28, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:58:54,843 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.035e+02 3.902e+02 5.145e+02 1.067e+03, threshold=7.804e+02, percent-clipped=7.0 +2023-02-06 03:59:05,324 INFO [train.py:901] (0/4) Epoch 7, batch 3150, loss[loss=0.2357, simple_loss=0.3001, pruned_loss=0.08564, over 8083.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3343, pruned_loss=0.1005, over 1609967.25 frames. 
], batch size: 21, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:59:05,482 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0266, 1.5170, 3.4969, 1.4725, 2.4071, 3.9637, 3.8461, 3.3866], + device='cuda:0'), covar=tensor([0.0974, 0.1330, 0.0299, 0.1721, 0.0676, 0.0201, 0.0382, 0.0560], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0272, 0.0228, 0.0271, 0.0241, 0.0214, 0.0271, 0.0279], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 03:59:05,508 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51649.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:59:17,186 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8087, 1.2741, 5.8965, 1.9959, 5.1087, 4.8137, 5.3907, 5.2832], + device='cuda:0'), covar=tensor([0.0368, 0.4247, 0.0280, 0.2894, 0.1008, 0.0709, 0.0365, 0.0422], + device='cuda:0'), in_proj_covar=tensor([0.0377, 0.0518, 0.0471, 0.0461, 0.0526, 0.0439, 0.0433, 0.0491], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 03:59:22,855 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4489, 1.8726, 3.1164, 1.2330, 2.1358, 1.7321, 1.6080, 1.8529], + device='cuda:0'), covar=tensor([0.1546, 0.1823, 0.0628, 0.3300, 0.1405, 0.2444, 0.1497, 0.2094], + device='cuda:0'), in_proj_covar=tensor([0.0473, 0.0471, 0.0530, 0.0549, 0.0588, 0.0528, 0.0451, 0.0589], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 03:59:26,399 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51679.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:59:40,118 INFO [train.py:901] (0/4) Epoch 7, batch 3200, loss[loss=0.2674, simple_loss=0.3338, pruned_loss=0.1006, over 7648.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.3325, pruned_loss=0.09965, over 1608819.70 frames. ], batch size: 19, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:59:43,597 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:01,508 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0550, 1.2753, 1.1551, 0.4064, 1.2144, 0.9354, 0.0977, 1.1489], + device='cuda:0'), covar=tensor([0.0226, 0.0153, 0.0144, 0.0261, 0.0168, 0.0439, 0.0348, 0.0144], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0260, 0.0217, 0.0317, 0.0255, 0.0404, 0.0316, 0.0294], + device='cuda:0'), out_proj_covar=tensor([1.1391e-04, 8.1587e-05, 6.7352e-05, 9.9332e-05, 8.0862e-05, 1.3819e-04, + 1.0200e-04, 9.2874e-05], device='cuda:0') +2023-02-06 04:00:05,271 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.946e+02 3.588e+02 4.680e+02 7.788e+02, threshold=7.176e+02, percent-clipped=0.0 +2023-02-06 04:00:10,372 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 04:00:12,045 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51744.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:16,038 INFO [train.py:901] (0/4) Epoch 7, batch 3250, loss[loss=0.2341, simple_loss=0.3128, pruned_loss=0.07766, over 8504.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3325, pruned_loss=0.09909, over 1614438.84 frames. 
], batch size: 26, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:00:19,459 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:26,774 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51765.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:46,789 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:49,330 INFO [train.py:901] (0/4) Epoch 7, batch 3300, loss[loss=0.2953, simple_loss=0.3599, pruned_loss=0.1153, over 8249.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3322, pruned_loss=0.0987, over 1611770.38 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:01:02,344 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9763, 2.6320, 3.1195, 1.1131, 3.1377, 1.8543, 1.4614, 1.9868], + device='cuda:0'), covar=tensor([0.0427, 0.0160, 0.0114, 0.0380, 0.0205, 0.0419, 0.0483, 0.0219], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0253, 0.0213, 0.0312, 0.0249, 0.0397, 0.0309, 0.0288], + device='cuda:0'), out_proj_covar=tensor([1.1177e-04, 7.9248e-05, 6.6127e-05, 9.7606e-05, 7.8758e-05, 1.3592e-04, + 9.9669e-05, 9.0739e-05], device='cuda:0') +2023-02-06 04:01:03,663 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51820.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:14,385 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.988e+02 3.662e+02 4.246e+02 9.313e+02, threshold=7.324e+02, percent-clipped=2.0 +2023-02-06 04:01:24,632 INFO [train.py:901] (0/4) Epoch 7, batch 3350, loss[loss=0.2774, simple_loss=0.3601, pruned_loss=0.09731, over 8341.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.333, pruned_loss=0.09865, over 1617303.66 frames. ], batch size: 26, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:01:30,916 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:32,338 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51859.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:39,525 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51869.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:39,585 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5619, 1.8576, 3.4356, 1.2668, 2.5707, 1.9297, 1.6626, 2.0571], + device='cuda:0'), covar=tensor([0.1509, 0.1896, 0.0515, 0.3082, 0.1120, 0.2272, 0.1493, 0.2003], + device='cuda:0'), in_proj_covar=tensor([0.0474, 0.0469, 0.0531, 0.0546, 0.0586, 0.0525, 0.0453, 0.0591], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 04:01:59,343 INFO [train.py:901] (0/4) Epoch 7, batch 3400, loss[loss=0.2685, simple_loss=0.3374, pruned_loss=0.09982, over 8500.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.3337, pruned_loss=0.09931, over 1618529.76 frames. 
], batch size: 26, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:02:03,608 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51905.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:02:20,932 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51930.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:02:23,259 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.723e+02 3.470e+02 4.144e+02 7.359e+02, threshold=6.940e+02, percent-clipped=1.0 +2023-02-06 04:02:34,620 INFO [train.py:901] (0/4) Epoch 7, batch 3450, loss[loss=0.2397, simple_loss=0.2946, pruned_loss=0.09235, over 7549.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3333, pruned_loss=0.09881, over 1616956.66 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:02:50,954 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51972.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:02:53,155 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.86 vs. limit=2.0 +2023-02-06 04:03:09,379 INFO [train.py:901] (0/4) Epoch 7, batch 3500, loss[loss=0.2483, simple_loss=0.3169, pruned_loss=0.08983, over 7806.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3336, pruned_loss=0.0987, over 1621562.46 frames. ], batch size: 20, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:03:10,082 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-52000.pt +2023-02-06 04:03:22,438 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 04:03:33,496 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.822e+02 3.302e+02 4.435e+02 1.594e+03, threshold=6.604e+02, percent-clipped=5.0 +2023-02-06 04:03:43,726 INFO [train.py:901] (0/4) Epoch 7, batch 3550, loss[loss=0.2467, simple_loss=0.3243, pruned_loss=0.08457, over 8367.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3333, pruned_loss=0.09811, over 1618418.43 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:03:49,970 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:19,843 INFO [train.py:901] (0/4) Epoch 7, batch 3600, loss[loss=0.2183, simple_loss=0.2902, pruned_loss=0.07316, over 7798.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.333, pruned_loss=0.09821, over 1620049.80 frames. ], batch size: 19, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:04:26,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:30,884 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52115.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:37,693 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:43,470 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.818e+02 3.176e+02 4.094e+02 8.086e+02, threshold=6.353e+02, percent-clipped=5.0 +2023-02-06 04:04:47,734 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:53,394 INFO [train.py:901] (0/4) Epoch 7, batch 3650, loss[loss=0.2692, simple_loss=0.3345, pruned_loss=0.102, over 7791.00 frames. 
], tot_loss[loss=0.2656, simple_loss=0.3334, pruned_loss=0.09887, over 1619773.35 frames. ], batch size: 19, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:04:54,172 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:23,192 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 04:05:28,581 INFO [train.py:901] (0/4) Epoch 7, batch 3700, loss[loss=0.2149, simple_loss=0.2878, pruned_loss=0.07102, over 7425.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3335, pruned_loss=0.09858, over 1623522.93 frames. ], batch size: 17, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:05:36,882 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52211.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:41,634 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8221, 2.3881, 4.7497, 1.2625, 3.1641, 2.1897, 1.8303, 2.8757], + device='cuda:0'), covar=tensor([0.1451, 0.1891, 0.0540, 0.3375, 0.1234, 0.2461, 0.1553, 0.2326], + device='cuda:0'), in_proj_covar=tensor([0.0474, 0.0469, 0.0530, 0.0550, 0.0586, 0.0523, 0.0455, 0.0597], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 04:05:47,075 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:49,859 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52228.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:53,727 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.602e+02 3.554e+02 4.404e+02 9.700e+02, threshold=7.108e+02, percent-clipped=5.0 +2023-02-06 04:06:04,143 INFO [train.py:901] (0/4) Epoch 7, batch 3750, loss[loss=0.2596, simple_loss=0.3216, pruned_loss=0.09879, over 7790.00 frames. ], tot_loss[loss=0.2641, simple_loss=0.3325, pruned_loss=0.09782, over 1620472.62 frames. ], batch size: 19, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:06:05,821 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7241, 2.3798, 4.3726, 1.2262, 2.7178, 1.9396, 1.7228, 2.7314], + device='cuda:0'), covar=tensor([0.1518, 0.1860, 0.0690, 0.3359, 0.1513, 0.2600, 0.1522, 0.2232], + device='cuda:0'), in_proj_covar=tensor([0.0473, 0.0471, 0.0532, 0.0550, 0.0585, 0.0525, 0.0456, 0.0596], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 04:06:07,189 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:06:12,925 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9901, 3.2289, 2.4867, 4.2613, 1.9176, 2.2309, 2.5881, 3.1162], + device='cuda:0'), covar=tensor([0.0903, 0.0997, 0.1217, 0.0289, 0.1408, 0.1463, 0.1371, 0.1042], + device='cuda:0'), in_proj_covar=tensor([0.0254, 0.0238, 0.0280, 0.0223, 0.0241, 0.0266, 0.0274, 0.0241], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 04:06:24,074 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. limit=5.0 +2023-02-06 04:06:38,747 INFO [train.py:901] (0/4) Epoch 7, batch 3800, loss[loss=0.3083, simple_loss=0.3614, pruned_loss=0.1276, over 8630.00 frames. 
], tot_loss[loss=0.2637, simple_loss=0.3316, pruned_loss=0.09793, over 1613061.35 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:01,885 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52330.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:07:04,415 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.780e+02 3.361e+02 4.228e+02 6.516e+02, threshold=6.722e+02, percent-clipped=0.0 +2023-02-06 04:07:15,844 INFO [train.py:901] (0/4) Epoch 7, batch 3850, loss[loss=0.2194, simple_loss=0.2961, pruned_loss=0.0713, over 7930.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3311, pruned_loss=0.09778, over 1608249.08 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:25,312 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9090, 2.5230, 4.7125, 1.5047, 3.2488, 2.4878, 1.8994, 2.6648], + device='cuda:0'), covar=tensor([0.1392, 0.1755, 0.0517, 0.3182, 0.1236, 0.2244, 0.1451, 0.2373], + device='cuda:0'), in_proj_covar=tensor([0.0474, 0.0474, 0.0533, 0.0549, 0.0591, 0.0528, 0.0458, 0.0595], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 04:07:30,483 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 04:07:49,735 INFO [train.py:901] (0/4) Epoch 7, batch 3900, loss[loss=0.2068, simple_loss=0.2783, pruned_loss=0.06762, over 6832.00 frames. ], tot_loss[loss=0.2626, simple_loss=0.3302, pruned_loss=0.09751, over 1605106.12 frames. ], batch size: 15, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:51,969 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:08:15,067 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.697e+02 3.207e+02 4.225e+02 1.297e+03, threshold=6.414e+02, percent-clipped=5.0 +2023-02-06 04:08:25,214 INFO [train.py:901] (0/4) Epoch 7, batch 3950, loss[loss=0.3247, simple_loss=0.3771, pruned_loss=0.1361, over 8586.00 frames. ], tot_loss[loss=0.2619, simple_loss=0.3302, pruned_loss=0.09678, over 1607706.47 frames. ], batch size: 39, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:08:48,028 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:00,579 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 04:09:00,800 INFO [train.py:901] (0/4) Epoch 7, batch 4000, loss[loss=0.318, simple_loss=0.3938, pruned_loss=0.121, over 8257.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3291, pruned_loss=0.09647, over 1605797.47 frames. 
], batch size: 24, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:09:04,495 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8353, 2.4993, 2.9016, 1.0033, 2.9856, 1.6993, 1.5687, 1.6936],
+ device='cuda:0'), covar=tensor([0.0433, 0.0175, 0.0141, 0.0390, 0.0251, 0.0442, 0.0402, 0.0256],
+ device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0257, 0.0215, 0.0317, 0.0258, 0.0403, 0.0311, 0.0292],
+ device='cuda:0'), out_proj_covar=tensor([1.1301e-04, 8.0308e-05, 6.6344e-05, 9.9245e-05, 8.1370e-05, 1.3735e-04,
+ 9.9871e-05, 9.1926e-05], device='cuda:0')
+2023-02-06 04:09:05,153 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52505.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:09:13,165 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:09:23,936 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 04:09:24,184 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.935e+02 3.629e+02 4.693e+02 1.248e+03, threshold=7.258e+02, percent-clipped=9.0
+2023-02-06 04:09:28,444 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9325, 3.1883, 2.6350, 3.9120, 1.9752, 2.0750, 2.4513, 3.4003],
+ device='cuda:0'), covar=tensor([0.0732, 0.0990, 0.1136, 0.0296, 0.1429, 0.1447, 0.1288, 0.0685],
+ device='cuda:0'), in_proj_covar=tensor([0.0258, 0.0240, 0.0282, 0.0224, 0.0243, 0.0268, 0.0278, 0.0240],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 04:09:35,738 INFO [train.py:901] (0/4) Epoch 7, batch 4050, loss[loss=0.2187, simple_loss=0.2911, pruned_loss=0.0731, over 7533.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.329, pruned_loss=0.09638, over 1601999.57 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:09:39,787 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52555.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:09:52,443 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52573.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:10:10,473 INFO [train.py:901] (0/4) Epoch 7, batch 4100, loss[loss=0.2667, simple_loss=0.3295, pruned_loss=0.102, over 8041.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.33, pruned_loss=0.09725, over 1604060.61 frames. ], batch size: 22, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:10:33,849 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.709e+02 3.346e+02 4.687e+02 1.096e+03, threshold=6.691e+02, percent-clipped=5.0
+2023-02-06 04:10:44,015 INFO [train.py:901] (0/4) Epoch 7, batch 4150, loss[loss=0.2321, simple_loss=0.3095, pruned_loss=0.07733, over 8464.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3309, pruned_loss=0.09808, over 1604164.96 frames. ], batch size: 25, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:10:59,828 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:11:02,369 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52674.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:11:20,441 INFO [train.py:901] (0/4) Epoch 7, batch 4200, loss[loss=0.3327, simple_loss=0.3687, pruned_loss=0.1484, over 7549.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.331, pruned_loss=0.09758, over 1611554.41 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:11:30,521 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 04:11:43,883 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.675e+02 3.334e+02 4.108e+02 1.082e+03, threshold=6.669e+02, percent-clipped=4.0
+2023-02-06 04:11:53,178 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 04:11:53,853 INFO [train.py:901] (0/4) Epoch 7, batch 4250, loss[loss=0.2837, simple_loss=0.3532, pruned_loss=0.1071, over 8025.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3313, pruned_loss=0.09757, over 1614219.87 frames. ], batch size: 22, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:12:08,145 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52770.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:10,305 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52773.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:22,411 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:28,478 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52798.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:28,937 INFO [train.py:901] (0/4) Epoch 7, batch 4300, loss[loss=0.2613, simple_loss=0.3257, pruned_loss=0.09849, over 7919.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3298, pruned_loss=0.096, over 1612764.98 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:12:53,669 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.879e+02 3.462e+02 4.347e+02 1.112e+03, threshold=6.924e+02, percent-clipped=5.0
+2023-02-06 04:13:02,725 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1353, 2.4583, 1.9082, 2.9663, 1.4383, 1.5211, 1.8891, 2.3661],
+ device='cuda:0'), covar=tensor([0.0965, 0.0989, 0.1346, 0.0472, 0.1461, 0.1927, 0.1507, 0.1033],
+ device='cuda:0'), in_proj_covar=tensor([0.0254, 0.0241, 0.0281, 0.0224, 0.0237, 0.0270, 0.0278, 0.0240],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 04:13:03,877 INFO [train.py:901] (0/4) Epoch 7, batch 4350, loss[loss=0.2291, simple_loss=0.295, pruned_loss=0.08162, over 7683.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3293, pruned_loss=0.09544, over 1614280.31 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:13:24,436 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 04:13:38,502 INFO [train.py:901] (0/4) Epoch 7, batch 4400, loss[loss=0.2248, simple_loss=0.2919, pruned_loss=0.07884, over 7792.00 frames. ], tot_loss[loss=0.2583, simple_loss=0.3275, pruned_loss=0.09457, over 1614371.95 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:13:50,741 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52917.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:13:56,759 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52926.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:14:02,504 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.715e+02 3.689e+02 4.508e+02 8.331e+02, threshold=7.379e+02, percent-clipped=6.0
+2023-02-06 04:14:06,672 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 04:14:13,201 INFO [train.py:901] (0/4) Epoch 7, batch 4450, loss[loss=0.3643, simple_loss=0.393, pruned_loss=0.1678, over 7815.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3281, pruned_loss=0.09526, over 1612824.53 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:14:14,716 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52951.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:14:46,591 INFO [train.py:901] (0/4) Epoch 7, batch 4500, loss[loss=0.2102, simple_loss=0.2767, pruned_loss=0.07181, over 7543.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3285, pruned_loss=0.0957, over 1613249.14 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:14:59,381 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 04:15:10,352 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53032.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:15:11,481 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.236e+02 2.890e+02 3.405e+02 4.030e+02 1.067e+03, threshold=6.809e+02, percent-clipped=4.0
+2023-02-06 04:15:18,995 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:15:22,047 INFO [train.py:901] (0/4) Epoch 7, batch 4550, loss[loss=0.29, simple_loss=0.3534, pruned_loss=0.1133, over 8561.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3297, pruned_loss=0.09671, over 1615667.63 frames. ], batch size: 31, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:15:36,997 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53070.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:15:39,626 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53074.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:15:56,222 INFO [train.py:901] (0/4) Epoch 7, batch 4600, loss[loss=0.249, simple_loss=0.3065, pruned_loss=0.0958, over 7430.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3294, pruned_loss=0.09688, over 1610450.88 frames. ], batch size: 17, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:16:06,490 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53114.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:16:20,973 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.951e+02 3.579e+02 4.375e+02 1.013e+03, threshold=7.158e+02, percent-clipped=5.0
+2023-02-06 04:16:31,869 INFO [train.py:901] (0/4) Epoch 7, batch 4650, loss[loss=0.2276, simple_loss=0.2947, pruned_loss=0.08027, over 7778.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3291, pruned_loss=0.09701, over 1606166.47 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:16:43,018 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.03 vs. limit=2.0
+2023-02-06 04:17:03,262 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0
+2023-02-06 04:17:06,942 INFO [train.py:901] (0/4) Epoch 7, batch 4700, loss[loss=0.2067, simple_loss=0.2838, pruned_loss=0.06481, over 7653.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3291, pruned_loss=0.09609, over 1612759.55 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:17:27,457 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:17:30,525 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.793e+02 3.469e+02 4.275e+02 9.300e+02, threshold=6.939e+02, percent-clipped=3.0
+2023-02-06 04:17:41,201 INFO [train.py:901] (0/4) Epoch 7, batch 4750, loss[loss=0.317, simple_loss=0.373, pruned_loss=0.1304, over 8547.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.3306, pruned_loss=0.09692, over 1612164.27 frames. ], batch size: 31, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:17:48,713 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53259.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:17:56,664 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 04:17:59,377 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 04:18:09,718 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53288.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:18:16,599 INFO [train.py:901] (0/4) Epoch 7, batch 4800, loss[loss=0.256, simple_loss=0.3344, pruned_loss=0.0888, over 8099.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3309, pruned_loss=0.09708, over 1611138.05 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:18:26,459 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53313.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:18:32,424 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:18:39,896 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53333.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:18:41,111 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.630e+02 3.191e+02 3.984e+02 9.617e+02, threshold=6.381e+02, percent-clipped=3.0
+2023-02-06 04:18:50,453 INFO [train.py:901] (0/4) Epoch 7, batch 4850, loss[loss=0.2933, simple_loss=0.3605, pruned_loss=0.113, over 8030.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3299, pruned_loss=0.09623, over 1611478.70 frames. ], batch size: 22, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:18:51,169 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 04:19:06,226 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3351, 1.5485, 1.2889, 1.9049, 0.8182, 1.1265, 1.3878, 1.4827],
+ device='cuda:0'), covar=tensor([0.1018, 0.0906, 0.1486, 0.0651, 0.1452, 0.1971, 0.0986, 0.0897],
+ device='cuda:0'), in_proj_covar=tensor([0.0253, 0.0241, 0.0278, 0.0226, 0.0241, 0.0268, 0.0280, 0.0238],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 04:19:16,474 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53385.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:19:26,613 INFO [train.py:901] (0/4) Epoch 7, batch 4900, loss[loss=0.2069, simple_loss=0.2741, pruned_loss=0.06991, over 7335.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3301, pruned_loss=0.09676, over 1614390.35 frames. ], batch size: 16, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:19:40,202 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53418.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:19:40,289 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2462, 1.3743, 2.2726, 1.1820, 2.1250, 2.5283, 2.4643, 2.0947],
+ device='cuda:0'), covar=tensor([0.0939, 0.1050, 0.0506, 0.1842, 0.0545, 0.0369, 0.0612, 0.0802],
+ device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0278, 0.0235, 0.0275, 0.0243, 0.0219, 0.0279, 0.0284],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 04:19:48,325 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3705, 1.3222, 4.4798, 1.6838, 3.9381, 3.7586, 4.0749, 3.9346],
+ device='cuda:0'), covar=tensor([0.0319, 0.3710, 0.0330, 0.2968, 0.0850, 0.0633, 0.0405, 0.0498],
+ device='cuda:0'), in_proj_covar=tensor([0.0387, 0.0530, 0.0478, 0.0465, 0.0527, 0.0442, 0.0433, 0.0496],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 04:19:49,287 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 04:19:51,454 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.811e+02 3.269e+02 4.328e+02 9.769e+02, threshold=6.539e+02, percent-clipped=6.0
+2023-02-06 04:19:53,023 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0103, 4.1274, 2.5536, 2.7552, 2.9124, 2.2103, 2.7291, 2.8659],
+ device='cuda:0'), covar=tensor([0.1284, 0.0163, 0.0753, 0.0583, 0.0496, 0.0974, 0.0746, 0.0854],
+ device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0233, 0.0310, 0.0299, 0.0308, 0.0316, 0.0338, 0.0316],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 04:20:00,855 INFO [train.py:901] (0/4) Epoch 7, batch 4950, loss[loss=0.2755, simple_loss=0.3393, pruned_loss=0.1059, over 8511.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3322, pruned_loss=0.09792, over 1618728.29 frames. ], batch size: 28, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:20:27,197 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53485.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:20:36,826 INFO [train.py:901] (0/4) Epoch 7, batch 5000, loss[loss=0.2482, simple_loss=0.3285, pruned_loss=0.08395, over 8311.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3319, pruned_loss=0.09759, over 1615867.13 frames. ], batch size: 25, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:20:45,260 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53510.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:20:48,754 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5807, 1.9795, 2.0784, 1.2003, 2.2918, 1.3631, 0.6755, 1.7220],
+ device='cuda:0'), covar=tensor([0.0361, 0.0155, 0.0133, 0.0254, 0.0176, 0.0427, 0.0389, 0.0151],
+ device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0261, 0.0218, 0.0318, 0.0258, 0.0402, 0.0312, 0.0289],
+ device='cuda:0'), out_proj_covar=tensor([1.1305e-04, 8.1287e-05, 6.7222e-05, 9.8440e-05, 8.1323e-05, 1.3600e-04,
+ 9.9618e-05, 9.0591e-05], device='cuda:0')
+2023-02-06 04:21:01,979 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53533.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:21:03,201 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.612e+02 3.156e+02 4.000e+02 8.821e+02, threshold=6.312e+02, percent-clipped=7.0
+2023-02-06 04:21:12,894 INFO [train.py:901] (0/4) Epoch 7, batch 5050, loss[loss=0.2771, simple_loss=0.3451, pruned_loss=0.1046, over 8468.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3303, pruned_loss=0.09589, over 1619418.55 frames. ], batch size: 25, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:21:31,984 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 04:21:43,488 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6936, 1.5737, 3.0177, 1.3156, 2.1233, 3.3064, 3.3862, 2.7825],
+ device='cuda:0'), covar=tensor([0.0979, 0.1280, 0.0382, 0.1988, 0.0808, 0.0290, 0.0433, 0.0654],
+ device='cuda:0'), in_proj_covar=tensor([0.0237, 0.0275, 0.0234, 0.0272, 0.0241, 0.0219, 0.0276, 0.0282],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 04:21:46,780 INFO [train.py:901] (0/4) Epoch 7, batch 5100, loss[loss=0.2549, simple_loss=0.3121, pruned_loss=0.0988, over 7644.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3317, pruned_loss=0.09731, over 1615363.89 frames. ], batch size: 19, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:21:51,221 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53603.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:22:13,233 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.889e+02 3.391e+02 4.238e+02 9.606e+02, threshold=6.783e+02, percent-clipped=10.0
+2023-02-06 04:22:23,450 INFO [train.py:901] (0/4) Epoch 7, batch 5150, loss[loss=0.2581, simple_loss=0.3369, pruned_loss=0.08968, over 8100.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3312, pruned_loss=0.09682, over 1615376.34 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:22:34,950 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53666.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:22:42,264 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53677.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:22:56,582 INFO [train.py:901] (0/4) Epoch 7, batch 5200, loss[loss=0.2438, simple_loss=0.3048, pruned_loss=0.09139, over 7797.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3316, pruned_loss=0.09719, over 1612327.94 frames. ], batch size: 19, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:22:59,250 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2943, 1.2053, 1.4332, 1.0552, 0.8197, 1.1870, 1.1720, 1.1080],
+ device='cuda:0'), covar=tensor([0.0575, 0.1230, 0.1615, 0.1405, 0.0553, 0.1529, 0.0654, 0.0623],
+ device='cuda:0'), in_proj_covar=tensor([0.0116, 0.0165, 0.0204, 0.0169, 0.0114, 0.0172, 0.0126, 0.0144],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:0')
+2023-02-06 04:23:10,750 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:23:14,969 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-02-06 04:23:17,965 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53729.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:23:22,034 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.983e+02 3.078e+02 4.028e+02 5.378e+02 1.177e+03, threshold=8.056e+02, percent-clipped=8.0
+2023-02-06 04:23:28,939 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 04:23:32,186 INFO [train.py:901] (0/4) Epoch 7, batch 5250, loss[loss=0.3249, simple_loss=0.368, pruned_loss=0.1409, over 7011.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3315, pruned_loss=0.09759, over 1610454.32 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:23:54,694 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53781.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:00,273 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:02,347 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53792.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:06,925 INFO [train.py:901] (0/4) Epoch 7, batch 5300, loss[loss=0.2423, simple_loss=0.3017, pruned_loss=0.09146, over 7692.00 frames. ], tot_loss[loss=0.2629, simple_loss=0.331, pruned_loss=0.09738, over 1609412.23 frames. ], batch size: 18, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:24:17,527 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53814.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:31,354 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.647e+02 3.169e+02 3.870e+02 1.211e+03, threshold=6.339e+02, percent-clipped=2.0
+2023-02-06 04:24:39,044 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53844.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:42,219 INFO [train.py:901] (0/4) Epoch 7, batch 5350, loss[loss=0.2541, simple_loss=0.3157, pruned_loss=0.09625, over 7975.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.3307, pruned_loss=0.09735, over 1607896.59 frames. ], batch size: 21, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:24:53,451 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3785, 1.4554, 1.6110, 1.3757, 0.9243, 1.7106, 0.0381, 1.0161],
+ device='cuda:0'), covar=tensor([0.3635, 0.1866, 0.0963, 0.2033, 0.5833, 0.0857, 0.4002, 0.2005],
+ device='cuda:0'), in_proj_covar=tensor([0.0145, 0.0143, 0.0085, 0.0193, 0.0231, 0.0089, 0.0147, 0.0139],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:25:11,784 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3820, 1.8653, 3.3734, 1.1097, 2.3022, 1.7502, 1.3915, 2.0913],
+ device='cuda:0'), covar=tensor([0.1660, 0.2016, 0.0623, 0.3304, 0.1477, 0.2667, 0.1725, 0.2289],
+ device='cuda:0'), in_proj_covar=tensor([0.0476, 0.0475, 0.0528, 0.0549, 0.0601, 0.0530, 0.0452, 0.0588],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:25:17,621 INFO [train.py:901] (0/4) Epoch 7, batch 5400, loss[loss=0.2421, simple_loss=0.3156, pruned_loss=0.08435, over 7805.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.332, pruned_loss=0.09834, over 1608497.86 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:25:41,933 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.728e+02 3.458e+02 4.119e+02 1.009e+03, threshold=6.915e+02, percent-clipped=3.0
+2023-02-06 04:25:51,251 INFO [train.py:901] (0/4) Epoch 7, batch 5450, loss[loss=0.2834, simple_loss=0.3259, pruned_loss=0.1205, over 7443.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.3312, pruned_loss=0.09752, over 1610534.77 frames. ], batch size: 17, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:26:01,442 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1506, 1.5893, 1.4614, 1.3421, 1.0777, 1.3960, 1.5468, 1.8084],
+ device='cuda:0'), covar=tensor([0.0523, 0.1186, 0.1759, 0.1394, 0.0583, 0.1482, 0.0705, 0.0477],
+ device='cuda:0'), in_proj_covar=tensor([0.0115, 0.0166, 0.0205, 0.0169, 0.0115, 0.0173, 0.0128, 0.0144],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:0')
+2023-02-06 04:26:08,822 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53974.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:26:18,667 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 04:26:26,958 INFO [train.py:901] (0/4) Epoch 7, batch 5500, loss[loss=0.2413, simple_loss=0.3229, pruned_loss=0.07981, over 8455.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3301, pruned_loss=0.0966, over 1613794.83 frames. ], batch size: 27, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:26:27,119 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53999.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:26:27,656 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-54000.pt
+2023-02-06 04:26:52,078 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.929e+02 2.794e+02 3.496e+02 4.646e+02 1.157e+03, threshold=6.993e+02, percent-clipped=7.0
+2023-02-06 04:26:53,596 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54037.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:01,150 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54048.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:01,610 INFO [train.py:901] (0/4) Epoch 7, batch 5550, loss[loss=0.355, simple_loss=0.3872, pruned_loss=0.1614, over 7172.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3322, pruned_loss=0.09823, over 1616749.09 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:27:10,518 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:18,025 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54073.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:21,374 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0224, 3.0929, 3.4012, 2.3417, 1.8601, 3.5731, 0.8121, 2.1669],
+ device='cuda:0'), covar=tensor([0.2422, 0.1422, 0.0405, 0.2079, 0.4943, 0.0580, 0.4579, 0.1888],
+ device='cuda:0'), in_proj_covar=tensor([0.0142, 0.0140, 0.0083, 0.0187, 0.0225, 0.0087, 0.0147, 0.0140],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:27:37,055 INFO [train.py:901] (0/4) Epoch 7, batch 5600, loss[loss=0.3028, simple_loss=0.3588, pruned_loss=0.1234, over 8030.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3333, pruned_loss=0.09855, over 1617175.72 frames. ], batch size: 22, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:27:37,951 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54100.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:55,814 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:28:02,459 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.954e+02 2.795e+02 3.455e+02 4.516e+02 9.788e+02, threshold=6.911e+02, percent-clipped=3.0
+2023-02-06 04:28:12,203 INFO [train.py:901] (0/4) Epoch 7, batch 5650, loss[loss=0.2563, simple_loss=0.3272, pruned_loss=0.09275, over 7800.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.3334, pruned_loss=0.09894, over 1612500.33 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:28:25,370 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 04:28:25,853 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.32 vs. limit=5.0
+2023-02-06 04:28:31,830 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0
+2023-02-06 04:28:41,255 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0402, 2.5772, 2.6911, 1.4603, 3.1391, 1.8041, 1.5937, 1.8809],
+ device='cuda:0'), covar=tensor([0.0464, 0.0187, 0.0191, 0.0370, 0.0228, 0.0511, 0.0426, 0.0264],
+ device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0261, 0.0220, 0.0325, 0.0262, 0.0412, 0.0317, 0.0300],
+ device='cuda:0'), out_proj_covar=tensor([1.1201e-04, 8.1238e-05, 6.7417e-05, 1.0037e-04, 8.2197e-05, 1.3914e-04,
+ 1.0091e-04, 9.3669e-05], device='cuda:0')
+2023-02-06 04:28:47,274 INFO [train.py:901] (0/4) Epoch 7, batch 5700, loss[loss=0.2241, simple_loss=0.2899, pruned_loss=0.07916, over 7443.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3321, pruned_loss=0.09845, over 1610161.49 frames. ], batch size: 17, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:29:12,994 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.786e+02 3.155e+02 4.023e+02 8.991e+02, threshold=6.311e+02, percent-clipped=4.0
+2023-02-06 04:29:22,468 INFO [train.py:901] (0/4) Epoch 7, batch 5750, loss[loss=0.2578, simple_loss=0.3396, pruned_loss=0.08798, over 8040.00 frames. ], tot_loss[loss=0.2629, simple_loss=0.3308, pruned_loss=0.09747, over 1612307.29 frames. ], batch size: 22, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:29:31,461 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 04:29:56,263 INFO [train.py:901] (0/4) Epoch 7, batch 5800, loss[loss=0.2247, simple_loss=0.3042, pruned_loss=0.07266, over 7978.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.332, pruned_loss=0.09772, over 1616380.99 frames. ], batch size: 21, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:30:22,036 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.127e+02 2.882e+02 3.737e+02 4.385e+02 9.194e+02, threshold=7.474e+02, percent-clipped=5.0
+2023-02-06 04:30:32,283 INFO [train.py:901] (0/4) Epoch 7, batch 5850, loss[loss=0.2274, simple_loss=0.3133, pruned_loss=0.07078, over 8469.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3328, pruned_loss=0.09819, over 1613164.02 frames. ], batch size: 25, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:31:06,117 INFO [train.py:901] (0/4) Epoch 7, batch 5900, loss[loss=0.2705, simple_loss=0.3395, pruned_loss=0.1007, over 8284.00 frames. ], tot_loss[loss=0.2629, simple_loss=0.3314, pruned_loss=0.09718, over 1614973.93 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:31:30,649 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.634e+02 3.151e+02 3.851e+02 7.879e+02, threshold=6.301e+02, percent-clipped=2.0
+2023-02-06 04:31:40,692 INFO [train.py:901] (0/4) Epoch 7, batch 5950, loss[loss=0.2761, simple_loss=0.3382, pruned_loss=0.107, over 8514.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3309, pruned_loss=0.09734, over 1617983.52 frames. ], batch size: 26, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:32:13,857 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2297, 1.7096, 1.6261, 0.8280, 1.6708, 1.2233, 0.2776, 1.5704],
+ device='cuda:0'), covar=tensor([0.0245, 0.0134, 0.0108, 0.0217, 0.0155, 0.0449, 0.0368, 0.0117],
+ device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0262, 0.0223, 0.0325, 0.0263, 0.0414, 0.0322, 0.0300],
+ device='cuda:0'), out_proj_covar=tensor([1.1091e-04, 8.1536e-05, 6.8065e-05, 1.0005e-04, 8.2443e-05, 1.3947e-04,
+ 1.0222e-04, 9.3704e-05], device='cuda:0')
+2023-02-06 04:32:14,308 INFO [train.py:901] (0/4) Epoch 7, batch 6000, loss[loss=0.2628, simple_loss=0.3333, pruned_loss=0.0961, over 8247.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3318, pruned_loss=0.09773, over 1622311.25 frames. ], batch size: 24, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:32:14,309 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 04:32:26,546 INFO [train.py:935] (0/4) Epoch 7, validation: loss=0.2048, simple_loss=0.3036, pruned_loss=0.05298, over 944034.00 frames.
+2023-02-06 04:32:26,547 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB
+2023-02-06 04:32:50,868 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.687e+02 3.524e+02 4.445e+02 8.914e+02, threshold=7.048e+02, percent-clipped=8.0
+2023-02-06 04:33:00,122 INFO [train.py:901] (0/4) Epoch 7, batch 6050, loss[loss=0.3142, simple_loss=0.3639, pruned_loss=0.1323, over 8596.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3324, pruned_loss=0.09839, over 1618626.09 frames. ], batch size: 31, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:33:36,263 INFO [train.py:901] (0/4) Epoch 7, batch 6100, loss[loss=0.2493, simple_loss=0.3157, pruned_loss=0.09144, over 7943.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.3327, pruned_loss=0.0988, over 1616463.58 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:33:55,843 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1088, 1.7752, 1.6049, 1.5552, 1.2647, 1.6086, 2.2252, 1.9264],
+ device='cuda:0'), covar=tensor([0.0444, 0.1164, 0.1755, 0.1347, 0.0590, 0.1532, 0.0657, 0.0586],
+ device='cuda:0'), in_proj_covar=tensor([0.0115, 0.0165, 0.0206, 0.0167, 0.0115, 0.0171, 0.0127, 0.0145],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:0')
+2023-02-06 04:34:00,446 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 04:34:01,816 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.824e+02 3.447e+02 4.351e+02 1.012e+03, threshold=6.894e+02, percent-clipped=2.0
+2023-02-06 04:34:11,155 INFO [train.py:901] (0/4) Epoch 7, batch 6150, loss[loss=0.3115, simple_loss=0.3783, pruned_loss=0.1224, over 8471.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3327, pruned_loss=0.09887, over 1615284.20 frames. ], batch size: 25, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:34:34,017 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54682.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:34:38,970 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0
+2023-02-06 04:34:46,674 INFO [train.py:901] (0/4) Epoch 7, batch 6200, loss[loss=0.2242, simple_loss=0.3027, pruned_loss=0.07284, over 8184.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3332, pruned_loss=0.09886, over 1618664.74 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:35:12,145 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.928e+02 3.624e+02 4.953e+02 9.267e+02, threshold=7.248e+02, percent-clipped=4.0
+2023-02-06 04:35:21,776 INFO [train.py:901] (0/4) Epoch 7, batch 6250, loss[loss=0.2333, simple_loss=0.3103, pruned_loss=0.07812, over 8243.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.332, pruned_loss=0.09828, over 1619392.88 frames. ], batch size: 22, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:35:40,874 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3064, 2.6091, 1.9326, 2.9559, 1.5629, 1.8013, 1.9745, 2.5236],
+ device='cuda:0'), covar=tensor([0.0700, 0.0761, 0.1121, 0.0466, 0.1148, 0.1384, 0.1204, 0.0764],
+ device='cuda:0'), in_proj_covar=tensor([0.0249, 0.0235, 0.0274, 0.0225, 0.0235, 0.0264, 0.0273, 0.0238],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 04:35:55,498 INFO [train.py:901] (0/4) Epoch 7, batch 6300, loss[loss=0.2748, simple_loss=0.3369, pruned_loss=0.1064, over 7932.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.3317, pruned_loss=0.09833, over 1620020.39 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:36:22,282 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 3.001e+02 3.662e+02 4.451e+02 9.002e+02, threshold=7.325e+02, percent-clipped=3.0
+2023-02-06 04:36:32,316 INFO [train.py:901] (0/4) Epoch 7, batch 6350, loss[loss=0.2865, simple_loss=0.3571, pruned_loss=0.1079, over 8240.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3314, pruned_loss=0.0979, over 1617263.92 frames. ], batch size: 24, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:36:53,641 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:37:06,004 INFO [train.py:901] (0/4) Epoch 7, batch 6400, loss[loss=0.2765, simple_loss=0.3469, pruned_loss=0.103, over 8325.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3323, pruned_loss=0.09889, over 1617440.44 frames. ], batch size: 25, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:37:21,359 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5727, 2.2440, 3.5986, 2.7326, 2.8230, 2.1458, 1.6921, 1.6571],
+ device='cuda:0'), covar=tensor([0.3017, 0.3343, 0.0802, 0.1902, 0.1911, 0.1721, 0.1540, 0.3527],
+ device='cuda:0'), in_proj_covar=tensor([0.0839, 0.0782, 0.0670, 0.0771, 0.0866, 0.0724, 0.0672, 0.0712],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:37:31,201 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.661e+02 3.281e+02 3.949e+02 1.010e+03, threshold=6.562e+02, percent-clipped=2.0
+2023-02-06 04:37:40,720 INFO [train.py:901] (0/4) Epoch 7, batch 6450, loss[loss=0.2278, simple_loss=0.3006, pruned_loss=0.07747, over 8508.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3308, pruned_loss=0.09779, over 1615438.65 frames. ], batch size: 26, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:37:57,771 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0688, 1.6008, 3.1480, 1.5243, 2.1899, 3.5060, 3.4226, 3.0321],
+ device='cuda:0'), covar=tensor([0.0873, 0.1324, 0.0416, 0.1819, 0.0885, 0.0257, 0.0425, 0.0529],
+ device='cuda:0'), in_proj_covar=tensor([0.0245, 0.0278, 0.0233, 0.0272, 0.0245, 0.0218, 0.0278, 0.0280],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 04:38:15,593 INFO [train.py:901] (0/4) Epoch 7, batch 6500, loss[loss=0.2674, simple_loss=0.3477, pruned_loss=0.09357, over 8556.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3304, pruned_loss=0.0976, over 1612985.99 frames. ], batch size: 31, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:38:33,806 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55026.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:38:39,665 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.546e+02 3.271e+02 4.197e+02 5.859e+02, threshold=6.542e+02, percent-clipped=0.0
+2023-02-06 04:38:49,577 INFO [train.py:901] (0/4) Epoch 7, batch 6550, loss[loss=0.2088, simple_loss=0.285, pruned_loss=0.06633, over 7980.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3294, pruned_loss=0.0969, over 1615889.54 frames. ], batch size: 21, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:39:12,193 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 04:39:25,812 INFO [train.py:901] (0/4) Epoch 7, batch 6600, loss[loss=0.1947, simple_loss=0.2642, pruned_loss=0.06257, over 7708.00 frames. ], tot_loss[loss=0.26, simple_loss=0.328, pruned_loss=0.09601, over 1613088.41 frames. ], batch size: 18, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:39:32,340 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 04:39:49,418 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.699e+02 3.503e+02 4.413e+02 7.218e+02, threshold=7.007e+02, percent-clipped=4.0
+2023-02-06 04:39:53,563 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55141.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:39:58,774 INFO [train.py:901] (0/4) Epoch 7, batch 6650, loss[loss=0.3198, simple_loss=0.3684, pruned_loss=0.1356, over 7252.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.33, pruned_loss=0.09751, over 1617350.34 frames. ], batch size: 71, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:40:10,773 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55166.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:40:17,203 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 04:40:17,527 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7583, 3.6641, 3.4013, 1.6390, 3.2867, 3.3504, 3.3750, 3.0621],
+ device='cuda:0'), covar=tensor([0.1007, 0.0698, 0.1104, 0.4699, 0.0974, 0.1011, 0.1313, 0.0985],
+ device='cuda:0'), in_proj_covar=tensor([0.0411, 0.0317, 0.0348, 0.0430, 0.0338, 0.0314, 0.0325, 0.0277],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:40:34,123 INFO [train.py:901] (0/4) Epoch 7, batch 6700, loss[loss=0.2589, simple_loss=0.3479, pruned_loss=0.08493, over 8467.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3293, pruned_loss=0.0966, over 1616400.29 frames. ], batch size: 27, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:40:51,581 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:40:55,048 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:40:58,791 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 3.031e+02 3.759e+02 4.673e+02 1.170e+03, threshold=7.519e+02, percent-clipped=9.0
+2023-02-06 04:41:07,996 INFO [train.py:901] (0/4) Epoch 7, batch 6750, loss[loss=0.2434, simple_loss=0.3166, pruned_loss=0.08508, over 7975.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.33, pruned_loss=0.09683, over 1618489.51 frames. ], batch size: 21, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:41:42,534 INFO [train.py:901] (0/4) Epoch 7, batch 6800, loss[loss=0.3269, simple_loss=0.3819, pruned_loss=0.1359, over 8677.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3296, pruned_loss=0.09651, over 1616305.02 frames. ], batch size: 34, lr: 1.07e-02, grad_scale: 16.0
+2023-02-06 04:41:47,226 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 04:42:08,166 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8025, 1.5887, 2.8986, 2.3620, 2.5342, 1.5580, 1.2839, 1.1939],
+ device='cuda:0'), covar=tensor([0.4074, 0.3962, 0.0805, 0.1766, 0.1588, 0.2437, 0.2164, 0.3396],
+ device='cuda:0'), in_proj_covar=tensor([0.0816, 0.0765, 0.0660, 0.0759, 0.0848, 0.0703, 0.0658, 0.0693],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:42:08,554 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.859e+02 3.364e+02 4.161e+02 9.626e+02, threshold=6.728e+02, percent-clipped=3.0
+2023-02-06 04:42:11,508 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55339.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:42:18,206 INFO [train.py:901] (0/4) Epoch 7, batch 6850, loss[loss=0.2258, simple_loss=0.303, pruned_loss=0.0743, over 7967.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3287, pruned_loss=0.09554, over 1614882.30 frames. ], batch size: 21, lr: 1.06e-02, grad_scale: 16.0
+2023-02-06 04:42:34,172 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 04:42:50,635 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55397.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:42:52,430 INFO [train.py:901] (0/4) Epoch 7, batch 6900, loss[loss=0.1772, simple_loss=0.2581, pruned_loss=0.0481, over 7526.00 frames. ], tot_loss[loss=0.2597, simple_loss=0.3289, pruned_loss=0.09524, over 1610379.18 frames. ], batch size: 18, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:42:53,700 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 04:43:09,619 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55422.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:43:19,271 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.767e+02 3.318e+02 4.413e+02 7.718e+02, threshold=6.635e+02, percent-clipped=1.0
+2023-02-06 04:43:28,912 INFO [train.py:901] (0/4) Epoch 7, batch 6950, loss[loss=0.315, simple_loss=0.3601, pruned_loss=0.135, over 8568.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3301, pruned_loss=0.0959, over 1612539.49 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:43:38,693 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6867, 1.6758, 3.3980, 1.3794, 2.1102, 3.7212, 3.7149, 3.1402],
+ device='cuda:0'), covar=tensor([0.1193, 0.1461, 0.0327, 0.2025, 0.0985, 0.0231, 0.0422, 0.0605],
+ device='cuda:0'), in_proj_covar=tensor([0.0242, 0.0274, 0.0232, 0.0271, 0.0243, 0.0214, 0.0278, 0.0277],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:0')
+2023-02-06 04:43:46,609 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 04:43:59,777 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6954, 1.9006, 1.9196, 1.4777, 2.0441, 1.5974, 1.1484, 1.7574],
+ device='cuda:0'), covar=tensor([0.0234, 0.0140, 0.0092, 0.0187, 0.0139, 0.0289, 0.0310, 0.0111],
+ device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0265, 0.0222, 0.0319, 0.0262, 0.0410, 0.0320, 0.0299],
+ device='cuda:0'), out_proj_covar=tensor([1.0918e-04, 8.1999e-05, 6.7707e-05, 9.7117e-05, 8.1776e-05, 1.3766e-04,
+ 1.0075e-04, 9.2945e-05], device='cuda:0')
+2023-02-06 04:44:02,255 INFO [train.py:901] (0/4) Epoch 7, batch 7000, loss[loss=0.2612, simple_loss=0.3445, pruned_loss=0.08895, over 8254.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.33, pruned_loss=0.0958, over 1610059.91 frames. ], batch size: 24, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:44:09,455 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55510.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:44:28,250 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.942e+02 3.699e+02 4.542e+02 1.220e+03, threshold=7.399e+02, percent-clipped=11.0
+2023-02-06 04:44:37,065 INFO [train.py:901] (0/4) Epoch 7, batch 7050, loss[loss=0.2593, simple_loss=0.3285, pruned_loss=0.095, over 8035.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.3295, pruned_loss=0.09551, over 1610312.92 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:44:54,228 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55573.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:44:59,848 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9533, 1.7298, 1.7088, 1.6396, 1.3179, 1.7661, 2.3541, 1.9501],
+ device='cuda:0'), covar=tensor([0.0485, 0.1245, 0.1731, 0.1338, 0.0578, 0.1432, 0.0631, 0.0556],
+ device='cuda:0'), in_proj_covar=tensor([0.0112, 0.0166, 0.0203, 0.0167, 0.0113, 0.0171, 0.0126, 0.0142],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:0')
+2023-02-06 04:45:03,889 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8991, 1.6156, 5.9990, 2.2726, 5.3856, 5.0705, 5.5744, 5.4861],
+ device='cuda:0'), covar=tensor([0.0350, 0.3571, 0.0239, 0.2335, 0.0766, 0.0591, 0.0344, 0.0360],
+ device='cuda:0'), in_proj_covar=tensor([0.0385, 0.0525, 0.0481, 0.0464, 0.0537, 0.0441, 0.0446, 0.0500],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 04:45:09,299 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:45:11,742 INFO [train.py:901] (0/4) Epoch 7, batch 7100, loss[loss=0.2803, simple_loss=0.3547, pruned_loss=0.103, over 8356.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3312, pruned_loss=0.09648, over 1612722.41 frames. ], batch size: 24, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:45:23,749 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0
+2023-02-06 04:45:26,282 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55620.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:45:29,481 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55625.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:45:31,781 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-02-06 04:45:36,697 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.720e+02 3.297e+02 4.008e+02 7.250e+02, threshold=6.594e+02, percent-clipped=0.0
+2023-02-06 04:45:45,958 INFO [train.py:901] (0/4) Epoch 7, batch 7150, loss[loss=0.3051, simple_loss=0.3588, pruned_loss=0.1257, over 8104.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.333, pruned_loss=0.09737, over 1619873.50 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:46:14,207 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55688.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:46:21,660 INFO [train.py:901] (0/4) Epoch 7, batch 7200, loss[loss=0.2297, simple_loss=0.2944, pruned_loss=0.08253, over 7799.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.3323, pruned_loss=0.09693, over 1619972.59 frames. ], batch size: 19, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:46:47,134 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.857e+02 3.487e+02 4.455e+02 1.230e+03, threshold=6.974e+02, percent-clipped=5.0
+2023-02-06 04:46:55,811 INFO [train.py:901] (0/4) Epoch 7, batch 7250, loss[loss=0.2677, simple_loss=0.3463, pruned_loss=0.0946, over 8472.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3318, pruned_loss=0.09659, over 1618591.39 frames. ], batch size: 25, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:47:07,765 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55766.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:47:31,009 INFO [train.py:901] (0/4) Epoch 7, batch 7300, loss[loss=0.2376, simple_loss=0.3146, pruned_loss=0.08036, over 8129.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3314, pruned_loss=0.09711, over 1615916.48 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:47:55,728 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.887e+02 3.402e+02 4.424e+02 1.529e+03, threshold=6.804e+02, percent-clipped=7.0
+2023-02-06 04:48:04,224 INFO [train.py:901] (0/4) Epoch 7, batch 7350, loss[loss=0.1809, simple_loss=0.2576, pruned_loss=0.05214, over 7658.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3297, pruned_loss=0.09616, over 1617471.54 frames. ], batch size: 19, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:48:15,846 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55866.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:48:24,830 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 04:48:26,630 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55881.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:48:27,829 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 04:48:40,229 INFO [train.py:901] (0/4) Epoch 7, batch 7400, loss[loss=0.2546, simple_loss=0.3187, pruned_loss=0.09525, over 8130.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3293, pruned_loss=0.09629, over 1614432.88 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:48:45,307 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55906.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:48:50,027 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 04:49:05,902 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.646e+02 3.471e+02 4.467e+02 1.348e+03, threshold=6.942e+02, percent-clipped=5.0
+2023-02-06 04:49:07,533 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9533, 2.5321, 2.7297, 1.1012, 2.8663, 1.5648, 1.4826, 1.5783],
+ device='cuda:0'), covar=tensor([0.0439, 0.0206, 0.0169, 0.0402, 0.0220, 0.0518, 0.0488, 0.0325],
+ device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0269, 0.0224, 0.0325, 0.0264, 0.0415, 0.0323, 0.0305],
+ device='cuda:0'), out_proj_covar=tensor([1.1116e-04, 8.2890e-05, 6.8041e-05, 9.8718e-05, 8.2162e-05, 1.3894e-04,
+ 1.0112e-04, 9.5054e-05], device='cuda:0')
+2023-02-06 04:49:11,626 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55944.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:49:14,918 INFO [train.py:901] (0/4) Epoch 7, batch 7450, loss[loss=0.242, simple_loss=0.3229, pruned_loss=0.08058, over 8479.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.3297, pruned_loss=0.09599, over 1615980.13 frames. ], batch size: 25, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:49:25,921 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 04:49:28,827 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:49:50,668 INFO [train.py:901] (0/4) Epoch 7, batch 7500, loss[loss=0.2932, simple_loss=0.3645, pruned_loss=0.1109, over 8515.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3306, pruned_loss=0.09664, over 1617889.96 frames. ], batch size: 28, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:49:51,378 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-56000.pt
+2023-02-06 04:50:04,033 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5772, 2.2078, 4.3302, 1.2019, 2.6398, 2.0956, 1.6686, 2.2912],
+ device='cuda:0'), covar=tensor([0.1733, 0.2106, 0.0797, 0.3706, 0.1763, 0.2697, 0.1674, 0.2840],
+ device='cuda:0'), in_proj_covar=tensor([0.0476, 0.0477, 0.0533, 0.0559, 0.0598, 0.0532, 0.0457, 0.0597],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:0')
+2023-02-06 04:50:17,131 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.816e+02 3.537e+02 4.737e+02 9.745e+02, threshold=7.074e+02, percent-clipped=6.0
+2023-02-06 04:50:25,625 INFO [train.py:901] (0/4) Epoch 7, batch 7550, loss[loss=0.3336, simple_loss=0.3714, pruned_loss=0.1479, over 8585.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3298, pruned_loss=0.09644, over 1616888.58 frames. ], batch size: 31, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:50:31,719 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8411, 1.4347, 3.2596, 1.3042, 2.2666, 3.6079, 3.5674, 3.0112],
+ device='cuda:0'), covar=tensor([0.1147, 0.1570, 0.0385, 0.2197, 0.0835, 0.0261, 0.0476, 0.0735],
+ device='cuda:0'), in_proj_covar=tensor([0.0245, 0.0275, 0.0233, 0.0274, 0.0242, 0.0214, 0.0281, 0.0278],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 04:50:58,639 INFO [train.py:901] (0/4) Epoch 7, batch 7600, loss[loss=0.2755, simple_loss=0.344, pruned_loss=0.1035, over 8246.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3292, pruned_loss=0.09596, over 1611707.37 frames. ], batch size: 24, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:51:06,776 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56110.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:51:25,297 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.750e+02 3.495e+02 4.537e+02 9.121e+02, threshold=6.990e+02, percent-clipped=3.0
+2023-02-06 04:51:34,934 INFO [train.py:901] (0/4) Epoch 7, batch 7650, loss[loss=0.2737, simple_loss=0.3373, pruned_loss=0.105, over 7666.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3296, pruned_loss=0.09626, over 1610789.41 frames. ], batch size: 19, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:51:44,431 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56163.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:52:08,620 INFO [train.py:901] (0/4) Epoch 7, batch 7700, loss[loss=0.2737, simple_loss=0.3444, pruned_loss=0.1015, over 8107.00 frames. ], tot_loss[loss=0.2619, simple_loss=0.33, pruned_loss=0.09693, over 1616649.18 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:52:11,648 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2585, 1.9164, 3.0246, 2.3541, 2.6065, 2.0401, 1.5279, 1.4044],
+ device='cuda:0'), covar=tensor([0.2620, 0.2897, 0.0670, 0.1684, 0.1415, 0.1433, 0.1458, 0.2909],
+ device='cuda:0'), in_proj_covar=tensor([0.0830, 0.0778, 0.0668, 0.0777, 0.0865, 0.0722, 0.0673, 0.0706],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:52:16,092 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56210.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:52:26,756 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56225.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:52:34,691 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.791e+02 3.394e+02 3.978e+02 9.035e+02, threshold=6.788e+02, percent-clipped=3.0
+2023-02-06 04:52:34,720 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 04:52:44,019 INFO [train.py:901] (0/4) Epoch 7, batch 7750, loss[loss=0.2153, simple_loss=0.2951, pruned_loss=0.06774, over 7180.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3296, pruned_loss=0.09681, over 1617854.80 frames. ], batch size: 16, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:53:18,263 INFO [train.py:901] (0/4) Epoch 7, batch 7800, loss[loss=0.2276, simple_loss=0.3136, pruned_loss=0.07081, over 8195.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.329, pruned_loss=0.09639, over 1613204.73 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:53:27,613 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.00 vs. limit=5.0
+2023-02-06 04:53:31,126 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6271, 4.6234, 4.1835, 1.7719, 4.0980, 3.9982, 4.2204, 3.6257],
+ device='cuda:0'), covar=tensor([0.0653, 0.0417, 0.0843, 0.4540, 0.0673, 0.0741, 0.0987, 0.0808],
+ device='cuda:0'), in_proj_covar=tensor([0.0422, 0.0328, 0.0361, 0.0451, 0.0348, 0.0321, 0.0333, 0.0287],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:53:35,678 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56325.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:53:42,619 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.702e+02 3.307e+02 4.383e+02 8.490e+02, threshold=6.613e+02, percent-clipped=4.0
+2023-02-06 04:53:51,371 INFO [train.py:901] (0/4) Epoch 7, batch 7850, loss[loss=0.2831, simple_loss=0.3462, pruned_loss=0.11, over 8246.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3286, pruned_loss=0.09637, over 1608151.59 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0
+2023-02-06 04:54:24,871 INFO [train.py:901] (0/4) Epoch 7, batch 7900, loss[loss=0.2709, simple_loss=0.3331, pruned_loss=0.1043, over 7533.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.329, pruned_loss=0.09661, over 1608656.30 frames. ], batch size: 18, lr: 1.05e-02, grad_scale: 8.0
+2023-02-06 04:54:49,425 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.758e+02 3.520e+02 4.424e+02 1.197e+03, threshold=7.039e+02, percent-clipped=9.0
+2023-02-06 04:54:58,059 INFO [train.py:901] (0/4) Epoch 7, batch 7950, loss[loss=0.3065, simple_loss=0.3569, pruned_loss=0.128, over 8524.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3307, pruned_loss=0.09788, over 1611538.90 frames. ], batch size: 50, lr: 1.05e-02, grad_scale: 8.0
+2023-02-06 04:55:19,780 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4176, 2.5342, 1.7786, 2.1825, 2.0563, 1.4105, 1.8497, 1.9012],
+ device='cuda:0'), covar=tensor([0.1298, 0.0375, 0.0936, 0.0536, 0.0574, 0.1276, 0.0870, 0.0840],
+ device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0236, 0.0312, 0.0300, 0.0307, 0.0317, 0.0336, 0.0315],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 04:55:19,805 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56481.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:55:31,651 INFO [train.py:901] (0/4) Epoch 7, batch 8000, loss[loss=0.3108, simple_loss=0.3619, pruned_loss=0.1298, over 6818.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.3314, pruned_loss=0.09804, over 1609997.32 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 8.0
+2023-02-06 04:55:36,299 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0753, 3.9938, 2.8755, 2.9105, 3.4215, 2.3603, 3.0399, 2.9173],
+ device='cuda:0'), covar=tensor([0.1484, 0.0309, 0.0725, 0.0680, 0.0452, 0.1018, 0.0889, 0.0815],
+ device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0239, 0.0312, 0.0301, 0.0308, 0.0317, 0.0337, 0.0317],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 04:55:36,304 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56506.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:55:36,340 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7351, 2.4878, 4.8434, 1.3732, 3.1554, 2.2432, 1.7763, 2.6747],
+ device='cuda:0'), covar=tensor([0.1602, 0.1859, 0.0528, 0.3442, 0.1486, 0.2667, 0.1718, 0.2429],
+ device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0478, 0.0536, 0.0561, 0.0607, 0.0538, 0.0459, 0.0600],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:0')
+2023-02-06 04:55:36,834 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:55:56,107 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.809e+02 3.378e+02 4.457e+02 7.052e+02, threshold=6.755e+02, percent-clipped=1.0
+2023-02-06 04:55:59,624 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56541.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:56:04,837 INFO [train.py:901] (0/4) Epoch 7, batch 8050, loss[loss=0.3478, simple_loss=0.3853, pruned_loss=0.1551, over 6640.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3309, pruned_loss=0.09917, over 1592653.91 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 8.0
+2023-02-06 04:56:27,848 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-7.pt
+2023-02-06 04:56:39,418 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 04:56:42,633 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56581.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:56:43,094 INFO [train.py:901] (0/4) Epoch 8, batch 0, loss[loss=0.2834, simple_loss=0.3501, pruned_loss=0.1083, over 8334.00 frames. ], tot_loss[loss=0.2834, simple_loss=0.3501, pruned_loss=0.1083, over 8334.00 frames. ], batch size: 25, lr: 9.92e-03, grad_scale: 8.0
+2023-02-06 04:56:43,095 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 04:56:54,079 INFO [train.py:935] (0/4) Epoch 8, validation: loss=0.205, simple_loss=0.3028, pruned_loss=0.05355, over 944034.00 frames.
+2023-02-06 04:56:54,080 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB
+2023-02-06 04:57:08,613 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 04:57:10,747 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56606.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:57:22,341 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56622.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:57:28,990 INFO [train.py:901] (0/4) Epoch 8, batch 50, loss[loss=0.3111, simple_loss=0.3688, pruned_loss=0.1267, over 8499.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3356, pruned_loss=0.09996, over 368684.87 frames. ], batch size: 26, lr: 9.92e-03, grad_scale: 8.0
+2023-02-06 04:57:31,766 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.831e+02 3.488e+02 4.265e+02 1.069e+03, threshold=6.975e+02, percent-clipped=2.0
+2023-02-06 04:57:43,145 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 04:57:49,382 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 04:58:03,657 INFO [train.py:901] (0/4) Epoch 8, batch 100, loss[loss=0.2672, simple_loss=0.327, pruned_loss=0.1037, over 6989.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3333, pruned_loss=0.09791, over 645338.78 frames. ], batch size: 71, lr: 9.91e-03, grad_scale: 8.0
+2023-02-06 04:58:03,875 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7368, 2.7700, 3.1586, 2.2079, 1.5522, 3.4432, 0.5588, 1.9744],
+ device='cuda:0'), covar=tensor([0.3063, 0.1870, 0.0774, 0.3625, 0.6855, 0.0330, 0.5097, 0.2609],
+ device='cuda:0'), in_proj_covar=tensor([0.0144, 0.0147, 0.0086, 0.0195, 0.0235, 0.0088, 0.0144, 0.0143],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:58:05,727 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 04:58:21,631 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.76 vs. limit=2.0
+2023-02-06 04:58:27,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4376, 4.4823, 3.9231, 1.8159, 3.8878, 4.0404, 4.1080, 3.6370],
+ device='cuda:0'), covar=tensor([0.0944, 0.0622, 0.1157, 0.5302, 0.0914, 0.0923, 0.1230, 0.1053],
+ device='cuda:0'), in_proj_covar=tensor([0.0428, 0.0329, 0.0364, 0.0456, 0.0351, 0.0329, 0.0339, 0.0294],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:58:36,381 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3425, 1.2234, 1.4200, 1.1422, 0.8357, 1.2144, 1.1764, 0.9858],
+ device='cuda:0'), covar=tensor([0.0591, 0.1265, 0.1854, 0.1380, 0.0581, 0.1645, 0.0701, 0.0643],
+ device='cuda:0'), in_proj_covar=tensor([0.0112, 0.0163, 0.0202, 0.0165, 0.0113, 0.0170, 0.0124, 0.0141],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:0')
+2023-02-06 04:58:38,288 INFO [train.py:901] (0/4) Epoch 8, batch 150, loss[loss=0.2732, simple_loss=0.3367, pruned_loss=0.1048, over 8262.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3321, pruned_loss=0.09603, over 864542.10 frames. ], batch size: 24, lr: 9.91e-03, grad_scale: 8.0
+2023-02-06 04:58:38,550 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2394, 1.8661, 2.9432, 2.2707, 2.4641, 2.0372, 1.5332, 1.1451],
+ device='cuda:0'), covar=tensor([0.2682, 0.2838, 0.0699, 0.1680, 0.1336, 0.1537, 0.1410, 0.3110],
+ device='cuda:0'), in_proj_covar=tensor([0.0828, 0.0781, 0.0671, 0.0781, 0.0869, 0.0720, 0.0671, 0.0708],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 04:58:40,995 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.710e+02 3.372e+02 4.105e+02 8.611e+02, threshold=6.744e+02, percent-clipped=2.0
+2023-02-06 04:59:13,819 INFO [train.py:901] (0/4) Epoch 8, batch 200, loss[loss=0.2362, simple_loss=0.3164, pruned_loss=0.07801, over 8342.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.3335, pruned_loss=0.09714, over 1032215.25 frames. ], batch size: 25, lr: 9.90e-03, grad_scale: 8.0
+2023-02-06 04:59:41,297 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56821.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:59:48,621 INFO [train.py:901] (0/4) Epoch 8, batch 250, loss[loss=0.2691, simple_loss=0.3436, pruned_loss=0.09728, over 8097.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3308, pruned_loss=0.09565, over 1158057.77 frames. ], batch size: 23, lr: 9.90e-03, grad_scale: 8.0
+2023-02-06 04:59:51,336 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.703e+02 3.318e+02 4.204e+02 1.022e+03, threshold=6.636e+02, percent-clipped=1.0
+2023-02-06 04:59:56,870 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 05:00:06,242 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training.
Duration: 25.45 +2023-02-06 05:00:21,331 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0167, 2.1113, 1.9272, 2.3281, 1.6873, 1.6848, 1.9165, 2.2262], + device='cuda:0'), covar=tensor([0.0682, 0.0732, 0.0894, 0.0617, 0.1109, 0.1288, 0.0916, 0.0694], + device='cuda:0'), in_proj_covar=tensor([0.0251, 0.0235, 0.0275, 0.0223, 0.0234, 0.0266, 0.0271, 0.0237], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 05:00:21,368 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:23,883 INFO [train.py:901] (0/4) Epoch 8, batch 300, loss[loss=0.2697, simple_loss=0.3472, pruned_loss=0.09606, over 8470.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.3309, pruned_loss=0.09566, over 1262970.46 frames. ], batch size: 29, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:00:25,998 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56885.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:00:38,036 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:54,953 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:58,766 INFO [train.py:901] (0/4) Epoch 8, batch 350, loss[loss=0.3239, simple_loss=0.3642, pruned_loss=0.1417, over 6549.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3305, pruned_loss=0.09523, over 1341805.31 frames. ], batch size: 71, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:01,443 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.612e+02 3.168e+02 3.951e+02 1.059e+03, threshold=6.336e+02, percent-clipped=3.0 +2023-02-06 05:01:17,025 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8074, 5.9597, 5.0525, 2.4309, 5.1024, 5.6979, 5.3355, 5.2472], + device='cuda:0'), covar=tensor([0.0570, 0.0387, 0.0893, 0.4512, 0.0688, 0.0610, 0.1031, 0.0784], + device='cuda:0'), in_proj_covar=tensor([0.0414, 0.0324, 0.0354, 0.0442, 0.0343, 0.0324, 0.0335, 0.0287], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:01:33,158 INFO [train.py:901] (0/4) Epoch 8, batch 400, loss[loss=0.2322, simple_loss=0.2977, pruned_loss=0.08331, over 7657.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3296, pruned_loss=0.09468, over 1406128.42 frames. 
], batch size: 19, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:43,241 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7876, 1.3909, 2.9575, 1.2316, 2.1418, 3.3679, 3.2872, 2.8274], + device='cuda:0'), covar=tensor([0.1093, 0.1461, 0.0414, 0.2020, 0.0825, 0.0246, 0.0503, 0.0625], + device='cuda:0'), in_proj_covar=tensor([0.0253, 0.0283, 0.0241, 0.0279, 0.0250, 0.0218, 0.0288, 0.0289], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 05:01:45,439 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57000.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:01:52,566 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:01:53,808 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:02:04,603 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7374, 2.2870, 3.8688, 2.7805, 3.0871, 2.2858, 1.7795, 1.7055], + device='cuda:0'), covar=tensor([0.2897, 0.3338, 0.0754, 0.2076, 0.1652, 0.1610, 0.1448, 0.3593], + device='cuda:0'), in_proj_covar=tensor([0.0831, 0.0782, 0.0672, 0.0783, 0.0870, 0.0721, 0.0673, 0.0710], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:02:07,050 INFO [train.py:901] (0/4) Epoch 8, batch 450, loss[loss=0.2191, simple_loss=0.2984, pruned_loss=0.06991, over 7547.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.3301, pruned_loss=0.09529, over 1455368.10 frames. ], batch size: 18, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:02:10,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 2.769e+02 3.532e+02 4.551e+02 9.004e+02, threshold=7.064e+02, percent-clipped=7.0 +2023-02-06 05:02:18,427 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1133, 2.2052, 1.5343, 1.7784, 1.8272, 1.2071, 1.5548, 1.6477], + device='cuda:0'), covar=tensor([0.1166, 0.0308, 0.0949, 0.0483, 0.0626, 0.1274, 0.0831, 0.0777], + device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0233, 0.0316, 0.0300, 0.0309, 0.0316, 0.0339, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 05:02:41,864 INFO [train.py:901] (0/4) Epoch 8, batch 500, loss[loss=0.2277, simple_loss=0.307, pruned_loss=0.07419, over 8137.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3286, pruned_loss=0.09423, over 1490899.23 frames. ], batch size: 22, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:03:15,889 INFO [train.py:901] (0/4) Epoch 8, batch 550, loss[loss=0.2936, simple_loss=0.3419, pruned_loss=0.1227, over 8254.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.328, pruned_loss=0.09414, over 1519549.55 frames. ], batch size: 24, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:03:18,518 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.804e+02 2.761e+02 3.532e+02 4.192e+02 1.400e+03, threshold=7.064e+02, percent-clipped=6.0 +2023-02-06 05:03:39,525 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:03:50,882 INFO [train.py:901] (0/4) Epoch 8, batch 600, loss[loss=0.2639, simple_loss=0.337, pruned_loss=0.09536, over 8452.00 frames. 
], tot_loss[loss=0.2583, simple_loss=0.3284, pruned_loss=0.09411, over 1541500.37 frames. ], batch size: 27, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:04:03,142 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 05:04:06,342 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57204.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:13,912 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:25,747 INFO [train.py:901] (0/4) Epoch 8, batch 650, loss[loss=0.2322, simple_loss=0.308, pruned_loss=0.07824, over 7808.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3292, pruned_loss=0.09455, over 1560909.45 frames. ], batch size: 20, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:04:28,382 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.474e+02 3.242e+02 4.284e+02 1.059e+03, threshold=6.484e+02, percent-clipped=6.0 +2023-02-06 05:04:29,944 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6641, 2.1154, 2.0945, 1.2904, 2.3225, 1.3972, 0.7933, 1.6886], + device='cuda:0'), covar=tensor([0.0331, 0.0142, 0.0127, 0.0269, 0.0164, 0.0463, 0.0448, 0.0175], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0271, 0.0224, 0.0330, 0.0266, 0.0418, 0.0325, 0.0302], + device='cuda:0'), out_proj_covar=tensor([1.1024e-04, 8.2685e-05, 6.7404e-05, 1.0004e-04, 8.2607e-05, 1.3904e-04, + 1.0134e-04, 9.3016e-05], device='cuda:0') +2023-02-06 05:04:41,268 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5901, 2.1675, 2.0993, 1.2377, 2.3797, 1.3393, 0.7675, 1.6328], + device='cuda:0'), covar=tensor([0.0322, 0.0154, 0.0133, 0.0288, 0.0163, 0.0488, 0.0410, 0.0176], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0272, 0.0224, 0.0333, 0.0268, 0.0420, 0.0328, 0.0303], + device='cuda:0'), out_proj_covar=tensor([1.1101e-04, 8.3030e-05, 6.7443e-05, 1.0093e-04, 8.3005e-05, 1.3962e-04, + 1.0202e-04, 9.3388e-05], device='cuda:0') +2023-02-06 05:04:41,970 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57256.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:04:46,631 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:51,980 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:59,505 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57280.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:00,218 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57281.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:05:00,683 INFO [train.py:901] (0/4) Epoch 8, batch 700, loss[loss=0.2757, simple_loss=0.3429, pruned_loss=0.1043, over 8573.00 frames. ], tot_loss[loss=0.2597, simple_loss=0.3297, pruned_loss=0.09483, over 1576519.34 frames. ], batch size: 49, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:34,920 INFO [train.py:901] (0/4) Epoch 8, batch 750, loss[loss=0.2619, simple_loss=0.343, pruned_loss=0.09041, over 8361.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.329, pruned_loss=0.09489, over 1582193.80 frames. 
], batch size: 24, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:38,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.846e+02 3.371e+02 4.091e+02 7.333e+02, threshold=6.742e+02, percent-clipped=1.0 +2023-02-06 05:05:49,687 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 05:05:51,140 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:52,569 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:57,758 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 05:05:58,576 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3039, 1.4874, 1.3161, 1.8967, 0.7434, 1.1572, 1.2486, 1.4818], + device='cuda:0'), covar=tensor([0.0988, 0.0979, 0.1420, 0.0570, 0.1352, 0.1777, 0.0992, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0253, 0.0235, 0.0275, 0.0217, 0.0232, 0.0267, 0.0271, 0.0237], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 05:06:09,703 INFO [train.py:901] (0/4) Epoch 8, batch 800, loss[loss=0.2941, simple_loss=0.3574, pruned_loss=0.1153, over 8585.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3292, pruned_loss=0.09528, over 1587625.49 frames. ], batch size: 49, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:10,113 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 05:06:11,937 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:33,598 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:44,181 INFO [train.py:901] (0/4) Epoch 8, batch 850, loss[loss=0.2261, simple_loss=0.2856, pruned_loss=0.08327, over 7434.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3283, pruned_loss=0.0944, over 1593899.89 frames. 
], batch size: 17, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:46,897 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.664e+02 3.287e+02 4.255e+02 8.769e+02, threshold=6.575e+02, percent-clipped=4.0 +2023-02-06 05:07:06,502 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3350, 1.4521, 2.2855, 1.1916, 1.4051, 1.6889, 1.4037, 1.4725], + device='cuda:0'), covar=tensor([0.1643, 0.1874, 0.0685, 0.3293, 0.1592, 0.2518, 0.1749, 0.1816], + device='cuda:0'), in_proj_covar=tensor([0.0472, 0.0473, 0.0526, 0.0550, 0.0593, 0.0525, 0.0452, 0.0591], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:07:11,119 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:12,465 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:12,498 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4808, 1.6116, 1.6802, 1.4497, 0.8661, 1.6663, 0.0700, 1.0585], + device='cuda:0'), covar=tensor([0.3200, 0.1940, 0.0703, 0.1841, 0.5990, 0.0862, 0.3781, 0.2139], + device='cuda:0'), in_proj_covar=tensor([0.0145, 0.0144, 0.0086, 0.0193, 0.0235, 0.0091, 0.0146, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:07:17,827 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:19,067 INFO [train.py:901] (0/4) Epoch 8, batch 900, loss[loss=0.2259, simple_loss=0.3071, pruned_loss=0.07236, over 8138.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3277, pruned_loss=0.09407, over 1599385.14 frames. ], batch size: 22, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:40,915 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:53,666 INFO [train.py:901] (0/4) Epoch 8, batch 950, loss[loss=0.2908, simple_loss=0.3498, pruned_loss=0.1159, over 8370.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3264, pruned_loss=0.0933, over 1600113.19 frames. ], batch size: 24, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:56,421 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.713e+02 3.197e+02 4.416e+02 7.629e+02, threshold=6.394e+02, percent-clipped=6.0 +2023-02-06 05:07:56,671 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57536.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:02,727 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5349, 2.6510, 1.6833, 2.0582, 2.1621, 1.3732, 1.8064, 1.9888], + device='cuda:0'), covar=tensor([0.1107, 0.0262, 0.0945, 0.0561, 0.0525, 0.1198, 0.0767, 0.0784], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0238, 0.0317, 0.0306, 0.0311, 0.0317, 0.0341, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 05:08:04,590 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:08,603 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-02-06 05:08:13,023 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:14,317 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 05:08:14,513 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:29,124 INFO [train.py:901] (0/4) Epoch 8, batch 1000, loss[loss=0.2914, simple_loss=0.3437, pruned_loss=0.1196, over 6760.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3267, pruned_loss=0.09357, over 1604333.74 frames. ], batch size: 71, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:08:45,888 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:47,905 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 05:09:00,599 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 05:09:03,900 INFO [train.py:901] (0/4) Epoch 8, batch 1050, loss[loss=0.3065, simple_loss=0.3567, pruned_loss=0.1281, over 8126.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3279, pruned_loss=0.09465, over 1612441.21 frames. ], batch size: 22, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:09:06,638 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.778e+02 2.733e+02 3.382e+02 4.210e+02 1.523e+03, threshold=6.765e+02, percent-clipped=11.0 +2023-02-06 05:09:10,045 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:24,430 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57663.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:26,492 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:30,460 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2663, 1.4683, 2.2287, 1.1588, 1.5197, 1.5048, 1.3223, 1.4023], + device='cuda:0'), covar=tensor([0.1598, 0.1726, 0.0707, 0.3180, 0.1409, 0.2517, 0.1655, 0.1774], + device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0479, 0.0531, 0.0551, 0.0596, 0.0530, 0.0453, 0.0595], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:09:31,676 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:37,592 INFO [train.py:901] (0/4) Epoch 8, batch 1100, loss[loss=0.2616, simple_loss=0.3398, pruned_loss=0.09168, over 8608.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.329, pruned_loss=0.09512, over 1617877.18 frames. 
], batch size: 31, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:10:05,377 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:08,891 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,252 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57728.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,687 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 05:10:12,759 INFO [train.py:901] (0/4) Epoch 8, batch 1150, loss[loss=0.2391, simple_loss=0.314, pruned_loss=0.0821, over 8235.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3279, pruned_loss=0.09381, over 1618885.09 frames. ], batch size: 22, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:10:15,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.752e+02 3.349e+02 4.211e+02 1.172e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 05:10:26,700 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:26,761 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:28,117 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:28,126 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3514, 1.4844, 1.5845, 1.1757, 0.8297, 1.6434, 0.0867, 1.0128], + device='cuda:0'), covar=tensor([0.3281, 0.2068, 0.0815, 0.2400, 0.5894, 0.0792, 0.3649, 0.2122], + device='cuda:0'), in_proj_covar=tensor([0.0143, 0.0143, 0.0084, 0.0190, 0.0232, 0.0089, 0.0143, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:10:32,786 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:48,133 INFO [train.py:901] (0/4) Epoch 8, batch 1200, loss[loss=0.244, simple_loss=0.3249, pruned_loss=0.08152, over 8430.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.328, pruned_loss=0.09329, over 1621256.31 frames. ], batch size: 29, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:11:12,640 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6029, 1.7793, 2.0783, 1.8259, 1.3356, 2.1306, 0.5439, 1.4741], + device='cuda:0'), covar=tensor([0.3384, 0.1667, 0.0551, 0.1817, 0.3899, 0.0594, 0.3832, 0.1961], + device='cuda:0'), in_proj_covar=tensor([0.0144, 0.0143, 0.0084, 0.0189, 0.0230, 0.0089, 0.0143, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:11:17,880 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:23,243 INFO [train.py:901] (0/4) Epoch 8, batch 1250, loss[loss=0.2532, simple_loss=0.3223, pruned_loss=0.09202, over 8466.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3291, pruned_loss=0.09498, over 1620110.05 frames. 
], batch size: 25, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:11:25,902 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.817e+02 3.577e+02 4.191e+02 8.690e+02, threshold=7.155e+02, percent-clipped=5.0 +2023-02-06 05:11:41,389 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:50,898 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0127, 1.4644, 1.4844, 1.3088, 1.0653, 1.4322, 1.6297, 1.5398], + device='cuda:0'), covar=tensor([0.0462, 0.1041, 0.1459, 0.1178, 0.0531, 0.1306, 0.0593, 0.0507], + device='cuda:0'), in_proj_covar=tensor([0.0110, 0.0163, 0.0200, 0.0165, 0.0112, 0.0170, 0.0124, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 05:11:53,669 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:58,367 INFO [train.py:901] (0/4) Epoch 8, batch 1300, loss[loss=0.2143, simple_loss=0.2993, pruned_loss=0.06462, over 8454.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3283, pruned_loss=0.09456, over 1617464.64 frames. ], batch size: 29, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:12:24,241 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57919.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:32,235 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:33,386 INFO [train.py:901] (0/4) Epoch 8, batch 1350, loss[loss=0.2553, simple_loss=0.3295, pruned_loss=0.09052, over 8329.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3277, pruned_loss=0.09404, over 1617564.25 frames. ], batch size: 26, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:12:36,119 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.787e+02 3.281e+02 4.089e+02 1.129e+03, threshold=6.562e+02, percent-clipped=4.0 +2023-02-06 05:12:38,228 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:41,649 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:49,710 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:01,716 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57972.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:05,168 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:08,425 INFO [train.py:901] (0/4) Epoch 8, batch 1400, loss[loss=0.2647, simple_loss=0.3442, pruned_loss=0.09265, over 8598.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3258, pruned_loss=0.09273, over 1616753.63 frames. 
], batch size: 31, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:20,620 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-58000.pt +2023-02-06 05:13:23,091 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:25,799 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1205, 1.7374, 2.6951, 2.1194, 2.3669, 1.9026, 1.5152, 1.0551], + device='cuda:0'), covar=tensor([0.2740, 0.2761, 0.0729, 0.1598, 0.1217, 0.1563, 0.1350, 0.2685], + device='cuda:0'), in_proj_covar=tensor([0.0835, 0.0785, 0.0675, 0.0778, 0.0867, 0.0720, 0.0665, 0.0709], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:13:41,318 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 05:13:43,354 INFO [train.py:901] (0/4) Epoch 8, batch 1450, loss[loss=0.255, simple_loss=0.3151, pruned_loss=0.09741, over 7930.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3259, pruned_loss=0.09311, over 1617091.43 frames. ], batch size: 20, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:46,043 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.668e+02 3.298e+02 4.223e+02 1.032e+03, threshold=6.596e+02, percent-clipped=5.0 +2023-02-06 05:14:07,377 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4992, 2.0508, 3.0984, 2.4662, 2.5859, 2.1339, 1.6987, 1.2525], + device='cuda:0'), covar=tensor([0.2364, 0.2600, 0.0682, 0.1633, 0.1274, 0.1487, 0.1304, 0.2850], + device='cuda:0'), in_proj_covar=tensor([0.0832, 0.0784, 0.0672, 0.0774, 0.0863, 0.0721, 0.0663, 0.0710], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:14:18,668 INFO [train.py:901] (0/4) Epoch 8, batch 1500, loss[loss=0.2553, simple_loss=0.3396, pruned_loss=0.08554, over 8103.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3263, pruned_loss=0.09343, over 1618670.85 frames. ], batch size: 23, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:28,061 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:52,812 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:53,267 INFO [train.py:901] (0/4) Epoch 8, batch 1550, loss[loss=0.3093, simple_loss=0.3599, pruned_loss=0.1293, over 6738.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3256, pruned_loss=0.09337, over 1615209.84 frames. ], batch size: 72, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:56,005 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.601e+02 3.218e+02 3.979e+02 6.246e+02, threshold=6.435e+02, percent-clipped=0.0 +2023-02-06 05:15:03,106 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. 
limit=2.0 +2023-02-06 05:15:10,031 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:12,039 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58159.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:14,753 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:27,769 INFO [train.py:901] (0/4) Epoch 8, batch 1600, loss[loss=0.3141, simple_loss=0.3798, pruned_loss=0.1242, over 8111.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3267, pruned_loss=0.09413, over 1616927.83 frames. ], batch size: 23, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:15:36,605 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:47,383 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:54,025 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:00,054 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58228.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:02,605 INFO [train.py:901] (0/4) Epoch 8, batch 1650, loss[loss=0.2735, simple_loss=0.3445, pruned_loss=0.1012, over 8096.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3267, pruned_loss=0.0942, over 1616150.44 frames. ], batch size: 23, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:16:05,268 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.785e+02 3.241e+02 4.331e+02 1.468e+03, threshold=6.482e+02, percent-clipped=4.0 +2023-02-06 05:16:16,841 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:20,347 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 05:16:30,961 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1483, 1.7920, 1.8110, 1.5752, 1.1666, 1.7960, 2.0391, 2.0339], + device='cuda:0'), covar=tensor([0.0394, 0.1224, 0.1628, 0.1274, 0.0575, 0.1359, 0.0647, 0.0535], + device='cuda:0'), in_proj_covar=tensor([0.0110, 0.0162, 0.0200, 0.0165, 0.0112, 0.0170, 0.0124, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 05:16:37,572 INFO [train.py:901] (0/4) Epoch 8, batch 1700, loss[loss=0.2261, simple_loss=0.2942, pruned_loss=0.07906, over 7649.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.327, pruned_loss=0.09428, over 1613549.20 frames. ], batch size: 19, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:17:11,721 INFO [train.py:901] (0/4) Epoch 8, batch 1750, loss[loss=0.2435, simple_loss=0.3088, pruned_loss=0.08915, over 7665.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3266, pruned_loss=0.09394, over 1613883.59 frames. ], batch size: 19, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:17:15,045 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.752e+02 3.204e+02 3.949e+02 8.384e+02, threshold=6.409e+02, percent-clipped=4.0 +2023-02-06 05:17:16,105 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.44 vs. 
limit=5.0 +2023-02-06 05:17:43,877 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3396, 2.5412, 1.6323, 1.9483, 2.0810, 1.2396, 1.7012, 1.7950], + device='cuda:0'), covar=tensor([0.1204, 0.0282, 0.0927, 0.0550, 0.0612, 0.1257, 0.0883, 0.0928], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0231, 0.0311, 0.0298, 0.0305, 0.0315, 0.0336, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 05:17:45,746 INFO [train.py:901] (0/4) Epoch 8, batch 1800, loss[loss=0.272, simple_loss=0.3491, pruned_loss=0.09749, over 8479.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3284, pruned_loss=0.09542, over 1614331.45 frames. ], batch size: 25, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:18:21,312 INFO [train.py:901] (0/4) Epoch 8, batch 1850, loss[loss=0.1949, simple_loss=0.2728, pruned_loss=0.05852, over 7818.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3275, pruned_loss=0.09503, over 1613687.39 frames. ], batch size: 20, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:18:24,004 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.956e+02 3.603e+02 4.636e+02 8.044e+02, threshold=7.207e+02, percent-clipped=5.0 +2023-02-06 05:18:45,039 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58466.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:18:46,288 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6902, 2.3221, 4.7416, 2.8555, 4.2989, 4.1199, 4.4522, 4.3855], + device='cuda:0'), covar=tensor([0.0399, 0.2893, 0.0448, 0.2237, 0.0824, 0.0674, 0.0386, 0.0477], + device='cuda:0'), in_proj_covar=tensor([0.0405, 0.0528, 0.0500, 0.0474, 0.0539, 0.0452, 0.0453, 0.0511], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 05:18:55,854 INFO [train.py:901] (0/4) Epoch 8, batch 1900, loss[loss=0.3228, simple_loss=0.3701, pruned_loss=0.1378, over 7080.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3265, pruned_loss=0.09402, over 1612296.17 frames. ], batch size: 71, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:18:56,037 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0009, 2.2931, 1.7981, 2.7104, 1.4879, 1.6331, 1.9932, 2.2630], + device='cuda:0'), covar=tensor([0.0828, 0.0894, 0.1079, 0.0445, 0.1295, 0.1633, 0.1012, 0.0834], + device='cuda:0'), in_proj_covar=tensor([0.0246, 0.0229, 0.0263, 0.0213, 0.0232, 0.0264, 0.0264, 0.0232], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 05:19:01,996 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:10,025 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:12,701 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:19,277 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-06 05:19:28,969 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3178, 1.4439, 4.4981, 1.6929, 3.9750, 3.7890, 4.0391, 3.9769], + device='cuda:0'), covar=tensor([0.0498, 0.3489, 0.0364, 0.2795, 0.0927, 0.0741, 0.0453, 0.0507], + device='cuda:0'), in_proj_covar=tensor([0.0406, 0.0529, 0.0504, 0.0476, 0.0541, 0.0453, 0.0454, 0.0512], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 05:19:30,158 INFO [train.py:901] (0/4) Epoch 8, batch 1950, loss[loss=0.2451, simple_loss=0.3307, pruned_loss=0.07974, over 8202.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3265, pruned_loss=0.09406, over 1613107.57 frames. ], batch size: 23, lr: 9.75e-03, grad_scale: 16.0 +2023-02-06 05:19:30,810 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 05:19:32,812 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.699e+02 3.417e+02 4.103e+02 8.210e+02, threshold=6.834e+02, percent-clipped=5.0 +2023-02-06 05:19:50,865 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 05:20:04,902 INFO [train.py:901] (0/4) Epoch 8, batch 2000, loss[loss=0.2195, simple_loss=0.2903, pruned_loss=0.07435, over 7440.00 frames. ], tot_loss[loss=0.258, simple_loss=0.3269, pruned_loss=0.09457, over 1612449.79 frames. ], batch size: 17, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:25,668 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-06 05:20:29,385 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58618.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:31,402 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:39,661 INFO [train.py:901] (0/4) Epoch 8, batch 2050, loss[loss=0.2402, simple_loss=0.3102, pruned_loss=0.08504, over 7976.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3275, pruned_loss=0.09494, over 1610308.60 frames. ], batch size: 21, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:42,943 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.785e+02 3.396e+02 4.687e+02 1.585e+03, threshold=6.792e+02, percent-clipped=4.0 +2023-02-06 05:21:13,667 INFO [train.py:901] (0/4) Epoch 8, batch 2100, loss[loss=0.2564, simple_loss=0.334, pruned_loss=0.08942, over 8612.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3277, pruned_loss=0.09471, over 1612964.11 frames. ], batch size: 31, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:25,813 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:21:30,401 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7087, 5.7947, 4.9523, 2.0667, 5.0587, 5.3617, 5.4560, 4.9833], + device='cuda:0'), covar=tensor([0.0695, 0.0369, 0.0912, 0.5057, 0.0698, 0.0767, 0.1000, 0.0611], + device='cuda:0'), in_proj_covar=tensor([0.0413, 0.0320, 0.0348, 0.0438, 0.0341, 0.0320, 0.0329, 0.0280], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:21:47,831 INFO [train.py:901] (0/4) Epoch 8, batch 2150, loss[loss=0.2474, simple_loss=0.3278, pruned_loss=0.08354, over 8470.00 frames. 
], tot_loss[loss=0.2576, simple_loss=0.327, pruned_loss=0.09413, over 1610189.57 frames. ], batch size: 25, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:51,088 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.818e+02 3.372e+02 4.104e+02 8.704e+02, threshold=6.743e+02, percent-clipped=2.0 +2023-02-06 05:22:04,053 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.48 vs. limit=5.0 +2023-02-06 05:22:23,683 INFO [train.py:901] (0/4) Epoch 8, batch 2200, loss[loss=0.2274, simple_loss=0.2998, pruned_loss=0.07752, over 7933.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3289, pruned_loss=0.09508, over 1618874.95 frames. ], batch size: 20, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:22:31,539 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58793.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:22:58,809 INFO [train.py:901] (0/4) Epoch 8, batch 2250, loss[loss=0.236, simple_loss=0.3011, pruned_loss=0.08543, over 7530.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3273, pruned_loss=0.09397, over 1619333.28 frames. ], batch size: 18, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:02,320 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.600e+02 3.138e+02 4.259e+02 8.800e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-06 05:23:29,288 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:29,308 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4697, 1.9734, 3.3586, 1.2029, 2.6977, 1.8915, 1.5494, 2.1571], + device='cuda:0'), covar=tensor([0.1518, 0.1845, 0.0663, 0.3281, 0.1187, 0.2417, 0.1635, 0.2140], + device='cuda:0'), in_proj_covar=tensor([0.0478, 0.0484, 0.0533, 0.0560, 0.0605, 0.0533, 0.0457, 0.0594], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:23:31,287 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:32,129 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 05:23:34,466 INFO [train.py:901] (0/4) Epoch 8, batch 2300, loss[loss=0.2562, simple_loss=0.3279, pruned_loss=0.09227, over 8638.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3264, pruned_loss=0.09308, over 1613901.54 frames. ], batch size: 39, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:46,513 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:48,579 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:03,546 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:09,482 INFO [train.py:901] (0/4) Epoch 8, batch 2350, loss[loss=0.2761, simple_loss=0.3454, pruned_loss=0.1034, over 8497.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3273, pruned_loss=0.09391, over 1615792.70 frames. 
], batch size: 26, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:24:12,940 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.611e+02 3.221e+02 3.780e+02 8.999e+02, threshold=6.441e+02, percent-clipped=2.0 +2023-02-06 05:24:32,449 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1220, 2.7171, 3.1792, 1.2248, 3.3449, 1.9685, 1.4927, 1.8114], + device='cuda:0'), covar=tensor([0.0450, 0.0180, 0.0186, 0.0386, 0.0165, 0.0449, 0.0534, 0.0299], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0270, 0.0224, 0.0328, 0.0267, 0.0416, 0.0321, 0.0303], + device='cuda:0'), out_proj_covar=tensor([1.0782e-04, 8.1729e-05, 6.7163e-05, 9.8482e-05, 8.2190e-05, 1.3724e-04, + 9.9038e-05, 9.2534e-05], device='cuda:0') +2023-02-06 05:24:44,042 INFO [train.py:901] (0/4) Epoch 8, batch 2400, loss[loss=0.2615, simple_loss=0.3246, pruned_loss=0.09917, over 7917.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3276, pruned_loss=0.09429, over 1614341.33 frames. ], batch size: 20, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:25:18,645 INFO [train.py:901] (0/4) Epoch 8, batch 2450, loss[loss=0.2621, simple_loss=0.3217, pruned_loss=0.1013, over 8232.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3269, pruned_loss=0.09355, over 1615064.00 frames. ], batch size: 22, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:25:21,873 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 3.001e+02 3.706e+02 4.542e+02 9.599e+02, threshold=7.413e+02, percent-clipped=3.0 +2023-02-06 05:25:26,088 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:25:52,496 INFO [train.py:901] (0/4) Epoch 8, batch 2500, loss[loss=0.2569, simple_loss=0.3278, pruned_loss=0.09303, over 8460.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3264, pruned_loss=0.0933, over 1615469.68 frames. ], batch size: 29, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:15,602 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1725, 2.9756, 3.3650, 2.0240, 1.7771, 3.5829, 0.6493, 2.1538], + device='cuda:0'), covar=tensor([0.2109, 0.1427, 0.0615, 0.2844, 0.5041, 0.0421, 0.4922, 0.2401], + device='cuda:0'), in_proj_covar=tensor([0.0149, 0.0147, 0.0085, 0.0199, 0.0235, 0.0090, 0.0154, 0.0147], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:26:19,789 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 05:26:27,103 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2074, 1.8711, 2.7251, 2.1929, 2.3844, 1.9634, 1.5944, 1.0015], + device='cuda:0'), covar=tensor([0.2868, 0.2891, 0.0755, 0.1609, 0.1276, 0.1785, 0.1376, 0.3142], + device='cuda:0'), in_proj_covar=tensor([0.0846, 0.0794, 0.0684, 0.0786, 0.0883, 0.0737, 0.0672, 0.0720], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:26:27,546 INFO [train.py:901] (0/4) Epoch 8, batch 2550, loss[loss=0.2759, simple_loss=0.3407, pruned_loss=0.1055, over 7980.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3254, pruned_loss=0.09272, over 1615648.14 frames. 
], batch size: 21, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:30,876 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.676e+02 3.180e+02 4.175e+02 9.807e+02, threshold=6.360e+02, percent-clipped=4.0 +2023-02-06 05:26:30,960 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:26:44,653 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7154, 1.4809, 2.0861, 1.8157, 1.9116, 1.4934, 1.2842, 1.1222], + device='cuda:0'), covar=tensor([0.1967, 0.2195, 0.0645, 0.1226, 0.1085, 0.1349, 0.1077, 0.2009], + device='cuda:0'), in_proj_covar=tensor([0.0843, 0.0793, 0.0683, 0.0787, 0.0881, 0.0735, 0.0671, 0.0719], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:26:45,980 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:03,041 INFO [train.py:901] (0/4) Epoch 8, batch 2600, loss[loss=0.3345, simple_loss=0.3821, pruned_loss=0.1435, over 8433.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3249, pruned_loss=0.09234, over 1616259.72 frames. ], batch size: 27, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:03,144 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:13,290 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0582, 2.3574, 1.6908, 2.7262, 1.4750, 1.4799, 1.9679, 2.3689], + device='cuda:0'), covar=tensor([0.0755, 0.0814, 0.1167, 0.0468, 0.1282, 0.1668, 0.1049, 0.0763], + device='cuda:0'), in_proj_covar=tensor([0.0245, 0.0227, 0.0263, 0.0214, 0.0231, 0.0264, 0.0266, 0.0236], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 05:27:38,229 INFO [train.py:901] (0/4) Epoch 8, batch 2650, loss[loss=0.3119, simple_loss=0.3708, pruned_loss=0.1265, over 8557.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3256, pruned_loss=0.09254, over 1618907.52 frames. ], batch size: 39, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:41,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.757e+02 3.213e+02 4.207e+02 1.360e+03, threshold=6.426e+02, percent-clipped=6.0 +2023-02-06 05:27:51,997 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:59,925 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0187, 2.2962, 1.8516, 2.7022, 1.3784, 1.6833, 1.8429, 2.3766], + device='cuda:0'), covar=tensor([0.0795, 0.0803, 0.1076, 0.0475, 0.1312, 0.1454, 0.1090, 0.0739], + device='cuda:0'), in_proj_covar=tensor([0.0248, 0.0228, 0.0265, 0.0215, 0.0231, 0.0266, 0.0267, 0.0236], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 05:28:03,086 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:12,228 INFO [train.py:901] (0/4) Epoch 8, batch 2700, loss[loss=0.2853, simple_loss=0.3555, pruned_loss=0.1075, over 8479.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3241, pruned_loss=0.09157, over 1617141.61 frames. 
], batch size: 25, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:46,774 INFO [train.py:901] (0/4) Epoch 8, batch 2750, loss[loss=0.2906, simple_loss=0.3467, pruned_loss=0.1173, over 7774.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3239, pruned_loss=0.0918, over 1611966.96 frames. ], batch size: 19, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:50,101 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.846e+02 3.367e+02 4.274e+02 9.837e+02, threshold=6.735e+02, percent-clipped=6.0 +2023-02-06 05:29:20,730 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0170, 4.0664, 3.6561, 1.7699, 3.5902, 3.5884, 3.7570, 3.3366], + device='cuda:0'), covar=tensor([0.1097, 0.0658, 0.1127, 0.5367, 0.0818, 0.0942, 0.1276, 0.0912], + device='cuda:0'), in_proj_covar=tensor([0.0426, 0.0332, 0.0360, 0.0451, 0.0352, 0.0329, 0.0342, 0.0289], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:29:22,662 INFO [train.py:901] (0/4) Epoch 8, batch 2800, loss[loss=0.2311, simple_loss=0.3072, pruned_loss=0.07752, over 8238.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3248, pruned_loss=0.09248, over 1608314.23 frames. ], batch size: 22, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:29:23,481 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:44,974 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:56,675 INFO [train.py:901] (0/4) Epoch 8, batch 2850, loss[loss=0.2127, simple_loss=0.2862, pruned_loss=0.06963, over 7647.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3259, pruned_loss=0.09342, over 1607853.30 frames. ], batch size: 19, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:00,149 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.577e+02 2.974e+02 3.773e+02 5.956e+02, threshold=5.948e+02, percent-clipped=0.0 +2023-02-06 05:30:01,823 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:05,295 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0489, 0.9123, 1.0355, 1.0622, 0.7778, 1.1358, 0.0536, 0.8331], + device='cuda:0'), covar=tensor([0.2082, 0.1832, 0.0609, 0.1359, 0.3877, 0.0600, 0.3129, 0.1530], + device='cuda:0'), in_proj_covar=tensor([0.0147, 0.0146, 0.0084, 0.0195, 0.0231, 0.0089, 0.0151, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:30:12,852 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0721, 1.2748, 1.2053, 0.3508, 1.2637, 0.9947, 0.1396, 1.1341], + device='cuda:0'), covar=tensor([0.0239, 0.0159, 0.0168, 0.0302, 0.0261, 0.0470, 0.0387, 0.0163], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0274, 0.0225, 0.0327, 0.0266, 0.0413, 0.0323, 0.0304], + device='cuda:0'), out_proj_covar=tensor([1.0911e-04, 8.2662e-05, 6.7817e-05, 9.8048e-05, 8.1858e-05, 1.3586e-04, + 9.9259e-05, 9.2562e-05], device='cuda:0') +2023-02-06 05:30:32,532 INFO [train.py:901] (0/4) Epoch 8, batch 2900, loss[loss=0.2843, simple_loss=0.3642, pruned_loss=0.1022, over 8576.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.326, pruned_loss=0.09255, over 1615466.03 frames. 
], batch size: 31, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:51,212 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59508.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:57,864 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3012, 1.5144, 1.5105, 1.3052, 1.0440, 1.4394, 1.6274, 1.6220], + device='cuda:0'), covar=tensor([0.0495, 0.1165, 0.1733, 0.1340, 0.0570, 0.1537, 0.0702, 0.0568], + device='cuda:0'), in_proj_covar=tensor([0.0111, 0.0161, 0.0200, 0.0166, 0.0111, 0.0170, 0.0124, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 05:30:59,185 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:03,063 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:04,356 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 05:31:07,552 INFO [train.py:901] (0/4) Epoch 8, batch 2950, loss[loss=0.3157, simple_loss=0.3772, pruned_loss=0.1271, over 8285.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.326, pruned_loss=0.09281, over 1610263.84 frames. ], batch size: 23, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:08,340 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:10,787 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.847e+02 3.468e+02 5.057e+02 9.591e+02, threshold=6.936e+02, percent-clipped=13.0 +2023-02-06 05:31:42,183 INFO [train.py:901] (0/4) Epoch 8, batch 3000, loss[loss=0.2317, simple_loss=0.321, pruned_loss=0.07119, over 8505.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3261, pruned_loss=0.09286, over 1612504.16 frames. ], batch size: 28, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:42,183 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 05:31:54,433 INFO [train.py:935] (0/4) Epoch 8, validation: loss=0.2021, simple_loss=0.3001, pruned_loss=0.05199, over 944034.00 frames. +2023-02-06 05:31:54,435 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 05:32:25,108 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7961, 2.3132, 3.8764, 2.6705, 3.0359, 2.3782, 1.8525, 1.5414], + device='cuda:0'), covar=tensor([0.2756, 0.3514, 0.0766, 0.2161, 0.1601, 0.1668, 0.1444, 0.3635], + device='cuda:0'), in_proj_covar=tensor([0.0843, 0.0798, 0.0681, 0.0784, 0.0883, 0.0735, 0.0670, 0.0720], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:32:30,901 INFO [train.py:901] (0/4) Epoch 8, batch 3050, loss[loss=0.2704, simple_loss=0.3398, pruned_loss=0.1005, over 8462.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3255, pruned_loss=0.09266, over 1612272.83 frames. 
], batch size: 29, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:32:34,249 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.623e+02 3.324e+02 4.059e+02 7.396e+02, threshold=6.648e+02, percent-clipped=1.0 +2023-02-06 05:32:35,800 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:37,136 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:45,610 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2148, 4.1934, 3.8011, 1.8691, 3.7349, 3.6699, 3.7901, 3.4120], + device='cuda:0'), covar=tensor([0.0827, 0.0621, 0.1128, 0.4472, 0.0775, 0.0844, 0.1321, 0.0786], + device='cuda:0'), in_proj_covar=tensor([0.0427, 0.0336, 0.0357, 0.0452, 0.0351, 0.0331, 0.0341, 0.0293], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:32:52,395 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:33:05,112 INFO [train.py:901] (0/4) Epoch 8, batch 3100, loss[loss=0.3058, simple_loss=0.3752, pruned_loss=0.1182, over 8494.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3258, pruned_loss=0.09286, over 1611549.78 frames. ], batch size: 26, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:40,027 INFO [train.py:901] (0/4) Epoch 8, batch 3150, loss[loss=0.2411, simple_loss=0.3216, pruned_loss=0.08025, over 8291.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.3244, pruned_loss=0.09239, over 1608810.36 frames. ], batch size: 23, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:43,231 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.052e+02 2.894e+02 3.427e+02 4.526e+02 8.691e+02, threshold=6.853e+02, percent-clipped=4.0 +2023-02-06 05:33:56,416 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1470, 2.6722, 3.0584, 1.3375, 3.4002, 2.1401, 1.5898, 1.7231], + device='cuda:0'), covar=tensor([0.0406, 0.0184, 0.0181, 0.0370, 0.0192, 0.0405, 0.0455, 0.0277], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0277, 0.0226, 0.0328, 0.0264, 0.0418, 0.0323, 0.0303], + device='cuda:0'), out_proj_covar=tensor([1.0951e-04, 8.3622e-05, 6.7701e-05, 9.8110e-05, 8.1076e-05, 1.3753e-04, + 9.9177e-05, 9.2321e-05], device='cuda:0') +2023-02-06 05:34:14,623 INFO [train.py:901] (0/4) Epoch 8, batch 3200, loss[loss=0.2723, simple_loss=0.3492, pruned_loss=0.09767, over 8239.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3246, pruned_loss=0.09265, over 1611996.55 frames. ], batch size: 22, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:50,980 INFO [train.py:901] (0/4) Epoch 8, batch 3250, loss[loss=0.2319, simple_loss=0.3145, pruned_loss=0.07467, over 8468.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3247, pruned_loss=0.09292, over 1608241.81 frames. 
], batch size: 27, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:54,314 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.545e+02 3.201e+02 4.295e+02 9.179e+02, threshold=6.402e+02, percent-clipped=6.0 +2023-02-06 05:35:13,093 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:14,543 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:20,851 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0 +2023-02-06 05:35:25,226 INFO [train.py:901] (0/4) Epoch 8, batch 3300, loss[loss=0.2755, simple_loss=0.3417, pruned_loss=0.1047, over 8499.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.324, pruned_loss=0.09229, over 1609248.51 frames. ], batch size: 29, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:35:35,300 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:41,746 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:52,436 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:59,533 INFO [train.py:901] (0/4) Epoch 8, batch 3350, loss[loss=0.214, simple_loss=0.2868, pruned_loss=0.0706, over 7943.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3248, pruned_loss=0.09241, over 1610124.06 frames. ], batch size: 20, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:02,906 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.795e+02 3.400e+02 4.166e+02 8.824e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 05:36:09,851 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5818, 1.9690, 3.1800, 2.3404, 2.8326, 2.2813, 1.8285, 1.2652], + device='cuda:0'), covar=tensor([0.2589, 0.3060, 0.0701, 0.1836, 0.1294, 0.1506, 0.1316, 0.3131], + device='cuda:0'), in_proj_covar=tensor([0.0843, 0.0809, 0.0685, 0.0789, 0.0890, 0.0741, 0.0679, 0.0723], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:36:24,470 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5968, 1.4817, 2.7419, 1.2126, 1.9800, 3.0362, 3.0444, 2.5343], + device='cuda:0'), covar=tensor([0.1027, 0.1307, 0.0427, 0.2012, 0.0794, 0.0302, 0.0491, 0.0670], + device='cuda:0'), in_proj_covar=tensor([0.0248, 0.0282, 0.0239, 0.0273, 0.0250, 0.0222, 0.0287, 0.0285], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 05:36:31,811 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:36:33,703 INFO [train.py:901] (0/4) Epoch 8, batch 3400, loss[loss=0.3127, simple_loss=0.3827, pruned_loss=0.1214, over 8336.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3258, pruned_loss=0.09315, over 1612984.36 frames. 
], batch size: 26, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:46,612 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-60000.pt +2023-02-06 05:36:48,455 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1593, 1.8904, 2.8201, 2.2169, 2.5138, 2.0431, 1.5652, 1.1646], + device='cuda:0'), covar=tensor([0.3026, 0.3028, 0.0854, 0.1953, 0.1492, 0.1751, 0.1483, 0.3199], + device='cuda:0'), in_proj_covar=tensor([0.0848, 0.0815, 0.0691, 0.0793, 0.0897, 0.0744, 0.0682, 0.0727], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:37:09,546 INFO [train.py:901] (0/4) Epoch 8, batch 3450, loss[loss=0.3238, simple_loss=0.375, pruned_loss=0.1364, over 6697.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3278, pruned_loss=0.09459, over 1611562.35 frames. ], batch size: 71, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:12,868 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.668e+02 3.106e+02 3.891e+02 9.201e+02, threshold=6.211e+02, percent-clipped=2.0 +2023-02-06 05:37:21,753 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9970, 1.7117, 1.2982, 1.5823, 1.3494, 1.0641, 1.2126, 1.3641], + device='cuda:0'), covar=tensor([0.0909, 0.0382, 0.0966, 0.0458, 0.0693, 0.1218, 0.0736, 0.0697], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0232, 0.0306, 0.0294, 0.0306, 0.0315, 0.0340, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 05:37:27,901 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:36,389 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:44,243 INFO [train.py:901] (0/4) Epoch 8, batch 3500, loss[loss=0.2328, simple_loss=0.303, pruned_loss=0.08131, over 7929.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.325, pruned_loss=0.09287, over 1606075.60 frames. ], batch size: 20, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:45,046 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:53,896 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60096.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:38:02,916 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 05:38:18,657 INFO [train.py:901] (0/4) Epoch 8, batch 3550, loss[loss=0.2266, simple_loss=0.2894, pruned_loss=0.08192, over 7529.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3253, pruned_loss=0.09293, over 1609189.94 frames. ], batch size: 18, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:38:22,098 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.844e+02 3.449e+02 4.512e+02 7.529e+02, threshold=6.898e+02, percent-clipped=5.0 +2023-02-06 05:38:54,378 INFO [train.py:901] (0/4) Epoch 8, batch 3600, loss[loss=0.2685, simple_loss=0.3362, pruned_loss=0.1004, over 8357.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3249, pruned_loss=0.09293, over 1609731.93 frames. 
], batch size: 26, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:08,515 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4171, 1.5126, 2.8883, 1.2479, 2.1302, 3.2326, 3.2210, 2.7421], + device='cuda:0'), covar=tensor([0.1254, 0.1499, 0.0490, 0.1969, 0.0976, 0.0307, 0.0513, 0.0689], + device='cuda:0'), in_proj_covar=tensor([0.0246, 0.0279, 0.0238, 0.0270, 0.0250, 0.0221, 0.0284, 0.0283], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 05:39:14,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:18,777 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:29,810 INFO [train.py:901] (0/4) Epoch 8, batch 3650, loss[loss=0.2599, simple_loss=0.3338, pruned_loss=0.09299, over 7937.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3252, pruned_loss=0.09231, over 1617225.48 frames. ], batch size: 20, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:32,061 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:33,119 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.875e+02 2.702e+02 3.457e+02 4.155e+02 9.631e+02, threshold=6.915e+02, percent-clipped=4.0 +2023-02-06 05:39:42,736 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:48,804 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:02,764 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 05:40:04,773 INFO [train.py:901] (0/4) Epoch 8, batch 3700, loss[loss=0.2843, simple_loss=0.3429, pruned_loss=0.1129, over 8022.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.326, pruned_loss=0.09283, over 1616402.84 frames. ], batch size: 22, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:34,558 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:39,100 INFO [train.py:901] (0/4) Epoch 8, batch 3750, loss[loss=0.1905, simple_loss=0.2693, pruned_loss=0.05587, over 7544.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3249, pruned_loss=0.09227, over 1613347.57 frames. ], batch size: 18, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:43,045 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.685e+02 3.295e+02 3.882e+02 8.274e+02, threshold=6.589e+02, percent-clipped=2.0 +2023-02-06 05:41:02,727 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:13,776 INFO [train.py:901] (0/4) Epoch 8, batch 3800, loss[loss=0.2356, simple_loss=0.3015, pruned_loss=0.08487, over 7442.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3246, pruned_loss=0.09239, over 1610273.75 frames. 
], batch size: 17, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:41:16,616 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4558, 1.8840, 3.1223, 2.3597, 2.7044, 2.1743, 1.6761, 1.2147], + device='cuda:0'), covar=tensor([0.2834, 0.3348, 0.0795, 0.2027, 0.1327, 0.1663, 0.1353, 0.3344], + device='cuda:0'), in_proj_covar=tensor([0.0836, 0.0811, 0.0692, 0.0786, 0.0892, 0.0741, 0.0680, 0.0730], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:41:21,924 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.2106, 1.8166, 5.2953, 2.2192, 4.7816, 4.4731, 4.9024, 4.7836], + device='cuda:0'), covar=tensor([0.0418, 0.3442, 0.0330, 0.2653, 0.0799, 0.0642, 0.0401, 0.0427], + device='cuda:0'), in_proj_covar=tensor([0.0414, 0.0537, 0.0513, 0.0481, 0.0549, 0.0467, 0.0461, 0.0514], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 05:41:28,151 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:36,458 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:45,866 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:49,067 INFO [train.py:901] (0/4) Epoch 8, batch 3850, loss[loss=0.2592, simple_loss=0.329, pruned_loss=0.09475, over 8033.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3252, pruned_loss=0.09283, over 1609144.91 frames. ], batch size: 22, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:41:52,285 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.691e+02 3.271e+02 4.212e+02 1.032e+03, threshold=6.541e+02, percent-clipped=5.0 +2023-02-06 05:41:54,194 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:08,504 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 05:42:22,376 INFO [train.py:901] (0/4) Epoch 8, batch 3900, loss[loss=0.209, simple_loss=0.2804, pruned_loss=0.06881, over 8098.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3255, pruned_loss=0.09301, over 1612565.17 frames. ], batch size: 21, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:42:45,280 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4512, 1.8879, 2.0427, 1.1446, 2.1625, 1.4220, 0.6485, 1.5565], + device='cuda:0'), covar=tensor([0.0338, 0.0173, 0.0110, 0.0289, 0.0238, 0.0506, 0.0459, 0.0183], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0278, 0.0227, 0.0336, 0.0269, 0.0430, 0.0330, 0.0307], + device='cuda:0'), out_proj_covar=tensor([1.0888e-04, 8.3411e-05, 6.7611e-05, 1.0025e-04, 8.2620e-05, 1.4129e-04, + 1.0130e-04, 9.3300e-05], device='cuda:0') +2023-02-06 05:42:46,589 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:55,283 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:57,129 INFO [train.py:901] (0/4) Epoch 8, batch 3950, loss[loss=0.2929, simple_loss=0.3553, pruned_loss=0.1152, over 8109.00 frames. 
], tot_loss[loss=0.2555, simple_loss=0.3254, pruned_loss=0.09275, over 1616439.49 frames. ], batch size: 23, lr: 9.59e-03, grad_scale: 8.0 +2023-02-06 05:43:00,412 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.562e+02 3.362e+02 4.082e+02 8.516e+02, threshold=6.724e+02, percent-clipped=2.0 +2023-02-06 05:43:03,866 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:13,341 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:16,391 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:19,340 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.78 vs. limit=5.0 +2023-02-06 05:43:31,323 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:31,760 INFO [train.py:901] (0/4) Epoch 8, batch 4000, loss[loss=0.2399, simple_loss=0.3031, pruned_loss=0.08838, over 7803.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3257, pruned_loss=0.09292, over 1612689.16 frames. ], batch size: 19, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:43:48,250 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:59,775 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:06,259 INFO [train.py:901] (0/4) Epoch 8, batch 4050, loss[loss=0.2746, simple_loss=0.3419, pruned_loss=0.1037, over 8467.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3255, pruned_loss=0.09271, over 1612358.67 frames. ], batch size: 29, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:44:09,148 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7976, 1.5031, 3.0169, 1.1552, 2.0619, 3.2716, 3.3271, 2.7692], + device='cuda:0'), covar=tensor([0.1076, 0.1509, 0.0444, 0.2241, 0.0949, 0.0311, 0.0502, 0.0678], + device='cuda:0'), in_proj_covar=tensor([0.0245, 0.0279, 0.0238, 0.0271, 0.0248, 0.0219, 0.0285, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 05:44:09,649 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.835e+02 3.722e+02 4.462e+02 8.493e+02, threshold=7.445e+02, percent-clipped=1.0 +2023-02-06 05:44:17,205 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:19,828 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8878, 1.5840, 3.3348, 1.3538, 2.2316, 3.6190, 3.6077, 3.0497], + device='cuda:0'), covar=tensor([0.0997, 0.1358, 0.0293, 0.1832, 0.0839, 0.0228, 0.0400, 0.0624], + device='cuda:0'), in_proj_covar=tensor([0.0244, 0.0278, 0.0237, 0.0271, 0.0248, 0.0218, 0.0284, 0.0281], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 05:44:36,977 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:41,544 INFO [train.py:901] (0/4) Epoch 8, batch 4100, loss[loss=0.2761, simple_loss=0.3434, pruned_loss=0.1044, over 8474.00 frames. 
], tot_loss[loss=0.2544, simple_loss=0.3246, pruned_loss=0.09209, over 1613223.71 frames. ], batch size: 27, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:45:04,536 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 05:45:08,295 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1595, 4.1977, 3.7146, 1.6766, 3.6571, 3.6151, 3.7822, 3.2667], + device='cuda:0'), covar=tensor([0.0913, 0.0719, 0.1311, 0.5077, 0.0976, 0.1123, 0.1655, 0.1039], + device='cuda:0'), in_proj_covar=tensor([0.0420, 0.0332, 0.0353, 0.0452, 0.0352, 0.0332, 0.0341, 0.0288], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:45:16,315 INFO [train.py:901] (0/4) Epoch 8, batch 4150, loss[loss=0.272, simple_loss=0.3385, pruned_loss=0.1027, over 7986.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.324, pruned_loss=0.09189, over 1610817.95 frames. ], batch size: 21, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:45:19,083 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:19,620 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.904e+02 3.574e+02 4.093e+02 8.234e+02, threshold=7.147e+02, percent-clipped=2.0 +2023-02-06 05:45:33,750 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 05:45:45,313 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:51,066 INFO [train.py:901] (0/4) Epoch 8, batch 4200, loss[loss=0.2078, simple_loss=0.2937, pruned_loss=0.06089, over 7970.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3264, pruned_loss=0.09316, over 1616682.19 frames. ], batch size: 21, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:45:54,021 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,583 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,603 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:07,984 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 05:46:10,873 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:11,504 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60811.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:20,154 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:25,966 INFO [train.py:901] (0/4) Epoch 8, batch 4250, loss[loss=0.2939, simple_loss=0.3657, pruned_loss=0.111, over 8559.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.326, pruned_loss=0.0932, over 1613503.12 frames. 
], batch size: 39, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:46:28,891 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:30,092 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.807e+02 3.546e+02 4.515e+02 1.213e+03, threshold=7.092e+02, percent-clipped=3.0 +2023-02-06 05:46:30,813 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 05:46:49,911 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8683, 2.1077, 1.7096, 2.5906, 1.3765, 1.6194, 1.8200, 2.2531], + device='cuda:0'), covar=tensor([0.0849, 0.0868, 0.1125, 0.0448, 0.1265, 0.1480, 0.1116, 0.0826], + device='cuda:0'), in_proj_covar=tensor([0.0248, 0.0233, 0.0269, 0.0220, 0.0228, 0.0265, 0.0268, 0.0235], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 05:47:01,071 INFO [train.py:901] (0/4) Epoch 8, batch 4300, loss[loss=0.2118, simple_loss=0.2957, pruned_loss=0.06396, over 8190.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3271, pruned_loss=0.09382, over 1615166.37 frames. ], batch size: 23, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:47:35,202 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:35,682 INFO [train.py:901] (0/4) Epoch 8, batch 4350, loss[loss=0.2115, simple_loss=0.2891, pruned_loss=0.06697, over 8038.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3257, pruned_loss=0.09314, over 1614933.65 frames. ], batch size: 22, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:47:39,642 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.751e+02 3.442e+02 4.335e+02 7.709e+02, threshold=6.884e+02, percent-clipped=1.0 +2023-02-06 05:47:51,971 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:54,693 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6416, 1.2770, 4.7714, 1.8387, 4.1644, 3.9470, 4.3931, 4.1650], + device='cuda:0'), covar=tensor([0.0482, 0.4124, 0.0374, 0.3004, 0.0984, 0.0807, 0.0426, 0.0524], + device='cuda:0'), in_proj_covar=tensor([0.0419, 0.0540, 0.0521, 0.0490, 0.0548, 0.0469, 0.0467, 0.0520], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 05:47:59,291 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 05:48:10,325 INFO [train.py:901] (0/4) Epoch 8, batch 4400, loss[loss=0.2344, simple_loss=0.3041, pruned_loss=0.08231, over 8468.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3249, pruned_loss=0.09252, over 1615071.16 frames. 
], batch size: 27, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:48:33,374 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1954, 1.3146, 2.3075, 1.0539, 1.7089, 1.5396, 1.2338, 1.5522], + device='cuda:0'), covar=tensor([0.2081, 0.2478, 0.0720, 0.4216, 0.1480, 0.3090, 0.2251, 0.1846], + device='cuda:0'), in_proj_covar=tensor([0.0471, 0.0487, 0.0522, 0.0559, 0.0598, 0.0542, 0.0455, 0.0593], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:48:42,544 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 05:48:44,683 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0086, 2.4571, 2.7436, 1.5023, 3.1320, 1.8234, 1.4374, 1.5875], + device='cuda:0'), covar=tensor([0.0404, 0.0201, 0.0158, 0.0346, 0.0196, 0.0471, 0.0476, 0.0271], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0280, 0.0227, 0.0339, 0.0268, 0.0430, 0.0333, 0.0306], + device='cuda:0'), out_proj_covar=tensor([1.0817e-04, 8.3407e-05, 6.7626e-05, 1.0127e-04, 8.2175e-05, 1.4119e-04, + 1.0195e-04, 9.2573e-05], device='cuda:0') +2023-02-06 05:48:45,134 INFO [train.py:901] (0/4) Epoch 8, batch 4450, loss[loss=0.2639, simple_loss=0.3407, pruned_loss=0.09352, over 8507.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3253, pruned_loss=0.09301, over 1611045.23 frames. ], batch size: 26, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:48:49,128 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.403e+02 3.130e+02 3.948e+02 8.767e+02, threshold=6.260e+02, percent-clipped=3.0 +2023-02-06 05:49:11,085 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 05:49:17,369 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:17,967 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:19,297 INFO [train.py:901] (0/4) Epoch 8, batch 4500, loss[loss=0.2528, simple_loss=0.3298, pruned_loss=0.08788, over 8101.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3252, pruned_loss=0.09326, over 1609516.45 frames. ], batch size: 23, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:36,537 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 05:49:53,484 INFO [train.py:901] (0/4) Epoch 8, batch 4550, loss[loss=0.3066, simple_loss=0.3605, pruned_loss=0.1264, over 6962.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3262, pruned_loss=0.09428, over 1606166.98 frames. ], batch size: 71, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:58,150 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.768e+02 3.493e+02 4.645e+02 1.007e+03, threshold=6.986e+02, percent-clipped=6.0 +2023-02-06 05:50:29,329 INFO [train.py:901] (0/4) Epoch 8, batch 4600, loss[loss=0.2789, simple_loss=0.3334, pruned_loss=0.1122, over 8091.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3259, pruned_loss=0.09423, over 1603968.76 frames. 
], batch size: 21, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:50:38,242 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:51:04,224 INFO [train.py:901] (0/4) Epoch 8, batch 4650, loss[loss=0.2913, simple_loss=0.3677, pruned_loss=0.1074, over 8475.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3249, pruned_loss=0.09336, over 1603969.03 frames. ], batch size: 25, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:51:08,277 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 2.693e+02 3.115e+02 3.876e+02 8.832e+02, threshold=6.229e+02, percent-clipped=3.0 +2023-02-06 05:51:38,689 INFO [train.py:901] (0/4) Epoch 8, batch 4700, loss[loss=0.2837, simple_loss=0.3571, pruned_loss=0.1052, over 8102.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3253, pruned_loss=0.09339, over 1603474.10 frames. ], batch size: 23, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:52:13,892 INFO [train.py:901] (0/4) Epoch 8, batch 4750, loss[loss=0.2182, simple_loss=0.3007, pruned_loss=0.06781, over 7968.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3261, pruned_loss=0.09323, over 1608351.98 frames. ], batch size: 21, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:52:17,856 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.870e+02 3.425e+02 4.672e+02 9.837e+02, threshold=6.850e+02, percent-clipped=8.0 +2023-02-06 05:52:36,769 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 05:52:40,112 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 05:52:48,197 INFO [train.py:901] (0/4) Epoch 8, batch 4800, loss[loss=0.2235, simple_loss=0.2849, pruned_loss=0.08108, over 7249.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3248, pruned_loss=0.09228, over 1608984.65 frames. ], batch size: 16, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:52:53,983 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4574, 1.8543, 3.4667, 1.1919, 2.5247, 2.0314, 1.4841, 2.3027], + device='cuda:0'), covar=tensor([0.1573, 0.2021, 0.0625, 0.3501, 0.1397, 0.2348, 0.1746, 0.2069], + device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0492, 0.0528, 0.0566, 0.0606, 0.0542, 0.0457, 0.0600], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:53:16,782 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:22,688 INFO [train.py:901] (0/4) Epoch 8, batch 4850, loss[loss=0.2231, simple_loss=0.2934, pruned_loss=0.07642, over 7639.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3245, pruned_loss=0.09243, over 1608965.60 frames. ], batch size: 19, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:53:26,658 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.780e+02 3.448e+02 4.323e+02 7.771e+02, threshold=6.895e+02, percent-clipped=1.0 +2023-02-06 05:53:28,679 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 05:53:36,184 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61451.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:53,267 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:56,861 INFO [train.py:901] (0/4) Epoch 8, batch 4900, loss[loss=0.2503, simple_loss=0.3257, pruned_loss=0.08744, over 8495.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3243, pruned_loss=0.09219, over 1606326.61 frames. ], batch size: 28, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:31,292 INFO [train.py:901] (0/4) Epoch 8, batch 4950, loss[loss=0.2773, simple_loss=0.3486, pruned_loss=0.103, over 8505.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3245, pruned_loss=0.09224, over 1607358.49 frames. ], batch size: 26, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:35,324 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.701e+02 3.325e+02 4.582e+02 7.633e+02, threshold=6.649e+02, percent-clipped=1.0 +2023-02-06 05:54:35,524 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61538.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:54:53,018 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 05:55:07,162 INFO [train.py:901] (0/4) Epoch 8, batch 5000, loss[loss=0.211, simple_loss=0.3018, pruned_loss=0.06012, over 8365.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3238, pruned_loss=0.09184, over 1605638.91 frames. ], batch size: 24, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:15,979 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.09 vs. limit=2.0 +2023-02-06 05:55:25,674 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-06 05:55:42,154 INFO [train.py:901] (0/4) Epoch 8, batch 5050, loss[loss=0.265, simple_loss=0.3236, pruned_loss=0.1032, over 7534.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3229, pruned_loss=0.09161, over 1604782.74 frames. ], batch size: 18, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:46,797 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.771e+02 3.459e+02 4.924e+02 1.310e+03, threshold=6.919e+02, percent-clipped=9.0 +2023-02-06 05:56:07,737 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 05:56:17,402 INFO [train.py:901] (0/4) Epoch 8, batch 5100, loss[loss=0.2011, simple_loss=0.2805, pruned_loss=0.06089, over 8237.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3233, pruned_loss=0.09206, over 1606200.97 frames. ], batch size: 22, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:19,699 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4108, 1.6277, 2.8211, 1.1389, 2.1580, 1.7238, 1.5087, 1.8258], + device='cuda:0'), covar=tensor([0.1647, 0.2190, 0.0650, 0.3712, 0.1314, 0.2764, 0.1767, 0.1920], + device='cuda:0'), in_proj_covar=tensor([0.0478, 0.0491, 0.0536, 0.0568, 0.0609, 0.0540, 0.0458, 0.0601], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 05:56:52,943 INFO [train.py:901] (0/4) Epoch 8, batch 5150, loss[loss=0.2973, simple_loss=0.3667, pruned_loss=0.114, over 8514.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3244, pruned_loss=0.09216, over 1611096.45 frames. 
], batch size: 26, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:57,121 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.584e+02 3.190e+02 4.018e+02 8.337e+02, threshold=6.381e+02, percent-clipped=2.0 +2023-02-06 05:57:05,484 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:13,682 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:27,544 INFO [train.py:901] (0/4) Epoch 8, batch 5200, loss[loss=0.2514, simple_loss=0.3291, pruned_loss=0.08683, over 8473.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.326, pruned_loss=0.09319, over 1615730.57 frames. ], batch size: 27, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:57:35,703 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:53,066 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:58:02,415 INFO [train.py:901] (0/4) Epoch 8, batch 5250, loss[loss=0.1854, simple_loss=0.2645, pruned_loss=0.05313, over 7269.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3245, pruned_loss=0.0925, over 1614778.84 frames. ], batch size: 16, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:58:06,461 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.711e+02 3.309e+02 4.013e+02 1.150e+03, threshold=6.618e+02, percent-clipped=3.0 +2023-02-06 05:58:07,808 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 05:58:37,169 INFO [train.py:901] (0/4) Epoch 8, batch 5300, loss[loss=0.2238, simple_loss=0.3115, pruned_loss=0.06802, over 8496.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3234, pruned_loss=0.09204, over 1610493.90 frames. ], batch size: 26, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:12,099 INFO [train.py:901] (0/4) Epoch 8, batch 5350, loss[loss=0.2264, simple_loss=0.3096, pruned_loss=0.07162, over 8471.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3237, pruned_loss=0.09184, over 1611643.26 frames. ], batch size: 25, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:12,250 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:59:16,966 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.785e+02 2.596e+02 3.249e+02 3.983e+02 1.109e+03, threshold=6.498e+02, percent-clipped=6.0 +2023-02-06 05:59:47,806 INFO [train.py:901] (0/4) Epoch 8, batch 5400, loss[loss=0.2553, simple_loss=0.3378, pruned_loss=0.08634, over 8472.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3239, pruned_loss=0.09201, over 1612506.60 frames. ], batch size: 27, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:01,018 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-62000.pt +2023-02-06 06:00:23,971 INFO [train.py:901] (0/4) Epoch 8, batch 5450, loss[loss=0.2534, simple_loss=0.3319, pruned_loss=0.08739, over 8131.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3244, pruned_loss=0.09157, over 1615855.46 frames. 
], batch size: 22, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:28,693 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.625e+02 3.240e+02 4.068e+02 8.471e+02, threshold=6.479e+02, percent-clipped=5.0 +2023-02-06 06:01:00,417 INFO [train.py:901] (0/4) Epoch 8, batch 5500, loss[loss=0.2566, simple_loss=0.3335, pruned_loss=0.0898, over 8718.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.325, pruned_loss=0.09186, over 1619020.85 frames. ], batch size: 34, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:01,800 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 06:01:08,451 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:15,827 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:34,831 INFO [train.py:901] (0/4) Epoch 8, batch 5550, loss[loss=0.2294, simple_loss=0.298, pruned_loss=0.08041, over 7791.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.325, pruned_loss=0.09165, over 1618581.97 frames. ], batch size: 19, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:38,602 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.724e+02 3.277e+02 4.222e+02 9.983e+02, threshold=6.553e+02, percent-clipped=5.0 +2023-02-06 06:01:40,098 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:41,543 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0164, 1.2211, 1.2105, 0.4185, 1.2400, 0.9963, 0.0686, 1.0116], + device='cuda:0'), covar=tensor([0.0252, 0.0177, 0.0164, 0.0318, 0.0206, 0.0520, 0.0438, 0.0191], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0275, 0.0225, 0.0335, 0.0264, 0.0422, 0.0324, 0.0304], + device='cuda:0'), out_proj_covar=tensor([1.0585e-04, 8.1793e-05, 6.6734e-05, 1.0025e-04, 8.0082e-05, 1.3780e-04, + 9.9031e-05, 9.1440e-05], device='cuda:0') +2023-02-06 06:02:09,237 INFO [train.py:901] (0/4) Epoch 8, batch 5600, loss[loss=0.3296, simple_loss=0.373, pruned_loss=0.1431, over 7242.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3259, pruned_loss=0.09248, over 1615327.14 frames. 
], batch size: 72, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:02:19,258 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0831, 1.2146, 4.1867, 1.4718, 3.6363, 3.4709, 3.7587, 3.6020], + device='cuda:0'), covar=tensor([0.0488, 0.4139, 0.0474, 0.3190, 0.1109, 0.0833, 0.0549, 0.0668], + device='cuda:0'), in_proj_covar=tensor([0.0431, 0.0540, 0.0522, 0.0496, 0.0559, 0.0470, 0.0473, 0.0525], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 06:02:27,944 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:35,377 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:42,271 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9145, 1.6065, 3.2526, 1.3974, 2.2017, 3.5703, 3.5005, 2.9533], + device='cuda:0'), covar=tensor([0.1020, 0.1369, 0.0368, 0.1901, 0.0920, 0.0257, 0.0463, 0.0728], + device='cuda:0'), in_proj_covar=tensor([0.0250, 0.0279, 0.0241, 0.0271, 0.0253, 0.0222, 0.0289, 0.0283], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:02:44,113 INFO [train.py:901] (0/4) Epoch 8, batch 5650, loss[loss=0.2363, simple_loss=0.3118, pruned_loss=0.08035, over 8243.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3261, pruned_loss=0.09316, over 1614680.22 frames. ], batch size: 22, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:02:48,206 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.893e+02 3.442e+02 4.058e+02 7.819e+02, threshold=6.884e+02, percent-clipped=2.0 +2023-02-06 06:03:03,304 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 06:03:14,924 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:16,527 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4577, 1.9005, 3.0228, 2.4532, 2.6578, 2.1273, 1.6117, 1.3886], + device='cuda:0'), covar=tensor([0.2651, 0.3161, 0.0833, 0.1876, 0.1420, 0.1675, 0.1478, 0.3314], + device='cuda:0'), in_proj_covar=tensor([0.0849, 0.0805, 0.0679, 0.0792, 0.0893, 0.0740, 0.0675, 0.0730], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:03:19,746 INFO [train.py:901] (0/4) Epoch 8, batch 5700, loss[loss=0.2063, simple_loss=0.2789, pruned_loss=0.06679, over 7807.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3253, pruned_loss=0.09268, over 1615031.45 frames. ], batch size: 20, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:03:26,007 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:53,764 INFO [train.py:901] (0/4) Epoch 8, batch 5750, loss[loss=0.2369, simple_loss=0.3028, pruned_loss=0.08548, over 7976.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.324, pruned_loss=0.09225, over 1614503.73 frames. 
], batch size: 21, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:03:58,445 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.702e+02 3.342e+02 4.214e+02 1.406e+03, threshold=6.684e+02, percent-clipped=3.0 +2023-02-06 06:04:07,822 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 06:04:28,273 INFO [train.py:901] (0/4) Epoch 8, batch 5800, loss[loss=0.2556, simple_loss=0.3427, pruned_loss=0.08427, over 8198.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3236, pruned_loss=0.09204, over 1610729.88 frames. ], batch size: 23, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:04:35,226 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62391.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:04:48,144 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:04:52,278 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1168, 2.4859, 1.9038, 2.8378, 1.2843, 1.5609, 1.8506, 2.2581], + device='cuda:0'), covar=tensor([0.0963, 0.0899, 0.1291, 0.0420, 0.1488, 0.1842, 0.1330, 0.1057], + device='cuda:0'), in_proj_covar=tensor([0.0248, 0.0225, 0.0264, 0.0214, 0.0228, 0.0261, 0.0263, 0.0231], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 06:05:04,103 INFO [train.py:901] (0/4) Epoch 8, batch 5850, loss[loss=0.3017, simple_loss=0.3537, pruned_loss=0.1249, over 7003.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3236, pruned_loss=0.09198, over 1607964.55 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:05:04,209 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7824, 5.8866, 5.0048, 2.5108, 5.1534, 5.6172, 5.4292, 5.1120], + device='cuda:0'), covar=tensor([0.0861, 0.0626, 0.1096, 0.4407, 0.0747, 0.0578, 0.1479, 0.0536], + device='cuda:0'), in_proj_covar=tensor([0.0435, 0.0344, 0.0363, 0.0462, 0.0361, 0.0341, 0.0350, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:05:08,211 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.970e+02 2.690e+02 3.286e+02 4.000e+02 6.740e+02, threshold=6.571e+02, percent-clipped=1.0 +2023-02-06 06:05:17,217 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 06:05:27,159 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:34,603 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:38,455 INFO [train.py:901] (0/4) Epoch 8, batch 5900, loss[loss=0.2374, simple_loss=0.3342, pruned_loss=0.07032, over 8341.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3236, pruned_loss=0.09125, over 1610004.03 frames. 
], batch size: 26, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:05:39,866 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:44,013 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:51,355 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:54,094 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0008, 2.3878, 2.7072, 1.3268, 2.7402, 1.8286, 1.6292, 1.7420], + device='cuda:0'), covar=tensor([0.0410, 0.0208, 0.0138, 0.0349, 0.0224, 0.0401, 0.0434, 0.0237], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0283, 0.0230, 0.0341, 0.0273, 0.0432, 0.0333, 0.0310], + device='cuda:0'), out_proj_covar=tensor([1.0759e-04, 8.3933e-05, 6.7697e-05, 1.0158e-04, 8.2743e-05, 1.4112e-04, + 1.0179e-04, 9.2918e-05], device='cuda:0') +2023-02-06 06:06:13,419 INFO [train.py:901] (0/4) Epoch 8, batch 5950, loss[loss=0.3018, simple_loss=0.3601, pruned_loss=0.1218, over 7240.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3242, pruned_loss=0.09178, over 1609514.72 frames. ], batch size: 72, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:15,676 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0848, 2.9957, 3.2018, 1.9342, 1.6585, 3.4044, 0.5464, 2.1765], + device='cuda:0'), covar=tensor([0.2002, 0.1881, 0.0544, 0.3466, 0.5612, 0.0351, 0.5704, 0.2652], + device='cuda:0'), in_proj_covar=tensor([0.0152, 0.0154, 0.0089, 0.0202, 0.0243, 0.0092, 0.0162, 0.0155], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:0') +2023-02-06 06:06:17,412 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 2.652e+02 3.220e+02 3.904e+02 8.315e+02, threshold=6.439e+02, percent-clipped=2.0 +2023-02-06 06:06:34,787 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:06:47,824 INFO [train.py:901] (0/4) Epoch 8, batch 6000, loss[loss=0.2345, simple_loss=0.2888, pruned_loss=0.09013, over 7262.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3241, pruned_loss=0.09174, over 1608096.94 frames. ], batch size: 16, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:47,824 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 06:07:00,017 INFO [train.py:935] (0/4) Epoch 8, validation: loss=0.1996, simple_loss=0.2985, pruned_loss=0.05037, over 944034.00 frames. +2023-02-06 06:07:00,018 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 06:07:12,292 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62599.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:14,954 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:33,808 INFO [train.py:901] (0/4) Epoch 8, batch 6050, loss[loss=0.2136, simple_loss=0.2881, pruned_loss=0.0696, over 7804.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.326, pruned_loss=0.09307, over 1610109.38 frames. 
], batch size: 20, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:07:35,914 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:37,849 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.575e+02 3.268e+02 4.071e+02 9.720e+02, threshold=6.536e+02, percent-clipped=3.0 +2023-02-06 06:07:44,189 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:02,400 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:08,751 INFO [train.py:901] (0/4) Epoch 8, batch 6100, loss[loss=0.2175, simple_loss=0.2905, pruned_loss=0.07223, over 6014.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3255, pruned_loss=0.0929, over 1607240.99 frames. ], batch size: 13, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:08:10,156 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:21,655 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 06:08:37,233 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 06:08:43,079 INFO [train.py:901] (0/4) Epoch 8, batch 6150, loss[loss=0.2426, simple_loss=0.3255, pruned_loss=0.0798, over 8508.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3261, pruned_loss=0.09356, over 1611507.50 frames. ], batch size: 26, lr: 9.42e-03, grad_scale: 8.0 +2023-02-06 06:08:47,076 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.667e+02 3.544e+02 4.037e+02 8.376e+02, threshold=7.087e+02, percent-clipped=5.0 +2023-02-06 06:08:55,074 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:57,034 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:09,054 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8186, 2.8133, 2.3042, 3.8244, 1.5479, 1.8373, 2.0479, 3.1056], + device='cuda:0'), covar=tensor([0.0666, 0.1070, 0.1095, 0.0275, 0.1468, 0.1766, 0.1423, 0.0834], + device='cuda:0'), in_proj_covar=tensor([0.0249, 0.0226, 0.0268, 0.0215, 0.0229, 0.0263, 0.0264, 0.0232], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 06:09:17,709 INFO [train.py:901] (0/4) Epoch 8, batch 6200, loss[loss=0.2445, simple_loss=0.3114, pruned_loss=0.08883, over 8469.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3249, pruned_loss=0.09278, over 1613511.49 frames. ], batch size: 25, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:23,750 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:52,744 INFO [train.py:901] (0/4) Epoch 8, batch 6250, loss[loss=0.2177, simple_loss=0.2988, pruned_loss=0.06826, over 8120.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3234, pruned_loss=0.09169, over 1615898.35 frames. 
], batch size: 22, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:56,741 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.905e+02 2.717e+02 3.222e+02 4.596e+02 9.217e+02, threshold=6.445e+02, percent-clipped=3.0 +2023-02-06 06:09:57,784 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-06 06:10:08,418 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62855.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:16,900 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:24,983 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:26,036 INFO [train.py:901] (0/4) Epoch 8, batch 6300, loss[loss=0.2567, simple_loss=0.3377, pruned_loss=0.08779, over 7822.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3222, pruned_loss=0.091, over 1610378.88 frames. ], batch size: 20, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:10:28,871 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9653, 1.6973, 3.4689, 1.5815, 2.3097, 3.8841, 3.8188, 3.2738], + device='cuda:0'), covar=tensor([0.1049, 0.1308, 0.0339, 0.1811, 0.0916, 0.0228, 0.0478, 0.0626], + device='cuda:0'), in_proj_covar=tensor([0.0249, 0.0282, 0.0244, 0.0272, 0.0253, 0.0225, 0.0292, 0.0284], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:10:44,084 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:44,437 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 06:10:50,363 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-06 06:11:01,296 INFO [train.py:901] (0/4) Epoch 8, batch 6350, loss[loss=0.2747, simple_loss=0.3314, pruned_loss=0.109, over 7927.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3216, pruned_loss=0.09046, over 1611539.76 frames. ], batch size: 20, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:05,345 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.824e+02 3.504e+02 4.161e+02 7.437e+02, threshold=7.007e+02, percent-clipped=2.0 +2023-02-06 06:11:09,081 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.72 vs. limit=2.0 +2023-02-06 06:11:12,101 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:12,141 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6590, 5.7370, 5.0033, 2.0701, 5.0042, 5.3441, 5.2730, 4.7532], + device='cuda:0'), covar=tensor([0.0606, 0.0384, 0.0785, 0.4840, 0.0741, 0.0613, 0.0944, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0420, 0.0334, 0.0350, 0.0443, 0.0349, 0.0329, 0.0341, 0.0291], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:11:20,472 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.68 vs. limit=5.0 +2023-02-06 06:11:35,235 INFO [train.py:901] (0/4) Epoch 8, batch 6400, loss[loss=0.3117, simple_loss=0.3723, pruned_loss=0.1255, over 8337.00 frames. 
], tot_loss[loss=0.2515, simple_loss=0.3218, pruned_loss=0.09057, over 1616617.41 frames. ], batch size: 26, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:43,457 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3602, 2.2920, 3.1743, 2.0904, 2.8021, 3.4981, 3.3671, 3.1924], + device='cuda:0'), covar=tensor([0.0798, 0.1000, 0.0544, 0.1410, 0.0848, 0.0254, 0.0505, 0.0514], + device='cuda:0'), in_proj_covar=tensor([0.0250, 0.0279, 0.0241, 0.0271, 0.0249, 0.0222, 0.0290, 0.0283], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:11:52,108 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:03,482 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:06,107 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5033, 1.3329, 4.6509, 1.8821, 4.0743, 3.8882, 4.1250, 4.1007], + device='cuda:0'), covar=tensor([0.0451, 0.4302, 0.0379, 0.2903, 0.0960, 0.0701, 0.0535, 0.0595], + device='cuda:0'), in_proj_covar=tensor([0.0430, 0.0542, 0.0524, 0.0496, 0.0555, 0.0466, 0.0472, 0.0527], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 06:12:07,444 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:09,561 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:10,070 INFO [train.py:901] (0/4) Epoch 8, batch 6450, loss[loss=0.2281, simple_loss=0.3194, pruned_loss=0.06836, over 8285.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3219, pruned_loss=0.0901, over 1614419.26 frames. ], batch size: 23, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:12:14,122 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 2.983e+02 3.820e+02 5.218e+02 9.633e+02, threshold=7.640e+02, percent-clipped=4.0 +2023-02-06 06:12:16,481 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5422, 1.8950, 3.1849, 1.3028, 2.3370, 1.9580, 1.6853, 1.9892], + device='cuda:0'), covar=tensor([0.1570, 0.2139, 0.0555, 0.3378, 0.1259, 0.2373, 0.1542, 0.2087], + device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0490, 0.0538, 0.0570, 0.0604, 0.0537, 0.0459, 0.0607], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 06:12:31,364 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:45,218 INFO [train.py:901] (0/4) Epoch 8, batch 6500, loss[loss=0.2892, simple_loss=0.3508, pruned_loss=0.1138, over 8026.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3205, pruned_loss=0.08944, over 1611557.02 frames. ], batch size: 22, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:03,585 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.03 vs. limit=5.0 +2023-02-06 06:13:14,195 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:20,105 INFO [train.py:901] (0/4) Epoch 8, batch 6550, loss[loss=0.2659, simple_loss=0.3467, pruned_loss=0.09256, over 8498.00 frames. 
], tot_loss[loss=0.2512, simple_loss=0.3221, pruned_loss=0.09014, over 1616805.24 frames. ], batch size: 29, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:22,296 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:24,223 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.645e+02 3.116e+02 3.905e+02 9.747e+02, threshold=6.232e+02, percent-clipped=3.0 +2023-02-06 06:13:27,744 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:27,804 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5686, 2.0609, 4.4834, 1.1699, 3.0654, 2.3209, 1.4829, 2.8453], + device='cuda:0'), covar=tensor([0.1494, 0.2003, 0.0607, 0.3269, 0.1253, 0.2187, 0.1533, 0.1916], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0487, 0.0533, 0.0566, 0.0600, 0.0535, 0.0458, 0.0603], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:13:29,194 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2604, 2.5085, 1.8670, 2.0649, 2.0085, 1.4371, 1.6730, 2.0025], + device='cuda:0'), covar=tensor([0.1257, 0.0325, 0.0852, 0.0479, 0.0633, 0.1262, 0.0906, 0.0741], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0235, 0.0308, 0.0294, 0.0304, 0.0315, 0.0337, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 06:13:31,958 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:46,245 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7658, 1.6680, 1.9096, 1.3648, 1.0148, 1.9881, 0.2233, 1.3290], + device='cuda:0'), covar=tensor([0.2667, 0.1838, 0.0626, 0.2146, 0.5692, 0.0555, 0.3875, 0.1940], + device='cuda:0'), in_proj_covar=tensor([0.0153, 0.0155, 0.0091, 0.0204, 0.0243, 0.0095, 0.0159, 0.0158], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:13:49,407 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 06:13:54,672 INFO [train.py:901] (0/4) Epoch 8, batch 6600, loss[loss=0.2346, simple_loss=0.3134, pruned_loss=0.07795, over 8475.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3233, pruned_loss=0.09107, over 1613413.37 frames. ], batch size: 25, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:13:56,251 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5787, 1.9907, 3.0898, 2.4440, 2.7065, 2.2969, 1.8024, 1.3348], + device='cuda:0'), covar=tensor([0.2884, 0.3413, 0.0811, 0.2108, 0.1644, 0.1687, 0.1461, 0.3660], + device='cuda:0'), in_proj_covar=tensor([0.0845, 0.0806, 0.0680, 0.0792, 0.0892, 0.0741, 0.0673, 0.0727], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:14:07,499 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 06:14:25,323 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.45 vs. limit=5.0 +2023-02-06 06:14:29,745 INFO [train.py:901] (0/4) Epoch 8, batch 6650, loss[loss=0.2359, simple_loss=0.3198, pruned_loss=0.076, over 8470.00 frames. 
], tot_loss[loss=0.2532, simple_loss=0.3236, pruned_loss=0.09144, over 1612982.48 frames. ], batch size: 27, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:14:33,637 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 2.943e+02 3.528e+02 4.449e+02 1.178e+03, threshold=7.055e+02, percent-clipped=8.0 +2023-02-06 06:14:34,502 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:14:42,440 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:01,176 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:03,731 INFO [train.py:901] (0/4) Epoch 8, batch 6700, loss[loss=0.2333, simple_loss=0.3038, pruned_loss=0.08143, over 7810.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3237, pruned_loss=0.09156, over 1613450.88 frames. ], batch size: 20, lr: 9.38e-03, grad_scale: 16.0 +2023-02-06 06:15:19,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:29,352 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:34,401 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 06:15:38,588 INFO [train.py:901] (0/4) Epoch 8, batch 6750, loss[loss=0.2423, simple_loss=0.3184, pruned_loss=0.08308, over 8253.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3236, pruned_loss=0.09112, over 1611379.95 frames. ], batch size: 24, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:15:43,297 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.603e+02 3.068e+02 3.707e+02 1.416e+03, threshold=6.136e+02, percent-clipped=3.0 +2023-02-06 06:15:46,209 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:13,012 INFO [train.py:901] (0/4) Epoch 8, batch 6800, loss[loss=0.2485, simple_loss=0.314, pruned_loss=0.09152, over 7974.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3266, pruned_loss=0.09285, over 1620705.85 frames. ], batch size: 21, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:16:19,053 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6438, 5.6756, 4.9972, 2.4179, 5.0486, 5.3156, 5.2303, 4.8056], + device='cuda:0'), covar=tensor([0.0584, 0.0412, 0.0958, 0.4412, 0.0736, 0.0599, 0.0989, 0.0754], + device='cuda:0'), in_proj_covar=tensor([0.0436, 0.0343, 0.0360, 0.0451, 0.0358, 0.0335, 0.0349, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:16:20,977 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 06:16:24,459 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63399.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:42,711 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:47,937 INFO [train.py:901] (0/4) Epoch 8, batch 6850, loss[loss=0.2431, simple_loss=0.323, pruned_loss=0.08157, over 8242.00 frames. 
], tot_loss[loss=0.2564, simple_loss=0.3268, pruned_loss=0.09298, over 1623605.48 frames. ], batch size: 22, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:16:52,292 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:52,781 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.669e+02 3.418e+02 4.059e+02 7.847e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 06:16:53,659 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:57,223 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9496, 3.8521, 2.4130, 2.5069, 2.9139, 1.7499, 2.6114, 2.8918], + device='cuda:0'), covar=tensor([0.1537, 0.0282, 0.0883, 0.0794, 0.0597, 0.1320, 0.0989, 0.0971], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0237, 0.0309, 0.0295, 0.0306, 0.0315, 0.0336, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 06:17:11,112 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 06:17:22,529 INFO [train.py:901] (0/4) Epoch 8, batch 6900, loss[loss=0.2211, simple_loss=0.2968, pruned_loss=0.07269, over 7791.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3264, pruned_loss=0.09227, over 1625735.05 frames. ], batch size: 19, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:17:39,577 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,281 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,749 INFO [train.py:901] (0/4) Epoch 8, batch 6950, loss[loss=0.2459, simple_loss=0.3045, pruned_loss=0.09364, over 7983.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3256, pruned_loss=0.09229, over 1619660.79 frames. ], batch size: 21, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:18:03,563 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.671e+02 3.369e+02 4.495e+02 9.890e+02, threshold=6.738e+02, percent-clipped=4.0 +2023-02-06 06:18:18,587 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 06:18:32,004 INFO [train.py:901] (0/4) Epoch 8, batch 7000, loss[loss=0.2875, simple_loss=0.3435, pruned_loss=0.1158, over 8134.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.326, pruned_loss=0.09291, over 1619402.62 frames. ], batch size: 22, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:18:32,804 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:36,312 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:48,864 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:19:00,965 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 06:19:08,074 INFO [train.py:901] (0/4) Epoch 8, batch 7050, loss[loss=0.2477, simple_loss=0.3289, pruned_loss=0.08326, over 8292.00 frames. 
], tot_loss[loss=0.2542, simple_loss=0.3248, pruned_loss=0.09184, over 1611300.47 frames. ], batch size: 23, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:19:12,575 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.720e+02 3.242e+02 3.930e+02 7.648e+02, threshold=6.484e+02, percent-clipped=3.0 +2023-02-06 06:19:38,210 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.73 vs. limit=5.0 +2023-02-06 06:19:42,433 INFO [train.py:901] (0/4) Epoch 8, batch 7100, loss[loss=0.2587, simple_loss=0.3274, pruned_loss=0.09494, over 8475.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3242, pruned_loss=0.0918, over 1608682.13 frames. ], batch size: 25, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:19:53,540 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:17,179 INFO [train.py:901] (0/4) Epoch 8, batch 7150, loss[loss=0.2446, simple_loss=0.3083, pruned_loss=0.0904, over 7802.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.326, pruned_loss=0.09259, over 1614388.81 frames. ], batch size: 20, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:21,739 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.778e+02 3.549e+02 4.516e+02 1.097e+03, threshold=7.098e+02, percent-clipped=7.0 +2023-02-06 06:20:25,962 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3157, 1.6127, 1.6965, 0.8600, 1.7176, 1.2663, 0.2518, 1.5394], + device='cuda:0'), covar=tensor([0.0226, 0.0157, 0.0124, 0.0230, 0.0166, 0.0471, 0.0405, 0.0130], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0287, 0.0231, 0.0340, 0.0275, 0.0437, 0.0333, 0.0314], + device='cuda:0'), out_proj_covar=tensor([1.0887e-04, 8.4336e-05, 6.8109e-05, 1.0058e-04, 8.2936e-05, 1.4226e-04, + 1.0089e-04, 9.4080e-05], device='cuda:0') +2023-02-06 06:20:51,753 INFO [train.py:901] (0/4) Epoch 8, batch 7200, loss[loss=0.2717, simple_loss=0.3504, pruned_loss=0.09648, over 8347.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3248, pruned_loss=0.0918, over 1616734.44 frames. ], batch size: 26, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:51,831 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:53,157 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:07,239 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3684, 3.0907, 3.5183, 2.0592, 1.9230, 3.4330, 0.6963, 2.1642], + device='cuda:0'), covar=tensor([0.2248, 0.1610, 0.0464, 0.3196, 0.5082, 0.0713, 0.5257, 0.2577], + device='cuda:0'), in_proj_covar=tensor([0.0155, 0.0153, 0.0090, 0.0199, 0.0242, 0.0095, 0.0158, 0.0156], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:0') +2023-02-06 06:21:23,981 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:26,391 INFO [train.py:901] (0/4) Epoch 8, batch 7250, loss[loss=0.32, simple_loss=0.3694, pruned_loss=0.1353, over 8505.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3245, pruned_loss=0.0919, over 1612889.85 frames. 
], batch size: 28, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:21:30,981 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.694e+02 3.202e+02 4.148e+02 8.009e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 06:21:41,873 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6483, 1.5917, 1.9417, 1.4509, 1.0613, 1.9615, 0.2667, 1.3259], + device='cuda:0'), covar=tensor([0.3014, 0.2103, 0.0482, 0.2153, 0.5651, 0.0667, 0.4268, 0.2082], + device='cuda:0'), in_proj_covar=tensor([0.0154, 0.0152, 0.0090, 0.0197, 0.0241, 0.0094, 0.0157, 0.0155], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:0') +2023-02-06 06:21:47,995 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:49,574 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 06:22:00,268 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0109, 2.3200, 1.7489, 2.7229, 1.4669, 1.5441, 1.9866, 2.3851], + device='cuda:0'), covar=tensor([0.0750, 0.0770, 0.1047, 0.0388, 0.1034, 0.1421, 0.0902, 0.0721], + device='cuda:0'), in_proj_covar=tensor([0.0243, 0.0222, 0.0264, 0.0213, 0.0222, 0.0258, 0.0263, 0.0229], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 06:22:00,779 INFO [train.py:901] (0/4) Epoch 8, batch 7300, loss[loss=0.2875, simple_loss=0.3536, pruned_loss=0.1107, over 8352.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3236, pruned_loss=0.09148, over 1611322.64 frames. ], batch size: 24, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:12,928 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:14,320 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:33,994 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2710, 1.4793, 1.4956, 1.3076, 1.1000, 1.4560, 1.6585, 1.4409], + device='cuda:0'), covar=tensor([0.0477, 0.1233, 0.1863, 0.1431, 0.0595, 0.1531, 0.0687, 0.0637], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0161, 0.0199, 0.0165, 0.0112, 0.0169, 0.0123, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 06:22:36,699 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:37,305 INFO [train.py:901] (0/4) Epoch 8, batch 7350, loss[loss=0.3294, simple_loss=0.3878, pruned_loss=0.1355, over 8605.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3236, pruned_loss=0.09143, over 1609197.44 frames. 
], batch size: 49, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:38,872 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:42,246 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.551e+02 3.183e+02 3.767e+02 5.416e+02, threshold=6.365e+02, percent-clipped=0.0 +2023-02-06 06:22:48,924 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63948.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:53,925 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:05,931 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 06:23:10,850 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:12,545 INFO [train.py:901] (0/4) Epoch 8, batch 7400, loss[loss=0.2201, simple_loss=0.2937, pruned_loss=0.07324, over 8103.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3221, pruned_loss=0.09095, over 1607975.88 frames. ], batch size: 21, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:24,821 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 06:23:25,610 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-64000.pt +2023-02-06 06:23:29,609 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 06:23:48,438 INFO [train.py:901] (0/4) Epoch 8, batch 7450, loss[loss=0.2324, simple_loss=0.3222, pruned_loss=0.07127, over 8465.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3234, pruned_loss=0.09158, over 1612429.33 frames. ], batch size: 25, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:53,167 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.827e+02 3.358e+02 3.935e+02 9.777e+02, threshold=6.715e+02, percent-clipped=5.0 +2023-02-06 06:23:54,693 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:58,108 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:05,487 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 06:24:10,178 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:23,172 INFO [train.py:901] (0/4) Epoch 8, batch 7500, loss[loss=0.2582, simple_loss=0.3267, pruned_loss=0.09482, over 8480.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3233, pruned_loss=0.09109, over 1616778.88 frames. ], batch size: 29, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:24:34,552 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 06:24:53,421 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-06 06:24:57,461 INFO [train.py:901] (0/4) Epoch 8, batch 7550, loss[loss=0.3141, simple_loss=0.3729, pruned_loss=0.1276, over 8669.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3265, pruned_loss=0.09306, over 1618898.33 frames. 
], batch size: 34, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:02,130 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.846e+02 3.017e+02 3.905e+02 4.969e+02 7.546e+02, threshold=7.810e+02, percent-clipped=1.0 +2023-02-06 06:25:07,026 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2475, 1.8335, 1.9344, 1.7077, 1.2465, 1.8768, 2.1722, 1.9824], + device='cuda:0'), covar=tensor([0.0440, 0.1186, 0.1668, 0.1261, 0.0614, 0.1402, 0.0641, 0.0539], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0162, 0.0200, 0.0164, 0.0113, 0.0169, 0.0123, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 06:25:11,678 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:13,072 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:24,180 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:28,956 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:30,997 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:32,163 INFO [train.py:901] (0/4) Epoch 8, batch 7600, loss[loss=0.2798, simple_loss=0.3538, pruned_loss=0.1029, over 8541.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3245, pruned_loss=0.09203, over 1614883.66 frames. ], batch size: 31, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:49,059 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64207.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:07,009 INFO [train.py:901] (0/4) Epoch 8, batch 7650, loss[loss=0.2682, simple_loss=0.3397, pruned_loss=0.09838, over 8451.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.325, pruned_loss=0.09234, over 1617270.69 frames. ], batch size: 27, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:11,833 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.778e+02 3.467e+02 5.154e+02 1.113e+03, threshold=6.933e+02, percent-clipped=3.0 +2023-02-06 06:26:19,350 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:38,168 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:39,043 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4911, 1.9882, 3.1022, 1.2800, 2.2023, 1.8864, 1.7760, 1.8605], + device='cuda:0'), covar=tensor([0.1640, 0.1821, 0.0812, 0.3477, 0.1430, 0.2541, 0.1513, 0.2110], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0486, 0.0527, 0.0562, 0.0601, 0.0538, 0.0457, 0.0601], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:26:40,901 INFO [train.py:901] (0/4) Epoch 8, batch 7700, loss[loss=0.2166, simple_loss=0.2868, pruned_loss=0.07322, over 7921.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3263, pruned_loss=0.09315, over 1619570.85 frames. 
], batch size: 20, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:45,263 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:56,501 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:08,123 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,003 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,466 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 06:27:13,352 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:14,859 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-06 06:27:16,533 INFO [train.py:901] (0/4) Epoch 8, batch 7750, loss[loss=0.3104, simple_loss=0.3723, pruned_loss=0.1242, over 8110.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3259, pruned_loss=0.09262, over 1622575.58 frames. ], batch size: 23, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:27:17,264 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4309, 1.4899, 1.6037, 1.3474, 1.2545, 1.4767, 1.7575, 1.7368], + device='cuda:0'), covar=tensor([0.0496, 0.1269, 0.1833, 0.1418, 0.0639, 0.1614, 0.0708, 0.0614], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0162, 0.0200, 0.0166, 0.0112, 0.0170, 0.0122, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 06:27:21,028 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.516e+02 3.070e+02 3.996e+02 6.859e+02, threshold=6.139e+02, percent-clipped=0.0 +2023-02-06 06:27:25,263 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:38,614 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:51,129 INFO [train.py:901] (0/4) Epoch 8, batch 7800, loss[loss=0.2021, simple_loss=0.2823, pruned_loss=0.06101, over 7538.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3253, pruned_loss=0.09187, over 1620011.04 frames. ], batch size: 18, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:27:53,236 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:58,776 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:28:25,447 INFO [train.py:901] (0/4) Epoch 8, batch 7850, loss[loss=0.2792, simple_loss=0.3387, pruned_loss=0.1099, over 8362.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.325, pruned_loss=0.09179, over 1620516.59 frames. 
], batch size: 24, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:30,097 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.075e+02 2.873e+02 3.519e+02 4.505e+02 1.254e+03, threshold=7.037e+02, percent-clipped=6.0 +2023-02-06 06:28:58,102 INFO [train.py:901] (0/4) Epoch 8, batch 7900, loss[loss=0.2109, simple_loss=0.2824, pruned_loss=0.06975, over 7532.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3241, pruned_loss=0.09096, over 1616067.75 frames. ], batch size: 18, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:58,897 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:10,215 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:13,087 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 06:29:32,366 INFO [train.py:901] (0/4) Epoch 8, batch 7950, loss[loss=0.2549, simple_loss=0.3216, pruned_loss=0.0941, over 8360.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3228, pruned_loss=0.09003, over 1618009.46 frames. ], batch size: 24, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:29:37,071 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.689e+02 3.383e+02 4.341e+02 8.251e+02, threshold=6.766e+02, percent-clipped=4.0 +2023-02-06 06:29:40,088 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:56,713 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:03,487 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:06,037 INFO [train.py:901] (0/4) Epoch 8, batch 8000, loss[loss=0.2373, simple_loss=0.3163, pruned_loss=0.07914, over 8438.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3229, pruned_loss=0.09024, over 1613413.13 frames. ], batch size: 29, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:14,234 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:20,521 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:23,051 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:39,931 INFO [train.py:901] (0/4) Epoch 8, batch 8050, loss[loss=0.2161, simple_loss=0.2918, pruned_loss=0.07014, over 7544.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.322, pruned_loss=0.08989, over 1598075.51 frames. ], batch size: 18, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:44,635 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.434e+02 2.955e+02 3.616e+02 6.730e+02, threshold=5.909e+02, percent-clipped=0.0 +2023-02-06 06:30:51,558 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:02,945 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-8.pt +2023-02-06 06:31:13,933 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. 
Duration: 27.47775 +2023-02-06 06:31:17,607 INFO [train.py:901] (0/4) Epoch 9, batch 0, loss[loss=0.2973, simple_loss=0.3649, pruned_loss=0.1149, over 8449.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.3649, pruned_loss=0.1149, over 8449.00 frames. ], batch size: 27, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:31:17,608 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 06:31:28,852 INFO [train.py:935] (0/4) Epoch 9, validation: loss=0.1983, simple_loss=0.2974, pruned_loss=0.04961, over 944034.00 frames. +2023-02-06 06:31:28,853 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6689MB +2023-02-06 06:31:29,663 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,186 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:43,423 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 06:31:56,869 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:58,423 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:02,443 INFO [train.py:901] (0/4) Epoch 9, batch 50, loss[loss=0.2512, simple_loss=0.3377, pruned_loss=0.08239, over 8499.00 frames. ], tot_loss[loss=0.2583, simple_loss=0.3304, pruned_loss=0.09316, over 367886.81 frames. ], batch size: 26, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:32:06,618 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:16,338 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 06:32:18,980 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.818e+02 3.347e+02 4.122e+02 1.189e+03, threshold=6.695e+02, percent-clipped=9.0 +2023-02-06 06:32:31,577 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:31,743 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 06:32:36,071 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:37,300 INFO [train.py:901] (0/4) Epoch 9, batch 100, loss[loss=0.2232, simple_loss=0.289, pruned_loss=0.07872, over 7531.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3271, pruned_loss=0.09325, over 642141.14 frames. ], batch size: 18, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:32:41,522 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:42,101 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 06:32:49,740 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:11,709 INFO [train.py:901] (0/4) Epoch 9, batch 150, loss[loss=0.2839, simple_loss=0.3387, pruned_loss=0.1145, over 7810.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3275, pruned_loss=0.09248, over 860820.17 frames. 
], batch size: 20, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:33:16,731 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:20,054 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:27,775 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.896e+02 2.577e+02 3.213e+02 3.848e+02 9.281e+02, threshold=6.425e+02, percent-clipped=3.0 +2023-02-06 06:33:45,631 INFO [train.py:901] (0/4) Epoch 9, batch 200, loss[loss=0.3503, simple_loss=0.3909, pruned_loss=0.1548, over 8339.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3265, pruned_loss=0.09268, over 1029274.19 frames. ], batch size: 26, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:34:21,137 INFO [train.py:901] (0/4) Epoch 9, batch 250, loss[loss=0.2367, simple_loss=0.3154, pruned_loss=0.07902, over 8045.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3253, pruned_loss=0.09149, over 1159356.76 frames. ], batch size: 22, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:34,287 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 06:34:36,812 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 2.841e+02 3.295e+02 4.179e+02 1.029e+03, threshold=6.590e+02, percent-clipped=5.0 +2023-02-06 06:34:39,015 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:42,838 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 06:34:44,889 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:54,123 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 06:34:54,262 INFO [train.py:901] (0/4) Epoch 9, batch 300, loss[loss=0.2183, simple_loss=0.2907, pruned_loss=0.07295, over 7937.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3238, pruned_loss=0.09036, over 1262001.09 frames. ], batch size: 20, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:54,462 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:11,911 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:15,047 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:26,441 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:30,327 INFO [train.py:901] (0/4) Epoch 9, batch 350, loss[loss=0.2467, simple_loss=0.3163, pruned_loss=0.08857, over 8086.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3225, pruned_loss=0.08988, over 1338769.37 frames. ], batch size: 21, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:35:46,491 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.892e+02 2.570e+02 3.183e+02 3.796e+02 1.000e+03, threshold=6.367e+02, percent-clipped=4.0 +2023-02-06 06:36:03,863 INFO [train.py:901] (0/4) Epoch 9, batch 400, loss[loss=0.1963, simple_loss=0.2671, pruned_loss=0.06276, over 7439.00 frames. 
], tot_loss[loss=0.2528, simple_loss=0.3241, pruned_loss=0.09072, over 1409986.03 frames. ], batch size: 17, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:03,936 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65065.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:04,755 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:09,395 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:12,869 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:30,519 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:33,060 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:37,770 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:38,396 INFO [train.py:901] (0/4) Epoch 9, batch 450, loss[loss=0.2742, simple_loss=0.3361, pruned_loss=0.1062, over 8374.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3235, pruned_loss=0.09087, over 1452696.21 frames. ], batch size: 49, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:46,201 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4422, 1.1598, 1.3720, 1.0980, 0.7863, 1.1457, 1.1598, 1.0697], + device='cuda:0'), covar=tensor([0.0538, 0.1386, 0.1819, 0.1450, 0.0608, 0.1633, 0.0687, 0.0655], + device='cuda:0'), in_proj_covar=tensor([0.0108, 0.0161, 0.0200, 0.0164, 0.0111, 0.0168, 0.0123, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 06:36:46,210 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:56,307 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.705e+02 3.323e+02 3.920e+02 9.407e+02, threshold=6.647e+02, percent-clipped=6.0 +2023-02-06 06:37:13,478 INFO [train.py:901] (0/4) Epoch 9, batch 500, loss[loss=0.2112, simple_loss=0.3013, pruned_loss=0.06054, over 8037.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3226, pruned_loss=0.09052, over 1490133.09 frames. ], batch size: 22, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:37:23,268 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:35,030 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:45,914 INFO [train.py:901] (0/4) Epoch 9, batch 550, loss[loss=0.2457, simple_loss=0.3195, pruned_loss=0.08599, over 7810.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3233, pruned_loss=0.09094, over 1516294.61 frames. 
], batch size: 20, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:37:51,957 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:52,660 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65223.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:56,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:03,161 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.829e+02 3.496e+02 4.355e+02 8.306e+02, threshold=6.991e+02, percent-clipped=2.0 +2023-02-06 06:38:21,259 INFO [train.py:901] (0/4) Epoch 9, batch 600, loss[loss=0.2227, simple_loss=0.2966, pruned_loss=0.07435, over 7653.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3219, pruned_loss=0.09009, over 1535941.18 frames. ], batch size: 19, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:38:37,883 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7725, 1.7506, 2.1344, 1.5658, 1.1734, 2.2301, 0.2268, 1.2213], + device='cuda:0'), covar=tensor([0.2785, 0.1853, 0.0502, 0.3092, 0.4751, 0.0476, 0.4193, 0.2466], + device='cuda:0'), in_proj_covar=tensor([0.0155, 0.0153, 0.0091, 0.0201, 0.0242, 0.0095, 0.0156, 0.0156], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:38:38,361 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 06:38:46,504 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:54,303 INFO [train.py:901] (0/4) Epoch 9, batch 650, loss[loss=0.286, simple_loss=0.3531, pruned_loss=0.1094, over 7932.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3222, pruned_loss=0.09007, over 1550052.34 frames. ], batch size: 20, lr: 8.75e-03, grad_scale: 16.0 +2023-02-06 06:38:55,145 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2250, 1.7845, 4.2050, 1.9898, 2.3336, 4.7939, 4.5799, 4.1193], + device='cuda:0'), covar=tensor([0.1117, 0.1536, 0.0284, 0.1822, 0.1024, 0.0210, 0.0398, 0.0636], + device='cuda:0'), in_proj_covar=tensor([0.0251, 0.0282, 0.0241, 0.0273, 0.0254, 0.0224, 0.0296, 0.0282], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:38:59,102 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,303 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,880 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.656e+02 3.252e+02 4.080e+02 6.220e+02, threshold=6.503e+02, percent-clipped=0.0 +2023-02-06 06:39:17,067 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:29,085 INFO [train.py:901] (0/4) Epoch 9, batch 700, loss[loss=0.2358, simple_loss=0.322, pruned_loss=0.07484, over 8207.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3203, pruned_loss=0.08851, over 1565082.59 frames. 
], batch size: 23, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:39:40,993 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65381.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:57,977 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:03,865 INFO [train.py:901] (0/4) Epoch 9, batch 750, loss[loss=0.2459, simple_loss=0.3232, pruned_loss=0.08425, over 8247.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3208, pruned_loss=0.089, over 1575805.28 frames. ], batch size: 24, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:05,337 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:18,160 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:19,949 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.803e+02 3.527e+02 4.474e+02 1.505e+03, threshold=7.053e+02, percent-clipped=7.0 +2023-02-06 06:40:21,291 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 06:40:30,021 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 06:40:30,185 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:36,145 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65461.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:38,577 INFO [train.py:901] (0/4) Epoch 9, batch 800, loss[loss=0.2347, simple_loss=0.3154, pruned_loss=0.07698, over 8467.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3214, pruned_loss=0.0895, over 1585380.20 frames. ], batch size: 25, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:47,447 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:53,602 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65485.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:55,599 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2303, 1.7812, 1.8363, 1.6541, 1.2618, 1.7437, 1.9072, 1.8161], + device='cuda:0'), covar=tensor([0.0502, 0.0972, 0.1421, 0.1177, 0.0597, 0.1178, 0.0594, 0.0494], + device='cuda:0'), in_proj_covar=tensor([0.0107, 0.0159, 0.0199, 0.0164, 0.0111, 0.0168, 0.0122, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 06:41:05,671 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:10,493 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:13,594 INFO [train.py:901] (0/4) Epoch 9, batch 850, loss[loss=0.2374, simple_loss=0.2969, pruned_loss=0.08892, over 7692.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3209, pruned_loss=0.08952, over 1592998.24 frames. 
], batch size: 18, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:41:25,164 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:29,558 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.736e+02 3.271e+02 4.209e+02 1.110e+03, threshold=6.542e+02, percent-clipped=5.0 +2023-02-06 06:41:37,094 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65550.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:41:47,587 INFO [train.py:901] (0/4) Epoch 9, batch 900, loss[loss=0.2891, simple_loss=0.3594, pruned_loss=0.1094, over 8455.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3207, pruned_loss=0.08934, over 1597480.90 frames. ], batch size: 27, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:41:51,814 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3423, 1.3599, 4.5070, 1.7552, 3.9891, 3.7561, 4.1160, 3.9078], + device='cuda:0'), covar=tensor([0.0467, 0.3838, 0.0389, 0.2989, 0.1094, 0.0769, 0.0447, 0.0604], + device='cuda:0'), in_proj_covar=tensor([0.0431, 0.0552, 0.0540, 0.0503, 0.0570, 0.0483, 0.0477, 0.0540], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 06:42:03,885 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9701, 1.5919, 1.7116, 1.2710, 1.0068, 1.4018, 1.6076, 1.7495], + device='cuda:0'), covar=tensor([0.0557, 0.1102, 0.1645, 0.1406, 0.0611, 0.1428, 0.0668, 0.0577], + device='cuda:0'), in_proj_covar=tensor([0.0107, 0.0159, 0.0198, 0.0163, 0.0111, 0.0167, 0.0122, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 06:42:12,210 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3038, 1.9049, 2.8862, 2.2165, 2.4457, 2.1173, 1.6344, 1.2047], + device='cuda:0'), covar=tensor([0.2977, 0.3009, 0.0859, 0.1962, 0.1565, 0.1758, 0.1509, 0.3291], + device='cuda:0'), in_proj_covar=tensor([0.0850, 0.0814, 0.0699, 0.0804, 0.0891, 0.0754, 0.0687, 0.0727], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:42:12,365 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 06:42:23,181 INFO [train.py:901] (0/4) Epoch 9, batch 950, loss[loss=0.2649, simple_loss=0.336, pruned_loss=0.09693, over 8463.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3207, pruned_loss=0.08919, over 1601313.44 frames. ], batch size: 25, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:42:39,187 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.498e+02 3.047e+02 4.041e+02 6.463e+02, threshold=6.094e+02, percent-clipped=0.0 +2023-02-06 06:42:44,682 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:42:50,344 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 06:42:56,309 INFO [train.py:901] (0/4) Epoch 9, batch 1000, loss[loss=0.187, simple_loss=0.263, pruned_loss=0.05552, over 7934.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3208, pruned_loss=0.08958, over 1604466.93 frames. 
], batch size: 20, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:23,100 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 06:43:27,438 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:31,859 INFO [train.py:901] (0/4) Epoch 9, batch 1050, loss[loss=0.2433, simple_loss=0.3242, pruned_loss=0.08122, over 8491.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3205, pruned_loss=0.08892, over 1607306.28 frames. ], batch size: 29, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:35,708 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 06:43:44,958 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:46,903 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4308, 1.8644, 3.0879, 1.1498, 2.2022, 1.9467, 1.5768, 1.8290], + device='cuda:0'), covar=tensor([0.1930, 0.2113, 0.0866, 0.4179, 0.1646, 0.2936, 0.1946, 0.2409], + device='cuda:0'), in_proj_covar=tensor([0.0480, 0.0494, 0.0531, 0.0576, 0.0608, 0.0543, 0.0465, 0.0607], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:43:47,988 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 2.877e+02 3.398e+02 4.338e+02 8.070e+02, threshold=6.796e+02, percent-clipped=6.0 +2023-02-06 06:44:03,478 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:05,257 INFO [train.py:901] (0/4) Epoch 9, batch 1100, loss[loss=0.2473, simple_loss=0.3083, pruned_loss=0.09315, over 7800.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3188, pruned_loss=0.08816, over 1602135.17 frames. ], batch size: 19, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:44:20,621 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:35,706 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 06:44:38,693 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:39,885 INFO [train.py:901] (0/4) Epoch 9, batch 1150, loss[loss=0.2649, simple_loss=0.3176, pruned_loss=0.1062, over 7426.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.32, pruned_loss=0.08914, over 1603675.11 frames. ], batch size: 17, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:44:42,076 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0301, 1.1061, 4.2320, 1.6473, 3.6840, 3.5288, 3.7554, 3.6881], + device='cuda:0'), covar=tensor([0.0582, 0.4294, 0.0483, 0.3136, 0.1216, 0.0933, 0.0625, 0.0703], + device='cuda:0'), in_proj_covar=tensor([0.0431, 0.0550, 0.0538, 0.0502, 0.0569, 0.0483, 0.0479, 0.0541], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 06:44:44,628 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-06 06:44:56,759 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.552e+02 3.121e+02 3.966e+02 8.304e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-06 06:45:06,289 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2736, 1.8374, 4.0923, 1.8400, 2.4025, 4.7076, 4.6782, 4.1282], + device='cuda:0'), covar=tensor([0.1097, 0.1475, 0.0308, 0.1909, 0.1085, 0.0215, 0.0343, 0.0545], + device='cuda:0'), in_proj_covar=tensor([0.0255, 0.0286, 0.0246, 0.0277, 0.0261, 0.0228, 0.0302, 0.0289], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:45:09,010 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65856.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:45:09,693 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7374, 2.3981, 3.7081, 2.7950, 2.9544, 2.5766, 1.9176, 1.7519], + device='cuda:0'), covar=tensor([0.2954, 0.3370, 0.0888, 0.2137, 0.1851, 0.1597, 0.1468, 0.3762], + device='cuda:0'), in_proj_covar=tensor([0.0848, 0.0810, 0.0699, 0.0804, 0.0895, 0.0752, 0.0686, 0.0732], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:45:14,580 INFO [train.py:901] (0/4) Epoch 9, batch 1200, loss[loss=0.2994, simple_loss=0.3611, pruned_loss=0.1188, over 8699.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3211, pruned_loss=0.0898, over 1606298.10 frames. ], batch size: 39, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:45:33,707 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65894.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:45:47,641 INFO [train.py:901] (0/4) Epoch 9, batch 1250, loss[loss=0.2406, simple_loss=0.3019, pruned_loss=0.08962, over 7193.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3223, pruned_loss=0.09052, over 1608459.87 frames. ], batch size: 16, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:45:57,331 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.7802, 0.7382, 0.8231, 0.7856, 0.5188, 0.8298, 0.0919, 0.7065], + device='cuda:0'), covar=tensor([0.2396, 0.1658, 0.0682, 0.1356, 0.3978, 0.0557, 0.3233, 0.1872], + device='cuda:0'), in_proj_covar=tensor([0.0156, 0.0154, 0.0092, 0.0204, 0.0245, 0.0095, 0.0159, 0.0157], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 06:46:05,023 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.844e+02 3.477e+02 4.312e+02 8.167e+02, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 06:46:23,823 INFO [train.py:901] (0/4) Epoch 9, batch 1300, loss[loss=0.216, simple_loss=0.2948, pruned_loss=0.06861, over 8248.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3222, pruned_loss=0.09022, over 1609271.90 frames. ], batch size: 22, lr: 8.70e-03, grad_scale: 16.0 +2023-02-06 06:46:48,091 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-66000.pt +2023-02-06 06:46:55,300 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66009.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:46:59,214 INFO [train.py:901] (0/4) Epoch 9, batch 1350, loss[loss=0.2397, simple_loss=0.303, pruned_loss=0.08821, over 7931.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3226, pruned_loss=0.09018, over 1612283.19 frames. 
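The `checkpoint.py:75` line above saves `checkpoint-66000.pt`, and `checkpoint-68000.pt` follows later in this log, so checkpoints are written every 2000 batches into the experiment directory. A minimal sketch of that pattern, assuming standard `torch.save` usage (the payload keys here are illustrative):

```python
import torch

def maybe_save_checkpoint(model, optimizer, batch_idx: int, exp_dir: str,
                          save_every: int = 2000) -> None:
    """Save a rolling checkpoint every `save_every` batches.

    The 2000-batch interval is read off the log (checkpoint-66000.pt,
    then checkpoint-68000.pt); the dict layout is an assumption.
    """
    if batch_idx % save_every != 0:
        return
    path = f"{exp_dir}/checkpoint-{batch_idx}.pt"
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "batch_idx": batch_idx,
        },
        path,
    )
```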
], batch size: 20, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:47:01,402 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:06,410 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 06:47:17,555 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 2.557e+02 3.336e+02 4.233e+02 1.201e+03, threshold=6.672e+02, percent-clipped=8.0 +2023-02-06 06:47:19,776 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:35,110 INFO [train.py:901] (0/4) Epoch 9, batch 1400, loss[loss=0.2378, simple_loss=0.3061, pruned_loss=0.08472, over 8071.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.323, pruned_loss=0.0907, over 1613391.32 frames. ], batch size: 21, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:48:09,440 INFO [train.py:901] (0/4) Epoch 9, batch 1450, loss[loss=0.2237, simple_loss=0.2846, pruned_loss=0.08142, over 7233.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3227, pruned_loss=0.09054, over 1612305.65 frames. ], batch size: 16, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:12,162 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 06:48:21,005 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8734, 2.0064, 2.3940, 1.9138, 1.2562, 2.4698, 0.4006, 1.4868], + device='cuda:0'), covar=tensor([0.2780, 0.1891, 0.0458, 0.2106, 0.5807, 0.0491, 0.4660, 0.2566], + device='cuda:0'), in_proj_covar=tensor([0.0150, 0.0151, 0.0090, 0.0197, 0.0240, 0.0092, 0.0154, 0.0152], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:48:26,155 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.633e+02 3.463e+02 4.686e+02 9.003e+02, threshold=6.925e+02, percent-clipped=5.0 +2023-02-06 06:48:42,248 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:48:44,183 INFO [train.py:901] (0/4) Epoch 9, batch 1500, loss[loss=0.2612, simple_loss=0.3334, pruned_loss=0.09455, over 8315.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3222, pruned_loss=0.08974, over 1618754.19 frames. ], batch size: 25, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:52,885 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66178.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:49:08,955 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66200.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:49:09,817 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 06:49:18,739 INFO [train.py:901] (0/4) Epoch 9, batch 1550, loss[loss=0.2871, simple_loss=0.359, pruned_loss=0.1076, over 8356.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3226, pruned_loss=0.08991, over 1622540.03 frames. ], batch size: 24, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:49:35,630 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.559e+02 2.942e+02 3.565e+02 7.942e+02, threshold=5.885e+02, percent-clipped=2.0 +2023-02-06 06:49:53,211 INFO [train.py:901] (0/4) Epoch 9, batch 1600, loss[loss=0.2134, simple_loss=0.2923, pruned_loss=0.06723, over 7258.00 frames. 
], tot_loss[loss=0.2518, simple_loss=0.3235, pruned_loss=0.09008, over 1626508.50 frames. ], batch size: 16, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:49:53,460 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66265.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:50:02,310 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:50:11,771 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66290.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:29,757 INFO [train.py:901] (0/4) Epoch 9, batch 1650, loss[loss=0.2734, simple_loss=0.3491, pruned_loss=0.0989, over 8420.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3228, pruned_loss=0.08983, over 1619614.38 frames. ], batch size: 49, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:50:29,942 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66315.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:50:35,446 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2205, 1.8555, 2.6018, 2.0632, 2.2660, 2.1394, 1.7324, 0.9634], + device='cuda:0'), covar=tensor([0.3312, 0.3443, 0.0961, 0.2065, 0.1667, 0.1803, 0.1731, 0.3392], + device='cuda:0'), in_proj_covar=tensor([0.0860, 0.0820, 0.0703, 0.0811, 0.0899, 0.0761, 0.0692, 0.0736], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:50:46,548 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.536e+02 3.360e+02 4.258e+02 7.701e+02, threshold=6.719e+02, percent-clipped=5.0 +2023-02-06 06:50:49,638 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-06 06:50:54,222 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-02-06 06:51:03,465 INFO [train.py:901] (0/4) Epoch 9, batch 1700, loss[loss=0.2255, simple_loss=0.2979, pruned_loss=0.07655, over 7792.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3223, pruned_loss=0.08923, over 1614512.95 frames. ], batch size: 19, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:51:39,905 INFO [train.py:901] (0/4) Epoch 9, batch 1750, loss[loss=0.2304, simple_loss=0.3118, pruned_loss=0.07444, over 8349.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3231, pruned_loss=0.08977, over 1613028.00 frames. ], batch size: 24, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:51:57,456 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.942e+02 3.542e+02 4.261e+02 7.419e+02, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 06:52:13,915 INFO [train.py:901] (0/4) Epoch 9, batch 1800, loss[loss=0.2739, simple_loss=0.3416, pruned_loss=0.1031, over 8466.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3234, pruned_loss=0.09054, over 1609663.99 frames. ], batch size: 27, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:42,174 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:52:48,995 INFO [train.py:901] (0/4) Epoch 9, batch 1850, loss[loss=0.2834, simple_loss=0.3498, pruned_loss=0.1085, over 8347.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3216, pruned_loss=0.08927, over 1614239.73 frames. 
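The recurring `attn_weights_entropy = tensor([...])` lines are a per-head diagnostic: the entropy of each attention head's distribution over source positions, where low values indicate a head that focuses on few positions and high values a head that spreads attention broadly. A small sketch of how such a summary could be computed (the tensor layout is an assumption):

```python
import torch

def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
    """Per-head entropy of attention distributions.

    `attn` is assumed to have shape (num_heads, tgt_len, src_len) with
    rows summing to 1. Returns one averaged entropy per head, the kind
    of per-head summary printed on the log lines above.
    """
    eps = 1e-20  # avoid log(0) for fully peaked heads
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (num_heads, tgt_len)
    return ent.mean(dim=-1)  # (num_heads,)
```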
], batch size: 26, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:53,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66522.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:05,510 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66539.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:05,995 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.848e+02 3.228e+02 4.154e+02 1.120e+03, threshold=6.457e+02, percent-clipped=1.0 +2023-02-06 06:53:11,893 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9140, 1.5166, 6.0125, 2.0677, 5.2501, 4.9845, 5.5428, 5.4193], + device='cuda:0'), covar=tensor([0.0357, 0.4166, 0.0246, 0.2999, 0.0835, 0.0618, 0.0390, 0.0447], + device='cuda:0'), in_proj_covar=tensor([0.0437, 0.0554, 0.0537, 0.0510, 0.0574, 0.0488, 0.0482, 0.0544], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 06:53:16,636 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 06:53:22,984 INFO [train.py:901] (0/4) Epoch 9, batch 1900, loss[loss=0.2591, simple_loss=0.3291, pruned_loss=0.09452, over 8581.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.323, pruned_loss=0.08983, over 1616520.76 frames. ], batch size: 49, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:27,124 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66571.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:43,852 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66596.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:47,130 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 06:53:56,559 INFO [train.py:901] (0/4) Epoch 9, batch 1950, loss[loss=0.2325, simple_loss=0.3182, pruned_loss=0.07337, over 8488.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3229, pruned_loss=0.09012, over 1607669.90 frames. ], batch size: 26, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:58,597 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 06:54:00,023 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:00,817 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:12,890 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66637.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:54:13,546 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3814, 1.7908, 3.3803, 1.1602, 2.2000, 1.8300, 1.4304, 2.1098], + device='cuda:0'), covar=tensor([0.1796, 0.2383, 0.0710, 0.3944, 0.1884, 0.2998, 0.1937, 0.2517], + device='cuda:0'), in_proj_covar=tensor([0.0480, 0.0492, 0.0535, 0.0569, 0.0610, 0.0540, 0.0466, 0.0604], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 06:54:14,619 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.852e+02 3.410e+02 4.369e+02 9.021e+02, threshold=6.820e+02, percent-clipped=7.0 +2023-02-06 06:54:20,062 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 06:54:32,101 INFO [train.py:901] (0/4) Epoch 9, batch 2000, loss[loss=0.1991, simple_loss=0.2835, pruned_loss=0.05737, over 8319.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3213, pruned_loss=0.08882, over 1610714.63 frames. ], batch size: 25, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:54:43,828 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 06:55:06,982 INFO [train.py:901] (0/4) Epoch 9, batch 2050, loss[loss=0.2405, simple_loss=0.3099, pruned_loss=0.08555, over 7237.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3203, pruned_loss=0.08836, over 1610800.42 frames. ], batch size: 16, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:55:20,379 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:55:23,653 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.024e+02 2.763e+02 3.349e+02 4.333e+02 1.017e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 06:55:31,186 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66749.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:55:42,433 INFO [train.py:901] (0/4) Epoch 9, batch 2100, loss[loss=0.234, simple_loss=0.3007, pruned_loss=0.08361, over 7419.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3205, pruned_loss=0.08865, over 1613290.61 frames. ], batch size: 17, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:17,421 INFO [train.py:901] (0/4) Epoch 9, batch 2150, loss[loss=0.2327, simple_loss=0.3162, pruned_loss=0.07458, over 8101.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3212, pruned_loss=0.08939, over 1610565.21 frames. ], batch size: 23, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:34,547 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.810e+02 3.362e+02 4.511e+02 1.000e+03, threshold=6.724e+02, percent-clipped=7.0 +2023-02-06 06:56:36,817 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.49 vs. limit=5.0 +2023-02-06 06:56:53,074 INFO [train.py:901] (0/4) Epoch 9, batch 2200, loss[loss=0.288, simple_loss=0.3475, pruned_loss=0.1143, over 7720.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3216, pruned_loss=0.08866, over 1613441.52 frames. ], batch size: 77, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:01,059 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:05,631 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66883.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:57:12,263 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66893.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:57:18,133 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:27,004 INFO [train.py:901] (0/4) Epoch 9, batch 2250, loss[loss=0.2373, simple_loss=0.3037, pruned_loss=0.08544, over 7928.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.322, pruned_loss=0.08925, over 1615303.22 frames. 
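The `Exclude cut with ID ... Duration: ...` warnings show utterances being dropped before batching: everything excluded is either long (roughly 25 s and above) or sub-second (e.g. 0.83 s elsewhere in this log). A sketch of such a duration filter over a lhotse-style `CutSet` follows; the exact bounds are illustrative, inferred from the logged durations rather than taken from the recipe.

```python
import logging

def remove_unsuitable_cuts(cuts, min_duration: float = 1.0,
                           max_duration: float = 20.0):
    """Drop cuts whose duration falls outside [min_duration, max_duration].

    Bounds are assumptions; the WARNING lines show cuts longer than ~25 s
    and shorter than ~1 s being excluded from training.
    """
    def is_suitable(cut) -> bool:
        if min_duration <= cut.duration <= max_duration:
            return True
        logging.warning(
            f"Exclude cut with ID {cut.id} from training. "
            f"Duration: {cut.duration}"
        )
        return False

    return cuts.filter(is_suitable)
```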
], batch size: 20, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:29,215 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66918.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:43,696 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.716e+02 3.375e+02 4.203e+02 7.579e+02, threshold=6.750e+02, percent-clipped=1.0 +2023-02-06 06:58:00,326 INFO [train.py:901] (0/4) Epoch 9, batch 2300, loss[loss=0.1896, simple_loss=0.2629, pruned_loss=0.05811, over 7541.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3214, pruned_loss=0.08894, over 1613843.26 frames. ], batch size: 18, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:58:04,398 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 06:58:07,524 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:19,746 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:24,438 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66998.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:58:36,115 INFO [train.py:901] (0/4) Epoch 9, batch 2350, loss[loss=0.2492, simple_loss=0.3294, pruned_loss=0.08446, over 8242.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3213, pruned_loss=0.08904, over 1613483.36 frames. ], batch size: 22, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:58:36,990 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:43,074 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5363, 1.7470, 1.7496, 1.2889, 1.8564, 1.3756, 0.8963, 1.5487], + device='cuda:0'), covar=tensor([0.0254, 0.0138, 0.0103, 0.0212, 0.0147, 0.0357, 0.0342, 0.0128], + device='cuda:0'), in_proj_covar=tensor([0.0366, 0.0292, 0.0243, 0.0354, 0.0283, 0.0443, 0.0336, 0.0320], + device='cuda:0'), out_proj_covar=tensor([1.0935e-04, 8.5246e-05, 7.1412e-05, 1.0370e-04, 8.4229e-05, 1.4228e-04, + 1.0067e-04, 9.4783e-05], device='cuda:0') +2023-02-06 06:58:53,558 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.755e+02 3.236e+02 4.430e+02 1.005e+03, threshold=6.472e+02, percent-clipped=3.0 +2023-02-06 06:59:10,049 INFO [train.py:901] (0/4) Epoch 9, batch 2400, loss[loss=0.2569, simple_loss=0.3308, pruned_loss=0.09149, over 8332.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3232, pruned_loss=0.09091, over 1614204.94 frames. 
], batch size: 26, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:59:10,298 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6519, 1.9679, 2.1436, 1.3709, 2.2829, 1.3721, 0.8238, 1.7887], + device='cuda:0'), covar=tensor([0.0397, 0.0198, 0.0158, 0.0297, 0.0162, 0.0557, 0.0471, 0.0178], + device='cuda:0'), in_proj_covar=tensor([0.0367, 0.0292, 0.0243, 0.0355, 0.0284, 0.0441, 0.0336, 0.0321], + device='cuda:0'), out_proj_covar=tensor([1.0949e-04, 8.5292e-05, 7.1180e-05, 1.0401e-04, 8.4612e-05, 1.4138e-04, + 1.0074e-04, 9.4993e-05], device='cuda:0') +2023-02-06 06:59:28,514 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67093.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:59:34,522 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:59:44,442 INFO [train.py:901] (0/4) Epoch 9, batch 2450, loss[loss=0.265, simple_loss=0.3418, pruned_loss=0.09405, over 8555.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3226, pruned_loss=0.09035, over 1613102.19 frames. ], batch size: 31, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 07:00:02,012 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.801e+02 2.787e+02 3.467e+02 4.148e+02 8.119e+02, threshold=6.934e+02, percent-clipped=3.0 +2023-02-06 07:00:17,990 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8085, 2.0313, 1.6868, 2.5521, 1.3719, 1.2720, 1.7659, 2.0865], + device='cuda:0'), covar=tensor([0.0888, 0.1038, 0.1209, 0.0524, 0.1218, 0.1908, 0.1088, 0.0840], + device='cuda:0'), in_proj_covar=tensor([0.0247, 0.0226, 0.0265, 0.0218, 0.0224, 0.0264, 0.0267, 0.0231], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:00:18,498 INFO [train.py:901] (0/4) Epoch 9, batch 2500, loss[loss=0.2448, simple_loss=0.3222, pruned_loss=0.08372, over 8200.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3227, pruned_loss=0.09092, over 1609753.07 frames. ], batch size: 23, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:00:48,176 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67208.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:00:52,872 INFO [train.py:901] (0/4) Epoch 9, batch 2550, loss[loss=0.2323, simple_loss=0.3001, pruned_loss=0.08221, over 7545.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3218, pruned_loss=0.08973, over 1615470.74 frames. ], batch size: 18, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:00,606 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1486, 1.8167, 2.7658, 2.2177, 2.5190, 2.0609, 1.5612, 1.1070], + device='cuda:0'), covar=tensor([0.3543, 0.3461, 0.0972, 0.2151, 0.1587, 0.1886, 0.1641, 0.3814], + device='cuda:0'), in_proj_covar=tensor([0.0844, 0.0811, 0.0693, 0.0807, 0.0894, 0.0752, 0.0684, 0.0733], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:01:12,346 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.679e+02 3.405e+02 4.203e+02 8.726e+02, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 07:01:21,880 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67254.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:01:26,653 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.43 vs. 
limit=5.0 +2023-02-06 07:01:29,611 INFO [train.py:901] (0/4) Epoch 9, batch 2600, loss[loss=0.2384, simple_loss=0.3131, pruned_loss=0.0819, over 8331.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.323, pruned_loss=0.09067, over 1619530.42 frames. ], batch size: 26, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:39,251 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67279.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:02:03,430 INFO [train.py:901] (0/4) Epoch 9, batch 2650, loss[loss=0.2679, simple_loss=0.3448, pruned_loss=0.09557, over 8341.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3232, pruned_loss=0.09018, over 1620092.26 frames. ], batch size: 25, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:02:06,288 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:02:21,821 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.739e+02 3.376e+02 4.238e+02 9.756e+02, threshold=6.752e+02, percent-clipped=4.0 +2023-02-06 07:02:29,142 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 07:02:37,550 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4534, 2.0530, 2.1835, 1.3375, 2.2653, 1.4776, 0.6173, 1.7587], + device='cuda:0'), covar=tensor([0.0420, 0.0183, 0.0108, 0.0306, 0.0213, 0.0563, 0.0537, 0.0168], + device='cuda:0'), in_proj_covar=tensor([0.0370, 0.0293, 0.0246, 0.0359, 0.0285, 0.0446, 0.0340, 0.0324], + device='cuda:0'), out_proj_covar=tensor([1.1052e-04, 8.5331e-05, 7.2211e-05, 1.0526e-04, 8.4795e-05, 1.4292e-04, + 1.0181e-04, 9.6105e-05], device='cuda:0') +2023-02-06 07:02:38,164 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4323, 2.7561, 1.9344, 2.2077, 2.1012, 1.6083, 2.1019, 2.2203], + device='cuda:0'), covar=tensor([0.1157, 0.0291, 0.0880, 0.0548, 0.0583, 0.1162, 0.0860, 0.0818], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0233, 0.0310, 0.0296, 0.0305, 0.0320, 0.0340, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 07:02:39,313 INFO [train.py:901] (0/4) Epoch 9, batch 2700, loss[loss=0.3204, simple_loss=0.3721, pruned_loss=0.1344, over 8474.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3237, pruned_loss=0.09042, over 1619084.51 frames. ], batch size: 49, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:03:09,421 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1016, 2.2781, 1.6518, 1.8594, 1.7560, 1.4206, 1.5910, 1.7308], + device='cuda:0'), covar=tensor([0.1186, 0.0329, 0.0971, 0.0516, 0.0644, 0.1225, 0.0939, 0.0752], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0236, 0.0313, 0.0298, 0.0307, 0.0321, 0.0342, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 07:03:13,103 INFO [train.py:901] (0/4) Epoch 9, batch 2750, loss[loss=0.2637, simple_loss=0.331, pruned_loss=0.09817, over 7915.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3222, pruned_loss=0.08944, over 1617455.36 frames. 
], batch size: 20, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:03:26,069 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:29,963 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.867e+02 3.446e+02 4.196e+02 9.783e+02, threshold=6.892e+02, percent-clipped=3.0 +2023-02-06 07:03:34,242 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67445.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:47,998 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67464.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:03:48,379 INFO [train.py:901] (0/4) Epoch 9, batch 2800, loss[loss=0.2867, simple_loss=0.3537, pruned_loss=0.1099, over 8347.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3221, pruned_loss=0.0893, over 1617293.72 frames. ], batch size: 26, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:04:05,319 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67489.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:04:23,679 INFO [train.py:901] (0/4) Epoch 9, batch 2850, loss[loss=0.2172, simple_loss=0.2877, pruned_loss=0.07332, over 7796.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3224, pruned_loss=0.08982, over 1612754.81 frames. ], batch size: 19, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:04:29,811 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6842, 3.0910, 2.7213, 3.9680, 1.9375, 2.1736, 2.3765, 3.2578], + device='cuda:0'), covar=tensor([0.0758, 0.0756, 0.0951, 0.0318, 0.1256, 0.1533, 0.1242, 0.0850], + device='cuda:0'), in_proj_covar=tensor([0.0247, 0.0223, 0.0263, 0.0215, 0.0223, 0.0261, 0.0265, 0.0228], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:04:34,537 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:40,479 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.765e+02 3.269e+02 4.105e+02 6.649e+02, threshold=6.538e+02, percent-clipped=0.0 +2023-02-06 07:04:55,075 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:58,190 INFO [train.py:901] (0/4) Epoch 9, batch 2900, loss[loss=0.234, simple_loss=0.3171, pruned_loss=0.07543, over 8460.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3221, pruned_loss=0.08941, over 1613418.60 frames. ], batch size: 27, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:04,081 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 07:05:24,281 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 07:05:33,897 INFO [train.py:901] (0/4) Epoch 9, batch 2950, loss[loss=0.2599, simple_loss=0.3424, pruned_loss=0.08872, over 8098.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.322, pruned_loss=0.08861, over 1615759.99 frames. ], batch size: 23, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:51,259 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.115e+02 2.827e+02 3.390e+02 4.435e+02 7.404e+02, threshold=6.780e+02, percent-clipped=4.0 +2023-02-06 07:05:55,134 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. 
limit=5.0 +2023-02-06 07:06:08,207 INFO [train.py:901] (0/4) Epoch 9, batch 3000, loss[loss=0.2396, simple_loss=0.3183, pruned_loss=0.08043, over 8026.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3207, pruned_loss=0.08817, over 1609904.87 frames. ], batch size: 22, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:08,208 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 07:06:20,344 INFO [train.py:935] (0/4) Epoch 9, validation: loss=0.1965, simple_loss=0.2957, pruned_loss=0.04864, over 944034.00 frames. +2023-02-06 07:06:20,346 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 07:06:37,426 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:43,344 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:52,168 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:55,499 INFO [train.py:901] (0/4) Epoch 9, batch 3050, loss[loss=0.2337, simple_loss=0.3167, pruned_loss=0.07532, over 8108.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3208, pruned_loss=0.08839, over 1612441.01 frames. ], batch size: 23, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:55,703 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:07:00,566 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3283, 1.1279, 1.4564, 1.1604, 0.7564, 1.2304, 1.1856, 1.0669], + device='cuda:0'), covar=tensor([0.0548, 0.1343, 0.1723, 0.1394, 0.0549, 0.1548, 0.0669, 0.0664], + device='cuda:0'), in_proj_covar=tensor([0.0105, 0.0158, 0.0196, 0.0161, 0.0108, 0.0166, 0.0121, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 07:07:13,228 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.629e+02 3.194e+02 3.976e+02 7.575e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:07:29,789 INFO [train.py:901] (0/4) Epoch 9, batch 3100, loss[loss=0.2455, simple_loss=0.3153, pruned_loss=0.08778, over 7820.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.321, pruned_loss=0.08885, over 1614085.66 frames. ], batch size: 20, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:07:57,805 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3738, 1.4976, 1.7187, 1.3891, 1.0799, 1.4836, 1.8110, 1.7428], + device='cuda:0'), covar=tensor([0.0450, 0.1278, 0.1614, 0.1337, 0.0559, 0.1449, 0.0699, 0.0573], + device='cuda:0'), in_proj_covar=tensor([0.0106, 0.0159, 0.0197, 0.0162, 0.0109, 0.0167, 0.0121, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 07:08:04,167 INFO [train.py:901] (0/4) Epoch 9, batch 3150, loss[loss=0.2543, simple_loss=0.3205, pruned_loss=0.09407, over 8504.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3212, pruned_loss=0.08919, over 1612524.39 frames. 
], batch size: 28, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:05,040 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:21,137 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.202e+02 2.768e+02 3.401e+02 4.235e+02 8.418e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 07:08:21,976 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:37,976 INFO [train.py:901] (0/4) Epoch 9, batch 3200, loss[loss=0.1988, simple_loss=0.271, pruned_loss=0.06331, over 7929.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3227, pruned_loss=0.09026, over 1611919.19 frames. ], batch size: 20, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:38,989 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 07:08:44,726 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:47,156 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 07:09:12,191 INFO [train.py:901] (0/4) Epoch 9, batch 3250, loss[loss=0.2436, simple_loss=0.3134, pruned_loss=0.0869, over 7704.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3212, pruned_loss=0.08875, over 1611093.99 frames. ], batch size: 18, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:09:14,078 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 07:09:29,470 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.709e+02 3.362e+02 4.203e+02 8.128e+02, threshold=6.724e+02, percent-clipped=5.0 +2023-02-06 07:09:31,093 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6992, 2.2112, 3.4610, 1.2219, 2.5731, 2.1019, 1.7887, 2.3423], + device='cuda:0'), covar=tensor([0.1664, 0.1920, 0.0800, 0.3774, 0.1477, 0.2624, 0.1667, 0.2235], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0494, 0.0527, 0.0566, 0.0601, 0.0540, 0.0460, 0.0603], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:09:46,666 INFO [train.py:901] (0/4) Epoch 9, batch 3300, loss[loss=0.2651, simple_loss=0.3409, pruned_loss=0.09465, over 8349.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3226, pruned_loss=0.08962, over 1611753.28 frames. ], batch size: 26, lr: 8.57e-03, grad_scale: 8.0 +2023-02-06 07:10:04,398 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:10,457 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:11,136 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-68000.pt +2023-02-06 07:10:22,541 INFO [train.py:901] (0/4) Epoch 9, batch 3350, loss[loss=0.2395, simple_loss=0.2968, pruned_loss=0.09113, over 7791.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3232, pruned_loss=0.08966, over 1614344.05 frames. 
], batch size: 19, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:10:39,228 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.493e+02 3.108e+02 4.287e+02 1.101e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-06 07:10:40,596 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:49,086 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:56,400 INFO [train.py:901] (0/4) Epoch 9, batch 3400, loss[loss=0.2511, simple_loss=0.3012, pruned_loss=0.1005, over 5973.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3222, pruned_loss=0.08901, over 1614535.08 frames. ], batch size: 13, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:11:24,318 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68105.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:11:30,817 INFO [train.py:901] (0/4) Epoch 9, batch 3450, loss[loss=0.195, simple_loss=0.2629, pruned_loss=0.06353, over 7666.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3231, pruned_loss=0.08997, over 1616209.89 frames. ], batch size: 19, lr: 8.56e-03, grad_scale: 16.0 +2023-02-06 07:11:46,275 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0766, 2.4282, 1.8999, 2.8631, 1.5527, 1.5102, 1.9456, 2.4329], + device='cuda:0'), covar=tensor([0.0851, 0.0838, 0.1073, 0.0423, 0.1181, 0.1676, 0.1160, 0.0777], + device='cuda:0'), in_proj_covar=tensor([0.0244, 0.0225, 0.0263, 0.0219, 0.0223, 0.0262, 0.0266, 0.0227], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:11:48,139 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.585e+02 3.242e+02 3.955e+02 1.617e+03, threshold=6.484e+02, percent-clipped=7.0 +2023-02-06 07:11:59,832 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:05,797 INFO [train.py:901] (0/4) Epoch 9, batch 3500, loss[loss=0.3393, simple_loss=0.3907, pruned_loss=0.144, over 6962.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3239, pruned_loss=0.09074, over 1613872.14 frames. ], batch size: 71, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:08,690 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:17,955 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 07:12:40,955 INFO [train.py:901] (0/4) Epoch 9, batch 3550, loss[loss=0.2362, simple_loss=0.3113, pruned_loss=0.08052, over 8140.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3224, pruned_loss=0.08958, over 1614619.63 frames. ], batch size: 22, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:58,953 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.838e+02 3.387e+02 4.304e+02 7.616e+02, threshold=6.774e+02, percent-clipped=6.0 +2023-02-06 07:13:02,583 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:14,899 INFO [train.py:901] (0/4) Epoch 9, batch 3600, loss[loss=0.278, simple_loss=0.3522, pruned_loss=0.1019, over 8250.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3199, pruned_loss=0.08802, over 1614199.37 frames. 
], batch size: 24, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:13:19,101 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:49,592 INFO [train.py:901] (0/4) Epoch 9, batch 3650, loss[loss=0.2586, simple_loss=0.3317, pruned_loss=0.09272, over 8180.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.3193, pruned_loss=0.08746, over 1613575.54 frames. ], batch size: 23, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:14:08,231 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.637e+02 3.214e+02 4.100e+02 7.421e+02, threshold=6.428e+02, percent-clipped=2.0 +2023-02-06 07:14:09,680 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:14:18,952 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 07:14:25,007 INFO [train.py:901] (0/4) Epoch 9, batch 3700, loss[loss=0.2945, simple_loss=0.3676, pruned_loss=0.1107, over 8670.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3194, pruned_loss=0.08777, over 1612646.43 frames. ], batch size: 34, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:14:44,966 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9842, 1.8321, 1.8275, 1.7063, 1.4407, 1.8456, 2.4051, 2.5769], + device='cuda:0'), covar=tensor([0.0461, 0.1115, 0.1566, 0.1240, 0.0493, 0.1338, 0.0554, 0.0453], + device='cuda:0'), in_proj_covar=tensor([0.0106, 0.0157, 0.0195, 0.0159, 0.0108, 0.0165, 0.0119, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 07:14:57,586 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:14:58,733 INFO [train.py:901] (0/4) Epoch 9, batch 3750, loss[loss=0.2545, simple_loss=0.3361, pruned_loss=0.08641, over 8184.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3202, pruned_loss=0.08817, over 1617183.99 frames. ], batch size: 23, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:15:00,172 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:06,058 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:14,901 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:16,803 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.868e+02 3.639e+02 4.960e+02 1.282e+03, threshold=7.278e+02, percent-clipped=8.0 +2023-02-06 07:15:22,893 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68449.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:15:23,624 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:29,204 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:33,815 INFO [train.py:901] (0/4) Epoch 9, batch 3800, loss[loss=0.1881, simple_loss=0.2779, pruned_loss=0.04913, over 7814.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3183, pruned_loss=0.08731, over 1611893.02 frames. 
], batch size: 20, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:15:37,786 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 07:16:07,819 INFO [train.py:901] (0/4) Epoch 9, batch 3850, loss[loss=0.2475, simple_loss=0.324, pruned_loss=0.08551, over 8258.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3196, pruned_loss=0.08785, over 1615960.52 frames. ], batch size: 24, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:16:25,023 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 07:16:25,655 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.582e+02 3.048e+02 3.724e+02 6.674e+02, threshold=6.096e+02, percent-clipped=0.0 +2023-02-06 07:16:42,279 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68564.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:16:42,694 INFO [train.py:901] (0/4) Epoch 9, batch 3900, loss[loss=0.3332, simple_loss=0.399, pruned_loss=0.1338, over 8508.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3205, pruned_loss=0.0884, over 1616423.60 frames. ], batch size: 26, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:17:04,307 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:17:17,701 INFO [train.py:901] (0/4) Epoch 9, batch 3950, loss[loss=0.2115, simple_loss=0.2913, pruned_loss=0.06587, over 7655.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3197, pruned_loss=0.08816, over 1613721.28 frames. ], batch size: 19, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:17:35,330 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.589e+02 3.045e+02 4.133e+02 1.084e+03, threshold=6.090e+02, percent-clipped=3.0 +2023-02-06 07:17:51,772 INFO [train.py:901] (0/4) Epoch 9, batch 4000, loss[loss=0.2349, simple_loss=0.3214, pruned_loss=0.0742, over 8021.00 frames. ], tot_loss[loss=0.2462, simple_loss=0.3183, pruned_loss=0.08703, over 1611922.39 frames. ], batch size: 22, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:18:17,927 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68703.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:25,235 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68714.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:25,678 INFO [train.py:901] (0/4) Epoch 9, batch 4050, loss[loss=0.2046, simple_loss=0.2788, pruned_loss=0.06521, over 7549.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3199, pruned_loss=0.08812, over 1614859.88 frames. ], batch size: 18, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:18:41,426 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-06 07:18:42,621 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68739.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:43,699 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.478e+02 3.133e+02 3.692e+02 8.585e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 07:18:57,741 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:58,668 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.37 vs. 
limit=5.0 +2023-02-06 07:19:00,371 INFO [train.py:901] (0/4) Epoch 9, batch 4100, loss[loss=0.2727, simple_loss=0.3429, pruned_loss=0.1013, over 8255.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3194, pruned_loss=0.08763, over 1608424.64 frames. ], batch size: 24, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:19:19,602 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2139, 2.2501, 1.5504, 1.9745, 1.7973, 1.2345, 1.6907, 1.7489], + device='cuda:0'), covar=tensor([0.1132, 0.0305, 0.1017, 0.0484, 0.0579, 0.1363, 0.0785, 0.0761], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0238, 0.0314, 0.0301, 0.0307, 0.0324, 0.0348, 0.0317], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 07:19:23,693 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 07:19:34,807 INFO [train.py:901] (0/4) Epoch 9, batch 4150, loss[loss=0.2223, simple_loss=0.2916, pruned_loss=0.07649, over 7520.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3202, pruned_loss=0.08823, over 1607469.64 frames. ], batch size: 18, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:19:38,342 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68820.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:19:52,098 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.558e+02 3.576e+02 4.352e+02 8.740e+02, threshold=7.151e+02, percent-clipped=5.0 +2023-02-06 07:19:55,811 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68845.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:20:02,869 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 07:20:09,486 INFO [train.py:901] (0/4) Epoch 9, batch 4200, loss[loss=0.2229, simple_loss=0.3099, pruned_loss=0.06801, over 8460.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3201, pruned_loss=0.08765, over 1609387.34 frames. ], batch size: 25, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:20:12,587 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 07:20:17,156 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:20:23,068 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 07:20:44,846 INFO [train.py:901] (0/4) Epoch 9, batch 4250, loss[loss=0.2416, simple_loss=0.3161, pruned_loss=0.08353, over 8361.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.32, pruned_loss=0.08723, over 1610598.94 frames. ], batch size: 24, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:20:45,581 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 07:20:51,294 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4099, 1.9114, 3.0770, 2.3874, 2.7730, 2.0754, 1.7306, 1.3874], + device='cuda:0'), covar=tensor([0.3192, 0.3464, 0.0905, 0.2035, 0.1467, 0.1852, 0.1521, 0.3646], + device='cuda:0'), in_proj_covar=tensor([0.0854, 0.0816, 0.0698, 0.0808, 0.0902, 0.0761, 0.0686, 0.0733], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:21:02,849 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:21:03,456 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.856e+02 3.701e+02 4.402e+02 9.379e+02, threshold=7.403e+02, percent-clipped=2.0 +2023-02-06 07:21:20,748 INFO [train.py:901] (0/4) Epoch 9, batch 4300, loss[loss=0.203, simple_loss=0.2933, pruned_loss=0.05636, over 8242.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3206, pruned_loss=0.08807, over 1611009.25 frames. ], batch size: 22, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:21:55,042 INFO [train.py:901] (0/4) Epoch 9, batch 4350, loss[loss=0.1942, simple_loss=0.2757, pruned_loss=0.0564, over 7980.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3202, pruned_loss=0.08759, over 1612913.52 frames. ], batch size: 21, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:22:03,359 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 07:22:12,980 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.761e+02 3.203e+02 3.985e+02 6.558e+02, threshold=6.405e+02, percent-clipped=0.0 +2023-02-06 07:22:16,276 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 07:22:17,163 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1826, 1.5793, 1.5891, 1.4019, 1.2413, 1.3973, 1.7938, 1.7694], + device='cuda:0'), covar=tensor([0.0492, 0.1178, 0.1724, 0.1334, 0.0556, 0.1463, 0.0669, 0.0576], + device='cuda:0'), in_proj_covar=tensor([0.0106, 0.0159, 0.0197, 0.0161, 0.0110, 0.0167, 0.0122, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 07:22:17,702 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:23,135 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:27,146 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:29,683 INFO [train.py:901] (0/4) Epoch 9, batch 4400, loss[loss=0.2056, simple_loss=0.2695, pruned_loss=0.07089, over 7457.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3206, pruned_loss=0.08731, over 1617766.27 frames. ], batch size: 17, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:22:55,804 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 07:23:04,525 INFO [train.py:901] (0/4) Epoch 9, batch 4450, loss[loss=0.2025, simple_loss=0.2802, pruned_loss=0.06236, over 7430.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3185, pruned_loss=0.08631, over 1613368.94 frames. 
], batch size: 17, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:23:16,664 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:22,374 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.746e+02 3.298e+02 3.852e+02 8.052e+02, threshold=6.596e+02, percent-clipped=4.0 +2023-02-06 07:23:32,401 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1376, 4.1257, 3.7277, 1.7561, 3.6582, 3.7074, 3.7839, 3.3926], + device='cuda:0'), covar=tensor([0.0867, 0.0569, 0.1002, 0.4778, 0.0827, 0.1001, 0.1161, 0.0971], + device='cuda:0'), in_proj_covar=tensor([0.0442, 0.0350, 0.0369, 0.0459, 0.0363, 0.0345, 0.0362, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:23:33,153 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:37,252 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:39,006 INFO [train.py:901] (0/4) Epoch 9, batch 4500, loss[loss=0.2164, simple_loss=0.3058, pruned_loss=0.06353, over 8192.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3194, pruned_loss=0.08694, over 1614397.43 frames. ], batch size: 23, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:23:49,160 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 07:23:49,294 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:53,172 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:54,609 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8576, 2.1172, 1.6435, 2.6683, 1.0676, 1.5030, 1.7996, 2.1680], + device='cuda:0'), covar=tensor([0.0868, 0.0784, 0.1204, 0.0384, 0.1280, 0.1357, 0.0903, 0.0748], + device='cuda:0'), in_proj_covar=tensor([0.0250, 0.0225, 0.0266, 0.0223, 0.0228, 0.0264, 0.0268, 0.0230], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:24:13,186 INFO [train.py:901] (0/4) Epoch 9, batch 4550, loss[loss=0.2211, simple_loss=0.3118, pruned_loss=0.06517, over 8253.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3191, pruned_loss=0.08682, over 1615885.13 frames. 
], batch size: 24, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:24:19,720 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4942, 1.6703, 2.8381, 1.1915, 2.0407, 1.7774, 1.5011, 1.7827], + device='cuda:0'), covar=tensor([0.1645, 0.2047, 0.0646, 0.3735, 0.1406, 0.2689, 0.1724, 0.2055], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0491, 0.0522, 0.0564, 0.0601, 0.0542, 0.0462, 0.0603], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:24:31,949 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.522e+02 2.943e+02 3.743e+02 5.945e+02, threshold=5.886e+02, percent-clipped=0.0 +2023-02-06 07:24:33,334 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:24:47,696 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69263.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:24:48,919 INFO [train.py:901] (0/4) Epoch 9, batch 4600, loss[loss=0.236, simple_loss=0.3153, pruned_loss=0.07834, over 8287.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3188, pruned_loss=0.08697, over 1613187.75 frames. ], batch size: 23, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:24:57,779 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7622, 3.7629, 3.3713, 1.6352, 3.3373, 3.3793, 3.4062, 3.2099], + device='cuda:0'), covar=tensor([0.1131, 0.0685, 0.1214, 0.5752, 0.1024, 0.1235, 0.1653, 0.0937], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0356, 0.0375, 0.0467, 0.0369, 0.0351, 0.0368, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:25:07,510 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:22,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:24,607 INFO [train.py:901] (0/4) Epoch 9, batch 4650, loss[loss=0.2256, simple_loss=0.2984, pruned_loss=0.07644, over 7653.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.3188, pruned_loss=0.08667, over 1613341.15 frames. ], batch size: 19, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:25:38,790 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:40,085 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:42,427 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0 +2023-02-06 07:25:42,602 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.666e+02 3.298e+02 3.900e+02 8.712e+02, threshold=6.595e+02, percent-clipped=8.0 +2023-02-06 07:25:58,526 INFO [train.py:901] (0/4) Epoch 9, batch 4700, loss[loss=0.2393, simple_loss=0.3172, pruned_loss=0.08067, over 8339.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.3194, pruned_loss=0.08746, over 1616485.24 frames. 
], batch size: 25, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:26:14,060 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:26,888 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:31,844 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:33,681 INFO [train.py:901] (0/4) Epoch 9, batch 4750, loss[loss=0.2237, simple_loss=0.3082, pruned_loss=0.06961, over 8323.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3196, pruned_loss=0.08749, over 1618111.70 frames. ], batch size: 25, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:26:35,885 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:47,322 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2054, 2.3997, 1.9393, 2.8135, 1.4242, 1.7711, 2.0348, 2.4420], + device='cuda:0'), covar=tensor([0.0675, 0.0809, 0.0995, 0.0428, 0.1161, 0.1270, 0.0960, 0.0711], + device='cuda:0'), in_proj_covar=tensor([0.0248, 0.0223, 0.0264, 0.0222, 0.0227, 0.0265, 0.0270, 0.0231], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:26:49,137 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 07:26:50,996 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 07:26:51,644 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.564e+02 3.173e+02 4.227e+02 9.736e+02, threshold=6.346e+02, percent-clipped=4.0 +2023-02-06 07:26:53,203 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:08,536 INFO [train.py:901] (0/4) Epoch 9, batch 4800, loss[loss=0.2182, simple_loss=0.2977, pruned_loss=0.06936, over 8247.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3199, pruned_loss=0.08735, over 1620014.27 frames. ], batch size: 22, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:27:15,863 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9020, 2.2393, 3.6729, 2.7096, 2.9045, 2.4039, 2.0585, 1.7934], + device='cuda:0'), covar=tensor([0.2995, 0.3989, 0.0961, 0.2303, 0.1949, 0.1830, 0.1441, 0.3902], + device='cuda:0'), in_proj_covar=tensor([0.0859, 0.0826, 0.0710, 0.0816, 0.0911, 0.0768, 0.0688, 0.0743], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:27:41,298 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 07:27:43,207 INFO [train.py:901] (0/4) Epoch 9, batch 4850, loss[loss=0.2255, simple_loss=0.3067, pruned_loss=0.07219, over 8239.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3198, pruned_loss=0.08757, over 1617880.31 frames. 
], batch size: 24, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:27:46,744 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:49,409 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:53,327 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69530.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:28:00,627 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.717e+02 3.193e+02 3.973e+02 8.915e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:28:17,593 INFO [train.py:901] (0/4) Epoch 9, batch 4900, loss[loss=0.2758, simple_loss=0.3473, pruned_loss=0.1022, over 8568.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3214, pruned_loss=0.08859, over 1615565.21 frames. ], batch size: 31, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:28:33,114 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:28:47,471 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69607.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:28:53,429 INFO [train.py:901] (0/4) Epoch 9, batch 4950, loss[loss=0.2485, simple_loss=0.334, pruned_loss=0.08153, over 8257.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3212, pruned_loss=0.08827, over 1612369.48 frames. ], batch size: 22, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:29:06,606 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:09,347 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:10,456 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.734e+02 3.225e+02 4.131e+02 8.295e+02, threshold=6.450e+02, percent-clipped=5.0 +2023-02-06 07:29:13,218 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:27,240 INFO [train.py:901] (0/4) Epoch 9, batch 5000, loss[loss=0.2587, simple_loss=0.3146, pruned_loss=0.1014, over 7815.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3219, pruned_loss=0.0887, over 1612315.59 frames. ], batch size: 20, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:29:39,108 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:46,861 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69692.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:29:53,586 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:01,399 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 07:30:02,818 INFO [train.py:901] (0/4) Epoch 9, batch 5050, loss[loss=0.2617, simple_loss=0.3163, pruned_loss=0.1036, over 7704.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3199, pruned_loss=0.08784, over 1609497.51 frames. 
], batch size: 18, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:30:07,754 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69722.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:30:12,761 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:18,139 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 07:30:20,812 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.703e+02 3.249e+02 3.895e+02 8.845e+02, threshold=6.498e+02, percent-clipped=2.0 +2023-02-06 07:30:26,990 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:29,703 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6184, 1.3431, 2.8414, 1.1711, 2.0022, 2.9855, 3.0953, 2.5114], + device='cuda:0'), covar=tensor([0.1110, 0.1555, 0.0433, 0.2201, 0.0876, 0.0367, 0.0522, 0.0800], + device='cuda:0'), in_proj_covar=tensor([0.0255, 0.0289, 0.0250, 0.0278, 0.0264, 0.0231, 0.0307, 0.0291], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 07:30:30,968 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:36,707 INFO [train.py:901] (0/4) Epoch 9, batch 5100, loss[loss=0.2494, simple_loss=0.3196, pruned_loss=0.08961, over 7649.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3206, pruned_loss=0.08838, over 1610563.71 frames. ], batch size: 19, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:30:44,222 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69776.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:50,853 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0892, 1.6334, 1.5870, 1.2499, 0.9799, 1.3882, 1.5201, 1.5408], + device='cuda:0'), covar=tensor([0.0546, 0.1246, 0.1877, 0.1483, 0.0654, 0.1614, 0.0746, 0.0656], + device='cuda:0'), in_proj_covar=tensor([0.0106, 0.0160, 0.0199, 0.0163, 0.0110, 0.0168, 0.0122, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 07:30:59,030 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:01,905 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:03,212 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7517, 3.9219, 2.4951, 2.6348, 2.7911, 2.0877, 2.5717, 2.8436], + device='cuda:0'), covar=tensor([0.1813, 0.0283, 0.0918, 0.0864, 0.0748, 0.1248, 0.1153, 0.1188], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0241, 0.0315, 0.0301, 0.0310, 0.0324, 0.0345, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 07:31:11,529 INFO [train.py:901] (0/4) Epoch 9, batch 5150, loss[loss=0.2293, simple_loss=0.3076, pruned_loss=0.0755, over 8098.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3188, pruned_loss=0.0872, over 1609126.05 frames. 
], batch size: 23, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:31:29,714 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 2.410e+02 3.240e+02 3.896e+02 9.119e+02, threshold=6.481e+02, percent-clipped=3.0 +2023-02-06 07:31:32,788 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69845.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:46,668 INFO [train.py:901] (0/4) Epoch 9, batch 5200, loss[loss=0.2485, simple_loss=0.3251, pruned_loss=0.08593, over 8506.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3187, pruned_loss=0.0867, over 1613278.37 frames. ], batch size: 26, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:31:50,875 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:02,077 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3178, 1.8740, 2.8721, 2.2078, 2.5458, 2.0695, 1.7147, 1.1876], + device='cuda:0'), covar=tensor([0.3208, 0.3367, 0.0903, 0.2463, 0.1650, 0.1931, 0.1544, 0.3659], + device='cuda:0'), in_proj_covar=tensor([0.0853, 0.0820, 0.0700, 0.0807, 0.0907, 0.0758, 0.0686, 0.0735], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 07:32:06,757 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69895.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:11,376 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:17,771 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 07:32:20,229 INFO [train.py:901] (0/4) Epoch 9, batch 5250, loss[loss=0.2405, simple_loss=0.3129, pruned_loss=0.08402, over 8027.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3181, pruned_loss=0.08661, over 1613703.93 frames. ], batch size: 22, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:32:23,748 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69920.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:27,836 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:38,261 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 2.909e+02 3.504e+02 4.160e+02 7.603e+02, threshold=7.007e+02, percent-clipped=5.0 +2023-02-06 07:32:50,718 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:55,306 INFO [train.py:901] (0/4) Epoch 9, batch 5300, loss[loss=0.2175, simple_loss=0.3007, pruned_loss=0.06714, over 8126.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3179, pruned_loss=0.08637, over 1615139.46 frames. 
], batch size: 22, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:33:04,976 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69978.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:33:08,299 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:19,761 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-70000.pt +2023-02-06 07:33:22,906 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70003.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:33:24,830 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:31,224 INFO [train.py:901] (0/4) Epoch 9, batch 5350, loss[loss=0.2451, simple_loss=0.313, pruned_loss=0.08862, over 7655.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.319, pruned_loss=0.08773, over 1615328.07 frames. ], batch size: 19, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:33:42,004 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:45,278 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70036.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:33:48,395 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.965e+02 3.484e+02 4.155e+02 9.515e+02, threshold=6.968e+02, percent-clipped=2.0 +2023-02-06 07:33:57,281 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:05,194 INFO [train.py:901] (0/4) Epoch 9, batch 5400, loss[loss=0.24, simple_loss=0.3193, pruned_loss=0.08038, over 8025.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3209, pruned_loss=0.08895, over 1613697.47 frames. ], batch size: 22, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:34:14,803 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:31,571 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:40,958 INFO [train.py:901] (0/4) Epoch 9, batch 5450, loss[loss=0.2105, simple_loss=0.2989, pruned_loss=0.06099, over 8243.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3208, pruned_loss=0.0886, over 1614787.65 frames. ], batch size: 24, lr: 8.44e-03, grad_scale: 8.0 +2023-02-06 07:34:48,367 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:49,042 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70127.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:49,618 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:58,813 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.682e+02 3.191e+02 4.046e+02 1.028e+03, threshold=6.382e+02, percent-clipped=4.0 +2023-02-06 07:35:04,363 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 07:35:05,796 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70151.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:35:06,511 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70152.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:35:15,858 INFO [train.py:901] (0/4) Epoch 9, batch 5500, loss[loss=0.2113, simple_loss=0.2832, pruned_loss=0.06972, over 7649.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3196, pruned_loss=0.08753, over 1614629.32 frames. ], batch size: 19, lr: 8.44e-03, grad_scale: 16.0 +2023-02-06 07:35:50,277 INFO [train.py:901] (0/4) Epoch 9, batch 5550, loss[loss=0.2448, simple_loss=0.3252, pruned_loss=0.08222, over 8319.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.319, pruned_loss=0.08716, over 1615275.17 frames. ], batch size: 25, lr: 8.44e-03, grad_scale: 16.0 +2023-02-06 07:36:07,864 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.484e+02 3.031e+02 3.937e+02 9.276e+02, threshold=6.062e+02, percent-clipped=2.0 +2023-02-06 07:36:24,758 INFO [train.py:901] (0/4) Epoch 9, batch 5600, loss[loss=0.2091, simple_loss=0.2992, pruned_loss=0.0595, over 8126.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3182, pruned_loss=0.08637, over 1615209.47 frames. ], batch size: 22, lr: 8.43e-03, grad_scale: 16.0 +2023-02-06 07:36:34,072 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:36:35,514 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5955, 1.9772, 3.3676, 1.2315, 2.6318, 2.1134, 1.7937, 2.3531], + device='cuda:0'), covar=tensor([0.1762, 0.2276, 0.0882, 0.4000, 0.1412, 0.2594, 0.1681, 0.2062], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0496, 0.0524, 0.0564, 0.0600, 0.0537, 0.0463, 0.0603], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:36:59,387 INFO [train.py:901] (0/4) Epoch 9, batch 5650, loss[loss=0.2078, simple_loss=0.2799, pruned_loss=0.06787, over 7940.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.3173, pruned_loss=0.08572, over 1616678.90 frames. ], batch size: 20, lr: 8.43e-03, grad_scale: 8.0 +2023-02-06 07:37:08,096 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 07:37:18,052 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.609e+02 3.248e+02 4.005e+02 8.106e+02, threshold=6.497e+02, percent-clipped=5.0 +2023-02-06 07:37:32,964 INFO [train.py:901] (0/4) Epoch 9, batch 5700, loss[loss=0.2825, simple_loss=0.3536, pruned_loss=0.1057, over 8456.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.3197, pruned_loss=0.08723, over 1618557.45 frames. 
], batch size: 29, lr: 8.43e-03, grad_scale: 8.0 +2023-02-06 07:37:56,390 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6830, 2.2256, 3.6840, 2.9268, 3.2759, 2.3733, 1.9176, 1.7458], + device='cuda:0'), covar=tensor([0.3173, 0.3716, 0.0992, 0.2105, 0.1753, 0.1824, 0.1432, 0.3929], + device='cuda:0'), in_proj_covar=tensor([0.0861, 0.0829, 0.0707, 0.0806, 0.0905, 0.0765, 0.0686, 0.0737], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 07:38:02,536 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7127, 4.6524, 4.2256, 2.5945, 4.1006, 4.2404, 4.3595, 3.9333], + device='cuda:0'), covar=tensor([0.0798, 0.0477, 0.0804, 0.4594, 0.0822, 0.1081, 0.1124, 0.0869], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0353, 0.0367, 0.0468, 0.0365, 0.0350, 0.0362, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:38:02,659 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70407.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:38:08,014 INFO [train.py:901] (0/4) Epoch 9, batch 5750, loss[loss=0.2431, simple_loss=0.329, pruned_loss=0.07859, over 8249.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.32, pruned_loss=0.08775, over 1614362.73 frames. ], batch size: 24, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:38:12,148 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 07:38:20,262 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70432.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:38:27,529 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.028e+02 2.898e+02 3.376e+02 4.229e+02 8.555e+02, threshold=6.753e+02, percent-clipped=3.0 +2023-02-06 07:38:43,388 INFO [train.py:901] (0/4) Epoch 9, batch 5800, loss[loss=0.2322, simple_loss=0.3102, pruned_loss=0.0771, over 8122.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3192, pruned_loss=0.08705, over 1615703.40 frames. ], batch size: 22, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:38:48,302 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:38:48,579 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. limit=5.0 +2023-02-06 07:39:18,648 INFO [train.py:901] (0/4) Epoch 9, batch 5850, loss[loss=0.2158, simple_loss=0.2975, pruned_loss=0.06703, over 8321.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3182, pruned_loss=0.08641, over 1613712.27 frames. 
], batch size: 25, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:39:37,364 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.477e+02 3.501e+02 4.376e+02 8.995e+02, threshold=7.001e+02, percent-clipped=4.0 +2023-02-06 07:39:48,211 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2743, 1.8203, 2.8590, 2.2793, 2.4559, 2.1492, 1.6466, 1.1874], + device='cuda:0'), covar=tensor([0.3261, 0.3637, 0.0893, 0.1965, 0.1615, 0.1945, 0.1585, 0.3657], + device='cuda:0'), in_proj_covar=tensor([0.0853, 0.0819, 0.0703, 0.0805, 0.0900, 0.0761, 0.0683, 0.0733], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 07:39:53,321 INFO [train.py:901] (0/4) Epoch 9, batch 5900, loss[loss=0.246, simple_loss=0.321, pruned_loss=0.08553, over 8137.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3186, pruned_loss=0.08674, over 1614958.20 frames. ], batch size: 22, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:40:08,006 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:40:27,232 INFO [train.py:901] (0/4) Epoch 9, batch 5950, loss[loss=0.258, simple_loss=0.3317, pruned_loss=0.09216, over 8513.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3183, pruned_loss=0.08647, over 1614794.88 frames. ], batch size: 29, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:40:32,699 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:40:45,950 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.730e+02 3.193e+02 3.849e+02 7.953e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 07:41:02,078 INFO [train.py:901] (0/4) Epoch 9, batch 6000, loss[loss=0.2554, simple_loss=0.3294, pruned_loss=0.09068, over 8241.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3167, pruned_loss=0.08548, over 1613966.62 frames. ], batch size: 24, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:41:02,079 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 07:41:14,595 INFO [train.py:935] (0/4) Epoch 9, validation: loss=0.1952, simple_loss=0.2947, pruned_loss=0.0479, over 944034.00 frames. +2023-02-06 07:41:14,596 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 07:41:49,714 INFO [train.py:901] (0/4) Epoch 9, batch 6050, loss[loss=0.2735, simple_loss=0.3479, pruned_loss=0.09959, over 7981.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3185, pruned_loss=0.08665, over 1612574.10 frames. 
], batch size: 21, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:42:03,382 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6006, 2.8925, 1.7822, 2.2727, 2.2945, 1.4892, 2.0659, 2.1768], + device='cuda:0'), covar=tensor([0.1187, 0.0272, 0.0929, 0.0615, 0.0583, 0.1281, 0.0912, 0.0747], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0234, 0.0307, 0.0294, 0.0304, 0.0314, 0.0335, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 07:42:04,738 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:42:07,995 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.822e+02 3.602e+02 4.348e+02 1.269e+03, threshold=7.203e+02, percent-clipped=6.0 +2023-02-06 07:42:24,368 INFO [train.py:901] (0/4) Epoch 9, batch 6100, loss[loss=0.2633, simple_loss=0.3458, pruned_loss=0.0904, over 8560.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.318, pruned_loss=0.08625, over 1607966.83 frames. ], batch size: 39, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:42:42,094 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 07:42:53,129 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7305, 3.0174, 2.4159, 4.1585, 1.8070, 2.0291, 2.3235, 3.4290], + device='cuda:0'), covar=tensor([0.0757, 0.0884, 0.1040, 0.0217, 0.1241, 0.1549, 0.1401, 0.0768], + device='cuda:0'), in_proj_covar=tensor([0.0246, 0.0220, 0.0262, 0.0219, 0.0223, 0.0261, 0.0267, 0.0227], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:43:00,373 INFO [train.py:901] (0/4) Epoch 9, batch 6150, loss[loss=0.2663, simple_loss=0.347, pruned_loss=0.09282, over 8035.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3174, pruned_loss=0.08525, over 1614287.06 frames. ], batch size: 22, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:43:18,323 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.692e+02 3.232e+02 3.879e+02 7.941e+02, threshold=6.463e+02, percent-clipped=1.0 +2023-02-06 07:43:19,249 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:43:22,053 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7195, 2.2443, 4.5818, 1.2739, 2.9577, 2.3207, 1.6732, 2.7661], + device='cuda:0'), covar=tensor([0.1658, 0.2163, 0.0605, 0.3759, 0.1643, 0.2535, 0.1752, 0.2317], + device='cuda:0'), in_proj_covar=tensor([0.0488, 0.0503, 0.0533, 0.0574, 0.0616, 0.0548, 0.0469, 0.0615], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:0') +2023-02-06 07:43:33,938 INFO [train.py:901] (0/4) Epoch 9, batch 6200, loss[loss=0.2557, simple_loss=0.3315, pruned_loss=0.08993, over 8329.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3164, pruned_loss=0.08443, over 1612482.23 frames. 
], batch size: 26, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:43:36,216 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:44:08,529 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5367, 1.5385, 1.7532, 1.4377, 0.9638, 1.7615, 0.0752, 1.2928], + device='cuda:0'), covar=tensor([0.3122, 0.2051, 0.0606, 0.1585, 0.5252, 0.0703, 0.3695, 0.1691], + device='cuda:0'), in_proj_covar=tensor([0.0157, 0.0157, 0.0090, 0.0207, 0.0242, 0.0095, 0.0158, 0.0155], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 07:44:09,710 INFO [train.py:901] (0/4) Epoch 9, batch 6250, loss[loss=0.2359, simple_loss=0.3136, pruned_loss=0.07914, over 7812.00 frames. ], tot_loss[loss=0.244, simple_loss=0.317, pruned_loss=0.0855, over 1612006.98 frames. ], batch size: 20, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:44:28,454 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.792e+02 3.423e+02 4.432e+02 1.474e+03, threshold=6.847e+02, percent-clipped=7.0 +2023-02-06 07:44:43,938 INFO [train.py:901] (0/4) Epoch 9, batch 6300, loss[loss=0.2526, simple_loss=0.3277, pruned_loss=0.08877, over 8236.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3155, pruned_loss=0.08467, over 1614115.48 frames. ], batch size: 22, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:44:47,606 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5414, 1.9278, 3.2815, 1.3347, 2.3611, 2.0031, 1.6755, 2.1660], + device='cuda:0'), covar=tensor([0.1670, 0.2138, 0.0774, 0.3616, 0.1614, 0.2672, 0.1661, 0.2298], + device='cuda:0'), in_proj_covar=tensor([0.0488, 0.0504, 0.0533, 0.0576, 0.0614, 0.0550, 0.0468, 0.0611], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:45:03,875 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:16,112 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:19,447 INFO [train.py:901] (0/4) Epoch 9, batch 6350, loss[loss=0.2224, simple_loss=0.2983, pruned_loss=0.07329, over 7796.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3151, pruned_loss=0.08429, over 1614493.59 frames. 
], batch size: 19, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:45:21,635 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:23,674 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5022, 1.9770, 3.3452, 1.2913, 2.4474, 1.9298, 1.6450, 2.2803], + device='cuda:0'), covar=tensor([0.1737, 0.2141, 0.0626, 0.3774, 0.1500, 0.2677, 0.1633, 0.2038], + device='cuda:0'), in_proj_covar=tensor([0.0486, 0.0501, 0.0528, 0.0573, 0.0612, 0.0547, 0.0465, 0.0609], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:45:38,849 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.814e+02 3.293e+02 4.210e+02 8.338e+02, threshold=6.585e+02, percent-clipped=5.0 +2023-02-06 07:45:42,976 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:54,197 INFO [train.py:901] (0/4) Epoch 9, batch 6400, loss[loss=0.2686, simple_loss=0.3501, pruned_loss=0.09354, over 8102.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3161, pruned_loss=0.08454, over 1616068.69 frames. ], batch size: 23, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:46:28,849 INFO [train.py:901] (0/4) Epoch 9, batch 6450, loss[loss=0.22, simple_loss=0.3061, pruned_loss=0.06698, over 8369.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3154, pruned_loss=0.08452, over 1615052.82 frames. ], batch size: 24, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:46:48,393 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.655e+02 3.350e+02 4.272e+02 1.011e+03, threshold=6.701e+02, percent-clipped=3.0 +2023-02-06 07:47:03,607 INFO [train.py:901] (0/4) Epoch 9, batch 6500, loss[loss=0.2043, simple_loss=0.2774, pruned_loss=0.06557, over 7227.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3158, pruned_loss=0.08503, over 1615597.59 frames. ], batch size: 16, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:47:03,824 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4853, 1.4820, 1.7420, 1.4427, 0.9241, 1.7519, 0.0674, 1.1975], + device='cuda:0'), covar=tensor([0.2662, 0.2096, 0.0655, 0.1917, 0.5267, 0.0782, 0.4036, 0.1825], + device='cuda:0'), in_proj_covar=tensor([0.0157, 0.0158, 0.0094, 0.0208, 0.0242, 0.0096, 0.0158, 0.0156], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 07:47:37,714 INFO [train.py:901] (0/4) Epoch 9, batch 6550, loss[loss=0.1999, simple_loss=0.2703, pruned_loss=0.06476, over 7546.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3146, pruned_loss=0.08466, over 1613653.06 frames. ], batch size: 18, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:47:40,259 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 07:47:50,022 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 07:47:52,260 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5426, 1.9434, 1.9860, 1.0032, 2.2309, 1.3500, 0.5332, 1.7239], + device='cuda:0'), covar=tensor([0.0375, 0.0176, 0.0151, 0.0332, 0.0185, 0.0502, 0.0477, 0.0163], + device='cuda:0'), in_proj_covar=tensor([0.0375, 0.0302, 0.0246, 0.0358, 0.0287, 0.0448, 0.0340, 0.0324], + device='cuda:0'), out_proj_covar=tensor([1.1096e-04, 8.6896e-05, 7.1166e-05, 1.0374e-04, 8.4438e-05, 1.4164e-04, + 1.0073e-04, 9.5381e-05], device='cuda:0') +2023-02-06 07:47:58,021 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.500e+02 3.444e+02 4.178e+02 7.414e+02, threshold=6.887e+02, percent-clipped=1.0 +2023-02-06 07:48:10,529 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 07:48:13,163 INFO [train.py:901] (0/4) Epoch 9, batch 6600, loss[loss=0.2664, simple_loss=0.3487, pruned_loss=0.09208, over 8358.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3153, pruned_loss=0.0852, over 1612155.62 frames. ], batch size: 24, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:48:47,048 INFO [train.py:901] (0/4) Epoch 9, batch 6650, loss[loss=0.2946, simple_loss=0.3518, pruned_loss=0.1186, over 7055.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3173, pruned_loss=0.08602, over 1616522.34 frames. ], batch size: 74, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:48:51,522 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 07:48:55,997 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6251, 3.7215, 2.2576, 2.3489, 2.6466, 1.9517, 2.2921, 2.8301], + device='cuda:0'), covar=tensor([0.1676, 0.0310, 0.0990, 0.0841, 0.0714, 0.1302, 0.1154, 0.0996], + device='cuda:0'), in_proj_covar=tensor([0.0340, 0.0235, 0.0307, 0.0295, 0.0302, 0.0315, 0.0334, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 07:49:05,724 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.641e+02 3.214e+02 4.234e+02 1.005e+03, threshold=6.427e+02, percent-clipped=4.0 +2023-02-06 07:49:11,620 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.62 vs. limit=5.0 +2023-02-06 07:49:13,927 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:49:21,682 INFO [train.py:901] (0/4) Epoch 9, batch 6700, loss[loss=0.2468, simple_loss=0.3297, pruned_loss=0.0819, over 8331.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3173, pruned_loss=0.08661, over 1613915.69 frames. ], batch size: 25, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:49:41,045 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:49:56,536 INFO [train.py:901] (0/4) Epoch 9, batch 6750, loss[loss=0.2643, simple_loss=0.3428, pruned_loss=0.09296, over 8289.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3178, pruned_loss=0.08648, over 1616246.31 frames. ], batch size: 23, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:50:15,374 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 3.032e+02 3.821e+02 4.704e+02 1.129e+03, threshold=7.641e+02, percent-clipped=7.0 +2023-02-06 07:50:23,400 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-06 07:50:30,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3838, 1.5460, 2.3067, 1.2301, 1.6486, 1.6404, 1.4468, 1.5461], + device='cuda:0'), covar=tensor([0.1733, 0.2015, 0.0720, 0.3469, 0.1372, 0.2851, 0.1732, 0.1762], + device='cuda:0'), in_proj_covar=tensor([0.0487, 0.0500, 0.0529, 0.0575, 0.0611, 0.0548, 0.0469, 0.0612], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:50:30,842 INFO [train.py:901] (0/4) Epoch 9, batch 6800, loss[loss=0.2554, simple_loss=0.3156, pruned_loss=0.09756, over 7975.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3181, pruned_loss=0.08622, over 1618886.61 frames. ], batch size: 21, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:50:33,613 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:51:01,251 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:51:06,469 INFO [train.py:901] (0/4) Epoch 9, batch 6850, loss[loss=0.2673, simple_loss=0.3221, pruned_loss=0.1063, over 5979.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3184, pruned_loss=0.08678, over 1612713.48 frames. ], batch size: 13, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:51:14,498 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 07:51:25,245 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.628e+02 3.217e+02 4.054e+02 6.964e+02, threshold=6.433e+02, percent-clipped=0.0 +2023-02-06 07:51:40,045 INFO [train.py:901] (0/4) Epoch 9, batch 6900, loss[loss=0.3051, simple_loss=0.3704, pruned_loss=0.1199, over 8284.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3173, pruned_loss=0.086, over 1610925.36 frames. ], batch size: 23, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:51:48,539 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4563, 1.8124, 4.3527, 1.6589, 2.4184, 4.8397, 4.9014, 4.0268], + device='cuda:0'), covar=tensor([0.1056, 0.1519, 0.0308, 0.2093, 0.1058, 0.0261, 0.0365, 0.0801], + device='cuda:0'), in_proj_covar=tensor([0.0253, 0.0282, 0.0249, 0.0278, 0.0261, 0.0226, 0.0305, 0.0287], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 07:52:15,385 INFO [train.py:901] (0/4) Epoch 9, batch 6950, loss[loss=0.2202, simple_loss=0.3028, pruned_loss=0.06883, over 7973.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3173, pruned_loss=0.08564, over 1612682.23 frames. ], batch size: 21, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:52:23,477 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 07:52:29,643 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:52:35,516 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.622e+02 3.284e+02 3.978e+02 8.428e+02, threshold=6.567e+02, percent-clipped=2.0 +2023-02-06 07:52:50,437 INFO [train.py:901] (0/4) Epoch 9, batch 7000, loss[loss=0.2299, simple_loss=0.3112, pruned_loss=0.0743, over 8244.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.3175, pruned_loss=0.08549, over 1616435.26 frames. 
], batch size: 24, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:53:19,913 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 07:53:21,190 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-06 07:53:24,881 INFO [train.py:901] (0/4) Epoch 9, batch 7050, loss[loss=0.202, simple_loss=0.2841, pruned_loss=0.05993, over 8454.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3167, pruned_loss=0.08522, over 1610703.65 frames. ], batch size: 25, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:53:32,430 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71725.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:53:45,154 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.886e+02 3.338e+02 4.007e+02 6.250e+02, threshold=6.676e+02, percent-clipped=0.0 +2023-02-06 07:53:48,776 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6024, 1.6086, 5.7420, 2.0466, 5.0034, 4.7661, 5.3253, 5.0939], + device='cuda:0'), covar=tensor([0.0482, 0.4306, 0.0362, 0.3315, 0.1066, 0.0813, 0.0451, 0.0521], + device='cuda:0'), in_proj_covar=tensor([0.0447, 0.0552, 0.0553, 0.0513, 0.0582, 0.0495, 0.0485, 0.0545], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 07:53:50,707 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:53:59,278 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:54:00,447 INFO [train.py:901] (0/4) Epoch 9, batch 7100, loss[loss=0.2577, simple_loss=0.3235, pruned_loss=0.096, over 8249.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3177, pruned_loss=0.08588, over 1613137.30 frames. ], batch size: 22, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:54:08,147 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 07:54:16,148 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:54:34,522 INFO [train.py:901] (0/4) Epoch 9, batch 7150, loss[loss=0.25, simple_loss=0.3258, pruned_loss=0.08712, over 8581.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3168, pruned_loss=0.08502, over 1616801.17 frames. ], batch size: 31, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:54:54,742 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.560e+02 3.246e+02 4.043e+02 1.359e+03, threshold=6.493e+02, percent-clipped=7.0 +2023-02-06 07:55:10,753 INFO [train.py:901] (0/4) Epoch 9, batch 7200, loss[loss=0.2455, simple_loss=0.305, pruned_loss=0.093, over 7420.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3168, pruned_loss=0.08557, over 1617453.73 frames. ], batch size: 17, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:55:37,711 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-02-06 07:55:40,058 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1528, 2.9966, 3.4519, 2.3758, 1.8314, 3.6014, 0.8119, 2.1562], + device='cuda:0'), covar=tensor([0.1663, 0.1340, 0.0469, 0.2308, 0.4445, 0.0290, 0.4069, 0.2319], + device='cuda:0'), in_proj_covar=tensor([0.0157, 0.0157, 0.0092, 0.0205, 0.0244, 0.0095, 0.0154, 0.0155], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 07:55:41,108 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.24 vs. limit=5.0 +2023-02-06 07:55:43,960 INFO [train.py:901] (0/4) Epoch 9, batch 7250, loss[loss=0.2405, simple_loss=0.3243, pruned_loss=0.0783, over 8505.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3184, pruned_loss=0.08655, over 1619719.01 frames. ], batch size: 26, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:55:58,950 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71937.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:56:02,879 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.597e+02 3.277e+02 3.984e+02 9.565e+02, threshold=6.554e+02, percent-clipped=6.0 +2023-02-06 07:56:19,573 INFO [train.py:901] (0/4) Epoch 9, batch 7300, loss[loss=0.2281, simple_loss=0.2977, pruned_loss=0.07926, over 7425.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3184, pruned_loss=0.08667, over 1619153.76 frames. ], batch size: 17, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:56:28,819 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:56:29,624 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2907, 2.6748, 2.1902, 4.0109, 1.8795, 1.9164, 2.1447, 3.0277], + device='cuda:0'), covar=tensor([0.0777, 0.1027, 0.1034, 0.0206, 0.1112, 0.1411, 0.1258, 0.0874], + device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0218, 0.0258, 0.0215, 0.0219, 0.0257, 0.0260, 0.0224], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 07:56:33,716 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3635, 2.0296, 3.3938, 1.2217, 2.4996, 1.7967, 1.5411, 2.2940], + device='cuda:0'), covar=tensor([0.1741, 0.2041, 0.0660, 0.3753, 0.1421, 0.2766, 0.1765, 0.2062], + device='cuda:0'), in_proj_covar=tensor([0.0480, 0.0493, 0.0527, 0.0569, 0.0605, 0.0541, 0.0462, 0.0606], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:56:43,618 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-72000.pt +2023-02-06 07:56:54,481 INFO [train.py:901] (0/4) Epoch 9, batch 7350, loss[loss=0.2016, simple_loss=0.279, pruned_loss=0.0621, over 7810.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3186, pruned_loss=0.08647, over 1621181.52 frames. ], batch size: 20, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:57:02,499 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 07:57:04,268 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.80 vs. 
limit=5.0 +2023-02-06 07:57:13,532 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.925e+02 3.749e+02 4.804e+02 1.068e+03, threshold=7.499e+02, percent-clipped=9.0 +2023-02-06 07:57:22,475 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 07:57:30,037 INFO [train.py:901] (0/4) Epoch 9, batch 7400, loss[loss=0.212, simple_loss=0.2763, pruned_loss=0.07387, over 7820.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3181, pruned_loss=0.08617, over 1621151.78 frames. ], batch size: 19, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:57:40,384 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6149, 2.3329, 4.6426, 1.4391, 3.0009, 2.1066, 1.7380, 2.8183], + device='cuda:0'), covar=tensor([0.1617, 0.2007, 0.0520, 0.3625, 0.1504, 0.2659, 0.1677, 0.2142], + device='cuda:0'), in_proj_covar=tensor([0.0481, 0.0494, 0.0530, 0.0569, 0.0608, 0.0539, 0.0463, 0.0609], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 07:57:50,406 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:58:03,889 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 07:58:05,139 INFO [train.py:901] (0/4) Epoch 9, batch 7450, loss[loss=0.2271, simple_loss=0.2887, pruned_loss=0.08274, over 7932.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3189, pruned_loss=0.08608, over 1622838.41 frames. ], batch size: 20, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:58:23,972 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.410e+02 3.229e+02 3.860e+02 9.903e+02, threshold=6.459e+02, percent-clipped=1.0 +2023-02-06 07:58:26,821 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:58:38,700 INFO [train.py:901] (0/4) Epoch 9, batch 7500, loss[loss=0.2336, simple_loss=0.3052, pruned_loss=0.081, over 8092.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3182, pruned_loss=0.08605, over 1620170.80 frames. ], batch size: 21, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:09,166 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1661, 2.6736, 3.1297, 1.1334, 3.1942, 1.9941, 1.5567, 2.0761], + device='cuda:0'), covar=tensor([0.0602, 0.0239, 0.0255, 0.0523, 0.0334, 0.0513, 0.0582, 0.0309], + device='cuda:0'), in_proj_covar=tensor([0.0381, 0.0311, 0.0257, 0.0371, 0.0298, 0.0456, 0.0349, 0.0333], + device='cuda:0'), out_proj_covar=tensor([1.1208e-04, 8.9315e-05, 7.4195e-05, 1.0741e-04, 8.7533e-05, 1.4361e-04, + 1.0318e-04, 9.7734e-05], device='cuda:0') +2023-02-06 07:59:15,018 INFO [train.py:901] (0/4) Epoch 9, batch 7550, loss[loss=0.239, simple_loss=0.3204, pruned_loss=0.0788, over 8326.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3183, pruned_loss=0.08666, over 1621467.65 frames. ], batch size: 25, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:33,687 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.819e+02 3.433e+02 4.309e+02 8.597e+02, threshold=6.865e+02, percent-clipped=4.0 +2023-02-06 07:59:48,148 INFO [train.py:901] (0/4) Epoch 9, batch 7600, loss[loss=0.2761, simple_loss=0.3429, pruned_loss=0.1046, over 8457.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.3193, pruned_loss=0.08774, over 1617537.14 frames. 
], batch size: 27, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:58,686 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:22,658 INFO [train.py:901] (0/4) Epoch 9, batch 7650, loss[loss=0.2655, simple_loss=0.321, pruned_loss=0.105, over 7939.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3178, pruned_loss=0.08638, over 1616534.80 frames. ], batch size: 20, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:00:42,776 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.706e+02 3.178e+02 3.983e+02 6.818e+02, threshold=6.357e+02, percent-clipped=0.0 +2023-02-06 08:00:43,581 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:47,004 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:53,771 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3595, 2.6877, 1.8655, 2.2245, 2.1199, 1.4903, 1.9292, 2.0076], + device='cuda:0'), covar=tensor([0.1287, 0.0311, 0.0868, 0.0522, 0.0593, 0.1266, 0.0842, 0.0813], + device='cuda:0'), in_proj_covar=tensor([0.0342, 0.0237, 0.0312, 0.0298, 0.0303, 0.0322, 0.0336, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 08:00:57,577 INFO [train.py:901] (0/4) Epoch 9, batch 7700, loss[loss=0.208, simple_loss=0.2948, pruned_loss=0.06061, over 8326.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3175, pruned_loss=0.08638, over 1612416.20 frames. ], batch size: 25, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:03,906 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:09,788 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 08:01:18,720 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:22,917 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3041, 1.5307, 1.5171, 1.3603, 0.9247, 1.3781, 1.8296, 1.4890], + device='cuda:0'), covar=tensor([0.0502, 0.1238, 0.1872, 0.1455, 0.0615, 0.1573, 0.0685, 0.0629], + device='cuda:0'), in_proj_covar=tensor([0.0105, 0.0157, 0.0198, 0.0162, 0.0108, 0.0168, 0.0119, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 08:01:32,219 INFO [train.py:901] (0/4) Epoch 9, batch 7750, loss[loss=0.2488, simple_loss=0.3189, pruned_loss=0.08935, over 7808.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.318, pruned_loss=0.08626, over 1610801.46 frames. 
], batch size: 20, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:53,040 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.670e+02 3.267e+02 4.054e+02 1.108e+03, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 08:01:57,608 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3529, 1.7971, 2.7240, 1.1300, 1.9736, 1.6357, 1.5331, 1.6791], + device='cuda:0'), covar=tensor([0.1860, 0.2284, 0.0823, 0.4283, 0.1672, 0.3120, 0.1900, 0.2369], + device='cuda:0'), in_proj_covar=tensor([0.0478, 0.0495, 0.0528, 0.0571, 0.0607, 0.0537, 0.0463, 0.0608], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 08:02:08,200 INFO [train.py:901] (0/4) Epoch 9, batch 7800, loss[loss=0.2125, simple_loss=0.2968, pruned_loss=0.06408, over 8248.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3166, pruned_loss=0.08569, over 1606766.93 frames. ], batch size: 24, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:02:10,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6719, 2.0984, 3.2146, 2.4951, 2.8416, 2.3157, 1.9422, 1.5821], + device='cuda:0'), covar=tensor([0.2818, 0.3398, 0.0896, 0.2032, 0.1520, 0.1623, 0.1334, 0.3397], + device='cuda:0'), in_proj_covar=tensor([0.0851, 0.0818, 0.0692, 0.0805, 0.0892, 0.0754, 0.0681, 0.0736], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 08:02:25,495 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:02:41,269 INFO [train.py:901] (0/4) Epoch 9, batch 7850, loss[loss=0.2454, simple_loss=0.3281, pruned_loss=0.08133, over 8471.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3159, pruned_loss=0.08495, over 1604306.44 frames. ], batch size: 25, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:02:59,530 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.530e+02 3.201e+02 3.890e+02 8.475e+02, threshold=6.403e+02, percent-clipped=6.0 +2023-02-06 08:03:14,032 INFO [train.py:901] (0/4) Epoch 9, batch 7900, loss[loss=0.2389, simple_loss=0.294, pruned_loss=0.09185, over 7695.00 frames. ], tot_loss[loss=0.244, simple_loss=0.317, pruned_loss=0.08547, over 1609492.81 frames. ], batch size: 18, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:03:38,853 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0710, 3.7311, 2.1716, 2.9406, 2.6744, 1.5722, 2.6203, 2.8944], + device='cuda:0'), covar=tensor([0.1547, 0.0262, 0.1124, 0.0620, 0.0750, 0.1582, 0.1018, 0.1062], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0233, 0.0309, 0.0294, 0.0299, 0.0318, 0.0332, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 08:03:41,420 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:03:47,073 INFO [train.py:901] (0/4) Epoch 9, batch 7950, loss[loss=0.2573, simple_loss=0.3292, pruned_loss=0.09265, over 8649.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3188, pruned_loss=0.08624, over 1613596.41 frames. 
+2023-02-06 08:04:05,356 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.498e+02 3.176e+02 4.184e+02 8.861e+02, threshold=6.353e+02, percent-clipped=6.0
+2023-02-06 08:04:11,439 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72652.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:04:19,766 INFO [train.py:901] (0/4) Epoch 9, batch 8000, loss[loss=0.1991, simple_loss=0.2709, pruned_loss=0.06367, over 7789.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3191, pruned_loss=0.08649, over 1608958.08 frames. ], batch size: 19, lr: 8.29e-03, grad_scale: 8.0
+2023-02-06 08:04:27,787 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72677.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:04:28,664 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 08:04:34,984 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72688.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:04:52,985 INFO [train.py:901] (0/4) Epoch 9, batch 8050, loss[loss=0.2209, simple_loss=0.2872, pruned_loss=0.07728, over 7553.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3156, pruned_loss=0.08579, over 1586000.70 frames. ], batch size: 18, lr: 8.29e-03, grad_scale: 8.0
+2023-02-06 08:05:11,531 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.635e+02 3.102e+02 3.711e+02 7.462e+02, threshold=6.205e+02, percent-clipped=1.0
+2023-02-06 08:05:15,393 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-9.pt
+2023-02-06 08:05:27,546 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 08:05:31,306 INFO [train.py:901] (0/4) Epoch 10, batch 0, loss[loss=0.267, simple_loss=0.3297, pruned_loss=0.1021, over 8114.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3297, pruned_loss=0.1021, over 8114.00 frames. ], batch size: 23, lr: 7.88e-03, grad_scale: 8.0
+2023-02-06 08:05:31,306 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 08:05:43,263 INFO [train.py:935] (0/4) Epoch 10, validation: loss=0.1954, simple_loss=0.295, pruned_loss=0.0479, over 944034.00 frames.
+2023-02-06 08:05:43,264 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-06 08:05:57,152 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 08:06:17,969 INFO [train.py:901] (0/4) Epoch 10, batch 50, loss[loss=0.2856, simple_loss=0.3468, pruned_loss=0.1122, over 8148.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3185, pruned_loss=0.0874, over 363884.40 frames. ], batch size: 22, lr: 7.88e-03, grad_scale: 8.0
+2023-02-06 08:06:21,766 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72803.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:06:31,243 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 08:06:42,350 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2497, 1.4466, 1.5906, 1.4193, 1.0854, 1.4058, 1.7389, 1.4886],
+       device='cuda:0'), covar=tensor([0.0517, 0.1274, 0.1762, 0.1444, 0.0601, 0.1537, 0.0710, 0.0644],
+       device='cuda:0'), in_proj_covar=tensor([0.0106, 0.0157, 0.0197, 0.0162, 0.0107, 0.0168, 0.0121, 0.0140],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:0')
+2023-02-06 08:06:49,409 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.716e+02 3.124e+02 3.887e+02 7.160e+02, threshold=6.248e+02, percent-clipped=5.0
+2023-02-06 08:06:52,306 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 08:06:52,968 INFO [train.py:901] (0/4) Epoch 10, batch 100, loss[loss=0.2959, simple_loss=0.3595, pruned_loss=0.1162, over 8547.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3175, pruned_loss=0.08574, over 642401.97 frames. ], batch size: 31, lr: 7.88e-03, grad_scale: 8.0
+2023-02-06 08:07:03,832 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72862.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:07:22,329 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72887.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:07:22,976 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3217, 1.3423, 1.6069, 1.3879, 1.0816, 1.3051, 1.7949, 1.6326],
+       device='cuda:0'), covar=tensor([0.0499, 0.1342, 0.1781, 0.1455, 0.0596, 0.1623, 0.0683, 0.0611],
+       device='cuda:0'), in_proj_covar=tensor([0.0105, 0.0156, 0.0196, 0.0161, 0.0107, 0.0167, 0.0120, 0.0139],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:0')
+2023-02-06 08:07:30,284 INFO [train.py:901] (0/4) Epoch 10, batch 150, loss[loss=0.2551, simple_loss=0.3259, pruned_loss=0.09217, over 8509.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3189, pruned_loss=0.08648, over 862262.50 frames. ], batch size: 29, lr: 7.87e-03, grad_scale: 8.0
+2023-02-06 08:07:33,248 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4015, 1.9407, 3.2353, 1.1513, 2.2423, 1.8160, 1.5442, 2.1123],
+       device='cuda:0'), covar=tensor([0.1832, 0.2040, 0.0674, 0.4154, 0.1694, 0.2903, 0.1866, 0.2360],
+       device='cuda:0'), in_proj_covar=tensor([0.0485, 0.0498, 0.0540, 0.0575, 0.0615, 0.0544, 0.0470, 0.0616],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0003],
+       device='cuda:0')
+2023-02-06 08:07:40,087 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72912.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:08:01,162 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.670e+02 3.307e+02 4.288e+02 9.841e+02, threshold=6.614e+02, percent-clipped=3.0
+2023-02-06 08:08:04,566 INFO [train.py:901] (0/4) Epoch 10, batch 200, loss[loss=0.2025, simple_loss=0.2916, pruned_loss=0.05666, over 7935.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.318, pruned_loss=0.08487, over 1028845.18 frames. ], batch size: 20, lr: 7.87e-03, grad_scale: 8.0
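Every `Exclude cut with ID ... from training` warning comes from a duration filter over the training cuts: after speed perturbation (the `_sp0.9` / `_sp1.1` suffixes) utterances that are too long (27.5 s, 28.7 s) or too short (0.94 s, 0.98 s) are dropped before batching. A minimal sketch, assuming bounds of roughly 1 to 20 seconds; the exact limits used for this run are not stated in the log.

```python
import logging

def remove_short_and_long_utt(c):
    # Assumed bounds: every excluded duration in this log is below
    # 1 s or above 20 s, consistent with a filter like this.
    if c.duration < 1.0 or c.duration > 20.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# Given train_cuts as a lhotse CutSet:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```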
+2023-02-06 08:08:10,995 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5799, 3.0874, 2.7202, 4.3080, 1.9346, 1.9871, 2.3374, 3.3867],
+       device='cuda:0'), covar=tensor([0.0800, 0.0843, 0.0908, 0.0208, 0.1151, 0.1521, 0.1233, 0.0758],
+       device='cuda:0'), in_proj_covar=tensor([0.0243, 0.0220, 0.0261, 0.0218, 0.0223, 0.0257, 0.0263, 0.0225],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-06 08:08:29,449 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72982.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 08:08:41,028 INFO [train.py:901] (0/4) Epoch 10, batch 250, loss[loss=0.2711, simple_loss=0.3403, pruned_loss=0.101, over 8579.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3177, pruned_loss=0.08465, over 1163516.27 frames. ], batch size: 31, lr: 7.87e-03, grad_scale: 8.0
+2023-02-06 08:08:47,852 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 08:08:56,879 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 08:09:12,574 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.688e+02 3.158e+02 3.760e+02 5.735e+02, threshold=6.316e+02, percent-clipped=0.0
+2023-02-06 08:09:16,056 INFO [train.py:901] (0/4) Epoch 10, batch 300, loss[loss=0.3038, simple_loss=0.3592, pruned_loss=0.1242, over 7101.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3186, pruned_loss=0.085, over 1266150.07 frames. ], batch size: 72, lr: 7.87e-03, grad_scale: 8.0
+2023-02-06 08:09:23,794 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73059.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:09:40,906 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73084.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:09:51,622 INFO [train.py:901] (0/4) Epoch 10, batch 350, loss[loss=0.2292, simple_loss=0.3194, pruned_loss=0.06953, over 8323.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.3174, pruned_loss=0.08449, over 1343855.71 frames. ], batch size: 26, lr: 7.86e-03, grad_scale: 16.0
+2023-02-06 08:10:07,137 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3686, 1.1383, 4.5366, 1.7182, 4.0109, 3.7976, 4.0634, 3.9117],
+       device='cuda:0'), covar=tensor([0.0427, 0.4348, 0.0388, 0.3182, 0.0992, 0.0747, 0.0471, 0.0584],
+       device='cuda:0'), in_proj_covar=tensor([0.0444, 0.0558, 0.0554, 0.0517, 0.0587, 0.0501, 0.0489, 0.0552],
+       device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 08:10:23,510 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 2.632e+02 3.058e+02 3.924e+02 7.931e+02, threshold=6.116e+02, percent-clipped=5.0
+2023-02-06 08:10:26,925 INFO [train.py:901] (0/4) Epoch 10, batch 400, loss[loss=0.2338, simple_loss=0.3103, pruned_loss=0.07868, over 7646.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.3164, pruned_loss=0.08435, over 1403413.74 frames. ], batch size: 19, lr: 7.86e-03, grad_scale: 16.0
+2023-02-06 08:11:01,346 INFO [train.py:901] (0/4) Epoch 10, batch 450, loss[loss=0.263, simple_loss=0.3342, pruned_loss=0.09588, over 8601.00 frames. ], tot_loss[loss=0.245, simple_loss=0.3182, pruned_loss=0.08584, over 1452681.27 frames. ], batch size: 31, lr: 7.86e-03, grad_scale: 16.0
+2023-02-06 08:11:33,882 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.629e+02 3.140e+02 3.877e+02 8.143e+02, threshold=6.279e+02, percent-clipped=4.0
+2023-02-06 08:11:37,159 INFO [train.py:901] (0/4) Epoch 10, batch 500, loss[loss=0.2526, simple_loss=0.3191, pruned_loss=0.09307, over 7149.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3183, pruned_loss=0.08606, over 1488469.40 frames. ], batch size: 71, lr: 7.86e-03, grad_scale: 16.0
+2023-02-06 08:11:42,539 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73256.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:12:10,506 INFO [train.py:901] (0/4) Epoch 10, batch 550, loss[loss=0.2661, simple_loss=0.3289, pruned_loss=0.1016, over 8368.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3167, pruned_loss=0.08559, over 1511036.56 frames. ], batch size: 24, lr: 7.85e-03, grad_scale: 16.0
+2023-02-06 08:12:19,349 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73311.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:12:21,520 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0
+2023-02-06 08:12:29,143 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73326.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 08:12:41,607 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.475e+02 3.100e+02 3.629e+02 1.040e+03, threshold=6.201e+02, percent-clipped=3.0
+2023-02-06 08:12:44,825 INFO [train.py:901] (0/4) Epoch 10, batch 600, loss[loss=0.2561, simple_loss=0.3241, pruned_loss=0.094, over 8510.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3166, pruned_loss=0.08569, over 1534006.27 frames. ], batch size: 26, lr: 7.85e-03, grad_scale: 16.0
+2023-02-06 08:12:56,213 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 08:13:01,733 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73371.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:13:20,018 INFO [train.py:901] (0/4) Epoch 10, batch 650, loss[loss=0.2072, simple_loss=0.2815, pruned_loss=0.06647, over 7672.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3169, pruned_loss=0.08603, over 1550792.13 frames. ], batch size: 18, lr: 7.85e-03, grad_scale: 16.0
+2023-02-06 08:13:50,101 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73441.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 08:13:51,168 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.848e+02 2.469e+02 3.040e+02 3.840e+02 6.530e+02, threshold=6.081e+02, percent-clipped=1.0
+2023-02-06 08:13:54,595 INFO [train.py:901] (0/4) Epoch 10, batch 700, loss[loss=0.3072, simple_loss=0.361, pruned_loss=0.1267, over 6957.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.317, pruned_loss=0.0859, over 1558883.23 frames. ], batch size: 71, lr: 7.84e-03, grad_scale: 16.0
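The learning rate above decays smoothly with the batch count and steps down again when epoch 10 starts (8.29e-03 at the end of epoch 9, 7.88e-03 at epoch 10 batch 0). That shape matches icefall's Eden schedule, sketched below; treat the `lr_batches` and `lr_epochs` constants as assumptions for this particular run.

```python
def eden_lr(base_lr: float, batch: int, epoch: int,
            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
    # lr shrinks with both the global batch count and the epoch
    # number; each factor is ~1 early on and a power-law decay later.
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor
```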
+2023-02-06 08:13:55,439 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4674, 1.4346, 4.4460, 1.6711, 2.4417, 5.2171, 4.9936, 4.4690],
+       device='cuda:0'), covar=tensor([0.1062, 0.1787, 0.0271, 0.2004, 0.1009, 0.0158, 0.0348, 0.0577],
+       device='cuda:0'), in_proj_covar=tensor([0.0254, 0.0289, 0.0253, 0.0282, 0.0264, 0.0230, 0.0313, 0.0290],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:14:31,488 INFO [train.py:901] (0/4) Epoch 10, batch 750, loss[loss=0.2349, simple_loss=0.3147, pruned_loss=0.0775, over 8578.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3164, pruned_loss=0.08542, over 1570722.66 frames. ], batch size: 49, lr: 7.84e-03, grad_scale: 16.0
+2023-02-06 08:14:33,783 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 08:14:45,806 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 08:14:54,768 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 08:15:02,255 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.756e+02 3.307e+02 3.958e+02 8.111e+02, threshold=6.615e+02, percent-clipped=6.0
+2023-02-06 08:15:02,474 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1591, 1.9785, 2.1577, 2.0899, 1.2063, 2.0775, 2.3214, 2.4121],
+       device='cuda:0'), covar=tensor([0.0421, 0.1101, 0.1577, 0.1182, 0.0576, 0.1258, 0.0608, 0.0491],
+       device='cuda:0'), in_proj_covar=tensor([0.0105, 0.0158, 0.0198, 0.0162, 0.0108, 0.0167, 0.0120, 0.0141],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+       device='cuda:0')
+2023-02-06 08:15:05,724 INFO [train.py:901] (0/4) Epoch 10, batch 800, loss[loss=0.2589, simple_loss=0.3315, pruned_loss=0.09318, over 8099.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3159, pruned_loss=0.08542, over 1580647.24 frames. ], batch size: 23, lr: 7.84e-03, grad_scale: 16.0
+2023-02-06 08:15:41,996 INFO [train.py:901] (0/4) Epoch 10, batch 850, loss[loss=0.2244, simple_loss=0.308, pruned_loss=0.07043, over 8190.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3155, pruned_loss=0.08504, over 1587988.47 frames. ], batch size: 23, lr: 7.84e-03, grad_scale: 16.0
+2023-02-06 08:15:45,293 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 08:15:47,435 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-02-06 08:15:49,897 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73608.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:16:02,993 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73627.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:16:13,772 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.847e+02 3.470e+02 4.482e+02 1.720e+03, threshold=6.940e+02, percent-clipped=10.0
+2023-02-06 08:16:17,271 INFO [train.py:901] (0/4) Epoch 10, batch 900, loss[loss=0.2301, simple_loss=0.291, pruned_loss=0.08456, over 7425.00 frames. ], tot_loss[loss=0.243, simple_loss=0.316, pruned_loss=0.08505, over 1597020.60 frames. ], batch size: 17, lr: 7.83e-03, grad_scale: 16.0
+2023-02-06 08:16:20,245 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73652.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:16:22,229 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73655.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:16:31,260 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1459, 1.5826, 3.3738, 1.3153, 2.1840, 3.7129, 3.6624, 3.1245],
+       device='cuda:0'), covar=tensor([0.0952, 0.1394, 0.0335, 0.2144, 0.0968, 0.0217, 0.0480, 0.0617],
+       device='cuda:0'), in_proj_covar=tensor([0.0257, 0.0291, 0.0254, 0.0282, 0.0267, 0.0232, 0.0315, 0.0289],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:16:52,147 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73697.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 08:16:52,449 INFO [train.py:901] (0/4) Epoch 10, batch 950, loss[loss=0.4566, simple_loss=0.4671, pruned_loss=0.2231, over 7064.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3182, pruned_loss=0.08646, over 1607638.75 frames. ], batch size: 72, lr: 7.83e-03, grad_scale: 8.0
+2023-02-06 08:17:10,363 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73722.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 08:17:12,004 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-02-06 08:17:18,320 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 08:17:20,528 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2433, 2.7129, 3.1504, 1.1664, 3.0815, 2.0049, 1.5430, 1.8743],
+       device='cuda:0'), covar=tensor([0.0536, 0.0260, 0.0186, 0.0533, 0.0337, 0.0542, 0.0561, 0.0377],
+       device='cuda:0'), in_proj_covar=tensor([0.0371, 0.0305, 0.0256, 0.0360, 0.0288, 0.0454, 0.0340, 0.0332],
+       device='cuda:0'), out_proj_covar=tensor([1.0883e-04, 8.6973e-05, 7.3696e-05, 1.0355e-04, 8.3810e-05, 1.4272e-04,
+       1.0006e-04, 9.7171e-05], device='cuda:0')
+2023-02-06 08:17:24,917 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.751e+02 3.323e+02 4.211e+02 1.163e+03, threshold=6.645e+02, percent-clipped=9.0
+2023-02-06 08:17:27,463 INFO [train.py:901] (0/4) Epoch 10, batch 1000, loss[loss=0.2276, simple_loss=0.2974, pruned_loss=0.07886, over 8300.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3178, pruned_loss=0.0859, over 1613600.64 frames. ], batch size: 23, lr: 7.83e-03, grad_scale: 8.0
+2023-02-06 08:17:28,913 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:17:42,250 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73770.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:17:50,787 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 08:18:00,668 INFO [train.py:901] (0/4) Epoch 10, batch 1050, loss[loss=0.2637, simple_loss=0.3363, pruned_loss=0.09551, over 8238.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3188, pruned_loss=0.08662, over 1615294.77 frames. ], batch size: 24, lr: 7.83e-03, grad_scale: 8.0
+2023-02-06 08:18:01,394 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
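Each `train.py:901` line shows two losses: `loss[...]` for the current batch and `tot_loss[...]` for a running, frame-weighted average. The frame count attached to `tot_loss` hovers around 1.6M, roughly two hundred batches of ~8000 frames, which is consistent with a leaky accumulator like the sketch below; the decay constant here is an assumption, not a confirmed setting.

```python
def update_tot_loss(tot_loss: dict, batch_info: dict,
                    reset_interval: int = 200) -> dict:
    # Keep an exponentially-decayed sum of per-batch stats (loss,
    # frames, ...).  With decay (1 - 1/reset_interval), the "frames"
    # entry settles near reset_interval * frames_per_batch, matching
    # the ~1.6M-frame window seen in the tot_loss lines above.
    alpha = 1.0 - 1.0 / reset_interval
    return {k: alpha * tot_loss.get(k, 0.0) + v for k, v in batch_info.items()}
```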
+2023-02-06 08:18:34,181 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.634e+02 3.058e+02 3.903e+02 1.179e+03, threshold=6.116e+02, percent-clipped=2.0
+2023-02-06 08:18:36,803 INFO [train.py:901] (0/4) Epoch 10, batch 1100, loss[loss=0.2701, simple_loss=0.3412, pruned_loss=0.09952, over 8135.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.3178, pruned_loss=0.08569, over 1612802.32 frames. ], batch size: 22, lr: 7.82e-03, grad_scale: 8.0
+2023-02-06 08:19:09,829 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 08:19:10,509 INFO [train.py:901] (0/4) Epoch 10, batch 1150, loss[loss=0.2717, simple_loss=0.3554, pruned_loss=0.09401, over 8188.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.318, pruned_loss=0.08543, over 1617568.26 frames. ], batch size: 23, lr: 7.82e-03, grad_scale: 8.0
+2023-02-06 08:19:35,224 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9214, 1.5080, 1.6638, 1.2118, 0.9797, 1.3588, 1.5806, 1.4408],
+       device='cuda:0'), covar=tensor([0.0483, 0.1201, 0.1620, 0.1369, 0.0546, 0.1481, 0.0637, 0.0585],
+       device='cuda:0'), in_proj_covar=tensor([0.0104, 0.0157, 0.0195, 0.0161, 0.0107, 0.0167, 0.0119, 0.0139],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:0')
+2023-02-06 08:19:42,441 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.356e+02 2.791e+02 3.726e+02 1.227e+03, threshold=5.583e+02, percent-clipped=4.0
+2023-02-06 08:19:43,299 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7351, 1.9078, 2.3717, 1.6558, 1.1874, 2.5587, 0.4732, 1.3137],
+       device='cuda:0'), covar=tensor([0.2572, 0.1852, 0.0500, 0.2247, 0.5107, 0.0415, 0.3892, 0.2316],
+       device='cuda:0'), in_proj_covar=tensor([0.0162, 0.0162, 0.0095, 0.0213, 0.0252, 0.0097, 0.0159, 0.0161],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:19:45,127 INFO [train.py:901] (0/4) Epoch 10, batch 1200, loss[loss=0.2455, simple_loss=0.3084, pruned_loss=0.09128, over 8091.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.317, pruned_loss=0.08465, over 1614826.01 frames. ], batch size: 21, lr: 7.82e-03, grad_scale: 8.0
+2023-02-06 08:19:48,551 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73952.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:20:20,054 INFO [train.py:901] (0/4) Epoch 10, batch 1250, loss[loss=0.2542, simple_loss=0.3369, pruned_loss=0.08572, over 8256.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3167, pruned_loss=0.0845, over 1615796.04 frames. ], batch size: 24, lr: 7.82e-03, grad_scale: 8.0
+2023-02-06 08:20:21,454 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-74000.pt
+2023-02-06 08:20:39,875 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74026.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:20:46,794 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.58 vs. limit=5.0
+2023-02-06 08:20:51,572 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.519e+02 3.075e+02 3.983e+02 7.817e+02, threshold=6.150e+02, percent-clipped=4.0
+2023-02-06 08:20:54,943 INFO [train.py:901] (0/4) Epoch 10, batch 1300, loss[loss=0.2125, simple_loss=0.2831, pruned_loss=0.07097, over 7800.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3167, pruned_loss=0.08495, over 1618439.24 frames. ], batch size: 19, lr: 7.81e-03, grad_scale: 8.0
+2023-02-06 08:20:57,198 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74051.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:20:58,730 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-06 08:21:07,838 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74067.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:21:19,112 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:21:21,532 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0
+2023-02-06 08:21:26,833 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:21:29,354 INFO [train.py:901] (0/4) Epoch 10, batch 1350, loss[loss=0.2568, simple_loss=0.3206, pruned_loss=0.09653, over 8091.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3204, pruned_loss=0.08706, over 1625716.79 frames. ], batch size: 21, lr: 7.81e-03, grad_scale: 8.0
+2023-02-06 08:21:59,543 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.773e+02 3.448e+02 4.052e+02 8.675e+02, threshold=6.895e+02, percent-clipped=5.0
+2023-02-06 08:22:02,259 INFO [train.py:901] (0/4) Epoch 10, batch 1400, loss[loss=0.2562, simple_loss=0.3204, pruned_loss=0.09601, over 7522.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3211, pruned_loss=0.08791, over 1624933.15 frames. ], batch size: 18, lr: 7.81e-03, grad_scale: 8.0
+2023-02-06 08:22:16,762 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-06 08:22:38,035 INFO [train.py:901] (0/4) Epoch 10, batch 1450, loss[loss=0.1913, simple_loss=0.2688, pruned_loss=0.05695, over 7802.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3187, pruned_loss=0.0864, over 1623161.63 frames. ], batch size: 19, lr: 7.81e-03, grad_scale: 8.0
+2023-02-06 08:22:41,671 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 08:22:45,869 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74209.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:23:01,273 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6033, 4.6147, 4.1190, 1.8530, 4.0756, 4.2837, 4.1993, 3.8249],
+       device='cuda:0'), covar=tensor([0.0572, 0.0484, 0.0892, 0.4715, 0.0756, 0.0688, 0.1192, 0.0751],
+       device='cuda:0'), in_proj_covar=tensor([0.0448, 0.0356, 0.0377, 0.0470, 0.0372, 0.0353, 0.0361, 0.0314],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 08:23:01,603 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
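Two kinds of checkpoints appear in the log: `epoch-N.pt` at each epoch boundary and `checkpoint-NNNNN.pt` by batch count (74000 above, 76000 later, so an interval of 2000 is inferred rather than confirmed). The real files very likely also store optimizer and scheduler state; the sketch below only shows the cadence.

```python
from pathlib import Path
import torch

def maybe_save(model: torch.nn.Module, batch_idx_train: int,
               exp_dir: Path, save_every_n: int = 2000) -> None:
    # Batch-level checkpoints at a fixed interval, matching the
    # checkpoint-74000.pt / checkpoint-76000.pt lines in this log.
    if batch_idx_train > 0 and batch_idx_train % save_every_n == 0:
        torch.save(model.state_dict(),
                   exp_dir / f"checkpoint-{batch_idx_train}.pt")
```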
+2023-02-06 08:23:07,376 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9898, 1.5568, 3.0047, 1.2141, 2.1456, 3.3188, 3.3752, 2.7998],
+       device='cuda:0'), covar=tensor([0.0982, 0.1489, 0.0424, 0.2125, 0.0970, 0.0271, 0.0460, 0.0682],
+       device='cuda:0'), in_proj_covar=tensor([0.0256, 0.0293, 0.0254, 0.0284, 0.0269, 0.0232, 0.0318, 0.0289],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:23:09,187 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.838e+02 2.525e+02 3.045e+02 3.954e+02 1.310e+03, threshold=6.089e+02, percent-clipped=4.0
+2023-02-06 08:23:11,846 INFO [train.py:901] (0/4) Epoch 10, batch 1500, loss[loss=0.189, simple_loss=0.2643, pruned_loss=0.0569, over 7412.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3175, pruned_loss=0.08529, over 1620239.00 frames. ], batch size: 17, lr: 7.80e-03, grad_scale: 8.0
+2023-02-06 08:23:18,346 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74258.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:23:46,603 INFO [train.py:901] (0/4) Epoch 10, batch 1550, loss[loss=0.2255, simple_loss=0.3046, pruned_loss=0.07315, over 8101.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3173, pruned_loss=0.08507, over 1618445.84 frames. ], batch size: 23, lr: 7.80e-03, grad_scale: 8.0
+2023-02-06 08:23:47,771 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-06 08:24:05,673 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74323.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:24:19,877 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.540e+02 3.095e+02 3.981e+02 6.537e+02, threshold=6.190e+02, percent-clipped=3.0
+2023-02-06 08:24:22,705 INFO [train.py:901] (0/4) Epoch 10, batch 1600, loss[loss=0.2142, simple_loss=0.2865, pruned_loss=0.07093, over 7787.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3173, pruned_loss=0.08506, over 1616956.92 frames. ], batch size: 19, lr: 7.80e-03, grad_scale: 8.0
+2023-02-06 08:24:22,901 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74348.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:24:56,947 INFO [train.py:901] (0/4) Epoch 10, batch 1650, loss[loss=0.239, simple_loss=0.3043, pruned_loss=0.08684, over 7526.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3171, pruned_loss=0.08514, over 1617053.47 frames. ], batch size: 18, lr: 7.79e-03, grad_scale: 8.0
+2023-02-06 08:25:18,074 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:25:30,269 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.508e+02 3.008e+02 3.971e+02 8.483e+02, threshold=6.016e+02, percent-clipped=6.0
+2023-02-06 08:25:31,723 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2994, 1.4725, 1.2343, 1.8688, 0.7497, 1.0048, 1.2470, 1.4034],
+       device='cuda:0'), covar=tensor([0.1028, 0.0935, 0.1301, 0.0603, 0.1330, 0.1925, 0.0991, 0.0912],
+       device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0216, 0.0261, 0.0217, 0.0221, 0.0255, 0.0261, 0.0222],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-06 08:25:32,856 INFO [train.py:901] (0/4) Epoch 10, batch 1700, loss[loss=0.2491, simple_loss=0.3267, pruned_loss=0.08576, over 8460.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3169, pruned_loss=0.08423, over 1620782.86 frames. ], batch size: 25, lr: 7.79e-03, grad_scale: 8.0
+2023-02-06 08:25:44,193 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74465.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:26:00,777 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:26:05,897 INFO [train.py:901] (0/4) Epoch 10, batch 1750, loss[loss=0.1857, simple_loss=0.2712, pruned_loss=0.05004, over 8241.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3178, pruned_loss=0.08525, over 1620091.83 frames. ], batch size: 22, lr: 7.79e-03, grad_scale: 8.0
+2023-02-06 08:26:36,848 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74541.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:26:38,772 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.804e+02 3.517e+02 4.698e+02 1.546e+03, threshold=7.034e+02, percent-clipped=7.0
+2023-02-06 08:26:41,525 INFO [train.py:901] (0/4) Epoch 10, batch 1800, loss[loss=0.2436, simple_loss=0.3209, pruned_loss=0.08319, over 8249.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3185, pruned_loss=0.08583, over 1622644.78 frames. ], batch size: 24, lr: 7.79e-03, grad_scale: 8.0
+2023-02-06 08:27:14,907 INFO [train.py:901] (0/4) Epoch 10, batch 1850, loss[loss=0.1986, simple_loss=0.2714, pruned_loss=0.06288, over 7436.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3181, pruned_loss=0.08602, over 1621981.83 frames. ], batch size: 17, lr: 7.78e-03, grad_scale: 8.0
+2023-02-06 08:27:17,782 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74602.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:27:25,264 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74613.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:27:46,970 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.251e+02 2.722e+02 3.219e+02 4.226e+02 1.097e+03, threshold=6.437e+02, percent-clipped=2.0
+2023-02-06 08:27:50,399 INFO [train.py:901] (0/4) Epoch 10, batch 1900, loss[loss=0.2499, simple_loss=0.316, pruned_loss=0.0919, over 8080.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3183, pruned_loss=0.0857, over 1620711.93 frames. ], batch size: 21, lr: 7.78e-03, grad_scale: 8.0
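The `zipformer.py:2431` dumps are periodic diagnostics of the self-attention modules: the first tensor is an entropy-style statistic of the attention weights (low values mean sharply peaked attention), followed by covariance statistics of the projections. A hedged sketch of the entropy part only; the shape and naming conventions below are assumptions.

```python
import torch

def attn_weights_entropy(attn: torch.Tensor, eps: float = 1.0e-20) -> torch.Tensor:
    # attn: (num_heads, tgt_len, src_len), each row a softmax
    # distribution over source positions.  Returns one average
    # entropy per head, comparable to the first row of numbers in
    # the dumps above (eight values would mean eight heads).
    return -(attn * (attn + eps).log()).sum(dim=-1).mean(dim=-1)
```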
+2023-02-06 08:28:13,885 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 08:28:25,273 INFO [train.py:901] (0/4) Epoch 10, batch 1950, loss[loss=0.1955, simple_loss=0.2689, pruned_loss=0.06102, over 7419.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3172, pruned_loss=0.0848, over 1616851.44 frames. ], batch size: 17, lr: 7.78e-03, grad_scale: 8.0
+2023-02-06 08:28:25,967 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 08:28:38,213 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74717.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:28:41,921 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 08:28:43,969 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 08:28:56,745 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.465e+02 3.030e+02 3.717e+02 6.494e+02, threshold=6.060e+02, percent-clipped=3.0
+2023-02-06 08:28:59,513 INFO [train.py:901] (0/4) Epoch 10, batch 2000, loss[loss=0.238, simple_loss=0.3108, pruned_loss=0.08267, over 8255.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3181, pruned_loss=0.08495, over 1619357.99 frames. ], batch size: 22, lr: 7.78e-03, grad_scale: 8.0
+2023-02-06 08:29:34,146 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74797.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:29:34,597 INFO [train.py:901] (0/4) Epoch 10, batch 2050, loss[loss=0.2286, simple_loss=0.3067, pruned_loss=0.07529, over 8248.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3163, pruned_loss=0.08426, over 1619462.22 frames. ], batch size: 24, lr: 7.77e-03, grad_scale: 8.0
+2023-02-06 08:29:50,346 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:29:58,215 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3649, 1.2060, 1.5526, 1.2173, 0.7140, 1.3437, 1.2640, 1.0860],
+       device='cuda:0'), covar=tensor([0.0544, 0.1347, 0.1747, 0.1490, 0.0588, 0.1560, 0.0686, 0.0651],
+       device='cuda:0'), in_proj_covar=tensor([0.0103, 0.0156, 0.0197, 0.0162, 0.0106, 0.0167, 0.0119, 0.0139],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:0')
+2023-02-06 08:30:04,620 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.471e+02 3.084e+02 4.282e+02 1.276e+03, threshold=6.169e+02, percent-clipped=5.0
+2023-02-06 08:30:06,169 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9993, 1.6365, 1.3361, 1.5523, 1.3761, 1.1021, 1.1472, 1.3417],
+       device='cuda:0'), covar=tensor([0.0949, 0.0377, 0.1076, 0.0467, 0.0643, 0.1326, 0.0826, 0.0683],
+       device='cuda:0'), in_proj_covar=tensor([0.0342, 0.0232, 0.0312, 0.0296, 0.0304, 0.0317, 0.0337, 0.0308],
+       device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 08:30:07,354 INFO [train.py:901] (0/4) Epoch 10, batch 2100, loss[loss=0.2267, simple_loss=0.309, pruned_loss=0.0722, over 8249.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3179, pruned_loss=0.08514, over 1617856.43 frames. ], batch size: 24, lr: 7.77e-03, grad_scale: 8.0
+2023-02-06 08:30:18,141 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6349, 1.5917, 1.9545, 1.5310, 0.9719, 2.0252, 0.1850, 1.1541],
+       device='cuda:0'), covar=tensor([0.2821, 0.2089, 0.0498, 0.2148, 0.4950, 0.0461, 0.3993, 0.2136],
+       device='cuda:0'), in_proj_covar=tensor([0.0162, 0.0163, 0.0092, 0.0213, 0.0254, 0.0097, 0.0161, 0.0158],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:30:21,942 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5722, 4.6294, 4.1065, 1.9835, 4.1288, 4.1166, 4.1924, 3.8483],
+       device='cuda:0'), covar=tensor([0.0801, 0.0539, 0.1081, 0.4912, 0.0853, 0.0870, 0.1134, 0.0802],
+       device='cuda:0'), in_proj_covar=tensor([0.0444, 0.0358, 0.0379, 0.0465, 0.0367, 0.0352, 0.0361, 0.0315],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 08:30:24,010 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4444, 1.8296, 1.7953, 1.0579, 1.9432, 1.4277, 0.4250, 1.6220],
+       device='cuda:0'), covar=tensor([0.0307, 0.0171, 0.0197, 0.0315, 0.0238, 0.0515, 0.0495, 0.0142],
+       device='cuda:0'), in_proj_covar=tensor([0.0374, 0.0306, 0.0265, 0.0364, 0.0296, 0.0452, 0.0348, 0.0332],
+       device='cuda:0'), out_proj_covar=tensor([1.0950e-04, 8.7103e-05, 7.5990e-05, 1.0479e-04, 8.5882e-05, 1.4168e-04,
+       1.0204e-04, 9.6580e-05], device='cuda:0')
+2023-02-06 08:30:43,220 INFO [train.py:901] (0/4) Epoch 10, batch 2150, loss[loss=0.2625, simple_loss=0.3331, pruned_loss=0.0959, over 8196.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3169, pruned_loss=0.08483, over 1615901.20 frames. ], batch size: 23, lr: 7.77e-03, grad_scale: 8.0
+2023-02-06 08:31:02,633 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:31:13,920 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.626e+02 3.226e+02 3.775e+02 6.882e+02, threshold=6.451e+02, percent-clipped=1.0
+2023-02-06 08:31:16,706 INFO [train.py:901] (0/4) Epoch 10, batch 2200, loss[loss=0.2525, simple_loss=0.3264, pruned_loss=0.08927, over 8349.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3161, pruned_loss=0.08411, over 1613079.58 frames. ], batch size: 24, lr: 7.77e-03, grad_scale: 8.0
+2023-02-06 08:31:22,705 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74957.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:31:33,576 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:31:39,398 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74982.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:31:50,363 INFO [train.py:901] (0/4) Epoch 10, batch 2250, loss[loss=0.2276, simple_loss=0.3108, pruned_loss=0.07222, over 8135.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.316, pruned_loss=0.0842, over 1615525.85 frames. ], batch size: 22, lr: 7.76e-03, grad_scale: 8.0
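The `zipformer.py:1185` lines track stochastic layer dropping: each encoder stack has a warmup window `[warmup_begin, warmup_end]` in batches, and on most steps nothing is skipped (`num_to_drop=0`), with an occasional `num_to_drop=1, layers_to_drop={0}` or `{1}`. A rough sketch of such a schedule follows; the probabilities and the exact rule are assumptions, not the real zipformer.py logic.

```python
import random

def pick_layers_to_drop(batch_count: float, warmup_end: float,
                        num_layers: int) -> set:
    # Hypothetical rule: drop layers more often while the module is
    # still warming up, and only rarely afterwards (matching the
    # mostly-empty layers_to_drop sets at batch_count ~ 75000).
    p = 0.5 if batch_count < warmup_end else 0.05  # assumed rates
    return {i for i in range(num_layers) if random.random() < p}
```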
+2023-02-06 08:31:50,574 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74998.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:32:21,307 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7321, 2.3687, 1.6421, 2.7001, 1.3929, 1.2706, 2.0924, 2.3122],
+       device='cuda:0'), covar=tensor([0.1002, 0.0780, 0.1426, 0.0469, 0.1168, 0.1851, 0.0915, 0.0763],
+       device='cuda:0'), in_proj_covar=tensor([0.0244, 0.0216, 0.0262, 0.0218, 0.0223, 0.0255, 0.0263, 0.0223],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-06 08:32:23,118 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.748e+02 3.468e+02 4.709e+02 1.048e+03, threshold=6.936e+02, percent-clipped=3.0
+2023-02-06 08:32:25,870 INFO [train.py:901] (0/4) Epoch 10, batch 2300, loss[loss=0.1994, simple_loss=0.2824, pruned_loss=0.05815, over 8249.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.317, pruned_loss=0.08514, over 1619217.36 frames. ], batch size: 24, lr: 7.76e-03, grad_scale: 8.0
+2023-02-06 08:32:42,300 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75072.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:32:59,565 INFO [train.py:901] (0/4) Epoch 10, batch 2350, loss[loss=0.2237, simple_loss=0.2893, pruned_loss=0.07908, over 7654.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3164, pruned_loss=0.08493, over 1615624.09 frames. ], batch size: 19, lr: 7.76e-03, grad_scale: 8.0
+2023-02-06 08:33:17,158 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2077, 4.2201, 3.8191, 1.7787, 3.7731, 3.7549, 3.8986, 3.4729],
+       device='cuda:0'), covar=tensor([0.0788, 0.0568, 0.0925, 0.4339, 0.0768, 0.0968, 0.1013, 0.0836],
+       device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0362, 0.0383, 0.0472, 0.0371, 0.0354, 0.0366, 0.0317],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 08:33:33,027 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.526e+02 3.215e+02 4.182e+02 1.054e+03, threshold=6.430e+02, percent-clipped=5.0
+2023-02-06 08:33:35,802 INFO [train.py:901] (0/4) Epoch 10, batch 2400, loss[loss=0.2257, simple_loss=0.3086, pruned_loss=0.07143, over 8104.00 frames. ], tot_loss[loss=0.243, simple_loss=0.3163, pruned_loss=0.08486, over 1617261.49 frames. ], batch size: 23, lr: 7.76e-03, grad_scale: 8.0
+2023-02-06 08:33:40,204 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0
+2023-02-06 08:34:08,760 INFO [train.py:901] (0/4) Epoch 10, batch 2450, loss[loss=0.2258, simple_loss=0.2921, pruned_loss=0.07977, over 8243.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3164, pruned_loss=0.08534, over 1617670.86 frames. ], batch size: 22, lr: 7.75e-03, grad_scale: 8.0
+2023-02-06 08:34:26,434 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-02-06 08:34:40,829 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.613e+02 3.092e+02 4.227e+02 1.037e+03, threshold=6.184e+02, percent-clipped=5.0
+2023-02-06 08:34:43,537 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75246.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:34:44,707 INFO [train.py:901] (0/4) Epoch 10, batch 2500, loss[loss=0.235, simple_loss=0.3066, pruned_loss=0.08168, over 8325.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3166, pruned_loss=0.08552, over 1618669.00 frames. ], batch size: 25, lr: 7.75e-03, grad_scale: 8.0
+2023-02-06 08:35:00,228 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75271.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:35:11,669 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75288.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:35:18,083 INFO [train.py:901] (0/4) Epoch 10, batch 2550, loss[loss=0.1803, simple_loss=0.2543, pruned_loss=0.0531, over 7543.00 frames. ], tot_loss[loss=0.243, simple_loss=0.3165, pruned_loss=0.08476, over 1623007.91 frames. ], batch size: 18, lr: 7.75e-03, grad_scale: 8.0
+2023-02-06 08:35:36,578 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:35:38,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75328.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:35:49,134 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.664e+02 3.245e+02 3.791e+02 6.757e+02, threshold=6.490e+02, percent-clipped=2.0
+2023-02-06 08:35:51,836 INFO [train.py:901] (0/4) Epoch 10, batch 2600, loss[loss=0.2523, simple_loss=0.3229, pruned_loss=0.09083, over 8571.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3169, pruned_loss=0.08467, over 1627938.05 frames. ], batch size: 31, lr: 7.75e-03, grad_scale: 8.0
+2023-02-06 08:35:55,406 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75353.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:36:19,394 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75386.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:36:27,351 INFO [train.py:901] (0/4) Epoch 10, batch 2650, loss[loss=0.1974, simple_loss=0.2969, pruned_loss=0.04896, over 8472.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3162, pruned_loss=0.08463, over 1623415.07 frames. ], batch size: 25, lr: 7.74e-03, grad_scale: 8.0
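The `scaling.py:679` lines report a whitening diagnostic: a metric that measures how far the channel covariance of a layer's activations is from "white" (1.0 would be perfectly white), compared against a limit (2.0 or 5.0 above) beyond which a corrective constraint kicks in. The sketch below computes one such metric; the real scaling.py implementation may differ in details, so treat this as an assumption-labeled illustration.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    # x: (num_frames, num_channels).  Returns a value >= 1.0 that
    # equals 1.0 only when each channel group's covariance has equal
    # eigenvalues, i.e. is white.
    num_frames, num_channels = x.shape
    x = x.reshape(num_frames, num_groups, num_channels // num_groups).permute(1, 0, 2)
    cov = torch.matmul(x.transpose(1, 2), x) / num_frames  # (groups, c, c)
    c = cov.shape[-1]
    # For symmetric cov, ||cov||_F^2 is the sum of squared eigenvalues
    # and trace(cov) their sum, so this ratio is 1.0 iff all
    # eigenvalues are equal.
    num = (cov ** 2).sum(dim=(1, 2)) * c
    den = cov.diagonal(dim1=1, dim2=2).sum(dim=1) ** 2
    return (num / den).mean()
```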
+2023-02-06 08:36:52,096 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0358, 1.6530, 1.3355, 1.5725, 1.3245, 1.2056, 1.2616, 1.4238],
+       device='cuda:0'), covar=tensor([0.0935, 0.0385, 0.1042, 0.0477, 0.0693, 0.1251, 0.0744, 0.0684],
+       device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0235, 0.0313, 0.0299, 0.0305, 0.0319, 0.0340, 0.0312],
+       device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 08:36:55,407 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2668, 1.3674, 4.5849, 1.9016, 3.6290, 3.6519, 4.0914, 4.0742],
+       device='cuda:0'), covar=tensor([0.0961, 0.6342, 0.0922, 0.4342, 0.2237, 0.1420, 0.0991, 0.0999],
+       device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0560, 0.0566, 0.0516, 0.0593, 0.0506, 0.0494, 0.0555],
+       device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 08:36:56,747 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75441.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:36:58,552 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.723e+02 3.413e+02 4.384e+02 8.455e+02, threshold=6.827e+02, percent-clipped=3.0
+2023-02-06 08:37:01,350 INFO [train.py:901] (0/4) Epoch 10, batch 2700, loss[loss=0.2028, simple_loss=0.2871, pruned_loss=0.05925, over 8187.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3158, pruned_loss=0.08453, over 1622645.02 frames. ], batch size: 23, lr: 7.74e-03, grad_scale: 8.0
+2023-02-06 08:37:01,849 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 08:37:37,693 INFO [train.py:901] (0/4) Epoch 10, batch 2750, loss[loss=0.2727, simple_loss=0.3317, pruned_loss=0.1069, over 8500.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3146, pruned_loss=0.0841, over 1617162.26 frames. ], batch size: 39, lr: 7.74e-03, grad_scale: 8.0
+2023-02-06 08:38:01,209 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.84 vs. limit=5.0
+2023-02-06 08:38:08,180 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.609e+02 3.111e+02 3.957e+02 1.084e+03, threshold=6.223e+02, percent-clipped=3.0
+2023-02-06 08:38:10,729 INFO [train.py:901] (0/4) Epoch 10, batch 2800, loss[loss=0.2891, simple_loss=0.3646, pruned_loss=0.1068, over 8025.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3136, pruned_loss=0.08363, over 1610036.63 frames. ], batch size: 22, lr: 7.74e-03, grad_scale: 8.0
+2023-02-06 08:38:15,241 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 08:38:39,095 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75590.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:38:45,071 INFO [train.py:901] (0/4) Epoch 10, batch 2850, loss[loss=0.2244, simple_loss=0.3128, pruned_loss=0.06802, over 8485.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3132, pruned_loss=0.08329, over 1610868.66 frames. ], batch size: 29, lr: 7.73e-03, grad_scale: 8.0
+2023-02-06 08:38:52,328 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0
+2023-02-06 08:39:06,714 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1995, 1.4678, 4.6335, 1.8909, 2.7654, 5.2370, 5.0747, 4.5398],
+       device='cuda:0'), covar=tensor([0.1111, 0.1749, 0.0222, 0.1861, 0.0850, 0.0147, 0.0307, 0.0556],
+       device='cuda:0'), in_proj_covar=tensor([0.0258, 0.0293, 0.0253, 0.0284, 0.0268, 0.0233, 0.0319, 0.0289],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:39:09,360 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:39:09,745 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 08:39:16,271 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75642.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:39:17,406 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.670e+02 3.172e+02 3.749e+02 6.038e+02, threshold=6.343e+02, percent-clipped=0.0
+2023-02-06 08:39:20,068 INFO [train.py:901] (0/4) Epoch 10, batch 2900, loss[loss=0.2493, simple_loss=0.3205, pruned_loss=0.08911, over 8445.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3151, pruned_loss=0.08469, over 1612182.07 frames. ], batch size: 27, lr: 7.73e-03, grad_scale: 8.0
+2023-02-06 08:39:32,775 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75667.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:39:49,791 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 08:39:53,279 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75697.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:39:53,742 INFO [train.py:901] (0/4) Epoch 10, batch 2950, loss[loss=0.2576, simple_loss=0.3313, pruned_loss=0.09195, over 8678.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3156, pruned_loss=0.08487, over 1610866.32 frames. ], batch size: 39, lr: 7.73e-03, grad_scale: 16.0
+2023-02-06 08:39:58,746 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75705.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:40:10,317 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 08:40:11,383 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75722.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:40:26,680 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.480e+02 3.030e+02 3.596e+02 1.304e+03, threshold=6.060e+02, percent-clipped=4.0
+2023-02-06 08:40:28,944 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75747.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:40:29,494 INFO [train.py:901] (0/4) Epoch 10, batch 3000, loss[loss=0.1964, simple_loss=0.2782, pruned_loss=0.05731, over 8089.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3165, pruned_loss=0.0853, over 1614631.82 frames. ], batch size: 21, lr: 7.73e-03, grad_scale: 16.0
+2023-02-06 08:40:29,494 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 08:40:41,876 INFO [train.py:935] (0/4) Epoch 10, validation: loss=0.1918, simple_loss=0.2916, pruned_loss=0.04599, over 944034.00 frames.
+2023-02-06 08:40:41,877 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
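`Computing validation loss` runs a full pass over the dev set with gradients disabled; the `over 944034.00 frames` count is identical at every validation in this log because the same dev set is evaluated each time. A sketch of that step, with `compute_loss` as an assumed helper:

```python
import torch

def compute_validation_loss(model, valid_dl, device):
    # Evaluate on the whole dev set; accumulating (loss, frames) so
    # the reported numbers are per-frame averages.
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_dl:
            loss, num_frames = compute_loss(model, batch, device)  # assumed helper
            tot_loss += loss.item()
            tot_frames += num_frames
    model.train()
    return tot_loss / tot_frames
```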
+2023-02-06 08:41:15,537 INFO [train.py:901] (0/4) Epoch 10, batch 3050, loss[loss=0.227, simple_loss=0.308, pruned_loss=0.07302, over 8113.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3182, pruned_loss=0.08634, over 1619423.78 frames. ], batch size: 23, lr: 7.72e-03, grad_scale: 16.0
+2023-02-06 08:41:47,920 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.518e+02 3.138e+02 4.468e+02 1.006e+03, threshold=6.276e+02, percent-clipped=13.0
+2023-02-06 08:41:50,031 INFO [train.py:901] (0/4) Epoch 10, batch 3100, loss[loss=0.2132, simple_loss=0.2799, pruned_loss=0.07318, over 7701.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3186, pruned_loss=0.08625, over 1614848.46 frames. ], batch size: 18, lr: 7.72e-03, grad_scale: 8.0
+2023-02-06 08:42:19,305 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0
+2023-02-06 08:42:25,560 INFO [train.py:901] (0/4) Epoch 10, batch 3150, loss[loss=0.2194, simple_loss=0.2898, pruned_loss=0.07457, over 7652.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3184, pruned_loss=0.08652, over 1609482.04 frames. ], batch size: 19, lr: 7.72e-03, grad_scale: 8.0
+2023-02-06 08:42:57,479 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.584e+02 3.323e+02 3.941e+02 8.938e+02, threshold=6.646e+02, percent-clipped=3.0
+2023-02-06 08:42:59,542 INFO [train.py:901] (0/4) Epoch 10, batch 3200, loss[loss=0.2554, simple_loss=0.3409, pruned_loss=0.08496, over 8515.00 frames. ], tot_loss[loss=0.244, simple_loss=0.317, pruned_loss=0.08551, over 1607415.25 frames. ], batch size: 31, lr: 7.72e-03, grad_scale: 8.0
+2023-02-06 08:43:09,332 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75961.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:43:28,328 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75986.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:43:36,373 INFO [train.py:901] (0/4) Epoch 10, batch 3250, loss[loss=0.3027, simple_loss=0.358, pruned_loss=0.1237, over 6761.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3164, pruned_loss=0.08534, over 1603131.85 frames. ], batch size: 71, lr: 7.71e-03, grad_scale: 8.0
+2023-02-06 08:43:37,556 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 08:43:37,964 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-76000.pt
+2023-02-06 08:43:41,248 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76003.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:43:56,207 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.61 vs. limit=5.0
+2023-02-06 08:43:57,873 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76028.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 08:44:09,017 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.688e+02 3.300e+02 3.989e+02 9.835e+02, threshold=6.601e+02, percent-clipped=4.0
+2023-02-06 08:44:11,020 INFO [train.py:901] (0/4) Epoch 10, batch 3300, loss[loss=0.2918, simple_loss=0.3378, pruned_loss=0.1229, over 7656.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3178, pruned_loss=0.08624, over 1608549.95 frames. ], batch size: 19, lr: 7.71e-03, grad_scale: 8.0
+2023-02-06 08:44:47,579 INFO [train.py:901] (0/4) Epoch 10, batch 3350, loss[loss=0.1931, simple_loss=0.2666, pruned_loss=0.05979, over 7801.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3159, pruned_loss=0.08493, over 1605263.83 frames. ], batch size: 20, lr: 7.71e-03, grad_scale: 8.0
+2023-02-06 08:45:18,108 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7642, 1.4977, 2.7807, 1.2450, 2.0644, 2.9701, 3.0526, 2.5260],
+       device='cuda:0'), covar=tensor([0.1037, 0.1346, 0.0436, 0.2045, 0.0846, 0.0316, 0.0580, 0.0759],
+       device='cuda:0'), in_proj_covar=tensor([0.0256, 0.0293, 0.0253, 0.0284, 0.0268, 0.0234, 0.0319, 0.0289],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 08:45:18,633 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.652e+02 3.239e+02 4.192e+02 7.352e+02, threshold=6.477e+02, percent-clipped=1.0
+2023-02-06 08:45:20,670 INFO [train.py:901] (0/4) Epoch 10, batch 3400, loss[loss=0.2342, simple_loss=0.3115, pruned_loss=0.0785, over 8510.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3153, pruned_loss=0.08416, over 1610189.01 frames. ], batch size: 28, lr: 7.71e-03, grad_scale: 8.0
+2023-02-06 08:45:55,814 INFO [train.py:901] (0/4) Epoch 10, batch 3450, loss[loss=0.2336, simple_loss=0.3206, pruned_loss=0.07326, over 7975.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.316, pruned_loss=0.08444, over 1612827.34 frames. ], batch size: 21, lr: 7.70e-03, grad_scale: 8.0
+2023-02-06 08:46:30,137 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.394e+02 3.045e+02 3.881e+02 9.338e+02, threshold=6.090e+02, percent-clipped=3.0
+2023-02-06 08:46:32,217 INFO [train.py:901] (0/4) Epoch 10, batch 3500, loss[loss=0.3005, simple_loss=0.3517, pruned_loss=0.1246, over 8514.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3167, pruned_loss=0.08526, over 1610108.39 frames. ], batch size: 49, lr: 7.70e-03, grad_scale: 8.0
+2023-02-06 08:46:39,255 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3184, 2.7399, 1.8458, 2.1991, 2.1405, 1.4637, 1.8573, 2.2475],
+       device='cuda:0'), covar=tensor([0.1367, 0.0326, 0.1074, 0.0580, 0.0659, 0.1415, 0.0990, 0.0790],
+       device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0234, 0.0314, 0.0299, 0.0308, 0.0325, 0.0340, 0.0314],
+       device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 08:46:48,050 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 08:46:51,371 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 08:46:59,271 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76287.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 08:47:06,736 INFO [train.py:901] (0/4) Epoch 10, batch 3550, loss[loss=0.2074, simple_loss=0.2674, pruned_loss=0.07372, over 7197.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3152, pruned_loss=0.08424, over 1607160.95 frames. ], batch size: 16, lr: 7.70e-03, grad_scale: 8.0
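`grad_scale` in these lines is the loss scale of mixed-precision training: it doubles after a stretch of stable updates (8.0 to 16.0 around batch 350 of epoch 10, and again at batch 2950) and halves after an overflow (back to 8.0 by batch 950). That is standard dynamic loss scaling; the generic sketch below shows the behaviour, not icefall's exact policy.

```python
def update_grad_scale(scale: float, found_inf: bool,
                      growth_interval: int, stable_steps: int):
    # Dynamic loss scaling: halve on overflow, double after a run of
    # stable steps.  Returns (new_scale, new_stable_step_count).
    if found_inf:
        return scale * 0.5, 0
    stable_steps += 1
    if stable_steps >= growth_interval:
        return scale * 2.0, 0
    return scale, stable_steps
```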
], batch size: 16, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:47:17,280 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:47:39,387 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76341.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:47:42,026 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 2.725e+02 3.470e+02 4.316e+02 7.747e+02, threshold=6.941e+02, percent-clipped=6.0 +2023-02-06 08:47:44,169 INFO [train.py:901] (0/4) Epoch 10, batch 3600, loss[loss=0.2271, simple_loss=0.2977, pruned_loss=0.07827, over 8073.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.314, pruned_loss=0.08368, over 1604911.80 frames. ], batch size: 21, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:47:53,341 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0259, 1.4327, 4.2893, 1.5867, 3.7221, 3.5056, 3.8377, 3.6990], + device='cuda:0'), covar=tensor([0.0640, 0.4414, 0.0500, 0.3433, 0.1249, 0.0901, 0.0590, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0453, 0.0568, 0.0573, 0.0524, 0.0603, 0.0512, 0.0499, 0.0564], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 08:48:18,377 INFO [train.py:901] (0/4) Epoch 10, batch 3650, loss[loss=0.2543, simple_loss=0.3281, pruned_loss=0.09022, over 8502.00 frames. ], tot_loss[loss=0.24, simple_loss=0.3132, pruned_loss=0.08342, over 1602057.31 frames. ], batch size: 39, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:50,984 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.530e+02 3.057e+02 3.624e+02 8.995e+02, threshold=6.114e+02, percent-clipped=3.0 +2023-02-06 08:48:52,986 INFO [train.py:901] (0/4) Epoch 10, batch 3700, loss[loss=0.246, simple_loss=0.3272, pruned_loss=0.08241, over 8143.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3145, pruned_loss=0.08362, over 1609308.79 frames. ], batch size: 22, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:54,935 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 08:49:28,897 INFO [train.py:901] (0/4) Epoch 10, batch 3750, loss[loss=0.2882, simple_loss=0.3475, pruned_loss=0.1145, over 6933.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3162, pruned_loss=0.08457, over 1610426.70 frames. ], batch size: 71, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:00,300 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 2.765e+02 3.416e+02 4.278e+02 1.031e+03, threshold=6.832e+02, percent-clipped=4.0 +2023-02-06 08:50:02,995 INFO [train.py:901] (0/4) Epoch 10, batch 3800, loss[loss=0.2178, simple_loss=0.2946, pruned_loss=0.07051, over 7529.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3154, pruned_loss=0.0839, over 1610611.34 frames. ], batch size: 18, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:38,435 INFO [train.py:901] (0/4) Epoch 10, batch 3850, loss[loss=0.2287, simple_loss=0.3118, pruned_loss=0.07279, over 8448.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3149, pruned_loss=0.08398, over 1609506.45 frames. ], batch size: 27, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:50:59,075 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-06 08:51:00,498 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76631.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:51:09,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.563e+02 3.093e+02 4.191e+02 1.151e+03, threshold=6.187e+02, percent-clipped=5.0 +2023-02-06 08:51:11,980 INFO [train.py:901] (0/4) Epoch 10, batch 3900, loss[loss=0.2301, simple_loss=0.3178, pruned_loss=0.0712, over 8497.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3151, pruned_loss=0.08413, over 1611193.79 frames. ], batch size: 28, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:51:14,241 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5093, 1.7698, 2.8547, 1.3319, 1.9757, 1.8937, 1.5801, 1.7946], + device='cuda:0'), covar=tensor([0.1778, 0.2228, 0.0707, 0.3726, 0.1560, 0.2712, 0.1719, 0.2081], + device='cuda:0'), in_proj_covar=tensor([0.0482, 0.0502, 0.0529, 0.0565, 0.0605, 0.0544, 0.0463, 0.0601], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 08:51:17,431 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:51:38,048 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76685.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:51:48,068 INFO [train.py:901] (0/4) Epoch 10, batch 3950, loss[loss=0.2481, simple_loss=0.3258, pruned_loss=0.08522, over 7805.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3154, pruned_loss=0.08427, over 1612281.96 frames. ], batch size: 20, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:51:50,990 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6292, 1.3262, 1.5324, 1.1545, 0.8764, 1.2628, 1.4266, 1.2364], + device='cuda:0'), covar=tensor([0.0564, 0.1329, 0.1869, 0.1551, 0.0635, 0.1648, 0.0759, 0.0702], + device='cuda:0'), in_proj_covar=tensor([0.0103, 0.0157, 0.0197, 0.0162, 0.0106, 0.0166, 0.0119, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 08:52:19,565 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.430e+02 3.097e+02 3.693e+02 7.444e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-06 08:52:20,444 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76746.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:52:21,615 INFO [train.py:901] (0/4) Epoch 10, batch 4000, loss[loss=0.2019, simple_loss=0.2734, pruned_loss=0.06523, over 7536.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3138, pruned_loss=0.0834, over 1608746.61 frames. ], batch size: 18, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:52:32,357 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. 
limit=2.0 +2023-02-06 08:52:36,775 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4580, 2.0158, 2.1120, 1.0555, 2.1802, 1.4201, 0.5337, 1.6678], + device='cuda:0'), covar=tensor([0.0423, 0.0204, 0.0152, 0.0372, 0.0242, 0.0644, 0.0562, 0.0189], + device='cuda:0'), in_proj_covar=tensor([0.0375, 0.0307, 0.0263, 0.0369, 0.0299, 0.0454, 0.0351, 0.0338], + device='cuda:0'), out_proj_covar=tensor([1.0894e-04, 8.7150e-05, 7.4858e-05, 1.0548e-04, 8.6795e-05, 1.4130e-04, + 1.0227e-04, 9.8215e-05], device='cuda:0') +2023-02-06 08:52:37,413 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:52:56,078 INFO [train.py:901] (0/4) Epoch 10, batch 4050, loss[loss=0.2335, simple_loss=0.3136, pruned_loss=0.07672, over 8641.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.314, pruned_loss=0.08307, over 1611832.62 frames. ], batch size: 34, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:52:57,688 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76800.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:53:29,318 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.637e+02 3.294e+02 4.061e+02 9.505e+02, threshold=6.587e+02, percent-clipped=7.0 +2023-02-06 08:53:31,234 INFO [train.py:901] (0/4) Epoch 10, batch 4100, loss[loss=0.2688, simple_loss=0.3356, pruned_loss=0.101, over 8016.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.314, pruned_loss=0.08304, over 1611391.57 frames. ], batch size: 22, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:53:37,249 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:54:04,759 INFO [train.py:901] (0/4) Epoch 10, batch 4150, loss[loss=0.251, simple_loss=0.3405, pruned_loss=0.08071, over 8487.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3135, pruned_loss=0.08294, over 1610909.83 frames. ], batch size: 28, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:54:38,768 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.505e+02 2.967e+02 3.617e+02 8.554e+02, threshold=5.933e+02, percent-clipped=2.0 +2023-02-06 08:54:40,851 INFO [train.py:901] (0/4) Epoch 10, batch 4200, loss[loss=0.2697, simple_loss=0.3487, pruned_loss=0.09538, over 8468.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3121, pruned_loss=0.08166, over 1611767.45 frames. ], batch size: 25, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:54:54,286 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.5653, 3.5128, 3.2130, 2.3697, 3.1601, 3.1043, 3.3644, 2.8745], + device='cuda:0'), covar=tensor([0.0862, 0.0755, 0.0952, 0.3105, 0.0864, 0.1050, 0.1123, 0.1002], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0359, 0.0370, 0.0468, 0.0364, 0.0355, 0.0361, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 08:55:00,931 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 08:55:14,228 INFO [train.py:901] (0/4) Epoch 10, batch 4250, loss[loss=0.2363, simple_loss=0.313, pruned_loss=0.07983, over 8072.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3125, pruned_loss=0.08165, over 1614296.46 frames. 
], batch size: 21, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:17,198 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77002.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:23,796 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 08:55:34,046 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77027.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:34,056 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:46,498 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.530e+02 3.131e+02 3.743e+02 6.568e+02, threshold=6.262e+02, percent-clipped=1.0 +2023-02-06 08:55:48,445 INFO [train.py:901] (0/4) Epoch 10, batch 4300, loss[loss=0.2493, simple_loss=0.3298, pruned_loss=0.08445, over 8510.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3133, pruned_loss=0.08245, over 1616455.53 frames. ], batch size: 26, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:52,728 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:55,458 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77056.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:57,562 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 08:56:12,947 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77081.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:56:23,977 INFO [train.py:901] (0/4) Epoch 10, batch 4350, loss[loss=0.2186, simple_loss=0.2884, pruned_loss=0.07436, over 7517.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3154, pruned_loss=0.08379, over 1617920.08 frames. ], batch size: 18, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:56:24,088 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7803, 5.9171, 4.9538, 2.4429, 5.0430, 5.4493, 5.3949, 5.1012], + device='cuda:0'), covar=tensor([0.0527, 0.0452, 0.0932, 0.4456, 0.0755, 0.0591, 0.1070, 0.0667], + device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0360, 0.0371, 0.0470, 0.0366, 0.0357, 0.0364, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 08:56:33,987 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:56:53,837 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 08:56:54,996 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.751e+02 3.331e+02 4.491e+02 1.022e+03, threshold=6.663e+02, percent-clipped=8.0 +2023-02-06 08:56:57,034 INFO [train.py:901] (0/4) Epoch 10, batch 4400, loss[loss=0.1847, simple_loss=0.2654, pruned_loss=0.05202, over 7257.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3151, pruned_loss=0.08406, over 1611131.37 frames. 
], batch size: 16, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:57:03,161 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5067, 2.9451, 1.8763, 2.2167, 2.1452, 1.5777, 1.9482, 2.3086], + device='cuda:0'), covar=tensor([0.1324, 0.0275, 0.0927, 0.0599, 0.0656, 0.1279, 0.0972, 0.0865], + device='cuda:0'), in_proj_covar=tensor([0.0338, 0.0230, 0.0304, 0.0290, 0.0297, 0.0317, 0.0333, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 08:57:33,159 INFO [train.py:901] (0/4) Epoch 10, batch 4450, loss[loss=0.2623, simple_loss=0.3372, pruned_loss=0.09374, over 8498.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3158, pruned_loss=0.08404, over 1610575.81 frames. ], batch size: 26, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:57:35,375 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:57:36,013 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 08:57:54,223 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:04,752 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.784e+02 3.289e+02 4.035e+02 8.452e+02, threshold=6.579e+02, percent-clipped=2.0 +2023-02-06 08:58:06,785 INFO [train.py:901] (0/4) Epoch 10, batch 4500, loss[loss=0.2446, simple_loss=0.3243, pruned_loss=0.08242, over 8616.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3163, pruned_loss=0.08397, over 1612205.23 frames. ], batch size: 39, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:27,561 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 08:58:36,268 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 08:58:38,004 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:41,450 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:43,377 INFO [train.py:901] (0/4) Epoch 10, batch 4550, loss[loss=0.1997, simple_loss=0.2884, pruned_loss=0.05554, over 8139.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3159, pruned_loss=0.0836, over 1618223.24 frames. 
], batch size: 22, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:47,524 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3724, 4.4140, 3.9199, 1.9606, 3.8430, 3.9965, 3.9705, 3.6094], + device='cuda:0'), covar=tensor([0.0742, 0.0589, 0.1039, 0.4684, 0.0792, 0.0898, 0.1427, 0.0731], + device='cuda:0'), in_proj_covar=tensor([0.0454, 0.0358, 0.0372, 0.0471, 0.0370, 0.0357, 0.0369, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 08:58:55,653 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:59:07,702 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4779, 2.8304, 1.8479, 2.1476, 2.2509, 1.4488, 1.9055, 2.1735], + device='cuda:0'), covar=tensor([0.1401, 0.0332, 0.1019, 0.0642, 0.0598, 0.1403, 0.1001, 0.0874], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0236, 0.0312, 0.0298, 0.0304, 0.0326, 0.0341, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 08:59:14,841 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.639e+02 3.213e+02 4.072e+02 8.769e+02, threshold=6.426e+02, percent-clipped=3.0 +2023-02-06 08:59:16,946 INFO [train.py:901] (0/4) Epoch 10, batch 4600, loss[loss=0.198, simple_loss=0.2741, pruned_loss=0.06092, over 7415.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3166, pruned_loss=0.0845, over 1611048.86 frames. ], batch size: 17, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:59:19,226 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8113, 1.9443, 2.1983, 1.5903, 1.2233, 2.3494, 0.2903, 1.4984], + device='cuda:0'), covar=tensor([0.3206, 0.1850, 0.0579, 0.2596, 0.4883, 0.0590, 0.4415, 0.1982], + device='cuda:0'), in_proj_covar=tensor([0.0160, 0.0162, 0.0091, 0.0211, 0.0250, 0.0099, 0.0161, 0.0156], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 08:59:21,272 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4695, 2.8334, 1.8220, 2.1100, 2.1586, 1.4571, 1.8367, 2.2156], + device='cuda:0'), covar=tensor([0.1460, 0.0298, 0.1131, 0.0697, 0.0734, 0.1475, 0.1075, 0.0946], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0237, 0.0315, 0.0299, 0.0305, 0.0327, 0.0342, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 08:59:50,936 INFO [train.py:901] (0/4) Epoch 10, batch 4650, loss[loss=0.2741, simple_loss=0.3416, pruned_loss=0.1033, over 8316.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3169, pruned_loss=0.08493, over 1612093.88 frames. ], batch size: 26, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:19,436 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 09:00:25,446 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 3.048e+02 3.591e+02 4.434e+02 8.168e+02, threshold=7.182e+02, percent-clipped=8.0 +2023-02-06 09:00:27,547 INFO [train.py:901] (0/4) Epoch 10, batch 4700, loss[loss=0.2329, simple_loss=0.3143, pruned_loss=0.07574, over 7799.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.316, pruned_loss=0.08434, over 1612797.77 frames. 
], batch size: 20, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:33,873 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:00:59,619 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7731, 1.9524, 2.2654, 1.6203, 1.1779, 2.4452, 0.4435, 1.4491], + device='cuda:0'), covar=tensor([0.2234, 0.1396, 0.0479, 0.2357, 0.4594, 0.0376, 0.3240, 0.1785], + device='cuda:0'), in_proj_covar=tensor([0.0158, 0.0161, 0.0091, 0.0210, 0.0248, 0.0098, 0.0161, 0.0156], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 09:01:02,738 INFO [train.py:901] (0/4) Epoch 10, batch 4750, loss[loss=0.2465, simple_loss=0.3253, pruned_loss=0.08387, over 8622.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3165, pruned_loss=0.08455, over 1611215.75 frames. ], batch size: 49, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:17,019 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6581, 1.4844, 1.5669, 1.2472, 0.9779, 1.3758, 1.5409, 1.5371], + device='cuda:0'), covar=tensor([0.0534, 0.1214, 0.1725, 0.1442, 0.0571, 0.1529, 0.0679, 0.0581], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0195, 0.0160, 0.0105, 0.0165, 0.0118, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 09:01:25,489 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0863, 1.5783, 1.5723, 1.3616, 1.1432, 1.4486, 1.7606, 1.5973], + device='cuda:0'), covar=tensor([0.0466, 0.1117, 0.1650, 0.1333, 0.0536, 0.1451, 0.0600, 0.0606], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0156, 0.0195, 0.0161, 0.0105, 0.0165, 0.0118, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:0') +2023-02-06 09:01:28,493 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 09:01:30,513 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 09:01:35,870 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.648e+02 3.312e+02 4.103e+02 1.054e+03, threshold=6.623e+02, percent-clipped=5.0 +2023-02-06 09:01:37,926 INFO [train.py:901] (0/4) Epoch 10, batch 4800, loss[loss=0.2555, simple_loss=0.3129, pruned_loss=0.0991, over 7533.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.3167, pruned_loss=0.08483, over 1614048.05 frames. ], batch size: 18, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:54,055 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,100 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,621 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:10,896 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:11,382 INFO [train.py:901] (0/4) Epoch 10, batch 4850, loss[loss=0.2217, simple_loss=0.2975, pruned_loss=0.07293, over 7926.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3169, pruned_loss=0.08538, over 1612794.42 frames. 
], batch size: 20, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:16,304 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 09:02:19,184 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5712, 2.9652, 2.4562, 3.7384, 1.6778, 1.9767, 2.1312, 3.1195], + device='cuda:0'), covar=tensor([0.0696, 0.0787, 0.0965, 0.0320, 0.1265, 0.1416, 0.1313, 0.0645], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0215, 0.0260, 0.0221, 0.0224, 0.0253, 0.0263, 0.0223], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 09:02:21,887 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2543, 2.1920, 1.6813, 1.9709, 1.8163, 1.2910, 1.5109, 1.6478], + device='cuda:0'), covar=tensor([0.1198, 0.0332, 0.1000, 0.0446, 0.0564, 0.1459, 0.0923, 0.0779], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0236, 0.0314, 0.0300, 0.0303, 0.0328, 0.0342, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:02:29,382 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6581, 1.7200, 4.9124, 1.8040, 4.3174, 4.1946, 4.4318, 4.2790], + device='cuda:0'), covar=tensor([0.0429, 0.3694, 0.0390, 0.3205, 0.1103, 0.0873, 0.0428, 0.0571], + device='cuda:0'), in_proj_covar=tensor([0.0457, 0.0558, 0.0562, 0.0517, 0.0591, 0.0501, 0.0495, 0.0566], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:02:37,996 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:38,613 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:41,995 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:46,052 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.616e+02 3.128e+02 3.870e+02 7.279e+02, threshold=6.256e+02, percent-clipped=1.0 +2023-02-06 09:02:48,054 INFO [train.py:901] (0/4) Epoch 10, batch 4900, loss[loss=0.2813, simple_loss=0.346, pruned_loss=0.1083, over 8468.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.317, pruned_loss=0.08515, over 1612074.90 frames. ], batch size: 25, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:51,344 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 09:03:08,153 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-06 09:03:15,419 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:18,975 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 09:03:19,985 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:21,734 INFO [train.py:901] (0/4) Epoch 10, batch 4950, loss[loss=0.2525, simple_loss=0.3325, pruned_loss=0.08626, over 8335.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3177, pruned_loss=0.08572, over 1616496.29 frames. 
], batch size: 26, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:26,726 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.71 vs. limit=5.0 +2023-02-06 09:03:54,524 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 2.775e+02 3.348e+02 4.012e+02 9.680e+02, threshold=6.695e+02, percent-clipped=4.0 +2023-02-06 09:03:57,188 INFO [train.py:901] (0/4) Epoch 10, batch 5000, loss[loss=0.2289, simple_loss=0.2904, pruned_loss=0.08373, over 7432.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3168, pruned_loss=0.08513, over 1613228.37 frames. ], batch size: 17, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:58,685 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:01,926 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:10,213 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-06 09:04:19,116 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7709, 2.1760, 3.6368, 2.7859, 3.1529, 2.4877, 1.9089, 1.8801], + device='cuda:0'), covar=tensor([0.3190, 0.3984, 0.1002, 0.2379, 0.1773, 0.1940, 0.1637, 0.4283], + device='cuda:0'), in_proj_covar=tensor([0.0879, 0.0851, 0.0717, 0.0829, 0.0927, 0.0782, 0.0699, 0.0757], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:04:30,717 INFO [train.py:901] (0/4) Epoch 10, batch 5050, loss[loss=0.27, simple_loss=0.3299, pruned_loss=0.1051, over 8131.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3174, pruned_loss=0.08525, over 1618901.31 frames. ], batch size: 22, lr: 7.62e-03, grad_scale: 8.0 +2023-02-06 09:04:51,126 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:52,878 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 09:05:02,962 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.592e+02 3.051e+02 4.098e+02 9.089e+02, threshold=6.102e+02, percent-clipped=4.0 +2023-02-06 09:05:05,647 INFO [train.py:901] (0/4) Epoch 10, batch 5100, loss[loss=0.2621, simple_loss=0.3137, pruned_loss=0.1052, over 7515.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3154, pruned_loss=0.08356, over 1620091.96 frames. ], batch size: 18, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:05:09,146 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:05:28,990 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9570, 2.3904, 2.6363, 1.5173, 2.8152, 1.7792, 1.5737, 1.8809], + device='cuda:0'), covar=tensor([0.0411, 0.0230, 0.0149, 0.0393, 0.0209, 0.0429, 0.0446, 0.0264], + device='cuda:0'), in_proj_covar=tensor([0.0370, 0.0306, 0.0261, 0.0369, 0.0296, 0.0454, 0.0342, 0.0332], + device='cuda:0'), out_proj_covar=tensor([1.0746e-04, 8.6909e-05, 7.4505e-05, 1.0521e-04, 8.5564e-05, 1.4086e-04, + 9.9553e-05, 9.5814e-05], device='cuda:0') +2023-02-06 09:05:40,070 INFO [train.py:901] (0/4) Epoch 10, batch 5150, loss[loss=0.203, simple_loss=0.2857, pruned_loss=0.06014, over 8196.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3152, pruned_loss=0.08342, over 1621605.00 frames. 
], batch size: 23, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:06:10,562 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7466, 1.9080, 2.2559, 1.7169, 1.3658, 2.2657, 0.3712, 1.3895], + device='cuda:0'), covar=tensor([0.2727, 0.1621, 0.0516, 0.1835, 0.4169, 0.0529, 0.3907, 0.2194], + device='cuda:0'), in_proj_covar=tensor([0.0161, 0.0164, 0.0092, 0.0211, 0.0250, 0.0098, 0.0161, 0.0161], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 09:06:11,264 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:11,731 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.805e+02 3.349e+02 3.898e+02 8.134e+02, threshold=6.697e+02, percent-clipped=4.0 +2023-02-06 09:06:13,804 INFO [train.py:901] (0/4) Epoch 10, batch 5200, loss[loss=0.2117, simple_loss=0.2804, pruned_loss=0.07146, over 6773.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3144, pruned_loss=0.08315, over 1615353.74 frames. ], batch size: 15, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:06:29,068 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:35,807 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:50,964 INFO [train.py:901] (0/4) Epoch 10, batch 5250, loss[loss=0.1948, simple_loss=0.2836, pruned_loss=0.05297, over 8133.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3142, pruned_loss=0.08301, over 1617678.98 frames. ], batch size: 22, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:06:52,496 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-78000.pt +2023-02-06 09:06:56,852 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 09:06:57,791 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:00,626 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:14,846 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:17,393 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:19,991 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:23,912 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.672e+02 3.378e+02 4.041e+02 9.848e+02, threshold=6.756e+02, percent-clipped=3.0 +2023-02-06 09:07:25,980 INFO [train.py:901] (0/4) Epoch 10, batch 5300, loss[loss=0.2468, simple_loss=0.3245, pruned_loss=0.08458, over 8498.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3138, pruned_loss=0.08277, over 1623159.30 frames. 
], batch size: 28, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:07:57,634 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:08:00,802 INFO [train.py:901] (0/4) Epoch 10, batch 5350, loss[loss=0.281, simple_loss=0.3479, pruned_loss=0.1071, over 8744.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3142, pruned_loss=0.08338, over 1622460.52 frames. ], batch size: 30, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:08:34,365 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.713e+02 3.238e+02 4.266e+02 6.892e+02, threshold=6.476e+02, percent-clipped=1.0 +2023-02-06 09:08:35,741 INFO [train.py:901] (0/4) Epoch 10, batch 5400, loss[loss=0.2665, simple_loss=0.3382, pruned_loss=0.09734, over 8551.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3143, pruned_loss=0.08372, over 1618540.99 frames. ], batch size: 31, lr: 7.61e-03, grad_scale: 8.0 +2023-02-06 09:08:40,001 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:09:07,680 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.58 vs. limit=5.0 +2023-02-06 09:09:08,707 INFO [train.py:901] (0/4) Epoch 10, batch 5450, loss[loss=0.2177, simple_loss=0.2908, pruned_loss=0.07233, over 7925.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3148, pruned_loss=0.08424, over 1618059.85 frames. ], batch size: 20, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:11,046 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2722, 1.2541, 4.4886, 1.6963, 3.8560, 3.6198, 3.9811, 3.8266], + device='cuda:0'), covar=tensor([0.0546, 0.4394, 0.0496, 0.3503, 0.1252, 0.0915, 0.0595, 0.0641], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0560, 0.0562, 0.0520, 0.0600, 0.0503, 0.0497, 0.0566], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:09:17,777 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 09:09:25,579 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9311, 2.1904, 3.3523, 1.6680, 2.6744, 2.2876, 2.0550, 2.6285], + device='cuda:0'), covar=tensor([0.1260, 0.1815, 0.0632, 0.2944, 0.1214, 0.2038, 0.1334, 0.1589], + device='cuda:0'), in_proj_covar=tensor([0.0486, 0.0508, 0.0531, 0.0575, 0.0609, 0.0551, 0.0466, 0.0606], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 09:09:43,438 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.697e+02 3.396e+02 4.413e+02 8.943e+02, threshold=6.791e+02, percent-clipped=7.0 +2023-02-06 09:09:44,805 INFO [train.py:901] (0/4) Epoch 10, batch 5500, loss[loss=0.2311, simple_loss=0.3222, pruned_loss=0.07002, over 8251.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3142, pruned_loss=0.08358, over 1613720.75 frames. ], batch size: 24, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:46,323 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:09:46,834 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 09:10:18,555 INFO [train.py:901] (0/4) Epoch 10, batch 5550, loss[loss=0.2803, simple_loss=0.3452, pruned_loss=0.1077, over 8257.00 frames. 
], tot_loss[loss=0.2424, simple_loss=0.3159, pruned_loss=0.08439, over 1612255.35 frames. ], batch size: 24, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:32,884 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 09:10:42,880 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.35 vs. limit=5.0 +2023-02-06 09:10:52,577 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.563e+02 3.102e+02 4.076e+02 7.679e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 09:10:54,679 INFO [train.py:901] (0/4) Epoch 10, batch 5600, loss[loss=0.2201, simple_loss=0.2947, pruned_loss=0.07276, over 8293.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3149, pruned_loss=0.08378, over 1613151.59 frames. ], batch size: 23, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:55,585 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:00,335 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5083, 1.8126, 1.8513, 1.2978, 2.0311, 1.4021, 0.4635, 1.7315], + device='cuda:0'), covar=tensor([0.0310, 0.0200, 0.0175, 0.0268, 0.0218, 0.0565, 0.0487, 0.0143], + device='cuda:0'), in_proj_covar=tensor([0.0374, 0.0307, 0.0263, 0.0372, 0.0300, 0.0459, 0.0347, 0.0332], + device='cuda:0'), out_proj_covar=tensor([1.0823e-04, 8.6926e-05, 7.4862e-05, 1.0610e-04, 8.6683e-05, 1.4272e-04, + 1.0089e-04, 9.5404e-05], device='cuda:0') +2023-02-06 09:11:12,347 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:15,171 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.45 vs. limit=5.0 +2023-02-06 09:11:28,359 INFO [train.py:901] (0/4) Epoch 10, batch 5650, loss[loss=0.199, simple_loss=0.2886, pruned_loss=0.0547, over 7649.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3147, pruned_loss=0.08336, over 1613272.52 frames. ], batch size: 19, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:11:28,796 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-06 09:11:36,926 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:48,309 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 09:11:54,453 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:12:02,397 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.443e+02 2.913e+02 3.480e+02 5.594e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-06 09:12:03,745 INFO [train.py:901] (0/4) Epoch 10, batch 5700, loss[loss=0.236, simple_loss=0.3043, pruned_loss=0.08385, over 7650.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3154, pruned_loss=0.0833, over 1612003.72 frames. 
], batch size: 19, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:12:19,552 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7148, 1.7388, 2.0276, 1.6189, 1.2605, 2.0275, 0.2612, 1.2887], + device='cuda:0'), covar=tensor([0.2623, 0.1796, 0.0510, 0.1675, 0.4255, 0.0666, 0.3458, 0.2003], + device='cuda:0'), in_proj_covar=tensor([0.0159, 0.0163, 0.0091, 0.0208, 0.0249, 0.0098, 0.0159, 0.0158], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 09:12:25,605 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:12:26,994 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0818, 1.5458, 3.4527, 1.4744, 2.2177, 3.8168, 3.8131, 3.2782], + device='cuda:0'), covar=tensor([0.0862, 0.1451, 0.0303, 0.1740, 0.0911, 0.0197, 0.0420, 0.0565], + device='cuda:0'), in_proj_covar=tensor([0.0254, 0.0291, 0.0252, 0.0282, 0.0266, 0.0230, 0.0322, 0.0283], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:0') +2023-02-06 09:12:27,752 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4954, 1.9286, 3.6392, 1.2879, 2.4380, 1.9701, 1.6017, 2.3156], + device='cuda:0'), covar=tensor([0.1728, 0.2147, 0.0606, 0.3729, 0.1656, 0.2731, 0.1728, 0.2185], + device='cuda:0'), in_proj_covar=tensor([0.0485, 0.0506, 0.0529, 0.0573, 0.0613, 0.0550, 0.0465, 0.0604], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 09:12:39,142 INFO [train.py:901] (0/4) Epoch 10, batch 5750, loss[loss=0.1736, simple_loss=0.2499, pruned_loss=0.04865, over 7660.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3155, pruned_loss=0.08345, over 1610103.52 frames. ], batch size: 19, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:12:53,272 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 09:13:11,243 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.793e+02 3.478e+02 4.404e+02 1.244e+03, threshold=6.955e+02, percent-clipped=11.0 +2023-02-06 09:13:12,605 INFO [train.py:901] (0/4) Epoch 10, batch 5800, loss[loss=0.2582, simple_loss=0.3408, pruned_loss=0.08781, over 8473.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3157, pruned_loss=0.08373, over 1609635.69 frames. ], batch size: 25, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:13:31,624 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:13:45,411 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:13:48,015 INFO [train.py:901] (0/4) Epoch 10, batch 5850, loss[loss=0.2992, simple_loss=0.3615, pruned_loss=0.1185, over 8337.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3164, pruned_loss=0.08455, over 1609721.11 frames. ], batch size: 25, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:14:19,894 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.589e+02 3.164e+02 4.281e+02 9.296e+02, threshold=6.329e+02, percent-clipped=5.0 +2023-02-06 09:14:21,258 INFO [train.py:901] (0/4) Epoch 10, batch 5900, loss[loss=0.246, simple_loss=0.3233, pruned_loss=0.08432, over 8501.00 frames. 
], tot_loss[loss=0.2409, simple_loss=0.315, pruned_loss=0.08334, over 1610518.46 frames. ], batch size: 29, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:14:32,172 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6037, 1.3932, 1.5912, 1.2334, 0.8966, 1.3787, 1.6487, 1.6111], + device='cuda:0'), covar=tensor([0.0577, 0.1295, 0.1824, 0.1401, 0.0601, 0.1615, 0.0684, 0.0612], + device='cuda:0'), in_proj_covar=tensor([0.0103, 0.0156, 0.0196, 0.0160, 0.0106, 0.0166, 0.0119, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 09:14:57,568 INFO [train.py:901] (0/4) Epoch 10, batch 5950, loss[loss=0.1959, simple_loss=0.2715, pruned_loss=0.0602, over 7792.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3136, pruned_loss=0.08282, over 1610967.82 frames. ], batch size: 19, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:15:05,417 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:15:30,047 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.430e+02 2.939e+02 3.954e+02 7.661e+02, threshold=5.878e+02, percent-clipped=3.0 +2023-02-06 09:15:31,435 INFO [train.py:901] (0/4) Epoch 10, batch 6000, loss[loss=0.2505, simple_loss=0.3363, pruned_loss=0.08232, over 8491.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3147, pruned_loss=0.08347, over 1614859.61 frames. ], batch size: 29, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:15:31,436 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 09:15:43,955 INFO [train.py:935] (0/4) Epoch 10, validation: loss=0.1914, simple_loss=0.2907, pruned_loss=0.04604, over 944034.00 frames. +2023-02-06 09:15:43,956 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 09:16:14,654 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.12 vs. limit=2.0 +2023-02-06 09:16:18,420 INFO [train.py:901] (0/4) Epoch 10, batch 6050, loss[loss=0.2638, simple_loss=0.3281, pruned_loss=0.09973, over 7403.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3146, pruned_loss=0.08346, over 1612616.78 frames. ], batch size: 72, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:16:35,958 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:16:40,303 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 09:16:52,859 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.842e+02 3.348e+02 4.641e+02 9.072e+02, threshold=6.696e+02, percent-clipped=15.0 +2023-02-06 09:16:54,181 INFO [train.py:901] (0/4) Epoch 10, batch 6100, loss[loss=0.2304, simple_loss=0.3144, pruned_loss=0.07316, over 8077.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3157, pruned_loss=0.08438, over 1616293.37 frames. ], batch size: 21, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:17:24,368 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 09:17:27,730 INFO [train.py:901] (0/4) Epoch 10, batch 6150, loss[loss=0.2552, simple_loss=0.3367, pruned_loss=0.08684, over 8332.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3155, pruned_loss=0.08394, over 1619540.80 frames. 
], batch size: 25, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:17:41,311 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:17:51,534 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 09:17:54,625 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78937.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:01,039 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.486e+02 3.076e+02 3.632e+02 7.166e+02, threshold=6.152e+02, percent-clipped=1.0 +2023-02-06 09:18:02,472 INFO [train.py:901] (0/4) Epoch 10, batch 6200, loss[loss=0.2386, simple_loss=0.3168, pruned_loss=0.08023, over 8690.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3152, pruned_loss=0.08364, over 1622002.85 frames. ], batch size: 34, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:18:15,684 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:28,060 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:32,919 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:38,071 INFO [train.py:901] (0/4) Epoch 10, batch 6250, loss[loss=0.197, simple_loss=0.2712, pruned_loss=0.06135, over 7700.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3144, pruned_loss=0.08309, over 1622500.26 frames. ], batch size: 18, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:18:47,162 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3586, 1.9598, 3.1994, 1.1837, 2.3286, 1.7598, 1.4824, 2.1293], + device='cuda:0'), covar=tensor([0.1813, 0.2083, 0.0737, 0.3850, 0.1621, 0.2969, 0.1819, 0.2161], + device='cuda:0'), in_proj_covar=tensor([0.0486, 0.0511, 0.0533, 0.0576, 0.0613, 0.0554, 0.0467, 0.0606], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 09:19:01,795 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79033.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:19:07,232 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 09:19:10,142 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.509e+02 3.177e+02 4.128e+02 1.006e+03, threshold=6.355e+02, percent-clipped=7.0 +2023-02-06 09:19:11,555 INFO [train.py:901] (0/4) Epoch 10, batch 6300, loss[loss=0.245, simple_loss=0.3314, pruned_loss=0.0793, over 8403.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3149, pruned_loss=0.08347, over 1620079.53 frames. ], batch size: 49, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:19:47,655 INFO [train.py:901] (0/4) Epoch 10, batch 6350, loss[loss=0.2628, simple_loss=0.3139, pruned_loss=0.1059, over 8336.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.3153, pruned_loss=0.08465, over 1615389.38 frames. ], batch size: 26, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:20:00,245 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-02-06 09:20:01,435 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2209, 1.8512, 2.7599, 2.2397, 2.4300, 2.0257, 1.5843, 1.2339], + device='cuda:0'), covar=tensor([0.3828, 0.3837, 0.1023, 0.2029, 0.1662, 0.2126, 0.1954, 0.3410], + device='cuda:0'), in_proj_covar=tensor([0.0865, 0.0842, 0.0701, 0.0816, 0.0911, 0.0772, 0.0685, 0.0742], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:20:20,614 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.998e+02 3.636e+02 4.667e+02 1.201e+03, threshold=7.271e+02, percent-clipped=11.0 +2023-02-06 09:20:21,297 INFO [train.py:901] (0/4) Epoch 10, batch 6400, loss[loss=0.2925, simple_loss=0.3481, pruned_loss=0.1185, over 8331.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3153, pruned_loss=0.08445, over 1617246.06 frames. ], batch size: 25, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:20:54,142 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:20:57,426 INFO [train.py:901] (0/4) Epoch 10, batch 6450, loss[loss=0.1743, simple_loss=0.2552, pruned_loss=0.04668, over 7717.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3141, pruned_loss=0.08375, over 1614056.67 frames. ], batch size: 18, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:21:12,196 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79218.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:21:27,157 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1544, 1.7818, 2.5390, 1.9840, 2.2599, 1.9835, 1.6358, 0.9979], + device='cuda:0'), covar=tensor([0.3553, 0.3448, 0.1109, 0.2457, 0.1856, 0.2125, 0.1729, 0.3900], + device='cuda:0'), in_proj_covar=tensor([0.0871, 0.0846, 0.0707, 0.0821, 0.0918, 0.0775, 0.0690, 0.0748], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:21:31,632 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.577e+02 3.130e+02 4.050e+02 7.383e+02, threshold=6.260e+02, percent-clipped=1.0 +2023-02-06 09:21:32,337 INFO [train.py:901] (0/4) Epoch 10, batch 6500, loss[loss=0.2508, simple_loss=0.3261, pruned_loss=0.08772, over 8166.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3148, pruned_loss=0.08348, over 1614657.35 frames. ], batch size: 48, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:21:35,280 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 09:21:49,240 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3697, 1.8688, 2.9102, 2.1912, 2.6547, 2.0709, 1.6925, 1.2402], + device='cuda:0'), covar=tensor([0.3634, 0.3798, 0.1034, 0.2579, 0.1721, 0.2056, 0.1714, 0.4142], + device='cuda:0'), in_proj_covar=tensor([0.0880, 0.0853, 0.0714, 0.0830, 0.0925, 0.0782, 0.0699, 0.0756], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:21:59,867 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79289.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:06,996 INFO [train.py:901] (0/4) Epoch 10, batch 6550, loss[loss=0.3027, simple_loss=0.3651, pruned_loss=0.1201, over 6527.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.314, pruned_loss=0.08304, over 1611596.75 frames. 
], batch size: 71, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:22:11,149 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79303.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:22:18,365 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79314.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:27,871 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:36,563 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 09:22:41,242 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.767e+02 3.312e+02 4.239e+02 1.073e+03, threshold=6.623e+02, percent-clipped=3.0 +2023-02-06 09:22:41,948 INFO [train.py:901] (0/4) Epoch 10, batch 6600, loss[loss=0.2415, simple_loss=0.3264, pruned_loss=0.07834, over 8254.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3151, pruned_loss=0.08363, over 1613711.99 frames. ], batch size: 24, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:22:44,149 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:53,884 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 09:23:04,713 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:23:11,888 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79393.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:23:15,196 INFO [train.py:901] (0/4) Epoch 10, batch 6650, loss[loss=0.2396, simple_loss=0.3118, pruned_loss=0.08375, over 8532.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3172, pruned_loss=0.08514, over 1617970.96 frames. ], batch size: 28, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:23:33,628 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4747, 2.9202, 1.8714, 2.3958, 2.3115, 1.5585, 2.1337, 2.2231], + device='cuda:0'), covar=tensor([0.1437, 0.0292, 0.1009, 0.0581, 0.0568, 0.1295, 0.0891, 0.0902], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0229, 0.0308, 0.0296, 0.0303, 0.0319, 0.0336, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:23:47,672 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:23:50,867 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.666e+02 3.220e+02 4.193e+02 8.839e+02, threshold=6.440e+02, percent-clipped=3.0 +2023-02-06 09:23:51,581 INFO [train.py:901] (0/4) Epoch 10, batch 6700, loss[loss=0.2776, simple_loss=0.3415, pruned_loss=0.1069, over 8135.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3162, pruned_loss=0.08464, over 1614299.56 frames. ], batch size: 22, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:24:24,670 INFO [train.py:901] (0/4) Epoch 10, batch 6750, loss[loss=0.2307, simple_loss=0.2988, pruned_loss=0.08128, over 7671.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.3174, pruned_loss=0.08574, over 1615830.19 frames. 
], batch size: 19, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:24:28,996 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1133, 2.7610, 2.2343, 2.3861, 2.3943, 2.0185, 2.2220, 2.4415], + device='cuda:0'), covar=tensor([0.0885, 0.0221, 0.0678, 0.0464, 0.0468, 0.0892, 0.0682, 0.0645], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0229, 0.0308, 0.0296, 0.0305, 0.0320, 0.0338, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:25:00,368 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.662e+02 3.188e+02 4.113e+02 8.575e+02, threshold=6.376e+02, percent-clipped=4.0 +2023-02-06 09:25:01,059 INFO [train.py:901] (0/4) Epoch 10, batch 6800, loss[loss=0.1741, simple_loss=0.2599, pruned_loss=0.04417, over 7422.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3156, pruned_loss=0.08442, over 1614639.23 frames. ], batch size: 17, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:11,659 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 09:25:14,138 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 09:25:19,537 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4186, 1.4610, 1.3622, 1.8805, 0.7612, 1.1813, 1.3369, 1.4906], + device='cuda:0'), covar=tensor([0.0832, 0.0828, 0.1185, 0.0565, 0.1179, 0.1513, 0.0805, 0.0782], + device='cuda:0'), in_proj_covar=tensor([0.0239, 0.0211, 0.0254, 0.0215, 0.0218, 0.0251, 0.0258, 0.0224], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 09:25:35,817 INFO [train.py:901] (0/4) Epoch 10, batch 6850, loss[loss=0.2493, simple_loss=0.3311, pruned_loss=0.08377, over 8359.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3147, pruned_loss=0.08357, over 1614852.29 frames. ], batch size: 24, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:39,343 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:25:59,645 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 09:26:10,489 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.480e+02 2.958e+02 3.519e+02 6.592e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-06 09:26:10,574 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79647.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:26:11,113 INFO [train.py:901] (0/4) Epoch 10, batch 6900, loss[loss=0.2567, simple_loss=0.3309, pruned_loss=0.09122, over 8502.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3138, pruned_loss=0.08268, over 1614813.99 frames. ], batch size: 26, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:26:30,977 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:26:44,397 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:26:46,400 INFO [train.py:901] (0/4) Epoch 10, batch 6950, loss[loss=0.2395, simple_loss=0.3088, pruned_loss=0.08512, over 7804.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3144, pruned_loss=0.08269, over 1615173.68 frames. 
], batch size: 20, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:26:46,618 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:03,548 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:05,482 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:10,656 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 09:27:12,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79737.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:27:19,257 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.770e+02 3.379e+02 4.019e+02 1.115e+03, threshold=6.759e+02, percent-clipped=8.0 +2023-02-06 09:27:19,980 INFO [train.py:901] (0/4) Epoch 10, batch 7000, loss[loss=0.2748, simple_loss=0.3375, pruned_loss=0.106, over 8789.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3147, pruned_loss=0.08367, over 1612165.11 frames. ], batch size: 30, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:27:30,359 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79762.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:27:43,435 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79780.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:55,331 INFO [train.py:901] (0/4) Epoch 10, batch 7050, loss[loss=0.2159, simple_loss=0.2793, pruned_loss=0.07628, over 7711.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3142, pruned_loss=0.08349, over 1609374.94 frames. ], batch size: 18, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:28:04,452 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:28:25,524 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:28:26,876 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2036, 1.7050, 1.9344, 1.6083, 1.3493, 1.8419, 2.4731, 2.7678], + device='cuda:0'), covar=tensor([0.0435, 0.1229, 0.1636, 0.1352, 0.0601, 0.1454, 0.0596, 0.0428], + device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0152, 0.0194, 0.0159, 0.0106, 0.0164, 0.0118, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 09:28:29,361 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.704e+02 3.361e+02 4.306e+02 1.362e+03, threshold=6.722e+02, percent-clipped=5.0 +2023-02-06 09:28:30,076 INFO [train.py:901] (0/4) Epoch 10, batch 7100, loss[loss=0.2482, simple_loss=0.3194, pruned_loss=0.08854, over 8550.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3142, pruned_loss=0.08344, over 1609969.81 frames. ], batch size: 31, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:28:32,878 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79852.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:29:06,027 INFO [train.py:901] (0/4) Epoch 10, batch 7150, loss[loss=0.2623, simple_loss=0.3411, pruned_loss=0.09182, over 8188.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3152, pruned_loss=0.0837, over 1608338.76 frames. 
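In the `optim.py:369` lines, `grad-norm quartiles` lists five points of the recent gradient-norm distribution (by appearance: min, 25 %, median, 75 %, max), and the reported threshold tracks `Clipping_scale=2.0` times the median (e.g. 2.0 × 3.312e+02 ≈ 6.623e+02 in the first such entry above); `percent-clipped` is the fraction of recent batches whose norm exceeded it. Below is a rough sketch of median-based clipping over a sliding window of per-batch gradient norms; the window length of 1000 batches is an assumption.

```python
from collections import deque
from typing import Iterable

import torch

WINDOW = 1000          # assumed number of recent batches to track
CLIPPING_SCALE = 2.0   # matches Clipping_scale=2.0 in the log

recent_norms: deque = deque(maxlen=WINDOW)


def clip_by_median(params: Iterable[torch.nn.Parameter]) -> float:
    """Clip gradients to CLIPPING_SCALE times the median recent grad norm.

    Returns the threshold used for this step.
    """
    params = [p for p in params if p.grad is not None]
    norm = torch.norm(
        torch.stack([p.grad.detach().norm() for p in params])
    ).item()
    recent_norms.append(norm)

    sorted_norms = sorted(recent_norms)
    median = sorted_norms[len(sorted_norms) // 2]
    threshold = CLIPPING_SCALE * median

    if norm > threshold:  # scale every gradient down to the threshold
        scale = threshold / norm
        for p in params:
            p.grad.mul_(scale)
    return threshold
```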
], batch size: 23, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:29:16,898 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3479, 1.2778, 1.5729, 1.2493, 0.7722, 1.3742, 1.1978, 1.3112], + device='cuda:0'), covar=tensor([0.0556, 0.1182, 0.1758, 0.1396, 0.0594, 0.1479, 0.0672, 0.0607], + device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0153, 0.0195, 0.0160, 0.0105, 0.0164, 0.0118, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 09:29:30,376 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9753, 1.6086, 2.1646, 1.8048, 1.9857, 1.9059, 1.5827, 0.7014], + device='cuda:0'), covar=tensor([0.3923, 0.3643, 0.1201, 0.2191, 0.1754, 0.1966, 0.1660, 0.3752], + device='cuda:0'), in_proj_covar=tensor([0.0877, 0.0852, 0.0716, 0.0823, 0.0925, 0.0783, 0.0698, 0.0759], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:29:39,480 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.702e+02 3.262e+02 4.332e+02 1.613e+03, threshold=6.525e+02, percent-clipped=3.0 +2023-02-06 09:29:39,570 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:29:40,183 INFO [train.py:901] (0/4) Epoch 10, batch 7200, loss[loss=0.2382, simple_loss=0.2987, pruned_loss=0.08888, over 7792.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3155, pruned_loss=0.08415, over 1611302.70 frames. ], batch size: 19, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:30:09,195 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4777, 1.3545, 4.6828, 1.7735, 4.0945, 3.8708, 4.1848, 4.0596], + device='cuda:0'), covar=tensor([0.0543, 0.4201, 0.0436, 0.3215, 0.1102, 0.0754, 0.0517, 0.0616], + device='cuda:0'), in_proj_covar=tensor([0.0464, 0.0560, 0.0564, 0.0519, 0.0587, 0.0498, 0.0491, 0.0562], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:30:13,865 INFO [train.py:901] (0/4) Epoch 10, batch 7250, loss[loss=0.207, simple_loss=0.2784, pruned_loss=0.06782, over 7211.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3131, pruned_loss=0.08285, over 1608868.74 frames. ], batch size: 16, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:30:14,881 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-02-06 09:30:15,334 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-80000.pt +2023-02-06 09:30:30,524 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80018.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:30:31,020 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80019.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:30:33,692 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6566, 1.8463, 2.2307, 1.7798, 1.1909, 2.3239, 0.3295, 1.4778], + device='cuda:0'), covar=tensor([0.3276, 0.1661, 0.0463, 0.2047, 0.4433, 0.0521, 0.3468, 0.1918], + device='cuda:0'), in_proj_covar=tensor([0.0162, 0.0163, 0.0093, 0.0208, 0.0253, 0.0100, 0.0159, 0.0160], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 09:30:47,886 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80043.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:30:50,296 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.755e+02 3.243e+02 3.993e+02 1.489e+03, threshold=6.485e+02, percent-clipped=9.0 +2023-02-06 09:30:50,951 INFO [train.py:901] (0/4) Epoch 10, batch 7300, loss[loss=0.1869, simple_loss=0.2609, pruned_loss=0.05645, over 7441.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3128, pruned_loss=0.08263, over 1607185.15 frames. ], batch size: 17, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:31:00,590 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:03,340 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:04,626 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1133, 4.1228, 3.7510, 2.1288, 3.6787, 3.7306, 3.7788, 3.4577], + device='cuda:0'), covar=tensor([0.0905, 0.0596, 0.1022, 0.4064, 0.0928, 0.0946, 0.1321, 0.0767], + device='cuda:0'), in_proj_covar=tensor([0.0450, 0.0354, 0.0370, 0.0466, 0.0369, 0.0354, 0.0361, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 09:31:19,981 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80091.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:24,176 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:24,603 INFO [train.py:901] (0/4) Epoch 10, batch 7350, loss[loss=0.2683, simple_loss=0.3318, pruned_loss=0.1024, over 8474.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3146, pruned_loss=0.08316, over 1610551.93 frames. 
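The `checkpoint.py:75` line just above records a batch-level checkpoint. Besides the per-epoch files (`epoch-10.pt` appears later in this log), a `checkpoint-<batch>.pt` is written at a fixed batch interval; `checkpoint-80000.pt` here and `checkpoint-82000.pt` later suggest an interval of 2000 batches, which is an inference from the filenames rather than something the log states. A sketch of that cadence:

```python
import logging

import torch

SAVE_EVERY_N = 2000  # inferred from checkpoint-80000.pt / checkpoint-82000.pt


def maybe_save_checkpoint(
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
    batch_idx_train: int,
    exp_dir: str = "pruned_transducer_stateless7_streaming/exp/v1",
) -> None:
    """Write checkpoint-<batch>.pt every SAVE_EVERY_N training batches."""
    if batch_idx_train == 0 or batch_idx_train % SAVE_EVERY_N != 0:
        return
    filename = f"{exp_dir}/checkpoint-{batch_idx_train}.pt"
    logging.info(f"Saving checkpoint to {filename}")
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "batch_idx_train": batch_idx_train,
        },
        filename,
    )
```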
], batch size: 25, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:31:27,536 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0649, 2.2892, 1.7979, 2.7327, 1.2135, 1.5026, 1.6734, 2.3206], + device='cuda:0'), covar=tensor([0.0802, 0.0877, 0.1107, 0.0457, 0.1328, 0.1583, 0.1237, 0.0796], + device='cuda:0'), in_proj_covar=tensor([0.0240, 0.0214, 0.0254, 0.0218, 0.0219, 0.0251, 0.0259, 0.0227], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 09:31:31,669 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80108.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:31:32,372 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0769, 1.7642, 2.5718, 2.0437, 2.3227, 2.0211, 1.6514, 0.9999], + device='cuda:0'), covar=tensor([0.4027, 0.3843, 0.1135, 0.2329, 0.1747, 0.2016, 0.1584, 0.3830], + device='cuda:0'), in_proj_covar=tensor([0.0872, 0.0846, 0.0711, 0.0819, 0.0921, 0.0780, 0.0694, 0.0755], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:31:40,383 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5192, 1.3034, 4.6973, 1.8136, 4.1096, 3.9308, 4.2122, 4.0613], + device='cuda:0'), covar=tensor([0.0483, 0.4442, 0.0391, 0.3253, 0.0970, 0.0770, 0.0497, 0.0613], + device='cuda:0'), in_proj_covar=tensor([0.0457, 0.0554, 0.0554, 0.0508, 0.0580, 0.0490, 0.0486, 0.0552], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:31:42,390 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:44,301 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:50,381 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80133.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:31:50,991 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:54,314 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4206, 2.6571, 1.7905, 2.2773, 2.1215, 1.4039, 1.9936, 2.3125], + device='cuda:0'), covar=tensor([0.1529, 0.0384, 0.1218, 0.0679, 0.0687, 0.1646, 0.1109, 0.0813], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0233, 0.0311, 0.0297, 0.0309, 0.0323, 0.0340, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:31:56,732 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 09:31:59,431 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.519e+02 3.343e+02 4.224e+02 9.659e+02, threshold=6.686e+02, percent-clipped=6.0 +2023-02-06 09:32:00,147 INFO [train.py:901] (0/4) Epoch 10, batch 7400, loss[loss=0.2154, simple_loss=0.2832, pruned_loss=0.07384, over 7538.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3143, pruned_loss=0.08312, over 1611352.92 frames. ], batch size: 18, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:32:16,225 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-06 09:32:34,304 INFO [train.py:901] (0/4) Epoch 10, batch 7450, loss[loss=0.2467, simple_loss=0.3326, pruned_loss=0.08047, over 8187.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3144, pruned_loss=0.08358, over 1607193.87 frames. ], batch size: 23, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:32:35,893 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1828, 1.2133, 3.2916, 0.9739, 2.8449, 2.7272, 3.0083, 2.8861], + device='cuda:0'), covar=tensor([0.0741, 0.4070, 0.0766, 0.3611, 0.1390, 0.0984, 0.0762, 0.0884], + device='cuda:0'), in_proj_covar=tensor([0.0462, 0.0558, 0.0561, 0.0511, 0.0584, 0.0492, 0.0489, 0.0555], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:32:39,979 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6215, 1.9342, 3.3031, 1.3390, 2.4020, 2.0561, 1.6379, 2.1174], + device='cuda:0'), covar=tensor([0.1597, 0.2185, 0.0641, 0.3711, 0.1501, 0.2643, 0.1716, 0.2270], + device='cuda:0'), in_proj_covar=tensor([0.0488, 0.0511, 0.0532, 0.0574, 0.0614, 0.0554, 0.0467, 0.0606], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 09:32:54,360 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 09:33:02,477 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:33:09,145 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.637e+02 3.217e+02 3.901e+02 6.824e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 09:33:09,863 INFO [train.py:901] (0/4) Epoch 10, batch 7500, loss[loss=0.2556, simple_loss=0.3169, pruned_loss=0.09714, over 7785.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3155, pruned_loss=0.08434, over 1610274.16 frames. ], batch size: 19, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:33:19,685 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-06 09:33:43,947 INFO [train.py:901] (0/4) Epoch 10, batch 7550, loss[loss=0.2224, simple_loss=0.3071, pruned_loss=0.06885, over 8035.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3152, pruned_loss=0.08386, over 1612156.94 frames. ], batch size: 22, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:33:46,241 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:33:57,962 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:15,027 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:16,558 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 09:34:17,459 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.399e+02 2.908e+02 3.933e+02 1.078e+03, threshold=5.816e+02, percent-clipped=3.0 +2023-02-06 09:34:18,142 INFO [train.py:901] (0/4) Epoch 10, batch 7600, loss[loss=0.2135, simple_loss=0.288, pruned_loss=0.06949, over 7543.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.315, pruned_loss=0.083, over 1618060.42 frames. 
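The learning rate in these lines decays smoothly with the batch count inside an epoch (7.55e-03 down to 7.48e-03 across epoch 10) and steps down again when a new epoch begins (7.14e-03 at the start of epoch 11). That shape is consistent with icefall's Eden schedule. The sketch below uses `base_lr=0.05`, `lr_batches=5000`, `lr_epochs=3.5`, and a zero-based epoch counter; all four are assumptions, chosen because they reproduce the logged values to about three significant figures.

```python
def eden_lr(
    batch: int,
    epoch: int,
    base_lr: float = 0.05,     # assumed; not stated in this log
    lr_batches: float = 5000,  # assumed
    lr_epochs: float = 3.5,    # assumed
) -> float:
    """Eden-style schedule: smooth power-law decay in both batch and epoch."""
    batch_factor = ((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25
    epoch_factor = ((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25
    return base_lr * batch_factor * epoch_factor


# With a zero-based epoch counter these settings match the log:
print(f"{eden_lr(79303, 9):.2e}")   # ~7.55e-03 ("Epoch 10" above)
print(f"{eden_lr(80830, 10):.2e}")  # ~7.14e-03 (start of "Epoch 11")
```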
], batch size: 18, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:34:22,311 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1100, 2.3682, 2.6460, 1.3531, 2.8024, 1.6944, 1.5384, 1.9717], + device='cuda:0'), covar=tensor([0.0468, 0.0243, 0.0206, 0.0476, 0.0240, 0.0537, 0.0559, 0.0298], + device='cuda:0'), in_proj_covar=tensor([0.0370, 0.0304, 0.0259, 0.0374, 0.0293, 0.0452, 0.0345, 0.0336], + device='cuda:0'), out_proj_covar=tensor([1.0690e-04, 8.5945e-05, 7.3445e-05, 1.0645e-04, 8.4249e-05, 1.3921e-04, + 9.9935e-05, 9.6759e-05], device='cuda:0') +2023-02-06 09:34:49,205 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80390.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:54,346 INFO [train.py:901] (0/4) Epoch 10, batch 7650, loss[loss=0.2842, simple_loss=0.3557, pruned_loss=0.1063, over 8240.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3145, pruned_loss=0.08285, over 1610111.13 frames. ], batch size: 24, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:35:03,327 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:06,125 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:20,856 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1680, 1.4994, 1.6634, 1.3662, 1.0467, 1.5093, 1.8724, 1.7370], + device='cuda:0'), covar=tensor([0.0494, 0.1329, 0.1736, 0.1419, 0.0619, 0.1514, 0.0683, 0.0572], + device='cuda:0'), in_proj_covar=tensor([0.0103, 0.0154, 0.0196, 0.0160, 0.0106, 0.0165, 0.0119, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 09:35:27,279 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.621e+02 3.149e+02 3.913e+02 9.838e+02, threshold=6.298e+02, percent-clipped=6.0 +2023-02-06 09:35:27,989 INFO [train.py:901] (0/4) Epoch 10, batch 7700, loss[loss=0.2136, simple_loss=0.2911, pruned_loss=0.06806, over 8024.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3139, pruned_loss=0.08243, over 1610071.81 frames. ], batch size: 22, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:35:51,546 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:01,547 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:03,351 INFO [train.py:901] (0/4) Epoch 10, batch 7750, loss[loss=0.2049, simple_loss=0.2839, pruned_loss=0.06299, over 8235.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3135, pruned_loss=0.08283, over 1608146.48 frames. ], batch size: 22, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:36:06,760 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 09:36:13,282 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:15,240 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1521, 1.2671, 4.3060, 1.5517, 3.8267, 3.5890, 3.8514, 3.7463], + device='cuda:0'), covar=tensor([0.0521, 0.4024, 0.0393, 0.3119, 0.0952, 0.0814, 0.0513, 0.0630], + device='cuda:0'), in_proj_covar=tensor([0.0471, 0.0566, 0.0564, 0.0519, 0.0589, 0.0498, 0.0496, 0.0564], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:36:18,625 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:19,249 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2513, 1.3038, 3.3350, 0.9599, 2.9866, 2.8505, 3.0634, 2.9783], + device='cuda:0'), covar=tensor([0.0628, 0.3391, 0.0639, 0.3270, 0.1158, 0.0919, 0.0624, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0470, 0.0564, 0.0561, 0.0517, 0.0587, 0.0496, 0.0494, 0.0562], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:36:23,373 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5870, 1.8730, 2.1705, 1.4887, 2.2654, 1.4251, 0.7331, 1.7779], + device='cuda:0'), covar=tensor([0.0432, 0.0233, 0.0159, 0.0314, 0.0228, 0.0609, 0.0547, 0.0211], + device='cuda:0'), in_proj_covar=tensor([0.0373, 0.0303, 0.0260, 0.0373, 0.0294, 0.0452, 0.0344, 0.0336], + device='cuda:0'), out_proj_covar=tensor([1.0778e-04, 8.5584e-05, 7.3774e-05, 1.0625e-04, 8.4444e-05, 1.3911e-04, + 9.9402e-05, 9.6612e-05], device='cuda:0') +2023-02-06 09:36:36,355 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.713e+02 3.406e+02 4.090e+02 8.759e+02, threshold=6.812e+02, percent-clipped=3.0 +2023-02-06 09:36:37,066 INFO [train.py:901] (0/4) Epoch 10, batch 7800, loss[loss=0.2094, simple_loss=0.2916, pruned_loss=0.06358, over 8112.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3135, pruned_loss=0.08287, over 1612742.53 frames. ], batch size: 23, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:36:48,397 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2597, 3.1721, 2.8682, 1.4095, 2.8614, 2.8830, 2.8999, 2.7106], + device='cuda:0'), covar=tensor([0.1015, 0.0752, 0.1308, 0.4923, 0.1123, 0.1160, 0.1522, 0.1050], + device='cuda:0'), in_proj_covar=tensor([0.0460, 0.0358, 0.0378, 0.0477, 0.0377, 0.0363, 0.0367, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 09:37:09,868 INFO [train.py:901] (0/4) Epoch 10, batch 7850, loss[loss=0.2438, simple_loss=0.3225, pruned_loss=0.08258, over 8473.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3137, pruned_loss=0.08303, over 1615871.31 frames. ], batch size: 25, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:37:12,391 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-02-06 09:37:40,857 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:37:42,735 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.556e+02 3.372e+02 4.255e+02 7.191e+02, threshold=6.744e+02, percent-clipped=1.0 +2023-02-06 09:37:43,434 INFO [train.py:901] (0/4) Epoch 10, batch 7900, loss[loss=0.2474, simple_loss=0.3319, pruned_loss=0.08143, over 8257.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3143, pruned_loss=0.08367, over 1614238.02 frames. ], batch size: 24, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:38:07,142 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9933, 1.6298, 1.8181, 1.4885, 1.2799, 1.8257, 2.3016, 1.8651], + device='cuda:0'), covar=tensor([0.0439, 0.1211, 0.1697, 0.1425, 0.0566, 0.1406, 0.0605, 0.0626], + device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0159, 0.0106, 0.0165, 0.0119, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 09:38:16,940 INFO [train.py:901] (0/4) Epoch 10, batch 7950, loss[loss=0.2503, simple_loss=0.3, pruned_loss=0.1003, over 7192.00 frames. ], tot_loss[loss=0.24, simple_loss=0.3135, pruned_loss=0.08323, over 1615971.37 frames. ], batch size: 16, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:38:50,730 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 2.660e+02 3.023e+02 3.700e+02 9.606e+02, threshold=6.046e+02, percent-clipped=2.0 +2023-02-06 09:38:51,443 INFO [train.py:901] (0/4) Epoch 10, batch 8000, loss[loss=0.2366, simple_loss=0.3227, pruned_loss=0.07521, over 8275.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3123, pruned_loss=0.08206, over 1615860.14 frames. ], batch size: 23, lr: 7.48e-03, grad_scale: 8.0 +2023-02-06 09:38:55,699 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:38:56,301 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:38:59,809 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:20,341 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:24,998 INFO [train.py:901] (0/4) Epoch 10, batch 8050, loss[loss=0.3267, simple_loss=0.3741, pruned_loss=0.1397, over 7120.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3119, pruned_loss=0.08185, over 1606665.23 frames. ], batch size: 71, lr: 7.48e-03, grad_scale: 8.0 +2023-02-06 09:39:43,556 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:45,321 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-06 09:39:48,194 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-10.pt +2023-02-06 09:39:58,264 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 09:40:01,801 INFO [train.py:901] (0/4) Epoch 11, batch 0, loss[loss=0.2901, simple_loss=0.3512, pruned_loss=0.1145, over 8127.00 frames. 
], tot_loss[loss=0.2901, simple_loss=0.3512, pruned_loss=0.1145, over 8127.00 frames. ], batch size: 22, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:40:01,802 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 09:40:13,094 INFO [train.py:935] (0/4) Epoch 11, validation: loss=0.1907, simple_loss=0.2907, pruned_loss=0.04534, over 944034.00 frames. +2023-02-06 09:40:13,095 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 09:40:23,924 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.899e+02 3.439e+02 4.416e+02 1.589e+03, threshold=6.879e+02, percent-clipped=9.0 +2023-02-06 09:40:27,464 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 09:40:30,160 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:40:39,882 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80870.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:40:47,958 INFO [train.py:901] (0/4) Epoch 11, batch 50, loss[loss=0.2047, simple_loss=0.2797, pruned_loss=0.06479, over 7807.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3233, pruned_loss=0.08664, over 368246.31 frames. ], batch size: 19, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:41:03,896 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 09:41:24,358 INFO [train.py:901] (0/4) Epoch 11, batch 100, loss[loss=0.2022, simple_loss=0.2723, pruned_loss=0.06603, over 7522.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3203, pruned_loss=0.08466, over 646462.23 frames. ], batch size: 18, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:41:29,247 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 09:41:30,742 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:41:32,066 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8825, 1.4988, 1.6183, 1.4112, 1.0092, 1.4375, 1.6067, 1.5338], + device='cuda:0'), covar=tensor([0.0472, 0.1172, 0.1653, 0.1325, 0.0574, 0.1439, 0.0677, 0.0577], + device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0155, 0.0197, 0.0161, 0.0107, 0.0165, 0.0119, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 09:41:32,119 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5105, 1.8488, 1.9515, 0.9502, 2.0541, 1.4659, 0.4280, 1.7198], + device='cuda:0'), covar=tensor([0.0328, 0.0207, 0.0154, 0.0319, 0.0193, 0.0529, 0.0543, 0.0147], + device='cuda:0'), in_proj_covar=tensor([0.0381, 0.0310, 0.0262, 0.0377, 0.0297, 0.0459, 0.0351, 0.0343], + device='cuda:0'), out_proj_covar=tensor([1.0991e-04, 8.7396e-05, 7.3964e-05, 1.0731e-04, 8.5085e-05, 1.4165e-04, + 1.0120e-04, 9.8394e-05], device='cuda:0') +2023-02-06 09:41:35,316 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.679e+02 3.187e+02 3.933e+02 1.063e+03, threshold=6.374e+02, percent-clipped=2.0 +2023-02-06 09:41:51,862 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:41:58,405 INFO [train.py:901] (0/4) Epoch 11, batch 150, loss[loss=0.3116, simple_loss=0.3665, pruned_loss=0.1284, over 6803.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3187, pruned_loss=0.08424, over 864003.76 frames. ], batch size: 71, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:42:09,088 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2856, 2.2623, 1.7387, 2.0332, 1.8583, 1.4449, 1.8098, 1.7977], + device='cuda:0'), covar=tensor([0.1118, 0.0335, 0.0844, 0.0485, 0.0551, 0.1213, 0.0791, 0.0764], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0234, 0.0312, 0.0298, 0.0305, 0.0326, 0.0338, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:42:14,053 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0089, 1.1022, 0.9948, 1.3047, 0.6578, 0.8849, 1.0475, 1.1419], + device='cuda:0'), covar=tensor([0.0690, 0.0654, 0.0860, 0.0570, 0.0914, 0.1148, 0.0576, 0.0583], + device='cuda:0'), in_proj_covar=tensor([0.0240, 0.0216, 0.0258, 0.0219, 0.0221, 0.0258, 0.0258, 0.0228], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 09:42:23,751 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:42:25,279 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 09:42:34,650 INFO [train.py:901] (0/4) Epoch 11, batch 200, loss[loss=0.2065, simple_loss=0.3012, pruned_loss=0.05589, over 8253.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.318, pruned_loss=0.08443, over 1028823.00 frames. ], batch size: 24, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:42:37,132 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. 
limit=2.0 +2023-02-06 09:42:43,008 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:42:47,015 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.935e+02 2.662e+02 3.186e+02 4.005e+02 8.686e+02, threshold=6.371e+02, percent-clipped=5.0 +2023-02-06 09:43:10,560 INFO [train.py:901] (0/4) Epoch 11, batch 250, loss[loss=0.2676, simple_loss=0.3187, pruned_loss=0.1083, over 5932.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3152, pruned_loss=0.08302, over 1158832.68 frames. ], batch size: 13, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:43:21,554 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 09:43:22,302 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:31,363 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 09:43:42,733 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:46,016 INFO [train.py:901] (0/4) Epoch 11, batch 300, loss[loss=0.2915, simple_loss=0.3541, pruned_loss=0.1145, over 8127.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3141, pruned_loss=0.08203, over 1263113.96 frames. ], batch size: 22, lr: 7.13e-03, grad_scale: 16.0 +2023-02-06 09:43:48,269 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:48,880 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:57,132 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.697e+02 3.136e+02 4.054e+02 9.565e+02, threshold=6.271e+02, percent-clipped=1.0 +2023-02-06 09:44:00,791 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:22,521 INFO [train.py:901] (0/4) Epoch 11, batch 350, loss[loss=0.2364, simple_loss=0.3028, pruned_loss=0.08498, over 8242.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3141, pruned_loss=0.08219, over 1343443.33 frames. 
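The `zipformer.py:2431` dumps scattered through this log print `attn_weights_entropy`, a per-head diagnostic of how concentrated the self-attention weights are: low entropy means each query attends to only a few keys, high entropy means nearly uniform attention. A sketch of how such a statistic can be computed from an attention-weight tensor; the shapes and the averaging over queries are assumptions.

```python
import torch


def attn_weights_entropy(attn: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    """Per-head mean entropy of attention distributions.

    attn: (num_heads, query_len, key_len), each row summing to 1.
    Returns a (num_heads,) tensor, like the logged dumps.
    """
    p = attn.clamp(min=eps)
    entropy = -(p * p.log()).sum(dim=-1)  # (num_heads, query_len)
    return entropy.mean(dim=-1)


# Uniform attention over 100 keys gives entropy log(100) ~= 4.6 per head
attn = torch.full((8, 50, 100), 0.01)
print(attn_weights_entropy(attn))
```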
], batch size: 22, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:44:23,375 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9479, 3.8701, 2.5746, 2.6783, 2.7848, 2.1023, 2.8914, 3.1196], + device='cuda:0'), covar=tensor([0.1580, 0.0430, 0.0939, 0.0739, 0.0766, 0.1337, 0.1100, 0.0949], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0232, 0.0308, 0.0297, 0.0303, 0.0324, 0.0337, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:44:33,038 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:44,351 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:45,750 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3125, 1.8919, 2.8033, 2.2126, 2.4566, 2.1510, 1.7490, 1.1710], + device='cuda:0'), covar=tensor([0.3888, 0.4011, 0.1080, 0.2477, 0.1833, 0.2131, 0.1668, 0.4064], + device='cuda:0'), in_proj_covar=tensor([0.0889, 0.0861, 0.0718, 0.0831, 0.0932, 0.0791, 0.0702, 0.0760], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:44:49,696 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:53,725 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:56,286 INFO [train.py:901] (0/4) Epoch 11, batch 400, loss[loss=0.1903, simple_loss=0.2796, pruned_loss=0.05054, over 8538.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3155, pruned_loss=0.08314, over 1404719.32 frames. ], batch size: 39, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:45:08,650 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.601e+02 3.216e+02 4.274e+02 6.931e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 09:45:10,270 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:11,720 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:32,911 INFO [train.py:901] (0/4) Epoch 11, batch 450, loss[loss=0.2649, simple_loss=0.3372, pruned_loss=0.09629, over 8515.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3139, pruned_loss=0.08294, over 1449371.17 frames. ], batch size: 28, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:45:57,096 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:58,453 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:46:06,251 INFO [train.py:901] (0/4) Epoch 11, batch 500, loss[loss=0.2552, simple_loss=0.3303, pruned_loss=0.09005, over 8460.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.3131, pruned_loss=0.08235, over 1487075.84 frames. 
], batch size: 29, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:46:17,545 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.501e+02 3.364e+02 4.069e+02 6.845e+02, threshold=6.728e+02, percent-clipped=2.0 +2023-02-06 09:46:40,091 INFO [train.py:901] (0/4) Epoch 11, batch 550, loss[loss=0.2163, simple_loss=0.2986, pruned_loss=0.06695, over 8286.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3144, pruned_loss=0.08351, over 1517090.13 frames. ], batch size: 23, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:46:59,741 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0077, 2.4118, 1.8773, 3.0135, 1.3203, 1.6584, 1.9749, 2.4456], + device='cuda:0'), covar=tensor([0.0764, 0.0890, 0.1011, 0.0344, 0.1176, 0.1477, 0.1050, 0.0726], + device='cuda:0'), in_proj_covar=tensor([0.0238, 0.0213, 0.0253, 0.0216, 0.0216, 0.0254, 0.0257, 0.0224], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 09:47:15,688 INFO [train.py:901] (0/4) Epoch 11, batch 600, loss[loss=0.2354, simple_loss=0.3243, pruned_loss=0.07324, over 8509.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3119, pruned_loss=0.08184, over 1536588.67 frames. ], batch size: 26, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:47:27,343 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.633e+02 3.080e+02 3.885e+02 6.931e+02, threshold=6.160e+02, percent-clipped=1.0 +2023-02-06 09:47:35,461 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 09:47:42,482 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:47:48,462 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:47:50,436 INFO [train.py:901] (0/4) Epoch 11, batch 650, loss[loss=0.2374, simple_loss=0.3025, pruned_loss=0.08608, over 7966.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3131, pruned_loss=0.08212, over 1558864.81 frames. ], batch size: 21, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:47:59,441 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:08,364 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:26,942 INFO [train.py:901] (0/4) Epoch 11, batch 700, loss[loss=0.2186, simple_loss=0.2843, pruned_loss=0.07648, over 7423.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3121, pruned_loss=0.08156, over 1568063.29 frames. 
], batch size: 17, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:48:27,104 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:38,764 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.541e+02 3.049e+02 3.626e+02 6.264e+02, threshold=6.097e+02, percent-clipped=1.0 +2023-02-06 09:48:48,477 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2052, 1.1580, 3.3407, 0.9568, 2.9108, 2.7980, 3.0466, 2.9153], + device='cuda:0'), covar=tensor([0.0755, 0.3866, 0.0784, 0.3477, 0.1573, 0.1067, 0.0766, 0.0895], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0572, 0.0577, 0.0526, 0.0604, 0.0509, 0.0502, 0.0576], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:49:01,395 INFO [train.py:901] (0/4) Epoch 11, batch 750, loss[loss=0.3008, simple_loss=0.3671, pruned_loss=0.1173, over 8767.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3148, pruned_loss=0.08328, over 1583142.60 frames. ], batch size: 30, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:49:07,228 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.53 vs. limit=5.0 +2023-02-06 09:49:10,215 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81593.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:49:24,851 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 09:49:33,713 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 09:49:36,445 INFO [train.py:901] (0/4) Epoch 11, batch 800, loss[loss=0.2436, simple_loss=0.3286, pruned_loss=0.07934, over 8472.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3127, pruned_loss=0.08179, over 1590684.22 frames. ], batch size: 29, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:49:49,260 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.494e+02 2.971e+02 3.970e+02 9.403e+02, threshold=5.941e+02, percent-clipped=2.0 +2023-02-06 09:49:58,255 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:49:59,634 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81663.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:50:11,544 INFO [train.py:901] (0/4) Epoch 11, batch 850, loss[loss=0.2382, simple_loss=0.3103, pruned_loss=0.083, over 7801.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3125, pruned_loss=0.08184, over 1595423.93 frames. 
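The `scaling.py:679` lines (`Whitening: num_groups=..., num_channels=..., metric=... vs. limit=...`) report a whiteness diagnostic on intermediate activations: a metric that equals 1.0 when the feature covariance within each channel group is a multiple of the identity and grows as the eigenvalue spread widens, with a penalty applied only when it exceeds `limit`. A rough sketch of computing such a metric; the contiguous channel grouping and the normalization details are assumptions.

```python
import torch


def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Whiteness of x: 1.0 iff each group's covariance is c * identity.

    x: (num_frames, num_channels) feature matrix.
    """
    n, c = x.shape
    cpg = c // num_groups  # channels per group
    xg = x.reshape(n, num_groups, cpg).transpose(0, 1)  # (groups, n, cpg)
    cov = torch.matmul(xg.transpose(1, 2), xg) / n      # per-group covariance
    mean_diag = cov.diagonal(dim1=1, dim2=2).mean()     # mean eigenvalue
    mean_sq = (cov**2).sum() / (num_groups * cpg)       # mean squared eigenvalue
    # E[lambda^2] / E[lambda]^2 >= 1, with equality only for
    # isotropic (whitened) features
    return mean_sq / (mean_diag**2 + 1e-20)


x = torch.randn(1000, 96)  # roughly white features -> metric near 1.0
print(whitening_metric(x, num_groups=8))
```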
], batch size: 19, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:50:16,510 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:50:37,526 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9373, 1.6238, 2.2965, 1.7880, 2.0048, 1.8729, 1.4929, 0.6547], + device='cuda:0'), covar=tensor([0.3777, 0.3520, 0.1104, 0.2226, 0.1639, 0.2181, 0.1776, 0.3581], + device='cuda:0'), in_proj_covar=tensor([0.0877, 0.0849, 0.0714, 0.0825, 0.0920, 0.0785, 0.0698, 0.0754], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:50:46,071 INFO [train.py:901] (0/4) Epoch 11, batch 900, loss[loss=0.2192, simple_loss=0.2989, pruned_loss=0.06975, over 7150.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3123, pruned_loss=0.08157, over 1596655.94 frames. ], batch size: 71, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:50:58,849 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.543e+02 3.289e+02 4.286e+02 9.063e+02, threshold=6.577e+02, percent-clipped=7.0 +2023-02-06 09:51:18,647 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81776.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:51:20,048 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:51:21,850 INFO [train.py:901] (0/4) Epoch 11, batch 950, loss[loss=0.2275, simple_loss=0.2932, pruned_loss=0.08087, over 7432.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.312, pruned_loss=0.08122, over 1600370.74 frames. ], batch size: 17, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:51:22,935 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-06 09:51:24,920 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8955, 1.5760, 2.1936, 1.7641, 2.0069, 1.8277, 1.5073, 0.7126], + device='cuda:0'), covar=tensor([0.4079, 0.3828, 0.1153, 0.2338, 0.1621, 0.2163, 0.1622, 0.3739], + device='cuda:0'), in_proj_covar=tensor([0.0887, 0.0856, 0.0721, 0.0833, 0.0925, 0.0793, 0.0705, 0.0760], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 09:51:51,860 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 09:51:56,015 INFO [train.py:901] (0/4) Epoch 11, batch 1000, loss[loss=0.2061, simple_loss=0.2839, pruned_loss=0.06411, over 7653.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3128, pruned_loss=0.08137, over 1605844.64 frames. ], batch size: 19, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:52:07,439 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.713e+02 3.211e+02 4.023e+02 7.481e+02, threshold=6.422e+02, percent-clipped=3.0 +2023-02-06 09:52:08,383 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:52:27,199 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-06 09:52:27,401 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:52:31,964 INFO [train.py:901] (0/4) Epoch 11, batch 1050, loss[loss=0.3133, simple_loss=0.3568, pruned_loss=0.1349, over 6678.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3114, pruned_loss=0.08042, over 1609714.79 frames. ], batch size: 71, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:52:39,058 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 09:53:06,143 INFO [train.py:901] (0/4) Epoch 11, batch 1100, loss[loss=0.2669, simple_loss=0.3269, pruned_loss=0.1035, over 8192.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3119, pruned_loss=0.08083, over 1613969.38 frames. ], batch size: 23, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:53:18,511 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.436e+02 2.887e+02 3.709e+02 9.106e+02, threshold=5.774e+02, percent-clipped=2.0 +2023-02-06 09:53:41,605 INFO [train.py:901] (0/4) Epoch 11, batch 1150, loss[loss=0.2167, simple_loss=0.2881, pruned_loss=0.07268, over 7646.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3109, pruned_loss=0.08024, over 1615644.75 frames. ], batch size: 19, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:53:51,768 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 09:53:55,964 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-82000.pt +2023-02-06 09:54:00,447 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:17,960 INFO [train.py:901] (0/4) Epoch 11, batch 1200, loss[loss=0.2178, simple_loss=0.3033, pruned_loss=0.06614, over 8337.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3108, pruned_loss=0.08064, over 1614758.98 frames. 
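The ubiquitous `zipformer.py:1185` lines track stochastic layer dropping per encoder stack: each stack has its own `(warmup_begin, warmup_end)` interval in batch counts (666.7–1333.3 up through 3333.3–4000.0 here), and on some batches a layer index is skipped (`num_to_drop=1, layers_to_drop={1}`). With `batch_count` near 80000 every stack is long past warmup, so most lines show `num_to_drop=0`, and the occasional drop suggests a small residual drop probability. The sketch below illustrates that mechanism under those assumptions; the probabilities are illustrative and not taken from the log.

```python
import random
from typing import Set


def layers_to_drop(
    batch_count: float,
    warmup_begin: float,
    warmup_end: float,
    num_layers: int,
    warmup_prob: float = 0.5,     # assumed drop rate early in warmup
    residual_prob: float = 0.05,  # assumed small rate after warmup
) -> Set[int]:
    """Pick encoder-layer indices to skip for this batch."""
    if batch_count < warmup_begin:
        p = warmup_prob
    elif batch_count < warmup_end:
        # anneal the drop rate linearly across the warmup interval
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = warmup_prob + frac * (residual_prob - warmup_prob)
    else:
        p = residual_prob
    if random.random() < p:
        return {random.randrange(num_layers)}  # num_to_drop=1
    return set()                               # num_to_drop=0
```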
], batch size: 25, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:54:18,717 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:18,842 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:20,232 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:29,502 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.664e+02 3.172e+02 3.772e+02 1.117e+03, threshold=6.345e+02, percent-clipped=5.0 +2023-02-06 09:54:36,802 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:38,208 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:47,997 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:48,015 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3485, 1.3826, 2.2564, 1.2201, 2.0749, 2.4523, 2.5937, 2.0715], + device='cuda:0'), covar=tensor([0.1009, 0.1182, 0.0488, 0.1880, 0.0658, 0.0371, 0.0584, 0.0746], + device='cuda:0'), in_proj_covar=tensor([0.0263, 0.0298, 0.0256, 0.0288, 0.0274, 0.0233, 0.0333, 0.0290], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 09:54:53,469 INFO [train.py:901] (0/4) Epoch 11, batch 1250, loss[loss=0.2067, simple_loss=0.2684, pruned_loss=0.07245, over 7716.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3108, pruned_loss=0.08012, over 1613894.93 frames. ], batch size: 18, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:55:29,353 INFO [train.py:901] (0/4) Epoch 11, batch 1300, loss[loss=0.2273, simple_loss=0.3089, pruned_loss=0.07285, over 8197.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3111, pruned_loss=0.07999, over 1614838.10 frames. ], batch size: 23, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:55:40,430 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:55:40,863 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.583e+02 3.223e+02 4.179e+02 7.623e+02, threshold=6.447e+02, percent-clipped=2.0 +2023-02-06 09:56:03,693 INFO [train.py:901] (0/4) Epoch 11, batch 1350, loss[loss=0.2798, simple_loss=0.3573, pruned_loss=0.1011, over 8195.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3122, pruned_loss=0.081, over 1614478.37 frames. ], batch size: 23, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:56:04,909 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 09:56:38,846 INFO [train.py:901] (0/4) Epoch 11, batch 1400, loss[loss=0.2405, simple_loss=0.3061, pruned_loss=0.08743, over 7909.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3117, pruned_loss=0.08074, over 1615256.89 frames. 
], batch size: 20, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:56:51,062 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.627e+02 3.119e+02 3.954e+02 1.224e+03, threshold=6.238e+02, percent-clipped=1.0
+2023-02-06 09:57:13,600 INFO [train.py:901] (0/4) Epoch 11, batch 1450, loss[loss=0.2332, simple_loss=0.3066, pruned_loss=0.07992, over 7525.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3125, pruned_loss=0.08094, over 1618297.50 frames. ], batch size: 18, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:57:17,215 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1760, 1.2562, 1.4431, 1.2245, 0.7990, 1.3232, 1.1951, 0.8945],
+ device='cuda:0'), covar=tensor([0.0581, 0.1272, 0.1791, 0.1410, 0.0617, 0.1579, 0.0755, 0.0740],
+ device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0153, 0.0195, 0.0158, 0.0105, 0.0165, 0.0118, 0.0139],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 09:57:27,725 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 09:57:48,796 INFO [train.py:901] (0/4) Epoch 11, batch 1500, loss[loss=0.2283, simple_loss=0.3041, pruned_loss=0.07623, over 8657.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3122, pruned_loss=0.08094, over 1623511.42 frames. ], batch size: 34, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:58:01,382 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.943e+02 2.743e+02 3.193e+02 4.270e+02 9.879e+02, threshold=6.387e+02, percent-clipped=7.0
+2023-02-06 09:58:02,225 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:10,585 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1328, 1.2314, 1.2084, 0.6699, 1.2382, 1.0014, 0.1030, 1.1489],
+ device='cuda:0'), covar=tensor([0.0237, 0.0210, 0.0190, 0.0293, 0.0208, 0.0573, 0.0465, 0.0201],
+ device='cuda:0'), in_proj_covar=tensor([0.0383, 0.0315, 0.0265, 0.0378, 0.0301, 0.0464, 0.0353, 0.0349],
+ device='cuda:0'), out_proj_covar=tensor([1.1029e-04, 8.8593e-05, 7.4803e-05, 1.0718e-04, 8.5846e-05, 1.4273e-04,
+ 1.0163e-04, 9.9819e-05], device='cuda:0')
+2023-02-06 09:58:13,167 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82364.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:24,626 INFO [train.py:901] (0/4) Epoch 11, batch 1550, loss[loss=0.239, simple_loss=0.3185, pruned_loss=0.07972, over 8290.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3113, pruned_loss=0.08, over 1624717.91 frames. ], batch size: 23, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 09:58:39,991 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:50,112 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:57,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82428.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:59,757 INFO [train.py:901] (0/4) Epoch 11, batch 1600, loss[loss=0.1741, simple_loss=0.2502, pruned_loss=0.049, over 7264.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3103, pruned_loss=0.07995, over 1619932.67 frames. ], batch size: 16, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 09:59:13,003 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.328e+02 2.878e+02 3.468e+02 7.869e+02, threshold=5.757e+02, percent-clipped=2.0
+2023-02-06 09:59:24,144 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:59:26,842 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8420, 5.8263, 5.1546, 2.2266, 5.1966, 5.5753, 5.4814, 5.2886],
+ device='cuda:0'), covar=tensor([0.0473, 0.0429, 0.0813, 0.4562, 0.0577, 0.0841, 0.1068, 0.0533],
+ device='cuda:0'), in_proj_covar=tensor([0.0466, 0.0375, 0.0384, 0.0482, 0.0377, 0.0368, 0.0369, 0.0324],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 09:59:36,415 INFO [train.py:901] (0/4) Epoch 11, batch 1650, loss[loss=0.21, simple_loss=0.2798, pruned_loss=0.07008, over 7824.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3119, pruned_loss=0.08114, over 1621159.96 frames. ], batch size: 20, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 09:59:37,935 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8159, 2.3639, 2.6931, 1.0929, 2.7229, 1.6321, 1.4171, 1.6673],
+ device='cuda:0'), covar=tensor([0.0755, 0.0291, 0.0248, 0.0619, 0.0300, 0.0693, 0.0778, 0.0450],
+ device='cuda:0'), in_proj_covar=tensor([0.0384, 0.0318, 0.0266, 0.0377, 0.0304, 0.0466, 0.0354, 0.0350],
+ device='cuda:0'), out_proj_covar=tensor([1.1041e-04, 8.9431e-05, 7.5022e-05, 1.0647e-04, 8.6759e-05, 1.4345e-04,
+ 1.0202e-04, 1.0024e-04], device='cuda:0')
+2023-02-06 09:59:55,484 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0
+2023-02-06 09:59:57,331 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82511.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 10:00:11,624 INFO [train.py:901] (0/4) Epoch 11, batch 1700, loss[loss=0.2442, simple_loss=0.3143, pruned_loss=0.08704, over 7969.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3109, pruned_loss=0.08097, over 1617810.07 frames. ], batch size: 21, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 10:00:12,460 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:00:23,208 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.517e+02 3.185e+02 4.066e+02 8.085e+02, threshold=6.370e+02, percent-clipped=5.0
+2023-02-06 10:00:35,356 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 10:00:47,536 INFO [train.py:901] (0/4) Epoch 11, batch 1750, loss[loss=0.2988, simple_loss=0.3554, pruned_loss=0.1211, over 7519.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3119, pruned_loss=0.08175, over 1614569.54 frames. ], batch size: 73, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:00:52,560 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:01:23,327 INFO [train.py:901] (0/4) Epoch 11, batch 1800, loss[loss=0.2166, simple_loss=0.2857, pruned_loss=0.07378, over 7982.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3126, pruned_loss=0.08211, over 1617419.21 frames. ], batch size: 21, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:01:35,825 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.598e+02 3.107e+02 4.193e+02 1.199e+03, threshold=6.213e+02, percent-clipped=8.0
+2023-02-06 10:01:38,054 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7403, 1.5265, 1.7221, 1.5755, 1.1254, 1.6938, 2.2783, 1.9811],
+ device='cuda:0'), covar=tensor([0.0481, 0.1334, 0.1787, 0.1468, 0.0651, 0.1551, 0.0674, 0.0603],
+ device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0155, 0.0198, 0.0160, 0.0106, 0.0166, 0.0119, 0.0140],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:01:46,763 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5942, 1.4966, 1.5998, 1.5445, 0.9438, 1.5248, 1.5397, 1.3168],
+ device='cuda:0'), covar=tensor([0.0494, 0.1081, 0.1617, 0.1249, 0.0553, 0.1311, 0.0645, 0.0632],
+ device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0154, 0.0197, 0.0160, 0.0106, 0.0165, 0.0119, 0.0140],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:01:58,587 INFO [train.py:901] (0/4) Epoch 11, batch 1850, loss[loss=0.2351, simple_loss=0.3148, pruned_loss=0.07771, over 8478.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3119, pruned_loss=0.08158, over 1617211.83 frames. ], batch size: 27, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:02:03,664 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7711, 2.3617, 1.8110, 2.2017, 2.0816, 1.6667, 2.0122, 2.1947],
+ device='cuda:0'), covar=tensor([0.1001, 0.0269, 0.0822, 0.0446, 0.0509, 0.1021, 0.0651, 0.0628],
+ device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0231, 0.0311, 0.0297, 0.0300, 0.0322, 0.0339, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:02:18,370 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:02:26,580 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82720.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:02:34,008 INFO [train.py:901] (0/4) Epoch 11, batch 1900, loss[loss=0.2522, simple_loss=0.3281, pruned_loss=0.08818, over 8292.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3116, pruned_loss=0.08115, over 1614689.26 frames. ], batch size: 23, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:02:43,683 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82745.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:02:45,529 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.442e+02 3.142e+02 3.936e+02 6.780e+02, threshold=6.284e+02, percent-clipped=1.0
+2023-02-06 10:03:08,879 INFO [train.py:901] (0/4) Epoch 11, batch 1950, loss[loss=0.2233, simple_loss=0.311, pruned_loss=0.06778, over 8453.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3112, pruned_loss=0.08114, over 1610964.71 frames. ], batch size: 25, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:03:12,343 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 10:03:13,856 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82788.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:03:17,446 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0
+2023-02-06 10:03:26,482 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 10:03:32,226 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82813.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:03:39,063 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:03:39,129 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6467, 1.8710, 2.9829, 1.3653, 2.2699, 1.9807, 1.6678, 2.1301],
+ device='cuda:0'), covar=tensor([0.1472, 0.2164, 0.0578, 0.3580, 0.1376, 0.2453, 0.1683, 0.1961],
+ device='cuda:0'), in_proj_covar=tensor([0.0481, 0.0512, 0.0534, 0.0575, 0.0614, 0.0546, 0.0469, 0.0609],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:03:45,036 INFO [train.py:901] (0/4) Epoch 11, batch 2000, loss[loss=0.2076, simple_loss=0.2747, pruned_loss=0.07022, over 7635.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.311, pruned_loss=0.08092, over 1608719.47 frames. ], batch size: 19, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:03:47,025 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 10:03:56,665 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.675e+02 3.279e+02 3.987e+02 1.082e+03, threshold=6.559e+02, percent-clipped=7.0
+2023-02-06 10:04:01,551 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82855.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 10:04:19,321 INFO [train.py:901] (0/4) Epoch 11, batch 2050, loss[loss=0.2218, simple_loss=0.3118, pruned_loss=0.06587, over 8334.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3115, pruned_loss=0.08086, over 1613148.71 frames. ], batch size: 26, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:04:27,382 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0123, 6.1113, 5.1724, 2.5641, 5.3942, 5.8027, 5.5809, 5.4182],
+ device='cuda:0'), covar=tensor([0.0560, 0.0374, 0.0825, 0.4793, 0.0667, 0.0568, 0.0969, 0.0553],
+ device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0375, 0.0383, 0.0483, 0.0375, 0.0366, 0.0370, 0.0324],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:04:55,307 INFO [train.py:901] (0/4) Epoch 11, batch 2100, loss[loss=0.2359, simple_loss=0.3062, pruned_loss=0.08281, over 8236.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3108, pruned_loss=0.08049, over 1612674.65 frames. ], batch size: 22, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:04:55,375 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82931.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:05:07,190 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.489e+02 3.174e+02 3.706e+02 9.083e+02, threshold=6.348e+02, percent-clipped=2.0
+2023-02-06 10:05:13,279 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0114, 1.4523, 1.6101, 1.3097, 0.8966, 1.5101, 1.7381, 1.6392],
+ device='cuda:0'), covar=tensor([0.0486, 0.1249, 0.1609, 0.1361, 0.0608, 0.1471, 0.0696, 0.0549],
+ device='cuda:0'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0160, 0.0105, 0.0166, 0.0118, 0.0139],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:05:22,081 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82970.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 10:05:29,198 INFO [train.py:901] (0/4) Epoch 11, batch 2150, loss[loss=0.2662, simple_loss=0.3403, pruned_loss=0.09606, over 8360.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3119, pruned_loss=0.08088, over 1613028.02 frames. ], batch size: 24, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:05:29,366 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82981.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:05:49,411 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 10:06:04,060 INFO [train.py:901] (0/4) Epoch 11, batch 2200, loss[loss=0.2374, simple_loss=0.3079, pruned_loss=0.08341, over 7803.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3102, pruned_loss=0.08022, over 1608784.25 frames. ], batch size: 20, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:06:15,765 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:06:16,954 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.510e+02 3.092e+02 4.104e+02 1.639e+03, threshold=6.185e+02, percent-clipped=4.0
+2023-02-06 10:06:38,935 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83079.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:06:40,019 INFO [train.py:901] (0/4) Epoch 11, batch 2250, loss[loss=0.2464, simple_loss=0.3251, pruned_loss=0.08379, over 7919.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3101, pruned_loss=0.0804, over 1605172.93 frames. ], batch size: 20, lr: 7.04e-03, grad_scale: 8.0
+2023-02-06 10:06:46,975 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4926, 2.6059, 1.7956, 2.1851, 2.0849, 1.4930, 2.1856, 2.2330],
+ device='cuda:0'), covar=tensor([0.1518, 0.0408, 0.1167, 0.0724, 0.0753, 0.1460, 0.0927, 0.0983],
+ device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0236, 0.0315, 0.0297, 0.0304, 0.0326, 0.0341, 0.0308],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:06:49,943 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 10:06:55,778 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83104.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:07:14,108 INFO [train.py:901] (0/4) Epoch 11, batch 2300, loss[loss=0.2254, simple_loss=0.286, pruned_loss=0.08243, over 7433.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3115, pruned_loss=0.08098, over 1611182.52 frames. ], batch size: 17, lr: 7.04e-03, grad_scale: 8.0
+2023-02-06 10:07:25,656 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.480e+02 3.199e+02 4.275e+02 9.806e+02, threshold=6.398e+02, percent-clipped=6.0
+2023-02-06 10:07:48,922 INFO [train.py:901] (0/4) Epoch 11, batch 2350, loss[loss=0.2477, simple_loss=0.3228, pruned_loss=0.08626, over 8485.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3121, pruned_loss=0.08125, over 1615122.34 frames. ], batch size: 29, lr: 7.04e-03, grad_scale: 16.0
+2023-02-06 10:08:12,070 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83214.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:08:20,033 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83226.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 10:08:23,038 INFO [train.py:901] (0/4) Epoch 11, batch 2400, loss[loss=0.2502, simple_loss=0.3182, pruned_loss=0.0911, over 8365.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3123, pruned_loss=0.08095, over 1618669.69 frames. ], batch size: 24, lr: 7.04e-03, grad_scale: 16.0
+2023-02-06 10:08:35,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.359e+02 2.853e+02 3.666e+02 7.740e+02, threshold=5.706e+02, percent-clipped=1.0
+2023-02-06 10:08:37,251 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83251.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 10:08:58,645 INFO [train.py:901] (0/4) Epoch 11, batch 2450, loss[loss=0.2393, simple_loss=0.3083, pruned_loss=0.08513, over 7942.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3107, pruned_loss=0.07961, over 1618729.32 frames. ], batch size: 20, lr: 7.04e-03, grad_scale: 16.0
+2023-02-06 10:09:06,343 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6355, 2.1577, 4.3528, 1.3459, 2.9724, 2.2010, 1.5621, 2.7127],
+ device='cuda:0'), covar=tensor([0.1742, 0.2366, 0.0781, 0.4070, 0.1663, 0.2803, 0.2015, 0.2450],
+ device='cuda:0'), in_proj_covar=tensor([0.0481, 0.0507, 0.0532, 0.0569, 0.0611, 0.0543, 0.0464, 0.0603],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:09:13,729 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:13,795 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:29,007 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83325.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:30,521 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:32,764 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 10:09:33,050 INFO [train.py:901] (0/4) Epoch 11, batch 2500, loss[loss=0.261, simple_loss=0.3332, pruned_loss=0.09436, over 8336.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3107, pruned_loss=0.07979, over 1623750.39 frames. ], batch size: 26, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:09:44,607 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.634e+02 3.143e+02 3.904e+02 7.323e+02, threshold=6.285e+02, percent-clipped=4.0
+2023-02-06 10:10:07,382 INFO [train.py:901] (0/4) Epoch 11, batch 2550, loss[loss=0.1829, simple_loss=0.2647, pruned_loss=0.05056, over 7923.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.311, pruned_loss=0.08024, over 1621693.66 frames. ], batch size: 20, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:10:28,997 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5281, 1.4701, 2.7830, 1.2972, 1.8806, 2.9557, 3.1017, 2.5180],
+ device='cuda:0'), covar=tensor([0.1107, 0.1456, 0.0390, 0.1974, 0.0964, 0.0315, 0.0453, 0.0678],
+ device='cuda:0'), in_proj_covar=tensor([0.0259, 0.0297, 0.0254, 0.0288, 0.0271, 0.0234, 0.0333, 0.0287],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 10:10:43,102 INFO [train.py:901] (0/4) Epoch 11, batch 2600, loss[loss=0.2461, simple_loss=0.3236, pruned_loss=0.08428, over 8348.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3115, pruned_loss=0.081, over 1615320.44 frames. ], batch size: 24, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:10:48,776 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1694, 1.4787, 4.3257, 1.9869, 2.4853, 5.0331, 4.9988, 4.3735],
+ device='cuda:0'), covar=tensor([0.1017, 0.1648, 0.0254, 0.1818, 0.0956, 0.0144, 0.0260, 0.0496],
+ device='cuda:0'), in_proj_covar=tensor([0.0258, 0.0295, 0.0253, 0.0286, 0.0268, 0.0232, 0.0331, 0.0285],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:0')
+2023-02-06 10:10:49,493 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83440.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:10:54,710 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.607e+02 3.192e+02 4.372e+02 8.439e+02, threshold=6.384e+02, percent-clipped=10.0
+2023-02-06 10:11:17,502 INFO [train.py:901] (0/4) Epoch 11, batch 2650, loss[loss=0.2124, simple_loss=0.2874, pruned_loss=0.0687, over 8546.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3112, pruned_loss=0.08104, over 1616756.18 frames. ], batch size: 31, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:11:52,414 INFO [train.py:901] (0/4) Epoch 11, batch 2700, loss[loss=0.2859, simple_loss=0.3399, pruned_loss=0.1159, over 8687.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3127, pruned_loss=0.08189, over 1620349.64 frames. ], batch size: 49, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:12:04,665 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.578e+02 3.131e+02 4.095e+02 6.916e+02, threshold=6.263e+02, percent-clipped=2.0
+2023-02-06 10:12:11,619 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:12:18,882 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-02-06 10:12:27,317 INFO [train.py:901] (0/4) Epoch 11, batch 2750, loss[loss=0.2336, simple_loss=0.3214, pruned_loss=0.07288, over 8259.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3115, pruned_loss=0.08095, over 1619557.13 frames. ], batch size: 24, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:13:03,328 INFO [train.py:901] (0/4) Epoch 11, batch 2800, loss[loss=0.2446, simple_loss=0.3213, pruned_loss=0.08399, over 8483.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3128, pruned_loss=0.08205, over 1620659.66 frames. ], batch size: 28, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:13:10,443 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8195, 1.4313, 1.6903, 1.3551, 0.9673, 1.4979, 1.5840, 1.4007],
+ device='cuda:0'), covar=tensor([0.0523, 0.1247, 0.1610, 0.1367, 0.0598, 0.1473, 0.0701, 0.0634],
+ device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0154, 0.0194, 0.0159, 0.0105, 0.0163, 0.0117, 0.0139],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:13:13,810 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83646.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:13:15,057 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.535e+02 3.136e+02 3.769e+02 1.201e+03, threshold=6.273e+02, percent-clipped=3.0
+2023-02-06 10:13:32,689 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83673.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:13:37,608 INFO [train.py:901] (0/4) Epoch 11, batch 2850, loss[loss=0.2639, simple_loss=0.3271, pruned_loss=0.1004, over 7235.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3126, pruned_loss=0.082, over 1621803.42 frames. ], batch size: 16, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:13:47,886 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83696.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:04,215 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1178, 2.4948, 1.8387, 3.0949, 1.4516, 1.5489, 1.9866, 2.4933],
+ device='cuda:0'), covar=tensor([0.0820, 0.0767, 0.1101, 0.0358, 0.1238, 0.1611, 0.1161, 0.0864],
+ device='cuda:0'), in_proj_covar=tensor([0.0241, 0.0217, 0.0256, 0.0218, 0.0217, 0.0254, 0.0259, 0.0226],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 10:14:05,191 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0
+2023-02-06 10:14:05,621 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83721.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:12,967 INFO [train.py:901] (0/4) Epoch 11, batch 2900, loss[loss=0.1889, simple_loss=0.2587, pruned_loss=0.05952, over 7726.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3126, pruned_loss=0.08202, over 1620948.22 frames. ], batch size: 18, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:14:15,839 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7728, 1.4085, 1.6571, 1.3134, 1.0185, 1.4624, 1.6845, 1.4526],
+ device='cuda:0'), covar=tensor([0.0495, 0.1203, 0.1569, 0.1338, 0.0561, 0.1465, 0.0623, 0.0627],
+ device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0153, 0.0194, 0.0158, 0.0105, 0.0163, 0.0116, 0.0138],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:14:25,266 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.545e+02 3.159e+02 4.165e+02 9.643e+02, threshold=6.318e+02, percent-clipped=5.0
+2023-02-06 10:14:34,258 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:46,310 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:48,176 INFO [train.py:901] (0/4) Epoch 11, batch 2950, loss[loss=0.2471, simple_loss=0.3261, pruned_loss=0.08409, over 8476.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3126, pruned_loss=0.0815, over 1619770.27 frames. ], batch size: 29, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:14:53,615 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 10:15:09,068 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 10:15:18,759 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.90 vs. limit=5.0
+2023-02-06 10:15:22,306 INFO [train.py:901] (0/4) Epoch 11, batch 3000, loss[loss=0.2439, simple_loss=0.3127, pruned_loss=0.08751, over 8028.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3131, pruned_loss=0.08196, over 1619704.62 frames. ], batch size: 22, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:15:22,306 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 10:15:34,552 INFO [train.py:935] (0/4) Epoch 11, validation: loss=0.1889, simple_loss=0.2886, pruned_loss=0.04461, over 944034.00 frames.
+2023-02-06 10:15:34,552 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-06 10:15:46,617 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.511e+02 2.977e+02 3.600e+02 5.313e+02, threshold=5.953e+02, percent-clipped=0.0
+2023-02-06 10:16:10,350 INFO [train.py:901] (0/4) Epoch 11, batch 3050, loss[loss=0.2301, simple_loss=0.2997, pruned_loss=0.08022, over 8232.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3137, pruned_loss=0.08192, over 1618623.30 frames. ], batch size: 22, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:16:17,990 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.34 vs. limit=5.0
+2023-02-06 10:16:40,426 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3306, 1.9166, 2.9913, 2.3568, 2.5932, 2.1747, 1.7031, 1.3285],
+ device='cuda:0'), covar=tensor([0.3965, 0.4296, 0.1221, 0.2572, 0.2053, 0.2229, 0.1671, 0.4421],
+ device='cuda:0'), in_proj_covar=tensor([0.0887, 0.0865, 0.0727, 0.0837, 0.0938, 0.0796, 0.0703, 0.0766],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 10:16:43,140 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83929.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:16:44,274 INFO [train.py:901] (0/4) Epoch 11, batch 3100, loss[loss=0.2248, simple_loss=0.304, pruned_loss=0.07284, over 8247.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3123, pruned_loss=0.0809, over 1617477.03 frames. ], batch size: 22, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:16:45,446 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.84 vs. limit=5.0
+2023-02-06 10:16:55,423 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 2.748e+02 3.262e+02 3.755e+02 7.942e+02, threshold=6.525e+02, percent-clipped=1.0
+2023-02-06 10:17:00,084 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:08,761 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:18,436 INFO [train.py:901] (0/4) Epoch 11, batch 3150, loss[loss=0.2024, simple_loss=0.2631, pruned_loss=0.07089, over 7220.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3121, pruned_loss=0.08104, over 1613485.68 frames. ], batch size: 16, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:17:31,199 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-84000.pt
+2023-02-06 10:17:34,193 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84003.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:44,259 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84017.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:53,249 INFO [train.py:901] (0/4) Epoch 11, batch 3200, loss[loss=0.2033, simple_loss=0.2729, pruned_loss=0.06685, over 7723.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3123, pruned_loss=0.08145, over 1615310.37 frames. ], batch size: 18, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:18:01,369 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:18:05,761 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.726e+02 3.369e+02 4.220e+02 9.302e+02, threshold=6.739e+02, percent-clipped=4.0
+2023-02-06 10:18:27,179 INFO [train.py:901] (0/4) Epoch 11, batch 3250, loss[loss=0.245, simple_loss=0.3194, pruned_loss=0.08535, over 7442.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3126, pruned_loss=0.08167, over 1614302.29 frames. ], batch size: 17, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:18:50,412 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:18:53,044 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6924, 4.5378, 4.1167, 2.0301, 4.0614, 4.2746, 4.3091, 3.8789],
+ device='cuda:0'), covar=tensor([0.0682, 0.0601, 0.1026, 0.4908, 0.0836, 0.0797, 0.1087, 0.0820],
+ device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0375, 0.0380, 0.0482, 0.0377, 0.0372, 0.0375, 0.0330],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:18:55,063 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:19:01,828 INFO [train.py:901] (0/4) Epoch 11, batch 3300, loss[loss=0.2058, simple_loss=0.2828, pruned_loss=0.06439, over 7817.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3119, pruned_loss=0.08119, over 1615779.69 frames. ], batch size: 20, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:19:13,369 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.729e+02 3.101e+02 4.103e+02 8.191e+02, threshold=6.202e+02, percent-clipped=3.0
+2023-02-06 10:19:35,412 INFO [train.py:901] (0/4) Epoch 11, batch 3350, loss[loss=0.2503, simple_loss=0.3185, pruned_loss=0.09108, over 8238.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3116, pruned_loss=0.08163, over 1612848.75 frames. ], batch size: 22, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:19:41,345 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-02-06 10:20:01,008 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:20:10,209 INFO [train.py:901] (0/4) Epoch 11, batch 3400, loss[loss=0.2279, simple_loss=0.3085, pruned_loss=0.07372, over 8315.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3106, pruned_loss=0.08087, over 1610268.89 frames. ], batch size: 26, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:20:10,593 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-06 10:20:15,223 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84237.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:20:15,907 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8781, 1.3001, 1.4707, 1.2026, 0.9315, 1.3322, 1.6306, 1.4552],
+ device='cuda:0'), covar=tensor([0.0505, 0.1242, 0.1735, 0.1485, 0.0590, 0.1507, 0.0695, 0.0619],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0153, 0.0190, 0.0157, 0.0104, 0.0163, 0.0116, 0.0138],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+ device='cuda:0')
+2023-02-06 10:20:22,012 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8221, 2.0202, 2.0279, 1.6168, 2.1082, 1.7181, 1.1872, 1.8137],
+ device='cuda:0'), covar=tensor([0.0311, 0.0187, 0.0129, 0.0261, 0.0224, 0.0394, 0.0395, 0.0167],
+ device='cuda:0'), in_proj_covar=tensor([0.0389, 0.0323, 0.0267, 0.0386, 0.0309, 0.0471, 0.0358, 0.0352],
+ device='cuda:0'), out_proj_covar=tensor([1.1151e-04, 9.0463e-05, 7.4865e-05, 1.0915e-04, 8.7875e-05, 1.4464e-04,
+ 1.0276e-04, 1.0047e-04], device='cuda:0')
+2023-02-06 10:20:23,163 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 2.553e+02 3.068e+02 3.977e+02 7.727e+02, threshold=6.137e+02, percent-clipped=2.0
+2023-02-06 10:20:26,648 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:20:45,375 INFO [train.py:901] (0/4) Epoch 11, batch 3450, loss[loss=0.2274, simple_loss=0.3095, pruned_loss=0.07265, over 8360.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3123, pruned_loss=0.08161, over 1612176.11 frames. ], batch size: 24, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:21:06,413 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84311.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:21:20,272 INFO [train.py:901] (0/4) Epoch 11, batch 3500, loss[loss=0.2689, simple_loss=0.334, pruned_loss=0.1019, over 8660.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3135, pruned_loss=0.08219, over 1615231.40 frames. ], batch size: 34, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:21:31,064 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:21:32,263 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.912e+02 2.703e+02 3.166e+02 4.187e+02 8.001e+02, threshold=6.332e+02, percent-clipped=6.0
+2023-02-06 10:21:36,444 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84354.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:21:41,538 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0
+2023-02-06 10:21:48,760 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 10:21:54,128 INFO [train.py:901] (0/4) Epoch 11, batch 3550, loss[loss=0.181, simple_loss=0.2608, pruned_loss=0.05063, over 7803.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3119, pruned_loss=0.08098, over 1614284.47 frames. ], batch size: 19, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:22:25,834 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:22:28,976 INFO [train.py:901] (0/4) Epoch 11, batch 3600, loss[loss=0.3101, simple_loss=0.3645, pruned_loss=0.1278, over 8326.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3125, pruned_loss=0.08162, over 1614245.14 frames. ], batch size: 26, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:22:41,783 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.788e+02 3.447e+02 4.179e+02 1.001e+03, threshold=6.895e+02, percent-clipped=4.0
+2023-02-06 10:22:48,586 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84459.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:22:50,733 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:01,848 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8936, 2.6866, 3.4361, 2.1220, 1.7251, 3.4832, 0.4388, 2.0638],
+ device='cuda:0'), covar=tensor([0.2358, 0.1570, 0.0358, 0.2596, 0.4679, 0.0546, 0.4283, 0.2125],
+ device='cuda:0'), in_proj_covar=tensor([0.0163, 0.0165, 0.0097, 0.0212, 0.0252, 0.0104, 0.0165, 0.0165],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 10:23:03,524 INFO [train.py:901] (0/4) Epoch 11, batch 3650, loss[loss=0.206, simple_loss=0.2724, pruned_loss=0.06983, over 7539.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3122, pruned_loss=0.08146, over 1613420.08 frames. ], batch size: 18, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:23:11,594 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84493.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:28,880 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84518.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:37,222 INFO [train.py:901] (0/4) Epoch 11, batch 3700, loss[loss=0.2199, simple_loss=0.3067, pruned_loss=0.06661, over 8189.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3123, pruned_loss=0.08141, over 1618018.94 frames. ], batch size: 23, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:23:46,128 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84543.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:48,580 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 10:23:49,862 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.648e+02 3.219e+02 3.938e+02 7.332e+02, threshold=6.437e+02, percent-clipped=1.0
+2023-02-06 10:23:56,591 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8158, 2.0653, 2.3160, 1.1376, 2.3625, 1.6202, 0.7484, 1.8873],
+ device='cuda:0'), covar=tensor([0.0401, 0.0258, 0.0182, 0.0417, 0.0248, 0.0570, 0.0554, 0.0213],
+ device='cuda:0'), in_proj_covar=tensor([0.0381, 0.0318, 0.0265, 0.0378, 0.0304, 0.0466, 0.0354, 0.0346],
+ device='cuda:0'), out_proj_covar=tensor([1.0910e-04, 8.9188e-05, 7.3993e-05, 1.0689e-04, 8.6500e-05, 1.4288e-04,
+ 1.0138e-04, 9.8619e-05], device='cuda:0')
+2023-02-06 10:23:58,476 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84561.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:24:07,283 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84574.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:24:11,838 INFO [train.py:901] (0/4) Epoch 11, batch 3750, loss[loss=0.2516, simple_loss=0.3259, pruned_loss=0.08861, over 8344.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3118, pruned_loss=0.08139, over 1614182.63 frames. ], batch size: 26, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:24:23,981 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84598.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:24:46,862 INFO [train.py:901] (0/4) Epoch 11, batch 3800, loss[loss=0.2317, simple_loss=0.3063, pruned_loss=0.07854, over 8111.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3115, pruned_loss=0.08165, over 1609319.03 frames. ], batch size: 23, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:24:58,773 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.607e+02 3.118e+02 4.251e+02 1.041e+03, threshold=6.237e+02, percent-clipped=4.0
+2023-02-06 10:25:00,927 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:02,488 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0
+2023-02-06 10:25:18,150 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84676.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:21,362 INFO [train.py:901] (0/4) Epoch 11, batch 3850, loss[loss=0.2399, simple_loss=0.3106, pruned_loss=0.08456, over 7799.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3124, pruned_loss=0.08218, over 1610235.32 frames. ], batch size: 20, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:25:22,271 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84682.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:30,954 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3011, 1.7133, 1.7306, 1.5565, 1.0600, 1.4520, 1.8830, 1.5660],
+ device='cuda:0'), covar=tensor([0.0454, 0.1180, 0.1666, 0.1308, 0.0563, 0.1505, 0.0688, 0.0636],
+ device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0193, 0.0160, 0.0105, 0.0165, 0.0118, 0.0140],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:25:32,800 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:39,737 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84707.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:43,775 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84713.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:47,051 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:51,579 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 10:25:55,494 INFO [train.py:901] (0/4) Epoch 11, batch 3900, loss[loss=0.2503, simple_loss=0.3213, pruned_loss=0.08966, over 8083.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3122, pruned_loss=0.08207, over 1608468.41 frames. ], batch size: 21, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:26:03,705 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84743.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:26:08,296 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.619e+02 3.238e+02 3.926e+02 9.069e+02, threshold=6.476e+02, percent-clipped=5.0
+2023-02-06 10:26:30,326 INFO [train.py:901] (0/4) Epoch 11, batch 3950, loss[loss=0.2534, simple_loss=0.3342, pruned_loss=0.08631, over 8283.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3118, pruned_loss=0.08078, over 1609469.26 frames. ], batch size: 23, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:26:34,004 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6563, 2.2653, 3.7606, 2.7727, 3.1596, 2.2636, 1.9069, 1.6607],
+ device='cuda:0'), covar=tensor([0.3525, 0.3928, 0.0952, 0.2371, 0.1839, 0.2089, 0.1628, 0.4391],
+ device='cuda:0'), in_proj_covar=tensor([0.0882, 0.0864, 0.0729, 0.0835, 0.0932, 0.0794, 0.0703, 0.0762],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 10:26:52,760 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84813.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:26:56,203 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84818.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:27:04,808 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:27:05,277 INFO [train.py:901] (0/4) Epoch 11, batch 4000, loss[loss=0.2373, simple_loss=0.3244, pruned_loss=0.07504, over 8323.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3121, pruned_loss=0.08084, over 1612059.96 frames. ], batch size: 25, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:27:17,173 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.608e+02 2.990e+02 3.694e+02 8.393e+02, threshold=5.981e+02, percent-clipped=2.0
+2023-02-06 10:27:21,509 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84855.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:27:33,030 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5342, 4.4462, 3.9640, 2.1199, 3.9372, 4.0206, 4.1814, 3.7715],
+ device='cuda:0'), covar=tensor([0.0661, 0.0513, 0.0846, 0.4602, 0.0729, 0.0902, 0.0986, 0.0809],
+ device='cuda:0'), in_proj_covar=tensor([0.0462, 0.0368, 0.0375, 0.0479, 0.0370, 0.0372, 0.0369, 0.0327],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:27:34,470 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6447, 1.6766, 4.3334, 2.0718, 2.4114, 5.0786, 4.9867, 4.2216],
+ device='cuda:0'), covar=tensor([0.0968, 0.1718, 0.0312, 0.1842, 0.1140, 0.0162, 0.0351, 0.0623],
+ device='cuda:0'), in_proj_covar=tensor([0.0264, 0.0300, 0.0259, 0.0292, 0.0272, 0.0239, 0.0340, 0.0294],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:27:39,661 INFO [train.py:901] (0/4) Epoch 11, batch 4050, loss[loss=0.2329, simple_loss=0.3035, pruned_loss=0.08114, over 7639.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3121, pruned_loss=0.08095, over 1610902.88 frames. ], batch size: 19, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:27:43,785 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84887.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:07,229 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 10:28:15,558 INFO [train.py:901] (0/4) Epoch 11, batch 4100, loss[loss=0.1992, simple_loss=0.2796, pruned_loss=0.0594, over 8090.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3111, pruned_loss=0.08021, over 1612737.88 frames. ], batch size: 21, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:28:16,473 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:27,741 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.518e+02 2.978e+02 3.788e+02 7.594e+02, threshold=5.956e+02, percent-clipped=4.0
+2023-02-06 10:28:33,379 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84957.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:41,349 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:41,430 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:49,615 INFO [train.py:901] (0/4) Epoch 11, batch 4150, loss[loss=0.2599, simple_loss=0.3272, pruned_loss=0.09631, over 8248.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3126, pruned_loss=0.0812, over 1614025.51 frames. ], batch size: 24, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:28:58,535 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84994.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:59,078 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84995.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:29:04,354 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85002.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:29:11,613 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9971, 3.8820, 2.3073, 2.6271, 2.6639, 1.8203, 2.7594, 2.9552],
+ device='cuda:0'), covar=tensor([0.1609, 0.0290, 0.0997, 0.0780, 0.0722, 0.1353, 0.1050, 0.1009],
+ device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0236, 0.0316, 0.0294, 0.0302, 0.0323, 0.0342, 0.0307],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:29:23,829 INFO [train.py:901] (0/4) Epoch 11, batch 4200, loss[loss=0.278, simple_loss=0.3503, pruned_loss=0.1029, over 8251.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3115, pruned_loss=0.08109, over 1611918.71 frames. ], batch size: 24, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:29:36,444 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.581e+02 3.261e+02 3.967e+02 9.417e+02, threshold=6.523e+02, percent-clipped=7.0
+2023-02-06 10:29:47,908 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 10:29:50,128 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85069.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:29:53,724 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 10:29:58,024 INFO [train.py:901] (0/4) Epoch 11, batch 4250, loss[loss=0.2714, simple_loss=0.3376, pruned_loss=0.1026, over 8523.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3114, pruned_loss=0.0807, over 1614353.13 frames. ], batch size: 28, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:30:06,835 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:10,052 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 10:30:18,058 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85110.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:28,605 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:32,495 INFO [train.py:901] (0/4) Epoch 11, batch 4300, loss[loss=0.2143, simple_loss=0.3075, pruned_loss=0.06055, over 8613.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3124, pruned_loss=0.0814, over 1612991.86 frames. ], batch size: 34, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:30:33,952 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85133.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:45,173 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.616e+02 3.014e+02 4.154e+02 7.931e+02, threshold=6.027e+02, percent-clipped=5.0
+2023-02-06 10:30:54,016 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85162.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:31:06,959 INFO [train.py:901] (0/4) Epoch 11, batch 4350, loss[loss=0.1886, simple_loss=0.2651, pruned_loss=0.05604, over 7538.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3122, pruned_loss=0.08155, over 1611356.15 frames. ], batch size: 18, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:31:13,424 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0
+2023-02-06 10:31:29,831 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6934, 2.0098, 2.2576, 1.2433, 2.3079, 1.4977, 0.7054, 1.9039],
+ device='cuda:0'), covar=tensor([0.0471, 0.0237, 0.0163, 0.0418, 0.0257, 0.0613, 0.0641, 0.0199],
+ device='cuda:0'), in_proj_covar=tensor([0.0391, 0.0325, 0.0271, 0.0382, 0.0311, 0.0475, 0.0359, 0.0354],
+ device='cuda:0'), out_proj_covar=tensor([1.1184e-04, 9.0938e-05, 7.5806e-05, 1.0777e-04, 8.8630e-05, 1.4564e-04,
+ 1.0287e-04, 1.0083e-04], device='cuda:0')
+2023-02-06 10:31:31,294 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 10:31:40,295 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 10:31:41,573 INFO [train.py:901] (0/4) Epoch 11, batch 4400, loss[loss=0.2969, simple_loss=0.3551, pruned_loss=0.1193, over 8363.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3097, pruned_loss=0.08004, over 1605080.14 frames. ], batch size: 24, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:31:44,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2727, 2.1583, 1.5147, 1.8980, 1.6185, 1.3316, 1.5104, 1.5701],
+ device='cuda:0'), covar=tensor([0.1203, 0.0369, 0.1142, 0.0564, 0.0775, 0.1397, 0.1053, 0.0792],
+ device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0237, 0.0318, 0.0296, 0.0301, 0.0324, 0.0342, 0.0308],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:31:54,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.553e+02 3.172e+02 3.669e+02 6.483e+02, threshold=6.345e+02, percent-clipped=4.0
+2023-02-06 10:32:01,425 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85258.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:14,066 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85277.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:16,484 INFO [train.py:901] (0/4) Epoch 11, batch 4450, loss[loss=0.2643, simple_loss=0.3303, pruned_loss=0.09918, over 8124.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3093, pruned_loss=0.08007, over 1601776.28 frames. ], batch size: 22, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:32:18,774 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85283.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:22,192 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0673, 2.3304, 1.9517, 2.9247, 1.4587, 1.6318, 1.9542, 2.4847],
+ device='cuda:0'), covar=tensor([0.0824, 0.0917, 0.1018, 0.0385, 0.1150, 0.1548, 0.1094, 0.0843],
+ device='cuda:0'), in_proj_covar=tensor([0.0240, 0.0218, 0.0260, 0.0221, 0.0220, 0.0258, 0.0258, 0.0225],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 10:32:22,675 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 10:32:38,766 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85313.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:39,591 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4147, 2.0651, 3.0719, 2.4443, 2.6489, 2.1645, 1.7586, 1.5140],
+ device='cuda:0'), covar=tensor([0.3408, 0.3628, 0.1091, 0.2245, 0.1780, 0.1998, 0.1580, 0.3845],
+ device='cuda:0'), in_proj_covar=tensor([0.0871, 0.0854, 0.0727, 0.0822, 0.0922, 0.0786, 0.0695, 0.0760],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 10:32:50,703 INFO [train.py:901] (0/4) Epoch 11, batch 4500, loss[loss=0.2672, simple_loss=0.3247, pruned_loss=0.1048, over 8137.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3076, pruned_loss=0.07907, over 1604725.03 frames. ], batch size: 22, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:32:51,285 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0
+2023-02-06 10:32:57,950 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.39 vs. limit=5.0
+2023-02-06 10:33:03,399 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.629e+02 3.227e+02 4.085e+02 1.162e+03, threshold=6.455e+02, percent-clipped=2.0
+2023-02-06 10:33:15,846 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 10:33:16,046 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85366.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:33:26,537 INFO [train.py:901] (0/4) Epoch 11, batch 4550, loss[loss=0.2357, simple_loss=0.318, pruned_loss=0.07666, over 7919.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3105, pruned_loss=0.08051, over 1608267.27 frames. ], batch size: 20, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:33:33,510 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85391.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:33:43,813 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4050, 2.5262, 1.7865, 2.1439, 2.0203, 1.4999, 1.9208, 1.9486],
+ device='cuda:0'), covar=tensor([0.1160, 0.0324, 0.1010, 0.0522, 0.0615, 0.1288, 0.0824, 0.0801],
+ device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0240, 0.0322, 0.0300, 0.0307, 0.0328, 0.0347, 0.0312],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:33:47,180 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9807, 1.6776, 1.3959, 1.6015, 1.3904, 1.2169, 1.3133, 1.3100],
+ device='cuda:0'), covar=tensor([0.1015, 0.0452, 0.1072, 0.0503, 0.0620, 0.1339, 0.0769, 0.0731],
+ device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0239, 0.0321, 0.0299, 0.0306, 0.0327, 0.0346, 0.0311],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 10:33:59,380 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85428.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:01,129 INFO [train.py:901] (0/4) Epoch 11, batch 4600, loss[loss=0.2007, simple_loss=0.2781, pruned_loss=0.06162, over 7709.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3105, pruned_loss=0.08066, over 1609691.18 frames. ], batch size: 18, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:34:11,933 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:13,797 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.573e+02 3.214e+02 4.149e+02 1.527e+03, threshold=6.427e+02, percent-clipped=2.0
+2023-02-06 10:34:27,368 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:33,586 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:36,253 INFO [train.py:901] (0/4) Epoch 11, batch 4650, loss[loss=0.3013, simple_loss=0.3599, pruned_loss=0.1214, over 7195.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3099, pruned_loss=0.08076, over 1607205.74 frames. ], batch size: 72, lr: 6.94e-03, grad_scale: 8.0
+2023-02-06 10:34:50,386 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85501.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:35:11,112 INFO [train.py:901] (0/4) Epoch 11, batch 4700, loss[loss=0.1949, simple_loss=0.2747, pruned_loss=0.05749, over 7547.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3106, pruned_loss=0.08093, over 1603933.34 frames. ], batch size: 18, lr: 6.94e-03, grad_scale: 8.0
+2023-02-06 10:35:12,705 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85533.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:35:19,223 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1971, 1.9250, 2.0328, 1.8869, 1.3258, 1.9237, 2.6764, 2.7243],
+ device='cuda:0'), covar=tensor([0.0405, 0.1155, 0.1576, 0.1245, 0.0547, 0.1343, 0.0552, 0.0478],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0152, 0.0192, 0.0157, 0.0103, 0.0162, 0.0117, 0.0139],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:35:22,553 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:35:23,046 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 2.022e+02 2.812e+02 3.491e+02 4.674e+02 1.006e+03, threshold=6.983e+02, percent-clipped=9.0
+2023-02-06 10:35:30,010 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:35:32,597 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7517, 1.7828, 3.3514, 1.4753, 2.2047, 3.5940, 3.6850, 3.0953],
+ device='cuda:0'), covar=tensor([0.1045, 0.1377, 0.0302, 0.1861, 0.0842, 0.0217, 0.0403, 0.0548],
+ device='cuda:0'), in_proj_covar=tensor([0.0259, 0.0293, 0.0254, 0.0286, 0.0267, 0.0233, 0.0334, 0.0287],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 10:35:39,375 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5408, 1.9716, 3.4109, 1.2540, 2.4659, 1.9012, 1.6131, 2.3666],
+ device='cuda:0'), covar=tensor([0.1552, 0.2044, 0.0619, 0.3616, 0.1421, 0.2655, 0.1746, 0.1946],
+ device='cuda:0'), in_proj_covar=tensor([0.0487, 0.0517, 0.0531, 0.0578, 0.0617, 0.0554, 0.0473, 0.0610],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:35:45,762 INFO [train.py:901] (0/4) Epoch 11, batch 4750, loss[loss=0.2543, simple_loss=0.325, pruned_loss=0.0918, over 8098.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3115, pruned_loss=0.08136, over 1605341.83 frames. ], batch size: 23, lr: 6.94e-03, grad_scale: 8.0
+2023-02-06 10:35:47,962 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85584.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:35:53,285 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85592.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:36:06,687 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3127, 1.2814, 1.5125, 1.2553, 0.7147, 1.3301, 1.1872, 1.3171],
+ device='cuda:0'), covar=tensor([0.0503, 0.1242, 0.1719, 0.1362, 0.0549, 0.1472, 0.0659, 0.0579],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0157, 0.0103, 0.0162, 0.0116, 0.0138],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 10:36:09,841 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 10:36:11,811 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 10:36:20,655 INFO [train.py:901] (0/4) Epoch 11, batch 4800, loss[loss=0.1841, simple_loss=0.2737, pruned_loss=0.04721, over 7917.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3116, pruned_loss=0.08162, over 1607444.37 frames. ], batch size: 20, lr: 6.94e-03, grad_scale: 8.0
+2023-02-06 10:36:28,782 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:36:32,782 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.628e+02 3.255e+02 4.281e+02 8.051e+02, threshold=6.510e+02, percent-clipped=3.0
+2023-02-06 10:36:55,319 INFO [train.py:901] (0/4) Epoch 11, batch 4850, loss[loss=0.2273, simple_loss=0.3028, pruned_loss=0.07588, over 8242.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3107, pruned_loss=0.08062, over 1612471.79 frames. ], batch size: 22, lr: 6.94e-03, grad_scale: 8.0
+2023-02-06 10:36:57,577 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85684.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:37:01,303 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 10:37:12,690 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4133, 1.7184, 2.6634, 1.2178, 2.0968, 1.7473, 1.5524, 1.8679],
+ device='cuda:0'), covar=tensor([0.1799, 0.2412, 0.0771, 0.4021, 0.1540, 0.2850, 0.1877, 0.2084],
+ device='cuda:0'), in_proj_covar=tensor([0.0490, 0.0521, 0.0535, 0.0584, 0.0621, 0.0555, 0.0474, 0.0615],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 10:37:13,008 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 10:37:14,634 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85709.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:37:17,363 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5738, 1.6793, 3.2575, 1.2189, 2.1255, 3.5505, 3.5862, 2.9809],
+ device='cuda:0'), covar=tensor([0.1236, 0.1449, 0.0357, 0.2292, 0.1015, 0.0262, 0.0542, 0.0678],
+ device='cuda:0'), in_proj_covar=tensor([0.0259, 0.0293, 0.0256, 0.0289, 0.0267, 0.0233, 0.0335, 0.0287],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 10:37:30,204 INFO [train.py:901] (0/4) Epoch 11, batch 4900, loss[loss=0.2502, simple_loss=0.318, pruned_loss=0.09119, over 8080.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3112, pruned_loss=0.08079, over 1615486.87 frames. ], batch size: 21, lr: 6.93e-03, grad_scale: 8.0
+2023-02-06 10:37:42,891 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.544e+02 3.151e+02 4.004e+02 8.063e+02, threshold=6.301e+02, percent-clipped=5.0
+2023-02-06 10:37:58,837 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7309, 1.7782, 2.1428, 1.5875, 1.1585, 2.2035, 0.2063, 1.2530],
+ device='cuda:0'), covar=tensor([0.2595, 0.1635, 0.0517, 0.1982, 0.4619, 0.0508, 0.3485, 0.2073],
+ device='cuda:0'), in_proj_covar=tensor([0.0164, 0.0165, 0.0098, 0.0212, 0.0254, 0.0103, 0.0165, 0.0164],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 10:38:04,658 INFO [train.py:901] (0/4) Epoch 11, batch 4950, loss[loss=0.2522, simple_loss=0.3273, pruned_loss=0.08857, over 8140.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3114, pruned_loss=0.08071, over 1615937.75 frames. ], batch size: 22, lr: 6.93e-03, grad_scale: 8.0
+2023-02-06 10:38:11,375 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85790.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:38:36,552 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3868, 1.5938, 2.5286, 0.9700, 1.9685, 2.7819, 3.0280, 1.9780],
+ device='cuda:0'), covar=tensor([0.1549, 0.1818, 0.0710, 0.3109, 0.1121, 0.0571, 0.0775, 0.1500],
+ device='cuda:0'), in_proj_covar=tensor([0.0258, 0.0293, 0.0255, 0.0286, 0.0266, 0.0231, 0.0334, 0.0287],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 10:38:39,604 INFO [train.py:901] (0/4) Epoch 11, batch 5000, loss[loss=0.2214, simple_loss=0.3111, pruned_loss=0.06585, over 8285.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3124, pruned_loss=0.0813, over 1615617.66 frames. ], batch size: 23, lr: 6.93e-03, grad_scale: 8.0
+2023-02-06 10:38:46,503 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85840.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:38:47,419 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.76 vs. limit=5.0
+2023-02-06 10:38:49,800 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85845.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:38:51,895 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:38:52,299 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.585e+02 3.219e+02 4.097e+02 8.363e+02, threshold=6.438e+02, percent-clipped=6.0
+2023-02-06 10:39:03,553 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85865.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:39:08,900 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85873.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:39:13,944 INFO [train.py:901] (0/4) Epoch 11, batch 5050, loss[loss=0.2852, simple_loss=0.3455, pruned_loss=0.1124, over 6866.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3134, pruned_loss=0.08239, over 1612949.63 frames. ], batch size: 71, lr: 6.93e-03, grad_scale: 8.0
+2023-02-06 10:39:15,552 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.40 vs.
limit=5.0 +2023-02-06 10:39:21,248 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:22,000 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85893.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:30,131 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:39,822 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 10:39:48,589 INFO [train.py:901] (0/4) Epoch 11, batch 5100, loss[loss=0.246, simple_loss=0.3192, pruned_loss=0.08645, over 7984.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3116, pruned_loss=0.08088, over 1608606.37 frames. ], batch size: 21, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:40:00,784 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85948.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:40:01,254 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.570e+02 3.113e+02 3.980e+02 6.838e+02, threshold=6.226e+02, percent-clipped=2.0 +2023-02-06 10:40:08,917 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85960.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:19,542 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:23,495 INFO [train.py:901] (0/4) Epoch 11, batch 5150, loss[loss=0.2989, simple_loss=0.3628, pruned_loss=0.1175, over 8626.00 frames. ], tot_loss[loss=0.237, simple_loss=0.312, pruned_loss=0.08098, over 1609774.71 frames. ], batch size: 34, lr: 6.92e-03, grad_scale: 8.0 +2023-02-06 10:40:27,616 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:36,305 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-86000.pt +2023-02-06 10:40:42,287 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:51,412 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:59,442 INFO [train.py:901] (0/4) Epoch 11, batch 5200, loss[loss=0.2747, simple_loss=0.3412, pruned_loss=0.1041, over 8262.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3117, pruned_loss=0.08035, over 1611499.24 frames. 
], batch size: 49, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:12,341 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.648e+02 3.082e+02 3.913e+02 1.007e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-06 10:41:22,283 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8390, 1.5006, 3.1341, 1.3701, 2.0604, 3.4171, 3.4175, 2.9331], + device='cuda:0'), covar=tensor([0.1039, 0.1476, 0.0340, 0.1878, 0.0893, 0.0262, 0.0515, 0.0622], + device='cuda:0'), in_proj_covar=tensor([0.0263, 0.0297, 0.0258, 0.0291, 0.0272, 0.0236, 0.0339, 0.0293], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 10:41:27,748 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:35,337 INFO [train.py:901] (0/4) Epoch 11, batch 5250, loss[loss=0.3236, simple_loss=0.3645, pruned_loss=0.1413, over 6567.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3119, pruned_loss=0.08094, over 1607526.01 frames. ], batch size: 71, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:39,733 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:40,973 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 10:41:50,767 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:10,648 INFO [train.py:901] (0/4) Epoch 11, batch 5300, loss[loss=0.2355, simple_loss=0.3055, pruned_loss=0.08279, over 7802.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3111, pruned_loss=0.08033, over 1607052.31 frames. ], batch size: 20, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:14,346 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7763, 2.3277, 4.4932, 1.4700, 3.2781, 2.2700, 1.7300, 2.9574], + device='cuda:0'), covar=tensor([0.1640, 0.2144, 0.0654, 0.3821, 0.1453, 0.2744, 0.1858, 0.2326], + device='cuda:0'), in_proj_covar=tensor([0.0485, 0.0519, 0.0533, 0.0580, 0.0614, 0.0551, 0.0471, 0.0611], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 10:42:23,760 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.570e+02 3.118e+02 4.195e+02 8.045e+02, threshold=6.237e+02, percent-clipped=4.0 +2023-02-06 10:42:32,860 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:46,503 INFO [train.py:901] (0/4) Epoch 11, batch 5350, loss[loss=0.2432, simple_loss=0.3161, pruned_loss=0.08514, over 8443.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3115, pruned_loss=0.08066, over 1606722.98 frames. ], batch size: 27, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:50,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:12,446 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:22,285 INFO [train.py:901] (0/4) Epoch 11, batch 5400, loss[loss=0.258, simple_loss=0.3293, pruned_loss=0.09329, over 8074.00 frames. 
], tot_loss[loss=0.2364, simple_loss=0.3113, pruned_loss=0.08073, over 1607738.24 frames. ], batch size: 21, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:43:26,581 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86237.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:29,483 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:34,239 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5581, 1.5487, 1.8679, 1.6157, 1.1685, 1.9524, 0.4103, 1.3039], + device='cuda:0'), covar=tensor([0.2333, 0.1454, 0.0625, 0.1388, 0.4001, 0.0418, 0.2838, 0.1844], + device='cuda:0'), in_proj_covar=tensor([0.0160, 0.0164, 0.0096, 0.0208, 0.0246, 0.0100, 0.0160, 0.0161], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 10:43:34,674 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.471e+02 3.223e+02 4.268e+02 9.619e+02, threshold=6.446e+02, percent-clipped=7.0 +2023-02-06 10:43:44,521 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:45,145 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:57,388 INFO [train.py:901] (0/4) Epoch 11, batch 5450, loss[loss=0.2388, simple_loss=0.3037, pruned_loss=0.08698, over 7422.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3115, pruned_loss=0.08119, over 1607401.21 frames. ], batch size: 17, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:03,050 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:05,805 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86292.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:44:07,248 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6690, 1.8009, 1.4855, 2.2306, 1.0665, 1.3929, 1.5818, 1.8885], + device='cuda:0'), covar=tensor([0.0811, 0.0895, 0.1111, 0.0519, 0.1185, 0.1407, 0.0909, 0.0770], + device='cuda:0'), in_proj_covar=tensor([0.0238, 0.0215, 0.0254, 0.0216, 0.0217, 0.0253, 0.0254, 0.0220], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 10:44:25,120 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:34,079 INFO [train.py:901] (0/4) Epoch 11, batch 5500, loss[loss=0.2336, simple_loss=0.3263, pruned_loss=0.07043, over 8326.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3116, pruned_loss=0.08049, over 1612184.60 frames. ], batch size: 25, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:34,721 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 10:44:44,468 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-02-06 10:44:46,144 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.494e+02 3.013e+02 3.770e+02 8.759e+02, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 10:44:48,449 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:52,684 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:56,767 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:04,260 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 10:45:09,173 INFO [train.py:901] (0/4) Epoch 11, batch 5550, loss[loss=0.2994, simple_loss=0.3617, pruned_loss=0.1186, over 8594.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3116, pruned_loss=0.08072, over 1614176.44 frames. ], batch size: 39, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:10,769 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:27,853 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86407.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:45:33,155 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:44,330 INFO [train.py:901] (0/4) Epoch 11, batch 5600, loss[loss=0.2505, simple_loss=0.3405, pruned_loss=0.08028, over 8333.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3119, pruned_loss=0.08067, over 1614572.58 frames. ], batch size: 25, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:44,398 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:46,515 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:57,251 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.527e+02 3.003e+02 3.802e+02 9.548e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-06 10:46:03,244 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:06,583 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:17,331 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:18,549 INFO [train.py:901] (0/4) Epoch 11, batch 5650, loss[loss=0.2572, simple_loss=0.3405, pruned_loss=0.08691, over 8459.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3129, pruned_loss=0.08147, over 1614247.75 frames. ], batch size: 25, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:46:37,004 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 10:46:39,906 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 10:46:44,006 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1427, 1.1848, 3.3442, 0.8983, 2.9275, 2.8315, 3.0671, 2.9537], + device='cuda:0'), covar=tensor([0.0906, 0.3870, 0.0808, 0.3813, 0.1605, 0.1031, 0.0791, 0.0915], + device='cuda:0'), in_proj_covar=tensor([0.0491, 0.0576, 0.0579, 0.0534, 0.0605, 0.0517, 0.0509, 0.0582], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 10:46:52,193 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:53,988 INFO [train.py:901] (0/4) Epoch 11, batch 5700, loss[loss=0.2131, simple_loss=0.2802, pruned_loss=0.07299, over 7810.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3113, pruned_loss=0.08045, over 1614207.92 frames. ], batch size: 20, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:04,198 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:06,038 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.473e+02 3.032e+02 3.837e+02 8.433e+02, threshold=6.065e+02, percent-clipped=5.0 +2023-02-06 10:47:11,018 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8068, 1.6624, 3.4727, 1.4811, 2.3862, 3.8695, 3.8838, 3.2953], + device='cuda:0'), covar=tensor([0.1173, 0.1547, 0.0335, 0.1988, 0.0968, 0.0182, 0.0390, 0.0562], + device='cuda:0'), in_proj_covar=tensor([0.0260, 0.0295, 0.0255, 0.0286, 0.0268, 0.0233, 0.0335, 0.0287], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 10:47:28,594 INFO [train.py:901] (0/4) Epoch 11, batch 5750, loss[loss=0.2034, simple_loss=0.3019, pruned_loss=0.05242, over 8317.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3102, pruned_loss=0.07984, over 1610664.19 frames. ], batch size: 25, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:29,098 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0 +2023-02-06 10:47:37,014 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-06 10:47:40,219 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:42,138 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 10:47:47,612 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:47,756 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:03,810 INFO [train.py:901] (0/4) Epoch 11, batch 5800, loss[loss=0.2397, simple_loss=0.3202, pruned_loss=0.07956, over 8568.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3114, pruned_loss=0.08068, over 1610658.97 frames. 
], batch size: 31, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:05,399 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:17,064 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.625e+02 3.434e+02 4.363e+02 1.044e+03, threshold=6.867e+02, percent-clipped=16.0 +2023-02-06 10:48:19,991 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9456, 3.4153, 2.2436, 2.3880, 2.4618, 2.0112, 2.4986, 2.7344], + device='cuda:0'), covar=tensor([0.1441, 0.0247, 0.0926, 0.0777, 0.0674, 0.1170, 0.0917, 0.1012], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0231, 0.0311, 0.0293, 0.0300, 0.0317, 0.0337, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 10:48:26,848 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86663.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:48:39,359 INFO [train.py:901] (0/4) Epoch 11, batch 5850, loss[loss=0.2602, simple_loss=0.3363, pruned_loss=0.09211, over 8091.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3099, pruned_loss=0.07942, over 1610303.89 frames. ], batch size: 21, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:44,276 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86688.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:48:45,640 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:02,114 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:02,974 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 10:49:07,935 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:13,225 INFO [train.py:901] (0/4) Epoch 11, batch 5900, loss[loss=0.2489, simple_loss=0.3221, pruned_loss=0.08785, over 6942.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3118, pruned_loss=0.0804, over 1611781.51 frames. ], batch size: 71, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:16,683 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:25,721 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.878e+02 2.650e+02 3.002e+02 3.837e+02 8.505e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-06 10:49:33,349 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:43,863 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.12 vs. limit=2.0 +2023-02-06 10:49:48,264 INFO [train.py:901] (0/4) Epoch 11, batch 5950, loss[loss=0.2003, simple_loss=0.2779, pruned_loss=0.06136, over 7809.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3116, pruned_loss=0.08063, over 1614783.80 frames. ], batch size: 20, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:51,961 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:55,662 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. 
limit=2.0 +2023-02-06 10:50:03,637 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:03,784 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:06,870 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:09,056 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:20,470 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:22,926 INFO [train.py:901] (0/4) Epoch 11, batch 6000, loss[loss=0.216, simple_loss=0.2923, pruned_loss=0.06988, over 8252.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3111, pruned_loss=0.08021, over 1614747.07 frames. ], batch size: 22, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:50:22,927 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 10:50:35,336 INFO [train.py:935] (0/4) Epoch 11, validation: loss=0.1887, simple_loss=0.2887, pruned_loss=0.04439, over 944034.00 frames. +2023-02-06 10:50:35,340 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 10:50:36,202 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86832.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:47,363 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.431e+02 2.934e+02 3.566e+02 7.044e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-06 10:51:10,313 INFO [train.py:901] (0/4) Epoch 11, batch 6050, loss[loss=0.2177, simple_loss=0.2998, pruned_loss=0.06777, over 8245.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3117, pruned_loss=0.08022, over 1619515.09 frames. ], batch size: 22, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:20,508 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:35,565 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:39,056 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:44,785 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5997, 2.2009, 3.4916, 2.4386, 2.9417, 2.4019, 1.9349, 1.7977], + device='cuda:0'), covar=tensor([0.4267, 0.4621, 0.1229, 0.3382, 0.2491, 0.2152, 0.1598, 0.4540], + device='cuda:0'), in_proj_covar=tensor([0.0884, 0.0867, 0.0727, 0.0837, 0.0929, 0.0792, 0.0699, 0.0762], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 10:51:45,217 INFO [train.py:901] (0/4) Epoch 11, batch 6100, loss[loss=0.1704, simple_loss=0.263, pruned_loss=0.03894, over 8035.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3096, pruned_loss=0.0789, over 1614157.08 frames. ], batch size: 22, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:46,288 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=2.22 vs. 
limit=2.0 +2023-02-06 10:51:53,476 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:58,237 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.604e+02 3.114e+02 3.901e+02 9.212e+02, threshold=6.229e+02, percent-clipped=4.0 +2023-02-06 10:52:06,848 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 10:52:19,156 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:20,326 INFO [train.py:901] (0/4) Epoch 11, batch 6150, loss[loss=0.2133, simple_loss=0.2944, pruned_loss=0.06606, over 8141.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3103, pruned_loss=0.07952, over 1616400.71 frames. ], batch size: 22, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:52:36,914 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:47,749 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87020.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:52:55,885 INFO [train.py:901] (0/4) Epoch 11, batch 6200, loss[loss=0.2617, simple_loss=0.3303, pruned_loss=0.09657, over 7056.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3097, pruned_loss=0.07907, over 1612110.59 frames. ], batch size: 71, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:53:00,829 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6427, 1.6611, 2.0028, 1.5717, 1.0820, 2.0030, 0.2977, 1.3147], + device='cuda:0'), covar=tensor([0.2884, 0.1527, 0.0595, 0.1739, 0.4426, 0.0517, 0.3507, 0.1875], + device='cuda:0'), in_proj_covar=tensor([0.0163, 0.0167, 0.0099, 0.0212, 0.0251, 0.0103, 0.0163, 0.0164], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 10:53:07,897 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.592e+02 3.192e+02 4.476e+02 1.804e+03, threshold=6.384e+02, percent-clipped=5.0 +2023-02-06 10:53:14,420 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:53:18,218 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 10:53:30,916 INFO [train.py:901] (0/4) Epoch 11, batch 6250, loss[loss=0.1978, simple_loss=0.2682, pruned_loss=0.06369, over 7703.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3104, pruned_loss=0.07969, over 1609031.45 frames. ], batch size: 18, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:53:39,949 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0536, 6.1462, 5.3280, 2.4376, 5.3927, 5.7252, 5.7789, 5.4326], + device='cuda:0'), covar=tensor([0.0534, 0.0437, 0.0803, 0.4145, 0.0602, 0.0520, 0.1022, 0.0513], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0369, 0.0382, 0.0478, 0.0375, 0.0375, 0.0373, 0.0332], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 10:54:06,553 INFO [train.py:901] (0/4) Epoch 11, batch 6300, loss[loss=0.2992, simple_loss=0.3533, pruned_loss=0.1226, over 6669.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3118, pruned_loss=0.08024, over 1610598.59 frames. 
], batch size: 71, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:19,292 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.563e+02 3.017e+02 3.734e+02 8.364e+02, threshold=6.034e+02, percent-clipped=3.0 +2023-02-06 10:54:23,472 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8052, 3.6970, 3.3866, 1.8320, 3.3328, 3.2726, 3.3920, 3.0482], + device='cuda:0'), covar=tensor([0.0965, 0.0752, 0.1155, 0.4710, 0.1009, 0.1121, 0.1464, 0.1106], + device='cuda:0'), in_proj_covar=tensor([0.0461, 0.0367, 0.0380, 0.0477, 0.0375, 0.0372, 0.0371, 0.0330], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 10:54:36,427 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:38,304 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87176.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:39,796 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:41,628 INFO [train.py:901] (0/4) Epoch 11, batch 6350, loss[loss=0.2552, simple_loss=0.3197, pruned_loss=0.09536, over 8128.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3119, pruned_loss=0.08014, over 1614235.49 frames. ], batch size: 22, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:53,183 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:57,269 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:16,801 INFO [train.py:901] (0/4) Epoch 11, batch 6400, loss[loss=0.2476, simple_loss=0.3342, pruned_loss=0.0805, over 8526.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3123, pruned_loss=0.08006, over 1619783.92 frames. ], batch size: 26, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:19,098 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1631, 1.8188, 1.3802, 1.5754, 1.5331, 1.2178, 1.4531, 1.4814], + device='cuda:0'), covar=tensor([0.0873, 0.0266, 0.0738, 0.0421, 0.0474, 0.0916, 0.0608, 0.0597], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0235, 0.0315, 0.0296, 0.0300, 0.0319, 0.0340, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 10:55:23,187 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:28,875 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:29,366 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.577e+02 3.020e+02 3.786e+02 7.428e+02, threshold=6.041e+02, percent-clipped=2.0 +2023-02-06 10:55:51,533 INFO [train.py:901] (0/4) Epoch 11, batch 6450, loss[loss=0.1989, simple_loss=0.2732, pruned_loss=0.06233, over 7707.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3121, pruned_loss=0.08055, over 1614483.33 frames. 
], batch size: 18, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:59,186 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:14,145 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:27,326 INFO [train.py:901] (0/4) Epoch 11, batch 6500, loss[loss=0.2363, simple_loss=0.3125, pruned_loss=0.08005, over 6790.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3107, pruned_loss=0.08023, over 1610659.73 frames. ], batch size: 74, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:56:32,327 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:39,860 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.605e+02 3.245e+02 4.169e+02 7.875e+02, threshold=6.489e+02, percent-clipped=5.0 +2023-02-06 10:56:44,213 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:50,401 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87364.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:56:58,811 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 10:57:01,880 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:57:02,427 INFO [train.py:901] (0/4) Epoch 11, batch 6550, loss[loss=0.2184, simple_loss=0.3003, pruned_loss=0.06826, over 8033.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.31, pruned_loss=0.07965, over 1608838.29 frames. ], batch size: 22, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:04,032 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1357, 1.4941, 1.5585, 1.2341, 0.9076, 1.3188, 1.7512, 1.4513], + device='cuda:0'), covar=tensor([0.0516, 0.1319, 0.1857, 0.1524, 0.0648, 0.1661, 0.0757, 0.0684], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0159, 0.0104, 0.0165, 0.0117, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:0') +2023-02-06 10:57:17,755 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 10:57:37,067 INFO [train.py:901] (0/4) Epoch 11, batch 6600, loss[loss=0.2313, simple_loss=0.3129, pruned_loss=0.07487, over 8129.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3102, pruned_loss=0.07931, over 1613551.52 frames. ], batch size: 22, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:37,792 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 10:57:39,954 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5628, 1.5259, 1.7758, 1.4259, 1.0538, 1.8386, 0.1008, 1.1624], + device='cuda:0'), covar=tensor([0.2450, 0.1795, 0.0722, 0.1800, 0.4472, 0.0591, 0.3170, 0.2151], + device='cuda:0'), in_proj_covar=tensor([0.0161, 0.0166, 0.0099, 0.0213, 0.0250, 0.0101, 0.0160, 0.0163], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 10:57:50,090 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.293e+02 2.790e+02 3.732e+02 8.562e+02, threshold=5.581e+02, percent-clipped=1.0 +2023-02-06 10:57:58,511 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6508, 1.9523, 3.0446, 1.4199, 2.2195, 2.0694, 1.6596, 1.9822], + device='cuda:0'), covar=tensor([0.1689, 0.2289, 0.0792, 0.3858, 0.1527, 0.2786, 0.1838, 0.2314], + device='cuda:0'), in_proj_covar=tensor([0.0481, 0.0517, 0.0530, 0.0573, 0.0610, 0.0549, 0.0466, 0.0607], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 10:58:11,488 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:11,525 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87479.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:58:12,692 INFO [train.py:901] (0/4) Epoch 11, batch 6650, loss[loss=0.2226, simple_loss=0.3125, pruned_loss=0.0663, over 8669.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3098, pruned_loss=0.07911, over 1612070.34 frames. ], batch size: 34, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:41,507 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:47,483 INFO [train.py:901] (0/4) Epoch 11, batch 6700, loss[loss=0.2249, simple_loss=0.3025, pruned_loss=0.07366, over 7226.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3108, pruned_loss=0.08005, over 1610033.36 frames. 
], batch size: 16, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:58,339 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:59,467 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.493e+02 3.158e+02 4.170e+02 8.693e+02, threshold=6.316e+02, percent-clipped=8.0 +2023-02-06 10:59:04,537 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7440, 1.4020, 5.7920, 2.1050, 5.1970, 4.8972, 5.3716, 5.2226], + device='cuda:0'), covar=tensor([0.0443, 0.4309, 0.0335, 0.3200, 0.0946, 0.0713, 0.0460, 0.0517], + device='cuda:0'), in_proj_covar=tensor([0.0485, 0.0570, 0.0580, 0.0530, 0.0600, 0.0515, 0.0505, 0.0573], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 10:59:16,928 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:18,335 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8788, 1.8660, 2.4204, 1.7868, 1.2698, 2.6279, 0.4463, 1.4845], + device='cuda:0'), covar=tensor([0.2349, 0.1690, 0.0432, 0.1956, 0.3766, 0.0305, 0.3521, 0.2127], + device='cuda:0'), in_proj_covar=tensor([0.0161, 0.0165, 0.0099, 0.0211, 0.0247, 0.0100, 0.0159, 0.0161], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 10:59:18,586 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 10:59:22,974 INFO [train.py:901] (0/4) Epoch 11, batch 6750, loss[loss=0.2137, simple_loss=0.2973, pruned_loss=0.06505, over 8333.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.311, pruned_loss=0.07973, over 1614340.60 frames. ], batch size: 25, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:59:27,210 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9590, 1.9911, 1.6514, 2.5412, 1.1033, 1.4901, 1.6379, 2.1427], + device='cuda:0'), covar=tensor([0.0742, 0.0905, 0.1206, 0.0463, 0.1360, 0.1541, 0.1038, 0.0786], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0214, 0.0257, 0.0218, 0.0218, 0.0254, 0.0256, 0.0221], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 10:59:30,567 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:37,569 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:40,793 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:43,433 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:52,274 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:56,912 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 10:59:57,600 INFO [train.py:901] (0/4) Epoch 11, batch 6800, loss[loss=0.2031, simple_loss=0.2725, pruned_loss=0.06679, over 7791.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3108, pruned_loss=0.08007, over 1612571.74 frames. 
], batch size: 19, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:01,247 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:00:10,517 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.375e+02 2.980e+02 3.798e+02 7.616e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 11:00:32,377 INFO [train.py:901] (0/4) Epoch 11, batch 6850, loss[loss=0.2551, simple_loss=0.3314, pruned_loss=0.08947, over 8289.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3105, pruned_loss=0.07981, over 1612091.16 frames. ], batch size: 23, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:45,132 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 11:00:50,749 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:00:54,104 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1816, 2.7937, 3.0636, 1.4240, 3.2287, 2.0828, 1.5687, 2.1724], + device='cuda:0'), covar=tensor([0.0608, 0.0250, 0.0229, 0.0559, 0.0359, 0.0583, 0.0674, 0.0334], + device='cuda:0'), in_proj_covar=tensor([0.0391, 0.0327, 0.0269, 0.0384, 0.0312, 0.0472, 0.0357, 0.0349], + device='cuda:0'), out_proj_covar=tensor([1.1157e-04, 9.1160e-05, 7.5166e-05, 1.0782e-04, 8.8158e-05, 1.4407e-04, + 1.0178e-04, 9.8844e-05], device='cuda:0') +2023-02-06 11:00:59,330 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7201, 3.6491, 3.3361, 1.7594, 3.2820, 3.2937, 3.3686, 3.0881], + device='cuda:0'), covar=tensor([0.1080, 0.0796, 0.1156, 0.4876, 0.0958, 0.1200, 0.1442, 0.1132], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0374, 0.0384, 0.0481, 0.0376, 0.0376, 0.0377, 0.0335], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 11:01:01,872 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:01:06,417 INFO [train.py:901] (0/4) Epoch 11, batch 6900, loss[loss=0.1898, simple_loss=0.2647, pruned_loss=0.05746, over 7268.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3119, pruned_loss=0.08031, over 1617308.36 frames. ], batch size: 16, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:01:10,023 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87735.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:01:19,188 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.628e+02 3.043e+02 4.130e+02 7.700e+02, threshold=6.086e+02, percent-clipped=2.0 +2023-02-06 11:01:26,848 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87760.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:01:41,613 INFO [train.py:901] (0/4) Epoch 11, batch 6950, loss[loss=0.2152, simple_loss=0.295, pruned_loss=0.06767, over 8364.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3119, pruned_loss=0.08059, over 1612129.41 frames. ], batch size: 24, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:01:52,585 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-06 11:02:11,619 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:16,825 INFO [train.py:901] (0/4) Epoch 11, batch 7000, loss[loss=0.2106, simple_loss=0.2881, pruned_loss=0.06655, over 5134.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3112, pruned_loss=0.08054, over 1608602.86 frames. ], batch size: 11, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:02:22,311 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:25,937 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 11:02:29,501 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.548e+02 3.185e+02 4.052e+02 9.283e+02, threshold=6.369e+02, percent-clipped=6.0 +2023-02-06 11:02:41,517 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:51,622 INFO [train.py:901] (0/4) Epoch 11, batch 7050, loss[loss=0.2457, simple_loss=0.3307, pruned_loss=0.08034, over 8497.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3104, pruned_loss=0.07995, over 1608890.13 frames. ], batch size: 49, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:03:26,708 INFO [train.py:901] (0/4) Epoch 11, batch 7100, loss[loss=0.2216, simple_loss=0.3092, pruned_loss=0.06704, over 8635.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3094, pruned_loss=0.07899, over 1612081.58 frames. ], batch size: 31, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:03:27,714 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 11:03:31,621 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87938.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:36,824 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:38,774 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.732e+02 3.356e+02 4.654e+02 1.650e+03, threshold=6.712e+02, percent-clipped=12.0 +2023-02-06 11:03:40,146 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:48,274 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:51,539 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:00,731 INFO [train.py:901] (0/4) Epoch 11, batch 7150, loss[loss=0.2647, simple_loss=0.3407, pruned_loss=0.09431, over 7643.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3098, pruned_loss=0.07935, over 1613502.52 frames. 
], batch size: 19, lr: 6.85e-03, grad_scale: 16.0
+2023-02-06 11:04:01,614 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87982.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:04:05,829 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87988.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:04:14,502 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-88000.pt
+2023-02-06 11:04:36,670 INFO [train.py:901] (0/4) Epoch 11, batch 7200, loss[loss=0.278, simple_loss=0.3491, pruned_loss=0.1034, over 8515.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3102, pruned_loss=0.08011, over 1614715.44 frames. ], batch size: 28, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:04:49,439 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.591e+02 3.086e+02 3.706e+02 9.715e+02, threshold=6.172e+02, percent-clipped=2.0
+2023-02-06 11:04:57,778 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:01,275 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:10,857 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0
+2023-02-06 11:05:11,823 INFO [train.py:901] (0/4) Epoch 11, batch 7250, loss[loss=0.2288, simple_loss=0.3006, pruned_loss=0.07848, over 8561.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3109, pruned_loss=0.07996, over 1618461.56 frames. ], batch size: 39, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:05:12,649 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:19,795 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-06 11:05:21,492 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:37,924 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88118.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:39,356 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88120.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:46,928 INFO [train.py:901] (0/4) Epoch 11, batch 7300, loss[loss=0.2698, simple_loss=0.3281, pruned_loss=0.1057, over 7785.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3111, pruned_loss=0.08052, over 1614166.69 frames. ], batch size: 19, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:06:00,702 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.414e+02 2.958e+02 3.757e+02 7.369e+02, threshold=5.915e+02, percent-clipped=2.0
+2023-02-06 11:06:22,828 INFO [train.py:901] (0/4) Epoch 11, batch 7350, loss[loss=0.246, simple_loss=0.3144, pruned_loss=0.08877, over 8405.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3115, pruned_loss=0.08066, over 1613738.92 frames. ], batch size: 49, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:06:28,427 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8048, 1.4266, 3.4175, 1.4601, 2.1816, 3.8031, 3.8330, 3.2511],
+ device='cuda:0'), covar=tensor([0.1262, 0.1734, 0.0407, 0.2116, 0.1224, 0.0230, 0.0476, 0.0647],
+ device='cuda:0'), in_proj_covar=tensor([0.0262, 0.0296, 0.0258, 0.0291, 0.0273, 0.0233, 0.0336, 0.0288],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 11:06:32,278 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 11:06:32,480 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88194.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:06:50,338 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:06:51,489 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 11:06:58,209 INFO [train.py:901] (0/4) Epoch 11, batch 7400, loss[loss=0.2581, simple_loss=0.3412, pruned_loss=0.08748, over 8623.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.312, pruned_loss=0.08069, over 1615489.94 frames. ], batch size: 34, lr: 6.84e-03, grad_scale: 16.0
+2023-02-06 11:07:03,172 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:07:11,886 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.577e+02 3.074e+02 3.691e+02 9.024e+02, threshold=6.148e+02, percent-clipped=4.0
+2023-02-06 11:07:20,893 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:07:32,898 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 11:07:33,599 INFO [train.py:901] (0/4) Epoch 11, batch 7450, loss[loss=0.2343, simple_loss=0.3121, pruned_loss=0.07828, over 8586.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3111, pruned_loss=0.07994, over 1617606.07 frames. ], batch size: 34, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:07:43,215 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4464, 1.9101, 3.3660, 1.2198, 2.3901, 1.9035, 1.5216, 2.4157],
+ device='cuda:0'), covar=tensor([0.1794, 0.2362, 0.0697, 0.4139, 0.1626, 0.2962, 0.1968, 0.2161],
+ device='cuda:0'), in_proj_covar=tensor([0.0489, 0.0522, 0.0530, 0.0578, 0.0620, 0.0559, 0.0473, 0.0620],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 11:07:52,582 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88309.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:07:58,566 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88317.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:01,841 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:07,731 INFO [train.py:901] (0/4) Epoch 11, batch 7500, loss[loss=0.2648, simple_loss=0.3242, pruned_loss=0.1027, over 6602.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3101, pruned_loss=0.07938, over 1612645.38 frames. ], batch size: 71, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:08:13,321 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88338.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:15,868 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88342.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:19,204 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:20,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.827e+02 3.509e+02 4.304e+02 1.282e+03, threshold=7.018e+02, percent-clipped=8.0
+2023-02-06 11:08:29,978 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88363.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:42,585 INFO [train.py:901] (0/4) Epoch 11, batch 7550, loss[loss=0.244, simple_loss=0.3205, pruned_loss=0.08378, over 8506.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3103, pruned_loss=0.07962, over 1616113.90 frames. ], batch size: 26, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:09:12,948 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8582, 1.4291, 1.6317, 1.3429, 1.0748, 1.4182, 1.6340, 1.3135],
+ device='cuda:0'), covar=tensor([0.0515, 0.1240, 0.1666, 0.1409, 0.0573, 0.1498, 0.0691, 0.0650],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0150, 0.0190, 0.0157, 0.0103, 0.0163, 0.0115, 0.0137],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 11:09:17,375 INFO [train.py:901] (0/4) Epoch 11, batch 7600, loss[loss=0.2603, simple_loss=0.328, pruned_loss=0.09626, over 8569.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3096, pruned_loss=0.07899, over 1612983.00 frames. ], batch size: 34, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:09:29,096 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4754, 1.4800, 4.7044, 1.7945, 4.1080, 3.9355, 4.3296, 4.1211],
+ device='cuda:0'), covar=tensor([0.0545, 0.4463, 0.0445, 0.3293, 0.1069, 0.0863, 0.0444, 0.0612],
+ device='cuda:0'), in_proj_covar=tensor([0.0496, 0.0582, 0.0595, 0.0539, 0.0615, 0.0524, 0.0515, 0.0587],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:09:31,031 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 2.441e+02 2.975e+02 3.888e+02 6.138e+02, threshold=5.951e+02, percent-clipped=0.0
+2023-02-06 11:09:39,145 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:09:51,502 INFO [train.py:901] (0/4) Epoch 11, batch 7650, loss[loss=0.2101, simple_loss=0.273, pruned_loss=0.07366, over 7791.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.309, pruned_loss=0.07904, over 1611157.79 frames. ], batch size: 19, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:10:23,423 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.48 vs. limit=5.0
+2023-02-06 11:10:26,448 INFO [train.py:901] (0/4) Epoch 11, batch 7700, loss[loss=0.2202, simple_loss=0.2965, pruned_loss=0.07194, over 8360.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3076, pruned_loss=0.07824, over 1611318.39 frames. ], batch size: 24, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:10:39,166 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 11:10:39,711 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.472e+02 3.053e+02 3.571e+02 8.603e+02, threshold=6.105e+02, percent-clipped=3.0
+2023-02-06 11:10:58,417 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88577.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:11:00,812 INFO [train.py:901] (0/4) Epoch 11, batch 7750, loss[loss=0.2015, simple_loss=0.2871, pruned_loss=0.05795, over 8032.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3092, pruned_loss=0.07968, over 1613370.34 frames. ], batch size: 22, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:11:36,341 INFO [train.py:901] (0/4) Epoch 11, batch 7800, loss[loss=0.2479, simple_loss=0.3287, pruned_loss=0.08355, over 8012.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.31, pruned_loss=0.08026, over 1611785.69 frames. ], batch size: 22, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:11:48,834 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.685e+02 3.345e+02 4.152e+02 1.012e+03, threshold=6.690e+02, percent-clipped=6.0
+2023-02-06 11:11:50,872 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88653.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:12:04,960 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2085, 1.1062, 1.2729, 1.1473, 1.0012, 1.3232, 0.0848, 0.9930],
+ device='cuda:0'), covar=tensor([0.2408, 0.1849, 0.0677, 0.1340, 0.3784, 0.0615, 0.3264, 0.1691],
+ device='cuda:0'), in_proj_covar=tensor([0.0164, 0.0169, 0.0101, 0.0212, 0.0253, 0.0103, 0.0164, 0.0165],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 11:12:09,483 INFO [train.py:901] (0/4) Epoch 11, batch 7850, loss[loss=0.2822, simple_loss=0.3565, pruned_loss=0.1039, over 8354.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3105, pruned_loss=0.08026, over 1611891.26 frames. ], batch size: 24, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:12:42,904 INFO [train.py:901] (0/4) Epoch 11, batch 7900, loss[loss=0.2371, simple_loss=0.3193, pruned_loss=0.07747, over 8554.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.3088, pruned_loss=0.079, over 1610235.92 frames. ], batch size: 31, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:12:55,426 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.490e+02 3.060e+02 3.735e+02 6.734e+02, threshold=6.120e+02, percent-clipped=1.0
+2023-02-06 11:13:04,849 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0
+2023-02-06 11:13:07,248 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88768.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:13:15,828 INFO [train.py:901] (0/4) Epoch 11, batch 7950, loss[loss=0.2454, simple_loss=0.3244, pruned_loss=0.08322, over 8488.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3083, pruned_loss=0.0787, over 1609559.48 frames. ], batch size: 27, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:13:24,834 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7116, 1.5776, 1.7773, 1.3628, 0.9242, 1.4896, 1.6575, 1.7292],
+ device='cuda:0'), covar=tensor([0.0528, 0.1119, 0.1574, 0.1320, 0.0570, 0.1457, 0.0614, 0.0508],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0150, 0.0191, 0.0157, 0.0103, 0.0164, 0.0115, 0.0137],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 11:13:49,362 INFO [train.py:901] (0/4) Epoch 11, batch 8000, loss[loss=0.207, simple_loss=0.2822, pruned_loss=0.06589, over 7695.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3082, pruned_loss=0.07915, over 1605654.32 frames. ], batch size: 18, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:13:50,911 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:13:54,535 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 11:14:02,011 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.603e+02 3.071e+02 3.730e+02 8.421e+02, threshold=6.141e+02, percent-clipped=3.0
+2023-02-06 11:14:07,223 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88858.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:14:22,230 INFO [train.py:901] (0/4) Epoch 11, batch 8050, loss[loss=0.1773, simple_loss=0.26, pruned_loss=0.04733, over 7527.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3072, pruned_loss=0.07932, over 1586978.86 frames. ], batch size: 18, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:14:44,457 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-11.pt
+2023-02-06 11:14:55,381 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 11:14:58,684 INFO [train.py:901] (0/4) Epoch 12, batch 0, loss[loss=0.2693, simple_loss=0.3484, pruned_loss=0.09507, over 8108.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3484, pruned_loss=0.09507, over 8108.00 frames. ], batch size: 23, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:14:58,685 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 11:15:09,781 INFO [train.py:935] (0/4) Epoch 12, validation: loss=0.1897, simple_loss=0.2896, pruned_loss=0.04486, over 944034.00 frames.
+2023-02-06 11:15:09,782 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-06 11:15:23,298 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 11:15:28,755 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4991, 2.7036, 1.8306, 2.0905, 2.1446, 1.5389, 2.0098, 2.0332],
+ device='cuda:0'), covar=tensor([0.1273, 0.0282, 0.1007, 0.0590, 0.0573, 0.1220, 0.0815, 0.0800],
+ device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0236, 0.0318, 0.0299, 0.0303, 0.0323, 0.0341, 0.0307],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:15:35,201 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.690e+02 3.540e+02 4.339e+02 7.249e+02, threshold=7.080e+02, percent-clipped=5.0
+2023-02-06 11:15:44,685 INFO [train.py:901] (0/4) Epoch 12, batch 50, loss[loss=0.2391, simple_loss=0.3207, pruned_loss=0.07874, over 8502.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.3214, pruned_loss=0.08787, over 367002.54 frames. ], batch size: 26, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:15:57,432 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 11:16:19,036 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 11:16:19,746 INFO [train.py:901] (0/4) Epoch 12, batch 100, loss[loss=0.2716, simple_loss=0.3468, pruned_loss=0.09818, over 8339.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.319, pruned_loss=0.08626, over 646886.39 frames. ], batch size: 26, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:16:26,510 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89024.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:16:40,635 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:16:43,450 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89049.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:16:43,912 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.771e+02 3.256e+02 4.152e+02 1.357e+03, threshold=6.512e+02, percent-clipped=1.0
+2023-02-06 11:16:54,736 INFO [train.py:901] (0/4) Epoch 12, batch 150, loss[loss=0.2282, simple_loss=0.3122, pruned_loss=0.07213, over 8031.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3162, pruned_loss=0.08456, over 860570.98 frames. ], batch size: 22, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:17:00,755 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7293, 4.6558, 4.2406, 1.9555, 4.2037, 4.2540, 4.2937, 4.1164],
+ device='cuda:0'), covar=tensor([0.0732, 0.0570, 0.0944, 0.4785, 0.0752, 0.0852, 0.1248, 0.0769],
+ device='cuda:0'), in_proj_covar=tensor([0.0462, 0.0370, 0.0382, 0.0476, 0.0372, 0.0375, 0.0373, 0.0332],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 11:17:29,010 INFO [train.py:901] (0/4) Epoch 12, batch 200, loss[loss=0.2108, simple_loss=0.2697, pruned_loss=0.07594, over 7536.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3122, pruned_loss=0.08269, over 1022664.26 frames. ], batch size: 18, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:17:33,288 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1079, 1.7200, 2.4374, 2.0686, 2.1914, 1.9956, 1.6386, 1.2091],
+ device='cuda:0'), covar=tensor([0.3421, 0.3678, 0.1119, 0.2160, 0.1726, 0.2044, 0.1516, 0.3332],
+ device='cuda:0'), in_proj_covar=tensor([0.0873, 0.0858, 0.0721, 0.0830, 0.0916, 0.0789, 0.0692, 0.0750],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:17:38,637 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1027, 1.5208, 1.5059, 1.1696, 0.9469, 1.3755, 1.6438, 1.6675],
+ device='cuda:0'), covar=tensor([0.0508, 0.1247, 0.1811, 0.1478, 0.0611, 0.1568, 0.0707, 0.0569],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0152, 0.0191, 0.0157, 0.0103, 0.0163, 0.0115, 0.0137],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 11:17:53,942 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.712e+02 3.423e+02 4.383e+02 1.008e+03, threshold=6.845e+02, percent-clipped=3.0
+2023-02-06 11:18:03,556 INFO [train.py:901] (0/4) Epoch 12, batch 250, loss[loss=0.2501, simple_loss=0.3281, pruned_loss=0.08608, over 8464.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3114, pruned_loss=0.08129, over 1152148.06 frames. ], batch size: 25, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:18:04,453 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8534, 2.0628, 1.7349, 2.5913, 1.0481, 1.4245, 1.6137, 2.2153],
+ device='cuda:0'), covar=tensor([0.0804, 0.0843, 0.1052, 0.0401, 0.1222, 0.1531, 0.1048, 0.0728],
+ device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0216, 0.0254, 0.0214, 0.0218, 0.0252, 0.0258, 0.0222],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 11:18:13,252 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 11:18:20,896 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:18:22,859 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 11:18:38,842 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9104, 1.5936, 1.6288, 1.3657, 0.9853, 1.4862, 1.6883, 1.3743],
+ device='cuda:0'), covar=tensor([0.0516, 0.1149, 0.1628, 0.1353, 0.0629, 0.1438, 0.0671, 0.0627],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0152, 0.0192, 0.0158, 0.0103, 0.0163, 0.0115, 0.0137],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 11:18:40,049 INFO [train.py:901] (0/4) Epoch 12, batch 300, loss[loss=0.1803, simple_loss=0.2608, pruned_loss=0.04993, over 8292.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3117, pruned_loss=0.08091, over 1254391.03 frames. ], batch size: 23, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:19:04,996 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.536e+02 3.052e+02 3.921e+02 6.584e+02, threshold=6.103e+02, percent-clipped=0.0
+2023-02-06 11:19:14,503 INFO [train.py:901] (0/4) Epoch 12, batch 350, loss[loss=0.2002, simple_loss=0.271, pruned_loss=0.06467, over 7517.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3123, pruned_loss=0.08118, over 1333128.77 frames. ], batch size: 18, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:19:32,083 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0770, 1.2594, 1.2236, 0.6306, 1.2241, 1.0241, 0.0614, 1.1322],
+ device='cuda:0'), covar=tensor([0.0231, 0.0205, 0.0185, 0.0304, 0.0220, 0.0532, 0.0488, 0.0177],
+ device='cuda:0'), in_proj_covar=tensor([0.0389, 0.0326, 0.0273, 0.0381, 0.0314, 0.0472, 0.0356, 0.0352],
+ device='cuda:0'), out_proj_covar=tensor([1.1072e-04, 9.0777e-05, 7.6165e-05, 1.0662e-04, 8.8719e-05, 1.4344e-04,
+ 1.0119e-04, 9.9422e-05], device='cuda:0')
+2023-02-06 11:19:39,013 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.06 vs. limit=5.0
+2023-02-06 11:19:49,366 INFO [train.py:901] (0/4) Epoch 12, batch 400, loss[loss=0.2101, simple_loss=0.2925, pruned_loss=0.06383, over 8025.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3115, pruned_loss=0.08066, over 1393869.62 frames. ], batch size: 22, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:20:14,267 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.417e+02 2.965e+02 3.513e+02 5.511e+02, threshold=5.929e+02, percent-clipped=0.0
+2023-02-06 11:20:19,706 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0670, 1.5838, 6.1643, 2.2676, 5.6508, 5.2231, 5.7646, 5.5517],
+ device='cuda:0'), covar=tensor([0.0436, 0.4125, 0.0238, 0.3001, 0.0736, 0.0700, 0.0359, 0.0413],
+ device='cuda:0'), in_proj_covar=tensor([0.0490, 0.0571, 0.0579, 0.0529, 0.0606, 0.0512, 0.0507, 0.0576],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:20:24,226 INFO [train.py:901] (0/4) Epoch 12, batch 450, loss[loss=0.2554, simple_loss=0.3332, pruned_loss=0.08884, over 8571.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3112, pruned_loss=0.07996, over 1445690.82 frames. ], batch size: 31, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:20:40,992 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:20:43,193 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:20:58,875 INFO [train.py:901] (0/4) Epoch 12, batch 500, loss[loss=0.1937, simple_loss=0.2782, pruned_loss=0.05465, over 7928.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3095, pruned_loss=0.07931, over 1482587.55 frames. ], batch size: 20, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:21:00,672 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-06 11:21:15,394 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9940, 1.5160, 3.4798, 1.5542, 2.4440, 3.8612, 4.0138, 3.3800],
+ device='cuda:0'), covar=tensor([0.1027, 0.1494, 0.0347, 0.1804, 0.0926, 0.0246, 0.0382, 0.0574],
+ device='cuda:0'), in_proj_covar=tensor([0.0266, 0.0296, 0.0261, 0.0293, 0.0273, 0.0237, 0.0343, 0.0291],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:21:19,402 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89443.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:21:22,152 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0774, 1.6037, 4.3538, 1.9488, 2.4063, 4.9024, 4.9692, 4.3167],
+ device='cuda:0'), covar=tensor([0.1134, 0.1552, 0.0264, 0.1767, 0.1048, 0.0206, 0.0353, 0.0552],
+ device='cuda:0'), in_proj_covar=tensor([0.0266, 0.0296, 0.0261, 0.0293, 0.0273, 0.0237, 0.0343, 0.0291],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:21:24,108 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.539e+02 3.031e+02 3.696e+02 8.346e+02, threshold=6.063e+02, percent-clipped=3.0
+2023-02-06 11:21:29,683 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89457.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:21:34,349 INFO [train.py:901] (0/4) Epoch 12, batch 550, loss[loss=0.2128, simple_loss=0.2869, pruned_loss=0.06937, over 7640.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3084, pruned_loss=0.07866, over 1511581.68 frames. ], batch size: 19, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:22:02,579 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:22:09,198 INFO [train.py:901] (0/4) Epoch 12, batch 600, loss[loss=0.1959, simple_loss=0.2781, pruned_loss=0.0569, over 7810.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3094, pruned_loss=0.07879, over 1538717.46 frames. ], batch size: 19, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:22:17,842 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89527.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:22:20,451 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89531.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:22:26,551 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 11:22:34,513 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.630e+02 3.047e+02 3.733e+02 1.036e+03, threshold=6.094e+02, percent-clipped=2.0
+2023-02-06 11:22:41,118 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.70 vs. limit=5.0
+2023-02-06 11:22:44,045 INFO [train.py:901] (0/4) Epoch 12, batch 650, loss[loss=0.2311, simple_loss=0.3117, pruned_loss=0.07528, over 8468.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3096, pruned_loss=0.07836, over 1559230.54 frames. ], batch size: 29, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:22:54,319 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4272, 2.2245, 1.7808, 1.9634, 1.8503, 1.4340, 1.7208, 1.8286],
+ device='cuda:0'), covar=tensor([0.0879, 0.0294, 0.0833, 0.0447, 0.0514, 0.1147, 0.0682, 0.0549],
+ device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0237, 0.0316, 0.0296, 0.0300, 0.0321, 0.0336, 0.0304],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:23:18,861 INFO [train.py:901] (0/4) Epoch 12, batch 700, loss[loss=0.2476, simple_loss=0.3245, pruned_loss=0.08537, over 8514.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3096, pruned_loss=0.07868, over 1574401.74 frames. ], batch size: 26, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:23:22,434 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5188, 1.4350, 2.7203, 1.2078, 2.0383, 2.9012, 3.0981, 2.3319],
+ device='cuda:0'), covar=tensor([0.1204, 0.1560, 0.0467, 0.2182, 0.0956, 0.0395, 0.0629, 0.0859],
+ device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0298, 0.0263, 0.0294, 0.0275, 0.0238, 0.0345, 0.0293],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:23:39,701 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4994, 1.4139, 4.6985, 1.6122, 4.0907, 3.8969, 4.1975, 4.0165],
+ device='cuda:0'), covar=tensor([0.0539, 0.4389, 0.0424, 0.3480, 0.1104, 0.0860, 0.0574, 0.0659],
+ device='cuda:0'), in_proj_covar=tensor([0.0495, 0.0575, 0.0582, 0.0530, 0.0607, 0.0517, 0.0510, 0.0579],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:23:40,391 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89646.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:23:43,622 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.821e+02 3.296e+02 4.031e+02 9.579e+02, threshold=6.593e+02, percent-clipped=5.0
+2023-02-06 11:23:53,828 INFO [train.py:901] (0/4) Epoch 12, batch 750, loss[loss=0.28, simple_loss=0.3513, pruned_loss=0.1044, over 8327.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3096, pruned_loss=0.07884, over 1585502.85 frames. ], batch size: 25, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:23:54,685 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8084, 2.3098, 1.7701, 2.7461, 1.3253, 1.5777, 1.7695, 2.3024],
+ device='cuda:0'), covar=tensor([0.0841, 0.0727, 0.1006, 0.0374, 0.1140, 0.1402, 0.1065, 0.0735],
+ device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0212, 0.0254, 0.0213, 0.0217, 0.0251, 0.0257, 0.0219],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 11:23:59,424 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0289, 3.9779, 2.5613, 2.5458, 2.7663, 2.0221, 2.6181, 2.8733],
+ device='cuda:0'), covar=tensor([0.1498, 0.0246, 0.0802, 0.0771, 0.0648, 0.1179, 0.1122, 0.1041],
+ device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0241, 0.0319, 0.0300, 0.0303, 0.0325, 0.0340, 0.0308],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:24:11,501 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 11:24:17,636 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:24:20,262 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 11:24:23,777 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7193, 2.3012, 3.4068, 2.5442, 2.9227, 2.5574, 2.1040, 1.8544],
+ device='cuda:0'), covar=tensor([0.3437, 0.3980, 0.1098, 0.2591, 0.1983, 0.2047, 0.1643, 0.3837],
+ device='cuda:0'), in_proj_covar=tensor([0.0884, 0.0865, 0.0733, 0.0840, 0.0925, 0.0795, 0.0700, 0.0764],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:24:28,197 INFO [train.py:901] (0/4) Epoch 12, batch 800, loss[loss=0.2243, simple_loss=0.3178, pruned_loss=0.06539, over 8293.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3093, pruned_loss=0.07884, over 1594602.75 frames. ], batch size: 23, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:24:32,957 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5667, 1.5843, 2.9134, 1.1921, 2.1047, 3.1937, 3.3960, 2.3374],
+ device='cuda:0'), covar=tensor([0.1485, 0.1641, 0.0445, 0.2592, 0.1037, 0.0439, 0.0533, 0.1140],
+ device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0300, 0.0263, 0.0296, 0.0275, 0.0239, 0.0347, 0.0292],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:24:33,180 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 11:24:43,641 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89736.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:24:53,390 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.628e+02 3.285e+02 4.121e+02 9.349e+02, threshold=6.571e+02, percent-clipped=6.0
+2023-02-06 11:24:59,645 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89760.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:25:02,846 INFO [train.py:901] (0/4) Epoch 12, batch 850, loss[loss=0.225, simple_loss=0.3018, pruned_loss=0.07413, over 8137.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3098, pruned_loss=0.07898, over 1602745.83 frames. ], batch size: 22, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:25:17,992 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89785.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:25:19,267 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89787.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:25:28,670 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89801.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:25:32,942 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.54 vs. limit=5.0
+2023-02-06 11:25:37,800 INFO [train.py:901] (0/4) Epoch 12, batch 900, loss[loss=0.2434, simple_loss=0.3007, pruned_loss=0.09306, over 7234.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3093, pruned_loss=0.0788, over 1605532.31 frames. ], batch size: 16, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:03,293 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.446e+02 3.021e+02 3.729e+02 6.397e+02, threshold=6.041e+02, percent-clipped=0.0
+2023-02-06 11:26:03,485 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89851.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:11,297 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6677, 1.8751, 1.5930, 2.2818, 0.9894, 1.4091, 1.5291, 1.8515],
+ device='cuda:0'), covar=tensor([0.0852, 0.0847, 0.1025, 0.0471, 0.1214, 0.1516, 0.1100, 0.0822],
+ device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0213, 0.0255, 0.0215, 0.0218, 0.0252, 0.0258, 0.0218],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 11:26:11,800 INFO [train.py:901] (0/4) Epoch 12, batch 950, loss[loss=0.1906, simple_loss=0.2725, pruned_loss=0.0544, over 8085.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3094, pruned_loss=0.07846, over 1609960.11 frames. ], batch size: 21, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:16,405 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:24,524 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89883.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:33,234 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3999, 1.8972, 2.9945, 2.2080, 2.6754, 2.1770, 1.7484, 1.2772],
+ device='cuda:0'), covar=tensor([0.4296, 0.4452, 0.1200, 0.2866, 0.1938, 0.2381, 0.1797, 0.4547],
+ device='cuda:0'), in_proj_covar=tensor([0.0895, 0.0876, 0.0743, 0.0853, 0.0936, 0.0806, 0.0708, 0.0772],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:26:38,568 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:38,609 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:39,096 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 11:26:46,281 INFO [train.py:901] (0/4) Epoch 12, batch 1000, loss[loss=0.2668, simple_loss=0.3362, pruned_loss=0.09871, over 8370.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3088, pruned_loss=0.07807, over 1612999.79 frames. ], batch size: 24, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:47,819 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89916.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:26:48,419 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6507, 1.6057, 2.2035, 1.7354, 1.1782, 2.1500, 0.3599, 1.3861],
+ device='cuda:0'), covar=tensor([0.2576, 0.1887, 0.0472, 0.1557, 0.4472, 0.0469, 0.3385, 0.1837],
+ device='cuda:0'), in_proj_covar=tensor([0.0170, 0.0172, 0.0103, 0.0218, 0.0259, 0.0107, 0.0166, 0.0169],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 11:26:55,681 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:27:11,392 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.648e+02 3.254e+02 4.081e+02 9.414e+02, threshold=6.507e+02, percent-clipped=7.0
+2023-02-06 11:27:11,418 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 11:27:13,883 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-06 11:27:20,836 INFO [train.py:901] (0/4) Epoch 12, batch 1050, loss[loss=0.208, simple_loss=0.2977, pruned_loss=0.05912, over 8137.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3089, pruned_loss=0.07873, over 1613583.16 frames. ], batch size: 22, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:27:22,337 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0736, 1.1625, 4.3372, 1.5639, 3.6944, 3.6398, 3.9082, 3.7269],
+ device='cuda:0'), covar=tensor([0.0595, 0.4677, 0.0488, 0.3464, 0.1222, 0.0838, 0.0542, 0.0638],
+ device='cuda:0'), in_proj_covar=tensor([0.0501, 0.0586, 0.0593, 0.0543, 0.0620, 0.0528, 0.0520, 0.0592],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:27:24,323 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 11:27:35,822 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89986.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:27:44,987 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-90000.pt
+2023-02-06 11:27:56,226 INFO [train.py:901] (0/4) Epoch 12, batch 1100, loss[loss=0.2659, simple_loss=0.3288, pruned_loss=0.1015, over 8353.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3095, pruned_loss=0.07936, over 1613019.04 frames. ], batch size: 49, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:28:04,623 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2782, 2.4573, 1.7165, 2.0116, 1.9191, 1.3990, 1.7966, 1.8427],
+ device='cuda:0'), covar=tensor([0.1460, 0.0389, 0.1129, 0.0608, 0.0699, 0.1431, 0.0940, 0.0877],
+ device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0240, 0.0316, 0.0297, 0.0301, 0.0323, 0.0336, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:28:15,783 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:28:22,906 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.536e+02 3.046e+02 3.976e+02 6.882e+02, threshold=6.092e+02, percent-clipped=1.0
+2023-02-06 11:28:31,017 INFO [train.py:901] (0/4) Epoch 12, batch 1150, loss[loss=0.281, simple_loss=0.3461, pruned_loss=0.108, over 8188.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3093, pruned_loss=0.07936, over 1616107.91 frames. ], batch size: 23, lr: 6.48e-03, grad_scale: 4.0
+2023-02-06 11:28:34,436 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 11:28:41,704 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 11:28:59,641 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6965, 1.2177, 4.8303, 1.7118, 4.3429, 4.0638, 4.4247, 4.2363],
+ device='cuda:0'), covar=tensor([0.0443, 0.4553, 0.0449, 0.3508, 0.0939, 0.0783, 0.0456, 0.0561],
+ device='cuda:0'), in_proj_covar=tensor([0.0494, 0.0581, 0.0589, 0.0534, 0.0611, 0.0521, 0.0512, 0.0584],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:29:01,091 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90107.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:05,603 INFO [train.py:901] (0/4) Epoch 12, batch 1200, loss[loss=0.2413, simple_loss=0.3226, pruned_loss=0.08002, over 8610.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3096, pruned_loss=0.07909, over 1618355.39 frames. ], batch size: 34, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:29:19,572 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90132.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:30,356 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90148.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:32,957 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.449e+02 3.099e+02 4.282e+02 6.791e+02, threshold=6.197e+02, percent-clipped=4.0
+2023-02-06 11:29:36,546 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90157.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:37,310 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90158.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:41,827 INFO [train.py:901] (0/4) Epoch 12, batch 1250, loss[loss=0.2337, simple_loss=0.2973, pruned_loss=0.08509, over 7830.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3092, pruned_loss=0.07904, over 1615881.46 frames. ], batch size: 18, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:29:47,513 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90172.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:29:55,711 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90183.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:05,433 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90197.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:30:17,132 INFO [train.py:901] (0/4) Epoch 12, batch 1300, loss[loss=0.231, simple_loss=0.2919, pruned_loss=0.08503, over 7663.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3093, pruned_loss=0.07888, over 1616068.16 frames. ], batch size: 19, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:30:26,160 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90227.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:37,500 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90242.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:37,677 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0
+2023-02-06 11:30:43,674 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90250.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:44,949 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.408e+02 3.209e+02 4.069e+02 1.568e+03, threshold=6.418e+02, percent-clipped=9.0
+2023-02-06 11:30:53,239 INFO [train.py:901] (0/4) Epoch 12, batch 1350, loss[loss=0.2117, simple_loss=0.2743, pruned_loss=0.07452, over 7248.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3099, pruned_loss=0.07886, over 1619719.78 frames. ], batch size: 16, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:30:55,575 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90267.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:31:28,606 INFO [train.py:901] (0/4) Epoch 12, batch 1400, loss[loss=0.2373, simple_loss=0.3014, pruned_loss=0.08655, over 8280.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3092, pruned_loss=0.07858, over 1622148.05 frames. ], batch size: 23, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:31:47,888 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90342.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:31:54,627 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.398e+02 2.808e+02 3.540e+02 8.131e+02, threshold=5.617e+02, percent-clipped=1.0
+2023-02-06 11:32:03,610 INFO [train.py:901] (0/4) Epoch 12, batch 1450, loss[loss=0.2481, simple_loss=0.3015, pruned_loss=0.09737, over 7928.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3085, pruned_loss=0.07831, over 1621149.35 frames. ], batch size: 20, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:32:07,900 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90369.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:32:08,439 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 11:32:25,350 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3534, 1.8453, 1.9169, 1.7751, 1.3434, 1.8433, 2.0919, 2.0034],
+ device='cuda:0'), covar=tensor([0.0505, 0.0971, 0.1412, 0.1172, 0.0591, 0.1204, 0.0627, 0.0474],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0152, 0.0191, 0.0158, 0.0103, 0.0163, 0.0116, 0.0137],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:0')
+2023-02-06 11:32:38,140 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90413.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:32:38,602 INFO [train.py:901] (0/4) Epoch 12, batch 1500, loss[loss=0.208, simple_loss=0.2925, pruned_loss=0.06171, over 8187.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.309, pruned_loss=0.07864, over 1621369.64 frames. ], batch size: 23, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:32:55,126 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90438.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:33:04,321 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.462e+02 2.993e+02 3.898e+02 9.256e+02, threshold=5.985e+02, percent-clipped=2.0
+2023-02-06 11:33:12,495 INFO [train.py:901] (0/4) Epoch 12, batch 1550, loss[loss=0.2303, simple_loss=0.3069, pruned_loss=0.07683, over 8032.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3101, pruned_loss=0.0792, over 1622527.63 frames. ], batch size: 22, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:33:21,455 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:33:33,071 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:33:48,752 INFO [train.py:901] (0/4) Epoch 12, batch 1600, loss[loss=0.2243, simple_loss=0.3143, pruned_loss=0.06713, over 8326.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3089, pruned_loss=0.07802, over 1618638.79 frames. ], batch size: 25, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:33:48,906 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90514.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:15,466 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.712e+02 3.378e+02 4.197e+02 8.231e+02, threshold=6.755e+02, percent-clipped=6.0
+2023-02-06 11:34:23,535 INFO [train.py:901] (0/4) Epoch 12, batch 1650, loss[loss=0.2427, simple_loss=0.3116, pruned_loss=0.08688, over 8134.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3082, pruned_loss=0.0776, over 1620098.72 frames. ], batch size: 22, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:34:43,449 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90594.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:47,049 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90598.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:53,820 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:58,215 INFO [train.py:901] (0/4) Epoch 12, batch 1700, loss[loss=0.2766, simple_loss=0.3398, pruned_loss=0.1067, over 8040.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3074, pruned_loss=0.07729, over 1616018.95 frames. ], batch size: 22, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:35:04,326 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90623.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:35:24,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.486e+02 2.952e+02 3.646e+02 6.764e+02, threshold=5.904e+02, percent-clipped=1.0
+2023-02-06 11:35:32,824 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6583, 1.4955, 1.6215, 1.3948, 0.9172, 1.3925, 1.5365, 1.2877],
+ device='cuda:0'), covar=tensor([0.0512, 0.1214, 0.1647, 0.1335, 0.0548, 0.1472, 0.0700, 0.0655],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0152, 0.0191, 0.0159, 0.0103, 0.0163, 0.0117, 0.0138],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-06 11:35:33,331 INFO [train.py:901] (0/4) Epoch 12, batch 1750, loss[loss=0.1947, simple_loss=0.2639, pruned_loss=0.0628, over 7410.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3069, pruned_loss=0.07672, over 1616560.72 frames. ], batch size: 17, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:35:40,293 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6939, 2.3723, 4.5745, 1.4171, 3.3946, 2.3603, 1.8960, 3.2307],
+ device='cuda:0'), covar=tensor([0.1718, 0.2329, 0.0627, 0.3882, 0.1313, 0.2738, 0.1789, 0.1960],
+ device='cuda:0'), in_proj_covar=tensor([0.0486, 0.0519, 0.0536, 0.0584, 0.0617, 0.0558, 0.0472, 0.0615],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 11:35:52,630 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0089, 1.2669, 1.2032, 0.6000, 1.2551, 1.0389, 0.0682, 1.1991],
+ device='cuda:0'), covar=tensor([0.0302, 0.0267, 0.0235, 0.0396, 0.0294, 0.0710, 0.0537, 0.0217],
+ device='cuda:0'), in_proj_covar=tensor([0.0391, 0.0332, 0.0277, 0.0384, 0.0315, 0.0471, 0.0356, 0.0356],
+ device='cuda:0'), out_proj_covar=tensor([1.1124e-04, 9.2123e-05, 7.7253e-05, 1.0763e-04, 8.8811e-05, 1.4259e-04,
+ 1.0116e-04, 1.0021e-04], device='cuda:0')
+2023-02-06 11:35:55,167 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6273, 1.8731, 4.6102, 1.8764, 2.6700, 5.2096, 5.1742, 4.5026],
+ device='cuda:0'), covar=tensor([0.1017, 0.1588, 0.0231, 0.2081, 0.0994, 0.0187, 0.0301, 0.0583],
+ device='cuda:0'), in_proj_covar=tensor([0.0265, 0.0296, 0.0261, 0.0292, 0.0277, 0.0236, 0.0348, 0.0292],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:36:04,029 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90709.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:36:07,408 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90713.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:36:08,045 INFO [train.py:901] (0/4) Epoch 12, batch 1800, loss[loss=0.2408, simple_loss=0.3134, pruned_loss=0.08408, over 7525.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3068, pruned_loss=0.07647, over 1615778.69 frames. ], batch size: 18, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:36:28,905 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2926, 2.3789, 1.7177, 1.9968, 1.9530, 1.4322, 1.7302, 1.8535],
+ device='cuda:0'), covar=tensor([0.1373, 0.0385, 0.1109, 0.0631, 0.0643, 0.1366, 0.0968, 0.0881],
+ device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0244, 0.0321, 0.0303, 0.0305, 0.0328, 0.0344, 0.0312],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 11:36:35,308 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.625e+02 3.119e+02 3.569e+02 7.012e+02, threshold=6.239e+02, percent-clipped=2.0
+2023-02-06 11:36:43,319 INFO [train.py:901] (0/4) Epoch 12, batch 1850, loss[loss=0.2091, simple_loss=0.2894, pruned_loss=0.06437, over 7528.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3068, pruned_loss=0.077, over 1613045.23 frames. ], batch size: 18, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:37:17,706 INFO [train.py:901] (0/4) Epoch 12, batch 1900, loss[loss=0.2486, simple_loss=0.3348, pruned_loss=0.08117, over 8253.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3069, pruned_loss=0.07697, over 1611010.33 frames. ], batch size: 24, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:37:22,438 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90821.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:27,319 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90828.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:38,160 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0
+2023-02-06 11:37:44,436 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.569e+02 3.031e+02 3.632e+02 7.649e+02, threshold=6.063e+02, percent-clipped=2.0
+2023-02-06 11:37:47,232 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 11:37:48,675 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90858.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:50,836 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5779, 1.5450, 1.9888, 1.5348, 1.0260, 1.9861, 0.2327, 1.2949],
+ device='cuda:0'), covar=tensor([0.2519, 0.1776, 0.0583, 0.1669, 0.4296, 0.0577, 0.3449, 0.1930],
+ device='cuda:0'), in_proj_covar=tensor([0.0169, 0.0172, 0.0104, 0.0218, 0.0255, 0.0108, 0.0165, 0.0169],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 11:37:52,146 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:52,635 INFO [train.py:901] (0/4) Epoch 12, batch 1950, loss[loss=0.205, simple_loss=0.2692, pruned_loss=0.07043, over 7668.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3074, pruned_loss=0.07765, over 1609972.42 frames. ], batch size: 19, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:37:54,785 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90867.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:59,365 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 11:38:10,306 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90888.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:38:19,034 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 11:38:27,979 INFO [train.py:901] (0/4) Epoch 12, batch 2000, loss[loss=0.2324, simple_loss=0.3182, pruned_loss=0.07332, over 8107.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3076, pruned_loss=0.07756, over 1611220.33 frames. ], batch size: 23, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:38:43,367 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90936.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:38:54,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.641e+02 3.163e+02 4.034e+02 9.087e+02, threshold=6.326e+02, percent-clipped=9.0
+2023-02-06 11:39:02,893 INFO [train.py:901] (0/4) Epoch 12, batch 2050, loss[loss=0.1957, simple_loss=0.2618, pruned_loss=0.06482, over 7695.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.308, pruned_loss=0.07788, over 1613798.78 frames. ], batch size: 18, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:39:03,755 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90965.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:39:09,912 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:39:21,818 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90990.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:39:38,672 INFO [train.py:901] (0/4) Epoch 12, batch 2100, loss[loss=0.2471, simple_loss=0.3214, pruned_loss=0.08637, over 8471.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3087, pruned_loss=0.07848, over 1612503.58 frames. ], batch size: 27, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:40:04,172 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 2.659e+02 3.265e+02 4.247e+02 8.349e+02, threshold=6.531e+02, percent-clipped=2.0
+2023-02-06 11:40:12,107 INFO [train.py:901] (0/4) Epoch 12, batch 2150, loss[loss=0.2406, simple_loss=0.315, pruned_loss=0.08304, over 8548.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3086, pruned_loss=0.0788, over 1611283.68 frames. ], batch size: 39, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:40:26,820 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91084.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:40:44,038 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:40:47,223 INFO [train.py:901] (0/4) Epoch 12, batch 2200, loss[loss=0.2067, simple_loss=0.2895, pruned_loss=0.06189, over 8122.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3079, pruned_loss=0.07878, over 1610642.85 frames. ], batch size: 22, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:41:13,716 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.751e+02 3.546e+02 4.173e+02 9.054e+02, threshold=7.092e+02, percent-clipped=3.0
+2023-02-06 11:41:21,759 INFO [train.py:901] (0/4) Epoch 12, batch 2250, loss[loss=0.2588, simple_loss=0.3365, pruned_loss=0.09055, over 8327.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3079, pruned_loss=0.0791, over 1603866.01 frames. ], batch size: 26, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:41:41,120 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91192.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:41:54,520 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91211.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:41:56,410 INFO [train.py:901] (0/4) Epoch 12, batch 2300, loss[loss=0.2309, simple_loss=0.2933, pruned_loss=0.0842, over 7801.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3087, pruned_loss=0.07934, over 1606119.60 frames. ], batch size: 19, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:41:58,501 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:07,351 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:23,426 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.635e+02 3.142e+02 4.194e+02 9.102e+02, threshold=6.284e+02, percent-clipped=2.0
+2023-02-06 11:42:25,000 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:31,680 INFO [train.py:901] (0/4) Epoch 12, batch 2350, loss[loss=0.2682, simple_loss=0.3237, pruned_loss=0.1063, over 7932.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3087, pruned_loss=0.07852, over 1614400.78 frames. ], batch size: 20, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:42:57,733 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91303.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:43:05,701 INFO [train.py:901] (0/4) Epoch 12, batch 2400, loss[loss=0.219, simple_loss=0.2974, pruned_loss=0.07032, over 8341.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3077, pruned_loss=0.07798, over 1615081.69 frames. ], batch size: 26, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:43:14,306 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:43:32,241 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.547e+02 3.046e+02 3.774e+02 7.420e+02, threshold=6.092e+02, percent-clipped=3.0
+2023-02-06 11:43:35,834 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2118, 1.1589, 3.3539, 1.0329, 2.9513, 2.8045, 3.0810, 2.9459],
+ device='cuda:0'), covar=tensor([0.0688, 0.3751, 0.0707, 0.3337, 0.1252, 0.0979, 0.0645, 0.0831],
+ device='cuda:0'), in_proj_covar=tensor([0.0498, 0.0580, 0.0588, 0.0536, 0.0609, 0.0524, 0.0518, 0.0583],
+ device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:43:41,049 INFO [train.py:901] (0/4) Epoch 12, batch 2450, loss[loss=0.2841, simple_loss=0.3465, pruned_loss=0.1108, over 8502.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3082, pruned_loss=0.07836, over 1614623.38 frames. ], batch size: 26, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:44:06,928 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7437, 1.9931, 2.1687, 1.3749, 2.3245, 1.3791, 0.7723, 1.9012],
+ device='cuda:0'), covar=tensor([0.0471, 0.0266, 0.0194, 0.0418, 0.0247, 0.0704, 0.0602, 0.0230],
+ device='cuda:0'), in_proj_covar=tensor([0.0397, 0.0332, 0.0280, 0.0384, 0.0317, 0.0473, 0.0362, 0.0359],
+ device='cuda:0'), out_proj_covar=tensor([1.1284e-04, 9.1987e-05, 7.7832e-05, 1.0725e-04, 8.9322e-05, 1.4282e-04,
+ 1.0273e-04, 1.0109e-04], device='cuda:0')
+2023-02-06 11:44:11,811 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3595, 2.0049, 2.8729, 2.2937, 2.6425, 2.1947, 1.7818, 1.3710],
+ device='cuda:0'), covar=tensor([0.3719, 0.3774, 0.1163, 0.2543, 0.1679, 0.2146, 0.1725, 0.4005],
+ device='cuda:0'), in_proj_covar=tensor([0.0883, 0.0871, 0.0740, 0.0850, 0.0937, 0.0804, 0.0704, 0.0765],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 11:44:15,054 INFO [train.py:901] (0/4) Epoch 12, batch 2500, loss[loss=0.1911, simple_loss=0.262, pruned_loss=0.06007, over 7800.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.308, pruned_loss=0.0779, over 1615724.75 frames. ], batch size: 19, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:44:41,745 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.576e+02 3.186e+02 4.386e+02 8.083e+02, threshold=6.372e+02, percent-clipped=11.0
+2023-02-06 11:44:50,272 INFO [train.py:901] (0/4) Epoch 12, batch 2550, loss[loss=0.2593, simple_loss=0.3304, pruned_loss=0.09411, over 8488.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3088, pruned_loss=0.07836, over 1622332.73 frames. ], batch size: 29, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:45:24,410 INFO [train.py:901] (0/4) Epoch 12, batch 2600, loss[loss=0.2348, simple_loss=0.3076, pruned_loss=0.08104, over 8370.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3091, pruned_loss=0.07819, over 1621174.42 frames. ], batch size: 24, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:45:50,006 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.880e+02 3.430e+02 4.544e+02 8.443e+02, threshold=6.860e+02, percent-clipped=9.0
+2023-02-06 11:45:56,363 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91560.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:45:58,890 INFO [train.py:901] (0/4) Epoch 12, batch 2650, loss[loss=0.2152, simple_loss=0.2912, pruned_loss=0.06963, over 7930.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3088, pruned_loss=0.07875, over 1616344.70 frames. ], batch size: 20, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:46:11,887 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91582.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:29,366 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:33,843 INFO [train.py:901] (0/4) Epoch 12, batch 2700, loss[loss=0.2246, simple_loss=0.3037, pruned_loss=0.0728, over 8484.00 frames. ], tot_loss[loss=0.232, simple_loss=0.308, pruned_loss=0.07799, over 1616490.11 frames. ], batch size: 26, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:46:35,603 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-02-06 11:46:55,919 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91647.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:59,281 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.691e+02 3.205e+02 3.908e+02 7.628e+02, threshold=6.410e+02, percent-clipped=2.0
+2023-02-06 11:47:08,024 INFO [train.py:901] (0/4) Epoch 12, batch 2750, loss[loss=0.2255, simple_loss=0.3117, pruned_loss=0.06961, over 8443.00 frames. ], tot_loss[loss=0.231, simple_loss=0.307, pruned_loss=0.07748, over 1614240.70 frames. ], batch size: 24, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:47:43,511 INFO [train.py:901] (0/4) Epoch 12, batch 2800, loss[loss=0.2218, simple_loss=0.2878, pruned_loss=0.07793, over 7649.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3076, pruned_loss=0.07841, over 1614779.33 frames. ], batch size: 19, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:48:08,849 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.634e+02 3.181e+02 3.784e+02 9.192e+02, threshold=6.362e+02, percent-clipped=3.0
+2023-02-06 11:48:13,228 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0
+2023-02-06 11:48:15,796 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91762.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:48:16,915 INFO [train.py:901] (0/4) Epoch 12, batch 2850, loss[loss=0.2538, simple_loss=0.3298, pruned_loss=0.08889, over 8456.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3076, pruned_loss=0.07831, over 1612668.95 frames. ], batch size: 27, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:48:36,284 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-06 11:48:52,949 INFO [train.py:901] (0/4) Epoch 12, batch 2900, loss[loss=0.2531, simple_loss=0.332, pruned_loss=0.08712, over 8519.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3072, pruned_loss=0.07804, over 1612778.92 frames. ], batch size: 28, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:49:18,828 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.538e+02 3.175e+02 3.875e+02 8.885e+02, threshold=6.349e+02, percent-clipped=4.0
+2023-02-06 11:49:22,156 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 11:49:26,762 INFO [train.py:901] (0/4) Epoch 12, batch 2950, loss[loss=0.2358, simple_loss=0.313, pruned_loss=0.07934, over 8097.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.308, pruned_loss=0.07833, over 1614757.16 frames. ], batch size: 23, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:49:49,030 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. limit=2.0
+2023-02-06 11:49:54,040 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91904.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:49:57,389 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:50:00,612 INFO [train.py:901] (0/4) Epoch 12, batch 3000, loss[loss=0.2315, simple_loss=0.3089, pruned_loss=0.077, over 8603.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3073, pruned_loss=0.07823, over 1613887.24 frames.
], batch size: 39, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:50:00,613 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 11:50:12,223 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.4731, 3.3838, 3.1917, 2.0220, 3.1259, 3.0664, 3.2922, 2.7886], + device='cuda:0'), covar=tensor([0.0918, 0.0710, 0.0819, 0.4092, 0.0798, 0.1154, 0.1050, 0.1121], + device='cuda:0'), in_proj_covar=tensor([0.0466, 0.0379, 0.0388, 0.0489, 0.0379, 0.0383, 0.0379, 0.0330], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 11:50:13,629 INFO [train.py:935] (0/4) Epoch 12, validation: loss=0.1868, simple_loss=0.2871, pruned_loss=0.04323, over 944034.00 frames. +2023-02-06 11:50:13,630 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 11:50:40,670 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.361e+02 2.883e+02 3.802e+02 7.578e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-06 11:50:49,095 INFO [train.py:901] (0/4) Epoch 12, batch 3050, loss[loss=0.2566, simple_loss=0.3255, pruned_loss=0.0939, over 8454.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3083, pruned_loss=0.07891, over 1617585.74 frames. ], batch size: 29, lr: 6.41e-03, grad_scale: 8.0 +2023-02-06 11:50:56,129 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:10,055 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3344, 2.6368, 2.9362, 1.4621, 3.1723, 1.7303, 1.5247, 2.1174], + device='cuda:0'), covar=tensor([0.0558, 0.0269, 0.0217, 0.0553, 0.0330, 0.0663, 0.0649, 0.0322], + device='cuda:0'), in_proj_covar=tensor([0.0395, 0.0331, 0.0280, 0.0387, 0.0320, 0.0475, 0.0358, 0.0357], + device='cuda:0'), out_proj_covar=tensor([1.1218e-04, 9.1945e-05, 7.7681e-05, 1.0791e-04, 8.9865e-05, 1.4358e-04, + 1.0141e-04, 1.0051e-04], device='cuda:0') +2023-02-06 11:51:14,055 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91999.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:51:14,720 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-92000.pt +2023-02-06 11:51:25,121 INFO [train.py:901] (0/4) Epoch 12, batch 3100, loss[loss=0.2058, simple_loss=0.296, pruned_loss=0.05785, over 8107.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3077, pruned_loss=0.07808, over 1621148.75 frames. ], batch size: 23, lr: 6.41e-03, grad_scale: 8.0 +2023-02-06 11:51:28,135 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:28,802 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92019.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:51:45,803 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:51,735 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.663e+02 3.347e+02 4.142e+02 7.838e+02, threshold=6.695e+02, percent-clipped=5.0 +2023-02-06 11:52:01,129 INFO [train.py:901] (0/4) Epoch 12, batch 3150, loss[loss=0.3255, simple_loss=0.3632, pruned_loss=0.1438, over 6917.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3076, pruned_loss=0.07813, over 1620680.47 frames. 
], batch size: 71, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:52:18,313 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7003, 1.5176, 4.8422, 1.7280, 4.3213, 3.9543, 4.4011, 4.2127], + device='cuda:0'), covar=tensor([0.0473, 0.4188, 0.0386, 0.3503, 0.0998, 0.0804, 0.0508, 0.0581], + device='cuda:0'), in_proj_covar=tensor([0.0500, 0.0577, 0.0591, 0.0539, 0.0619, 0.0528, 0.0521, 0.0586], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 11:52:35,767 INFO [train.py:901] (0/4) Epoch 12, batch 3200, loss[loss=0.2071, simple_loss=0.2941, pruned_loss=0.06011, over 8356.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3071, pruned_loss=0.07777, over 1614115.28 frames. ], batch size: 24, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:52:44,075 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0 +2023-02-06 11:53:02,007 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.674e+02 3.226e+02 3.971e+02 7.397e+02, threshold=6.453e+02, percent-clipped=3.0 +2023-02-06 11:53:10,360 INFO [train.py:901] (0/4) Epoch 12, batch 3250, loss[loss=0.1909, simple_loss=0.2657, pruned_loss=0.05804, over 7546.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3074, pruned_loss=0.07815, over 1612086.93 frames. ], batch size: 18, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:46,146 INFO [train.py:901] (0/4) Epoch 12, batch 3300, loss[loss=0.2064, simple_loss=0.2934, pruned_loss=0.05973, over 8359.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.307, pruned_loss=0.07731, over 1615094.69 frames. ], batch size: 24, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:54:11,040 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.357e+02 2.935e+02 3.680e+02 6.719e+02, threshold=5.870e+02, percent-clipped=1.0 +2023-02-06 11:54:11,765 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:54:19,116 INFO [train.py:901] (0/4) Epoch 12, batch 3350, loss[loss=0.2323, simple_loss=0.3142, pruned_loss=0.07517, over 7816.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3091, pruned_loss=0.07794, over 1620060.59 frames. ], batch size: 20, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:54:27,369 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92275.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:54:45,189 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92300.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:54:55,343 INFO [train.py:901] (0/4) Epoch 12, batch 3400, loss[loss=0.1989, simple_loss=0.2894, pruned_loss=0.05417, over 8324.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3084, pruned_loss=0.07754, over 1616294.59 frames. 
], batch size: 25, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:54:57,470 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:55:15,992 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92343.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:55:21,867 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.364e+02 2.893e+02 3.659e+02 6.777e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 11:55:26,204 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9165, 2.6896, 3.6363, 1.9654, 1.7817, 3.5269, 0.7636, 2.0441], + device='cuda:0'), covar=tensor([0.2566, 0.1539, 0.0298, 0.3065, 0.4447, 0.0393, 0.4056, 0.2457], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0175, 0.0104, 0.0221, 0.0259, 0.0109, 0.0167, 0.0169], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 11:55:26,341 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.47 vs. limit=5.0 +2023-02-06 11:55:29,930 INFO [train.py:901] (0/4) Epoch 12, batch 3450, loss[loss=0.25, simple_loss=0.3268, pruned_loss=0.08665, over 8240.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3087, pruned_loss=0.07757, over 1620254.31 frames. ], batch size: 24, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:55:32,853 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92368.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:55:34,186 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2102, 1.8802, 2.6169, 2.2648, 2.5461, 2.1290, 1.7228, 1.1745], + device='cuda:0'), covar=tensor([0.3797, 0.3675, 0.1343, 0.2473, 0.1771, 0.2289, 0.1652, 0.4021], + device='cuda:0'), in_proj_covar=tensor([0.0890, 0.0870, 0.0732, 0.0845, 0.0934, 0.0804, 0.0705, 0.0766], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 11:55:52,772 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 11:56:04,892 INFO [train.py:901] (0/4) Epoch 12, batch 3500, loss[loss=0.2637, simple_loss=0.329, pruned_loss=0.09927, over 7979.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3081, pruned_loss=0.07737, over 1615880.29 frames. ], batch size: 21, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:56:18,391 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92432.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:56:29,126 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 11:56:33,046 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.438e+02 2.928e+02 3.742e+02 8.211e+02, threshold=5.856e+02, percent-clipped=5.0 +2023-02-06 11:56:36,511 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92458.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:56:40,246 INFO [train.py:901] (0/4) Epoch 12, batch 3550, loss[loss=0.2371, simple_loss=0.3097, pruned_loss=0.08219, over 7817.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3088, pruned_loss=0.07772, over 1617618.50 frames. ], batch size: 20, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:57:11,660 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. 
limit=2.0 +2023-02-06 11:57:14,698 INFO [train.py:901] (0/4) Epoch 12, batch 3600, loss[loss=0.1834, simple_loss=0.2688, pruned_loss=0.04899, over 8191.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3089, pruned_loss=0.07753, over 1615842.61 frames. ], batch size: 23, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:57:29,923 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-06 11:57:38,956 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.08 vs. limit=5.0 +2023-02-06 11:57:42,450 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.628e+02 3.055e+02 4.234e+02 9.851e+02, threshold=6.109e+02, percent-clipped=7.0 +2023-02-06 11:57:50,891 INFO [train.py:901] (0/4) Epoch 12, batch 3650, loss[loss=0.2198, simple_loss=0.2862, pruned_loss=0.0767, over 7926.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3073, pruned_loss=0.07752, over 1610797.73 frames. ], batch size: 20, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:58:10,859 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6648, 4.6991, 4.2023, 1.9275, 4.1947, 4.1932, 4.2899, 3.9743], + device='cuda:0'), covar=tensor([0.0668, 0.0463, 0.0887, 0.4793, 0.0710, 0.0911, 0.1161, 0.0829], + device='cuda:0'), in_proj_covar=tensor([0.0468, 0.0377, 0.0388, 0.0488, 0.0380, 0.0383, 0.0379, 0.0332], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 11:58:23,913 INFO [train.py:901] (0/4) Epoch 12, batch 3700, loss[loss=0.2766, simple_loss=0.3331, pruned_loss=0.11, over 7928.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3092, pruned_loss=0.07847, over 1615997.65 frames. ], batch size: 20, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:58:28,530 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 11:58:30,697 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92624.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:44,254 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92643.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:48,471 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:50,889 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.476e+02 3.116e+02 4.152e+02 8.400e+02, threshold=6.233e+02, percent-clipped=9.0 +2023-02-06 11:58:54,981 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5852, 1.6264, 4.3231, 1.8238, 2.4406, 4.9399, 4.9791, 4.3211], + device='cuda:0'), covar=tensor([0.1021, 0.1734, 0.0272, 0.2246, 0.1159, 0.0208, 0.0313, 0.0582], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0302, 0.0264, 0.0298, 0.0280, 0.0240, 0.0354, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 11:58:57,877 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3604, 1.9236, 2.9064, 2.2573, 2.6943, 2.2139, 1.8086, 1.4340], + device='cuda:0'), covar=tensor([0.4204, 0.4418, 0.1249, 0.2897, 0.2039, 0.2344, 0.1847, 0.4294], + device='cuda:0'), in_proj_covar=tensor([0.0896, 0.0874, 0.0731, 0.0854, 0.0939, 0.0809, 0.0706, 0.0770], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 11:58:59,706 INFO [train.py:901] (0/4) Epoch 12, batch 3750, loss[loss=0.1965, simple_loss=0.2686, pruned_loss=0.0622, over 7525.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3074, pruned_loss=0.07751, over 1612350.31 frames. ], batch size: 18, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:59:01,856 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3704, 1.6275, 4.5252, 1.6316, 4.0526, 3.7498, 4.0907, 3.9773], + device='cuda:0'), covar=tensor([0.0500, 0.3974, 0.0521, 0.3525, 0.1115, 0.0909, 0.0517, 0.0594], + device='cuda:0'), in_proj_covar=tensor([0.0503, 0.0579, 0.0600, 0.0540, 0.0620, 0.0533, 0.0524, 0.0585], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 11:59:17,063 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:28,223 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6903, 2.0064, 2.2373, 1.1849, 2.3289, 1.4123, 0.7050, 1.7075], + device='cuda:0'), covar=tensor([0.0451, 0.0224, 0.0162, 0.0443, 0.0245, 0.0706, 0.0594, 0.0263], + device='cuda:0'), in_proj_covar=tensor([0.0408, 0.0337, 0.0286, 0.0400, 0.0328, 0.0489, 0.0365, 0.0367], + device='cuda:0'), out_proj_covar=tensor([1.1572e-04, 9.3237e-05, 7.9089e-05, 1.1163e-04, 9.1948e-05, 1.4776e-04, + 1.0352e-04, 1.0316e-04], device='cuda:0') +2023-02-06 11:59:34,439 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:34,943 INFO [train.py:901] (0/4) Epoch 12, batch 3800, loss[loss=0.2354, simple_loss=0.3209, pruned_loss=0.07497, over 8597.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.306, pruned_loss=0.0769, over 1613108.52 frames. 
], batch size: 31, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:59:35,188 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92714.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:59:52,016 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:52,764 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92739.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:00:02,136 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.558e+02 2.972e+02 3.756e+02 9.318e+02, threshold=5.944e+02, percent-clipped=5.0 +2023-02-06 12:00:09,493 INFO [train.py:901] (0/4) Epoch 12, batch 3850, loss[loss=0.2023, simple_loss=0.284, pruned_loss=0.06026, over 8584.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3065, pruned_loss=0.07706, over 1612521.94 frames. ], batch size: 31, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 12:00:33,580 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 12:00:45,119 INFO [train.py:901] (0/4) Epoch 12, batch 3900, loss[loss=0.1977, simple_loss=0.2804, pruned_loss=0.05745, over 7816.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3059, pruned_loss=0.0769, over 1610584.35 frames. ], batch size: 20, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 12:01:08,848 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:01:11,295 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.538e+02 2.989e+02 3.922e+02 7.912e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 12:01:17,562 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0687, 2.2371, 1.7953, 2.9001, 1.4964, 1.5247, 2.0388, 2.3947], + device='cuda:0'), covar=tensor([0.0736, 0.0928, 0.0997, 0.0419, 0.1141, 0.1507, 0.1047, 0.0696], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0212, 0.0254, 0.0215, 0.0216, 0.0251, 0.0256, 0.0216], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 12:01:19,448 INFO [train.py:901] (0/4) Epoch 12, batch 3950, loss[loss=0.1863, simple_loss=0.2637, pruned_loss=0.05446, over 7791.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3058, pruned_loss=0.07699, over 1607390.17 frames. ], batch size: 19, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:01:53,518 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-06 12:01:54,553 INFO [train.py:901] (0/4) Epoch 12, batch 4000, loss[loss=0.2408, simple_loss=0.3244, pruned_loss=0.07863, over 8534.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3062, pruned_loss=0.07688, over 1613355.69 frames. 
], batch size: 49, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:01:56,834 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:00,211 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8475, 3.8105, 3.4860, 1.6710, 3.4198, 3.3622, 3.5003, 3.2162], + device='cuda:0'), covar=tensor([0.0863, 0.0643, 0.0999, 0.4671, 0.0816, 0.1112, 0.1255, 0.0869], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0376, 0.0386, 0.0483, 0.0376, 0.0380, 0.0374, 0.0332], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:02:05,009 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7231, 2.0231, 3.2634, 1.4657, 2.4657, 1.9910, 1.8006, 2.2985], + device='cuda:0'), covar=tensor([0.1584, 0.2202, 0.0697, 0.3683, 0.1436, 0.2775, 0.1759, 0.2194], + device='cuda:0'), in_proj_covar=tensor([0.0489, 0.0527, 0.0538, 0.0581, 0.0622, 0.0560, 0.0476, 0.0614], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:02:08,139 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0733, 1.6871, 3.4421, 1.4696, 2.4026, 3.8414, 3.8557, 3.2444], + device='cuda:0'), covar=tensor([0.1095, 0.1515, 0.0350, 0.2158, 0.1032, 0.0238, 0.0436, 0.0636], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0301, 0.0265, 0.0297, 0.0278, 0.0241, 0.0354, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:02:18,336 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:20,901 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.373e+02 3.059e+02 3.649e+02 8.513e+02, threshold=6.118e+02, percent-clipped=6.0 +2023-02-06 12:02:28,380 INFO [train.py:901] (0/4) Epoch 12, batch 4050, loss[loss=0.2243, simple_loss=0.304, pruned_loss=0.07231, over 8458.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3077, pruned_loss=0.07765, over 1616211.13 frames. ], batch size: 27, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:02:44,175 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:48,258 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:57,115 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1859, 2.1691, 2.4804, 1.9157, 1.8163, 2.4727, 1.1540, 2.0084], + device='cuda:0'), covar=tensor([0.2735, 0.1413, 0.0460, 0.1843, 0.2620, 0.0540, 0.2842, 0.1655], + device='cuda:0'), in_proj_covar=tensor([0.0170, 0.0174, 0.0103, 0.0216, 0.0253, 0.0107, 0.0162, 0.0168], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 12:03:03,714 INFO [train.py:901] (0/4) Epoch 12, batch 4100, loss[loss=0.2374, simple_loss=0.3102, pruned_loss=0.08232, over 8142.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3085, pruned_loss=0.07836, over 1614351.40 frames. 
], batch size: 22, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:03:04,548 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0141, 1.4935, 1.5883, 1.2104, 0.9556, 1.3914, 1.8104, 1.8895], + device='cuda:0'), covar=tensor([0.0485, 0.1326, 0.1762, 0.1481, 0.0617, 0.1534, 0.0673, 0.0521], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0153, 0.0194, 0.0160, 0.0104, 0.0164, 0.0117, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:03:13,897 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:03:21,022 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-06 12:03:30,615 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.418e+02 3.048e+02 3.757e+02 7.047e+02, threshold=6.097e+02, percent-clipped=3.0 +2023-02-06 12:03:31,415 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:03:37,938 INFO [train.py:901] (0/4) Epoch 12, batch 4150, loss[loss=0.1978, simple_loss=0.2775, pruned_loss=0.05905, over 7810.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3076, pruned_loss=0.07746, over 1616789.49 frames. ], batch size: 20, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:03:50,877 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:04,580 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:05,524 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 12:04:12,248 INFO [train.py:901] (0/4) Epoch 12, batch 4200, loss[loss=0.2144, simple_loss=0.2909, pruned_loss=0.06897, over 7532.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3068, pruned_loss=0.07719, over 1613237.01 frames. ], batch size: 18, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:04:13,210 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6821, 2.1774, 3.5582, 2.5175, 3.0640, 2.3520, 2.0125, 1.6666], + device='cuda:0'), covar=tensor([0.4031, 0.4715, 0.1271, 0.3313, 0.2153, 0.2454, 0.1737, 0.4664], + device='cuda:0'), in_proj_covar=tensor([0.0885, 0.0864, 0.0720, 0.0845, 0.0927, 0.0796, 0.0699, 0.0758], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:04:24,925 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 12:04:26,408 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:29,266 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8817, 1.9063, 2.3323, 1.9461, 1.3897, 2.3789, 0.3764, 1.4391], + device='cuda:0'), covar=tensor([0.2648, 0.1860, 0.0490, 0.1553, 0.4002, 0.0493, 0.3469, 0.2261], + device='cuda:0'), in_proj_covar=tensor([0.0169, 0.0172, 0.0101, 0.0214, 0.0252, 0.0107, 0.0161, 0.0167], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 12:04:40,123 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.563e+02 2.943e+02 3.717e+02 8.503e+02, threshold=5.885e+02, percent-clipped=3.0 +2023-02-06 12:04:47,442 INFO [train.py:901] (0/4) Epoch 12, batch 4250, loss[loss=0.2447, simple_loss=0.3221, pruned_loss=0.08364, over 8248.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.307, pruned_loss=0.07727, over 1613399.47 frames. ], batch size: 24, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:04:48,809 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 12:04:53,107 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-06 12:05:06,762 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:09,364 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93197.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:21,310 INFO [train.py:901] (0/4) Epoch 12, batch 4300, loss[loss=0.2347, simple_loss=0.315, pruned_loss=0.07721, over 8354.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3071, pruned_loss=0.07733, over 1616374.95 frames. ], batch size: 24, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:05:22,134 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7083, 1.3910, 4.8674, 1.8032, 4.3047, 4.0463, 4.3802, 4.2195], + device='cuda:0'), covar=tensor([0.0454, 0.4166, 0.0377, 0.3226, 0.0946, 0.0762, 0.0480, 0.0528], + device='cuda:0'), in_proj_covar=tensor([0.0501, 0.0580, 0.0594, 0.0537, 0.0615, 0.0527, 0.0519, 0.0581], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:05:48,576 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.718e+02 3.236e+02 4.116e+02 1.260e+03, threshold=6.473e+02, percent-clipped=7.0 +2023-02-06 12:05:54,511 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93261.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:56,509 INFO [train.py:901] (0/4) Epoch 12, batch 4350, loss[loss=0.1719, simple_loss=0.2508, pruned_loss=0.0465, over 7556.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.307, pruned_loss=0.07732, over 1615280.72 frames. ], batch size: 18, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:06:15,789 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:16,409 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 12:06:25,902 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:29,907 INFO [train.py:901] (0/4) Epoch 12, batch 4400, loss[loss=0.271, simple_loss=0.3431, pruned_loss=0.09943, over 8634.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3076, pruned_loss=0.07775, over 1617587.66 frames. ], batch size: 34, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:06:46,133 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:51,665 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1819, 3.0360, 2.3539, 2.5631, 2.5119, 2.1201, 2.4064, 2.7794], + device='cuda:0'), covar=tensor([0.1023, 0.0306, 0.0731, 0.0505, 0.0522, 0.0935, 0.0765, 0.0674], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0234, 0.0313, 0.0296, 0.0297, 0.0323, 0.0341, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:06:58,349 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.551e+02 2.995e+02 3.715e+02 7.484e+02, threshold=5.990e+02, percent-clipped=1.0 +2023-02-06 12:06:58,369 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 12:07:01,786 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:05,648 INFO [train.py:901] (0/4) Epoch 12, batch 4450, loss[loss=0.1936, simple_loss=0.2761, pruned_loss=0.05554, over 8250.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3069, pruned_loss=0.07744, over 1611604.42 frames. ], batch size: 24, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:07:11,753 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:14,525 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:19,341 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:29,365 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:36,045 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93408.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:39,924 INFO [train.py:901] (0/4) Epoch 12, batch 4500, loss[loss=0.2254, simple_loss=0.308, pruned_loss=0.07138, over 8362.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3071, pruned_loss=0.07781, over 1609764.44 frames. ], batch size: 26, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:07:46,172 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1343, 3.9613, 2.5466, 2.9374, 2.8149, 1.9526, 2.6589, 3.0825], + device='cuda:0'), covar=tensor([0.1333, 0.0274, 0.0823, 0.0596, 0.0646, 0.1153, 0.0924, 0.0923], + device='cuda:0'), in_proj_covar=tensor([0.0339, 0.0231, 0.0310, 0.0293, 0.0295, 0.0320, 0.0337, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:07:50,706 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 12:08:01,527 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3985, 2.3836, 1.7748, 2.0541, 1.8913, 1.4026, 1.7485, 1.8343], + device='cuda:0'), covar=tensor([0.1150, 0.0317, 0.0863, 0.0465, 0.0630, 0.1177, 0.0923, 0.0768], + device='cuda:0'), in_proj_covar=tensor([0.0340, 0.0233, 0.0312, 0.0294, 0.0297, 0.0321, 0.0338, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:08:06,004 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:06,469 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.576e+02 3.193e+02 4.187e+02 6.619e+02, threshold=6.386e+02, percent-clipped=4.0 +2023-02-06 12:08:06,701 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:13,859 INFO [train.py:901] (0/4) Epoch 12, batch 4550, loss[loss=0.2088, simple_loss=0.2974, pruned_loss=0.06007, over 8476.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3066, pruned_loss=0.07744, over 1607504.76 frames. ], batch size: 29, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:08:24,170 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:24,981 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:30,575 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9284, 2.3667, 4.5017, 1.5595, 3.1011, 2.5059, 2.0343, 2.9642], + device='cuda:0'), covar=tensor([0.1533, 0.2261, 0.0701, 0.3710, 0.1524, 0.2373, 0.1679, 0.2193], + device='cuda:0'), in_proj_covar=tensor([0.0485, 0.0523, 0.0530, 0.0577, 0.0613, 0.0549, 0.0472, 0.0611], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:08:31,876 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93487.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:49,629 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:50,182 INFO [train.py:901] (0/4) Epoch 12, batch 4600, loss[loss=0.2329, simple_loss=0.3034, pruned_loss=0.08117, over 8702.00 frames. ], tot_loss[loss=0.23, simple_loss=0.306, pruned_loss=0.07695, over 1607968.85 frames. ], batch size: 34, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:09:16,605 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.455e+02 3.020e+02 4.052e+02 9.299e+02, threshold=6.041e+02, percent-clipped=5.0 +2023-02-06 12:09:21,186 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 12:09:24,867 INFO [train.py:901] (0/4) Epoch 12, batch 4650, loss[loss=0.2611, simple_loss=0.3402, pruned_loss=0.09099, over 8188.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3061, pruned_loss=0.07716, over 1609704.26 frames. 
], batch size: 23, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:09:25,070 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:41,891 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:45,161 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:59,875 INFO [train.py:901] (0/4) Epoch 12, batch 4700, loss[loss=0.2283, simple_loss=0.3031, pruned_loss=0.07676, over 7658.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3049, pruned_loss=0.07656, over 1605649.06 frames. ], batch size: 19, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:10:12,708 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:20,112 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2841, 1.8685, 2.7436, 2.2006, 2.5972, 2.2009, 1.7665, 1.0900], + device='cuda:0'), covar=tensor([0.4282, 0.4252, 0.1262, 0.2630, 0.1789, 0.2213, 0.1821, 0.4205], + device='cuda:0'), in_proj_covar=tensor([0.0893, 0.0872, 0.0725, 0.0851, 0.0935, 0.0802, 0.0700, 0.0767], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:10:21,470 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6110, 2.2084, 3.2706, 2.6489, 3.0660, 2.4709, 1.8912, 1.7317], + device='cuda:0'), covar=tensor([0.3969, 0.4516, 0.1226, 0.2724, 0.1944, 0.2194, 0.1831, 0.4404], + device='cuda:0'), in_proj_covar=tensor([0.0893, 0.0871, 0.0725, 0.0851, 0.0934, 0.0802, 0.0699, 0.0767], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:10:26,597 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.370e+02 2.939e+02 3.568e+02 8.447e+02, threshold=5.879e+02, percent-clipped=4.0 +2023-02-06 12:10:29,423 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93657.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:33,908 INFO [train.py:901] (0/4) Epoch 12, batch 4750, loss[loss=0.2122, simple_loss=0.2955, pruned_loss=0.06447, over 8135.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3055, pruned_loss=0.07636, over 1610311.84 frames. ], batch size: 22, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:10:34,135 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:51,753 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93689.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:00,473 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 12:11:02,149 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-06 12:11:02,490 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 12:11:04,759 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:09,471 INFO [train.py:901] (0/4) Epoch 12, batch 4800, loss[loss=0.2754, simple_loss=0.3514, pruned_loss=0.09966, over 8601.00 frames. 
], tot_loss[loss=0.227, simple_loss=0.3038, pruned_loss=0.07514, over 1612202.32 frames. ], batch size: 49, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:11:19,234 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8985, 2.1356, 1.7742, 2.6158, 1.2037, 1.5696, 1.7496, 2.2707], + device='cuda:0'), covar=tensor([0.0723, 0.0772, 0.0966, 0.0382, 0.1081, 0.1221, 0.0871, 0.0625], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0210, 0.0253, 0.0214, 0.0215, 0.0249, 0.0255, 0.0217], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 12:11:22,630 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:28,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7249, 1.7415, 2.8181, 1.3046, 2.1271, 3.0514, 3.1283, 2.5352], + device='cuda:0'), covar=tensor([0.1040, 0.1213, 0.0362, 0.1961, 0.0830, 0.0313, 0.0583, 0.0710], + device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0299, 0.0264, 0.0295, 0.0279, 0.0240, 0.0353, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:11:30,147 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:36,835 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.519e+02 2.967e+02 3.635e+02 7.460e+02, threshold=5.934e+02, percent-clipped=2.0 +2023-02-06 12:11:44,130 INFO [train.py:901] (0/4) Epoch 12, batch 4850, loss[loss=0.265, simple_loss=0.3323, pruned_loss=0.0988, over 8247.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3048, pruned_loss=0.07575, over 1614189.59 frames. ], batch size: 22, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:11:47,091 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93768.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:47,784 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:53,077 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 12:12:00,551 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:04,716 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:18,636 INFO [train.py:901] (0/4) Epoch 12, batch 4900, loss[loss=0.2137, simple_loss=0.2971, pruned_loss=0.06514, over 8074.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3057, pruned_loss=0.0762, over 1618311.87 frames. ], batch size: 21, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:12:42,681 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:45,724 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.389e+02 2.920e+02 3.679e+02 7.315e+02, threshold=5.841e+02, percent-clipped=3.0 +2023-02-06 12:12:53,907 INFO [train.py:901] (0/4) Epoch 12, batch 4950, loss[loss=0.212, simple_loss=0.2868, pruned_loss=0.06857, over 8189.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3056, pruned_loss=0.07631, over 1620630.82 frames. 
], batch size: 23, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:12:57,976 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3633, 2.8535, 2.4088, 4.0961, 1.9047, 1.9630, 2.4032, 3.1560], + device='cuda:0'), covar=tensor([0.0845, 0.0937, 0.0985, 0.0203, 0.1059, 0.1400, 0.1033, 0.0774], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0213, 0.0256, 0.0216, 0.0216, 0.0252, 0.0259, 0.0220], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 12:13:00,009 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:13:08,875 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.84 vs. limit=2.0 +2023-02-06 12:13:12,884 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 12:13:27,056 INFO [train.py:901] (0/4) Epoch 12, batch 5000, loss[loss=0.2032, simple_loss=0.2798, pruned_loss=0.06328, over 7977.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3065, pruned_loss=0.07752, over 1619739.75 frames. ], batch size: 21, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:13:55,395 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.574e+02 3.082e+02 3.748e+02 7.333e+02, threshold=6.165e+02, percent-clipped=4.0 +2023-02-06 12:14:02,966 INFO [train.py:901] (0/4) Epoch 12, batch 5050, loss[loss=0.3399, simple_loss=0.3812, pruned_loss=0.1493, over 8365.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3069, pruned_loss=0.07709, over 1622035.44 frames. ], batch size: 24, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:14:27,668 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:14:28,288 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-94000.pt +2023-02-06 12:14:31,137 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 12:14:38,585 INFO [train.py:901] (0/4) Epoch 12, batch 5100, loss[loss=0.2616, simple_loss=0.3382, pruned_loss=0.09254, over 8315.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.307, pruned_loss=0.07739, over 1617643.47 frames. ], batch size: 25, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:05,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.576e+02 2.962e+02 4.029e+02 5.912e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-06 12:15:13,507 INFO [train.py:901] (0/4) Epoch 12, batch 5150, loss[loss=0.2098, simple_loss=0.2885, pruned_loss=0.06555, over 7413.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3066, pruned_loss=0.0774, over 1617254.56 frames. ], batch size: 17, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:33,463 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0 +2023-02-06 12:15:46,325 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4862, 2.3609, 4.1712, 1.2696, 2.9981, 1.9741, 1.6759, 2.7578], + device='cuda:0'), covar=tensor([0.1936, 0.2347, 0.0708, 0.4526, 0.1538, 0.3115, 0.2097, 0.2311], + device='cuda:0'), in_proj_covar=tensor([0.0492, 0.0526, 0.0535, 0.0587, 0.0618, 0.0558, 0.0478, 0.0617], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:15:47,487 INFO [train.py:901] (0/4) Epoch 12, batch 5200, loss[loss=0.226, simple_loss=0.3147, pruned_loss=0.06871, over 8454.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3072, pruned_loss=0.07777, over 1614406.41 frames. ], batch size: 27, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:58,650 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5052, 2.9104, 2.4266, 4.0495, 1.8266, 2.1808, 2.2624, 3.2956], + device='cuda:0'), covar=tensor([0.0766, 0.0893, 0.0989, 0.0274, 0.1148, 0.1436, 0.1233, 0.0753], + device='cuda:0'), in_proj_covar=tensor([0.0237, 0.0216, 0.0258, 0.0219, 0.0220, 0.0256, 0.0263, 0.0224], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 12:15:59,923 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:16:00,729 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7562, 1.6771, 2.6809, 1.3464, 2.1029, 2.8835, 2.8985, 2.4927], + device='cuda:0'), covar=tensor([0.0935, 0.1265, 0.0510, 0.1861, 0.1066, 0.0315, 0.0664, 0.0609], + device='cuda:0'), in_proj_covar=tensor([0.0267, 0.0296, 0.0262, 0.0291, 0.0275, 0.0238, 0.0349, 0.0292], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:16:14,668 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.571e+02 3.074e+02 4.467e+02 8.286e+02, threshold=6.149e+02, percent-clipped=7.0 +2023-02-06 12:16:21,923 INFO [train.py:901] (0/4) Epoch 12, batch 5250, loss[loss=0.2611, simple_loss=0.3288, pruned_loss=0.09673, over 8329.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3074, pruned_loss=0.07831, over 1615667.94 frames. ], batch size: 25, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:16:25,911 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 12:16:57,674 INFO [train.py:901] (0/4) Epoch 12, batch 5300, loss[loss=0.1894, simple_loss=0.2683, pruned_loss=0.05528, over 7532.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3073, pruned_loss=0.07816, over 1615096.71 frames. ], batch size: 18, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:17:13,243 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-02-06 12:17:15,480 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94241.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:17:19,500 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:17:23,397 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.628e+02 3.237e+02 4.138e+02 9.258e+02, threshold=6.473e+02, percent-clipped=5.0 +2023-02-06 12:17:31,605 INFO [train.py:901] (0/4) Epoch 12, batch 5350, loss[loss=0.2326, simple_loss=0.3181, pruned_loss=0.07353, over 8792.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3073, pruned_loss=0.07758, over 1617170.96 frames. ], batch size: 30, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:17:42,437 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6401, 2.0240, 3.2058, 1.4194, 2.4528, 1.9940, 1.7190, 2.4292], + device='cuda:0'), covar=tensor([0.1582, 0.1949, 0.0636, 0.3513, 0.1458, 0.2519, 0.1674, 0.1900], + device='cuda:0'), in_proj_covar=tensor([0.0487, 0.0521, 0.0532, 0.0582, 0.0614, 0.0555, 0.0475, 0.0613], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:17:55,696 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2405, 1.6885, 4.4037, 1.7925, 2.5249, 4.9519, 5.0323, 4.3589], + device='cuda:0'), covar=tensor([0.1190, 0.1710, 0.0316, 0.2182, 0.1078, 0.0230, 0.0362, 0.0572], + device='cuda:0'), in_proj_covar=tensor([0.0266, 0.0298, 0.0261, 0.0292, 0.0274, 0.0236, 0.0348, 0.0291], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:18:05,202 INFO [train.py:901] (0/4) Epoch 12, batch 5400, loss[loss=0.2581, simple_loss=0.3356, pruned_loss=0.09031, over 8194.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3075, pruned_loss=0.07815, over 1618894.72 frames. ], batch size: 23, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:18:25,476 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:18:32,239 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.485e+02 2.978e+02 4.110e+02 9.009e+02, threshold=5.957e+02, percent-clipped=6.0 +2023-02-06 12:18:39,966 INFO [train.py:901] (0/4) Epoch 12, batch 5450, loss[loss=0.2225, simple_loss=0.3062, pruned_loss=0.06935, over 8094.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3071, pruned_loss=0.07747, over 1614873.22 frames. ], batch size: 23, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:18:40,770 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2880, 2.1801, 1.7360, 1.9619, 1.8054, 1.4060, 1.7141, 1.6292], + device='cuda:0'), covar=tensor([0.1177, 0.0315, 0.0911, 0.0474, 0.0623, 0.1266, 0.0792, 0.0759], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0232, 0.0311, 0.0293, 0.0296, 0.0320, 0.0334, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:18:41,415 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:19:12,392 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 12:19:15,756 INFO [train.py:901] (0/4) Epoch 12, batch 5500, loss[loss=0.2246, simple_loss=0.3113, pruned_loss=0.06896, over 8626.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3057, pruned_loss=0.07653, over 1613934.53 frames. ], batch size: 31, lr: 6.33e-03, grad_scale: 16.0 +2023-02-06 12:19:28,061 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3759, 1.6361, 4.1433, 2.0377, 2.4388, 4.7702, 4.7584, 4.1600], + device='cuda:0'), covar=tensor([0.0975, 0.1570, 0.0286, 0.1688, 0.1091, 0.0191, 0.0392, 0.0516], + device='cuda:0'), in_proj_covar=tensor([0.0267, 0.0297, 0.0260, 0.0291, 0.0274, 0.0235, 0.0347, 0.0290], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:19:43,155 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.406e+02 2.798e+02 3.361e+02 6.650e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-06 12:19:45,370 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:19:46,246 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 12:19:49,122 INFO [train.py:901] (0/4) Epoch 12, batch 5550, loss[loss=0.2296, simple_loss=0.3027, pruned_loss=0.07824, over 7931.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3064, pruned_loss=0.07725, over 1610480.95 frames. ], batch size: 20, lr: 6.33e-03, grad_scale: 4.0 +2023-02-06 12:20:16,669 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:20:17,399 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1466, 1.8452, 2.6145, 2.0556, 2.2844, 2.0448, 1.7078, 1.1684], + device='cuda:0'), covar=tensor([0.4162, 0.3880, 0.1320, 0.2835, 0.2220, 0.2498, 0.1849, 0.4064], + device='cuda:0'), in_proj_covar=tensor([0.0893, 0.0874, 0.0730, 0.0856, 0.0945, 0.0804, 0.0705, 0.0767], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:20:24,042 INFO [train.py:901] (0/4) Epoch 12, batch 5600, loss[loss=0.2235, simple_loss=0.3134, pruned_loss=0.06676, over 8465.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3072, pruned_loss=0.0773, over 1614241.86 frames. ], batch size: 25, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:20:35,223 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:20:54,480 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.675e+02 3.313e+02 4.214e+02 1.006e+03, threshold=6.626e+02, percent-clipped=7.0 +2023-02-06 12:21:00,666 INFO [train.py:901] (0/4) Epoch 12, batch 5650, loss[loss=0.2407, simple_loss=0.3122, pruned_loss=0.08459, over 8535.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3077, pruned_loss=0.07723, over 1620231.34 frames. ], batch size: 39, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:21:15,232 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94585.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:21:21,326 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 12:21:35,768 INFO [train.py:901] (0/4) Epoch 12, batch 5700, loss[loss=0.2332, simple_loss=0.3193, pruned_loss=0.07359, over 8468.00 frames. 
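The `WARNING [train.py:1067]` lines (e.g. the 31.02-second cut just above, and the 0.92-second one further down) show the data pipeline rejecting utterances whose duration falls outside the range the model is trained on: everything much shorter than about 1 s or longer than about 20 s is dropped before batching. A sketch of such a filter over lhotse-style cuts (the exact bounds and the helper name are assumptions):

```python
import logging

def remove_short_and_long_utt(cuts, min_dur=1.0, max_dur=20.0):
    """Drop utterances outside [min_dur, max_dur] seconds before training.

    `cuts` is any collection with a .filter(predicate) method whose
    elements expose .id and .duration (e.g. a lhotse CutSet).
    """
    def keep(cut):
        if min_dur <= cut.duration <= max_dur:
            return True
        # Mirrors the log format: 'Exclude cut with ID ... Duration: ...'
        logging.warning(
            f"Exclude cut with ID {cut.id} from training. Duration: {cut.duration}"
        )
        return False

    return cuts.filter(keep)
```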
], tot_loss[loss=0.2311, simple_loss=0.3075, pruned_loss=0.07736, over 1614552.14 frames. ], batch size: 27, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:04,729 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.327e+02 3.036e+02 3.801e+02 7.493e+02, threshold=6.072e+02, percent-clipped=2.0 +2023-02-06 12:22:10,802 INFO [train.py:901] (0/4) Epoch 12, batch 5750, loss[loss=0.1856, simple_loss=0.2594, pruned_loss=0.0559, over 7648.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3081, pruned_loss=0.07769, over 1613343.91 frames. ], batch size: 19, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:25,646 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7499, 1.6163, 3.0011, 1.2576, 2.1173, 3.2919, 3.4219, 2.7514], + device='cuda:0'), covar=tensor([0.1226, 0.1560, 0.0395, 0.2221, 0.1054, 0.0296, 0.0460, 0.0730], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0299, 0.0260, 0.0292, 0.0277, 0.0237, 0.0347, 0.0292], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:22:26,190 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 12:22:35,729 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94700.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:22:42,475 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:22:45,146 INFO [train.py:901] (0/4) Epoch 12, batch 5800, loss[loss=0.2376, simple_loss=0.3052, pruned_loss=0.08496, over 7404.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3077, pruned_loss=0.07726, over 1613457.03 frames. ], batch size: 17, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:45,331 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94714.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:23:02,544 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94739.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:23:13,643 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.575e+02 3.288e+02 4.021e+02 7.847e+02, threshold=6.576e+02, percent-clipped=2.0 +2023-02-06 12:23:19,972 INFO [train.py:901] (0/4) Epoch 12, batch 5850, loss[loss=0.2724, simple_loss=0.3382, pruned_loss=0.1033, over 8553.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.309, pruned_loss=0.07855, over 1615017.88 frames. ], batch size: 31, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:23:54,273 INFO [train.py:901] (0/4) Epoch 12, batch 5900, loss[loss=0.221, simple_loss=0.3059, pruned_loss=0.06805, over 8241.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3088, pruned_loss=0.07821, over 1615541.46 frames. 
], batch size: 24, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:24:01,747 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:24:01,821 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0758, 1.8062, 2.4747, 2.0508, 2.3221, 2.0111, 1.6433, 0.9829], + device='cuda:0'), covar=tensor([0.4236, 0.3997, 0.1311, 0.2590, 0.1861, 0.2247, 0.1734, 0.4106], + device='cuda:0'), in_proj_covar=tensor([0.0892, 0.0880, 0.0732, 0.0856, 0.0939, 0.0803, 0.0706, 0.0769], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:24:08,300 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4164, 1.1941, 4.5113, 1.6554, 3.9950, 3.7915, 4.0509, 3.9420], + device='cuda:0'), covar=tensor([0.0448, 0.4519, 0.0448, 0.3658, 0.1007, 0.0830, 0.0494, 0.0590], + device='cuda:0'), in_proj_covar=tensor([0.0513, 0.0581, 0.0596, 0.0544, 0.0623, 0.0535, 0.0525, 0.0584], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:24:22,270 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.604e+02 3.248e+02 4.213e+02 6.479e+02, threshold=6.496e+02, percent-clipped=0.0 +2023-02-06 12:24:28,377 INFO [train.py:901] (0/4) Epoch 12, batch 5950, loss[loss=0.2533, simple_loss=0.3263, pruned_loss=0.09009, over 8455.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3082, pruned_loss=0.07818, over 1612519.39 frames. ], batch size: 27, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:25:03,798 INFO [train.py:901] (0/4) Epoch 12, batch 6000, loss[loss=0.2466, simple_loss=0.3288, pruned_loss=0.08226, over 8106.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3074, pruned_loss=0.07768, over 1615021.03 frames. ], batch size: 23, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:25:03,799 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 12:25:16,950 INFO [train.py:935] (0/4) Epoch 12, validation: loss=0.1862, simple_loss=0.286, pruned_loss=0.04318, over 944034.00 frames. +2023-02-06 12:25:16,952 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 12:25:44,732 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.442e+02 2.970e+02 3.787e+02 9.017e+02, threshold=5.940e+02, percent-clipped=3.0 +2023-02-06 12:25:45,511 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94956.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:25:50,751 INFO [train.py:901] (0/4) Epoch 12, batch 6050, loss[loss=0.2421, simple_loss=0.3238, pruned_loss=0.08024, over 8545.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3073, pruned_loss=0.07768, over 1615107.67 frames. 
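Periodically (here at batch 6000, and again at the start of epoch 13 further down) training pauses for a validation pass: `train.py:926` announces it, `train.py:935` reports the dev loss over 944034 frames, and `train.py:936` reports peak GPU memory. A compact sketch of that pattern, with `compute_loss` standing in for the recipe's actual loss function (an assumed helper, as is the exact log wording):

```python
import logging
import torch

def run_validation(model, valid_loader, device):
    """One pass over the dev set, mirroring the 'Computing validation
    loss' / 'validation: loss=...' / 'Maximum memory allocated' lines."""
    model.eval()
    tot_loss, tot_frames = 0.0, 0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = compute_loss(model, batch, device)  # assumed helper
            tot_loss += loss.item()
            tot_frames += num_frames
    model.train()  # back to training mode; the log resumes with batch 6050

    peak_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
    logging.info(
        f"validation: loss={tot_loss / tot_frames:.4f}, over {tot_frames} frames."
    )
    logging.info(f"Maximum memory allocated so far is {peak_mb}MB")
```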
], batch size: 31, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:26:00,455 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1247, 1.9749, 2.1093, 1.8097, 1.1939, 1.8076, 2.2712, 2.5497], + device='cuda:0'), covar=tensor([0.0430, 0.1102, 0.1533, 0.1289, 0.0587, 0.1410, 0.0624, 0.0500], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0192, 0.0158, 0.0103, 0.0162, 0.0115, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:26:02,522 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94981.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:26:25,550 INFO [train.py:901] (0/4) Epoch 12, batch 6100, loss[loss=0.2167, simple_loss=0.2986, pruned_loss=0.06739, over 7796.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3076, pruned_loss=0.07771, over 1616338.38 frames. ], batch size: 19, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:26:54,026 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 12:26:54,675 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.590e+02 3.216e+02 4.301e+02 8.648e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 12:27:00,775 INFO [train.py:901] (0/4) Epoch 12, batch 6150, loss[loss=0.2239, simple_loss=0.3171, pruned_loss=0.06531, over 8084.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3079, pruned_loss=0.0783, over 1614470.52 frames. ], batch size: 21, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:27:12,217 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95081.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:27:29,632 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95106.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:27:34,683 INFO [train.py:901] (0/4) Epoch 12, batch 6200, loss[loss=0.2356, simple_loss=0.3207, pruned_loss=0.07527, over 8699.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.308, pruned_loss=0.07846, over 1615588.51 frames. 
], batch size: 34, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:27:41,099 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95123.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:27:46,636 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2031, 1.4757, 1.7304, 1.3522, 0.9709, 1.4504, 1.8466, 1.8713], + device='cuda:0'), covar=tensor([0.0467, 0.1212, 0.1726, 0.1371, 0.0601, 0.1456, 0.0615, 0.0552], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0151, 0.0191, 0.0157, 0.0102, 0.0161, 0.0114, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:28:04,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.519e+02 2.980e+02 3.798e+02 7.393e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 12:28:07,870 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7700, 1.3820, 1.5199, 1.2720, 0.9044, 1.3832, 1.5360, 1.4738], + device='cuda:0'), covar=tensor([0.0525, 0.1223, 0.1792, 0.1470, 0.0623, 0.1569, 0.0677, 0.0612], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0151, 0.0190, 0.0157, 0.0102, 0.0161, 0.0114, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:28:10,300 INFO [train.py:901] (0/4) Epoch 12, batch 6250, loss[loss=0.1975, simple_loss=0.2851, pruned_loss=0.0549, over 8458.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3088, pruned_loss=0.07918, over 1612867.68 frames. ], batch size: 25, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:28:18,457 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5509, 1.7205, 1.8208, 1.2502, 1.8960, 1.3120, 0.8500, 1.6076], + device='cuda:0'), covar=tensor([0.0338, 0.0223, 0.0136, 0.0316, 0.0223, 0.0481, 0.0504, 0.0204], + device='cuda:0'), in_proj_covar=tensor([0.0397, 0.0330, 0.0282, 0.0391, 0.0325, 0.0478, 0.0360, 0.0359], + device='cuda:0'), out_proj_covar=tensor([1.1204e-04, 9.0673e-05, 7.7960e-05, 1.0886e-04, 9.0804e-05, 1.4387e-04, + 1.0150e-04, 1.0065e-04], device='cuda:0') +2023-02-06 12:28:43,825 INFO [train.py:901] (0/4) Epoch 12, batch 6300, loss[loss=0.2387, simple_loss=0.3298, pruned_loss=0.07374, over 8450.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.3096, pruned_loss=0.0798, over 1607661.41 frames. ], batch size: 27, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:29:13,414 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.657e+02 3.224e+02 4.358e+02 1.571e+03, threshold=6.448e+02, percent-clipped=5.0 +2023-02-06 12:29:20,980 INFO [train.py:901] (0/4) Epoch 12, batch 6350, loss[loss=0.2123, simple_loss=0.3084, pruned_loss=0.05812, over 8496.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3103, pruned_loss=0.07956, over 1611355.98 frames. ], batch size: 26, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:29:30,713 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:36,272 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:55,396 INFO [train.py:901] (0/4) Epoch 12, batch 6400, loss[loss=0.2171, simple_loss=0.3005, pruned_loss=0.06682, over 8107.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3066, pruned_loss=0.07728, over 1611469.52 frames. 
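The `zipformer.py:2431` dumps above ("attn_weights_entropy = tensor([...])", with per-layer covariances alongside) are a health check on the self-attention heads: the entropy of each head's attention distribution, tracked so that heads collapsing onto a single position (entropy near zero) or going uniform (entropy near log of the sequence length) stand out. Icefall's exact computation is not shown in this log, but per-head entropy itself is straightforward; a sketch:

```python
import torch

def attn_entropy_per_head(attn_weights, eps=1e-20):
    """Mean entropy of the attention distribution of each head.

    attn_weights: (num_heads, tgt_len, src_len), rows summing to 1.
    """
    p = attn_weights.clamp(min=eps)
    entropy = -(p * p.log()).sum(dim=-1)  # (num_heads, tgt_len)
    return entropy.mean(dim=-1)           # one value per head

# A uniform 8-head attention over 100 positions has entropy log(100) ~ 4.6:
w = torch.full((8, 100, 100), 1.0 / 100)
print(attn_entropy_per_head(w))
```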
], batch size: 23, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:30:03,906 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3399, 2.9588, 3.5183, 2.5758, 1.9717, 3.6444, 0.8238, 2.4342], + device='cuda:0'), covar=tensor([0.1640, 0.1178, 0.0420, 0.1946, 0.3538, 0.0270, 0.3280, 0.1574], + device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0172, 0.0103, 0.0216, 0.0255, 0.0107, 0.0164, 0.0166], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 12:30:23,572 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.403e+02 2.937e+02 3.904e+02 6.682e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 12:30:25,092 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:30:29,660 INFO [train.py:901] (0/4) Epoch 12, batch 6450, loss[loss=0.206, simple_loss=0.2798, pruned_loss=0.06604, over 7653.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.306, pruned_loss=0.07674, over 1610992.98 frames. ], batch size: 19, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:30:32,396 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9081, 1.4591, 3.2991, 1.2843, 2.3236, 3.5187, 3.7017, 3.0480], + device='cuda:0'), covar=tensor([0.1086, 0.1702, 0.0350, 0.2083, 0.0951, 0.0272, 0.0435, 0.0588], + device='cuda:0'), in_proj_covar=tensor([0.0265, 0.0296, 0.0257, 0.0288, 0.0271, 0.0235, 0.0345, 0.0286], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 12:31:05,025 INFO [train.py:901] (0/4) Epoch 12, batch 6500, loss[loss=0.306, simple_loss=0.3672, pruned_loss=0.1224, over 8610.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3057, pruned_loss=0.0766, over 1611975.29 frames. ], batch size: 39, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:18,807 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-06 12:31:22,443 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2396, 2.2282, 1.6639, 1.9526, 1.6978, 1.3201, 1.5211, 1.6482], + device='cuda:0'), covar=tensor([0.1157, 0.0331, 0.1063, 0.0494, 0.0672, 0.1381, 0.0912, 0.0758], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0236, 0.0317, 0.0301, 0.0303, 0.0323, 0.0342, 0.0300], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:31:31,886 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.431e+02 2.857e+02 3.846e+02 1.801e+03, threshold=5.713e+02, percent-clipped=8.0 +2023-02-06 12:31:37,945 INFO [train.py:901] (0/4) Epoch 12, batch 6550, loss[loss=0.2103, simple_loss=0.2962, pruned_loss=0.06218, over 8525.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3063, pruned_loss=0.07749, over 1606435.77 frames. 
], batch size: 26, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:40,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95467.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:32:04,346 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3377, 1.3355, 1.5310, 1.2830, 0.7089, 1.4202, 1.2941, 1.1651], + device='cuda:0'), covar=tensor([0.0530, 0.1212, 0.1671, 0.1363, 0.0565, 0.1432, 0.0622, 0.0631], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0151, 0.0190, 0.0157, 0.0102, 0.0161, 0.0114, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:32:06,859 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 12:32:13,529 INFO [train.py:901] (0/4) Epoch 12, batch 6600, loss[loss=0.2435, simple_loss=0.3243, pruned_loss=0.08132, over 8291.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3066, pruned_loss=0.07704, over 1610519.09 frames. ], batch size: 23, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:25,783 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 12:32:40,269 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.522e+02 3.078e+02 3.913e+02 8.021e+02, threshold=6.157e+02, percent-clipped=7.0 +2023-02-06 12:32:46,211 INFO [train.py:901] (0/4) Epoch 12, batch 6650, loss[loss=0.252, simple_loss=0.3233, pruned_loss=0.09031, over 8465.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.309, pruned_loss=0.07867, over 1607390.06 frames. ], batch size: 25, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:59,137 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95582.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:33:05,225 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0437, 1.9256, 2.0122, 1.9994, 1.0555, 1.9259, 2.2080, 2.3090], + device='cuda:0'), covar=tensor([0.0392, 0.0991, 0.1520, 0.1108, 0.0539, 0.1232, 0.0590, 0.0490], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0151, 0.0190, 0.0157, 0.0102, 0.0161, 0.0114, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:33:21,233 INFO [train.py:901] (0/4) Epoch 12, batch 6700, loss[loss=0.272, simple_loss=0.3249, pruned_loss=0.1096, over 7919.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3079, pruned_loss=0.07838, over 1607998.49 frames. 
], batch size: 20, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:33:26,826 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:33,696 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:37,111 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:43,773 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9913, 2.2929, 1.9230, 2.8234, 1.3502, 1.6197, 1.9363, 2.4664], + device='cuda:0'), covar=tensor([0.0696, 0.0770, 0.0979, 0.0428, 0.1165, 0.1402, 0.1081, 0.0694], + device='cuda:0'), in_proj_covar=tensor([0.0236, 0.0214, 0.0255, 0.0218, 0.0215, 0.0255, 0.0261, 0.0221], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 12:33:50,488 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 2.656e+02 3.142e+02 4.011e+02 7.522e+02, threshold=6.284e+02, percent-clipped=4.0 +2023-02-06 12:33:56,566 INFO [train.py:901] (0/4) Epoch 12, batch 6750, loss[loss=0.1956, simple_loss=0.2635, pruned_loss=0.06388, over 7430.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3066, pruned_loss=0.07733, over 1607484.95 frames. ], batch size: 17, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:22,171 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95701.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:24,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1947, 1.8013, 2.5068, 2.0324, 2.2993, 2.1043, 1.7349, 1.1142], + device='cuda:0'), covar=tensor([0.4153, 0.4026, 0.1319, 0.2794, 0.1835, 0.2203, 0.1762, 0.4047], + device='cuda:0'), in_proj_covar=tensor([0.0894, 0.0883, 0.0736, 0.0867, 0.0941, 0.0811, 0.0708, 0.0775], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:34:30,620 INFO [train.py:901] (0/4) Epoch 12, batch 6800, loss[loss=0.2279, simple_loss=0.3046, pruned_loss=0.07563, over 7971.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3072, pruned_loss=0.07743, over 1609896.40 frames. ], batch size: 21, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:40,704 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 12:34:47,118 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:53,229 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:00,265 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.510e+02 2.822e+02 3.564e+02 9.162e+02, threshold=5.644e+02, percent-clipped=3.0 +2023-02-06 12:35:06,296 INFO [train.py:901] (0/4) Epoch 12, batch 6850, loss[loss=0.2447, simple_loss=0.3052, pruned_loss=0.09211, over 7548.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3063, pruned_loss=0.07696, over 1609702.22 frames. ], batch size: 18, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:26,874 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 12:35:32,367 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2399, 1.2922, 1.4924, 1.2185, 0.7222, 1.3104, 1.1257, 1.1254], + device='cuda:0'), covar=tensor([0.0547, 0.1221, 0.1714, 0.1391, 0.0584, 0.1490, 0.0693, 0.0634], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0159, 0.0103, 0.0162, 0.0115, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 12:35:40,360 INFO [train.py:901] (0/4) Epoch 12, batch 6900, loss[loss=0.2074, simple_loss=0.2824, pruned_loss=0.06627, over 7820.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3071, pruned_loss=0.07721, over 1612655.77 frames. ], batch size: 20, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:41,928 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:51,215 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95830.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:35:57,273 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95838.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:36:03,155 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2993, 1.9255, 2.6803, 2.1581, 2.4902, 2.1841, 1.8052, 1.2266], + device='cuda:0'), covar=tensor([0.3730, 0.3732, 0.1216, 0.2706, 0.1827, 0.2112, 0.1622, 0.4228], + device='cuda:0'), in_proj_covar=tensor([0.0887, 0.0878, 0.0731, 0.0858, 0.0935, 0.0806, 0.0704, 0.0770], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:36:08,028 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.693e+02 3.422e+02 4.342e+02 1.062e+03, threshold=6.843e+02, percent-clipped=12.0 +2023-02-06 12:36:14,243 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95863.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:36:14,702 INFO [train.py:901] (0/4) Epoch 12, batch 6950, loss[loss=0.2134, simple_loss=0.2994, pruned_loss=0.06371, over 7981.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3075, pruned_loss=0.0774, over 1611755.39 frames. ], batch size: 21, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:36:15,461 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:36:34,591 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 12:36:47,613 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0471, 2.7151, 3.1155, 1.3211, 3.2185, 1.7297, 1.5415, 2.1183], + device='cuda:0'), covar=tensor([0.0696, 0.0245, 0.0278, 0.0668, 0.0310, 0.0818, 0.0654, 0.0414], + device='cuda:0'), in_proj_covar=tensor([0.0398, 0.0332, 0.0284, 0.0394, 0.0326, 0.0487, 0.0363, 0.0365], + device='cuda:0'), out_proj_covar=tensor([1.1241e-04, 9.1129e-05, 7.8523e-05, 1.0925e-04, 9.1131e-05, 1.4664e-04, + 1.0241e-04, 1.0220e-04], device='cuda:0') +2023-02-06 12:36:48,726 INFO [train.py:901] (0/4) Epoch 12, batch 7000, loss[loss=0.2697, simple_loss=0.3436, pruned_loss=0.09792, over 8202.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3084, pruned_loss=0.07814, over 1614175.84 frames. 
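The very frequent `zipformer.py:1185` lines ("warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop=...") come from stochastic depth in the encoder stacks: each stack randomly skips whole layers, aggressively inside its own staggered warmup window (note the different warmup_begin values, 666.7 / 1333.3 / 2000.0 / ...) and then at a small residual rate, which is why `num_to_drop=1` still shows up at batch_count ~ 95000, long after warmup. A sketch of such a schedule (the ramp shape and the two rates are assumptions, not the recipe's exact values):

```python
import random

def choose_layers_to_drop(batch_count, num_layers,
                          warmup_begin=666.7, warmup_end=1333.3,
                          initial_rate=0.5, final_rate=0.05):
    """Pick encoder layers to skip for this batch (stochastic depth).

    The drop probability ramps linearly from initial_rate down to
    final_rate between warmup_begin and warmup_end (in batches), then
    stays at final_rate, so an occasional layer is dropped even late
    in training.
    """
    if batch_count <= warmup_begin:
        rate = initial_rate
    elif batch_count >= warmup_end:
        rate = final_rate
    else:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        rate = initial_rate + frac * (final_rate - initial_rate)
    return {i for i in range(num_layers) if random.random() < rate}

# Long after warmup, each layer is dropped ~5% of the time:
print(choose_layers_to_drop(batch_count=95000.0, num_layers=4))
```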
], batch size: 23, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:17,422 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.501e+02 3.116e+02 3.850e+02 8.001e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-06 12:37:23,279 INFO [train.py:901] (0/4) Epoch 12, batch 7050, loss[loss=0.2283, simple_loss=0.3138, pruned_loss=0.07147, over 8454.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3076, pruned_loss=0.07761, over 1615113.77 frames. ], batch size: 27, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:24,149 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8710, 1.7533, 2.5101, 1.5264, 1.1646, 2.5026, 0.3860, 1.3636], + device='cuda:0'), covar=tensor([0.2446, 0.1953, 0.0391, 0.2547, 0.4945, 0.0436, 0.3668, 0.2163], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0176, 0.0105, 0.0219, 0.0258, 0.0110, 0.0167, 0.0168], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 12:37:34,000 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:44,053 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:46,010 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8036, 1.4544, 3.3502, 1.3031, 2.3546, 3.7529, 3.7420, 3.2048], + device='cuda:0'), covar=tensor([0.1138, 0.1645, 0.0371, 0.2197, 0.1000, 0.0223, 0.0505, 0.0549], + device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0297, 0.0261, 0.0290, 0.0273, 0.0235, 0.0347, 0.0286], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 12:37:47,385 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:48,733 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-96000.pt +2023-02-06 12:37:50,531 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:58,499 INFO [train.py:901] (0/4) Epoch 12, batch 7100, loss[loss=0.216, simple_loss=0.2999, pruned_loss=0.06603, over 8287.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3076, pruned_loss=0.07753, over 1615345.87 frames. ], batch size: 23, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:01,399 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:06,974 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:26,967 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.539e+02 3.029e+02 4.080e+02 8.783e+02, threshold=6.058e+02, percent-clipped=4.0 +2023-02-06 12:38:33,144 INFO [train.py:901] (0/4) Epoch 12, batch 7150, loss[loss=0.2127, simple_loss=0.2813, pruned_loss=0.07207, over 7973.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3071, pruned_loss=0.0771, over 1614352.94 frames. 
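`checkpoint.py:75` fires on round-numbered batch indices (checkpoint-96000.pt just above, plus epoch-12.pt at the end of the epoch further down): a rolling mid-epoch checkpoint every fixed number of batches, in addition to one checkpoint per epoch. A sketch of the mid-epoch part (the interval and the dict layout are illustrative):

```python
import torch

def maybe_save_checkpoint(model, optimizer, exp_dir, batch_idx_train,
                          save_every_n=4000):
    """Write exp_dir/checkpoint-<batch>.pt every save_every_n batches,
    matching names like .../exp/v1/checkpoint-96000.pt in the log."""
    if batch_idx_train == 0 or batch_idx_train % save_every_n != 0:
        return
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "batch_idx_train": batch_idx_train,
        },
        f"{exp_dir}/checkpoint-{batch_idx_train}.pt",
    )
```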
], batch size: 21, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:38,700 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:42,266 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 12:38:54,774 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:56,849 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:58,158 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:39:08,658 INFO [train.py:901] (0/4) Epoch 12, batch 7200, loss[loss=0.2036, simple_loss=0.2666, pruned_loss=0.07032, over 7444.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3061, pruned_loss=0.07633, over 1616706.18 frames. ], batch size: 17, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:39:10,832 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4736, 1.9610, 3.2892, 1.2528, 2.4621, 1.9673, 1.5512, 2.3599], + device='cuda:0'), covar=tensor([0.1849, 0.2333, 0.0806, 0.4188, 0.1767, 0.2960, 0.2040, 0.2415], + device='cuda:0'), in_proj_covar=tensor([0.0489, 0.0528, 0.0536, 0.0579, 0.0621, 0.0554, 0.0476, 0.0613], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:39:36,232 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.446e+02 3.002e+02 3.633e+02 6.248e+02, threshold=6.005e+02, percent-clipped=1.0 +2023-02-06 12:39:42,865 INFO [train.py:901] (0/4) Epoch 12, batch 7250, loss[loss=0.2104, simple_loss=0.2972, pruned_loss=0.06178, over 8307.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.305, pruned_loss=0.07572, over 1612717.54 frames. ], batch size: 25, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:39:49,509 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96174.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:39:55,424 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:40:06,506 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 12:40:14,127 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:40:15,131 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-06 12:40:17,290 INFO [train.py:901] (0/4) Epoch 12, batch 7300, loss[loss=0.2501, simple_loss=0.3204, pruned_loss=0.08985, over 7541.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3048, pruned_loss=0.07653, over 1606941.04 frames. ], batch size: 18, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:40:45,402 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.567e+02 3.297e+02 4.044e+02 1.170e+03, threshold=6.593e+02, percent-clipped=7.0 +2023-02-06 12:40:51,432 INFO [train.py:901] (0/4) Epoch 12, batch 7350, loss[loss=0.2142, simple_loss=0.2975, pruned_loss=0.06539, over 7963.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3045, pruned_loss=0.0763, over 1604190.37 frames. 
], batch size: 21, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:41:09,292 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96289.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:41:15,847 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 12:41:26,501 INFO [train.py:901] (0/4) Epoch 12, batch 7400, loss[loss=0.2604, simple_loss=0.3424, pruned_loss=0.08914, over 8597.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3061, pruned_loss=0.07673, over 1610052.26 frames. ], batch size: 39, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:41:33,416 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:36,511 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 12:41:47,016 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:47,086 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:52,319 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:55,369 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.475e+02 3.182e+02 4.307e+02 9.281e+02, threshold=6.365e+02, percent-clipped=3.0 +2023-02-06 12:42:01,561 INFO [train.py:901] (0/4) Epoch 12, batch 7450, loss[loss=0.2521, simple_loss=0.3261, pruned_loss=0.0891, over 8510.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3066, pruned_loss=0.07753, over 1608229.21 frames. ], batch size: 26, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:42:09,242 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:42:15,384 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 12:42:31,406 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96408.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:42:35,240 INFO [train.py:901] (0/4) Epoch 12, batch 7500, loss[loss=0.2253, simple_loss=0.306, pruned_loss=0.07232, over 8372.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.307, pruned_loss=0.07798, over 1612066.26 frames. ], batch size: 24, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:42:47,518 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 12:42:54,771 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:43:03,962 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.684e+02 3.354e+02 4.069e+02 8.964e+02, threshold=6.707e+02, percent-clipped=7.0 +2023-02-06 12:43:05,492 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:43:09,793 INFO [train.py:901] (0/4) Epoch 12, batch 7550, loss[loss=0.2755, simple_loss=0.3432, pruned_loss=0.1039, over 8464.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3077, pruned_loss=0.07871, over 1614045.45 frames. 
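The `scaling.py:679` lines ("Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0") monitor how far a layer's activations are from having a white (identity-like) covariance within each channel group; the module only intervenes when the metric exceeds the limit, and in this stretch it mostly stays comfortably below it. One plausible form of such a metric is the normalized second moment of the per-group covariance, which equals 1.0 for perfectly white features; sketched below as a reconstruction, not icefall's verbatim code:

```python
import torch

def whitening_metric(x, num_groups):
    """How far activations are from 'white' (identity-scaled covariance).

    x: (num_frames, num_channels); channels are split into num_groups.
    Returns ~1.0 for white features, larger when a group's covariance
    is far from a multiple of the identity.
    """
    n, c = x.shape
    d = c // num_groups
    x = x.reshape(n, num_groups, d).transpose(0, 1)        # (groups, n, d)
    covar = torch.matmul(x.transpose(1, 2), x) / n         # (groups, d, d)
    mean_diag = covar.diagonal(dim1=1, dim2=2).mean(dim=-1)
    metric = (covar ** 2).sum(dim=(1, 2)) / (d * mean_diag ** 2)
    return metric.mean()

print(whitening_metric(torch.randn(1000, 96), num_groups=8))  # ~1 for noise
```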
], batch size: 25, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:43:42,979 INFO [train.py:901] (0/4) Epoch 12, batch 7600, loss[loss=0.2157, simple_loss=0.2876, pruned_loss=0.07191, over 7659.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3075, pruned_loss=0.07862, over 1613041.17 frames. ], batch size: 19, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:43:52,495 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:05,562 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96545.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:44:11,790 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 2.706e+02 3.173e+02 4.121e+02 9.971e+02, threshold=6.345e+02, percent-clipped=8.0 +2023-02-06 12:44:13,324 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:18,532 INFO [train.py:901] (0/4) Epoch 12, batch 7650, loss[loss=0.2428, simple_loss=0.3269, pruned_loss=0.07941, over 8713.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3076, pruned_loss=0.0785, over 1610381.04 frames. ], batch size: 34, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:44:23,312 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96570.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:44:29,833 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:47,079 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:53,017 INFO [train.py:901] (0/4) Epoch 12, batch 7700, loss[loss=0.2336, simple_loss=0.3076, pruned_loss=0.0798, over 8289.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3066, pruned_loss=0.0779, over 1607992.06 frames. ], batch size: 23, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:45:12,723 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:45:13,563 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 12:45:21,231 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.521e+02 3.004e+02 3.630e+02 7.905e+02, threshold=6.007e+02, percent-clipped=3.0 +2023-02-06 12:45:23,902 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 12:45:27,882 INFO [train.py:901] (0/4) Epoch 12, batch 7750, loss[loss=0.2429, simple_loss=0.3207, pruned_loss=0.0825, over 8472.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3059, pruned_loss=0.07728, over 1609977.10 frames. ], batch size: 29, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:45:42,849 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:45:57,501 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 12:46:02,106 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:02,590 INFO [train.py:901] (0/4) Epoch 12, batch 7800, loss[loss=0.2405, simple_loss=0.3257, pruned_loss=0.07767, over 8471.00 frames. 
], tot_loss[loss=0.23, simple_loss=0.3059, pruned_loss=0.077, over 1611227.49 frames. ], batch size: 25, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:46:19,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:28,444 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:30,316 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.519e+02 3.193e+02 4.174e+02 8.059e+02, threshold=6.386e+02, percent-clipped=4.0 +2023-02-06 12:46:36,572 INFO [train.py:901] (0/4) Epoch 12, batch 7850, loss[loss=0.1846, simple_loss=0.2607, pruned_loss=0.05429, over 7312.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3059, pruned_loss=0.07715, over 1606812.11 frames. ], batch size: 16, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:46:46,557 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 12:46:55,800 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.53 vs. limit=5.0 +2023-02-06 12:46:56,877 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:01,916 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:09,552 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 12:47:10,025 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:10,502 INFO [train.py:901] (0/4) Epoch 12, batch 7900, loss[loss=0.2401, simple_loss=0.3212, pruned_loss=0.07947, over 8464.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3073, pruned_loss=0.07736, over 1607930.21 frames. ], batch size: 29, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:47:27,502 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:38,784 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.595e+02 3.080e+02 3.878e+02 8.124e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 12:47:44,799 INFO [train.py:901] (0/4) Epoch 12, batch 7950, loss[loss=0.2297, simple_loss=0.306, pruned_loss=0.07668, over 8032.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.308, pruned_loss=0.07786, over 1611514.04 frames. ], batch size: 22, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:47:47,027 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:07,408 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96898.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:15,446 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-06 12:48:17,695 INFO [train.py:901] (0/4) Epoch 12, batch 8000, loss[loss=0.2166, simple_loss=0.305, pruned_loss=0.06408, over 8498.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3077, pruned_loss=0.07774, over 1610595.96 frames. 
], batch size: 28, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:48:23,660 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96923.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:45,033 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.377e+02 3.280e+02 4.266e+02 7.100e+02, threshold=6.559e+02, percent-clipped=4.0 +2023-02-06 12:48:51,267 INFO [train.py:901] (0/4) Epoch 12, batch 8050, loss[loss=0.2649, simple_loss=0.3341, pruned_loss=0.09788, over 6474.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3078, pruned_loss=0.07829, over 1603047.43 frames. ], batch size: 71, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:49:15,168 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-12.pt +2023-02-06 12:49:25,986 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 12:49:29,795 INFO [train.py:901] (0/4) Epoch 13, batch 0, loss[loss=0.1982, simple_loss=0.286, pruned_loss=0.05518, over 8193.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.286, pruned_loss=0.05518, over 8193.00 frames. ], batch size: 23, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:49:29,796 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 12:49:40,736 INFO [train.py:935] (0/4) Epoch 13, validation: loss=0.1867, simple_loss=0.2865, pruned_loss=0.04345, over 944034.00 frames. +2023-02-06 12:49:40,737 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 12:49:48,848 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 12:49:55,394 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 12:49:55,528 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:49:57,150 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 12:50:14,919 INFO [train.py:901] (0/4) Epoch 13, batch 50, loss[loss=0.2767, simple_loss=0.3425, pruned_loss=0.1055, over 8193.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3098, pruned_loss=0.07822, over 362754.68 frames. ], batch size: 23, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:50:20,331 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.833e+02 3.357e+02 4.758e+02 6.927e+02, threshold=6.715e+02, percent-clipped=2.0 +2023-02-06 12:50:21,933 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:29,214 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 12:50:41,099 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:50,960 INFO [train.py:901] (0/4) Epoch 13, batch 100, loss[loss=0.1948, simple_loss=0.2757, pruned_loss=0.05702, over 8084.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.309, pruned_loss=0.07741, over 641805.87 frames. ], batch size: 21, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:50:52,984 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
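Two learning-rate behaviours are visible in this stretch: within epoch 12 the lr creeps from 6.34e-03 down to 6.25e-03 as the batch count grows, and at the epoch 13 boundary it steps down to 6.00e-03. That is the signature of icefall's Eden-style scheduler, which factorizes the decay over steps and epochs; a sketch with the commonly used constants (treat the constants, and their applicability to this particular run, as assumptions):

```python
def eden_lr(base_lr, step, epoch, lr_batches=5000.0, lr_epochs=3.5):
    """Eden-style learning rate: one smooth decay factor in the batch
    index and an independent one in the epoch, so the lr both drifts
    down within an epoch and steps down at each epoch boundary."""
    step_factor = ((step ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * step_factor * epoch_factor

print(eden_lr(0.05, step=95000, epoch=12))  # ~6e-03, the same ballpark as the log
```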
Duration: 0.97725 +2023-02-06 12:51:02,199 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1823, 4.1925, 3.7374, 1.8210, 3.6792, 3.6520, 3.8426, 3.3754], + device='cuda:0'), covar=tensor([0.0818, 0.0556, 0.1079, 0.4552, 0.1032, 0.1030, 0.1189, 0.0930], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0387, 0.0397, 0.0496, 0.0390, 0.0392, 0.0385, 0.0337], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 12:51:09,044 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:18,985 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:24,703 INFO [train.py:901] (0/4) Epoch 13, batch 150, loss[loss=0.2316, simple_loss=0.2972, pruned_loss=0.08302, over 7156.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3063, pruned_loss=0.07638, over 853778.08 frames. ], batch size: 16, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:51:25,604 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97148.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:30,113 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.477e+02 2.848e+02 3.342e+02 7.997e+02, threshold=5.696e+02, percent-clipped=2.0 +2023-02-06 12:51:58,451 INFO [train.py:901] (0/4) Epoch 13, batch 200, loss[loss=0.2444, simple_loss=0.3226, pruned_loss=0.08311, over 8503.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3063, pruned_loss=0.07606, over 1028403.26 frames. ], batch size: 26, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:52:09,192 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3716, 1.3389, 2.3138, 1.2978, 2.0891, 2.4341, 2.5462, 2.1244], + device='cuda:0'), covar=tensor([0.1001, 0.1213, 0.0463, 0.1916, 0.0727, 0.0393, 0.0656, 0.0703], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0301, 0.0263, 0.0292, 0.0275, 0.0239, 0.0357, 0.0291], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 12:52:33,408 INFO [train.py:901] (0/4) Epoch 13, batch 250, loss[loss=0.2249, simple_loss=0.3045, pruned_loss=0.07269, over 8482.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3058, pruned_loss=0.07608, over 1161562.23 frames. ], batch size: 49, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:52:37,623 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:52:38,766 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.455e+02 3.117e+02 3.819e+02 7.824e+02, threshold=6.233e+02, percent-clipped=7.0 +2023-02-06 12:52:46,026 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-06 12:52:53,399 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5964, 1.8207, 1.9240, 1.3148, 2.0462, 1.4486, 0.7302, 1.7488], + device='cuda:0'), covar=tensor([0.0420, 0.0227, 0.0171, 0.0350, 0.0247, 0.0523, 0.0535, 0.0203], + device='cuda:0'), in_proj_covar=tensor([0.0397, 0.0334, 0.0283, 0.0390, 0.0319, 0.0476, 0.0355, 0.0360], + device='cuda:0'), out_proj_covar=tensor([1.1174e-04, 9.1578e-05, 7.8097e-05, 1.0806e-04, 8.8825e-05, 1.4247e-04, + 1.0009e-04, 1.0050e-04], device='cuda:0') +2023-02-06 12:52:54,012 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:52:54,538 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 12:53:06,364 INFO [train.py:901] (0/4) Epoch 13, batch 300, loss[loss=0.1854, simple_loss=0.2742, pruned_loss=0.04835, over 7811.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3062, pruned_loss=0.07649, over 1265654.02 frames. ], batch size: 20, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:53:15,189 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2858, 2.1646, 1.7015, 2.0109, 1.7079, 1.4035, 1.6100, 1.7011], + device='cuda:0'), covar=tensor([0.1127, 0.0347, 0.0984, 0.0462, 0.0635, 0.1239, 0.0853, 0.0742], + device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0236, 0.0321, 0.0301, 0.0304, 0.0322, 0.0342, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:53:41,539 INFO [train.py:901] (0/4) Epoch 13, batch 350, loss[loss=0.1978, simple_loss=0.2799, pruned_loss=0.05781, over 8102.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3062, pruned_loss=0.07624, over 1342925.12 frames. ], batch size: 23, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:53:46,924 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.508e+02 3.076e+02 3.709e+02 6.548e+02, threshold=6.153e+02, percent-clipped=1.0 +2023-02-06 12:53:50,448 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:51,744 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:53,793 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:54:11,873 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:54:15,163 INFO [train.py:901] (0/4) Epoch 13, batch 400, loss[loss=0.2054, simple_loss=0.2892, pruned_loss=0.06083, over 7649.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3067, pruned_loss=0.07607, over 1405078.81 frames. 
], batch size: 19, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:54:51,190 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9212, 1.4956, 5.9457, 2.0729, 5.3329, 5.0387, 5.5219, 5.3517], + device='cuda:0'), covar=tensor([0.0387, 0.4622, 0.0356, 0.3406, 0.0933, 0.0817, 0.0438, 0.0444], + device='cuda:0'), in_proj_covar=tensor([0.0507, 0.0588, 0.0595, 0.0545, 0.0625, 0.0531, 0.0523, 0.0584], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 12:54:51,725 INFO [train.py:901] (0/4) Epoch 13, batch 450, loss[loss=0.2249, simple_loss=0.3117, pruned_loss=0.06899, over 8096.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3064, pruned_loss=0.07609, over 1455347.88 frames. ], batch size: 23, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:54:57,098 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.318e+02 2.836e+02 3.756e+02 7.381e+02, threshold=5.672e+02, percent-clipped=3.0 +2023-02-06 12:55:04,802 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7626, 1.5083, 2.7953, 1.2977, 2.1692, 2.9727, 3.1327, 2.5166], + device='cuda:0'), covar=tensor([0.1019, 0.1429, 0.0410, 0.2070, 0.0879, 0.0339, 0.0558, 0.0712], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0301, 0.0266, 0.0294, 0.0275, 0.0240, 0.0359, 0.0291], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:55:04,850 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2626, 2.1311, 1.6307, 1.9163, 1.7811, 1.2644, 1.6274, 1.6426], + device='cuda:0'), covar=tensor([0.1136, 0.0344, 0.1074, 0.0487, 0.0581, 0.1348, 0.0878, 0.0737], + device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0236, 0.0319, 0.0301, 0.0302, 0.0323, 0.0341, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:55:06,273 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8241, 1.8816, 2.2682, 1.7516, 1.2711, 2.3097, 0.3914, 1.4190], + device='cuda:0'), covar=tensor([0.2336, 0.1259, 0.0457, 0.1633, 0.3921, 0.0396, 0.2988, 0.1721], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0172, 0.0103, 0.0219, 0.0258, 0.0109, 0.0164, 0.0169], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 12:55:13,252 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:26,756 INFO [train.py:901] (0/4) Epoch 13, batch 500, loss[loss=0.2155, simple_loss=0.2945, pruned_loss=0.06824, over 8356.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3055, pruned_loss=0.0752, over 1492857.38 frames. ], batch size: 24, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:55:35,480 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97509.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:52,950 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97534.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:53,115 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-06 12:56:01,766 INFO [train.py:901] (0/4) Epoch 13, batch 550, loss[loss=0.2726, simple_loss=0.3475, pruned_loss=0.09878, over 8192.00 frames. 
], tot_loss[loss=0.2285, simple_loss=0.3053, pruned_loss=0.07584, over 1517976.45 frames. ], batch size: 23, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:56:07,713 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.533e+02 3.037e+02 3.770e+02 9.997e+02, threshold=6.074e+02, percent-clipped=4.0 +2023-02-06 12:56:20,093 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:36,765 INFO [train.py:901] (0/4) Epoch 13, batch 600, loss[loss=0.2384, simple_loss=0.3238, pruned_loss=0.07646, over 8200.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.07498, over 1537765.47 frames. ], batch size: 23, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:56:53,684 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:55,690 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 12:57:10,256 INFO [train.py:901] (0/4) Epoch 13, batch 650, loss[loss=0.2125, simple_loss=0.2962, pruned_loss=0.06438, over 8208.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3053, pruned_loss=0.07573, over 1553017.72 frames. ], batch size: 23, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:57:16,274 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.537e+02 2.925e+02 3.842e+02 7.324e+02, threshold=5.850e+02, percent-clipped=4.0 +2023-02-06 12:57:25,803 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3039, 1.9534, 1.4310, 1.8087, 1.5237, 1.1767, 1.4562, 1.6634], + device='cuda:0'), covar=tensor([0.1192, 0.0421, 0.1250, 0.0538, 0.0746, 0.1591, 0.1040, 0.0781], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0237, 0.0322, 0.0302, 0.0304, 0.0325, 0.0344, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 12:57:42,538 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97692.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:57:45,736 INFO [train.py:901] (0/4) Epoch 13, batch 700, loss[loss=0.1796, simple_loss=0.2499, pruned_loss=0.05465, over 7935.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3051, pruned_loss=0.07554, over 1569119.57 frames. ], batch size: 20, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:57:51,261 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:57:54,504 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:10,695 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:12,603 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:13,385 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:19,707 INFO [train.py:901] (0/4) Epoch 13, batch 750, loss[loss=0.2335, simple_loss=0.3179, pruned_loss=0.0746, over 8643.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3062, pruned_loss=0.07584, over 1584547.17 frames. 
], batch size: 34, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:58:25,045 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.478e+02 2.997e+02 3.995e+02 8.399e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-06 12:58:27,356 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:39,717 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 12:58:49,007 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 12:58:54,230 INFO [train.py:901] (0/4) Epoch 13, batch 800, loss[loss=0.2694, simple_loss=0.3387, pruned_loss=0.1001, over 8341.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.305, pruned_loss=0.07497, over 1594444.06 frames. ], batch size: 26, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:58:56,585 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.37 vs. limit=5.0 +2023-02-06 12:59:10,089 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:14,045 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:29,482 INFO [train.py:901] (0/4) Epoch 13, batch 850, loss[loss=0.2065, simple_loss=0.2919, pruned_loss=0.06057, over 8043.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.305, pruned_loss=0.07484, over 1600910.94 frames. ], batch size: 22, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:59:32,328 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:35,503 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.627e+02 3.254e+02 4.246e+02 9.834e+02, threshold=6.507e+02, percent-clipped=8.0 +2023-02-06 13:00:03,797 INFO [train.py:901] (0/4) Epoch 13, batch 900, loss[loss=0.2479, simple_loss=0.3175, pruned_loss=0.08914, over 8500.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3046, pruned_loss=0.07492, over 1600671.68 frames. ], batch size: 28, lr: 5.98e-03, grad_scale: 8.0 +2023-02-06 13:00:09,699 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:00:18,213 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:00:39,215 INFO [train.py:901] (0/4) Epoch 13, batch 950, loss[loss=0.1983, simple_loss=0.2912, pruned_loss=0.05271, over 8278.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3045, pruned_loss=0.07476, over 1606068.70 frames. 
], batch size: 23, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:00:45,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.593e+02 3.202e+02 4.020e+02 7.231e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 13:00:59,997 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1696, 4.1183, 3.7590, 1.7608, 3.6800, 3.7131, 3.7509, 3.3275], + device='cuda:0'), covar=tensor([0.0817, 0.0558, 0.1060, 0.4850, 0.0948, 0.0867, 0.1278, 0.0990], + device='cuda:0'), in_proj_covar=tensor([0.0474, 0.0387, 0.0394, 0.0489, 0.0389, 0.0392, 0.0385, 0.0341], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:01:08,766 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 13:01:11,112 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:13,716 INFO [train.py:901] (0/4) Epoch 13, batch 1000, loss[loss=0.2128, simple_loss=0.3024, pruned_loss=0.06161, over 8473.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3048, pruned_loss=0.07458, over 1606270.84 frames. ], batch size: 25, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:01:15,919 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-98000.pt +2023-02-06 13:01:29,780 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:39,955 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:43,175 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98036.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:01:44,381 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 13:01:50,447 INFO [train.py:901] (0/4) Epoch 13, batch 1050, loss[loss=0.2795, simple_loss=0.3404, pruned_loss=0.1093, over 7125.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.304, pruned_loss=0.07451, over 1605292.27 frames. ], batch size: 71, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:01:56,534 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 2.365e+02 2.893e+02 3.782e+02 5.594e+02, threshold=5.785e+02, percent-clipped=0.0 +2023-02-06 13:01:57,235 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 13:02:10,161 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98075.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:13,450 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:24,721 INFO [train.py:901] (0/4) Epoch 13, batch 1100, loss[loss=0.2403, simple_loss=0.3261, pruned_loss=0.07729, over 8106.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.3027, pruned_loss=0.07384, over 1606904.71 frames. 
], batch size: 23, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:02:26,939 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:30,280 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:31,641 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:42,954 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:49,125 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:59,477 INFO [train.py:901] (0/4) Epoch 13, batch 1150, loss[loss=0.2118, simple_loss=0.2744, pruned_loss=0.07465, over 7410.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3037, pruned_loss=0.07435, over 1609741.76 frames. ], batch size: 17, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:03:02,365 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98151.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:03:05,425 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 13:03:06,090 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.603e+02 3.101e+02 3.825e+02 7.832e+02, threshold=6.203e+02, percent-clipped=6.0 +2023-02-06 13:03:14,927 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0827, 1.2448, 1.1938, 0.6139, 1.2386, 1.0196, 0.0737, 1.1814], + device='cuda:0'), covar=tensor([0.0306, 0.0269, 0.0256, 0.0430, 0.0327, 0.0705, 0.0569, 0.0230], + device='cuda:0'), in_proj_covar=tensor([0.0402, 0.0337, 0.0289, 0.0397, 0.0325, 0.0484, 0.0360, 0.0362], + device='cuda:0'), out_proj_covar=tensor([1.1293e-04, 9.2114e-05, 7.9625e-05, 1.0987e-04, 9.0458e-05, 1.4467e-04, + 1.0148e-04, 1.0106e-04], device='cuda:0') +2023-02-06 13:03:34,170 INFO [train.py:901] (0/4) Epoch 13, batch 1200, loss[loss=0.1897, simple_loss=0.2589, pruned_loss=0.06026, over 7409.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3024, pruned_loss=0.07408, over 1606332.76 frames. ], batch size: 17, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:03:46,026 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 13:04:06,227 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5203, 1.1629, 4.6726, 1.6481, 4.1003, 3.9128, 4.2107, 4.0476], + device='cuda:0'), covar=tensor([0.0478, 0.4902, 0.0401, 0.3775, 0.1019, 0.0848, 0.0523, 0.0573], + device='cuda:0'), in_proj_covar=tensor([0.0510, 0.0587, 0.0598, 0.0544, 0.0618, 0.0531, 0.0522, 0.0579], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:04:08,152 INFO [train.py:901] (0/4) Epoch 13, batch 1250, loss[loss=0.2514, simple_loss=0.3377, pruned_loss=0.08252, over 8459.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.304, pruned_loss=0.07511, over 1612664.13 frames. 
], batch size: 27, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:04:10,201 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:04:14,080 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.568e+02 3.066e+02 4.053e+02 1.440e+03, threshold=6.132e+02, percent-clipped=8.0 +2023-02-06 13:04:36,935 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:04:43,429 INFO [train.py:901] (0/4) Epoch 13, batch 1300, loss[loss=0.2935, simple_loss=0.3547, pruned_loss=0.1162, over 8451.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3038, pruned_loss=0.07506, over 1612592.20 frames. ], batch size: 27, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:04:44,912 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2999, 1.8710, 2.8775, 2.2569, 2.4934, 2.1120, 1.7428, 1.1716], + device='cuda:0'), covar=tensor([0.4373, 0.4651, 0.1211, 0.2926, 0.2159, 0.2761, 0.2008, 0.4674], + device='cuda:0'), in_proj_covar=tensor([0.0893, 0.0883, 0.0741, 0.0861, 0.0939, 0.0810, 0.0706, 0.0771], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:04:54,305 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0573, 1.5447, 1.5482, 1.3963, 0.9077, 1.3656, 1.7164, 1.6596], + device='cuda:0'), covar=tensor([0.0466, 0.1152, 0.1736, 0.1337, 0.0586, 0.1482, 0.0670, 0.0573], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0153, 0.0193, 0.0158, 0.0102, 0.0164, 0.0116, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:04:54,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:05:12,874 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3365, 2.5215, 1.8345, 2.0648, 1.9827, 1.4733, 1.7148, 1.9707], + device='cuda:0'), covar=tensor([0.1275, 0.0325, 0.0968, 0.0572, 0.0767, 0.1380, 0.1008, 0.0766], + device='cuda:0'), in_proj_covar=tensor([0.0338, 0.0231, 0.0313, 0.0295, 0.0296, 0.0317, 0.0334, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:05:16,645 INFO [train.py:901] (0/4) Epoch 13, batch 1350, loss[loss=0.1894, simple_loss=0.2647, pruned_loss=0.05708, over 7212.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.304, pruned_loss=0.07491, over 1614621.30 frames. ], batch size: 16, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:05:23,232 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.492e+02 3.102e+02 3.697e+02 5.327e+02, threshold=6.205e+02, percent-clipped=0.0 +2023-02-06 13:05:29,565 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:05:52,226 INFO [train.py:901] (0/4) Epoch 13, batch 1400, loss[loss=0.2691, simple_loss=0.34, pruned_loss=0.09914, over 8283.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3041, pruned_loss=0.07501, over 1615343.70 frames. 
], batch size: 23, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:05:59,393 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98407.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:06:14,063 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:06:16,845 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98432.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:06:26,899 INFO [train.py:901] (0/4) Epoch 13, batch 1450, loss[loss=0.2513, simple_loss=0.3301, pruned_loss=0.08622, over 8179.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3039, pruned_loss=0.07474, over 1611329.68 frames. ], batch size: 23, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:06:32,939 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.293e+02 2.812e+02 3.491e+02 8.118e+02, threshold=5.625e+02, percent-clipped=1.0 +2023-02-06 13:06:34,308 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 13:06:41,214 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98468.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:07:02,148 INFO [train.py:901] (0/4) Epoch 13, batch 1500, loss[loss=0.2097, simple_loss=0.291, pruned_loss=0.06426, over 8519.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3025, pruned_loss=0.07415, over 1610048.53 frames. ], batch size: 28, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:07:28,243 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4897, 1.9401, 3.3815, 1.3137, 2.5393, 1.8862, 1.5861, 2.2719], + device='cuda:0'), covar=tensor([0.1759, 0.2242, 0.0730, 0.4119, 0.1529, 0.3017, 0.1991, 0.2170], + device='cuda:0'), in_proj_covar=tensor([0.0492, 0.0532, 0.0538, 0.0590, 0.0626, 0.0561, 0.0483, 0.0613], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:07:36,058 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6819, 4.7127, 4.1899, 2.1584, 4.0271, 4.1854, 4.3431, 3.9378], + device='cuda:0'), covar=tensor([0.0611, 0.0515, 0.0885, 0.4228, 0.0907, 0.0897, 0.1110, 0.0744], + device='cuda:0'), in_proj_covar=tensor([0.0466, 0.0382, 0.0391, 0.0483, 0.0383, 0.0388, 0.0380, 0.0337], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:07:37,386 INFO [train.py:901] (0/4) Epoch 13, batch 1550, loss[loss=0.2159, simple_loss=0.2849, pruned_loss=0.07339, over 8090.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3033, pruned_loss=0.07492, over 1606635.73 frames. ], batch size: 21, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:07:43,379 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.583e+02 3.208e+02 4.119e+02 6.608e+02, threshold=6.417e+02, percent-clipped=3.0 +2023-02-06 13:07:46,948 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:07:47,839 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.43 vs. 
limit=5.0 +2023-02-06 13:08:02,048 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:11,760 INFO [train.py:901] (0/4) Epoch 13, batch 1600, loss[loss=0.1992, simple_loss=0.282, pruned_loss=0.0582, over 7971.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3035, pruned_loss=0.07511, over 1606227.03 frames. ], batch size: 21, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:08:27,711 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1850, 1.1009, 1.2608, 1.1025, 0.9151, 1.3219, 0.0582, 0.8731], + device='cuda:0'), covar=tensor([0.2158, 0.1807, 0.0628, 0.1154, 0.3652, 0.0590, 0.2939, 0.1533], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0175, 0.0104, 0.0221, 0.0262, 0.0112, 0.0164, 0.0168], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 13:08:29,731 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:46,790 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:47,291 INFO [train.py:901] (0/4) Epoch 13, batch 1650, loss[loss=0.1894, simple_loss=0.2758, pruned_loss=0.05145, over 8196.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3057, pruned_loss=0.07643, over 1610801.58 frames. ], batch size: 23, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:08:53,405 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.462e+02 2.942e+02 3.707e+02 8.113e+02, threshold=5.885e+02, percent-clipped=6.0 +2023-02-06 13:09:07,750 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.76 vs. limit=5.0 +2023-02-06 13:09:20,790 INFO [train.py:901] (0/4) Epoch 13, batch 1700, loss[loss=0.1854, simple_loss=0.2707, pruned_loss=0.04999, over 8198.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3063, pruned_loss=0.07648, over 1613285.85 frames. ], batch size: 23, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:09:49,366 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6668, 1.6270, 2.0288, 1.5368, 1.2491, 2.0357, 0.3356, 1.2629], + device='cuda:0'), covar=tensor([0.3027, 0.1710, 0.0547, 0.1648, 0.3600, 0.0540, 0.2898, 0.1675], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0175, 0.0104, 0.0220, 0.0260, 0.0112, 0.0163, 0.0167], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 13:09:57,386 INFO [train.py:901] (0/4) Epoch 13, batch 1750, loss[loss=0.2293, simple_loss=0.3097, pruned_loss=0.07445, over 8498.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3057, pruned_loss=0.07636, over 1613019.01 frames. 
], batch size: 26, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:10:03,433 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.663e+02 3.311e+02 3.905e+02 7.561e+02, threshold=6.622e+02, percent-clipped=6.0 +2023-02-06 13:10:15,119 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98772.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:10:27,277 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8031, 1.2321, 5.9177, 2.1968, 5.2242, 5.0087, 5.4782, 5.3288], + device='cuda:0'), covar=tensor([0.0487, 0.5155, 0.0332, 0.3358, 0.1019, 0.0848, 0.0460, 0.0467], + device='cuda:0'), in_proj_covar=tensor([0.0510, 0.0584, 0.0597, 0.0550, 0.0620, 0.0532, 0.0522, 0.0585], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:10:31,881 INFO [train.py:901] (0/4) Epoch 13, batch 1800, loss[loss=0.1886, simple_loss=0.2671, pruned_loss=0.05505, over 7981.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3052, pruned_loss=0.07558, over 1614144.81 frames. ], batch size: 21, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:01,397 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:06,994 INFO [train.py:901] (0/4) Epoch 13, batch 1850, loss[loss=0.2694, simple_loss=0.3268, pruned_loss=0.106, over 7532.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3046, pruned_loss=0.07556, over 1612029.55 frames. ], batch size: 18, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:13,496 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.419e+02 2.914e+02 4.067e+02 1.078e+03, threshold=5.828e+02, percent-clipped=2.0 +2023-02-06 13:11:18,919 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:34,567 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:41,187 INFO [train.py:901] (0/4) Epoch 13, batch 1900, loss[loss=0.2908, simple_loss=0.358, pruned_loss=0.1118, over 7184.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3047, pruned_loss=0.07563, over 1613865.67 frames. ], batch size: 71, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:46,600 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:48,677 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98908.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:51,815 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:12:11,194 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 13:12:15,140 INFO [train.py:901] (0/4) Epoch 13, batch 1950, loss[loss=0.2292, simple_loss=0.3137, pruned_loss=0.07232, over 8607.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3048, pruned_loss=0.07526, over 1616536.06 frames. 
], batch size: 31, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:12:21,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.445e+02 3.079e+02 3.874e+02 6.986e+02, threshold=6.158e+02, percent-clipped=4.0 +2023-02-06 13:12:23,981 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 13:12:44,583 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 13:12:45,384 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98989.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:12:50,400 INFO [train.py:901] (0/4) Epoch 13, batch 2000, loss[loss=0.2607, simple_loss=0.3409, pruned_loss=0.09026, over 8187.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3055, pruned_loss=0.07565, over 1613384.75 frames. ], batch size: 23, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:13:06,435 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:13:24,243 INFO [train.py:901] (0/4) Epoch 13, batch 2050, loss[loss=0.2117, simple_loss=0.2918, pruned_loss=0.06576, over 8301.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3027, pruned_loss=0.07408, over 1607754.05 frames. ], batch size: 23, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:13:28,300 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8116, 1.9860, 2.0488, 1.2211, 2.2208, 1.5535, 0.5430, 1.9276], + device='cuda:0'), covar=tensor([0.0351, 0.0221, 0.0191, 0.0432, 0.0226, 0.0622, 0.0609, 0.0175], + device='cuda:0'), in_proj_covar=tensor([0.0393, 0.0330, 0.0286, 0.0394, 0.0321, 0.0478, 0.0355, 0.0359], + device='cuda:0'), out_proj_covar=tensor([1.1037e-04, 8.9948e-05, 7.8544e-05, 1.0883e-04, 8.9356e-05, 1.4268e-04, + 9.9842e-05, 1.0003e-04], device='cuda:0') +2023-02-06 13:13:28,455 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 13:13:30,114 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.527e+02 3.267e+02 4.166e+02 9.227e+02, threshold=6.535e+02, percent-clipped=8.0 +2023-02-06 13:13:39,432 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:13:48,845 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0762, 1.9969, 2.0920, 1.8406, 1.1032, 1.9244, 2.2757, 2.3245], + device='cuda:0'), covar=tensor([0.0383, 0.1077, 0.1451, 0.1184, 0.0548, 0.1296, 0.0592, 0.0480], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:13:58,777 INFO [train.py:901] (0/4) Epoch 13, batch 2100, loss[loss=0.3112, simple_loss=0.358, pruned_loss=0.1322, over 8520.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3014, pruned_loss=0.07363, over 1604121.85 frames. ], batch size: 28, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:13:59,949 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. 
limit=2.0 +2023-02-06 13:14:31,269 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:14:33,890 INFO [train.py:901] (0/4) Epoch 13, batch 2150, loss[loss=0.231, simple_loss=0.3168, pruned_loss=0.07259, over 8195.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3036, pruned_loss=0.07543, over 1601227.75 frames. ], batch size: 23, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:14:39,911 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.393e+02 2.767e+02 3.323e+02 5.467e+02, threshold=5.533e+02, percent-clipped=0.0 +2023-02-06 13:14:48,055 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:07,844 INFO [train.py:901] (0/4) Epoch 13, batch 2200, loss[loss=0.2172, simple_loss=0.2954, pruned_loss=0.06952, over 8240.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3023, pruned_loss=0.07464, over 1599204.57 frames. ], batch size: 22, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:15:39,425 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 13:15:43,034 INFO [train.py:901] (0/4) Epoch 13, batch 2250, loss[loss=0.2705, simple_loss=0.3494, pruned_loss=0.09579, over 8309.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3033, pruned_loss=0.07514, over 1599008.53 frames. ], batch size: 26, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:15:43,152 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:46,442 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:48,894 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.623e+02 3.377e+02 4.135e+02 6.545e+02, threshold=6.753e+02, percent-clipped=6.0 +2023-02-06 13:15:49,661 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:02,579 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:09,205 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:16,491 INFO [train.py:901] (0/4) Epoch 13, batch 2300, loss[loss=0.2151, simple_loss=0.2812, pruned_loss=0.07449, over 7686.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3029, pruned_loss=0.07471, over 1602936.76 frames. ], batch size: 18, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:16:19,363 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:43,123 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99333.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:49,159 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:52,410 INFO [train.py:901] (0/4) Epoch 13, batch 2350, loss[loss=0.2714, simple_loss=0.338, pruned_loss=0.1024, over 8670.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3028, pruned_loss=0.07464, over 1606334.13 frames. 
], batch size: 49, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:16:58,385 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.470e+02 3.074e+02 3.865e+02 1.080e+03, threshold=6.149e+02, percent-clipped=3.0 +2023-02-06 13:17:06,751 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99367.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:10,156 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:26,590 INFO [train.py:901] (0/4) Epoch 13, batch 2400, loss[loss=0.2414, simple_loss=0.3218, pruned_loss=0.0805, over 8020.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3031, pruned_loss=0.07506, over 1606910.60 frames. ], batch size: 22, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:17:26,696 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1800, 4.1494, 3.7832, 1.8217, 3.7620, 3.7185, 3.8006, 3.5783], + device='cuda:0'), covar=tensor([0.0733, 0.0623, 0.0987, 0.4833, 0.0781, 0.0915, 0.1363, 0.0735], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0393, 0.0397, 0.0497, 0.0391, 0.0394, 0.0384, 0.0343], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:17:37,524 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:55,975 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:18:01,834 INFO [train.py:901] (0/4) Epoch 13, batch 2450, loss[loss=0.1913, simple_loss=0.2591, pruned_loss=0.0617, over 7702.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3041, pruned_loss=0.07528, over 1612521.40 frames. ], batch size: 18, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:02,705 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99448.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:18:08,609 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.541e+02 3.157e+02 3.793e+02 6.756e+02, threshold=6.314e+02, percent-clipped=3.0 +2023-02-06 13:18:29,570 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99486.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:18:36,514 INFO [train.py:901] (0/4) Epoch 13, batch 2500, loss[loss=0.2169, simple_loss=0.2847, pruned_loss=0.07456, over 7522.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.302, pruned_loss=0.07417, over 1611652.55 frames. ], batch size: 18, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:57,692 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:10,257 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0844, 1.2544, 1.4485, 1.2098, 1.0332, 1.3122, 1.7718, 1.6993], + device='cuda:0'), covar=tensor([0.0571, 0.1807, 0.2538, 0.1890, 0.0722, 0.2182, 0.0791, 0.0685], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0158, 0.0102, 0.0164, 0.0115, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:19:10,748 INFO [train.py:901] (0/4) Epoch 13, batch 2550, loss[loss=0.2807, simple_loss=0.3544, pruned_loss=0.1035, over 8604.00 frames. 
], tot_loss[loss=0.2264, simple_loss=0.3033, pruned_loss=0.07475, over 1615716.43 frames. ], batch size: 39, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:19:17,199 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.420e+02 2.977e+02 3.875e+02 7.325e+02, threshold=5.954e+02, percent-clipped=4.0 +2023-02-06 13:19:37,743 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99586.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:41,075 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:45,635 INFO [train.py:901] (0/4) Epoch 13, batch 2600, loss[loss=0.3143, simple_loss=0.3746, pruned_loss=0.127, over 7403.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3039, pruned_loss=0.07523, over 1613755.59 frames. ], batch size: 71, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:19:47,528 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 13:19:52,603 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:03,513 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:06,919 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:08,162 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:19,560 INFO [train.py:901] (0/4) Epoch 13, batch 2650, loss[loss=0.2041, simple_loss=0.2955, pruned_loss=0.05636, over 8236.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3038, pruned_loss=0.07518, over 1613523.48 frames. ], batch size: 22, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:20,457 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:23,639 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:25,437 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.403e+02 3.099e+02 4.031e+02 8.160e+02, threshold=6.198e+02, percent-clipped=1.0 +2023-02-06 13:20:46,670 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:54,457 INFO [train.py:901] (0/4) Epoch 13, batch 2700, loss[loss=0.1871, simple_loss=0.2716, pruned_loss=0.05128, over 7803.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3038, pruned_loss=0.07527, over 1611398.63 frames. 
], batch size: 19, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:55,208 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:59,198 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:00,470 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:16,801 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:27,701 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:28,870 INFO [train.py:901] (0/4) Epoch 13, batch 2750, loss[loss=0.252, simple_loss=0.3257, pruned_loss=0.08912, over 8366.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3033, pruned_loss=0.07507, over 1610387.80 frames. ], batch size: 24, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:21:34,777 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.508e+02 3.194e+02 3.866e+02 8.318e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 13:21:49,874 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5475, 1.8063, 2.8204, 1.3534, 2.0568, 1.8530, 1.5774, 1.8887], + device='cuda:0'), covar=tensor([0.1636, 0.2057, 0.0626, 0.3717, 0.1485, 0.2739, 0.1849, 0.1928], + device='cuda:0'), in_proj_covar=tensor([0.0489, 0.0530, 0.0539, 0.0588, 0.0622, 0.0559, 0.0481, 0.0612], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:21:52,457 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:53,979 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:03,191 INFO [train.py:901] (0/4) Epoch 13, batch 2800, loss[loss=0.2084, simple_loss=0.2956, pruned_loss=0.06062, over 8336.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3034, pruned_loss=0.07509, over 1610808.27 frames. ], batch size: 26, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:06,156 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:11,456 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:26,099 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99830.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:22:37,605 INFO [train.py:901] (0/4) Epoch 13, batch 2850, loss[loss=0.2377, simple_loss=0.3146, pruned_loss=0.08037, over 8133.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3048, pruned_loss=0.07566, over 1609657.82 frames. ], batch size: 22, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:43,877 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.486e+02 2.909e+02 3.673e+02 9.445e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-06 13:23:11,496 INFO [train.py:901] (0/4) Epoch 13, batch 2900, loss[loss=0.2204, simple_loss=0.3089, pruned_loss=0.06592, over 8364.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3052, pruned_loss=0.07568, over 1611466.47 frames. 
], batch size: 24, lr: 5.92e-03, grad_scale: 16.0 +2023-02-06 13:23:11,675 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:34,894 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:45,006 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99945.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:23:46,151 INFO [train.py:901] (0/4) Epoch 13, batch 2950, loss[loss=0.1965, simple_loss=0.2872, pruned_loss=0.05295, over 8581.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3054, pruned_loss=0.07604, over 1610385.98 frames. ], batch size: 31, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:23:48,976 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:52,242 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 13:23:52,908 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.762e+02 3.280e+02 4.150e+02 8.176e+02, threshold=6.560e+02, percent-clipped=12.0 +2023-02-06 13:23:57,273 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99962.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:14,467 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:21,168 INFO [train.py:901] (0/4) Epoch 13, batch 3000, loss[loss=0.1908, simple_loss=0.2642, pruned_loss=0.05872, over 7639.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3042, pruned_loss=0.07519, over 1613120.20 frames. ], batch size: 19, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:24:21,169 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 13:24:33,568 INFO [train.py:935] (0/4) Epoch 13, validation: loss=0.1841, simple_loss=0.2841, pruned_loss=0.04204, over 944034.00 frames. +2023-02-06 13:24:33,569 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 13:24:35,838 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-100000.pt +2023-02-06 13:24:37,720 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:53,947 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100025.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:24:55,300 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:05,689 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:07,819 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:09,026 INFO [train.py:901] (0/4) Epoch 13, batch 3050, loss[loss=0.2053, simple_loss=0.2896, pruned_loss=0.06054, over 7929.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3045, pruned_loss=0.07495, over 1616361.46 frames. 
], batch size: 20, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:25:15,852 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.522e+02 3.008e+02 4.207e+02 1.157e+03, threshold=6.017e+02, percent-clipped=6.0 +2023-02-06 13:25:16,802 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:22,889 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:29,663 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2184, 4.1975, 3.7284, 1.6740, 3.7200, 3.7697, 3.7454, 3.4603], + device='cuda:0'), covar=tensor([0.0748, 0.0571, 0.1096, 0.4897, 0.0817, 0.1067, 0.1314, 0.0883], + device='cuda:0'), in_proj_covar=tensor([0.0481, 0.0393, 0.0397, 0.0497, 0.0390, 0.0396, 0.0388, 0.0346], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:25:31,829 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1372, 2.9209, 3.2810, 1.3134, 3.3788, 2.0124, 1.4318, 2.1128], + device='cuda:0'), covar=tensor([0.0578, 0.0232, 0.0167, 0.0572, 0.0258, 0.0592, 0.0699, 0.0400], + device='cuda:0'), in_proj_covar=tensor([0.0393, 0.0335, 0.0287, 0.0394, 0.0323, 0.0481, 0.0358, 0.0362], + device='cuda:0'), out_proj_covar=tensor([1.1003e-04, 9.1589e-05, 7.8762e-05, 1.0881e-04, 8.9588e-05, 1.4336e-04, + 1.0074e-04, 1.0061e-04], device='cuda:0') +2023-02-06 13:25:34,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:40,508 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 13:25:44,735 INFO [train.py:901] (0/4) Epoch 13, batch 3100, loss[loss=0.2109, simple_loss=0.2994, pruned_loss=0.06124, over 8293.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3046, pruned_loss=0.0743, over 1617989.43 frames. ], batch size: 23, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:19,853 INFO [train.py:901] (0/4) Epoch 13, batch 3150, loss[loss=0.2243, simple_loss=0.2969, pruned_loss=0.07586, over 8084.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3049, pruned_loss=0.07494, over 1616690.91 frames. ], batch size: 21, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:24,137 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:25,935 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.491e+02 3.036e+02 4.077e+02 6.258e+02, threshold=6.072e+02, percent-clipped=1.0 +2023-02-06 13:26:26,821 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:41,732 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:55,156 INFO [train.py:901] (0/4) Epoch 13, batch 3200, loss[loss=0.212, simple_loss=0.3008, pruned_loss=0.06162, over 8567.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3054, pruned_loss=0.07549, over 1617485.41 frames. 
], batch size: 31, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:58,245 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100201.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:27:15,351 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100226.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:27:29,407 INFO [train.py:901] (0/4) Epoch 13, batch 3250, loss[loss=0.2679, simple_loss=0.3426, pruned_loss=0.0966, over 8717.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3065, pruned_loss=0.07585, over 1617920.88 frames. ], batch size: 30, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:27:35,466 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.430e+02 2.991e+02 3.670e+02 7.489e+02, threshold=5.982e+02, percent-clipped=4.0 +2023-02-06 13:27:35,626 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:04,521 INFO [train.py:901] (0/4) Epoch 13, batch 3300, loss[loss=0.2297, simple_loss=0.3102, pruned_loss=0.0746, over 8282.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3056, pruned_loss=0.07529, over 1619796.37 frames. ], batch size: 23, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:28:04,763 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4513, 1.7215, 2.8772, 1.3021, 2.0147, 1.8208, 1.5078, 1.9010], + device='cuda:0'), covar=tensor([0.1782, 0.2199, 0.0631, 0.3932, 0.1592, 0.2840, 0.1936, 0.1937], + device='cuda:0'), in_proj_covar=tensor([0.0494, 0.0533, 0.0537, 0.0591, 0.0625, 0.0558, 0.0486, 0.0615], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:28:07,512 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:22,244 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:24,891 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:39,552 INFO [train.py:901] (0/4) Epoch 13, batch 3350, loss[loss=0.2005, simple_loss=0.2731, pruned_loss=0.06398, over 7799.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3043, pruned_loss=0.07431, over 1613646.49 frames. 
], batch size: 19, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:28:39,753 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:45,562 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.599e+02 3.166e+02 3.997e+02 7.990e+02, threshold=6.333e+02, percent-clipped=6.0 +2023-02-06 13:28:54,366 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100369.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:29:02,351 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3371, 2.5722, 1.5793, 2.0571, 1.9810, 1.2325, 1.8588, 2.0900], + device='cuda:0'), covar=tensor([0.1787, 0.0530, 0.1514, 0.0856, 0.0914, 0.1956, 0.1380, 0.1192], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0236, 0.0319, 0.0300, 0.0298, 0.0322, 0.0340, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:29:13,503 INFO [train.py:901] (0/4) Epoch 13, batch 3400, loss[loss=0.2326, simple_loss=0.2891, pruned_loss=0.08805, over 7655.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3052, pruned_loss=0.07499, over 1617685.92 frames. ], batch size: 19, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:29:25,263 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:29,921 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:42,776 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:48,799 INFO [train.py:901] (0/4) Epoch 13, batch 3450, loss[loss=0.2052, simple_loss=0.2969, pruned_loss=0.05678, over 8451.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.306, pruned_loss=0.07574, over 1615695.04 frames. ], batch size: 29, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:29:54,821 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.523e+02 3.011e+02 4.006e+02 7.808e+02, threshold=6.023e+02, percent-clipped=2.0 +2023-02-06 13:30:07,207 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3334, 1.4754, 4.4336, 1.8914, 2.2694, 5.1366, 5.0737, 4.4064], + device='cuda:0'), covar=tensor([0.1081, 0.1759, 0.0273, 0.2049, 0.1295, 0.0175, 0.0292, 0.0531], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0303, 0.0268, 0.0296, 0.0281, 0.0242, 0.0361, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:30:07,918 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0118, 1.3534, 1.5884, 1.1874, 1.0051, 1.3326, 1.8304, 1.4414], + device='cuda:0'), covar=tensor([0.0498, 0.1249, 0.1679, 0.1418, 0.0607, 0.1482, 0.0625, 0.0634], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0151, 0.0190, 0.0157, 0.0101, 0.0162, 0.0113, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:30:14,288 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100484.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:30:23,411 INFO [train.py:901] (0/4) Epoch 13, batch 3500, loss[loss=0.2059, simple_loss=0.2964, pruned_loss=0.0577, over 8518.00 frames. 
], tot_loss[loss=0.2283, simple_loss=0.3061, pruned_loss=0.07528, over 1617616.16 frames. ], batch size: 28, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:30:23,584 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8590, 1.6343, 3.1200, 1.3470, 2.1637, 3.3697, 3.4289, 2.8080], + device='cuda:0'), covar=tensor([0.1080, 0.1489, 0.0372, 0.2104, 0.0999, 0.0261, 0.0549, 0.0657], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0303, 0.0269, 0.0296, 0.0281, 0.0242, 0.0362, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:30:45,069 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1492, 1.2322, 4.2797, 1.6399, 3.7585, 3.5583, 3.8715, 3.7415], + device='cuda:0'), covar=tensor([0.0552, 0.4584, 0.0520, 0.3587, 0.1092, 0.0977, 0.0561, 0.0653], + device='cuda:0'), in_proj_covar=tensor([0.0516, 0.0593, 0.0613, 0.0557, 0.0632, 0.0545, 0.0533, 0.0593], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:30:53,023 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 13:30:58,412 INFO [train.py:901] (0/4) Epoch 13, batch 3550, loss[loss=0.1637, simple_loss=0.2376, pruned_loss=0.04492, over 7422.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3047, pruned_loss=0.07527, over 1616079.87 frames. ], batch size: 17, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:31:04,442 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.564e+02 3.091e+02 3.906e+02 9.185e+02, threshold=6.182e+02, percent-clipped=3.0 +2023-02-06 13:31:33,174 INFO [train.py:901] (0/4) Epoch 13, batch 3600, loss[loss=0.2514, simple_loss=0.3249, pruned_loss=0.08894, over 8592.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3051, pruned_loss=0.07592, over 1607984.13 frames. ], batch size: 31, lr: 5.89e-03, grad_scale: 16.0 +2023-02-06 13:31:35,338 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:31:56,121 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0815, 1.7127, 1.4384, 1.6566, 1.3590, 1.2697, 1.3262, 1.4065], + device='cuda:0'), covar=tensor([0.1002, 0.0398, 0.0998, 0.0469, 0.0649, 0.1291, 0.0828, 0.0689], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0233, 0.0317, 0.0297, 0.0296, 0.0322, 0.0339, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:32:08,079 INFO [train.py:901] (0/4) Epoch 13, batch 3650, loss[loss=0.1932, simple_loss=0.2678, pruned_loss=0.05931, over 7693.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3041, pruned_loss=0.07554, over 1605385.36 frames. 
], batch size: 18, lr: 5.89e-03, grad_scale: 16.0 +2023-02-06 13:32:12,309 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9034, 1.6175, 1.6268, 1.5530, 1.0853, 1.5673, 1.7145, 1.7647], + device='cuda:0'), covar=tensor([0.0500, 0.0944, 0.1308, 0.1081, 0.0571, 0.1140, 0.0612, 0.0435], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:32:14,110 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.362e+02 3.080e+02 3.827e+02 7.938e+02, threshold=6.161e+02, percent-clipped=3.0 +2023-02-06 13:32:17,452 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 13:32:34,201 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.09 vs. limit=5.0 +2023-02-06 13:32:43,149 INFO [train.py:901] (0/4) Epoch 13, batch 3700, loss[loss=0.2027, simple_loss=0.2776, pruned_loss=0.0639, over 7796.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3038, pruned_loss=0.07566, over 1603934.29 frames. ], batch size: 19, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:32:52,912 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 13:32:55,259 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:32:57,154 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 13:33:13,264 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100740.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:33:16,637 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5308, 2.6980, 1.9682, 2.1565, 2.0997, 1.5775, 1.9840, 2.1262], + device='cuda:0'), covar=tensor([0.1445, 0.0390, 0.0995, 0.0638, 0.0711, 0.1401, 0.1019, 0.0939], + device='cuda:0'), in_proj_covar=tensor([0.0344, 0.0234, 0.0318, 0.0299, 0.0300, 0.0322, 0.0340, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:33:17,851 INFO [train.py:901] (0/4) Epoch 13, batch 3750, loss[loss=0.2196, simple_loss=0.2963, pruned_loss=0.07149, over 8340.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3048, pruned_loss=0.07539, over 1613407.04 frames. ], batch size: 26, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:33:18,665 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100748.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:33:24,690 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.543e+02 3.029e+02 3.909e+02 6.778e+02, threshold=6.059e+02, percent-clipped=2.0 +2023-02-06 13:33:30,125 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:33:30,980 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100765.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:33:52,964 INFO [train.py:901] (0/4) Epoch 13, batch 3800, loss[loss=0.2671, simple_loss=0.328, pruned_loss=0.1031, over 7338.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3053, pruned_loss=0.07592, over 1614810.66 frames. 
], batch size: 72, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:33:53,763 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6123, 1.5155, 4.7013, 1.7446, 4.2470, 3.8944, 4.2539, 4.1432], + device='cuda:0'), covar=tensor([0.0463, 0.4255, 0.0512, 0.3429, 0.0901, 0.0918, 0.0504, 0.0531], + device='cuda:0'), in_proj_covar=tensor([0.0518, 0.0595, 0.0619, 0.0557, 0.0633, 0.0546, 0.0537, 0.0597], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:34:00,246 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 13:34:27,591 INFO [train.py:901] (0/4) Epoch 13, batch 3850, loss[loss=0.2477, simple_loss=0.3281, pruned_loss=0.0837, over 8522.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3052, pruned_loss=0.0757, over 1617614.67 frames. ], batch size: 28, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:34:34,506 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.985e+02 2.830e+02 3.312e+02 3.730e+02 7.453e+02, threshold=6.624e+02, percent-clipped=3.0 +2023-02-06 13:34:50,094 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100879.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:35:01,754 INFO [train.py:901] (0/4) Epoch 13, batch 3900, loss[loss=0.2499, simple_loss=0.319, pruned_loss=0.09041, over 7975.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3058, pruned_loss=0.07612, over 1619477.51 frames. ], batch size: 21, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:35:01,758 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 13:35:33,298 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1887, 1.8437, 2.5346, 2.0563, 2.2952, 2.0739, 1.7380, 1.0207], + device='cuda:0'), covar=tensor([0.3836, 0.3576, 0.1250, 0.2490, 0.1890, 0.2271, 0.1661, 0.3994], + device='cuda:0'), in_proj_covar=tensor([0.0891, 0.0887, 0.0734, 0.0862, 0.0941, 0.0813, 0.0705, 0.0776], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:35:37,127 INFO [train.py:901] (0/4) Epoch 13, batch 3950, loss[loss=0.1997, simple_loss=0.2798, pruned_loss=0.05977, over 7807.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3059, pruned_loss=0.07606, over 1615435.87 frames. 
], batch size: 19, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:35:44,007 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.489e+02 3.011e+02 3.855e+02 9.802e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-06 13:35:54,141 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:36:10,578 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2644, 1.1897, 3.3696, 1.0307, 2.9518, 2.8390, 3.0210, 2.9884], + device='cuda:0'), covar=tensor([0.0672, 0.3758, 0.0728, 0.3476, 0.1387, 0.0978, 0.0695, 0.0794], + device='cuda:0'), in_proj_covar=tensor([0.0513, 0.0588, 0.0612, 0.0551, 0.0630, 0.0543, 0.0533, 0.0593], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 13:36:11,866 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:36:12,360 INFO [train.py:901] (0/4) Epoch 13, batch 4000, loss[loss=0.206, simple_loss=0.289, pruned_loss=0.06151, over 8111.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3063, pruned_loss=0.07612, over 1613350.19 frames. ], batch size: 23, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:36:20,494 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.01 vs. limit=5.0 +2023-02-06 13:36:47,688 INFO [train.py:901] (0/4) Epoch 13, batch 4050, loss[loss=0.2244, simple_loss=0.3041, pruned_loss=0.07235, over 8473.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.306, pruned_loss=0.07675, over 1607205.72 frames. ], batch size: 25, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:36:54,252 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.645e+02 3.184e+02 3.816e+02 9.518e+02, threshold=6.368e+02, percent-clipped=3.0 +2023-02-06 13:37:18,350 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101092.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:37:21,625 INFO [train.py:901] (0/4) Epoch 13, batch 4100, loss[loss=0.208, simple_loss=0.3006, pruned_loss=0.05765, over 8336.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3056, pruned_loss=0.0758, over 1612200.54 frames. ], batch size: 25, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:37:41,388 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:37:47,947 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:37:55,651 INFO [train.py:901] (0/4) Epoch 13, batch 4150, loss[loss=0.245, simple_loss=0.3286, pruned_loss=0.0807, over 7789.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.306, pruned_loss=0.0761, over 1608652.47 frames. 
], batch size: 19, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:38:02,996 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.783e+02 3.401e+02 4.642e+02 1.010e+03, threshold=6.803e+02, percent-clipped=7.0 +2023-02-06 13:38:05,278 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:38:05,859 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2540, 3.1398, 2.8847, 1.5880, 2.8889, 2.8989, 2.8646, 2.6899], + device='cuda:0'), covar=tensor([0.1183, 0.0858, 0.1319, 0.4391, 0.1048, 0.1329, 0.1704, 0.1290], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0391, 0.0401, 0.0500, 0.0394, 0.0395, 0.0389, 0.0344], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:38:06,624 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9366, 1.2361, 1.5331, 1.1337, 0.9523, 1.2814, 1.6495, 1.5641], + device='cuda:0'), covar=tensor([0.0542, 0.1748, 0.2374, 0.1918, 0.0682, 0.2054, 0.0774, 0.0719], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0157, 0.0102, 0.0164, 0.0116, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:38:30,234 INFO [train.py:901] (0/4) Epoch 13, batch 4200, loss[loss=0.2506, simple_loss=0.3181, pruned_loss=0.09157, over 6832.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3054, pruned_loss=0.07582, over 1610248.05 frames. ], batch size: 71, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:38:37,978 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101207.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:38:39,773 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 13:38:54,609 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 13:39:05,863 INFO [train.py:901] (0/4) Epoch 13, batch 4250, loss[loss=0.2164, simple_loss=0.2803, pruned_loss=0.07625, over 7515.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3046, pruned_loss=0.07545, over 1608795.09 frames. ], batch size: 18, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:39:12,480 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 2.514e+02 3.154e+02 3.992e+02 7.648e+02, threshold=6.307e+02, percent-clipped=3.0 +2023-02-06 13:39:16,506 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 13:39:40,003 INFO [train.py:901] (0/4) Epoch 13, batch 4300, loss[loss=0.1975, simple_loss=0.2683, pruned_loss=0.06339, over 7541.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3027, pruned_loss=0.07415, over 1607466.64 frames. ], batch size: 18, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:39:58,268 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 13:40:01,018 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-06 13:40:14,572 INFO [train.py:901] (0/4) Epoch 13, batch 4350, loss[loss=0.289, simple_loss=0.3478, pruned_loss=0.1151, over 6816.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3042, pruned_loss=0.07485, over 1612344.32 frames. 
], batch size: 71, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:14,795 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5758, 1.6354, 2.0935, 1.5458, 1.1781, 2.1101, 0.2680, 1.4002], + device='cuda:0'), covar=tensor([0.2339, 0.1658, 0.0419, 0.1359, 0.3825, 0.0482, 0.3068, 0.1424], + device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0176, 0.0107, 0.0219, 0.0256, 0.0111, 0.0164, 0.0170], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 13:40:21,335 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.682e+02 3.184e+02 4.441e+02 9.358e+02, threshold=6.368e+02, percent-clipped=11.0 +2023-02-06 13:40:40,928 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7084, 1.5243, 3.3628, 1.3873, 2.3412, 3.6166, 3.6749, 3.0730], + device='cuda:0'), covar=tensor([0.1140, 0.1539, 0.0310, 0.2157, 0.0852, 0.0235, 0.0446, 0.0575], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0302, 0.0266, 0.0296, 0.0278, 0.0239, 0.0360, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:40:49,418 INFO [train.py:901] (0/4) Epoch 13, batch 4400, loss[loss=0.1703, simple_loss=0.2528, pruned_loss=0.0439, over 7698.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3035, pruned_loss=0.07477, over 1604429.74 frames. ], batch size: 18, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:49,429 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 13:41:23,440 INFO [train.py:901] (0/4) Epoch 13, batch 4450, loss[loss=0.217, simple_loss=0.2968, pruned_loss=0.06858, over 8704.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3044, pruned_loss=0.07512, over 1609527.42 frames. ], batch size: 39, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:41:28,826 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 13:41:30,664 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 2.700e+02 3.319e+02 4.103e+02 1.285e+03, threshold=6.638e+02, percent-clipped=3.0 +2023-02-06 13:41:34,860 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101463.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:41:38,577 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:41:52,065 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101488.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:41:57,473 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9565, 1.6474, 1.4272, 1.5887, 1.3438, 1.2320, 1.1914, 1.3327], + device='cuda:0'), covar=tensor([0.1057, 0.0405, 0.1059, 0.0497, 0.0675, 0.1285, 0.0886, 0.0658], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0233, 0.0314, 0.0295, 0.0298, 0.0319, 0.0338, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:41:57,884 INFO [train.py:901] (0/4) Epoch 13, batch 4500, loss[loss=0.2503, simple_loss=0.3269, pruned_loss=0.08682, over 8362.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3056, pruned_loss=0.07554, over 1613750.50 frames. 
], batch size: 24, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:42:11,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5500, 2.1178, 3.4160, 1.3168, 2.4555, 1.9730, 1.6212, 2.5163], + device='cuda:0'), covar=tensor([0.1707, 0.2110, 0.0735, 0.3991, 0.1646, 0.2833, 0.1914, 0.1984], + device='cuda:0'), in_proj_covar=tensor([0.0494, 0.0535, 0.0537, 0.0590, 0.0626, 0.0562, 0.0485, 0.0617], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:42:22,189 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 13:42:33,108 INFO [train.py:901] (0/4) Epoch 13, batch 4550, loss[loss=0.2164, simple_loss=0.3016, pruned_loss=0.06561, over 8466.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.305, pruned_loss=0.07514, over 1616834.48 frames. ], batch size: 27, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:42:39,891 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.403e+02 2.986e+02 3.546e+02 6.918e+02, threshold=5.973e+02, percent-clipped=1.0 +2023-02-06 13:42:58,926 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:43:08,200 INFO [train.py:901] (0/4) Epoch 13, batch 4600, loss[loss=0.2085, simple_loss=0.2919, pruned_loss=0.06254, over 7657.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3047, pruned_loss=0.07485, over 1614682.04 frames. ], batch size: 19, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:43:42,608 INFO [train.py:901] (0/4) Epoch 13, batch 4650, loss[loss=0.1913, simple_loss=0.279, pruned_loss=0.05179, over 8527.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3047, pruned_loss=0.07502, over 1614344.06 frames. ], batch size: 34, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:43:49,456 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.500e+02 2.989e+02 3.844e+02 7.619e+02, threshold=5.978e+02, percent-clipped=4.0 +2023-02-06 13:44:01,145 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0760, 1.6385, 1.3655, 1.6259, 1.4269, 1.1759, 1.3290, 1.3302], + device='cuda:0'), covar=tensor([0.0979, 0.0434, 0.1149, 0.0473, 0.0659, 0.1361, 0.0769, 0.0622], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0235, 0.0316, 0.0295, 0.0300, 0.0321, 0.0340, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:44:17,683 INFO [train.py:901] (0/4) Epoch 13, batch 4700, loss[loss=0.1968, simple_loss=0.268, pruned_loss=0.06277, over 7802.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3043, pruned_loss=0.07523, over 1614354.33 frames. ], batch size: 19, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:44:21,749 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:44:52,091 INFO [train.py:901] (0/4) Epoch 13, batch 4750, loss[loss=0.2417, simple_loss=0.2904, pruned_loss=0.09647, over 7447.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3034, pruned_loss=0.07459, over 1613734.69 frames. 
], batch size: 17, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:44:59,502 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.558e+02 3.081e+02 3.778e+02 8.564e+02, threshold=6.162e+02, percent-clipped=2.0 +2023-02-06 13:45:21,976 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 13:45:24,391 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 13:45:27,102 INFO [train.py:901] (0/4) Epoch 13, batch 4800, loss[loss=0.1825, simple_loss=0.2639, pruned_loss=0.05058, over 7709.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3028, pruned_loss=0.07392, over 1611683.22 frames. ], batch size: 18, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:45:57,600 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101840.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:46:01,954 INFO [train.py:901] (0/4) Epoch 13, batch 4850, loss[loss=0.2135, simple_loss=0.2886, pruned_loss=0.06916, over 8244.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3031, pruned_loss=0.07405, over 1613760.67 frames. ], batch size: 22, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:46:08,646 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.589e+02 3.137e+02 3.918e+02 7.572e+02, threshold=6.274e+02, percent-clipped=4.0 +2023-02-06 13:46:14,079 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 13:46:14,953 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:46:36,888 INFO [train.py:901] (0/4) Epoch 13, batch 4900, loss[loss=0.2941, simple_loss=0.351, pruned_loss=0.1186, over 7303.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3038, pruned_loss=0.07464, over 1611716.67 frames. ], batch size: 72, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:46:52,169 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 13:47:06,369 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101938.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:47:11,284 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 13:47:12,291 INFO [train.py:901] (0/4) Epoch 13, batch 4950, loss[loss=0.2501, simple_loss=0.3242, pruned_loss=0.08803, over 8419.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.303, pruned_loss=0.0742, over 1614414.39 frames. 
], batch size: 29, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:47:15,207 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5675, 2.8016, 2.0444, 2.3929, 2.3217, 1.5944, 2.0885, 2.1739], + device='cuda:0'), covar=tensor([0.1384, 0.0333, 0.0879, 0.0587, 0.0532, 0.1299, 0.0966, 0.0959], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0233, 0.0314, 0.0293, 0.0296, 0.0321, 0.0339, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:47:19,104 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.432e+02 3.023e+02 3.670e+02 7.494e+02, threshold=6.046e+02, percent-clipped=3.0 +2023-02-06 13:47:30,002 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9977, 1.8359, 1.9015, 1.7765, 1.0817, 1.8395, 2.2404, 2.2225], + device='cuda:0'), covar=tensor([0.0421, 0.1111, 0.1586, 0.1323, 0.0605, 0.1362, 0.0608, 0.0525], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0163, 0.0114, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:47:46,757 INFO [train.py:901] (0/4) Epoch 13, batch 5000, loss[loss=0.2075, simple_loss=0.298, pruned_loss=0.05849, over 8450.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3021, pruned_loss=0.07399, over 1610292.15 frames. ], batch size: 27, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:47:48,887 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-102000.pt +2023-02-06 13:47:53,363 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:48:22,480 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:48:23,084 INFO [train.py:901] (0/4) Epoch 13, batch 5050, loss[loss=0.264, simple_loss=0.337, pruned_loss=0.09545, over 8106.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.302, pruned_loss=0.07394, over 1613280.14 frames. ], batch size: 23, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:48:29,968 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.626e+02 3.300e+02 4.185e+02 9.088e+02, threshold=6.599e+02, percent-clipped=3.0 +2023-02-06 13:48:54,016 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 13:48:56,649 INFO [train.py:901] (0/4) Epoch 13, batch 5100, loss[loss=0.2287, simple_loss=0.3173, pruned_loss=0.07008, over 8439.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3023, pruned_loss=0.07382, over 1612710.13 frames. ], batch size: 27, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:49:31,560 INFO [train.py:901] (0/4) Epoch 13, batch 5150, loss[loss=0.196, simple_loss=0.281, pruned_loss=0.05546, over 8090.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3024, pruned_loss=0.07367, over 1617301.89 frames. 
], batch size: 21, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:49:38,302 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.413e+02 2.853e+02 3.425e+02 7.647e+02, threshold=5.706e+02, percent-clipped=3.0 +2023-02-06 13:49:41,790 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:50:06,640 INFO [train.py:901] (0/4) Epoch 13, batch 5200, loss[loss=0.2155, simple_loss=0.2908, pruned_loss=0.07008, over 8075.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3024, pruned_loss=0.07393, over 1616718.60 frames. ], batch size: 21, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:50:41,852 INFO [train.py:901] (0/4) Epoch 13, batch 5250, loss[loss=0.224, simple_loss=0.3063, pruned_loss=0.07079, over 8448.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3042, pruned_loss=0.0749, over 1619113.25 frames. ], batch size: 27, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:50:48,569 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.565e+02 3.047e+02 3.925e+02 1.157e+03, threshold=6.094e+02, percent-clipped=6.0 +2023-02-06 13:50:51,018 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-06 13:50:53,900 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 13:51:06,836 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102282.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:51:16,473 INFO [train.py:901] (0/4) Epoch 13, batch 5300, loss[loss=0.2125, simple_loss=0.2976, pruned_loss=0.06375, over 7813.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3023, pruned_loss=0.07389, over 1614216.83 frames. ], batch size: 20, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:51:51,011 INFO [train.py:901] (0/4) Epoch 13, batch 5350, loss[loss=0.2249, simple_loss=0.2917, pruned_loss=0.07903, over 7656.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3019, pruned_loss=0.07369, over 1607574.26 frames. ], batch size: 19, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:51:52,455 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:51:52,583 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6424, 2.8267, 1.8316, 2.1979, 2.2918, 1.5497, 2.1151, 2.2111], + device='cuda:0'), covar=tensor([0.1271, 0.0324, 0.1089, 0.0667, 0.0728, 0.1417, 0.1015, 0.0898], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0236, 0.0318, 0.0297, 0.0298, 0.0324, 0.0345, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:51:57,801 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.535e+02 3.049e+02 3.805e+02 7.372e+02, threshold=6.098e+02, percent-clipped=2.0 +2023-02-06 13:52:08,203 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-06 13:52:26,059 INFO [train.py:901] (0/4) Epoch 13, batch 5400, loss[loss=0.2401, simple_loss=0.3064, pruned_loss=0.08689, over 7923.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3034, pruned_loss=0.07457, over 1609920.53 frames. 
], batch size: 20, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:52:26,259 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102397.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:52:40,296 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:52:56,871 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:53:00,134 INFO [train.py:901] (0/4) Epoch 13, batch 5450, loss[loss=0.1828, simple_loss=0.2526, pruned_loss=0.05653, over 7428.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3039, pruned_loss=0.07518, over 1607492.10 frames. ], batch size: 17, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:53:07,656 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.724e+02 3.222e+02 3.900e+02 7.023e+02, threshold=6.444e+02, percent-clipped=3.0 +2023-02-06 13:53:12,594 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:53:34,968 INFO [train.py:901] (0/4) Epoch 13, batch 5500, loss[loss=0.2657, simple_loss=0.3445, pruned_loss=0.09343, over 8469.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3041, pruned_loss=0.07528, over 1606125.39 frames. ], batch size: 27, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:53:41,598 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 13:54:03,089 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0284, 1.5443, 3.2654, 1.3607, 2.2091, 3.6308, 3.7064, 3.0859], + device='cuda:0'), covar=tensor([0.0991, 0.1530, 0.0325, 0.2171, 0.1027, 0.0221, 0.0409, 0.0577], + device='cuda:0'), in_proj_covar=tensor([0.0268, 0.0300, 0.0265, 0.0296, 0.0277, 0.0237, 0.0360, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:54:09,471 INFO [train.py:901] (0/4) Epoch 13, batch 5550, loss[loss=0.1991, simple_loss=0.2863, pruned_loss=0.056, over 8469.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3029, pruned_loss=0.07479, over 1603379.41 frames. ], batch size: 25, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:54:15,944 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.279e+02 3.010e+02 3.933e+02 6.976e+02, threshold=6.019e+02, percent-clipped=1.0 +2023-02-06 13:54:43,200 INFO [train.py:901] (0/4) Epoch 13, batch 5600, loss[loss=0.2299, simple_loss=0.3219, pruned_loss=0.06898, over 8461.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3035, pruned_loss=0.07534, over 1602312.74 frames. 
], batch size: 29, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:54:44,041 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4997, 1.5084, 4.2006, 1.8104, 2.2068, 4.8826, 4.9223, 4.1368], + device='cuda:0'), covar=tensor([0.0927, 0.1782, 0.0303, 0.2064, 0.1223, 0.0167, 0.0332, 0.0565], + device='cuda:0'), in_proj_covar=tensor([0.0269, 0.0302, 0.0266, 0.0296, 0.0278, 0.0239, 0.0360, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:55:04,659 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7130, 1.9636, 2.1190, 1.1846, 2.1802, 1.5371, 0.5338, 1.9392], + device='cuda:0'), covar=tensor([0.0339, 0.0221, 0.0160, 0.0392, 0.0269, 0.0520, 0.0572, 0.0165], + device='cuda:0'), in_proj_covar=tensor([0.0406, 0.0341, 0.0293, 0.0401, 0.0332, 0.0490, 0.0368, 0.0373], + device='cuda:0'), out_proj_covar=tensor([1.1347e-04, 9.3025e-05, 7.9922e-05, 1.1031e-04, 9.1562e-05, 1.4528e-04, + 1.0327e-04, 1.0324e-04], device='cuda:0') +2023-02-06 13:55:18,283 INFO [train.py:901] (0/4) Epoch 13, batch 5650, loss[loss=0.1995, simple_loss=0.2767, pruned_loss=0.06116, over 7927.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3037, pruned_loss=0.07519, over 1604071.12 frames. ], batch size: 20, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:55:22,471 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102653.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:55:24,852 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.730e+02 3.267e+02 4.266e+02 8.129e+02, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 13:55:39,200 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102678.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:55:43,637 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 13:55:52,506 INFO [train.py:901] (0/4) Epoch 13, batch 5700, loss[loss=0.1751, simple_loss=0.2519, pruned_loss=0.04909, over 7426.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.306, pruned_loss=0.07716, over 1600076.57 frames. ], batch size: 17, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:56:08,739 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:56:25,984 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:56:27,757 INFO [train.py:901] (0/4) Epoch 13, batch 5750, loss[loss=0.2088, simple_loss=0.2811, pruned_loss=0.06821, over 7647.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3066, pruned_loss=0.07692, over 1606046.40 frames. 
], batch size: 19, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:56:34,419 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 2.514e+02 3.075e+02 4.012e+02 7.214e+02, threshold=6.150e+02, percent-clipped=2.0 +2023-02-06 13:56:35,995 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4084, 1.8409, 2.9678, 1.1563, 2.3298, 1.8516, 1.5448, 2.1447], + device='cuda:0'), covar=tensor([0.1810, 0.2201, 0.0796, 0.4270, 0.1584, 0.3036, 0.2046, 0.2197], + device='cuda:0'), in_proj_covar=tensor([0.0491, 0.0529, 0.0530, 0.0585, 0.0617, 0.0557, 0.0481, 0.0608], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 13:56:37,296 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3248, 2.6718, 1.7884, 2.1157, 2.0639, 1.4130, 1.8493, 2.0413], + device='cuda:0'), covar=tensor([0.1428, 0.0318, 0.1065, 0.0613, 0.0656, 0.1435, 0.1009, 0.0845], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0237, 0.0323, 0.0301, 0.0300, 0.0327, 0.0346, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 13:56:47,306 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 13:57:01,392 INFO [train.py:901] (0/4) Epoch 13, batch 5800, loss[loss=0.2246, simple_loss=0.2919, pruned_loss=0.07864, over 8494.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3055, pruned_loss=0.07606, over 1608148.13 frames. ], batch size: 26, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:57:28,961 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6690, 1.6228, 2.1313, 1.5275, 1.1479, 2.1156, 0.2936, 1.3350], + device='cuda:0'), covar=tensor([0.2452, 0.1524, 0.0417, 0.1508, 0.3693, 0.0358, 0.2747, 0.1662], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0174, 0.0106, 0.0220, 0.0255, 0.0110, 0.0164, 0.0172], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 13:57:36,623 INFO [train.py:901] (0/4) Epoch 13, batch 5850, loss[loss=0.2128, simple_loss=0.2864, pruned_loss=0.06961, over 7934.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3047, pruned_loss=0.0759, over 1605469.29 frames. ], batch size: 20, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:57:36,815 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8227, 1.8343, 1.7999, 2.6071, 1.1653, 1.5124, 1.8627, 2.1394], + device='cuda:0'), covar=tensor([0.0787, 0.1044, 0.1024, 0.0402, 0.1220, 0.1469, 0.0855, 0.0770], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0210, 0.0254, 0.0212, 0.0216, 0.0254, 0.0256, 0.0218], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 13:57:43,164 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.218e+02 2.874e+02 3.517e+02 7.476e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-06 13:57:52,582 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102869.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:58:11,316 INFO [train.py:901] (0/4) Epoch 13, batch 5900, loss[loss=0.239, simple_loss=0.3292, pruned_loss=0.07437, over 8359.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3048, pruned_loss=0.07572, over 1611368.28 frames. 
], batch size: 49, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:58:16,829 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:58:29,671 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0754, 1.3707, 1.5675, 1.3568, 0.9283, 1.4278, 1.6533, 1.5109], + device='cuda:0'), covar=tensor([0.0484, 0.1307, 0.1701, 0.1379, 0.0628, 0.1533, 0.0686, 0.0635], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0101, 0.0162, 0.0113, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 13:58:42,653 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0 +2023-02-06 13:58:46,262 INFO [train.py:901] (0/4) Epoch 13, batch 5950, loss[loss=0.2907, simple_loss=0.3476, pruned_loss=0.1169, over 6875.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3054, pruned_loss=0.07556, over 1614592.90 frames. ], batch size: 71, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:58:52,886 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.537e+02 3.124e+02 4.010e+02 1.248e+03, threshold=6.247e+02, percent-clipped=9.0 +2023-02-06 13:58:58,993 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0786, 2.6432, 3.0600, 1.3273, 3.1461, 1.7606, 1.5768, 2.0829], + device='cuda:0'), covar=tensor([0.0646, 0.0282, 0.0184, 0.0629, 0.0421, 0.0744, 0.0691, 0.0426], + device='cuda:0'), in_proj_covar=tensor([0.0410, 0.0346, 0.0296, 0.0406, 0.0334, 0.0496, 0.0372, 0.0376], + device='cuda:0'), out_proj_covar=tensor([1.1459e-04, 9.4281e-05, 8.0591e-05, 1.1148e-04, 9.2291e-05, 1.4726e-04, + 1.0437e-04, 1.0401e-04], device='cuda:0') +2023-02-06 13:59:21,495 INFO [train.py:901] (0/4) Epoch 13, batch 6000, loss[loss=0.1973, simple_loss=0.2747, pruned_loss=0.05998, over 7938.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3053, pruned_loss=0.07501, over 1614084.11 frames. ], batch size: 20, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:59:21,496 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 13:59:36,610 INFO [train.py:935] (0/4) Epoch 13, validation: loss=0.1836, simple_loss=0.2836, pruned_loss=0.04176, over 944034.00 frames. +2023-02-06 13:59:36,611 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 13:59:41,487 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 14:00:03,143 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5587, 1.3097, 4.6700, 1.7469, 4.0686, 3.8817, 4.2421, 4.1113], + device='cuda:0'), covar=tensor([0.0479, 0.4792, 0.0515, 0.3909, 0.1150, 0.0990, 0.0526, 0.0619], + device='cuda:0'), in_proj_covar=tensor([0.0512, 0.0579, 0.0603, 0.0552, 0.0623, 0.0529, 0.0523, 0.0582], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:00:10,254 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 14:00:11,023 INFO [train.py:901] (0/4) Epoch 13, batch 6050, loss[loss=0.1896, simple_loss=0.264, pruned_loss=0.05757, over 7704.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3038, pruned_loss=0.07435, over 1613629.93 frames. 
], batch size: 18, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:00:18,297 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.480e+02 3.014e+02 3.999e+02 8.436e+02, threshold=6.027e+02, percent-clipped=4.0 +2023-02-06 14:00:45,649 INFO [train.py:901] (0/4) Epoch 13, batch 6100, loss[loss=0.196, simple_loss=0.2802, pruned_loss=0.05595, over 8358.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3045, pruned_loss=0.07466, over 1610807.96 frames. ], batch size: 24, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:00:52,616 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6409, 1.8951, 1.9613, 1.2381, 2.0765, 1.4537, 0.4630, 1.8435], + device='cuda:0'), covar=tensor([0.0380, 0.0246, 0.0196, 0.0347, 0.0241, 0.0678, 0.0583, 0.0180], + device='cuda:0'), in_proj_covar=tensor([0.0409, 0.0343, 0.0296, 0.0405, 0.0333, 0.0496, 0.0372, 0.0376], + device='cuda:0'), out_proj_covar=tensor([1.1437e-04, 9.3582e-05, 8.0512e-05, 1.1142e-04, 9.2098e-05, 1.4711e-04, + 1.0433e-04, 1.0395e-04], device='cuda:0') +2023-02-06 14:00:57,237 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0574, 1.7007, 3.4483, 1.6283, 2.3487, 3.8316, 3.8366, 3.2472], + device='cuda:0'), covar=tensor([0.1045, 0.1516, 0.0306, 0.1843, 0.0980, 0.0205, 0.0468, 0.0564], + device='cuda:0'), in_proj_covar=tensor([0.0265, 0.0299, 0.0262, 0.0293, 0.0274, 0.0237, 0.0357, 0.0292], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:00:58,574 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103116.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:01:14,154 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 14:01:19,569 INFO [train.py:901] (0/4) Epoch 13, batch 6150, loss[loss=0.2183, simple_loss=0.2807, pruned_loss=0.07796, over 7432.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3048, pruned_loss=0.07521, over 1610591.95 frames. ], batch size: 17, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:01:21,548 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:01:26,197 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.434e+02 3.117e+02 4.172e+02 7.466e+02, threshold=6.235e+02, percent-clipped=2.0 +2023-02-06 14:01:55,077 INFO [train.py:901] (0/4) Epoch 13, batch 6200, loss[loss=0.2387, simple_loss=0.3221, pruned_loss=0.07767, over 8250.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3037, pruned_loss=0.07473, over 1611169.06 frames. ], batch size: 24, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:02:06,142 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:02:30,996 INFO [train.py:901] (0/4) Epoch 13, batch 6250, loss[loss=0.2084, simple_loss=0.2933, pruned_loss=0.06172, over 8200.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3023, pruned_loss=0.0735, over 1611327.29 frames. 
], batch size: 23, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:02:32,350 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103249.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:02:32,480 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0494, 4.0029, 2.4500, 2.9430, 3.0120, 2.2608, 2.9697, 3.1371], + device='cuda:0'), covar=tensor([0.1577, 0.0317, 0.1039, 0.0687, 0.0705, 0.1280, 0.0940, 0.0938], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0239, 0.0325, 0.0299, 0.0300, 0.0326, 0.0344, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:02:37,848 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.482e+02 2.950e+02 3.630e+02 6.819e+02, threshold=5.900e+02, percent-clipped=4.0 +2023-02-06 14:02:57,950 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5439, 2.3875, 1.6037, 2.1194, 2.0344, 1.3565, 1.9441, 2.1088], + device='cuda:0'), covar=tensor([0.1199, 0.0364, 0.1145, 0.0554, 0.0677, 0.1419, 0.0803, 0.0677], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0238, 0.0323, 0.0298, 0.0299, 0.0324, 0.0341, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:03:06,053 INFO [train.py:901] (0/4) Epoch 13, batch 6300, loss[loss=0.2589, simple_loss=0.3377, pruned_loss=0.09004, over 8619.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3015, pruned_loss=0.07338, over 1610376.35 frames. ], batch size: 39, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:03:27,927 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:03:40,550 INFO [train.py:901] (0/4) Epoch 13, batch 6350, loss[loss=0.2184, simple_loss=0.2891, pruned_loss=0.07385, over 8066.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3011, pruned_loss=0.07346, over 1608587.22 frames. ], batch size: 21, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:03:48,226 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.547e+02 3.093e+02 3.716e+02 8.603e+02, threshold=6.185e+02, percent-clipped=3.0 +2023-02-06 14:03:52,994 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:04:06,344 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103384.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:04:14,868 INFO [train.py:901] (0/4) Epoch 13, batch 6400, loss[loss=0.1825, simple_loss=0.2656, pruned_loss=0.0497, over 8087.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3011, pruned_loss=0.07385, over 1601719.39 frames. ], batch size: 21, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:04:49,441 INFO [train.py:901] (0/4) Epoch 13, batch 6450, loss[loss=0.2228, simple_loss=0.2887, pruned_loss=0.07843, over 7654.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3015, pruned_loss=0.07415, over 1600087.89 frames. 
], batch size: 19, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:04:56,176 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.528e+02 3.186e+02 3.863e+02 6.544e+02, threshold=6.372e+02, percent-clipped=1.0 +2023-02-06 14:04:58,224 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103460.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:05:13,646 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2412, 1.2322, 1.4793, 1.2041, 0.7215, 1.2771, 1.1979, 1.1987], + device='cuda:0'), covar=tensor([0.0563, 0.1280, 0.1696, 0.1413, 0.0575, 0.1534, 0.0681, 0.0651], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0155, 0.0100, 0.0161, 0.0113, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 14:05:22,245 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:05:24,239 INFO [train.py:901] (0/4) Epoch 13, batch 6500, loss[loss=0.2246, simple_loss=0.3077, pruned_loss=0.07075, over 8498.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3045, pruned_loss=0.07595, over 1604480.84 frames. ], batch size: 28, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:05:58,666 INFO [train.py:901] (0/4) Epoch 13, batch 6550, loss[loss=0.2086, simple_loss=0.2754, pruned_loss=0.07093, over 7696.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3061, pruned_loss=0.07676, over 1612190.04 frames. ], batch size: 18, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:06:05,490 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.442e+02 3.089e+02 4.027e+02 9.292e+02, threshold=6.177e+02, percent-clipped=8.0 +2023-02-06 14:06:18,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103575.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:24,184 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 14:06:24,388 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:34,121 INFO [train.py:901] (0/4) Epoch 13, batch 6600, loss[loss=0.242, simple_loss=0.3168, pruned_loss=0.0836, over 8667.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3058, pruned_loss=0.07649, over 1612229.98 frames. ], batch size: 39, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:06:42,475 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:42,500 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:43,681 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 14:06:49,849 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:52,145 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-02-06 14:06:54,802 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6569, 2.2286, 3.5954, 2.5703, 3.1030, 2.4570, 2.1017, 1.7988], + device='cuda:0'), covar=tensor([0.4231, 0.4762, 0.1288, 0.2966, 0.2097, 0.2257, 0.1584, 0.4711], + device='cuda:0'), in_proj_covar=tensor([0.0897, 0.0889, 0.0743, 0.0868, 0.0948, 0.0824, 0.0708, 0.0776], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:07:07,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:07:09,065 INFO [train.py:901] (0/4) Epoch 13, batch 6650, loss[loss=0.2442, simple_loss=0.3156, pruned_loss=0.08642, over 8199.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3051, pruned_loss=0.07573, over 1612167.88 frames. ], batch size: 23, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:07:16,599 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.459e+02 2.800e+02 3.637e+02 6.016e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-06 14:07:43,993 INFO [train.py:901] (0/4) Epoch 13, batch 6700, loss[loss=0.2467, simple_loss=0.3103, pruned_loss=0.09156, over 7971.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3051, pruned_loss=0.07567, over 1614819.65 frames. ], batch size: 21, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:08:06,460 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103728.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:08:19,449 INFO [train.py:901] (0/4) Epoch 13, batch 6750, loss[loss=0.2443, simple_loss=0.3156, pruned_loss=0.08647, over 8578.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3056, pruned_loss=0.07577, over 1620210.39 frames. ], batch size: 34, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:08:26,133 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.551e+02 3.234e+02 3.983e+02 1.044e+03, threshold=6.469e+02, percent-clipped=6.0 +2023-02-06 14:08:26,990 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:08:44,309 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:08:53,553 INFO [train.py:901] (0/4) Epoch 13, batch 6800, loss[loss=0.2481, simple_loss=0.3194, pruned_loss=0.08838, over 8475.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3063, pruned_loss=0.07613, over 1618932.05 frames. ], batch size: 29, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:08:58,339 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 14:09:17,169 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103831.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:25,225 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103843.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:09:27,548 INFO [train.py:901] (0/4) Epoch 13, batch 6850, loss[loss=0.3025, simple_loss=0.3621, pruned_loss=0.1215, over 8330.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3052, pruned_loss=0.0758, over 1613894.47 frames. 
], batch size: 49, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:09:33,726 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:34,155 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.955e+02 2.670e+02 3.153e+02 3.957e+02 9.275e+02, threshold=6.306e+02, percent-clipped=2.0 +2023-02-06 14:09:40,240 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:44,802 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 14:09:57,529 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:10:00,222 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103894.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:10:02,115 INFO [train.py:901] (0/4) Epoch 13, batch 6900, loss[loss=0.2117, simple_loss=0.2995, pruned_loss=0.06192, over 8259.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3065, pruned_loss=0.07638, over 1615555.96 frames. ], batch size: 24, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:10:35,837 INFO [train.py:901] (0/4) Epoch 13, batch 6950, loss[loss=0.2115, simple_loss=0.2859, pruned_loss=0.06848, over 8126.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3055, pruned_loss=0.07564, over 1616943.84 frames. ], batch size: 22, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:10:43,032 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.539e+02 3.074e+02 3.917e+02 9.810e+02, threshold=6.147e+02, percent-clipped=9.0 +2023-02-06 14:10:53,183 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 14:11:00,840 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5426, 2.1475, 3.3845, 1.3458, 2.5519, 2.0142, 1.6973, 2.3735], + device='cuda:0'), covar=tensor([0.1768, 0.2226, 0.0748, 0.3957, 0.1609, 0.2802, 0.1886, 0.2157], + device='cuda:0'), in_proj_covar=tensor([0.0496, 0.0537, 0.0531, 0.0588, 0.0620, 0.0559, 0.0483, 0.0613], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:11:09,733 INFO [train.py:901] (0/4) Epoch 13, batch 7000, loss[loss=0.247, simple_loss=0.3009, pruned_loss=0.09654, over 7539.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3051, pruned_loss=0.07561, over 1616593.89 frames. ], batch size: 18, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:11:11,852 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-104000.pt +2023-02-06 14:11:18,193 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104008.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:11:19,230 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0 +2023-02-06 14:11:23,537 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:11:35,614 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2699, 2.3906, 1.8705, 2.9698, 1.4456, 1.7328, 2.1174, 2.3592], + device='cuda:0'), covar=tensor([0.0618, 0.0798, 0.0966, 0.0337, 0.1180, 0.1297, 0.0895, 0.0667], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0208, 0.0252, 0.0210, 0.0215, 0.0252, 0.0256, 0.0215], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:11:43,224 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.34 vs. limit=5.0 +2023-02-06 14:11:44,816 INFO [train.py:901] (0/4) Epoch 13, batch 7050, loss[loss=0.2142, simple_loss=0.3024, pruned_loss=0.06304, over 8491.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3056, pruned_loss=0.07598, over 1611921.87 frames. ], batch size: 29, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:11:52,797 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.385e+02 2.879e+02 3.637e+02 6.044e+02, threshold=5.759e+02, percent-clipped=0.0 +2023-02-06 14:11:58,854 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:18,828 INFO [train.py:901] (0/4) Epoch 13, batch 7100, loss[loss=0.1861, simple_loss=0.267, pruned_loss=0.05257, over 8236.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3043, pruned_loss=0.0752, over 1610641.89 frames. ], batch size: 22, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:12:21,096 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104099.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:12:22,944 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:37,505 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104124.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:12:39,421 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104127.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:45,744 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4820, 1.4358, 1.7885, 1.4133, 1.1238, 1.8129, 0.1375, 1.0929], + device='cuda:0'), covar=tensor([0.2382, 0.1461, 0.0514, 0.1214, 0.3391, 0.0440, 0.2784, 0.1607], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0175, 0.0107, 0.0220, 0.0255, 0.0110, 0.0164, 0.0171], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 14:12:52,701 INFO [train.py:901] (0/4) Epoch 13, batch 7150, loss[loss=0.2478, simple_loss=0.3272, pruned_loss=0.0842, over 8515.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3044, pruned_loss=0.07553, over 1611121.62 frames. ], batch size: 39, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:13:00,082 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.541e+02 2.991e+02 4.071e+02 7.912e+02, threshold=5.982e+02, percent-clipped=4.0 +2023-02-06 14:13:03,939 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 14:13:25,492 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. 
limit=2.0 +2023-02-06 14:13:27,754 INFO [train.py:901] (0/4) Epoch 13, batch 7200, loss[loss=0.1947, simple_loss=0.2826, pruned_loss=0.05336, over 8481.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.304, pruned_loss=0.07506, over 1613144.76 frames. ], batch size: 29, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:13:42,129 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104217.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:13:55,682 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104238.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:13:58,494 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:14:01,687 INFO [train.py:901] (0/4) Epoch 13, batch 7250, loss[loss=0.2356, simple_loss=0.3155, pruned_loss=0.07781, over 8490.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3054, pruned_loss=0.07546, over 1616474.08 frames. ], batch size: 26, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:14:09,619 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.469e+02 3.063e+02 3.939e+02 8.277e+02, threshold=6.126e+02, percent-clipped=7.0 +2023-02-06 14:14:26,702 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8626, 1.8743, 2.5525, 1.9302, 1.4604, 2.5527, 0.5082, 1.6702], + device='cuda:0'), covar=tensor([0.2316, 0.1616, 0.0424, 0.1587, 0.3154, 0.0416, 0.2664, 0.1364], + device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0175, 0.0106, 0.0217, 0.0253, 0.0110, 0.0163, 0.0168], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 14:14:37,319 INFO [train.py:901] (0/4) Epoch 13, batch 7300, loss[loss=0.2317, simple_loss=0.3192, pruned_loss=0.07207, over 8499.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3038, pruned_loss=0.07469, over 1615223.68 frames. ], batch size: 26, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:14:53,581 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4274, 1.9523, 3.2347, 1.2937, 2.2872, 1.8433, 1.5706, 2.1980], + device='cuda:0'), covar=tensor([0.1765, 0.2219, 0.0750, 0.3950, 0.1653, 0.3001, 0.1879, 0.2217], + device='cuda:0'), in_proj_covar=tensor([0.0501, 0.0543, 0.0538, 0.0595, 0.0624, 0.0564, 0.0491, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:14:54,414 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 14:15:04,964 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9519, 1.5456, 6.0421, 2.1095, 5.4244, 5.0973, 5.5866, 5.4999], + device='cuda:0'), covar=tensor([0.0400, 0.4639, 0.0292, 0.3599, 0.0949, 0.0765, 0.0392, 0.0437], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0587, 0.0607, 0.0556, 0.0632, 0.0541, 0.0524, 0.0592], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:15:11,548 INFO [train.py:901] (0/4) Epoch 13, batch 7350, loss[loss=0.1972, simple_loss=0.2806, pruned_loss=0.05687, over 8242.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3035, pruned_loss=0.07461, over 1616485.05 frames. 
], batch size: 22, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:14,978 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:15,793 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:19,101 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.439e+02 3.043e+02 3.823e+02 6.373e+02, threshold=6.086e+02, percent-clipped=2.0 +2023-02-06 14:15:19,895 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:33,228 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 14:15:46,157 INFO [train.py:901] (0/4) Epoch 13, batch 7400, loss[loss=0.1749, simple_loss=0.2581, pruned_loss=0.0458, over 7699.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3042, pruned_loss=0.07484, over 1614372.74 frames. ], batch size: 18, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:50,822 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6061, 1.9028, 1.5551, 2.6869, 1.2960, 1.2964, 1.8938, 2.0705], + device='cuda:0'), covar=tensor([0.1023, 0.0871, 0.1358, 0.0426, 0.1031, 0.1552, 0.0821, 0.0784], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0206, 0.0250, 0.0208, 0.0212, 0.0249, 0.0253, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:15:53,254 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 14:15:56,726 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:21,106 INFO [train.py:901] (0/4) Epoch 13, batch 7450, loss[loss=0.2206, simple_loss=0.2931, pruned_loss=0.07402, over 8082.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3034, pruned_loss=0.07435, over 1612301.30 frames. ], batch size: 21, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:16:29,262 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 2.494e+02 3.000e+02 3.814e+02 1.100e+03, threshold=5.999e+02, percent-clipped=4.0 +2023-02-06 14:16:33,243 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 14:16:35,450 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104467.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:39,552 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:40,155 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:43,395 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1944, 3.1329, 2.8619, 1.6335, 2.8353, 2.8085, 2.8489, 2.6728], + device='cuda:0'), covar=tensor([0.1273, 0.0903, 0.1540, 0.4900, 0.1340, 0.1439, 0.1702, 0.1256], + device='cuda:0'), in_proj_covar=tensor([0.0472, 0.0386, 0.0396, 0.0488, 0.0388, 0.0391, 0.0380, 0.0338], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:16:55,895 INFO [train.py:901] (0/4) Epoch 13, batch 7500, loss[loss=0.2159, simple_loss=0.3075, pruned_loss=0.06213, over 8608.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3054, pruned_loss=0.07561, over 1614497.22 frames. ], batch size: 34, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:16:56,748 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:56,766 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:59,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4486, 2.6201, 1.6824, 2.0985, 2.1679, 1.5657, 1.9405, 2.0462], + device='cuda:0'), covar=tensor([0.1404, 0.0334, 0.1192, 0.0633, 0.0712, 0.1315, 0.0967, 0.0864], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0237, 0.0322, 0.0297, 0.0301, 0.0322, 0.0341, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:17:14,573 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:16,621 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:30,657 INFO [train.py:901] (0/4) Epoch 13, batch 7550, loss[loss=0.2932, simple_loss=0.3556, pruned_loss=0.1154, over 8547.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3056, pruned_loss=0.07567, over 1614665.95 frames. ], batch size: 31, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:17:37,881 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.482e+02 3.042e+02 4.105e+02 9.709e+02, threshold=6.085e+02, percent-clipped=7.0 +2023-02-06 14:17:58,611 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2074, 1.1986, 2.3243, 1.1725, 2.0380, 2.4775, 2.6300, 2.1007], + device='cuda:0'), covar=tensor([0.1207, 0.1410, 0.0483, 0.2117, 0.0779, 0.0381, 0.0604, 0.0788], + device='cuda:0'), in_proj_covar=tensor([0.0270, 0.0303, 0.0264, 0.0294, 0.0278, 0.0239, 0.0358, 0.0294], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:18:05,102 INFO [train.py:901] (0/4) Epoch 13, batch 7600, loss[loss=0.2189, simple_loss=0.2789, pruned_loss=0.0794, over 7701.00 frames. 
], tot_loss[loss=0.2299, simple_loss=0.3064, pruned_loss=0.07668, over 1615225.88 frames. ], batch size: 18, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:13,228 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:30,775 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:38,448 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5271, 2.1391, 2.8791, 2.4740, 2.7492, 2.3558, 2.1531, 2.0146], + device='cuda:0'), covar=tensor([0.3226, 0.3614, 0.1261, 0.2489, 0.1738, 0.2194, 0.1344, 0.3280], + device='cuda:0'), in_proj_covar=tensor([0.0894, 0.0890, 0.0746, 0.0872, 0.0950, 0.0824, 0.0706, 0.0780], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:18:40,157 INFO [train.py:901] (0/4) Epoch 13, batch 7650, loss[loss=0.2541, simple_loss=0.312, pruned_loss=0.09811, over 8248.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3049, pruned_loss=0.07603, over 1610178.96 frames. ], batch size: 22, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:47,604 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 2.638e+02 3.280e+02 4.340e+02 1.130e+03, threshold=6.560e+02, percent-clipped=9.0 +2023-02-06 14:19:14,834 INFO [train.py:901] (0/4) Epoch 13, batch 7700, loss[loss=0.2287, simple_loss=0.311, pruned_loss=0.07321, over 8583.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3047, pruned_loss=0.07577, over 1613798.70 frames. ], batch size: 31, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:33,182 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:37,769 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 14:19:37,976 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:49,822 INFO [train.py:901] (0/4) Epoch 13, batch 7750, loss[loss=0.2391, simple_loss=0.3249, pruned_loss=0.07661, over 8113.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.306, pruned_loss=0.07676, over 1618322.21 frames. 
], batch size: 23, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:50,603 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104748.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:55,991 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:57,814 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.530e+02 2.944e+02 3.392e+02 9.198e+02, threshold=5.888e+02, percent-clipped=3.0 +2023-02-06 14:19:58,021 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6740, 1.8876, 1.7140, 2.2922, 1.0237, 1.3999, 1.6380, 1.9343], + device='cuda:0'), covar=tensor([0.0853, 0.0868, 0.1020, 0.0491, 0.1207, 0.1508, 0.0920, 0.0789], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0208, 0.0254, 0.0211, 0.0213, 0.0253, 0.0256, 0.0215], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:20:04,701 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5971, 2.6402, 1.7864, 2.2992, 2.4265, 1.5824, 2.2252, 2.0779], + device='cuda:0'), covar=tensor([0.1502, 0.0365, 0.1159, 0.0668, 0.0633, 0.1500, 0.0833, 0.1032], + device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0234, 0.0317, 0.0293, 0.0298, 0.0320, 0.0337, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:20:14,012 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:24,829 INFO [train.py:901] (0/4) Epoch 13, batch 7800, loss[loss=0.236, simple_loss=0.3103, pruned_loss=0.08084, over 8493.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3055, pruned_loss=0.07622, over 1620483.35 frames. ], batch size: 39, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:20:31,895 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:57,829 INFO [train.py:901] (0/4) Epoch 13, batch 7850, loss[loss=0.2089, simple_loss=0.2718, pruned_loss=0.07304, over 7798.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3054, pruned_loss=0.07616, over 1620520.31 frames. ], batch size: 19, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:05,230 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.619e+02 3.074e+02 4.074e+02 1.012e+03, threshold=6.148e+02, percent-clipped=5.0 +2023-02-06 14:21:30,901 INFO [train.py:901] (0/4) Epoch 13, batch 7900, loss[loss=0.1698, simple_loss=0.2457, pruned_loss=0.04695, over 7703.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3058, pruned_loss=0.07627, over 1622007.81 frames. ], batch size: 18, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:44,399 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8475, 3.7035, 2.1884, 2.5486, 2.8825, 2.1133, 2.5298, 2.8496], + device='cuda:0'), covar=tensor([0.1509, 0.0304, 0.0968, 0.0811, 0.0623, 0.1227, 0.1107, 0.1053], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0236, 0.0318, 0.0295, 0.0298, 0.0322, 0.0339, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:22:04,230 INFO [train.py:901] (0/4) Epoch 13, batch 7950, loss[loss=0.2595, simple_loss=0.3276, pruned_loss=0.0957, over 8611.00 frames. 
], tot_loss[loss=0.2298, simple_loss=0.3063, pruned_loss=0.07666, over 1620493.22 frames. ], batch size: 34, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:11,303 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.521e+02 3.027e+02 3.866e+02 6.555e+02, threshold=6.053e+02, percent-clipped=2.0 +2023-02-06 14:22:37,771 INFO [train.py:901] (0/4) Epoch 13, batch 8000, loss[loss=0.2404, simple_loss=0.3265, pruned_loss=0.07714, over 8368.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3055, pruned_loss=0.07598, over 1620046.15 frames. ], batch size: 24, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:47,555 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 14:23:10,573 INFO [train.py:901] (0/4) Epoch 13, batch 8050, loss[loss=0.2024, simple_loss=0.2646, pruned_loss=0.07011, over 7547.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3036, pruned_loss=0.07537, over 1607945.37 frames. ], batch size: 18, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:23:18,074 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.417e+02 2.946e+02 3.621e+02 6.025e+02, threshold=5.892e+02, percent-clipped=0.0 +2023-02-06 14:23:24,941 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0690, 1.1459, 1.2244, 0.8499, 1.2869, 0.9858, 0.3539, 1.1960], + device='cuda:0'), covar=tensor([0.0314, 0.0253, 0.0178, 0.0300, 0.0232, 0.0521, 0.0517, 0.0180], + device='cuda:0'), in_proj_covar=tensor([0.0402, 0.0341, 0.0294, 0.0398, 0.0328, 0.0488, 0.0365, 0.0368], + device='cuda:0'), out_proj_covar=tensor([1.1230e-04, 9.2778e-05, 8.0021e-05, 1.0895e-04, 9.0265e-05, 1.4430e-04, + 1.0224e-04, 1.0168e-04], device='cuda:0') +2023-02-06 14:23:33,781 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-13.pt +2023-02-06 14:23:50,217 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 14:23:54,152 INFO [train.py:901] (0/4) Epoch 14, batch 0, loss[loss=0.2233, simple_loss=0.316, pruned_loss=0.06524, over 8321.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.316, pruned_loss=0.06524, over 8321.00 frames. ], batch size: 26, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:23:54,152 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 14:24:01,264 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5339, 1.8003, 2.5696, 1.3722, 2.1501, 1.8081, 1.6279, 2.0418], + device='cuda:0'), covar=tensor([0.1479, 0.2431, 0.0617, 0.3624, 0.1394, 0.2635, 0.1849, 0.1860], + device='cuda:0'), in_proj_covar=tensor([0.0499, 0.0545, 0.0539, 0.0600, 0.0623, 0.0566, 0.0493, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:24:05,196 INFO [train.py:935] (0/4) Epoch 14, validation: loss=0.184, simple_loss=0.2839, pruned_loss=0.04201, over 944034.00 frames. 
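Note on the recurring `optim.py` lines: each reports the quartiles (min/25%/50%/75%/max) of recent gradient norms, a clipping threshold, and the running fraction of clipped batches. Below is a minimal sketch of this style of adaptive clipping, assuming the threshold is `Clipping_scale` times the median of a window of recent norms; the class name and window size are illustrative, and the real logic lives in icefall's `optim.py`.

```python
import torch
from collections import deque

class QuartileGradClipper:
    """Sketch of quartile-based gradient clipping (illustration only,
    not the icefall implementation). Mirrors what the optim.py log
    lines report: grad-norm quartiles, threshold, percent-clipped."""

    def __init__(self, clipping_scale: float = 2.0, window: int = 128):
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=window)  # recent total gradient norms
        self.seen = 0
        self.clipped = 0

    def clip_(self, params) -> float:
        grads = [p.grad for p in params if p.grad is not None]
        total = float(torch.norm(torch.stack([g.detach().norm() for g in grads])))
        self.norms.append(total)
        hist = torch.tensor(list(self.norms))
        # the five numbers printed as "grad-norm quartiles" in the log
        quartiles = torch.quantile(hist, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        threshold = self.clipping_scale * float(quartiles[2])  # scale * median (assumed)
        self.seen += 1
        if total > threshold:
            self.clipped += 1
            for g in grads:
                g.mul_(threshold / total)  # rescale gradients in place
        return 100.0 * self.clipped / self.seen  # "percent-clipped"
```

Deriving the threshold from recent history rather than a fixed constant lets it track the natural scale of the gradients as training progresses, which is why the logged threshold drifts between roughly 5.6e+02 and 6.6e+02 across this section.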
+2023-02-06 14:24:05,197 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 14:24:15,528 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8158, 1.3694, 5.8807, 2.1613, 5.2002, 4.9175, 5.4304, 5.2946], + device='cuda:0'), covar=tensor([0.0465, 0.5287, 0.0373, 0.3539, 0.0994, 0.0817, 0.0483, 0.0524], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0581, 0.0608, 0.0551, 0.0632, 0.0536, 0.0523, 0.0589], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:24:21,252 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 14:24:38,589 INFO [train.py:901] (0/4) Epoch 14, batch 50, loss[loss=0.2156, simple_loss=0.3017, pruned_loss=0.06472, over 8499.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3093, pruned_loss=0.07564, over 369645.55 frames. ], batch size: 26, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:24:54,776 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 14:24:58,156 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.684e+02 3.092e+02 3.835e+02 7.852e+02, threshold=6.183e+02, percent-clipped=3.0 +2023-02-06 14:25:14,420 INFO [train.py:901] (0/4) Epoch 14, batch 100, loss[loss=0.2438, simple_loss=0.3183, pruned_loss=0.0847, over 8579.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3087, pruned_loss=0.07556, over 651794.72 frames. ], batch size: 34, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:25:17,793 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 14:25:48,649 INFO [train.py:901] (0/4) Epoch 14, batch 150, loss[loss=0.1997, simple_loss=0.2821, pruned_loss=0.05866, over 8140.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3048, pruned_loss=0.07371, over 866176.60 frames. ], batch size: 22, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:08,299 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.384e+02 2.990e+02 3.742e+02 5.781e+02, threshold=5.980e+02, percent-clipped=0.0 +2023-02-06 14:26:23,065 INFO [train.py:901] (0/4) Epoch 14, batch 200, loss[loss=0.1984, simple_loss=0.2725, pruned_loss=0.06219, over 7785.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.3037, pruned_loss=0.07338, over 1030602.58 frames. ], batch size: 19, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:46,926 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 14:26:58,947 INFO [train.py:901] (0/4) Epoch 14, batch 250, loss[loss=0.215, simple_loss=0.2971, pruned_loss=0.06646, over 8565.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3033, pruned_loss=0.07278, over 1162010.44 frames. ], batch size: 34, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:07,608 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 14:27:15,953 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 14:27:18,051 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.546e+02 3.157e+02 4.204e+02 9.163e+02, threshold=6.313e+02, percent-clipped=6.0 +2023-02-06 14:27:33,653 INFO [train.py:901] (0/4) Epoch 14, batch 300, loss[loss=0.2398, simple_loss=0.3201, pruned_loss=0.07976, over 8361.00 frames. 
], tot_loss[loss=0.2246, simple_loss=0.3031, pruned_loss=0.07301, over 1264426.98 frames. ], batch size: 26, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:52,811 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:28:09,622 INFO [train.py:901] (0/4) Epoch 14, batch 350, loss[loss=0.2272, simple_loss=0.3152, pruned_loss=0.06962, over 8499.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3045, pruned_loss=0.0736, over 1341409.10 frames. ], batch size: 26, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:28:28,596 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 2.437e+02 2.818e+02 3.446e+02 5.751e+02, threshold=5.636e+02, percent-clipped=0.0 +2023-02-06 14:28:43,592 INFO [train.py:901] (0/4) Epoch 14, batch 400, loss[loss=0.2255, simple_loss=0.3122, pruned_loss=0.06935, over 8331.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3047, pruned_loss=0.07378, over 1401857.60 frames. ], batch size: 25, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:00,997 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:13,238 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:20,756 INFO [train.py:901] (0/4) Epoch 14, batch 450, loss[loss=0.1954, simple_loss=0.2692, pruned_loss=0.06078, over 7797.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3032, pruned_loss=0.07319, over 1446167.45 frames. ], batch size: 19, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:40,049 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.497e+02 2.804e+02 3.770e+02 6.336e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 14:29:43,640 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3841, 4.3781, 3.8554, 2.0294, 3.8131, 3.9930, 3.9115, 3.7546], + device='cuda:0'), covar=tensor([0.0754, 0.0583, 0.1268, 0.4930, 0.1025, 0.0938, 0.1315, 0.0827], + device='cuda:0'), in_proj_covar=tensor([0.0474, 0.0390, 0.0399, 0.0498, 0.0390, 0.0393, 0.0379, 0.0341], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:29:55,211 INFO [train.py:901] (0/4) Epoch 14, batch 500, loss[loss=0.2078, simple_loss=0.2874, pruned_loss=0.06413, over 8593.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3043, pruned_loss=0.07395, over 1489992.24 frames. ], batch size: 31, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:09,395 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8034, 3.7219, 3.3505, 1.7568, 3.2957, 3.4410, 3.4123, 3.1460], + device='cuda:0'), covar=tensor([0.0899, 0.0753, 0.1191, 0.4966, 0.1060, 0.1155, 0.1301, 0.1116], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0390, 0.0398, 0.0498, 0.0392, 0.0393, 0.0379, 0.0342], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:30:29,387 INFO [train.py:901] (0/4) Epoch 14, batch 550, loss[loss=0.243, simple_loss=0.326, pruned_loss=0.08, over 8243.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3039, pruned_loss=0.07383, over 1521319.12 frames. 
], batch size: 22, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:50,301 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.442e+02 2.933e+02 3.700e+02 8.163e+02, threshold=5.867e+02, percent-clipped=3.0 +2023-02-06 14:30:56,611 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6268, 1.4678, 4.8132, 1.6894, 4.2283, 4.0353, 4.3776, 4.2224], + device='cuda:0'), covar=tensor([0.0584, 0.4529, 0.0449, 0.4007, 0.1137, 0.0868, 0.0576, 0.0614], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0584, 0.0614, 0.0556, 0.0634, 0.0539, 0.0530, 0.0595], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:31:05,192 INFO [train.py:901] (0/4) Epoch 14, batch 600, loss[loss=0.2364, simple_loss=0.3126, pruned_loss=0.0801, over 8340.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3027, pruned_loss=0.07324, over 1541337.63 frames. ], batch size: 26, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:18,466 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 14:31:39,816 INFO [train.py:901] (0/4) Epoch 14, batch 650, loss[loss=0.2228, simple_loss=0.2937, pruned_loss=0.07599, over 7925.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3018, pruned_loss=0.07268, over 1558963.63 frames. ], batch size: 20, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:54,394 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:01,339 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.402e+02 3.000e+02 3.711e+02 7.109e+02, threshold=6.000e+02, percent-clipped=4.0 +2023-02-06 14:32:15,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8078, 1.4296, 3.9380, 1.3037, 3.4275, 3.2753, 3.5928, 3.4594], + device='cuda:0'), covar=tensor([0.0604, 0.4148, 0.0605, 0.3907, 0.1266, 0.0979, 0.0621, 0.0685], + device='cuda:0'), in_proj_covar=tensor([0.0516, 0.0582, 0.0613, 0.0553, 0.0631, 0.0536, 0.0526, 0.0594], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:32:17,353 INFO [train.py:901] (0/4) Epoch 14, batch 700, loss[loss=0.1959, simple_loss=0.2682, pruned_loss=0.06179, over 7781.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3012, pruned_loss=0.07206, over 1572945.19 frames. ], batch size: 19, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:32:23,118 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-06 14:32:34,069 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-06 14:32:37,864 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:42,733 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105817.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:51,389 INFO [train.py:901] (0/4) Epoch 14, batch 750, loss[loss=0.2234, simple_loss=0.298, pruned_loss=0.07441, over 7973.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3008, pruned_loss=0.07205, over 1580209.68 frames. 
], batch size: 21, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:33:03,852 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:06,427 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 14:33:11,294 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.459e+02 2.898e+02 3.725e+02 7.154e+02, threshold=5.796e+02, percent-clipped=4.0 +2023-02-06 14:33:15,479 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:16,075 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 14:33:16,240 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:16,864 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3743, 1.1473, 2.4277, 0.9727, 2.1252, 2.0449, 2.2604, 2.1881], + device='cuda:0'), covar=tensor([0.0688, 0.2963, 0.0973, 0.3206, 0.1122, 0.0920, 0.0653, 0.0703], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0587, 0.0618, 0.0559, 0.0637, 0.0542, 0.0531, 0.0599], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:33:18,522 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.58 vs. limit=5.0 +2023-02-06 14:33:27,180 INFO [train.py:901] (0/4) Epoch 14, batch 800, loss[loss=0.2213, simple_loss=0.294, pruned_loss=0.07436, over 7968.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3014, pruned_loss=0.07198, over 1588877.81 frames. ], batch size: 21, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:34:02,175 INFO [train.py:901] (0/4) Epoch 14, batch 850, loss[loss=0.2673, simple_loss=0.3398, pruned_loss=0.09734, over 8190.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3013, pruned_loss=0.0719, over 1597802.52 frames. ], batch size: 23, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:34:20,963 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.477e+02 2.961e+02 4.061e+02 6.411e+02, threshold=5.921e+02, percent-clipped=4.0 +2023-02-06 14:34:24,581 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:36,565 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:37,081 INFO [train.py:901] (0/4) Epoch 14, batch 900, loss[loss=0.2207, simple_loss=0.3052, pruned_loss=0.06815, over 8455.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3016, pruned_loss=0.07243, over 1601826.29 frames. ], batch size: 25, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:34:52,431 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-106000.pt +2023-02-06 14:35:14,899 INFO [train.py:901] (0/4) Epoch 14, batch 950, loss[loss=0.2113, simple_loss=0.289, pruned_loss=0.06682, over 8243.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3003, pruned_loss=0.07211, over 1604953.75 frames. 
], batch size: 22, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:15,114 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6082, 1.8964, 2.2982, 1.2406, 2.2900, 1.3976, 0.6940, 1.8018], + device='cuda:0'), covar=tensor([0.0535, 0.0281, 0.0187, 0.0498, 0.0332, 0.0740, 0.0667, 0.0270], + device='cuda:0'), in_proj_covar=tensor([0.0409, 0.0345, 0.0298, 0.0404, 0.0334, 0.0493, 0.0367, 0.0376], + device='cuda:0'), out_proj_covar=tensor([1.1402e-04, 9.3679e-05, 8.1018e-05, 1.1073e-04, 9.1670e-05, 1.4546e-04, + 1.0263e-04, 1.0365e-04], device='cuda:0') +2023-02-06 14:35:33,976 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.617e+02 3.202e+02 4.119e+02 6.844e+02, threshold=6.403e+02, percent-clipped=3.0 +2023-02-06 14:35:38,932 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 14:35:49,305 INFO [train.py:901] (0/4) Epoch 14, batch 1000, loss[loss=0.2422, simple_loss=0.3228, pruned_loss=0.08086, over 8542.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3, pruned_loss=0.07203, over 1608334.01 frames. ], batch size: 49, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:51,334 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 14:36:00,596 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:14,280 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 14:36:20,062 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:26,949 INFO [train.py:901] (0/4) Epoch 14, batch 1050, loss[loss=0.2346, simple_loss=0.3033, pruned_loss=0.08296, over 8126.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3004, pruned_loss=0.07221, over 1610072.26 frames. ], batch size: 22, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:26,960 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 14:36:37,948 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:43,486 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:46,248 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.417e+02 2.951e+02 3.593e+02 9.096e+02, threshold=5.903e+02, percent-clipped=2.0 +2023-02-06 14:36:48,429 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:54,066 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4920, 1.4928, 4.7095, 1.5600, 4.1204, 3.9188, 4.2307, 4.0964], + device='cuda:0'), covar=tensor([0.0522, 0.4414, 0.0488, 0.3937, 0.1056, 0.0927, 0.0499, 0.0615], + device='cuda:0'), in_proj_covar=tensor([0.0521, 0.0583, 0.0615, 0.0553, 0.0631, 0.0540, 0.0526, 0.0595], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:37:01,618 INFO [train.py:901] (0/4) Epoch 14, batch 1100, loss[loss=0.2584, simple_loss=0.3295, pruned_loss=0.0937, over 7115.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3013, pruned_loss=0.07224, over 1613398.49 frames. 
], batch size: 71, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:29,706 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:35,887 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 14:37:38,709 INFO [train.py:901] (0/4) Epoch 14, batch 1150, loss[loss=0.2315, simple_loss=0.2944, pruned_loss=0.08427, over 7645.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3028, pruned_loss=0.07314, over 1608695.45 frames. ], batch size: 19, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:42,399 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:49,098 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:58,393 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.475e+02 3.133e+02 3.919e+02 6.906e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 14:38:00,017 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:03,150 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 14:38:06,180 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106269.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:10,867 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:13,353 INFO [train.py:901] (0/4) Epoch 14, batch 1200, loss[loss=0.2505, simple_loss=0.3228, pruned_loss=0.08914, over 8456.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3025, pruned_loss=0.07291, over 1614146.37 frames. ], batch size: 27, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:38:17,531 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:47,936 INFO [train.py:901] (0/4) Epoch 14, batch 1250, loss[loss=0.2185, simple_loss=0.2898, pruned_loss=0.07361, over 8445.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3027, pruned_loss=0.0731, over 1614844.54 frames. ], batch size: 27, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:39:00,658 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.54 vs. limit=2.0 +2023-02-06 14:39:05,925 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:39:08,472 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.539e+02 3.303e+02 4.386e+02 1.450e+03, threshold=6.607e+02, percent-clipped=4.0 +2023-02-06 14:39:24,632 INFO [train.py:901] (0/4) Epoch 14, batch 1300, loss[loss=0.2005, simple_loss=0.2827, pruned_loss=0.05912, over 7807.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3026, pruned_loss=0.07296, over 1614819.07 frames. 
], batch size: 20, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:39:52,937 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5286, 2.6992, 1.9011, 2.2422, 2.2750, 1.5397, 2.2063, 2.2524], + device='cuda:0'), covar=tensor([0.1389, 0.0339, 0.1075, 0.0689, 0.0706, 0.1510, 0.0901, 0.0904], + device='cuda:0'), in_proj_covar=tensor([0.0339, 0.0232, 0.0318, 0.0295, 0.0298, 0.0319, 0.0336, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:39:58,995 INFO [train.py:901] (0/4) Epoch 14, batch 1350, loss[loss=0.2381, simple_loss=0.3234, pruned_loss=0.07636, over 8513.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.301, pruned_loss=0.07205, over 1611029.77 frames. ], batch size: 28, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:40:05,431 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:19,203 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.550e+02 3.060e+02 3.665e+02 8.767e+02, threshold=6.121e+02, percent-clipped=1.0 +2023-02-06 14:40:29,770 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:35,128 INFO [train.py:901] (0/4) Epoch 14, batch 1400, loss[loss=0.1926, simple_loss=0.2811, pruned_loss=0.05202, over 7975.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2996, pruned_loss=0.0713, over 1611327.82 frames. ], batch size: 21, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:40:50,290 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 14:41:07,391 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:10,536 INFO [train.py:901] (0/4) Epoch 14, batch 1450, loss[loss=0.2777, simple_loss=0.3461, pruned_loss=0.1047, over 8501.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2997, pruned_loss=0.07155, over 1611756.52 frames. ], batch size: 26, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:11,253 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 14:41:12,177 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:24,665 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106550.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:27,426 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,524 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,960 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.546e+02 3.123e+02 4.151e+02 8.254e+02, threshold=6.246e+02, percent-clipped=6.0 +2023-02-06 14:41:41,376 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5897, 2.6641, 1.7649, 2.1512, 2.2279, 1.4874, 2.0501, 2.1306], + device='cuda:0'), covar=tensor([0.1460, 0.0351, 0.1201, 0.0619, 0.0758, 0.1571, 0.1043, 0.0969], + device='cuda:0'), in_proj_covar=tensor([0.0343, 0.0233, 0.0320, 0.0296, 0.0300, 0.0322, 0.0338, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:41:47,579 INFO [train.py:901] (0/4) Epoch 14, batch 1500, loss[loss=0.2693, simple_loss=0.3475, pruned_loss=0.09548, over 8341.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3009, pruned_loss=0.07222, over 1615957.19 frames. ], batch size: 26, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,567 INFO [train.py:901] (0/4) Epoch 14, batch 1550, loss[loss=0.1858, simple_loss=0.2684, pruned_loss=0.05154, over 7246.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3016, pruned_loss=0.07259, over 1615143.75 frames. ], batch size: 16, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,639 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:42:41,331 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.593e+02 3.196e+02 4.114e+02 8.054e+02, threshold=6.391e+02, percent-clipped=4.0 +2023-02-06 14:42:56,718 INFO [train.py:901] (0/4) Epoch 14, batch 1600, loss[loss=0.227, simple_loss=0.2964, pruned_loss=0.07883, over 6810.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3017, pruned_loss=0.07323, over 1612381.72 frames. ], batch size: 15, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:43:10,337 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:17,988 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 14:43:32,334 INFO [train.py:901] (0/4) Epoch 14, batch 1650, loss[loss=0.3238, simple_loss=0.3768, pruned_loss=0.1354, over 8430.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3019, pruned_loss=0.07333, over 1614908.90 frames. 
], batch size: 49, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:43:42,571 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:51,906 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.493e+02 3.038e+02 4.078e+02 1.080e+03, threshold=6.076e+02, percent-clipped=3.0 +2023-02-06 14:44:06,428 INFO [train.py:901] (0/4) Epoch 14, batch 1700, loss[loss=0.2463, simple_loss=0.3199, pruned_loss=0.08632, over 8432.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3022, pruned_loss=0.07277, over 1618406.68 frames. ], batch size: 49, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:28,275 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:31,576 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:33,571 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:37,203 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0232, 1.4020, 1.6558, 1.3840, 0.9985, 1.4441, 1.8388, 1.6325], + device='cuda:0'), covar=tensor([0.0498, 0.1306, 0.1689, 0.1439, 0.0616, 0.1488, 0.0685, 0.0587], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0158, 0.0101, 0.0162, 0.0114, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 14:44:43,449 INFO [train.py:901] (0/4) Epoch 14, batch 1750, loss[loss=0.2358, simple_loss=0.3142, pruned_loss=0.07874, over 8460.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.303, pruned_loss=0.07303, over 1622489.83 frames. ], batch size: 27, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:47,871 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:45:04,130 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.358e+02 2.865e+02 3.554e+02 7.426e+02, threshold=5.730e+02, percent-clipped=3.0 +2023-02-06 14:45:18,445 INFO [train.py:901] (0/4) Epoch 14, batch 1800, loss[loss=0.1851, simple_loss=0.2653, pruned_loss=0.05243, over 7654.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3022, pruned_loss=0.07287, over 1620411.05 frames. ], batch size: 19, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:45:44,916 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 14:45:54,589 INFO [train.py:901] (0/4) Epoch 14, batch 1850, loss[loss=0.2219, simple_loss=0.2878, pruned_loss=0.07799, over 7806.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3027, pruned_loss=0.07333, over 1615574.58 frames. ], batch size: 19, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:45:55,508 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:00,465 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. 
limit=2.0 +2023-02-06 14:46:16,026 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.570e+02 3.068e+02 3.847e+02 1.325e+03, threshold=6.136e+02, percent-clipped=4.0 +2023-02-06 14:46:17,533 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2451, 1.7970, 1.2972, 2.6157, 1.1914, 1.0894, 1.9085, 1.9316], + device='cuda:0'), covar=tensor([0.1743, 0.1256, 0.2327, 0.0517, 0.1425, 0.2313, 0.0954, 0.0928], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0206, 0.0252, 0.0211, 0.0214, 0.0252, 0.0255, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:46:23,896 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0 +2023-02-06 14:46:29,528 INFO [train.py:901] (0/4) Epoch 14, batch 1900, loss[loss=0.2395, simple_loss=0.3128, pruned_loss=0.08313, over 7969.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3028, pruned_loss=0.07304, over 1613121.05 frames. ], batch size: 21, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:46:43,884 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:47,084 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 14:47:00,490 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 14:47:01,253 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:03,727 INFO [train.py:901] (0/4) Epoch 14, batch 1950, loss[loss=0.238, simple_loss=0.3201, pruned_loss=0.07798, over 8518.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3033, pruned_loss=0.0738, over 1616725.14 frames. ], batch size: 29, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:47:09,964 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2134, 2.7786, 3.6954, 2.2780, 1.8314, 3.7559, 0.8140, 2.2279], + device='cuda:0'), covar=tensor([0.1597, 0.1753, 0.0305, 0.2034, 0.3642, 0.0249, 0.2846, 0.1855], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0177, 0.0107, 0.0218, 0.0260, 0.0112, 0.0163, 0.0175], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 14:47:19,874 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 14:47:26,064 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.386e+02 2.840e+02 3.483e+02 6.138e+02, threshold=5.681e+02, percent-clipped=1.0 +2023-02-06 14:47:31,995 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:39,098 INFO [train.py:901] (0/4) Epoch 14, batch 2000, loss[loss=0.1906, simple_loss=0.274, pruned_loss=0.05359, over 7808.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3011, pruned_loss=0.0724, over 1613692.88 frames. ], batch size: 19, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:47:48,646 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:12,595 INFO [train.py:901] (0/4) Epoch 14, batch 2050, loss[loss=0.1906, simple_loss=0.2665, pruned_loss=0.05731, over 7801.00 frames. 
], tot_loss[loss=0.2214, simple_loss=0.3, pruned_loss=0.0714, over 1616265.22 frames. ], batch size: 20, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:32,533 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107158.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:48:34,367 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.411e+02 3.055e+02 3.713e+02 7.642e+02, threshold=6.109e+02, percent-clipped=4.0 +2023-02-06 14:48:42,106 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107170.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:49,407 INFO [train.py:901] (0/4) Epoch 14, batch 2100, loss[loss=0.3074, simple_loss=0.3746, pruned_loss=0.1201, over 7042.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3013, pruned_loss=0.07225, over 1615978.82 frames. ], batch size: 73, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:49,589 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1767, 1.4906, 1.7250, 1.3470, 0.8253, 1.5147, 1.8131, 1.4666], + device='cuda:0'), covar=tensor([0.0465, 0.1255, 0.1707, 0.1421, 0.0623, 0.1473, 0.0645, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0157, 0.0101, 0.0161, 0.0113, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 14:48:54,361 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:49:11,184 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:49:23,203 INFO [train.py:901] (0/4) Epoch 14, batch 2150, loss[loss=0.2567, simple_loss=0.3257, pruned_loss=0.0939, over 8498.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3022, pruned_loss=0.07318, over 1616120.53 frames. ], batch size: 49, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:49:44,430 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.615e+02 3.041e+02 3.823e+02 8.460e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 14:49:58,901 INFO [train.py:901] (0/4) Epoch 14, batch 2200, loss[loss=0.2355, simple_loss=0.3167, pruned_loss=0.07715, over 8509.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.303, pruned_loss=0.07408, over 1612324.63 frames. ], batch size: 26, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:09,035 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 14:50:22,066 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 14:50:28,934 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 14:50:34,528 INFO [train.py:901] (0/4) Epoch 14, batch 2250, loss[loss=0.2362, simple_loss=0.3062, pruned_loss=0.0831, over 8145.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3025, pruned_loss=0.07423, over 1610840.25 frames. ], batch size: 22, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:54,552 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.599e+02 3.319e+02 4.071e+02 1.027e+03, threshold=6.637e+02, percent-clipped=7.0 +2023-02-06 14:51:08,890 INFO [train.py:901] (0/4) Epoch 14, batch 2300, loss[loss=0.2412, simple_loss=0.3139, pruned_loss=0.08424, over 8192.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3028, pruned_loss=0.07425, over 1616234.90 frames. 
], batch size: 23, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:51:11,021 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0847, 2.3256, 1.8574, 2.7803, 1.2693, 1.5830, 1.9798, 2.2728], + device='cuda:0'), covar=tensor([0.0659, 0.0739, 0.0995, 0.0343, 0.1212, 0.1400, 0.0858, 0.0799], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0205, 0.0248, 0.0211, 0.0213, 0.0252, 0.0253, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:51:44,734 INFO [train.py:901] (0/4) Epoch 14, batch 2350, loss[loss=0.2108, simple_loss=0.2747, pruned_loss=0.07345, over 7423.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3026, pruned_loss=0.07431, over 1614348.12 frames. ], batch size: 17, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:04,937 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.358e+02 2.889e+02 3.449e+02 7.134e+02, threshold=5.779e+02, percent-clipped=1.0 +2023-02-06 14:52:18,380 INFO [train.py:901] (0/4) Epoch 14, batch 2400, loss[loss=0.2172, simple_loss=0.3034, pruned_loss=0.06553, over 8292.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3025, pruned_loss=0.07409, over 1617671.37 frames. ], batch size: 23, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:34,427 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107502.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:52:43,461 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:53,756 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:55,004 INFO [train.py:901] (0/4) Epoch 14, batch 2450, loss[loss=0.209, simple_loss=0.2691, pruned_loss=0.07444, over 7799.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3013, pruned_loss=0.07316, over 1620250.74 frames. ], batch size: 19, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:08,642 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7007, 2.3248, 3.1810, 1.9528, 1.6626, 3.1623, 0.6725, 1.9221], + device='cuda:0'), covar=tensor([0.2081, 0.1342, 0.0328, 0.2156, 0.3647, 0.0406, 0.2956, 0.1938], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0177, 0.0108, 0.0219, 0.0261, 0.0111, 0.0162, 0.0175], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 14:53:16,529 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.477e+02 3.089e+02 4.011e+02 1.178e+03, threshold=6.179e+02, percent-clipped=8.0 +2023-02-06 14:53:29,784 INFO [train.py:901] (0/4) Epoch 14, batch 2500, loss[loss=0.262, simple_loss=0.338, pruned_loss=0.09301, over 8025.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3015, pruned_loss=0.07292, over 1612874.44 frames. ], batch size: 22, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:30,963 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 14:53:32,367 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. 
limit=5.0 +2023-02-06 14:53:48,274 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8079, 1.8118, 2.3789, 1.5296, 1.1891, 2.4287, 0.3643, 1.4184], + device='cuda:0'), covar=tensor([0.2256, 0.1748, 0.0443, 0.2236, 0.3991, 0.0478, 0.2786, 0.2070], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0177, 0.0108, 0.0218, 0.0261, 0.0112, 0.0162, 0.0174], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 14:53:55,573 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107617.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:54:03,464 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:03,973 INFO [train.py:901] (0/4) Epoch 14, batch 2550, loss[loss=0.2003, simple_loss=0.2853, pruned_loss=0.05771, over 8018.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3022, pruned_loss=0.07346, over 1612459.75 frames. ], batch size: 22, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:13,571 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1944, 2.4076, 2.0307, 2.9082, 1.3697, 1.6880, 2.1124, 2.4839], + device='cuda:0'), covar=tensor([0.0638, 0.0775, 0.0858, 0.0355, 0.1185, 0.1317, 0.0808, 0.0753], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0205, 0.0251, 0.0212, 0.0216, 0.0253, 0.0255, 0.0216], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:54:26,276 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.642e+02 3.253e+02 4.518e+02 1.030e+03, threshold=6.506e+02, percent-clipped=5.0 +2023-02-06 14:54:37,764 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:39,570 INFO [train.py:901] (0/4) Epoch 14, batch 2600, loss[loss=0.2045, simple_loss=0.2834, pruned_loss=0.06283, over 7923.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3014, pruned_loss=0.07283, over 1615781.22 frames. 
], batch size: 20, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:39,762 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3890, 1.4619, 1.4352, 1.7782, 0.6429, 1.2545, 1.2192, 1.4858], + device='cuda:0'), covar=tensor([0.0831, 0.0802, 0.1013, 0.0596, 0.1335, 0.1490, 0.0868, 0.0759], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0206, 0.0252, 0.0213, 0.0217, 0.0253, 0.0255, 0.0216], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 14:54:58,281 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8547, 5.9688, 5.0224, 2.5803, 5.1220, 5.5792, 5.4101, 5.2844], + device='cuda:0'), covar=tensor([0.0448, 0.0358, 0.0839, 0.4020, 0.0614, 0.0512, 0.0999, 0.0473], + device='cuda:0'), in_proj_covar=tensor([0.0487, 0.0401, 0.0408, 0.0501, 0.0401, 0.0402, 0.0388, 0.0349], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:55:00,367 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1767, 4.1444, 3.7618, 1.9789, 3.6677, 3.7119, 3.7739, 3.5299], + device='cuda:0'), covar=tensor([0.0773, 0.0571, 0.1054, 0.4510, 0.0846, 0.0996, 0.1261, 0.0809], + device='cuda:0'), in_proj_covar=tensor([0.0487, 0.0401, 0.0408, 0.0501, 0.0401, 0.0403, 0.0388, 0.0349], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:55:12,836 INFO [train.py:901] (0/4) Epoch 14, batch 2650, loss[loss=0.2442, simple_loss=0.3126, pruned_loss=0.08795, over 8453.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3012, pruned_loss=0.07263, over 1612780.23 frames. ], batch size: 27, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:55:30,855 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:55:34,865 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.443e+02 2.980e+02 3.881e+02 9.981e+02, threshold=5.960e+02, percent-clipped=6.0 +2023-02-06 14:55:49,934 INFO [train.py:901] (0/4) Epoch 14, batch 2700, loss[loss=0.3001, simple_loss=0.3497, pruned_loss=0.1252, over 6801.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3035, pruned_loss=0.07411, over 1614082.77 frames. ], batch size: 72, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:55:54,376 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5985, 2.2562, 4.0296, 1.3228, 2.8172, 2.1032, 1.7667, 2.5843], + device='cuda:0'), covar=tensor([0.2061, 0.2619, 0.0681, 0.4613, 0.1895, 0.3298, 0.2258, 0.2778], + device='cuda:0'), in_proj_covar=tensor([0.0492, 0.0541, 0.0529, 0.0585, 0.0613, 0.0553, 0.0484, 0.0609], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:55:58,673 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 14:56:02,662 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 14:56:23,702 INFO [train.py:901] (0/4) Epoch 14, batch 2750, loss[loss=0.2172, simple_loss=0.2847, pruned_loss=0.07486, over 7700.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3026, pruned_loss=0.0733, over 1616043.40 frames. 
], batch size: 18, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:44,700 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.404e+02 2.918e+02 3.592e+02 1.217e+03, threshold=5.837e+02, percent-clipped=4.0 +2023-02-06 14:56:53,308 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107872.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:56:54,136 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107873.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:56:59,459 INFO [train.py:901] (0/4) Epoch 14, batch 2800, loss[loss=0.2202, simple_loss=0.3029, pruned_loss=0.06874, over 8231.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3021, pruned_loss=0.07267, over 1616693.65 frames. ], batch size: 22, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:57:03,876 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:12,682 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107898.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:57:20,759 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:33,816 INFO [train.py:901] (0/4) Epoch 14, batch 2850, loss[loss=0.2603, simple_loss=0.343, pruned_loss=0.08882, over 8357.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3038, pruned_loss=0.07405, over 1616301.86 frames. ], batch size: 24, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:57:36,750 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.8648, 1.6631, 1.3458, 1.5114, 1.3242, 1.1576, 1.1735, 1.2734], + device='cuda:0'), covar=tensor([0.1107, 0.0432, 0.1146, 0.0528, 0.0703, 0.1338, 0.0902, 0.0760], + device='cuda:0'), in_proj_covar=tensor([0.0341, 0.0229, 0.0315, 0.0294, 0.0294, 0.0318, 0.0337, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 14:57:54,109 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.478e+02 3.087e+02 3.919e+02 8.173e+02, threshold=6.173e+02, percent-clipped=5.0 +2023-02-06 14:58:08,144 INFO [train.py:901] (0/4) Epoch 14, batch 2900, loss[loss=0.258, simple_loss=0.3206, pruned_loss=0.09775, over 8241.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3032, pruned_loss=0.07389, over 1616243.97 frames. ], batch size: 22, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:58:12,777 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:23,496 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-108000.pt +2023-02-06 14:58:27,870 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-06 14:58:38,625 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:40,705 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1715, 1.0098, 1.2189, 1.0754, 0.8542, 1.2660, 0.0758, 0.8812], + device='cuda:0'), covar=tensor([0.2112, 0.1823, 0.0583, 0.1071, 0.3513, 0.0567, 0.2844, 0.1572], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0178, 0.0108, 0.0219, 0.0261, 0.0112, 0.0163, 0.0175], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 14:58:43,495 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6954, 2.2792, 4.3524, 1.4936, 2.8687, 2.3141, 1.7377, 2.6792], + device='cuda:0'), covar=tensor([0.1711, 0.2316, 0.0670, 0.3976, 0.1722, 0.2782, 0.1935, 0.2474], + device='cuda:0'), in_proj_covar=tensor([0.0497, 0.0546, 0.0538, 0.0590, 0.0620, 0.0557, 0.0489, 0.0616], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:58:44,582 INFO [train.py:901] (0/4) Epoch 14, batch 2950, loss[loss=0.2674, simple_loss=0.33, pruned_loss=0.1024, over 8447.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3041, pruned_loss=0.0744, over 1619669.98 frames. ], batch size: 27, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:58:48,143 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6900, 1.9494, 2.1101, 1.2515, 2.1831, 1.5424, 0.5487, 1.8342], + device='cuda:0'), covar=tensor([0.0409, 0.0256, 0.0206, 0.0391, 0.0286, 0.0667, 0.0620, 0.0199], + device='cuda:0'), in_proj_covar=tensor([0.0416, 0.0354, 0.0309, 0.0410, 0.0341, 0.0500, 0.0376, 0.0382], + device='cuda:0'), out_proj_covar=tensor([1.1604e-04, 9.6232e-05, 8.3977e-05, 1.1160e-04, 9.3310e-05, 1.4720e-04, + 1.0517e-04, 1.0520e-04], device='cuda:0') +2023-02-06 14:58:50,906 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3212, 1.9784, 2.8413, 2.2611, 2.7709, 2.1570, 1.8747, 1.5451], + device='cuda:0'), covar=tensor([0.4483, 0.4336, 0.1423, 0.3077, 0.2112, 0.2534, 0.1692, 0.4432], + device='cuda:0'), in_proj_covar=tensor([0.0903, 0.0900, 0.0743, 0.0870, 0.0954, 0.0830, 0.0712, 0.0781], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 14:59:04,838 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.696e+02 3.199e+02 4.019e+02 8.231e+02, threshold=6.398e+02, percent-clipped=3.0 +2023-02-06 14:59:17,004 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7937, 2.3428, 4.5522, 1.5168, 3.4491, 2.4483, 1.7588, 3.2336], + device='cuda:0'), covar=tensor([0.1614, 0.2379, 0.0667, 0.3777, 0.1233, 0.2612, 0.1898, 0.1970], + device='cuda:0'), in_proj_covar=tensor([0.0497, 0.0547, 0.0539, 0.0591, 0.0618, 0.0558, 0.0489, 0.0617], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 14:59:18,173 INFO [train.py:901] (0/4) Epoch 14, batch 3000, loss[loss=0.2361, simple_loss=0.3147, pruned_loss=0.07871, over 8126.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3043, pruned_loss=0.07469, over 1617005.41 frames. 
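
The `WARNING [train.py:1067] ... Exclude cut with ID ... from training. Duration: ...` lines in this log come from a duration filter applied to the training cuts before batching: very short utterances (e.g. 0.7545625 s above) and very long ones (e.g. 25.3333125 s later in the log) are both dropped. A sketch of such a filter with lhotse; the exact bounds are an assumption inferred from the excluded durations, not the recipe's config:

```python
import logging
from lhotse import CutSet

def remove_short_and_long_utt(cuts: CutSet,
                              min_secs: float = 1.0,
                              max_secs: float = 20.0) -> CutSet:
    """Drop cuts outside [min_secs, max_secs], logging each exclusion.

    The bounds are inferred from the durations excluded in this log;
    icefall's recipes apply a comparable predicate before building the
    training dataloader.
    """
    def keep(c) -> bool:
        if not (min_secs <= c.duration <= max_secs):
            logging.warning(f"Exclude cut with ID {c.id} from training. "
                            f"Duration: {c.duration}")
            return False
        return True

    return cuts.filter(keep)
```

Filtering out the tails avoids padding-dominated batches (very short cuts) and out-of-memory spikes from a single long utterance (very long cuts), both of which destabilize streaming-transducer training.
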
], batch size: 22, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:59:18,174 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 14:59:30,512 INFO [train.py:935] (0/4) Epoch 14, validation: loss=0.1827, simple_loss=0.283, pruned_loss=0.04121, over 944034.00 frames. +2023-02-06 14:59:30,513 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 14:59:43,702 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:05,758 INFO [train.py:901] (0/4) Epoch 14, batch 3050, loss[loss=0.2637, simple_loss=0.3401, pruned_loss=0.09361, over 8134.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3042, pruned_loss=0.07448, over 1615442.68 frames. ], batch size: 22, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:00:10,680 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:17,261 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1493, 1.9047, 2.6770, 2.1710, 2.5075, 2.0739, 1.6981, 1.2382], + device='cuda:0'), covar=tensor([0.4554, 0.4033, 0.1347, 0.2749, 0.2078, 0.2649, 0.1816, 0.4460], + device='cuda:0'), in_proj_covar=tensor([0.0903, 0.0902, 0.0745, 0.0870, 0.0959, 0.0832, 0.0713, 0.0780], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:00:28,083 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.670e+02 3.118e+02 3.835e+02 7.160e+02, threshold=6.236e+02, percent-clipped=1.0 +2023-02-06 15:00:41,680 INFO [train.py:901] (0/4) Epoch 14, batch 3100, loss[loss=0.1879, simple_loss=0.2739, pruned_loss=0.0509, over 8139.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3026, pruned_loss=0.07342, over 1617023.79 frames. ], batch size: 22, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:04,696 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108214.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:15,953 INFO [train.py:901] (0/4) Epoch 14, batch 3150, loss[loss=0.2104, simple_loss=0.293, pruned_loss=0.06396, over 8622.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.301, pruned_loss=0.07285, over 1613443.28 frames. ], batch size: 34, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:24,651 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:37,158 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.570e+02 3.163e+02 4.155e+02 7.848e+02, threshold=6.326e+02, percent-clipped=5.0 +2023-02-06 15:01:42,776 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:51,516 INFO [train.py:901] (0/4) Epoch 14, batch 3200, loss[loss=0.2819, simple_loss=0.3504, pruned_loss=0.1067, over 8198.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.301, pruned_loss=0.07329, over 1611050.67 frames. ], batch size: 23, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:02:13,033 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 15:02:25,781 INFO [train.py:901] (0/4) Epoch 14, batch 3250, loss[loss=0.2359, simple_loss=0.3151, pruned_loss=0.07836, over 8505.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3008, pruned_loss=0.07317, over 1613271.76 frames. 
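
Two housekeeping events are interleaved with the loss records above: a checkpoint is written every 2,000 global batches (`checkpoint-108000.pt` earlier, `checkpoint-110000.pt` later in this log), and a validation pass runs at a fixed batch interval (`Computing validation loss`, then the `validation: loss=...` summary and the peak-memory report). A schematic of that cadence; the interval values are read off the log and the loss is a stand-in, so this is not the recipe's actual `train.py`:

```python
import torch

def run_training_epoch(model, optimizer, train_dl, valid_dl,
                       exp_dir="exp/v1", save_every_n=2000,
                       valid_interval=3000, batch_idx_train=0):
    """Schematic icefall-style loop: a checkpoint every `save_every_n`
    global batches and a validation pass every `valid_interval` batches
    within the epoch. Interval values are assumptions inferred from the
    log, and mse_loss stands in for the pruned RNN-T loss.
    """
    loss_fn = torch.nn.functional.mse_loss
    for batch_idx, (x, y) in enumerate(train_dl):
        model.train()
        loss = loss_fn(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_idx_train += 1

        # e.g. save_every_n=2000 -> checkpoint-108000.pt, -110000.pt, ...
        if batch_idx_train % save_every_n == 0:
            torch.save({"model": model.state_dict()},
                       f"{exp_dir}/checkpoint-{batch_idx_train}.pt")

        # Mirrors the "Computing validation loss" / "validation: loss=..."
        # pair of records in the log above.
        if batch_idx > 0 and batch_idx % valid_interval == 0:
            model.eval()
            total, n = 0.0, 0
            with torch.no_grad():
                for vx, vy in valid_dl:
                    total += float(loss_fn(model(vx), vy))
                    n += 1
            print(f"validation: loss={total / max(n, 1):.4f}")
            model.train()
    return batch_idx_train
```
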
], batch size: 26, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:02:35,719 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:02:47,021 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.638e+02 3.239e+02 4.086e+02 1.012e+03, threshold=6.478e+02, percent-clipped=4.0 +2023-02-06 15:02:47,296 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6461, 1.6815, 2.0848, 1.4344, 1.0986, 2.0923, 0.2049, 1.2838], + device='cuda:0'), covar=tensor([0.1879, 0.1558, 0.0414, 0.1625, 0.3818, 0.0436, 0.2792, 0.1528], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0176, 0.0107, 0.0218, 0.0259, 0.0110, 0.0162, 0.0172], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 15:02:51,341 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8668, 1.5375, 3.1295, 1.3133, 2.1949, 3.3711, 3.4607, 2.8850], + device='cuda:0'), covar=tensor([0.1061, 0.1475, 0.0370, 0.2020, 0.0993, 0.0246, 0.0478, 0.0581], + device='cuda:0'), in_proj_covar=tensor([0.0273, 0.0303, 0.0265, 0.0294, 0.0280, 0.0242, 0.0363, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:02:54,347 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0 +2023-02-06 15:03:02,203 INFO [train.py:901] (0/4) Epoch 14, batch 3300, loss[loss=0.2542, simple_loss=0.3137, pruned_loss=0.09737, over 7286.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3027, pruned_loss=0.07366, over 1614464.06 frames. ], batch size: 72, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:10,829 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.25 vs. limit=5.0 +2023-02-06 15:03:11,254 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:27,913 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:36,476 INFO [train.py:901] (0/4) Epoch 14, batch 3350, loss[loss=0.1983, simple_loss=0.2745, pruned_loss=0.06108, over 7439.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.3027, pruned_loss=0.07381, over 1614033.74 frames. ], batch size: 17, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:49,806 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:57,197 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.656e+02 3.299e+02 4.467e+02 8.781e+02, threshold=6.597e+02, percent-clipped=5.0 +2023-02-06 15:04:04,261 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:10,805 INFO [train.py:901] (0/4) Epoch 14, batch 3400, loss[loss=0.2108, simple_loss=0.2822, pruned_loss=0.06967, over 7542.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3026, pruned_loss=0.0738, over 1612117.64 frames. 
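
The `scaling.py ... Whitening: num_groups=..., num_channels=..., metric=X vs. limit=Y` lines above come from Zipformer's whitening constraint, which monitors how far the covariance of each group of channels is from a multiple of the identity and keeps it under the configured limit. A sketch of one reasonable form of that metric, equal to 1.0 for perfectly "white" features; the exact normalization in icefall's `scaling.py` may differ:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Return d * trace(C @ C) / trace(C)^2 averaged over channel groups,
    where C is each group's feature covariance and d the group size. The
    value is exactly 1.0 when C is a multiple of the identity ("white")
    and grows as the eigenvalue spread widens; icefall's scaling.py logs
    a quantity of this kind, though its normalization may differ.
    """
    num_frames, num_channels = x.shape
    assert num_channels % num_groups == 0
    d = num_channels // num_groups
    x = x.reshape(num_frames, num_groups, d).transpose(0, 1)  # (g, n, d)
    x = x - x.mean(dim=1, keepdim=True)
    cov = x.transpose(1, 2) @ x / num_frames                  # (g, d, d)
    tr_cov = cov.diagonal(dim1=1, dim2=2).sum(dim=1)          # trace(C)
    tr_cov_sq = (cov * cov).sum(dim=(1, 2))                   # trace(C @ C)
    return (d * tr_cov_sq / tr_cov.pow(2)).mean()

# White-ish random features score close to 1, well under limit=2.0:
print(float(whitening_metric(torch.randn(1000, 96), num_groups=8)))
```

Most of the values logged here (e.g. `1.44 vs. limit=2.0`, `4.33 vs. limit=5.0`) sit below their limits, i.e. the activations stayed acceptably decorrelated at these points in training.
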
], batch size: 18, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:04:22,160 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:46,705 INFO [train.py:901] (0/4) Epoch 14, batch 3450, loss[loss=0.1719, simple_loss=0.2465, pruned_loss=0.04865, over 7698.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3023, pruned_loss=0.07362, over 1612889.30 frames. ], batch size: 18, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:07,907 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.481e+02 3.055e+02 3.627e+02 7.933e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-06 15:05:21,992 INFO [train.py:901] (0/4) Epoch 14, batch 3500, loss[loss=0.1886, simple_loss=0.2745, pruned_loss=0.05135, over 8236.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3021, pruned_loss=0.07356, over 1611363.86 frames. ], batch size: 22, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:29,153 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 15:05:31,985 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:05:42,109 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9412, 2.1023, 1.8027, 2.4391, 1.2393, 1.4312, 1.6973, 2.1217], + device='cuda:0'), covar=tensor([0.0674, 0.0799, 0.0910, 0.0521, 0.1213, 0.1533, 0.1020, 0.0718], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0206, 0.0250, 0.0212, 0.0214, 0.0249, 0.0255, 0.0213], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 15:05:57,721 INFO [train.py:901] (0/4) Epoch 14, batch 3550, loss[loss=0.2201, simple_loss=0.2943, pruned_loss=0.07292, over 7644.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3011, pruned_loss=0.07283, over 1609277.27 frames. ], batch size: 19, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:17,925 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.417e+02 3.151e+02 4.175e+02 8.210e+02, threshold=6.301e+02, percent-clipped=3.0 +2023-02-06 15:06:31,412 INFO [train.py:901] (0/4) Epoch 14, batch 3600, loss[loss=0.2098, simple_loss=0.2992, pruned_loss=0.06018, over 8367.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3017, pruned_loss=0.07264, over 1612015.34 frames. ], batch size: 24, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:36,122 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108687.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:07,040 INFO [train.py:901] (0/4) Epoch 14, batch 3650, loss[loss=0.1925, simple_loss=0.2818, pruned_loss=0.05159, over 8100.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3019, pruned_loss=0.07267, over 1611336.46 frames. ], batch size: 23, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:27,807 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 2.655e+02 3.191e+02 3.880e+02 8.243e+02, threshold=6.382e+02, percent-clipped=2.0 +2023-02-06 15:07:30,610 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 15:07:41,535 INFO [train.py:901] (0/4) Epoch 14, batch 3700, loss[loss=0.2175, simple_loss=0.2993, pruned_loss=0.06784, over 8448.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3022, pruned_loss=0.07304, over 1615744.61 frames. 
], batch size: 48, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:50,766 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:56,399 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:01,060 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:15,862 INFO [train.py:901] (0/4) Epoch 14, batch 3750, loss[loss=0.2211, simple_loss=0.3017, pruned_loss=0.07024, over 8779.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3018, pruned_loss=0.07275, over 1617273.90 frames. ], batch size: 30, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:08:37,497 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.414e+02 2.846e+02 3.664e+02 8.039e+02, threshold=5.692e+02, percent-clipped=5.0 +2023-02-06 15:08:51,956 INFO [train.py:901] (0/4) Epoch 14, batch 3800, loss[loss=0.2113, simple_loss=0.2931, pruned_loss=0.06479, over 8762.00 frames. ], tot_loss[loss=0.224, simple_loss=0.302, pruned_loss=0.07296, over 1615548.33 frames. ], batch size: 30, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:08:56,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8700, 2.3559, 3.6945, 2.9354, 3.3551, 2.6124, 2.2897, 1.9044], + device='cuda:0'), covar=tensor([0.4119, 0.4703, 0.1343, 0.2912, 0.1982, 0.2288, 0.1606, 0.4846], + device='cuda:0'), in_proj_covar=tensor([0.0899, 0.0901, 0.0746, 0.0873, 0.0954, 0.0831, 0.0711, 0.0780], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:09:12,268 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:26,893 INFO [train.py:901] (0/4) Epoch 14, batch 3850, loss[loss=0.239, simple_loss=0.3142, pruned_loss=0.08192, over 8620.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3023, pruned_loss=0.07344, over 1615865.97 frames. ], batch size: 31, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:09:33,935 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:35,948 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 15:09:49,068 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.574e+02 3.020e+02 4.517e+02 9.725e+02, threshold=6.039e+02, percent-clipped=15.0 +2023-02-06 15:10:04,295 INFO [train.py:901] (0/4) Epoch 14, batch 3900, loss[loss=0.2372, simple_loss=0.307, pruned_loss=0.08368, over 8624.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3015, pruned_loss=0.07253, over 1613945.95 frames. ], batch size: 34, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:39,039 INFO [train.py:901] (0/4) Epoch 14, batch 3950, loss[loss=0.2319, simple_loss=0.3151, pruned_loss=0.07431, over 8362.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3027, pruned_loss=0.07392, over 1616316.70 frames. 
], batch size: 24, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:56,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:10:56,961 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109055.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:10:59,085 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:00,259 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.458e+02 2.966e+02 3.777e+02 8.079e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-06 15:11:14,761 INFO [train.py:901] (0/4) Epoch 14, batch 4000, loss[loss=0.239, simple_loss=0.3149, pruned_loss=0.08159, over 7969.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3024, pruned_loss=0.0741, over 1617488.17 frames. ], batch size: 21, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:11:17,688 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:50,557 INFO [train.py:901] (0/4) Epoch 14, batch 4050, loss[loss=0.1912, simple_loss=0.2681, pruned_loss=0.05719, over 7539.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3024, pruned_loss=0.0734, over 1624266.30 frames. ], batch size: 18, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:06,813 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:11,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.362e+02 2.684e+02 3.543e+02 7.215e+02, threshold=5.369e+02, percent-clipped=4.0 +2023-02-06 15:12:16,047 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:19,022 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.71 vs. limit=2.0 +2023-02-06 15:12:26,291 INFO [train.py:901] (0/4) Epoch 14, batch 4100, loss[loss=0.1722, simple_loss=0.2537, pruned_loss=0.04537, over 7204.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3019, pruned_loss=0.07312, over 1618783.86 frames. ], batch size: 16, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:34,006 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:49,793 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:13:02,503 INFO [train.py:901] (0/4) Epoch 14, batch 4150, loss[loss=0.2143, simple_loss=0.3047, pruned_loss=0.0619, over 8101.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3025, pruned_loss=0.07345, over 1617427.58 frames. ], batch size: 23, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:13:06,406 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-02-06 15:13:12,246 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6627, 2.0393, 5.8013, 2.4281, 5.1904, 4.8561, 5.3466, 5.2218], + device='cuda:0'), covar=tensor([0.0447, 0.4001, 0.0327, 0.3044, 0.0879, 0.0761, 0.0420, 0.0445], + device='cuda:0'), in_proj_covar=tensor([0.0531, 0.0600, 0.0619, 0.0561, 0.0634, 0.0545, 0.0535, 0.0597], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:13:23,982 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.677e+02 3.078e+02 3.893e+02 8.547e+02, threshold=6.157e+02, percent-clipped=10.0 +2023-02-06 15:13:28,957 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:13:35,724 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 15:13:37,146 INFO [train.py:901] (0/4) Epoch 14, batch 4200, loss[loss=0.1982, simple_loss=0.2808, pruned_loss=0.05782, over 8241.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.302, pruned_loss=0.07341, over 1615410.82 frames. ], batch size: 22, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:13:59,695 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109310.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:01,006 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:01,582 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 15:14:14,557 INFO [train.py:901] (0/4) Epoch 14, batch 4250, loss[loss=0.1877, simple_loss=0.2758, pruned_loss=0.04983, over 7652.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3013, pruned_loss=0.07316, over 1612154.95 frames. ], batch size: 19, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:14:18,177 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:18,832 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:35,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.488e+02 3.016e+02 3.845e+02 8.299e+02, threshold=6.033e+02, percent-clipped=4.0 +2023-02-06 15:14:48,676 INFO [train.py:901] (0/4) Epoch 14, batch 4300, loss[loss=0.2592, simple_loss=0.3253, pruned_loss=0.09652, over 7821.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3012, pruned_loss=0.07288, over 1611029.03 frames. ], batch size: 20, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:15:01,898 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109399.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:15:24,501 INFO [train.py:901] (0/4) Epoch 14, batch 4350, loss[loss=0.2337, simple_loss=0.3056, pruned_loss=0.08087, over 7927.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3027, pruned_loss=0.07415, over 1613142.21 frames. 
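
The frequent `zipformer.py ... warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop={...}` records above reflect a stochastic-depth mechanism: each encoder layer has a staggered warmup window (666.7 through 4000.0 batches here), and for each batch some layers may be randomly bypassed, often during warmup and only rarely afterwards, which is why `num_to_drop=0` dominates at `batch_count` around 109,000. A schematic of the selection; both probabilities are assumptions for illustration, not zipformer.py's actual schedule:

```python
import random

def pick_layers_to_drop(batch_count, layer_warmup_ends,
                        p_warmup=0.5, p_settled=0.075):
    """Choose encoder layers to bypass for one batch (stochastic depth).

    Layers still inside their (staggered) warmup window are dropped
    aggressively; settled layers keep only a small residual drop rate.
    The probabilities here are illustrative assumptions.
    """
    to_drop = set()
    for layer, warmup_end in enumerate(layer_warmup_ends):
        p = p_warmup if batch_count < warmup_end else p_settled
        if random.random() < p:
            to_drop.add(layer)
    return to_drop

# Staggered per-layer warmup ends like those in the log records above:
ends = [1333.3, 2000.0, 2666.7, 3333.3, 4000.0]
print(pick_layers_to_drop(batch_count=109190.0, layer_warmup_ends=ends))
```

Bypassing layers early in their life lets the shallower sub-network stabilize first; the small residual drop rate later acts as regularization, matching the occasional `num_to_drop=1` entries this late in training.
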
], batch size: 20, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:15:28,952 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9200, 2.3310, 3.6260, 2.6311, 3.1960, 2.5933, 2.2067, 1.6855], + device='cuda:0'), covar=tensor([0.3932, 0.4488, 0.1237, 0.2929, 0.2150, 0.2600, 0.1783, 0.4863], + device='cuda:0'), in_proj_covar=tensor([0.0901, 0.0898, 0.0743, 0.0877, 0.0952, 0.0829, 0.0714, 0.0782], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:15:34,091 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 15:15:47,275 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.678e+02 3.270e+02 4.253e+02 1.326e+03, threshold=6.540e+02, percent-clipped=8.0 +2023-02-06 15:16:00,554 INFO [train.py:901] (0/4) Epoch 14, batch 4400, loss[loss=0.2328, simple_loss=0.3158, pruned_loss=0.0749, over 8288.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3017, pruned_loss=0.07354, over 1611610.61 frames. ], batch size: 23, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:16:15,770 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 15:16:23,486 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6617, 4.7037, 4.1596, 2.1126, 4.1170, 4.3408, 4.3049, 4.0959], + device='cuda:0'), covar=tensor([0.0655, 0.0495, 0.0987, 0.4781, 0.0794, 0.0944, 0.1224, 0.0730], + device='cuda:0'), in_proj_covar=tensor([0.0473, 0.0390, 0.0396, 0.0490, 0.0391, 0.0394, 0.0380, 0.0341], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:16:24,191 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109514.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:16:31,903 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:35,904 INFO [train.py:901] (0/4) Epoch 14, batch 4450, loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05809, over 8194.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3022, pruned_loss=0.07309, over 1614668.08 frames. ], batch size: 23, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:16:49,302 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:55,376 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:58,636 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.456e+02 2.864e+02 3.608e+02 1.087e+03, threshold=5.728e+02, percent-clipped=4.0 +2023-02-06 15:17:01,022 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5531, 2.7223, 1.7827, 2.1077, 2.2473, 1.6327, 1.9161, 2.1058], + device='cuda:0'), covar=tensor([0.1346, 0.0312, 0.1010, 0.0646, 0.0584, 0.1196, 0.0980, 0.0851], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0231, 0.0319, 0.0294, 0.0296, 0.0324, 0.0342, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:17:11,141 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 15:17:12,497 INFO [train.py:901] (0/4) Epoch 14, batch 4500, loss[loss=0.237, simple_loss=0.3154, pruned_loss=0.07936, over 8644.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3016, pruned_loss=0.07284, over 1615292.32 frames. ], batch size: 34, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:17:24,297 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:17:47,009 INFO [train.py:901] (0/4) Epoch 14, batch 4550, loss[loss=0.1896, simple_loss=0.269, pruned_loss=0.05513, over 7804.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3007, pruned_loss=0.07256, over 1611877.28 frames. ], batch size: 20, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:18:05,731 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:09,031 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.640e+02 3.232e+02 4.162e+02 9.021e+02, threshold=6.464e+02, percent-clipped=8.0 +2023-02-06 15:18:16,727 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109671.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:21,325 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6285, 3.5714, 3.2293, 2.1675, 3.1571, 3.2607, 3.3105, 3.0253], + device='cuda:0'), covar=tensor([0.0864, 0.0720, 0.0987, 0.3299, 0.0920, 0.1037, 0.1249, 0.0929], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0393, 0.0398, 0.0491, 0.0390, 0.0394, 0.0382, 0.0343], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:18:21,367 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:22,082 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0149, 1.6286, 1.2962, 1.5294, 1.3210, 1.1606, 1.2483, 1.2273], + device='cuda:0'), covar=tensor([0.1056, 0.0429, 0.1226, 0.0529, 0.0760, 0.1387, 0.0966, 0.0785], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0233, 0.0321, 0.0297, 0.0298, 0.0327, 0.0346, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:18:23,249 INFO [train.py:901] (0/4) Epoch 14, batch 4600, loss[loss=0.2233, simple_loss=0.3084, pruned_loss=0.06907, over 8505.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3003, pruned_loss=0.07242, over 1610326.69 frames. ], batch size: 26, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:18:23,320 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:48,909 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.64 vs. limit=5.0 +2023-02-06 15:18:56,938 INFO [train.py:901] (0/4) Epoch 14, batch 4650, loss[loss=0.2779, simple_loss=0.321, pruned_loss=0.1174, over 7540.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3016, pruned_loss=0.07318, over 1616121.27 frames. 
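
The periodic `attn_weights_entropy = tensor([...])` dumps above are a diagnostic: the entropy of each attention head's weight distribution, making heads that collapse onto a single position (entropy near 0) easy to spot next to healthy, spread-out heads. A minimal version of the computation; the tensor layout and averaging in zipformer.py may differ:

```python
import torch

def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
    """Entropy of attention distributions, averaged over query positions.

    attn: (num_heads, num_queries, num_keys), each row a distribution
    summing to 1. Returns a (num_heads,) tensor; uniform attention over
    num_keys positions yields log(num_keys), collapsed attention ~0.
    """
    eps = 1.0e-20
    entropy = -(attn * (attn + eps).log()).sum(dim=-1)  # (heads, queries)
    return entropy.mean(dim=-1)

# Example: one collapsed head and one uniform head over 8 key positions.
sharp = torch.zeros(1, 4, 8)
sharp[..., 0] = 1.0
uniform = torch.full((1, 4, 8), 1.0 / 8)
print(attn_weights_entropy(torch.cat([sharp, uniform])))  # ~[0.0, 2.079]
```

Very low per-head entropies in the dumps (values like 0.27 or 0.56 in some rows above) are worth watching, since a head that always attends to one frame contributes little beyond a skip connection.
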
], batch size: 18, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:19:18,705 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.556e+02 3.032e+02 3.907e+02 9.020e+02, threshold=6.065e+02, percent-clipped=4.0 +2023-02-06 15:19:24,807 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109770.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:19:25,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:19:31,303 INFO [train.py:901] (0/4) Epoch 14, batch 4700, loss[loss=0.2703, simple_loss=0.3474, pruned_loss=0.09661, over 8491.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3037, pruned_loss=0.07419, over 1618644.72 frames. ], batch size: 26, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:19:42,821 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:19:42,844 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109795.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:19:55,433 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8688, 1.7196, 1.8335, 1.7257, 1.2469, 1.6444, 2.2955, 2.1019], + device='cuda:0'), covar=tensor([0.0414, 0.1163, 0.1593, 0.1243, 0.0547, 0.1401, 0.0579, 0.0546], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0154, 0.0191, 0.0158, 0.0102, 0.0163, 0.0115, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 15:20:06,516 INFO [train.py:901] (0/4) Epoch 14, batch 4750, loss[loss=0.2029, simple_loss=0.2933, pruned_loss=0.05622, over 8289.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3038, pruned_loss=0.07445, over 1616918.70 frames. ], batch size: 23, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:20:10,487 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 15:20:12,435 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 15:20:26,961 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.643e+02 3.166e+02 4.371e+02 1.104e+03, threshold=6.332e+02, percent-clipped=5.0 +2023-02-06 15:20:39,503 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.16 vs. limit=5.0 +2023-02-06 15:20:40,298 INFO [train.py:901] (0/4) Epoch 14, batch 4800, loss[loss=0.2653, simple_loss=0.3396, pruned_loss=0.09556, over 8190.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3048, pruned_loss=0.07492, over 1622412.98 frames. ], batch size: 23, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:21:03,196 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 15:21:14,258 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:16,044 INFO [train.py:901] (0/4) Epoch 14, batch 4850, loss[loss=0.2121, simple_loss=0.3039, pruned_loss=0.06014, over 8831.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3036, pruned_loss=0.07383, over 1623962.23 frames. 
], batch size: 50, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:21:23,514 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109941.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:31,157 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109952.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:37,040 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.854e+02 3.344e+02 7.947e+02, threshold=5.708e+02, percent-clipped=2.0 +2023-02-06 15:21:49,881 INFO [train.py:901] (0/4) Epoch 14, batch 4900, loss[loss=0.168, simple_loss=0.2455, pruned_loss=0.04527, over 7239.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.303, pruned_loss=0.07385, over 1620540.17 frames. ], batch size: 16, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:22:03,875 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 15:22:04,102 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-110000.pt +2023-02-06 15:22:19,432 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:24,309 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:26,032 INFO [train.py:901] (0/4) Epoch 14, batch 4950, loss[loss=0.222, simple_loss=0.2975, pruned_loss=0.07324, over 8291.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3019, pruned_loss=0.07274, over 1622256.03 frames. ], batch size: 23, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:22:30,945 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:41,721 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:42,426 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:45,150 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:48,336 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.585e+02 3.180e+02 4.032e+02 7.448e+02, threshold=6.360e+02, percent-clipped=3.0 +2023-02-06 15:22:49,149 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:58,263 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110076.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:00,771 INFO [train.py:901] (0/4) Epoch 14, batch 5000, loss[loss=0.2017, simple_loss=0.2869, pruned_loss=0.05819, over 8090.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3005, pruned_loss=0.07213, over 1616698.68 frames. 
], batch size: 21, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:23:04,307 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4299, 1.9714, 3.4164, 1.1890, 2.4352, 1.8833, 1.4695, 2.3096], + device='cuda:0'), covar=tensor([0.1989, 0.2377, 0.0805, 0.4403, 0.1888, 0.3191, 0.2227, 0.2556], + device='cuda:0'), in_proj_covar=tensor([0.0502, 0.0550, 0.0540, 0.0597, 0.0622, 0.0565, 0.0488, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:23:10,393 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5336, 1.8752, 2.7801, 1.3941, 1.9427, 1.8853, 1.5975, 1.8999], + device='cuda:0'), covar=tensor([0.1752, 0.2159, 0.0683, 0.3870, 0.1697, 0.2880, 0.1912, 0.2015], + device='cuda:0'), in_proj_covar=tensor([0.0503, 0.0551, 0.0541, 0.0598, 0.0623, 0.0566, 0.0489, 0.0620], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:23:33,487 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:34,637 INFO [train.py:901] (0/4) Epoch 14, batch 5050, loss[loss=0.233, simple_loss=0.3056, pruned_loss=0.08023, over 7441.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3022, pruned_loss=0.0735, over 1615185.34 frames. ], batch size: 17, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:23:36,724 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:38,744 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:43,179 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 15:23:47,303 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7349, 1.2263, 4.8841, 1.6765, 4.3251, 4.1316, 4.4023, 4.2382], + device='cuda:0'), covar=tensor([0.0457, 0.4516, 0.0367, 0.3478, 0.0977, 0.0788, 0.0509, 0.0539], + device='cuda:0'), in_proj_covar=tensor([0.0532, 0.0598, 0.0620, 0.0561, 0.0631, 0.0542, 0.0537, 0.0596], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:23:57,272 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.889e+02 3.601e+02 4.263e+02 9.587e+02, threshold=7.203e+02, percent-clipped=6.0 +2023-02-06 15:24:09,945 INFO [train.py:901] (0/4) Epoch 14, batch 5100, loss[loss=0.2404, simple_loss=0.3281, pruned_loss=0.0763, over 8742.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3017, pruned_loss=0.07292, over 1619521.77 frames. ], batch size: 30, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:24:42,797 INFO [train.py:901] (0/4) Epoch 14, batch 5150, loss[loss=0.1724, simple_loss=0.2572, pruned_loss=0.04379, over 7705.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3014, pruned_loss=0.07324, over 1609341.17 frames. ], batch size: 18, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:25:05,056 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.455e+02 3.012e+02 3.817e+02 9.599e+02, threshold=6.024e+02, percent-clipped=2.0 +2023-02-06 15:25:19,327 INFO [train.py:901] (0/4) Epoch 14, batch 5200, loss[loss=0.2207, simple_loss=0.2914, pruned_loss=0.07498, over 8101.00 frames. 
], tot_loss[loss=0.2248, simple_loss=0.3019, pruned_loss=0.07386, over 1610289.77 frames. ], batch size: 21, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:25:33,781 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:39,485 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 15:25:41,071 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:47,137 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:53,186 INFO [train.py:901] (0/4) Epoch 14, batch 5250, loss[loss=0.2098, simple_loss=0.2904, pruned_loss=0.06453, over 7647.00 frames. ], tot_loss[loss=0.226, simple_loss=0.303, pruned_loss=0.07449, over 1609190.62 frames. ], batch size: 19, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:25:57,539 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:58,356 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:08,757 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7704, 1.4374, 3.9578, 1.4138, 3.4120, 3.3065, 3.5472, 3.4203], + device='cuda:0'), covar=tensor([0.0723, 0.4133, 0.0596, 0.3812, 0.1296, 0.1065, 0.0718, 0.0760], + device='cuda:0'), in_proj_covar=tensor([0.0539, 0.0602, 0.0628, 0.0567, 0.0637, 0.0545, 0.0544, 0.0601], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:26:10,243 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8220, 2.2429, 3.5715, 2.5954, 3.2188, 2.5638, 2.2158, 1.9634], + device='cuda:0'), covar=tensor([0.4241, 0.4818, 0.1390, 0.3004, 0.2201, 0.2547, 0.1804, 0.4695], + device='cuda:0'), in_proj_covar=tensor([0.0898, 0.0900, 0.0744, 0.0877, 0.0952, 0.0830, 0.0714, 0.0782], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:26:15,522 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.532e+02 3.204e+02 3.879e+02 8.466e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 15:26:28,855 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110379.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:29,506 INFO [train.py:901] (0/4) Epoch 14, batch 5300, loss[loss=0.2181, simple_loss=0.2869, pruned_loss=0.07462, over 7787.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3029, pruned_loss=0.07417, over 1615243.49 frames. ], batch size: 19, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:26:39,505 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:48,908 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:56,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:27:04,993 INFO [train.py:901] (0/4) Epoch 14, batch 5350, loss[loss=0.2111, simple_loss=0.3024, pruned_loss=0.05993, over 8360.00 frames. 
], tot_loss[loss=0.2241, simple_loss=0.3019, pruned_loss=0.07318, over 1617346.44 frames. ], batch size: 24, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:27:25,496 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.452e+02 3.047e+02 3.791e+02 6.566e+02, threshold=6.094e+02, percent-clipped=2.0 +2023-02-06 15:27:32,788 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:27:36,872 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:27:38,759 INFO [train.py:901] (0/4) Epoch 14, batch 5400, loss[loss=0.2376, simple_loss=0.3098, pruned_loss=0.08268, over 7967.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3029, pruned_loss=0.07355, over 1619266.31 frames. ], batch size: 21, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:27:45,642 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3401, 2.4497, 1.6640, 2.0783, 2.1392, 1.4448, 1.8753, 1.9222], + device='cuda:0'), covar=tensor([0.1420, 0.0365, 0.1180, 0.0550, 0.0624, 0.1453, 0.1035, 0.0825], + device='cuda:0'), in_proj_covar=tensor([0.0347, 0.0232, 0.0319, 0.0294, 0.0295, 0.0325, 0.0339, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:27:48,322 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:28:08,442 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:28:14,347 INFO [train.py:901] (0/4) Epoch 14, batch 5450, loss[loss=0.2324, simple_loss=0.3248, pruned_loss=0.06999, over 8598.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3032, pruned_loss=0.07324, over 1620475.35 frames. ], batch size: 31, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:28:30,468 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 15:28:34,926 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.429e+02 2.846e+02 3.589e+02 7.640e+02, threshold=5.692e+02, percent-clipped=1.0 +2023-02-06 15:28:46,381 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6621, 2.9349, 3.4744, 2.1798, 3.4183, 2.4957, 2.0116, 2.6138], + device='cuda:0'), covar=tensor([0.0644, 0.0297, 0.0133, 0.0511, 0.0396, 0.0547, 0.0679, 0.0356], + device='cuda:0'), in_proj_covar=tensor([0.0411, 0.0353, 0.0301, 0.0404, 0.0338, 0.0495, 0.0369, 0.0371], + device='cuda:0'), out_proj_covar=tensor([1.1404e-04, 9.5438e-05, 8.1448e-05, 1.0984e-04, 9.2225e-05, 1.4523e-04, + 1.0269e-04, 1.0161e-04], device='cuda:0') +2023-02-06 15:28:47,533 INFO [train.py:901] (0/4) Epoch 14, batch 5500, loss[loss=0.235, simple_loss=0.3147, pruned_loss=0.07763, over 8132.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3026, pruned_loss=0.07323, over 1612942.34 frames. 
], batch size: 22, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:28:52,315 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:28:55,800 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:23,308 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:23,834 INFO [train.py:901] (0/4) Epoch 14, batch 5550, loss[loss=0.1782, simple_loss=0.2525, pruned_loss=0.05196, over 7429.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3014, pruned_loss=0.07344, over 1606435.43 frames. ], batch size: 17, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:29:33,996 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:44,461 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.421e+02 3.120e+02 3.692e+02 1.093e+03, threshold=6.240e+02, percent-clipped=9.0 +2023-02-06 15:29:44,674 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7457, 2.2281, 2.3522, 2.2043, 1.4373, 2.3404, 2.5404, 2.3146], + device='cuda:0'), covar=tensor([0.0419, 0.0841, 0.1233, 0.0991, 0.0619, 0.0989, 0.0542, 0.0414], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0101, 0.0161, 0.0114, 0.0137], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 15:29:47,074 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110665.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:56,909 INFO [train.py:901] (0/4) Epoch 14, batch 5600, loss[loss=0.2108, simple_loss=0.2818, pruned_loss=0.06996, over 6797.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3023, pruned_loss=0.0735, over 1611564.52 frames. ], batch size: 15, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:29:56,978 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:30,942 INFO [train.py:901] (0/4) Epoch 14, batch 5650, loss[loss=0.216, simple_loss=0.2951, pruned_loss=0.06842, over 8295.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3013, pruned_loss=0.07298, over 1606906.23 frames. ], batch size: 23, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:30:33,073 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 15:30:33,295 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6745, 1.6896, 2.2211, 1.4655, 1.1393, 2.2565, 0.2857, 1.3223], + device='cuda:0'), covar=tensor([0.2216, 0.1644, 0.0383, 0.1915, 0.3863, 0.0336, 0.2998, 0.1714], + device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0173, 0.0106, 0.0215, 0.0258, 0.0110, 0.0159, 0.0170], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 15:30:47,233 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:49,875 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:54,018 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:54,499 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.502e+02 3.092e+02 3.638e+02 5.778e+02, threshold=6.185e+02, percent-clipped=0.0 +2023-02-06 15:31:00,908 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8903, 2.5854, 3.5613, 1.8569, 1.9261, 3.5828, 0.5622, 1.9560], + device='cuda:0'), covar=tensor([0.2508, 0.1389, 0.0328, 0.2758, 0.3905, 0.0361, 0.3499, 0.2133], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0174, 0.0106, 0.0215, 0.0259, 0.0111, 0.0160, 0.0171], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 15:31:04,317 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:05,718 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110777.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:07,495 INFO [train.py:901] (0/4) Epoch 14, batch 5700, loss[loss=0.2412, simple_loss=0.3021, pruned_loss=0.09018, over 7966.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2994, pruned_loss=0.07184, over 1605416.67 frames. ], batch size: 21, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:31:07,674 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110780.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:17,910 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:22,885 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:40,955 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 15:31:41,631 INFO [train.py:901] (0/4) Epoch 14, batch 5750, loss[loss=0.2527, simple_loss=0.3262, pruned_loss=0.08965, over 8332.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.299, pruned_loss=0.07189, over 1605187.89 frames. 
], batch size: 25, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:31:51,523 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:55,000 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:04,409 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.447e+02 3.019e+02 3.853e+02 7.521e+02, threshold=6.038e+02, percent-clipped=3.0 +2023-02-06 15:32:10,160 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:14,063 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:18,685 INFO [train.py:901] (0/4) Epoch 14, batch 5800, loss[loss=0.2253, simple_loss=0.3039, pruned_loss=0.07332, over 8478.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2995, pruned_loss=0.07233, over 1606041.25 frames. ], batch size: 49, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:32:22,957 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:53,261 INFO [train.py:901] (0/4) Epoch 14, batch 5850, loss[loss=0.2041, simple_loss=0.2815, pruned_loss=0.06339, over 7973.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2989, pruned_loss=0.07185, over 1605924.71 frames. ], batch size: 21, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:33:14,140 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.498e+02 3.098e+02 4.112e+02 1.106e+03, threshold=6.195e+02, percent-clipped=10.0 +2023-02-06 15:33:22,877 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:33:23,754 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3645, 1.9886, 2.8007, 2.3030, 2.7110, 2.2603, 1.9369, 1.4611], + device='cuda:0'), covar=tensor([0.4558, 0.4305, 0.1454, 0.2812, 0.1984, 0.2544, 0.1715, 0.4580], + device='cuda:0'), in_proj_covar=tensor([0.0898, 0.0903, 0.0746, 0.0876, 0.0951, 0.0828, 0.0714, 0.0784], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:33:25,733 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:33:28,980 INFO [train.py:901] (0/4) Epoch 14, batch 5900, loss[loss=0.2376, simple_loss=0.3206, pruned_loss=0.07732, over 8333.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2996, pruned_loss=0.07202, over 1609071.14 frames. ], batch size: 25, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:33:54,403 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:03,740 INFO [train.py:901] (0/4) Epoch 14, batch 5950, loss[loss=0.2327, simple_loss=0.3164, pruned_loss=0.07453, over 8106.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2995, pruned_loss=0.07202, over 1607759.00 frames. 
], batch size: 23, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:34:07,779 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111036.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:11,079 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:17,712 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:24,226 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.648e+02 3.047e+02 4.016e+02 7.772e+02, threshold=6.093e+02, percent-clipped=5.0 +2023-02-06 15:34:24,447 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:34,746 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111076.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:38,011 INFO [train.py:901] (0/4) Epoch 14, batch 6000, loss[loss=0.2429, simple_loss=0.319, pruned_loss=0.08342, over 8447.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3017, pruned_loss=0.07331, over 1612373.05 frames. ], batch size: 29, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:34:38,012 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 15:34:50,549 INFO [train.py:935] (0/4) Epoch 14, validation: loss=0.1818, simple_loss=0.2816, pruned_loss=0.04094, over 944034.00 frames. +2023-02-06 15:34:50,550 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 15:34:56,293 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:35:03,693 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:35:27,232 INFO [train.py:901] (0/4) Epoch 14, batch 6050, loss[loss=0.2398, simple_loss=0.3095, pruned_loss=0.08509, over 7935.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3005, pruned_loss=0.07273, over 1609149.34 frames. ], batch size: 20, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:35:49,432 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.432e+02 2.876e+02 3.526e+02 5.542e+02, threshold=5.752e+02, percent-clipped=0.0 +2023-02-06 15:36:01,588 INFO [train.py:901] (0/4) Epoch 14, batch 6100, loss[loss=0.2413, simple_loss=0.3186, pruned_loss=0.08204, over 8347.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2988, pruned_loss=0.07167, over 1610343.70 frames. 
], batch size: 26, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:36:03,139 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6262, 1.5622, 2.8319, 1.2928, 2.1722, 3.0425, 3.1382, 2.5838], + device='cuda:0'), covar=tensor([0.1116, 0.1417, 0.0387, 0.2021, 0.0828, 0.0293, 0.0570, 0.0628], + device='cuda:0'), in_proj_covar=tensor([0.0272, 0.0305, 0.0268, 0.0296, 0.0283, 0.0244, 0.0370, 0.0293], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 15:36:09,227 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4962, 1.9005, 1.9295, 1.0060, 1.9970, 1.4521, 0.4294, 1.7328], + device='cuda:0'), covar=tensor([0.0426, 0.0274, 0.0209, 0.0436, 0.0321, 0.0663, 0.0655, 0.0230], + device='cuda:0'), in_proj_covar=tensor([0.0415, 0.0356, 0.0303, 0.0406, 0.0342, 0.0496, 0.0371, 0.0380], + device='cuda:0'), out_proj_covar=tensor([1.1529e-04, 9.6312e-05, 8.2072e-05, 1.1021e-04, 9.3345e-05, 1.4527e-04, + 1.0353e-04, 1.0408e-04], device='cuda:0') +2023-02-06 15:36:15,953 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 15:36:24,950 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:36:37,121 INFO [train.py:901] (0/4) Epoch 14, batch 6150, loss[loss=0.1909, simple_loss=0.2666, pruned_loss=0.05764, over 7408.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2988, pruned_loss=0.07113, over 1608110.97 frames. ], batch size: 17, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:36:37,202 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111230.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:36:46,706 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:36:59,323 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.474e+02 3.213e+02 4.029e+02 8.079e+02, threshold=6.426e+02, percent-clipped=5.0 +2023-02-06 15:37:11,852 INFO [train.py:901] (0/4) Epoch 14, batch 6200, loss[loss=0.2139, simple_loss=0.2968, pruned_loss=0.06551, over 8761.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2989, pruned_loss=0.07141, over 1606959.03 frames. ], batch size: 30, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:37:38,511 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:37:45,101 INFO [train.py:901] (0/4) Epoch 14, batch 6250, loss[loss=0.2012, simple_loss=0.2888, pruned_loss=0.05673, over 8286.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2975, pruned_loss=0.07041, over 1607102.50 frames. 
], batch size: 23, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:37:55,173 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:37:55,810 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:38:08,443 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.286e+02 2.818e+02 3.691e+02 1.208e+03, threshold=5.637e+02, percent-clipped=2.0 +2023-02-06 15:38:13,369 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:38:20,509 INFO [train.py:901] (0/4) Epoch 14, batch 6300, loss[loss=0.2177, simple_loss=0.3076, pruned_loss=0.06385, over 8497.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2989, pruned_loss=0.07055, over 1610463.20 frames. ], batch size: 49, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:38:55,242 INFO [train.py:901] (0/4) Epoch 14, batch 6350, loss[loss=0.2386, simple_loss=0.3117, pruned_loss=0.08282, over 8184.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2993, pruned_loss=0.07085, over 1607737.80 frames. ], batch size: 23, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:38:58,944 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:12,613 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 15:39:13,910 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 15:39:17,455 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.340e+02 2.891e+02 3.552e+02 9.934e+02, threshold=5.783e+02, percent-clipped=8.0 +2023-02-06 15:39:23,082 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:30,957 INFO [train.py:901] (0/4) Epoch 14, batch 6400, loss[loss=0.1857, simple_loss=0.2577, pruned_loss=0.05682, over 7690.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2986, pruned_loss=0.07056, over 1605440.32 frames. ], batch size: 18, lr: 5.40e-03, grad_scale: 8.0 +2023-02-06 15:39:32,442 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5678, 4.5434, 4.1060, 1.9655, 4.0183, 4.2483, 4.1669, 4.0469], + device='cuda:0'), covar=tensor([0.0708, 0.0583, 0.1057, 0.5093, 0.0867, 0.0900, 0.1280, 0.0725], + device='cuda:0'), in_proj_covar=tensor([0.0484, 0.0400, 0.0402, 0.0500, 0.0395, 0.0401, 0.0388, 0.0346], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:39:35,198 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111486.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:40,683 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:54,996 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.96 vs. limit=5.0 +2023-02-06 15:40:05,256 INFO [train.py:901] (0/4) Epoch 14, batch 6450, loss[loss=0.2045, simple_loss=0.2677, pruned_loss=0.07064, over 7703.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2979, pruned_loss=0.0699, over 1608011.32 frames. 
], batch size: 18, lr: 5.40e-03, grad_scale: 8.0 +2023-02-06 15:40:26,320 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.418e+02 3.184e+02 3.807e+02 1.482e+03, threshold=6.367e+02, percent-clipped=8.0 +2023-02-06 15:40:37,968 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2257, 1.9416, 2.7435, 2.2665, 2.5596, 2.1594, 1.7834, 1.3579], + device='cuda:0'), covar=tensor([0.4356, 0.4051, 0.1303, 0.2611, 0.1993, 0.2301, 0.1757, 0.4327], + device='cuda:0'), in_proj_covar=tensor([0.0893, 0.0897, 0.0744, 0.0872, 0.0941, 0.0828, 0.0710, 0.0780], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:40:39,078 INFO [train.py:901] (0/4) Epoch 14, batch 6500, loss[loss=0.2147, simple_loss=0.2989, pruned_loss=0.06523, over 8629.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2994, pruned_loss=0.07138, over 1607582.68 frames. ], batch size: 39, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:40:44,378 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:40:55,105 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111601.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:41:12,085 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:41:14,602 INFO [train.py:901] (0/4) Epoch 14, batch 6550, loss[loss=0.2272, simple_loss=0.3041, pruned_loss=0.07514, over 8561.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2998, pruned_loss=0.07205, over 1610681.77 frames. ], batch size: 31, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:41:24,436 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 15:41:35,843 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 2.515e+02 3.055e+02 3.900e+02 7.605e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-06 15:41:42,133 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9563, 1.8584, 2.4617, 1.5738, 1.2880, 2.5552, 0.4148, 1.3760], + device='cuda:0'), covar=tensor([0.2006, 0.1450, 0.0319, 0.1793, 0.3287, 0.0353, 0.2946, 0.1708], + device='cuda:0'), in_proj_covar=tensor([0.0174, 0.0177, 0.0109, 0.0217, 0.0260, 0.0112, 0.0164, 0.0174], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 15:41:43,336 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 15:41:47,951 INFO [train.py:901] (0/4) Epoch 14, batch 6600, loss[loss=0.2255, simple_loss=0.2977, pruned_loss=0.07661, over 8128.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3016, pruned_loss=0.07305, over 1611235.80 frames. 
], batch size: 22, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:41:55,623 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:42:03,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:42:14,379 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111716.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:42:23,469 INFO [train.py:901] (0/4) Epoch 14, batch 6650, loss[loss=0.2063, simple_loss=0.2853, pruned_loss=0.06371, over 7964.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3004, pruned_loss=0.07308, over 1611460.33 frames. ], batch size: 21, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:42:26,330 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6368, 2.8125, 1.8826, 2.3320, 2.2032, 1.6134, 2.1659, 2.3766], + device='cuda:0'), covar=tensor([0.1653, 0.0377, 0.1146, 0.0677, 0.0710, 0.1398, 0.1075, 0.0957], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0235, 0.0326, 0.0297, 0.0301, 0.0329, 0.0344, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:42:45,229 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.391e+02 3.105e+02 3.860e+02 7.189e+02, threshold=6.209e+02, percent-clipped=3.0 +2023-02-06 15:42:57,308 INFO [train.py:901] (0/4) Epoch 14, batch 6700, loss[loss=0.2497, simple_loss=0.3206, pruned_loss=0.0894, over 8464.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3007, pruned_loss=0.07323, over 1616362.43 frames. ], batch size: 25, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:43:16,619 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111809.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:43:32,519 INFO [train.py:901] (0/4) Epoch 14, batch 6750, loss[loss=0.2699, simple_loss=0.3383, pruned_loss=0.1008, over 6882.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3, pruned_loss=0.07204, over 1617820.26 frames. ], batch size: 71, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:43:32,588 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:43:54,031 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.559e+02 3.020e+02 4.182e+02 1.269e+03, threshold=6.039e+02, percent-clipped=6.0 +2023-02-06 15:44:02,382 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 15:44:03,213 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8554, 3.7784, 3.4895, 1.6480, 3.4010, 3.3871, 3.4685, 3.1906], + device='cuda:0'), covar=tensor([0.0857, 0.0720, 0.1051, 0.5011, 0.0917, 0.1081, 0.1466, 0.0884], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0395, 0.0399, 0.0495, 0.0392, 0.0397, 0.0384, 0.0340], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:44:07,192 INFO [train.py:901] (0/4) Epoch 14, batch 6800, loss[loss=0.2288, simple_loss=0.2906, pruned_loss=0.08344, over 7561.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3004, pruned_loss=0.07198, over 1616093.30 frames. 
], batch size: 18, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:44:14,363 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.30 vs. limit=5.0 +2023-02-06 15:44:40,412 INFO [train.py:901] (0/4) Epoch 14, batch 6850, loss[loss=0.2603, simple_loss=0.3457, pruned_loss=0.08743, over 8524.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3008, pruned_loss=0.07209, over 1616697.75 frames. ], batch size: 28, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:44:51,192 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 15:44:52,047 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111945.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:45:01,401 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:45:03,875 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.542e+02 3.126e+02 4.226e+02 8.027e+02, threshold=6.251e+02, percent-clipped=7.0 +2023-02-06 15:45:16,599 INFO [train.py:901] (0/4) Epoch 14, batch 6900, loss[loss=0.2279, simple_loss=0.3021, pruned_loss=0.07689, over 8454.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3014, pruned_loss=0.07261, over 1616400.98 frames. ], batch size: 27, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:45:18,704 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:45:23,422 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-06 15:45:29,849 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-112000.pt +2023-02-06 15:45:50,585 INFO [train.py:901] (0/4) Epoch 14, batch 6950, loss[loss=0.2281, simple_loss=0.3087, pruned_loss=0.07369, over 7804.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3008, pruned_loss=0.07296, over 1613049.06 frames. ], batch size: 20, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:45:58,675 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 15:46:02,907 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6859, 2.8806, 1.9238, 2.4031, 2.4210, 1.6599, 2.3636, 2.3356], + device='cuda:0'), covar=tensor([0.1492, 0.0411, 0.1143, 0.0649, 0.0603, 0.1473, 0.0820, 0.0993], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0236, 0.0328, 0.0302, 0.0304, 0.0332, 0.0348, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:46:13,920 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.419e+02 2.987e+02 3.531e+02 6.552e+02, threshold=5.974e+02, percent-clipped=1.0 +2023-02-06 15:46:25,965 INFO [train.py:901] (0/4) Epoch 14, batch 7000, loss[loss=0.2056, simple_loss=0.2836, pruned_loss=0.06385, over 7554.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3007, pruned_loss=0.07294, over 1608142.28 frames. ], batch size: 18, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:46:28,772 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:46:29,104 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. 
limit=2.0 +2023-02-06 15:46:59,913 INFO [train.py:901] (0/4) Epoch 14, batch 7050, loss[loss=0.1905, simple_loss=0.2729, pruned_loss=0.05403, over 8037.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3012, pruned_loss=0.07341, over 1607765.19 frames. ], batch size: 22, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:47:15,899 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112153.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:47:17,937 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6960, 1.5723, 1.7340, 1.5558, 1.0209, 1.6622, 2.0461, 1.9744], + device='cuda:0'), covar=tensor([0.0476, 0.1264, 0.1761, 0.1430, 0.0626, 0.1493, 0.0685, 0.0573], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0154, 0.0100, 0.0160, 0.0114, 0.0136], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 15:47:21,802 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.608e+02 3.153e+02 4.211e+02 1.237e+03, threshold=6.307e+02, percent-clipped=12.0 +2023-02-06 15:47:30,690 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2149, 2.0085, 3.2286, 1.9847, 2.6886, 3.5621, 3.4999, 3.2298], + device='cuda:0'), covar=tensor([0.0967, 0.1353, 0.0503, 0.1546, 0.1296, 0.0217, 0.0603, 0.0446], + device='cuda:0'), in_proj_covar=tensor([0.0279, 0.0308, 0.0270, 0.0301, 0.0287, 0.0246, 0.0373, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:47:35,265 INFO [train.py:901] (0/4) Epoch 14, batch 7100, loss[loss=0.2005, simple_loss=0.2776, pruned_loss=0.06167, over 7921.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.302, pruned_loss=0.07372, over 1605572.40 frames. ], batch size: 20, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:47:50,282 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:48:07,694 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112226.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:48:10,128 INFO [train.py:901] (0/4) Epoch 14, batch 7150, loss[loss=0.25, simple_loss=0.3194, pruned_loss=0.09027, over 6778.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3014, pruned_loss=0.07327, over 1602999.18 frames. ], batch size: 71, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:48:11,592 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3676, 3.0683, 2.2103, 3.9446, 1.8543, 1.8666, 2.4674, 3.0817], + device='cuda:0'), covar=tensor([0.0708, 0.0730, 0.0941, 0.0220, 0.1027, 0.1373, 0.0946, 0.0790], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0206, 0.0253, 0.0214, 0.0216, 0.0252, 0.0258, 0.0215], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 15:48:24,024 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. 
limit=2.0 +2023-02-06 15:48:31,540 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.371e+02 2.859e+02 3.664e+02 7.587e+02, threshold=5.717e+02, percent-clipped=3.0 +2023-02-06 15:48:35,572 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112268.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:48:43,203 INFO [train.py:901] (0/4) Epoch 14, batch 7200, loss[loss=0.1881, simple_loss=0.2749, pruned_loss=0.05062, over 7966.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2994, pruned_loss=0.07224, over 1595238.45 frames. ], batch size: 21, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:49:18,417 INFO [train.py:901] (0/4) Epoch 14, batch 7250, loss[loss=0.1885, simple_loss=0.2769, pruned_loss=0.04999, over 8189.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2989, pruned_loss=0.072, over 1601443.75 frames. ], batch size: 23, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:49:31,016 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 15:49:39,902 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.708e+02 3.212e+02 3.989e+02 8.387e+02, threshold=6.424e+02, percent-clipped=5.0 +2023-02-06 15:49:52,058 INFO [train.py:901] (0/4) Epoch 14, batch 7300, loss[loss=0.2366, simple_loss=0.3192, pruned_loss=0.07702, over 8689.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2998, pruned_loss=0.07258, over 1605987.65 frames. ], batch size: 34, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:50:11,919 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:50:26,698 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:50:28,013 INFO [train.py:901] (0/4) Epoch 14, batch 7350, loss[loss=0.2161, simple_loss=0.2989, pruned_loss=0.06663, over 8561.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3005, pruned_loss=0.07296, over 1609628.95 frames. ], batch size: 31, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:50:40,032 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 15:50:49,901 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.462e+02 2.972e+02 3.682e+02 1.093e+03, threshold=5.943e+02, percent-clipped=5.0 +2023-02-06 15:50:59,944 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 15:51:02,055 INFO [train.py:901] (0/4) Epoch 14, batch 7400, loss[loss=0.2267, simple_loss=0.309, pruned_loss=0.07219, over 8288.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3006, pruned_loss=0.07241, over 1608889.32 frames. ], batch size: 23, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:51:32,626 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112524.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:51:37,190 INFO [train.py:901] (0/4) Epoch 14, batch 7450, loss[loss=0.2327, simple_loss=0.2936, pruned_loss=0.0859, over 7704.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3007, pruned_loss=0.07264, over 1609360.77 frames. ], batch size: 18, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:51:41,803 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 15:51:46,949 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:51:50,981 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112549.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:51:59,931 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.473e+02 3.112e+02 3.710e+02 6.215e+02, threshold=6.224e+02, percent-clipped=1.0 +2023-02-06 15:52:13,215 INFO [train.py:901] (0/4) Epoch 14, batch 7500, loss[loss=0.232, simple_loss=0.3076, pruned_loss=0.07817, over 8579.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3009, pruned_loss=0.07262, over 1609269.43 frames. ], batch size: 39, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:52:13,360 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:52:23,781 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:52:47,276 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1910, 1.8367, 2.6921, 2.2192, 2.6484, 2.0677, 1.7337, 1.3513], + device='cuda:0'), covar=tensor([0.4726, 0.4629, 0.1573, 0.2923, 0.2157, 0.2820, 0.2066, 0.4838], + device='cuda:0'), in_proj_covar=tensor([0.0902, 0.0909, 0.0750, 0.0878, 0.0953, 0.0837, 0.0722, 0.0788], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:52:47,704 INFO [train.py:901] (0/4) Epoch 14, batch 7550, loss[loss=0.2554, simple_loss=0.3366, pruned_loss=0.08708, over 8102.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3015, pruned_loss=0.07292, over 1610540.17 frames. ], batch size: 23, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:52:51,278 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:53:11,225 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.402e+02 2.890e+02 3.643e+02 7.164e+02, threshold=5.781e+02, percent-clipped=3.0 +2023-02-06 15:53:23,631 INFO [train.py:901] (0/4) Epoch 14, batch 7600, loss[loss=0.2152, simple_loss=0.293, pruned_loss=0.06872, over 8036.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3014, pruned_loss=0.07302, over 1611288.25 frames. ], batch size: 22, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:53:39,793 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7842, 2.2988, 3.3841, 1.8035, 1.5901, 3.3902, 0.5285, 1.8490], + device='cuda:0'), covar=tensor([0.1934, 0.1543, 0.0293, 0.2413, 0.4056, 0.0336, 0.3360, 0.2195], + device='cuda:0'), in_proj_covar=tensor([0.0175, 0.0178, 0.0110, 0.0218, 0.0259, 0.0113, 0.0163, 0.0176], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 15:53:57,107 INFO [train.py:901] (0/4) Epoch 14, batch 7650, loss[loss=0.2059, simple_loss=0.2664, pruned_loss=0.07272, over 7704.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3015, pruned_loss=0.07315, over 1610262.46 frames. 
], batch size: 18, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:54:11,085 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:54:18,473 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.546e+02 2.941e+02 3.649e+02 7.123e+02, threshold=5.882e+02, percent-clipped=5.0 +2023-02-06 15:54:32,355 INFO [train.py:901] (0/4) Epoch 14, batch 7700, loss[loss=0.1688, simple_loss=0.2529, pruned_loss=0.04242, over 7426.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3002, pruned_loss=0.07245, over 1610704.85 frames. ], batch size: 17, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:54:38,972 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-06 15:54:45,556 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:54:52,887 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 15:54:53,109 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9013, 2.4033, 4.3773, 1.4646, 3.1365, 2.5100, 2.2124, 2.8495], + device='cuda:0'), covar=tensor([0.1818, 0.2550, 0.0716, 0.4610, 0.1749, 0.2889, 0.1903, 0.2624], + device='cuda:0'), in_proj_covar=tensor([0.0498, 0.0552, 0.0539, 0.0601, 0.0623, 0.0565, 0.0491, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:55:03,111 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:55:06,910 INFO [train.py:901] (0/4) Epoch 14, batch 7750, loss[loss=0.3028, simple_loss=0.3546, pruned_loss=0.1255, over 6985.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2998, pruned_loss=0.07174, over 1611504.47 frames. ], batch size: 72, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:55:24,065 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 15:55:28,272 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.453e+02 3.172e+02 4.245e+02 8.131e+02, threshold=6.343e+02, percent-clipped=10.0 +2023-02-06 15:55:30,952 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:55:41,025 INFO [train.py:901] (0/4) Epoch 14, batch 7800, loss[loss=0.216, simple_loss=0.304, pruned_loss=0.064, over 8351.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2997, pruned_loss=0.07181, over 1610395.56 frames. ], batch size: 24, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:55:56,248 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2019, 1.8464, 2.5770, 2.1484, 2.5055, 2.1482, 1.8277, 1.2806], + device='cuda:0'), covar=tensor([0.4641, 0.4384, 0.1450, 0.2658, 0.1963, 0.2481, 0.1810, 0.4595], + device='cuda:0'), in_proj_covar=tensor([0.0900, 0.0909, 0.0750, 0.0874, 0.0947, 0.0835, 0.0718, 0.0785], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 15:56:12,164 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:13,266 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. 
limit=5.0 +2023-02-06 15:56:14,512 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 15:56:16,234 INFO [train.py:901] (0/4) Epoch 14, batch 7850, loss[loss=0.2375, simple_loss=0.3159, pruned_loss=0.07951, over 8315.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2985, pruned_loss=0.07144, over 1605530.11 frames. ], batch size: 25, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:56:22,337 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:22,411 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:29,754 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4977, 1.9374, 3.5167, 1.2684, 2.5431, 2.0278, 1.6646, 2.4355], + device='cuda:0'), covar=tensor([0.1745, 0.2532, 0.0567, 0.4286, 0.1592, 0.2800, 0.1939, 0.2145], + device='cuda:0'), in_proj_covar=tensor([0.0496, 0.0551, 0.0535, 0.0600, 0.0622, 0.0564, 0.0489, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 15:56:37,778 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.504e+02 3.067e+02 3.726e+02 7.698e+02, threshold=6.135e+02, percent-clipped=2.0 +2023-02-06 15:56:49,061 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:49,642 INFO [train.py:901] (0/4) Epoch 14, batch 7900, loss[loss=0.2408, simple_loss=0.3218, pruned_loss=0.07992, over 8659.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2995, pruned_loss=0.07182, over 1605309.24 frames. ], batch size: 39, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:57:05,614 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2576, 3.0764, 2.2908, 2.5012, 2.5427, 2.1942, 2.5522, 2.7722], + device='cuda:0'), covar=tensor([0.1010, 0.0313, 0.0762, 0.0593, 0.0550, 0.0930, 0.0679, 0.0685], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0233, 0.0326, 0.0301, 0.0302, 0.0327, 0.0344, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 15:57:07,094 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 15:57:22,229 INFO [train.py:901] (0/4) Epoch 14, batch 7950, loss[loss=0.2292, simple_loss=0.3102, pruned_loss=0.07406, over 8098.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2993, pruned_loss=0.07148, over 1605697.17 frames. ], batch size: 23, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:57:28,301 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:57:38,023 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:57:43,242 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.654e+02 3.191e+02 4.041e+02 1.304e+03, threshold=6.382e+02, percent-clipped=5.0 +2023-02-06 15:57:55,250 INFO [train.py:901] (0/4) Epoch 14, batch 8000, loss[loss=0.259, simple_loss=0.3339, pruned_loss=0.09208, over 7111.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2988, pruned_loss=0.0708, over 1605653.71 frames. 
], batch size: 71, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:58:04,668 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:23,450 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:28,557 INFO [train.py:901] (0/4) Epoch 14, batch 8050, loss[loss=0.2732, simple_loss=0.3308, pruned_loss=0.1078, over 7275.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2991, pruned_loss=0.07173, over 1593459.35 frames. ], batch size: 72, lr: 5.36e-03, grad_scale: 16.0 +2023-02-06 15:58:40,112 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:49,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.323e+02 2.856e+02 3.288e+02 8.076e+02, threshold=5.712e+02, percent-clipped=1.0 +2023-02-06 15:58:51,437 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-14.pt +2023-02-06 15:59:02,879 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 15:59:06,257 INFO [train.py:901] (0/4) Epoch 15, batch 0, loss[loss=0.2388, simple_loss=0.309, pruned_loss=0.0843, over 8606.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.309, pruned_loss=0.0843, over 8606.00 frames. ], batch size: 34, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 15:59:06,257 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 15:59:17,270 INFO [train.py:935] (0/4) Epoch 15, validation: loss=0.1825, simple_loss=0.283, pruned_loss=0.04098, over 944034.00 frames. +2023-02-06 15:59:17,271 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 15:59:32,300 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 15:59:51,451 INFO [train.py:901] (0/4) Epoch 15, batch 50, loss[loss=0.1917, simple_loss=0.2616, pruned_loss=0.06088, over 6791.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2979, pruned_loss=0.06851, over 367479.44 frames. ], batch size: 15, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:00:08,691 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 16:00:21,152 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:00:27,787 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.549e+02 3.077e+02 3.582e+02 9.445e+02, threshold=6.153e+02, percent-clipped=5.0 +2023-02-06 16:00:28,489 INFO [train.py:901] (0/4) Epoch 15, batch 100, loss[loss=0.2333, simple_loss=0.3127, pruned_loss=0.07701, over 8258.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2982, pruned_loss=0.07076, over 641169.90 frames. ], batch size: 24, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:00:29,903 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 16:00:41,955 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:00:50,246 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:00,330 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113310.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:02,089 INFO [train.py:901] (0/4) Epoch 15, batch 150, loss[loss=0.1797, simple_loss=0.2577, pruned_loss=0.05085, over 7981.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3002, pruned_loss=0.07156, over 860591.54 frames. ], batch size: 21, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:01:06,870 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:16,664 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:28,763 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:37,328 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.511e+02 3.032e+02 4.146e+02 1.005e+03, threshold=6.064e+02, percent-clipped=3.0 +2023-02-06 16:01:38,023 INFO [train.py:901] (0/4) Epoch 15, batch 200, loss[loss=0.2117, simple_loss=0.2811, pruned_loss=0.07115, over 7919.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.3001, pruned_loss=0.07113, over 1027136.42 frames. ], batch size: 20, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:01:46,232 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:02:01,345 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:02:11,075 INFO [train.py:901] (0/4) Epoch 15, batch 250, loss[loss=0.2336, simple_loss=0.3076, pruned_loss=0.07979, over 8503.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3006, pruned_loss=0.07128, over 1161638.51 frames. ], batch size: 26, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:02:19,364 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 16:02:28,571 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 16:02:43,763 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.666e+02 3.062e+02 4.026e+02 8.735e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 16:02:44,411 INFO [train.py:901] (0/4) Epoch 15, batch 300, loss[loss=0.2539, simple_loss=0.3357, pruned_loss=0.08604, over 8691.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3002, pruned_loss=0.07146, over 1263746.83 frames. ], batch size: 34, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:03:19,311 INFO [train.py:901] (0/4) Epoch 15, batch 350, loss[loss=0.198, simple_loss=0.2854, pruned_loss=0.05533, over 8289.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3014, pruned_loss=0.0719, over 1343890.74 frames. 
], batch size: 23, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:03:52,034 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.415e+02 3.115e+02 3.728e+02 6.919e+02, threshold=6.229e+02, percent-clipped=2.0 +2023-02-06 16:03:52,728 INFO [train.py:901] (0/4) Epoch 15, batch 400, loss[loss=0.2159, simple_loss=0.2893, pruned_loss=0.07128, over 7936.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3003, pruned_loss=0.07114, over 1404595.53 frames. ], batch size: 20, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:04:17,457 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:04:17,495 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8160, 5.8766, 5.1824, 2.3759, 5.3209, 5.6661, 5.5201, 5.3324], + device='cuda:0'), covar=tensor([0.0532, 0.0414, 0.0923, 0.4610, 0.0727, 0.0713, 0.1071, 0.0634], + device='cuda:0'), in_proj_covar=tensor([0.0482, 0.0400, 0.0401, 0.0501, 0.0400, 0.0401, 0.0390, 0.0350], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 16:04:28,807 INFO [train.py:901] (0/4) Epoch 15, batch 450, loss[loss=0.1854, simple_loss=0.2452, pruned_loss=0.06276, over 7528.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2991, pruned_loss=0.07076, over 1451326.36 frames. ], batch size: 18, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:04:43,280 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:04:56,080 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113654.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:01,023 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.616e+02 3.268e+02 4.141e+02 9.119e+02, threshold=6.536e+02, percent-clipped=2.0 +2023-02-06 16:05:01,740 INFO [train.py:901] (0/4) Epoch 15, batch 500, loss[loss=0.2294, simple_loss=0.3073, pruned_loss=0.07572, over 8485.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2988, pruned_loss=0.07118, over 1485871.44 frames. ], batch size: 29, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:05:02,531 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:11,071 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2434, 1.4163, 4.4678, 2.0000, 2.3642, 5.0293, 5.0488, 4.3686], + device='cuda:0'), covar=tensor([0.1122, 0.1796, 0.0245, 0.1823, 0.1136, 0.0176, 0.0494, 0.0560], + device='cuda:0'), in_proj_covar=tensor([0.0277, 0.0307, 0.0273, 0.0300, 0.0288, 0.0246, 0.0376, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 16:05:12,301 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113679.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:35,402 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113711.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:36,636 INFO [train.py:901] (0/4) Epoch 15, batch 550, loss[loss=0.2007, simple_loss=0.2778, pruned_loss=0.06178, over 7703.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.297, pruned_loss=0.07039, over 1511041.53 frames. 
], batch size: 18, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:06:09,825 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.516e+02 3.119e+02 4.209e+02 9.524e+02, threshold=6.239e+02, percent-clipped=4.0
+2023-02-06 16:06:10,536 INFO [train.py:901] (0/4) Epoch 15, batch 600, loss[loss=0.2898, simple_loss=0.3474, pruned_loss=0.1161, over 7153.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2986, pruned_loss=0.07096, over 1536953.51 frames. ], batch size: 72, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:06:24,218 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 16:06:44,318 INFO [train.py:901] (0/4) Epoch 15, batch 650, loss[loss=0.2221, simple_loss=0.3132, pruned_loss=0.06556, over 8291.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2993, pruned_loss=0.07119, over 1555373.55 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:06:58,553 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5294, 1.8450, 4.2477, 1.9091, 2.3784, 4.7679, 4.8087, 4.1626],
+       device='cuda:0'), covar=tensor([0.0991, 0.1643, 0.0303, 0.1960, 0.1225, 0.0192, 0.0385, 0.0543],
+       device='cuda:0'), in_proj_covar=tensor([0.0279, 0.0309, 0.0274, 0.0301, 0.0288, 0.0248, 0.0377, 0.0300],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:07:16,605 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6234, 3.5536, 3.2333, 2.1332, 3.1535, 3.2366, 3.2969, 3.0002],
+       device='cuda:0'), covar=tensor([0.0819, 0.0679, 0.0955, 0.3648, 0.0903, 0.1164, 0.1220, 0.0965],
+       device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0397, 0.0397, 0.0498, 0.0396, 0.0395, 0.0383, 0.0346],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:07:19,221 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.270e+02 2.767e+02 3.649e+02 9.673e+02, threshold=5.535e+02, percent-clipped=4.0
+2023-02-06 16:07:19,886 INFO [train.py:901] (0/4) Epoch 15, batch 700, loss[loss=0.2338, simple_loss=0.3036, pruned_loss=0.08204, over 8139.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2982, pruned_loss=0.0706, over 1567225.21 frames. ], batch size: 22, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:07:37,433 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8259, 3.8145, 3.4055, 1.8396, 3.3420, 3.4687, 3.3873, 3.2274],
+       device='cuda:0'), covar=tensor([0.0854, 0.0690, 0.1179, 0.4334, 0.0910, 0.1045, 0.1445, 0.1022],
+       device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0397, 0.0396, 0.0497, 0.0396, 0.0394, 0.0381, 0.0345],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:07:53,445 INFO [train.py:901] (0/4) Epoch 15, batch 750, loss[loss=0.1871, simple_loss=0.2729, pruned_loss=0.05059, over 8110.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2989, pruned_loss=0.0714, over 1579478.93 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:08:11,149 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 16:08:20,460 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 16:08:29,183 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 2.237e+02 2.791e+02 3.511e+02 6.350e+02, threshold=5.582e+02, percent-clipped=4.0
+2023-02-06 16:08:29,872 INFO [train.py:901] (0/4) Epoch 15, batch 800, loss[loss=0.2288, simple_loss=0.3066, pruned_loss=0.07547, over 8482.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2992, pruned_loss=0.07205, over 1589707.00 frames. ], batch size: 49, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:08:32,827 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:08:40,756 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:08:47,649 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 16:08:50,090 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113992.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:08:55,521 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-114000.pt
+2023-02-06 16:09:01,970 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114008.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:09:05,305 INFO [train.py:901] (0/4) Epoch 15, batch 850, loss[loss=0.2156, simple_loss=0.3022, pruned_loss=0.06448, over 8331.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2986, pruned_loss=0.07129, over 1597877.35 frames. ], batch size: 26, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:09:21,246 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 16:09:39,412 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.394e+02 2.826e+02 3.443e+02 6.296e+02, threshold=5.653e+02, percent-clipped=1.0
+2023-02-06 16:09:40,793 INFO [train.py:901] (0/4) Epoch 15, batch 900, loss[loss=0.2102, simple_loss=0.2786, pruned_loss=0.07087, over 7791.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2988, pruned_loss=0.07146, over 1602816.06 frames. ], batch size: 19, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:10:02,643 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:10:15,158 INFO [train.py:901] (0/4) Epoch 15, batch 950, loss[loss=0.2413, simple_loss=0.311, pruned_loss=0.08581, over 6914.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3003, pruned_loss=0.07181, over 1605951.33 frames. ], batch size: 71, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:10:20,662 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6867, 2.0791, 2.2654, 1.2335, 2.3660, 1.5894, 0.7321, 1.9192],
+       device='cuda:0'), covar=tensor([0.0518, 0.0278, 0.0217, 0.0496, 0.0253, 0.0701, 0.0743, 0.0253],
+       device='cuda:0'), in_proj_covar=tensor([0.0417, 0.0355, 0.0303, 0.0408, 0.0339, 0.0495, 0.0367, 0.0379],
+       device='cuda:0'), out_proj_covar=tensor([1.1572e-04, 9.5734e-05, 8.1551e-05, 1.1079e-04, 9.2072e-05, 1.4424e-04,
+       1.0184e-04, 1.0371e-04], device='cuda:0')
+2023-02-06 16:10:21,908 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114123.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:10:36,818 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114145.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:10:39,439 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 16:10:45,125 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5677, 4.5454, 4.1550, 2.0953, 4.0429, 4.2129, 4.0865, 3.8810],
+       device='cuda:0'), covar=tensor([0.0602, 0.0477, 0.0863, 0.4222, 0.0795, 0.0861, 0.1124, 0.0757],
+       device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0398, 0.0400, 0.0496, 0.0397, 0.0397, 0.0382, 0.0347],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:10:49,209 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.449e+02 2.913e+02 3.851e+02 8.356e+02, threshold=5.826e+02, percent-clipped=3.0
+2023-02-06 16:10:49,926 INFO [train.py:901] (0/4) Epoch 15, batch 1000, loss[loss=0.2119, simple_loss=0.2813, pruned_loss=0.07129, over 7538.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3005, pruned_loss=0.07215, over 1605917.40 frames. ], batch size: 18, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:11:14,210 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 16:11:25,580 INFO [train.py:901] (0/4) Epoch 15, batch 1050, loss[loss=0.2219, simple_loss=0.3069, pruned_loss=0.06846, over 8239.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3011, pruned_loss=0.07227, over 1610292.26 frames. ], batch size: 24, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:11:25,602 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 16:11:57,601 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.504e+02 3.058e+02 3.938e+02 1.189e+03, threshold=6.116e+02, percent-clipped=4.0
+2023-02-06 16:11:58,319 INFO [train.py:901] (0/4) Epoch 15, batch 1100, loss[loss=0.2201, simple_loss=0.3036, pruned_loss=0.06832, over 8129.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3002, pruned_loss=0.07211, over 1610943.31 frames. ], batch size: 22, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:12:26,264 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 16:12:33,901 INFO [train.py:901] (0/4) Epoch 15, batch 1150, loss[loss=0.2481, simple_loss=0.3196, pruned_loss=0.08835, over 8104.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3019, pruned_loss=0.07278, over 1614700.62 frames. ], batch size: 23, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:12:37,515 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1869, 1.0940, 1.2659, 1.1007, 0.9095, 1.3346, 0.0803, 0.9593],
+       device='cuda:0'), covar=tensor([0.2113, 0.1592, 0.0594, 0.1020, 0.3292, 0.0534, 0.2672, 0.1497],
+       device='cuda:0'), in_proj_covar=tensor([0.0175, 0.0179, 0.0111, 0.0218, 0.0259, 0.0115, 0.0165, 0.0176],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 16:12:38,620 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 16:12:53,947 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 16:12:59,528 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114350.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:07,357 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.463e+02 3.139e+02 3.955e+02 6.139e+02, threshold=6.277e+02, percent-clipped=1.0
+2023-02-06 16:13:07,983 INFO [train.py:901] (0/4) Epoch 15, batch 1200, loss[loss=0.2299, simple_loss=0.2855, pruned_loss=0.08717, over 7433.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3005, pruned_loss=0.07188, over 1614627.35 frames. ], batch size: 17, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:13:16,129 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114375.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:18,738 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114379.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:36,385 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114404.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:42,792 INFO [train.py:901] (0/4) Epoch 15, batch 1250, loss[loss=0.2489, simple_loss=0.3227, pruned_loss=0.08755, over 8470.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3015, pruned_loss=0.07242, over 1615381.44 frames. ], batch size: 29, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:14:16,860 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.591e+02 3.148e+02 4.129e+02 1.085e+03, threshold=6.295e+02, percent-clipped=6.0
+2023-02-06 16:14:17,472 INFO [train.py:901] (0/4) Epoch 15, batch 1300, loss[loss=0.2117, simple_loss=0.2851, pruned_loss=0.06914, over 7293.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3008, pruned_loss=0.07191, over 1617403.77 frames. ], batch size: 16, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:14:35,228 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114489.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:14:51,236 INFO [train.py:901] (0/4) Epoch 15, batch 1350, loss[loss=0.2169, simple_loss=0.2963, pruned_loss=0.06877, over 8029.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3002, pruned_loss=0.07149, over 1614936.20 frames. ], batch size: 22, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:15:26,453 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.434e+02 2.903e+02 3.628e+02 5.826e+02, threshold=5.807e+02, percent-clipped=0.0
+2023-02-06 16:15:27,119 INFO [train.py:901] (0/4) Epoch 15, batch 1400, loss[loss=0.2459, simple_loss=0.3232, pruned_loss=0.08429, over 8354.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2992, pruned_loss=0.07048, over 1616091.72 frames. ], batch size: 26, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:15:54,552 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114604.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:15:54,601 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4786, 1.7746, 1.9161, 1.1628, 1.9031, 1.3625, 0.4050, 1.6782],
+       device='cuda:0'), covar=tensor([0.0371, 0.0253, 0.0171, 0.0346, 0.0300, 0.0650, 0.0607, 0.0184],
+       device='cuda:0'), in_proj_covar=tensor([0.0417, 0.0356, 0.0303, 0.0406, 0.0341, 0.0496, 0.0369, 0.0380],
+       device='cuda:0'), out_proj_covar=tensor([1.1544e-04, 9.6006e-05, 8.1496e-05, 1.1016e-04, 9.2748e-05, 1.4453e-04,
+       1.0226e-04, 1.0368e-04], device='cuda:0')
+2023-02-06 16:16:00,697 INFO [train.py:901] (0/4) Epoch 15, batch 1450, loss[loss=0.1746, simple_loss=0.2571, pruned_loss=0.0461, over 7664.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2996, pruned_loss=0.0709, over 1615575.25 frames. ], batch size: 19, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:16:08,829 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 16:16:36,179 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.414e+02 3.068e+02 3.744e+02 6.619e+02, threshold=6.136e+02, percent-clipped=3.0
+2023-02-06 16:16:36,893 INFO [train.py:901] (0/4) Epoch 15, batch 1500, loss[loss=0.206, simple_loss=0.2927, pruned_loss=0.05963, over 8439.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2989, pruned_loss=0.07094, over 1610288.97 frames. ], batch size: 27, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:16:52,687 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6449, 5.8354, 4.9524, 2.5449, 5.0315, 5.4276, 5.3693, 5.1758],
+       device='cuda:0'), covar=tensor([0.0591, 0.0402, 0.0968, 0.4084, 0.0719, 0.0792, 0.0966, 0.0477],
+       device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0398, 0.0400, 0.0496, 0.0394, 0.0397, 0.0383, 0.0349],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:17:11,515 INFO [train.py:901] (0/4) Epoch 15, batch 1550, loss[loss=0.2283, simple_loss=0.3126, pruned_loss=0.07199, over 8557.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2983, pruned_loss=0.07083, over 1608661.64 frames. ], batch size: 31, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:17:26,172 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114734.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:17:45,708 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.278e+02 2.828e+02 3.736e+02 6.971e+02, threshold=5.655e+02, percent-clipped=1.0
+2023-02-06 16:17:46,444 INFO [train.py:901] (0/4) Epoch 15, batch 1600, loss[loss=0.2048, simple_loss=0.274, pruned_loss=0.0678, over 7654.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2981, pruned_loss=0.07054, over 1607577.93 frames. ], batch size: 19, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:17:58,037 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8063, 1.9884, 1.8887, 1.9811, 1.0492, 1.6769, 2.0941, 1.7038],
+       device='cuda:0'), covar=tensor([0.0445, 0.1107, 0.1607, 0.1194, 0.0614, 0.1457, 0.0665, 0.0644],
+       device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0156, 0.0102, 0.0161, 0.0115, 0.0138],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+       device='cuda:0')
+2023-02-06 16:18:22,446 INFO [train.py:901] (0/4) Epoch 15, batch 1650, loss[loss=0.2091, simple_loss=0.2918, pruned_loss=0.06321, over 8447.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2974, pruned_loss=0.07079, over 1607035.35 frames. ], batch size: 25, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:18:55,117 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114860.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:18:56,272 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 2.429e+02 2.845e+02 3.384e+02 6.803e+02, threshold=5.691e+02, percent-clipped=1.0
+2023-02-06 16:18:56,966 INFO [train.py:901] (0/4) Epoch 15, batch 1700, loss[loss=0.2211, simple_loss=0.3055, pruned_loss=0.06839, over 8607.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2978, pruned_loss=0.07054, over 1612225.75 frames. ], batch size: 34, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:19:12,828 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114885.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:19:16,126 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114889.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:19:25,772 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6701, 2.0242, 2.3018, 1.0139, 2.3314, 1.4248, 0.7225, 1.8411],
+       device='cuda:0'), covar=tensor([0.0566, 0.0298, 0.0203, 0.0574, 0.0314, 0.0742, 0.0722, 0.0266],
+       device='cuda:0'), in_proj_covar=tensor([0.0416, 0.0355, 0.0303, 0.0407, 0.0340, 0.0491, 0.0366, 0.0378],
+       device='cuda:0'), out_proj_covar=tensor([1.1531e-04, 9.5673e-05, 8.1594e-05, 1.1055e-04, 9.2503e-05, 1.4293e-04,
+       1.0128e-04, 1.0329e-04], device='cuda:0')
+2023-02-06 16:19:32,924 INFO [train.py:901] (0/4) Epoch 15, batch 1750, loss[loss=0.2204, simple_loss=0.3074, pruned_loss=0.06671, over 8025.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2978, pruned_loss=0.07016, over 1614683.25 frames. ], batch size: 22, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:20:06,962 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.435e+02 3.025e+02 3.758e+02 7.531e+02, threshold=6.050e+02, percent-clipped=3.0
+2023-02-06 16:20:07,582 INFO [train.py:901] (0/4) Epoch 15, batch 1800, loss[loss=0.2552, simple_loss=0.3234, pruned_loss=0.0935, over 7990.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2984, pruned_loss=0.07069, over 1613572.61 frames. ], batch size: 21, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:20:43,782 INFO [train.py:901] (0/4) Epoch 15, batch 1850, loss[loss=0.2011, simple_loss=0.2796, pruned_loss=0.06124, over 7969.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2999, pruned_loss=0.07103, over 1616977.30 frames. ], batch size: 21, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:21:17,804 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.659e+02 3.189e+02 4.139e+02 1.250e+03, threshold=6.379e+02, percent-clipped=4.0
+2023-02-06 16:21:18,509 INFO [train.py:901] (0/4) Epoch 15, batch 1900, loss[loss=0.1859, simple_loss=0.2671, pruned_loss=0.05236, over 7649.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2987, pruned_loss=0.07075, over 1611669.16 frames. ], batch size: 19, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:21:28,838 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115078.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:21:38,129 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 16:21:43,682 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 16:21:46,750 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115104.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:21:50,088 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 16:21:53,561 INFO [train.py:901] (0/4) Epoch 15, batch 1950, loss[loss=0.2374, simple_loss=0.3218, pruned_loss=0.07648, over 8628.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2979, pruned_loss=0.07066, over 1612136.20 frames. ], batch size: 39, lr: 5.13e-03, grad_scale: 32.0
+2023-02-06 16:22:04,504 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 16:22:23,208 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 16:22:28,431 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.421e+02 3.112e+02 3.916e+02 6.433e+02, threshold=6.224e+02, percent-clipped=1.0
+2023-02-06 16:22:29,137 INFO [train.py:901] (0/4) Epoch 15, batch 2000, loss[loss=0.2206, simple_loss=0.3042, pruned_loss=0.06848, over 8455.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2977, pruned_loss=0.06988, over 1618583.65 frames. ], batch size: 29, lr: 5.13e-03, grad_scale: 32.0
+2023-02-06 16:22:33,856 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0867, 1.7100, 3.6490, 1.6491, 2.4840, 3.9587, 4.0790, 3.4322],
+       device='cuda:0'), covar=tensor([0.0973, 0.1476, 0.0269, 0.1791, 0.0899, 0.0225, 0.0463, 0.0531],
+       device='cuda:0'), in_proj_covar=tensor([0.0278, 0.0307, 0.0271, 0.0299, 0.0286, 0.0246, 0.0374, 0.0298],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:22:49,761 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115193.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:22:56,688 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6380, 1.4999, 1.6522, 1.4291, 0.8178, 1.4771, 1.6123, 1.3682],
+       device='cuda:0'), covar=tensor([0.0513, 0.1223, 0.1725, 0.1345, 0.0591, 0.1503, 0.0658, 0.0646],
+       device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0156, 0.0102, 0.0161, 0.0115, 0.0138],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+       device='cuda:0')
+2023-02-06 16:23:03,505 INFO [train.py:901] (0/4) Epoch 15, batch 2050, loss[loss=0.2184, simple_loss=0.3089, pruned_loss=0.06398, over 8335.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2982, pruned_loss=0.06992, over 1618343.11 frames. ], batch size: 25, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:23:18,037 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115233.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:23:39,476 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.382e+02 2.963e+02 3.753e+02 6.860e+02, threshold=5.925e+02, percent-clipped=2.0
+2023-02-06 16:23:39,496 INFO [train.py:901] (0/4) Epoch 15, batch 2100, loss[loss=0.263, simple_loss=0.3468, pruned_loss=0.08955, over 8510.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2997, pruned_loss=0.07049, over 1618213.05 frames. ], batch size: 28, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:24:13,864 INFO [train.py:901] (0/4) Epoch 15, batch 2150, loss[loss=0.2016, simple_loss=0.2913, pruned_loss=0.05595, over 8029.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.3004, pruned_loss=0.07069, over 1619342.12 frames. ], batch size: 22, lr: 5.13e-03, grad_scale: 16.0
+2023-02-06 16:24:37,886 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115348.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:24:40,824 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0
+2023-02-06 16:24:42,048 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2304, 1.9712, 2.7243, 2.2288, 2.6314, 2.2075, 1.8441, 1.3413],
+       device='cuda:0'), covar=tensor([0.4632, 0.4432, 0.1376, 0.3009, 0.2078, 0.2534, 0.1858, 0.4626],
+       device='cuda:0'), in_proj_covar=tensor([0.0901, 0.0912, 0.0753, 0.0884, 0.0944, 0.0835, 0.0716, 0.0791],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 16:24:49,119 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.532e+02 3.093e+02 4.065e+02 1.254e+03, threshold=6.185e+02, percent-clipped=7.0
+2023-02-06 16:24:49,139 INFO [train.py:901] (0/4) Epoch 15, batch 2200, loss[loss=0.1992, simple_loss=0.2673, pruned_loss=0.06553, over 7692.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2989, pruned_loss=0.07036, over 1611626.50 frames. ], batch size: 18, lr: 5.12e-03, grad_scale: 16.0
+2023-02-06 16:24:59,764 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0
+2023-02-06 16:25:07,035 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115388.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:25:07,716 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:25:24,252 INFO [train.py:901] (0/4) Epoch 15, batch 2250, loss[loss=0.2074, simple_loss=0.2987, pruned_loss=0.05805, over 8040.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2993, pruned_loss=0.07059, over 1611301.05 frames. ], batch size: 22, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:25:48,112 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115448.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:25:48,962 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115449.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:25:56,432 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5852, 1.9280, 2.0510, 1.2172, 2.1501, 1.4975, 0.5443, 1.8351],
+       device='cuda:0'), covar=tensor([0.0459, 0.0255, 0.0172, 0.0415, 0.0264, 0.0657, 0.0678, 0.0214],
+       device='cuda:0'), in_proj_covar=tensor([0.0420, 0.0359, 0.0307, 0.0414, 0.0343, 0.0499, 0.0373, 0.0385],
+       device='cuda:0'), out_proj_covar=tensor([1.1645e-04, 9.6753e-05, 8.2531e-05, 1.1242e-04, 9.3360e-05, 1.4535e-04,
+       1.0333e-04, 1.0511e-04], device='cuda:0')
+2023-02-06 16:25:58,291 INFO [train.py:901] (0/4) Epoch 15, batch 2300, loss[loss=0.2189, simple_loss=0.3098, pruned_loss=0.06402, over 8593.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2986, pruned_loss=0.06998, over 1614888.24 frames. ], batch size: 49, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:25:58,962 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 2.502e+02 3.175e+02 3.927e+02 9.067e+02, threshold=6.350e+02, percent-clipped=5.0
+2023-02-06 16:26:07,569 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115474.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:26:22,158 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0747, 2.7114, 3.7603, 2.0533, 1.9117, 3.7910, 0.8586, 2.1415],
+       device='cuda:0'), covar=tensor([0.1773, 0.1144, 0.0241, 0.2038, 0.3136, 0.0291, 0.2555, 0.1494],
+       device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0177, 0.0110, 0.0215, 0.0259, 0.0114, 0.0163, 0.0174],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 16:26:34,675 INFO [train.py:901] (0/4) Epoch 15, batch 2350, loss[loss=0.2166, simple_loss=0.3022, pruned_loss=0.06548, over 8076.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2976, pruned_loss=0.07006, over 1613931.46 frames. ], batch size: 21, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:26:37,569 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9864, 1.6412, 1.7812, 1.4950, 1.2417, 1.6054, 2.2679, 1.8146],
+       device='cuda:0'), covar=tensor([0.0422, 0.1276, 0.1731, 0.1495, 0.0590, 0.1590, 0.0634, 0.0642],
+       device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0157, 0.0102, 0.0162, 0.0115, 0.0138],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+       device='cuda:0')
+2023-02-06 16:27:02,843 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4294, 2.1417, 2.9251, 2.3885, 2.9090, 2.3125, 1.9521, 1.6035],
+       device='cuda:0'), covar=tensor([0.4689, 0.4639, 0.1481, 0.3009, 0.1926, 0.2533, 0.1770, 0.4481],
+       device='cuda:0'), in_proj_covar=tensor([0.0906, 0.0916, 0.0754, 0.0889, 0.0949, 0.0838, 0.0721, 0.0793],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 16:27:09,318 INFO [train.py:901] (0/4) Epoch 15, batch 2400, loss[loss=0.2216, simple_loss=0.2963, pruned_loss=0.07341, over 7817.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2979, pruned_loss=0.07044, over 1610745.96 frames. ], batch size: 20, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:27:09,502 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115563.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:27:10,001 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.542e+02 3.047e+02 3.524e+02 9.073e+02, threshold=6.095e+02, percent-clipped=1.0
+2023-02-06 16:27:39,716 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115604.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:27:45,574 INFO [train.py:901] (0/4) Epoch 15, batch 2450, loss[loss=0.2808, simple_loss=0.3284, pruned_loss=0.1166, over 7430.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2994, pruned_loss=0.07151, over 1616480.00 frames. ], batch size: 71, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:27:53,146 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2070, 2.0977, 1.4494, 1.8616, 1.7806, 1.1158, 1.6724, 1.7454],
+       device='cuda:0'), covar=tensor([0.1355, 0.0418, 0.1350, 0.0572, 0.0695, 0.1712, 0.0896, 0.0750],
+       device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0230, 0.0327, 0.0303, 0.0302, 0.0332, 0.0345, 0.0310],
+       device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:27:56,545 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115629.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:28:19,907 INFO [train.py:901] (0/4) Epoch 15, batch 2500, loss[loss=0.2762, simple_loss=0.3468, pruned_loss=0.1028, over 8582.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2984, pruned_loss=0.07132, over 1609679.64 frames. ], batch size: 34, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:28:20,558 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.367e+02 2.686e+02 3.697e+02 9.165e+02, threshold=5.372e+02, percent-clipped=5.0
+2023-02-06 16:28:52,057 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3191, 1.7495, 4.6566, 2.0816, 2.4850, 5.2091, 5.2099, 4.4698],
+       device='cuda:0'), covar=tensor([0.1148, 0.1694, 0.0183, 0.1764, 0.1106, 0.0138, 0.0346, 0.0548],
+       device='cuda:0'), in_proj_covar=tensor([0.0276, 0.0306, 0.0270, 0.0297, 0.0286, 0.0247, 0.0377, 0.0299],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:28:55,237 INFO [train.py:901] (0/4) Epoch 15, batch 2550, loss[loss=0.1834, simple_loss=0.2679, pruned_loss=0.0495, over 8036.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2985, pruned_loss=0.07098, over 1607832.26 frames. ], batch size: 22, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:29:08,933 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115732.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:29:09,604 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115733.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:29:18,332 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2452, 1.9252, 2.6536, 2.1220, 2.4914, 2.1811, 1.8851, 1.2061],
+       device='cuda:0'), covar=tensor([0.4477, 0.4012, 0.1414, 0.3075, 0.2041, 0.2536, 0.1743, 0.4621],
+       device='cuda:0'), in_proj_covar=tensor([0.0905, 0.0915, 0.0755, 0.0890, 0.0946, 0.0841, 0.0720, 0.0795],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 16:29:30,413 INFO [train.py:901] (0/4) Epoch 15, batch 2600, loss[loss=0.2053, simple_loss=0.2878, pruned_loss=0.0614, over 7816.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2978, pruned_loss=0.07024, over 1609615.12 frames. ], batch size: 20, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:29:31,075 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.427e+02 3.148e+02 3.839e+02 8.607e+02, threshold=6.296e+02, percent-clipped=3.0
+2023-02-06 16:29:51,262 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6785, 2.2439, 4.2533, 1.4921, 2.9667, 2.3096, 1.6945, 3.0435],
+       device='cuda:0'), covar=tensor([0.1763, 0.2411, 0.0678, 0.4210, 0.1636, 0.2827, 0.2179, 0.2207],
+       device='cuda:0'), in_proj_covar=tensor([0.0497, 0.0551, 0.0541, 0.0602, 0.0622, 0.0566, 0.0497, 0.0618],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:30:00,217 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7257, 2.3381, 3.4787, 2.5539, 3.1273, 2.5247, 2.1370, 1.9760],
+       device='cuda:0'), covar=tensor([0.4301, 0.4651, 0.1400, 0.3138, 0.2108, 0.2424, 0.1696, 0.4740],
+       device='cuda:0'), in_proj_covar=tensor([0.0904, 0.0915, 0.0754, 0.0889, 0.0947, 0.0841, 0.0720, 0.0794],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 16:30:04,164 INFO [train.py:901] (0/4) Epoch 15, batch 2650, loss[loss=0.187, simple_loss=0.2702, pruned_loss=0.05188, over 8113.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2995, pruned_loss=0.07145, over 1613397.09 frames. ], batch size: 23, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:30:07,078 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1893, 1.4435, 3.3749, 1.1154, 2.9367, 2.8066, 3.0265, 2.9036],
+       device='cuda:0'), covar=tensor([0.0838, 0.3553, 0.0803, 0.3709, 0.1500, 0.1106, 0.0742, 0.0930],
+       device='cuda:0'), in_proj_covar=tensor([0.0554, 0.0611, 0.0638, 0.0582, 0.0653, 0.0561, 0.0555, 0.0619],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 16:30:08,489 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115819.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:27,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115844.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:29,375 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115847.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:30,052 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:32,824 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-06 16:30:39,732 INFO [train.py:901] (0/4) Epoch 15, batch 2700, loss[loss=0.233, simple_loss=0.3116, pruned_loss=0.07724, over 7193.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2996, pruned_loss=0.07145, over 1614458.37 frames. ], batch size: 72, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:30:40,390 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.337e+02 2.718e+02 3.606e+02 6.832e+02, threshold=5.436e+02, percent-clipped=3.0
+2023-02-06 16:31:12,006 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115910.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 16:31:13,862 INFO [train.py:901] (0/4) Epoch 15, batch 2750, loss[loss=0.1912, simple_loss=0.2727, pruned_loss=0.05491, over 7800.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2995, pruned_loss=0.07143, over 1616218.05 frames. ], batch size: 20, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:31:49,493 INFO [train.py:901] (0/4) Epoch 15, batch 2800, loss[loss=0.2091, simple_loss=0.2663, pruned_loss=0.07598, over 7542.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2998, pruned_loss=0.07153, over 1614204.14 frames. ], batch size: 18, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:31:50,140 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.517e+02 2.986e+02 3.677e+02 9.071e+02, threshold=5.972e+02, percent-clipped=5.0
+2023-02-06 16:32:15,112 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-116000.pt
+2023-02-06 16:32:18,284 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3118, 2.1653, 1.6375, 1.9099, 1.7910, 1.3859, 1.6686, 1.7058],
+       device='cuda:0'), covar=tensor([0.1281, 0.0407, 0.1188, 0.0543, 0.0736, 0.1477, 0.0936, 0.0744],
+       device='cuda:0'), in_proj_covar=tensor([0.0346, 0.0228, 0.0323, 0.0300, 0.0298, 0.0326, 0.0341, 0.0307],
+       device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:32:24,947 INFO [train.py:901] (0/4) Epoch 15, batch 2850, loss[loss=0.1977, simple_loss=0.2723, pruned_loss=0.06153, over 7786.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2998, pruned_loss=0.07113, over 1615744.99 frames. ], batch size: 19, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:32:38,102 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:32:42,285 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9395, 1.7060, 2.6564, 1.5623, 2.1863, 2.8814, 2.8601, 2.5905],
+       device='cuda:0'), covar=tensor([0.0826, 0.1218, 0.0645, 0.1635, 0.1562, 0.0260, 0.0698, 0.0486],
+       device='cuda:0'), in_proj_covar=tensor([0.0276, 0.0302, 0.0268, 0.0295, 0.0285, 0.0246, 0.0373, 0.0296],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:33:00,841 INFO [train.py:901] (0/4) Epoch 15, batch 2900, loss[loss=0.2091, simple_loss=0.3057, pruned_loss=0.05628, over 8104.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.3003, pruned_loss=0.07131, over 1612746.85 frames. ], batch size: 23, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:33:01,415 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.452e+02 2.959e+02 3.782e+02 6.842e+02, threshold=5.917e+02, percent-clipped=3.0
+2023-02-06 16:33:29,127 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116103.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:29,811 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116104.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:35,327 INFO [train.py:901] (0/4) Epoch 15, batch 2950, loss[loss=0.2057, simple_loss=0.2789, pruned_loss=0.06623, over 7655.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2997, pruned_loss=0.07127, over 1611824.52 frames. ], batch size: 19, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:33:36,703 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 16:33:42,118 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116123.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:44,201 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9137, 1.5291, 1.6863, 1.3356, 1.0320, 1.4997, 1.7816, 1.5755],
+       device='cuda:0'), covar=tensor([0.0524, 0.1184, 0.1597, 0.1381, 0.0606, 0.1428, 0.0664, 0.0606],
+       device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0156, 0.0102, 0.0162, 0.0115, 0.0139],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+       device='cuda:0')
+2023-02-06 16:33:45,589 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116128.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:46,276 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116129.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:49,699 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:34:08,982 INFO [train.py:901] (0/4) Epoch 15, batch 3000, loss[loss=0.2436, simple_loss=0.3272, pruned_loss=0.07997, over 8321.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3006, pruned_loss=0.07161, over 1613811.54 frames. ], batch size: 25, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:34:08,982 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 16:34:21,679 INFO [train.py:935] (0/4) Epoch 15, validation: loss=0.1808, simple_loss=0.2809, pruned_loss=0.04034, over 944034.00 frames.
+2023-02-06 16:34:21,681 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-06 16:34:22,354 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.534e+02 3.127e+02 3.845e+02 7.463e+02, threshold=6.253e+02, percent-clipped=8.0
+2023-02-06 16:34:57,896 INFO [train.py:901] (0/4) Epoch 15, batch 3050, loss[loss=0.2187, simple_loss=0.3075, pruned_loss=0.06491, over 8026.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2994, pruned_loss=0.07087, over 1616752.71 frames. ], batch size: 22, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:35:19,947 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-06 16:35:26,165 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116254.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 16:35:31,947 INFO [train.py:901] (0/4) Epoch 15, batch 3100, loss[loss=0.2444, simple_loss=0.313, pruned_loss=0.08789, over 8441.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2995, pruned_loss=0.0715, over 1611928.58 frames. ], batch size: 27, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:35:32,570 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.573e+02 3.095e+02 3.865e+02 1.142e+03, threshold=6.190e+02, percent-clipped=3.0
+2023-02-06 16:35:33,410 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2629, 1.2995, 1.5147, 1.2097, 0.7165, 1.2771, 1.1845, 0.9772],
+       device='cuda:0'), covar=tensor([0.0544, 0.1227, 0.1576, 0.1325, 0.0562, 0.1464, 0.0679, 0.0667],
+       device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0156, 0.0101, 0.0162, 0.0115, 0.0139],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+       device='cuda:0')
+2023-02-06 16:36:06,934 INFO [train.py:901] (0/4) Epoch 15, batch 3150, loss[loss=0.2315, simple_loss=0.3194, pruned_loss=0.07173, over 8664.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3004, pruned_loss=0.07174, over 1615416.53 frames. ], batch size: 34, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:36:20,545 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0698, 1.2693, 1.2085, 0.5390, 1.2350, 0.9958, 0.1358, 1.2036],
+       device='cuda:0'), covar=tensor([0.0339, 0.0276, 0.0232, 0.0458, 0.0333, 0.0777, 0.0625, 0.0229],
+       device='cuda:0'), in_proj_covar=tensor([0.0425, 0.0364, 0.0313, 0.0420, 0.0351, 0.0507, 0.0376, 0.0387],
+       device='cuda:0'), out_proj_covar=tensor([1.1755e-04, 9.7834e-05, 8.3975e-05, 1.1396e-04, 9.5599e-05, 1.4763e-04,
+       1.0402e-04, 1.0548e-04], device='cuda:0')
+2023-02-06 16:36:27,181 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116341.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:36:30,924 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 16:36:34,415 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-02-06 16:36:41,981 INFO [train.py:901] (0/4) Epoch 15, batch 3200, loss[loss=0.2132, simple_loss=0.2975, pruned_loss=0.06444, over 7426.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3017, pruned_loss=0.07276, over 1613714.39 frames. ], batch size: 17, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:36:43,354 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.524e+02 3.304e+02 3.942e+02 1.206e+03, threshold=6.608e+02, percent-clipped=2.0
+2023-02-06 16:36:46,721 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116369.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 16:36:46,960 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 16:36:51,221 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116376.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:37:16,512 INFO [train.py:901] (0/4) Epoch 15, batch 3250, loss[loss=0.2297, simple_loss=0.3032, pruned_loss=0.07808, over 8327.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3006, pruned_loss=0.07223, over 1610502.71 frames. ], batch size: 26, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:37:52,486 INFO [train.py:901] (0/4) Epoch 15, batch 3300, loss[loss=0.2373, simple_loss=0.3176, pruned_loss=0.07851, over 8583.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2994, pruned_loss=0.07177, over 1612193.96 frames. ], batch size: 31, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:37:53,156 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.388e+02 2.875e+02 3.716e+02 9.209e+02, threshold=5.750e+02, percent-clipped=3.0
+2023-02-06 16:37:53,278 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:37:55,220 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116467.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:00,666 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2394, 1.5229, 2.3655, 1.2385, 2.2099, 2.5220, 2.6455, 2.1350],
+       device='cuda:0'), covar=tensor([0.1158, 0.1179, 0.0444, 0.1932, 0.0663, 0.0399, 0.0708, 0.0721],
+       device='cuda:0'), in_proj_covar=tensor([0.0277, 0.0305, 0.0270, 0.0299, 0.0287, 0.0248, 0.0376, 0.0299],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:38:02,542 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116478.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:12,013 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116491.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:26,452 INFO [train.py:901] (0/4) Epoch 15, batch 3350, loss[loss=0.242, simple_loss=0.3413, pruned_loss=0.0714, over 8599.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3007, pruned_loss=0.07209, over 1617721.05 frames. ], batch size: 39, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:38:33,237 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116523.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:45,607 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3708, 1.4959, 1.3316, 1.7705, 0.7031, 1.2482, 1.2872, 1.4498],
+       device='cuda:0'), covar=tensor([0.0876, 0.0722, 0.1068, 0.0523, 0.1088, 0.1358, 0.0790, 0.0747],
+       device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0200, 0.0247, 0.0210, 0.0209, 0.0246, 0.0253, 0.0212],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-06 16:39:02,058 INFO [train.py:901] (0/4) Epoch 15, batch 3400, loss[loss=0.2931, simple_loss=0.3546, pruned_loss=0.1159, over 7180.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3001, pruned_loss=0.07206, over 1619573.04 frames. ], batch size: 71, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:39:02,719 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.566e+02 3.149e+02 4.104e+02 8.501e+02, threshold=6.298e+02, percent-clipped=7.0
+2023-02-06 16:39:06,250 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0272, 2.4608, 3.5948, 2.0552, 1.7486, 3.6319, 0.7470, 2.0618],
+       device='cuda:0'), covar=tensor([0.1574, 0.1338, 0.0220, 0.2126, 0.3676, 0.0279, 0.2825, 0.1787],
+       device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0178, 0.0110, 0.0214, 0.0258, 0.0114, 0.0162, 0.0176],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:0')
+2023-02-06 16:39:14,871 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116582.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:39:15,885 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 16:39:22,184 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116593.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:39:36,208 INFO [train.py:901] (0/4) Epoch 15, batch 3450, loss[loss=0.2574, simple_loss=0.3274, pruned_loss=0.09364, over 8573.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2991, pruned_loss=0.07123, over 1619883.29 frames. ], batch size: 49, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:39:44,410 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116625.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 16:39:51,707 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116636.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:40:01,115 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116650.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 16:40:10,183 INFO [train.py:901] (0/4) Epoch 15, batch 3500, loss[loss=0.2438, simple_loss=0.3256, pruned_loss=0.081, over 8432.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2998, pruned_loss=0.07188, over 1617661.86 frames. ], batch size: 27, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:40:10,857 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.398e+02 2.936e+02 3.935e+02 9.560e+02, threshold=5.871e+02, percent-clipped=3.0
+2023-02-06 16:40:22,993 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 16:40:26,403 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116685.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:40:35,613 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 16:40:44,884 INFO [train.py:901] (0/4) Epoch 15, batch 3550, loss[loss=0.1867, simple_loss=0.2796, pruned_loss=0.04695, over 7797.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2993, pruned_loss=0.0719, over 1612493.06 frames. ], batch size: 19, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:40:46,337 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6632, 1.4458, 1.5471, 1.2426, 0.8681, 1.2932, 1.4568, 1.3224],
+       device='cuda:0'), covar=tensor([0.0526, 0.1270, 0.1719, 0.1447, 0.0588, 0.1566, 0.0724, 0.0637],
+       device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0101, 0.0162, 0.0114, 0.0139],
+       device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+       device='cuda:0')
+2023-02-06 16:40:50,941 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116722.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:08,597 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116747.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:19,416 INFO [train.py:901] (0/4) Epoch 15, batch 3600, loss[loss=0.2597, simple_loss=0.3419, pruned_loss=0.08879, over 8717.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2998, pruned_loss=0.07176, over 1617860.32 frames. ], batch size: 30, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:41:20,117 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.627e+02 3.005e+02 3.918e+02 8.490e+02, threshold=6.010e+02, percent-clipped=4.0
+2023-02-06 16:41:25,808 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116772.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:47,436 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116800.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:52,898 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116808.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:56,185 INFO [train.py:901] (0/4) Epoch 15, batch 3650, loss[loss=0.254, simple_loss=0.3455, pruned_loss=0.08127, over 8617.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2992, pruned_loss=0.0709, over 1614878.24 frames. ], batch size: 49, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:42:00,889 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:08,523 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 16:42:13,571 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6202, 2.8953, 2.5225, 4.0075, 1.6136, 2.1117, 2.3664, 3.1073],
+       device='cuda:0'), covar=tensor([0.0639, 0.0830, 0.0816, 0.0253, 0.1175, 0.1297, 0.1062, 0.0773],
+       device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0203, 0.0251, 0.0212, 0.0211, 0.0249, 0.0255, 0.0213],
+       device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:0')
+2023-02-06 16:42:13,596 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116838.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:21,036 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116849.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:24,469 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5001, 2.5923, 1.7991, 2.1869, 2.1046, 1.4942, 1.8547, 2.0793],
+       device='cuda:0'), covar=tensor([0.1392, 0.0340, 0.1101, 0.0620, 0.0695, 0.1501, 0.0987, 0.0828],
+       device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0232, 0.0327, 0.0305, 0.0302, 0.0333, 0.0348, 0.0310],
+       device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:42:30,281 INFO [train.py:901] (0/4) Epoch 15, batch 3700, loss[loss=0.2621, simple_loss=0.3411, pruned_loss=0.09159, over 8343.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.301, pruned_loss=0.07224, over 1612894.43 frames. ], batch size: 25, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:42:30,476 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:30,964 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.301e+02 2.797e+02 3.414e+02 8.630e+02, threshold=5.595e+02, percent-clipped=3.0
+2023-02-06 16:42:33,136 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116867.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:36,571 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 16:42:38,067 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:43:06,653 INFO [train.py:901] (0/4) Epoch 15, batch 3750, loss[loss=0.1891, simple_loss=0.2748, pruned_loss=0.05166, over 7807.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2996, pruned_loss=0.07118, over 1613948.00 frames. ], batch size: 20, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:43:13,675 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116923.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:43:14,654 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 16:43:40,817 INFO [train.py:901] (0/4) Epoch 15, batch 3800, loss[loss=0.2665, simple_loss=0.3474, pruned_loss=0.09274, over 8509.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2991, pruned_loss=0.07093, over 1612185.01 frames. ], batch size: 26, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:43:41,467 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.512e+02 2.989e+02 3.697e+02 7.171e+02, threshold=5.977e+02, percent-clipped=7.0
+2023-02-06 16:43:52,408 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116980.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:43:53,903 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116982.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:44:15,603 INFO [train.py:901] (0/4) Epoch 15, batch 3850, loss[loss=0.2203, simple_loss=0.3054, pruned_loss=0.0676, over 8470.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.297, pruned_loss=0.07016, over 1609026.79 frames. ], batch size: 29, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:44:26,332 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 16:44:42,571 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 16:44:46,230 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117056.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:44:50,982 INFO [train.py:901] (0/4) Epoch 15, batch 3900, loss[loss=0.241, simple_loss=0.321, pruned_loss=0.08054, over 8439.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2966, pruned_loss=0.06991, over 1608228.37 frames. ], batch size: 27, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:44:51,616 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 2.428e+02 3.027e+02 3.797e+02 6.654e+02, threshold=6.053e+02, percent-clipped=2.0
+2023-02-06 16:44:53,026 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:03,680 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117081.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:12,944 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:24,919 INFO [train.py:901] (0/4) Epoch 15, batch 3950, loss[loss=0.2131, simple_loss=0.2932, pruned_loss=0.06656, over 8364.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2978, pruned_loss=0.07094, over 1610617.85 frames. ], batch size: 24, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:46:01,088 INFO [train.py:901] (0/4) Epoch 15, batch 4000, loss[loss=0.211, simple_loss=0.2912, pruned_loss=0.06539, over 7649.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2972, pruned_loss=0.07055, over 1611954.78 frames. ], batch size: 19, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:46:01,785 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.473e+02 2.992e+02 3.534e+02 5.115e+02, threshold=5.984e+02, percent-clipped=0.0
+2023-02-06 16:46:01,867 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117164.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:12,553 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117179.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:13,875 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117181.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:29,831 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117204.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:35,577 INFO [train.py:901] (0/4) Epoch 15, batch 4050, loss[loss=0.2376, simple_loss=0.3104, pruned_loss=0.08241, over 8238.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2988, pruned_loss=0.07165, over 1611851.75 frames. ], batch size: 24, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:46:47,321 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 16:46:47,801 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8845, 1.7172, 3.5002, 1.5524, 2.3137, 3.8386, 3.9594, 3.2249],
+       device='cuda:0'), covar=tensor([0.1236, 0.1643, 0.0332, 0.2114, 0.1110, 0.0251, 0.0532, 0.0619],
+       device='cuda:0'), in_proj_covar=tensor([0.0278, 0.0306, 0.0270, 0.0302, 0.0287, 0.0248, 0.0377, 0.0299],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:0')
+2023-02-06 16:46:53,286 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:53,873 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117239.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:11,628 INFO [train.py:901] (0/4) Epoch 15, batch 4100, loss[loss=0.2087, simple_loss=0.2759, pruned_loss=0.0707, over 7911.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2995, pruned_loss=0.07186, over 1612016.16 frames. ], batch size: 20, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:47:11,833 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:12,284 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.506e+02 3.096e+02 3.742e+02 9.544e+02, threshold=6.191e+02, percent-clipped=4.0
+2023-02-06 16:47:22,912 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117279.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:46,643 INFO [train.py:901] (0/4) Epoch 15, batch 4150, loss[loss=0.1975, simple_loss=0.2779, pruned_loss=0.05853, over 7947.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2973, pruned_loss=0.07071, over 1607597.51 frames. ], batch size: 20, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:48:10,069 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:12,983 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117351.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:21,503 INFO [train.py:901] (0/4) Epoch 15, batch 4200, loss[loss=0.2068, simple_loss=0.2978, pruned_loss=0.05795, over 8246.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2987, pruned_loss=0.07098, over 1613810.04 frames. ], batch size: 24, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:48:22,822 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.404e+02 2.907e+02 3.383e+02 1.073e+03, threshold=5.814e+02, percent-clipped=1.0
+2023-02-06 16:48:31,967 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117376.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:40,581 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 16:48:57,043 INFO [train.py:901] (0/4) Epoch 15, batch 4250, loss[loss=0.2074, simple_loss=0.2994, pruned_loss=0.05773, over 8466.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2986, pruned_loss=0.07083, over 1613114.12 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:49:03,728 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 16:49:14,096 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:49:17,734 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 16:49:30,903 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:49:31,425 INFO [train.py:901] (0/4) Epoch 15, batch 4300, loss[loss=0.2141, simple_loss=0.3004, pruned_loss=0.06393, over 8292.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2982, pruned_loss=0.07069, over 1612217.80 frames. ], batch size: 23, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:49:32,091 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.479e+02 3.115e+02 3.892e+02 7.815e+02, threshold=6.229e+02, percent-clipped=5.0
+2023-02-06 16:49:59,158 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 16:50:05,915 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-02-06 16:50:07,601 INFO [train.py:901] (0/4) Epoch 15, batch 4350, loss[loss=0.2231, simple_loss=0.2966, pruned_loss=0.07474, over 7977.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2993, pruned_loss=0.07098, over 1616030.70 frames. ], batch size: 21, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:50:22,377 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7337, 2.4211, 3.4735, 2.8159, 3.1926, 2.6696, 2.2157, 2.0030],
+       device='cuda:0'), covar=tensor([0.4237, 0.4581, 0.1327, 0.2854, 0.1961, 0.2355, 0.1736, 0.4616],
+       device='cuda:0'), in_proj_covar=tensor([0.0908, 0.0915, 0.0752, 0.0880, 0.0954, 0.0839, 0.0717, 0.0790],
+       device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:0')
+2023-02-06 16:50:23,043 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117535.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:36,326 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 16:50:40,548 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117560.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:42,371 INFO [train.py:901] (0/4) Epoch 15, batch 4400, loss[loss=0.2437, simple_loss=0.3235, pruned_loss=0.08195, over 8316.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3004, pruned_loss=0.07195, over 1619038.74 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:50:43,039 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.383e+02 3.124e+02 3.901e+02 9.506e+02, threshold=6.248e+02, percent-clipped=7.0
+2023-02-06 16:50:55,784 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:51:16,413 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-06 16:51:17,967 INFO [train.py:901] (0/4) Epoch 15, batch 4450, loss[loss=0.2546, simple_loss=0.3239, pruned_loss=0.09263, over 8029.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3005, pruned_loss=0.07233, over 1612276.04 frames. ], batch size: 22, lr: 5.07e-03, grad_scale: 16.0
+2023-02-06 16:51:17,986 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 16:51:48,982 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5274, 1.8557, 2.0175, 1.1895, 2.0992, 1.3986, 0.5267, 1.7814],
+       device='cuda:0'), covar=tensor([0.0506, 0.0281, 0.0220, 0.0443, 0.0280, 0.0742, 0.0705, 0.0240],
+       device='cuda:0'), in_proj_covar=tensor([0.0415, 0.0358, 0.0309, 0.0412, 0.0343, 0.0503, 0.0372, 0.0378],
+       device='cuda:0'), out_proj_covar=tensor([1.1465e-04, 9.6427e-05, 8.2773e-05, 1.1158e-04, 9.3190e-05, 1.4649e-04,
+       1.0254e-04, 1.0265e-04], device='cuda:0')
+2023-02-06 16:51:52,100 INFO [train.py:901] (0/4) Epoch 15, batch 4500, loss[loss=0.1742, simple_loss=0.2594, pruned_loss=0.04446, over 7807.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2999, pruned_loss=0.07149, over 1616815.86 frames. ], batch size: 19, lr: 5.07e-03, grad_scale: 16.0
+2023-02-06 16:51:52,735 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.480e+02 2.963e+02 4.043e+02 1.091e+03, threshold=5.927e+02, percent-clipped=5.0
+2023-02-06 16:52:11,212 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:52:11,853 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 16:52:16,197 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:52:27,013 INFO [train.py:901] (0/4) Epoch 15, batch 4550, loss[loss=0.2113, simple_loss=0.2992, pruned_loss=0.06171, over 8350.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.298, pruned_loss=0.07061, over 1609876.17 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:52:31,115 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9039, 5.9828, 5.1946, 2.4033, 5.3038, 5.6312, 5.5410, 5.4137],
+       device='cuda:0'), covar=tensor([0.0535, 0.0401, 0.0881, 0.4902, 0.0691, 0.0859, 0.1181, 0.0688],
+       device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0401, 0.0403, 0.0501, 0.0394, 0.0403, 0.0386, 0.0348],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:53:02,105 INFO [train.py:901] (0/4) Epoch 15, batch 4600, loss[loss=0.1995, simple_loss=0.2833, pruned_loss=0.05788, over 7655.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2973, pruned_loss=0.07028, over 1610651.31 frames. ], batch size: 19, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:53:03,474 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.311e+02 2.848e+02 3.671e+02 5.923e+02, threshold=5.697e+02, percent-clipped=0.0
+2023-02-06 16:53:31,590 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7878, 5.8222, 5.2317, 2.5000, 5.1599, 5.5399, 5.3724, 5.3427],
+       device='cuda:0'), covar=tensor([0.0520, 0.0416, 0.0970, 0.4663, 0.0715, 0.0723, 0.1186, 0.0513],
+       device='cuda:0'), in_proj_covar=tensor([0.0487, 0.0404, 0.0408, 0.0506, 0.0398, 0.0407, 0.0389, 0.0351],
+       device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:0')
+2023-02-06 16:53:31,670 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117806.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:53:36,043 INFO [train.py:901] (0/4) Epoch 15, batch 4650, loss[loss=0.214, simple_loss=0.2941, pruned_loss=0.06692, over 8497.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2975, pruned_loss=0.07015, over 1607873.48 frames. ], batch size: 26, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:54:11,643 INFO [train.py:901] (0/4) Epoch 15, batch 4700, loss[loss=0.2233, simple_loss=0.3, pruned_loss=0.07332, over 8092.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2969, pruned_loss=0.06997, over 1611315.82 frames. ], batch size: 21, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:54:12,892 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.509e+02 3.109e+02 4.231e+02 8.316e+02, threshold=6.217e+02, percent-clipped=12.0
+2023-02-06 16:54:46,542 INFO [train.py:901] (0/4) Epoch 15, batch 4750, loss[loss=0.2207, simple_loss=0.3006, pruned_loss=0.07043, over 8130.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2987, pruned_loss=0.07117, over 1608474.60 frames. ], batch size: 22, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:55:11,957 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 16:55:15,287 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training.
Duration: 25.775 +2023-02-06 16:55:16,043 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:55:22,520 INFO [train.py:901] (0/4) Epoch 15, batch 4800, loss[loss=0.2585, simple_loss=0.3419, pruned_loss=0.08753, over 8633.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2989, pruned_loss=0.07117, over 1613541.84 frames. ], batch size: 31, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:55:23,941 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.482e+02 3.121e+02 4.555e+02 1.692e+03, threshold=6.242e+02, percent-clipped=8.0 +2023-02-06 16:55:33,833 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:55:47,559 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-118000.pt +2023-02-06 16:55:57,738 INFO [train.py:901] (0/4) Epoch 15, batch 4850, loss[loss=0.2157, simple_loss=0.299, pruned_loss=0.06618, over 8026.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3, pruned_loss=0.07186, over 1615572.38 frames. ], batch size: 22, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:56:07,046 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 16:56:29,208 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:56:31,984 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:56:32,449 INFO [train.py:901] (0/4) Epoch 15, batch 4900, loss[loss=0.2098, simple_loss=0.2812, pruned_loss=0.06923, over 7683.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3006, pruned_loss=0.07259, over 1618646.68 frames. ], batch size: 18, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:56:33,728 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.453e+02 2.951e+02 3.688e+02 9.605e+02, threshold=5.903e+02, percent-clipped=5.0 +2023-02-06 16:56:50,265 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:57:03,285 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.38 vs. limit=5.0 +2023-02-06 16:57:04,828 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.12 vs. limit=5.0 +2023-02-06 16:57:07,608 INFO [train.py:901] (0/4) Epoch 15, batch 4950, loss[loss=0.1959, simple_loss=0.2716, pruned_loss=0.06012, over 8245.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2996, pruned_loss=0.07169, over 1619934.91 frames. ], batch size: 22, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:57:38,290 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9188, 1.7431, 3.3605, 1.5395, 2.4171, 3.7934, 3.8034, 3.1746], + device='cuda:0'), covar=tensor([0.1197, 0.1577, 0.0363, 0.2070, 0.0957, 0.0230, 0.0498, 0.0620], + device='cuda:0'), in_proj_covar=tensor([0.0274, 0.0304, 0.0268, 0.0299, 0.0287, 0.0246, 0.0373, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 16:57:42,115 INFO [train.py:901] (0/4) Epoch 15, batch 5000, loss[loss=0.2084, simple_loss=0.2963, pruned_loss=0.0602, over 8256.00 frames. 
], tot_loss[loss=0.2211, simple_loss=0.2996, pruned_loss=0.07131, over 1619407.57 frames. ], batch size: 24, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:57:43,373 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.421e+02 2.910e+02 3.813e+02 6.624e+02, threshold=5.820e+02, percent-clipped=4.0 +2023-02-06 16:57:58,591 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:58:17,614 INFO [train.py:901] (0/4) Epoch 15, batch 5050, loss[loss=0.2187, simple_loss=0.2972, pruned_loss=0.07009, over 7931.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2993, pruned_loss=0.07144, over 1613323.54 frames. ], batch size: 20, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:58:22,822 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 16:58:43,432 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 16:58:52,555 INFO [train.py:901] (0/4) Epoch 15, batch 5100, loss[loss=0.1907, simple_loss=0.2725, pruned_loss=0.05439, over 7976.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2999, pruned_loss=0.07198, over 1612318.61 frames. ], batch size: 21, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:58:53,829 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.591e+02 3.125e+02 3.877e+02 7.785e+02, threshold=6.249e+02, percent-clipped=4.0 +2023-02-06 16:59:08,396 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:59:18,594 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 16:59:23,821 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118307.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:59:27,363 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2248, 2.0559, 2.8538, 2.2625, 2.6621, 2.2246, 1.9151, 1.5063], + device='cuda:0'), covar=tensor([0.4859, 0.4685, 0.1609, 0.3291, 0.2356, 0.2763, 0.1915, 0.4994], + device='cuda:0'), in_proj_covar=tensor([0.0910, 0.0915, 0.0754, 0.0885, 0.0955, 0.0838, 0.0717, 0.0795], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 16:59:27,775 INFO [train.py:901] (0/4) Epoch 15, batch 5150, loss[loss=0.2512, simple_loss=0.3267, pruned_loss=0.08791, over 8501.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2997, pruned_loss=0.07177, over 1610412.57 frames. ], batch size: 28, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:59:51,072 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:59:51,389 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 17:00:02,420 INFO [train.py:901] (0/4) Epoch 15, batch 5200, loss[loss=0.2025, simple_loss=0.2906, pruned_loss=0.05717, over 8245.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3003, pruned_loss=0.0723, over 1614489.98 frames. 
], batch size: 22, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:00:03,701 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.269e+02 2.811e+02 3.673e+02 9.088e+02, threshold=5.623e+02, percent-clipped=2.0 +2023-02-06 17:00:29,681 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:00:37,923 INFO [train.py:901] (0/4) Epoch 15, batch 5250, loss[loss=0.2156, simple_loss=0.2955, pruned_loss=0.06787, over 8480.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.299, pruned_loss=0.07109, over 1613843.78 frames. ], batch size: 26, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:00:46,152 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 17:01:12,971 INFO [train.py:901] (0/4) Epoch 15, batch 5300, loss[loss=0.2172, simple_loss=0.2937, pruned_loss=0.0704, over 8619.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2989, pruned_loss=0.07109, over 1615688.76 frames. ], batch size: 34, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:01:14,347 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.534e+02 2.995e+02 3.765e+02 8.916e+02, threshold=5.991e+02, percent-clipped=4.0 +2023-02-06 17:01:47,925 INFO [train.py:901] (0/4) Epoch 15, batch 5350, loss[loss=0.2222, simple_loss=0.3088, pruned_loss=0.06783, over 8330.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2988, pruned_loss=0.07109, over 1613421.39 frames. ], batch size: 25, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:01:50,867 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:02:01,049 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118530.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:02:15,660 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9905, 2.3835, 3.5932, 1.8624, 1.6184, 3.5588, 0.7158, 1.9939], + device='cuda:0'), covar=tensor([0.1741, 0.1441, 0.0212, 0.2141, 0.3496, 0.0322, 0.2909, 0.1740], + device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0177, 0.0111, 0.0212, 0.0256, 0.0115, 0.0160, 0.0174], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:02:24,465 INFO [train.py:901] (0/4) Epoch 15, batch 5400, loss[loss=0.1888, simple_loss=0.2724, pruned_loss=0.0526, over 8481.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2995, pruned_loss=0.07137, over 1613588.78 frames. ], batch size: 25, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:02:25,794 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.478e+02 2.903e+02 3.717e+02 8.291e+02, threshold=5.806e+02, percent-clipped=5.0 +2023-02-06 17:02:58,969 INFO [train.py:901] (0/4) Epoch 15, batch 5450, loss[loss=0.2161, simple_loss=0.2932, pruned_loss=0.06944, over 8440.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3003, pruned_loss=0.07163, over 1617999.93 frames. 
], batch size: 49, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:03:11,222 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118631.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:21,575 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:22,326 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3595, 1.4758, 1.4966, 1.1212, 1.5570, 1.2022, 0.6191, 1.4208], + device='cuda:0'), covar=tensor([0.0360, 0.0235, 0.0169, 0.0340, 0.0259, 0.0500, 0.0506, 0.0185], + device='cuda:0'), in_proj_covar=tensor([0.0419, 0.0361, 0.0310, 0.0415, 0.0346, 0.0508, 0.0373, 0.0383], + device='cuda:0'), out_proj_covar=tensor([1.1577e-04, 9.7042e-05, 8.3039e-05, 1.1209e-04, 9.3657e-05, 1.4806e-04, + 1.0268e-04, 1.0398e-04], device='cuda:0') +2023-02-06 17:03:24,936 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:26,158 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:34,864 INFO [train.py:901] (0/4) Epoch 15, batch 5500, loss[loss=0.1659, simple_loss=0.2438, pruned_loss=0.04403, over 7546.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2982, pruned_loss=0.07048, over 1611709.44 frames. ], batch size: 18, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:03:36,239 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.592e+02 3.113e+02 3.610e+02 8.755e+02, threshold=6.227e+02, percent-clipped=2.0 +2023-02-06 17:03:38,415 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 17:03:54,399 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:09,094 INFO [train.py:901] (0/4) Epoch 15, batch 5550, loss[loss=0.1701, simple_loss=0.2598, pruned_loss=0.04022, over 7650.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2968, pruned_loss=0.0699, over 1607514.52 frames. ], batch size: 19, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:04:32,278 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118746.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:44,981 INFO [train.py:901] (0/4) Epoch 15, batch 5600, loss[loss=0.22, simple_loss=0.2994, pruned_loss=0.07031, over 8318.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2982, pruned_loss=0.07038, over 1610932.37 frames. 
], batch size: 25, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:04:46,299 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.537e+02 3.218e+02 3.925e+02 9.216e+02, threshold=6.435e+02, percent-clipped=4.0 +2023-02-06 17:04:47,197 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:52,580 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:55,257 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9354, 2.4534, 3.4375, 1.7983, 1.7385, 3.3989, 0.6345, 1.9550], + device='cuda:0'), covar=tensor([0.1665, 0.1290, 0.0304, 0.2249, 0.3344, 0.0297, 0.3190, 0.1790], + device='cuda:0'), in_proj_covar=tensor([0.0171, 0.0177, 0.0111, 0.0211, 0.0255, 0.0115, 0.0160, 0.0173], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:05:09,079 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:05:09,671 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7926, 5.8535, 5.1769, 2.3841, 5.2183, 5.5607, 5.4700, 5.2475], + device='cuda:0'), covar=tensor([0.0584, 0.0434, 0.0969, 0.4718, 0.0661, 0.0766, 0.1016, 0.0581], + device='cuda:0'), in_proj_covar=tensor([0.0491, 0.0406, 0.0412, 0.0510, 0.0401, 0.0414, 0.0391, 0.0357], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:05:14,505 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:05:19,155 INFO [train.py:901] (0/4) Epoch 15, batch 5650, loss[loss=0.2521, simple_loss=0.3331, pruned_loss=0.08558, over 8517.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2994, pruned_loss=0.07127, over 1616730.53 frames. ], batch size: 28, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:05:43,425 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 17:05:53,492 INFO [train.py:901] (0/4) Epoch 15, batch 5700, loss[loss=0.2066, simple_loss=0.2805, pruned_loss=0.06635, over 7540.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2996, pruned_loss=0.07153, over 1612561.45 frames. ], batch size: 18, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:05:54,817 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.491e+02 2.972e+02 3.726e+02 7.690e+02, threshold=5.944e+02, percent-clipped=5.0 +2023-02-06 17:06:14,682 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 17:06:21,156 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:06:29,110 INFO [train.py:901] (0/4) Epoch 15, batch 5750, loss[loss=0.1931, simple_loss=0.2854, pruned_loss=0.05034, over 8668.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.301, pruned_loss=0.07217, over 1611112.98 frames. 
], batch size: 34, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:06:38,206 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:06:46,380 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 17:07:04,205 INFO [train.py:901] (0/4) Epoch 15, batch 5800, loss[loss=0.2257, simple_loss=0.3096, pruned_loss=0.07089, over 8456.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2999, pruned_loss=0.07131, over 1612267.33 frames. ], batch size: 27, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:07:05,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.317e+02 2.944e+02 4.100e+02 6.996e+02, threshold=5.887e+02, percent-clipped=4.0 +2023-02-06 17:07:26,177 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:07:32,094 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:07:39,884 INFO [train.py:901] (0/4) Epoch 15, batch 5850, loss[loss=0.2018, simple_loss=0.2677, pruned_loss=0.06796, over 7285.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2994, pruned_loss=0.07154, over 1612024.30 frames. ], batch size: 16, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:07:46,174 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:07:49,451 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:02,794 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:13,672 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:14,146 INFO [train.py:901] (0/4) Epoch 15, batch 5900, loss[loss=0.1894, simple_loss=0.275, pruned_loss=0.05185, over 8125.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3001, pruned_loss=0.07128, over 1614921.88 frames. 
], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:08:15,366 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.486e+02 2.938e+02 3.942e+02 7.909e+02, threshold=5.877e+02, percent-clipped=6.0 +2023-02-06 17:08:30,146 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:34,134 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:44,788 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119108.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:45,485 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4936, 1.8200, 1.8923, 0.9435, 1.9392, 1.3767, 0.4182, 1.6554], + device='cuda:0'), covar=tensor([0.0443, 0.0283, 0.0225, 0.0502, 0.0294, 0.0730, 0.0678, 0.0254], + device='cuda:0'), in_proj_covar=tensor([0.0416, 0.0359, 0.0309, 0.0415, 0.0344, 0.0502, 0.0373, 0.0382], + device='cuda:0'), out_proj_covar=tensor([1.1483e-04, 9.6403e-05, 8.2617e-05, 1.1202e-04, 9.3162e-05, 1.4612e-04, + 1.0260e-04, 1.0383e-04], device='cuda:0') +2023-02-06 17:08:46,716 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119111.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:47,875 INFO [train.py:901] (0/4) Epoch 15, batch 5950, loss[loss=0.2048, simple_loss=0.2885, pruned_loss=0.06058, over 8250.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2999, pruned_loss=0.07113, over 1609890.61 frames. ], batch size: 24, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:09:22,897 INFO [train.py:901] (0/4) Epoch 15, batch 6000, loss[loss=0.2312, simple_loss=0.2956, pruned_loss=0.08347, over 7533.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2986, pruned_loss=0.07115, over 1606334.40 frames. ], batch size: 18, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:09:22,898 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 17:09:35,270 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1214, 1.8245, 2.4887, 2.0388, 2.3368, 2.1153, 1.8011, 1.1771], + device='cuda:0'), covar=tensor([0.4791, 0.4637, 0.1476, 0.3103, 0.2172, 0.2774, 0.1928, 0.4737], + device='cuda:0'), in_proj_covar=tensor([0.0914, 0.0917, 0.0752, 0.0883, 0.0953, 0.0839, 0.0715, 0.0792], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:09:35,680 INFO [train.py:935] (0/4) Epoch 15, validation: loss=0.181, simple_loss=0.2808, pruned_loss=0.04056, over 944034.00 frames. +2023-02-06 17:09:35,681 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 17:09:37,104 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.578e+02 3.120e+02 3.956e+02 1.218e+03, threshold=6.240e+02, percent-clipped=5.0 +2023-02-06 17:09:43,578 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-06 17:09:50,216 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4373, 1.3351, 4.6758, 1.7576, 4.0698, 3.9052, 4.2198, 4.0681], + device='cuda:0'), covar=tensor([0.0668, 0.4574, 0.0444, 0.3724, 0.1079, 0.0834, 0.0568, 0.0675], + device='cuda:0'), in_proj_covar=tensor([0.0559, 0.0607, 0.0636, 0.0578, 0.0652, 0.0557, 0.0556, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:10:10,487 INFO [train.py:901] (0/4) Epoch 15, batch 6050, loss[loss=0.1879, simple_loss=0.2744, pruned_loss=0.05075, over 8032.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3007, pruned_loss=0.07163, over 1616999.02 frames. ], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:10:44,323 INFO [train.py:901] (0/4) Epoch 15, batch 6100, loss[loss=0.2984, simple_loss=0.3459, pruned_loss=0.1254, over 6561.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3009, pruned_loss=0.0718, over 1612521.93 frames. ], batch size: 71, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:10:45,658 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.463e+02 3.114e+02 4.132e+02 8.492e+02, threshold=6.229e+02, percent-clipped=7.0 +2023-02-06 17:11:03,647 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-06 17:11:18,259 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 17:11:20,324 INFO [train.py:901] (0/4) Epoch 15, batch 6150, loss[loss=0.2859, simple_loss=0.3477, pruned_loss=0.112, over 6963.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3004, pruned_loss=0.07144, over 1612666.26 frames. ], batch size: 71, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:11:20,502 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1845, 1.8088, 2.0668, 1.9518, 1.1490, 1.9164, 2.3780, 2.2223], + device='cuda:0'), covar=tensor([0.0400, 0.1144, 0.1553, 0.1220, 0.0607, 0.1328, 0.0605, 0.0531], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0162, 0.0114, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 17:11:54,706 INFO [train.py:901] (0/4) Epoch 15, batch 6200, loss[loss=0.218, simple_loss=0.2854, pruned_loss=0.07529, over 7938.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3, pruned_loss=0.07131, over 1616890.52 frames. 
], batch size: 20, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:11:55,624 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:11:56,081 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.347e+02 3.204e+02 3.871e+02 7.576e+02, threshold=6.408e+02, percent-clipped=2.0 +2023-02-06 17:12:14,452 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:12:20,618 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0445, 1.2287, 1.1888, 0.6190, 1.2016, 1.0390, 0.0858, 1.1897], + device='cuda:0'), covar=tensor([0.0339, 0.0300, 0.0275, 0.0458, 0.0323, 0.0788, 0.0652, 0.0262], + device='cuda:0'), in_proj_covar=tensor([0.0423, 0.0364, 0.0315, 0.0422, 0.0350, 0.0512, 0.0378, 0.0391], + device='cuda:0'), out_proj_covar=tensor([1.1685e-04, 9.7884e-05, 8.4289e-05, 1.1401e-04, 9.4832e-05, 1.4925e-04, + 1.0413e-04, 1.0619e-04], device='cuda:0') +2023-02-06 17:12:24,803 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4512, 2.7333, 1.8234, 2.2318, 2.2795, 1.5887, 2.1006, 1.9897], + device='cuda:0'), covar=tensor([0.1406, 0.0299, 0.1084, 0.0612, 0.0631, 0.1340, 0.1031, 0.0867], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0231, 0.0327, 0.0302, 0.0303, 0.0332, 0.0348, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:12:30,385 INFO [train.py:901] (0/4) Epoch 15, batch 6250, loss[loss=0.2453, simple_loss=0.3178, pruned_loss=0.08643, over 8123.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2998, pruned_loss=0.07115, over 1618504.44 frames. ], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:12:44,678 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8219, 1.5763, 3.1089, 1.4893, 2.3043, 3.3443, 3.4745, 2.6923], + device='cuda:0'), covar=tensor([0.1124, 0.1643, 0.0380, 0.2068, 0.0927, 0.0298, 0.0524, 0.0779], + device='cuda:0'), in_proj_covar=tensor([0.0276, 0.0305, 0.0269, 0.0298, 0.0285, 0.0248, 0.0374, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:12:47,158 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:12:59,456 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119455.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:13:04,851 INFO [train.py:901] (0/4) Epoch 15, batch 6300, loss[loss=0.2109, simple_loss=0.2891, pruned_loss=0.06636, over 8243.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.299, pruned_loss=0.0709, over 1618851.24 frames. 
], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:13:06,142 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.517e+02 3.087e+02 3.932e+02 1.134e+03, threshold=6.173e+02, percent-clipped=3.0 +2023-02-06 17:13:19,849 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6754, 2.2500, 3.3548, 2.5465, 3.1130, 2.6176, 2.2245, 1.7076], + device='cuda:0'), covar=tensor([0.4436, 0.4842, 0.1559, 0.3240, 0.2321, 0.2429, 0.1684, 0.5412], + device='cuda:0'), in_proj_covar=tensor([0.0907, 0.0914, 0.0751, 0.0880, 0.0951, 0.0838, 0.0712, 0.0790], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:13:31,640 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0927, 1.2380, 1.2228, 0.7268, 1.2145, 0.9768, 0.2237, 1.1650], + device='cuda:0'), covar=tensor([0.0294, 0.0286, 0.0266, 0.0405, 0.0337, 0.0772, 0.0601, 0.0234], + device='cuda:0'), in_proj_covar=tensor([0.0418, 0.0361, 0.0313, 0.0419, 0.0347, 0.0506, 0.0375, 0.0386], + device='cuda:0'), out_proj_covar=tensor([1.1514e-04, 9.6950e-05, 8.3743e-05, 1.1325e-04, 9.3858e-05, 1.4731e-04, + 1.0318e-04, 1.0472e-04], device='cuda:0') +2023-02-06 17:13:41,043 INFO [train.py:901] (0/4) Epoch 15, batch 6350, loss[loss=0.2347, simple_loss=0.2922, pruned_loss=0.08857, over 7432.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2991, pruned_loss=0.07141, over 1612360.54 frames. ], batch size: 17, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:13:53,784 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:14:07,836 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119552.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:14:15,092 INFO [train.py:901] (0/4) Epoch 15, batch 6400, loss[loss=0.193, simple_loss=0.2892, pruned_loss=0.0484, over 8104.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2991, pruned_loss=0.0713, over 1611708.89 frames. ], batch size: 23, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:14:16,455 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 3.023e+02 3.752e+02 7.818e+02, threshold=6.047e+02, percent-clipped=4.0 +2023-02-06 17:14:20,031 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119570.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:14:41,458 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4201, 1.6438, 1.6902, 0.9120, 1.7132, 1.3068, 0.2555, 1.5866], + device='cuda:0'), covar=tensor([0.0333, 0.0246, 0.0245, 0.0395, 0.0278, 0.0733, 0.0648, 0.0188], + device='cuda:0'), in_proj_covar=tensor([0.0420, 0.0362, 0.0314, 0.0421, 0.0348, 0.0507, 0.0376, 0.0388], + device='cuda:0'), out_proj_covar=tensor([1.1572e-04, 9.7260e-05, 8.4003e-05, 1.1390e-04, 9.4212e-05, 1.4753e-04, + 1.0361e-04, 1.0534e-04], device='cuda:0') +2023-02-06 17:14:49,984 INFO [train.py:901] (0/4) Epoch 15, batch 6450, loss[loss=0.1823, simple_loss=0.2677, pruned_loss=0.0485, over 7808.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2991, pruned_loss=0.07129, over 1612517.64 frames. ], batch size: 19, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:15:24,240 INFO [train.py:901] (0/4) Epoch 15, batch 6500, loss[loss=0.2331, simple_loss=0.3063, pruned_loss=0.07994, over 8086.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2988, pruned_loss=0.07116, over 1611042.30 frames. 
], batch size: 21, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:15:25,570 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.561e+02 2.888e+02 3.578e+02 6.995e+02, threshold=5.776e+02, percent-clipped=4.0 +2023-02-06 17:15:38,586 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:15:58,716 INFO [train.py:901] (0/4) Epoch 15, batch 6550, loss[loss=0.2226, simple_loss=0.3002, pruned_loss=0.07246, over 8535.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2991, pruned_loss=0.0707, over 1614762.97 frames. ], batch size: 39, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:16:14,596 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1970, 1.9199, 2.6296, 2.1935, 2.3539, 2.2573, 1.8394, 1.2093], + device='cuda:0'), covar=tensor([0.4443, 0.4040, 0.1447, 0.2824, 0.2101, 0.2229, 0.1699, 0.4341], + device='cuda:0'), in_proj_covar=tensor([0.0909, 0.0918, 0.0756, 0.0881, 0.0953, 0.0841, 0.0716, 0.0794], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:16:29,721 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 17:16:34,447 INFO [train.py:901] (0/4) Epoch 15, batch 6600, loss[loss=0.2534, simple_loss=0.332, pruned_loss=0.08738, over 8459.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2995, pruned_loss=0.07086, over 1608441.93 frames. ], batch size: 27, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:16:35,791 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.456e+02 2.938e+02 3.854e+02 9.901e+02, threshold=5.877e+02, percent-clipped=5.0 +2023-02-06 17:16:48,573 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 17:17:05,449 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119808.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:08,621 INFO [train.py:901] (0/4) Epoch 15, batch 6650, loss[loss=0.2201, simple_loss=0.2968, pruned_loss=0.07171, over 7659.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2997, pruned_loss=0.07124, over 1608445.50 frames. ], batch size: 19, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:17:17,667 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119826.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:22,300 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:36,358 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:44,390 INFO [train.py:901] (0/4) Epoch 15, batch 6700, loss[loss=0.2079, simple_loss=0.2891, pruned_loss=0.06336, over 8290.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2982, pruned_loss=0.07066, over 1607314.92 frames. ], batch size: 23, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:17:45,744 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.601e+02 2.951e+02 3.516e+02 8.618e+02, threshold=5.902e+02, percent-clipped=2.0 +2023-02-06 17:17:46,267 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 17:17:48,208 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-06 17:17:53,411 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:18:19,558 INFO [train.py:901] (0/4) Epoch 15, batch 6750, loss[loss=0.2128, simple_loss=0.2976, pruned_loss=0.06403, over 8334.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2966, pruned_loss=0.06989, over 1605457.60 frames. ], batch size: 26, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:18:55,246 INFO [train.py:901] (0/4) Epoch 15, batch 6800, loss[loss=0.2127, simple_loss=0.2949, pruned_loss=0.06521, over 8345.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2972, pruned_loss=0.06957, over 1611359.03 frames. ], batch size: 26, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:18:57,360 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.559e+02 3.032e+02 3.835e+02 7.300e+02, threshold=6.064e+02, percent-clipped=2.0 +2023-02-06 17:19:03,578 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 17:19:15,432 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:19:21,553 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-120000.pt +2023-02-06 17:19:32,107 INFO [train.py:901] (0/4) Epoch 15, batch 6850, loss[loss=0.339, simple_loss=0.3796, pruned_loss=0.1492, over 7059.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2973, pruned_loss=0.06947, over 1610968.62 frames. ], batch size: 73, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:19:33,898 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 17:19:41,642 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=120027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:19:46,726 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2558, 1.9665, 2.6921, 2.2388, 2.6007, 2.2042, 1.8274, 1.4106], + device='cuda:0'), covar=tensor([0.4775, 0.4364, 0.1509, 0.2956, 0.2043, 0.2514, 0.1785, 0.4400], + device='cuda:0'), in_proj_covar=tensor([0.0911, 0.0921, 0.0757, 0.0881, 0.0952, 0.0843, 0.0718, 0.0796], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:19:53,401 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 17:20:06,209 INFO [train.py:901] (0/4) Epoch 15, batch 6900, loss[loss=0.2107, simple_loss=0.2915, pruned_loss=0.06492, over 7199.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2972, pruned_loss=0.06952, over 1611507.73 frames. 
], batch size: 16, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:20:07,530 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.397e+02 2.973e+02 3.506e+02 9.980e+02, threshold=5.947e+02, percent-clipped=2.0 +2023-02-06 17:20:23,943 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0027, 1.3518, 1.6023, 1.2194, 0.9167, 1.4375, 1.5317, 1.4447], + device='cuda:0'), covar=tensor([0.0478, 0.1269, 0.1737, 0.1482, 0.0631, 0.1488, 0.0743, 0.0635], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0156, 0.0100, 0.0163, 0.0113, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 17:20:42,260 INFO [train.py:901] (0/4) Epoch 15, batch 6950, loss[loss=0.213, simple_loss=0.3044, pruned_loss=0.06081, over 8335.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2973, pruned_loss=0.06937, over 1613528.33 frames. ], batch size: 25, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:21:02,380 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=120142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:21:03,577 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 17:21:11,749 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9938, 1.6676, 1.8379, 1.6229, 1.0726, 1.6523, 2.3554, 2.1642], + device='cuda:0'), covar=tensor([0.0404, 0.1282, 0.1703, 0.1389, 0.0609, 0.1546, 0.0600, 0.0558], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 17:21:11,999 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-06 17:21:16,260 INFO [train.py:901] (0/4) Epoch 15, batch 7000, loss[loss=0.2021, simple_loss=0.2785, pruned_loss=0.06286, over 7547.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2969, pruned_loss=0.06916, over 1612649.35 frames. ], batch size: 18, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:21:17,612 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.303e+02 2.879e+02 3.620e+02 6.461e+02, threshold=5.757e+02, percent-clipped=3.0 +2023-02-06 17:21:22,337 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4404, 4.3388, 3.9310, 2.0128, 3.9478, 3.9541, 4.0647, 3.7054], + device='cuda:0'), covar=tensor([0.0696, 0.0510, 0.0896, 0.4494, 0.0813, 0.1067, 0.1124, 0.0998], + device='cuda:0'), in_proj_covar=tensor([0.0497, 0.0409, 0.0417, 0.0513, 0.0408, 0.0415, 0.0397, 0.0361], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:21:51,897 INFO [train.py:901] (0/4) Epoch 15, batch 7050, loss[loss=0.2232, simple_loss=0.2811, pruned_loss=0.08262, over 7706.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2966, pruned_loss=0.06936, over 1609648.84 frames. ], batch size: 18, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:22:15,018 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:22:26,166 INFO [train.py:901] (0/4) Epoch 15, batch 7100, loss[loss=0.2259, simple_loss=0.3036, pruned_loss=0.07406, over 8098.00 frames. 
], tot_loss[loss=0.2168, simple_loss=0.296, pruned_loss=0.06884, over 1610670.56 frames. ], batch size: 23, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:22:27,488 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.428e+02 3.078e+02 4.147e+02 9.225e+02, threshold=6.156e+02, percent-clipped=10.0 +2023-02-06 17:22:32,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:22:35,797 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4735, 1.9832, 3.3072, 1.3303, 2.3614, 1.9169, 1.5980, 2.2914], + device='cuda:0'), covar=tensor([0.1783, 0.2099, 0.0743, 0.4065, 0.1602, 0.2810, 0.1949, 0.2195], + device='cuda:0'), in_proj_covar=tensor([0.0500, 0.0555, 0.0538, 0.0606, 0.0628, 0.0569, 0.0498, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:22:58,518 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-06 17:23:00,845 INFO [train.py:901] (0/4) Epoch 15, batch 7150, loss[loss=0.2064, simple_loss=0.2881, pruned_loss=0.06238, over 8299.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2952, pruned_loss=0.06843, over 1611101.15 frames. ], batch size: 48, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:23:35,467 INFO [train.py:901] (0/4) Epoch 15, batch 7200, loss[loss=0.239, simple_loss=0.3222, pruned_loss=0.07789, over 8353.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2958, pruned_loss=0.06907, over 1609841.11 frames. ], batch size: 24, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:23:36,812 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.418e+02 2.853e+02 3.692e+02 6.645e+02, threshold=5.707e+02, percent-clipped=2.0 +2023-02-06 17:23:49,344 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6281, 4.6683, 4.0880, 1.8974, 4.1337, 4.1933, 4.2429, 3.9713], + device='cuda:0'), covar=tensor([0.0710, 0.0555, 0.1174, 0.5454, 0.0875, 0.1095, 0.1313, 0.0955], + device='cuda:0'), in_proj_covar=tensor([0.0498, 0.0415, 0.0418, 0.0518, 0.0410, 0.0420, 0.0403, 0.0364], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:24:00,219 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:24:10,194 INFO [train.py:901] (0/4) Epoch 15, batch 7250, loss[loss=0.241, simple_loss=0.3176, pruned_loss=0.08225, over 8639.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2942, pruned_loss=0.06811, over 1604836.51 frames. ], batch size: 34, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:24:17,873 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:24:45,987 INFO [train.py:901] (0/4) Epoch 15, batch 7300, loss[loss=0.2265, simple_loss=0.3026, pruned_loss=0.07518, over 8713.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2958, pruned_loss=0.06916, over 1605002.36 frames. 
], batch size: 34, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:24:47,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.423e+02 2.925e+02 3.483e+02 5.889e+02, threshold=5.849e+02, percent-clipped=3.0 +2023-02-06 17:25:20,537 INFO [train.py:901] (0/4) Epoch 15, batch 7350, loss[loss=0.164, simple_loss=0.2478, pruned_loss=0.04008, over 7202.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2966, pruned_loss=0.06937, over 1607857.78 frames. ], batch size: 16, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:25:45,388 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 17:25:55,000 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9143, 1.3112, 1.5604, 1.1643, 0.8263, 1.3578, 1.5085, 1.2897], + device='cuda:0'), covar=tensor([0.0498, 0.1324, 0.1778, 0.1534, 0.0634, 0.1557, 0.0757, 0.0714], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0100, 0.0162, 0.0113, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 17:25:56,239 INFO [train.py:901] (0/4) Epoch 15, batch 7400, loss[loss=0.2028, simple_loss=0.2917, pruned_loss=0.05701, over 8317.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2968, pruned_loss=0.06956, over 1608228.91 frames. ], batch size: 25, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:25:57,543 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.487e+02 3.190e+02 4.160e+02 9.613e+02, threshold=6.380e+02, percent-clipped=9.0 +2023-02-06 17:26:04,618 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 17:26:30,892 INFO [train.py:901] (0/4) Epoch 15, batch 7450, loss[loss=0.1743, simple_loss=0.2618, pruned_loss=0.04338, over 8093.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2989, pruned_loss=0.07084, over 1609751.10 frames. ], batch size: 21, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:26:42,795 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 17:27:06,396 INFO [train.py:901] (0/4) Epoch 15, batch 7500, loss[loss=0.2228, simple_loss=0.3084, pruned_loss=0.06866, over 8337.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2991, pruned_loss=0.07054, over 1615141.29 frames. 
], batch size: 26, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:27:07,304 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2495, 2.5006, 3.0272, 1.5478, 3.2018, 1.7365, 1.4362, 2.1872], + device='cuda:0'), covar=tensor([0.0721, 0.0340, 0.0248, 0.0692, 0.0474, 0.0776, 0.0900, 0.0413], + device='cuda:0'), in_proj_covar=tensor([0.0418, 0.0362, 0.0312, 0.0416, 0.0347, 0.0503, 0.0369, 0.0384], + device='cuda:0'), out_proj_covar=tensor([1.1535e-04, 9.7416e-05, 8.3435e-05, 1.1223e-04, 9.3515e-05, 1.4610e-04, + 1.0167e-04, 1.0432e-04], device='cuda:0') +2023-02-06 17:27:07,754 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.388e+02 2.853e+02 3.831e+02 7.536e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 17:27:19,906 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9433, 1.6366, 2.2550, 1.8541, 2.1027, 1.9124, 1.6162, 1.0479], + device='cuda:0'), covar=tensor([0.4326, 0.3728, 0.1272, 0.2504, 0.1813, 0.2295, 0.1713, 0.3831], + device='cuda:0'), in_proj_covar=tensor([0.0909, 0.0918, 0.0755, 0.0881, 0.0951, 0.0841, 0.0717, 0.0793], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:27:27,351 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=120694.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:27:30,018 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8173, 1.2724, 3.9527, 1.3583, 3.4951, 3.2755, 3.5670, 3.4568], + device='cuda:0'), covar=tensor([0.0583, 0.4594, 0.0612, 0.4059, 0.1128, 0.0984, 0.0637, 0.0741], + device='cuda:0'), in_proj_covar=tensor([0.0564, 0.0616, 0.0639, 0.0590, 0.0662, 0.0568, 0.0560, 0.0621], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:27:40,240 INFO [train.py:901] (0/4) Epoch 15, batch 7550, loss[loss=0.2283, simple_loss=0.3073, pruned_loss=0.07461, over 7666.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2987, pruned_loss=0.07032, over 1617649.65 frames. ], batch size: 19, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:28:14,839 INFO [train.py:901] (0/4) Epoch 15, batch 7600, loss[loss=0.2198, simple_loss=0.303, pruned_loss=0.06825, over 8138.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2981, pruned_loss=0.07036, over 1616282.36 frames. ], batch size: 22, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:28:16,206 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.426e+02 3.048e+02 3.965e+02 8.844e+02, threshold=6.096e+02, percent-clipped=6.0 +2023-02-06 17:28:22,209 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 17:28:46,876 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1819, 1.0535, 1.2612, 1.0858, 0.9084, 1.2959, 0.0414, 0.9344], + device='cuda:0'), covar=tensor([0.1758, 0.1590, 0.0519, 0.0964, 0.3260, 0.0558, 0.2494, 0.1403], + device='cuda:0'), in_proj_covar=tensor([0.0172, 0.0179, 0.0111, 0.0214, 0.0257, 0.0115, 0.0162, 0.0176], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:28:50,145 INFO [train.py:901] (0/4) Epoch 15, batch 7650, loss[loss=0.2072, simple_loss=0.2922, pruned_loss=0.06113, over 8294.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2985, pruned_loss=0.07066, over 1617165.55 frames. 
], batch size: 23, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:29:23,492 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6444, 1.5071, 3.0722, 1.2131, 2.1323, 3.3763, 3.4378, 2.9010], + device='cuda:0'), covar=tensor([0.1268, 0.1767, 0.0406, 0.2308, 0.1113, 0.0265, 0.0632, 0.0596], + device='cuda:0'), in_proj_covar=tensor([0.0279, 0.0308, 0.0272, 0.0301, 0.0287, 0.0249, 0.0377, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:29:25,336 INFO [train.py:901] (0/4) Epoch 15, batch 7700, loss[loss=0.2735, simple_loss=0.345, pruned_loss=0.101, over 8475.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.298, pruned_loss=0.07074, over 1612082.51 frames. ], batch size: 25, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:29:27,396 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.503e+02 3.087e+02 4.175e+02 9.539e+02, threshold=6.174e+02, percent-clipped=7.0 +2023-02-06 17:29:35,157 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0304, 2.6953, 3.6937, 1.7898, 1.7975, 3.6356, 0.8148, 2.0880], + device='cuda:0'), covar=tensor([0.1946, 0.1305, 0.0292, 0.2205, 0.3522, 0.0304, 0.2590, 0.1835], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0181, 0.0112, 0.0215, 0.0259, 0.0116, 0.0162, 0.0177], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:29:52,766 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 17:30:01,558 INFO [train.py:901] (0/4) Epoch 15, batch 7750, loss[loss=0.2145, simple_loss=0.3017, pruned_loss=0.06371, over 8197.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2978, pruned_loss=0.07051, over 1609347.20 frames. ], batch size: 23, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:30:36,084 INFO [train.py:901] (0/4) Epoch 15, batch 7800, loss[loss=0.2325, simple_loss=0.3332, pruned_loss=0.06595, over 8279.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2972, pruned_loss=0.07016, over 1607872.13 frames. ], batch size: 23, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:30:38,106 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 2.376e+02 2.783e+02 3.266e+02 5.993e+02, threshold=5.565e+02, percent-clipped=0.0 +2023-02-06 17:31:09,475 INFO [train.py:901] (0/4) Epoch 15, batch 7850, loss[loss=0.2099, simple_loss=0.2978, pruned_loss=0.061, over 8553.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2987, pruned_loss=0.07123, over 1609392.29 frames. ], batch size: 49, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:31:14,868 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:31:26,046 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121038.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:31:42,564 INFO [train.py:901] (0/4) Epoch 15, batch 7900, loss[loss=0.2245, simple_loss=0.3091, pruned_loss=0.06989, over 8332.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2985, pruned_loss=0.07054, over 1614873.46 frames. 
], batch size: 25, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:31:44,516 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 3.139e+02 4.114e+02 1.036e+03, threshold=6.279e+02, percent-clipped=8.0 +2023-02-06 17:32:15,970 INFO [train.py:901] (0/4) Epoch 15, batch 7950, loss[loss=0.2373, simple_loss=0.3205, pruned_loss=0.07703, over 8457.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2995, pruned_loss=0.07065, over 1618225.71 frames. ], batch size: 25, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:32:42,027 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:32:48,354 INFO [train.py:901] (0/4) Epoch 15, batch 8000, loss[loss=0.2351, simple_loss=0.317, pruned_loss=0.07662, over 8511.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3005, pruned_loss=0.07167, over 1618465.26 frames. ], batch size: 28, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:32:50,387 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.992e+02 3.696e+02 7.694e+02, threshold=5.984e+02, percent-clipped=2.0 +2023-02-06 17:33:01,506 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:33:22,841 INFO [train.py:901] (0/4) Epoch 15, batch 8050, loss[loss=0.1895, simple_loss=0.2661, pruned_loss=0.05644, over 7522.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2989, pruned_loss=0.07151, over 1599696.96 frames. ], batch size: 18, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:33:46,150 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-15.pt +2023-02-06 17:33:57,581 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 17:34:00,918 INFO [train.py:901] (0/4) Epoch 16, batch 0, loss[loss=0.2319, simple_loss=0.3179, pruned_loss=0.07293, over 8515.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3179, pruned_loss=0.07293, over 8515.00 frames. ], batch size: 28, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:34:00,919 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 17:34:11,306 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5537, 1.5936, 2.7108, 1.2640, 2.0183, 2.8857, 3.0769, 2.4349], + device='cuda:0'), covar=tensor([0.1385, 0.1671, 0.0440, 0.2557, 0.0941, 0.0394, 0.0527, 0.0856], + device='cuda:0'), in_proj_covar=tensor([0.0280, 0.0309, 0.0271, 0.0301, 0.0288, 0.0248, 0.0377, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:34:11,910 INFO [train.py:935] (0/4) Epoch 16, validation: loss=0.1795, simple_loss=0.2801, pruned_loss=0.03944, over 944034.00 frames. +2023-02-06 17:34:11,911 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 17:34:24,910 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.543e+02 3.194e+02 4.084e+02 8.334e+02, threshold=6.389e+02, percent-clipped=7.0 +2023-02-06 17:34:26,234 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 17:34:47,413 INFO [train.py:901] (0/4) Epoch 16, batch 50, loss[loss=0.2304, simple_loss=0.3232, pruned_loss=0.06878, over 8463.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.295, pruned_loss=0.06775, over 361350.08 frames. 
], batch size: 25, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:35:02,273 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 17:35:02,487 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4507, 2.8246, 3.4042, 1.8475, 3.4849, 2.2340, 1.6268, 2.2199], + device='cuda:0'), covar=tensor([0.0695, 0.0334, 0.0168, 0.0586, 0.0348, 0.0569, 0.0740, 0.0407], + device='cuda:0'), in_proj_covar=tensor([0.0417, 0.0359, 0.0311, 0.0416, 0.0347, 0.0506, 0.0368, 0.0381], + device='cuda:0'), out_proj_covar=tensor([1.1499e-04, 9.6322e-05, 8.3087e-05, 1.1218e-04, 9.3558e-05, 1.4700e-04, + 1.0127e-04, 1.0336e-04], device='cuda:0') +2023-02-06 17:35:09,786 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121329.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:35:20,723 INFO [train.py:901] (0/4) Epoch 16, batch 100, loss[loss=0.2039, simple_loss=0.2929, pruned_loss=0.05743, over 8090.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.3, pruned_loss=0.07083, over 638816.16 frames. ], batch size: 21, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:35:24,727 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 17:35:33,276 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:35:33,867 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.470e+02 2.913e+02 3.674e+02 6.203e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-06 17:35:42,089 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4043, 2.0283, 2.8034, 2.2044, 2.6743, 2.3195, 2.0680, 1.4485], + device='cuda:0'), covar=tensor([0.4476, 0.4258, 0.1482, 0.3589, 0.2354, 0.2508, 0.1695, 0.4683], + device='cuda:0'), in_proj_covar=tensor([0.0906, 0.0914, 0.0751, 0.0883, 0.0946, 0.0837, 0.0717, 0.0793], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:35:53,848 INFO [train.py:901] (0/4) Epoch 16, batch 150, loss[loss=0.2509, simple_loss=0.3223, pruned_loss=0.08982, over 8590.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3002, pruned_loss=0.07152, over 856532.45 frames. ], batch size: 31, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:36:04,295 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:15,647 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:21,671 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:30,075 INFO [train.py:901] (0/4) Epoch 16, batch 200, loss[loss=0.1758, simple_loss=0.247, pruned_loss=0.05231, over 7404.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3009, pruned_loss=0.07254, over 1022029.11 frames. 
], batch size: 17, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:36:43,673 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.385e+02 2.940e+02 3.661e+02 7.455e+02, threshold=5.881e+02, percent-clipped=4.0 +2023-02-06 17:36:46,560 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6446, 1.9188, 2.0827, 1.4042, 2.1879, 1.5448, 0.5543, 1.8716], + device='cuda:0'), covar=tensor([0.0478, 0.0282, 0.0187, 0.0388, 0.0294, 0.0655, 0.0670, 0.0219], + device='cuda:0'), in_proj_covar=tensor([0.0415, 0.0359, 0.0310, 0.0415, 0.0346, 0.0504, 0.0367, 0.0380], + device='cuda:0'), out_proj_covar=tensor([1.1452e-04, 9.6256e-05, 8.2914e-05, 1.1203e-04, 9.3313e-05, 1.4621e-04, + 1.0085e-04, 1.0301e-04], device='cuda:0') +2023-02-06 17:36:51,926 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7148, 5.7860, 5.1142, 2.1942, 5.1172, 5.4380, 5.3785, 5.2022], + device='cuda:0'), covar=tensor([0.0591, 0.0423, 0.0848, 0.4711, 0.0713, 0.0678, 0.1029, 0.0646], + device='cuda:0'), in_proj_covar=tensor([0.0488, 0.0404, 0.0409, 0.0512, 0.0401, 0.0407, 0.0392, 0.0355], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:36:53,338 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:37:04,017 INFO [train.py:901] (0/4) Epoch 16, batch 250, loss[loss=0.2211, simple_loss=0.3051, pruned_loss=0.06851, over 8514.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3001, pruned_loss=0.07175, over 1150620.35 frames. ], batch size: 39, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:37:18,656 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 17:37:24,805 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:37:28,152 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 17:37:30,229 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3923, 4.3772, 3.9741, 1.9577, 3.9360, 3.8672, 3.9270, 3.7169], + device='cuda:0'), covar=tensor([0.0733, 0.0563, 0.0957, 0.4496, 0.0821, 0.1148, 0.1277, 0.0956], + device='cuda:0'), in_proj_covar=tensor([0.0493, 0.0409, 0.0412, 0.0516, 0.0404, 0.0411, 0.0398, 0.0358], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:37:39,709 INFO [train.py:901] (0/4) Epoch 16, batch 300, loss[loss=0.2191, simple_loss=0.2981, pruned_loss=0.07011, over 8577.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2997, pruned_loss=0.07137, over 1255926.41 frames. 
], batch size: 39, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:37:50,101 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7517, 1.6157, 2.3651, 1.6366, 1.2619, 2.4131, 0.2837, 1.3375], + device='cuda:0'), covar=tensor([0.1993, 0.1603, 0.0370, 0.1640, 0.3550, 0.0447, 0.3109, 0.1773], + device='cuda:0'), in_proj_covar=tensor([0.0174, 0.0180, 0.0111, 0.0214, 0.0256, 0.0115, 0.0162, 0.0177], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:37:54,075 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.529e+02 3.079e+02 3.820e+02 7.739e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-06 17:38:14,590 INFO [train.py:901] (0/4) Epoch 16, batch 350, loss[loss=0.2139, simple_loss=0.2772, pruned_loss=0.0753, over 7786.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2994, pruned_loss=0.07126, over 1332449.97 frames. ], batch size: 19, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:38:45,986 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:38:49,787 INFO [train.py:901] (0/4) Epoch 16, batch 400, loss[loss=0.2203, simple_loss=0.3023, pruned_loss=0.06916, over 8504.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3, pruned_loss=0.07175, over 1396656.80 frames. ], batch size: 26, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:39:04,288 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.467e+02 3.087e+02 3.761e+02 6.357e+02, threshold=6.175e+02, percent-clipped=1.0 +2023-02-06 17:39:09,177 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121673.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:39:25,134 INFO [train.py:901] (0/4) Epoch 16, batch 450, loss[loss=0.2388, simple_loss=0.3324, pruned_loss=0.07259, over 8460.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.301, pruned_loss=0.07141, over 1447582.57 frames. ], batch size: 27, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:39:52,393 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:39:59,040 INFO [train.py:901] (0/4) Epoch 16, batch 500, loss[loss=0.235, simple_loss=0.312, pruned_loss=0.07906, over 8100.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3013, pruned_loss=0.07189, over 1485022.81 frames. ], batch size: 23, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:40:00,112 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.39 vs. limit=5.0 +2023-02-06 17:40:10,960 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:40:14,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.435e+02 2.838e+02 3.555e+02 6.989e+02, threshold=5.677e+02, percent-clipped=1.0 +2023-02-06 17:40:17,014 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:40:29,902 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121788.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:40:35,814 INFO [train.py:901] (0/4) Epoch 16, batch 550, loss[loss=0.2389, simple_loss=0.319, pruned_loss=0.07936, over 8334.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3009, pruned_loss=0.07138, over 1512640.63 frames. 
], batch size: 26, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:02,682 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0241, 1.5392, 3.4373, 1.4517, 2.3245, 3.7796, 3.8938, 3.2391], + device='cuda:0'), covar=tensor([0.1113, 0.1798, 0.0302, 0.2130, 0.1073, 0.0225, 0.0367, 0.0587], + device='cuda:0'), in_proj_covar=tensor([0.0281, 0.0310, 0.0273, 0.0301, 0.0292, 0.0248, 0.0381, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:41:09,179 INFO [train.py:901] (0/4) Epoch 16, batch 600, loss[loss=0.1837, simple_loss=0.2663, pruned_loss=0.05053, over 8224.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2998, pruned_loss=0.07099, over 1535264.28 frames. ], batch size: 22, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:15,570 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 17:41:22,438 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.425e+02 3.086e+02 4.175e+02 1.417e+03, threshold=6.173e+02, percent-clipped=9.0 +2023-02-06 17:41:26,598 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 17:41:32,816 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6739, 2.2304, 4.2757, 1.4942, 2.9973, 2.2262, 1.7156, 2.9005], + device='cuda:0'), covar=tensor([0.1815, 0.2598, 0.0682, 0.4215, 0.1674, 0.2991, 0.2138, 0.2376], + device='cuda:0'), in_proj_covar=tensor([0.0504, 0.0556, 0.0538, 0.0608, 0.0629, 0.0576, 0.0501, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:41:36,811 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:41:44,746 INFO [train.py:901] (0/4) Epoch 16, batch 650, loss[loss=0.227, simple_loss=0.3023, pruned_loss=0.07586, over 8335.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2987, pruned_loss=0.07048, over 1549211.09 frames. 
], batch size: 26, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:45,640 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:41:46,947 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6289, 2.1379, 3.4091, 1.4015, 2.4042, 1.9613, 1.7804, 2.2099], + device='cuda:0'), covar=tensor([0.1635, 0.1994, 0.0666, 0.3947, 0.1623, 0.2836, 0.1821, 0.2265], + device='cuda:0'), in_proj_covar=tensor([0.0504, 0.0555, 0.0538, 0.0608, 0.0629, 0.0575, 0.0500, 0.0621], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:42:02,840 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:03,506 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0156, 2.2797, 1.8336, 2.7313, 1.3619, 1.6323, 1.8956, 2.2428], + device='cuda:0'), covar=tensor([0.0737, 0.0764, 0.0952, 0.0377, 0.1137, 0.1326, 0.0941, 0.0776], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0204, 0.0251, 0.0214, 0.0215, 0.0251, 0.0256, 0.0216], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 17:42:18,672 INFO [train.py:901] (0/4) Epoch 16, batch 700, loss[loss=0.2169, simple_loss=0.3086, pruned_loss=0.06264, over 8258.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2976, pruned_loss=0.06954, over 1558923.19 frames. ], batch size: 24, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:42:32,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.496e+02 2.978e+02 3.542e+02 1.118e+03, threshold=5.957e+02, percent-clipped=1.0 +2023-02-06 17:42:33,590 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:53,714 INFO [train.py:901] (0/4) Epoch 16, batch 750, loss[loss=0.2327, simple_loss=0.3054, pruned_loss=0.07997, over 8474.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2973, pruned_loss=0.06938, over 1570236.11 frames. ], batch size: 29, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:42:56,595 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-122000.pt +2023-02-06 17:43:02,798 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7232, 1.4951, 2.8164, 1.3233, 2.0619, 3.0301, 3.1816, 2.5757], + device='cuda:0'), covar=tensor([0.1067, 0.1502, 0.0370, 0.2085, 0.0952, 0.0293, 0.0644, 0.0623], + device='cuda:0'), in_proj_covar=tensor([0.0280, 0.0308, 0.0272, 0.0299, 0.0291, 0.0248, 0.0379, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 17:43:14,285 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 17:43:23,876 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 17:43:28,681 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122044.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:43:29,777 INFO [train.py:901] (0/4) Epoch 16, batch 800, loss[loss=0.2002, simple_loss=0.2768, pruned_loss=0.06182, over 7817.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2986, pruned_loss=0.07013, over 1581664.45 frames. 
], batch size: 20, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:43:43,080 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.422e+02 2.925e+02 3.576e+02 6.712e+02, threshold=5.851e+02, percent-clipped=2.0 +2023-02-06 17:43:45,459 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122069.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:44:03,130 INFO [train.py:901] (0/4) Epoch 16, batch 850, loss[loss=0.2173, simple_loss=0.2986, pruned_loss=0.06801, over 8306.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2981, pruned_loss=0.06933, over 1595186.80 frames. ], batch size: 23, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:44:31,619 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:34,936 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:38,755 INFO [train.py:901] (0/4) Epoch 16, batch 900, loss[loss=0.2122, simple_loss=0.2939, pruned_loss=0.06528, over 8138.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2991, pruned_loss=0.06984, over 1600963.30 frames. ], batch size: 22, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:44:52,346 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:52,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.482e+02 3.085e+02 4.013e+02 7.148e+02, threshold=6.170e+02, percent-clipped=4.0 +2023-02-06 17:45:12,889 INFO [train.py:901] (0/4) Epoch 16, batch 950, loss[loss=0.2632, simple_loss=0.3148, pruned_loss=0.1058, over 5947.00 frames. ], tot_loss[loss=0.221, simple_loss=0.3, pruned_loss=0.07104, over 1600676.60 frames. ], batch size: 13, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:45:24,847 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:45:40,115 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 17:45:49,037 INFO [train.py:901] (0/4) Epoch 16, batch 1000, loss[loss=0.2343, simple_loss=0.3113, pruned_loss=0.07864, over 8197.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2987, pruned_loss=0.07013, over 1603702.22 frames. ], batch size: 23, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:46:03,407 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.462e+02 3.004e+02 3.600e+02 8.525e+02, threshold=6.009e+02, percent-clipped=4.0 +2023-02-06 17:46:14,173 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 17:46:23,697 INFO [train.py:901] (0/4) Epoch 16, batch 1050, loss[loss=0.2286, simple_loss=0.3127, pruned_loss=0.07227, over 8361.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2993, pruned_loss=0.07062, over 1609348.27 frames. ], batch size: 24, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:46:26,430 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-06 17:46:32,121 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6844, 1.6333, 2.0824, 1.5515, 1.2096, 2.0929, 0.1828, 1.2292], + device='cuda:0'), covar=tensor([0.2021, 0.1348, 0.0460, 0.1293, 0.3350, 0.0466, 0.2770, 0.1714], + device='cuda:0'), in_proj_covar=tensor([0.0175, 0.0180, 0.0112, 0.0211, 0.0255, 0.0116, 0.0161, 0.0176], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:46:34,622 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:46:40,811 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3300, 1.5377, 2.0370, 1.2432, 1.3850, 1.5876, 1.4336, 1.4268], + device='cuda:0'), covar=tensor([0.1767, 0.2318, 0.0898, 0.3996, 0.1754, 0.2954, 0.2010, 0.2096], + device='cuda:0'), in_proj_covar=tensor([0.0503, 0.0555, 0.0538, 0.0605, 0.0625, 0.0569, 0.0499, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:46:51,297 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:46:57,875 INFO [train.py:901] (0/4) Epoch 16, batch 1100, loss[loss=0.1954, simple_loss=0.2868, pruned_loss=0.05202, over 8474.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2982, pruned_loss=0.06996, over 1612292.86 frames. ], batch size: 25, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:47:04,883 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 17:47:12,623 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.699e+02 3.204e+02 3.982e+02 8.590e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 17:47:33,544 INFO [train.py:901] (0/4) Epoch 16, batch 1150, loss[loss=0.2314, simple_loss=0.3142, pruned_loss=0.07435, over 8469.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2985, pruned_loss=0.06987, over 1617763.03 frames. ], batch size: 27, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:47:38,306 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 17:47:54,847 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:47:58,515 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 17:48:07,557 INFO [train.py:901] (0/4) Epoch 16, batch 1200, loss[loss=0.2147, simple_loss=0.2903, pruned_loss=0.06959, over 7920.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2982, pruned_loss=0.06986, over 1616324.22 frames. ], batch size: 20, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:48:21,994 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.417e+02 3.007e+02 3.779e+02 1.089e+03, threshold=6.013e+02, percent-clipped=2.0 +2023-02-06 17:48:31,793 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:48:36,701 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122486.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:48:43,463 INFO [train.py:901] (0/4) Epoch 16, batch 1250, loss[loss=0.213, simple_loss=0.2864, pruned_loss=0.06977, over 8459.00 frames. 
], tot_loss[loss=0.2176, simple_loss=0.297, pruned_loss=0.06911, over 1620096.16 frames. ], batch size: 25, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:49:19,094 INFO [train.py:901] (0/4) Epoch 16, batch 1300, loss[loss=0.2187, simple_loss=0.2991, pruned_loss=0.06918, over 8297.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2975, pruned_loss=0.06858, over 1623337.50 frames. ], batch size: 23, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:49:26,978 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:49:33,314 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 3.105e+02 3.703e+02 6.719e+02, threshold=6.210e+02, percent-clipped=4.0 +2023-02-06 17:49:52,150 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4991, 2.6126, 1.7409, 2.2194, 2.0983, 1.3609, 1.9797, 2.0571], + device='cuda:0'), covar=tensor([0.1642, 0.0437, 0.1303, 0.0664, 0.0766, 0.1598, 0.1126, 0.1070], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0237, 0.0331, 0.0306, 0.0304, 0.0331, 0.0346, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:49:53,484 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4191, 2.3201, 4.5526, 1.9857, 2.6854, 5.1829, 5.2990, 4.4866], + device='cuda:0'), covar=tensor([0.1119, 0.1275, 0.0268, 0.1843, 0.1031, 0.0192, 0.0414, 0.0565], + device='cuda:0'), in_proj_covar=tensor([0.0281, 0.0309, 0.0273, 0.0302, 0.0295, 0.0249, 0.0381, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 17:49:54,923 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:49:56,067 INFO [train.py:901] (0/4) Epoch 16, batch 1350, loss[loss=0.2395, simple_loss=0.3212, pruned_loss=0.07888, over 8110.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2973, pruned_loss=0.06888, over 1616419.58 frames. ], batch size: 23, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:50:25,522 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9119, 2.1733, 1.7342, 2.5549, 1.2044, 1.5243, 1.7094, 2.0116], + device='cuda:0'), covar=tensor([0.0702, 0.0673, 0.0904, 0.0345, 0.1098, 0.1303, 0.0949, 0.0763], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0202, 0.0247, 0.0212, 0.0211, 0.0249, 0.0254, 0.0213], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 17:50:31,463 INFO [train.py:901] (0/4) Epoch 16, batch 1400, loss[loss=0.2138, simple_loss=0.3029, pruned_loss=0.06235, over 8339.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2983, pruned_loss=0.06893, over 1621473.55 frames. 
], batch size: 26, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:50:45,927 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.319e+02 2.799e+02 3.491e+02 7.123e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-06 17:50:49,404 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:50:55,471 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122681.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:50:56,957 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:51:05,780 INFO [train.py:901] (0/4) Epoch 16, batch 1450, loss[loss=0.1809, simple_loss=0.2566, pruned_loss=0.05258, over 7700.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2958, pruned_loss=0.06814, over 1619977.53 frames. ], batch size: 18, lr: 4.81e-03, grad_scale: 4.0 +2023-02-06 17:51:12,710 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 17:51:15,649 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:51:26,183 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-06 17:51:26,747 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3557, 1.7425, 2.6968, 1.2301, 1.7289, 1.6943, 1.4889, 1.8174], + device='cuda:0'), covar=tensor([0.1898, 0.2360, 0.0878, 0.4510, 0.1925, 0.3254, 0.2147, 0.2227], + device='cuda:0'), in_proj_covar=tensor([0.0506, 0.0558, 0.0541, 0.0610, 0.0628, 0.0573, 0.0502, 0.0621], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:51:42,664 INFO [train.py:901] (0/4) Epoch 16, batch 1500, loss[loss=0.2434, simple_loss=0.3308, pruned_loss=0.07803, over 8354.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.06856, over 1614902.37 frames. ], batch size: 24, lr: 4.81e-03, grad_scale: 4.0 +2023-02-06 17:51:56,875 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 2.515e+02 3.024e+02 4.111e+02 8.238e+02, threshold=6.047e+02, percent-clipped=9.0 +2023-02-06 17:52:16,422 INFO [train.py:901] (0/4) Epoch 16, batch 1550, loss[loss=0.2304, simple_loss=0.2818, pruned_loss=0.08949, over 7696.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2975, pruned_loss=0.06974, over 1617399.40 frames. ], batch size: 18, lr: 4.81e-03, grad_scale: 4.0 +2023-02-06 17:52:16,601 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122796.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:52:41,663 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:52:52,397 INFO [train.py:901] (0/4) Epoch 16, batch 1600, loss[loss=0.1849, simple_loss=0.2613, pruned_loss=0.05424, over 7783.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2948, pruned_loss=0.0687, over 1607961.60 frames. 
], batch size: 19, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:52:55,408 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122850.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:53:07,648 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.555e+02 3.178e+02 4.067e+02 1.179e+03, threshold=6.355e+02, percent-clipped=12.0 +2023-02-06 17:53:13,373 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:53:23,663 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:53:27,666 INFO [train.py:901] (0/4) Epoch 16, batch 1650, loss[loss=0.2148, simple_loss=0.2934, pruned_loss=0.06809, over 8089.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2938, pruned_loss=0.06767, over 1608439.14 frames. ], batch size: 21, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:53:49,584 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122928.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:54:02,293 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8056, 3.8015, 3.4616, 1.7721, 3.3771, 3.5087, 3.4097, 3.1982], + device='cuda:0'), covar=tensor([0.0970, 0.0697, 0.1111, 0.4809, 0.0892, 0.1089, 0.1451, 0.1020], + device='cuda:0'), in_proj_covar=tensor([0.0482, 0.0402, 0.0402, 0.0500, 0.0397, 0.0400, 0.0390, 0.0351], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:54:02,358 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122945.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:54:02,860 INFO [train.py:901] (0/4) Epoch 16, batch 1700, loss[loss=0.2671, simple_loss=0.34, pruned_loss=0.09711, over 8108.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2945, pruned_loss=0.06802, over 1609618.32 frames. ], batch size: 23, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:54:08,408 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:54:17,616 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.342e+02 2.881e+02 3.479e+02 7.679e+02, threshold=5.763e+02, percent-clipped=3.0 +2023-02-06 17:54:38,073 INFO [train.py:901] (0/4) Epoch 16, batch 1750, loss[loss=0.2121, simple_loss=0.3006, pruned_loss=0.06179, over 8100.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2953, pruned_loss=0.0679, over 1614984.53 frames. ], batch size: 23, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:55:00,433 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7222, 1.6794, 2.0813, 1.5260, 1.2503, 2.0950, 0.2907, 1.3203], + device='cuda:0'), covar=tensor([0.2267, 0.1775, 0.0451, 0.1626, 0.3237, 0.0430, 0.2615, 0.1565], + device='cuda:0'), in_proj_covar=tensor([0.0174, 0.0180, 0.0111, 0.0212, 0.0255, 0.0115, 0.0161, 0.0176], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 17:55:12,109 INFO [train.py:901] (0/4) Epoch 16, batch 1800, loss[loss=0.2145, simple_loss=0.2944, pruned_loss=0.06727, over 8587.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2951, pruned_loss=0.06836, over 1610719.95 frames. 
], batch size: 34, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:55:16,364 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:55:18,540 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-06 17:55:27,705 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.489e+02 2.922e+02 3.750e+02 7.056e+02, threshold=5.843e+02, percent-clipped=4.0 +2023-02-06 17:55:35,400 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123077.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:55:48,779 INFO [train.py:901] (0/4) Epoch 16, batch 1850, loss[loss=0.2055, simple_loss=0.2898, pruned_loss=0.06056, over 7977.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2951, pruned_loss=0.06823, over 1615141.31 frames. ], batch size: 21, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:55:55,607 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0506, 1.6658, 2.0990, 1.8603, 1.9830, 2.0176, 1.8470, 0.7743], + device='cuda:0'), covar=tensor([0.4702, 0.4131, 0.1615, 0.2739, 0.2059, 0.2563, 0.1705, 0.4246], + device='cuda:0'), in_proj_covar=tensor([0.0911, 0.0922, 0.0758, 0.0894, 0.0956, 0.0843, 0.0720, 0.0797], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:55:58,183 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8838, 6.1243, 5.2363, 2.5857, 5.3262, 5.7751, 5.5630, 5.3143], + device='cuda:0'), covar=tensor([0.0513, 0.0336, 0.0906, 0.3939, 0.0587, 0.0829, 0.1113, 0.0626], + device='cuda:0'), in_proj_covar=tensor([0.0495, 0.0411, 0.0413, 0.0513, 0.0408, 0.0411, 0.0401, 0.0360], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:56:22,235 INFO [train.py:901] (0/4) Epoch 16, batch 1900, loss[loss=0.2165, simple_loss=0.2883, pruned_loss=0.07233, over 7520.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2948, pruned_loss=0.06819, over 1613622.91 frames. ], batch size: 18, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:56:31,862 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3770, 2.0005, 2.7422, 2.2350, 2.6170, 2.2667, 1.9819, 1.3750], + device='cuda:0'), covar=tensor([0.4519, 0.4218, 0.1537, 0.3153, 0.2117, 0.2644, 0.1820, 0.4713], + device='cuda:0'), in_proj_covar=tensor([0.0914, 0.0924, 0.0760, 0.0896, 0.0958, 0.0844, 0.0721, 0.0799], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:56:36,271 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.569e+02 3.077e+02 4.069e+02 9.708e+02, threshold=6.154e+02, percent-clipped=7.0 +2023-02-06 17:56:50,196 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 17:56:57,741 INFO [train.py:901] (0/4) Epoch 16, batch 1950, loss[loss=0.2061, simple_loss=0.2816, pruned_loss=0.06527, over 7660.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2955, pruned_loss=0.0686, over 1607072.50 frames. ], batch size: 19, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:56:59,131 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-06 17:57:01,383 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:57:11,324 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 17:57:18,788 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123226.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:57:24,071 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:57:30,725 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 17:57:32,091 INFO [train.py:901] (0/4) Epoch 16, batch 2000, loss[loss=0.206, simple_loss=0.2912, pruned_loss=0.0604, over 8189.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.294, pruned_loss=0.06762, over 1609569.11 frames. ], batch size: 23, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:57:46,346 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.202e+02 2.631e+02 3.355e+02 6.225e+02, threshold=5.263e+02, percent-clipped=1.0 +2023-02-06 17:58:05,887 INFO [train.py:901] (0/4) Epoch 16, batch 2050, loss[loss=0.2131, simple_loss=0.2971, pruned_loss=0.06458, over 8182.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2939, pruned_loss=0.06803, over 1601390.12 frames. ], batch size: 23, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:58:31,864 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123332.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:58:41,130 INFO [train.py:901] (0/4) Epoch 16, batch 2100, loss[loss=0.2335, simple_loss=0.2819, pruned_loss=0.0925, over 7533.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2949, pruned_loss=0.06872, over 1605976.41 frames. ], batch size: 18, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:58:43,355 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:58:54,988 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.517e+02 3.000e+02 3.631e+02 1.037e+03, threshold=6.000e+02, percent-clipped=6.0 +2023-02-06 17:59:14,279 INFO [train.py:901] (0/4) Epoch 16, batch 2150, loss[loss=0.1734, simple_loss=0.2565, pruned_loss=0.04519, over 7267.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2945, pruned_loss=0.06817, over 1607721.28 frames. 
], batch size: 16, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:59:19,805 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8805, 1.5845, 2.0567, 1.7832, 1.9776, 1.9248, 1.6680, 0.6857], + device='cuda:0'), covar=tensor([0.5444, 0.4572, 0.1671, 0.2929, 0.2225, 0.2785, 0.2034, 0.4956], + device='cuda:0'), in_proj_covar=tensor([0.0920, 0.0928, 0.0763, 0.0900, 0.0964, 0.0846, 0.0720, 0.0803], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 17:59:22,896 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9293, 6.1457, 5.2203, 2.6212, 5.3154, 5.7554, 5.6532, 5.4231], + device='cuda:0'), covar=tensor([0.0622, 0.0336, 0.0940, 0.4299, 0.0740, 0.0692, 0.0922, 0.0446], + device='cuda:0'), in_proj_covar=tensor([0.0493, 0.0410, 0.0410, 0.0511, 0.0404, 0.0410, 0.0397, 0.0358], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 17:59:50,132 INFO [train.py:901] (0/4) Epoch 16, batch 2200, loss[loss=0.2149, simple_loss=0.2775, pruned_loss=0.07614, over 7439.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2947, pruned_loss=0.06844, over 1604024.17 frames. ], batch size: 17, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 17:59:50,270 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:00:04,143 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.694e+02 3.295e+02 4.036e+02 1.292e+03, threshold=6.590e+02, percent-clipped=6.0 +2023-02-06 18:00:23,384 INFO [train.py:901] (0/4) Epoch 16, batch 2250, loss[loss=0.2397, simple_loss=0.3326, pruned_loss=0.07338, over 8194.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2944, pruned_loss=0.06782, over 1605766.41 frames. ], batch size: 23, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:00:46,379 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 18:00:58,617 INFO [train.py:901] (0/4) Epoch 16, batch 2300, loss[loss=0.2602, simple_loss=0.3388, pruned_loss=0.09082, over 8497.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2954, pruned_loss=0.06828, over 1611338.50 frames. ], batch size: 26, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:01:13,217 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.374e+02 2.935e+02 3.719e+02 2.594e+03, threshold=5.871e+02, percent-clipped=2.0 +2023-02-06 18:01:22,764 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6009, 1.9764, 2.0391, 1.2891, 2.2087, 1.5658, 0.5521, 1.8645], + device='cuda:0'), covar=tensor([0.0515, 0.0256, 0.0207, 0.0474, 0.0257, 0.0707, 0.0727, 0.0237], + device='cuda:0'), in_proj_covar=tensor([0.0423, 0.0364, 0.0313, 0.0416, 0.0350, 0.0507, 0.0372, 0.0389], + device='cuda:0'), out_proj_covar=tensor([1.1640e-04, 9.7522e-05, 8.3455e-05, 1.1214e-04, 9.4408e-05, 1.4701e-04, + 1.0201e-04, 1.0529e-04], device='cuda:0') +2023-02-06 18:01:32,631 INFO [train.py:901] (0/4) Epoch 16, batch 2350, loss[loss=0.2054, simple_loss=0.2752, pruned_loss=0.06783, over 7699.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2953, pruned_loss=0.06862, over 1609304.43 frames. 
], batch size: 18, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:01:38,854 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:01:55,675 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:02:06,132 INFO [train.py:901] (0/4) Epoch 16, batch 2400, loss[loss=0.2158, simple_loss=0.2863, pruned_loss=0.07265, over 7992.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2957, pruned_loss=0.06894, over 1611082.33 frames. ], batch size: 21, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:02:22,334 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.321e+02 3.011e+02 3.485e+02 7.740e+02, threshold=6.021e+02, percent-clipped=5.0 +2023-02-06 18:02:28,428 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123676.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:02:42,475 INFO [train.py:901] (0/4) Epoch 16, batch 2450, loss[loss=0.233, simple_loss=0.3131, pruned_loss=0.0764, over 8452.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2972, pruned_loss=0.06969, over 1614467.83 frames. ], batch size: 27, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:03:15,344 INFO [train.py:901] (0/4) Epoch 16, batch 2500, loss[loss=0.2366, simple_loss=0.3302, pruned_loss=0.07155, over 8256.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2987, pruned_loss=0.07027, over 1619243.86 frames. ], batch size: 22, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:03:25,452 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0468, 1.3789, 1.6240, 1.2372, 1.0393, 1.4104, 1.6589, 1.4953], + device='cuda:0'), covar=tensor([0.0523, 0.1325, 0.1694, 0.1508, 0.0594, 0.1575, 0.0700, 0.0651], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0157, 0.0100, 0.0163, 0.0114, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 18:03:29,366 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.388e+02 3.009e+02 3.987e+02 1.163e+03, threshold=6.019e+02, percent-clipped=7.0 +2023-02-06 18:03:46,943 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123790.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:03:47,734 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:03:50,918 INFO [train.py:901] (0/4) Epoch 16, batch 2550, loss[loss=0.2628, simple_loss=0.3352, pruned_loss=0.09524, over 7406.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2975, pruned_loss=0.06939, over 1616223.34 frames. ], batch size: 71, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:04:24,936 INFO [train.py:901] (0/4) Epoch 16, batch 2600, loss[loss=0.2576, simple_loss=0.3248, pruned_loss=0.09516, over 7305.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2967, pruned_loss=0.06873, over 1615978.15 frames. 
], batch size: 72, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:04:38,916 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.447e+02 2.814e+02 3.524e+02 5.517e+02, threshold=5.629e+02, percent-clipped=0.0 +2023-02-06 18:04:54,356 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:04:58,762 INFO [train.py:901] (0/4) Epoch 16, batch 2650, loss[loss=0.2249, simple_loss=0.3056, pruned_loss=0.07207, over 8035.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.298, pruned_loss=0.0695, over 1618479.26 frames. ], batch size: 20, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:05:06,333 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:05:34,184 INFO [train.py:901] (0/4) Epoch 16, batch 2700, loss[loss=0.1991, simple_loss=0.2691, pruned_loss=0.06453, over 7693.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2975, pruned_loss=0.06939, over 1618442.68 frames. ], batch size: 18, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:05:48,215 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.455e+02 3.188e+02 4.135e+02 8.908e+02, threshold=6.377e+02, percent-clipped=7.0 +2023-02-06 18:06:07,604 INFO [train.py:901] (0/4) Epoch 16, batch 2750, loss[loss=0.1751, simple_loss=0.2555, pruned_loss=0.04737, over 7644.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2984, pruned_loss=0.07029, over 1613852.24 frames. ], batch size: 19, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:06:10,309 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-124000.pt +2023-02-06 18:06:45,090 INFO [train.py:901] (0/4) Epoch 16, batch 2800, loss[loss=0.1926, simple_loss=0.2743, pruned_loss=0.05539, over 8089.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.297, pruned_loss=0.06932, over 1613810.05 frames. ], batch size: 21, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:06:46,000 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:06:50,830 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:06:59,432 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 2.578e+02 3.039e+02 4.001e+02 1.196e+03, threshold=6.079e+02, percent-clipped=5.0 +2023-02-06 18:07:03,050 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:07:08,249 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:07:18,991 INFO [train.py:901] (0/4) Epoch 16, batch 2850, loss[loss=0.2054, simple_loss=0.2866, pruned_loss=0.06206, over 8470.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2973, pruned_loss=0.0692, over 1614911.20 frames. ], batch size: 27, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:07:22,869 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 18:07:55,341 INFO [train.py:901] (0/4) Epoch 16, batch 2900, loss[loss=0.1813, simple_loss=0.2597, pruned_loss=0.05142, over 7795.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2971, pruned_loss=0.06952, over 1610070.53 frames. 
], batch size: 20, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:08:06,256 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:08:06,292 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2135, 1.9363, 2.6008, 2.1845, 2.4227, 2.2146, 1.8777, 1.2898], + device='cuda:0'), covar=tensor([0.4765, 0.4418, 0.1511, 0.3013, 0.2223, 0.2598, 0.1822, 0.4596], + device='cuda:0'), in_proj_covar=tensor([0.0914, 0.0924, 0.0758, 0.0894, 0.0961, 0.0848, 0.0723, 0.0797], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:08:10,031 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.456e+02 3.206e+02 4.387e+02 8.191e+02, threshold=6.412e+02, percent-clipped=4.0 +2023-02-06 18:08:22,880 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:08:29,509 INFO [train.py:901] (0/4) Epoch 16, batch 2950, loss[loss=0.1816, simple_loss=0.2616, pruned_loss=0.05078, over 7793.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2979, pruned_loss=0.07022, over 1608602.76 frames. ], batch size: 19, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:08:29,750 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2915, 2.7108, 3.1464, 1.6442, 3.3504, 2.0182, 1.5400, 2.2152], + device='cuda:0'), covar=tensor([0.0617, 0.0295, 0.0195, 0.0659, 0.0273, 0.0664, 0.0737, 0.0431], + device='cuda:0'), in_proj_covar=tensor([0.0425, 0.0363, 0.0311, 0.0417, 0.0350, 0.0508, 0.0371, 0.0387], + device='cuda:0'), out_proj_covar=tensor([1.1693e-04, 9.7248e-05, 8.2672e-05, 1.1219e-04, 9.4535e-05, 1.4726e-04, + 1.0189e-04, 1.0494e-04], device='cuda:0') +2023-02-06 18:08:35,635 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 18:08:55,340 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:09:01,459 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.77 vs. limit=5.0 +2023-02-06 18:09:03,840 INFO [train.py:901] (0/4) Epoch 16, batch 3000, loss[loss=0.2457, simple_loss=0.322, pruned_loss=0.0847, over 8355.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2979, pruned_loss=0.07009, over 1611640.21 frames. ], batch size: 24, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:09:03,841 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 18:09:16,270 INFO [train.py:935] (0/4) Epoch 16, validation: loss=0.1794, simple_loss=0.2796, pruned_loss=0.03958, over 944034.00 frames. +2023-02-06 18:09:16,271 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 18:09:32,708 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.393e+02 2.939e+02 3.627e+02 1.404e+03, threshold=5.877e+02, percent-clipped=2.0 +2023-02-06 18:09:49,092 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:09:52,918 INFO [train.py:901] (0/4) Epoch 16, batch 3050, loss[loss=0.2094, simple_loss=0.2971, pruned_loss=0.0609, over 8453.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2993, pruned_loss=0.07056, over 1615752.21 frames. 
], batch size: 29, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:10:16,794 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9619, 1.9697, 6.0581, 2.1141, 5.4775, 5.0460, 5.5987, 5.4659], + device='cuda:0'), covar=tensor([0.0440, 0.4413, 0.0288, 0.3922, 0.0824, 0.0829, 0.0463, 0.0450], + device='cuda:0'), in_proj_covar=tensor([0.0556, 0.0607, 0.0637, 0.0588, 0.0659, 0.0568, 0.0561, 0.0623], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:10:25,965 INFO [train.py:901] (0/4) Epoch 16, batch 3100, loss[loss=0.2509, simple_loss=0.3228, pruned_loss=0.08947, over 8315.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.299, pruned_loss=0.06987, over 1617696.95 frames. ], batch size: 25, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:10:28,058 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:10:39,304 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124366.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:10:39,810 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.464e+02 2.975e+02 4.095e+02 1.383e+03, threshold=5.950e+02, percent-clipped=6.0 +2023-02-06 18:11:01,482 INFO [train.py:901] (0/4) Epoch 16, batch 3150, loss[loss=0.2572, simple_loss=0.3391, pruned_loss=0.08767, over 8107.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2993, pruned_loss=0.06996, over 1616130.71 frames. ], batch size: 23, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:11:02,949 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:11:21,457 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:11:23,925 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.40 vs. limit=5.0 +2023-02-06 18:11:36,607 INFO [train.py:901] (0/4) Epoch 16, batch 3200, loss[loss=0.1731, simple_loss=0.2582, pruned_loss=0.04401, over 7806.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2997, pruned_loss=0.07027, over 1616677.16 frames. ], batch size: 19, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:11:39,392 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:11:50,434 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.552e+02 3.102e+02 3.772e+02 6.284e+02, threshold=6.205e+02, percent-clipped=3.0 +2023-02-06 18:12:09,965 INFO [train.py:901] (0/4) Epoch 16, batch 3250, loss[loss=0.2342, simple_loss=0.3211, pruned_loss=0.07359, over 8188.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2995, pruned_loss=0.06991, over 1620992.03 frames. 
], batch size: 23, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:12:23,011 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:12:38,295 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4180, 2.0970, 2.9029, 2.3577, 2.8357, 2.2891, 2.0578, 1.7760], + device='cuda:0'), covar=tensor([0.4448, 0.4455, 0.1509, 0.3135, 0.2109, 0.2544, 0.1707, 0.4545], + device='cuda:0'), in_proj_covar=tensor([0.0913, 0.0922, 0.0756, 0.0893, 0.0960, 0.0846, 0.0721, 0.0796], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:12:40,898 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:12:42,554 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 18:12:45,394 INFO [train.py:901] (0/4) Epoch 16, batch 3300, loss[loss=0.2716, simple_loss=0.3387, pruned_loss=0.1022, over 6779.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.298, pruned_loss=0.06953, over 1611818.89 frames. ], batch size: 72, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:12:59,411 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.425e+02 2.919e+02 3.659e+02 6.879e+02, threshold=5.837e+02, percent-clipped=1.0 +2023-02-06 18:13:18,831 INFO [train.py:901] (0/4) Epoch 16, batch 3350, loss[loss=0.2606, simple_loss=0.3251, pruned_loss=0.09806, over 6889.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2982, pruned_loss=0.06969, over 1610525.32 frames. ], batch size: 73, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:13:25,335 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:13:43,462 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:13:46,005 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:13:54,702 INFO [train.py:901] (0/4) Epoch 16, batch 3400, loss[loss=0.211, simple_loss=0.3018, pruned_loss=0.06014, over 8474.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2979, pruned_loss=0.06955, over 1613058.94 frames. ], batch size: 25, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:14:01,589 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:14:08,831 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.420e+02 3.011e+02 3.525e+02 7.222e+02, threshold=6.022e+02, percent-clipped=3.0 +2023-02-06 18:14:18,569 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124681.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:14:28,870 INFO [train.py:901] (0/4) Epoch 16, batch 3450, loss[loss=0.2143, simple_loss=0.2916, pruned_loss=0.06849, over 8550.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2983, pruned_loss=0.06958, over 1615303.40 frames. ], batch size: 49, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:14:38,557 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124710.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:14:47,104 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.07 vs. 
limit=5.0 +2023-02-06 18:15:05,352 INFO [train.py:901] (0/4) Epoch 16, batch 3500, loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.05864, over 8134.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.297, pruned_loss=0.06889, over 1613707.05 frames. ], batch size: 22, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:15:07,649 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:18,017 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3161, 2.3399, 1.5565, 1.9765, 1.9048, 1.2549, 1.6383, 1.8497], + device='cuda:0'), covar=tensor([0.1570, 0.0457, 0.1408, 0.0688, 0.0804, 0.1966, 0.1288, 0.0960], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0234, 0.0327, 0.0305, 0.0300, 0.0334, 0.0348, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 18:15:20,550 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.534e+02 3.082e+02 3.894e+02 7.146e+02, threshold=6.164e+02, percent-clipped=3.0 +2023-02-06 18:15:20,977 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 18:15:22,091 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:38,226 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 18:15:38,975 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:39,088 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:39,723 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:40,177 INFO [train.py:901] (0/4) Epoch 16, batch 3550, loss[loss=0.2126, simple_loss=0.2938, pruned_loss=0.06571, over 8496.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2981, pruned_loss=0.06941, over 1619907.39 frames. ], batch size: 28, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:15:56,871 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124820.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:16:00,135 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124825.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:16:08,884 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:16:14,221 INFO [train.py:901] (0/4) Epoch 16, batch 3600, loss[loss=0.1904, simple_loss=0.2653, pruned_loss=0.05777, over 7702.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2983, pruned_loss=0.06956, over 1617352.37 frames. ], batch size: 18, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:16:26,986 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.08 vs. limit=5.0 +2023-02-06 18:16:30,804 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.338e+02 2.977e+02 3.463e+02 8.977e+02, threshold=5.954e+02, percent-clipped=2.0 +2023-02-06 18:16:50,929 INFO [train.py:901] (0/4) Epoch 16, batch 3650, loss[loss=0.2517, simple_loss=0.3291, pruned_loss=0.08713, over 8360.00 frames. 
], tot_loss[loss=0.2199, simple_loss=0.2994, pruned_loss=0.07022, over 1622580.31 frames. ], batch size: 24, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:16:53,007 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3665, 1.3101, 4.5456, 1.7739, 4.0133, 3.8117, 4.0720, 3.9913], + device='cuda:0'), covar=tensor([0.0517, 0.4708, 0.0442, 0.3893, 0.1061, 0.0906, 0.0582, 0.0632], + device='cuda:0'), in_proj_covar=tensor([0.0557, 0.0613, 0.0635, 0.0587, 0.0660, 0.0566, 0.0561, 0.0623], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:16:59,886 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:17:24,965 INFO [train.py:901] (0/4) Epoch 16, batch 3700, loss[loss=0.22, simple_loss=0.2933, pruned_loss=0.07336, over 8088.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2986, pruned_loss=0.06962, over 1623698.87 frames. ], batch size: 21, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:17:38,859 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 18:17:40,141 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.643e+02 3.299e+02 4.315e+02 1.525e+03, threshold=6.598e+02, percent-clipped=10.0 +2023-02-06 18:17:47,827 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4605, 1.8707, 3.0494, 1.3437, 2.2882, 1.8641, 1.6060, 2.1931], + device='cuda:0'), covar=tensor([0.1968, 0.2317, 0.0685, 0.4301, 0.1589, 0.2992, 0.2145, 0.2128], + device='cuda:0'), in_proj_covar=tensor([0.0505, 0.0559, 0.0541, 0.0611, 0.0630, 0.0569, 0.0500, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:18:01,619 INFO [train.py:901] (0/4) Epoch 16, batch 3750, loss[loss=0.2097, simple_loss=0.2861, pruned_loss=0.06659, over 8089.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2971, pruned_loss=0.06857, over 1618862.90 frames. ], batch size: 21, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:18:04,452 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125000.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:07,873 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:20,983 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125025.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:22,459 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6334, 1.7201, 4.8423, 1.9791, 4.2949, 4.0731, 4.3711, 4.2271], + device='cuda:0'), covar=tensor([0.0537, 0.4111, 0.0436, 0.3436, 0.0983, 0.0855, 0.0511, 0.0611], + device='cuda:0'), in_proj_covar=tensor([0.0556, 0.0612, 0.0635, 0.0584, 0.0658, 0.0565, 0.0561, 0.0624], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:18:24,558 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125030.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:35,176 INFO [train.py:901] (0/4) Epoch 16, batch 3800, loss[loss=0.17, simple_loss=0.2502, pruned_loss=0.04491, over 7684.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2968, pruned_loss=0.0687, over 1619349.10 frames. 
], batch size: 18, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:18:49,284 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.284e+02 2.854e+02 3.651e+02 7.015e+02, threshold=5.709e+02, percent-clipped=3.0 +2023-02-06 18:18:58,957 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125081.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:19:10,727 INFO [train.py:901] (0/4) Epoch 16, batch 3850, loss[loss=0.2297, simple_loss=0.3136, pruned_loss=0.07292, over 8568.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2973, pruned_loss=0.06916, over 1621389.55 frames. ], batch size: 31, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:19:18,419 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125106.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:19:19,270 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 18:19:24,389 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125115.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:41,019 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:45,073 INFO [train.py:901] (0/4) Epoch 16, batch 3900, loss[loss=0.2019, simple_loss=0.2916, pruned_loss=0.05608, over 8098.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2961, pruned_loss=0.06871, over 1618439.75 frames. ], batch size: 21, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:19:45,091 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 18:19:45,214 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:52,241 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 18:19:58,178 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:59,303 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.507e+02 2.888e+02 3.601e+02 7.393e+02, threshold=5.777e+02, percent-clipped=3.0 +2023-02-06 18:20:09,606 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:20:15,220 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:20:19,100 INFO [train.py:901] (0/4) Epoch 16, batch 3950, loss[loss=0.2056, simple_loss=0.2844, pruned_loss=0.0634, over 8087.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2963, pruned_loss=0.06856, over 1619331.69 frames. ], batch size: 21, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:20:20,238 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 18:20:55,581 INFO [train.py:901] (0/4) Epoch 16, batch 4000, loss[loss=0.2014, simple_loss=0.2869, pruned_loss=0.05795, over 7971.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2963, pruned_loss=0.06864, over 1616100.04 frames. 
], batch size: 21, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:21:09,904 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.424e+02 2.747e+02 3.530e+02 7.172e+02, threshold=5.495e+02, percent-clipped=3.0 +2023-02-06 18:21:10,312 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.73 vs. limit=5.0 +2023-02-06 18:21:29,135 INFO [train.py:901] (0/4) Epoch 16, batch 4050, loss[loss=0.2237, simple_loss=0.3088, pruned_loss=0.06926, over 8327.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2978, pruned_loss=0.06936, over 1619121.77 frames. ], batch size: 25, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:21:29,952 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125297.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:21:43,914 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3623, 1.6395, 1.7420, 1.0495, 1.7950, 1.3419, 0.2149, 1.5591], + device='cuda:0'), covar=tensor([0.0363, 0.0253, 0.0186, 0.0340, 0.0302, 0.0650, 0.0602, 0.0187], + device='cuda:0'), in_proj_covar=tensor([0.0424, 0.0360, 0.0310, 0.0417, 0.0351, 0.0507, 0.0370, 0.0389], + device='cuda:0'), out_proj_covar=tensor([1.1663e-04, 9.6111e-05, 8.2316e-05, 1.1197e-04, 9.4792e-05, 1.4667e-04, + 1.0140e-04, 1.0537e-04], device='cuda:0') +2023-02-06 18:22:05,146 INFO [train.py:901] (0/4) Epoch 16, batch 4100, loss[loss=0.1828, simple_loss=0.2637, pruned_loss=0.051, over 7647.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2975, pruned_loss=0.06978, over 1616568.65 frames. ], batch size: 19, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:22:19,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.458e+02 2.941e+02 3.398e+02 7.943e+02, threshold=5.881e+02, percent-clipped=6.0 +2023-02-06 18:22:20,410 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 18:22:22,350 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125371.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:24,205 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:37,712 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:38,920 INFO [train.py:901] (0/4) Epoch 16, batch 4150, loss[loss=0.2644, simple_loss=0.3431, pruned_loss=0.09282, over 8367.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2978, pruned_loss=0.07028, over 1609957.10 frames. 
], batch size: 50, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:22:39,119 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:39,137 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:55,747 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125421.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:59,769 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6789, 4.6945, 4.2087, 1.9525, 4.2039, 4.2314, 4.2590, 3.9932], + device='cuda:0'), covar=tensor([0.0752, 0.0497, 0.1067, 0.4714, 0.0901, 0.0928, 0.1321, 0.0791], + device='cuda:0'), in_proj_covar=tensor([0.0495, 0.0412, 0.0412, 0.0514, 0.0405, 0.0411, 0.0401, 0.0358], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:23:02,563 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4945, 1.4743, 1.8082, 1.2706, 1.1079, 1.7553, 0.1088, 1.1513], + device='cuda:0'), covar=tensor([0.2178, 0.1580, 0.0429, 0.1320, 0.3268, 0.0556, 0.2655, 0.1512], + device='cuda:0'), in_proj_covar=tensor([0.0173, 0.0179, 0.0113, 0.0212, 0.0255, 0.0116, 0.0162, 0.0175], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 18:23:09,066 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:14,198 INFO [train.py:901] (0/4) Epoch 16, batch 4200, loss[loss=0.237, simple_loss=0.3154, pruned_loss=0.07927, over 8489.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2983, pruned_loss=0.07038, over 1609042.83 frames. ], batch size: 28, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:23:29,128 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.508e+02 2.881e+02 3.373e+02 7.881e+02, threshold=5.761e+02, percent-clipped=2.0 +2023-02-06 18:23:39,902 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 18:23:44,590 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:48,645 INFO [train.py:901] (0/4) Epoch 16, batch 4250, loss[loss=0.1976, simple_loss=0.2761, pruned_loss=0.05956, over 7975.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2972, pruned_loss=0.0695, over 1606983.08 frames. ], batch size: 21, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:24:01,601 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 18:24:23,119 INFO [train.py:901] (0/4) Epoch 16, batch 4300, loss[loss=0.2504, simple_loss=0.3292, pruned_loss=0.08577, over 8107.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.298, pruned_loss=0.0699, over 1608309.90 frames. 
], batch size: 23, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:24:24,746 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8621, 1.6090, 2.0350, 1.7817, 2.0401, 1.8847, 1.6508, 0.8357], + device='cuda:0'), covar=tensor([0.5107, 0.4173, 0.1644, 0.2952, 0.2009, 0.2592, 0.1764, 0.4306], + device='cuda:0'), in_proj_covar=tensor([0.0916, 0.0923, 0.0760, 0.0900, 0.0963, 0.0852, 0.0722, 0.0797], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:24:28,621 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125553.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:24:37,212 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6133, 1.3825, 2.3619, 1.1514, 2.1061, 2.4633, 2.6288, 2.1070], + device='cuda:0'), covar=tensor([0.0940, 0.1320, 0.0461, 0.2180, 0.0718, 0.0410, 0.0650, 0.0753], + device='cuda:0'), in_proj_covar=tensor([0.0278, 0.0309, 0.0274, 0.0299, 0.0290, 0.0249, 0.0383, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 18:24:38,404 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.516e+02 3.115e+02 4.119e+02 8.810e+02, threshold=6.231e+02, percent-clipped=6.0 +2023-02-06 18:24:46,753 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:24:58,881 INFO [train.py:901] (0/4) Epoch 16, batch 4350, loss[loss=0.2388, simple_loss=0.2917, pruned_loss=0.0929, over 7648.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.299, pruned_loss=0.07102, over 1610085.38 frames. ], batch size: 19, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:25:05,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:25:16,547 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:25:33,361 INFO [train.py:901] (0/4) Epoch 16, batch 4400, loss[loss=0.2117, simple_loss=0.283, pruned_loss=0.07019, over 7534.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2993, pruned_loss=0.0712, over 1611469.60 frames. ], batch size: 18, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:25:34,019 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 18:25:48,658 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.489e+02 3.156e+02 3.927e+02 6.760e+02, threshold=6.312e+02, percent-clipped=2.0 +2023-02-06 18:26:09,565 INFO [train.py:901] (0/4) Epoch 16, batch 4450, loss[loss=0.2111, simple_loss=0.3036, pruned_loss=0.0593, over 8313.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2982, pruned_loss=0.06998, over 1613477.10 frames. ], batch size: 25, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:26:14,203 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-06 18:26:24,207 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:38,221 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:41,742 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:43,637 INFO [train.py:901] (0/4) Epoch 16, batch 4500, loss[loss=0.1985, simple_loss=0.273, pruned_loss=0.06199, over 7925.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2983, pruned_loss=0.06977, over 1616078.71 frames. ], batch size: 20, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:26:57,808 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.361e+02 2.740e+02 3.373e+02 6.169e+02, threshold=5.479e+02, percent-clipped=0.0 +2023-02-06 18:27:04,075 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 18:27:10,712 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:27:19,278 INFO [train.py:901] (0/4) Epoch 16, batch 4550, loss[loss=0.2286, simple_loss=0.3128, pruned_loss=0.07222, over 8188.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2984, pruned_loss=0.06985, over 1614056.20 frames. ], batch size: 23, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:27:45,720 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:27:54,474 INFO [train.py:901] (0/4) Epoch 16, batch 4600, loss[loss=0.2326, simple_loss=0.3188, pruned_loss=0.07319, over 8453.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2986, pruned_loss=0.07005, over 1614679.47 frames. ], batch size: 48, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:27:59,476 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:05,144 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:08,979 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.490e+02 3.040e+02 3.897e+02 1.241e+03, threshold=6.080e+02, percent-clipped=8.0 +2023-02-06 18:28:22,182 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:30,029 INFO [train.py:901] (0/4) Epoch 16, batch 4650, loss[loss=0.2217, simple_loss=0.2839, pruned_loss=0.0798, over 7648.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2994, pruned_loss=0.07072, over 1616241.78 frames. ], batch size: 19, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:28:31,607 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125898.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:54,787 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 18:29:06,119 INFO [train.py:901] (0/4) Epoch 16, batch 4700, loss[loss=0.1784, simple_loss=0.2652, pruned_loss=0.04582, over 8099.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2998, pruned_loss=0.07094, over 1617713.91 frames. 
], batch size: 23, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:29:18,968 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:29:20,237 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.580e+02 3.138e+02 4.127e+02 1.212e+03, threshold=6.277e+02, percent-clipped=5.0 +2023-02-06 18:29:25,563 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.32 vs. limit=5.0 +2023-02-06 18:29:39,834 INFO [train.py:901] (0/4) Epoch 16, batch 4750, loss[loss=0.1906, simple_loss=0.2755, pruned_loss=0.05283, over 8078.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2987, pruned_loss=0.06984, over 1619017.43 frames. ], batch size: 21, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:29:42,623 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-126000.pt +2023-02-06 18:29:50,563 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4392, 2.5690, 2.0237, 2.2006, 2.0676, 1.6800, 2.1661, 2.0856], + device='cuda:0'), covar=tensor([0.1434, 0.0360, 0.0978, 0.0599, 0.0736, 0.1389, 0.0867, 0.0927], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0230, 0.0322, 0.0301, 0.0297, 0.0328, 0.0340, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 18:29:55,988 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:11,192 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 18:30:13,728 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 18:30:16,331 INFO [train.py:901] (0/4) Epoch 16, batch 4800, loss[loss=0.1901, simple_loss=0.2608, pruned_loss=0.05974, over 7210.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2977, pruned_loss=0.06923, over 1618932.67 frames. ], batch size: 16, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:30:31,318 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.301e+02 2.788e+02 3.330e+02 6.705e+02, threshold=5.575e+02, percent-clipped=2.0 +2023-02-06 18:30:40,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:44,993 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:46,459 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:51,047 INFO [train.py:901] (0/4) Epoch 16, batch 4850, loss[loss=0.2217, simple_loss=0.3018, pruned_loss=0.07083, over 8462.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2973, pruned_loss=0.06913, over 1616159.70 frames. ], batch size: 27, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:30:59,983 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:01,795 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 18:31:03,310 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:19,043 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:26,989 INFO [train.py:901] (0/4) Epoch 16, batch 4900, loss[loss=0.2114, simple_loss=0.2968, pruned_loss=0.06302, over 8335.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2966, pruned_loss=0.0685, over 1615214.64 frames. ], batch size: 25, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:31:32,564 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:41,754 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.384e+02 3.140e+02 3.836e+02 7.587e+02, threshold=6.281e+02, percent-clipped=5.0 +2023-02-06 18:31:50,103 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:32:01,582 INFO [train.py:901] (0/4) Epoch 16, batch 4950, loss[loss=0.2538, simple_loss=0.3168, pruned_loss=0.09546, over 7000.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2966, pruned_loss=0.06864, over 1612540.29 frames. ], batch size: 74, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:32:06,028 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126202.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:32:35,839 INFO [train.py:901] (0/4) Epoch 16, batch 5000, loss[loss=0.1771, simple_loss=0.2592, pruned_loss=0.04746, over 8246.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2953, pruned_loss=0.06786, over 1609292.04 frames. ], batch size: 22, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:32:50,294 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.421e+02 2.802e+02 3.540e+02 7.456e+02, threshold=5.603e+02, percent-clipped=2.0 +2023-02-06 18:33:10,452 INFO [train.py:901] (0/4) Epoch 16, batch 5050, loss[loss=0.1839, simple_loss=0.2605, pruned_loss=0.05368, over 7415.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2946, pruned_loss=0.068, over 1604801.05 frames. ], batch size: 17, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:33:38,217 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:33:38,802 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4014, 4.3575, 3.9258, 1.9288, 3.8817, 3.9322, 3.9527, 3.6328], + device='cuda:0'), covar=tensor([0.0735, 0.0581, 0.1108, 0.4963, 0.0965, 0.1084, 0.1243, 0.1013], + device='cuda:0'), in_proj_covar=tensor([0.0496, 0.0412, 0.0413, 0.0514, 0.0404, 0.0409, 0.0402, 0.0357], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:33:41,491 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 18:33:44,851 INFO [train.py:901] (0/4) Epoch 16, batch 5100, loss[loss=0.217, simple_loss=0.3051, pruned_loss=0.06445, over 8140.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.295, pruned_loss=0.06822, over 1607591.98 frames. 
], batch size: 22, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:33:55,151 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:33:55,973 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126361.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:34:01,122 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.574e+02 2.967e+02 3.773e+02 8.448e+02, threshold=5.934e+02, percent-clipped=7.0 +2023-02-06 18:34:20,688 INFO [train.py:901] (0/4) Epoch 16, batch 5150, loss[loss=0.2152, simple_loss=0.2954, pruned_loss=0.06752, over 7780.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2953, pruned_loss=0.06822, over 1611596.00 frames. ], batch size: 19, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:34:22,775 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:34:25,577 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6945, 4.7069, 4.1810, 2.1014, 4.1744, 4.3298, 4.2694, 4.0401], + device='cuda:0'), covar=tensor([0.0691, 0.0564, 0.1131, 0.4476, 0.0884, 0.0829, 0.1234, 0.0808], + device='cuda:0'), in_proj_covar=tensor([0.0495, 0.0413, 0.0412, 0.0513, 0.0403, 0.0409, 0.0400, 0.0355], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:34:29,713 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1941, 1.0823, 1.2699, 1.0568, 0.9357, 1.2988, 0.0485, 0.9270], + device='cuda:0'), covar=tensor([0.1989, 0.1588, 0.0544, 0.0981, 0.2997, 0.0540, 0.2556, 0.1381], + device='cuda:0'), in_proj_covar=tensor([0.0175, 0.0181, 0.0113, 0.0213, 0.0258, 0.0117, 0.0163, 0.0178], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 18:34:54,892 INFO [train.py:901] (0/4) Epoch 16, batch 5200, loss[loss=0.1809, simple_loss=0.2728, pruned_loss=0.04445, over 8364.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2971, pruned_loss=0.06944, over 1610953.88 frames. ], batch size: 24, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:35:03,412 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:10,024 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.450e+02 2.961e+02 4.009e+02 9.502e+02, threshold=5.923e+02, percent-clipped=8.0 +2023-02-06 18:35:10,196 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126468.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:13,937 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.84 vs. limit=5.0 +2023-02-06 18:35:15,120 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126475.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:21,832 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:30,966 INFO [train.py:901] (0/4) Epoch 16, batch 5250, loss[loss=0.2352, simple_loss=0.3021, pruned_loss=0.08414, over 7529.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2975, pruned_loss=0.0695, over 1611155.80 frames. 
], batch size: 18, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:35:39,842 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 18:36:05,600 INFO [train.py:901] (0/4) Epoch 16, batch 5300, loss[loss=0.2091, simple_loss=0.3017, pruned_loss=0.05821, over 8581.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2973, pruned_loss=0.06903, over 1614524.12 frames. ], batch size: 31, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:36:20,896 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.415e+02 2.951e+02 3.953e+02 1.148e+03, threshold=5.902e+02, percent-clipped=4.0 +2023-02-06 18:36:37,688 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1866, 2.5240, 2.8179, 1.6711, 3.1131, 1.7302, 1.3643, 2.1862], + device='cuda:0'), covar=tensor([0.0815, 0.0352, 0.0236, 0.0671, 0.0410, 0.0860, 0.0821, 0.0469], + device='cuda:0'), in_proj_covar=tensor([0.0432, 0.0370, 0.0317, 0.0423, 0.0356, 0.0515, 0.0374, 0.0393], + device='cuda:0'), out_proj_covar=tensor([1.1843e-04, 9.8836e-05, 8.4163e-05, 1.1357e-04, 9.5914e-05, 1.4906e-04, + 1.0225e-04, 1.0630e-04], device='cuda:0') +2023-02-06 18:36:41,560 INFO [train.py:901] (0/4) Epoch 16, batch 5350, loss[loss=0.2086, simple_loss=0.2928, pruned_loss=0.06222, over 8348.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2992, pruned_loss=0.07021, over 1617544.00 frames. ], batch size: 24, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:36:51,955 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0610, 4.0642, 3.7371, 2.0785, 3.6786, 3.6901, 3.7189, 3.4252], + device='cuda:0'), covar=tensor([0.0901, 0.0662, 0.1060, 0.4417, 0.0878, 0.0977, 0.1238, 0.0902], + device='cuda:0'), in_proj_covar=tensor([0.0494, 0.0409, 0.0412, 0.0510, 0.0403, 0.0407, 0.0399, 0.0352], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:36:58,006 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7467, 2.2857, 4.0342, 1.5553, 2.8150, 2.1733, 1.8258, 2.5617], + device='cuda:0'), covar=tensor([0.1797, 0.2257, 0.0910, 0.4027, 0.1718, 0.2869, 0.1973, 0.2503], + device='cuda:0'), in_proj_covar=tensor([0.0500, 0.0553, 0.0540, 0.0611, 0.0627, 0.0566, 0.0498, 0.0617], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:37:12,255 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6347, 2.0334, 3.4149, 1.4101, 2.5793, 1.9799, 1.6432, 2.3844], + device='cuda:0'), covar=tensor([0.1849, 0.2285, 0.0895, 0.4278, 0.1641, 0.3049, 0.2128, 0.2345], + device='cuda:0'), in_proj_covar=tensor([0.0501, 0.0554, 0.0541, 0.0612, 0.0628, 0.0567, 0.0498, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:37:16,922 INFO [train.py:901] (0/4) Epoch 16, batch 5400, loss[loss=0.2032, simple_loss=0.2794, pruned_loss=0.06355, over 8129.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.299, pruned_loss=0.07011, over 1616278.41 frames. 
], batch size: 22, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:27,560 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9647, 1.6495, 2.1110, 1.8408, 2.0249, 1.9304, 1.7217, 0.7301], + device='cuda:0'), covar=tensor([0.5105, 0.4428, 0.1674, 0.2916, 0.2036, 0.2688, 0.1822, 0.4476], + device='cuda:0'), in_proj_covar=tensor([0.0908, 0.0916, 0.0756, 0.0892, 0.0958, 0.0841, 0.0717, 0.0795], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:37:32,195 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.413e+02 2.875e+02 3.758e+02 9.843e+02, threshold=5.751e+02, percent-clipped=6.0 +2023-02-06 18:37:33,813 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5231, 1.9291, 3.4062, 1.3412, 2.5020, 1.9155, 1.6445, 2.3276], + device='cuda:0'), covar=tensor([0.2007, 0.2449, 0.0771, 0.4603, 0.1831, 0.3140, 0.2264, 0.2461], + device='cuda:0'), in_proj_covar=tensor([0.0504, 0.0557, 0.0544, 0.0615, 0.0631, 0.0570, 0.0501, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:37:51,442 INFO [train.py:901] (0/4) Epoch 16, batch 5450, loss[loss=0.246, simple_loss=0.3218, pruned_loss=0.08512, over 8701.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2985, pruned_loss=0.07021, over 1612038.52 frames. ], batch size: 34, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:56,215 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0868, 1.8529, 6.2119, 2.4126, 5.7485, 5.2391, 5.8095, 5.6981], + device='cuda:0'), covar=tensor([0.0366, 0.4137, 0.0333, 0.3102, 0.0748, 0.0809, 0.0387, 0.0412], + device='cuda:0'), in_proj_covar=tensor([0.0555, 0.0613, 0.0633, 0.0580, 0.0659, 0.0565, 0.0556, 0.0621], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:38:00,362 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2684, 3.1733, 2.9910, 1.6723, 2.9075, 2.8733, 2.8787, 2.7544], + device='cuda:0'), covar=tensor([0.1183, 0.0856, 0.1319, 0.4269, 0.1162, 0.1136, 0.1591, 0.1125], + device='cuda:0'), in_proj_covar=tensor([0.0493, 0.0407, 0.0410, 0.0507, 0.0401, 0.0408, 0.0398, 0.0351], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:38:17,596 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126731.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:17,746 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.57 vs. limit=5.0 +2023-02-06 18:38:22,997 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4998, 2.5670, 1.8079, 2.2163, 2.1792, 1.5904, 2.0009, 2.1332], + device='cuda:0'), covar=tensor([0.1562, 0.0419, 0.1220, 0.0669, 0.0763, 0.1529, 0.1041, 0.0996], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0232, 0.0323, 0.0301, 0.0299, 0.0331, 0.0343, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 18:38:24,939 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:28,283 INFO [train.py:901] (0/4) Epoch 16, batch 5500, loss[loss=0.198, simple_loss=0.2661, pruned_loss=0.06493, over 7693.00 frames. 
], tot_loss[loss=0.2194, simple_loss=0.2977, pruned_loss=0.07057, over 1602994.52 frames. ], batch size: 18, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:28,988 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 18:38:35,369 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:38,179 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7717, 1.6805, 2.4323, 1.6465, 1.2508, 2.2953, 0.5161, 1.4305], + device='cuda:0'), covar=tensor([0.1736, 0.1295, 0.0264, 0.1333, 0.2962, 0.0370, 0.2380, 0.1333], + device='cuda:0'), in_proj_covar=tensor([0.0177, 0.0183, 0.0115, 0.0214, 0.0262, 0.0119, 0.0166, 0.0179], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 18:38:44,227 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.451e+02 2.886e+02 3.496e+02 8.391e+02, threshold=5.772e+02, percent-clipped=4.0 +2023-02-06 18:39:00,749 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 18:39:02,248 INFO [train.py:901] (0/4) Epoch 16, batch 5550, loss[loss=0.2239, simple_loss=0.3115, pruned_loss=0.06811, over 8465.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2969, pruned_loss=0.06967, over 1605909.81 frames. ], batch size: 27, lr: 4.73e-03, grad_scale: 4.0 +2023-02-06 18:39:05,931 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1466, 1.4802, 4.2834, 1.6919, 3.8039, 3.5767, 3.8989, 3.7666], + device='cuda:0'), covar=tensor([0.0515, 0.4225, 0.0587, 0.3585, 0.1023, 0.0914, 0.0580, 0.0642], + device='cuda:0'), in_proj_covar=tensor([0.0554, 0.0611, 0.0632, 0.0578, 0.0657, 0.0563, 0.0552, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:39:13,432 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:16,822 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3747, 1.4568, 1.4103, 1.8623, 0.7190, 1.1993, 1.3734, 1.4883], + device='cuda:0'), covar=tensor([0.0862, 0.0763, 0.0963, 0.0470, 0.1113, 0.1422, 0.0671, 0.0681], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0201, 0.0248, 0.0212, 0.0209, 0.0248, 0.0254, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 18:39:22,292 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2407, 1.9610, 2.6840, 2.1883, 2.6810, 2.1679, 1.8597, 1.3354], + device='cuda:0'), covar=tensor([0.4563, 0.4126, 0.1523, 0.3241, 0.2157, 0.2587, 0.1826, 0.4682], + device='cuda:0'), in_proj_covar=tensor([0.0914, 0.0923, 0.0759, 0.0899, 0.0963, 0.0846, 0.0721, 0.0799], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:39:30,339 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:38,324 INFO [train.py:901] (0/4) Epoch 16, batch 5600, loss[loss=0.2393, simple_loss=0.3153, pruned_loss=0.08166, over 7111.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2963, pruned_loss=0.06918, over 1606066.15 frames. 
], batch size: 72, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:39:45,803 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:54,359 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.374e+02 2.959e+02 4.088e+02 8.002e+02, threshold=5.917e+02, percent-clipped=4.0 +2023-02-06 18:40:12,818 INFO [train.py:901] (0/4) Epoch 16, batch 5650, loss[loss=0.2545, simple_loss=0.3293, pruned_loss=0.08985, over 8182.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2962, pruned_loss=0.06882, over 1609161.33 frames. ], batch size: 23, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:40:26,274 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1838, 1.7152, 4.3307, 1.5852, 3.8522, 3.5681, 3.9275, 3.7743], + device='cuda:0'), covar=tensor([0.0522, 0.3778, 0.0494, 0.3700, 0.0968, 0.0916, 0.0521, 0.0618], + device='cuda:0'), in_proj_covar=tensor([0.0554, 0.0610, 0.0631, 0.0580, 0.0658, 0.0562, 0.0553, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:40:33,383 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 18:40:33,510 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:40:48,557 INFO [train.py:901] (0/4) Epoch 16, batch 5700, loss[loss=0.2188, simple_loss=0.2913, pruned_loss=0.07317, over 7925.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2971, pruned_loss=0.06945, over 1610080.95 frames. ], batch size: 20, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:04,179 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.583e+02 3.205e+02 4.543e+02 7.570e+02, threshold=6.410e+02, percent-clipped=11.0 +2023-02-06 18:41:22,813 INFO [train.py:901] (0/4) Epoch 16, batch 5750, loss[loss=0.189, simple_loss=0.2617, pruned_loss=0.05811, over 7788.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2977, pruned_loss=0.06959, over 1611511.72 frames. ], batch size: 19, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:34,413 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2100, 1.0983, 1.2960, 1.1083, 0.9423, 1.3251, 0.0853, 0.8966], + device='cuda:0'), covar=tensor([0.1659, 0.1444, 0.0463, 0.0779, 0.3055, 0.0505, 0.2374, 0.1387], + device='cuda:0'), in_proj_covar=tensor([0.0177, 0.0184, 0.0116, 0.0216, 0.0262, 0.0120, 0.0168, 0.0181], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 18:41:39,578 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 18:41:49,998 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7926, 2.4267, 4.4653, 1.5596, 3.3532, 2.4498, 1.8891, 2.9849], + device='cuda:0'), covar=tensor([0.1709, 0.2245, 0.0678, 0.4094, 0.1486, 0.2675, 0.1997, 0.2260], + device='cuda:0'), in_proj_covar=tensor([0.0503, 0.0553, 0.0541, 0.0611, 0.0625, 0.0564, 0.0499, 0.0617], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:41:51,104 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. 
limit=2.0 +2023-02-06 18:41:56,554 INFO [train.py:901] (0/4) Epoch 16, batch 5800, loss[loss=0.1905, simple_loss=0.2593, pruned_loss=0.06087, over 7218.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2971, pruned_loss=0.06936, over 1611115.42 frames. ], batch size: 16, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:14,357 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.425e+02 2.951e+02 3.537e+02 6.549e+02, threshold=5.902e+02, percent-clipped=1.0 +2023-02-06 18:42:33,212 INFO [train.py:901] (0/4) Epoch 16, batch 5850, loss[loss=0.2078, simple_loss=0.2944, pruned_loss=0.06061, over 8251.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2961, pruned_loss=0.06841, over 1612574.14 frames. ], batch size: 22, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:45,191 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:42:47,577 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 18:43:02,056 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:07,131 INFO [train.py:901] (0/4) Epoch 16, batch 5900, loss[loss=0.2197, simple_loss=0.2951, pruned_loss=0.07211, over 8137.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2953, pruned_loss=0.06808, over 1615590.83 frames. ], batch size: 22, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:10,073 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3663, 1.4262, 4.5481, 1.7833, 4.0320, 3.8313, 4.1257, 4.0218], + device='cuda:0'), covar=tensor([0.0516, 0.4630, 0.0480, 0.3708, 0.1117, 0.0878, 0.0561, 0.0641], + device='cuda:0'), in_proj_covar=tensor([0.0552, 0.0609, 0.0631, 0.0579, 0.0659, 0.0561, 0.0553, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:43:22,997 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.337e+02 2.920e+02 3.581e+02 1.365e+03, threshold=5.840e+02, percent-clipped=5.0 +2023-02-06 18:43:30,606 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:34,101 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:42,722 INFO [train.py:901] (0/4) Epoch 16, batch 5950, loss[loss=0.2779, simple_loss=0.3387, pruned_loss=0.1086, over 6675.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2947, pruned_loss=0.06777, over 1609474.49 frames. ], batch size: 72, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:51,272 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127208.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:56,247 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.01 vs. 
limit=5.0 +2023-02-06 18:44:02,355 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5211, 1.9056, 2.0069, 1.0405, 2.1451, 1.4795, 0.5621, 1.8404], + device='cuda:0'), covar=tensor([0.0497, 0.0328, 0.0231, 0.0546, 0.0318, 0.0742, 0.0713, 0.0259], + device='cuda:0'), in_proj_covar=tensor([0.0429, 0.0368, 0.0316, 0.0422, 0.0355, 0.0518, 0.0373, 0.0394], + device='cuda:0'), out_proj_covar=tensor([1.1757e-04, 9.8481e-05, 8.4071e-05, 1.1340e-04, 9.5717e-05, 1.5004e-04, + 1.0199e-04, 1.0655e-04], device='cuda:0') +2023-02-06 18:44:17,703 INFO [train.py:901] (0/4) Epoch 16, batch 6000, loss[loss=0.1759, simple_loss=0.2548, pruned_loss=0.04851, over 7287.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06787, over 1608359.78 frames. ], batch size: 16, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:44:17,704 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 18:44:29,966 INFO [train.py:935] (0/4) Epoch 16, validation: loss=0.1793, simple_loss=0.2799, pruned_loss=0.03935, over 944034.00 frames. +2023-02-06 18:44:29,967 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 18:44:44,479 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:45,670 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.282e+02 2.976e+02 3.659e+02 8.304e+02, threshold=5.951e+02, percent-clipped=2.0 +2023-02-06 18:45:01,805 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:45:01,838 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5548, 1.9657, 2.0929, 1.0695, 2.1062, 1.4912, 0.6055, 1.9309], + device='cuda:0'), covar=tensor([0.0622, 0.0355, 0.0272, 0.0594, 0.0454, 0.0808, 0.0817, 0.0272], + device='cuda:0'), in_proj_covar=tensor([0.0424, 0.0364, 0.0314, 0.0418, 0.0351, 0.0512, 0.0368, 0.0391], + device='cuda:0'), out_proj_covar=tensor([1.1621e-04, 9.7185e-05, 8.3308e-05, 1.1233e-04, 9.4595e-05, 1.4822e-04, + 1.0077e-04, 1.0562e-04], device='cuda:0') +2023-02-06 18:45:03,659 INFO [train.py:901] (0/4) Epoch 16, batch 6050, loss[loss=0.2404, simple_loss=0.3093, pruned_loss=0.08572, over 7545.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2952, pruned_loss=0.06758, over 1609519.79 frames. ], batch size: 18, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:39,307 INFO [train.py:901] (0/4) Epoch 16, batch 6100, loss[loss=0.2258, simple_loss=0.3123, pruned_loss=0.06962, over 8634.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2951, pruned_loss=0.06731, over 1615890.11 frames. ], batch size: 49, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:54,962 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8873, 1.5084, 3.1999, 1.4313, 2.3178, 3.4991, 3.5518, 3.0154], + device='cuda:0'), covar=tensor([0.1176, 0.1802, 0.0424, 0.2144, 0.1110, 0.0251, 0.0615, 0.0589], + device='cuda:0'), in_proj_covar=tensor([0.0281, 0.0313, 0.0275, 0.0302, 0.0294, 0.0251, 0.0384, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 18:45:55,484 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.555e+02 2.947e+02 3.627e+02 8.036e+02, threshold=5.895e+02, percent-clipped=1.0 +2023-02-06 18:46:04,777 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.86 vs. 
limit=5.0 +2023-02-06 18:46:06,187 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 18:46:09,094 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 18:46:13,720 INFO [train.py:901] (0/4) Epoch 16, batch 6150, loss[loss=0.2291, simple_loss=0.2973, pruned_loss=0.08044, over 8079.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2948, pruned_loss=0.06732, over 1612772.23 frames. ], batch size: 21, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:15,216 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7149, 1.9016, 1.6322, 2.2585, 1.0817, 1.4143, 1.7152, 1.9507], + device='cuda:0'), covar=tensor([0.0813, 0.0708, 0.0974, 0.0506, 0.1119, 0.1411, 0.0839, 0.0723], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0202, 0.0247, 0.0211, 0.0207, 0.0247, 0.0253, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 18:46:15,286 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0595, 1.8648, 2.4322, 1.9907, 2.3217, 2.0904, 1.8156, 1.1140], + device='cuda:0'), covar=tensor([0.4634, 0.4101, 0.1469, 0.2968, 0.2002, 0.2480, 0.1751, 0.4365], + device='cuda:0'), in_proj_covar=tensor([0.0911, 0.0923, 0.0766, 0.0897, 0.0962, 0.0845, 0.0723, 0.0796], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:46:48,829 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127445.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:46:49,325 INFO [train.py:901] (0/4) Epoch 16, batch 6200, loss[loss=0.227, simple_loss=0.2996, pruned_loss=0.07718, over 7920.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2954, pruned_loss=0.06804, over 1612696.00 frames. ], batch size: 20, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:04,240 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3656, 2.8444, 2.4567, 3.9437, 1.8817, 2.1431, 2.6190, 3.0352], + device='cuda:0'), covar=tensor([0.0765, 0.0775, 0.0815, 0.0220, 0.1046, 0.1231, 0.0892, 0.0733], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0201, 0.0247, 0.0211, 0.0207, 0.0246, 0.0253, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 18:47:04,711 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.656e+02 3.320e+02 4.256e+02 8.643e+02, threshold=6.639e+02, percent-clipped=4.0 +2023-02-06 18:47:08,571 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.08 vs. limit=5.0 +2023-02-06 18:47:23,438 INFO [train.py:901] (0/4) Epoch 16, batch 6250, loss[loss=0.2386, simple_loss=0.3164, pruned_loss=0.0804, over 8134.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2967, pruned_loss=0.06854, over 1616395.11 frames. ], batch size: 22, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:57,838 INFO [train.py:901] (0/4) Epoch 16, batch 6300, loss[loss=0.2593, simple_loss=0.3329, pruned_loss=0.09288, over 8188.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2976, pruned_loss=0.06932, over 1619436.77 frames. 
], batch size: 23, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:58,586 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:47:59,972 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:14,536 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.653e+02 3.258e+02 3.936e+02 6.732e+02, threshold=6.516e+02, percent-clipped=2.0 +2023-02-06 18:48:14,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0575, 2.2069, 1.8455, 2.8867, 1.2367, 1.5243, 1.8899, 2.2592], + device='cuda:0'), covar=tensor([0.0734, 0.0840, 0.1024, 0.0373, 0.1232, 0.1517, 0.1022, 0.0822], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0203, 0.0250, 0.0213, 0.0210, 0.0250, 0.0256, 0.0213], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 18:48:17,987 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:32,718 INFO [train.py:901] (0/4) Epoch 16, batch 6350, loss[loss=0.238, simple_loss=0.322, pruned_loss=0.07696, over 8295.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2973, pruned_loss=0.07007, over 1614587.85 frames. ], batch size: 23, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:48:43,693 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:45,101 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9779, 1.6665, 1.9253, 1.6731, 1.0210, 1.6509, 2.2211, 2.4728], + device='cuda:0'), covar=tensor([0.0428, 0.1245, 0.1648, 0.1419, 0.0598, 0.1526, 0.0618, 0.0548], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0156, 0.0101, 0.0162, 0.0114, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 18:49:07,008 INFO [train.py:901] (0/4) Epoch 16, batch 6400, loss[loss=0.2595, simple_loss=0.3296, pruned_loss=0.09472, over 8513.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.297, pruned_loss=0.07027, over 1611384.44 frames. ], batch size: 26, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:49:24,184 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.402e+02 3.034e+02 3.710e+02 8.847e+02, threshold=6.069e+02, percent-clipped=1.0 +2023-02-06 18:49:25,915 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0 +2023-02-06 18:49:32,551 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:34,762 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 18:49:43,231 INFO [train.py:901] (0/4) Epoch 16, batch 6450, loss[loss=0.2141, simple_loss=0.272, pruned_loss=0.0781, over 7531.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2971, pruned_loss=0.06992, over 1613423.37 frames. ], batch size: 18, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:04,136 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:50:15,490 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.47 vs. 
limit=5.0 +2023-02-06 18:50:17,017 INFO [train.py:901] (0/4) Epoch 16, batch 6500, loss[loss=0.2897, simple_loss=0.3514, pruned_loss=0.1139, over 8752.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2973, pruned_loss=0.0699, over 1616465.46 frames. ], batch size: 30, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:32,623 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.427e+02 3.150e+02 4.006e+02 1.604e+03, threshold=6.301e+02, percent-clipped=4.0 +2023-02-06 18:50:48,407 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127789.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:50:52,951 INFO [train.py:901] (0/4) Epoch 16, batch 6550, loss[loss=0.2127, simple_loss=0.2782, pruned_loss=0.07364, over 7808.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2969, pruned_loss=0.06966, over 1618741.02 frames. ], batch size: 20, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:53,780 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:50:59,469 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4385, 2.0366, 4.3904, 1.3755, 2.9596, 2.0880, 1.4640, 2.8093], + device='cuda:0'), covar=tensor([0.2256, 0.2744, 0.0712, 0.4892, 0.1821, 0.3294, 0.2522, 0.2619], + device='cuda:0'), in_proj_covar=tensor([0.0505, 0.0559, 0.0544, 0.0612, 0.0626, 0.0567, 0.0503, 0.0618], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:51:17,242 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 18:51:27,456 INFO [train.py:901] (0/4) Epoch 16, batch 6600, loss[loss=0.1888, simple_loss=0.2633, pruned_loss=0.05715, over 7704.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.298, pruned_loss=0.07018, over 1615130.22 frames. ], batch size: 18, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:51:30,345 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0427, 1.6074, 1.3517, 1.5288, 1.3889, 1.2407, 1.1827, 1.3166], + device='cuda:0'), covar=tensor([0.1029, 0.0437, 0.1129, 0.0533, 0.0704, 0.1322, 0.0920, 0.0738], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0232, 0.0322, 0.0299, 0.0298, 0.0330, 0.0339, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 18:51:36,809 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 18:51:38,966 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6652, 1.8407, 1.6157, 2.2949, 1.0071, 1.3227, 1.6341, 1.7861], + device='cuda:0'), covar=tensor([0.0878, 0.0855, 0.1053, 0.0440, 0.1145, 0.1533, 0.0939, 0.0956], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0203, 0.0249, 0.0213, 0.0211, 0.0250, 0.0256, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 18:51:42,287 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:42,775 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.405e+02 2.899e+02 3.574e+02 1.034e+03, threshold=5.799e+02, percent-clipped=3.0 +2023-02-06 18:51:57,071 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:57,604 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127891.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:52:00,746 INFO [train.py:901] (0/4) Epoch 16, batch 6650, loss[loss=0.2105, simple_loss=0.2923, pruned_loss=0.06436, over 7984.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.297, pruned_loss=0.06963, over 1612661.66 frames. ], batch size: 21, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:07,580 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127904.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:52:28,264 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0642, 1.1811, 3.1995, 1.0781, 2.7826, 2.6718, 2.9163, 2.8132], + device='cuda:0'), covar=tensor([0.0801, 0.4286, 0.0902, 0.4029, 0.1479, 0.1095, 0.0798, 0.0956], + device='cuda:0'), in_proj_covar=tensor([0.0566, 0.0622, 0.0645, 0.0592, 0.0672, 0.0572, 0.0567, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:52:36,163 INFO [train.py:901] (0/4) Epoch 16, batch 6700, loss[loss=0.2088, simple_loss=0.2853, pruned_loss=0.06616, over 7967.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2962, pruned_loss=0.06883, over 1614345.43 frames. ], batch size: 21, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:52,425 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.543e+02 2.898e+02 3.564e+02 8.195e+02, threshold=5.796e+02, percent-clipped=3.0 +2023-02-06 18:53:01,215 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:10,440 INFO [train.py:901] (0/4) Epoch 16, batch 6750, loss[loss=0.2397, simple_loss=0.3162, pruned_loss=0.0816, over 8619.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2965, pruned_loss=0.06915, over 1612796.33 frames. 
], batch size: 39, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:13,303 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-128000.pt +2023-02-06 18:53:18,491 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:19,157 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:32,876 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128024.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:47,638 INFO [train.py:901] (0/4) Epoch 16, batch 6800, loss[loss=0.2015, simple_loss=0.281, pruned_loss=0.06096, over 7810.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2967, pruned_loss=0.06941, over 1611272.37 frames. ], batch size: 20, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:51,040 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 18:54:04,017 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.604e+02 3.143e+02 4.008e+02 8.483e+02, threshold=6.287e+02, percent-clipped=3.0 +2023-02-06 18:54:22,239 INFO [train.py:901] (0/4) Epoch 16, batch 6850, loss[loss=0.1984, simple_loss=0.2933, pruned_loss=0.05175, over 8036.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2982, pruned_loss=0.06956, over 1615869.12 frames. ], batch size: 22, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:54:36,134 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9761, 2.1411, 1.7762, 2.7223, 1.2196, 1.4965, 1.9715, 2.1693], + device='cuda:0'), covar=tensor([0.0769, 0.0836, 0.0969, 0.0403, 0.1134, 0.1481, 0.0986, 0.0739], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0202, 0.0249, 0.0212, 0.0209, 0.0249, 0.0253, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 18:54:40,681 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 18:54:50,127 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 18:54:53,400 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128139.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:54,725 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128141.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:58,091 INFO [train.py:901] (0/4) Epoch 16, batch 6900, loss[loss=0.2203, simple_loss=0.2951, pruned_loss=0.07274, over 8035.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2977, pruned_loss=0.06926, over 1615154.57 frames. ], batch size: 22, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:54:58,582 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 18:55:06,442 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. 
limit=2.0 +2023-02-06 18:55:08,151 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128160.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:55:14,251 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.605e+02 3.172e+02 3.868e+02 9.306e+02, threshold=6.344e+02, percent-clipped=5.0 +2023-02-06 18:55:25,853 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128185.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:55:32,787 INFO [train.py:901] (0/4) Epoch 16, batch 6950, loss[loss=0.2299, simple_loss=0.3093, pruned_loss=0.07523, over 8492.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.297, pruned_loss=0.06893, over 1612049.07 frames. ], batch size: 29, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:43,515 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:55:48,025 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 18:55:58,347 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:05,943 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8688, 3.7499, 3.4788, 1.8489, 3.3763, 3.4804, 3.3530, 3.3029], + device='cuda:0'), covar=tensor([0.0800, 0.0647, 0.1095, 0.4045, 0.0874, 0.0890, 0.1444, 0.0780], + device='cuda:0'), in_proj_covar=tensor([0.0492, 0.0406, 0.0408, 0.0505, 0.0396, 0.0408, 0.0400, 0.0352], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 18:56:07,210 INFO [train.py:901] (0/4) Epoch 16, batch 7000, loss[loss=0.1918, simple_loss=0.2754, pruned_loss=0.05412, over 7973.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2967, pruned_loss=0.06917, over 1605294.55 frames. ], batch size: 21, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:56:15,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:19,637 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:24,008 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.690e+02 3.457e+02 5.056e+02 8.270e+02, threshold=6.915e+02, percent-clipped=6.0 +2023-02-06 18:56:36,181 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:42,585 INFO [train.py:901] (0/4) Epoch 16, batch 7050, loss[loss=0.2092, simple_loss=0.2827, pruned_loss=0.06782, over 8292.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2961, pruned_loss=0.06899, over 1605319.78 frames. ], batch size: 23, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:03,873 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:11,329 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:16,462 INFO [train.py:901] (0/4) Epoch 16, batch 7100, loss[loss=0.1891, simple_loss=0.2752, pruned_loss=0.05155, over 8523.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2968, pruned_loss=0.06939, over 1605503.23 frames. 
], batch size: 39, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:18,638 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:33,895 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.456e+02 3.083e+02 3.766e+02 8.441e+02, threshold=6.166e+02, percent-clipped=2.0 +2023-02-06 18:57:52,015 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128395.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:52,512 INFO [train.py:901] (0/4) Epoch 16, batch 7150, loss[loss=0.2569, simple_loss=0.318, pruned_loss=0.09784, over 6699.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2972, pruned_loss=0.06917, over 1606284.21 frames. ], batch size: 72, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:00,908 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7489, 1.7888, 2.5683, 1.6264, 1.2643, 2.4917, 0.4676, 1.4395], + device='cuda:0'), covar=tensor([0.2152, 0.1457, 0.0292, 0.1551, 0.3209, 0.0340, 0.2538, 0.1669], + device='cuda:0'), in_proj_covar=tensor([0.0176, 0.0185, 0.0114, 0.0215, 0.0260, 0.0120, 0.0165, 0.0180], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 18:58:09,831 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:13,952 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6711, 1.9383, 2.0756, 1.2816, 2.1703, 1.5682, 0.6093, 1.8326], + device='cuda:0'), covar=tensor([0.0444, 0.0297, 0.0226, 0.0448, 0.0341, 0.0757, 0.0731, 0.0238], + device='cuda:0'), in_proj_covar=tensor([0.0427, 0.0368, 0.0316, 0.0425, 0.0353, 0.0515, 0.0374, 0.0397], + device='cuda:0'), out_proj_covar=tensor([1.1705e-04, 9.8409e-05, 8.3824e-05, 1.1425e-04, 9.5101e-05, 1.4893e-04, + 1.0248e-04, 1.0749e-04], device='cuda:0') +2023-02-06 18:58:17,204 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:27,263 INFO [train.py:901] (0/4) Epoch 16, batch 7200, loss[loss=0.2506, simple_loss=0.3251, pruned_loss=0.08798, over 8460.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2971, pruned_loss=0.0693, over 1607243.00 frames. ], batch size: 29, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:42,533 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.378e+02 2.905e+02 3.370e+02 6.119e+02, threshold=5.810e+02, percent-clipped=0.0 +2023-02-06 18:59:02,793 INFO [train.py:901] (0/4) Epoch 16, batch 7250, loss[loss=0.2126, simple_loss=0.2949, pruned_loss=0.06515, over 8103.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2979, pruned_loss=0.06974, over 1605847.20 frames. ], batch size: 23, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:13,772 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:31,355 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:37,226 INFO [train.py:901] (0/4) Epoch 16, batch 7300, loss[loss=0.1756, simple_loss=0.2617, pruned_loss=0.04478, over 8141.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2966, pruned_loss=0.06926, over 1608854.20 frames. 
], batch size: 22, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:37,480 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4638, 2.1279, 3.1976, 2.5280, 3.1085, 2.3599, 2.0746, 1.8216], + device='cuda:0'), covar=tensor([0.4912, 0.5024, 0.1725, 0.3408, 0.2403, 0.2781, 0.1863, 0.5343], + device='cuda:0'), in_proj_covar=tensor([0.0911, 0.0924, 0.0764, 0.0896, 0.0961, 0.0848, 0.0723, 0.0797], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 18:59:45,716 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 18:59:51,672 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.74 vs. limit=5.0 +2023-02-06 18:59:52,609 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.470e+02 2.980e+02 3.722e+02 1.252e+03, threshold=5.960e+02, percent-clipped=4.0 +2023-02-06 19:00:02,308 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:12,491 INFO [train.py:901] (0/4) Epoch 16, batch 7350, loss[loss=0.2889, simple_loss=0.3437, pruned_loss=0.117, over 6850.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2971, pruned_loss=0.06976, over 1610716.78 frames. ], batch size: 72, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:18,850 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2007, 1.9686, 2.7302, 2.2320, 2.6803, 2.1732, 1.8685, 1.4456], + device='cuda:0'), covar=tensor([0.5074, 0.4883, 0.1778, 0.3337, 0.2349, 0.2668, 0.1878, 0.5089], + device='cuda:0'), in_proj_covar=tensor([0.0916, 0.0927, 0.0768, 0.0903, 0.0965, 0.0853, 0.0727, 0.0801], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 19:00:19,503 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:21,501 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:31,426 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 19:00:36,120 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:47,435 INFO [train.py:901] (0/4) Epoch 16, batch 7400, loss[loss=0.2201, simple_loss=0.2835, pruned_loss=0.07835, over 7689.00 frames. ], tot_loss[loss=0.217, simple_loss=0.296, pruned_loss=0.06897, over 1607181.74 frames. ], batch size: 18, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:49,569 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 19:01:02,831 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.610e+02 3.305e+02 3.788e+02 1.058e+03, threshold=6.610e+02, percent-clipped=7.0 +2023-02-06 19:01:04,782 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 19:01:11,761 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:01:21,080 INFO [train.py:901] (0/4) Epoch 16, batch 7450, loss[loss=0.1979, simple_loss=0.285, pruned_loss=0.05543, over 8295.00 frames. 
], tot_loss[loss=0.2178, simple_loss=0.2967, pruned_loss=0.06944, over 1607048.41 frames. ], batch size: 23, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:01:30,642 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 19:01:56,804 INFO [train.py:901] (0/4) Epoch 16, batch 7500, loss[loss=0.1837, simple_loss=0.2739, pruned_loss=0.04674, over 7651.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2962, pruned_loss=0.06907, over 1611095.93 frames. ], batch size: 19, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:02:13,129 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.417e+02 2.923e+02 3.614e+02 6.549e+02, threshold=5.847e+02, percent-clipped=0.0 +2023-02-06 19:02:17,171 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:31,136 INFO [train.py:901] (0/4) Epoch 16, batch 7550, loss[loss=0.2025, simple_loss=0.2888, pruned_loss=0.05812, over 8024.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2962, pruned_loss=0.06916, over 1610634.50 frames. ], batch size: 22, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:02:32,031 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:33,338 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:03:07,396 INFO [train.py:901] (0/4) Epoch 16, batch 7600, loss[loss=0.2334, simple_loss=0.3174, pruned_loss=0.07467, over 8528.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2964, pruned_loss=0.06915, over 1613264.37 frames. ], batch size: 28, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:16,512 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3216, 2.4484, 1.8913, 2.0941, 1.9388, 1.5706, 1.8756, 1.9721], + device='cuda:0'), covar=tensor([0.1419, 0.0373, 0.1100, 0.0616, 0.0695, 0.1470, 0.0944, 0.0980], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0233, 0.0326, 0.0300, 0.0298, 0.0331, 0.0342, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 19:03:21,390 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3277, 1.4893, 1.4252, 1.8608, 0.7792, 1.2113, 1.3293, 1.4785], + device='cuda:0'), covar=tensor([0.0978, 0.0850, 0.1057, 0.0509, 0.1173, 0.1636, 0.0774, 0.0792], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0200, 0.0246, 0.0209, 0.0207, 0.0247, 0.0250, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 19:03:22,829 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6114, 1.9535, 1.9688, 1.2158, 2.0614, 1.6071, 0.4484, 1.7899], + device='cuda:0'), covar=tensor([0.0394, 0.0272, 0.0221, 0.0438, 0.0354, 0.0718, 0.0690, 0.0225], + device='cuda:0'), in_proj_covar=tensor([0.0429, 0.0368, 0.0317, 0.0424, 0.0353, 0.0514, 0.0375, 0.0395], + device='cuda:0'), out_proj_covar=tensor([1.1750e-04, 9.8324e-05, 8.4154e-05, 1.1383e-04, 9.4979e-05, 1.4849e-04, + 1.0268e-04, 1.0664e-04], device='cuda:0') +2023-02-06 19:03:23,268 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.439e+02 3.123e+02 4.017e+02 8.994e+02, threshold=6.245e+02, percent-clipped=5.0 +2023-02-06 19:03:38,600 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, 
warmup_end=2666.7, batch_count=128890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:03:42,475 INFO [train.py:901] (0/4) Epoch 16, batch 7650, loss[loss=0.2215, simple_loss=0.312, pruned_loss=0.06549, over 8473.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2962, pruned_loss=0.06885, over 1613424.37 frames. ], batch size: 25, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:50,519 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128908.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:03:52,627 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3820, 2.0444, 2.8350, 2.3289, 2.8318, 2.3622, 2.0014, 1.4325], + device='cuda:0'), covar=tensor([0.4921, 0.4685, 0.1597, 0.2988, 0.2014, 0.2602, 0.1808, 0.5066], + device='cuda:0'), in_proj_covar=tensor([0.0906, 0.0917, 0.0759, 0.0895, 0.0955, 0.0844, 0.0718, 0.0791], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 19:04:17,494 INFO [train.py:901] (0/4) Epoch 16, batch 7700, loss[loss=0.2132, simple_loss=0.3034, pruned_loss=0.06144, over 8246.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2962, pruned_loss=0.06891, over 1615710.15 frames. ], batch size: 24, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:04:34,543 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.361e+02 3.016e+02 3.880e+02 7.767e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 19:04:42,146 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 19:04:52,876 INFO [train.py:901] (0/4) Epoch 16, batch 7750, loss[loss=0.2455, simple_loss=0.317, pruned_loss=0.08703, over 8696.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2957, pruned_loss=0.06871, over 1614991.81 frames. ], batch size: 34, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:05:26,240 INFO [train.py:901] (0/4) Epoch 16, batch 7800, loss[loss=0.1985, simple_loss=0.2612, pruned_loss=0.06788, over 7699.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2956, pruned_loss=0.06813, over 1617099.35 frames. ], batch size: 18, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:05:31,086 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:05:41,938 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.423e+02 2.949e+02 3.975e+02 9.373e+02, threshold=5.898e+02, percent-clipped=5.0 +2023-02-06 19:05:48,210 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:00,669 INFO [train.py:901] (0/4) Epoch 16, batch 7850, loss[loss=0.2002, simple_loss=0.2957, pruned_loss=0.05235, over 8335.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2965, pruned_loss=0.06902, over 1618413.55 frames. 
], batch size: 25, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:21,688 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5132, 1.4974, 1.8526, 1.4163, 1.1874, 1.8132, 0.1886, 1.2555], + device='cuda:0'), covar=tensor([0.2189, 0.1444, 0.0443, 0.1064, 0.3204, 0.0503, 0.2637, 0.1349], + device='cuda:0'), in_proj_covar=tensor([0.0178, 0.0185, 0.0115, 0.0216, 0.0264, 0.0121, 0.0166, 0.0180], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 19:06:32,322 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:34,271 INFO [train.py:901] (0/4) Epoch 16, batch 7900, loss[loss=0.2161, simple_loss=0.2978, pruned_loss=0.06719, over 8462.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2959, pruned_loss=0.06865, over 1614106.90 frames. ], batch size: 25, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:34,513 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:51,032 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.289e+02 2.786e+02 3.620e+02 6.776e+02, threshold=5.572e+02, percent-clipped=2.0 +2023-02-06 19:06:51,889 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:08,449 INFO [train.py:901] (0/4) Epoch 16, batch 7950, loss[loss=0.2943, simple_loss=0.347, pruned_loss=0.1208, over 6745.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2973, pruned_loss=0.06967, over 1616200.67 frames. ], batch size: 71, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:27,495 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2793, 2.0344, 2.9177, 2.3050, 2.6939, 2.3049, 1.9582, 1.5284], + device='cuda:0'), covar=tensor([0.4864, 0.4763, 0.1561, 0.3410, 0.2375, 0.2578, 0.1759, 0.4886], + device='cuda:0'), in_proj_covar=tensor([0.0905, 0.0918, 0.0758, 0.0892, 0.0954, 0.0844, 0.0717, 0.0792], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 19:07:42,917 INFO [train.py:901] (0/4) Epoch 16, batch 8000, loss[loss=0.2111, simple_loss=0.2975, pruned_loss=0.06233, over 8028.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.297, pruned_loss=0.06924, over 1617205.43 frames. ], batch size: 22, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:47,177 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129252.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:07:51,178 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:59,068 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.511e+02 2.964e+02 3.601e+02 8.820e+02, threshold=5.927e+02, percent-clipped=6.0 +2023-02-06 19:08:16,584 INFO [train.py:901] (0/4) Epoch 16, batch 8050, loss[loss=0.1775, simple_loss=0.2651, pruned_loss=0.04498, over 7937.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2947, pruned_loss=0.06881, over 1595630.15 frames. 
], batch size: 20, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:08:40,007 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-16.pt +2023-02-06 19:08:52,804 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 19:08:56,563 INFO [train.py:901] (0/4) Epoch 17, batch 0, loss[loss=0.2397, simple_loss=0.315, pruned_loss=0.08214, over 8472.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.315, pruned_loss=0.08214, over 8472.00 frames. ], batch size: 25, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:08:56,564 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 19:09:04,866 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5004, 1.7277, 2.5627, 1.3157, 1.8867, 1.7808, 1.5535, 1.8138], + device='cuda:0'), covar=tensor([0.1725, 0.2444, 0.0793, 0.4270, 0.1698, 0.3018, 0.2123, 0.2087], + device='cuda:0'), in_proj_covar=tensor([0.0504, 0.0561, 0.0543, 0.0612, 0.0631, 0.0570, 0.0501, 0.0620], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 19:09:07,558 INFO [train.py:935] (0/4) Epoch 17, validation: loss=0.1792, simple_loss=0.2794, pruned_loss=0.03944, over 944034.00 frames. +2023-02-06 19:09:07,559 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 19:09:21,171 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 19:09:26,471 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.83 vs. limit=5.0 +2023-02-06 19:09:33,851 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129367.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:09:35,634 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.551e+02 3.127e+02 3.678e+02 8.568e+02, threshold=6.254e+02, percent-clipped=4.0 +2023-02-06 19:09:41,816 INFO [train.py:901] (0/4) Epoch 17, batch 50, loss[loss=0.1754, simple_loss=0.251, pruned_loss=0.0499, over 7555.00 frames. ], tot_loss[loss=0.221, simple_loss=0.3001, pruned_loss=0.07089, over 366094.84 frames. ], batch size: 18, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:09:54,005 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 19:10:18,443 INFO [train.py:901] (0/4) Epoch 17, batch 100, loss[loss=0.2604, simple_loss=0.3235, pruned_loss=0.09863, over 7048.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2993, pruned_loss=0.06994, over 646324.58 frames. ], batch size: 71, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:10:18,452 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 19:10:19,954 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129431.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:10:28,659 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0469, 1.6653, 6.1843, 2.1993, 5.7142, 5.2718, 5.7477, 5.7150], + device='cuda:0'), covar=tensor([0.0369, 0.4432, 0.0292, 0.3609, 0.0787, 0.0770, 0.0358, 0.0387], + device='cuda:0'), in_proj_covar=tensor([0.0559, 0.0618, 0.0638, 0.0587, 0.0664, 0.0565, 0.0563, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 19:10:31,368 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3369, 1.4773, 4.5469, 1.7707, 4.0034, 3.8429, 4.1300, 4.0491], + device='cuda:0'), covar=tensor([0.0517, 0.4527, 0.0477, 0.3658, 0.1091, 0.0916, 0.0516, 0.0603], + device='cuda:0'), in_proj_covar=tensor([0.0559, 0.0618, 0.0638, 0.0587, 0.0664, 0.0566, 0.0563, 0.0622], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 19:10:44,180 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6073, 1.8144, 2.6591, 1.4658, 2.2137, 1.8292, 1.6613, 2.0642], + device='cuda:0'), covar=tensor([0.1438, 0.1964, 0.0702, 0.3396, 0.1370, 0.2367, 0.1707, 0.1999], + device='cuda:0'), in_proj_covar=tensor([0.0508, 0.0564, 0.0547, 0.0615, 0.0635, 0.0572, 0.0506, 0.0623], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 19:10:46,022 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.489e+02 3.062e+02 3.657e+02 7.822e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 19:10:52,175 INFO [train.py:901] (0/4) Epoch 17, batch 150, loss[loss=0.2192, simple_loss=0.3016, pruned_loss=0.06845, over 8657.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3012, pruned_loss=0.07126, over 859628.51 frames. ], batch size: 34, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:18,269 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:29,026 INFO [train.py:901] (0/4) Epoch 17, batch 200, loss[loss=0.1751, simple_loss=0.2522, pruned_loss=0.04897, over 7420.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2993, pruned_loss=0.07032, over 1027570.09 frames. ], batch size: 17, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:36,223 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:57,078 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.455e+02 2.902e+02 3.926e+02 7.649e+02, threshold=5.804e+02, percent-clipped=5.0 +2023-02-06 19:12:03,435 INFO [train.py:901] (0/4) Epoch 17, batch 250, loss[loss=0.1808, simple_loss=0.2604, pruned_loss=0.05061, over 7977.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2987, pruned_loss=0.06993, over 1159094.41 frames. ], batch size: 21, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:09,633 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-06 19:12:11,839 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7722, 3.6623, 2.2559, 2.5613, 2.5237, 1.9358, 2.4347, 2.8663], + device='cuda:0'), covar=tensor([0.1653, 0.0284, 0.1150, 0.0812, 0.0815, 0.1469, 0.1149, 0.1037], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0236, 0.0330, 0.0303, 0.0301, 0.0337, 0.0344, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 19:12:18,377 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 19:12:33,720 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129623.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:12:38,226 INFO [train.py:901] (0/4) Epoch 17, batch 300, loss[loss=0.2374, simple_loss=0.3138, pruned_loss=0.08056, over 8035.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2976, pruned_loss=0.06921, over 1267026.53 frames. ], batch size: 22, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:39,081 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:12:53,503 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129648.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:13:08,146 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.453e+02 3.064e+02 3.747e+02 1.027e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-06 19:13:14,341 INFO [train.py:901] (0/4) Epoch 17, batch 350, loss[loss=0.2299, simple_loss=0.3109, pruned_loss=0.0745, over 8500.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2979, pruned_loss=0.06932, over 1345410.64 frames. ], batch size: 26, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:13:47,838 INFO [train.py:901] (0/4) Epoch 17, batch 400, loss[loss=0.2156, simple_loss=0.293, pruned_loss=0.06912, over 8192.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2973, pruned_loss=0.06879, over 1404170.04 frames. ], batch size: 23, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:13:57,416 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9401, 2.0798, 1.7756, 2.4932, 1.2151, 1.5846, 1.8312, 2.1733], + device='cuda:0'), covar=tensor([0.0757, 0.0815, 0.0985, 0.0438, 0.1189, 0.1437, 0.0867, 0.0740], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0202, 0.0248, 0.0212, 0.0211, 0.0249, 0.0253, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 19:14:18,001 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.355e+02 2.898e+02 3.830e+02 8.224e+02, threshold=5.797e+02, percent-clipped=7.0 +2023-02-06 19:14:21,455 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129775.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:14:24,069 INFO [train.py:901] (0/4) Epoch 17, batch 450, loss[loss=0.2144, simple_loss=0.2949, pruned_loss=0.06699, over 8363.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2976, pruned_loss=0.06864, over 1453639.38 frames. 
], batch size: 24, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:44,499 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:14:54,733 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7941, 5.9225, 5.1165, 2.5055, 5.1545, 5.5442, 5.4073, 5.2336], + device='cuda:0'), covar=tensor([0.0514, 0.0378, 0.0960, 0.4216, 0.0696, 0.0852, 0.1039, 0.0624], + device='cuda:0'), in_proj_covar=tensor([0.0505, 0.0414, 0.0416, 0.0512, 0.0406, 0.0416, 0.0404, 0.0358], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 19:14:58,027 INFO [train.py:901] (0/4) Epoch 17, batch 500, loss[loss=0.198, simple_loss=0.2911, pruned_loss=0.05247, over 8107.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2972, pruned_loss=0.06867, over 1489161.75 frames. ], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:20,979 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8112, 1.4161, 3.9578, 1.3588, 3.5043, 3.2954, 3.5681, 3.4659], + device='cuda:0'), covar=tensor([0.0617, 0.4401, 0.0618, 0.4144, 0.1249, 0.1130, 0.0695, 0.0774], + device='cuda:0'), in_proj_covar=tensor([0.0561, 0.0620, 0.0643, 0.0590, 0.0669, 0.0574, 0.0569, 0.0628], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 19:15:28,004 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.377e+02 2.910e+02 3.862e+02 1.132e+03, threshold=5.820e+02, percent-clipped=8.0 +2023-02-06 19:15:35,664 INFO [train.py:901] (0/4) Epoch 17, batch 550, loss[loss=0.3088, simple_loss=0.3631, pruned_loss=0.1272, over 7232.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2975, pruned_loss=0.06853, over 1516461.71 frames. ], batch size: 72, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:43,362 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129890.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:16:10,114 INFO [train.py:901] (0/4) Epoch 17, batch 600, loss[loss=0.2519, simple_loss=0.3263, pruned_loss=0.08877, over 8669.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.296, pruned_loss=0.0677, over 1540374.83 frames. ], batch size: 39, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:16:19,705 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 19:16:38,503 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 2.576e+02 2.936e+02 3.639e+02 7.352e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-06 19:16:41,330 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:16:44,755 INFO [train.py:901] (0/4) Epoch 17, batch 650, loss[loss=0.2017, simple_loss=0.2904, pruned_loss=0.0565, over 8485.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2947, pruned_loss=0.06678, over 1556403.19 frames. 
], batch size: 29, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:02,611 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-130000.pt +2023-02-06 19:17:16,433 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:17,837 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:23,793 INFO [train.py:901] (0/4) Epoch 17, batch 700, loss[loss=0.2515, simple_loss=0.3307, pruned_loss=0.08614, over 8610.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2936, pruned_loss=0.06641, over 1569389.83 frames. ], batch size: 34, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:32,190 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1627, 2.3628, 1.8183, 2.8807, 1.3236, 1.6479, 1.8933, 2.3490], + device='cuda:0'), covar=tensor([0.0654, 0.0724, 0.0957, 0.0328, 0.1233, 0.1416, 0.1028, 0.0808], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0201, 0.0249, 0.0211, 0.0210, 0.0250, 0.0252, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 19:17:51,866 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.350e+02 2.811e+02 3.683e+02 1.098e+03, threshold=5.622e+02, percent-clipped=6.0 +2023-02-06 19:17:53,079 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.83 vs. limit=5.0 +2023-02-06 19:17:58,270 INFO [train.py:901] (0/4) Epoch 17, batch 750, loss[loss=0.1967, simple_loss=0.2828, pruned_loss=0.05534, over 8302.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2935, pruned_loss=0.06699, over 1581297.72 frames. ], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:05,545 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:18:08,269 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 19:18:19,413 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 19:18:36,034 INFO [train.py:901] (0/4) Epoch 17, batch 800, loss[loss=0.2294, simple_loss=0.3145, pruned_loss=0.07214, over 8331.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2942, pruned_loss=0.06739, over 1586748.65 frames. ], batch size: 25, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:48,055 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130146.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:18:52,755 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:19:04,225 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.363e+02 2.676e+02 3.408e+02 8.560e+02, threshold=5.353e+02, percent-clipped=3.0 +2023-02-06 19:19:05,153 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130171.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:19:10,475 INFO [train.py:901] (0/4) Epoch 17, batch 850, loss[loss=0.1838, simple_loss=0.2576, pruned_loss=0.05499, over 7431.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.295, pruned_loss=0.0677, over 1595321.57 frames. 
], batch size: 17, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:19:47,307 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 19:19:47,570 INFO [train.py:901] (0/4) Epoch 17, batch 900, loss[loss=0.2205, simple_loss=0.2972, pruned_loss=0.0719, over 8124.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2958, pruned_loss=0.06773, over 1606436.53 frames. ], batch size: 22, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:15,364 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:20:16,502 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.489e+02 3.023e+02 3.878e+02 8.176e+02, threshold=6.045e+02, percent-clipped=7.0 +2023-02-06 19:20:19,508 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7493, 1.8259, 2.3849, 1.5458, 1.4286, 2.3862, 0.3189, 1.4309], + device='cuda:0'), covar=tensor([0.1983, 0.1169, 0.0308, 0.1362, 0.2694, 0.0366, 0.2454, 0.1470], + device='cuda:0'), in_proj_covar=tensor([0.0177, 0.0184, 0.0115, 0.0217, 0.0262, 0.0120, 0.0166, 0.0178], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 19:20:22,803 INFO [train.py:901] (0/4) Epoch 17, batch 950, loss[loss=0.2304, simple_loss=0.3078, pruned_loss=0.07655, over 8303.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2958, pruned_loss=0.06796, over 1605627.63 frames. ], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:43,393 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 19:20:57,198 INFO [train.py:901] (0/4) Epoch 17, batch 1000, loss[loss=0.2021, simple_loss=0.2897, pruned_loss=0.05727, over 8190.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2956, pruned_loss=0.0676, over 1609301.82 frames. ], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:21:04,864 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8223, 2.0242, 1.7480, 2.6053, 1.2712, 1.5375, 1.8448, 2.0694], + device='cuda:0'), covar=tensor([0.0779, 0.0799, 0.1072, 0.0402, 0.1082, 0.1420, 0.0856, 0.0784], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0202, 0.0252, 0.0213, 0.0210, 0.0250, 0.0255, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 19:21:09,193 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:20,018 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 19:21:21,936 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:23,300 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:27,497 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.680e+02 3.059e+02 3.924e+02 8.380e+02, threshold=6.118e+02, percent-clipped=2.0 +2023-02-06 19:21:27,749 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:32,308 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0
+2023-02-06 19:21:33,160 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 19:21:33,848 INFO [train.py:901] (0/4) Epoch 17, batch 1050, loss[loss=0.194, simple_loss=0.2846, pruned_loss=0.05175, over 8136.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2965, pruned_loss=0.06795, over 1613061.44 frames. ], batch size: 22, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:21:49,924 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:22:08,442 INFO [train.py:901] (0/4) Epoch 17, batch 1100, loss[loss=0.1895, simple_loss=0.2774, pruned_loss=0.05083, over 8260.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2977, pruned_loss=0.06873, over 1619330.83 frames. ], batch size: 24, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:22:23,041 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130450.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:22:27,192 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130456.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:22:38,657 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.545e+02 2.978e+02 3.676e+02 6.168e+02, threshold=5.956e+02, percent-clipped=1.0
+2023-02-06 19:22:44,115 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:22:45,333 INFO [train.py:901] (0/4) Epoch 17, batch 1150, loss[loss=0.2457, simple_loss=0.3105, pruned_loss=0.09046, over 7330.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2968, pruned_loss=0.06825, over 1616850.70 frames. ], batch size: 72, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:22:45,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:22:45,954 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 19:23:16,161 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130524.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:23:19,435 INFO [train.py:901] (0/4) Epoch 17, batch 1200, loss[loss=0.1955, simple_loss=0.2663, pruned_loss=0.06232, over 7261.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2959, pruned_loss=0.06829, over 1613428.08 frames. ], batch size: 16, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:23:33,403 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130549.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:23:45,141 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130566.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:23:46,148 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0
+2023-02-06 19:23:47,784 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.314e+02 2.862e+02 3.617e+02 1.013e+03, threshold=5.724e+02, percent-clipped=2.0
+2023-02-06 19:23:53,876 INFO [train.py:901] (0/4) Epoch 17, batch 1250, loss[loss=0.2131, simple_loss=0.2824, pruned_loss=0.07189, over 7559.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2971, pruned_loss=0.06878, over 1615166.50 frames. ], batch size: 18, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:23:57,434 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:24:30,844 INFO [train.py:901] (0/4) Epoch 17, batch 1300, loss[loss=0.2392, simple_loss=0.314, pruned_loss=0.08224, over 8332.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2972, pruned_loss=0.06884, over 1616693.59 frames. ], batch size: 25, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:24:59,327 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.380e+02 3.126e+02 3.675e+02 7.509e+02, threshold=6.253e+02, percent-clipped=2.0
+2023-02-06 19:25:05,684 INFO [train.py:901] (0/4) Epoch 17, batch 1350, loss[loss=0.2258, simple_loss=0.2862, pruned_loss=0.08273, over 7454.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2957, pruned_loss=0.06796, over 1614663.38 frames. ], batch size: 17, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:25:27,719 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3481, 1.3055, 2.3761, 1.2037, 2.3592, 2.5350, 2.6726, 2.1719],
+ device='cuda:0'), covar=tensor([0.1011, 0.1232, 0.0422, 0.1889, 0.0618, 0.0364, 0.0656, 0.0689],
+ device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0310, 0.0275, 0.0304, 0.0297, 0.0254, 0.0390, 0.0299],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 19:25:43,058 INFO [train.py:901] (0/4) Epoch 17, batch 1400, loss[loss=0.1986, simple_loss=0.2773, pruned_loss=0.05992, over 7778.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2961, pruned_loss=0.06808, over 1617269.74 frames. ], batch size: 19, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:25:45,981 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130733.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:25:47,357 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130735.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:25:54,821 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130746.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:03,085 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130758.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:04,417 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130760.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:11,022 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.607e+02 3.260e+02 4.191e+02 1.113e+03, threshold=6.520e+02, percent-clipped=3.0
+2023-02-06 19:26:17,376 INFO [train.py:901] (0/4) Epoch 17, batch 1450, loss[loss=0.2844, simple_loss=0.3431, pruned_loss=0.1129, over 7171.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2958, pruned_loss=0.06829, over 1612295.90 frames. ], batch size: 71, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:26:20,731 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 19:26:27,816 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:32,211 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130800.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:54,259 INFO [train.py:901] (0/4) Epoch 17, batch 1500, loss[loss=0.275, simple_loss=0.3392, pruned_loss=0.1054, over 8575.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2949, pruned_loss=0.06777, over 1617452.16 frames. ], batch size: 34, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:27:17,117 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:27:22,933 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.370e+02 2.974e+02 3.638e+02 1.375e+03, threshold=5.949e+02, percent-clipped=1.0
+2023-02-06 19:27:29,112 INFO [train.py:901] (0/4) Epoch 17, batch 1550, loss[loss=0.2481, simple_loss=0.3249, pruned_loss=0.08566, over 8333.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2954, pruned_loss=0.06754, over 1620814.19 frames. ], batch size: 25, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:27:50,125 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:27:50,698 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130910.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:27:54,298 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130915.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:28:02,609 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:28:03,803 INFO [train.py:901] (0/4) Epoch 17, batch 1600, loss[loss=0.2149, simple_loss=0.2961, pruned_loss=0.06687, over 8235.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2956, pruned_loss=0.06842, over 1612604.75 frames. ], batch size: 22, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:28:34,756 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.345e+02 2.992e+02 3.546e+02 8.486e+02, threshold=5.983e+02, percent-clipped=5.0
+2023-02-06 19:28:40,949 INFO [train.py:901] (0/4) Epoch 17, batch 1650, loss[loss=0.2311, simple_loss=0.3101, pruned_loss=0.076, over 8091.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2961, pruned_loss=0.06833, over 1616934.81 frames. ], batch size: 21, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:28:57,393 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 19:29:13,494 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131025.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:29:16,141 INFO [train.py:901] (0/4) Epoch 17, batch 1700, loss[loss=0.1891, simple_loss=0.268, pruned_loss=0.05507, over 7698.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2953, pruned_loss=0.06777, over 1614112.68 frames. ], batch size: 18, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:29:25,393 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:29:46,942 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.451e+02 3.155e+02 3.823e+02 7.811e+02, threshold=6.311e+02, percent-clipped=3.0
+2023-02-06 19:29:53,061 INFO [train.py:901] (0/4) Epoch 17, batch 1750, loss[loss=0.2335, simple_loss=0.3119, pruned_loss=0.07757, over 8511.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2962, pruned_loss=0.06823, over 1619160.17 frames. ], batch size: 28, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:29:58,715 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0746, 1.5527, 1.6739, 1.4547, 0.9426, 1.5260, 1.8248, 1.4775],
+ device='cuda:0'), covar=tensor([0.0550, 0.1179, 0.1706, 0.1414, 0.0636, 0.1464, 0.0684, 0.0660],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0164, 0.0115, 0.0140],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-06 19:30:19,609 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131117.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:30:27,902 INFO [train.py:901] (0/4) Epoch 17, batch 1800, loss[loss=0.1916, simple_loss=0.2832, pruned_loss=0.05003, over 8244.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.296, pruned_loss=0.06827, over 1617982.89 frames. ], batch size: 24, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:30:37,097 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:30:51,223 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.4050, 1.3301, 3.7724, 1.5723, 2.9609, 3.0089, 3.4256, 3.3488],
+ device='cuda:0'), covar=tensor([0.1539, 0.6277, 0.1398, 0.4866, 0.2399, 0.1744, 0.1168, 0.1296],
+ device='cuda:0'), in_proj_covar=tensor([0.0568, 0.0622, 0.0654, 0.0592, 0.0671, 0.0575, 0.0567, 0.0635],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 19:30:52,690 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131165.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:30:55,946 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.745e+02 3.356e+02 4.683e+02 1.105e+03, threshold=6.712e+02, percent-clipped=11.0
+2023-02-06 19:30:56,918 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131171.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:03,697 INFO [train.py:901] (0/4) Epoch 17, batch 1850, loss[loss=0.2041, simple_loss=0.2718, pruned_loss=0.06824, over 7708.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2967, pruned_loss=0.06842, over 1622886.14 frames. ], batch size: 18, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:31:12,468 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131190.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:13,829 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131192.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:16,687 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131196.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:37,446 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7343, 1.8496, 2.4382, 1.5493, 1.2984, 2.3961, 0.4541, 1.4554],
+ device='cuda:0'), covar=tensor([0.2074, 0.1424, 0.0352, 0.1472, 0.3028, 0.0425, 0.2491, 0.1508],
+ device='cuda:0'), in_proj_covar=tensor([0.0179, 0.0183, 0.0115, 0.0216, 0.0261, 0.0122, 0.0166, 0.0179],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 19:31:39,958 INFO [train.py:901] (0/4) Epoch 17, batch 1900, loss[loss=0.2157, simple_loss=0.3089, pruned_loss=0.06121, over 8455.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2973, pruned_loss=0.06879, over 1621610.57 frames. ], batch size: 25, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:32:08,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.313e+02 2.955e+02 3.582e+02 5.685e+02, threshold=5.910e+02, percent-clipped=0.0
+2023-02-06 19:32:08,137 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 19:32:14,113 INFO [train.py:901] (0/4) Epoch 17, batch 1950, loss[loss=0.2013, simple_loss=0.2811, pruned_loss=0.06077, over 7979.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2969, pruned_loss=0.06897, over 1616705.50 frames. ], batch size: 21, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:32:15,744 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131281.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:19,629 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 19:32:28,898 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131298.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:35,128 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131306.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:39,911 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 19:32:47,544 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131323.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:51,199 INFO [train.py:901] (0/4) Epoch 17, batch 2000, loss[loss=0.2209, simple_loss=0.3157, pruned_loss=0.06307, over 7974.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2961, pruned_loss=0.06826, over 1617210.81 frames. ], batch size: 21, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:33:19,856 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.510e+02 3.128e+02 3.622e+02 6.098e+02, threshold=6.257e+02, percent-clipped=1.0
+2023-02-06 19:33:25,353 INFO [train.py:901] (0/4) Epoch 17, batch 2050, loss[loss=0.212, simple_loss=0.2955, pruned_loss=0.06425, over 8506.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2957, pruned_loss=0.06758, over 1620948.13 frames. ], batch size: 26, lr: 4.51e-03, grad_scale: 8.0
+2023-02-06 19:34:00,658 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131427.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:34:01,953 INFO [train.py:901] (0/4) Epoch 17, batch 2100, loss[loss=0.2201, simple_loss=0.3099, pruned_loss=0.06517, over 8522.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2958, pruned_loss=0.06796, over 1618423.65 frames. ], batch size: 31, lr: 4.51e-03, grad_scale: 8.0
+2023-02-06 19:34:06,118 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131434.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:34:31,418 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.457e+02 2.884e+02 3.530e+02 8.686e+02, threshold=5.767e+02, percent-clipped=1.0
+2023-02-06 19:34:36,975 INFO [train.py:901] (0/4) Epoch 17, batch 2150, loss[loss=0.2182, simple_loss=0.301, pruned_loss=0.06767, over 8233.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06829, over 1619541.29 frames. ], batch size: 22, lr: 4.51e-03, grad_scale: 8.0
+2023-02-06 19:34:58,678 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131510.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:35:12,360 INFO [train.py:901] (0/4) Epoch 17, batch 2200, loss[loss=0.1991, simple_loss=0.2792, pruned_loss=0.05951, over 7935.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2967, pruned_loss=0.0683, over 1617068.54 frames. ], batch size: 20, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:35:17,202 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131536.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:35:36,119 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3547, 1.8141, 1.4089, 2.8674, 1.3828, 1.2656, 2.0711, 2.0669],
+ device='cuda:0'), covar=tensor([0.1622, 0.1369, 0.2113, 0.0416, 0.1252, 0.2053, 0.0889, 0.0921],
+ device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0198, 0.0247, 0.0211, 0.0208, 0.0246, 0.0253, 0.0209],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 19:35:43,528 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.550e+02 3.248e+02 4.465e+02 1.208e+03, threshold=6.496e+02, percent-clipped=6.0
+2023-02-06 19:35:49,226 INFO [train.py:901] (0/4) Epoch 17, batch 2250, loss[loss=0.2273, simple_loss=0.3109, pruned_loss=0.07187, over 8462.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2973, pruned_loss=0.06834, over 1617671.96 frames. ], batch size: 25, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:36:23,874 INFO [train.py:901] (0/4) Epoch 17, batch 2300, loss[loss=0.1887, simple_loss=0.2742, pruned_loss=0.05155, over 8085.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2973, pruned_loss=0.0683, over 1614991.99 frames. ], batch size: 21, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:36:24,059 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8852, 2.2555, 1.8887, 2.8422, 1.3284, 1.5852, 1.9190, 2.3511],
+ device='cuda:0'), covar=tensor([0.0844, 0.0777, 0.0937, 0.0386, 0.1107, 0.1400, 0.1009, 0.0690],
+ device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0199, 0.0249, 0.0212, 0.0209, 0.0247, 0.0254, 0.0210],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 19:36:40,749 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:36:55,874 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 2.553e+02 3.001e+02 3.824e+02 6.268e+02, threshold=6.003e+02, percent-clipped=0.0
+2023-02-06 19:37:01,535 INFO [train.py:901] (0/4) Epoch 17, batch 2350, loss[loss=0.2429, simple_loss=0.3227, pruned_loss=0.08162, over 8514.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2977, pruned_loss=0.06861, over 1617482.22 frames. ], batch size: 26, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:37:16,027 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7487, 1.7541, 2.3432, 1.6853, 1.3153, 2.2900, 0.7398, 1.5176],
+ device='cuda:0'), covar=tensor([0.1993, 0.1237, 0.0347, 0.1248, 0.2828, 0.0399, 0.2006, 0.1462],
+ device='cuda:0'), in_proj_covar=tensor([0.0179, 0.0183, 0.0114, 0.0216, 0.0260, 0.0121, 0.0166, 0.0179],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 19:37:21,847 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0
+2023-02-06 19:37:35,922 INFO [train.py:901] (0/4) Epoch 17, batch 2400, loss[loss=0.2219, simple_loss=0.3091, pruned_loss=0.06738, over 8543.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2957, pruned_loss=0.06814, over 1609576.63 frames. ], batch size: 49, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:37:40,381 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3422, 2.0592, 1.5787, 1.8512, 1.7573, 1.4473, 1.6820, 1.6803],
+ device='cuda:0'), covar=tensor([0.1023, 0.0414, 0.1094, 0.0499, 0.0569, 0.1232, 0.0748, 0.0780],
+ device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0233, 0.0324, 0.0299, 0.0297, 0.0328, 0.0340, 0.0312],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 19:38:06,368 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.467e+02 3.155e+02 3.892e+02 8.269e+02, threshold=6.310e+02, percent-clipped=4.0
+2023-02-06 19:38:06,488 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131771.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:38:12,194 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:38:12,838 INFO [train.py:901] (0/4) Epoch 17, batch 2450, loss[loss=0.1734, simple_loss=0.2608, pruned_loss=0.04296, over 7922.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2938, pruned_loss=0.06644, over 1611838.54 frames. ], batch size: 20, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:38:47,887 INFO [train.py:901] (0/4) Epoch 17, batch 2500, loss[loss=0.2195, simple_loss=0.3046, pruned_loss=0.06724, over 8285.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2927, pruned_loss=0.06544, over 1614836.91 frames. ], batch size: 23, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:38:58,558 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1350, 1.4807, 1.8006, 1.3687, 0.9269, 1.4943, 1.9072, 1.8781],
+ device='cuda:0'), covar=tensor([0.0435, 0.1210, 0.1504, 0.1372, 0.0581, 0.1429, 0.0646, 0.0541],
+ device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0099, 0.0161, 0.0114, 0.0138],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-06 19:39:05,482 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131854.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:17,123 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.481e+02 2.929e+02 3.320e+02 7.417e+02, threshold=5.858e+02, percent-clipped=2.0
+2023-02-06 19:39:20,112 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:22,845 INFO [train.py:901] (0/4) Epoch 17, batch 2550, loss[loss=0.1729, simple_loss=0.2448, pruned_loss=0.05044, over 7235.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2936, pruned_loss=0.06652, over 1614241.38 frames. ], batch size: 16, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:39:29,613 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:34,584 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131893.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:42,791 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8395, 1.7058, 3.3002, 1.5101, 2.5266, 3.5750, 3.6747, 2.9827],
+ device='cuda:0'), covar=tensor([0.1227, 0.1648, 0.0433, 0.2077, 0.0990, 0.0298, 0.0554, 0.0655],
+ device='cuda:0'), in_proj_covar=tensor([0.0282, 0.0308, 0.0275, 0.0302, 0.0292, 0.0252, 0.0388, 0.0299],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 19:39:45,565 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131907.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:40:00,840 INFO [train.py:901] (0/4) Epoch 17, batch 2600, loss[loss=0.2123, simple_loss=0.3024, pruned_loss=0.06111, over 8286.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2944, pruned_loss=0.06716, over 1611800.21 frames. ], batch size: 23, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:40:03,013 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:40:28,777 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:40:29,952 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.887e+02 3.716e+02 6.826e+02, threshold=5.774e+02, percent-clipped=1.0
+2023-02-06 19:40:35,439 INFO [train.py:901] (0/4) Epoch 17, batch 2650, loss[loss=0.2094, simple_loss=0.2796, pruned_loss=0.06963, over 7551.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2951, pruned_loss=0.06761, over 1611966.34 frames. ], batch size: 18, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:40:49,773 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-132000.pt
+2023-02-06 19:40:52,326 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9716, 1.2203, 1.2162, 0.5939, 1.2047, 0.9960, 0.0478, 1.1528],
+ device='cuda:0'), covar=tensor([0.0373, 0.0362, 0.0269, 0.0483, 0.0373, 0.0783, 0.0719, 0.0282],
+ device='cuda:0'), in_proj_covar=tensor([0.0426, 0.0368, 0.0315, 0.0425, 0.0351, 0.0509, 0.0376, 0.0391],
+ device='cuda:0'), out_proj_covar=tensor([1.1651e-04, 9.7932e-05, 8.3442e-05, 1.1367e-04, 9.4395e-05, 1.4685e-04,
+ 1.0248e-04, 1.0514e-04], device='cuda:0')
+2023-02-06 19:41:13,509 INFO [train.py:901] (0/4) Epoch 17, batch 2700, loss[loss=0.2189, simple_loss=0.2923, pruned_loss=0.07272, over 7936.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2966, pruned_loss=0.06833, over 1616255.45 frames. ], batch size: 20, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:41:27,356 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132049.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:41:31,121 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 19:41:42,493 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.450e+02 3.248e+02 4.102e+02 1.137e+03, threshold=6.496e+02, percent-clipped=12.0
+2023-02-06 19:41:43,388 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132072.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:41:48,257 INFO [train.py:901] (0/4) Epoch 17, batch 2750, loss[loss=0.2355, simple_loss=0.3038, pruned_loss=0.08363, over 7818.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2959, pruned_loss=0.06824, over 1615512.66 frames. ], batch size: 20, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:42:25,035 INFO [train.py:901] (0/4) Epoch 17, batch 2800, loss[loss=0.2608, simple_loss=0.3329, pruned_loss=0.09433, over 6937.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2953, pruned_loss=0.06844, over 1611853.98 frames. ], batch size: 71, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:42:35,383 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:42:40,200 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132149.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:42:52,677 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132167.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:42:55,269 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.217e+02 2.865e+02 3.623e+02 1.020e+03, threshold=5.730e+02, percent-clipped=3.0
+2023-02-06 19:42:57,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132174.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:43:00,621 INFO [train.py:901] (0/4) Epoch 17, batch 2850, loss[loss=0.1952, simple_loss=0.2865, pruned_loss=0.05192, over 8561.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2963, pruned_loss=0.069, over 1613652.59 frames. ], batch size: 31, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:43:06,963 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. limit=5.0
+2023-02-06 19:43:22,885 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5378, 1.8915, 4.4181, 1.9715, 2.5652, 5.0209, 5.0140, 4.2847],
+ device='cuda:0'), covar=tensor([0.1080, 0.1745, 0.0295, 0.2026, 0.1129, 0.0171, 0.0449, 0.0543],
+ device='cuda:0'), in_proj_covar=tensor([0.0283, 0.0309, 0.0276, 0.0303, 0.0293, 0.0253, 0.0388, 0.0299],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 19:43:29,199 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:43:33,399 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132225.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:43:35,744 INFO [train.py:901] (0/4) Epoch 17, batch 2900, loss[loss=0.2416, simple_loss=0.3257, pruned_loss=0.07877, over 8474.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.296, pruned_loss=0.06871, over 1612873.21 frames. ], batch size: 25, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:43:52,922 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132250.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:44:08,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.407e+02 2.887e+02 3.454e+02 7.005e+02, threshold=5.774e+02, percent-clipped=2.0
+2023-02-06 19:44:09,811 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 19:44:13,735 INFO [train.py:901] (0/4) Epoch 17, batch 2950, loss[loss=0.2033, simple_loss=0.2782, pruned_loss=0.06421, over 7933.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2955, pruned_loss=0.06798, over 1615243.36 frames. ], batch size: 20, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:44:42,851 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0340, 1.7235, 2.6460, 1.6215, 2.2656, 2.8933, 2.8632, 2.5992],
+ device='cuda:0'), covar=tensor([0.0847, 0.1366, 0.0709, 0.1678, 0.1371, 0.0256, 0.0665, 0.0474],
+ device='cuda:0'), in_proj_covar=tensor([0.0283, 0.0309, 0.0275, 0.0302, 0.0292, 0.0252, 0.0388, 0.0298],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-06 19:44:48,301 INFO [train.py:901] (0/4) Epoch 17, batch 3000, loss[loss=0.2117, simple_loss=0.2921, pruned_loss=0.06569, over 8554.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.06863, over 1613103.95 frames. ], batch size: 31, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:44:48,302 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-06 19:45:00,592 INFO [train.py:935] (0/4) Epoch 17, validation: loss=0.1786, simple_loss=0.2786, pruned_loss=0.03928, over 944034.00 frames.
+2023-02-06 19:45:00,593 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-06 19:45:04,437 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132334.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:45:31,437 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.492e+02 3.005e+02 3.786e+02 8.313e+02, threshold=6.010e+02, percent-clipped=11.0
+2023-02-06 19:45:37,096 INFO [train.py:901] (0/4) Epoch 17, batch 3050, loss[loss=0.2233, simple_loss=0.303, pruned_loss=0.07177, over 8448.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.06862, over 1613590.28 frames. ], batch size: 27, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:45:48,262 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132393.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:46:04,203 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132416.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:46:12,935 INFO [train.py:901] (0/4) Epoch 17, batch 3100, loss[loss=0.2199, simple_loss=0.293, pruned_loss=0.07336, over 7442.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2971, pruned_loss=0.06882, over 1613943.02 frames. ], batch size: 17, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:46:41,889 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.340e+02 2.843e+02 3.195e+02 7.960e+02, threshold=5.685e+02, percent-clipped=6.0
+2023-02-06 19:46:47,327 INFO [train.py:901] (0/4) Epoch 17, batch 3150, loss[loss=0.2115, simple_loss=0.2987, pruned_loss=0.06218, over 8290.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2979, pruned_loss=0.06911, over 1619583.78 frames. ], batch size: 23, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:47:05,433 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6551, 1.8319, 1.6084, 2.2695, 1.0174, 1.3866, 1.5966, 1.8523],
+ device='cuda:0'), covar=tensor([0.0813, 0.0810, 0.0947, 0.0434, 0.1209, 0.1444, 0.0849, 0.0731],
+ device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0200, 0.0249, 0.0212, 0.0211, 0.0248, 0.0254, 0.0211],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 19:47:09,721 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132508.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:47:24,997 INFO [train.py:901] (0/4) Epoch 17, batch 3200, loss[loss=0.2099, simple_loss=0.2776, pruned_loss=0.07108, over 7791.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.297, pruned_loss=0.06833, over 1618978.80 frames. ], batch size: 19, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:47:26,582 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132531.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:47:54,177 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.495e+02 3.112e+02 3.824e+02 1.248e+03, threshold=6.223e+02, percent-clipped=6.0
+2023-02-06 19:47:59,504 INFO [train.py:901] (0/4) Epoch 17, batch 3250, loss[loss=0.2064, simple_loss=0.2799, pruned_loss=0.06647, over 7795.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2961, pruned_loss=0.06825, over 1612361.73 frames. ], batch size: 19, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:48:07,398 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132590.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:48:26,241 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132615.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:48:37,505 INFO [train.py:901] (0/4) Epoch 17, batch 3300, loss[loss=0.1957, simple_loss=0.2828, pruned_loss=0.05435, over 8654.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.296, pruned_loss=0.06846, over 1614468.47 frames. ], batch size: 34, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:48:58,465 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4246, 1.9755, 4.4509, 2.1481, 2.3657, 4.9321, 5.0555, 4.2734],
+ device='cuda:0'), covar=tensor([0.1096, 0.1539, 0.0266, 0.1955, 0.1206, 0.0185, 0.0427, 0.0492],
+ device='cuda:0'), in_proj_covar=tensor([0.0285, 0.0310, 0.0276, 0.0305, 0.0295, 0.0254, 0.0391, 0.0301],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 19:49:06,785 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 2.532e+02 2.971e+02 3.744e+02 7.972e+02, threshold=5.942e+02, percent-clipped=3.0
+2023-02-06 19:49:07,214 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0
+2023-02-06 19:49:11,856 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132678.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:49:12,441 INFO [train.py:901] (0/4) Epoch 17, batch 3350, loss[loss=0.1619, simple_loss=0.2336, pruned_loss=0.04509, over 6845.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.295, pruned_loss=0.06786, over 1611918.60 frames. ], batch size: 15, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:49:19,014 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.33 vs. limit=5.0
+2023-02-06 19:49:26,537 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3853, 2.6826, 1.8200, 2.3461, 2.2853, 1.6320, 2.1271, 2.2881],
+ device='cuda:0'), covar=tensor([0.1659, 0.0359, 0.1260, 0.0699, 0.0682, 0.1488, 0.0970, 0.1036],
+ device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0231, 0.0321, 0.0298, 0.0295, 0.0326, 0.0338, 0.0312],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 19:49:49,260 INFO [train.py:901] (0/4) Epoch 17, batch 3400, loss[loss=0.205, simple_loss=0.2757, pruned_loss=0.06718, over 7817.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2944, pruned_loss=0.06729, over 1611566.93 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:50:02,395 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 19:50:14,680 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:14,758 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:19,442 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.442e+02 2.969e+02 4.012e+02 9.663e+02, threshold=5.937e+02, percent-clipped=5.0
+2023-02-06 19:50:20,634 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-02-06 19:50:24,936 INFO [train.py:901] (0/4) Epoch 17, batch 3450, loss[loss=0.2341, simple_loss=0.3038, pruned_loss=0.08222, over 8464.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.295, pruned_loss=0.06804, over 1608051.52 frames. ], batch size: 25, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:50:30,854 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132787.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:32,181 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:47,288 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0
+2023-02-06 19:50:47,670 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132812.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:59,811 INFO [train.py:901] (0/4) Epoch 17, batch 3500, loss[loss=0.2194, simple_loss=0.2947, pruned_loss=0.07202, over 8328.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2946, pruned_loss=0.06788, over 1607220.35 frames. ], batch size: 25, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:51:13,841 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 19:51:31,535 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.401e+02 3.009e+02 3.970e+02 8.620e+02, threshold=6.019e+02, percent-clipped=6.0
+2023-02-06 19:51:37,025 INFO [train.py:901] (0/4) Epoch 17, batch 3550, loss[loss=0.2135, simple_loss=0.2814, pruned_loss=0.07277, over 7974.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2951, pruned_loss=0.06805, over 1608155.03 frames. ], batch size: 21, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:51:54,233 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2292, 1.3058, 1.5398, 1.2432, 0.6948, 1.2785, 1.1639, 1.1007],
+ device='cuda:0'), covar=tensor([0.0602, 0.1298, 0.1759, 0.1453, 0.0615, 0.1612, 0.0725, 0.0691],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0157, 0.0100, 0.0163, 0.0115, 0.0139],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-06 19:52:11,140 INFO [train.py:901] (0/4) Epoch 17, batch 3600, loss[loss=0.2044, simple_loss=0.2873, pruned_loss=0.06072, over 7804.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.295, pruned_loss=0.06766, over 1612379.27 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:52:41,872 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.442e+02 2.775e+02 3.418e+02 6.006e+02, threshold=5.549e+02, percent-clipped=0.0
+2023-02-06 19:52:48,333 INFO [train.py:901] (0/4) Epoch 17, batch 3650, loss[loss=0.254, simple_loss=0.3409, pruned_loss=0.0835, over 8501.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2942, pruned_loss=0.06714, over 1611875.70 frames. ], batch size: 26, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:53:17,248 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0568, 2.1894, 2.3325, 1.5421, 2.3844, 1.6206, 1.7591, 1.9662],
+ device='cuda:0'), covar=tensor([0.0542, 0.0320, 0.0188, 0.0509, 0.0361, 0.0644, 0.0565, 0.0345],
+ device='cuda:0'), in_proj_covar=tensor([0.0432, 0.0374, 0.0318, 0.0430, 0.0357, 0.0515, 0.0379, 0.0393],
+ device='cuda:0'), out_proj_covar=tensor([1.1798e-04, 9.9506e-05, 8.4217e-05, 1.1506e-04, 9.5856e-05, 1.4830e-04,
+ 1.0340e-04, 1.0538e-04], device='cuda:0')
+2023-02-06 19:53:18,535 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133022.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:53:21,766 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 19:53:23,088 INFO [train.py:901] (0/4) Epoch 17, batch 3700, loss[loss=0.2115, simple_loss=0.2673, pruned_loss=0.07787, over 7439.00 frames. ], tot_loss[loss=0.214, simple_loss=0.294, pruned_loss=0.06697, over 1609546.71 frames. ], batch size: 17, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:53:53,577 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.419e+02 3.081e+02 4.194e+02 7.364e+02, threshold=6.162e+02, percent-clipped=6.0
+2023-02-06 19:53:59,125 INFO [train.py:901] (0/4) Epoch 17, batch 3750, loss[loss=0.2331, simple_loss=0.3159, pruned_loss=0.07517, over 8743.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2934, pruned_loss=0.06648, over 1615572.08 frames. ], batch size: 39, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:54:16,848 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3228, 1.5541, 2.1139, 1.2542, 1.3007, 1.6286, 1.4074, 1.3545],
+ device='cuda:0'), covar=tensor([0.1885, 0.2243, 0.0883, 0.4111, 0.1992, 0.3049, 0.2176, 0.2184],
+ device='cuda:0'), in_proj_covar=tensor([0.0506, 0.0565, 0.0543, 0.0613, 0.0631, 0.0568, 0.0508, 0.0623],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 19:54:21,509 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:54:35,519 INFO [train.py:901] (0/4) Epoch 17, batch 3800, loss[loss=0.2287, simple_loss=0.3102, pruned_loss=0.07364, over 8426.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.294, pruned_loss=0.06691, over 1614428.18 frames. ], batch size: 27, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:54:41,262 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133137.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:55:04,572 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.594e+02 3.054e+02 3.718e+02 6.772e+02, threshold=6.108e+02, percent-clipped=5.0
+2023-02-06 19:55:09,939 INFO [train.py:901] (0/4) Epoch 17, batch 3850, loss[loss=0.2838, simple_loss=0.3559, pruned_loss=0.1059, over 8245.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2956, pruned_loss=0.06801, over 1610979.48 frames. ], batch size: 24, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:55:31,145 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 19:55:42,996 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133223.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:55:46,898 INFO [train.py:901] (0/4) Epoch 17, batch 3900, loss[loss=0.2248, simple_loss=0.3104, pruned_loss=0.06957, over 8527.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2959, pruned_loss=0.0683, over 1609390.00 frames. ], batch size: 31, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:56:15,758 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.486e+02 2.968e+02 4.028e+02 1.073e+03, threshold=5.936e+02, percent-clipped=5.0
+2023-02-06 19:56:21,114 INFO [train.py:901] (0/4) Epoch 17, batch 3950, loss[loss=0.2348, simple_loss=0.2917, pruned_loss=0.08894, over 6798.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2951, pruned_loss=0.06826, over 1606243.92 frames. ], batch size: 15, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:56:56,940 INFO [train.py:901] (0/4) Epoch 17, batch 4000, loss[loss=0.2131, simple_loss=0.2845, pruned_loss=0.07081, over 7433.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2943, pruned_loss=0.06783, over 1603954.47 frames. ], batch size: 17, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:57:27,418 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.526e+02 3.333e+02 3.995e+02 7.649e+02, threshold=6.666e+02, percent-clipped=5.0
+2023-02-06 19:57:32,340 INFO [train.py:901] (0/4) Epoch 17, batch 4050, loss[loss=0.2493, simple_loss=0.3149, pruned_loss=0.09187, over 7009.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2954, pruned_loss=0.0681, over 1605670.13 frames. ], batch size: 71, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:57:41,372 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133392.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 19:57:42,099 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133393.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:57:42,756 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133394.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:57:59,643 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133418.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:58:07,774 INFO [train.py:901] (0/4) Epoch 17, batch 4100, loss[loss=0.1937, simple_loss=0.2817, pruned_loss=0.05285, over 8300.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2962, pruned_loss=0.0681, over 1609017.64 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:58:40,085 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.513e+02 2.919e+02 3.658e+02 1.440e+03, threshold=5.839e+02, percent-clipped=2.0
+2023-02-06 19:58:45,050 INFO [train.py:901] (0/4) Epoch 17, batch 4150, loss[loss=0.2524, simple_loss=0.3395, pruned_loss=0.08267, over 8185.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2965, pruned_loss=0.06816, over 1610003.28 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:58:45,258 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:58:54,026 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:59:02,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:59:19,513 INFO [train.py:901] (0/4) Epoch 17, batch 4200, loss[loss=0.2167, simple_loss=0.3009, pruned_loss=0.06627, over 8754.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2962, pruned_loss=0.06808, over 1610144.81 frames. ], batch size: 30, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:59:20,466 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0
+2023-02-06 19:59:32,477 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 19:59:51,075 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.565e+02 3.135e+02 3.827e+02 1.180e+03, threshold=6.269e+02, percent-clipped=6.0
+2023-02-06 19:59:56,775 INFO [train.py:901] (0/4) Epoch 17, batch 4250, loss[loss=0.1659, simple_loss=0.2473, pruned_loss=0.04224, over 7664.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2976, pruned_loss=0.06888, over 1612083.02 frames. ], batch size: 19, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:59:57,447 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 20:00:30,996 INFO [train.py:901] (0/4) Epoch 17, batch 4300, loss[loss=0.2017, simple_loss=0.2922, pruned_loss=0.05557, over 8478.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2967, pruned_loss=0.06822, over 1610652.09 frames. ], batch size: 29, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:00:46,261 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9260, 2.3309, 1.9474, 2.8851, 1.4067, 1.5483, 2.0761, 2.3575],
+ device='cuda:0'), covar=tensor([0.0837, 0.0888, 0.0917, 0.0370, 0.1211, 0.1433, 0.0950, 0.0746],
+ device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0199, 0.0248, 0.0211, 0.0208, 0.0245, 0.0254, 0.0211],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 20:00:58,160 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 20:00:59,905 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:01:01,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.551e+02 3.118e+02 3.976e+02 6.360e+02, threshold=6.236e+02, percent-clipped=1.0
+2023-02-06 20:01:06,898 INFO [train.py:901] (0/4) Epoch 17, batch 4350, loss[loss=0.1729, simple_loss=0.256, pruned_loss=0.04494, over 7915.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.297, pruned_loss=0.06883, over 1614045.29 frames. ], batch size: 20, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:01:11,421 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-02-06 20:01:31,286 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 20:01:43,132 INFO [train.py:901] (0/4) Epoch 17, batch 4400, loss[loss=0.2047, simple_loss=0.3063, pruned_loss=0.05159, over 8494.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2971, pruned_loss=0.06868, over 1619143.95 frames. ], batch size: 26, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:01:48,083 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133736.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 20:01:49,407 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133738.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:02:06,820 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2061, 1.0778, 1.2746, 1.0621, 0.9338, 1.3045, 0.0463, 0.9400],
+ device='cuda:0'), covar=tensor([0.1976, 0.1464, 0.0528, 0.0884, 0.2943, 0.0600, 0.2620, 0.1286],
+ device='cuda:0'), in_proj_covar=tensor([0.0180, 0.0186, 0.0116, 0.0219, 0.0264, 0.0123, 0.0169, 0.0181],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 20:02:12,877 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.579e+02 3.148e+02 3.884e+02 8.584e+02, threshold=6.297e+02, percent-clipped=6.0
+2023-02-06 20:02:12,922 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 20:02:18,530 INFO [train.py:901] (0/4) Epoch 17, batch 4450, loss[loss=0.2316, simple_loss=0.3144, pruned_loss=0.07436, over 8100.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2962, pruned_loss=0.0683, over 1623279.94 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:02:55,023 INFO [train.py:901] (0/4) Epoch 17, batch 4500, loss[loss=0.2142, simple_loss=0.2857, pruned_loss=0.07133, over 7802.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2962, pruned_loss=0.06806, over 1622723.51 frames. ], batch size: 20, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:03:00,051 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133836.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:03:00,823 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.5639, 3.4389, 3.1681, 2.0815, 3.0783, 3.2006, 3.2605, 2.8840],
+ device='cuda:0'), covar=tensor([0.0913, 0.0722, 0.1032, 0.3918, 0.0920, 0.1053, 0.1224, 0.1036],
+ device='cuda:0'), in_proj_covar=tensor([0.0500, 0.0410, 0.0418, 0.0512, 0.0405, 0.0411, 0.0400, 0.0357],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-06 20:03:10,399 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133851.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 20:03:10,909 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 20:03:11,730 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133853.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:03:24,312 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.330e+02 2.856e+02 3.592e+02 8.327e+02, threshold=5.711e+02, percent-clipped=1.0
+2023-02-06 20:03:29,194 INFO [train.py:901] (0/4) Epoch 17, batch 4550, loss[loss=0.2299, simple_loss=0.3089, pruned_loss=0.07549, over 8496.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2958, pruned_loss=0.06818, over 1617496.41 frames. ], batch size: 28, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:03:35,072 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1952, 3.9212, 2.6538, 3.1139, 3.0571, 2.4537, 3.0807, 3.3008],
+ device='cuda:0'), covar=tensor([0.1577, 0.0296, 0.0850, 0.0631, 0.0645, 0.1193, 0.0966, 0.0882],
+ device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0232, 0.0324, 0.0299, 0.0296, 0.0330, 0.0339, 0.0313],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 20:03:47,065 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4780, 2.5605, 1.7743, 2.1720, 2.2068, 1.5648, 1.9599, 2.0997],
+ device='cuda:0'), covar=tensor([0.1475, 0.0345, 0.1149, 0.0612, 0.0643, 0.1470, 0.0910, 0.0913],
+ device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0233, 0.0325, 0.0301, 0.0297, 0.0331, 0.0340, 0.0315],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-06 20:04:04,555 INFO [train.py:901] (0/4) Epoch 17, batch 4600, loss[loss=0.251, simple_loss=0.3101, pruned_loss=0.09601, over 7822.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2959, pruned_loss=0.06826, over 1617697.07 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:04:21,366 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133951.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:04:35,425 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.386e+02 2.834e+02 3.772e+02 7.696e+02, threshold=5.668e+02, percent-clipped=3.0
+2023-02-06 20:04:40,245 INFO [train.py:901] (0/4) Epoch 17, batch 4650, loss[loss=0.1977, simple_loss=0.2826, pruned_loss=0.05637, over 7928.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2938, pruned_loss=0.06693, over 1616212.73 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:04:46,113 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.24 vs. limit=5.0
+2023-02-06 20:04:54,999 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-134000.pt
+2023-02-06 20:05:06,443 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134014.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:05:07,186 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134015.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:05:16,655 INFO [train.py:901] (0/4) Epoch 17, batch 4700, loss[loss=0.244, simple_loss=0.3214, pruned_loss=0.0833, over 8500.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.295, pruned_loss=0.06811, over 1608223.50 frames. ], batch size: 39, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:05:24,863 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 20:05:28,194 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6016, 2.3609, 3.3527, 2.6022, 3.1770, 2.5429, 2.1816, 1.9068],
+ device='cuda:0'), covar=tensor([0.4756, 0.4735, 0.1654, 0.3371, 0.2328, 0.2711, 0.1719, 0.5124],
+ device='cuda:0'), in_proj_covar=tensor([0.0921, 0.0936, 0.0774, 0.0904, 0.0967, 0.0853, 0.0725, 0.0805],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-06 20:05:37,702 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0
+2023-02-06 20:05:48,989 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.331e+02 2.674e+02 3.349e+02 6.559e+02, threshold=5.348e+02, percent-clipped=3.0
+2023-02-06 20:05:53,972 INFO [train.py:901] (0/4) Epoch 17, batch 4750, loss[loss=0.1768, simple_loss=0.2597, pruned_loss=0.04689, over 7784.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2935, pruned_loss=0.06712, over 1605682.35 frames. ], batch size: 19, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:06:13,289 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134107.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 20:06:14,640 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:06:17,892 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 20:06:20,569 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 20:06:20,742 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6974, 2.2430, 1.8840, 4.2608, 1.7157, 1.6371, 2.5437, 2.8328],
+ device='cuda:0'), covar=tensor([0.1743, 0.1389, 0.2108, 0.0212, 0.1428, 0.1988, 0.1074, 0.0942],
+ device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0199, 0.0250, 0.0211, 0.0209, 0.0247, 0.0254, 0.0212],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-06 20:06:28,172 INFO [train.py:901] (0/4) Epoch 17, batch 4800, loss[loss=0.2351, simple_loss=0.3185, pruned_loss=0.0758, over 8337.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2933, pruned_loss=0.06672, over 1608770.15 frames. ], batch size: 26, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:06:28,365 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134129.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:06:31,292 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134132.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 20:06:32,668 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:06:39,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1852, 2.7457, 3.1888, 1.7193, 3.3260, 1.9498, 1.5340, 2.0728],
+ device='cuda:0'), covar=tensor([0.0823, 0.0352, 0.0319, 0.0691, 0.0419, 0.0816, 0.0906, 0.0570],
+ device='cuda:0'), in_proj_covar=tensor([0.0433, 0.0372, 0.0316, 0.0427, 0.0354, 0.0512, 0.0377, 0.0396],
+ device='cuda:0'), out_proj_covar=tensor([1.1804e-04, 9.9210e-05, 8.3606e-05, 1.1416e-04, 9.4949e-05, 1.4767e-04,
+ 1.0283e-04, 1.0600e-04], device='cuda:0')
+2023-02-06 20:07:00,736 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.352e+02 2.869e+02 3.488e+02 8.440e+02, threshold=5.739e+02, percent-clipped=9.0
+2023-02-06 20:07:01,895 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.34 vs. limit=5.0
+2023-02-06 20:07:06,361 INFO [train.py:901] (0/4) Epoch 17, batch 4850, loss[loss=0.2012, simple_loss=0.2843, pruned_loss=0.05902, over 7928.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2932, pruned_loss=0.06714, over 1609070.42 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:07:14,640 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 20:07:26,226 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134207.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:07:41,126 INFO [train.py:901] (0/4) Epoch 17, batch 4900, loss[loss=0.234, simple_loss=0.3146, pruned_loss=0.07667, over 8080.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2937, pruned_loss=0.06736, over 1606993.97 frames. ], batch size: 21, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:07:43,465 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134232.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:08:13,108 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.657e+02 3.351e+02 4.707e+02 1.168e+03, threshold=6.701e+02, percent-clipped=12.0
+2023-02-06 20:08:17,811 INFO [train.py:901] (0/4) Epoch 17, batch 4950, loss[loss=0.2023, simple_loss=0.2845, pruned_loss=0.06001, over 8338.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2942, pruned_loss=0.06751, over 1607407.51 frames. ], batch size: 26, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:08:36,229 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8828, 2.6635, 3.6736, 2.0596, 1.9001, 3.6560, 0.7447, 2.0916],
+ device='cuda:0'), covar=tensor([0.1592, 0.1125, 0.0276, 0.1741, 0.2866, 0.0296, 0.2843, 0.1656],
+ device='cuda:0'), in_proj_covar=tensor([0.0177, 0.0184, 0.0116, 0.0218, 0.0262, 0.0123, 0.0167, 0.0181],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-06 20:08:49,532 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:08:52,975 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:08:54,216 INFO [train.py:901] (0/4) Epoch 17, batch 5000, loss[loss=0.2081, simple_loss=0.3017, pruned_loss=0.0573, over 8575.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2938, pruned_loss=0.06702, over 1606885.31 frames. ], batch size: 39, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:09:15,199 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134359.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:09:24,851 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.361e+02 2.656e+02 3.405e+02 6.362e+02, threshold=5.311e+02, percent-clipped=0.0
+2023-02-06 20:09:30,487 INFO [train.py:901] (0/4) Epoch 17, batch 5050, loss[loss=0.2432, simple_loss=0.3075, pruned_loss=0.08947, over 7817.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2942, pruned_loss=0.06742, over 1607969.78 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:09:35,086 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134385.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:09:54,046 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:09:58,690 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 20:10:07,174 INFO [train.py:901] (0/4) Epoch 17, batch 5100, loss[loss=0.1867, simple_loss=0.2684, pruned_loss=0.05254, over 7917.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2937, pruned_loss=0.06737, over 1607504.27 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:10:36,993 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.384e+02 2.769e+02 3.675e+02 1.185e+03, threshold=5.538e+02, percent-clipped=9.0
+2023-02-06 20:10:38,539 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134474.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:10:42,691 INFO [train.py:901] (0/4) Epoch 17, batch 5150, loss[loss=0.2927, simple_loss=0.3556, pruned_loss=0.1149, over 7235.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2934, pruned_loss=0.06677, over 1606367.59 frames. ], batch size: 71, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:11:11,937 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0800, 2.5615, 2.9068, 1.6809, 3.1820, 1.7560, 1.4423, 2.0311],
+ device='cuda:0'), covar=tensor([0.0750, 0.0371, 0.0260, 0.0697, 0.0465, 0.0886, 0.0830, 0.0528],
+ device='cuda:0'), in_proj_covar=tensor([0.0440, 0.0378, 0.0321, 0.0436, 0.0362, 0.0524, 0.0383, 0.0401],
+ device='cuda:0'), out_proj_covar=tensor([1.2011e-04, 1.0062e-04, 8.4996e-05, 1.1665e-04, 9.7002e-05, 1.5114e-04,
+ 1.0434e-04, 1.0736e-04], device='cuda:0')
+2023-02-06 20:11:20,267 INFO [train.py:901] (0/4) Epoch 17, batch 5200, loss[loss=0.2198, simple_loss=0.3031, pruned_loss=0.06823, over 8773.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2937, pruned_loss=0.06687, over 1605547.16 frames. ], batch size: 30, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:11:37,249 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6057, 1.9717, 2.1455, 1.3320, 2.2697, 1.5232, 0.6449, 1.8496],
+ device='cuda:0'), covar=tensor([0.0564, 0.0291, 0.0224, 0.0499, 0.0321, 0.0771, 0.0723, 0.0251],
+ device='cuda:0'), in_proj_covar=tensor([0.0440, 0.0378, 0.0321, 0.0436, 0.0362, 0.0524, 0.0383, 0.0401],
+ device='cuda:0'), out_proj_covar=tensor([1.2000e-04, 1.0071e-04, 8.5080e-05, 1.1664e-04, 9.7036e-05, 1.5117e-04,
+ 1.0427e-04, 1.0740e-04], device='cuda:0')
+2023-02-06 20:11:49,976 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.225e+02 2.783e+02 3.706e+02 1.482e+03, threshold=5.567e+02, percent-clipped=8.0
+2023-02-06 20:11:54,882 INFO [train.py:901] (0/4) Epoch 17, batch 5250, loss[loss=0.2401, simple_loss=0.3167, pruned_loss=0.08172, over 8349.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2954, pruned_loss=0.06813, over 1603755.40 frames. ], batch size: 26, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:11:57,582 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 20:12:31,030 INFO [train.py:901] (0/4) Epoch 17, batch 5300, loss[loss=0.193, simple_loss=0.2794, pruned_loss=0.05332, over 7928.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2948, pruned_loss=0.06791, over 1602062.73 frames. ], batch size: 20, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:12:52,399 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0
+2023-02-06 20:12:58,377 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134666.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:13:01,784 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134671.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:13:02,349 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.333e+02 2.884e+02 3.429e+02 1.143e+03, threshold=5.769e+02, percent-clipped=6.0
+2023-02-06 20:13:07,143 INFO [train.py:901] (0/4) Epoch 17, batch 5350, loss[loss=0.2395, simple_loss=0.3069, pruned_loss=0.08605, over 8505.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2932, pruned_loss=0.06692, over 1601466.70 frames. ], batch size: 28, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:13:10,128 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0
+2023-02-06 20:13:43,340 INFO [train.py:901] (0/4) Epoch 17, batch 5400, loss[loss=0.2123, simple_loss=0.3062, pruned_loss=0.05914, over 8195.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2928, pruned_loss=0.0667, over 1602101.52 frames. ], batch size: 23, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:13:44,284 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134730.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:01,962 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134755.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:14,310 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.337e+02 2.988e+02 3.635e+02 1.067e+03, threshold=5.976e+02, percent-clipped=7.0
+2023-02-06 20:14:18,976 INFO [train.py:901] (0/4) Epoch 17, batch 5450, loss[loss=0.2836, simple_loss=0.3482, pruned_loss=0.1095, over 8359.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2941, pruned_loss=0.06763, over 1605385.37 frames. ], batch size: 24, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:14:20,493 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134781.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:23,997 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134786.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:25,745 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0
+2023-02-06 20:14:47,238 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0
+2023-02-06 20:14:52,635 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6840, 1.7402, 1.8298, 1.8352, 1.0022, 1.6386, 2.2642, 1.9855],
+ device='cuda:0'), covar=tensor([0.0445, 0.1274, 0.1721, 0.1295, 0.0642, 0.1539, 0.0655, 0.0590],
+ device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0100, 0.0162, 0.0114, 0.0138],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-06 20:14:54,759 INFO [train.py:901] (0/4) Epoch 17, batch 5500, loss[loss=0.189, simple_loss=0.2654, pruned_loss=0.0563, over 7926.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2935, pruned_loss=0.06737, over 1607483.78 frames. ], batch size: 20, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:14:55,403 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training.
Duration: 31.02225 +2023-02-06 20:15:25,527 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.381e+02 2.895e+02 3.783e+02 8.489e+02, threshold=5.790e+02, percent-clipped=3.0 +2023-02-06 20:15:31,374 INFO [train.py:901] (0/4) Epoch 17, batch 5550, loss[loss=0.2273, simple_loss=0.3156, pruned_loss=0.06946, over 8456.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.294, pruned_loss=0.06747, over 1606578.77 frames. ], batch size: 27, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:15:32,206 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:15:42,711 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6881, 2.1227, 3.4132, 1.4998, 2.5549, 2.0649, 1.7604, 2.5049], + device='cuda:0'), covar=tensor([0.1742, 0.2283, 0.0732, 0.4042, 0.1739, 0.2904, 0.2030, 0.2163], + device='cuda:0'), in_proj_covar=tensor([0.0509, 0.0567, 0.0544, 0.0615, 0.0633, 0.0572, 0.0508, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:16:06,818 INFO [train.py:901] (0/4) Epoch 17, batch 5600, loss[loss=0.1775, simple_loss=0.2588, pruned_loss=0.04809, over 5973.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2924, pruned_loss=0.06664, over 1605027.23 frames. ], batch size: 13, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:16:38,756 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.724e+02 3.292e+02 4.135e+02 9.276e+02, threshold=6.584e+02, percent-clipped=7.0 +2023-02-06 20:16:42,897 INFO [train.py:901] (0/4) Epoch 17, batch 5650, loss[loss=0.1939, simple_loss=0.2702, pruned_loss=0.05883, over 7538.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2929, pruned_loss=0.06679, over 1607083.76 frames. ], batch size: 18, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:16:47,875 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5725, 1.3698, 4.7375, 1.6624, 4.1915, 3.8895, 4.2951, 4.1446], + device='cuda:0'), covar=tensor([0.0497, 0.4768, 0.0491, 0.4059, 0.1145, 0.0857, 0.0546, 0.0650], + device='cuda:0'), in_proj_covar=tensor([0.0587, 0.0628, 0.0670, 0.0601, 0.0680, 0.0584, 0.0579, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:17:04,360 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 20:17:18,727 INFO [train.py:901] (0/4) Epoch 17, batch 5700, loss[loss=0.2354, simple_loss=0.3092, pruned_loss=0.08086, over 8644.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2933, pruned_loss=0.0669, over 1608271.10 frames. ], batch size: 34, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:17:24,492 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135037.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:28,004 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:31,683 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. 
limit=5.0 +2023-02-06 20:17:42,570 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:45,910 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:49,720 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.516e+02 3.214e+02 3.973e+02 1.283e+03, threshold=6.427e+02, percent-clipped=6.0 +2023-02-06 20:17:53,692 INFO [train.py:901] (0/4) Epoch 17, batch 5750, loss[loss=0.2884, simple_loss=0.3585, pruned_loss=0.1091, over 8252.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2928, pruned_loss=0.06667, over 1607856.25 frames. ], batch size: 24, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:18:11,538 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 20:18:14,099 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 20:18:30,180 INFO [train.py:901] (0/4) Epoch 17, batch 5800, loss[loss=0.2515, simple_loss=0.3301, pruned_loss=0.08647, over 8499.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2922, pruned_loss=0.0662, over 1605613.35 frames. ], batch size: 49, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:19:00,427 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.07 vs. limit=5.0 +2023-02-06 20:19:00,544 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.313e+02 2.882e+02 3.681e+02 6.576e+02, threshold=5.764e+02, percent-clipped=1.0 +2023-02-06 20:19:04,575 INFO [train.py:901] (0/4) Epoch 17, batch 5850, loss[loss=0.2683, simple_loss=0.3417, pruned_loss=0.09748, over 8033.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2928, pruned_loss=0.06637, over 1608073.82 frames. ], batch size: 22, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:19:12,155 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135189.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:37,618 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:41,089 INFO [train.py:901] (0/4) Epoch 17, batch 5900, loss[loss=0.2307, simple_loss=0.3077, pruned_loss=0.07687, over 8193.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2923, pruned_loss=0.06649, over 1606131.00 frames. ], batch size: 23, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:12,430 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.357e+02 3.084e+02 3.660e+02 6.807e+02, threshold=6.167e+02, percent-clipped=2.0 +2023-02-06 20:20:16,629 INFO [train.py:901] (0/4) Epoch 17, batch 5950, loss[loss=0.2068, simple_loss=0.2789, pruned_loss=0.06733, over 7922.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2924, pruned_loss=0.06648, over 1610417.25 frames. ], batch size: 20, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:52,270 INFO [train.py:901] (0/4) Epoch 17, batch 6000, loss[loss=0.2128, simple_loss=0.292, pruned_loss=0.0668, over 7529.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2924, pruned_loss=0.0661, over 1606125.02 frames. 
], batch size: 18, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:20:52,271 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 20:21:02,338 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8493, 3.7661, 3.5271, 2.1765, 3.3932, 3.4824, 3.5180, 3.2812], + device='cuda:0'), covar=tensor([0.0950, 0.0589, 0.0889, 0.4653, 0.0965, 0.0905, 0.1160, 0.0915], + device='cuda:0'), in_proj_covar=tensor([0.0501, 0.0412, 0.0417, 0.0518, 0.0406, 0.0410, 0.0401, 0.0360], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:21:05,419 INFO [train.py:935] (0/4) Epoch 17, validation: loss=0.1774, simple_loss=0.2777, pruned_loss=0.03857, over 944034.00 frames. +2023-02-06 20:21:05,419 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 20:21:12,601 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:21:36,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.411e+02 3.026e+02 3.580e+02 8.983e+02, threshold=6.051e+02, percent-clipped=2.0 +2023-02-06 20:21:40,890 INFO [train.py:901] (0/4) Epoch 17, batch 6050, loss[loss=0.2277, simple_loss=0.3044, pruned_loss=0.07546, over 8501.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.293, pruned_loss=0.06644, over 1608155.32 frames. ], batch size: 28, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:21:41,163 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6570, 2.0176, 3.4362, 1.4681, 2.6799, 2.2449, 1.7219, 2.4403], + device='cuda:0'), covar=tensor([0.1843, 0.2539, 0.0865, 0.4427, 0.1733, 0.2786, 0.2180, 0.2350], + device='cuda:0'), in_proj_covar=tensor([0.0506, 0.0564, 0.0543, 0.0611, 0.0630, 0.0566, 0.0506, 0.0616], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:22:13,011 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0288, 1.6207, 1.4531, 1.5223, 1.3121, 1.3282, 1.2353, 1.2603], + device='cuda:0'), covar=tensor([0.1224, 0.0469, 0.1260, 0.0591, 0.0817, 0.1426, 0.0972, 0.0849], + device='cuda:0'), in_proj_covar=tensor([0.0348, 0.0229, 0.0320, 0.0297, 0.0293, 0.0326, 0.0337, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 20:22:16,307 INFO [train.py:901] (0/4) Epoch 17, batch 6100, loss[loss=0.2288, simple_loss=0.3043, pruned_loss=0.0766, over 8502.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2938, pruned_loss=0.06701, over 1610874.08 frames. ], batch size: 26, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:22:43,966 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.79 vs. limit=5.0 +2023-02-06 20:22:47,554 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.459e+02 2.890e+02 3.783e+02 6.848e+02, threshold=5.780e+02, percent-clipped=3.0 +2023-02-06 20:22:49,612 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 20:22:52,249 INFO [train.py:901] (0/4) Epoch 17, batch 6150, loss[loss=0.205, simple_loss=0.278, pruned_loss=0.06597, over 7973.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2954, pruned_loss=0.06798, over 1612943.66 frames. 
], batch size: 21, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:02,061 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.83 vs. limit=5.0 +2023-02-06 20:23:26,579 INFO [train.py:901] (0/4) Epoch 17, batch 6200, loss[loss=0.1805, simple_loss=0.2517, pruned_loss=0.05461, over 7686.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2949, pruned_loss=0.06734, over 1611711.68 frames. ], batch size: 18, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:29,342 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:23:57,592 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.470e+02 3.035e+02 3.550e+02 6.137e+02, threshold=6.070e+02, percent-clipped=1.0 +2023-02-06 20:24:01,718 INFO [train.py:901] (0/4) Epoch 17, batch 6250, loss[loss=0.2239, simple_loss=0.3192, pruned_loss=0.06434, over 8521.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2951, pruned_loss=0.06702, over 1615135.39 frames. ], batch size: 28, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:13,143 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:14,679 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. limit=5.0 +2023-02-06 20:24:30,220 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:36,941 INFO [train.py:901] (0/4) Epoch 17, batch 6300, loss[loss=0.2185, simple_loss=0.3054, pruned_loss=0.06582, over 8193.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2943, pruned_loss=0.06724, over 1609469.86 frames. ], batch size: 23, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:49,742 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:49,779 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:55,469 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 20:25:07,387 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.700e+02 3.426e+02 4.477e+02 8.691e+02, threshold=6.853e+02, percent-clipped=8.0 +2023-02-06 20:25:11,440 INFO [train.py:901] (0/4) Epoch 17, batch 6350, loss[loss=0.2187, simple_loss=0.3004, pruned_loss=0.0685, over 8132.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2947, pruned_loss=0.06719, over 1610981.14 frames. ], batch size: 22, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:25:15,104 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5259, 1.9773, 3.3052, 1.3193, 2.4643, 2.0244, 1.5992, 2.4630], + device='cuda:0'), covar=tensor([0.1883, 0.2414, 0.0688, 0.4329, 0.1635, 0.2833, 0.2128, 0.2083], + device='cuda:0'), in_proj_covar=tensor([0.0505, 0.0564, 0.0544, 0.0609, 0.0630, 0.0568, 0.0506, 0.0616], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:25:46,616 INFO [train.py:901] (0/4) Epoch 17, batch 6400, loss[loss=0.2023, simple_loss=0.2799, pruned_loss=0.06239, over 7445.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2942, pruned_loss=0.06725, over 1608456.32 frames. 
], batch size: 17, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:25:52,203 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6228, 1.3275, 1.5070, 1.2677, 0.8166, 1.3387, 1.4397, 1.2989], + device='cuda:0'), covar=tensor([0.0536, 0.1323, 0.1741, 0.1501, 0.0608, 0.1576, 0.0731, 0.0674], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 20:26:16,675 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.216e+02 2.648e+02 3.143e+02 6.334e+02, threshold=5.295e+02, percent-clipped=0.0 +2023-02-06 20:26:20,478 INFO [train.py:901] (0/4) Epoch 17, batch 6450, loss[loss=0.2025, simple_loss=0.293, pruned_loss=0.05597, over 8026.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2942, pruned_loss=0.06765, over 1605777.41 frames. ], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:56,417 INFO [train.py:901] (0/4) Epoch 17, batch 6500, loss[loss=0.2019, simple_loss=0.2868, pruned_loss=0.05855, over 8029.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2936, pruned_loss=0.06738, over 1602531.91 frames. ], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:57,318 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8882, 2.2810, 3.8906, 1.7182, 2.9070, 2.3451, 1.9132, 2.8632], + device='cuda:0'), covar=tensor([0.1605, 0.2206, 0.0698, 0.3846, 0.1631, 0.2689, 0.1914, 0.2113], + device='cuda:0'), in_proj_covar=tensor([0.0508, 0.0568, 0.0546, 0.0614, 0.0634, 0.0573, 0.0508, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:27:26,781 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9150, 5.9991, 5.2049, 2.5046, 5.3454, 5.6877, 5.4780, 5.3201], + device='cuda:0'), covar=tensor([0.0475, 0.0384, 0.0845, 0.4013, 0.0637, 0.0674, 0.0974, 0.0512], + device='cuda:0'), in_proj_covar=tensor([0.0503, 0.0416, 0.0420, 0.0519, 0.0409, 0.0413, 0.0402, 0.0362], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:27:27,349 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.443e+02 3.095e+02 4.367e+02 8.897e+02, threshold=6.190e+02, percent-clipped=12.0 +2023-02-06 20:27:31,532 INFO [train.py:901] (0/4) Epoch 17, batch 6550, loss[loss=0.1975, simple_loss=0.2769, pruned_loss=0.05901, over 8460.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2922, pruned_loss=0.06611, over 1604763.30 frames. ], batch size: 29, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:48,288 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135904.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:27:56,453 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 20:28:06,529 INFO [train.py:901] (0/4) Epoch 17, batch 6600, loss[loss=0.2399, simple_loss=0.3104, pruned_loss=0.08472, over 8233.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2938, pruned_loss=0.06733, over 1603826.63 frames. 
], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:06,736 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:15,499 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.97 vs. limit=5.0 +2023-02-06 20:28:16,347 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 20:28:19,938 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8010, 2.3532, 1.9753, 4.1991, 1.5767, 1.8001, 2.2220, 2.7320], + device='cuda:0'), covar=tensor([0.1566, 0.1296, 0.1821, 0.0228, 0.1457, 0.1793, 0.1246, 0.1075], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0196, 0.0247, 0.0210, 0.0207, 0.0245, 0.0253, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 20:28:24,643 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4120, 1.5161, 1.3767, 1.8559, 0.8991, 1.2529, 1.2878, 1.5094], + device='cuda:0'), covar=tensor([0.0842, 0.0773, 0.1047, 0.0499, 0.1081, 0.1414, 0.0824, 0.0737], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0196, 0.0247, 0.0210, 0.0207, 0.0245, 0.0253, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 20:28:36,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.554e+02 2.943e+02 3.634e+02 1.271e+03, threshold=5.887e+02, percent-clipped=2.0 +2023-02-06 20:28:40,558 INFO [train.py:901] (0/4) Epoch 17, batch 6650, loss[loss=0.2009, simple_loss=0.28, pruned_loss=0.06089, over 7975.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2932, pruned_loss=0.06702, over 1604057.24 frames. ], batch size: 21, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:49,875 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:55,262 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-136000.pt +2023-02-06 20:28:56,423 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136000.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:04,534 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136012.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:16,453 INFO [train.py:901] (0/4) Epoch 17, batch 6700, loss[loss=0.2323, simple_loss=0.3133, pruned_loss=0.07561, over 8614.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2956, pruned_loss=0.06828, over 1610419.40 frames. ], batch size: 39, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:29:31,203 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-06 20:29:47,791 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.437e+02 3.090e+02 3.837e+02 8.578e+02, threshold=6.181e+02, percent-clipped=4.0 +2023-02-06 20:29:51,767 INFO [train.py:901] (0/4) Epoch 17, batch 6750, loss[loss=0.1967, simple_loss=0.2804, pruned_loss=0.0565, over 7925.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2949, pruned_loss=0.06849, over 1606978.33 frames. 
], batch size: 20, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:11,742 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:26,349 INFO [train.py:901] (0/4) Epoch 17, batch 6800, loss[loss=0.1934, simple_loss=0.2811, pruned_loss=0.0528, over 8028.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2944, pruned_loss=0.06789, over 1610127.74 frames. ], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:35,096 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 20:30:39,133 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6951, 4.7671, 4.2501, 2.1110, 4.0957, 4.2732, 4.3385, 4.0182], + device='cuda:0'), covar=tensor([0.0728, 0.0475, 0.0977, 0.4812, 0.0921, 0.1068, 0.1112, 0.0796], + device='cuda:0'), in_proj_covar=tensor([0.0506, 0.0417, 0.0421, 0.0523, 0.0412, 0.0416, 0.0404, 0.0364], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:30:48,379 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:57,860 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.691e+02 3.204e+02 3.737e+02 8.793e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 20:31:01,822 INFO [train.py:901] (0/4) Epoch 17, batch 6850, loss[loss=0.2061, simple_loss=0.2896, pruned_loss=0.06128, over 8359.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2948, pruned_loss=0.06787, over 1609354.59 frames. ], batch size: 24, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:31:23,505 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 20:31:37,306 INFO [train.py:901] (0/4) Epoch 17, batch 6900, loss[loss=0.2019, simple_loss=0.2909, pruned_loss=0.05646, over 8361.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2943, pruned_loss=0.06775, over 1608364.26 frames. ], batch size: 24, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:31:59,827 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7667, 1.7406, 1.8237, 1.7451, 0.9642, 1.6465, 2.0533, 2.1013], + device='cuda:0'), covar=tensor([0.0425, 0.1245, 0.1730, 0.1331, 0.0621, 0.1496, 0.0675, 0.0552], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0154, 0.0192, 0.0159, 0.0101, 0.0164, 0.0115, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 20:32:03,281 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3164, 1.3037, 1.4710, 1.2777, 0.7665, 1.2642, 1.2389, 1.1828], + device='cuda:0'), covar=tensor([0.0558, 0.1392, 0.1800, 0.1476, 0.0586, 0.1676, 0.0760, 0.0659], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0154, 0.0192, 0.0159, 0.0101, 0.0164, 0.0115, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 20:32:08,491 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.868e+02 2.541e+02 3.415e+02 4.318e+02 7.722e+02, threshold=6.831e+02, percent-clipped=4.0 +2023-02-06 20:32:12,495 INFO [train.py:901] (0/4) Epoch 17, batch 6950, loss[loss=0.1753, simple_loss=0.2556, pruned_loss=0.04748, over 7426.00 frames. 
], tot_loss[loss=0.2146, simple_loss=0.2944, pruned_loss=0.06738, over 1607237.50 frames. ], batch size: 17, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:32,967 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 20:32:33,817 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7231, 2.7486, 2.4312, 4.0286, 1.6040, 2.1433, 2.5385, 2.9015], + device='cuda:0'), covar=tensor([0.0623, 0.0877, 0.0893, 0.0226, 0.1266, 0.1198, 0.1020, 0.0810], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0197, 0.0248, 0.0212, 0.0209, 0.0247, 0.0254, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 20:32:48,141 INFO [train.py:901] (0/4) Epoch 17, batch 7000, loss[loss=0.2107, simple_loss=0.292, pruned_loss=0.06475, over 8078.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2934, pruned_loss=0.06707, over 1602201.29 frames. ], batch size: 21, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:50,919 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5615, 2.6401, 1.7598, 2.1329, 2.1572, 1.5810, 1.9849, 2.1492], + device='cuda:0'), covar=tensor([0.1346, 0.0325, 0.1137, 0.0640, 0.0661, 0.1348, 0.0998, 0.0977], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0229, 0.0321, 0.0297, 0.0294, 0.0327, 0.0339, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 20:32:58,258 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:04,320 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:06,280 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:06,417 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9673, 3.6446, 2.2055, 2.7193, 2.9168, 1.9951, 2.7508, 2.9396], + device='cuda:0'), covar=tensor([0.1655, 0.0342, 0.1173, 0.0865, 0.0718, 0.1450, 0.1068, 0.1159], + device='cuda:0'), in_proj_covar=tensor([0.0349, 0.0230, 0.0321, 0.0297, 0.0294, 0.0327, 0.0339, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 20:33:11,077 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:18,224 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.499e+02 2.956e+02 3.583e+02 7.307e+02, threshold=5.911e+02, percent-clipped=2.0 +2023-02-06 20:33:22,385 INFO [train.py:901] (0/4) Epoch 17, batch 7050, loss[loss=0.2266, simple_loss=0.3107, pruned_loss=0.07131, over 8491.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2947, pruned_loss=0.06742, over 1611136.24 frames. ], batch size: 29, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:33:29,397 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:57,957 INFO [train.py:901] (0/4) Epoch 17, batch 7100, loss[loss=0.2229, simple_loss=0.3074, pruned_loss=0.06916, over 8028.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2954, pruned_loss=0.06752, over 1616621.89 frames. 
], batch size: 22, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:18,665 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:26,514 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:27,625 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.503e+02 2.917e+02 3.905e+02 1.004e+03, threshold=5.834e+02, percent-clipped=4.0 +2023-02-06 20:34:31,738 INFO [train.py:901] (0/4) Epoch 17, batch 7150, loss[loss=0.2242, simple_loss=0.3121, pruned_loss=0.06819, over 8194.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2962, pruned_loss=0.06802, over 1614496.16 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:50,070 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:35:07,634 INFO [train.py:901] (0/4) Epoch 17, batch 7200, loss[loss=0.2191, simple_loss=0.3058, pruned_loss=0.06622, over 8484.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2958, pruned_loss=0.06753, over 1617024.22 frames. ], batch size: 29, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:35:37,951 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.498e+02 3.072e+02 3.698e+02 8.742e+02, threshold=6.145e+02, percent-clipped=2.0 +2023-02-06 20:35:42,152 INFO [train.py:901] (0/4) Epoch 17, batch 7250, loss[loss=0.1899, simple_loss=0.2789, pruned_loss=0.05051, over 8574.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2965, pruned_loss=0.06797, over 1622541.04 frames. ], batch size: 31, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:11,358 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136619.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:17,802 INFO [train.py:901] (0/4) Epoch 17, batch 7300, loss[loss=0.2182, simple_loss=0.3052, pruned_loss=0.06561, over 8103.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2963, pruned_loss=0.06764, over 1616800.07 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:40,597 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:42,653 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0160, 1.5313, 3.5215, 1.5558, 2.3176, 3.9386, 3.9451, 3.3821], + device='cuda:0'), covar=tensor([0.1110, 0.1796, 0.0348, 0.2099, 0.1152, 0.0193, 0.0512, 0.0518], + device='cuda:0'), in_proj_covar=tensor([0.0285, 0.0314, 0.0278, 0.0307, 0.0296, 0.0255, 0.0392, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 20:36:48,514 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.506e+02 2.969e+02 3.762e+02 7.100e+02, threshold=5.939e+02, percent-clipped=2.0 +2023-02-06 20:36:52,586 INFO [train.py:901] (0/4) Epoch 17, batch 7350, loss[loss=0.2509, simple_loss=0.3191, pruned_loss=0.09138, over 8252.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2955, pruned_loss=0.06702, over 1615824.14 frames. 
], batch size: 22, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:05,619 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:15,662 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 20:37:16,545 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 20:37:17,974 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:27,008 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136727.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:28,131 INFO [train.py:901] (0/4) Epoch 17, batch 7400, loss[loss=0.2534, simple_loss=0.3306, pruned_loss=0.08808, over 8284.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2949, pruned_loss=0.06716, over 1607374.04 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:35,006 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 20:37:36,558 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136740.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:45,373 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:59,284 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.354e+02 2.898e+02 3.777e+02 7.037e+02, threshold=5.795e+02, percent-clipped=3.0 +2023-02-06 20:38:03,291 INFO [train.py:901] (0/4) Epoch 17, batch 7450, loss[loss=0.1753, simple_loss=0.2537, pruned_loss=0.04844, over 7419.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2953, pruned_loss=0.06735, over 1610913.12 frames. ], batch size: 17, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:38:16,561 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 20:38:26,046 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:34,160 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:37,329 INFO [train.py:901] (0/4) Epoch 17, batch 7500, loss[loss=0.2361, simple_loss=0.3188, pruned_loss=0.07667, over 8195.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2957, pruned_loss=0.06765, over 1609160.18 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:39:09,456 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.462e+02 2.866e+02 3.948e+02 7.787e+02, threshold=5.732e+02, percent-clipped=6.0 +2023-02-06 20:39:11,055 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:13,443 INFO [train.py:901] (0/4) Epoch 17, batch 7550, loss[loss=0.1711, simple_loss=0.2537, pruned_loss=0.04427, over 7692.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2931, pruned_loss=0.06692, over 1599516.17 frames. 
], batch size: 18, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:39:28,588 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:49,026 INFO [train.py:901] (0/4) Epoch 17, batch 7600, loss[loss=0.2479, simple_loss=0.334, pruned_loss=0.08087, over 8587.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2925, pruned_loss=0.06633, over 1599037.77 frames. ], batch size: 31, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:40:14,539 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0500, 2.3573, 3.3596, 1.8705, 2.7329, 2.3282, 2.0831, 2.6386], + device='cuda:0'), covar=tensor([0.1402, 0.1942, 0.0556, 0.3345, 0.1364, 0.2317, 0.1682, 0.1828], + device='cuda:0'), in_proj_covar=tensor([0.0509, 0.0571, 0.0545, 0.0614, 0.0635, 0.0576, 0.0509, 0.0619], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:40:21,096 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.516e+02 2.945e+02 3.717e+02 7.457e+02, threshold=5.891e+02, percent-clipped=6.0 +2023-02-06 20:40:22,731 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2925, 2.1120, 2.8431, 2.3067, 2.7366, 2.2978, 2.0165, 1.6073], + device='cuda:0'), covar=tensor([0.4675, 0.4386, 0.1543, 0.3079, 0.2215, 0.2620, 0.1690, 0.4786], + device='cuda:0'), in_proj_covar=tensor([0.0920, 0.0937, 0.0775, 0.0902, 0.0969, 0.0853, 0.0723, 0.0798], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 20:40:25,199 INFO [train.py:901] (0/4) Epoch 17, batch 7650, loss[loss=0.2706, simple_loss=0.3304, pruned_loss=0.1054, over 7914.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2924, pruned_loss=0.06646, over 1599219.71 frames. ], batch size: 20, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:40:38,666 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4813, 1.8286, 2.7257, 1.3217, 2.0220, 1.7825, 1.5785, 2.0136], + device='cuda:0'), covar=tensor([0.1814, 0.2326, 0.0789, 0.4276, 0.1686, 0.3013, 0.2088, 0.2080], + device='cuda:0'), in_proj_covar=tensor([0.0510, 0.0572, 0.0546, 0.0615, 0.0636, 0.0577, 0.0509, 0.0620], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:40:43,288 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:40:58,549 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 20:41:00,039 INFO [train.py:901] (0/4) Epoch 17, batch 7700, loss[loss=0.2552, simple_loss=0.3135, pruned_loss=0.09848, over 6696.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2942, pruned_loss=0.06767, over 1600368.31 frames. ], batch size: 71, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:26,489 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 20:41:26,683 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:41:30,554 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.598e+02 3.111e+02 3.900e+02 8.834e+02, threshold=6.222e+02, percent-clipped=1.0 +2023-02-06 20:41:34,727 INFO [train.py:901] (0/4) Epoch 17, batch 7750, loss[loss=0.1935, simple_loss=0.2763, pruned_loss=0.05539, over 7817.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2957, pruned_loss=0.0688, over 1602813.05 frames. ], batch size: 20, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:35,626 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1191, 2.5354, 3.7274, 2.1807, 1.9172, 3.7781, 0.7562, 2.2263], + device='cuda:0'), covar=tensor([0.1350, 0.1257, 0.0241, 0.1630, 0.2864, 0.0254, 0.2481, 0.1585], + device='cuda:0'), in_proj_covar=tensor([0.0176, 0.0183, 0.0116, 0.0216, 0.0263, 0.0123, 0.0166, 0.0182], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 20:41:44,964 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:03,591 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:09,483 INFO [train.py:901] (0/4) Epoch 17, batch 7800, loss[loss=0.1967, simple_loss=0.2692, pruned_loss=0.06209, over 7186.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2945, pruned_loss=0.06803, over 1601936.09 frames. ], batch size: 16, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:42:36,394 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:39,569 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.454e+02 2.768e+02 3.488e+02 7.043e+02, threshold=5.537e+02, percent-clipped=4.0 +2023-02-06 20:42:41,430 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 20:42:43,610 INFO [train.py:901] (0/4) Epoch 17, batch 7850, loss[loss=0.2135, simple_loss=0.2968, pruned_loss=0.0651, over 8567.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.0685, over 1605093.62 frames. ], batch size: 49, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:16,598 INFO [train.py:901] (0/4) Epoch 17, batch 7900, loss[loss=0.206, simple_loss=0.292, pruned_loss=0.05999, over 8616.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2968, pruned_loss=0.06921, over 1606746.26 frames. ], batch size: 31, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:45,782 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.478e+02 3.005e+02 3.961e+02 6.905e+02, threshold=6.010e+02, percent-clipped=7.0 +2023-02-06 20:43:49,859 INFO [train.py:901] (0/4) Epoch 17, batch 7950, loss[loss=0.2309, simple_loss=0.3159, pruned_loss=0.0729, over 8587.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2963, pruned_loss=0.0688, over 1611235.78 frames. ], batch size: 31, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:52,821 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:23,210 INFO [train.py:901] (0/4) Epoch 17, batch 8000, loss[loss=0.1696, simple_loss=0.2467, pruned_loss=0.04625, over 7222.00 frames. 
], tot_loss[loss=0.215, simple_loss=0.2942, pruned_loss=0.06794, over 1602915.68 frames. ], batch size: 16, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:44:45,095 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0348, 1.7013, 3.0557, 1.3651, 2.2177, 3.3658, 3.4638, 2.7876], + device='cuda:0'), covar=tensor([0.1028, 0.1556, 0.0373, 0.2137, 0.1032, 0.0260, 0.0499, 0.0667], + device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0313, 0.0277, 0.0306, 0.0297, 0.0254, 0.0392, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 20:44:52,972 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.583e+02 3.026e+02 3.684e+02 1.341e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-06 20:44:55,370 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:57,243 INFO [train.py:901] (0/4) Epoch 17, batch 8050, loss[loss=0.1725, simple_loss=0.2423, pruned_loss=0.05138, over 7420.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2944, pruned_loss=0.06812, over 1595580.10 frames. ], batch size: 17, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:45:12,511 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:45:20,315 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-17.pt +2023-02-06 20:45:31,418 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 20:45:34,953 INFO [train.py:901] (0/4) Epoch 18, batch 0, loss[loss=0.2083, simple_loss=0.2966, pruned_loss=0.05997, over 8297.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2966, pruned_loss=0.05997, over 8297.00 frames. ], batch size: 23, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:45:34,954 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 20:45:46,126 INFO [train.py:935] (0/4) Epoch 18, validation: loss=0.1783, simple_loss=0.2784, pruned_loss=0.03907, over 944034.00 frames. +2023-02-06 20:45:46,127 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 20:46:00,879 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 20:46:20,803 INFO [train.py:901] (0/4) Epoch 18, batch 50, loss[loss=0.1906, simple_loss=0.2711, pruned_loss=0.05502, over 7929.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2963, pruned_loss=0.06928, over 368167.69 frames. ], batch size: 20, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:25,301 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 20:46:29,001 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 2.698e+02 3.585e+02 4.414e+02 8.769e+02, threshold=7.169e+02, percent-clipped=9.0 +2023-02-06 20:46:35,868 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 20:46:47,477 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6009, 1.4433, 1.6593, 1.3192, 0.9016, 1.4230, 1.4555, 1.2451], + device='cuda:0'), covar=tensor([0.0581, 0.1246, 0.1629, 0.1445, 0.0604, 0.1487, 0.0754, 0.0697], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0100, 0.0161, 0.0113, 0.0138], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 20:46:56,046 INFO [train.py:901] (0/4) Epoch 18, batch 100, loss[loss=0.1845, simple_loss=0.2648, pruned_loss=0.05212, over 7277.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2974, pruned_loss=0.07025, over 641071.54 frames. ], batch size: 16, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:58,829 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 20:47:16,414 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:31,280 INFO [train.py:901] (0/4) Epoch 18, batch 150, loss[loss=0.2116, simple_loss=0.2943, pruned_loss=0.06448, over 8385.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2971, pruned_loss=0.06979, over 861937.76 frames. ], batch size: 49, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:47:33,465 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:39,685 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.369e+02 2.797e+02 3.885e+02 6.122e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-06 20:48:07,683 INFO [train.py:901] (0/4) Epoch 18, batch 200, loss[loss=0.2044, simple_loss=0.2846, pruned_loss=0.06214, over 8229.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06785, over 1033345.98 frames. ], batch size: 22, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:44,090 INFO [train.py:901] (0/4) Epoch 18, batch 250, loss[loss=0.2481, simple_loss=0.3166, pruned_loss=0.08976, over 8503.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2943, pruned_loss=0.06746, over 1160760.76 frames. ], batch size: 29, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:52,403 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.467e+02 3.008e+02 3.586e+02 6.135e+02, threshold=6.015e+02, percent-clipped=1.0 +2023-02-06 20:48:55,938 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 20:49:03,706 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 20:49:19,874 INFO [train.py:901] (0/4) Epoch 18, batch 300, loss[loss=0.199, simple_loss=0.2776, pruned_loss=0.06018, over 7814.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2952, pruned_loss=0.06838, over 1256585.67 frames. ], batch size: 20, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:49:31,576 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.85 vs. limit=5.0 +2023-02-06 20:49:55,780 INFO [train.py:901] (0/4) Epoch 18, batch 350, loss[loss=0.2331, simple_loss=0.3124, pruned_loss=0.07691, over 8468.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2945, pruned_loss=0.06791, over 1333732.47 frames. 
], batch size: 25, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:03,812 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7588, 1.8952, 1.7240, 2.3141, 0.9502, 1.4410, 1.6068, 1.9196], + device='cuda:0'), covar=tensor([0.0778, 0.0687, 0.0993, 0.0443, 0.1135, 0.1352, 0.0881, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0198, 0.0251, 0.0211, 0.0208, 0.0249, 0.0255, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 20:50:05,700 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.555e+02 3.034e+02 3.752e+02 7.695e+02, threshold=6.069e+02, percent-clipped=3.0 +2023-02-06 20:50:32,304 INFO [train.py:901] (0/4) Epoch 18, batch 400, loss[loss=0.1919, simple_loss=0.2649, pruned_loss=0.05946, over 7689.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2952, pruned_loss=0.06784, over 1397234.62 frames. ], batch size: 18, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:51:08,176 INFO [train.py:901] (0/4) Epoch 18, batch 450, loss[loss=0.2159, simple_loss=0.2867, pruned_loss=0.07253, over 7661.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2967, pruned_loss=0.06867, over 1448360.98 frames. ], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:16,912 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.504e+02 3.016e+02 3.557e+02 6.367e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 20:51:37,225 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0891, 1.5367, 3.5360, 1.3373, 2.4939, 3.9236, 3.9739, 3.3155], + device='cuda:0'), covar=tensor([0.1138, 0.1704, 0.0323, 0.2155, 0.0953, 0.0214, 0.0546, 0.0664], + device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0314, 0.0277, 0.0307, 0.0296, 0.0254, 0.0396, 0.0299], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 20:51:43,032 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:43,588 INFO [train.py:901] (0/4) Epoch 18, batch 500, loss[loss=0.1753, simple_loss=0.2572, pruned_loss=0.04667, over 7645.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2965, pruned_loss=0.06815, over 1483310.26 frames. ], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:45,164 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:46,137 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-06 20:52:04,555 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3816, 4.3899, 3.9576, 1.9279, 3.8563, 3.9788, 3.8910, 3.7512], + device='cuda:0'), covar=tensor([0.0820, 0.0583, 0.1149, 0.5315, 0.0981, 0.1075, 0.1375, 0.0886], + device='cuda:0'), in_proj_covar=tensor([0.0509, 0.0418, 0.0419, 0.0524, 0.0414, 0.0421, 0.0410, 0.0366], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:52:20,558 INFO [train.py:901] (0/4) Epoch 18, batch 550, loss[loss=0.2522, simple_loss=0.3064, pruned_loss=0.09901, over 7695.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2971, pruned_loss=0.06825, over 1516881.09 frames. 
], batch size: 18, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:52:29,488 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.606e+02 3.197e+02 3.974e+02 7.545e+02, threshold=6.394e+02, percent-clipped=3.0 +2023-02-06 20:52:48,226 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-138000.pt +2023-02-06 20:52:56,990 INFO [train.py:901] (0/4) Epoch 18, batch 600, loss[loss=0.2424, simple_loss=0.3128, pruned_loss=0.08598, over 8039.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2966, pruned_loss=0.0681, over 1540780.49 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:09,283 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1290, 2.3163, 1.9904, 2.9362, 1.4235, 1.6189, 2.0013, 2.3511], + device='cuda:0'), covar=tensor([0.0722, 0.0739, 0.0978, 0.0365, 0.1131, 0.1407, 0.0976, 0.0718], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0199, 0.0251, 0.0212, 0.0208, 0.0249, 0.0254, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 20:53:11,919 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 20:53:32,788 INFO [train.py:901] (0/4) Epoch 18, batch 650, loss[loss=0.185, simple_loss=0.2728, pruned_loss=0.04857, over 8187.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2959, pruned_loss=0.06721, over 1562139.86 frames. ], batch size: 23, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:43,406 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.461e+02 2.865e+02 3.365e+02 7.739e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-06 20:54:00,542 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 20:54:09,454 INFO [train.py:901] (0/4) Epoch 18, batch 700, loss[loss=0.2982, simple_loss=0.3556, pruned_loss=0.1204, over 8734.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2954, pruned_loss=0.06706, over 1575970.70 frames. ], batch size: 30, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:29,540 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 20:54:30,784 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5624, 1.4561, 1.9103, 1.4025, 1.1548, 2.0018, 0.4240, 1.3449], + device='cuda:0'), covar=tensor([0.2046, 0.1472, 0.0418, 0.1135, 0.3027, 0.0426, 0.2348, 0.1489], + device='cuda:0'), in_proj_covar=tensor([0.0178, 0.0184, 0.0117, 0.0217, 0.0262, 0.0123, 0.0165, 0.0182], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 20:54:44,125 INFO [train.py:901] (0/4) Epoch 18, batch 750, loss[loss=0.1731, simple_loss=0.2553, pruned_loss=0.04546, over 7648.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2959, pruned_loss=0.06722, over 1586254.21 frames. ], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:53,227 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.332e+02 3.041e+02 3.730e+02 6.216e+02, threshold=6.081e+02, percent-clipped=3.0 +2023-02-06 20:54:58,215 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 20:55:08,073 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. 
Duration: 26.32775 +2023-02-06 20:55:19,810 INFO [train.py:901] (0/4) Epoch 18, batch 800, loss[loss=0.2765, simple_loss=0.3426, pruned_loss=0.1052, over 7043.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2953, pruned_loss=0.06728, over 1590608.88 frames. ], batch size: 71, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:55:49,573 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:51,615 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:54,239 INFO [train.py:901] (0/4) Epoch 18, batch 850, loss[loss=0.2033, simple_loss=0.2824, pruned_loss=0.06208, over 8254.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2937, pruned_loss=0.06662, over 1597850.90 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:03,039 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.308e+02 2.906e+02 3.562e+02 8.427e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 20:56:13,562 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:56:30,839 INFO [train.py:901] (0/4) Epoch 18, batch 900, loss[loss=0.1933, simple_loss=0.2826, pruned_loss=0.05198, over 7544.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2919, pruned_loss=0.06523, over 1606910.18 frames. ], batch size: 18, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:35,138 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0433, 1.4648, 1.7584, 1.4382, 1.0666, 1.4693, 1.7655, 1.4902], + device='cuda:0'), covar=tensor([0.0486, 0.1302, 0.1626, 0.1389, 0.0575, 0.1504, 0.0698, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 20:56:50,624 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:05,367 INFO [train.py:901] (0/4) Epoch 18, batch 950, loss[loss=0.1708, simple_loss=0.2484, pruned_loss=0.04663, over 7799.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2922, pruned_loss=0.06579, over 1608686.45 frames. 
], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:10,952 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:13,023 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:14,181 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.844e+02 2.531e+02 3.020e+02 3.937e+02 8.991e+02, threshold=6.039e+02, percent-clipped=7.0 +2023-02-06 20:57:14,401 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6542, 1.8591, 1.6989, 2.3290, 1.0669, 1.4379, 1.7590, 1.9944], + device='cuda:0'), covar=tensor([0.0833, 0.0728, 0.0931, 0.0435, 0.1034, 0.1310, 0.0783, 0.0623], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0200, 0.0251, 0.0212, 0.0207, 0.0249, 0.0253, 0.0208], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 20:57:29,245 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 20:57:40,362 INFO [train.py:901] (0/4) Epoch 18, batch 1000, loss[loss=0.2197, simple_loss=0.3004, pruned_loss=0.06954, over 8130.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2919, pruned_loss=0.06579, over 1610532.01 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:56,185 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8764, 2.2813, 4.2349, 1.4917, 2.8723, 2.3451, 1.8150, 2.8636], + device='cuda:0'), covar=tensor([0.1783, 0.2574, 0.0778, 0.4329, 0.1903, 0.2978, 0.2146, 0.2445], + device='cuda:0'), in_proj_covar=tensor([0.0511, 0.0574, 0.0544, 0.0618, 0.0636, 0.0577, 0.0510, 0.0624], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 20:58:05,494 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 20:58:16,701 INFO [train.py:901] (0/4) Epoch 18, batch 1050, loss[loss=0.2143, simple_loss=0.3008, pruned_loss=0.0639, over 8632.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.292, pruned_loss=0.06566, over 1610606.03 frames. ], batch size: 39, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:58:18,830 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 20:58:25,548 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.454e+02 3.228e+02 4.133e+02 8.765e+02, threshold=6.456e+02, percent-clipped=4.0 +2023-02-06 20:58:51,053 INFO [train.py:901] (0/4) Epoch 18, batch 1100, loss[loss=0.2053, simple_loss=0.2846, pruned_loss=0.06298, over 7781.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2916, pruned_loss=0.06538, over 1616941.07 frames. ], batch size: 19, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:59:26,910 INFO [train.py:901] (0/4) Epoch 18, batch 1150, loss[loss=0.2138, simple_loss=0.3002, pruned_loss=0.06363, over 8507.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2914, pruned_loss=0.06557, over 1616965.93 frames. ], batch size: 28, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:59:29,613 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-06 20:59:35,876 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 2.366e+02 2.909e+02 3.553e+02 5.350e+02, threshold=5.817e+02, percent-clipped=0.0 +2023-02-06 21:00:02,037 INFO [train.py:901] (0/4) Epoch 18, batch 1200, loss[loss=0.1871, simple_loss=0.2815, pruned_loss=0.04641, over 8322.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2922, pruned_loss=0.06577, over 1616513.89 frames. ], batch size: 26, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:11,833 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:13,958 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:16,607 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:29,702 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:31,771 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:37,163 INFO [train.py:901] (0/4) Epoch 18, batch 1250, loss[loss=0.2892, simple_loss=0.3631, pruned_loss=0.1076, over 8460.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2925, pruned_loss=0.0654, over 1618238.13 frames. ], batch size: 27, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:47,269 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.310e+02 2.834e+02 3.613e+02 5.274e+02, threshold=5.668e+02, percent-clipped=0.0 +2023-02-06 21:00:50,839 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2263, 1.1655, 3.3267, 1.0313, 2.9540, 2.7717, 3.0499, 2.9508], + device='cuda:0'), covar=tensor([0.0799, 0.4354, 0.0944, 0.4464, 0.1433, 0.1174, 0.0790, 0.0939], + device='cuda:0'), in_proj_covar=tensor([0.0583, 0.0620, 0.0663, 0.0593, 0.0675, 0.0579, 0.0571, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:00:54,239 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:04,822 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:08,240 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:12,848 INFO [train.py:901] (0/4) Epoch 18, batch 1300, loss[loss=0.2094, simple_loss=0.2975, pruned_loss=0.06067, over 8030.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2922, pruned_loss=0.06576, over 1617362.32 frames. 
], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:15,188 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0061, 3.5555, 2.3434, 2.9156, 2.7283, 2.1395, 2.7012, 3.0447], + device='cuda:0'), covar=tensor([0.1671, 0.0391, 0.1044, 0.0732, 0.0744, 0.1350, 0.1097, 0.1081], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0236, 0.0327, 0.0302, 0.0299, 0.0331, 0.0345, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 21:01:15,823 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:21,888 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6244, 1.5713, 2.0947, 1.4931, 1.1480, 2.0978, 0.2592, 1.2995], + device='cuda:0'), covar=tensor([0.1899, 0.1468, 0.0386, 0.1207, 0.3302, 0.0395, 0.2372, 0.1516], + device='cuda:0'), in_proj_covar=tensor([0.0179, 0.0186, 0.0117, 0.0217, 0.0263, 0.0125, 0.0164, 0.0183], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 21:01:37,536 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:47,842 INFO [train.py:901] (0/4) Epoch 18, batch 1350, loss[loss=0.2328, simple_loss=0.2899, pruned_loss=0.08779, over 7224.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2925, pruned_loss=0.06578, over 1618891.65 frames. ], batch size: 16, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:49,487 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8484, 2.1866, 3.4081, 1.4124, 2.5689, 2.1407, 1.8337, 2.5471], + device='cuda:0'), covar=tensor([0.1700, 0.2387, 0.0815, 0.4252, 0.1728, 0.2967, 0.2088, 0.2214], + device='cuda:0'), in_proj_covar=tensor([0.0514, 0.0578, 0.0550, 0.0624, 0.0643, 0.0583, 0.0513, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:01:56,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.404e+02 2.906e+02 3.545e+02 6.613e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 21:02:15,433 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:02:23,333 INFO [train.py:901] (0/4) Epoch 18, batch 1400, loss[loss=0.2075, simple_loss=0.2877, pruned_loss=0.06368, over 8313.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2933, pruned_loss=0.06654, over 1618369.96 frames. ], batch size: 25, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:02:49,883 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 21:02:54,708 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 21:02:57,617 INFO [train.py:901] (0/4) Epoch 18, batch 1450, loss[loss=0.2178, simple_loss=0.2969, pruned_loss=0.06937, over 8229.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2926, pruned_loss=0.06611, over 1616808.80 frames. 
], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:06,393 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.491e+02 3.050e+02 4.246e+02 7.467e+02, threshold=6.100e+02, percent-clipped=3.0 +2023-02-06 21:03:07,083 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 21:03:33,395 INFO [train.py:901] (0/4) Epoch 18, batch 1500, loss[loss=0.251, simple_loss=0.3267, pruned_loss=0.08764, over 8696.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2941, pruned_loss=0.06699, over 1617785.02 frames. ], batch size: 39, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:36,675 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.09 vs. limit=5.0 +2023-02-06 21:03:44,747 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:04,115 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 21:04:08,387 INFO [train.py:901] (0/4) Epoch 18, batch 1550, loss[loss=0.2015, simple_loss=0.2833, pruned_loss=0.05989, over 7967.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2941, pruned_loss=0.06715, over 1617085.87 frames. ], batch size: 21, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:17,331 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.366e+02 2.933e+02 3.736e+02 6.367e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-06 21:04:38,171 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:43,436 INFO [train.py:901] (0/4) Epoch 18, batch 1600, loss[loss=0.1798, simple_loss=0.2585, pruned_loss=0.05052, over 7421.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.294, pruned_loss=0.06684, over 1620138.62 frames. ], batch size: 17, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:56,938 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:59,592 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5837, 4.5936, 4.0778, 1.8982, 4.0217, 4.1122, 4.1261, 3.9167], + device='cuda:0'), covar=tensor([0.0706, 0.0485, 0.1090, 0.4910, 0.0857, 0.0907, 0.1275, 0.0733], + device='cuda:0'), in_proj_covar=tensor([0.0508, 0.0418, 0.0419, 0.0523, 0.0413, 0.0424, 0.0409, 0.0366], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:05:07,012 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:10,355 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:15,387 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:18,013 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:19,983 INFO [train.py:901] (0/4) Epoch 18, batch 1650, loss[loss=0.2028, simple_loss=0.2961, pruned_loss=0.0547, over 8037.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2944, pruned_loss=0.06696, over 1619252.82 frames. 
], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:05:28,835 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.375e+02 2.907e+02 3.508e+02 7.626e+02, threshold=5.813e+02, percent-clipped=3.0 +2023-02-06 21:05:33,235 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:53,871 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1058, 1.6438, 3.2631, 1.3638, 2.1132, 3.5456, 3.6847, 3.0315], + device='cuda:0'), covar=tensor([0.0928, 0.1587, 0.0366, 0.2117, 0.1195, 0.0230, 0.0446, 0.0558], + device='cuda:0'), in_proj_covar=tensor([0.0286, 0.0317, 0.0279, 0.0310, 0.0300, 0.0257, 0.0398, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 21:05:54,423 INFO [train.py:901] (0/4) Epoch 18, batch 1700, loss[loss=0.2531, simple_loss=0.3345, pruned_loss=0.08586, over 8774.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2943, pruned_loss=0.06745, over 1619085.86 frames. ], batch size: 30, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:06:28,654 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9803, 1.5666, 3.4153, 1.4690, 2.3191, 3.7365, 3.7879, 2.9866], + device='cuda:0'), covar=tensor([0.1109, 0.1833, 0.0413, 0.2192, 0.1217, 0.0261, 0.0586, 0.0726], + device='cuda:0'), in_proj_covar=tensor([0.0285, 0.0316, 0.0278, 0.0309, 0.0299, 0.0256, 0.0397, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 21:06:28,678 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:29,046 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 21:06:30,614 INFO [train.py:901] (0/4) Epoch 18, batch 1750, loss[loss=0.1975, simple_loss=0.2768, pruned_loss=0.05914, over 7691.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2934, pruned_loss=0.06691, over 1619506.69 frames. ], batch size: 18, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:06:32,192 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:39,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.310e+02 2.913e+02 3.912e+02 7.750e+02, threshold=5.826e+02, percent-clipped=6.0 +2023-02-06 21:06:39,803 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:05,729 INFO [train.py:901] (0/4) Epoch 18, batch 1800, loss[loss=0.2186, simple_loss=0.3005, pruned_loss=0.06836, over 8246.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2929, pruned_loss=0.06681, over 1617497.32 frames. ], batch size: 24, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:37,620 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:40,849 INFO [train.py:901] (0/4) Epoch 18, batch 1850, loss[loss=0.244, simple_loss=0.3177, pruned_loss=0.08519, over 8330.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2931, pruned_loss=0.06686, over 1616584.99 frames. 
], batch size: 25, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:49,460 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:51,404 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.274e+02 2.776e+02 3.369e+02 8.658e+02, threshold=5.552e+02, percent-clipped=2.0 +2023-02-06 21:08:03,886 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4857, 1.4462, 2.3940, 1.1887, 2.1603, 2.5733, 2.6861, 2.1921], + device='cuda:0'), covar=tensor([0.0995, 0.1225, 0.0445, 0.2087, 0.0704, 0.0363, 0.0659, 0.0717], + device='cuda:0'), in_proj_covar=tensor([0.0281, 0.0311, 0.0274, 0.0305, 0.0294, 0.0253, 0.0392, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 21:08:05,260 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:14,579 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139307.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:17,149 INFO [train.py:901] (0/4) Epoch 18, batch 1900, loss[loss=0.2074, simple_loss=0.2912, pruned_loss=0.06182, over 8234.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2932, pruned_loss=0.06706, over 1609624.80 frames. ], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:52,414 INFO [train.py:901] (0/4) Epoch 18, batch 1950, loss[loss=0.1754, simple_loss=0.2573, pruned_loss=0.04677, over 7436.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2931, pruned_loss=0.06692, over 1611858.20 frames. ], batch size: 17, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:55,250 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 21:09:01,254 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.421e+02 2.964e+02 3.877e+02 7.962e+02, threshold=5.927e+02, percent-clipped=5.0 +2023-02-06 21:09:08,116 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 21:09:11,151 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:28,242 INFO [train.py:901] (0/4) Epoch 18, batch 2000, loss[loss=0.2267, simple_loss=0.3131, pruned_loss=0.07018, over 8718.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2935, pruned_loss=0.06712, over 1613983.50 frames. ], batch size: 34, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:09:28,248 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 21:09:30,562 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:33,877 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139419.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:42,126 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139430.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:48,231 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:51,625 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139444.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:59,139 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139455.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:10:02,969 INFO [train.py:901] (0/4) Epoch 18, batch 2050, loss[loss=0.2514, simple_loss=0.3341, pruned_loss=0.08429, over 8358.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2936, pruned_loss=0.06717, over 1620551.58 frames. ], batch size: 24, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:10:12,670 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.515e+02 3.080e+02 3.592e+02 7.733e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 21:10:39,806 INFO [train.py:901] (0/4) Epoch 18, batch 2100, loss[loss=0.3098, simple_loss=0.3707, pruned_loss=0.1244, over 8808.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2936, pruned_loss=0.06687, over 1619282.74 frames. ], batch size: 40, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:10:59,676 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 21:11:15,317 INFO [train.py:901] (0/4) Epoch 18, batch 2150, loss[loss=0.1793, simple_loss=0.2654, pruned_loss=0.04657, over 7805.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2936, pruned_loss=0.06641, over 1622683.96 frames. ], batch size: 20, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:24,958 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.487e+02 3.024e+02 3.808e+02 9.008e+02, threshold=6.048e+02, percent-clipped=4.0 +2023-02-06 21:11:34,704 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,096 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,169 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:50,324 INFO [train.py:901] (0/4) Epoch 18, batch 2200, loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06824, over 7796.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2928, pruned_loss=0.06612, over 1619908.84 frames. ], batch size: 20, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:54,351 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. 
limit=2.0 +2023-02-06 21:12:06,586 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7451, 1.7105, 2.2314, 1.5377, 1.3133, 2.3052, 0.3569, 1.4421], + device='cuda:0'), covar=tensor([0.1627, 0.1456, 0.0402, 0.1334, 0.2861, 0.0459, 0.2404, 0.1515], + device='cuda:0'), in_proj_covar=tensor([0.0179, 0.0189, 0.0118, 0.0217, 0.0263, 0.0126, 0.0165, 0.0184], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 21:12:10,494 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:13,212 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:19,357 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:26,638 INFO [train.py:901] (0/4) Epoch 18, batch 2250, loss[loss=0.1962, simple_loss=0.2728, pruned_loss=0.05982, over 7421.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2944, pruned_loss=0.06707, over 1619966.11 frames. ], batch size: 17, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:31,140 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:36,177 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.519e+02 3.270e+02 4.475e+02 8.912e+02, threshold=6.540e+02, percent-clipped=11.0 +2023-02-06 21:12:51,929 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7112, 1.3965, 1.6282, 1.3286, 0.9279, 1.4123, 1.5650, 1.6174], + device='cuda:0'), covar=tensor([0.0433, 0.1052, 0.1414, 0.1205, 0.0505, 0.1225, 0.0563, 0.0491], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 21:13:01,643 INFO [train.py:901] (0/4) Epoch 18, batch 2300, loss[loss=0.2298, simple_loss=0.3145, pruned_loss=0.07257, over 8292.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2947, pruned_loss=0.06704, over 1616880.26 frames. ], batch size: 23, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:04,651 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:18,653 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 21:13:32,013 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:37,206 INFO [train.py:901] (0/4) Epoch 18, batch 2350, loss[loss=0.2004, simple_loss=0.2924, pruned_loss=0.05421, over 8118.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2955, pruned_loss=0.06758, over 1613008.46 frames. 
], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:40,617 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:47,220 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.439e+02 2.945e+02 3.859e+02 6.515e+02, threshold=5.891e+02, percent-clipped=0.0 +2023-02-06 21:13:55,495 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:08,992 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:11,587 INFO [train.py:901] (0/4) Epoch 18, batch 2400, loss[loss=0.2289, simple_loss=0.3091, pruned_loss=0.0743, over 8472.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2951, pruned_loss=0.06744, over 1613696.92 frames. ], batch size: 29, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:20,256 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:21,638 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:48,491 INFO [train.py:901] (0/4) Epoch 18, batch 2450, loss[loss=0.1804, simple_loss=0.2667, pruned_loss=0.0471, over 7431.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06792, over 1608880.20 frames. ], batch size: 17, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:53,478 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:58,170 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.359e+02 2.854e+02 3.442e+02 8.627e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-06 21:15:01,670 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4694, 4.3967, 4.0333, 2.1629, 3.9190, 4.1143, 4.0009, 3.8488], + device='cuda:0'), covar=tensor([0.0731, 0.0546, 0.0992, 0.4497, 0.0798, 0.0949, 0.1162, 0.0806], + device='cuda:0'), in_proj_covar=tensor([0.0506, 0.0414, 0.0416, 0.0517, 0.0410, 0.0420, 0.0405, 0.0364], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:15:16,425 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0871, 1.2881, 1.2047, 0.6252, 1.1880, 1.0139, 0.1355, 1.1983], + device='cuda:0'), covar=tensor([0.0331, 0.0324, 0.0293, 0.0494, 0.0369, 0.0865, 0.0677, 0.0280], + device='cuda:0'), in_proj_covar=tensor([0.0430, 0.0371, 0.0321, 0.0426, 0.0357, 0.0515, 0.0377, 0.0398], + device='cuda:0'), out_proj_covar=tensor([1.1723e-04, 9.8204e-05, 8.4958e-05, 1.1336e-04, 9.5169e-05, 1.4766e-04, + 1.0272e-04, 1.0667e-04], device='cuda:0') +2023-02-06 21:15:23,630 INFO [train.py:901] (0/4) Epoch 18, batch 2500, loss[loss=0.1784, simple_loss=0.2715, pruned_loss=0.04266, over 8123.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2957, pruned_loss=0.06752, over 1613329.94 frames. 
], batch size: 22, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:15:39,676 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:15:47,234 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:00,238 INFO [train.py:901] (0/4) Epoch 18, batch 2550, loss[loss=0.2173, simple_loss=0.2926, pruned_loss=0.071, over 7938.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2964, pruned_loss=0.06799, over 1617253.54 frames. ], batch size: 20, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:07,329 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:09,810 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.329e+02 2.906e+02 3.594e+02 7.294e+02, threshold=5.811e+02, percent-clipped=3.0 +2023-02-06 21:16:25,079 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:27,849 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-140000.pt +2023-02-06 21:16:35,516 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:36,624 INFO [train.py:901] (0/4) Epoch 18, batch 2600, loss[loss=0.2276, simple_loss=0.3028, pruned_loss=0.07619, over 8236.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2958, pruned_loss=0.06792, over 1613467.91 frames. ], batch size: 22, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:44,591 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:52,699 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:01,552 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:02,210 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:10,521 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:11,786 INFO [train.py:901] (0/4) Epoch 18, batch 2650, loss[loss=0.2484, simple_loss=0.3197, pruned_loss=0.08853, over 8454.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2971, pruned_loss=0.0691, over 1610990.38 frames. 
], batch size: 27, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:18,364 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3413, 2.5462, 2.2004, 3.7194, 1.5963, 1.7778, 2.2353, 2.8246], + device='cuda:0'), covar=tensor([0.0804, 0.0844, 0.1012, 0.0389, 0.1222, 0.1410, 0.1151, 0.0820], + device='cuda:0'), in_proj_covar=tensor([0.0237, 0.0202, 0.0254, 0.0215, 0.0209, 0.0253, 0.0258, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 21:17:22,343 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 2.973e+02 3.666e+02 6.732e+02, threshold=5.945e+02, percent-clipped=3.0 +2023-02-06 21:17:47,904 INFO [train.py:901] (0/4) Epoch 18, batch 2700, loss[loss=0.2502, simple_loss=0.3076, pruned_loss=0.09638, over 7632.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2968, pruned_loss=0.06867, over 1608148.48 frames. ], batch size: 19, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:50,477 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 21:17:55,817 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:00,547 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:02,534 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:16,408 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:19,415 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 21:18:23,207 INFO [train.py:901] (0/4) Epoch 18, batch 2750, loss[loss=0.2148, simple_loss=0.3078, pruned_loss=0.0609, over 8367.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2977, pruned_loss=0.06899, over 1609848.61 frames. ], batch size: 24, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:18:26,679 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:28,866 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:33,789 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.336e+02 2.919e+02 3.807e+02 8.313e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-06 21:19:00,762 INFO [train.py:901] (0/4) Epoch 18, batch 2800, loss[loss=0.2478, simple_loss=0.3228, pruned_loss=0.08638, over 8557.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2969, pruned_loss=0.06886, over 1608745.03 frames. ], batch size: 31, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:01,488 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:25,705 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:35,887 INFO [train.py:901] (0/4) Epoch 18, batch 2850, loss[loss=0.1827, simple_loss=0.2642, pruned_loss=0.05062, over 7232.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2957, pruned_loss=0.06763, over 1613129.12 frames. 
], batch size: 16, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:39,551 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140266.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:45,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.447e+02 2.919e+02 3.574e+02 5.806e+02, threshold=5.838e+02, percent-clipped=0.0 +2023-02-06 21:19:47,265 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:50,879 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:52,281 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:07,401 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140304.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:11,946 INFO [train.py:901] (0/4) Epoch 18, batch 2900, loss[loss=0.1774, simple_loss=0.2532, pruned_loss=0.0508, over 7801.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2953, pruned_loss=0.06751, over 1612445.53 frames. ], batch size: 19, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:14,969 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:18,626 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0 +2023-02-06 21:20:23,105 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4080, 2.3762, 1.6932, 2.1021, 1.9708, 1.4802, 1.8787, 1.9334], + device='cuda:0'), covar=tensor([0.1366, 0.0388, 0.1231, 0.0576, 0.0746, 0.1522, 0.0945, 0.0822], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0234, 0.0322, 0.0301, 0.0296, 0.0330, 0.0339, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 21:20:23,751 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:25,180 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140329.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:32,779 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:44,326 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 21:20:47,982 INFO [train.py:901] (0/4) Epoch 18, batch 2950, loss[loss=0.2831, simple_loss=0.3372, pruned_loss=0.1145, over 6505.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2945, pruned_loss=0.06705, over 1607118.59 frames. ], batch size: 71, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:57,358 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.671e+02 3.280e+02 4.327e+02 7.160e+02, threshold=6.561e+02, percent-clipped=5.0 +2023-02-06 21:21:18,961 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:21:23,798 INFO [train.py:901] (0/4) Epoch 18, batch 3000, loss[loss=0.2111, simple_loss=0.2947, pruned_loss=0.06373, over 8185.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2951, pruned_loss=0.0672, over 1610843.01 frames. 
], batch size: 23, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:21:23,798 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 21:21:37,685 INFO [train.py:935] (0/4) Epoch 18, validation: loss=0.1773, simple_loss=0.2774, pruned_loss=0.03861, over 944034.00 frames. +2023-02-06 21:21:37,686 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 21:22:03,257 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 21:22:07,939 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0480, 1.7853, 2.3657, 1.9588, 2.2423, 2.0382, 1.7573, 1.1117], + device='cuda:0'), covar=tensor([0.5316, 0.4620, 0.1855, 0.3342, 0.2374, 0.2698, 0.1889, 0.4813], + device='cuda:0'), in_proj_covar=tensor([0.0928, 0.0946, 0.0784, 0.0911, 0.0979, 0.0867, 0.0730, 0.0807], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:22:14,081 INFO [train.py:901] (0/4) Epoch 18, batch 3050, loss[loss=0.1762, simple_loss=0.2575, pruned_loss=0.04747, over 7696.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2951, pruned_loss=0.06701, over 1613444.38 frames. ], batch size: 18, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:16,898 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:21,637 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:24,217 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.663e+02 3.172e+02 4.119e+02 9.916e+02, threshold=6.345e+02, percent-clipped=7.0 +2023-02-06 21:22:42,883 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140502.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:46,233 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:48,872 INFO [train.py:901] (0/4) Epoch 18, batch 3100, loss[loss=0.1935, simple_loss=0.2665, pruned_loss=0.06031, over 7802.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2956, pruned_loss=0.06745, over 1613361.62 frames. ], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:56,952 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:00,946 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:01,005 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:07,881 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:09,280 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:15,571 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:25,508 INFO [train.py:901] (0/4) Epoch 18, batch 3150, loss[loss=0.2321, simple_loss=0.3103, pruned_loss=0.07694, over 8113.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2957, pruned_loss=0.06777, over 1614437.85 frames. 
], batch size: 23, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:23:26,378 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140562.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:27,674 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:28,414 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6328, 2.0080, 3.3599, 1.4097, 2.5599, 2.1128, 1.6888, 2.5374], + device='cuda:0'), covar=tensor([0.1780, 0.2593, 0.0705, 0.4401, 0.1611, 0.2905, 0.2156, 0.2101], + device='cuda:0'), in_proj_covar=tensor([0.0510, 0.0573, 0.0549, 0.0618, 0.0632, 0.0575, 0.0511, 0.0623], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:23:34,933 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.438e+02 2.948e+02 4.263e+02 1.019e+03, threshold=5.895e+02, percent-clipped=4.0 +2023-02-06 21:23:38,565 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:40,631 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:43,408 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:59,142 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:01,015 INFO [train.py:901] (0/4) Epoch 18, batch 3200, loss[loss=0.2266, simple_loss=0.308, pruned_loss=0.07259, over 8703.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2961, pruned_loss=0.06766, over 1619043.04 frames. ], batch size: 49, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:07,874 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:36,348 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4953, 1.8273, 1.8992, 1.1412, 1.9526, 1.4058, 0.3910, 1.7348], + device='cuda:0'), covar=tensor([0.0476, 0.0316, 0.0253, 0.0478, 0.0342, 0.0793, 0.0754, 0.0228], + device='cuda:0'), in_proj_covar=tensor([0.0436, 0.0376, 0.0322, 0.0431, 0.0361, 0.0522, 0.0380, 0.0400], + device='cuda:0'), out_proj_covar=tensor([1.1874e-04, 9.9782e-05, 8.5328e-05, 1.1489e-04, 9.6339e-05, 1.4965e-04, + 1.0343e-04, 1.0693e-04], device='cuda:0') +2023-02-06 21:24:36,812 INFO [train.py:901] (0/4) Epoch 18, batch 3250, loss[loss=0.195, simple_loss=0.2807, pruned_loss=0.05469, over 7822.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2949, pruned_loss=0.06641, over 1619307.22 frames. 
], batch size: 20, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:40,492 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5867, 1.8676, 1.9466, 1.1748, 1.9850, 1.4833, 0.4350, 1.8236], + device='cuda:0'), covar=tensor([0.0428, 0.0325, 0.0231, 0.0445, 0.0357, 0.0796, 0.0703, 0.0202], + device='cuda:0'), in_proj_covar=tensor([0.0437, 0.0376, 0.0323, 0.0432, 0.0362, 0.0522, 0.0380, 0.0400], + device='cuda:0'), out_proj_covar=tensor([1.1882e-04, 9.9746e-05, 8.5410e-05, 1.1498e-04, 9.6435e-05, 1.4972e-04, + 1.0347e-04, 1.0694e-04], device='cuda:0') +2023-02-06 21:24:46,448 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.256e+02 2.889e+02 3.448e+02 6.536e+02, threshold=5.777e+02, percent-clipped=1.0 +2023-02-06 21:25:13,073 INFO [train.py:901] (0/4) Epoch 18, batch 3300, loss[loss=0.2653, simple_loss=0.3401, pruned_loss=0.09526, over 8475.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2937, pruned_loss=0.06613, over 1616809.68 frames. ], batch size: 25, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:30,579 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:33,922 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:35,580 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 21:25:39,312 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:47,446 INFO [train.py:901] (0/4) Epoch 18, batch 3350, loss[loss=0.1819, simple_loss=0.2681, pruned_loss=0.04785, over 8109.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.294, pruned_loss=0.0666, over 1618807.82 frames. ], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:56,405 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8193, 2.0346, 5.9450, 2.1350, 5.2941, 4.9915, 5.5097, 5.3866], + device='cuda:0'), covar=tensor([0.0432, 0.4027, 0.0306, 0.3551, 0.0944, 0.0808, 0.0460, 0.0449], + device='cuda:0'), in_proj_covar=tensor([0.0585, 0.0616, 0.0663, 0.0593, 0.0671, 0.0574, 0.0571, 0.0639], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:25:57,593 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.504e+02 2.969e+02 3.727e+02 7.020e+02, threshold=5.938e+02, percent-clipped=2.0 +2023-02-06 21:26:23,945 INFO [train.py:901] (0/4) Epoch 18, batch 3400, loss[loss=0.2025, simple_loss=0.2888, pruned_loss=0.05807, over 8326.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2944, pruned_loss=0.06667, over 1618588.97 frames. 
], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:35,788 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:42,130 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:46,772 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:52,063 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:58,804 INFO [train.py:901] (0/4) Epoch 18, batch 3450, loss[loss=0.2141, simple_loss=0.289, pruned_loss=0.06963, over 7420.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2956, pruned_loss=0.0674, over 1616405.81 frames. ], batch size: 17, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:59,031 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:01,050 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:03,738 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:05,719 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:08,275 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.419e+02 3.065e+02 3.703e+02 6.567e+02, threshold=6.131e+02, percent-clipped=3.0 +2023-02-06 21:27:34,152 INFO [train.py:901] (0/4) Epoch 18, batch 3500, loss[loss=0.2293, simple_loss=0.314, pruned_loss=0.07231, over 8179.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2953, pruned_loss=0.06761, over 1613700.00 frames. ], batch size: 23, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:27:51,063 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 21:27:51,193 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:09,211 INFO [train.py:901] (0/4) Epoch 18, batch 3550, loss[loss=0.2617, simple_loss=0.3214, pruned_loss=0.101, over 7957.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2949, pruned_loss=0.06698, over 1615264.09 frames. 
], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:11,999 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:12,771 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140966.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:18,752 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.456e+02 3.083e+02 3.681e+02 6.081e+02, threshold=6.167e+02, percent-clipped=0.0 +2023-02-06 21:28:26,415 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:30,666 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:44,283 INFO [train.py:901] (0/4) Epoch 18, batch 3600, loss[loss=0.2134, simple_loss=0.2918, pruned_loss=0.06751, over 7523.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.294, pruned_loss=0.06631, over 1616238.59 frames. ], batch size: 18, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:49,255 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:20,152 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 21:29:20,372 INFO [train.py:901] (0/4) Epoch 18, batch 3650, loss[loss=0.2072, simple_loss=0.2777, pruned_loss=0.0683, over 7795.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2931, pruned_loss=0.06597, over 1615084.69 frames. ], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:30,817 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.345e+02 2.956e+02 3.633e+02 6.454e+02, threshold=5.912e+02, percent-clipped=1.0 +2023-02-06 21:29:37,715 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:53,089 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5960, 4.5951, 4.0987, 1.9303, 3.9773, 4.1831, 4.1562, 3.8660], + device='cuda:0'), covar=tensor([0.0765, 0.0569, 0.1106, 0.4713, 0.0963, 0.0948, 0.1284, 0.0898], + device='cuda:0'), in_proj_covar=tensor([0.0507, 0.0421, 0.0420, 0.0521, 0.0411, 0.0421, 0.0406, 0.0368], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:29:55,734 INFO [train.py:901] (0/4) Epoch 18, batch 3700, loss[loss=0.1617, simple_loss=0.2539, pruned_loss=0.03479, over 7931.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2931, pruned_loss=0.06619, over 1613042.12 frames. ], batch size: 20, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:57,132 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 21:30:02,958 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:20,677 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141145.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:32,108 INFO [train.py:901] (0/4) Epoch 18, batch 3750, loss[loss=0.2001, simple_loss=0.2871, pruned_loss=0.05655, over 8243.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2937, pruned_loss=0.06632, over 1613950.47 frames. 
], batch size: 24, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:30:32,263 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:39,100 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:41,863 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.679e+02 3.309e+02 4.099e+02 7.455e+02, threshold=6.618e+02, percent-clipped=7.0 +2023-02-06 21:30:48,315 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1273, 1.7081, 3.4691, 1.4935, 2.4214, 3.8464, 3.8414, 3.3364], + device='cuda:0'), covar=tensor([0.1020, 0.1646, 0.0310, 0.1988, 0.1033, 0.0200, 0.0556, 0.0528], + device='cuda:0'), in_proj_covar=tensor([0.0282, 0.0310, 0.0273, 0.0304, 0.0296, 0.0253, 0.0395, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 21:31:00,278 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:07,419 INFO [train.py:901] (0/4) Epoch 18, batch 3800, loss[loss=0.2028, simple_loss=0.2657, pruned_loss=0.06998, over 7215.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2931, pruned_loss=0.06614, over 1610838.05 frames. ], batch size: 16, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:15,001 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:29,257 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:32,640 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:34,649 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6694, 2.7082, 2.5779, 4.0535, 1.7225, 2.3036, 2.3688, 3.0439], + device='cuda:0'), covar=tensor([0.0668, 0.0826, 0.0752, 0.0220, 0.1117, 0.1152, 0.0983, 0.0722], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0201, 0.0251, 0.0213, 0.0207, 0.0250, 0.0255, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 21:31:42,608 INFO [train.py:901] (0/4) Epoch 18, batch 3850, loss[loss=0.2053, simple_loss=0.2986, pruned_loss=0.05601, over 7807.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2925, pruned_loss=0.06571, over 1612141.29 frames. ], batch size: 20, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:46,905 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:52,716 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.500e+02 3.018e+02 3.684e+02 7.912e+02, threshold=6.036e+02, percent-clipped=1.0 +2023-02-06 21:31:53,730 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. 
limit=2.0 +2023-02-06 21:31:55,475 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:00,526 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:03,907 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 21:32:17,061 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141309.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:18,354 INFO [train.py:901] (0/4) Epoch 18, batch 3900, loss[loss=0.1999, simple_loss=0.2857, pruned_loss=0.05704, over 8355.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2943, pruned_loss=0.06638, over 1615818.10 frames. ], batch size: 24, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:32:42,392 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:52,481 INFO [train.py:901] (0/4) Epoch 18, batch 3950, loss[loss=0.2078, simple_loss=0.2709, pruned_loss=0.07231, over 7806.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.295, pruned_loss=0.06646, over 1621908.53 frames. ], batch size: 19, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:33:02,714 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.421e+02 2.990e+02 3.795e+02 7.053e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 21:33:15,874 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:27,609 INFO [train.py:901] (0/4) Epoch 18, batch 4000, loss[loss=0.2497, simple_loss=0.328, pruned_loss=0.08567, over 8678.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2949, pruned_loss=0.067, over 1619036.66 frames. ], batch size: 39, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:33:34,272 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 21:33:37,280 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:44,486 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141435.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:33:58,723 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:00,785 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:02,050 INFO [train.py:901] (0/4) Epoch 18, batch 4050, loss[loss=0.2499, simple_loss=0.3185, pruned_loss=0.09063, over 8096.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2948, pruned_loss=0.06677, over 1620811.81 frames. 
], batch size: 23, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:34:12,708 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.514e+02 3.146e+02 4.229e+02 8.641e+02, threshold=6.293e+02, percent-clipped=9.0 +2023-02-06 21:34:16,924 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:34,612 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141505.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:38,484 INFO [train.py:901] (0/4) Epoch 18, batch 4100, loss[loss=0.1995, simple_loss=0.2784, pruned_loss=0.06031, over 7925.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2951, pruned_loss=0.06666, over 1622023.80 frames. ], batch size: 20, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:00,216 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:13,072 INFO [train.py:901] (0/4) Epoch 18, batch 4150, loss[loss=0.202, simple_loss=0.2795, pruned_loss=0.0623, over 7917.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2944, pruned_loss=0.06628, over 1618764.98 frames. ], batch size: 20, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:17,348 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141567.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:21,556 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4589, 1.8089, 1.8464, 1.2013, 1.9665, 1.3822, 0.4262, 1.7339], + device='cuda:0'), covar=tensor([0.0467, 0.0279, 0.0239, 0.0474, 0.0340, 0.0822, 0.0738, 0.0227], + device='cuda:0'), in_proj_covar=tensor([0.0431, 0.0371, 0.0318, 0.0429, 0.0358, 0.0517, 0.0375, 0.0396], + device='cuda:0'), out_proj_covar=tensor([1.1729e-04, 9.8246e-05, 8.4250e-05, 1.1435e-04, 9.5413e-05, 1.4836e-04, + 1.0212e-04, 1.0581e-04], device='cuda:0') +2023-02-06 21:35:22,679 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.507e+02 2.964e+02 3.952e+02 7.900e+02, threshold=5.928e+02, percent-clipped=3.0 +2023-02-06 21:35:37,186 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0 +2023-02-06 21:35:48,909 INFO [train.py:901] (0/4) Epoch 18, batch 4200, loss[loss=0.204, simple_loss=0.2765, pruned_loss=0.06571, over 7683.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2939, pruned_loss=0.06592, over 1621292.01 frames. ], batch size: 18, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:49,763 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4427, 1.3905, 2.3567, 1.3638, 2.1856, 2.5406, 2.6957, 2.1822], + device='cuda:0'), covar=tensor([0.1084, 0.1241, 0.0449, 0.1910, 0.0675, 0.0389, 0.0680, 0.0683], + device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0312, 0.0276, 0.0307, 0.0297, 0.0256, 0.0397, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 21:35:55,106 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:02,343 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 21:36:15,978 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:23,880 INFO [train.py:901] (0/4) Epoch 18, batch 4250, loss[loss=0.1814, simple_loss=0.2663, pruned_loss=0.04818, over 7544.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2931, pruned_loss=0.06536, over 1616106.47 frames. ], batch size: 18, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:36:24,606 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 21:36:33,255 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.491e+02 2.994e+02 3.932e+02 8.485e+02, threshold=5.988e+02, percent-clipped=6.0 +2023-02-06 21:36:33,452 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:36,884 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:43,992 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:54,142 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:58,028 INFO [train.py:901] (0/4) Epoch 18, batch 4300, loss[loss=0.2243, simple_loss=0.309, pruned_loss=0.06981, over 8574.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2945, pruned_loss=0.06595, over 1618831.43 frames. ], batch size: 34, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:32,921 INFO [train.py:901] (0/4) Epoch 18, batch 4350, loss[loss=0.1698, simple_loss=0.249, pruned_loss=0.04531, over 7793.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2943, pruned_loss=0.0663, over 1612847.26 frames. ], batch size: 19, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:43,198 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.620e+02 3.197e+02 4.150e+02 9.266e+02, threshold=6.393e+02, percent-clipped=5.0 +2023-02-06 21:37:46,081 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141779.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:37:54,207 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 21:38:02,431 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:04,527 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:07,909 INFO [train.py:901] (0/4) Epoch 18, batch 4400, loss[loss=0.1965, simple_loss=0.2848, pruned_loss=0.05407, over 8312.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2944, pruned_loss=0.06703, over 1610825.20 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:36,619 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 21:38:43,852 INFO [train.py:901] (0/4) Epoch 18, batch 4450, loss[loss=0.2604, simple_loss=0.3296, pruned_loss=0.09559, over 6811.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2932, pruned_loss=0.06629, over 1611003.91 frames. 
], batch size: 71, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:53,326 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.507e+02 2.868e+02 3.524e+02 7.777e+02, threshold=5.735e+02, percent-clipped=2.0 +2023-02-06 21:38:54,252 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:07,096 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141894.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:39:11,812 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:18,280 INFO [train.py:901] (0/4) Epoch 18, batch 4500, loss[loss=0.1997, simple_loss=0.2951, pruned_loss=0.05213, over 8020.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2941, pruned_loss=0.06627, over 1619433.61 frames. ], batch size: 22, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:39:23,237 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:27,788 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 21:39:34,700 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:42,675 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:53,399 INFO [train.py:901] (0/4) Epoch 18, batch 4550, loss[loss=0.2025, simple_loss=0.2925, pruned_loss=0.0562, over 8445.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2934, pruned_loss=0.06578, over 1616988.23 frames. ], batch size: 27, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:40:03,506 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.488e+02 2.920e+02 3.454e+02 6.371e+02, threshold=5.840e+02, percent-clipped=2.0 +2023-02-06 21:40:20,442 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-142000.pt +2023-02-06 21:40:29,738 INFO [train.py:901] (0/4) Epoch 18, batch 4600, loss[loss=0.2138, simple_loss=0.2807, pruned_loss=0.07341, over 7654.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2935, pruned_loss=0.06594, over 1617080.33 frames. ], batch size: 19, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:04,189 INFO [train.py:901] (0/4) Epoch 18, batch 4650, loss[loss=0.1796, simple_loss=0.2645, pruned_loss=0.04733, over 8231.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2929, pruned_loss=0.06583, over 1614740.24 frames. 
], batch size: 22, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:05,110 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:13,898 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.389e+02 2.901e+02 3.503e+02 7.256e+02, threshold=5.801e+02, percent-clipped=3.0 +2023-02-06 21:41:23,654 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:39,047 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5107, 2.2587, 3.2017, 2.4463, 2.9340, 2.5118, 2.1808, 1.6745], + device='cuda:0'), covar=tensor([0.4852, 0.5115, 0.1693, 0.3679, 0.2677, 0.2611, 0.1780, 0.5454], + device='cuda:0'), in_proj_covar=tensor([0.0925, 0.0945, 0.0781, 0.0908, 0.0979, 0.0861, 0.0729, 0.0808], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:41:39,476 INFO [train.py:901] (0/4) Epoch 18, batch 4700, loss[loss=0.1808, simple_loss=0.2741, pruned_loss=0.04378, over 8335.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2922, pruned_loss=0.06571, over 1615020.89 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:00,893 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-06 21:42:06,016 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142150.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:42:13,362 INFO [train.py:901] (0/4) Epoch 18, batch 4750, loss[loss=0.1723, simple_loss=0.2573, pruned_loss=0.04368, over 8029.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.293, pruned_loss=0.06616, over 1611386.67 frames. ], batch size: 22, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:23,433 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:23,890 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.398e+02 2.792e+02 3.541e+02 9.190e+02, threshold=5.585e+02, percent-clipped=4.0 +2023-02-06 21:42:24,103 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142175.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:42:30,579 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 21:42:32,637 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 21:42:38,957 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0848, 2.2753, 1.9843, 2.7805, 1.3625, 1.7701, 2.0655, 2.3826], + device='cuda:0'), covar=tensor([0.0690, 0.0765, 0.0912, 0.0368, 0.1082, 0.1236, 0.0861, 0.0685], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0200, 0.0251, 0.0213, 0.0206, 0.0249, 0.0256, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 21:42:41,665 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:49,354 INFO [train.py:901] (0/4) Epoch 18, batch 4800, loss[loss=0.2442, simple_loss=0.3239, pruned_loss=0.08227, over 8247.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2934, pruned_loss=0.06666, over 1608512.01 frames. 
], batch size: 24, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:23,849 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 21:43:24,502 INFO [train.py:901] (0/4) Epoch 18, batch 4850, loss[loss=0.2264, simple_loss=0.3091, pruned_loss=0.07187, over 8471.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.293, pruned_loss=0.06621, over 1610547.51 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:33,971 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.522e+02 3.053e+02 3.876e+02 6.315e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-06 21:43:36,085 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:45,097 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:59,058 INFO [train.py:901] (0/4) Epoch 18, batch 4900, loss[loss=0.2033, simple_loss=0.2995, pruned_loss=0.05357, over 8456.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2929, pruned_loss=0.06597, over 1617395.54 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:34,495 INFO [train.py:901] (0/4) Epoch 18, batch 4950, loss[loss=0.2688, simple_loss=0.3554, pruned_loss=0.09108, over 8453.00 frames. ], tot_loss[loss=0.213, simple_loss=0.293, pruned_loss=0.06646, over 1610565.98 frames. ], batch size: 27, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:44,960 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.451e+02 2.943e+02 3.789e+02 7.945e+02, threshold=5.886e+02, percent-clipped=1.0 +2023-02-06 21:44:57,234 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:05,915 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:09,747 INFO [train.py:901] (0/4) Epoch 18, batch 5000, loss[loss=0.3081, simple_loss=0.3673, pruned_loss=0.1244, over 6804.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2939, pruned_loss=0.06656, over 1609918.94 frames. ], batch size: 71, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:44,314 INFO [train.py:901] (0/4) Epoch 18, batch 5050, loss[loss=0.2249, simple_loss=0.3033, pruned_loss=0.07324, over 7802.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2919, pruned_loss=0.06599, over 1604298.69 frames. ], batch size: 19, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:46,570 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3484, 2.5358, 2.2285, 2.9776, 2.0993, 2.1917, 2.1721, 2.6495], + device='cuda:0'), covar=tensor([0.0592, 0.0616, 0.0711, 0.0465, 0.0808, 0.0960, 0.0729, 0.0572], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0200, 0.0252, 0.0213, 0.0206, 0.0249, 0.0256, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 21:45:54,481 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.384e+02 2.804e+02 3.417e+02 5.925e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 21:46:04,030 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-06 21:46:19,165 INFO [train.py:901] (0/4) Epoch 18, batch 5100, loss[loss=0.2205, simple_loss=0.3016, pruned_loss=0.06974, over 8236.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2928, pruned_loss=0.06607, over 1604978.21 frames. ], batch size: 24, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:46:54,253 INFO [train.py:901] (0/4) Epoch 18, batch 5150, loss[loss=0.1883, simple_loss=0.2724, pruned_loss=0.05212, over 8493.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2937, pruned_loss=0.06673, over 1603706.70 frames. ], batch size: 26, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:04,406 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.481e+02 3.004e+02 4.323e+02 1.197e+03, threshold=6.009e+02, percent-clipped=7.0 +2023-02-06 21:47:07,302 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6687, 1.5310, 4.8609, 1.8683, 4.2766, 4.0662, 4.3711, 4.2314], + device='cuda:0'), covar=tensor([0.0542, 0.4422, 0.0462, 0.3831, 0.1081, 0.0923, 0.0569, 0.0606], + device='cuda:0'), in_proj_covar=tensor([0.0593, 0.0624, 0.0672, 0.0599, 0.0680, 0.0582, 0.0576, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:47:14,700 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:47:15,673 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 21:47:29,007 INFO [train.py:901] (0/4) Epoch 18, batch 5200, loss[loss=0.2226, simple_loss=0.3014, pruned_loss=0.07192, over 8429.00 frames. ], tot_loss[loss=0.213, simple_loss=0.293, pruned_loss=0.06651, over 1604054.69 frames. ], batch size: 27, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:55,309 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:02,980 INFO [train.py:901] (0/4) Epoch 18, batch 5250, loss[loss=0.2179, simple_loss=0.2985, pruned_loss=0.06865, over 8250.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2936, pruned_loss=0.06633, over 1608161.75 frames. ], batch size: 24, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:03,209 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:03,667 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 21:48:13,207 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:14,357 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.491e+02 3.102e+02 3.692e+02 6.533e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 21:48:21,222 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:37,885 INFO [train.py:901] (0/4) Epoch 18, batch 5300, loss[loss=0.2848, simple_loss=0.3547, pruned_loss=0.1074, over 8635.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2943, pruned_loss=0.06678, over 1608489.75 frames. 
], batch size: 34, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:59,483 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:49:12,889 INFO [train.py:901] (0/4) Epoch 18, batch 5350, loss[loss=0.2219, simple_loss=0.3001, pruned_loss=0.07183, over 8519.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2945, pruned_loss=0.06691, over 1609954.96 frames. ], batch size: 26, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:49:19,900 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 21:49:22,761 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.581e+02 3.011e+02 3.651e+02 7.168e+02, threshold=6.023e+02, percent-clipped=3.0 +2023-02-06 21:49:30,454 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7473, 1.4044, 1.6510, 1.2104, 0.9040, 1.3778, 1.6021, 1.3169], + device='cuda:0'), covar=tensor([0.0589, 0.1354, 0.1741, 0.1545, 0.0614, 0.1594, 0.0730, 0.0708], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0113, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 21:49:48,112 INFO [train.py:901] (0/4) Epoch 18, batch 5400, loss[loss=0.1716, simple_loss=0.2565, pruned_loss=0.04334, over 7662.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2955, pruned_loss=0.06738, over 1616264.08 frames. ], batch size: 19, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:22,753 INFO [train.py:901] (0/4) Epoch 18, batch 5450, loss[loss=0.203, simple_loss=0.2866, pruned_loss=0.05976, over 8458.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2945, pruned_loss=0.06691, over 1613529.74 frames. ], batch size: 27, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:33,556 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.381e+02 3.003e+02 4.378e+02 7.690e+02, threshold=6.006e+02, percent-clipped=4.0 +2023-02-06 21:50:50,000 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 21:50:51,468 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5486, 4.5476, 4.0377, 2.0002, 3.9511, 4.1931, 4.0541, 3.9728], + device='cuda:0'), covar=tensor([0.0695, 0.0537, 0.1015, 0.4821, 0.0886, 0.0976, 0.1253, 0.0739], + device='cuda:0'), in_proj_covar=tensor([0.0504, 0.0418, 0.0416, 0.0515, 0.0409, 0.0416, 0.0400, 0.0365], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:50:58,889 INFO [train.py:901] (0/4) Epoch 18, batch 5500, loss[loss=0.2154, simple_loss=0.3069, pruned_loss=0.06193, over 8294.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2932, pruned_loss=0.06617, over 1617035.42 frames. ], batch size: 23, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:05,232 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-02-06 21:51:14,986 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:51:18,577 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9922, 1.6725, 2.0858, 1.7836, 1.9716, 2.0080, 1.8163, 0.8089], + device='cuda:0'), covar=tensor([0.5312, 0.4494, 0.1767, 0.3262, 0.2370, 0.2820, 0.1962, 0.4756], + device='cuda:0'), in_proj_covar=tensor([0.0926, 0.0946, 0.0778, 0.0912, 0.0984, 0.0865, 0.0732, 0.0811], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:51:30,185 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.71 vs. limit=2.0 +2023-02-06 21:51:33,204 INFO [train.py:901] (0/4) Epoch 18, batch 5550, loss[loss=0.2449, simple_loss=0.3234, pruned_loss=0.08319, over 8468.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2925, pruned_loss=0.06641, over 1613002.67 frames. ], batch size: 29, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:43,334 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.398e+02 2.938e+02 3.826e+02 1.126e+03, threshold=5.876e+02, percent-clipped=10.0 +2023-02-06 21:51:49,614 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7128, 2.3680, 1.8684, 2.1569, 2.0221, 1.7407, 1.9508, 2.1095], + device='cuda:0'), covar=tensor([0.0955, 0.0340, 0.0854, 0.0488, 0.0587, 0.1063, 0.0747, 0.0717], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0236, 0.0325, 0.0305, 0.0297, 0.0330, 0.0343, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 21:52:02,287 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7016, 1.9367, 2.1143, 1.3939, 2.1437, 1.6071, 0.5785, 1.8486], + device='cuda:0'), covar=tensor([0.0524, 0.0355, 0.0246, 0.0439, 0.0362, 0.0724, 0.0752, 0.0239], + device='cuda:0'), in_proj_covar=tensor([0.0435, 0.0372, 0.0322, 0.0432, 0.0359, 0.0521, 0.0376, 0.0398], + device='cuda:0'), out_proj_covar=tensor([1.1821e-04, 9.8507e-05, 8.5268e-05, 1.1507e-04, 9.5506e-05, 1.4919e-04, + 1.0204e-04, 1.0606e-04], device='cuda:0') +2023-02-06 21:52:08,287 INFO [train.py:901] (0/4) Epoch 18, batch 5600, loss[loss=0.2105, simple_loss=0.2955, pruned_loss=0.06271, over 8501.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2925, pruned_loss=0.06634, over 1610496.99 frames. ], batch size: 26, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:36,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:52:43,530 INFO [train.py:901] (0/4) Epoch 18, batch 5650, loss[loss=0.1957, simple_loss=0.2913, pruned_loss=0.0501, over 8500.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2931, pruned_loss=0.06643, over 1612208.49 frames. ], batch size: 26, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:54,545 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.315e+02 3.071e+02 3.627e+02 7.364e+02, threshold=6.141e+02, percent-clipped=4.0 +2023-02-06 21:53:00,419 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 21:53:01,150 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143086.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:53:18,752 INFO [train.py:901] (0/4) Epoch 18, batch 5700, loss[loss=0.1896, simple_loss=0.2746, pruned_loss=0.05228, over 7812.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2928, pruned_loss=0.06622, over 1611078.28 frames. ], batch size: 20, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:53:19,802 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-06 21:53:23,692 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2436, 2.0142, 2.7190, 2.2423, 2.6764, 2.2347, 1.9651, 1.5080], + device='cuda:0'), covar=tensor([0.5107, 0.4936, 0.1756, 0.3601, 0.2383, 0.2943, 0.1837, 0.5096], + device='cuda:0'), in_proj_covar=tensor([0.0919, 0.0941, 0.0775, 0.0908, 0.0974, 0.0860, 0.0726, 0.0807], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 21:53:53,726 INFO [train.py:901] (0/4) Epoch 18, batch 5750, loss[loss=0.2664, simple_loss=0.336, pruned_loss=0.09844, over 8603.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2924, pruned_loss=0.06602, over 1606139.84 frames. ], batch size: 39, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:54:04,019 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.423e+02 2.839e+02 3.621e+02 5.889e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 21:54:04,732 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 21:54:21,111 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3788, 4.3288, 3.9129, 2.0407, 3.8252, 3.9197, 4.0124, 3.7582], + device='cuda:0'), covar=tensor([0.0901, 0.0657, 0.1184, 0.4702, 0.0980, 0.1297, 0.1292, 0.0956], + device='cuda:0'), in_proj_covar=tensor([0.0506, 0.0420, 0.0420, 0.0518, 0.0412, 0.0419, 0.0402, 0.0368], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:54:21,864 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:54:28,668 INFO [train.py:901] (0/4) Epoch 18, batch 5800, loss[loss=0.239, simple_loss=0.3149, pruned_loss=0.08157, over 8247.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2926, pruned_loss=0.06586, over 1610917.08 frames. ], batch size: 24, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:04,396 INFO [train.py:901] (0/4) Epoch 18, batch 5850, loss[loss=0.224, simple_loss=0.3122, pruned_loss=0.06786, over 8324.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2931, pruned_loss=0.06584, over 1609148.11 frames. 
], batch size: 25, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:12,230 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0975, 2.3601, 2.6343, 1.6078, 2.6989, 1.7975, 1.5512, 2.0036], + device='cuda:0'), covar=tensor([0.0716, 0.0434, 0.0296, 0.0687, 0.0487, 0.0821, 0.0843, 0.0490], + device='cuda:0'), in_proj_covar=tensor([0.0436, 0.0371, 0.0321, 0.0431, 0.0360, 0.0521, 0.0378, 0.0398], + device='cuda:0'), out_proj_covar=tensor([1.1835e-04, 9.8228e-05, 8.4939e-05, 1.1489e-04, 9.5905e-05, 1.4910e-04, + 1.0265e-04, 1.0608e-04], device='cuda:0') +2023-02-06 21:55:15,561 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.467e+02 2.892e+02 3.630e+02 6.628e+02, threshold=5.783e+02, percent-clipped=2.0 +2023-02-06 21:55:36,566 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143306.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:55:39,811 INFO [train.py:901] (0/4) Epoch 18, batch 5900, loss[loss=0.2452, simple_loss=0.3207, pruned_loss=0.08484, over 7103.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2914, pruned_loss=0.06501, over 1604818.35 frames. ], batch size: 72, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:53,333 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143331.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:55:59,091 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.48 vs. limit=5.0 +2023-02-06 21:56:02,435 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5118, 3.0570, 4.6112, 2.2535, 3.6589, 2.9752, 2.7188, 3.2616], + device='cuda:0'), covar=tensor([0.1595, 0.2145, 0.0823, 0.4136, 0.1509, 0.2714, 0.1872, 0.2359], + device='cuda:0'), in_proj_covar=tensor([0.0510, 0.0573, 0.0547, 0.0618, 0.0632, 0.0578, 0.0511, 0.0624], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:56:14,454 INFO [train.py:901] (0/4) Epoch 18, batch 5950, loss[loss=0.2227, simple_loss=0.2994, pruned_loss=0.073, over 7925.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2904, pruned_loss=0.06466, over 1600664.64 frames. ], batch size: 20, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:25,148 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.388e+02 2.875e+02 3.741e+02 7.794e+02, threshold=5.749e+02, percent-clipped=3.0 +2023-02-06 21:56:40,500 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 21:56:49,466 INFO [train.py:901] (0/4) Epoch 18, batch 6000, loss[loss=0.2261, simple_loss=0.3018, pruned_loss=0.07524, over 7646.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.29, pruned_loss=0.06475, over 1601314.22 frames. ], batch size: 19, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:49,468 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 21:57:03,429 INFO [train.py:935] (0/4) Epoch 18, validation: loss=0.1765, simple_loss=0.2767, pruned_loss=0.03814, over 944034.00 frames. 
+2023-02-06 21:57:03,430 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 21:57:08,530 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143418.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:57:35,816 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:38,438 INFO [train.py:901] (0/4) Epoch 18, batch 6050, loss[loss=0.1977, simple_loss=0.2613, pruned_loss=0.06703, over 7654.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2916, pruned_loss=0.06551, over 1602227.86 frames. ], batch size: 19, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:57:48,584 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.412e+02 3.060e+02 4.409e+02 1.030e+03, threshold=6.120e+02, percent-clipped=9.0 +2023-02-06 21:57:49,089 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 21:57:52,900 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7581, 1.8953, 2.0815, 1.4847, 2.2484, 1.4144, 0.7315, 1.9326], + device='cuda:0'), covar=tensor([0.0561, 0.0347, 0.0295, 0.0512, 0.0368, 0.0949, 0.0773, 0.0300], + device='cuda:0'), in_proj_covar=tensor([0.0435, 0.0372, 0.0322, 0.0431, 0.0360, 0.0520, 0.0378, 0.0397], + device='cuda:0'), out_proj_covar=tensor([1.1818e-04, 9.8333e-05, 8.5246e-05, 1.1477e-04, 9.5715e-05, 1.4898e-04, + 1.0257e-04, 1.0579e-04], device='cuda:0') +2023-02-06 21:57:53,563 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143482.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:59,582 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143491.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:58:07,289 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7466, 1.8926, 1.6624, 2.3133, 1.0102, 1.4105, 1.6671, 1.8463], + device='cuda:0'), covar=tensor([0.0742, 0.0732, 0.0902, 0.0400, 0.1145, 0.1421, 0.0853, 0.0778], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0199, 0.0250, 0.0213, 0.0208, 0.0247, 0.0254, 0.0214], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 21:58:12,147 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2485, 4.2117, 3.8686, 1.7709, 3.8038, 3.7972, 3.7483, 3.4931], + device='cuda:0'), covar=tensor([0.0742, 0.0580, 0.1027, 0.4893, 0.0939, 0.1045, 0.1430, 0.0778], + device='cuda:0'), in_proj_covar=tensor([0.0512, 0.0425, 0.0425, 0.0524, 0.0417, 0.0422, 0.0408, 0.0373], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 21:58:13,415 INFO [train.py:901] (0/4) Epoch 18, batch 6100, loss[loss=0.1987, simple_loss=0.2919, pruned_loss=0.05277, over 8184.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2909, pruned_loss=0.06546, over 1601026.56 frames. ], batch size: 23, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:58:39,437 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 21:58:49,846 INFO [train.py:901] (0/4) Epoch 18, batch 6150, loss[loss=0.2103, simple_loss=0.2992, pruned_loss=0.06072, over 8546.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2904, pruned_loss=0.06495, over 1605701.31 frames. 
], batch size: 31, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:00,211 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.359e+02 3.030e+02 3.820e+02 7.737e+02, threshold=6.061e+02, percent-clipped=3.0 +2023-02-06 21:59:02,802 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0 +2023-02-06 21:59:25,630 INFO [train.py:901] (0/4) Epoch 18, batch 6200, loss[loss=0.2401, simple_loss=0.307, pruned_loss=0.08662, over 7153.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2922, pruned_loss=0.06584, over 1609386.61 frames. ], batch size: 72, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:26,581 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9606, 1.0911, 1.0652, 0.5629, 1.0514, 0.9216, 0.0880, 1.0509], + device='cuda:0'), covar=tensor([0.0346, 0.0306, 0.0265, 0.0479, 0.0354, 0.0744, 0.0669, 0.0264], + device='cuda:0'), in_proj_covar=tensor([0.0439, 0.0375, 0.0325, 0.0437, 0.0364, 0.0526, 0.0382, 0.0401], + device='cuda:0'), out_proj_covar=tensor([1.1915e-04, 9.9282e-05, 8.6034e-05, 1.1630e-04, 9.7003e-05, 1.5043e-04, + 1.0363e-04, 1.0695e-04], device='cuda:0') +2023-02-06 22:00:01,315 INFO [train.py:901] (0/4) Epoch 18, batch 6250, loss[loss=0.2182, simple_loss=0.2978, pruned_loss=0.06931, over 7069.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2916, pruned_loss=0.06565, over 1607393.25 frames. ], batch size: 72, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:00:12,420 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.460e+02 3.089e+02 4.040e+02 1.017e+03, threshold=6.178e+02, percent-clipped=5.0 +2023-02-06 22:00:28,278 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8438, 3.4906, 2.0812, 2.7089, 2.6827, 1.7959, 2.6536, 2.8579], + device='cuda:0'), covar=tensor([0.1882, 0.0460, 0.1322, 0.0901, 0.0904, 0.1699, 0.1208, 0.1294], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0235, 0.0325, 0.0305, 0.0296, 0.0331, 0.0342, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 22:00:29,821 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-06 22:00:37,053 INFO [train.py:901] (0/4) Epoch 18, batch 6300, loss[loss=0.1909, simple_loss=0.284, pruned_loss=0.04886, over 8114.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2933, pruned_loss=0.06625, over 1609720.91 frames. ], batch size: 23, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:00:58,044 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0 +2023-02-06 22:01:11,901 INFO [train.py:901] (0/4) Epoch 18, batch 6350, loss[loss=0.2102, simple_loss=0.2907, pruned_loss=0.06487, over 8459.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2926, pruned_loss=0.06557, over 1609462.66 frames. ], batch size: 29, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:13,340 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143762.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:01:22,535 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.204e+02 2.882e+02 3.589e+02 6.333e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 22:01:40,208 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.29 vs. 
limit=5.0 +2023-02-06 22:01:47,357 INFO [train.py:901] (0/4) Epoch 18, batch 6400, loss[loss=0.21, simple_loss=0.2981, pruned_loss=0.06096, over 8488.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2939, pruned_loss=0.06647, over 1611776.37 frames. ], batch size: 29, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:54,087 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9342, 1.8246, 2.6412, 1.6806, 2.2078, 2.8935, 2.8432, 2.6196], + device='cuda:0'), covar=tensor([0.0911, 0.1267, 0.0518, 0.1606, 0.1210, 0.0264, 0.0808, 0.0431], + device='cuda:0'), in_proj_covar=tensor([0.0282, 0.0312, 0.0276, 0.0305, 0.0295, 0.0254, 0.0397, 0.0295], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 22:02:04,433 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143835.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:02:21,896 INFO [train.py:901] (0/4) Epoch 18, batch 6450, loss[loss=0.2026, simple_loss=0.2795, pruned_loss=0.06288, over 8083.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2928, pruned_loss=0.06584, over 1613698.60 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:02:33,456 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.452e+02 2.973e+02 3.704e+02 1.405e+03, threshold=5.946e+02, percent-clipped=1.0 +2023-02-06 22:02:34,287 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143877.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:02:57,285 INFO [train.py:901] (0/4) Epoch 18, batch 6500, loss[loss=0.2028, simple_loss=0.2909, pruned_loss=0.05741, over 8330.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2932, pruned_loss=0.066, over 1614959.63 frames. ], batch size: 25, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:24,069 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143950.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:03:31,368 INFO [train.py:901] (0/4) Epoch 18, batch 6550, loss[loss=0.1889, simple_loss=0.2745, pruned_loss=0.05162, over 7977.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2934, pruned_loss=0.06591, over 1616023.45 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:41,856 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.926e+02 2.526e+02 3.154e+02 3.765e+02 8.734e+02, threshold=6.308e+02, percent-clipped=5.0 +2023-02-06 22:03:48,811 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 22:03:59,959 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-144000.pt +2023-02-06 22:04:08,860 INFO [train.py:901] (0/4) Epoch 18, batch 6600, loss[loss=0.206, simple_loss=0.2877, pruned_loss=0.06212, over 8596.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2927, pruned_loss=0.0658, over 1617123.17 frames. ], batch size: 34, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:10,894 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 22:04:18,941 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 22:04:43,666 INFO [train.py:901] (0/4) Epoch 18, batch 6650, loss[loss=0.2349, simple_loss=0.3122, pruned_loss=0.07878, over 8283.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2922, pruned_loss=0.06559, over 1616262.18 frames. 
], batch size: 23, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:54,721 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.298e+02 3.022e+02 3.555e+02 7.360e+02, threshold=6.043e+02, percent-clipped=4.0 +2023-02-06 22:05:15,481 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-06 22:05:19,671 INFO [train.py:901] (0/4) Epoch 18, batch 6700, loss[loss=0.2385, simple_loss=0.3175, pruned_loss=0.0798, over 8316.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2928, pruned_loss=0.06583, over 1617017.77 frames. ], batch size: 25, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:05:34,393 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144133.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:05:50,160 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 22:05:51,947 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144158.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:05:53,840 INFO [train.py:901] (0/4) Epoch 18, batch 6750, loss[loss=0.1897, simple_loss=0.2941, pruned_loss=0.04271, over 8493.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2921, pruned_loss=0.06522, over 1610337.11 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:03,940 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.293e+02 3.003e+02 3.717e+02 7.578e+02, threshold=6.007e+02, percent-clipped=1.0 +2023-02-06 22:06:18,014 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4216, 2.0864, 2.7406, 2.3550, 2.7919, 2.4129, 2.0990, 1.5667], + device='cuda:0'), covar=tensor([0.4987, 0.4715, 0.1662, 0.2965, 0.2010, 0.2549, 0.1733, 0.4606], + device='cuda:0'), in_proj_covar=tensor([0.0932, 0.0950, 0.0784, 0.0919, 0.0986, 0.0871, 0.0736, 0.0815], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:06:25,427 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144206.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:06:27,270 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 22:06:28,559 INFO [train.py:901] (0/4) Epoch 18, batch 6800, loss[loss=0.2117, simple_loss=0.2792, pruned_loss=0.07214, over 8028.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2914, pruned_loss=0.0646, over 1611841.40 frames. ], batch size: 22, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:40,786 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7117, 1.9037, 1.6425, 2.2646, 1.0624, 1.4651, 1.5623, 1.8505], + device='cuda:0'), covar=tensor([0.0766, 0.0696, 0.0954, 0.0451, 0.1108, 0.1288, 0.0883, 0.0738], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0200, 0.0252, 0.0213, 0.0208, 0.0249, 0.0253, 0.0213], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:06:42,848 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144231.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:07:03,970 INFO [train.py:901] (0/4) Epoch 18, batch 6850, loss[loss=0.2052, simple_loss=0.2673, pruned_loss=0.07157, over 7788.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2929, pruned_loss=0.0657, over 1610820.26 frames. 
], batch size: 19, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:07:12,878 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6065, 1.3255, 1.6003, 1.2081, 0.8503, 1.3394, 1.4166, 1.3561], + device='cuda:0'), covar=tensor([0.0529, 0.1302, 0.1771, 0.1504, 0.0561, 0.1500, 0.0711, 0.0687], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0162, 0.0113, 0.0139], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 22:07:13,993 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.379e+02 2.937e+02 3.634e+02 6.722e+02, threshold=5.873e+02, percent-clipped=2.0 +2023-02-06 22:07:17,332 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 22:07:18,819 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8306, 1.4714, 3.9635, 1.4151, 3.5040, 3.2808, 3.6421, 3.4965], + device='cuda:0'), covar=tensor([0.0595, 0.4169, 0.0592, 0.3888, 0.1200, 0.1058, 0.0598, 0.0708], + device='cuda:0'), in_proj_covar=tensor([0.0592, 0.0626, 0.0668, 0.0599, 0.0677, 0.0582, 0.0576, 0.0646], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:07:38,061 INFO [train.py:901] (0/4) Epoch 18, batch 6900, loss[loss=0.2254, simple_loss=0.303, pruned_loss=0.07386, over 8505.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2934, pruned_loss=0.06604, over 1614077.63 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:07:44,344 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 22:07:50,538 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 22:07:52,974 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3893, 2.6373, 3.0854, 1.9171, 3.2165, 2.0028, 1.5471, 2.2715], + device='cuda:0'), covar=tensor([0.0701, 0.0381, 0.0236, 0.0631, 0.0518, 0.0757, 0.0875, 0.0514], + device='cuda:0'), in_proj_covar=tensor([0.0438, 0.0376, 0.0324, 0.0435, 0.0365, 0.0525, 0.0381, 0.0403], + device='cuda:0'), out_proj_covar=tensor([1.1879e-04, 9.9485e-05, 8.5836e-05, 1.1576e-04, 9.7193e-05, 1.5025e-04, + 1.0338e-04, 1.0749e-04], device='cuda:0') +2023-02-06 22:08:13,484 INFO [train.py:901] (0/4) Epoch 18, batch 6950, loss[loss=0.228, simple_loss=0.3172, pruned_loss=0.06942, over 8470.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2948, pruned_loss=0.06677, over 1620300.44 frames. ], batch size: 29, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:24,082 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.398e+02 2.919e+02 3.864e+02 7.610e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-06 22:08:25,463 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-06 22:08:33,150 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4817, 1.9821, 2.9417, 1.4012, 2.1911, 1.7415, 1.7672, 2.1019], + device='cuda:0'), covar=tensor([0.2102, 0.2485, 0.0992, 0.4603, 0.2047, 0.3531, 0.2236, 0.2419], + device='cuda:0'), in_proj_covar=tensor([0.0514, 0.0580, 0.0551, 0.0623, 0.0640, 0.0585, 0.0516, 0.0629], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:08:47,782 INFO [train.py:901] (0/4) Epoch 18, batch 7000, loss[loss=0.1955, simple_loss=0.2886, pruned_loss=0.05123, over 8597.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2949, pruned_loss=0.06652, over 1622842.64 frames. ], batch size: 34, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:09:01,088 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=144429.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:09:22,578 INFO [train.py:901] (0/4) Epoch 18, batch 7050, loss[loss=0.1986, simple_loss=0.284, pruned_loss=0.05662, over 7973.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2937, pruned_loss=0.06623, over 1621222.81 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:09:34,230 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 2.534e+02 2.937e+02 3.689e+02 8.247e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 22:09:58,448 INFO [train.py:901] (0/4) Epoch 18, batch 7100, loss[loss=0.1728, simple_loss=0.2561, pruned_loss=0.04477, over 7246.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2934, pruned_loss=0.06618, over 1618342.16 frames. ], batch size: 16, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:10:33,601 INFO [train.py:901] (0/4) Epoch 18, batch 7150, loss[loss=0.1774, simple_loss=0.2687, pruned_loss=0.04307, over 8091.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2928, pruned_loss=0.06558, over 1615598.89 frames. ], batch size: 21, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:10:43,978 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.263e+02 2.906e+02 3.662e+02 1.305e+03, threshold=5.813e+02, percent-clipped=7.0 +2023-02-06 22:10:53,082 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9662, 1.6551, 3.2753, 1.3769, 2.2166, 3.5552, 3.7069, 3.0135], + device='cuda:0'), covar=tensor([0.1119, 0.1601, 0.0347, 0.2138, 0.1092, 0.0224, 0.0521, 0.0618], + device='cuda:0'), in_proj_covar=tensor([0.0282, 0.0312, 0.0277, 0.0305, 0.0294, 0.0253, 0.0396, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 22:11:10,024 INFO [train.py:901] (0/4) Epoch 18, batch 7200, loss[loss=0.1641, simple_loss=0.2634, pruned_loss=0.03237, over 8252.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.294, pruned_loss=0.06573, over 1622389.02 frames. ], batch size: 24, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:44,447 INFO [train.py:901] (0/4) Epoch 18, batch 7250, loss[loss=0.2041, simple_loss=0.2773, pruned_loss=0.06546, over 7785.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2931, pruned_loss=0.06553, over 1614485.02 frames. 
], batch size: 19, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:54,464 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.379e+02 2.816e+02 3.627e+02 9.857e+02, threshold=5.632e+02, percent-clipped=4.0 +2023-02-06 22:12:19,771 INFO [train.py:901] (0/4) Epoch 18, batch 7300, loss[loss=0.2295, simple_loss=0.3162, pruned_loss=0.07134, over 8294.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2932, pruned_loss=0.06559, over 1616538.43 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:12:53,974 INFO [train.py:901] (0/4) Epoch 18, batch 7350, loss[loss=0.2235, simple_loss=0.307, pruned_loss=0.07003, over 8444.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2934, pruned_loss=0.06599, over 1616188.20 frames. ], batch size: 27, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:13:02,922 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=144773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:13:04,751 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.484e+02 2.992e+02 3.514e+02 8.978e+02, threshold=5.985e+02, percent-clipped=6.0 +2023-02-06 22:13:08,080 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 22:13:26,885 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 22:13:28,902 INFO [train.py:901] (0/4) Epoch 18, batch 7400, loss[loss=0.1842, simple_loss=0.258, pruned_loss=0.05522, over 7532.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2927, pruned_loss=0.06572, over 1615430.96 frames. ], batch size: 18, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:04,315 INFO [train.py:901] (0/4) Epoch 18, batch 7450, loss[loss=0.1642, simple_loss=0.2446, pruned_loss=0.04187, over 7436.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2934, pruned_loss=0.06638, over 1612834.19 frames. ], batch size: 17, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:07,807 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 22:14:14,580 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 2.433e+02 3.083e+02 4.140e+02 9.921e+02, threshold=6.167e+02, percent-clipped=3.0 +2023-02-06 22:14:22,651 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=144888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:14:30,657 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2754, 2.5342, 2.2404, 3.4231, 1.7981, 1.8578, 2.1822, 2.7688], + device='cuda:0'), covar=tensor([0.0693, 0.0829, 0.0785, 0.0335, 0.1073, 0.1318, 0.1031, 0.0706], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0197, 0.0246, 0.0210, 0.0205, 0.0244, 0.0250, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:14:38,535 INFO [train.py:901] (0/4) Epoch 18, batch 7500, loss[loss=0.2471, simple_loss=0.3158, pruned_loss=0.08923, over 6840.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2938, pruned_loss=0.06647, over 1616256.09 frames. 
], batch size: 71, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:12,904 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1649, 1.6731, 3.0806, 1.4200, 2.1761, 3.3023, 3.4878, 2.8655], + device='cuda:0'), covar=tensor([0.0964, 0.1599, 0.0413, 0.2176, 0.1073, 0.0274, 0.0645, 0.0621], + device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0317, 0.0277, 0.0308, 0.0296, 0.0256, 0.0401, 0.0300], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 22:15:14,116 INFO [train.py:901] (0/4) Epoch 18, batch 7550, loss[loss=0.1901, simple_loss=0.272, pruned_loss=0.05409, over 7648.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2938, pruned_loss=0.06637, over 1618687.30 frames. ], batch size: 19, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:24,760 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.361e+02 2.893e+02 3.293e+02 8.578e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 22:15:48,836 INFO [train.py:901] (0/4) Epoch 18, batch 7600, loss[loss=0.2219, simple_loss=0.3097, pruned_loss=0.06699, over 8451.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.293, pruned_loss=0.06597, over 1619580.34 frames. ], batch size: 27, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:51,061 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:12,824 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:24,419 INFO [train.py:901] (0/4) Epoch 18, batch 7650, loss[loss=0.208, simple_loss=0.2978, pruned_loss=0.0591, over 8496.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2927, pruned_loss=0.06596, over 1614569.43 frames. ], batch size: 28, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:16:35,685 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.409e+02 3.204e+02 3.806e+02 7.453e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 22:16:40,588 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:58,562 INFO [train.py:901] (0/4) Epoch 18, batch 7700, loss[loss=0.2559, simple_loss=0.3297, pruned_loss=0.09107, over 8503.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.293, pruned_loss=0.066, over 1615423.05 frames. ], batch size: 28, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:16,306 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 22:17:21,897 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145144.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:33,670 INFO [train.py:901] (0/4) Epoch 18, batch 7750, loss[loss=0.2452, simple_loss=0.3231, pruned_loss=0.08368, over 8488.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.293, pruned_loss=0.06607, over 1614976.29 frames. 
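Note that `grad_scale` drops from 16.0 to 8.0 between batches 7450 and 7500 above. That is the usual dynamic behaviour of mixed-precision loss scaling: the scale is halved whenever a step produces inf/NaN gradients. A sketch using the stock `torch.cuda.amp.GradScaler`, assuming this recipe's scale bookkeeping behaves like the standard scaler:

```python
import torch

scaler = torch.cuda.amp.GradScaler(init_scale=16.0)

def train_step(model, optimizer, batch, compute_loss):
    # compute_loss is an assumed helper returning a scalar loss tensor.
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = compute_loss(model, batch)
    scaler.scale(loss).backward()
    scaler.step(optimizer)     # skipped if inf/NaN grads were found...
    scaler.update()            # ...in which case the scale is halved,
    return scaler.get_scale()  # e.g. 16.0 -> 8.0 as in the log above
```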
], batch size: 28, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:40,033 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:44,020 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1993, 1.8477, 3.5817, 1.8429, 2.6992, 3.9169, 4.0088, 3.3941], + device='cuda:0'), covar=tensor([0.1087, 0.1542, 0.0317, 0.1821, 0.0923, 0.0225, 0.0500, 0.0554], + device='cuda:0'), in_proj_covar=tensor([0.0283, 0.0314, 0.0276, 0.0306, 0.0294, 0.0255, 0.0399, 0.0297], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 22:17:45,193 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.617e+02 3.101e+02 3.765e+02 9.296e+02, threshold=6.202e+02, percent-clipped=3.0 +2023-02-06 22:18:08,794 INFO [train.py:901] (0/4) Epoch 18, batch 7800, loss[loss=0.2339, simple_loss=0.3178, pruned_loss=0.07497, over 8471.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2938, pruned_loss=0.06684, over 1615291.01 frames. ], batch size: 25, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:18:30,140 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7059, 1.6904, 2.2930, 1.4841, 1.2208, 2.1947, 0.3956, 1.3801], + device='cuda:0'), covar=tensor([0.1750, 0.1290, 0.0353, 0.1471, 0.2996, 0.0487, 0.2377, 0.1375], + device='cuda:0'), in_proj_covar=tensor([0.0181, 0.0190, 0.0121, 0.0215, 0.0266, 0.0129, 0.0165, 0.0182], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 22:18:36,389 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7529, 1.7066, 2.3488, 1.6355, 1.3733, 2.2812, 0.4075, 1.5113], + device='cuda:0'), covar=tensor([0.1828, 0.1261, 0.0330, 0.1364, 0.2794, 0.0444, 0.2409, 0.1357], + device='cuda:0'), in_proj_covar=tensor([0.0181, 0.0190, 0.0121, 0.0215, 0.0266, 0.0129, 0.0165, 0.0182], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 22:18:42,840 INFO [train.py:901] (0/4) Epoch 18, batch 7850, loss[loss=0.1791, simple_loss=0.2727, pruned_loss=0.04281, over 8024.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2936, pruned_loss=0.06677, over 1611292.13 frames. ], batch size: 22, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:18:53,261 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.477e+02 2.948e+02 3.643e+02 1.044e+03, threshold=5.895e+02, percent-clipped=9.0 +2023-02-06 22:19:03,120 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.53 vs. limit=5.0 +2023-02-06 22:19:16,124 INFO [train.py:901] (0/4) Epoch 18, batch 7900, loss[loss=0.2045, simple_loss=0.2967, pruned_loss=0.05612, over 8448.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2935, pruned_loss=0.06611, over 1615205.20 frames. 
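The `Exclude cut with ID ... Duration: ...` warnings show utterances being filtered out before batching: every excluded duration in this log is either under 1 second (e.g. 0.95) or above roughly 25 seconds. A sketch of such a filter over a lhotse `CutSet`; the exact bounds for this run are not in the log, so the [1.0 s, 20.0 s] window below is an assumption based on common icefall recipe defaults:

```python
import logging

def remove_short_and_long_utt(c) -> bool:
    # c is a lhotse Cut. Bounds are assumed; the log only shows that
    # cuts shorter than 1 s or longer than ~25 s were excluded.
    keep = 1.0 <= c.duration <= 20.0
    if not keep:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. "
            f"Duration: {c.duration}"
        )
    return keep

# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```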
], batch size: 24, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:28,936 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4709, 1.6956, 1.6833, 1.1049, 1.7446, 1.4082, 0.3002, 1.6127], + device='cuda:0'), covar=tensor([0.0367, 0.0298, 0.0260, 0.0394, 0.0333, 0.0755, 0.0732, 0.0216], + device='cuda:0'), in_proj_covar=tensor([0.0439, 0.0380, 0.0324, 0.0433, 0.0362, 0.0526, 0.0382, 0.0404], + device='cuda:0'), out_proj_covar=tensor([1.1925e-04, 1.0051e-04, 8.5804e-05, 1.1502e-04, 9.6359e-05, 1.5044e-04, + 1.0373e-04, 1.0803e-04], device='cuda:0') +2023-02-06 22:19:44,153 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2391, 1.9546, 2.6615, 2.1724, 2.4704, 2.2704, 1.9675, 1.4617], + device='cuda:0'), covar=tensor([0.5174, 0.4657, 0.1693, 0.3353, 0.2459, 0.2937, 0.1966, 0.5007], + device='cuda:0'), in_proj_covar=tensor([0.0939, 0.0956, 0.0783, 0.0921, 0.0988, 0.0876, 0.0737, 0.0817], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:19:47,146 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:19:49,086 INFO [train.py:901] (0/4) Epoch 18, batch 7950, loss[loss=0.2493, simple_loss=0.3237, pruned_loss=0.08746, over 8236.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2949, pruned_loss=0.06693, over 1619172.00 frames. ], batch size: 24, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:58,003 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8445, 2.1336, 1.7483, 2.6038, 1.1722, 1.5413, 1.9256, 2.0302], + device='cuda:0'), covar=tensor([0.0735, 0.0673, 0.0875, 0.0380, 0.1167, 0.1269, 0.0829, 0.0727], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0197, 0.0248, 0.0210, 0.0206, 0.0244, 0.0250, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:19:59,835 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.389e+02 3.012e+02 3.869e+02 1.111e+03, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 22:20:07,929 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:08,001 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:23,107 INFO [train.py:901] (0/4) Epoch 18, batch 8000, loss[loss=0.24, simple_loss=0.3318, pruned_loss=0.07407, over 8102.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2931, pruned_loss=0.06562, over 1619936.66 frames. ], batch size: 23, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:20:34,646 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:57,109 INFO [train.py:901] (0/4) Epoch 18, batch 8050, loss[loss=0.2261, simple_loss=0.2891, pruned_loss=0.08155, over 7536.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2919, pruned_loss=0.06603, over 1597166.35 frames. 
], batch size: 18, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:21:05,673 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:21:08,162 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.350e+02 2.866e+02 3.408e+02 5.747e+02, threshold=5.732e+02, percent-clipped=0.0 +2023-02-06 22:21:19,730 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-18.pt +2023-02-06 22:21:30,964 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 22:21:34,899 INFO [train.py:901] (0/4) Epoch 19, batch 0, loss[loss=0.2123, simple_loss=0.2933, pruned_loss=0.06571, over 8453.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2933, pruned_loss=0.06571, over 8453.00 frames. ], batch size: 29, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:21:34,900 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 22:21:46,554 INFO [train.py:935] (0/4) Epoch 19, validation: loss=0.1782, simple_loss=0.2779, pruned_loss=0.03928, over 944034.00 frames. +2023-02-06 22:21:46,555 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 22:21:54,190 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:02,572 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6465, 3.0694, 2.3683, 4.1026, 1.7815, 2.2786, 2.5750, 3.1681], + device='cuda:0'), covar=tensor([0.0614, 0.0731, 0.0896, 0.0236, 0.1060, 0.1105, 0.0894, 0.0740], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0199, 0.0250, 0.0212, 0.0208, 0.0246, 0.0252, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:22:03,065 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 22:22:22,466 INFO [train.py:901] (0/4) Epoch 19, batch 50, loss[loss=0.1647, simple_loss=0.243, pruned_loss=0.04318, over 7681.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2915, pruned_loss=0.06546, over 364260.31 frames. ], batch size: 18, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:22,665 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:39,355 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8598, 5.9925, 5.1621, 2.7752, 5.2416, 5.5814, 5.5393, 5.3006], + device='cuda:0'), covar=tensor([0.0479, 0.0359, 0.0894, 0.3959, 0.0738, 0.0649, 0.0924, 0.0626], + device='cuda:0'), in_proj_covar=tensor([0.0514, 0.0426, 0.0429, 0.0527, 0.0415, 0.0425, 0.0409, 0.0374], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:22:40,521 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 22:22:45,198 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.340e+02 2.977e+02 3.641e+02 7.952e+02, threshold=5.953e+02, percent-clipped=6.0 +2023-02-06 22:22:56,256 INFO [train.py:901] (0/4) Epoch 19, batch 100, loss[loss=0.2353, simple_loss=0.3054, pruned_loss=0.08256, over 7813.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.293, pruned_loss=0.06597, over 639021.87 frames. 
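At the epoch 18 to 19 boundary above, training pauses for `Computing validation loss`, reports it averaged over 944034 frames, and prints the peak CUDA memory (6722MB). A sketch of that step, with an assumed `compute_loss(model, batch)` helper returning a loss and a frame count:

```python
import torch

def validate(model, valid_dl, compute_loss, device="cuda:0"):
    """Sketch of the "Computing validation loss" step in the log: run
    the dev set without gradients, average the loss over frames, and
    report the peak CUDA memory seen so far."""
    model.eval()
    tot_loss, tot_frames = 0.0, 0
    with torch.no_grad():
        for batch in valid_dl:
            loss, num_frames = compute_loss(model, batch)  # assumed API
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()
    peak_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
    print(f"validation: loss={tot_loss / tot_frames:.4f}, "
          f"over {tot_frames} frames; max memory {peak_mb}MB")
    return tot_loss / tot_frames
```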
], batch size: 20, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:23:01,896 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 22:23:10,098 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:30,509 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8320, 2.5257, 3.4838, 1.8160, 1.7229, 3.4731, 0.7557, 2.0566], + device='cuda:0'), covar=tensor([0.1583, 0.1069, 0.0230, 0.1876, 0.2992, 0.0341, 0.2195, 0.1472], + device='cuda:0'), in_proj_covar=tensor([0.0183, 0.0191, 0.0122, 0.0216, 0.0268, 0.0130, 0.0166, 0.0185], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 22:23:32,323 INFO [train.py:901] (0/4) Epoch 19, batch 150, loss[loss=0.1954, simple_loss=0.2732, pruned_loss=0.05883, over 7652.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2929, pruned_loss=0.06604, over 857564.47 frames. ], batch size: 19, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:23:46,172 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:48,211 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0213, 1.5812, 4.2223, 1.8251, 3.7805, 3.5221, 3.8661, 3.7225], + device='cuda:0'), covar=tensor([0.0751, 0.4664, 0.0621, 0.4104, 0.1128, 0.1032, 0.0649, 0.0728], + device='cuda:0'), in_proj_covar=tensor([0.0597, 0.0631, 0.0676, 0.0605, 0.0684, 0.0590, 0.0587, 0.0649], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:23:49,601 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1108, 2.3956, 1.8929, 2.8970, 1.4219, 1.7435, 2.1500, 2.4116], + device='cuda:0'), covar=tensor([0.0659, 0.0692, 0.0903, 0.0339, 0.1081, 0.1198, 0.0770, 0.0679], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0198, 0.0249, 0.0211, 0.0207, 0.0245, 0.0251, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:23:57,046 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.454e+02 2.969e+02 3.777e+02 1.176e+03, threshold=5.938e+02, percent-clipped=4.0 +2023-02-06 22:24:07,965 INFO [train.py:901] (0/4) Epoch 19, batch 200, loss[loss=0.2152, simple_loss=0.3036, pruned_loss=0.06335, over 8574.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2938, pruned_loss=0.06621, over 1028906.81 frames. 
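The learning rate in these lines decays smoothly within an epoch (4.18e-03 down to 4.16e-03 over epoch 18's batches) and steps down more sharply at the epoch boundary (4.16e-03 to 4.05e-03 at epoch 19, batch 0). That shape matches an Eden-style schedule with separate batch-count and epoch-count decay terms; the constants below (`lr_batches`, `lr_epochs`) are assumptions, not values read from this run's config:

```python
def eden_lr(base_lr: float, batch: int, epoch: float,
            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
    """Eden-style learning-rate schedule (a sketch; constants assumed).

    Two multiplicative decay factors, one driven by the global batch
    count and one by the epoch count, which is why the log shows lr
    easing down within an epoch and stepping down at epoch boundaries."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor
```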
], batch size: 31, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:24:14,825 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9347, 1.6297, 3.2848, 1.4352, 2.2944, 3.5598, 3.7205, 3.0619], + device='cuda:0'), covar=tensor([0.1161, 0.1644, 0.0335, 0.2072, 0.1111, 0.0251, 0.0599, 0.0561], + device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0314, 0.0277, 0.0305, 0.0295, 0.0255, 0.0399, 0.0296], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 22:24:33,110 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:35,721 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:42,457 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 22:24:42,743 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9509, 3.8283, 2.3160, 2.8368, 2.8830, 2.0836, 3.0065, 2.9920], + device='cuda:0'), covar=tensor([0.1751, 0.0364, 0.1068, 0.0771, 0.0692, 0.1244, 0.1008, 0.1086], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0234, 0.0323, 0.0300, 0.0296, 0.0328, 0.0339, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 22:24:43,207 INFO [train.py:901] (0/4) Epoch 19, batch 250, loss[loss=0.1983, simple_loss=0.2811, pruned_loss=0.05779, over 7922.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2943, pruned_loss=0.06623, over 1161271.62 frames. ], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:24:51,126 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:55,229 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:58,388 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 22:25:06,968 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-06 22:25:07,542 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.432e+02 3.022e+02 3.893e+02 7.688e+02, threshold=6.043e+02, percent-clipped=6.0 +2023-02-06 22:25:07,762 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1538, 2.1294, 1.5686, 1.9209, 1.7929, 1.2512, 1.6333, 1.5845], + device='cuda:0'), covar=tensor([0.1526, 0.0464, 0.1225, 0.0549, 0.0687, 0.1608, 0.1037, 0.0864], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0235, 0.0323, 0.0301, 0.0296, 0.0329, 0.0340, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 22:25:13,288 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:17,453 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8232, 2.2906, 1.6912, 2.9790, 1.2837, 1.5179, 2.0543, 2.3939], + device='cuda:0'), covar=tensor([0.0934, 0.0811, 0.1193, 0.0372, 0.1218, 0.1484, 0.0859, 0.0681], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0197, 0.0248, 0.0211, 0.0207, 0.0245, 0.0251, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:25:18,657 INFO [train.py:901] (0/4) Epoch 19, batch 300, loss[loss=0.2018, simple_loss=0.2853, pruned_loss=0.05916, over 8081.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.294, pruned_loss=0.06635, over 1263629.27 frames. ], batch size: 21, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:20,818 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3736, 1.3513, 2.3730, 1.1921, 2.1694, 2.5012, 2.6597, 2.1278], + device='cuda:0'), covar=tensor([0.1139, 0.1375, 0.0443, 0.2096, 0.0777, 0.0385, 0.0614, 0.0675], + device='cuda:0'), in_proj_covar=tensor([0.0284, 0.0315, 0.0277, 0.0307, 0.0296, 0.0255, 0.0400, 0.0298], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 22:25:22,940 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:39,973 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:50,230 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145839.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:25:50,291 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7155, 1.9818, 2.2031, 1.4142, 2.2709, 1.6152, 0.6864, 1.9822], + device='cuda:0'), covar=tensor([0.0516, 0.0314, 0.0216, 0.0467, 0.0351, 0.0690, 0.0702, 0.0250], + device='cuda:0'), in_proj_covar=tensor([0.0437, 0.0378, 0.0324, 0.0431, 0.0361, 0.0522, 0.0380, 0.0404], + device='cuda:0'), out_proj_covar=tensor([1.1878e-04, 1.0002e-04, 8.5744e-05, 1.1446e-04, 9.6007e-05, 1.4906e-04, + 1.0307e-04, 1.0820e-04], device='cuda:0') +2023-02-06 22:25:53,699 INFO [train.py:901] (0/4) Epoch 19, batch 350, loss[loss=0.1774, simple_loss=0.2579, pruned_loss=0.04845, over 8181.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2939, pruned_loss=0.06629, over 1345017.29 frames. 
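The `zipformer.py:1185` lines track a per-module warmup counter (`warmup_begin`, `warmup_end`, `batch_count`) and occasionally report `num_to_drop=1, layers_to_drop={1}`: whole encoder layers are skipped stochastically as a depth regularizer, as in the entry at 22:25:50 above. A sketch of the mechanism, assuming an independent drop probability per layer that is higher while the module is still inside its warmup window; the actual Zipformer schedule differs in detail:

```python
import random

def pick_layers_to_drop(num_layers: int, batch_count: float,
                        warmup_begin: float, warmup_end: float,
                        max_p: float = 0.075) -> set:
    """Sketch of stochastic layer skipping ("layers_to_drop" in the log).
    Assumption: each layer is dropped independently, with a higher
    probability during the warmup window; max_p is an assumed value."""
    p = max_p if warmup_begin <= batch_count < warmup_end else max_p / 2
    return {i for i in range(num_layers) if random.random() < p}

# In the encoder's forward pass, a skipped layer passes its input through:
# to_drop = pick_layers_to_drop(len(layers), batch_count, begin, end)
# for i, layer in enumerate(layers):
#     x = x if i in to_drop else layer(x)
```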
], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:57,435 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:26:16,559 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0620, 1.8465, 2.3446, 1.9975, 2.2852, 2.1523, 1.9097, 1.1699], + device='cuda:0'), covar=tensor([0.4912, 0.4220, 0.1622, 0.3382, 0.2143, 0.2481, 0.1747, 0.4592], + device='cuda:0'), in_proj_covar=tensor([0.0929, 0.0948, 0.0776, 0.0915, 0.0979, 0.0867, 0.0729, 0.0808], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:26:17,670 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.383e+02 2.952e+02 3.795e+02 9.100e+02, threshold=5.904e+02, percent-clipped=6.0 +2023-02-06 22:26:30,034 INFO [train.py:901] (0/4) Epoch 19, batch 400, loss[loss=0.1791, simple_loss=0.2523, pruned_loss=0.05292, over 7781.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2915, pruned_loss=0.06516, over 1402902.07 frames. ], batch size: 19, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:04,033 INFO [train.py:901] (0/4) Epoch 19, batch 450, loss[loss=0.1999, simple_loss=0.2849, pruned_loss=0.05747, over 8181.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2938, pruned_loss=0.06623, over 1453767.49 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:12,886 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:27:28,524 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.474e+02 2.839e+02 3.457e+02 5.406e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 22:27:40,177 INFO [train.py:901] (0/4) Epoch 19, batch 500, loss[loss=0.188, simple_loss=0.2642, pruned_loss=0.05591, over 7638.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.294, pruned_loss=0.06656, over 1491743.47 frames. ], batch size: 19, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:45,786 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-146000.pt +2023-02-06 22:27:50,074 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:03,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6910, 1.5275, 4.8694, 1.7924, 4.3459, 3.9686, 4.4241, 4.2406], + device='cuda:0'), covar=tensor([0.0493, 0.4448, 0.0419, 0.3979, 0.0989, 0.0941, 0.0508, 0.0614], + device='cuda:0'), in_proj_covar=tensor([0.0597, 0.0627, 0.0673, 0.0605, 0.0687, 0.0591, 0.0585, 0.0649], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:28:15,839 INFO [train.py:901] (0/4) Epoch 19, batch 550, loss[loss=0.1746, simple_loss=0.2587, pruned_loss=0.0452, over 7938.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2922, pruned_loss=0.06545, over 1522548.79 frames. 
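Two checkpointing patterns appear in this log: `epoch-<n>.pt` at each epoch boundary, and `checkpoint-<global_batch>.pt` whenever the global batch counter crosses an interval (146000 above, 148000 later in the log, so the interval appears to be 2000 batches). A sketch; the exact contents of the saved state dict are assumed:

```python
import torch

def save_checkpoint(filename, model, optimizer, scheduler=None):
    # Matches the "Saving checkpoint to .../epoch-N.pt" and
    # "checkpoint-<global_batch>.pt" lines; the keys below are assumed.
    state = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "scheduler": scheduler.state_dict() if scheduler is not None else None,
    }
    torch.save(state, filename)

# Inside the training loop (2000-batch interval inferred from the
# checkpoint-146000.pt / checkpoint-148000.pt filenames in this log):
# if global_batch % 2000 == 0:
#     save_checkpoint(f"{exp_dir}/checkpoint-{global_batch}.pt", model, opt)
# ...and at the end of each epoch:
# save_checkpoint(f"{exp_dir}/epoch-{epoch}.pt", model, opt)
```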
], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:19,280 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.5862, 5.7310, 5.0329, 2.3937, 5.0880, 5.4075, 5.3483, 5.0882], + device='cuda:0'), covar=tensor([0.0671, 0.0448, 0.1008, 0.4564, 0.0731, 0.0781, 0.1094, 0.0569], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0428, 0.0430, 0.0530, 0.0416, 0.0430, 0.0411, 0.0375], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:28:35,097 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146071.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:38,933 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.557e+02 3.049e+02 4.000e+02 8.642e+02, threshold=6.099e+02, percent-clipped=4.0 +2023-02-06 22:28:50,789 INFO [train.py:901] (0/4) Epoch 19, batch 600, loss[loss=0.2174, simple_loss=0.2971, pruned_loss=0.06884, over 8133.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2935, pruned_loss=0.06609, over 1546614.31 frames. ], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:54,534 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6560, 1.3618, 4.8723, 1.7339, 4.3295, 3.9976, 4.4256, 4.2439], + device='cuda:0'), covar=tensor([0.0499, 0.4887, 0.0466, 0.4046, 0.1063, 0.1015, 0.0520, 0.0635], + device='cuda:0'), in_proj_covar=tensor([0.0594, 0.0624, 0.0670, 0.0600, 0.0684, 0.0588, 0.0582, 0.0646], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:28:59,453 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:03,461 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4362, 4.3813, 4.0090, 2.1502, 3.9136, 4.0490, 4.0485, 3.7766], + device='cuda:0'), covar=tensor([0.0707, 0.0500, 0.1001, 0.4310, 0.0804, 0.1078, 0.1151, 0.0760], + device='cuda:0'), in_proj_covar=tensor([0.0517, 0.0425, 0.0427, 0.0526, 0.0413, 0.0427, 0.0407, 0.0373], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:29:11,428 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 22:29:11,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:17,640 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:26,663 INFO [train.py:901] (0/4) Epoch 19, batch 650, loss[loss=0.2061, simple_loss=0.2847, pruned_loss=0.06381, over 8034.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2925, pruned_loss=0.06582, over 1564083.19 frames. 
], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:29:42,897 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:49,756 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.628e+02 2.995e+02 3.912e+02 8.872e+02, threshold=5.991e+02, percent-clipped=7.0 +2023-02-06 22:29:53,905 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146183.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:30:00,618 INFO [train.py:901] (0/4) Epoch 19, batch 700, loss[loss=0.2125, simple_loss=0.2921, pruned_loss=0.06648, over 8080.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2931, pruned_loss=0.06635, over 1574790.12 frames. ], batch size: 21, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:37,716 INFO [train.py:901] (0/4) Epoch 19, batch 750, loss[loss=0.2103, simple_loss=0.2926, pruned_loss=0.06398, over 8023.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2917, pruned_loss=0.06518, over 1582685.47 frames. ], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:58,054 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 22:31:00,738 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.187e+02 2.733e+02 3.387e+02 1.037e+03, threshold=5.466e+02, percent-clipped=4.0 +2023-02-06 22:31:06,862 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 22:31:11,496 INFO [train.py:901] (0/4) Epoch 19, batch 800, loss[loss=0.1893, simple_loss=0.2695, pruned_loss=0.05455, over 7806.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.293, pruned_loss=0.06613, over 1591803.32 frames. ], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:14,849 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146298.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:31:15,523 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1944, 1.6028, 1.7886, 1.4418, 1.0003, 1.5828, 1.9043, 1.7971], + device='cuda:0'), covar=tensor([0.0455, 0.1193, 0.1593, 0.1365, 0.0572, 0.1423, 0.0630, 0.0597], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0099, 0.0162, 0.0113, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 22:31:17,173 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.53 vs. limit=5.0 +2023-02-06 22:31:35,761 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:31:47,234 INFO [train.py:901] (0/4) Epoch 19, batch 850, loss[loss=0.1787, simple_loss=0.2637, pruned_loss=0.04689, over 8135.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2922, pruned_loss=0.06577, over 1592393.88 frames. 
], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:53,497 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:10,849 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:11,293 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.470e+02 3.071e+02 3.941e+02 1.675e+03, threshold=6.141e+02, percent-clipped=6.0 +2023-02-06 22:32:22,245 INFO [train.py:901] (0/4) Epoch 19, batch 900, loss[loss=0.2421, simple_loss=0.318, pruned_loss=0.0831, over 8749.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2929, pruned_loss=0.06568, over 1595214.44 frames. ], batch size: 49, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:32:27,825 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:41,206 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6526, 1.9309, 3.1554, 1.4115, 2.4077, 2.0698, 1.7391, 2.3448], + device='cuda:0'), covar=tensor([0.1880, 0.2719, 0.0902, 0.4735, 0.1702, 0.3117, 0.2276, 0.2145], + device='cuda:0'), in_proj_covar=tensor([0.0516, 0.0580, 0.0550, 0.0630, 0.0639, 0.0584, 0.0518, 0.0629], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:32:56,371 INFO [train.py:901] (0/4) Epoch 19, batch 950, loss[loss=0.229, simple_loss=0.3107, pruned_loss=0.07367, over 8492.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2936, pruned_loss=0.06599, over 1601942.19 frames. ], batch size: 29, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:33:09,722 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.46 vs. limit=5.0 +2023-02-06 22:33:20,834 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1529, 1.5075, 1.7234, 1.3613, 0.9320, 1.4096, 1.6575, 1.4557], + device='cuda:0'), covar=tensor([0.0491, 0.1217, 0.1617, 0.1412, 0.0613, 0.1542, 0.0703, 0.0697], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0112, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 22:33:21,319 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.324e+02 2.987e+02 4.077e+02 9.877e+02, threshold=5.974e+02, percent-clipped=4.0 +2023-02-06 22:33:22,702 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 22:33:32,161 INFO [train.py:901] (0/4) Epoch 19, batch 1000, loss[loss=0.1755, simple_loss=0.2581, pruned_loss=0.0465, over 7554.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2942, pruned_loss=0.06639, over 1611586.02 frames. ], batch size: 18, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:33:44,471 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146511.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:33:54,580 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 22:34:06,367 INFO [train.py:901] (0/4) Epoch 19, batch 1050, loss[loss=0.2288, simple_loss=0.3126, pruned_loss=0.07247, over 8513.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2937, pruned_loss=0.06585, over 1612773.03 frames. 
], batch size: 28, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:06,384 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 22:34:14,932 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146554.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:34:31,581 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.403e+02 2.837e+02 3.508e+02 6.242e+02, threshold=5.674e+02, percent-clipped=1.0 +2023-02-06 22:34:33,864 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146579.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:34:44,087 INFO [train.py:901] (0/4) Epoch 19, batch 1100, loss[loss=0.2196, simple_loss=0.2861, pruned_loss=0.07658, over 7931.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2926, pruned_loss=0.06591, over 1606874.09 frames. ], batch size: 20, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:55,179 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:34:56,640 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2424, 2.0528, 2.8786, 2.2983, 2.6601, 2.2820, 1.9795, 1.4752], + device='cuda:0'), covar=tensor([0.4867, 0.4616, 0.1706, 0.3385, 0.2354, 0.2825, 0.1814, 0.5229], + device='cuda:0'), in_proj_covar=tensor([0.0931, 0.0948, 0.0782, 0.0913, 0.0978, 0.0866, 0.0726, 0.0809], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:35:06,983 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:18,380 INFO [train.py:901] (0/4) Epoch 19, batch 1150, loss[loss=0.1969, simple_loss=0.2767, pruned_loss=0.0585, over 7826.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.292, pruned_loss=0.06534, over 1608352.61 frames. ], batch size: 20, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:35:19,108 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 22:35:19,253 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:42,421 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.484e+02 2.879e+02 3.755e+02 5.922e+02, threshold=5.758e+02, percent-clipped=3.0 +2023-02-06 22:35:53,863 INFO [train.py:901] (0/4) Epoch 19, batch 1200, loss[loss=0.2172, simple_loss=0.2987, pruned_loss=0.06781, over 8453.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2927, pruned_loss=0.06546, over 1614610.18 frames. ], batch size: 25, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:11,193 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8285, 1.8174, 2.4309, 1.6750, 1.3052, 2.4802, 0.4223, 1.4946], + device='cuda:0'), covar=tensor([0.2048, 0.1250, 0.0348, 0.1401, 0.2857, 0.0372, 0.2272, 0.1498], + device='cuda:0'), in_proj_covar=tensor([0.0184, 0.0192, 0.0123, 0.0218, 0.0267, 0.0130, 0.0168, 0.0185], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 22:36:29,002 INFO [train.py:901] (0/4) Epoch 19, batch 1250, loss[loss=0.214, simple_loss=0.2782, pruned_loss=0.07492, over 7691.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2928, pruned_loss=0.06606, over 1614016.94 frames. 
], batch size: 18, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:31,924 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2402, 2.6561, 2.8009, 1.9369, 3.1589, 1.9630, 1.5241, 2.1843], + device='cuda:0'), covar=tensor([0.0816, 0.0376, 0.0290, 0.0673, 0.0392, 0.0786, 0.0878, 0.0515], + device='cuda:0'), in_proj_covar=tensor([0.0435, 0.0378, 0.0324, 0.0429, 0.0361, 0.0521, 0.0381, 0.0403], + device='cuda:0'), out_proj_covar=tensor([1.1803e-04, 9.9890e-05, 8.5681e-05, 1.1387e-04, 9.6013e-05, 1.4899e-04, + 1.0310e-04, 1.0761e-04], device='cuda:0') +2023-02-06 22:36:52,652 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 2.471e+02 2.976e+02 4.092e+02 7.603e+02, threshold=5.951e+02, percent-clipped=4.0 +2023-02-06 22:37:04,310 INFO [train.py:901] (0/4) Epoch 19, batch 1300, loss[loss=0.2271, simple_loss=0.2941, pruned_loss=0.0801, over 8071.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2931, pruned_loss=0.06612, over 1616614.62 frames. ], batch size: 21, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:40,717 INFO [train.py:901] (0/4) Epoch 19, batch 1350, loss[loss=0.1887, simple_loss=0.259, pruned_loss=0.05916, over 7712.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2923, pruned_loss=0.06556, over 1619714.19 frames. ], batch size: 18, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:53,728 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:03,896 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.302e+02 2.844e+02 3.659e+02 6.626e+02, threshold=5.688e+02, percent-clipped=1.0 +2023-02-06 22:38:07,780 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146882.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:14,731 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4276, 4.4729, 4.0057, 2.2319, 3.9436, 4.0052, 4.0509, 3.8075], + device='cuda:0'), covar=tensor([0.0702, 0.0558, 0.1104, 0.3875, 0.0834, 0.0857, 0.1231, 0.0853], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0426, 0.0432, 0.0528, 0.0417, 0.0429, 0.0412, 0.0378], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:38:15,248 INFO [train.py:901] (0/4) Epoch 19, batch 1400, loss[loss=0.2243, simple_loss=0.3016, pruned_loss=0.07349, over 8126.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2913, pruned_loss=0.06517, over 1615787.02 frames. ], batch size: 22, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:38:19,630 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2444, 1.4761, 1.4951, 1.0247, 1.4749, 1.1551, 0.3290, 1.3954], + device='cuda:0'), covar=tensor([0.0502, 0.0397, 0.0305, 0.0488, 0.0470, 0.0866, 0.0822, 0.0273], + device='cuda:0'), in_proj_covar=tensor([0.0435, 0.0377, 0.0323, 0.0429, 0.0360, 0.0521, 0.0380, 0.0402], + device='cuda:0'), out_proj_covar=tensor([1.1802e-04, 9.9697e-05, 8.5509e-05, 1.1409e-04, 9.5812e-05, 1.4897e-04, + 1.0291e-04, 1.0748e-04], device='cuda:0') +2023-02-06 22:38:25,965 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:43,247 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 22:38:52,633 INFO [train.py:901] (0/4) Epoch 19, batch 1450, loss[loss=0.2136, simple_loss=0.3041, pruned_loss=0.06151, over 8326.00 frames. 
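The `attn_weights_entropy` dumps are a diagnostic, not a loss term: each entry summarizes how sharply an attention distribution is concentrated, so values well below 1 (e.g. the 0.3290 entry above) indicate near one-hot attention while larger values indicate broadly spread attention. A sketch of the computation, assuming weights normalized over the source axis; how the logged tensor groups heads and layers is not recoverable from the log:

```python
import torch

def attn_entropy(attn: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    """Shannon entropy of attention distributions, one value per head.

    A sketch of the attn_weights_entropy diagnostic: attn is assumed to
    be (num_heads, tgt_len, src_len) with rows summing to 1. Low entropy
    means a head attends almost one-hot; high entropy means it spreads
    attention over many source positions."""
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (num_heads, tgt_len)
    return ent.mean(dim=-1)                         # average over queries
```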
], tot_loss[loss=0.2098, simple_loss=0.2909, pruned_loss=0.06431, over 1619738.83 frames. ], batch size: 25, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:38:56,650 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 22:38:59,393 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:16,190 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.362e+02 2.962e+02 3.993e+02 1.525e+03, threshold=5.923e+02, percent-clipped=6.0 +2023-02-06 22:39:22,617 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6773, 1.2734, 4.8343, 1.7963, 4.3022, 4.0207, 4.3903, 4.2586], + device='cuda:0'), covar=tensor([0.0555, 0.5014, 0.0464, 0.4113, 0.1081, 0.1016, 0.0567, 0.0647], + device='cuda:0'), in_proj_covar=tensor([0.0602, 0.0635, 0.0677, 0.0609, 0.0692, 0.0594, 0.0590, 0.0651], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:39:23,965 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:27,299 INFO [train.py:901] (0/4) Epoch 19, batch 1500, loss[loss=0.2026, simple_loss=0.2847, pruned_loss=0.06026, over 8353.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2915, pruned_loss=0.06458, over 1620423.70 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:03,264 INFO [train.py:901] (0/4) Epoch 19, batch 1550, loss[loss=0.2228, simple_loss=0.2996, pruned_loss=0.07298, over 8136.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2921, pruned_loss=0.06463, over 1621780.02 frames. ], batch size: 22, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:22,631 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:40:28,393 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.434e+02 2.984e+02 3.600e+02 8.495e+02, threshold=5.968e+02, percent-clipped=1.0 +2023-02-06 22:40:39,453 INFO [train.py:901] (0/4) Epoch 19, batch 1600, loss[loss=0.1754, simple_loss=0.2592, pruned_loss=0.04583, over 7203.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2913, pruned_loss=0.06397, over 1621113.88 frames. ], batch size: 16, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:46,376 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:41:14,549 INFO [train.py:901] (0/4) Epoch 19, batch 1650, loss[loss=0.2362, simple_loss=0.3079, pruned_loss=0.08222, over 8307.00 frames. ], tot_loss[loss=0.211, simple_loss=0.292, pruned_loss=0.06503, over 1621087.59 frames. ], batch size: 48, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:41:40,969 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.354e+02 2.709e+02 3.474e+02 7.081e+02, threshold=5.418e+02, percent-clipped=1.0 +2023-02-06 22:41:51,231 INFO [train.py:901] (0/4) Epoch 19, batch 1700, loss[loss=0.2212, simple_loss=0.3029, pruned_loss=0.06977, over 8367.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2913, pruned_loss=0.06441, over 1619179.00 frames. 
], batch size: 49, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:41:52,204 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1165, 1.8201, 2.3213, 1.9907, 2.2715, 2.1309, 1.8938, 1.1744], + device='cuda:0'), covar=tensor([0.4955, 0.4281, 0.1753, 0.3053, 0.2111, 0.2638, 0.1761, 0.4523], + device='cuda:0'), in_proj_covar=tensor([0.0936, 0.0953, 0.0788, 0.0917, 0.0984, 0.0869, 0.0730, 0.0810], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:42:00,560 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147206.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:25,937 INFO [train.py:901] (0/4) Epoch 19, batch 1750, loss[loss=0.2171, simple_loss=0.3109, pruned_loss=0.06162, over 8241.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2911, pruned_loss=0.06424, over 1615561.43 frames. ], batch size: 24, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:42:37,947 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:41,391 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5355, 1.3658, 4.7106, 1.8737, 4.1887, 3.9188, 4.2619, 4.1433], + device='cuda:0'), covar=tensor([0.0528, 0.4812, 0.0488, 0.4006, 0.0970, 0.0972, 0.0530, 0.0604], + device='cuda:0'), in_proj_covar=tensor([0.0601, 0.0636, 0.0677, 0.0612, 0.0692, 0.0596, 0.0591, 0.0653], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:42:46,351 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6881, 4.7304, 4.1709, 2.0552, 4.1566, 4.2949, 4.2687, 4.1481], + device='cuda:0'), covar=tensor([0.0662, 0.0555, 0.1146, 0.4426, 0.0815, 0.0979, 0.1337, 0.0839], + device='cuda:0'), in_proj_covar=tensor([0.0513, 0.0423, 0.0428, 0.0526, 0.0415, 0.0427, 0.0410, 0.0375], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:42:51,045 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.529e+02 3.043e+02 3.569e+02 7.736e+02, threshold=6.085e+02, percent-clipped=5.0 +2023-02-06 22:43:03,029 INFO [train.py:901] (0/4) Epoch 19, batch 1800, loss[loss=0.2106, simple_loss=0.2983, pruned_loss=0.06145, over 8238.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2912, pruned_loss=0.06451, over 1615832.00 frames. ], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:22,531 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:24,589 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,334 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,658 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 22:43:37,468 INFO [train.py:901] (0/4) Epoch 19, batch 1850, loss[loss=0.2421, simple_loss=0.3259, pruned_loss=0.07918, over 8293.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.292, pruned_loss=0.06494, over 1616583.75 frames. 
], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:41,689 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:48,528 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:02,398 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.300e+02 2.823e+02 3.606e+02 1.006e+03, threshold=5.645e+02, percent-clipped=2.0 +2023-02-06 22:44:02,738 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 22:44:06,632 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:12,522 INFO [train.py:901] (0/4) Epoch 19, batch 1900, loss[loss=0.2085, simple_loss=0.2861, pruned_loss=0.06547, over 8246.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2919, pruned_loss=0.06486, over 1614291.36 frames. ], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:37,268 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:44,950 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 22:44:49,675 INFO [train.py:901] (0/4) Epoch 19, batch 1950, loss[loss=0.2212, simple_loss=0.2944, pruned_loss=0.07401, over 7675.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2906, pruned_loss=0.06388, over 1615255.07 frames. ], batch size: 18, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:55,957 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:56,496 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 22:45:13,741 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.289e+02 2.862e+02 3.830e+02 8.439e+02, threshold=5.724e+02, percent-clipped=6.0 +2023-02-06 22:45:15,252 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 22:45:24,858 INFO [train.py:901] (0/4) Epoch 19, batch 2000, loss[loss=0.2389, simple_loss=0.3188, pruned_loss=0.07953, over 8550.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2915, pruned_loss=0.06468, over 1611300.38 frames. ], batch size: 49, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:01,778 INFO [train.py:901] (0/4) Epoch 19, batch 2050, loss[loss=0.2076, simple_loss=0.2817, pruned_loss=0.06677, over 7929.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2917, pruned_loss=0.06471, over 1613942.15 frames. ], batch size: 20, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:09,084 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.01 vs. limit=5.0 +2023-02-06 22:46:25,327 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147577.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:25,778 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.500e+02 2.918e+02 3.445e+02 6.516e+02, threshold=5.836e+02, percent-clipped=2.0 +2023-02-06 22:46:36,254 INFO [train.py:901] (0/4) Epoch 19, batch 2100, loss[loss=0.1816, simple_loss=0.2663, pruned_loss=0.04844, over 7979.00 frames. 
], tot_loss[loss=0.211, simple_loss=0.292, pruned_loss=0.06499, over 1612653.92 frames. ], batch size: 21, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:42,937 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:43,425 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:47,319 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.65 vs. limit=5.0 +2023-02-06 22:47:12,100 INFO [train.py:901] (0/4) Epoch 19, batch 2150, loss[loss=0.2119, simple_loss=0.298, pruned_loss=0.06284, over 8138.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2915, pruned_loss=0.06501, over 1611727.43 frames. ], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:47:28,862 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2780, 2.1949, 1.7139, 1.9160, 1.7346, 1.5190, 1.7262, 1.6495], + device='cuda:0'), covar=tensor([0.1239, 0.0387, 0.1088, 0.0563, 0.0757, 0.1354, 0.0835, 0.0794], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0234, 0.0325, 0.0303, 0.0301, 0.0331, 0.0341, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 22:47:31,898 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 22:47:33,534 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:36,909 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:37,423 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.398e+02 3.174e+02 3.852e+02 9.466e+02, threshold=6.348e+02, percent-clipped=6.0 +2023-02-06 22:47:47,723 INFO [train.py:901] (0/4) Epoch 19, batch 2200, loss[loss=0.1719, simple_loss=0.2522, pruned_loss=0.04577, over 7651.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2915, pruned_loss=0.06544, over 1604663.53 frames. ], batch size: 19, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:04,723 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:21,942 INFO [train.py:901] (0/4) Epoch 19, batch 2250, loss[loss=0.21, simple_loss=0.302, pruned_loss=0.05898, over 8466.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.293, pruned_loss=0.06603, over 1607259.16 frames. ], batch size: 25, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:41,105 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:47,103 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.392e+02 3.089e+02 3.849e+02 9.613e+02, threshold=6.179e+02, percent-clipped=2.0 +2023-02-06 22:48:53,325 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:56,992 INFO [train.py:901] (0/4) Epoch 19, batch 2300, loss[loss=0.1638, simple_loss=0.2493, pruned_loss=0.03911, over 8079.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2931, pruned_loss=0.06623, over 1608225.23 frames. 
], batch size: 21, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:58,973 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147796.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:24,328 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:30,985 INFO [train.py:901] (0/4) Epoch 19, batch 2350, loss[loss=0.2194, simple_loss=0.3035, pruned_loss=0.0676, over 8595.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2939, pruned_loss=0.06631, over 1613441.10 frames. ], batch size: 31, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:49:53,245 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:55,888 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.451e+02 2.984e+02 3.607e+02 1.132e+03, threshold=5.968e+02, percent-clipped=4.0 +2023-02-06 22:50:01,527 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:04,972 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8339, 2.9731, 2.5988, 4.1362, 1.8123, 2.3145, 2.5890, 3.3971], + device='cuda:0'), covar=tensor([0.0562, 0.0761, 0.0752, 0.0176, 0.1073, 0.1089, 0.0994, 0.0592], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0197, 0.0249, 0.0212, 0.0207, 0.0246, 0.0253, 0.0212], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:50:07,513 INFO [train.py:901] (0/4) Epoch 19, batch 2400, loss[loss=0.195, simple_loss=0.2648, pruned_loss=0.06255, over 7435.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2921, pruned_loss=0.06521, over 1612955.26 frames. ], batch size: 17, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:50:17,626 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.68 vs. limit=5.0 +2023-02-06 22:50:20,055 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147911.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:32,258 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:41,459 INFO [train.py:901] (0/4) Epoch 19, batch 2450, loss[loss=0.1845, simple_loss=0.2709, pruned_loss=0.04902, over 8108.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2922, pruned_loss=0.06561, over 1612454.30 frames. ], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:03,062 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:05,437 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.599e+02 2.990e+02 3.557e+02 6.406e+02, threshold=5.981e+02, percent-clipped=1.0 +2023-02-06 22:51:15,596 INFO [train.py:901] (0/4) Epoch 19, batch 2500, loss[loss=0.2027, simple_loss=0.285, pruned_loss=0.06027, over 8333.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2922, pruned_loss=0.0653, over 1615930.79 frames. 
], batch size: 26, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:20,625 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:21,200 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-148000.pt +2023-02-06 22:51:37,735 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:41,216 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8104, 2.4809, 4.0925, 1.6836, 3.1263, 2.2780, 2.0796, 2.7134], + device='cuda:0'), covar=tensor([0.1759, 0.2229, 0.0821, 0.4054, 0.1542, 0.3040, 0.1850, 0.2459], + device='cuda:0'), in_proj_covar=tensor([0.0513, 0.0577, 0.0548, 0.0626, 0.0634, 0.0583, 0.0518, 0.0627], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 22:51:52,333 INFO [train.py:901] (0/4) Epoch 19, batch 2550, loss[loss=0.2145, simple_loss=0.3019, pruned_loss=0.06354, over 8629.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2917, pruned_loss=0.06522, over 1615620.57 frames. ], batch size: 49, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:52,608 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:09,266 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:15,629 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.379e+02 2.867e+02 3.516e+02 7.047e+02, threshold=5.734e+02, percent-clipped=3.0 +2023-02-06 22:52:17,123 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2979, 2.7186, 3.2018, 1.7423, 3.1491, 1.9090, 1.7168, 2.3847], + device='cuda:0'), covar=tensor([0.0821, 0.0391, 0.0290, 0.0832, 0.0502, 0.0957, 0.0833, 0.0419], + device='cuda:0'), in_proj_covar=tensor([0.0441, 0.0380, 0.0329, 0.0435, 0.0363, 0.0526, 0.0383, 0.0404], + device='cuda:0'), out_proj_covar=tensor([1.1934e-04, 1.0026e-04, 8.7018e-05, 1.1568e-04, 9.6228e-05, 1.5023e-04, + 1.0354e-04, 1.0795e-04], device='cuda:0') +2023-02-06 22:52:24,502 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2933, 2.3335, 1.6918, 2.0486, 1.7939, 1.3670, 1.7677, 1.8402], + device='cuda:0'), covar=tensor([0.1445, 0.0419, 0.1253, 0.0588, 0.0842, 0.1625, 0.1075, 0.0941], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0236, 0.0326, 0.0305, 0.0302, 0.0333, 0.0343, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 22:52:26,269 INFO [train.py:901] (0/4) Epoch 19, batch 2600, loss[loss=0.2239, simple_loss=0.3106, pruned_loss=0.06862, over 8238.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2931, pruned_loss=0.06676, over 1612537.26 frames. ], batch size: 24, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:52:57,239 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:00,041 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:01,844 INFO [train.py:901] (0/4) Epoch 19, batch 2650, loss[loss=0.22, simple_loss=0.3104, pruned_loss=0.0648, over 8368.00 frames. 
], tot_loss[loss=0.2145, simple_loss=0.2942, pruned_loss=0.06745, over 1614067.55 frames. ], batch size: 24, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:16,796 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:18,187 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:24,871 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:25,454 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 2.384e+02 2.853e+02 3.529e+02 7.126e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 22:53:27,178 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.47 vs. limit=2.0 +2023-02-06 22:53:35,175 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:35,670 INFO [train.py:901] (0/4) Epoch 19, batch 2700, loss[loss=0.212, simple_loss=0.2988, pruned_loss=0.06258, over 8537.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2933, pruned_loss=0.0665, over 1614561.42 frames. ], batch size: 34, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:38,566 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5913, 2.6939, 1.9106, 2.4393, 2.2786, 1.7142, 2.2490, 2.2496], + device='cuda:0'), covar=tensor([0.1577, 0.0404, 0.1116, 0.0615, 0.0838, 0.1387, 0.0965, 0.1009], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0237, 0.0327, 0.0306, 0.0302, 0.0333, 0.0344, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 22:53:54,165 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:03,916 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8133, 2.0028, 1.7059, 2.3585, 1.1035, 1.5501, 1.7461, 1.9318], + device='cuda:0'), covar=tensor([0.0793, 0.0650, 0.1022, 0.0486, 0.1025, 0.1309, 0.0769, 0.0663], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0198, 0.0252, 0.0214, 0.0208, 0.0248, 0.0256, 0.0213], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:54:11,923 INFO [train.py:901] (0/4) Epoch 19, batch 2750, loss[loss=0.1919, simple_loss=0.2662, pruned_loss=0.05875, over 7411.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2937, pruned_loss=0.06654, over 1615229.47 frames. ], batch size: 17, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:32,992 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:36,059 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.484e+02 2.895e+02 4.098e+02 9.310e+02, threshold=5.790e+02, percent-clipped=8.0 +2023-02-06 22:54:45,411 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:45,919 INFO [train.py:901] (0/4) Epoch 19, batch 2800, loss[loss=0.2298, simple_loss=0.3071, pruned_loss=0.0762, over 8086.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2938, pruned_loss=0.06672, over 1613356.33 frames. 
], batch size: 21, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:54,036 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148305.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:13,964 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148334.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:19,842 INFO [train.py:901] (0/4) Epoch 19, batch 2850, loss[loss=0.195, simple_loss=0.277, pruned_loss=0.05648, over 7797.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2931, pruned_loss=0.06623, over 1613049.26 frames. ], batch size: 20, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:55:29,871 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0 +2023-02-06 22:55:46,065 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.512e+02 2.931e+02 3.824e+02 7.566e+02, threshold=5.862e+02, percent-clipped=4.0 +2023-02-06 22:55:52,982 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:55,623 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:56,116 INFO [train.py:901] (0/4) Epoch 19, batch 2900, loss[loss=0.2294, simple_loss=0.3229, pruned_loss=0.06789, over 8506.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2937, pruned_loss=0.06609, over 1617086.91 frames. ], batch size: 28, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:10,223 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 22:56:12,655 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:29,355 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 22:56:29,948 INFO [train.py:901] (0/4) Epoch 19, batch 2950, loss[loss=0.2037, simple_loss=0.2765, pruned_loss=0.06546, over 8085.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2939, pruned_loss=0.06645, over 1615496.09 frames. ], batch size: 21, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:32,795 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148447.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:52,406 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1085, 1.6522, 1.7000, 1.4703, 1.0175, 1.5351, 1.8115, 1.5940], + device='cuda:0'), covar=tensor([0.0500, 0.1183, 0.1616, 0.1405, 0.0598, 0.1475, 0.0681, 0.0615], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 22:56:54,932 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.514e+02 3.009e+02 3.973e+02 7.443e+02, threshold=6.017e+02, percent-clipped=3.0 +2023-02-06 22:57:06,356 INFO [train.py:901] (0/4) Epoch 19, batch 3000, loss[loss=0.2858, simple_loss=0.3543, pruned_loss=0.1086, over 6665.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2938, pruned_loss=0.06674, over 1613327.87 frames. 
], batch size: 72, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:57:06,357 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 22:57:22,669 INFO [train.py:935] (0/4) Epoch 19, validation: loss=0.1752, simple_loss=0.2756, pruned_loss=0.03738, over 944034.00 frames. +2023-02-06 22:57:22,671 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 22:57:26,393 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9378, 1.8280, 2.9334, 2.2611, 2.5594, 1.9208, 1.6327, 1.2712], + device='cuda:0'), covar=tensor([0.6632, 0.5598, 0.1681, 0.3842, 0.2918, 0.4179, 0.2945, 0.5310], + device='cuda:0'), in_proj_covar=tensor([0.0932, 0.0952, 0.0782, 0.0916, 0.0980, 0.0868, 0.0730, 0.0809], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 22:57:38,582 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148516.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:57:44,374 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 22:57:56,924 INFO [train.py:901] (0/4) Epoch 19, batch 3050, loss[loss=0.2561, simple_loss=0.3244, pruned_loss=0.09389, over 8678.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2933, pruned_loss=0.06648, over 1615131.96 frames. ], batch size: 39, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:00,725 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:17,724 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:21,053 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.398e+02 2.811e+02 3.727e+02 6.995e+02, threshold=5.622e+02, percent-clipped=3.0 +2023-02-06 22:58:30,231 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:31,671 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3212, 2.6217, 3.0820, 1.7508, 3.3146, 2.1444, 1.5508, 2.2036], + device='cuda:0'), covar=tensor([0.0812, 0.0443, 0.0272, 0.0821, 0.0373, 0.0783, 0.0888, 0.0614], + device='cuda:0'), in_proj_covar=tensor([0.0442, 0.0380, 0.0330, 0.0437, 0.0364, 0.0531, 0.0384, 0.0406], + device='cuda:0'), out_proj_covar=tensor([1.1984e-04, 1.0038e-04, 8.7138e-05, 1.1622e-04, 9.6670e-05, 1.5179e-04, + 1.0388e-04, 1.0840e-04], device='cuda:0') +2023-02-06 22:58:32,155 INFO [train.py:901] (0/4) Epoch 19, batch 3100, loss[loss=0.1797, simple_loss=0.2644, pruned_loss=0.04753, over 8096.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2932, pruned_loss=0.06649, over 1614994.02 frames. 
], batch size: 21, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:33,712 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0112, 2.6215, 3.6680, 2.0628, 2.1334, 3.6026, 0.6585, 2.2527], + device='cuda:0'), covar=tensor([0.1685, 0.1428, 0.0226, 0.1905, 0.2557, 0.0346, 0.2757, 0.1559], + device='cuda:0'), in_proj_covar=tensor([0.0183, 0.0192, 0.0122, 0.0218, 0.0267, 0.0130, 0.0168, 0.0185], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 22:58:49,317 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148615.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:50,729 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5197, 2.9821, 2.4856, 3.9865, 1.8292, 2.2097, 2.5034, 3.1895], + device='cuda:0'), covar=tensor([0.0721, 0.0814, 0.0756, 0.0196, 0.1133, 0.1213, 0.1039, 0.0714], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0197, 0.0249, 0.0212, 0.0206, 0.0246, 0.0253, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 22:59:09,444 INFO [train.py:901] (0/4) Epoch 19, batch 3150, loss[loss=0.2305, simple_loss=0.3232, pruned_loss=0.06887, over 8133.00 frames. ], tot_loss[loss=0.212, simple_loss=0.293, pruned_loss=0.06546, over 1614783.57 frames. ], batch size: 22, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:59:10,311 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:13,423 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,331 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,394 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:32,308 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.358e+02 3.073e+02 3.824e+02 9.523e+02, threshold=6.146e+02, percent-clipped=8.0 +2023-02-06 22:59:42,395 INFO [train.py:901] (0/4) Epoch 19, batch 3200, loss[loss=0.2665, simple_loss=0.3384, pruned_loss=0.09729, over 8473.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.293, pruned_loss=0.0656, over 1615613.10 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:12,966 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:19,662 INFO [train.py:901] (0/4) Epoch 19, batch 3250, loss[loss=0.1889, simple_loss=0.2679, pruned_loss=0.0549, over 7820.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2932, pruned_loss=0.06601, over 1613826.25 frames. 
], batch size: 20, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:26,676 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0574, 1.4317, 1.6122, 1.3950, 0.8775, 1.4498, 1.7025, 1.5994], + device='cuda:0'), covar=tensor([0.0517, 0.1341, 0.1801, 0.1490, 0.0643, 0.1530, 0.0738, 0.0629], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0154, 0.0191, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:00:34,076 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:43,259 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.443e+02 3.073e+02 4.112e+02 8.183e+02, threshold=6.146e+02, percent-clipped=4.0 +2023-02-06 23:00:52,324 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:53,603 INFO [train.py:901] (0/4) Epoch 19, batch 3300, loss[loss=0.2175, simple_loss=0.3093, pruned_loss=0.06282, over 7808.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2914, pruned_loss=0.06515, over 1610291.61 frames. ], batch size: 20, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:28,250 INFO [train.py:901] (0/4) Epoch 19, batch 3350, loss[loss=0.2184, simple_loss=0.2965, pruned_loss=0.0701, over 8363.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2927, pruned_loss=0.06599, over 1608403.76 frames. ], batch size: 24, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:31,900 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4868, 2.2714, 3.1402, 2.4473, 3.0099, 2.3700, 2.1693, 1.7052], + device='cuda:0'), covar=tensor([0.4942, 0.4761, 0.1774, 0.3683, 0.2352, 0.3022, 0.1760, 0.5535], + device='cuda:0'), in_proj_covar=tensor([0.0932, 0.0950, 0.0783, 0.0916, 0.0980, 0.0870, 0.0728, 0.0810], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 23:01:41,955 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:01:53,950 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.291e+02 2.864e+02 3.449e+02 6.722e+02, threshold=5.728e+02, percent-clipped=1.0 +2023-02-06 23:02:04,176 INFO [train.py:901] (0/4) Epoch 19, batch 3400, loss[loss=0.2201, simple_loss=0.3087, pruned_loss=0.06572, over 8446.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2928, pruned_loss=0.06619, over 1610093.05 frames. ], batch size: 29, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:02:13,143 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:02:38,059 INFO [train.py:901] (0/4) Epoch 19, batch 3450, loss[loss=0.2426, simple_loss=0.3202, pruned_loss=0.08249, over 8354.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2927, pruned_loss=0.06601, over 1612544.24 frames. 
], batch size: 26, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:01,937 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:04,405 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.340e+02 2.956e+02 3.727e+02 1.104e+03, threshold=5.912e+02, percent-clipped=3.0 +2023-02-06 23:03:14,149 INFO [train.py:901] (0/4) Epoch 19, batch 3500, loss[loss=0.2536, simple_loss=0.3341, pruned_loss=0.08652, over 8462.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2933, pruned_loss=0.06611, over 1614858.80 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:28,318 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:33,358 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:35,942 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 23:03:48,899 INFO [train.py:901] (0/4) Epoch 19, batch 3550, loss[loss=0.2237, simple_loss=0.3052, pruned_loss=0.07107, over 8331.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.293, pruned_loss=0.06542, over 1615228.62 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:50,370 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,072 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,638 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.461e+02 3.087e+02 3.824e+02 7.251e+02, threshold=6.175e+02, percent-clipped=6.0 +2023-02-06 23:04:25,646 INFO [train.py:901] (0/4) Epoch 19, batch 3600, loss[loss=0.1837, simple_loss=0.2656, pruned_loss=0.0509, over 7639.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2924, pruned_loss=0.065, over 1616682.95 frames. ], batch size: 19, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:04:36,706 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 23:04:49,816 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:59,729 INFO [train.py:901] (0/4) Epoch 19, batch 3650, loss[loss=0.2374, simple_loss=0.3075, pruned_loss=0.08365, over 8042.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2917, pruned_loss=0.06488, over 1617395.05 frames. ], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:13,237 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:24,390 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.296e+02 2.731e+02 3.488e+02 6.725e+02, threshold=5.462e+02, percent-clipped=1.0 +2023-02-06 23:05:30,729 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:35,231 INFO [train.py:901] (0/4) Epoch 19, batch 3700, loss[loss=0.2392, simple_loss=0.3206, pruned_loss=0.07888, over 8691.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2918, pruned_loss=0.06501, over 1617385.55 frames. 
], batch size: 34, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:35,405 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:38,060 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:06:02,787 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149231.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:10,470 INFO [train.py:901] (0/4) Epoch 19, batch 3750, loss[loss=0.2264, simple_loss=0.2886, pruned_loss=0.08209, over 8245.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2914, pruned_loss=0.06514, over 1612457.05 frames. ], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:06:19,366 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:34,558 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 2.530e+02 3.028e+02 3.831e+02 7.632e+02, threshold=6.056e+02, percent-clipped=6.0 +2023-02-06 23:06:44,223 INFO [train.py:901] (0/4) Epoch 19, batch 3800, loss[loss=0.1792, simple_loss=0.259, pruned_loss=0.04968, over 7934.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2908, pruned_loss=0.06508, over 1613206.60 frames. ], batch size: 20, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:20,712 INFO [train.py:901] (0/4) Epoch 19, batch 3850, loss[loss=0.2744, simple_loss=0.3452, pruned_loss=0.1018, over 7042.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2897, pruned_loss=0.06432, over 1612622.88 frames. ], batch size: 72, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:42,389 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 23:07:45,097 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.409e+02 2.948e+02 3.728e+02 6.848e+02, threshold=5.896e+02, percent-clipped=3.0 +2023-02-06 23:07:48,732 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:07:54,637 INFO [train.py:901] (0/4) Epoch 19, batch 3900, loss[loss=0.2053, simple_loss=0.283, pruned_loss=0.06381, over 8130.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2902, pruned_loss=0.06425, over 1615241.88 frames. 
], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:58,218 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7979, 1.5136, 2.8087, 1.3730, 2.1507, 2.9874, 3.1198, 2.5777], + device='cuda:0'), covar=tensor([0.1040, 0.1523, 0.0438, 0.2108, 0.0945, 0.0298, 0.0670, 0.0554], + device='cuda:0'), in_proj_covar=tensor([0.0285, 0.0313, 0.0281, 0.0308, 0.0298, 0.0261, 0.0400, 0.0300], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 23:07:58,927 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0333, 3.8501, 2.4207, 2.7561, 3.0043, 2.1516, 2.7917, 3.0199], + device='cuda:0'), covar=tensor([0.1698, 0.0342, 0.1071, 0.0840, 0.0737, 0.1319, 0.1170, 0.1228], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0239, 0.0327, 0.0304, 0.0300, 0.0331, 0.0341, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:08:06,544 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:31,938 INFO [train.py:901] (0/4) Epoch 19, batch 3950, loss[loss=0.1877, simple_loss=0.2672, pruned_loss=0.05414, over 6799.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.29, pruned_loss=0.0639, over 1613266.15 frames. ], batch size: 15, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:08:36,310 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:53,035 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:56,239 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.497e+02 2.881e+02 4.050e+02 6.266e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 23:09:04,519 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6778, 4.6548, 4.2091, 2.0771, 4.0455, 4.2795, 4.1864, 3.9068], + device='cuda:0'), covar=tensor([0.0693, 0.0542, 0.1063, 0.4545, 0.0864, 0.0813, 0.1292, 0.0769], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0430, 0.0432, 0.0533, 0.0422, 0.0437, 0.0415, 0.0377], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:09:05,731 INFO [train.py:901] (0/4) Epoch 19, batch 4000, loss[loss=0.1978, simple_loss=0.2846, pruned_loss=0.05547, over 8238.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2907, pruned_loss=0.06419, over 1612949.68 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:09:32,409 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:09:40,133 INFO [train.py:901] (0/4) Epoch 19, batch 4050, loss[loss=0.184, simple_loss=0.2537, pruned_loss=0.05708, over 7534.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2916, pruned_loss=0.06503, over 1614446.57 frames. 
], batch size: 18, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:05,803 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.470e+02 3.003e+02 4.246e+02 8.728e+02, threshold=6.007e+02, percent-clipped=8.0 +2023-02-06 23:10:08,583 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:10:15,178 INFO [train.py:901] (0/4) Epoch 19, batch 4100, loss[loss=0.2192, simple_loss=0.2861, pruned_loss=0.07617, over 7804.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.292, pruned_loss=0.06509, over 1616066.72 frames. ], batch size: 20, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:49,881 INFO [train.py:901] (0/4) Epoch 19, batch 4150, loss[loss=0.213, simple_loss=0.289, pruned_loss=0.06855, over 8341.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2919, pruned_loss=0.06468, over 1620611.80 frames. ], batch size: 26, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:57,882 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 23:11:16,655 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.273e+02 2.791e+02 3.594e+02 5.057e+02, threshold=5.582e+02, percent-clipped=0.0 +2023-02-06 23:11:26,115 INFO [train.py:901] (0/4) Epoch 19, batch 4200, loss[loss=0.2267, simple_loss=0.3066, pruned_loss=0.07341, over 6955.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2926, pruned_loss=0.06541, over 1616455.95 frames. ], batch size: 71, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:11:36,583 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 23:11:41,377 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2120, 1.0723, 1.3025, 1.1076, 0.9888, 1.3318, 0.0675, 0.8825], + device='cuda:0'), covar=tensor([0.1747, 0.1400, 0.0550, 0.0935, 0.2963, 0.0571, 0.2424, 0.1349], + device='cuda:0'), in_proj_covar=tensor([0.0185, 0.0194, 0.0124, 0.0222, 0.0271, 0.0132, 0.0171, 0.0187], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 23:11:59,504 INFO [train.py:901] (0/4) Epoch 19, batch 4250, loss[loss=0.205, simple_loss=0.2916, pruned_loss=0.05917, over 8257.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2917, pruned_loss=0.06473, over 1617101.23 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:12:00,911 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 23:12:01,074 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1219, 1.4188, 1.6223, 1.3045, 0.9049, 1.4767, 1.6853, 1.5894], + device='cuda:0'), covar=tensor([0.0535, 0.1321, 0.1711, 0.1471, 0.0641, 0.1520, 0.0726, 0.0651], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0100, 0.0160, 0.0113, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:12:14,480 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149764.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:12:25,317 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 2.444e+02 3.025e+02 3.928e+02 1.033e+03, threshold=6.050e+02, percent-clipped=5.0 +2023-02-06 23:12:35,593 INFO [train.py:901] (0/4) Epoch 19, batch 4300, loss[loss=0.2137, simple_loss=0.315, pruned_loss=0.05622, over 8316.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2915, pruned_loss=0.06474, over 1616234.76 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:10,088 INFO [train.py:901] (0/4) Epoch 19, batch 4350, loss[loss=0.1926, simple_loss=0.2745, pruned_loss=0.05533, over 8241.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2914, pruned_loss=0.06507, over 1611651.93 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:33,155 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 23:13:33,211 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:13:35,197 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.416e+02 2.972e+02 3.761e+02 1.184e+03, threshold=5.944e+02, percent-clipped=4.0 +2023-02-06 23:13:44,579 INFO [train.py:901] (0/4) Epoch 19, batch 4400, loss[loss=0.1949, simple_loss=0.2743, pruned_loss=0.05772, over 8016.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2907, pruned_loss=0.06413, over 1611552.38 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:09,954 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:14,588 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 23:14:20,890 INFO [train.py:901] (0/4) Epoch 19, batch 4450, loss[loss=0.1666, simple_loss=0.2499, pruned_loss=0.04162, over 7803.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2908, pruned_loss=0.06395, over 1607540.60 frames. 
], batch size: 20, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:23,038 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6353, 1.4590, 1.7187, 1.3684, 0.7271, 1.4660, 1.4776, 1.4597], + device='cuda:0'), covar=tensor([0.0511, 0.1206, 0.1609, 0.1378, 0.0586, 0.1448, 0.0690, 0.0604], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0158, 0.0100, 0.0161, 0.0112, 0.0140], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:14:44,886 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.522e+02 2.925e+02 4.193e+02 1.036e+03, threshold=5.849e+02, percent-clipped=7.0 +2023-02-06 23:14:53,304 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:54,477 INFO [train.py:901] (0/4) Epoch 19, batch 4500, loss[loss=0.21, simple_loss=0.3094, pruned_loss=0.05524, over 8318.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2914, pruned_loss=0.06433, over 1607210.41 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:59,956 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-150000.pt +2023-02-06 23:15:08,406 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 23:15:21,654 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3460, 1.4449, 1.3367, 1.7832, 0.8382, 1.2622, 1.2916, 1.4816], + device='cuda:0'), covar=tensor([0.0929, 0.0750, 0.1067, 0.0543, 0.1028, 0.1252, 0.0725, 0.0686], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0199, 0.0250, 0.0214, 0.0207, 0.0249, 0.0255, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 23:15:31,625 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:15:32,128 INFO [train.py:901] (0/4) Epoch 19, batch 4550, loss[loss=0.1927, simple_loss=0.2679, pruned_loss=0.05876, over 7421.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2915, pruned_loss=0.06414, over 1612145.51 frames. ], batch size: 17, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:54,569 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1711, 1.0355, 1.2611, 1.0467, 0.9529, 1.2668, 0.1030, 0.9059], + device='cuda:0'), covar=tensor([0.1810, 0.1561, 0.0567, 0.0896, 0.3038, 0.0709, 0.2414, 0.1345], + device='cuda:0'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0222, 0.0271, 0.0132, 0.0170, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 23:15:56,360 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.399e+02 2.811e+02 3.428e+02 5.502e+02, threshold=5.622e+02, percent-clipped=0.0 +2023-02-06 23:16:05,765 INFO [train.py:901] (0/4) Epoch 19, batch 4600, loss[loss=0.2045, simple_loss=0.2952, pruned_loss=0.05691, over 8598.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2925, pruned_loss=0.06494, over 1610999.38 frames. 
], batch size: 34, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:16:08,462 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:16:15,961 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150108.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:16:41,502 INFO [train.py:901] (0/4) Epoch 19, batch 4650, loss[loss=0.2226, simple_loss=0.3054, pruned_loss=0.06995, over 8506.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2915, pruned_loss=0.06449, over 1613926.16 frames. ], batch size: 26, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:17:03,546 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 23:17:06,561 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.474e+02 2.856e+02 3.464e+02 8.049e+02, threshold=5.712e+02, percent-clipped=3.0 +2023-02-06 23:17:16,083 INFO [train.py:901] (0/4) Epoch 19, batch 4700, loss[loss=0.1738, simple_loss=0.2577, pruned_loss=0.045, over 7691.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2917, pruned_loss=0.06446, over 1614779.78 frames. ], batch size: 18, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:36,647 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150223.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:17:39,458 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1232, 2.4552, 2.6217, 1.6246, 2.6941, 1.8473, 1.6660, 2.1082], + device='cuda:0'), covar=tensor([0.0680, 0.0348, 0.0238, 0.0657, 0.0431, 0.0722, 0.0735, 0.0443], + device='cuda:0'), in_proj_covar=tensor([0.0442, 0.0382, 0.0331, 0.0438, 0.0365, 0.0529, 0.0386, 0.0410], + device='cuda:0'), out_proj_covar=tensor([1.1973e-04, 1.0090e-04, 8.7497e-05, 1.1626e-04, 9.6710e-05, 1.5111e-04, + 1.0449e-04, 1.0962e-04], device='cuda:0') +2023-02-06 23:17:50,831 INFO [train.py:901] (0/4) Epoch 19, batch 4750, loss[loss=0.2073, simple_loss=0.2899, pruned_loss=0.06237, over 8028.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2912, pruned_loss=0.06393, over 1615350.09 frames. ], batch size: 22, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:53,777 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:12,328 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:12,661 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 23:18:13,491 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 23:18:15,508 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-06 23:18:16,855 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.315e+02 2.829e+02 3.523e+02 6.730e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-06 23:18:20,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5602, 1.6927, 4.4269, 2.1210, 2.4560, 5.0518, 5.0996, 4.3712], + device='cuda:0'), covar=tensor([0.1090, 0.1836, 0.0278, 0.1815, 0.1147, 0.0169, 0.0426, 0.0547], + device='cuda:0'), in_proj_covar=tensor([0.0290, 0.0317, 0.0287, 0.0312, 0.0301, 0.0265, 0.0406, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:18:25,813 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:26,362 INFO [train.py:901] (0/4) Epoch 19, batch 4800, loss[loss=0.2179, simple_loss=0.3102, pruned_loss=0.06273, over 8528.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2903, pruned_loss=0.06367, over 1611086.50 frames. ], batch size: 28, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:18:29,937 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:46,557 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:57,492 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.1989, 1.2908, 3.3782, 0.9789, 2.9364, 2.8132, 3.0679, 2.9556], + device='cuda:0'), covar=tensor([0.0920, 0.4045, 0.0832, 0.4274, 0.1485, 0.1169, 0.0761, 0.0976], + device='cuda:0'), in_proj_covar=tensor([0.0595, 0.0627, 0.0668, 0.0607, 0.0681, 0.0584, 0.0584, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:19:00,002 INFO [train.py:901] (0/4) Epoch 19, batch 4850, loss[loss=0.1787, simple_loss=0.2736, pruned_loss=0.04188, over 8305.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2917, pruned_loss=0.06453, over 1613817.62 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:19:05,338 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 23:19:27,011 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.476e+02 2.899e+02 3.621e+02 6.951e+02, threshold=5.799e+02, percent-clipped=6.0 +2023-02-06 23:19:36,180 INFO [train.py:901] (0/4) Epoch 19, batch 4900, loss[loss=0.2077, simple_loss=0.2897, pruned_loss=0.06284, over 8619.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2902, pruned_loss=0.06403, over 1611677.63 frames. 
], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:06,481 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3023, 1.8465, 4.2709, 1.7116, 2.5539, 4.7893, 5.0683, 3.6963], + device='cuda:0'), covar=tensor([0.1478, 0.2030, 0.0373, 0.2625, 0.1204, 0.0321, 0.0502, 0.1021], + device='cuda:0'), in_proj_covar=tensor([0.0290, 0.0318, 0.0288, 0.0313, 0.0302, 0.0267, 0.0408, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:20:07,720 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:20:08,994 INFO [train.py:901] (0/4) Epoch 19, batch 4950, loss[loss=0.1709, simple_loss=0.2565, pruned_loss=0.04267, over 7249.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2904, pruned_loss=0.06462, over 1607721.93 frames. ], batch size: 16, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:33,691 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.356e+02 2.775e+02 3.573e+02 1.033e+03, threshold=5.550e+02, percent-clipped=4.0 +2023-02-06 23:20:33,932 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150479.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:20:43,976 INFO [train.py:901] (0/4) Epoch 19, batch 5000, loss[loss=0.1912, simple_loss=0.2821, pruned_loss=0.05016, over 8246.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2908, pruned_loss=0.06457, over 1609459.54 frames. ], batch size: 24, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:52,070 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150504.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:21:06,772 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7970, 1.4830, 3.9977, 1.4222, 3.4570, 3.2933, 3.5703, 3.4254], + device='cuda:0'), covar=tensor([0.0759, 0.4813, 0.0611, 0.4362, 0.1377, 0.1130, 0.0735, 0.0873], + device='cuda:0'), in_proj_covar=tensor([0.0598, 0.0632, 0.0671, 0.0610, 0.0684, 0.0589, 0.0590, 0.0649], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:21:15,546 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 23:21:17,792 INFO [train.py:901] (0/4) Epoch 19, batch 5050, loss[loss=0.2021, simple_loss=0.2855, pruned_loss=0.05933, over 8454.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2916, pruned_loss=0.06489, over 1613408.63 frames. 
], batch size: 49, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:21:25,904 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:26,606 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:31,777 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4095, 1.3168, 2.3786, 1.2269, 2.0897, 2.5194, 2.6799, 2.1472], + device='cuda:0'), covar=tensor([0.1161, 0.1442, 0.0458, 0.2173, 0.0717, 0.0407, 0.0650, 0.0742], + device='cuda:0'), in_proj_covar=tensor([0.0289, 0.0316, 0.0287, 0.0312, 0.0302, 0.0265, 0.0405, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:21:40,927 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 23:21:41,595 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.501e+02 3.000e+02 3.972e+02 7.212e+02, threshold=5.999e+02, percent-clipped=3.0 +2023-02-06 23:21:51,768 INFO [train.py:901] (0/4) Epoch 19, batch 5100, loss[loss=0.1987, simple_loss=0.2828, pruned_loss=0.05727, over 7810.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2916, pruned_loss=0.06478, over 1612020.93 frames. ], batch size: 20, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:23,309 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:22:27,858 INFO [train.py:901] (0/4) Epoch 19, batch 5150, loss[loss=0.2141, simple_loss=0.2964, pruned_loss=0.06587, over 8283.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2912, pruned_loss=0.06466, over 1612546.50 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:51,867 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.510e+02 3.215e+02 4.688e+02 9.098e+02, threshold=6.429e+02, percent-clipped=11.0 +2023-02-06 23:22:53,379 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2226, 1.6320, 3.4114, 1.3764, 2.3141, 3.7606, 3.8714, 3.2188], + device='cuda:0'), covar=tensor([0.1062, 0.1714, 0.0347, 0.2263, 0.1057, 0.0234, 0.0512, 0.0570], + device='cuda:0'), in_proj_covar=tensor([0.0288, 0.0316, 0.0286, 0.0311, 0.0300, 0.0264, 0.0404, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 23:23:01,325 INFO [train.py:901] (0/4) Epoch 19, batch 5200, loss[loss=0.2255, simple_loss=0.3013, pruned_loss=0.07482, over 8447.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2897, pruned_loss=0.06386, over 1613754.90 frames. ], batch size: 49, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:04,484 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.24 vs. limit=5.0 +2023-02-06 23:23:38,104 INFO [train.py:901] (0/4) Epoch 19, batch 5250, loss[loss=0.2752, simple_loss=0.3377, pruned_loss=0.1063, over 6692.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.29, pruned_loss=0.06438, over 1608841.66 frames. ], batch size: 71, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:40,628 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 23:23:42,647 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:23:43,367 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:01,531 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.565e+02 3.080e+02 4.191e+02 1.354e+03, threshold=6.160e+02, percent-clipped=9.0 +2023-02-06 23:24:10,883 INFO [train.py:901] (0/4) Epoch 19, batch 5300, loss[loss=0.2012, simple_loss=0.291, pruned_loss=0.05574, over 7821.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2908, pruned_loss=0.06485, over 1610157.02 frames. ], batch size: 20, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:24:21,007 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6326, 1.7357, 1.8007, 1.4307, 1.8810, 1.4735, 0.9551, 1.7466], + device='cuda:0'), covar=tensor([0.0403, 0.0323, 0.0183, 0.0363, 0.0318, 0.0544, 0.0622, 0.0196], + device='cuda:0'), in_proj_covar=tensor([0.0440, 0.0381, 0.0330, 0.0434, 0.0364, 0.0525, 0.0384, 0.0405], + device='cuda:0'), out_proj_covar=tensor([1.1926e-04, 1.0061e-04, 8.7090e-05, 1.1526e-04, 9.6612e-05, 1.4969e-04, + 1.0392e-04, 1.0818e-04], device='cuda:0') +2023-02-06 23:24:23,645 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:41,653 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:42,317 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7238, 1.8756, 1.6577, 2.3368, 1.1089, 1.4416, 1.7189, 1.9077], + device='cuda:0'), covar=tensor([0.0719, 0.0705, 0.0947, 0.0416, 0.0986, 0.1348, 0.0757, 0.0709], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0197, 0.0247, 0.0212, 0.0203, 0.0248, 0.0252, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 23:24:46,243 INFO [train.py:901] (0/4) Epoch 19, batch 5350, loss[loss=0.2672, simple_loss=0.3386, pruned_loss=0.09786, over 8693.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2928, pruned_loss=0.06613, over 1613748.86 frames. ], batch size: 39, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:25:10,958 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.499e+02 2.979e+02 3.723e+02 8.863e+02, threshold=5.959e+02, percent-clipped=1.0 +2023-02-06 23:25:20,520 INFO [train.py:901] (0/4) Epoch 19, batch 5400, loss[loss=0.1848, simple_loss=0.2668, pruned_loss=0.05145, over 7662.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2928, pruned_loss=0.06648, over 1616253.68 frames. 
], batch size: 19, lr: 3.98e-03, grad_scale: 16.0 +2023-02-06 23:25:24,755 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:25:37,919 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:25:45,318 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8682, 1.3130, 4.0096, 1.4453, 3.5462, 3.3726, 3.6140, 3.5043], + device='cuda:0'), covar=tensor([0.0616, 0.4712, 0.0623, 0.4148, 0.1224, 0.1007, 0.0659, 0.0756], + device='cuda:0'), in_proj_covar=tensor([0.0598, 0.0634, 0.0669, 0.0608, 0.0685, 0.0587, 0.0589, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:25:55,465 INFO [train.py:901] (0/4) Epoch 19, batch 5450, loss[loss=0.1992, simple_loss=0.2653, pruned_loss=0.06653, over 7436.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2911, pruned_loss=0.06541, over 1609797.04 frames. ], batch size: 17, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:25:55,706 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8345, 2.0136, 1.7444, 2.5623, 1.2511, 1.4906, 1.8048, 2.0659], + device='cuda:0'), covar=tensor([0.0736, 0.0709, 0.0905, 0.0396, 0.1045, 0.1324, 0.0787, 0.0677], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0197, 0.0247, 0.0212, 0.0204, 0.0247, 0.0252, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 23:26:22,623 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.347e+02 2.658e+02 3.430e+02 7.604e+02, threshold=5.316e+02, percent-clipped=2.0 +2023-02-06 23:26:28,459 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 23:26:31,932 INFO [train.py:901] (0/4) Epoch 19, batch 5500, loss[loss=0.254, simple_loss=0.3224, pruned_loss=0.09278, over 7151.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2905, pruned_loss=0.06475, over 1611271.56 frames. ], batch size: 71, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:26:41,072 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1808, 1.0404, 1.3053, 1.0708, 0.9556, 1.3271, 0.0986, 0.9232], + device='cuda:0'), covar=tensor([0.1700, 0.1446, 0.0507, 0.0879, 0.2800, 0.0597, 0.2277, 0.1345], + device='cuda:0'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0222, 0.0269, 0.0132, 0.0169, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 23:26:41,788 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:26:46,613 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:26:58,950 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:27:00,495 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. limit=2.0 +2023-02-06 23:27:06,205 INFO [train.py:901] (0/4) Epoch 19, batch 5550, loss[loss=0.2292, simple_loss=0.3074, pruned_loss=0.07552, over 8116.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.291, pruned_loss=0.06504, over 1613143.91 frames. 
], batch size: 23, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:27:32,496 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.454e+02 3.027e+02 4.195e+02 6.901e+02, threshold=6.054e+02, percent-clipped=7.0 +2023-02-06 23:27:42,410 INFO [train.py:901] (0/4) Epoch 19, batch 5600, loss[loss=0.1919, simple_loss=0.2805, pruned_loss=0.05167, over 8035.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2924, pruned_loss=0.06537, over 1616804.97 frames. ], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:27:43,134 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:28:15,318 INFO [train.py:901] (0/4) Epoch 19, batch 5650, loss[loss=0.2407, simple_loss=0.3266, pruned_loss=0.0774, over 8459.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2939, pruned_loss=0.06641, over 1619977.97 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:28:31,571 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 23:28:39,668 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.685e+02 3.149e+02 3.866e+02 8.044e+02, threshold=6.298e+02, percent-clipped=3.0 +2023-02-06 23:28:50,396 INFO [train.py:901] (0/4) Epoch 19, batch 5700, loss[loss=0.192, simple_loss=0.2759, pruned_loss=0.05407, over 7644.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2944, pruned_loss=0.067, over 1616587.20 frames. ], batch size: 19, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:28:54,075 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7018, 2.3159, 3.3843, 1.7162, 1.7166, 3.3649, 0.5621, 1.9778], + device='cuda:0'), covar=tensor([0.1817, 0.1283, 0.0273, 0.2036, 0.3012, 0.0371, 0.2635, 0.1447], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0222, 0.0269, 0.0133, 0.0168, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 23:28:57,624 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151202.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:00,857 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9391, 6.1429, 5.1961, 3.2964, 5.2948, 5.7404, 5.6050, 5.5754], + device='cuda:0'), covar=tensor([0.0521, 0.0361, 0.1028, 0.3426, 0.0655, 0.0723, 0.1143, 0.0562], + device='cuda:0'), in_proj_covar=tensor([0.0515, 0.0427, 0.0426, 0.0526, 0.0414, 0.0431, 0.0409, 0.0371], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:29:02,310 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:06,541 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 23:29:24,755 INFO [train.py:901] (0/4) Epoch 19, batch 5750, loss[loss=0.1629, simple_loss=0.2493, pruned_loss=0.03825, over 7925.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2932, pruned_loss=0.06576, over 1618788.63 frames. ], batch size: 20, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:29:36,097 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-06 23:29:37,492 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:43,023 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:48,610 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.389e+02 2.918e+02 3.727e+02 7.769e+02, threshold=5.836e+02, percent-clipped=3.0 +2023-02-06 23:29:58,863 INFO [train.py:901] (0/4) Epoch 19, batch 5800, loss[loss=0.2187, simple_loss=0.296, pruned_loss=0.07074, over 7160.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2935, pruned_loss=0.06622, over 1619701.63 frames. ], batch size: 71, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:30:00,348 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:04,371 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:16,584 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:20,001 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1831, 1.2718, 4.3454, 1.5789, 3.8842, 3.6148, 3.9312, 3.8520], + device='cuda:0'), covar=tensor([0.0553, 0.4780, 0.0507, 0.4072, 0.1020, 0.0862, 0.0591, 0.0601], + device='cuda:0'), in_proj_covar=tensor([0.0600, 0.0634, 0.0673, 0.0608, 0.0685, 0.0586, 0.0590, 0.0651], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:30:24,044 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1077, 1.5756, 4.3253, 1.6532, 3.7667, 3.5935, 3.8852, 3.7857], + device='cuda:0'), covar=tensor([0.0741, 0.4472, 0.0544, 0.3823, 0.1230, 0.0912, 0.0650, 0.0713], + device='cuda:0'), in_proj_covar=tensor([0.0601, 0.0635, 0.0674, 0.0608, 0.0686, 0.0587, 0.0591, 0.0651], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:30:34,584 INFO [train.py:901] (0/4) Epoch 19, batch 5850, loss[loss=0.188, simple_loss=0.262, pruned_loss=0.05694, over 7783.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2929, pruned_loss=0.0659, over 1619749.84 frames. ], batch size: 19, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:30:57,557 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:58,661 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.176e+02 2.714e+02 3.221e+02 1.387e+03, threshold=5.429e+02, percent-clipped=3.0 +2023-02-06 23:31:08,074 INFO [train.py:901] (0/4) Epoch 19, batch 5900, loss[loss=0.1994, simple_loss=0.2798, pruned_loss=0.05949, over 7247.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2927, pruned_loss=0.06572, over 1618224.72 frames. 
], batch size: 16, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:31:21,367 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2800, 1.5364, 1.5691, 1.1199, 1.6211, 1.2632, 0.2933, 1.4800], + device='cuda:0'), covar=tensor([0.0533, 0.0432, 0.0386, 0.0562, 0.0544, 0.0999, 0.0969, 0.0319], + device='cuda:0'), in_proj_covar=tensor([0.0444, 0.0382, 0.0333, 0.0439, 0.0369, 0.0529, 0.0386, 0.0409], + device='cuda:0'), out_proj_covar=tensor([1.2022e-04, 1.0084e-04, 8.7799e-05, 1.1665e-04, 9.7842e-05, 1.5098e-04, + 1.0461e-04, 1.0906e-04], device='cuda:0') +2023-02-06 23:31:28,024 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6120, 1.9128, 2.1443, 1.4136, 2.2285, 1.3475, 0.6725, 1.8538], + device='cuda:0'), covar=tensor([0.0613, 0.0363, 0.0249, 0.0548, 0.0355, 0.1021, 0.0875, 0.0321], + device='cuda:0'), in_proj_covar=tensor([0.0444, 0.0381, 0.0333, 0.0439, 0.0368, 0.0528, 0.0386, 0.0408], + device='cuda:0'), out_proj_covar=tensor([1.2010e-04, 1.0074e-04, 8.7786e-05, 1.1660e-04, 9.7655e-05, 1.5087e-04, + 1.0459e-04, 1.0892e-04], device='cuda:0') +2023-02-06 23:31:37,730 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.63 vs. limit=5.0 +2023-02-06 23:31:44,903 INFO [train.py:901] (0/4) Epoch 19, batch 5950, loss[loss=0.2057, simple_loss=0.2905, pruned_loss=0.06041, over 8449.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2924, pruned_loss=0.06498, over 1619087.44 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:31:59,882 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:32:09,190 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.424e+02 3.104e+02 3.851e+02 8.156e+02, threshold=6.208e+02, percent-clipped=3.0 +2023-02-06 23:32:16,805 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:32:18,548 INFO [train.py:901] (0/4) Epoch 19, batch 6000, loss[loss=0.1851, simple_loss=0.2615, pruned_loss=0.05436, over 7295.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2928, pruned_loss=0.06573, over 1616521.25 frames. ], batch size: 16, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:32:18,549 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 23:32:32,013 INFO [train.py:935] (0/4) Epoch 19, validation: loss=0.1763, simple_loss=0.2764, pruned_loss=0.03805, over 944034.00 frames. +2023-02-06 23:32:32,014 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 23:33:06,937 INFO [train.py:901] (0/4) Epoch 19, batch 6050, loss[loss=0.1678, simple_loss=0.2568, pruned_loss=0.03937, over 8024.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2922, pruned_loss=0.06538, over 1615781.19 frames. ], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:33:09,100 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:33:32,578 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.535e+02 3.172e+02 3.888e+02 8.825e+02, threshold=6.343e+02, percent-clipped=4.0 +2023-02-06 23:33:42,766 INFO [train.py:901] (0/4) Epoch 19, batch 6100, loss[loss=0.2079, simple_loss=0.2908, pruned_loss=0.06249, over 8487.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.293, pruned_loss=0.06594, over 1614534.93 frames. 
], batch size: 29, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:34:07,699 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 23:34:10,838 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:12,869 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6864, 1.4790, 4.8984, 1.9093, 4.3970, 4.0936, 4.4341, 4.3531], + device='cuda:0'), covar=tensor([0.0511, 0.4460, 0.0438, 0.3789, 0.0876, 0.0866, 0.0500, 0.0539], + device='cuda:0'), in_proj_covar=tensor([0.0599, 0.0635, 0.0678, 0.0611, 0.0685, 0.0588, 0.0591, 0.0652], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:34:17,588 INFO [train.py:901] (0/4) Epoch 19, batch 6150, loss[loss=0.1944, simple_loss=0.2851, pruned_loss=0.05191, over 8682.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2929, pruned_loss=0.06556, over 1615255.74 frames. ], batch size: 34, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:34:18,358 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:28,850 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:30,133 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151660.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:30,967 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:43,583 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.320e+02 2.846e+02 3.654e+02 5.745e+02, threshold=5.693e+02, percent-clipped=0.0 +2023-02-06 23:34:53,936 INFO [train.py:901] (0/4) Epoch 19, batch 6200, loss[loss=0.1792, simple_loss=0.2706, pruned_loss=0.0439, over 8086.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2912, pruned_loss=0.06469, over 1612446.69 frames. ], batch size: 21, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:35:02,690 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:10,161 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.12 vs. limit=5.0 +2023-02-06 23:35:28,542 INFO [train.py:901] (0/4) Epoch 19, batch 6250, loss[loss=0.1865, simple_loss=0.251, pruned_loss=0.06101, over 7192.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2911, pruned_loss=0.06455, over 1616353.33 frames. ], batch size: 16, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:35:39,357 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151759.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:50,918 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:53,500 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.555e+02 3.246e+02 4.070e+02 8.549e+02, threshold=6.492e+02, percent-clipped=6.0 +2023-02-06 23:36:03,715 INFO [train.py:901] (0/4) Epoch 19, batch 6300, loss[loss=0.231, simple_loss=0.3056, pruned_loss=0.0782, over 8245.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2908, pruned_loss=0.06454, over 1613145.65 frames. 
], batch size: 24, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:36:22,196 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:36:36,291 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 23:36:39,097 INFO [train.py:901] (0/4) Epoch 19, batch 6350, loss[loss=0.2258, simple_loss=0.3094, pruned_loss=0.07107, over 8494.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2904, pruned_loss=0.06455, over 1611344.21 frames. ], batch size: 29, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:36:52,340 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.4866, 1.2475, 3.8011, 1.5056, 2.9353, 2.9142, 3.2852, 3.3027], + device='cuda:0'), covar=tensor([0.1420, 0.7243, 0.1528, 0.5495, 0.2631, 0.2265, 0.1467, 0.1416], + device='cuda:0'), in_proj_covar=tensor([0.0592, 0.0627, 0.0667, 0.0603, 0.0679, 0.0583, 0.0584, 0.0641], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:37:03,138 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.376e+02 2.921e+02 3.593e+02 6.855e+02, threshold=5.841e+02, percent-clipped=1.0 +2023-02-06 23:37:13,208 INFO [train.py:901] (0/4) Epoch 19, batch 6400, loss[loss=0.2086, simple_loss=0.2966, pruned_loss=0.06029, over 8512.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2915, pruned_loss=0.0652, over 1613561.24 frames. ], batch size: 29, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:37:17,661 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6354, 1.3980, 1.5961, 1.3166, 0.8619, 1.4400, 1.4637, 1.3583], + device='cuda:0'), covar=tensor([0.0586, 0.1276, 0.1717, 0.1462, 0.0632, 0.1498, 0.0714, 0.0660], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:37:30,618 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:37:48,044 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:37:48,519 INFO [train.py:901] (0/4) Epoch 19, batch 6450, loss[loss=0.1816, simple_loss=0.2707, pruned_loss=0.04618, over 8461.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2904, pruned_loss=0.06433, over 1614662.02 frames. ], batch size: 25, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:38:13,513 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.409e+02 2.943e+02 3.710e+02 6.232e+02, threshold=5.887e+02, percent-clipped=1.0 +2023-02-06 23:38:20,582 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2263, 1.4109, 1.6631, 1.3277, 1.0407, 1.4184, 1.8612, 1.6820], + device='cuda:0'), covar=tensor([0.0494, 0.1385, 0.1712, 0.1519, 0.0616, 0.1584, 0.0690, 0.0653], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0099, 0.0160, 0.0112, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:38:23,089 INFO [train.py:901] (0/4) Epoch 19, batch 6500, loss[loss=0.2065, simple_loss=0.2851, pruned_loss=0.06397, over 8438.00 frames. 
], tot_loss[loss=0.2092, simple_loss=0.2899, pruned_loss=0.06427, over 1614866.27 frames. ], batch size: 27, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:38:27,747 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-152000.pt +2023-02-06 23:38:40,007 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:51,607 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:58,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152040.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:00,452 INFO [train.py:901] (0/4) Epoch 19, batch 6550, loss[loss=0.1712, simple_loss=0.2509, pruned_loss=0.0458, over 7698.00 frames. ], tot_loss[loss=0.208, simple_loss=0.289, pruned_loss=0.06347, over 1613098.80 frames. ], batch size: 18, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:39:04,922 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:09,220 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:21,603 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 23:39:24,915 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.379e+02 2.761e+02 3.695e+02 7.678e+02, threshold=5.522e+02, percent-clipped=3.0 +2023-02-06 23:39:34,324 INFO [train.py:901] (0/4) Epoch 19, batch 6600, loss[loss=0.2557, simple_loss=0.3261, pruned_loss=0.09261, over 8634.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2902, pruned_loss=0.06391, over 1616530.33 frames. ], batch size: 34, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:39:39,624 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:40:09,003 INFO [train.py:901] (0/4) Epoch 19, batch 6650, loss[loss=0.2328, simple_loss=0.3176, pruned_loss=0.07398, over 8760.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06403, over 1614765.92 frames. ], batch size: 30, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:40:23,461 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:40:24,942 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:40:34,177 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.686e+02 3.265e+02 3.895e+02 8.931e+02, threshold=6.531e+02, percent-clipped=7.0 +2023-02-06 23:40:44,520 INFO [train.py:901] (0/4) Epoch 19, batch 6700, loss[loss=0.1637, simple_loss=0.2346, pruned_loss=0.04644, over 6785.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.0635, over 1614739.55 frames. 
], batch size: 15, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:40:46,809 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0845, 2.1868, 1.8681, 2.7789, 1.3423, 1.6151, 2.0062, 2.3547], + device='cuda:0'), covar=tensor([0.0735, 0.0809, 0.0907, 0.0419, 0.1099, 0.1396, 0.0868, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0198, 0.0249, 0.0213, 0.0206, 0.0250, 0.0254, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 23:41:01,636 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4020, 1.6864, 1.6668, 1.0381, 1.7440, 1.3710, 0.2503, 1.6350], + device='cuda:0'), covar=tensor([0.0432, 0.0317, 0.0309, 0.0471, 0.0382, 0.0888, 0.0769, 0.0240], + device='cuda:0'), in_proj_covar=tensor([0.0438, 0.0377, 0.0330, 0.0436, 0.0363, 0.0521, 0.0382, 0.0403], + device='cuda:0'), out_proj_covar=tensor([1.1851e-04, 9.9523e-05, 8.7250e-05, 1.1574e-04, 9.6159e-05, 1.4864e-04, + 1.0355e-04, 1.0741e-04], device='cuda:0') +2023-02-06 23:41:19,456 INFO [train.py:901] (0/4) Epoch 19, batch 6750, loss[loss=0.1971, simple_loss=0.2785, pruned_loss=0.05783, over 8237.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2912, pruned_loss=0.06433, over 1614707.24 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:41:19,594 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:40,040 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-06 23:41:41,437 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 23:41:44,675 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:45,124 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.378e+02 2.909e+02 3.491e+02 6.752e+02, threshold=5.817e+02, percent-clipped=2.0 +2023-02-06 23:41:53,495 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:54,063 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 23:41:54,748 INFO [train.py:901] (0/4) Epoch 19, batch 6800, loss[loss=0.2337, simple_loss=0.3117, pruned_loss=0.07779, over 8472.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.291, pruned_loss=0.06402, over 1615735.05 frames. ], batch size: 27, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:42:29,090 INFO [train.py:901] (0/4) Epoch 19, batch 6850, loss[loss=0.1958, simple_loss=0.2876, pruned_loss=0.05196, over 8328.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2903, pruned_loss=0.0638, over 1612219.44 frames. ], batch size: 25, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:42:43,965 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 23:42:54,741 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.344e+02 3.012e+02 3.839e+02 8.073e+02, threshold=6.025e+02, percent-clipped=5.0 +2023-02-06 23:43:05,118 INFO [train.py:901] (0/4) Epoch 19, batch 6900, loss[loss=0.197, simple_loss=0.2865, pruned_loss=0.05378, over 8290.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2905, pruned_loss=0.06359, over 1613091.94 frames. 
], batch size: 23, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:43:10,945 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9003, 1.6508, 2.0456, 1.7569, 1.9278, 1.9336, 1.7140, 0.7849], + device='cuda:0'), covar=tensor([0.5021, 0.4359, 0.1826, 0.3187, 0.2336, 0.2720, 0.1915, 0.4580], + device='cuda:0'), in_proj_covar=tensor([0.0927, 0.0956, 0.0783, 0.0918, 0.0977, 0.0872, 0.0738, 0.0812], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 23:43:25,518 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152421.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:43:29,729 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7199, 2.0754, 2.1952, 1.3839, 2.2712, 1.5467, 0.6487, 1.9349], + device='cuda:0'), covar=tensor([0.0477, 0.0294, 0.0260, 0.0534, 0.0349, 0.0819, 0.0757, 0.0283], + device='cuda:0'), in_proj_covar=tensor([0.0437, 0.0378, 0.0333, 0.0437, 0.0365, 0.0524, 0.0384, 0.0406], + device='cuda:0'), out_proj_covar=tensor([1.1828e-04, 9.9567e-05, 8.8043e-05, 1.1599e-04, 9.6806e-05, 1.4952e-04, + 1.0402e-04, 1.0828e-04], device='cuda:0') +2023-02-06 23:43:40,384 INFO [train.py:901] (0/4) Epoch 19, batch 6950, loss[loss=0.2364, simple_loss=0.3263, pruned_loss=0.07323, over 8094.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2919, pruned_loss=0.06439, over 1616169.68 frames. ], batch size: 23, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:43:42,596 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:43:53,786 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 23:43:53,933 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:44:05,258 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.443e+02 3.132e+02 3.706e+02 6.613e+02, threshold=6.264e+02, percent-clipped=2.0 +2023-02-06 23:44:14,623 INFO [train.py:901] (0/4) Epoch 19, batch 7000, loss[loss=0.2035, simple_loss=0.2851, pruned_loss=0.06093, over 8133.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2929, pruned_loss=0.06479, over 1620069.50 frames. ], batch size: 22, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:44:44,356 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152534.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:44:51,103 INFO [train.py:901] (0/4) Epoch 19, batch 7050, loss[loss=0.2101, simple_loss=0.2882, pruned_loss=0.066, over 7961.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.293, pruned_loss=0.06477, over 1619451.21 frames. ], batch size: 21, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:45:02,279 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:15,723 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.420e+02 2.800e+02 3.429e+02 5.549e+02, threshold=5.599e+02, percent-clipped=0.0 +2023-02-06 23:45:21,283 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:25,398 INFO [train.py:901] (0/4) Epoch 19, batch 7100, loss[loss=0.2448, simple_loss=0.3211, pruned_loss=0.08427, over 8569.00 frames. 
], tot_loss[loss=0.2111, simple_loss=0.2925, pruned_loss=0.0649, over 1619922.13 frames. ], batch size: 31, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:45:30,673 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:35,438 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:56,366 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:46:00,031 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1218, 1.9135, 2.5473, 2.0367, 2.3394, 2.1532, 1.8618, 1.2238], + device='cuda:0'), covar=tensor([0.5144, 0.4450, 0.1573, 0.3078, 0.2304, 0.2827, 0.1889, 0.4674], + device='cuda:0'), in_proj_covar=tensor([0.0929, 0.0957, 0.0783, 0.0921, 0.0979, 0.0872, 0.0738, 0.0816], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 23:46:01,878 INFO [train.py:901] (0/4) Epoch 19, batch 7150, loss[loss=0.1923, simple_loss=0.2698, pruned_loss=0.05739, over 7810.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.292, pruned_loss=0.06451, over 1620153.12 frames. ], batch size: 20, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:46:27,177 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.485e+02 2.441e+02 2.885e+02 3.630e+02 1.043e+03, threshold=5.770e+02, percent-clipped=5.0 +2023-02-06 23:46:36,617 INFO [train.py:901] (0/4) Epoch 19, batch 7200, loss[loss=0.2267, simple_loss=0.299, pruned_loss=0.07716, over 8211.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2912, pruned_loss=0.06464, over 1616361.06 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:46:42,736 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:46:44,095 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6389, 1.6697, 2.0709, 1.4437, 1.2214, 2.0501, 0.3236, 1.3199], + device='cuda:0'), covar=tensor([0.1828, 0.1353, 0.0418, 0.1196, 0.2931, 0.0459, 0.2170, 0.1337], + device='cuda:0'), in_proj_covar=tensor([0.0186, 0.0194, 0.0123, 0.0221, 0.0267, 0.0133, 0.0168, 0.0186], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 23:47:12,601 INFO [train.py:901] (0/4) Epoch 19, batch 7250, loss[loss=0.2292, simple_loss=0.3124, pruned_loss=0.073, over 8249.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2907, pruned_loss=0.06469, over 1615255.90 frames. ], batch size: 24, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:47:13,459 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152744.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:17,250 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 23:47:17,701 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:37,385 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.392e+02 2.877e+02 3.488e+02 7.359e+02, threshold=5.753e+02, percent-clipped=2.0 +2023-02-06 23:47:47,609 INFO [train.py:901] (0/4) Epoch 19, batch 7300, loss[loss=0.1788, simple_loss=0.2581, pruned_loss=0.04976, over 7422.00 frames. 
], tot_loss[loss=0.2097, simple_loss=0.2903, pruned_loss=0.06453, over 1611880.15 frames. ], batch size: 17, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:47:57,325 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:48:21,885 INFO [train.py:901] (0/4) Epoch 19, batch 7350, loss[loss=0.2541, simple_loss=0.3262, pruned_loss=0.09105, over 7929.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2902, pruned_loss=0.06476, over 1610776.97 frames. ], batch size: 20, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:48:41,691 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.56 vs. limit=5.0 +2023-02-06 23:48:46,710 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 23:48:48,162 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.571e+02 3.070e+02 4.184e+02 8.940e+02, threshold=6.140e+02, percent-clipped=8.0 +2023-02-06 23:48:58,050 INFO [train.py:901] (0/4) Epoch 19, batch 7400, loss[loss=0.2255, simple_loss=0.3047, pruned_loss=0.0731, over 8582.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2905, pruned_loss=0.06442, over 1615362.90 frames. ], batch size: 39, lr: 3.95e-03, grad_scale: 32.0 +2023-02-06 23:49:07,695 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 23:49:13,379 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3079, 2.0046, 4.4023, 2.1052, 2.4352, 5.0346, 5.1102, 4.3487], + device='cuda:0'), covar=tensor([0.1227, 0.1581, 0.0278, 0.1904, 0.1224, 0.0176, 0.0396, 0.0569], + device='cuda:0'), in_proj_covar=tensor([0.0292, 0.0320, 0.0286, 0.0313, 0.0303, 0.0262, 0.0409, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:49:18,798 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:32,918 INFO [train.py:901] (0/4) Epoch 19, batch 7450, loss[loss=0.1719, simple_loss=0.2558, pruned_loss=0.04404, over 7556.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2905, pruned_loss=0.06425, over 1613918.17 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 32.0 +2023-02-06 23:49:33,696 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:38,517 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:44,014 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:46,572 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 23:49:58,932 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.506e+02 3.079e+02 4.075e+02 8.166e+02, threshold=6.159e+02, percent-clipped=5.0 +2023-02-06 23:50:01,203 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:08,330 INFO [train.py:901] (0/4) Epoch 19, batch 7500, loss[loss=0.213, simple_loss=0.2832, pruned_loss=0.07136, over 7264.00 frames. 
], tot_loss[loss=0.2097, simple_loss=0.2904, pruned_loss=0.06452, over 1609356.96 frames. ], batch size: 16, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:50:17,495 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:31,548 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5340, 2.5920, 1.9645, 2.2708, 2.1451, 1.6126, 2.0776, 2.1848], + device='cuda:0'), covar=tensor([0.1388, 0.0385, 0.1027, 0.0623, 0.0697, 0.1448, 0.0870, 0.0919], + device='cuda:0'), in_proj_covar=tensor([0.0345, 0.0232, 0.0324, 0.0300, 0.0295, 0.0327, 0.0338, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:50:34,833 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:42,929 INFO [train.py:901] (0/4) Epoch 19, batch 7550, loss[loss=0.2271, simple_loss=0.3024, pruned_loss=0.07594, over 8029.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2905, pruned_loss=0.06414, over 1614112.67 frames. ], batch size: 22, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:50:47,919 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8630, 3.0895, 2.5378, 4.1577, 1.8722, 2.2263, 2.7231, 3.3364], + device='cuda:0'), covar=tensor([0.0638, 0.0806, 0.0805, 0.0240, 0.1058, 0.1191, 0.0893, 0.0724], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0211, 0.0203, 0.0246, 0.0250, 0.0207], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-06 23:50:53,917 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:58,707 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:58,948 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-06 23:51:08,489 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.431e+02 2.980e+02 3.688e+02 7.634e+02, threshold=5.960e+02, percent-clipped=2.0 +2023-02-06 23:51:14,080 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:18,073 INFO [train.py:901] (0/4) Epoch 19, batch 7600, loss[loss=0.2299, simple_loss=0.3136, pruned_loss=0.07315, over 8486.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2912, pruned_loss=0.06472, over 1611388.31 frames. 
], batch size: 28, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:51:45,038 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2467, 1.1776, 1.4078, 1.1237, 0.7545, 1.2099, 1.2389, 1.0336], + device='cuda:0'), covar=tensor([0.0619, 0.1245, 0.1647, 0.1458, 0.0606, 0.1458, 0.0698, 0.0695], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0160, 0.0113, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:51:47,761 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:53,093 INFO [train.py:901] (0/4) Epoch 19, batch 7650, loss[loss=0.2182, simple_loss=0.296, pruned_loss=0.07023, over 8251.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2901, pruned_loss=0.06452, over 1611340.80 frames. ], batch size: 24, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:52:17,637 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:18,733 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.290e+02 2.780e+02 3.362e+02 7.829e+02, threshold=5.561e+02, percent-clipped=2.0 +2023-02-06 23:52:28,387 INFO [train.py:901] (0/4) Epoch 19, batch 7700, loss[loss=0.1792, simple_loss=0.2626, pruned_loss=0.04785, over 7551.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2904, pruned_loss=0.06452, over 1611231.69 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:52:33,499 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3822, 1.9911, 4.1499, 1.3207, 2.7797, 1.9927, 1.4392, 2.5791], + device='cuda:0'), covar=tensor([0.2268, 0.2930, 0.0763, 0.5097, 0.2115, 0.3480, 0.2727, 0.2756], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0587, 0.0557, 0.0632, 0.0644, 0.0592, 0.0524, 0.0635], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:52:35,486 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:35,509 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:44,963 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2547, 1.1442, 3.3696, 0.9920, 2.9856, 2.8022, 3.0718, 2.9812], + device='cuda:0'), covar=tensor([0.0805, 0.4550, 0.0798, 0.4359, 0.1409, 0.1168, 0.0759, 0.0908], + device='cuda:0'), in_proj_covar=tensor([0.0600, 0.0630, 0.0673, 0.0605, 0.0687, 0.0589, 0.0586, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 23:52:57,462 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 23:53:02,813 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2536, 1.1589, 3.3633, 0.9961, 2.9553, 2.8176, 3.0605, 2.9532], + device='cuda:0'), covar=tensor([0.0820, 0.4305, 0.0858, 0.4317, 0.1410, 0.1109, 0.0748, 0.0918], + device='cuda:0'), in_proj_covar=tensor([0.0596, 0.0627, 0.0669, 0.0602, 0.0684, 0.0586, 0.0584, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 23:53:03,343 INFO [train.py:901] (0/4) Epoch 19, batch 7750, loss[loss=0.2592, simple_loss=0.3345, pruned_loss=0.09194, over 8515.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2922, pruned_loss=0.06488, over 1617876.88 frames. ], batch size: 26, lr: 3.94e-03, grad_scale: 16.0 +2023-02-06 23:53:15,051 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 23:53:18,415 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2415, 4.2108, 3.8228, 2.1936, 3.7574, 3.8388, 3.8264, 3.5857], + device='cuda:0'), covar=tensor([0.0762, 0.0585, 0.1168, 0.4332, 0.0930, 0.0885, 0.1362, 0.0860], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0429, 0.0429, 0.0536, 0.0422, 0.0434, 0.0416, 0.0375], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:53:28,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.456e+02 3.001e+02 3.725e+02 8.940e+02, threshold=6.003e+02, percent-clipped=11.0 +2023-02-06 23:53:37,743 INFO [train.py:901] (0/4) Epoch 19, batch 7800, loss[loss=0.2368, simple_loss=0.3242, pruned_loss=0.07472, over 8504.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2924, pruned_loss=0.06534, over 1613693.55 frames. 
], batch size: 26, lr: 3.94e-03, grad_scale: 16.0 +2023-02-06 23:53:39,913 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:53:53,366 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:53:57,897 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9564, 3.7534, 3.4499, 2.5619, 3.4083, 3.4912, 3.5667, 3.2897], + device='cuda:0'), covar=tensor([0.0736, 0.0771, 0.1092, 0.3508, 0.0826, 0.1179, 0.1248, 0.1022], + device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0430, 0.0431, 0.0537, 0.0423, 0.0435, 0.0417, 0.0375], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:53:58,012 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:06,990 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6618, 2.1828, 4.0877, 1.4488, 3.0102, 2.1876, 1.7307, 2.9959], + device='cuda:0'), covar=tensor([0.1902, 0.2666, 0.0795, 0.4536, 0.1860, 0.3141, 0.2351, 0.2297], + device='cuda:0'), in_proj_covar=tensor([0.0516, 0.0584, 0.0555, 0.0629, 0.0641, 0.0589, 0.0522, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-06 23:54:09,673 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:11,465 INFO [train.py:901] (0/4) Epoch 19, batch 7850, loss[loss=0.1838, simple_loss=0.2671, pruned_loss=0.0503, over 7945.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2926, pruned_loss=0.06583, over 1608927.29 frames. ], batch size: 20, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:54:14,371 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:36,623 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.457e+02 2.874e+02 3.581e+02 1.670e+03, threshold=5.749e+02, percent-clipped=9.0 +2023-02-06 23:54:44,304 INFO [train.py:901] (0/4) Epoch 19, batch 7900, loss[loss=0.2354, simple_loss=0.3232, pruned_loss=0.07384, over 8488.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2902, pruned_loss=0.06452, over 1604882.91 frames. ], batch size: 29, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:55:15,521 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:17,980 INFO [train.py:901] (0/4) Epoch 19, batch 7950, loss[loss=0.2561, simple_loss=0.3246, pruned_loss=0.09386, over 7096.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2907, pruned_loss=0.06449, over 1607443.04 frames. 
], batch size: 71, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:55:28,805 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:37,525 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3447, 1.9488, 2.5490, 2.0919, 2.4640, 2.3329, 2.1518, 1.3328], + device='cuda:0'), covar=tensor([0.5001, 0.4694, 0.1913, 0.3811, 0.2496, 0.2731, 0.1662, 0.5079], + device='cuda:0'), in_proj_covar=tensor([0.0926, 0.0955, 0.0782, 0.0922, 0.0981, 0.0867, 0.0735, 0.0809], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-06 23:55:41,874 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:43,178 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.434e+02 3.034e+02 3.983e+02 8.510e+02, threshold=6.068e+02, percent-clipped=6.0 +2023-02-06 23:55:45,334 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:51,001 INFO [train.py:901] (0/4) Epoch 19, batch 8000, loss[loss=0.2342, simple_loss=0.332, pruned_loss=0.06817, over 8340.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.291, pruned_loss=0.06485, over 1607962.26 frames. ], batch size: 25, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:56:17,602 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 23:56:25,144 INFO [train.py:901] (0/4) Epoch 19, batch 8050, loss[loss=0.1901, simple_loss=0.2707, pruned_loss=0.05477, over 7519.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2896, pruned_loss=0.06424, over 1602488.85 frames. ], batch size: 18, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:56:48,472 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-19.pt +2023-02-06 23:57:01,097 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 23:57:04,930 INFO [train.py:901] (0/4) Epoch 20, batch 0, loss[loss=0.2063, simple_loss=0.2879, pruned_loss=0.06231, over 8075.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2879, pruned_loss=0.06231, over 8075.00 frames. ], batch size: 21, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:57:04,931 INFO [train.py:926] (0/4) Computing validation loss +2023-02-06 23:57:16,945 INFO [train.py:935] (0/4) Epoch 20, validation: loss=0.1757, simple_loss=0.276, pruned_loss=0.03766, over 944034.00 frames. 
+2023-02-06 23:57:16,946 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-06 23:57:20,459 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.577e+02 3.496e+02 4.495e+02 1.164e+03, threshold=6.992e+02, percent-clipped=12.0 +2023-02-06 23:57:29,439 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6436, 1.2447, 1.5287, 1.2110, 0.8722, 1.2821, 1.5529, 1.5428], + device='cuda:0'), covar=tensor([0.0611, 0.1841, 0.2539, 0.1964, 0.0696, 0.2110, 0.0822, 0.0733], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0158, 0.0100, 0.0161, 0.0113, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-06 23:57:29,447 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:57:31,306 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 23:57:33,665 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2838, 2.0904, 3.1185, 1.9157, 2.6341, 3.4413, 3.3388, 3.1023], + device='cuda:0'), covar=tensor([0.0944, 0.1325, 0.0552, 0.1682, 0.1200, 0.0226, 0.0628, 0.0437], + device='cuda:0'), in_proj_covar=tensor([0.0288, 0.0317, 0.0284, 0.0310, 0.0300, 0.0260, 0.0403, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-06 23:57:41,242 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3395, 2.4129, 1.7375, 2.0638, 2.0123, 1.4812, 1.7514, 1.8841], + device='cuda:0'), covar=tensor([0.1336, 0.0368, 0.1078, 0.0611, 0.0650, 0.1419, 0.0950, 0.0873], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0235, 0.0330, 0.0305, 0.0299, 0.0332, 0.0343, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-06 23:57:51,326 INFO [train.py:901] (0/4) Epoch 20, batch 50, loss[loss=0.1908, simple_loss=0.2822, pruned_loss=0.04968, over 8310.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2957, pruned_loss=0.06669, over 368384.43 frames. ], batch size: 25, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:58:01,101 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153640.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:58:06,570 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 23:58:27,836 INFO [train.py:901] (0/4) Epoch 20, batch 100, loss[loss=0.2435, simple_loss=0.3187, pruned_loss=0.08412, over 8465.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2938, pruned_loss=0.06616, over 651407.07 frames. ], batch size: 29, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:58:29,238 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 23:58:31,357 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.446e+02 2.844e+02 3.351e+02 7.473e+02, threshold=5.688e+02, percent-clipped=2.0 +2023-02-06 23:59:03,135 INFO [train.py:901] (0/4) Epoch 20, batch 150, loss[loss=0.1716, simple_loss=0.26, pruned_loss=0.04164, over 8088.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2924, pruned_loss=0.06491, over 863181.44 frames. 
], batch size: 21, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:59:23,316 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:59:26,114 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7845, 1.8053, 2.3207, 1.6076, 1.3618, 2.3303, 0.5246, 1.4482], + device='cuda:0'), covar=tensor([0.1728, 0.1183, 0.0303, 0.1178, 0.2819, 0.0388, 0.2148, 0.1302], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0221, 0.0270, 0.0133, 0.0169, 0.0186], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-06 23:59:39,298 INFO [train.py:901] (0/4) Epoch 20, batch 200, loss[loss=0.1828, simple_loss=0.2599, pruned_loss=0.05284, over 7555.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2918, pruned_loss=0.06498, over 1027477.45 frames. ], batch size: 18, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:59:42,412 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-06 23:59:42,527 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.177e+02 2.784e+02 3.416e+02 8.818e+02, threshold=5.569e+02, percent-clipped=1.0 +2023-02-06 23:59:43,904 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:15,038 INFO [train.py:901] (0/4) Epoch 20, batch 250, loss[loss=0.1665, simple_loss=0.2449, pruned_loss=0.04406, over 7703.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2919, pruned_loss=0.06537, over 1151943.89 frames. ], batch size: 18, lr: 3.84e-03, grad_scale: 8.0 +2023-02-07 00:00:26,531 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 00:00:31,606 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153850.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:34,729 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 00:00:48,269 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153875.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:48,764 INFO [train.py:901] (0/4) Epoch 20, batch 300, loss[loss=0.1997, simple_loss=0.2775, pruned_loss=0.06096, over 7697.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2936, pruned_loss=0.06617, over 1256625.10 frames. ], batch size: 18, lr: 3.84e-03, grad_scale: 8.0 +2023-02-07 00:00:51,992 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.425e+02 2.846e+02 3.739e+02 1.062e+03, threshold=5.691e+02, percent-clipped=2.0 +2023-02-07 00:01:05,165 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:01:24,553 INFO [train.py:901] (0/4) Epoch 20, batch 350, loss[loss=0.2184, simple_loss=0.3032, pruned_loss=0.06685, over 8029.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2924, pruned_loss=0.06553, over 1337805.14 frames. ], batch size: 22, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:01:35,765 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153941.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:01:36,640 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-07 00:01:59,284 INFO [train.py:901] (0/4) Epoch 20, batch 400, loss[loss=0.1663, simple_loss=0.2466, pruned_loss=0.04298, over 7698.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2934, pruned_loss=0.0661, over 1399324.47 frames. ], batch size: 18, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:02:02,806 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 2.483e+02 2.937e+02 3.652e+02 9.410e+02, threshold=5.874e+02, percent-clipped=4.0 +2023-02-07 00:02:15,726 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-154000.pt +2023-02-07 00:02:22,964 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 00:02:25,630 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:02:36,314 INFO [train.py:901] (0/4) Epoch 20, batch 450, loss[loss=0.2205, simple_loss=0.3032, pruned_loss=0.06888, over 8743.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2926, pruned_loss=0.06519, over 1449912.76 frames. ], batch size: 30, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:02:44,055 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154036.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:05,762 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154067.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:03:11,785 INFO [train.py:901] (0/4) Epoch 20, batch 500, loss[loss=0.1899, simple_loss=0.2832, pruned_loss=0.04829, over 8641.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2907, pruned_loss=0.06427, over 1482364.45 frames. ], batch size: 49, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:03:15,237 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 2.274e+02 2.685e+02 3.204e+02 7.760e+02, threshold=5.371e+02, percent-clipped=3.0 +2023-02-07 00:03:24,422 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:29,957 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:46,381 INFO [train.py:901] (0/4) Epoch 20, batch 550, loss[loss=0.1458, simple_loss=0.2215, pruned_loss=0.03505, over 7430.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2904, pruned_loss=0.06376, over 1509865.49 frames. ], batch size: 17, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:03:49,725 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-07 00:03:59,234 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-07 00:04:07,825 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:04:23,276 INFO [train.py:901] (0/4) Epoch 20, batch 600, loss[loss=0.2074, simple_loss=0.2775, pruned_loss=0.06866, over 7979.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2892, pruned_loss=0.06324, over 1530989.74 frames. 
], batch size: 21, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:04:25,537 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:04:26,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.477e+02 2.962e+02 3.836e+02 8.919e+02, threshold=5.925e+02, percent-clipped=6.0 +2023-02-07 00:04:45,282 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 00:04:57,538 INFO [train.py:901] (0/4) Epoch 20, batch 650, loss[loss=0.2198, simple_loss=0.3113, pruned_loss=0.06415, over 8636.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2881, pruned_loss=0.06258, over 1547801.60 frames. ], batch size: 34, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:05:06,548 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:08,022 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7665, 1.9825, 1.7211, 2.2721, 1.0126, 1.4598, 1.6888, 1.9418], + device='cuda:0'), covar=tensor([0.0772, 0.0761, 0.0915, 0.0403, 0.1026, 0.1346, 0.0704, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0247, 0.0209, 0.0204, 0.0248, 0.0249, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 00:05:34,061 INFO [train.py:901] (0/4) Epoch 20, batch 700, loss[loss=0.2015, simple_loss=0.2754, pruned_loss=0.06378, over 7702.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2886, pruned_loss=0.06272, over 1561619.17 frames. ], batch size: 18, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:05:37,459 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.357e+02 2.958e+02 3.586e+02 6.466e+02, threshold=5.915e+02, percent-clipped=2.0 +2023-02-07 00:05:38,980 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0434, 2.3949, 1.8126, 2.8456, 1.4355, 1.6614, 2.0417, 2.3804], + device='cuda:0'), covar=tensor([0.0688, 0.0675, 0.0865, 0.0340, 0.1025, 0.1259, 0.0802, 0.0704], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0196, 0.0247, 0.0209, 0.0205, 0.0248, 0.0249, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 00:05:40,210 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154285.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:54,018 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154304.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:57,527 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:59,511 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5152, 1.5150, 1.8699, 1.3354, 1.2406, 1.8268, 0.1879, 1.1844], + device='cuda:0'), covar=tensor([0.1921, 0.1282, 0.0409, 0.0925, 0.2728, 0.0459, 0.2180, 0.1250], + device='cuda:0'), in_proj_covar=tensor([0.0188, 0.0195, 0.0124, 0.0222, 0.0270, 0.0135, 0.0170, 0.0187], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 00:06:08,842 INFO [train.py:901] (0/4) Epoch 20, batch 750, loss[loss=0.2002, simple_loss=0.2692, pruned_loss=0.06565, over 7261.00 frames. 
], tot_loss[loss=0.2088, simple_loss=0.2902, pruned_loss=0.0637, over 1574353.02 frames. ], batch size: 16, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:06:11,930 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 00:06:28,505 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154355.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:06:33,652 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 00:06:43,003 INFO [train.py:901] (0/4) Epoch 20, batch 800, loss[loss=0.2084, simple_loss=0.2996, pruned_loss=0.05856, over 8339.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06394, over 1582809.60 frames. ], batch size: 26, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:06:43,012 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 00:06:47,166 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.337e+02 2.441e+02 3.052e+02 3.711e+02 8.675e+02, threshold=6.104e+02, percent-clipped=3.0 +2023-02-07 00:07:01,172 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:08,310 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154411.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:07:19,178 INFO [train.py:901] (0/4) Epoch 20, batch 850, loss[loss=0.2116, simple_loss=0.3039, pruned_loss=0.05968, over 8105.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2902, pruned_loss=0.06352, over 1594480.15 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:07:27,220 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:32,649 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:52,873 INFO [train.py:901] (0/4) Epoch 20, batch 900, loss[loss=0.2021, simple_loss=0.2874, pruned_loss=0.0584, over 8246.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2894, pruned_loss=0.0636, over 1599857.08 frames. 
], batch size: 22, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:07:56,205 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.439e+02 2.923e+02 3.686e+02 1.072e+03, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 00:07:57,767 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0140, 2.2996, 1.8502, 2.7121, 1.2382, 1.6464, 1.9062, 2.2436], + device='cuda:0'), covar=tensor([0.0700, 0.0679, 0.0915, 0.0392, 0.1151, 0.1313, 0.0858, 0.0733], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0195, 0.0246, 0.0209, 0.0205, 0.0248, 0.0249, 0.0208], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 00:07:59,808 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3326, 1.7397, 2.6804, 1.2157, 1.9424, 1.7336, 1.4941, 1.8711], + device='cuda:0'), covar=tensor([0.2144, 0.2577, 0.0913, 0.4893, 0.2031, 0.3439, 0.2474, 0.2376], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0586, 0.0553, 0.0631, 0.0639, 0.0587, 0.0524, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:08:04,389 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:08:29,164 INFO [train.py:901] (0/4) Epoch 20, batch 950, loss[loss=0.1994, simple_loss=0.2906, pruned_loss=0.05409, over 8330.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2897, pruned_loss=0.06315, over 1605558.02 frames. ], batch size: 25, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:08:29,367 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154526.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:08:48,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154553.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:08:54,159 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:00,273 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9344, 1.4968, 3.2146, 1.3488, 2.2102, 3.5106, 3.6780, 3.0002], + device='cuda:0'), covar=tensor([0.1179, 0.1899, 0.0354, 0.2286, 0.1170, 0.0262, 0.0514, 0.0597], + device='cuda:0'), in_proj_covar=tensor([0.0288, 0.0318, 0.0286, 0.0310, 0.0302, 0.0259, 0.0403, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 00:09:01,512 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 00:09:04,232 INFO [train.py:901] (0/4) Epoch 20, batch 1000, loss[loss=0.2558, simple_loss=0.331, pruned_loss=0.09032, over 8194.00 frames. ], tot_loss[loss=0.209, simple_loss=0.291, pruned_loss=0.06352, over 1608062.59 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:09:07,493 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.511e+02 3.044e+02 3.807e+02 8.767e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 00:09:08,974 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:35,154 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-07 00:09:38,959 INFO [train.py:901] (0/4) Epoch 20, batch 1050, loss[loss=0.1856, simple_loss=0.2861, pruned_loss=0.04259, over 8255.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2917, pruned_loss=0.06389, over 1610951.41 frames. ], batch size: 24, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:09:49,451 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 00:09:52,626 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.74 vs. limit=2.0 +2023-02-07 00:09:53,684 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154646.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:54,889 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:59,113 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:01,431 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154656.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:14,713 INFO [train.py:901] (0/4) Epoch 20, batch 1100, loss[loss=0.2023, simple_loss=0.2884, pruned_loss=0.05812, over 8532.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2908, pruned_loss=0.06382, over 1616637.04 frames. ], batch size: 28, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:10:18,094 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.486e+02 3.103e+02 3.988e+02 8.246e+02, threshold=6.206e+02, percent-clipped=6.0 +2023-02-07 00:10:18,318 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154681.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:29,739 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:30,329 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154699.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:10:45,302 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 00:10:48,864 INFO [train.py:901] (0/4) Epoch 20, batch 1150, loss[loss=0.1991, simple_loss=0.2859, pruned_loss=0.05616, over 8188.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2913, pruned_loss=0.06366, over 1620663.27 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:10:57,846 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154738.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:59,083 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-07 00:11:16,273 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:19,770 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:21,236 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7077, 2.0835, 3.2823, 1.5336, 2.4451, 2.0714, 1.7908, 2.4556], + device='cuda:0'), covar=tensor([0.1736, 0.2436, 0.0718, 0.4179, 0.1605, 0.3044, 0.2089, 0.2011], + device='cuda:0'), in_proj_covar=tensor([0.0521, 0.0586, 0.0553, 0.0631, 0.0642, 0.0589, 0.0524, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:11:25,033 INFO [train.py:901] (0/4) Epoch 20, batch 1200, loss[loss=0.2334, simple_loss=0.318, pruned_loss=0.07438, over 8541.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2922, pruned_loss=0.06457, over 1622212.63 frames. ], batch size: 39, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:11:28,383 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.412e+02 2.746e+02 3.577e+02 9.067e+02, threshold=5.492e+02, percent-clipped=2.0 +2023-02-07 00:11:29,318 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154782.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:46,342 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154807.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:47,772 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154809.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:48,781 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.83 vs. limit=5.0 +2023-02-07 00:11:51,180 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154814.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:11:53,298 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154817.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:59,125 INFO [train.py:901] (0/4) Epoch 20, batch 1250, loss[loss=0.1593, simple_loss=0.2376, pruned_loss=0.04053, over 6807.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2916, pruned_loss=0.06463, over 1618094.09 frames. ], batch size: 15, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:05,340 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154834.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:06,466 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:11,369 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:34,986 INFO [train.py:901] (0/4) Epoch 20, batch 1300, loss[loss=0.2141, simple_loss=0.3026, pruned_loss=0.06279, over 8289.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2921, pruned_loss=0.06441, over 1619964.76 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:38,324 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.433e+02 3.191e+02 3.995e+02 7.235e+02, threshold=6.381e+02, percent-clipped=6.0 +2023-02-07 00:13:09,376 INFO [train.py:901] (0/4) Epoch 20, batch 1350, loss[loss=0.2329, simple_loss=0.3144, pruned_loss=0.07569, over 8557.00 frames. 
], tot_loss[loss=0.2116, simple_loss=0.2931, pruned_loss=0.06504, over 1621313.07 frames. ], batch size: 49, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:27,094 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:29,046 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:44,745 INFO [train.py:901] (0/4) Epoch 20, batch 1400, loss[loss=0.1961, simple_loss=0.2791, pruned_loss=0.05657, over 8096.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2919, pruned_loss=0.06462, over 1616210.60 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:47,792 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:48,963 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.419e+02 2.969e+02 3.620e+02 8.609e+02, threshold=5.938e+02, percent-clipped=3.0 +2023-02-07 00:13:55,291 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:58,156 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:16,395 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:19,593 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:20,739 INFO [train.py:901] (0/4) Epoch 20, batch 1450, loss[loss=0.2138, simple_loss=0.3018, pruned_loss=0.06285, over 8254.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2931, pruned_loss=0.06542, over 1618245.23 frames. ], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:29,164 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 00:14:33,294 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:36,494 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:50,888 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155070.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:14:55,240 INFO [train.py:901] (0/4) Epoch 20, batch 1500, loss[loss=0.2019, simple_loss=0.2855, pruned_loss=0.05912, over 7920.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2943, pruned_loss=0.06637, over 1619609.80 frames. 
], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:58,575 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.482e+02 3.072e+02 3.822e+02 6.990e+02, threshold=6.143e+02, percent-clipped=2.0 +2023-02-07 00:14:59,295 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:02,790 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155087.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:09,000 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155095.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:15:09,715 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.20 vs. limit=5.0 +2023-02-07 00:15:15,587 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:30,480 INFO [train.py:901] (0/4) Epoch 20, batch 1550, loss[loss=0.2476, simple_loss=0.3176, pruned_loss=0.08876, over 7020.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2936, pruned_loss=0.06521, over 1621279.68 frames. ], batch size: 71, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:15:36,919 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1281, 1.5307, 1.7518, 1.3972, 0.8846, 1.4671, 1.7547, 1.6482], + device='cuda:0'), covar=tensor([0.0529, 0.1242, 0.1586, 0.1414, 0.0634, 0.1470, 0.0682, 0.0618], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0157, 0.0100, 0.0160, 0.0111, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:16:04,725 INFO [train.py:901] (0/4) Epoch 20, batch 1600, loss[loss=0.1556, simple_loss=0.2358, pruned_loss=0.03768, over 6791.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2927, pruned_loss=0.0649, over 1622481.24 frames. ], batch size: 15, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:08,764 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.295e+02 2.863e+02 3.431e+02 6.352e+02, threshold=5.726e+02, percent-clipped=1.0 +2023-02-07 00:16:20,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:27,181 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:40,713 INFO [train.py:901] (0/4) Epoch 20, batch 1650, loss[loss=0.1877, simple_loss=0.2724, pruned_loss=0.0515, over 8470.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2905, pruned_loss=0.06406, over 1619968.03 frames. ], batch size: 49, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:45,129 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:17:15,947 INFO [train.py:901] (0/4) Epoch 20, batch 1700, loss[loss=0.1754, simple_loss=0.2491, pruned_loss=0.0509, over 7713.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2902, pruned_loss=0.06379, over 1622867.31 frames. 
], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:17:19,372 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.383e+02 2.759e+02 3.259e+02 7.427e+02, threshold=5.517e+02, percent-clipped=3.0 +2023-02-07 00:17:20,951 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4055, 1.7631, 1.8386, 1.1019, 1.8887, 1.3039, 0.4277, 1.6441], + device='cuda:0'), covar=tensor([0.0623, 0.0395, 0.0293, 0.0608, 0.0479, 0.0929, 0.0861, 0.0292], + device='cuda:0'), in_proj_covar=tensor([0.0443, 0.0384, 0.0338, 0.0442, 0.0370, 0.0529, 0.0388, 0.0409], + device='cuda:0'), out_proj_covar=tensor([1.1964e-04, 1.0103e-04, 8.9166e-05, 1.1711e-04, 9.7986e-05, 1.5067e-04, + 1.0512e-04, 1.0900e-04], device='cuda:0') +2023-02-07 00:17:44,035 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0210, 1.4765, 1.6146, 1.3046, 0.9668, 1.3534, 1.7860, 1.4497], + device='cuda:0'), covar=tensor([0.0514, 0.1250, 0.1691, 0.1456, 0.0596, 0.1544, 0.0678, 0.0665], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:17:51,286 INFO [train.py:901] (0/4) Epoch 20, batch 1750, loss[loss=0.2175, simple_loss=0.3076, pruned_loss=0.06372, over 8463.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2912, pruned_loss=0.06437, over 1621185.23 frames. ], batch size: 25, lr: 3.82e-03, grad_scale: 16.0 +2023-02-07 00:18:00,370 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:11,541 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7215, 2.6748, 1.8118, 2.4149, 2.3449, 1.5688, 2.1496, 2.2741], + device='cuda:0'), covar=tensor([0.1453, 0.0408, 0.1270, 0.0639, 0.0642, 0.1533, 0.1014, 0.1001], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0235, 0.0330, 0.0303, 0.0300, 0.0335, 0.0343, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 00:18:17,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:27,005 INFO [train.py:901] (0/4) Epoch 20, batch 1800, loss[loss=0.173, simple_loss=0.2694, pruned_loss=0.03831, over 7919.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2897, pruned_loss=0.06361, over 1618226.16 frames. ], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:18:31,095 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.586e+02 2.965e+02 3.772e+02 7.314e+02, threshold=5.929e+02, percent-clipped=8.0 +2023-02-07 00:18:34,029 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:01,127 INFO [train.py:901] (0/4) Epoch 20, batch 1850, loss[loss=0.2063, simple_loss=0.2958, pruned_loss=0.05838, over 8252.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2876, pruned_loss=0.06255, over 1614232.56 frames. 
], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:04,531 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,143 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,187 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:36,697 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155475.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:37,256 INFO [train.py:901] (0/4) Epoch 20, batch 1900, loss[loss=0.2071, simple_loss=0.2851, pruned_loss=0.06454, over 8255.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2877, pruned_loss=0.06231, over 1615236.93 frames. ], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:38,771 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:41,333 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.441e+02 2.899e+02 3.473e+02 6.405e+02, threshold=5.799e+02, percent-clipped=1.0 +2023-02-07 00:20:11,843 INFO [train.py:901] (0/4) Epoch 20, batch 1950, loss[loss=0.18, simple_loss=0.277, pruned_loss=0.04152, over 8245.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2887, pruned_loss=0.06289, over 1615294.18 frames. ], batch size: 24, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:13,323 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 00:20:19,618 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7449, 1.5768, 1.7352, 1.4359, 0.8596, 1.5957, 1.6222, 1.4224], + device='cuda:0'), covar=tensor([0.0530, 0.1170, 0.1585, 0.1335, 0.0583, 0.1348, 0.0670, 0.0617], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:20:26,408 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:20:26,930 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 00:20:46,989 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 00:20:47,681 INFO [train.py:901] (0/4) Epoch 20, batch 2000, loss[loss=0.2095, simple_loss=0.3038, pruned_loss=0.0576, over 8508.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2897, pruned_loss=0.06343, over 1615523.64 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:51,755 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.363e+02 2.911e+02 3.881e+02 1.027e+03, threshold=5.822e+02, percent-clipped=2.0 +2023-02-07 00:21:20,964 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:22,881 INFO [train.py:901] (0/4) Epoch 20, batch 2050, loss[loss=0.2028, simple_loss=0.278, pruned_loss=0.06383, over 8361.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2893, pruned_loss=0.06338, over 1613353.10 frames. 
], batch size: 24, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:21:40,403 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2922, 2.1207, 1.6088, 1.9675, 1.8263, 1.3708, 1.6254, 1.7208], + device='cuda:0'), covar=tensor([0.1250, 0.0425, 0.1322, 0.0552, 0.0646, 0.1561, 0.0916, 0.0944], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0236, 0.0331, 0.0303, 0.0299, 0.0334, 0.0342, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 00:21:57,546 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:58,085 INFO [train.py:901] (0/4) Epoch 20, batch 2100, loss[loss=0.2276, simple_loss=0.3144, pruned_loss=0.07037, over 8348.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.29, pruned_loss=0.06373, over 1612059.19 frames. ], batch size: 24, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:02,096 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.564e+02 2.968e+02 3.686e+02 8.256e+02, threshold=5.935e+02, percent-clipped=7.0 +2023-02-07 00:22:20,231 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 00:22:22,190 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:22:33,489 INFO [train.py:901] (0/4) Epoch 20, batch 2150, loss[loss=0.1993, simple_loss=0.2846, pruned_loss=0.05695, over 8290.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06399, over 1614428.36 frames. ], batch size: 23, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:39,018 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:07,311 INFO [train.py:901] (0/4) Epoch 20, batch 2200, loss[loss=0.2176, simple_loss=0.3054, pruned_loss=0.06489, over 8194.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2914, pruned_loss=0.06464, over 1612914.96 frames. 
], batch size: 23, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:23:08,250 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9677, 1.6184, 3.4761, 1.6065, 2.3960, 3.8386, 3.9413, 3.3048], + device='cuda:0'), covar=tensor([0.1214, 0.1794, 0.0341, 0.2056, 0.1020, 0.0226, 0.0527, 0.0566], + device='cuda:0'), in_proj_covar=tensor([0.0289, 0.0319, 0.0285, 0.0311, 0.0301, 0.0261, 0.0407, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 00:23:12,093 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.519e+02 2.939e+02 3.787e+02 7.175e+02, threshold=5.878e+02, percent-clipped=4.0 +2023-02-07 00:23:26,030 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:26,111 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:36,456 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1658, 1.6610, 4.4230, 1.9412, 2.5171, 4.9800, 5.0687, 4.3189], + device='cuda:0'), covar=tensor([0.1277, 0.1852, 0.0277, 0.2072, 0.1124, 0.0184, 0.0426, 0.0537], + device='cuda:0'), in_proj_covar=tensor([0.0289, 0.0320, 0.0285, 0.0312, 0.0301, 0.0261, 0.0407, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 00:23:38,410 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155819.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:43,090 INFO [train.py:901] (0/4) Epoch 20, batch 2250, loss[loss=0.2102, simple_loss=0.3021, pruned_loss=0.05915, over 8498.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2916, pruned_loss=0.06502, over 1613356.31 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:23:44,827 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155827.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:24:17,857 INFO [train.py:901] (0/4) Epoch 20, batch 2300, loss[loss=0.2267, simple_loss=0.3006, pruned_loss=0.07636, over 7985.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2922, pruned_loss=0.06573, over 1609459.14 frames. ], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:24:21,974 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.500e+02 2.966e+02 3.753e+02 6.656e+02, threshold=5.933e+02, percent-clipped=3.0 +2023-02-07 00:24:54,606 INFO [train.py:901] (0/4) Epoch 20, batch 2350, loss[loss=0.1632, simple_loss=0.2496, pruned_loss=0.03836, over 7707.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2912, pruned_loss=0.06506, over 1608530.23 frames. ], batch size: 18, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:25:00,019 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:25:02,317 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.97 vs. 
limit=2.0 +2023-02-07 00:25:07,085 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9418, 1.1948, 1.1976, 0.5830, 1.2243, 1.0089, 0.0562, 1.1463], + device='cuda:0'), covar=tensor([0.0500, 0.0388, 0.0356, 0.0589, 0.0406, 0.0976, 0.0865, 0.0307], + device='cuda:0'), in_proj_covar=tensor([0.0444, 0.0382, 0.0336, 0.0440, 0.0368, 0.0528, 0.0390, 0.0409], + device='cuda:0'), out_proj_covar=tensor([1.1971e-04, 1.0045e-04, 8.8696e-05, 1.1652e-04, 9.7482e-05, 1.5041e-04, + 1.0555e-04, 1.0907e-04], device='cuda:0') +2023-02-07 00:25:11,811 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4921, 1.7526, 2.5902, 1.4200, 1.9086, 1.8339, 1.5581, 1.9176], + device='cuda:0'), covar=tensor([0.1931, 0.2501, 0.0911, 0.4345, 0.1871, 0.3174, 0.2231, 0.2167], + device='cuda:0'), in_proj_covar=tensor([0.0515, 0.0585, 0.0550, 0.0628, 0.0637, 0.0586, 0.0521, 0.0625], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:25:23,244 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155967.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:25:29,300 INFO [train.py:901] (0/4) Epoch 20, batch 2400, loss[loss=0.1761, simple_loss=0.2547, pruned_loss=0.04872, over 7684.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2915, pruned_loss=0.06463, over 1615421.03 frames. ], batch size: 18, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:25:33,216 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.377e+02 2.729e+02 3.502e+02 6.388e+02, threshold=5.458e+02, percent-clipped=1.0 +2023-02-07 00:25:45,459 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-156000.pt +2023-02-07 00:26:00,990 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:05,604 INFO [train.py:901] (0/4) Epoch 20, batch 2450, loss[loss=0.1569, simple_loss=0.2382, pruned_loss=0.03778, over 7231.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2916, pruned_loss=0.06488, over 1614495.69 frames. ], batch size: 16, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:26:40,965 INFO [train.py:901] (0/4) Epoch 20, batch 2500, loss[loss=0.2205, simple_loss=0.3072, pruned_loss=0.06685, over 8545.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.292, pruned_loss=0.06508, over 1617749.16 frames. 
], batch size: 49, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:26:45,023 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.463e+02 3.105e+02 3.826e+02 1.382e+03, threshold=6.210e+02, percent-clipped=11.0 +2023-02-07 00:26:45,218 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:49,177 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156088.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:49,234 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3052, 3.7577, 2.4197, 3.1909, 3.1378, 2.1922, 2.9806, 3.3235], + device='cuda:0'), covar=tensor([0.1571, 0.0347, 0.1083, 0.0627, 0.0675, 0.1354, 0.1047, 0.0850], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0235, 0.0329, 0.0305, 0.0297, 0.0334, 0.0340, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 00:27:15,806 INFO [train.py:901] (0/4) Epoch 20, batch 2550, loss[loss=0.2092, simple_loss=0.282, pruned_loss=0.06819, over 8077.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2909, pruned_loss=0.06453, over 1611760.01 frames. ], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:27:21,370 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:29,834 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:50,749 INFO [train.py:901] (0/4) Epoch 20, batch 2600, loss[loss=0.1921, simple_loss=0.2652, pruned_loss=0.05955, over 7292.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2918, pruned_loss=0.06548, over 1613581.54 frames. ], batch size: 16, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:27:54,662 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.376e+02 3.118e+02 3.808e+02 9.704e+02, threshold=6.236e+02, percent-clipped=5.0 +2023-02-07 00:27:57,074 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. 
limit=5.0 +2023-02-07 00:28:00,363 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:07,282 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1488, 1.9407, 2.5433, 2.1602, 2.4692, 2.2059, 1.9346, 1.4602], + device='cuda:0'), covar=tensor([0.5205, 0.4472, 0.1801, 0.3360, 0.2356, 0.2858, 0.2019, 0.4756], + device='cuda:0'), in_proj_covar=tensor([0.0941, 0.0971, 0.0797, 0.0935, 0.0995, 0.0883, 0.0745, 0.0824], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 00:28:12,720 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6944, 2.0763, 3.2761, 1.4770, 2.6522, 2.0891, 1.7808, 2.5396], + device='cuda:0'), covar=tensor([0.1858, 0.2549, 0.0942, 0.4346, 0.1718, 0.3103, 0.2111, 0.2169], + device='cuda:0'), in_proj_covar=tensor([0.0515, 0.0586, 0.0549, 0.0629, 0.0637, 0.0588, 0.0522, 0.0626], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:28:17,459 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:24,592 INFO [train.py:901] (0/4) Epoch 20, batch 2650, loss[loss=0.2067, simple_loss=0.2941, pruned_loss=0.05965, over 8464.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2914, pruned_loss=0.06494, over 1612692.75 frames. ], batch size: 27, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:28:30,659 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:49,175 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:29:00,116 INFO [train.py:901] (0/4) Epoch 20, batch 2700, loss[loss=0.2368, simple_loss=0.2969, pruned_loss=0.08834, over 7915.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2913, pruned_loss=0.0648, over 1618103.40 frames. ], batch size: 20, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:29:04,092 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.401e+02 3.078e+02 3.829e+02 8.557e+02, threshold=6.156e+02, percent-clipped=4.0 +2023-02-07 00:29:23,238 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156308.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:29:35,128 INFO [train.py:901] (0/4) Epoch 20, batch 2750, loss[loss=0.2102, simple_loss=0.2771, pruned_loss=0.07163, over 6774.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2904, pruned_loss=0.06452, over 1610418.33 frames. ], batch size: 15, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:29:43,521 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:01,786 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156363.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:10,369 INFO [train.py:901] (0/4) Epoch 20, batch 2800, loss[loss=0.231, simple_loss=0.3136, pruned_loss=0.07417, over 8515.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2904, pruned_loss=0.06416, over 1608634.44 frames. 
], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:30:15,863 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.534e+02 2.983e+02 3.648e+02 6.974e+02, threshold=5.966e+02, percent-clipped=1.0 +2023-02-07 00:30:20,866 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:38,812 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156415.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:42,967 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5602, 1.8693, 1.9661, 1.2190, 2.0272, 1.5276, 0.4027, 1.7931], + device='cuda:0'), covar=tensor([0.0465, 0.0320, 0.0266, 0.0481, 0.0363, 0.0845, 0.0721, 0.0220], + device='cuda:0'), in_proj_covar=tensor([0.0443, 0.0382, 0.0334, 0.0437, 0.0365, 0.0527, 0.0386, 0.0406], + device='cuda:0'), out_proj_covar=tensor([1.1936e-04, 1.0056e-04, 8.8201e-05, 1.1570e-04, 9.6665e-05, 1.5016e-04, + 1.0456e-04, 1.0826e-04], device='cuda:0') +2023-02-07 00:30:46,260 INFO [train.py:901] (0/4) Epoch 20, batch 2850, loss[loss=0.2195, simple_loss=0.3038, pruned_loss=0.06757, over 8328.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2905, pruned_loss=0.06402, over 1609078.93 frames. ], batch size: 25, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:30:50,397 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156432.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:31:03,903 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8574, 3.7969, 3.4619, 1.7792, 3.4180, 3.4845, 3.3838, 3.3387], + device='cuda:0'), covar=tensor([0.0827, 0.0673, 0.1144, 0.4601, 0.0920, 0.1015, 0.1462, 0.0877], + device='cuda:0'), in_proj_covar=tensor([0.0512, 0.0424, 0.0428, 0.0528, 0.0418, 0.0429, 0.0415, 0.0372], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:31:20,790 INFO [train.py:901] (0/4) Epoch 20, batch 2900, loss[loss=0.2225, simple_loss=0.308, pruned_loss=0.06848, over 8531.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2903, pruned_loss=0.06381, over 1610543.72 frames. ], batch size: 49, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:31:26,323 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.409e+02 2.783e+02 3.401e+02 8.568e+02, threshold=5.566e+02, percent-clipped=1.0 +2023-02-07 00:31:50,393 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:31:53,709 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 00:31:57,135 INFO [train.py:901] (0/4) Epoch 20, batch 2950, loss[loss=0.1974, simple_loss=0.2861, pruned_loss=0.05433, over 8322.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2916, pruned_loss=0.06481, over 1610551.86 frames. 
], batch size: 25, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:32:08,265 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156542.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:11,711 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2930, 1.9460, 4.3791, 2.0640, 2.3944, 4.9588, 5.0739, 4.1433], + device='cuda:0'), covar=tensor([0.1305, 0.1756, 0.0316, 0.2046, 0.1354, 0.0204, 0.0483, 0.0590], + device='cuda:0'), in_proj_covar=tensor([0.0292, 0.0323, 0.0287, 0.0315, 0.0305, 0.0262, 0.0410, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 00:32:11,735 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:31,025 INFO [train.py:901] (0/4) Epoch 20, batch 3000, loss[loss=0.2023, simple_loss=0.2902, pruned_loss=0.05719, over 8336.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2909, pruned_loss=0.06422, over 1613158.98 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:32:31,026 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 00:32:46,835 INFO [train.py:935] (0/4) Epoch 20, validation: loss=0.1756, simple_loss=0.2756, pruned_loss=0.03779, over 944034.00 frames. +2023-02-07 00:32:46,837 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 00:32:48,377 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:51,797 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 2.420e+02 3.007e+02 3.801e+02 6.408e+02, threshold=6.014e+02, percent-clipped=4.0 +2023-02-07 00:33:17,708 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7554, 1.4542, 3.9240, 1.5438, 3.4458, 3.2512, 3.5810, 3.4222], + device='cuda:0'), covar=tensor([0.0720, 0.4370, 0.0644, 0.3810, 0.1215, 0.0978, 0.0644, 0.0780], + device='cuda:0'), in_proj_covar=tensor([0.0612, 0.0632, 0.0680, 0.0612, 0.0695, 0.0600, 0.0596, 0.0661], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:33:22,163 INFO [train.py:901] (0/4) Epoch 20, batch 3050, loss[loss=0.2878, simple_loss=0.36, pruned_loss=0.1078, over 8282.00 frames. ], tot_loss[loss=0.212, simple_loss=0.293, pruned_loss=0.06553, over 1617564.63 frames. ], batch size: 23, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:33:35,524 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.24 vs. limit=5.0 +2023-02-07 00:33:40,590 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:33:40,697 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8115, 1.5652, 1.9199, 1.5912, 0.9758, 1.6906, 2.0453, 2.0780], + device='cuda:0'), covar=tensor([0.0447, 0.1228, 0.1586, 0.1371, 0.0606, 0.1365, 0.0640, 0.0555], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0192, 0.0159, 0.0101, 0.0162, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:33:57,464 INFO [train.py:901] (0/4) Epoch 20, batch 3100, loss[loss=0.2232, simple_loss=0.3065, pruned_loss=0.06998, over 8257.00 frames. 
], tot_loss[loss=0.212, simple_loss=0.2935, pruned_loss=0.06529, over 1620575.36 frames. ], batch size: 24, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:34:02,297 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.428e+02 2.992e+02 3.732e+02 8.006e+02, threshold=5.985e+02, percent-clipped=5.0 +2023-02-07 00:34:09,231 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:34:31,976 INFO [train.py:901] (0/4) Epoch 20, batch 3150, loss[loss=0.2305, simple_loss=0.3119, pruned_loss=0.07454, over 8492.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2933, pruned_loss=0.06523, over 1620365.78 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:01,268 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:01,303 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:07,253 INFO [train.py:901] (0/4) Epoch 20, batch 3200, loss[loss=0.2642, simple_loss=0.3275, pruned_loss=0.1004, over 6915.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2914, pruned_loss=0.06432, over 1617730.11 frames. ], batch size: 72, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:11,890 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.338e+02 2.875e+02 3.612e+02 1.133e+03, threshold=5.749e+02, percent-clipped=4.0 +2023-02-07 00:35:14,168 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5502, 1.5718, 2.0985, 1.3803, 1.1971, 2.0250, 0.3471, 1.2373], + device='cuda:0'), covar=tensor([0.1945, 0.1537, 0.0421, 0.1395, 0.2908, 0.0515, 0.2171, 0.1471], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0193, 0.0124, 0.0219, 0.0267, 0.0133, 0.0167, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 00:35:26,507 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156803.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:41,821 INFO [train.py:901] (0/4) Epoch 20, batch 3250, loss[loss=0.1922, simple_loss=0.2714, pruned_loss=0.05653, over 8131.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2902, pruned_loss=0.06377, over 1614376.13 frames. ], batch size: 22, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:43,296 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:36:17,054 INFO [train.py:901] (0/4) Epoch 20, batch 3300, loss[loss=0.202, simple_loss=0.2895, pruned_loss=0.05725, over 8360.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2903, pruned_loss=0.06372, over 1617221.22 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:36:21,768 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.341e+02 2.967e+02 3.887e+02 7.432e+02, threshold=5.934e+02, percent-clipped=7.0 +2023-02-07 00:36:35,541 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156903.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:36:51,533 INFO [train.py:901] (0/4) Epoch 20, batch 3350, loss[loss=0.2013, simple_loss=0.2795, pruned_loss=0.06153, over 7928.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2905, pruned_loss=0.06352, over 1622331.69 frames. 
], batch size: 20, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:37:07,172 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:37:11,494 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9587, 2.1556, 1.7172, 2.6635, 1.2194, 1.6158, 1.8590, 2.1991], + device='cuda:0'), covar=tensor([0.0753, 0.0789, 0.0978, 0.0409, 0.1161, 0.1339, 0.0902, 0.0752], + device='cuda:0'), in_proj_covar=tensor([0.0236, 0.0199, 0.0250, 0.0215, 0.0208, 0.0252, 0.0255, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 00:37:25,821 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156974.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:37:27,095 INFO [train.py:901] (0/4) Epoch 20, batch 3400, loss[loss=0.2325, simple_loss=0.3097, pruned_loss=0.0777, over 8240.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2915, pruned_loss=0.06414, over 1624182.10 frames. ], batch size: 22, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:37:31,900 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.508e+02 3.011e+02 3.882e+02 8.239e+02, threshold=6.022e+02, percent-clipped=6.0 +2023-02-07 00:37:49,719 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8287, 1.6197, 1.8636, 1.6459, 0.9188, 1.5857, 2.1605, 2.0056], + device='cuda:0'), covar=tensor([0.0451, 0.1232, 0.1681, 0.1383, 0.0629, 0.1481, 0.0641, 0.0568], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:37:59,111 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6220, 1.3470, 1.5483, 1.2357, 0.9180, 1.3514, 1.5568, 1.2773], + device='cuda:0'), covar=tensor([0.0556, 0.1299, 0.1691, 0.1479, 0.0599, 0.1552, 0.0694, 0.0682], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:38:01,289 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:03,918 INFO [train.py:901] (0/4) Epoch 20, batch 3450, loss[loss=0.1988, simple_loss=0.2709, pruned_loss=0.06338, over 7532.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2917, pruned_loss=0.06465, over 1619527.10 frames. 
], batch size: 18, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:38:05,349 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:11,407 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7011, 2.2261, 3.3257, 1.8670, 1.6738, 3.1658, 0.9107, 2.0791], + device='cuda:0'), covar=tensor([0.1604, 0.1392, 0.0351, 0.2022, 0.3047, 0.0423, 0.2374, 0.1775], + device='cuda:0'), in_proj_covar=tensor([0.0184, 0.0191, 0.0123, 0.0216, 0.0265, 0.0132, 0.0165, 0.0186], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 00:38:18,895 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:38,200 INFO [train.py:901] (0/4) Epoch 20, batch 3500, loss[loss=0.1988, simple_loss=0.2802, pruned_loss=0.05869, over 8460.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2925, pruned_loss=0.06515, over 1617429.55 frames. ], batch size: 25, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:38:43,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.548e+02 3.004e+02 3.939e+02 7.448e+02, threshold=6.007e+02, percent-clipped=9.0 +2023-02-07 00:38:55,890 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 00:39:02,270 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 00:39:03,068 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:39:09,294 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6541, 1.8902, 2.0774, 1.2577, 2.2033, 1.4717, 0.6359, 1.8677], + device='cuda:0'), covar=tensor([0.0615, 0.0370, 0.0284, 0.0588, 0.0391, 0.0923, 0.0865, 0.0306], + device='cuda:0'), in_proj_covar=tensor([0.0446, 0.0388, 0.0340, 0.0440, 0.0371, 0.0535, 0.0392, 0.0413], + device='cuda:0'), out_proj_covar=tensor([1.2021e-04, 1.0204e-04, 8.9864e-05, 1.1636e-04, 9.8216e-05, 1.5260e-04, + 1.0595e-04, 1.1022e-04], device='cuda:0') +2023-02-07 00:39:13,059 INFO [train.py:901] (0/4) Epoch 20, batch 3550, loss[loss=0.1958, simple_loss=0.2779, pruned_loss=0.05688, over 7929.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2922, pruned_loss=0.06511, over 1616468.54 frames. ], batch size: 20, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:39:37,677 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157160.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:39:48,301 INFO [train.py:901] (0/4) Epoch 20, batch 3600, loss[loss=0.2278, simple_loss=0.3092, pruned_loss=0.07314, over 8502.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2911, pruned_loss=0.06442, over 1614027.29 frames. ], batch size: 26, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:39:53,036 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.446e+02 2.923e+02 3.668e+02 9.434e+02, threshold=5.847e+02, percent-clipped=4.0 +2023-02-07 00:40:10,603 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. 
limit=2.0 +2023-02-07 00:40:15,592 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6425, 1.4583, 1.5955, 1.3640, 0.8945, 1.3972, 1.4992, 1.4138], + device='cuda:0'), covar=tensor([0.0647, 0.1265, 0.1695, 0.1462, 0.0622, 0.1533, 0.0736, 0.0664], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:40:24,181 INFO [train.py:901] (0/4) Epoch 20, batch 3650, loss[loss=0.2836, simple_loss=0.343, pruned_loss=0.1121, over 6985.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2909, pruned_loss=0.06412, over 1614192.76 frames. ], batch size: 71, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:40:24,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157226.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:40:38,564 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:40:42,200 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 00:40:58,618 INFO [train.py:901] (0/4) Epoch 20, batch 3700, loss[loss=0.2125, simple_loss=0.2931, pruned_loss=0.06592, over 8329.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2922, pruned_loss=0.06447, over 1617943.77 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:41:03,187 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.545e+02 3.038e+02 3.849e+02 9.039e+02, threshold=6.076e+02, percent-clipped=6.0 +2023-02-07 00:41:05,249 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 00:41:33,573 INFO [train.py:901] (0/4) Epoch 20, batch 3750, loss[loss=0.1931, simple_loss=0.2861, pruned_loss=0.05007, over 8702.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2932, pruned_loss=0.0652, over 1619274.69 frames. ], batch size: 39, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:41:58,948 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157362.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:05,450 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:06,812 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157374.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:07,963 INFO [train.py:901] (0/4) Epoch 20, batch 3800, loss[loss=0.2163, simple_loss=0.302, pruned_loss=0.06529, over 8286.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2914, pruned_loss=0.0644, over 1618016.42 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:42:12,515 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.302e+02 2.981e+02 3.884e+02 7.104e+02, threshold=5.962e+02, percent-clipped=4.0 +2023-02-07 00:42:25,022 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:29,222 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.10 vs. 
limit=5.0 +2023-02-07 00:42:39,614 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157422.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:42:42,793 INFO [train.py:901] (0/4) Epoch 20, batch 3850, loss[loss=0.2462, simple_loss=0.3258, pruned_loss=0.08329, over 8516.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2911, pruned_loss=0.06467, over 1612711.10 frames. ], batch size: 26, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:43:05,839 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5562, 4.5244, 4.1084, 2.4330, 4.0383, 4.3046, 4.0988, 4.0457], + device='cuda:0'), covar=tensor([0.0772, 0.0577, 0.1168, 0.4053, 0.0769, 0.0804, 0.1197, 0.0653], + device='cuda:0'), in_proj_covar=tensor([0.0511, 0.0421, 0.0426, 0.0526, 0.0415, 0.0428, 0.0413, 0.0371], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:43:09,743 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 00:43:17,659 INFO [train.py:901] (0/4) Epoch 20, batch 3900, loss[loss=0.1584, simple_loss=0.2342, pruned_loss=0.0413, over 7655.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.291, pruned_loss=0.0646, over 1612230.25 frames. ], batch size: 19, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:43:21,784 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157482.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:22,201 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.513e+02 3.153e+02 3.900e+02 7.255e+02, threshold=6.305e+02, percent-clipped=5.0 +2023-02-07 00:43:24,973 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157487.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:37,134 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:39,407 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:43,541 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1435, 1.3290, 4.3328, 1.5680, 3.7913, 3.6203, 3.9255, 3.7780], + device='cuda:0'), covar=tensor([0.0611, 0.4683, 0.0544, 0.4137, 0.1157, 0.0946, 0.0618, 0.0682], + device='cuda:0'), in_proj_covar=tensor([0.0612, 0.0635, 0.0687, 0.0616, 0.0697, 0.0603, 0.0599, 0.0665], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:43:51,264 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157524.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:43:52,537 INFO [train.py:901] (0/4) Epoch 20, batch 3950, loss[loss=0.1834, simple_loss=0.27, pruned_loss=0.04841, over 7538.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2914, pruned_loss=0.0646, over 1614559.09 frames. 
], batch size: 18, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:44:25,099 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2047, 4.1368, 3.8274, 1.9451, 3.7516, 3.8536, 3.7270, 3.6701], + device='cuda:0'), covar=tensor([0.0733, 0.0525, 0.1046, 0.4664, 0.0786, 0.0955, 0.1310, 0.0758], + device='cuda:0'), in_proj_covar=tensor([0.0516, 0.0425, 0.0431, 0.0531, 0.0418, 0.0432, 0.0418, 0.0374], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:44:28,111 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0 +2023-02-07 00:44:28,450 INFO [train.py:901] (0/4) Epoch 20, batch 4000, loss[loss=0.1978, simple_loss=0.2662, pruned_loss=0.06474, over 7787.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2902, pruned_loss=0.06362, over 1613062.92 frames. ], batch size: 19, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:44:33,891 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.441e+02 3.259e+02 3.960e+02 7.383e+02, threshold=6.518e+02, percent-clipped=3.0 +2023-02-07 00:44:34,064 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6549, 1.7405, 4.8292, 1.7916, 4.3094, 4.0489, 4.4291, 4.3004], + device='cuda:0'), covar=tensor([0.0534, 0.4457, 0.0504, 0.4066, 0.0991, 0.0993, 0.0550, 0.0586], + device='cuda:0'), in_proj_covar=tensor([0.0614, 0.0637, 0.0688, 0.0619, 0.0701, 0.0606, 0.0601, 0.0667], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:44:36,139 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:44:41,348 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6379, 2.6939, 1.9498, 2.3901, 2.2195, 1.6882, 2.2889, 2.3237], + device='cuda:0'), covar=tensor([0.1352, 0.0341, 0.1045, 0.0538, 0.0706, 0.1397, 0.0865, 0.0849], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0235, 0.0332, 0.0307, 0.0300, 0.0335, 0.0344, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 00:44:57,783 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157618.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:44:58,431 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157619.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:45:03,486 INFO [train.py:901] (0/4) Epoch 20, batch 4050, loss[loss=0.1794, simple_loss=0.2595, pruned_loss=0.04961, over 7795.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2906, pruned_loss=0.06362, over 1614143.72 frames. ], batch size: 19, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:45:15,102 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:45:29,208 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 00:45:33,138 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-07 00:45:38,035 INFO [train.py:901] (0/4) Epoch 20, batch 4100, loss[loss=0.2366, simple_loss=0.3227, pruned_loss=0.07523, over 8368.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2905, pruned_loss=0.06355, over 1617117.60 frames. 
], batch size: 24, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:45:42,595 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.468e+02 3.178e+02 4.268e+02 8.149e+02, threshold=6.355e+02, percent-clipped=4.0 +2023-02-07 00:46:07,480 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157718.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:12,788 INFO [train.py:901] (0/4) Epoch 20, batch 4150, loss[loss=0.2081, simple_loss=0.2892, pruned_loss=0.0635, over 8540.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2908, pruned_loss=0.06357, over 1618293.07 frames. ], batch size: 28, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:46:25,105 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157743.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:25,619 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157744.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:40,577 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157766.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:46:41,942 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:45,980 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3951, 2.0619, 2.7701, 2.3101, 2.7366, 2.4000, 2.0995, 1.5934], + device='cuda:0'), covar=tensor([0.5071, 0.4821, 0.1836, 0.3389, 0.2342, 0.2922, 0.1945, 0.5037], + device='cuda:0'), in_proj_covar=tensor([0.0934, 0.0966, 0.0788, 0.0931, 0.0984, 0.0882, 0.0737, 0.0815], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 00:46:46,937 INFO [train.py:901] (0/4) Epoch 20, batch 4200, loss[loss=0.2358, simple_loss=0.3245, pruned_loss=0.07351, over 8191.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2918, pruned_loss=0.06403, over 1622047.12 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:46:52,343 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.385e+02 2.811e+02 3.577e+02 7.269e+02, threshold=5.621e+02, percent-clipped=2.0 +2023-02-07 00:47:08,779 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 00:47:10,326 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6086, 1.4497, 4.7465, 1.8503, 4.2422, 3.9236, 4.3399, 4.1756], + device='cuda:0'), covar=tensor([0.0472, 0.4582, 0.0486, 0.3827, 0.0987, 0.0925, 0.0513, 0.0626], + device='cuda:0'), in_proj_covar=tensor([0.0619, 0.0640, 0.0694, 0.0622, 0.0703, 0.0610, 0.0605, 0.0670], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 00:47:23,338 INFO [train.py:901] (0/4) Epoch 20, batch 4250, loss[loss=0.1999, simple_loss=0.2847, pruned_loss=0.05754, over 7923.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2921, pruned_loss=0.06431, over 1618726.33 frames. 
], batch size: 20, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:47:26,220 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8806, 1.7141, 1.8981, 1.6330, 1.0125, 1.6599, 2.1112, 1.8975], + device='cuda:0'), covar=tensor([0.0435, 0.1290, 0.1669, 0.1374, 0.0594, 0.1450, 0.0632, 0.0643], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0113, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 00:47:28,307 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:32,318 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 00:47:46,384 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157859.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:53,340 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157868.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:47:55,479 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:58,152 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157875.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:58,656 INFO [train.py:901] (0/4) Epoch 20, batch 4300, loss[loss=0.1718, simple_loss=0.2585, pruned_loss=0.04251, over 8084.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2917, pruned_loss=0.06373, over 1617507.43 frames. ], batch size: 21, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:48:02,027 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157881.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:48:03,192 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.270e+02 2.745e+02 3.400e+02 8.203e+02, threshold=5.491e+02, percent-clipped=7.0 +2023-02-07 00:48:15,333 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157900.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:48:17,322 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6480, 1.4473, 2.9244, 1.3351, 2.1144, 3.0385, 3.2288, 2.6322], + device='cuda:0'), covar=tensor([0.1135, 0.1571, 0.0352, 0.2100, 0.0905, 0.0301, 0.0632, 0.0561], + device='cuda:0'), in_proj_covar=tensor([0.0292, 0.0318, 0.0285, 0.0311, 0.0300, 0.0262, 0.0407, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 00:48:24,974 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 00:48:33,497 INFO [train.py:901] (0/4) Epoch 20, batch 4350, loss[loss=0.2857, simple_loss=0.3533, pruned_loss=0.1091, over 7025.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2928, pruned_loss=0.06472, over 1617966.83 frames. ], batch size: 72, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:48:36,277 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:49:04,095 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-07 00:49:08,270 INFO [train.py:901] (0/4) Epoch 20, batch 4400, loss[loss=0.1691, simple_loss=0.2513, pruned_loss=0.04347, over 8081.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.292, pruned_loss=0.06407, over 1617504.14 frames. ], batch size: 21, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:49:13,795 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.443e+02 2.894e+02 3.714e+02 1.238e+03, threshold=5.788e+02, percent-clipped=6.0 +2023-02-07 00:49:13,987 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157983.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:49:24,899 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-158000.pt +2023-02-07 00:49:44,328 INFO [train.py:901] (0/4) Epoch 20, batch 4450, loss[loss=0.2388, simple_loss=0.3168, pruned_loss=0.08036, over 8017.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2911, pruned_loss=0.06355, over 1617773.17 frames. ], batch size: 22, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:49:45,692 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 00:49:57,304 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158045.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:18,898 INFO [train.py:901] (0/4) Epoch 20, batch 4500, loss[loss=0.2294, simple_loss=0.298, pruned_loss=0.08044, over 7655.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2911, pruned_loss=0.06375, over 1614330.46 frames. ], batch size: 19, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:50:23,587 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.382e+02 2.908e+02 3.384e+02 7.082e+02, threshold=5.816e+02, percent-clipped=5.0 +2023-02-07 00:50:27,969 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:39,142 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 00:50:45,149 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158114.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:45,854 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158115.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:53,561 INFO [train.py:901] (0/4) Epoch 20, batch 4550, loss[loss=0.1937, simple_loss=0.2797, pruned_loss=0.05384, over 8704.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.291, pruned_loss=0.0644, over 1612778.38 frames. ], batch size: 30, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:51:01,048 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158137.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:51:02,894 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158140.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:51:02,943 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158140.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:51:18,407 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158162.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:51:28,240 INFO [train.py:901] (0/4) Epoch 20, batch 4600, loss[loss=0.2051, simple_loss=0.2932, pruned_loss=0.05847, over 8241.00 frames. 
], tot_loss[loss=0.2101, simple_loss=0.2908, pruned_loss=0.06466, over 1612677.61 frames. ], batch size: 24, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:51:32,827 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.506e+02 3.217e+02 3.763e+02 8.986e+02, threshold=6.435e+02, percent-clipped=3.0 +2023-02-07 00:51:54,922 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:52:01,856 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7982, 1.7743, 2.3161, 1.6987, 1.3715, 2.3146, 0.7076, 1.6322], + device='cuda:0'), covar=tensor([0.1784, 0.1132, 0.0321, 0.1155, 0.2699, 0.0394, 0.2037, 0.1228], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0195, 0.0124, 0.0219, 0.0268, 0.0134, 0.0167, 0.0189], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 00:52:03,093 INFO [train.py:901] (0/4) Epoch 20, batch 4650, loss[loss=0.2187, simple_loss=0.2979, pruned_loss=0.06977, over 8657.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.29, pruned_loss=0.06444, over 1610664.81 frames. ], batch size: 34, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:52:12,047 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158239.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:52:30,111 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158264.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:52:37,867 INFO [train.py:901] (0/4) Epoch 20, batch 4700, loss[loss=0.1994, simple_loss=0.2902, pruned_loss=0.0543, over 8328.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2891, pruned_loss=0.06361, over 1607246.09 frames. ], batch size: 25, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:52:42,601 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.408e+02 3.012e+02 4.119e+02 1.091e+03, threshold=6.025e+02, percent-clipped=3.0 +2023-02-07 00:52:55,809 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:53:12,689 INFO [train.py:901] (0/4) Epoch 20, batch 4750, loss[loss=0.2083, simple_loss=0.2826, pruned_loss=0.06705, over 8721.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2882, pruned_loss=0.06285, over 1609236.58 frames. ], batch size: 30, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:53:12,920 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158326.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:53:15,563 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158330.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:53:40,940 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 00:53:43,665 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 00:53:48,277 INFO [train.py:901] (0/4) Epoch 20, batch 4800, loss[loss=0.2583, simple_loss=0.3229, pruned_loss=0.09688, over 7021.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2878, pruned_loss=0.063, over 1604884.02 frames. 
], batch size: 72, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:53:52,906 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.386e+02 2.729e+02 3.445e+02 7.258e+02, threshold=5.458e+02, percent-clipped=2.0 +2023-02-07 00:54:06,928 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158402.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:54:22,895 INFO [train.py:901] (0/4) Epoch 20, batch 4850, loss[loss=0.1892, simple_loss=0.2769, pruned_loss=0.05072, over 8627.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2886, pruned_loss=0.06333, over 1608836.09 frames. ], batch size: 39, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:54:23,837 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9900, 1.7227, 2.0278, 1.7973, 1.9829, 2.0458, 1.8369, 0.7894], + device='cuda:0'), covar=tensor([0.5367, 0.4480, 0.1913, 0.3554, 0.2548, 0.2964, 0.1957, 0.5132], + device='cuda:0'), in_proj_covar=tensor([0.0934, 0.0966, 0.0788, 0.0933, 0.0990, 0.0882, 0.0740, 0.0820], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 00:54:33,547 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 00:54:35,059 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9760, 1.7536, 3.4647, 1.5161, 2.3154, 3.7605, 3.9241, 3.1992], + device='cuda:0'), covar=tensor([0.1338, 0.1766, 0.0388, 0.2251, 0.1195, 0.0257, 0.0588, 0.0632], + device='cuda:0'), in_proj_covar=tensor([0.0293, 0.0320, 0.0287, 0.0313, 0.0303, 0.0263, 0.0410, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 00:54:51,891 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0894, 2.0130, 3.1573, 1.7187, 2.4241, 3.4748, 3.5108, 3.0088], + device='cuda:0'), covar=tensor([0.1140, 0.1439, 0.0453, 0.1951, 0.1085, 0.0241, 0.0595, 0.0541], + device='cuda:0'), in_proj_covar=tensor([0.0293, 0.0319, 0.0286, 0.0312, 0.0303, 0.0262, 0.0409, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 00:54:57,238 INFO [train.py:901] (0/4) Epoch 20, batch 4900, loss[loss=0.2445, simple_loss=0.3143, pruned_loss=0.08735, over 7079.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2891, pruned_loss=0.06383, over 1605921.12 frames. ], batch size: 71, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:55:02,466 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.481e+02 3.123e+02 4.208e+02 8.958e+02, threshold=6.246e+02, percent-clipped=7.0 +2023-02-07 00:55:03,227 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:55:32,884 INFO [train.py:901] (0/4) Epoch 20, batch 4950, loss[loss=0.1988, simple_loss=0.2841, pruned_loss=0.0567, over 7965.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2895, pruned_loss=0.06386, over 1607742.57 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:56:07,757 INFO [train.py:901] (0/4) Epoch 20, batch 5000, loss[loss=0.2214, simple_loss=0.2975, pruned_loss=0.07265, over 8476.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.29, pruned_loss=0.06384, over 1612006.48 frames. 
], batch size: 25, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:56:12,218 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.361e+02 2.881e+02 3.667e+02 7.563e+02, threshold=5.761e+02, percent-clipped=2.0 +2023-02-07 00:56:12,596 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-07 00:56:14,503 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:56:23,817 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158599.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:56:32,785 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158611.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:56:42,897 INFO [train.py:901] (0/4) Epoch 20, batch 5050, loss[loss=0.2869, simple_loss=0.349, pruned_loss=0.1124, over 8025.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2919, pruned_loss=0.06519, over 1612434.69 frames. ], batch size: 22, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:56:54,121 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7641, 1.7193, 2.3468, 1.4680, 1.3136, 2.2912, 0.5624, 1.3944], + device='cuda:0'), covar=tensor([0.2207, 0.1434, 0.0348, 0.1651, 0.2942, 0.0518, 0.2387, 0.1702], + device='cuda:0'), in_proj_covar=tensor([0.0189, 0.0197, 0.0127, 0.0222, 0.0273, 0.0135, 0.0171, 0.0192], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 00:57:10,200 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 00:57:17,768 INFO [train.py:901] (0/4) Epoch 20, batch 5100, loss[loss=0.2523, simple_loss=0.3223, pruned_loss=0.09121, over 7147.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2905, pruned_loss=0.06451, over 1610165.89 frames. ], batch size: 71, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:57:23,330 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.670e+02 3.233e+02 3.910e+02 8.185e+02, threshold=6.466e+02, percent-clipped=7.0 +2023-02-07 00:57:53,848 INFO [train.py:901] (0/4) Epoch 20, batch 5150, loss[loss=0.2184, simple_loss=0.2881, pruned_loss=0.07433, over 7976.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2903, pruned_loss=0.06427, over 1610306.86 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:58:07,543 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158746.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:58:28,394 INFO [train.py:901] (0/4) Epoch 20, batch 5200, loss[loss=0.2463, simple_loss=0.3165, pruned_loss=0.08807, over 8460.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2901, pruned_loss=0.06421, over 1610089.22 frames. 
], batch size: 29, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:58:30,714 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7354, 1.8372, 1.5983, 2.5114, 1.2258, 1.4033, 1.7636, 1.9512], + device='cuda:0'), covar=tensor([0.0938, 0.0892, 0.1187, 0.0542, 0.1201, 0.1593, 0.0973, 0.0888], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0212, 0.0203, 0.0246, 0.0249, 0.0207], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 00:58:33,214 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.433e+02 2.837e+02 3.461e+02 7.505e+02, threshold=5.673e+02, percent-clipped=2.0 +2023-02-07 00:58:41,624 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:58:46,650 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:58:52,344 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 00:59:03,978 INFO [train.py:901] (0/4) Epoch 20, batch 5250, loss[loss=0.2201, simple_loss=0.3058, pruned_loss=0.06717, over 8502.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2904, pruned_loss=0.06443, over 1611192.54 frames. ], batch size: 26, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:59:11,286 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 00:59:22,852 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158853.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:59:24,330 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158855.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:59:28,231 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158861.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:59:38,589 INFO [train.py:901] (0/4) Epoch 20, batch 5300, loss[loss=0.186, simple_loss=0.2642, pruned_loss=0.05386, over 7425.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2903, pruned_loss=0.06447, over 1612622.06 frames. ], batch size: 17, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 00:59:41,413 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:59:43,356 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.336e+02 2.792e+02 3.296e+02 7.091e+02, threshold=5.585e+02, percent-clipped=2.0 +2023-02-07 00:59:52,521 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0 +2023-02-07 01:00:13,209 INFO [train.py:901] (0/4) Epoch 20, batch 5350, loss[loss=0.1791, simple_loss=0.267, pruned_loss=0.04559, over 7655.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.291, pruned_loss=0.06438, over 1618144.30 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:00:15,060 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-07 01:00:27,705 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 01:00:48,002 INFO [train.py:901] (0/4) Epoch 20, batch 5400, loss[loss=0.1927, simple_loss=0.2588, pruned_loss=0.06329, over 7224.00 frames. 
], tot_loss[loss=0.2099, simple_loss=0.2904, pruned_loss=0.06463, over 1615284.08 frames. ], batch size: 16, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:00:52,646 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.394e+02 2.966e+02 3.887e+02 6.953e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-07 01:01:16,690 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 01:01:22,919 INFO [train.py:901] (0/4) Epoch 20, batch 5450, loss[loss=0.1988, simple_loss=0.2936, pruned_loss=0.05196, over 8295.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2909, pruned_loss=0.06419, over 1617827.26 frames. ], batch size: 23, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:01:42,891 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3302, 2.0808, 2.7845, 2.2209, 2.7176, 2.3321, 2.0863, 1.6546], + device='cuda:0'), covar=tensor([0.4935, 0.4409, 0.1823, 0.3304, 0.2147, 0.2651, 0.1717, 0.4794], + device='cuda:0'), in_proj_covar=tensor([0.0925, 0.0958, 0.0784, 0.0924, 0.0977, 0.0874, 0.0732, 0.0809], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 01:01:57,422 INFO [train.py:901] (0/4) Epoch 20, batch 5500, loss[loss=0.2298, simple_loss=0.3112, pruned_loss=0.07416, over 8243.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2911, pruned_loss=0.06425, over 1615563.80 frames. ], batch size: 24, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:02:00,098 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 01:02:02,834 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.298e+02 2.656e+02 3.222e+02 6.486e+02, threshold=5.312e+02, percent-clipped=1.0 +2023-02-07 01:02:03,906 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.46 vs. limit=2.0 +2023-02-07 01:02:05,769 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159087.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:21,366 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 01:02:27,304 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:29,231 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7227, 2.3592, 4.2670, 1.5646, 3.2935, 2.3830, 1.9023, 3.1127], + device='cuda:0'), covar=tensor([0.1817, 0.2632, 0.0701, 0.4336, 0.1539, 0.3030, 0.2122, 0.2302], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0590, 0.0554, 0.0634, 0.0643, 0.0589, 0.0528, 0.0629], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:02:32,429 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7902, 1.6760, 3.1679, 1.4699, 2.2901, 3.4668, 3.5196, 2.9679], + device='cuda:0'), covar=tensor([0.1175, 0.1492, 0.0333, 0.2002, 0.0870, 0.0227, 0.0546, 0.0522], + device='cuda:0'), in_proj_covar=tensor([0.0294, 0.0319, 0.0286, 0.0314, 0.0304, 0.0262, 0.0410, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 01:02:32,980 INFO [train.py:901] (0/4) Epoch 20, batch 5550, loss[loss=0.2046, simple_loss=0.2791, pruned_loss=0.06505, over 7532.00 frames. 
], tot_loss[loss=0.2098, simple_loss=0.2915, pruned_loss=0.06403, over 1619851.34 frames. ], batch size: 18, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:02:41,915 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159139.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:43,056 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 01:02:44,150 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:45,997 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159145.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:50,311 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 01:03:08,170 INFO [train.py:901] (0/4) Epoch 20, batch 5600, loss[loss=0.2646, simple_loss=0.3377, pruned_loss=0.09579, over 8531.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.291, pruned_loss=0.06376, over 1619181.21 frames. ], batch size: 31, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:03:09,014 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:03:12,919 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.419e+02 2.780e+02 3.445e+02 7.739e+02, threshold=5.561e+02, percent-clipped=2.0 +2023-02-07 01:03:20,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6539, 2.0573, 3.2679, 1.4761, 2.4946, 2.1944, 1.7848, 2.5263], + device='cuda:0'), covar=tensor([0.1856, 0.2635, 0.0900, 0.4398, 0.1801, 0.3035, 0.2177, 0.2168], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0589, 0.0555, 0.0634, 0.0643, 0.0589, 0.0528, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:03:23,289 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159197.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:03:44,008 INFO [train.py:901] (0/4) Epoch 20, batch 5650, loss[loss=0.1931, simple_loss=0.2759, pruned_loss=0.0552, over 8034.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2904, pruned_loss=0.06378, over 1614353.71 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:04:03,403 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159254.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:04:04,621 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 01:04:07,496 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159260.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:04:19,056 INFO [train.py:901] (0/4) Epoch 20, batch 5700, loss[loss=0.189, simple_loss=0.2768, pruned_loss=0.05057, over 8505.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2912, pruned_loss=0.06405, over 1615282.57 frames. 
], batch size: 28, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:04:25,333 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.576e+02 3.260e+02 4.013e+02 6.441e+02, threshold=6.520e+02, percent-clipped=4.0 +2023-02-07 01:04:42,302 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159308.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:04:45,039 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159312.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:04:54,514 INFO [train.py:901] (0/4) Epoch 20, batch 5750, loss[loss=0.1838, simple_loss=0.2857, pruned_loss=0.04097, over 8193.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2907, pruned_loss=0.06402, over 1614881.55 frames. ], batch size: 23, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:04:59,220 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 01:05:09,318 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 01:05:23,185 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4781, 1.6918, 4.5652, 2.1807, 2.5827, 5.2778, 5.2895, 4.5888], + device='cuda:0'), covar=tensor([0.1062, 0.1665, 0.0230, 0.1748, 0.1070, 0.0141, 0.0412, 0.0441], + device='cuda:0'), in_proj_covar=tensor([0.0291, 0.0316, 0.0284, 0.0311, 0.0302, 0.0260, 0.0407, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 01:05:29,348 INFO [train.py:901] (0/4) Epoch 20, batch 5800, loss[loss=0.2014, simple_loss=0.2911, pruned_loss=0.05581, over 8560.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.06342, over 1612171.29 frames. ], batch size: 39, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:05:35,563 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.438e+02 2.992e+02 3.849e+02 1.447e+03, threshold=5.984e+02, percent-clipped=4.0 +2023-02-07 01:05:56,875 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0063, 1.6559, 3.1231, 1.4133, 2.1740, 3.3608, 3.5487, 2.8826], + device='cuda:0'), covar=tensor([0.1050, 0.1541, 0.0372, 0.2131, 0.1064, 0.0253, 0.0476, 0.0548], + device='cuda:0'), in_proj_covar=tensor([0.0291, 0.0315, 0.0283, 0.0310, 0.0302, 0.0259, 0.0405, 0.0300], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 01:06:04,881 INFO [train.py:901] (0/4) Epoch 20, batch 5850, loss[loss=0.2067, simple_loss=0.3069, pruned_loss=0.05327, over 8460.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2898, pruned_loss=0.06318, over 1614878.57 frames. ], batch size: 25, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:06:08,463 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:06:39,995 INFO [train.py:901] (0/4) Epoch 20, batch 5900, loss[loss=0.2336, simple_loss=0.3034, pruned_loss=0.08189, over 7274.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.29, pruned_loss=0.06317, over 1613564.06 frames. ], batch size: 74, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:06:43,334 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. 
limit=2.0 +2023-02-07 01:06:45,630 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.454e+02 2.951e+02 3.822e+02 7.063e+02, threshold=5.901e+02, percent-clipped=2.0 +2023-02-07 01:07:04,115 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:08,807 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159516.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:12,080 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:15,457 INFO [train.py:901] (0/4) Epoch 20, batch 5950, loss[loss=0.2139, simple_loss=0.3059, pruned_loss=0.06098, over 8487.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.29, pruned_loss=0.0634, over 1613928.31 frames. ], batch size: 28, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:07:21,759 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:26,375 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:29,658 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:45,590 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159568.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:07:50,335 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4258, 4.3643, 3.9852, 2.0668, 3.8955, 3.9394, 3.9331, 3.7063], + device='cuda:0'), covar=tensor([0.0718, 0.0563, 0.1047, 0.4097, 0.0876, 0.0993, 0.1245, 0.0868], + device='cuda:0'), in_proj_covar=tensor([0.0512, 0.0424, 0.0428, 0.0526, 0.0415, 0.0427, 0.0413, 0.0372], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:07:50,944 INFO [train.py:901] (0/4) Epoch 20, batch 6000, loss[loss=0.2643, simple_loss=0.3385, pruned_loss=0.09508, over 8437.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2904, pruned_loss=0.06369, over 1613416.06 frames. ], batch size: 27, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:07:50,945 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 01:08:04,190 INFO [train.py:935] (0/4) Epoch 20, validation: loss=0.175, simple_loss=0.275, pruned_loss=0.03755, over 944034.00 frames. +2023-02-07 01:08:04,192 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 01:08:09,556 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.504e+02 2.869e+02 3.482e+02 8.370e+02, threshold=5.739e+02, percent-clipped=5.0 +2023-02-07 01:08:15,914 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159593.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:08:38,927 INFO [train.py:901] (0/4) Epoch 20, batch 6050, loss[loss=0.2025, simple_loss=0.2864, pruned_loss=0.05933, over 8559.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2886, pruned_loss=0.06276, over 1614952.69 frames. 
], batch size: 31, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:08:45,968 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:08:55,611 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159649.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:08:55,912 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 01:08:57,640 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:09:14,781 INFO [train.py:901] (0/4) Epoch 20, batch 6100, loss[loss=0.2005, simple_loss=0.2945, pruned_loss=0.05328, over 8483.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2888, pruned_loss=0.06272, over 1618410.00 frames. ], batch size: 28, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:09:21,006 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.453e+02 2.842e+02 3.745e+02 1.322e+03, threshold=5.684e+02, percent-clipped=4.0 +2023-02-07 01:09:41,572 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 01:09:49,995 INFO [train.py:901] (0/4) Epoch 20, batch 6150, loss[loss=0.1862, simple_loss=0.2538, pruned_loss=0.05934, over 7428.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2905, pruned_loss=0.06329, over 1622457.54 frames. ], batch size: 17, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:10:18,345 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:10:24,860 INFO [train.py:901] (0/4) Epoch 20, batch 6200, loss[loss=0.2439, simple_loss=0.3211, pruned_loss=0.08334, over 8193.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2901, pruned_loss=0.06348, over 1615521.31 frames. ], batch size: 23, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:10:30,204 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.429e+02 3.094e+02 3.753e+02 7.329e+02, threshold=6.188e+02, percent-clipped=3.0 +2023-02-07 01:10:43,484 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:00,335 INFO [train.py:901] (0/4) Epoch 20, batch 6250, loss[loss=0.24, simple_loss=0.326, pruned_loss=0.07701, over 8461.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2895, pruned_loss=0.06279, over 1615877.99 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:11:01,222 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159827.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:07,404 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:11:32,491 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159873.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:34,372 INFO [train.py:901] (0/4) Epoch 20, batch 6300, loss[loss=0.2156, simple_loss=0.2968, pruned_loss=0.06722, over 8649.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2898, pruned_loss=0.06335, over 1617487.54 frames. 
], batch size: 34, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:11:40,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.354e+02 2.951e+02 3.644e+02 9.166e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 01:11:45,868 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:58,776 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2056, 2.0008, 2.5775, 1.6332, 1.5952, 2.4982, 1.1671, 2.0314], + device='cuda:0'), covar=tensor([0.1651, 0.1290, 0.0464, 0.1455, 0.2510, 0.0433, 0.2168, 0.1451], + device='cuda:0'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0222, 0.0272, 0.0134, 0.0170, 0.0190], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 01:12:03,496 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159917.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:09,078 INFO [train.py:901] (0/4) Epoch 20, batch 6350, loss[loss=0.2047, simple_loss=0.2805, pruned_loss=0.06447, over 8137.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.29, pruned_loss=0.06385, over 1617363.66 frames. ], batch size: 22, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:12:10,571 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159928.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:43,479 INFO [train.py:901] (0/4) Epoch 20, batch 6400, loss[loss=0.2156, simple_loss=0.2957, pruned_loss=0.06774, over 7823.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.29, pruned_loss=0.06357, over 1619488.73 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:12:48,758 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.436e+02 2.995e+02 3.881e+02 8.346e+02, threshold=5.989e+02, percent-clipped=6.0 +2023-02-07 01:12:55,746 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:00,484 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-160000.pt +2023-02-07 01:13:05,827 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 01:13:16,794 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:18,638 INFO [train.py:901] (0/4) Epoch 20, batch 6450, loss[loss=0.2311, simple_loss=0.3068, pruned_loss=0.07769, over 7916.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2907, pruned_loss=0.06408, over 1620108.68 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:13:34,516 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:53,995 INFO [train.py:901] (0/4) Epoch 20, batch 6500, loss[loss=0.2091, simple_loss=0.2861, pruned_loss=0.06612, over 8198.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2915, pruned_loss=0.0644, over 1623417.24 frames. 
], batch size: 23, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:13:59,465 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.613e+02 3.061e+02 4.120e+02 1.100e+03, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 01:14:16,476 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:14:29,725 INFO [train.py:901] (0/4) Epoch 20, batch 6550, loss[loss=0.1876, simple_loss=0.2654, pruned_loss=0.05489, over 7967.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2916, pruned_loss=0.06464, over 1618803.80 frames. ], batch size: 21, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:14:53,090 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 01:15:05,556 INFO [train.py:901] (0/4) Epoch 20, batch 6600, loss[loss=0.1762, simple_loss=0.2604, pruned_loss=0.04598, over 7546.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2918, pruned_loss=0.06465, over 1619100.50 frames. ], batch size: 18, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:15:10,792 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.488e+02 3.067e+02 3.982e+02 8.719e+02, threshold=6.134e+02, percent-clipped=3.0 +2023-02-07 01:15:12,123 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 01:15:33,421 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160217.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:15:39,336 INFO [train.py:901] (0/4) Epoch 20, batch 6650, loss[loss=0.2072, simple_loss=0.2783, pruned_loss=0.06802, over 7554.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2912, pruned_loss=0.06465, over 1619217.39 frames. ], batch size: 18, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:15:56,480 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5628, 1.4632, 2.3647, 1.3305, 2.2316, 2.5551, 2.7008, 2.1466], + device='cuda:0'), covar=tensor([0.1026, 0.1313, 0.0465, 0.1975, 0.0722, 0.0359, 0.0652, 0.0648], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0321, 0.0287, 0.0315, 0.0306, 0.0263, 0.0412, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 01:16:12,500 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:16:15,124 INFO [train.py:901] (0/4) Epoch 20, batch 6700, loss[loss=0.1949, simple_loss=0.271, pruned_loss=0.05938, over 7709.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2907, pruned_loss=0.0644, over 1619482.26 frames. ], batch size: 18, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:20,502 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.299e+02 2.819e+02 3.357e+02 8.975e+02, threshold=5.638e+02, percent-clipped=4.0 +2023-02-07 01:16:48,908 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160325.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:16:49,348 INFO [train.py:901] (0/4) Epoch 20, batch 6750, loss[loss=0.29, simple_loss=0.3559, pruned_loss=0.1121, over 8762.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2918, pruned_loss=0.06496, over 1619650.64 frames. 
], batch size: 30, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:53,598 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160332.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:16,326 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:23,986 INFO [train.py:901] (0/4) Epoch 20, batch 6800, loss[loss=0.2017, simple_loss=0.2851, pruned_loss=0.05917, over 7922.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2926, pruned_loss=0.06537, over 1621252.12 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:17:28,099 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 01:17:29,320 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.510e+02 3.096e+02 3.947e+02 9.727e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 01:17:30,379 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:17:31,580 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:33,645 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160389.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:57,296 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5649, 1.5727, 4.7323, 1.8841, 4.2556, 3.9454, 4.3226, 4.2006], + device='cuda:0'), covar=tensor([0.0555, 0.4540, 0.0608, 0.4060, 0.1125, 0.1015, 0.0603, 0.0655], + device='cuda:0'), in_proj_covar=tensor([0.0612, 0.0629, 0.0681, 0.0613, 0.0694, 0.0595, 0.0596, 0.0667], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:17:59,211 INFO [train.py:901] (0/4) Epoch 20, batch 6850, loss[loss=0.1831, simple_loss=0.2744, pruned_loss=0.04587, over 8080.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2915, pruned_loss=0.06463, over 1623665.12 frames. ], batch size: 21, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:18:00,886 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 01:18:19,437 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 01:18:34,204 INFO [train.py:901] (0/4) Epoch 20, batch 6900, loss[loss=0.1877, simple_loss=0.2673, pruned_loss=0.05408, over 7921.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2905, pruned_loss=0.0642, over 1620250.68 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:18:39,564 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.333e+02 2.912e+02 3.495e+02 9.213e+02, threshold=5.824e+02, percent-clipped=3.0 +2023-02-07 01:18:43,804 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1634, 1.3876, 1.6831, 1.2537, 0.6869, 1.3980, 1.1621, 1.0422], + device='cuda:0'), covar=tensor([0.0600, 0.1232, 0.1654, 0.1478, 0.0564, 0.1514, 0.0721, 0.0727], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0159, 0.0099, 0.0162, 0.0112, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 01:19:08,572 INFO [train.py:901] (0/4) Epoch 20, batch 6950, loss[loss=0.2248, simple_loss=0.3038, pruned_loss=0.07295, over 8243.00 frames. 
], tot_loss[loss=0.208, simple_loss=0.2893, pruned_loss=0.06333, over 1615602.68 frames. ], batch size: 22, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:19:15,013 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8020, 1.6641, 2.4538, 1.6845, 1.2099, 2.4550, 0.5960, 1.5139], + device='cuda:0'), covar=tensor([0.1639, 0.1461, 0.0386, 0.1380, 0.3120, 0.0356, 0.2349, 0.1425], + device='cuda:0'), in_proj_covar=tensor([0.0189, 0.0195, 0.0126, 0.0222, 0.0271, 0.0134, 0.0169, 0.0189], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 01:19:30,209 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 01:19:31,066 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2803, 2.1672, 1.6165, 1.8475, 1.8073, 1.3847, 1.7060, 1.6657], + device='cuda:0'), covar=tensor([0.1285, 0.0404, 0.1378, 0.0588, 0.0727, 0.1627, 0.0907, 0.0878], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0238, 0.0335, 0.0311, 0.0303, 0.0340, 0.0347, 0.0319], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 01:19:42,959 INFO [train.py:901] (0/4) Epoch 20, batch 7000, loss[loss=0.2149, simple_loss=0.2979, pruned_loss=0.06594, over 8294.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2879, pruned_loss=0.06261, over 1613729.30 frames. ], batch size: 23, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:19:48,348 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.497e+02 2.987e+02 3.377e+02 5.985e+02, threshold=5.974e+02, percent-clipped=1.0 +2023-02-07 01:19:49,224 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6850, 2.3619, 3.2465, 2.5153, 3.1532, 2.5954, 2.3963, 1.8413], + device='cuda:0'), covar=tensor([0.5020, 0.4925, 0.1955, 0.4089, 0.2646, 0.2849, 0.1799, 0.5856], + device='cuda:0'), in_proj_covar=tensor([0.0927, 0.0966, 0.0792, 0.0929, 0.0982, 0.0879, 0.0738, 0.0817], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 01:19:52,061 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:09,528 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160613.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:18,199 INFO [train.py:901] (0/4) Epoch 20, batch 7050, loss[loss=0.2211, simple_loss=0.3112, pruned_loss=0.06548, over 8243.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2904, pruned_loss=0.06372, over 1617263.28 frames. ], batch size: 49, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:20:24,697 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 01:20:30,598 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:48,805 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160668.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:49,341 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160669.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:20:53,998 INFO [train.py:901] (0/4) Epoch 20, batch 7100, loss[loss=0.21, simple_loss=0.2969, pruned_loss=0.06159, over 8322.00 frames. 
], tot_loss[loss=0.2088, simple_loss=0.2903, pruned_loss=0.06364, over 1614070.42 frames. ], batch size: 25, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:20:59,625 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.520e+02 2.814e+02 3.523e+02 7.232e+02, threshold=5.628e+02, percent-clipped=2.0 +2023-02-07 01:21:02,177 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 01:21:29,426 INFO [train.py:901] (0/4) Epoch 20, batch 7150, loss[loss=0.2126, simple_loss=0.2973, pruned_loss=0.06396, over 8329.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2898, pruned_loss=0.06378, over 1610516.24 frames. ], batch size: 26, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:21:30,767 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8706, 6.1014, 5.1561, 2.4077, 5.3611, 5.6340, 5.4437, 5.4792], + device='cuda:0'), covar=tensor([0.0492, 0.0354, 0.1001, 0.4285, 0.0694, 0.0725, 0.1092, 0.0580], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0432, 0.0435, 0.0536, 0.0424, 0.0440, 0.0423, 0.0381], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:21:49,560 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 01:22:04,613 INFO [train.py:901] (0/4) Epoch 20, batch 7200, loss[loss=0.1802, simple_loss=0.27, pruned_loss=0.04525, over 7825.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2893, pruned_loss=0.06333, over 1611516.42 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:22:09,775 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.432e+02 3.066e+02 3.972e+02 8.502e+02, threshold=6.132e+02, percent-clipped=3.0 +2023-02-07 01:22:09,967 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160784.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:22:18,952 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 01:22:22,738 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0580, 1.6024, 1.3117, 1.5184, 1.3003, 1.1311, 1.3693, 1.3131], + device='cuda:0'), covar=tensor([0.1120, 0.0487, 0.1403, 0.0548, 0.0823, 0.1719, 0.0830, 0.0805], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0235, 0.0332, 0.0306, 0.0301, 0.0336, 0.0344, 0.0317], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 01:22:39,218 INFO [train.py:901] (0/4) Epoch 20, batch 7250, loss[loss=0.217, simple_loss=0.2969, pruned_loss=0.06855, over 8609.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2907, pruned_loss=0.06433, over 1611720.55 frames. ], batch size: 34, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:13,929 INFO [train.py:901] (0/4) Epoch 20, batch 7300, loss[loss=0.2301, simple_loss=0.3097, pruned_loss=0.0752, over 8631.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.291, pruned_loss=0.06396, over 1612565.22 frames. 
], batch size: 31, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:19,316 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.519e+02 2.885e+02 3.982e+02 8.183e+02, threshold=5.771e+02, percent-clipped=5.0 +2023-02-07 01:23:28,358 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5551, 1.8334, 1.9257, 1.2307, 1.9888, 1.5320, 0.4706, 1.8061], + device='cuda:0'), covar=tensor([0.0537, 0.0357, 0.0266, 0.0516, 0.0413, 0.0869, 0.0875, 0.0254], + device='cuda:0'), in_proj_covar=tensor([0.0450, 0.0384, 0.0339, 0.0440, 0.0369, 0.0534, 0.0392, 0.0412], + device='cuda:0'), out_proj_covar=tensor([1.2107e-04, 1.0064e-04, 8.9304e-05, 1.1617e-04, 9.7327e-05, 1.5183e-04, + 1.0604e-04, 1.0947e-04], device='cuda:0') +2023-02-07 01:23:44,246 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160919.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:23:48,785 INFO [train.py:901] (0/4) Epoch 20, batch 7350, loss[loss=0.1915, simple_loss=0.2638, pruned_loss=0.0596, over 7714.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.29, pruned_loss=0.06313, over 1610222.41 frames. ], batch size: 18, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:51,530 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2951, 1.3392, 3.4204, 1.0830, 3.0297, 2.9007, 3.1423, 3.0549], + device='cuda:0'), covar=tensor([0.0752, 0.3873, 0.0778, 0.3873, 0.1397, 0.1061, 0.0728, 0.0841], + device='cuda:0'), in_proj_covar=tensor([0.0612, 0.0630, 0.0679, 0.0615, 0.0696, 0.0594, 0.0595, 0.0665], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:24:16,155 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 01:24:20,000 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-07 01:24:24,317 INFO [train.py:901] (0/4) Epoch 20, batch 7400, loss[loss=0.179, simple_loss=0.2622, pruned_loss=0.04792, over 8029.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06299, over 1606285.06 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:24:29,908 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:24:30,422 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.344e+02 3.002e+02 3.673e+02 6.079e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-07 01:24:37,302 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 01:24:59,976 INFO [train.py:901] (0/4) Epoch 20, batch 7450, loss[loss=0.2306, simple_loss=0.3118, pruned_loss=0.07469, over 8462.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2892, pruned_loss=0.0629, over 1609860.59 frames. ], batch size: 25, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:10,065 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161040.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:25:16,122 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-07 01:25:28,612 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161065.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:25:35,939 INFO [train.py:901] (0/4) Epoch 20, batch 7500, loss[loss=0.2076, simple_loss=0.2898, pruned_loss=0.06272, over 8476.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2888, pruned_loss=0.0632, over 1604224.88 frames. ], batch size: 28, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:41,424 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.441e+02 3.010e+02 3.756e+02 8.900e+02, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:26:11,146 INFO [train.py:901] (0/4) Epoch 20, batch 7550, loss[loss=0.1926, simple_loss=0.262, pruned_loss=0.0616, over 7539.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2907, pruned_loss=0.06448, over 1605114.18 frames. ], batch size: 18, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:46,339 INFO [train.py:901] (0/4) Epoch 20, batch 7600, loss[loss=0.1954, simple_loss=0.2808, pruned_loss=0.05505, over 8503.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2908, pruned_loss=0.06425, over 1612238.56 frames. ], batch size: 39, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:51,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.460e+02 3.037e+02 4.113e+02 9.859e+02, threshold=6.074e+02, percent-clipped=9.0 +2023-02-07 01:27:20,287 INFO [train.py:901] (0/4) Epoch 20, batch 7650, loss[loss=0.2021, simple_loss=0.2917, pruned_loss=0.05623, over 8475.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2903, pruned_loss=0.06449, over 1608762.78 frames. ], batch size: 25, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:27:25,753 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:36,274 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:45,578 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:54,268 INFO [train.py:901] (0/4) Epoch 20, batch 7700, loss[loss=0.1665, simple_loss=0.2448, pruned_loss=0.04407, over 7549.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2904, pruned_loss=0.06464, over 1609545.08 frames. ], batch size: 18, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:27:59,447 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.411e+02 2.987e+02 3.572e+02 6.786e+02, threshold=5.975e+02, percent-clipped=3.0 +2023-02-07 01:28:19,966 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-07 01:28:25,746 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 01:28:29,787 INFO [train.py:901] (0/4) Epoch 20, batch 7750, loss[loss=0.2002, simple_loss=0.2849, pruned_loss=0.05778, over 8783.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2913, pruned_loss=0.06508, over 1614102.30 frames. ], batch size: 40, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:28:30,570 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:05,270 INFO [train.py:901] (0/4) Epoch 20, batch 7800, loss[loss=0.2119, simple_loss=0.289, pruned_loss=0.06736, over 8081.00 frames. 
], tot_loss[loss=0.2097, simple_loss=0.2903, pruned_loss=0.06457, over 1616106.24 frames. ], batch size: 21, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:29:06,850 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:10,618 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.429e+02 2.909e+02 3.732e+02 6.331e+02, threshold=5.818e+02, percent-clipped=2.0 +2023-02-07 01:29:34,443 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:39,043 INFO [train.py:901] (0/4) Epoch 20, batch 7850, loss[loss=0.1941, simple_loss=0.2771, pruned_loss=0.0556, over 8346.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2915, pruned_loss=0.06477, over 1623275.53 frames. ], batch size: 24, lr: 3.74e-03, grad_scale: 16.0 +2023-02-07 01:29:49,624 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:30:12,437 INFO [train.py:901] (0/4) Epoch 20, batch 7900, loss[loss=0.1739, simple_loss=0.2637, pruned_loss=0.04207, over 8089.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2921, pruned_loss=0.0651, over 1622950.67 frames. ], batch size: 21, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:13,747 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:30:18,876 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 2.352e+02 2.923e+02 4.060e+02 8.940e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 01:30:30,598 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8941, 2.2027, 1.6770, 2.8072, 1.4049, 1.5846, 2.0799, 2.2162], + device='cuda:0'), covar=tensor([0.0990, 0.0801, 0.1199, 0.0415, 0.1176, 0.1507, 0.0996, 0.0958], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0197, 0.0247, 0.0213, 0.0205, 0.0247, 0.0252, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 01:30:45,508 INFO [train.py:901] (0/4) Epoch 20, batch 7950, loss[loss=0.1969, simple_loss=0.2753, pruned_loss=0.05924, over 7935.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2916, pruned_loss=0.06474, over 1619528.43 frames. ], batch size: 20, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:59,396 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1091, 1.8469, 2.3932, 2.0274, 2.2437, 2.1503, 1.8902, 1.0847], + device='cuda:0'), covar=tensor([0.5022, 0.4170, 0.1684, 0.3229, 0.2250, 0.2703, 0.1786, 0.4504], + device='cuda:0'), in_proj_covar=tensor([0.0936, 0.0969, 0.0796, 0.0932, 0.0989, 0.0882, 0.0742, 0.0818], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 01:31:18,035 INFO [train.py:901] (0/4) Epoch 20, batch 8000, loss[loss=0.1878, simple_loss=0.2753, pruned_loss=0.05017, over 7933.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2912, pruned_loss=0.06441, over 1621492.65 frames. 
], batch size: 20, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:19,437 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:23,852 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.449e+02 3.108e+02 3.740e+02 8.675e+02, threshold=6.215e+02, percent-clipped=6.0 +2023-02-07 01:31:29,203 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.88 vs. limit=5.0 +2023-02-07 01:31:29,404 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:51,325 INFO [train.py:901] (0/4) Epoch 20, batch 8050, loss[loss=0.1845, simple_loss=0.2674, pruned_loss=0.05082, over 7226.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2898, pruned_loss=0.06411, over 1609172.53 frames. ], batch size: 16, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:57,051 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:05,180 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7362, 1.7947, 2.0004, 1.7650, 1.1979, 1.8247, 2.4840, 2.1598], + device='cuda:0'), covar=tensor([0.0580, 0.1482, 0.1970, 0.1653, 0.0801, 0.1727, 0.0697, 0.0624], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0099, 0.0162, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 01:32:15,041 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-20.pt +2023-02-07 01:32:28,744 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 01:32:32,190 INFO [train.py:901] (0/4) Epoch 21, batch 0, loss[loss=0.177, simple_loss=0.2558, pruned_loss=0.04913, over 7419.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2558, pruned_loss=0.04913, over 7419.00 frames. ], batch size: 17, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:32:32,191 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 01:32:44,210 INFO [train.py:935] (0/4) Epoch 21, validation: loss=0.1763, simple_loss=0.2762, pruned_loss=0.03818, over 944034.00 frames. +2023-02-07 01:32:44,211 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 01:32:44,451 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161659.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:49,224 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1508, 4.1493, 3.6677, 1.9223, 3.6555, 3.7576, 3.7028, 3.6508], + device='cuda:0'), covar=tensor([0.0835, 0.0620, 0.1237, 0.4811, 0.1020, 0.1208, 0.1429, 0.0792], + device='cuda:0'), in_proj_covar=tensor([0.0511, 0.0429, 0.0427, 0.0526, 0.0419, 0.0430, 0.0413, 0.0375], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:32:59,362 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 01:33:02,224 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.415e+02 2.918e+02 3.924e+02 7.413e+02, threshold=5.835e+02, percent-clipped=4.0 +2023-02-07 01:33:07,287 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8266, 1.9581, 2.1637, 1.4890, 2.2855, 1.6728, 0.6911, 1.9517], + device='cuda:0'), covar=tensor([0.0483, 0.0361, 0.0286, 0.0467, 0.0392, 0.0715, 0.0847, 0.0266], + device='cuda:0'), in_proj_covar=tensor([0.0451, 0.0384, 0.0336, 0.0437, 0.0368, 0.0531, 0.0391, 0.0412], + device='cuda:0'), out_proj_covar=tensor([1.2127e-04, 1.0075e-04, 8.8586e-05, 1.1548e-04, 9.7045e-05, 1.5098e-04, + 1.0579e-04, 1.0949e-04], device='cuda:0') +2023-02-07 01:33:07,960 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:11,598 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:18,569 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161708.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:19,110 INFO [train.py:901] (0/4) Epoch 21, batch 50, loss[loss=0.1948, simple_loss=0.2782, pruned_loss=0.0557, over 8597.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2873, pruned_loss=0.06301, over 363305.31 frames. ], batch size: 39, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:33:29,248 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:32,459 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 01:33:56,015 INFO [train.py:901] (0/4) Epoch 21, batch 100, loss[loss=0.2198, simple_loss=0.2856, pruned_loss=0.07698, over 7799.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2881, pruned_loss=0.06378, over 640398.77 frames. ], batch size: 20, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:33:57,243 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 01:33:58,643 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:34:14,161 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.511e+02 2.964e+02 4.065e+02 7.207e+02, threshold=5.927e+02, percent-clipped=4.0 +2023-02-07 01:34:16,981 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1498, 1.8107, 3.4565, 1.4430, 2.4109, 3.8498, 3.9947, 3.2840], + device='cuda:0'), covar=tensor([0.1141, 0.1619, 0.0347, 0.2319, 0.1098, 0.0223, 0.0499, 0.0568], + device='cuda:0'), in_proj_covar=tensor([0.0294, 0.0320, 0.0286, 0.0315, 0.0306, 0.0261, 0.0410, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 01:34:17,016 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1845, 2.2927, 1.8386, 2.8948, 1.2947, 1.7748, 2.1881, 2.2931], + device='cuda:0'), covar=tensor([0.0689, 0.0786, 0.0903, 0.0349, 0.1172, 0.1256, 0.0862, 0.0809], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0210, 0.0203, 0.0244, 0.0248, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 01:34:30,762 INFO [train.py:901] (0/4) Epoch 21, batch 150, loss[loss=0.1518, simple_loss=0.2316, pruned_loss=0.03602, over 7192.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2911, pruned_loss=0.06514, over 859232.96 frames. ], batch size: 16, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:34:33,322 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.53 vs. limit=5.0 +2023-02-07 01:34:39,729 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:34:47,239 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:35:06,331 INFO [train.py:901] (0/4) Epoch 21, batch 200, loss[loss=0.1657, simple_loss=0.2525, pruned_loss=0.03941, over 7981.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2906, pruned_loss=0.06437, over 1025408.21 frames. ], batch size: 21, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:35:15,833 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8927, 2.2807, 4.2669, 1.4844, 2.9040, 2.2941, 1.9071, 2.6774], + device='cuda:0'), covar=tensor([0.1755, 0.2502, 0.0797, 0.4569, 0.1835, 0.3248, 0.2145, 0.2650], + device='cuda:0'), in_proj_covar=tensor([0.0523, 0.0594, 0.0551, 0.0636, 0.0642, 0.0594, 0.0530, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:35:19,142 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:35:23,716 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.494e+02 2.791e+02 3.613e+02 7.338e+02, threshold=5.582e+02, percent-clipped=1.0 +2023-02-07 01:35:41,064 INFO [train.py:901] (0/4) Epoch 21, batch 250, loss[loss=0.2184, simple_loss=0.2979, pruned_loss=0.06941, over 8464.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2913, pruned_loss=0.06367, over 1162237.28 frames. 
], batch size: 27, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:35:47,944 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 01:35:55,197 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8981, 1.8127, 2.9295, 2.1766, 2.5785, 1.8915, 1.6530, 1.2403], + device='cuda:0'), covar=tensor([0.6912, 0.6071, 0.1867, 0.4197, 0.3032, 0.4297, 0.3028, 0.5823], + device='cuda:0'), in_proj_covar=tensor([0.0933, 0.0967, 0.0794, 0.0929, 0.0983, 0.0879, 0.0740, 0.0815], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 01:35:57,078 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 01:36:00,661 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161937.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:08,767 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:15,257 INFO [train.py:901] (0/4) Epoch 21, batch 300, loss[loss=0.2751, simple_loss=0.3262, pruned_loss=0.112, over 7789.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2924, pruned_loss=0.06409, over 1265785.41 frames. ], batch size: 19, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:36:19,008 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161964.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:26,612 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161974.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:33,765 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.839e+02 3.558e+02 8.067e+02, threshold=5.678e+02, percent-clipped=5.0 +2023-02-07 01:36:36,643 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:44,138 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-162000.pt +2023-02-07 01:36:51,876 INFO [train.py:901] (0/4) Epoch 21, batch 350, loss[loss=0.265, simple_loss=0.3373, pruned_loss=0.09638, over 8497.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2899, pruned_loss=0.0633, over 1337573.50 frames. ], batch size: 26, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:37:14,170 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 01:37:25,797 INFO [train.py:901] (0/4) Epoch 21, batch 400, loss[loss=0.2358, simple_loss=0.3216, pruned_loss=0.07502, over 8461.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2908, pruned_loss=0.06355, over 1404658.83 frames. ], batch size: 25, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:37:29,049 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 01:37:44,477 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.323e+02 2.796e+02 3.394e+02 5.024e+02, threshold=5.592e+02, percent-clipped=0.0 +2023-02-07 01:37:52,938 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:02,161 INFO [train.py:901] (0/4) Epoch 21, batch 450, loss[loss=0.1799, simple_loss=0.2508, pruned_loss=0.05456, over 7786.00 frames. 
], tot_loss[loss=0.208, simple_loss=0.29, pruned_loss=0.06297, over 1447776.20 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:38:20,138 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:27,155 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6934, 2.2046, 4.1733, 1.4778, 2.9503, 2.1943, 1.7407, 3.0093], + device='cuda:0'), covar=tensor([0.1803, 0.2654, 0.0733, 0.4373, 0.1774, 0.3181, 0.2216, 0.2203], + device='cuda:0'), in_proj_covar=tensor([0.0521, 0.0593, 0.0550, 0.0633, 0.0640, 0.0592, 0.0529, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:38:31,233 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1585, 1.3787, 4.2995, 1.6706, 3.8394, 3.5624, 3.9322, 3.8124], + device='cuda:0'), covar=tensor([0.0602, 0.4837, 0.0568, 0.4097, 0.1080, 0.1002, 0.0600, 0.0701], + device='cuda:0'), in_proj_covar=tensor([0.0615, 0.0632, 0.0683, 0.0614, 0.0697, 0.0597, 0.0598, 0.0664], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:38:37,320 INFO [train.py:901] (0/4) Epoch 21, batch 500, loss[loss=0.1652, simple_loss=0.2425, pruned_loss=0.04394, over 7786.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2877, pruned_loss=0.06194, over 1481076.53 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:38:37,537 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:50,071 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:55,592 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.501e+02 2.975e+02 3.750e+02 9.376e+02, threshold=5.950e+02, percent-clipped=8.0 +2023-02-07 01:38:58,683 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3685, 1.5865, 2.0906, 1.2660, 1.5203, 1.6216, 1.4666, 1.5431], + device='cuda:0'), covar=tensor([0.2232, 0.3040, 0.1065, 0.5253, 0.2206, 0.3869, 0.2712, 0.2306], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0593, 0.0550, 0.0632, 0.0640, 0.0592, 0.0530, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:39:01,456 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162193.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:06,995 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0599, 1.7688, 3.5776, 1.6344, 2.5395, 3.9739, 4.0880, 3.3735], + device='cuda:0'), covar=tensor([0.1211, 0.1584, 0.0299, 0.2052, 0.0958, 0.0217, 0.0513, 0.0521], + device='cuda:0'), in_proj_covar=tensor([0.0292, 0.0319, 0.0286, 0.0312, 0.0306, 0.0261, 0.0409, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 01:39:13,737 INFO [train.py:901] (0/4) Epoch 21, batch 550, loss[loss=0.1959, simple_loss=0.2672, pruned_loss=0.06227, over 7270.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2881, pruned_loss=0.06203, over 1511725.59 frames. 
], batch size: 16, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:39:20,121 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162218.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:42,252 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:48,770 INFO [train.py:901] (0/4) Epoch 21, batch 600, loss[loss=0.1895, simple_loss=0.2761, pruned_loss=0.05144, over 7928.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2891, pruned_loss=0.06247, over 1540074.30 frames. ], batch size: 20, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:40:02,425 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 01:40:06,586 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.365e+02 2.932e+02 3.412e+02 7.385e+02, threshold=5.863e+02, percent-clipped=2.0 +2023-02-07 01:40:11,405 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162292.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:40:22,965 INFO [train.py:901] (0/4) Epoch 21, batch 650, loss[loss=0.1869, simple_loss=0.2714, pruned_loss=0.05118, over 8242.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2879, pruned_loss=0.06178, over 1560615.35 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:40:43,636 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7565, 2.4386, 3.8577, 1.5126, 2.7360, 1.9137, 2.0914, 2.2959], + device='cuda:0'), covar=tensor([0.2024, 0.2146, 0.1070, 0.4423, 0.1857, 0.3627, 0.2122, 0.3077], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0592, 0.0548, 0.0631, 0.0640, 0.0590, 0.0529, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:40:49,740 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9613, 1.9216, 2.1544, 1.9589, 1.0999, 1.8613, 2.2374, 2.2543], + device='cuda:0'), covar=tensor([0.0434, 0.1155, 0.1534, 0.1259, 0.0577, 0.1331, 0.0607, 0.0561], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0098, 0.0161, 0.0111, 0.0141], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 01:40:59,237 INFO [train.py:901] (0/4) Epoch 21, batch 700, loss[loss=0.2457, simple_loss=0.3196, pruned_loss=0.08588, over 6943.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2887, pruned_loss=0.06273, over 1569213.78 frames. ], batch size: 71, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:41:17,761 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.443e+02 3.111e+02 4.032e+02 8.821e+02, threshold=6.222e+02, percent-clipped=5.0 +2023-02-07 01:41:34,557 INFO [train.py:901] (0/4) Epoch 21, batch 750, loss[loss=0.1737, simple_loss=0.2573, pruned_loss=0.04508, over 8335.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2883, pruned_loss=0.06249, over 1578835.73 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:41:40,441 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:41:45,475 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 01:41:54,302 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. 
Duration: 26.32775 +2023-02-07 01:41:56,361 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:42:01,039 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 01:42:11,048 INFO [train.py:901] (0/4) Epoch 21, batch 800, loss[loss=0.1954, simple_loss=0.2721, pruned_loss=0.05937, over 7693.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2894, pruned_loss=0.06312, over 1588539.26 frames. ], batch size: 18, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:42:29,940 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.455e+02 2.861e+02 3.570e+02 7.084e+02, threshold=5.721e+02, percent-clipped=3.0 +2023-02-07 01:42:47,168 INFO [train.py:901] (0/4) Epoch 21, batch 850, loss[loss=0.2138, simple_loss=0.2893, pruned_loss=0.06914, over 8135.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2897, pruned_loss=0.06336, over 1595142.00 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:43:01,466 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162529.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:07,185 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4217, 2.2449, 2.9842, 2.4545, 2.9094, 2.3750, 2.2545, 1.9082], + device='cuda:0'), covar=tensor([0.4678, 0.4549, 0.1607, 0.3308, 0.2222, 0.2807, 0.1732, 0.4696], + device='cuda:0'), in_proj_covar=tensor([0.0936, 0.0967, 0.0791, 0.0929, 0.0983, 0.0878, 0.0738, 0.0815], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 01:43:16,271 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:21,104 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:24,453 INFO [train.py:901] (0/4) Epoch 21, batch 900, loss[loss=0.2216, simple_loss=0.2971, pruned_loss=0.07305, over 8524.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2903, pruned_loss=0.06343, over 1601071.79 frames. ], batch size: 39, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:43:34,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:42,638 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.319e+02 2.838e+02 3.637e+02 1.203e+03, threshold=5.677e+02, percent-clipped=5.0 +2023-02-07 01:43:49,232 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:44:00,578 INFO [train.py:901] (0/4) Epoch 21, batch 950, loss[loss=0.2233, simple_loss=0.3101, pruned_loss=0.06828, over 8196.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2903, pruned_loss=0.06359, over 1605598.33 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:44:14,212 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 01:44:35,848 INFO [train.py:901] (0/4) Epoch 21, batch 1000, loss[loss=0.221, simple_loss=0.3012, pruned_loss=0.07038, over 8445.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2898, pruned_loss=0.06323, over 1611241.04 frames. 
], batch size: 27, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:44:40,150 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9601, 1.5942, 3.2661, 1.4781, 2.2857, 3.5925, 3.6988, 3.0520], + device='cuda:0'), covar=tensor([0.1170, 0.1711, 0.0371, 0.2166, 0.1166, 0.0241, 0.0539, 0.0552], + device='cuda:0'), in_proj_covar=tensor([0.0292, 0.0319, 0.0286, 0.0311, 0.0307, 0.0261, 0.0410, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 01:44:48,948 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 01:44:55,205 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.438e+02 2.954e+02 4.014e+02 9.557e+02, threshold=5.908e+02, percent-clipped=3.0 +2023-02-07 01:45:01,388 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 01:45:11,681 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162708.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:45:12,206 INFO [train.py:901] (0/4) Epoch 21, batch 1050, loss[loss=0.1857, simple_loss=0.2802, pruned_loss=0.04556, over 8191.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2909, pruned_loss=0.06345, over 1615925.10 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:45:46,485 INFO [train.py:901] (0/4) Epoch 21, batch 1100, loss[loss=0.2058, simple_loss=0.2961, pruned_loss=0.05774, over 8293.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2902, pruned_loss=0.06328, over 1617550.53 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:46:06,016 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.501e+02 3.059e+02 3.494e+02 1.150e+03, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 01:46:14,528 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 01:46:23,792 INFO [train.py:901] (0/4) Epoch 21, batch 1150, loss[loss=0.2169, simple_loss=0.3015, pruned_loss=0.06619, over 8246.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2893, pruned_loss=0.06266, over 1618264.61 frames. ], batch size: 24, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:46:24,671 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:29,073 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:46:29,779 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:46:42,971 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:50,055 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162845.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:59,722 INFO [train.py:901] (0/4) Epoch 21, batch 1200, loss[loss=0.193, simple_loss=0.2857, pruned_loss=0.0501, over 8512.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2895, pruned_loss=0.0631, over 1615764.64 frames. 
], batch size: 26, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:47:09,523 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162873.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:47:17,449 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.368e+02 3.051e+02 3.779e+02 6.869e+02, threshold=6.103e+02, percent-clipped=3.0 +2023-02-07 01:47:36,405 INFO [train.py:901] (0/4) Epoch 21, batch 1250, loss[loss=0.2181, simple_loss=0.3165, pruned_loss=0.05987, over 8474.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2894, pruned_loss=0.0632, over 1614908.26 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:48:11,284 INFO [train.py:901] (0/4) Epoch 21, batch 1300, loss[loss=0.2255, simple_loss=0.3082, pruned_loss=0.07138, over 8460.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2896, pruned_loss=0.06359, over 1616986.37 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:48:14,770 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162964.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:27,750 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-07 01:48:28,481 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.260e+02 2.727e+02 3.317e+02 5.773e+02, threshold=5.453e+02, percent-clipped=0.0 +2023-02-07 01:48:29,598 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.99 vs. limit=5.0 +2023-02-07 01:48:30,724 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:31,445 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:44,803 INFO [train.py:901] (0/4) Epoch 21, batch 1350, loss[loss=0.1649, simple_loss=0.2436, pruned_loss=0.04305, over 7444.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2895, pruned_loss=0.06312, over 1620186.09 frames. ], batch size: 17, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:48:45,179 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 01:49:09,961 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.08 vs. limit=2.0 +2023-02-07 01:49:21,737 INFO [train.py:901] (0/4) Epoch 21, batch 1400, loss[loss=0.2138, simple_loss=0.2943, pruned_loss=0.0667, over 8459.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2895, pruned_loss=0.06372, over 1619478.55 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:49:34,632 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 01:49:38,380 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.22 vs. limit=5.0 +2023-02-07 01:49:39,401 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.469e+02 3.010e+02 4.050e+02 1.060e+03, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:49:46,311 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 01:49:55,422 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:49:55,932 INFO [train.py:901] (0/4) Epoch 21, batch 1450, loss[loss=0.2324, simple_loss=0.3182, pruned_loss=0.07332, over 8101.00 frames. 
], tot_loss[loss=0.2089, simple_loss=0.2909, pruned_loss=0.06345, over 1620184.11 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:50:31,010 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1930, 1.0707, 1.3027, 1.0111, 0.9162, 1.3347, 0.1034, 0.9118], + device='cuda:0'), covar=tensor([0.1607, 0.1354, 0.0466, 0.0838, 0.2815, 0.0533, 0.2101, 0.1223], + device='cuda:0'), in_proj_covar=tensor([0.0186, 0.0192, 0.0125, 0.0219, 0.0269, 0.0132, 0.0167, 0.0188], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 01:50:32,151 INFO [train.py:901] (0/4) Epoch 21, batch 1500, loss[loss=0.2032, simple_loss=0.285, pruned_loss=0.06074, over 7977.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2903, pruned_loss=0.06304, over 1616481.73 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:50:35,020 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5363, 2.0111, 2.1706, 1.2217, 2.3254, 1.5039, 0.7303, 1.7454], + device='cuda:0'), covar=tensor([0.0691, 0.0393, 0.0271, 0.0689, 0.0373, 0.0898, 0.1002, 0.0402], + device='cuda:0'), in_proj_covar=tensor([0.0450, 0.0389, 0.0337, 0.0440, 0.0371, 0.0534, 0.0389, 0.0418], + device='cuda:0'), out_proj_covar=tensor([1.2093e-04, 1.0215e-04, 8.8828e-05, 1.1620e-04, 9.7726e-05, 1.5150e-04, + 1.0536e-04, 1.1126e-04], device='cuda:0') +2023-02-07 01:50:50,507 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.250e+02 2.722e+02 3.392e+02 6.898e+02, threshold=5.444e+02, percent-clipped=4.0 +2023-02-07 01:50:53,285 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163189.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:05,627 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2277, 2.2681, 1.9982, 2.9665, 1.2895, 1.7528, 2.0226, 2.2305], + device='cuda:0'), covar=tensor([0.0646, 0.0785, 0.0869, 0.0337, 0.1151, 0.1231, 0.0939, 0.0802], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0205, 0.0247, 0.0250, 0.0208], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 01:51:06,787 INFO [train.py:901] (0/4) Epoch 21, batch 1550, loss[loss=0.2376, simple_loss=0.3174, pruned_loss=0.07891, over 8507.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2897, pruned_loss=0.06259, over 1622478.35 frames. ], batch size: 28, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:51:31,313 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:42,280 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163258.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:42,818 INFO [train.py:901] (0/4) Epoch 21, batch 1600, loss[loss=0.2007, simple_loss=0.2836, pruned_loss=0.05889, over 8241.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2885, pruned_loss=0.06178, over 1622638.94 frames. 
], batch size: 22, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:51:47,156 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6472, 1.3287, 1.6250, 1.2154, 0.8029, 1.4094, 1.4213, 1.4465], + device='cuda:0'), covar=tensor([0.0597, 0.1394, 0.1844, 0.1631, 0.0655, 0.1581, 0.0759, 0.0676], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0162, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 01:51:50,591 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163269.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:52:00,873 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.380e+02 3.009e+02 4.081e+02 9.131e+02, threshold=6.018e+02, percent-clipped=6.0 +2023-02-07 01:52:14,552 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163304.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:52:17,721 INFO [train.py:901] (0/4) Epoch 21, batch 1650, loss[loss=0.1733, simple_loss=0.2431, pruned_loss=0.05172, over 7239.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2882, pruned_loss=0.06218, over 1619803.64 frames. ], batch size: 16, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:52:51,344 INFO [train.py:901] (0/4) Epoch 21, batch 1700, loss[loss=0.1788, simple_loss=0.2484, pruned_loss=0.05461, over 7688.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2892, pruned_loss=0.06252, over 1620149.30 frames. ], batch size: 18, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:53:09,964 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.344e+02 2.897e+02 3.678e+02 1.033e+03, threshold=5.793e+02, percent-clipped=5.0 +2023-02-07 01:53:27,421 INFO [train.py:901] (0/4) Epoch 21, batch 1750, loss[loss=0.2108, simple_loss=0.2889, pruned_loss=0.06636, over 8249.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2892, pruned_loss=0.06278, over 1619286.99 frames. ], batch size: 24, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:53:51,710 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4411, 2.0654, 2.1097, 1.9636, 1.4526, 1.9924, 2.2809, 2.0738], + device='cuda:0'), covar=tensor([0.0480, 0.0891, 0.1296, 0.1156, 0.0586, 0.1122, 0.0617, 0.0511], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0158, 0.0098, 0.0161, 0.0112, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 01:53:56,328 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:54:01,060 INFO [train.py:901] (0/4) Epoch 21, batch 1800, loss[loss=0.193, simple_loss=0.2747, pruned_loss=0.05563, over 7964.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2894, pruned_loss=0.06283, over 1618759.90 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:54:18,722 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.661e+02 3.025e+02 4.067e+02 7.408e+02, threshold=6.049e+02, percent-clipped=6.0 +2023-02-07 01:54:36,585 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-07 01:54:37,343 INFO [train.py:901] (0/4) Epoch 21, batch 1850, loss[loss=0.207, simple_loss=0.2846, pruned_loss=0.0647, over 8197.00 frames. 
], tot_loss[loss=0.2088, simple_loss=0.2905, pruned_loss=0.06354, over 1619720.67 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:11,694 INFO [train.py:901] (0/4) Epoch 21, batch 1900, loss[loss=0.2306, simple_loss=0.3069, pruned_loss=0.07714, over 6711.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2899, pruned_loss=0.0632, over 1621232.22 frames. ], batch size: 71, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:12,595 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:17,223 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:26,427 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 01:55:29,002 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.410e+02 2.798e+02 3.588e+02 7.290e+02, threshold=5.595e+02, percent-clipped=1.0 +2023-02-07 01:55:29,214 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:37,692 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 01:55:40,606 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163602.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:43,574 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 01:55:45,148 INFO [train.py:901] (0/4) Epoch 21, batch 1950, loss[loss=0.1957, simple_loss=0.2859, pruned_loss=0.05276, over 8513.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2895, pruned_loss=0.0631, over 1621240.55 frames. ], batch size: 28, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:58,504 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 01:56:01,711 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 01:56:21,642 INFO [train.py:901] (0/4) Epoch 21, batch 2000, loss[loss=0.2489, simple_loss=0.3186, pruned_loss=0.08963, over 8586.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2893, pruned_loss=0.06326, over 1618995.04 frames. ], batch size: 34, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:56:29,872 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6433, 1.8943, 2.0215, 1.4422, 2.1465, 1.4989, 0.5110, 1.8845], + device='cuda:0'), covar=tensor([0.0498, 0.0321, 0.0262, 0.0499, 0.0336, 0.0810, 0.0805, 0.0259], + device='cuda:0'), in_proj_covar=tensor([0.0446, 0.0384, 0.0336, 0.0438, 0.0367, 0.0528, 0.0387, 0.0414], + device='cuda:0'), out_proj_covar=tensor([1.2002e-04, 1.0088e-04, 8.8661e-05, 1.1577e-04, 9.6821e-05, 1.4969e-04, + 1.0456e-04, 1.1004e-04], device='cuda:0') +2023-02-07 01:56:39,056 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.546e+02 3.013e+02 3.975e+02 6.874e+02, threshold=6.025e+02, percent-clipped=4.0 +2023-02-07 01:56:55,198 INFO [train.py:901] (0/4) Epoch 21, batch 2050, loss[loss=0.219, simple_loss=0.298, pruned_loss=0.06998, over 8034.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2893, pruned_loss=0.06301, over 1620900.30 frames. 
], batch size: 22, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:57:00,599 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163717.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:57:05,691 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8313, 1.3092, 3.9729, 1.3982, 3.5388, 3.3220, 3.5794, 3.4808], + device='cuda:0'), covar=tensor([0.0655, 0.4594, 0.0696, 0.4149, 0.1179, 0.1080, 0.0662, 0.0750], + device='cuda:0'), in_proj_covar=tensor([0.0624, 0.0640, 0.0685, 0.0622, 0.0701, 0.0603, 0.0602, 0.0670], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:57:30,299 INFO [train.py:901] (0/4) Epoch 21, batch 2100, loss[loss=0.2189, simple_loss=0.3005, pruned_loss=0.06859, over 8345.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2892, pruned_loss=0.06307, over 1621871.13 frames. ], batch size: 26, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:57:43,622 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 01:57:48,642 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.575e+02 2.946e+02 3.630e+02 8.805e+02, threshold=5.893e+02, percent-clipped=3.0 +2023-02-07 01:58:04,870 INFO [train.py:901] (0/4) Epoch 21, batch 2150, loss[loss=0.2004, simple_loss=0.2766, pruned_loss=0.06209, over 7778.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2889, pruned_loss=0.06297, over 1620893.17 frames. ], batch size: 19, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:58:14,671 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:58:31,299 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:58:39,334 INFO [train.py:901] (0/4) Epoch 21, batch 2200, loss[loss=0.214, simple_loss=0.2897, pruned_loss=0.0691, over 6428.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2883, pruned_loss=0.06291, over 1610822.93 frames. ], batch size: 14, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:58:58,241 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.475e+02 2.987e+02 3.670e+02 7.762e+02, threshold=5.973e+02, percent-clipped=3.0 +2023-02-07 01:59:15,135 INFO [train.py:901] (0/4) Epoch 21, batch 2250, loss[loss=0.2319, simple_loss=0.3156, pruned_loss=0.07408, over 8609.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2884, pruned_loss=0.06298, over 1610751.62 frames. 
], batch size: 34, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 01:59:21,982 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8058, 3.7682, 3.3842, 2.0549, 3.3128, 3.4134, 3.3458, 3.2687], + device='cuda:0'), covar=tensor([0.0934, 0.0722, 0.1177, 0.4741, 0.1047, 0.1288, 0.1480, 0.1026], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0433, 0.0435, 0.0538, 0.0426, 0.0441, 0.0421, 0.0382], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:59:30,190 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7591, 5.9716, 5.0557, 3.0491, 5.1816, 5.5991, 5.4676, 5.3551], + device='cuda:0'), covar=tensor([0.0601, 0.0396, 0.0980, 0.3826, 0.0846, 0.0775, 0.1014, 0.0743], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0433, 0.0435, 0.0538, 0.0427, 0.0441, 0.0421, 0.0383], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 01:59:33,084 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7624, 1.9775, 2.1138, 1.4773, 2.2729, 1.4981, 0.7053, 1.9508], + device='cuda:0'), covar=tensor([0.0599, 0.0318, 0.0274, 0.0550, 0.0359, 0.0858, 0.0904, 0.0290], + device='cuda:0'), in_proj_covar=tensor([0.0448, 0.0384, 0.0336, 0.0439, 0.0369, 0.0526, 0.0387, 0.0413], + device='cuda:0'), out_proj_covar=tensor([1.2041e-04, 1.0092e-04, 8.8536e-05, 1.1599e-04, 9.7212e-05, 1.4906e-04, + 1.0461e-04, 1.0979e-04], device='cuda:0') +2023-02-07 01:59:49,152 INFO [train.py:901] (0/4) Epoch 21, batch 2300, loss[loss=0.1882, simple_loss=0.2758, pruned_loss=0.05033, over 8587.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2886, pruned_loss=0.06281, over 1609578.44 frames. ], batch size: 31, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 01:59:58,901 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163973.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:00:08,071 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.361e+02 2.889e+02 3.736e+02 8.411e+02, threshold=5.778e+02, percent-clipped=4.0 +2023-02-07 02:00:17,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:00:19,119 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-164000.pt +2023-02-07 02:00:20,572 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 02:00:26,179 INFO [train.py:901] (0/4) Epoch 21, batch 2350, loss[loss=0.171, simple_loss=0.2488, pruned_loss=0.04656, over 7230.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2877, pruned_loss=0.06231, over 1610224.85 frames. ], batch size: 16, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:00:39,153 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. 
limit=2.0 +2023-02-07 02:00:46,866 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0358, 1.2081, 1.1715, 0.8319, 1.1816, 1.0219, 0.1106, 1.1668], + device='cuda:0'), covar=tensor([0.0412, 0.0375, 0.0332, 0.0482, 0.0376, 0.0898, 0.0807, 0.0313], + device='cuda:0'), in_proj_covar=tensor([0.0447, 0.0383, 0.0334, 0.0437, 0.0368, 0.0525, 0.0386, 0.0412], + device='cuda:0'), out_proj_covar=tensor([1.2028e-04, 1.0044e-04, 8.8028e-05, 1.1556e-04, 9.7141e-05, 1.4874e-04, + 1.0434e-04, 1.0937e-04], device='cuda:0') +2023-02-07 02:01:01,226 INFO [train.py:901] (0/4) Epoch 21, batch 2400, loss[loss=0.2132, simple_loss=0.2903, pruned_loss=0.06806, over 7792.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2883, pruned_loss=0.06283, over 1609322.45 frames. ], batch size: 19, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:01:19,280 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 2.419e+02 2.926e+02 3.800e+02 6.132e+02, threshold=5.852e+02, percent-clipped=4.0 +2023-02-07 02:01:37,285 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:01:37,736 INFO [train.py:901] (0/4) Epoch 21, batch 2450, loss[loss=0.2083, simple_loss=0.287, pruned_loss=0.06479, over 8454.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2881, pruned_loss=0.06323, over 1603135.29 frames. ], batch size: 27, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:01:54,112 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 02:02:12,735 INFO [train.py:901] (0/4) Epoch 21, batch 2500, loss[loss=0.2045, simple_loss=0.286, pruned_loss=0.06147, over 8734.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.288, pruned_loss=0.06306, over 1603074.51 frames. ], batch size: 34, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:02:22,147 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164173.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:02:30,792 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.421e+02 3.174e+02 4.025e+02 1.090e+03, threshold=6.349e+02, percent-clipped=9.0 +2023-02-07 02:02:46,233 INFO [train.py:901] (0/4) Epoch 21, batch 2550, loss[loss=0.1831, simple_loss=0.2756, pruned_loss=0.04531, over 8195.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2892, pruned_loss=0.06346, over 1611533.05 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:02:54,990 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9393, 2.0198, 1.7445, 2.5432, 1.0942, 1.5543, 1.8170, 1.9990], + device='cuda:0'), covar=tensor([0.0778, 0.0898, 0.0959, 0.0480, 0.1143, 0.1383, 0.0872, 0.0841], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0195, 0.0244, 0.0212, 0.0204, 0.0244, 0.0250, 0.0208], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 02:03:22,648 INFO [train.py:901] (0/4) Epoch 21, batch 2600, loss[loss=0.233, simple_loss=0.3147, pruned_loss=0.07568, over 8627.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2877, pruned_loss=0.06291, over 1608063.02 frames. 
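
The `grad_scale` column reflects mixed-precision training: a `torch.cuda.amp.GradScaler` multiplies the loss before the backward pass and shrinks its scale when inf/nan gradients are detected, which is why the value steps down from 16.0 to 8.0 around batch 2500 above (and to 4.0 later in this log). A sketch of the standard pattern, not the recipe's exact code; the toy model and sizes are illustrative and a CUDA device is required:

```python
import torch

# Illustrative AMP training step in the style the recipe's train.py follows.
model = torch.nn.Linear(10, 10).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = torch.cuda.amp.GradScaler(init_scale=16.0)

x = torch.randn(4, 10, device="cuda")
with torch.cuda.amp.autocast():
    loss = model(x).pow(2).mean()

scaler.scale(loss).backward()   # backward on the scaled loss
scaler.step(optimizer)          # skips the update if inf/nan grads were found
scaler.update()                 # halves the scale after an overflow; this is
                                # the number the grad_scale column tracks
print(scaler.get_scale())
```
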
], batch size: 34, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:03:40,891 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.272e+02 2.670e+02 3.622e+02 6.852e+02, threshold=5.341e+02, percent-clipped=1.0 +2023-02-07 02:03:56,823 INFO [train.py:901] (0/4) Epoch 21, batch 2650, loss[loss=0.2279, simple_loss=0.3119, pruned_loss=0.07196, over 8288.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2876, pruned_loss=0.06316, over 1608309.23 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:04:33,147 INFO [train.py:901] (0/4) Epoch 21, batch 2700, loss[loss=0.2484, simple_loss=0.3252, pruned_loss=0.08585, over 8496.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2882, pruned_loss=0.06312, over 1609898.19 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:04:46,924 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:04:52,076 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.228e+02 2.697e+02 3.361e+02 7.045e+02, threshold=5.394e+02, percent-clipped=4.0 +2023-02-07 02:05:07,796 INFO [train.py:901] (0/4) Epoch 21, batch 2750, loss[loss=0.1955, simple_loss=0.2813, pruned_loss=0.05478, over 8239.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2891, pruned_loss=0.06389, over 1609754.39 frames. ], batch size: 22, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:05:36,819 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:05:42,229 INFO [train.py:901] (0/4) Epoch 21, batch 2800, loss[loss=0.193, simple_loss=0.2817, pruned_loss=0.05219, over 8032.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.06356, over 1614188.54 frames. ], batch size: 22, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:02,588 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.305e+02 2.813e+02 3.760e+02 7.507e+02, threshold=5.625e+02, percent-clipped=3.0 +2023-02-07 02:06:18,052 INFO [train.py:901] (0/4) Epoch 21, batch 2850, loss[loss=0.1863, simple_loss=0.25, pruned_loss=0.06128, over 7425.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2896, pruned_loss=0.06344, over 1610822.69 frames. ], batch size: 17, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:23,426 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:06:51,356 INFO [train.py:901] (0/4) Epoch 21, batch 2900, loss[loss=0.1758, simple_loss=0.2562, pruned_loss=0.04768, over 7928.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2905, pruned_loss=0.0642, over 1607795.15 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:56,992 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:09,749 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-07 02:07:11,683 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.599e+02 3.265e+02 4.069e+02 1.074e+03, threshold=6.531e+02, percent-clipped=8.0 +2023-02-07 02:07:11,856 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:18,890 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:28,722 INFO [train.py:901] (0/4) Epoch 21, batch 2950, loss[loss=0.189, simple_loss=0.2722, pruned_loss=0.05288, over 8616.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2886, pruned_loss=0.06339, over 1606133.75 frames. ], batch size: 31, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:07:44,449 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:08:02,298 INFO [train.py:901] (0/4) Epoch 21, batch 3000, loss[loss=0.1852, simple_loss=0.2627, pruned_loss=0.05386, over 7556.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2884, pruned_loss=0.06346, over 1606088.97 frames. ], batch size: 18, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:08:02,299 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 02:08:15,068 INFO [train.py:935] (0/4) Epoch 21, validation: loss=0.1742, simple_loss=0.2744, pruned_loss=0.03706, over 944034.00 frames. +2023-02-07 02:08:15,069 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 02:08:26,759 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164676.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:08:33,565 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.380e+02 2.886e+02 3.399e+02 6.002e+02, threshold=5.772e+02, percent-clipped=0.0 +2023-02-07 02:08:38,604 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.34 vs. limit=5.0 +2023-02-07 02:08:49,850 INFO [train.py:901] (0/4) Epoch 21, batch 3050, loss[loss=0.1737, simple_loss=0.2476, pruned_loss=0.0499, over 7201.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2882, pruned_loss=0.06319, over 1611581.48 frames. ], batch size: 16, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:08:59,333 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:09:25,494 INFO [train.py:901] (0/4) Epoch 21, batch 3100, loss[loss=0.2035, simple_loss=0.2943, pruned_loss=0.05631, over 8034.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2882, pruned_loss=0.06289, over 1613877.61 frames. 
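
At a fixed interval (batch 3000 in the records above) the loop pauses for a validation pass (`train.py:926/935`), reporting a separately averaged loss over held-out frames; note the validation numbers obey the same combination as the training totals (0.5 × 0.2744 + 0.03706 ≈ 0.1742). `train.py:936` then reports `torch.cuda.max_memory_allocated()` so peak GPU usage can be tracked. A hedged sketch of that pattern; the model interface and helper names here are illustrative, not the recipe's exact code:

```python
import torch

def compute_validation_loss(model, valid_loader):
    """Illustrative validation pass. The real recipe aggregates loss and
    frame counts across workers and logs simple_loss and pruned_loss
    separately; model(batch) -> (loss, num_frames) is a hypothetical
    interface standing in for the recipe's compute_loss helper."""
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = model(batch)
            tot_loss += loss.item()
            tot_frames += num_frames
    model.train()
    return tot_loss / max(tot_frames, 1)

# After validation, the log reports the GPU memory high-water mark:
if torch.cuda.is_available():
    peak_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
    print(f"Maximum memory allocated so far is {peak_mb}MB")
```
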
], batch size: 22, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:09:29,014 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164764.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:09:33,853 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7649, 1.7517, 2.3140, 1.4775, 1.3045, 2.2830, 0.3302, 1.4836], + device='cuda:0'), covar=tensor([0.1719, 0.1248, 0.0387, 0.1366, 0.2843, 0.0342, 0.2363, 0.1439], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0194, 0.0127, 0.0221, 0.0271, 0.0133, 0.0170, 0.0190], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 02:09:43,647 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.375e+02 2.980e+02 3.572e+02 8.800e+02, threshold=5.960e+02, percent-clipped=5.0 +2023-02-07 02:09:59,128 INFO [train.py:901] (0/4) Epoch 21, batch 3150, loss[loss=0.224, simple_loss=0.3027, pruned_loss=0.0726, over 8492.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2877, pruned_loss=0.06255, over 1612161.33 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:10:08,642 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:19,447 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:22,083 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9543, 1.6217, 3.4112, 1.4277, 2.3556, 3.7548, 3.8766, 3.2405], + device='cuda:0'), covar=tensor([0.1143, 0.1768, 0.0350, 0.2214, 0.1052, 0.0230, 0.0469, 0.0531], + device='cuda:0'), in_proj_covar=tensor([0.0290, 0.0320, 0.0288, 0.0314, 0.0304, 0.0261, 0.0410, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:10:26,875 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:34,953 INFO [train.py:901] (0/4) Epoch 21, batch 3200, loss[loss=0.2212, simple_loss=0.2981, pruned_loss=0.07222, over 8078.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2891, pruned_loss=0.0632, over 1616035.84 frames. ], batch size: 21, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:10:54,107 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.324e+02 2.650e+02 3.384e+02 7.808e+02, threshold=5.299e+02, percent-clipped=1.0 +2023-02-07 02:10:55,690 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164888.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:56,572 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 02:11:05,563 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3368, 1.6933, 4.4930, 1.8145, 3.9881, 3.7636, 4.0516, 3.9522], + device='cuda:0'), covar=tensor([0.0576, 0.4436, 0.0555, 0.4081, 0.1144, 0.0975, 0.0622, 0.0657], + device='cuda:0'), in_proj_covar=tensor([0.0627, 0.0641, 0.0685, 0.0624, 0.0705, 0.0603, 0.0602, 0.0670], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:11:09,479 INFO [train.py:901] (0/4) Epoch 21, batch 3250, loss[loss=0.2205, simple_loss=0.3018, pruned_loss=0.06961, over 8294.00 frames. 
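
The `optim.py:369` records print five numbers, the minimum, 25th, 50th and 75th percentiles, and maximum of recently observed gradient norms, followed by a clipping threshold. With `Clipping_scale=2.0` the threshold is twice the running median, which the log itself confirms: 2 × 2.980e+02 = 5.960e+02 in the record above. `percent-clipped` is the fraction of recent steps whose norm exceeded that threshold. A small sketch of the same bookkeeping, assuming this median-based scheme (the actual ScaledAdam implementation in icefall's `optim.py` differs in detail):

```python
import torch

clipping_scale = 2.0
# Example norms chosen to reproduce the quartiles in the record above;
# the real optimizer keeps a much longer history.
recent_norms = torch.tensor([159.0, 237.5, 298.0, 357.2, 880.0])

quartiles = torch.quantile(recent_norms,
                           torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
threshold = clipping_scale * quartiles[2]  # 2 x median = 596.0 here
percent_clipped = (recent_norms > threshold).float().mean() * 100

print(quartiles.tolist(), threshold.item(), percent_clipped.item())
```
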
], tot_loss[loss=0.2083, simple_loss=0.2898, pruned_loss=0.06334, over 1618324.26 frames. ], batch size: 23, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:11:12,436 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:23,940 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:30,775 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:44,788 INFO [train.py:901] (0/4) Epoch 21, batch 3300, loss[loss=0.1774, simple_loss=0.2728, pruned_loss=0.04098, over 8262.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2884, pruned_loss=0.06216, over 1616342.56 frames. ], batch size: 24, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:05,210 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.295e+02 2.742e+02 3.217e+02 7.829e+02, threshold=5.483e+02, percent-clipped=4.0 +2023-02-07 02:12:12,055 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7807, 2.3443, 4.3626, 1.5932, 3.2155, 2.3107, 1.9054, 3.1426], + device='cuda:0'), covar=tensor([0.1910, 0.2800, 0.0756, 0.4596, 0.1692, 0.3219, 0.2363, 0.2188], + device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0600, 0.0559, 0.0640, 0.0645, 0.0595, 0.0536, 0.0637], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:12:20,623 INFO [train.py:901] (0/4) Epoch 21, batch 3350, loss[loss=0.2348, simple_loss=0.3094, pruned_loss=0.08013, over 8604.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2876, pruned_loss=0.0621, over 1615526.88 frames. ], batch size: 31, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:28,059 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165020.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:45,356 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165045.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:52,364 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:54,956 INFO [train.py:901] (0/4) Epoch 21, batch 3400, loss[loss=0.2265, simple_loss=0.3024, pruned_loss=0.07529, over 8514.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2878, pruned_loss=0.06267, over 1611781.76 frames. 
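
The `zipformer.py:2431` tensors are a periodic diagnostic: the first tensor in each record appears to be the entropy of each attention head's softmax weights, averaged over a batch. Entries near zero (for example the 0.1106 value in one of the rows further up) flag a head that attends almost deterministically, while large entries flag near-uniform heads. A sketch of how such an entropy could be computed; this is an interpretation of the logged name, and the exact reduction icefall uses may differ:

```python
import torch

def attention_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """Entropy of attention distributions, one value per head.

    attn_weights: (num_heads, query_len, key_len), rows summing to 1.
    Returns a (num_heads,) tensor of mean entropy over queries. A
    diagnostic sketch only, not zipformer.py's exact computation.
    """
    eps = 1.0e-20
    ent = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return ent.mean(dim=-1)

w = torch.softmax(torch.randn(8, 16, 16), dim=-1)  # 8 heads, toy sizes
print(attention_entropy(w))
```
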
], batch size: 26, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:13:15,624 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 2.300e+02 2.821e+02 3.884e+02 1.046e+03, threshold=5.643e+02, percent-clipped=8.0 +2023-02-07 02:13:16,553 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6025, 1.9660, 3.0695, 1.4476, 2.2856, 2.0325, 1.7502, 2.2658], + device='cuda:0'), covar=tensor([0.1794, 0.2627, 0.0870, 0.4488, 0.1928, 0.3112, 0.2202, 0.2501], + device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0600, 0.0561, 0.0642, 0.0646, 0.0596, 0.0537, 0.0638], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:13:20,661 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:31,284 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:31,888 INFO [train.py:901] (0/4) Epoch 21, batch 3450, loss[loss=0.2089, simple_loss=0.281, pruned_loss=0.06846, over 7798.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2866, pruned_loss=0.06212, over 1606496.59 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:13:36,359 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 02:13:38,102 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:49,298 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165135.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:53,298 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:14:05,183 INFO [train.py:901] (0/4) Epoch 21, batch 3500, loss[loss=0.2203, simple_loss=0.3078, pruned_loss=0.06643, over 8488.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2886, pruned_loss=0.06315, over 1606833.26 frames. ], batch size: 49, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:14:10,624 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 02:14:24,648 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.436e+02 2.745e+02 3.695e+02 8.606e+02, threshold=5.490e+02, percent-clipped=3.0 +2023-02-07 02:14:41,283 INFO [train.py:901] (0/4) Epoch 21, batch 3550, loss[loss=0.1944, simple_loss=0.2633, pruned_loss=0.06276, over 7230.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2897, pruned_loss=0.0636, over 1609480.76 frames. ], batch size: 16, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:14:51,650 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165223.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:15,602 INFO [train.py:901] (0/4) Epoch 21, batch 3600, loss[loss=0.1854, simple_loss=0.2775, pruned_loss=0.04663, over 8340.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2899, pruned_loss=0.06352, over 1609352.44 frames. 
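
The frequent `zipformer.py:1185` records trace the per-layer stochastic layer-skipping machinery: each encoder layer carries a `(warmup_begin, warmup_end)` window in batch counts, and the module periodically logs its window, the global `batch_count`, and which layers it decided to bypass on this step. In this excerpt it is almost always `num_to_drop=0`, with occasional `num_to_drop=1` entries later in the log. A sketch of one plausible rule under those observations; the windows and both probabilities below are assumptions, not icefall's exact values:

```python
import random

def choose_layers_to_drop(batch_count: float,
                          windows: list[tuple[float, float]],
                          warmup_prob: float = 0.5,
                          residual_prob: float = 0.005) -> set[int]:
    """Illustrative layer-drop rule: a layer inside its warmup window is
    bypassed aggressively; afterwards only rarely (matching the sporadic
    num_to_drop=1 records in this log)."""
    dropped = set()
    for i, (begin, end) in enumerate(windows):
        p = warmup_prob if begin <= batch_count < end else residual_prob
        if random.random() < p:
            dropped.add(i)
    return dropped

windows = [(666.7, 1333.3), (1333.3, 2000.0), (2666.7, 3333.3)]
print(choose_layers_to_drop(batch_count=164913.0, windows=windows))  # usually set()
```
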
], batch size: 26, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:15:34,163 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.347e+02 2.942e+02 3.699e+02 7.087e+02, threshold=5.884e+02, percent-clipped=2.0 +2023-02-07 02:15:44,472 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:51,140 INFO [train.py:901] (0/4) Epoch 21, batch 3650, loss[loss=0.1772, simple_loss=0.2599, pruned_loss=0.04726, over 7551.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.289, pruned_loss=0.06317, over 1610850.67 frames. ], batch size: 18, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:15:52,685 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:00,720 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5551, 2.3107, 3.2674, 2.4745, 2.8981, 2.4760, 2.2180, 1.8235], + device='cuda:0'), covar=tensor([0.4991, 0.5045, 0.1751, 0.3674, 0.2668, 0.2828, 0.1918, 0.5216], + device='cuda:0'), in_proj_covar=tensor([0.0935, 0.0970, 0.0793, 0.0932, 0.0992, 0.0884, 0.0743, 0.0820], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 02:16:03,307 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165326.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:10,799 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:16,767 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 02:16:25,003 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9250, 2.4140, 3.5778, 1.9238, 1.7326, 3.5120, 0.9060, 2.2087], + device='cuda:0'), covar=tensor([0.1344, 0.1244, 0.0253, 0.1634, 0.2819, 0.0331, 0.2251, 0.1426], + device='cuda:0'), in_proj_covar=tensor([0.0187, 0.0195, 0.0128, 0.0221, 0.0272, 0.0134, 0.0170, 0.0190], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 02:16:25,986 INFO [train.py:901] (0/4) Epoch 21, batch 3700, loss[loss=0.2558, simple_loss=0.3338, pruned_loss=0.0889, over 8325.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2888, pruned_loss=0.06313, over 1613805.86 frames. ], batch size: 25, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:16:43,086 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-07 02:16:44,015 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.402e+02 2.885e+02 3.854e+02 8.848e+02, threshold=5.771e+02, percent-clipped=5.0 +2023-02-07 02:16:45,637 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6594, 1.8216, 2.7086, 1.4648, 1.9627, 1.9487, 1.7197, 1.8502], + device='cuda:0'), covar=tensor([0.1667, 0.2291, 0.0798, 0.4189, 0.1796, 0.3027, 0.2002, 0.2240], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0595, 0.0556, 0.0636, 0.0642, 0.0591, 0.0533, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:16:47,624 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165391.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:59,579 INFO [train.py:901] (0/4) Epoch 21, batch 3750, loss[loss=0.2375, simple_loss=0.3076, pruned_loss=0.08365, over 7808.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.289, pruned_loss=0.06301, over 1616074.59 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:17:04,499 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:07,465 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 02:17:32,209 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3555, 2.0818, 2.7224, 2.2827, 2.7075, 2.3202, 2.0708, 1.4904], + device='cuda:0'), covar=tensor([0.5149, 0.4731, 0.1885, 0.3301, 0.2296, 0.2869, 0.1839, 0.4823], + device='cuda:0'), in_proj_covar=tensor([0.0932, 0.0967, 0.0790, 0.0929, 0.0988, 0.0883, 0.0739, 0.0816], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 02:17:36,650 INFO [train.py:901] (0/4) Epoch 21, batch 3800, loss[loss=0.2397, simple_loss=0.324, pruned_loss=0.07773, over 8553.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2888, pruned_loss=0.06289, over 1617021.39 frames. ], batch size: 50, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:17:50,387 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165479.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:54,319 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:54,906 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.401e+02 2.925e+02 3.673e+02 6.793e+02, threshold=5.851e+02, percent-clipped=2.0 +2023-02-07 02:18:07,126 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:18:08,528 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8502, 1.3875, 1.6171, 1.2187, 0.9203, 1.3580, 1.6014, 1.3687], + device='cuda:0'), covar=tensor([0.0547, 0.1268, 0.1733, 0.1570, 0.0653, 0.1573, 0.0699, 0.0693], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0099, 0.0163, 0.0113, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 02:18:10,362 INFO [train.py:901] (0/4) Epoch 21, batch 3850, loss[loss=0.2098, simple_loss=0.282, pruned_loss=0.06873, over 5868.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2885, pruned_loss=0.06245, over 1613756.79 frames. 
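
The `train.py:1067` warnings show the duration filter at work: utterances shorter than about a second or longer than 20 seconds (like the 25.33 s cut above, or the 0.75 s one earlier) are dropped before batching so they cannot destabilize training or exhaust GPU memory. The filter in the librispeech recipes looks essentially like this; check the recipe's `train.py` for the exact thresholds:

```python
import logging
from lhotse.cut import Cut

def remove_short_and_long_utt(c: Cut) -> bool:
    # Keep utterances roughly between 1 and 20 seconds; the warnings in
    # this log (0.75 s, 25.3 s, 33.0 s, ...) are cuts that failed the test.
    if c.duration < 1.0 or c.duration > 20.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# Applied by the recipe before building the sampler, e.g.:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```
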
], batch size: 13, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:18:15,538 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 02:18:18,549 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 02:18:25,783 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9745, 1.5705, 6.1669, 2.1019, 5.6048, 5.2131, 5.6627, 5.5891], + device='cuda:0'), covar=tensor([0.0537, 0.5019, 0.0357, 0.3966, 0.0926, 0.0822, 0.0485, 0.0482], + device='cuda:0'), in_proj_covar=tensor([0.0636, 0.0652, 0.0698, 0.0635, 0.0717, 0.0614, 0.0614, 0.0683], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:18:46,103 INFO [train.py:901] (0/4) Epoch 21, batch 3900, loss[loss=0.1993, simple_loss=0.2906, pruned_loss=0.05396, over 8250.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2888, pruned_loss=0.06223, over 1619656.47 frames. ], batch size: 24, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:18:49,786 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6501, 2.3383, 4.2763, 1.5156, 3.0104, 2.3291, 1.9044, 2.9631], + device='cuda:0'), covar=tensor([0.1922, 0.2562, 0.0770, 0.4394, 0.1716, 0.3024, 0.2233, 0.2368], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0595, 0.0555, 0.0636, 0.0642, 0.0591, 0.0533, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:18:53,040 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:19:05,122 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.182e+02 2.809e+02 3.459e+02 6.713e+02, threshold=5.619e+02, percent-clipped=4.0 +2023-02-07 02:19:12,742 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5408, 1.3021, 4.7511, 1.8016, 4.2348, 3.9864, 4.2716, 4.1919], + device='cuda:0'), covar=tensor([0.0563, 0.5008, 0.0485, 0.4100, 0.1127, 0.1007, 0.0593, 0.0640], + device='cuda:0'), in_proj_covar=tensor([0.0633, 0.0648, 0.0695, 0.0632, 0.0714, 0.0612, 0.0610, 0.0681], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:19:14,936 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165600.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:19:20,719 INFO [train.py:901] (0/4) Epoch 21, batch 3950, loss[loss=0.226, simple_loss=0.2966, pruned_loss=0.07771, over 7812.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2882, pruned_loss=0.06172, over 1616729.83 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:19:55,186 INFO [train.py:901] (0/4) Epoch 21, batch 4000, loss[loss=0.1745, simple_loss=0.2516, pruned_loss=0.04872, over 7798.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2886, pruned_loss=0.06229, over 1614858.08 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:20:15,749 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.370e+02 2.936e+02 3.785e+02 6.204e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-07 02:20:31,258 INFO [train.py:901] (0/4) Epoch 21, batch 4050, loss[loss=0.2215, simple_loss=0.2934, pruned_loss=0.07482, over 8298.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2896, pruned_loss=0.06322, over 1612532.53 frames. 
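
The `scaling.py:679` lines report the Whitening diagnostic used inside the Zipformer: for groups of channels it measures how far the feature covariance is from a multiple of the identity, as a ratio that equals 1.0 for perfectly white features, and a penalty activates only when the metric exceeds the logged limit (2.0 for the grouped 96/192-channel checks here, 5.0 for the single 256-channel group). A sketch of one way to compute such a metric via covariance eigenvalues, under the assumption that it is the eigenvalue-spread ratio E[λ²]/E[λ]²:

```python
import torch

def whitening_metric(x: torch.Tensor) -> float:
    """Eigenvalue-spread ratio of the feature covariance.

    x: (num_frames, num_channels). Returns E[lambda^2] / E[lambda]^2,
    which is 1.0 iff the covariance is a multiple of the identity.
    A sketch of the idea behind scaling.py's Whitening metric, not its
    exact grouped, eigendecomposition-free implementation.
    """
    cov = x.T @ x / x.shape[0]
    eigs = torch.linalg.eigvalsh(cov)
    return (eigs.pow(2).mean() / eigs.mean().pow(2)).item()

x = torch.randn(1000, 96)   # nearly white features
print(whitening_metric(x))  # close to 1.0, comfortably under limit=2.0
```
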
], batch size: 23, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:20:46,224 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0393, 1.8931, 3.5019, 2.4620, 2.8988, 1.9343, 1.6390, 1.7234], + device='cuda:0'), covar=tensor([0.7170, 0.6756, 0.1979, 0.3876, 0.3388, 0.4565, 0.3027, 0.5999], + device='cuda:0'), in_proj_covar=tensor([0.0939, 0.0974, 0.0797, 0.0939, 0.0993, 0.0890, 0.0747, 0.0822], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 02:21:04,769 INFO [train.py:901] (0/4) Epoch 21, batch 4100, loss[loss=0.214, simple_loss=0.2913, pruned_loss=0.06833, over 8470.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2893, pruned_loss=0.06304, over 1612872.86 frames. ], batch size: 28, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:21:11,135 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:21:11,324 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.56 vs. limit=5.0 +2023-02-07 02:21:15,323 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4097, 1.3656, 4.5779, 1.8125, 4.1066, 3.8478, 4.1301, 4.0533], + device='cuda:0'), covar=tensor([0.0570, 0.4772, 0.0463, 0.3714, 0.1016, 0.0916, 0.0604, 0.0644], + device='cuda:0'), in_proj_covar=tensor([0.0635, 0.0650, 0.0699, 0.0633, 0.0713, 0.0612, 0.0614, 0.0682], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:21:24,839 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.510e+02 3.105e+02 3.860e+02 6.931e+02, threshold=6.209e+02, percent-clipped=6.0 +2023-02-07 02:21:41,894 INFO [train.py:901] (0/4) Epoch 21, batch 4150, loss[loss=0.2204, simple_loss=0.2913, pruned_loss=0.07476, over 7642.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2895, pruned_loss=0.06335, over 1614037.48 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:21:50,121 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165821.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:13,876 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165856.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:15,095 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 02:22:15,762 INFO [train.py:901] (0/4) Epoch 21, batch 4200, loss[loss=0.1679, simple_loss=0.256, pruned_loss=0.03989, over 7976.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2887, pruned_loss=0.06294, over 1615106.54 frames. ], batch size: 21, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:22:16,171 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 02:22:30,499 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:33,698 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.320e+02 2.907e+02 3.705e+02 7.802e+02, threshold=5.814e+02, percent-clipped=2.0 +2023-02-07 02:22:37,052 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 02:22:41,244 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7510, 1.8282, 1.6534, 2.3084, 1.0666, 1.4538, 1.6499, 1.8001], + device='cuda:0'), covar=tensor([0.0820, 0.0802, 0.1007, 0.0446, 0.1166, 0.1371, 0.0829, 0.0778], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0199, 0.0247, 0.0215, 0.0207, 0.0249, 0.0254, 0.0211], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 02:22:50,812 INFO [train.py:901] (0/4) Epoch 21, batch 4250, loss[loss=0.2071, simple_loss=0.2831, pruned_loss=0.06557, over 7658.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2898, pruned_loss=0.06336, over 1618828.83 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:22:50,961 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6974, 1.5706, 4.8898, 1.8904, 4.3531, 4.0878, 4.4017, 4.3158], + device='cuda:0'), covar=tensor([0.0561, 0.4828, 0.0412, 0.3923, 0.0988, 0.0916, 0.0603, 0.0658], + device='cuda:0'), in_proj_covar=tensor([0.0634, 0.0646, 0.0694, 0.0632, 0.0710, 0.0609, 0.0610, 0.0678], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:22:53,714 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:23:26,539 INFO [train.py:901] (0/4) Epoch 21, batch 4300, loss[loss=0.2402, simple_loss=0.3319, pruned_loss=0.0743, over 8341.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06294, over 1616636.76 frames. ], batch size: 26, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:23:44,465 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.284e+02 2.728e+02 3.396e+02 7.954e+02, threshold=5.457e+02, percent-clipped=4.0 +2023-02-07 02:23:54,142 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-166000.pt +2023-02-07 02:24:01,642 INFO [train.py:901] (0/4) Epoch 21, batch 4350, loss[loss=0.2142, simple_loss=0.2984, pruned_loss=0.06498, over 7980.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.29, pruned_loss=0.06311, over 1621185.59 frames. ], batch size: 21, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:11,720 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 02:24:15,221 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:24:30,488 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4683, 2.6896, 3.1384, 1.7570, 3.3908, 2.1948, 1.5304, 2.2205], + device='cuda:0'), covar=tensor([0.0733, 0.0338, 0.0259, 0.0723, 0.0456, 0.0721, 0.0889, 0.0499], + device='cuda:0'), in_proj_covar=tensor([0.0453, 0.0385, 0.0340, 0.0441, 0.0373, 0.0532, 0.0390, 0.0414], + device='cuda:0'), out_proj_covar=tensor([1.2177e-04, 1.0103e-04, 8.9515e-05, 1.1651e-04, 9.8250e-05, 1.5077e-04, + 1.0534e-04, 1.0995e-04], device='cuda:0') +2023-02-07 02:24:36,823 INFO [train.py:901] (0/4) Epoch 21, batch 4400, loss[loss=0.1933, simple_loss=0.287, pruned_loss=0.04984, over 7981.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2891, pruned_loss=0.06306, over 1619202.39 frames. 
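
The `checkpoint.py:75` records (checkpoint-164000.pt earlier in this excerpt, checkpoint-166000.pt here) show batch-level checkpointing: in addition to the per-epoch `epoch-N.pt` files, the trainer snapshots model, optimizer, scheduler, sampler and grad-scaler state every fixed number of training batches, every 2000 judging by the filenames. A hedged sketch of the trigger; the variable names are illustrative:

```python
# Illustrative batch-level checkpoint trigger; the real logic lives in
# icefall's train.py / checkpoint.py and saves optimizer, scheduler,
# sampler and GradScaler state alongside the model weights.
save_every_n = 2000  # inferred from checkpoint-164000.pt / checkpoint-166000.pt

def maybe_save(batch_idx_train: int, save_fn) -> None:
    if batch_idx_train % save_every_n == 0:
        save_fn(f"pruned_transducer_stateless7_streaming/exp/v1/"
                f"checkpoint-{batch_idx_train}.pt")

maybe_save(166000, save_fn=print)  # would write checkpoint-166000.pt
```
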
], batch size: 21, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:45,730 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166072.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:24:54,403 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 02:24:55,055 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.482e+02 3.095e+02 3.863e+02 7.424e+02, threshold=6.191e+02, percent-clipped=10.0 +2023-02-07 02:25:08,088 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9572, 2.1666, 1.7634, 2.7477, 1.3537, 1.5726, 1.9327, 2.1005], + device='cuda:0'), covar=tensor([0.0717, 0.0807, 0.0927, 0.0317, 0.1090, 0.1304, 0.0834, 0.0697], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0199, 0.0246, 0.0215, 0.0208, 0.0248, 0.0255, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 02:25:10,664 INFO [train.py:901] (0/4) Epoch 21, batch 4450, loss[loss=0.1948, simple_loss=0.2684, pruned_loss=0.06063, over 7222.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2893, pruned_loss=0.06309, over 1619554.44 frames. ], batch size: 16, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:25:12,781 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166112.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:15,617 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8932, 3.7634, 2.0516, 2.8820, 2.5847, 1.8523, 2.5544, 3.1161], + device='cuda:0'), covar=tensor([0.1743, 0.0404, 0.1386, 0.0793, 0.0941, 0.1787, 0.1350, 0.0983], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0234, 0.0334, 0.0308, 0.0298, 0.0333, 0.0343, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:25:31,546 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1525, 1.6247, 1.8186, 1.5288, 1.0583, 1.6364, 1.9015, 1.7113], + device='cuda:0'), covar=tensor([0.0447, 0.1182, 0.1567, 0.1371, 0.0563, 0.1386, 0.0627, 0.0607], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0162, 0.0112, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 02:25:45,428 INFO [train.py:901] (0/4) Epoch 21, batch 4500, loss[loss=0.2075, simple_loss=0.307, pruned_loss=0.05396, over 8586.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2885, pruned_loss=0.06245, over 1617557.68 frames. ], batch size: 39, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:25:50,235 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 02:25:50,295 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:50,363 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:05,011 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.688e+02 3.191e+02 4.415e+02 1.086e+03, threshold=6.382e+02, percent-clipped=9.0 +2023-02-07 02:26:18,169 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3291, 2.3032, 1.7156, 2.0974, 1.9528, 1.5869, 1.8579, 1.8496], + device='cuda:0'), covar=tensor([0.1663, 0.0505, 0.1379, 0.0623, 0.0757, 0.1541, 0.1137, 0.1162], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0234, 0.0334, 0.0309, 0.0297, 0.0333, 0.0343, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:26:20,601 INFO [train.py:901] (0/4) Epoch 21, batch 4550, loss[loss=0.1861, simple_loss=0.2782, pruned_loss=0.04701, over 8033.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2888, pruned_loss=0.06224, over 1617691.21 frames. ], batch size: 22, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:26:33,032 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166227.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:54,697 INFO [train.py:901] (0/4) Epoch 21, batch 4600, loss[loss=0.1915, simple_loss=0.2802, pruned_loss=0.05135, over 8190.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2885, pruned_loss=0.06287, over 1615370.41 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:10,556 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166280.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:13,352 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166284.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:15,245 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.551e+02 3.310e+02 4.080e+02 7.820e+02, threshold=6.621e+02, percent-clipped=4.0 +2023-02-07 02:27:30,339 INFO [train.py:901] (0/4) Epoch 21, batch 4650, loss[loss=0.1793, simple_loss=0.257, pruned_loss=0.05082, over 7422.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2883, pruned_loss=0.06272, over 1612812.90 frames. ], batch size: 17, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:30,538 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:41,210 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:28:03,637 INFO [train.py:901] (0/4) Epoch 21, batch 4700, loss[loss=0.1955, simple_loss=0.2648, pruned_loss=0.06313, over 7705.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2879, pruned_loss=0.06282, over 1610266.04 frames. ], batch size: 18, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:28:23,893 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.420e+02 2.373e+02 2.801e+02 3.877e+02 1.145e+03, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 02:28:30,891 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. 
limit=5.0 +2023-02-07 02:28:40,135 INFO [train.py:901] (0/4) Epoch 21, batch 4750, loss[loss=0.213, simple_loss=0.3006, pruned_loss=0.06274, over 8530.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2879, pruned_loss=0.06271, over 1610594.60 frames. ], batch size: 49, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:28:44,972 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166416.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:28:52,937 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 02:28:55,087 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 02:29:14,176 INFO [train.py:901] (0/4) Epoch 21, batch 4800, loss[loss=0.2219, simple_loss=0.2885, pruned_loss=0.07768, over 8087.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2881, pruned_loss=0.06295, over 1606096.69 frames. ], batch size: 21, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:29,969 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0338, 2.1014, 1.8441, 2.7625, 1.2895, 1.7022, 1.9442, 2.2027], + device='cuda:0'), covar=tensor([0.0735, 0.0835, 0.0926, 0.0354, 0.1142, 0.1278, 0.0951, 0.0757], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0197, 0.0246, 0.0215, 0.0207, 0.0245, 0.0254, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 02:29:30,711 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166483.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:33,191 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.407e+02 2.819e+02 3.849e+02 8.316e+02, threshold=5.639e+02, percent-clipped=5.0 +2023-02-07 02:29:42,736 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:43,986 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 02:29:48,838 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166508.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:49,314 INFO [train.py:901] (0/4) Epoch 21, batch 4850, loss[loss=0.221, simple_loss=0.3008, pruned_loss=0.07056, over 8363.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2887, pruned_loss=0.06261, over 1612692.56 frames. ], batch size: 26, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:49,390 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:03,254 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:05,912 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166531.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:30:09,302 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166536.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:17,920 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:20,271 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.23 vs. 
limit=5.0 +2023-02-07 02:30:24,635 INFO [train.py:901] (0/4) Epoch 21, batch 4900, loss[loss=0.1977, simple_loss=0.2952, pruned_loss=0.05015, over 8361.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2889, pruned_loss=0.06252, over 1616021.41 frames. ], batch size: 24, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:30:26,189 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:43,060 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.429e+02 3.059e+02 4.014e+02 7.599e+02, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 02:30:58,608 INFO [train.py:901] (0/4) Epoch 21, batch 4950, loss[loss=0.1731, simple_loss=0.2437, pruned_loss=0.05126, over 7701.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.289, pruned_loss=0.06232, over 1619860.23 frames. ], batch size: 18, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:31:09,512 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:12,171 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0504, 1.6254, 1.3627, 1.5625, 1.2989, 1.2353, 1.3014, 1.3424], + device='cuda:0'), covar=tensor([0.1168, 0.0496, 0.1336, 0.0575, 0.0816, 0.1516, 0.0972, 0.0788], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0233, 0.0331, 0.0307, 0.0295, 0.0331, 0.0340, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:31:34,523 INFO [train.py:901] (0/4) Epoch 21, batch 5000, loss[loss=0.2008, simple_loss=0.273, pruned_loss=0.06427, over 8137.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2877, pruned_loss=0.06214, over 1612293.44 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:31:40,570 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6918, 1.9545, 2.0465, 1.5655, 2.1350, 1.4633, 0.6699, 1.8921], + device='cuda:0'), covar=tensor([0.0405, 0.0268, 0.0197, 0.0352, 0.0293, 0.0599, 0.0616, 0.0207], + device='cuda:0'), in_proj_covar=tensor([0.0451, 0.0385, 0.0339, 0.0441, 0.0373, 0.0531, 0.0389, 0.0413], + device='cuda:0'), out_proj_covar=tensor([1.2119e-04, 1.0106e-04, 8.9162e-05, 1.1642e-04, 9.8378e-05, 1.5046e-04, + 1.0515e-04, 1.0956e-04], device='cuda:0') +2023-02-07 02:31:41,052 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:52,856 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.262e+02 2.770e+02 3.475e+02 7.586e+02, threshold=5.540e+02, percent-clipped=2.0 +2023-02-07 02:32:07,634 INFO [train.py:901] (0/4) Epoch 21, batch 5050, loss[loss=0.1771, simple_loss=0.2578, pruned_loss=0.04826, over 7704.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2873, pruned_loss=0.06199, over 1613544.70 frames. 
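
The slowly decaying `lr` column (3.63e-03 at the top of this excerpt, 3.59e-03 by batch 5000 of epoch 21) is the Eden schedule from icefall's `optim.py`, which decays with both the global batch index and the epoch. A sketch of the rule, with the usual defaults stated as assumptions; plugging in the batch_count shown in the surrounding records reproduces the logged value:

```python
# Eden learning-rate rule as in icefall's optim.py, assuming the defaults
# base_lr=0.05, lr_batches=5000, lr_epochs=3.5, with the epoch counted from 0.
def eden_lr(batch: float, epoch: float,
            base_lr: float = 0.05,
            lr_batches: float = 5000.0,
            lr_epochs: float = 3.5) -> float:
    batch_factor = ((batch**2 + lr_batches**2) / lr_batches**2) ** -0.25
    epoch_factor = ((epoch**2 + lr_epochs**2) / lr_epochs**2) ** -0.25
    return base_lr * batch_factor * epoch_factor

# Around global batch 166700 in the 21st epoch (index 20) this gives
# ~3.59e-03, matching the lr column in the records above.
print(eden_lr(batch=166700, epoch=20))
```
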
], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:19,810 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5624, 2.5475, 1.8525, 2.3270, 2.2310, 1.6289, 2.0033, 2.1549], + device='cuda:0'), covar=tensor([0.1609, 0.0415, 0.1251, 0.0610, 0.0723, 0.1545, 0.1114, 0.1093], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0233, 0.0332, 0.0306, 0.0295, 0.0330, 0.0341, 0.0313], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:32:23,024 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 02:32:37,034 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8648, 1.5480, 5.9774, 2.4250, 5.3741, 5.0610, 5.4844, 5.4007], + device='cuda:0'), covar=tensor([0.0476, 0.5143, 0.0331, 0.3515, 0.0977, 0.0786, 0.0579, 0.0478], + device='cuda:0'), in_proj_covar=tensor([0.0632, 0.0643, 0.0695, 0.0629, 0.0708, 0.0607, 0.0606, 0.0679], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:32:38,362 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:32:42,946 INFO [train.py:901] (0/4) Epoch 21, batch 5100, loss[loss=0.2127, simple_loss=0.2775, pruned_loss=0.07398, over 7525.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2883, pruned_loss=0.06252, over 1614834.63 frames. ], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:57,472 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9870, 1.7264, 3.3925, 1.6110, 2.4329, 3.7588, 3.8259, 3.2696], + device='cuda:0'), covar=tensor([0.1204, 0.1691, 0.0356, 0.2057, 0.1081, 0.0220, 0.0573, 0.0516], + device='cuda:0'), in_proj_covar=tensor([0.0289, 0.0318, 0.0286, 0.0312, 0.0306, 0.0262, 0.0412, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 02:32:59,528 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:00,921 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:02,730 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.499e+02 3.045e+02 3.729e+02 1.083e+03, threshold=6.090e+02, percent-clipped=5.0 +2023-02-07 02:33:02,974 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166787.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:33:17,657 INFO [train.py:901] (0/4) Epoch 21, batch 5150, loss[loss=0.197, simple_loss=0.2848, pruned_loss=0.05459, over 8515.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2877, pruned_loss=0.06261, over 1613683.85 frames. ], batch size: 26, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:19,977 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166812.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:33:40,919 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:52,921 INFO [train.py:901] (0/4) Epoch 21, batch 5200, loss[loss=0.2136, simple_loss=0.2966, pruned_loss=0.06533, over 8492.00 frames. 
], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06199, over 1616713.81 frames. ], batch size: 28, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:34:01,086 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:08,930 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:13,421 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.417e+02 2.893e+02 3.464e+02 9.071e+02, threshold=5.787e+02, percent-clipped=3.0 +2023-02-07 02:34:17,445 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166893.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:17,849 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 02:34:22,858 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 02:34:25,852 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166905.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:28,017 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 02:34:28,358 INFO [train.py:901] (0/4) Epoch 21, batch 5250, loss[loss=0.1599, simple_loss=0.2359, pruned_loss=0.04192, over 7537.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2885, pruned_loss=0.06248, over 1617710.79 frames. ], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:34:53,835 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:00,977 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:01,476 INFO [train.py:901] (0/4) Epoch 21, batch 5300, loss[loss=0.1929, simple_loss=0.272, pruned_loss=0.05688, over 7653.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2889, pruned_loss=0.06265, over 1613527.73 frames. 
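
Cut IDs carrying an `_sp0.9` or `_sp1.1` suffix (like 7859-102521-0017-21930_sp0.9 just above) are speed-perturbed copies: the standard 3-fold augmentation trains on 0.9x, 1.0x and 1.1x versions of every utterance, and slowing a recording stretches it (a 24.525 s utterance becomes 24.525 / 0.9 = 27.25 s at 0.9x), which is how several `_sp0.9` cuts end up tripping the 20 s duration filter. With lhotse the augmentation is a single call; the manifest path below is illustrative:

```python
from lhotse import CutSet

# Standard 3-fold speed perturbation as used by the librispeech recipes;
# the manifest filename here is an assumption, not necessarily this repo's.
cuts = CutSet.from_file("data/fbank/librispeech_cuts_train-all-shuf.jsonl.gz")
cuts_sp = cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1)
# Durations scale by 1/factor: a 24.525 s utterance yields a 27.25 s cut
# at 0.9x speed, explaining the excluded _sp0.9 cuts in this log.
```
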
], batch size: 19, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:06,318 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8336, 2.2601, 3.4956, 1.7440, 1.7042, 3.4219, 0.6914, 2.1467], + device='cuda:0'), covar=tensor([0.1982, 0.1879, 0.0369, 0.2309, 0.3176, 0.0545, 0.2551, 0.1767], + device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0197, 0.0128, 0.0224, 0.0273, 0.0137, 0.0173, 0.0193], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 02:35:21,119 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:21,589 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.349e+02 2.996e+02 3.802e+02 6.845e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-07 02:35:25,178 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6752, 2.0203, 2.1759, 1.2711, 2.2324, 1.5010, 0.6423, 1.9385], + device='cuda:0'), covar=tensor([0.0612, 0.0310, 0.0251, 0.0574, 0.0359, 0.0825, 0.0837, 0.0292], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0386, 0.0338, 0.0442, 0.0374, 0.0530, 0.0388, 0.0413], + device='cuda:0'), out_proj_covar=tensor([1.2077e-04, 1.0129e-04, 8.8875e-05, 1.1674e-04, 9.8600e-05, 1.5001e-04, + 1.0490e-04, 1.0944e-04], device='cuda:0') +2023-02-07 02:35:37,481 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:37,990 INFO [train.py:901] (0/4) Epoch 21, batch 5350, loss[loss=0.2364, simple_loss=0.3104, pruned_loss=0.08121, over 8702.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06223, over 1616462.18 frames. ], batch size: 49, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:58,888 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167040.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:12,095 INFO [train.py:901] (0/4) Epoch 21, batch 5400, loss[loss=0.1816, simple_loss=0.2703, pruned_loss=0.04641, over 8501.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2883, pruned_loss=0.06237, over 1615163.17 frames. ], batch size: 26, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:16,483 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167065.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:32,164 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.459e+02 3.013e+02 3.547e+02 6.118e+02, threshold=6.026e+02, percent-clipped=1.0 +2023-02-07 02:36:39,937 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:47,956 INFO [train.py:901] (0/4) Epoch 21, batch 5450, loss[loss=0.2099, simple_loss=0.2843, pruned_loss=0.06771, over 7548.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.288, pruned_loss=0.06208, over 1612577.94 frames. 
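
The `batch size` field varies from 13 to 50 across these records because batches are formed by total audio duration, not by a fixed utterance count: lhotse's `DynamicBucketingSampler` groups cuts of similar length and fills each batch up to a duration budget, so batches of short cuts hold many utterances and batches of long cuts few. A sketch of the construction; the max-duration and bucket count are assumed values, and the recipe's `asr_datamodule.py` wires up the real settings:

```python
from lhotse import CutSet
from lhotse.dataset import DynamicBucketingSampler

# Illustrative sampler; the manifest path and parameters are assumptions.
cuts = CutSet.from_file("data/fbank/librispeech_cuts_train-all-shuf.jsonl.gz")
sampler = DynamicBucketingSampler(
    cuts,
    max_duration=300.0,  # seconds of audio per batch (assumed value)
    shuffle=True,
    num_buckets=30,
)
for batch_cuts in sampler:
    print(len(batch_cuts))  # varies per batch, like the batch size column
    break
```
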
], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:55,802 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:58,565 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:01,217 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:12,858 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 02:37:24,084 INFO [train.py:901] (0/4) Epoch 21, batch 5500, loss[loss=0.2027, simple_loss=0.2962, pruned_loss=0.05458, over 8312.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.06176, over 1609104.95 frames. ], batch size: 25, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:37:24,243 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6521, 1.3686, 4.8901, 1.9103, 4.3781, 4.1041, 4.4674, 4.3443], + device='cuda:0'), covar=tensor([0.0633, 0.5137, 0.0450, 0.4197, 0.1064, 0.0909, 0.0566, 0.0631], + device='cuda:0'), in_proj_covar=tensor([0.0630, 0.0642, 0.0694, 0.0629, 0.0707, 0.0605, 0.0604, 0.0676], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:37:36,488 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3138, 1.6659, 1.8477, 1.6136, 1.1531, 1.7308, 2.0320, 2.0870], + device='cuda:0'), covar=tensor([0.0531, 0.1210, 0.1558, 0.1362, 0.0627, 0.1379, 0.0656, 0.0527], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0099, 0.0163, 0.0112, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 02:37:43,653 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.538e+02 3.099e+02 3.967e+02 8.838e+02, threshold=6.197e+02, percent-clipped=3.0 +2023-02-07 02:37:58,443 INFO [train.py:901] (0/4) Epoch 21, batch 5550, loss[loss=0.2054, simple_loss=0.2805, pruned_loss=0.06522, over 7546.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2883, pruned_loss=0.06212, over 1614409.12 frames. ], batch size: 18, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:38:01,310 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:02,551 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:21,110 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:22,439 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:23,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167242.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:23,321 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 02:38:34,474 INFO [train.py:901] (0/4) Epoch 21, batch 5600, loss[loss=0.2168, simple_loss=0.2933, pruned_loss=0.07013, over 8686.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.06139, over 1615257.81 frames. 
], batch size: 49, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:38:38,113 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167264.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:40,051 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167267.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:52,849 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9412, 2.4170, 3.6131, 1.9105, 1.9348, 3.5131, 0.6772, 2.1491], + device='cuda:0'), covar=tensor([0.1369, 0.1218, 0.0255, 0.1719, 0.2500, 0.0355, 0.2288, 0.1316], + device='cuda:0'), in_proj_covar=tensor([0.0188, 0.0195, 0.0127, 0.0222, 0.0271, 0.0135, 0.0172, 0.0191], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 02:38:54,594 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.492e+02 3.097e+02 3.838e+02 7.086e+02, threshold=6.194e+02, percent-clipped=1.0 +2023-02-07 02:38:54,810 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:56,074 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:39:08,028 INFO [train.py:901] (0/4) Epoch 21, batch 5650, loss[loss=0.1988, simple_loss=0.2919, pruned_loss=0.05287, over 8190.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2875, pruned_loss=0.06159, over 1614034.42 frames. ], batch size: 23, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:39:16,511 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 02:39:18,764 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 02:39:27,013 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4663, 1.4448, 1.8115, 1.1810, 1.1359, 1.7969, 0.2381, 1.1880], + device='cuda:0'), covar=tensor([0.1638, 0.1317, 0.0439, 0.1012, 0.2720, 0.0484, 0.2177, 0.1194], + device='cuda:0'), in_proj_covar=tensor([0.0186, 0.0193, 0.0127, 0.0220, 0.0268, 0.0134, 0.0170, 0.0190], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 02:39:44,507 INFO [train.py:901] (0/4) Epoch 21, batch 5700, loss[loss=0.2054, simple_loss=0.2811, pruned_loss=0.06488, over 7429.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2883, pruned_loss=0.06232, over 1614006.43 frames. ], batch size: 17, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:04,669 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.585e+02 3.206e+02 3.925e+02 8.506e+02, threshold=6.412e+02, percent-clipped=6.0 +2023-02-07 02:40:16,545 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:18,418 INFO [train.py:901] (0/4) Epoch 21, batch 5750, loss[loss=0.2253, simple_loss=0.3051, pruned_loss=0.07273, over 7928.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2881, pruned_loss=0.0627, over 1613755.22 frames. ], batch size: 20, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:24,268 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 02:40:47,692 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167450.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:53,628 INFO [train.py:901] (0/4) Epoch 21, batch 5800, loss[loss=0.1942, simple_loss=0.2774, pruned_loss=0.0555, over 6802.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2874, pruned_loss=0.06188, over 1611629.75 frames. ], batch size: 15, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:55,798 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:59,183 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167466.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:00,730 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:01,411 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6006, 1.9047, 2.5941, 1.5034, 1.8492, 1.9290, 1.7090, 1.8927], + device='cuda:0'), covar=tensor([0.2113, 0.2656, 0.1040, 0.4828, 0.2030, 0.3541, 0.2502, 0.2411], + device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0593, 0.0552, 0.0635, 0.0637, 0.0587, 0.0528, 0.0626], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:41:08,617 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3053, 2.6806, 3.1974, 1.4734, 3.1701, 1.8900, 1.5957, 2.0659], + device='cuda:0'), covar=tensor([0.0825, 0.0374, 0.0282, 0.0814, 0.0630, 0.0885, 0.0873, 0.0566], + device='cuda:0'), in_proj_covar=tensor([0.0451, 0.0387, 0.0339, 0.0442, 0.0375, 0.0533, 0.0389, 0.0415], + device='cuda:0'), out_proj_covar=tensor([1.2122e-04, 1.0153e-04, 8.9186e-05, 1.1692e-04, 9.8850e-05, 1.5066e-04, + 1.0503e-04, 1.1002e-04], device='cuda:0') +2023-02-07 02:41:15,047 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.466e+02 2.953e+02 3.603e+02 7.254e+02, threshold=5.907e+02, percent-clipped=1.0 +2023-02-07 02:41:16,947 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 02:41:18,008 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:20,744 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:28,628 INFO [train.py:901] (0/4) Epoch 21, batch 5850, loss[loss=0.1835, simple_loss=0.27, pruned_loss=0.04854, over 7435.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06224, over 1612620.57 frames. ], batch size: 17, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:41:37,844 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:03,764 INFO [train.py:901] (0/4) Epoch 21, batch 5900, loss[loss=0.2226, simple_loss=0.3201, pruned_loss=0.06248, over 8134.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06202, over 1611734.82 frames. 
], batch size: 22, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:42:16,867 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:19,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:25,334 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4212, 2.0006, 3.8920, 1.4202, 2.8280, 2.0563, 1.5984, 2.8328], + device='cuda:0'), covar=tensor([0.2436, 0.3243, 0.0903, 0.5257, 0.2029, 0.3781, 0.2887, 0.2470], + device='cuda:0'), in_proj_covar=tensor([0.0519, 0.0592, 0.0551, 0.0635, 0.0637, 0.0587, 0.0528, 0.0626], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:42:25,700 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.257e+02 2.859e+02 3.440e+02 7.059e+02, threshold=5.718e+02, percent-clipped=2.0 +2023-02-07 02:42:39,892 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2865, 1.0631, 1.3894, 1.0553, 0.7446, 1.1827, 1.2337, 1.0566], + device='cuda:0'), covar=tensor([0.0651, 0.1693, 0.2301, 0.1845, 0.0675, 0.1961, 0.0761, 0.0769], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0158, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 02:42:40,390 INFO [train.py:901] (0/4) Epoch 21, batch 5950, loss[loss=0.2684, simple_loss=0.3419, pruned_loss=0.09744, over 8539.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06281, over 1615132.96 frames. ], batch size: 39, lr: 3.58e-03, grad_scale: 4.0 +2023-02-07 02:43:14,051 INFO [train.py:901] (0/4) Epoch 21, batch 6000, loss[loss=0.1807, simple_loss=0.2606, pruned_loss=0.05037, over 7539.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.288, pruned_loss=0.06247, over 1615209.60 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:43:14,052 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 02:43:26,400 INFO [train.py:935] (0/4) Epoch 21, validation: loss=0.174, simple_loss=0.2741, pruned_loss=0.03692, over 944034.00 frames. +2023-02-07 02:43:26,402 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 02:43:28,713 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:38,372 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2468, 1.2377, 3.3508, 1.0675, 2.9830, 2.7822, 3.0224, 2.9788], + device='cuda:0'), covar=tensor([0.0881, 0.4471, 0.0850, 0.4566, 0.1386, 0.1223, 0.0943, 0.0938], + device='cuda:0'), in_proj_covar=tensor([0.0631, 0.0645, 0.0699, 0.0635, 0.0711, 0.0610, 0.0612, 0.0682], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:43:45,659 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:47,403 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.382e+02 2.918e+02 3.609e+02 5.587e+02, threshold=5.837e+02, percent-clipped=0.0 +2023-02-07 02:44:01,970 INFO [train.py:901] (0/4) Epoch 21, batch 6050, loss[loss=0.2038, simple_loss=0.2965, pruned_loss=0.05561, over 8627.00 frames. 
], tot_loss[loss=0.205, simple_loss=0.2867, pruned_loss=0.06163, over 1615021.03 frames. ], batch size: 31, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:22,044 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167737.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:38,029 INFO [train.py:901] (0/4) Epoch 21, batch 6100, loss[loss=0.1877, simple_loss=0.2835, pruned_loss=0.04598, over 8358.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06139, over 1615493.26 frames. ], batch size: 24, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:56,031 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167785.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:57,192 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 02:44:58,541 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.390e+02 3.045e+02 3.849e+02 6.701e+02, threshold=6.089e+02, percent-clipped=2.0 +2023-02-07 02:45:02,092 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167794.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:13,116 INFO [train.py:901] (0/4) Epoch 21, batch 6150, loss[loss=0.2068, simple_loss=0.2999, pruned_loss=0.05686, over 8683.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06139, over 1616750.83 frames. ], batch size: 39, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:30,430 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:33,094 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,361 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,814 INFO [train.py:901] (0/4) Epoch 21, batch 6200, loss[loss=0.1839, simple_loss=0.2608, pruned_loss=0.05346, over 7445.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2871, pruned_loss=0.06117, over 1615249.97 frames. ], batch size: 17, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:51,034 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:09,471 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.266e+02 2.776e+02 3.727e+02 8.167e+02, threshold=5.552e+02, percent-clipped=4.0 +2023-02-07 02:46:23,402 INFO [train.py:901] (0/4) Epoch 21, batch 6250, loss[loss=0.2212, simple_loss=0.303, pruned_loss=0.06973, over 8417.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.288, pruned_loss=0.06165, over 1611233.77 frames. ], batch size: 49, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:46:23,624 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:58,909 INFO [train.py:901] (0/4) Epoch 21, batch 6300, loss[loss=0.1957, simple_loss=0.2781, pruned_loss=0.05664, over 8258.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2893, pruned_loss=0.06242, over 1614173.91 frames. 
], batch size: 24, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:47:20,723 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.475e+02 2.869e+02 3.545e+02 9.430e+02, threshold=5.737e+02, percent-clipped=7.0 +2023-02-07 02:47:28,255 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-168000.pt +2023-02-07 02:47:35,243 INFO [train.py:901] (0/4) Epoch 21, batch 6350, loss[loss=0.2049, simple_loss=0.292, pruned_loss=0.05891, over 8340.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2896, pruned_loss=0.06282, over 1615050.28 frames. ], batch size: 26, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:06,023 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:09,825 INFO [train.py:901] (0/4) Epoch 21, batch 6400, loss[loss=0.1845, simple_loss=0.2648, pruned_loss=0.05209, over 7707.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2891, pruned_loss=0.06239, over 1618803.11 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:16,727 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2491, 1.9786, 2.7046, 2.1816, 2.7432, 2.2363, 2.0035, 1.4098], + device='cuda:0'), covar=tensor([0.5314, 0.4892, 0.1913, 0.3788, 0.2350, 0.3079, 0.1922, 0.5305], + device='cuda:0'), in_proj_covar=tensor([0.0940, 0.0975, 0.0800, 0.0941, 0.0998, 0.0892, 0.0746, 0.0821], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 02:48:25,108 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168081.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:30,482 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.236e+02 2.639e+02 3.603e+02 6.999e+02, threshold=5.279e+02, percent-clipped=2.0 +2023-02-07 02:48:45,435 INFO [train.py:901] (0/4) Epoch 21, batch 6450, loss[loss=0.2078, simple_loss=0.2949, pruned_loss=0.06035, over 8460.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2888, pruned_loss=0.0622, over 1621864.70 frames. ], batch size: 25, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:59,381 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:19,828 INFO [train.py:901] (0/4) Epoch 21, batch 6500, loss[loss=0.2073, simple_loss=0.296, pruned_loss=0.05931, over 8462.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2891, pruned_loss=0.0623, over 1622888.46 frames. ], batch size: 25, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:49:24,786 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:41,359 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.483e+02 3.129e+02 4.081e+02 1.148e+03, threshold=6.258e+02, percent-clipped=13.0 +2023-02-07 02:49:42,173 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:46,107 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:54,741 INFO [train.py:901] (0/4) Epoch 21, batch 6550, loss[loss=0.2355, simple_loss=0.3208, pruned_loss=0.07508, over 8336.00 frames. 
], tot_loss[loss=0.2049, simple_loss=0.2873, pruned_loss=0.06126, over 1620156.96 frames. ], batch size: 26, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:19,978 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:20,493 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 02:50:24,642 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:29,926 INFO [train.py:901] (0/4) Epoch 21, batch 6600, loss[loss=0.2649, simple_loss=0.3311, pruned_loss=0.09934, over 7796.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2871, pruned_loss=0.06105, over 1619316.54 frames. ], batch size: 19, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:38,738 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 02:50:50,803 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.414e+02 2.830e+02 3.481e+02 7.637e+02, threshold=5.659e+02, percent-clipped=3.0 +2023-02-07 02:50:59,933 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6490, 2.1807, 4.0748, 1.5622, 3.0495, 2.2258, 1.7387, 2.8360], + device='cuda:0'), covar=tensor([0.1853, 0.2703, 0.0741, 0.4391, 0.1661, 0.3102, 0.2304, 0.2441], + device='cuda:0'), in_proj_covar=tensor([0.0523, 0.0598, 0.0555, 0.0640, 0.0642, 0.0589, 0.0531, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:51:05,096 INFO [train.py:901] (0/4) Epoch 21, batch 6650, loss[loss=0.2873, simple_loss=0.3575, pruned_loss=0.1086, over 8560.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.288, pruned_loss=0.06193, over 1619988.62 frames. ], batch size: 31, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:51:22,034 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2129, 4.0807, 3.8299, 1.9249, 3.7099, 3.8145, 3.7123, 3.5555], + device='cuda:0'), covar=tensor([0.0776, 0.0632, 0.1085, 0.4739, 0.0923, 0.0806, 0.1273, 0.0822], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0431, 0.0430, 0.0535, 0.0423, 0.0439, 0.0420, 0.0382], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:51:40,103 INFO [train.py:901] (0/4) Epoch 21, batch 6700, loss[loss=0.1724, simple_loss=0.249, pruned_loss=0.04784, over 7811.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06203, over 1614787.79 frames. ], batch size: 19, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:00,447 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.306e+02 2.933e+02 3.476e+02 6.537e+02, threshold=5.866e+02, percent-clipped=2.0 +2023-02-07 02:52:05,987 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:06,319 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 02:52:14,921 INFO [train.py:901] (0/4) Epoch 21, batch 6750, loss[loss=0.2219, simple_loss=0.3097, pruned_loss=0.06704, over 8433.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2881, pruned_loss=0.06282, over 1615277.13 frames. 
], batch size: 29, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:45,419 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:50,753 INFO [train.py:901] (0/4) Epoch 21, batch 6800, loss[loss=0.2065, simple_loss=0.2936, pruned_loss=0.0597, over 8099.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2882, pruned_loss=0.06268, over 1613950.23 frames. ], batch size: 23, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:58,530 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 02:53:04,360 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:12,380 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.333e+02 2.834e+02 3.373e+02 7.883e+02, threshold=5.669e+02, percent-clipped=5.0 +2023-02-07 02:53:20,381 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168500.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:26,326 INFO [train.py:901] (0/4) Epoch 21, batch 6850, loss[loss=0.1657, simple_loss=0.2486, pruned_loss=0.04145, over 7693.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2871, pruned_loss=0.06201, over 1613936.01 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:53:28,539 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:37,577 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:45,940 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 02:53:51,543 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 02:54:00,733 INFO [train.py:901] (0/4) Epoch 21, batch 6900, loss[loss=0.2093, simple_loss=0.2919, pruned_loss=0.06332, over 8102.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2878, pruned_loss=0.06266, over 1609118.12 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:22,243 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.867e+02 3.613e+02 6.820e+02, threshold=5.733e+02, percent-clipped=1.0 +2023-02-07 02:54:26,376 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:54:35,728 INFO [train.py:901] (0/4) Epoch 21, batch 6950, loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05701, over 8201.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2893, pruned_loss=0.06331, over 1613150.26 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:53,388 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-07 02:54:56,312 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1816, 3.6861, 2.2877, 2.9101, 3.0255, 2.0998, 2.9265, 2.9562], + device='cuda:0'), covar=tensor([0.1496, 0.0359, 0.1245, 0.0763, 0.0662, 0.1485, 0.1005, 0.1137], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0234, 0.0334, 0.0306, 0.0299, 0.0334, 0.0343, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:55:10,653 INFO [train.py:901] (0/4) Epoch 21, batch 7000, loss[loss=0.1684, simple_loss=0.245, pruned_loss=0.0459, over 7436.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2879, pruned_loss=0.06263, over 1609516.80 frames. ], batch size: 17, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:55:13,586 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9197, 2.4884, 1.9832, 2.2357, 2.2150, 1.8798, 2.1470, 2.3012], + device='cuda:0'), covar=tensor([0.1117, 0.0386, 0.0992, 0.0560, 0.0637, 0.1236, 0.0820, 0.0836], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0233, 0.0334, 0.0305, 0.0299, 0.0334, 0.0342, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 02:55:17,033 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6641, 2.3446, 3.3366, 2.6305, 3.2131, 2.5104, 2.3303, 1.9102], + device='cuda:0'), covar=tensor([0.5026, 0.4948, 0.1755, 0.3320, 0.2230, 0.2837, 0.1722, 0.5492], + device='cuda:0'), in_proj_covar=tensor([0.0940, 0.0973, 0.0803, 0.0940, 0.0997, 0.0891, 0.0745, 0.0823], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 02:55:20,991 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9422, 1.6959, 2.0656, 1.8392, 2.0081, 1.9735, 1.7809, 0.8184], + device='cuda:0'), covar=tensor([0.5642, 0.4637, 0.1873, 0.3208, 0.2256, 0.2922, 0.1830, 0.4718], + device='cuda:0'), in_proj_covar=tensor([0.0940, 0.0973, 0.0802, 0.0940, 0.0997, 0.0891, 0.0745, 0.0822], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 02:55:31,356 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.444e+02 3.041e+02 3.968e+02 8.528e+02, threshold=6.083e+02, percent-clipped=8.0 +2023-02-07 02:55:45,696 INFO [train.py:901] (0/4) Epoch 21, batch 7050, loss[loss=0.1935, simple_loss=0.2701, pruned_loss=0.05849, over 7940.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2881, pruned_loss=0.06284, over 1607487.74 frames. ], batch size: 20, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:55:46,579 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168710.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:55:48,258 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.68 vs. 
limit=5.0 +2023-02-07 02:55:49,318 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7901, 1.7803, 2.4731, 1.4697, 1.3014, 2.3270, 0.5206, 1.4483], + device='cuda:0'), covar=tensor([0.1683, 0.1088, 0.0293, 0.1443, 0.2967, 0.0422, 0.2250, 0.1445], + device='cuda:0'), in_proj_covar=tensor([0.0189, 0.0195, 0.0127, 0.0222, 0.0271, 0.0136, 0.0171, 0.0192], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 02:56:19,949 INFO [train.py:901] (0/4) Epoch 21, batch 7100, loss[loss=0.1888, simple_loss=0.2805, pruned_loss=0.04857, over 8239.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2884, pruned_loss=0.06268, over 1609788.23 frames. ], batch size: 24, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:56:26,884 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:40,683 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.605e+02 3.011e+02 3.811e+02 1.077e+03, threshold=6.022e+02, percent-clipped=4.0 +2023-02-07 02:56:43,685 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:55,259 INFO [train.py:901] (0/4) Epoch 21, batch 7150, loss[loss=0.2698, simple_loss=0.3427, pruned_loss=0.09839, over 8454.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2891, pruned_loss=0.06326, over 1606218.31 frames. ], batch size: 27, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:57:29,818 INFO [train.py:901] (0/4) Epoch 21, batch 7200, loss[loss=0.2124, simple_loss=0.2899, pruned_loss=0.0675, over 8591.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2887, pruned_loss=0.06286, over 1609998.86 frames. ], batch size: 31, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:57:51,147 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 2.343e+02 3.196e+02 4.097e+02 7.456e+02, threshold=6.392e+02, percent-clipped=6.0 +2023-02-07 02:57:53,932 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0691, 1.2631, 1.2342, 0.8116, 1.2608, 1.0788, 0.1348, 1.2242], + device='cuda:0'), covar=tensor([0.0382, 0.0366, 0.0335, 0.0492, 0.0377, 0.0879, 0.0765, 0.0288], + device='cuda:0'), in_proj_covar=tensor([0.0450, 0.0389, 0.0341, 0.0442, 0.0372, 0.0532, 0.0388, 0.0415], + device='cuda:0'), out_proj_covar=tensor([1.2074e-04, 1.0213e-04, 8.9754e-05, 1.1642e-04, 9.7995e-05, 1.5043e-04, + 1.0467e-04, 1.1006e-04], device='cuda:0') +2023-02-07 02:58:04,698 INFO [train.py:901] (0/4) Epoch 21, batch 7250, loss[loss=0.1913, simple_loss=0.2672, pruned_loss=0.05769, over 7704.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2891, pruned_loss=0.06349, over 1609253.02 frames. ], batch size: 18, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:58:40,063 INFO [train.py:901] (0/4) Epoch 21, batch 7300, loss[loss=0.1876, simple_loss=0.2735, pruned_loss=0.05081, over 8251.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2887, pruned_loss=0.06321, over 1609818.56 frames. 
], batch size: 24, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:58:44,981 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168966.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:58:58,201 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168985.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:58:59,434 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168987.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:59:00,593 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.375e+02 2.880e+02 4.111e+02 9.346e+02, threshold=5.760e+02, percent-clipped=6.0 +2023-02-07 02:59:02,086 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168991.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:59:13,134 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 02:59:14,674 INFO [train.py:901] (0/4) Epoch 21, batch 7350, loss[loss=0.1798, simple_loss=0.2584, pruned_loss=0.05059, over 7657.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2875, pruned_loss=0.06224, over 1607527.21 frames. ], batch size: 19, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:59:21,355 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 02:59:35,046 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 02:59:48,058 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6211, 1.8758, 3.3104, 1.4278, 2.2856, 2.0781, 1.6403, 2.3841], + device='cuda:0'), covar=tensor([0.2007, 0.2841, 0.0888, 0.4742, 0.2283, 0.3250, 0.2436, 0.2588], + device='cuda:0'), in_proj_covar=tensor([0.0526, 0.0603, 0.0558, 0.0644, 0.0645, 0.0596, 0.0534, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 02:59:49,824 INFO [train.py:901] (0/4) Epoch 21, batch 7400, loss[loss=0.2394, simple_loss=0.3154, pruned_loss=0.0817, over 8097.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2889, pruned_loss=0.06281, over 1612002.75 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:59:53,393 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 03:00:10,728 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 2.322e+02 3.020e+02 4.298e+02 1.187e+03, threshold=6.039e+02, percent-clipped=6.0 +2023-02-07 03:00:19,190 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169100.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:00:25,153 INFO [train.py:901] (0/4) Epoch 21, batch 7450, loss[loss=0.1925, simple_loss=0.2858, pruned_loss=0.04958, over 8523.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2903, pruned_loss=0.06326, over 1612229.90 frames. ], batch size: 28, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:00:33,880 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 03:00:34,283 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-07 03:00:42,569 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:01:00,744 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5985, 2.3576, 3.2346, 2.5946, 3.1436, 2.4596, 2.3023, 1.9203], + device='cuda:0'), covar=tensor([0.4887, 0.4767, 0.1897, 0.3830, 0.2520, 0.3071, 0.1844, 0.5435], + device='cuda:0'), in_proj_covar=tensor([0.0936, 0.0969, 0.0798, 0.0937, 0.0991, 0.0885, 0.0741, 0.0817], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 03:01:01,175 INFO [train.py:901] (0/4) Epoch 21, batch 7500, loss[loss=0.2165, simple_loss=0.3037, pruned_loss=0.06463, over 8252.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2913, pruned_loss=0.0638, over 1617143.59 frames. ], batch size: 22, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:01:13,523 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:01:21,454 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.287e+02 2.739e+02 3.438e+02 5.948e+02, threshold=5.478e+02, percent-clipped=0.0 +2023-02-07 03:01:35,752 INFO [train.py:901] (0/4) Epoch 21, batch 7550, loss[loss=0.2089, simple_loss=0.2775, pruned_loss=0.0702, over 7206.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2904, pruned_loss=0.06347, over 1610089.17 frames. ], batch size: 16, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:01:56,176 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 03:02:09,758 INFO [train.py:901] (0/4) Epoch 21, batch 7600, loss[loss=0.2054, simple_loss=0.2914, pruned_loss=0.05974, over 8101.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2912, pruned_loss=0.06395, over 1612926.76 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:02:18,876 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6662, 1.4912, 1.6537, 1.3434, 0.9024, 1.4706, 1.6393, 1.5309], + device='cuda:0'), covar=tensor([0.0621, 0.1236, 0.1710, 0.1487, 0.0594, 0.1558, 0.0694, 0.0644], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0158, 0.0099, 0.0163, 0.0112, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 03:02:32,181 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.243e+02 2.742e+02 3.349e+02 1.012e+03, threshold=5.485e+02, percent-clipped=5.0 +2023-02-07 03:02:45,878 INFO [train.py:901] (0/4) Epoch 21, batch 7650, loss[loss=0.2348, simple_loss=0.331, pruned_loss=0.06936, over 8245.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2904, pruned_loss=0.06316, over 1616722.35 frames. ], batch size: 24, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:03:00,457 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169329.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:03:01,839 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169331.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:03:21,298 INFO [train.py:901] (0/4) Epoch 21, batch 7700, loss[loss=0.2133, simple_loss=0.2984, pruned_loss=0.06408, over 8248.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2902, pruned_loss=0.06284, over 1618577.36 frames. 
], batch size: 22, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:03:42,196 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.349e+02 2.901e+02 3.736e+02 6.675e+02, threshold=5.802e+02, percent-clipped=6.0 +2023-02-07 03:03:44,241 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 03:03:57,018 INFO [train.py:901] (0/4) Epoch 21, batch 7750, loss[loss=0.2502, simple_loss=0.3313, pruned_loss=0.08455, over 8772.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2893, pruned_loss=0.06236, over 1620596.48 frames. ], batch size: 30, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:04:21,904 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:22,011 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:23,343 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169446.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:04:32,647 INFO [train.py:901] (0/4) Epoch 21, batch 7800, loss[loss=0.2473, simple_loss=0.3218, pruned_loss=0.08643, over 8442.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2886, pruned_loss=0.06215, over 1616326.52 frames. ], batch size: 27, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:04:35,557 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5408, 1.8926, 2.9272, 1.3928, 2.1556, 1.9589, 1.6053, 2.1983], + device='cuda:0'), covar=tensor([0.1919, 0.2576, 0.0829, 0.4509, 0.1850, 0.3075, 0.2339, 0.2219], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0599, 0.0555, 0.0637, 0.0643, 0.0591, 0.0530, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 03:04:45,393 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:52,677 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.145e+02 2.738e+02 3.428e+02 8.790e+02, threshold=5.476e+02, percent-clipped=3.0 +2023-02-07 03:05:06,016 INFO [train.py:901] (0/4) Epoch 21, batch 7850, loss[loss=0.2069, simple_loss=0.2874, pruned_loss=0.0632, over 8363.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.289, pruned_loss=0.06207, over 1619878.13 frames. ], batch size: 24, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:05:14,131 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:05:39,266 INFO [train.py:901] (0/4) Epoch 21, batch 7900, loss[loss=0.236, simple_loss=0.3053, pruned_loss=0.08336, over 8476.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06228, over 1614607.37 frames. 
], batch size: 27, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:05:39,444 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:05:59,284 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.405e+02 2.884e+02 3.520e+02 8.387e+02, threshold=5.767e+02, percent-clipped=5.0 +2023-02-07 03:06:02,045 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:12,856 INFO [train.py:901] (0/4) Epoch 21, batch 7950, loss[loss=0.1849, simple_loss=0.2732, pruned_loss=0.04825, over 7810.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.288, pruned_loss=0.06243, over 1610444.62 frames. ], batch size: 20, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:06:31,356 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:33,350 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:46,590 INFO [train.py:901] (0/4) Epoch 21, batch 8000, loss[loss=0.211, simple_loss=0.2884, pruned_loss=0.06676, over 8235.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2873, pruned_loss=0.06194, over 1612762.33 frames. ], batch size: 24, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:07:06,437 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.194e+02 2.844e+02 3.383e+02 6.688e+02, threshold=5.687e+02, percent-clipped=2.0 +2023-02-07 03:07:11,984 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.7877, 1.1740, 0.9482, 1.1112, 0.9824, 0.8949, 0.9258, 1.0216], + device='cuda:0'), covar=tensor([0.0826, 0.0376, 0.0942, 0.0440, 0.0585, 0.1079, 0.0704, 0.0603], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0235, 0.0333, 0.0308, 0.0300, 0.0335, 0.0343, 0.0317], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 03:07:14,030 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:07:15,395 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169702.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 03:07:19,702 INFO [train.py:901] (0/4) Epoch 21, batch 8050, loss[loss=0.1791, simple_loss=0.2471, pruned_loss=0.05556, over 7248.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2875, pruned_loss=0.06287, over 1590851.07 frames. ], batch size: 16, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:07:30,618 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:07:31,994 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169727.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:07:42,946 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-21.pt +2023-02-07 03:07:54,371 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 03:07:58,224 INFO [train.py:901] (0/4) Epoch 22, batch 0, loss[loss=0.2043, simple_loss=0.2857, pruned_loss=0.06147, over 8741.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2857, pruned_loss=0.06147, over 8741.00 frames. 
], batch size: 30, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:07:58,224 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 03:08:05,366 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6807, 1.8282, 1.5764, 2.1921, 1.1301, 1.5121, 1.6520, 1.7546], + device='cuda:0'), covar=tensor([0.0753, 0.0674, 0.0980, 0.0434, 0.1034, 0.1198, 0.0692, 0.0734], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0207, 0.0246, 0.0250, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 03:08:08,956 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2980, 1.5696, 1.6030, 1.0801, 1.6331, 1.2299, 0.3168, 1.4700], + device='cuda:0'), covar=tensor([0.0486, 0.0388, 0.0330, 0.0517, 0.0397, 0.0949, 0.0865, 0.0317], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0387, 0.0341, 0.0438, 0.0370, 0.0529, 0.0386, 0.0416], + device='cuda:0'), out_proj_covar=tensor([1.2047e-04, 1.0158e-04, 8.9695e-05, 1.1538e-04, 9.7332e-05, 1.4925e-04, + 1.0430e-04, 1.1037e-04], device='cuda:0') +2023-02-07 03:08:09,351 INFO [train.py:935] (0/4) Epoch 22, validation: loss=0.1743, simple_loss=0.2746, pruned_loss=0.03702, over 944034.00 frames. +2023-02-07 03:08:09,352 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 03:08:12,908 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169747.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:08:24,241 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 03:08:25,065 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:08:42,194 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.482e+02 2.980e+02 3.558e+02 1.069e+03, threshold=5.959e+02, percent-clipped=8.0 +2023-02-07 03:08:44,175 INFO [train.py:901] (0/4) Epoch 22, batch 50, loss[loss=0.2133, simple_loss=0.2922, pruned_loss=0.06725, over 8319.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.295, pruned_loss=0.06668, over 369270.41 frames. ], batch size: 25, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:08:54,134 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:01,092 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 03:09:02,025 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:19,145 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169840.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:20,327 INFO [train.py:901] (0/4) Epoch 22, batch 100, loss[loss=0.1987, simple_loss=0.2816, pruned_loss=0.05786, over 8245.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2907, pruned_loss=0.06368, over 647993.63 frames. ], batch size: 24, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:09:23,126 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 03:09:25,364 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:42,069 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:52,894 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.356e+02 3.069e+02 3.800e+02 7.981e+02, threshold=6.138e+02, percent-clipped=3.0 +2023-02-07 03:09:55,631 INFO [train.py:901] (0/4) Epoch 22, batch 150, loss[loss=0.2222, simple_loss=0.3082, pruned_loss=0.06809, over 8343.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.289, pruned_loss=0.06271, over 859418.51 frames. ], batch size: 26, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:09:55,864 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:10:12,784 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169917.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:10:30,764 INFO [train.py:901] (0/4) Epoch 22, batch 200, loss[loss=0.215, simple_loss=0.2967, pruned_loss=0.06669, over 8595.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2881, pruned_loss=0.06231, over 1026301.31 frames. ], batch size: 31, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:10:58,685 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:11:02,619 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.362e+02 2.871e+02 3.395e+02 8.094e+02, threshold=5.742e+02, percent-clipped=2.0 +2023-02-07 03:11:04,633 INFO [train.py:901] (0/4) Epoch 22, batch 250, loss[loss=0.2596, simple_loss=0.3319, pruned_loss=0.09363, over 8652.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2873, pruned_loss=0.06107, over 1162383.82 frames. ], batch size: 34, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:11:10,171 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-170000.pt +2023-02-07 03:11:17,894 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 03:11:26,139 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 03:11:41,667 INFO [train.py:901] (0/4) Epoch 22, batch 300, loss[loss=0.1968, simple_loss=0.2785, pruned_loss=0.0576, over 8197.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2872, pruned_loss=0.06107, over 1264285.59 frames. 
], batch size: 23, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:11:42,593 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8024, 2.1333, 3.1929, 1.5631, 2.6643, 2.1370, 1.8762, 2.5809], + device='cuda:0'), covar=tensor([0.1791, 0.2618, 0.0927, 0.4343, 0.1708, 0.2995, 0.2225, 0.2200], + device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0601, 0.0556, 0.0638, 0.0642, 0.0589, 0.0533, 0.0630], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 03:11:56,572 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:13,690 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.486e+02 2.821e+02 3.492e+02 6.452e+02, threshold=5.641e+02, percent-clipped=3.0 +2023-02-07 03:12:15,163 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:15,766 INFO [train.py:901] (0/4) Epoch 22, batch 350, loss[loss=0.2046, simple_loss=0.2996, pruned_loss=0.0548, over 8462.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06109, over 1336626.82 frames. ], batch size: 25, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:12:19,957 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170098.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:27,043 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:34,532 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7541, 1.9325, 2.1473, 1.4770, 2.2109, 1.6713, 0.8567, 1.9335], + device='cuda:0'), covar=tensor([0.0598, 0.0364, 0.0269, 0.0539, 0.0421, 0.0752, 0.0815, 0.0304], + device='cuda:0'), in_proj_covar=tensor([0.0451, 0.0390, 0.0343, 0.0441, 0.0372, 0.0532, 0.0387, 0.0419], + device='cuda:0'), out_proj_covar=tensor([1.2114e-04, 1.0247e-04, 9.0178e-05, 1.1608e-04, 9.7872e-05, 1.5013e-04, + 1.0452e-04, 1.1113e-04], device='cuda:0') +2023-02-07 03:12:49,716 INFO [train.py:901] (0/4) Epoch 22, batch 400, loss[loss=0.2049, simple_loss=0.2991, pruned_loss=0.05532, over 8638.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2859, pruned_loss=0.0606, over 1399391.28 frames. ], batch size: 34, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:12:53,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170148.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:54,639 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 03:13:22,568 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.277e+02 2.821e+02 3.460e+02 6.418e+02, threshold=5.643e+02, percent-clipped=3.0 +2023-02-07 03:13:24,663 INFO [train.py:901] (0/4) Epoch 22, batch 450, loss[loss=0.2392, simple_loss=0.3192, pruned_loss=0.07961, over 8335.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2852, pruned_loss=0.06028, over 1446484.14 frames. 
], batch size: 26, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:13:34,377 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170206.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:13:46,337 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:13:58,275 INFO [train.py:901] (0/4) Epoch 22, batch 500, loss[loss=0.1779, simple_loss=0.2632, pruned_loss=0.04623, over 7660.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06085, over 1487118.48 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:14:13,725 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:14:31,688 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.263e+02 2.770e+02 3.716e+02 6.957e+02, threshold=5.540e+02, percent-clipped=5.0
+2023-02-07 03:14:34,518 INFO [train.py:901] (0/4) Epoch 22, batch 550, loss[loss=0.2127, simple_loss=0.3024, pruned_loss=0.06147, over 8596.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06094, over 1515544.92 frames. ], batch size: 49, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:15:08,212 INFO [train.py:901] (0/4) Epoch 22, batch 600, loss[loss=0.1996, simple_loss=0.2742, pruned_loss=0.06249, over 7234.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2879, pruned_loss=0.06149, over 1537819.25 frames. ], batch size: 16, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:15:15,787 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.5273, 1.6742, 5.6518, 2.2428, 5.1243, 4.7935, 5.1999, 5.1074],
+ device='cuda:0'), covar=tensor([0.0393, 0.4714, 0.0340, 0.3673, 0.0863, 0.0919, 0.0459, 0.0448],
+ device='cuda:0'), in_proj_covar=tensor([0.0629, 0.0641, 0.0692, 0.0625, 0.0712, 0.0608, 0.0611, 0.0676],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:15:16,562 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170354.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:15:27,521 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-07 03:15:34,308 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170379.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:15:40,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.463e+02 3.010e+02 3.561e+02 9.437e+02, threshold=6.021e+02, percent-clipped=1.0
+2023-02-07 03:15:42,757 INFO [train.py:901] (0/4) Epoch 22, batch 650, loss[loss=0.2258, simple_loss=0.3041, pruned_loss=0.07374, over 8552.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2874, pruned_loss=0.06136, over 1557185.50 frames. ], batch size: 31, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:15:53,430 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170407.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:17,645 INFO [train.py:901] (0/4) Epoch 22, batch 700, loss[loss=0.2625, simple_loss=0.3439, pruned_loss=0.09055, over 8758.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2876, pruned_loss=0.06129, over 1569466.63 frames. ], batch size: 30, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:16:27,544 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 03:16:31,452 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:42,960 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:43,710 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:49,121 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170487.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:50,917 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.347e+02 2.936e+02 3.672e+02 5.936e+02, threshold=5.871e+02, percent-clipped=0.0
+2023-02-07 03:16:52,903 INFO [train.py:901] (0/4) Epoch 22, batch 750, loss[loss=0.2192, simple_loss=0.3002, pruned_loss=0.06909, over 8502.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2884, pruned_loss=0.06186, over 1577476.07 frames. ], batch size: 28, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:17:01,714 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170505.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:11,632 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170519.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:13,546 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170522.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:14,722 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-07 03:17:23,962 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-07 03:17:27,399 INFO [train.py:901] (0/4) Epoch 22, batch 800, loss[loss=0.1919, simple_loss=0.2798, pruned_loss=0.052, over 8148.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2888, pruned_loss=0.06216, over 1591653.76 frames. ], batch size: 22, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:17:28,967 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170544.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:57,564 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:58,752 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.234e+02 2.598e+02 3.180e+02 6.753e+02, threshold=5.195e+02, percent-clipped=1.0
+2023-02-07 03:18:00,801 INFO [train.py:901] (0/4) Epoch 22, batch 850, loss[loss=0.2134, simple_loss=0.3021, pruned_loss=0.06241, over 8199.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2892, pruned_loss=0.06262, over 1598574.81 frames. ], batch size: 23, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:18:17,662 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-07 03:18:36,985 INFO [train.py:901] (0/4) Epoch 22, batch 900, loss[loss=0.1979, simple_loss=0.2981, pruned_loss=0.04883, over 8508.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2891, pruned_loss=0.06179, over 1607354.53 frames. ], batch size: 26, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:18:57,706 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.54 vs. limit=5.0
+2023-02-07 03:19:09,372 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.381e+02 2.827e+02 3.296e+02 7.509e+02, threshold=5.655e+02, percent-clipped=4.0
+2023-02-07 03:19:11,437 INFO [train.py:901] (0/4) Epoch 22, batch 950, loss[loss=0.2165, simple_loss=0.3074, pruned_loss=0.06284, over 8100.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2876, pruned_loss=0.06147, over 1607772.77 frames. ], batch size: 23, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:19:29,273 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0
+2023-02-07 03:19:43,584 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-07 03:19:46,359 INFO [train.py:901] (0/4) Epoch 22, batch 1000, loss[loss=0.245, simple_loss=0.3236, pruned_loss=0.08322, over 7362.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.288, pruned_loss=0.06154, over 1610600.34 frames. ], batch size: 71, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:20:01,244 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1809, 1.2265, 1.4797, 1.1810, 0.6949, 1.3028, 1.1667, 0.9506],
+ device='cuda:0'), covar=tensor([0.0624, 0.1384, 0.1777, 0.1590, 0.0637, 0.1664, 0.0760, 0.0778],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0099, 0.0163, 0.0112, 0.0143],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 03:20:12,106 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:20:16,587 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8642, 1.5017, 3.4409, 1.5662, 2.3543, 3.7818, 3.9248, 3.1322],
+ device='cuda:0'), covar=tensor([0.1337, 0.1952, 0.0429, 0.2132, 0.1268, 0.0298, 0.0640, 0.0729],
+ device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0322, 0.0285, 0.0316, 0.0309, 0.0264, 0.0418, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-07 03:20:17,095 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-07 03:20:19,772 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 2.326e+02 2.890e+02 3.504e+02 6.405e+02, threshold=5.779e+02, percent-clipped=4.0
+2023-02-07 03:20:21,021 INFO [train.py:901] (0/4) Epoch 22, batch 1050, loss[loss=0.2114, simple_loss=0.2795, pruned_loss=0.07162, over 7925.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2881, pruned_loss=0.06204, over 1607990.35 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:20:28,513 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-07 03:20:28,701 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170803.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:20:31,481 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.02 vs. limit=5.0
+2023-02-07 03:20:41,765 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:20:55,953 INFO [train.py:901] (0/4) Epoch 22, batch 1100, loss[loss=0.1829, simple_loss=0.2614, pruned_loss=0.0522, over 7655.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2886, pruned_loss=0.06235, over 1609673.29 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:21:22,157 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6421, 1.8382, 2.6397, 1.5030, 2.1686, 1.8739, 1.6849, 2.0875],
+ device='cuda:0'), covar=tensor([0.1472, 0.2066, 0.0671, 0.3649, 0.1416, 0.2559, 0.1908, 0.2056],
+ device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0603, 0.0556, 0.0643, 0.0645, 0.0592, 0.0534, 0.0630],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:21:29,315 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.576e+02 3.127e+02 3.706e+02 1.049e+03, threshold=6.255e+02, percent-clipped=5.0
+2023-02-07 03:21:30,679 INFO [train.py:901] (0/4) Epoch 22, batch 1150, loss[loss=0.1889, simple_loss=0.2643, pruned_loss=0.0567, over 7926.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2884, pruned_loss=0.0624, over 1606014.85 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:21:37,431 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-07 03:21:56,781 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170931.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:22:01,711 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170938.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:22:04,213 INFO [train.py:901] (0/4) Epoch 22, batch 1200, loss[loss=0.2082, simple_loss=0.2674, pruned_loss=0.07451, over 7600.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2882, pruned_loss=0.06247, over 1603007.74 frames. ], batch size: 17, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:22:07,063 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170946.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:22:38,801 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.384e+02 2.807e+02 3.549e+02 5.873e+02, threshold=5.615e+02, percent-clipped=0.0
+2023-02-07 03:22:40,088 INFO [train.py:901] (0/4) Epoch 22, batch 1250, loss[loss=0.1698, simple_loss=0.251, pruned_loss=0.04434, over 7543.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2869, pruned_loss=0.06144, over 1605166.16 frames. ], batch size: 18, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:22:57,662 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8233, 3.7137, 3.3970, 1.7026, 3.2764, 3.5030, 3.3865, 3.2282],
+ device='cuda:0'), covar=tensor([0.0951, 0.0727, 0.1189, 0.4972, 0.1025, 0.1100, 0.1409, 0.0968],
+ device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0430, 0.0430, 0.0530, 0.0422, 0.0441, 0.0420, 0.0382],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:22:58,788 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-02-07 03:23:14,554 INFO [train.py:901] (0/4) Epoch 22, batch 1300, loss[loss=0.2137, simple_loss=0.3002, pruned_loss=0.06363, over 8531.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.286, pruned_loss=0.06045, over 1611376.28 frames. ], batch size: 49, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:23:17,483 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:23:47,495 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.382e+02 2.988e+02 3.753e+02 7.309e+02, threshold=5.975e+02, percent-clipped=5.0
+2023-02-07 03:23:48,841 INFO [train.py:901] (0/4) Epoch 22, batch 1350, loss[loss=0.1974, simple_loss=0.2827, pruned_loss=0.05603, over 8264.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2861, pruned_loss=0.06013, over 1615094.95 frames. ], batch size: 24, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:24:01,671 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171110.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:24:02,358 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.7942, 1.4754, 3.9700, 1.4125, 3.4722, 3.2690, 3.5769, 3.4482],
+ device='cuda:0'), covar=tensor([0.0792, 0.4942, 0.0707, 0.4600, 0.1416, 0.1170, 0.0802, 0.0854],
+ device='cuda:0'), in_proj_covar=tensor([0.0625, 0.0637, 0.0688, 0.0620, 0.0704, 0.0604, 0.0606, 0.0672],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:24:23,451 INFO [train.py:901] (0/4) Epoch 22, batch 1400, loss[loss=0.1889, simple_loss=0.276, pruned_loss=0.05094, over 8477.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2866, pruned_loss=0.06054, over 1615713.66 frames. ], batch size: 29, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:24:23,825 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.93 vs. limit=5.0
+2023-02-07 03:24:51,098 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1759, 2.4045, 1.8897, 3.0137, 1.3789, 1.6478, 2.0699, 2.2744],
+ device='cuda:0'), covar=tensor([0.0657, 0.0794, 0.0891, 0.0325, 0.1120, 0.1284, 0.0850, 0.0851],
+ device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0197, 0.0245, 0.0215, 0.0208, 0.0248, 0.0251, 0.0212],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:24:55,486 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 3.047e+02 3.835e+02 9.203e+02, threshold=6.094e+02, percent-clipped=3.0
+2023-02-07 03:24:57,482 INFO [train.py:901] (0/4) Epoch 22, batch 1450, loss[loss=0.2617, simple_loss=0.3368, pruned_loss=0.09332, over 8499.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2881, pruned_loss=0.0616, over 1612597.21 frames. ], batch size: 28, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:24:58,888 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171194.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:25:06,224 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-07 03:25:12,521 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171214.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:25:16,679 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:25:32,447 INFO [train.py:901] (0/4) Epoch 22, batch 1500, loss[loss=0.2259, simple_loss=0.2979, pruned_loss=0.07689, over 7802.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.06189, over 1615563.01 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:26:04,592 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.477e+02 2.962e+02 3.885e+02 1.079e+03, threshold=5.924e+02, percent-clipped=2.0
+2023-02-07 03:26:04,686 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171290.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:26:05,960 INFO [train.py:901] (0/4) Epoch 22, batch 1550, loss[loss=0.208, simple_loss=0.2898, pruned_loss=0.06307, over 8035.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2889, pruned_loss=0.06229, over 1613748.91 frames. ], batch size: 22, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:26:12,937 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:26:25,492 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7008, 1.8242, 1.6240, 2.2602, 0.9815, 1.4042, 1.6104, 1.8495],
+ device='cuda:0'), covar=tensor([0.0776, 0.0803, 0.0920, 0.0436, 0.1271, 0.1392, 0.0816, 0.0754],
+ device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0214, 0.0207, 0.0247, 0.0250, 0.0211],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:26:30,107 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:26:37,454 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6756, 1.4794, 4.9040, 1.7982, 4.3808, 4.1510, 4.4257, 4.3181],
+ device='cuda:0'), covar=tensor([0.0566, 0.4707, 0.0530, 0.3932, 0.1115, 0.0938, 0.0590, 0.0621],
+ device='cuda:0'), in_proj_covar=tensor([0.0629, 0.0642, 0.0693, 0.0623, 0.0708, 0.0608, 0.0611, 0.0676],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:26:40,692 INFO [train.py:901] (0/4) Epoch 22, batch 1600, loss[loss=0.2174, simple_loss=0.2981, pruned_loss=0.06833, over 8527.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2886, pruned_loss=0.0621, over 1613110.52 frames. ], batch size: 28, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:26:55,733 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171363.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:27:13,631 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 2.510e+02 3.045e+02 3.987e+02 6.104e+02, threshold=6.090e+02, percent-clipped=2.0
+2023-02-07 03:27:14,996 INFO [train.py:901] (0/4) Epoch 22, batch 1650, loss[loss=0.2401, simple_loss=0.3263, pruned_loss=0.07691, over 8495.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2891, pruned_loss=0.06239, over 1615192.14 frames. ], batch size: 29, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:27:24,096 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171405.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:27:51,026 INFO [train.py:901] (0/4) Epoch 22, batch 1700, loss[loss=0.1678, simple_loss=0.2628, pruned_loss=0.03641, over 8242.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2882, pruned_loss=0.0618, over 1614045.42 frames. ], batch size: 22, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:27:59,269 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171454.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:28:18,870 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-02-07 03:28:24,566 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.429e+02 3.050e+02 3.629e+02 7.357e+02, threshold=6.100e+02, percent-clipped=3.0
+2023-02-07 03:28:25,936 INFO [train.py:901] (0/4) Epoch 22, batch 1750, loss[loss=0.1535, simple_loss=0.2385, pruned_loss=0.03429, over 7658.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2875, pruned_loss=0.06177, over 1614945.49 frames. ], batch size: 19, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:28:42,133 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171516.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:29:00,474 INFO [train.py:901] (0/4) Epoch 22, batch 1800, loss[loss=0.2009, simple_loss=0.2826, pruned_loss=0.05959, over 8651.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2867, pruned_loss=0.06158, over 1612124.50 frames. ], batch size: 34, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:29:11,504 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:29:19,688 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171569.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:29:34,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.408e+02 2.801e+02 3.784e+02 7.831e+02, threshold=5.602e+02, percent-clipped=2.0
+2023-02-07 03:29:35,959 INFO [train.py:901] (0/4) Epoch 22, batch 1850, loss[loss=0.1638, simple_loss=0.2362, pruned_loss=0.04566, over 7425.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2867, pruned_loss=0.06172, over 1605126.42 frames. ], batch size: 17, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:30:10,046 INFO [train.py:901] (0/4) Epoch 22, batch 1900, loss[loss=0.1777, simple_loss=0.2648, pruned_loss=0.04528, over 8081.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2878, pruned_loss=0.06195, over 1608260.79 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:30:24,233 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171661.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:30:32,191 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171673.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:30:36,788 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-07 03:30:41,591 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171686.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:30:44,065 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.518e+02 3.035e+02 3.649e+02 9.576e+02, threshold=6.070e+02, percent-clipped=4.0
+2023-02-07 03:30:45,467 INFO [train.py:901] (0/4) Epoch 22, batch 1950, loss[loss=0.1917, simple_loss=0.2647, pruned_loss=0.05935, over 7704.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2882, pruned_loss=0.06174, over 1614133.45 frames. ], batch size: 18, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:30:48,015 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-07 03:30:56,295 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171707.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:31:07,794 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-07 03:31:20,037 INFO [train.py:901] (0/4) Epoch 22, batch 2000, loss[loss=0.2441, simple_loss=0.3122, pruned_loss=0.08807, over 8515.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2872, pruned_loss=0.06123, over 1617339.55 frames. ], batch size: 28, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:31:43,133 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0869, 1.6125, 4.3301, 1.9204, 2.3534, 4.9092, 5.0657, 4.2052],
+ device='cuda:0'), covar=tensor([0.1378, 0.1976, 0.0279, 0.2098, 0.1344, 0.0178, 0.0379, 0.0584],
+ device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0323, 0.0285, 0.0318, 0.0307, 0.0264, 0.0419, 0.0305],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 03:31:53,992 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.301e+02 2.928e+02 3.706e+02 6.798e+02, threshold=5.855e+02, percent-clipped=1.0
+2023-02-07 03:31:55,408 INFO [train.py:901] (0/4) Epoch 22, batch 2050, loss[loss=0.2926, simple_loss=0.3598, pruned_loss=0.1127, over 7322.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2873, pruned_loss=0.0612, over 1619128.13 frames. ], batch size: 72, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:32:17,604 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:19,713 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171825.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:31,138 INFO [train.py:901] (0/4) Epoch 22, batch 2100, loss[loss=0.1855, simple_loss=0.2608, pruned_loss=0.05509, over 7801.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2869, pruned_loss=0.06118, over 1617442.06 frames. ], batch size: 19, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:32:36,880 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:43,494 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171860.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:45,790 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6619, 1.7395, 2.4917, 1.6731, 1.3114, 2.4555, 0.5821, 1.4672],
+ device='cuda:0'), covar=tensor([0.1955, 0.1284, 0.0342, 0.1294, 0.2857, 0.0315, 0.2322, 0.1508],
+ device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0198, 0.0127, 0.0223, 0.0272, 0.0137, 0.0171, 0.0194],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 03:33:05,591 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.505e+02 2.999e+02 3.749e+02 9.868e+02, threshold=5.998e+02, percent-clipped=7.0
+2023-02-07 03:33:06,897 INFO [train.py:901] (0/4) Epoch 22, batch 2150, loss[loss=0.1927, simple_loss=0.2794, pruned_loss=0.05301, over 8517.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.288, pruned_loss=0.06268, over 1617132.30 frames. ], batch size: 49, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:33:33,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171929.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:33:42,596 INFO [train.py:901] (0/4) Epoch 22, batch 2200, loss[loss=0.2189, simple_loss=0.3015, pruned_loss=0.06812, over 7960.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2876, pruned_loss=0.06251, over 1608453.93 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:33:51,024 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:33:51,678 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171955.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:34:04,375 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0
+2023-02-07 03:34:05,575 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171975.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:34:15,544 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.362e+02 2.812e+02 3.623e+02 6.076e+02, threshold=5.624e+02, percent-clipped=1.0
+2023-02-07 03:34:16,931 INFO [train.py:901] (0/4) Epoch 22, batch 2250, loss[loss=0.1585, simple_loss=0.2332, pruned_loss=0.04195, over 7704.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2863, pruned_loss=0.06196, over 1607519.27 frames. ], batch size: 18, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:34:17,301 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-07 03:34:22,551 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-172000.pt
+2023-02-07 03:34:42,051 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8046, 1.5827, 5.9933, 2.0963, 5.3289, 5.0088, 5.4722, 5.4071],
+ device='cuda:0'), covar=tensor([0.0563, 0.4820, 0.0333, 0.3971, 0.1135, 0.0917, 0.0645, 0.0587],
+ device='cuda:0'), in_proj_covar=tensor([0.0629, 0.0645, 0.0692, 0.0623, 0.0714, 0.0612, 0.0610, 0.0675],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:34:54,270 INFO [train.py:901] (0/4) Epoch 22, batch 2300, loss[loss=0.2005, simple_loss=0.2952, pruned_loss=0.05284, over 8245.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2864, pruned_loss=0.06173, over 1607187.82 frames. ], batch size: 24, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:35:20,089 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172078.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:35:21,688 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 03:35:28,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.401e+02 3.005e+02 3.667e+02 7.010e+02, threshold=6.010e+02, percent-clipped=1.0
+2023-02-07 03:35:29,626 INFO [train.py:901] (0/4) Epoch 22, batch 2350, loss[loss=0.2265, simple_loss=0.309, pruned_loss=0.07196, over 8454.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.286, pruned_loss=0.06119, over 1613360.29 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:35:37,297 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172103.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:36:01,297 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172136.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:36:05,215 INFO [train.py:901] (0/4) Epoch 22, batch 2400, loss[loss=0.2217, simple_loss=0.3013, pruned_loss=0.07109, over 8028.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.06193, over 1621183.91 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:36:08,333 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0
+2023-02-07 03:36:11,438 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9756, 1.6293, 4.2184, 1.5581, 2.6170, 4.8630, 5.1334, 3.7105],
+ device='cuda:0'), covar=tensor([0.1707, 0.2389, 0.0406, 0.2866, 0.1376, 0.0260, 0.0419, 0.1021],
+ device='cuda:0'), in_proj_covar=tensor([0.0294, 0.0324, 0.0285, 0.0317, 0.0307, 0.0264, 0.0420, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 03:36:14,844 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8150, 3.7473, 3.4228, 1.7944, 3.3137, 3.4706, 3.3665, 3.2806],
+ device='cuda:0'), covar=tensor([0.0905, 0.0684, 0.1265, 0.4959, 0.1000, 0.1135, 0.1559, 0.0958],
+ device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0437, 0.0435, 0.0540, 0.0430, 0.0451, 0.0429, 0.0389],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:36:19,756 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2883, 2.1082, 2.6837, 2.2488, 2.5988, 2.3307, 2.1263, 1.4932],
+ device='cuda:0'), covar=tensor([0.5729, 0.5076, 0.1994, 0.3635, 0.2641, 0.2987, 0.2043, 0.5266],
+ device='cuda:0'), in_proj_covar=tensor([0.0943, 0.0976, 0.0804, 0.0941, 0.0996, 0.0893, 0.0748, 0.0826],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 03:36:39,694 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.658e+02 3.455e+02 4.348e+02 7.809e+02, threshold=6.910e+02, percent-clipped=6.0
+2023-02-07 03:36:41,128 INFO [train.py:901] (0/4) Epoch 22, batch 2450, loss[loss=0.2496, simple_loss=0.3289, pruned_loss=0.08518, over 8328.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2884, pruned_loss=0.06194, over 1619652.20 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:36:45,729 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7498, 2.4642, 3.3182, 2.6607, 3.2572, 2.6583, 2.4328, 1.9107],
+ device='cuda:0'), covar=tensor([0.4643, 0.4923, 0.1917, 0.3514, 0.2431, 0.2917, 0.1828, 0.5334],
+ device='cuda:0'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0997, 0.0895, 0.0748, 0.0828],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 03:37:08,124 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172231.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:37:16,956 INFO [train.py:901] (0/4) Epoch 22, batch 2500, loss[loss=0.2535, simple_loss=0.3171, pruned_loss=0.09498, over 7152.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2888, pruned_loss=0.0617, over 1622113.41 frames. ], batch size: 71, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:37:26,707 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172256.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:37:36,998 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2255, 2.5770, 2.0540, 3.6101, 1.5852, 1.8534, 2.0706, 2.5619],
+ device='cuda:0'), covar=tensor([0.0756, 0.0855, 0.0834, 0.0414, 0.1148, 0.1287, 0.1098, 0.0867],
+ device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0197, 0.0243, 0.0215, 0.0206, 0.0248, 0.0251, 0.0210],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:37:50,845 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.288e+02 2.722e+02 3.540e+02 9.975e+02, threshold=5.443e+02, percent-clipped=1.0
+2023-02-07 03:37:52,246 INFO [train.py:901] (0/4) Epoch 22, batch 2550, loss[loss=0.2138, simple_loss=0.2883, pruned_loss=0.06961, over 8095.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2895, pruned_loss=0.06268, over 1617312.50 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:37:56,721 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172299.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:38:25,409 INFO [train.py:901] (0/4) Epoch 22, batch 2600, loss[loss=0.2084, simple_loss=0.2874, pruned_loss=0.06468, over 8240.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2882, pruned_loss=0.06198, over 1617501.76 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:38:58,389 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.484e+02 3.096e+02 3.957e+02 1.134e+03, threshold=6.191e+02, percent-clipped=6.0
+2023-02-07 03:39:00,465 INFO [train.py:901] (0/4) Epoch 22, batch 2650, loss[loss=0.2231, simple_loss=0.2981, pruned_loss=0.07403, over 8597.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2882, pruned_loss=0.06221, over 1618747.90 frames. ], batch size: 39, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:39:16,257 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172414.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:39:35,339 INFO [train.py:901] (0/4) Epoch 22, batch 2700, loss[loss=0.2355, simple_loss=0.3131, pruned_loss=0.079, over 6585.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2872, pruned_loss=0.06185, over 1615692.15 frames. ], batch size: 71, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:40:02,834 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:40:09,204 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-07 03:40:09,438 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.345e+02 2.798e+02 3.767e+02 1.133e+03, threshold=5.596e+02, percent-clipped=4.0
+2023-02-07 03:40:10,845 INFO [train.py:901] (0/4) Epoch 22, batch 2750, loss[loss=0.1966, simple_loss=0.2681, pruned_loss=0.06259, over 7268.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2866, pruned_loss=0.06155, over 1614180.77 frames. ], batch size: 16, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:40:45,668 INFO [train.py:901] (0/4) Epoch 22, batch 2800, loss[loss=0.2146, simple_loss=0.2967, pruned_loss=0.06629, over 7808.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.287, pruned_loss=0.06181, over 1613680.98 frames. ], batch size: 20, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:41:18,227 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 2.395e+02 2.840e+02 3.614e+02 7.820e+02, threshold=5.680e+02, percent-clipped=6.0
+2023-02-07 03:41:20,389 INFO [train.py:901] (0/4) Epoch 22, batch 2850, loss[loss=0.1652, simple_loss=0.2494, pruned_loss=0.04044, over 7703.00 frames. ], tot_loss[loss=0.206, simple_loss=0.288, pruned_loss=0.06204, over 1615207.94 frames. ], batch size: 18, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:41:23,211 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:41:25,862 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4463, 4.3832, 4.0073, 1.9742, 3.8427, 4.0606, 3.9567, 3.6980],
+ device='cuda:0'), covar=tensor([0.0665, 0.0501, 0.0939, 0.4502, 0.0906, 0.0810, 0.1239, 0.0702],
+ device='cuda:0'), in_proj_covar=tensor([0.0520, 0.0431, 0.0427, 0.0532, 0.0423, 0.0443, 0.0424, 0.0383],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:41:37,765 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172616.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:41:44,843 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-07 03:41:56,011 INFO [train.py:901] (0/4) Epoch 22, batch 2900, loss[loss=0.1977, simple_loss=0.2764, pruned_loss=0.05951, over 8084.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2874, pruned_loss=0.06195, over 1613597.93 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:41:57,555 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:42:15,796 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:42:24,161 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-07 03:42:28,903 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.482e+02 2.975e+02 3.907e+02 6.756e+02, threshold=5.949e+02, percent-clipped=4.0
+2023-02-07 03:42:30,289 INFO [train.py:901] (0/4) Epoch 22, batch 2950, loss[loss=0.1787, simple_loss=0.264, pruned_loss=0.04666, over 8085.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2891, pruned_loss=0.06272, over 1615873.39 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:42:32,547 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172695.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:42:56,541 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2383, 2.0070, 2.6522, 2.2493, 2.5251, 2.2634, 2.0589, 1.3787],
+ device='cuda:0'), covar=tensor([0.5110, 0.4327, 0.1828, 0.3246, 0.2324, 0.2852, 0.1812, 0.4847],
+ device='cuda:0'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0995, 0.0896, 0.0745, 0.0827],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 03:43:05,664 INFO [train.py:901] (0/4) Epoch 22, batch 3000, loss[loss=0.1893, simple_loss=0.2778, pruned_loss=0.05042, over 8666.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06295, over 1610513.92 frames. ], batch size: 49, lr: 3.45e-03, grad_scale: 16.0
+2023-02-07 03:43:05,665 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-07 03:43:17,973 INFO [train.py:935] (0/4) Epoch 22, validation: loss=0.1735, simple_loss=0.2739, pruned_loss=0.03659, over 944034.00 frames.
+2023-02-07 03:43:17,975 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-07 03:43:25,621 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172752.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:43:51,444 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.191e+02 2.765e+02 3.574e+02 6.067e+02, threshold=5.530e+02, percent-clipped=1.0
+2023-02-07 03:43:52,758 INFO [train.py:901] (0/4) Epoch 22, batch 3050, loss[loss=0.2277, simple_loss=0.3059, pruned_loss=0.07478, over 8346.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.289, pruned_loss=0.06268, over 1615127.87 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 16.0
+2023-02-07 03:44:15,783 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1676, 2.0358, 3.4867, 2.1817, 2.7729, 3.9364, 3.9052, 3.4932],
+ device='cuda:0'), covar=tensor([0.1113, 0.1569, 0.0507, 0.1624, 0.1327, 0.0221, 0.0620, 0.0504],
+ device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0325, 0.0286, 0.0318, 0.0310, 0.0266, 0.0423, 0.0309],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-07 03:44:26,397 INFO [train.py:901] (0/4) Epoch 22, batch 3100, loss[loss=0.2405, simple_loss=0.324, pruned_loss=0.07847, over 8456.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2896, pruned_loss=0.063, over 1616773.76 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 16.0
+2023-02-07 03:44:31,364 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1638, 2.4926, 2.8078, 1.6978, 3.1232, 1.8314, 1.5721, 2.1411],
+ device='cuda:0'), covar=tensor([0.0856, 0.0442, 0.0274, 0.0836, 0.0516, 0.0924, 0.0878, 0.0620],
+ device='cuda:0'), in_proj_covar=tensor([0.0447, 0.0386, 0.0340, 0.0440, 0.0370, 0.0528, 0.0385, 0.0412],
+ device='cuda:0'), out_proj_covar=tensor([1.1964e-04, 1.0113e-04, 8.9532e-05, 1.1579e-04, 9.7151e-05, 1.4874e-04,
+ 1.0389e-04, 1.0921e-04], device='cuda:0')
+2023-02-07 03:44:32,687 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172851.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:44:40,606 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:44:44,812 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1391, 2.3241, 1.9225, 2.7805, 1.6085, 1.8395, 2.1503, 2.3962],
+ device='cuda:0'), covar=tensor([0.0693, 0.0691, 0.0849, 0.0405, 0.0985, 0.1105, 0.0735, 0.0744],
+ device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0217, 0.0208, 0.0249, 0.0252, 0.0210],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:44:50,782 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:44:59,805 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.451e+02 3.163e+02 4.463e+02 7.617e+02, threshold=6.327e+02, percent-clipped=7.0
+2023-02-07 03:45:01,201 INFO [train.py:901] (0/4) Epoch 22, batch 3150, loss[loss=0.2285, simple_loss=0.3081, pruned_loss=0.0745, over 8323.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2893, pruned_loss=0.06302, over 1617130.22 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 16.0
+2023-02-07 03:45:27,821 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0
+2023-02-07 03:45:29,142 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0
+2023-02-07 03:45:35,472 INFO [train.py:901] (0/4) Epoch 22, batch 3200, loss[loss=0.2101, simple_loss=0.2802, pruned_loss=0.07003, over 7709.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2896, pruned_loss=0.06313, over 1617752.59 frames. ], batch size: 18, lr: 3.45e-03, grad_scale: 16.0
+2023-02-07 03:45:47,723 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172960.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:45:53,342 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0712, 2.3747, 1.8896, 2.8714, 1.4351, 1.6302, 2.1459, 2.2802],
+ device='cuda:0'), covar=tensor([0.0701, 0.0704, 0.0886, 0.0358, 0.1038, 0.1269, 0.0732, 0.0769],
+ device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0217, 0.0207, 0.0249, 0.0252, 0.0210],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:46:06,751 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172987.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:46:07,588 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2174, 2.0288, 2.6843, 2.1821, 2.6574, 2.2891, 2.0945, 1.5337],
+ device='cuda:0'), covar=tensor([0.5372, 0.4800, 0.2106, 0.3975, 0.2643, 0.3056, 0.1938, 0.5365],
+ device='cuda:0'), in_proj_covar=tensor([0.0945, 0.0978, 0.0805, 0.0944, 0.0997, 0.0896, 0.0748, 0.0828],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 03:46:09,262 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.554e+02 2.964e+02 3.773e+02 6.891e+02, threshold=5.928e+02, percent-clipped=2.0
+2023-02-07 03:46:10,583 INFO [train.py:901] (0/4) Epoch 22, batch 3250, loss[loss=0.1756, simple_loss=0.2569, pruned_loss=0.04711, over 7222.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2889, pruned_loss=0.06296, over 1617243.13 frames. ], batch size: 16, lr: 3.45e-03, grad_scale: 16.0
+2023-02-07 03:46:19,603 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7204, 1.9781, 2.0343, 1.4222, 2.1584, 1.6301, 0.6884, 1.8579],
+ device='cuda:0'), covar=tensor([0.0506, 0.0309, 0.0261, 0.0482, 0.0425, 0.0758, 0.0760, 0.0262],
+ device='cuda:0'), in_proj_covar=tensor([0.0448, 0.0388, 0.0342, 0.0443, 0.0372, 0.0531, 0.0387, 0.0414],
+ device='cuda:0'), out_proj_covar=tensor([1.2008e-04, 1.0164e-04, 9.0147e-05, 1.1659e-04, 9.7624e-05, 1.4961e-04,
+ 1.0443e-04, 1.0972e-04], device='cuda:0')
+2023-02-07 03:46:40,160 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1798, 1.4357, 1.6836, 1.3886, 0.9740, 1.4101, 1.8198, 1.5166],
+ device='cuda:0'), covar=tensor([0.0491, 0.1300, 0.1713, 0.1460, 0.0593, 0.1541, 0.0675, 0.0675],
+ device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0142],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 03:46:45,374 INFO [train.py:901] (0/4) Epoch 22, batch 3300, loss[loss=0.2135, simple_loss=0.2895, pruned_loss=0.06881, over 8133.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2892, pruned_loss=0.06282, over 1616036.97 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:46:45,484 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2264, 4.2288, 3.8407, 1.9841, 3.8114, 3.8135, 3.8286, 3.6120],
+ device='cuda:0'), covar=tensor([0.0810, 0.0571, 0.1051, 0.4686, 0.1008, 0.0971, 0.1290, 0.0894],
+ device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0434, 0.0431, 0.0538, 0.0426, 0.0446, 0.0426, 0.0385],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:47:07,548 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173075.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:47:17,924 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.506e+02 2.831e+02 3.669e+02 6.075e+02, threshold=5.662e+02, percent-clipped=1.0
+2023-02-07 03:47:18,591 INFO [train.py:901] (0/4) Epoch 22, batch 3350, loss[loss=0.2074, simple_loss=0.2889, pruned_loss=0.06301, over 8617.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2893, pruned_loss=0.06267, over 1617216.14 frames. ], batch size: 31, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:47:22,020 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173096.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:47:23,439 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4198, 4.4020, 3.9827, 2.0778, 3.8567, 4.0078, 3.9788, 3.8247],
+ device='cuda:0'), covar=tensor([0.0837, 0.0572, 0.1050, 0.4760, 0.1028, 0.1052, 0.1307, 0.0870],
+ device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0434, 0.0432, 0.0536, 0.0426, 0.0446, 0.0425, 0.0385],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:47:26,079 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173102.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:47:43,693 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7843, 1.5027, 5.9591, 2.2066, 5.3509, 5.0302, 5.4545, 5.3386],
+ device='cuda:0'), covar=tensor([0.0490, 0.4913, 0.0322, 0.3797, 0.1021, 0.0849, 0.0521, 0.0508],
+ device='cuda:0'), in_proj_covar=tensor([0.0625, 0.0637, 0.0686, 0.0619, 0.0701, 0.0606, 0.0604, 0.0669],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:47:54,983 INFO [train.py:901] (0/4) Epoch 22, batch 3400, loss[loss=0.2413, simple_loss=0.3204, pruned_loss=0.08112, over 8183.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2891, pruned_loss=0.06203, over 1622453.12 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:48:12,773 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173168.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:48:21,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2264, 1.4521, 4.3451, 1.5849, 3.9037, 3.5918, 3.9756, 3.8362],
+ device='cuda:0'), covar=tensor([0.0513, 0.4107, 0.0523, 0.3688, 0.0955, 0.0931, 0.0527, 0.0576],
+ device='cuda:0'), in_proj_covar=tensor([0.0625, 0.0636, 0.0685, 0.0618, 0.0700, 0.0604, 0.0603, 0.0669],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:48:28,293 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 2.494e+02 3.128e+02 3.771e+02 6.972e+02, threshold=6.255e+02, percent-clipped=4.0
+2023-02-07 03:48:28,960 INFO [train.py:901] (0/4) Epoch 22, batch 3450, loss[loss=0.2546, simple_loss=0.3374, pruned_loss=0.08588, over 8365.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.289, pruned_loss=0.06231, over 1620332.19 frames. ], batch size: 24, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:48:39,544 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173207.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:48:42,427 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173211.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:49:05,730 INFO [train.py:901] (0/4) Epoch 22, batch 3500, loss[loss=0.2012, simple_loss=0.2919, pruned_loss=0.05528, over 8331.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2882, pruned_loss=0.06162, over 1620505.42 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:49:24,793 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-07 03:49:38,937 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.642e+02 3.082e+02 3.788e+02 9.506e+02, threshold=6.164e+02, percent-clipped=4.0
+2023-02-07 03:49:39,651 INFO [train.py:901] (0/4) Epoch 22, batch 3550, loss[loss=0.2087, simple_loss=0.2958, pruned_loss=0.06082, over 8108.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.29, pruned_loss=0.06284, over 1620050.61 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:49:50,455 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9479, 6.0345, 5.2924, 2.6613, 5.3150, 5.6750, 5.6861, 5.5546],
+ device='cuda:0'), covar=tensor([0.0468, 0.0318, 0.0817, 0.4133, 0.0661, 0.0715, 0.0978, 0.0548],
+ device='cuda:0'), in_proj_covar=tensor([0.0523, 0.0434, 0.0432, 0.0536, 0.0427, 0.0447, 0.0426, 0.0386],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:50:00,021 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:50:03,364 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:50:06,880 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173331.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:50:10,306 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8701, 1.4189, 1.6741, 1.3099, 0.9735, 1.3823, 1.6644, 1.3408],
+ device='cuda:0'), covar=tensor([0.0568, 0.1299, 0.1667, 0.1498, 0.0616, 0.1538, 0.0703, 0.0712],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0143],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 03:50:14,772 INFO [train.py:901] (0/4) Epoch 22, batch 3600, loss[loss=0.1989, simple_loss=0.2844, pruned_loss=0.05674, over 8442.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2893, pruned_loss=0.0628, over 1618337.61 frames. ], batch size: 29, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:50:24,998 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173356.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:50:26,357 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173358.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:50:43,487 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173383.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:50:48,589 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.398e+02 3.034e+02 4.459e+02 8.281e+02, threshold=6.068e+02, percent-clipped=7.0
+2023-02-07 03:50:49,310 INFO [train.py:901] (0/4) Epoch 22, batch 3650, loss[loss=0.2009, simple_loss=0.2876, pruned_loss=0.05715, over 8246.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2898, pruned_loss=0.06279, over 1618345.81 frames. ], batch size: 22, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:51:23,353 INFO [train.py:901] (0/4) Epoch 22, batch 3700, loss[loss=0.1753, simple_loss=0.2622, pruned_loss=0.04418, over 8135.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2881, pruned_loss=0.06214, over 1617115.58 frames. ], batch size: 22, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:51:24,746 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 03:51:42,309 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173467.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:51:57,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.519e+02 2.931e+02 3.909e+02 7.363e+02, threshold=5.861e+02, percent-clipped=2.0
+2023-02-07 03:51:58,521 INFO [train.py:901] (0/4) Epoch 22, batch 3750, loss[loss=0.2062, simple_loss=0.2877, pruned_loss=0.06231, over 8576.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2874, pruned_loss=0.06184, over 1616987.06 frames. ], batch size: 31, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:51:58,725 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:52:11,078 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173509.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 03:52:12,762 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173512.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:52:32,722 INFO [train.py:901] (0/4) Epoch 22, batch 3800, loss[loss=0.2009, simple_loss=0.277, pruned_loss=0.06239, over 7977.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2871, pruned_loss=0.0618, over 1617226.19 frames. ], batch size: 21, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:52:51,257 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3874, 1.6887, 1.6744, 1.1532, 1.7535, 1.3845, 0.2368, 1.5828],
+ device='cuda:0'), covar=tensor([0.0432, 0.0329, 0.0281, 0.0499, 0.0372, 0.0839, 0.0849, 0.0271],
+ device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0390, 0.0344, 0.0445, 0.0376, 0.0534, 0.0390, 0.0418],
+ device='cuda:0'), out_proj_covar=tensor([1.2101e-04, 1.0218e-04, 9.0511e-05, 1.1720e-04, 9.8907e-05, 1.5068e-04,
+ 1.0519e-04, 1.1059e-04], device='cuda:0')
+2023-02-07 03:52:58,729 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173578.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:53:07,967 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.467e+02 3.140e+02 3.842e+02 8.904e+02, threshold=6.281e+02, percent-clipped=2.0
+2023-02-07 03:53:08,687 INFO [train.py:901] (0/4) Epoch 22, batch 3850, loss[loss=0.2153, simple_loss=0.3011, pruned_loss=0.06475, over 8561.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2871, pruned_loss=0.06158, over 1618496.86 frames. ], batch size: 31, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:53:16,502 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173603.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:53:30,773 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 03:53:33,545 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173627.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:53:36,867 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:53:43,561 INFO [train.py:901] (0/4) Epoch 22, batch 3900, loss[loss=0.182, simple_loss=0.2801, pruned_loss=0.04196, over 8731.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2871, pruned_loss=0.06119, over 1618032.54 frames. ], batch size: 30, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:54:02,781 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4932, 1.4553, 1.8299, 1.2794, 1.1658, 1.8073, 0.2329, 1.1718],
+ device='cuda:0'), covar=tensor([0.1553, 0.1346, 0.0405, 0.0985, 0.2910, 0.0439, 0.2179, 0.1309],
+ device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0198, 0.0128, 0.0221, 0.0269, 0.0136, 0.0171, 0.0193],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 03:54:03,323 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173671.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:54:17,229 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.938e+02 2.507e+02 2.945e+02 3.654e+02 8.206e+02, threshold=5.890e+02, percent-clipped=3.0
+2023-02-07 03:54:17,888 INFO [train.py:901] (0/4) Epoch 22, batch 3950, loss[loss=0.2025, simple_loss=0.2813, pruned_loss=0.0618, over 7445.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2862, pruned_loss=0.06087, over 1614270.29 frames. ], batch size: 17, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:54:52,805 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5167, 1.4924, 1.8296, 1.2464, 1.2027, 1.8079, 0.2673, 1.1119],
+ device='cuda:0'), covar=tensor([0.1578, 0.1243, 0.0380, 0.0962, 0.2543, 0.0417, 0.1944, 0.1310],
+ device='cuda:0'), in_proj_covar=tensor([0.0189, 0.0197, 0.0127, 0.0220, 0.0267, 0.0136, 0.0170, 0.0193],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 03:54:53,285 INFO [train.py:901] (0/4) Epoch 22, batch 4000, loss[loss=0.2223, simple_loss=0.3026, pruned_loss=0.07106, over 8480.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2859, pruned_loss=0.06082, over 1614187.52 frames. ], batch size: 29, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:55:09,380 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0192, 2.2776, 1.8395, 2.7664, 1.2945, 1.5895, 1.9988, 2.2084],
+ device='cuda:0'), covar=tensor([0.0707, 0.0697, 0.0923, 0.0362, 0.1134, 0.1335, 0.0836, 0.0754],
+ device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0199, 0.0246, 0.0216, 0.0208, 0.0248, 0.0252, 0.0210],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:55:23,153 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173786.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:55:24,452 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0504, 1.2302, 1.1206, 1.9298, 0.8475, 1.0849, 1.5534, 1.4186],
+ device='cuda:0'), covar=tensor([0.1556, 0.1090, 0.1923, 0.0551, 0.1195, 0.1797, 0.0688, 0.0778],
+ device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0200, 0.0248, 0.0217, 0.0209, 0.0250, 0.0254, 0.0211],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:55:26,125 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.329e+02 2.821e+02 3.599e+02 1.045e+03, threshold=5.642e+02, percent-clipped=6.0
+2023-02-07 03:55:26,787 INFO [train.py:901] (0/4) Epoch 22, batch 4050, loss[loss=0.2299, simple_loss=0.3062, pruned_loss=0.07676, over 8461.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2874, pruned_loss=0.06151, over 1614757.30 frames. ], batch size: 25, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:56:02,760 INFO [train.py:901] (0/4) Epoch 22, batch 4100, loss[loss=0.2241, simple_loss=0.315, pruned_loss=0.06661, over 8480.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2889, pruned_loss=0.06239, over 1619411.89 frames. ], batch size: 29, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:56:10,365 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173853.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 03:56:31,257 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173883.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:56:36,360 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.329e+02 2.764e+02 3.605e+02 7.317e+02, threshold=5.528e+02, percent-clipped=2.0
+2023-02-07 03:56:37,021 INFO [train.py:901] (0/4) Epoch 22, batch 4150, loss[loss=0.239, simple_loss=0.3172, pruned_loss=0.08037, over 8506.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2881, pruned_loss=0.06195, over 1617925.70 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:56:47,600 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173908.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:57:02,474 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 03:57:12,184 INFO [train.py:901] (0/4) Epoch 22, batch 4200, loss[loss=0.1512, simple_loss=0.2236, pruned_loss=0.03944, over 7423.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06152, over 1614328.00 frames. ], batch size: 17, lr: 3.44e-03, grad_scale: 8.0
+2023-02-07 03:57:28,909 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 03:57:29,741 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173968.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 03:57:35,178 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173976.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:57:46,762 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.391e+02 3.056e+02 3.931e+02 9.713e+02, threshold=6.111e+02, percent-clipped=5.0
+2023-02-07 03:57:46,787 INFO [train.py:901] (0/4) Epoch 22, batch 4250, loss[loss=0.2217, simple_loss=0.2808, pruned_loss=0.08127, over 7260.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2882, pruned_loss=0.06183, over 1617034.10 frames. ], batch size: 16, lr: 3.44e-03, grad_scale: 4.0
+2023-02-07 03:57:52,302 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-174000.pt
+2023-02-07 03:57:55,806 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 03:58:04,794 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1408, 4.0962, 3.7886, 2.0569, 3.6617, 3.7440, 3.7883, 3.5746],
+ device='cuda:0'), covar=tensor([0.0774, 0.0597, 0.1011, 0.4517, 0.0847, 0.0997, 0.1225, 0.0853],
+ device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0436, 0.0433, 0.0538, 0.0425, 0.0448, 0.0428, 0.0388],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 03:58:15,725 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174033.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:58:17,067 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174035.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:58:21,635 INFO [train.py:901] (0/4) Epoch 22, batch 4300, loss[loss=0.1959, simple_loss=0.2891, pruned_loss=0.05128, over 8330.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2883, pruned_loss=0.06169, over 1618155.27 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 4.0
+2023-02-07 03:58:21,839 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:58:25,982 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-07 03:58:40,389 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174067.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:58:56,900 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174091.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:58:57,380 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.427e+02 2.775e+02 3.458e+02 5.995e+02, threshold=5.550e+02, percent-clipped=0.0
+2023-02-07 03:58:57,400 INFO [train.py:901] (0/4) Epoch 22, batch 4350, loss[loss=0.2159, simple_loss=0.3013, pruned_loss=0.06527, over 8299.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2887, pruned_loss=0.06189, over 1619034.61 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 4.0
+2023-02-07 03:59:25,038 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-07 03:59:32,547 INFO [train.py:901] (0/4) Epoch 22, batch 4400, loss[loss=0.2151, simple_loss=0.3001, pruned_loss=0.06505, over 8466.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2902, pruned_loss=0.06254, over 1620301.58 frames. ], batch size: 25, lr: 3.43e-03, grad_scale: 8.0
+2023-02-07 03:59:36,057 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7493, 1.8556, 1.5796, 2.3291, 1.0263, 1.3948, 1.6979, 1.8009],
+ device='cuda:0'), covar=tensor([0.0805, 0.0745, 0.0963, 0.0406, 0.1110, 0.1363, 0.0731, 0.0796],
+ device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0199, 0.0246, 0.0216, 0.0208, 0.0247, 0.0250, 0.0209],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 03:59:42,802 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174157.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 04:00:06,461 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-07 04:00:07,758 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.623e+02 3.065e+02 3.902e+02 1.119e+03, threshold=6.129e+02, percent-clipped=5.0
+2023-02-07 04:00:07,778 INFO [train.py:901] (0/4) Epoch 22, batch 4450, loss[loss=0.1801, simple_loss=0.2648, pruned_loss=0.04773, over 7923.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2911, pruned_loss=0.06295, over 1620422.65 frames. ], batch size: 20, lr: 3.43e-03, grad_scale: 8.0
+2023-02-07 04:00:24,477 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-02-07 04:00:26,069 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4280, 1.6244, 2.2143, 1.3203, 1.6011, 1.6934, 1.5161, 1.6222],
+ device='cuda:0'), covar=tensor([0.1990, 0.2674, 0.0975, 0.4753, 0.1915, 0.3478, 0.2391, 0.2090],
+ device='cuda:0'), in_proj_covar=tensor([0.0528, 0.0605, 0.0559, 0.0645, 0.0646, 0.0591, 0.0536, 0.0630],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 04:00:30,136 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174224.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 04:00:41,754 INFO [train.py:901] (0/4) Epoch 22, batch 4500, loss[loss=0.2372, simple_loss=0.3247, pruned_loss=0.07488, over 8635.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2908, pruned_loss=0.06332, over 1618429.90 frames. ], batch size: 31, lr: 3.43e-03, grad_scale: 8.0
+2023-02-07 04:00:46,559 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174249.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 04:00:56,984 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-07 04:01:17,042 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.524e+02 3.306e+02 4.354e+02 7.569e+02, threshold=6.612e+02, percent-clipped=6.0
+2023-02-07 04:01:17,062 INFO [train.py:901] (0/4) Epoch 22, batch 4550, loss[loss=0.1821, simple_loss=0.2632, pruned_loss=0.05047, over 7921.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2906, pruned_loss=0.06317, over 1617487.30 frames. ], batch size: 20, lr: 3.43e-03, grad_scale: 8.0
+2023-02-07 04:01:23,301 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 04:01:28,566 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174309.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 04:01:44,046 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. limit=2.0
+2023-02-07 04:01:51,063 INFO [train.py:901] (0/4) Epoch 22, batch 4600, loss[loss=0.198, simple_loss=0.2946, pruned_loss=0.05066, over 8360.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2908, pruned_loss=0.06335, over 1615441.54 frames. 
], batch size: 24, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:54,749 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:12,049 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:15,437 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:16,887 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:25,990 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.375e+02 2.973e+02 3.873e+02 1.031e+03, threshold=5.946e+02, percent-clipped=3.0 +2023-02-07 04:02:26,016 INFO [train.py:901] (0/4) Epoch 22, batch 4650, loss[loss=0.197, simple_loss=0.2969, pruned_loss=0.04857, over 8243.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2898, pruned_loss=0.06271, over 1618692.88 frames. ], batch size: 24, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:02:31,944 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 04:02:37,948 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:02,520 INFO [train.py:901] (0/4) Epoch 22, batch 4700, loss[loss=0.1973, simple_loss=0.2679, pruned_loss=0.06335, over 7711.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.06166, over 1618320.11 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:21,309 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7518, 1.9520, 2.0965, 1.3756, 2.2165, 1.5640, 0.6917, 1.9680], + device='cuda:0'), covar=tensor([0.0557, 0.0343, 0.0267, 0.0564, 0.0359, 0.0710, 0.0849, 0.0294], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0387, 0.0343, 0.0442, 0.0372, 0.0528, 0.0385, 0.0415], + device='cuda:0'), out_proj_covar=tensor([1.2022e-04, 1.0130e-04, 9.0373e-05, 1.1635e-04, 9.7755e-05, 1.4885e-04, + 1.0387e-04, 1.0982e-04], device='cuda:0') +2023-02-07 04:03:37,044 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.374e+02 2.923e+02 3.899e+02 9.329e+02, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 04:03:37,065 INFO [train.py:901] (0/4) Epoch 22, batch 4750, loss[loss=0.2085, simple_loss=0.2982, pruned_loss=0.05939, over 8254.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2887, pruned_loss=0.06201, over 1615786.50 frames. 
], batch size: 24, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:37,252 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:38,601 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:43,310 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:59,881 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7219, 2.2258, 3.4116, 1.6867, 1.6840, 3.2971, 0.7426, 1.9864], + device='cuda:0'), covar=tensor([0.1548, 0.1212, 0.0241, 0.1924, 0.2634, 0.0278, 0.2172, 0.1579], + device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0198, 0.0128, 0.0220, 0.0269, 0.0136, 0.0171, 0.0194], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:04:04,453 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 04:04:06,501 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 04:04:12,378 INFO [train.py:901] (0/4) Epoch 22, batch 4800, loss[loss=0.2124, simple_loss=0.2988, pruned_loss=0.06299, over 8194.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2883, pruned_loss=0.06212, over 1615388.16 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:19,976 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.95 vs. limit=5.0 +2023-02-07 04:04:22,156 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7111, 2.1572, 3.4877, 1.8474, 1.5293, 3.3928, 0.6494, 2.0184], + device='cuda:0'), covar=tensor([0.1510, 0.1385, 0.0284, 0.1918, 0.3283, 0.0315, 0.2444, 0.1639], + device='cuda:0'), in_proj_covar=tensor([0.0191, 0.0198, 0.0129, 0.0221, 0.0270, 0.0137, 0.0172, 0.0195], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:04:27,173 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 04:04:46,103 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.398e+02 2.995e+02 3.860e+02 8.125e+02, threshold=5.990e+02, percent-clipped=3.0 +2023-02-07 04:04:46,130 INFO [train.py:901] (0/4) Epoch 22, batch 4850, loss[loss=0.1992, simple_loss=0.2772, pruned_loss=0.06057, over 7538.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2896, pruned_loss=0.06304, over 1614908.45 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:55,425 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 04:05:02,299 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:20,319 INFO [train.py:901] (0/4) Epoch 22, batch 4900, loss[loss=0.1888, simple_loss=0.2761, pruned_loss=0.05081, over 8092.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2898, pruned_loss=0.06369, over 1617098.49 frames. 
], batch size: 21, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:05:23,132 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:29,307 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:56,489 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.582e+02 3.121e+02 3.821e+02 7.682e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-07 04:05:56,510 INFO [train.py:901] (0/4) Epoch 22, batch 4950, loss[loss=0.197, simple_loss=0.2861, pruned_loss=0.054, over 8031.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2895, pruned_loss=0.06309, over 1619452.30 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:17,783 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:21,265 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4877, 2.4434, 1.6842, 2.2466, 1.9750, 1.3586, 1.9122, 2.0570], + device='cuda:0'), covar=tensor([0.1619, 0.0418, 0.1393, 0.0659, 0.0916, 0.1771, 0.1129, 0.1002], + device='cuda:0'), in_proj_covar=tensor([0.0350, 0.0231, 0.0331, 0.0305, 0.0297, 0.0336, 0.0339, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 04:06:27,422 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6569, 2.9724, 2.6369, 4.1475, 1.8016, 2.2550, 2.7239, 3.1189], + device='cuda:0'), covar=tensor([0.0617, 0.0713, 0.0675, 0.0238, 0.1084, 0.1135, 0.0842, 0.0714], + device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0198, 0.0245, 0.0216, 0.0208, 0.0248, 0.0250, 0.0210], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 04:06:30,646 INFO [train.py:901] (0/4) Epoch 22, batch 5000, loss[loss=0.2119, simple_loss=0.2944, pruned_loss=0.06466, over 8026.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06176, over 1617330.75 frames. 
], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:35,000 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,235 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,386 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:43,936 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:51,010 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:54,576 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174773.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:55,980 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:07:07,683 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.477e+02 2.928e+02 3.612e+02 7.754e+02, threshold=5.856e+02, percent-clipped=3.0 +2023-02-07 04:07:07,703 INFO [train.py:901] (0/4) Epoch 22, batch 5050, loss[loss=0.2097, simple_loss=0.2967, pruned_loss=0.0613, over 8542.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2884, pruned_loss=0.06196, over 1626100.90 frames. ], batch size: 31, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:16,964 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0572, 1.4876, 1.7067, 1.4323, 1.0363, 1.4631, 1.9371, 1.6214], + device='cuda:0'), covar=tensor([0.0524, 0.1246, 0.1637, 0.1423, 0.0594, 0.1472, 0.0654, 0.0670], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0164, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 04:07:27,846 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2538, 1.9751, 2.6261, 2.1536, 2.4996, 2.2679, 2.0363, 1.4280], + device='cuda:0'), covar=tensor([0.5193, 0.4842, 0.1982, 0.3634, 0.2383, 0.2883, 0.1820, 0.5135], + device='cuda:0'), in_proj_covar=tensor([0.0935, 0.0975, 0.0800, 0.0941, 0.0989, 0.0888, 0.0745, 0.0821], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 04:07:34,469 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 04:07:42,672 INFO [train.py:901] (0/4) Epoch 22, batch 5100, loss[loss=0.2179, simple_loss=0.3108, pruned_loss=0.06246, over 8550.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2878, pruned_loss=0.06127, over 1626387.45 frames. 
], batch size: 31, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:58,165 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:03,627 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:17,810 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.498e+02 3.130e+02 3.757e+02 7.363e+02, threshold=6.259e+02, percent-clipped=3.0 +2023-02-07 04:08:17,830 INFO [train.py:901] (0/4) Epoch 22, batch 5150, loss[loss=0.1801, simple_loss=0.255, pruned_loss=0.05266, over 7700.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.289, pruned_loss=0.06243, over 1624507.09 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:21,121 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:52,061 INFO [train.py:901] (0/4) Epoch 22, batch 5200, loss[loss=0.1733, simple_loss=0.244, pruned_loss=0.05131, over 7715.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2881, pruned_loss=0.06219, over 1618252.58 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:52,890 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8341, 1.2366, 3.9517, 1.4490, 3.4713, 3.2943, 3.6199, 3.4565], + device='cuda:0'), covar=tensor([0.0615, 0.5168, 0.0662, 0.4372, 0.1333, 0.1105, 0.0658, 0.0801], + device='cuda:0'), in_proj_covar=tensor([0.0628, 0.0646, 0.0695, 0.0627, 0.0708, 0.0603, 0.0606, 0.0677], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:08:58,258 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1762, 4.1328, 3.7477, 1.9007, 3.6308, 3.7989, 3.7473, 3.6484], + device='cuda:0'), covar=tensor([0.0708, 0.0521, 0.0994, 0.4702, 0.0878, 0.0940, 0.1223, 0.0852], + device='cuda:0'), in_proj_covar=tensor([0.0522, 0.0433, 0.0432, 0.0536, 0.0424, 0.0444, 0.0424, 0.0387], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:09:26,990 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.432e+02 2.854e+02 3.739e+02 7.258e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-07 04:09:27,011 INFO [train.py:901] (0/4) Epoch 22, batch 5250, loss[loss=0.2268, simple_loss=0.3173, pruned_loss=0.06814, over 8337.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2884, pruned_loss=0.06207, over 1619039.05 frames. ], batch size: 26, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:09:31,811 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 04:09:44,045 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:09:49,535 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:01,595 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:01,999 INFO [train.py:901] (0/4) Epoch 22, batch 5300, loss[loss=0.2476, simple_loss=0.307, pruned_loss=0.09413, over 7815.00 frames. 
], tot_loss[loss=0.2062, simple_loss=0.288, pruned_loss=0.06213, over 1611118.84 frames. ], batch size: 20, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:07,097 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:19,113 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:35,756 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.519e+02 3.149e+02 3.909e+02 1.075e+03, threshold=6.297e+02, percent-clipped=6.0 +2023-02-07 04:10:35,776 INFO [train.py:901] (0/4) Epoch 22, batch 5350, loss[loss=0.1824, simple_loss=0.2737, pruned_loss=0.04559, over 8192.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2865, pruned_loss=0.06145, over 1609008.31 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:57,891 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:11,825 INFO [train.py:901] (0/4) Epoch 22, batch 5400, loss[loss=0.2308, simple_loss=0.3167, pruned_loss=0.07247, over 8351.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2866, pruned_loss=0.06176, over 1608895.57 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:11:13,249 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2305, 3.1841, 2.9085, 1.5595, 2.8017, 2.9360, 2.8538, 2.8798], + device='cuda:0'), covar=tensor([0.1178, 0.0799, 0.1283, 0.4588, 0.1104, 0.1326, 0.1523, 0.0987], + device='cuda:0'), in_proj_covar=tensor([0.0521, 0.0432, 0.0430, 0.0533, 0.0422, 0.0442, 0.0422, 0.0384], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:11:14,633 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:22,709 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:39,690 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:46,276 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.426e+02 2.833e+02 4.034e+02 1.686e+03, threshold=5.665e+02, percent-clipped=5.0 +2023-02-07 04:11:46,296 INFO [train.py:901] (0/4) Epoch 22, batch 5450, loss[loss=0.202, simple_loss=0.2856, pruned_loss=0.05923, over 8132.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.06171, over 1613320.31 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:09,756 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:12:20,253 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 04:12:22,327 INFO [train.py:901] (0/4) Epoch 22, batch 5500, loss[loss=0.2329, simple_loss=0.3257, pruned_loss=0.07004, over 8527.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2864, pruned_loss=0.06148, over 1608499.38 frames. ], batch size: 28, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:40,055 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-07 04:12:56,619 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.376e+02 2.848e+02 3.508e+02 8.289e+02, threshold=5.697e+02, percent-clipped=6.0 +2023-02-07 04:12:56,639 INFO [train.py:901] (0/4) Epoch 22, batch 5550, loss[loss=0.1916, simple_loss=0.2552, pruned_loss=0.06401, over 7419.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06126, over 1608236.14 frames. ], batch size: 17, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:31,790 INFO [train.py:901] (0/4) Epoch 22, batch 5600, loss[loss=0.1869, simple_loss=0.2637, pruned_loss=0.05505, over 7798.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2865, pruned_loss=0.0612, over 1614561.85 frames. ], batch size: 20, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:46,066 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6827, 2.0880, 3.2452, 1.4888, 2.4374, 2.1557, 1.6769, 2.5379], + device='cuda:0'), covar=tensor([0.1902, 0.2517, 0.0832, 0.4504, 0.1866, 0.3145, 0.2478, 0.2180], + device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0604, 0.0554, 0.0641, 0.0645, 0.0589, 0.0535, 0.0629], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:13:49,337 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6417, 1.5952, 2.1181, 1.4211, 1.3108, 2.0874, 0.2697, 1.2418], + device='cuda:0'), covar=tensor([0.1663, 0.1521, 0.0359, 0.1039, 0.2546, 0.0428, 0.2122, 0.1419], + device='cuda:0'), in_proj_covar=tensor([0.0191, 0.0197, 0.0129, 0.0220, 0.0267, 0.0137, 0.0168, 0.0194], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:13:50,743 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.16 vs. limit=5.0 +2023-02-07 04:14:03,191 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8416, 2.1310, 1.7477, 2.6613, 1.2902, 1.5884, 2.0044, 2.0888], + device='cuda:0'), covar=tensor([0.0762, 0.0746, 0.0923, 0.0366, 0.1014, 0.1274, 0.0674, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0198, 0.0247, 0.0216, 0.0208, 0.0247, 0.0252, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 04:14:03,816 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:06,408 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.532e+02 3.141e+02 4.135e+02 1.836e+03, threshold=6.283e+02, percent-clipped=10.0 +2023-02-07 04:14:06,428 INFO [train.py:901] (0/4) Epoch 22, batch 5650, loss[loss=0.1918, simple_loss=0.2768, pruned_loss=0.05342, over 8284.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.287, pruned_loss=0.06165, over 1612462.23 frames. 
], batch size: 23, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:09,950 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6815, 1.9489, 2.9872, 1.5023, 2.1801, 2.0111, 1.7025, 2.1098], + device='cuda:0'), covar=tensor([0.1796, 0.2384, 0.0825, 0.4345, 0.1839, 0.3202, 0.2278, 0.2210], + device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0604, 0.0553, 0.0640, 0.0645, 0.0589, 0.0535, 0.0628], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:14:20,107 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:22,700 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 04:14:37,560 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:40,743 INFO [train.py:901] (0/4) Epoch 22, batch 5700, loss[loss=0.1822, simple_loss=0.2608, pruned_loss=0.05181, over 7693.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2864, pruned_loss=0.06154, over 1613922.31 frames. ], batch size: 18, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:56,160 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:15,527 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.452e+02 2.878e+02 3.661e+02 5.836e+02, threshold=5.755e+02, percent-clipped=0.0 +2023-02-07 04:15:15,547 INFO [train.py:901] (0/4) Epoch 22, batch 5750, loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.06895, over 8485.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.287, pruned_loss=0.06172, over 1613430.64 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:15:20,411 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:22,435 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:27,204 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 04:15:50,255 INFO [train.py:901] (0/4) Epoch 22, batch 5800, loss[loss=0.2118, simple_loss=0.293, pruned_loss=0.06527, over 8501.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2872, pruned_loss=0.06162, over 1613198.86 frames. ], batch size: 26, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:09,010 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:16:25,901 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 2.280e+02 2.739e+02 3.457e+02 6.413e+02, threshold=5.479e+02, percent-clipped=3.0 +2023-02-07 04:16:25,921 INFO [train.py:901] (0/4) Epoch 22, batch 5850, loss[loss=0.2008, simple_loss=0.28, pruned_loss=0.06077, over 7254.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2868, pruned_loss=0.06134, over 1613432.71 frames. 
], batch size: 16, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:42,215 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:00,355 INFO [train.py:901] (0/4) Epoch 22, batch 5900, loss[loss=0.2132, simple_loss=0.2916, pruned_loss=0.06737, over 8702.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2883, pruned_loss=0.06228, over 1617550.54 frames. ], batch size: 34, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:17:29,305 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:35,263 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.520e+02 3.043e+02 3.699e+02 9.671e+02, threshold=6.086e+02, percent-clipped=7.0 +2023-02-07 04:17:35,284 INFO [train.py:901] (0/4) Epoch 22, batch 5950, loss[loss=0.1979, simple_loss=0.2686, pruned_loss=0.06355, over 7651.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2864, pruned_loss=0.0613, over 1614169.86 frames. ], batch size: 19, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:00,037 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175728.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:02,692 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:09,579 INFO [train.py:901] (0/4) Epoch 22, batch 6000, loss[loss=0.185, simple_loss=0.2822, pruned_loss=0.0439, over 8507.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06149, over 1616366.72 frames. ], batch size: 28, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:09,579 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 04:18:21,639 INFO [train.py:935] (0/4) Epoch 22, validation: loss=0.1729, simple_loss=0.2732, pruned_loss=0.03632, over 944034.00 frames. +2023-02-07 04:18:21,640 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 04:18:31,458 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:56,208 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.478e+02 2.934e+02 3.623e+02 7.032e+02, threshold=5.869e+02, percent-clipped=2.0 +2023-02-07 04:18:56,229 INFO [train.py:901] (0/4) Epoch 22, batch 6050, loss[loss=0.2441, simple_loss=0.3184, pruned_loss=0.08487, over 8838.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06188, over 1616147.50 frames. ], batch size: 40, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:31,875 INFO [train.py:901] (0/4) Epoch 22, batch 6100, loss[loss=0.2056, simple_loss=0.302, pruned_loss=0.05454, over 8184.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06189, over 1615609.01 frames. 
], batch size: 23, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:32,683 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:35,630 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:51,977 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:52,699 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:56,549 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 04:20:02,529 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2018, 1.9456, 2.5209, 2.1096, 2.3786, 2.2171, 2.0359, 1.2230], + device='cuda:0'), covar=tensor([0.5051, 0.4470, 0.1925, 0.3539, 0.2475, 0.3040, 0.1804, 0.5223], + device='cuda:0'), in_proj_covar=tensor([0.0942, 0.0980, 0.0806, 0.0945, 0.0996, 0.0896, 0.0747, 0.0826], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 04:20:04,917 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 04:20:07,201 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.365e+02 2.974e+02 3.880e+02 6.577e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 04:20:07,221 INFO [train.py:901] (0/4) Epoch 22, batch 6150, loss[loss=0.1859, simple_loss=0.2714, pruned_loss=0.05022, over 8077.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2873, pruned_loss=0.06188, over 1610673.45 frames. ], batch size: 21, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:10,657 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:11,999 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:35,031 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7556, 2.4084, 3.4123, 2.6695, 3.2177, 2.6557, 2.4585, 1.8742], + device='cuda:0'), covar=tensor([0.4962, 0.5009, 0.1699, 0.3505, 0.2478, 0.2840, 0.1755, 0.5428], + device='cuda:0'), in_proj_covar=tensor([0.0944, 0.0981, 0.0806, 0.0946, 0.0997, 0.0897, 0.0749, 0.0827], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 04:20:40,366 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:41,027 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0420, 2.4280, 2.7130, 1.3973, 2.8964, 1.6950, 1.5086, 2.1063], + device='cuda:0'), covar=tensor([0.0854, 0.0432, 0.0374, 0.0882, 0.0573, 0.0906, 0.0891, 0.0556], + device='cuda:0'), in_proj_covar=tensor([0.0454, 0.0390, 0.0346, 0.0447, 0.0378, 0.0533, 0.0391, 0.0421], + device='cuda:0'), out_proj_covar=tensor([1.2165e-04, 1.0223e-04, 9.0932e-05, 1.1754e-04, 9.9585e-05, 1.5018e-04, + 1.0525e-04, 1.1143e-04], device='cuda:0') +2023-02-07 04:20:41,476 INFO [train.py:901] (0/4) Epoch 22, batch 6200, loss[loss=0.1826, simple_loss=0.2789, pruned_loss=0.04314, over 8607.00 frames. 
], tot_loss[loss=0.2048, simple_loss=0.287, pruned_loss=0.06132, over 1611638.56 frames. ], batch size: 34, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:52,895 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:56,912 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:58,221 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175965.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:21:15,640 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.329e+02 2.882e+02 3.634e+02 1.217e+03, threshold=5.765e+02, percent-clipped=6.0 +2023-02-07 04:21:15,660 INFO [train.py:901] (0/4) Epoch 22, batch 6250, loss[loss=0.2072, simple_loss=0.2891, pruned_loss=0.06269, over 8642.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2875, pruned_loss=0.06206, over 1611355.14 frames. ], batch size: 39, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:21:21,790 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-176000.pt +2023-02-07 04:21:26,197 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6680, 4.6600, 4.1909, 2.0737, 4.1454, 4.2073, 4.1282, 4.0691], + device='cuda:0'), covar=tensor([0.0614, 0.0480, 0.0996, 0.4465, 0.0741, 0.0872, 0.1261, 0.0693], + device='cuda:0'), in_proj_covar=tensor([0.0531, 0.0438, 0.0437, 0.0543, 0.0429, 0.0448, 0.0428, 0.0389], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:21:51,350 INFO [train.py:901] (0/4) Epoch 22, batch 6300, loss[loss=0.2039, simple_loss=0.289, pruned_loss=0.05945, over 8129.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2894, pruned_loss=0.06318, over 1613444.90 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:12,730 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176072.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:26,685 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.297e+02 2.795e+02 3.577e+02 6.374e+02, threshold=5.590e+02, percent-clipped=1.0 +2023-02-07 04:22:26,705 INFO [train.py:901] (0/4) Epoch 22, batch 6350, loss[loss=0.2527, simple_loss=0.3401, pruned_loss=0.08266, over 8722.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2874, pruned_loss=0.06188, over 1611501.82 frames. ], batch size: 30, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:27,806 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.13 vs. limit=5.0 +2023-02-07 04:22:29,245 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-07 04:22:34,393 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,184 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,828 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:01,397 INFO [train.py:901] (0/4) Epoch 22, batch 6400, loss[loss=0.1921, simple_loss=0.281, pruned_loss=0.05158, over 8374.00 frames. 
], tot_loss[loss=0.2053, simple_loss=0.2873, pruned_loss=0.06163, over 1616506.28 frames. ], batch size: 24, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:08,431 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:33,469 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176187.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:36,652 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.375e+02 2.869e+02 3.334e+02 7.002e+02, threshold=5.738e+02, percent-clipped=1.0 +2023-02-07 04:23:36,672 INFO [train.py:901] (0/4) Epoch 22, batch 6450, loss[loss=0.201, simple_loss=0.2903, pruned_loss=0.05587, over 8200.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2874, pruned_loss=0.06144, over 1617152.38 frames. ], batch size: 23, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:52,603 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:09,899 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:11,675 INFO [train.py:901] (0/4) Epoch 22, batch 6500, loss[loss=0.1794, simple_loss=0.2597, pruned_loss=0.04959, over 7685.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2876, pruned_loss=0.06125, over 1616265.06 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:12,441 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:15,767 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1751, 1.5175, 4.4122, 2.0762, 2.6020, 5.0265, 5.1388, 4.3378], + device='cuda:0'), covar=tensor([0.1239, 0.2005, 0.0280, 0.1950, 0.1137, 0.0183, 0.0400, 0.0569], + device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0323, 0.0287, 0.0315, 0.0310, 0.0266, 0.0421, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 04:24:23,871 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176260.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:34,064 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:41,794 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7183, 2.1186, 3.3106, 1.4782, 2.5424, 2.2058, 1.7792, 2.4876], + device='cuda:0'), covar=tensor([0.1830, 0.2702, 0.0811, 0.4647, 0.1741, 0.3025, 0.2333, 0.2131], + device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0606, 0.0557, 0.0644, 0.0648, 0.0595, 0.0538, 0.0633], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:24:45,632 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.325e+02 2.725e+02 3.404e+02 5.159e+02, threshold=5.450e+02, percent-clipped=0.0 +2023-02-07 04:24:45,652 INFO [train.py:901] (0/4) Epoch 22, batch 6550, loss[loss=0.2298, simple_loss=0.3158, pruned_loss=0.0719, over 8808.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2873, pruned_loss=0.06093, over 1612154.51 frames. 
], batch size: 39, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:57,260 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176307.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:09,355 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 04:25:13,583 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3197, 2.1538, 1.8010, 2.0236, 1.8133, 1.4615, 1.7392, 1.6976], + device='cuda:0'), covar=tensor([0.1221, 0.0406, 0.1140, 0.0469, 0.0673, 0.1443, 0.0906, 0.0770], + device='cuda:0'), in_proj_covar=tensor([0.0352, 0.0232, 0.0333, 0.0309, 0.0298, 0.0340, 0.0345, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 04:25:21,087 INFO [train.py:901] (0/4) Epoch 22, batch 6600, loss[loss=0.1847, simple_loss=0.2525, pruned_loss=0.05849, over 7286.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2879, pruned_loss=0.06159, over 1611134.84 frames. ], batch size: 16, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:25:29,279 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 04:25:32,750 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:55,382 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.510e+02 3.110e+02 4.060e+02 7.968e+02, threshold=6.221e+02, percent-clipped=4.0 +2023-02-07 04:25:55,402 INFO [train.py:901] (0/4) Epoch 22, batch 6650, loss[loss=0.1662, simple_loss=0.2448, pruned_loss=0.04384, over 7406.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2873, pruned_loss=0.06125, over 1615294.34 frames. ], batch size: 17, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:26:17,181 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:31,229 INFO [train.py:901] (0/4) Epoch 22, batch 6700, loss[loss=0.1894, simple_loss=0.2683, pruned_loss=0.05528, over 8241.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.288, pruned_loss=0.06179, over 1613395.61 frames. 
], batch size: 22, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:26:32,076 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8864, 2.0423, 1.7218, 2.5608, 1.3364, 1.5528, 1.9629, 2.0299], + device='cuda:0'), covar=tensor([0.0730, 0.0708, 0.0886, 0.0362, 0.0996, 0.1244, 0.0713, 0.0750], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0196, 0.0243, 0.0214, 0.0204, 0.0244, 0.0249, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 04:26:32,109 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:49,607 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:00,363 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:05,583 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.672e+02 3.290e+02 4.002e+02 8.131e+02, threshold=6.579e+02, percent-clipped=6.0 +2023-02-07 04:27:05,602 INFO [train.py:901] (0/4) Epoch 22, batch 6750, loss[loss=0.2258, simple_loss=0.3106, pruned_loss=0.07053, over 8612.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2884, pruned_loss=0.06159, over 1616465.14 frames. ], batch size: 31, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:41,476 INFO [train.py:901] (0/4) Epoch 22, batch 6800, loss[loss=0.1991, simple_loss=0.2803, pruned_loss=0.05898, over 7252.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2886, pruned_loss=0.0624, over 1610553.43 frames. ], batch size: 16, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:44,989 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 04:28:16,785 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.317e+02 3.026e+02 3.783e+02 8.757e+02, threshold=6.052e+02, percent-clipped=1.0 +2023-02-07 04:28:16,805 INFO [train.py:901] (0/4) Epoch 22, batch 6850, loss[loss=0.2339, simple_loss=0.2978, pruned_loss=0.08506, over 7808.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.288, pruned_loss=0.06221, over 1610209.43 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:24,733 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:28,901 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6321, 2.0308, 3.0870, 1.3757, 2.4065, 1.8435, 1.8082, 2.2584], + device='cuda:0'), covar=tensor([0.2207, 0.2794, 0.1178, 0.5165, 0.2065, 0.3743, 0.2558, 0.2807], + device='cuda:0'), in_proj_covar=tensor([0.0526, 0.0605, 0.0555, 0.0643, 0.0648, 0.0594, 0.0537, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:28:31,632 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:34,701 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 04:28:34,752 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176619.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:48,424 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:50,267 INFO [train.py:901] (0/4) Epoch 22, batch 6900, loss[loss=0.185, simple_loss=0.2691, pruned_loss=0.05044, over 7237.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2875, pruned_loss=0.0616, over 1610383.53 frames. ], batch size: 16, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:51,041 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:06,970 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6374, 3.5787, 3.2994, 2.1769, 3.1851, 3.2339, 3.2682, 3.0477], + device='cuda:0'), covar=tensor([0.0856, 0.0659, 0.0999, 0.3753, 0.0954, 0.1122, 0.1247, 0.1088], + device='cuda:0'), in_proj_covar=tensor([0.0530, 0.0437, 0.0435, 0.0542, 0.0427, 0.0448, 0.0428, 0.0389], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:29:17,285 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:17,603 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-07 04:29:20,602 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:26,749 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 2.439e+02 3.078e+02 3.806e+02 5.995e+02, threshold=6.157e+02, percent-clipped=0.0 +2023-02-07 04:29:26,775 INFO [train.py:901] (0/4) Epoch 22, batch 6950, loss[loss=0.1871, simple_loss=0.2783, pruned_loss=0.04789, over 8356.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2877, pruned_loss=0.06147, over 1611721.55 frames. ], batch size: 49, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:29:35,498 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176703.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:44,284 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 04:29:46,555 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176719.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:56,862 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:30:02,055 INFO [train.py:901] (0/4) Epoch 22, batch 7000, loss[loss=0.177, simple_loss=0.2698, pruned_loss=0.04207, over 8196.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2879, pruned_loss=0.06161, over 1612502.08 frames. ], batch size: 23, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:30:07,859 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-02-07 04:30:13,251 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7204, 1.8477, 2.0203, 1.9071, 1.2013, 1.8057, 2.2856, 2.0008], + device='cuda:0'), covar=tensor([0.0465, 0.1106, 0.1498, 0.1225, 0.0600, 0.1316, 0.0623, 0.0598], + device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0158, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 04:30:30,000 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6951, 1.8923, 1.9568, 1.4649, 2.0685, 1.4579, 0.5354, 1.9176], + device='cuda:0'), covar=tensor([0.0588, 0.0379, 0.0332, 0.0576, 0.0435, 0.0959, 0.0945, 0.0293], + device='cuda:0'), in_proj_covar=tensor([0.0453, 0.0390, 0.0345, 0.0447, 0.0379, 0.0535, 0.0391, 0.0422], + device='cuda:0'), out_proj_covar=tensor([1.2141e-04, 1.0229e-04, 9.0739e-05, 1.1767e-04, 9.9832e-05, 1.5080e-04, + 1.0535e-04, 1.1165e-04], device='cuda:0') +2023-02-07 04:30:37,819 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.923e+02 3.703e+02 8.900e+02, threshold=5.847e+02, percent-clipped=5.0 +2023-02-07 04:30:37,839 INFO [train.py:901] (0/4) Epoch 22, batch 7050, loss[loss=0.196, simple_loss=0.2832, pruned_loss=0.0544, over 8027.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2868, pruned_loss=0.06119, over 1608022.69 frames. ], batch size: 22, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:03,289 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:12,380 INFO [train.py:901] (0/4) Epoch 22, batch 7100, loss[loss=0.202, simple_loss=0.2923, pruned_loss=0.05585, over 8026.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2868, pruned_loss=0.06074, over 1608551.48 frames. ], batch size: 22, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:30,665 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 04:31:31,805 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:46,098 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.316e+02 2.836e+02 3.633e+02 7.093e+02, threshold=5.673e+02, percent-clipped=3.0 +2023-02-07 04:31:46,118 INFO [train.py:901] (0/4) Epoch 22, batch 7150, loss[loss=0.2422, simple_loss=0.3296, pruned_loss=0.07742, over 8501.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.287, pruned_loss=0.06065, over 1614071.54 frames. ], batch size: 49, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:32:22,266 INFO [train.py:901] (0/4) Epoch 22, batch 7200, loss[loss=0.1961, simple_loss=0.2711, pruned_loss=0.06061, over 6820.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2864, pruned_loss=0.06057, over 1607102.62 frames. 
], batch size: 15, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:23,108 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:44,920 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176975.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:52,844 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:54,993 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:56,132 INFO [train.py:901] (0/4) Epoch 22, batch 7250, loss[loss=0.2089, simple_loss=0.2776, pruned_loss=0.0701, over 7800.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.286, pruned_loss=0.06039, over 1609876.49 frames. ], batch size: 19, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:56,788 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.385e+02 2.852e+02 3.441e+02 7.839e+02, threshold=5.703e+02, percent-clipped=2.0 +2023-02-07 04:33:02,397 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:14,103 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177015.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:21,887 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177027.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:31,976 INFO [train.py:901] (0/4) Epoch 22, batch 7300, loss[loss=0.2119, simple_loss=0.2939, pruned_loss=0.06491, over 8129.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2864, pruned_loss=0.06031, over 1612517.97 frames. ], batch size: 22, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:06,491 INFO [train.py:901] (0/4) Epoch 22, batch 7350, loss[loss=0.1776, simple_loss=0.2521, pruned_loss=0.05151, over 7431.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2878, pruned_loss=0.0613, over 1610803.73 frames. ], batch size: 17, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:07,156 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.532e+02 3.310e+02 4.342e+02 9.656e+02, threshold=6.621e+02, percent-clipped=7.0 +2023-02-07 04:34:13,478 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:26,063 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 04:34:31,744 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:33,086 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:42,503 INFO [train.py:901] (0/4) Epoch 22, batch 7400, loss[loss=0.2163, simple_loss=0.3056, pruned_loss=0.06352, over 8197.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.288, pruned_loss=0.06183, over 1612151.64 frames. ], batch size: 23, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:42,681 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:47,994 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-07 04:34:49,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3916, 1.6694, 1.6720, 0.9631, 1.7073, 1.3551, 0.2788, 1.6066], + device='cuda:0'), covar=tensor([0.0447, 0.0356, 0.0298, 0.0559, 0.0361, 0.0886, 0.0860, 0.0280], + device='cuda:0'), in_proj_covar=tensor([0.0458, 0.0396, 0.0348, 0.0451, 0.0383, 0.0540, 0.0394, 0.0425], + device='cuda:0'), out_proj_covar=tensor([1.2273e-04, 1.0389e-04, 9.1480e-05, 1.1879e-04, 1.0073e-04, 1.5222e-04, + 1.0607e-04, 1.1261e-04], device='cuda:0') +2023-02-07 04:35:16,507 INFO [train.py:901] (0/4) Epoch 22, batch 7450, loss[loss=0.1755, simple_loss=0.251, pruned_loss=0.05001, over 7189.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2883, pruned_loss=0.0623, over 1614439.60 frames. ], batch size: 16, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:35:17,191 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.327e+02 2.972e+02 3.761e+02 7.589e+02, threshold=5.944e+02, percent-clipped=3.0 +2023-02-07 04:35:21,630 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:27,657 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 04:35:32,251 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:38,453 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:51,509 INFO [train.py:901] (0/4) Epoch 22, batch 7500, loss[loss=0.1882, simple_loss=0.2767, pruned_loss=0.04988, over 8026.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2876, pruned_loss=0.06183, over 1615052.63 frames. ], batch size: 22, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:36:13,912 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-07 04:36:15,019 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1971, 4.1597, 3.7968, 1.9423, 3.7313, 3.7796, 3.7975, 3.6719], + device='cuda:0'), covar=tensor([0.0815, 0.0580, 0.1182, 0.4520, 0.0868, 0.0924, 0.1208, 0.0749], + device='cuda:0'), in_proj_covar=tensor([0.0531, 0.0441, 0.0434, 0.0541, 0.0427, 0.0449, 0.0429, 0.0390], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:36:25,359 INFO [train.py:901] (0/4) Epoch 22, batch 7550, loss[loss=0.215, simple_loss=0.3012, pruned_loss=0.06444, over 8358.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.06138, over 1615820.91 frames. ], batch size: 24, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:36:26,046 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.508e+02 3.019e+02 3.781e+02 7.904e+02, threshold=6.039e+02, percent-clipped=4.0 +2023-02-07 04:36:51,653 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177330.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:36:59,620 INFO [train.py:901] (0/4) Epoch 22, batch 7600, loss[loss=0.2503, simple_loss=0.3278, pruned_loss=0.08644, over 8523.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2871, pruned_loss=0.06194, over 1608101.19 frames. 
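Each `INFO [optim.py:369]` line reports `Clipping_scale=2.0`, the quartiles of recently observed gradient norms, the clipping threshold derived from them, and the percentage of recent steps that were clipped. The sketch below reproduces the idea rather than icefall's exact code: keep a sliding window of global gradient norms, set the threshold to `clipping_scale` times the running median, and rescale any gradient that exceeds it.

```python
from collections import deque

import torch


class GradNormClipper:
    """Quartile-style gradient clipper; a sketch of the behavior behind
    the `optim.py:369` lines, not icefall's actual implementation."""

    def __init__(self, clipping_scale: float = 2.0, window: int = 128):
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=window)  # recent global grad norms
        self.num_clipped = 0
        self.num_steps = 0

    def clip_(self, params):
        grads = [p.grad for p in params if p.grad is not None]
        norm = torch.sqrt(sum((g.detach() ** 2).sum() for g in grads))
        self.norms.append(norm.item())
        # min, q1, median, q3, max -- the five numbers in the log line.
        qs = torch.quantile(
            torch.tensor(list(self.norms)),
            torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]),
        ).tolist()
        threshold = self.clipping_scale * qs[2]  # scale * running median
        self.num_steps += 1
        if norm.item() > threshold:
            self.num_clipped += 1
            for g in grads:
                g.mul_(threshold / norm.item())
        pct = 100.0 * self.num_clipped / self.num_steps
        return qs, threshold, pct
```

Deriving the threshold from recent statistics, instead of a fixed constant, lets the clip adapt as gradient magnitudes drift over training.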
], batch size: 49, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:11,461 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:29,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:35,785 INFO [train.py:901] (0/4) Epoch 22, batch 7650, loss[loss=0.1681, simple_loss=0.2523, pruned_loss=0.04194, over 8086.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2869, pruned_loss=0.06155, over 1615292.41 frames. ], batch size: 21, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:36,441 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.559e+02 3.074e+02 4.315e+02 1.263e+03, threshold=6.148e+02, percent-clipped=10.0 +2023-02-07 04:37:39,970 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:57,406 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177423.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:02,924 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8058, 1.9482, 2.0889, 1.4066, 2.2204, 1.5607, 0.7300, 1.8930], + device='cuda:0'), covar=tensor([0.0560, 0.0395, 0.0277, 0.0604, 0.0429, 0.0908, 0.0916, 0.0347], + device='cuda:0'), in_proj_covar=tensor([0.0453, 0.0393, 0.0344, 0.0447, 0.0379, 0.0535, 0.0391, 0.0422], + device='cuda:0'), out_proj_covar=tensor([1.2121e-04, 1.0304e-04, 9.0455e-05, 1.1757e-04, 9.9689e-05, 1.5079e-04, + 1.0516e-04, 1.1168e-04], device='cuda:0') +2023-02-07 04:38:09,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2643, 1.4694, 4.2661, 1.7353, 2.4710, 4.8007, 4.8923, 4.1634], + device='cuda:0'), covar=tensor([0.1201, 0.2128, 0.0281, 0.2266, 0.1221, 0.0194, 0.0486, 0.0582], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0322, 0.0285, 0.0314, 0.0310, 0.0266, 0.0420, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 04:38:09,936 INFO [train.py:901] (0/4) Epoch 22, batch 7700, loss[loss=0.1862, simple_loss=0.2781, pruned_loss=0.04716, over 8487.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2877, pruned_loss=0.06207, over 1614726.03 frames. ], batch size: 29, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:14,876 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9902, 1.6067, 1.4202, 1.5062, 1.3377, 1.2670, 1.2225, 1.2683], + device='cuda:0'), covar=tensor([0.1190, 0.0453, 0.1254, 0.0597, 0.0793, 0.1559, 0.0972, 0.0857], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0232, 0.0335, 0.0309, 0.0299, 0.0340, 0.0344, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 04:38:30,444 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:31,725 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177473.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:38,630 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 04:38:45,979 INFO [train.py:901] (0/4) Epoch 22, batch 7750, loss[loss=0.2172, simple_loss=0.2991, pruned_loss=0.06765, over 8041.00 frames. 
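The `attn_weights_entropy` tensors from `zipformer.py:2431` are periodic diagnostics: one entropy value per attention head, averaged over positions in a sampled batch. A head whose entropy collapses toward zero is attending to a single frame; a value near `log(src_len)` means the head is close to uniform. A generic version of the computation, with the tensor layout assumed for illustration:

```python
import torch

def attn_weights_entropy(attn: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    """Per-head mean entropy of attention distributions.

    attn: (num_heads, batch, tgt_len, src_len), each row a probability
    distribution over src_len. This layout is an assumption; the
    zipformer's internal shape may differ.
    """
    entropy = -(attn * (attn + eps).log()).sum(dim=-1)  # (heads, batch, tgt)
    return entropy.mean(dim=(1, 2))                     # (num_heads,)
```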
], tot_loss[loss=0.2063, simple_loss=0.2885, pruned_loss=0.06209, over 1614287.05 frames. ], batch size: 22, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:46,656 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.486e+02 3.125e+02 4.090e+02 1.041e+03, threshold=6.251e+02, percent-clipped=8.0 +2023-02-07 04:38:49,023 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 04:39:20,414 INFO [train.py:901] (0/4) Epoch 22, batch 7800, loss[loss=0.2143, simple_loss=0.3047, pruned_loss=0.06195, over 8179.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2879, pruned_loss=0.06189, over 1611230.84 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:34,474 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1282, 1.9212, 2.4804, 2.1066, 2.4987, 2.2107, 2.0158, 1.2689], + device='cuda:0'), covar=tensor([0.5599, 0.4891, 0.1925, 0.3601, 0.2498, 0.3127, 0.1882, 0.5204], + device='cuda:0'), in_proj_covar=tensor([0.0946, 0.0981, 0.0811, 0.0950, 0.0999, 0.0899, 0.0754, 0.0829], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 04:39:39,765 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177571.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,854 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,890 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:51,147 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:53,646 INFO [train.py:901] (0/4) Epoch 22, batch 7850, loss[loss=0.1813, simple_loss=0.2574, pruned_loss=0.05259, over 7921.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2867, pruned_loss=0.06136, over 1612947.76 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:54,299 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.387e+02 2.753e+02 3.373e+02 6.542e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 04:40:06,552 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177611.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:40:26,685 INFO [train.py:901] (0/4) Epoch 22, batch 7900, loss[loss=0.2056, simple_loss=0.2981, pruned_loss=0.05654, over 8359.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2882, pruned_loss=0.06227, over 1609216.93 frames. ], batch size: 24, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:40:53,554 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:41:00,029 INFO [train.py:901] (0/4) Epoch 22, batch 7950, loss[loss=0.1854, simple_loss=0.2723, pruned_loss=0.04927, over 7918.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2885, pruned_loss=0.06272, over 1607749.29 frames. 
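The `zipformer.py:1185` lines (`warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=0, layers_to_drop=set()`) trace the encoder's layer-skipping warmup: each group of layers is assigned a warmup window measured in batches, and inside that window whole layers can be stochastically dropped. At `batch_count` around 177k, long past every `warmup_end` of at most 4000, the drop probability has decayed to zero, which is why every line here reads `num_to_drop=0`. A hedged sketch of such a schedule follows; the ramp shape and the 0.075 maximum are illustrative assumptions, not the recipe's constants.

```python
import random

def layers_to_drop(batch_count: float, warmup_begin: float,
                   warmup_end: float, num_layers: int,
                   max_drop_prob: float = 0.075) -> set:
    """Stochastic layer skipping during a warmup window.

    Drop probability ramps from max_drop_prob down to zero across
    [warmup_begin, warmup_end) and stays at zero afterwards, matching
    the `num_to_drop=0` seen at batch_count ~177k.
    """
    if batch_count >= warmup_end:
        return set()
    span = max(warmup_end - warmup_begin, 1.0)
    frac = min(max((batch_count - warmup_begin) / span, 0.0), 1.0)
    p = max_drop_prob * (1.0 - frac)
    return {i for i in range(num_layers) if random.random() < p}
```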
], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:00,683 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.424e+02 2.966e+02 3.766e+02 9.319e+02, threshold=5.931e+02, percent-clipped=7.0 +2023-02-07 04:41:27,913 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9030, 1.3963, 3.4107, 1.4875, 2.3988, 3.7029, 3.7253, 3.2197], + device='cuda:0'), covar=tensor([0.1138, 0.1840, 0.0285, 0.1997, 0.0984, 0.0204, 0.0496, 0.0490], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0323, 0.0286, 0.0316, 0.0312, 0.0267, 0.0423, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 04:41:33,720 INFO [train.py:901] (0/4) Epoch 22, batch 8000, loss[loss=0.2448, simple_loss=0.3262, pruned_loss=0.08171, over 8287.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2888, pruned_loss=0.06278, over 1611162.87 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:35,162 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7317, 4.6943, 4.3000, 2.1823, 4.2239, 4.4049, 4.2936, 4.2296], + device='cuda:0'), covar=tensor([0.0607, 0.0438, 0.0916, 0.4454, 0.0719, 0.0748, 0.1118, 0.0666], + device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0434, 0.0429, 0.0532, 0.0420, 0.0441, 0.0420, 0.0385], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:41:44,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4671, 1.6492, 2.1722, 1.3377, 1.4101, 1.6624, 1.5938, 1.4935], + device='cuda:0'), covar=tensor([0.2224, 0.2861, 0.1101, 0.4901, 0.2300, 0.3805, 0.2589, 0.2280], + device='cuda:0'), in_proj_covar=tensor([0.0528, 0.0607, 0.0557, 0.0649, 0.0650, 0.0596, 0.0538, 0.0633], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:42:06,689 INFO [train.py:901] (0/4) Epoch 22, batch 8050, loss[loss=0.1677, simple_loss=0.2467, pruned_loss=0.04432, over 7545.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2876, pruned_loss=0.06282, over 1596005.00 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:42:07,275 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.308e+02 2.923e+02 3.618e+02 1.070e+03, threshold=5.846e+02, percent-clipped=4.0 +2023-02-07 04:42:10,775 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:22,339 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:25,487 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.58 vs. limit=5.0 +2023-02-07 04:42:30,249 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-22.pt +2023-02-07 04:42:41,458 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 04:42:44,818 INFO [train.py:901] (0/4) Epoch 23, batch 0, loss[loss=0.1775, simple_loss=0.2533, pruned_loss=0.05085, over 7239.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2533, pruned_loss=0.05085, over 7239.00 frames. 
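Each `train.py:901` entry reports three numbers per batch: `simple_loss`, a cheap full-lattice transducer loss used to select a narrow band of symbols per frame; `pruned_loss`, the exact loss evaluated only inside that band; and `loss`, their weighted sum. The logged values are consistent with weights of 0.5 and 1.0 (for example, batch 7100 above: 0.5 * 0.2923 + 0.05585 = 0.2020); take those scales as inferred from the log rather than quoted from the recipe's configuration.

```python
def total_transducer_loss(simple_loss: float, pruned_loss: float,
                          simple_loss_scale: float = 0.5,
                          pruned_loss_scale: float = 1.0) -> float:
    """Combine the two pruned-transducer loss terms the way the
    `loss=` field in this log implies. The 0.5/1.0 scales are
    inferred from the logged numbers, not read from the config."""
    return simple_loss_scale * simple_loss + pruned_loss_scale * pruned_loss

# e.g. batch 7100 above: 0.5 * 0.2923 + 1.0 * 0.05585 = 0.2020
```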
], batch size: 16, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:42:44,818 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 04:42:56,156 INFO [train.py:935] (0/4) Epoch 23, validation: loss=0.1743, simple_loss=0.274, pruned_loss=0.0373, over 944034.00 frames. +2023-02-07 04:42:56,157 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 04:43:08,351 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:10,527 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177844.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:12,415 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 04:43:20,505 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6931, 1.3712, 5.8324, 2.0316, 5.2297, 4.8804, 5.4140, 5.2486], + device='cuda:0'), covar=tensor([0.0535, 0.5455, 0.0352, 0.4341, 0.1011, 0.0917, 0.0485, 0.0543], + device='cuda:0'), in_proj_covar=tensor([0.0634, 0.0648, 0.0700, 0.0632, 0.0709, 0.0606, 0.0610, 0.0684], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:43:25,876 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:28,075 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:32,006 INFO [train.py:901] (0/4) Epoch 23, batch 50, loss[loss=0.1896, simple_loss=0.2802, pruned_loss=0.04947, over 8486.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2904, pruned_loss=0.06287, over 363679.24 frames. ], batch size: 49, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:43:45,269 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.650e+02 3.149e+02 3.939e+02 1.519e+03, threshold=6.298e+02, percent-clipped=14.0 +2023-02-07 04:43:46,682 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 04:44:01,098 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177915.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:44:06,090 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0557, 1.6388, 1.3748, 1.5041, 1.3237, 1.3047, 1.2794, 1.2983], + device='cuda:0'), covar=tensor([0.1121, 0.0442, 0.1325, 0.0573, 0.0754, 0.1428, 0.0935, 0.0822], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0230, 0.0330, 0.0307, 0.0296, 0.0336, 0.0341, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 04:44:07,964 INFO [train.py:901] (0/4) Epoch 23, batch 100, loss[loss=0.2125, simple_loss=0.3012, pruned_loss=0.06187, over 8473.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2872, pruned_loss=0.06064, over 644533.57 frames. ], batch size: 27, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:09,369 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 04:44:42,228 INFO [train.py:901] (0/4) Epoch 23, batch 150, loss[loss=0.1824, simple_loss=0.2867, pruned_loss=0.03908, over 8461.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2893, pruned_loss=0.06157, over 863038.20 frames. 
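Alongside the per-batch `loss[...]`, every `train.py:901` line carries `tot_loss[..., over N frames]`, a running frame-weighted aggregate. The frame count restarts with the new epoch above (about 364k at batch 50, 645k at batch 100, 863k at batch 150) and then saturates near 1.6M, the signature of a decayed sum rather than a plain cumulative one. One simple scheme with that behavior is sketched below; the 0.995 decay is an assumption, chosen because with roughly 8k-frame batches it reproduces the logged trajectory (about 360k frames at batch 50, about 1.6M at saturation).

```python
class RunningLoss:
    """Frame-weighted running loss with exponential decay; a plausible
    stand-in for the `tot_loss[..., over N frames]` fields."""

    def __init__(self, decay: float = 0.995):
        self.decay = decay
        self.loss_sum = 0.0   # decayed sum of loss * frames
        self.frames = 0.0     # decayed sum of frames

    def update(self, loss: float, num_frames: float) -> None:
        self.loss_sum = self.decay * self.loss_sum + loss * num_frames
        self.frames = self.decay * self.frames + num_frames

    @property
    def value(self) -> float:
        return self.loss_sum / max(self.frames, 1.0)
```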
], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:54,770 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 04:44:54,924 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.352e+02 3.015e+02 3.767e+02 5.945e+02, threshold=6.031e+02, percent-clipped=0.0 +2023-02-07 04:44:59,737 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-178000.pt +2023-02-07 04:45:17,376 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 04:45:18,316 INFO [train.py:901] (0/4) Epoch 23, batch 200, loss[loss=0.1804, simple_loss=0.2605, pruned_loss=0.05021, over 7420.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2914, pruned_loss=0.06238, over 1035072.39 frames. ], batch size: 17, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:45:19,125 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:21,848 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178030.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:53,024 INFO [train.py:901] (0/4) Epoch 23, batch 250, loss[loss=0.1891, simple_loss=0.262, pruned_loss=0.05804, over 7818.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2896, pruned_loss=0.06198, over 1163574.30 frames. ], batch size: 20, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:04,787 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 04:46:06,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.380e+02 2.804e+02 3.484e+02 6.736e+02, threshold=5.609e+02, percent-clipped=2.0 +2023-02-07 04:46:06,309 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4847, 1.2973, 2.2664, 1.2395, 2.1606, 2.4395, 2.5519, 2.0727], + device='cuda:0'), covar=tensor([0.1009, 0.1328, 0.0459, 0.1908, 0.0755, 0.0362, 0.0757, 0.0671], + device='cuda:0'), in_proj_covar=tensor([0.0297, 0.0320, 0.0286, 0.0315, 0.0311, 0.0266, 0.0422, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 04:46:12,825 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 04:46:23,219 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4705, 1.7443, 2.6351, 1.3575, 1.9439, 1.8389, 1.5274, 2.0006], + device='cuda:0'), covar=tensor([0.2097, 0.2735, 0.0961, 0.4686, 0.1898, 0.3310, 0.2443, 0.2200], + device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0603, 0.0555, 0.0644, 0.0647, 0.0590, 0.0535, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:46:28,466 INFO [train.py:901] (0/4) Epoch 23, batch 300, loss[loss=0.1997, simple_loss=0.2856, pruned_loss=0.05683, over 8593.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2903, pruned_loss=0.06262, over 1266535.47 frames. 
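`checkpoint.py:75` fires twice in this stretch with different file names: `epoch-22.pt` at the epoch boundary (just before the epoch-23 validation pass) and `checkpoint-178000.pt`, followed exactly 2,000 batches later by `checkpoint-180000.pt`, on a fixed batch interval. A minimal sketch of that dual schedule; the 2,000-batch interval is read off the log, and the recipe's own checkpoints typically also carry things like scheduler, sampler, and grad-scaler state.

```python
import torch

SAVE_EVERY_N = 2000  # interval read off the log (178000 -> 180000)

def maybe_save(model, optimizer, global_batch: int, epoch: int,
               exp_dir: str, end_of_epoch: bool = False) -> None:
    """Save checkpoint-<N>.pt every N batches and epoch-<E>.pt at
    epoch end. Sketch only; not icefall's checkpoint.py."""
    state = {
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "batch_idx_train": global_batch,
        "epoch": epoch,
    }
    if global_batch % SAVE_EVERY_N == 0:
        torch.save(state, f"{exp_dir}/checkpoint-{global_batch}.pt")
    if end_of_epoch:
        torch.save(state, f"{exp_dir}/epoch-{epoch}.pt")
```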
], batch size: 49, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:40,061 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:40,621 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:52,875 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:55,262 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-02-07 04:47:03,748 INFO [train.py:901] (0/4) Epoch 23, batch 350, loss[loss=0.1888, simple_loss=0.2624, pruned_loss=0.05758, over 7773.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.289, pruned_loss=0.06195, over 1342021.34 frames. ], batch size: 19, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:16,034 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.905e+02 3.451e+02 8.072e+02, threshold=5.809e+02, percent-clipped=5.0 +2023-02-07 04:47:17,665 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4218, 2.0379, 3.0446, 1.7546, 1.5783, 2.9722, 0.9379, 2.0366], + device='cuda:0'), covar=tensor([0.1839, 0.1234, 0.0275, 0.1507, 0.2657, 0.0361, 0.2161, 0.1399], + device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0201, 0.0130, 0.0224, 0.0272, 0.0139, 0.0173, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:47:38,674 INFO [train.py:901] (0/4) Epoch 23, batch 400, loss[loss=0.2547, simple_loss=0.3428, pruned_loss=0.08329, over 8731.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2886, pruned_loss=0.06165, over 1404155.61 frames. ], batch size: 30, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:48:02,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:02,315 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0843, 1.2298, 1.2337, 0.8126, 1.2338, 1.0290, 0.0839, 1.1970], + device='cuda:0'), covar=tensor([0.0419, 0.0415, 0.0367, 0.0544, 0.0421, 0.1077, 0.0892, 0.0356], + device='cuda:0'), in_proj_covar=tensor([0.0452, 0.0393, 0.0344, 0.0445, 0.0378, 0.0535, 0.0392, 0.0423], + device='cuda:0'), out_proj_covar=tensor([1.2080e-04, 1.0296e-04, 9.0392e-05, 1.1706e-04, 9.9405e-05, 1.5071e-04, + 1.0550e-04, 1.1208e-04], device='cuda:0') +2023-02-07 04:48:15,030 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,485 INFO [train.py:901] (0/4) Epoch 23, batch 450, loss[loss=0.1984, simple_loss=0.2779, pruned_loss=0.05949, over 8244.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.0617, over 1452029.80 frames. ], batch size: 24, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:48:20,714 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-07 04:48:23,152 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:27,632 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.308e+02 2.812e+02 3.532e+02 1.107e+03, threshold=5.624e+02, percent-clipped=2.0 +2023-02-07 04:48:40,224 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:50,176 INFO [train.py:901] (0/4) Epoch 23, batch 500, loss[loss=0.2154, simple_loss=0.2855, pruned_loss=0.07269, over 7822.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2885, pruned_loss=0.06169, over 1492497.95 frames. ], batch size: 20, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:48:50,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0644, 1.7558, 1.9911, 1.6294, 1.0531, 1.7148, 2.2553, 2.1445], + device='cuda:0'), covar=tensor([0.0411, 0.1273, 0.1611, 0.1378, 0.0613, 0.1403, 0.0602, 0.0598], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0163, 0.0112, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 04:49:16,606 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 04:49:25,958 INFO [train.py:901] (0/4) Epoch 23, batch 550, loss[loss=0.2535, simple_loss=0.3292, pruned_loss=0.08895, over 8551.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2894, pruned_loss=0.0624, over 1517011.59 frames. ], batch size: 49, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:39,365 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.448e+02 3.105e+02 3.761e+02 9.562e+02, threshold=6.211e+02, percent-clipped=5.0 +2023-02-07 04:49:42,424 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:49:59,307 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:01,203 INFO [train.py:901] (0/4) Epoch 23, batch 600, loss[loss=0.2027, simple_loss=0.2957, pruned_loss=0.05483, over 8328.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2885, pruned_loss=0.06144, over 1542126.90 frames. ], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:14,790 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 04:50:33,524 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178470.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:36,800 INFO [train.py:901] (0/4) Epoch 23, batch 650, loss[loss=0.1965, simple_loss=0.285, pruned_loss=0.05403, over 8099.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06101, over 1555435.05 frames. 
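The `scaling.py:679` lines (`Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0`) come from a module that scores how far a group of channels is from having a white, isotropic covariance; the `vs. limit` comparison suggests it only intervenes when the metric exceeds the limit. One common way to score that, shown here as an assumption rather than icefall's exact formula, is the ratio of the mean squared eigenvalue of the covariance to the squared mean eigenvalue: exactly 1.0 for white features, growing as variance concentrates in a few directions. Both moments can be read off the matrix without an eigendecomposition.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """mean(eig^2) / mean(eig)^2 of the per-group second-moment matrix
    (centering omitted for brevity); equals 1.0 for isotropic features.
    A plausible stand-in, not necessarily scaling.py's definition.

    x: (num_frames, num_channels), channels split into equal groups.
    """
    frames, channels = x.shape
    d = channels // num_groups
    xg = x.reshape(frames, num_groups, d).permute(1, 0, 2)  # (g, frames, d)
    cov = xg.transpose(1, 2) @ xg / frames                  # (g, d, d)
    num = (cov * cov).sum(dim=(1, 2)) / d          # mean squared eigenvalue
    den = (torch.diagonal(cov, dim1=1, dim2=2).sum(dim=1) / d) ** 2
    return (num / den).mean()
```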
], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:49,810 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.230e+02 2.701e+02 3.368e+02 8.641e+02, threshold=5.402e+02, percent-clipped=2.0 +2023-02-07 04:50:55,492 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5576, 1.5192, 2.1360, 1.3838, 1.2035, 2.0725, 0.4334, 1.2848], + device='cuda:0'), covar=tensor([0.1944, 0.1262, 0.0330, 0.0982, 0.2638, 0.0404, 0.2007, 0.1391], + device='cuda:0'), in_proj_covar=tensor([0.0194, 0.0202, 0.0131, 0.0224, 0.0272, 0.0139, 0.0173, 0.0198], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:51:04,340 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:11,890 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3465, 2.6603, 3.0307, 1.6607, 3.2990, 2.0769, 1.6783, 2.2921], + device='cuda:0'), covar=tensor([0.0913, 0.0426, 0.0318, 0.0824, 0.0504, 0.0832, 0.0963, 0.0538], + device='cuda:0'), in_proj_covar=tensor([0.0449, 0.0390, 0.0343, 0.0443, 0.0377, 0.0531, 0.0389, 0.0419], + device='cuda:0'), out_proj_covar=tensor([1.2001e-04, 1.0232e-04, 9.0183e-05, 1.1660e-04, 9.9026e-05, 1.4976e-04, + 1.0476e-04, 1.1106e-04], device='cuda:0') +2023-02-07 04:51:12,421 INFO [train.py:901] (0/4) Epoch 23, batch 700, loss[loss=0.1644, simple_loss=0.2525, pruned_loss=0.03812, over 7655.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.06077, over 1569784.39 frames. ], batch size: 19, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:16,059 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:21,517 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:32,923 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 04:51:33,982 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178555.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:47,514 INFO [train.py:901] (0/4) Epoch 23, batch 750, loss[loss=0.1824, simple_loss=0.2509, pruned_loss=0.05698, over 7245.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2877, pruned_loss=0.06156, over 1581166.51 frames. ], batch size: 16, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:52:00,640 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.529e+02 2.988e+02 3.531e+02 9.866e+02, threshold=5.976e+02, percent-clipped=5.0 +2023-02-07 04:52:03,330 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 04:52:12,914 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. 
Duration: 26.32775 +2023-02-07 04:52:13,852 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5617, 2.4953, 1.7913, 2.3984, 2.1965, 1.5011, 2.0990, 2.2138], + device='cuda:0'), covar=tensor([0.1619, 0.0453, 0.1354, 0.0604, 0.0745, 0.1705, 0.1053, 0.1014], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0235, 0.0337, 0.0311, 0.0300, 0.0342, 0.0347, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 04:52:24,031 INFO [train.py:901] (0/4) Epoch 23, batch 800, loss[loss=0.1717, simple_loss=0.2582, pruned_loss=0.04266, over 8187.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.06069, over 1584886.72 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:52:32,108 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178637.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:52:57,754 INFO [train.py:901] (0/4) Epoch 23, batch 850, loss[loss=0.2219, simple_loss=0.2996, pruned_loss=0.07209, over 8023.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2877, pruned_loss=0.06147, over 1592220.15 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:10,567 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.561e+02 2.992e+02 3.918e+02 1.040e+03, threshold=5.984e+02, percent-clipped=6.0 +2023-02-07 04:53:24,463 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178712.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:53:24,481 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9840, 1.7378, 3.1929, 1.5543, 2.4367, 3.4286, 3.5456, 2.9560], + device='cuda:0'), covar=tensor([0.1097, 0.1545, 0.0334, 0.1897, 0.0891, 0.0254, 0.0667, 0.0557], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0322, 0.0287, 0.0315, 0.0311, 0.0268, 0.0423, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 04:53:26,475 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178715.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:53:27,219 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1652, 1.0736, 1.3033, 0.9900, 0.9560, 1.3215, 0.0515, 0.8827], + device='cuda:0'), covar=tensor([0.1699, 0.1274, 0.0475, 0.0915, 0.2473, 0.0561, 0.2058, 0.1251], + device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0201, 0.0130, 0.0221, 0.0270, 0.0138, 0.0170, 0.0196], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:53:34,044 INFO [train.py:901] (0/4) Epoch 23, batch 900, loss[loss=0.2274, simple_loss=0.3106, pruned_loss=0.07212, over 7056.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06095, over 1595714.00 frames. ], batch size: 72, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:57,336 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-07 04:54:09,419 INFO [train.py:901] (0/4) Epoch 23, batch 950, loss[loss=0.17, simple_loss=0.2472, pruned_loss=0.04643, over 7939.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2867, pruned_loss=0.06087, over 1606081.82 frames. 
], batch size: 20, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:54:18,536 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:18,783 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.72 vs. limit=5.0 +2023-02-07 04:54:21,857 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.330e+02 2.907e+02 3.544e+02 9.473e+02, threshold=5.814e+02, percent-clipped=4.0 +2023-02-07 04:54:35,811 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 04:54:37,113 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:42,178 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5736, 2.0844, 3.2580, 1.4928, 2.4340, 2.0172, 1.7713, 2.4375], + device='cuda:0'), covar=tensor([0.1922, 0.2466, 0.0732, 0.4430, 0.1744, 0.3103, 0.2276, 0.2071], + device='cuda:0'), in_proj_covar=tensor([0.0521, 0.0598, 0.0549, 0.0639, 0.0641, 0.0586, 0.0530, 0.0626], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 04:54:45,361 INFO [train.py:901] (0/4) Epoch 23, batch 1000, loss[loss=0.1814, simple_loss=0.2661, pruned_loss=0.04839, over 8086.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06094, over 1607255.16 frames. ], batch size: 21, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:12,377 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 04:55:21,365 INFO [train.py:901] (0/4) Epoch 23, batch 1050, loss[loss=0.228, simple_loss=0.3105, pruned_loss=0.0727, over 8556.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.286, pruned_loss=0.06019, over 1613138.50 frames. ], batch size: 31, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:25,394 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 04:55:33,402 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.332e+02 2.695e+02 3.454e+02 6.847e+02, threshold=5.390e+02, percent-clipped=5.0 +2023-02-07 04:55:46,647 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178912.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:55:47,284 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1177, 1.7020, 4.5485, 2.0169, 2.6573, 5.1704, 5.2596, 4.5273], + device='cuda:0'), covar=tensor([0.1306, 0.1814, 0.0267, 0.1867, 0.1055, 0.0162, 0.0403, 0.0537], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0320, 0.0287, 0.0315, 0.0310, 0.0267, 0.0422, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 04:55:56,202 INFO [train.py:901] (0/4) Epoch 23, batch 1100, loss[loss=0.214, simple_loss=0.2973, pruned_loss=0.06534, over 8444.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2863, pruned_loss=0.06095, over 1609796.92 frames. ], batch size: 27, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:55:59,143 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:32,128 INFO [train.py:901] (0/4) Epoch 23, batch 1150, loss[loss=0.174, simple_loss=0.2596, pruned_loss=0.04417, over 7818.00 frames. 
], tot_loss[loss=0.2037, simple_loss=0.2856, pruned_loss=0.06091, over 1605748.46 frames. ], batch size: 20, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:56:36,255 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 04:56:36,321 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:45,237 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.628e+02 3.162e+02 4.177e+02 1.087e+03, threshold=6.324e+02, percent-clipped=6.0 +2023-02-07 04:57:07,126 INFO [train.py:901] (0/4) Epoch 23, batch 1200, loss[loss=0.179, simple_loss=0.267, pruned_loss=0.04546, over 7787.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2856, pruned_loss=0.06088, over 1604739.51 frames. ], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:29,067 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:57:31,022 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179059.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:57:42,802 INFO [train.py:901] (0/4) Epoch 23, batch 1250, loss[loss=0.2089, simple_loss=0.2833, pruned_loss=0.06722, over 8137.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2856, pruned_loss=0.06093, over 1608896.51 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:55,996 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.289e+02 2.896e+02 3.686e+02 5.954e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 04:57:58,278 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:05,898 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1243, 1.0333, 1.2126, 0.9572, 0.9423, 1.2360, 0.1084, 0.9043], + device='cuda:0'), covar=tensor([0.1606, 0.1313, 0.0526, 0.0820, 0.2269, 0.0528, 0.2000, 0.1291], + device='cuda:0'), in_proj_covar=tensor([0.0192, 0.0200, 0.0129, 0.0220, 0.0268, 0.0136, 0.0170, 0.0194], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 04:58:19,011 INFO [train.py:901] (0/4) Epoch 23, batch 1300, loss[loss=0.2074, simple_loss=0.2979, pruned_loss=0.05839, over 8024.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2868, pruned_loss=0.06162, over 1611789.27 frames. 
], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:58:20,698 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3801, 2.3138, 1.6993, 2.1240, 1.9716, 1.4821, 1.9516, 1.8516], + device='cuda:0'), covar=tensor([0.1454, 0.0415, 0.1297, 0.0556, 0.0771, 0.1534, 0.0973, 0.0966], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0232, 0.0334, 0.0308, 0.0298, 0.0338, 0.0342, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 04:58:24,100 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:51,876 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179171.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:53,999 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179174.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:58:54,478 INFO [train.py:901] (0/4) Epoch 23, batch 1350, loss[loss=0.1903, simple_loss=0.2751, pruned_loss=0.05279, over 8241.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2859, pruned_loss=0.06109, over 1611662.07 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:58:55,432 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3516, 2.2550, 2.9887, 2.4541, 2.8962, 2.4737, 2.2253, 1.7847], + device='cuda:0'), covar=tensor([0.5521, 0.5387, 0.2026, 0.3696, 0.2615, 0.2943, 0.1870, 0.5493], + device='cuda:0'), in_proj_covar=tensor([0.0938, 0.0984, 0.0811, 0.0950, 0.0994, 0.0898, 0.0752, 0.0828], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 04:59:01,685 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179185.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:07,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.184e+02 2.635e+02 3.098e+02 5.270e+02, threshold=5.271e+02, percent-clipped=0.0 +2023-02-07 04:59:16,580 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 04:59:20,414 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179210.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:30,635 INFO [train.py:901] (0/4) Epoch 23, batch 1400, loss[loss=0.1915, simple_loss=0.2881, pruned_loss=0.04742, over 8241.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2861, pruned_loss=0.06095, over 1611512.27 frames. ], batch size: 24, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:37,572 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 04:59:47,155 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:53,424 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179256.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:59:57,850 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 05:00:06,550 INFO [train.py:901] (0/4) Epoch 23, batch 1450, loss[loss=0.1663, simple_loss=0.2552, pruned_loss=0.03874, over 7673.00 frames. ], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.06152, over 1612883.88 frames. 
], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:00:16,906 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 05:00:19,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.293e+02 2.971e+02 3.774e+02 8.745e+02, threshold=5.941e+02, percent-clipped=9.0 +2023-02-07 05:00:43,616 INFO [train.py:901] (0/4) Epoch 23, batch 1500, loss[loss=0.2028, simple_loss=0.2877, pruned_loss=0.05892, over 8248.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2873, pruned_loss=0.06198, over 1612111.75 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:00:57,219 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7934, 1.8265, 2.5019, 1.5862, 1.3949, 2.4545, 0.4965, 1.4932], + device='cuda:0'), covar=tensor([0.1907, 0.1179, 0.0287, 0.1304, 0.2569, 0.0326, 0.1993, 0.1377], + device='cuda:0'), in_proj_covar=tensor([0.0191, 0.0199, 0.0129, 0.0219, 0.0267, 0.0136, 0.0169, 0.0193], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 05:01:03,389 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:16,374 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179371.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:01:18,855 INFO [train.py:901] (0/4) Epoch 23, batch 1550, loss[loss=0.1714, simple_loss=0.25, pruned_loss=0.0464, over 7775.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2866, pruned_loss=0.06158, over 1609471.78 frames. ], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:20,456 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:21,725 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:31,094 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.349e+02 2.958e+02 3.969e+02 7.808e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-07 05:01:32,066 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2440, 2.1748, 1.7155, 1.9682, 1.7870, 1.5079, 1.6689, 1.7150], + device='cuda:0'), covar=tensor([0.1341, 0.0375, 0.1193, 0.0487, 0.0746, 0.1421, 0.0926, 0.0849], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0234, 0.0336, 0.0311, 0.0300, 0.0339, 0.0346, 0.0317], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 05:01:54,027 INFO [train.py:901] (0/4) Epoch 23, batch 1600, loss[loss=0.2088, simple_loss=0.2929, pruned_loss=0.06235, over 8237.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.06169, over 1612129.12 frames. 
], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:56,459 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179427.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:58,497 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179430.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:14,544 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:02:16,562 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179455.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:02:21,374 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179462.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:02:31,190 INFO [train.py:901] (0/4) Epoch 23, batch 1650, loss[loss=0.2024, simple_loss=0.2808, pruned_loss=0.06197, over 7534.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.287, pruned_loss=0.06115, over 1616263.29 frames. ], batch size: 18, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:02:43,578 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.367e+02 2.783e+02 3.381e+02 8.055e+02, threshold=5.566e+02, percent-clipped=4.0 +2023-02-07 05:02:50,690 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:06,279 INFO [train.py:901] (0/4) Epoch 23, batch 1700, loss[loss=0.2133, simple_loss=0.2879, pruned_loss=0.06936, over 7783.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06067, over 1614851.69 frames. ], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:03:08,681 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:32,848 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5542, 1.9276, 3.0007, 1.4060, 2.2613, 1.9285, 1.6852, 2.3045], + device='cuda:0'), covar=tensor([0.1966, 0.2541, 0.0842, 0.4585, 0.1840, 0.3265, 0.2304, 0.2117], + device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0608, 0.0556, 0.0647, 0.0653, 0.0595, 0.0538, 0.0633], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 05:03:42,338 INFO [train.py:901] (0/4) Epoch 23, batch 1750, loss[loss=0.1677, simple_loss=0.2426, pruned_loss=0.04646, over 7209.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06043, over 1614642.53 frames. ], batch size: 16, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:03:44,861 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 05:03:52,465 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-02-07 05:03:56,235 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.481e+02 2.857e+02 3.517e+02 8.396e+02, threshold=5.713e+02, percent-clipped=3.0 +2023-02-07 05:03:58,697 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6453, 2.0306, 2.1834, 1.2807, 2.2408, 1.5863, 0.7598, 1.9037], + device='cuda:0'), covar=tensor([0.0666, 0.0395, 0.0273, 0.0674, 0.0467, 0.0890, 0.0897, 0.0394], + device='cuda:0'), in_proj_covar=tensor([0.0454, 0.0394, 0.0348, 0.0449, 0.0382, 0.0535, 0.0394, 0.0422], + device='cuda:0'), out_proj_covar=tensor([1.2119e-04, 1.0329e-04, 9.1232e-05, 1.1818e-04, 1.0075e-04, 1.5078e-04, + 1.0621e-04, 1.1164e-04], device='cuda:0') +2023-02-07 05:04:15,366 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3702, 1.6658, 4.5542, 1.7287, 4.0218, 3.7705, 4.1689, 3.9912], + device='cuda:0'), covar=tensor([0.0614, 0.4665, 0.0516, 0.4293, 0.1201, 0.0958, 0.0573, 0.0704], + device='cuda:0'), in_proj_covar=tensor([0.0640, 0.0651, 0.0707, 0.0640, 0.0717, 0.0613, 0.0612, 0.0690], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 05:04:17,968 INFO [train.py:901] (0/4) Epoch 23, batch 1800, loss[loss=0.2236, simple_loss=0.2918, pruned_loss=0.07773, over 8666.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2857, pruned_loss=0.06011, over 1618861.44 frames. ], batch size: 34, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:04:19,554 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179627.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:04:37,264 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179652.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:04:38,538 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179654.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:04:54,468 INFO [train.py:901] (0/4) Epoch 23, batch 1850, loss[loss=0.1881, simple_loss=0.2745, pruned_loss=0.05083, over 8141.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.286, pruned_loss=0.06047, over 1618375.64 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:00,031 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1478, 1.5911, 1.9636, 1.5226, 1.0353, 1.6586, 1.9230, 1.8478], + device='cuda:0'), covar=tensor([0.0512, 0.1230, 0.1569, 0.1356, 0.0574, 0.1362, 0.0618, 0.0604], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:0') +2023-02-07 05:05:07,477 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.311e+02 2.831e+02 3.615e+02 8.108e+02, threshold=5.663e+02, percent-clipped=6.0 +2023-02-07 05:05:28,483 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:05:29,811 INFO [train.py:901] (0/4) Epoch 23, batch 1900, loss[loss=0.2271, simple_loss=0.3126, pruned_loss=0.07084, over 8576.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2864, pruned_loss=0.06032, over 1618513.20 frames. ], batch size: 31, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:59,884 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
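For inspecting a log like this one offline, the `train.py:901` entries are regular enough to parse directly. The short script below is a hypothetical utility, not part of the recipe: it extracts the epoch, batch number, and smoothed `tot_loss` so the training curve can be plotted or compared across runs.

```python
import re

# Matches e.g. "Epoch 23, batch 1950, loss[...], tot_loss[loss=0.204, ..."
PATTERN = re.compile(r"Epoch (\d+), batch (\d+), .*?tot_loss\[loss=([\d.]+)")

def parse_tot_loss(path: str):
    """Yield (epoch, batch, tot_loss) tuples from an icefall train log."""
    with open(path) as f:
        for line in f:
            m = PATTERN.search(line)
            if m:
                yield int(m.group(1)), int(m.group(2)), float(m.group(3))

# Usage (hypothetical path):
# for epoch, batch, loss in parse_tot_loss("log/log-train"):
#     print(epoch, batch, loss)
```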
Duration: 27.511125
+2023-02-07 05:06:05,567 INFO [train.py:901] (0/4) Epoch 23, batch 1950, loss[loss=0.1694, simple_loss=0.2538, pruned_loss=0.04247, over 8143.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2869, pruned_loss=0.06056, over 1616319.29 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:06:12,626 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-07 05:06:19,480 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.457e+02 2.986e+02 3.643e+02 8.972e+02, threshold=5.972e+02, percent-clipped=4.0
+2023-02-07 05:06:28,063 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179806.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 05:06:31,261 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-07 05:06:34,182 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179814.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:06:36,269 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5878, 1.4023, 2.7956, 1.3317, 2.1995, 3.0056, 3.2122, 2.4946],
+ device='cuda:0'), covar=tensor([0.1346, 0.1839, 0.0424, 0.2316, 0.0958, 0.0344, 0.0661, 0.0691],
+ device='cuda:0'), in_proj_covar=tensor([0.0297, 0.0322, 0.0289, 0.0316, 0.0312, 0.0269, 0.0426, 0.0307],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:06:41,622 INFO [train.py:901] (0/4) Epoch 23, batch 2000, loss[loss=0.1837, simple_loss=0.2766, pruned_loss=0.04539, over 7803.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2868, pruned_loss=0.06033, over 1618119.57 frames. ], batch size: 20, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:06:50,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179838.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:07:02,093 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4065, 1.2899, 2.3833, 1.2771, 2.2442, 2.5436, 2.7116, 2.1179],
+ device='cuda:0'), covar=tensor([0.1111, 0.1422, 0.0423, 0.2064, 0.0729, 0.0361, 0.0516, 0.0671],
+ device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0320, 0.0287, 0.0315, 0.0311, 0.0268, 0.0424, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:07:13,192 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2602, 1.3661, 1.6699, 1.3569, 0.7522, 1.4528, 1.2905, 1.1252],
+ device='cuda:0'), covar=tensor([0.0558, 0.1238, 0.1598, 0.1349, 0.0545, 0.1405, 0.0647, 0.0683],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0162, 0.0111, 0.0143],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 05:07:16,425 INFO [train.py:901] (0/4) Epoch 23, batch 2050, loss[loss=0.1868, simple_loss=0.276, pruned_loss=0.04883, over 8465.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2865, pruned_loss=0.06016, over 1619278.25 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:07:30,043 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.444e+02 2.856e+02 3.794e+02 1.051e+03, threshold=5.713e+02, percent-clipped=7.0
+2023-02-07 05:07:49,595 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179921.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 05:07:51,629 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179924.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 05:07:52,172 INFO [train.py:901] (0/4) Epoch 23, batch 2100, loss[loss=0.2126, simple_loss=0.3095, pruned_loss=0.05779, over 8462.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2869, pruned_loss=0.06052, over 1616018.63 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:08:04,839 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179942.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:08:14,748 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0
+2023-02-07 05:08:27,545 INFO [train.py:901] (0/4) Epoch 23, batch 2150, loss[loss=0.1803, simple_loss=0.2544, pruned_loss=0.0531, over 7258.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2869, pruned_loss=0.06118, over 1612742.96 frames. ], batch size: 16, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:08:34,129 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6659, 2.3934, 3.7889, 1.4915, 2.8837, 2.1729, 1.9126, 2.5546],
+ device='cuda:0'), covar=tensor([0.2151, 0.2572, 0.1017, 0.5000, 0.1936, 0.3527, 0.2499, 0.2925],
+ device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0608, 0.0555, 0.0645, 0.0649, 0.0592, 0.0537, 0.0630],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:08:34,285 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0
+2023-02-07 05:08:41,580 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.311e+02 2.940e+02 3.642e+02 8.826e+02, threshold=5.880e+02, percent-clipped=6.0
+2023-02-07 05:08:44,582 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179998.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:08:45,954 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-180000.pt
+2023-02-07 05:09:05,717 INFO [train.py:901] (0/4) Epoch 23, batch 2200, loss[loss=0.2053, simple_loss=0.2864, pruned_loss=0.06203, over 7651.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06123, over 1611965.12 frames. ], batch size: 19, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:09:18,801 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180044.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:09:40,653 INFO [train.py:901] (0/4) Epoch 23, batch 2250, loss[loss=0.1897, simple_loss=0.2744, pruned_loss=0.05249, over 8454.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2864, pruned_loss=0.06109, over 1616853.66 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:09:43,939 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-07 05:09:53,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.377e+02 2.815e+02 3.570e+02 6.536e+02, threshold=5.630e+02, percent-clipped=1.0
+2023-02-07 05:09:54,034 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:10:07,878 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180113.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:10:12,137 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180119.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:10:16,772 INFO [train.py:901] (0/4) Epoch 23, batch 2300, loss[loss=0.247, simple_loss=0.3309, pruned_loss=0.08152, over 8593.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06188, over 1620270.31 frames. ], batch size: 31, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:10:40,106 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180158.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:10:44,655 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-07 05:10:52,691 INFO [train.py:901] (0/4) Epoch 23, batch 2350, loss[loss=0.1702, simple_loss=0.2538, pruned_loss=0.04328, over 7805.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06152, over 1617153.54 frames. ], batch size: 19, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:10:54,356 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180177.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 05:11:05,884 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.451e+02 2.928e+02 3.544e+02 9.883e+02, threshold=5.856e+02, percent-clipped=4.0
+2023-02-07 05:11:11,596 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180202.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 05:11:23,080 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3734, 1.6631, 4.4213, 2.0080, 2.6763, 4.9884, 5.0869, 4.2990],
+ device='cuda:0'), covar=tensor([0.1112, 0.1859, 0.0219, 0.1922, 0.1018, 0.0164, 0.0433, 0.0555],
+ device='cuda:0'), in_proj_covar=tensor([0.0294, 0.0320, 0.0286, 0.0314, 0.0310, 0.0267, 0.0422, 0.0304],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:11:25,800 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180223.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:11:27,091 INFO [train.py:901] (0/4) Epoch 23, batch 2400, loss[loss=0.1966, simple_loss=0.2813, pruned_loss=0.0559, over 8465.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2871, pruned_loss=0.06146, over 1617748.17 frames. ], batch size: 27, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:11:33,034 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 05:11:59,477 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180268.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 05:12:02,905 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180273.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:12:04,122 INFO [train.py:901] (0/4) Epoch 23, batch 2450, loss[loss=0.2475, simple_loss=0.3232, pruned_loss=0.08594, over 7813.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2865, pruned_loss=0.061, over 1614074.04 frames. ], batch size: 20, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:12:12,628 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180286.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:12:18,001 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.501e+02 2.918e+02 3.866e+02 1.157e+03, threshold=5.835e+02, percent-clipped=6.0
+2023-02-07 05:12:39,628 INFO [train.py:901] (0/4) Epoch 23, batch 2500, loss[loss=0.2264, simple_loss=0.3103, pruned_loss=0.07119, over 8185.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06082, over 1608594.51 frames. ], batch size: 23, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:13:00,473 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180354.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:13:12,925 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180369.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:13:16,779 INFO [train.py:901] (0/4) Epoch 23, batch 2550, loss[loss=0.2009, simple_loss=0.275, pruned_loss=0.06343, over 7929.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2858, pruned_loss=0.06129, over 1604031.29 frames. ], batch size: 20, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:13:22,401 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180383.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 05:13:25,748 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180388.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:13:29,883 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.435e+02 3.031e+02 3.942e+02 1.076e+03, threshold=6.063e+02, percent-clipped=1.0
+2023-02-07 05:13:30,124 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180394.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:13:35,739 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180401.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:13:51,963 INFO [train.py:901] (0/4) Epoch 23, batch 2600, loss[loss=0.2784, simple_loss=0.3409, pruned_loss=0.1079, over 7558.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2866, pruned_loss=0.06201, over 1607186.25 frames. ], batch size: 72, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:14:28,394 INFO [train.py:901] (0/4) Epoch 23, batch 2650, loss[loss=0.1848, simple_loss=0.2847, pruned_loss=0.04244, over 8471.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06109, over 1610564.16 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:14:42,175 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 2.331e+02 2.876e+02 3.734e+02 9.435e+02, threshold=5.753e+02, percent-clipped=4.0
+2023-02-07 05:14:48,431 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180503.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:15:04,093 INFO [train.py:901] (0/4) Epoch 23, batch 2700, loss[loss=0.1972, simple_loss=0.2875, pruned_loss=0.05345, over 8496.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2865, pruned_loss=0.0612, over 1607287.96 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:15:07,066 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180529.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:15:19,170 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9650, 2.4042, 3.7588, 1.9217, 1.9335, 3.7225, 0.6583, 2.1306],
+ device='cuda:0'), covar=tensor([0.1402, 0.1343, 0.0221, 0.1676, 0.2562, 0.0230, 0.2349, 0.1374],
+ device='cuda:0'), in_proj_covar=tensor([0.0192, 0.0200, 0.0130, 0.0222, 0.0270, 0.0137, 0.0170, 0.0195],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 05:15:24,158 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180554.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:15:33,266 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180567.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:15:39,688 INFO [train.py:901] (0/4) Epoch 23, batch 2750, loss[loss=0.2021, simple_loss=0.3003, pruned_loss=0.05191, over 8463.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2868, pruned_loss=0.06115, over 1613226.10 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:15:48,802 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180588.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:15:53,500 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.355e+02 2.814e+02 3.432e+02 9.125e+02, threshold=5.629e+02, percent-clipped=4.0
+2023-02-07 05:16:15,671 INFO [train.py:901] (0/4) Epoch 23, batch 2800, loss[loss=0.1718, simple_loss=0.27, pruned_loss=0.03685, over 8466.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2875, pruned_loss=0.06163, over 1613960.24 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:16:26,273 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180639.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 05:16:38,597 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180657.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:16:43,309 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180664.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 05:16:48,882 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5708, 1.8726, 1.9848, 1.2980, 2.2323, 1.3853, 0.6832, 1.8051],
+ device='cuda:0'), covar=tensor([0.0694, 0.0397, 0.0304, 0.0681, 0.0414, 0.0996, 0.0941, 0.0386],
+ device='cuda:0'), in_proj_covar=tensor([0.0457, 0.0395, 0.0348, 0.0446, 0.0380, 0.0535, 0.0393, 0.0422],
+ device='cuda:0'), out_proj_covar=tensor([1.2204e-04, 1.0332e-04, 9.1312e-05, 1.1730e-04, 9.9986e-05, 1.5054e-04,
+ 1.0598e-04, 1.1180e-04], device='cuda:0')
+2023-02-07 05:16:50,739 INFO [train.py:901] (0/4) Epoch 23, batch 2850, loss[loss=0.1671, simple_loss=0.2522, pruned_loss=0.04098, over 7560.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2866, pruned_loss=0.06144, over 1612399.82 frames. ], batch size: 18, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:16:55,722 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180682.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:16:55,747 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180682.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:17:04,498 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.469e+02 3.037e+02 3.866e+02 9.714e+02, threshold=6.075e+02, percent-clipped=7.0
+2023-02-07 05:17:07,424 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:17:27,392 INFO [train.py:901] (0/4) Epoch 23, batch 2900, loss[loss=0.1953, simple_loss=0.2687, pruned_loss=0.06093, over 7976.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2871, pruned_loss=0.06163, over 1610652.04 frames. ], batch size: 21, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:17:45,123 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:17:52,131 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180759.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:17:59,484 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-07 05:18:02,964 INFO [train.py:901] (0/4) Epoch 23, batch 2950, loss[loss=0.182, simple_loss=0.2698, pruned_loss=0.04713, over 8081.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2869, pruned_loss=0.06155, over 1608616.47 frames. ], batch size: 21, lr: 3.30e-03, grad_scale: 8.0
+2023-02-07 05:18:09,318 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180784.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:18:16,022 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.356e+02 2.925e+02 3.942e+02 6.480e+02, threshold=5.850e+02, percent-clipped=1.0
+2023-02-07 05:18:26,739 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1549, 1.6486, 1.7968, 1.5266, 1.0157, 1.5542, 1.9817, 1.6354],
+ device='cuda:0'), covar=tensor([0.0481, 0.1112, 0.1578, 0.1325, 0.0576, 0.1357, 0.0595, 0.0618],
+ device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0188, 0.0159, 0.0099, 0.0161, 0.0111, 0.0142],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 05:18:30,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180813.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:18:38,129 INFO [train.py:901] (0/4) Epoch 23, batch 3000, loss[loss=0.2009, simple_loss=0.2904, pruned_loss=0.05568, over 8336.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2871, pruned_loss=0.06114, over 1611415.09 frames. ], batch size: 25, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:18:38,130 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-07 05:18:45,773 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2870, 1.0900, 1.2608, 1.0262, 0.6815, 1.0958, 1.1932, 1.0161],
+ device='cuda:0'), covar=tensor([0.0459, 0.1044, 0.1303, 0.1156, 0.0514, 0.1187, 0.0527, 0.0524],
+ device='cuda:0'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0159, 0.0099, 0.0161, 0.0111, 0.0142],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 05:18:50,537 INFO [train.py:935] (0/4) Epoch 23, validation: loss=0.1735, simple_loss=0.2731, pruned_loss=0.03696, over 944034.00 frames.
+2023-02-07 05:18:50,538 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-07 05:19:03,682 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180843.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:19:26,992 INFO [train.py:901] (0/4) Epoch 23, batch 3050, loss[loss=0.2138, simple_loss=0.2965, pruned_loss=0.06555, over 8146.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2869, pruned_loss=0.06149, over 1610169.02 frames. ], batch size: 48, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:19:40,669 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.515e+02 3.107e+02 3.968e+02 1.139e+03, threshold=6.214e+02, percent-clipped=7.0
+2023-02-07 05:20:02,333 INFO [train.py:901] (0/4) Epoch 23, batch 3100, loss[loss=0.1902, simple_loss=0.2599, pruned_loss=0.06022, over 7691.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2867, pruned_loss=0.06111, over 1611342.01 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:20:07,174 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:20:11,431 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180938.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:20:22,569 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5583, 1.7980, 1.9228, 1.4092, 1.9857, 1.4243, 0.4459, 1.7924],
+ device='cuda:0'), covar=tensor([0.0526, 0.0342, 0.0294, 0.0488, 0.0404, 0.0947, 0.0896, 0.0251],
+ device='cuda:0'), in_proj_covar=tensor([0.0457, 0.0395, 0.0347, 0.0448, 0.0380, 0.0536, 0.0393, 0.0422],
+ device='cuda:0'), out_proj_covar=tensor([1.2212e-04, 1.0333e-04, 9.1140e-05, 1.1778e-04, 1.0004e-04, 1.5101e-04,
+ 1.0585e-04, 1.1162e-04], device='cuda:0')
+2023-02-07 05:20:29,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180963.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:20:38,173 INFO [train.py:901] (0/4) Epoch 23, batch 3150, loss[loss=0.2127, simple_loss=0.2921, pruned_loss=0.06662, over 7652.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06155, over 1613576.27 frames. ], batch size: 19, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:20:51,967 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.467e+02 3.042e+02 3.660e+02 1.036e+03, threshold=6.084e+02, percent-clipped=2.0
+2023-02-07 05:21:14,471 INFO [train.py:901] (0/4) Epoch 23, batch 3200, loss[loss=0.1883, simple_loss=0.2715, pruned_loss=0.0525, over 8105.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06185, over 1613284.66 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:21:29,909 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:21:45,848 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181069.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:21:49,882 INFO [train.py:901] (0/4) Epoch 23, batch 3250, loss[loss=0.1934, simple_loss=0.2794, pruned_loss=0.05372, over 7802.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2884, pruned_loss=0.06245, over 1608804.35 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:22:03,761 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.376e+02 2.917e+02 3.369e+02 6.745e+02, threshold=5.834e+02, percent-clipped=1.0
+2023-02-07 05:22:03,868 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:22:03,996 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:22:25,755 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4246, 1.4022, 1.8097, 1.3177, 1.0889, 1.7893, 0.1897, 1.1799],
+ device='cuda:0'), covar=tensor([0.1970, 0.1368, 0.0435, 0.0969, 0.2892, 0.0473, 0.2263, 0.1280],
+ device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0199, 0.0130, 0.0222, 0.0272, 0.0138, 0.0171, 0.0194],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 05:22:26,226 INFO [train.py:901] (0/4) Epoch 23, batch 3300, loss[loss=0.2136, simple_loss=0.3006, pruned_loss=0.06328, over 8449.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2871, pruned_loss=0.06177, over 1604176.48 frames. ], batch size: 27, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:23:01,363 INFO [train.py:901] (0/4) Epoch 23, batch 3350, loss[loss=0.2369, simple_loss=0.3186, pruned_loss=0.07755, over 8425.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2866, pruned_loss=0.06115, over 1605177.59 frames. ], batch size: 27, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:23:10,452 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:23:14,980 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.358e+02 3.053e+02 3.666e+02 9.674e+02, threshold=6.107e+02, percent-clipped=1.0
+2023-02-07 05:23:26,406 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181209.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:23:38,218 INFO [train.py:901] (0/4) Epoch 23, batch 3400, loss[loss=0.1878, simple_loss=0.2771, pruned_loss=0.0493, over 8237.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2873, pruned_loss=0.06166, over 1604316.85 frames. ], batch size: 22, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:24:13,225 INFO [train.py:901] (0/4) Epoch 23, batch 3450, loss[loss=0.2091, simple_loss=0.2766, pruned_loss=0.07074, over 7407.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2876, pruned_loss=0.06189, over 1607451.74 frames. ], batch size: 17, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:24:27,415 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.466e+02 2.960e+02 3.783e+02 8.296e+02, threshold=5.920e+02, percent-clipped=4.0
+2023-02-07 05:24:32,992 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:24:33,727 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181303.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:24:42,773 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181315.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:24:47,013 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6753, 2.7547, 1.9788, 2.4602, 2.3534, 1.7728, 2.2457, 2.3726],
+ device='cuda:0'), covar=tensor([0.1453, 0.0386, 0.1170, 0.0604, 0.0732, 0.1458, 0.1002, 0.0956],
+ device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0233, 0.0336, 0.0309, 0.0300, 0.0338, 0.0344, 0.0316],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-07 05:24:49,555 INFO [train.py:901] (0/4) Epoch 23, batch 3500, loss[loss=0.192, simple_loss=0.2805, pruned_loss=0.05182, over 8100.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2866, pruned_loss=0.06108, over 1610786.90 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:24:52,789 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181328.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:25:07,647 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-07 05:25:13,730 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.60 vs. limit=5.0
+2023-02-07 05:25:17,763 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-07 05:25:25,788 INFO [train.py:901] (0/4) Epoch 23, batch 3550, loss[loss=0.1934, simple_loss=0.2862, pruned_loss=0.05037, over 8090.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2859, pruned_loss=0.06062, over 1611393.74 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:25:39,019 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.351e+02 2.882e+02 3.469e+02 9.271e+02, threshold=5.765e+02, percent-clipped=2.0
+2023-02-07 05:26:01,193 INFO [train.py:901] (0/4) Epoch 23, batch 3600, loss[loss=0.2027, simple_loss=0.2752, pruned_loss=0.06513, over 7653.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2872, pruned_loss=0.0612, over 1617167.49 frames. ], batch size: 19, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:26:09,774 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4632, 1.8615, 3.2022, 1.3205, 2.4754, 1.9914, 1.5295, 2.4247],
+ device='cuda:0'), covar=tensor([0.2222, 0.2875, 0.0813, 0.5026, 0.1844, 0.3347, 0.2711, 0.2137],
+ device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0604, 0.0549, 0.0643, 0.0644, 0.0591, 0.0536, 0.0627],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:26:30,524 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181465.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:26:37,794 INFO [train.py:901] (0/4) Epoch 23, batch 3650, loss[loss=0.1752, simple_loss=0.2626, pruned_loss=0.04386, over 7827.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2865, pruned_loss=0.06062, over 1618686.60 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:26:44,025 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1398, 1.5828, 3.4530, 1.6177, 2.3713, 3.8827, 3.9566, 3.2898],
+ device='cuda:0'), covar=tensor([0.1082, 0.1868, 0.0344, 0.2075, 0.1158, 0.0220, 0.0475, 0.0570],
+ device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0319, 0.0286, 0.0314, 0.0309, 0.0267, 0.0422, 0.0303],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:26:48,286 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:26:50,927 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.424e+02 2.919e+02 3.720e+02 6.119e+02, threshold=5.839e+02, percent-clipped=1.0
+2023-02-07 05:26:52,533 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5712, 4.5937, 4.1383, 2.1422, 4.0577, 4.1819, 4.0789, 4.0194],
+ device='cuda:0'), covar=tensor([0.0715, 0.0526, 0.1133, 0.4729, 0.0861, 0.1080, 0.1299, 0.0755],
+ device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0439, 0.0431, 0.0540, 0.0429, 0.0444, 0.0426, 0.0389],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:27:11,129 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 05:27:12,456 INFO [train.py:901] (0/4) Epoch 23, batch 3700, loss[loss=0.2423, simple_loss=0.3186, pruned_loss=0.08301, over 8251.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2872, pruned_loss=0.06113, over 1615926.05 frames. ], batch size: 24, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:27:30,273 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:27:37,393 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:27:49,547 INFO [train.py:901] (0/4) Epoch 23, batch 3750, loss[loss=0.2056, simple_loss=0.2827, pruned_loss=0.06425, over 7946.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06107, over 1617227.62 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:27:55,369 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:28:02,819 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.354e+02 2.844e+02 3.677e+02 7.170e+02, threshold=5.688e+02, percent-clipped=4.0
+2023-02-07 05:28:09,865 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3956, 1.5900, 4.5966, 1.6173, 4.0508, 3.7612, 4.1281, 4.0178],
+ device='cuda:0'), covar=tensor([0.0608, 0.4421, 0.0467, 0.4200, 0.1131, 0.0931, 0.0551, 0.0675],
+ device='cuda:0'), in_proj_covar=tensor([0.0644, 0.0651, 0.0707, 0.0641, 0.0722, 0.0617, 0.0617, 0.0689],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:28:24,873 INFO [train.py:901] (0/4) Epoch 23, batch 3800, loss[loss=0.1867, simple_loss=0.2752, pruned_loss=0.04914, over 8099.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2861, pruned_loss=0.06087, over 1610322.85 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:28:44,966 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9387, 1.4404, 3.5427, 1.5598, 2.3907, 3.9351, 4.0036, 3.4114],
+ device='cuda:0'), covar=tensor([0.1145, 0.1852, 0.0273, 0.2035, 0.1007, 0.0193, 0.0376, 0.0486],
+ device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0320, 0.0285, 0.0315, 0.0309, 0.0267, 0.0421, 0.0303],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:28:49,194 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181659.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:29:00,796 INFO [train.py:901] (0/4) Epoch 23, batch 3850, loss[loss=0.2366, simple_loss=0.3017, pruned_loss=0.08574, over 8652.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.06172, over 1613502.44 frames. ], batch size: 34, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:29:14,852 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 2.361e+02 2.900e+02 3.650e+02 9.007e+02, threshold=5.800e+02, percent-clipped=7.0
+2023-02-07 05:29:15,140 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6865, 2.3533, 4.0986, 1.5688, 2.9027, 2.2136, 1.8876, 2.7320],
+ device='cuda:0'), covar=tensor([0.2047, 0.2644, 0.0760, 0.4750, 0.1895, 0.3354, 0.2449, 0.2564],
+ device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0609, 0.0554, 0.0649, 0.0650, 0.0599, 0.0542, 0.0633],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:29:17,199 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4711, 1.4210, 1.8489, 1.1019, 1.0418, 1.8074, 0.1171, 1.1131],
+ device='cuda:0'), covar=tensor([0.1582, 0.1319, 0.0381, 0.1212, 0.2714, 0.0433, 0.2009, 0.1224],
+ device='cuda:0'), in_proj_covar=tensor([0.0192, 0.0199, 0.0128, 0.0219, 0.0269, 0.0136, 0.0170, 0.0192],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 05:29:22,392 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 05:29:36,640 INFO [train.py:901] (0/4) Epoch 23, batch 3900, loss[loss=0.195, simple_loss=0.2846, pruned_loss=0.05275, over 8479.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06129, over 1613695.47 frames. ], batch size: 29, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:30:10,464 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181774.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:30:10,998 INFO [train.py:901] (0/4) Epoch 23, batch 3950, loss[loss=0.2063, simple_loss=0.2992, pruned_loss=0.05668, over 8556.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2879, pruned_loss=0.0618, over 1614178.52 frames. ], batch size: 31, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:30:26,260 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.359e+02 2.788e+02 3.393e+02 6.824e+02, threshold=5.575e+02, percent-clipped=4.0
+2023-02-07 05:30:47,713 INFO [train.py:901] (0/4) Epoch 23, batch 4000, loss[loss=0.1934, simple_loss=0.2805, pruned_loss=0.05313, over 8253.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2881, pruned_loss=0.06153, over 1616522.28 frames. ], batch size: 24, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:31:22,578 INFO [train.py:901] (0/4) Epoch 23, batch 4050, loss[loss=0.2454, simple_loss=0.3254, pruned_loss=0.08266, over 8442.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.06185, over 1617396.26 frames. ], batch size: 49, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:31:34,361 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181892.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:31:34,878 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-07 05:31:35,719 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.508e+02 2.885e+02 3.954e+02 8.020e+02, threshold=5.770e+02, percent-clipped=6.0
+2023-02-07 05:31:59,840 INFO [train.py:901] (0/4) Epoch 23, batch 4100, loss[loss=0.2185, simple_loss=0.3019, pruned_loss=0.06753, over 8374.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2881, pruned_loss=0.06154, over 1619005.31 frames. ], batch size: 24, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:32:34,923 INFO [train.py:901] (0/4) Epoch 23, batch 4150, loss[loss=0.2198, simple_loss=0.3108, pruned_loss=0.06445, over 8342.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2905, pruned_loss=0.06294, over 1618994.40 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:32:48,434 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.356e+02 2.929e+02 3.956e+02 6.697e+02, threshold=5.858e+02, percent-clipped=3.0
+2023-02-07 05:32:49,356 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181996.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:32:52,060 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-182000.pt
+2023-02-07 05:32:58,727 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:33:11,748 INFO [train.py:901] (0/4) Epoch 23, batch 4200, loss[loss=0.16, simple_loss=0.2497, pruned_loss=0.03517, over 7975.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2891, pruned_loss=0.06237, over 1614232.43 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:33:16,172 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182030.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:33:25,741 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 05:33:33,435 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182055.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:33:43,683 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9960, 1.3500, 1.5926, 1.2917, 0.8826, 1.4364, 1.7097, 1.4635],
+ device='cuda:0'), covar=tensor([0.0528, 0.1361, 0.1754, 0.1511, 0.0616, 0.1568, 0.0715, 0.0697],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0160, 0.0100, 0.0163, 0.0112, 0.0143],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 05:33:47,604 INFO [train.py:901] (0/4) Epoch 23, batch 4250, loss[loss=0.2357, simple_loss=0.3158, pruned_loss=0.07778, over 8613.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.06166, over 1612952.96 frames. ], batch size: 34, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:33:49,061 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 05:34:01,341 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.416e+02 2.989e+02 3.588e+02 6.339e+02, threshold=5.979e+02, percent-clipped=2.0
+2023-02-07 05:34:22,743 INFO [train.py:901] (0/4) Epoch 23, batch 4300, loss[loss=0.1978, simple_loss=0.2915, pruned_loss=0.05208, over 8240.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2874, pruned_loss=0.06152, over 1609950.42 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:34:58,238 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7445, 1.8006, 2.4041, 1.5815, 1.4286, 2.3492, 0.3275, 1.4358],
+ device='cuda:0'), covar=tensor([0.1538, 0.1049, 0.0268, 0.1018, 0.2218, 0.0320, 0.1878, 0.1042],
+ device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0200, 0.0129, 0.0221, 0.0270, 0.0137, 0.0170, 0.0193],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 05:34:58,707 INFO [train.py:901] (0/4) Epoch 23, batch 4350, loss[loss=0.193, simple_loss=0.2702, pruned_loss=0.05792, over 7563.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2869, pruned_loss=0.06139, over 1609799.38 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:35:13,491 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.346e+02 2.960e+02 3.931e+02 9.702e+02, threshold=5.919e+02, percent-clipped=9.0
+2023-02-07 05:35:21,947 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-07 05:35:34,673 INFO [train.py:901] (0/4) Epoch 23, batch 4400, loss[loss=0.2061, simple_loss=0.2903, pruned_loss=0.06098, over 8505.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.286, pruned_loss=0.06076, over 1611188.50 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:36:01,785 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9951, 2.2535, 1.8745, 2.9294, 1.3607, 1.6852, 2.1669, 2.3685],
+ device='cuda:0'), covar=tensor([0.0718, 0.0789, 0.0824, 0.0389, 0.1126, 0.1217, 0.0814, 0.0767],
+ device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0215, 0.0208, 0.0249, 0.0251, 0.0209],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 05:36:03,219 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:36:05,078 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-07 05:36:11,440 INFO [train.py:901] (0/4) Epoch 23, batch 4450, loss[loss=0.2236, simple_loss=0.2991, pruned_loss=0.07406, over 8450.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2857, pruned_loss=0.06045, over 1611924.55 frames. ], batch size: 27, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:36:20,465 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182288.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:36:26,040 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.606e+02 3.225e+02 4.349e+02 9.132e+02, threshold=6.449e+02, percent-clipped=7.0
+2023-02-07 05:36:28,971 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182299.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:36:47,069 INFO [train.py:901] (0/4) Epoch 23, batch 4500, loss[loss=0.2433, simple_loss=0.3237, pruned_loss=0.08151, over 8529.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.286, pruned_loss=0.06079, over 1612948.37 frames. ], batch size: 49, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:36:56,821 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-07 05:36:57,583 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182340.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:37:23,620 INFO [train.py:901] (0/4) Epoch 23, batch 4550, loss[loss=0.2604, simple_loss=0.3203, pruned_loss=0.1003, over 7705.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2855, pruned_loss=0.0604, over 1609507.32 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:37:37,490 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.347e+02 2.810e+02 3.651e+02 9.685e+02, threshold=5.619e+02, percent-clipped=2.0
+2023-02-07 05:37:39,975 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-07 05:37:59,264 INFO [train.py:901] (0/4) Epoch 23, batch 4600, loss[loss=0.189, simple_loss=0.2667, pruned_loss=0.05566, over 8087.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.06028, over 1611532.75 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:38:20,338 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182455.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:38:34,862 INFO [train.py:901] (0/4) Epoch 23, batch 4650, loss[loss=0.2081, simple_loss=0.2858, pruned_loss=0.06521, over 8080.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2863, pruned_loss=0.06076, over 1614244.90 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:38:50,632 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.204e+02 2.647e+02 3.638e+02 6.712e+02, threshold=5.294e+02, percent-clipped=7.0
+2023-02-07 05:39:12,445 INFO [train.py:901] (0/4) Epoch 23, batch 4700, loss[loss=0.1803, simple_loss=0.2799, pruned_loss=0.04035, over 8255.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2856, pruned_loss=0.05997, over 1617069.91 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:39:17,385 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:39:47,033 INFO [train.py:901] (0/4) Epoch 23, batch 4750, loss[loss=0.1968, simple_loss=0.2942, pruned_loss=0.04972, over 8337.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2858, pruned_loss=0.05974, over 1619389.13 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:40:01,607 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.297e+02 2.902e+02 3.418e+02 7.225e+02, threshold=5.805e+02, percent-clipped=3.0
+2023-02-07 05:40:06,065 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-07 05:40:08,822 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-07 05:40:24,084 INFO [train.py:901] (0/4) Epoch 23, batch 4800, loss[loss=0.2171, simple_loss=0.297, pruned_loss=0.06862, over 8047.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2855, pruned_loss=0.05953, over 1616051.11 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:40:36,435 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:40:59,186 INFO [train.py:901] (0/4) Epoch 23, batch 4850, loss[loss=0.2231, simple_loss=0.293, pruned_loss=0.0766, over 8334.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2867, pruned_loss=0.0606, over 1614903.51 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:41:00,607 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-07 05:41:13,249 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.288e+02 2.781e+02 3.814e+02 7.165e+02, threshold=5.562e+02, percent-clipped=4.0
+2023-02-07 05:41:25,682 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182711.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:41:36,257 INFO [train.py:901] (0/4) Epoch 23, batch 4900, loss[loss=0.1811, simple_loss=0.2666, pruned_loss=0.04782, over 8187.00 frames. ], tot_loss[loss=0.203, simple_loss=0.286, pruned_loss=0.05995, over 1617571.27 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:41:44,985 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182736.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:42:00,347 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182758.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:42:12,960 INFO [train.py:901] (0/4) Epoch 23, batch 4950, loss[loss=0.1946, simple_loss=0.2727, pruned_loss=0.05831, over 7519.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06044, over 1616546.86 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:42:27,040 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.423e+02 2.989e+02 3.745e+02 1.524e+03, threshold=5.977e+02, percent-clipped=7.0
+2023-02-07 05:42:48,225 INFO [train.py:901] (0/4) Epoch 23, batch 5000, loss[loss=0.2617, simple_loss=0.3229, pruned_loss=0.1003, over 6722.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.286, pruned_loss=0.0607, over 1613007.74 frames. ], batch size: 71, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:43:25,214 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:43:25,725 INFO [train.py:901] (0/4) Epoch 23, batch 5050, loss[loss=0.1815, simple_loss=0.2621, pruned_loss=0.05044, over 7819.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2864, pruned_loss=0.06117, over 1610756.14 frames. ], batch size: 20, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:43:26,529 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:43:29,514 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:43:40,730 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.376e+02 2.932e+02 3.646e+02 6.966e+02, threshold=5.864e+02, percent-clipped=3.0
+2023-02-07 05:43:46,341 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-07 05:44:01,709 INFO [train.py:901] (0/4) Epoch 23, batch 5100, loss[loss=0.2299, simple_loss=0.3075, pruned_loss=0.07618, over 8502.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2865, pruned_loss=0.06116, over 1606214.23 frames. ], batch size: 28, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:44:03,885 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9673, 1.4836, 1.5793, 1.3771, 0.9551, 1.3858, 1.6746, 1.6200],
+ device='cuda:0'), covar=tensor([0.0553, 0.1284, 0.1757, 0.1496, 0.0619, 0.1538, 0.0736, 0.0646],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0101, 0.0163, 0.0112, 0.0144],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 05:44:38,859 INFO [train.py:901] (0/4) Epoch 23, batch 5150, loss[loss=0.1878, simple_loss=0.2813, pruned_loss=0.04716, over 8361.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2876, pruned_loss=0.06152, over 1611675.03 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:44:50,234 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182991.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:44:53,603 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 2.409e+02 2.843e+02 3.449e+02 6.604e+02, threshold=5.686e+02, percent-clipped=1.0
+2023-02-07 05:45:07,238 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183014.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:45:09,899 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6099, 1.5830, 1.8859, 1.7268, 1.0608, 1.6850, 2.1600, 2.0262],
+ device='cuda:0'), covar=tensor([0.0505, 0.1247, 0.1554, 0.1323, 0.0613, 0.1431, 0.0630, 0.0570],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0143],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:0')
+2023-02-07 05:45:14,591 INFO [train.py:901] (0/4) Epoch 23, batch 5200, loss[loss=0.1947, simple_loss=0.2668, pruned_loss=0.06132, over 7426.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2869, pruned_loss=0.0607, over 1612321.66 frames. ], batch size: 17, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:45:24,441 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183039.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:45:46,581 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-07 05:45:50,599 INFO [train.py:901] (0/4) Epoch 23, batch 5250, loss[loss=0.2251, simple_loss=0.3017, pruned_loss=0.07421, over 8462.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2882, pruned_loss=0.06133, over 1618095.56 frames. ], batch size: 25, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:45:55,149 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-07 05:46:05,168 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.492e+02 2.942e+02 3.798e+02 7.403e+02, threshold=5.885e+02, percent-clipped=3.0
+2023-02-07 05:46:27,067 INFO [train.py:901] (0/4) Epoch 23, batch 5300, loss[loss=0.2297, simple_loss=0.3057, pruned_loss=0.07684, over 7517.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2872, pruned_loss=0.06074, over 1617417.66 frames. ], batch size: 71, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:46:32,349 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.68 vs. limit=5.0
+2023-02-07 05:46:59,017 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0
+2023-02-07 05:47:02,886 INFO [train.py:901] (0/4) Epoch 23, batch 5350, loss[loss=0.2472, simple_loss=0.3202, pruned_loss=0.08711, over 8467.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.288, pruned_loss=0.06089, over 1618292.30 frames. ], batch size: 25, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:47:17,709 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.502e+02 3.193e+02 3.793e+02 7.809e+02, threshold=6.385e+02, percent-clipped=1.0
+2023-02-07 05:47:34,655 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183218.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:47:38,841 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:47:39,486 INFO [train.py:901] (0/4) Epoch 23, batch 5400, loss[loss=0.2207, simple_loss=0.3043, pruned_loss=0.06852, over 8206.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2869, pruned_loss=0.06035, over 1615160.31 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:47:55,891 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:48:13,007 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183272.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:48:14,973 INFO [train.py:901] (0/4) Epoch 23, batch 5450, loss[loss=0.1596, simple_loss=0.2361, pruned_loss=0.04149, over 7515.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2871, pruned_loss=0.06042, over 1620094.05 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:48:22,225 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1193, 1.9052, 2.5002, 2.1284, 2.5319, 2.1241, 1.9348, 1.4261],
+ device='cuda:0'), covar=tensor([0.5492, 0.4965, 0.1974, 0.3568, 0.2173, 0.3237, 0.2063, 0.5171],
+ device='cuda:0'), in_proj_covar=tensor([0.0941, 0.0986, 0.0806, 0.0947, 0.0995, 0.0898, 0.0751, 0.0829],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 05:48:30,412 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.334e+02 2.819e+02 3.622e+02 6.725e+02, threshold=5.637e+02, percent-clipped=1.0
+2023-02-07 05:48:41,150 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-07 05:48:52,618 INFO [train.py:901] (0/4) Epoch 23, batch 5500, loss[loss=0.2035, simple_loss=0.2903, pruned_loss=0.05837, over 8508.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2879, pruned_loss=0.06079, over 1617618.47 frames. ], batch size: 39, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:48:58,314 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183333.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:49:02,343 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183339.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:49:14,869 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-07 05:49:27,066 INFO [train.py:901] (0/4) Epoch 23, batch 5550, loss[loss=0.1914, simple_loss=0.2878, pruned_loss=0.04752, over 8485.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2873, pruned_loss=0.06068, over 1612594.01 frames. ], batch size: 28, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:49:41,518 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.428e+02 3.119e+02 4.010e+02 1.058e+03, threshold=6.238e+02, percent-clipped=9.0
+2023-02-07 05:50:03,270 INFO [train.py:901] (0/4) Epoch 23, batch 5600, loss[loss=0.1709, simple_loss=0.2571, pruned_loss=0.04233, over 7929.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2865, pruned_loss=0.06045, over 1611480.62 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:50:04,075 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:50:12,353 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3677, 1.6272, 4.6112, 1.7475, 4.0676, 3.8272, 4.1701, 4.0701],
+ device='cuda:0'), covar=tensor([0.0625, 0.4221, 0.0498, 0.3728, 0.1068, 0.0881, 0.0543, 0.0628],
+ device='cuda:0'), in_proj_covar=tensor([0.0643, 0.0650, 0.0704, 0.0636, 0.0713, 0.0613, 0.0611, 0.0686],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:50:39,034 INFO [train.py:901] (0/4) Epoch 23, batch 5650, loss[loss=0.2156, simple_loss=0.2929, pruned_loss=0.06911, over 7918.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2855, pruned_loss=0.06025, over 1609924.84 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:50:51,413 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-07 05:50:53,306 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.300e+02 3.084e+02 3.921e+02 7.530e+02, threshold=6.168e+02, percent-clipped=4.0
+2023-02-07 05:51:14,121 INFO [train.py:901] (0/4) Epoch 23, batch 5700, loss[loss=0.2288, simple_loss=0.3082, pruned_loss=0.07466, over 8644.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2867, pruned_loss=0.06104, over 1608293.79 frames. ], batch size: 39, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:51:50,330 INFO [train.py:901] (0/4) Epoch 23, batch 5750, loss[loss=0.2075, simple_loss=0.2919, pruned_loss=0.06155, over 8590.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2856, pruned_loss=0.06136, over 1605401.87 frames. ], batch size: 39, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:51:57,138 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-07 05:52:00,834 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183589.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:01,859 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.85 vs. limit=5.0
+2023-02-07 05:52:05,028 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:05,469 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.334e+02 3.030e+02 3.740e+02 1.347e+03, threshold=6.060e+02, percent-clipped=7.0
+2023-02-07 05:52:18,045 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183614.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:22,120 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183620.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:25,346 INFO [train.py:901] (0/4) Epoch 23, batch 5800, loss[loss=0.1903, simple_loss=0.2784, pruned_loss=0.05112, over 8523.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2851, pruned_loss=0.06026, over 1609914.65 frames. ], batch size: 28, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:52:30,168 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:51,758 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3894, 1.3097, 4.6265, 1.7407, 4.1346, 3.8245, 4.1790, 4.0713],
+ device='cuda:0'), covar=tensor([0.0565, 0.4801, 0.0471, 0.3963, 0.0989, 0.0961, 0.0523, 0.0660],
+ device='cuda:0'), in_proj_covar=tensor([0.0642, 0.0650, 0.0704, 0.0636, 0.0713, 0.0614, 0.0610, 0.0689],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:53:01,086 INFO [train.py:901] (0/4) Epoch 23, batch 5850, loss[loss=0.1666, simple_loss=0.2514, pruned_loss=0.04087, over 7657.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2843, pruned_loss=0.05976, over 1607175.88 frames. ], batch size: 19, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:53:16,202 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 2.367e+02 2.798e+02 3.640e+02 5.951e+02, threshold=5.597e+02, percent-clipped=0.0
+2023-02-07 05:53:36,768 INFO [train.py:901] (0/4) Epoch 23, batch 5900, loss[loss=0.215, simple_loss=0.2962, pruned_loss=0.06691, over 8524.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2841, pruned_loss=0.05923, over 1610779.33 frames. ], batch size: 28, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:53:38,958 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6959, 1.4648, 2.8923, 1.4276, 2.3184, 3.0854, 3.2435, 2.6023],
+ device='cuda:0'), covar=tensor([0.1144, 0.1613, 0.0351, 0.1996, 0.0761, 0.0302, 0.0635, 0.0585],
+ device='cuda:0'), in_proj_covar=tensor([0.0294, 0.0319, 0.0285, 0.0313, 0.0311, 0.0267, 0.0422, 0.0302],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:53:38,995 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7397, 1.8784, 1.6336, 2.3724, 1.0332, 1.4977, 1.7318, 1.9067],
+ device='cuda:0'), covar=tensor([0.0746, 0.0756, 0.0942, 0.0412, 0.1002, 0.1341, 0.0693, 0.0690],
+ device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0197, 0.0245, 0.0214, 0.0206, 0.0246, 0.0250, 0.0206],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 05:53:59,044 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-07 05:54:08,420 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183770.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:54:11,756 INFO [train.py:901] (0/4) Epoch 23, batch 5950, loss[loss=0.2166, simple_loss=0.3019, pruned_loss=0.06567, over 8021.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2853, pruned_loss=0.05971, over 1610440.61 frames. ], batch size: 22, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:54:23,697 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2002, 4.1755, 3.8160, 1.9479, 3.7377, 3.7577, 3.7879, 3.5736],
+ device='cuda:0'), covar=tensor([0.0889, 0.0635, 0.1183, 0.4312, 0.1001, 0.1147, 0.1368, 0.0854],
+ device='cuda:0'), in_proj_covar=tensor([0.0524, 0.0441, 0.0426, 0.0536, 0.0427, 0.0443, 0.0427, 0.0384],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:54:27,017 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 2.784e+02 3.423e+02 5.836e+02, threshold=5.567e+02, percent-clipped=2.0
+2023-02-07 05:54:38,314 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4250, 1.2940, 2.3821, 1.2593, 2.1063, 2.5292, 2.6973, 2.1470],
+ device='cuda:0'), covar=tensor([0.1134, 0.1474, 0.0459, 0.2091, 0.0788, 0.0417, 0.0695, 0.0671],
+ device='cuda:0'), in_proj_covar=tensor([0.0293, 0.0317, 0.0283, 0.0311, 0.0308, 0.0265, 0.0420, 0.0300],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 05:54:43,514 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5855, 4.6311, 4.1449, 2.1406, 4.1330, 4.1732, 4.2071, 4.0361],
+ device='cuda:0'), covar=tensor([0.0728, 0.0483, 0.1092, 0.4492, 0.0808, 0.0971, 0.1179, 0.0764],
+ device='cuda:0'), in_proj_covar=tensor([0.0526, 0.0443, 0.0428, 0.0539, 0.0429, 0.0445, 0.0428, 0.0385],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:54:47,613 INFO [train.py:901] (0/4) Epoch 23, batch 6000, loss[loss=0.192, simple_loss=0.2727, pruned_loss=0.05571, over 8023.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2858, pruned_loss=0.06005, over 1613221.31 frames. ], batch size: 22, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:54:47,614 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-07 05:55:00,695 INFO [train.py:935] (0/4) Epoch 23, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03597, over 944034.00 frames.
+2023-02-07 05:55:00,697 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-07 05:55:19,887 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0
+2023-02-07 05:55:25,805 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183860.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:55:27,316 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7435, 2.5034, 3.2011, 2.6500, 3.0884, 2.5992, 2.5524, 2.4062],
+ device='cuda:0'), covar=tensor([0.3855, 0.3960, 0.1676, 0.3059, 0.1965, 0.2611, 0.1446, 0.3727],
+ device='cuda:0'), in_proj_covar=tensor([0.0945, 0.0990, 0.0809, 0.0953, 0.0998, 0.0900, 0.0751, 0.0829],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 05:55:36,138 INFO [train.py:901] (0/4) Epoch 23, batch 6050, loss[loss=0.2034, simple_loss=0.2646, pruned_loss=0.0711, over 7537.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.286, pruned_loss=0.06036, over 1613122.10 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:55:43,231 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183885.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:55:50,620 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.465e+02 3.097e+02 3.782e+02 8.398e+02, threshold=6.194e+02, percent-clipped=6.0
+2023-02-07 05:56:11,858 INFO [train.py:901] (0/4) Epoch 23, batch 6100, loss[loss=0.2142, simple_loss=0.2954, pruned_loss=0.06654, over 8625.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.0615, over 1617177.12 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:56:21,661 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-07 05:56:23,562 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4044, 4.3821, 4.0046, 2.2802, 3.9162, 3.9683, 3.9452, 3.9051],
+ device='cuda:0'), covar=tensor([0.0714, 0.0517, 0.0961, 0.3920, 0.0846, 0.1046, 0.1292, 0.0748],
+ device='cuda:0'), in_proj_covar=tensor([0.0526, 0.0443, 0.0431, 0.0539, 0.0430, 0.0446, 0.0428, 0.0386],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 05:56:32,486 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-07 05:56:47,362 INFO [train.py:901] (0/4) Epoch 23, batch 6150, loss[loss=0.1973, simple_loss=0.2809, pruned_loss=0.05683, over 7921.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.287, pruned_loss=0.06124, over 1614633.14 frames. 
], batch size: 20, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:56:48,168 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:56:51,714 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3364, 1.7780, 3.3911, 1.5887, 2.3848, 3.7467, 3.8666, 3.1409], + device='cuda:0'), covar=tensor([0.0942, 0.1581, 0.0311, 0.2158, 0.1014, 0.0232, 0.0491, 0.0551], + device='cuda:0'), in_proj_covar=tensor([0.0293, 0.0317, 0.0282, 0.0312, 0.0308, 0.0265, 0.0419, 0.0300], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 05:56:59,930 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6356, 1.3985, 4.8834, 1.8242, 4.2952, 4.0391, 4.3755, 4.3011], + device='cuda:0'), covar=tensor([0.0535, 0.5087, 0.0455, 0.4287, 0.1082, 0.1006, 0.0530, 0.0623], + device='cuda:0'), in_proj_covar=tensor([0.0646, 0.0654, 0.0708, 0.0643, 0.0723, 0.0619, 0.0618, 0.0693], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 05:57:01,785 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.512e+02 2.876e+02 3.577e+02 6.799e+02, threshold=5.752e+02, percent-clipped=2.0 +2023-02-07 05:57:04,783 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-184000.pt +2023-02-07 05:57:22,990 INFO [train.py:901] (0/4) Epoch 23, batch 6200, loss[loss=0.2159, simple_loss=0.3017, pruned_loss=0.06507, over 8104.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2868, pruned_loss=0.06072, over 1614461.50 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:57:23,225 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4023, 2.3276, 1.7384, 2.2562, 2.0939, 1.4793, 1.9626, 1.9451], + device='cuda:0'), covar=tensor([0.1485, 0.0415, 0.1280, 0.0562, 0.0670, 0.1636, 0.0944, 0.0943], + device='cuda:0'), in_proj_covar=tensor([0.0351, 0.0231, 0.0332, 0.0306, 0.0298, 0.0338, 0.0341, 0.0315], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 05:57:54,990 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:57:59,624 INFO [train.py:901] (0/4) Epoch 23, batch 6250, loss[loss=0.2187, simple_loss=0.2965, pruned_loss=0.07042, over 8481.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2873, pruned_loss=0.0611, over 1617181.39 frames. 
], batch size: 25, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:58:03,995 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6359, 1.9843, 2.1564, 1.8944, 1.4388, 1.9733, 2.3947, 2.1867], + device='cuda:0'), covar=tensor([0.0483, 0.1074, 0.1436, 0.1290, 0.0590, 0.1257, 0.0591, 0.0545], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 05:58:06,669 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5925, 2.0672, 3.2764, 1.4199, 2.4574, 2.0321, 1.6778, 2.4264], + device='cuda:0'), covar=tensor([0.1908, 0.2551, 0.0935, 0.4557, 0.1918, 0.3155, 0.2371, 0.2468], + device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0611, 0.0554, 0.0647, 0.0647, 0.0594, 0.0542, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 05:58:11,438 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:58:14,642 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.340e+02 2.866e+02 3.425e+02 5.984e+02, threshold=5.731e+02, percent-clipped=3.0 +2023-02-07 05:58:34,478 INFO [train.py:901] (0/4) Epoch 23, batch 6300, loss[loss=0.1712, simple_loss=0.2644, pruned_loss=0.03899, over 7424.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2865, pruned_loss=0.06054, over 1617752.62 frames. ], batch size: 17, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:58:42,583 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.53 vs. limit=5.0 +2023-02-07 05:58:45,746 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:04,542 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184166.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:10,617 INFO [train.py:901] (0/4) Epoch 23, batch 6350, loss[loss=0.2102, simple_loss=0.2933, pruned_loss=0.06352, over 8564.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.06005, over 1615471.96 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 05:59:25,787 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.298e+02 2.703e+02 3.593e+02 9.198e+02, threshold=5.406e+02, percent-clipped=6.0 +2023-02-07 05:59:32,345 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184204.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:46,837 INFO [train.py:901] (0/4) Epoch 23, batch 6400, loss[loss=0.2058, simple_loss=0.2903, pruned_loss=0.06065, over 8471.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2848, pruned_loss=0.05994, over 1613640.53 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:00:22,069 INFO [train.py:901] (0/4) Epoch 23, batch 6450, loss[loss=0.1881, simple_loss=0.2784, pruned_loss=0.04897, over 8328.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2852, pruned_loss=0.06038, over 1614947.03 frames. 
], batch size: 26, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:00:31,212 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2486, 1.0854, 2.4363, 1.0531, 1.9465, 1.9424, 2.1826, 2.1798], + device='cuda:0'), covar=tensor([0.1723, 0.4914, 0.1888, 0.5129, 0.2400, 0.1962, 0.1412, 0.1479], + device='cuda:0'), in_proj_covar=tensor([0.0642, 0.0649, 0.0706, 0.0640, 0.0716, 0.0616, 0.0616, 0.0691], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:00:37,217 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.429e+02 3.055e+02 3.904e+02 7.071e+02, threshold=6.109e+02, percent-clipped=5.0 +2023-02-07 06:00:54,561 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:00:58,476 INFO [train.py:901] (0/4) Epoch 23, batch 6500, loss[loss=0.199, simple_loss=0.2901, pruned_loss=0.05393, over 8031.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2852, pruned_loss=0.06087, over 1611353.31 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:01:01,453 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.62 vs. limit=5.0 +2023-02-07 06:01:13,631 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:30,730 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:32,653 INFO [train.py:901] (0/4) Epoch 23, batch 6550, loss[loss=0.2016, simple_loss=0.2805, pruned_loss=0.06131, over 8513.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2868, pruned_loss=0.06141, over 1616254.33 frames. ], batch size: 28, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:01:34,221 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:48,090 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.251e+02 2.720e+02 3.518e+02 7.175e+02, threshold=5.440e+02, percent-clipped=6.0 +2023-02-07 06:01:51,710 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 06:02:00,063 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:02:07,252 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6507, 1.8881, 2.0436, 1.3039, 2.1974, 1.4189, 0.6712, 1.9025], + device='cuda:0'), covar=tensor([0.0638, 0.0404, 0.0328, 0.0649, 0.0400, 0.0918, 0.0938, 0.0327], + device='cuda:0'), in_proj_covar=tensor([0.0455, 0.0396, 0.0349, 0.0448, 0.0381, 0.0536, 0.0392, 0.0425], + device='cuda:0'), out_proj_covar=tensor([1.2144e-04, 1.0353e-04, 9.1555e-05, 1.1783e-04, 1.0023e-04, 1.5082e-04, + 1.0563e-04, 1.1218e-04], device='cuda:0') +2023-02-07 06:02:09,832 INFO [train.py:901] (0/4) Epoch 23, batch 6600, loss[loss=0.2611, simple_loss=0.318, pruned_loss=0.1021, over 7164.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2865, pruned_loss=0.0615, over 1610216.01 frames. ], batch size: 71, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:02:09,849 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 06:02:20,762 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. 
limit=2.0 +2023-02-07 06:02:42,925 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0 +2023-02-07 06:02:45,212 INFO [train.py:901] (0/4) Epoch 23, batch 6650, loss[loss=0.1859, simple_loss=0.2801, pruned_loss=0.04583, over 8456.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.287, pruned_loss=0.06139, over 1608441.67 frames. ], batch size: 27, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:02:52,258 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0223, 1.7062, 3.3011, 1.4968, 2.4784, 3.6092, 3.7307, 3.0449], + device='cuda:0'), covar=tensor([0.1169, 0.1701, 0.0323, 0.2182, 0.0974, 0.0249, 0.0532, 0.0571], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0320, 0.0286, 0.0315, 0.0313, 0.0269, 0.0425, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:03:00,392 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.187e+02 2.636e+02 3.150e+02 7.164e+02, threshold=5.273e+02, percent-clipped=1.0 +2023-02-07 06:03:06,012 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:03:21,176 INFO [train.py:901] (0/4) Epoch 23, batch 6700, loss[loss=0.2266, simple_loss=0.3241, pruned_loss=0.06449, over 8458.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2867, pruned_loss=0.06087, over 1606026.04 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:22,771 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:03:56,987 INFO [train.py:901] (0/4) Epoch 23, batch 6750, loss[loss=0.1846, simple_loss=0.2705, pruned_loss=0.04941, over 7815.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2873, pruned_loss=0.06125, over 1608919.60 frames. ], batch size: 20, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:57,247 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184575.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:04:11,517 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.299e+02 2.705e+02 3.689e+02 1.087e+03, threshold=5.410e+02, percent-clipped=6.0 +2023-02-07 06:04:14,468 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184600.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:04:30,935 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 06:04:32,267 INFO [train.py:901] (0/4) Epoch 23, batch 6800, loss[loss=0.28, simple_loss=0.3583, pruned_loss=0.1009, over 8596.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2875, pruned_loss=0.06159, over 1612866.41 frames. ], batch size: 31, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:04:55,640 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 06:05:08,929 INFO [train.py:901] (0/4) Epoch 23, batch 6850, loss[loss=0.1815, simple_loss=0.2578, pruned_loss=0.05263, over 7435.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2871, pruned_loss=0.06091, over 1614831.38 frames. ], batch size: 17, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:05:19,309 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 06:05:23,518 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.645e+02 3.100e+02 4.179e+02 7.238e+02, threshold=6.201e+02, percent-clipped=8.0 +2023-02-07 06:05:40,681 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184721.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:05:43,468 INFO [train.py:901] (0/4) Epoch 23, batch 6900, loss[loss=0.2303, simple_loss=0.3023, pruned_loss=0.07913, over 8611.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.288, pruned_loss=0.06159, over 1617201.09 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:06:21,325 INFO [train.py:901] (0/4) Epoch 23, batch 6950, loss[loss=0.2095, simple_loss=0.2975, pruned_loss=0.06072, over 8614.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2877, pruned_loss=0.06124, over 1616943.88 frames. ], batch size: 31, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:06:27,153 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:06:29,056 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 06:06:35,999 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.334e+02 2.863e+02 3.573e+02 6.345e+02, threshold=5.727e+02, percent-clipped=1.0 +2023-02-07 06:06:44,518 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184808.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:06:56,210 INFO [train.py:901] (0/4) Epoch 23, batch 7000, loss[loss=0.2084, simple_loss=0.2823, pruned_loss=0.06728, over 8508.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2866, pruned_loss=0.06022, over 1621854.07 frames. ], batch size: 26, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:07:03,959 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:12,026 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:32,118 INFO [train.py:901] (0/4) Epoch 23, batch 7050, loss[loss=0.202, simple_loss=0.2786, pruned_loss=0.06269, over 8030.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2866, pruned_loss=0.0601, over 1620645.51 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:07:38,582 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:48,062 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.247e+02 2.854e+02 3.580e+02 1.056e+03, threshold=5.709e+02, percent-clipped=4.0 +2023-02-07 06:07:53,399 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.26 vs. limit=5.0 +2023-02-07 06:08:08,171 INFO [train.py:901] (0/4) Epoch 23, batch 7100, loss[loss=0.2139, simple_loss=0.3054, pruned_loss=0.06115, over 8361.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2866, pruned_loss=0.0598, over 1619296.08 frames. 
], batch size: 48, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:08:30,154 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184957.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:08:34,366 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:08:42,476 INFO [train.py:901] (0/4) Epoch 23, batch 7150, loss[loss=0.2129, simple_loss=0.2971, pruned_loss=0.06438, over 8337.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.287, pruned_loss=0.06032, over 1619156.87 frames. ], batch size: 26, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:08:53,466 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4870, 1.3495, 1.5875, 1.4362, 1.5458, 1.5081, 1.3799, 0.7882], + device='cuda:0'), covar=tensor([0.3897, 0.3451, 0.1563, 0.2526, 0.1775, 0.2296, 0.1505, 0.3797], + device='cuda:0'), in_proj_covar=tensor([0.0942, 0.0991, 0.0806, 0.0950, 0.0997, 0.0896, 0.0753, 0.0829], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:08:58,822 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.323e+02 2.664e+02 3.243e+02 7.163e+02, threshold=5.329e+02, percent-clipped=2.0 +2023-02-07 06:09:20,371 INFO [train.py:901] (0/4) Epoch 23, batch 7200, loss[loss=0.2388, simple_loss=0.3196, pruned_loss=0.07893, over 8101.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2867, pruned_loss=0.06014, over 1619352.59 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:29,421 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0559, 1.6536, 1.6880, 1.5492, 0.9128, 1.4848, 1.7651, 1.5353], + device='cuda:0'), covar=tensor([0.0512, 0.1204, 0.1700, 0.1412, 0.0624, 0.1472, 0.0707, 0.0657], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0102, 0.0164, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 06:09:35,027 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9391, 1.5918, 3.5415, 1.6510, 2.4619, 3.9367, 4.0099, 3.4237], + device='cuda:0'), covar=tensor([0.1173, 0.1790, 0.0298, 0.1841, 0.1062, 0.0212, 0.0486, 0.0480], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0323, 0.0287, 0.0315, 0.0314, 0.0271, 0.0426, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:09:35,757 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8800, 2.1974, 3.6329, 1.7770, 1.7112, 3.5096, 0.7310, 2.1518], + device='cuda:0'), covar=tensor([0.1328, 0.1263, 0.0219, 0.1890, 0.2640, 0.0306, 0.2225, 0.1426], + device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0195, 0.0129, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 06:09:44,864 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5420, 1.8082, 2.1945, 1.6949, 0.9091, 1.8638, 1.9619, 1.8553], + device='cuda:0'), covar=tensor([0.0468, 0.1177, 0.1530, 0.1371, 0.0597, 0.1368, 0.0651, 0.0603], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') 
+2023-02-07 06:09:44,903 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8432, 2.2062, 3.6778, 1.8343, 1.6274, 3.5894, 0.5847, 2.1633], + device='cuda:0'), covar=tensor([0.1524, 0.1345, 0.0249, 0.1942, 0.2826, 0.0363, 0.2488, 0.1525], + device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0196, 0.0129, 0.0220, 0.0268, 0.0136, 0.0169, 0.0191], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 06:09:54,643 INFO [train.py:901] (0/4) Epoch 23, batch 7250, loss[loss=0.215, simple_loss=0.2953, pruned_loss=0.06734, over 8119.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2863, pruned_loss=0.06022, over 1614996.06 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:56,879 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185078.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:06,361 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:09,712 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.341e+02 2.701e+02 3.625e+02 6.528e+02, threshold=5.401e+02, percent-clipped=8.0 +2023-02-07 06:10:24,999 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:30,388 INFO [train.py:901] (0/4) Epoch 23, batch 7300, loss[loss=0.2011, simple_loss=0.2902, pruned_loss=0.05596, over 8571.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2876, pruned_loss=0.06142, over 1616814.76 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:10:45,748 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9137, 1.6574, 1.9801, 1.8153, 1.9093, 1.9547, 1.7649, 0.8393], + device='cuda:0'), covar=tensor([0.5145, 0.4417, 0.2016, 0.3168, 0.2111, 0.2782, 0.1901, 0.4531], + device='cuda:0'), in_proj_covar=tensor([0.0939, 0.0986, 0.0805, 0.0948, 0.0995, 0.0896, 0.0751, 0.0826], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:10:46,963 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:04,728 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.19 vs. limit=5.0 +2023-02-07 06:11:06,510 INFO [train.py:901] (0/4) Epoch 23, batch 7350, loss[loss=0.1863, simple_loss=0.2835, pruned_loss=0.04455, over 8288.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2881, pruned_loss=0.06134, over 1618859.36 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:11:19,726 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-07 06:11:21,063 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.380e+02 2.863e+02 3.556e+02 7.708e+02, threshold=5.726e+02, percent-clipped=6.0 +2023-02-07 06:11:38,654 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7672, 1.7142, 2.4398, 1.5600, 1.2975, 2.4017, 0.5465, 1.4651], + device='cuda:0'), covar=tensor([0.1385, 0.1100, 0.0276, 0.1134, 0.2432, 0.0309, 0.2024, 0.1261], + device='cuda:0'), in_proj_covar=tensor([0.0189, 0.0195, 0.0128, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 06:11:38,659 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185219.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:41,240 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 06:11:42,634 INFO [train.py:901] (0/4) Epoch 23, batch 7400, loss[loss=0.2345, simple_loss=0.3066, pruned_loss=0.08119, over 8596.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2872, pruned_loss=0.0612, over 1609010.07 frames. ], batch size: 34, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:11:44,779 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185228.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:48,318 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0324, 1.8293, 3.3245, 1.4821, 2.3130, 3.6235, 3.7381, 3.0398], + device='cuda:0'), covar=tensor([0.1163, 0.1602, 0.0330, 0.2150, 0.1073, 0.0236, 0.0565, 0.0563], + device='cuda:0'), in_proj_covar=tensor([0.0297, 0.0322, 0.0287, 0.0315, 0.0314, 0.0269, 0.0424, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:11:56,710 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:04,425 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 06:12:18,696 INFO [train.py:901] (0/4) Epoch 23, batch 7450, loss[loss=0.2327, simple_loss=0.3203, pruned_loss=0.07254, over 8466.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2867, pruned_loss=0.0611, over 1614929.83 frames. ], batch size: 29, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:12:18,962 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5783, 1.9385, 2.8123, 1.3962, 2.1586, 1.9212, 1.6512, 2.1809], + device='cuda:0'), covar=tensor([0.1977, 0.2635, 0.1029, 0.4751, 0.2007, 0.3316, 0.2492, 0.2338], + device='cuda:0'), in_proj_covar=tensor([0.0526, 0.0611, 0.0554, 0.0646, 0.0647, 0.0595, 0.0541, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:12:21,569 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-07 06:12:33,471 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.310e+02 2.954e+02 3.827e+02 6.869e+02, threshold=5.908e+02, percent-clipped=4.0 +2023-02-07 06:12:37,081 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:53,821 INFO [train.py:901] (0/4) Epoch 23, batch 7500, loss[loss=0.2079, simple_loss=0.2883, pruned_loss=0.06374, over 8282.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2861, pruned_loss=0.06115, over 1613335.97 frames. ], batch size: 23, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:05,840 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 06:13:08,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185343.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:13:22,311 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4741, 2.8910, 2.1893, 4.0080, 1.5068, 2.0431, 2.4543, 2.7881], + device='cuda:0'), covar=tensor([0.0679, 0.0800, 0.0879, 0.0260, 0.1178, 0.1241, 0.0857, 0.0760], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0199, 0.0244, 0.0214, 0.0206, 0.0247, 0.0250, 0.0207], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 06:13:31,746 INFO [train.py:901] (0/4) Epoch 23, batch 7550, loss[loss=0.1968, simple_loss=0.2785, pruned_loss=0.05753, over 7936.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2856, pruned_loss=0.06094, over 1613724.70 frames. ], batch size: 20, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:39,223 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 06:13:46,072 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.461e+02 3.059e+02 3.860e+02 7.244e+02, threshold=6.118e+02, percent-clipped=3.0 +2023-02-07 06:14:00,499 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:04,572 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:06,601 INFO [train.py:901] (0/4) Epoch 23, batch 7600, loss[loss=0.1847, simple_loss=0.2543, pruned_loss=0.05759, over 7234.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2847, pruned_loss=0.06038, over 1612556.04 frames. ], batch size: 16, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:09,580 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0 +2023-02-07 06:14:41,964 INFO [train.py:901] (0/4) Epoch 23, batch 7650, loss[loss=0.2126, simple_loss=0.3051, pruned_loss=0.06005, over 8187.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2851, pruned_loss=0.06097, over 1612109.51 frames. 
], batch size: 23, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:50,191 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:54,391 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:57,818 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.478e+02 3.100e+02 3.999e+02 8.387e+02, threshold=6.200e+02, percent-clipped=6.0 +2023-02-07 06:15:17,441 INFO [train.py:901] (0/4) Epoch 23, batch 7700, loss[loss=0.2108, simple_loss=0.2896, pruned_loss=0.06605, over 8075.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2859, pruned_loss=0.06097, over 1612609.95 frames. ], batch size: 21, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:15:25,724 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:15:37,342 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 06:15:53,097 INFO [train.py:901] (0/4) Epoch 23, batch 7750, loss[loss=0.2188, simple_loss=0.2923, pruned_loss=0.07264, over 7792.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2853, pruned_loss=0.06067, over 1609109.49 frames. ], batch size: 19, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:16:08,169 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.336e+02 2.905e+02 3.607e+02 6.527e+02, threshold=5.810e+02, percent-clipped=2.0 +2023-02-07 06:16:10,495 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185599.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:15,291 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,831 INFO [train.py:901] (0/4) Epoch 23, batch 7800, loss[loss=0.2014, simple_loss=0.29, pruned_loss=0.05635, over 8475.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2863, pruned_loss=0.06128, over 1607015.41 frames. ], batch size: 25, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:01,066 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185672.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:02,911 INFO [train.py:901] (0/4) Epoch 23, batch 7850, loss[loss=0.2129, simple_loss=0.2984, pruned_loss=0.06369, over 8555.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2847, pruned_loss=0.06041, over 1607954.73 frames. ], batch size: 49, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:17,281 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 2.983e+02 3.607e+02 9.941e+02, threshold=5.966e+02, percent-clipped=5.0 +2023-02-07 06:17:18,189 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185697.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:30,684 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.28 vs. limit=5.0 +2023-02-07 06:17:36,388 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-07 06:17:37,222 INFO [train.py:901] (0/4) Epoch 23, batch 7900, loss[loss=0.2125, simple_loss=0.2945, pruned_loss=0.06523, over 8627.00 frames. 
], tot_loss[loss=0.2029, simple_loss=0.2848, pruned_loss=0.06054, over 1601890.02 frames. ], batch size: 34, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:05,868 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5441, 1.7658, 1.9096, 1.2221, 1.9590, 1.4061, 0.4781, 1.7560], + device='cuda:0'), covar=tensor([0.0479, 0.0327, 0.0289, 0.0487, 0.0365, 0.0827, 0.0785, 0.0266], + device='cuda:0'), in_proj_covar=tensor([0.0461, 0.0401, 0.0354, 0.0454, 0.0386, 0.0544, 0.0398, 0.0430], + device='cuda:0'), out_proj_covar=tensor([1.2310e-04, 1.0477e-04, 9.3033e-05, 1.1932e-04, 1.0143e-04, 1.5319e-04, + 1.0712e-04, 1.1351e-04], device='cuda:0') +2023-02-07 06:18:11,090 INFO [train.py:901] (0/4) Epoch 23, batch 7950, loss[loss=0.1748, simple_loss=0.2547, pruned_loss=0.04749, over 7812.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2843, pruned_loss=0.06007, over 1603133.75 frames. ], batch size: 20, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:12,591 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185777.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:23,319 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:25,078 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.254e+02 2.775e+02 3.427e+02 8.244e+02, threshold=5.550e+02, percent-clipped=2.0 +2023-02-07 06:18:39,370 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185817.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:18:40,117 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:44,667 INFO [train.py:901] (0/4) Epoch 23, batch 8000, loss[loss=0.2622, simple_loss=0.3337, pruned_loss=0.0953, over 8670.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2862, pruned_loss=0.06109, over 1605593.96 frames. ], batch size: 39, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:48,036 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:09,864 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:18,260 INFO [train.py:901] (0/4) Epoch 23, batch 8050, loss[loss=0.1792, simple_loss=0.2538, pruned_loss=0.05233, over 7529.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2841, pruned_loss=0.06082, over 1589089.33 frames. ], batch size: 18, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:19:26,746 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:32,784 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.662e+02 3.318e+02 4.159e+02 9.358e+02, threshold=6.635e+02, percent-clipped=7.0 +2023-02-07 06:19:41,826 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-23.pt +2023-02-07 06:19:53,796 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 06:19:57,770 INFO [train.py:901] (0/4) Epoch 24, batch 0, loss[loss=0.1683, simple_loss=0.2475, pruned_loss=0.04454, over 7238.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2475, pruned_loss=0.04454, over 7238.00 frames. 
], batch size: 16, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:19:57,771 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 06:20:01,566 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5874, 1.3106, 1.5603, 1.2867, 0.9029, 1.3291, 1.5476, 1.2196], + device='cuda:0'), covar=tensor([0.0647, 0.1395, 0.1778, 0.1557, 0.0639, 0.1603, 0.0721, 0.0735], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 06:20:09,065 INFO [train.py:935] (0/4) Epoch 24, validation: loss=0.1731, simple_loss=0.2733, pruned_loss=0.03644, over 944034.00 frames. +2023-02-07 06:20:09,066 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 06:20:22,818 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7259, 2.0177, 3.3096, 1.5522, 2.6104, 2.1185, 1.7762, 2.7111], + device='cuda:0'), covar=tensor([0.1995, 0.2815, 0.0795, 0.4463, 0.1811, 0.3084, 0.2388, 0.2125], + device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0610, 0.0551, 0.0645, 0.0647, 0.0594, 0.0542, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:20:23,944 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 06:20:35,528 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:20:44,032 INFO [train.py:901] (0/4) Epoch 24, batch 50, loss[loss=0.1775, simple_loss=0.264, pruned_loss=0.04556, over 8519.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06094, over 365468.98 frames. ], batch size: 26, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:20:57,554 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 06:21:11,394 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.437e+02 2.851e+02 3.663e+02 1.155e+03, threshold=5.702e+02, percent-clipped=3.0 +2023-02-07 06:21:14,405 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-186000.pt +2023-02-07 06:21:16,891 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2951, 2.8774, 2.1488, 4.0976, 1.6841, 1.7889, 2.3607, 2.9044], + device='cuda:0'), covar=tensor([0.0747, 0.0702, 0.0888, 0.0291, 0.1046, 0.1425, 0.1038, 0.0760], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0197, 0.0243, 0.0214, 0.0206, 0.0247, 0.0250, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 06:21:20,546 INFO [train.py:901] (0/4) Epoch 24, batch 100, loss[loss=0.2291, simple_loss=0.3067, pruned_loss=0.07571, over 8478.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2891, pruned_loss=0.06103, over 648178.04 frames. ], batch size: 28, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:21:22,589 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 06:21:44,089 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-07 06:21:56,095 INFO [train.py:901] (0/4) Epoch 24, batch 150, loss[loss=0.1809, simple_loss=0.2663, pruned_loss=0.04779, over 8088.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.061, over 860587.84 frames. ], batch size: 21, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:21,896 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.384e+02 2.880e+02 3.401e+02 7.597e+02, threshold=5.761e+02, percent-clipped=1.0 +2023-02-07 06:22:30,263 INFO [train.py:901] (0/4) Epoch 24, batch 200, loss[loss=0.174, simple_loss=0.2526, pruned_loss=0.04766, over 7814.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2859, pruned_loss=0.0601, over 1025636.82 frames. ], batch size: 20, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:38,134 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:22:40,016 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:22:43,726 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.88 vs. limit=5.0 +2023-02-07 06:23:05,568 INFO [train.py:901] (0/4) Epoch 24, batch 250, loss[loss=0.2241, simple_loss=0.312, pruned_loss=0.06815, over 8500.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06013, over 1155636.54 frames. ], batch size: 28, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:07,692 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186161.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:23:16,539 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 06:23:18,798 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186176.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:25,610 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 06:23:32,338 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.402e+02 3.098e+02 3.972e+02 8.418e+02, threshold=6.197e+02, percent-clipped=5.0 +2023-02-07 06:23:36,036 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186201.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:40,507 INFO [train.py:901] (0/4) Epoch 24, batch 300, loss[loss=0.1845, simple_loss=0.2523, pruned_loss=0.05828, over 7424.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2889, pruned_loss=0.06225, over 1258570.14 frames. ], batch size: 17, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:49,145 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 06:23:50,381 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 06:23:52,985 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186226.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:15,085 INFO [train.py:901] (0/4) Epoch 24, batch 350, loss[loss=0.1962, simple_loss=0.2773, pruned_loss=0.05761, over 7659.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.06223, over 1335930.90 frames. 
], batch size: 19, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:24:28,126 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186276.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:24:28,699 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:42,189 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.428e+02 2.971e+02 3.348e+02 5.777e+02, threshold=5.941e+02, percent-clipped=0.0 +2023-02-07 06:24:47,223 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 06:24:50,342 INFO [train.py:901] (0/4) Epoch 24, batch 400, loss[loss=0.1909, simple_loss=0.2844, pruned_loss=0.04865, over 8280.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2894, pruned_loss=0.06227, over 1396652.91 frames. ], batch size: 23, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:26,090 INFO [train.py:901] (0/4) Epoch 24, batch 450, loss[loss=0.1804, simple_loss=0.2534, pruned_loss=0.05372, over 7776.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2882, pruned_loss=0.06205, over 1441974.09 frames. ], batch size: 19, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:52,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.487e+02 2.919e+02 3.580e+02 7.824e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-07 06:26:02,009 INFO [train.py:901] (0/4) Epoch 24, batch 500, loss[loss=0.1818, simple_loss=0.2607, pruned_loss=0.05145, over 7975.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.288, pruned_loss=0.06169, over 1485208.97 frames. ], batch size: 21, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:11,160 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5277, 2.7273, 1.9484, 2.3099, 2.0835, 1.7451, 2.1648, 2.2577], + device='cuda:0'), covar=tensor([0.1516, 0.0389, 0.1253, 0.0648, 0.0782, 0.1457, 0.0982, 0.0919], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0236, 0.0338, 0.0312, 0.0304, 0.0343, 0.0350, 0.0320], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 06:26:23,333 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:37,814 INFO [train.py:901] (0/4) Epoch 24, batch 550, loss[loss=0.1836, simple_loss=0.2688, pruned_loss=0.04916, over 7806.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2858, pruned_loss=0.0604, over 1508615.92 frames. 
], batch size: 20, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:40,765 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:42,861 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6065, 1.6228, 2.2927, 1.5040, 1.1767, 2.2309, 0.5105, 1.3024], + device='cuda:0'), covar=tensor([0.1848, 0.1423, 0.0361, 0.1194, 0.2735, 0.0389, 0.1780, 0.1447], + device='cuda:0'), in_proj_covar=tensor([0.0190, 0.0196, 0.0129, 0.0217, 0.0268, 0.0135, 0.0168, 0.0190], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 06:27:01,103 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:01,700 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:03,579 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.350e+02 3.005e+02 3.846e+02 7.955e+02, threshold=6.011e+02, percent-clipped=1.0 +2023-02-07 06:27:05,957 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1575, 1.8980, 2.4575, 2.0184, 2.4281, 2.1887, 1.9639, 1.1746], + device='cuda:0'), covar=tensor([0.5784, 0.5148, 0.2094, 0.4019, 0.2666, 0.3313, 0.2051, 0.5617], + device='cuda:0'), in_proj_covar=tensor([0.0943, 0.0986, 0.0806, 0.0950, 0.0994, 0.0896, 0.0752, 0.0824], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:27:12,584 INFO [train.py:901] (0/4) Epoch 24, batch 600, loss[loss=0.1934, simple_loss=0.2845, pruned_loss=0.05114, over 8246.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2852, pruned_loss=0.06003, over 1530797.23 frames. ], batch size: 24, lr: 3.17e-03, grad_scale: 16.0 +2023-02-07 06:27:19,616 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:19,986 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 06:27:21,524 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186520.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:24,467 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 06:27:26,213 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 06:27:29,777 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186532.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:27:31,735 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:31,829 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7781, 2.3244, 4.0848, 1.5502, 3.0700, 2.3058, 1.9815, 2.9387], + device='cuda:0'), covar=tensor([0.1935, 0.2629, 0.0927, 0.4643, 0.1808, 0.3180, 0.2259, 0.2526], + device='cuda:0'), in_proj_covar=tensor([0.0523, 0.0610, 0.0552, 0.0645, 0.0648, 0.0593, 0.0541, 0.0631], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:27:42,704 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. 
limit=2.0 +2023-02-07 06:27:46,456 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186557.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:46,521 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186557.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 06:27:47,019 INFO [train.py:901] (0/4) Epoch 24, batch 650, loss[loss=0.2342, simple_loss=0.32, pruned_loss=0.0742, over 8444.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2865, pruned_loss=0.06048, over 1551985.89 frames. ], batch size: 29, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:27:49,556 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 06:28:01,232 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:07,489 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:15,660 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.377e+02 2.753e+02 3.513e+02 8.271e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 06:28:20,740 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:23,403 INFO [train.py:901] (0/4) Epoch 24, batch 700, loss[loss=0.2372, simple_loss=0.3146, pruned_loss=0.07987, over 7082.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2864, pruned_loss=0.06039, over 1565456.66 frames. ], batch size: 71, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:28:33,205 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186621.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:43,729 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186635.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:59,664 INFO [train.py:901] (0/4) Epoch 24, batch 750, loss[loss=0.1896, simple_loss=0.268, pruned_loss=0.05557, over 8151.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.286, pruned_loss=0.06014, over 1576374.96 frames. ], batch size: 22, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:11,917 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 06:29:21,535 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 06:29:27,091 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.532e+02 3.077e+02 4.008e+02 9.294e+02, threshold=6.153e+02, percent-clipped=8.0 +2023-02-07 06:29:35,751 INFO [train.py:901] (0/4) Epoch 24, batch 800, loss[loss=0.2146, simple_loss=0.3048, pruned_loss=0.0622, over 8328.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2855, pruned_loss=0.0599, over 1583887.58 frames. 
], batch size: 25, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:48,922 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:29:48,941 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9619, 1.4908, 3.3788, 1.5760, 2.3499, 3.7079, 3.8236, 3.1696], + device='cuda:0'), covar=tensor([0.1189, 0.1879, 0.0335, 0.2009, 0.1164, 0.0231, 0.0517, 0.0532], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0321, 0.0285, 0.0314, 0.0312, 0.0268, 0.0424, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:29:56,169 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186736.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:29:56,181 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6541, 1.9065, 1.6754, 2.2867, 0.9877, 1.5005, 1.7374, 1.8680], + device='cuda:0'), covar=tensor([0.0781, 0.0696, 0.0891, 0.0426, 0.1092, 0.1300, 0.0715, 0.0677], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0195, 0.0242, 0.0213, 0.0204, 0.0244, 0.0248, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 06:30:11,911 INFO [train.py:901] (0/4) Epoch 24, batch 850, loss[loss=0.1757, simple_loss=0.2613, pruned_loss=0.04503, over 7929.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2855, pruned_loss=0.0602, over 1589253.26 frames. ], batch size: 20, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:30:29,490 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:39,059 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.330e+02 2.764e+02 3.350e+02 7.186e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 06:30:43,835 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0 +2023-02-07 06:30:47,651 INFO [train.py:901] (0/4) Epoch 24, batch 900, loss[loss=0.2075, simple_loss=0.2989, pruned_loss=0.05811, over 7974.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.0594, over 1592899.91 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:05,955 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:08,567 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:24,270 INFO [train.py:901] (0/4) Epoch 24, batch 950, loss[loss=0.1881, simple_loss=0.2646, pruned_loss=0.05577, over 7927.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06019, over 1600518.61 frames. ], batch size: 20, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:24,442 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:24,504 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:39,988 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186879.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:43,459 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-07 06:31:48,329 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186891.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:52,257 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.324e+02 2.850e+02 3.567e+02 7.043e+02, threshold=5.700e+02, percent-clipped=2.0 +2023-02-07 06:31:53,149 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:55,086 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:59,853 INFO [train.py:901] (0/4) Epoch 24, batch 1000, loss[loss=0.2062, simple_loss=0.302, pruned_loss=0.05522, over 8454.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2862, pruned_loss=0.06009, over 1608489.41 frames. ], batch size: 25, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:32:05,645 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,385 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,459 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:20,491 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 06:32:29,429 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186948.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:32,167 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:33,360 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 06:32:36,101 INFO [train.py:901] (0/4) Epoch 24, batch 1050, loss[loss=0.196, simple_loss=0.2782, pruned_loss=0.05686, over 7549.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2854, pruned_loss=0.0594, over 1612828.32 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:32:58,701 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7448, 2.9748, 2.5428, 4.0567, 1.5275, 2.2676, 2.5264, 2.9363], + device='cuda:0'), covar=tensor([0.0607, 0.0718, 0.0671, 0.0227, 0.1068, 0.1122, 0.0893, 0.0780], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0195, 0.0242, 0.0213, 0.0204, 0.0244, 0.0248, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 06:33:01,536 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186992.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:02,855 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:04,739 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.475e+02 2.949e+02 3.829e+02 9.793e+02, threshold=5.897e+02, percent-clipped=8.0 +2023-02-07 06:33:12,439 INFO [train.py:901] (0/4) Epoch 24, batch 1100, loss[loss=0.1873, simple_loss=0.2777, pruned_loss=0.04841, over 8362.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2848, pruned_loss=0.05903, over 1610601.04 frames. 
], batch size: 24, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:12,711 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4696, 1.7154, 2.6478, 1.3585, 1.9302, 1.8142, 1.4921, 1.8767], + device='cuda:0'), covar=tensor([0.2018, 0.2721, 0.0917, 0.4831, 0.2043, 0.3418, 0.2617, 0.2390], + device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0615, 0.0556, 0.0650, 0.0653, 0.0600, 0.0544, 0.0634], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:33:18,325 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:19,056 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:37,504 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:38,266 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:45,685 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 06:33:47,119 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:48,344 INFO [train.py:901] (0/4) Epoch 24, batch 1150, loss[loss=0.1792, simple_loss=0.2588, pruned_loss=0.04983, over 8236.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2863, pruned_loss=0.05976, over 1615445.66 frames. ], batch size: 22, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:49,911 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0225, 1.6933, 3.4316, 1.7630, 2.5724, 3.7391, 3.8458, 3.2641], + device='cuda:0'), covar=tensor([0.1185, 0.1756, 0.0365, 0.1880, 0.1047, 0.0226, 0.0566, 0.0481], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0324, 0.0288, 0.0317, 0.0315, 0.0270, 0.0427, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:33:52,026 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:57,506 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187071.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:34:02,539 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6051, 2.5569, 1.8178, 2.3914, 2.0831, 1.6101, 2.1317, 2.2155], + device='cuda:0'), covar=tensor([0.1511, 0.0467, 0.1325, 0.0670, 0.0783, 0.1627, 0.1014, 0.0993], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0309, 0.0301, 0.0341, 0.0346, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 06:34:16,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.205e+02 2.742e+02 3.279e+02 6.267e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 06:34:24,624 INFO [train.py:901] (0/4) Epoch 24, batch 1200, loss[loss=0.1575, simple_loss=0.2428, pruned_loss=0.03608, over 7711.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.286, pruned_loss=0.05926, over 1618741.41 frames. 
], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:34:56,620 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:00,007 INFO [train.py:901] (0/4) Epoch 24, batch 1250, loss[loss=0.1477, simple_loss=0.2267, pruned_loss=0.03431, over 7685.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2861, pruned_loss=0.05922, over 1621847.62 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:15,142 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:19,761 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187186.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:27,952 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.417e+02 2.916e+02 3.659e+02 9.833e+02, threshold=5.832e+02, percent-clipped=6.0 +2023-02-07 06:35:30,862 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187202.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:35,739 INFO [train.py:901] (0/4) Epoch 24, batch 1300, loss[loss=0.1938, simple_loss=0.2739, pruned_loss=0.05679, over 8068.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2857, pruned_loss=0.05928, over 1623707.47 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:35,959 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187208.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:51,427 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-07 06:35:53,888 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187233.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:05,767 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187250.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:10,937 INFO [train.py:901] (0/4) Epoch 24, batch 1350, loss[loss=0.2253, simple_loss=0.304, pruned_loss=0.07328, over 8328.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2858, pruned_loss=0.05974, over 1618304.89 frames. ], batch size: 25, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:20,754 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:21,291 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:22,811 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,303 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,749 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.391e+02 3.088e+02 3.702e+02 1.176e+03, threshold=6.175e+02, percent-clipped=8.0 +2023-02-07 06:36:41,380 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187300.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:46,710 INFO [train.py:901] (0/4) Epoch 24, batch 1400, loss[loss=0.1687, simple_loss=0.2568, pruned_loss=0.04029, over 7640.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2861, pruned_loss=0.0601, over 1617064.19 frames. 
], batch size: 19, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:52,964 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187317.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:54,421 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:59,256 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:03,361 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4038, 1.5879, 2.0844, 1.3104, 1.4153, 1.6933, 1.4630, 1.3476], + device='cuda:0'), covar=tensor([0.2020, 0.2493, 0.0959, 0.4570, 0.2041, 0.3346, 0.2453, 0.2341], + device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0617, 0.0558, 0.0652, 0.0653, 0.0600, 0.0545, 0.0636], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:37:05,349 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4265, 2.1264, 1.6223, 1.9597, 1.8114, 1.4341, 1.7706, 1.7042], + device='cuda:0'), covar=tensor([0.1129, 0.0397, 0.1244, 0.0525, 0.0699, 0.1456, 0.0880, 0.0810], + device='cuda:0'), in_proj_covar=tensor([0.0353, 0.0233, 0.0335, 0.0308, 0.0300, 0.0338, 0.0346, 0.0316], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 06:37:12,868 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187344.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:16,721 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-07 06:37:21,713 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 06:37:22,425 INFO [train.py:901] (0/4) Epoch 24, batch 1450, loss[loss=0.1632, simple_loss=0.249, pruned_loss=0.03867, over 8076.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06054, over 1615577.11 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:37:42,313 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:43,076 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:44,410 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:49,534 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.482e+02 2.870e+02 4.012e+02 8.494e+02, threshold=5.740e+02, percent-clipped=8.0 +2023-02-07 06:37:51,135 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:56,982 INFO [train.py:901] (0/4) Epoch 24, batch 1500, loss[loss=0.1974, simple_loss=0.2979, pruned_loss=0.04852, over 8461.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2861, pruned_loss=0.0605, over 1612536.24 frames. 
], batch size: 25, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:22,153 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:33,246 INFO [train.py:901] (0/4) Epoch 24, batch 1550, loss[loss=0.1919, simple_loss=0.2765, pruned_loss=0.05358, over 8047.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2855, pruned_loss=0.06066, over 1608341.56 frames. ], batch size: 22, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:39,577 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:59,896 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.355e+02 2.764e+02 3.622e+02 7.454e+02, threshold=5.529e+02, percent-clipped=4.0 +2023-02-07 06:39:02,834 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:06,539 INFO [train.py:901] (0/4) Epoch 24, batch 1600, loss[loss=0.2129, simple_loss=0.3066, pruned_loss=0.05963, over 8355.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2849, pruned_loss=0.06055, over 1608318.54 frames. ], batch size: 26, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:11,504 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:22,051 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:27,620 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:30,378 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:41,433 INFO [train.py:901] (0/4) Epoch 24, batch 1650, loss[loss=0.17, simple_loss=0.2558, pruned_loss=0.04209, over 7246.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2848, pruned_loss=0.06058, over 1610677.10 frames. ], batch size: 16, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:52,499 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:09,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.451e+02 2.921e+02 3.516e+02 7.853e+02, threshold=5.842e+02, percent-clipped=7.0 +2023-02-07 06:40:09,872 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187598.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:10,442 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9315, 1.6601, 3.2240, 1.5379, 2.3003, 3.5134, 3.6471, 3.0991], + device='cuda:0'), covar=tensor([0.1199, 0.1619, 0.0394, 0.2186, 0.1149, 0.0243, 0.0593, 0.0508], + device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0319, 0.0283, 0.0312, 0.0311, 0.0267, 0.0422, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:40:16,425 INFO [train.py:901] (0/4) Epoch 24, batch 1700, loss[loss=0.1665, simple_loss=0.2614, pruned_loss=0.03578, over 8292.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.285, pruned_loss=0.06044, over 1614941.54 frames. 
], batch size: 23, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:40,843 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187644.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:50,906 INFO [train.py:901] (0/4) Epoch 24, batch 1750, loss[loss=0.2312, simple_loss=0.3118, pruned_loss=0.07533, over 8388.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2844, pruned_loss=0.06052, over 1611598.04 frames. ], batch size: 48, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:58,599 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:18,561 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.502e+02 3.000e+02 3.757e+02 9.885e+02, threshold=5.999e+02, percent-clipped=2.0 +2023-02-07 06:41:25,257 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 06:41:26,178 INFO [train.py:901] (0/4) Epoch 24, batch 1800, loss[loss=0.1502, simple_loss=0.2386, pruned_loss=0.0309, over 8031.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2836, pruned_loss=0.0597, over 1611469.27 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:43,267 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:59,216 INFO [train.py:901] (0/4) Epoch 24, batch 1850, loss[loss=0.2322, simple_loss=0.3131, pruned_loss=0.07565, over 8238.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2848, pruned_loss=0.06045, over 1613799.42 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:59,465 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187758.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:09,287 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:14,870 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6163, 1.5177, 2.8801, 1.3719, 2.2847, 3.0770, 3.2723, 2.6661], + device='cuda:0'), covar=tensor([0.1203, 0.1535, 0.0346, 0.2148, 0.0792, 0.0303, 0.0617, 0.0564], + device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0319, 0.0284, 0.0312, 0.0312, 0.0267, 0.0423, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:42:18,445 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:27,398 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:28,603 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 2.492e+02 2.912e+02 4.002e+02 8.326e+02, threshold=5.824e+02, percent-clipped=6.0 +2023-02-07 06:42:36,381 INFO [train.py:901] (0/4) Epoch 24, batch 1900, loss[loss=0.2043, simple_loss=0.291, pruned_loss=0.05882, over 8592.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2848, pruned_loss=0.06033, over 1613549.74 frames. 
], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:42:42,643 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8665, 5.9233, 5.1572, 2.7406, 5.3010, 5.6767, 5.4598, 5.4917], + device='cuda:0'), covar=tensor([0.0521, 0.0422, 0.0859, 0.4147, 0.0755, 0.0850, 0.1128, 0.0530], + device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0543, 0.0435, 0.0448, 0.0428, 0.0391], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:42:49,918 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 06:43:02,032 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 06:43:05,617 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 06:43:05,793 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:11,793 INFO [train.py:901] (0/4) Epoch 24, batch 1950, loss[loss=0.2007, simple_loss=0.2813, pruned_loss=0.06002, over 8125.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2847, pruned_loss=0.05991, over 1616230.15 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:43:18,405 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 06:43:22,597 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:27,982 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:30,722 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187885.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:39,203 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 2.375e+02 2.745e+02 3.412e+02 6.105e+02, threshold=5.491e+02, percent-clipped=1.0 +2023-02-07 06:43:39,247 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 06:43:42,654 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5420, 1.8172, 1.9622, 1.1909, 2.0831, 1.3176, 0.6958, 1.8163], + device='cuda:0'), covar=tensor([0.0825, 0.0494, 0.0380, 0.0874, 0.0538, 0.1399, 0.1097, 0.0421], + device='cuda:0'), in_proj_covar=tensor([0.0458, 0.0401, 0.0355, 0.0451, 0.0385, 0.0541, 0.0394, 0.0426], + device='cuda:0'), out_proj_covar=tensor([1.2231e-04, 1.0479e-04, 9.3321e-05, 1.1851e-04, 1.0101e-04, 1.5216e-04, + 1.0605e-04, 1.1231e-04], device='cuda:0') +2023-02-07 06:43:46,280 INFO [train.py:901] (0/4) Epoch 24, batch 2000, loss[loss=0.2007, simple_loss=0.2858, pruned_loss=0.05782, over 7654.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2847, pruned_loss=0.0601, over 1614222.92 frames. ], batch size: 19, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:21,546 INFO [train.py:901] (0/4) Epoch 24, batch 2050, loss[loss=0.2184, simple_loss=0.2952, pruned_loss=0.07082, over 8141.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2861, pruned_loss=0.06063, over 1618284.27 frames. 
], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:27,168 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2062, 2.0434, 2.6408, 2.2376, 2.6196, 2.2587, 2.0398, 1.4904], + device='cuda:0'), covar=tensor([0.5686, 0.5080, 0.2099, 0.3878, 0.2658, 0.3274, 0.2080, 0.5384], + device='cuda:0'), in_proj_covar=tensor([0.0951, 0.0999, 0.0816, 0.0964, 0.1001, 0.0907, 0.0761, 0.0833], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:44:34,897 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2310, 1.9998, 2.5708, 2.1573, 2.5663, 2.2841, 2.1009, 1.3878], + device='cuda:0'), covar=tensor([0.5571, 0.4895, 0.2007, 0.3907, 0.2437, 0.3302, 0.2146, 0.5202], + device='cuda:0'), in_proj_covar=tensor([0.0950, 0.0998, 0.0815, 0.0963, 0.1000, 0.0907, 0.0761, 0.0832], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:44:35,436 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2479, 3.1561, 2.9688, 1.6738, 2.8740, 2.9213, 2.8596, 2.8335], + device='cuda:0'), covar=tensor([0.1164, 0.0847, 0.1331, 0.4293, 0.1115, 0.1202, 0.1653, 0.1008], + device='cuda:0'), in_proj_covar=tensor([0.0527, 0.0445, 0.0434, 0.0541, 0.0433, 0.0447, 0.0425, 0.0387], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:44:42,290 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:46,897 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:48,823 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.387e+02 2.958e+02 3.531e+02 6.524e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 06:44:50,345 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-188000.pt +2023-02-07 06:44:51,434 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:56,704 INFO [train.py:901] (0/4) Epoch 24, batch 2100, loss[loss=0.1536, simple_loss=0.2357, pruned_loss=0.03576, over 7701.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.05991, over 1620768.85 frames. ], batch size: 18, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:45:04,573 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.88 vs. limit=5.0 +2023-02-07 06:45:32,168 INFO [train.py:901] (0/4) Epoch 24, batch 2150, loss[loss=0.1554, simple_loss=0.2361, pruned_loss=0.03739, over 7803.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.06005, over 1619204.56 frames. 
], batch size: 19, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:45:45,161 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3938, 1.6232, 1.6606, 1.1626, 1.6613, 1.3205, 0.3339, 1.6046], + device='cuda:0'), covar=tensor([0.0465, 0.0355, 0.0316, 0.0483, 0.0468, 0.0936, 0.0878, 0.0263], + device='cuda:0'), in_proj_covar=tensor([0.0456, 0.0398, 0.0353, 0.0448, 0.0383, 0.0537, 0.0392, 0.0424], + device='cuda:0'), out_proj_covar=tensor([1.2176e-04, 1.0405e-04, 9.2593e-05, 1.1775e-04, 1.0065e-04, 1.5101e-04, + 1.0535e-04, 1.1185e-04], device='cuda:0') +2023-02-07 06:45:58,768 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.464e+02 3.048e+02 3.692e+02 7.821e+02, threshold=6.095e+02, percent-clipped=5.0 +2023-02-07 06:46:03,905 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:05,707 INFO [train.py:901] (0/4) Epoch 24, batch 2200, loss[loss=0.2396, simple_loss=0.3227, pruned_loss=0.07828, over 8537.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2852, pruned_loss=0.05962, over 1620585.08 frames. ], batch size: 49, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:46:21,941 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188130.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:41,365 INFO [train.py:901] (0/4) Epoch 24, batch 2250, loss[loss=0.2091, simple_loss=0.3078, pruned_loss=0.05518, over 8257.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.05876, over 1616798.78 frames. ], batch size: 24, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:46:46,285 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 06:46:58,996 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1356, 4.0934, 3.6723, 2.0307, 3.6119, 3.7294, 3.6738, 3.5775], + device='cuda:0'), covar=tensor([0.0858, 0.0656, 0.1178, 0.4475, 0.0951, 0.1151, 0.1366, 0.0836], + device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0445, 0.0436, 0.0543, 0.0433, 0.0447, 0.0427, 0.0389], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 06:47:09,411 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.349e+02 3.047e+02 3.898e+02 9.680e+02, threshold=6.095e+02, percent-clipped=4.0 +2023-02-07 06:47:16,302 INFO [train.py:901] (0/4) Epoch 24, batch 2300, loss[loss=0.1877, simple_loss=0.2635, pruned_loss=0.05596, over 5576.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2832, pruned_loss=0.0587, over 1613384.90 frames. ], batch size: 12, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:42,352 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:47,116 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:50,571 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:51,705 INFO [train.py:901] (0/4) Epoch 24, batch 2350, loss[loss=0.2771, simple_loss=0.3402, pruned_loss=0.107, over 6996.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05895, over 1616808.52 frames. 
], batch size: 71, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:00,163 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188270.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:05,490 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:08,173 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188281.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:20,113 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.514e+02 3.085e+02 3.939e+02 8.316e+02, threshold=6.171e+02, percent-clipped=4.0 +2023-02-07 06:48:21,112 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2743, 2.0828, 2.7035, 2.3308, 2.7101, 2.3401, 2.1491, 1.6190], + device='cuda:0'), covar=tensor([0.5551, 0.5149, 0.1875, 0.3398, 0.2413, 0.3100, 0.2001, 0.5323], + device='cuda:0'), in_proj_covar=tensor([0.0948, 0.0998, 0.0816, 0.0960, 0.1000, 0.0906, 0.0760, 0.0834], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:48:22,415 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:27,108 INFO [train.py:901] (0/4) Epoch 24, batch 2400, loss[loss=0.2237, simple_loss=0.2928, pruned_loss=0.0773, over 7782.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2845, pruned_loss=0.05926, over 1612162.78 frames. ], batch size: 19, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:55,189 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-07 06:49:02,253 INFO [train.py:901] (0/4) Epoch 24, batch 2450, loss[loss=0.2043, simple_loss=0.2764, pruned_loss=0.06612, over 8090.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.0592, over 1612054.44 frames. ], batch size: 21, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:49:20,761 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5374, 1.7980, 1.8543, 1.1704, 1.9343, 1.4367, 0.4484, 1.7956], + device='cuda:0'), covar=tensor([0.0548, 0.0368, 0.0301, 0.0599, 0.0399, 0.0921, 0.0943, 0.0278], + device='cuda:0'), in_proj_covar=tensor([0.0458, 0.0399, 0.0355, 0.0451, 0.0384, 0.0540, 0.0395, 0.0426], + device='cuda:0'), out_proj_covar=tensor([1.2227e-04, 1.0430e-04, 9.3155e-05, 1.1845e-04, 1.0094e-04, 1.5171e-04, + 1.0623e-04, 1.1246e-04], device='cuda:0') +2023-02-07 06:49:30,924 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.263e+02 2.982e+02 3.612e+02 7.179e+02, threshold=5.965e+02, percent-clipped=1.0 +2023-02-07 06:49:38,398 INFO [train.py:901] (0/4) Epoch 24, batch 2500, loss[loss=0.1821, simple_loss=0.2666, pruned_loss=0.04877, over 7977.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.05882, over 1614259.60 frames. ], batch size: 21, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:02,885 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.19 vs. limit=5.0 +2023-02-07 06:50:11,879 INFO [train.py:901] (0/4) Epoch 24, batch 2550, loss[loss=0.222, simple_loss=0.2891, pruned_loss=0.07745, over 7430.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2837, pruned_loss=0.05896, over 1615505.06 frames. ], batch size: 17, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:17,708 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-07 06:50:34,558 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3883, 1.6719, 1.6820, 1.0695, 1.6816, 1.4399, 0.2946, 1.6280], + device='cuda:0'), covar=tensor([0.0502, 0.0401, 0.0354, 0.0536, 0.0455, 0.0998, 0.0911, 0.0288], + device='cuda:0'), in_proj_covar=tensor([0.0462, 0.0402, 0.0357, 0.0454, 0.0387, 0.0543, 0.0398, 0.0430], + device='cuda:0'), out_proj_covar=tensor([1.2320e-04, 1.0520e-04, 9.3852e-05, 1.1930e-04, 1.0164e-04, 1.5275e-04, + 1.0707e-04, 1.1345e-04], device='cuda:0') +2023-02-07 06:50:40,457 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 2.538e+02 2.905e+02 3.766e+02 9.788e+02, threshold=5.809e+02, percent-clipped=4.0 +2023-02-07 06:50:40,654 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7795, 1.6441, 1.9280, 1.6169, 1.1593, 1.6892, 2.1552, 2.1286], + device='cuda:0'), covar=tensor([0.0496, 0.1276, 0.1675, 0.1457, 0.0602, 0.1467, 0.0669, 0.0600], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 06:50:41,289 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:50:47,911 INFO [train.py:901] (0/4) Epoch 24, batch 2600, loss[loss=0.1562, simple_loss=0.2419, pruned_loss=0.03526, over 7531.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.05816, over 1619327.06 frames. ], batch size: 18, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:00,092 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 06:51:21,917 INFO [train.py:901] (0/4) Epoch 24, batch 2650, loss[loss=0.1815, simple_loss=0.2578, pruned_loss=0.0526, over 7528.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2841, pruned_loss=0.05831, over 1621490.26 frames. ], batch size: 18, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:48,597 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.338e+02 2.924e+02 3.924e+02 7.774e+02, threshold=5.847e+02, percent-clipped=4.0 +2023-02-07 06:51:55,401 INFO [train.py:901] (0/4) Epoch 24, batch 2700, loss[loss=0.1619, simple_loss=0.2484, pruned_loss=0.03766, over 7224.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2842, pruned_loss=0.05865, over 1621060.49 frames. ], batch size: 16, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:21,721 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:52:31,277 INFO [train.py:901] (0/4) Epoch 24, batch 2750, loss[loss=0.1924, simple_loss=0.2811, pruned_loss=0.05188, over 8533.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2851, pruned_loss=0.05919, over 1627090.40 frames. ], batch size: 28, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:37,532 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 06:52:41,607 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-02-07 06:52:44,035 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1526, 1.3559, 1.5993, 1.3376, 0.7878, 1.4061, 1.2737, 1.0020], + device='cuda:0'), covar=tensor([0.0617, 0.1264, 0.1617, 0.1394, 0.0552, 0.1406, 0.0688, 0.0719], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 06:52:57,784 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.375e+02 3.087e+02 4.139e+02 1.460e+03, threshold=6.174e+02, percent-clipped=4.0 +2023-02-07 06:53:05,388 INFO [train.py:901] (0/4) Epoch 24, batch 2800, loss[loss=0.1776, simple_loss=0.2677, pruned_loss=0.04375, over 7923.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2851, pruned_loss=0.05913, over 1621102.72 frames. ], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:40,611 INFO [train.py:901] (0/4) Epoch 24, batch 2850, loss[loss=0.2152, simple_loss=0.2959, pruned_loss=0.06726, over 8180.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.285, pruned_loss=0.05914, over 1620386.76 frames. ], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:42,158 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:07,924 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.346e+02 3.064e+02 3.754e+02 6.997e+02, threshold=6.129e+02, percent-clipped=3.0 +2023-02-07 06:54:14,861 INFO [train.py:901] (0/4) Epoch 24, batch 2900, loss[loss=0.2434, simple_loss=0.3287, pruned_loss=0.07905, over 8242.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2855, pruned_loss=0.05982, over 1616071.96 frames. ], batch size: 24, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:54:21,524 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 06:54:39,464 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:51,396 INFO [train.py:901] (0/4) Epoch 24, batch 2950, loss[loss=0.1977, simple_loss=0.2901, pruned_loss=0.05263, over 8196.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2846, pruned_loss=0.05922, over 1614020.60 frames. ], batch size: 23, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:54:51,439 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-07 06:55:04,756 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1604, 1.9181, 2.5369, 2.1378, 2.5313, 2.2155, 2.0046, 1.3923], + device='cuda:0'), covar=tensor([0.5545, 0.5068, 0.2023, 0.3720, 0.2560, 0.3182, 0.1958, 0.5588], + device='cuda:0'), in_proj_covar=tensor([0.0949, 0.0993, 0.0816, 0.0958, 0.0997, 0.0907, 0.0757, 0.0833], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:55:15,785 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1667, 1.4800, 4.3559, 1.7413, 2.4733, 4.9470, 5.1342, 4.3488], + device='cuda:0'), covar=tensor([0.1293, 0.1963, 0.0289, 0.2249, 0.1187, 0.0212, 0.0585, 0.0564], + device='cuda:0'), in_proj_covar=tensor([0.0299, 0.0323, 0.0285, 0.0316, 0.0315, 0.0270, 0.0426, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 06:55:19,116 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.217e+02 2.685e+02 3.700e+02 9.567e+02, threshold=5.370e+02, percent-clipped=4.0 +2023-02-07 06:55:25,891 INFO [train.py:901] (0/4) Epoch 24, batch 3000, loss[loss=0.1854, simple_loss=0.2796, pruned_loss=0.04561, over 7796.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2856, pruned_loss=0.05934, over 1616601.35 frames. ], batch size: 20, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:55:25,891 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 06:55:39,548 INFO [train.py:935] (0/4) Epoch 24, validation: loss=0.1724, simple_loss=0.2726, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 06:55:39,549 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 06:55:46,717 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 06:56:05,990 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:07,356 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:13,960 INFO [train.py:901] (0/4) Epoch 24, batch 3050, loss[loss=0.2076, simple_loss=0.2993, pruned_loss=0.05798, over 8366.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2858, pruned_loss=0.05979, over 1617013.34 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:14,161 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:41,562 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.422e+02 3.010e+02 3.817e+02 9.746e+02, threshold=6.020e+02, percent-clipped=4.0 +2023-02-07 06:56:49,123 INFO [train.py:901] (0/4) Epoch 24, batch 3100, loss[loss=0.2181, simple_loss=0.2983, pruned_loss=0.06899, over 8029.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2868, pruned_loss=0.06072, over 1614523.83 frames. 
], batch size: 22, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:54,797 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:12,097 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:16,965 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:23,538 INFO [train.py:901] (0/4) Epoch 24, batch 3150, loss[loss=0.2136, simple_loss=0.2974, pruned_loss=0.06485, over 7983.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.0611, over 1615274.51 frames. ], batch size: 21, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:57:40,667 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:50,859 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:51,169 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 06:57:51,369 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.394e+02 2.917e+02 3.565e+02 6.979e+02, threshold=5.834e+02, percent-clipped=3.0 +2023-02-07 06:57:59,743 INFO [train.py:901] (0/4) Epoch 24, batch 3200, loss[loss=0.1746, simple_loss=0.2612, pruned_loss=0.04396, over 8356.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06092, over 1612945.51 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:58:33,957 INFO [train.py:901] (0/4) Epoch 24, batch 3250, loss[loss=0.2324, simple_loss=0.3081, pruned_loss=0.0784, over 8497.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.287, pruned_loss=0.06094, over 1614968.66 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:01,531 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.438e+02 3.003e+02 3.759e+02 6.490e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-07 06:59:08,533 INFO [train.py:901] (0/4) Epoch 24, batch 3300, loss[loss=0.1806, simple_loss=0.275, pruned_loss=0.04309, over 8608.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2858, pruned_loss=0.06047, over 1615745.90 frames. ], batch size: 34, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:10,274 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-07 06:59:12,860 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:13,785 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. 
limit=2.0 +2023-02-07 06:59:26,873 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8485, 1.4329, 1.6326, 1.3185, 0.9809, 1.4936, 1.6701, 1.4884], + device='cuda:0'), covar=tensor([0.0516, 0.1248, 0.1635, 0.1448, 0.0603, 0.1420, 0.0664, 0.0630], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0154, 0.0190, 0.0160, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 06:59:30,912 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:37,688 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1489, 2.0092, 2.5826, 2.1381, 2.6211, 2.2218, 2.0046, 1.4450], + device='cuda:0'), covar=tensor([0.5699, 0.5029, 0.1995, 0.3817, 0.2441, 0.3155, 0.1979, 0.5398], + device='cuda:0'), in_proj_covar=tensor([0.0941, 0.0987, 0.0808, 0.0951, 0.0991, 0.0900, 0.0751, 0.0825], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 06:59:44,169 INFO [train.py:901] (0/4) Epoch 24, batch 3350, loss[loss=0.192, simple_loss=0.272, pruned_loss=0.05602, over 7723.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2866, pruned_loss=0.06048, over 1622266.01 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:49,403 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189266.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:56,566 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4759, 1.3819, 1.8274, 1.2313, 1.1320, 1.7422, 0.1814, 1.1753], + device='cuda:0'), covar=tensor([0.1569, 0.1398, 0.0428, 0.1076, 0.2588, 0.0566, 0.2172, 0.1321], + device='cuda:0'), in_proj_covar=tensor([0.0192, 0.0200, 0.0131, 0.0221, 0.0271, 0.0138, 0.0171, 0.0195], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 07:00:05,776 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:07,043 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189293.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:10,342 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.367e+02 2.967e+02 3.575e+02 9.298e+02, threshold=5.934e+02, percent-clipped=5.0 +2023-02-07 07:00:17,748 INFO [train.py:901] (0/4) Epoch 24, batch 3400, loss[loss=0.1969, simple_loss=0.2823, pruned_loss=0.05573, over 8466.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2873, pruned_loss=0.06122, over 1621052.20 frames. ], batch size: 25, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:52,454 INFO [train.py:901] (0/4) Epoch 24, batch 3450, loss[loss=0.1995, simple_loss=0.2804, pruned_loss=0.05927, over 8188.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2871, pruned_loss=0.06083, over 1624250.16 frames. 
], batch size: 23, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:57,502 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:16,585 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:20,612 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.399e+02 2.884e+02 3.624e+02 7.571e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 07:01:26,250 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:26,979 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5996, 2.3726, 3.0808, 2.5544, 3.0477, 2.6192, 2.4820, 1.9045], + device='cuda:0'), covar=tensor([0.5551, 0.4944, 0.2043, 0.3996, 0.2614, 0.3017, 0.1803, 0.5563], + device='cuda:0'), in_proj_covar=tensor([0.0940, 0.0988, 0.0808, 0.0954, 0.0993, 0.0901, 0.0752, 0.0825], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 07:01:27,411 INFO [train.py:901] (0/4) Epoch 24, batch 3500, loss[loss=0.1687, simple_loss=0.2479, pruned_loss=0.04478, over 7510.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2869, pruned_loss=0.06115, over 1621006.99 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:01:27,606 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:40,527 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 07:01:40,594 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189426.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:50,792 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189441.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:58,888 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5653, 1.6641, 4.7678, 1.9841, 4.3197, 3.9990, 4.3277, 4.1752], + device='cuda:0'), covar=tensor([0.0566, 0.4362, 0.0479, 0.4014, 0.1006, 0.0892, 0.0523, 0.0683], + device='cuda:0'), in_proj_covar=tensor([0.0647, 0.0654, 0.0714, 0.0643, 0.0722, 0.0619, 0.0615, 0.0694], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:02:03,504 INFO [train.py:901] (0/4) Epoch 24, batch 3550, loss[loss=0.2087, simple_loss=0.2937, pruned_loss=0.06184, over 8435.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.06108, over 1622596.34 frames. ], batch size: 48, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:31,306 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.509e+02 2.981e+02 3.708e+02 7.370e+02, threshold=5.962e+02, percent-clipped=4.0 +2023-02-07 07:02:37,626 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:02:38,143 INFO [train.py:901] (0/4) Epoch 24, batch 3600, loss[loss=0.1815, simple_loss=0.2763, pruned_loss=0.04333, over 8460.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06082, over 1612815.95 frames. 
], batch size: 27, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:45,840 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189519.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:02:47,253 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5507, 1.6330, 4.7702, 1.7401, 4.2555, 3.9489, 4.3360, 4.2273], + device='cuda:0'), covar=tensor([0.0663, 0.4564, 0.0482, 0.4274, 0.1049, 0.0935, 0.0579, 0.0664], + device='cuda:0'), in_proj_covar=tensor([0.0648, 0.0656, 0.0714, 0.0645, 0.0724, 0.0621, 0.0617, 0.0695], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:03:01,844 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:12,181 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189556.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:13,420 INFO [train.py:901] (0/4) Epoch 24, batch 3650, loss[loss=0.2103, simple_loss=0.304, pruned_loss=0.05829, over 8490.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.287, pruned_loss=0.06158, over 1613293.25 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:14,316 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6972, 2.0164, 2.0934, 1.3903, 2.2046, 1.4408, 0.7733, 1.9487], + device='cuda:0'), covar=tensor([0.0688, 0.0384, 0.0313, 0.0637, 0.0437, 0.0955, 0.0941, 0.0365], + device='cuda:0'), in_proj_covar=tensor([0.0458, 0.0398, 0.0354, 0.0450, 0.0385, 0.0539, 0.0394, 0.0427], + device='cuda:0'), out_proj_covar=tensor([1.2221e-04, 1.0402e-04, 9.2805e-05, 1.1810e-04, 1.0125e-04, 1.5143e-04, + 1.0595e-04, 1.1258e-04], device='cuda:0') +2023-02-07 07:03:41,145 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 07:03:41,748 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.496e+02 2.930e+02 3.600e+02 6.319e+02, threshold=5.860e+02, percent-clipped=2.0 +2023-02-07 07:03:48,375 INFO [train.py:901] (0/4) Epoch 24, batch 3700, loss[loss=0.1858, simple_loss=0.2626, pruned_loss=0.05449, over 7202.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2866, pruned_loss=0.06149, over 1608462.50 frames. 
], batch size: 16, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:49,804 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:56,018 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0995, 1.8009, 2.2939, 1.9272, 2.2885, 2.1376, 1.9524, 1.1520], + device='cuda:0'), covar=tensor([0.5675, 0.5194, 0.2093, 0.3760, 0.2585, 0.3279, 0.1947, 0.5305], + device='cuda:0'), in_proj_covar=tensor([0.0945, 0.0993, 0.0811, 0.0960, 0.0999, 0.0906, 0.0755, 0.0830], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 07:04:10,923 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5799, 1.2773, 1.6478, 1.2515, 0.9514, 1.4471, 1.4796, 1.3090], + device='cuda:0'), covar=tensor([0.0574, 0.1271, 0.1641, 0.1532, 0.0580, 0.1454, 0.0714, 0.0673], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0159, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 07:04:23,135 INFO [train.py:901] (0/4) Epoch 24, batch 3750, loss[loss=0.1906, simple_loss=0.282, pruned_loss=0.04959, over 8295.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2867, pruned_loss=0.06118, over 1610940.48 frames. ], batch size: 23, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:23,273 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189658.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:26,007 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:27,231 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189664.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:42,767 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:43,971 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189689.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:51,128 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.609e+02 3.129e+02 4.249e+02 7.016e+02, threshold=6.258e+02, percent-clipped=8.0 +2023-02-07 07:04:51,254 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5578, 4.4863, 4.0182, 2.2583, 3.9409, 4.1862, 4.0160, 3.9687], + device='cuda:0'), covar=tensor([0.0692, 0.0513, 0.1023, 0.4377, 0.0864, 0.0883, 0.1233, 0.0774], + device='cuda:0'), in_proj_covar=tensor([0.0523, 0.0441, 0.0431, 0.0537, 0.0427, 0.0444, 0.0423, 0.0387], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:04:57,807 INFO [train.py:901] (0/4) Epoch 24, batch 3800, loss[loss=0.2404, simple_loss=0.3151, pruned_loss=0.08284, over 8442.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2863, pruned_loss=0.06064, over 1612025.25 frames. 
], batch size: 27, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:58,579 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:07,483 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:09,540 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:24,350 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189746.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:27,567 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6288, 1.9349, 2.9310, 1.4757, 2.1740, 2.1354, 1.6789, 2.1820], + device='cuda:0'), covar=tensor([0.2090, 0.2864, 0.0922, 0.4983, 0.2085, 0.3321, 0.2602, 0.2416], + device='cuda:0'), in_proj_covar=tensor([0.0530, 0.0618, 0.0559, 0.0657, 0.0655, 0.0602, 0.0550, 0.0640], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:05:32,056 INFO [train.py:901] (0/4) Epoch 24, batch 3850, loss[loss=0.1812, simple_loss=0.2692, pruned_loss=0.04661, over 8240.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2853, pruned_loss=0.05978, over 1611395.62 frames. ], batch size: 22, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:05:35,651 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:47,651 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 07:05:53,006 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:59,077 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:59,532 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.407e+02 2.910e+02 3.432e+02 8.251e+02, threshold=5.819e+02, percent-clipped=1.0 +2023-02-07 07:06:02,174 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-07 07:06:06,335 INFO [train.py:901] (0/4) Epoch 24, batch 3900, loss[loss=0.1798, simple_loss=0.2765, pruned_loss=0.04157, over 8509.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2864, pruned_loss=0.06031, over 1612032.60 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:10,103 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189812.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:17,538 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:18,887 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189824.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:27,531 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:42,140 INFO [train.py:901] (0/4) Epoch 24, batch 3950, loss[loss=0.2059, simple_loss=0.2743, pruned_loss=0.06871, over 7922.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2843, pruned_loss=0.05965, over 1608193.98 frames. 
], batch size: 20, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:45,540 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:07:09,585 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.387e+02 3.217e+02 3.997e+02 8.874e+02, threshold=6.434e+02, percent-clipped=5.0 +2023-02-07 07:07:14,574 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.21 vs. limit=5.0 +2023-02-07 07:07:16,315 INFO [train.py:901] (0/4) Epoch 24, batch 4000, loss[loss=0.2075, simple_loss=0.2958, pruned_loss=0.05958, over 8196.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2848, pruned_loss=0.05983, over 1610080.74 frames. ], batch size: 23, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:07:45,303 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3809, 1.6479, 4.5432, 1.6719, 4.0572, 3.7475, 4.0838, 3.9962], + device='cuda:0'), covar=tensor([0.0634, 0.4581, 0.0528, 0.4359, 0.1070, 0.0978, 0.0661, 0.0666], + device='cuda:0'), in_proj_covar=tensor([0.0652, 0.0660, 0.0717, 0.0646, 0.0724, 0.0624, 0.0622, 0.0696], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:07:51,144 INFO [train.py:901] (0/4) Epoch 24, batch 4050, loss[loss=0.2464, simple_loss=0.3338, pruned_loss=0.07951, over 8564.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2845, pruned_loss=0.05981, over 1612124.33 frames. ], batch size: 31, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:08:05,463 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:07,495 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:18,677 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.334e+02 2.770e+02 3.399e+02 1.124e+03, threshold=5.539e+02, percent-clipped=1.0 +2023-02-07 07:08:18,872 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1553, 1.2628, 1.5471, 1.2128, 0.7607, 1.3527, 1.1793, 0.9027], + device='cuda:0'), covar=tensor([0.0653, 0.1321, 0.1621, 0.1530, 0.0600, 0.1536, 0.0752, 0.0771], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 07:08:20,165 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-190000.pt +2023-02-07 07:08:22,559 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:25,394 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:27,378 INFO [train.py:901] (0/4) Epoch 24, batch 4100, loss[loss=0.2128, simple_loss=0.2889, pruned_loss=0.0684, over 8344.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.284, pruned_loss=0.05942, over 1610336.08 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:02,436 INFO [train.py:901] (0/4) Epoch 24, batch 4150, loss[loss=0.2725, simple_loss=0.3463, pruned_loss=0.09941, over 8627.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.06001, over 1609909.74 frames. 
], batch size: 34, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:08,092 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:13,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8074, 1.4466, 3.2488, 1.4146, 2.2380, 3.5240, 3.7178, 3.0106], + device='cuda:0'), covar=tensor([0.1295, 0.1824, 0.0351, 0.2204, 0.1027, 0.0251, 0.0556, 0.0566], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0323, 0.0286, 0.0315, 0.0314, 0.0271, 0.0427, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:09:17,887 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190080.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:25,157 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:30,579 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 2.377e+02 2.724e+02 3.400e+02 7.023e+02, threshold=5.448e+02, percent-clipped=3.0 +2023-02-07 07:09:35,501 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:37,391 INFO [train.py:901] (0/4) Epoch 24, batch 4200, loss[loss=0.2279, simple_loss=0.3093, pruned_loss=0.07319, over 8722.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2854, pruned_loss=0.06013, over 1609558.74 frames. ], batch size: 49, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:09:43,677 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:48,186 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 07:10:10,655 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 07:10:11,421 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190157.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:10:11,874 INFO [train.py:901] (0/4) Epoch 24, batch 4250, loss[loss=0.2562, simple_loss=0.3339, pruned_loss=0.08925, over 8562.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2863, pruned_loss=0.06072, over 1612059.17 frames. ], batch size: 34, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:19,337 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190169.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:28,694 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:40,140 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.309e+02 2.865e+02 3.517e+02 8.092e+02, threshold=5.730e+02, percent-clipped=6.0 +2023-02-07 07:10:45,750 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190205.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:47,629 INFO [train.py:901] (0/4) Epoch 24, batch 4300, loss[loss=0.2142, simple_loss=0.2994, pruned_loss=0.0645, over 8482.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2872, pruned_loss=0.06035, over 1620213.43 frames. ], batch size: 29, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:56,873 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. 
limit=2.0 +2023-02-07 07:11:05,306 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:21,910 INFO [train.py:901] (0/4) Epoch 24, batch 4350, loss[loss=0.1867, simple_loss=0.282, pruned_loss=0.04566, over 8110.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2866, pruned_loss=0.06018, over 1618356.67 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:11:22,126 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8404, 1.7177, 2.3666, 1.6247, 1.4127, 2.3135, 0.3746, 1.4366], + device='cuda:0'), covar=tensor([0.1499, 0.1324, 0.0345, 0.0976, 0.2294, 0.0424, 0.1949, 0.1202], + device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0201, 0.0130, 0.0222, 0.0271, 0.0138, 0.0171, 0.0195], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 07:11:22,812 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190259.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:40,181 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 07:11:48,230 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 07:11:50,408 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.430e+02 2.823e+02 3.493e+02 1.012e+03, threshold=5.646e+02, percent-clipped=3.0 +2023-02-07 07:11:57,331 INFO [train.py:901] (0/4) Epoch 24, batch 4400, loss[loss=0.2404, simple_loss=0.3132, pruned_loss=0.08381, over 8361.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.286, pruned_loss=0.05988, over 1619209.26 frames. ], batch size: 24, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:15,560 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:12:23,152 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 07:12:32,980 INFO [train.py:901] (0/4) Epoch 24, batch 4450, loss[loss=0.211, simple_loss=0.3005, pruned_loss=0.0607, over 8338.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2858, pruned_loss=0.05937, over 1618836.90 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:43,417 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:00,241 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 2.201e+02 2.691e+02 3.403e+02 6.534e+02, threshold=5.381e+02, percent-clipped=2.0 +2023-02-07 07:13:00,471 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:07,778 INFO [train.py:901] (0/4) Epoch 24, batch 4500, loss[loss=0.2058, simple_loss=0.289, pruned_loss=0.06134, over 8636.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2859, pruned_loss=0.05964, over 1620972.05 frames. ], batch size: 34, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:14,918 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:17,415 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 07:13:29,010 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190437.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:29,617 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2011, 1.3515, 1.5953, 1.2844, 0.7325, 1.4432, 1.2131, 1.0733], + device='cuda:0'), covar=tensor([0.0596, 0.1253, 0.1627, 0.1436, 0.0551, 0.1423, 0.0668, 0.0709], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 07:13:42,697 INFO [train.py:901] (0/4) Epoch 24, batch 4550, loss[loss=0.2109, simple_loss=0.3035, pruned_loss=0.05917, over 8245.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.286, pruned_loss=0.06026, over 1618652.09 frames. ], batch size: 24, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:44,857 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:45,491 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:46,754 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5239, 2.4713, 1.7882, 2.4228, 2.1621, 1.4508, 2.1292, 2.2244], + device='cuda:0'), covar=tensor([0.1561, 0.0472, 0.1349, 0.0556, 0.0823, 0.1754, 0.1025, 0.0976], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0234, 0.0337, 0.0311, 0.0300, 0.0343, 0.0346, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 07:13:54,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8444, 1.5690, 3.1570, 1.5121, 2.3217, 3.3847, 3.5196, 2.9051], + device='cuda:0'), covar=tensor([0.1181, 0.1694, 0.0323, 0.2078, 0.0940, 0.0255, 0.0619, 0.0564], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0323, 0.0285, 0.0316, 0.0314, 0.0271, 0.0428, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:14:02,589 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:10,717 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.430e+02 2.973e+02 3.981e+02 9.647e+02, threshold=5.946e+02, percent-clipped=9.0 +2023-02-07 07:14:12,830 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190501.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:14:17,566 INFO [train.py:901] (0/4) Epoch 24, batch 4600, loss[loss=0.2335, simple_loss=0.3121, pruned_loss=0.07746, over 8591.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2866, pruned_loss=0.06056, over 1617621.45 frames. 
], batch size: 34, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:14:21,247 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:27,617 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0411, 1.7406, 3.2142, 1.6029, 2.3744, 3.5236, 3.6497, 3.0197], + device='cuda:0'), covar=tensor([0.1169, 0.1656, 0.0386, 0.2114, 0.1066, 0.0279, 0.0635, 0.0614], + device='cuda:0'), in_proj_covar=tensor([0.0297, 0.0323, 0.0285, 0.0315, 0.0314, 0.0270, 0.0428, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:14:35,973 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6493, 2.2564, 4.0708, 1.4028, 2.8025, 2.1379, 1.8567, 2.7275], + device='cuda:0'), covar=tensor([0.2093, 0.2697, 0.0832, 0.4960, 0.2074, 0.3399, 0.2487, 0.2828], + device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0622, 0.0562, 0.0661, 0.0658, 0.0605, 0.0553, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:14:54,265 INFO [train.py:901] (0/4) Epoch 24, batch 4650, loss[loss=0.1843, simple_loss=0.2643, pruned_loss=0.05212, over 7809.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.05994, over 1617002.62 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:19,160 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8056, 1.5711, 1.6773, 1.5286, 1.0992, 1.6084, 1.7135, 1.5500], + device='cuda:0'), covar=tensor([0.0568, 0.0955, 0.1273, 0.1154, 0.0578, 0.1123, 0.0703, 0.0521], + device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0188, 0.0160, 0.0100, 0.0162, 0.0111, 0.0144], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 07:15:22,378 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.469e+02 2.987e+02 3.787e+02 1.231e+03, threshold=5.974e+02, percent-clipped=5.0 +2023-02-07 07:15:29,192 INFO [train.py:901] (0/4) Epoch 24, batch 4700, loss[loss=0.2357, simple_loss=0.3192, pruned_loss=0.07609, over 8360.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2862, pruned_loss=0.06024, over 1616757.08 frames. 
], batch size: 24, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:29,951 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190609.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:15:34,413 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190616.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:15:41,741 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:15:42,469 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190628.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:15:57,393 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8691, 1.5955, 3.3493, 1.5784, 2.3198, 3.6394, 3.7542, 3.1149], + device='cuda:0'), covar=tensor([0.1186, 0.1667, 0.0303, 0.1991, 0.0935, 0.0238, 0.0615, 0.0553], + device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0322, 0.0283, 0.0314, 0.0313, 0.0270, 0.0426, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:16:04,013 INFO [train.py:901] (0/4) Epoch 24, batch 4750, loss[loss=0.2183, simple_loss=0.2878, pruned_loss=0.07442, over 7525.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2855, pruned_loss=0.06021, over 1613861.56 frames. ], batch size: 18, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:16:18,692 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:20,754 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 07:16:22,902 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 07:16:29,407 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:32,683 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.279e+02 2.789e+02 3.393e+02 7.815e+02, threshold=5.578e+02, percent-clipped=3.0 +2023-02-07 07:16:40,394 INFO [train.py:901] (0/4) Epoch 24, batch 4800, loss[loss=0.2317, simple_loss=0.3104, pruned_loss=0.07652, over 6672.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.285, pruned_loss=0.05986, over 1610559.89 frames. ], batch size: 71, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:13,012 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 07:17:15,041 INFO [train.py:901] (0/4) Epoch 24, batch 4850, loss[loss=0.1862, simple_loss=0.2755, pruned_loss=0.04848, over 8667.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.285, pruned_loss=0.05981, over 1613027.48 frames. 
], batch size: 34, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:15,277 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6869, 2.1247, 3.1989, 1.4952, 2.4842, 2.0589, 1.8734, 2.4511], + device='cuda:0'), covar=tensor([0.2019, 0.2795, 0.0868, 0.4890, 0.2012, 0.3514, 0.2550, 0.2344], + device='cuda:0'), in_proj_covar=tensor([0.0525, 0.0612, 0.0552, 0.0650, 0.0646, 0.0595, 0.0545, 0.0632], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:17:17,934 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:40,153 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:43,508 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.278e+02 2.775e+02 3.178e+02 7.824e+02, threshold=5.550e+02, percent-clipped=3.0 +2023-02-07 07:17:50,723 INFO [train.py:901] (0/4) Epoch 24, batch 4900, loss[loss=0.1834, simple_loss=0.2561, pruned_loss=0.05538, over 7534.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2838, pruned_loss=0.0589, over 1615267.20 frames. ], batch size: 18, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:09,671 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5610, 1.6034, 4.7784, 1.8492, 4.2342, 4.0166, 4.3173, 4.2026], + device='cuda:0'), covar=tensor([0.0569, 0.4399, 0.0494, 0.4000, 0.1042, 0.0856, 0.0527, 0.0640], + device='cuda:0'), in_proj_covar=tensor([0.0646, 0.0653, 0.0713, 0.0641, 0.0721, 0.0619, 0.0618, 0.0692], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:18:25,854 INFO [train.py:901] (0/4) Epoch 24, batch 4950, loss[loss=0.1499, simple_loss=0.2397, pruned_loss=0.03001, over 6391.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05887, over 1615458.17 frames. ], batch size: 14, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:36,138 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190872.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:18:39,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:45,178 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:53,847 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190897.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:18:54,306 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.393e+02 2.890e+02 3.701e+02 7.772e+02, threshold=5.780e+02, percent-clipped=4.0 +2023-02-07 07:19:01,826 INFO [train.py:901] (0/4) Epoch 24, batch 5000, loss[loss=0.2108, simple_loss=0.3042, pruned_loss=0.05868, over 8458.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.284, pruned_loss=0.05853, over 1616667.06 frames. 
], batch size: 25, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:02,718 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:33,474 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190953.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:19:36,598 INFO [train.py:901] (0/4) Epoch 24, batch 5050, loss[loss=0.205, simple_loss=0.2909, pruned_loss=0.05957, over 8514.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05786, over 1610449.62 frames. ], batch size: 28, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:45,288 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190971.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:51,188 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 07:20:04,905 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.521e+02 3.221e+02 4.359e+02 8.705e+02, threshold=6.442e+02, percent-clipped=11.0 +2023-02-07 07:20:11,615 INFO [train.py:901] (0/4) Epoch 24, batch 5100, loss[loss=0.207, simple_loss=0.2967, pruned_loss=0.05867, over 8437.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2834, pruned_loss=0.05757, over 1614565.38 frames. ], batch size: 29, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:25,285 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9680, 1.7132, 3.3488, 1.7369, 2.5194, 3.6912, 3.7321, 3.1849], + device='cuda:0'), covar=tensor([0.1188, 0.1693, 0.0337, 0.1958, 0.1049, 0.0231, 0.0603, 0.0522], + device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0322, 0.0284, 0.0314, 0.0313, 0.0271, 0.0428, 0.0301], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:20:31,893 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:40,071 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:44,519 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 07:20:46,726 INFO [train.py:901] (0/4) Epoch 24, batch 5150, loss[loss=0.233, simple_loss=0.3152, pruned_loss=0.07543, over 8312.00 frames. ], tot_loss[loss=0.199, simple_loss=0.283, pruned_loss=0.05753, over 1616619.25 frames. 
], batch size: 25, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:53,679 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191068.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:20:57,806 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:03,775 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3156, 2.1020, 1.7156, 1.9792, 1.8716, 1.4654, 1.7064, 1.6586], + device='cuda:0'), covar=tensor([0.1216, 0.0436, 0.1152, 0.0494, 0.0658, 0.1549, 0.0935, 0.0809], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0234, 0.0337, 0.0309, 0.0299, 0.0341, 0.0346, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 07:21:05,748 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:05,783 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:13,676 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.286e+02 2.691e+02 3.617e+02 7.196e+02, threshold=5.383e+02, percent-clipped=2.0 +2023-02-07 07:21:20,844 INFO [train.py:901] (0/4) Epoch 24, batch 5200, loss[loss=0.1879, simple_loss=0.2797, pruned_loss=0.04807, over 8198.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05834, over 1611851.39 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:21:38,118 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:45,746 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-07 07:21:50,039 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 07:21:50,828 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:52,265 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:55,620 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:56,110 INFO [train.py:901] (0/4) Epoch 24, batch 5250, loss[loss=0.2553, simple_loss=0.3311, pruned_loss=0.08972, over 8329.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.05901, over 1611959.82 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:22:25,868 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.847e+02 3.981e+02 6.971e+02, threshold=5.694e+02, percent-clipped=11.0 +2023-02-07 07:22:28,326 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191203.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:22:31,606 INFO [train.py:901] (0/4) Epoch 24, batch 5300, loss[loss=0.2025, simple_loss=0.2909, pruned_loss=0.05705, over 7809.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05887, over 1602287.62 frames. 
], batch size: 20, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:07,066 INFO [train.py:901] (0/4) Epoch 24, batch 5350, loss[loss=0.2145, simple_loss=0.297, pruned_loss=0.06594, over 8527.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.05931, over 1607466.33 frames. ], batch size: 39, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:36,917 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.294e+02 2.754e+02 3.342e+02 1.056e+03, threshold=5.508e+02, percent-clipped=2.0 +2023-02-07 07:23:42,381 INFO [train.py:901] (0/4) Epoch 24, batch 5400, loss[loss=0.2103, simple_loss=0.2951, pruned_loss=0.06277, over 8431.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05909, over 1604282.75 frames. ], batch size: 27, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:23:53,139 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191324.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:24:05,735 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191342.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:11,129 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191349.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:24:17,035 INFO [train.py:901] (0/4) Epoch 24, batch 5450, loss[loss=0.2102, simple_loss=0.2954, pruned_loss=0.0625, over 8601.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2843, pruned_loss=0.05967, over 1608867.59 frames. ], batch size: 31, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:20,828 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 07:24:23,274 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:34,960 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:39,466 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 07:24:46,091 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.294e+02 2.951e+02 3.676e+02 7.135e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 07:24:52,418 INFO [train.py:901] (0/4) Epoch 24, batch 5500, loss[loss=0.1793, simple_loss=0.2539, pruned_loss=0.05234, over 7292.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2842, pruned_loss=0.05993, over 1611917.67 frames. 
], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:52,648 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:59,653 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8612, 1.5262, 3.5393, 1.6442, 2.4513, 3.8285, 3.9293, 3.2518], + device='cuda:0'), covar=tensor([0.1218, 0.1786, 0.0268, 0.1928, 0.0899, 0.0218, 0.0518, 0.0547], + device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0325, 0.0287, 0.0316, 0.0315, 0.0273, 0.0431, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:25:07,779 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:10,085 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:12,206 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7054, 1.9128, 2.1064, 1.4175, 2.2320, 1.4448, 0.6657, 1.8429], + device='cuda:0'), covar=tensor([0.0699, 0.0413, 0.0301, 0.0651, 0.0486, 0.1026, 0.1010, 0.0387], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0404, 0.0359, 0.0456, 0.0388, 0.0542, 0.0401, 0.0434], + device='cuda:0'), out_proj_covar=tensor([1.2355e-04, 1.0545e-04, 9.4180e-05, 1.1973e-04, 1.0200e-04, 1.5191e-04, + 1.0759e-04, 1.1450e-04], device='cuda:0') +2023-02-07 07:25:28,136 INFO [train.py:901] (0/4) Epoch 24, batch 5550, loss[loss=0.2205, simple_loss=0.3086, pruned_loss=0.06619, over 8458.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2854, pruned_loss=0.0601, over 1614863.77 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:25:53,333 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:57,930 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.420e+02 3.039e+02 3.989e+02 7.925e+02, threshold=6.078e+02, percent-clipped=5.0 +2023-02-07 07:26:00,914 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3415, 2.1004, 2.5699, 2.2257, 2.5357, 2.2848, 2.2011, 1.8684], + device='cuda:0'), covar=tensor([0.4076, 0.4051, 0.1794, 0.3148, 0.2273, 0.2700, 0.1587, 0.4109], + device='cuda:0'), in_proj_covar=tensor([0.0946, 0.0994, 0.0813, 0.0964, 0.1002, 0.0907, 0.0756, 0.0832], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 07:26:03,389 INFO [train.py:901] (0/4) Epoch 24, batch 5600, loss[loss=0.2065, simple_loss=0.287, pruned_loss=0.06304, over 8481.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2862, pruned_loss=0.06108, over 1609905.79 frames. ], batch size: 48, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:29,646 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191545.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:30,939 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:38,448 INFO [train.py:901] (0/4) Epoch 24, batch 5650, loss[loss=0.2192, simple_loss=0.2941, pruned_loss=0.07211, over 7652.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2865, pruned_loss=0.06108, over 1608922.50 frames. 
], batch size: 19, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:45,404 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 07:26:49,860 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1475, 1.8439, 2.2754, 2.0063, 2.2876, 2.1552, 1.9662, 1.0756], + device='cuda:0'), covar=tensor([0.5271, 0.5104, 0.2167, 0.3648, 0.2443, 0.3167, 0.1982, 0.5403], + device='cuda:0'), in_proj_covar=tensor([0.0948, 0.0996, 0.0815, 0.0966, 0.1003, 0.0908, 0.0757, 0.0832], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 07:27:08,572 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.481e+02 2.901e+02 3.753e+02 9.237e+02, threshold=5.802e+02, percent-clipped=5.0 +2023-02-07 07:27:13,761 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7846, 2.2502, 3.7740, 1.6592, 2.9413, 2.3278, 1.8668, 2.7747], + device='cuda:0'), covar=tensor([0.1920, 0.2777, 0.0923, 0.4575, 0.1816, 0.3115, 0.2340, 0.2512], + device='cuda:0'), in_proj_covar=tensor([0.0532, 0.0617, 0.0556, 0.0656, 0.0653, 0.0600, 0.0548, 0.0637], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:27:14,234 INFO [train.py:901] (0/4) Epoch 24, batch 5700, loss[loss=0.2078, simple_loss=0.2965, pruned_loss=0.05951, over 8711.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2862, pruned_loss=0.06056, over 1614125.22 frames. ], batch size: 39, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:15,039 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:37,282 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-07 07:27:49,744 INFO [train.py:901] (0/4) Epoch 24, batch 5750, loss[loss=0.2052, simple_loss=0.2809, pruned_loss=0.06477, over 7644.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06055, over 1615424.56 frames. ], batch size: 19, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:52,742 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:53,269 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 07:28:19,019 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.394e+02 2.726e+02 3.524e+02 6.240e+02, threshold=5.452e+02, percent-clipped=4.0 +2023-02-07 07:28:25,093 INFO [train.py:901] (0/4) Epoch 24, batch 5800, loss[loss=0.1936, simple_loss=0.2709, pruned_loss=0.05818, over 7233.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2856, pruned_loss=0.0604, over 1616290.07 frames. 
], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:28:38,084 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:28:46,950 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2519, 2.4371, 2.0025, 2.9718, 1.4406, 1.8135, 2.1706, 2.2961], + device='cuda:0'), covar=tensor([0.0682, 0.0661, 0.0825, 0.0295, 0.0963, 0.1121, 0.0725, 0.0777], + device='cuda:0'), in_proj_covar=tensor([0.0235, 0.0199, 0.0246, 0.0216, 0.0206, 0.0247, 0.0253, 0.0209], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 07:28:59,633 INFO [train.py:901] (0/4) Epoch 24, batch 5850, loss[loss=0.2057, simple_loss=0.2947, pruned_loss=0.05834, over 8236.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2852, pruned_loss=0.06032, over 1613754.80 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:29,158 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.223e+02 2.821e+02 3.422e+02 9.012e+02, threshold=5.641e+02, percent-clipped=8.0 +2023-02-07 07:29:30,137 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:34,717 INFO [train.py:901] (0/4) Epoch 24, batch 5900, loss[loss=0.1868, simple_loss=0.2785, pruned_loss=0.04754, over 8036.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2858, pruned_loss=0.06019, over 1620010.15 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:45,873 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 07:29:48,205 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191826.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:56,045 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 07:29:59,334 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:10,957 INFO [train.py:901] (0/4) Epoch 24, batch 5950, loss[loss=0.2362, simple_loss=0.313, pruned_loss=0.07965, over 8705.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2833, pruned_loss=0.05906, over 1614056.75 frames. ], batch size: 34, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:16,050 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:33,606 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191890.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:34,659 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-07 07:30:40,226 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 2.338e+02 2.991e+02 3.628e+02 7.270e+02, threshold=5.982e+02, percent-clipped=3.0 +2023-02-07 07:30:45,682 INFO [train.py:901] (0/4) Epoch 24, batch 6000, loss[loss=0.2412, simple_loss=0.3147, pruned_loss=0.08381, over 7204.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2846, pruned_loss=0.05951, over 1614776.65 frames. 
], batch size: 71, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:45,682 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 07:31:01,024 INFO [train.py:935] (0/4) Epoch 24, validation: loss=0.1718, simple_loss=0.2718, pruned_loss=0.0359, over 944034.00 frames. +2023-02-07 07:31:01,026 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 07:31:08,198 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191918.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:13,481 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5078, 1.6381, 4.6952, 1.9845, 4.1909, 3.9157, 4.2996, 4.1472], + device='cuda:0'), covar=tensor([0.0583, 0.4870, 0.0502, 0.4147, 0.1059, 0.0969, 0.0558, 0.0637], + device='cuda:0'), in_proj_covar=tensor([0.0648, 0.0653, 0.0711, 0.0644, 0.0720, 0.0616, 0.0618, 0.0690], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:31:24,825 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:35,325 INFO [train.py:901] (0/4) Epoch 24, batch 6050, loss[loss=0.2156, simple_loss=0.3052, pruned_loss=0.06297, over 8077.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.05936, over 1618171.86 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:31:46,660 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.1433, 2.0375, 6.2533, 2.3723, 5.7465, 5.2529, 5.8407, 5.6975], + device='cuda:0'), covar=tensor([0.0348, 0.4412, 0.0277, 0.3714, 0.0733, 0.0787, 0.0401, 0.0420], + device='cuda:0'), in_proj_covar=tensor([0.0646, 0.0652, 0.0709, 0.0641, 0.0718, 0.0615, 0.0618, 0.0688], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 07:32:04,602 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.415e+02 2.742e+02 3.441e+02 8.508e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 07:32:04,770 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-192000.pt +2023-02-07 07:32:11,925 INFO [train.py:901] (0/4) Epoch 24, batch 6100, loss[loss=0.1695, simple_loss=0.2546, pruned_loss=0.04223, over 7657.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.284, pruned_loss=0.05929, over 1611703.37 frames. ], batch size: 19, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:32:15,176 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 07:32:25,157 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7456, 1.4670, 3.1792, 1.1876, 2.2850, 3.5117, 3.8330, 2.5738], + device='cuda:0'), covar=tensor([0.1729, 0.2455, 0.0513, 0.3225, 0.1318, 0.0402, 0.0653, 0.1125], + device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0322, 0.0286, 0.0313, 0.0313, 0.0272, 0.0428, 0.0302], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 07:32:34,086 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 07:32:46,756 INFO [train.py:901] (0/4) Epoch 24, batch 6150, loss[loss=0.2107, simple_loss=0.2774, pruned_loss=0.07202, over 7225.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2838, pruned_loss=0.05928, over 1611488.16 frames. 
], batch size: 16, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:33:15,568 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192098.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:33:16,714 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.367e+02 2.762e+02 3.348e+02 6.106e+02, threshold=5.524e+02, percent-clipped=2.0
+2023-02-07 07:33:20,331 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-07 07:33:22,018 INFO [train.py:901] (0/4) Epoch 24, batch 6200, loss[loss=0.2036, simple_loss=0.2831, pruned_loss=0.06203, over 7977.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2841, pruned_loss=0.05971, over 1613776.35 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:33:30,657 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192120.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:33:32,760 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192123.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:33:56,607 INFO [train.py:901] (0/4) Epoch 24, batch 6250, loss[loss=0.208, simple_loss=0.2867, pruned_loss=0.06467, over 8285.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05909, over 1615207.99 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:34:07,851 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192173.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:34:26,658 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.364e+02 2.949e+02 3.646e+02 8.976e+02, threshold=5.898e+02, percent-clipped=7.0
+2023-02-07 07:34:33,020 INFO [train.py:901] (0/4) Epoch 24, batch 6300, loss[loss=0.196, simple_loss=0.2664, pruned_loss=0.06277, over 7546.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2833, pruned_loss=0.05881, over 1611313.47 frames. ], batch size: 18, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:34:52,685 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192237.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:34:54,074 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192239.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:34:54,839 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7629, 1.7261, 2.6992, 2.0574, 2.4605, 1.7951, 1.5570, 1.2497],
+ device='cuda:0'), covar=tensor([0.7216, 0.6072, 0.2045, 0.3961, 0.2917, 0.4418, 0.3125, 0.5664],
+ device='cuda:0'), in_proj_covar=tensor([0.0949, 0.0998, 0.0817, 0.0970, 0.1005, 0.0910, 0.0760, 0.0833],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 07:35:07,361 INFO [train.py:901] (0/4) Epoch 24, batch 6350, loss[loss=0.2095, simple_loss=0.2933, pruned_loss=0.06285, over 8477.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2849, pruned_loss=0.05978, over 1614544.03 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:35:20,870 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-07 07:35:36,835 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.329e+02 2.896e+02 3.640e+02 5.459e+02, threshold=5.791e+02, percent-clipped=0.0
+2023-02-07 07:35:43,003 INFO [train.py:901] (0/4) Epoch 24, batch 6400, loss[loss=0.223, simple_loss=0.3084, pruned_loss=0.06878, over 8286.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.285, pruned_loss=0.05999, over 1619850.35 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:35:58,398 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7297, 1.4487, 2.8605, 1.4088, 2.3070, 3.0888, 3.2146, 2.6678],
+ device='cuda:0'), covar=tensor([0.1074, 0.1542, 0.0343, 0.1953, 0.0768, 0.0291, 0.0650, 0.0548],
+ device='cuda:0'), in_proj_covar=tensor([0.0297, 0.0323, 0.0286, 0.0315, 0.0314, 0.0272, 0.0430, 0.0303],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 07:36:19,118 INFO [train.py:901] (0/4) Epoch 24, batch 6450, loss[loss=0.1787, simple_loss=0.2402, pruned_loss=0.05863, over 7444.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2855, pruned_loss=0.06031, over 1619562.00 frames. ], batch size: 17, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:36:49,115 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.491e+02 2.965e+02 3.858e+02 7.678e+02, threshold=5.930e+02, percent-clipped=7.0
+2023-02-07 07:36:53,706 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-07 07:36:54,692 INFO [train.py:901] (0/4) Epoch 24, batch 6500, loss[loss=0.1552, simple_loss=0.2408, pruned_loss=0.03477, over 7654.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05951, over 1614353.37 frames. ], batch size: 19, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:36:56,119 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:37:00,677 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:37:24,989 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192451.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:37:29,457 INFO [train.py:901] (0/4) Epoch 24, batch 6550, loss[loss=0.2488, simple_loss=0.3169, pruned_loss=0.09034, over 6642.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2855, pruned_loss=0.06031, over 1611856.72 frames. ], batch size: 71, lr: 3.13e-03, grad_scale: 8.0
+2023-02-07 07:37:33,652 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:37:48,308 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-07 07:37:58,370 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.233e+02 2.734e+02 3.455e+02 6.558e+02, threshold=5.467e+02, percent-clipped=2.0
+2023-02-07 07:38:03,874 INFO [train.py:901] (0/4) Epoch 24, batch 6600, loss[loss=0.22, simple_loss=0.3061, pruned_loss=0.06693, over 8616.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2845, pruned_loss=0.05982, over 1610728.53 frames. ], batch size: 39, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:38:08,128 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-07 07:38:10,832 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:38:18,041 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-07 07:38:38,394 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6120, 1.6995, 3.7703, 1.5441, 3.3533, 3.1532, 3.4142, 3.2964],
+ device='cuda:0'), covar=tensor([0.0740, 0.3818, 0.0731, 0.4032, 0.1206, 0.0979, 0.0668, 0.0741],
+ device='cuda:0'), in_proj_covar=tensor([0.0646, 0.0655, 0.0710, 0.0639, 0.0718, 0.0614, 0.0617, 0.0686],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 07:38:39,631 INFO [train.py:901] (0/4) Epoch 24, batch 6650, loss[loss=0.2133, simple_loss=0.2955, pruned_loss=0.06551, over 8337.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2846, pruned_loss=0.0597, over 1614109.68 frames. ], batch size: 26, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:38:53,940 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192579.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:38:55,127 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192581.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:38:56,512 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:39:08,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.365e+02 2.876e+02 3.746e+02 9.522e+02, threshold=5.752e+02, percent-clipped=3.0
+2023-02-07 07:39:14,186 INFO [train.py:901] (0/4) Epoch 24, batch 6700, loss[loss=0.1848, simple_loss=0.2823, pruned_loss=0.04364, over 8320.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2841, pruned_loss=0.0597, over 1614474.04 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:39:31,214 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:39:32,530 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192634.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:39:49,031 INFO [train.py:901] (0/4) Epoch 24, batch 6750, loss[loss=0.2248, simple_loss=0.3032, pruned_loss=0.07319, over 7920.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2841, pruned_loss=0.05989, over 1608406.61 frames. ], batch size: 20, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:40:12,029 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:40:15,265 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192696.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:40:16,610 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:40:17,826 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.601e+02 3.163e+02 3.841e+02 9.507e+02, threshold=6.325e+02, percent-clipped=3.0
+2023-02-07 07:40:23,409 INFO [train.py:901] (0/4) Epoch 24, batch 6800, loss[loss=0.2284, simple_loss=0.2981, pruned_loss=0.07937, over 7556.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06106, over 1611805.59 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:40:24,836 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-07 07:40:40,562 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. limit=5.0
+2023-02-07 07:40:56,186 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192753.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:40:56,807 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192754.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:40:59,526 INFO [train.py:901] (0/4) Epoch 24, batch 6850, loss[loss=0.1982, simple_loss=0.2921, pruned_loss=0.0522, over 8324.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2858, pruned_loss=0.0604, over 1614485.93 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:41:01,648 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:41:15,307 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-07 07:41:26,163 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:41:27,525 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3302, 1.4092, 4.1708, 2.0091, 2.4542, 4.8804, 5.0076, 4.2102],
+ device='cuda:0'), covar=tensor([0.1072, 0.2211, 0.0320, 0.1956, 0.1330, 0.0181, 0.0369, 0.0524],
+ device='cuda:0'), in_proj_covar=tensor([0.0298, 0.0325, 0.0287, 0.0315, 0.0315, 0.0273, 0.0431, 0.0304],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 07:41:29,431 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.230e+02 2.864e+02 3.611e+02 9.090e+02, threshold=5.729e+02, percent-clipped=1.0
+2023-02-07 07:41:34,274 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-07 07:41:35,208 INFO [train.py:901] (0/4) Epoch 24, batch 6900, loss[loss=0.2752, simple_loss=0.3389, pruned_loss=0.1058, over 7229.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2856, pruned_loss=0.06053, over 1609468.86 frames. ], batch size: 71, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:41:48,734 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5147, 2.0107, 3.0785, 1.3952, 2.2960, 1.9984, 1.6900, 2.4344],
+ device='cuda:0'), covar=tensor([0.2129, 0.2711, 0.1025, 0.4946, 0.2113, 0.3336, 0.2582, 0.2407],
+ device='cuda:0'), in_proj_covar=tensor([0.0532, 0.0619, 0.0557, 0.0655, 0.0652, 0.0601, 0.0550, 0.0639],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 07:41:54,061 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192835.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:42:03,101 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192849.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 07:42:08,868 INFO [train.py:901] (0/4) Epoch 24, batch 6950, loss[loss=0.2116, simple_loss=0.2989, pruned_loss=0.06213, over 8531.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2855, pruned_loss=0.06017, over 1609819.04 frames. ], batch size: 28, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:42:10,319 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192860.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:42:17,130 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192869.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:42:21,962 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:42:23,178 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-07 07:42:30,036 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192888.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:42:37,505 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6172, 2.0175, 3.1696, 1.4944, 2.4593, 2.1260, 1.7240, 2.5109],
+ device='cuda:0'), covar=tensor([0.1958, 0.2730, 0.0957, 0.4747, 0.1918, 0.3214, 0.2436, 0.2240],
+ device='cuda:0'), in_proj_covar=tensor([0.0533, 0.0620, 0.0558, 0.0656, 0.0653, 0.0602, 0.0550, 0.0640],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 07:42:38,586 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.384e+02 2.955e+02 3.597e+02 9.319e+02, threshold=5.910e+02, percent-clipped=1.0
+2023-02-07 07:42:44,708 INFO [train.py:901] (0/4) Epoch 24, batch 7000, loss[loss=0.2362, simple_loss=0.3176, pruned_loss=0.07737, over 8453.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2853, pruned_loss=0.05973, over 1608919.81 frames. ], batch size: 29, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:42:46,232 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192910.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:42:48,260 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192913.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:43:15,560 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192952.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:43:16,882 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:43:19,416 INFO [train.py:901] (0/4) Epoch 24, batch 7050, loss[loss=0.2368, simple_loss=0.322, pruned_loss=0.07579, over 8692.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2864, pruned_loss=0.0603, over 1609925.56 frames. ], batch size: 39, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:43:30,606 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3287, 2.1294, 2.7273, 2.2710, 2.7138, 2.3714, 2.1639, 1.5370],
+ device='cuda:0'), covar=tensor([0.5360, 0.4903, 0.2034, 0.3694, 0.2636, 0.3071, 0.1902, 0.5209],
+ device='cuda:0'), in_proj_covar=tensor([0.0945, 0.0995, 0.0815, 0.0964, 0.1001, 0.0907, 0.0756, 0.0830],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 07:43:32,666 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192977.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:43:33,239 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192978.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:43:34,092 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:43:48,988 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.407e+02 3.090e+02 3.925e+02 9.689e+02, threshold=6.179e+02, percent-clipped=7.0
+2023-02-07 07:43:54,456 INFO [train.py:901] (0/4) Epoch 24, batch 7100, loss[loss=0.1768, simple_loss=0.2608, pruned_loss=0.04639, over 7802.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05941, over 1609201.15 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:44:14,194 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193035.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:44:29,512 INFO [train.py:901] (0/4) Epoch 24, batch 7150, loss[loss=0.1977, simple_loss=0.2676, pruned_loss=0.06385, over 7521.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.283, pruned_loss=0.05897, over 1603932.11 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:44:30,377 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193059.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:44:54,302 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193093.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:44:55,772 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7923, 1.6668, 2.6131, 2.0371, 2.3148, 1.7986, 1.5258, 1.1731],
+ device='cuda:0'), covar=tensor([0.8014, 0.7437, 0.2342, 0.4315, 0.3392, 0.5025, 0.3568, 0.6316],
+ device='cuda:0'), in_proj_covar=tensor([0.0948, 0.0997, 0.0817, 0.0965, 0.1006, 0.0910, 0.0759, 0.0832],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 07:44:56,926 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193097.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:44:58,925 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.272e+02 2.945e+02 3.915e+02 7.728e+02, threshold=5.890e+02, percent-clipped=4.0
+2023-02-07 07:45:05,029 INFO [train.py:901] (0/4) Epoch 24, batch 7200, loss[loss=0.2321, simple_loss=0.3049, pruned_loss=0.07968, over 8656.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2844, pruned_loss=0.05938, over 1609571.69 frames. ], batch size: 39, lr: 3.12e-03, grad_scale: 8.0
+2023-02-07 07:45:16,877 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:45:21,521 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193132.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:45:22,767 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1999, 1.7234, 4.4615, 2.0728, 2.5699, 5.0433, 5.1235, 4.2304],
+ device='cuda:0'), covar=tensor([0.1346, 0.1967, 0.0309, 0.1998, 0.1139, 0.0214, 0.0540, 0.0649],
+ device='cuda:0'), in_proj_covar=tensor([0.0296, 0.0321, 0.0286, 0.0313, 0.0313, 0.0272, 0.0428, 0.0302],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 07:45:34,268 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193150.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:45:34,292 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193150.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:45:39,614 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193157.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:45:40,076 INFO [train.py:901] (0/4) Epoch 24, batch 7250, loss[loss=0.1962, simple_loss=0.2815, pruned_loss=0.05545, over 8554.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2838, pruned_loss=0.05925, over 1612805.86 frames. ], batch size: 39, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:45:45,711 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193166.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:46:03,183 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193191.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:46:04,338 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193193.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 07:46:08,901 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.389e+02 2.780e+02 3.377e+02 1.311e+03, threshold=5.561e+02, percent-clipped=2.0
+2023-02-07 07:46:14,404 INFO [train.py:901] (0/4) Epoch 24, batch 7300, loss[loss=0.1722, simple_loss=0.2481, pruned_loss=0.04815, over 7798.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2836, pruned_loss=0.059, over 1614519.01 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:46:17,189 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193212.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:46:29,138 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:46:41,117 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6343, 1.3721, 2.9053, 1.4849, 2.2377, 3.0757, 3.2172, 2.6577],
+ device='cuda:0'), covar=tensor([0.1148, 0.1716, 0.0364, 0.1940, 0.0889, 0.0315, 0.0578, 0.0565],
+ device='cuda:0'), in_proj_covar=tensor([0.0295, 0.0320, 0.0284, 0.0311, 0.0312, 0.0270, 0.0426, 0.0300],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 07:46:48,079 INFO [train.py:901] (0/4) Epoch 24, batch 7350, loss[loss=0.1938, simple_loss=0.2898, pruned_loss=0.04885, over 8497.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2824, pruned_loss=0.05885, over 1611238.20 frames. ], batch size: 29, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:47:11,633 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-07 07:47:17,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.477e+02 2.971e+02 3.853e+02 6.522e+02, threshold=5.942e+02, percent-clipped=4.0
+2023-02-07 07:47:19,304 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:47:23,205 INFO [train.py:901] (0/4) Epoch 24, batch 7400, loss[loss=0.1565, simple_loss=0.2453, pruned_loss=0.03382, over 7655.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2834, pruned_loss=0.05938, over 1608777.27 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:47:23,395 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193308.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 07:47:31,916 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-07 07:47:51,095 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5625, 1.9488, 3.0402, 1.4445, 2.2284, 2.0102, 1.6386, 2.3231],
+ device='cuda:0'), covar=tensor([0.1998, 0.2707, 0.0942, 0.4761, 0.2112, 0.3232, 0.2409, 0.2274],
+ device='cuda:0'), in_proj_covar=tensor([0.0528, 0.0616, 0.0555, 0.0649, 0.0648, 0.0597, 0.0545, 0.0635],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 07:47:52,375 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:47:58,367 INFO [train.py:901] (0/4) Epoch 24, batch 7450, loss[loss=0.2023, simple_loss=0.2998, pruned_loss=0.05241, over 8301.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2835, pruned_loss=0.05899, over 1609962.48 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:48:09,518 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-07 07:48:10,314 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193374.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:48:24,526 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1005, 2.2655, 1.8786, 2.8231, 1.2415, 1.7359, 2.0121, 2.2497],
+ device='cuda:0'), covar=tensor([0.0683, 0.0707, 0.0869, 0.0317, 0.1116, 0.1162, 0.0775, 0.0797],
+ device='cuda:0'), in_proj_covar=tensor([0.0234, 0.0198, 0.0245, 0.0214, 0.0206, 0.0248, 0.0252, 0.0208],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 07:48:28,444 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.340e+02 2.930e+02 4.048e+02 8.147e+02, threshold=5.861e+02, percent-clipped=5.0
+2023-02-07 07:48:30,628 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:48:32,862 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193406.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:48:34,038 INFO [train.py:901] (0/4) Epoch 24, batch 7500, loss[loss=0.1731, simple_loss=0.2666, pruned_loss=0.03981, over 8249.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2852, pruned_loss=0.0597, over 1613605.16 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:48:44,182 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.20 vs. limit=5.0
+2023-02-07 07:48:50,681 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193431.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:48:56,215 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193439.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:49:09,712 INFO [train.py:901] (0/4) Epoch 24, batch 7550, loss[loss=0.1952, simple_loss=0.282, pruned_loss=0.05415, over 8229.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.285, pruned_loss=0.05944, over 1616173.78 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:49:16,842 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193468.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:49:19,403 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193472.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 07:49:34,380 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193493.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:49:34,556 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-07 07:49:39,065 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.474e+02 3.046e+02 3.751e+02 6.843e+02, threshold=6.092e+02, percent-clipped=3.0
+2023-02-07 07:49:45,271 INFO [train.py:901] (0/4) Epoch 24, batch 7600, loss[loss=0.217, simple_loss=0.3037, pruned_loss=0.06519, over 8554.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2842, pruned_loss=0.05942, over 1614487.90 frames. ], batch size: 31, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:49:51,992 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193518.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:50:19,354 INFO [train.py:901] (0/4) Epoch 24, batch 7650, loss[loss=0.2027, simple_loss=0.2866, pruned_loss=0.05938, over 8359.00 frames. ], tot_loss[loss=0.203, simple_loss=0.286, pruned_loss=0.05999, over 1618694.84 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:50:23,598 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193564.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 07:50:30,315 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193573.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:50:41,146 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193589.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 07:50:48,583 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.626e+02 3.196e+02 4.372e+02 7.437e+02, threshold=6.392e+02, percent-clipped=4.0
+2023-02-07 07:50:53,968 INFO [train.py:901] (0/4) Epoch 24, batch 7700, loss[loss=0.2207, simple_loss=0.2884, pruned_loss=0.07656, over 7982.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2871, pruned_loss=0.06062, over 1619589.11 frames. ], batch size: 21, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:51:11,794 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193633.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:51:14,901 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-07 07:51:20,922 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193646.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:51:29,067 INFO [train.py:901] (0/4) Epoch 24, batch 7750, loss[loss=0.1941, simple_loss=0.2742, pruned_loss=0.05703, over 7250.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2862, pruned_loss=0.06012, over 1616824.89 frames. ], batch size: 16, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:51:50,176 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193688.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:51:51,544 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9275, 1.7419, 2.5765, 1.6743, 1.4002, 2.4892, 0.4618, 1.6220],
+ device='cuda:0'), covar=tensor([0.1444, 0.1316, 0.0290, 0.1162, 0.2469, 0.0322, 0.2018, 0.1154],
+ device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0202, 0.0130, 0.0221, 0.0274, 0.0140, 0.0172, 0.0198],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 07:51:58,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.679e+02 3.147e+02 3.999e+02 8.742e+02, threshold=6.294e+02, percent-clipped=3.0
+2023-02-07 07:52:03,353 INFO [train.py:901] (0/4) Epoch 24, batch 7800, loss[loss=0.2573, simple_loss=0.3392, pruned_loss=0.08773, over 8129.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2856, pruned_loss=0.06003, over 1613457.07 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0
+2023-02-07 07:52:10,492 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3741, 1.3634, 2.3238, 1.1966, 2.2573, 2.4614, 2.6314, 1.9917],
+ device='cuda:0'), covar=tensor([0.1290, 0.1522, 0.0507, 0.2337, 0.0744, 0.0484, 0.0814, 0.0899],
+ device='cuda:0'), in_proj_covar=tensor([0.0299, 0.0322, 0.0287, 0.0316, 0.0315, 0.0272, 0.0429, 0.0302],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 07:52:37,266 INFO [train.py:901] (0/4) Epoch 24, batch 7850, loss[loss=0.1887, simple_loss=0.2673, pruned_loss=0.05508, over 7233.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2858, pruned_loss=0.06058, over 1609709.51 frames. ], batch size: 16, lr: 3.11e-03, grad_scale: 16.0
+2023-02-07 07:52:39,490 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:52:48,223 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193774.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:52:54,176 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193783.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:53:05,048 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193799.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:53:05,511 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.471e+02 2.801e+02 3.652e+02 8.352e+02, threshold=5.603e+02, percent-clipped=2.0
+2023-02-07 07:53:10,835 INFO [train.py:901] (0/4) Epoch 24, batch 7900, loss[loss=0.2376, simple_loss=0.3117, pruned_loss=0.08178, over 7096.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.287, pruned_loss=0.06085, over 1613644.47 frames. ], batch size: 71, lr: 3.11e-03, grad_scale: 16.0
+2023-02-07 07:53:16,263 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193816.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 07:53:43,830 INFO [train.py:901] (0/4) Epoch 24, batch 7950, loss[loss=0.2147, simple_loss=0.2995, pruned_loss=0.06499, over 8589.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2867, pruned_loss=0.0606, over 1614150.21 frames. ], batch size: 31, lr: 3.11e-03, grad_scale: 16.0
+2023-02-07 07:54:11,276 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193898.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:54:12,511 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.494e+02 3.061e+02 3.521e+02 6.741e+02, threshold=6.122e+02, percent-clipped=2.0
+2023-02-07 07:54:13,962 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:54:17,820 INFO [train.py:901] (0/4) Epoch 24, batch 8000, loss[loss=0.1686, simple_loss=0.2463, pruned_loss=0.04539, over 7421.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2882, pruned_loss=0.06114, over 1613919.03 frames. ], batch size: 17, lr: 3.11e-03, grad_scale: 16.0
+2023-02-07 07:54:33,377 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193931.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 07:54:42,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193944.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:54:51,054 INFO [train.py:901] (0/4) Epoch 24, batch 8050, loss[loss=0.1917, simple_loss=0.2649, pruned_loss=0.05926, over 7439.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2851, pruned_loss=0.06049, over 1592752.82 frames. ], batch size: 17, lr: 3.11e-03, grad_scale: 16.0
+2023-02-07 07:54:58,121 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:55:03,435 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193977.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:55:13,098 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-24.pt
+2023-02-07 07:55:25,187 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-07 07:55:28,445 INFO [train.py:901] (0/4) Epoch 25, batch 0, loss[loss=0.2769, simple_loss=0.3367, pruned_loss=0.1086, over 7815.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3367, pruned_loss=0.1086, over 7815.00 frames. ], batch size: 20, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:55:28,446 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-07 07:55:39,669 INFO [train.py:935] (0/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03604, over 944034.00 frames.
+2023-02-07 07:55:39,670 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-07 07:55:46,485 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.577e+02 3.086e+02 3.975e+02 9.885e+02, threshold=6.172e+02, percent-clipped=3.0
+2023-02-07 07:55:46,615 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-194000.pt
+2023-02-07 07:55:57,050 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-07 07:56:00,103 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194017.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:56:15,973 INFO [train.py:901] (0/4) Epoch 25, batch 50, loss[loss=0.2154, simple_loss=0.2813, pruned_loss=0.07476, over 5576.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2858, pruned_loss=0.0605, over 363480.64 frames. ], batch size: 12, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:56:17,571 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:56:32,512 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-07 07:56:51,125 INFO [train.py:901] (0/4) Epoch 25, batch 100, loss[loss=0.2202, simple_loss=0.3073, pruned_loss=0.06659, over 8295.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2901, pruned_loss=0.06273, over 647972.67 frames. ], batch size: 23, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:56:51,259 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194090.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:56:52,651 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194092.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:56:55,677 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-07 07:56:57,725 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.655e+02 3.251e+02 4.247e+02 7.218e+02, threshold=6.502e+02, percent-clipped=2.0
+2023-02-07 07:57:02,239 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-07 07:57:11,416 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1867, 1.0646, 1.2831, 1.0316, 0.9482, 1.3188, 0.0845, 1.0021],
+ device='cuda:0'), covar=tensor([0.1504, 0.1210, 0.0436, 0.0695, 0.2554, 0.0514, 0.1973, 0.1042],
+ device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0199, 0.0129, 0.0218, 0.0271, 0.0138, 0.0171, 0.0196],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 07:57:13,418 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7352, 1.8859, 1.5871, 2.3264, 0.9956, 1.4388, 1.6663, 1.8868],
+ device='cuda:0'), covar=tensor([0.0754, 0.0734, 0.0921, 0.0369, 0.1093, 0.1372, 0.0748, 0.0733],
+ device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0197, 0.0244, 0.0214, 0.0205, 0.0247, 0.0251, 0.0206],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 07:57:25,367 INFO [train.py:901] (0/4) Epoch 25, batch 150, loss[loss=0.2023, simple_loss=0.2951, pruned_loss=0.05475, over 8776.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2889, pruned_loss=0.06185, over 865086.86 frames. ], batch size: 30, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:57:35,091 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194154.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:57:43,933 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5368, 1.3464, 1.5938, 1.3311, 0.8656, 1.3492, 1.4296, 1.2536],
+ device='cuda:0'), covar=tensor([0.0662, 0.1294, 0.1618, 0.1482, 0.0655, 0.1535, 0.0781, 0.0705],
+ device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0188, 0.0159, 0.0100, 0.0162, 0.0112, 0.0144],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-07 07:57:52,148 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194179.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:57:58,201 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194187.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 07:58:00,657 INFO [train.py:901] (0/4) Epoch 25, batch 200, loss[loss=0.1907, simple_loss=0.2626, pruned_loss=0.05939, over 7419.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2872, pruned_loss=0.06126, over 1031067.38 frames. ], batch size: 17, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:58:04,983 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5253, 1.9083, 2.9653, 1.3980, 2.1699, 1.8768, 1.6312, 2.1744],
+ device='cuda:0'), covar=tensor([0.2025, 0.2702, 0.0849, 0.4762, 0.1968, 0.3460, 0.2521, 0.2283],
+ device='cuda:0'), in_proj_covar=tensor([0.0526, 0.0615, 0.0554, 0.0651, 0.0649, 0.0596, 0.0546, 0.0633],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 07:58:07,397 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.343e+02 2.842e+02 3.543e+02 5.999e+02, threshold=5.685e+02, percent-clipped=0.0
+2023-02-07 07:58:14,561 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4696, 1.3418, 1.7014, 1.2668, 0.9062, 1.4228, 1.4364, 1.2633],
+ device='cuda:0'), covar=tensor([0.0660, 0.1335, 0.1709, 0.1554, 0.0626, 0.1554, 0.0735, 0.0703],
+ device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0159, 0.0101, 0.0163, 0.0112, 0.0145],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-07 07:58:16,685 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194212.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 07:58:35,370 INFO [train.py:901] (0/4) Epoch 25, batch 250, loss[loss=0.2184, simple_loss=0.3021, pruned_loss=0.06735, over 8506.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2855, pruned_loss=0.05925, over 1164104.29 frames. ], batch size: 28, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:58:39,443 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194246.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:58:49,470 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-07 07:58:56,324 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6875, 2.0091, 2.1424, 1.3120, 2.2188, 1.5472, 0.6425, 1.8507],
+ device='cuda:0'), covar=tensor([0.0610, 0.0366, 0.0308, 0.0642, 0.0394, 0.0903, 0.0985, 0.0374],
+ device='cuda:0'), in_proj_covar=tensor([0.0461, 0.0400, 0.0356, 0.0452, 0.0386, 0.0539, 0.0398, 0.0431],
+ device='cuda:0'), out_proj_covar=tensor([1.2274e-04, 1.0453e-04, 9.3081e-05, 1.1866e-04, 1.0132e-04, 1.5105e-04,
+ 1.0688e-04, 1.1364e-04], device='cuda:0')
+2023-02-07 07:58:58,133 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-07 07:59:09,575 INFO [train.py:901] (0/4) Epoch 25, batch 300, loss[loss=0.1739, simple_loss=0.2682, pruned_loss=0.03976, over 8338.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.05851, over 1267494.39 frames. ], batch size: 25, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:59:17,096 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.353e+02 2.857e+02 3.504e+02 7.851e+02, threshold=5.715e+02, percent-clipped=2.0
+2023-02-07 07:59:35,717 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-02-07 07:59:43,417 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:59:45,916 INFO [train.py:901] (0/4) Epoch 25, batch 350, loss[loss=0.2137, simple_loss=0.2999, pruned_loss=0.06377, over 8264.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2855, pruned_loss=0.05917, over 1346770.04 frames. ], batch size: 24, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 07:59:51,517 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194348.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 07:59:55,579 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8893, 2.0567, 4.0350, 2.2913, 3.6635, 3.4325, 3.7665, 3.6657],
+ device='cuda:0'), covar=tensor([0.0733, 0.3699, 0.0846, 0.3898, 0.1095, 0.0904, 0.0617, 0.0676],
+ device='cuda:0'), in_proj_covar=tensor([0.0645, 0.0655, 0.0710, 0.0646, 0.0717, 0.0612, 0.0617, 0.0692],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 08:00:00,424 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194361.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:00:08,024 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194371.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:00:09,421 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194373.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:00:14,123 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194380.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 08:00:20,644 INFO [train.py:901] (0/4) Epoch 25, batch 400, loss[loss=0.23, simple_loss=0.3129, pruned_loss=0.07355, over 8341.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05971, over 1403703.33 frames. ], batch size: 26, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 08:00:27,611 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.445e+02 3.013e+02 3.982e+02 8.525e+02, threshold=6.027e+02, percent-clipped=7.0
+2023-02-07 08:00:27,884 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7369, 1.7052, 5.8784, 2.1154, 5.2957, 4.9390, 5.4319, 5.2861],
+ device='cuda:0'), covar=tensor([0.0435, 0.4753, 0.0397, 0.4171, 0.0930, 0.0796, 0.0458, 0.0509],
+ device='cuda:0'), in_proj_covar=tensor([0.0645, 0.0655, 0.0709, 0.0644, 0.0715, 0.0611, 0.0616, 0.0690],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-07 08:00:52,190 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194434.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:00:56,957 INFO [train.py:901] (0/4) Epoch 25, batch 450, loss[loss=0.195, simple_loss=0.2789, pruned_loss=0.05557, over 8105.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2857, pruned_loss=0.05903, over 1454155.99 frames. ], batch size: 23, lr: 3.05e-03, grad_scale: 16.0
+2023-02-07 08:00:59,297 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8271, 1.8669, 1.6865, 2.3408, 0.9844, 1.6163, 1.6946, 1.9217],
+ device='cuda:0'), covar=tensor([0.0761, 0.0763, 0.0925, 0.0400, 0.1050, 0.1181, 0.0736, 0.0738],
+ device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0197, 0.0245, 0.0214, 0.0205, 0.0247, 0.0251, 0.0207],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 08:01:07,926 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4239, 2.6511, 3.0319, 1.8338, 3.3180, 2.1387, 1.6168, 2.3925],
+ device='cuda:0'), covar=tensor([0.0718, 0.0403, 0.0277, 0.0760, 0.0417, 0.0819, 0.0903, 0.0489],
+ device='cuda:0'), in_proj_covar=tensor([0.0464, 0.0404, 0.0358, 0.0454, 0.0390, 0.0541, 0.0401, 0.0435],
+ device='cuda:0'), out_proj_covar=tensor([1.2337e-04, 1.0538e-04, 9.3850e-05, 1.1925e-04, 1.0229e-04, 1.5171e-04,
+ 1.0760e-04, 1.1450e-04], device='cuda:0')
+2023-02-07 08:01:30,916 INFO [train.py:901] (0/4) Epoch 25, batch 500, loss[loss=0.22, simple_loss=0.2893, pruned_loss=0.07532, over 7680.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2859, pruned_loss=0.0602, over 1488714.42 frames. ], batch size: 18, lr: 3.04e-03, grad_scale: 16.0
+2023-02-07 08:01:37,833 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.459e+02 3.156e+02 4.025e+02 7.800e+02, threshold=6.312e+02, percent-clipped=3.0
+2023-02-07 08:02:06,154 INFO [train.py:901] (0/4) Epoch 25, batch 550, loss[loss=0.1976, simple_loss=0.2917, pruned_loss=0.05181, over 8255.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2858, pruned_loss=0.06024, over 1518001.03 frames. ], batch size: 24, lr: 3.04e-03, grad_scale: 16.0
+2023-02-07 08:02:13,462 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194549.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:02:30,085 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0347, 1.4054, 1.5936, 1.4509, 1.0580, 1.3726, 1.9019, 1.9576],
+ device='cuda:0'), covar=tensor([0.0542, 0.1607, 0.2191, 0.1669, 0.0661, 0.1891, 0.0729, 0.0582],
+ device='cuda:0'), in_proj_covar=tensor([0.0097, 0.0152, 0.0188, 0.0159, 0.0100, 0.0162, 0.0111, 0.0144],
+ device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-07 08:02:42,118 INFO [train.py:901] (0/4) Epoch 25, batch 600, loss[loss=0.1737, simple_loss=0.2526, pruned_loss=0.04741, over 6773.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2842, pruned_loss=0.05911, over 1537307.25 frames. ], batch size: 15, lr: 3.04e-03, grad_scale: 16.0
+2023-02-07 08:02:48,728 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.361e+02 2.970e+02 3.663e+02 1.001e+03, threshold=5.941e+02, percent-clipped=3.0
+2023-02-07 08:03:01,134 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-07 08:03:01,346 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194617.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:03:16,772 INFO [train.py:901] (0/4) Epoch 25, batch 650, loss[loss=0.1986, simple_loss=0.2775, pruned_loss=0.05983, over 8290.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05868, over 1553489.09 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:03:18,060 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194642.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:03:25,629 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194652.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:03:45,798 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194680.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:03:52,552 INFO [train.py:901] (0/4) Epoch 25, batch 700, loss[loss=0.1816, simple_loss=0.2539, pruned_loss=0.05465, over 7538.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2844, pruned_loss=0.0588, over 1568060.45 frames. ], batch size: 18, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:04:00,045 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.448e+02 2.849e+02 3.638e+02 5.412e+02, threshold=5.698e+02, percent-clipped=0.0
+2023-02-07 08:04:09,639 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194715.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:04:16,389 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194724.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 08:04:27,392 INFO [train.py:901] (0/4) Epoch 25, batch 750, loss[loss=0.196, simple_loss=0.2838, pruned_loss=0.05411, over 8322.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2846, pruned_loss=0.05885, over 1582289.85 frames. ], batch size: 25, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:04:49,452 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-07 08:04:58,538 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-07 08:05:03,369 INFO [train.py:901] (0/4) Epoch 25, batch 800, loss[loss=0.1925, simple_loss=0.2694, pruned_loss=0.05776, over 8132.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05886, over 1587474.28 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:05:07,611 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:05:11,583 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 2.417e+02 2.991e+02 3.771e+02 6.788e+02, threshold=5.982e+02, percent-clipped=2.0
+2023-02-07 08:05:13,774 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1545, 2.2686, 1.9199, 2.7038, 1.3699, 1.8350, 2.0366, 2.4041],
+ device='cuda:0'), covar=tensor([0.0635, 0.0688, 0.0836, 0.0391, 0.1062, 0.1069, 0.0801, 0.0591],
+ device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0195, 0.0244, 0.0213, 0.0205, 0.0246, 0.0250, 0.0206],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 08:05:14,511 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194805.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:05:31,781 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:05:31,805 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:05:33,417 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.64 vs. limit=5.0
+2023-02-07 08:05:37,852 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194839.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 08:05:38,295 INFO [train.py:901] (0/4) Epoch 25, batch 850, loss[loss=0.2296, simple_loss=0.3067, pruned_loss=0.0762, over 8031.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2846, pruned_loss=0.0592, over 1596658.11 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:05:44,136 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4171, 1.4409, 1.4175, 1.8254, 0.7742, 1.3152, 1.4132, 1.5141],
+ device='cuda:0'), covar=tensor([0.0832, 0.0727, 0.0963, 0.0450, 0.1000, 0.1176, 0.0627, 0.0599],
+ device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0213, 0.0205, 0.0246, 0.0251, 0.0206],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 08:05:45,503 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2984, 2.6503, 2.0985, 3.8638, 1.5034, 1.9489, 2.3222, 2.8371],
+ device='cuda:0'), covar=tensor([0.0711, 0.0739, 0.0829, 0.0238, 0.1082, 0.1165, 0.0968, 0.0673],
+ device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0213, 0.0205, 0.0246, 0.0250, 0.0206],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 08:05:56,602 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194865.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:06:14,016 INFO [train.py:901] (0/4) Epoch 25, batch 900, loss[loss=0.2678, simple_loss=0.3461, pruned_loss=0.09472, over 8468.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2851, pruned_loss=0.05907, over 1601803.26 frames. ], batch size: 29, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:06:22,179 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.491e+02 2.923e+02 3.701e+02 8.623e+02, threshold=5.846e+02, percent-clipped=3.0
+2023-02-07 08:06:28,887 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-07 08:06:44,145 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0
+2023-02-07 08:06:49,715 INFO [train.py:901] (0/4) Epoch 25, batch 950, loss[loss=0.1847, simple_loss=0.2671, pruned_loss=0.05111, over 8083.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2858, pruned_loss=0.05977, over 1607339.35 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:07:13,450 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4921, 1.4729, 2.0519, 1.2788, 1.1025, 2.0287, 0.3427, 1.2775],
+ device='cuda:0'), covar=tensor([0.1723, 0.1372, 0.0366, 0.1163, 0.2829, 0.0417, 0.1977, 0.1316],
+ device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0199, 0.0129, 0.0219, 0.0270, 0.0137, 0.0170, 0.0194],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 08:07:17,973 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-02-07 08:07:19,514 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-07 08:07:24,289 INFO [train.py:901] (0/4) Epoch 25, batch 1000, loss[loss=0.1943, simple_loss=0.2737, pruned_loss=0.05746, over 7957.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2851, pruned_loss=0.05894, over 1612245.23 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:07:29,061 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194996.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:07:32,165 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.651e+02 3.101e+02 3.894e+02 6.477e+02, threshold=6.202e+02, percent-clipped=4.0
+2023-02-07 08:07:47,059 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9279, 1.4943, 3.4796, 1.4471, 2.3624, 3.7630, 3.9238, 3.2975],
+ device='cuda:0'), covar=tensor([0.1183, 0.1941, 0.0302, 0.2145, 0.1072, 0.0248, 0.0444, 0.0516],
+ device='cuda:0'), in_proj_covar=tensor([0.0299, 0.0323, 0.0287, 0.0317, 0.0315, 0.0274, 0.0430, 0.0303],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 08:07:51,774 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195029.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:07:54,443 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-07 08:07:59,734 INFO [train.py:901] (0/4) Epoch 25, batch 1050, loss[loss=0.1881, simple_loss=0.2767, pruned_loss=0.04977, over 8283.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.285, pruned_loss=0.05885, over 1612700.49 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:08:06,388 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-07 08:08:07,151 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195051.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:08:14,480 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195062.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 08:08:23,800 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195076.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:08:31,306 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195086.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:08:33,293 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195089.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:08:33,843 INFO [train.py:901] (0/4) Epoch 25, batch 1100, loss[loss=0.1753, simple_loss=0.2575, pruned_loss=0.04659, over 8075.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.285, pruned_loss=0.0591, over 1613385.10 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:08:37,357 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195095.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 08:08:41,229 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.550e+02 3.152e+02 4.111e+02 6.650e+02, threshold=6.304e+02, percent-clipped=3.0
+2023-02-07 08:08:48,157 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195111.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:08:48,180 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195111.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:08:54,919 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195120.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 08:09:09,218 INFO [train.py:901] (0/4) Epoch 25, batch 1150, loss[loss=0.1892, simple_loss=0.2687, pruned_loss=0.05489, over 7648.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2845, pruned_loss=0.05926, over 1610530.43 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:09:11,484 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8705, 2.2999, 3.7307, 1.9423, 1.8941, 3.7359, 0.6316, 2.3034],
+ device='cuda:0'), covar=tensor([0.1434, 0.1262, 0.0248, 0.1736, 0.2351, 0.0250, 0.2102, 0.1252],
+ device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0199, 0.0129, 0.0220, 0.0271, 0.0138, 0.0170, 0.0195],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-07 08:09:16,871 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-07 08:09:43,693 INFO [train.py:901] (0/4) Epoch 25, batch 1200, loss[loss=0.1651, simple_loss=0.2402, pruned_loss=0.04499, over 7213.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.0592, over 1603153.74 frames. ], batch size: 16, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:09:51,821 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.229e+02 2.843e+02 3.492e+02 1.399e+03, threshold=5.685e+02, percent-clipped=2.0
+2023-02-07 08:09:57,121 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195209.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:10:18,382 INFO [train.py:901] (0/4) Epoch 25, batch 1250, loss[loss=0.1781, simple_loss=0.2781, pruned_loss=0.03902, over 8033.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2847, pruned_loss=0.05946, over 1608720.57 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:10:20,610 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9965, 2.2790, 1.8330, 2.7779, 1.2892, 1.6672, 2.0438, 2.2149],
+ device='cuda:0'), covar=tensor([0.0662, 0.0634, 0.0873, 0.0336, 0.1066, 0.1215, 0.0734, 0.0729],
+ device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0197, 0.0246, 0.0214, 0.0206, 0.0247, 0.0251, 0.0208],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 08:10:53,105 INFO [train.py:901] (0/4) Epoch 25, batch 1300, loss[loss=0.217, simple_loss=0.3043, pruned_loss=0.06489, over 8466.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2844, pruned_loss=0.05899, over 1610616.70 frames. ], batch size: 25, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:11:00,277 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.452e+02 2.850e+02 4.025e+02 1.071e+03, threshold=5.700e+02, percent-clipped=7.0
+2023-02-07 08:11:16,288 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195324.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:11:26,821 INFO [train.py:901] (0/4) Epoch 25, batch 1350, loss[loss=0.2291, simple_loss=0.3026, pruned_loss=0.07783, over 7769.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.0593, over 1614374.45 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:11:42,194 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3631, 2.1332, 3.4775, 2.1021, 2.8546, 3.9191, 3.8833, 3.3856],
+ device='cuda:0'), covar=tensor([0.1038, 0.1545, 0.0534, 0.1789, 0.1287, 0.0246, 0.0659, 0.0569],
+ device='cuda:0'), in_proj_covar=tensor([0.0300, 0.0324, 0.0289, 0.0318, 0.0316, 0.0275, 0.0432, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-07 08:11:45,667 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195367.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:11:50,114 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195373.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:12:02,400 INFO [train.py:901] (0/4) Epoch 25, batch 1400, loss[loss=0.1963, simple_loss=0.2905, pruned_loss=0.051, over 8439.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2847, pruned_loss=0.05975, over 1615392.49 frames. ], batch size: 27, lr: 3.04e-03, grad_scale: 4.0
+2023-02-07 08:12:03,971 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:12:10,478 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.570e+02 2.915e+02 3.833e+02 8.465e+02, threshold=5.831e+02, percent-clipped=6.0
+2023-02-07 08:12:13,196 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195406.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 08:12:15,756 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:12:30,621 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195433.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:12:36,126 INFO [train.py:901] (0/4) Epoch 25, batch 1450, loss[loss=0.2409, simple_loss=0.3205, pruned_loss=0.08061, over 8231.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2854, pruned_loss=0.06071, over 1612520.33 frames. ], batch size: 48, lr: 3.04e-03, grad_scale: 4.0
+2023-02-07 08:12:40,059 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0
+2023-02-07 08:12:44,146 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-07 08:13:10,158 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195488.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:13:11,409 INFO [train.py:901] (0/4) Epoch 25, batch 1500, loss[loss=0.1821, simple_loss=0.2569, pruned_loss=0.0536, over 7442.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2858, pruned_loss=0.06044, over 1613557.38 frames. ], batch size: 17, lr: 3.04e-03, grad_scale: 4.0
+2023-02-07 08:13:18,980 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0
+2023-02-07 08:13:19,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.401e+02 3.375e+02 4.255e+02 1.024e+03, threshold=6.749e+02, percent-clipped=12.0
+2023-02-07 08:13:33,935 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195521.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 08:13:46,243 INFO [train.py:901] (0/4) Epoch 25, batch 1550, loss[loss=0.1657, simple_loss=0.2531, pruned_loss=0.03917, over 7801.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2848, pruned_loss=0.05998, over 1612022.02 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 4.0
+2023-02-07 08:13:51,896 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:14:05,612 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0921, 1.8770, 2.3238, 1.9961, 2.2074, 2.1605, 1.9929, 1.1568],
+ device='cuda:0'), covar=tensor([0.5405, 0.4680, 0.2034, 0.3326, 0.2366, 0.3090, 0.2029, 0.5009],
+ device='cuda:0'), in_proj_covar=tensor([0.0950, 0.1005, 0.0823, 0.0972, 0.1014, 0.0914, 0.0762, 0.0838],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-07 08:14:14,121 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195580.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:14:21,350 INFO [train.py:901] (0/4) Epoch 25, batch 1600, loss[loss=0.1842, simple_loss=0.2639, pruned_loss=0.0523, over 7934.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2847, pruned_loss=0.05971, over 1613372.01 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:14:22,241 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8604, 2.1537, 1.7877, 2.5920, 1.3125, 1.6231, 1.9871, 2.0738],
+ device='cuda:0'), covar=tensor([0.0738, 0.0669, 0.0932, 0.0356, 0.1019, 0.1259, 0.0729, 0.0745],
+ device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0214, 0.0206, 0.0246, 0.0251, 0.0208],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-07 08:14:29,476 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.317e+02 3.105e+02 3.813e+02 7.132e+02, threshold=6.211e+02, percent-clipped=3.0
+2023-02-07 08:14:32,310 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:14:55,970 INFO [train.py:901] (0/4) Epoch 25, batch 1650, loss[loss=0.2113, simple_loss=0.2716, pruned_loss=0.07554, over 7426.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2849, pruned_loss=0.05964, over 1615718.39 frames. ], batch size: 17, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:15:29,751 INFO [train.py:901] (0/4) Epoch 25, batch 1700, loss[loss=0.2079, simple_loss=0.2851, pruned_loss=0.06532, over 7803.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05971, over 1618641.36 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0
+2023-02-07 08:15:35,724 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs.
limit=5.0 +2023-02-07 08:15:38,031 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.590e+02 3.116e+02 3.996e+02 7.880e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-07 08:15:52,243 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 08:16:05,355 INFO [train.py:901] (0/4) Epoch 25, batch 1750, loss[loss=0.2048, simple_loss=0.2857, pruned_loss=0.06194, over 8352.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2857, pruned_loss=0.05986, over 1621226.48 frames. ], batch size: 26, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:16:06,413 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1424, 1.8979, 2.4655, 2.0045, 2.5199, 2.2075, 2.0275, 1.3081], + device='cuda:0'), covar=tensor([0.5840, 0.4968, 0.2100, 0.3863, 0.2450, 0.2998, 0.1903, 0.5401], + device='cuda:0'), in_proj_covar=tensor([0.0953, 0.1008, 0.0825, 0.0976, 0.1017, 0.0917, 0.0764, 0.0841], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 08:16:09,124 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195744.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:09,890 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.58 vs. limit=5.0 +2023-02-07 08:16:15,710 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:26,033 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195769.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:31,535 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195777.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:16:36,280 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9422, 1.8600, 2.8995, 2.1617, 2.5591, 2.0147, 1.7359, 1.4709], + device='cuda:0'), covar=tensor([0.6788, 0.5936, 0.1820, 0.3982, 0.3082, 0.4113, 0.2898, 0.5405], + device='cuda:0'), in_proj_covar=tensor([0.0954, 0.1008, 0.0825, 0.0977, 0.1016, 0.0917, 0.0764, 0.0841], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 08:16:40,101 INFO [train.py:901] (0/4) Epoch 25, batch 1800, loss[loss=0.1683, simple_loss=0.2515, pruned_loss=0.04254, over 7253.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2854, pruned_loss=0.05979, over 1621592.45 frames. ], batch size: 16, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:16:48,985 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.456e+02 2.857e+02 3.484e+02 7.816e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-07 08:16:49,208 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195802.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:16:50,558 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:56,738 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.72 vs. limit=5.0 +2023-02-07 08:17:07,903 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:08,883 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-07 08:17:15,238 INFO [train.py:901] (0/4) Epoch 25, batch 1850, loss[loss=0.2097, simple_loss=0.2921, pruned_loss=0.06367, over 8562.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05975, over 1618472.81 frames. ], batch size: 49, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:36,411 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:50,156 INFO [train.py:901] (0/4) Epoch 25, batch 1900, loss[loss=0.1911, simple_loss=0.2815, pruned_loss=0.05033, over 8466.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2851, pruned_loss=0.05971, over 1619025.82 frames. ], batch size: 27, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:58,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.686e+02 3.045e+02 3.689e+02 8.196e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-07 08:18:20,510 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5383, 1.4781, 1.8424, 1.1906, 1.2011, 1.8536, 0.2032, 1.1526], + device='cuda:0'), covar=tensor([0.1571, 0.1220, 0.0378, 0.0951, 0.2481, 0.0409, 0.2013, 0.1217], + device='cuda:0'), in_proj_covar=tensor([0.0194, 0.0200, 0.0130, 0.0219, 0.0271, 0.0139, 0.0171, 0.0196], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 08:18:24,462 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 08:18:25,126 INFO [train.py:901] (0/4) Epoch 25, batch 1950, loss[loss=0.2067, simple_loss=0.2848, pruned_loss=0.06428, over 8129.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2856, pruned_loss=0.06001, over 1617195.32 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:18:37,850 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 08:18:57,284 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 08:19:00,593 INFO [train.py:901] (0/4) Epoch 25, batch 2000, loss[loss=0.1936, simple_loss=0.2587, pruned_loss=0.06424, over 7563.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2859, pruned_loss=0.06048, over 1612807.69 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:19:07,497 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-196000.pt +2023-02-07 08:19:09,751 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.344e+02 2.823e+02 3.287e+02 7.423e+02, threshold=5.646e+02, percent-clipped=4.0 +2023-02-07 08:19:35,699 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1172, 1.9698, 2.5793, 2.1475, 2.5768, 2.1582, 2.0268, 1.3987], + device='cuda:0'), covar=tensor([0.5881, 0.5010, 0.2044, 0.3725, 0.2475, 0.3206, 0.2074, 0.5538], + device='cuda:0'), in_proj_covar=tensor([0.0949, 0.1001, 0.0819, 0.0971, 0.1010, 0.0911, 0.0759, 0.0835], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 08:19:36,115 INFO [train.py:901] (0/4) Epoch 25, batch 2050, loss[loss=0.2096, simple_loss=0.3003, pruned_loss=0.05947, over 8747.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2853, pruned_loss=0.05982, over 1615701.32 frames. ], batch size: 30, lr: 3.03e-03, grad_scale: 8.0
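The [checkpoint.py:75] entry above fires every 2000 global batches (checkpoint-196000.pt here, checkpoint-198000.pt later in this log), so the recipe checkpoints mid-epoch by batch index rather than only at epoch boundaries. A minimal sketch of that pattern in plain PyTorch; the helper name and payload below are illustrative, and the recipe's own checkpoint code saves additional state (sampler position, grad scaler, model-averaging statistics):

```python
import torch

def maybe_save_checkpoint(model, optimizer, exp_dir: str,
                          batch_idx_train: int, save_every_n: int = 2000):
    """Save a mid-epoch checkpoint keyed by the global batch index,
    mirroring the 'Saving checkpoint to .../checkpoint-196000.pt' lines.
    Illustrative sketch only: the real recipe stores more state."""
    if batch_idx_train == 0 or batch_idx_train % save_every_n != 0:
        return
    filename = f"{exp_dir}/checkpoint-{batch_idx_train}.pt"
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "batch_idx_train": batch_idx_train,
        },
        filename,
    )
    print(f"Saving checkpoint to {filename}")
```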
+2023-02-07 08:20:11,106 INFO [train.py:901] (0/4) Epoch 25, batch 2100, loss[loss=0.1957, simple_loss=0.2852, pruned_loss=0.05307, over 7811.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.0603, over 1618256.38 frames. ], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:20,386 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.403e+02 2.946e+02 3.659e+02 8.101e+02, threshold=5.892e+02, percent-clipped=3.0 +2023-02-07 08:20:35,879 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=196125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:20:46,046 INFO [train.py:901] (0/4) Epoch 25, batch 2150, loss[loss=0.2206, simple_loss=0.305, pruned_loss=0.0681, over 8626.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2861, pruned_loss=0.0606, over 1618016.49 frames. ], batch size: 34, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:54,019 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=196150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:21:22,081 INFO [train.py:901] (0/4) Epoch 25, batch 2200, loss[loss=0.197, simple_loss=0.2853, pruned_loss=0.05439, over 8288.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2872, pruned_loss=0.06092, over 1619368.53 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:21:30,653 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.555e+02 3.213e+02 4.289e+02 6.887e+02, threshold=6.426e+02, percent-clipped=5.0 +2023-02-07 08:21:56,962 INFO [train.py:901] (0/4) Epoch 25, batch 2250, loss[loss=0.2196, simple_loss=0.3108, pruned_loss=0.06417, over 8244.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2852, pruned_loss=0.05989, over 1614367.05 frames. ], batch size: 24, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:32,050 INFO [train.py:901] (0/4) Epoch 25, batch 2300, loss[loss=0.1753, simple_loss=0.2546, pruned_loss=0.04804, over 7410.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05967, over 1614456.14 frames. ], batch size: 17, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:40,957 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.794e+02 3.530e+02 9.865e+02, threshold=5.587e+02, percent-clipped=2.0 +2023-02-07 08:23:07,216 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:07,741 INFO [train.py:901] (0/4) Epoch 25, batch 2350, loss[loss=0.2296, simple_loss=0.3146, pruned_loss=0.07233, over 8315.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.05993, over 1613274.23 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:34,279 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:42,080 INFO [train.py:901] (0/4) Epoch 25, batch 2400, loss[loss=0.1672, simple_loss=0.2475, pruned_loss=0.04348, over 7796.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2852, pruned_loss=0.05977, over 1613936.93 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0
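The recurring [optim.py:369] records (e.g. `Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.902e+02 3.432e+02 7.434e+02, threshold=5.805e+02, percent-clipped=2.0`) track the distribution of recent gradient norms and how often the adaptive clipping threshold fired. A hedged sketch of how such a diagnostic can be produced with plain PyTorch; the function and the exact windowing are my reconstruction, not icefall's optimizer code:

```python
import torch

def clip_and_log(model, norm_history, clipping_scale=2.0, window=1000):
    """Clip the global grad norm to clipping_scale * median of recent
    norms, and report min/25%/50%/75%/max plus how often clipping fires,
    in the spirit of the [optim.py:369] log lines."""
    grads = [p.grad.detach().flatten()
             for p in model.parameters() if p.grad is not None]
    norm = torch.cat(grads).norm().item()
    norm_history.append(norm)
    del norm_history[:-window]                  # keep a sliding window

    hist = torch.tensor(norm_history)
    q = torch.quantile(hist, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
    threshold = clipping_scale * q[2].item()    # 2x the median norm
    percent_clipped = 100.0 * (hist > threshold).float().mean().item()

    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=threshold)
    print(f"Clipping_scale={clipping_scale}, grad-norm quartiles "
          f"{' '.join(f'{v:.3e}' for v in q.tolist())}, "
          f"threshold={threshold:.3e}, percent-clipped={percent_clipped:.1f}")
```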
+2023-02-07 08:23:50,280 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.902e+02 3.432e+02 7.434e+02, threshold=5.805e+02, percent-clipped=2.0 +2023-02-07 08:24:17,356 INFO [train.py:901] (0/4) Epoch 25, batch 2450, loss[loss=0.1707, simple_loss=0.2632, pruned_loss=0.0391, over 8235.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2857, pruned_loss=0.05974, over 1614037.17 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:24:51,934 INFO [train.py:901] (0/4) Epoch 25, batch 2500, loss[loss=0.2036, simple_loss=0.2837, pruned_loss=0.06175, over 8241.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2855, pruned_loss=0.05976, over 1615037.49 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:00,802 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.398e+02 2.858e+02 3.242e+02 5.404e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 08:25:27,002 INFO [train.py:901] (0/4) Epoch 25, batch 2550, loss[loss=0.1802, simple_loss=0.2567, pruned_loss=0.05183, over 7924.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05979, over 1613165.04 frames. ], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:33,106 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4640, 2.7200, 2.2727, 4.0839, 1.5663, 1.9593, 2.4573, 2.9285], + device='cuda:0'), covar=tensor([0.0707, 0.0831, 0.0872, 0.0246, 0.1063, 0.1254, 0.0928, 0.0756], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0205, 0.0247, 0.0249, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 08:26:02,151 INFO [train.py:901] (0/4) Epoch 25, batch 2600, loss[loss=0.1822, simple_loss=0.2541, pruned_loss=0.05516, over 7427.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.05995, over 1610460.88 frames. ], batch size: 17, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:26:06,407 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:26:10,247 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.366e+02 2.911e+02 3.287e+02 8.101e+02, threshold=5.822e+02, percent-clipped=1.0 +2023-02-07 08:26:18,993 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-07 08:26:37,066 INFO [train.py:901] (0/4) Epoch 25, batch 2650, loss[loss=0.1598, simple_loss=0.2462, pruned_loss=0.03667, over 8087.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2849, pruned_loss=0.05963, over 1608695.18 frames. ], batch size: 21, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:26:56,396 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9911, 1.4523, 3.3621, 1.4697, 2.3861, 3.6876, 3.7819, 3.1874], + device='cuda:0'), covar=tensor([0.1216, 0.1972, 0.0337, 0.2274, 0.1139, 0.0252, 0.0576, 0.0520], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0325, 0.0291, 0.0320, 0.0318, 0.0276, 0.0435, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 08:26:58,181 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
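The interleaved [scaling.py:679] records such as `Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0` come from a Zipformer "whitening" constraint that watches whether groups of channels stay decorrelated with roughly equal variance. Below is a hedged sketch of one way such a metric can be computed; the formula is my reconstruction (it equals 1.0 for perfectly white activations and grows as the covariance eigenvalues spread), not necessarily icefall's exact code:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """How far activations are from 'white' within each channel group.

    Returns 1.0 when a group's covariance is a multiple of the identity
    and a larger value otherwise, which is the shape of the quantity in
    the 'Whitening: ... metric=1.25 vs. limit=2.0' records."""
    x = x.reshape(-1, x.shape[-1])                 # (frames, channels)
    num_frames, num_channels = x.shape
    cpg = num_channels // num_groups               # channels per group
    x = x.reshape(num_frames, num_groups, cpg).transpose(0, 1)
    cov = torch.matmul(x.transpose(1, 2), x) / num_frames  # (groups, cpg, cpg)
    trace_c = cov.diagonal(dim1=1, dim2=2).sum(-1)          # sum of eigenvalues
    trace_c2 = (cov * cov.transpose(1, 2)).sum(dim=(1, 2))  # sum of squared eigenvalues
    # (mean eigenvalue of C^2) / (mean eigenvalue of C)^2 == 1.0 iff white
    metric = (trace_c2 / cpg) / ((trace_c / cpg) ** 2 + 1e-20)
    return metric.mean()
```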
+2023-02-07 08:27:03,507 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6826, 2.3521, 4.0262, 1.5915, 2.9747, 2.2728, 1.8539, 2.8596], + device='cuda:0'), covar=tensor([0.1951, 0.2659, 0.0697, 0.4724, 0.1941, 0.3325, 0.2330, 0.2367], + device='cuda:0'), in_proj_covar=tensor([0.0532, 0.0619, 0.0555, 0.0657, 0.0653, 0.0601, 0.0548, 0.0636], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 08:27:08,208 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:12,710 INFO [train.py:901] (0/4) Epoch 25, batch 2700, loss[loss=0.3188, simple_loss=0.3594, pruned_loss=0.1391, over 7326.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2853, pruned_loss=0.05978, over 1613540.38 frames. ], batch size: 72, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:20,586 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.452e+02 2.909e+02 3.648e+02 8.771e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-07 08:27:22,823 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2080, 1.0571, 1.2620, 1.0564, 1.0261, 1.2900, 0.0955, 0.9604], + device='cuda:0'), covar=tensor([0.1480, 0.1320, 0.0482, 0.0681, 0.2448, 0.0549, 0.1929, 0.1183], + device='cuda:0'), in_proj_covar=tensor([0.0195, 0.0201, 0.0131, 0.0220, 0.0272, 0.0140, 0.0171, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 08:27:33,978 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:40,540 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-07 08:27:46,998 INFO [train.py:901] (0/4) Epoch 25, batch 2750, loss[loss=0.2275, simple_loss=0.3067, pruned_loss=0.07413, over 8544.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2854, pruned_loss=0.05938, over 1613139.88 frames. ], batch size: 34, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:28:22,174 INFO [train.py:901] (0/4) Epoch 25, batch 2800, loss[loss=0.2179, simple_loss=0.3029, pruned_loss=0.06647, over 8463.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05934, over 1609580.88 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0
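The [zipformer.py:1185] records that follow (`warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop=...`) come from Zipformer's stochastic layer skipping: whole encoder layers are occasionally bypassed for a batch, with each layer stack having its own warmup window, and an occasional layer is still dropped even this late in training (batch_count around 196k, far past warmup_end). A hedged sketch of the shape of such a schedule; the probabilities and the decay rule below are invented for illustration and differ from the real Zipformer logic:

```python
import random

def pick_layers_to_drop(num_layers: int, batch_count: float,
                        warmup_begin: float, warmup_end: float,
                        base_drop_prob: float = 0.05) -> set:
    """Illustrative layer-skip schedule in the spirit of the
    [zipformer.py:1185] log lines: drop whole layers with a probability
    that is elevated inside the warmup window and small (but nonzero)
    afterwards, logging which layers were skipped."""
    if batch_count < warmup_begin:
        drop_prob = 0.0                    # module not yet active
    elif batch_count < warmup_end:
        # decay linearly from a high rate to the base rate during warmup
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        drop_prob = 0.5 * (1.0 - frac) + base_drop_prob * frac
    else:
        drop_prob = base_drop_prob         # occasional drops late in training

    layers_to_drop = {i for i in range(num_layers) if random.random() < drop_prob}
    print(f"warmup_begin={warmup_begin}, warmup_end={warmup_end}, "
          f"batch_count={batch_count}, num_to_drop={len(layers_to_drop)}, "
          f"layers_to_drop={layers_to_drop or set()}")
    return layers_to_drop
```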
+2023-02-07 08:28:27,861 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:28,540 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:31,141 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.445e+02 2.946e+02 3.604e+02 6.151e+02, threshold=5.892e+02, percent-clipped=2.0 +2023-02-07 08:28:40,900 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3712, 3.7213, 2.3461, 3.1312, 2.9798, 2.1173, 3.0059, 3.1986], + device='cuda:0'), covar=tensor([0.1559, 0.0305, 0.1108, 0.0672, 0.0697, 0.1456, 0.1029, 0.0947], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0234, 0.0337, 0.0309, 0.0299, 0.0342, 0.0347, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 08:28:54,947 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:56,679 INFO [train.py:901] (0/4) Epoch 25, batch 2850, loss[loss=0.2616, simple_loss=0.3393, pruned_loss=0.09195, over 8688.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.05931, over 1605377.68 frames. ], batch size: 49, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:19,391 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:32,393 INFO [train.py:901] (0/4) Epoch 25, batch 2900, loss[loss=0.1874, simple_loss=0.2629, pruned_loss=0.05594, over 7526.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05926, over 1604087.66 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:39,419 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:41,311 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.504e+02 3.053e+02 3.742e+02 6.617e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-07 08:29:49,198 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1535, 1.3063, 4.3309, 1.6078, 3.8319, 3.5860, 3.9298, 3.7757], + device='cuda:0'), covar=tensor([0.0600, 0.5032, 0.0632, 0.4348, 0.1265, 0.1050, 0.0600, 0.0790], + device='cuda:0'), in_proj_covar=tensor([0.0651, 0.0655, 0.0716, 0.0645, 0.0726, 0.0618, 0.0623, 0.0696], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 08:30:08,121 INFO [train.py:901] (0/4) Epoch 25, batch 2950, loss[loss=0.1735, simple_loss=0.2653, pruned_loss=0.04088, over 8323.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2831, pruned_loss=0.05891, over 1605203.88 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:08,201 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:30:08,827 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training.
Duration: 0.7545625 +2023-02-07 08:30:16,154 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9965, 6.1083, 5.2143, 2.7013, 5.4248, 5.7900, 5.5470, 5.5254], + device='cuda:0'), covar=tensor([0.0572, 0.0342, 0.0891, 0.4321, 0.0754, 0.0704, 0.1073, 0.0459], + device='cuda:0'), in_proj_covar=tensor([0.0533, 0.0450, 0.0437, 0.0545, 0.0435, 0.0454, 0.0429, 0.0400], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 08:30:37,361 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7933, 1.7111, 2.5354, 1.4908, 1.3949, 2.4674, 0.3935, 1.4290], + device='cuda:0'), covar=tensor([0.1411, 0.1146, 0.0272, 0.1257, 0.2242, 0.0386, 0.2030, 0.1349], + device='cuda:0'), in_proj_covar=tensor([0.0193, 0.0200, 0.0131, 0.0220, 0.0271, 0.0139, 0.0170, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 08:30:42,478 INFO [train.py:901] (0/4) Epoch 25, batch 3000, loss[loss=0.2209, simple_loss=0.2823, pruned_loss=0.07974, over 7248.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2828, pruned_loss=0.05919, over 1601671.32 frames. ], batch size: 16, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:42,478 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 08:30:55,639 INFO [train.py:935] (0/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2721, pruned_loss=0.03618, over 944034.00 frames. +2023-02-07 08:30:55,640 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB +2023-02-07 08:31:03,960 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 2.477e+02 2.955e+02 3.925e+02 7.788e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 08:31:12,606 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6100, 2.5668, 1.5900, 2.3697, 2.2208, 1.3895, 2.0900, 2.2734], + device='cuda:0'), covar=tensor([0.1546, 0.0533, 0.1543, 0.0673, 0.0851, 0.2072, 0.1195, 0.1006], + device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0237, 0.0339, 0.0311, 0.0301, 0.0344, 0.0349, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 08:31:21,841 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 08:31:30,698 INFO [train.py:901] (0/4) Epoch 25, batch 3050, loss[loss=0.1618, simple_loss=0.2487, pruned_loss=0.03746, over 7811.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2829, pruned_loss=0.05928, over 1604324.08 frames. ], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:31:40,512 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197054.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:41,146 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:54,610 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 08:31:57,855 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197079.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:05,201 INFO [train.py:901] (0/4) Epoch 25, batch 3100, loss[loss=0.2193, simple_loss=0.3012, pruned_loss=0.0687, over 8097.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2842, pruned_loss=0.06006, over 1601599.30 frames. 
], batch size: 23, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:07,466 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:13,235 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.425e+02 3.089e+02 3.818e+02 7.102e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-07 08:32:13,693 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 08:32:24,947 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:40,196 INFO [train.py:901] (0/4) Epoch 25, batch 3150, loss[loss=0.1917, simple_loss=0.2814, pruned_loss=0.05107, over 8523.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.283, pruned_loss=0.05933, over 1603742.76 frames. ], batch size: 31, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:40,969 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:47,802 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 08:32:53,744 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3439, 2.7909, 2.3004, 3.9310, 1.5015, 2.0105, 2.3878, 2.9780], + device='cuda:0'), covar=tensor([0.0740, 0.0735, 0.0777, 0.0249, 0.1099, 0.1195, 0.0995, 0.0751], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0197, 0.0245, 0.0213, 0.0206, 0.0247, 0.0251, 0.0207], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 08:33:15,297 INFO [train.py:901] (0/4) Epoch 25, batch 3200, loss[loss=0.1971, simple_loss=0.2853, pruned_loss=0.05447, over 8323.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2831, pruned_loss=0.05932, over 1605584.35 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:23,539 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.400e+02 2.739e+02 3.315e+02 1.024e+03, threshold=5.479e+02, percent-clipped=5.0 +2023-02-07 08:33:33,166 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:33:50,301 INFO [train.py:901] (0/4) Epoch 25, batch 3250, loss[loss=0.2353, simple_loss=0.322, pruned_loss=0.0743, over 8295.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.283, pruned_loss=0.05926, over 1602898.90 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:52,457 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:02,152 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:25,383 INFO [train.py:901] (0/4) Epoch 25, batch 3300, loss[loss=0.2086, simple_loss=0.2958, pruned_loss=0.06074, over 8524.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2835, pruned_loss=0.05922, over 1606634.49 frames. 
], batch size: 28, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:34:34,250 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.560e+02 3.230e+02 4.212e+02 8.703e+02, threshold=6.460e+02, percent-clipped=10.0 +2023-02-07 08:34:40,485 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:41,135 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6384, 1.9251, 1.9925, 1.3508, 2.0912, 1.4831, 0.7344, 1.8708], + device='cuda:0'), covar=tensor([0.0906, 0.0467, 0.0427, 0.0886, 0.0704, 0.1142, 0.1163, 0.0512], + device='cuda:0'), in_proj_covar=tensor([0.0467, 0.0403, 0.0362, 0.0457, 0.0387, 0.0544, 0.0401, 0.0433], + device='cuda:0'), out_proj_covar=tensor([1.2421e-04, 1.0507e-04, 9.4827e-05, 1.1986e-04, 1.0140e-04, 1.5233e-04, + 1.0755e-04, 1.1386e-04], device='cuda:0') +2023-02-07 08:34:54,230 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:57,740 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:00,245 INFO [train.py:901] (0/4) Epoch 25, batch 3350, loss[loss=0.2101, simple_loss=0.2868, pruned_loss=0.06669, over 8328.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2839, pruned_loss=0.05966, over 1608012.74 frames. ], batch size: 26, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:35:13,330 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:36,174 INFO [train.py:901] (0/4) Epoch 25, batch 3400, loss[loss=0.1887, simple_loss=0.2704, pruned_loss=0.05344, over 8244.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2846, pruned_loss=0.05982, over 1612915.85 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:35:44,268 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.489e+02 3.044e+02 3.734e+02 7.163e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 08:36:06,293 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197433.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:36:11,060 INFO [train.py:901] (0/4) Epoch 25, batch 3450, loss[loss=0.2286, simple_loss=0.3194, pruned_loss=0.0689, over 8325.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.286, pruned_loss=0.06051, over 1615655.29 frames. ], batch size: 26, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:46,261 INFO [train.py:901] (0/4) Epoch 25, batch 3500, loss[loss=0.2401, simple_loss=0.3177, pruned_loss=0.08129, over 8411.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06134, over 1616700.04 frames. 
], batch size: 49, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:48,493 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0042, 1.6155, 1.4529, 1.4957, 1.3405, 1.3189, 1.2555, 1.2768], + device='cuda:0'), covar=tensor([0.1259, 0.0508, 0.1354, 0.0629, 0.0779, 0.1597, 0.1106, 0.0878], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0237, 0.0341, 0.0312, 0.0301, 0.0345, 0.0351, 0.0324], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 08:36:54,913 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.387e+02 2.953e+02 3.537e+02 5.869e+02, threshold=5.907e+02, percent-clipped=0.0 +2023-02-07 08:37:02,029 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:07,274 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 08:37:17,727 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:19,766 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:21,599 INFO [train.py:901] (0/4) Epoch 25, batch 3550, loss[loss=0.2473, simple_loss=0.3239, pruned_loss=0.08531, over 8348.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2876, pruned_loss=0.0611, over 1616506.69 frames. ], batch size: 24, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:37:31,951 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.2608, 1.9027, 5.4324, 2.5087, 4.9021, 4.5606, 4.9998, 4.8840], + device='cuda:0'), covar=tensor([0.0495, 0.4639, 0.0450, 0.3894, 0.1002, 0.0965, 0.0523, 0.0581], + device='cuda:0'), in_proj_covar=tensor([0.0651, 0.0655, 0.0713, 0.0646, 0.0727, 0.0619, 0.0622, 0.0695], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 08:37:37,370 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197563.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:54,484 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:56,262 INFO [train.py:901] (0/4) Epoch 25, batch 3600, loss[loss=0.1836, simple_loss=0.2682, pruned_loss=0.04952, over 8248.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2869, pruned_loss=0.0607, over 1615627.64 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:05,202 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.311e+02 2.881e+02 3.803e+02 6.346e+02, threshold=5.762e+02, percent-clipped=1.0 +2023-02-07 08:38:12,126 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197612.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:13,530 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,211 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,670 INFO [train.py:901] (0/4) Epoch 25, batch 3650, loss[loss=0.235, simple_loss=0.3277, pruned_loss=0.07117, over 8605.00 frames. 
], tot_loss[loss=0.2036, simple_loss=0.2865, pruned_loss=0.06032, over 1616788.54 frames. ], batch size: 31, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:39:06,718 INFO [train.py:901] (0/4) Epoch 25, batch 3700, loss[loss=0.1895, simple_loss=0.2797, pruned_loss=0.04968, over 8484.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.286, pruned_loss=0.06012, over 1614460.34 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:39:09,552 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 08:39:15,746 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.484e+02 2.942e+02 3.783e+02 7.174e+02, threshold=5.884e+02, percent-clipped=5.0 +2023-02-07 08:39:43,097 INFO [train.py:901] (0/4) Epoch 25, batch 3750, loss[loss=0.1819, simple_loss=0.2652, pruned_loss=0.04931, over 7805.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2855, pruned_loss=0.06011, over 1613041.08 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:09,408 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197777.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:40:13,697 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 08:40:18,201 INFO [train.py:901] (0/4) Epoch 25, batch 3800, loss[loss=0.2362, simple_loss=0.322, pruned_loss=0.07517, over 8504.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2869, pruned_loss=0.06071, over 1615491.75 frames. ], batch size: 28, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:26,494 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.549e+02 3.044e+02 3.681e+02 9.424e+02, threshold=6.087e+02, percent-clipped=5.0 +2023-02-07 08:40:53,469 INFO [train.py:901] (0/4) Epoch 25, batch 3850, loss[loss=0.2098, simple_loss=0.2963, pruned_loss=0.06162, over 8133.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2872, pruned_loss=0.0607, over 1617203.40 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:41:12,912 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 08:41:19,583 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:28,518 INFO [train.py:901] (0/4) Epoch 25, batch 3900, loss[loss=0.1843, simple_loss=0.2764, pruned_loss=0.04615, over 8142.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2851, pruned_loss=0.05928, over 1619015.46 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:41:29,948 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197892.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:41:36,382 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.445e+02 2.982e+02 3.609e+02 8.629e+02, threshold=5.963e+02, percent-clipped=3.0 +2023-02-07 08:41:39,816 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197907.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:41,927 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:50,299 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. 
limit=2.0 +2023-02-07 08:42:02,719 INFO [train.py:901] (0/4) Epoch 25, batch 3950, loss[loss=0.2053, simple_loss=0.2951, pruned_loss=0.05773, over 8486.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05893, over 1617499.13 frames. ], batch size: 28, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:26,595 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-07 08:42:37,828 INFO [train.py:901] (0/4) Epoch 25, batch 4000, loss[loss=0.2009, simple_loss=0.2797, pruned_loss=0.06107, over 7912.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05799, over 1615138.86 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:40,157 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:45,677 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-198000.pt +2023-02-07 08:42:47,782 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.312e+02 2.768e+02 3.562e+02 7.475e+02, threshold=5.536e+02, percent-clipped=2.0 +2023-02-07 08:43:00,833 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.9402, 2.0458, 4.1026, 2.2409, 3.7459, 3.4820, 3.7976, 3.7087], + device='cuda:0'), covar=tensor([0.0664, 0.3745, 0.0807, 0.3825, 0.0900, 0.0903, 0.0608, 0.0602], + device='cuda:0'), in_proj_covar=tensor([0.0651, 0.0656, 0.0718, 0.0645, 0.0726, 0.0621, 0.0624, 0.0696], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 08:43:01,554 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198022.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:07,334 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5447, 1.7831, 1.8726, 1.6828, 0.9834, 1.7082, 2.1819, 1.7910], + device='cuda:0'), covar=tensor([0.0492, 0.1182, 0.1707, 0.1387, 0.0597, 0.1445, 0.0628, 0.0645], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 08:43:14,007 INFO [train.py:901] (0/4) Epoch 25, batch 4050, loss[loss=0.1994, simple_loss=0.2875, pruned_loss=0.05572, over 8244.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2828, pruned_loss=0.05801, over 1616058.17 frames. ], batch size: 24, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:48,800 INFO [train.py:901] (0/4) Epoch 25, batch 4100, loss[loss=0.1709, simple_loss=0.2745, pruned_loss=0.03369, over 8033.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2831, pruned_loss=0.05764, over 1617798.66 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:55,121 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198099.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:57,001 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.413e+02 2.876e+02 3.434e+02 5.292e+02, threshold=5.752e+02, percent-clipped=1.0 +2023-02-07 08:44:24,280 INFO [train.py:901] (0/4) Epoch 25, batch 4150, loss[loss=0.2067, simple_loss=0.2896, pruned_loss=0.06192, over 8132.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2836, pruned_loss=0.05778, over 1617511.25 frames. 
], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:44:29,932 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198148.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:44:47,413 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198173.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:44:58,967 INFO [train.py:901] (0/4) Epoch 25, batch 4200, loss[loss=0.1733, simple_loss=0.2618, pruned_loss=0.04234, over 8188.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2841, pruned_loss=0.05809, over 1617246.19 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:08,035 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.351e+02 3.091e+02 3.845e+02 7.201e+02, threshold=6.182e+02, percent-clipped=4.0 +2023-02-07 08:45:09,406 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 08:45:33,174 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 08:45:35,185 INFO [train.py:901] (0/4) Epoch 25, batch 4250, loss[loss=0.1919, simple_loss=0.2646, pruned_loss=0.05965, over 7800.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2851, pruned_loss=0.05849, over 1617452.09 frames. ], batch size: 19, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:41,594 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:44,716 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198254.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:59,326 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:02,103 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198278.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:09,935 INFO [train.py:901] (0/4) Epoch 25, batch 4300, loss[loss=0.163, simple_loss=0.248, pruned_loss=0.03907, over 8085.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2848, pruned_loss=0.05869, over 1613057.62 frames. ], batch size: 21, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:46:18,877 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.314e+02 2.735e+02 3.533e+02 6.805e+02, threshold=5.471e+02, percent-clipped=1.0 +2023-02-07 08:46:19,845 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198303.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:45,628 INFO [train.py:901] (0/4) Epoch 25, batch 4350, loss[loss=0.156, simple_loss=0.2284, pruned_loss=0.04185, over 6820.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2832, pruned_loss=0.05761, over 1613359.51 frames. ], batch size: 15, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:47:04,265 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 08:47:06,468 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:47:21,572 INFO [train.py:901] (0/4) Epoch 25, batch 4400, loss[loss=0.2168, simple_loss=0.3001, pruned_loss=0.0667, over 8335.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.05788, over 1612863.49 frames. 
], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:29,521 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.496e+02 2.935e+02 3.768e+02 7.665e+02, threshold=5.870e+02, percent-clipped=6.0 +2023-02-07 08:47:45,274 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 08:47:56,744 INFO [train.py:901] (0/4) Epoch 25, batch 4450, loss[loss=0.2231, simple_loss=0.2993, pruned_loss=0.0734, over 8185.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2843, pruned_loss=0.05858, over 1613372.12 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:58,932 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:48:01,856 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1375, 1.9072, 2.3515, 2.0690, 2.2976, 2.1975, 2.0173, 1.1700], + device='cuda:0'), covar=tensor([0.5915, 0.4637, 0.1982, 0.3617, 0.2542, 0.3209, 0.1919, 0.5118], + device='cuda:0'), in_proj_covar=tensor([0.0945, 0.0998, 0.0816, 0.0969, 0.1010, 0.0911, 0.0757, 0.0835], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 08:48:31,963 INFO [train.py:901] (0/4) Epoch 25, batch 4500, loss[loss=0.223, simple_loss=0.2896, pruned_loss=0.07823, over 7923.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05936, over 1611010.17 frames. ], batch size: 20, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:48:40,439 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.274e+02 2.771e+02 3.541e+02 5.802e+02, threshold=5.543e+02, percent-clipped=0.0 +2023-02-07 08:48:40,470 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 08:48:51,900 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:06,134 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8541, 2.3790, 4.0608, 1.7569, 2.9536, 2.3752, 2.0152, 2.9751], + device='cuda:0'), covar=tensor([0.1922, 0.2806, 0.0949, 0.4716, 0.2118, 0.3275, 0.2439, 0.2437], + device='cuda:0'), in_proj_covar=tensor([0.0533, 0.0622, 0.0557, 0.0658, 0.0657, 0.0605, 0.0552, 0.0639], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 08:49:08,830 INFO [train.py:901] (0/4) Epoch 25, batch 4550, loss[loss=0.1921, simple_loss=0.2782, pruned_loss=0.05298, over 8240.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2826, pruned_loss=0.05884, over 1607525.61 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:22,064 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:24,449 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 08:49:44,786 INFO [train.py:901] (0/4) Epoch 25, batch 4600, loss[loss=0.1743, simple_loss=0.2596, pruned_loss=0.04455, over 7719.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2844, pruned_loss=0.06005, over 1612040.41 frames. 
], batch size: 18, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:49:52,976 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.449e+02 2.940e+02 3.432e+02 8.422e+02, threshold=5.881e+02, percent-clipped=6.0
+2023-02-07 08:50:09,321 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198625.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:50:14,653 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198633.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:50:19,157 INFO [train.py:901] (0/4) Epoch 25, batch 4650, loss[loss=0.1844, simple_loss=0.2801, pruned_loss=0.04434, over 8452.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.284, pruned_loss=0.05958, over 1615112.78 frames. ], batch size: 27, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:50:26,829 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198650.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:50:54,514 INFO [train.py:901] (0/4) Epoch 25, batch 4700, loss[loss=0.1589, simple_loss=0.2489, pruned_loss=0.03441, over 7656.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2843, pruned_loss=0.05929, over 1617496.65 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:50:58,177 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2413, 1.0894, 1.2880, 1.0389, 1.0081, 1.3274, 0.0578, 0.9550],
+        device='cuda:0'), covar=tensor([0.1418, 0.1209, 0.0535, 0.0676, 0.2427, 0.0517, 0.1993, 0.1276],
+        device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0222, 0.0275, 0.0141, 0.0172, 0.0198],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 08:51:03,372 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.164e+02 2.735e+02 3.323e+02 7.623e+02, threshold=5.470e+02, percent-clipped=2.0
+2023-02-07 08:51:12,343 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5809, 1.5063, 1.7991, 1.4472, 0.8925, 1.5829, 2.1113, 1.9601],
+        device='cuda:0'), covar=tensor([0.0513, 0.1396, 0.1711, 0.1574, 0.0681, 0.1619, 0.0685, 0.0623],
+        device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145],
+        device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+        device='cuda:0')
+2023-02-07 08:51:17,651 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2714, 2.4730, 1.9960, 2.9905, 1.4129, 1.7868, 2.2625, 2.4367],
+        device='cuda:0'), covar=tensor([0.0661, 0.0732, 0.0866, 0.0326, 0.1092, 0.1203, 0.0782, 0.0686],
+        device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0245, 0.0212, 0.0203, 0.0246, 0.0247, 0.0206],
+        device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+        device='cuda:0')
+2023-02-07 08:51:29,737 INFO [train.py:901] (0/4) Epoch 25, batch 4750, loss[loss=0.2148, simple_loss=0.288, pruned_loss=0.07077, over 7686.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2848, pruned_loss=0.05949, over 1620149.39 frames. ], batch size: 18, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:51:42,027 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-07 08:51:45,378 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-07 08:52:05,184 INFO [train.py:901] (0/4) Epoch 25, batch 4800, loss[loss=0.2051, simple_loss=0.2929, pruned_loss=0.05865, over 8475.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05826, over 1619228.89 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:52:13,383 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 2.392e+02 2.917e+02 3.409e+02 6.169e+02, threshold=5.835e+02, percent-clipped=3.0
+2023-02-07 08:52:22,025 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198814.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:52:36,111 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-07 08:52:39,633 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198839.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:52:40,103 INFO [train.py:901] (0/4) Epoch 25, batch 4850, loss[loss=0.1906, simple_loss=0.2616, pruned_loss=0.05977, over 7796.00 frames. ], tot_loss[loss=0.199, simple_loss=0.282, pruned_loss=0.05797, over 1613092.00 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:52:54,171 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3202, 2.1133, 1.6612, 1.9576, 1.7416, 1.3331, 1.7429, 1.7319],
+        device='cuda:0'), covar=tensor([0.1352, 0.0460, 0.1329, 0.0538, 0.0820, 0.1735, 0.0927, 0.0938],
+        device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0234, 0.0338, 0.0310, 0.0299, 0.0342, 0.0344, 0.0317],
+        device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+        device='cuda:0')
+2023-02-07 08:52:55,418 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:53:01,725 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198870.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:53:14,818 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6555, 2.6294, 1.8792, 2.3510, 2.0755, 1.5067, 2.0715, 2.1949],
+        device='cuda:0'), covar=tensor([0.1299, 0.0399, 0.1246, 0.0516, 0.0771, 0.1692, 0.1002, 0.0847],
+        device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0235, 0.0340, 0.0311, 0.0301, 0.0343, 0.0346, 0.0319],
+        device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+        device='cuda:0')
+2023-02-07 08:53:15,878 INFO [train.py:901] (0/4) Epoch 25, batch 4900, loss[loss=0.2034, simple_loss=0.2872, pruned_loss=0.05985, over 8472.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2818, pruned_loss=0.05844, over 1612414.75 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:53:22,929 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8848, 1.8128, 2.4115, 1.5951, 1.4391, 2.4550, 0.3445, 1.5476],
+        device='cuda:0'), covar=tensor([0.1830, 0.1291, 0.0432, 0.1098, 0.2447, 0.0423, 0.2074, 0.1171],
+        device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0201, 0.0132, 0.0221, 0.0274, 0.0141, 0.0171, 0.0198],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 08:53:24,147 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198901.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:53:24,667 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.376e+02 2.954e+02 3.660e+02 6.336e+02, threshold=5.908e+02, percent-clipped=3.0
+2023-02-07 08:53:50,031 INFO [train.py:901] (0/4) Epoch 25, batch 4950, loss[loss=0.2363, simple_loss=0.3177, pruned_loss=0.07742, over 8194.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2823, pruned_loss=0.05815, over 1616520.77 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:53:54,413 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198945.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:54:01,873 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9192, 1.7491, 2.5551, 1.7344, 1.4456, 2.5413, 0.4601, 1.6237],
+        device='cuda:0'), covar=tensor([0.1415, 0.1141, 0.0284, 0.1009, 0.2385, 0.0319, 0.2015, 0.1366],
+        device='cuda:0'), in_proj_covar=tensor([0.0195, 0.0201, 0.0132, 0.0220, 0.0274, 0.0141, 0.0171, 0.0198],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 08:54:15,936 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198976.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:54:16,526 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198977.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:54:25,251 INFO [train.py:901] (0/4) Epoch 25, batch 5000, loss[loss=0.2224, simple_loss=0.302, pruned_loss=0.0714, over 8334.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2827, pruned_loss=0.05805, over 1614046.64 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:54:33,925 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 2.360e+02 2.883e+02 3.509e+02 6.136e+02, threshold=5.766e+02, percent-clipped=1.0
+2023-02-07 08:54:59,850 INFO [train.py:901] (0/4) Epoch 25, batch 5050, loss[loss=0.2057, simple_loss=0.2908, pruned_loss=0.06029, over 8250.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05802, over 1611507.02 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:55:02,109 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1661, 1.6038, 1.7355, 1.5577, 1.1362, 1.5995, 1.9046, 1.6923],
+        device='cuda:0'), covar=tensor([0.0567, 0.1255, 0.1685, 0.1447, 0.0631, 0.1456, 0.0708, 0.0632],
+        device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0163, 0.0113, 0.0145],
+        device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+        device='cuda:0')
+2023-02-07 08:55:14,358 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-07 08:55:35,768 INFO [train.py:901] (0/4) Epoch 25, batch 5100, loss[loss=0.2238, simple_loss=0.323, pruned_loss=0.06232, over 8112.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05832, over 1611102.14 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:55:37,391 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199092.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:55:44,128 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.470e+02 3.005e+02 3.768e+02 7.063e+02, threshold=6.010e+02, percent-clipped=5.0
+2023-02-07 08:56:11,855 INFO [train.py:901] (0/4) Epoch 25, batch 5150, loss[loss=0.1791, simple_loss=0.265, pruned_loss=0.04662, over 7796.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2834, pruned_loss=0.05878, over 1616318.81 frames. ], batch size: 20, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:56:47,047 INFO [train.py:901] (0/4) Epoch 25, batch 5200, loss[loss=0.21, simple_loss=0.2972, pruned_loss=0.06144, over 8243.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2847, pruned_loss=0.05925, over 1616188.77 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:56:49,909 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199194.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:56:55,025 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 2.381e+02 2.894e+02 3.514e+02 1.206e+03, threshold=5.788e+02, percent-clipped=6.0
+2023-02-07 08:57:04,086 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199214.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:57:05,109 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0
+2023-02-07 08:57:12,750 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-07 08:57:17,170 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199232.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:57:22,321 INFO [train.py:901] (0/4) Epoch 25, batch 5250, loss[loss=0.2078, simple_loss=0.307, pruned_loss=0.05431, over 8530.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.06005, over 1612965.42 frames. ], batch size: 28, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:57:25,795 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199245.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:57:29,645 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-07 08:57:34,066 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2144, 4.1657, 3.7500, 1.9250, 3.7220, 3.8690, 3.7557, 3.7225],
+        device='cuda:0'), covar=tensor([0.0812, 0.0591, 0.1101, 0.4369, 0.0916, 0.1174, 0.1374, 0.0802],
+        device='cuda:0'), in_proj_covar=tensor([0.0542, 0.0456, 0.0442, 0.0555, 0.0439, 0.0460, 0.0432, 0.0404],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 08:57:34,818 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199257.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:57:56,824 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199289.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:57:57,409 INFO [train.py:901] (0/4) Epoch 25, batch 5300, loss[loss=0.2012, simple_loss=0.2931, pruned_loss=0.05466, over 8342.00 frames. ], tot_loss[loss=0.204, simple_loss=0.287, pruned_loss=0.06047, over 1618544.07 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:58:05,706 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.313e+02 2.718e+02 3.488e+02 6.386e+02, threshold=5.437e+02, percent-clipped=3.0
+2023-02-07 08:58:25,198 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199329.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:58:32,735 INFO [train.py:901] (0/4) Epoch 25, batch 5350, loss[loss=0.2529, simple_loss=0.3352, pruned_loss=0.08529, over 8335.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2854, pruned_loss=0.05948, over 1618656.29 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 16.0
+2023-02-07 08:58:38,526 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199348.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:58:47,315 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-02-07 08:58:47,609 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199360.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:58:57,338 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199373.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:59:08,720 INFO [train.py:901] (0/4) Epoch 25, batch 5400, loss[loss=0.2067, simple_loss=0.286, pruned_loss=0.06374, over 8314.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2849, pruned_loss=0.05972, over 1614726.73 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 32.0
+2023-02-07 08:59:18,148 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 2.292e+02 2.858e+02 3.757e+02 5.815e+02, threshold=5.716e+02, percent-clipped=3.0
+2023-02-07 08:59:18,346 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199404.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:59:28,341 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199418.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 08:59:41,399 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3574, 2.0557, 2.6926, 2.2482, 2.7684, 2.4008, 2.2067, 1.5064],
+        device='cuda:0'), covar=tensor([0.5746, 0.5312, 0.2059, 0.3875, 0.2454, 0.3133, 0.1867, 0.5626],
+        device='cuda:0'), in_proj_covar=tensor([0.0949, 0.1000, 0.0817, 0.0971, 0.1012, 0.0915, 0.0759, 0.0836],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+        device='cuda:0')
+2023-02-07 08:59:43,192 INFO [train.py:901] (0/4) Epoch 25, batch 5450, loss[loss=0.184, simple_loss=0.2597, pruned_loss=0.05416, over 7549.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05874, over 1610420.28 frames. ], batch size: 18, lr: 3.01e-03, grad_scale: 8.0
+2023-02-07 09:00:08,077 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-07 09:00:08,202 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199476.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:00:14,523 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.44 vs. limit=5.0
+2023-02-07 09:00:17,966 INFO [train.py:901] (0/4) Epoch 25, batch 5500, loss[loss=0.2303, simple_loss=0.3026, pruned_loss=0.07905, over 8604.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2829, pruned_loss=0.05833, over 1612437.06 frames. ], batch size: 31, lr: 3.01e-03, grad_scale: 8.0
+2023-02-07 09:00:28,255 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.278e+02 2.767e+02 3.622e+02 8.817e+02, threshold=5.534e+02, percent-clipped=3.0
+2023-02-07 09:00:33,794 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199512.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:00:40,195 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-07 09:00:52,114 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199538.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:00:53,324 INFO [train.py:901] (0/4) Epoch 25, batch 5550, loss[loss=0.2265, simple_loss=0.3117, pruned_loss=0.07068, over 8529.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05804, over 1606565.85 frames. ], batch size: 31, lr: 3.01e-03, grad_scale: 8.0
+2023-02-07 09:01:02,087 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199553.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:01:24,432 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199585.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:01:27,625 INFO [train.py:901] (0/4) Epoch 25, batch 5600, loss[loss=0.2151, simple_loss=0.296, pruned_loss=0.06711, over 8651.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2835, pruned_loss=0.05878, over 1607315.16 frames. ], batch size: 39, lr: 3.01e-03, grad_scale: 8.0
+2023-02-07 09:01:38,052 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.538e+02 3.116e+02 4.016e+02 1.228e+03, threshold=6.232e+02, percent-clipped=11.0
+2023-02-07 09:01:43,145 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199610.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:01:47,345 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199616.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:02:03,340 INFO [train.py:901] (0/4) Epoch 25, batch 5650, loss[loss=0.1999, simple_loss=0.2874, pruned_loss=0.05622, over 8143.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2842, pruned_loss=0.05906, over 1610556.35 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 8.0
+2023-02-07 09:02:04,208 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199641.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:02:13,519 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199653.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:02:14,004 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-07 09:02:18,307 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199660.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:02:36,563 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199685.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:02:39,923 INFO [train.py:901] (0/4) Epoch 25, batch 5700, loss[loss=0.1663, simple_loss=0.2445, pruned_loss=0.04405, over 7937.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2829, pruned_loss=0.05813, over 1614784.08 frames. ], batch size: 20, lr: 3.01e-03, grad_scale: 8.0
+2023-02-07 09:02:49,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.200e+02 2.647e+02 3.419e+02 7.306e+02, threshold=5.294e+02, percent-clipped=3.0
+2023-02-07 09:03:16,064 INFO [train.py:901] (0/4) Epoch 25, batch 5750, loss[loss=0.1876, simple_loss=0.2715, pruned_loss=0.05191, over 7981.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2807, pruned_loss=0.05721, over 1609993.21 frames. ], batch size: 21, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:03:21,576 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-07 09:03:30,821 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199762.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:03:50,502 INFO [train.py:901] (0/4) Epoch 25, batch 5800, loss[loss=0.2193, simple_loss=0.3096, pruned_loss=0.06457, over 8669.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2817, pruned_loss=0.05779, over 1610938.23 frames. ], batch size: 39, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:04:00,801 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.348e+02 2.869e+02 3.742e+02 6.332e+02, threshold=5.738e+02, percent-clipped=6.0
+2023-02-07 09:04:11,724 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:04:26,580 INFO [train.py:901] (0/4) Epoch 25, batch 5850, loss[loss=0.1885, simple_loss=0.2823, pruned_loss=0.04742, over 8176.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2815, pruned_loss=0.05738, over 1614911.74 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:04:37,359 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199856.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:04:51,709 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199877.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:04:52,571 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0
+2023-02-07 09:05:01,001 INFO [train.py:901] (0/4) Epoch 25, batch 5900, loss[loss=0.2821, simple_loss=0.3413, pruned_loss=0.1115, over 8470.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2823, pruned_loss=0.05762, over 1617932.45 frames. ], batch size: 28, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:05:05,785 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199897.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 09:05:10,359 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.352e+02 2.828e+02 3.481e+02 7.421e+02, threshold=5.657e+02, percent-clipped=3.0
+2023-02-07 09:05:13,991 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:05:26,436 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:05:31,326 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199934.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:05:32,028 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199935.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:05:35,262 INFO [train.py:901] (0/4) Epoch 25, batch 5950, loss[loss=0.1709, simple_loss=0.2502, pruned_loss=0.04582, over 7285.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2819, pruned_loss=0.05758, over 1612903.65 frames. ], batch size: 16, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:05:58,231 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199971.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:06:11,073 INFO [train.py:901] (0/4) Epoch 25, batch 6000, loss[loss=0.1796, simple_loss=0.2681, pruned_loss=0.04553, over 8248.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.281, pruned_loss=0.05744, over 1610203.82 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:06:11,074 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-07 09:06:23,700 INFO [train.py:935] (0/4) Epoch 25, validation: loss=0.1725, simple_loss=0.2721, pruned_loss=0.03643, over 944034.00 frames.
+2023-02-07 09:06:23,701 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-07 09:06:30,903 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-200000.pt
+2023-02-07 09:06:34,584 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.373e+02 2.952e+02 3.581e+02 7.260e+02, threshold=5.903e+02, percent-clipped=4.0
+2023-02-07 09:06:37,407 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8010, 5.8972, 5.0266, 2.7156, 5.1310, 5.6254, 5.3468, 5.3110],
+        device='cuda:0'), covar=tensor([0.0510, 0.0342, 0.0885, 0.3815, 0.0825, 0.0797, 0.1017, 0.0578],
+        device='cuda:0'), in_proj_covar=tensor([0.0536, 0.0451, 0.0437, 0.0548, 0.0435, 0.0453, 0.0430, 0.0400],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:06:40,199 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200012.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 09:06:49,049 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5759, 2.9293, 2.4721, 4.0893, 1.7453, 2.1336, 2.7306, 3.0467],
+        device='cuda:0'), covar=tensor([0.0667, 0.0719, 0.0760, 0.0207, 0.1074, 0.1236, 0.0808, 0.0754],
+        device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0194, 0.0244, 0.0211, 0.0205, 0.0247, 0.0248, 0.0207],
+        device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+        device='cuda:0')
+2023-02-07 09:06:59,670 INFO [train.py:901] (0/4) Epoch 25, batch 6050, loss[loss=0.2081, simple_loss=0.2939, pruned_loss=0.06115, over 8476.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2819, pruned_loss=0.05772, over 1612140.71 frames. ], batch size: 25, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:07:35,041 INFO [train.py:901] (0/4) Epoch 25, batch 6100, loss[loss=0.2165, simple_loss=0.3052, pruned_loss=0.0639, over 8483.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2827, pruned_loss=0.05815, over 1612313.12 frames. ], batch size: 25, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:07:45,362 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.345e+02 2.959e+02 3.596e+02 7.197e+02, threshold=5.919e+02, percent-clipped=3.0
+2023-02-07 09:07:54,362 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-07 09:08:05,858 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200133.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:08:07,192 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0257, 1.7276, 6.2592, 2.3868, 5.6518, 5.2179, 5.7614, 5.6428],
+        device='cuda:0'), covar=tensor([0.0493, 0.4710, 0.0309, 0.3823, 0.0969, 0.0850, 0.0452, 0.0496],
+        device='cuda:0'), in_proj_covar=tensor([0.0659, 0.0657, 0.0725, 0.0648, 0.0733, 0.0623, 0.0625, 0.0700],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:08:11,247 INFO [train.py:901] (0/4) Epoch 25, batch 6150, loss[loss=0.2245, simple_loss=0.293, pruned_loss=0.07799, over 7429.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2829, pruned_loss=0.05837, over 1611616.78 frames. ], batch size: 17, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:08:23,399 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200158.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:08:46,124 INFO [train.py:901] (0/4) Epoch 25, batch 6200, loss[loss=0.1847, simple_loss=0.2567, pruned_loss=0.05639, over 7655.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2834, pruned_loss=0.05872, over 1610108.58 frames. ], batch size: 19, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:08:47,048 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200191.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:08:49,549 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200195.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:08:55,698 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.314e+02 2.821e+02 3.535e+02 6.331e+02, threshold=5.643e+02, percent-clipped=2.0
+2023-02-07 09:09:04,314 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200216.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:09:12,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1151, 1.8935, 2.4928, 2.0899, 2.5382, 2.2271, 2.0411, 1.3070],
+        device='cuda:0'), covar=tensor([0.5940, 0.4791, 0.2092, 0.3998, 0.2547, 0.3177, 0.2007, 0.5570],
+        device='cuda:0'), in_proj_covar=tensor([0.0956, 0.1005, 0.0822, 0.0978, 0.1017, 0.0917, 0.0766, 0.0840],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+        device='cuda:0')
+2023-02-07 09:09:13,086 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200227.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 09:09:21,660 INFO [train.py:901] (0/4) Epoch 25, batch 6250, loss[loss=0.2595, simple_loss=0.3215, pruned_loss=0.09882, over 6668.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2831, pruned_loss=0.05869, over 1608574.46 frames. ], batch size: 71, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:09:29,289 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0
+2023-02-07 09:09:29,782 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200252.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 09:09:41,361 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200268.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:09:43,274 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200271.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:09:56,026 INFO [train.py:901] (0/4) Epoch 25, batch 6300, loss[loss=0.206, simple_loss=0.2857, pruned_loss=0.06319, over 8083.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2828, pruned_loss=0.05853, over 1606398.49 frames. ], batch size: 21, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:09:58,159 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200293.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 09:10:00,769 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200297.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:10:06,125 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.537e+02 3.046e+02 4.211e+02 7.306e+02, threshold=6.092e+02, percent-clipped=6.0
+2023-02-07 09:10:31,232 INFO [train.py:901] (0/4) Epoch 25, batch 6350, loss[loss=0.1804, simple_loss=0.2775, pruned_loss=0.04167, over 8360.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2822, pruned_loss=0.05856, over 1604105.08 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:11:03,766 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200386.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:11:06,377 INFO [train.py:901] (0/4) Epoch 25, batch 6400, loss[loss=0.1905, simple_loss=0.2891, pruned_loss=0.04591, over 8345.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2824, pruned_loss=0.05851, over 1607731.49 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:11:15,859 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.247e+02 2.600e+02 3.696e+02 8.014e+02, threshold=5.200e+02, percent-clipped=2.0
+2023-02-07 09:11:40,859 INFO [train.py:901] (0/4) Epoch 25, batch 6450, loss[loss=0.1723, simple_loss=0.2753, pruned_loss=0.03469, over 8503.00 frames. ], tot_loss[loss=0.2, simple_loss=0.283, pruned_loss=0.0585, over 1609353.86 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:12:00,417 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-07 09:12:16,064 INFO [train.py:901] (0/4) Epoch 25, batch 6500, loss[loss=0.1606, simple_loss=0.2495, pruned_loss=0.03589, over 7671.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2828, pruned_loss=0.05803, over 1611282.77 frames. ], batch size: 19, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:12:26,035 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.181e+02 2.613e+02 3.190e+02 4.719e+02, threshold=5.226e+02, percent-clipped=0.0
+2023-02-07 09:12:49,387 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200539.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:12:49,943 INFO [train.py:901] (0/4) Epoch 25, batch 6550, loss[loss=0.182, simple_loss=0.262, pruned_loss=0.051, over 8077.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05769, over 1611811.27 frames. ], batch size: 21, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:13:09,851 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-07 09:13:26,022 INFO [train.py:901] (0/4) Epoch 25, batch 6600, loss[loss=0.2056, simple_loss=0.2821, pruned_loss=0.06456, over 8138.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05742, over 1616481.51 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:13:27,811 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0
+2023-02-07 09:13:30,820 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-07 09:13:35,550 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.321e+02 2.722e+02 3.541e+02 8.507e+02, threshold=5.445e+02, percent-clipped=6.0
+2023-02-07 09:14:00,778 INFO [train.py:901] (0/4) Epoch 25, batch 6650, loss[loss=0.1897, simple_loss=0.2772, pruned_loss=0.05112, over 8471.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.05697, over 1614734.63 frames. ], batch size: 29, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:14:01,585 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200641.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:14:02,419 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200642.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:14:10,426 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200654.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 09:14:16,373 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200663.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:14:19,897 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200667.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:14:35,347 INFO [train.py:901] (0/4) Epoch 25, batch 6700, loss[loss=0.2235, simple_loss=0.3074, pruned_loss=0.06982, over 7159.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2811, pruned_loss=0.05694, over 1610633.76 frames. ], batch size: 72, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:14:45,632 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.443e+02 2.859e+02 3.397e+02 5.440e+02, threshold=5.717e+02, percent-clipped=0.0
+2023-02-07 09:15:06,977 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9023, 1.5676, 3.4446, 1.5186, 2.5813, 3.8129, 3.8676, 3.3028],
+        device='cuda:0'), covar=tensor([0.1233, 0.1776, 0.0294, 0.1960, 0.0968, 0.0222, 0.0434, 0.0508],
+        device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0324, 0.0288, 0.0318, 0.0318, 0.0275, 0.0434, 0.0306],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+        device='cuda:0')
+2023-02-07 09:15:10,910 INFO [train.py:901] (0/4) Epoch 25, batch 6750, loss[loss=0.1823, simple_loss=0.2689, pruned_loss=0.04785, over 7810.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2816, pruned_loss=0.05713, over 1612268.28 frames. ], batch size: 20, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:15:22,767 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200756.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:15:47,015 INFO [train.py:901] (0/4) Epoch 25, batch 6800, loss[loss=0.2255, simple_loss=0.3082, pruned_loss=0.07145, over 7936.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2813, pruned_loss=0.05709, over 1613190.87 frames. ], batch size: 20, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:15:51,865 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-07 09:15:56,781 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.322e+02 2.853e+02 3.502e+02 6.162e+02, threshold=5.706e+02, percent-clipped=1.0
+2023-02-07 09:16:21,869 INFO [train.py:901] (0/4) Epoch 25, batch 6850, loss[loss=0.152, simple_loss=0.2313, pruned_loss=0.03632, over 7684.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2811, pruned_loss=0.05719, over 1611605.47 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:16:40,400 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1709, 1.5013, 3.5314, 1.6210, 2.4958, 3.9395, 4.0372, 3.3549],
+        device='cuda:0'), covar=tensor([0.1049, 0.1904, 0.0258, 0.1982, 0.1076, 0.0199, 0.0426, 0.0525],
+        device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0324, 0.0288, 0.0318, 0.0317, 0.0274, 0.0433, 0.0306],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+        device='cuda:0')
+2023-02-07 09:16:40,956 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-07 09:16:56,592 INFO [train.py:901] (0/4) Epoch 25, batch 6900, loss[loss=0.1696, simple_loss=0.239, pruned_loss=0.05013, over 7536.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2808, pruned_loss=0.05693, over 1614162.00 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:17:03,070 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0
+2023-02-07 09:17:06,816 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.244e+02 2.770e+02 3.533e+02 6.127e+02, threshold=5.541e+02, percent-clipped=2.0
+2023-02-07 09:17:11,227 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200910.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:17:28,172 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200935.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 09:17:31,355 INFO [train.py:901] (0/4) Epoch 25, batch 6950, loss[loss=0.2344, simple_loss=0.3138, pruned_loss=0.07745, over 8081.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.281, pruned_loss=0.05711, over 1607724.95 frames. ], batch size: 21, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:17:34,436 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-07 09:17:36,186 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7580, 1.7085, 2.2935, 1.5072, 1.3668, 2.2292, 0.3930, 1.4485],
+        device='cuda:0'), covar=tensor([0.1552, 0.1100, 0.0317, 0.0987, 0.2386, 0.0483, 0.2029, 0.1233],
+        device='cuda:0'), in_proj_covar=tensor([0.0194, 0.0199, 0.0130, 0.0220, 0.0273, 0.0140, 0.0170, 0.0196],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 09:17:50,976 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-07 09:18:07,799 INFO [train.py:901] (0/4) Epoch 25, batch 7000, loss[loss=0.2151, simple_loss=0.3044, pruned_loss=0.0629, over 8450.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2818, pruned_loss=0.05733, over 1612370.17 frames. ], batch size: 29, lr: 3.00e-03, grad_scale: 8.0
+2023-02-07 09:18:17,567 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.593e+02 3.026e+02 3.851e+02 8.547e+02, threshold=6.052e+02, percent-clipped=7.0
+2023-02-07 09:18:19,768 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:18:23,173 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201012.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:18:40,485 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201037.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:18:42,298 INFO [train.py:901] (0/4) Epoch 25, batch 7050, loss[loss=0.1831, simple_loss=0.2789, pruned_loss=0.04364, over 8480.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.05735, over 1612333.18 frames. ], batch size: 29, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:19:12,437 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0
+2023-02-07 09:19:16,824 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201089.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:19:17,386 INFO [train.py:901] (0/4) Epoch 25, batch 7100, loss[loss=0.2197, simple_loss=0.2958, pruned_loss=0.07183, over 7703.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2817, pruned_loss=0.05723, over 1610461.27 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:19:26,873 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.246e+02 2.728e+02 3.277e+02 5.322e+02, threshold=5.456e+02, percent-clipped=0.0
+2023-02-07 09:19:33,404 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8055, 1.6477, 2.4019, 1.4234, 1.3077, 2.3490, 0.4178, 1.4737],
+        device='cuda:0'), covar=tensor([0.1637, 0.1245, 0.0319, 0.1382, 0.2490, 0.0381, 0.2153, 0.1319],
+        device='cuda:0'), in_proj_covar=tensor([0.0195, 0.0199, 0.0130, 0.0219, 0.0273, 0.0140, 0.0169, 0.0197],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 09:19:40,018 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:19:52,247 INFO [train.py:901] (0/4) Epoch 25, batch 7150, loss[loss=0.1649, simple_loss=0.2614, pruned_loss=0.03427, over 7969.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.057, over 1611222.08 frames. ], batch size: 21, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:20:00,669 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3641, 1.5538, 2.1769, 1.2786, 1.7142, 1.5921, 1.4729, 1.7395],
+        device='cuda:0'), covar=tensor([0.1484, 0.2005, 0.0756, 0.3585, 0.1527, 0.2410, 0.1799, 0.2015],
+        device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0626, 0.0559, 0.0661, 0.0661, 0.0605, 0.0553, 0.0643],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:20:23,854 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6444, 1.9944, 3.2019, 1.4730, 2.4031, 2.1664, 1.7235, 2.4940],
+        device='cuda:0'), covar=tensor([0.1912, 0.2931, 0.0825, 0.4909, 0.2037, 0.3141, 0.2540, 0.2273],
+        device='cuda:0'), in_proj_covar=tensor([0.0536, 0.0627, 0.0560, 0.0662, 0.0662, 0.0606, 0.0554, 0.0644],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:20:28,430 INFO [train.py:901] (0/4) Epoch 25, batch 7200, loss[loss=0.1691, simple_loss=0.2553, pruned_loss=0.04145, over 7817.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2827, pruned_loss=0.0577, over 1614782.34 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:20:38,242 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.475e+02 3.123e+02 4.294e+02 9.608e+02, threshold=6.246e+02, percent-clipped=8.0
+2023-02-07 09:20:46,061 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7981, 1.4834, 2.8900, 1.4558, 2.1566, 3.1110, 3.2457, 2.6170],
+        device='cuda:0'), covar=tensor([0.1079, 0.1638, 0.0344, 0.2011, 0.0915, 0.0272, 0.0601, 0.0566],
+        device='cuda:0'), in_proj_covar=tensor([0.0300, 0.0322, 0.0288, 0.0317, 0.0316, 0.0273, 0.0432, 0.0305],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+        device='cuda:0')
+2023-02-07 09:20:55,928 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 09:21:03,482 INFO [train.py:901] (0/4) Epoch 25, batch 7250, loss[loss=0.2051, simple_loss=0.2862, pruned_loss=0.06199, over 8469.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2837, pruned_loss=0.05802, over 1616375.17 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:21:23,522 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0
+2023-02-07 09:21:25,201 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201271.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:21:37,959 INFO [train.py:901] (0/4) Epoch 25, batch 7300, loss[loss=0.1678, simple_loss=0.2477, pruned_loss=0.04395, over 6429.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2839, pruned_loss=0.05845, over 1615938.88 frames. ], batch size: 14, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:21:39,371 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201292.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:21:48,706 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.341e+02 2.809e+02 3.464e+02 9.506e+02, threshold=5.617e+02, percent-clipped=4.0
+2023-02-07 09:22:13,165 INFO [train.py:901] (0/4) Epoch 25, batch 7350, loss[loss=0.229, simple_loss=0.2969, pruned_loss=0.08053, over 7156.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2843, pruned_loss=0.05864, over 1616993.04 frames. ], batch size: 71, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:22:17,668 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8013, 2.1284, 3.2175, 1.7015, 2.5704, 2.2116, 1.8198, 2.4496],
+        device='cuda:0'), covar=tensor([0.1818, 0.2530, 0.0891, 0.4374, 0.1835, 0.3085, 0.2401, 0.2256],
+        device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0627, 0.0559, 0.0663, 0.0661, 0.0607, 0.0553, 0.0643],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:22:20,359 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0386, 2.1758, 1.7680, 2.7551, 1.2646, 1.5566, 1.9321, 2.0628],
+        device='cuda:0'), covar=tensor([0.0713, 0.0692, 0.0924, 0.0350, 0.1166, 0.1411, 0.0920, 0.0838],
+        device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0195, 0.0244, 0.0211, 0.0206, 0.0247, 0.0249, 0.0207],
+        device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+        device='cuda:0')
+2023-02-07 09:22:39,015 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-07 09:22:40,514 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201378.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:22:48,513 INFO [train.py:901] (0/4) Epoch 25, batch 7400, loss[loss=0.1855, simple_loss=0.2672, pruned_loss=0.05194, over 8229.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2838, pruned_loss=0.0586, over 1616993.61 frames. ], batch size: 22, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:22:57,493 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:22:57,973 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.231e+02 2.880e+02 3.857e+02 7.685e+02, threshold=5.759e+02, percent-clipped=5.0
+2023-02-07 09:22:58,697 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-07 09:23:19,305 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201433.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:23:22,167 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3942, 1.6045, 1.6747, 1.0240, 1.6676, 1.3780, 0.2653, 1.5451],
+        device='cuda:0'), covar=tensor([0.0511, 0.0390, 0.0360, 0.0575, 0.0426, 0.0940, 0.0936, 0.0328],
+        device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0404, 0.0363, 0.0461, 0.0392, 0.0547, 0.0403, 0.0433],
+        device='cuda:0'), out_proj_covar=tensor([1.2330e-04, 1.0519e-04, 9.4840e-05, 1.2081e-04, 1.0278e-04, 1.5312e-04,
+        1.0792e-04, 1.1392e-04], device='cuda:0')
+2023-02-07 09:23:23,993 INFO [train.py:901] (0/4) Epoch 25, batch 7450, loss[loss=0.2295, simple_loss=0.3215, pruned_loss=0.06871, over 8343.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05889, over 1614896.64 frames. ], batch size: 26, lr: 2.99e-03, grad_scale: 16.0
+2023-02-07 09:23:38,548 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-07 09:23:59,281 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9548, 2.0748, 1.8765, 2.6094, 1.2368, 1.6575, 1.9049, 1.9993],
+        device='cuda:0'), covar=tensor([0.0740, 0.0733, 0.0829, 0.0397, 0.1027, 0.1260, 0.0785, 0.0753],
+        device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0196, 0.0245, 0.0212, 0.0206, 0.0248, 0.0250, 0.0206],
+        device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+        device='cuda:0')
+2023-02-07 09:23:59,798 INFO [train.py:901] (0/4) Epoch 25, batch 7500, loss[loss=0.1714, simple_loss=0.2622, pruned_loss=0.04037, over 7922.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2832, pruned_loss=0.05836, over 1614614.56 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:24:09,389 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6477, 1.6013, 2.0712, 1.3511, 1.2786, 2.0609, 0.4496, 1.4268],
+        device='cuda:0'), covar=tensor([0.1475, 0.1001, 0.0321, 0.0812, 0.2075, 0.0361, 0.1606, 0.1088],
+        device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0200, 0.0130, 0.0220, 0.0274, 0.0141, 0.0171, 0.0198],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 09:24:09,794 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.281e+02 2.758e+02 3.564e+02 6.593e+02, threshold=5.515e+02, percent-clipped=6.0
+2023-02-07 09:24:25,714 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-07 09:24:34,711 INFO [train.py:901] (0/4) Epoch 25, batch 7550, loss[loss=0.2006, simple_loss=0.2977, pruned_loss=0.05173, over 8640.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2834, pruned_loss=0.05832, over 1618580.52 frames. ], batch size: 34, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:24:40,276 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:25:09,417 INFO [train.py:901] (0/4) Epoch 25, batch 7600, loss[loss=0.1959, simple_loss=0.2838, pruned_loss=0.05401, over 8448.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05799, over 1621130.22 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:25:20,540 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.467e+02 2.939e+02 3.909e+02 7.265e+02, threshold=5.878e+02, percent-clipped=5.0
+2023-02-07 09:25:27,310 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201615.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:25:41,303 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201636.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:25:43,929 INFO [train.py:901] (0/4) Epoch 25, batch 7650, loss[loss=0.2166, simple_loss=0.3015, pruned_loss=0.06588, over 8348.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05909, over 1620801.27 frames. ], batch size: 26, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:26:00,397 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201662.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:26:12,134 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201679.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:26:14,193 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0422, 2.5310, 2.7078, 1.4827, 2.8663, 1.5843, 1.5374, 2.0401],
+        device='cuda:0'), covar=tensor([0.1043, 0.0454, 0.0449, 0.0995, 0.0593, 0.1151, 0.1247, 0.0703],
+        device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0404, 0.0362, 0.0460, 0.0393, 0.0545, 0.0404, 0.0433],
+        device='cuda:0'), out_proj_covar=tensor([1.2350e-04, 1.0521e-04, 9.4487e-05, 1.2071e-04, 1.0281e-04, 1.5270e-04,
+        1.0792e-04, 1.1377e-04], device='cuda:0')
+2023-02-07 09:26:19,453 INFO [train.py:901] (0/4) Epoch 25, batch 7700, loss[loss=0.1932, simple_loss=0.2695, pruned_loss=0.05846, over 7418.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05833, over 1618346.25 frames. ], batch size: 17, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:26:30,382 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.306e+02 2.805e+02 3.732e+02 7.115e+02, threshold=5.609e+02, percent-clipped=1.0
+2023-02-07 09:26:43,967 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201724.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:26:48,005 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201730.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:26:49,245 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-07 09:26:54,627 INFO [train.py:901] (0/4) Epoch 25, batch 7750, loss[loss=0.2036, simple_loss=0.288, pruned_loss=0.05959, over 8365.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05811, over 1616044.25 frames. ], batch size: 24, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:27:02,275 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201751.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:27:20,009 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8312, 2.4349, 3.7877, 1.9090, 1.9385, 3.7796, 0.5017, 2.1240],
+        device='cuda:0'), covar=tensor([0.1586, 0.1108, 0.0170, 0.1622, 0.2386, 0.0214, 0.2322, 0.1368],
+        device='cuda:0'), in_proj_covar=tensor([0.0197, 0.0201, 0.0131, 0.0221, 0.0275, 0.0141, 0.0171, 0.0199],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 09:27:20,636 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8180, 5.9344, 5.1021, 2.6040, 5.2255, 5.6023, 5.3173, 5.4168],
+        device='cuda:0'), covar=tensor([0.0587, 0.0382, 0.0944, 0.4434, 0.0756, 0.0934, 0.1216, 0.0612],
+        device='cuda:0'), in_proj_covar=tensor([0.0533, 0.0452, 0.0439, 0.0549, 0.0436, 0.0457, 0.0430, 0.0397],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:27:29,977 INFO [train.py:901] (0/4) Epoch 25, batch 7800, loss[loss=0.2026, simple_loss=0.293, pruned_loss=0.05608, over 8791.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2826, pruned_loss=0.05756, over 1619985.87 frames. ], batch size: 40, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:27:40,035 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201804.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:27:40,476 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.301e+02 2.955e+02 3.831e+02 1.047e+03, threshold=5.910e+02, percent-clipped=5.0
+2023-02-07 09:27:56,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201829.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:28:03,384 INFO [train.py:901] (0/4) Epoch 25, batch 7850, loss[loss=0.2171, simple_loss=0.3022, pruned_loss=0.06601, over 8607.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2833, pruned_loss=0.05773, over 1620096.98 frames. ], batch size: 31, lr: 2.99e-03, grad_scale: 4.0
+2023-02-07 09:28:26,017 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-07 09:28:36,517 INFO [train.py:901] (0/4) Epoch 25, batch 7900, loss[loss=0.1848, simple_loss=0.2621, pruned_loss=0.05377, over 7774.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2835, pruned_loss=0.05826, over 1614699.44 frames. ], batch size: 19, lr: 2.99e-03, grad_scale: 4.0
+2023-02-07 09:28:47,141 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.505e+02 3.187e+02 3.787e+02 7.491e+02, threshold=6.375e+02, percent-clipped=2.0
+2023-02-07 09:29:09,581 INFO [train.py:901] (0/4) Epoch 25, batch 7950, loss[loss=0.1652, simple_loss=0.2528, pruned_loss=0.03879, over 7257.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2839, pruned_loss=0.05838, over 1615748.05 frames. ], batch size: 16, lr: 2.99e-03, grad_scale: 4.0
+2023-02-07 09:29:40,304 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201986.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:29:42,800 INFO [train.py:901] (0/4) Epoch 25, batch 8000, loss[loss=0.1762, simple_loss=0.253, pruned_loss=0.04964, over 7426.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05907, over 1613912.42 frames. ], batch size: 17, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:29:49,508 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-202000.pt
+2023-02-07 09:29:54,397 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 2.298e+02 3.131e+02 3.789e+02 6.155e+02, threshold=6.263e+02, percent-clipped=0.0
+2023-02-07 09:29:54,493 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202006.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:29:55,326 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:29:55,934 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202008.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:29:58,060 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202011.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:30:06,541 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202023.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:30:07,922 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8233, 1.7684, 2.0055, 1.8317, 1.2167, 1.7668, 2.3891, 2.1649],
+        device='cuda:0'), covar=tensor([0.0439, 0.1202, 0.1534, 0.1297, 0.0558, 0.1374, 0.0582, 0.0571],
+        device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145],
+        device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+        device='cuda:0')
+2023-02-07 09:30:12,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:30:16,032 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-07 09:30:17,710 INFO [train.py:901] (0/4) Epoch 25, batch 8050, loss[loss=0.2306, simple_loss=0.3037, pruned_loss=0.07876, over 6944.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2842, pruned_loss=0.05992, over 1601560.21 frames. ], batch size: 72, lr: 2.99e-03, grad_scale: 8.0
+2023-02-07 09:30:34,607 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-07 09:30:36,895 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202068.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:30:40,703 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-25.pt
+2023-02-07 09:30:51,763 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-07 09:30:55,055 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-07 09:30:55,313 INFO [train.py:901] (0/4) Epoch 26, batch 0, loss[loss=0.2188, simple_loss=0.2964, pruned_loss=0.07057, over 8255.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2964, pruned_loss=0.07057, over 8255.00 frames. ], batch size: 24, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:30:55,314 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-07 09:31:04,569 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2914, 2.0870, 1.6358, 1.8753, 1.7637, 1.5211, 1.6988, 1.6868],
+        device='cuda:0'), covar=tensor([0.1547, 0.0450, 0.1327, 0.0618, 0.0707, 0.1601, 0.1044, 0.1077],
+        device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0239, 0.0341, 0.0314, 0.0303, 0.0346, 0.0350, 0.0322],
+        device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+        device='cuda:0')
+2023-02-07 09:31:06,904 INFO [train.py:935] (0/4) Epoch 26, validation: loss=0.1717, simple_loss=0.2716, pruned_loss=0.03591, over 944034.00 frames.
+2023-02-07 09:31:06,905 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6722MB
+2023-02-07 09:31:21,600 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-07 09:31:29,813 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.411e+02 2.993e+02 3.956e+02 9.314e+02, threshold=5.987e+02, percent-clipped=4.0
+2023-02-07 09:31:36,234 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9699, 2.1554, 1.8091, 2.7895, 1.3164, 1.6549, 2.0754, 2.1721],
+        device='cuda:0'), covar=tensor([0.0746, 0.0786, 0.0948, 0.0358, 0.1061, 0.1303, 0.0784, 0.0755],
+        device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0211, 0.0204, 0.0245, 0.0248, 0.0205],
+        device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+        device='cuda:0')
+2023-02-07 09:31:40,828 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202121.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:31:41,325 INFO [train.py:901] (0/4) Epoch 26, batch 50, loss[loss=0.2382, simple_loss=0.3126, pruned_loss=0.08193, over 8364.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2847, pruned_loss=0.06069, over 364506.77 frames. ], batch size: 26, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:31:52,552 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202138.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:31:55,750 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-07 09:32:14,786 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5239, 1.4841, 1.8254, 1.2396, 1.1941, 1.8398, 0.1925, 1.2606],
+        device='cuda:0'), covar=tensor([0.1307, 0.1179, 0.0391, 0.0824, 0.2197, 0.0404, 0.1747, 0.1051],
+        device='cuda:0'), in_proj_covar=tensor([0.0197, 0.0202, 0.0131, 0.0223, 0.0276, 0.0142, 0.0172, 0.0199],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+        device='cuda:0')
+2023-02-07 09:32:15,972 INFO [train.py:901] (0/4) Epoch 26, batch 100, loss[loss=0.1662, simple_loss=0.2526, pruned_loss=0.03992, over 7810.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2854, pruned_loss=0.05918, over 643781.11 frames. ], batch size: 19, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:32:18,604 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-07 09:32:23,611 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202183.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:32:40,584 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.435e+02 2.962e+02 3.649e+02 8.375e+02, threshold=5.925e+02, percent-clipped=4.0
+2023-02-07 09:32:51,107 INFO [train.py:901] (0/4) Epoch 26, batch 150, loss[loss=0.1923, simple_loss=0.284, pruned_loss=0.05028, over 8572.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2853, pruned_loss=0.06027, over 859182.62 frames. ], batch size: 34, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:33:22,870 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-07 09:33:26,388 INFO [train.py:901] (0/4) Epoch 26, batch 200, loss[loss=0.1969, simple_loss=0.2871, pruned_loss=0.05337, over 8556.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.0601, over 1026563.52 frames. ], batch size: 39, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:33:49,944 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.408e+02 2.928e+02 3.669e+02 9.390e+02, threshold=5.857e+02, percent-clipped=3.0
+2023-02-07 09:34:01,568 INFO [train.py:901] (0/4) Epoch 26, batch 250, loss[loss=0.2352, simple_loss=0.3112, pruned_loss=0.07959, over 8479.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2848, pruned_loss=0.06026, over 1154902.38 frames. ], batch size: 49, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:34:05,778 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7328, 5.8697, 5.0604, 2.5666, 5.1290, 5.5028, 5.3355, 5.3767],
+        device='cuda:0'), covar=tensor([0.0484, 0.0386, 0.0938, 0.4159, 0.0710, 0.0819, 0.0933, 0.0541],
+        device='cuda:0'), in_proj_covar=tensor([0.0534, 0.0451, 0.0437, 0.0549, 0.0435, 0.0455, 0.0427, 0.0397],
+        device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:34:09,732 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-07 09:34:19,969 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-07 09:34:22,758 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202352.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:34:36,408 INFO [train.py:901] (0/4) Epoch 26, batch 300, loss[loss=0.1967, simple_loss=0.2782, pruned_loss=0.05762, over 7649.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.05995, over 1260275.29 frames. ], batch size: 19, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:34:38,648 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1279, 3.6206, 2.2620, 2.8360, 2.6651, 2.0228, 2.7695, 2.9952],
+        device='cuda:0'), covar=tensor([0.1763, 0.0368, 0.1257, 0.0760, 0.0886, 0.1688, 0.1088, 0.1150],
+        device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0239, 0.0344, 0.0315, 0.0304, 0.0349, 0.0351, 0.0324],
+        device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+        device='cuda:0')
+2023-02-07 09:34:40,010 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202377.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:34:52,230 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202394.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:34:57,255 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:34:59,615 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.510e+02 3.033e+02 3.572e+02 1.183e+03, threshold=6.066e+02, percent-clipped=2.0
+2023-02-07 09:35:08,713 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202419.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:35:10,557 INFO [train.py:901] (0/4) Epoch 26, batch 350, loss[loss=0.1641, simple_loss=0.2437, pruned_loss=0.04228, over 7799.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05882, over 1340236.99 frames. ], batch size: 19, lr: 2.93e-03, grad_scale: 4.0
+2023-02-07 09:35:23,417 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202439.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:35:40,989 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:35:43,088 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202467.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 09:35:46,422 INFO [train.py:901] (0/4) Epoch 26, batch 400, loss[loss=0.2108, simple_loss=0.2817, pruned_loss=0.06997, over 7919.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2839, pruned_loss=0.05892, over 1404415.95 frames. ], batch size: 20, lr: 2.93e-03, grad_scale: 8.0
+2023-02-07 09:36:11,056 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 2.504e+02 3.071e+02 3.633e+02 8.131e+02, threshold=6.142e+02, percent-clipped=3.0
+2023-02-07 09:36:11,213 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9374, 1.4119, 6.0150, 2.1521, 5.4251, 4.9596, 5.5745, 5.4573],
+        device='cuda:0'), covar=tensor([0.0427, 0.5631, 0.0391, 0.4077, 0.1032, 0.0954, 0.0469, 0.0514],
+        device='cuda:0'), in_proj_covar=tensor([0.0658, 0.0654, 0.0720, 0.0645, 0.0731, 0.0626, 0.0626, 0.0697],
+        device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+        device='cuda:0')
+2023-02-07 09:36:21,078 INFO [train.py:901] (0/4) Epoch 26, batch 450, loss[loss=0.2249, simple_loss=0.3188, pruned_loss=0.06556, over 8499.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2835, pruned_loss=0.05885, over 1448898.77 frames. ], batch size: 28, lr: 2.92e-03, grad_scale: 8.0
+2023-02-07 09:36:55,495 INFO [train.py:901] (0/4) Epoch 26, batch 500, loss[loss=0.1935, simple_loss=0.2762, pruned_loss=0.0554, over 8568.00 frames.
], tot_loss[loss=0.2005, simple_loss=0.2837, pruned_loss=0.05863, over 1488019.06 frames. ], batch size: 31, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:11,980 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3879, 1.5770, 1.6403, 1.1289, 1.6439, 1.3300, 0.3178, 1.6231], + device='cuda:0'), covar=tensor([0.0506, 0.0394, 0.0298, 0.0525, 0.0445, 0.0794, 0.0858, 0.0262], + device='cuda:0'), in_proj_covar=tensor([0.0463, 0.0402, 0.0357, 0.0455, 0.0387, 0.0540, 0.0398, 0.0429], + device='cuda:0'), out_proj_covar=tensor([1.2293e-04, 1.0473e-04, 9.3178e-05, 1.1921e-04, 1.0137e-04, 1.5109e-04, + 1.0650e-04, 1.1288e-04], device='cuda:0') +2023-02-07 09:37:19,245 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.396e+02 2.962e+02 4.085e+02 8.069e+02, threshold=5.924e+02, percent-clipped=6.0 +2023-02-07 09:37:19,505 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8445, 1.8848, 3.2716, 2.3332, 2.8407, 1.9562, 1.6789, 1.6164], + device='cuda:0'), covar=tensor([0.7916, 0.6969, 0.2155, 0.4572, 0.3349, 0.4753, 0.3148, 0.6510], + device='cuda:0'), in_proj_covar=tensor([0.0950, 0.1005, 0.0820, 0.0977, 0.1014, 0.0915, 0.0762, 0.0838], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 09:37:29,369 INFO [train.py:901] (0/4) Epoch 26, batch 550, loss[loss=0.2202, simple_loss=0.3084, pruned_loss=0.06603, over 8493.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2849, pruned_loss=0.05896, over 1522337.04 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:05,142 INFO [train.py:901] (0/4) Epoch 26, batch 600, loss[loss=0.2281, simple_loss=0.3104, pruned_loss=0.07296, over 8289.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2849, pruned_loss=0.05916, over 1546139.62 frames. ], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:21,738 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 09:38:27,323 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1957, 2.5570, 2.7963, 1.6375, 3.0344, 1.8477, 1.5958, 2.1278], + device='cuda:0'), covar=tensor([0.0946, 0.0416, 0.0296, 0.0889, 0.0483, 0.0794, 0.0952, 0.0626], + device='cuda:0'), in_proj_covar=tensor([0.0462, 0.0400, 0.0355, 0.0453, 0.0386, 0.0537, 0.0396, 0.0427], + device='cuda:0'), out_proj_covar=tensor([1.2263e-04, 1.0416e-04, 9.2798e-05, 1.1873e-04, 1.0103e-04, 1.5032e-04, + 1.0597e-04, 1.1237e-04], device='cuda:0') +2023-02-07 09:38:29,023 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.445e+02 2.916e+02 3.512e+02 6.749e+02, threshold=5.833e+02, percent-clipped=3.0 +2023-02-07 09:38:38,958 INFO [train.py:901] (0/4) Epoch 26, batch 650, loss[loss=0.1746, simple_loss=0.2496, pruned_loss=0.0498, over 7522.00 frames. ], tot_loss[loss=0.201, simple_loss=0.284, pruned_loss=0.05899, over 1558270.59 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:39,861 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:38:57,380 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:39:14,890 INFO [train.py:901] (0/4) Epoch 26, batch 700, loss[loss=0.1719, simple_loss=0.247, pruned_loss=0.04843, over 7799.00 frames. 
], tot_loss[loss=0.202, simple_loss=0.2847, pruned_loss=0.05971, over 1572505.21 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:39:38,607 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.498e+02 3.029e+02 3.750e+02 8.351e+02, threshold=6.058e+02, percent-clipped=3.0 +2023-02-07 09:39:43,670 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2567, 1.5307, 4.5842, 1.7997, 2.5107, 5.1904, 5.2247, 4.4124], + device='cuda:0'), covar=tensor([0.1220, 0.1916, 0.0272, 0.2093, 0.1214, 0.0160, 0.0475, 0.0592], + device='cuda:0'), in_proj_covar=tensor([0.0301, 0.0325, 0.0287, 0.0318, 0.0317, 0.0273, 0.0433, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 09:39:49,890 INFO [train.py:901] (0/4) Epoch 26, batch 750, loss[loss=0.2691, simple_loss=0.3311, pruned_loss=0.1035, over 6794.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05962, over 1587922.29 frames. ], batch size: 71, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:01,300 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 09:40:05,054 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 09:40:07,991 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202848.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:40:13,831 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 09:40:24,779 INFO [train.py:901] (0/4) Epoch 26, batch 800, loss[loss=0.24, simple_loss=0.3171, pruned_loss=0.08146, over 8456.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.285, pruned_loss=0.05968, over 1591402.01 frames. ], batch size: 27, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:49,974 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.489e+02 2.818e+02 3.827e+02 7.280e+02, threshold=5.635e+02, percent-clipped=3.0 +2023-02-07 09:40:59,907 INFO [train.py:901] (0/4) Epoch 26, batch 850, loss[loss=0.2218, simple_loss=0.3047, pruned_loss=0.06945, over 8284.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2856, pruned_loss=0.05956, over 1596247.50 frames. ], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:33,349 INFO [train.py:901] (0/4) Epoch 26, batch 900, loss[loss=0.1888, simple_loss=0.2832, pruned_loss=0.04715, over 8327.00 frames. ], tot_loss[loss=0.204, simple_loss=0.287, pruned_loss=0.06053, over 1598148.97 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:58,978 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.742e+02 3.265e+02 4.005e+02 6.934e+02, threshold=6.531e+02, percent-clipped=5.0 +2023-02-07 09:42:08,859 INFO [train.py:901] (0/4) Epoch 26, batch 950, loss[loss=0.1758, simple_loss=0.2611, pruned_loss=0.04526, over 8320.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2865, pruned_loss=0.06011, over 1606327.23 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:31,716 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-07 09:42:32,482 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:42:43,150 INFO [train.py:901] (0/4) Epoch 26, batch 1000, loss[loss=0.1983, simple_loss=0.2892, pruned_loss=0.05372, over 8660.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2848, pruned_loss=0.05935, over 1606087.41 frames. ], batch size: 34, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:43:04,689 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:43:05,260 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 09:43:07,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.348e+02 2.857e+02 3.355e+02 6.976e+02, threshold=5.714e+02, percent-clipped=1.0 +2023-02-07 09:43:17,239 INFO [train.py:901] (0/4) Epoch 26, batch 1050, loss[loss=0.1817, simple_loss=0.2435, pruned_loss=0.05994, over 7710.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2848, pruned_loss=0.05981, over 1607915.80 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:43:18,682 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 09:43:53,212 INFO [train.py:901] (0/4) Epoch 26, batch 1100, loss[loss=0.1757, simple_loss=0.2646, pruned_loss=0.04336, over 7815.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2843, pruned_loss=0.05899, over 1614300.53 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:43:55,400 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8192, 6.0221, 5.1536, 2.3418, 5.2420, 5.5883, 5.4467, 5.4560], + device='cuda:0'), covar=tensor([0.0505, 0.0328, 0.0971, 0.4523, 0.0762, 0.0723, 0.0978, 0.0466], + device='cuda:0'), in_proj_covar=tensor([0.0530, 0.0451, 0.0438, 0.0549, 0.0435, 0.0453, 0.0428, 0.0395], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:44:06,850 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203192.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:44:16,728 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.511e+02 2.912e+02 3.711e+02 8.666e+02, threshold=5.824e+02, percent-clipped=4.0 +2023-02-07 09:44:21,618 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9238, 2.5198, 3.6951, 1.8778, 1.9430, 3.6163, 0.7774, 2.1808], + device='cuda:0'), covar=tensor([0.1434, 0.1075, 0.0231, 0.1751, 0.2394, 0.0330, 0.1971, 0.1393], + device='cuda:0'), in_proj_covar=tensor([0.0197, 0.0203, 0.0132, 0.0222, 0.0277, 0.0143, 0.0171, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 09:44:27,590 INFO [train.py:901] (0/4) Epoch 26, batch 1150, loss[loss=0.2317, simple_loss=0.3174, pruned_loss=0.07296, over 8281.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05851, over 1616238.95 frames. ], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:27,602 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 09:45:02,742 INFO [train.py:901] (0/4) Epoch 26, batch 1200, loss[loss=0.2056, simple_loss=0.2811, pruned_loss=0.0651, over 8083.00 frames. 
], tot_loss[loss=0.2006, simple_loss=0.2841, pruned_loss=0.05857, over 1613412.45 frames. ], batch size: 21, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:45:27,263 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203307.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:45:27,742 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.402e+02 2.806e+02 3.306e+02 6.331e+02, threshold=5.612e+02, percent-clipped=2.0 +2023-02-07 09:45:36,616 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7094, 1.9096, 2.7611, 1.5799, 1.9902, 2.1077, 1.7225, 1.9749], + device='cuda:0'), covar=tensor([0.1838, 0.2834, 0.0898, 0.4874, 0.2075, 0.3260, 0.2494, 0.2285], + device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0625, 0.0556, 0.0660, 0.0658, 0.0606, 0.0555, 0.0642], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:45:37,088 INFO [train.py:901] (0/4) Epoch 26, batch 1250, loss[loss=0.1693, simple_loss=0.2426, pruned_loss=0.04801, over 7526.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2839, pruned_loss=0.0586, over 1612492.95 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:45:37,263 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8853, 1.3836, 1.5626, 1.3491, 1.0507, 1.3986, 1.5827, 1.2711], + device='cuda:0'), covar=tensor([0.0551, 0.1340, 0.1750, 0.1518, 0.0618, 0.1516, 0.0756, 0.0735], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0099, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 09:45:49,531 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8893, 2.1448, 2.2509, 1.3944, 2.3442, 1.6962, 0.7635, 2.0412], + device='cuda:0'), covar=tensor([0.0605, 0.0356, 0.0309, 0.0690, 0.0443, 0.0904, 0.0970, 0.0360], + device='cuda:0'), in_proj_covar=tensor([0.0461, 0.0400, 0.0356, 0.0453, 0.0387, 0.0539, 0.0395, 0.0428], + device='cuda:0'), out_proj_covar=tensor([1.2258e-04, 1.0409e-04, 9.2922e-05, 1.1871e-04, 1.0138e-04, 1.5074e-04, + 1.0581e-04, 1.1255e-04], device='cuda:0') +2023-02-07 09:46:02,928 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9794, 2.1948, 1.7324, 2.8696, 1.3813, 1.6339, 2.0823, 2.2504], + device='cuda:0'), covar=tensor([0.0693, 0.0780, 0.0900, 0.0291, 0.1039, 0.1234, 0.0775, 0.0649], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0246, 0.0212, 0.0205, 0.0247, 0.0249, 0.0207], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 09:46:11,115 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-07 09:46:12,728 INFO [train.py:901] (0/4) Epoch 26, batch 1300, loss[loss=0.1675, simple_loss=0.2622, pruned_loss=0.03643, over 8516.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2841, pruned_loss=0.05818, over 1619026.22 frames. 
], batch size: 28, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:46:29,975 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6820, 1.6608, 2.1077, 1.3140, 1.2935, 2.1176, 0.2559, 1.3491], + device='cuda:0'), covar=tensor([0.1413, 0.1135, 0.0328, 0.1045, 0.2262, 0.0357, 0.1735, 0.1164], + device='cuda:0'), in_proj_covar=tensor([0.0198, 0.0204, 0.0132, 0.0224, 0.0278, 0.0143, 0.0173, 0.0200], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 09:46:31,896 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:46:37,059 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.437e+02 2.919e+02 3.429e+02 9.499e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-07 09:46:46,424 INFO [train.py:901] (0/4) Epoch 26, batch 1350, loss[loss=0.2235, simple_loss=0.3154, pruned_loss=0.06577, over 8323.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2848, pruned_loss=0.05856, over 1617604.42 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:04,178 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203447.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:22,497 INFO [train.py:901] (0/4) Epoch 26, batch 1400, loss[loss=0.1807, simple_loss=0.2752, pruned_loss=0.04307, over 8251.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2855, pruned_loss=0.05891, over 1619106.34 frames. ], batch size: 24, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:24,886 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 09:47:31,497 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:47,756 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.419e+02 2.906e+02 3.589e+02 5.599e+02, threshold=5.812e+02, percent-clipped=0.0 +2023-02-07 09:47:52,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:54,389 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 09:47:57,076 INFO [train.py:901] (0/4) Epoch 26, batch 1450, loss[loss=0.2253, simple_loss=0.2964, pruned_loss=0.07708, over 8086.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2852, pruned_loss=0.05876, over 1622271.21 frames. ], batch size: 21, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:24,031 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:48:24,749 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203563.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:48:31,364 INFO [train.py:901] (0/4) Epoch 26, batch 1500, loss[loss=0.2415, simple_loss=0.3249, pruned_loss=0.07908, over 8502.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.285, pruned_loss=0.05866, over 1624122.77 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:39,241 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. 
limit=2.0 +2023-02-07 09:48:42,960 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203588.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:48:56,866 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.262e+02 2.668e+02 3.517e+02 8.500e+02, threshold=5.335e+02, percent-clipped=2.0 +2023-02-07 09:49:06,819 INFO [train.py:901] (0/4) Epoch 26, batch 1550, loss[loss=0.1878, simple_loss=0.285, pruned_loss=0.04533, over 8253.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2854, pruned_loss=0.05911, over 1623495.84 frames. ], batch size: 24, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:49:40,408 INFO [train.py:901] (0/4) Epoch 26, batch 1600, loss[loss=0.2115, simple_loss=0.2945, pruned_loss=0.06424, over 8042.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2859, pruned_loss=0.05947, over 1621287.55 frames. ], batch size: 22, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:05,085 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.410e+02 3.021e+02 3.901e+02 1.362e+03, threshold=6.042e+02, percent-clipped=8.0 +2023-02-07 09:50:15,009 INFO [train.py:901] (0/4) Epoch 26, batch 1650, loss[loss=0.1622, simple_loss=0.244, pruned_loss=0.04026, over 7438.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2852, pruned_loss=0.05955, over 1617290.69 frames. ], batch size: 17, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:32,730 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5597, 2.9350, 2.3359, 4.1463, 2.0041, 2.1923, 2.9707, 3.1086], + device='cuda:0'), covar=tensor([0.0750, 0.0741, 0.0880, 0.0231, 0.0961, 0.1204, 0.0769, 0.0751], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0195, 0.0247, 0.0213, 0.0205, 0.0248, 0.0250, 0.0207], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 09:50:36,582 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-07 09:50:44,771 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:45,726 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-07 09:50:48,702 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:49,156 INFO [train.py:901] (0/4) Epoch 26, batch 1700, loss[loss=0.2061, simple_loss=0.2812, pruned_loss=0.0655, over 7799.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2847, pruned_loss=0.05903, over 1617478.59 frames. 
], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:57,400 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:05,466 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:14,290 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.478e+02 3.017e+02 3.791e+02 8.735e+02, threshold=6.035e+02, percent-clipped=4.0 +2023-02-07 09:51:20,893 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:20,912 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5215, 2.0083, 3.1698, 1.3260, 2.4191, 1.9590, 1.5918, 2.4572], + device='cuda:0'), covar=tensor([0.2355, 0.2966, 0.1020, 0.5518, 0.2185, 0.3773, 0.2894, 0.2483], + device='cuda:0'), in_proj_covar=tensor([0.0534, 0.0624, 0.0554, 0.0659, 0.0654, 0.0602, 0.0552, 0.0639], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:51:23,311 INFO [train.py:901] (0/4) Epoch 26, batch 1750, loss[loss=0.2067, simple_loss=0.2923, pruned_loss=0.06059, over 8518.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2859, pruned_loss=0.05948, over 1622620.40 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:51:28,096 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:38,338 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:39,327 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 09:51:43,135 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4536, 1.4217, 1.8269, 1.1647, 1.0521, 1.7981, 0.1489, 1.1483], + device='cuda:0'), covar=tensor([0.1385, 0.1346, 0.0413, 0.0923, 0.2599, 0.0495, 0.1902, 0.1217], + device='cuda:0'), in_proj_covar=tensor([0.0198, 0.0205, 0.0133, 0.0226, 0.0280, 0.0144, 0.0173, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 09:51:45,962 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0708, 2.2854, 3.1147, 1.9423, 2.6539, 2.3160, 2.1302, 2.6099], + device='cuda:0'), covar=tensor([0.1518, 0.2111, 0.0720, 0.3544, 0.1420, 0.2450, 0.1807, 0.1904], + device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0625, 0.0555, 0.0660, 0.0655, 0.0603, 0.0552, 0.0640], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:51:54,043 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:58,807 INFO [train.py:901] (0/4) Epoch 26, batch 1800, loss[loss=0.1925, simple_loss=0.2813, pruned_loss=0.05188, over 8487.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2849, pruned_loss=0.0593, over 1620513.93 frames. 
], batch size: 29, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:52:00,429 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8052, 2.4859, 3.9384, 1.6161, 2.8983, 2.2128, 1.9849, 2.8915], + device='cuda:0'), covar=tensor([0.2166, 0.2772, 0.1066, 0.5434, 0.2161, 0.3756, 0.2784, 0.2855], + device='cuda:0'), in_proj_covar=tensor([0.0534, 0.0623, 0.0554, 0.0659, 0.0655, 0.0603, 0.0551, 0.0639], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:52:21,984 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4120, 1.6045, 2.1491, 1.3230, 1.3689, 1.6707, 1.4148, 1.5807], + device='cuda:0'), covar=tensor([0.1927, 0.2593, 0.0978, 0.4642, 0.2210, 0.3394, 0.2461, 0.2083], + device='cuda:0'), in_proj_covar=tensor([0.0534, 0.0623, 0.0554, 0.0659, 0.0655, 0.0603, 0.0551, 0.0639], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:52:23,002 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.441e+02 2.799e+02 3.336e+02 4.977e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 09:52:29,699 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203918.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:52:32,264 INFO [train.py:901] (0/4) Epoch 26, batch 1850, loss[loss=0.2551, simple_loss=0.3177, pruned_loss=0.09627, over 6846.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2848, pruned_loss=0.05959, over 1620270.13 frames. ], batch size: 71, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:52:47,698 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:53:07,698 INFO [train.py:901] (0/4) Epoch 26, batch 1900, loss[loss=0.2174, simple_loss=0.3059, pruned_loss=0.06448, over 8657.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2839, pruned_loss=0.05899, over 1617597.04 frames. ], batch size: 34, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:27,230 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-204000.pt +2023-02-07 09:53:33,464 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.507e+02 3.073e+02 4.108e+02 9.647e+02, threshold=6.146e+02, percent-clipped=9.0 +2023-02-07 09:53:36,913 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 09:53:42,920 INFO [train.py:901] (0/4) Epoch 26, batch 1950, loss[loss=0.2311, simple_loss=0.3115, pruned_loss=0.07534, over 8561.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.283, pruned_loss=0.05841, over 1617015.50 frames. ], batch size: 49, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:49,445 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-07 09:53:57,074 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.6424, 3.5789, 3.3567, 2.2207, 3.2508, 3.3127, 3.3170, 3.1963], + device='cuda:0'), covar=tensor([0.0849, 0.0671, 0.0933, 0.3818, 0.0860, 0.1200, 0.1301, 0.0902], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0454, 0.0438, 0.0554, 0.0438, 0.0458, 0.0434, 0.0397], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 09:54:07,155 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 09:54:17,200 INFO [train.py:901] (0/4) Epoch 26, batch 2000, loss[loss=0.1803, simple_loss=0.2488, pruned_loss=0.05589, over 7449.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05874, over 1614762.87 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:34,351 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:43,742 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.388e+02 3.050e+02 3.690e+02 7.171e+02, threshold=6.101e+02, percent-clipped=4.0 +2023-02-07 09:54:44,458 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:53,075 INFO [train.py:901] (0/4) Epoch 26, batch 2050, loss[loss=0.2032, simple_loss=0.2739, pruned_loss=0.06626, over 8131.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05876, over 1618351.86 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:57,136 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:15,938 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3707, 1.4424, 1.3885, 1.8277, 0.7708, 1.2713, 1.3118, 1.4498], + device='cuda:0'), covar=tensor([0.0849, 0.0739, 0.0896, 0.0467, 0.1080, 0.1330, 0.0721, 0.0687], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0212, 0.0205, 0.0247, 0.0249, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 09:55:26,603 INFO [train.py:901] (0/4) Epoch 26, batch 2100, loss[loss=0.1691, simple_loss=0.258, pruned_loss=0.04016, over 8087.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2848, pruned_loss=0.05891, over 1619518.27 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:55:33,651 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:47,793 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:52,930 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.312e+02 2.797e+02 3.552e+02 6.063e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-07 09:55:53,722 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:02,442 INFO [train.py:901] (0/4) Epoch 26, batch 2150, loss[loss=0.2471, simple_loss=0.3135, pruned_loss=0.09029, over 7041.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05938, over 1616830.07 frames. 
], batch size: 71, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:56:03,978 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:04,613 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:04,835 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-07 09:56:17,181 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:29,888 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204262.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:56:36,278 INFO [train.py:901] (0/4) Epoch 26, batch 2200, loss[loss=0.1811, simple_loss=0.254, pruned_loss=0.05416, over 7430.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.0593, over 1616871.50 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:01,395 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.362e+02 3.099e+02 4.074e+02 1.599e+03, threshold=6.197e+02, percent-clipped=8.0 +2023-02-07 09:57:11,826 INFO [train.py:901] (0/4) Epoch 26, batch 2250, loss[loss=0.2032, simple_loss=0.2801, pruned_loss=0.06315, over 8471.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05968, over 1614221.19 frames. ], batch size: 27, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:13,359 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:57:46,485 INFO [train.py:901] (0/4) Epoch 26, batch 2300, loss[loss=0.2028, simple_loss=0.292, pruned_loss=0.05674, over 8108.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2846, pruned_loss=0.05885, over 1615570.01 frames. ], batch size: 23, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:50,096 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204377.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:57:57,430 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1169, 2.4784, 2.7889, 1.4820, 3.0074, 1.7904, 1.4529, 2.2379], + device='cuda:0'), covar=tensor([0.0949, 0.0513, 0.0389, 0.0997, 0.0518, 0.1053, 0.1037, 0.0653], + device='cuda:0'), in_proj_covar=tensor([0.0467, 0.0405, 0.0359, 0.0456, 0.0392, 0.0547, 0.0400, 0.0434], + device='cuda:0'), out_proj_covar=tensor([1.2417e-04, 1.0545e-04, 9.3790e-05, 1.1938e-04, 1.0260e-04, 1.5299e-04, + 1.0700e-04, 1.1407e-04], device='cuda:0') +2023-02-07 09:58:10,725 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.304e+02 2.813e+02 3.713e+02 7.684e+02, threshold=5.626e+02, percent-clipped=3.0 +2023-02-07 09:58:17,209 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6714, 1.9873, 2.0817, 1.3743, 2.0746, 1.5461, 0.5405, 1.9418], + device='cuda:0'), covar=tensor([0.0647, 0.0373, 0.0297, 0.0607, 0.0450, 0.1005, 0.1040, 0.0296], + device='cuda:0'), in_proj_covar=tensor([0.0466, 0.0404, 0.0359, 0.0455, 0.0391, 0.0546, 0.0399, 0.0433], + device='cuda:0'), out_proj_covar=tensor([1.2390e-04, 1.0532e-04, 9.3736e-05, 1.1916e-04, 1.0234e-04, 1.5264e-04, + 1.0674e-04, 1.1402e-04], device='cuda:0') +2023-02-07 09:58:21,053 INFO [train.py:901] (0/4) Epoch 26, batch 2350, loss[loss=0.1849, simple_loss=0.275, pruned_loss=0.04741, over 7822.00 frames. 
], tot_loss[loss=0.2011, simple_loss=0.2846, pruned_loss=0.05882, over 1616617.33 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:58:33,848 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:58:57,206 INFO [train.py:901] (0/4) Epoch 26, batch 2400, loss[loss=0.2032, simple_loss=0.2988, pruned_loss=0.05383, over 8477.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.285, pruned_loss=0.05942, over 1615233.86 frames. ], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:02,850 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:16,083 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:20,382 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:22,321 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.551e+02 2.904e+02 3.805e+02 7.023e+02, threshold=5.807e+02, percent-clipped=3.0 +2023-02-07 09:59:32,143 INFO [train.py:901] (0/4) Epoch 26, batch 2450, loss[loss=0.2382, simple_loss=0.3143, pruned_loss=0.08104, over 8347.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2848, pruned_loss=0.05973, over 1614325.91 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:33,753 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204524.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:34,321 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:56,587 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:08,715 INFO [train.py:901] (0/4) Epoch 26, batch 2500, loss[loss=0.1955, simple_loss=0.2827, pruned_loss=0.05411, over 8605.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2849, pruned_loss=0.05953, over 1615067.58 frames. ], batch size: 39, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:15,144 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:25,197 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6594, 2.6423, 1.8877, 2.4829, 2.2362, 1.6605, 2.2585, 2.2917], + device='cuda:0'), covar=tensor([0.1672, 0.0464, 0.1309, 0.0668, 0.0778, 0.1652, 0.1056, 0.1090], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0240, 0.0341, 0.0312, 0.0303, 0.0346, 0.0348, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:00:31,996 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:33,809 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.483e+02 3.074e+02 3.585e+02 8.993e+02, threshold=6.148e+02, percent-clipped=7.0 +2023-02-07 10:00:43,171 INFO [train.py:901] (0/4) Epoch 26, batch 2550, loss[loss=0.1717, simple_loss=0.2492, pruned_loss=0.04712, over 7434.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2841, pruned_loss=0.05912, over 1611694.83 frames. 
], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:50,563 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204633.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 10:00:55,139 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204640.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:01:07,859 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204658.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 10:01:18,432 INFO [train.py:901] (0/4) Epoch 26, batch 2600, loss[loss=0.186, simple_loss=0.2655, pruned_loss=0.05324, over 7520.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.0589, over 1615656.99 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:01:43,337 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.465e+02 3.094e+02 3.874e+02 9.576e+02, threshold=6.187e+02, percent-clipped=4.0 +2023-02-07 10:01:52,890 INFO [train.py:901] (0/4) Epoch 26, batch 2650, loss[loss=0.1698, simple_loss=0.2454, pruned_loss=0.04707, over 7712.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2855, pruned_loss=0.05973, over 1618225.86 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:14,671 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 10:02:28,033 INFO [train.py:901] (0/4) Epoch 26, batch 2700, loss[loss=0.2164, simple_loss=0.3102, pruned_loss=0.06128, over 8354.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2851, pruned_loss=0.05952, over 1614379.19 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:53,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.359e+02 2.865e+02 3.674e+02 6.992e+02, threshold=5.730e+02, percent-clipped=1.0 +2023-02-07 10:02:55,422 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:04,008 INFO [train.py:901] (0/4) Epoch 26, batch 2750, loss[loss=0.2275, simple_loss=0.3246, pruned_loss=0.06526, over 8465.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2851, pruned_loss=0.05918, over 1617702.17 frames. ], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:13,288 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:14,638 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0707, 1.3725, 1.6706, 1.2560, 0.7467, 1.4542, 1.1880, 1.0336], + device='cuda:0'), covar=tensor([0.0627, 0.1200, 0.1591, 0.1402, 0.0556, 0.1429, 0.0656, 0.0719], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 10:03:16,002 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:38,122 INFO [train.py:901] (0/4) Epoch 26, batch 2800, loss[loss=0.2401, simple_loss=0.3163, pruned_loss=0.08197, over 8377.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2842, pruned_loss=0.05886, over 1615988.06 frames. 
], batch size: 49, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:53,216 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5914, 1.2694, 2.8772, 1.2731, 2.1319, 3.0922, 3.2229, 2.6218], + device='cuda:0'), covar=tensor([0.1336, 0.1880, 0.0397, 0.2261, 0.0917, 0.0302, 0.0692, 0.0622], + device='cuda:0'), in_proj_covar=tensor([0.0304, 0.0328, 0.0290, 0.0320, 0.0322, 0.0276, 0.0436, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:03:55,282 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204896.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:03,030 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.78 vs. limit=5.0 +2023-02-07 10:04:04,626 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.285e+02 3.108e+02 3.828e+02 9.944e+02, threshold=6.216e+02, percent-clipped=6.0 +2023-02-07 10:04:13,906 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204921.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:14,413 INFO [train.py:901] (0/4) Epoch 26, batch 2850, loss[loss=0.1872, simple_loss=0.2573, pruned_loss=0.05857, over 7426.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2841, pruned_loss=0.05861, over 1616069.43 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:04:48,524 INFO [train.py:901] (0/4) Epoch 26, batch 2900, loss[loss=0.2229, simple_loss=0.3036, pruned_loss=0.07109, over 8493.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2855, pruned_loss=0.05944, over 1614366.82 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:13,383 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.393e+02 3.052e+02 3.991e+02 9.487e+02, threshold=6.105e+02, percent-clipped=5.0 +2023-02-07 10:05:20,488 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 10:05:23,876 INFO [train.py:901] (0/4) Epoch 26, batch 2950, loss[loss=0.258, simple_loss=0.3299, pruned_loss=0.09302, over 8434.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2861, pruned_loss=0.06002, over 1616069.85 frames. ], batch size: 27, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:52,963 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 10:05:58,486 INFO [train.py:901] (0/4) Epoch 26, batch 3000, loss[loss=0.2005, simple_loss=0.2787, pruned_loss=0.06112, over 8335.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2863, pruned_loss=0.05966, over 1612946.14 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:58,487 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 10:06:11,416 INFO [train.py:935] (0/4) Epoch 26, validation: loss=0.1716, simple_loss=0.2713, pruned_loss=0.03593, over 944034.00 frames. +2023-02-07 10:06:11,417 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6760MB +2023-02-07 10:06:36,696 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.246e+02 2.785e+02 3.735e+02 7.523e+02, threshold=5.571e+02, percent-clipped=3.0 +2023-02-07 10:06:45,997 INFO [train.py:901] (0/4) Epoch 26, batch 3050, loss[loss=0.1909, simple_loss=0.2718, pruned_loss=0.05499, over 7965.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2866, pruned_loss=0.06014, over 1616177.42 frames. 
], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:06:47,781 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 10:06:49,433 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:22,789 INFO [train.py:901] (0/4) Epoch 26, batch 3100, loss[loss=0.1763, simple_loss=0.2668, pruned_loss=0.04295, over 8092.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2865, pruned_loss=0.06009, over 1621382.13 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:07:24,530 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.78 vs. limit=5.0 +2023-02-07 10:07:30,228 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:48,156 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.327e+02 2.997e+02 4.038e+02 1.256e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 10:07:53,683 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:57,550 INFO [train.py:901] (0/4) Epoch 26, batch 3150, loss[loss=0.1824, simple_loss=0.2674, pruned_loss=0.0487, over 8063.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2867, pruned_loss=0.06016, over 1624167.30 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:33,477 INFO [train.py:901] (0/4) Epoch 26, batch 3200, loss[loss=0.1802, simple_loss=0.2632, pruned_loss=0.04863, over 7812.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2845, pruned_loss=0.0591, over 1621126.77 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:52,246 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:08:52,270 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7589, 2.6244, 1.8883, 2.3245, 2.2993, 1.6618, 2.1724, 2.2166], + device='cuda:0'), covar=tensor([0.1362, 0.0422, 0.1216, 0.0620, 0.0636, 0.1474, 0.1000, 0.0916], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0238, 0.0337, 0.0309, 0.0299, 0.0341, 0.0344, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:08:58,807 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.527e+02 3.010e+02 3.735e+02 6.895e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-07 10:08:58,936 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8391, 3.7769, 3.4660, 1.9162, 3.3886, 3.5080, 3.4101, 3.3390], + device='cuda:0'), covar=tensor([0.0897, 0.0628, 0.1209, 0.4570, 0.0968, 0.1074, 0.1404, 0.0843], + device='cuda:0'), in_proj_covar=tensor([0.0534, 0.0449, 0.0437, 0.0549, 0.0432, 0.0455, 0.0431, 0.0394], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:09:09,092 INFO [train.py:901] (0/4) Epoch 26, batch 3250, loss[loss=0.2069, simple_loss=0.2918, pruned_loss=0.06101, over 8293.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05928, over 1623959.20 frames. 
], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:09:39,876 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.2192, 5.2444, 4.7364, 2.8939, 4.6263, 4.9160, 4.8585, 4.6707], + device='cuda:0'), covar=tensor([0.0576, 0.0433, 0.0856, 0.3746, 0.0874, 0.0919, 0.1051, 0.0679], + device='cuda:0'), in_proj_covar=tensor([0.0531, 0.0448, 0.0435, 0.0547, 0.0431, 0.0453, 0.0429, 0.0393], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:09:43,186 INFO [train.py:901] (0/4) Epoch 26, batch 3300, loss[loss=0.1579, simple_loss=0.2388, pruned_loss=0.03849, over 7904.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2857, pruned_loss=0.05971, over 1622942.70 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:10:10,314 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.313e+02 2.653e+02 3.358e+02 9.214e+02, threshold=5.305e+02, percent-clipped=4.0 +2023-02-07 10:10:20,066 INFO [train.py:901] (0/4) Epoch 26, batch 3350, loss[loss=0.2318, simple_loss=0.315, pruned_loss=0.07433, over 8594.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2852, pruned_loss=0.05909, over 1623908.13 frames. ], batch size: 34, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:10:32,196 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-07 10:10:54,100 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:10:54,722 INFO [train.py:901] (0/4) Epoch 26, batch 3400, loss[loss=0.2276, simple_loss=0.3161, pruned_loss=0.06958, over 8398.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2839, pruned_loss=0.05838, over 1624103.50 frames. ], batch size: 49, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:07,754 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 10:11:20,313 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.409e+02 2.883e+02 3.635e+02 7.106e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 10:11:30,475 INFO [train.py:901] (0/4) Epoch 26, batch 3450, loss[loss=0.203, simple_loss=0.2857, pruned_loss=0.06012, over 8526.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2834, pruned_loss=0.05804, over 1620321.80 frames. ], batch size: 49, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:45,457 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7365, 1.3239, 2.8904, 1.4650, 2.2529, 3.0941, 3.2321, 2.6478], + device='cuda:0'), covar=tensor([0.1168, 0.1813, 0.0353, 0.2040, 0.0866, 0.0293, 0.0595, 0.0599], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0326, 0.0289, 0.0318, 0.0320, 0.0275, 0.0434, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:11:53,149 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:11:54,048 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-07 10:11:56,802 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-07 10:11:57,155 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:05,241 INFO [train.py:901] (0/4) Epoch 26, batch 3500, loss[loss=0.1962, simple_loss=0.2875, pruned_loss=0.05242, over 8481.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05775, over 1618425.88 frames. ], batch size: 25, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:12:10,353 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:15,144 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:24,883 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 10:12:30,113 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.208e+02 2.714e+02 3.358e+02 5.744e+02, threshold=5.428e+02, percent-clipped=0.0 +2023-02-07 10:12:39,510 INFO [train.py:901] (0/4) Epoch 26, batch 3550, loss[loss=0.2119, simple_loss=0.2993, pruned_loss=0.06224, over 8108.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2834, pruned_loss=0.05795, over 1617514.28 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:12:59,247 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 10:13:08,106 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3819, 2.7113, 3.0498, 1.8065, 3.4335, 2.0904, 1.6271, 2.3927], + device='cuda:0'), covar=tensor([0.0765, 0.0400, 0.0282, 0.0841, 0.0381, 0.0847, 0.1063, 0.0542], + device='cuda:0'), in_proj_covar=tensor([0.0470, 0.0407, 0.0363, 0.0458, 0.0391, 0.0553, 0.0402, 0.0436], + device='cuda:0'), out_proj_covar=tensor([1.2482e-04, 1.0608e-04, 9.4851e-05, 1.1996e-04, 1.0242e-04, 1.5480e-04, + 1.0754e-04, 1.1466e-04], device='cuda:0') +2023-02-07 10:13:12,849 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7727, 1.9970, 1.6009, 2.7833, 1.2870, 1.4448, 1.9742, 2.1859], + device='cuda:0'), covar=tensor([0.0906, 0.0937, 0.1141, 0.0402, 0.1181, 0.1488, 0.0905, 0.0772], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0193, 0.0244, 0.0210, 0.0202, 0.0245, 0.0247, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 10:13:14,225 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7731, 2.0325, 2.9862, 1.5923, 2.3592, 2.2085, 1.7359, 2.3780], + device='cuda:0'), covar=tensor([0.1796, 0.2538, 0.0837, 0.4360, 0.1659, 0.3008, 0.2336, 0.1995], + device='cuda:0'), in_proj_covar=tensor([0.0536, 0.0625, 0.0558, 0.0662, 0.0657, 0.0604, 0.0557, 0.0640], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:13:15,380 INFO [train.py:901] (0/4) Epoch 26, batch 3600, loss[loss=0.1603, simple_loss=0.2432, pruned_loss=0.03869, over 7316.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2835, pruned_loss=0.05835, over 1617163.30 frames. 
], batch size: 16, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:16,205 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205673.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:17,471 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:21,537 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0356, 1.6113, 3.3671, 1.5154, 2.4495, 3.7205, 3.8522, 3.1196], + device='cuda:0'), covar=tensor([0.1222, 0.1813, 0.0351, 0.2178, 0.1073, 0.0246, 0.0531, 0.0592], + device='cuda:0'), in_proj_covar=tensor([0.0304, 0.0328, 0.0291, 0.0320, 0.0322, 0.0277, 0.0437, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:13:37,463 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 10:13:39,662 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.295e+02 2.882e+02 3.730e+02 8.207e+02, threshold=5.763e+02, percent-clipped=6.0 +2023-02-07 10:13:49,105 INFO [train.py:901] (0/4) Epoch 26, batch 3650, loss[loss=0.1651, simple_loss=0.2453, pruned_loss=0.04243, over 7641.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05804, over 1612244.72 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:56,158 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8001, 1.7578, 2.3252, 1.4674, 1.3206, 2.3052, 0.3406, 1.4244], + device='cuda:0'), covar=tensor([0.1578, 0.1109, 0.0291, 0.1045, 0.2291, 0.0306, 0.1930, 0.1191], + device='cuda:0'), in_proj_covar=tensor([0.0195, 0.0201, 0.0131, 0.0220, 0.0274, 0.0143, 0.0170, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 10:14:18,442 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:14:24,801 INFO [train.py:901] (0/4) Epoch 26, batch 3700, loss[loss=0.1543, simple_loss=0.2419, pruned_loss=0.03338, over 7791.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.05808, over 1610864.72 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:27,591 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:14:49,669 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.377e+02 2.968e+02 3.727e+02 1.221e+03, threshold=5.937e+02, percent-clipped=5.0 +2023-02-07 10:14:59,197 INFO [train.py:901] (0/4) Epoch 26, batch 3750, loss[loss=0.2099, simple_loss=0.291, pruned_loss=0.06441, over 8612.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.282, pruned_loss=0.05783, over 1607224.28 frames. 
], batch size: 31, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:12,916 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:14,925 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6971, 1.8961, 1.9988, 1.4833, 2.1761, 1.5426, 0.8163, 1.8955], + device='cuda:0'), covar=tensor([0.0677, 0.0394, 0.0338, 0.0612, 0.0436, 0.0930, 0.0936, 0.0352], + device='cuda:0'), in_proj_covar=tensor([0.0470, 0.0408, 0.0362, 0.0458, 0.0391, 0.0552, 0.0402, 0.0436], + device='cuda:0'), out_proj_covar=tensor([1.2485e-04, 1.0618e-04, 9.4536e-05, 1.1998e-04, 1.0244e-04, 1.5464e-04, + 1.0771e-04, 1.1476e-04], device='cuda:0') +2023-02-07 10:15:24,381 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0541, 3.6302, 2.3024, 2.9716, 2.7234, 2.1079, 2.7570, 3.1115], + device='cuda:0'), covar=tensor([0.1837, 0.0353, 0.1226, 0.0734, 0.0820, 0.1469, 0.1194, 0.1046], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0239, 0.0341, 0.0311, 0.0302, 0.0344, 0.0347, 0.0322], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:15:31,317 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:34,438 INFO [train.py:901] (0/4) Epoch 26, batch 3800, loss[loss=0.2197, simple_loss=0.3085, pruned_loss=0.06545, over 8239.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05699, over 1610900.62 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:52,886 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4817, 2.4330, 3.1906, 2.5188, 3.1513, 2.5224, 2.4651, 2.0179], + device='cuda:0'), covar=tensor([0.5831, 0.5123, 0.1997, 0.4070, 0.2536, 0.3155, 0.1980, 0.5369], + device='cuda:0'), in_proj_covar=tensor([0.0956, 0.1005, 0.0825, 0.0981, 0.1017, 0.0919, 0.0764, 0.0841], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 10:15:59,216 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.381e+02 2.847e+02 3.364e+02 6.986e+02, threshold=5.694e+02, percent-clipped=1.0 +2023-02-07 10:15:59,374 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:08,813 INFO [train.py:901] (0/4) Epoch 26, batch 3850, loss[loss=0.2021, simple_loss=0.2964, pruned_loss=0.05383, over 8505.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2818, pruned_loss=0.05727, over 1609632.99 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:16:15,133 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205931.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:29,197 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-07 10:16:29,384 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5569, 2.6130, 1.8273, 2.4029, 2.2010, 1.6377, 2.1472, 2.2245], + device='cuda:0'), covar=tensor([0.1683, 0.0403, 0.1245, 0.0695, 0.0839, 0.1594, 0.1079, 0.1116], + device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0238, 0.0337, 0.0309, 0.0300, 0.0342, 0.0345, 0.0320], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:16:32,105 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205956.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:36,156 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4724, 1.2537, 2.3723, 1.3533, 2.2194, 2.5298, 2.7024, 2.1796], + device='cuda:0'), covar=tensor([0.1111, 0.1512, 0.0397, 0.2012, 0.0767, 0.0377, 0.0648, 0.0603], + device='cuda:0'), in_proj_covar=tensor([0.0304, 0.0328, 0.0291, 0.0321, 0.0322, 0.0278, 0.0438, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:16:42,784 INFO [train.py:901] (0/4) Epoch 26, batch 3900, loss[loss=0.2037, simple_loss=0.2998, pruned_loss=0.05385, over 8464.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.05732, over 1612454.74 frames. ], batch size: 27, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:03,801 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-206000.pt +2023-02-07 10:17:05,680 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.02 vs. limit=5.0 +2023-02-07 10:17:09,988 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.441e+02 2.892e+02 3.706e+02 7.796e+02, threshold=5.785e+02, percent-clipped=3.0 +2023-02-07 10:17:12,781 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4200, 2.0278, 3.0031, 1.7796, 1.5568, 2.9880, 0.7737, 2.1951], + device='cuda:0'), covar=tensor([0.1156, 0.1231, 0.0300, 0.1243, 0.2406, 0.0297, 0.1935, 0.1142], + device='cuda:0'), in_proj_covar=tensor([0.0195, 0.0200, 0.0130, 0.0219, 0.0272, 0.0143, 0.0170, 0.0196], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 10:17:16,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:17:20,037 INFO [train.py:901] (0/4) Epoch 26, batch 3950, loss[loss=0.1887, simple_loss=0.2855, pruned_loss=0.04594, over 8464.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2825, pruned_loss=0.05722, over 1615385.69 frames. ], batch size: 25, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:49,442 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0611, 1.8263, 2.3169, 2.0138, 2.3170, 2.1352, 1.9754, 1.0845], + device='cuda:0'), covar=tensor([0.5768, 0.4772, 0.2093, 0.3770, 0.2559, 0.3168, 0.1972, 0.5269], + device='cuda:0'), in_proj_covar=tensor([0.0955, 0.1004, 0.0826, 0.0978, 0.1017, 0.0918, 0.0762, 0.0840], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 10:17:53,924 INFO [train.py:901] (0/4) Epoch 26, batch 4000, loss[loss=0.1777, simple_loss=0.2711, pruned_loss=0.04218, over 7228.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2821, pruned_loss=0.05721, over 1615548.28 frames. 
], batch size: 16, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:55,452 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:18,670 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206106.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:19,947 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.407e+02 2.986e+02 3.556e+02 8.558e+02, threshold=5.971e+02, percent-clipped=6.0 +2023-02-07 10:18:29,518 INFO [train.py:901] (0/4) Epoch 26, batch 4050, loss[loss=0.2111, simple_loss=0.3017, pruned_loss=0.0602, over 8323.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2832, pruned_loss=0.05819, over 1615821.41 frames. ], batch size: 25, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:18:37,175 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:52,053 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3465, 1.6869, 1.2933, 2.6510, 1.3023, 1.2316, 1.9802, 1.8495], + device='cuda:0'), covar=tensor([0.1530, 0.1155, 0.1919, 0.0375, 0.1173, 0.2003, 0.0778, 0.0950], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0192, 0.0242, 0.0210, 0.0201, 0.0244, 0.0246, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 10:19:03,799 INFO [train.py:901] (0/4) Epoch 26, batch 4100, loss[loss=0.1928, simple_loss=0.2771, pruned_loss=0.05427, over 8594.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05845, over 1620160.71 frames. ], batch size: 34, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:28,869 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.376e+02 2.755e+02 3.418e+02 9.873e+02, threshold=5.510e+02, percent-clipped=4.0 +2023-02-07 10:19:34,505 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,248 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206221.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,711 INFO [train.py:901] (0/4) Epoch 26, batch 4150, loss[loss=0.213, simple_loss=0.28, pruned_loss=0.07305, over 7933.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05817, over 1621701.39 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:20:00,781 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:20:14,207 INFO [train.py:901] (0/4) Epoch 26, batch 4200, loss[loss=0.2055, simple_loss=0.2963, pruned_loss=0.05737, over 8368.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2845, pruned_loss=0.05857, over 1622058.88 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:20:22,976 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 10:20:38,388 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.335e+02 2.968e+02 3.755e+02 9.805e+02, threshold=5.936e+02, percent-clipped=3.0 +2023-02-07 10:20:44,915 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 10:20:49,126 INFO [train.py:901] (0/4) Epoch 26, batch 4250, loss[loss=0.1719, simple_loss=0.2504, pruned_loss=0.04674, over 7697.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.05815, over 1618472.04 frames. ], batch size: 18, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:20:54,128 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7151, 1.5021, 2.9125, 1.4202, 2.3123, 3.1437, 3.2421, 2.6885], + device='cuda:0'), covar=tensor([0.1151, 0.1605, 0.0354, 0.2069, 0.0799, 0.0290, 0.0594, 0.0559], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0325, 0.0290, 0.0319, 0.0320, 0.0277, 0.0435, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:21:21,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:24,650 INFO [train.py:901] (0/4) Epoch 26, batch 4300, loss[loss=0.15, simple_loss=0.2429, pruned_loss=0.02857, over 6764.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.0577, over 1619522.55 frames. ], batch size: 15, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:21:35,899 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:50,296 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.331e+02 2.890e+02 3.800e+02 6.492e+02, threshold=5.781e+02, percent-clipped=2.0 +2023-02-07 10:21:53,271 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206413.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:56,610 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:59,355 INFO [train.py:901] (0/4) Epoch 26, batch 4350, loss[loss=0.1966, simple_loss=0.2683, pruned_loss=0.06248, over 7813.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05812, over 1618080.31 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:02,343 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9663, 1.1879, 1.0848, 1.9271, 0.8250, 0.9684, 1.4217, 1.3432], + device='cuda:0'), covar=tensor([0.1675, 0.1171, 0.1906, 0.0473, 0.1219, 0.2017, 0.0729, 0.0953], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0212, 0.0203, 0.0247, 0.0250, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 10:22:18,981 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 10:22:34,786 INFO [train.py:901] (0/4) Epoch 26, batch 4400, loss[loss=0.1889, simple_loss=0.2564, pruned_loss=0.06065, over 7441.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.05873, over 1618678.47 frames. 
], batch size: 17, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:38,336 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:22:55,676 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:00,161 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.619e+02 3.000e+02 3.925e+02 8.429e+02, threshold=6.000e+02, percent-clipped=7.0 +2023-02-07 10:23:00,194 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 10:23:08,805 INFO [train.py:901] (0/4) Epoch 26, batch 4450, loss[loss=0.1999, simple_loss=0.2892, pruned_loss=0.05527, over 8462.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2846, pruned_loss=0.05923, over 1620383.21 frames. ], batch size: 27, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:16,246 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206533.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:34,134 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:44,257 INFO [train.py:901] (0/4) Epoch 26, batch 4500, loss[loss=0.2005, simple_loss=0.2909, pruned_loss=0.05512, over 8508.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2842, pruned_loss=0.05886, over 1623698.87 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:55,214 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 10:24:05,596 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9847, 1.5681, 1.8212, 1.3881, 1.0523, 1.5553, 1.8097, 1.5222], + device='cuda:0'), covar=tensor([0.0533, 0.1278, 0.1602, 0.1480, 0.0619, 0.1487, 0.0689, 0.0688], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 10:24:10,065 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.363e+02 2.961e+02 3.499e+02 6.135e+02, threshold=5.921e+02, percent-clipped=1.0 +2023-02-07 10:24:18,680 INFO [train.py:901] (0/4) Epoch 26, batch 4550, loss[loss=0.2143, simple_loss=0.3029, pruned_loss=0.06289, over 8322.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2826, pruned_loss=0.05793, over 1619054.44 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:19,481 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:35,787 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:51,533 INFO [train.py:901] (0/4) Epoch 26, batch 4600, loss[loss=0.1764, simple_loss=0.2682, pruned_loss=0.04232, over 7796.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05888, over 1617268.64 frames. 
], batch size: 20, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:53,052 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206674.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:25:06,512 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0417, 1.2573, 1.1847, 0.7088, 1.2200, 1.1023, 0.0702, 1.2154], + device='cuda:0'), covar=tensor([0.0455, 0.0422, 0.0394, 0.0648, 0.0468, 0.0931, 0.0933, 0.0381], + device='cuda:0'), in_proj_covar=tensor([0.0468, 0.0405, 0.0362, 0.0458, 0.0391, 0.0550, 0.0401, 0.0437], + device='cuda:0'), out_proj_covar=tensor([1.2427e-04, 1.0548e-04, 9.4578e-05, 1.2001e-04, 1.0246e-04, 1.5383e-04, + 1.0739e-04, 1.1479e-04], device='cuda:0') +2023-02-07 10:25:16,709 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7859, 1.5710, 2.8812, 1.3324, 2.2530, 3.0585, 3.2368, 2.6203], + device='cuda:0'), covar=tensor([0.1125, 0.1552, 0.0365, 0.2212, 0.0886, 0.0309, 0.0692, 0.0551], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0325, 0.0289, 0.0319, 0.0319, 0.0276, 0.0434, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:25:18,508 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.342e+02 2.811e+02 3.625e+02 9.770e+02, threshold=5.622e+02, percent-clipped=5.0 +2023-02-07 10:25:28,313 INFO [train.py:901] (0/4) Epoch 26, batch 4650, loss[loss=0.2395, simple_loss=0.319, pruned_loss=0.07998, over 8661.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2842, pruned_loss=0.05883, over 1619454.33 frames. ], batch size: 49, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:26:02,070 INFO [train.py:901] (0/4) Epoch 26, batch 4700, loss[loss=0.2093, simple_loss=0.2888, pruned_loss=0.06493, over 7788.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2829, pruned_loss=0.0578, over 1619211.53 frames. ], batch size: 19, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:12,877 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0127, 1.7481, 2.9896, 1.6918, 2.4116, 3.2583, 3.3255, 2.8560], + device='cuda:0'), covar=tensor([0.1074, 0.1639, 0.0431, 0.1874, 0.1185, 0.0265, 0.0598, 0.0495], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0326, 0.0289, 0.0319, 0.0319, 0.0276, 0.0435, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:26:13,614 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206789.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:28,922 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.506e+02 2.890e+02 3.298e+02 6.611e+02, threshold=5.779e+02, percent-clipped=3.0 +2023-02-07 10:26:32,494 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:37,697 INFO [train.py:901] (0/4) Epoch 26, batch 4750, loss[loss=0.2038, simple_loss=0.2937, pruned_loss=0.05694, over 8513.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05839, over 1617867.37 frames. ], batch size: 49, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:53,338 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 10:26:55,372 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-07 10:26:58,821 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206852.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:27:12,088 INFO [train.py:901] (0/4) Epoch 26, batch 4800, loss[loss=0.1879, simple_loss=0.2819, pruned_loss=0.047, over 8463.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.05823, over 1618116.20 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:37,299 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.410e+02 2.886e+02 3.541e+02 7.542e+02, threshold=5.772e+02, percent-clipped=6.0 +2023-02-07 10:27:46,704 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 10:27:47,383 INFO [train.py:901] (0/4) Epoch 26, batch 4850, loss[loss=0.1633, simple_loss=0.245, pruned_loss=0.04078, over 7808.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2826, pruned_loss=0.05839, over 1615821.35 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:52,976 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:10,401 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206955.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:21,661 INFO [train.py:901] (0/4) Epoch 26, batch 4900, loss[loss=0.2147, simple_loss=0.3002, pruned_loss=0.06455, over 8189.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2829, pruned_loss=0.05873, over 1613917.23 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:28:46,039 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.535e+02 3.142e+02 3.836e+02 8.051e+02, threshold=6.285e+02, percent-clipped=2.0 +2023-02-07 10:28:46,439 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 10:28:55,267 INFO [train.py:901] (0/4) Epoch 26, batch 4950, loss[loss=0.1958, simple_loss=0.2758, pruned_loss=0.05792, over 8235.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05893, over 1616118.30 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:18,676 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2400, 3.1575, 2.9746, 1.5528, 2.9173, 2.9178, 2.8146, 2.8026], + device='cuda:0'), covar=tensor([0.1224, 0.0892, 0.1371, 0.4536, 0.1169, 0.1339, 0.1861, 0.1066], + device='cuda:0'), in_proj_covar=tensor([0.0541, 0.0455, 0.0443, 0.0553, 0.0439, 0.0461, 0.0438, 0.0403], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:29:32,340 INFO [train.py:901] (0/4) Epoch 26, batch 5000, loss[loss=0.1608, simple_loss=0.2461, pruned_loss=0.0377, over 7805.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2853, pruned_loss=0.05983, over 1618702.30 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:57,378 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.413e+02 2.985e+02 3.933e+02 1.062e+03, threshold=5.970e+02, percent-clipped=3.0 +2023-02-07 10:30:06,446 INFO [train.py:901] (0/4) Epoch 26, batch 5050, loss[loss=0.2192, simple_loss=0.3068, pruned_loss=0.06582, over 8335.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2853, pruned_loss=0.05981, over 1614871.25 frames. 
], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:24,789 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 10:30:42,637 INFO [train.py:901] (0/4) Epoch 26, batch 5100, loss[loss=0.2003, simple_loss=0.2855, pruned_loss=0.05754, over 8636.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.284, pruned_loss=0.0591, over 1613945.79 frames. ], batch size: 49, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:58,150 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=207194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:30:58,765 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8633, 6.0112, 5.2511, 2.5114, 5.3140, 5.5793, 5.4631, 5.3782], + device='cuda:0'), covar=tensor([0.0490, 0.0315, 0.0866, 0.4072, 0.0761, 0.0792, 0.1051, 0.0591], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0453, 0.0440, 0.0550, 0.0437, 0.0457, 0.0434, 0.0400], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:30:59,431 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:31:08,230 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.087e+02 2.633e+02 3.622e+02 6.552e+02, threshold=5.265e+02, percent-clipped=1.0 +2023-02-07 10:31:16,935 INFO [train.py:901] (0/4) Epoch 26, batch 5150, loss[loss=0.1909, simple_loss=0.2816, pruned_loss=0.05007, over 8467.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05924, over 1613746.10 frames. ], batch size: 27, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:31:34,006 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5750, 1.8659, 1.8846, 1.2622, 1.9214, 1.4189, 0.4342, 1.7762], + device='cuda:0'), covar=tensor([0.0538, 0.0377, 0.0301, 0.0592, 0.0461, 0.1004, 0.0991, 0.0354], + device='cuda:0'), in_proj_covar=tensor([0.0468, 0.0407, 0.0361, 0.0459, 0.0392, 0.0551, 0.0403, 0.0437], + device='cuda:0'), out_proj_covar=tensor([1.2442e-04, 1.0592e-04, 9.4180e-05, 1.2007e-04, 1.0281e-04, 1.5430e-04, + 1.0792e-04, 1.1494e-04], device='cuda:0') +2023-02-07 10:31:52,790 INFO [train.py:901] (0/4) Epoch 26, batch 5200, loss[loss=0.2195, simple_loss=0.3105, pruned_loss=0.06421, over 8286.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2832, pruned_loss=0.05894, over 1612339.67 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:09,867 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 10:32:17,989 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.585e+02 3.464e+02 4.468e+02 1.375e+03, threshold=6.928e+02, percent-clipped=16.0 +2023-02-07 10:32:19,447 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:32:19,957 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 10:32:26,697 INFO [train.py:901] (0/4) Epoch 26, batch 5250, loss[loss=0.2141, simple_loss=0.2943, pruned_loss=0.06693, over 8606.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2833, pruned_loss=0.05909, over 1613666.01 frames. 
], batch size: 34, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:29,621 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5358, 2.4261, 1.7053, 2.1987, 2.0680, 1.5455, 1.9452, 2.0251], + device='cuda:0'), covar=tensor([0.1635, 0.0470, 0.1402, 0.0737, 0.0965, 0.1821, 0.1265, 0.1118], + device='cuda:0'), in_proj_covar=tensor([0.0361, 0.0241, 0.0340, 0.0312, 0.0302, 0.0347, 0.0349, 0.0324], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:32:50,482 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9424, 1.6908, 6.0727, 2.1205, 5.4886, 5.1322, 5.6028, 5.4880], + device='cuda:0'), covar=tensor([0.0449, 0.4915, 0.0352, 0.4199, 0.0958, 0.0905, 0.0507, 0.0553], + device='cuda:0'), in_proj_covar=tensor([0.0665, 0.0661, 0.0730, 0.0652, 0.0739, 0.0628, 0.0629, 0.0703], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:33:00,346 INFO [train.py:901] (0/4) Epoch 26, batch 5300, loss[loss=0.2238, simple_loss=0.3084, pruned_loss=0.06954, over 8501.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2841, pruned_loss=0.05942, over 1614086.55 frames. ], batch size: 28, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:33:27,789 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.392e+02 2.913e+02 3.782e+02 6.658e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-07 10:33:36,843 INFO [train.py:901] (0/4) Epoch 26, batch 5350, loss[loss=0.1904, simple_loss=0.2739, pruned_loss=0.05341, over 8253.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2823, pruned_loss=0.05832, over 1606856.95 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:10,280 INFO [train.py:901] (0/4) Epoch 26, batch 5400, loss[loss=0.1745, simple_loss=0.2596, pruned_loss=0.04473, over 7792.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05726, over 1607892.30 frames. ], batch size: 19, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:37,323 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.344e+02 3.061e+02 4.157e+02 9.885e+02, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 10:34:46,157 INFO [train.py:901] (0/4) Epoch 26, batch 5450, loss[loss=0.1776, simple_loss=0.2551, pruned_loss=0.04999, over 7444.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.05755, over 1610286.67 frames. ], batch size: 17, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:56,175 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 10:34:57,839 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:06,590 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 10:35:17,344 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:20,580 INFO [train.py:901] (0/4) Epoch 26, batch 5500, loss[loss=0.1898, simple_loss=0.2822, pruned_loss=0.04865, over 8130.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2827, pruned_loss=0.05754, over 1614200.59 frames. 
], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:35:31,007 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8058, 2.0345, 2.1638, 1.4648, 2.2717, 1.5356, 0.7117, 2.0462], + device='cuda:0'), covar=tensor([0.0701, 0.0417, 0.0352, 0.0703, 0.0515, 0.1032, 0.1026, 0.0342], + device='cuda:0'), in_proj_covar=tensor([0.0464, 0.0403, 0.0358, 0.0455, 0.0389, 0.0546, 0.0400, 0.0432], + device='cuda:0'), out_proj_covar=tensor([1.2314e-04, 1.0483e-04, 9.3501e-05, 1.1919e-04, 1.0202e-04, 1.5287e-04, + 1.0721e-04, 1.1351e-04], device='cuda:0') +2023-02-07 10:35:34,396 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207592.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:47,182 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.395e+02 2.975e+02 3.460e+02 7.775e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 10:35:56,709 INFO [train.py:901] (0/4) Epoch 26, batch 5550, loss[loss=0.1629, simple_loss=0.242, pruned_loss=0.04188, over 7701.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2829, pruned_loss=0.05711, over 1616929.02 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:17,870 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:36:30,424 INFO [train.py:901] (0/4) Epoch 26, batch 5600, loss[loss=0.1841, simple_loss=0.2588, pruned_loss=0.0547, over 7251.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2834, pruned_loss=0.05736, over 1619389.89 frames. ], batch size: 16, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:55,074 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.412e+02 3.079e+02 3.750e+02 8.490e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-07 10:37:01,326 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3310, 2.1214, 1.7804, 2.0342, 1.6400, 1.5126, 1.6726, 1.7719], + device='cuda:0'), covar=tensor([0.1327, 0.0492, 0.1195, 0.0521, 0.0910, 0.1594, 0.0954, 0.0759], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0243, 0.0341, 0.0313, 0.0303, 0.0347, 0.0349, 0.0325], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:37:04,589 INFO [train.py:901] (0/4) Epoch 26, batch 5650, loss[loss=0.2017, simple_loss=0.2783, pruned_loss=0.06262, over 7538.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2827, pruned_loss=0.05688, over 1616259.78 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:37:12,998 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 10:37:40,858 INFO [train.py:901] (0/4) Epoch 26, batch 5700, loss[loss=0.2098, simple_loss=0.2973, pruned_loss=0.06116, over 8591.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2819, pruned_loss=0.05661, over 1615501.75 frames. 
], batch size: 39, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:37:52,449 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3213, 1.6633, 1.7131, 0.9803, 1.6553, 1.3428, 0.2445, 1.5284], + device='cuda:0'), covar=tensor([0.0536, 0.0395, 0.0316, 0.0583, 0.0446, 0.0942, 0.0941, 0.0326], + device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0402, 0.0359, 0.0456, 0.0389, 0.0547, 0.0401, 0.0432], + device='cuda:0'), out_proj_covar=tensor([1.2356e-04, 1.0471e-04, 9.3756e-05, 1.1947e-04, 1.0200e-04, 1.5306e-04, + 1.0733e-04, 1.1359e-04], device='cuda:0') +2023-02-07 10:38:05,994 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.232e+02 2.912e+02 3.330e+02 6.698e+02, threshold=5.824e+02, percent-clipped=1.0 +2023-02-07 10:38:14,797 INFO [train.py:901] (0/4) Epoch 26, batch 5750, loss[loss=0.2078, simple_loss=0.2953, pruned_loss=0.06016, over 8517.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2818, pruned_loss=0.05703, over 1615494.96 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:16,870 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 10:38:50,530 INFO [train.py:901] (0/4) Epoch 26, batch 5800, loss[loss=0.1907, simple_loss=0.2899, pruned_loss=0.04579, over 8110.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.05635, over 1611357.72 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:15,988 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.614e+02 3.148e+02 4.020e+02 8.026e+02, threshold=6.297e+02, percent-clipped=4.0 +2023-02-07 10:39:16,254 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:17,563 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8791, 1.7819, 2.5316, 1.6344, 1.4257, 2.5148, 0.5580, 1.5526], + device='cuda:0'), covar=tensor([0.1645, 0.1074, 0.0309, 0.1194, 0.2195, 0.0326, 0.1816, 0.1219], + device='cuda:0'), in_proj_covar=tensor([0.0198, 0.0204, 0.0133, 0.0223, 0.0277, 0.0145, 0.0171, 0.0198], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 10:39:24,803 INFO [train.py:901] (0/4) Epoch 26, batch 5850, loss[loss=0.1937, simple_loss=0.2927, pruned_loss=0.04738, over 8503.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2804, pruned_loss=0.05647, over 1613461.44 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:25,028 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8378, 2.1384, 2.1893, 1.4671, 2.2438, 1.7487, 0.6667, 1.9973], + device='cuda:0'), covar=tensor([0.0611, 0.0399, 0.0339, 0.0598, 0.0505, 0.0848, 0.0949, 0.0309], + device='cuda:0'), in_proj_covar=tensor([0.0467, 0.0403, 0.0361, 0.0458, 0.0391, 0.0549, 0.0403, 0.0435], + device='cuda:0'), out_proj_covar=tensor([1.2413e-04, 1.0500e-04, 9.4186e-05, 1.1985e-04, 1.0244e-04, 1.5341e-04, + 1.0788e-04, 1.1434e-04], device='cuda:0') +2023-02-07 10:39:32,992 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:59,802 INFO [train.py:901] (0/4) Epoch 26, batch 5900, loss[loss=0.182, simple_loss=0.2739, pruned_loss=0.04502, over 8027.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05616, over 1615463.29 frames. 
], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:17,925 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8301, 2.1141, 2.1941, 1.5424, 2.2932, 1.6448, 0.8105, 1.9488], + device='cuda:0'), covar=tensor([0.0692, 0.0394, 0.0317, 0.0632, 0.0471, 0.0909, 0.0969, 0.0396], + device='cuda:0'), in_proj_covar=tensor([0.0468, 0.0403, 0.0360, 0.0456, 0.0390, 0.0547, 0.0402, 0.0434], + device='cuda:0'), out_proj_covar=tensor([1.2420e-04, 1.0499e-04, 9.4113e-05, 1.1951e-04, 1.0242e-04, 1.5293e-04, + 1.0772e-04, 1.1414e-04], device='cuda:0') +2023-02-07 10:40:19,157 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-208000.pt +2023-02-07 10:40:26,849 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.451e+02 2.961e+02 3.656e+02 5.483e+02, threshold=5.923e+02, percent-clipped=0.0 +2023-02-07 10:40:35,639 INFO [train.py:901] (0/4) Epoch 26, batch 5950, loss[loss=0.1835, simple_loss=0.2536, pruned_loss=0.0567, over 7795.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2798, pruned_loss=0.05606, over 1614840.58 frames. ], batch size: 19, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:54,907 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:41:01,456 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 10:41:07,267 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1209, 1.7652, 3.1673, 1.7416, 2.4095, 3.4805, 3.4989, 3.0444], + device='cuda:0'), covar=tensor([0.1070, 0.1581, 0.0378, 0.1863, 0.1129, 0.0221, 0.0576, 0.0462], + device='cuda:0'), in_proj_covar=tensor([0.0299, 0.0322, 0.0288, 0.0314, 0.0316, 0.0274, 0.0432, 0.0303], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:41:09,813 INFO [train.py:901] (0/4) Epoch 26, batch 6000, loss[loss=0.2135, simple_loss=0.3019, pruned_loss=0.06258, over 8506.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2808, pruned_loss=0.05693, over 1612499.91 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:41:09,814 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 10:41:24,452 INFO [train.py:935] (0/4) Epoch 26, validation: loss=0.1721, simple_loss=0.2717, pruned_loss=0.03627, over 944034.00 frames. +2023-02-07 10:41:24,453 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6760MB +2023-02-07 10:41:51,028 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.308e+02 2.837e+02 3.630e+02 6.769e+02, threshold=5.675e+02, percent-clipped=2.0 +2023-02-07 10:42:00,869 INFO [train.py:901] (0/4) Epoch 26, batch 6050, loss[loss=0.1943, simple_loss=0.2891, pruned_loss=0.04976, over 8516.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05675, over 1615902.76 frames. 
], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:42:25,307 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9235, 2.3398, 3.6668, 1.9943, 1.8078, 3.5304, 0.6896, 2.1797], + device='cuda:0'), covar=tensor([0.1165, 0.1160, 0.0240, 0.1403, 0.2295, 0.0399, 0.1942, 0.1200], + device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0204, 0.0133, 0.0221, 0.0275, 0.0144, 0.0170, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 10:42:27,456 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 10:42:36,027 INFO [train.py:901] (0/4) Epoch 26, batch 6100, loss[loss=0.207, simple_loss=0.2937, pruned_loss=0.06014, over 8320.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05681, over 1616050.57 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:42:48,575 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 10:43:01,339 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.422e+02 2.947e+02 3.994e+02 1.088e+03, threshold=5.894e+02, percent-clipped=8.0 +2023-02-07 10:43:05,095 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 10:43:10,769 INFO [train.py:901] (0/4) Epoch 26, batch 6150, loss[loss=0.1733, simple_loss=0.2624, pruned_loss=0.04207, over 8024.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2799, pruned_loss=0.05587, over 1614338.97 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:45,589 INFO [train.py:901] (0/4) Epoch 26, batch 6200, loss[loss=0.2248, simple_loss=0.3124, pruned_loss=0.06862, over 8657.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05652, over 1618230.32 frames. ], batch size: 34, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:53,058 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208283.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:44:10,220 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 2.196e+02 2.837e+02 3.308e+02 7.178e+02, threshold=5.674e+02, percent-clipped=2.0 +2023-02-07 10:44:19,004 INFO [train.py:901] (0/4) Epoch 26, batch 6250, loss[loss=0.2009, simple_loss=0.2824, pruned_loss=0.05967, over 8144.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2824, pruned_loss=0.05749, over 1618643.27 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:44:31,996 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2394, 3.1604, 2.9281, 1.6816, 2.8736, 2.9504, 2.8808, 2.7743], + device='cuda:0'), covar=tensor([0.1252, 0.0887, 0.1396, 0.4337, 0.1159, 0.1296, 0.1654, 0.1047], + device='cuda:0'), in_proj_covar=tensor([0.0532, 0.0449, 0.0437, 0.0545, 0.0430, 0.0454, 0.0429, 0.0397], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:44:55,648 INFO [train.py:901] (0/4) Epoch 26, batch 6300, loss[loss=0.1999, simple_loss=0.2719, pruned_loss=0.06394, over 7644.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2821, pruned_loss=0.05791, over 1612687.85 frames. 
], batch size: 19, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:45:10,443 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:45:20,552 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.443e+02 2.919e+02 3.618e+02 1.192e+03, threshold=5.838e+02, percent-clipped=3.0 +2023-02-07 10:45:25,955 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8150, 1.4188, 3.9689, 1.4418, 3.5406, 3.2812, 3.6331, 3.5097], + device='cuda:0'), covar=tensor([0.0745, 0.4847, 0.0695, 0.4422, 0.1224, 0.1082, 0.0687, 0.0797], + device='cuda:0'), in_proj_covar=tensor([0.0665, 0.0660, 0.0728, 0.0652, 0.0737, 0.0629, 0.0627, 0.0706], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:45:29,139 INFO [train.py:901] (0/4) Epoch 26, batch 6350, loss[loss=0.1793, simple_loss=0.2556, pruned_loss=0.05147, over 7278.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2812, pruned_loss=0.05721, over 1613222.65 frames. ], batch size: 16, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:45:54,272 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6971, 1.4789, 3.0913, 1.3783, 2.2281, 3.3660, 3.5098, 2.8271], + device='cuda:0'), covar=tensor([0.1335, 0.1949, 0.0374, 0.2289, 0.1055, 0.0291, 0.0646, 0.0645], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0324, 0.0289, 0.0317, 0.0318, 0.0276, 0.0434, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:46:04,931 INFO [train.py:901] (0/4) Epoch 26, batch 6400, loss[loss=0.2059, simple_loss=0.3017, pruned_loss=0.05505, over 8291.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.0575, over 1616098.95 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:30,800 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.580e+02 3.188e+02 3.813e+02 6.849e+02, threshold=6.376e+02, percent-clipped=3.0 +2023-02-07 10:46:30,997 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:46:39,660 INFO [train.py:901] (0/4) Epoch 26, batch 6450, loss[loss=0.1858, simple_loss=0.2701, pruned_loss=0.05075, over 7925.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.05804, over 1610450.79 frames. 
], batch size: 20, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:09,470 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6075, 2.6079, 1.8001, 2.3027, 2.2386, 1.6815, 2.0556, 2.1604], + device='cuda:0'), covar=tensor([0.1444, 0.0398, 0.1186, 0.0622, 0.0691, 0.1404, 0.1002, 0.1028], + device='cuda:0'), in_proj_covar=tensor([0.0359, 0.0241, 0.0340, 0.0312, 0.0303, 0.0347, 0.0349, 0.0324], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:47:11,953 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1942, 1.4657, 4.2578, 1.9171, 2.4827, 4.8682, 4.9730, 4.1410], + device='cuda:0'), covar=tensor([0.1212, 0.2109, 0.0310, 0.2104, 0.1248, 0.0199, 0.0498, 0.0598], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0318, 0.0319, 0.0277, 0.0435, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:47:13,740 INFO [train.py:901] (0/4) Epoch 26, batch 6500, loss[loss=0.1927, simple_loss=0.2732, pruned_loss=0.05605, over 7929.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2826, pruned_loss=0.05788, over 1610550.19 frames. ], batch size: 20, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:27,387 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0277, 1.5516, 3.0948, 1.4060, 2.2819, 3.3410, 3.5050, 2.8445], + device='cuda:0'), covar=tensor([0.1093, 0.1819, 0.0422, 0.2286, 0.1034, 0.0318, 0.0672, 0.0577], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0318, 0.0319, 0.0277, 0.0435, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:47:29,451 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208594.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:39,224 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.371e+02 2.869e+02 3.528e+02 8.936e+02, threshold=5.738e+02, percent-clipped=3.0 +2023-02-07 10:47:48,760 INFO [train.py:901] (0/4) Epoch 26, batch 6550, loss[loss=0.2041, simple_loss=0.297, pruned_loss=0.05553, over 8344.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2808, pruned_loss=0.05693, over 1609762.84 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:52,190 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:56,162 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 10:48:12,571 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-07 10:48:12,889 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:48:22,502 INFO [train.py:901] (0/4) Epoch 26, batch 6600, loss[loss=0.1917, simple_loss=0.2621, pruned_loss=0.06065, over 7974.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2815, pruned_loss=0.05752, over 1611837.24 frames. 
], batch size: 21, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:48:44,618 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1103, 1.5239, 3.3337, 1.4658, 2.3144, 3.7012, 3.8040, 3.1330], + device='cuda:0'), covar=tensor([0.1186, 0.1917, 0.0350, 0.2240, 0.1074, 0.0263, 0.0608, 0.0582], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0325, 0.0290, 0.0318, 0.0319, 0.0277, 0.0435, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 10:48:49,204 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.581e+02 2.930e+02 3.571e+02 6.165e+02, threshold=5.859e+02, percent-clipped=2.0 +2023-02-07 10:48:58,721 INFO [train.py:901] (0/4) Epoch 26, batch 6650, loss[loss=0.1871, simple_loss=0.2645, pruned_loss=0.05487, over 7803.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.0577, over 1618532.33 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:12,755 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:28,859 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:29,732 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 10:49:33,527 INFO [train.py:901] (0/4) Epoch 26, batch 6700, loss[loss=0.1951, simple_loss=0.2779, pruned_loss=0.05611, over 8101.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.0576, over 1618868.07 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:46,225 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=208790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:57,534 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 10:49:59,641 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.530e+02 3.053e+02 4.076e+02 9.744e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-07 10:50:09,371 INFO [train.py:901] (0/4) Epoch 26, batch 6750, loss[loss=0.1942, simple_loss=0.2593, pruned_loss=0.06451, over 7272.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2826, pruned_loss=0.05783, over 1615539.24 frames. ], batch size: 16, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:30,915 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 10:50:43,649 INFO [train.py:901] (0/4) Epoch 26, batch 6800, loss[loss=0.1919, simple_loss=0.2745, pruned_loss=0.05462, over 8089.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2826, pruned_loss=0.05776, over 1616871.13 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:51:05,253 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 10:51:08,812 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.376e+02 2.847e+02 3.449e+02 1.016e+03, threshold=5.694e+02, percent-clipped=2.0 +2023-02-07 10:51:18,806 INFO [train.py:901] (0/4) Epoch 26, batch 6850, loss[loss=0.2364, simple_loss=0.314, pruned_loss=0.07935, over 8655.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05806, over 1616793.49 frames. 
], batch size: 34, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:51:19,509 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 10:51:30,537 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:51:43,005 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8319, 2.0541, 2.1183, 1.4305, 2.2575, 1.5821, 0.7731, 1.9561], + device='cuda:0'), covar=tensor([0.0675, 0.0418, 0.0357, 0.0717, 0.0451, 0.0952, 0.1062, 0.0383], + device='cuda:0'), in_proj_covar=tensor([0.0465, 0.0404, 0.0360, 0.0456, 0.0390, 0.0547, 0.0401, 0.0435], + device='cuda:0'), out_proj_covar=tensor([1.2348e-04, 1.0515e-04, 9.4029e-05, 1.1954e-04, 1.0199e-04, 1.5290e-04, + 1.0723e-04, 1.1427e-04], device='cuda:0') +2023-02-07 10:51:54,555 INFO [train.py:901] (0/4) Epoch 26, batch 6900, loss[loss=0.2257, simple_loss=0.2969, pruned_loss=0.07724, over 8339.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.05811, over 1617931.33 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:12,013 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:20,219 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.430e+02 2.933e+02 3.890e+02 9.541e+02, threshold=5.866e+02, percent-clipped=7.0 +2023-02-07 10:52:23,020 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 10:52:28,982 INFO [train.py:901] (0/4) Epoch 26, batch 6950, loss[loss=0.2137, simple_loss=0.2968, pruned_loss=0.06528, over 8605.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2833, pruned_loss=0.05783, over 1619154.39 frames. ], batch size: 31, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:29,840 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:47,736 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9341, 1.7108, 2.8026, 2.1395, 2.6378, 1.9150, 1.7172, 1.4007], + device='cuda:0'), covar=tensor([0.7595, 0.6571, 0.2184, 0.4477, 0.3228, 0.4767, 0.3186, 0.6241], + device='cuda:0'), in_proj_covar=tensor([0.0960, 0.1013, 0.0829, 0.0985, 0.1016, 0.0922, 0.0769, 0.0846], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 10:52:51,120 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:53:04,217 INFO [train.py:901] (0/4) Epoch 26, batch 7000, loss[loss=0.2297, simple_loss=0.3035, pruned_loss=0.07798, over 8029.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2822, pruned_loss=0.05781, over 1617155.41 frames. 
], batch size: 22, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:07,235 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7441, 1.9886, 2.0906, 1.3991, 2.2051, 1.5573, 0.6514, 1.8638], + device='cuda:0'), covar=tensor([0.0636, 0.0409, 0.0362, 0.0680, 0.0424, 0.0957, 0.0976, 0.0382], + device='cuda:0'), in_proj_covar=tensor([0.0469, 0.0406, 0.0362, 0.0459, 0.0392, 0.0550, 0.0403, 0.0437], + device='cuda:0'), out_proj_covar=tensor([1.2439e-04, 1.0566e-04, 9.4474e-05, 1.2022e-04, 1.0275e-04, 1.5369e-04, + 1.0770e-04, 1.1488e-04], device='cuda:0') +2023-02-07 10:53:15,631 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 10:53:30,452 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.543e+02 3.075e+02 4.223e+02 1.225e+03, threshold=6.150e+02, percent-clipped=4.0 +2023-02-07 10:53:38,407 INFO [train.py:901] (0/4) Epoch 26, batch 7050, loss[loss=0.1728, simple_loss=0.2621, pruned_loss=0.04171, over 7678.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.05816, over 1613964.89 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:54:15,173 INFO [train.py:901] (0/4) Epoch 26, batch 7100, loss[loss=0.2115, simple_loss=0.3043, pruned_loss=0.05934, over 8467.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2833, pruned_loss=0.05804, over 1611944.67 frames. ], batch size: 29, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:54:34,737 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8273, 2.0964, 2.1875, 1.4021, 2.3024, 1.5706, 0.7209, 1.9326], + device='cuda:0'), covar=tensor([0.0700, 0.0415, 0.0323, 0.0709, 0.0508, 0.0984, 0.0997, 0.0377], + device='cuda:0'), in_proj_covar=tensor([0.0468, 0.0404, 0.0360, 0.0457, 0.0390, 0.0548, 0.0402, 0.0436], + device='cuda:0'), out_proj_covar=tensor([1.2422e-04, 1.0530e-04, 9.4032e-05, 1.1962e-04, 1.0213e-04, 1.5309e-04, + 1.0742e-04, 1.1443e-04], device='cuda:0') +2023-02-07 10:54:42,204 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.538e+02 3.057e+02 3.964e+02 1.199e+03, threshold=6.114e+02, percent-clipped=9.0 +2023-02-07 10:54:49,199 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209220.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:54:50,434 INFO [train.py:901] (0/4) Epoch 26, batch 7150, loss[loss=0.2065, simple_loss=0.2886, pruned_loss=0.06217, over 8531.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05767, over 1617144.88 frames. ], batch size: 28, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:54:56,930 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 10:55:03,374 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7443, 1.9707, 2.0843, 1.4147, 2.0813, 1.6368, 0.5756, 1.9475], + device='cuda:0'), covar=tensor([0.0565, 0.0353, 0.0302, 0.0579, 0.0467, 0.0903, 0.0911, 0.0269], + device='cuda:0'), in_proj_covar=tensor([0.0467, 0.0405, 0.0360, 0.0456, 0.0390, 0.0548, 0.0402, 0.0436], + device='cuda:0'), out_proj_covar=tensor([1.2414e-04, 1.0539e-04, 9.3979e-05, 1.1942e-04, 1.0217e-04, 1.5325e-04, + 1.0739e-04, 1.1444e-04], device='cuda:0') +2023-02-07 10:55:07,565 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. 
limit=2.0 +2023-02-07 10:55:19,351 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6822, 2.2985, 3.8199, 1.5751, 2.8812, 2.1887, 1.9156, 2.7618], + device='cuda:0'), covar=tensor([0.1984, 0.2579, 0.0841, 0.4676, 0.1919, 0.3297, 0.2323, 0.2458], + device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0630, 0.0560, 0.0665, 0.0659, 0.0609, 0.0558, 0.0645], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:55:20,030 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7625, 2.4151, 3.8676, 1.5442, 2.8550, 2.2300, 2.0080, 2.6792], + device='cuda:0'), covar=tensor([0.2056, 0.2735, 0.1003, 0.5140, 0.2054, 0.3537, 0.2594, 0.3021], + device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0630, 0.0560, 0.0665, 0.0659, 0.0609, 0.0558, 0.0645], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:55:25,278 INFO [train.py:901] (0/4) Epoch 26, batch 7200, loss[loss=0.1687, simple_loss=0.2704, pruned_loss=0.03351, over 8733.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2837, pruned_loss=0.05813, over 1622998.45 frames. ], batch size: 39, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:25,475 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9978, 1.6287, 1.4454, 1.5606, 1.3479, 1.3164, 1.3550, 1.2556], + device='cuda:0'), covar=tensor([0.1315, 0.0554, 0.1357, 0.0599, 0.0823, 0.1628, 0.0974, 0.0887], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0239, 0.0340, 0.0312, 0.0302, 0.0345, 0.0347, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:55:26,114 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:26,746 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8461, 5.9978, 5.1932, 2.3382, 5.2053, 5.5676, 5.4991, 5.3889], + device='cuda:0'), covar=tensor([0.0495, 0.0386, 0.0786, 0.4301, 0.0673, 0.0766, 0.0970, 0.0547], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0457, 0.0443, 0.0554, 0.0437, 0.0461, 0.0436, 0.0403], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 10:55:51,810 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:52,284 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.329e+02 2.733e+02 3.562e+02 6.414e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 10:55:56,506 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209316.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:00,335 INFO [train.py:901] (0/4) Epoch 26, batch 7250, loss[loss=0.263, simple_loss=0.3346, pruned_loss=0.09573, over 8026.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2841, pruned_loss=0.05848, over 1622149.58 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:08,436 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:33,562 INFO [train.py:901] (0/4) Epoch 26, batch 7300, loss[loss=0.1799, simple_loss=0.2667, pruned_loss=0.04654, over 8248.00 frames. 
], tot_loss[loss=0.1998, simple_loss=0.2836, pruned_loss=0.05803, over 1623087.53 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:59,845 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.463e+02 3.055e+02 3.934e+02 7.151e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 10:57:06,766 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 10:57:09,434 INFO [train.py:901] (0/4) Epoch 26, batch 7350, loss[loss=0.1892, simple_loss=0.2656, pruned_loss=0.05646, over 7657.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05943, over 1619939.86 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:26,313 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 10:57:42,849 INFO [train.py:901] (0/4) Epoch 26, batch 7400, loss[loss=0.1895, simple_loss=0.2749, pruned_loss=0.05203, over 7966.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2849, pruned_loss=0.05931, over 1621863.59 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:50,999 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:04,816 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 10:58:09,480 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.534e+02 3.042e+02 3.812e+02 9.347e+02, threshold=6.084e+02, percent-clipped=5.0 +2023-02-07 10:58:17,540 INFO [train.py:901] (0/4) Epoch 26, batch 7450, loss[loss=0.2385, simple_loss=0.3218, pruned_loss=0.07761, over 8608.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05973, over 1622964.71 frames. ], batch size: 31, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:58:22,475 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:47,395 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209564.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:52,742 INFO [train.py:901] (0/4) Epoch 26, batch 7500, loss[loss=0.1934, simple_loss=0.2761, pruned_loss=0.05535, over 8186.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2849, pruned_loss=0.05984, over 1618574.41 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:59:18,692 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.436e+02 2.983e+02 3.503e+02 8.056e+02, threshold=5.967e+02, percent-clipped=5.0 +2023-02-07 10:59:24,163 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:27,346 INFO [train.py:901] (0/4) Epoch 26, batch 7550, loss[loss=0.1882, simple_loss=0.2689, pruned_loss=0.05374, over 7935.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2848, pruned_loss=0.05974, over 1617808.26 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 10:59:28,776 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:34,807 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-02-07 10:59:41,889 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5121, 2.4515, 1.7399, 2.2426, 1.9987, 1.4277, 1.9684, 2.1955], + device='cuda:0'), covar=tensor([0.1964, 0.0586, 0.1555, 0.0753, 0.1083, 0.2044, 0.1370, 0.1123], + device='cuda:0'), in_proj_covar=tensor([0.0363, 0.0241, 0.0342, 0.0314, 0.0306, 0.0349, 0.0351, 0.0326], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 10:59:54,103 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:55,166 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-07 11:00:01,942 INFO [train.py:901] (0/4) Epoch 26, batch 7600, loss[loss=0.1958, simple_loss=0.2845, pruned_loss=0.05352, over 8109.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05971, over 1615807.33 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:07,012 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:25,092 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209706.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:27,672 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.404e+02 2.880e+02 3.478e+02 6.437e+02, threshold=5.761e+02, percent-clipped=3.0 +2023-02-07 11:00:35,725 INFO [train.py:901] (0/4) Epoch 26, batch 7650, loss[loss=0.2087, simple_loss=0.2892, pruned_loss=0.06408, over 8030.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05978, over 1622518.64 frames. ], batch size: 22, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:43,054 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:52,377 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5691, 1.4675, 4.7432, 1.8384, 4.2806, 3.9278, 4.2898, 4.2305], + device='cuda:0'), covar=tensor([0.0597, 0.4990, 0.0546, 0.4172, 0.0995, 0.1049, 0.0630, 0.0655], + device='cuda:0'), in_proj_covar=tensor([0.0674, 0.0665, 0.0736, 0.0659, 0.0743, 0.0635, 0.0635, 0.0711], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:01:10,731 INFO [train.py:901] (0/4) Epoch 26, batch 7700, loss[loss=0.1671, simple_loss=0.2475, pruned_loss=0.0434, over 7650.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2861, pruned_loss=0.05999, over 1619213.44 frames. ], batch size: 19, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:12,816 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6470, 4.6670, 4.2184, 2.1746, 4.1431, 4.2593, 4.1866, 4.0374], + device='cuda:0'), covar=tensor([0.0635, 0.0493, 0.1099, 0.4098, 0.0772, 0.1073, 0.1250, 0.0681], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0454, 0.0442, 0.0553, 0.0435, 0.0460, 0.0435, 0.0401], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:01:12,896 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:14,129 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-07 11:01:36,864 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.688e+02 3.083e+02 3.850e+02 9.382e+02, threshold=6.167e+02, percent-clipped=8.0 +2023-02-07 11:01:44,893 INFO [train.py:901] (0/4) Epoch 26, batch 7750, loss[loss=0.1931, simple_loss=0.2714, pruned_loss=0.05743, over 8463.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.05997, over 1615132.87 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:48,925 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:19,758 INFO [train.py:901] (0/4) Epoch 26, batch 7800, loss[loss=0.1987, simple_loss=0.2854, pruned_loss=0.05599, over 7943.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2846, pruned_loss=0.0595, over 1614209.10 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:02:19,819 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:45,322 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.465e+02 2.962e+02 3.430e+02 5.705e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-07 11:02:53,247 INFO [train.py:901] (0/4) Epoch 26, batch 7850, loss[loss=0.1659, simple_loss=0.2495, pruned_loss=0.04117, over 7935.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.05877, over 1614913.78 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:01,890 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:07,202 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:18,173 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:23,276 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209968.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:25,899 INFO [train.py:901] (0/4) Epoch 26, batch 7900, loss[loss=0.1643, simple_loss=0.2389, pruned_loss=0.04488, over 7805.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2819, pruned_loss=0.05813, over 1609383.40 frames. 
], batch size: 19, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:35,832 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:36,567 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:37,798 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2269, 3.1398, 2.9697, 1.4540, 2.8731, 2.9839, 2.8908, 2.8247], + device='cuda:0'), covar=tensor([0.1250, 0.0877, 0.1399, 0.4982, 0.1065, 0.1162, 0.1650, 0.0980], + device='cuda:0'), in_proj_covar=tensor([0.0533, 0.0452, 0.0438, 0.0548, 0.0433, 0.0456, 0.0431, 0.0398], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:03:44,410 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-210000.pt +2023-02-07 11:03:51,912 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.305e+02 2.795e+02 3.387e+02 5.942e+02, threshold=5.591e+02, percent-clipped=1.0 +2023-02-07 11:03:54,071 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210013.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:59,748 INFO [train.py:901] (0/4) Epoch 26, batch 7950, loss[loss=0.2042, simple_loss=0.2916, pruned_loss=0.05845, over 8192.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2826, pruned_loss=0.05849, over 1611241.38 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:06,000 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210031.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:18,574 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:22,762 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:26,359 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.80 vs. limit=5.0 +2023-02-07 11:04:33,085 INFO [train.py:901] (0/4) Epoch 26, batch 8000, loss[loss=0.1825, simple_loss=0.2765, pruned_loss=0.04426, over 8288.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2822, pruned_loss=0.05791, over 1610779.94 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:35,221 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:40,639 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210083.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:51,786 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.21 vs. 
limit=5.0 +2023-02-07 11:04:53,029 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6598, 2.0856, 3.0729, 1.5630, 2.4594, 2.1462, 1.8356, 2.4087], + device='cuda:0'), covar=tensor([0.1947, 0.2560, 0.1005, 0.4626, 0.1789, 0.3171, 0.2349, 0.2204], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0634, 0.0562, 0.0669, 0.0662, 0.0612, 0.0561, 0.0645], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:04:58,003 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.316e+02 2.819e+02 3.710e+02 9.270e+02, threshold=5.638e+02, percent-clipped=7.0 +2023-02-07 11:05:05,834 INFO [train.py:901] (0/4) Epoch 26, batch 8050, loss[loss=0.1923, simple_loss=0.2673, pruned_loss=0.05868, over 7550.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2793, pruned_loss=0.05692, over 1592142.18 frames. ], batch size: 18, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:05:21,063 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-07 11:05:21,922 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0783, 1.5341, 1.7331, 1.4442, 0.9970, 1.4852, 1.8773, 1.6520], + device='cuda:0'), covar=tensor([0.0534, 0.1281, 0.1723, 0.1462, 0.0594, 0.1515, 0.0669, 0.0667], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0162, 0.0101, 0.0164, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:05:28,104 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-26.pt +2023-02-07 11:05:39,502 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 11:05:43,126 INFO [train.py:901] (0/4) Epoch 27, batch 0, loss[loss=0.1873, simple_loss=0.2663, pruned_loss=0.05414, over 7928.00 frames. ], tot_loss[loss=0.1873, simple_loss=0.2663, pruned_loss=0.05414, over 7928.00 frames. ], batch size: 20, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:05:43,127 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 11:05:54,197 INFO [train.py:935] (0/4) Epoch 27, validation: loss=0.172, simple_loss=0.2713, pruned_loss=0.03628, over 944034.00 frames. +2023-02-07 11:05:54,198 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6760MB +2023-02-07 11:06:01,164 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:07,185 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4177, 1.2300, 2.3749, 1.2984, 2.2253, 2.5760, 2.7089, 2.1332], + device='cuda:0'), covar=tensor([0.1154, 0.1471, 0.0412, 0.2112, 0.0694, 0.0373, 0.0624, 0.0676], + device='cuda:0'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0319, 0.0317, 0.0276, 0.0433, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:06:08,365 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 11:06:24,779 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:28,659 INFO [train.py:901] (0/4) Epoch 27, batch 50, loss[loss=0.2467, simple_loss=0.3348, pruned_loss=0.07936, over 8604.00 frames. 
], tot_loss[loss=0.2066, simple_loss=0.2876, pruned_loss=0.0628, over 366206.96 frames. ], batch size: 31, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:06:33,573 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.417e+02 2.930e+02 3.516e+02 7.088e+02, threshold=5.860e+02, percent-clipped=5.0 +2023-02-07 11:06:41,899 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 11:06:43,317 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:56,860 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:59,547 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1365, 1.8959, 2.3428, 2.0438, 2.3614, 2.2082, 2.0313, 1.1441], + device='cuda:0'), covar=tensor([0.5473, 0.4470, 0.2098, 0.3569, 0.2400, 0.3060, 0.1861, 0.4837], + device='cuda:0'), in_proj_covar=tensor([0.0962, 0.1018, 0.0828, 0.0987, 0.1019, 0.0926, 0.0772, 0.0847], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 11:07:04,531 INFO [train.py:901] (0/4) Epoch 27, batch 100, loss[loss=0.1989, simple_loss=0.2817, pruned_loss=0.05809, over 8137.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06106, over 644343.43 frames. ], batch size: 22, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:04,986 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 11:07:05,174 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 11:07:13,374 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210268.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:22,921 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 11:07:34,280 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5981, 2.0758, 3.2751, 1.5374, 2.5274, 2.1819, 1.7463, 2.6277], + device='cuda:0'), covar=tensor([0.2128, 0.2840, 0.1019, 0.4702, 0.2018, 0.3348, 0.2584, 0.2297], + device='cuda:0'), in_proj_covar=tensor([0.0536, 0.0633, 0.0562, 0.0666, 0.0661, 0.0610, 0.0560, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:07:37,998 INFO [train.py:901] (0/4) Epoch 27, batch 150, loss[loss=0.1955, simple_loss=0.283, pruned_loss=0.05401, over 8346.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05884, over 861249.53 frames. ], batch size: 26, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:40,614 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5620, 4.5519, 4.1285, 2.0567, 3.9702, 4.2195, 4.1209, 4.0392], + device='cuda:0'), covar=tensor([0.0662, 0.0530, 0.1003, 0.4448, 0.0848, 0.0900, 0.1191, 0.0717], + device='cuda:0'), in_proj_covar=tensor([0.0535, 0.0454, 0.0439, 0.0549, 0.0433, 0.0456, 0.0432, 0.0400], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:07:40,870 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 11:07:41,159 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.352e+02 2.905e+02 3.661e+02 1.089e+03, threshold=5.811e+02, percent-clipped=3.0 +2023-02-07 11:07:46,929 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 11:08:02,853 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:14,086 INFO [train.py:901] (0/4) Epoch 27, batch 200, loss[loss=0.1842, simple_loss=0.2665, pruned_loss=0.05093, over 8088.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2853, pruned_loss=0.05919, over 1032228.89 frames. ], batch size: 21, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:08:20,475 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:48,339 INFO [train.py:901] (0/4) Epoch 27, batch 250, loss[loss=0.203, simple_loss=0.2946, pruned_loss=0.05567, over 8459.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.284, pruned_loss=0.05815, over 1167114.04 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:08:51,582 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.304e+02 2.819e+02 3.559e+02 6.263e+02, threshold=5.638e+02, percent-clipped=1.0 +2023-02-07 11:08:57,573 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 11:08:57,626 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:59,057 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210421.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:06,461 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 11:09:15,915 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:23,005 INFO [train.py:901] (0/4) Epoch 27, batch 300, loss[loss=0.2034, simple_loss=0.2723, pruned_loss=0.06729, over 6747.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2842, pruned_loss=0.05885, over 1263805.73 frames. ], batch size: 15, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:09:26,773 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-07 11:09:46,152 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0593, 1.6015, 1.4884, 1.5893, 1.2759, 1.3327, 1.3123, 1.3635], + device='cuda:0'), covar=tensor([0.1135, 0.0468, 0.1238, 0.0547, 0.0799, 0.1451, 0.0911, 0.0723], + device='cuda:0'), in_proj_covar=tensor([0.0361, 0.0240, 0.0341, 0.0311, 0.0304, 0.0346, 0.0348, 0.0322], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 11:09:46,850 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7318, 2.6450, 2.0228, 2.3565, 2.1806, 1.7820, 2.1905, 2.2610], + device='cuda:0'), covar=tensor([0.1307, 0.0388, 0.1055, 0.0594, 0.0746, 0.1400, 0.0896, 0.0790], + device='cuda:0'), in_proj_covar=tensor([0.0361, 0.0240, 0.0341, 0.0311, 0.0304, 0.0346, 0.0348, 0.0322], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 11:09:57,348 INFO [train.py:901] (0/4) Epoch 27, batch 350, loss[loss=0.1939, simple_loss=0.2774, pruned_loss=0.05516, over 8464.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2833, pruned_loss=0.05863, over 1334975.27 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:09:57,505 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8734, 1.4384, 3.1097, 1.4046, 2.3206, 3.3771, 3.5025, 2.8893], + device='cuda:0'), covar=tensor([0.1209, 0.1911, 0.0373, 0.2207, 0.1039, 0.0260, 0.0590, 0.0526], + device='cuda:0'), in_proj_covar=tensor([0.0302, 0.0323, 0.0288, 0.0317, 0.0316, 0.0274, 0.0431, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:10:00,693 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.321e+02 2.740e+02 3.479e+02 7.751e+02, threshold=5.481e+02, percent-clipped=4.0 +2023-02-07 11:10:01,074 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 11:10:08,251 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0837, 2.2703, 1.8260, 2.9139, 1.4617, 1.7344, 2.2125, 2.2672], + device='cuda:0'), covar=tensor([0.0752, 0.0795, 0.0923, 0.0360, 0.1108, 0.1315, 0.0845, 0.0853], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0194, 0.0246, 0.0211, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 11:10:16,892 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:10:30,766 INFO [train.py:901] (0/4) Epoch 27, batch 400, loss[loss=0.238, simple_loss=0.3231, pruned_loss=0.07646, over 8487.00 frames. ], tot_loss[loss=0.202, simple_loss=0.285, pruned_loss=0.05947, over 1396158.96 frames. ], batch size: 29, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:06,883 INFO [train.py:901] (0/4) Epoch 27, batch 450, loss[loss=0.1482, simple_loss=0.2281, pruned_loss=0.03411, over 7696.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.0587, over 1448208.23 frames. 
], batch size: 18, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:10,233 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.445e+02 3.096e+02 3.744e+02 6.670e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 11:11:17,242 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7388, 2.2369, 3.5730, 1.9071, 1.7905, 3.4381, 0.6121, 2.1306], + device='cuda:0'), covar=tensor([0.1375, 0.1013, 0.0205, 0.1516, 0.2096, 0.0320, 0.2099, 0.1409], + device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0203, 0.0133, 0.0221, 0.0274, 0.0144, 0.0172, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:11:37,197 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:11:39,680 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6688, 2.9504, 2.2904, 4.0937, 1.6514, 2.2418, 2.7601, 2.9510], + device='cuda:0'), covar=tensor([0.0656, 0.0739, 0.0871, 0.0234, 0.1121, 0.1186, 0.0828, 0.0728], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0194, 0.0246, 0.0211, 0.0203, 0.0244, 0.0249, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 11:11:40,169 INFO [train.py:901] (0/4) Epoch 27, batch 500, loss[loss=0.1788, simple_loss=0.2582, pruned_loss=0.04966, over 7822.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2835, pruned_loss=0.05901, over 1484979.82 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:54,645 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.01 vs. limit=5.0 +2023-02-07 11:12:01,203 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:08,765 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4154, 1.4787, 1.3075, 1.6590, 1.0206, 1.2105, 1.4472, 1.5025], + device='cuda:0'), covar=tensor([0.0634, 0.0625, 0.0720, 0.0518, 0.0910, 0.0972, 0.0561, 0.0564], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0246, 0.0210, 0.0203, 0.0244, 0.0249, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 11:12:12,805 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:15,960 INFO [train.py:901] (0/4) Epoch 27, batch 550, loss[loss=0.2217, simple_loss=0.3017, pruned_loss=0.07088, over 8032.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2845, pruned_loss=0.05919, over 1513425.13 frames. 
], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:12:19,369 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.336e+02 2.792e+02 3.793e+02 8.487e+02, threshold=5.584e+02, percent-clipped=3.0 +2023-02-07 11:12:35,240 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5957, 1.4361, 1.6204, 1.3177, 0.7780, 1.3636, 1.3327, 1.3319], + device='cuda:0'), covar=tensor([0.0605, 0.1246, 0.1684, 0.1519, 0.0645, 0.1527, 0.0783, 0.0695], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0162, 0.0101, 0.0164, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:12:50,315 INFO [train.py:901] (0/4) Epoch 27, batch 600, loss[loss=0.1897, simple_loss=0.2846, pruned_loss=0.04742, over 8459.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2847, pruned_loss=0.05869, over 1537167.82 frames. ], batch size: 27, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:12:50,560 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6072, 2.1008, 3.2865, 1.5128, 2.5332, 2.0126, 1.8168, 2.6575], + device='cuda:0'), covar=tensor([0.1874, 0.2637, 0.0771, 0.4479, 0.1709, 0.3230, 0.2340, 0.1943], + device='cuda:0'), in_proj_covar=tensor([0.0537, 0.0631, 0.0562, 0.0665, 0.0660, 0.0609, 0.0561, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:13:00,512 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7676, 2.0948, 3.6176, 2.0426, 1.6839, 3.5157, 0.5309, 2.1566], + device='cuda:0'), covar=tensor([0.1505, 0.1603, 0.0338, 0.1526, 0.2606, 0.0376, 0.2381, 0.1372], + device='cuda:0'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0221, 0.0276, 0.0144, 0.0172, 0.0198], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:13:05,184 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8692, 1.5275, 1.7481, 1.4258, 1.0889, 1.5333, 1.7885, 1.4700], + device='cuda:0'), covar=tensor([0.0550, 0.1256, 0.1638, 0.1415, 0.0595, 0.1446, 0.0685, 0.0681], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0162, 0.0101, 0.0164, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:13:07,221 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6111, 1.4706, 1.9768, 1.3602, 1.2608, 1.9477, 0.5995, 1.3233], + device='cuda:0'), covar=tensor([0.1297, 0.1212, 0.0384, 0.0845, 0.2145, 0.0417, 0.1766, 0.1287], + device='cuda:0'), in_proj_covar=tensor([0.0196, 0.0203, 0.0133, 0.0221, 0.0275, 0.0144, 0.0172, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:13:08,464 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:11,593 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 11:13:13,654 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:23,577 INFO [train.py:901] (0/4) Epoch 27, batch 650, loss[loss=0.1602, simple_loss=0.2475, pruned_loss=0.03648, over 7713.00 frames. 
], tot_loss[loss=0.2006, simple_loss=0.2845, pruned_loss=0.0584, over 1558638.50 frames. ], batch size: 18, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:28,267 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.351e+02 2.894e+02 3.474e+02 6.032e+02, threshold=5.788e+02, percent-clipped=3.0 +2023-02-07 11:13:32,497 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:59,787 INFO [train.py:901] (0/4) Epoch 27, batch 700, loss[loss=0.2135, simple_loss=0.3031, pruned_loss=0.06198, over 8107.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.284, pruned_loss=0.05829, over 1574135.89 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:03,232 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1853, 2.0610, 2.6643, 2.2014, 2.5957, 2.2739, 2.1060, 1.4276], + device='cuda:0'), covar=tensor([0.5690, 0.5024, 0.2075, 0.3745, 0.2496, 0.3132, 0.1947, 0.5341], + device='cuda:0'), in_proj_covar=tensor([0.0961, 0.1015, 0.0827, 0.0983, 0.1018, 0.0925, 0.0768, 0.0845], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 11:14:16,332 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.5522, 1.8118, 5.8368, 2.5688, 4.9253, 4.8848, 5.3683, 5.3287], + device='cuda:0'), covar=tensor([0.0857, 0.6390, 0.0607, 0.4132, 0.1589, 0.1233, 0.0832, 0.0693], + device='cuda:0'), in_proj_covar=tensor([0.0665, 0.0659, 0.0728, 0.0650, 0.0737, 0.0629, 0.0629, 0.0702], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:14:32,709 INFO [train.py:901] (0/4) Epoch 27, batch 750, loss[loss=0.2347, simple_loss=0.2962, pruned_loss=0.08656, over 7634.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2857, pruned_loss=0.05954, over 1586806.14 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:33,743 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0 +2023-02-07 11:14:35,963 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.536e+02 2.996e+02 3.960e+02 1.304e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 11:14:54,972 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 11:15:04,232 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 11:15:08,850 INFO [train.py:901] (0/4) Epoch 27, batch 800, loss[loss=0.2036, simple_loss=0.2996, pruned_loss=0.05374, over 8254.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.05875, over 1589824.02 frames. ], batch size: 24, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:15:11,720 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.28 vs. limit=5.0 +2023-02-07 11:15:34,982 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:38,620 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. 
limit=2.0 +2023-02-07 11:15:40,383 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:42,228 INFO [train.py:901] (0/4) Epoch 27, batch 850, loss[loss=0.202, simple_loss=0.288, pruned_loss=0.05799, over 7938.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2835, pruned_loss=0.05857, over 1591712.17 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:15:45,629 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.265e+02 2.725e+02 3.482e+02 8.151e+02, threshold=5.450e+02, percent-clipped=2.0 +2023-02-07 11:15:46,885 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 11:15:57,712 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:10,425 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:17,675 INFO [train.py:901] (0/4) Epoch 27, batch 900, loss[loss=0.193, simple_loss=0.2853, pruned_loss=0.0504, over 8647.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.284, pruned_loss=0.05922, over 1596229.84 frames. ], batch size: 34, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:51,933 INFO [train.py:901] (0/4) Epoch 27, batch 950, loss[loss=0.1828, simple_loss=0.2655, pruned_loss=0.05003, over 7817.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05812, over 1601879.07 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:54,833 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:55,251 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.483e+02 2.981e+02 4.008e+02 9.530e+02, threshold=5.961e+02, percent-clipped=10.0 +2023-02-07 11:17:06,152 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:11,859 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 11:17:17,576 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211143.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:18,070 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 11:17:26,814 INFO [train.py:901] (0/4) Epoch 27, batch 1000, loss[loss=0.1983, simple_loss=0.2889, pruned_loss=0.05391, over 8290.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.05808, over 1607068.79 frames. 
], batch size: 23, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:17:30,456 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:51,441 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3025, 2.1506, 1.7497, 2.0400, 1.7721, 1.4832, 1.7435, 1.8073], + device='cuda:0'), covar=tensor([0.1221, 0.0409, 0.1199, 0.0496, 0.0769, 0.1555, 0.0925, 0.0682], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0239, 0.0340, 0.0311, 0.0304, 0.0344, 0.0346, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 11:17:53,293 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 11:18:03,291 INFO [train.py:901] (0/4) Epoch 27, batch 1050, loss[loss=0.1995, simple_loss=0.2916, pruned_loss=0.05366, over 8573.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05775, over 1610088.96 frames. ], batch size: 39, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:18:05,217 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 11:18:07,251 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.494e+02 3.070e+02 3.818e+02 8.233e+02, threshold=6.140e+02, percent-clipped=4.0 +2023-02-07 11:18:08,732 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4442, 1.2856, 1.5051, 1.3628, 1.4329, 1.4520, 1.3873, 0.7649], + device='cuda:0'), covar=tensor([0.4277, 0.3570, 0.1698, 0.2725, 0.2012, 0.2482, 0.1469, 0.3876], + device='cuda:0'), in_proj_covar=tensor([0.0964, 0.1018, 0.0831, 0.0988, 0.1021, 0.0928, 0.0771, 0.0850], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 11:18:27,161 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:18:36,465 INFO [train.py:901] (0/4) Epoch 27, batch 1100, loss[loss=0.2009, simple_loss=0.2943, pruned_loss=0.05369, over 8358.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2828, pruned_loss=0.05814, over 1611491.83 frames. ], batch size: 24, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:13,344 INFO [train.py:901] (0/4) Epoch 27, batch 1150, loss[loss=0.2013, simple_loss=0.2673, pruned_loss=0.06768, over 7802.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2825, pruned_loss=0.0579, over 1613027.32 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:15,957 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 11:19:17,153 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 11:19:17,281 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.371e+02 2.782e+02 3.549e+02 6.262e+02, threshold=5.564e+02, percent-clipped=1.0 +2023-02-07 11:19:40,822 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211346.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:19:46,769 INFO [train.py:901] (0/4) Epoch 27, batch 1200, loss[loss=0.211, simple_loss=0.3029, pruned_loss=0.05958, over 8042.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2819, pruned_loss=0.05764, over 1609024.81 frames. 
], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:53,739 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:08,479 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0469, 2.2124, 1.8468, 2.8329, 1.3301, 1.6403, 2.0654, 2.2030], + device='cuda:0'), covar=tensor([0.0787, 0.0800, 0.0831, 0.0344, 0.1073, 0.1292, 0.0808, 0.0825], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0211, 0.0202, 0.0244, 0.0249, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 11:20:11,155 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:18,555 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211399.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:21,056 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7079, 4.7711, 4.2764, 2.0259, 4.1490, 4.3680, 4.3166, 4.1706], + device='cuda:0'), covar=tensor([0.0663, 0.0498, 0.0961, 0.4600, 0.0870, 0.0864, 0.1108, 0.0801], + device='cuda:0'), in_proj_covar=tensor([0.0543, 0.0459, 0.0446, 0.0557, 0.0440, 0.0463, 0.0437, 0.0407], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:20:21,762 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211404.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:22,039 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 11:20:22,319 INFO [train.py:901] (0/4) Epoch 27, batch 1250, loss[loss=0.2043, simple_loss=0.2897, pruned_loss=0.05947, over 8518.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05829, over 1612695.15 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:20:26,153 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.344e+02 2.922e+02 3.484e+02 6.390e+02, threshold=5.843e+02, percent-clipped=2.0 +2023-02-07 11:20:29,695 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211415.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:35,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211424.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:39,463 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:46,219 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211440.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:49,956 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-07 11:20:56,225 INFO [train.py:901] (0/4) Epoch 27, batch 1300, loss[loss=0.2039, simple_loss=0.2872, pruned_loss=0.06031, over 8469.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05737, over 1617256.02 frames. 
], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:20:59,829 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7082, 2.1187, 3.3823, 1.5730, 2.5196, 2.1868, 1.8803, 2.5854], + device='cuda:0'), covar=tensor([0.1979, 0.2941, 0.0875, 0.4942, 0.1980, 0.3486, 0.2497, 0.2361], + device='cuda:0'), in_proj_covar=tensor([0.0539, 0.0632, 0.0564, 0.0669, 0.0659, 0.0610, 0.0560, 0.0644], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:21:00,455 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:21:23,158 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0885, 1.6594, 4.1638, 1.8155, 2.4381, 4.7087, 4.8127, 4.0897], + device='cuda:0'), covar=tensor([0.1326, 0.1947, 0.0322, 0.2126, 0.1251, 0.0193, 0.0452, 0.0569], + device='cuda:0'), in_proj_covar=tensor([0.0307, 0.0327, 0.0293, 0.0321, 0.0320, 0.0278, 0.0437, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:21:24,561 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:21:30,505 INFO [train.py:901] (0/4) Epoch 27, batch 1350, loss[loss=0.2023, simple_loss=0.2906, pruned_loss=0.05706, over 8089.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05671, over 1617169.34 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:21:34,476 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.433e+02 2.859e+02 3.519e+02 6.900e+02, threshold=5.717e+02, percent-clipped=5.0 +2023-02-07 11:21:43,592 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:22:05,937 INFO [train.py:901] (0/4) Epoch 27, batch 1400, loss[loss=0.2118, simple_loss=0.2968, pruned_loss=0.0634, over 8502.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05717, over 1620206.28 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:22:39,548 INFO [train.py:901] (0/4) Epoch 27, batch 1450, loss[loss=0.2612, simple_loss=0.3579, pruned_loss=0.0822, over 8354.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2838, pruned_loss=0.05842, over 1620062.84 frames. ], batch size: 24, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:22:43,642 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.739e+02 3.417e+02 5.363e+02 1.739e+03, threshold=6.835e+02, percent-clipped=22.0 +2023-02-07 11:22:45,699 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 11:23:15,887 INFO [train.py:901] (0/4) Epoch 27, batch 1500, loss[loss=0.1715, simple_loss=0.2635, pruned_loss=0.03971, over 8022.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2839, pruned_loss=0.05847, over 1617780.53 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:23:49,651 INFO [train.py:901] (0/4) Epoch 27, batch 1550, loss[loss=0.2283, simple_loss=0.3053, pruned_loss=0.07562, over 8083.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2836, pruned_loss=0.05834, over 1620335.82 frames. 
], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:23:53,681 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.372e+02 3.027e+02 3.476e+02 5.786e+02, threshold=6.054e+02, percent-clipped=0.0 +2023-02-07 11:23:57,834 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211717.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:15,282 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:19,419 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:24,782 INFO [train.py:901] (0/4) Epoch 27, batch 1600, loss[loss=0.2429, simple_loss=0.3273, pruned_loss=0.0793, over 8494.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2836, pruned_loss=0.0581, over 1624892.35 frames. ], batch size: 28, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:24:28,356 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0045, 2.3300, 3.7444, 2.1910, 2.0838, 3.7752, 0.7467, 2.2372], + device='cuda:0'), covar=tensor([0.1114, 0.1071, 0.0224, 0.1437, 0.2029, 0.0190, 0.1958, 0.1623], + device='cuda:0'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0221, 0.0275, 0.0144, 0.0172, 0.0197], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:24:38,870 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:59,712 INFO [train.py:901] (0/4) Epoch 27, batch 1650, loss[loss=0.1773, simple_loss=0.2575, pruned_loss=0.04856, over 7930.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2835, pruned_loss=0.05767, over 1627846.97 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:25:03,543 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 11:25:03,792 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.547e+02 3.089e+02 3.889e+02 1.356e+03, threshold=6.177e+02, percent-clipped=3.0 +2023-02-07 11:25:34,306 INFO [train.py:901] (0/4) Epoch 27, batch 1700, loss[loss=0.2184, simple_loss=0.2902, pruned_loss=0.07332, over 6960.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2843, pruned_loss=0.05787, over 1627214.07 frames. ], batch size: 72, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:25:39,903 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:25:57,117 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5885, 1.3888, 2.8784, 1.4351, 2.1517, 3.0994, 3.2436, 2.6359], + device='cuda:0'), covar=tensor([0.1251, 0.1654, 0.0347, 0.2053, 0.0891, 0.0299, 0.0487, 0.0550], + device='cuda:0'), in_proj_covar=tensor([0.0307, 0.0329, 0.0295, 0.0322, 0.0323, 0.0279, 0.0440, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:25:59,114 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211889.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:26:09,570 INFO [train.py:901] (0/4) Epoch 27, batch 1750, loss[loss=0.1837, simple_loss=0.2642, pruned_loss=0.05165, over 8241.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2838, pruned_loss=0.05771, over 1624455.22 frames. 
], batch size: 22, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:26:13,486 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 2.460e+02 2.972e+02 3.773e+02 5.726e+02, threshold=5.944e+02, percent-clipped=0.0 +2023-02-07 11:26:24,668 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0 +2023-02-07 11:26:43,427 INFO [train.py:901] (0/4) Epoch 27, batch 1800, loss[loss=0.2024, simple_loss=0.2904, pruned_loss=0.05726, over 8312.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2832, pruned_loss=0.05731, over 1620426.82 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:27:03,807 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 11:27:15,627 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-212000.pt +2023-02-07 11:27:20,494 INFO [train.py:901] (0/4) Epoch 27, batch 1850, loss[loss=0.1948, simple_loss=0.2771, pruned_loss=0.05628, over 8294.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.0568, over 1615436.96 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:27:24,576 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.252e+02 2.767e+02 3.484e+02 5.487e+02, threshold=5.534e+02, percent-clipped=0.0 +2023-02-07 11:27:54,169 INFO [train.py:901] (0/4) Epoch 27, batch 1900, loss[loss=0.1787, simple_loss=0.2632, pruned_loss=0.04704, over 7417.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2832, pruned_loss=0.05784, over 1616803.89 frames. ], batch size: 17, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:28:23,227 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6275, 4.5968, 4.2249, 2.0898, 4.0829, 4.2141, 4.1798, 4.0971], + device='cuda:0'), covar=tensor([0.0611, 0.0484, 0.0910, 0.4038, 0.0771, 0.0862, 0.1071, 0.0667], + device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0456, 0.0442, 0.0552, 0.0438, 0.0460, 0.0437, 0.0402], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:28:25,173 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 11:28:28,373 INFO [train.py:901] (0/4) Epoch 27, batch 1950, loss[loss=0.1779, simple_loss=0.2677, pruned_loss=0.04403, over 8251.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2829, pruned_loss=0.05752, over 1615400.39 frames. ], batch size: 24, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:28:33,122 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.484e+02 3.059e+02 3.727e+02 7.478e+02, threshold=6.119e+02, percent-clipped=3.0 +2023-02-07 11:28:38,408 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 11:28:38,624 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212119.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:28:41,246 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212122.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:28:56,769 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212144.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:28:57,301 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-07 11:28:57,488 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212145.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:29:03,980 INFO [train.py:901] (0/4) Epoch 27, batch 2000, loss[loss=0.1953, simple_loss=0.2808, pruned_loss=0.05495, over 8525.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2823, pruned_loss=0.057, over 1614377.57 frames. ], batch size: 28, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:29:14,165 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212170.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:29:37,643 INFO [train.py:901] (0/4) Epoch 27, batch 2050, loss[loss=0.2352, simple_loss=0.3211, pruned_loss=0.07467, over 7100.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2829, pruned_loss=0.0575, over 1613558.74 frames. ], batch size: 72, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:29:41,746 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.364e+02 2.966e+02 3.655e+02 9.314e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-07 11:29:53,464 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6304, 1.4347, 1.6398, 1.3635, 0.9363, 1.4415, 1.4495, 1.3491], + device='cuda:0'), covar=tensor([0.0613, 0.1284, 0.1759, 0.1522, 0.0616, 0.1540, 0.0745, 0.0706], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0163, 0.0102, 0.0164, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:30:13,814 INFO [train.py:901] (0/4) Epoch 27, batch 2100, loss[loss=0.2111, simple_loss=0.2979, pruned_loss=0.06215, over 8111.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.05693, over 1607408.67 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:30:24,393 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4056, 4.3448, 4.0014, 1.9425, 3.8390, 4.0657, 3.8951, 3.9037], + device='cuda:0'), covar=tensor([0.0693, 0.0501, 0.0946, 0.4576, 0.0860, 0.0978, 0.1240, 0.0732], + device='cuda:0'), in_proj_covar=tensor([0.0539, 0.0456, 0.0442, 0.0554, 0.0438, 0.0459, 0.0437, 0.0404], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:30:25,136 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:30:42,128 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-07 11:30:47,851 INFO [train.py:901] (0/4) Epoch 27, batch 2150, loss[loss=0.2275, simple_loss=0.306, pruned_loss=0.07453, over 8502.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05639, over 1610343.72 frames. 
], batch size: 26, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:30:48,019 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9711, 1.5974, 1.3967, 1.4901, 1.2659, 1.2571, 1.2147, 1.2527], + device='cuda:0'), covar=tensor([0.1303, 0.0602, 0.1429, 0.0635, 0.0835, 0.1710, 0.1056, 0.0888], + device='cuda:0'), in_proj_covar=tensor([0.0365, 0.0240, 0.0341, 0.0316, 0.0304, 0.0349, 0.0350, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 11:30:51,761 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.395e+02 2.764e+02 3.582e+02 6.444e+02, threshold=5.527e+02, percent-clipped=1.0 +2023-02-07 11:31:22,777 INFO [train.py:901] (0/4) Epoch 27, batch 2200, loss[loss=0.1779, simple_loss=0.2593, pruned_loss=0.04827, over 7798.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05683, over 1611416.88 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:31:57,346 INFO [train.py:901] (0/4) Epoch 27, batch 2250, loss[loss=0.1879, simple_loss=0.273, pruned_loss=0.05141, over 7734.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2822, pruned_loss=0.05671, over 1613143.19 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:32:01,577 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.301e+02 2.815e+02 3.457e+02 5.141e+02, threshold=5.631e+02, percent-clipped=0.0 +2023-02-07 11:32:31,466 INFO [train.py:901] (0/4) Epoch 27, batch 2300, loss[loss=0.1583, simple_loss=0.2506, pruned_loss=0.03299, over 7813.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2814, pruned_loss=0.05611, over 1614194.61 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:32:32,832 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212457.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:32:39,375 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212466.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 11:33:07,947 INFO [train.py:901] (0/4) Epoch 27, batch 2350, loss[loss=0.1978, simple_loss=0.2815, pruned_loss=0.05698, over 8033.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2807, pruned_loss=0.05585, over 1612593.85 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:33:12,149 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.366e+02 2.801e+02 3.492e+02 6.818e+02, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 11:33:42,857 INFO [train.py:901] (0/4) Epoch 27, batch 2400, loss[loss=0.1862, simple_loss=0.262, pruned_loss=0.05521, over 7657.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.05687, over 1609670.24 frames. 
], batch size: 19, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:01,662 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212581.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:34:04,897 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8377, 3.7892, 3.4452, 1.9415, 3.3699, 3.5654, 3.3453, 3.3523], + device='cuda:0'), covar=tensor([0.0940, 0.0712, 0.1215, 0.4548, 0.0989, 0.1127, 0.1519, 0.0887], + device='cuda:0'), in_proj_covar=tensor([0.0542, 0.0460, 0.0444, 0.0558, 0.0440, 0.0464, 0.0439, 0.0406], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:34:19,817 INFO [train.py:901] (0/4) Epoch 27, batch 2450, loss[loss=0.2055, simple_loss=0.2995, pruned_loss=0.05581, over 8504.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.282, pruned_loss=0.05672, over 1615834.65 frames. ], batch size: 28, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:23,907 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.406e+02 2.879e+02 3.948e+02 9.646e+02, threshold=5.757e+02, percent-clipped=9.0 +2023-02-07 11:34:26,770 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212615.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:34:46,275 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-07 11:34:54,050 INFO [train.py:901] (0/4) Epoch 27, batch 2500, loss[loss=0.2016, simple_loss=0.2819, pruned_loss=0.06063, over 7231.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2831, pruned_loss=0.0577, over 1617890.82 frames. ], batch size: 16, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:59,673 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8285, 1.6440, 3.3703, 1.4132, 2.3487, 3.6248, 3.7347, 3.1387], + device='cuda:0'), covar=tensor([0.1326, 0.1787, 0.0334, 0.2347, 0.1047, 0.0226, 0.0564, 0.0520], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0327, 0.0292, 0.0321, 0.0320, 0.0277, 0.0437, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:35:28,284 INFO [train.py:901] (0/4) Epoch 27, batch 2550, loss[loss=0.1542, simple_loss=0.2352, pruned_loss=0.0366, over 7699.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2835, pruned_loss=0.05809, over 1618970.20 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:35:33,073 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.333e+02 2.985e+02 3.926e+02 7.498e+02, threshold=5.971e+02, percent-clipped=4.0 +2023-02-07 11:35:37,328 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212716.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:35:46,863 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212729.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:35:47,588 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212730.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:04,558 INFO [train.py:901] (0/4) Epoch 27, batch 2600, loss[loss=0.2037, simple_loss=0.2885, pruned_loss=0.05941, over 8565.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2836, pruned_loss=0.05783, over 1616865.05 frames. ], batch size: 31, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:36:09,617 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-02-07 11:36:18,100 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9663, 1.6604, 3.5101, 1.6431, 2.6047, 3.8183, 3.8758, 3.3285], + device='cuda:0'), covar=tensor([0.1190, 0.1722, 0.0290, 0.2028, 0.0898, 0.0199, 0.0501, 0.0468], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0327, 0.0292, 0.0320, 0.0320, 0.0277, 0.0438, 0.0307], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:36:21,569 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1354, 3.5572, 2.2000, 2.7082, 2.8463, 1.9341, 2.8587, 2.9594], + device='cuda:0'), covar=tensor([0.1864, 0.0475, 0.1306, 0.0883, 0.0800, 0.1590, 0.1163, 0.1305], + device='cuda:0'), in_proj_covar=tensor([0.0359, 0.0238, 0.0337, 0.0312, 0.0301, 0.0345, 0.0346, 0.0319], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 11:36:29,683 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212792.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:33,818 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0236, 2.2751, 3.7934, 2.2446, 2.0953, 3.7502, 0.8078, 2.3097], + device='cuda:0'), covar=tensor([0.1185, 0.1315, 0.0182, 0.1243, 0.2086, 0.0234, 0.1999, 0.1360], + device='cuda:0'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0276, 0.0144, 0.0171, 0.0198], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:36:35,822 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:38,497 INFO [train.py:901] (0/4) Epoch 27, batch 2650, loss[loss=0.2089, simple_loss=0.2906, pruned_loss=0.0636, over 8449.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05766, over 1614781.39 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:36:43,316 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.517e+02 2.957e+02 3.589e+02 7.428e+02, threshold=5.913e+02, percent-clipped=3.0 +2023-02-07 11:37:02,114 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212837.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:37:04,071 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5796, 1.4824, 1.8860, 1.2763, 1.2070, 1.8576, 0.1805, 1.2514], + device='cuda:0'), covar=tensor([0.1305, 0.1180, 0.0365, 0.0843, 0.2165, 0.0421, 0.1728, 0.1197], + device='cuda:0'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0276, 0.0145, 0.0172, 0.0198], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:37:14,955 INFO [train.py:901] (0/4) Epoch 27, batch 2700, loss[loss=0.2262, simple_loss=0.3073, pruned_loss=0.07249, over 8322.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.0588, over 1612927.48 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:37:19,841 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212862.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:37:49,368 INFO [train.py:901] (0/4) Epoch 27, batch 2750, loss[loss=0.2098, simple_loss=0.297, pruned_loss=0.06129, over 8484.00 frames. 
], tot_loss[loss=0.2006, simple_loss=0.2833, pruned_loss=0.05896, over 1609976.64 frames. ], batch size: 39, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:37:53,340 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.387e+02 2.942e+02 3.576e+02 8.277e+02, threshold=5.883e+02, percent-clipped=4.0 +2023-02-07 11:37:56,793 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:38:25,581 INFO [train.py:901] (0/4) Epoch 27, batch 2800, loss[loss=0.2049, simple_loss=0.2911, pruned_loss=0.05933, over 8279.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2846, pruned_loss=0.05946, over 1612441.27 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:38:43,385 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4880, 1.4772, 1.8395, 1.1272, 1.0575, 1.8502, 0.1932, 1.1840], + device='cuda:0'), covar=tensor([0.1605, 0.1022, 0.0393, 0.1080, 0.2603, 0.0387, 0.1676, 0.1216], + device='cuda:0'), in_proj_covar=tensor([0.0199, 0.0205, 0.0134, 0.0224, 0.0277, 0.0145, 0.0172, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:38:46,070 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:38:58,528 INFO [train.py:901] (0/4) Epoch 27, batch 2850, loss[loss=0.2533, simple_loss=0.3451, pruned_loss=0.08075, over 8513.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2851, pruned_loss=0.05966, over 1612000.87 frames. ], batch size: 49, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:39:02,628 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 2.420e+02 3.040e+02 3.738e+02 9.771e+02, threshold=6.080e+02, percent-clipped=4.0 +2023-02-07 11:39:02,841 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:39:03,500 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5608, 1.4904, 1.7637, 1.3412, 0.8720, 1.5115, 1.4740, 1.1784], + device='cuda:0'), covar=tensor([0.0608, 0.1247, 0.1565, 0.1503, 0.0603, 0.1451, 0.0718, 0.0749], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0162, 0.0101, 0.0164, 0.0112, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:39:33,429 INFO [train.py:901] (0/4) Epoch 27, batch 2900, loss[loss=0.2269, simple_loss=0.3107, pruned_loss=0.07161, over 8332.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2846, pruned_loss=0.05928, over 1610345.37 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:39:36,850 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213060.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:39:46,822 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213073.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:40:08,834 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0 +2023-02-07 11:40:09,162 INFO [train.py:901] (0/4) Epoch 27, batch 2950, loss[loss=0.1913, simple_loss=0.288, pruned_loss=0.04732, over 8448.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2836, pruned_loss=0.05869, over 1608741.75 frames. 
], batch size: 29, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:40:12,522 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 11:40:13,188 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.292e+02 2.734e+02 3.601e+02 6.803e+02, threshold=5.467e+02, percent-clipped=1.0 +2023-02-07 11:40:30,262 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213136.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:40:42,822 INFO [train.py:901] (0/4) Epoch 27, batch 3000, loss[loss=0.2177, simple_loss=0.307, pruned_loss=0.06422, over 8197.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.05852, over 1610470.36 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:40:42,823 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 11:40:56,481 INFO [train.py:935] (0/4) Epoch 27, validation: loss=0.171, simple_loss=0.2706, pruned_loss=0.03572, over 944034.00 frames. +2023-02-07 11:40:56,482 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6760MB +2023-02-07 11:41:08,332 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213172.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:10,342 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213175.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:19,861 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213188.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:25,937 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:31,706 INFO [train.py:901] (0/4) Epoch 27, batch 3050, loss[loss=0.2481, simple_loss=0.3175, pruned_loss=0.08931, over 7238.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.0582, over 1608695.47 frames. ], batch size: 71, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:41:36,535 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.283e+02 2.877e+02 3.649e+02 6.604e+02, threshold=5.754e+02, percent-clipped=7.0 +2023-02-07 11:41:49,446 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4775, 1.7435, 4.3420, 1.9281, 2.6032, 4.9800, 5.1256, 4.2769], + device='cuda:0'), covar=tensor([0.1104, 0.1848, 0.0272, 0.2029, 0.1190, 0.0183, 0.0431, 0.0559], + device='cuda:0'), in_proj_covar=tensor([0.0306, 0.0328, 0.0293, 0.0321, 0.0321, 0.0279, 0.0438, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:41:55,419 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2358, 1.5299, 4.5201, 1.9190, 3.8251, 3.6650, 4.1669, 4.0997], + device='cuda:0'), covar=tensor([0.1010, 0.6102, 0.0952, 0.4674, 0.1732, 0.1511, 0.0871, 0.0826], + device='cuda:0'), in_proj_covar=tensor([0.0676, 0.0668, 0.0736, 0.0660, 0.0749, 0.0637, 0.0638, 0.0719], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:42:04,098 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:42:06,427 INFO [train.py:901] (0/4) Epoch 27, batch 3100, loss[loss=0.2165, simple_loss=0.2963, pruned_loss=0.06831, over 8032.00 frames. 
], tot_loss[loss=0.2003, simple_loss=0.2834, pruned_loss=0.0586, over 1607073.05 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:42:40,166 INFO [train.py:901] (0/4) Epoch 27, batch 3150, loss[loss=0.182, simple_loss=0.2655, pruned_loss=0.04927, over 8089.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2834, pruned_loss=0.0588, over 1608142.98 frames. ], batch size: 21, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:42:44,217 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.557e+02 3.186e+02 3.836e+02 1.080e+03, threshold=6.372e+02, percent-clipped=6.0 +2023-02-07 11:43:15,308 INFO [train.py:901] (0/4) Epoch 27, batch 3200, loss[loss=0.1895, simple_loss=0.2857, pruned_loss=0.04665, over 8505.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2836, pruned_loss=0.05841, over 1613844.27 frames. ], batch size: 28, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:43:48,888 INFO [train.py:901] (0/4) Epoch 27, batch 3250, loss[loss=0.228, simple_loss=0.3089, pruned_loss=0.07359, over 8289.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05766, over 1611684.38 frames. ], batch size: 49, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:43:52,809 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.478e+02 2.885e+02 3.413e+02 5.983e+02, threshold=5.770e+02, percent-clipped=0.0 +2023-02-07 11:44:07,271 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:17,673 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:25,453 INFO [train.py:901] (0/4) Epoch 27, batch 3300, loss[loss=0.2204, simple_loss=0.2842, pruned_loss=0.07834, over 7793.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.05763, over 1612080.98 frames. ], batch size: 19, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:44:26,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213456.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:35,188 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213469.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:59,365 INFO [train.py:901] (0/4) Epoch 27, batch 3350, loss[loss=0.2508, simple_loss=0.3352, pruned_loss=0.08314, over 8718.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.05802, over 1610632.53 frames. 
], batch size: 30, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:45:00,955 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:45:03,449 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.606e+02 3.102e+02 3.998e+02 8.787e+02, threshold=6.203e+02, percent-clipped=8.0 +2023-02-07 11:45:18,435 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213532.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:45:33,327 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4250, 1.4287, 1.3968, 1.8418, 0.5647, 1.2903, 1.2995, 1.4441], + device='cuda:0'), covar=tensor([0.0871, 0.0859, 0.0963, 0.0476, 0.1263, 0.1411, 0.0783, 0.0747], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0196, 0.0246, 0.0214, 0.0205, 0.0248, 0.0251, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 11:45:34,516 INFO [train.py:901] (0/4) Epoch 27, batch 3400, loss[loss=0.1845, simple_loss=0.268, pruned_loss=0.05053, over 8194.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.05821, over 1611101.39 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:45:55,328 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=213584.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:46:09,634 INFO [train.py:901] (0/4) Epoch 27, batch 3450, loss[loss=0.1972, simple_loss=0.2864, pruned_loss=0.05397, over 8483.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05778, over 1612753.08 frames. ], batch size: 29, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:46:13,704 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.297e+02 2.616e+02 3.439e+02 9.820e+02, threshold=5.232e+02, percent-clipped=1.0 +2023-02-07 11:46:41,460 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 11:46:44,525 INFO [train.py:901] (0/4) Epoch 27, batch 3500, loss[loss=0.2425, simple_loss=0.3228, pruned_loss=0.0811, over 6848.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2833, pruned_loss=0.05804, over 1613094.60 frames. ], batch size: 71, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:02,336 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8940, 2.1498, 3.4451, 2.0356, 2.0169, 3.4590, 0.9413, 2.1312], + device='cuda:0'), covar=tensor([0.1150, 0.1277, 0.0372, 0.1662, 0.2192, 0.0330, 0.1848, 0.1447], + device='cuda:0'), in_proj_covar=tensor([0.0200, 0.0205, 0.0135, 0.0224, 0.0276, 0.0145, 0.0172, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 11:47:08,783 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 11:47:11,017 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 11:47:20,454 INFO [train.py:901] (0/4) Epoch 27, batch 3550, loss[loss=0.1784, simple_loss=0.2659, pruned_loss=0.04549, over 8133.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05765, over 1612243.43 frames. 
], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:24,358 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.484e+02 3.157e+02 3.893e+02 8.912e+02, threshold=6.313e+02, percent-clipped=7.0 +2023-02-07 11:47:55,123 INFO [train.py:901] (0/4) Epoch 27, batch 3600, loss[loss=0.2298, simple_loss=0.3087, pruned_loss=0.07545, over 8503.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2824, pruned_loss=0.05757, over 1614659.81 frames. ], batch size: 26, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:58,956 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 11:48:31,565 INFO [train.py:901] (0/4) Epoch 27, batch 3650, loss[loss=0.194, simple_loss=0.2691, pruned_loss=0.05947, over 8136.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2823, pruned_loss=0.05748, over 1614250.06 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:48:35,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.435e+02 3.005e+02 4.000e+02 1.001e+03, threshold=6.009e+02, percent-clipped=1.0 +2023-02-07 11:49:05,225 INFO [train.py:901] (0/4) Epoch 27, batch 3700, loss[loss=0.1835, simple_loss=0.2581, pruned_loss=0.05446, over 7548.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2827, pruned_loss=0.05792, over 1612627.83 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:49:07,787 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 11:49:11,332 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 11:49:40,753 INFO [train.py:901] (0/4) Epoch 27, batch 3750, loss[loss=0.213, simple_loss=0.2986, pruned_loss=0.06366, over 8245.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05812, over 1613586.95 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:49:44,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.240e+02 2.670e+02 3.453e+02 6.024e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 11:49:57,382 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213928.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:50:03,488 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2941, 3.7146, 2.4851, 3.0521, 3.1181, 2.1893, 3.2046, 3.1761], + device='cuda:0'), covar=tensor([0.1765, 0.0420, 0.1093, 0.0737, 0.0852, 0.1536, 0.0942, 0.1067], + device='cuda:0'), in_proj_covar=tensor([0.0365, 0.0243, 0.0345, 0.0317, 0.0308, 0.0352, 0.0352, 0.0327], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 11:50:15,099 INFO [train.py:901] (0/4) Epoch 27, batch 3800, loss[loss=0.1909, simple_loss=0.271, pruned_loss=0.05539, over 8078.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.0584, over 1615721.98 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:50:32,925 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 11:50:46,486 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-214000.pt +2023-02-07 11:50:51,694 INFO [train.py:901] (0/4) Epoch 27, batch 3850, loss[loss=0.1961, simple_loss=0.2776, pruned_loss=0.05727, over 8328.00 frames. 
], tot_loss[loss=0.1985, simple_loss=0.2819, pruned_loss=0.05753, over 1611532.93 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:50:55,388 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 11:50:55,678 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.325e+02 2.987e+02 3.815e+02 9.366e+02, threshold=5.974e+02, percent-clipped=6.0 +2023-02-07 11:51:02,708 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0816, 1.2355, 1.2108, 0.7774, 1.2046, 1.0740, 0.1045, 1.2235], + device='cuda:0'), covar=tensor([0.0500, 0.0467, 0.0409, 0.0623, 0.0535, 0.1065, 0.0969, 0.0383], + device='cuda:0'), in_proj_covar=tensor([0.0471, 0.0407, 0.0361, 0.0457, 0.0392, 0.0545, 0.0402, 0.0436], + device='cuda:0'), out_proj_covar=tensor([1.2487e-04, 1.0563e-04, 9.4230e-05, 1.1962e-04, 1.0261e-04, 1.5214e-04, + 1.0725e-04, 1.1451e-04], device='cuda:0') +2023-02-07 11:51:19,438 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:51:21,269 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 11:51:27,202 INFO [train.py:901] (0/4) Epoch 27, batch 3900, loss[loss=0.148, simple_loss=0.229, pruned_loss=0.03346, over 7711.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2828, pruned_loss=0.05774, over 1611216.10 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:51:38,678 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0574, 1.6817, 3.3407, 1.5738, 2.5144, 3.6448, 3.7610, 3.1720], + device='cuda:0'), covar=tensor([0.1140, 0.1726, 0.0267, 0.2072, 0.0922, 0.0222, 0.0502, 0.0460], + device='cuda:0'), in_proj_covar=tensor([0.0306, 0.0326, 0.0291, 0.0321, 0.0321, 0.0278, 0.0438, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 11:52:00,516 INFO [train.py:901] (0/4) Epoch 27, batch 3950, loss[loss=0.1711, simple_loss=0.2692, pruned_loss=0.03652, over 8183.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05799, over 1614349.97 frames. 
], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:04,368 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.377e+02 2.717e+02 3.364e+02 5.097e+02, threshold=5.435e+02, percent-clipped=0.0 +2023-02-07 11:52:26,022 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6290, 1.4361, 1.7633, 1.3113, 0.9253, 1.4980, 1.5119, 1.5059], + device='cuda:0'), covar=tensor([0.0586, 0.1323, 0.1594, 0.1492, 0.0606, 0.1546, 0.0711, 0.0632], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0162, 0.0101, 0.0164, 0.0113, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:52:26,775 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9852, 1.7028, 2.0344, 1.8552, 1.9902, 2.0371, 1.9169, 0.8560], + device='cuda:0'), covar=tensor([0.5990, 0.5104, 0.2136, 0.3761, 0.2563, 0.3397, 0.2024, 0.5356], + device='cuda:0'), in_proj_covar=tensor([0.0961, 0.1017, 0.0829, 0.0986, 0.1020, 0.0925, 0.0767, 0.0848], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 11:52:30,026 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:52:36,018 INFO [train.py:901] (0/4) Epoch 27, batch 4000, loss[loss=0.1832, simple_loss=0.2616, pruned_loss=0.05245, over 7246.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2829, pruned_loss=0.05794, over 1612644.09 frames. ], batch size: 16, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:43,219 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 11:53:10,789 INFO [train.py:901] (0/4) Epoch 27, batch 4050, loss[loss=0.1884, simple_loss=0.2637, pruned_loss=0.05656, over 7282.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2831, pruned_loss=0.058, over 1609369.97 frames. ], batch size: 16, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:53:14,931 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.379e+02 2.958e+02 3.648e+02 7.596e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 11:53:22,008 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 11:53:31,859 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:53:45,405 INFO [train.py:901] (0/4) Epoch 27, batch 4100, loss[loss=0.1867, simple_loss=0.2759, pruned_loss=0.04874, over 8709.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2814, pruned_loss=0.05724, over 1607376.18 frames. ], batch size: 39, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:54:10,634 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-07 11:54:17,255 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:54:21,095 INFO [train.py:901] (0/4) Epoch 27, batch 4150, loss[loss=0.2214, simple_loss=0.3085, pruned_loss=0.06715, over 8444.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2814, pruned_loss=0.05715, over 1608548.62 frames. 
], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:54:25,175 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.522e+02 2.957e+02 3.518e+02 6.524e+02, threshold=5.913e+02, percent-clipped=2.0 +2023-02-07 11:54:34,216 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:54:55,411 INFO [train.py:901] (0/4) Epoch 27, batch 4200, loss[loss=0.2493, simple_loss=0.3291, pruned_loss=0.08476, over 8334.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2825, pruned_loss=0.05757, over 1608548.17 frames. ], batch size: 26, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:55:03,017 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4512, 1.8164, 2.5974, 1.3416, 1.8818, 1.8236, 1.5275, 1.8834], + device='cuda:0'), covar=tensor([0.2062, 0.2705, 0.0983, 0.4905, 0.2170, 0.3426, 0.2648, 0.2496], + device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0635, 0.0566, 0.0671, 0.0660, 0.0609, 0.0564, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:55:15,711 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 11:55:30,763 INFO [train.py:901] (0/4) Epoch 27, batch 4250, loss[loss=0.1824, simple_loss=0.2704, pruned_loss=0.04724, over 7910.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05773, over 1609750.83 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:55:34,789 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.351e+02 2.930e+02 3.605e+02 8.966e+02, threshold=5.860e+02, percent-clipped=4.0 +2023-02-07 11:55:40,121 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 11:55:49,286 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8053, 1.4512, 1.6979, 1.3652, 0.9774, 1.4772, 1.6608, 1.4319], + device='cuda:0'), covar=tensor([0.0580, 0.1332, 0.1673, 0.1500, 0.0587, 0.1551, 0.0717, 0.0704], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 11:56:04,676 INFO [train.py:901] (0/4) Epoch 27, batch 4300, loss[loss=0.1855, simple_loss=0.2745, pruned_loss=0.04819, over 8137.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.05839, over 1609255.38 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:56:10,730 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7063, 2.4083, 4.0017, 1.6080, 2.9789, 2.2704, 1.8833, 3.0188], + device='cuda:0'), covar=tensor([0.2095, 0.2735, 0.0934, 0.4893, 0.1965, 0.3505, 0.2575, 0.2549], + device='cuda:0'), in_proj_covar=tensor([0.0541, 0.0635, 0.0566, 0.0670, 0.0661, 0.0611, 0.0564, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:56:23,664 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. 
limit=2.0 +2023-02-07 11:56:29,283 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:56:40,083 INFO [train.py:901] (0/4) Epoch 27, batch 4350, loss[loss=0.2011, simple_loss=0.2708, pruned_loss=0.06567, over 7800.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2848, pruned_loss=0.05888, over 1618825.74 frames. ], batch size: 19, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:56:44,887 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.457e+02 2.989e+02 4.041e+02 8.697e+02, threshold=5.978e+02, percent-clipped=4.0 +2023-02-07 11:56:48,049 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0 +2023-02-07 11:57:11,508 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 11:57:14,890 INFO [train.py:901] (0/4) Epoch 27, batch 4400, loss[loss=0.2431, simple_loss=0.3147, pruned_loss=0.08574, over 8595.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2839, pruned_loss=0.05853, over 1614485.14 frames. ], batch size: 34, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:32,004 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:39,143 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-07 11:57:49,418 INFO [train.py:901] (0/4) Epoch 27, batch 4450, loss[loss=0.2602, simple_loss=0.3207, pruned_loss=0.09989, over 7914.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.05881, over 1614647.88 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:50,287 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:51,442 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 11:57:53,336 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.435e+02 2.910e+02 3.675e+02 1.096e+03, threshold=5.821e+02, percent-clipped=3.0 +2023-02-07 11:58:06,357 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7840, 1.8854, 5.9899, 2.2530, 5.3669, 5.0426, 5.4656, 5.3822], + device='cuda:0'), covar=tensor([0.0499, 0.4938, 0.0376, 0.3998, 0.1029, 0.0888, 0.0536, 0.0540], + device='cuda:0'), in_proj_covar=tensor([0.0680, 0.0674, 0.0742, 0.0660, 0.0754, 0.0642, 0.0643, 0.0721], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:58:10,645 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 11:58:14,562 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5788, 2.1879, 3.9844, 1.5429, 2.8989, 2.3808, 1.6456, 2.8318], + device='cuda:0'), covar=tensor([0.2209, 0.2887, 0.0888, 0.5111, 0.2006, 0.3177, 0.2833, 0.2578], + device='cuda:0'), in_proj_covar=tensor([0.0536, 0.0630, 0.0561, 0.0665, 0.0656, 0.0605, 0.0560, 0.0640], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 11:58:25,065 INFO [train.py:901] (0/4) Epoch 27, batch 4500, loss[loss=0.1846, simple_loss=0.2558, pruned_loss=0.05671, over 7537.00 frames. 
], tot_loss[loss=0.1997, simple_loss=0.2829, pruned_loss=0.05826, over 1613121.13 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:58:49,088 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 11:58:51,961 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:58:58,558 INFO [train.py:901] (0/4) Epoch 27, batch 4550, loss[loss=0.1619, simple_loss=0.2529, pruned_loss=0.03551, over 8043.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2808, pruned_loss=0.05744, over 1610327.32 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:59:03,197 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.259e+02 2.795e+02 3.667e+02 7.490e+02, threshold=5.591e+02, percent-clipped=6.0 +2023-02-07 11:59:34,567 INFO [train.py:901] (0/4) Epoch 27, batch 4600, loss[loss=0.1833, simple_loss=0.2812, pruned_loss=0.04272, over 8251.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2811, pruned_loss=0.05741, over 1610236.59 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:07,864 INFO [train.py:901] (0/4) Epoch 27, batch 4650, loss[loss=0.1845, simple_loss=0.2551, pruned_loss=0.05693, over 7417.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2819, pruned_loss=0.05825, over 1612888.54 frames. ], batch size: 17, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:11,921 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.452e+02 3.083e+02 3.974e+02 1.018e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-07 12:00:43,657 INFO [train.py:901] (0/4) Epoch 27, batch 4700, loss[loss=0.2378, simple_loss=0.314, pruned_loss=0.08084, over 6789.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2829, pruned_loss=0.0588, over 1609591.63 frames. ], batch size: 71, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:48,570 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:05,450 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:17,189 INFO [train.py:901] (0/4) Epoch 27, batch 4750, loss[loss=0.2396, simple_loss=0.3338, pruned_loss=0.07267, over 8361.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2824, pruned_loss=0.0586, over 1610016.09 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:01:21,120 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.444e+02 3.016e+02 3.790e+02 1.117e+03, threshold=6.032e+02, percent-clipped=6.0 +2023-02-07 12:01:42,271 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 12:01:45,082 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 12:01:49,020 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:51,313 INFO [train.py:901] (0/4) Epoch 27, batch 4800, loss[loss=0.1649, simple_loss=0.2604, pruned_loss=0.03467, over 7809.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2813, pruned_loss=0.05754, over 1608175.44 frames. 
], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:07,893 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:02:27,034 INFO [train.py:901] (0/4) Epoch 27, batch 4850, loss[loss=0.1964, simple_loss=0.2623, pruned_loss=0.06528, over 7532.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2816, pruned_loss=0.05754, over 1608111.61 frames. ], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:31,212 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.294e+02 2.705e+02 3.274e+02 6.085e+02, threshold=5.409e+02, percent-clipped=1.0 +2023-02-07 12:02:36,650 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 12:03:00,128 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9161, 1.9221, 3.2275, 2.3891, 2.7470, 2.0108, 1.6987, 1.6745], + device='cuda:0'), covar=tensor([0.8297, 0.7217, 0.2425, 0.4518, 0.3579, 0.4615, 0.3244, 0.6400], + device='cuda:0'), in_proj_covar=tensor([0.0963, 0.1016, 0.0827, 0.0988, 0.1021, 0.0925, 0.0767, 0.0847], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 12:03:01,830 INFO [train.py:901] (0/4) Epoch 27, batch 4900, loss[loss=0.197, simple_loss=0.2853, pruned_loss=0.05433, over 8015.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.281, pruned_loss=0.05739, over 1605079.87 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:37,419 INFO [train.py:901] (0/4) Epoch 27, batch 4950, loss[loss=0.1703, simple_loss=0.2533, pruned_loss=0.04365, over 7415.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05708, over 1603009.88 frames. ], batch size: 17, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:41,324 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.323e+02 2.858e+02 3.502e+02 9.819e+02, threshold=5.716e+02, percent-clipped=5.0 +2023-02-07 12:04:10,562 INFO [train.py:901] (0/4) Epoch 27, batch 5000, loss[loss=0.2013, simple_loss=0.2881, pruned_loss=0.05724, over 8446.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05805, over 1609007.04 frames. ], batch size: 27, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:04:47,121 INFO [train.py:901] (0/4) Epoch 27, batch 5050, loss[loss=0.2768, simple_loss=0.3428, pruned_loss=0.1054, over 7074.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2818, pruned_loss=0.05782, over 1605194.30 frames. ], batch size: 71, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:04:50,998 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.415e+02 2.920e+02 3.667e+02 5.760e+02, threshold=5.840e+02, percent-clipped=1.0 +2023-02-07 12:05:10,169 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 12:05:15,548 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215248.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:05:20,047 INFO [train.py:901] (0/4) Epoch 27, batch 5100, loss[loss=0.2431, simple_loss=0.2997, pruned_loss=0.0932, over 7808.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2824, pruned_loss=0.05869, over 1601394.67 frames. 
], batch size: 20, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:28,227 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7796, 1.7511, 2.3482, 1.6108, 1.3875, 2.2603, 0.4565, 1.4723], + device='cuda:0'), covar=tensor([0.1879, 0.1180, 0.0316, 0.0982, 0.2567, 0.0507, 0.2129, 0.1403], + device='cuda:0'), in_proj_covar=tensor([0.0201, 0.0206, 0.0137, 0.0225, 0.0279, 0.0146, 0.0174, 0.0201], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 12:05:32,898 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2108, 1.0649, 1.3222, 1.0447, 0.9651, 1.3010, 0.1272, 1.0261], + device='cuda:0'), covar=tensor([0.1561, 0.1417, 0.0525, 0.0660, 0.2336, 0.0610, 0.2058, 0.1265], + device='cuda:0'), in_proj_covar=tensor([0.0201, 0.0207, 0.0137, 0.0225, 0.0279, 0.0147, 0.0174, 0.0201], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 12:05:54,329 INFO [train.py:901] (0/4) Epoch 27, batch 5150, loss[loss=0.2052, simple_loss=0.3022, pruned_loss=0.05411, over 8606.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2824, pruned_loss=0.05834, over 1602057.00 frames. ], batch size: 39, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:59,266 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.449e+02 2.868e+02 3.492e+02 6.640e+02, threshold=5.736e+02, percent-clipped=1.0 +2023-02-07 12:06:22,719 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215343.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:06:28,172 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215351.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:06:30,710 INFO [train.py:901] (0/4) Epoch 27, batch 5200, loss[loss=0.2138, simple_loss=0.3045, pruned_loss=0.06158, over 7977.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05772, over 1607637.42 frames. ], batch size: 21, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:07:05,197 INFO [train.py:901] (0/4) Epoch 27, batch 5250, loss[loss=0.1714, simple_loss=0.2583, pruned_loss=0.04222, over 8248.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2838, pruned_loss=0.05893, over 1607121.53 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:07:09,825 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.358e+02 2.790e+02 3.638e+02 8.125e+02, threshold=5.579e+02, percent-clipped=3.0 +2023-02-07 12:07:12,598 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 12:07:40,339 INFO [train.py:901] (0/4) Epoch 27, batch 5300, loss[loss=0.2628, simple_loss=0.3346, pruned_loss=0.09548, over 6961.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2835, pruned_loss=0.05888, over 1606313.05 frames. ], batch size: 71, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:13,771 INFO [train.py:901] (0/4) Epoch 27, batch 5350, loss[loss=0.2147, simple_loss=0.291, pruned_loss=0.06921, over 7304.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2828, pruned_loss=0.05851, over 1608165.34 frames. 
], batch size: 71, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:18,661 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.346e+02 2.455e+02 2.847e+02 3.988e+02 1.267e+03, threshold=5.693e+02, percent-clipped=12.0 +2023-02-07 12:08:25,712 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:08:48,872 INFO [train.py:901] (0/4) Epoch 27, batch 5400, loss[loss=0.2286, simple_loss=0.2999, pruned_loss=0.07869, over 7128.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2822, pruned_loss=0.05857, over 1605646.21 frames. ], batch size: 72, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:52,411 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5992, 2.5156, 1.8481, 2.3445, 2.1529, 1.5528, 2.0664, 2.1137], + device='cuda:0'), covar=tensor([0.1447, 0.0450, 0.1257, 0.0581, 0.0756, 0.1624, 0.1015, 0.0964], + device='cuda:0'), in_proj_covar=tensor([0.0361, 0.0241, 0.0343, 0.0312, 0.0303, 0.0347, 0.0350, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 12:08:57,717 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215566.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:09:08,900 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:09:14,846 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215592.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:09:23,334 INFO [train.py:901] (0/4) Epoch 27, batch 5450, loss[loss=0.2197, simple_loss=0.2952, pruned_loss=0.07214, over 8458.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2822, pruned_loss=0.05847, over 1605216.52 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:27,892 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.540e+02 3.136e+02 3.819e+02 8.555e+02, threshold=6.272e+02, percent-clipped=5.0 +2023-02-07 12:09:47,129 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8774, 1.9824, 2.1020, 1.9293, 1.2104, 1.9983, 2.5622, 2.5295], + device='cuda:0'), covar=tensor([0.0464, 0.1112, 0.1500, 0.1309, 0.0544, 0.1257, 0.0543, 0.0515], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 12:09:56,869 INFO [train.py:901] (0/4) Epoch 27, batch 5500, loss[loss=0.1855, simple_loss=0.2651, pruned_loss=0.05295, over 7915.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2824, pruned_loss=0.05866, over 1607596.19 frames. ], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:56,883 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 12:10:20,594 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215687.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:10:25,967 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:10:32,534 INFO [train.py:901] (0/4) Epoch 27, batch 5550, loss[loss=0.2046, simple_loss=0.296, pruned_loss=0.05664, over 8187.00 frames. 
], tot_loss[loss=0.1986, simple_loss=0.2813, pruned_loss=0.05798, over 1607200.10 frames. ], batch size: 23, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:10:34,093 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215707.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:10:37,234 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.445e+02 2.973e+02 3.969e+02 8.778e+02, threshold=5.947e+02, percent-clipped=4.0 +2023-02-07 12:11:06,798 INFO [train.py:901] (0/4) Epoch 27, batch 5600, loss[loss=0.2099, simple_loss=0.2961, pruned_loss=0.06188, over 8523.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2816, pruned_loss=0.05793, over 1608332.71 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:40,020 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:11:41,821 INFO [train.py:901] (0/4) Epoch 27, batch 5650, loss[loss=0.2033, simple_loss=0.2866, pruned_loss=0.06001, over 8240.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2811, pruned_loss=0.05787, over 1606766.80 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:46,130 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:11:47,203 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.361e+02 2.799e+02 3.308e+02 5.877e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 12:11:50,149 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9083, 2.3074, 3.6973, 1.9216, 1.8152, 3.6292, 0.7672, 2.2460], + device='cuda:0'), covar=tensor([0.1376, 0.1106, 0.0206, 0.1449, 0.2410, 0.0255, 0.1993, 0.1249], + device='cuda:0'), in_proj_covar=tensor([0.0199, 0.0204, 0.0135, 0.0222, 0.0275, 0.0145, 0.0171, 0.0198], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 12:12:04,983 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 12:12:11,241 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:16,656 INFO [train.py:901] (0/4) Epoch 27, batch 5700, loss[loss=0.2128, simple_loss=0.282, pruned_loss=0.07174, over 7413.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2808, pruned_loss=0.0573, over 1607937.77 frames. ], batch size: 17, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:23,686 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:51,895 INFO [train.py:901] (0/4) Epoch 27, batch 5750, loss[loss=0.1588, simple_loss=0.2357, pruned_loss=0.04092, over 7647.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2804, pruned_loss=0.05685, over 1608771.39 frames. 
], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:55,997 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215910.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:12:57,181 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.395e+02 2.899e+02 3.864e+02 7.116e+02, threshold=5.798e+02, percent-clipped=7.0 +2023-02-07 12:13:07,810 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:10,335 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 12:13:26,395 INFO [train.py:901] (0/4) Epoch 27, batch 5800, loss[loss=0.2092, simple_loss=0.293, pruned_loss=0.06271, over 8195.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2824, pruned_loss=0.05794, over 1610594.21 frames. ], batch size: 23, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:13:31,949 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=215963.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:13:43,165 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215980.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:47,578 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9048, 1.4114, 1.5389, 1.1964, 0.9375, 1.3788, 1.6273, 1.5939], + device='cuda:0'), covar=tensor([0.0527, 0.1262, 0.1777, 0.1580, 0.0608, 0.1546, 0.0710, 0.0636], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 12:13:48,224 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=215988.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:13:56,686 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-216000.pt +2023-02-07 12:14:01,060 INFO [train.py:901] (0/4) Epoch 27, batch 5850, loss[loss=0.1956, simple_loss=0.2692, pruned_loss=0.06104, over 7786.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2827, pruned_loss=0.05854, over 1609291.30 frames. ], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:05,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.428e+02 2.871e+02 3.760e+02 7.078e+02, threshold=5.742e+02, percent-clipped=9.0 +2023-02-07 12:14:15,359 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216025.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:14:27,440 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:30,147 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216046.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:36,736 INFO [train.py:901] (0/4) Epoch 27, batch 5900, loss[loss=0.178, simple_loss=0.2628, pruned_loss=0.04657, over 8119.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2821, pruned_loss=0.0582, over 1610723.64 frames. 
], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:38,985 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216058.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:44,134 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:55,476 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216083.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:15:00,975 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:15:10,157 INFO [train.py:901] (0/4) Epoch 27, batch 5950, loss[loss=0.1784, simple_loss=0.2765, pruned_loss=0.04013, over 8104.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05777, over 1611717.40 frames. ], batch size: 23, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:15,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.411e+02 2.864e+02 3.625e+02 8.908e+02, threshold=5.728e+02, percent-clipped=5.0 +2023-02-07 12:15:46,891 INFO [train.py:901] (0/4) Epoch 27, batch 6000, loss[loss=0.1704, simple_loss=0.2674, pruned_loss=0.03674, over 8318.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.0583, over 1615871.51 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:46,892 INFO [train.py:926] (0/4) Computing validation loss +2023-02-07 12:15:59,961 INFO [train.py:935] (0/4) Epoch 27, validation: loss=0.1711, simple_loss=0.2711, pruned_loss=0.03554, over 944034.00 frames. +2023-02-07 12:15:59,962 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6760MB +2023-02-07 12:16:25,738 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:16:35,309 INFO [train.py:901] (0/4) Epoch 27, batch 6050, loss[loss=0.1863, simple_loss=0.2795, pruned_loss=0.04657, over 8465.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2819, pruned_loss=0.05757, over 1617443.55 frames. ], batch size: 27, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:16:40,125 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.565e+02 3.207e+02 4.227e+02 9.285e+02, threshold=6.415e+02, percent-clipped=9.0 +2023-02-07 12:16:56,663 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:16:58,608 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5744, 1.3556, 1.7241, 1.2652, 1.0016, 1.4504, 2.1429, 2.0062], + device='cuda:0'), covar=tensor([0.0505, 0.1759, 0.2385, 0.1957, 0.0676, 0.2031, 0.0745, 0.0683], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0152, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 12:17:09,870 INFO [train.py:901] (0/4) Epoch 27, batch 6100, loss[loss=0.1802, simple_loss=0.2545, pruned_loss=0.05298, over 7693.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2818, pruned_loss=0.05779, over 1614630.79 frames. 
], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:13,990 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:23,687 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.3525, 1.8683, 3.6173, 2.0286, 2.9802, 2.8896, 3.2720, 3.2544], + device='cuda:0'), covar=tensor([0.1398, 0.4819, 0.1611, 0.4870, 0.1832, 0.1731, 0.1129, 0.1044], + device='cuda:0'), in_proj_covar=tensor([0.0672, 0.0664, 0.0734, 0.0654, 0.0744, 0.0632, 0.0634, 0.0713], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:17:28,455 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216281.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:17:39,618 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:44,969 INFO [train.py:901] (0/4) Epoch 27, batch 6150, loss[loss=0.1842, simple_loss=0.2816, pruned_loss=0.04341, over 8127.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.282, pruned_loss=0.05791, over 1614569.37 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:44,980 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 12:17:45,826 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216306.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:45,848 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216306.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:17:49,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.311e+02 2.985e+02 4.036e+02 8.594e+02, threshold=5.970e+02, percent-clipped=2.0 +2023-02-07 12:17:57,198 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216323.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:08,590 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5424, 2.8450, 2.5880, 4.1305, 1.7956, 2.0342, 2.6935, 2.9197], + device='cuda:0'), covar=tensor([0.0723, 0.0769, 0.0787, 0.0206, 0.0992, 0.1181, 0.0767, 0.0811], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0212, 0.0203, 0.0245, 0.0249, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-07 12:18:10,060 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 12:18:18,516 INFO [train.py:901] (0/4) Epoch 27, batch 6200, loss[loss=0.1999, simple_loss=0.2847, pruned_loss=0.05752, over 8022.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2804, pruned_loss=0.05659, over 1613939.62 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:42,449 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:48,840 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 12:18:53,890 INFO [train.py:901] (0/4) Epoch 27, batch 6250, loss[loss=0.1936, simple_loss=0.2872, pruned_loss=0.04999, over 8235.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.28, pruned_loss=0.05656, over 1613028.18 frames. 
], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:58,454 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.470e+02 2.901e+02 3.405e+02 7.374e+02, threshold=5.803e+02, percent-clipped=1.0 +2023-02-07 12:19:27,757 INFO [train.py:901] (0/4) Epoch 27, batch 6300, loss[loss=0.1795, simple_loss=0.2691, pruned_loss=0.04498, over 8187.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2798, pruned_loss=0.05624, over 1616745.21 frames. ], batch size: 23, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:20:01,018 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-07 12:20:01,970 INFO [train.py:901] (0/4) Epoch 27, batch 6350, loss[loss=0.1578, simple_loss=0.2337, pruned_loss=0.04095, over 7409.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2792, pruned_loss=0.05629, over 1610692.67 frames. ], batch size: 17, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:02,161 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:07,862 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.511e+02 2.994e+02 4.018e+02 7.521e+02, threshold=5.987e+02, percent-clipped=5.0 +2023-02-07 12:20:11,421 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7434, 1.5268, 4.9228, 1.8522, 4.4236, 4.0722, 4.4513, 4.3679], + device='cuda:0'), covar=tensor([0.0532, 0.4755, 0.0466, 0.4273, 0.0939, 0.0972, 0.0562, 0.0563], + device='cuda:0'), in_proj_covar=tensor([0.0672, 0.0665, 0.0734, 0.0654, 0.0744, 0.0632, 0.0634, 0.0714], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:20:36,661 INFO [train.py:901] (0/4) Epoch 27, batch 6400, loss[loss=0.2001, simple_loss=0.2761, pruned_loss=0.06207, over 8438.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2809, pruned_loss=0.05738, over 1610004.04 frames. ], batch size: 29, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:41,576 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:58,189 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:10,659 INFO [train.py:901] (0/4) Epoch 27, batch 6450, loss[loss=0.2084, simple_loss=0.3017, pruned_loss=0.05754, over 8203.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2816, pruned_loss=0.05764, over 1614089.76 frames. 
], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:13,564 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:14,324 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6453, 1.8911, 2.8185, 1.4924, 2.2470, 1.9774, 1.7355, 2.2391], + device='cuda:0'), covar=tensor([0.2136, 0.2977, 0.1041, 0.5146, 0.2029, 0.3516, 0.2711, 0.2581], + device='cuda:0'), in_proj_covar=tensor([0.0534, 0.0629, 0.0560, 0.0664, 0.0655, 0.0606, 0.0560, 0.0640], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:21:16,164 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.469e+02 2.882e+02 3.609e+02 7.919e+02, threshold=5.765e+02, percent-clipped=2.0 +2023-02-07 12:21:46,015 INFO [train.py:901] (0/4) Epoch 27, batch 6500, loss[loss=0.1996, simple_loss=0.2762, pruned_loss=0.06152, over 8493.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2816, pruned_loss=0.05785, over 1614885.14 frames. ], batch size: 28, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:58,501 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216673.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:22:04,716 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7288, 1.6869, 2.1062, 1.4836, 1.3790, 2.0888, 0.3971, 1.4128], + device='cuda:0'), covar=tensor([0.1547, 0.1143, 0.0428, 0.0869, 0.2336, 0.0439, 0.1746, 0.1152], + device='cuda:0'), in_proj_covar=tensor([0.0200, 0.0205, 0.0136, 0.0222, 0.0275, 0.0146, 0.0172, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 12:22:19,963 INFO [train.py:901] (0/4) Epoch 27, batch 6550, loss[loss=0.1978, simple_loss=0.2917, pruned_loss=0.05199, over 8246.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2817, pruned_loss=0.05825, over 1613746.02 frames. ], batch size: 24, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:25,109 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.544e+02 2.876e+02 3.743e+02 6.730e+02, threshold=5.752e+02, percent-clipped=5.0 +2023-02-07 12:22:32,725 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:22:48,681 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2063, 2.0693, 2.7464, 2.2429, 2.7477, 2.2879, 2.1322, 1.6772], + device='cuda:0'), covar=tensor([0.5816, 0.5173, 0.1978, 0.4170, 0.2587, 0.3130, 0.1948, 0.5666], + device='cuda:0'), in_proj_covar=tensor([0.0969, 0.1024, 0.0834, 0.0995, 0.1032, 0.0933, 0.0775, 0.0855], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 12:22:54,586 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 12:22:55,724 INFO [train.py:901] (0/4) Epoch 27, batch 6600, loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06125, over 8018.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.282, pruned_loss=0.05827, over 1615080.59 frames. 
], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:59,998 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:12,957 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 12:23:17,013 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:29,548 INFO [train.py:901] (0/4) Epoch 27, batch 6650, loss[loss=0.165, simple_loss=0.2457, pruned_loss=0.04213, over 7542.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2827, pruned_loss=0.05867, over 1613246.70 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:23:34,796 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.571e+02 3.099e+02 3.859e+02 9.745e+02, threshold=6.199e+02, percent-clipped=7.0 +2023-02-07 12:24:03,764 INFO [train.py:901] (0/4) Epoch 27, batch 6700, loss[loss=0.2413, simple_loss=0.3235, pruned_loss=0.07958, over 8352.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2825, pruned_loss=0.05849, over 1615569.48 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:08,628 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4450, 2.4247, 3.0611, 2.4946, 3.0890, 2.5361, 2.4570, 2.0012], + device='cuda:0'), covar=tensor([0.5920, 0.5236, 0.2309, 0.4342, 0.2988, 0.3155, 0.1933, 0.5999], + device='cuda:0'), in_proj_covar=tensor([0.0967, 0.1022, 0.0834, 0.0993, 0.1031, 0.0931, 0.0773, 0.0854], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 12:24:38,616 INFO [train.py:901] (0/4) Epoch 27, batch 6750, loss[loss=0.1884, simple_loss=0.269, pruned_loss=0.0539, over 7651.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2816, pruned_loss=0.05771, over 1614236.82 frames. ], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:43,890 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.479e+02 3.006e+02 3.687e+02 6.813e+02, threshold=6.012e+02, percent-clipped=1.0 +2023-02-07 12:24:56,979 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 12:25:11,160 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216953.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:12,347 INFO [train.py:901] (0/4) Epoch 27, batch 6800, loss[loss=0.1782, simple_loss=0.2694, pruned_loss=0.04346, over 7980.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2824, pruned_loss=0.05818, over 1610888.85 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:24,102 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-07 12:25:32,310 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:39,478 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1006, 1.7275, 3.2131, 1.7285, 2.5488, 3.5418, 3.6170, 2.9862], + device='cuda:0'), covar=tensor([0.1196, 0.1888, 0.0470, 0.2040, 0.1259, 0.0314, 0.0686, 0.0663], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0324, 0.0292, 0.0318, 0.0321, 0.0275, 0.0438, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:25:41,670 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.71 vs. limit=5.0 +2023-02-07 12:25:43,596 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1785, 1.3894, 4.2719, 1.9187, 2.4357, 4.8189, 4.9629, 4.1777], + device='cuda:0'), covar=tensor([0.1267, 0.2193, 0.0287, 0.2057, 0.1298, 0.0221, 0.0523, 0.0600], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0325, 0.0292, 0.0319, 0.0321, 0.0276, 0.0438, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:25:47,365 INFO [train.py:901] (0/4) Epoch 27, batch 6850, loss[loss=0.2442, simple_loss=0.334, pruned_loss=0.07721, over 8043.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2844, pruned_loss=0.05942, over 1613794.67 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:52,582 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.408e+02 3.097e+02 3.751e+02 9.876e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-07 12:25:55,360 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217017.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:26:10,983 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 12:26:21,095 INFO [train.py:901] (0/4) Epoch 27, batch 6900, loss[loss=0.1972, simple_loss=0.2918, pruned_loss=0.05131, over 8299.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2847, pruned_loss=0.05896, over 1620994.57 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:26:29,745 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:30,512 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:33,610 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 12:26:56,824 INFO [train.py:901] (0/4) Epoch 27, batch 6950, loss[loss=0.2038, simple_loss=0.2903, pruned_loss=0.05864, over 8502.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2853, pruned_loss=0.05926, over 1618640.14 frames. ], batch size: 28, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:02,039 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.303e+02 2.670e+02 3.410e+02 6.861e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 12:27:15,666 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217132.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:27:19,478 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-07 12:27:23,884 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-07 12:27:30,834 INFO [train.py:901] (0/4) Epoch 27, batch 7000, loss[loss=0.2248, simple_loss=0.31, pruned_loss=0.06978, over 8465.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2848, pruned_loss=0.05895, over 1613040.34 frames. ], batch size: 29, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:46,174 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5217, 1.7887, 4.5902, 2.2850, 2.7673, 5.1461, 5.2662, 4.3225], + device='cuda:0'), covar=tensor([0.1190, 0.1921, 0.0272, 0.1935, 0.1115, 0.0254, 0.0654, 0.0662], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0324, 0.0292, 0.0318, 0.0321, 0.0276, 0.0438, 0.0306], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:27:49,476 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:28:05,134 INFO [train.py:901] (0/4) Epoch 27, batch 7050, loss[loss=0.2177, simple_loss=0.2923, pruned_loss=0.0716, over 8366.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2843, pruned_loss=0.05884, over 1609526.38 frames. ], batch size: 24, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:28:11,286 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.356e+02 3.046e+02 3.591e+02 8.726e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 12:28:40,063 INFO [train.py:901] (0/4) Epoch 27, batch 7100, loss[loss=0.1947, simple_loss=0.275, pruned_loss=0.05721, over 7649.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05888, over 1605833.20 frames. ], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:14,492 INFO [train.py:901] (0/4) Epoch 27, batch 7150, loss[loss=0.1746, simple_loss=0.2571, pruned_loss=0.04609, over 7555.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05884, over 1603641.45 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:14,869 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-07 12:29:16,708 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7910, 1.6507, 2.1276, 1.8740, 1.8961, 1.8622, 1.6276, 0.9387], + device='cuda:0'), covar=tensor([0.6320, 0.5033, 0.1928, 0.3200, 0.2470, 0.3884, 0.2774, 0.4345], + device='cuda:0'), in_proj_covar=tensor([0.0965, 0.1023, 0.0833, 0.0992, 0.1030, 0.0930, 0.0771, 0.0852], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 12:29:19,666 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.444e+02 3.123e+02 4.113e+02 1.134e+03, threshold=6.246e+02, percent-clipped=7.0 +2023-02-07 12:29:27,197 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6119, 2.0274, 2.9577, 1.5498, 2.2941, 2.0681, 1.7164, 2.3029], + device='cuda:0'), covar=tensor([0.1942, 0.2545, 0.0875, 0.4692, 0.1800, 0.3172, 0.2404, 0.2161], + device='cuda:0'), in_proj_covar=tensor([0.0541, 0.0634, 0.0565, 0.0669, 0.0659, 0.0612, 0.0565, 0.0645], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:29:27,885 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:29,687 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:39,080 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7894, 1.6717, 2.5322, 1.9980, 2.2773, 1.8482, 1.6099, 1.2011], + device='cuda:0'), covar=tensor([0.7960, 0.6559, 0.2596, 0.4743, 0.3715, 0.4848, 0.3234, 0.6385], + device='cuda:0'), in_proj_covar=tensor([0.0963, 0.1020, 0.0831, 0.0991, 0.1028, 0.0927, 0.0769, 0.0850], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-07 12:29:45,927 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:49,820 INFO [train.py:901] (0/4) Epoch 27, batch 7200, loss[loss=0.2457, simple_loss=0.3101, pruned_loss=0.09061, over 7931.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2842, pruned_loss=0.0595, over 1604731.49 frames. ], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:11,967 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217388.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:30:23,036 INFO [train.py:901] (0/4) Epoch 27, batch 7250, loss[loss=0.2006, simple_loss=0.2858, pruned_loss=0.0577, over 8034.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2843, pruned_loss=0.05929, over 1609002.82 frames. 
], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:23,856 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:28,398 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.296e+02 2.784e+02 3.610e+02 7.832e+02, threshold=5.568e+02, percent-clipped=2.0 +2023-02-07 12:30:28,631 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217413.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:30:45,938 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:49,264 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:58,440 INFO [train.py:901] (0/4) Epoch 27, batch 7300, loss[loss=0.1879, simple_loss=0.2778, pruned_loss=0.04902, over 7976.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2831, pruned_loss=0.05867, over 1606455.01 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:04,049 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:33,092 INFO [train.py:901] (0/4) Epoch 27, batch 7350, loss[loss=0.1897, simple_loss=0.283, pruned_loss=0.0482, over 8042.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2829, pruned_loss=0.05853, over 1606856.11 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:36,614 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:37,298 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6108, 1.4579, 1.7120, 1.3989, 0.9699, 1.5050, 1.4906, 1.0970], + device='cuda:0'), covar=tensor([0.0609, 0.1209, 0.1585, 0.1474, 0.0582, 0.1452, 0.0717, 0.0742], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0162, 0.0101, 0.0163, 0.0112, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 12:31:38,492 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.322e+02 2.888e+02 3.768e+02 6.651e+02, threshold=5.777e+02, percent-clipped=4.0 +2023-02-07 12:31:59,834 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 12:32:03,454 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0708, 1.6000, 1.3183, 1.6048, 1.2820, 1.1727, 1.3514, 1.4203], + device='cuda:0'), covar=tensor([0.1126, 0.0554, 0.1578, 0.0533, 0.0886, 0.1762, 0.0971, 0.0807], + device='cuda:0'), in_proj_covar=tensor([0.0363, 0.0243, 0.0345, 0.0315, 0.0306, 0.0350, 0.0353, 0.0327], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 12:32:07,059 INFO [train.py:901] (0/4) Epoch 27, batch 7400, loss[loss=0.2434, simple_loss=0.3201, pruned_loss=0.08333, over 8656.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2836, pruned_loss=0.05866, over 1610283.03 frames. ], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:19,117 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-07 12:32:23,935 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0296, 1.8532, 2.2013, 1.8136, 1.0448, 1.9161, 2.2728, 2.3156], + device='cuda:0'), covar=tensor([0.0448, 0.1147, 0.1461, 0.1302, 0.0551, 0.1290, 0.0599, 0.0552], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0152, 0.0189, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 12:32:28,668 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5950, 1.8151, 2.6522, 1.5296, 2.1591, 1.9111, 1.6898, 2.1986], + device='cuda:0'), covar=tensor([0.1775, 0.2432, 0.0761, 0.4126, 0.1577, 0.2769, 0.2105, 0.2044], + device='cuda:0'), in_proj_covar=tensor([0.0544, 0.0637, 0.0567, 0.0672, 0.0661, 0.0614, 0.0567, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:32:40,306 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.71 vs. limit=5.0 +2023-02-07 12:32:42,453 INFO [train.py:901] (0/4) Epoch 27, batch 7450, loss[loss=0.1829, simple_loss=0.2597, pruned_loss=0.05305, over 7187.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2844, pruned_loss=0.05924, over 1605930.18 frames. ], batch size: 16, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:47,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.478e+02 3.262e+02 4.062e+02 8.102e+02, threshold=6.523e+02, percent-clipped=5.0 +2023-02-07 12:32:58,342 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 12:33:16,132 INFO [train.py:901] (0/4) Epoch 27, batch 7500, loss[loss=0.2502, simple_loss=0.3153, pruned_loss=0.09251, over 8337.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2841, pruned_loss=0.05929, over 1604153.04 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:28,161 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-07 12:33:34,961 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:46,528 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:51,445 INFO [train.py:901] (0/4) Epoch 27, batch 7550, loss[loss=0.2187, simple_loss=0.2934, pruned_loss=0.07197, over 7698.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2836, pruned_loss=0.05947, over 1601470.49 frames. 
], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:56,745 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.428e+02 3.024e+02 3.911e+02 8.560e+02, threshold=6.047e+02, percent-clipped=1.0 +2023-02-07 12:34:01,687 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3183, 1.1863, 2.3830, 1.3587, 2.1707, 2.4897, 2.7009, 2.1379], + device='cuda:0'), covar=tensor([0.1315, 0.1614, 0.0433, 0.2033, 0.0790, 0.0453, 0.0866, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0322, 0.0291, 0.0317, 0.0319, 0.0276, 0.0437, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:34:03,655 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:09,252 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 12:34:21,815 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:25,179 INFO [train.py:901] (0/4) Epoch 27, batch 7600, loss[loss=0.1901, simple_loss=0.2784, pruned_loss=0.05091, over 7980.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2836, pruned_loss=0.05901, over 1603010.47 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:34:53,128 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.63 vs. limit=5.0 +2023-02-07 12:35:01,492 INFO [train.py:901] (0/4) Epoch 27, batch 7650, loss[loss=0.2022, simple_loss=0.2971, pruned_loss=0.05365, over 8496.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2831, pruned_loss=0.05866, over 1604760.66 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:06,791 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.541e+02 2.896e+02 3.920e+02 6.720e+02, threshold=5.793e+02, percent-clipped=4.0 +2023-02-07 12:35:18,899 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2692, 3.1170, 2.9139, 1.5350, 2.8240, 2.9277, 2.8447, 2.7802], + device='cuda:0'), covar=tensor([0.1096, 0.0842, 0.1240, 0.4583, 0.1160, 0.1273, 0.1597, 0.1135], + device='cuda:0'), in_proj_covar=tensor([0.0539, 0.0462, 0.0447, 0.0556, 0.0442, 0.0464, 0.0440, 0.0406], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:35:35,075 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217854.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:35:35,687 INFO [train.py:901] (0/4) Epoch 27, batch 7700, loss[loss=0.1847, simple_loss=0.2684, pruned_loss=0.05051, over 7816.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2832, pruned_loss=0.05828, over 1606154.79 frames. ], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:42,366 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:36:05,138 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 12:36:10,561 INFO [train.py:901] (0/4) Epoch 27, batch 7750, loss[loss=0.226, simple_loss=0.3036, pruned_loss=0.07417, over 8140.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2822, pruned_loss=0.05786, over 1608002.13 frames. 
], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:14,308 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 12:36:15,951 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.515e+02 3.033e+02 3.634e+02 8.452e+02, threshold=6.066e+02, percent-clipped=4.0 +2023-02-07 12:36:18,066 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217916.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:36:27,433 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7606, 1.5151, 3.2819, 1.3981, 2.4975, 3.5789, 3.7994, 2.9240], + device='cuda:0'), covar=tensor([0.1504, 0.2076, 0.0442, 0.2395, 0.1142, 0.0349, 0.0841, 0.0829], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0322, 0.0290, 0.0317, 0.0318, 0.0275, 0.0435, 0.0304], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:36:45,570 INFO [train.py:901] (0/4) Epoch 27, batch 7800, loss[loss=0.1867, simple_loss=0.269, pruned_loss=0.05219, over 8088.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2812, pruned_loss=0.05718, over 1610142.74 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:55,070 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:04,140 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9820, 2.8839, 2.6974, 1.4436, 2.6120, 2.6756, 2.6230, 2.6044], + device='cuda:0'), covar=tensor([0.1085, 0.0871, 0.1195, 0.4397, 0.1120, 0.1253, 0.1553, 0.1079], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0461, 0.0446, 0.0554, 0.0440, 0.0464, 0.0438, 0.0405], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:37:15,325 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-218000.pt +2023-02-07 12:37:19,654 INFO [train.py:901] (0/4) Epoch 27, batch 7850, loss[loss=0.2364, simple_loss=0.3131, pruned_loss=0.07982, over 8351.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2817, pruned_loss=0.05775, over 1612103.15 frames. ], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:37:24,955 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.277e+02 2.828e+02 3.912e+02 8.712e+02, threshold=5.655e+02, percent-clipped=7.0 +2023-02-07 12:37:33,479 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:52,839 INFO [train.py:901] (0/4) Epoch 27, batch 7900, loss[loss=0.1873, simple_loss=0.2741, pruned_loss=0.05027, over 8079.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2808, pruned_loss=0.05718, over 1610547.37 frames. 
], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:37:52,991 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6514, 1.6280, 4.8490, 1.9169, 4.3220, 4.0126, 4.4264, 4.2755], + device='cuda:0'), covar=tensor([0.0537, 0.4701, 0.0448, 0.4142, 0.0931, 0.0964, 0.0551, 0.0605], + device='cuda:0'), in_proj_covar=tensor([0.0677, 0.0663, 0.0731, 0.0653, 0.0742, 0.0631, 0.0635, 0.0717], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-07 12:37:58,506 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 12:38:08,979 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7362, 2.1499, 3.5857, 1.6083, 1.7337, 3.5283, 0.5899, 2.0418], + device='cuda:0'), covar=tensor([0.1551, 0.1117, 0.0216, 0.1803, 0.2360, 0.0233, 0.1932, 0.1226], + device='cuda:0'), in_proj_covar=tensor([0.0201, 0.0206, 0.0137, 0.0224, 0.0278, 0.0147, 0.0173, 0.0200], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-07 12:38:09,510 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7131, 1.4893, 1.9267, 1.5507, 0.8443, 1.7034, 2.0703, 1.9459], + device='cuda:0'), covar=tensor([0.0520, 0.1355, 0.1733, 0.1565, 0.0647, 0.1503, 0.0697, 0.0640], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-07 12:38:25,725 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9662, 1.6578, 1.3841, 1.5105, 1.3094, 1.2963, 1.3104, 1.2991], + device='cuda:0'), covar=tensor([0.1379, 0.0534, 0.1475, 0.0692, 0.0866, 0.1695, 0.0962, 0.0953], + device='cuda:0'), in_proj_covar=tensor([0.0359, 0.0242, 0.0341, 0.0313, 0.0305, 0.0346, 0.0349, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-07 12:38:26,203 INFO [train.py:901] (0/4) Epoch 27, batch 7950, loss[loss=0.2345, simple_loss=0.3148, pruned_loss=0.07711, over 8314.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2809, pruned_loss=0.05732, over 1610724.47 frames. 
], batch size: 25, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:38:31,702 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.553e+02 3.230e+02 4.059e+02 8.354e+02, threshold=6.459e+02, percent-clipped=5.0 +2023-02-07 12:38:35,258 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:37,426 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:50,499 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:51,659 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9996, 1.5295, 3.4383, 1.5493, 2.4045, 3.7599, 3.8545, 3.2488], + device='cuda:0'), covar=tensor([0.1223, 0.1853, 0.0333, 0.2140, 0.1074, 0.0227, 0.0459, 0.0554], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0323, 0.0291, 0.0318, 0.0320, 0.0276, 0.0437, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:38:51,965 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 12:38:53,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:59,466 INFO [train.py:901] (0/4) Epoch 27, batch 8000, loss[loss=0.1751, simple_loss=0.2525, pruned_loss=0.04881, over 6372.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2813, pruned_loss=0.05745, over 1613665.80 frames. ], batch size: 14, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:05,390 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6952, 1.4145, 3.1633, 1.3520, 2.3709, 3.3856, 3.5579, 2.9272], + device='cuda:0'), covar=tensor([0.1349, 0.1908, 0.0361, 0.2275, 0.1061, 0.0268, 0.0485, 0.0549], + device='cuda:0'), in_proj_covar=tensor([0.0305, 0.0323, 0.0291, 0.0317, 0.0320, 0.0276, 0.0437, 0.0305], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-07 12:39:29,223 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:32,351 INFO [train.py:901] (0/4) Epoch 27, batch 8050, loss[loss=0.1781, simple_loss=0.2657, pruned_loss=0.04524, over 8460.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2817, pruned_loss=0.05782, over 1605336.87 frames. 
], batch size: 25, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:38,065 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.285e+02 2.948e+02 3.498e+02 7.136e+02, threshold=5.897e+02, percent-clipped=2.0 +2023-02-07 12:39:46,224 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:48,246 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218228.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:55,707 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-27.pt diff --git a/log/log-train-2023-02-05-17-58-35-1 b/log/log-train-2023-02-05-17-58-35-1 new file mode 100644 index 0000000000000000000000000000000000000000..4176c6ab5270e4a50291f5c1a90f42f081c2f017 --- /dev/null +++ b/log/log-train-2023-02-05-17-58-35-1 @@ -0,0 +1,24984 @@ +2023-02-05 17:58:35,365 INFO [train.py:973] (1/4) Training started +2023-02-05 17:58:35,366 INFO [train.py:983] (1/4) Device: cuda:1 +2023-02-05 17:58:35,412 INFO [train.py:992] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n07', 'IP address': '10.1.7.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-05 17:58:35,412 INFO [train.py:994] (1/4) About to create 
model +2023-02-05 17:58:36,040 INFO [zipformer.py:402] (1/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-05 17:58:36,052 INFO [train.py:998] (1/4) Number of model parameters: 20697573 +2023-02-05 17:58:51,148 INFO [train.py:1013] (1/4) Using DDP +2023-02-05 17:58:51,429 INFO [asr_datamodule.py:420] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-05 17:58:52,644 INFO [asr_datamodule.py:224] (1/4) Enable MUSAN +2023-02-05 17:58:52,645 INFO [asr_datamodule.py:225] (1/4) About to get Musan cuts +2023-02-05 17:58:54,252 INFO [asr_datamodule.py:249] (1/4) Enable SpecAugment +2023-02-05 17:58:54,252 INFO [asr_datamodule.py:250] (1/4) Time warp factor: 80 +2023-02-05 17:58:54,253 INFO [asr_datamodule.py:260] (1/4) Num frame mask: 10 +2023-02-05 17:58:54,253 INFO [asr_datamodule.py:273] (1/4) About to create train dataset +2023-02-05 17:58:54,253 INFO [asr_datamodule.py:300] (1/4) Using DynamicBucketingSampler. +2023-02-05 17:58:54,274 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:58:56,355 INFO [asr_datamodule.py:316] (1/4) About to create train dataloader +2023-02-05 17:58:56,356 INFO [asr_datamodule.py:430] (1/4) About to get dev-clean cuts +2023-02-05 17:58:56,369 INFO [asr_datamodule.py:437] (1/4) About to get dev-other cuts +2023-02-05 17:58:56,393 INFO [asr_datamodule.py:347] (1/4) About to create dev dataset +2023-02-05 17:58:56,740 INFO [asr_datamodule.py:364] (1/4) About to create dev dataloader +2023-02-05 17:59:06,122 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:59:11,994 INFO [train.py:901] (1/4) Epoch 1, batch 0, loss[loss=7.264, simple_loss=6.579, pruned_loss=6.84, over 8251.00 frames. ], tot_loss[loss=7.264, simple_loss=6.579, pruned_loss=6.84, over 8251.00 frames. ], batch size: 24, lr: 2.50e-02, grad_scale: 2.0 +2023-02-05 17:59:11,994 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 17:59:24,182 INFO [train.py:935] (1/4) Epoch 1, validation: loss=6.888, simple_loss=6.229, pruned_loss=6.575, over 944034.00 frames. +2023-02-05 17:59:24,183 INFO [train.py:936] (1/4) Maximum memory allocated so far is 5591MB +2023-02-05 17:59:31,383 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=3.89 vs. limit=2.0 +2023-02-05 17:59:37,849 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 17:59:48,719 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=6.07 vs. limit=2.0 +2023-02-05 17:59:55,493 INFO [train.py:901] (1/4) Epoch 1, batch 50, loss[loss=1.38, simple_loss=1.223, pruned_loss=1.4, over 8329.00 frames. ], tot_loss[loss=2.187, simple_loss=1.977, pruned_loss=2.012, over 366493.11 frames. ], batch size: 25, lr: 2.75e-02, grad_scale: 0.25 +2023-02-05 17:59:56,152 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:00:12,635 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-05 18:00:13,733 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:28,695 INFO [train.py:901] (1/4) Epoch 1, batch 100, loss[loss=1.174, simple_loss=1.009, pruned_loss=1.308, over 8587.00 frames. ], tot_loss[loss=1.657, simple_loss=1.477, pruned_loss=1.628, over 642331.42 frames. ], batch size: 49, lr: 3.00e-02, grad_scale: 0.0625 +2023-02-05 18:00:28,822 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:32,052 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 18:00:32,810 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.087e+01 6.689e+01 1.862e+02 6.030e+02 6.185e+04, threshold=3.723e+02, percent-clipped=0.0 +2023-02-05 18:00:40,832 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=5.55 vs. limit=2.0 +2023-02-05 18:01:00,477 INFO [train.py:901] (1/4) Epoch 1, batch 150, loss[loss=1.087, simple_loss=0.9274, pruned_loss=1.156, over 8466.00 frames. ], tot_loss[loss=1.416, simple_loss=1.245, pruned_loss=1.443, over 862597.96 frames. ], batch size: 27, lr: 3.25e-02, grad_scale: 0.0625 +2023-02-05 18:01:20,192 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=2.76 vs. limit=2.0 +2023-02-05 18:01:34,585 INFO [train.py:901] (1/4) Epoch 1, batch 200, loss[loss=1.002, simple_loss=0.8486, pruned_loss=1.031, over 8363.00 frames. ], tot_loss[loss=1.277, simple_loss=1.112, pruned_loss=1.311, over 1032380.55 frames. ], batch size: 24, lr: 3.50e-02, grad_scale: 0.125 +2023-02-05 18:01:37,982 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.848e+01 5.119e+01 6.630e+01 8.708e+01 3.236e+02, threshold=1.326e+02, percent-clipped=1.0 +2023-02-05 18:02:05,442 INFO [train.py:901] (1/4) Epoch 1, batch 250, loss[loss=1.034, simple_loss=0.8716, pruned_loss=1.018, over 8584.00 frames. ], tot_loss[loss=1.188, simple_loss=1.027, pruned_loss=1.213, over 1159568.60 frames. ], batch size: 31, lr: 3.75e-02, grad_scale: 0.125 +2023-02-05 18:02:09,794 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=27.87 vs. limit=5.0 +2023-02-05 18:02:14,814 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 18:02:22,946 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 18:02:33,913 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=18.32 vs. limit=5.0 +2023-02-05 18:02:37,911 INFO [train.py:901] (1/4) Epoch 1, batch 300, loss[loss=0.8431, simple_loss=0.7, pruned_loss=0.8315, over 8085.00 frames. ], tot_loss[loss=1.129, simple_loss=0.968, pruned_loss=1.141, over 1264912.80 frames. ], batch size: 21, lr: 4.00e-02, grad_scale: 0.25 +2023-02-05 18:02:42,059 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=4.00 vs. 
limit=2.0 +2023-02-05 18:02:42,325 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=306.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:02:42,690 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.041e+01 5.570e+01 7.201e+01 9.677e+01 1.807e+02, threshold=1.440e+02, percent-clipped=6.0 +2023-02-05 18:02:46,519 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-05 18:02:47,401 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=314.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:03:00,231 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.35 vs. limit=2.0 +2023-02-05 18:03:10,248 INFO [train.py:901] (1/4) Epoch 1, batch 350, loss[loss=0.9444, simple_loss=0.7793, pruned_loss=0.9053, over 8308.00 frames. ], tot_loss[loss=1.09, simple_loss=0.9272, pruned_loss=1.089, over 1344291.99 frames. ], batch size: 25, lr: 4.25e-02, grad_scale: 0.25 +2023-02-05 18:03:26,549 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.76 vs. limit=5.0 +2023-02-05 18:03:42,311 INFO [train.py:901] (1/4) Epoch 1, batch 400, loss[loss=1.003, simple_loss=0.8237, pruned_loss=0.9328, over 8563.00 frames. ], tot_loss[loss=1.056, simple_loss=0.8917, pruned_loss=1.041, over 1400142.87 frames. ], batch size: 31, lr: 4.50e-02, grad_scale: 0.5 +2023-02-05 18:03:44,608 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=405.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:03:45,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 4.847e+01 5.714e+01 6.661e+01 8.261e+01 1.252e+02, threshold=1.332e+02, percent-clipped=0.0 +2023-02-05 18:03:55,269 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=421.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:04:05,708 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6695, 2.6772, 2.6447, 2.6648, 2.6531, 2.6787, 2.6743, 2.6711], + device='cuda:1'), covar=tensor([0.0066, 0.0065, 0.0061, 0.0071, 0.0066, 0.0063, 0.0065, 0.0070], + device='cuda:1'), in_proj_covar=tensor([0.0014, 0.0015, 0.0014, 0.0015, 0.0014, 0.0014, 0.0015, 0.0014], + device='cuda:1'), out_proj_covar=tensor([9.6141e-06, 9.7577e-06, 9.5819e-06, 9.3404e-06, 9.7828e-06, 9.3457e-06, + 9.8162e-06, 9.7360e-06], device='cuda:1') +2023-02-05 18:04:09,569 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8055, 3.8085, 3.8089, 3.7997, 3.7826, 3.8072, 3.8080, 3.8101], + device='cuda:1'), covar=tensor([0.0067, 0.0058, 0.0059, 0.0060, 0.0087, 0.0071, 0.0065, 0.0055], + device='cuda:1'), in_proj_covar=tensor([0.0014, 0.0014, 0.0014, 0.0015, 0.0014, 0.0015, 0.0014, 0.0014], + device='cuda:1'), out_proj_covar=tensor([9.4092e-06, 9.2868e-06, 9.4074e-06, 9.0735e-06, 9.4270e-06, 9.4842e-06, + 9.4478e-06, 9.1931e-06], device='cuda:1') +2023-02-05 18:04:11,520 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=445.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:04:15,510 INFO [train.py:901] (1/4) Epoch 1, batch 450, loss[loss=0.9341, simple_loss=0.7604, pruned_loss=0.8563, over 7656.00 frames. ], tot_loss[loss=1.036, simple_loss=0.8687, pruned_loss=1.006, over 1450031.13 frames. ], batch size: 19, lr: 4.75e-02, grad_scale: 0.5 +2023-02-05 18:04:45,732 INFO [train.py:901] (1/4) Epoch 1, batch 500, loss[loss=1.02, simple_loss=0.8288, pruned_loss=0.905, over 8243.00 frames. 
], tot_loss[loss=1.024, simple_loss=0.8527, pruned_loss=0.9766, over 1488734.62 frames. ], batch size: 24, lr: 4.99e-02, grad_scale: 1.0 +2023-02-05 18:04:49,468 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.283e+01 6.268e+01 7.626e+01 9.977e+01 2.238e+02, threshold=1.525e+02, percent-clipped=10.0 +2023-02-05 18:05:16,921 INFO [train.py:901] (1/4) Epoch 1, batch 550, loss[loss=0.9823, simple_loss=0.8068, pruned_loss=0.8238, over 8681.00 frames. ], tot_loss[loss=1.008, simple_loss=0.8357, pruned_loss=0.9405, over 1521153.63 frames. ], batch size: 34, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:22,218 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=560.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:33,859 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=580.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:39,262 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=586.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:47,865 INFO [train.py:901] (1/4) Epoch 1, batch 600, loss[loss=0.993, simple_loss=0.8195, pruned_loss=0.8007, over 8658.00 frames. ], tot_loss[loss=0.989, simple_loss=0.819, pruned_loss=0.8986, over 1540438.65 frames. ], batch size: 49, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:51,147 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 5.986e+01 8.101e+01 1.064e+02 1.512e+02 3.340e+02, threshold=2.128e+02, percent-clipped=22.0 +2023-02-05 18:05:51,950 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=608.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:57,484 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 18:06:12,877 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=8.27 vs. limit=5.0 +2023-02-05 18:06:15,537 INFO [train.py:901] (1/4) Epoch 1, batch 650, loss[loss=0.9258, simple_loss=0.7775, pruned_loss=0.6995, over 8492.00 frames. ], tot_loss[loss=0.9652, simple_loss=0.8002, pruned_loss=0.8506, over 1560934.51 frames. ], batch size: 39, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:20,642 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=658.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:31,074 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=677.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:44,418 INFO [train.py:901] (1/4) Epoch 1, batch 700, loss[loss=0.84, simple_loss=0.7033, pruned_loss=0.6257, over 8018.00 frames. ], tot_loss[loss=0.9383, simple_loss=0.7798, pruned_loss=0.8013, over 1573902.94 frames. ], batch size: 22, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:45,059 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=702.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:06:48,207 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 2.109e+02 3.132e+02 4.412e+02 1.990e+03, threshold=6.264e+02, percent-clipped=73.0 +2023-02-05 18:06:54,505 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. 
limit=2.0 +2023-02-05 18:07:08,504 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3827, 0.7722, 1.1431, 1.8246, 0.9044, 1.2140, 1.8388, 1.4516], + device='cuda:1'), covar=tensor([0.7917, 1.8214, 1.3068, 0.4213, 1.1263, 0.7367, 0.5792, 0.7957], + device='cuda:1'), in_proj_covar=tensor([0.0057, 0.0061, 0.0066, 0.0043, 0.0053, 0.0049, 0.0048, 0.0054], + device='cuda:1'), out_proj_covar=tensor([3.2304e-05, 4.5814e-05, 4.4696e-05, 2.2479e-05, 3.7465e-05, 2.7060e-05, + 2.7978e-05, 3.0013e-05], device='cuda:1') +2023-02-05 18:07:14,475 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=749.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:07:15,365 INFO [train.py:901] (1/4) Epoch 1, batch 750, loss[loss=0.8544, simple_loss=0.724, pruned_loss=0.6081, over 8470.00 frames. ], tot_loss[loss=0.9144, simple_loss=0.7622, pruned_loss=0.7568, over 1583359.21 frames. ], batch size: 25, lr: 4.97e-02, grad_scale: 1.0 +2023-02-05 18:07:25,616 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 18:07:26,842 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=773.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:07:32,308 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 18:07:43,635 INFO [train.py:901] (1/4) Epoch 1, batch 800, loss[loss=0.6687, simple_loss=0.5687, pruned_loss=0.4645, over 7652.00 frames. ], tot_loss[loss=0.8839, simple_loss=0.7399, pruned_loss=0.7089, over 1589716.37 frames. ], batch size: 19, lr: 4.97e-02, grad_scale: 2.0 +2023-02-05 18:07:46,599 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.528e+02 3.354e+02 4.455e+02 1.086e+03, threshold=6.708e+02, percent-clipped=4.0 +2023-02-05 18:07:51,311 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=816.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:07:59,138 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 18:08:05,166 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=841.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:11,169 INFO [train.py:901] (1/4) Epoch 1, batch 850, loss[loss=0.8005, simple_loss=0.6832, pruned_loss=0.5439, over 8571.00 frames. ], tot_loss[loss=0.8585, simple_loss=0.7215, pruned_loss=0.6679, over 1597531.02 frames. ], batch size: 31, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:22,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=864.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:22,895 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=865.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:42,864 INFO [train.py:901] (1/4) Epoch 1, batch 900, loss[loss=0.6933, simple_loss=0.5956, pruned_loss=0.4587, over 7417.00 frames. ], tot_loss[loss=0.8313, simple_loss=0.7017, pruned_loss=0.6286, over 1597829.58 frames. ], batch size: 17, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:43,649 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.10 vs. 
limit=5.0 +2023-02-05 18:08:46,409 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 3.070e+02 3.818e+02 4.702e+02 7.623e+02, threshold=7.636e+02, percent-clipped=5.0 +2023-02-05 18:08:55,929 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=924.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:58,990 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=930.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:09:05,469 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.29 vs. limit=5.0 +2023-02-05 18:09:10,089 INFO [train.py:901] (1/4) Epoch 1, batch 950, loss[loss=0.7313, simple_loss=0.623, pruned_loss=0.4856, over 7800.00 frames. ], tot_loss[loss=0.8065, simple_loss=0.6835, pruned_loss=0.5934, over 1597289.85 frames. ], batch size: 20, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:09:10,748 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=952.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:26,428 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 18:09:37,674 INFO [train.py:901] (1/4) Epoch 1, batch 1000, loss[loss=0.7123, simple_loss=0.6225, pruned_loss=0.4461, over 8029.00 frames. ], tot_loss[loss=0.7851, simple_loss=0.6685, pruned_loss=0.5623, over 1600873.98 frames. ], batch size: 22, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:09:40,943 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 3.215e+02 4.159e+02 4.799e+02 1.770e+03, threshold=8.319e+02, percent-clipped=6.0 +2023-02-05 18:09:52,916 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1029.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:53,907 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 18:09:59,182 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1039.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:02,590 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1045.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:05,090 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 18:10:05,577 INFO [train.py:901] (1/4) Epoch 1, batch 1050, loss[loss=0.7019, simple_loss=0.6116, pruned_loss=0.4381, over 8431.00 frames. ], tot_loss[loss=0.7677, simple_loss=0.657, pruned_loss=0.5355, over 1608888.76 frames. ], batch size: 49, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:10:07,174 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1054.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:14,045 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1067.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:33,038 INFO [train.py:901] (1/4) Epoch 1, batch 1100, loss[loss=0.737, simple_loss=0.6478, pruned_loss=0.4493, over 8361.00 frames. ], tot_loss[loss=0.7495, simple_loss=0.644, pruned_loss=0.5107, over 1612159.48 frames. 
], batch size: 24, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:10:36,085 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.463e+02 4.480e+02 5.452e+02 1.232e+03, threshold=8.959e+02, percent-clipped=3.0 +2023-02-05 18:10:43,740 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1120.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:56,865 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1145.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:59,908 INFO [train.py:901] (1/4) Epoch 1, batch 1150, loss[loss=0.7116, simple_loss=0.6134, pruned_loss=0.4447, over 8511.00 frames. ], tot_loss[loss=0.7307, simple_loss=0.6305, pruned_loss=0.4875, over 1610057.59 frames. ], batch size: 28, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:11:01,587 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 18:11:11,750 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1171.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:11:27,719 INFO [train.py:901] (1/4) Epoch 1, batch 1200, loss[loss=0.673, simple_loss=0.5946, pruned_loss=0.4011, over 8499.00 frames. ], tot_loss[loss=0.7172, simple_loss=0.6213, pruned_loss=0.4687, over 1612006.46 frames. ], batch size: 28, lr: 4.93e-02, grad_scale: 4.0 +2023-02-05 18:11:30,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.424e+02 4.173e+02 5.178e+02 8.029e+02, threshold=8.346e+02, percent-clipped=0.0 +2023-02-05 18:11:32,137 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1209.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:11:56,770 INFO [train.py:901] (1/4) Epoch 1, batch 1250, loss[loss=0.6584, simple_loss=0.5861, pruned_loss=0.3855, over 8495.00 frames. ], tot_loss[loss=0.703, simple_loss=0.6113, pruned_loss=0.451, over 1614366.47 frames. ], batch size: 26, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:21,158 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1295.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:24,253 INFO [train.py:901] (1/4) Epoch 1, batch 1300, loss[loss=0.7442, simple_loss=0.6555, pruned_loss=0.4404, over 8449.00 frames. ], tot_loss[loss=0.6911, simple_loss=0.6036, pruned_loss=0.4354, over 1619071.27 frames. 
], batch size: 27, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:24,440 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1301.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:27,418 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 3.917e+02 4.747e+02 6.152e+02 9.080e+02, threshold=9.493e+02, percent-clipped=1.0 +2023-02-05 18:12:34,721 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1320.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:36,262 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1323.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:36,736 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1324.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:37,916 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1326.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:51,937 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1348.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:53,373 INFO [train.py:901] (1/4) Epoch 1, batch 1350, loss[loss=0.6178, simple_loss=0.5415, pruned_loss=0.3661, over 7822.00 frames. ], tot_loss[loss=0.6803, simple_loss=0.596, pruned_loss=0.4221, over 1618793.74 frames. ], batch size: 20, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:11,913 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0 +2023-02-05 18:13:22,437 INFO [train.py:901] (1/4) Epoch 1, batch 1400, loss[loss=0.6174, simple_loss=0.5355, pruned_loss=0.3689, over 7789.00 frames. ], tot_loss[loss=0.6697, simple_loss=0.5888, pruned_loss=0.4095, over 1620891.95 frames. ], batch size: 19, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:25,821 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.466e+02 4.520e+02 5.912e+02 1.396e+03, threshold=9.040e+02, percent-clipped=6.0 +2023-02-05 18:13:50,929 INFO [train.py:901] (1/4) Epoch 1, batch 1450, loss[loss=0.6444, simple_loss=0.581, pruned_loss=0.3641, over 8496.00 frames. ], tot_loss[loss=0.6609, simple_loss=0.5829, pruned_loss=0.3989, over 1621344.63 frames. ], batch size: 28, lr: 4.90e-02, grad_scale: 4.0 +2023-02-05 18:13:51,608 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1452.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:13:54,962 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 18:14:21,306 INFO [train.py:901] (1/4) Epoch 1, batch 1500, loss[loss=0.6714, simple_loss=0.5894, pruned_loss=0.391, over 7142.00 frames. ], tot_loss[loss=0.6543, simple_loss=0.5787, pruned_loss=0.3903, over 1622200.61 frames. ], batch size: 72, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:14:24,736 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.084e+02 4.059e+02 4.884e+02 5.820e+02 1.191e+03, threshold=9.769e+02, percent-clipped=4.0 +2023-02-05 18:14:29,246 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1515.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:14:50,473 INFO [train.py:901] (1/4) Epoch 1, batch 1550, loss[loss=0.5674, simple_loss=0.5155, pruned_loss=0.3156, over 8086.00 frames. ], tot_loss[loss=0.6428, simple_loss=0.5709, pruned_loss=0.3788, over 1621015.10 frames. 
], batch size: 21, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:15:08,627 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1580.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:10,873 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1584.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:18,490 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5878, 2.1465, 3.1350, 2.5066, 2.1341, 3.5923, 4.0077, 3.5393], + device='cuda:1'), covar=tensor([0.2579, 0.3282, 0.0491, 0.1811, 0.1837, 0.0334, 0.0178, 0.0334], + device='cuda:1'), in_proj_covar=tensor([0.0128, 0.0140, 0.0077, 0.0117, 0.0104, 0.0069, 0.0062, 0.0081], + device='cuda:1'), out_proj_covar=tensor([9.1397e-05, 1.0646e-04, 4.3327e-05, 7.5971e-05, 7.0460e-05, 4.0048e-05, + 3.4240e-05, 4.6320e-05], device='cuda:1') +2023-02-05 18:15:20,755 INFO [train.py:901] (1/4) Epoch 1, batch 1600, loss[loss=0.6381, simple_loss=0.5732, pruned_loss=0.3587, over 8317.00 frames. ], tot_loss[loss=0.6364, simple_loss=0.5667, pruned_loss=0.3714, over 1623258.38 frames. ], batch size: 25, lr: 4.88e-02, grad_scale: 8.0 +2023-02-05 18:15:24,002 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1605.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:24,967 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.844e+02 4.893e+02 6.465e+02 8.597e+02 2.177e+03, threshold=1.293e+03, percent-clipped=12.0 +2023-02-05 18:15:31,426 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.6621, 1.3647, 3.4978, 2.4597, 3.5174, 3.1411, 3.0759, 3.3898], + device='cuda:1'), covar=tensor([0.0396, 0.3661, 0.0494, 0.1172, 0.0492, 0.0492, 0.0622, 0.0623], + device='cuda:1'), in_proj_covar=tensor([0.0042, 0.0112, 0.0057, 0.0070, 0.0059, 0.0057, 0.0064, 0.0071], + device='cuda:1'), out_proj_covar=tensor([2.5403e-05, 7.3418e-05, 3.2264e-05, 4.6114e-05, 3.2357e-05, 3.3137e-05, + 3.7992e-05, 4.1670e-05], device='cuda:1') +2023-02-05 18:15:37,779 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1629.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:38,278 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1630.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:50,677 INFO [train.py:901] (1/4) Epoch 1, batch 1650, loss[loss=0.6742, simple_loss=0.6085, pruned_loss=0.3759, over 8602.00 frames. ], tot_loss[loss=0.6301, simple_loss=0.5627, pruned_loss=0.3643, over 1624583.62 frames. ], batch size: 39, lr: 4.87e-02, grad_scale: 8.0 +2023-02-05 18:16:21,963 INFO [train.py:901] (1/4) Epoch 1, batch 1700, loss[loss=0.5992, simple_loss=0.5303, pruned_loss=0.3402, over 8327.00 frames. ], tot_loss[loss=0.6191, simple_loss=0.5557, pruned_loss=0.3541, over 1620804.06 frames. ], batch size: 25, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:16:25,350 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.633e+02 4.287e+02 5.230e+02 6.455e+02 2.107e+03, threshold=1.046e+03, percent-clipped=2.0 +2023-02-05 18:16:51,248 INFO [train.py:901] (1/4) Epoch 1, batch 1750, loss[loss=0.6179, simple_loss=0.5631, pruned_loss=0.3393, over 8504.00 frames. ], tot_loss[loss=0.6135, simple_loss=0.5525, pruned_loss=0.348, over 1620375.83 frames. ], batch size: 28, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:17:06,014 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.35 vs. 
limit=2.0 +2023-02-05 18:17:18,058 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1796.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:17:21,123 INFO [train.py:901] (1/4) Epoch 1, batch 1800, loss[loss=0.5968, simple_loss=0.5495, pruned_loss=0.3237, over 8178.00 frames. ], tot_loss[loss=0.6056, simple_loss=0.5475, pruned_loss=0.3407, over 1615431.61 frames. ], batch size: 23, lr: 4.85e-02, grad_scale: 8.0 +2023-02-05 18:17:24,721 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.688e+02 4.554e+02 5.596e+02 6.733e+02 1.418e+03, threshold=1.119e+03, percent-clipped=4.0 +2023-02-05 18:17:52,108 INFO [train.py:901] (1/4) Epoch 1, batch 1850, loss[loss=0.4515, simple_loss=0.4298, pruned_loss=0.2364, over 7698.00 frames. ], tot_loss[loss=0.6001, simple_loss=0.5445, pruned_loss=0.335, over 1619605.13 frames. ], batch size: 18, lr: 4.84e-02, grad_scale: 8.0 +2023-02-05 18:17:55,072 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1856.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:06,748 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1875.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:13,282 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1886.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:18:14,316 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:21,902 INFO [train.py:901] (1/4) Epoch 1, batch 1900, loss[loss=0.5577, simple_loss=0.5209, pruned_loss=0.2976, over 8347.00 frames. ], tot_loss[loss=0.5974, simple_loss=0.5436, pruned_loss=0.3314, over 1619777.54 frames. ], batch size: 26, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:25,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.326e+02 4.483e+02 5.242e+02 7.443e+02 2.270e+03, threshold=1.048e+03, percent-clipped=7.0 +2023-02-05 18:18:27,946 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1911.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:27,957 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1911.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:18:37,733 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:45,003 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 18:18:52,608 INFO [train.py:901] (1/4) Epoch 1, batch 1950, loss[loss=0.5642, simple_loss=0.519, pruned_loss=0.305, over 8751.00 frames. ], tot_loss[loss=0.5912, simple_loss=0.5394, pruned_loss=0.3261, over 1619109.61 frames. ], batch size: 30, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:55,557 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 18:19:05,704 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:19:11,329 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 18:19:23,720 INFO [train.py:901] (1/4) Epoch 1, batch 2000, loss[loss=0.6076, simple_loss=0.5438, pruned_loss=0.3357, over 8487.00 frames. ], tot_loss[loss=0.5872, simple_loss=0.537, pruned_loss=0.3223, over 1620923.71 frames. 
], batch size: 29, lr: 4.82e-02, grad_scale: 8.0 +2023-02-05 18:19:27,548 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.734e+02 4.600e+02 5.655e+02 7.771e+02 1.691e+03, threshold=1.131e+03, percent-clipped=5.0 +2023-02-05 18:19:50,354 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2043.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:19:56,661 INFO [train.py:901] (1/4) Epoch 1, batch 2050, loss[loss=0.5707, simple_loss=0.5353, pruned_loss=0.3031, over 8599.00 frames. ], tot_loss[loss=0.5766, simple_loss=0.5308, pruned_loss=0.3141, over 1620880.58 frames. ], batch size: 49, lr: 4.81e-02, grad_scale: 8.0 +2023-02-05 18:19:58,197 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6584, 1.8697, 3.4366, 2.2849, 2.3852, 4.6455, 4.4095, 3.9496], + device='cuda:1'), covar=tensor([0.3162, 0.4081, 0.0445, 0.2981, 0.1902, 0.0262, 0.0212, 0.0365], + device='cuda:1'), in_proj_covar=tensor([0.0170, 0.0183, 0.0095, 0.0170, 0.0143, 0.0085, 0.0081, 0.0100], + device='cuda:1'), out_proj_covar=tensor([1.1921e-04, 1.3042e-04, 5.8222e-05, 1.1288e-04, 1.0164e-04, 5.3775e-05, + 4.7913e-05, 6.3234e-05], device='cuda:1') +2023-02-05 18:20:21,044 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2088.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:20:29,063 INFO [train.py:901] (1/4) Epoch 1, batch 2100, loss[loss=0.5327, simple_loss=0.5033, pruned_loss=0.2811, over 8105.00 frames. ], tot_loss[loss=0.5701, simple_loss=0.5269, pruned_loss=0.3088, over 1620693.73 frames. ], batch size: 21, lr: 4.80e-02, grad_scale: 16.0 +2023-02-05 18:20:32,707 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.532e+02 4.654e+02 5.875e+02 8.240e+02 2.515e+03, threshold=1.175e+03, percent-clipped=11.0 +2023-02-05 18:21:01,639 INFO [train.py:901] (1/4) Epoch 1, batch 2150, loss[loss=0.4822, simple_loss=0.4538, pruned_loss=0.2553, over 7788.00 frames. ], tot_loss[loss=0.5592, simple_loss=0.5207, pruned_loss=0.3005, over 1615957.76 frames. ], batch size: 19, lr: 4.79e-02, grad_scale: 16.0 +2023-02-05 18:21:09,732 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=2.00 vs. limit=2.0 +2023-02-05 18:21:11,748 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2167.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:21:29,899 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2192.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:21:35,015 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:35,570 INFO [train.py:901] (1/4) Epoch 1, batch 2200, loss[loss=0.6093, simple_loss=0.5555, pruned_loss=0.3316, over 8635.00 frames. ], tot_loss[loss=0.5532, simple_loss=0.518, pruned_loss=0.2955, over 1616363.20 frames. 
], batch size: 39, lr: 4.78e-02, grad_scale: 16.0 +2023-02-05 18:21:39,329 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.979e+02 3.885e+02 5.100e+02 6.280e+02 1.293e+03, threshold=1.020e+03, percent-clipped=3.0 +2023-02-05 18:21:46,988 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:55,779 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:22:07,881 INFO [train.py:901] (1/4) Epoch 1, batch 2250, loss[loss=0.5384, simple_loss=0.5328, pruned_loss=0.272, over 8255.00 frames. ], tot_loss[loss=0.5483, simple_loss=0.5156, pruned_loss=0.2915, over 1615790.84 frames. ], batch size: 24, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:30,951 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6680, 1.5983, 2.6430, 1.7471, 2.0147, 2.8844, 3.2924, 2.7620], + device='cuda:1'), covar=tensor([0.2397, 0.2919, 0.0431, 0.2504, 0.1359, 0.0375, 0.0254, 0.0407], + device='cuda:1'), in_proj_covar=tensor([0.0171, 0.0183, 0.0098, 0.0169, 0.0146, 0.0084, 0.0081, 0.0099], + device='cuda:1'), out_proj_covar=tensor([1.2249e-04, 1.2969e-04, 6.5995e-05, 1.1360e-04, 1.0826e-04, 5.5481e-05, + 5.2542e-05, 6.3173e-05], device='cuda:1') +2023-02-05 18:22:41,043 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2299.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:22:42,094 INFO [train.py:901] (1/4) Epoch 1, batch 2300, loss[loss=0.5281, simple_loss=0.5111, pruned_loss=0.2726, over 8351.00 frames. ], tot_loss[loss=0.5453, simple_loss=0.5143, pruned_loss=0.2889, over 1620057.41 frames. ], batch size: 25, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:45,951 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.442e+02 5.272e+02 6.513e+02 7.975e+02 1.884e+03, threshold=1.303e+03, percent-clipped=9.0 +2023-02-05 18:22:51,189 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2315.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:22:56,956 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2324.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:03,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2334.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:08,004 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.41 vs. limit=5.0 +2023-02-05 18:23:09,679 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2344.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:23:12,276 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2347.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:14,702 INFO [train.py:901] (1/4) Epoch 1, batch 2350, loss[loss=0.5124, simple_loss=0.4917, pruned_loss=0.2666, over 8347.00 frames. ], tot_loss[loss=0.5412, simple_loss=0.5129, pruned_loss=0.2854, over 1623372.32 frames. 
], batch size: 26, lr: 4.76e-02, grad_scale: 16.0 +2023-02-05 18:23:19,250 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2358.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:21,696 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7697, 1.5662, 1.6737, 2.1281, 1.3080, 1.3974, 1.1556, 2.0080], + device='cuda:1'), covar=tensor([0.2300, 0.2321, 0.1810, 0.0658, 0.2737, 0.2738, 0.3200, 0.2004], + device='cuda:1'), in_proj_covar=tensor([0.0183, 0.0178, 0.0158, 0.0128, 0.0223, 0.0199, 0.0220, 0.0184], + device='cuda:1'), out_proj_covar=tensor([1.3616e-04, 1.3265e-04, 1.2580e-04, 8.9310e-05, 1.6231e-04, 1.4676e-04, + 1.6242e-04, 1.4031e-04], device='cuda:1') +2023-02-05 18:23:26,037 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2369.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:46,432 INFO [train.py:901] (1/4) Epoch 1, batch 2400, loss[loss=0.5055, simple_loss=0.5091, pruned_loss=0.2509, over 8495.00 frames. ], tot_loss[loss=0.5361, simple_loss=0.5097, pruned_loss=0.2817, over 1620268.61 frames. ], batch size: 29, lr: 4.75e-02, grad_scale: 16.0 +2023-02-05 18:23:50,350 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.591e+02 4.467e+02 5.905e+02 7.151e+02 1.301e+03, threshold=1.181e+03, percent-clipped=0.0 +2023-02-05 18:24:20,800 INFO [train.py:901] (1/4) Epoch 1, batch 2450, loss[loss=0.4867, simple_loss=0.4837, pruned_loss=0.2449, over 8027.00 frames. ], tot_loss[loss=0.5329, simple_loss=0.5077, pruned_loss=0.2794, over 1624811.77 frames. ], batch size: 22, lr: 4.74e-02, grad_scale: 16.0 +2023-02-05 18:24:52,776 INFO [train.py:901] (1/4) Epoch 1, batch 2500, loss[loss=0.4717, simple_loss=0.4476, pruned_loss=0.2478, over 7799.00 frames. ], tot_loss[loss=0.5288, simple_loss=0.5059, pruned_loss=0.2761, over 1623691.42 frames. ], batch size: 19, lr: 4.73e-02, grad_scale: 16.0 +2023-02-05 18:24:56,552 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.099e+02 5.238e+02 6.448e+02 8.237e+02 1.660e+03, threshold=1.290e+03, percent-clipped=6.0 +2023-02-05 18:24:57,984 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9917, 1.0863, 3.9154, 1.8599, 3.5499, 3.2211, 3.3456, 3.2322], + device='cuda:1'), covar=tensor([0.0191, 0.3603, 0.0179, 0.1146, 0.0291, 0.0300, 0.0369, 0.0431], + device='cuda:1'), in_proj_covar=tensor([0.0070, 0.0200, 0.0087, 0.0113, 0.0097, 0.0093, 0.0101, 0.0112], + device='cuda:1'), out_proj_covar=tensor([4.3750e-05, 1.2370e-04, 5.5997e-05, 7.6126e-05, 5.5256e-05, 5.2312e-05, + 6.1834e-05, 6.6717e-05], device='cuda:1') +2023-02-05 18:25:25,593 INFO [train.py:901] (1/4) Epoch 1, batch 2550, loss[loss=0.5002, simple_loss=0.4822, pruned_loss=0.2591, over 8205.00 frames. ], tot_loss[loss=0.526, simple_loss=0.5036, pruned_loss=0.2745, over 1619270.22 frames. ], batch size: 23, lr: 4.72e-02, grad_scale: 16.0 +2023-02-05 18:25:38,472 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2571.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:25:51,088 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2590.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:25:54,860 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2596.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:25:57,893 INFO [train.py:901] (1/4) Epoch 1, batch 2600, loss[loss=0.4493, simple_loss=0.469, pruned_loss=0.2148, over 8037.00 frames. 
], tot_loss[loss=0.5219, simple_loss=0.5018, pruned_loss=0.2711, over 1618763.15 frames. ], batch size: 22, lr: 4.71e-02, grad_scale: 16.0 +2023-02-05 18:25:59,373 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2603.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:26:01,611 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.388e+02 4.352e+02 5.534e+02 7.344e+02 1.370e+03, threshold=1.107e+03, percent-clipped=3.0 +2023-02-05 18:26:06,880 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2615.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:26:15,230 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2628.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:26:25,262 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 18:26:31,162 INFO [train.py:901] (1/4) Epoch 1, batch 2650, loss[loss=0.4848, simple_loss=0.4899, pruned_loss=0.2398, over 8759.00 frames. ], tot_loss[loss=0.517, simple_loss=0.4992, pruned_loss=0.2676, over 1620654.11 frames. ], batch size: 30, lr: 4.70e-02, grad_scale: 16.0 +2023-02-05 18:26:38,477 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=2.09 vs. limit=2.0 +2023-02-05 18:27:03,808 INFO [train.py:901] (1/4) Epoch 1, batch 2700, loss[loss=0.4704, simple_loss=0.473, pruned_loss=0.2339, over 7979.00 frames. ], tot_loss[loss=0.512, simple_loss=0.4954, pruned_loss=0.2644, over 1617028.78 frames. ], batch size: 21, lr: 4.69e-02, grad_scale: 16.0 +2023-02-05 18:27:04,569 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2702.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:27:05,219 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2703.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:27:08,304 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.214e+02 4.351e+02 5.311e+02 6.408e+02 1.471e+03, threshold=1.062e+03, percent-clipped=4.0 +2023-02-05 18:27:28,815 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.5506, 3.7211, 3.2614, 1.3954, 3.1274, 3.4605, 3.2981, 3.0215], + device='cuda:1'), covar=tensor([0.0565, 0.0331, 0.0471, 0.2831, 0.0404, 0.0325, 0.0751, 0.0546], + device='cuda:1'), in_proj_covar=tensor([0.0132, 0.0104, 0.0120, 0.0181, 0.0102, 0.0084, 0.0136, 0.0096], + device='cuda:1'), out_proj_covar=tensor([9.5898e-05, 8.7337e-05, 8.2219e-05, 1.2508e-04, 6.8701e-05, 6.0768e-05, + 1.0412e-04, 6.6109e-05], device='cuda:1') +2023-02-05 18:27:37,287 INFO [train.py:901] (1/4) Epoch 1, batch 2750, loss[loss=0.4997, simple_loss=0.4919, pruned_loss=0.2537, over 8292.00 frames. ], tot_loss[loss=0.5056, simple_loss=0.491, pruned_loss=0.2602, over 1609480.04 frames. ], batch size: 23, lr: 4.68e-02, grad_scale: 16.0 +2023-02-05 18:28:11,566 INFO [train.py:901] (1/4) Epoch 1, batch 2800, loss[loss=0.4342, simple_loss=0.4199, pruned_loss=0.2242, over 7685.00 frames. ], tot_loss[loss=0.5052, simple_loss=0.4917, pruned_loss=0.2595, over 1614745.18 frames. 
], batch size: 18, lr: 4.67e-02, grad_scale: 16.0 +2023-02-05 18:28:15,258 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.916e+02 4.898e+02 6.530e+02 2.276e+03, threshold=9.797e+02, percent-clipped=2.0 +2023-02-05 18:28:17,286 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4772, 1.8391, 2.7143, 1.6681, 2.1618, 3.1048, 3.3007, 2.6595], + device='cuda:1'), covar=tensor([0.2390, 0.1882, 0.0410, 0.2117, 0.1113, 0.0251, 0.0184, 0.0437], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0218, 0.0119, 0.0195, 0.0172, 0.0094, 0.0092, 0.0124], + device='cuda:1'), out_proj_covar=tensor([1.4755e-04, 1.5660e-04, 9.1568e-05, 1.3459e-04, 1.3457e-04, 6.8317e-05, + 6.8260e-05, 8.6717e-05], device='cuda:1') +2023-02-05 18:28:21,895 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2817.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:28:38,530 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2842.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:28:44,179 INFO [train.py:901] (1/4) Epoch 1, batch 2850, loss[loss=0.4473, simple_loss=0.4492, pruned_loss=0.2226, over 7654.00 frames. ], tot_loss[loss=0.5035, simple_loss=0.4905, pruned_loss=0.2583, over 1612584.52 frames. ], batch size: 19, lr: 4.66e-02, grad_scale: 16.0 +2023-02-05 18:29:18,794 INFO [train.py:901] (1/4) Epoch 1, batch 2900, loss[loss=0.6148, simple_loss=0.565, pruned_loss=0.3323, over 8606.00 frames. ], tot_loss[loss=0.4994, simple_loss=0.4878, pruned_loss=0.2556, over 1613116.18 frames. ], batch size: 49, lr: 4.65e-02, grad_scale: 16.0 +2023-02-05 18:29:22,681 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.417e+02 4.413e+02 5.664e+02 7.338e+02 1.737e+03, threshold=1.133e+03, percent-clipped=8.0 +2023-02-05 18:29:48,920 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 18:29:52,146 INFO [train.py:901] (1/4) Epoch 1, batch 2950, loss[loss=0.4434, simple_loss=0.4587, pruned_loss=0.2141, over 8333.00 frames. ], tot_loss[loss=0.4953, simple_loss=0.4856, pruned_loss=0.2526, over 1614109.44 frames. ], batch size: 25, lr: 4.64e-02, grad_scale: 16.0 +2023-02-05 18:29:54,921 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2955.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:30:25,914 INFO [train.py:901] (1/4) Epoch 1, batch 3000, loss[loss=0.4687, simple_loss=0.4641, pruned_loss=0.2367, over 7933.00 frames. ], tot_loss[loss=0.4924, simple_loss=0.4838, pruned_loss=0.2505, over 1612402.48 frames. ], batch size: 20, lr: 4.63e-02, grad_scale: 16.0 +2023-02-05 18:30:25,914 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 18:30:40,795 INFO [train.py:935] (1/4) Epoch 1, validation: loss=0.4518, simple_loss=0.5106, pruned_loss=0.1966, over 944034.00 frames. +2023-02-05 18:30:40,797 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6237MB +2023-02-05 18:30:43,954 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=2.02 vs. 
limit=2.0 +2023-02-05 18:30:44,896 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.692e+02 4.264e+02 5.642e+02 7.781e+02 1.743e+03, threshold=1.128e+03, percent-clipped=6.0 +2023-02-05 18:31:07,309 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3037.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:31:08,851 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-05 18:31:13,909 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3047.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:31:14,751 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8149, 1.7292, 3.2529, 3.6106, 2.4020, 1.0424, 1.3845, 2.3946], + device='cuda:1'), covar=tensor([0.3407, 0.2444, 0.0284, 0.0232, 0.1132, 0.2511, 0.2182, 0.1349], + device='cuda:1'), in_proj_covar=tensor([0.0204, 0.0148, 0.0076, 0.0094, 0.0158, 0.0162, 0.0155, 0.0170], + device='cuda:1'), out_proj_covar=tensor([1.2995e-04, 9.3687e-05, 4.7938e-05, 5.6883e-05, 9.7502e-05, 9.8520e-05, + 9.7091e-05, 1.0133e-04], device='cuda:1') +2023-02-05 18:31:16,515 INFO [train.py:901] (1/4) Epoch 1, batch 3050, loss[loss=0.4956, simple_loss=0.4842, pruned_loss=0.2535, over 7651.00 frames. ], tot_loss[loss=0.4896, simple_loss=0.482, pruned_loss=0.2486, over 1603401.73 frames. ], batch size: 19, lr: 4.62e-02, grad_scale: 16.0 +2023-02-05 18:31:20,856 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 18:31:30,842 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3073.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:31:47,475 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3098.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:31:49,293 INFO [train.py:901] (1/4) Epoch 1, batch 3100, loss[loss=0.4697, simple_loss=0.463, pruned_loss=0.2382, over 8146.00 frames. ], tot_loss[loss=0.4918, simple_loss=0.4836, pruned_loss=0.25, over 1606945.37 frames. ], batch size: 22, lr: 4.61e-02, grad_scale: 16.0 +2023-02-05 18:31:53,107 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.570e+02 4.257e+02 6.045e+02 8.311e+02 2.838e+03, threshold=1.209e+03, percent-clipped=13.0 +2023-02-05 18:32:24,770 INFO [train.py:901] (1/4) Epoch 1, batch 3150, loss[loss=0.4562, simple_loss=0.4657, pruned_loss=0.2234, over 8476.00 frames. ], tot_loss[loss=0.4913, simple_loss=0.4836, pruned_loss=0.2495, over 1606515.37 frames. ], batch size: 25, lr: 4.60e-02, grad_scale: 16.0 +2023-02-05 18:32:32,222 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3162.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:32:47,645 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:32:50,018 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-05 18:32:55,518 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.36 vs. limit=2.0 +2023-02-05 18:32:57,082 INFO [train.py:901] (1/4) Epoch 1, batch 3200, loss[loss=0.5802, simple_loss=0.5298, pruned_loss=0.3153, over 6696.00 frames. ], tot_loss[loss=0.491, simple_loss=0.4836, pruned_loss=0.2492, over 1605274.94 frames. 
], batch size: 71, lr: 4.59e-02, grad_scale: 16.0 +2023-02-05 18:33:00,921 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 4.232e+02 5.266e+02 6.948e+02 2.778e+03, threshold=1.053e+03, percent-clipped=2.0 +2023-02-05 18:33:07,793 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2011, 2.1950, 2.7909, 2.1002, 2.4877, 3.3838, 3.1102, 3.1037], + device='cuda:1'), covar=tensor([0.1702, 0.1992, 0.0432, 0.2018, 0.0942, 0.0200, 0.0238, 0.0275], + device='cuda:1'), in_proj_covar=tensor([0.0210, 0.0234, 0.0126, 0.0215, 0.0171, 0.0097, 0.0096, 0.0124], + device='cuda:1'), out_proj_covar=tensor([1.5607e-04, 1.7023e-04, 9.9238e-05, 1.4916e-04, 1.3629e-04, 7.4761e-05, + 7.5616e-05, 9.1560e-05], device='cuda:1') +2023-02-05 18:33:32,110 INFO [train.py:901] (1/4) Epoch 1, batch 3250, loss[loss=0.4896, simple_loss=0.4933, pruned_loss=0.2429, over 8464.00 frames. ], tot_loss[loss=0.4925, simple_loss=0.4846, pruned_loss=0.2502, over 1606825.62 frames. ], batch size: 25, lr: 4.58e-02, grad_scale: 16.0 +2023-02-05 18:34:04,437 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3299.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:34:05,637 INFO [train.py:901] (1/4) Epoch 1, batch 3300, loss[loss=0.5469, simple_loss=0.5333, pruned_loss=0.2802, over 8703.00 frames. ], tot_loss[loss=0.49, simple_loss=0.4842, pruned_loss=0.2479, over 1616697.44 frames. ], batch size: 34, lr: 4.57e-02, grad_scale: 16.0 +2023-02-05 18:34:05,853 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3301.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:34:08,951 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3306.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:34:09,418 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 4.334e+02 5.638e+02 7.160e+02 2.697e+03, threshold=1.128e+03, percent-clipped=10.0 +2023-02-05 18:34:39,426 INFO [train.py:901] (1/4) Epoch 1, batch 3350, loss[loss=0.4214, simple_loss=0.4371, pruned_loss=0.2029, over 7251.00 frames. ], tot_loss[loss=0.4867, simple_loss=0.4824, pruned_loss=0.2456, over 1618118.10 frames. ], batch size: 16, lr: 4.56e-02, grad_scale: 16.0 +2023-02-05 18:35:01,937 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3381.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:14,988 INFO [train.py:901] (1/4) Epoch 1, batch 3400, loss[loss=0.4848, simple_loss=0.4841, pruned_loss=0.2427, over 8515.00 frames. ], tot_loss[loss=0.4845, simple_loss=0.4807, pruned_loss=0.2442, over 1617416.87 frames. ], batch size: 28, lr: 4.55e-02, grad_scale: 16.0 +2023-02-05 18:35:19,030 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.486e+02 3.960e+02 5.068e+02 6.311e+02 1.481e+03, threshold=1.014e+03, percent-clipped=3.0 +2023-02-05 18:35:23,813 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:26,543 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3418.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:35:43,715 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3443.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:48,748 INFO [train.py:901] (1/4) Epoch 1, batch 3450, loss[loss=0.4457, simple_loss=0.4406, pruned_loss=0.2254, over 7927.00 frames. 
], tot_loss[loss=0.4845, simple_loss=0.48, pruned_loss=0.2445, over 1615127.64 frames. ], batch size: 20, lr: 4.54e-02, grad_scale: 16.0 +2023-02-05 18:36:12,075 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.41 vs. limit=5.0 +2023-02-05 18:36:21,044 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:36:24,211 INFO [train.py:901] (1/4) Epoch 1, batch 3500, loss[loss=0.4987, simple_loss=0.5081, pruned_loss=0.2447, over 8567.00 frames. ], tot_loss[loss=0.4835, simple_loss=0.4795, pruned_loss=0.2437, over 1611035.15 frames. ], batch size: 39, lr: 4.53e-02, grad_scale: 16.0 +2023-02-05 18:36:28,192 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.405e+02 5.773e+02 7.537e+02 2.537e+03, threshold=1.155e+03, percent-clipped=7.0 +2023-02-05 18:36:36,262 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 18:36:48,246 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. limit=2.0 +2023-02-05 18:36:57,801 INFO [train.py:901] (1/4) Epoch 1, batch 3550, loss[loss=0.5235, simple_loss=0.5051, pruned_loss=0.271, over 8470.00 frames. ], tot_loss[loss=0.4804, simple_loss=0.4782, pruned_loss=0.2413, over 1614206.40 frames. ], batch size: 25, lr: 4.51e-02, grad_scale: 16.0 +2023-02-05 18:37:02,065 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3557.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:37:07,160 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3564.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:37:19,199 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3582.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:37:33,290 INFO [train.py:901] (1/4) Epoch 1, batch 3600, loss[loss=0.5266, simple_loss=0.5114, pruned_loss=0.2709, over 8327.00 frames. ], tot_loss[loss=0.4849, simple_loss=0.481, pruned_loss=0.2444, over 1617559.46 frames. ], batch size: 25, lr: 4.50e-02, grad_scale: 16.0 +2023-02-05 18:37:37,965 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.853e+02 4.660e+02 6.337e+02 8.772e+02 4.832e+03, threshold=1.267e+03, percent-clipped=11.0 +2023-02-05 18:38:06,589 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3650.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:38:07,048 INFO [train.py:901] (1/4) Epoch 1, batch 3650, loss[loss=0.4941, simple_loss=0.4898, pruned_loss=0.2491, over 7810.00 frames. ], tot_loss[loss=0.4825, simple_loss=0.4795, pruned_loss=0.2428, over 1617290.08 frames. 
], batch size: 20, lr: 4.49e-02, grad_scale: 16.0 +2023-02-05 18:38:18,964 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.4645, 1.2937, 1.0214, 0.2389, 0.9251, 0.9424, 0.1912, 1.2272], + device='cuda:1'), covar=tensor([0.0868, 0.0310, 0.0385, 0.1043, 0.0570, 0.0548, 0.1016, 0.0367], + device='cuda:1'), in_proj_covar=tensor([0.0094, 0.0076, 0.0068, 0.0093, 0.0074, 0.0078, 0.0100, 0.0074], + device='cuda:1'), out_proj_covar=tensor([6.6076e-05, 5.0290e-05, 4.7154e-05, 7.0751e-05, 5.3792e-05, 5.4444e-05, + 7.0970e-05, 4.8331e-05], device='cuda:1') +2023-02-05 18:38:19,646 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3670.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:38:23,273 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.07 vs. limit=2.0 +2023-02-05 18:38:36,822 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3694.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:38:37,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3695.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:38:40,441 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 18:38:41,150 INFO [train.py:901] (1/4) Epoch 1, batch 3700, loss[loss=0.4382, simple_loss=0.4561, pruned_loss=0.2101, over 8479.00 frames. ], tot_loss[loss=0.4821, simple_loss=0.4797, pruned_loss=0.2423, over 1613096.62 frames. ], batch size: 25, lr: 4.48e-02, grad_scale: 16.0 +2023-02-05 18:38:45,146 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.178e+02 4.586e+02 6.278e+02 1.050e+03 3.437e+03, threshold=1.256e+03, percent-clipped=14.0 +2023-02-05 18:39:17,448 INFO [train.py:901] (1/4) Epoch 1, batch 3750, loss[loss=0.4096, simple_loss=0.4252, pruned_loss=0.197, over 7707.00 frames. ], tot_loss[loss=0.4792, simple_loss=0.4782, pruned_loss=0.2401, over 1617121.31 frames. ], batch size: 18, lr: 4.47e-02, grad_scale: 16.0 +2023-02-05 18:39:18,336 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3752.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:39:21,232 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.18 vs. limit=2.0 +2023-02-05 18:39:27,127 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3765.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:39:35,243 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3777.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:39:50,664 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.12 vs. limit=5.0 +2023-02-05 18:39:51,677 INFO [train.py:901] (1/4) Epoch 1, batch 3800, loss[loss=0.4199, simple_loss=0.4214, pruned_loss=0.2092, over 7428.00 frames. ], tot_loss[loss=0.4764, simple_loss=0.4762, pruned_loss=0.2383, over 1616008.21 frames. ], batch size: 17, lr: 4.46e-02, grad_scale: 16.0 +2023-02-05 18:39:55,874 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.457e+02 5.389e+02 6.979e+02 9.091e+02 1.609e+03, threshold=1.396e+03, percent-clipped=5.0 +2023-02-05 18:39:56,457 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.75 vs. limit=2.0 +2023-02-05 18:40:27,881 INFO [train.py:901] (1/4) Epoch 1, batch 3850, loss[loss=0.4614, simple_loss=0.4638, pruned_loss=0.2295, over 8076.00 frames. 
], tot_loss[loss=0.4733, simple_loss=0.4736, pruned_loss=0.2365, over 1611899.57 frames. ], batch size: 21, lr: 4.45e-02, grad_scale: 16.0 +2023-02-05 18:40:46,565 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 18:41:01,009 INFO [train.py:901] (1/4) Epoch 1, batch 3900, loss[loss=0.5821, simple_loss=0.5339, pruned_loss=0.3152, over 7125.00 frames. ], tot_loss[loss=0.4719, simple_loss=0.4724, pruned_loss=0.2357, over 1612302.97 frames. ], batch size: 71, lr: 4.44e-02, grad_scale: 16.0 +2023-02-05 18:41:05,008 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.102e+02 5.552e+02 7.100e+02 9.321e+02 1.906e+03, threshold=1.420e+03, percent-clipped=2.0 +2023-02-05 18:41:05,748 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3908.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:41:29,958 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:41:35,351 INFO [train.py:901] (1/4) Epoch 1, batch 3950, loss[loss=0.4185, simple_loss=0.4419, pruned_loss=0.1975, over 8228.00 frames. ], tot_loss[loss=0.4708, simple_loss=0.4721, pruned_loss=0.2347, over 1613910.44 frames. ], batch size: 22, lr: 4.43e-02, grad_scale: 16.0 +2023-02-05 18:41:50,588 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 18:41:59,987 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9277, 1.1791, 3.1450, 1.3193, 2.4620, 3.4731, 3.3212, 3.2374], + device='cuda:1'), covar=tensor([0.1736, 0.2461, 0.0315, 0.2421, 0.0796, 0.0195, 0.0216, 0.0320], + device='cuda:1'), in_proj_covar=tensor([0.0221, 0.0247, 0.0136, 0.0231, 0.0181, 0.0103, 0.0100, 0.0144], + device='cuda:1'), out_proj_covar=tensor([1.7220e-04, 1.8765e-04, 1.1712e-04, 1.6848e-04, 1.5655e-04, 8.3781e-05, + 8.8452e-05, 1.1598e-04], device='cuda:1') +2023-02-05 18:42:03,389 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3504, 1.6599, 1.2022, 1.4713, 1.4143, 1.5011, 1.1371, 1.8666], + device='cuda:1'), covar=tensor([0.1438, 0.0949, 0.2145, 0.0842, 0.1649, 0.1363, 0.2185, 0.0885], + device='cuda:1'), in_proj_covar=tensor([0.0221, 0.0158, 0.0255, 0.0153, 0.0223, 0.0186, 0.0257, 0.0194], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 18:42:10,920 INFO [train.py:901] (1/4) Epoch 1, batch 4000, loss[loss=0.4952, simple_loss=0.5062, pruned_loss=0.2421, over 8474.00 frames. ], tot_loss[loss=0.4685, simple_loss=0.4709, pruned_loss=0.233, over 1610361.76 frames. 
], batch size: 27, lr: 4.42e-02, grad_scale: 8.0 +2023-02-05 18:42:15,527 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.262e+02 4.572e+02 5.687e+02 7.371e+02 1.820e+03, threshold=1.137e+03, percent-clipped=4.0 +2023-02-05 18:42:24,567 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4021.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:42:25,844 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4023.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:36,384 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4038.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:42,707 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4046.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:42:46,086 INFO [train.py:901] (1/4) Epoch 1, batch 4050, loss[loss=0.4065, simple_loss=0.4421, pruned_loss=0.1854, over 8137.00 frames. ], tot_loss[loss=0.4687, simple_loss=0.471, pruned_loss=0.2332, over 1616932.60 frames. ], batch size: 22, lr: 4.41e-02, grad_scale: 8.0 +2023-02-05 18:43:22,339 INFO [train.py:901] (1/4) Epoch 1, batch 4100, loss[loss=0.4333, simple_loss=0.4452, pruned_loss=0.2106, over 7781.00 frames. ], tot_loss[loss=0.4675, simple_loss=0.4701, pruned_loss=0.2325, over 1619308.45 frames. ], batch size: 19, lr: 4.40e-02, grad_scale: 8.0 +2023-02-05 18:43:26,892 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.479e+02 4.889e+02 6.474e+02 8.616e+02 2.054e+03, threshold=1.295e+03, percent-clipped=5.0 +2023-02-05 18:43:56,551 INFO [train.py:901] (1/4) Epoch 1, batch 4150, loss[loss=0.383, simple_loss=0.413, pruned_loss=0.1765, over 8088.00 frames. ], tot_loss[loss=0.4634, simple_loss=0.4667, pruned_loss=0.2301, over 1614262.75 frames. ], batch size: 21, lr: 4.39e-02, grad_scale: 8.0 +2023-02-05 18:43:58,175 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4153.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:44:02,260 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4159.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:44:33,533 INFO [train.py:901] (1/4) Epoch 1, batch 4200, loss[loss=0.4116, simple_loss=0.4365, pruned_loss=0.1934, over 8094.00 frames. ], tot_loss[loss=0.4627, simple_loss=0.4666, pruned_loss=0.2294, over 1614387.72 frames. ], batch size: 21, lr: 4.38e-02, grad_scale: 8.0 +2023-02-05 18:44:38,305 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 4.057e+02 5.109e+02 6.409e+02 1.525e+03, threshold=1.022e+03, percent-clipped=2.0 +2023-02-05 18:44:43,118 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2711, 1.2889, 2.8704, 1.1459, 2.0670, 3.2057, 3.1668, 2.8719], + device='cuda:1'), covar=tensor([0.2273, 0.2576, 0.0333, 0.2616, 0.0971, 0.0245, 0.0265, 0.0387], + device='cuda:1'), in_proj_covar=tensor([0.0223, 0.0251, 0.0134, 0.0229, 0.0177, 0.0104, 0.0106, 0.0143], + device='cuda:1'), out_proj_covar=tensor([1.7406e-04, 1.9112e-04, 1.1864e-04, 1.6901e-04, 1.5586e-04, 8.5899e-05, + 9.4618e-05, 1.1781e-04], device='cuda:1') +2023-02-05 18:44:44,326 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 18:45:04,973 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-05 18:45:07,077 INFO [train.py:901] (1/4) Epoch 1, batch 4250, loss[loss=0.4317, simple_loss=0.431, pruned_loss=0.2162, over 7652.00 frames. ], tot_loss[loss=0.4607, simple_loss=0.4651, pruned_loss=0.2282, over 1610185.55 frames. ], batch size: 19, lr: 4.36e-02, grad_scale: 8.0 +2023-02-05 18:45:14,109 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.6373, 1.1663, 1.7408, 0.1668, 1.0392, 1.0210, 0.3513, 1.3622], + device='cuda:1'), covar=tensor([0.1055, 0.0613, 0.0272, 0.1602, 0.0772, 0.0648, 0.1298, 0.0403], + device='cuda:1'), in_proj_covar=tensor([0.0111, 0.0086, 0.0072, 0.0110, 0.0080, 0.0097, 0.0106, 0.0081], + device='cuda:1'), out_proj_covar=tensor([7.8676e-05, 5.8146e-05, 5.1019e-05, 8.7177e-05, 6.2644e-05, 6.8205e-05, + 7.8033e-05, 5.5820e-05], device='cuda:1') +2023-02-05 18:45:26,612 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4279.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:45:29,997 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4253, 2.5060, 2.8087, 3.5377, 1.9861, 1.2789, 2.8148, 2.4395], + device='cuda:1'), covar=tensor([0.2250, 0.1223, 0.0587, 0.0314, 0.1198, 0.1657, 0.0911, 0.1270], + device='cuda:1'), in_proj_covar=tensor([0.0152, 0.0099, 0.0072, 0.0078, 0.0102, 0.0117, 0.0124, 0.0114], + device='cuda:1'), out_proj_covar=tensor([9.0042e-05, 5.6350e-05, 3.9887e-05, 4.4295e-05, 5.7512e-05, 6.4370e-05, + 6.9393e-05, 6.1616e-05], device='cuda:1') +2023-02-05 18:45:33,325 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4288.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:45:33,449 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0367, 2.0750, 2.0407, 2.7772, 1.4009, 1.3419, 1.9020, 2.0699], + device='cuda:1'), covar=tensor([0.1188, 0.1371, 0.1153, 0.0268, 0.2164, 0.1994, 0.2003, 0.1432], + device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0271, 0.0248, 0.0165, 0.0324, 0.0302, 0.0345, 0.0268], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002], + device='cuda:1') +2023-02-05 18:45:42,875 INFO [train.py:901] (1/4) Epoch 1, batch 4300, loss[loss=0.3897, simple_loss=0.4101, pruned_loss=0.1847, over 7930.00 frames. ], tot_loss[loss=0.4583, simple_loss=0.4637, pruned_loss=0.2264, over 1610214.68 frames. ], batch size: 20, lr: 4.35e-02, grad_scale: 8.0 +2023-02-05 18:45:45,696 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4304.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:45:47,010 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4306.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:45:48,894 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.647e+02 4.666e+02 6.207e+02 8.078e+02 1.600e+03, threshold=1.241e+03, percent-clipped=6.0 +2023-02-05 18:46:18,302 INFO [train.py:901] (1/4) Epoch 1, batch 4350, loss[loss=0.4154, simple_loss=0.4138, pruned_loss=0.2085, over 7792.00 frames. ], tot_loss[loss=0.4533, simple_loss=0.4603, pruned_loss=0.2231, over 1611221.92 frames. ], batch size: 19, lr: 4.34e-02, grad_scale: 8.0 +2023-02-05 18:46:37,369 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 18:46:52,942 INFO [train.py:901] (1/4) Epoch 1, batch 4400, loss[loss=0.4444, simple_loss=0.4678, pruned_loss=0.2104, over 8346.00 frames. 
], tot_loss[loss=0.4524, simple_loss=0.4593, pruned_loss=0.2227, over 1614902.56 frames. ], batch size: 26, lr: 4.33e-02, grad_scale: 8.0 +2023-02-05 18:46:54,542 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4403.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:46:57,934 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 4.338e+02 5.789e+02 7.262e+02 1.136e+03, threshold=1.158e+03, percent-clipped=0.0 +2023-02-05 18:46:58,965 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4409.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:47:18,621 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4434.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:47:21,210 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 18:47:29,977 INFO [train.py:901] (1/4) Epoch 1, batch 4450, loss[loss=0.4471, simple_loss=0.4561, pruned_loss=0.219, over 8360.00 frames. ], tot_loss[loss=0.4496, simple_loss=0.457, pruned_loss=0.2211, over 1607961.54 frames. ], batch size: 24, lr: 4.32e-02, grad_scale: 8.0 +2023-02-05 18:47:58,914 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.80 vs. limit=5.0 +2023-02-05 18:48:04,128 INFO [train.py:901] (1/4) Epoch 1, batch 4500, loss[loss=0.3921, simple_loss=0.4124, pruned_loss=0.1859, over 7791.00 frames. ], tot_loss[loss=0.4499, simple_loss=0.4564, pruned_loss=0.2216, over 1610012.42 frames. ], batch size: 19, lr: 4.31e-02, grad_scale: 8.0 +2023-02-05 18:48:05,597 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4503.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:48:09,058 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.251e+02 4.383e+02 5.863e+02 8.313e+02 2.632e+03, threshold=1.173e+03, percent-clipped=9.0 +2023-02-05 18:48:15,322 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 18:48:41,808 INFO [train.py:901] (1/4) Epoch 1, batch 4550, loss[loss=0.5309, simple_loss=0.5116, pruned_loss=0.2751, over 7132.00 frames. ], tot_loss[loss=0.4478, simple_loss=0.4554, pruned_loss=0.2201, over 1608685.53 frames. ], batch size: 71, lr: 4.30e-02, grad_scale: 8.0 +2023-02-05 18:49:02,396 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-05 18:49:16,692 INFO [train.py:901] (1/4) Epoch 1, batch 4600, loss[loss=0.4854, simple_loss=0.4943, pruned_loss=0.2383, over 8579.00 frames. ], tot_loss[loss=0.4498, simple_loss=0.457, pruned_loss=0.2213, over 1610645.05 frames. ], batch size: 31, lr: 4.29e-02, grad_scale: 8.0 +2023-02-05 18:49:21,478 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.983e+02 5.037e+02 6.922e+02 1.236e+03, threshold=1.007e+03, percent-clipped=2.0 +2023-02-05 18:49:28,469 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4618.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:49:51,613 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4650.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:49:52,226 INFO [train.py:901] (1/4) Epoch 1, batch 4650, loss[loss=0.4273, simple_loss=0.4509, pruned_loss=0.2019, over 8024.00 frames. ], tot_loss[loss=0.4496, simple_loss=0.457, pruned_loss=0.221, over 1613104.70 frames. 
], batch size: 22, lr: 4.28e-02, grad_scale: 8.0 +2023-02-05 18:49:59,118 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4659.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:50:16,191 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4684.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:50:27,583 INFO [train.py:901] (1/4) Epoch 1, batch 4700, loss[loss=0.534, simple_loss=0.5166, pruned_loss=0.2757, over 8335.00 frames. ], tot_loss[loss=0.4496, simple_loss=0.458, pruned_loss=0.2207, over 1615942.55 frames. ], batch size: 25, lr: 4.27e-02, grad_scale: 8.0 +2023-02-05 18:50:32,374 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.254e+02 4.576e+02 5.443e+02 6.674e+02 1.320e+03, threshold=1.089e+03, percent-clipped=4.0 +2023-02-05 18:51:01,880 INFO [train.py:901] (1/4) Epoch 1, batch 4750, loss[loss=0.3664, simple_loss=0.4132, pruned_loss=0.1598, over 8285.00 frames. ], tot_loss[loss=0.4455, simple_loss=0.4548, pruned_loss=0.2181, over 1614282.87 frames. ], batch size: 23, lr: 4.26e-02, grad_scale: 8.0 +2023-02-05 18:51:12,270 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4765.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:51:21,697 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 18:51:23,826 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 18:51:37,813 INFO [train.py:901] (1/4) Epoch 1, batch 4800, loss[loss=0.3653, simple_loss=0.3863, pruned_loss=0.1721, over 7691.00 frames. ], tot_loss[loss=0.4462, simple_loss=0.4555, pruned_loss=0.2185, over 1618583.38 frames. ], batch size: 18, lr: 4.25e-02, grad_scale: 8.0 +2023-02-05 18:51:42,622 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.690e+02 4.367e+02 5.327e+02 7.244e+02 1.939e+03, threshold=1.065e+03, percent-clipped=6.0 +2023-02-05 18:52:11,419 INFO [train.py:901] (1/4) Epoch 1, batch 4850, loss[loss=0.405, simple_loss=0.4378, pruned_loss=0.1862, over 8451.00 frames. ], tot_loss[loss=0.4485, simple_loss=0.4568, pruned_loss=0.2201, over 1620832.19 frames. ], batch size: 27, lr: 4.24e-02, grad_scale: 8.0 +2023-02-05 18:52:13,499 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 18:52:27,488 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4874.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:52:39,806 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4281, 1.4272, 1.9384, 1.5193, 1.2812, 2.0726, 0.5103, 1.0623], + device='cuda:1'), covar=tensor([0.0776, 0.0466, 0.0409, 0.0429, 0.0787, 0.0399, 0.1503, 0.0736], + device='cuda:1'), in_proj_covar=tensor([0.0143, 0.0111, 0.0105, 0.0126, 0.0123, 0.0093, 0.0172, 0.0141], + device='cuda:1'), out_proj_covar=tensor([1.1046e-04, 9.3525e-05, 8.1902e-05, 9.4438e-05, 1.0104e-04, 7.2022e-05, + 1.3448e-04, 1.1395e-04], device='cuda:1') +2023-02-05 18:52:47,401 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4899.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:52:48,555 INFO [train.py:901] (1/4) Epoch 1, batch 4900, loss[loss=0.4193, simple_loss=0.4327, pruned_loss=0.2029, over 8096.00 frames. ], tot_loss[loss=0.446, simple_loss=0.455, pruned_loss=0.2186, over 1619512.72 frames. 
], batch size: 23, lr: 4.23e-02, grad_scale: 8.0 +2023-02-05 18:52:53,376 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.332e+02 4.394e+02 5.447e+02 6.722e+02 1.310e+03, threshold=1.089e+03, percent-clipped=5.0 +2023-02-05 18:53:12,067 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7081, 1.9489, 2.4756, 1.7068, 1.5823, 2.4559, 0.9231, 1.3590], + device='cuda:1'), covar=tensor([0.0907, 0.0491, 0.0285, 0.0419, 0.0587, 0.0320, 0.1239, 0.0668], + device='cuda:1'), in_proj_covar=tensor([0.0144, 0.0110, 0.0102, 0.0124, 0.0122, 0.0091, 0.0164, 0.0136], + device='cuda:1'), out_proj_covar=tensor([1.1115e-04, 9.2859e-05, 8.0234e-05, 9.3721e-05, 1.0132e-04, 7.0945e-05, + 1.2858e-04, 1.0974e-04], device='cuda:1') +2023-02-05 18:53:22,702 INFO [train.py:901] (1/4) Epoch 1, batch 4950, loss[loss=0.5099, simple_loss=0.4805, pruned_loss=0.2696, over 7926.00 frames. ], tot_loss[loss=0.4439, simple_loss=0.4537, pruned_loss=0.2171, over 1616002.06 frames. ], batch size: 20, lr: 4.21e-02, grad_scale: 8.0 +2023-02-05 18:53:59,112 INFO [train.py:901] (1/4) Epoch 1, batch 5000, loss[loss=0.4789, simple_loss=0.4671, pruned_loss=0.2454, over 8469.00 frames. ], tot_loss[loss=0.4444, simple_loss=0.4542, pruned_loss=0.2173, over 1618235.06 frames. ], batch size: 25, lr: 4.20e-02, grad_scale: 8.0 +2023-02-05 18:54:04,639 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.658e+02 4.358e+02 5.438e+02 7.182e+02 1.797e+03, threshold=1.088e+03, percent-clipped=3.0 +2023-02-05 18:54:12,968 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0484, 1.9157, 3.6931, 1.7154, 2.8544, 2.6865, 1.7062, 2.7190], + device='cuda:1'), covar=tensor([0.0866, 0.1574, 0.0158, 0.1111, 0.0862, 0.1237, 0.1168, 0.0970], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0236, 0.0192, 0.0250, 0.0286, 0.0304, 0.0247, 0.0275], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 18:54:13,642 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5021.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:54:30,644 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5046.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:54:33,891 INFO [train.py:901] (1/4) Epoch 1, batch 5050, loss[loss=0.3754, simple_loss=0.4257, pruned_loss=0.1625, over 7924.00 frames. ], tot_loss[loss=0.4432, simple_loss=0.4539, pruned_loss=0.2163, over 1618799.33 frames. ], batch size: 20, lr: 4.19e-02, grad_scale: 8.0 +2023-02-05 18:54:50,684 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 18:55:08,928 INFO [train.py:901] (1/4) Epoch 1, batch 5100, loss[loss=0.4606, simple_loss=0.4718, pruned_loss=0.2247, over 8098.00 frames. ], tot_loss[loss=0.4427, simple_loss=0.4528, pruned_loss=0.2163, over 1608852.81 frames. ], batch size: 23, lr: 4.18e-02, grad_scale: 8.0 +2023-02-05 18:55:13,603 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.507e+02 4.431e+02 5.257e+02 6.582e+02 1.311e+03, threshold=1.051e+03, percent-clipped=2.0 +2023-02-05 18:55:33,130 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.21 vs. limit=2.0 +2023-02-05 18:55:45,846 INFO [train.py:901] (1/4) Epoch 1, batch 5150, loss[loss=0.485, simple_loss=0.4943, pruned_loss=0.2378, over 8583.00 frames. ], tot_loss[loss=0.4411, simple_loss=0.4517, pruned_loss=0.2152, over 1611980.00 frames. 
], batch size: 31, lr: 4.17e-02, grad_scale: 8.0 +2023-02-05 18:55:50,656 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5111, 1.4164, 2.2351, 0.5807, 2.0777, 1.9539, 0.6991, 1.9160], + device='cuda:1'), covar=tensor([0.0688, 0.0453, 0.0709, 0.1837, 0.1232, 0.0614, 0.2108, 0.0487], + device='cuda:1'), in_proj_covar=tensor([0.0135, 0.0102, 0.0086, 0.0133, 0.0096, 0.0138, 0.0135, 0.0103], + device='cuda:1'), out_proj_covar=tensor([9.5556e-05, 7.3662e-05, 6.3745e-05, 1.0725e-04, 7.8101e-05, 1.0169e-04, + 1.0400e-04, 7.2532e-05], device='cuda:1') +2023-02-05 18:56:19,015 INFO [train.py:901] (1/4) Epoch 1, batch 5200, loss[loss=0.4194, simple_loss=0.4192, pruned_loss=0.2098, over 8286.00 frames. ], tot_loss[loss=0.4406, simple_loss=0.4514, pruned_loss=0.2149, over 1610845.15 frames. ], batch size: 23, lr: 4.16e-02, grad_scale: 8.0 +2023-02-05 18:56:23,576 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.039e+02 3.937e+02 5.264e+02 6.479e+02 1.558e+03, threshold=1.053e+03, percent-clipped=7.0 +2023-02-05 18:56:51,636 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 18:56:55,099 INFO [train.py:901] (1/4) Epoch 1, batch 5250, loss[loss=0.4621, simple_loss=0.4748, pruned_loss=0.2247, over 8337.00 frames. ], tot_loss[loss=0.44, simple_loss=0.4514, pruned_loss=0.2143, over 1611727.52 frames. ], batch size: 25, lr: 4.15e-02, grad_scale: 8.0 +2023-02-05 18:57:28,844 INFO [train.py:901] (1/4) Epoch 1, batch 5300, loss[loss=0.5379, simple_loss=0.5225, pruned_loss=0.2767, over 7073.00 frames. ], tot_loss[loss=0.4403, simple_loss=0.4513, pruned_loss=0.2146, over 1609882.16 frames. ], batch size: 74, lr: 4.14e-02, grad_scale: 8.0 +2023-02-05 18:57:33,632 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.076e+02 4.278e+02 4.955e+02 6.641e+02 1.586e+03, threshold=9.909e+02, percent-clipped=4.0 +2023-02-05 18:57:33,850 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9093, 1.6606, 0.8840, 1.8833, 1.3636, 1.1374, 1.3096, 2.2293], + device='cuda:1'), covar=tensor([0.1118, 0.0734, 0.2474, 0.0567, 0.1791, 0.1367, 0.1596, 0.0506], + device='cuda:1'), in_proj_covar=tensor([0.0273, 0.0188, 0.0308, 0.0222, 0.0280, 0.0233, 0.0299, 0.0247], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 18:57:51,140 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 18:58:04,337 INFO [train.py:901] (1/4) Epoch 1, batch 5350, loss[loss=0.4909, simple_loss=0.4757, pruned_loss=0.253, over 6710.00 frames. ], tot_loss[loss=0.4401, simple_loss=0.4512, pruned_loss=0.2145, over 1607093.55 frames. ], batch size: 71, lr: 4.13e-02, grad_scale: 8.0 +2023-02-05 18:58:39,824 INFO [train.py:901] (1/4) Epoch 1, batch 5400, loss[loss=0.4487, simple_loss=0.472, pruned_loss=0.2126, over 8323.00 frames. ], tot_loss[loss=0.4397, simple_loss=0.4511, pruned_loss=0.2141, over 1611672.27 frames. 
], batch size: 25, lr: 4.12e-02, grad_scale: 8.0 +2023-02-05 18:58:44,298 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.977e+02 4.515e+02 5.788e+02 7.308e+02 1.362e+03, threshold=1.158e+03, percent-clipped=5.0 +2023-02-05 18:58:50,440 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5030, 5.6810, 4.8305, 1.8117, 4.4811, 4.9441, 5.2012, 4.4580], + device='cuda:1'), covar=tensor([0.0653, 0.0256, 0.0674, 0.3425, 0.0406, 0.0460, 0.0646, 0.0458], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0174, 0.0198, 0.0262, 0.0159, 0.0129, 0.0197, 0.0120], + device='cuda:1'), out_proj_covar=tensor([1.8059e-04, 1.2781e-04, 1.3354e-04, 1.7272e-04, 1.0684e-04, 9.4440e-05, + 1.4481e-04, 8.5077e-05], device='cuda:1') +2023-02-05 18:59:13,402 INFO [train.py:901] (1/4) Epoch 1, batch 5450, loss[loss=0.4414, simple_loss=0.4528, pruned_loss=0.215, over 8532.00 frames. ], tot_loss[loss=0.4373, simple_loss=0.4499, pruned_loss=0.2124, over 1616049.11 frames. ], batch size: 28, lr: 4.11e-02, grad_scale: 8.0 +2023-02-05 18:59:41,788 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 18:59:49,950 INFO [train.py:901] (1/4) Epoch 1, batch 5500, loss[loss=0.4122, simple_loss=0.4331, pruned_loss=0.1957, over 7924.00 frames. ], tot_loss[loss=0.4354, simple_loss=0.4484, pruned_loss=0.2112, over 1613769.69 frames. ], batch size: 20, lr: 4.10e-02, grad_scale: 8.0 +2023-02-05 18:59:54,515 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.397e+02 4.451e+02 5.295e+02 6.340e+02 1.239e+03, threshold=1.059e+03, percent-clipped=2.0 +2023-02-05 19:00:23,639 INFO [train.py:901] (1/4) Epoch 1, batch 5550, loss[loss=0.3807, simple_loss=0.4081, pruned_loss=0.1766, over 8223.00 frames. ], tot_loss[loss=0.4365, simple_loss=0.4493, pruned_loss=0.2119, over 1613527.81 frames. ], batch size: 22, lr: 4.09e-02, grad_scale: 8.0 +2023-02-05 19:01:00,925 INFO [train.py:901] (1/4) Epoch 1, batch 5600, loss[loss=0.4324, simple_loss=0.4602, pruned_loss=0.2023, over 8256.00 frames. ], tot_loss[loss=0.4341, simple_loss=0.4478, pruned_loss=0.2102, over 1612820.50 frames. ], batch size: 24, lr: 4.08e-02, grad_scale: 8.0 +2023-02-05 19:01:05,769 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 3.916e+02 5.301e+02 6.582e+02 1.340e+03, threshold=1.060e+03, percent-clipped=3.0 +2023-02-05 19:01:34,544 INFO [train.py:901] (1/4) Epoch 1, batch 5650, loss[loss=0.4038, simple_loss=0.4253, pruned_loss=0.1911, over 7801.00 frames. ], tot_loss[loss=0.4338, simple_loss=0.4477, pruned_loss=0.2099, over 1616904.36 frames. ], batch size: 19, lr: 4.07e-02, grad_scale: 8.0 +2023-02-05 19:01:45,694 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 19:01:45,831 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5668.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:02:09,329 INFO [train.py:901] (1/4) Epoch 1, batch 5700, loss[loss=0.3892, simple_loss=0.4029, pruned_loss=0.1877, over 7521.00 frames. ], tot_loss[loss=0.4366, simple_loss=0.4492, pruned_loss=0.212, over 1619911.50 frames. 
], batch size: 18, lr: 4.06e-02, grad_scale: 8.0 +2023-02-05 19:02:15,263 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.140e+02 4.740e+02 5.744e+02 8.008e+02 1.790e+03, threshold=1.149e+03, percent-clipped=10.0 +2023-02-05 19:02:44,481 INFO [train.py:901] (1/4) Epoch 1, batch 5750, loss[loss=0.3654, simple_loss=0.388, pruned_loss=0.1714, over 7439.00 frames. ], tot_loss[loss=0.4349, simple_loss=0.4485, pruned_loss=0.2106, over 1618911.68 frames. ], batch size: 17, lr: 4.05e-02, grad_scale: 8.0 +2023-02-05 19:02:51,408 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 19:02:52,455 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.54 vs. limit=5.0 +2023-02-05 19:02:59,869 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5773.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:03:19,628 INFO [train.py:901] (1/4) Epoch 1, batch 5800, loss[loss=0.5476, simple_loss=0.522, pruned_loss=0.2867, over 6742.00 frames. ], tot_loss[loss=0.4323, simple_loss=0.4465, pruned_loss=0.209, over 1614374.08 frames. ], batch size: 71, lr: 4.04e-02, grad_scale: 8.0 +2023-02-05 19:03:24,541 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.671e+02 4.595e+02 5.667e+02 1.405e+03, threshold=9.190e+02, percent-clipped=2.0 +2023-02-05 19:03:57,256 INFO [train.py:901] (1/4) Epoch 1, batch 5850, loss[loss=0.4157, simple_loss=0.4478, pruned_loss=0.1918, over 8102.00 frames. ], tot_loss[loss=0.4298, simple_loss=0.4447, pruned_loss=0.2074, over 1614213.57 frames. ], batch size: 23, lr: 4.03e-02, grad_scale: 8.0 +2023-02-05 19:04:15,222 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:04:32,490 INFO [train.py:901] (1/4) Epoch 1, batch 5900, loss[loss=0.4499, simple_loss=0.458, pruned_loss=0.2209, over 8590.00 frames. ], tot_loss[loss=0.4308, simple_loss=0.445, pruned_loss=0.2083, over 1614644.03 frames. ], batch size: 34, lr: 4.02e-02, grad_scale: 8.0 +2023-02-05 19:04:37,232 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 3.095e+02 4.155e+02 5.559e+02 6.668e+02 2.372e+03, threshold=1.112e+03, percent-clipped=6.0 +2023-02-05 19:05:03,786 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.85 vs. limit=2.0 +2023-02-05 19:05:09,347 INFO [train.py:901] (1/4) Epoch 1, batch 5950, loss[loss=0.3586, simple_loss=0.4002, pruned_loss=0.1585, over 8285.00 frames. ], tot_loss[loss=0.4282, simple_loss=0.4434, pruned_loss=0.2065, over 1617159.49 frames. ], batch size: 23, lr: 4.01e-02, grad_scale: 8.0 +2023-02-05 19:05:44,547 INFO [train.py:901] (1/4) Epoch 1, batch 6000, loss[loss=0.4452, simple_loss=0.4623, pruned_loss=0.214, over 8469.00 frames. ], tot_loss[loss=0.4288, simple_loss=0.4438, pruned_loss=0.2069, over 1614844.60 frames. ], batch size: 25, lr: 4.00e-02, grad_scale: 16.0 +2023-02-05 19:05:44,547 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 19:06:02,010 INFO [train.py:935] (1/4) Epoch 1, validation: loss=0.3351, simple_loss=0.4011, pruned_loss=0.1346, over 944034.00 frames. 
+2023-02-05 19:06:02,011 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6470MB +2023-02-05 19:06:06,794 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 3.694e+02 4.999e+02 6.330e+02 1.596e+03, threshold=9.998e+02, percent-clipped=5.0 +2023-02-05 19:06:06,987 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:06:09,472 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6012.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:06:10,336 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.16 vs. limit=5.0 +2023-02-05 19:06:35,745 INFO [train.py:901] (1/4) Epoch 1, batch 6050, loss[loss=0.5053, simple_loss=0.5064, pruned_loss=0.2521, over 8332.00 frames. ], tot_loss[loss=0.4325, simple_loss=0.4459, pruned_loss=0.2095, over 1613765.38 frames. ], batch size: 25, lr: 3.99e-02, grad_scale: 8.0 +2023-02-05 19:06:36,629 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3090, 2.8783, 2.5633, 3.8699, 1.6172, 1.7346, 2.2029, 3.0009], + device='cuda:1'), covar=tensor([0.1369, 0.1484, 0.1215, 0.0202, 0.2417, 0.2089, 0.2150, 0.1198], + device='cuda:1'), in_proj_covar=tensor([0.0294, 0.0313, 0.0291, 0.0183, 0.0348, 0.0333, 0.0383, 0.0284], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 19:06:42,877 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:06:45,814 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4324, 1.5649, 2.6830, 0.9943, 2.0183, 1.6059, 1.3358, 1.7839], + device='cuda:1'), covar=tensor([0.1352, 0.1357, 0.0291, 0.1675, 0.0950, 0.1917, 0.1352, 0.1062], + device='cuda:1'), in_proj_covar=tensor([0.0271, 0.0265, 0.0236, 0.0290, 0.0335, 0.0341, 0.0284, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 19:07:05,560 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5081, 2.0194, 0.9701, 2.0427, 1.7079, 1.3838, 1.7481, 1.9613], + device='cuda:1'), covar=tensor([0.1403, 0.0704, 0.1911, 0.0824, 0.1126, 0.1329, 0.1556, 0.0896], + device='cuda:1'), in_proj_covar=tensor([0.0301, 0.0211, 0.0327, 0.0242, 0.0299, 0.0260, 0.0318, 0.0257], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-05 19:07:10,269 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.8417, 0.9141, 0.9425, 0.9699, 0.6828, 0.9804, 0.0757, 0.5638], + device='cuda:1'), covar=tensor([0.0643, 0.0489, 0.0340, 0.0369, 0.0539, 0.0384, 0.1260, 0.0736], + device='cuda:1'), in_proj_covar=tensor([0.0141, 0.0113, 0.0098, 0.0127, 0.0122, 0.0085, 0.0164, 0.0142], + device='cuda:1'), out_proj_covar=tensor([1.1441e-04, 1.0140e-04, 7.9299e-05, 1.0018e-04, 1.0693e-04, 6.8400e-05, + 1.3596e-04, 1.2118e-04], device='cuda:1') +2023-02-05 19:07:12,055 INFO [train.py:901] (1/4) Epoch 1, batch 6100, loss[loss=0.4615, simple_loss=0.4676, pruned_loss=0.2277, over 7974.00 frames. ], tot_loss[loss=0.43, simple_loss=0.4449, pruned_loss=0.2075, over 1615083.51 frames. 
], batch size: 21, lr: 3.98e-02, grad_scale: 8.0 +2023-02-05 19:07:17,506 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.508e+02 4.942e+02 6.048e+02 7.564e+02 1.774e+03, threshold=1.210e+03, percent-clipped=15.0 +2023-02-05 19:07:23,142 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6117.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:28,993 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 19:07:29,787 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6127.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:07:44,056 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.3375, 5.6435, 4.7799, 1.8396, 4.7095, 5.1230, 4.8616, 4.1493], + device='cuda:1'), covar=tensor([0.0952, 0.0377, 0.0639, 0.4092, 0.0378, 0.0443, 0.1074, 0.0530], + device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0184, 0.0216, 0.0269, 0.0169, 0.0133, 0.0203, 0.0126], + device='cuda:1'), out_proj_covar=tensor([1.9402e-04, 1.3245e-04, 1.4140e-04, 1.7718e-04, 1.1049e-04, 9.4872e-05, + 1.4503e-04, 9.0425e-05], device='cuda:1') +2023-02-05 19:07:45,982 INFO [train.py:901] (1/4) Epoch 1, batch 6150, loss[loss=0.424, simple_loss=0.444, pruned_loss=0.2021, over 8535.00 frames. ], tot_loss[loss=0.4293, simple_loss=0.4441, pruned_loss=0.2073, over 1612551.82 frames. ], batch size: 31, lr: 3.97e-02, grad_scale: 8.0 +2023-02-05 19:07:47,401 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6153.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:58,547 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8032, 6.0211, 4.9435, 2.2793, 5.0120, 5.4277, 5.2681, 4.6362], + device='cuda:1'), covar=tensor([0.0607, 0.0298, 0.0706, 0.3144, 0.0320, 0.0411, 0.0894, 0.0390], + device='cuda:1'), in_proj_covar=tensor([0.0247, 0.0181, 0.0213, 0.0265, 0.0166, 0.0132, 0.0200, 0.0123], + device='cuda:1'), out_proj_covar=tensor([1.8874e-04, 1.2984e-04, 1.3857e-04, 1.7487e-04, 1.0829e-04, 9.3786e-05, + 1.4285e-04, 8.8412e-05], device='cuda:1') +2023-02-05 19:08:12,168 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6188.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:22,942 INFO [train.py:901] (1/4) Epoch 1, batch 6200, loss[loss=0.4495, simple_loss=0.4576, pruned_loss=0.2207, over 8083.00 frames. ], tot_loss[loss=0.4296, simple_loss=0.4442, pruned_loss=0.2075, over 1612255.47 frames. ], batch size: 21, lr: 3.96e-02, grad_scale: 8.0 +2023-02-05 19:08:28,570 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.743e+02 4.155e+02 5.130e+02 7.106e+02 1.864e+03, threshold=1.026e+03, percent-clipped=2.0 +2023-02-05 19:08:36,391 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:37,176 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:42,651 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6229.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:44,638 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:57,431 INFO [train.py:901] (1/4) Epoch 1, batch 6250, loss[loss=0.5057, simple_loss=0.502, pruned_loss=0.2547, over 8483.00 frames. 
], tot_loss[loss=0.4278, simple_loss=0.4435, pruned_loss=0.2061, over 1616582.80 frames. ], batch size: 28, lr: 3.95e-02, grad_scale: 8.0 +2023-02-05 19:09:20,021 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:09:32,703 INFO [train.py:901] (1/4) Epoch 1, batch 6300, loss[loss=0.4165, simple_loss=0.4489, pruned_loss=0.1921, over 8429.00 frames. ], tot_loss[loss=0.4263, simple_loss=0.4429, pruned_loss=0.2048, over 1618044.07 frames. ], batch size: 49, lr: 3.94e-02, grad_scale: 8.0 +2023-02-05 19:09:38,772 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.783e+02 4.352e+02 5.159e+02 6.362e+02 1.735e+03, threshold=1.032e+03, percent-clipped=4.0 +2023-02-05 19:09:56,807 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6335.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:07,347 INFO [train.py:901] (1/4) Epoch 1, batch 6350, loss[loss=0.389, simple_loss=0.4117, pruned_loss=0.1832, over 7697.00 frames. ], tot_loss[loss=0.4283, simple_loss=0.4446, pruned_loss=0.206, over 1619296.35 frames. ], batch size: 18, lr: 3.93e-02, grad_scale: 8.0 +2023-02-05 19:10:08,104 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6352.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:28,942 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6383.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:10:31,742 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 19:10:40,800 INFO [train.py:901] (1/4) Epoch 1, batch 6400, loss[loss=0.3563, simple_loss=0.3803, pruned_loss=0.1662, over 7542.00 frames. ], tot_loss[loss=0.4274, simple_loss=0.4443, pruned_loss=0.2052, over 1619432.42 frames. ], batch size: 18, lr: 3.92e-02, grad_scale: 8.0 +2023-02-05 19:10:43,626 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6405.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:45,789 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6408.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:10:46,251 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.785e+02 4.017e+02 4.991e+02 6.603e+02 1.156e+03, threshold=9.981e+02, percent-clipped=3.0 +2023-02-05 19:10:46,702 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-05 19:11:16,789 INFO [train.py:901] (1/4) Epoch 1, batch 6450, loss[loss=0.4947, simple_loss=0.4732, pruned_loss=0.2581, over 8249.00 frames. ], tot_loss[loss=0.4237, simple_loss=0.4414, pruned_loss=0.203, over 1620172.50 frames. 
], batch size: 22, lr: 3.91e-02, grad_scale: 8.0 +2023-02-05 19:11:27,827 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6467.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:36,480 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:39,749 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6485.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:41,894 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:43,161 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5876, 1.7371, 1.7651, 2.6307, 1.0437, 1.2687, 1.5375, 1.7214], + device='cuda:1'), covar=tensor([0.1332, 0.1532, 0.1177, 0.0298, 0.2036, 0.1815, 0.2112, 0.1208], + device='cuda:1'), in_proj_covar=tensor([0.0305, 0.0317, 0.0300, 0.0195, 0.0338, 0.0339, 0.0398, 0.0292], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 19:11:47,716 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:50,294 INFO [train.py:901] (1/4) Epoch 1, batch 6500, loss[loss=0.3586, simple_loss=0.3692, pruned_loss=0.174, over 6811.00 frames. ], tot_loss[loss=0.4225, simple_loss=0.4401, pruned_loss=0.2025, over 1612828.36 frames. ], batch size: 15, lr: 3.90e-02, grad_scale: 8.0 +2023-02-05 19:11:55,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 4.204e+02 5.270e+02 6.161e+02 1.286e+03, threshold=1.054e+03, percent-clipped=6.0 +2023-02-05 19:11:58,505 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6513.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:03,293 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6520.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:11,141 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6532.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:14,878 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 19:12:25,065 INFO [train.py:901] (1/4) Epoch 1, batch 6550, loss[loss=0.3371, simple_loss=0.3916, pruned_loss=0.1413, over 8473.00 frames. ], tot_loss[loss=0.4227, simple_loss=0.4407, pruned_loss=0.2024, over 1616785.51 frames. ], batch size: 25, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:12:35,937 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6565.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:37,969 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 19:12:41,462 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6573.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:53,792 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:57,637 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 19:13:00,386 INFO [train.py:901] (1/4) Epoch 1, batch 6600, loss[loss=0.3886, simple_loss=0.4223, pruned_loss=0.1774, over 8469.00 frames. 
], tot_loss[loss=0.4235, simple_loss=0.4414, pruned_loss=0.2029, over 1616998.24 frames. ], batch size: 27, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:13:05,674 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.999e+02 4.035e+02 4.985e+02 6.404e+02 1.328e+03, threshold=9.970e+02, percent-clipped=3.0 +2023-02-05 19:13:07,910 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:10,619 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:18,640 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6628.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,559 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,597 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:34,156 INFO [train.py:901] (1/4) Epoch 1, batch 6650, loss[loss=0.4416, simple_loss=0.4617, pruned_loss=0.2107, over 8642.00 frames. ], tot_loss[loss=0.4202, simple_loss=0.439, pruned_loss=0.2007, over 1616858.13 frames. ], batch size: 49, lr: 3.88e-02, grad_scale: 8.0 +2023-02-05 19:13:36,498 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 19:13:53,673 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8571, 1.4908, 1.4643, 1.2162, 1.9388, 1.4125, 1.4964, 2.2620], + device='cuda:1'), covar=tensor([0.1760, 0.2509, 0.2856, 0.2718, 0.1305, 0.2338, 0.1741, 0.1153], + device='cuda:1'), in_proj_covar=tensor([0.0284, 0.0299, 0.0299, 0.0291, 0.0280, 0.0269, 0.0272, 0.0262], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:13:56,264 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:01,298 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:09,924 INFO [train.py:901] (1/4) Epoch 1, batch 6700, loss[loss=0.4796, simple_loss=0.4655, pruned_loss=0.2468, over 7122.00 frames. ], tot_loss[loss=0.4216, simple_loss=0.4396, pruned_loss=0.2018, over 1612025.89 frames. ], batch size: 71, lr: 3.87e-02, grad_scale: 8.0 +2023-02-05 19:14:15,389 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.351e+02 4.140e+02 4.960e+02 6.260e+02 1.494e+03, threshold=9.921e+02, percent-clipped=3.0 +2023-02-05 19:14:25,019 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6723.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:38,630 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6743.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:42,143 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:43,045 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-05 19:14:44,012 INFO [train.py:901] (1/4) Epoch 1, batch 6750, loss[loss=0.3969, simple_loss=0.4205, pruned_loss=0.1867, over 8139.00 frames. 
], tot_loss[loss=0.4206, simple_loss=0.4388, pruned_loss=0.2012, over 1611871.85 frames. ], batch size: 22, lr: 3.86e-02, grad_scale: 8.0 +2023-02-05 19:15:00,933 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:14,371 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 19:15:19,955 INFO [train.py:901] (1/4) Epoch 1, batch 6800, loss[loss=0.4679, simple_loss=0.481, pruned_loss=0.2274, over 8573.00 frames. ], tot_loss[loss=0.4202, simple_loss=0.4387, pruned_loss=0.2009, over 1613709.70 frames. ], batch size: 31, lr: 3.85e-02, grad_scale: 8.0 +2023-02-05 19:15:20,150 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6801.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:25,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 4.226e+02 5.434e+02 7.341e+02 1.725e+03, threshold=1.087e+03, percent-clipped=4.0 +2023-02-05 19:15:35,610 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6824.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:39,023 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6829.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:44,946 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 19:15:54,385 INFO [train.py:901] (1/4) Epoch 1, batch 6850, loss[loss=0.4166, simple_loss=0.4423, pruned_loss=0.1955, over 8364.00 frames. ], tot_loss[loss=0.4182, simple_loss=0.4373, pruned_loss=0.1996, over 1611828.39 frames. ], batch size: 24, lr: 3.84e-02, grad_scale: 8.0 +2023-02-05 19:16:04,831 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 19:16:06,402 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:23,437 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6893.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:25,426 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6896.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:16:29,307 INFO [train.py:901] (1/4) Epoch 1, batch 6900, loss[loss=0.4853, simple_loss=0.4828, pruned_loss=0.2439, over 8532.00 frames. ], tot_loss[loss=0.4198, simple_loss=0.4387, pruned_loss=0.2005, over 1616367.92 frames. 
], batch size: 39, lr: 3.83e-02, grad_scale: 8.0 +2023-02-05 19:16:31,395 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6903.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:35,803 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.796e+02 4.754e+02 6.076e+02 1.448e+03, threshold=9.507e+02, percent-clipped=2.0 +2023-02-05 19:16:48,742 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6927.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:49,392 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3640, 1.6924, 1.5287, 2.3754, 1.0454, 1.1045, 1.6067, 1.6785], + device='cuda:1'), covar=tensor([0.1500, 0.1707, 0.1572, 0.0454, 0.2182, 0.2539, 0.2067, 0.1221], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0321, 0.0303, 0.0200, 0.0336, 0.0331, 0.0395, 0.0281], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 19:16:49,409 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:54,880 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6936.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:56,903 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6939.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,425 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,474 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:05,075 INFO [train.py:901] (1/4) Epoch 1, batch 6950, loss[loss=0.3881, simple_loss=0.4176, pruned_loss=0.1794, over 8354.00 frames. ], tot_loss[loss=0.4185, simple_loss=0.4371, pruned_loss=0.1999, over 1617158.59 frames. ], batch size: 24, lr: 3.82e-02, grad_scale: 8.0 +2023-02-05 19:17:11,203 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 19:17:12,151 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6961.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:17,903 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:32,942 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6991.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:34,437 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4185, 1.6981, 1.1373, 2.0029, 1.5506, 1.1706, 1.0039, 1.9568], + device='cuda:1'), covar=tensor([0.1106, 0.0748, 0.1610, 0.0631, 0.1200, 0.1399, 0.1680, 0.0704], + device='cuda:1'), in_proj_covar=tensor([0.0317, 0.0226, 0.0341, 0.0265, 0.0319, 0.0277, 0.0328, 0.0278], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-05 19:17:38,573 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6999.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:39,825 INFO [train.py:901] (1/4) Epoch 1, batch 7000, loss[loss=0.3953, simple_loss=0.4245, pruned_loss=0.1831, over 8593.00 frames. ], tot_loss[loss=0.4158, simple_loss=0.4353, pruned_loss=0.1982, over 1612886.64 frames. 
], batch size: 31, lr: 3.81e-02, grad_scale: 8.0 +2023-02-05 19:17:45,245 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.380e+02 4.090e+02 4.918e+02 6.048e+02 1.151e+03, threshold=9.836e+02, percent-clipped=6.0 +2023-02-05 19:17:57,721 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:16,022 INFO [train.py:901] (1/4) Epoch 1, batch 7050, loss[loss=0.4042, simple_loss=0.4363, pruned_loss=0.186, over 8250.00 frames. ], tot_loss[loss=0.4164, simple_loss=0.436, pruned_loss=0.1984, over 1615156.83 frames. ], batch size: 22, lr: 3.80e-02, grad_scale: 8.0 +2023-02-05 19:18:26,088 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7066.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:18:50,235 INFO [train.py:901] (1/4) Epoch 1, batch 7100, loss[loss=0.3979, simple_loss=0.4218, pruned_loss=0.187, over 8751.00 frames. ], tot_loss[loss=0.4166, simple_loss=0.4363, pruned_loss=0.1984, over 1617646.91 frames. ], batch size: 30, lr: 3.79e-02, grad_scale: 8.0 +2023-02-05 19:18:53,871 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:55,757 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.791e+02 4.613e+02 6.150e+02 1.722e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 19:19:25,902 INFO [train.py:901] (1/4) Epoch 1, batch 7150, loss[loss=0.4873, simple_loss=0.458, pruned_loss=0.2583, over 7553.00 frames. ], tot_loss[loss=0.4157, simple_loss=0.4358, pruned_loss=0.1978, over 1618951.08 frames. ], batch size: 18, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:19:46,605 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7181.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:48,142 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-05 19:19:56,066 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7195.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,479 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,943 INFO [train.py:901] (1/4) Epoch 1, batch 7200, loss[loss=0.3514, simple_loss=0.372, pruned_loss=0.1654, over 7439.00 frames. ], tot_loss[loss=0.416, simple_loss=0.436, pruned_loss=0.198, over 1620531.18 frames. ], batch size: 17, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:20:05,322 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.325e+02 4.231e+02 5.262e+02 7.053e+02 1.685e+03, threshold=1.052e+03, percent-clipped=7.0 +2023-02-05 19:20:13,040 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:16,296 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:25,922 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7240.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:20:33,000 INFO [train.py:901] (1/4) Epoch 1, batch 7250, loss[loss=0.3839, simple_loss=0.4141, pruned_loss=0.1769, over 8347.00 frames. ], tot_loss[loss=0.4158, simple_loss=0.4366, pruned_loss=0.1975, over 1620269.74 frames. 
], batch size: 24, lr: 3.77e-02, grad_scale: 8.0 +2023-02-05 19:20:48,481 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:03,382 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1430, 1.7615, 1.7355, 0.3547, 1.6512, 1.3204, 0.2150, 1.7288], + device='cuda:1'), covar=tensor([0.0397, 0.0240, 0.0235, 0.0817, 0.0306, 0.0533, 0.0710, 0.0206], + device='cuda:1'), in_proj_covar=tensor([0.0153, 0.0117, 0.0098, 0.0158, 0.0111, 0.0177, 0.0162, 0.0128], + device='cuda:1'), out_proj_covar=tensor([1.1261e-04, 8.5500e-05, 7.7439e-05, 1.2152e-04, 9.1249e-05, 1.4085e-04, + 1.2374e-04, 9.6372e-05], device='cuda:1') +2023-02-05 19:21:03,978 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:08,993 INFO [train.py:901] (1/4) Epoch 1, batch 7300, loss[loss=0.431, simple_loss=0.4559, pruned_loss=0.203, over 8619.00 frames. ], tot_loss[loss=0.4142, simple_loss=0.4349, pruned_loss=0.1967, over 1614472.34 frames. ], batch size: 34, lr: 3.76e-02, grad_scale: 8.0 +2023-02-05 19:21:14,313 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.413e+02 4.263e+02 5.448e+02 6.514e+02 1.215e+03, threshold=1.090e+03, percent-clipped=2.0 +2023-02-05 19:21:42,633 INFO [train.py:901] (1/4) Epoch 1, batch 7350, loss[loss=0.3931, simple_loss=0.4164, pruned_loss=0.1849, over 7800.00 frames. ], tot_loss[loss=0.4137, simple_loss=0.4347, pruned_loss=0.1963, over 1616232.45 frames. ], batch size: 20, lr: 3.75e-02, grad_scale: 8.0 +2023-02-05 19:21:45,493 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.3229, 1.2681, 5.2837, 2.4260, 4.4792, 4.3454, 4.6880, 4.4539], + device='cuda:1'), covar=tensor([0.0256, 0.3443, 0.0155, 0.1169, 0.0725, 0.0292, 0.0251, 0.0371], + device='cuda:1'), in_proj_covar=tensor([0.0153, 0.0338, 0.0178, 0.0212, 0.0223, 0.0201, 0.0173, 0.0202], + device='cuda:1'), out_proj_covar=tensor([9.4270e-05, 1.8591e-04, 1.0904e-04, 1.3403e-04, 1.2752e-04, 1.2063e-04, + 1.0457e-04, 1.2652e-04], device='cuda:1') +2023-02-05 19:21:45,541 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7355.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:21:50,111 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7362.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:56,009 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 19:22:08,458 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7386.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:09,133 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7387.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:18,186 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-05 19:22:18,458 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2618, 1.3444, 2.7000, 1.1214, 2.0295, 2.8823, 2.6331, 2.6317], + device='cuda:1'), covar=tensor([0.1769, 0.1865, 0.0405, 0.2383, 0.0793, 0.0290, 0.0309, 0.0430], + device='cuda:1'), in_proj_covar=tensor([0.0245, 0.0265, 0.0163, 0.0256, 0.0190, 0.0128, 0.0126, 0.0185], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-05 19:22:18,974 INFO [train.py:901] (1/4) Epoch 1, batch 7400, loss[loss=0.3528, simple_loss=0.3879, pruned_loss=0.1588, over 7811.00 frames. ], tot_loss[loss=0.4134, simple_loss=0.435, pruned_loss=0.1958, over 1618225.57 frames. ], batch size: 20, lr: 3.74e-02, grad_scale: 8.0 +2023-02-05 19:22:24,409 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.824e+02 4.270e+02 5.603e+02 6.704e+02 2.452e+03, threshold=1.121e+03, percent-clipped=4.0 +2023-02-05 19:22:25,137 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7410.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:22:35,056 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:52,095 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8494, 1.9639, 1.6585, 2.4729, 1.4641, 1.3087, 1.9812, 1.9084], + device='cuda:1'), covar=tensor([0.1053, 0.1340, 0.1434, 0.0417, 0.1692, 0.1937, 0.1468, 0.1159], + device='cuda:1'), in_proj_covar=tensor([0.0305, 0.0327, 0.0311, 0.0200, 0.0343, 0.0347, 0.0389, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0004, 0.0003, 0.0002, 0.0004, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 19:22:52,554 INFO [train.py:901] (1/4) Epoch 1, batch 7450, loss[loss=0.461, simple_loss=0.4653, pruned_loss=0.2284, over 8608.00 frames. ], tot_loss[loss=0.4141, simple_loss=0.4354, pruned_loss=0.1964, over 1613696.01 frames. ], batch size: 39, lr: 3.73e-02, grad_scale: 8.0 +2023-02-05 19:22:56,045 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 19:23:00,898 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0647, 3.1525, 2.7466, 1.2231, 2.6538, 2.5949, 2.8211, 2.5175], + device='cuda:1'), covar=tensor([0.1152, 0.0757, 0.1120, 0.3956, 0.0672, 0.0974, 0.1447, 0.0623], + device='cuda:1'), in_proj_covar=tensor([0.0267, 0.0183, 0.0223, 0.0293, 0.0174, 0.0137, 0.0206, 0.0133], + device='cuda:1'), out_proj_covar=tensor([1.9800e-04, 1.2876e-04, 1.4666e-04, 1.8845e-04, 1.1263e-04, 9.9221e-05, + 1.4048e-04, 9.3942e-05], device='cuda:1') +2023-02-05 19:23:02,948 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5026, 4.6919, 4.0454, 1.6778, 3.8114, 3.7858, 4.2559, 3.4266], + device='cuda:1'), covar=tensor([0.0613, 0.0299, 0.0605, 0.3276, 0.0390, 0.0622, 0.0789, 0.0455], + device='cuda:1'), in_proj_covar=tensor([0.0266, 0.0182, 0.0223, 0.0293, 0.0173, 0.0137, 0.0205, 0.0133], + device='cuda:1'), out_proj_covar=tensor([1.9719e-04, 1.2831e-04, 1.4626e-04, 1.8808e-04, 1.1233e-04, 9.9143e-05, + 1.3986e-04, 9.3494e-05], device='cuda:1') +2023-02-05 19:23:05,306 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 19:23:10,752 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. 
limit=2.0 +2023-02-05 19:23:27,518 INFO [train.py:901] (1/4) Epoch 1, batch 7500, loss[loss=0.3594, simple_loss=0.3868, pruned_loss=0.166, over 7414.00 frames. ], tot_loss[loss=0.4121, simple_loss=0.4335, pruned_loss=0.1954, over 1610188.30 frames. ], batch size: 17, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:23:34,187 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 4.060e+02 5.044e+02 6.934e+02 1.457e+03, threshold=1.009e+03, percent-clipped=3.0 +2023-02-05 19:23:45,044 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7525.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:23:45,155 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7525.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:24:02,231 INFO [train.py:901] (1/4) Epoch 1, batch 7550, loss[loss=0.4384, simple_loss=0.4721, pruned_loss=0.2023, over 8475.00 frames. ], tot_loss[loss=0.4119, simple_loss=0.4333, pruned_loss=0.1952, over 1606285.46 frames. ], batch size: 29, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:24:36,292 INFO [train.py:901] (1/4) Epoch 1, batch 7600, loss[loss=0.448, simple_loss=0.4561, pruned_loss=0.2199, over 8512.00 frames. ], tot_loss[loss=0.4119, simple_loss=0.4329, pruned_loss=0.1955, over 1606465.61 frames. ], batch size: 28, lr: 3.71e-02, grad_scale: 8.0 +2023-02-05 19:24:41,734 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.765e+02 4.361e+02 5.460e+02 6.853e+02 1.164e+03, threshold=1.092e+03, percent-clipped=2.0 +2023-02-05 19:24:43,938 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7611.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:25:03,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7636.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:25:03,854 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7637.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:05,945 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7640.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,275 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,354 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:13,185 INFO [train.py:901] (1/4) Epoch 1, batch 7650, loss[loss=0.375, simple_loss=0.3877, pruned_loss=0.1811, over 7725.00 frames. ], tot_loss[loss=0.411, simple_loss=0.4329, pruned_loss=0.1946, over 1610011.06 frames. ], batch size: 18, lr: 3.70e-02, grad_scale: 8.0 +2023-02-05 19:25:23,888 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7667.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:46,144 INFO [train.py:901] (1/4) Epoch 1, batch 7700, loss[loss=0.385, simple_loss=0.4094, pruned_loss=0.1803, over 7919.00 frames. ], tot_loss[loss=0.4108, simple_loss=0.4329, pruned_loss=0.1943, over 1609702.38 frames. ], batch size: 20, lr: 3.69e-02, grad_scale: 8.0 +2023-02-05 19:25:51,306 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.517e+02 4.083e+02 4.742e+02 6.161e+02 2.101e+03, threshold=9.483e+02, percent-clipped=6.0 +2023-02-05 19:26:07,597 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-05 19:26:21,658 INFO [train.py:901] (1/4) Epoch 1, batch 7750, loss[loss=0.5122, simple_loss=0.4974, pruned_loss=0.2635, over 7119.00 frames. ], tot_loss[loss=0.4083, simple_loss=0.4303, pruned_loss=0.1931, over 1599073.36 frames. ], batch size: 72, lr: 3.68e-02, grad_scale: 8.0 +2023-02-05 19:26:23,157 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7752.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:29,117 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:34,417 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7769.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:42,684 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7781.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:26:56,367 INFO [train.py:901] (1/4) Epoch 1, batch 7800, loss[loss=0.4081, simple_loss=0.4242, pruned_loss=0.196, over 8094.00 frames. ], tot_loss[loss=0.4078, simple_loss=0.4299, pruned_loss=0.1928, over 1599798.06 frames. ], batch size: 21, lr: 3.67e-02, grad_scale: 8.0 +2023-02-05 19:26:59,833 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7806.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:27:01,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.720e+02 4.585e+02 5.523e+02 1.290e+03, threshold=9.170e+02, percent-clipped=3.0 +2023-02-05 19:27:09,240 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:29,712 INFO [train.py:901] (1/4) Epoch 1, batch 7850, loss[loss=0.3439, simple_loss=0.376, pruned_loss=0.1558, over 7697.00 frames. ], tot_loss[loss=0.4065, simple_loss=0.4291, pruned_loss=0.192, over 1605266.31 frames. ], batch size: 18, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:27:37,629 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-05 19:27:52,021 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7884.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:59,883 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7896.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:03,027 INFO [train.py:901] (1/4) Epoch 1, batch 7900, loss[loss=0.4361, simple_loss=0.4462, pruned_loss=0.213, over 8471.00 frames. ], tot_loss[loss=0.4067, simple_loss=0.4298, pruned_loss=0.1918, over 1609370.71 frames. 
], batch size: 29, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:28:03,947 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0572, 2.1249, 2.9519, 3.2619, 2.7622, 1.8750, 1.9132, 2.2097], + device='cuda:1'), covar=tensor([0.1203, 0.0801, 0.0224, 0.0204, 0.0387, 0.0499, 0.0473, 0.0703], + device='cuda:1'), in_proj_covar=tensor([0.0371, 0.0268, 0.0169, 0.0199, 0.0263, 0.0259, 0.0270, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 19:28:08,438 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.732e+02 4.923e+02 6.190e+02 1.863e+03, threshold=9.845e+02, percent-clipped=5.0 +2023-02-05 19:28:16,370 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:35,814 INFO [train.py:901] (1/4) Epoch 1, batch 7950, loss[loss=0.395, simple_loss=0.4176, pruned_loss=0.1862, over 8373.00 frames. ], tot_loss[loss=0.4074, simple_loss=0.4301, pruned_loss=0.1924, over 1609618.89 frames. ], batch size: 48, lr: 3.65e-02, grad_scale: 8.0 +2023-02-05 19:28:42,787 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0 +2023-02-05 19:28:59,118 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7986.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:10,050 INFO [train.py:901] (1/4) Epoch 1, batch 8000, loss[loss=0.3893, simple_loss=0.4078, pruned_loss=0.1854, over 7712.00 frames. ], tot_loss[loss=0.4062, simple_loss=0.4289, pruned_loss=0.1917, over 1605448.90 frames. ], batch size: 18, lr: 3.64e-02, grad_scale: 8.0 +2023-02-05 19:29:15,095 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:15,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.650e+02 3.959e+02 4.934e+02 6.403e+02 1.426e+03, threshold=9.868e+02, percent-clipped=4.0 +2023-02-05 19:29:31,421 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8033.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:43,004 INFO [train.py:901] (1/4) Epoch 1, batch 8050, loss[loss=0.3727, simple_loss=0.3859, pruned_loss=0.1797, over 7201.00 frames. ], tot_loss[loss=0.4074, simple_loss=0.4288, pruned_loss=0.193, over 1592540.58 frames. ], batch size: 16, lr: 3.63e-02, grad_scale: 16.0 +2023-02-05 19:29:51,668 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0391, 1.3611, 1.5075, 0.3323, 1.3017, 1.1543, 0.1520, 1.5003], + device='cuda:1'), covar=tensor([0.0352, 0.0180, 0.0319, 0.0631, 0.0282, 0.0641, 0.0702, 0.0166], + device='cuda:1'), in_proj_covar=tensor([0.0162, 0.0119, 0.0102, 0.0169, 0.0118, 0.0187, 0.0170, 0.0132], + device='cuda:1'), out_proj_covar=tensor([1.1704e-04, 8.5295e-05, 8.1200e-05, 1.2578e-04, 9.4830e-05, 1.4771e-04, + 1.2942e-04, 9.7166e-05], device='cuda:1') +2023-02-05 19:30:17,053 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 19:30:20,856 INFO [train.py:901] (1/4) Epoch 2, batch 0, loss[loss=0.4707, simple_loss=0.464, pruned_loss=0.2387, over 8135.00 frames. ], tot_loss[loss=0.4707, simple_loss=0.464, pruned_loss=0.2387, over 8135.00 frames. 
], batch size: 22, lr: 3.56e-02, grad_scale: 8.0 +2023-02-05 19:30:20,856 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 19:30:32,396 INFO [train.py:935] (1/4) Epoch 2, validation: loss=0.3107, simple_loss=0.3861, pruned_loss=0.1176, over 944034.00 frames. +2023-02-05 19:30:32,397 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6470MB +2023-02-05 19:30:36,091 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3882, 1.8338, 3.3712, 2.9442, 2.3144, 1.8939, 1.8951, 2.0937], + device='cuda:1'), covar=tensor([0.0806, 0.0772, 0.0110, 0.0208, 0.0392, 0.0425, 0.0501, 0.0609], + device='cuda:1'), in_proj_covar=tensor([0.0378, 0.0275, 0.0175, 0.0200, 0.0269, 0.0262, 0.0282, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 19:30:44,057 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8101.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:46,380 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-05 19:30:46,617 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 19:30:46,677 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:49,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 3.846e+02 4.676e+02 6.027e+02 1.450e+03, threshold=9.352e+02, percent-clipped=5.0 +2023-02-05 19:31:06,745 INFO [train.py:901] (1/4) Epoch 2, batch 50, loss[loss=0.3337, simple_loss=0.38, pruned_loss=0.1437, over 7791.00 frames. ], tot_loss[loss=0.4019, simple_loss=0.4268, pruned_loss=0.1885, over 359871.00 frames. ], batch size: 19, lr: 3.55e-02, grad_scale: 8.0 +2023-02-05 19:31:11,120 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8140.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:20,804 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 19:31:28,339 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:29,157 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8165.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:41,593 INFO [train.py:901] (1/4) Epoch 2, batch 100, loss[loss=0.4516, simple_loss=0.4631, pruned_loss=0.22, over 7661.00 frames. ], tot_loss[loss=0.4127, simple_loss=0.4354, pruned_loss=0.195, over 640604.10 frames. ], batch size: 19, lr: 3.54e-02, grad_scale: 8.0 +2023-02-05 19:31:44,274 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-05 19:31:59,424 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.246e+02 4.943e+02 6.491e+02 9.375e+02, threshold=9.885e+02, percent-clipped=1.0 +2023-02-05 19:32:06,340 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:07,760 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0998, 2.4838, 4.6106, 1.1780, 3.0390, 2.3056, 1.7895, 2.5047], + device='cuda:1'), covar=tensor([0.0963, 0.1161, 0.0281, 0.1640, 0.0988, 0.1419, 0.0902, 0.1387], + device='cuda:1'), in_proj_covar=tensor([0.0324, 0.0316, 0.0312, 0.0356, 0.0403, 0.0372, 0.0324, 0.0388], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 19:32:15,455 INFO [train.py:901] (1/4) Epoch 2, batch 150, loss[loss=0.3442, simple_loss=0.384, pruned_loss=0.1523, over 8031.00 frames. ], tot_loss[loss=0.4053, simple_loss=0.4303, pruned_loss=0.1901, over 858424.97 frames. ], batch size: 22, lr: 3.53e-02, grad_scale: 8.0 +2023-02-05 19:32:47,791 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,388 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8283.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,923 INFO [train.py:901] (1/4) Epoch 2, batch 200, loss[loss=0.4083, simple_loss=0.4282, pruned_loss=0.1942, over 7976.00 frames. ], tot_loss[loss=0.405, simple_loss=0.4302, pruned_loss=0.1899, over 1028462.56 frames. ], batch size: 21, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:32:53,835 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-02-05 19:33:08,598 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.581e+02 3.727e+02 4.975e+02 6.903e+02 1.681e+03, threshold=9.950e+02, percent-clipped=7.0 +2023-02-05 19:33:24,848 INFO [train.py:901] (1/4) Epoch 2, batch 250, loss[loss=0.4791, simple_loss=0.4891, pruned_loss=0.2346, over 8450.00 frames. ], tot_loss[loss=0.405, simple_loss=0.43, pruned_loss=0.19, over 1157959.05 frames. ], batch size: 27, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:33:27,774 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9244, 1.5722, 1.3958, 1.4134, 1.7887, 1.5096, 1.4375, 1.7570], + device='cuda:1'), covar=tensor([0.1255, 0.1939, 0.2697, 0.2145, 0.1106, 0.1904, 0.1485, 0.1188], + device='cuda:1'), in_proj_covar=tensor([0.0259, 0.0281, 0.0299, 0.0272, 0.0260, 0.0252, 0.0257, 0.0246], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:33:36,310 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 19:33:40,681 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:46,006 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 19:33:58,447 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:59,607 INFO [train.py:901] (1/4) Epoch 2, batch 300, loss[loss=0.4504, simple_loss=0.4706, pruned_loss=0.2151, over 8462.00 frames. 
], tot_loss[loss=0.4029, simple_loss=0.4282, pruned_loss=0.1888, over 1258159.82 frames. ], batch size: 27, lr: 3.51e-02, grad_scale: 8.0 +2023-02-05 19:34:18,663 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 4.043e+02 4.737e+02 5.583e+02 9.957e+02, threshold=9.474e+02, percent-clipped=1.0 +2023-02-05 19:34:29,133 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=2.02 vs. limit=2.0 +2023-02-05 19:34:35,499 INFO [train.py:901] (1/4) Epoch 2, batch 350, loss[loss=0.3569, simple_loss=0.3946, pruned_loss=0.1596, over 8099.00 frames. ], tot_loss[loss=0.403, simple_loss=0.4282, pruned_loss=0.1889, over 1338979.22 frames. ], batch size: 23, lr: 3.50e-02, grad_scale: 8.0 +2023-02-05 19:35:00,611 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-05 19:35:03,558 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:07,681 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.8351, 1.1315, 0.9131, 0.7851, 0.7160, 1.0305, 0.0144, 0.6527], + device='cuda:1'), covar=tensor([0.0588, 0.0454, 0.0366, 0.0496, 0.0562, 0.0342, 0.1518, 0.0819], + device='cuda:1'), in_proj_covar=tensor([0.0140, 0.0118, 0.0101, 0.0148, 0.0119, 0.0091, 0.0167, 0.0143], + device='cuda:1'), out_proj_covar=tensor([1.1985e-04, 1.1085e-04, 9.0684e-05, 1.2712e-04, 1.1273e-04, 8.3598e-05, + 1.4563e-04, 1.2831e-04], device='cuda:1') +2023-02-05 19:35:09,448 INFO [train.py:901] (1/4) Epoch 2, batch 400, loss[loss=0.3996, simple_loss=0.4211, pruned_loss=0.189, over 8125.00 frames. ], tot_loss[loss=0.4021, simple_loss=0.4275, pruned_loss=0.1883, over 1401681.04 frames. ], batch size: 22, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:20,907 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:27,437 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.339e+02 4.887e+02 6.099e+02 1.134e+03, threshold=9.773e+02, percent-clipped=6.0 +2023-02-05 19:35:43,490 INFO [train.py:901] (1/4) Epoch 2, batch 450, loss[loss=0.4188, simple_loss=0.4422, pruned_loss=0.1977, over 8334.00 frames. ], tot_loss[loss=0.4043, simple_loss=0.4292, pruned_loss=0.1897, over 1447416.92 frames. ], batch size: 26, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:44,346 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:01,885 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:18,004 INFO [train.py:901] (1/4) Epoch 2, batch 500, loss[loss=0.375, simple_loss=0.4124, pruned_loss=0.1688, over 7972.00 frames. ], tot_loss[loss=0.4001, simple_loss=0.4263, pruned_loss=0.187, over 1485513.68 frames. ], batch size: 21, lr: 3.48e-02, grad_scale: 8.0 +2023-02-05 19:36:36,156 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.361e+02 3.910e+02 4.803e+02 5.619e+02 9.699e+02, threshold=9.605e+02, percent-clipped=0.0 +2023-02-05 19:36:47,558 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8627.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:52,677 INFO [train.py:901] (1/4) Epoch 2, batch 550, loss[loss=0.4441, simple_loss=0.4553, pruned_loss=0.2165, over 8287.00 frames. 
], tot_loss[loss=0.3993, simple_loss=0.4252, pruned_loss=0.1867, over 1510781.78 frames. ], batch size: 23, lr: 3.47e-02, grad_scale: 8.0 +2023-02-05 19:37:26,531 INFO [train.py:901] (1/4) Epoch 2, batch 600, loss[loss=0.4042, simple_loss=0.4015, pruned_loss=0.2034, over 7439.00 frames. ], tot_loss[loss=0.3988, simple_loss=0.4249, pruned_loss=0.1864, over 1535830.68 frames. ], batch size: 17, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:37:29,013 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 19:37:32,131 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5103, 1.6727, 1.6688, 2.3604, 0.9514, 1.1686, 1.5089, 1.7280], + device='cuda:1'), covar=tensor([0.1148, 0.1385, 0.1158, 0.0374, 0.1972, 0.1995, 0.1733, 0.1173], + device='cuda:1'), in_proj_covar=tensor([0.0302, 0.0326, 0.0315, 0.0208, 0.0341, 0.0351, 0.0385, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:37:43,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.752e+02 3.934e+02 5.073e+02 6.758e+02 1.500e+03, threshold=1.015e+03, percent-clipped=5.0 +2023-02-05 19:37:44,753 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 19:37:59,728 INFO [train.py:901] (1/4) Epoch 2, batch 650, loss[loss=0.4536, simple_loss=0.4735, pruned_loss=0.2168, over 8203.00 frames. ], tot_loss[loss=0.3998, simple_loss=0.4257, pruned_loss=0.187, over 1551972.71 frames. ], batch size: 23, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:38:05,386 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:31,187 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:31,223 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3800, 1.3703, 1.3335, 1.1313, 1.5391, 1.2475, 1.0795, 1.4982], + device='cuda:1'), covar=tensor([0.1289, 0.1994, 0.2307, 0.2283, 0.0932, 0.1962, 0.1413, 0.1009], + device='cuda:1'), in_proj_covar=tensor([0.0260, 0.0278, 0.0292, 0.0278, 0.0258, 0.0255, 0.0258, 0.0245], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:38:35,550 INFO [train.py:901] (1/4) Epoch 2, batch 700, loss[loss=0.3339, simple_loss=0.3738, pruned_loss=0.147, over 8054.00 frames. ], tot_loss[loss=0.3991, simple_loss=0.4251, pruned_loss=0.1866, over 1569840.40 frames. ], batch size: 20, lr: 3.45e-02, grad_scale: 8.0 +2023-02-05 19:38:53,112 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.421e+02 3.759e+02 4.676e+02 6.060e+02 1.461e+03, threshold=9.352e+02, percent-clipped=1.0 +2023-02-05 19:39:09,181 INFO [train.py:901] (1/4) Epoch 2, batch 750, loss[loss=0.4373, simple_loss=0.4575, pruned_loss=0.2086, over 8465.00 frames. ], tot_loss[loss=0.3976, simple_loss=0.4242, pruned_loss=0.1855, over 1579481.82 frames. 
], batch size: 27, lr: 3.44e-02, grad_scale: 8.0 +2023-02-05 19:39:23,216 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4328, 2.1749, 2.3622, 0.6779, 2.4197, 1.6001, 1.0912, 1.5649], + device='cuda:1'), covar=tensor([0.0419, 0.0162, 0.0232, 0.0699, 0.0256, 0.0528, 0.0717, 0.0345], + device='cuda:1'), in_proj_covar=tensor([0.0167, 0.0126, 0.0107, 0.0174, 0.0119, 0.0200, 0.0180, 0.0145], + device='cuda:1'), out_proj_covar=tensor([1.1862e-04, 8.9553e-05, 8.2196e-05, 1.2736e-04, 9.1641e-05, 1.5391e-04, + 1.3355e-04, 1.0755e-04], device='cuda:1') +2023-02-05 19:39:26,415 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 19:39:35,548 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 19:39:44,342 INFO [train.py:901] (1/4) Epoch 2, batch 800, loss[loss=0.4073, simple_loss=0.4445, pruned_loss=0.1851, over 8334.00 frames. ], tot_loss[loss=0.4016, simple_loss=0.4268, pruned_loss=0.1882, over 1589496.35 frames. ], batch size: 26, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:02,285 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 4.043e+02 5.225e+02 6.708e+02 1.302e+03, threshold=1.045e+03, percent-clipped=9.0 +2023-02-05 19:40:18,497 INFO [train.py:901] (1/4) Epoch 2, batch 850, loss[loss=0.4011, simple_loss=0.4144, pruned_loss=0.1939, over 7710.00 frames. ], tot_loss[loss=0.3975, simple_loss=0.4241, pruned_loss=0.1854, over 1596321.47 frames. ], batch size: 18, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:26,055 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8945.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:40:52,653 INFO [train.py:901] (1/4) Epoch 2, batch 900, loss[loss=0.4227, simple_loss=0.4601, pruned_loss=0.1927, over 8723.00 frames. ], tot_loss[loss=0.3956, simple_loss=0.4233, pruned_loss=0.1839, over 1604426.31 frames. ], batch size: 30, lr: 3.42e-02, grad_scale: 8.0 +2023-02-05 19:41:03,946 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8998.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:08,138 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9004.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:12,012 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.317e+02 3.660e+02 4.402e+02 6.333e+02 1.420e+03, threshold=8.805e+02, percent-clipped=4.0 +2023-02-05 19:41:21,856 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9023.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:27,232 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9031.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:29,111 INFO [train.py:901] (1/4) Epoch 2, batch 950, loss[loss=0.4026, simple_loss=0.4033, pruned_loss=0.201, over 7927.00 frames. ], tot_loss[loss=0.3954, simple_loss=0.4231, pruned_loss=0.1838, over 1605895.35 frames. ], batch size: 20, lr: 3.41e-02, grad_scale: 8.0 +2023-02-05 19:41:57,087 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 19:42:04,019 INFO [train.py:901] (1/4) Epoch 2, batch 1000, loss[loss=0.3815, simple_loss=0.4066, pruned_loss=0.1782, over 8547.00 frames. ], tot_loss[loss=0.3936, simple_loss=0.4219, pruned_loss=0.1826, over 1609324.99 frames. 
], batch size: 31, lr: 3.40e-02, grad_scale: 8.0 +2023-02-05 19:42:21,659 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 19:42:22,620 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.505e+02 3.676e+02 4.681e+02 5.718e+02 9.745e+02, threshold=9.362e+02, percent-clipped=2.0 +2023-02-05 19:42:30,650 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9122.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:42:31,271 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 19:42:39,155 INFO [train.py:901] (1/4) Epoch 2, batch 1050, loss[loss=0.4249, simple_loss=0.4431, pruned_loss=0.2033, over 8465.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.4213, pruned_loss=0.1822, over 1610697.99 frames. ], batch size: 25, lr: 3.40e-02, grad_scale: 8.0 +2023-02-05 19:42:39,685 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 19:42:43,224 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 19:43:12,162 INFO [train.py:901] (1/4) Epoch 2, batch 1100, loss[loss=0.368, simple_loss=0.3942, pruned_loss=0.1709, over 7276.00 frames. ], tot_loss[loss=0.3925, simple_loss=0.4207, pruned_loss=0.1822, over 1607257.45 frames. ], batch size: 16, lr: 3.39e-02, grad_scale: 8.0 +2023-02-05 19:43:30,057 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.895e+02 4.986e+02 6.293e+02 1.172e+03, threshold=9.973e+02, percent-clipped=2.0 +2023-02-05 19:43:47,492 INFO [train.py:901] (1/4) Epoch 2, batch 1150, loss[loss=0.4165, simple_loss=0.4539, pruned_loss=0.1895, over 8189.00 frames. ], tot_loss[loss=0.3935, simple_loss=0.4211, pruned_loss=0.1829, over 1610358.05 frames. ], batch size: 23, lr: 3.38e-02, grad_scale: 8.0 +2023-02-05 19:43:49,760 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9237.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:43:50,987 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 19:44:08,368 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.08 vs. limit=2.0 +2023-02-05 19:44:22,137 INFO [train.py:901] (1/4) Epoch 2, batch 1200, loss[loss=0.4109, simple_loss=0.4377, pruned_loss=0.192, over 8591.00 frames. ], tot_loss[loss=0.3924, simple_loss=0.421, pruned_loss=0.1819, over 1614361.03 frames. ], batch size: 39, lr: 3.38e-02, grad_scale: 8.0 +2023-02-05 19:44:25,541 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9289.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:44:41,016 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.810e+02 4.160e+02 4.885e+02 6.720e+02 4.965e+03, threshold=9.769e+02, percent-clipped=5.0 +2023-02-05 19:44:41,470 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 19:44:56,724 INFO [train.py:901] (1/4) Epoch 2, batch 1250, loss[loss=0.4003, simple_loss=0.4367, pruned_loss=0.182, over 8242.00 frames. ], tot_loss[loss=0.3933, simple_loss=0.4221, pruned_loss=0.1823, over 1618951.73 frames. 
], batch size: 24, lr: 3.37e-02, grad_scale: 4.0 +2023-02-05 19:45:02,246 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2887, 1.5177, 2.4095, 1.0117, 1.7094, 1.4698, 1.3779, 1.5512], + device='cuda:1'), covar=tensor([0.1075, 0.1057, 0.0351, 0.1568, 0.0919, 0.1495, 0.0970, 0.0986], + device='cuda:1'), in_proj_covar=tensor([0.0340, 0.0329, 0.0349, 0.0373, 0.0426, 0.0395, 0.0339, 0.0413], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 19:45:07,485 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9348.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:17,413 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3418, 1.0956, 2.9560, 1.1115, 1.8708, 3.3369, 3.1569, 2.8928], + device='cuda:1'), covar=tensor([0.1795, 0.2184, 0.0545, 0.2728, 0.1069, 0.0380, 0.0407, 0.0782], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0255, 0.0177, 0.0251, 0.0185, 0.0138, 0.0137, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-05 19:45:25,821 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9375.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:27,212 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0701, 3.1914, 2.7951, 1.4825, 2.5670, 2.6918, 2.8057, 2.4230], + device='cuda:1'), covar=tensor([0.1154, 0.0811, 0.1059, 0.3605, 0.0758, 0.0660, 0.1432, 0.0660], + device='cuda:1'), in_proj_covar=tensor([0.0291, 0.0198, 0.0239, 0.0309, 0.0201, 0.0151, 0.0218, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 19:45:31,819 INFO [train.py:901] (1/4) Epoch 2, batch 1300, loss[loss=0.3774, simple_loss=0.414, pruned_loss=0.1704, over 8347.00 frames. ], tot_loss[loss=0.3926, simple_loss=0.4216, pruned_loss=0.1818, over 1618919.74 frames. 
], batch size: 24, lr: 3.36e-02, grad_scale: 4.0 +2023-02-05 19:45:45,162 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0014, 1.8622, 3.1822, 2.8239, 2.5282, 1.8380, 1.5262, 1.9701], + device='cuda:1'), covar=tensor([0.1108, 0.0991, 0.0180, 0.0276, 0.0407, 0.0467, 0.0607, 0.0761], + device='cuda:1'), in_proj_covar=tensor([0.0430, 0.0337, 0.0230, 0.0268, 0.0355, 0.0309, 0.0335, 0.0383], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 19:45:45,735 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9404.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:50,305 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.161e+02 4.162e+02 5.656e+02 7.688e+02 2.529e+03, threshold=1.131e+03, percent-clipped=11.0 +2023-02-05 19:45:57,157 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7858, 6.1501, 5.2296, 1.7613, 4.6430, 5.2377, 5.5275, 4.6403], + device='cuda:1'), covar=tensor([0.0842, 0.0295, 0.0589, 0.4589, 0.0423, 0.0490, 0.0953, 0.0509], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0201, 0.0245, 0.0316, 0.0201, 0.0156, 0.0218, 0.0152], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 19:46:05,038 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:06,286 INFO [train.py:901] (1/4) Epoch 2, batch 1350, loss[loss=0.3793, simple_loss=0.417, pruned_loss=0.1708, over 8511.00 frames. ], tot_loss[loss=0.3918, simple_loss=0.4207, pruned_loss=0.1815, over 1616361.25 frames. ], batch size: 26, lr: 3.36e-02, grad_scale: 4.0 +2023-02-05 19:46:27,274 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:27,429 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-05 19:46:41,365 INFO [train.py:901] (1/4) Epoch 2, batch 1400, loss[loss=0.4181, simple_loss=0.4548, pruned_loss=0.1907, over 8390.00 frames. ], tot_loss[loss=0.3922, simple_loss=0.4207, pruned_loss=0.1818, over 1616591.56 frames. 
], batch size: 49, lr: 3.35e-02, grad_scale: 4.0 +2023-02-05 19:46:45,509 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9490.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:47,583 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9493.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:56,270 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8411, 0.9313, 4.1050, 1.7771, 2.0587, 4.8963, 4.2167, 4.4501], + device='cuda:1'), covar=tensor([0.1615, 0.2334, 0.0277, 0.2044, 0.1039, 0.0183, 0.0237, 0.0346], + device='cuda:1'), in_proj_covar=tensor([0.0240, 0.0266, 0.0180, 0.0254, 0.0193, 0.0144, 0.0142, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-05 19:46:59,487 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.192e+02 3.889e+02 4.981e+02 6.326e+02 1.555e+03, threshold=9.962e+02, percent-clipped=1.0 +2023-02-05 19:47:04,250 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9518.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:47:15,072 INFO [train.py:901] (1/4) Epoch 2, batch 1450, loss[loss=0.4216, simple_loss=0.4394, pruned_loss=0.2019, over 8337.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.421, pruned_loss=0.1824, over 1617799.11 frames. ], batch size: 26, lr: 3.34e-02, grad_scale: 4.0 +2023-02-05 19:47:19,043 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 19:47:49,259 INFO [train.py:901] (1/4) Epoch 2, batch 1500, loss[loss=0.4998, simple_loss=0.4998, pruned_loss=0.2499, over 8610.00 frames. ], tot_loss[loss=0.3909, simple_loss=0.4196, pruned_loss=0.1811, over 1618494.21 frames. ], batch size: 31, lr: 3.33e-02, grad_scale: 4.0 +2023-02-05 19:48:01,350 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:07,902 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.496e+02 4.006e+02 4.905e+02 6.157e+02 1.300e+03, threshold=9.811e+02, percent-clipped=3.0 +2023-02-05 19:48:23,391 INFO [train.py:901] (1/4) Epoch 2, batch 1550, loss[loss=0.372, simple_loss=0.4131, pruned_loss=0.1654, over 8447.00 frames. ], tot_loss[loss=0.3903, simple_loss=0.4193, pruned_loss=0.1806, over 1617952.08 frames. ], batch size: 27, lr: 3.33e-02, grad_scale: 4.0 +2023-02-05 19:48:41,695 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9660.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:52,347 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9676.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:57,573 INFO [train.py:901] (1/4) Epoch 2, batch 1600, loss[loss=0.3975, simple_loss=0.4261, pruned_loss=0.1845, over 8664.00 frames. ], tot_loss[loss=0.39, simple_loss=0.4189, pruned_loss=0.1806, over 1617414.17 frames. 
], batch size: 39, lr: 3.32e-02, grad_scale: 8.0 +2023-02-05 19:48:58,394 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9685.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:17,081 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.192e+02 5.177e+02 6.492e+02 1.266e+03, threshold=1.035e+03, percent-clipped=2.0 +2023-02-05 19:49:22,896 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9719.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:33,628 INFO [train.py:901] (1/4) Epoch 2, batch 1650, loss[loss=0.4183, simple_loss=0.4457, pruned_loss=0.1955, over 8470.00 frames. ], tot_loss[loss=0.391, simple_loss=0.4193, pruned_loss=0.1813, over 1619802.02 frames. ], batch size: 29, lr: 3.31e-02, grad_scale: 8.0 +2023-02-05 19:49:40,258 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9744.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:41,550 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9746.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:51,527 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:58,954 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9771.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:50:02,238 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:50:07,341 INFO [train.py:901] (1/4) Epoch 2, batch 1700, loss[loss=0.3455, simple_loss=0.3913, pruned_loss=0.1498, over 7978.00 frames. ], tot_loss[loss=0.3912, simple_loss=0.4194, pruned_loss=0.1815, over 1621133.19 frames. ], batch size: 21, lr: 3.31e-02, grad_scale: 8.0 +2023-02-05 19:50:26,240 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.640e+02 4.068e+02 5.098e+02 6.535e+02 1.207e+03, threshold=1.020e+03, percent-clipped=5.0 +2023-02-05 19:50:42,247 INFO [train.py:901] (1/4) Epoch 2, batch 1750, loss[loss=0.3973, simple_loss=0.4266, pruned_loss=0.184, over 8245.00 frames. ], tot_loss[loss=0.3894, simple_loss=0.4183, pruned_loss=0.1802, over 1619032.98 frames. ], batch size: 22, lr: 3.30e-02, grad_scale: 8.0 +2023-02-05 19:51:16,321 INFO [train.py:901] (1/4) Epoch 2, batch 1800, loss[loss=0.3697, simple_loss=0.402, pruned_loss=0.1686, over 8025.00 frames. ], tot_loss[loss=0.3905, simple_loss=0.4191, pruned_loss=0.1809, over 1618907.09 frames. ], batch size: 22, lr: 3.29e-02, grad_scale: 8.0 +2023-02-05 19:51:21,271 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:51:34,076 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.365e+02 4.111e+02 5.198e+02 6.626e+02 1.120e+03, threshold=1.040e+03, percent-clipped=3.0 +2023-02-05 19:51:49,949 INFO [train.py:901] (1/4) Epoch 2, batch 1850, loss[loss=0.3244, simple_loss=0.373, pruned_loss=0.1379, over 7419.00 frames. ], tot_loss[loss=0.3908, simple_loss=0.4195, pruned_loss=0.1811, over 1618957.19 frames. ], batch size: 17, lr: 3.29e-02, grad_scale: 8.0 +2023-02-05 19:51:51,600 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.92 vs. 
limit=2.0 +2023-02-05 19:51:58,651 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9946.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:52:11,388 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3374, 1.7804, 1.4396, 1.1696, 2.1074, 1.6052, 1.8790, 2.2281], + device='cuda:1'), covar=tensor([0.1141, 0.1926, 0.2529, 0.2158, 0.0976, 0.2182, 0.1274, 0.0861], + device='cuda:1'), in_proj_covar=tensor([0.0249, 0.0269, 0.0290, 0.0260, 0.0240, 0.0252, 0.0235, 0.0229], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:52:24,354 INFO [train.py:901] (1/4) Epoch 2, batch 1900, loss[loss=0.3323, simple_loss=0.3846, pruned_loss=0.14, over 8357.00 frames. ], tot_loss[loss=0.3879, simple_loss=0.4175, pruned_loss=0.1791, over 1615913.55 frames. ], batch size: 24, lr: 3.28e-02, grad_scale: 8.0 +2023-02-05 19:52:43,808 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.513e+02 4.327e+02 5.785e+02 1.080e+03, threshold=8.653e+02, percent-clipped=1.0 +2023-02-05 19:52:49,914 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:52:54,613 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 19:52:55,762 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.68 vs. limit=5.0 +2023-02-05 19:52:59,200 INFO [train.py:901] (1/4) Epoch 2, batch 1950, loss[loss=0.4162, simple_loss=0.4464, pruned_loss=0.193, over 8517.00 frames. ], tot_loss[loss=0.3865, simple_loss=0.4172, pruned_loss=0.1779, over 1616026.25 frames. ], batch size: 28, lr: 3.27e-02, grad_scale: 8.0 +2023-02-05 19:53:06,846 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 19:53:15,909 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10057.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:19,402 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:25,604 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 19:53:33,050 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10080.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:53:35,465 INFO [train.py:901] (1/4) Epoch 2, batch 2000, loss[loss=0.4084, simple_loss=0.4202, pruned_loss=0.1983, over 7930.00 frames. ], tot_loss[loss=0.3862, simple_loss=0.4169, pruned_loss=0.1778, over 1616185.74 frames. 
], batch size: 20, lr: 3.27e-02, grad_scale: 8.0 +2023-02-05 19:53:50,417 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:55,728 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.648e+02 4.167e+02 5.413e+02 6.926e+02 6.671e+03, threshold=1.083e+03, percent-clipped=14.0 +2023-02-05 19:54:09,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3152, 1.6495, 1.2195, 1.0694, 2.0348, 1.3153, 1.3997, 2.0868], + device='cuda:1'), covar=tensor([0.1060, 0.1879, 0.2671, 0.2380, 0.1045, 0.2179, 0.1396, 0.0851], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0263, 0.0281, 0.0260, 0.0236, 0.0246, 0.0230, 0.0224], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:54:10,560 INFO [train.py:901] (1/4) Epoch 2, batch 2050, loss[loss=0.3689, simple_loss=0.3997, pruned_loss=0.1691, over 7521.00 frames. ], tot_loss[loss=0.3854, simple_loss=0.4162, pruned_loss=0.1773, over 1617248.95 frames. ], batch size: 18, lr: 3.26e-02, grad_scale: 4.0 +2023-02-05 19:54:11,460 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10135.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:19,453 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:36,896 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:45,482 INFO [train.py:901] (1/4) Epoch 2, batch 2100, loss[loss=0.3311, simple_loss=0.3852, pruned_loss=0.1385, over 8343.00 frames. ], tot_loss[loss=0.384, simple_loss=0.4148, pruned_loss=0.1766, over 1613909.58 frames. ], batch size: 25, lr: 3.25e-02, grad_scale: 4.0 +2023-02-05 19:55:06,155 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.788e+02 4.646e+02 5.840e+02 1.328e+03, threshold=9.292e+02, percent-clipped=3.0 +2023-02-05 19:55:09,447 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 19:55:11,269 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:55:20,252 INFO [train.py:901] (1/4) Epoch 2, batch 2150, loss[loss=0.3521, simple_loss=0.3903, pruned_loss=0.157, over 8103.00 frames. ], tot_loss[loss=0.3866, simple_loss=0.4169, pruned_loss=0.1781, over 1615094.52 frames. ], batch size: 23, lr: 3.25e-02, grad_scale: 4.0 +2023-02-05 19:55:53,988 INFO [train.py:901] (1/4) Epoch 2, batch 2200, loss[loss=0.4029, simple_loss=0.4344, pruned_loss=0.1857, over 8255.00 frames. ], tot_loss[loss=0.3864, simple_loss=0.4166, pruned_loss=0.1781, over 1614106.32 frames. 
], batch size: 24, lr: 3.24e-02, grad_scale: 4.0 +2023-02-05 19:56:13,538 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1197, 2.1497, 3.8391, 4.0579, 3.1251, 1.6851, 1.1982, 1.9442], + device='cuda:1'), covar=tensor([0.1607, 0.1352, 0.0181, 0.0284, 0.0463, 0.0918, 0.1478, 0.1230], + device='cuda:1'), in_proj_covar=tensor([0.0445, 0.0351, 0.0252, 0.0291, 0.0373, 0.0328, 0.0347, 0.0397], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 19:56:14,586 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.803e+02 4.971e+02 6.310e+02 1.458e+03, threshold=9.942e+02, percent-clipped=6.0 +2023-02-05 19:56:18,166 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10317.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:56:29,273 INFO [train.py:901] (1/4) Epoch 2, batch 2250, loss[loss=0.3922, simple_loss=0.4307, pruned_loss=0.1769, over 8515.00 frames. ], tot_loss[loss=0.3868, simple_loss=0.4169, pruned_loss=0.1783, over 1615999.13 frames. ], batch size: 28, lr: 3.24e-02, grad_scale: 4.0 +2023-02-05 19:56:34,611 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10342.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:56:37,306 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6564, 1.0548, 3.1759, 1.1651, 1.9088, 3.8937, 3.4563, 3.3619], + device='cuda:1'), covar=tensor([0.1521, 0.1880, 0.0384, 0.2120, 0.0906, 0.0172, 0.0219, 0.0410], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0261, 0.0175, 0.0248, 0.0191, 0.0149, 0.0136, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-05 19:57:03,222 INFO [train.py:901] (1/4) Epoch 2, batch 2300, loss[loss=0.3645, simple_loss=0.4035, pruned_loss=0.1627, over 8508.00 frames. ], tot_loss[loss=0.3869, simple_loss=0.4174, pruned_loss=0.1782, over 1617265.54 frames. ], batch size: 49, lr: 3.23e-02, grad_scale: 4.0 +2023-02-05 19:57:08,303 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10391.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:15,021 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10401.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:23,819 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.389e+02 3.989e+02 5.161e+02 7.086e+02 1.471e+03, threshold=1.032e+03, percent-clipped=7.0 +2023-02-05 19:57:25,925 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10416.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:31,794 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10424.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:57:33,875 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:39,145 INFO [train.py:901] (1/4) Epoch 2, batch 2350, loss[loss=0.3223, simple_loss=0.3792, pruned_loss=0.1327, over 7787.00 frames. ], tot_loss[loss=0.3859, simple_loss=0.4169, pruned_loss=0.1774, over 1619616.40 frames. 
], batch size: 19, lr: 3.22e-02, grad_scale: 4.0 +2023-02-05 19:58:05,152 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10472.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:07,803 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:12,898 INFO [train.py:901] (1/4) Epoch 2, batch 2400, loss[loss=0.4567, simple_loss=0.4688, pruned_loss=0.2223, over 8465.00 frames. ], tot_loss[loss=0.384, simple_loss=0.4154, pruned_loss=0.1763, over 1615023.31 frames. ], batch size: 25, lr: 3.22e-02, grad_scale: 8.0 +2023-02-05 19:58:24,664 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:32,501 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.364e+02 3.956e+02 5.047e+02 6.263e+02 1.564e+03, threshold=1.009e+03, percent-clipped=2.0 +2023-02-05 19:58:34,737 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10516.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:36,282 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-05 19:58:47,320 INFO [train.py:901] (1/4) Epoch 2, batch 2450, loss[loss=0.3462, simple_loss=0.396, pruned_loss=0.1482, over 8498.00 frames. ], tot_loss[loss=0.385, simple_loss=0.4155, pruned_loss=0.1772, over 1610869.61 frames. ], batch size: 26, lr: 3.21e-02, grad_scale: 8.0 +2023-02-05 19:58:50,861 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10539.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:59:22,252 INFO [train.py:901] (1/4) Epoch 2, batch 2500, loss[loss=0.36, simple_loss=0.4057, pruned_loss=0.1571, over 8468.00 frames. ], tot_loss[loss=0.3834, simple_loss=0.4143, pruned_loss=0.1762, over 1610464.58 frames. ], batch size: 29, lr: 3.20e-02, grad_scale: 8.0 +2023-02-05 19:59:42,162 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.520e+02 3.522e+02 4.438e+02 6.473e+02 1.354e+03, threshold=8.876e+02, percent-clipped=4.0 +2023-02-05 19:59:53,425 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5997, 1.8099, 1.4814, 1.1078, 2.1371, 1.5928, 1.8902, 2.0854], + device='cuda:1'), covar=tensor([0.1051, 0.1740, 0.2333, 0.2241, 0.0880, 0.1871, 0.1229, 0.0897], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0263, 0.0290, 0.0256, 0.0235, 0.0250, 0.0229, 0.0225], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 19:59:55,950 INFO [train.py:901] (1/4) Epoch 2, batch 2550, loss[loss=0.384, simple_loss=0.4225, pruned_loss=0.1728, over 8561.00 frames. ], tot_loss[loss=0.382, simple_loss=0.4132, pruned_loss=0.1754, over 1613899.48 frames. 
], batch size: 31, lr: 3.20e-02, grad_scale: 8.0 +2023-02-05 19:59:58,132 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1268, 0.8819, 3.1557, 0.8946, 2.6406, 2.6291, 2.7390, 2.7955], + device='cuda:1'), covar=tensor([0.0340, 0.2829, 0.0396, 0.1538, 0.1118, 0.0470, 0.0371, 0.0439], + device='cuda:1'), in_proj_covar=tensor([0.0179, 0.0357, 0.0213, 0.0242, 0.0290, 0.0229, 0.0213, 0.0246], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:00:19,397 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.3119, 2.5962, 4.0163, 4.2184, 2.8761, 2.5072, 1.9614, 2.2593], + device='cuda:1'), covar=tensor([0.0720, 0.0932, 0.0180, 0.0199, 0.0434, 0.0344, 0.0476, 0.0865], + device='cuda:1'), in_proj_covar=tensor([0.0467, 0.0370, 0.0256, 0.0298, 0.0377, 0.0335, 0.0358, 0.0403], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:00:31,356 INFO [train.py:901] (1/4) Epoch 2, batch 2600, loss[loss=0.3594, simple_loss=0.3992, pruned_loss=0.1598, over 8472.00 frames. ], tot_loss[loss=0.381, simple_loss=0.4127, pruned_loss=0.1746, over 1619873.40 frames. ], batch size: 25, lr: 3.19e-02, grad_scale: 8.0 +2023-02-05 20:00:50,499 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 4.188e+02 4.914e+02 6.333e+02 1.432e+03, threshold=9.828e+02, percent-clipped=6.0 +2023-02-05 20:01:05,101 INFO [train.py:901] (1/4) Epoch 2, batch 2650, loss[loss=0.3412, simple_loss=0.3835, pruned_loss=0.1494, over 8033.00 frames. ], tot_loss[loss=0.3821, simple_loss=0.4139, pruned_loss=0.1751, over 1624235.76 frames. ], batch size: 22, lr: 3.19e-02, grad_scale: 8.0 +2023-02-05 20:01:19,079 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-05 20:01:24,207 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10762.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:30,878 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10771.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:31,713 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10772.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:40,307 INFO [train.py:901] (1/4) Epoch 2, batch 2700, loss[loss=0.3782, simple_loss=0.4088, pruned_loss=0.1738, over 8617.00 frames. ], tot_loss[loss=0.384, simple_loss=0.4146, pruned_loss=0.1767, over 1619989.17 frames. 
], batch size: 34, lr: 3.18e-02, grad_scale: 8.0 +2023-02-05 20:01:46,658 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10792.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:48,801 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10795.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:01:50,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10797.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:02:01,031 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.290e+02 4.005e+02 5.458e+02 7.000e+02 2.619e+03, threshold=1.092e+03, percent-clipped=7.0 +2023-02-05 20:02:03,279 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10816.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:02:06,059 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10820.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:02:15,182 INFO [train.py:901] (1/4) Epoch 2, batch 2750, loss[loss=0.3701, simple_loss=0.372, pruned_loss=0.1841, over 7429.00 frames. ], tot_loss[loss=0.3828, simple_loss=0.4142, pruned_loss=0.1757, over 1620489.35 frames. ], batch size: 17, lr: 3.17e-02, grad_scale: 8.0 +2023-02-05 20:02:49,765 INFO [train.py:901] (1/4) Epoch 2, batch 2800, loss[loss=0.3661, simple_loss=0.3929, pruned_loss=0.1697, over 7662.00 frames. ], tot_loss[loss=0.382, simple_loss=0.4129, pruned_loss=0.1755, over 1615661.67 frames. ], batch size: 19, lr: 3.17e-02, grad_scale: 8.0 +2023-02-05 20:02:49,989 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4389, 1.5377, 2.8405, 0.9955, 1.9164, 1.8998, 1.2848, 1.7027], + device='cuda:1'), covar=tensor([0.1276, 0.1401, 0.0350, 0.1960, 0.0954, 0.1433, 0.1372, 0.1146], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0341, 0.0365, 0.0392, 0.0440, 0.0409, 0.0358, 0.0434], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 20:02:51,262 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10886.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:03:03,334 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10903.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:03:10,630 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.367e+02 3.535e+02 4.531e+02 6.001e+02 1.335e+03, threshold=9.062e+02, percent-clipped=2.0 +2023-02-05 20:03:23,141 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10931.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:03:25,025 INFO [train.py:901] (1/4) Epoch 2, batch 2850, loss[loss=0.318, simple_loss=0.3733, pruned_loss=0.1314, over 7660.00 frames. ], tot_loss[loss=0.3815, simple_loss=0.4127, pruned_loss=0.1752, over 1611487.46 frames. ], batch size: 19, lr: 3.16e-02, grad_scale: 8.0 +2023-02-05 20:03:59,104 INFO [train.py:901] (1/4) Epoch 2, batch 2900, loss[loss=0.3203, simple_loss=0.3821, pruned_loss=0.1292, over 8024.00 frames. ], tot_loss[loss=0.3826, simple_loss=0.4138, pruned_loss=0.1757, over 1611721.56 frames. 
], batch size: 22, lr: 3.16e-02, grad_scale: 8.0 +2023-02-05 20:04:19,453 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.551e+02 4.216e+02 5.196e+02 6.845e+02 2.226e+03, threshold=1.039e+03, percent-clipped=10.0 +2023-02-05 20:04:34,447 INFO [train.py:901] (1/4) Epoch 2, batch 2950, loss[loss=0.482, simple_loss=0.4776, pruned_loss=0.2432, over 7642.00 frames. ], tot_loss[loss=0.3813, simple_loss=0.4126, pruned_loss=0.1751, over 1608074.97 frames. ], batch size: 19, lr: 3.15e-02, grad_scale: 8.0 +2023-02-05 20:04:39,272 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 20:05:08,646 INFO [train.py:901] (1/4) Epoch 2, batch 3000, loss[loss=0.3714, simple_loss=0.4199, pruned_loss=0.1614, over 8253.00 frames. ], tot_loss[loss=0.3815, simple_loss=0.413, pruned_loss=0.175, over 1611790.10 frames. ], batch size: 24, lr: 3.14e-02, grad_scale: 8.0 +2023-02-05 20:05:08,646 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 20:05:24,854 INFO [train.py:935] (1/4) Epoch 2, validation: loss=0.2878, simple_loss=0.369, pruned_loss=0.1033, over 944034.00 frames. +2023-02-05 20:05:24,855 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6470MB +2023-02-05 20:05:40,476 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:05:45,161 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.542e+02 3.795e+02 4.955e+02 6.193e+02 1.384e+03, threshold=9.910e+02, percent-clipped=4.0 +2023-02-05 20:06:00,081 INFO [train.py:901] (1/4) Epoch 2, batch 3050, loss[loss=0.3554, simple_loss=0.4032, pruned_loss=0.1538, over 8193.00 frames. ], tot_loss[loss=0.3807, simple_loss=0.4129, pruned_loss=0.1743, over 1613056.93 frames. ], batch size: 23, lr: 3.14e-02, grad_scale: 8.0 +2023-02-05 20:06:01,581 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:03,723 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6771, 1.5525, 1.2970, 1.1072, 1.5682, 1.4027, 1.4633, 1.5728], + device='cuda:1'), covar=tensor([0.1215, 0.1633, 0.2496, 0.2207, 0.0964, 0.2031, 0.1240, 0.1000], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0261, 0.0284, 0.0257, 0.0231, 0.0248, 0.0227, 0.0223], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 20:06:05,912 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11142.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:14,864 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0364, 2.4476, 3.9268, 3.9098, 3.2524, 2.4230, 1.6787, 2.4196], + device='cuda:1'), covar=tensor([0.0690, 0.0868, 0.0148, 0.0200, 0.0315, 0.0339, 0.0534, 0.0707], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0382, 0.0270, 0.0310, 0.0399, 0.0343, 0.0367, 0.0411], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:06:24,331 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:35,123 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. 
limit=2.0 +2023-02-05 20:06:35,407 INFO [train.py:901] (1/4) Epoch 2, batch 3100, loss[loss=0.3853, simple_loss=0.422, pruned_loss=0.1743, over 8332.00 frames. ], tot_loss[loss=0.3781, simple_loss=0.4112, pruned_loss=0.1725, over 1613749.24 frames. ], batch size: 25, lr: 3.13e-02, grad_scale: 8.0 +2023-02-05 20:06:37,609 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11187.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:40,852 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:55,409 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:55,869 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.930e+02 4.987e+02 6.652e+02 1.229e+03, threshold=9.974e+02, percent-clipped=5.0 +2023-02-05 20:07:01,738 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:07:10,348 INFO [train.py:901] (1/4) Epoch 2, batch 3150, loss[loss=0.4007, simple_loss=0.4444, pruned_loss=0.1785, over 8504.00 frames. ], tot_loss[loss=0.3773, simple_loss=0.4107, pruned_loss=0.1719, over 1615412.95 frames. ], batch size: 26, lr: 3.13e-02, grad_scale: 8.0 +2023-02-05 20:07:20,139 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11247.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:07:22,984 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11251.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:07:29,924 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2463, 1.4618, 1.6945, 1.0407, 0.9340, 1.7071, 0.1620, 0.9974], + device='cuda:1'), covar=tensor([0.0873, 0.0461, 0.0381, 0.0643, 0.0894, 0.0311, 0.1714, 0.0814], + device='cuda:1'), in_proj_covar=tensor([0.0116, 0.0094, 0.0086, 0.0131, 0.0120, 0.0080, 0.0157, 0.0127], + device='cuda:1'), out_proj_covar=tensor([1.1081e-04, 9.6578e-05, 8.2430e-05, 1.2308e-04, 1.2024e-04, 7.6526e-05, + 1.4727e-04, 1.2461e-04], device='cuda:1') +2023-02-05 20:07:34,696 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8889, 1.1072, 4.1046, 1.7341, 3.3113, 3.3698, 3.5104, 3.5551], + device='cuda:1'), covar=tensor([0.0596, 0.3476, 0.0282, 0.1507, 0.1092, 0.0408, 0.0392, 0.0481], + device='cuda:1'), in_proj_covar=tensor([0.0182, 0.0357, 0.0220, 0.0253, 0.0296, 0.0234, 0.0216, 0.0245], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:07:46,078 INFO [train.py:901] (1/4) Epoch 2, batch 3200, loss[loss=0.3347, simple_loss=0.3933, pruned_loss=0.1381, over 8145.00 frames. ], tot_loss[loss=0.3767, simple_loss=0.4103, pruned_loss=0.1716, over 1611940.16 frames. ], batch size: 22, lr: 3.12e-02, grad_scale: 8.0 +2023-02-05 20:08:06,208 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 3.889e+02 4.508e+02 6.050e+02 1.565e+03, threshold=9.016e+02, percent-clipped=4.0 +2023-02-05 20:08:21,225 INFO [train.py:901] (1/4) Epoch 2, batch 3250, loss[loss=0.3461, simple_loss=0.3955, pruned_loss=0.1483, over 8355.00 frames. ], tot_loss[loss=0.3723, simple_loss=0.4065, pruned_loss=0.169, over 1610624.29 frames. 
], batch size: 24, lr: 3.11e-02, grad_scale: 8.0 +2023-02-05 20:08:39,999 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11362.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:08:55,031 INFO [train.py:901] (1/4) Epoch 2, batch 3300, loss[loss=0.3435, simple_loss=0.3825, pruned_loss=0.1523, over 8099.00 frames. ], tot_loss[loss=0.3738, simple_loss=0.4079, pruned_loss=0.1698, over 1609443.09 frames. ], batch size: 21, lr: 3.11e-02, grad_scale: 8.0 +2023-02-05 20:09:16,012 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.686e+02 3.650e+02 4.417e+02 5.589e+02 1.513e+03, threshold=8.834e+02, percent-clipped=8.0 +2023-02-05 20:09:30,178 INFO [train.py:901] (1/4) Epoch 2, batch 3350, loss[loss=0.3913, simple_loss=0.426, pruned_loss=0.1783, over 8286.00 frames. ], tot_loss[loss=0.3761, simple_loss=0.4097, pruned_loss=0.1712, over 1606913.61 frames. ], batch size: 23, lr: 3.10e-02, grad_scale: 8.0 +2023-02-05 20:09:42,542 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-05 20:10:00,577 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11477.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:05,250 INFO [train.py:901] (1/4) Epoch 2, batch 3400, loss[loss=0.3887, simple_loss=0.4258, pruned_loss=0.1758, over 8465.00 frames. ], tot_loss[loss=0.3772, simple_loss=0.4105, pruned_loss=0.172, over 1607259.74 frames. ], batch size: 25, lr: 3.10e-02, grad_scale: 8.0 +2023-02-05 20:10:09,504 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3276, 1.3424, 4.5005, 1.9830, 3.8509, 3.6776, 3.9355, 3.9631], + device='cuda:1'), covar=tensor([0.0280, 0.2717, 0.0199, 0.1139, 0.0759, 0.0281, 0.0250, 0.0330], + device='cuda:1'), in_proj_covar=tensor([0.0180, 0.0365, 0.0222, 0.0252, 0.0310, 0.0244, 0.0225, 0.0256], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:10:17,703 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11502.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:21,207 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11507.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:25,780 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.120e+02 3.730e+02 4.591e+02 5.662e+02 1.223e+03, threshold=9.181e+02, percent-clipped=5.0 +2023-02-05 20:10:39,494 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11532.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:40,653 INFO [train.py:901] (1/4) Epoch 2, batch 3450, loss[loss=0.37, simple_loss=0.4027, pruned_loss=0.1687, over 7936.00 frames. ], tot_loss[loss=0.3775, simple_loss=0.411, pruned_loss=0.172, over 1610064.95 frames. ], batch size: 20, lr: 3.09e-02, grad_scale: 8.0 +2023-02-05 20:10:42,063 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:51,626 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11550.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:11:15,658 INFO [train.py:901] (1/4) Epoch 2, batch 3500, loss[loss=0.3235, simple_loss=0.359, pruned_loss=0.144, over 7530.00 frames. ], tot_loss[loss=0.3768, simple_loss=0.4103, pruned_loss=0.1717, over 1605115.82 frames. 
], batch size: 18, lr: 3.09e-02, grad_scale: 8.0 +2023-02-05 20:11:19,136 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.6695, 0.9980, 3.7487, 1.3194, 3.1667, 3.0746, 3.2458, 3.2249], + device='cuda:1'), covar=tensor([0.0325, 0.3205, 0.0247, 0.1426, 0.0919, 0.0378, 0.0344, 0.0466], + device='cuda:1'), in_proj_covar=tensor([0.0180, 0.0370, 0.0225, 0.0258, 0.0307, 0.0246, 0.0228, 0.0255], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:11:35,939 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 4.071e+02 4.877e+02 6.297e+02 1.257e+03, threshold=9.753e+02, percent-clipped=3.0 +2023-02-05 20:11:39,549 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11618.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:11:40,718 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 20:11:50,751 INFO [train.py:901] (1/4) Epoch 2, batch 3550, loss[loss=0.4031, simple_loss=0.436, pruned_loss=0.1851, over 8445.00 frames. ], tot_loss[loss=0.3753, simple_loss=0.4085, pruned_loss=0.171, over 1603298.48 frames. ], batch size: 27, lr: 3.08e-02, grad_scale: 8.0 +2023-02-05 20:11:57,567 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11643.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:12:02,967 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:04,328 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:25,617 INFO [train.py:901] (1/4) Epoch 2, batch 3600, loss[loss=0.3924, simple_loss=0.4295, pruned_loss=0.1776, over 8021.00 frames. ], tot_loss[loss=0.3732, simple_loss=0.4074, pruned_loss=0.1694, over 1606874.21 frames. ], batch size: 22, lr: 3.08e-02, grad_scale: 8.0 +2023-02-05 20:12:30,444 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7013, 1.2264, 2.9991, 1.3926, 2.0252, 3.4023, 3.1498, 2.9718], + device='cuda:1'), covar=tensor([0.1277, 0.1768, 0.0353, 0.1809, 0.0760, 0.0194, 0.0235, 0.0466], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0267, 0.0177, 0.0252, 0.0202, 0.0150, 0.0146, 0.0217], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:12:39,209 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-05 20:12:45,382 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.676e+02 3.688e+02 4.691e+02 6.662e+02 1.491e+03, threshold=9.383e+02, percent-clipped=3.0 +2023-02-05 20:12:48,295 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11717.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:59,434 INFO [train.py:901] (1/4) Epoch 2, batch 3650, loss[loss=0.347, simple_loss=0.3794, pruned_loss=0.1573, over 8357.00 frames. ], tot_loss[loss=0.3731, simple_loss=0.4072, pruned_loss=0.1695, over 1603039.71 frames. 
], batch size: 24, lr: 3.07e-02, grad_scale: 8.0 +2023-02-05 20:13:04,729 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3858, 4.5738, 3.8823, 1.8727, 3.7010, 3.6231, 4.1195, 3.2437], + device='cuda:1'), covar=tensor([0.0951, 0.0452, 0.0896, 0.4068, 0.0611, 0.0586, 0.1121, 0.0755], + device='cuda:1'), in_proj_covar=tensor([0.0304, 0.0206, 0.0246, 0.0331, 0.0221, 0.0170, 0.0236, 0.0161], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:13:23,237 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.4633, 5.6617, 4.9275, 1.7961, 4.8667, 4.8292, 5.1436, 4.5928], + device='cuda:1'), covar=tensor([0.0806, 0.0319, 0.0723, 0.4292, 0.0475, 0.0509, 0.0880, 0.0421], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0209, 0.0249, 0.0336, 0.0224, 0.0171, 0.0238, 0.0161], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:13:23,559 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.71 vs. limit=2.0 +2023-02-05 20:13:33,823 INFO [train.py:901] (1/4) Epoch 2, batch 3700, loss[loss=0.3364, simple_loss=0.3714, pruned_loss=0.1508, over 7706.00 frames. ], tot_loss[loss=0.3755, simple_loss=0.4094, pruned_loss=0.1708, over 1610728.03 frames. ], batch size: 18, lr: 3.06e-02, grad_scale: 8.0 +2023-02-05 20:13:44,414 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:13:46,973 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0 +2023-02-05 20:13:53,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.910e+02 4.224e+02 5.211e+02 6.213e+02 2.304e+03, threshold=1.042e+03, percent-clipped=10.0 +2023-02-05 20:14:08,511 INFO [train.py:901] (1/4) Epoch 2, batch 3750, loss[loss=0.38, simple_loss=0.4106, pruned_loss=0.1747, over 8233.00 frames. ], tot_loss[loss=0.3744, simple_loss=0.4085, pruned_loss=0.1702, over 1611190.79 frames. ], batch size: 22, lr: 3.06e-02, grad_scale: 8.0 +2023-02-05 20:14:08,621 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11834.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:28,576 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11864.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:43,027 INFO [train.py:901] (1/4) Epoch 2, batch 3800, loss[loss=0.338, simple_loss=0.3918, pruned_loss=0.1422, over 8454.00 frames. ], tot_loss[loss=0.374, simple_loss=0.4078, pruned_loss=0.1701, over 1607967.39 frames. 
], batch size: 27, lr: 3.05e-02, grad_scale: 8.0 +2023-02-05 20:14:49,683 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11894.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:58,770 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:02,633 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.750e+02 4.056e+02 4.773e+02 6.198e+02 1.391e+03, threshold=9.546e+02, percent-clipped=3.0 +2023-02-05 20:15:07,557 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9423, 2.3406, 2.0798, 2.7962, 1.3081, 1.2252, 1.7687, 2.1940], + device='cuda:1'), covar=tensor([0.1232, 0.1146, 0.1235, 0.0430, 0.2074, 0.2118, 0.2026, 0.1154], + device='cuda:1'), in_proj_covar=tensor([0.0317, 0.0337, 0.0327, 0.0214, 0.0330, 0.0329, 0.0372, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:1') +2023-02-05 20:15:16,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:17,489 INFO [train.py:901] (1/4) Epoch 2, batch 3850, loss[loss=0.4148, simple_loss=0.4517, pruned_loss=0.1889, over 8033.00 frames. ], tot_loss[loss=0.3743, simple_loss=0.4087, pruned_loss=0.17, over 1610717.01 frames. ], batch size: 22, lr: 3.05e-02, grad_scale: 8.0 +2023-02-05 20:15:20,311 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11938.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:39,746 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11966.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:47,063 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 20:15:51,653 INFO [train.py:901] (1/4) Epoch 2, batch 3900, loss[loss=0.369, simple_loss=0.4138, pruned_loss=0.1621, over 8619.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.4092, pruned_loss=0.1699, over 1610223.45 frames. ], batch size: 31, lr: 3.04e-02, grad_scale: 8.0 +2023-02-05 20:16:01,111 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11997.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:06,092 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12002.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:10,874 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12009.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:13,175 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.113e+02 3.926e+02 4.686e+02 5.678e+02 1.222e+03, threshold=9.373e+02, percent-clipped=4.0 +2023-02-05 20:16:28,134 INFO [train.py:901] (1/4) Epoch 2, batch 3950, loss[loss=0.364, simple_loss=0.3761, pruned_loss=0.1759, over 7432.00 frames. ], tot_loss[loss=0.3732, simple_loss=0.4082, pruned_loss=0.169, over 1612125.04 frames. ], batch size: 17, lr: 3.04e-02, grad_scale: 8.0 +2023-02-05 20:16:46,992 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:02,485 INFO [train.py:901] (1/4) Epoch 2, batch 4000, loss[loss=0.3556, simple_loss=0.386, pruned_loss=0.1626, over 7195.00 frames. ], tot_loss[loss=0.3721, simple_loss=0.4079, pruned_loss=0.1681, over 1615536.86 frames. 
], batch size: 16, lr: 3.03e-02, grad_scale: 8.0 +2023-02-05 20:17:09,214 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12094.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:22,644 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12112.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:23,112 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.955e+02 4.453e+02 5.904e+02 7.845e+02 2.502e+03, threshold=1.181e+03, percent-clipped=13.0 +2023-02-05 20:17:36,860 INFO [train.py:901] (1/4) Epoch 2, batch 4050, loss[loss=0.3433, simple_loss=0.3925, pruned_loss=0.1471, over 8286.00 frames. ], tot_loss[loss=0.3748, simple_loss=0.41, pruned_loss=0.1698, over 1616158.73 frames. ], batch size: 23, lr: 3.03e-02, grad_scale: 16.0 +2023-02-05 20:18:06,024 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:06,916 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.09 vs. limit=2.0 +2023-02-05 20:18:07,288 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12178.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:11,164 INFO [train.py:901] (1/4) Epoch 2, batch 4100, loss[loss=0.362, simple_loss=0.4027, pruned_loss=0.1607, over 7796.00 frames. ], tot_loss[loss=0.3761, simple_loss=0.4105, pruned_loss=0.1709, over 1614388.62 frames. ], batch size: 19, lr: 3.02e-02, grad_scale: 16.0 +2023-02-05 20:18:16,106 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9664, 1.5300, 1.3158, 1.2465, 1.9251, 1.4840, 1.3621, 1.6402], + device='cuda:1'), covar=tensor([0.0898, 0.1572, 0.2082, 0.1692, 0.0739, 0.1692, 0.1080, 0.0792], + device='cuda:1'), in_proj_covar=tensor([0.0225, 0.0254, 0.0275, 0.0246, 0.0223, 0.0243, 0.0216, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004, 0.0004], + device='cuda:1') +2023-02-05 20:18:16,882 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8083, 1.9181, 2.2676, 0.8618, 2.1515, 1.5982, 0.4982, 1.9286], + device='cuda:1'), covar=tensor([0.0102, 0.0072, 0.0073, 0.0172, 0.0088, 0.0241, 0.0277, 0.0081], + device='cuda:1'), in_proj_covar=tensor([0.0181, 0.0125, 0.0121, 0.0183, 0.0129, 0.0233, 0.0188, 0.0160], + device='cuda:1'), out_proj_covar=tensor([1.1337e-04, 7.8511e-05, 8.0131e-05, 1.1457e-04, 8.6559e-05, 1.5844e-04, + 1.2213e-04, 1.0361e-04], device='cuda:1') +2023-02-05 20:18:27,597 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12208.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:28,926 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6768, 1.3025, 3.3536, 1.2449, 2.2110, 3.8339, 3.4509, 3.2379], + device='cuda:1'), covar=tensor([0.1166, 0.1711, 0.0316, 0.1991, 0.0791, 0.0220, 0.0282, 0.0439], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0255, 0.0177, 0.0249, 0.0191, 0.0154, 0.0143, 0.0215], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:18:30,897 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.728e+02 4.672e+02 5.863e+02 2.072e+03, threshold=9.344e+02, percent-clipped=1.0 +2023-02-05 20:18:47,032 INFO [train.py:901] (1/4) Epoch 2, batch 4150, loss[loss=0.3691, simple_loss=0.4281, pruned_loss=0.155, over 8362.00 frames. 
], tot_loss[loss=0.3757, simple_loss=0.4109, pruned_loss=0.1702, over 1618956.19 frames. ], batch size: 24, lr: 3.02e-02, grad_scale: 16.0 +2023-02-05 20:19:08,905 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12265.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:20,418 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12282.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:21,719 INFO [train.py:901] (1/4) Epoch 2, batch 4200, loss[loss=0.3195, simple_loss=0.3887, pruned_loss=0.1251, over 8280.00 frames. ], tot_loss[loss=0.3749, simple_loss=0.4101, pruned_loss=0.1698, over 1619263.62 frames. ], batch size: 23, lr: 3.01e-02, grad_scale: 16.0 +2023-02-05 20:19:25,931 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12290.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:28,632 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:40,064 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12310.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:42,059 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.324e+02 3.573e+02 4.694e+02 5.833e+02 1.413e+03, threshold=9.388e+02, percent-clipped=6.0 +2023-02-05 20:19:43,535 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 20:19:49,252 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12323.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:57,053 INFO [train.py:901] (1/4) Epoch 2, batch 4250, loss[loss=0.3856, simple_loss=0.4255, pruned_loss=0.1729, over 8492.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.4098, pruned_loss=0.1695, over 1616661.25 frames. ], batch size: 26, lr: 3.01e-02, grad_scale: 16.0 +2023-02-05 20:20:06,019 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:06,628 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 20:20:20,970 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12368.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:32,224 INFO [train.py:901] (1/4) Epoch 2, batch 4300, loss[loss=0.4376, simple_loss=0.4548, pruned_loss=0.2102, over 8640.00 frames. ], tot_loss[loss=0.3737, simple_loss=0.4096, pruned_loss=0.169, over 1616164.96 frames. 
], batch size: 39, lr: 3.00e-02, grad_scale: 16.0 +2023-02-05 20:20:38,552 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12393.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:41,196 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12397.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:53,215 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.244e+02 3.864e+02 4.648e+02 5.983e+02 1.525e+03, threshold=9.296e+02, percent-clipped=6.0 +2023-02-05 20:21:00,853 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:05,798 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:06,902 INFO [train.py:901] (1/4) Epoch 2, batch 4350, loss[loss=0.3822, simple_loss=0.4067, pruned_loss=0.1789, over 8656.00 frames. ], tot_loss[loss=0.3715, simple_loss=0.4075, pruned_loss=0.1677, over 1611971.84 frames. ], batch size: 49, lr: 2.99e-02, grad_scale: 8.0 +2023-02-05 20:21:09,316 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.91 vs. limit=2.0 +2023-02-05 20:21:09,737 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12438.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:23,515 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12457.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:26,041 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12461.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:28,052 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:38,137 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 20:21:42,134 INFO [train.py:901] (1/4) Epoch 2, batch 4400, loss[loss=0.4562, simple_loss=0.4507, pruned_loss=0.2308, over 7941.00 frames. ], tot_loss[loss=0.3714, simple_loss=0.407, pruned_loss=0.1679, over 1612056.26 frames. ], batch size: 20, lr: 2.99e-02, grad_scale: 8.0 +2023-02-05 20:22:02,397 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.494e+02 4.041e+02 4.964e+02 6.742e+02 1.213e+03, threshold=9.928e+02, percent-clipped=4.0 +2023-02-05 20:22:16,723 INFO [train.py:901] (1/4) Epoch 2, batch 4450, loss[loss=0.3501, simple_loss=0.3814, pruned_loss=0.1594, over 8079.00 frames. ], tot_loss[loss=0.3713, simple_loss=0.407, pruned_loss=0.1679, over 1611699.80 frames. ], batch size: 21, lr: 2.98e-02, grad_scale: 8.0 +2023-02-05 20:22:17,404 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 20:22:27,255 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:28,829 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. 
limit=2.0 +2023-02-05 20:22:29,912 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12553.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:45,079 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12574.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:49,126 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:52,408 INFO [train.py:901] (1/4) Epoch 2, batch 4500, loss[loss=0.4176, simple_loss=0.4315, pruned_loss=0.2018, over 8495.00 frames. ], tot_loss[loss=0.3694, simple_loss=0.4059, pruned_loss=0.1665, over 1613611.31 frames. ], batch size: 26, lr: 2.98e-02, grad_scale: 8.0 +2023-02-05 20:23:06,028 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12604.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:12,547 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 20:23:13,222 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.312e+02 4.309e+02 5.092e+02 6.256e+02 1.421e+03, threshold=1.018e+03, percent-clipped=5.0 +2023-02-05 20:23:25,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5434, 2.0502, 3.5312, 3.0474, 2.6059, 2.0706, 1.4379, 1.9047], + device='cuda:1'), covar=tensor([0.0597, 0.0736, 0.0118, 0.0227, 0.0317, 0.0305, 0.0442, 0.0608], + device='cuda:1'), in_proj_covar=tensor([0.0492, 0.0402, 0.0293, 0.0337, 0.0420, 0.0363, 0.0385, 0.0419], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:23:27,083 INFO [train.py:901] (1/4) Epoch 2, batch 4550, loss[loss=0.3917, simple_loss=0.4252, pruned_loss=0.179, over 8504.00 frames. ], tot_loss[loss=0.3683, simple_loss=0.4044, pruned_loss=0.1661, over 1610294.99 frames. ], batch size: 26, lr: 2.97e-02, grad_scale: 8.0 +2023-02-05 20:23:40,670 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:57,691 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12678.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:59,767 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:01,607 INFO [train.py:901] (1/4) Epoch 2, batch 4600, loss[loss=0.3519, simple_loss=0.4062, pruned_loss=0.1488, over 8521.00 frames. ], tot_loss[loss=0.3679, simple_loss=0.4042, pruned_loss=0.1658, over 1610315.84 frames. ], batch size: 26, lr: 2.97e-02, grad_scale: 8.0 +2023-02-05 20:24:17,918 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:23,143 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 3.817e+02 4.647e+02 5.826e+02 1.354e+03, threshold=9.293e+02, percent-clipped=3.0 +2023-02-05 20:24:25,437 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12717.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:26,985 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.13 vs. 
limit=5.0 +2023-02-05 20:24:31,992 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.5874, 2.5804, 4.1258, 4.3168, 3.3072, 2.5892, 1.8100, 2.2889], + device='cuda:1'), covar=tensor([0.0471, 0.0774, 0.0115, 0.0179, 0.0263, 0.0257, 0.0423, 0.0565], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0402, 0.0291, 0.0341, 0.0419, 0.0362, 0.0383, 0.0417], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:24:37,073 INFO [train.py:901] (1/4) Epoch 2, batch 4650, loss[loss=0.3392, simple_loss=0.381, pruned_loss=0.1487, over 7803.00 frames. ], tot_loss[loss=0.3681, simple_loss=0.404, pruned_loss=0.1661, over 1609395.34 frames. ], batch size: 20, lr: 2.96e-02, grad_scale: 8.0 +2023-02-05 20:24:42,619 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:46,544 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5493, 1.1529, 4.5342, 1.9326, 4.0414, 3.7942, 3.9922, 3.9338], + device='cuda:1'), covar=tensor([0.0267, 0.3158, 0.0219, 0.1398, 0.0758, 0.0338, 0.0313, 0.0361], + device='cuda:1'), in_proj_covar=tensor([0.0184, 0.0376, 0.0234, 0.0267, 0.0322, 0.0259, 0.0243, 0.0264], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:24:47,918 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4411, 1.7668, 1.3521, 1.1730, 1.9954, 1.4739, 1.6492, 1.5806], + device='cuda:1'), covar=tensor([0.0899, 0.1554, 0.2190, 0.1860, 0.0788, 0.1797, 0.1098, 0.0916], + device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0253, 0.0278, 0.0248, 0.0224, 0.0245, 0.0216, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:1') +2023-02-05 20:25:09,730 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:11,634 INFO [train.py:901] (1/4) Epoch 2, batch 4700, loss[loss=0.3282, simple_loss=0.3727, pruned_loss=0.1419, over 8240.00 frames. ], tot_loss[loss=0.365, simple_loss=0.4017, pruned_loss=0.1641, over 1611444.45 frames. ], batch size: 22, lr: 2.96e-02, grad_scale: 8.0 +2023-02-05 20:25:28,058 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12808.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:29,556 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12809.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:32,791 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.540e+02 4.122e+02 5.358e+02 6.927e+02 1.344e+03, threshold=1.072e+03, percent-clipped=8.0 +2023-02-05 20:25:47,171 INFO [train.py:901] (1/4) Epoch 2, batch 4750, loss[loss=0.4306, simple_loss=0.4365, pruned_loss=0.2124, over 7325.00 frames. ], tot_loss[loss=0.3662, simple_loss=0.4021, pruned_loss=0.1651, over 1607124.37 frames. 
], batch size: 71, lr: 2.95e-02, grad_scale: 8.0 +2023-02-05 20:25:47,388 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12834.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:26:01,688 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9373, 4.1416, 3.6295, 1.8091, 3.5501, 3.5054, 3.7212, 2.9867], + device='cuda:1'), covar=tensor([0.0817, 0.0428, 0.0739, 0.3556, 0.0497, 0.0568, 0.0852, 0.0592], + device='cuda:1'), in_proj_covar=tensor([0.0317, 0.0209, 0.0253, 0.0333, 0.0230, 0.0173, 0.0235, 0.0156], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:26:12,046 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4601, 2.1293, 3.6776, 1.0865, 2.6406, 1.7528, 1.5387, 2.1674], + device='cuda:1'), covar=tensor([0.0974, 0.0974, 0.0301, 0.1521, 0.0903, 0.1468, 0.0883, 0.1256], + device='cuda:1'), in_proj_covar=tensor([0.0377, 0.0365, 0.0403, 0.0423, 0.0481, 0.0425, 0.0379, 0.0479], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 20:26:18,679 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 20:26:20,730 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 20:26:22,726 INFO [train.py:901] (1/4) Epoch 2, batch 4800, loss[loss=0.3423, simple_loss=0.3741, pruned_loss=0.1553, over 7806.00 frames. ], tot_loss[loss=0.3665, simple_loss=0.4023, pruned_loss=0.1654, over 1603996.99 frames. ], batch size: 20, lr: 2.95e-02, grad_scale: 8.0 +2023-02-05 20:26:25,597 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4425, 4.6585, 4.0177, 1.5816, 3.9251, 4.0892, 4.1539, 3.4725], + device='cuda:1'), covar=tensor([0.0871, 0.0296, 0.0659, 0.4239, 0.0477, 0.0453, 0.0821, 0.0485], + device='cuda:1'), in_proj_covar=tensor([0.0323, 0.0212, 0.0259, 0.0344, 0.0236, 0.0180, 0.0243, 0.0161], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 20:26:36,806 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7794, 1.2492, 3.9645, 1.6947, 3.3637, 3.2200, 3.4574, 3.4917], + device='cuda:1'), covar=tensor([0.0370, 0.3329, 0.0226, 0.1635, 0.0929, 0.0403, 0.0387, 0.0431], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0387, 0.0238, 0.0275, 0.0331, 0.0264, 0.0249, 0.0268], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 20:26:43,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.314e+02 3.678e+02 4.471e+02 5.888e+02 1.234e+03, threshold=8.941e+02, percent-clipped=3.0 +2023-02-05 20:26:49,701 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12923.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:26:57,686 INFO [train.py:901] (1/4) Epoch 2, batch 4850, loss[loss=0.3601, simple_loss=0.377, pruned_loss=0.1716, over 7690.00 frames. ], tot_loss[loss=0.3654, simple_loss=0.4017, pruned_loss=0.1646, over 1604016.51 frames. 
], batch size: 18, lr: 2.94e-02, grad_scale: 8.0 +2023-02-05 20:27:05,034 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0941, 1.3928, 1.5358, 1.0288, 0.8107, 1.5353, 0.2649, 0.7764], + device='cuda:1'), covar=tensor([0.1156, 0.0837, 0.0604, 0.1035, 0.1140, 0.0526, 0.2338, 0.1228], + device='cuda:1'), in_proj_covar=tensor([0.0116, 0.0095, 0.0087, 0.0149, 0.0125, 0.0086, 0.0159, 0.0125], + device='cuda:1'), out_proj_covar=tensor([1.1890e-04, 1.0350e-04, 9.0018e-05, 1.4585e-04, 1.3103e-04, 8.8109e-05, + 1.5848e-04, 1.3131e-04], device='cuda:1') +2023-02-05 20:27:12,750 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 20:27:32,108 INFO [train.py:901] (1/4) Epoch 2, batch 4900, loss[loss=0.3173, simple_loss=0.3431, pruned_loss=0.1458, over 7799.00 frames. ], tot_loss[loss=0.3668, simple_loss=0.4027, pruned_loss=0.1654, over 1610669.41 frames. ], batch size: 19, lr: 2.94e-02, grad_scale: 8.0 +2023-02-05 20:27:43,685 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-05 20:27:53,285 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 4.170e+02 5.532e+02 7.452e+02 1.588e+03, threshold=1.106e+03, percent-clipped=9.0 +2023-02-05 20:28:06,716 INFO [train.py:901] (1/4) Epoch 2, batch 4950, loss[loss=0.3155, simple_loss=0.3661, pruned_loss=0.1324, over 8022.00 frames. ], tot_loss[loss=0.3676, simple_loss=0.4029, pruned_loss=0.1661, over 1607099.53 frames. ], batch size: 22, lr: 2.93e-02, grad_scale: 8.0 +2023-02-05 20:28:10,874 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-05 20:28:22,843 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8901, 1.6479, 2.5559, 0.8744, 2.4519, 1.6603, 1.1490, 1.9868], + device='cuda:1'), covar=tensor([0.0214, 0.0118, 0.0141, 0.0236, 0.0179, 0.0320, 0.0310, 0.0141], + device='cuda:1'), in_proj_covar=tensor([0.0188, 0.0139, 0.0125, 0.0187, 0.0134, 0.0242, 0.0194, 0.0165], + device='cuda:1'), out_proj_covar=tensor([1.1356e-04, 8.4579e-05, 7.9256e-05, 1.1359e-04, 8.6796e-05, 1.5971e-04, + 1.2185e-04, 1.0173e-04], device='cuda:1') +2023-02-05 20:28:41,855 INFO [train.py:901] (1/4) Epoch 2, batch 5000, loss[loss=0.2976, simple_loss=0.3483, pruned_loss=0.1235, over 7974.00 frames. ], tot_loss[loss=0.3688, simple_loss=0.4044, pruned_loss=0.1666, over 1613466.63 frames. ], batch size: 21, lr: 2.93e-02, grad_scale: 8.0 +2023-02-05 20:29:02,469 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.193e+02 4.113e+02 5.050e+02 6.511e+02 1.788e+03, threshold=1.010e+03, percent-clipped=5.0 +2023-02-05 20:29:09,704 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=13125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:15,847 INFO [train.py:901] (1/4) Epoch 2, batch 5050, loss[loss=0.3605, simple_loss=0.4134, pruned_loss=0.1538, over 8108.00 frames. ], tot_loss[loss=0.37, simple_loss=0.4052, pruned_loss=0.1674, over 1612746.61 frames. 
], batch size: 23, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:29:21,929 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2385, 1.6019, 3.1925, 0.8259, 2.8490, 1.9872, 0.9719, 1.7290], + device='cuda:1'), covar=tensor([0.0132, 0.0097, 0.0079, 0.0185, 0.0103, 0.0249, 0.0253, 0.0117], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0135, 0.0125, 0.0182, 0.0130, 0.0238, 0.0187, 0.0161], + device='cuda:1'), out_proj_covar=tensor([1.1239e-04, 8.1977e-05, 7.9614e-05, 1.0961e-04, 8.3239e-05, 1.5611e-04, + 1.1633e-04, 9.8533e-05], device='cuda:1') +2023-02-05 20:29:33,601 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.79 vs. limit=2.0 +2023-02-05 20:29:47,498 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13179.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:47,977 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 20:29:50,623 INFO [train.py:901] (1/4) Epoch 2, batch 5100, loss[loss=0.4024, simple_loss=0.4221, pruned_loss=0.1913, over 8631.00 frames. ], tot_loss[loss=0.371, simple_loss=0.4054, pruned_loss=0.1682, over 1611272.25 frames. ], batch size: 34, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:30:04,620 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13204.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:09,125 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4582, 1.8440, 2.9436, 1.1596, 2.0877, 1.6140, 1.4280, 1.6801], + device='cuda:1'), covar=tensor([0.0837, 0.0888, 0.0297, 0.1322, 0.0774, 0.1362, 0.0742, 0.1101], + device='cuda:1'), in_proj_covar=tensor([0.0378, 0.0364, 0.0401, 0.0423, 0.0487, 0.0426, 0.0378, 0.0477], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 20:30:11,536 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.930e+02 4.883e+02 5.892e+02 1.355e+03, threshold=9.766e+02, percent-clipped=3.0 +2023-02-05 20:30:15,632 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1174, 1.6689, 1.2904, 1.2003, 1.6066, 1.3991, 1.5243, 1.6498], + device='cuda:1'), covar=tensor([0.0952, 0.1491, 0.2121, 0.1749, 0.0904, 0.1755, 0.1150, 0.0841], + device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0255, 0.0282, 0.0251, 0.0225, 0.0247, 0.0218, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:1') +2023-02-05 20:30:24,603 INFO [train.py:901] (1/4) Epoch 2, batch 5150, loss[loss=0.3481, simple_loss=0.3897, pruned_loss=0.1533, over 8073.00 frames. ], tot_loss[loss=0.3702, simple_loss=0.4051, pruned_loss=0.1676, over 1612537.07 frames. 
], batch size: 21, lr: 2.91e-02, grad_scale: 4.0 +2023-02-05 20:30:28,689 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=13240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:39,616 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0783, 2.3555, 4.0141, 3.8600, 3.1423, 2.6010, 1.8220, 2.3301], + device='cuda:1'), covar=tensor([0.0638, 0.0823, 0.0177, 0.0241, 0.0336, 0.0304, 0.0448, 0.0699], + device='cuda:1'), in_proj_covar=tensor([0.0498, 0.0417, 0.0311, 0.0349, 0.0440, 0.0387, 0.0407, 0.0435], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:30:59,003 INFO [train.py:901] (1/4) Epoch 2, batch 5200, loss[loss=0.3705, simple_loss=0.3824, pruned_loss=0.1793, over 7429.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.4049, pruned_loss=0.1672, over 1616933.22 frames. ], batch size: 17, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:30:59,193 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4019, 1.4487, 3.9795, 1.8078, 2.3476, 4.7985, 4.1898, 4.1021], + device='cuda:1'), covar=tensor([0.1041, 0.1589, 0.0265, 0.1751, 0.0732, 0.0195, 0.0269, 0.0494], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0257, 0.0177, 0.0251, 0.0189, 0.0152, 0.0145, 0.0218], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:31:06,812 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-05 20:31:20,912 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 4.339e+02 5.206e+02 6.705e+02 1.063e+03, threshold=1.041e+03, percent-clipped=3.0 +2023-02-05 20:31:33,618 INFO [train.py:901] (1/4) Epoch 2, batch 5250, loss[loss=0.3399, simple_loss=0.3952, pruned_loss=0.1423, over 8334.00 frames. ], tot_loss[loss=0.3698, simple_loss=0.4053, pruned_loss=0.1672, over 1618761.65 frames. ], batch size: 26, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:33,949 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-05 20:31:42,984 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 20:31:48,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2304, 1.1132, 2.2334, 1.0587, 2.1331, 2.4416, 2.1670, 2.0411], + device='cuda:1'), covar=tensor([0.1077, 0.1265, 0.0376, 0.1671, 0.0450, 0.0313, 0.0412, 0.0633], + device='cuda:1'), in_proj_covar=tensor([0.0224, 0.0251, 0.0175, 0.0245, 0.0189, 0.0151, 0.0146, 0.0216], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:32:01,984 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-05 20:32:07,576 INFO [train.py:901] (1/4) Epoch 2, batch 5300, loss[loss=0.3931, simple_loss=0.4307, pruned_loss=0.1778, over 8328.00 frames. ], tot_loss[loss=0.3676, simple_loss=0.404, pruned_loss=0.1657, over 1617893.96 frames. 
], batch size: 25, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:32:07,737 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9818, 1.5912, 3.3137, 1.2305, 2.2490, 3.7512, 3.3845, 3.2193], + device='cuda:1'), covar=tensor([0.1075, 0.1271, 0.0282, 0.1890, 0.0611, 0.0199, 0.0225, 0.0493], + device='cuda:1'), in_proj_covar=tensor([0.0225, 0.0252, 0.0176, 0.0249, 0.0189, 0.0152, 0.0145, 0.0218], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:32:29,083 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.093e+02 3.821e+02 4.884e+02 6.417e+02 1.823e+03, threshold=9.767e+02, percent-clipped=6.0 +2023-02-05 20:32:42,521 INFO [train.py:901] (1/4) Epoch 2, batch 5350, loss[loss=0.3645, simple_loss=0.4079, pruned_loss=0.1605, over 8253.00 frames. ], tot_loss[loss=0.3697, simple_loss=0.4056, pruned_loss=0.1669, over 1619649.71 frames. ], batch size: 24, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:32:46,414 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 20:33:16,587 INFO [train.py:901] (1/4) Epoch 2, batch 5400, loss[loss=0.3898, simple_loss=0.4262, pruned_loss=0.1767, over 8653.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.4055, pruned_loss=0.1668, over 1617070.63 frames. ], batch size: 34, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:33:24,899 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:38,014 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.355e+02 3.820e+02 4.559e+02 5.766e+02 1.205e+03, threshold=9.119e+02, percent-clipped=6.0 +2023-02-05 20:33:43,037 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:51,332 INFO [train.py:901] (1/4) Epoch 2, batch 5450, loss[loss=0.3668, simple_loss=0.4032, pruned_loss=0.1652, over 8193.00 frames. ], tot_loss[loss=0.3689, simple_loss=0.4054, pruned_loss=0.1662, over 1618340.82 frames. ], batch size: 23, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:34:25,970 INFO [train.py:901] (1/4) Epoch 2, batch 5500, loss[loss=0.3988, simple_loss=0.4331, pruned_loss=0.1822, over 8515.00 frames. ], tot_loss[loss=0.3706, simple_loss=0.4063, pruned_loss=0.1675, over 1621881.09 frames. ], batch size: 26, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:34:28,062 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 20:34:46,540 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.726e+02 4.817e+02 6.308e+02 1.682e+03, threshold=9.635e+02, percent-clipped=6.0 +2023-02-05 20:34:59,984 INFO [train.py:901] (1/4) Epoch 2, batch 5550, loss[loss=0.3184, simple_loss=0.3713, pruned_loss=0.1327, over 7811.00 frames. ], tot_loss[loss=0.3676, simple_loss=0.4038, pruned_loss=0.1657, over 1618475.15 frames. ], batch size: 20, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:35:35,316 INFO [train.py:901] (1/4) Epoch 2, batch 5600, loss[loss=0.3856, simple_loss=0.4234, pruned_loss=0.1738, over 8501.00 frames. ], tot_loss[loss=0.3674, simple_loss=0.4033, pruned_loss=0.1658, over 1613700.58 frames. 
], batch size: 26, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:35:55,773 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 3.833e+02 4.619e+02 6.071e+02 1.383e+03, threshold=9.238e+02, percent-clipped=5.0 +2023-02-05 20:36:08,576 INFO [train.py:901] (1/4) Epoch 2, batch 5650, loss[loss=0.3628, simple_loss=0.3967, pruned_loss=0.1644, over 8509.00 frames. ], tot_loss[loss=0.3687, simple_loss=0.4046, pruned_loss=0.1664, over 1614805.40 frames. ], batch size: 49, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:36:23,364 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=13755.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:36:34,194 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 20:36:43,557 INFO [train.py:901] (1/4) Epoch 2, batch 5700, loss[loss=0.2859, simple_loss=0.3448, pruned_loss=0.1135, over 8132.00 frames. ], tot_loss[loss=0.367, simple_loss=0.4035, pruned_loss=0.1652, over 1614980.35 frames. ], batch size: 22, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:05,583 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.337e+02 4.261e+02 5.123e+02 6.631e+02 2.352e+03, threshold=1.025e+03, percent-clipped=5.0 +2023-02-05 20:37:18,900 INFO [train.py:901] (1/4) Epoch 2, batch 5750, loss[loss=0.3299, simple_loss=0.3631, pruned_loss=0.1484, over 7788.00 frames. ], tot_loss[loss=0.3689, simple_loss=0.4052, pruned_loss=0.1663, over 1615160.97 frames. ], batch size: 19, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:33,850 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.20 vs. limit=2.0 +2023-02-05 20:37:38,970 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 20:37:54,461 INFO [train.py:901] (1/4) Epoch 2, batch 5800, loss[loss=0.4145, simple_loss=0.4468, pruned_loss=0.1912, over 8249.00 frames. ], tot_loss[loss=0.3704, simple_loss=0.4067, pruned_loss=0.167, over 1616220.60 frames. ], batch size: 24, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:15,733 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.601e+02 3.784e+02 4.729e+02 6.225e+02 2.390e+03, threshold=9.458e+02, percent-clipped=5.0 +2023-02-05 20:38:29,061 INFO [train.py:901] (1/4) Epoch 2, batch 5850, loss[loss=0.3778, simple_loss=0.4305, pruned_loss=0.1625, over 8241.00 frames. ], tot_loss[loss=0.3694, simple_loss=0.4057, pruned_loss=0.1666, over 1612777.62 frames. ], batch size: 24, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:39:03,909 INFO [train.py:901] (1/4) Epoch 2, batch 5900, loss[loss=0.3501, simple_loss=0.387, pruned_loss=0.1567, over 7803.00 frames. ], tot_loss[loss=0.3672, simple_loss=0.4041, pruned_loss=0.1651, over 1606771.04 frames. 
], batch size: 20, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:39:11,156 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9345, 2.2701, 4.6071, 1.1658, 2.7692, 2.1107, 1.6888, 2.0046], + device='cuda:1'), covar=tensor([0.0958, 0.1168, 0.0278, 0.1842, 0.1080, 0.1476, 0.0954, 0.1713], + device='cuda:1'), in_proj_covar=tensor([0.0398, 0.0379, 0.0424, 0.0449, 0.0499, 0.0443, 0.0398, 0.0497], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 20:39:27,081 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.452e+02 3.946e+02 4.724e+02 6.297e+02 1.551e+03, threshold=9.448e+02, percent-clipped=7.0 +2023-02-05 20:39:40,158 INFO [train.py:901] (1/4) Epoch 2, batch 5950, loss[loss=0.3883, simple_loss=0.4268, pruned_loss=0.1749, over 8360.00 frames. ], tot_loss[loss=0.3654, simple_loss=0.403, pruned_loss=0.1639, over 1606408.80 frames. ], batch size: 26, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:39:45,639 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6579, 1.4711, 2.7291, 0.9735, 2.1315, 2.9587, 2.9373, 2.3935], + device='cuda:1'), covar=tensor([0.1399, 0.1805, 0.0525, 0.2655, 0.0774, 0.0506, 0.0349, 0.0976], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0264, 0.0188, 0.0258, 0.0201, 0.0160, 0.0150, 0.0235], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:40:00,196 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.54 vs. limit=5.0 +2023-02-05 20:40:07,401 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5484, 3.2514, 2.8040, 3.9896, 1.9937, 1.4853, 2.0304, 3.1576], + device='cuda:1'), covar=tensor([0.1141, 0.1308, 0.1315, 0.0253, 0.1780, 0.2585, 0.2418, 0.1178], + device='cuda:1'), in_proj_covar=tensor([0.0307, 0.0345, 0.0329, 0.0231, 0.0312, 0.0331, 0.0367, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0003, 0.0004, 0.0005, 0.0005, 0.0004], + device='cuda:1') +2023-02-05 20:40:14,643 INFO [train.py:901] (1/4) Epoch 2, batch 6000, loss[loss=0.3535, simple_loss=0.4181, pruned_loss=0.1445, over 8237.00 frames. ], tot_loss[loss=0.365, simple_loss=0.4028, pruned_loss=0.1636, over 1609808.30 frames. ], batch size: 24, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,643 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 20:40:27,834 INFO [train.py:935] (1/4) Epoch 2, validation: loss=0.2758, simple_loss=0.3606, pruned_loss=0.0955, over 944034.00 frames. 
+2023-02-05 20:40:27,835 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 20:40:31,352 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2913, 1.3604, 2.6580, 0.9427, 2.0150, 2.9541, 2.7274, 2.5270], + device='cuda:1'), covar=tensor([0.1291, 0.1424, 0.0472, 0.2074, 0.0663, 0.0321, 0.0303, 0.0590], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0261, 0.0185, 0.0255, 0.0198, 0.0159, 0.0148, 0.0232], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:40:32,058 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:38,738 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14099.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:49,506 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 3.733e+02 4.780e+02 6.772e+02 2.203e+03, threshold=9.561e+02, percent-clipped=10.0 +2023-02-05 20:40:53,729 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:02,694 INFO [train.py:901] (1/4) Epoch 2, batch 6050, loss[loss=0.3176, simple_loss=0.3657, pruned_loss=0.1348, over 8079.00 frames. ], tot_loss[loss=0.3638, simple_loss=0.4021, pruned_loss=0.1627, over 1608168.24 frames. ], batch size: 21, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:05,430 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14138.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:41:08,102 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1173, 1.3294, 1.5815, 1.2615, 0.9238, 1.5084, 0.3152, 0.7166], + device='cuda:1'), covar=tensor([0.2016, 0.1570, 0.0849, 0.1481, 0.2026, 0.0810, 0.3759, 0.1796], + device='cuda:1'), in_proj_covar=tensor([0.0106, 0.0097, 0.0083, 0.0142, 0.0142, 0.0084, 0.0161, 0.0118], + device='cuda:1'), out_proj_covar=tensor([1.1492e-04, 1.1103e-04, 8.9692e-05, 1.4729e-04, 1.5143e-04, 9.2284e-05, + 1.6641e-04, 1.2941e-04], device='cuda:1') +2023-02-05 20:41:12,689 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8515, 2.1143, 2.5950, 1.5092, 1.4998, 2.1853, 0.2953, 1.1360], + device='cuda:1'), covar=tensor([0.1346, 0.1814, 0.0480, 0.1190, 0.1604, 0.0648, 0.3172, 0.1336], + device='cuda:1'), in_proj_covar=tensor([0.0106, 0.0096, 0.0083, 0.0140, 0.0141, 0.0083, 0.0160, 0.0117], + device='cuda:1'), out_proj_covar=tensor([1.1430e-04, 1.1016e-04, 8.9026e-05, 1.4556e-04, 1.5041e-04, 9.1809e-05, + 1.6520e-04, 1.2828e-04], device='cuda:1') +2023-02-05 20:41:34,278 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-05 20:41:37,173 INFO [train.py:901] (1/4) Epoch 2, batch 6100, loss[loss=0.3345, simple_loss=0.371, pruned_loss=0.149, over 8076.00 frames. ], tot_loss[loss=0.3646, simple_loss=0.4023, pruned_loss=0.1635, over 1611060.38 frames. 
], batch size: 21, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:58,419 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14214.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:58,918 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 3.920e+02 4.920e+02 6.492e+02 2.677e+03, threshold=9.840e+02, percent-clipped=6.0 +2023-02-05 20:42:08,073 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 20:42:11,467 INFO [train.py:901] (1/4) Epoch 2, batch 6150, loss[loss=0.4274, simple_loss=0.4477, pruned_loss=0.2036, over 8594.00 frames. ], tot_loss[loss=0.3653, simple_loss=0.4033, pruned_loss=0.1637, over 1613872.33 frames. ], batch size: 34, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:42:46,428 INFO [train.py:901] (1/4) Epoch 2, batch 6200, loss[loss=0.344, simple_loss=0.3722, pruned_loss=0.1578, over 7815.00 frames. ], tot_loss[loss=0.3641, simple_loss=0.4025, pruned_loss=0.1629, over 1618307.27 frames. ], batch size: 20, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:42:58,969 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0238, 2.4101, 3.9170, 3.6046, 2.7877, 2.1902, 1.4224, 2.1544], + device='cuda:1'), covar=tensor([0.0788, 0.1079, 0.0197, 0.0298, 0.0502, 0.0395, 0.0593, 0.0841], + device='cuda:1'), in_proj_covar=tensor([0.0512, 0.0435, 0.0326, 0.0365, 0.0468, 0.0403, 0.0422, 0.0442], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:43:08,134 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 3.453e+02 4.846e+02 6.394e+02 2.249e+03, threshold=9.691e+02, percent-clipped=6.0 +2023-02-05 20:43:21,530 INFO [train.py:901] (1/4) Epoch 2, batch 6250, loss[loss=0.4365, simple_loss=0.4606, pruned_loss=0.2062, over 8249.00 frames. ], tot_loss[loss=0.3632, simple_loss=0.4013, pruned_loss=0.1626, over 1616049.10 frames. ], batch size: 24, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:43:55,855 INFO [train.py:901] (1/4) Epoch 2, batch 6300, loss[loss=0.3345, simple_loss=0.3725, pruned_loss=0.1482, over 7703.00 frames. ], tot_loss[loss=0.3655, simple_loss=0.4033, pruned_loss=0.1638, over 1618275.66 frames. ], batch size: 18, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:17,506 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.643e+02 3.823e+02 4.655e+02 5.877e+02 1.568e+03, threshold=9.309e+02, percent-clipped=4.0 +2023-02-05 20:44:28,390 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14431.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:30,254 INFO [train.py:901] (1/4) Epoch 2, batch 6350, loss[loss=0.3622, simple_loss=0.3829, pruned_loss=0.1708, over 7798.00 frames. ], tot_loss[loss=0.364, simple_loss=0.4014, pruned_loss=0.1633, over 1616473.83 frames. 
], batch size: 19, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:30,320 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14434.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:51,460 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14465.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:54,882 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:03,110 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14482.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:45:04,270 INFO [train.py:901] (1/4) Epoch 2, batch 6400, loss[loss=0.356, simple_loss=0.4005, pruned_loss=0.1557, over 8103.00 frames. ], tot_loss[loss=0.3656, simple_loss=0.4023, pruned_loss=0.1644, over 1614199.13 frames. ], batch size: 23, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:12,400 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14495.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:19,137 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14505.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:25,558 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.974e+02 5.065e+02 7.362e+02 1.328e+03, threshold=1.013e+03, percent-clipped=8.0 +2023-02-05 20:45:38,723 INFO [train.py:901] (1/4) Epoch 2, batch 6450, loss[loss=0.3879, simple_loss=0.4204, pruned_loss=0.1777, over 8469.00 frames. ], tot_loss[loss=0.3617, simple_loss=0.3993, pruned_loss=0.162, over 1611870.37 frames. ], batch size: 27, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:48,960 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:10,570 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14580.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:13,165 INFO [train.py:901] (1/4) Epoch 2, batch 6500, loss[loss=0.389, simple_loss=0.4268, pruned_loss=0.1756, over 8464.00 frames. ], tot_loss[loss=0.3617, simple_loss=0.3991, pruned_loss=0.1621, over 1611976.24 frames. ], batch size: 25, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:46:22,643 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14597.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:46:28,918 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6841, 2.3033, 4.6001, 1.0070, 2.6454, 1.9031, 1.7324, 2.4348], + device='cuda:1'), covar=tensor([0.1257, 0.1436, 0.0366, 0.2281, 0.1305, 0.2050, 0.1087, 0.1715], + device='cuda:1'), in_proj_covar=tensor([0.0412, 0.0382, 0.0434, 0.0461, 0.0506, 0.0457, 0.0405, 0.0508], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 20:46:35,357 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.999e+02 5.009e+02 6.288e+02 1.522e+03, threshold=1.002e+03, percent-clipped=8.0 +2023-02-05 20:46:48,434 INFO [train.py:901] (1/4) Epoch 2, batch 6550, loss[loss=0.3191, simple_loss=0.358, pruned_loss=0.1401, over 7436.00 frames. ], tot_loss[loss=0.3628, simple_loss=0.4002, pruned_loss=0.1627, over 1616899.10 frames. ], batch size: 17, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:47:16,621 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-05 20:47:23,547 INFO [train.py:901] (1/4) Epoch 2, batch 6600, loss[loss=0.3543, simple_loss=0.3991, pruned_loss=0.1547, over 8507.00 frames. ], tot_loss[loss=0.3629, simple_loss=0.401, pruned_loss=0.1624, over 1619206.31 frames. ], batch size: 26, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:47:36,560 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:47:45,891 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.289e+02 3.681e+02 4.457e+02 5.556e+02 1.208e+03, threshold=8.913e+02, percent-clipped=4.0 +2023-02-05 20:47:58,950 INFO [train.py:901] (1/4) Epoch 2, batch 6650, loss[loss=0.3458, simple_loss=0.3927, pruned_loss=0.1494, over 7964.00 frames. ], tot_loss[loss=0.3611, simple_loss=0.3997, pruned_loss=0.1613, over 1616754.67 frames. ], batch size: 21, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:16,405 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14758.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:28,651 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14775.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:34,790 INFO [train.py:901] (1/4) Epoch 2, batch 6700, loss[loss=0.3601, simple_loss=0.376, pruned_loss=0.1721, over 7513.00 frames. ], tot_loss[loss=0.3589, simple_loss=0.3979, pruned_loss=0.16, over 1614849.94 frames. ], batch size: 18, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:50,186 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:56,674 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.056e+02 3.873e+02 4.634e+02 6.203e+02 1.536e+03, threshold=9.268e+02, percent-clipped=6.0 +2023-02-05 20:48:58,888 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0539, 3.7586, 2.4966, 2.4945, 3.0461, 2.1885, 2.3598, 2.8989], + device='cuda:1'), covar=tensor([0.1278, 0.0505, 0.0767, 0.0848, 0.0668, 0.0997, 0.1008, 0.0836], + device='cuda:1'), in_proj_covar=tensor([0.0380, 0.0244, 0.0365, 0.0319, 0.0357, 0.0334, 0.0356, 0.0328], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:1') +2023-02-05 20:49:07,140 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14830.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:10,347 INFO [train.py:901] (1/4) Epoch 2, batch 6750, loss[loss=0.3488, simple_loss=0.3991, pruned_loss=0.1492, over 8104.00 frames. ], tot_loss[loss=0.3596, simple_loss=0.3986, pruned_loss=0.1603, over 1613181.27 frames. 
], batch size: 23, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:11,909 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14836.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:21,254 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14849.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:24,074 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14853.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:49:29,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14861.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:31,025 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3600, 1.7037, 1.9345, 1.3788, 0.9765, 1.9973, 0.2626, 0.9190], + device='cuda:1'), covar=tensor([0.1821, 0.1049, 0.0768, 0.1376, 0.2379, 0.0604, 0.4357, 0.2108], + device='cuda:1'), in_proj_covar=tensor([0.0108, 0.0100, 0.0084, 0.0142, 0.0144, 0.0082, 0.0163, 0.0116], + device='cuda:1'), out_proj_covar=tensor([1.1963e-04, 1.1657e-04, 9.2294e-05, 1.4965e-04, 1.5701e-04, 9.2883e-05, + 1.7264e-04, 1.3059e-04], device='cuda:1') +2023-02-05 20:49:35,149 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3216, 1.6198, 4.3327, 2.1439, 2.0402, 5.1567, 4.7711, 4.5911], + device='cuda:1'), covar=tensor([0.0972, 0.1288, 0.0169, 0.1406, 0.0755, 0.0151, 0.0202, 0.0312], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0257, 0.0180, 0.0255, 0.0195, 0.0162, 0.0150, 0.0228], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 20:49:41,499 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14878.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:49:45,986 INFO [train.py:901] (1/4) Epoch 2, batch 6800, loss[loss=0.3576, simple_loss=0.4007, pruned_loss=0.1573, over 8107.00 frames. ], tot_loss[loss=0.3595, simple_loss=0.3984, pruned_loss=0.1603, over 1611143.94 frames. ], batch size: 23, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:50,353 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14890.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:54,304 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 20:50:07,684 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.663e+02 4.715e+02 6.092e+02 1.805e+03, threshold=9.431e+02, percent-clipped=7.0 +2023-02-05 20:50:21,320 INFO [train.py:901] (1/4) Epoch 2, batch 6850, loss[loss=0.2994, simple_loss=0.3355, pruned_loss=0.1317, over 7706.00 frames. ], tot_loss[loss=0.3588, simple_loss=0.3984, pruned_loss=0.1596, over 1612556.23 frames. ], batch size: 18, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:50:29,479 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.75 vs. limit=2.0 +2023-02-05 20:50:42,738 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14964.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:50:45,346 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 20:50:57,101 INFO [train.py:901] (1/4) Epoch 2, batch 6900, loss[loss=0.3796, simple_loss=0.4084, pruned_loss=0.1754, over 8142.00 frames. ], tot_loss[loss=0.36, simple_loss=0.3995, pruned_loss=0.1602, over 1615129.41 frames. 
], batch size: 22, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:51:19,285 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.011e+02 4.191e+02 5.097e+02 7.005e+02 1.700e+03, threshold=1.019e+03, percent-clipped=5.0 +2023-02-05 20:51:32,588 INFO [train.py:901] (1/4) Epoch 2, batch 6950, loss[loss=0.3163, simple_loss=0.3559, pruned_loss=0.1384, over 7249.00 frames. ], tot_loss[loss=0.359, simple_loss=0.3983, pruned_loss=0.1599, over 1610754.68 frames. ], batch size: 16, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:51:56,470 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 20:51:57,426 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4890, 1.6819, 2.8439, 1.0166, 2.0850, 1.6384, 1.4470, 1.7130], + device='cuda:1'), covar=tensor([0.1101, 0.1342, 0.0453, 0.2056, 0.1036, 0.1868, 0.1040, 0.1437], + device='cuda:1'), in_proj_covar=tensor([0.0410, 0.0389, 0.0449, 0.0458, 0.0513, 0.0465, 0.0403, 0.0511], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 20:52:08,409 INFO [train.py:901] (1/4) Epoch 2, batch 7000, loss[loss=0.3488, simple_loss=0.3871, pruned_loss=0.1553, over 8085.00 frames. ], tot_loss[loss=0.3586, simple_loss=0.3981, pruned_loss=0.1595, over 1610516.24 frames. ], batch size: 21, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:52:21,504 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15102.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:52:30,568 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.233e+02 3.928e+02 4.810e+02 5.818e+02 1.410e+03, threshold=9.621e+02, percent-clipped=1.0 +2023-02-05 20:52:44,331 INFO [train.py:901] (1/4) Epoch 2, batch 7050, loss[loss=0.3655, simple_loss=0.4229, pruned_loss=0.154, over 8544.00 frames. ], tot_loss[loss=0.3586, simple_loss=0.3983, pruned_loss=0.1594, over 1610928.27 frames. ], batch size: 31, lr: 2.75e-02, grad_scale: 16.0 +2023-02-05 20:52:52,890 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:10,283 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15171.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:18,777 INFO [train.py:901] (1/4) Epoch 2, batch 7100, loss[loss=0.3731, simple_loss=0.4078, pruned_loss=0.1692, over 8469.00 frames. ], tot_loss[loss=0.3564, simple_loss=0.3968, pruned_loss=0.158, over 1612410.13 frames. ], batch size: 25, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:53:19,708 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0948, 3.6418, 2.0225, 2.7058, 3.2486, 2.3611, 2.4262, 2.6732], + device='cuda:1'), covar=tensor([0.1487, 0.0641, 0.0978, 0.0881, 0.0541, 0.0992, 0.1261, 0.1023], + device='cuda:1'), in_proj_covar=tensor([0.0371, 0.0246, 0.0349, 0.0314, 0.0347, 0.0335, 0.0353, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:1') +2023-02-05 20:53:39,789 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15213.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:40,183 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. 
limit=2.0 +2023-02-05 20:53:41,005 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.718e+02 4.413e+02 5.855e+02 1.165e+03, threshold=8.826e+02, percent-clipped=3.0 +2023-02-05 20:53:41,246 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8191, 2.0360, 2.4019, 0.9879, 2.6361, 1.7606, 1.2257, 1.9075], + device='cuda:1'), covar=tensor([0.0141, 0.0075, 0.0093, 0.0148, 0.0084, 0.0196, 0.0201, 0.0099], + device='cuda:1'), in_proj_covar=tensor([0.0198, 0.0132, 0.0121, 0.0184, 0.0134, 0.0250, 0.0201, 0.0171], + device='cuda:1'), out_proj_covar=tensor([1.1171e-04, 7.4086e-05, 7.0120e-05, 1.0115e-04, 7.9218e-05, 1.5196e-04, + 1.1549e-04, 9.7090e-05], device='cuda:1') +2023-02-05 20:53:42,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15217.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:44,510 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:53,628 INFO [train.py:901] (1/4) Epoch 2, batch 7150, loss[loss=0.3232, simple_loss=0.3718, pruned_loss=0.1373, over 7926.00 frames. ], tot_loss[loss=0.3557, simple_loss=0.396, pruned_loss=0.1577, over 1611723.67 frames. ], batch size: 20, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:54:02,061 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:54:29,186 INFO [train.py:901] (1/4) Epoch 2, batch 7200, loss[loss=0.3733, simple_loss=0.3869, pruned_loss=0.1799, over 7259.00 frames. ], tot_loss[loss=0.3556, simple_loss=0.3962, pruned_loss=0.1575, over 1612371.31 frames. ], batch size: 16, lr: 2.73e-02, grad_scale: 16.0 +2023-02-05 20:54:51,169 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.528e+02 3.704e+02 4.905e+02 6.625e+02 1.855e+03, threshold=9.809e+02, percent-clipped=12.0 +2023-02-05 20:55:04,896 INFO [train.py:901] (1/4) Epoch 2, batch 7250, loss[loss=0.3687, simple_loss=0.4213, pruned_loss=0.1581, over 8614.00 frames. ], tot_loss[loss=0.3579, simple_loss=0.3979, pruned_loss=0.1589, over 1611765.69 frames. ], batch size: 31, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:55:21,617 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4353, 2.1744, 2.1561, 0.5318, 2.0971, 1.4501, 0.4259, 1.6750], + device='cuda:1'), covar=tensor([0.0115, 0.0047, 0.0084, 0.0151, 0.0079, 0.0221, 0.0223, 0.0079], + device='cuda:1'), in_proj_covar=tensor([0.0202, 0.0135, 0.0122, 0.0186, 0.0135, 0.0248, 0.0201, 0.0176], + device='cuda:1'), out_proj_covar=tensor([1.1391e-04, 7.5015e-05, 7.0380e-05, 1.0189e-04, 7.9616e-05, 1.5064e-04, + 1.1507e-04, 9.9132e-05], device='cuda:1') +2023-02-05 20:55:22,312 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1638, 1.6002, 1.7789, 1.2430, 0.8860, 1.6456, 0.1127, 0.9047], + device='cuda:1'), covar=tensor([0.2359, 0.1352, 0.0761, 0.1503, 0.2482, 0.0771, 0.4110, 0.2086], + device='cuda:1'), in_proj_covar=tensor([0.0107, 0.0098, 0.0079, 0.0137, 0.0135, 0.0079, 0.0151, 0.0113], + device='cuda:1'), out_proj_covar=tensor([1.1947e-04, 1.1632e-04, 8.8296e-05, 1.4814e-04, 1.4938e-04, 9.1009e-05, + 1.6417e-04, 1.3109e-04], device='cuda:1') +2023-02-05 20:55:39,924 INFO [train.py:901] (1/4) Epoch 2, batch 7300, loss[loss=0.3527, simple_loss=0.3983, pruned_loss=0.1535, over 8527.00 frames. ], tot_loss[loss=0.3564, simple_loss=0.3961, pruned_loss=0.1583, over 1607423.30 frames. 
], batch size: 28, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:56:02,308 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.194e+02 3.434e+02 4.292e+02 5.923e+02 1.449e+03, threshold=8.584e+02, percent-clipped=5.0 +2023-02-05 20:56:12,517 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 20:56:14,874 INFO [train.py:901] (1/4) Epoch 2, batch 7350, loss[loss=0.3489, simple_loss=0.3835, pruned_loss=0.1571, over 8232.00 frames. ], tot_loss[loss=0.3581, simple_loss=0.3979, pruned_loss=0.1592, over 1609268.89 frames. ], batch size: 22, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:22,685 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.91 vs. limit=2.0 +2023-02-05 20:56:24,574 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 20:56:42,776 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15473.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:56:43,069 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-05 20:56:43,893 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 20:56:49,837 INFO [train.py:901] (1/4) Epoch 2, batch 7400, loss[loss=0.3357, simple_loss=0.3728, pruned_loss=0.1493, over 7268.00 frames. ], tot_loss[loss=0.3588, simple_loss=0.3986, pruned_loss=0.1595, over 1611898.10 frames. ], batch size: 16, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:59,509 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:01,972 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 20:57:11,806 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.940e+02 4.956e+02 6.362e+02 1.377e+03, threshold=9.912e+02, percent-clipped=7.0 +2023-02-05 20:57:24,672 INFO [train.py:901] (1/4) Epoch 2, batch 7450, loss[loss=0.3643, simple_loss=0.4083, pruned_loss=0.1601, over 8352.00 frames. ], tot_loss[loss=0.3593, simple_loss=0.3984, pruned_loss=0.1601, over 1611337.55 frames. ], batch size: 24, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:57:25,215 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.41 vs. limit=5.0 +2023-02-05 20:57:32,760 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9564, 2.0138, 1.6861, 2.6109, 1.1957, 1.1235, 1.3797, 2.1188], + device='cuda:1'), covar=tensor([0.1296, 0.1411, 0.1863, 0.0552, 0.2203, 0.2951, 0.2519, 0.1297], + device='cuda:1'), in_proj_covar=tensor([0.0313, 0.0326, 0.0331, 0.0226, 0.0311, 0.0332, 0.0363, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 20:57:40,474 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15557.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:41,767 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 20:57:49,569 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.32 vs. limit=5.0 +2023-02-05 20:57:59,035 INFO [train.py:901] (1/4) Epoch 2, batch 7500, loss[loss=0.2878, simple_loss=0.3479, pruned_loss=0.1138, over 8094.00 frames. 
], tot_loss[loss=0.3593, simple_loss=0.3985, pruned_loss=0.16, over 1611896.74 frames. ], batch size: 21, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:58:20,424 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-05 20:58:21,359 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.662e+02 4.519e+02 5.678e+02 1.466e+03, threshold=9.038e+02, percent-clipped=6.0 +2023-02-05 20:58:34,049 INFO [train.py:901] (1/4) Epoch 2, batch 7550, loss[loss=0.2709, simple_loss=0.3248, pruned_loss=0.1085, over 7708.00 frames. ], tot_loss[loss=0.3566, simple_loss=0.3966, pruned_loss=0.1583, over 1612086.88 frames. ], batch size: 18, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:59:00,928 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15672.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:59:08,581 INFO [train.py:901] (1/4) Epoch 2, batch 7600, loss[loss=0.295, simple_loss=0.3481, pruned_loss=0.121, over 7426.00 frames. ], tot_loss[loss=0.3565, simple_loss=0.3966, pruned_loss=0.1582, over 1612276.77 frames. ], batch size: 17, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 20:59:31,058 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.634e+02 4.473e+02 6.191e+02 1.516e+03, threshold=8.946e+02, percent-clipped=5.0 +2023-02-05 20:59:43,083 INFO [train.py:901] (1/4) Epoch 2, batch 7650, loss[loss=0.3941, simple_loss=0.4261, pruned_loss=0.1811, over 8373.00 frames. ], tot_loss[loss=0.3558, simple_loss=0.3961, pruned_loss=0.1578, over 1608780.12 frames. ], batch size: 24, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 21:00:19,420 INFO [train.py:901] (1/4) Epoch 2, batch 7700, loss[loss=0.3868, simple_loss=0.421, pruned_loss=0.1763, over 8669.00 frames. ], tot_loss[loss=0.3548, simple_loss=0.3958, pruned_loss=0.1569, over 1613321.21 frames. ], batch size: 34, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:00:41,049 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 3.880e+02 4.902e+02 6.175e+02 1.322e+03, threshold=9.805e+02, percent-clipped=4.0 +2023-02-05 21:00:42,379 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-05 21:00:51,197 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 21:00:53,924 INFO [train.py:901] (1/4) Epoch 2, batch 7750, loss[loss=0.4503, simple_loss=0.4561, pruned_loss=0.2223, over 6676.00 frames. ], tot_loss[loss=0.3535, simple_loss=0.395, pruned_loss=0.156, over 1612106.23 frames. ], batch size: 71, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:28,164 INFO [train.py:901] (1/4) Epoch 2, batch 7800, loss[loss=0.2775, simple_loss=0.3334, pruned_loss=0.1108, over 7790.00 frames. ], tot_loss[loss=0.3517, simple_loss=0.393, pruned_loss=0.1552, over 1610323.13 frames. 
], batch size: 19, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:40,336 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:01:50,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.569e+02 4.742e+02 5.990e+02 9.896e+02, threshold=9.484e+02, percent-clipped=1.0 +2023-02-05 21:01:58,284 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.3562, 5.5806, 4.8132, 2.0221, 4.7453, 4.9828, 5.0880, 4.2032], + device='cuda:1'), covar=tensor([0.0996, 0.0545, 0.0872, 0.4851, 0.0538, 0.0519, 0.1214, 0.0883], + device='cuda:1'), in_proj_covar=tensor([0.0336, 0.0224, 0.0265, 0.0358, 0.0241, 0.0201, 0.0255, 0.0185], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 21:01:59,060 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:02,915 INFO [train.py:901] (1/4) Epoch 2, batch 7850, loss[loss=0.3354, simple_loss=0.3732, pruned_loss=0.1488, over 7542.00 frames. ], tot_loss[loss=0.351, simple_loss=0.3928, pruned_loss=0.1546, over 1613108.29 frames. ], batch size: 18, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:15,681 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15953.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:36,232 INFO [train.py:901] (1/4) Epoch 2, batch 7900, loss[loss=0.3592, simple_loss=0.4064, pruned_loss=0.1559, over 8350.00 frames. ], tot_loss[loss=0.3525, simple_loss=0.3943, pruned_loss=0.1554, over 1618334.90 frames. ], batch size: 24, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:58,246 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.808e+02 4.602e+02 5.936e+02 1.299e+03, threshold=9.205e+02, percent-clipped=9.0 +2023-02-05 21:03:00,391 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16019.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:03:10,218 INFO [train.py:901] (1/4) Epoch 2, batch 7950, loss[loss=0.2751, simple_loss=0.3296, pruned_loss=0.1103, over 7223.00 frames. ], tot_loss[loss=0.3534, simple_loss=0.3951, pruned_loss=0.1559, over 1619964.81 frames. ], batch size: 16, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:03:43,334 INFO [train.py:901] (1/4) Epoch 2, batch 8000, loss[loss=0.3209, simple_loss=0.3686, pruned_loss=0.1365, over 8040.00 frames. ], tot_loss[loss=0.3551, simple_loss=0.3964, pruned_loss=0.1569, over 1619806.53 frames. ], batch size: 22, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:03:56,055 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16103.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:04:04,544 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.336e+02 4.123e+02 4.991e+02 6.647e+02 1.461e+03, threshold=9.983e+02, percent-clipped=10.0 +2023-02-05 21:04:16,506 INFO [train.py:901] (1/4) Epoch 2, batch 8050, loss[loss=0.3703, simple_loss=0.3902, pruned_loss=0.1752, over 7539.00 frames. ], tot_loss[loss=0.3554, simple_loss=0.3956, pruned_loss=0.1576, over 1607700.72 frames. ], batch size: 18, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:04:51,387 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. 
Duration: 27.47775 +2023-02-05 21:04:55,122 INFO [train.py:901] (1/4) Epoch 3, batch 0, loss[loss=0.3948, simple_loss=0.4092, pruned_loss=0.1902, over 7691.00 frames. ], tot_loss[loss=0.3948, simple_loss=0.4092, pruned_loss=0.1902, over 7691.00 frames. ], batch size: 18, lr: 2.53e-02, grad_scale: 8.0 +2023-02-05 21:04:55,123 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 21:05:06,956 INFO [train.py:935] (1/4) Epoch 3, validation: loss=0.2731, simple_loss=0.3579, pruned_loss=0.09417, over 944034.00 frames. +2023-02-05 21:05:06,957 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 21:05:07,098 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:05:23,592 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 21:05:24,627 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-05 21:05:42,765 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.402e+02 4.065e+02 5.070e+02 6.931e+02 1.670e+03, threshold=1.014e+03, percent-clipped=5.0 +2023-02-05 21:05:42,791 INFO [train.py:901] (1/4) Epoch 3, batch 50, loss[loss=0.4258, simple_loss=0.4456, pruned_loss=0.203, over 7703.00 frames. ], tot_loss[loss=0.36, simple_loss=0.3983, pruned_loss=0.1609, over 359706.15 frames. ], batch size: 72, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:05:58,817 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 21:06:02,997 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:06:18,204 INFO [train.py:901] (1/4) Epoch 3, batch 100, loss[loss=0.4071, simple_loss=0.4318, pruned_loss=0.1912, over 8616.00 frames. ], tot_loss[loss=0.3566, simple_loss=0.3972, pruned_loss=0.158, over 640092.39 frames. ], batch size: 39, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:06:18,921 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 21:06:49,925 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-05 21:06:53,421 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.291e+02 3.520e+02 4.471e+02 5.811e+02 1.196e+03, threshold=8.942e+02, percent-clipped=3.0 +2023-02-05 21:06:53,441 INFO [train.py:901] (1/4) Epoch 3, batch 150, loss[loss=0.3913, simple_loss=0.439, pruned_loss=0.1718, over 8242.00 frames. ], tot_loss[loss=0.3535, simple_loss=0.396, pruned_loss=0.1555, over 861618.12 frames. ], batch size: 24, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:23,171 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16360.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:24,939 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16363.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:27,430 INFO [train.py:901] (1/4) Epoch 3, batch 200, loss[loss=0.3523, simple_loss=0.3837, pruned_loss=0.1605, over 7530.00 frames. ], tot_loss[loss=0.3495, simple_loss=0.3934, pruned_loss=0.1528, over 1031012.31 frames. 
], batch size: 18, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:42,148 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16389.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:47,434 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7713, 2.4038, 2.0581, 2.7185, 1.4680, 1.0965, 1.9769, 2.3905], + device='cuda:1'), covar=tensor([0.1264, 0.1180, 0.1427, 0.0502, 0.1826, 0.2713, 0.1677, 0.0955], + device='cuda:1'), in_proj_covar=tensor([0.0314, 0.0329, 0.0329, 0.0233, 0.0306, 0.0329, 0.0354, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:08:00,665 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.67 vs. limit=2.0 +2023-02-05 21:08:01,470 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 3.609e+02 4.419e+02 5.456e+02 1.161e+03, threshold=8.837e+02, percent-clipped=3.0 +2023-02-05 21:08:01,490 INFO [train.py:901] (1/4) Epoch 3, batch 250, loss[loss=0.3598, simple_loss=0.4128, pruned_loss=0.1534, over 8487.00 frames. ], tot_loss[loss=0.349, simple_loss=0.3929, pruned_loss=0.1525, over 1163351.46 frames. ], batch size: 28, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:03,189 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.79 vs. limit=2.0 +2023-02-05 21:08:13,923 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 21:08:22,557 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 21:08:22,621 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:35,579 INFO [train.py:901] (1/4) Epoch 3, batch 300, loss[loss=0.3342, simple_loss=0.3935, pruned_loss=0.1375, over 8466.00 frames. ], tot_loss[loss=0.3487, simple_loss=0.3922, pruned_loss=0.1526, over 1265268.01 frames. ], batch size: 25, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:43,564 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16478.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:05,163 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16511.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:09,099 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.982e+02 3.752e+02 4.774e+02 5.919e+02 1.248e+03, threshold=9.549e+02, percent-clipped=6.0 +2023-02-05 21:09:09,120 INFO [train.py:901] (1/4) Epoch 3, batch 350, loss[loss=0.3159, simple_loss=0.375, pruned_loss=0.1284, over 8515.00 frames. ], tot_loss[loss=0.349, simple_loss=0.3928, pruned_loss=0.1526, over 1346618.60 frames. 
], batch size: 50, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:09:40,929 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16562.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:40,987 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5340, 2.1261, 3.5750, 1.0570, 2.6216, 2.0476, 1.6346, 2.1973], + device='cuda:1'), covar=tensor([0.1077, 0.1195, 0.0385, 0.2123, 0.0926, 0.1393, 0.0923, 0.1471], + device='cuda:1'), in_proj_covar=tensor([0.0417, 0.0382, 0.0444, 0.0469, 0.0518, 0.0452, 0.0407, 0.0510], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:09:44,010 INFO [train.py:901] (1/4) Epoch 3, batch 400, loss[loss=0.3245, simple_loss=0.3851, pruned_loss=0.1319, over 8473.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3932, pruned_loss=0.1528, over 1405991.94 frames. ], batch size: 29, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:18,113 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:18,544 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.210e+02 3.588e+02 4.493e+02 6.059e+02 1.047e+03, threshold=8.987e+02, percent-clipped=2.0 +2023-02-05 21:10:18,565 INFO [train.py:901] (1/4) Epoch 3, batch 450, loss[loss=0.3703, simple_loss=0.4142, pruned_loss=0.1632, over 8835.00 frames. ], tot_loss[loss=0.3493, simple_loss=0.3932, pruned_loss=0.1527, over 1458569.05 frames. ], batch size: 32, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:21,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4596, 2.7391, 3.1844, 2.2084, 1.7280, 3.1346, 0.7001, 1.5902], + device='cuda:1'), covar=tensor([0.2299, 0.2109, 0.0555, 0.1921, 0.3163, 0.0604, 0.5168, 0.1952], + device='cuda:1'), in_proj_covar=tensor([0.0109, 0.0097, 0.0081, 0.0143, 0.0143, 0.0082, 0.0149, 0.0108], + device='cuda:1'), out_proj_covar=tensor([1.2697e-04, 1.1944e-04, 9.5400e-05, 1.5891e-04, 1.6245e-04, 9.8834e-05, + 1.6792e-04, 1.3025e-04], device='cuda:1') +2023-02-05 21:10:24,806 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16626.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:35,583 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16641.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:53,050 INFO [train.py:901] (1/4) Epoch 3, batch 500, loss[loss=0.3159, simple_loss=0.3531, pruned_loss=0.1393, over 7541.00 frames. ], tot_loss[loss=0.3497, simple_loss=0.3928, pruned_loss=0.1533, over 1491541.25 frames. ], batch size: 18, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:11:27,933 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 3.547e+02 4.664e+02 6.145e+02 2.246e+03, threshold=9.327e+02, percent-clipped=7.0 +2023-02-05 21:11:27,953 INFO [train.py:901] (1/4) Epoch 3, batch 550, loss[loss=0.364, simple_loss=0.4137, pruned_loss=0.1572, over 8534.00 frames. ], tot_loss[loss=0.3491, simple_loss=0.3926, pruned_loss=0.1528, over 1522533.86 frames. 
], batch size: 28, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:11:38,641 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16733.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:39,420 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16734.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:56,567 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16759.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:01,687 INFO [train.py:901] (1/4) Epoch 3, batch 600, loss[loss=0.4002, simple_loss=0.4262, pruned_loss=0.1871, over 8628.00 frames. ], tot_loss[loss=0.3509, simple_loss=0.3932, pruned_loss=0.1543, over 1540715.97 frames. ], batch size: 34, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:16,351 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 21:12:28,543 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.09 vs. limit=2.0 +2023-02-05 21:12:36,655 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.715e+02 4.834e+02 5.984e+02 1.404e+03, threshold=9.668e+02, percent-clipped=7.0 +2023-02-05 21:12:36,676 INFO [train.py:901] (1/4) Epoch 3, batch 650, loss[loss=0.3303, simple_loss=0.3702, pruned_loss=0.1452, over 7683.00 frames. ], tot_loss[loss=0.349, simple_loss=0.3915, pruned_loss=0.1533, over 1554958.75 frames. ], batch size: 18, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:37,556 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16818.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:54,024 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:55,880 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5182, 2.1941, 2.2739, 0.8715, 2.1050, 1.5281, 0.4587, 1.7498], + device='cuda:1'), covar=tensor([0.0100, 0.0040, 0.0039, 0.0123, 0.0065, 0.0194, 0.0182, 0.0065], + device='cuda:1'), in_proj_covar=tensor([0.0203, 0.0141, 0.0121, 0.0187, 0.0134, 0.0251, 0.0206, 0.0174], + device='cuda:1'), out_proj_covar=tensor([1.0708e-04, 7.3793e-05, 6.4750e-05, 9.6656e-05, 7.3934e-05, 1.4436e-04, + 1.1176e-04, 9.3405e-05], device='cuda:1') +2023-02-05 21:12:57,237 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:10,252 INFO [train.py:901] (1/4) Epoch 3, batch 700, loss[loss=0.4567, simple_loss=0.4659, pruned_loss=0.2238, over 8349.00 frames. ], tot_loss[loss=0.3495, simple_loss=0.3924, pruned_loss=0.1533, over 1572411.67 frames. 
], batch size: 24, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:20,396 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16882.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:23,049 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16886.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:13:38,445 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:44,820 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.086e+02 3.932e+02 4.613e+02 6.231e+02 2.383e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 21:13:44,841 INFO [train.py:901] (1/4) Epoch 3, batch 750, loss[loss=0.4011, simple_loss=0.4213, pruned_loss=0.1905, over 7923.00 frames. ], tot_loss[loss=0.3501, simple_loss=0.3924, pruned_loss=0.1539, over 1575922.58 frames. ], batch size: 20, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:49,806 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16924.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:52,494 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3329, 4.4526, 3.8642, 1.9426, 3.7895, 3.8246, 4.0459, 3.6613], + device='cuda:1'), covar=tensor([0.0909, 0.0557, 0.0832, 0.4449, 0.0661, 0.0749, 0.1285, 0.0750], + device='cuda:1'), in_proj_covar=tensor([0.0338, 0.0222, 0.0267, 0.0356, 0.0250, 0.0201, 0.0256, 0.0183], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 21:13:59,070 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 21:14:07,690 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 21:14:19,211 INFO [train.py:901] (1/4) Epoch 3, batch 800, loss[loss=0.3345, simple_loss=0.3748, pruned_loss=0.1471, over 7925.00 frames. ], tot_loss[loss=0.347, simple_loss=0.3901, pruned_loss=0.152, over 1585242.65 frames. ], batch size: 20, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:14:53,628 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.211e+02 3.452e+02 4.368e+02 5.287e+02 1.393e+03, threshold=8.735e+02, percent-clipped=4.0 +2023-02-05 21:14:53,648 INFO [train.py:901] (1/4) Epoch 3, batch 850, loss[loss=0.349, simple_loss=0.4042, pruned_loss=0.1469, over 8361.00 frames. ], tot_loss[loss=0.3501, simple_loss=0.3922, pruned_loss=0.154, over 1592790.31 frames. ], batch size: 24, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:14:55,868 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3934, 2.3961, 1.5108, 1.8967, 1.8152, 1.3420, 1.6826, 1.9169], + device='cuda:1'), covar=tensor([0.1300, 0.0337, 0.0984, 0.0728, 0.0870, 0.1076, 0.0998, 0.0891], + device='cuda:1'), in_proj_covar=tensor([0.0368, 0.0246, 0.0341, 0.0309, 0.0350, 0.0309, 0.0355, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 21:15:28,355 INFO [train.py:901] (1/4) Epoch 3, batch 900, loss[loss=0.2768, simple_loss=0.3454, pruned_loss=0.1041, over 7960.00 frames. ], tot_loss[loss=0.3498, simple_loss=0.3919, pruned_loss=0.1538, over 1594450.86 frames. 
], batch size: 21, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:53,790 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17104.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:02,287 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.375e+02 3.695e+02 4.540e+02 5.760e+02 9.795e+02, threshold=9.080e+02, percent-clipped=3.0 +2023-02-05 21:16:02,308 INFO [train.py:901] (1/4) Epoch 3, batch 950, loss[loss=0.3915, simple_loss=0.4206, pruned_loss=0.1813, over 8581.00 frames. ], tot_loss[loss=0.3483, simple_loss=0.3907, pruned_loss=0.1529, over 1596986.62 frames. ], batch size: 39, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:16:10,479 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17129.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:12,703 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 21:16:25,713 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 21:16:36,855 INFO [train.py:901] (1/4) Epoch 3, batch 1000, loss[loss=0.3593, simple_loss=0.3989, pruned_loss=0.1599, over 8355.00 frames. ], tot_loss[loss=0.3491, simple_loss=0.3906, pruned_loss=0.1538, over 1595706.24 frames. ], batch size: 24, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:16:57,954 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 21:17:03,572 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:17:10,142 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 4.093e+02 4.952e+02 6.088e+02 1.030e+03, threshold=9.904e+02, percent-clipped=7.0 +2023-02-05 21:17:10,162 INFO [train.py:901] (1/4) Epoch 3, batch 1050, loss[loss=0.4926, simple_loss=0.4942, pruned_loss=0.2455, over 8594.00 frames. ], tot_loss[loss=0.3476, simple_loss=0.39, pruned_loss=0.1526, over 1603945.02 frames. ], batch size: 39, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:10,174 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 21:17:19,689 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17230.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:17:45,190 INFO [train.py:901] (1/4) Epoch 3, batch 1100, loss[loss=0.3368, simple_loss=0.383, pruned_loss=0.1453, over 7974.00 frames. ], tot_loss[loss=0.3455, simple_loss=0.3887, pruned_loss=0.1511, over 1606378.32 frames. ], batch size: 21, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:45,926 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:17:48,751 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-05 21:18:19,092 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.272e+02 3.840e+02 4.434e+02 5.714e+02 1.415e+03, threshold=8.869e+02, percent-clipped=3.0 +2023-02-05 21:18:19,112 INFO [train.py:901] (1/4) Epoch 3, batch 1150, loss[loss=0.3184, simple_loss=0.3656, pruned_loss=0.1356, over 8247.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3868, pruned_loss=0.1496, over 1602747.90 frames. 
], batch size: 22, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:18:22,465 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 21:18:22,641 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7217, 2.1653, 1.9335, 2.6711, 1.1672, 1.3046, 1.5295, 2.3158], + device='cuda:1'), covar=tensor([0.1361, 0.1364, 0.1557, 0.0561, 0.2075, 0.2568, 0.2003, 0.1007], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0330, 0.0324, 0.0232, 0.0302, 0.0332, 0.0355, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:18:35,405 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4930, 1.9339, 3.2058, 1.0234, 2.2727, 1.7185, 1.4810, 1.8950], + device='cuda:1'), covar=tensor([0.1202, 0.1349, 0.0421, 0.2396, 0.1128, 0.1765, 0.1036, 0.1705], + device='cuda:1'), in_proj_covar=tensor([0.0430, 0.0399, 0.0458, 0.0490, 0.0535, 0.0476, 0.0423, 0.0537], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:18:38,632 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17345.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:18:52,840 INFO [train.py:901] (1/4) Epoch 3, batch 1200, loss[loss=0.3181, simple_loss=0.3835, pruned_loss=0.1263, over 8293.00 frames. ], tot_loss[loss=0.342, simple_loss=0.3862, pruned_loss=0.1488, over 1604562.38 frames. ], batch size: 23, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:19:02,187 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17380.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:04,907 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17383.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:17,665 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17401.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:19:28,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 3.772e+02 4.989e+02 5.905e+02 9.785e+02, threshold=9.978e+02, percent-clipped=4.0 +2023-02-05 21:19:28,385 INFO [train.py:901] (1/4) Epoch 3, batch 1250, loss[loss=0.3769, simple_loss=0.4142, pruned_loss=0.1698, over 6847.00 frames. ], tot_loss[loss=0.3419, simple_loss=0.3858, pruned_loss=0.149, over 1602754.96 frames. ], batch size: 71, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:20:02,610 INFO [train.py:901] (1/4) Epoch 3, batch 1300, loss[loss=0.3043, simple_loss=0.3577, pruned_loss=0.1254, over 8196.00 frames. ], tot_loss[loss=0.3438, simple_loss=0.3879, pruned_loss=0.1498, over 1609980.42 frames. ], batch size: 23, lr: 2.44e-02, grad_scale: 8.0 +2023-02-05 21:20:37,546 INFO [train.py:901] (1/4) Epoch 3, batch 1350, loss[loss=0.3288, simple_loss=0.3958, pruned_loss=0.1309, over 8614.00 frames. ], tot_loss[loss=0.3455, simple_loss=0.389, pruned_loss=0.151, over 1613145.39 frames. 
], batch size: 34, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:20:38,234 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 4.258e+02 5.812e+02 8.345e+02 8.746e+03, threshold=1.162e+03, percent-clipped=16.0 +2023-02-05 21:21:00,247 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17551.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:21:11,045 INFO [train.py:901] (1/4) Epoch 3, batch 1400, loss[loss=0.3668, simple_loss=0.4119, pruned_loss=0.1609, over 8511.00 frames. ], tot_loss[loss=0.3463, simple_loss=0.3899, pruned_loss=0.1513, over 1616018.21 frames. ], batch size: 28, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:21:34,754 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17601.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:21:38,919 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0 +2023-02-05 21:21:46,907 INFO [train.py:901] (1/4) Epoch 3, batch 1450, loss[loss=0.3337, simple_loss=0.384, pruned_loss=0.1417, over 8148.00 frames. ], tot_loss[loss=0.347, simple_loss=0.3906, pruned_loss=0.1517, over 1618377.58 frames. ], batch size: 22, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:21:47,583 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.138e+02 3.309e+02 4.161e+02 5.035e+02 1.114e+03, threshold=8.322e+02, percent-clipped=0.0 +2023-02-05 21:21:48,940 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 21:21:53,237 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17626.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:22:02,518 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17639.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:19,049 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17664.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,439 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17666.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,943 INFO [train.py:901] (1/4) Epoch 3, batch 1500, loss[loss=0.3153, simple_loss=0.372, pruned_loss=0.1293, over 8316.00 frames. ], tot_loss[loss=0.3476, simple_loss=0.391, pruned_loss=0.1521, over 1617228.19 frames. ], batch size: 25, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,188 INFO [train.py:901] (1/4) Epoch 3, batch 1550, loss[loss=0.3103, simple_loss=0.3532, pruned_loss=0.1337, over 7800.00 frames. ], tot_loss[loss=0.3489, simple_loss=0.3917, pruned_loss=0.153, over 1615940.74 frames. 
], batch size: 19, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,832 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.415e+02 3.678e+02 4.620e+02 5.892e+02 1.697e+03, threshold=9.239e+02, percent-clipped=9.0 +2023-02-05 21:22:57,018 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:01,087 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17724.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:16,062 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17745.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:23:17,955 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:30,930 INFO [train.py:901] (1/4) Epoch 3, batch 1600, loss[loss=0.3095, simple_loss=0.3574, pruned_loss=0.1309, over 7438.00 frames. ], tot_loss[loss=0.3486, simple_loss=0.3916, pruned_loss=0.1528, over 1616090.29 frames. ], batch size: 17, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:23:46,407 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2244, 4.3308, 3.7830, 1.6393, 3.7450, 3.6833, 4.0509, 3.3416], + device='cuda:1'), covar=tensor([0.0812, 0.0400, 0.0722, 0.4390, 0.0512, 0.0679, 0.0764, 0.0656], + device='cuda:1'), in_proj_covar=tensor([0.0343, 0.0231, 0.0278, 0.0364, 0.0253, 0.0207, 0.0263, 0.0194], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 21:23:49,220 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6146, 2.2526, 2.3261, 0.4633, 2.2994, 1.3838, 0.5981, 1.9221], + device='cuda:1'), covar=tensor([0.0137, 0.0054, 0.0084, 0.0190, 0.0091, 0.0262, 0.0222, 0.0073], + device='cuda:1'), in_proj_covar=tensor([0.0212, 0.0151, 0.0125, 0.0189, 0.0143, 0.0262, 0.0209, 0.0178], + device='cuda:1'), out_proj_covar=tensor([1.1028e-04, 7.7879e-05, 6.4757e-05, 9.4771e-05, 7.5891e-05, 1.4564e-04, + 1.0997e-04, 9.3637e-05], device='cuda:1') +2023-02-05 21:24:05,136 INFO [train.py:901] (1/4) Epoch 3, batch 1650, loss[loss=0.3248, simple_loss=0.3859, pruned_loss=0.1318, over 8327.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.39, pruned_loss=0.1514, over 1614778.73 frames. ], batch size: 26, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:05,797 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.309e+02 4.132e+02 5.477e+02 8.650e+02, threshold=8.264e+02, percent-clipped=0.0 +2023-02-05 21:24:20,682 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17839.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:24:26,724 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1338, 2.3634, 1.9964, 2.8588, 1.4166, 1.4482, 1.9802, 2.6334], + device='cuda:1'), covar=tensor([0.1038, 0.1245, 0.1456, 0.0515, 0.1907, 0.2194, 0.1940, 0.1004], + device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0318, 0.0313, 0.0228, 0.0292, 0.0320, 0.0335, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:24:35,267 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17860.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:24:39,691 INFO [train.py:901] (1/4) Epoch 3, batch 1700, loss[loss=0.3146, simple_loss=0.3688, pruned_loss=0.1302, over 8136.00 frames. 
], tot_loss[loss=0.3476, simple_loss=0.3913, pruned_loss=0.152, over 1615726.67 frames. ], batch size: 22, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:13,899 INFO [train.py:901] (1/4) Epoch 3, batch 1750, loss[loss=0.3466, simple_loss=0.3859, pruned_loss=0.1536, over 7783.00 frames. ], tot_loss[loss=0.3493, simple_loss=0.393, pruned_loss=0.1528, over 1620838.47 frames. ], batch size: 19, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:14,599 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.998e+02 5.161e+02 6.686e+02 1.470e+03, threshold=1.032e+03, percent-clipped=12.0 +2023-02-05 21:25:17,619 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17922.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:35,790 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17947.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:48,900 INFO [train.py:901] (1/4) Epoch 3, batch 1800, loss[loss=0.2733, simple_loss=0.3198, pruned_loss=0.1134, over 7701.00 frames. ], tot_loss[loss=0.3462, simple_loss=0.3904, pruned_loss=0.151, over 1618815.48 frames. ], batch size: 18, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:25:57,026 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17978.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:25,060 INFO [train.py:901] (1/4) Epoch 3, batch 1850, loss[loss=0.3552, simple_loss=0.4127, pruned_loss=0.1489, over 8478.00 frames. ], tot_loss[loss=0.3454, simple_loss=0.3897, pruned_loss=0.1506, over 1619211.08 frames. ], batch size: 28, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:26:25,631 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.564e+02 4.327e+02 5.819e+02 2.228e+03, threshold=8.654e+02, percent-clipped=8.0 +2023-02-05 21:26:39,909 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7495, 2.1597, 1.8650, 2.6629, 1.2440, 1.3037, 1.6110, 2.3149], + device='cuda:1'), covar=tensor([0.1333, 0.1535, 0.1669, 0.0485, 0.1944, 0.2436, 0.2080, 0.0922], + device='cuda:1'), in_proj_covar=tensor([0.0309, 0.0316, 0.0320, 0.0234, 0.0295, 0.0327, 0.0348, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:26:52,697 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1963, 1.8047, 2.9979, 2.5107, 2.3098, 1.9945, 1.3710, 1.1573], + device='cuda:1'), covar=tensor([0.0721, 0.0805, 0.0166, 0.0289, 0.0319, 0.0375, 0.0531, 0.0768], + device='cuda:1'), in_proj_covar=tensor([0.0561, 0.0487, 0.0387, 0.0435, 0.0536, 0.0452, 0.0474, 0.0478], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:26:55,935 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18062.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:59,914 INFO [train.py:901] (1/4) Epoch 3, batch 1900, loss[loss=0.4159, simple_loss=0.4414, pruned_loss=0.1953, over 8511.00 frames. ], tot_loss[loss=0.3462, simple_loss=0.3904, pruned_loss=0.1509, over 1621279.56 frames. ], batch size: 49, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:27:15,649 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.17 vs. 
limit=5.0 +2023-02-05 21:27:17,421 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18092.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:19,615 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:24,179 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 21:27:34,371 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18116.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:27:34,803 INFO [train.py:901] (1/4) Epoch 3, batch 1950, loss[loss=0.4042, simple_loss=0.444, pruned_loss=0.1822, over 8614.00 frames. ], tot_loss[loss=0.3445, simple_loss=0.3894, pruned_loss=0.1499, over 1617004.96 frames. ], batch size: 34, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:27:35,483 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.131e+02 3.385e+02 4.094e+02 5.586e+02 1.173e+03, threshold=8.188e+02, percent-clipped=3.0 +2023-02-05 21:27:36,216 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 21:27:37,024 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:51,206 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18141.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:27:55,040 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 21:28:09,123 INFO [train.py:901] (1/4) Epoch 3, batch 2000, loss[loss=0.4401, simple_loss=0.4632, pruned_loss=0.2085, over 8332.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.3875, pruned_loss=0.1484, over 1617970.58 frames. ], batch size: 25, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:17,060 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18177.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:27,346 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:28,019 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18193.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:38,158 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:41,236 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-05 21:28:44,807 INFO [train.py:901] (1/4) Epoch 3, batch 2050, loss[loss=0.2794, simple_loss=0.3349, pruned_loss=0.112, over 7937.00 frames. ], tot_loss[loss=0.3418, simple_loss=0.3873, pruned_loss=0.1482, over 1620512.70 frames. 
], batch size: 20, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:46,138 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.474e+02 3.817e+02 4.995e+02 6.129e+02 1.664e+03, threshold=9.991e+02, percent-clipped=7.0 +2023-02-05 21:29:03,574 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4595, 2.6969, 1.6806, 2.0817, 2.0384, 1.2197, 1.7656, 2.1845], + device='cuda:1'), covar=tensor([0.1265, 0.0356, 0.1024, 0.0766, 0.0798, 0.1337, 0.1089, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0369, 0.0259, 0.0354, 0.0319, 0.0352, 0.0317, 0.0363, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 21:29:19,897 INFO [train.py:901] (1/4) Epoch 3, batch 2100, loss[loss=0.2879, simple_loss=0.3474, pruned_loss=0.1142, over 8094.00 frames. ], tot_loss[loss=0.3433, simple_loss=0.3884, pruned_loss=0.1491, over 1622054.82 frames. ], batch size: 21, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,202 INFO [train.py:901] (1/4) Epoch 3, batch 2150, loss[loss=0.3449, simple_loss=0.4012, pruned_loss=0.1443, over 8445.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.3873, pruned_loss=0.1486, over 1620460.51 frames. ], batch size: 27, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,885 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.297e+02 3.744e+02 4.718e+02 5.936e+02 1.452e+03, threshold=9.436e+02, percent-clipped=4.0 +2023-02-05 21:29:59,210 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18322.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:30:15,995 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1247, 1.1508, 2.3221, 1.0191, 2.0395, 2.5245, 2.3480, 2.1272], + device='cuda:1'), covar=tensor([0.1034, 0.1118, 0.0397, 0.1852, 0.0480, 0.0264, 0.0328, 0.0587], + device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0252, 0.0196, 0.0255, 0.0196, 0.0162, 0.0162, 0.0238], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:30:31,074 INFO [train.py:901] (1/4) Epoch 3, batch 2200, loss[loss=0.4405, simple_loss=0.4435, pruned_loss=0.2188, over 6923.00 frames. ], tot_loss[loss=0.3423, simple_loss=0.387, pruned_loss=0.1488, over 1615380.93 frames. ], batch size: 71, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:31:06,840 INFO [train.py:901] (1/4) Epoch 3, batch 2250, loss[loss=0.3263, simple_loss=0.386, pruned_loss=0.1333, over 8508.00 frames. ], tot_loss[loss=0.341, simple_loss=0.3864, pruned_loss=0.1478, over 1614600.05 frames. 
], batch size: 26, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:07,502 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.379e+02 3.424e+02 4.222e+02 5.561e+02 1.530e+03, threshold=8.445e+02, percent-clipped=2.0 +2023-02-05 21:31:18,180 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18433.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:21,447 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18437.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:36,360 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18458.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:39,724 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7157, 1.4654, 3.0438, 1.4117, 2.3470, 3.4285, 3.0857, 2.9639], + device='cuda:1'), covar=tensor([0.1311, 0.1557, 0.0501, 0.1924, 0.0701, 0.0282, 0.0356, 0.0557], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0260, 0.0199, 0.0255, 0.0200, 0.0165, 0.0168, 0.0245], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:31:39,795 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:42,290 INFO [train.py:901] (1/4) Epoch 3, batch 2300, loss[loss=0.2855, simple_loss=0.3458, pruned_loss=0.1126, over 7523.00 frames. ], tot_loss[loss=0.3403, simple_loss=0.3861, pruned_loss=0.1472, over 1615795.61 frames. ], batch size: 18, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:56,790 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:10,468 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18508.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:15,216 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7380, 1.5301, 3.3501, 1.1812, 2.2420, 3.8996, 3.6146, 3.2845], + device='cuda:1'), covar=tensor([0.1222, 0.1468, 0.0328, 0.2043, 0.0638, 0.0227, 0.0289, 0.0520], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0261, 0.0199, 0.0255, 0.0201, 0.0166, 0.0168, 0.0245], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:32:17,121 INFO [train.py:901] (1/4) Epoch 3, batch 2350, loss[loss=0.3182, simple_loss=0.3639, pruned_loss=0.1362, over 7449.00 frames. ], tot_loss[loss=0.3383, simple_loss=0.385, pruned_loss=0.1458, over 1617256.44 frames. ], batch size: 17, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:32:17,770 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.449e+02 3.759e+02 4.661e+02 5.652e+02 9.227e+02, threshold=9.323e+02, percent-clipped=1.0 +2023-02-05 21:32:23,347 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18526.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:32:30,484 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:31,167 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18537.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:51,324 INFO [train.py:901] (1/4) Epoch 3, batch 2400, loss[loss=0.3082, simple_loss=0.3544, pruned_loss=0.131, over 7921.00 frames. 
], tot_loss[loss=0.339, simple_loss=0.3853, pruned_loss=0.1464, over 1614788.01 frames. ], batch size: 20, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:33:05,630 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2023, 1.1875, 4.3725, 1.7979, 3.6526, 3.6107, 3.7647, 3.7302], + device='cuda:1'), covar=tensor([0.0279, 0.3267, 0.0220, 0.1787, 0.0907, 0.0411, 0.0394, 0.0456], + device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0414, 0.0279, 0.0320, 0.0383, 0.0302, 0.0290, 0.0325], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 21:33:24,822 INFO [train.py:901] (1/4) Epoch 3, batch 2450, loss[loss=0.3928, simple_loss=0.4202, pruned_loss=0.1827, over 8198.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3882, pruned_loss=0.1489, over 1616517.92 frames. ], batch size: 23, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:33:25,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 3.618e+02 4.763e+02 6.456e+02 1.024e+03, threshold=9.527e+02, percent-clipped=2.0 +2023-02-05 21:33:49,164 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:49,837 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18652.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:51,269 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7104, 2.4877, 3.7304, 3.3086, 2.7576, 2.2990, 1.4566, 2.0499], + device='cuda:1'), covar=tensor([0.0859, 0.1037, 0.0248, 0.0399, 0.0549, 0.0491, 0.0703, 0.0962], + device='cuda:1'), in_proj_covar=tensor([0.0566, 0.0491, 0.0393, 0.0449, 0.0544, 0.0464, 0.0485, 0.0487], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:33:59,429 INFO [train.py:901] (1/4) Epoch 3, batch 2500, loss[loss=0.3672, simple_loss=0.4102, pruned_loss=0.1621, over 8468.00 frames. ], tot_loss[loss=0.3416, simple_loss=0.3874, pruned_loss=0.1478, over 1617920.19 frames. ], batch size: 25, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:17,683 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:18,343 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18693.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:33,854 INFO [train.py:901] (1/4) Epoch 3, batch 2550, loss[loss=0.3868, simple_loss=0.42, pruned_loss=0.1768, over 8448.00 frames. ], tot_loss[loss=0.3427, simple_loss=0.3884, pruned_loss=0.1484, over 1619287.37 frames. ], batch size: 49, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:34,503 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.889e+02 4.529e+02 5.619e+02 1.309e+03, threshold=9.058e+02, percent-clipped=5.0 +2023-02-05 21:34:34,734 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:40,293 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 21:35:00,848 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-05 21:35:08,024 INFO [train.py:901] (1/4) Epoch 3, batch 2600, loss[loss=0.3472, simple_loss=0.3878, pruned_loss=0.1533, over 8647.00 frames. 
], tot_loss[loss=0.3393, simple_loss=0.3858, pruned_loss=0.1464, over 1618778.95 frames. ], batch size: 34, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:27,952 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5890, 1.9767, 3.5715, 0.9915, 2.4403, 1.7694, 1.6226, 2.1030], + device='cuda:1'), covar=tensor([0.1125, 0.1284, 0.0467, 0.2245, 0.1053, 0.1830, 0.1050, 0.1804], + device='cuda:1'), in_proj_covar=tensor([0.0434, 0.0408, 0.0481, 0.0493, 0.0549, 0.0491, 0.0441, 0.0547], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:35:44,452 INFO [train.py:901] (1/4) Epoch 3, batch 2650, loss[loss=0.2848, simple_loss=0.3468, pruned_loss=0.1114, over 8251.00 frames. ], tot_loss[loss=0.3387, simple_loss=0.3855, pruned_loss=0.1459, over 1622709.17 frames. ], batch size: 22, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:45,137 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.426e+02 4.272e+02 5.708e+02 1.020e+03, threshold=8.544e+02, percent-clipped=5.0 +2023-02-05 21:36:08,385 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18852.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:19,356 INFO [train.py:901] (1/4) Epoch 3, batch 2700, loss[loss=0.4109, simple_loss=0.435, pruned_loss=0.1934, over 7253.00 frames. ], tot_loss[loss=0.339, simple_loss=0.3859, pruned_loss=0.1461, over 1626135.53 frames. ], batch size: 71, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:21,520 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18870.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:36:28,573 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-05 21:36:47,163 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:47,843 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18908.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:53,561 INFO [train.py:901] (1/4) Epoch 3, batch 2750, loss[loss=0.3406, simple_loss=0.393, pruned_loss=0.1441, over 8246.00 frames. ], tot_loss[loss=0.3399, simple_loss=0.3865, pruned_loss=0.1466, over 1619618.54 frames. ], batch size: 24, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:54,219 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.973e+02 3.360e+02 4.052e+02 5.079e+02 9.265e+02, threshold=8.105e+02, percent-clipped=2.0 +2023-02-05 21:37:05,002 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:05,663 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18933.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:15,752 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:28,079 INFO [train.py:901] (1/4) Epoch 3, batch 2800, loss[loss=0.2667, simple_loss=0.3197, pruned_loss=0.1068, over 7429.00 frames. ], tot_loss[loss=0.3393, simple_loss=0.3864, pruned_loss=0.1461, over 1619959.02 frames. 
], batch size: 17, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:37:28,254 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:41,129 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18985.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:38:03,344 INFO [train.py:901] (1/4) Epoch 3, batch 2850, loss[loss=0.33, simple_loss=0.3852, pruned_loss=0.1374, over 7802.00 frames. ], tot_loss[loss=0.3386, simple_loss=0.3861, pruned_loss=0.1455, over 1622326.17 frames. ], batch size: 20, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:38:03,913 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 3.511e+02 4.402e+02 5.555e+02 1.104e+03, threshold=8.804e+02, percent-clipped=5.0 +2023-02-05 21:38:15,997 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19036.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:38:37,626 INFO [train.py:901] (1/4) Epoch 3, batch 2900, loss[loss=0.334, simple_loss=0.375, pruned_loss=0.1465, over 7421.00 frames. ], tot_loss[loss=0.3392, simple_loss=0.3863, pruned_loss=0.1461, over 1620707.90 frames. ], batch size: 17, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:38:48,617 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.91 vs. limit=2.0 +2023-02-05 21:39:02,486 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 21:39:09,997 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2209, 1.7181, 1.5843, 0.3720, 1.5434, 1.1990, 0.3260, 1.6458], + device='cuda:1'), covar=tensor([0.0102, 0.0047, 0.0058, 0.0125, 0.0069, 0.0212, 0.0165, 0.0053], + device='cuda:1'), in_proj_covar=tensor([0.0216, 0.0148, 0.0135, 0.0197, 0.0151, 0.0272, 0.0212, 0.0182], + device='cuda:1'), out_proj_covar=tensor([1.0782e-04, 7.3127e-05, 6.6019e-05, 9.5798e-05, 7.7379e-05, 1.4546e-04, + 1.0817e-04, 9.1182e-05], device='cuda:1') +2023-02-05 21:39:11,764 INFO [train.py:901] (1/4) Epoch 3, batch 2950, loss[loss=0.3298, simple_loss=0.3773, pruned_loss=0.1412, over 8132.00 frames. ], tot_loss[loss=0.3381, simple_loss=0.3852, pruned_loss=0.1455, over 1617930.99 frames. ], batch size: 22, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:12,417 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.613e+02 4.498e+02 5.900e+02 1.326e+03, threshold=8.996e+02, percent-clipped=8.0 +2023-02-05 21:39:32,502 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19147.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:39:35,223 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:39:46,215 INFO [train.py:901] (1/4) Epoch 3, batch 3000, loss[loss=0.3379, simple_loss=0.3845, pruned_loss=0.1456, over 8089.00 frames. ], tot_loss[loss=0.3355, simple_loss=0.383, pruned_loss=0.144, over 1617722.86 frames. ], batch size: 21, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:46,215 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 21:39:58,666 INFO [train.py:935] (1/4) Epoch 3, validation: loss=0.2584, simple_loss=0.3473, pruned_loss=0.08481, over 944034.00 frames. +2023-02-05 21:39:58,667 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 21:40:03,995 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. 
limit=5.0 +2023-02-05 21:40:29,846 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2464, 3.8425, 2.4066, 2.8391, 2.4866, 1.9002, 2.4488, 2.7378], + device='cuda:1'), covar=tensor([0.1224, 0.0440, 0.0780, 0.0618, 0.0771, 0.1054, 0.0960, 0.1026], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0245, 0.0326, 0.0303, 0.0333, 0.0310, 0.0341, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:1') +2023-02-05 21:40:33,767 INFO [train.py:901] (1/4) Epoch 3, batch 3050, loss[loss=0.3854, simple_loss=0.4106, pruned_loss=0.1801, over 7283.00 frames. ], tot_loss[loss=0.337, simple_loss=0.3841, pruned_loss=0.1449, over 1616948.28 frames. ], batch size: 72, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:40:34,446 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.016e+02 3.526e+02 4.458e+02 6.217e+02 1.354e+03, threshold=8.917e+02, percent-clipped=3.0 +2023-02-05 21:40:38,079 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:40:38,977 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.81 vs. limit=5.0 +2023-02-05 21:40:50,792 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19241.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:40:55,431 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19248.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:07,576 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19266.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:41:07,997 INFO [train.py:901] (1/4) Epoch 3, batch 3100, loss[loss=0.3348, simple_loss=0.3775, pruned_loss=0.146, over 7527.00 frames. ], tot_loss[loss=0.337, simple_loss=0.384, pruned_loss=0.145, over 1616077.19 frames. ], batch size: 18, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:41:26,056 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19292.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:43,825 INFO [train.py:901] (1/4) Epoch 3, batch 3150, loss[loss=0.3611, simple_loss=0.3924, pruned_loss=0.1649, over 8248.00 frames. ], tot_loss[loss=0.3383, simple_loss=0.3846, pruned_loss=0.146, over 1615517.80 frames. ], batch size: 22, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:41:44,465 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.556e+02 3.507e+02 4.387e+02 6.193e+02 1.521e+03, threshold=8.773e+02, percent-clipped=4.0 +2023-02-05 21:42:02,338 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.52 vs. limit=5.0 +2023-02-05 21:42:15,246 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0945, 0.9872, 3.1225, 0.9205, 2.6119, 2.6909, 2.8303, 2.7701], + device='cuda:1'), covar=tensor([0.0482, 0.3402, 0.0546, 0.2152, 0.1402, 0.0635, 0.0559, 0.0695], + device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0415, 0.0287, 0.0325, 0.0387, 0.0308, 0.0302, 0.0326], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 21:42:17,833 INFO [train.py:901] (1/4) Epoch 3, batch 3200, loss[loss=0.3353, simple_loss=0.3932, pruned_loss=0.1387, over 8015.00 frames. ], tot_loss[loss=0.3376, simple_loss=0.384, pruned_loss=0.1456, over 1613430.71 frames. 
], batch size: 22, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:45,873 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:45,912 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:49,211 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19412.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:53,530 INFO [train.py:901] (1/4) Epoch 3, batch 3250, loss[loss=0.4182, simple_loss=0.4449, pruned_loss=0.1958, over 8683.00 frames. ], tot_loss[loss=0.3394, simple_loss=0.3851, pruned_loss=0.1469, over 1610535.75 frames. ], batch size: 30, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:54,124 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 3.440e+02 4.583e+02 5.736e+02 1.373e+03, threshold=9.167e+02, percent-clipped=8.0 +2023-02-05 21:42:54,452 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 21:43:03,724 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:43:26,830 INFO [train.py:901] (1/4) Epoch 3, batch 3300, loss[loss=0.2875, simple_loss=0.3313, pruned_loss=0.1219, over 7226.00 frames. ], tot_loss[loss=0.339, simple_loss=0.3847, pruned_loss=0.1466, over 1609076.85 frames. ], batch size: 16, lr: 2.32e-02, grad_scale: 8.0 +2023-02-05 21:43:43,382 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19491.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:44:01,035 INFO [train.py:901] (1/4) Epoch 3, batch 3350, loss[loss=0.3831, simple_loss=0.415, pruned_loss=0.1756, over 8456.00 frames. ], tot_loss[loss=0.3408, simple_loss=0.3859, pruned_loss=0.1479, over 1612450.08 frames. ], batch size: 48, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:44:01,702 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 3.690e+02 4.650e+02 5.581e+02 1.223e+03, threshold=9.300e+02, percent-clipped=5.0 +2023-02-05 21:44:35,822 INFO [train.py:901] (1/4) Epoch 3, batch 3400, loss[loss=0.3757, simple_loss=0.4178, pruned_loss=0.1668, over 8550.00 frames. ], tot_loss[loss=0.3401, simple_loss=0.3855, pruned_loss=0.1474, over 1612101.21 frames. ], batch size: 39, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:02,606 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19606.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:45:09,812 INFO [train.py:901] (1/4) Epoch 3, batch 3450, loss[loss=0.375, simple_loss=0.42, pruned_loss=0.1651, over 8763.00 frames. ], tot_loss[loss=0.3397, simple_loss=0.385, pruned_loss=0.1472, over 1614701.79 frames. ], batch size: 30, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:10,436 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.543e+02 3.801e+02 4.733e+02 6.108e+02 1.526e+03, threshold=9.466e+02, percent-clipped=4.0 +2023-02-05 21:45:23,371 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19636.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:42,898 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19663.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:45,373 INFO [train.py:901] (1/4) Epoch 3, batch 3500, loss[loss=0.2934, simple_loss=0.3443, pruned_loss=0.1213, over 7541.00 frames. 
], tot_loss[loss=0.3381, simple_loss=0.3837, pruned_loss=0.1463, over 1612435.25 frames. ], batch size: 18, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:45:58,038 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 21:45:59,530 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:12,432 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.72 vs. limit=2.0 +2023-02-05 21:46:19,301 INFO [train.py:901] (1/4) Epoch 3, batch 3550, loss[loss=0.342, simple_loss=0.4014, pruned_loss=0.1413, over 8363.00 frames. ], tot_loss[loss=0.3391, simple_loss=0.3848, pruned_loss=0.1467, over 1611829.54 frames. ], batch size: 24, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:46:19,968 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.944e+02 3.514e+02 4.193e+02 5.166e+02 1.109e+03, threshold=8.387e+02, percent-clipped=2.0 +2023-02-05 21:46:22,491 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.17 vs. limit=5.0 +2023-02-05 21:46:46,365 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19756.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:54,375 INFO [train.py:901] (1/4) Epoch 3, batch 3600, loss[loss=0.3664, simple_loss=0.4117, pruned_loss=0.1606, over 8759.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3836, pruned_loss=0.1455, over 1612761.76 frames. ], batch size: 30, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:47:04,332 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.58 vs. limit=5.0 +2023-02-05 21:47:17,891 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-05 21:47:28,230 INFO [train.py:901] (1/4) Epoch 3, batch 3650, loss[loss=0.2913, simple_loss=0.3209, pruned_loss=0.1309, over 6413.00 frames. ], tot_loss[loss=0.3357, simple_loss=0.3821, pruned_loss=0.1447, over 1610310.14 frames. ], batch size: 14, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:47:28,894 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 3.610e+02 4.497e+02 5.952e+02 1.837e+03, threshold=8.994e+02, percent-clipped=7.0 +2023-02-05 21:47:54,114 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 21:47:58,734 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 21:47:59,615 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19862.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:48:02,764 INFO [train.py:901] (1/4) Epoch 3, batch 3700, loss[loss=0.295, simple_loss=0.3611, pruned_loss=0.1145, over 8287.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3818, pruned_loss=0.1445, over 1611302.50 frames. 
], batch size: 23, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:05,644 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19871.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:48:12,204 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3673, 1.5735, 1.3662, 1.1318, 1.6527, 1.5134, 1.5720, 1.9261], + device='cuda:1'), covar=tensor([0.0843, 0.1670, 0.2510, 0.1930, 0.0901, 0.2024, 0.1094, 0.0670], + device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0226, 0.0264, 0.0227, 0.0190, 0.0228, 0.0187, 0.0187], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:48:17,455 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19887.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:48:37,840 INFO [train.py:901] (1/4) Epoch 3, batch 3750, loss[loss=0.3822, simple_loss=0.4225, pruned_loss=0.171, over 8251.00 frames. ], tot_loss[loss=0.3361, simple_loss=0.3828, pruned_loss=0.1447, over 1613553.78 frames. ], batch size: 24, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:38,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 3.342e+02 4.116e+02 5.480e+02 1.463e+03, threshold=8.233e+02, percent-clipped=1.0 +2023-02-05 21:49:12,036 INFO [train.py:901] (1/4) Epoch 3, batch 3800, loss[loss=0.3361, simple_loss=0.3942, pruned_loss=0.1391, over 8467.00 frames. ], tot_loss[loss=0.3334, simple_loss=0.3807, pruned_loss=0.143, over 1613592.64 frames. ], batch size: 27, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:49:19,738 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 21:49:20,774 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19980.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:49:21,520 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6788, 1.2814, 5.7300, 1.8643, 4.9522, 4.8570, 5.2664, 5.2061], + device='cuda:1'), covar=tensor([0.0262, 0.3452, 0.0210, 0.1965, 0.0742, 0.0322, 0.0305, 0.0340], + device='cuda:1'), in_proj_covar=tensor([0.0241, 0.0428, 0.0294, 0.0329, 0.0388, 0.0310, 0.0308, 0.0332], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 21:49:32,281 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4370, 1.9702, 3.1986, 0.9906, 2.1244, 1.7524, 1.6717, 1.6249], + device='cuda:1'), covar=tensor([0.1604, 0.1412, 0.0580, 0.2667, 0.1363, 0.2109, 0.1216, 0.2156], + device='cuda:1'), in_proj_covar=tensor([0.0441, 0.0406, 0.0487, 0.0493, 0.0544, 0.0486, 0.0431, 0.0548], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:49:37,390 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 21:49:48,447 INFO [train.py:901] (1/4) Epoch 3, batch 3850, loss[loss=0.3547, simple_loss=0.4057, pruned_loss=0.1518, over 8608.00 frames. ], tot_loss[loss=0.3353, simple_loss=0.3822, pruned_loss=0.1442, over 1613023.52 frames. 
], batch size: 34, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:49:49,085 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.536e+02 4.444e+02 5.257e+02 1.055e+03, threshold=8.889e+02, percent-clipped=4.0 +2023-02-05 21:49:50,510 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0483, 1.1585, 1.0663, 0.9127, 0.6964, 1.0980, 0.0349, 0.9978], + device='cuda:1'), covar=tensor([0.2380, 0.1241, 0.1204, 0.1701, 0.4260, 0.1111, 0.5220, 0.1652], + device='cuda:1'), in_proj_covar=tensor([0.0110, 0.0093, 0.0080, 0.0144, 0.0167, 0.0076, 0.0150, 0.0107], + device='cuda:1'), out_proj_covar=tensor([1.3875e-04, 1.2032e-04, 1.0436e-04, 1.7446e-04, 1.9680e-04, 9.9052e-05, + 1.8260e-04, 1.4189e-04], device='cuda:1') +2023-02-05 21:50:01,939 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 21:50:02,725 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20038.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:11,369 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1683, 1.6994, 3.1778, 1.5704, 2.3872, 3.6120, 3.3141, 3.1730], + device='cuda:1'), covar=tensor([0.0920, 0.1238, 0.0451, 0.1609, 0.0685, 0.0173, 0.0308, 0.0482], + device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0256, 0.0195, 0.0256, 0.0201, 0.0168, 0.0169, 0.0242], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:50:22,591 INFO [train.py:901] (1/4) Epoch 3, batch 3900, loss[loss=0.3183, simple_loss=0.3652, pruned_loss=0.1357, over 8193.00 frames. ], tot_loss[loss=0.3384, simple_loss=0.3845, pruned_loss=0.1461, over 1615051.66 frames. ], batch size: 23, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:22,832 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1073, 1.8537, 3.5055, 2.9994, 2.6234, 1.5280, 1.1405, 1.3943], + device='cuda:1'), covar=tensor([0.1702, 0.1662, 0.0277, 0.0497, 0.0665, 0.1077, 0.1187, 0.1391], + device='cuda:1'), in_proj_covar=tensor([0.0588, 0.0504, 0.0417, 0.0463, 0.0564, 0.0476, 0.0498, 0.0491], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 21:50:41,539 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:42,863 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5527, 1.4556, 5.5861, 2.2457, 4.9371, 4.6682, 5.0077, 5.1751], + device='cuda:1'), covar=tensor([0.0228, 0.3142, 0.0164, 0.1527, 0.0649, 0.0312, 0.0270, 0.0244], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0421, 0.0296, 0.0326, 0.0388, 0.0306, 0.0306, 0.0329], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 21:50:50,426 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20107.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:56,844 INFO [train.py:901] (1/4) Epoch 3, batch 3950, loss[loss=0.2993, simple_loss=0.3667, pruned_loss=0.116, over 8362.00 frames. ], tot_loss[loss=0.337, simple_loss=0.3836, pruned_loss=0.1452, over 1616041.49 frames. 
], batch size: 24, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:57,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.492e+02 4.461e+02 6.032e+02 1.371e+03, threshold=8.922e+02, percent-clipped=4.0 +2023-02-05 21:51:05,055 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:06,984 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20130.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:21,462 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20152.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:31,210 INFO [train.py:901] (1/4) Epoch 3, batch 4000, loss[loss=0.3479, simple_loss=0.4044, pruned_loss=0.1457, over 8308.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3825, pruned_loss=0.1441, over 1616574.53 frames. ], batch size: 23, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:51:38,888 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9114, 1.1708, 4.1478, 1.6502, 3.4153, 3.3994, 3.6319, 3.6484], + device='cuda:1'), covar=tensor([0.0415, 0.3087, 0.0290, 0.1774, 0.1015, 0.0475, 0.0409, 0.0456], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0410, 0.0291, 0.0321, 0.0391, 0.0303, 0.0304, 0.0329], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-05 21:51:50,790 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7072, 1.9442, 3.6485, 1.0216, 2.5138, 1.8900, 1.5896, 1.8863], + device='cuda:1'), covar=tensor([0.1190, 0.1593, 0.0385, 0.2442, 0.1186, 0.1908, 0.1136, 0.2097], + device='cuda:1'), in_proj_covar=tensor([0.0432, 0.0400, 0.0463, 0.0477, 0.0532, 0.0476, 0.0419, 0.0533], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:52:05,183 INFO [train.py:901] (1/4) Epoch 3, batch 4050, loss[loss=0.3333, simple_loss=0.3659, pruned_loss=0.1503, over 7227.00 frames. ], tot_loss[loss=0.3338, simple_loss=0.3812, pruned_loss=0.1432, over 1615515.89 frames. ], batch size: 16, lr: 2.28e-02, grad_scale: 16.0 +2023-02-05 21:52:05,854 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.226e+02 3.505e+02 4.242e+02 5.307e+02 1.364e+03, threshold=8.485e+02, percent-clipped=4.0 +2023-02-05 21:52:09,348 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20222.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:52:37,132 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5183, 1.8421, 2.8433, 1.1279, 2.3259, 1.7271, 1.5480, 1.7112], + device='cuda:1'), covar=tensor([0.1255, 0.1358, 0.0473, 0.2277, 0.0799, 0.1881, 0.1105, 0.1558], + device='cuda:1'), in_proj_covar=tensor([0.0435, 0.0402, 0.0472, 0.0484, 0.0531, 0.0481, 0.0423, 0.0536], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:52:40,359 INFO [train.py:901] (1/4) Epoch 3, batch 4100, loss[loss=0.376, simple_loss=0.4104, pruned_loss=0.1708, over 6904.00 frames. ], tot_loss[loss=0.3353, simple_loss=0.3827, pruned_loss=0.1439, over 1615079.53 frames. 
], batch size: 71, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:52:41,926 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0420, 2.1960, 1.6865, 2.8499, 1.6073, 1.3579, 1.7728, 2.3857], + device='cuda:1'), covar=tensor([0.1140, 0.1633, 0.2021, 0.0587, 0.2001, 0.2690, 0.2207, 0.1204], + device='cuda:1'), in_proj_covar=tensor([0.0299, 0.0312, 0.0308, 0.0229, 0.0296, 0.0312, 0.0335, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:52:57,065 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 21:53:14,414 INFO [train.py:901] (1/4) Epoch 3, batch 4150, loss[loss=0.3342, simple_loss=0.3735, pruned_loss=0.1474, over 8601.00 frames. ], tot_loss[loss=0.3361, simple_loss=0.3835, pruned_loss=0.1443, over 1620912.37 frames. ], batch size: 39, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:53:15,797 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.167e+02 3.849e+02 4.660e+02 5.932e+02 1.097e+03, threshold=9.320e+02, percent-clipped=6.0 +2023-02-05 21:53:38,380 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20351.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:53:49,993 INFO [train.py:901] (1/4) Epoch 3, batch 4200, loss[loss=0.2892, simple_loss=0.349, pruned_loss=0.1147, over 7665.00 frames. ], tot_loss[loss=0.3359, simple_loss=0.383, pruned_loss=0.1444, over 1619526.84 frames. ], batch size: 19, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:53:55,427 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 21:53:56,308 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20376.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:00,121 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:16,277 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20406.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:16,812 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 21:54:24,064 INFO [train.py:901] (1/4) Epoch 3, batch 4250, loss[loss=0.3688, simple_loss=0.4153, pruned_loss=0.1611, over 8460.00 frames. ], tot_loss[loss=0.3359, simple_loss=0.3829, pruned_loss=0.1444, over 1618022.22 frames. ], batch size: 27, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:54:25,369 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.360e+02 3.627e+02 5.036e+02 6.332e+02 1.636e+03, threshold=1.007e+03, percent-clipped=4.0 +2023-02-05 21:54:29,691 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5381, 2.0196, 3.3016, 1.1408, 2.3357, 1.7728, 1.5221, 1.8937], + device='cuda:1'), covar=tensor([0.1123, 0.1159, 0.0453, 0.2164, 0.0906, 0.1682, 0.1034, 0.1548], + device='cuda:1'), in_proj_covar=tensor([0.0432, 0.0398, 0.0471, 0.0485, 0.0532, 0.0473, 0.0419, 0.0533], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:54:31,104 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-05 21:54:37,015 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20436.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:47,726 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20451.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:59,264 INFO [train.py:901] (1/4) Epoch 3, batch 4300, loss[loss=0.3378, simple_loss=0.3781, pruned_loss=0.1488, over 7685.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3822, pruned_loss=0.1443, over 1611828.80 frames. ], batch size: 18, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:04,855 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20474.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:20,187 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:33,536 INFO [train.py:901] (1/4) Epoch 3, batch 4350, loss[loss=0.3134, simple_loss=0.3545, pruned_loss=0.1361, over 7810.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3819, pruned_loss=0.1441, over 1617391.89 frames. ], batch size: 20, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:34,898 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.452e+02 4.356e+02 5.638e+02 1.577e+03, threshold=8.711e+02, percent-clipped=2.0 +2023-02-05 21:55:46,534 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 21:55:55,316 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4889, 2.0517, 3.6037, 1.1454, 2.4896, 1.7866, 1.5476, 1.9710], + device='cuda:1'), covar=tensor([0.1197, 0.1265, 0.0424, 0.2169, 0.1021, 0.1807, 0.1059, 0.1862], + device='cuda:1'), in_proj_covar=tensor([0.0435, 0.0406, 0.0478, 0.0494, 0.0534, 0.0475, 0.0426, 0.0538], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:55:57,857 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4972, 2.0741, 1.2971, 1.6979, 1.7536, 1.2308, 1.4686, 1.9950], + device='cuda:1'), covar=tensor([0.0938, 0.0291, 0.0950, 0.0579, 0.0578, 0.1003, 0.0967, 0.0571], + device='cuda:1'), in_proj_covar=tensor([0.0369, 0.0241, 0.0336, 0.0319, 0.0339, 0.0321, 0.0363, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 21:56:06,881 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:06,990 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:07,507 INFO [train.py:901] (1/4) Epoch 3, batch 4400, loss[loss=0.3091, simple_loss=0.3476, pruned_loss=0.1353, over 6812.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3833, pruned_loss=0.1458, over 1613718.48 frames. ], batch size: 15, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:11,290 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 21:56:24,123 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20589.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:27,484 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-05 21:56:36,782 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:44,139 INFO [train.py:901] (1/4) Epoch 3, batch 4450, loss[loss=0.3712, simple_loss=0.4197, pruned_loss=0.1613, over 8615.00 frames. ], tot_loss[loss=0.3359, simple_loss=0.383, pruned_loss=0.1444, over 1614691.32 frames. ], batch size: 31, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:45,438 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.404e+02 4.420e+02 6.069e+02 1.310e+03, threshold=8.839e+02, percent-clipped=8.0 +2023-02-05 21:57:07,990 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3207, 1.6488, 1.3921, 1.2837, 1.6593, 1.4754, 1.6310, 1.6843], + device='cuda:1'), covar=tensor([0.0732, 0.1377, 0.2075, 0.1643, 0.0795, 0.1705, 0.0992, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0184, 0.0218, 0.0256, 0.0220, 0.0185, 0.0220, 0.0181, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 21:57:12,691 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7488, 1.2511, 3.2408, 1.4510, 2.2110, 3.5020, 3.2791, 3.0834], + device='cuda:1'), covar=tensor([0.1048, 0.1691, 0.0337, 0.1936, 0.0724, 0.0267, 0.0349, 0.0518], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0270, 0.0208, 0.0269, 0.0213, 0.0180, 0.0183, 0.0261], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:57:18,488 INFO [train.py:901] (1/4) Epoch 3, batch 4500, loss[loss=0.3422, simple_loss=0.3908, pruned_loss=0.1468, over 8658.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3823, pruned_loss=0.1443, over 1609586.09 frames. ], batch size: 34, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:20,855 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 21:57:21,680 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6269, 2.5553, 1.5211, 1.9480, 1.9348, 1.4746, 1.8692, 2.1615], + device='cuda:1'), covar=tensor([0.1071, 0.0342, 0.0893, 0.0493, 0.0522, 0.0976, 0.0751, 0.0671], + device='cuda:1'), in_proj_covar=tensor([0.0365, 0.0241, 0.0329, 0.0315, 0.0342, 0.0320, 0.0357, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 21:57:27,450 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:57:30,258 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.01 vs. limit=5.0 +2023-02-05 21:57:53,171 INFO [train.py:901] (1/4) Epoch 3, batch 4550, loss[loss=0.3159, simple_loss=0.359, pruned_loss=0.1364, over 7540.00 frames. ], tot_loss[loss=0.3351, simple_loss=0.3822, pruned_loss=0.144, over 1607476.11 frames. 
], batch size: 18, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:54,484 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.207e+02 3.483e+02 4.570e+02 6.300e+02 1.347e+03, threshold=9.139e+02, percent-clipped=2.0 +2023-02-05 21:57:54,738 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6576, 2.2055, 4.5172, 0.9793, 2.7558, 2.2893, 1.5266, 2.3240], + device='cuda:1'), covar=tensor([0.1365, 0.1688, 0.0530, 0.2887, 0.1174, 0.1957, 0.1237, 0.2234], + device='cuda:1'), in_proj_covar=tensor([0.0435, 0.0411, 0.0479, 0.0497, 0.0548, 0.0484, 0.0425, 0.0550], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 21:58:14,826 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20750.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:17,073 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:26,235 INFO [train.py:901] (1/4) Epoch 3, batch 4600, loss[loss=0.3169, simple_loss=0.3676, pruned_loss=0.1332, over 7706.00 frames. ], tot_loss[loss=0.3355, simple_loss=0.382, pruned_loss=0.1445, over 1611552.82 frames. ], batch size: 18, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:58:34,567 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:35,792 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20780.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:00,491 INFO [train.py:901] (1/4) Epoch 3, batch 4650, loss[loss=0.2867, simple_loss=0.3496, pruned_loss=0.1119, over 7976.00 frames. ], tot_loss[loss=0.3363, simple_loss=0.3823, pruned_loss=0.1451, over 1607618.71 frames. ], batch size: 21, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:02,525 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 3.299e+02 4.239e+02 5.426e+02 9.400e+02, threshold=8.478e+02, percent-clipped=1.0 +2023-02-05 21:59:04,816 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20822.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:21,394 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20845.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:22,741 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20847.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:34,619 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20865.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:35,785 INFO [train.py:901] (1/4) Epoch 3, batch 4700, loss[loss=0.3464, simple_loss=0.4015, pruned_loss=0.1456, over 8448.00 frames. ], tot_loss[loss=0.3356, simple_loss=0.3822, pruned_loss=0.1445, over 1614936.92 frames. ], batch size: 27, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:37,914 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20870.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:54,881 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20895.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:08,976 INFO [train.py:901] (1/4) Epoch 3, batch 4750, loss[loss=0.3329, simple_loss=0.385, pruned_loss=0.1404, over 8321.00 frames. ], tot_loss[loss=0.3345, simple_loss=0.3812, pruned_loss=0.1439, over 1611062.60 frames. 
], batch size: 25, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 22:00:10,302 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.634e+02 4.432e+02 5.821e+02 1.296e+03, threshold=8.863e+02, percent-clipped=5.0 +2023-02-05 22:00:23,256 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20937.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:24,395 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 22:00:24,548 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6840, 1.4752, 3.3080, 1.1864, 2.2232, 3.6643, 3.4298, 3.1541], + device='cuda:1'), covar=tensor([0.1077, 0.1265, 0.0291, 0.1808, 0.0616, 0.0223, 0.0308, 0.0528], + device='cuda:1'), in_proj_covar=tensor([0.0227, 0.0255, 0.0197, 0.0255, 0.0204, 0.0174, 0.0175, 0.0250], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:00:26,473 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 22:00:32,640 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:41,442 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:44,560 INFO [train.py:901] (1/4) Epoch 3, batch 4800, loss[loss=0.3724, simple_loss=0.4149, pruned_loss=0.165, over 8340.00 frames. ], tot_loss[loss=0.3337, simple_loss=0.3808, pruned_loss=0.1432, over 1610258.11 frames. ], batch size: 25, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:18,124 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 22:01:18,796 INFO [train.py:901] (1/4) Epoch 3, batch 4850, loss[loss=0.3502, simple_loss=0.3825, pruned_loss=0.1589, over 7434.00 frames. ], tot_loss[loss=0.3321, simple_loss=0.3796, pruned_loss=0.1423, over 1607061.03 frames. ], batch size: 17, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:20,191 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.459e+02 3.687e+02 4.412e+02 5.668e+02 1.155e+03, threshold=8.825e+02, percent-clipped=6.0 +2023-02-05 22:01:37,496 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3910, 1.9499, 3.1735, 1.0158, 2.1897, 1.6455, 1.4781, 1.7705], + device='cuda:1'), covar=tensor([0.1283, 0.1330, 0.0480, 0.2501, 0.1148, 0.1849, 0.1162, 0.1815], + device='cuda:1'), in_proj_covar=tensor([0.0439, 0.0410, 0.0484, 0.0501, 0.0551, 0.0475, 0.0432, 0.0547], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 22:01:42,459 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-05 22:01:51,977 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21065.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:01:53,054 INFO [train.py:901] (1/4) Epoch 3, batch 4900, loss[loss=0.3242, simple_loss=0.3548, pruned_loss=0.1468, over 7637.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3801, pruned_loss=0.1425, over 1609938.98 frames. 
], batch size: 19, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:55,916 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:27,700 INFO [train.py:901] (1/4) Epoch 3, batch 4950, loss[loss=0.3417, simple_loss=0.3874, pruned_loss=0.148, over 8730.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.3806, pruned_loss=0.1428, over 1608252.66 frames. ], batch size: 34, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:02:29,085 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 3.569e+02 4.502e+02 6.229e+02 1.133e+03, threshold=9.004e+02, percent-clipped=2.0 +2023-02-05 22:02:30,676 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:41,332 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:48,073 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:51,488 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:53,464 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2074, 1.6885, 2.0342, 1.5464, 0.8518, 1.7672, 0.2681, 1.2170], + device='cuda:1'), covar=tensor([0.3447, 0.1709, 0.0786, 0.1873, 0.4412, 0.1205, 0.4778, 0.2230], + device='cuda:1'), in_proj_covar=tensor([0.0113, 0.0106, 0.0082, 0.0152, 0.0171, 0.0081, 0.0149, 0.0115], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:03:01,832 INFO [train.py:901] (1/4) Epoch 3, batch 5000, loss[loss=0.2863, simple_loss=0.3353, pruned_loss=0.1187, over 7424.00 frames. ], tot_loss[loss=0.3314, simple_loss=0.3795, pruned_loss=0.1417, over 1611197.11 frames. ], batch size: 17, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:08,655 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:34,153 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0956, 1.4331, 1.5096, 1.1294, 1.5240, 1.4486, 1.4640, 1.5415], + device='cuda:1'), covar=tensor([0.0727, 0.1407, 0.1838, 0.1660, 0.0681, 0.1580, 0.0932, 0.0650], + device='cuda:1'), in_proj_covar=tensor([0.0183, 0.0218, 0.0254, 0.0217, 0.0178, 0.0219, 0.0181, 0.0180], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 22:03:37,220 INFO [train.py:901] (1/4) Epoch 3, batch 5050, loss[loss=0.2858, simple_loss=0.3458, pruned_loss=0.1129, over 8204.00 frames. ], tot_loss[loss=0.3299, simple_loss=0.378, pruned_loss=0.1409, over 1607337.83 frames. ], batch size: 23, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:38,541 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.043e+02 3.325e+02 4.224e+02 5.254e+02 1.187e+03, threshold=8.447e+02, percent-clipped=3.0 +2023-02-05 22:03:57,070 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 22:04:11,239 INFO [train.py:901] (1/4) Epoch 3, batch 5100, loss[loss=0.2896, simple_loss=0.3422, pruned_loss=0.1185, over 7818.00 frames. ], tot_loss[loss=0.33, simple_loss=0.378, pruned_loss=0.141, over 1607877.45 frames. 
], batch size: 20, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:28,234 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4702, 1.8796, 1.9788, 0.7704, 1.9827, 1.3514, 0.5154, 1.6575], + device='cuda:1'), covar=tensor([0.0130, 0.0072, 0.0096, 0.0151, 0.0086, 0.0248, 0.0224, 0.0067], + device='cuda:1'), in_proj_covar=tensor([0.0225, 0.0161, 0.0135, 0.0208, 0.0159, 0.0276, 0.0219, 0.0189], + device='cuda:1'), out_proj_covar=tensor([1.0640e-04, 7.5361e-05, 6.1075e-05, 9.3885e-05, 7.5659e-05, 1.3855e-04, + 1.0545e-04, 8.7761e-05], device='cuda:1') +2023-02-05 22:04:46,363 INFO [train.py:901] (1/4) Epoch 3, batch 5150, loss[loss=0.4568, simple_loss=0.463, pruned_loss=0.2253, over 7393.00 frames. ], tot_loss[loss=0.3287, simple_loss=0.3772, pruned_loss=0.1401, over 1608057.73 frames. ], batch size: 71, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:47,674 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.142e+02 3.453e+02 4.061e+02 5.332e+02 1.278e+03, threshold=8.122e+02, percent-clipped=4.0 +2023-02-05 22:04:50,019 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21321.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:04:56,026 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21330.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,483 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:19,999 INFO [train.py:901] (1/4) Epoch 3, batch 5200, loss[loss=0.3589, simple_loss=0.3814, pruned_loss=0.1682, over 8247.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.3774, pruned_loss=0.1405, over 1608426.69 frames. ], batch size: 22, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:43,302 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5589, 2.5793, 1.5993, 1.9358, 1.8880, 1.1226, 1.8142, 2.0318], + device='cuda:1'), covar=tensor([0.1383, 0.0401, 0.1108, 0.0777, 0.0955, 0.1578, 0.1164, 0.0829], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0235, 0.0325, 0.0316, 0.0338, 0.0321, 0.0347, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 22:05:52,822 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:54,863 INFO [train.py:901] (1/4) Epoch 3, batch 5250, loss[loss=0.3301, simple_loss=0.3924, pruned_loss=0.1339, over 8525.00 frames. ], tot_loss[loss=0.3315, simple_loss=0.3787, pruned_loss=0.1421, over 1606011.34 frames. ], batch size: 28, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:54,880 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-05 22:05:56,257 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 3.353e+02 4.281e+02 5.765e+02 2.364e+03, threshold=8.563e+02, percent-clipped=11.0 +2023-02-05 22:06:15,346 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1777, 4.2031, 3.8835, 1.7270, 3.6898, 3.6613, 4.0167, 3.0769], + device='cuda:1'), covar=tensor([0.1028, 0.0628, 0.0928, 0.4668, 0.0645, 0.0711, 0.1335, 0.0876], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0234, 0.0284, 0.0367, 0.0264, 0.0211, 0.0263, 0.0202], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 22:06:30,410 INFO [train.py:901] (1/4) Epoch 3, batch 5300, loss[loss=0.3884, simple_loss=0.4142, pruned_loss=0.1813, over 8664.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3793, pruned_loss=0.1419, over 1609283.24 frames. ], batch size: 39, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:06:39,463 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:06:57,980 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 22:07:04,804 INFO [train.py:901] (1/4) Epoch 3, batch 5350, loss[loss=0.3074, simple_loss=0.3574, pruned_loss=0.1287, over 7801.00 frames. ], tot_loss[loss=0.332, simple_loss=0.3801, pruned_loss=0.142, over 1613305.51 frames. ], batch size: 20, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:07:06,080 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.338e+02 4.128e+02 5.460e+02 1.129e+03, threshold=8.255e+02, percent-clipped=3.0 +2023-02-05 22:07:13,625 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21529.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:40,175 INFO [train.py:901] (1/4) Epoch 3, batch 5400, loss[loss=0.3578, simple_loss=0.3962, pruned_loss=0.1597, over 7803.00 frames. ], tot_loss[loss=0.3349, simple_loss=0.3822, pruned_loss=0.1438, over 1614244.26 frames. ], batch size: 20, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:07:52,667 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-05 22:07:59,350 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21595.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:12,087 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21613.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:14,749 INFO [train.py:901] (1/4) Epoch 3, batch 5450, loss[loss=0.3217, simple_loss=0.3807, pruned_loss=0.1313, over 8481.00 frames. ], tot_loss[loss=0.3351, simple_loss=0.3826, pruned_loss=0.1438, over 1614382.96 frames. ], batch size: 25, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:16,068 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 3.746e+02 4.366e+02 5.874e+02 2.172e+03, threshold=8.732e+02, percent-clipped=6.0 +2023-02-05 22:08:19,189 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-05 22:08:24,284 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21631.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:41,812 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-05 22:08:49,813 INFO [train.py:901] (1/4) Epoch 3, batch 5500, loss[loss=0.2579, simple_loss=0.3087, pruned_loss=0.1036, over 7410.00 frames. ], tot_loss[loss=0.3328, simple_loss=0.3811, pruned_loss=0.1423, over 1614771.11 frames. ], batch size: 17, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:55,224 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21674.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:05,892 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21690.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:23,546 INFO [train.py:901] (1/4) Epoch 3, batch 5550, loss[loss=0.3151, simple_loss=0.3624, pruned_loss=0.1338, over 7656.00 frames. ], tot_loss[loss=0.3313, simple_loss=0.3804, pruned_loss=0.1411, over 1614406.73 frames. ], batch size: 19, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:09:24,912 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 3.296e+02 4.063e+02 5.206e+02 8.291e+02, threshold=8.125e+02, percent-clipped=0.0 +2023-02-05 22:09:28,458 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3042, 1.2785, 2.7799, 1.1482, 2.0636, 2.9175, 2.6145, 2.5219], + device='cuda:1'), covar=tensor([0.1384, 0.1533, 0.0441, 0.2038, 0.0665, 0.0370, 0.0691, 0.0704], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0261, 0.0200, 0.0254, 0.0201, 0.0180, 0.0177, 0.0252], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:09:58,432 INFO [train.py:901] (1/4) Epoch 3, batch 5600, loss[loss=0.4309, simple_loss=0.4508, pruned_loss=0.2055, over 8365.00 frames. ], tot_loss[loss=0.3304, simple_loss=0.3796, pruned_loss=0.1406, over 1609224.40 frames. ], batch size: 24, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:08,490 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:11,832 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21785.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:14,466 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21789.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:25,386 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:28,825 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21810.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:33,432 INFO [train.py:901] (1/4) Epoch 3, batch 5650, loss[loss=0.2764, simple_loss=0.3358, pruned_loss=0.1085, over 8143.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3774, pruned_loss=0.1392, over 1606761.06 frames. ], batch size: 22, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:34,802 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.984e+02 3.614e+02 4.526e+02 5.980e+02 8.654e+02, threshold=9.051e+02, percent-clipped=4.0 +2023-02-05 22:10:45,286 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-05 22:10:56,855 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21851.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:58,914 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4993, 2.0581, 3.4134, 2.8174, 2.7163, 2.0882, 1.5102, 1.5600], + device='cuda:1'), covar=tensor([0.1024, 0.1210, 0.0202, 0.0488, 0.0526, 0.0512, 0.0713, 0.1125], + device='cuda:1'), in_proj_covar=tensor([0.0627, 0.0541, 0.0457, 0.0499, 0.0610, 0.0502, 0.0518, 0.0523], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:11:07,114 INFO [train.py:901] (1/4) Epoch 3, batch 5700, loss[loss=0.3546, simple_loss=0.3897, pruned_loss=0.1598, over 8353.00 frames. ], tot_loss[loss=0.3309, simple_loss=0.379, pruned_loss=0.1414, over 1603326.52 frames. ], batch size: 26, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:07,936 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:13,371 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:42,901 INFO [train.py:901] (1/4) Epoch 3, batch 5750, loss[loss=0.3923, simple_loss=0.4165, pruned_loss=0.184, over 8471.00 frames. ], tot_loss[loss=0.3295, simple_loss=0.3777, pruned_loss=0.1406, over 1604670.56 frames. ], batch size: 25, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:44,219 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.339e+02 3.657e+02 4.422e+02 5.345e+02 1.248e+03, threshold=8.845e+02, percent-clipped=3.0 +2023-02-05 22:11:49,709 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 22:11:53,992 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5802, 2.2623, 4.5512, 1.0674, 2.9220, 2.1347, 1.7622, 2.5585], + device='cuda:1'), covar=tensor([0.1336, 0.1690, 0.0488, 0.2789, 0.1155, 0.2106, 0.1176, 0.1888], + device='cuda:1'), in_proj_covar=tensor([0.0439, 0.0416, 0.0490, 0.0504, 0.0545, 0.0485, 0.0425, 0.0549], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 22:12:02,091 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-05 22:12:10,247 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21957.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:16,968 INFO [train.py:901] (1/4) Epoch 3, batch 5800, loss[loss=0.3574, simple_loss=0.3919, pruned_loss=0.1615, over 7970.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.3771, pruned_loss=0.1406, over 1603402.63 frames. ], batch size: 21, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:22,434 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:31,003 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21988.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:52,153 INFO [train.py:901] (1/4) Epoch 3, batch 5850, loss[loss=0.317, simple_loss=0.3741, pruned_loss=0.1299, over 8337.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3754, pruned_loss=0.1394, over 1602388.49 frames. 
], batch size: 26, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:53,399 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.662e+02 4.461e+02 5.594e+02 1.608e+03, threshold=8.923e+02, percent-clipped=8.0 +2023-02-05 22:12:55,624 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9509, 3.7796, 2.3623, 2.5414, 2.9053, 1.7439, 2.2468, 2.5485], + device='cuda:1'), covar=tensor([0.1675, 0.0528, 0.0988, 0.1066, 0.0721, 0.1324, 0.1410, 0.1178], + device='cuda:1'), in_proj_covar=tensor([0.0365, 0.0249, 0.0327, 0.0316, 0.0340, 0.0323, 0.0357, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 22:13:11,807 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:22,197 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:25,935 INFO [train.py:901] (1/4) Epoch 3, batch 5900, loss[loss=0.3387, simple_loss=0.3652, pruned_loss=0.1561, over 7557.00 frames. ], tot_loss[loss=0.3266, simple_loss=0.3749, pruned_loss=0.1391, over 1605293.50 frames. ], batch size: 18, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:13:28,813 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:30,188 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22072.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:39,580 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22086.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:42,233 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:00,344 INFO [train.py:901] (1/4) Epoch 3, batch 5950, loss[loss=0.2669, simple_loss=0.3305, pruned_loss=0.1016, over 8076.00 frames. ], tot_loss[loss=0.3287, simple_loss=0.3766, pruned_loss=0.1404, over 1608931.23 frames. ], batch size: 21, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:02,408 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.045e+02 3.353e+02 4.485e+02 5.691e+02 1.558e+03, threshold=8.970e+02, percent-clipped=6.0 +2023-02-05 22:14:07,175 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:23,952 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22148.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:36,958 INFO [train.py:901] (1/4) Epoch 3, batch 6000, loss[loss=0.343, simple_loss=0.3814, pruned_loss=0.1523, over 7918.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3754, pruned_loss=0.1394, over 1606173.01 frames. ], batch size: 20, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:36,958 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 22:14:49,936 INFO [train.py:935] (1/4) Epoch 3, validation: loss=0.2472, simple_loss=0.3383, pruned_loss=0.07805, over 944034.00 frames. 
+2023-02-05 22:14:49,936 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 22:15:08,345 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22194.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:21,644 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:25,115 INFO [train.py:901] (1/4) Epoch 3, batch 6050, loss[loss=0.3827, simple_loss=0.4084, pruned_loss=0.1785, over 7935.00 frames. ], tot_loss[loss=0.3258, simple_loss=0.3745, pruned_loss=0.1386, over 1609195.42 frames. ], batch size: 20, lr: 2.18e-02, grad_scale: 8.0 +2023-02-05 22:15:26,475 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.554e+02 3.417e+02 4.364e+02 5.364e+02 3.571e+03, threshold=8.727e+02, percent-clipped=6.0 +2023-02-05 22:15:36,164 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22233.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:40,933 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:51,050 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22255.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:59,767 INFO [train.py:901] (1/4) Epoch 3, batch 6100, loss[loss=0.2872, simple_loss=0.355, pruned_loss=0.1096, over 8095.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3768, pruned_loss=0.1397, over 1607431.94 frames. ], batch size: 23, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:16:12,751 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7759, 4.1472, 2.5273, 2.6872, 2.9347, 2.0202, 2.4142, 2.8249], + device='cuda:1'), covar=tensor([0.1371, 0.0235, 0.0675, 0.0740, 0.0536, 0.0994, 0.0987, 0.0754], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0238, 0.0320, 0.0305, 0.0327, 0.0309, 0.0343, 0.0300], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 22:16:18,458 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 22:16:35,122 INFO [train.py:901] (1/4) Epoch 3, batch 6150, loss[loss=0.306, simple_loss=0.3712, pruned_loss=0.1204, over 8482.00 frames. ], tot_loss[loss=0.3295, simple_loss=0.3784, pruned_loss=0.1402, over 1610625.17 frames. ], batch size: 27, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:16:36,462 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.615e+02 4.380e+02 5.688e+02 1.525e+03, threshold=8.759e+02, percent-clipped=2.0 +2023-02-05 22:16:41,831 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22327.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:42,483 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22328.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:45,082 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22332.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:50,862 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-05 22:16:51,479 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-05 22:16:54,536 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:59,259 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22353.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:08,709 INFO [train.py:901] (1/4) Epoch 3, batch 6200, loss[loss=0.3291, simple_loss=0.3771, pruned_loss=0.1406, over 8198.00 frames. ], tot_loss[loss=0.3282, simple_loss=0.3774, pruned_loss=0.1396, over 1611396.64 frames. ], batch size: 23, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:17:11,709 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22371.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:14,321 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7222, 3.6500, 3.2585, 1.9539, 3.2322, 3.1727, 3.4420, 2.7931], + device='cuda:1'), covar=tensor([0.0991, 0.0719, 0.1007, 0.4254, 0.0783, 0.0947, 0.1211, 0.0931], + device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0250, 0.0281, 0.0375, 0.0265, 0.0217, 0.0263, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 22:17:34,075 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.04 vs. limit=2.0 +2023-02-05 22:17:34,524 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22403.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:44,420 INFO [train.py:901] (1/4) Epoch 3, batch 6250, loss[loss=0.2837, simple_loss=0.3537, pruned_loss=0.1068, over 8456.00 frames. ], tot_loss[loss=0.3296, simple_loss=0.3782, pruned_loss=0.1405, over 1611868.93 frames. ], batch size: 29, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:17:45,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.506e+02 4.308e+02 5.585e+02 1.214e+03, threshold=8.617e+02, percent-clipped=6.0 +2023-02-05 22:17:57,836 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-05 22:18:04,325 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2686, 1.2594, 2.2518, 1.1326, 2.1114, 2.4155, 2.3776, 2.0942], + device='cuda:1'), covar=tensor([0.1045, 0.1150, 0.0435, 0.1884, 0.0452, 0.0362, 0.0371, 0.0712], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0261, 0.0198, 0.0259, 0.0205, 0.0179, 0.0174, 0.0252], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:18:05,787 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:19,126 INFO [train.py:901] (1/4) Epoch 3, batch 6300, loss[loss=0.2906, simple_loss=0.3575, pruned_loss=0.1119, over 8101.00 frames. ], tot_loss[loss=0.3314, simple_loss=0.3796, pruned_loss=0.1416, over 1616811.12 frames. ], batch size: 23, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:18:36,440 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22492.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:39,306 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:54,108 INFO [train.py:901] (1/4) Epoch 3, batch 6350, loss[loss=0.3314, simple_loss=0.3786, pruned_loss=0.1421, over 8128.00 frames. 
], tot_loss[loss=0.3287, simple_loss=0.3778, pruned_loss=0.1398, over 1618944.68 frames. ], batch size: 22, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:18:55,440 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.104e+02 3.537e+02 4.368e+02 5.315e+02 1.494e+03, threshold=8.736e+02, percent-clipped=5.0 +2023-02-05 22:18:57,018 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:08,839 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22538.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:28,745 INFO [train.py:901] (1/4) Epoch 3, batch 6400, loss[loss=0.3708, simple_loss=0.4164, pruned_loss=0.1626, over 8502.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.3763, pruned_loss=0.1385, over 1617911.85 frames. ], batch size: 26, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:19:35,384 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22577.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:39,403 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8286, 2.3169, 2.0161, 2.9341, 1.4598, 1.3949, 1.8815, 2.3699], + device='cuda:1'), covar=tensor([0.1417, 0.1418, 0.1509, 0.0427, 0.1983, 0.2595, 0.1849, 0.1204], + device='cuda:1'), in_proj_covar=tensor([0.0303, 0.0298, 0.0300, 0.0223, 0.0279, 0.0310, 0.0315, 0.0291], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 22:19:39,436 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22583.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:50,039 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22599.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:55,824 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22607.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:56,540 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22608.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:03,201 INFO [train.py:901] (1/4) Epoch 3, batch 6450, loss[loss=0.3456, simple_loss=0.3868, pruned_loss=0.1522, over 8017.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3773, pruned_loss=0.1393, over 1619464.85 frames. ], batch size: 22, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:20:04,479 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 3.557e+02 4.436e+02 5.729e+02 1.082e+03, threshold=8.871e+02, percent-clipped=7.0 +2023-02-05 22:20:28,490 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:31,145 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22657.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:37,605 INFO [train.py:901] (1/4) Epoch 3, batch 6500, loss[loss=0.3865, simple_loss=0.4134, pruned_loss=0.1798, over 8492.00 frames. ], tot_loss[loss=0.3306, simple_loss=0.3792, pruned_loss=0.141, over 1618610.32 frames. 
], batch size: 49, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:20:55,249 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:02,656 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22703.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:09,918 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22714.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:11,752 INFO [train.py:901] (1/4) Epoch 3, batch 6550, loss[loss=0.3779, simple_loss=0.4196, pruned_loss=0.1681, over 8558.00 frames. ], tot_loss[loss=0.3298, simple_loss=0.3789, pruned_loss=0.1404, over 1618925.66 frames. ], batch size: 34, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:21:13,166 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.155e+02 3.258e+02 3.883e+02 5.357e+02 1.264e+03, threshold=7.766e+02, percent-clipped=3.0 +2023-02-05 22:21:19,295 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:28,613 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 22:21:32,838 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22747.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:47,199 INFO [train.py:901] (1/4) Epoch 3, batch 6600, loss[loss=0.3769, simple_loss=0.4231, pruned_loss=0.1654, over 8600.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3789, pruned_loss=0.1409, over 1614008.26 frames. ], batch size: 39, lr: 2.16e-02, grad_scale: 8.0 +2023-02-05 22:21:47,899 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 22:21:54,955 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5094, 2.5932, 1.6707, 1.9506, 1.9835, 1.1416, 1.8068, 1.9400], + device='cuda:1'), covar=tensor([0.1147, 0.0326, 0.0840, 0.0724, 0.0677, 0.1198, 0.0926, 0.0806], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0231, 0.0322, 0.0315, 0.0328, 0.0314, 0.0340, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 22:22:19,032 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22812.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:22,348 INFO [train.py:901] (1/4) Epoch 3, batch 6650, loss[loss=0.3079, simple_loss=0.3734, pruned_loss=0.1213, over 8453.00 frames. ], tot_loss[loss=0.3301, simple_loss=0.3791, pruned_loss=0.1406, over 1620182.49 frames. 
], batch size: 25, lr: 2.16e-02, grad_scale: 8.0 +2023-02-05 22:22:24,341 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.373e+02 3.456e+02 4.169e+02 5.335e+02 9.931e+02, threshold=8.339e+02, percent-clipped=8.0 +2023-02-05 22:22:40,061 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:53,741 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22862.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:54,497 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22863.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:57,072 INFO [train.py:901] (1/4) Epoch 3, batch 6700, loss[loss=0.3408, simple_loss=0.3799, pruned_loss=0.1508, over 7649.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3786, pruned_loss=0.1407, over 1617554.46 frames. ], batch size: 19, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:23:12,655 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:26,895 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22909.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:32,716 INFO [train.py:901] (1/4) Epoch 3, batch 6750, loss[loss=0.3275, simple_loss=0.3679, pruned_loss=0.1435, over 7541.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3774, pruned_loss=0.1393, over 1620418.41 frames. ], batch size: 18, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:23:34,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.597e+02 4.402e+02 5.483e+02 1.400e+03, threshold=8.804e+02, percent-clipped=7.0 +2023-02-05 22:23:44,511 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22934.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:53,620 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:05,894 INFO [train.py:901] (1/4) Epoch 3, batch 6800, loss[loss=0.348, simple_loss=0.3924, pruned_loss=0.1518, over 8444.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3788, pruned_loss=0.1408, over 1616427.61 frames. ], batch size: 27, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:24:05,909 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 22:24:08,782 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22970.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:10,818 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:26,137 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22995.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:30,239 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23001.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:41,423 INFO [train.py:901] (1/4) Epoch 3, batch 6850, loss[loss=0.3166, simple_loss=0.3745, pruned_loss=0.1294, over 8106.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3784, pruned_loss=0.1399, over 1616045.83 frames. 
], batch size: 23, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:24:43,431 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.425e+02 4.505e+02 5.413e+02 1.323e+03, threshold=9.011e+02, percent-clipped=6.0 +2023-02-05 22:24:54,867 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 22:25:15,269 INFO [train.py:901] (1/4) Epoch 3, batch 6900, loss[loss=0.337, simple_loss=0.3838, pruned_loss=0.1451, over 8108.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3773, pruned_loss=0.1393, over 1612777.54 frames. ], batch size: 23, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:25:50,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23116.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:25:50,685 INFO [train.py:901] (1/4) Epoch 3, batch 6950, loss[loss=0.3133, simple_loss=0.3654, pruned_loss=0.1306, over 8299.00 frames. ], tot_loss[loss=0.3274, simple_loss=0.3766, pruned_loss=0.1391, over 1613447.67 frames. ], batch size: 23, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:25:51,596 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:25:52,726 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.525e+02 4.440e+02 6.025e+02 1.140e+03, threshold=8.880e+02, percent-clipped=3.0 +2023-02-05 22:25:57,645 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23126.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:02,139 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 22:26:09,886 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:18,636 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23156.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:26,200 INFO [train.py:901] (1/4) Epoch 3, batch 7000, loss[loss=0.3309, simple_loss=0.3684, pruned_loss=0.1467, over 7643.00 frames. ], tot_loss[loss=0.3244, simple_loss=0.374, pruned_loss=0.1374, over 1610035.75 frames. ], batch size: 19, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:26:39,931 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23187.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:01,206 INFO [train.py:901] (1/4) Epoch 3, batch 7050, loss[loss=0.3268, simple_loss=0.379, pruned_loss=0.1373, over 8179.00 frames. ], tot_loss[loss=0.3266, simple_loss=0.376, pruned_loss=0.1386, over 1614822.89 frames. ], batch size: 23, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:27:03,863 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 3.682e+02 4.488e+02 5.424e+02 1.788e+03, threshold=8.977e+02, percent-clipped=6.0 +2023-02-05 22:27:14,103 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23235.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:22,748 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:36,329 INFO [train.py:901] (1/4) Epoch 3, batch 7100, loss[loss=0.3283, simple_loss=0.3822, pruned_loss=0.1372, over 7792.00 frames. ], tot_loss[loss=0.3246, simple_loss=0.3744, pruned_loss=0.1374, over 1612564.50 frames. 
], batch size: 19, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:27:39,093 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:59,143 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23302.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:28:08,852 INFO [train.py:901] (1/4) Epoch 3, batch 7150, loss[loss=0.3145, simple_loss=0.3726, pruned_loss=0.1282, over 8573.00 frames. ], tot_loss[loss=0.3268, simple_loss=0.3762, pruned_loss=0.1387, over 1615571.80 frames. ], batch size: 34, lr: 2.13e-02, grad_scale: 8.0 +2023-02-05 22:28:09,679 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7597, 1.5442, 3.3514, 1.3705, 2.1871, 3.6968, 3.4340, 3.1967], + device='cuda:1'), covar=tensor([0.1186, 0.1403, 0.0369, 0.1821, 0.0765, 0.0233, 0.0323, 0.0530], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0264, 0.0206, 0.0263, 0.0204, 0.0181, 0.0182, 0.0253], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:28:10,872 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.845e+02 4.572e+02 5.960e+02 1.048e+03, threshold=9.143e+02, percent-clipped=2.0 +2023-02-05 22:28:43,302 INFO [train.py:901] (1/4) Epoch 3, batch 7200, loss[loss=0.3757, simple_loss=0.4085, pruned_loss=0.1714, over 7274.00 frames. ], tot_loss[loss=0.3264, simple_loss=0.3761, pruned_loss=0.1384, over 1619093.10 frames. ], batch size: 71, lr: 2.13e-02, grad_scale: 8.0 +2023-02-05 22:28:47,006 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 22:28:47,558 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23372.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:29:04,755 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23397.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:29:17,772 INFO [train.py:901] (1/4) Epoch 3, batch 7250, loss[loss=0.3152, simple_loss=0.3678, pruned_loss=0.1313, over 7937.00 frames. ], tot_loss[loss=0.3278, simple_loss=0.3772, pruned_loss=0.1392, over 1619658.27 frames. ], batch size: 20, lr: 2.13e-02, grad_scale: 4.0 +2023-02-05 22:29:20,313 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.518e+02 3.505e+02 4.323e+02 5.847e+02 9.851e+02, threshold=8.646e+02, percent-clipped=2.0 +2023-02-05 22:29:52,896 INFO [train.py:901] (1/4) Epoch 3, batch 7300, loss[loss=0.2608, simple_loss=0.325, pruned_loss=0.09833, over 7545.00 frames. ], tot_loss[loss=0.3278, simple_loss=0.3773, pruned_loss=0.1391, over 1615466.75 frames. 
], batch size: 18, lr: 2.13e-02, grad_scale: 4.0 +2023-02-05 22:29:55,065 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:02,087 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0474, 1.1774, 1.1756, 0.9188, 0.7921, 1.1703, 0.0355, 1.0604], + device='cuda:1'), covar=tensor([0.3084, 0.2082, 0.1388, 0.2052, 0.5612, 0.0978, 0.5176, 0.1530], + device='cuda:1'), in_proj_covar=tensor([0.0121, 0.0116, 0.0085, 0.0159, 0.0186, 0.0080, 0.0151, 0.0117], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:30:21,339 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23506.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:28,671 INFO [train.py:901] (1/4) Epoch 3, batch 7350, loss[loss=0.3044, simple_loss=0.3608, pruned_loss=0.124, over 8104.00 frames. ], tot_loss[loss=0.3285, simple_loss=0.378, pruned_loss=0.1395, over 1618045.11 frames. ], batch size: 23, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:30:31,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.204e+02 3.295e+02 4.174e+02 5.897e+02 1.266e+03, threshold=8.348e+02, percent-clipped=6.0 +2023-02-05 22:30:35,728 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23527.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:45,653 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 22:30:52,307 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23552.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:56,349 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23558.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:03,075 INFO [train.py:901] (1/4) Epoch 3, batch 7400, loss[loss=0.3415, simple_loss=0.406, pruned_loss=0.1385, over 8196.00 frames. ], tot_loss[loss=0.3269, simple_loss=0.377, pruned_loss=0.1384, over 1617623.97 frames. ], batch size: 23, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:31:05,769 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-05 22:31:09,364 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5342, 2.2385, 3.8001, 1.1468, 2.5354, 1.8643, 1.6280, 2.0831], + device='cuda:1'), covar=tensor([0.1178, 0.1194, 0.0357, 0.2260, 0.1003, 0.1689, 0.1078, 0.1594], + device='cuda:1'), in_proj_covar=tensor([0.0441, 0.0412, 0.0488, 0.0499, 0.0546, 0.0484, 0.0433, 0.0554], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 22:31:11,873 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:14,778 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23583.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:16,175 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23585.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:20,316 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:38,776 INFO [train.py:901] (1/4) Epoch 3, batch 7450, loss[loss=0.4341, simple_loss=0.443, pruned_loss=0.2126, over 7281.00 frames. ], tot_loss[loss=0.3257, simple_loss=0.3764, pruned_loss=0.1375, over 1616724.28 frames. ], batch size: 71, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:31:41,492 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.560e+02 4.542e+02 5.434e+02 8.209e+02, threshold=9.083e+02, percent-clipped=0.0 +2023-02-05 22:31:44,189 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 22:32:11,862 INFO [train.py:901] (1/4) Epoch 3, batch 7500, loss[loss=0.3847, simple_loss=0.4271, pruned_loss=0.1711, over 8104.00 frames. ], tot_loss[loss=0.3282, simple_loss=0.3783, pruned_loss=0.139, over 1619522.52 frames. ], batch size: 23, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:32:31,249 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23694.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:32:39,259 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:32:47,011 INFO [train.py:901] (1/4) Epoch 3, batch 7550, loss[loss=0.3014, simple_loss=0.3633, pruned_loss=0.1198, over 8245.00 frames. ], tot_loss[loss=0.3278, simple_loss=0.3782, pruned_loss=0.1387, over 1623611.31 frames. ], batch size: 24, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:32:49,789 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 3.573e+02 4.120e+02 5.568e+02 9.909e+02, threshold=8.240e+02, percent-clipped=1.0 +2023-02-05 22:33:21,007 INFO [train.py:901] (1/4) Epoch 3, batch 7600, loss[loss=0.2899, simple_loss=0.3368, pruned_loss=0.1215, over 7664.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3767, pruned_loss=0.1378, over 1619414.47 frames. ], batch size: 19, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:33:55,887 INFO [train.py:901] (1/4) Epoch 3, batch 7650, loss[loss=0.2607, simple_loss=0.3208, pruned_loss=0.1003, over 7694.00 frames. ], tot_loss[loss=0.3268, simple_loss=0.377, pruned_loss=0.1383, over 1613823.87 frames. 
], batch size: 18, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:33:58,399 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.349e+02 3.333e+02 4.379e+02 5.791e+02 1.321e+03, threshold=8.759e+02, percent-clipped=7.0 +2023-02-05 22:34:07,042 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 22:34:12,885 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23841.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:15,491 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4061, 1.8914, 2.0128, 1.5297, 0.9814, 2.1475, 0.4151, 1.4371], + device='cuda:1'), covar=tensor([0.3642, 0.1730, 0.1263, 0.2689, 0.5679, 0.0872, 0.5579, 0.2186], + device='cuda:1'), in_proj_covar=tensor([0.0119, 0.0116, 0.0083, 0.0159, 0.0188, 0.0080, 0.0147, 0.0117], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:34:19,387 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23850.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:30,199 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23866.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:30,652 INFO [train.py:901] (1/4) Epoch 3, batch 7700, loss[loss=0.2878, simple_loss=0.3582, pruned_loss=0.1087, over 8134.00 frames. ], tot_loss[loss=0.3252, simple_loss=0.3756, pruned_loss=0.1373, over 1612059.52 frames. ], batch size: 22, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:34:50,851 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 22:35:01,334 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-05 22:35:03,601 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8257, 3.7594, 3.4396, 1.7532, 3.3477, 3.3145, 3.5500, 3.0289], + device='cuda:1'), covar=tensor([0.0997, 0.0690, 0.1031, 0.4207, 0.0823, 0.0816, 0.1315, 0.0824], + device='cuda:1'), in_proj_covar=tensor([0.0372, 0.0251, 0.0292, 0.0379, 0.0280, 0.0230, 0.0273, 0.0217], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 22:35:04,771 INFO [train.py:901] (1/4) Epoch 3, batch 7750, loss[loss=0.2619, simple_loss=0.3371, pruned_loss=0.09341, over 8129.00 frames. ], tot_loss[loss=0.3244, simple_loss=0.3751, pruned_loss=0.1368, over 1610661.35 frames. ], batch size: 22, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:35:08,099 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.458e+02 4.167e+02 5.729e+02 1.393e+03, threshold=8.335e+02, percent-clipped=8.0 +2023-02-05 22:35:27,675 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:37,128 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:39,023 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23965.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:40,191 INFO [train.py:901] (1/4) Epoch 3, batch 7800, loss[loss=0.2983, simple_loss=0.3598, pruned_loss=0.1184, over 8028.00 frames. ], tot_loss[loss=0.3256, simple_loss=0.376, pruned_loss=0.1376, over 1619767.83 frames. 
], batch size: 22, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:35:45,530 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:53,468 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:36:13,738 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.76 vs. limit=2.0 +2023-02-05 22:36:14,028 INFO [train.py:901] (1/4) Epoch 3, batch 7850, loss[loss=0.286, simple_loss=0.3523, pruned_loss=0.1098, over 8098.00 frames. ], tot_loss[loss=0.3268, simple_loss=0.3767, pruned_loss=0.1385, over 1619592.60 frames. ], batch size: 23, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:36:16,552 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.230e+02 3.608e+02 4.565e+02 5.801e+02 1.089e+03, threshold=9.129e+02, percent-clipped=5.0 +2023-02-05 22:36:39,254 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24055.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:36:47,360 INFO [train.py:901] (1/4) Epoch 3, batch 7900, loss[loss=0.3826, simple_loss=0.4098, pruned_loss=0.1777, over 8575.00 frames. ], tot_loss[loss=0.3251, simple_loss=0.3751, pruned_loss=0.1376, over 1617734.36 frames. ], batch size: 49, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:20,414 INFO [train.py:901] (1/4) Epoch 3, batch 7950, loss[loss=0.3505, simple_loss=0.3993, pruned_loss=0.1508, over 6912.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3757, pruned_loss=0.1383, over 1616461.90 frames. ], batch size: 71, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:23,172 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.080e+02 3.295e+02 4.369e+02 5.897e+02 2.335e+03, threshold=8.738e+02, percent-clipped=5.0 +2023-02-05 22:37:54,055 INFO [train.py:901] (1/4) Epoch 3, batch 8000, loss[loss=0.3996, simple_loss=0.4148, pruned_loss=0.1922, over 7360.00 frames. ], tot_loss[loss=0.3252, simple_loss=0.3749, pruned_loss=0.1377, over 1614689.10 frames. ], batch size: 74, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:38:27,963 INFO [train.py:901] (1/4) Epoch 3, batch 8050, loss[loss=0.2624, simple_loss=0.3141, pruned_loss=0.1053, over 7520.00 frames. ], tot_loss[loss=0.3244, simple_loss=0.373, pruned_loss=0.1379, over 1591144.32 frames. ], batch size: 18, lr: 2.09e-02, grad_scale: 8.0 +2023-02-05 22:38:30,754 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.328e+02 4.149e+02 5.404e+02 3.135e+03, threshold=8.298e+02, percent-clipped=6.0 +2023-02-05 22:38:30,988 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:38:48,120 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24246.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:39:03,851 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 22:39:07,719 INFO [train.py:901] (1/4) Epoch 4, batch 0, loss[loss=0.3027, simple_loss=0.3599, pruned_loss=0.1228, over 8124.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3599, pruned_loss=0.1228, over 8124.00 frames. 
], batch size: 22, lr: 1.96e-02, grad_scale: 8.0 +2023-02-05 22:39:07,719 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 22:39:18,725 INFO [train.py:935] (1/4) Epoch 4, validation: loss=0.2476, simple_loss=0.3384, pruned_loss=0.07836, over 944034.00 frames. +2023-02-05 22:39:18,726 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 22:39:34,132 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 22:39:52,986 INFO [train.py:901] (1/4) Epoch 4, batch 50, loss[loss=0.3157, simple_loss=0.3536, pruned_loss=0.1389, over 7232.00 frames. ], tot_loss[loss=0.3242, simple_loss=0.3748, pruned_loss=0.1368, over 362988.95 frames. ], batch size: 16, lr: 1.96e-02, grad_scale: 8.0 +2023-02-05 22:40:07,592 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.017e+02 3.527e+02 4.250e+02 5.116e+02 9.987e+02, threshold=8.500e+02, percent-clipped=2.0 +2023-02-05 22:40:09,007 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 22:40:27,955 INFO [train.py:901] (1/4) Epoch 4, batch 100, loss[loss=0.3318, simple_loss=0.3874, pruned_loss=0.1381, over 8033.00 frames. ], tot_loss[loss=0.3213, simple_loss=0.373, pruned_loss=0.1348, over 642504.40 frames. ], batch size: 22, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:40:31,336 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 22:40:38,488 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 22:41:01,468 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24399.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:41:01,649 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0997, 1.7980, 2.8776, 2.2962, 2.2914, 1.8523, 1.3423, 1.0236], + device='cuda:1'), covar=tensor([0.1164, 0.1285, 0.0253, 0.0524, 0.0521, 0.0658, 0.0801, 0.1247], + device='cuda:1'), in_proj_covar=tensor([0.0646, 0.0563, 0.0480, 0.0527, 0.0645, 0.0524, 0.0541, 0.0540], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:41:02,095 INFO [train.py:901] (1/4) Epoch 4, batch 150, loss[loss=0.3467, simple_loss=0.3879, pruned_loss=0.1527, over 7667.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3696, pruned_loss=0.1334, over 855774.98 frames. ], batch size: 19, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:41:14,385 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.68 vs. limit=5.0 +2023-02-05 22:41:17,166 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.490e+02 4.203e+02 5.614e+02 1.653e+03, threshold=8.406e+02, percent-clipped=4.0 +2023-02-05 22:41:37,212 INFO [train.py:901] (1/4) Epoch 4, batch 200, loss[loss=0.3005, simple_loss=0.3666, pruned_loss=0.1172, over 8603.00 frames. ], tot_loss[loss=0.3158, simple_loss=0.3686, pruned_loss=0.1315, over 1026505.81 frames. ], batch size: 31, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:11,045 INFO [train.py:901] (1/4) Epoch 4, batch 250, loss[loss=0.3759, simple_loss=0.4099, pruned_loss=0.1709, over 8431.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3697, pruned_loss=0.1325, over 1157764.87 frames. 
], batch size: 27, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:20,367 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=24514.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:42:23,564 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 22:42:24,844 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.156e+02 3.531e+02 4.434e+02 5.277e+02 1.190e+03, threshold=8.868e+02, percent-clipped=4.0 +2023-02-05 22:42:31,617 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 22:42:46,012 INFO [train.py:901] (1/4) Epoch 4, batch 300, loss[loss=0.2815, simple_loss=0.3397, pruned_loss=0.1117, over 7970.00 frames. ], tot_loss[loss=0.3199, simple_loss=0.3726, pruned_loss=0.1336, over 1265578.97 frames. ], batch size: 21, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:57,014 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24565.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:43:12,332 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24587.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:43:21,546 INFO [train.py:901] (1/4) Epoch 4, batch 350, loss[loss=0.292, simple_loss=0.3547, pruned_loss=0.1147, over 8252.00 frames. ], tot_loss[loss=0.3188, simple_loss=0.3715, pruned_loss=0.1331, over 1343364.53 frames. ], batch size: 22, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:43:35,592 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 3.300e+02 4.421e+02 5.071e+02 1.044e+03, threshold=8.841e+02, percent-clipped=4.0 +2023-02-05 22:43:50,090 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-05 22:43:56,471 INFO [train.py:901] (1/4) Epoch 4, batch 400, loss[loss=0.3045, simple_loss=0.3511, pruned_loss=0.1289, over 8081.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3719, pruned_loss=0.1331, over 1402379.10 frames. ], batch size: 21, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:44:12,739 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 22:44:15,945 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5763, 2.1244, 4.6559, 1.0685, 2.5972, 2.0816, 1.5495, 2.6546], + device='cuda:1'), covar=tensor([0.1878, 0.2100, 0.0655, 0.3569, 0.1487, 0.2303, 0.1835, 0.2127], + device='cuda:1'), in_proj_covar=tensor([0.0454, 0.0413, 0.0497, 0.0506, 0.0554, 0.0479, 0.0438, 0.0561], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 22:44:30,024 INFO [train.py:901] (1/4) Epoch 4, batch 450, loss[loss=0.2979, simple_loss=0.3617, pruned_loss=0.117, over 8585.00 frames. ], tot_loss[loss=0.3192, simple_loss=0.372, pruned_loss=0.1332, over 1452954.57 frames. ], batch size: 34, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:44:42,419 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 22:44:44,811 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.414e+02 4.548e+02 5.600e+02 1.007e+03, threshold=9.096e+02, percent-clipped=5.0 +2023-02-05 22:45:04,972 INFO [train.py:901] (1/4) Epoch 4, batch 500, loss[loss=0.3051, simple_loss=0.3615, pruned_loss=0.1244, over 7816.00 frames. 
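The `optim.py` lines above report gradient-norm statistics over a recent window: five quantiles (min, 25%, median, 75%, max), a clipping threshold, and the percentage of recent batches that were clipped. The logged thresholds equal `clipping_scale` times the median (e.g. 2.0 × 4.565e+02 ≈ 9.129e+02). The sketch below reproduces that diagnostic under those assumptions; the windowing details are guesses, not icefall's exact code.

```python
# Sketch of the grad-norm diagnostic printed by optim.py above. Assumes the
# threshold is clipping_scale * median of recently observed norms, which
# matches the logged numbers (9.129e+02 = 2.0 * 4.565e+02).
import torch


def clipping_report(recent_norms: torch.Tensor, clipping_scale: float = 2.0) -> None:
    qs = torch.quantile(recent_norms, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
    threshold = clipping_scale * qs[2]
    pct = 100.0 * (recent_norms > threshold).float().mean()
    print(
        "grad-norm quartiles "
        + " ".join(f"{q:.3e}" for q in qs.tolist())
        + f", threshold={threshold:.3e}, percent-clipped={pct:.1f}"
    )


clipping_report(100.0 + 400.0 * torch.rand(500))  # synthetic norms
```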
], tot_loss[loss=0.3198, simple_loss=0.3726, pruned_loss=0.1335, over 1491801.73 frames. ], batch size: 20, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:45:19,904 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24770.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:28,214 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24783.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:36,833 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24795.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:40,046 INFO [train.py:901] (1/4) Epoch 4, batch 550, loss[loss=0.2686, simple_loss=0.3324, pruned_loss=0.1024, over 7540.00 frames. ], tot_loss[loss=0.3189, simple_loss=0.3721, pruned_loss=0.1329, over 1521523.53 frames. ], batch size: 18, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:45:53,858 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 3.369e+02 4.426e+02 5.591e+02 8.767e+02, threshold=8.852e+02, percent-clipped=0.0 +2023-02-05 22:46:13,952 INFO [train.py:901] (1/4) Epoch 4, batch 600, loss[loss=0.3667, simple_loss=0.4041, pruned_loss=0.1646, over 8587.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3723, pruned_loss=0.1329, over 1547734.94 frames. ], batch size: 31, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:46:24,940 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24866.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:46:28,941 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 22:46:39,228 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1528, 1.2782, 1.1739, 0.0904, 1.2044, 0.9847, 0.2066, 1.2656], + device='cuda:1'), covar=tensor([0.0093, 0.0072, 0.0063, 0.0155, 0.0096, 0.0253, 0.0203, 0.0079], + device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0175, 0.0140, 0.0219, 0.0165, 0.0297, 0.0236, 0.0207], + device='cuda:1'), out_proj_covar=tensor([1.0948e-04, 7.5440e-05, 5.9494e-05, 9.1786e-05, 7.2555e-05, 1.3915e-04, + 1.0301e-04, 8.7562e-05], device='cuda:1') +2023-02-05 22:46:49,154 INFO [train.py:901] (1/4) Epoch 4, batch 650, loss[loss=0.3354, simple_loss=0.3878, pruned_loss=0.1415, over 8581.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.3713, pruned_loss=0.1322, over 1568945.33 frames. 
], batch size: 31, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:46:55,185 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24909.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:47:00,774 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5191, 1.8428, 3.3586, 1.0018, 2.4169, 1.7103, 1.4743, 1.9451], + device='cuda:1'), covar=tensor([0.1824, 0.1884, 0.0646, 0.3371, 0.1309, 0.2505, 0.1827, 0.2230], + device='cuda:1'), in_proj_covar=tensor([0.0459, 0.0416, 0.0501, 0.0511, 0.0550, 0.0482, 0.0441, 0.0562], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 22:47:03,762 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 3.310e+02 4.230e+02 5.108e+02 1.167e+03, threshold=8.459e+02, percent-clipped=4.0 +2023-02-05 22:47:10,606 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24931.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:47:24,034 INFO [train.py:901] (1/4) Epoch 4, batch 700, loss[loss=0.3097, simple_loss=0.341, pruned_loss=0.1392, over 7809.00 frames. ], tot_loss[loss=0.318, simple_loss=0.3716, pruned_loss=0.1322, over 1584768.56 frames. ], batch size: 19, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:47:43,051 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1755, 3.7616, 3.3082, 4.2370, 2.1462, 2.6807, 2.2673, 3.8366], + device='cuda:1'), covar=tensor([0.0858, 0.1000, 0.1131, 0.0352, 0.1879, 0.1900, 0.2162, 0.0857], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0300, 0.0307, 0.0224, 0.0277, 0.0308, 0.0315, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:1') +2023-02-05 22:47:59,147 INFO [train.py:901] (1/4) Epoch 4, batch 750, loss[loss=0.313, simple_loss=0.3709, pruned_loss=0.1275, over 8392.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3708, pruned_loss=0.1313, over 1592351.71 frames. ], batch size: 49, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:48:08,255 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25013.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:13,341 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.175e+02 4.108e+02 5.247e+02 1.235e+03, threshold=8.217e+02, percent-clipped=4.0 +2023-02-05 22:48:14,034 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 22:48:15,510 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:22,610 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 22:48:30,677 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25046.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:33,224 INFO [train.py:901] (1/4) Epoch 4, batch 800, loss[loss=0.3158, simple_loss=0.3727, pruned_loss=0.1294, over 7980.00 frames. ], tot_loss[loss=0.3176, simple_loss=0.3709, pruned_loss=0.1322, over 1597176.77 frames. ], batch size: 21, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:49:06,964 INFO [train.py:901] (1/4) Epoch 4, batch 850, loss[loss=0.2245, simple_loss=0.295, pruned_loss=0.07702, over 7704.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3709, pruned_loss=0.1323, over 1599609.89 frames. 
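The `WARNING ... Exclude cut ...` lines show the data pipeline dropping utterances whose duration falls outside the training range; judging by the durations excluded in this log, cuts shorter than roughly 1 s or longer than roughly 25 s are removed. A lhotse-style sketch of such a filter is below; the exact bounds and the message format are assumptions, not the recipe's literal code.

```python
# Hedged sketch of the duration filter behind the "Exclude cut" warnings.
# The bounds (~1 s to ~25 s) are inferred from the durations excluded above.
import logging

from lhotse import CutSet


def remove_short_and_long_utt(
    cuts: CutSet, min_dur: float = 1.0, max_dur: float = 25.0
) -> CutSet:
    def keep(cut) -> bool:
        if min_dur <= cut.duration <= max_dur:
            return True
        logging.warning(
            f"Exclude cut with ID {cut.id} from training. Duration: {cut.duration}"
        )
        return False

    return cuts.filter(keep)
```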
], batch size: 18, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:49:22,453 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 3.301e+02 4.277e+02 5.478e+02 1.022e+03, threshold=8.554e+02, percent-clipped=4.0 +2023-02-05 22:49:26,600 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:49:42,457 INFO [train.py:901] (1/4) Epoch 4, batch 900, loss[loss=0.2887, simple_loss=0.3588, pruned_loss=0.1093, over 8354.00 frames. ], tot_loss[loss=0.3162, simple_loss=0.3699, pruned_loss=0.1313, over 1604554.06 frames. ], batch size: 24, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:50:16,535 INFO [train.py:901] (1/4) Epoch 4, batch 950, loss[loss=0.323, simple_loss=0.3763, pruned_loss=0.1349, over 8463.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3698, pruned_loss=0.1323, over 1601822.89 frames. ], batch size: 25, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:50:20,252 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5011, 1.7640, 1.7051, 1.3669, 1.1382, 1.8844, 0.2709, 1.1645], + device='cuda:1'), covar=tensor([0.2680, 0.2383, 0.1803, 0.2712, 0.5630, 0.0917, 0.6368, 0.2651], + device='cuda:1'), in_proj_covar=tensor([0.0116, 0.0115, 0.0085, 0.0159, 0.0189, 0.0082, 0.0154, 0.0118], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:50:23,515 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25210.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:50:30,874 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.215e+02 3.501e+02 4.488e+02 5.717e+02 1.063e+03, threshold=8.976e+02, percent-clipped=5.0 +2023-02-05 22:50:40,871 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 22:50:46,557 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25242.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:50:51,542 INFO [train.py:901] (1/4) Epoch 4, batch 1000, loss[loss=0.3749, simple_loss=0.4172, pruned_loss=0.1664, over 8327.00 frames. ], tot_loss[loss=0.3168, simple_loss=0.3692, pruned_loss=0.1322, over 1604810.20 frames. ], batch size: 26, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:51:12,232 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25280.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:13,313 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 22:51:25,827 INFO [train.py:901] (1/4) Epoch 4, batch 1050, loss[loss=0.3173, simple_loss=0.3589, pruned_loss=0.1379, over 8249.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3695, pruned_loss=0.1321, over 1606334.47 frames. ], batch size: 22, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:51:26,402 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-05 22:51:27,141 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25302.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:28,964 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:39,496 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 3.519e+02 4.399e+02 5.664e+02 1.146e+03, threshold=8.797e+02, percent-clipped=2.0 +2023-02-05 22:51:42,388 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25325.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:43,774 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25327.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:58,858 INFO [train.py:901] (1/4) Epoch 4, batch 1100, loss[loss=0.2684, simple_loss=0.3375, pruned_loss=0.0996, over 8457.00 frames. ], tot_loss[loss=0.3148, simple_loss=0.3678, pruned_loss=0.1309, over 1606499.14 frames. ], batch size: 25, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:52:04,505 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:52:34,558 INFO [train.py:901] (1/4) Epoch 4, batch 1150, loss[loss=0.3065, simple_loss=0.3783, pruned_loss=0.1173, over 8467.00 frames. ], tot_loss[loss=0.3151, simple_loss=0.3679, pruned_loss=0.1311, over 1606183.61 frames. ], batch size: 27, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:52:37,396 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 22:52:49,212 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.195e+02 3.278e+02 3.972e+02 4.649e+02 8.065e+02, threshold=7.944e+02, percent-clipped=0.0 +2023-02-05 22:52:59,737 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-05 22:53:06,097 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25446.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:53:08,506 INFO [train.py:901] (1/4) Epoch 4, batch 1200, loss[loss=0.3635, simple_loss=0.4095, pruned_loss=0.1588, over 8299.00 frames. ], tot_loss[loss=0.3155, simple_loss=0.3684, pruned_loss=0.1313, over 1606239.92 frames. ], batch size: 23, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:10,192 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.08 vs. limit=2.0 +2023-02-05 22:53:23,246 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25472.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:53:42,025 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:53:43,175 INFO [train.py:901] (1/4) Epoch 4, batch 1250, loss[loss=0.3319, simple_loss=0.3823, pruned_loss=0.1408, over 8328.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.368, pruned_loss=0.1312, over 1608532.89 frames. 
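The `zipformer.py` lines track a per-module warmup window (`warmup_begin`/`warmup_end`, measured in batches) and occasionally bypass whole encoder layers for a batch (`num_to_drop=1, layers_to_drop={0}`). Below is a toy sketch of stochastic whole-layer dropping in that spirit; the probabilities and the linear ramp are assumptions, not zipformer's actual schedule.

```python
# Toy sketch of stochastic whole-layer dropping, in the spirit of the
# zipformer.py log lines above. The drop probabilities and the linear
# warmup ramp are assumptions, not zipformer's actual schedule.
import random


def choose_layers_to_drop(batch_count: float, warmup_begin: float,
                          warmup_end: float, num_layers: int) -> set:
    if batch_count < warmup_end:
        # Ramp the drop rate down while the module is still warming up.
        frac = (warmup_end - batch_count) / (warmup_end - warmup_begin)
        drop_prob = min(max(0.5 * frac, 0.0), 0.5)
    else:
        drop_prob = 0.05  # small residual rate after warmup (assumed)
    return {i for i in range(num_layers) if random.random() < drop_prob}


layers_to_drop = choose_layers_to_drop(25446.0, 1333.3, 2000.0, num_layers=5)
print(f"num_to_drop={len(layers_to_drop)}, layers_to_drop={layers_to_drop}")
```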
], batch size: 26, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:57,802 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 3.538e+02 4.328e+02 6.105e+02 1.271e+03, threshold=8.657e+02, percent-clipped=4.0 +2023-02-05 22:53:59,259 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25523.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:18,017 INFO [train.py:901] (1/4) Epoch 4, batch 1300, loss[loss=0.3452, simple_loss=0.3847, pruned_loss=0.1528, over 7796.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3678, pruned_loss=0.1311, over 1608493.41 frames. ], batch size: 20, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:39,387 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25581.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:53,161 INFO [train.py:901] (1/4) Epoch 4, batch 1350, loss[loss=0.2735, simple_loss=0.3251, pruned_loss=0.111, over 7638.00 frames. ], tot_loss[loss=0.3145, simple_loss=0.3674, pruned_loss=0.1308, over 1606088.89 frames. ], batch size: 19, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:57,498 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:59,645 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3983, 1.5965, 1.6633, 1.3099, 0.8863, 1.6765, 0.1029, 0.9882], + device='cuda:1'), covar=tensor([0.2861, 0.1883, 0.1079, 0.2240, 0.6185, 0.0937, 0.5468, 0.2724], + device='cuda:1'), in_proj_covar=tensor([0.0118, 0.0110, 0.0086, 0.0159, 0.0190, 0.0083, 0.0152, 0.0118], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 22:55:08,861 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.283e+02 4.098e+02 5.393e+02 1.175e+03, threshold=8.196e+02, percent-clipped=3.0 +2023-02-05 22:55:12,063 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.14 vs. limit=5.0 +2023-02-05 22:55:27,914 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.13 vs. limit=5.0 +2023-02-05 22:55:28,874 INFO [train.py:901] (1/4) Epoch 4, batch 1400, loss[loss=0.2824, simple_loss=0.3601, pruned_loss=0.1024, over 8449.00 frames. ], tot_loss[loss=0.3148, simple_loss=0.368, pruned_loss=0.1308, over 1608752.02 frames. ], batch size: 29, lr: 1.91e-02, grad_scale: 8.0 +2023-02-05 22:56:03,161 INFO [train.py:901] (1/4) Epoch 4, batch 1450, loss[loss=0.3396, simple_loss=0.3936, pruned_loss=0.1428, over 7962.00 frames. ], tot_loss[loss=0.3142, simple_loss=0.3681, pruned_loss=0.1301, over 1609724.95 frames. ], batch size: 21, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:05,848 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 22:56:18,905 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.243e+02 3.964e+02 4.847e+02 1.034e+03, threshold=7.929e+02, percent-clipped=2.0 +2023-02-05 22:56:23,200 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:56:38,604 INFO [train.py:901] (1/4) Epoch 4, batch 1500, loss[loss=0.3, simple_loss=0.3726, pruned_loss=0.1137, over 8497.00 frames. ], tot_loss[loss=0.3142, simple_loss=0.3686, pruned_loss=0.1299, over 1614866.24 frames. 
], batch size: 26, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:40,774 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:57:02,625 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5395, 4.4821, 3.9490, 1.8611, 3.9654, 3.9659, 4.2954, 3.5885], + device='cuda:1'), covar=tensor([0.0879, 0.0504, 0.0816, 0.4455, 0.0722, 0.0656, 0.0980, 0.0715], + device='cuda:1'), in_proj_covar=tensor([0.0380, 0.0254, 0.0300, 0.0381, 0.0294, 0.0236, 0.0280, 0.0226], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 22:57:05,990 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25790.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:57:12,582 INFO [train.py:901] (1/4) Epoch 4, batch 1550, loss[loss=0.2877, simple_loss=0.3613, pruned_loss=0.1071, over 8326.00 frames. ], tot_loss[loss=0.3134, simple_loss=0.3679, pruned_loss=0.1294, over 1616095.83 frames. ], batch size: 26, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:57:27,015 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 3.100e+02 3.836e+02 5.066e+02 1.009e+03, threshold=7.672e+02, percent-clipped=5.0 +2023-02-05 22:57:31,370 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. limit=2.0 +2023-02-05 22:57:46,741 INFO [train.py:901] (1/4) Epoch 4, batch 1600, loss[loss=0.2975, simple_loss=0.3501, pruned_loss=0.1224, over 7246.00 frames. ], tot_loss[loss=0.3134, simple_loss=0.3676, pruned_loss=0.1296, over 1616592.14 frames. ], batch size: 16, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:04,757 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25876.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:58:21,025 INFO [train.py:901] (1/4) Epoch 4, batch 1650, loss[loss=0.2932, simple_loss=0.3563, pruned_loss=0.1151, over 8191.00 frames. ], tot_loss[loss=0.3137, simple_loss=0.367, pruned_loss=0.1302, over 1606890.93 frames. ], batch size: 23, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:24,584 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25905.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:58:32,035 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4998, 3.1360, 2.4183, 4.0428, 1.7191, 1.6076, 2.1716, 3.5238], + device='cuda:1'), covar=tensor([0.0980, 0.1459, 0.1547, 0.0355, 0.1834, 0.2449, 0.2203, 0.0837], + device='cuda:1'), in_proj_covar=tensor([0.0285, 0.0288, 0.0299, 0.0224, 0.0269, 0.0303, 0.0315, 0.0285], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 22:58:35,952 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.823e+02 4.768e+02 5.766e+02 1.707e+03, threshold=9.535e+02, percent-clipped=9.0 +2023-02-05 22:58:56,114 INFO [train.py:901] (1/4) Epoch 4, batch 1700, loss[loss=0.2853, simple_loss=0.3494, pruned_loss=0.1106, over 8230.00 frames. ], tot_loss[loss=0.3134, simple_loss=0.3669, pruned_loss=0.1299, over 1612039.46 frames. 
], batch size: 22, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:58,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4794, 2.0557, 1.3905, 2.6236, 1.4522, 1.0414, 1.5520, 2.0659], + device='cuda:1'), covar=tensor([0.1883, 0.1587, 0.3190, 0.0503, 0.1835, 0.3289, 0.1981, 0.1234], + device='cuda:1'), in_proj_covar=tensor([0.0285, 0.0287, 0.0301, 0.0221, 0.0268, 0.0305, 0.0313, 0.0283], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 22:59:31,190 INFO [train.py:901] (1/4) Epoch 4, batch 1750, loss[loss=0.274, simple_loss=0.3339, pruned_loss=0.107, over 8655.00 frames. ], tot_loss[loss=0.3123, simple_loss=0.3664, pruned_loss=0.1291, over 1614823.59 frames. ], batch size: 34, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 22:59:47,004 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.187e+02 3.816e+02 4.801e+02 8.317e+02, threshold=7.632e+02, percent-clipped=0.0 +2023-02-05 23:00:06,091 INFO [train.py:901] (1/4) Epoch 4, batch 1800, loss[loss=0.3121, simple_loss=0.3778, pruned_loss=0.1232, over 8245.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3668, pruned_loss=0.1296, over 1616051.65 frames. ], batch size: 24, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:41,282 INFO [train.py:901] (1/4) Epoch 4, batch 1850, loss[loss=0.3292, simple_loss=0.3812, pruned_loss=0.1386, over 8542.00 frames. ], tot_loss[loss=0.3122, simple_loss=0.3665, pruned_loss=0.129, over 1617126.20 frames. ], batch size: 31, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:55,436 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:00:56,599 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.260e+02 3.379e+02 4.261e+02 5.084e+02 1.608e+03, threshold=8.521e+02, percent-clipped=6.0 +2023-02-05 23:01:15,415 INFO [train.py:901] (1/4) Epoch 4, batch 1900, loss[loss=0.2604, simple_loss=0.3143, pruned_loss=0.1033, over 7222.00 frames. ], tot_loss[loss=0.3124, simple_loss=0.3664, pruned_loss=0.1292, over 1615911.59 frames. ], batch size: 16, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:22,863 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26161.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:01:40,591 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:01:40,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26186.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:01:41,082 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 23:01:49,625 INFO [train.py:901] (1/4) Epoch 4, batch 1950, loss[loss=0.2867, simple_loss=0.3512, pruned_loss=0.1111, over 8097.00 frames. ], tot_loss[loss=0.3128, simple_loss=0.3671, pruned_loss=0.1293, over 1616843.06 frames. ], batch size: 23, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:52,424 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-05 23:02:04,046 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26220.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:02:05,152 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.303e+02 3.684e+02 4.572e+02 6.046e+02 1.247e+03, threshold=9.144e+02, percent-clipped=2.0 +2023-02-05 23:02:10,406 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 23:02:22,289 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-05 23:02:24,334 INFO [train.py:901] (1/4) Epoch 4, batch 2000, loss[loss=0.4108, simple_loss=0.4288, pruned_loss=0.1964, over 8251.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3674, pruned_loss=0.1299, over 1613666.21 frames. ], batch size: 24, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:02:36,624 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:02:59,487 INFO [train.py:901] (1/4) Epoch 4, batch 2050, loss[loss=0.298, simple_loss=0.3597, pruned_loss=0.1182, over 7957.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3663, pruned_loss=0.1299, over 1609808.91 frames. ], batch size: 21, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:03:10,495 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4198, 2.0604, 1.9976, 1.1903, 2.0935, 1.3940, 0.6042, 1.7223], + device='cuda:1'), covar=tensor([0.0158, 0.0074, 0.0061, 0.0127, 0.0093, 0.0282, 0.0218, 0.0070], + device='cuda:1'), in_proj_covar=tensor([0.0260, 0.0189, 0.0150, 0.0226, 0.0175, 0.0301, 0.0250, 0.0214], + device='cuda:1'), out_proj_covar=tensor([1.1004e-04, 7.9613e-05, 6.1679e-05, 9.1799e-05, 7.5091e-05, 1.3656e-04, + 1.0664e-04, 8.7591e-05], device='cuda:1') +2023-02-05 23:03:14,377 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.223e+02 3.433e+02 4.198e+02 5.260e+02 1.263e+03, threshold=8.396e+02, percent-clipped=5.0 +2023-02-05 23:03:24,276 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26335.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:03:34,740 INFO [train.py:901] (1/4) Epoch 4, batch 2100, loss[loss=0.3155, simple_loss=0.3724, pruned_loss=0.1293, over 8191.00 frames. ], tot_loss[loss=0.3142, simple_loss=0.3677, pruned_loss=0.1304, over 1613887.55 frames. ], batch size: 23, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:08,106 INFO [train.py:901] (1/4) Epoch 4, batch 2150, loss[loss=0.4336, simple_loss=0.4385, pruned_loss=0.2143, over 6637.00 frames. ], tot_loss[loss=0.3133, simple_loss=0.3673, pruned_loss=0.1297, over 1616944.47 frames. 
], batch size: 71, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:13,091 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5789, 1.8341, 3.2589, 1.1409, 2.2109, 1.9042, 1.4656, 1.9182], + device='cuda:1'), covar=tensor([0.1263, 0.1448, 0.0444, 0.2663, 0.1126, 0.1927, 0.1414, 0.1818], + device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0415, 0.0505, 0.0504, 0.0546, 0.0487, 0.0439, 0.0554], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:04:24,387 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.431e+02 3.407e+02 4.210e+02 5.616e+02 1.521e+03, threshold=8.419e+02, percent-clipped=4.0 +2023-02-05 23:04:31,148 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:04:43,606 INFO [train.py:901] (1/4) Epoch 4, batch 2200, loss[loss=0.3355, simple_loss=0.3719, pruned_loss=0.1495, over 8134.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.3661, pruned_loss=0.1287, over 1620580.60 frames. ], batch size: 22, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:53,163 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:04:55,297 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1892, 1.9697, 1.7329, 1.9174, 1.8576, 1.7403, 2.7881, 2.0291], + device='cuda:1'), covar=tensor([0.0600, 0.1294, 0.1861, 0.1334, 0.0739, 0.1596, 0.0734, 0.0594], + device='cuda:1'), in_proj_covar=tensor([0.0163, 0.0199, 0.0240, 0.0201, 0.0162, 0.0206, 0.0168, 0.0167], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 23:05:18,122 INFO [train.py:901] (1/4) Epoch 4, batch 2250, loss[loss=0.3041, simple_loss=0.3584, pruned_loss=0.1249, over 7772.00 frames. ], tot_loss[loss=0.3101, simple_loss=0.3652, pruned_loss=0.1275, over 1621273.35 frames. ], batch size: 19, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:05:32,594 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4879, 1.3490, 2.7612, 1.1156, 2.0719, 2.9640, 2.9243, 2.5343], + device='cuda:1'), covar=tensor([0.0994, 0.1297, 0.0404, 0.1961, 0.0596, 0.0356, 0.0427, 0.0675], + device='cuda:1'), in_proj_covar=tensor([0.0224, 0.0257, 0.0207, 0.0262, 0.0207, 0.0188, 0.0196, 0.0259], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:05:33,086 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.188e+02 3.857e+02 4.748e+02 9.287e+02, threshold=7.714e+02, percent-clipped=1.0 +2023-02-05 23:05:38,940 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26530.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:52,824 INFO [train.py:901] (1/4) Epoch 4, batch 2300, loss[loss=0.3125, simple_loss=0.372, pruned_loss=0.1265, over 8493.00 frames. ], tot_loss[loss=0.3099, simple_loss=0.3645, pruned_loss=0.1277, over 1613403.64 frames. 
], batch size: 26, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:02,938 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3841, 2.0704, 3.1176, 2.7400, 2.5191, 1.9822, 1.3159, 1.2584], + device='cuda:1'), covar=tensor([0.1349, 0.1545, 0.0291, 0.0597, 0.0757, 0.0699, 0.0929, 0.1536], + device='cuda:1'), in_proj_covar=tensor([0.0667, 0.0595, 0.0499, 0.0566, 0.0687, 0.0546, 0.0549, 0.0563], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:06:12,941 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:21,882 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26591.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:06:27,543 INFO [train.py:901] (1/4) Epoch 4, batch 2350, loss[loss=0.299, simple_loss=0.3672, pruned_loss=0.1153, over 8190.00 frames. ], tot_loss[loss=0.3095, simple_loss=0.3641, pruned_loss=0.1274, over 1610252.15 frames. ], batch size: 23, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:35,939 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:38,734 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26616.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:06:42,411 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.505e+02 4.841e+02 5.770e+02 1.247e+03, threshold=9.683e+02, percent-clipped=6.0 +2023-02-05 23:06:58,650 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26645.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:07:01,512 INFO [train.py:901] (1/4) Epoch 4, batch 2400, loss[loss=0.2733, simple_loss=0.3245, pruned_loss=0.1111, over 7405.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3655, pruned_loss=0.1286, over 1612281.64 frames. ], batch size: 17, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:03,752 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7816, 1.3288, 5.9180, 2.2683, 5.2664, 4.8382, 5.4161, 5.4159], + device='cuda:1'), covar=tensor([0.0478, 0.3964, 0.0243, 0.1981, 0.0733, 0.0459, 0.0390, 0.0374], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0442, 0.0342, 0.0357, 0.0419, 0.0359, 0.0341, 0.0378], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-05 23:07:37,131 INFO [train.py:901] (1/4) Epoch 4, batch 2450, loss[loss=0.3062, simple_loss=0.3654, pruned_loss=0.1235, over 8041.00 frames. ], tot_loss[loss=0.3128, simple_loss=0.3662, pruned_loss=0.1297, over 1610296.90 frames. 
], batch size: 22, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:47,372 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1525, 1.8385, 3.4249, 0.8742, 2.1868, 1.4580, 1.3249, 1.8828], + device='cuda:1'), covar=tensor([0.2280, 0.2200, 0.0815, 0.4104, 0.1709, 0.2975, 0.2136, 0.2535], + device='cuda:1'), in_proj_covar=tensor([0.0451, 0.0420, 0.0506, 0.0511, 0.0554, 0.0483, 0.0439, 0.0561], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:07:51,851 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.211e+02 4.300e+02 5.616e+02 1.854e+03, threshold=8.599e+02, percent-clipped=7.0 +2023-02-05 23:07:55,327 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26727.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:07:58,057 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3962, 2.0766, 1.9923, 0.7973, 2.1005, 1.5361, 0.5870, 1.7961], + device='cuda:1'), covar=tensor([0.0168, 0.0072, 0.0078, 0.0148, 0.0102, 0.0257, 0.0239, 0.0072], + device='cuda:1'), in_proj_covar=tensor([0.0262, 0.0188, 0.0154, 0.0226, 0.0172, 0.0307, 0.0251, 0.0210], + device='cuda:1'), out_proj_covar=tensor([1.1017e-04, 7.8155e-05, 6.2722e-05, 9.1577e-05, 7.2669e-05, 1.3801e-04, + 1.0628e-04, 8.5724e-05], device='cuda:1') +2023-02-05 23:08:10,589 INFO [train.py:901] (1/4) Epoch 4, batch 2500, loss[loss=0.3214, simple_loss=0.3807, pruned_loss=0.131, over 8360.00 frames. ], tot_loss[loss=0.3124, simple_loss=0.3659, pruned_loss=0.1295, over 1604712.06 frames. ], batch size: 24, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:08:19,999 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6413, 1.4734, 3.1291, 1.2792, 2.1361, 3.4200, 3.2503, 2.8166], + device='cuda:1'), covar=tensor([0.1038, 0.1363, 0.0311, 0.1919, 0.0720, 0.0232, 0.0342, 0.0608], + device='cuda:1'), in_proj_covar=tensor([0.0224, 0.0259, 0.0205, 0.0260, 0.0209, 0.0186, 0.0192, 0.0260], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:08:26,662 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8132, 1.6252, 3.4988, 1.3866, 2.4282, 3.9200, 3.7122, 3.2998], + device='cuda:1'), covar=tensor([0.1108, 0.1458, 0.0271, 0.1900, 0.0702, 0.0227, 0.0308, 0.0591], + device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0261, 0.0207, 0.0262, 0.0211, 0.0187, 0.0193, 0.0261], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:08:29,253 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:45,144 INFO [train.py:901] (1/4) Epoch 4, batch 2550, loss[loss=0.3197, simple_loss=0.3832, pruned_loss=0.1281, over 8451.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.3654, pruned_loss=0.1289, over 1603138.38 frames. 
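The `attn_weights_entropy` dumps above are a diagnostic on the self-attention distributions: values near zero (e.g. 0.2392) flag heads that collapse onto a single frame, while larger values indicate spread-out attention. The sketch below computes the quantity presumably being logged, the Shannon entropy of each head's attention weights averaged over query positions; the exact shapes and reduction used in `zipformer.py` are assumptions.

```python
# Sketch of an attention-entropy diagnostic like the tensors dumped above:
# Shannon entropy of each head's attention distribution, averaged over
# queries. The exact shapes/reduction in zipformer.py are assumptions.
import torch


def attention_entropy(attn: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    # attn: (batch, num_heads, query_len, key_len), rows summing to 1.
    p = attn.clamp_min(eps)
    per_query = -(p * p.log()).sum(dim=-1)  # entropy per (batch, head, query)
    return per_query.mean(dim=-1)           # average over query positions


attn = torch.softmax(torch.randn(2, 8, 16, 16), dim=-1)
print(attention_entropy(attn))  # near log(16) ~ 2.77 for diffuse heads
```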
], batch size: 27, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:09:01,297 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.235e+02 3.301e+02 4.146e+02 5.074e+02 1.055e+03, threshold=8.293e+02, percent-clipped=2.0 +2023-02-05 23:09:05,071 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1014, 1.6767, 2.9394, 2.4081, 2.3171, 1.7503, 1.2831, 1.0179], + device='cuda:1'), covar=tensor([0.1436, 0.1787, 0.0282, 0.0623, 0.0660, 0.0991, 0.1074, 0.1442], + device='cuda:1'), in_proj_covar=tensor([0.0661, 0.0593, 0.0492, 0.0567, 0.0677, 0.0549, 0.0546, 0.0557], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:09:10,584 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26835.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:20,517 INFO [train.py:901] (1/4) Epoch 4, batch 2600, loss[loss=0.3042, simple_loss=0.3609, pruned_loss=0.1238, over 7807.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3663, pruned_loss=0.1289, over 1608021.71 frames. ], batch size: 20, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:27,745 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26860.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:50,362 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:56,432 INFO [train.py:901] (1/4) Epoch 4, batch 2650, loss[loss=0.3081, simple_loss=0.3744, pruned_loss=0.121, over 8323.00 frames. ], tot_loss[loss=0.3098, simple_loss=0.3648, pruned_loss=0.1274, over 1610301.83 frames. ], batch size: 25, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:57,281 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:12,338 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 3.245e+02 3.916e+02 5.024e+02 1.006e+03, threshold=7.831e+02, percent-clipped=3.0 +2023-02-05 23:10:12,505 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26922.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:10:15,324 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26926.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:32,098 INFO [train.py:901] (1/4) Epoch 4, batch 2700, loss[loss=0.2977, simple_loss=0.3724, pruned_loss=0.1115, over 8612.00 frames. ], tot_loss[loss=0.3089, simple_loss=0.3646, pruned_loss=0.1265, over 1615480.95 frames. 
], batch size: 31, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:10:44,358 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26968.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:54,318 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26983.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:00,901 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4570, 4.5550, 4.0749, 1.7308, 3.9729, 3.9193, 4.2015, 3.4376], + device='cuda:1'), covar=tensor([0.0805, 0.0518, 0.0973, 0.4356, 0.0648, 0.0649, 0.1177, 0.0729], + device='cuda:1'), in_proj_covar=tensor([0.0364, 0.0252, 0.0302, 0.0383, 0.0288, 0.0233, 0.0282, 0.0223], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:11:05,901 INFO [train.py:901] (1/4) Epoch 4, batch 2750, loss[loss=0.361, simple_loss=0.4074, pruned_loss=0.1573, over 8479.00 frames. ], tot_loss[loss=0.3111, simple_loss=0.3659, pruned_loss=0.1282, over 1612424.13 frames. ], batch size: 29, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:10,046 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 23:11:12,356 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:21,386 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 3.589e+02 4.354e+02 5.460e+02 1.197e+03, threshold=8.707e+02, percent-clipped=9.0 +2023-02-05 23:11:40,850 INFO [train.py:901] (1/4) Epoch 4, batch 2800, loss[loss=0.307, simple_loss=0.3772, pruned_loss=0.1184, over 8461.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.3664, pruned_loss=0.1286, over 1614383.95 frames. ], batch size: 25, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:41,014 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3708, 1.4252, 3.0999, 1.3267, 2.1737, 3.3369, 3.0970, 2.8439], + device='cuda:1'), covar=tensor([0.1133, 0.1384, 0.0366, 0.1949, 0.0699, 0.0301, 0.0430, 0.0614], + device='cuda:1'), in_proj_covar=tensor([0.0225, 0.0257, 0.0210, 0.0262, 0.0211, 0.0187, 0.0195, 0.0262], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:11:52,405 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5673, 1.7441, 1.8427, 1.5573, 0.8647, 1.8084, 0.2392, 1.1714], + device='cuda:1'), covar=tensor([0.3814, 0.2764, 0.1178, 0.2075, 0.6938, 0.1213, 0.7083, 0.2494], + device='cuda:1'), in_proj_covar=tensor([0.0125, 0.0114, 0.0083, 0.0162, 0.0200, 0.0084, 0.0155, 0.0121], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:12:09,983 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.99 vs. limit=2.0 +2023-02-05 23:12:14,847 INFO [train.py:901] (1/4) Epoch 4, batch 2850, loss[loss=0.2575, simple_loss=0.3234, pruned_loss=0.09579, over 7969.00 frames. ], tot_loss[loss=0.3121, simple_loss=0.3667, pruned_loss=0.1288, over 1620512.28 frames. 
], batch size: 21, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:12:30,251 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 3.374e+02 4.464e+02 5.831e+02 1.992e+03, threshold=8.927e+02, percent-clipped=6.0 +2023-02-05 23:12:47,565 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:12:49,278 INFO [train.py:901] (1/4) Epoch 4, batch 2900, loss[loss=0.3115, simple_loss=0.3745, pruned_loss=0.1243, over 8664.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3657, pruned_loss=0.1284, over 1613012.86 frames. ], batch size: 34, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:00,618 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27166.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:05,274 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:11,781 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 23:13:24,074 INFO [train.py:901] (1/4) Epoch 4, batch 2950, loss[loss=0.3614, simple_loss=0.3798, pruned_loss=0.1715, over 6360.00 frames. ], tot_loss[loss=0.3132, simple_loss=0.3666, pruned_loss=0.1299, over 1607920.41 frames. ], batch size: 14, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:24,412 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6191, 2.0445, 2.0802, 0.8749, 2.1639, 1.4747, 0.5836, 1.7346], + device='cuda:1'), covar=tensor([0.0194, 0.0104, 0.0087, 0.0187, 0.0150, 0.0277, 0.0292, 0.0090], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0195, 0.0158, 0.0230, 0.0187, 0.0314, 0.0257, 0.0214], + device='cuda:1'), out_proj_covar=tensor([1.1159e-04, 8.0623e-05, 6.3440e-05, 9.2207e-05, 7.8582e-05, 1.3990e-04, + 1.0875e-04, 8.6472e-05], device='cuda:1') +2023-02-05 23:13:38,737 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 3.092e+02 3.649e+02 5.055e+02 1.216e+03, threshold=7.299e+02, percent-clipped=3.0 +2023-02-05 23:13:58,839 INFO [train.py:901] (1/4) Epoch 4, batch 3000, loss[loss=0.3256, simple_loss=0.3787, pruned_loss=0.1362, over 8621.00 frames. ], tot_loss[loss=0.3108, simple_loss=0.3645, pruned_loss=0.1286, over 1607196.03 frames. ], batch size: 31, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:58,839 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 23:14:11,268 INFO [train.py:935] (1/4) Epoch 4, validation: loss=0.2374, simple_loss=0.3304, pruned_loss=0.07225, over 944034.00 frames. +2023-02-05 23:14:11,269 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 23:14:23,026 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27266.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:14:45,404 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 23:14:45,710 INFO [train.py:901] (1/4) Epoch 4, batch 3050, loss[loss=0.3212, simple_loss=0.3607, pruned_loss=0.1409, over 8528.00 frames. ], tot_loss[loss=0.3109, simple_loss=0.3642, pruned_loss=0.1289, over 1603825.96 frames. 
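The `scaling.py` lines of the form `Whitening: ... metric=M vs. limit=L` compare a measured whiteness statistic of grouped activations against a limit; a value of 1.0 means the within-group covariance is already proportional to the identity. One standard such statistic, `d * trace(C^2) / trace(C)^2`, is sketched below as an assumed stand-in for whatever icefall actually computes.

```python
# Assumed stand-in for the whitening metric in scaling.py's log lines:
# d * trace(C^2) / trace(C)^2 for each channel group's covariance C. It
# equals 1.0 when C is white and grows as the eigenvalues spread out.
import torch


def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    # x: (num_frames, num_channels); channels split into equal groups.
    n, c = x.shape
    d = c // num_groups
    xg = x.reshape(n, num_groups, d).transpose(0, 1)    # (groups, n, d)
    cov = xg.transpose(1, 2) @ xg / n                   # (groups, d, d)
    tr = cov.diagonal(dim1=-2, dim2=-1).sum(-1)         # trace(C)
    tr2 = (cov * cov.transpose(-1, -2)).sum((-1, -2))   # trace(C @ C)
    return (d * tr2 / tr.pow(2)).mean()                 # 1.0 if perfectly white


x = torch.randn(1000, 96)
print(whitening_metric(x, num_groups=8))  # close to 1.0 for i.i.d. Gaussian data
```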
], batch size: 28, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:14:54,660 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27312.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:15:01,936 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.415e+02 4.317e+02 5.768e+02 1.933e+03, threshold=8.634e+02, percent-clipped=10.0 +2023-02-05 23:15:20,627 INFO [train.py:901] (1/4) Epoch 4, batch 3100, loss[loss=0.2922, simple_loss=0.3482, pruned_loss=0.1181, over 7802.00 frames. ], tot_loss[loss=0.3103, simple_loss=0.3639, pruned_loss=0.1283, over 1604910.81 frames. ], batch size: 20, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:15:41,821 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27381.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:15:54,812 INFO [train.py:901] (1/4) Epoch 4, batch 3150, loss[loss=0.3022, simple_loss=0.3556, pruned_loss=0.1244, over 7978.00 frames. ], tot_loss[loss=0.3095, simple_loss=0.3637, pruned_loss=0.1276, over 1605526.69 frames. ], batch size: 21, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:16:09,479 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 3.237e+02 4.041e+02 5.193e+02 1.210e+03, threshold=8.082e+02, percent-clipped=3.0 +2023-02-05 23:16:13,662 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:16:16,326 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2941, 1.7048, 2.9516, 1.0172, 1.8908, 1.7134, 1.3822, 1.5440], + device='cuda:1'), covar=tensor([0.1328, 0.1361, 0.0485, 0.2571, 0.1153, 0.1935, 0.1188, 0.1741], + device='cuda:1'), in_proj_covar=tensor([0.0448, 0.0417, 0.0499, 0.0512, 0.0560, 0.0494, 0.0430, 0.0560], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:16:29,604 INFO [train.py:901] (1/4) Epoch 4, batch 3200, loss[loss=0.3166, simple_loss=0.3711, pruned_loss=0.131, over 8090.00 frames. ], tot_loss[loss=0.3103, simple_loss=0.3648, pruned_loss=0.1279, over 1609137.61 frames. ], batch size: 21, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:16:33,644 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0873, 4.1067, 3.6518, 1.8316, 3.6605, 3.4899, 3.7182, 3.0053], + device='cuda:1'), covar=tensor([0.0856, 0.0520, 0.0894, 0.4258, 0.0724, 0.0774, 0.1113, 0.0871], + device='cuda:1'), in_proj_covar=tensor([0.0368, 0.0259, 0.0300, 0.0383, 0.0289, 0.0238, 0.0281, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:16:50,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1031, 3.0274, 2.7451, 1.6183, 2.7129, 2.6642, 2.8442, 2.4452], + device='cuda:1'), covar=tensor([0.1325, 0.0926, 0.1195, 0.4513, 0.1106, 0.1049, 0.1490, 0.1059], + device='cuda:1'), in_proj_covar=tensor([0.0366, 0.0260, 0.0301, 0.0383, 0.0290, 0.0238, 0.0282, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:17:03,110 INFO [train.py:901] (1/4) Epoch 4, batch 3250, loss[loss=0.2973, simple_loss=0.3484, pruned_loss=0.1231, over 6806.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3634, pruned_loss=0.1273, over 1609370.68 frames. 
], batch size: 15, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:17:07,516 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 23:17:10,578 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:17:18,418 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 3.449e+02 4.059e+02 4.930e+02 7.939e+02, threshold=8.117e+02, percent-clipped=0.0 +2023-02-05 23:17:37,474 INFO [train.py:901] (1/4) Epoch 4, batch 3300, loss[loss=0.3104, simple_loss=0.3341, pruned_loss=0.1434, over 7546.00 frames. ], tot_loss[loss=0.3092, simple_loss=0.3641, pruned_loss=0.1271, over 1612454.93 frames. ], batch size: 18, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:12,284 INFO [train.py:901] (1/4) Epoch 4, batch 3350, loss[loss=0.2815, simple_loss=0.3486, pruned_loss=0.1072, over 8139.00 frames. ], tot_loss[loss=0.3094, simple_loss=0.3646, pruned_loss=0.1271, over 1617788.18 frames. ], batch size: 22, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:28,394 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 3.326e+02 4.176e+02 5.439e+02 1.733e+03, threshold=8.353e+02, percent-clipped=9.0 +2023-02-05 23:18:30,489 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27625.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:18:38,365 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27637.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:18:46,891 INFO [train.py:901] (1/4) Epoch 4, batch 3400, loss[loss=0.3077, simple_loss=0.3481, pruned_loss=0.1336, over 7508.00 frames. ], tot_loss[loss=0.3121, simple_loss=0.366, pruned_loss=0.1291, over 1614411.47 frames. ], batch size: 18, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:18:55,791 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27662.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:19:10,319 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27683.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:21,476 INFO [train.py:901] (1/4) Epoch 4, batch 3450, loss[loss=0.2616, simple_loss=0.3267, pruned_loss=0.09823, over 7812.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.366, pruned_loss=0.1287, over 1613799.15 frames. ], batch size: 20, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:19:26,943 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27708.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:36,065 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.499e+02 3.357e+02 4.072e+02 5.275e+02 9.264e+02, threshold=8.144e+02, percent-clipped=1.0 +2023-02-05 23:19:43,548 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27732.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:19:46,954 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0 +2023-02-05 23:19:55,911 INFO [train.py:901] (1/4) Epoch 4, batch 3500, loss[loss=0.3375, simple_loss=0.389, pruned_loss=0.143, over 8617.00 frames. ], tot_loss[loss=0.3126, simple_loss=0.3669, pruned_loss=0.1291, over 1616457.62 frames. ], batch size: 34, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:10,690 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-05 23:20:21,912 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 23:20:31,120 INFO [train.py:901] (1/4) Epoch 4, batch 3550, loss[loss=0.4197, simple_loss=0.4381, pruned_loss=0.2006, over 6560.00 frames. ], tot_loss[loss=0.3111, simple_loss=0.3657, pruned_loss=0.1283, over 1612593.48 frames. ], batch size: 71, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:46,083 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.262e+02 3.955e+02 5.254e+02 1.114e+03, threshold=7.909e+02, percent-clipped=8.0 +2023-02-05 23:21:05,471 INFO [train.py:901] (1/4) Epoch 4, batch 3600, loss[loss=0.3176, simple_loss=0.3601, pruned_loss=0.1376, over 7775.00 frames. ], tot_loss[loss=0.312, simple_loss=0.366, pruned_loss=0.129, over 1612530.32 frames. ], batch size: 19, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:27,350 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27881.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:39,936 INFO [train.py:901] (1/4) Epoch 4, batch 3650, loss[loss=0.2902, simple_loss=0.3427, pruned_loss=0.1188, over 8026.00 frames. ], tot_loss[loss=0.3115, simple_loss=0.3659, pruned_loss=0.1286, over 1611909.16 frames. ], batch size: 22, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:44,917 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27906.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:56,104 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.998e+02 3.334e+02 3.945e+02 4.811e+02 1.062e+03, threshold=7.891e+02, percent-clipped=4.0 +2023-02-05 23:22:13,494 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:22:14,794 INFO [train.py:901] (1/4) Epoch 4, batch 3700, loss[loss=0.3366, simple_loss=0.3762, pruned_loss=0.1485, over 7812.00 frames. ], tot_loss[loss=0.312, simple_loss=0.366, pruned_loss=0.129, over 1614678.61 frames. ], batch size: 20, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:22:42,571 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.3679, 1.9889, 1.5665, 1.4371, 1.7155, 1.7495, 2.2566, 2.2669], + device='cuda:1'), covar=tensor([0.0501, 0.1255, 0.1826, 0.1430, 0.0714, 0.1512, 0.0749, 0.0504], + device='cuda:1'), in_proj_covar=tensor([0.0155, 0.0196, 0.0236, 0.0197, 0.0154, 0.0200, 0.0160, 0.0165], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 23:22:49,607 INFO [train.py:901] (1/4) Epoch 4, batch 3750, loss[loss=0.3115, simple_loss=0.3711, pruned_loss=0.1259, over 8082.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3655, pruned_loss=0.1287, over 1610583.52 frames. ], batch size: 21, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:05,800 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.245e+02 3.553e+02 4.442e+02 6.055e+02 1.985e+03, threshold=8.883e+02, percent-clipped=11.0 +2023-02-05 23:23:20,799 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.17 vs. limit=2.0 +2023-02-05 23:23:25,245 INFO [train.py:901] (1/4) Epoch 4, batch 3800, loss[loss=0.3666, simple_loss=0.4121, pruned_loss=0.1605, over 8452.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3651, pruned_loss=0.1289, over 1605269.99 frames. 
], batch size: 27, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:33,493 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3308, 2.1294, 1.5853, 2.0775, 1.7920, 1.3143, 1.4412, 1.8961], + device='cuda:1'), covar=tensor([0.0988, 0.0391, 0.0908, 0.0406, 0.0630, 0.1101, 0.0886, 0.0626], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0246, 0.0309, 0.0304, 0.0332, 0.0319, 0.0341, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:23:38,933 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3190, 1.9002, 3.1073, 2.3560, 2.5612, 1.9587, 1.3769, 1.2094], + device='cuda:1'), covar=tensor([0.1432, 0.1573, 0.0339, 0.0741, 0.0666, 0.0788, 0.0913, 0.1611], + device='cuda:1'), in_proj_covar=tensor([0.0683, 0.0610, 0.0519, 0.0573, 0.0690, 0.0564, 0.0569, 0.0575], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:23:42,794 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28076.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:23:45,521 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8998, 1.5045, 3.3752, 1.3546, 2.2829, 3.8277, 3.5844, 3.2419], + device='cuda:1'), covar=tensor([0.1199, 0.1573, 0.0387, 0.2072, 0.0791, 0.0258, 0.0471, 0.0743], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0266, 0.0218, 0.0264, 0.0218, 0.0194, 0.0201, 0.0274], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:23:47,597 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7622, 1.9304, 2.1118, 0.8413, 2.1514, 1.5209, 0.6127, 1.8523], + device='cuda:1'), covar=tensor([0.0144, 0.0088, 0.0080, 0.0173, 0.0134, 0.0281, 0.0262, 0.0080], + device='cuda:1'), in_proj_covar=tensor([0.0263, 0.0197, 0.0155, 0.0229, 0.0185, 0.0315, 0.0253, 0.0218], + device='cuda:1'), out_proj_covar=tensor([1.0771e-04, 8.0089e-05, 6.0441e-05, 9.0932e-05, 7.6607e-05, 1.3796e-04, + 1.0568e-04, 8.6719e-05], device='cuda:1') +2023-02-05 23:24:00,306 INFO [train.py:901] (1/4) Epoch 4, batch 3850, loss[loss=0.3027, simple_loss=0.3567, pruned_loss=0.1243, over 8109.00 frames. ], tot_loss[loss=0.3119, simple_loss=0.3655, pruned_loss=0.1291, over 1607755.24 frames. ], batch size: 23, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:24:14,107 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2670, 1.5823, 1.5491, 1.2597, 0.8440, 1.6859, 0.0602, 0.9382], + device='cuda:1'), covar=tensor([0.3544, 0.1783, 0.1595, 0.2456, 0.6155, 0.0856, 0.6003, 0.2525], + device='cuda:1'), in_proj_covar=tensor([0.0124, 0.0110, 0.0083, 0.0163, 0.0193, 0.0080, 0.0147, 0.0119], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:24:15,225 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 3.238e+02 4.124e+02 5.182e+02 9.210e+02, threshold=8.247e+02, percent-clipped=1.0 +2023-02-05 23:24:17,313 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-05 23:24:27,548 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5534, 2.7546, 1.6782, 2.1378, 2.4069, 1.3712, 1.6463, 2.0548], + device='cuda:1'), covar=tensor([0.1205, 0.0253, 0.0802, 0.0568, 0.0475, 0.1156, 0.1023, 0.0806], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0239, 0.0306, 0.0306, 0.0326, 0.0319, 0.0342, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:24:32,277 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4485, 2.0221, 3.3282, 0.9892, 2.2334, 1.6186, 1.4886, 1.8832], + device='cuda:1'), covar=tensor([0.1477, 0.1535, 0.0607, 0.3039, 0.1415, 0.2389, 0.1364, 0.2192], + device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0427, 0.0503, 0.0513, 0.0563, 0.0493, 0.0435, 0.0564], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:24:34,698 INFO [train.py:901] (1/4) Epoch 4, batch 3900, loss[loss=0.3446, simple_loss=0.3988, pruned_loss=0.1452, over 8604.00 frames. ], tot_loss[loss=0.3126, simple_loss=0.366, pruned_loss=0.1296, over 1609634.60 frames. ], batch size: 34, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:24:42,324 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8341, 2.3218, 4.5763, 1.2493, 2.6954, 2.2139, 1.7803, 2.4410], + device='cuda:1'), covar=tensor([0.1239, 0.1470, 0.0616, 0.2723, 0.1465, 0.2027, 0.1243, 0.2225], + device='cuda:1'), in_proj_covar=tensor([0.0451, 0.0427, 0.0502, 0.0510, 0.0562, 0.0491, 0.0434, 0.0564], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:25:02,709 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28191.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:25:08,592 INFO [train.py:901] (1/4) Epoch 4, batch 3950, loss[loss=0.3078, simple_loss=0.3635, pruned_loss=0.1261, over 8528.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3641, pruned_loss=0.1276, over 1609090.31 frames. ], batch size: 26, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:24,838 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.357e+02 4.080e+02 5.453e+02 1.389e+03, threshold=8.161e+02, percent-clipped=8.0 +2023-02-05 23:25:41,190 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:25:43,103 INFO [train.py:901] (1/4) Epoch 4, batch 4000, loss[loss=0.273, simple_loss=0.3366, pruned_loss=0.1047, over 6472.00 frames. ], tot_loss[loss=0.3084, simple_loss=0.3628, pruned_loss=0.127, over 1603275.52 frames. ], batch size: 14, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:59,936 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28273.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:26:04,985 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 23:26:17,618 INFO [train.py:901] (1/4) Epoch 4, batch 4050, loss[loss=0.3244, simple_loss=0.3865, pruned_loss=0.1311, over 8334.00 frames. ], tot_loss[loss=0.3071, simple_loss=0.3619, pruned_loss=0.1261, over 1604753.02 frames. 
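The `zipformer.py` entries (`warmup_begin=…, warmup_end=…, batch_count=…, num_to_drop=…, layers_to_drop=…`) record stochastic layer skipping while each encoder stack is still warming up: on some batches one layer is dropped outright (`num_to_drop=1, layers_to_drop={0}`). Below is a rough sketch of batch-count-gated layer dropping; the schedule, the drop probability, and the names are assumptions, not the Zipformer code.

```python
import random
import torch
import torch.nn as nn

class LayerDropEncoder(nn.Module):
    """Illustrative stochastic-depth encoder: before `warmup_end` batches
    have been seen, each layer may be skipped for the whole batch."""

    def __init__(self, layers: nn.ModuleList, warmup_end: float = 4000.0,
                 drop_prob: float = 0.075):
        super().__init__()
        self.layers = layers
        self.warmup_end = warmup_end  # matches the logged warmup_end field
        self.drop_prob = drop_prob    # assumed per-layer drop probability
        self.batch_count = 0.0

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        layers_to_drop = set()
        if self.training and self.batch_count < self.warmup_end:
            layers_to_drop = {i for i in range(len(self.layers))
                              if random.random() < self.drop_prob}
        self.batch_count += 1.0
        for i, layer in enumerate(self.layers):
            if i in layers_to_drop:
                continue  # a dropped layer acts as the identity
            x = layer(x)
        return x
```

Skipping whole layers early in training acts like stochastic depth: it regularizes the model and keeps a deep stack trainable while its layers are still settling.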
], batch size: 26, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:26:17,834 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8498, 2.2813, 3.0367, 1.2639, 3.0313, 2.0055, 1.6388, 2.0981], + device='cuda:1'), covar=tensor([0.0249, 0.0130, 0.0072, 0.0188, 0.0103, 0.0257, 0.0219, 0.0111], + device='cuda:1'), in_proj_covar=tensor([0.0264, 0.0192, 0.0155, 0.0229, 0.0187, 0.0313, 0.0250, 0.0220], + device='cuda:1'), out_proj_covar=tensor([1.0753e-04, 7.7614e-05, 5.9638e-05, 9.0650e-05, 7.6804e-05, 1.3637e-04, + 1.0344e-04, 8.6985e-05], device='cuda:1') +2023-02-05 23:26:21,157 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:26:24,887 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 23:26:34,456 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.482e+02 4.201e+02 5.400e+02 1.078e+03, threshold=8.403e+02, percent-clipped=4.0 +2023-02-05 23:26:40,835 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.30 vs. limit=5.0 +2023-02-05 23:26:52,367 INFO [train.py:901] (1/4) Epoch 4, batch 4100, loss[loss=0.2799, simple_loss=0.3551, pruned_loss=0.1024, over 8459.00 frames. ], tot_loss[loss=0.3086, simple_loss=0.3631, pruned_loss=0.127, over 1608904.56 frames. ], batch size: 25, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:26:53,951 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3595, 2.2037, 1.5658, 1.9383, 1.8724, 1.3300, 1.7987, 1.8063], + device='cuda:1'), covar=tensor([0.0985, 0.0330, 0.0796, 0.0420, 0.0592, 0.1013, 0.0711, 0.0563], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0239, 0.0311, 0.0307, 0.0334, 0.0317, 0.0342, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:27:09,280 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.90 vs. limit=2.0 +2023-02-05 23:27:27,345 INFO [train.py:901] (1/4) Epoch 4, batch 4150, loss[loss=0.4314, simple_loss=0.4485, pruned_loss=0.2071, over 6785.00 frames. ], tot_loss[loss=0.3091, simple_loss=0.3636, pruned_loss=0.1273, over 1607622.21 frames. ], batch size: 71, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:27:43,612 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.222e+02 3.372e+02 4.170e+02 5.520e+02 1.384e+03, threshold=8.341e+02, percent-clipped=6.0 +2023-02-05 23:28:00,681 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28447.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:28:02,476 INFO [train.py:901] (1/4) Epoch 4, batch 4200, loss[loss=0.2369, simple_loss=0.2883, pruned_loss=0.09275, over 7270.00 frames. ], tot_loss[loss=0.3086, simple_loss=0.3633, pruned_loss=0.127, over 1604184.64 frames. ], batch size: 16, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:07,668 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 23:28:17,495 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28472.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:28:29,059 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-05 23:28:29,825 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7610, 5.8483, 5.1186, 2.3640, 5.2844, 5.3510, 5.4202, 4.5915], + device='cuda:1'), covar=tensor([0.0568, 0.0343, 0.0715, 0.3956, 0.0545, 0.0498, 0.0930, 0.0622], + device='cuda:1'), in_proj_covar=tensor([0.0373, 0.0269, 0.0301, 0.0394, 0.0297, 0.0243, 0.0289, 0.0225], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:28:36,446 INFO [train.py:901] (1/4) Epoch 4, batch 4250, loss[loss=0.3604, simple_loss=0.4033, pruned_loss=0.1587, over 8526.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3644, pruned_loss=0.1274, over 1608756.74 frames. ], batch size: 49, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:39,187 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28504.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:43,323 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:51,867 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 3.170e+02 4.105e+02 5.662e+02 1.430e+03, threshold=8.210e+02, percent-clipped=9.0 +2023-02-05 23:29:07,173 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6589, 2.4909, 2.9950, 0.9556, 2.9401, 1.8869, 1.3292, 1.7192], + device='cuda:1'), covar=tensor([0.0225, 0.0088, 0.0057, 0.0239, 0.0115, 0.0245, 0.0290, 0.0132], + device='cuda:1'), in_proj_covar=tensor([0.0273, 0.0192, 0.0155, 0.0234, 0.0185, 0.0314, 0.0253, 0.0220], + device='cuda:1'), out_proj_covar=tensor([1.1074e-04, 7.7224e-05, 5.9519e-05, 9.2067e-05, 7.6167e-05, 1.3641e-04, + 1.0452e-04, 8.6385e-05], device='cuda:1') +2023-02-05 23:29:10,384 INFO [train.py:901] (1/4) Epoch 4, batch 4300, loss[loss=0.3391, simple_loss=0.396, pruned_loss=0.1411, over 8617.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3635, pruned_loss=0.1259, over 1611034.71 frames. ], batch size: 34, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:37,893 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2213, 2.2034, 1.5595, 2.0144, 1.7083, 1.3360, 1.7212, 1.8747], + device='cuda:1'), covar=tensor([0.0937, 0.0377, 0.0918, 0.0426, 0.0654, 0.1128, 0.0730, 0.0599], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0238, 0.0307, 0.0311, 0.0326, 0.0310, 0.0342, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:29:38,454 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:29:45,231 INFO [train.py:901] (1/4) Epoch 4, batch 4350, loss[loss=0.2569, simple_loss=0.3227, pruned_loss=0.09555, over 7703.00 frames. ], tot_loss[loss=0.3083, simple_loss=0.3639, pruned_loss=0.1264, over 1612304.09 frames. ], batch size: 18, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:57,578 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28617.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:29:58,771 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-05 23:30:01,436 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 3.285e+02 3.917e+02 4.771e+02 1.131e+03, threshold=7.833e+02, percent-clipped=1.0 +2023-02-05 23:30:19,075 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28649.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:30:19,684 INFO [train.py:901] (1/4) Epoch 4, batch 4400, loss[loss=0.2934, simple_loss=0.3528, pruned_loss=0.117, over 8141.00 frames. ], tot_loss[loss=0.3075, simple_loss=0.3634, pruned_loss=0.1257, over 1614169.30 frames. ], batch size: 22, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:30:23,359 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 23:30:36,843 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-05 23:30:41,085 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 23:30:54,265 INFO [train.py:901] (1/4) Epoch 4, batch 4450, loss[loss=0.2962, simple_loss=0.348, pruned_loss=0.1222, over 7924.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3634, pruned_loss=0.1256, over 1617138.17 frames. ], batch size: 20, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:30:58,500 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:31:10,735 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.181e+02 3.229e+02 4.056e+02 4.786e+02 8.259e+02, threshold=8.113e+02, percent-clipped=1.0 +2023-02-05 23:31:17,749 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28732.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:31:21,627 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. limit=5.0 +2023-02-05 23:31:30,203 INFO [train.py:901] (1/4) Epoch 4, batch 4500, loss[loss=0.2782, simple_loss=0.3183, pruned_loss=0.119, over 7207.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3619, pruned_loss=0.1246, over 1612717.41 frames. ], batch size: 16, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:31:36,227 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 23:31:39,889 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28764.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:05,460 INFO [train.py:901] (1/4) Epoch 4, batch 4550, loss[loss=0.2694, simple_loss=0.326, pruned_loss=0.1064, over 7451.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3621, pruned_loss=0.1246, over 1616722.88 frames. ], batch size: 17, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:21,345 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.133e+02 4.046e+02 5.517e+02 1.256e+03, threshold=8.093e+02, percent-clipped=3.0 +2023-02-05 23:32:39,631 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:40,904 INFO [train.py:901] (1/4) Epoch 4, batch 4600, loss[loss=0.2758, simple_loss=0.334, pruned_loss=0.1088, over 8069.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3619, pruned_loss=0.1247, over 1613421.59 frames. 
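The `Exclude cut with ID … from training. Duration: …` warnings show the data pipeline rejecting utterances whose duration falls outside an accepted range; everything excluded in this section is either under about one second or over about 25 seconds. A minimal sketch of such a filter over a Lhotse `CutSet` follows; the exact bounds are assumptions inferred from the logged durations, not taken from the recipe.

```python
from lhotse import CutSet  # assumes lhotse is installed

MIN_SECS, MAX_SECS = 1.0, 20.0  # assumed bounds; the logged exclusions
                                # are all < ~1 s or > ~25 s

def remove_short_and_long_utt(cuts: CutSet) -> CutSet:
    def keep(cut) -> bool:
        if not (MIN_SECS <= cut.duration <= MAX_SECS):
            # mirrors the WARNING format seen in this log
            print(f"Exclude cut with ID {cut.id} from training. "
                  f"Duration: {cut.duration}")
            return False
        return True
    return cuts.filter(keep)
```

Since `CutSet.filter` can be evaluated lazily, the warnings would surface as cuts are drawn during training, which matches how they interleave with the batch logs here.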
], batch size: 21, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:43,600 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28854.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:00,433 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28879.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:13,243 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.25 vs. limit=5.0 +2023-02-05 23:33:14,780 INFO [train.py:901] (1/4) Epoch 4, batch 4650, loss[loss=0.2413, simple_loss=0.3084, pruned_loss=0.08714, over 7815.00 frames. ], tot_loss[loss=0.3081, simple_loss=0.3639, pruned_loss=0.1262, over 1618922.10 frames. ], batch size: 20, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:21,519 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3832, 1.4527, 1.6023, 1.3086, 0.8651, 1.6013, 0.1205, 1.1211], + device='cuda:1'), covar=tensor([0.3469, 0.2573, 0.1182, 0.1987, 0.6998, 0.1092, 0.5756, 0.2382], + device='cuda:1'), in_proj_covar=tensor([0.0123, 0.0111, 0.0080, 0.0154, 0.0198, 0.0083, 0.0142, 0.0117], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:33:30,676 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 3.425e+02 4.570e+02 5.631e+02 1.457e+03, threshold=9.141e+02, percent-clipped=7.0 +2023-02-05 23:33:49,339 INFO [train.py:901] (1/4) Epoch 4, batch 4700, loss[loss=0.2828, simple_loss=0.3499, pruned_loss=0.1078, over 8464.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3641, pruned_loss=0.1269, over 1616907.01 frames. ], batch size: 25, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:54,997 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6601, 4.6250, 4.0819, 1.7971, 4.0229, 4.1516, 4.2848, 3.8443], + device='cuda:1'), covar=tensor([0.0748, 0.0644, 0.0843, 0.4762, 0.0748, 0.0821, 0.1212, 0.0633], + device='cuda:1'), in_proj_covar=tensor([0.0365, 0.0263, 0.0291, 0.0382, 0.0291, 0.0238, 0.0281, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:33:58,473 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:59,162 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28963.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:03,894 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:15,878 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:16,552 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28988.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:34:24,389 INFO [train.py:901] (1/4) Epoch 4, batch 4750, loss[loss=0.2533, simple_loss=0.3073, pruned_loss=0.09963, over 7927.00 frames. ], tot_loss[loss=0.3084, simple_loss=0.3638, pruned_loss=0.1265, over 1618845.30 frames. 
], batch size: 20, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:34:33,279 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29013.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:34:38,666 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:38,747 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:40,432 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.078e+02 3.145e+02 3.754e+02 5.040e+02 8.107e+02, threshold=7.508e+02, percent-clipped=0.0 +2023-02-05 23:34:40,462 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 23:34:42,472 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 23:34:43,230 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6818, 5.6440, 5.0877, 1.6065, 5.0433, 5.2272, 5.2289, 4.5944], + device='cuda:1'), covar=tensor([0.0650, 0.0372, 0.0757, 0.4984, 0.0666, 0.0625, 0.1011, 0.0644], + device='cuda:1'), in_proj_covar=tensor([0.0370, 0.0267, 0.0296, 0.0380, 0.0293, 0.0240, 0.0286, 0.0222], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:34:56,221 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:59,307 INFO [train.py:901] (1/4) Epoch 4, batch 4800, loss[loss=0.251, simple_loss=0.3161, pruned_loss=0.09298, over 7637.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3644, pruned_loss=0.1267, over 1622517.87 frames. ], batch size: 19, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:34,007 INFO [train.py:901] (1/4) Epoch 4, batch 4850, loss[loss=0.4041, simple_loss=0.4254, pruned_loss=0.1914, over 8271.00 frames. ], tot_loss[loss=0.3107, simple_loss=0.3655, pruned_loss=0.1279, over 1622433.00 frames. ], batch size: 48, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:34,044 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 23:35:39,703 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6170, 2.8436, 1.8681, 2.3092, 2.2514, 1.3921, 1.7796, 2.2933], + device='cuda:1'), covar=tensor([0.1231, 0.0356, 0.0897, 0.0597, 0.0604, 0.1271, 0.1043, 0.0738], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0237, 0.0302, 0.0300, 0.0326, 0.0311, 0.0338, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:35:49,596 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.099e+02 3.374e+02 4.405e+02 6.016e+02 1.134e+03, threshold=8.810e+02, percent-clipped=7.0 +2023-02-05 23:36:08,349 INFO [train.py:901] (1/4) Epoch 4, batch 4900, loss[loss=0.2493, simple_loss=0.3071, pruned_loss=0.09576, over 7646.00 frames. ], tot_loss[loss=0.3095, simple_loss=0.3648, pruned_loss=0.1271, over 1622830.36 frames. ], batch size: 19, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:41,949 INFO [train.py:901] (1/4) Epoch 4, batch 4950, loss[loss=0.3152, simple_loss=0.3728, pruned_loss=0.1287, over 8571.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.362, pruned_loss=0.1254, over 1620549.56 frames. 
], batch size: 39, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:46,762 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2684, 4.0777, 3.7605, 1.4820, 3.7784, 3.5827, 3.8963, 3.4486], + device='cuda:1'), covar=tensor([0.0876, 0.0673, 0.1091, 0.4939, 0.0827, 0.0766, 0.1143, 0.0666], + device='cuda:1'), in_proj_covar=tensor([0.0376, 0.0277, 0.0300, 0.0387, 0.0300, 0.0246, 0.0293, 0.0226], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-05 23:36:56,268 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:36:58,766 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.208e+02 3.912e+02 5.596e+02 9.849e+02, threshold=7.824e+02, percent-clipped=2.0 +2023-02-05 23:36:58,865 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:00,341 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:01,726 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0403, 1.0464, 0.9419, 0.9823, 0.7208, 1.1481, 0.0368, 0.8566], + device='cuda:1'), covar=tensor([0.3270, 0.2010, 0.1168, 0.1837, 0.5015, 0.0931, 0.4705, 0.1967], + device='cuda:1'), in_proj_covar=tensor([0.0124, 0.0107, 0.0080, 0.0151, 0.0191, 0.0082, 0.0136, 0.0116], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:37:12,969 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29244.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:16,643 INFO [train.py:901] (1/4) Epoch 4, batch 5000, loss[loss=0.2786, simple_loss=0.3327, pruned_loss=0.1122, over 7942.00 frames. ], tot_loss[loss=0.3058, simple_loss=0.3615, pruned_loss=0.125, over 1618926.33 frames. ], batch size: 20, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:37:16,867 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29250.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:19,511 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29254.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:34,853 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6190, 2.6963, 2.7269, 2.1786, 1.4249, 3.0019, 0.5178, 1.8716], + device='cuda:1'), covar=tensor([0.4669, 0.1546, 0.1218, 0.3599, 0.6591, 0.1046, 0.6742, 0.2552], + device='cuda:1'), in_proj_covar=tensor([0.0126, 0.0109, 0.0083, 0.0156, 0.0192, 0.0082, 0.0140, 0.0118], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:37:51,640 INFO [train.py:901] (1/4) Epoch 4, batch 5050, loss[loss=0.2406, simple_loss=0.2999, pruned_loss=0.09064, over 7928.00 frames. ], tot_loss[loss=0.3064, simple_loss=0.3618, pruned_loss=0.1255, over 1616974.35 frames. ], batch size: 20, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:07,701 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 3.436e+02 4.072e+02 5.001e+02 1.022e+03, threshold=8.144e+02, percent-clipped=3.0 +2023-02-05 23:38:14,944 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-05 23:38:18,449 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29338.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:26,644 INFO [train.py:901] (1/4) Epoch 4, batch 5100, loss[loss=0.2402, simple_loss=0.304, pruned_loss=0.08816, over 7797.00 frames. ], tot_loss[loss=0.3049, simple_loss=0.3606, pruned_loss=0.1246, over 1618194.58 frames. ], batch size: 19, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:36,222 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29364.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:39:00,484 INFO [train.py:901] (1/4) Epoch 4, batch 5150, loss[loss=0.2447, simple_loss=0.3157, pruned_loss=0.0869, over 7648.00 frames. ], tot_loss[loss=0.303, simple_loss=0.3595, pruned_loss=0.1232, over 1614021.88 frames. ], batch size: 19, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:13,945 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2554, 1.4675, 2.3089, 1.0255, 1.5803, 1.4278, 1.2549, 1.4567], + device='cuda:1'), covar=tensor([0.1413, 0.1504, 0.0577, 0.2742, 0.1121, 0.2191, 0.1364, 0.1455], + device='cuda:1'), in_proj_covar=tensor([0.0455, 0.0433, 0.0513, 0.0517, 0.0559, 0.0501, 0.0440, 0.0571], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:39:16,241 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.145e+02 3.888e+02 4.871e+02 1.199e+03, threshold=7.777e+02, percent-clipped=1.0 +2023-02-05 23:39:35,364 INFO [train.py:901] (1/4) Epoch 4, batch 5200, loss[loss=0.311, simple_loss=0.3789, pruned_loss=0.1216, over 8494.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3594, pruned_loss=0.1234, over 1609442.26 frames. ], batch size: 28, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:37,871 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 23:39:54,954 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29479.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:40:09,486 INFO [train.py:901] (1/4) Epoch 4, batch 5250, loss[loss=0.2881, simple_loss=0.3551, pruned_loss=0.1106, over 8244.00 frames. ], tot_loss[loss=0.302, simple_loss=0.359, pruned_loss=0.1225, over 1611120.01 frames. ], batch size: 22, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:12,194 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 23:40:25,985 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.346e+02 3.507e+02 4.371e+02 5.555e+02 1.318e+03, threshold=8.742e+02, percent-clipped=11.0 +2023-02-05 23:40:43,420 INFO [train.py:901] (1/4) Epoch 4, batch 5300, loss[loss=0.308, simple_loss=0.3516, pruned_loss=0.1322, over 7814.00 frames. ], tot_loss[loss=0.3025, simple_loss=0.3591, pruned_loss=0.123, over 1611375.42 frames. 
], batch size: 20, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:57,665 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29569.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:14,734 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29594.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:16,736 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5962, 1.1426, 1.2669, 1.0105, 1.1641, 1.1317, 1.3215, 1.2150], + device='cuda:1'), covar=tensor([0.0675, 0.1462, 0.2009, 0.1605, 0.0651, 0.1798, 0.0794, 0.0669], + device='cuda:1'), in_proj_covar=tensor([0.0152, 0.0195, 0.0230, 0.0195, 0.0152, 0.0199, 0.0161, 0.0162], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 23:41:17,286 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29598.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:18,614 INFO [train.py:901] (1/4) Epoch 4, batch 5350, loss[loss=0.2933, simple_loss=0.3609, pruned_loss=0.1128, over 8136.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3596, pruned_loss=0.1233, over 1611334.57 frames. ], batch size: 22, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:41:32,358 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29619.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:35,507 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 3.127e+02 4.006e+02 4.952e+02 2.682e+03, threshold=8.012e+02, percent-clipped=7.0 +2023-02-05 23:41:53,621 INFO [train.py:901] (1/4) Epoch 4, batch 5400, loss[loss=0.2702, simple_loss=0.3246, pruned_loss=0.1079, over 7529.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3596, pruned_loss=0.1236, over 1609569.42 frames. ], batch size: 18, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:14,529 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:28,600 INFO [train.py:901] (1/4) Epoch 4, batch 5450, loss[loss=0.2656, simple_loss=0.3301, pruned_loss=0.1005, over 8142.00 frames. ], tot_loss[loss=0.3045, simple_loss=0.3606, pruned_loss=0.1242, over 1606683.64 frames. ], batch size: 22, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:37,302 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29713.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:44,948 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.180e+02 3.089e+02 4.007e+02 5.016e+02 9.074e+02, threshold=8.014e+02, percent-clipped=4.0 +2023-02-05 23:42:52,684 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29735.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:57,993 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 23:43:02,828 INFO [train.py:901] (1/4) Epoch 4, batch 5500, loss[loss=0.3171, simple_loss=0.3715, pruned_loss=0.1313, over 8294.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3607, pruned_loss=0.1237, over 1604936.10 frames. 
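The `grad_scale` field in these lines sits at 4.0 after dropping from 8.0 around batch 5250, climbs back to 8.0 at batch 5600, and reaches 16.0 by batch 7250 later in the log; that back-off-and-regrow pattern is characteristic of automatic mixed-precision loss scaling. Below is a generic PyTorch AMP step using the standard `torch.cuda.amp` API; the batch structure is an assumption and nothing here is recipe-specific.

```python
import torch

scaler = torch.cuda.amp.GradScaler()  # starts at a configurable scale

def train_step(model, optimizer, batch, device="cuda"):
    """One AMP step; scaler.get_scale() is the kind of value logged as
    grad_scale (halved after an overflow, regrown after stable steps)."""
    features, targets = batch  # assumed simple (input, target) batches
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.amp.autocast():
        loss = model(features.to(device), targets.to(device))
    scaler.scale(loss).backward()
    scaler.step(optimizer)  # skips the update if gradients overflowed
    scaler.update()         # adjusts the scale up or down
    return loss.detach(), scaler.get_scale()
```

With `GradScaler`'s defaults the scale halves on an overflowing step and doubles after 2000 consecutive clean ones, which is consistent with the 16 → 8 → 4 → 8 → 16 trajectory visible across this stretch of the log.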
], batch size: 23, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:09,623 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2757, 1.4536, 1.1724, 1.9290, 0.8966, 1.1536, 1.2643, 1.5154], + device='cuda:1'), covar=tensor([0.1404, 0.1257, 0.2034, 0.0774, 0.1623, 0.2303, 0.1351, 0.1070], + device='cuda:1'), in_proj_covar=tensor([0.0274, 0.0282, 0.0297, 0.0221, 0.0264, 0.0292, 0.0302, 0.0274], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 23:43:10,332 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29760.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:23,276 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-02-05 23:43:38,374 INFO [train.py:901] (1/4) Epoch 4, batch 5550, loss[loss=0.3431, simple_loss=0.3902, pruned_loss=0.1481, over 8337.00 frames. ], tot_loss[loss=0.3053, simple_loss=0.3615, pruned_loss=0.1245, over 1605613.15 frames. ], batch size: 25, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:51,763 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:54,237 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.211e+02 3.931e+02 4.808e+02 9.688e+02, threshold=7.861e+02, percent-clipped=2.0 +2023-02-05 23:44:12,162 INFO [train.py:901] (1/4) Epoch 4, batch 5600, loss[loss=0.3268, simple_loss=0.3846, pruned_loss=0.1345, over 8329.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3604, pruned_loss=0.1231, over 1608915.42 frames. ], batch size: 25, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:46,063 INFO [train.py:901] (1/4) Epoch 4, batch 5650, loss[loss=0.3325, simple_loss=0.3751, pruned_loss=0.1449, over 8134.00 frames. ], tot_loss[loss=0.3054, simple_loss=0.3618, pruned_loss=0.1245, over 1612909.76 frames. ], batch size: 22, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:55,378 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29913.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:03,290 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.236e+02 4.025e+02 5.119e+02 8.732e+02, threshold=8.050e+02, percent-clipped=2.0 +2023-02-05 23:45:03,322 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 23:45:20,777 INFO [train.py:901] (1/4) Epoch 4, batch 5700, loss[loss=0.2502, simple_loss=0.3368, pruned_loss=0.0818, over 8337.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.3621, pruned_loss=0.1245, over 1614481.20 frames. ], batch size: 25, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:45:34,475 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:41,359 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. 
limit=2.0 +2023-02-05 23:45:48,656 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8634, 2.2484, 1.7073, 2.5522, 1.2151, 1.3006, 1.7539, 2.3459], + device='cuda:1'), covar=tensor([0.1159, 0.1133, 0.1528, 0.0540, 0.1659, 0.2254, 0.1539, 0.1027], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0287, 0.0301, 0.0224, 0.0264, 0.0296, 0.0310, 0.0276], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-05 23:45:52,052 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29994.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:55,961 INFO [train.py:901] (1/4) Epoch 4, batch 5750, loss[loss=0.3212, simple_loss=0.3869, pruned_loss=0.1277, over 8512.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3612, pruned_loss=0.124, over 1616416.33 frames. ], batch size: 26, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:46:07,174 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 23:46:13,263 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.352e+02 3.278e+02 4.024e+02 4.787e+02 1.009e+03, threshold=8.047e+02, percent-clipped=4.0 +2023-02-05 23:46:13,350 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:17,493 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30028.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:32,295 INFO [train.py:901] (1/4) Epoch 4, batch 5800, loss[loss=0.3638, simple_loss=0.3941, pruned_loss=0.1667, over 6946.00 frames. ], tot_loss[loss=0.3059, simple_loss=0.3619, pruned_loss=0.125, over 1615469.88 frames. ], batch size: 71, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:47:06,538 INFO [train.py:901] (1/4) Epoch 4, batch 5850, loss[loss=0.3304, simple_loss=0.3806, pruned_loss=0.1401, over 8143.00 frames. ], tot_loss[loss=0.3074, simple_loss=0.3629, pruned_loss=0.126, over 1614115.28 frames. ], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:23,103 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 3.427e+02 4.657e+02 5.932e+02 9.223e+02, threshold=9.314e+02, percent-clipped=4.0 +2023-02-05 23:47:30,861 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0568, 2.2540, 4.1156, 3.1132, 3.1298, 2.3649, 1.6373, 1.6911], + device='cuda:1'), covar=tensor([0.1338, 0.1934, 0.0348, 0.0827, 0.0848, 0.0740, 0.0886, 0.1791], + device='cuda:1'), in_proj_covar=tensor([0.0687, 0.0615, 0.0522, 0.0589, 0.0701, 0.0576, 0.0559, 0.0574], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-05 23:47:33,291 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30139.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:35,877 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:41,683 INFO [train.py:901] (1/4) Epoch 4, batch 5900, loss[loss=0.274, simple_loss=0.3481, pruned_loss=0.09995, over 8246.00 frames. ], tot_loss[loss=0.307, simple_loss=0.3624, pruned_loss=0.1258, over 1613022.04 frames. 
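The `scaling.py` lines (`Whitening: num_groups=…, num_channels=…, metric=… vs. limit=…`) compare a per-module whitening metric against a limit; entries like `metric=2.17 vs. limit=2.0` mark batches where the activations' channel covariance has drifted far from isotropic. The function below sketches one plausible such metric, the eigenvalue-spread ratio `d * tr(C^2) / tr(C)^2`; the actual formula used by the module is an assumption here.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Assumed metric: x has shape (num_frames, num_channels); channels
    are split into num_groups groups, and for each group's covariance C
    we take d * trace(C @ C) / trace(C)**2 with d = channels per group.
    The value is 1.0 for a perfectly white (isotropic) covariance and
    grows with the spread of C's eigenvalues."""
    num_frames, num_channels = x.shape
    assert num_channels % num_groups == 0
    d = num_channels // num_groups
    x = x.reshape(num_frames, num_groups, d).transpose(0, 1)  # (g, f, d)
    x = x - x.mean(dim=1, keepdim=True)
    cov = torch.matmul(x.transpose(1, 2), x) / num_frames     # (g, d, d)
    trace = cov.diagonal(dim1=-2, dim2=-1).sum(-1)            # tr(C)
    trace_sq = (cov * cov).sum(dim=(-2, -1))                  # tr(C @ C)
    return (d * trace_sq / trace.clamp(min=1e-20) ** 2).mean()
```

Under this reading, `limit=2.0` caps how anisotropic the activations may become, and the logged comparisons simply report the batches where the metric exceeded that cap.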
], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:51,359 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:15,969 INFO [train.py:901] (1/4) Epoch 4, batch 5950, loss[loss=0.2981, simple_loss=0.3522, pruned_loss=0.122, over 8235.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3623, pruned_loss=0.1261, over 1611976.97 frames. ], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:32,439 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.869e+02 3.143e+02 3.968e+02 4.977e+02 1.070e+03, threshold=7.937e+02, percent-clipped=1.0 +2023-02-05 23:48:35,345 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30227.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:50,186 INFO [train.py:901] (1/4) Epoch 4, batch 6000, loss[loss=0.3083, simple_loss=0.3693, pruned_loss=0.1237, over 8612.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3625, pruned_loss=0.1264, over 1613370.28 frames. ], batch size: 31, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:50,186 INFO [train.py:926] (1/4) Computing validation loss +2023-02-05 23:49:02,859 INFO [train.py:935] (1/4) Epoch 4, validation: loss=0.2338, simple_loss=0.3275, pruned_loss=0.07005, over 944034.00 frames. +2023-02-05 23:49:02,860 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-05 23:49:22,554 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:25,895 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:37,706 INFO [train.py:901] (1/4) Epoch 4, batch 6050, loss[loss=0.2916, simple_loss=0.3571, pruned_loss=0.113, over 8288.00 frames. ], tot_loss[loss=0.3072, simple_loss=0.3622, pruned_loss=0.126, over 1609751.30 frames. ], batch size: 23, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:49:44,061 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30309.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:53,935 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 3.338e+02 3.992e+02 4.649e+02 1.183e+03, threshold=7.984e+02, percent-clipped=3.0 +2023-02-05 23:50:12,470 INFO [train.py:901] (1/4) Epoch 4, batch 6100, loss[loss=0.3137, simple_loss=0.3609, pruned_loss=0.1333, over 7800.00 frames. ], tot_loss[loss=0.3067, simple_loss=0.3619, pruned_loss=0.1257, over 1610271.07 frames. ], batch size: 19, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:50:32,433 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30378.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:39,279 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 23:50:44,138 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30395.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:47,304 INFO [train.py:901] (1/4) Epoch 4, batch 6150, loss[loss=0.381, simple_loss=0.4265, pruned_loss=0.1678, over 8472.00 frames. ], tot_loss[loss=0.3065, simple_loss=0.3618, pruned_loss=0.1256, over 1613012.35 frames. 
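At batch 6000 the trainer pauses training to compute a validation loss over roughly 944k frames and then reports the peak CUDA memory (`6555MB`). A generic sketch of such a periodic validation pass follows; `model_forward` is a hypothetical helper standing in for however the recipe obtains a loss and frame count per batch.

```python
import torch

def model_forward(model, batch, device):
    """Hypothetical helper: returns (loss, num_frames) for one batch."""
    features, targets, num_frames = batch
    loss = model(features.to(device), targets.to(device))
    return loss, float(num_frames)

def compute_validation_loss(model, valid_loader, device="cuda"):
    """One gradient-free pass over the validation set, reporting the
    frame-weighted average loss and peak GPU memory, as in the log."""
    was_training = model.training
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = model_forward(model, batch, device)
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    if was_training:
        model.train()
    print(f"validation: loss={tot_loss / tot_frames:.4f}, "
          f"over {tot_frames:.2f} frames.")
    if torch.cuda.is_available():
        mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
        print(f"Maximum memory allocated so far is {mb}MB")
```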
], batch size: 29, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:50:51,617 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6703, 2.3594, 3.0513, 0.7183, 3.0465, 1.8167, 1.3302, 1.5327], + device='cuda:1'), covar=tensor([0.0342, 0.0109, 0.0079, 0.0291, 0.0163, 0.0310, 0.0361, 0.0180], + device='cuda:1'), in_proj_covar=tensor([0.0277, 0.0196, 0.0163, 0.0241, 0.0190, 0.0323, 0.0259, 0.0223], + device='cuda:1'), out_proj_covar=tensor([1.0953e-04, 7.6379e-05, 6.0923e-05, 9.1732e-05, 7.5420e-05, 1.3652e-04, + 1.0317e-04, 8.6125e-05], device='cuda:1') +2023-02-05 23:50:57,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2312, 1.8633, 1.2125, 1.6262, 1.4752, 1.1253, 1.3988, 1.5586], + device='cuda:1'), covar=tensor([0.0698, 0.0237, 0.0663, 0.0349, 0.0439, 0.0762, 0.0574, 0.0486], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0235, 0.0307, 0.0304, 0.0332, 0.0314, 0.0337, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:51:02,475 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30420.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:05,069 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.511e+02 4.267e+02 5.249e+02 1.089e+03, threshold=8.535e+02, percent-clipped=6.0 +2023-02-05 23:51:23,145 INFO [train.py:901] (1/4) Epoch 4, batch 6200, loss[loss=0.2899, simple_loss=0.3593, pruned_loss=0.1102, over 8247.00 frames. ], tot_loss[loss=0.3076, simple_loss=0.3628, pruned_loss=0.1262, over 1615923.78 frames. ], batch size: 24, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:51:36,029 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6722, 2.8513, 1.6301, 2.0854, 2.1350, 1.4360, 1.7640, 2.0439], + device='cuda:1'), covar=tensor([0.1373, 0.0291, 0.1012, 0.0769, 0.0750, 0.1323, 0.1108, 0.0897], + device='cuda:1'), in_proj_covar=tensor([0.0364, 0.0243, 0.0313, 0.0316, 0.0339, 0.0320, 0.0342, 0.0319], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-05 23:51:48,270 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30487.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:57,470 INFO [train.py:901] (1/4) Epoch 4, batch 6250, loss[loss=0.2639, simple_loss=0.3364, pruned_loss=0.09575, over 8094.00 frames. ], tot_loss[loss=0.309, simple_loss=0.3638, pruned_loss=0.1271, over 1617662.24 frames. ], batch size: 23, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:14,465 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 3.291e+02 3.933e+02 5.014e+02 1.132e+03, threshold=7.866e+02, percent-clipped=4.0 +2023-02-05 23:52:22,914 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:24,418 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 23:52:32,705 INFO [train.py:901] (1/4) Epoch 4, batch 6300, loss[loss=0.3623, simple_loss=0.4106, pruned_loss=0.157, over 8352.00 frames. ], tot_loss[loss=0.3085, simple_loss=0.3637, pruned_loss=0.1267, over 1614457.29 frames. 
], batch size: 26, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:39,495 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:47,316 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30571.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:06,769 INFO [train.py:901] (1/4) Epoch 4, batch 6350, loss[loss=0.3402, simple_loss=0.4067, pruned_loss=0.1368, over 8501.00 frames. ], tot_loss[loss=0.3089, simple_loss=0.364, pruned_loss=0.1269, over 1615366.66 frames. ], batch size: 28, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:08,358 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:23,787 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.165e+02 3.849e+02 5.077e+02 1.430e+03, threshold=7.697e+02, percent-clipped=4.0 +2023-02-05 23:53:30,819 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.08 vs. limit=2.0 +2023-02-05 23:53:42,468 INFO [train.py:901] (1/4) Epoch 4, batch 6400, loss[loss=0.2566, simple_loss=0.324, pruned_loss=0.09463, over 8073.00 frames. ], tot_loss[loss=0.306, simple_loss=0.3616, pruned_loss=0.1252, over 1612130.76 frames. ], batch size: 21, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:43,310 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5506, 2.4717, 4.6761, 1.2283, 3.1249, 2.2409, 1.7768, 2.4221], + device='cuda:1'), covar=tensor([0.1418, 0.1415, 0.0527, 0.2802, 0.1260, 0.2008, 0.1281, 0.2167], + device='cuda:1'), in_proj_covar=tensor([0.0457, 0.0427, 0.0509, 0.0518, 0.0564, 0.0499, 0.0440, 0.0576], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:53:56,547 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 23:53:58,063 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30673.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:07,683 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30686.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:14,455 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9933, 2.3107, 3.7763, 1.5059, 2.8168, 2.3012, 2.0101, 2.5083], + device='cuda:1'), covar=tensor([0.0987, 0.1363, 0.0437, 0.2291, 0.0979, 0.1530, 0.0994, 0.1692], + device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0424, 0.0501, 0.0510, 0.0555, 0.0493, 0.0433, 0.0566], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:54:16,882 INFO [train.py:901] (1/4) Epoch 4, batch 6450, loss[loss=0.2964, simple_loss=0.359, pruned_loss=0.1169, over 8652.00 frames. ], tot_loss[loss=0.304, simple_loss=0.361, pruned_loss=0.1235, over 1615018.22 frames. 
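The recurring `attn_weights_entropy` tensors read as a health check on the attention heads: one entropy value per head, near zero when a head locks onto a single frame and approaching `log(src_len)` when it attends uniformly. A hedged sketch of that statistic follows; the shapes and reductions are assumptions.

```python
import torch

def attn_weights_entropy(attn: torch.Tensor,
                         eps: float = 1e-20) -> torch.Tensor:
    """attn: (batch, num_heads, tgt_len, src_len) attention weights whose
    rows sum to 1 over src_len. Returns the mean entropy per head."""
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (batch, heads, tgt)
    return ent.mean(dim=(0, 2))                     # average batch, time

# Example: 8 heads over 50 frames; uniform attention gives log(50) = 3.91.
attn = torch.softmax(torch.randn(2, 8, 50, 50), dim=-1)
print(attn_weights_entropy(attn))
```

Entries near zero in these logged tensors would, under this reading, indicate heads that have become almost deterministic.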
], batch size: 34, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:54:31,539 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30722.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:32,828 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.022e+02 3.987e+02 5.645e+02 1.412e+03, threshold=7.975e+02, percent-clipped=10.0 +2023-02-05 23:54:50,958 INFO [train.py:901] (1/4) Epoch 4, batch 6500, loss[loss=0.2942, simple_loss=0.3627, pruned_loss=0.1128, over 8518.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3604, pruned_loss=0.1234, over 1616164.32 frames. ], batch size: 28, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:55:26,192 INFO [train.py:901] (1/4) Epoch 4, batch 6550, loss[loss=0.2442, simple_loss=0.306, pruned_loss=0.09118, over 6384.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3605, pruned_loss=0.1225, over 1615137.17 frames. ], batch size: 14, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:55:42,638 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.539e+02 4.251e+02 5.114e+02 1.135e+03, threshold=8.501e+02, percent-clipped=1.0 +2023-02-05 23:55:50,034 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 23:55:51,488 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30837.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:00,640 INFO [train.py:901] (1/4) Epoch 4, batch 6600, loss[loss=0.2349, simple_loss=0.2977, pruned_loss=0.08607, over 7695.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3626, pruned_loss=0.1237, over 1618907.84 frames. ], batch size: 18, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:06,240 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30858.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:08,699 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:56:24,234 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30883.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:35,343 INFO [train.py:901] (1/4) Epoch 4, batch 6650, loss[loss=0.321, simple_loss=0.3633, pruned_loss=0.1394, over 6885.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3624, pruned_loss=0.124, over 1621958.85 frames. ], batch size: 71, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:50,095 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:51,878 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 3.362e+02 4.352e+02 5.461e+02 1.446e+03, threshold=8.703e+02, percent-clipped=3.0 +2023-02-05 23:57:01,675 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-05 23:57:04,055 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30942.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:04,665 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30943.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:09,171 INFO [train.py:901] (1/4) Epoch 4, batch 6700, loss[loss=0.3273, simple_loss=0.3461, pruned_loss=0.1542, over 7692.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.3621, pruned_loss=0.1245, over 1618667.31 frames. 
], batch size: 18, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:10,676 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30952.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:21,616 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:45,057 INFO [train.py:901] (1/4) Epoch 4, batch 6750, loss[loss=0.2957, simple_loss=0.3602, pruned_loss=0.1156, over 8549.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3606, pruned_loss=0.1232, over 1620828.68 frames. ], batch size: 31, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:56,392 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31017.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:00,825 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.225e+02 3.317e+02 4.136e+02 5.252e+02 1.678e+03, threshold=8.272e+02, percent-clipped=4.0 +2023-02-05 23:58:04,859 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31029.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:58:07,647 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3994, 1.6910, 2.3870, 1.2210, 1.8533, 1.6427, 1.4252, 1.5281], + device='cuda:1'), covar=tensor([0.1341, 0.1493, 0.0583, 0.2565, 0.0986, 0.2032, 0.1293, 0.1399], + device='cuda:1'), in_proj_covar=tensor([0.0460, 0.0431, 0.0520, 0.0526, 0.0569, 0.0508, 0.0448, 0.0582], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-05 23:58:15,267 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.21 vs. limit=5.0 +2023-02-05 23:58:18,941 INFO [train.py:901] (1/4) Epoch 4, batch 6800, loss[loss=0.2853, simple_loss=0.3478, pruned_loss=0.1114, over 8092.00 frames. ], tot_loss[loss=0.3043, simple_loss=0.3612, pruned_loss=0.1237, over 1615336.51 frames. ], batch size: 21, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:58:19,604 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 23:58:48,702 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31093.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:53,203 INFO [train.py:901] (1/4) Epoch 4, batch 6850, loss[loss=0.3699, simple_loss=0.408, pruned_loss=0.1659, over 8517.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.3595, pruned_loss=0.1231, over 1610408.27 frames. ], batch size: 49, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:59:06,887 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:10,004 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 23:59:10,569 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.178e+02 3.797e+02 5.313e+02 1.260e+03, threshold=7.594e+02, percent-clipped=4.0 +2023-02-05 23:59:16,196 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31132.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:28,759 INFO [train.py:901] (1/4) Epoch 4, batch 6900, loss[loss=0.2881, simple_loss=0.363, pruned_loss=0.1066, over 8324.00 frames. ], tot_loss[loss=0.3017, simple_loss=0.3589, pruned_loss=0.1223, over 1614976.07 frames. 
], batch size: 25, lr: 1.73e-02, grad_scale: 8.0 +2023-02-05 23:59:28,928 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5798, 1.7140, 3.2544, 1.2411, 2.2371, 3.5235, 3.2663, 2.9823], + device='cuda:1'), covar=tensor([0.1262, 0.1331, 0.0348, 0.2152, 0.0768, 0.0262, 0.0392, 0.0616], + device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0257, 0.0214, 0.0259, 0.0211, 0.0194, 0.0204, 0.0260], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 00:00:03,318 INFO [train.py:901] (1/4) Epoch 4, batch 6950, loss[loss=0.3282, simple_loss=0.3862, pruned_loss=0.1351, over 8479.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.3598, pruned_loss=0.123, over 1614676.89 frames. ], batch size: 25, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:06,208 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2760, 1.5842, 1.4780, 1.2231, 1.6526, 1.4631, 1.7966, 1.7889], + device='cuda:1'), covar=tensor([0.0644, 0.1345, 0.2065, 0.1669, 0.0622, 0.1690, 0.0804, 0.0576], + device='cuda:1'), in_proj_covar=tensor([0.0151, 0.0193, 0.0235, 0.0195, 0.0148, 0.0198, 0.0160, 0.0159], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:00:18,141 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 00:00:20,074 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 3.425e+02 4.122e+02 5.302e+02 9.579e+02, threshold=8.244e+02, percent-clipped=6.0 +2023-02-06 00:00:38,131 INFO [train.py:901] (1/4) Epoch 4, batch 7000, loss[loss=0.3544, simple_loss=0.3996, pruned_loss=0.1546, over 8477.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3608, pruned_loss=0.1246, over 1609553.26 frames. ], batch size: 27, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:48,755 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:00:54,169 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8741, 1.3721, 5.8954, 2.0537, 5.2701, 4.9628, 5.4987, 5.4624], + device='cuda:1'), covar=tensor([0.0457, 0.3494, 0.0225, 0.2112, 0.0880, 0.0518, 0.0387, 0.0381], + device='cuda:1'), in_proj_covar=tensor([0.0302, 0.0465, 0.0367, 0.0386, 0.0454, 0.0377, 0.0374, 0.0415], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 00:01:03,270 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:09,178 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:11,662 INFO [train.py:901] (1/4) Epoch 4, batch 7050, loss[loss=0.2332, simple_loss=0.2947, pruned_loss=0.08582, over 7699.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3599, pruned_loss=0.1237, over 1612167.08 frames. ], batch size: 18, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:28,375 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 3.221e+02 3.873e+02 5.326e+02 1.178e+03, threshold=7.746e+02, percent-clipped=8.0 +2023-02-06 00:01:47,442 INFO [train.py:901] (1/4) Epoch 4, batch 7100, loss[loss=0.3002, simple_loss=0.3464, pruned_loss=0.127, over 7430.00 frames. 
], tot_loss[loss=0.3036, simple_loss=0.3599, pruned_loss=0.1236, over 1605698.63 frames. ], batch size: 17, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:59,550 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2070, 1.5242, 2.2051, 1.0230, 1.6754, 1.3479, 1.2769, 1.4275], + device='cuda:1'), covar=tensor([0.1220, 0.1385, 0.0480, 0.2461, 0.1056, 0.1844, 0.1205, 0.1552], + device='cuda:1'), in_proj_covar=tensor([0.0470, 0.0436, 0.0520, 0.0527, 0.0572, 0.0516, 0.0449, 0.0582], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:02:02,610 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31373.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:02:08,041 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:13,376 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:21,056 INFO [train.py:901] (1/4) Epoch 4, batch 7150, loss[loss=0.3437, simple_loss=0.3878, pruned_loss=0.1498, over 8185.00 frames. ], tot_loss[loss=0.306, simple_loss=0.3619, pruned_loss=0.1251, over 1609898.76 frames. ], batch size: 23, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:02:22,714 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:28,563 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:29,914 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:37,019 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.187e+02 3.955e+02 5.000e+02 8.847e+02, threshold=7.910e+02, percent-clipped=2.0 +2023-02-06 00:02:37,260 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4586, 2.0343, 2.2779, 0.5564, 2.2600, 1.5990, 0.5688, 1.8010], + device='cuda:1'), covar=tensor([0.0182, 0.0076, 0.0081, 0.0204, 0.0143, 0.0257, 0.0281, 0.0086], + device='cuda:1'), in_proj_covar=tensor([0.0286, 0.0204, 0.0166, 0.0247, 0.0201, 0.0337, 0.0274, 0.0236], + device='cuda:1'), out_proj_covar=tensor([1.1113e-04, 7.8551e-05, 6.1639e-05, 9.2134e-05, 7.8388e-05, 1.4029e-04, + 1.0728e-04, 9.0225e-05], device='cuda:1') +2023-02-06 00:02:39,202 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:56,011 INFO [train.py:901] (1/4) Epoch 4, batch 7200, loss[loss=0.275, simple_loss=0.3254, pruned_loss=0.1123, over 7407.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3602, pruned_loss=0.1235, over 1613540.21 frames. 
], batch size: 17, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:03:02,368 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5838, 1.8819, 3.1648, 1.1062, 2.3824, 1.7920, 1.5810, 1.8661], + device='cuda:1'), covar=tensor([0.1465, 0.1778, 0.0590, 0.3038, 0.1240, 0.2326, 0.1328, 0.2158], + device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0427, 0.0508, 0.0513, 0.0556, 0.0503, 0.0437, 0.0572], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:03:04,340 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8130, 2.2729, 1.9256, 2.8869, 1.5231, 1.4547, 1.9841, 2.6386], + device='cuda:1'), covar=tensor([0.1216, 0.1176, 0.1464, 0.0577, 0.1711, 0.2386, 0.1584, 0.0742], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0278, 0.0293, 0.0218, 0.0261, 0.0292, 0.0292, 0.0267], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:03:22,241 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31488.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:03:23,586 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9124, 1.2604, 4.4279, 2.0301, 2.4038, 4.9675, 4.7437, 4.5382], + device='cuda:1'), covar=tensor([0.1305, 0.1697, 0.0279, 0.1815, 0.0847, 0.0219, 0.0324, 0.0520], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0265, 0.0218, 0.0265, 0.0222, 0.0198, 0.0210, 0.0270], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:03:30,969 INFO [train.py:901] (1/4) Epoch 4, batch 7250, loss[loss=0.2421, simple_loss=0.3197, pruned_loss=0.08227, over 7653.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3587, pruned_loss=0.1222, over 1615225.92 frames. ], batch size: 19, lr: 1.73e-02, grad_scale: 16.0 +2023-02-06 00:03:37,362 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31509.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:03:47,340 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.150e+02 3.858e+02 4.938e+02 9.845e+02, threshold=7.715e+02, percent-clipped=4.0 +2023-02-06 00:03:47,838 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 00:04:05,056 INFO [train.py:901] (1/4) Epoch 4, batch 7300, loss[loss=0.2595, simple_loss=0.3392, pruned_loss=0.08993, over 7931.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3576, pruned_loss=0.1217, over 1606338.15 frames. ], batch size: 20, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:37,950 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6472, 2.1058, 3.6331, 1.1240, 2.2964, 1.7772, 1.7533, 1.9294], + device='cuda:1'), covar=tensor([0.1708, 0.1981, 0.0688, 0.3406, 0.1697, 0.2559, 0.1556, 0.2513], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0434, 0.0507, 0.0515, 0.0555, 0.0506, 0.0442, 0.0577], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:04:40,425 INFO [train.py:901] (1/4) Epoch 4, batch 7350, loss[loss=0.3338, simple_loss=0.3925, pruned_loss=0.1375, over 8361.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3593, pruned_loss=0.1228, over 1602426.11 frames. 
], batch size: 24, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:57,284 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.182e+02 2.774e+02 3.613e+02 4.483e+02 1.102e+03, threshold=7.227e+02, percent-clipped=2.0 +2023-02-06 00:04:59,973 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 00:05:05,401 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:14,083 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3019, 1.5192, 1.4914, 1.0829, 1.5942, 1.4058, 1.7663, 1.5798], + device='cuda:1'), covar=tensor([0.0606, 0.1225, 0.1874, 0.1585, 0.0609, 0.1550, 0.0776, 0.0616], + device='cuda:1'), in_proj_covar=tensor([0.0146, 0.0191, 0.0233, 0.0193, 0.0146, 0.0196, 0.0157, 0.0159], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:05:14,564 INFO [train.py:901] (1/4) Epoch 4, batch 7400, loss[loss=0.2945, simple_loss=0.359, pruned_loss=0.115, over 8131.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3592, pruned_loss=0.1228, over 1602753.56 frames. ], batch size: 22, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:19,266 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 00:05:20,112 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:21,983 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:26,692 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:37,009 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:43,494 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:48,576 INFO [train.py:901] (1/4) Epoch 4, batch 7450, loss[loss=0.2754, simple_loss=0.3495, pruned_loss=0.1006, over 8244.00 frames. ], tot_loss[loss=0.3039, simple_loss=0.3606, pruned_loss=0.1237, over 1608394.27 frames. ], batch size: 24, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:58,011 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 00:06:04,225 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:05,435 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.933e+02 3.216e+02 3.933e+02 5.503e+02 1.387e+03, threshold=7.866e+02, percent-clipped=9.0 +2023-02-06 00:06:19,838 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31744.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:06:23,714 INFO [train.py:901] (1/4) Epoch 4, batch 7500, loss[loss=0.396, simple_loss=0.4126, pruned_loss=0.1897, over 6950.00 frames. ], tot_loss[loss=0.3056, simple_loss=0.362, pruned_loss=0.1246, over 1612739.84 frames. 
], batch size: 74, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:06:27,341 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9291, 3.8826, 2.3510, 2.3970, 2.6564, 1.8276, 2.2559, 2.8275], + device='cuda:1'), covar=tensor([0.1423, 0.0259, 0.0821, 0.0737, 0.0705, 0.1107, 0.1102, 0.1033], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0236, 0.0310, 0.0303, 0.0324, 0.0311, 0.0336, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 00:06:36,769 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31769.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:06:37,956 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:58,133 INFO [train.py:901] (1/4) Epoch 4, batch 7550, loss[loss=0.2734, simple_loss=0.3448, pruned_loss=0.101, over 8240.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3597, pruned_loss=0.1232, over 1609214.16 frames. ], batch size: 22, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:02,947 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:16,190 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.990e+02 2.842e+02 3.963e+02 5.244e+02 1.193e+03, threshold=7.926e+02, percent-clipped=8.0 +2023-02-06 00:07:27,214 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:32,194 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5047, 3.4311, 2.4115, 4.1846, 1.9369, 2.0545, 2.1801, 3.4878], + device='cuda:1'), covar=tensor([0.1182, 0.0982, 0.1906, 0.0328, 0.2017, 0.2422, 0.2106, 0.0899], + device='cuda:1'), in_proj_covar=tensor([0.0275, 0.0277, 0.0299, 0.0225, 0.0263, 0.0288, 0.0293, 0.0270], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:07:34,061 INFO [train.py:901] (1/4) Epoch 4, batch 7600, loss[loss=0.2731, simple_loss=0.3219, pruned_loss=0.1122, over 7216.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.358, pruned_loss=0.1224, over 1606657.53 frames. ], batch size: 16, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:36,148 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31853.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:07:58,617 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:05,425 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:07,987 INFO [train.py:901] (1/4) Epoch 4, batch 7650, loss[loss=0.2208, simple_loss=0.2938, pruned_loss=0.07387, over 7939.00 frames. ], tot_loss[loss=0.3007, simple_loss=0.3579, pruned_loss=0.1218, over 1604893.15 frames. 
], batch size: 20, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:26,153 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.122e+02 3.181e+02 3.860e+02 4.828e+02 9.649e+02, threshold=7.720e+02, percent-clipped=2.0 +2023-02-06 00:08:31,512 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:40,609 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.56 vs. limit=5.0 +2023-02-06 00:08:43,637 INFO [train.py:901] (1/4) Epoch 4, batch 7700, loss[loss=0.3253, simple_loss=0.38, pruned_loss=0.1352, over 8507.00 frames. ], tot_loss[loss=0.3006, simple_loss=0.3579, pruned_loss=0.1217, over 1605919.01 frames. ], batch size: 49, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:53,640 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 00:08:55,985 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:56,021 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31968.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:09:06,796 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 00:09:17,470 INFO [train.py:901] (1/4) Epoch 4, batch 7750, loss[loss=0.2463, simple_loss=0.3202, pruned_loss=0.08625, over 7800.00 frames. ], tot_loss[loss=0.3022, simple_loss=0.3587, pruned_loss=0.1229, over 1605880.22 frames. ], batch size: 19, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:09:35,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.165e+02 3.163e+02 3.927e+02 5.355e+02 1.239e+03, threshold=7.853e+02, percent-clipped=4.0 +2023-02-06 00:09:44,297 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6319, 2.9439, 1.7334, 2.1293, 2.2465, 1.4648, 2.0795, 2.2146], + device='cuda:1'), covar=tensor([0.1224, 0.0322, 0.0876, 0.0652, 0.0695, 0.1186, 0.0848, 0.0919], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0241, 0.0317, 0.0309, 0.0328, 0.0314, 0.0341, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 00:09:53,604 INFO [train.py:901] (1/4) Epoch 4, batch 7800, loss[loss=0.271, simple_loss=0.3519, pruned_loss=0.09508, over 8288.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.359, pruned_loss=0.1223, over 1609624.52 frames. ], batch size: 23, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:05,166 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:10:27,396 INFO [train.py:901] (1/4) Epoch 4, batch 7850, loss[loss=0.3699, simple_loss=0.4115, pruned_loss=0.1641, over 8332.00 frames. ], tot_loss[loss=0.3001, simple_loss=0.3579, pruned_loss=0.1212, over 1614577.25 frames. ], batch size: 26, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:43,937 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.460e+02 3.521e+02 4.480e+02 6.179e+02 1.308e+03, threshold=8.960e+02, percent-clipped=13.0 +2023-02-06 00:10:55,616 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:00,779 INFO [train.py:901] (1/4) Epoch 4, batch 7900, loss[loss=0.2447, simple_loss=0.3148, pruned_loss=0.08725, over 7912.00 frames. 
], tot_loss[loss=0.3009, simple_loss=0.3582, pruned_loss=0.1218, over 1605001.73 frames. ], batch size: 20, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:00,852 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:12,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:21,566 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:24,161 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:34,317 INFO [train.py:901] (1/4) Epoch 4, batch 7950, loss[loss=0.2882, simple_loss=0.347, pruned_loss=0.1147, over 7932.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3585, pruned_loss=0.1217, over 1606498.93 frames. ], batch size: 20, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:51,235 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32224.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:11:51,628 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.248e+02 3.536e+02 4.226e+02 5.315e+02 1.259e+03, threshold=8.452e+02, percent-clipped=4.0 +2023-02-06 00:11:53,748 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3616, 1.6228, 1.5806, 0.4831, 1.5706, 1.1949, 0.3072, 1.6225], + device='cuda:1'), covar=tensor([0.0159, 0.0102, 0.0095, 0.0182, 0.0117, 0.0331, 0.0270, 0.0078], + device='cuda:1'), in_proj_covar=tensor([0.0286, 0.0203, 0.0164, 0.0251, 0.0199, 0.0331, 0.0269, 0.0232], + device='cuda:1'), out_proj_covar=tensor([1.0900e-04, 7.7012e-05, 6.0510e-05, 9.3008e-05, 7.6624e-05, 1.3534e-04, + 1.0371e-04, 8.7393e-05], device='cuda:1') +2023-02-06 00:12:01,778 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:08,272 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32249.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:12:08,749 INFO [train.py:901] (1/4) Epoch 4, batch 8000, loss[loss=0.3073, simple_loss=0.3547, pruned_loss=0.1299, over 7772.00 frames. ], tot_loss[loss=0.3011, simple_loss=0.3579, pruned_loss=0.1222, over 1604095.44 frames. 
], batch size: 19, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:12:11,585 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6202, 1.5863, 1.6574, 1.3926, 1.6104, 1.7061, 1.8913, 1.6422], + device='cuda:1'), covar=tensor([0.0626, 0.1297, 0.1918, 0.1530, 0.0667, 0.1575, 0.0823, 0.0638], + device='cuda:1'), in_proj_covar=tensor([0.0148, 0.0189, 0.0232, 0.0194, 0.0144, 0.0198, 0.0159, 0.0161], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:12:19,078 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:26,260 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1555, 1.6886, 1.2322, 1.5470, 1.4715, 1.1151, 1.2797, 1.5530], + device='cuda:1'), covar=tensor([0.0748, 0.0294, 0.0596, 0.0302, 0.0356, 0.0789, 0.0537, 0.0479], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0239, 0.0315, 0.0309, 0.0323, 0.0314, 0.0340, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 00:12:26,798 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:41,283 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6850, 2.0710, 1.7738, 2.6554, 1.2199, 1.4722, 1.6802, 2.2157], + device='cuda:1'), covar=tensor([0.1293, 0.1207, 0.1601, 0.0593, 0.1727, 0.2181, 0.1583, 0.1180], + device='cuda:1'), in_proj_covar=tensor([0.0279, 0.0277, 0.0300, 0.0224, 0.0259, 0.0294, 0.0297, 0.0269], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:12:42,459 INFO [train.py:901] (1/4) Epoch 4, batch 8050, loss[loss=0.2551, simple_loss=0.3191, pruned_loss=0.0956, over 7543.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3571, pruned_loss=0.1228, over 1587984.05 frames. ], batch size: 18, lr: 1.70e-02, grad_scale: 8.0 +2023-02-06 00:12:42,649 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:50,708 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:58,916 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.697e+02 3.496e+02 4.220e+02 5.135e+02 1.064e+03, threshold=8.441e+02, percent-clipped=2.0 +2023-02-06 00:13:02,564 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 00:13:15,984 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 00:13:19,663 INFO [train.py:901] (1/4) Epoch 5, batch 0, loss[loss=0.351, simple_loss=0.3988, pruned_loss=0.1516, over 8495.00 frames. ], tot_loss[loss=0.351, simple_loss=0.3988, pruned_loss=0.1516, over 8495.00 frames. ], batch size: 26, lr: 1.59e-02, grad_scale: 8.0 +2023-02-06 00:13:19,663 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 00:13:31,617 INFO [train.py:935] (1/4) Epoch 5, validation: loss=0.2309, simple_loss=0.3254, pruned_loss=0.06822, over 944034.00 frames. 
+2023-02-06 00:13:31,618 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-06 00:13:46,430 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 00:13:46,603 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:06,999 INFO [train.py:901] (1/4) Epoch 5, batch 50, loss[loss=0.2828, simple_loss=0.3269, pruned_loss=0.1194, over 8081.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.3616, pruned_loss=0.1221, over 369140.90 frames. ], batch size: 21, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:14,087 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:22,032 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 00:14:22,903 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 00:14:36,523 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.148e+02 3.721e+02 4.839e+02 1.477e+03, threshold=7.442e+02, percent-clipped=1.0 +2023-02-06 00:14:38,052 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:41,771 INFO [train.py:901] (1/4) Epoch 5, batch 100, loss[loss=0.2979, simple_loss=0.3532, pruned_loss=0.1213, over 8517.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3595, pruned_loss=0.1215, over 646985.18 frames. ], batch size: 39, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:44,567 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:45,029 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 00:15:01,741 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 00:15:02,111 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:15,803 INFO [train.py:901] (1/4) Epoch 5, batch 150, loss[loss=0.2888, simple_loss=0.3488, pruned_loss=0.1144, over 8026.00 frames. ], tot_loss[loss=0.296, simple_loss=0.3546, pruned_loss=0.1187, over 856675.37 frames. ], batch size: 22, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:43,041 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:45,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.082e+02 3.007e+02 3.818e+02 4.644e+02 8.323e+02, threshold=7.636e+02, percent-clipped=1.0 +2023-02-06 00:15:50,802 INFO [train.py:901] (1/4) Epoch 5, batch 200, loss[loss=0.34, simple_loss=0.3856, pruned_loss=0.1472, over 8790.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3559, pruned_loss=0.1196, over 1020911.69 frames. 
], batch size: 49, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:59,824 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:06,415 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:08,360 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5507, 2.0628, 2.1666, 1.0234, 2.1385, 1.5145, 0.5377, 1.8266], + device='cuda:1'), covar=tensor([0.0192, 0.0084, 0.0065, 0.0185, 0.0112, 0.0293, 0.0286, 0.0076], + device='cuda:1'), in_proj_covar=tensor([0.0292, 0.0205, 0.0165, 0.0252, 0.0200, 0.0329, 0.0268, 0.0233], + device='cuda:1'), out_proj_covar=tensor([1.1070e-04, 7.7295e-05, 6.0324e-05, 9.2430e-05, 7.6535e-05, 1.3368e-04, + 1.0327e-04, 8.7060e-05], device='cuda:1') +2023-02-06 00:16:23,703 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:24,859 INFO [train.py:901] (1/4) Epoch 5, batch 250, loss[loss=0.269, simple_loss=0.3398, pruned_loss=0.09907, over 8455.00 frames. ], tot_loss[loss=0.2981, simple_loss=0.3563, pruned_loss=0.1199, over 1157405.19 frames. ], batch size: 25, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:16:36,169 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 00:16:45,164 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:46,311 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 00:16:54,483 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 3.241e+02 4.131e+02 4.869e+02 1.219e+03, threshold=8.263e+02, percent-clipped=9.0 +2023-02-06 00:17:00,684 INFO [train.py:901] (1/4) Epoch 5, batch 300, loss[loss=0.2864, simple_loss=0.3271, pruned_loss=0.1229, over 7519.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.3569, pruned_loss=0.1197, over 1259890.21 frames. ], batch size: 18, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:03,009 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:10,894 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:14,888 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1250, 1.1971, 2.2436, 0.8749, 2.1809, 2.4914, 2.3266, 2.0872], + device='cuda:1'), covar=tensor([0.0994, 0.1095, 0.0492, 0.1938, 0.0518, 0.0328, 0.0498, 0.0745], + device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0265, 0.0222, 0.0261, 0.0224, 0.0194, 0.0212, 0.0270], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:17:27,580 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32673.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:34,188 INFO [train.py:901] (1/4) Epoch 5, batch 350, loss[loss=0.3028, simple_loss=0.3762, pruned_loss=0.1147, over 8440.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.3575, pruned_loss=0.1195, over 1342896.20 frames. 
], batch size: 27, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:34,409 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:48,303 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3016, 1.1658, 4.4733, 1.5546, 3.7770, 3.7159, 4.0133, 3.9196], + device='cuda:1'), covar=tensor([0.0355, 0.3755, 0.0298, 0.2402, 0.1027, 0.0618, 0.0370, 0.0464], + device='cuda:1'), in_proj_covar=tensor([0.0304, 0.0472, 0.0384, 0.0390, 0.0455, 0.0378, 0.0376, 0.0415], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 00:17:51,715 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:18:04,034 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.189e+02 4.031e+02 4.810e+02 8.158e+02, threshold=8.062e+02, percent-clipped=0.0 +2023-02-06 00:18:09,328 INFO [train.py:901] (1/4) Epoch 5, batch 400, loss[loss=0.326, simple_loss=0.3848, pruned_loss=0.1336, over 8344.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3558, pruned_loss=0.1191, over 1396882.35 frames. ], batch size: 26, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:18:43,765 INFO [train.py:901] (1/4) Epoch 5, batch 450, loss[loss=0.3803, simple_loss=0.4014, pruned_loss=0.1796, over 6736.00 frames. ], tot_loss[loss=0.2983, simple_loss=0.3569, pruned_loss=0.1199, over 1445837.69 frames. ], batch size: 71, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:19:02,189 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 00:19:09,314 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0990, 3.0829, 2.8135, 1.5590, 2.7784, 2.7568, 2.9210, 2.5739], + device='cuda:1'), covar=tensor([0.1270, 0.0828, 0.1116, 0.4813, 0.0955, 0.1129, 0.1375, 0.0944], + device='cuda:1'), in_proj_covar=tensor([0.0379, 0.0279, 0.0310, 0.0409, 0.0302, 0.0266, 0.0295, 0.0237], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-06 00:19:12,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.263e+02 3.122e+02 4.068e+02 4.898e+02 9.897e+02, threshold=8.137e+02, percent-clipped=5.0 +2023-02-06 00:19:17,681 INFO [train.py:901] (1/4) Epoch 5, batch 500, loss[loss=0.3267, simple_loss=0.3801, pruned_loss=0.1366, over 8238.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3572, pruned_loss=0.1202, over 1485030.72 frames. ], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:19:52,898 INFO [train.py:901] (1/4) Epoch 5, batch 550, loss[loss=0.2815, simple_loss=0.3502, pruned_loss=0.1064, over 8191.00 frames. ], tot_loss[loss=0.2974, simple_loss=0.3562, pruned_loss=0.1193, over 1517542.60 frames. 
], batch size: 23, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:19:58,693 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4097, 1.9457, 3.4878, 1.0973, 2.4191, 1.7810, 1.5436, 2.0578], + device='cuda:1'), covar=tensor([0.1643, 0.1775, 0.0615, 0.3033, 0.1416, 0.2247, 0.1452, 0.2158], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0437, 0.0515, 0.0522, 0.0570, 0.0500, 0.0444, 0.0583], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:20:13,547 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0 +2023-02-06 00:20:21,235 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.184e+02 3.133e+02 3.697e+02 5.126e+02 1.321e+03, threshold=7.393e+02, percent-clipped=4.0 +2023-02-06 00:20:26,722 INFO [train.py:901] (1/4) Epoch 5, batch 600, loss[loss=0.2671, simple_loss=0.3181, pruned_loss=0.1081, over 7541.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3551, pruned_loss=0.119, over 1536540.03 frames. ], batch size: 18, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:20:47,716 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7038, 2.4287, 4.7076, 1.2032, 2.6855, 2.1494, 1.7047, 2.3579], + device='cuda:1'), covar=tensor([0.1321, 0.1526, 0.0510, 0.2830, 0.1297, 0.1979, 0.1270, 0.2136], + device='cuda:1'), in_proj_covar=tensor([0.0464, 0.0440, 0.0518, 0.0527, 0.0571, 0.0502, 0.0445, 0.0587], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:20:50,110 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 00:21:02,150 INFO [train.py:901] (1/4) Epoch 5, batch 650, loss[loss=0.394, simple_loss=0.4216, pruned_loss=0.1831, over 8322.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.356, pruned_loss=0.1193, over 1555704.09 frames. ], batch size: 26, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:21:02,980 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:21:09,633 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1640, 3.1818, 2.8896, 1.5745, 2.8712, 2.8101, 2.9366, 2.6844], + device='cuda:1'), covar=tensor([0.1280, 0.0806, 0.1168, 0.4474, 0.0951, 0.1072, 0.1422, 0.1056], + device='cuda:1'), in_proj_covar=tensor([0.0377, 0.0277, 0.0312, 0.0405, 0.0300, 0.0264, 0.0293, 0.0236], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-06 00:21:30,780 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 3.090e+02 3.854e+02 5.024e+02 8.355e+02, threshold=7.708e+02, percent-clipped=4.0 +2023-02-06 00:21:36,137 INFO [train.py:901] (1/4) Epoch 5, batch 700, loss[loss=0.2614, simple_loss=0.3347, pruned_loss=0.09399, over 8244.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3552, pruned_loss=0.1191, over 1567967.15 frames. ], batch size: 24, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:11,098 INFO [train.py:901] (1/4) Epoch 5, batch 750, loss[loss=0.295, simple_loss=0.3652, pruned_loss=0.1124, over 8326.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3534, pruned_loss=0.1176, over 1575648.83 frames. 
], batch size: 25, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:13,822 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.3168, 1.8314, 1.7718, 1.6547, 1.6385, 1.7666, 2.4741, 2.2880], + device='cuda:1'), covar=tensor([0.0520, 0.1390, 0.2025, 0.1493, 0.0708, 0.1754, 0.0749, 0.0541], + device='cuda:1'), in_proj_covar=tensor([0.0142, 0.0187, 0.0225, 0.0187, 0.0141, 0.0194, 0.0151, 0.0155], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:22:14,421 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:22:36,868 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 00:22:38,059 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.66 vs. limit=5.0 +2023-02-06 00:22:40,967 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 3.306e+02 4.079e+02 5.042e+02 1.499e+03, threshold=8.159e+02, percent-clipped=7.0 +2023-02-06 00:22:45,501 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 00:22:46,150 INFO [train.py:901] (1/4) Epoch 5, batch 800, loss[loss=0.268, simple_loss=0.3454, pruned_loss=0.09531, over 8029.00 frames. ], tot_loss[loss=0.2954, simple_loss=0.3542, pruned_loss=0.1183, over 1585007.91 frames. ], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:13,727 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:23:19,949 INFO [train.py:901] (1/4) Epoch 5, batch 850, loss[loss=0.2811, simple_loss=0.3503, pruned_loss=0.106, over 8335.00 frames. ], tot_loss[loss=0.2956, simple_loss=0.3546, pruned_loss=0.1183, over 1592385.61 frames. ], batch size: 26, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:49,971 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.888e+02 3.855e+02 5.468e+02 1.103e+03, threshold=7.709e+02, percent-clipped=2.0 +2023-02-06 00:23:56,031 INFO [train.py:901] (1/4) Epoch 5, batch 900, loss[loss=0.3705, simple_loss=0.4112, pruned_loss=0.1649, over 8329.00 frames. ], tot_loss[loss=0.2953, simple_loss=0.3541, pruned_loss=0.1183, over 1598625.99 frames. 
], batch size: 26, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:59,531 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7728, 3.7781, 3.3946, 1.8682, 3.4102, 3.2558, 3.4845, 2.8892], + device='cuda:1'), covar=tensor([0.1085, 0.0752, 0.1155, 0.4497, 0.0763, 0.0975, 0.1555, 0.0913], + device='cuda:1'), in_proj_covar=tensor([0.0373, 0.0274, 0.0304, 0.0392, 0.0294, 0.0260, 0.0284, 0.0230], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:1') +2023-02-06 00:24:29,307 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4696, 1.8642, 3.2032, 1.1803, 2.1666, 1.8285, 1.5188, 1.7697], + device='cuda:1'), covar=tensor([0.1447, 0.1709, 0.0594, 0.2888, 0.1291, 0.2144, 0.1360, 0.2016], + device='cuda:1'), in_proj_covar=tensor([0.0470, 0.0444, 0.0516, 0.0525, 0.0571, 0.0508, 0.0442, 0.0585], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:24:29,755 INFO [train.py:901] (1/4) Epoch 5, batch 950, loss[loss=0.2831, simple_loss=0.3463, pruned_loss=0.11, over 8243.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3537, pruned_loss=0.1173, over 1601199.00 frames. ], batch size: 24, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:24:54,213 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3973, 1.2924, 4.5895, 1.6290, 3.8235, 3.6874, 3.9456, 3.8781], + device='cuda:1'), covar=tensor([0.0434, 0.4082, 0.0338, 0.2893, 0.1199, 0.0666, 0.0512, 0.0568], + device='cuda:1'), in_proj_covar=tensor([0.0315, 0.0480, 0.0383, 0.0395, 0.0471, 0.0387, 0.0382, 0.0428], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 00:25:01,016 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.004e+02 3.759e+02 4.642e+02 8.675e+02, threshold=7.519e+02, percent-clipped=2.0 +2023-02-06 00:25:03,075 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:25:05,105 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 00:25:06,447 INFO [train.py:901] (1/4) Epoch 5, batch 1000, loss[loss=0.2791, simple_loss=0.3306, pruned_loss=0.1139, over 7926.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3538, pruned_loss=0.1167, over 1608229.85 frames. ], batch size: 20, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:40,440 INFO [train.py:901] (1/4) Epoch 5, batch 1050, loss[loss=0.2948, simple_loss=0.3661, pruned_loss=0.1118, over 8283.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.3544, pruned_loss=0.1171, over 1614569.69 frames. ], batch size: 23, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:40,444 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-06 00:25:42,725 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8231, 1.2471, 3.4092, 1.1886, 2.1760, 3.7497, 3.7102, 3.3431], + device='cuda:1'), covar=tensor([0.1096, 0.1549, 0.0383, 0.2056, 0.0821, 0.0253, 0.0331, 0.0505], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0264, 0.0220, 0.0257, 0.0222, 0.0197, 0.0216, 0.0267], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 00:25:52,111 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 00:26:08,785 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.885e+02 3.252e+02 3.786e+02 4.850e+02 9.380e+02, threshold=7.572e+02, percent-clipped=3.0 +2023-02-06 00:26:13,583 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:14,854 INFO [train.py:901] (1/4) Epoch 5, batch 1100, loss[loss=0.2241, simple_loss=0.298, pruned_loss=0.07516, over 8245.00 frames. ], tot_loss[loss=0.2923, simple_loss=0.3527, pruned_loss=0.116, over 1607908.37 frames. ], batch size: 22, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:15,001 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9592, 1.7224, 5.9192, 2.3222, 5.2948, 5.1545, 5.5756, 5.4742], + device='cuda:1'), covar=tensor([0.0287, 0.3220, 0.0191, 0.2007, 0.0920, 0.0451, 0.0284, 0.0373], + device='cuda:1'), in_proj_covar=tensor([0.0311, 0.0471, 0.0382, 0.0390, 0.0461, 0.0377, 0.0374, 0.0420], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 00:26:23,005 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:50,043 INFO [train.py:901] (1/4) Epoch 5, batch 1150, loss[loss=0.3062, simple_loss=0.3733, pruned_loss=0.1195, over 8313.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3521, pruned_loss=0.1157, over 1608400.00 frames. ], batch size: 48, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:54,929 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:02,048 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 00:27:13,082 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:18,259 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 3.101e+02 4.052e+02 5.357e+02 1.331e+03, threshold=8.105e+02, percent-clipped=11.0 +2023-02-06 00:27:23,593 INFO [train.py:901] (1/4) Epoch 5, batch 1200, loss[loss=0.2631, simple_loss=0.3374, pruned_loss=0.09434, over 8512.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.354, pruned_loss=0.1167, over 1612795.43 frames. 
], batch size: 28, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:27:32,554 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:55,625 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3117, 1.3853, 1.5331, 1.0877, 1.4603, 1.3236, 1.6984, 1.8172], + device='cuda:1'), covar=tensor([0.0573, 0.1296, 0.1845, 0.1551, 0.0651, 0.1719, 0.0820, 0.0553], + device='cuda:1'), in_proj_covar=tensor([0.0145, 0.0187, 0.0227, 0.0188, 0.0141, 0.0197, 0.0154, 0.0159], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:28:00,122 INFO [train.py:901] (1/4) Epoch 5, batch 1250, loss[loss=0.2643, simple_loss=0.3303, pruned_loss=0.09913, over 8080.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.3552, pruned_loss=0.1179, over 1614007.87 frames. ], batch size: 21, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:08,903 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 00:28:29,020 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.057e+02 3.737e+02 5.343e+02 1.068e+03, threshold=7.474e+02, percent-clipped=1.0 +2023-02-06 00:28:34,049 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:28:34,552 INFO [train.py:901] (1/4) Epoch 5, batch 1300, loss[loss=0.3183, simple_loss=0.3762, pruned_loss=0.1302, over 8186.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3559, pruned_loss=0.1184, over 1619026.40 frames. ], batch size: 23, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:38,322 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0 +2023-02-06 00:29:10,152 INFO [train.py:901] (1/4) Epoch 5, batch 1350, loss[loss=0.2633, simple_loss=0.3401, pruned_loss=0.09324, over 8501.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.356, pruned_loss=0.1182, over 1619896.78 frames. ], batch size: 28, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:29:18,821 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:21,558 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:38,303 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:39,349 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 3.141e+02 3.942e+02 4.566e+02 9.800e+02, threshold=7.885e+02, percent-clipped=1.0 +2023-02-06 00:29:44,191 INFO [train.py:901] (1/4) Epoch 5, batch 1400, loss[loss=0.2458, simple_loss=0.3143, pruned_loss=0.08865, over 7809.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3541, pruned_loss=0.1166, over 1620039.49 frames. ], batch size: 20, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:18,165 INFO [train.py:901] (1/4) Epoch 5, batch 1450, loss[loss=0.3554, simple_loss=0.4084, pruned_loss=0.1512, over 8590.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.3548, pruned_loss=0.1174, over 1615930.54 frames. ], batch size: 31, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:32,271 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 00:30:32,510 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:49,117 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.073e+02 3.068e+02 3.705e+02 5.190e+02 1.303e+03, threshold=7.410e+02, percent-clipped=4.0 +2023-02-06 00:30:50,028 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:54,026 INFO [train.py:901] (1/4) Epoch 5, batch 1500, loss[loss=0.2578, simple_loss=0.3253, pruned_loss=0.09516, over 7813.00 frames. ], tot_loss[loss=0.2933, simple_loss=0.3537, pruned_loss=0.1165, over 1617027.08 frames. ], batch size: 20, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:54,780 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:27,613 INFO [train.py:901] (1/4) Epoch 5, batch 1550, loss[loss=0.2268, simple_loss=0.3031, pruned_loss=0.07527, over 7822.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3541, pruned_loss=0.1165, over 1621154.75 frames. ], batch size: 19, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:31:31,089 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:34,152 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-06 00:31:48,411 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:51,173 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. limit=2.0 +2023-02-06 00:31:58,130 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.067e+02 3.419e+02 4.027e+02 4.998e+02 8.696e+02, threshold=8.054e+02, percent-clipped=2.0 +2023-02-06 00:32:03,155 INFO [train.py:901] (1/4) Epoch 5, batch 1600, loss[loss=0.3463, simple_loss=0.3927, pruned_loss=0.15, over 8465.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.356, pruned_loss=0.1181, over 1618882.56 frames. ], batch size: 39, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:32:14,804 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:20,119 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:28,683 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7037, 1.8867, 1.5207, 2.3591, 1.2138, 1.4249, 1.5907, 1.9017], + device='cuda:1'), covar=tensor([0.1035, 0.1351, 0.1705, 0.0605, 0.1761, 0.2164, 0.1454, 0.1110], + device='cuda:1'), in_proj_covar=tensor([0.0271, 0.0274, 0.0292, 0.0226, 0.0260, 0.0292, 0.0289, 0.0269], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 00:32:37,058 INFO [train.py:901] (1/4) Epoch 5, batch 1650, loss[loss=0.267, simple_loss=0.3459, pruned_loss=0.09399, over 8101.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3544, pruned_loss=0.1169, over 1620012.42 frames. 
], batch size: 23, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:07,303 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.926e+02 3.722e+02 4.611e+02 9.053e+02, threshold=7.444e+02, percent-clipped=4.0 +2023-02-06 00:33:11,841 INFO [train.py:901] (1/4) Epoch 5, batch 1700, loss[loss=0.2746, simple_loss=0.3438, pruned_loss=0.1027, over 8248.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.3549, pruned_loss=0.1173, over 1620601.75 frames. ], batch size: 24, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:17,266 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:33,625 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:42,178 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 00:33:47,846 INFO [train.py:901] (1/4) Epoch 5, batch 1750, loss[loss=0.2544, simple_loss=0.3177, pruned_loss=0.0956, over 7260.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3544, pruned_loss=0.1174, over 1614839.84 frames. ], batch size: 16, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:34:16,525 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.177e+02 3.118e+02 3.687e+02 4.787e+02 9.448e+02, threshold=7.373e+02, percent-clipped=7.0 +2023-02-06 00:34:21,854 INFO [train.py:901] (1/4) Epoch 5, batch 1800, loss[loss=0.3276, simple_loss=0.3777, pruned_loss=0.1388, over 8598.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3565, pruned_loss=0.1192, over 1619556.40 frames. ], batch size: 31, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:34:36,712 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:41,453 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 00:34:43,251 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:57,299 INFO [train.py:901] (1/4) Epoch 5, batch 1850, loss[loss=0.3059, simple_loss=0.3616, pruned_loss=0.1251, over 8561.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3562, pruned_loss=0.1188, over 1623324.83 frames. ], batch size: 31, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:35:12,358 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34205.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:26,216 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.489e+02 4.150e+02 5.670e+02 1.027e+03, threshold=8.299e+02, percent-clipped=7.0 +2023-02-06 00:35:29,077 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34230.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:30,902 INFO [train.py:901] (1/4) Epoch 5, batch 1900, loss[loss=0.2682, simple_loss=0.3366, pruned_loss=0.09989, over 8282.00 frames. ], tot_loss[loss=0.2967, simple_loss=0.3554, pruned_loss=0.119, over 1621250.06 frames. 
], batch size: 23, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:35:45,547 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7029, 1.6413, 3.0115, 1.2266, 2.1853, 3.3398, 3.2460, 2.8308],
+       device='cuda:1'), covar=tensor([0.0966, 0.1213, 0.0395, 0.1894, 0.0656, 0.0238, 0.0352, 0.0609],
+       device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0260, 0.0215, 0.0255, 0.0215, 0.0192, 0.0217, 0.0264],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 00:35:56,409 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0
+2023-02-06 00:36:05,943 INFO [train.py:901] (1/4) Epoch 5, batch 1950, loss[loss=0.2733, simple_loss=0.3454, pruned_loss=0.1006, over 8515.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.356, pruned_loss=0.1194, over 1624635.26 frames. ], batch size: 28, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:36:09,867 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 00:36:18,716 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:36:23,384 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 00:36:35,388 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.967e+02 3.945e+02 4.927e+02 1.257e+03, threshold=7.890e+02, percent-clipped=2.0
+2023-02-06 00:36:40,168 INFO [train.py:901] (1/4) Epoch 5, batch 2000, loss[loss=0.3325, simple_loss=0.3937, pruned_loss=0.1356, over 8767.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3558, pruned_loss=0.1187, over 1623184.41 frames. ], batch size: 30, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:36:42,291 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 00:37:14,372 INFO [train.py:901] (1/4) Epoch 5, batch 2050, loss[loss=0.2799, simple_loss=0.3339, pruned_loss=0.113, over 8090.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3555, pruned_loss=0.1185, over 1621762.11 frames. ], batch size: 21, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:37:26,355 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9849, 3.9856, 3.6525, 1.9178, 3.5938, 3.6160, 3.7167, 3.2919],
+       device='cuda:1'), covar=tensor([0.1130, 0.0618, 0.0993, 0.4972, 0.0964, 0.0931, 0.1177, 0.0971],
+       device='cuda:1'), in_proj_covar=tensor([0.0378, 0.0280, 0.0300, 0.0392, 0.0302, 0.0265, 0.0290, 0.0238],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001],
+       device='cuda:1')
+2023-02-06 00:37:31,119 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34406.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:37:33,927 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:37:38,680 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34416.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:37:45,186 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 3.245e+02 4.066e+02 4.898e+02 1.293e+03, threshold=8.132e+02, percent-clipped=4.0
+2023-02-06 00:37:49,849 INFO [train.py:901] (1/4) Epoch 5, batch 2100, loss[loss=0.3371, simple_loss=0.3832, pruned_loss=0.1455, over 8687.00 frames. ], tot_loss[loss=0.2956, simple_loss=0.3549, pruned_loss=0.1182, over 1618399.23 frames. ], batch size: 39, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:37:50,883 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-02-06 00:37:51,417 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:37:52,359 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.45 vs. limit=5.0
+2023-02-06 00:38:08,191 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3787, 1.5556, 1.5293, 0.7694, 1.5892, 1.2673, 0.6799, 1.5253],
+       device='cuda:1'), covar=tensor([0.0158, 0.0096, 0.0079, 0.0150, 0.0099, 0.0232, 0.0226, 0.0071],
+       device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0213, 0.0175, 0.0259, 0.0207, 0.0345, 0.0273, 0.0246],
+       device='cuda:1'), out_proj_covar=tensor([1.1004e-04, 7.8018e-05, 6.2492e-05, 9.2994e-05, 7.7861e-05, 1.3715e-04,
+       1.0192e-04, 9.0270e-05], device='cuda:1')
+2023-02-06 00:38:15,732 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.15 vs. limit=5.0
+2023-02-06 00:38:23,173 INFO [train.py:901] (1/4) Epoch 5, batch 2150, loss[loss=0.2482, simple_loss=0.3119, pruned_loss=0.09228, over 7419.00 frames. ], tot_loss[loss=0.2945, simple_loss=0.3534, pruned_loss=0.1178, over 1611811.85 frames. ], batch size: 17, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:38:39,338 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9322, 1.4757, 3.4344, 1.3470, 2.1555, 3.7723, 3.5954, 3.2145],
+       device='cuda:1'), covar=tensor([0.1018, 0.1331, 0.0308, 0.1910, 0.0733, 0.0227, 0.0379, 0.0591],
+       device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0262, 0.0220, 0.0259, 0.0218, 0.0196, 0.0226, 0.0273],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-06 00:38:39,926 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:38:50,623 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34521.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:38:53,807 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.929e+02 3.753e+02 4.663e+02 1.529e+03, threshold=7.506e+02, percent-clipped=2.0
+2023-02-06 00:38:59,147 INFO [train.py:901] (1/4) Epoch 5, batch 2200, loss[loss=0.4335, simple_loss=0.4492, pruned_loss=0.2089, over 7208.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.3535, pruned_loss=0.118, over 1611979.76 frames. ], batch size: 72, lr: 1.54e-02, grad_scale: 8.0
+2023-02-06 00:39:02,751 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4792, 2.0839, 4.2853, 1.0897, 2.8343, 1.9240, 1.6606, 2.3268],
+       device='cuda:1'), covar=tensor([0.1517, 0.1818, 0.0790, 0.3218, 0.1372, 0.2492, 0.1349, 0.2438],
+       device='cuda:1'), in_proj_covar=tensor([0.0464, 0.0441, 0.0526, 0.0525, 0.0568, 0.0512, 0.0441, 0.0575],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 00:39:32,427 INFO [train.py:901] (1/4) Epoch 5, batch 2250, loss[loss=0.2735, simple_loss=0.3031, pruned_loss=0.122, over 7420.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.3549, pruned_loss=0.1191, over 1610246.77 frames. ], batch size: 17, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:39:52,221 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34611.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:39:59,498 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34622.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:40:02,635 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.996e+02 3.688e+02 4.883e+02 6.349e+02 4.437e+03, threshold=9.766e+02, percent-clipped=16.0
+2023-02-06 00:40:07,918 INFO [train.py:901] (1/4) Epoch 5, batch 2300, loss[loss=0.3042, simple_loss=0.368, pruned_loss=0.1202, over 8442.00 frames. ], tot_loss[loss=0.2984, simple_loss=0.3562, pruned_loss=0.1203, over 1610098.75 frames. ], batch size: 27, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:40:12,724 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34640.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:40:16,065 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34644.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:40:34,989 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34672.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:40:42,251 INFO [train.py:901] (1/4) Epoch 5, batch 2350, loss[loss=0.2625, simple_loss=0.3355, pruned_loss=0.0947, over 8102.00 frames. ], tot_loss[loss=0.2978, simple_loss=0.356, pruned_loss=0.1198, over 1608474.60 frames. ], batch size: 23, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:40:51,691 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34697.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:41:11,432 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 3.189e+02 4.018e+02 4.942e+02 1.178e+03, threshold=8.036e+02, percent-clipped=1.0
+2023-02-06 00:41:12,930 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34728.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 00:41:16,111 INFO [train.py:901] (1/4) Epoch 5, batch 2400, loss[loss=0.2724, simple_loss=0.351, pruned_loss=0.09688, over 8473.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.3545, pruned_loss=0.1188, over 1608671.29 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:41:47,417 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34777.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:41:51,224 INFO [train.py:901] (1/4) Epoch 5, batch 2450, loss[loss=0.2853, simple_loss=0.3614, pruned_loss=0.1046, over 8448.00 frames. ], tot_loss[loss=0.2957, simple_loss=0.3544, pruned_loss=0.1185, over 1611823.63 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:42:04,202 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34802.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:42:19,460 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.991e+02 3.791e+02 4.954e+02 1.109e+03, threshold=7.583e+02, percent-clipped=3.0
+2023-02-06 00:42:24,154 INFO [train.py:901] (1/4) Epoch 5, batch 2500, loss[loss=0.2817, simple_loss=0.3514, pruned_loss=0.106, over 8462.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.3534, pruned_loss=0.1176, over 1614511.49 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:42:29,096 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4008, 1.8644, 1.9131, 0.8385, 1.9022, 1.4808, 0.4299, 1.5877],
+       device='cuda:1'), covar=tensor([0.0216, 0.0109, 0.0098, 0.0178, 0.0147, 0.0343, 0.0290, 0.0093],
+       device='cuda:1'), in_proj_covar=tensor([0.0300, 0.0215, 0.0178, 0.0258, 0.0207, 0.0347, 0.0275, 0.0247],
+       device='cuda:1'), out_proj_covar=tensor([1.1089e-04, 7.8472e-05, 6.3019e-05, 9.2484e-05, 7.7289e-05, 1.3744e-04,
+       1.0226e-04, 9.0032e-05], device='cuda:1')
+2023-02-06 00:42:53,616 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 00:42:55,973 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34878.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:42:59,771 INFO [train.py:901] (1/4) Epoch 5, batch 2550, loss[loss=0.2713, simple_loss=0.3336, pruned_loss=0.1045, over 7926.00 frames. ], tot_loss[loss=0.2961, simple_loss=0.3547, pruned_loss=0.1187, over 1614534.83 frames. ], batch size: 20, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:43:13,493 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34903.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:43:29,143 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 2.947e+02 3.618e+02 4.736e+02 1.253e+03, threshold=7.237e+02, percent-clipped=4.0
+2023-02-06 00:43:31,377 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0284, 1.0069, 4.2388, 1.5657, 3.6107, 3.5502, 3.7833, 3.7023],
+       device='cuda:1'), covar=tensor([0.0408, 0.3802, 0.0302, 0.2303, 0.0951, 0.0549, 0.0419, 0.0527],
+       device='cuda:1'), in_proj_covar=tensor([0.0318, 0.0492, 0.0403, 0.0413, 0.0482, 0.0397, 0.0400, 0.0444],
+       device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-06 00:43:33,924 INFO [train.py:901] (1/4) Epoch 5, batch 2600, loss[loss=0.3353, simple_loss=0.3926, pruned_loss=0.139, over 8110.00 frames. ], tot_loss[loss=0.2948, simple_loss=0.3537, pruned_loss=0.118, over 1612955.21 frames. ], batch size: 23, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:43:40,976 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2969, 1.7094, 1.6201, 0.5277, 1.7263, 1.2304, 0.2979, 1.5624],
+       device='cuda:1'), covar=tensor([0.0167, 0.0091, 0.0077, 0.0166, 0.0086, 0.0307, 0.0237, 0.0065],
+       device='cuda:1'), in_proj_covar=tensor([0.0303, 0.0217, 0.0179, 0.0261, 0.0206, 0.0350, 0.0275, 0.0249],
+       device='cuda:1'), out_proj_covar=tensor([1.1228e-04, 7.9056e-05, 6.3550e-05, 9.3850e-05, 7.7019e-05, 1.3833e-04,
+       1.0169e-04, 9.0907e-05], device='cuda:1')
+2023-02-06 00:43:49,011 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34955.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:44:09,533 INFO [train.py:901] (1/4) Epoch 5, batch 2650, loss[loss=0.2718, simple_loss=0.3236, pruned_loss=0.11, over 7711.00 frames. ], tot_loss[loss=0.2935, simple_loss=0.3529, pruned_loss=0.1171, over 1613863.05 frames. ], batch size: 18, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:44:10,276 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34984.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:44:13,022 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34988.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:44:21,474 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0638, 2.2822, 4.0078, 3.1061, 3.0528, 2.3661, 1.6032, 1.7686],
+       device='cuda:1'), covar=tensor([0.1603, 0.2285, 0.0428, 0.0897, 0.0933, 0.0945, 0.1137, 0.2100],
+       device='cuda:1'), in_proj_covar=tensor([0.0743, 0.0668, 0.0571, 0.0650, 0.0751, 0.0626, 0.0602, 0.0625],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 00:44:39,310 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.922e+02 3.827e+02 4.980e+02 8.274e+02, threshold=7.654e+02, percent-clipped=5.0
+2023-02-06 00:44:43,771 INFO [train.py:901] (1/4) Epoch 5, batch 2700, loss[loss=0.3096, simple_loss=0.3683, pruned_loss=0.1254, over 8188.00 frames. ], tot_loss[loss=0.2935, simple_loss=0.3531, pruned_loss=0.1169, over 1610426.10 frames. ], batch size: 23, lr: 1.53e-02, grad_scale: 8.0
+2023-02-06 00:45:09,481 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35070.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:45:10,725 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35072.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 00:45:18,313 INFO [train.py:901] (1/4) Epoch 5, batch 2750, loss[loss=0.2908, simple_loss=0.3649, pruned_loss=0.1083, over 8289.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3521, pruned_loss=0.1159, over 1607302.21 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:45:24,479 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7366, 1.8534, 2.0757, 1.6069, 1.0742, 2.0555, 0.3494, 1.2981],
+       device='cuda:1'), covar=tensor([0.3819, 0.1974, 0.0999, 0.3176, 0.6587, 0.0822, 0.5821, 0.2310],
+       device='cuda:1'), in_proj_covar=tensor([0.0129, 0.0126, 0.0079, 0.0166, 0.0207, 0.0081, 0.0146, 0.0121],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 00:45:29,768 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35099.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:45:32,528 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35103.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:45:37,954 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35110.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:45:48,688 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.004e+02 3.039e+02 3.659e+02 5.251e+02 1.248e+03, threshold=7.317e+02, percent-clipped=8.0
+2023-02-06 00:45:53,678 INFO [train.py:901] (1/4) Epoch 5, batch 2800, loss[loss=0.225, simple_loss=0.2941, pruned_loss=0.07793, over 7530.00 frames. ], tot_loss[loss=0.292, simple_loss=0.3516, pruned_loss=0.1162, over 1603134.33 frames. ], batch size: 18, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:46:27,525 INFO [train.py:901] (1/4) Epoch 5, batch 2850, loss[loss=0.2513, simple_loss=0.3124, pruned_loss=0.09512, over 7526.00 frames. ], tot_loss[loss=0.291, simple_loss=0.3511, pruned_loss=0.1154, over 1606976.38 frames. ], batch size: 18, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:46:30,533 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35187.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 00:46:58,361 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.990e+02 3.598e+02 4.675e+02 1.498e+03, threshold=7.197e+02, percent-clipped=4.0
+2023-02-06 00:47:03,666 INFO [train.py:901] (1/4) Epoch 5, batch 2900, loss[loss=0.3412, simple_loss=0.3828, pruned_loss=0.1498, over 7982.00 frames. ], tot_loss[loss=0.2923, simple_loss=0.3519, pruned_loss=0.1163, over 1609772.38 frames. ], batch size: 21, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:47:36,536 INFO [train.py:901] (1/4) Epoch 5, batch 2950, loss[loss=0.3116, simple_loss=0.376, pruned_loss=0.1236, over 8186.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3527, pruned_loss=0.1164, over 1609841.00 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:47:41,854 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 00:48:06,831 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 3.185e+02 3.825e+02 4.988e+02 1.295e+03, threshold=7.649e+02, percent-clipped=4.0
+2023-02-06 00:48:07,076 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:48:12,108 INFO [train.py:901] (1/4) Epoch 5, batch 3000, loss[loss=0.2497, simple_loss=0.3261, pruned_loss=0.08663, over 8099.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3528, pruned_loss=0.1163, over 1614040.89 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:48:12,108 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 00:48:25,515 INFO [train.py:935] (1/4) Epoch 5, validation: loss=0.2228, simple_loss=0.319, pruned_loss=0.0633, over 944034.00 frames. 
+2023-02-06 00:48:25,516 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB
+2023-02-06 00:48:39,251 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35351.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:48:41,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0525, 4.0422, 3.6970, 1.7839, 3.6477, 3.5964, 3.7762, 3.0746],
+       device='cuda:1'), covar=tensor([0.1008, 0.0635, 0.0948, 0.4513, 0.0804, 0.0692, 0.1268, 0.0936],
+       device='cuda:1'), in_proj_covar=tensor([0.0388, 0.0280, 0.0314, 0.0398, 0.0309, 0.0270, 0.0295, 0.0240],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001],
+       device='cuda:1')
+2023-02-06 00:48:41,991 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35355.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:48:43,991 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9724, 1.4640, 1.4057, 1.2986, 1.2602, 1.3826, 1.5149, 1.3355],
+       device='cuda:1'), covar=tensor([0.0614, 0.1200, 0.1849, 0.1384, 0.0658, 0.1520, 0.0802, 0.0654],
+       device='cuda:1'), in_proj_covar=tensor([0.0142, 0.0184, 0.0223, 0.0185, 0.0138, 0.0193, 0.0148, 0.0156],
+       device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005],
+       device='cuda:1')
+2023-02-06 00:48:44,701 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35359.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:48:47,343 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35363.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 00:48:59,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35380.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:49:01,094 INFO [train.py:901] (1/4) Epoch 5, batch 3050, loss[loss=0.273, simple_loss=0.3409, pruned_loss=0.1025, over 8580.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3542, pruned_loss=0.1169, over 1619867.20 frames. ], batch size: 39, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:49:01,964 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35384.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:49:07,185 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:49:29,668 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.238e+02 3.010e+02 3.735e+02 4.816e+02 9.592e+02, threshold=7.471e+02, percent-clipped=3.0
+2023-02-06 00:49:34,197 INFO [train.py:901] (1/4) Epoch 5, batch 3100, loss[loss=0.3128, simple_loss=0.3777, pruned_loss=0.1239, over 8581.00 frames. ], tot_loss[loss=0.2929, simple_loss=0.3537, pruned_loss=0.1161, over 1625476.45 frames. ], batch size: 34, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:49:41,040 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35443.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 00:49:48,047 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35454.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:49:59,664 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35468.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 00:50:09,995 INFO [train.py:901] (1/4) Epoch 5, batch 3150, loss[loss=0.3585, simple_loss=0.4148, pruned_loss=0.1511, over 8454.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3542, pruned_loss=0.1169, over 1622465.20 frames. ], batch size: 27, lr: 1.52e-02, grad_scale: 8.0
+2023-02-06 00:50:21,416 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-06 00:50:39,624 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.108e+02 3.249e+02 4.087e+02 5.030e+02 9.472e+02, threshold=8.174e+02, percent-clipped=3.0
+2023-02-06 00:50:44,432 INFO [train.py:901] (1/4) Epoch 5, batch 3200, loss[loss=0.2669, simple_loss=0.326, pruned_loss=0.1039, over 7814.00 frames. ], tot_loss[loss=0.2935, simple_loss=0.3538, pruned_loss=0.1166, over 1623642.81 frames. ], batch size: 19, lr: 1.51e-02, grad_scale: 8.0
+2023-02-06 00:50:54,842 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:51:09,483 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35569.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:51:20,169 INFO [train.py:901] (1/4) Epoch 5, batch 3250, loss[loss=0.2578, simple_loss=0.3261, pruned_loss=0.0947, over 7520.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3543, pruned_loss=0.1167, over 1624383.25 frames. ], batch size: 18, lr: 1.51e-02, grad_scale: 8.0
+2023-02-06 00:51:50,106 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5571, 1.8501, 3.6146, 1.1672, 2.4728, 1.8730, 1.6296, 2.1164],
+       device='cuda:1'), covar=tensor([0.1405, 0.1894, 0.0612, 0.2908, 0.1356, 0.2212, 0.1354, 0.2211],
+       device='cuda:1'), in_proj_covar=tensor([0.0473, 0.0451, 0.0530, 0.0541, 0.0581, 0.0523, 0.0458, 0.0595],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 00:51:50,494 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.378e+02 4.149e+02 5.121e+02 1.146e+03, threshold=8.298e+02, percent-clipped=3.0
+2023-02-06 00:51:55,293 INFO [train.py:901] (1/4) Epoch 5, batch 3300, loss[loss=0.2354, simple_loss=0.2975, pruned_loss=0.08665, over 7715.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3543, pruned_loss=0.1175, over 1620650.77 frames. ], batch size: 18, lr: 1.51e-02, grad_scale: 8.0
+2023-02-06 00:52:07,279 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3540, 2.0590, 4.4616, 2.0342, 2.5165, 5.1378, 4.9871, 4.4973],
+       device='cuda:1'), covar=tensor([0.1095, 0.1252, 0.0244, 0.1744, 0.0740, 0.0189, 0.0258, 0.0479],
+       device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0262, 0.0219, 0.0263, 0.0218, 0.0195, 0.0227, 0.0271],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-06 00:52:21,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3886, 2.0246, 3.1473, 2.3531, 2.6542, 2.0122, 1.4362, 1.3620],
+       device='cuda:1'), covar=tensor([0.1739, 0.1956, 0.0401, 0.1009, 0.0816, 0.0996, 0.1159, 0.1888],
+       device='cuda:1'), in_proj_covar=tensor([0.0733, 0.0664, 0.0571, 0.0653, 0.0755, 0.0624, 0.0603, 0.0617],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 00:52:30,161 INFO [train.py:901] (1/4) Epoch 5, batch 3350, loss[loss=0.2506, simple_loss=0.3241, pruned_loss=0.08851, over 8198.00 frames. ], tot_loss[loss=0.293, simple_loss=0.3529, pruned_loss=0.1165, over 1617211.03 frames. ], batch size: 23, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:52:47,838 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35707.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 00:53:01,472 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.966e+02 3.555e+02 4.125e+02 4.946e+02 1.065e+03, threshold=8.250e+02, percent-clipped=5.0
+2023-02-06 00:53:06,244 INFO [train.py:901] (1/4) Epoch 5, batch 3400, loss[loss=0.307, simple_loss=0.3637, pruned_loss=0.1251, over 8250.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3516, pruned_loss=0.1159, over 1613860.71 frames. ], batch size: 24, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:53:08,410 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35736.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:53:39,726 INFO [train.py:901] (1/4) Epoch 5, batch 3450, loss[loss=0.2542, simple_loss=0.3264, pruned_loss=0.09099, over 7931.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3519, pruned_loss=0.116, over 1612336.75 frames. ], batch size: 20, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:53:43,885 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:54:07,868 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35822.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 00:54:09,936 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35825.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:54:10,371 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.123e+02 3.051e+02 3.738e+02 4.571e+02 6.690e+02, threshold=7.475e+02, percent-clipped=0.0
+2023-02-06 00:54:12,579 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35829.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:54:15,094 INFO [train.py:901] (1/4) Epoch 5, batch 3500, loss[loss=0.2047, simple_loss=0.2792, pruned_loss=0.06504, over 7278.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.3511, pruned_loss=0.1147, over 1612360.81 frames. ], batch size: 16, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:54:27,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:54:27,681 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35851.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:54:40,689 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 00:54:48,819 INFO [train.py:901] (1/4) Epoch 5, batch 3550, loss[loss=0.2165, simple_loss=0.291, pruned_loss=0.07104, over 8245.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3501, pruned_loss=0.1139, over 1616108.17 frames. ], batch size: 24, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:54:54,909 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35892.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:55:19,537 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.232e+02 3.318e+02 3.882e+02 4.908e+02 1.221e+03, threshold=7.763e+02, percent-clipped=6.0
+2023-02-06 00:55:23,582 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5874, 1.9581, 2.2723, 0.9731, 2.2974, 1.3927, 0.5856, 1.6970],
+       device='cuda:1'), covar=tensor([0.0184, 0.0097, 0.0068, 0.0175, 0.0076, 0.0314, 0.0251, 0.0089],
+       device='cuda:1'), in_proj_covar=tensor([0.0304, 0.0223, 0.0178, 0.0269, 0.0205, 0.0352, 0.0276, 0.0249],
+       device='cuda:1'), out_proj_covar=tensor([1.1077e-04, 8.0166e-05, 6.2543e-05, 9.5879e-05, 7.4915e-05, 1.3728e-04,
+       1.0157e-04, 8.9133e-05], device='cuda:1')
+2023-02-06 00:55:23,994 INFO [train.py:901] (1/4) Epoch 5, batch 3600, loss[loss=0.309, simple_loss=0.378, pruned_loss=0.12, over 8250.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3494, pruned_loss=0.1135, over 1614130.29 frames. ], batch size: 24, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:55:33,166 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0
+2023-02-06 00:55:34,957 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2569, 1.4678, 1.4310, 1.3075, 1.2924, 1.3642, 1.7329, 1.5378],
+       device='cuda:1'), covar=tensor([0.0587, 0.1194, 0.1958, 0.1400, 0.0622, 0.1585, 0.0723, 0.0563],
+       device='cuda:1'), in_proj_covar=tensor([0.0141, 0.0183, 0.0225, 0.0187, 0.0137, 0.0194, 0.0147, 0.0159],
+       device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006],
+       device='cuda:1')
+2023-02-06 00:55:57,699 INFO [train.py:901] (1/4) Epoch 5, batch 3650, loss[loss=0.3075, simple_loss=0.3529, pruned_loss=0.131, over 7523.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3518, pruned_loss=0.1147, over 1618205.56 frames. ], batch size: 18, lr: 1.51e-02, grad_scale: 16.0
+2023-02-06 00:56:15,071 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:56:19,743 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4876, 4.5536, 3.9329, 1.8867, 4.0019, 3.8969, 4.1854, 3.4555],
+       device='cuda:1'), covar=tensor([0.0687, 0.0462, 0.0887, 0.4512, 0.0791, 0.0849, 0.0968, 0.0751],
+       device='cuda:1'), in_proj_covar=tensor([0.0387, 0.0283, 0.0316, 0.0401, 0.0313, 0.0275, 0.0298, 0.0244],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001],
+       device='cuda:1')
+2023-02-06 00:56:27,673 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.237e+02 3.345e+02 4.197e+02 5.280e+02 9.599e+02, threshold=8.394e+02, percent-clipped=10.0
+2023-02-06 00:56:32,340 INFO [train.py:901] (1/4) Epoch 5, batch 3700, loss[loss=0.2757, simple_loss=0.3214, pruned_loss=0.115, over 7922.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.353, pruned_loss=0.1162, over 1617175.47 frames. ], batch size: 20, lr: 1.50e-02, grad_scale: 16.0
+2023-02-06 00:56:40,787 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 00:57:04,809 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36078.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 00:57:07,931 INFO [train.py:901] (1/4) Epoch 5, batch 3750, loss[loss=0.2553, simple_loss=0.3322, pruned_loss=0.08923, over 8300.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3527, pruned_loss=0.1158, over 1615538.66 frames. ], batch size: 23, lr: 1.50e-02, grad_scale: 16.0
+2023-02-06 00:57:21,381 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36103.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 00:57:24,136 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36107.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:57:28,741 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3245, 1.5107, 1.7168, 1.3417, 1.3725, 1.4923, 1.7793, 1.7735],
+       device='cuda:1'), covar=tensor([0.0627, 0.1346, 0.1741, 0.1476, 0.0660, 0.1540, 0.0816, 0.0585],
+       device='cuda:1'), in_proj_covar=tensor([0.0139, 0.0181, 0.0222, 0.0185, 0.0136, 0.0191, 0.0148, 0.0157],
+       device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006],
+       device='cuda:1')
+2023-02-06 00:57:37,193 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 3.033e+02 3.704e+02 4.599e+02 1.470e+03, threshold=7.408e+02, percent-clipped=9.0
+2023-02-06 00:57:40,782 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36132.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:57:41,243 INFO [train.py:901] (1/4) Epoch 5, batch 3800, loss[loss=0.3348, simple_loss=0.3869, pruned_loss=0.1414, over 8324.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3523, pruned_loss=0.1159, over 1614416.69 frames. ], batch size: 25, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 00:57:41,310 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36133.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:57:44,350 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-06 00:58:09,342 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36173.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:58:15,719 INFO [train.py:901] (1/4) Epoch 5, batch 3850, loss[loss=0.3463, simple_loss=0.4032, pruned_loss=0.1447, over 8578.00 frames. ], tot_loss[loss=0.2932, simple_loss=0.3534, pruned_loss=0.1165, over 1616097.98 frames. ], batch size: 31, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 00:58:27,210 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36199.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:58:41,068 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 00:58:45,745 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 3.284e+02 4.097e+02 5.243e+02 1.380e+03, threshold=8.194e+02, percent-clipped=10.0
+2023-02-06 00:58:49,717 INFO [train.py:901] (1/4) Epoch 5, batch 3900, loss[loss=0.306, simple_loss=0.3657, pruned_loss=0.1232, over 8569.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3519, pruned_loss=0.1155, over 1615539.79 frames. ], batch size: 31, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 00:58:51,248 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3127, 1.3353, 4.4537, 1.7872, 3.6696, 3.7011, 3.9093, 3.8753],
+       device='cuda:1'), covar=tensor([0.0388, 0.3850, 0.0366, 0.2378, 0.1263, 0.0613, 0.0462, 0.0554],
+       device='cuda:1'), in_proj_covar=tensor([0.0335, 0.0498, 0.0414, 0.0419, 0.0493, 0.0407, 0.0398, 0.0450],
+       device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-06 00:58:59,605 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36248.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:59:09,648 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:59:24,827 INFO [train.py:901] (1/4) Epoch 5, batch 3950, loss[loss=0.2661, simple_loss=0.3316, pruned_loss=0.1003, over 7649.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3508, pruned_loss=0.1151, over 1611480.21 frames. ], batch size: 19, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 00:59:28,384 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36288.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:59:28,408 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36288.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 00:59:35,016 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0579, 3.2905, 3.4197, 2.6077, 1.8573, 3.1845, 0.8536, 2.0531],
+       device='cuda:1'), covar=tensor([0.6981, 0.1531, 0.0676, 0.2104, 0.4766, 0.1355, 0.6653, 0.2561],
+       device='cuda:1'), in_proj_covar=tensor([0.0131, 0.0129, 0.0082, 0.0171, 0.0212, 0.0085, 0.0149, 0.0126],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 00:59:45,899 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5461, 4.6070, 3.9778, 1.6251, 3.8780, 4.0236, 4.2389, 3.4869],
+       device='cuda:1'), covar=tensor([0.0748, 0.0453, 0.0916, 0.4898, 0.0754, 0.0624, 0.1135, 0.0823],
+       device='cuda:1'), in_proj_covar=tensor([0.0394, 0.0282, 0.0323, 0.0406, 0.0315, 0.0273, 0.0304, 0.0247],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 00:59:51,387 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1248, 3.1060, 2.8286, 1.3572, 2.7484, 2.6507, 2.9151, 2.4593],
+       device='cuda:1'), covar=tensor([0.1217, 0.0833, 0.1306, 0.4450, 0.1067, 0.1166, 0.1482, 0.1031],
+       device='cuda:1'), in_proj_covar=tensor([0.0395, 0.0283, 0.0321, 0.0406, 0.0315, 0.0274, 0.0303, 0.0247],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001],
+       device='cuda:1')
+2023-02-06 00:59:54,576 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.894e+02 2.959e+02 3.540e+02 4.519e+02 1.633e+03, threshold=7.079e+02, percent-clipped=6.0
+2023-02-06 00:59:58,373 INFO [train.py:901] (1/4) Epoch 5, batch 4000, loss[loss=0.3118, simple_loss=0.3697, pruned_loss=0.127, over 8516.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.3518, pruned_loss=0.1163, over 1609012.59 frames. ], batch size: 28, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 01:00:19,939 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0984, 3.3854, 2.4948, 4.2172, 2.1150, 2.3379, 2.7228, 3.5521],
+       device='cuda:1'), covar=tensor([0.0849, 0.0901, 0.1437, 0.0244, 0.1554, 0.1935, 0.1483, 0.0734],
+       device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0259, 0.0284, 0.0225, 0.0251, 0.0285, 0.0286, 0.0261],
+       device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:1')
+2023-02-06 01:00:22,757 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4997, 1.9761, 3.1769, 2.5816, 2.5406, 2.0283, 1.4460, 1.1912],
+       device='cuda:1'), covar=tensor([0.1597, 0.2072, 0.0385, 0.0966, 0.0911, 0.0920, 0.1030, 0.2085],
+       device='cuda:1'), in_proj_covar=tensor([0.0737, 0.0666, 0.0563, 0.0657, 0.0759, 0.0620, 0.0600, 0.0614],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 01:00:31,744 INFO [train.py:901] (1/4) Epoch 5, batch 4050, loss[loss=0.2442, simple_loss=0.3186, pruned_loss=0.08493, over 8294.00 frames. ], tot_loss[loss=0.2903, simple_loss=0.3505, pruned_loss=0.115, over 1611393.03 frames. ], batch size: 23, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 01:00:38,671 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4592, 1.7427, 2.8636, 1.1151, 2.1349, 1.6393, 1.4016, 1.7490],
+       device='cuda:1'), covar=tensor([0.1438, 0.1784, 0.0556, 0.3149, 0.1230, 0.2324, 0.1430, 0.1906],
+       device='cuda:1'), in_proj_covar=tensor([0.0468, 0.0445, 0.0523, 0.0536, 0.0570, 0.0516, 0.0451, 0.0585],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 01:01:03,217 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 3.036e+02 3.577e+02 4.439e+02 7.437e+02, threshold=7.154e+02, percent-clipped=1.0
+2023-02-06 01:01:07,958 INFO [train.py:901] (1/4) Epoch 5, batch 4100, loss[loss=0.3363, simple_loss=0.3826, pruned_loss=0.145, over 7141.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3512, pruned_loss=0.1151, over 1612021.64 frames. ], batch size: 74, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 01:01:28,298 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 01:01:41,933 INFO [train.py:901] (1/4) Epoch 5, batch 4150, loss[loss=0.2143, simple_loss=0.2846, pruned_loss=0.07198, over 7532.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3506, pruned_loss=0.1145, over 1608997.66 frames. ], batch size: 18, lr: 1.50e-02, grad_scale: 8.0
+2023-02-06 01:01:56,690 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:02:13,605 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.751e+02 3.740e+02 4.679e+02 9.033e+02, threshold=7.480e+02, percent-clipped=3.0
+2023-02-06 01:02:15,177 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36529.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:02:17,732 INFO [train.py:901] (1/4) Epoch 5, batch 4200, loss[loss=0.3545, simple_loss=0.4041, pruned_loss=0.1525, over 8367.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3511, pruned_loss=0.1142, over 1612990.84 frames. ], batch size: 24, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:02:24,838 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36543.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:02:25,678 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36544.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:02:36,568 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3496, 1.2635, 2.3478, 1.1067, 2.0902, 2.5334, 2.4695, 2.1086],
+       device='cuda:1'), covar=tensor([0.0980, 0.1168, 0.0436, 0.1875, 0.0571, 0.0310, 0.0458, 0.0741],
+       device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0275, 0.0225, 0.0268, 0.0227, 0.0200, 0.0232, 0.0276],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-06 01:02:43,318 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 01:02:43,520 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36569.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:02:52,958 INFO [train.py:901] (1/4) Epoch 5, batch 4250, loss[loss=0.3316, simple_loss=0.3899, pruned_loss=0.1367, over 8465.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3512, pruned_loss=0.1145, over 1615564.48 frames. ], batch size: 27, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:03:05,690 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 01:03:14,953 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1731, 1.1005, 3.2901, 0.9303, 2.8379, 2.7676, 2.9822, 2.9064],
+       device='cuda:1'), covar=tensor([0.0519, 0.3000, 0.0556, 0.2457, 0.1363, 0.0800, 0.0532, 0.0654],
+       device='cuda:1'), in_proj_covar=tensor([0.0323, 0.0482, 0.0404, 0.0416, 0.0477, 0.0400, 0.0387, 0.0442],
+       device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-06 01:03:20,907 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36623.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:03:24,284 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36626.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:03:24,819 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.959e+02 3.120e+02 3.802e+02 4.654e+02 9.583e+02, threshold=7.605e+02, percent-clipped=3.0
+2023-02-06 01:03:27,119 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36630.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:03:28,977 INFO [train.py:901] (1/4) Epoch 5, batch 4300, loss[loss=0.2757, simple_loss=0.3393, pruned_loss=0.1061, over 7964.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3498, pruned_loss=0.1137, over 1611952.20 frames. ], batch size: 21, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:03:47,173 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36658.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:04:04,991 INFO [train.py:901] (1/4) Epoch 5, batch 4350, loss[loss=0.2868, simple_loss=0.3502, pruned_loss=0.1117, over 8198.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.3494, pruned_loss=0.1134, over 1615166.38 frames. ], batch size: 23, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:04:28,891 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36718.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 01:04:30,845 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0168, 1.6384, 4.2094, 1.7411, 3.6940, 3.6079, 3.8654, 3.7389],
+       device='cuda:1'), covar=tensor([0.0468, 0.3111, 0.0382, 0.2291, 0.1010, 0.0600, 0.0391, 0.0496],
+       device='cuda:1'), in_proj_covar=tensor([0.0327, 0.0487, 0.0414, 0.0417, 0.0478, 0.0405, 0.0393, 0.0446],
+       device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-06 01:04:34,836 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.000e+02 3.265e+02 4.032e+02 4.973e+02 1.053e+03, threshold=8.064e+02, percent-clipped=5.0
+2023-02-06 01:04:36,266 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 01:04:38,955 INFO [train.py:901] (1/4) Epoch 5, batch 4400, loss[loss=0.2702, simple_loss=0.3262, pruned_loss=0.1071, over 7302.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3496, pruned_loss=0.114, over 1615446.46 frames. ], batch size: 16, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:05:15,304 INFO [train.py:901] (1/4) Epoch 5, batch 4450, loss[loss=0.2707, simple_loss=0.3467, pruned_loss=0.09732, over 8191.00 frames. ], tot_loss[loss=0.2896, simple_loss=0.3497, pruned_loss=0.1147, over 1610303.17 frames. ], batch size: 23, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:05:18,076 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 01:05:43,200 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36823.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 01:05:45,715 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 3.029e+02 3.648e+02 4.687e+02 9.435e+02, threshold=7.296e+02, percent-clipped=4.0
+2023-02-06 01:05:49,708 INFO [train.py:901] (1/4) Epoch 5, batch 4500, loss[loss=0.2888, simple_loss=0.339, pruned_loss=0.1193, over 8037.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3493, pruned_loss=0.114, over 1609835.66 frames. ], batch size: 22, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:06:14,966 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 01:06:26,108 INFO [train.py:901] (1/4) Epoch 5, batch 4550, loss[loss=0.2796, simple_loss=0.3354, pruned_loss=0.1119, over 8082.00 frames. ], tot_loss[loss=0.2881, simple_loss=0.349, pruned_loss=0.1136, over 1610940.88 frames. ], batch size: 21, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:06:39,984 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-02-06 01:06:47,955 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36914.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:06:56,558 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 3.174e+02 3.779e+02 4.790e+02 8.988e+02, threshold=7.559e+02, percent-clipped=4.0
+2023-02-06 01:06:58,095 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36929.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:07:00,568 INFO [train.py:901] (1/4) Epoch 5, batch 4600, loss[loss=0.2596, simple_loss=0.3106, pruned_loss=0.1043, over 7259.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3501, pruned_loss=0.1142, over 1615269.13 frames. ], batch size: 16, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:07:04,720 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36939.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:07:10,000 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1476, 4.0681, 3.6813, 1.7590, 3.5990, 3.5253, 3.7582, 3.1977],
+       device='cuda:1'), covar=tensor([0.0940, 0.0757, 0.1153, 0.4785, 0.0899, 0.0804, 0.1624, 0.0831],
+       device='cuda:1'), in_proj_covar=tensor([0.0402, 0.0292, 0.0327, 0.0410, 0.0321, 0.0273, 0.0308, 0.0253],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 01:07:15,528 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3023, 1.7699, 1.6947, 0.6517, 1.7304, 1.2194, 0.2389, 1.6056],
+       device='cuda:1'), covar=tensor([0.0169, 0.0078, 0.0066, 0.0156, 0.0094, 0.0302, 0.0267, 0.0084],
+       device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0225, 0.0179, 0.0264, 0.0211, 0.0358, 0.0281, 0.0257],
+       device='cuda:1'), out_proj_covar=tensor([1.1142e-04, 7.9431e-05, 6.1587e-05, 9.3178e-05, 7.6574e-05, 1.3819e-04,
+       1.0244e-04, 9.1805e-05], device='cuda:1')
+2023-02-06 01:07:23,552 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:07:25,631 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36970.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:07:29,033 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36974.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:07:30,328 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36976.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:07:35,643 INFO [train.py:901] (1/4) Epoch 5, batch 4650, loss[loss=0.2173, simple_loss=0.2804, pruned_loss=0.07711, over 7519.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3503, pruned_loss=0.1146, over 1613254.15 frames. ], batch size: 18, lr: 1.49e-02, grad_scale: 8.0
+2023-02-06 01:08:02,861 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37022.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:08:06,058 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 3.207e+02 3.974e+02 5.163e+02 9.904e+02, threshold=7.949e+02, percent-clipped=4.0
+2023-02-06 01:08:10,735 INFO [train.py:901] (1/4) Epoch 5, batch 4700, loss[loss=0.3055, simple_loss=0.3669, pruned_loss=0.122, over 8240.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3481, pruned_loss=0.1119, over 1616198.50 frames. ], batch size: 24, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:08:16,876 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4784, 1.5603, 1.5342, 1.3703, 1.4079, 1.5176, 1.8561, 1.6514],
+       device='cuda:1'), covar=tensor([0.0542, 0.1210, 0.1771, 0.1449, 0.0635, 0.1457, 0.0782, 0.0575],
+       device='cuda:1'), in_proj_covar=tensor([0.0138, 0.0181, 0.0222, 0.0183, 0.0132, 0.0191, 0.0147, 0.0156],
+       device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006],
+       device='cuda:1')
+2023-02-06 01:08:29,891 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37062.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 01:08:43,608 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:08:44,127 INFO [train.py:901] (1/4) Epoch 5, batch 4750, loss[loss=0.2665, simple_loss=0.3397, pruned_loss=0.09666, over 8677.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3491, pruned_loss=0.1128, over 1616751.38 frames. ], batch size: 34, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:08:45,716 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37085.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:08:49,147 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37089.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:09:08,562 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7351, 5.8322, 5.1130, 2.0866, 5.0629, 5.4280, 5.3877, 4.8873],
+       device='cuda:1'), covar=tensor([0.0631, 0.0295, 0.0657, 0.4591, 0.0651, 0.0455, 0.0716, 0.0504],
+       device='cuda:1'), in_proj_covar=tensor([0.0399, 0.0290, 0.0324, 0.0406, 0.0323, 0.0270, 0.0305, 0.0247],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 01:09:15,977 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 3.010e+02 3.846e+02 4.879e+02 1.523e+03, threshold=7.692e+02, percent-clipped=5.0
+2023-02-06 01:09:17,408 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 01:09:20,182 INFO [train.py:901] (1/4) Epoch 5, batch 4800, loss[loss=0.308, simple_loss=0.3526, pruned_loss=0.1317, over 7795.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3493, pruned_loss=0.1134, over 1613702.47 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:09:20,191 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 01:09:25,059 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7394, 1.3158, 5.7948, 2.0519, 5.1550, 5.0544, 5.3918, 5.3667],
+       device='cuda:1'), covar=tensor([0.0403, 0.3982, 0.0227, 0.2449, 0.0847, 0.0398, 0.0307, 0.0398],
+       device='cuda:1'), in_proj_covar=tensor([0.0331, 0.0486, 0.0415, 0.0413, 0.0475, 0.0399, 0.0397, 0.0443],
+       device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-06 01:09:40,399 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1885, 1.8400, 2.7924, 2.2987, 2.4246, 1.9168, 1.4329, 0.9261],
+       device='cuda:1'), covar=tensor([0.1786, 0.1855, 0.0419, 0.0905, 0.0745, 0.0933, 0.0977, 0.1802],
+       device='cuda:1'), in_proj_covar=tensor([0.0744, 0.0678, 0.0572, 0.0660, 0.0760, 0.0630, 0.0598, 0.0618],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 01:09:44,355 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37167.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 01:09:51,296 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37177.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 01:09:55,233 INFO [train.py:901] (1/4) Epoch 5, batch 4850, loss[loss=0.2246, simple_loss=0.2886, pruned_loss=0.0803, over 7782.00 frames. ], tot_loss[loss=0.287, simple_loss=0.348, pruned_loss=0.113, over 1612744.96 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:10:10,652 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 01:10:27,459 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.956e+02 3.581e+02 4.871e+02 1.087e+03, threshold=7.163e+02, percent-clipped=6.0
+2023-02-06 01:10:31,481 INFO [train.py:901] (1/4) Epoch 5, batch 4900, loss[loss=0.2541, simple_loss=0.3207, pruned_loss=0.09374, over 7660.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3479, pruned_loss=0.1134, over 1611239.92 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:10:59,365 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37273.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:11:05,611 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37282.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 01:11:06,080 INFO [train.py:901] (1/4) Epoch 5, batch 4950, loss[loss=0.3455, simple_loss=0.3983, pruned_loss=0.1464, over 8547.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3484, pruned_loss=0.1137, over 1613070.67 frames. ], batch size: 49, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:11:08,197 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37286.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:11:31,041 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37320.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:11:35,591 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 3.052e+02 3.616e+02 4.696e+02 1.143e+03, threshold=7.231e+02, percent-clipped=5.0
+2023-02-06 01:11:40,307 INFO [train.py:901] (1/4) Epoch 5, batch 5000, loss[loss=0.2806, simple_loss=0.3523, pruned_loss=0.1045, over 8516.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3499, pruned_loss=0.1144, over 1615568.38 frames. ], batch size: 39, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:11:43,901 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37338.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:11:45,913 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37341.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:11:49,232 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37345.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:01,324 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37363.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:03,195 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:03,307 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:05,948 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37370.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:14,487 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37382.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:15,020 INFO [train.py:901] (1/4) Epoch 5, batch 5050, loss[loss=0.3225, simple_loss=0.3812, pruned_loss=0.1319, over 8356.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.35, pruned_loss=0.1153, over 1611753.32 frames. ], batch size: 26, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:12:18,411 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37388.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:12:22,598 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 01:12:24,797 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0
+2023-02-06 01:12:44,491 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 3.496e+02 4.122e+02 5.072e+02 9.522e+02, threshold=8.245e+02, percent-clipped=6.0
+2023-02-06 01:12:48,504 INFO [train.py:901] (1/4) Epoch 5, batch 5100, loss[loss=0.2689, simple_loss=0.3496, pruned_loss=0.09412, over 8512.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3492, pruned_loss=0.1148, over 1608395.07 frames. ], batch size: 26, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:12:48,513 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 01:12:48,708 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37433.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 01:12:49,923 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:13:01,262 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4765, 2.0818, 3.6320, 1.0504, 2.3630, 1.7489, 1.5374, 2.0909],
+       device='cuda:1'), covar=tensor([0.1454, 0.1529, 0.0623, 0.3093, 0.1351, 0.2444, 0.1411, 0.2101],
+       device='cuda:1'), in_proj_covar=tensor([0.0464, 0.0438, 0.0514, 0.0532, 0.0570, 0.0511, 0.0442, 0.0580],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 01:13:06,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37458.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 01:13:07,425 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 01:13:22,241 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37481.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:13:23,387 INFO [train.py:901] (1/4) Epoch 5, batch 5150, loss[loss=0.3371, simple_loss=0.3804, pruned_loss=0.1469, over 6824.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.3483, pruned_loss=0.1142, over 1608405.38 frames. ], batch size: 71, lr: 1.48e-02, grad_scale: 8.0
+2023-02-06 01:13:53,503 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.138e+02 3.879e+02 5.454e+02 1.167e+03, threshold=7.757e+02, percent-clipped=4.0
+2023-02-06 01:13:57,569 INFO [train.py:901] (1/4) Epoch 5, batch 5200, loss[loss=0.2721, simple_loss=0.3432, pruned_loss=0.1006, over 8585.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3486, pruned_loss=0.1139, over 1609859.10 frames. ], batch size: 34, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:14:01,118 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37538.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 01:14:19,383 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37563.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 01:14:27,418 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.15 vs. limit=5.0
+2023-02-06 01:14:33,081 INFO [train.py:901] (1/4) Epoch 5, batch 5250, loss[loss=0.2951, simple_loss=0.3495, pruned_loss=0.1204, over 7792.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3502, pruned_loss=0.1149, over 1612732.15 frames. ], batch size: 19, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:14:45,236 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 01:15:03,494 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 3.205e+02 3.781e+02 5.298e+02 9.083e+02, threshold=7.562e+02, percent-clipped=4.0
+2023-02-06 01:15:05,549 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37630.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:15:07,537 INFO [train.py:901] (1/4) Epoch 5, batch 5300, loss[loss=0.2868, simple_loss=0.3464, pruned_loss=0.1136, over 8099.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.3505, pruned_loss=0.1149, over 1617389.89 frames. ], batch size: 23, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:15:15,192 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37644.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:15:28,082 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3635, 1.6865, 2.8652, 1.0883, 2.0858, 1.6126, 1.4619, 1.6505],
+       device='cuda:1'), covar=tensor([0.1408, 0.1633, 0.0558, 0.2920, 0.1171, 0.2162, 0.1360, 0.1798],
+       device='cuda:1'), in_proj_covar=tensor([0.0459, 0.0438, 0.0506, 0.0530, 0.0561, 0.0504, 0.0440, 0.0576],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 01:15:32,586 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37669.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:15:42,454 INFO [train.py:901] (1/4) Epoch 5, batch 5350, loss[loss=0.3417, simple_loss=0.3874, pruned_loss=0.148, over 6805.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.353, pruned_loss=0.1162, over 1620855.40 frames. ], batch size: 71, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:15:47,813 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:16:05,041 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37716.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:16:11,801 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37726.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:16:12,347 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 3.472e+02 4.206e+02 5.536e+02 1.524e+03, threshold=8.412e+02, percent-clipped=7.0
+2023-02-06 01:16:17,026 INFO [train.py:901] (1/4) Epoch 5, batch 5400, loss[loss=0.2352, simple_loss=0.2966, pruned_loss=0.08686, over 7804.00 frames. ], tot_loss[loss=0.2932, simple_loss=0.3534, pruned_loss=0.1166, over 1620048.64 frames. ], batch size: 19, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:16:17,260 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8955, 2.1367, 4.1062, 1.2619, 2.9111, 2.2861, 1.7336, 2.4980],
+       device='cuda:1'), covar=tensor([0.1275, 0.1772, 0.0645, 0.3197, 0.1221, 0.2109, 0.1417, 0.2144],
+       device='cuda:1'), in_proj_covar=tensor([0.0461, 0.0440, 0.0515, 0.0535, 0.0570, 0.0509, 0.0442, 0.0580],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+       device='cuda:1')
+2023-02-06 01:16:19,896 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37737.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:16:25,204 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37745.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:16:36,760 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37762.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:16:40,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1454, 1.6327, 2.7435, 2.1482, 2.4107, 1.7783, 1.3751, 0.9416],
+       device='cuda:1'), covar=tensor([0.1769, 0.2113, 0.0458, 0.1066, 0.0844, 0.1016, 0.1082, 0.1990],
+       device='cuda:1'), in_proj_covar=tensor([0.0754, 0.0680, 0.0573, 0.0663, 0.0777, 0.0631, 0.0609, 0.0629],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-06 01:16:50,828 INFO [train.py:901] (1/4) Epoch 5, batch 5450, loss[loss=0.2967, simple_loss=0.3482, pruned_loss=0.1226, over 8087.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3521, pruned_loss=0.116, over 1615584.06 frames. ], batch size: 21, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:16:54,347 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9989, 1.3539, 3.3091, 1.1476, 2.1660, 3.6475, 3.6142, 3.0802],
+       device='cuda:1'), covar=tensor([0.1060, 0.1643, 0.0431, 0.2199, 0.0960, 0.0264, 0.0340, 0.0709],
+       device='cuda:1'), in_proj_covar=tensor([0.0240, 0.0273, 0.0225, 0.0262, 0.0224, 0.0200, 0.0230, 0.0279],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-06 01:17:22,462 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 2.936e+02 3.882e+02 5.021e+02 1.156e+03, threshold=7.764e+02, percent-clipped=3.0
+2023-02-06 01:17:26,495 INFO [train.py:901] (1/4) Epoch 5, batch 5500, loss[loss=0.3106, simple_loss=0.3628, pruned_loss=0.1291, over 8327.00 frames. ], tot_loss[loss=0.2915, simple_loss=0.3514, pruned_loss=0.1158, over 1617304.87 frames. ], batch size: 26, lr: 1.47e-02, grad_scale: 8.0
+2023-02-06 01:17:30,475 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 01:17:32,001 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37841.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 01:18:00,740 INFO [train.py:901] (1/4) Epoch 5, batch 5550, loss[loss=0.3148, simple_loss=0.3849, pruned_loss=0.1223, over 8198.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3526, pruned_loss=0.1158, over 1616894.89 frames. 
], batch size: 23, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:32,130 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 3.124e+02 3.941e+02 5.093e+02 9.977e+02, threshold=7.882e+02, percent-clipped=4.0 +2023-02-06 01:18:36,200 INFO [train.py:901] (1/4) Epoch 5, batch 5600, loss[loss=0.2761, simple_loss=0.345, pruned_loss=0.1036, over 8187.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3503, pruned_loss=0.1148, over 1613186.20 frames. ], batch size: 23, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:19:01,903 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5297, 4.3970, 3.9953, 2.3803, 3.9403, 4.0637, 4.1333, 3.6439], + device='cuda:1'), covar=tensor([0.0895, 0.0605, 0.0858, 0.3801, 0.0754, 0.0763, 0.1124, 0.0825], + device='cuda:1'), in_proj_covar=tensor([0.0401, 0.0290, 0.0316, 0.0405, 0.0323, 0.0270, 0.0299, 0.0251], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:19:04,040 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5839, 3.0500, 3.0195, 2.0772, 1.6112, 3.1086, 0.5303, 1.8793], + device='cuda:1'), covar=tensor([0.4361, 0.1831, 0.0982, 0.4012, 0.6655, 0.0671, 0.6758, 0.2401], + device='cuda:1'), in_proj_covar=tensor([0.0127, 0.0121, 0.0079, 0.0172, 0.0208, 0.0079, 0.0146, 0.0124], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:19:10,753 INFO [train.py:901] (1/4) Epoch 5, batch 5650, loss[loss=0.2689, simple_loss=0.3345, pruned_loss=0.1017, over 7963.00 frames. ], tot_loss[loss=0.2899, simple_loss=0.3503, pruned_loss=0.1147, over 1615469.98 frames. ], batch size: 21, lr: 1.47e-02, grad_scale: 4.0 +2023-02-06 01:19:20,216 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37997.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:23,907 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:34,396 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 01:19:41,409 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:42,540 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 3.005e+02 3.717e+02 4.758e+02 1.120e+03, threshold=7.434e+02, percent-clipped=3.0 +2023-02-06 01:19:45,907 INFO [train.py:901] (1/4) Epoch 5, batch 5700, loss[loss=0.3328, simple_loss=0.3852, pruned_loss=0.1402, over 8685.00 frames. ], tot_loss[loss=0.291, simple_loss=0.3511, pruned_loss=0.1155, over 1614634.39 frames. 
], batch size: 34, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:19:55,551 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4025, 1.7045, 2.8403, 1.1119, 2.0626, 1.7906, 1.3327, 1.7540], + device='cuda:1'), covar=tensor([0.1521, 0.1764, 0.0672, 0.3194, 0.1291, 0.2279, 0.1664, 0.1735], + device='cuda:1'), in_proj_covar=tensor([0.0466, 0.0448, 0.0530, 0.0537, 0.0577, 0.0518, 0.0448, 0.0586], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 01:20:07,611 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0066, 2.2799, 2.8899, 0.9299, 2.9884, 1.9318, 1.3132, 1.5912], + device='cuda:1'), covar=tensor([0.0286, 0.0136, 0.0116, 0.0276, 0.0178, 0.0306, 0.0369, 0.0193], + device='cuda:1'), in_proj_covar=tensor([0.0316, 0.0224, 0.0183, 0.0268, 0.0214, 0.0360, 0.0281, 0.0262], + device='cuda:1'), out_proj_covar=tensor([1.1335e-04, 7.7842e-05, 6.2803e-05, 9.3467e-05, 7.6265e-05, 1.3717e-04, + 1.0081e-04, 9.1940e-05], device='cuda:1') +2023-02-06 01:20:20,728 INFO [train.py:901] (1/4) Epoch 5, batch 5750, loss[loss=0.2211, simple_loss=0.2983, pruned_loss=0.07193, over 7935.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3515, pruned_loss=0.1156, over 1612192.81 frames. ], batch size: 20, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:30,232 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:37,218 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 01:20:46,793 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:50,583 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.053e+02 3.876e+02 4.925e+02 1.023e+03, threshold=7.752e+02, percent-clipped=4.0 +2023-02-06 01:20:54,530 INFO [train.py:901] (1/4) Epoch 5, batch 5800, loss[loss=0.3103, simple_loss=0.3732, pruned_loss=0.1237, over 8200.00 frames. ], tot_loss[loss=0.2918, simple_loss=0.3524, pruned_loss=0.1156, over 1614187.60 frames. 
], batch size: 23, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:59,287 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2123, 1.4540, 4.0611, 1.8231, 2.4428, 4.6068, 4.5838, 4.0864], + device='cuda:1'), covar=tensor([0.1057, 0.1545, 0.0292, 0.1808, 0.0819, 0.0199, 0.0261, 0.0501], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0269, 0.0225, 0.0263, 0.0224, 0.0200, 0.0232, 0.0277], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 01:21:25,613 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3276, 1.6320, 1.6654, 1.2869, 0.8800, 1.6708, 0.1102, 1.1107], + device='cuda:1'), covar=tensor([0.3682, 0.1970, 0.1066, 0.2061, 0.5300, 0.1112, 0.5127, 0.2090], + device='cuda:1'), in_proj_covar=tensor([0.0130, 0.0121, 0.0081, 0.0169, 0.0210, 0.0081, 0.0145, 0.0125], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:21:26,249 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8828, 1.2000, 1.4163, 1.0654, 1.1072, 1.3216, 1.5029, 1.5003], + device='cuda:1'), covar=tensor([0.0685, 0.1459, 0.2130, 0.1696, 0.0673, 0.1836, 0.0848, 0.0629], + device='cuda:1'), in_proj_covar=tensor([0.0135, 0.0178, 0.0218, 0.0182, 0.0130, 0.0189, 0.0144, 0.0153], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 01:21:29,423 INFO [train.py:901] (1/4) Epoch 5, batch 5850, loss[loss=0.2592, simple_loss=0.3316, pruned_loss=0.09338, over 8257.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3517, pruned_loss=0.1148, over 1612223.37 frames. ], batch size: 24, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:01,295 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.005e+02 3.016e+02 3.759e+02 4.889e+02 1.185e+03, threshold=7.518e+02, percent-clipped=2.0 +2023-02-06 01:22:04,825 INFO [train.py:901] (1/4) Epoch 5, batch 5900, loss[loss=0.2834, simple_loss=0.3591, pruned_loss=0.1039, over 8247.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3508, pruned_loss=0.1139, over 1612957.78 frames. ], batch size: 24, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:25,289 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5979, 4.6196, 3.9406, 1.9169, 4.0126, 3.9781, 4.2545, 3.6383], + device='cuda:1'), covar=tensor([0.0772, 0.0530, 0.1031, 0.4718, 0.0870, 0.0924, 0.1090, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0399, 0.0285, 0.0319, 0.0404, 0.0314, 0.0268, 0.0298, 0.0251], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:22:40,888 INFO [train.py:901] (1/4) Epoch 5, batch 5950, loss[loss=0.3138, simple_loss=0.3736, pruned_loss=0.127, over 8547.00 frames. ], tot_loss[loss=0.2904, simple_loss=0.3518, pruned_loss=0.1145, over 1613653.82 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:23:12,028 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.191e+02 3.790e+02 5.332e+02 1.075e+03, threshold=7.580e+02, percent-clipped=7.0 +2023-02-06 01:23:15,487 INFO [train.py:901] (1/4) Epoch 5, batch 6000, loss[loss=0.2611, simple_loss=0.3224, pruned_loss=0.09988, over 8239.00 frames. ], tot_loss[loss=0.2902, simple_loss=0.3514, pruned_loss=0.1145, over 1613781.17 frames. 
], batch size: 22, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:23:15,487 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 01:23:28,278 INFO [train.py:935] (1/4) Epoch 5, validation: loss=0.2196, simple_loss=0.3162, pruned_loss=0.06146, over 944034.00 frames. +2023-02-06 01:23:28,279 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-06 01:23:33,799 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:23:58,476 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38378.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:23:58,573 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4717, 1.7854, 3.0798, 1.0140, 2.3020, 1.7680, 1.5088, 1.8062], + device='cuda:1'), covar=tensor([0.1418, 0.1665, 0.0628, 0.3140, 0.1291, 0.2264, 0.1378, 0.2140], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0445, 0.0523, 0.0536, 0.0578, 0.0519, 0.0443, 0.0594], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 01:24:01,364 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 01:24:01,709 INFO [train.py:901] (1/4) Epoch 5, batch 6050, loss[loss=0.2522, simple_loss=0.3353, pruned_loss=0.08457, over 8481.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3514, pruned_loss=0.1141, over 1615432.89 frames. ], batch size: 27, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:33,765 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 3.108e+02 3.868e+02 4.827e+02 8.119e+02, threshold=7.737e+02, percent-clipped=1.0 +2023-02-06 01:24:37,063 INFO [train.py:901] (1/4) Epoch 5, batch 6100, loss[loss=0.2681, simple_loss=0.3354, pruned_loss=0.1004, over 8030.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3512, pruned_loss=0.1141, over 1615452.54 frames. ], batch size: 22, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:53,394 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:25:09,663 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 01:25:10,932 INFO [train.py:901] (1/4) Epoch 5, batch 6150, loss[loss=0.2583, simple_loss=0.3188, pruned_loss=0.09888, over 7929.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3502, pruned_loss=0.1136, over 1611638.73 frames. ], batch size: 20, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:25:42,288 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 3.089e+02 4.008e+02 5.119e+02 1.011e+03, threshold=8.016e+02, percent-clipped=7.0 +2023-02-06 01:25:45,625 INFO [train.py:901] (1/4) Epoch 5, batch 6200, loss[loss=0.3564, simple_loss=0.3947, pruned_loss=0.1591, over 8520.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3486, pruned_loss=0.1135, over 1607090.10 frames. ], batch size: 26, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:25:45,970 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.80 vs. limit=5.0 +2023-02-06 01:26:20,120 INFO [train.py:901] (1/4) Epoch 5, batch 6250, loss[loss=0.3296, simple_loss=0.3911, pruned_loss=0.1341, over 8507.00 frames. ], tot_loss[loss=0.287, simple_loss=0.348, pruned_loss=0.113, over 1607273.19 frames. 
], batch size: 26, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:26:51,177 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.239e+02 3.994e+02 4.997e+02 1.061e+03, threshold=7.988e+02, percent-clipped=3.0 +2023-02-06 01:26:54,599 INFO [train.py:901] (1/4) Epoch 5, batch 6300, loss[loss=0.335, simple_loss=0.3889, pruned_loss=0.1406, over 8255.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3482, pruned_loss=0.1133, over 1606443.60 frames. ], batch size: 24, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:29,613 INFO [train.py:901] (1/4) Epoch 5, batch 6350, loss[loss=0.2823, simple_loss=0.3389, pruned_loss=0.1129, over 7533.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3462, pruned_loss=0.1122, over 1600498.30 frames. ], batch size: 18, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:49,913 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38712.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:27:56,421 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:00,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.862e+02 3.826e+02 4.732e+02 1.596e+03, threshold=7.652e+02, percent-clipped=5.0 +2023-02-06 01:28:03,602 INFO [train.py:901] (1/4) Epoch 5, batch 6400, loss[loss=0.2555, simple_loss=0.3169, pruned_loss=0.09708, over 7659.00 frames. ], tot_loss[loss=0.285, simple_loss=0.3467, pruned_loss=0.1116, over 1606835.37 frames. ], batch size: 19, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:06,533 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:12,650 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 01:28:38,882 INFO [train.py:901] (1/4) Epoch 5, batch 6450, loss[loss=0.2853, simple_loss=0.3485, pruned_loss=0.111, over 8453.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3462, pruned_loss=0.1112, over 1606896.47 frames. ], batch size: 27, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:54,128 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5489, 1.7374, 2.7666, 1.2883, 2.1667, 1.7783, 1.6185, 1.9775], + device='cuda:1'), covar=tensor([0.1086, 0.1471, 0.0499, 0.2378, 0.1034, 0.1704, 0.1142, 0.1476], + device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0454, 0.0524, 0.0541, 0.0578, 0.0529, 0.0450, 0.0595], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 01:29:09,808 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.470e+02 3.536e+02 4.141e+02 5.010e+02 9.096e+02, threshold=8.281e+02, percent-clipped=4.0 +2023-02-06 01:29:13,125 INFO [train.py:901] (1/4) Epoch 5, batch 6500, loss[loss=0.2782, simple_loss=0.3459, pruned_loss=0.1053, over 8135.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3472, pruned_loss=0.112, over 1605443.04 frames. 
], batch size: 22, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:29:16,032 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:18,637 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:48,026 INFO [train.py:901] (1/4) Epoch 5, batch 6550, loss[loss=0.3243, simple_loss=0.3781, pruned_loss=0.1352, over 8566.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3469, pruned_loss=0.1118, over 1602904.06 frames. ], batch size: 49, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:19,275 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.239e+02 3.737e+02 4.952e+02 1.438e+03, threshold=7.474e+02, percent-clipped=4.0 +2023-02-06 01:30:22,018 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 01:30:22,677 INFO [train.py:901] (1/4) Epoch 5, batch 6600, loss[loss=0.248, simple_loss=0.3119, pruned_loss=0.09205, over 7799.00 frames. ], tot_loss[loss=0.2865, simple_loss=0.3477, pruned_loss=0.1126, over 1606366.61 frames. ], batch size: 20, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:39,812 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 01:30:41,192 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6674, 4.6633, 4.1408, 1.8361, 4.0879, 4.1757, 4.2840, 3.8642], + device='cuda:1'), covar=tensor([0.0766, 0.0553, 0.1138, 0.5246, 0.0789, 0.0601, 0.1211, 0.0735], + device='cuda:1'), in_proj_covar=tensor([0.0410, 0.0300, 0.0327, 0.0414, 0.0321, 0.0280, 0.0305, 0.0256], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:30:58,095 INFO [train.py:901] (1/4) Epoch 5, batch 6650, loss[loss=0.2766, simple_loss=0.3476, pruned_loss=0.1028, over 8502.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.348, pruned_loss=0.1128, over 1608337.82 frames. ], batch size: 26, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:31:08,532 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-06 01:31:10,555 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 01:31:29,862 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.068e+02 3.660e+02 4.252e+02 1.265e+03, threshold=7.321e+02, percent-clipped=3.0 +2023-02-06 01:31:33,329 INFO [train.py:901] (1/4) Epoch 5, batch 6700, loss[loss=0.282, simple_loss=0.3559, pruned_loss=0.1041, over 8480.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.3482, pruned_loss=0.1126, over 1610436.77 frames. ], batch size: 25, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:31:56,923 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 01:32:08,098 INFO [train.py:901] (1/4) Epoch 5, batch 6750, loss[loss=0.2627, simple_loss=0.3333, pruned_loss=0.09601, over 8105.00 frames. ], tot_loss[loss=0.2854, simple_loss=0.3477, pruned_loss=0.1115, over 1613596.92 frames. 
], batch size: 23, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:32:15,834 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:33,090 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39118.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:40,327 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.802e+02 3.437e+02 4.300e+02 8.945e+02, threshold=6.874e+02, percent-clipped=2.0 +2023-02-06 01:32:43,709 INFO [train.py:901] (1/4) Epoch 5, batch 6800, loss[loss=0.2272, simple_loss=0.2935, pruned_loss=0.08045, over 7698.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3462, pruned_loss=0.111, over 1605710.65 frames. ], batch size: 18, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:32:54,744 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 01:33:04,419 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3909, 2.8995, 2.0380, 2.2742, 2.3359, 1.8264, 2.3023, 2.2878], + device='cuda:1'), covar=tensor([0.1350, 0.0219, 0.0835, 0.0640, 0.0612, 0.1074, 0.0733, 0.0965], + device='cuda:1'), in_proj_covar=tensor([0.0341, 0.0242, 0.0303, 0.0300, 0.0315, 0.0312, 0.0334, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 01:33:10,331 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8484, 5.8448, 5.1795, 2.0401, 5.2037, 5.4688, 5.4850, 5.1800], + device='cuda:1'), covar=tensor([0.0496, 0.0400, 0.0734, 0.4494, 0.0538, 0.0594, 0.0845, 0.0537], + device='cuda:1'), in_proj_covar=tensor([0.0399, 0.0292, 0.0320, 0.0404, 0.0311, 0.0280, 0.0304, 0.0252], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:33:17,612 INFO [train.py:901] (1/4) Epoch 5, batch 6850, loss[loss=0.2569, simple_loss=0.3105, pruned_loss=0.1017, over 7535.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3466, pruned_loss=0.1115, over 1608417.86 frames. ], batch size: 18, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:33:19,093 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:31,228 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:43,502 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 01:33:48,721 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 3.284e+02 3.960e+02 5.468e+02 1.321e+03, threshold=7.919e+02, percent-clipped=11.0 +2023-02-06 01:33:52,161 INFO [train.py:901] (1/4) Epoch 5, batch 6900, loss[loss=0.3038, simple_loss=0.3638, pruned_loss=0.1219, over 8073.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3477, pruned_loss=0.1119, over 1610867.45 frames. ], batch size: 21, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:26,649 INFO [train.py:901] (1/4) Epoch 5, batch 6950, loss[loss=0.2547, simple_loss=0.3356, pruned_loss=0.08694, over 8461.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3482, pruned_loss=0.1123, over 1611790.13 frames. 
], batch size: 25, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:38,150 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:39,401 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:49,528 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 01:34:58,303 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.231e+02 3.801e+02 5.196e+02 1.038e+03, threshold=7.603e+02, percent-clipped=4.0 +2023-02-06 01:35:01,665 INFO [train.py:901] (1/4) Epoch 5, batch 7000, loss[loss=0.2455, simple_loss=0.3148, pruned_loss=0.08808, over 8091.00 frames. ], tot_loss[loss=0.285, simple_loss=0.3474, pruned_loss=0.1113, over 1614099.48 frames. ], batch size: 21, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:06,877 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 01:35:34,736 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5074, 2.9094, 3.0922, 1.7544, 1.5423, 2.7769, 0.5486, 1.8616], + device='cuda:1'), covar=tensor([0.4420, 0.1674, 0.0718, 0.3500, 0.5483, 0.0843, 0.5902, 0.2435], + device='cuda:1'), in_proj_covar=tensor([0.0133, 0.0128, 0.0082, 0.0175, 0.0217, 0.0082, 0.0143, 0.0130], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:35:35,823 INFO [train.py:901] (1/4) Epoch 5, batch 7050, loss[loss=0.2413, simple_loss=0.3049, pruned_loss=0.08887, over 7247.00 frames. ], tot_loss[loss=0.284, simple_loss=0.3466, pruned_loss=0.1107, over 1614672.79 frames. ], batch size: 16, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:40,290 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 01:36:04,433 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4085, 1.8578, 3.1558, 1.1104, 2.2372, 1.8449, 1.4896, 1.8704], + device='cuda:1'), covar=tensor([0.1533, 0.1788, 0.0543, 0.3172, 0.1214, 0.2257, 0.1517, 0.2161], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0449, 0.0514, 0.0522, 0.0564, 0.0505, 0.0440, 0.0582], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 01:36:06,897 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 2.867e+02 3.538e+02 4.706e+02 1.662e+03, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 01:36:10,287 INFO [train.py:901] (1/4) Epoch 5, batch 7100, loss[loss=0.2581, simple_loss=0.3349, pruned_loss=0.09063, over 8145.00 frames. ], tot_loss[loss=0.284, simple_loss=0.3469, pruned_loss=0.1106, over 1617120.76 frames. 
], batch size: 22, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:36:10,520 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0520, 2.5508, 3.2013, 1.1135, 2.9130, 1.9039, 1.3748, 1.7928], + device='cuda:1'), covar=tensor([0.0306, 0.0133, 0.0097, 0.0273, 0.0240, 0.0363, 0.0377, 0.0191], + device='cuda:1'), in_proj_covar=tensor([0.0323, 0.0229, 0.0195, 0.0277, 0.0224, 0.0374, 0.0291, 0.0270], + device='cuda:1'), out_proj_covar=tensor([1.1320e-04, 7.8686e-05, 6.6479e-05, 9.5059e-05, 7.8630e-05, 1.4069e-04, + 1.0271e-04, 9.3335e-05], device='cuda:1') +2023-02-06 01:36:44,804 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:36:46,001 INFO [train.py:901] (1/4) Epoch 5, batch 7150, loss[loss=0.307, simple_loss=0.3689, pruned_loss=0.1225, over 8726.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3473, pruned_loss=0.1106, over 1617978.95 frames. ], batch size: 34, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:36:50,219 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0332, 1.3013, 4.1777, 1.5731, 3.5797, 3.4149, 3.7319, 3.6637], + device='cuda:1'), covar=tensor([0.0441, 0.3799, 0.0479, 0.2637, 0.1200, 0.0693, 0.0509, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0500, 0.0439, 0.0433, 0.0502, 0.0419, 0.0413, 0.0466], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 01:37:17,188 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.977e+02 2.912e+02 3.907e+02 4.774e+02 1.202e+03, threshold=7.813e+02, percent-clipped=7.0 +2023-02-06 01:37:18,078 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6495, 1.6222, 4.7946, 1.7027, 4.1797, 3.9700, 4.3308, 4.1751], + device='cuda:1'), covar=tensor([0.0332, 0.3125, 0.0296, 0.2350, 0.0975, 0.0474, 0.0364, 0.0460], + device='cuda:1'), in_proj_covar=tensor([0.0342, 0.0492, 0.0435, 0.0431, 0.0497, 0.0414, 0.0408, 0.0462], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 01:37:20,770 INFO [train.py:901] (1/4) Epoch 5, batch 7200, loss[loss=0.2479, simple_loss=0.3228, pruned_loss=0.08647, over 8130.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3491, pruned_loss=0.1115, over 1622025.62 frames. 
], batch size: 22, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:21,582 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5540, 4.5235, 4.0784, 1.8527, 3.9734, 4.0861, 4.2149, 3.7080], + device='cuda:1'), covar=tensor([0.0721, 0.0566, 0.0881, 0.4503, 0.0630, 0.0610, 0.1170, 0.0713], + device='cuda:1'), in_proj_covar=tensor([0.0399, 0.0288, 0.0317, 0.0403, 0.0306, 0.0275, 0.0301, 0.0249], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:37:21,686 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4818, 1.7722, 2.1636, 0.9972, 2.1224, 1.4129, 0.6507, 1.6596], + device='cuda:1'), covar=tensor([0.0281, 0.0138, 0.0104, 0.0229, 0.0143, 0.0361, 0.0325, 0.0129], + device='cuda:1'), in_proj_covar=tensor([0.0320, 0.0226, 0.0192, 0.0274, 0.0222, 0.0370, 0.0289, 0.0267], + device='cuda:1'), out_proj_covar=tensor([1.1218e-04, 7.7630e-05, 6.5339e-05, 9.3752e-05, 7.7743e-05, 1.3891e-04, + 1.0154e-04, 9.2352e-05], device='cuda:1') +2023-02-06 01:37:30,602 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:37,515 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:55,140 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:56,298 INFO [train.py:901] (1/4) Epoch 5, batch 7250, loss[loss=0.2585, simple_loss=0.3223, pruned_loss=0.09733, over 7644.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3479, pruned_loss=0.1109, over 1621789.10 frames. ], batch size: 19, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:27,208 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 2.862e+02 3.679e+02 5.056e+02 1.142e+03, threshold=7.358e+02, percent-clipped=8.0 +2023-02-06 01:38:30,500 INFO [train.py:901] (1/4) Epoch 5, batch 7300, loss[loss=0.2913, simple_loss=0.3438, pruned_loss=0.1194, over 7439.00 frames. ], tot_loss[loss=0.2862, simple_loss=0.3486, pruned_loss=0.1119, over 1620717.40 frames. ], batch size: 17, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:39,559 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:38:47,105 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39657.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:38:50,512 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39662.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:39:05,556 INFO [train.py:901] (1/4) Epoch 5, batch 7350, loss[loss=0.2685, simple_loss=0.3324, pruned_loss=0.1024, over 8357.00 frames. ], tot_loss[loss=0.2866, simple_loss=0.3484, pruned_loss=0.1125, over 1615177.25 frames. ], batch size: 24, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:33,447 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 01:39:36,159 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.846e+02 3.982e+02 4.999e+02 1.878e+03, threshold=7.964e+02, percent-clipped=11.0 +2023-02-06 01:39:39,633 INFO [train.py:901] (1/4) Epoch 5, batch 7400, loss[loss=0.3019, simple_loss=0.3648, pruned_loss=0.1194, over 8605.00 frames. 
], tot_loss[loss=0.2886, simple_loss=0.3501, pruned_loss=0.1135, over 1619484.09 frames. ], batch size: 49, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:52,995 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 01:39:59,153 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:01,203 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1329, 2.4334, 3.0940, 1.2397, 3.2125, 2.1396, 1.5200, 1.7748], + device='cuda:1'), covar=tensor([0.0262, 0.0115, 0.0111, 0.0260, 0.0116, 0.0308, 0.0365, 0.0185], + device='cuda:1'), in_proj_covar=tensor([0.0316, 0.0223, 0.0192, 0.0276, 0.0221, 0.0370, 0.0289, 0.0266], + device='cuda:1'), out_proj_covar=tensor([1.1039e-04, 7.6072e-05, 6.5133e-05, 9.4686e-05, 7.7486e-05, 1.3836e-04, + 1.0171e-04, 9.1758e-05], device='cuda:1') +2023-02-06 01:40:05,265 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1139, 1.6622, 4.1853, 1.9392, 2.3620, 4.9006, 4.7569, 4.3198], + device='cuda:1'), covar=tensor([0.1078, 0.1462, 0.0308, 0.1913, 0.0784, 0.0189, 0.0299, 0.0475], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0263, 0.0222, 0.0259, 0.0220, 0.0198, 0.0230, 0.0272], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 01:40:13,746 INFO [train.py:901] (1/4) Epoch 5, batch 7450, loss[loss=0.2412, simple_loss=0.2963, pruned_loss=0.0931, over 7244.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.35, pruned_loss=0.1129, over 1620937.37 frames. ], batch size: 16, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:15,955 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:32,187 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 01:40:43,637 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:43,747 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1809, 1.6282, 1.6281, 1.5621, 1.6982, 1.6154, 2.5570, 2.2648], + device='cuda:1'), covar=tensor([0.0484, 0.1257, 0.1654, 0.1313, 0.0539, 0.1517, 0.0603, 0.0490], + device='cuda:1'), in_proj_covar=tensor([0.0132, 0.0177, 0.0217, 0.0181, 0.0128, 0.0187, 0.0142, 0.0150], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 01:40:45,625 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 3.116e+02 3.779e+02 4.440e+02 1.107e+03, threshold=7.558e+02, percent-clipped=3.0 +2023-02-06 01:40:49,055 INFO [train.py:901] (1/4) Epoch 5, batch 7500, loss[loss=0.3468, simple_loss=0.3904, pruned_loss=0.1516, over 7017.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.3499, pruned_loss=0.1133, over 1619170.92 frames. 
], batch size: 71, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:49,187 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:21,798 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7710, 1.5537, 3.2468, 1.3164, 2.0802, 3.7596, 3.6634, 3.1478], + device='cuda:1'), covar=tensor([0.1085, 0.1400, 0.0355, 0.2055, 0.0837, 0.0216, 0.0344, 0.0575], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0265, 0.0224, 0.0263, 0.0224, 0.0202, 0.0234, 0.0278], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 01:41:22,994 INFO [train.py:901] (1/4) Epoch 5, batch 7550, loss[loss=0.2732, simple_loss=0.3456, pruned_loss=0.1004, over 8110.00 frames. ], tot_loss[loss=0.2875, simple_loss=0.3495, pruned_loss=0.1128, over 1615553.61 frames. ], batch size: 23, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:41:48,014 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:54,548 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.144e+02 3.978e+02 5.379e+02 1.554e+03, threshold=7.957e+02, percent-clipped=6.0 +2023-02-06 01:41:57,850 INFO [train.py:901] (1/4) Epoch 5, batch 7600, loss[loss=0.3406, simple_loss=0.3826, pruned_loss=0.1493, over 7333.00 frames. ], tot_loss[loss=0.2868, simple_loss=0.3491, pruned_loss=0.1123, over 1616095.80 frames. ], batch size: 71, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:42:02,463 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:04,536 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39943.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:19,502 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4354, 2.1581, 1.5726, 1.8930, 1.8258, 1.4447, 1.7772, 1.8643], + device='cuda:1'), covar=tensor([0.0728, 0.0251, 0.0636, 0.0341, 0.0396, 0.0765, 0.0523, 0.0517], + device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0241, 0.0316, 0.0301, 0.0314, 0.0313, 0.0334, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 01:42:32,756 INFO [train.py:901] (1/4) Epoch 5, batch 7650, loss[loss=0.2915, simple_loss=0.3542, pruned_loss=0.1144, over 8369.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3495, pruned_loss=0.113, over 1617435.92 frames. ], batch size: 49, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:42:43,218 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-06 01:42:45,885 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40001.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:42:57,000 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:05,594 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.281e+02 3.028e+02 3.689e+02 4.703e+02 1.290e+03, threshold=7.379e+02, percent-clipped=1.0 +2023-02-06 01:43:08,877 INFO [train.py:901] (1/4) Epoch 5, batch 7700, loss[loss=0.3168, simple_loss=0.365, pruned_loss=0.1343, over 8339.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3496, pruned_loss=0.1134, over 1617231.71 frames. 
], batch size: 48, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:15,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:44,266 INFO [train.py:901] (1/4) Epoch 5, batch 7750, loss[loss=0.3048, simple_loss=0.3727, pruned_loss=0.1185, over 8298.00 frames. ], tot_loss[loss=0.2891, simple_loss=0.3502, pruned_loss=0.114, over 1614899.80 frames. ], batch size: 23, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:44,280 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 01:43:50,924 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.02 vs. limit=5.0 +2023-02-06 01:44:07,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40116.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:44:15,425 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.050e+02 3.016e+02 3.638e+02 4.428e+02 8.911e+02, threshold=7.276e+02, percent-clipped=8.0 +2023-02-06 01:44:16,233 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:18,939 INFO [train.py:901] (1/4) Epoch 5, batch 7800, loss[loss=0.3457, simple_loss=0.3855, pruned_loss=0.153, over 7108.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3504, pruned_loss=0.1143, over 1614543.27 frames. ], batch size: 71, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:44:50,150 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:54,263 INFO [train.py:901] (1/4) Epoch 5, batch 7850, loss[loss=0.2709, simple_loss=0.3442, pruned_loss=0.09883, over 8466.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3492, pruned_loss=0.1136, over 1610040.76 frames. ], batch size: 29, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:45:03,211 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:14,679 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:20,120 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:24,748 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.315e+02 3.285e+02 3.978e+02 4.753e+02 1.108e+03, threshold=7.955e+02, percent-clipped=4.0 +2023-02-06 01:45:28,283 INFO [train.py:901] (1/4) Epoch 5, batch 7900, loss[loss=0.3093, simple_loss=0.38, pruned_loss=0.1193, over 8644.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3491, pruned_loss=0.1137, over 1607296.72 frames. ], batch size: 39, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:45:30,494 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40236.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:35,936 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:02,028 INFO [train.py:901] (1/4) Epoch 5, batch 7950, loss[loss=0.2927, simple_loss=0.343, pruned_loss=0.1212, over 7928.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3477, pruned_loss=0.1122, over 1608062.56 frames. 
], batch size: 20, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:07,560 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:09,023 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:33,063 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.942e+02 3.003e+02 3.931e+02 4.743e+02 9.937e+02, threshold=7.862e+02, percent-clipped=4.0 +2023-02-06 01:46:36,390 INFO [train.py:901] (1/4) Epoch 5, batch 8000, loss[loss=0.2773, simple_loss=0.3514, pruned_loss=0.1016, over 8326.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3468, pruned_loss=0.1108, over 1613735.60 frames. ], batch size: 25, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:46,580 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:47:03,147 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40372.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:47:10,472 INFO [train.py:901] (1/4) Epoch 5, batch 8050, loss[loss=0.1969, simple_loss=0.2619, pruned_loss=0.06595, over 7528.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3448, pruned_loss=0.11, over 1613654.45 frames. ], batch size: 18, lr: 1.42e-02, grad_scale: 8.0 +2023-02-06 01:47:20,239 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40397.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:47:43,883 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 01:47:48,096 INFO [train.py:901] (1/4) Epoch 6, batch 0, loss[loss=0.3063, simple_loss=0.3685, pruned_loss=0.1221, over 8334.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.3685, pruned_loss=0.1221, over 8334.00 frames. ], batch size: 26, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:47:48,097 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 01:47:59,060 INFO [train.py:935] (1/4) Epoch 6, validation: loss=0.2203, simple_loss=0.3165, pruned_loss=0.06206, over 944034.00 frames. +2023-02-06 01:47:59,061 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6555MB +2023-02-06 01:48:07,792 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.052e+02 3.992e+02 5.098e+02 1.227e+03, threshold=7.983e+02, percent-clipped=7.0 +2023-02-06 01:48:12,971 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4260, 1.6380, 2.8931, 1.1442, 2.0539, 1.7634, 1.3566, 1.6793], + device='cuda:1'), covar=tensor([0.1684, 0.1970, 0.0651, 0.3465, 0.1469, 0.2533, 0.1702, 0.2075], + device='cuda:1'), in_proj_covar=tensor([0.0477, 0.0458, 0.0521, 0.0548, 0.0594, 0.0524, 0.0461, 0.0596], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 01:48:13,431 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 01:48:34,118 INFO [train.py:901] (1/4) Epoch 6, batch 50, loss[loss=0.2705, simple_loss=0.341, pruned_loss=0.09995, over 8200.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3476, pruned_loss=0.1103, over 365447.67 frames. ], batch size: 23, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:48:48,506 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 01:48:57,363 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:04,779 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:08,815 INFO [train.py:901] (1/4) Epoch 6, batch 100, loss[loss=0.247, simple_loss=0.3137, pruned_loss=0.09014, over 7699.00 frames. ], tot_loss[loss=0.2852, simple_loss=0.3484, pruned_loss=0.1111, over 642610.92 frames. ], batch size: 18, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:49:13,093 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 01:49:15,347 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:17,929 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.877e+02 3.627e+02 4.294e+02 7.601e+02, threshold=7.253e+02, percent-clipped=0.0 +2023-02-06 01:49:31,529 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:37,661 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:44,253 INFO [train.py:901] (1/4) Epoch 6, batch 150, loss[loss=0.2605, simple_loss=0.3393, pruned_loss=0.09085, over 8547.00 frames. ], tot_loss[loss=0.2862, simple_loss=0.3487, pruned_loss=0.1118, over 856977.01 frames. ], batch size: 31, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:49:49,721 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:54,329 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:19,249 INFO [train.py:901] (1/4) Epoch 6, batch 200, loss[loss=0.3075, simple_loss=0.3807, pruned_loss=0.1171, over 8510.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3498, pruned_loss=0.112, over 1029954.87 frames. ], batch size: 28, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:25,196 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 01:50:28,760 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 3.079e+02 3.898e+02 5.213e+02 9.157e+02, threshold=7.795e+02, percent-clipped=3.0 +2023-02-06 01:50:32,270 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:52,862 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8607, 1.9658, 2.1530, 1.7686, 1.2163, 2.3267, 0.4484, 1.2280], + device='cuda:1'), covar=tensor([0.3204, 0.1696, 0.0991, 0.2287, 0.5399, 0.0578, 0.5363, 0.2549], + device='cuda:1'), in_proj_covar=tensor([0.0134, 0.0129, 0.0081, 0.0175, 0.0215, 0.0083, 0.0145, 0.0134], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:50:54,058 INFO [train.py:901] (1/4) Epoch 6, batch 250, loss[loss=0.2388, simple_loss=0.308, pruned_loss=0.08479, over 7699.00 frames. ], tot_loss[loss=0.2868, simple_loss=0.3492, pruned_loss=0.1122, over 1164529.09 frames. 
], batch size: 18, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:56,282 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:58,372 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:04,072 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 01:51:12,281 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 01:51:13,018 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:15,104 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:29,232 INFO [train.py:901] (1/4) Epoch 6, batch 300, loss[loss=0.2225, simple_loss=0.2938, pruned_loss=0.07564, over 7532.00 frames. ], tot_loss[loss=0.2866, simple_loss=0.3493, pruned_loss=0.1119, over 1264590.72 frames. ], batch size: 18, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:51:38,587 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 3.025e+02 3.729e+02 4.724e+02 9.863e+02, threshold=7.458e+02, percent-clipped=3.0 +2023-02-06 01:51:47,273 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6824, 5.6357, 4.8353, 2.1056, 5.0045, 5.4384, 5.4263, 4.6764], + device='cuda:1'), covar=tensor([0.0575, 0.0427, 0.0839, 0.4547, 0.0572, 0.0424, 0.0882, 0.0546], + device='cuda:1'), in_proj_covar=tensor([0.0406, 0.0288, 0.0325, 0.0412, 0.0314, 0.0278, 0.0301, 0.0255], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 01:51:52,716 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:03,714 INFO [train.py:901] (1/4) Epoch 6, batch 350, loss[loss=0.288, simple_loss=0.3576, pruned_loss=0.1092, over 8106.00 frames. ], tot_loss[loss=0.2841, simple_loss=0.3472, pruned_loss=0.1105, over 1347446.51 frames. ], batch size: 23, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:32,430 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:38,237 INFO [train.py:901] (1/4) Epoch 6, batch 400, loss[loss=0.2974, simple_loss=0.3597, pruned_loss=0.1175, over 8483.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3485, pruned_loss=0.1118, over 1405520.29 frames. 
], batch size: 29, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:46,898 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.939e+02 3.080e+02 3.801e+02 5.022e+02 1.220e+03, threshold=7.601e+02, percent-clipped=4.0 +2023-02-06 01:53:03,002 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8936, 3.5722, 2.4649, 4.1734, 1.7545, 2.1517, 2.1518, 3.5281], + device='cuda:1'), covar=tensor([0.0799, 0.0747, 0.1196, 0.0213, 0.1661, 0.1718, 0.1681, 0.0864], + device='cuda:1'), in_proj_covar=tensor([0.0277, 0.0246, 0.0281, 0.0223, 0.0247, 0.0278, 0.0280, 0.0256], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 01:53:04,907 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40854.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:12,738 INFO [train.py:901] (1/4) Epoch 6, batch 450, loss[loss=0.2015, simple_loss=0.2755, pruned_loss=0.06373, over 7537.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3474, pruned_loss=0.1106, over 1455082.82 frames. ], batch size: 18, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:24,458 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40883.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:47,844 INFO [train.py:901] (1/4) Epoch 6, batch 500, loss[loss=0.2628, simple_loss=0.3359, pruned_loss=0.09484, over 8330.00 frames. ], tot_loss[loss=0.2822, simple_loss=0.346, pruned_loss=0.1092, over 1490852.11 frames. ], batch size: 25, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:49,261 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:56,741 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40928.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:57,239 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.058e+02 3.738e+02 5.288e+02 8.550e+02, threshold=7.476e+02, percent-clipped=3.0 +2023-02-06 01:54:12,304 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:13,627 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:20,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6853, 1.9750, 1.5619, 2.2638, 1.3780, 1.4055, 1.6096, 1.9698], + device='cuda:1'), covar=tensor([0.0955, 0.0830, 0.1146, 0.0589, 0.1172, 0.1451, 0.0991, 0.0700], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0252, 0.0283, 0.0226, 0.0250, 0.0282, 0.0284, 0.0263], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 01:54:22,940 INFO [train.py:901] (1/4) Epoch 6, batch 550, loss[loss=0.2583, simple_loss=0.3344, pruned_loss=0.09108, over 8358.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3463, pruned_loss=0.1097, over 1517199.04 frames. 
], batch size: 24, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:54:25,209 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:30,598 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:49,827 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:51,394 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 01:54:53,853 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9348, 2.1521, 1.6217, 2.7307, 1.1403, 1.3812, 1.6712, 2.2538], + device='cuda:1'), covar=tensor([0.0986, 0.1162, 0.1500, 0.0425, 0.1601, 0.1905, 0.1323, 0.1015], + device='cuda:1'), in_proj_covar=tensor([0.0278, 0.0252, 0.0279, 0.0223, 0.0248, 0.0279, 0.0283, 0.0260], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 01:54:55,069 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:56,473 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:57,030 INFO [train.py:901] (1/4) Epoch 6, batch 600, loss[loss=0.2796, simple_loss=0.3494, pruned_loss=0.1049, over 8257.00 frames. ], tot_loss[loss=0.2791, simple_loss=0.3428, pruned_loss=0.1077, over 1532285.39 frames. ], batch size: 24, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:06,087 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.839e+02 3.515e+02 4.292e+02 8.268e+02, threshold=7.031e+02, percent-clipped=4.0 +2023-02-06 01:55:07,011 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41030.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:09,465 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 01:55:29,301 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:29,378 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:31,079 INFO [train.py:901] (1/4) Epoch 6, batch 650, loss[loss=0.288, simple_loss=0.3475, pruned_loss=0.1142, over 8027.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3421, pruned_loss=0.107, over 1548803.23 frames. ], batch size: 22, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:46,208 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:59,433 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 01:56:05,813 INFO [train.py:901] (1/4) Epoch 6, batch 700, loss[loss=0.3367, simple_loss=0.3959, pruned_loss=0.1388, over 8541.00 frames. ], tot_loss[loss=0.2794, simple_loss=0.3435, pruned_loss=0.1077, over 1568679.34 frames. 
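The `zipformer.py:1185` lines report per-stack layer-dropout state: a warmup window expressed in batches (`warmup_begin`/`warmup_end`) and the set of encoder layers skipped for the current batch; `num_to_drop=0, layers_to_drop=set()` means nothing was dropped this step. A rough sketch of that kind of schedule; the probabilities here are assumptions chosen only to illustrate the mechanism.

```python
import random

def pick_layers_to_drop(batch_count: float, warmup_begin: float,
                        warmup_end: float, num_layers: int,
                        base_drop_prob: float = 0.075) -> set:
    """Occasionally drop one random encoder layer as regularization,
    more aggressively while batch_count is inside the warmup window
    (illustrative; probabilities are assumed, not the recipe's)."""
    in_warmup = warmup_begin <= batch_count < warmup_end
    p = base_drop_prob * (2.0 if in_warmup else 1.0)
    num_to_drop = 1 if random.random() < p else 0
    layers_to_drop = set(random.sample(range(num_layers), num_to_drop))
    return layers_to_drop  # these layers are skipped in this forward pass
```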
], batch size: 28, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:56:08,624 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,237 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,737 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.998e+02 3.776e+02 4.654e+02 1.221e+03, threshold=7.553e+02, percent-clipped=4.0 +2023-02-06 01:56:40,070 INFO [train.py:901] (1/4) Epoch 6, batch 750, loss[loss=0.3778, simple_loss=0.4172, pruned_loss=0.1692, over 8620.00 frames. ], tot_loss[loss=0.2796, simple_loss=0.3434, pruned_loss=0.1079, over 1579065.73 frames. ], batch size: 34, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:56:42,121 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4681, 1.4364, 2.9820, 1.1802, 2.1275, 3.2641, 3.2606, 2.7230], + device='cuda:1'), covar=tensor([0.1273, 0.1560, 0.0429, 0.2287, 0.0828, 0.0306, 0.0428, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0266, 0.0228, 0.0262, 0.0234, 0.0207, 0.0242, 0.0276], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 01:56:52,792 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 01:57:00,953 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 01:57:15,161 INFO [train.py:901] (1/4) Epoch 6, batch 800, loss[loss=0.2682, simple_loss=0.3364, pruned_loss=0.1, over 8338.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3439, pruned_loss=0.1084, over 1591366.92 frames. 
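Note how the `lr:` field decays smoothly as training proceeds (1.32e-02 near batch 41k, 1.26e-02 a few thousand batches later). icefall recipes schedule this with Eden, which decays in both batch and epoch count; a sketch of its shape, with typical default constants used as assumptions:

```python
def eden_lr(base_lr: float, batch: int, epoch: float,
            lr_batches: float = 5000.0, lr_epochs: float = 6.0) -> float:
    """Eden-style schedule: smooth power-law decay in batches and epochs.
    The lr_batches/lr_epochs defaults are common recipe values (assumed)."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor

# With base_lr=0.05, epoch 6 and batch ~41000 give a value on the order
# of 1e-2, consistent with the lr column in the log above.
```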
], batch size: 49, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:57:16,748 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2582, 1.7166, 1.6390, 0.6566, 1.6869, 1.2088, 0.2984, 1.5193], + device='cuda:1'), covar=tensor([0.0187, 0.0100, 0.0099, 0.0179, 0.0129, 0.0314, 0.0276, 0.0093], + device='cuda:1'), in_proj_covar=tensor([0.0322, 0.0230, 0.0200, 0.0285, 0.0232, 0.0375, 0.0296, 0.0274], + device='cuda:1'), out_proj_covar=tensor([1.1118e-04, 7.6952e-05, 6.7294e-05, 9.6538e-05, 7.9966e-05, 1.3841e-04, + 1.0216e-04, 9.3394e-05], device='cuda:1') +2023-02-06 01:57:21,526 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:22,745 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:24,004 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.937e+02 3.578e+02 4.897e+02 8.076e+02, threshold=7.157e+02, percent-clipped=3.0 +2023-02-06 01:57:30,319 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6048, 2.1203, 2.1952, 0.9574, 2.1561, 1.3380, 0.6178, 1.7054], + device='cuda:1'), covar=tensor([0.0222, 0.0103, 0.0090, 0.0217, 0.0142, 0.0369, 0.0335, 0.0119], + device='cuda:1'), in_proj_covar=tensor([0.0323, 0.0231, 0.0202, 0.0286, 0.0233, 0.0377, 0.0296, 0.0274], + device='cuda:1'), out_proj_covar=tensor([1.1151e-04, 7.6999e-05, 6.7681e-05, 9.6953e-05, 8.0298e-05, 1.3913e-04, + 1.0220e-04, 9.3277e-05], device='cuda:1') +2023-02-06 01:57:38,347 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:46,849 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:49,531 INFO [train.py:901] (1/4) Epoch 6, batch 850, loss[loss=0.2498, simple_loss=0.3238, pruned_loss=0.08785, over 8327.00 frames. ], tot_loss[loss=0.2808, simple_loss=0.3443, pruned_loss=0.1087, over 1599265.36 frames. ], batch size: 25, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:58:23,893 INFO [train.py:901] (1/4) Epoch 6, batch 900, loss[loss=0.2803, simple_loss=0.3424, pruned_loss=0.1091, over 7662.00 frames. ], tot_loss[loss=0.2807, simple_loss=0.3442, pruned_loss=0.1087, over 1600869.94 frames. ], batch size: 19, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:58:33,481 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.871e+02 3.405e+02 4.321e+02 1.147e+03, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 01:58:42,516 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:53,862 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:58,558 INFO [train.py:901] (1/4) Epoch 6, batch 950, loss[loss=0.2992, simple_loss=0.3603, pruned_loss=0.119, over 8242.00 frames. ], tot_loss[loss=0.2786, simple_loss=0.3424, pruned_loss=0.1074, over 1600258.71 frames. 
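In every `train.py:901` line the objective splits into `simple_loss` and `pruned_loss`: pruned RNN-T first computes a cheap full-sum loss on low-dimensional projections, uses its gradients to bound the useful region of the (t, u) lattice, then evaluates the full joiner only inside those bounds. The sketch below follows k2's pruned-RNN-T API; the function names are real but exact signatures differ across k2 versions, and the `joiner` module is caller-supplied, so treat this as illustrative.

```python
import k2
import torch

def pruned_rnnt_loss(joiner: torch.nn.Module, am: torch.Tensor, lm: torch.Tensor,
                     symbols: torch.Tensor, boundary: torch.Tensor,
                     blank: int = 0, s_range: int = 5,
                     simple_scale: float = 0.5) -> torch.Tensor:
    # Pass 1: cheap "simple" loss; its gradients indicate which
    # (t, u) lattice cells carry probability mass.
    simple_loss, (px_grad, py_grad) = k2.rnnt_loss_simple(
        lm=lm, am=am, symbols=symbols, termination_symbol=blank,
        boundary=boundary, return_grad=True,
    )
    # Pass 2: run the full joiner only inside the pruned ranges.
    ranges = k2.get_rnnt_prune_ranges(
        px_grad=px_grad, py_grad=py_grad, boundary=boundary, s_range=s_range,
    )
    am_pruned, lm_pruned = k2.do_rnnt_pruning(am=am, lm=lm, ranges=ranges)
    logits = joiner(am_pruned, lm_pruned)  # caller-supplied joiner network
    pruned_loss = k2.rnnt_loss_pruned(
        logits=logits, symbols=symbols, ranges=ranges,
        termination_symbol=blank, boundary=boundary,
    )
    # The logged `loss` is a weighted combination of the two terms
    # (the weights change over warmup in the actual recipe).
    return simple_scale * simple_loss + pruned_loss
```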
], batch size: 22, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:06,364 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:11,064 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:21,437 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 01:59:26,996 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:28,450 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:32,977 INFO [train.py:901] (1/4) Epoch 6, batch 1000, loss[loss=0.2638, simple_loss=0.336, pruned_loss=0.09581, over 8133.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3425, pruned_loss=0.1071, over 1606755.04 frames. ], batch size: 22, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:41,360 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.042e+02 3.293e+02 3.921e+02 5.074e+02 1.211e+03, threshold=7.843e+02, percent-clipped=6.0 +2023-02-06 01:59:55,295 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 02:00:06,251 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:07,521 INFO [train.py:901] (1/4) Epoch 6, batch 1050, loss[loss=0.2523, simple_loss=0.3173, pruned_loss=0.09367, over 7534.00 frames. ], tot_loss[loss=0.2788, simple_loss=0.3426, pruned_loss=0.1075, over 1603760.14 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:08,224 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 02:00:13,142 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:15,204 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4377, 1.2941, 1.4285, 1.2798, 0.8751, 1.3433, 1.1239, 1.0767], + device='cuda:1'), covar=tensor([0.0607, 0.1238, 0.1779, 0.1429, 0.0613, 0.1554, 0.0730, 0.0590], + device='cuda:1'), in_proj_covar=tensor([0.0131, 0.0176, 0.0219, 0.0182, 0.0129, 0.0187, 0.0140, 0.0150], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 02:00:42,275 INFO [train.py:901] (1/4) Epoch 6, batch 1100, loss[loss=0.2786, simple_loss=0.3444, pruned_loss=0.1065, over 8482.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3428, pruned_loss=0.1073, over 1611339.42 frames. ], batch size: 25, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:46,694 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:51,106 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.937e+02 3.488e+02 4.376e+02 9.981e+02, threshold=6.976e+02, percent-clipped=3.0 +2023-02-06 02:01:16,052 INFO [train.py:901] (1/4) Epoch 6, batch 1150, loss[loss=0.2084, simple_loss=0.2843, pruned_loss=0.06624, over 7781.00 frames. ], tot_loss[loss=0.2768, simple_loss=0.3412, pruned_loss=0.1062, over 1611518.93 frames. 
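The constant `grad_scale: 8.0` field is the loss scale of automatic mixed precision: the loss is multiplied by this factor before backward so small fp16 gradients don't flush to zero, and the scaler skips steps and shrinks the factor when an overflow is detected. A minimal PyTorch AMP step showing the mechanism (`model`, `optimizer`, and `compute_loss` are placeholders):

```python
import torch

scaler = torch.cuda.amp.GradScaler(init_scale=8.0)  # matches grad_scale: 8.0

def train_step(model, optimizer, batch, compute_loss):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():       # forward in reduced precision
        loss = compute_loss(model, batch)
    scaler.scale(loss).backward()         # scaled backward avoids underflow
    scaler.step(optimizer)                # unscales grads; skips on inf/nan
    scaler.update()                       # adapts the scale over time
    return loss.detach()
```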
], batch size: 19, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:18,802 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 02:01:25,448 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:28,764 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2916, 2.7048, 3.3222, 0.9358, 3.2333, 2.0564, 1.5221, 2.0740], + device='cuda:1'), covar=tensor([0.0256, 0.0125, 0.0086, 0.0278, 0.0157, 0.0289, 0.0362, 0.0161], + device='cuda:1'), in_proj_covar=tensor([0.0320, 0.0235, 0.0204, 0.0280, 0.0228, 0.0374, 0.0294, 0.0273], + device='cuda:1'), out_proj_covar=tensor([1.0964e-04, 7.8724e-05, 6.8079e-05, 9.4752e-05, 7.8025e-05, 1.3747e-04, + 1.0116e-04, 9.2538e-05], device='cuda:1') +2023-02-06 02:01:32,278 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 02:01:33,975 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0830, 2.2909, 1.7696, 2.9772, 1.4520, 1.5308, 1.7955, 2.3468], + device='cuda:1'), covar=tensor([0.0789, 0.0911, 0.1355, 0.0389, 0.1320, 0.1691, 0.1341, 0.0884], + device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0249, 0.0280, 0.0224, 0.0244, 0.0276, 0.0283, 0.0256], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 02:01:38,083 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:50,379 INFO [train.py:901] (1/4) Epoch 6, batch 1200, loss[loss=0.2678, simple_loss=0.3281, pruned_loss=0.1037, over 7548.00 frames. ], tot_loss[loss=0.2778, simple_loss=0.3422, pruned_loss=0.1067, over 1617068.27 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:55,793 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:00,253 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.965e+02 3.060e+02 3.864e+02 4.910e+02 1.275e+03, threshold=7.729e+02, percent-clipped=9.0 +2023-02-06 02:02:03,209 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:19,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:24,698 INFO [train.py:901] (1/4) Epoch 6, batch 1250, loss[loss=0.2144, simple_loss=0.2873, pruned_loss=0.07069, over 8098.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.342, pruned_loss=0.1065, over 1621347.41 frames. ], batch size: 21, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:02:29,699 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:59,807 INFO [train.py:901] (1/4) Epoch 6, batch 1300, loss[loss=0.266, simple_loss=0.328, pruned_loss=0.1021, over 8107.00 frames. ], tot_loss[loss=0.2777, simple_loss=0.3421, pruned_loss=0.1067, over 1616281.18 frames. 
], batch size: 23, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:08,597 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.137e+02 4.028e+02 4.813e+02 9.668e+02, threshold=8.056e+02, percent-clipped=5.0 +2023-02-06 02:03:09,500 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:16,174 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.23 vs. limit=5.0 +2023-02-06 02:03:16,612 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0996, 2.0716, 1.9403, 1.8118, 1.3047, 1.8079, 2.0056, 1.9706], + device='cuda:1'), covar=tensor([0.0489, 0.1162, 0.1593, 0.1244, 0.0639, 0.1446, 0.0718, 0.0541], + device='cuda:1'), in_proj_covar=tensor([0.0129, 0.0174, 0.0214, 0.0179, 0.0128, 0.0184, 0.0139, 0.0149], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 02:03:18,929 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 02:03:27,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:34,638 INFO [train.py:901] (1/4) Epoch 6, batch 1350, loss[loss=0.2634, simple_loss=0.3352, pruned_loss=0.09579, over 8249.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3419, pruned_loss=0.1071, over 1613070.33 frames. ], batch size: 24, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:40,202 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5042, 4.4891, 4.1030, 1.8269, 3.9147, 4.0091, 4.1809, 3.4419], + device='cuda:1'), covar=tensor([0.0819, 0.0661, 0.0927, 0.4792, 0.0734, 0.0678, 0.1405, 0.0963], + device='cuda:1'), in_proj_covar=tensor([0.0401, 0.0290, 0.0319, 0.0405, 0.0310, 0.0279, 0.0301, 0.0255], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:03:42,952 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:00,419 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:08,478 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8978, 3.7843, 2.2800, 2.7611, 2.9093, 1.7787, 2.5354, 2.9143], + device='cuda:1'), covar=tensor([0.1327, 0.0299, 0.0865, 0.0668, 0.0659, 0.1170, 0.0914, 0.0844], + device='cuda:1'), in_proj_covar=tensor([0.0340, 0.0237, 0.0310, 0.0294, 0.0311, 0.0311, 0.0335, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 02:04:09,616 INFO [train.py:901] (1/4) Epoch 6, batch 1400, loss[loss=0.2582, simple_loss=0.3303, pruned_loss=0.09307, over 8031.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3416, pruned_loss=0.1067, over 1616098.35 frames. 
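The `scaling.py:679 Whitening` lines are another periodic diagnostic: a metric on the channel covariance of some activation, compared against a limit (`metric=1.38 vs. limit=2.0`). The metric is 1.0 when the covariance spectrum is perfectly white and grows as a few directions dominate. One plausible way to compute such a metric, offered as a reading of the log rather than scaling.py's exact code:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    """x: (num_frames, num_channels). Per channel group, compare the mean
    squared covariance eigenvalue to the squared mean eigenvalue: 1.0 for
    an isotropic (white) spectrum, larger when energy is concentrated."""
    n, c = x.shape
    x = x.reshape(n, num_groups, c // num_groups).transpose(0, 1)  # (g, n, c/g)
    x = x - x.mean(dim=1, keepdim=True)
    cov = x.transpose(1, 2) @ x / n                # per-group covariance
    eigs = torch.linalg.eigvalsh(cov)              # (g, c/g), ascending
    metric = (eigs ** 2).mean() / (eigs.mean() ** 2)
    return metric.item()
```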
], batch size: 22, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:18,110 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.090e+02 3.079e+02 3.704e+02 4.589e+02 8.838e+02, threshold=7.407e+02, percent-clipped=2.0 +2023-02-06 02:04:22,366 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:39,965 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:44,581 INFO [train.py:901] (1/4) Epoch 6, batch 1450, loss[loss=0.268, simple_loss=0.3464, pruned_loss=0.0948, over 8349.00 frames. ], tot_loss[loss=0.2768, simple_loss=0.3408, pruned_loss=0.1064, over 1615397.51 frames. ], batch size: 24, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:47,870 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 02:05:02,018 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6099, 1.8693, 2.2907, 0.9724, 2.2975, 1.4660, 0.6385, 1.7025], + device='cuda:1'), covar=tensor([0.0208, 0.0132, 0.0100, 0.0203, 0.0139, 0.0345, 0.0284, 0.0119], + device='cuda:1'), in_proj_covar=tensor([0.0318, 0.0232, 0.0202, 0.0282, 0.0229, 0.0372, 0.0294, 0.0269], + device='cuda:1'), out_proj_covar=tensor([1.0907e-04, 7.7481e-05, 6.7521e-05, 9.4855e-05, 7.8162e-05, 1.3630e-04, + 1.0143e-04, 9.1382e-05], device='cuda:1') +2023-02-06 02:05:18,593 INFO [train.py:901] (1/4) Epoch 6, batch 1500, loss[loss=0.269, simple_loss=0.3313, pruned_loss=0.1034, over 7406.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3419, pruned_loss=0.1072, over 1615565.81 frames. ], batch size: 17, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:24,650 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:05:27,834 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 2.922e+02 3.542e+02 4.432e+02 1.007e+03, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 02:05:39,393 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8346, 2.1890, 1.6813, 2.7655, 1.2330, 1.4125, 1.5479, 2.1555], + device='cuda:1'), covar=tensor([0.1040, 0.1037, 0.1372, 0.0446, 0.1596, 0.1998, 0.1443, 0.1002], + device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0247, 0.0277, 0.0223, 0.0245, 0.0276, 0.0278, 0.0255], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 02:05:53,225 INFO [train.py:901] (1/4) Epoch 6, batch 1550, loss[loss=0.2771, simple_loss=0.3399, pruned_loss=0.1072, over 8075.00 frames. ], tot_loss[loss=0.2785, simple_loss=0.3421, pruned_loss=0.1074, over 1615942.13 frames. ], batch size: 21, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:53,368 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41966.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:06:28,445 INFO [train.py:901] (1/4) Epoch 6, batch 1600, loss[loss=0.3102, simple_loss=0.3828, pruned_loss=0.1188, over 8468.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3419, pruned_loss=0.1071, over 1615151.77 frames. 
], batch size: 25, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:06:28,510 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:06:37,881 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.135e+02 3.132e+02 3.836e+02 5.392e+02 3.005e+03, threshold=7.672e+02, percent-clipped=11.0 +2023-02-06 02:06:42,325 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.00 vs. limit=5.0 +2023-02-06 02:07:03,792 INFO [train.py:901] (1/4) Epoch 6, batch 1650, loss[loss=0.2943, simple_loss=0.3511, pruned_loss=0.1188, over 8457.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3409, pruned_loss=0.1058, over 1621178.61 frames. ], batch size: 27, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:39,088 INFO [train.py:901] (1/4) Epoch 6, batch 1700, loss[loss=0.3106, simple_loss=0.3729, pruned_loss=0.1242, over 7933.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3401, pruned_loss=0.1055, over 1615724.01 frames. ], batch size: 20, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:47,888 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.826e+02 3.670e+02 4.452e+02 1.049e+03, threshold=7.339e+02, percent-clipped=2.0 +2023-02-06 02:07:49,313 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:07:52,041 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0193, 1.3209, 4.2289, 1.4987, 3.6804, 3.4891, 3.8093, 3.6912], + device='cuda:1'), covar=tensor([0.0485, 0.3759, 0.0453, 0.2822, 0.1173, 0.0706, 0.0502, 0.0588], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0504, 0.0442, 0.0435, 0.0500, 0.0414, 0.0412, 0.0463], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:08:05,249 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4955, 1.5421, 1.5745, 1.4151, 0.9402, 1.7287, 0.1018, 0.9204], + device='cuda:1'), covar=tensor([0.2838, 0.2055, 0.0843, 0.1787, 0.5701, 0.0754, 0.4306, 0.2363], + device='cuda:1'), in_proj_covar=tensor([0.0131, 0.0132, 0.0082, 0.0179, 0.0216, 0.0083, 0.0145, 0.0135], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:08:14,479 INFO [train.py:901] (1/4) Epoch 6, batch 1750, loss[loss=0.2085, simple_loss=0.289, pruned_loss=0.06405, over 6383.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3412, pruned_loss=0.107, over 1613125.48 frames. ], batch size: 14, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:49,383 INFO [train.py:901] (1/4) Epoch 6, batch 1800, loss[loss=0.298, simple_loss=0.3571, pruned_loss=0.1195, over 8135.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3409, pruned_loss=0.1061, over 1613036.20 frames. ], batch size: 22, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:59,173 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 3.032e+02 3.540e+02 4.353e+02 2.015e+03, threshold=7.080e+02, percent-clipped=5.0 +2023-02-06 02:09:24,927 INFO [train.py:901] (1/4) Epoch 6, batch 1850, loss[loss=0.2923, simple_loss=0.3629, pruned_loss=0.1109, over 8334.00 frames. ], tot_loss[loss=0.2778, simple_loss=0.3411, pruned_loss=0.1072, over 1608781.78 frames. 
], batch size: 49, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:09:26,411 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:09:55,347 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42310.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:09:59,286 INFO [train.py:901] (1/4) Epoch 6, batch 1900, loss[loss=0.2943, simple_loss=0.3566, pruned_loss=0.1161, over 8189.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3405, pruned_loss=0.1064, over 1610222.21 frames. ], batch size: 23, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:10:08,776 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.992e+02 2.715e+02 3.297e+02 4.142e+02 7.213e+02, threshold=6.594e+02, percent-clipped=2.0 +2023-02-06 02:10:23,963 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 02:10:34,055 INFO [train.py:901] (1/4) Epoch 6, batch 1950, loss[loss=0.2384, simple_loss=0.3106, pruned_loss=0.08315, over 8447.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3386, pruned_loss=0.1048, over 1614011.66 frames. ], batch size: 27, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:10:36,639 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 02:10:46,215 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:48,986 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42387.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:56,196 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 02:10:56,323 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:06,414 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:08,983 INFO [train.py:901] (1/4) Epoch 6, batch 2000, loss[loss=0.2732, simple_loss=0.3493, pruned_loss=0.09859, over 8499.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3378, pruned_loss=0.1041, over 1615000.12 frames. 
], batch size: 28, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:11:15,185 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42425.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:11:18,300 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.766e+02 3.581e+02 4.303e+02 8.011e+02, threshold=7.162e+02, percent-clipped=3.0 +2023-02-06 02:11:19,861 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2184, 1.4915, 1.2475, 1.9565, 0.7764, 1.0720, 1.1202, 1.5071], + device='cuda:1'), covar=tensor([0.1208, 0.0984, 0.1568, 0.0605, 0.1527, 0.2052, 0.1230, 0.0878], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0251, 0.0277, 0.0222, 0.0245, 0.0277, 0.0279, 0.0254], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 02:11:21,925 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4526, 1.7690, 1.8603, 0.8554, 1.9726, 1.3433, 0.4818, 1.6822], + device='cuda:1'), covar=tensor([0.0194, 0.0128, 0.0127, 0.0228, 0.0133, 0.0403, 0.0332, 0.0112], + device='cuda:1'), in_proj_covar=tensor([0.0330, 0.0242, 0.0210, 0.0294, 0.0237, 0.0386, 0.0308, 0.0280], + device='cuda:1'), out_proj_covar=tensor([1.1273e-04, 8.0154e-05, 7.0017e-05, 9.8503e-05, 8.0558e-05, 1.4074e-04, + 1.0550e-04, 9.4484e-05], device='cuda:1') +2023-02-06 02:11:43,879 INFO [train.py:901] (1/4) Epoch 6, batch 2050, loss[loss=0.243, simple_loss=0.3181, pruned_loss=0.08392, over 8353.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3391, pruned_loss=0.1052, over 1612380.34 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:11:52,269 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.89 vs. limit=2.0 +2023-02-06 02:12:17,690 INFO [train.py:901] (1/4) Epoch 6, batch 2100, loss[loss=0.2857, simple_loss=0.3503, pruned_loss=0.1106, over 8359.00 frames. ], tot_loss[loss=0.2749, simple_loss=0.3395, pruned_loss=0.1052, over 1615646.65 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:23,854 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:12:24,530 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5626, 4.3475, 3.9875, 1.7089, 3.9743, 3.9785, 4.1944, 3.6379], + device='cuda:1'), covar=tensor([0.0736, 0.0550, 0.0948, 0.4574, 0.0694, 0.0804, 0.0998, 0.0893], + device='cuda:1'), in_proj_covar=tensor([0.0394, 0.0293, 0.0324, 0.0402, 0.0316, 0.0283, 0.0306, 0.0258], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:12:27,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.916e+02 3.481e+02 4.572e+02 1.310e+03, threshold=6.962e+02, percent-clipped=2.0 +2023-02-06 02:12:50,563 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1787, 1.0637, 1.0661, 1.1838, 0.8014, 1.2485, 0.1644, 0.9399], + device='cuda:1'), covar=tensor([0.2830, 0.2278, 0.1212, 0.1919, 0.5633, 0.0940, 0.4437, 0.2122], + device='cuda:1'), in_proj_covar=tensor([0.0134, 0.0133, 0.0085, 0.0180, 0.0221, 0.0084, 0.0142, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:12:52,386 INFO [train.py:901] (1/4) Epoch 6, batch 2150, loss[loss=0.281, simple_loss=0.3519, pruned_loss=0.105, over 8563.00 frames. 
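The periodic `attn_weights_entropy` dumps report the entropy of each attention head's weight distribution: values near zero mean a head is locked onto a single position, values near `log(num_keys)` mean it spreads attention uniformly. A sketch of the computation, with the tensor layout assumed:

```python
import torch

def attn_weights_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """attn_weights: (num_heads, num_queries, num_keys) with rows
    summing to 1. Returns the mean entropy per head, in nats."""
    eps = 1.0e-20  # guard against log(0)
    entropy = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return entropy.mean(dim=-1)  # average over queries -> (num_heads,)
```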
], tot_loss[loss=0.2755, simple_loss=0.3399, pruned_loss=0.1056, over 1615046.09 frames. ], batch size: 49, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:58,003 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0190, 1.5627, 2.3544, 1.9490, 2.0195, 1.8451, 1.4727, 0.6107], + device='cuda:1'), covar=tensor([0.1966, 0.2149, 0.0538, 0.1069, 0.0831, 0.1146, 0.1083, 0.2000], + device='cuda:1'), in_proj_covar=tensor([0.0777, 0.0715, 0.0619, 0.0714, 0.0803, 0.0661, 0.0623, 0.0655], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:13:27,037 INFO [train.py:901] (1/4) Epoch 6, batch 2200, loss[loss=0.3055, simple_loss=0.3705, pruned_loss=0.1202, over 8245.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3404, pruned_loss=0.1064, over 1612463.88 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:13:36,152 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 3.020e+02 3.729e+02 5.072e+02 1.122e+03, threshold=7.459e+02, percent-clipped=5.0 +2023-02-06 02:13:43,115 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:13:45,254 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 02:13:59,701 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:00,834 INFO [train.py:901] (1/4) Epoch 6, batch 2250, loss[loss=0.2403, simple_loss=0.3213, pruned_loss=0.07969, over 8471.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3405, pruned_loss=0.106, over 1613612.68 frames. ], batch size: 25, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:01,614 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:06,424 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8561, 1.5491, 3.2448, 1.3179, 2.1961, 3.6225, 3.5443, 3.0533], + device='cuda:1'), covar=tensor([0.0972, 0.1283, 0.0320, 0.1803, 0.0720, 0.0200, 0.0295, 0.0531], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0268, 0.0227, 0.0263, 0.0234, 0.0207, 0.0249, 0.0280], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 02:14:11,438 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 02:14:11,882 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42681.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:14:26,793 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 02:14:29,250 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42706.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:14:35,836 INFO [train.py:901] (1/4) Epoch 6, batch 2300, loss[loss=0.2706, simple_loss=0.3215, pruned_loss=0.1099, over 7798.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3411, pruned_loss=0.1067, over 1615868.85 frames. 
], batch size: 19, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:45,247 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.977e+02 3.532e+02 4.435e+02 7.362e+02, threshold=7.063e+02, percent-clipped=0.0 +2023-02-06 02:14:53,243 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:15:11,259 INFO [train.py:901] (1/4) Epoch 6, batch 2350, loss[loss=0.3934, simple_loss=0.4259, pruned_loss=0.1805, over 6611.00 frames. ], tot_loss[loss=0.2765, simple_loss=0.3403, pruned_loss=0.1063, over 1610967.21 frames. ], batch size: 71, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:46,903 INFO [train.py:901] (1/4) Epoch 6, batch 2400, loss[loss=0.2851, simple_loss=0.3415, pruned_loss=0.1144, over 7784.00 frames. ], tot_loss[loss=0.2765, simple_loss=0.3404, pruned_loss=0.1063, over 1610348.63 frames. ], batch size: 19, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:56,300 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.099e+02 3.712e+02 4.452e+02 1.076e+03, threshold=7.425e+02, percent-clipped=4.0 +2023-02-06 02:16:14,313 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:20,855 INFO [train.py:901] (1/4) Epoch 6, batch 2450, loss[loss=0.2986, simple_loss=0.3658, pruned_loss=0.1157, over 8608.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3411, pruned_loss=0.1064, over 1613213.79 frames. ], batch size: 39, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:16:22,281 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:29,141 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:42,215 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:47,007 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9758, 1.8425, 3.2137, 2.6440, 2.5482, 1.7896, 1.4033, 1.5131], + device='cuda:1'), covar=tensor([0.3124, 0.3116, 0.0619, 0.1358, 0.1492, 0.1681, 0.1643, 0.2564], + device='cuda:1'), in_proj_covar=tensor([0.0788, 0.0721, 0.0620, 0.0720, 0.0806, 0.0664, 0.0628, 0.0656], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:16:54,606 INFO [train.py:901] (1/4) Epoch 6, batch 2500, loss[loss=0.2524, simple_loss=0.3311, pruned_loss=0.08685, over 8104.00 frames. ], tot_loss[loss=0.2765, simple_loss=0.3405, pruned_loss=0.1062, over 1613972.75 frames. 
], batch size: 23, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:05,200 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 3.094e+02 4.004e+02 4.995e+02 1.056e+03, threshold=8.009e+02, percent-clipped=4.0 +2023-02-06 02:17:15,537 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4742, 1.5056, 1.6598, 1.3138, 0.9396, 1.7329, 0.0979, 0.9137], + device='cuda:1'), covar=tensor([0.2693, 0.2294, 0.0908, 0.2120, 0.5659, 0.0714, 0.4251, 0.2678], + device='cuda:1'), in_proj_covar=tensor([0.0136, 0.0136, 0.0085, 0.0185, 0.0223, 0.0086, 0.0144, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:17:15,681 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 02:17:25,672 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 02:17:29,433 INFO [train.py:901] (1/4) Epoch 6, batch 2550, loss[loss=0.2688, simple_loss=0.3448, pruned_loss=0.09637, over 8512.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3418, pruned_loss=0.1063, over 1617448.43 frames. ], batch size: 28, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:41,643 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:01,216 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:04,437 INFO [train.py:901] (1/4) Epoch 6, batch 2600, loss[loss=0.276, simple_loss=0.3505, pruned_loss=0.1007, over 8445.00 frames. ], tot_loss[loss=0.2795, simple_loss=0.3436, pruned_loss=0.1077, over 1618972.94 frames. ], batch size: 27, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:13,988 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.052e+02 3.779e+02 5.019e+02 1.784e+03, threshold=7.558e+02, percent-clipped=4.0 +2023-02-06 02:18:39,587 INFO [train.py:901] (1/4) Epoch 6, batch 2650, loss[loss=0.2611, simple_loss=0.3188, pruned_loss=0.1017, over 7212.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3432, pruned_loss=0.1067, over 1621764.92 frames. 
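In each `train.py:901` line, `loss[...]` is the current batch while `tot_loss[...]` is a running aggregate weighted by frame count, which is why it is reported over roughly 1.6 million frames rather than over batches; the fractional frame totals suggest the accumulator is decayed rather than a plain sum. A sketch of that bookkeeping, with the decay factor as an assumption:

```python
class FrameWeightedLoss:
    """Running loss behind `tot_loss[... over N frames]`: accumulate
    loss * frames and frames with a mild exponential decay (assumed),
    so the average tracks recent batches."""

    def __init__(self, decay: float = 0.999):
        self.decay = decay
        self.loss_sum = 0.0
        self.frames = 0.0

    def update(self, batch_loss: float, batch_frames: float) -> None:
        self.loss_sum = self.decay * self.loss_sum + batch_loss * batch_frames
        self.frames = self.decay * self.frames + batch_frames

    @property
    def value(self) -> float:
        return self.loss_sum / max(self.frames, 1.0)
```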
], batch size: 16, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:47,154 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7165, 2.4962, 2.9271, 2.0103, 1.2711, 2.7986, 0.7220, 1.6378], + device='cuda:1'), covar=tensor([0.2862, 0.2271, 0.0798, 0.3144, 0.6429, 0.0559, 0.5718, 0.2694], + device='cuda:1'), in_proj_covar=tensor([0.0133, 0.0131, 0.0081, 0.0179, 0.0217, 0.0082, 0.0141, 0.0133], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:19:05,111 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1280, 1.0844, 1.1230, 1.0736, 0.7533, 1.2249, 0.0337, 0.7574], + device='cuda:1'), covar=tensor([0.2809, 0.2520, 0.1018, 0.1884, 0.5370, 0.0792, 0.4713, 0.2672], + device='cuda:1'), in_proj_covar=tensor([0.0134, 0.0133, 0.0083, 0.0182, 0.0219, 0.0084, 0.0144, 0.0136], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:19:11,158 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:14,342 INFO [train.py:901] (1/4) Epoch 6, batch 2700, loss[loss=0.2548, simple_loss=0.3144, pruned_loss=0.09761, over 7302.00 frames. ], tot_loss[loss=0.2799, simple_loss=0.3444, pruned_loss=0.1078, over 1622036.20 frames. ], batch size: 16, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:19:15,202 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4343, 1.7855, 2.8314, 1.1531, 2.0602, 1.7283, 1.4900, 1.7450], + device='cuda:1'), covar=tensor([0.1573, 0.1954, 0.0679, 0.3606, 0.1395, 0.2556, 0.1590, 0.2114], + device='cuda:1'), in_proj_covar=tensor([0.0472, 0.0462, 0.0529, 0.0542, 0.0594, 0.0528, 0.0449, 0.0601], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 02:19:20,969 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:23,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.935e+02 3.532e+02 4.548e+02 1.003e+03, threshold=7.064e+02, percent-clipped=2.0 +2023-02-06 02:19:28,350 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:45,814 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 02:19:47,954 INFO [train.py:901] (1/4) Epoch 6, batch 2750, loss[loss=0.2918, simple_loss=0.367, pruned_loss=0.1084, over 8027.00 frames. ], tot_loss[loss=0.2779, simple_loss=0.3427, pruned_loss=0.1065, over 1616214.61 frames. 
], batch size: 22, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:19:57,321 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5008, 1.9396, 2.1468, 1.0909, 2.2752, 1.4287, 0.8073, 1.7593], + device='cuda:1'), covar=tensor([0.0258, 0.0110, 0.0071, 0.0202, 0.0123, 0.0351, 0.0288, 0.0125], + device='cuda:1'), in_proj_covar=tensor([0.0321, 0.0235, 0.0206, 0.0288, 0.0227, 0.0381, 0.0298, 0.0273], + device='cuda:1'), out_proj_covar=tensor([1.0839e-04, 7.7382e-05, 6.8384e-05, 9.5718e-05, 7.6279e-05, 1.3795e-04, + 1.0173e-04, 9.1606e-05], device='cuda:1') +2023-02-06 02:20:02,613 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3123, 1.9539, 3.2599, 1.0262, 2.6964, 1.6066, 1.6270, 2.0995], + device='cuda:1'), covar=tensor([0.1911, 0.1958, 0.0811, 0.3787, 0.1420, 0.2912, 0.1770, 0.2452], + device='cuda:1'), in_proj_covar=tensor([0.0469, 0.0460, 0.0526, 0.0535, 0.0589, 0.0523, 0.0444, 0.0590], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 02:20:22,671 INFO [train.py:901] (1/4) Epoch 6, batch 2800, loss[loss=0.2943, simple_loss=0.3663, pruned_loss=0.1111, over 8480.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3429, pruned_loss=0.1066, over 1616163.94 frames. ], batch size: 28, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:20:26,179 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:32,060 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.696e+02 3.315e+02 4.271e+02 8.534e+02, threshold=6.630e+02, percent-clipped=4.0 +2023-02-06 02:20:39,132 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:40,379 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:55,849 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:56,925 INFO [train.py:901] (1/4) Epoch 6, batch 2850, loss[loss=0.2828, simple_loss=0.3531, pruned_loss=0.1063, over 8359.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3409, pruned_loss=0.1047, over 1615947.91 frames. ], batch size: 24, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:11,145 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6376, 4.0352, 2.1910, 1.9841, 2.6694, 1.4683, 2.1388, 2.8587], + device='cuda:1'), covar=tensor([0.1713, 0.0346, 0.1083, 0.1008, 0.0831, 0.1603, 0.1420, 0.0844], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0237, 0.0312, 0.0304, 0.0317, 0.0313, 0.0340, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 02:21:31,689 INFO [train.py:901] (1/4) Epoch 6, batch 2900, loss[loss=0.2266, simple_loss=0.2993, pruned_loss=0.07702, over 8233.00 frames. ], tot_loss[loss=0.275, simple_loss=0.3401, pruned_loss=0.1049, over 1613052.90 frames. 
], batch size: 22, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:41,574 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.996e+02 3.885e+02 4.976e+02 9.964e+02, threshold=7.771e+02, percent-clipped=9.0 +2023-02-06 02:21:46,023 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:00,469 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:01,693 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 02:22:07,135 INFO [train.py:901] (1/4) Epoch 6, batch 2950, loss[loss=0.2612, simple_loss=0.3489, pruned_loss=0.08676, over 8316.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3417, pruned_loss=0.106, over 1616121.47 frames. ], batch size: 25, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:17,904 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:35,335 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:41,761 INFO [train.py:901] (1/4) Epoch 6, batch 3000, loss[loss=0.3135, simple_loss=0.3755, pruned_loss=0.1258, over 8564.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3413, pruned_loss=0.1058, over 1620685.57 frames. ], batch size: 34, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:41,762 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 02:22:53,878 INFO [train.py:935] (1/4) Epoch 6, validation: loss=0.2158, simple_loss=0.3124, pruned_loss=0.05962, over 944034.00 frames. +2023-02-06 02:22:53,879 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6607MB +2023-02-06 02:23:03,877 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.288e+02 4.080e+02 5.339e+02 1.082e+03, threshold=8.161e+02, percent-clipped=5.0 +2023-02-06 02:23:20,244 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4297, 1.9259, 2.1153, 0.9446, 2.2492, 1.3988, 0.4799, 1.7287], + device='cuda:1'), covar=tensor([0.0251, 0.0111, 0.0110, 0.0198, 0.0116, 0.0333, 0.0322, 0.0110], + device='cuda:1'), in_proj_covar=tensor([0.0325, 0.0238, 0.0208, 0.0293, 0.0231, 0.0384, 0.0300, 0.0274], + device='cuda:1'), out_proj_covar=tensor([1.0977e-04, 7.8215e-05, 6.8408e-05, 9.7167e-05, 7.7805e-05, 1.3890e-04, + 1.0211e-04, 9.1620e-05], device='cuda:1') +2023-02-06 02:23:28,759 INFO [train.py:901] (1/4) Epoch 6, batch 3050, loss[loss=0.2896, simple_loss=0.3508, pruned_loss=0.1142, over 8677.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3411, pruned_loss=0.1059, over 1623149.18 frames. ], batch size: 34, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:03,332 INFO [train.py:901] (1/4) Epoch 6, batch 3100, loss[loss=0.2802, simple_loss=0.3323, pruned_loss=0.114, over 7980.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3404, pruned_loss=0.1056, over 1619870.11 frames. 
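Every few thousand batches the loop pauses for validation (`Computing validation loss` above, at batch 3000): the model switches to eval mode, a frame-weighted loss is accumulated over the whole dev set, and peak GPU memory is reported. A minimal sketch with placeholder loader and loss function:

```python
import torch

def compute_validation_loss(model, valid_loader, compute_loss, device) -> float:
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = compute_loss(model, batch)  # tensor, float
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()
    max_mem_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
    print(f"Maximum memory allocated so far is {max_mem_mb}MB")
    return tot_loss / tot_frames  # the frame-weighted `validation: loss=...`
```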
], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:12,759 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.871e+02 3.509e+02 4.582e+02 1.148e+03, threshold=7.017e+02, percent-clipped=4.0 +2023-02-06 02:24:14,186 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:24:36,351 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7482, 1.3143, 3.3479, 1.2062, 2.1382, 3.7106, 3.7033, 3.1305], + device='cuda:1'), covar=tensor([0.1143, 0.1587, 0.0352, 0.1989, 0.0896, 0.0221, 0.0392, 0.0653], + device='cuda:1'), in_proj_covar=tensor([0.0240, 0.0274, 0.0228, 0.0265, 0.0244, 0.0212, 0.0252, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 02:24:38,268 INFO [train.py:901] (1/4) Epoch 6, batch 3150, loss[loss=0.3752, simple_loss=0.4011, pruned_loss=0.1747, over 6597.00 frames. ], tot_loss[loss=0.2761, simple_loss=0.3409, pruned_loss=0.1057, over 1615845.67 frames. ], batch size: 73, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:57,078 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:10,983 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:13,476 INFO [train.py:901] (1/4) Epoch 6, batch 3200, loss[loss=0.3202, simple_loss=0.3762, pruned_loss=0.1321, over 8328.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3412, pruned_loss=0.1056, over 1619157.02 frames. ], batch size: 26, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:25:14,376 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:23,589 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.828e+02 3.409e+02 4.222e+02 1.719e+03, threshold=6.818e+02, percent-clipped=4.0 +2023-02-06 02:25:28,578 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43637.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:49,155 INFO [train.py:901] (1/4) Epoch 6, batch 3250, loss[loss=0.4689, simple_loss=0.4618, pruned_loss=0.238, over 6916.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.3418, pruned_loss=0.1058, over 1621802.32 frames. ], batch size: 75, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:26:23,420 INFO [train.py:901] (1/4) Epoch 6, batch 3300, loss[loss=0.2374, simple_loss=0.3033, pruned_loss=0.08579, over 7971.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.34, pruned_loss=0.1051, over 1614566.89 frames. 
], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:26:33,006 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.968e+02 3.670e+02 5.054e+02 9.057e+02, threshold=7.341e+02, percent-clipped=6.0 +2023-02-06 02:26:57,497 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1651, 3.0811, 2.8810, 1.3841, 2.7913, 2.8503, 2.9413, 2.5807], + device='cuda:1'), covar=tensor([0.1159, 0.0799, 0.1124, 0.4728, 0.1024, 0.1042, 0.1468, 0.1131], + device='cuda:1'), in_proj_covar=tensor([0.0398, 0.0296, 0.0333, 0.0412, 0.0321, 0.0287, 0.0318, 0.0260], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:26:58,037 INFO [train.py:901] (1/4) Epoch 6, batch 3350, loss[loss=0.3038, simple_loss=0.3623, pruned_loss=0.1226, over 8242.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3414, pruned_loss=0.1057, over 1615990.21 frames. ], batch size: 24, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:27:25,489 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:27:33,326 INFO [train.py:901] (1/4) Epoch 6, batch 3400, loss[loss=0.3301, simple_loss=0.3946, pruned_loss=0.1328, over 8348.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3415, pruned_loss=0.1057, over 1613549.18 frames. ], batch size: 26, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:27:42,440 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.693e+02 3.397e+02 4.441e+02 9.371e+02, threshold=6.793e+02, percent-clipped=2.0 +2023-02-06 02:28:07,540 INFO [train.py:901] (1/4) Epoch 6, batch 3450, loss[loss=0.2739, simple_loss=0.3245, pruned_loss=0.1116, over 7820.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3414, pruned_loss=0.1056, over 1614208.45 frames. ], batch size: 20, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:14,344 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:42,237 INFO [train.py:901] (1/4) Epoch 6, batch 3500, loss[loss=0.2029, simple_loss=0.2865, pruned_loss=0.05965, over 7981.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.34, pruned_loss=0.1047, over 1615385.32 frames. ], batch size: 21, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:50,512 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:52,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 3.111e+02 3.775e+02 4.956e+02 7.195e+02, threshold=7.550e+02, percent-clipped=1.0 +2023-02-06 02:28:59,190 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 02:29:06,064 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 02:29:16,408 INFO [train.py:901] (1/4) Epoch 6, batch 3550, loss[loss=0.2747, simple_loss=0.3352, pruned_loss=0.1071, over 8077.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3399, pruned_loss=0.1055, over 1612681.18 frames. 
], batch size: 21, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:29:24,736 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:34,248 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:52,633 INFO [train.py:901] (1/4) Epoch 6, batch 3600, loss[loss=0.2431, simple_loss=0.2953, pruned_loss=0.09541, over 7812.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3388, pruned_loss=0.1049, over 1608279.51 frames. ], batch size: 19, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:30:02,263 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.983e+02 3.632e+02 4.470e+02 1.452e+03, threshold=7.265e+02, percent-clipped=1.0 +2023-02-06 02:30:27,003 INFO [train.py:901] (1/4) Epoch 6, batch 3650, loss[loss=0.2804, simple_loss=0.3406, pruned_loss=0.1101, over 7532.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3384, pruned_loss=0.1043, over 1606367.52 frames. ], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:00,588 INFO [train.py:901] (1/4) Epoch 6, batch 3700, loss[loss=0.3117, simple_loss=0.3692, pruned_loss=0.1272, over 8323.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3397, pruned_loss=0.1053, over 1611353.84 frames. ], batch size: 49, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:01,285 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 02:31:11,218 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 3.019e+02 3.651e+02 4.413e+02 8.839e+02, threshold=7.303e+02, percent-clipped=3.0 +2023-02-06 02:31:23,984 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:31:35,822 INFO [train.py:901] (1/4) Epoch 6, batch 3750, loss[loss=0.2792, simple_loss=0.344, pruned_loss=0.1072, over 8354.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3389, pruned_loss=0.1049, over 1608153.28 frames. ], batch size: 24, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:49,949 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8377, 1.6116, 3.1622, 1.2690, 2.1795, 3.3533, 3.4387, 2.6450], + device='cuda:1'), covar=tensor([0.1106, 0.1401, 0.0402, 0.2082, 0.0828, 0.0426, 0.0487, 0.0911], + device='cuda:1'), in_proj_covar=tensor([0.0241, 0.0272, 0.0225, 0.0267, 0.0237, 0.0211, 0.0248, 0.0282], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 02:32:09,332 INFO [train.py:901] (1/4) Epoch 6, batch 3800, loss[loss=0.3029, simple_loss=0.3589, pruned_loss=0.1234, over 8679.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3385, pruned_loss=0.1051, over 1606609.08 frames. 
], batch size: 34, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:19,589 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.008e+02 3.761e+02 4.930e+02 1.044e+03, threshold=7.521e+02, percent-clipped=7.0 +2023-02-06 02:32:32,456 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:44,266 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:45,463 INFO [train.py:901] (1/4) Epoch 6, batch 3850, loss[loss=0.254, simple_loss=0.318, pruned_loss=0.09497, over 7794.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.3392, pruned_loss=0.1057, over 1607941.77 frames. ], batch size: 19, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:47,013 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4422, 1.3620, 4.6399, 1.7410, 4.0132, 3.8612, 4.1696, 4.0394], + device='cuda:1'), covar=tensor([0.0451, 0.3879, 0.0434, 0.2715, 0.1093, 0.0710, 0.0443, 0.0573], + device='cuda:1'), in_proj_covar=tensor([0.0361, 0.0504, 0.0449, 0.0440, 0.0506, 0.0416, 0.0423, 0.0478], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:32:48,960 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:49,789 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:51,204 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4419, 1.6880, 1.5503, 1.3475, 1.2157, 1.4070, 1.8687, 1.4599], + device='cuda:1'), covar=tensor([0.0553, 0.1172, 0.1812, 0.1396, 0.0618, 0.1518, 0.0691, 0.0604], + device='cuda:1'), in_proj_covar=tensor([0.0126, 0.0171, 0.0213, 0.0177, 0.0121, 0.0181, 0.0137, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 02:33:02,827 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 02:33:20,476 INFO [train.py:901] (1/4) Epoch 6, batch 3900, loss[loss=0.2864, simple_loss=0.3562, pruned_loss=0.1083, over 8588.00 frames. ], tot_loss[loss=0.2757, simple_loss=0.3399, pruned_loss=0.1057, over 1611025.83 frames. ], batch size: 31, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:33:23,926 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:30,569 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 2.909e+02 3.535e+02 4.398e+02 8.405e+02, threshold=7.069e+02, percent-clipped=2.0 +2023-02-06 02:33:56,224 INFO [train.py:901] (1/4) Epoch 6, batch 3950, loss[loss=0.3221, simple_loss=0.3767, pruned_loss=0.1337, over 8523.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3402, pruned_loss=0.1057, over 1617549.18 frames. 
], batch size: 28, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:09,754 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:34:17,818 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1665, 1.3271, 2.3328, 1.0664, 2.1149, 2.5353, 2.4881, 2.1422], + device='cuda:1'), covar=tensor([0.0977, 0.1034, 0.0475, 0.1835, 0.0531, 0.0359, 0.0507, 0.0725], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0272, 0.0226, 0.0267, 0.0235, 0.0211, 0.0248, 0.0282], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 02:34:30,798 INFO [train.py:901] (1/4) Epoch 6, batch 4000, loss[loss=0.2603, simple_loss=0.3404, pruned_loss=0.09009, over 8194.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3389, pruned_loss=0.1047, over 1616737.19 frames. ], batch size: 23, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:40,322 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.014e+02 2.805e+02 3.702e+02 4.857e+02 8.487e+02, threshold=7.405e+02, percent-clipped=7.0 +2023-02-06 02:34:44,616 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:34:59,497 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.42 vs. limit=5.0 +2023-02-06 02:35:05,731 INFO [train.py:901] (1/4) Epoch 6, batch 4050, loss[loss=0.2695, simple_loss=0.3378, pruned_loss=0.1006, over 8086.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3379, pruned_loss=0.1037, over 1613457.39 frames. ], batch size: 21, lr: 1.27e-02, grad_scale: 16.0 +2023-02-06 02:35:39,370 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3274, 2.2447, 1.6805, 2.0913, 1.8135, 1.4065, 1.6672, 1.8393], + device='cuda:1'), covar=tensor([0.0961, 0.0325, 0.0811, 0.0358, 0.0516, 0.1026, 0.0642, 0.0578], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0236, 0.0310, 0.0306, 0.0322, 0.0314, 0.0337, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 02:35:41,172 INFO [train.py:901] (1/4) Epoch 6, batch 4100, loss[loss=0.2855, simple_loss=0.3536, pruned_loss=0.1087, over 8282.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3383, pruned_loss=0.1036, over 1614914.30 frames. ], batch size: 23, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:35:44,018 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:50,475 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.131e+02 3.987e+02 5.314e+02 1.327e+03, threshold=7.973e+02, percent-clipped=4.0 +2023-02-06 02:36:00,534 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:00,596 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:14,411 INFO [train.py:901] (1/4) Epoch 6, batch 4150, loss[loss=0.2636, simple_loss=0.3322, pruned_loss=0.09744, over 8106.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.3402, pruned_loss=0.1052, over 1615747.68 frames. 
], batch size: 23, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:15,789 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:31,824 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6564, 2.3930, 4.5502, 1.3284, 2.9501, 2.1722, 1.6935, 2.5901], + device='cuda:1'), covar=tensor([0.1466, 0.1855, 0.0498, 0.3259, 0.1473, 0.2349, 0.1524, 0.2222], + device='cuda:1'), in_proj_covar=tensor([0.0466, 0.0455, 0.0530, 0.0533, 0.0581, 0.0520, 0.0441, 0.0583], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 02:36:49,812 INFO [train.py:901] (1/4) Epoch 6, batch 4200, loss[loss=0.2613, simple_loss=0.3251, pruned_loss=0.09873, over 8246.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3394, pruned_loss=0.1044, over 1615862.38 frames. ], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:58,987 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 2.806e+02 3.559e+02 4.787e+02 1.284e+03, threshold=7.119e+02, percent-clipped=4.0 +2023-02-06 02:37:05,647 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 02:37:07,969 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:12,004 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6756, 2.2348, 4.3144, 1.2527, 3.0079, 2.1104, 1.6813, 2.5107], + device='cuda:1'), covar=tensor([0.1396, 0.1868, 0.0584, 0.3176, 0.1203, 0.2322, 0.1495, 0.2161], + device='cuda:1'), in_proj_covar=tensor([0.0462, 0.0452, 0.0530, 0.0531, 0.0578, 0.0518, 0.0438, 0.0582], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 02:37:23,786 INFO [train.py:901] (1/4) Epoch 6, batch 4250, loss[loss=0.321, simple_loss=0.3839, pruned_loss=0.1291, over 8666.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.34, pruned_loss=0.1042, over 1620678.48 frames. ], batch size: 34, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:37:24,684 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:29,255 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 02:37:41,572 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:58,491 INFO [train.py:901] (1/4) Epoch 6, batch 4300, loss[loss=0.2263, simple_loss=0.297, pruned_loss=0.07782, over 8253.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3404, pruned_loss=0.1044, over 1625418.54 frames. 
], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:38:00,033 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:38:08,670 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.771e+02 3.321e+02 4.102e+02 9.930e+02, threshold=6.641e+02, percent-clipped=2.0 +2023-02-06 02:38:17,344 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2498, 1.1578, 4.4641, 1.6473, 3.7990, 3.5745, 3.9541, 3.8683], + device='cuda:1'), covar=tensor([0.0485, 0.4139, 0.0376, 0.2791, 0.1109, 0.0695, 0.0511, 0.0578], + device='cuda:1'), in_proj_covar=tensor([0.0363, 0.0504, 0.0456, 0.0442, 0.0512, 0.0421, 0.0422, 0.0478], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:38:33,192 INFO [train.py:901] (1/4) Epoch 6, batch 4350, loss[loss=0.2384, simple_loss=0.3041, pruned_loss=0.08633, over 8028.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3401, pruned_loss=0.1041, over 1618534.55 frames. ], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:00,031 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 02:39:06,572 INFO [train.py:901] (1/4) Epoch 6, batch 4400, loss[loss=0.2406, simple_loss=0.3062, pruned_loss=0.08752, over 7660.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3414, pruned_loss=0.1053, over 1616636.74 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:17,273 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 3.434e+02 4.206e+02 5.183e+02 1.151e+03, threshold=8.413e+02, percent-clipped=11.0 +2023-02-06 02:39:40,225 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 02:39:42,260 INFO [train.py:901] (1/4) Epoch 6, batch 4450, loss[loss=0.2953, simple_loss=0.3689, pruned_loss=0.1108, over 8106.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3429, pruned_loss=0.1068, over 1614709.98 frames. ], batch size: 23, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:58,544 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:13,884 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:16,361 INFO [train.py:901] (1/4) Epoch 6, batch 4500, loss[loss=0.3337, simple_loss=0.3848, pruned_loss=0.1413, over 8724.00 frames. ], tot_loss[loss=0.279, simple_loss=0.3434, pruned_loss=0.1073, over 1615664.28 frames. ], batch size: 40, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:40:26,436 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.100e+02 3.740e+02 5.266e+02 1.703e+03, threshold=7.479e+02, percent-clipped=4.0 +2023-02-06 02:40:31,754 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 02:40:48,305 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8577, 4.1284, 2.3492, 2.8753, 3.0583, 1.9799, 2.4404, 3.1552], + device='cuda:1'), covar=tensor([0.1301, 0.0236, 0.0843, 0.0590, 0.0599, 0.1079, 0.0940, 0.0671], + device='cuda:1'), in_proj_covar=tensor([0.0343, 0.0235, 0.0307, 0.0300, 0.0313, 0.0311, 0.0332, 0.0302], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 02:40:52,040 INFO [train.py:901] (1/4) Epoch 6, batch 4550, loss[loss=0.3376, simple_loss=0.3795, pruned_loss=0.1479, over 8251.00 frames. ], tot_loss[loss=0.279, simple_loss=0.3435, pruned_loss=0.1073, over 1616261.22 frames. ], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:18,811 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:27,066 INFO [train.py:901] (1/4) Epoch 6, batch 4600, loss[loss=0.3126, simple_loss=0.3569, pruned_loss=0.1342, over 6816.00 frames. ], tot_loss[loss=0.2765, simple_loss=0.3413, pruned_loss=0.1059, over 1612352.36 frames. ], batch size: 71, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:34,845 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:36,664 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.977e+02 3.732e+02 4.434e+02 1.135e+03, threshold=7.465e+02, percent-clipped=1.0 +2023-02-06 02:42:02,768 INFO [train.py:901] (1/4) Epoch 6, batch 4650, loss[loss=0.3159, simple_loss=0.3613, pruned_loss=0.1352, over 7979.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3417, pruned_loss=0.1062, over 1615619.31 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:08,375 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45074.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:20,838 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-06 02:42:25,261 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:36,979 INFO [train.py:901] (1/4) Epoch 6, batch 4700, loss[loss=0.2138, simple_loss=0.2918, pruned_loss=0.06791, over 8095.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3414, pruned_loss=0.1066, over 1610665.60 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:46,395 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.041e+02 3.187e+02 3.833e+02 4.569e+02 1.251e+03, threshold=7.667e+02, percent-clipped=2.0 +2023-02-06 02:43:11,112 INFO [train.py:901] (1/4) Epoch 6, batch 4750, loss[loss=0.2708, simple_loss=0.3337, pruned_loss=0.1039, over 7541.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.3413, pruned_loss=0.1061, over 1613751.22 frames. ], batch size: 18, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:30,431 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 02:43:31,820 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 02:43:46,228 INFO [train.py:901] (1/4) Epoch 6, batch 4800, loss[loss=0.2106, simple_loss=0.274, pruned_loss=0.07357, over 7708.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3418, pruned_loss=0.1065, over 1612061.14 frames. 
], batch size: 18, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:55,770 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 3.021e+02 3.501e+02 4.623e+02 8.497e+02, threshold=7.001e+02, percent-clipped=1.0 +2023-02-06 02:44:16,235 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:20,163 INFO [train.py:901] (1/4) Epoch 6, batch 4850, loss[loss=0.3135, simple_loss=0.3682, pruned_loss=0.1294, over 8595.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3403, pruned_loss=0.106, over 1605138.49 frames. ], batch size: 39, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:44:20,859 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 02:44:22,416 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3639, 2.0964, 3.5136, 2.9848, 3.0786, 1.7422, 1.7770, 2.1561], + device='cuda:1'), covar=tensor([0.2211, 0.2665, 0.0634, 0.1183, 0.1155, 0.1723, 0.1322, 0.2181], + device='cuda:1'), in_proj_covar=tensor([0.0789, 0.0728, 0.0624, 0.0719, 0.0825, 0.0667, 0.0631, 0.0665], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:44:32,443 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:35,168 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45285.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:50,593 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:56,004 INFO [train.py:901] (1/4) Epoch 6, batch 4900, loss[loss=0.2946, simple_loss=0.3557, pruned_loss=0.1167, over 8133.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3408, pruned_loss=0.1065, over 1605035.60 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:04,267 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9700, 1.4347, 4.2533, 1.6374, 3.6194, 3.4616, 3.7657, 3.6022], + device='cuda:1'), covar=tensor([0.0565, 0.3596, 0.0411, 0.2629, 0.1064, 0.0687, 0.0527, 0.0622], + device='cuda:1'), in_proj_covar=tensor([0.0365, 0.0514, 0.0456, 0.0444, 0.0509, 0.0416, 0.0426, 0.0476], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:45:05,436 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.896e+02 3.521e+02 4.501e+02 9.960e+02, threshold=7.042e+02, percent-clipped=7.0 +2023-02-06 02:45:06,568 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 02:45:13,523 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0802, 1.0225, 3.2655, 0.9355, 2.7183, 2.6483, 2.8826, 2.7736], + device='cuda:1'), covar=tensor([0.0694, 0.3816, 0.0611, 0.3054, 0.1465, 0.0925, 0.0703, 0.0817], + device='cuda:1'), in_proj_covar=tensor([0.0366, 0.0515, 0.0455, 0.0443, 0.0511, 0.0417, 0.0427, 0.0475], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:45:30,239 INFO [train.py:901] (1/4) Epoch 6, batch 4950, loss[loss=0.2976, simple_loss=0.3556, pruned_loss=0.1198, over 8450.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3421, pruned_loss=0.1074, over 1609331.95 frames. 
], batch size: 29, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:30,380 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45366.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:45:35,783 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3933, 1.3588, 2.7433, 1.2177, 2.0051, 3.0110, 2.9722, 2.5960], + device='cuda:1'), covar=tensor([0.1128, 0.1312, 0.0477, 0.2026, 0.0741, 0.0313, 0.0520, 0.0640], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0263, 0.0224, 0.0265, 0.0231, 0.0211, 0.0245, 0.0276], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 02:45:43,178 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1427, 0.9683, 3.2996, 0.9162, 2.7882, 2.6798, 2.9292, 2.8117], + device='cuda:1'), covar=tensor([0.0657, 0.3819, 0.0640, 0.3004, 0.1480, 0.0938, 0.0701, 0.0829], + device='cuda:1'), in_proj_covar=tensor([0.0363, 0.0511, 0.0453, 0.0443, 0.0513, 0.0418, 0.0428, 0.0470], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:45:48,540 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3473, 1.8132, 3.0156, 2.2674, 2.4985, 1.9726, 1.6310, 1.0092], + device='cuda:1'), covar=tensor([0.2168, 0.2618, 0.0564, 0.1490, 0.1223, 0.1352, 0.1211, 0.2638], + device='cuda:1'), in_proj_covar=tensor([0.0789, 0.0731, 0.0622, 0.0717, 0.0824, 0.0670, 0.0636, 0.0667], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:45:54,531 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7800, 1.9442, 1.6627, 2.3908, 0.9703, 1.3311, 1.6758, 1.9627], + device='cuda:1'), covar=tensor([0.0983, 0.1094, 0.1446, 0.0590, 0.1656, 0.2286, 0.1305, 0.0938], + device='cuda:1'), in_proj_covar=tensor([0.0262, 0.0242, 0.0277, 0.0220, 0.0240, 0.0273, 0.0279, 0.0246], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 02:46:05,709 INFO [train.py:901] (1/4) Epoch 6, batch 5000, loss[loss=0.2857, simple_loss=0.3584, pruned_loss=0.1065, over 8460.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3414, pruned_loss=0.1067, over 1608198.24 frames. ], batch size: 27, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:07,201 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:15,105 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.255e+02 4.005e+02 4.887e+02 1.315e+03, threshold=8.009e+02, percent-clipped=7.0 +2023-02-06 02:46:24,036 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:40,014 INFO [train.py:901] (1/4) Epoch 6, batch 5050, loss[loss=0.3084, simple_loss=0.3729, pruned_loss=0.122, over 8520.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3424, pruned_loss=0.107, over 1611915.51 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:58,883 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 02:47:14,060 INFO [train.py:901] (1/4) Epoch 6, batch 5100, loss[loss=0.2945, simple_loss=0.3454, pruned_loss=0.1218, over 8029.00 frames. 
], tot_loss[loss=0.2786, simple_loss=0.3429, pruned_loss=0.1071, over 1613835.38 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:47:24,716 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.842e+02 3.419e+02 4.219e+02 7.828e+02, threshold=6.837e+02, percent-clipped=0.0 +2023-02-06 02:47:26,314 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4146, 1.5011, 1.5350, 1.1194, 0.8928, 1.6674, 0.1630, 1.1104], + device='cuda:1'), covar=tensor([0.2794, 0.2100, 0.1003, 0.2190, 0.5405, 0.0560, 0.3769, 0.1954], + device='cuda:1'), in_proj_covar=tensor([0.0138, 0.0136, 0.0087, 0.0184, 0.0228, 0.0083, 0.0147, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:47:26,970 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:29,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7001, 1.6539, 3.1770, 1.3676, 2.2033, 3.5280, 3.4641, 2.9756], + device='cuda:1'), covar=tensor([0.1071, 0.1189, 0.0363, 0.1790, 0.0730, 0.0262, 0.0473, 0.0611], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0263, 0.0226, 0.0264, 0.0231, 0.0212, 0.0248, 0.0278], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 02:47:29,672 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7670, 1.9699, 2.0845, 1.5226, 1.0338, 2.1690, 0.2786, 1.3175], + device='cuda:1'), covar=tensor([0.3084, 0.1657, 0.1019, 0.2524, 0.6026, 0.0514, 0.4649, 0.2240], + device='cuda:1'), in_proj_covar=tensor([0.0138, 0.0136, 0.0088, 0.0185, 0.0228, 0.0083, 0.0148, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:47:43,452 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:49,317 INFO [train.py:901] (1/4) Epoch 6, batch 5150, loss[loss=0.2951, simple_loss=0.3339, pruned_loss=0.1282, over 8095.00 frames. ], tot_loss[loss=0.2802, simple_loss=0.344, pruned_loss=0.1082, over 1615113.68 frames. ], batch size: 21, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:48:23,753 INFO [train.py:901] (1/4) Epoch 6, batch 5200, loss[loss=0.2342, simple_loss=0.2997, pruned_loss=0.0844, over 7261.00 frames. ], tot_loss[loss=0.2799, simple_loss=0.3437, pruned_loss=0.108, over 1614428.61 frames. ], batch size: 16, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:48:34,006 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.204e+02 4.015e+02 4.654e+02 8.708e+02, threshold=8.029e+02, percent-clipped=4.0 +2023-02-06 02:48:37,738 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9445, 3.4099, 2.7521, 4.2148, 1.9381, 2.1587, 2.5229, 3.3173], + device='cuda:1'), covar=tensor([0.0777, 0.0814, 0.1093, 0.0293, 0.1301, 0.1695, 0.1410, 0.0874], + device='cuda:1'), in_proj_covar=tensor([0.0264, 0.0244, 0.0278, 0.0224, 0.0240, 0.0275, 0.0282, 0.0248], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 02:48:48,646 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. 
limit=2.0 +2023-02-06 02:48:57,682 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 02:48:59,756 INFO [train.py:901] (1/4) Epoch 6, batch 5250, loss[loss=0.3007, simple_loss=0.3595, pruned_loss=0.121, over 8346.00 frames. ], tot_loss[loss=0.2794, simple_loss=0.3428, pruned_loss=0.108, over 1612708.29 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:49:02,753 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2179, 1.5172, 2.2793, 1.0967, 1.6992, 1.4945, 1.3592, 1.2569], + device='cuda:1'), covar=tensor([0.1546, 0.1836, 0.0710, 0.3212, 0.1279, 0.2492, 0.1550, 0.1715], + device='cuda:1'), in_proj_covar=tensor([0.0466, 0.0464, 0.0531, 0.0540, 0.0586, 0.0526, 0.0443, 0.0588], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 02:49:30,211 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45710.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:49:34,107 INFO [train.py:901] (1/4) Epoch 6, batch 5300, loss[loss=0.273, simple_loss=0.3373, pruned_loss=0.1043, over 8236.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3428, pruned_loss=0.1073, over 1616515.16 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:49:36,949 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:49:43,555 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.935e+02 3.437e+02 4.667e+02 1.283e+03, threshold=6.874e+02, percent-clipped=3.0 +2023-02-06 02:50:09,985 INFO [train.py:901] (1/4) Epoch 6, batch 5350, loss[loss=0.2723, simple_loss=0.3545, pruned_loss=0.09505, over 8295.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3433, pruned_loss=0.1073, over 1620736.32 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:25,433 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:27,332 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45792.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:42,754 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:42,772 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:43,901 INFO [train.py:901] (1/4) Epoch 6, batch 5400, loss[loss=0.2416, simple_loss=0.3245, pruned_loss=0.07933, over 8299.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3425, pruned_loss=0.107, over 1616726.54 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:49,017 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-02-06 02:50:49,970 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45825.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:50:53,692 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.842e+02 3.609e+02 4.644e+02 1.367e+03, threshold=7.218e+02, percent-clipped=2.0 +2023-02-06 02:50:59,042 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:16,139 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5044, 1.9332, 2.1288, 1.0555, 2.1836, 1.3883, 0.5072, 1.7156], + device='cuda:1'), covar=tensor([0.0263, 0.0144, 0.0107, 0.0221, 0.0144, 0.0451, 0.0356, 0.0120], + device='cuda:1'), in_proj_covar=tensor([0.0333, 0.0247, 0.0207, 0.0298, 0.0237, 0.0384, 0.0308, 0.0282], + device='cuda:1'), out_proj_covar=tensor([1.1068e-04, 8.0056e-05, 6.6763e-05, 9.7360e-05, 7.8224e-05, 1.3616e-04, + 1.0250e-04, 9.2662e-05], device='cuda:1') +2023-02-06 02:51:17,291 INFO [train.py:901] (1/4) Epoch 6, batch 5450, loss[loss=0.3522, simple_loss=0.3933, pruned_loss=0.1555, over 6696.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3403, pruned_loss=0.1057, over 1616106.98 frames. ], batch size: 71, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:51:25,559 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:47,555 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 02:51:52,369 INFO [train.py:901] (1/4) Epoch 6, batch 5500, loss[loss=0.3079, simple_loss=0.3592, pruned_loss=0.1283, over 8289.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3399, pruned_loss=0.1058, over 1608624.73 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:52:03,083 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.821e+02 3.418e+02 4.385e+02 9.516e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 02:52:05,947 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6776, 1.5491, 3.1963, 1.3023, 2.1644, 3.3980, 3.5802, 2.8687], + device='cuda:1'), covar=tensor([0.1094, 0.1352, 0.0389, 0.2024, 0.0824, 0.0386, 0.0358, 0.0761], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0262, 0.0224, 0.0263, 0.0232, 0.0212, 0.0247, 0.0271], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 02:52:27,011 INFO [train.py:901] (1/4) Epoch 6, batch 5550, loss[loss=0.2586, simple_loss=0.339, pruned_loss=0.08914, over 8295.00 frames. ], tot_loss[loss=0.275, simple_loss=0.3399, pruned_loss=0.1051, over 1609584.88 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:53:03,503 INFO [train.py:901] (1/4) Epoch 6, batch 5600, loss[loss=0.2799, simple_loss=0.3475, pruned_loss=0.1061, over 8479.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3392, pruned_loss=0.1047, over 1609280.09 frames. 
], batch size: 49, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:13,368 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.809e+02 3.495e+02 4.670e+02 1.291e+03, threshold=6.989e+02, percent-clipped=6.0 +2023-02-06 02:53:36,015 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46064.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:53:37,305 INFO [train.py:901] (1/4) Epoch 6, batch 5650, loss[loss=0.2694, simple_loss=0.3389, pruned_loss=0.09991, over 8562.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.34, pruned_loss=0.1053, over 1609082.08 frames. ], batch size: 39, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:47,399 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46081.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:53:51,176 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 02:54:04,759 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46106.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:54:12,531 INFO [train.py:901] (1/4) Epoch 6, batch 5700, loss[loss=0.2667, simple_loss=0.3409, pruned_loss=0.09629, over 8027.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3395, pruned_loss=0.1047, over 1606881.79 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:22,542 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.829e+02 3.489e+02 4.392e+02 1.030e+03, threshold=6.978e+02, percent-clipped=3.0 +2023-02-06 02:54:25,975 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:54:34,077 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.6956, 1.1718, 3.9452, 1.2562, 3.3903, 3.2494, 3.5245, 3.4421], + device='cuda:1'), covar=tensor([0.0555, 0.3593, 0.0438, 0.2937, 0.1196, 0.0776, 0.0549, 0.0636], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0506, 0.0457, 0.0443, 0.0501, 0.0416, 0.0427, 0.0470], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 02:54:38,098 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.6903, 1.2964, 4.1445, 1.7308, 2.9690, 3.2389, 3.5955, 3.7259], + device='cuda:1'), covar=tensor([0.1268, 0.5706, 0.0927, 0.3647, 0.2438, 0.1430, 0.1237, 0.1121], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0505, 0.0456, 0.0441, 0.0500, 0.0415, 0.0426, 0.0468], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:1') +2023-02-06 02:54:46,327 INFO [train.py:901] (1/4) Epoch 6, batch 5750, loss[loss=0.2644, simple_loss=0.3369, pruned_loss=0.09596, over 8327.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.3403, pruned_loss=0.1053, over 1614684.12 frames. 
], batch size: 26, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:46,529 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9880, 3.9500, 2.6894, 2.5873, 2.9130, 2.1038, 2.6900, 2.9775], + device='cuda:1'), covar=tensor([0.1252, 0.0179, 0.0694, 0.0696, 0.0600, 0.0993, 0.0837, 0.0853], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0232, 0.0312, 0.0305, 0.0314, 0.0314, 0.0339, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 02:54:47,224 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8129, 2.7703, 3.2992, 0.8321, 3.3805, 1.9197, 1.4041, 1.9120], + device='cuda:1'), covar=tensor([0.0462, 0.0122, 0.0085, 0.0346, 0.0123, 0.0421, 0.0518, 0.0201], + device='cuda:1'), in_proj_covar=tensor([0.0335, 0.0245, 0.0206, 0.0298, 0.0237, 0.0384, 0.0305, 0.0284], + device='cuda:1'), out_proj_covar=tensor([1.1131e-04, 7.9433e-05, 6.5978e-05, 9.6942e-05, 7.7894e-05, 1.3547e-04, + 1.0132e-04, 9.3017e-05], device='cuda:1') +2023-02-06 02:54:53,637 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 02:54:55,243 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:09,101 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9918, 2.6223, 2.9239, 1.4002, 3.1452, 1.9028, 1.4907, 1.8391], + device='cuda:1'), covar=tensor([0.0434, 0.0141, 0.0170, 0.0321, 0.0197, 0.0419, 0.0421, 0.0214], + device='cuda:1'), in_proj_covar=tensor([0.0333, 0.0245, 0.0206, 0.0297, 0.0238, 0.0382, 0.0304, 0.0282], + device='cuda:1'), out_proj_covar=tensor([1.1058e-04, 7.9316e-05, 6.6154e-05, 9.6550e-05, 7.8107e-05, 1.3455e-04, + 1.0076e-04, 9.2262e-05], device='cuda:1') +2023-02-06 02:55:21,179 INFO [train.py:901] (1/4) Epoch 6, batch 5800, loss[loss=0.309, simple_loss=0.3671, pruned_loss=0.1255, over 8242.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3415, pruned_loss=0.1063, over 1610352.23 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:55:24,802 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:32,627 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.949e+02 3.358e+02 4.338e+02 9.471e+02, threshold=6.717e+02, percent-clipped=1.0 +2023-02-06 02:55:45,820 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:55,787 INFO [train.py:901] (1/4) Epoch 6, batch 5850, loss[loss=0.2801, simple_loss=0.3411, pruned_loss=0.1095, over 8129.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3413, pruned_loss=0.1062, over 1613012.50 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:14,982 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:56:29,478 INFO [train.py:901] (1/4) Epoch 6, batch 5900, loss[loss=0.2863, simple_loss=0.3318, pruned_loss=0.1204, over 7697.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3414, pruned_loss=0.1064, over 1613825.71 frames. 
], batch size: 18, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:39,482 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 3.022e+02 3.849e+02 5.141e+02 8.536e+02, threshold=7.697e+02, percent-clipped=7.0 +2023-02-06 02:56:43,692 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:57:04,148 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6443, 1.3165, 1.2953, 1.1342, 0.9426, 1.1597, 1.3115, 1.3732], + device='cuda:1'), covar=tensor([0.0514, 0.1089, 0.1456, 0.1216, 0.0548, 0.1390, 0.0675, 0.0483], + device='cuda:1'), in_proj_covar=tensor([0.0123, 0.0172, 0.0212, 0.0175, 0.0122, 0.0182, 0.0136, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 02:57:04,663 INFO [train.py:901] (1/4) Epoch 6, batch 5950, loss[loss=0.211, simple_loss=0.2803, pruned_loss=0.07087, over 5542.00 frames. ], tot_loss[loss=0.2757, simple_loss=0.3408, pruned_loss=0.1054, over 1613638.93 frames. ], batch size: 12, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:38,437 INFO [train.py:901] (1/4) Epoch 6, batch 6000, loss[loss=0.2911, simple_loss=0.3608, pruned_loss=0.1107, over 7928.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3402, pruned_loss=0.1046, over 1617598.54 frames. ], batch size: 20, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:38,437 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 02:57:50,762 INFO [train.py:935] (1/4) Epoch 6, validation: loss=0.2127, simple_loss=0.3094, pruned_loss=0.05799, over 944034.00 frames. +2023-02-06 02:57:50,763 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6607MB +2023-02-06 02:58:01,252 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.683e+02 3.226e+02 4.100e+02 1.140e+03, threshold=6.453e+02, percent-clipped=1.0 +2023-02-06 02:58:04,363 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:58:21,780 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46460.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:58:25,656 INFO [train.py:901] (1/4) Epoch 6, batch 6050, loss[loss=0.2495, simple_loss=0.3281, pruned_loss=0.08549, over 8044.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3392, pruned_loss=0.1036, over 1619851.07 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:58:37,126 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 02:58:39,501 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0756, 3.0728, 2.8032, 1.4256, 2.6985, 2.8009, 2.8116, 2.6708], + device='cuda:1'), covar=tensor([0.1509, 0.0983, 0.1358, 0.5370, 0.1142, 0.1340, 0.1811, 0.1269], + device='cuda:1'), in_proj_covar=tensor([0.0416, 0.0315, 0.0341, 0.0434, 0.0332, 0.0305, 0.0329, 0.0275], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 02:58:56,114 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:59:02,119 INFO [train.py:901] (1/4) Epoch 6, batch 6100, loss[loss=0.2443, simple_loss=0.3027, pruned_loss=0.09296, over 7789.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3389, pruned_loss=0.1039, over 1615579.73 frames. 
], batch size: 19, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:59:12,635 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 3.046e+02 3.657e+02 4.398e+02 9.620e+02, threshold=7.315e+02, percent-clipped=4.0 +2023-02-06 02:59:13,565 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:59:24,581 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 02:59:37,765 INFO [train.py:901] (1/4) Epoch 6, batch 6150, loss[loss=0.2779, simple_loss=0.3448, pruned_loss=0.1055, over 8572.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3377, pruned_loss=0.103, over 1615796.32 frames. ], batch size: 31, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:59:56,277 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:09,154 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8107, 3.2137, 3.4564, 2.5602, 1.4642, 3.4689, 0.6602, 2.1785], + device='cuda:1'), covar=tensor([0.3457, 0.1585, 0.0696, 0.2830, 0.6547, 0.0617, 0.6133, 0.1869], + device='cuda:1'), in_proj_covar=tensor([0.0135, 0.0133, 0.0082, 0.0182, 0.0219, 0.0080, 0.0143, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:00:11,879 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46613.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:13,823 INFO [train.py:901] (1/4) Epoch 6, batch 6200, loss[loss=0.3978, simple_loss=0.4223, pruned_loss=0.1867, over 7154.00 frames. ], tot_loss[loss=0.2729, simple_loss=0.3381, pruned_loss=0.1039, over 1611175.38 frames. ], batch size: 72, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:00:14,743 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:16,774 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:24,153 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.053e+02 3.051e+02 3.861e+02 4.926e+02 1.016e+03, threshold=7.722e+02, percent-clipped=3.0 +2023-02-06 03:00:28,971 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:48,686 INFO [train.py:901] (1/4) Epoch 6, batch 6250, loss[loss=0.2226, simple_loss=0.301, pruned_loss=0.07211, over 7942.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3365, pruned_loss=0.1029, over 1604893.11 frames. ], batch size: 20, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:01:22,862 INFO [train.py:901] (1/4) Epoch 6, batch 6300, loss[loss=0.2332, simple_loss=0.2996, pruned_loss=0.0834, over 8078.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3364, pruned_loss=0.1026, over 1604518.41 frames. 
], batch size: 21, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:01:34,427 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.734e+02 3.399e+02 4.377e+02 1.449e+03, threshold=6.797e+02, percent-clipped=4.0 +2023-02-06 03:01:49,385 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:01:57,779 INFO [train.py:901] (1/4) Epoch 6, batch 6350, loss[loss=0.3493, simple_loss=0.3984, pruned_loss=0.1501, over 8342.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3376, pruned_loss=0.1031, over 1607376.53 frames. ], batch size: 26, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:01:58,516 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.3322, 1.6473, 1.5772, 1.3180, 1.4180, 1.7383, 2.3296, 1.7531], + device='cuda:1'), covar=tensor([0.0416, 0.1316, 0.1743, 0.1368, 0.0609, 0.1554, 0.0633, 0.0624], + device='cuda:1'), in_proj_covar=tensor([0.0122, 0.0169, 0.0210, 0.0172, 0.0120, 0.0178, 0.0133, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 03:02:32,255 INFO [train.py:901] (1/4) Epoch 6, batch 6400, loss[loss=0.2844, simple_loss=0.3522, pruned_loss=0.1083, over 8453.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3371, pruned_loss=0.1029, over 1611053.02 frames. ], batch size: 27, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:02:41,189 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:02:43,104 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.744e+02 3.578e+02 4.396e+02 9.504e+02, threshold=7.157e+02, percent-clipped=5.0 +2023-02-06 03:03:07,338 INFO [train.py:901] (1/4) Epoch 6, batch 6450, loss[loss=0.2593, simple_loss=0.3385, pruned_loss=0.09005, over 8453.00 frames. ], tot_loss[loss=0.2714, simple_loss=0.3371, pruned_loss=0.1028, over 1609943.81 frames. ], batch size: 27, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:41,565 INFO [train.py:901] (1/4) Epoch 6, batch 6500, loss[loss=0.2171, simple_loss=0.2822, pruned_loss=0.07595, over 7797.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3362, pruned_loss=0.103, over 1609733.68 frames. ], batch size: 19, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:51,600 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.994e+02 3.001e+02 3.759e+02 4.377e+02 1.086e+03, threshold=7.517e+02, percent-clipped=1.0 +2023-02-06 03:04:09,449 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:14,545 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46964.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:15,766 INFO [train.py:901] (1/4) Epoch 6, batch 6550, loss[loss=0.2595, simple_loss=0.3221, pruned_loss=0.09842, over 7434.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3363, pruned_loss=0.1037, over 1610830.81 frames. ], batch size: 17, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:37,515 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 03:04:45,799 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:50,982 INFO [train.py:901] (1/4) Epoch 6, batch 6600, loss[loss=0.2973, simple_loss=0.3648, pruned_loss=0.1149, over 8706.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.3354, pruned_loss=0.1031, over 1608987.47 frames. ], batch size: 34, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:56,436 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 03:05:01,160 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.927e+02 3.687e+02 4.772e+02 1.123e+03, threshold=7.374e+02, percent-clipped=4.0 +2023-02-06 03:05:03,300 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:25,535 INFO [train.py:901] (1/4) Epoch 6, batch 6650, loss[loss=0.2694, simple_loss=0.3234, pruned_loss=0.1077, over 7818.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3365, pruned_loss=0.1037, over 1605830.82 frames. ], batch size: 20, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:05:30,597 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:35,422 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:06:00,869 INFO [train.py:901] (1/4) Epoch 6, batch 6700, loss[loss=0.235, simple_loss=0.3116, pruned_loss=0.07919, over 8475.00 frames. ], tot_loss[loss=0.2714, simple_loss=0.3366, pruned_loss=0.1031, over 1611651.32 frames. ], batch size: 25, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:12,494 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.777e+02 3.640e+02 4.922e+02 1.093e+03, threshold=7.281e+02, percent-clipped=6.0 +2023-02-06 03:06:28,292 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4876, 1.8687, 1.5272, 2.4048, 1.2023, 1.2678, 1.7916, 1.9352], + device='cuda:1'), covar=tensor([0.1138, 0.1025, 0.1573, 0.0482, 0.1345, 0.1977, 0.1103, 0.1047], + device='cuda:1'), in_proj_covar=tensor([0.0264, 0.0246, 0.0279, 0.0224, 0.0244, 0.0272, 0.0276, 0.0247], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 03:06:28,355 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1843, 1.7929, 2.6403, 2.0559, 2.2637, 1.9134, 1.4716, 0.9229], + device='cuda:1'), covar=tensor([0.2251, 0.2347, 0.0626, 0.1342, 0.1192, 0.1278, 0.1247, 0.2537], + device='cuda:1'), in_proj_covar=tensor([0.0799, 0.0735, 0.0639, 0.0724, 0.0825, 0.0677, 0.0634, 0.0677], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:06:34,847 INFO [train.py:901] (1/4) Epoch 6, batch 6750, loss[loss=0.2862, simple_loss=0.3605, pruned_loss=0.106, over 8104.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3365, pruned_loss=0.1027, over 1610999.99 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:38,956 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:01,070 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. 
limit=2.0 +2023-02-06 03:07:10,710 INFO [train.py:901] (1/4) Epoch 6, batch 6800, loss[loss=0.2927, simple_loss=0.3606, pruned_loss=0.1124, over 8498.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.336, pruned_loss=0.1026, over 1608081.24 frames. ], batch size: 26, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:12,696 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 03:07:21,207 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.942e+02 3.591e+02 4.804e+02 1.528e+03, threshold=7.182e+02, percent-clipped=7.0 +2023-02-06 03:07:37,978 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1364, 1.7051, 1.3556, 1.7119, 1.3666, 1.1187, 1.2630, 1.3796], + device='cuda:1'), covar=tensor([0.0924, 0.0348, 0.0927, 0.0455, 0.0613, 0.1209, 0.0840, 0.0711], + device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0230, 0.0307, 0.0299, 0.0308, 0.0311, 0.0338, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 03:07:40,024 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0148, 1.1418, 4.2734, 1.5186, 3.5959, 3.5373, 3.7716, 3.5845], + device='cuda:1'), covar=tensor([0.0580, 0.4306, 0.0412, 0.2955, 0.1178, 0.0806, 0.0532, 0.0694], + device='cuda:1'), in_proj_covar=tensor([0.0371, 0.0511, 0.0450, 0.0448, 0.0507, 0.0428, 0.0430, 0.0482], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 03:07:40,034 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:44,572 INFO [train.py:901] (1/4) Epoch 6, batch 6850, loss[loss=0.3871, simple_loss=0.4073, pruned_loss=0.1835, over 8282.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.338, pruned_loss=0.1037, over 1610625.79 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:58,966 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:00,906 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 03:08:19,245 INFO [train.py:901] (1/4) Epoch 6, batch 6900, loss[loss=0.2515, simple_loss=0.3179, pruned_loss=0.09255, over 8291.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3379, pruned_loss=0.1035, over 1611167.05 frames. 
], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:08:28,205 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:30,615 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 2.873e+02 3.537e+02 4.379e+02 9.664e+02, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 03:08:32,865 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:44,870 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:49,771 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:54,249 INFO [train.py:901] (1/4) Epoch 6, batch 6950, loss[loss=0.2036, simple_loss=0.2663, pruned_loss=0.07047, over 7426.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.338, pruned_loss=0.1037, over 1612816.38 frames. ], batch size: 17, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:09:09,777 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 03:09:15,147 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:09:28,488 INFO [train.py:901] (1/4) Epoch 6, batch 7000, loss[loss=0.2886, simple_loss=0.3664, pruned_loss=0.1055, over 8349.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3378, pruned_loss=0.1028, over 1618057.64 frames. ], batch size: 24, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:09:39,928 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.860e+02 2.784e+02 3.553e+02 4.437e+02 1.281e+03, threshold=7.106e+02, percent-clipped=4.0 +2023-02-06 03:10:01,757 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4388, 2.0042, 3.3363, 2.5998, 2.6557, 1.9756, 1.4312, 1.2264], + device='cuda:1'), covar=tensor([0.2163, 0.2645, 0.0525, 0.1310, 0.1138, 0.1285, 0.1240, 0.2633], + device='cuda:1'), in_proj_covar=tensor([0.0814, 0.0741, 0.0647, 0.0737, 0.0840, 0.0689, 0.0646, 0.0686], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:10:03,576 INFO [train.py:901] (1/4) Epoch 6, batch 7050, loss[loss=0.2286, simple_loss=0.3114, pruned_loss=0.07289, over 8358.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3388, pruned_loss=0.1031, over 1619100.87 frames. ], batch size: 24, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:10:24,717 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6247, 1.8828, 2.0860, 1.5756, 0.9291, 2.2063, 0.3325, 1.3538], + device='cuda:1'), covar=tensor([0.3863, 0.1850, 0.0879, 0.2591, 0.5952, 0.0502, 0.4738, 0.2223], + device='cuda:1'), in_proj_covar=tensor([0.0138, 0.0135, 0.0082, 0.0181, 0.0225, 0.0081, 0.0142, 0.0136], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:10:37,638 INFO [train.py:901] (1/4) Epoch 6, batch 7100, loss[loss=0.2415, simple_loss=0.3055, pruned_loss=0.08872, over 7660.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3381, pruned_loss=0.1031, over 1616096.87 frames. 
], batch size: 19, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:10:48,827 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.154e+02 3.207e+02 3.842e+02 5.073e+02 1.424e+03, threshold=7.684e+02, percent-clipped=2.0 +2023-02-06 03:10:53,127 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6035, 1.9649, 3.4176, 1.1583, 2.3237, 1.8983, 1.6069, 1.9966], + device='cuda:1'), covar=tensor([0.1340, 0.1630, 0.0605, 0.3160, 0.1333, 0.2168, 0.1362, 0.2155], + device='cuda:1'), in_proj_covar=tensor([0.0467, 0.0466, 0.0535, 0.0548, 0.0590, 0.0527, 0.0446, 0.0595], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 03:10:56,274 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:12,591 INFO [train.py:901] (1/4) Epoch 6, batch 7150, loss[loss=0.3066, simple_loss=0.3657, pruned_loss=0.1237, over 8504.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3388, pruned_loss=0.1042, over 1607905.34 frames. ], batch size: 34, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:14,075 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:29,991 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 03:11:33,775 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9861, 2.4404, 2.6620, 1.0510, 2.7101, 1.4876, 1.4754, 1.6618], + device='cuda:1'), covar=tensor([0.0383, 0.0151, 0.0136, 0.0349, 0.0247, 0.0476, 0.0411, 0.0255], + device='cuda:1'), in_proj_covar=tensor([0.0337, 0.0251, 0.0208, 0.0302, 0.0245, 0.0385, 0.0309, 0.0287], + device='cuda:1'), out_proj_covar=tensor([1.1073e-04, 8.0746e-05, 6.5994e-05, 9.7173e-05, 8.0053e-05, 1.3478e-04, + 1.0181e-04, 9.3043e-05], device='cuda:1') +2023-02-06 03:11:37,955 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:46,775 INFO [train.py:901] (1/4) Epoch 6, batch 7200, loss[loss=0.213, simple_loss=0.2893, pruned_loss=0.06841, over 7796.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3395, pruned_loss=0.1046, over 1609882.12 frames. ], batch size: 19, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:57,794 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.983e+02 3.737e+02 4.630e+02 8.445e+02, threshold=7.473e+02, percent-clipped=4.0 +2023-02-06 03:12:22,027 INFO [train.py:901] (1/4) Epoch 6, batch 7250, loss[loss=0.2513, simple_loss=0.3296, pruned_loss=0.08655, over 8355.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3393, pruned_loss=0.1036, over 1614805.36 frames. ], batch size: 24, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:56,482 INFO [train.py:901] (1/4) Epoch 6, batch 7300, loss[loss=0.2971, simple_loss=0.3581, pruned_loss=0.118, over 7974.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3398, pruned_loss=0.1042, over 1617926.85 frames. 
], batch size: 21, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:57,904 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:07,203 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.071e+02 3.696e+02 4.839e+02 1.031e+03, threshold=7.393e+02, percent-clipped=2.0 +2023-02-06 03:13:13,260 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:23,405 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:30,054 INFO [train.py:901] (1/4) Epoch 6, batch 7350, loss[loss=0.2725, simple_loss=0.3176, pruned_loss=0.1137, over 5937.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.339, pruned_loss=0.1037, over 1615578.84 frames. ], batch size: 13, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:13:45,224 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 03:13:48,789 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 03:13:59,324 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:05,872 INFO [train.py:901] (1/4) Epoch 6, batch 7400, loss[loss=0.2554, simple_loss=0.3209, pruned_loss=0.09493, over 7806.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3398, pruned_loss=0.1044, over 1614695.05 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:08,003 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 03:14:13,529 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47827.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:14:17,407 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 3.124e+02 3.904e+02 4.877e+02 9.892e+02, threshold=7.808e+02, percent-clipped=5.0 +2023-02-06 03:14:33,728 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:40,242 INFO [train.py:901] (1/4) Epoch 6, batch 7450, loss[loss=0.2441, simple_loss=0.3093, pruned_loss=0.08942, over 7815.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.3405, pruned_loss=0.105, over 1610445.95 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:46,244 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 03:15:15,323 INFO [train.py:901] (1/4) Epoch 6, batch 7500, loss[loss=0.2405, simple_loss=0.3189, pruned_loss=0.08105, over 8527.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3402, pruned_loss=0.1046, over 1614283.56 frames. ], batch size: 28, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:25,968 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.098e+02 3.102e+02 3.706e+02 4.699e+02 1.511e+03, threshold=7.412e+02, percent-clipped=9.0 +2023-02-06 03:15:49,279 INFO [train.py:901] (1/4) Epoch 6, batch 7550, loss[loss=0.325, simple_loss=0.3857, pruned_loss=0.1321, over 8517.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3407, pruned_loss=0.1043, over 1618747.76 frames. 
], batch size: 26, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:54,925 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:11,852 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:24,741 INFO [train.py:901] (1/4) Epoch 6, batch 7600, loss[loss=0.2094, simple_loss=0.2762, pruned_loss=0.0713, over 7928.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3405, pruned_loss=0.1045, over 1619893.62 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:16:37,193 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.897e+02 3.536e+02 4.611e+02 2.294e+03, threshold=7.072e+02, percent-clipped=5.0 +2023-02-06 03:17:01,520 INFO [train.py:901] (1/4) Epoch 6, batch 7650, loss[loss=0.2886, simple_loss=0.3521, pruned_loss=0.1126, over 7176.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.341, pruned_loss=0.1043, over 1624470.75 frames. ], batch size: 72, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:03,782 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1282, 2.4285, 2.0016, 2.9416, 1.5232, 1.5501, 2.0946, 2.6943], + device='cuda:1'), covar=tensor([0.1051, 0.1178, 0.1283, 0.0567, 0.1440, 0.2065, 0.1245, 0.0947], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0248, 0.0277, 0.0229, 0.0246, 0.0272, 0.0278, 0.0248], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 03:17:17,563 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48090.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:24,158 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:32,462 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:34,944 INFO [train.py:901] (1/4) Epoch 6, batch 7700, loss[loss=0.2488, simple_loss=0.3339, pruned_loss=0.08179, over 8183.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.3385, pruned_loss=0.103, over 1621096.81 frames. ], batch size: 23, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:45,628 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6714, 3.1394, 3.2575, 2.2552, 1.3705, 3.2766, 0.4806, 2.1256], + device='cuda:1'), covar=tensor([0.2493, 0.1110, 0.0754, 0.2976, 0.6538, 0.0663, 0.5229, 0.2414], + device='cuda:1'), in_proj_covar=tensor([0.0140, 0.0132, 0.0081, 0.0183, 0.0231, 0.0082, 0.0143, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:17:46,047 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.821e+02 3.617e+02 4.667e+02 9.808e+02, threshold=7.234e+02, percent-clipped=3.0 +2023-02-06 03:17:50,863 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:57,317 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 03:17:59,320 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:10,116 INFO [train.py:901] (1/4) Epoch 6, batch 7750, loss[loss=0.2376, simple_loss=0.2951, pruned_loss=0.09007, over 7533.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3383, pruned_loss=0.103, over 1622898.80 frames. ], batch size: 18, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:13,408 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48171.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:18:18,148 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4480, 1.8954, 2.1016, 0.9393, 2.2129, 1.3734, 0.5106, 1.7968], + device='cuda:1'), covar=tensor([0.0303, 0.0158, 0.0117, 0.0259, 0.0162, 0.0428, 0.0396, 0.0140], + device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0251, 0.0206, 0.0304, 0.0244, 0.0388, 0.0319, 0.0290], + device='cuda:1'), out_proj_covar=tensor([1.1360e-04, 8.0534e-05, 6.4772e-05, 9.7039e-05, 7.9117e-05, 1.3542e-04, + 1.0453e-04, 9.3706e-05], device='cuda:1') +2023-02-06 03:18:28,105 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9788, 1.4155, 4.3670, 1.7483, 2.3567, 5.0622, 4.8316, 4.3102], + device='cuda:1'), covar=tensor([0.1203, 0.1608, 0.0298, 0.2132, 0.0867, 0.0211, 0.0352, 0.0613], + device='cuda:1'), in_proj_covar=tensor([0.0242, 0.0272, 0.0227, 0.0273, 0.0235, 0.0212, 0.0260, 0.0280], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 03:18:39,663 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 03:18:39,964 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5888, 2.0262, 3.4701, 1.2823, 2.5385, 1.8722, 1.6819, 2.0844], + device='cuda:1'), covar=tensor([0.1438, 0.1688, 0.0574, 0.2976, 0.1250, 0.2252, 0.1464, 0.2027], + device='cuda:1'), in_proj_covar=tensor([0.0468, 0.0466, 0.0530, 0.0549, 0.0595, 0.0532, 0.0449, 0.0592], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 03:18:43,312 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:43,803 INFO [train.py:901] (1/4) Epoch 6, batch 7800, loss[loss=0.2435, simple_loss=0.3275, pruned_loss=0.07978, over 8363.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.338, pruned_loss=0.1027, over 1619571.39 frames. ], batch size: 24, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:53,439 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48230.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:18:54,591 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 3.053e+02 3.731e+02 4.789e+02 1.133e+03, threshold=7.462e+02, percent-clipped=3.0 +2023-02-06 03:19:16,578 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:19:17,073 INFO [train.py:901] (1/4) Epoch 6, batch 7850, loss[loss=0.2636, simple_loss=0.3274, pruned_loss=0.09985, over 7812.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3365, pruned_loss=0.102, over 1615540.32 frames. 
], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:19:30,601 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48286.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:19:51,010 INFO [train.py:901] (1/4) Epoch 6, batch 7900, loss[loss=0.2717, simple_loss=0.3434, pruned_loss=0.1001, over 8731.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3376, pruned_loss=0.1034, over 1614718.25 frames. ], batch size: 40, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:01,872 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.927e+02 3.494e+02 4.326e+02 7.205e+02, threshold=6.988e+02, percent-clipped=0.0 +2023-02-06 03:20:09,328 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:25,102 INFO [train.py:901] (1/4) Epoch 6, batch 7950, loss[loss=0.2966, simple_loss=0.3576, pruned_loss=0.1178, over 8426.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3362, pruned_loss=0.1026, over 1612753.52 frames. ], batch size: 49, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:58,705 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:59,254 INFO [train.py:901] (1/4) Epoch 6, batch 8000, loss[loss=0.1898, simple_loss=0.2701, pruned_loss=0.05476, over 7700.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3369, pruned_loss=0.103, over 1612118.50 frames. ], batch size: 18, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:10,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.174e+02 2.873e+02 3.488e+02 4.217e+02 8.104e+02, threshold=6.977e+02, percent-clipped=2.0 +2023-02-06 03:21:11,768 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:16,771 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:33,924 INFO [train.py:901] (1/4) Epoch 6, batch 8050, loss[loss=0.1878, simple_loss=0.2561, pruned_loss=0.05975, over 6799.00 frames. ], tot_loss[loss=0.2706, simple_loss=0.3351, pruned_loss=0.103, over 1585351.63 frames. ], batch size: 15, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:37,661 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:54,561 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48496.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:07,122 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 03:22:10,943 INFO [train.py:901] (1/4) Epoch 7, batch 0, loss[loss=0.2877, simple_loss=0.3442, pruned_loss=0.1156, over 8132.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3442, pruned_loss=0.1156, over 8132.00 frames. ], batch size: 22, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:10,943 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 03:22:22,764 INFO [train.py:935] (1/4) Epoch 7, validation: loss=0.2113, simple_loss=0.3091, pruned_loss=0.05678, over 944034.00 frames. 
+2023-02-06 03:22:22,765 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6607MB +2023-02-06 03:22:28,411 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9532, 2.3022, 1.6967, 2.8375, 1.3141, 1.4630, 1.8004, 2.2859], + device='cuda:1'), covar=tensor([0.0901, 0.1105, 0.1438, 0.0440, 0.1419, 0.1789, 0.1298, 0.0952], + device='cuda:1'), in_proj_covar=tensor([0.0263, 0.0239, 0.0276, 0.0221, 0.0240, 0.0267, 0.0273, 0.0244], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 03:22:37,630 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:38,086 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 03:22:39,037 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3442, 1.6656, 1.6303, 0.8153, 1.7435, 1.1872, 0.2628, 1.5441], + device='cuda:1'), covar=tensor([0.0216, 0.0149, 0.0114, 0.0213, 0.0146, 0.0414, 0.0381, 0.0112], + device='cuda:1'), in_proj_covar=tensor([0.0340, 0.0251, 0.0203, 0.0302, 0.0238, 0.0385, 0.0310, 0.0287], + device='cuda:1'), out_proj_covar=tensor([1.1150e-04, 8.0551e-05, 6.3816e-05, 9.6298e-05, 7.6935e-05, 1.3405e-04, + 1.0118e-04, 9.2305e-05], device='cuda:1') +2023-02-06 03:22:41,676 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3224, 1.4703, 1.3324, 1.9170, 0.7715, 1.1810, 1.2681, 1.4537], + device='cuda:1'), covar=tensor([0.1247, 0.1140, 0.1648, 0.0676, 0.1552, 0.2006, 0.1218, 0.1082], + device='cuda:1'), in_proj_covar=tensor([0.0266, 0.0242, 0.0278, 0.0224, 0.0244, 0.0271, 0.0277, 0.0247], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 03:22:45,430 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.727e+02 3.570e+02 4.321e+02 1.428e+03, threshold=7.140e+02, percent-clipped=5.0 +2023-02-06 03:22:53,288 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48542.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:22:55,822 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:56,685 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 03:22:57,635 INFO [train.py:901] (1/4) Epoch 7, batch 50, loss[loss=0.3065, simple_loss=0.359, pruned_loss=0.1271, over 8234.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3385, pruned_loss=0.1021, over 366194.36 frames. ], batch size: 22, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:57,790 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:23:09,796 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48567.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:23:12,934 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 03:23:14,282 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48574.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:23:31,385 INFO [train.py:901] (1/4) Epoch 7, batch 100, loss[loss=0.2815, simple_loss=0.3451, pruned_loss=0.1089, over 8675.00 frames. 
], tot_loss[loss=0.2711, simple_loss=0.3383, pruned_loss=0.1019, over 646438.79 frames. ], batch size: 31, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:23:34,998 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 03:23:44,211 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5580, 2.0063, 3.4963, 1.2820, 2.5842, 2.1093, 1.7009, 2.0216], + device='cuda:1'), covar=tensor([0.1478, 0.1724, 0.0581, 0.3076, 0.1234, 0.2163, 0.1423, 0.2044], + device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0473, 0.0526, 0.0547, 0.0594, 0.0534, 0.0451, 0.0592], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 03:23:54,579 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 2.936e+02 3.434e+02 4.642e+02 8.961e+02, threshold=6.868e+02, percent-clipped=3.0 +2023-02-06 03:24:06,731 INFO [train.py:901] (1/4) Epoch 7, batch 150, loss[loss=0.2533, simple_loss=0.3169, pruned_loss=0.09485, over 7775.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3368, pruned_loss=0.1007, over 862404.77 frames. ], batch size: 19, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:24:28,700 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5855, 2.2160, 4.5705, 1.2990, 2.7021, 2.1801, 1.6328, 2.7617], + device='cuda:1'), covar=tensor([0.1675, 0.2100, 0.0643, 0.3527, 0.1806, 0.2570, 0.1682, 0.2466], + device='cuda:1'), in_proj_covar=tensor([0.0470, 0.0470, 0.0523, 0.0541, 0.0591, 0.0527, 0.0448, 0.0587], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 03:24:31,972 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:24:34,077 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48689.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:24:40,629 INFO [train.py:901] (1/4) Epoch 7, batch 200, loss[loss=0.2468, simple_loss=0.3267, pruned_loss=0.0835, over 8300.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3346, pruned_loss=0.0999, over 1024016.76 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:03,494 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.629e+02 3.306e+02 4.274e+02 1.004e+03, threshold=6.612e+02, percent-clipped=3.0 +2023-02-06 03:25:15,507 INFO [train.py:901] (1/4) Epoch 7, batch 250, loss[loss=0.2565, simple_loss=0.3268, pruned_loss=0.09313, over 8187.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3332, pruned_loss=0.09864, over 1158267.80 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:22,707 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48759.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:26,762 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 03:25:35,605 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 03:25:41,109 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:44,863 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. 
limit=2.0 +2023-02-06 03:25:50,548 INFO [train.py:901] (1/4) Epoch 7, batch 300, loss[loss=0.2947, simple_loss=0.3637, pruned_loss=0.1128, over 8187.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3335, pruned_loss=0.09957, over 1255185.02 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:52,221 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:54,987 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:12,383 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:13,531 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.973e+02 3.476e+02 4.340e+02 1.124e+03, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 03:26:18,393 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:25,097 INFO [train.py:901] (1/4) Epoch 7, batch 350, loss[loss=0.3251, simple_loss=0.3652, pruned_loss=0.1425, over 6503.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3335, pruned_loss=0.09963, over 1331483.86 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:26:30,598 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:43,233 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:27:00,372 INFO [train.py:901] (1/4) Epoch 7, batch 400, loss[loss=0.3472, simple_loss=0.3935, pruned_loss=0.1504, over 8707.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3357, pruned_loss=0.1011, over 1398708.16 frames. ], batch size: 34, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:27:00,663 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3763, 1.9433, 3.0639, 2.3834, 2.5771, 2.1118, 1.5573, 1.2673], + device='cuda:1'), covar=tensor([0.2643, 0.2907, 0.0653, 0.1717, 0.1353, 0.1439, 0.1294, 0.2977], + device='cuda:1'), in_proj_covar=tensor([0.0816, 0.0754, 0.0650, 0.0743, 0.0840, 0.0695, 0.0646, 0.0683], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:27:01,278 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:27:22,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.855e+02 2.734e+02 3.619e+02 4.506e+02 1.679e+03, threshold=7.237e+02, percent-clipped=8.0 +2023-02-06 03:27:32,150 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48945.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:27:34,592 INFO [train.py:901] (1/4) Epoch 7, batch 450, loss[loss=0.2549, simple_loss=0.3103, pruned_loss=0.09975, over 7703.00 frames. ], tot_loss[loss=0.2711, simple_loss=0.3373, pruned_loss=0.1024, over 1446516.58 frames. 
], batch size: 18, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:27:36,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5453, 1.9078, 2.1159, 1.1231, 2.2395, 1.3189, 0.5630, 1.6605], + device='cuda:1'), covar=tensor([0.0283, 0.0156, 0.0092, 0.0228, 0.0135, 0.0470, 0.0404, 0.0136], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0250, 0.0206, 0.0307, 0.0240, 0.0393, 0.0317, 0.0293], + device='cuda:1'), out_proj_covar=tensor([1.1364e-04, 7.9519e-05, 6.4796e-05, 9.7593e-05, 7.7148e-05, 1.3655e-04, + 1.0325e-04, 9.4133e-05], device='cuda:1') +2023-02-06 03:27:49,648 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48970.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:28:10,035 INFO [train.py:901] (1/4) Epoch 7, batch 500, loss[loss=0.3007, simple_loss=0.3685, pruned_loss=0.1165, over 8776.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3374, pruned_loss=0.1025, over 1481682.49 frames. ], batch size: 30, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:28:10,363 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 03:28:32,338 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.572e+02 3.184e+02 4.227e+02 8.649e+02, threshold=6.369e+02, percent-clipped=1.0 +2023-02-06 03:28:43,905 INFO [train.py:901] (1/4) Epoch 7, batch 550, loss[loss=0.2859, simple_loss=0.3474, pruned_loss=0.1121, over 8606.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3371, pruned_loss=0.1022, over 1509144.64 frames. ], batch size: 31, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:28:50,294 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:07,258 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:19,498 INFO [train.py:901] (1/4) Epoch 7, batch 600, loss[loss=0.2904, simple_loss=0.3477, pruned_loss=0.1165, over 7254.00 frames. ], tot_loss[loss=0.2706, simple_loss=0.3364, pruned_loss=0.1024, over 1527933.37 frames. ], batch size: 16, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:29:19,610 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0645, 3.9933, 3.7374, 2.0913, 3.5863, 3.5936, 3.7779, 3.2013], + device='cuda:1'), covar=tensor([0.0875, 0.0665, 0.0842, 0.4064, 0.0859, 0.1001, 0.1102, 0.0911], + device='cuda:1'), in_proj_covar=tensor([0.0413, 0.0316, 0.0338, 0.0421, 0.0326, 0.0309, 0.0314, 0.0268], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:29:31,450 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 03:29:41,662 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49130.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:42,820 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.845e+02 3.510e+02 4.694e+02 1.227e+03, threshold=7.020e+02, percent-clipped=5.0 +2023-02-06 03:29:54,592 INFO [train.py:901] (1/4) Epoch 7, batch 650, loss[loss=0.2486, simple_loss=0.3231, pruned_loss=0.08701, over 8290.00 frames. ], tot_loss[loss=0.2708, simple_loss=0.337, pruned_loss=0.1023, over 1551935.08 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:29:55,667 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. 
limit=2.0 +2023-02-06 03:29:58,991 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:59,738 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:17,689 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:18,993 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:24,598 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9322, 1.0984, 4.2981, 1.6691, 3.1684, 3.2694, 3.7632, 3.7574], + device='cuda:1'), covar=tensor([0.1019, 0.6042, 0.0994, 0.4123, 0.2663, 0.1863, 0.1153, 0.1154], + device='cuda:1'), in_proj_covar=tensor([0.0372, 0.0517, 0.0461, 0.0457, 0.0520, 0.0430, 0.0433, 0.0489], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 03:30:29,835 INFO [train.py:901] (1/4) Epoch 7, batch 700, loss[loss=0.2342, simple_loss=0.3079, pruned_loss=0.08022, over 8191.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3369, pruned_loss=0.1025, over 1565574.81 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:30:31,264 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:54,557 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 2.876e+02 3.436e+02 4.276e+02 6.994e+02, threshold=6.873e+02, percent-clipped=0.0 +2023-02-06 03:31:06,154 INFO [train.py:901] (1/4) Epoch 7, batch 750, loss[loss=0.228, simple_loss=0.2978, pruned_loss=0.07904, over 8077.00 frames. ], tot_loss[loss=0.2694, simple_loss=0.3355, pruned_loss=0.1016, over 1578731.18 frames. ], batch size: 21, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:31:09,788 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:18,092 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 03:31:18,490 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.41 vs. limit=5.0 +2023-02-06 03:31:26,438 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 03:31:40,971 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:41,445 INFO [train.py:901] (1/4) Epoch 7, batch 800, loss[loss=0.2316, simple_loss=0.3009, pruned_loss=0.0811, over 8243.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3348, pruned_loss=0.1006, over 1592718.62 frames. ], batch size: 22, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:31:53,378 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:32:05,544 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.832e+02 3.318e+02 4.162e+02 1.224e+03, threshold=6.636e+02, percent-clipped=6.0 +2023-02-06 03:32:17,948 INFO [train.py:901] (1/4) Epoch 7, batch 850, loss[loss=0.2286, simple_loss=0.2912, pruned_loss=0.08299, over 7437.00 frames. 
], tot_loss[loss=0.2669, simple_loss=0.3338, pruned_loss=0.09995, over 1596095.21 frames. ], batch size: 17, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:32:52,561 INFO [train.py:901] (1/4) Epoch 7, batch 900, loss[loss=0.3344, simple_loss=0.3773, pruned_loss=0.1457, over 6754.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3351, pruned_loss=0.1009, over 1600040.75 frames. ], batch size: 71, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:33:07,535 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-06 03:33:10,011 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4595, 1.7695, 1.7483, 0.7402, 1.7364, 1.3549, 0.3701, 1.6251], + device='cuda:1'), covar=tensor([0.0213, 0.0123, 0.0104, 0.0235, 0.0157, 0.0380, 0.0375, 0.0099], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0254, 0.0210, 0.0308, 0.0242, 0.0395, 0.0317, 0.0294], + device='cuda:1'), out_proj_covar=tensor([1.1440e-04, 8.0497e-05, 6.5810e-05, 9.7596e-05, 7.7650e-05, 1.3680e-04, + 1.0305e-04, 9.4303e-05], device='cuda:1') +2023-02-06 03:33:14,970 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0 +2023-02-06 03:33:17,136 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.812e+02 3.278e+02 4.578e+02 1.649e+03, threshold=6.556e+02, percent-clipped=8.0 +2023-02-06 03:33:22,011 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:33:28,088 INFO [train.py:901] (1/4) Epoch 7, batch 950, loss[loss=0.2527, simple_loss=0.3043, pruned_loss=0.1005, over 7534.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3348, pruned_loss=0.101, over 1602521.33 frames. ], batch size: 18, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:33:28,882 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49450.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:33:50,525 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 03:34:03,669 INFO [train.py:901] (1/4) Epoch 7, batch 1000, loss[loss=0.2791, simple_loss=0.3462, pruned_loss=0.1059, over 8355.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3356, pruned_loss=0.102, over 1603558.28 frames. ], batch size: 24, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:34:18,333 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3033, 1.4914, 1.3129, 1.8804, 0.8896, 1.1176, 1.2682, 1.4635], + device='cuda:1'), covar=tensor([0.1074, 0.1021, 0.1416, 0.0634, 0.1251, 0.1952, 0.1084, 0.0936], + device='cuda:1'), in_proj_covar=tensor([0.0260, 0.0245, 0.0283, 0.0227, 0.0243, 0.0274, 0.0277, 0.0244], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 03:34:24,221 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 03:34:27,700 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.091e+02 3.599e+02 4.515e+02 1.445e+03, threshold=7.198e+02, percent-clipped=7.0 +2023-02-06 03:34:35,939 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 03:34:38,748 INFO [train.py:901] (1/4) Epoch 7, batch 1050, loss[loss=0.2889, simple_loss=0.3611, pruned_loss=0.1084, over 8583.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3356, pruned_loss=0.1018, over 1608288.22 frames. 
], batch size: 31, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:34:43,073 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:34:45,404 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.35 vs. limit=5.0 +2023-02-06 03:34:54,566 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49571.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:00,763 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:13,219 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2018, 1.5857, 3.3029, 1.2283, 2.2021, 3.6942, 3.6453, 3.1542], + device='cuda:1'), covar=tensor([0.0919, 0.1433, 0.0409, 0.2166, 0.0877, 0.0255, 0.0399, 0.0601], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0274, 0.0230, 0.0270, 0.0233, 0.0213, 0.0266, 0.0277], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 03:35:13,270 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:14,562 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:15,154 INFO [train.py:901] (1/4) Epoch 7, batch 1100, loss[loss=0.2836, simple_loss=0.3523, pruned_loss=0.1075, over 8600.00 frames. ], tot_loss[loss=0.2706, simple_loss=0.3362, pruned_loss=0.1025, over 1606808.18 frames. ], batch size: 31, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:35:38,352 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.978e+02 2.766e+02 3.386e+02 4.310e+02 6.415e+02, threshold=6.771e+02, percent-clipped=0.0 +2023-02-06 03:35:46,089 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 03:35:49,456 INFO [train.py:901] (1/4) Epoch 7, batch 1150, loss[loss=0.2532, simple_loss=0.3279, pruned_loss=0.0892, over 8507.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3362, pruned_loss=0.1026, over 1610258.89 frames. ], batch size: 26, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:36:23,376 INFO [train.py:901] (1/4) Epoch 7, batch 1200, loss[loss=0.2906, simple_loss=0.3603, pruned_loss=0.1104, over 8756.00 frames. ], tot_loss[loss=0.2705, simple_loss=0.3361, pruned_loss=0.1024, over 1612654.67 frames. ], batch size: 30, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:36:33,697 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:36:41,591 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-06 03:36:47,173 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.915e+02 3.820e+02 5.048e+02 1.193e+03, threshold=7.640e+02, percent-clipped=11.0 +2023-02-06 03:36:57,981 INFO [train.py:901] (1/4) Epoch 7, batch 1250, loss[loss=0.3111, simple_loss=0.3841, pruned_loss=0.119, over 8189.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3352, pruned_loss=0.1014, over 1610051.70 frames. 
], batch size: 23, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:37:22,186 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:37:22,256 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:37:29,678 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49794.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:37:32,809 INFO [train.py:901] (1/4) Epoch 7, batch 1300, loss[loss=0.3144, simple_loss=0.3781, pruned_loss=0.1253, over 8578.00 frames. ], tot_loss[loss=0.2697, simple_loss=0.3363, pruned_loss=0.1015, over 1613659.05 frames. ], batch size: 39, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:37:57,661 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.650e+02 3.390e+02 4.402e+02 9.600e+02, threshold=6.781e+02, percent-clipped=3.0 +2023-02-06 03:38:08,117 INFO [train.py:901] (1/4) Epoch 7, batch 1350, loss[loss=0.2852, simple_loss=0.3586, pruned_loss=0.1059, over 8317.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3365, pruned_loss=0.1016, over 1616110.99 frames. ], batch size: 26, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:38:42,144 INFO [train.py:901] (1/4) Epoch 7, batch 1400, loss[loss=0.2334, simple_loss=0.306, pruned_loss=0.08042, over 7784.00 frames. ], tot_loss[loss=0.2706, simple_loss=0.3367, pruned_loss=0.1023, over 1612990.41 frames. ], batch size: 19, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:38:43,020 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:38:49,796 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49909.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:39:07,159 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 2.921e+02 3.790e+02 4.996e+02 8.997e+02, threshold=7.579e+02, percent-clipped=6.0 +2023-02-06 03:39:11,375 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 03:39:12,310 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8607, 1.5324, 2.2235, 1.8423, 1.9697, 1.7076, 1.3712, 0.6241], + device='cuda:1'), covar=tensor([0.2724, 0.2561, 0.0767, 0.1433, 0.1231, 0.1431, 0.1330, 0.2717], + device='cuda:1'), in_proj_covar=tensor([0.0831, 0.0759, 0.0662, 0.0753, 0.0851, 0.0700, 0.0656, 0.0698], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:39:18,098 INFO [train.py:901] (1/4) Epoch 7, batch 1450, loss[loss=0.2549, simple_loss=0.3364, pruned_loss=0.08673, over 8754.00 frames. ], tot_loss[loss=0.2699, simple_loss=0.3364, pruned_loss=0.1017, over 1614263.47 frames. ], batch size: 30, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:39:31,680 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:39:49,082 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:39:52,398 INFO [train.py:901] (1/4) Epoch 7, batch 1500, loss[loss=0.2944, simple_loss=0.361, pruned_loss=0.1139, over 8347.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.337, pruned_loss=0.1024, over 1614182.37 frames. 
], batch size: 26, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:40:16,582 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.823e+02 3.555e+02 4.038e+02 9.229e+02, threshold=7.110e+02, percent-clipped=3.0 +2023-02-06 03:40:27,896 INFO [train.py:901] (1/4) Epoch 7, batch 1550, loss[loss=0.3633, simple_loss=0.4005, pruned_loss=0.163, over 6913.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.3371, pruned_loss=0.1017, over 1616366.50 frames. ], batch size: 71, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:40:50,281 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7574, 1.1683, 3.9291, 1.4053, 3.4158, 3.2790, 3.5101, 3.3877], + device='cuda:1'), covar=tensor([0.0477, 0.3716, 0.0459, 0.2731, 0.1200, 0.0708, 0.0530, 0.0647], + device='cuda:1'), in_proj_covar=tensor([0.0368, 0.0508, 0.0461, 0.0449, 0.0513, 0.0427, 0.0431, 0.0482], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 03:41:02,321 INFO [train.py:901] (1/4) Epoch 7, batch 1600, loss[loss=0.3494, simple_loss=0.3979, pruned_loss=0.1505, over 8110.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3363, pruned_loss=0.1009, over 1616993.32 frames. ], batch size: 23, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:22,664 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:25,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.849e+02 3.464e+02 4.418e+02 7.019e+02, threshold=6.928e+02, percent-clipped=0.0 +2023-02-06 03:41:35,387 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:37,165 INFO [train.py:901] (1/4) Epoch 7, batch 1650, loss[loss=0.3246, simple_loss=0.3707, pruned_loss=0.1393, over 7158.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.3354, pruned_loss=0.1009, over 1615238.29 frames. ], batch size: 71, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:41,431 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:48,656 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50165.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:41:59,248 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:42:05,854 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50190.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:42:11,957 INFO [train.py:901] (1/4) Epoch 7, batch 1700, loss[loss=0.2603, simple_loss=0.3417, pruned_loss=0.08944, over 8595.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.3359, pruned_loss=0.1007, over 1619850.39 frames. 
], batch size: 34, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:42:20,323 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5194, 1.7855, 4.2064, 1.8738, 2.4732, 4.8645, 4.7238, 4.1366], + device='cuda:1'), covar=tensor([0.1020, 0.1471, 0.0320, 0.1996, 0.0923, 0.0191, 0.0297, 0.0629], + device='cuda:1'), in_proj_covar=tensor([0.0245, 0.0281, 0.0235, 0.0276, 0.0241, 0.0218, 0.0276, 0.0286], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 03:42:22,985 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50215.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:42:35,242 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.886e+02 3.481e+02 4.608e+02 1.233e+03, threshold=6.962e+02, percent-clipped=3.0 +2023-02-06 03:42:42,128 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:42:46,679 INFO [train.py:901] (1/4) Epoch 7, batch 1750, loss[loss=0.2974, simple_loss=0.3645, pruned_loss=0.1151, over 8357.00 frames. ], tot_loss[loss=0.2701, simple_loss=0.3368, pruned_loss=0.1017, over 1616910.35 frames. ], batch size: 24, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:43:21,551 INFO [train.py:901] (1/4) Epoch 7, batch 1800, loss[loss=0.2666, simple_loss=0.3343, pruned_loss=0.09939, over 8138.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3372, pruned_loss=0.1023, over 1616331.18 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:43:23,896 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 03:43:44,798 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.922e+02 3.562e+02 4.379e+02 1.030e+03, threshold=7.125e+02, percent-clipped=4.0 +2023-02-06 03:43:56,169 INFO [train.py:901] (1/4) Epoch 7, batch 1850, loss[loss=0.2852, simple_loss=0.3568, pruned_loss=0.1068, over 8330.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3374, pruned_loss=0.102, over 1618454.61 frames. ], batch size: 26, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:44:21,516 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0223, 1.2732, 1.5677, 1.2844, 1.1227, 1.2854, 1.4725, 1.3685], + device='cuda:1'), covar=tensor([0.0561, 0.1354, 0.1785, 0.1451, 0.0631, 0.1735, 0.0787, 0.0621], + device='cuda:1'), in_proj_covar=tensor([0.0118, 0.0169, 0.0209, 0.0172, 0.0117, 0.0177, 0.0130, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 03:44:30,595 INFO [train.py:901] (1/4) Epoch 7, batch 1900, loss[loss=0.2511, simple_loss=0.3217, pruned_loss=0.09026, over 8255.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3368, pruned_loss=0.1016, over 1613172.55 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:44:43,985 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 03:44:53,923 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.786e+02 3.637e+02 4.614e+02 8.948e+02, threshold=7.273e+02, percent-clipped=3.0 +2023-02-06 03:44:56,011 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-06 03:45:04,117 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6825, 1.8596, 2.1138, 1.7160, 1.0055, 2.1065, 0.4872, 1.3100], + device='cuda:1'), covar=tensor([0.3109, 0.1641, 0.0850, 0.2305, 0.5851, 0.0654, 0.4396, 0.2500], + device='cuda:1'), in_proj_covar=tensor([0.0141, 0.0136, 0.0082, 0.0188, 0.0227, 0.0086, 0.0145, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 03:45:04,598 INFO [train.py:901] (1/4) Epoch 7, batch 1950, loss[loss=0.2773, simple_loss=0.3501, pruned_loss=0.1023, over 8556.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3354, pruned_loss=0.1007, over 1614183.80 frames. ], batch size: 31, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:45:15,317 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 03:45:27,055 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0 +2023-02-06 03:45:33,290 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:39,206 INFO [train.py:901] (1/4) Epoch 7, batch 2000, loss[loss=0.281, simple_loss=0.3522, pruned_loss=0.1049, over 8507.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3324, pruned_loss=0.09936, over 1606302.28 frames. ], batch size: 28, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:45:39,413 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50499.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:47,633 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-06 03:45:56,765 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:46:03,218 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.800e+02 3.583e+02 4.591e+02 1.075e+03, threshold=7.166e+02, percent-clipped=7.0 +2023-02-06 03:46:13,437 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9552, 1.4776, 1.5248, 1.3560, 1.0524, 1.3483, 1.5569, 1.5057], + device='cuda:1'), covar=tensor([0.0559, 0.1211, 0.1662, 0.1368, 0.0634, 0.1553, 0.0777, 0.0585], + device='cuda:1'), in_proj_covar=tensor([0.0118, 0.0169, 0.0208, 0.0171, 0.0118, 0.0176, 0.0130, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 03:46:13,940 INFO [train.py:901] (1/4) Epoch 7, batch 2050, loss[loss=0.2555, simple_loss=0.33, pruned_loss=0.09052, over 8470.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3312, pruned_loss=0.09836, over 1604230.97 frames. ], batch size: 29, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:46:20,542 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50559.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:46:23,779 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:46:47,288 INFO [train.py:901] (1/4) Epoch 7, batch 2100, loss[loss=0.3133, simple_loss=0.3672, pruned_loss=0.1297, over 8106.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3332, pruned_loss=0.1, over 1605480.06 frames. 
], batch size: 23, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:46:52,374 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:47:11,024 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 3.066e+02 3.697e+02 4.610e+02 1.063e+03, threshold=7.394e+02, percent-clipped=3.0 +2023-02-06 03:47:12,591 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4538, 2.0764, 3.5686, 1.2167, 2.5094, 1.9604, 1.7291, 2.0940], + device='cuda:1'), covar=tensor([0.1572, 0.1774, 0.0637, 0.3197, 0.1445, 0.2389, 0.1492, 0.2398], + device='cuda:1'), in_proj_covar=tensor([0.0473, 0.0465, 0.0528, 0.0547, 0.0593, 0.0528, 0.0450, 0.0590], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:1') +2023-02-06 03:47:22,368 INFO [train.py:901] (1/4) Epoch 7, batch 2150, loss[loss=0.2615, simple_loss=0.3363, pruned_loss=0.09335, over 8560.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3331, pruned_loss=0.1001, over 1606743.72 frames. ], batch size: 31, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:47:40,256 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50674.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:47:44,608 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 03:47:52,461 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-06 03:47:56,609 INFO [train.py:901] (1/4) Epoch 7, batch 2200, loss[loss=0.2915, simple_loss=0.3677, pruned_loss=0.1077, over 8588.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3337, pruned_loss=0.1001, over 1605562.53 frames. ], batch size: 31, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:48:09,854 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-06 03:48:20,831 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.939e+02 3.492e+02 4.230e+02 8.261e+02, threshold=6.983e+02, percent-clipped=2.0 +2023-02-06 03:48:31,234 INFO [train.py:901] (1/4) Epoch 7, batch 2250, loss[loss=0.2497, simple_loss=0.3163, pruned_loss=0.09161, over 7799.00 frames. ], tot_loss[loss=0.2662, simple_loss=0.3333, pruned_loss=0.09959, over 1606334.30 frames. ], batch size: 20, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:49:04,149 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6236, 1.2497, 2.7657, 1.1258, 1.9459, 3.0014, 3.0451, 2.5463], + device='cuda:1'), covar=tensor([0.0952, 0.1482, 0.0436, 0.2138, 0.0796, 0.0308, 0.0474, 0.0695], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0270, 0.0227, 0.0267, 0.0236, 0.0212, 0.0266, 0.0278], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 03:49:05,375 INFO [train.py:901] (1/4) Epoch 7, batch 2300, loss[loss=0.2661, simple_loss=0.3443, pruned_loss=0.09398, over 8471.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3347, pruned_loss=0.1006, over 1608142.30 frames. 
], batch size: 25, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:49:06,293 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50800.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:49:23,393 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50826.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:49:28,611 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.140e+02 4.073e+02 5.620e+02 1.608e+03, threshold=8.146e+02, percent-clipped=16.0
+2023-02-06 03:49:39,827 INFO [train.py:901] (1/4) Epoch 7, batch 2350, loss[loss=0.2812, simple_loss=0.3548, pruned_loss=0.1039, over 8462.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3358, pruned_loss=0.1012, over 1613881.36 frames. ], batch size: 27, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:49:47,977 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:50:04,997 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:50:14,216 INFO [train.py:901] (1/4) Epoch 7, batch 2400, loss[loss=0.267, simple_loss=0.3471, pruned_loss=0.0934, over 8246.00 frames. ], tot_loss[loss=0.268, simple_loss=0.335, pruned_loss=0.1005, over 1614673.03 frames. ], batch size: 24, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:50:20,197 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50908.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:50:30,576 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. limit=2.0
+2023-02-06 03:50:35,196 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50930.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:50:36,987 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.900e+02 3.414e+02 4.196e+02 7.276e+02, threshold=6.828e+02, percent-clipped=0.0
+2023-02-06 03:50:47,541 INFO [train.py:901] (1/4) Epoch 7, batch 2450, loss[loss=0.2652, simple_loss=0.3347, pruned_loss=0.09786, over 8340.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3348, pruned_loss=0.1006, over 1613582.69 frames. ], batch size: 49, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:50:52,350 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50955.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:50:59,075 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50964.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:51:22,800 INFO [train.py:901] (1/4) Epoch 7, batch 2500, loss[loss=0.2613, simple_loss=0.3399, pruned_loss=0.09131, over 8461.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3347, pruned_loss=0.1004, over 1610474.21 frames. ], batch size: 27, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:51:36,588 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3006, 1.7961, 4.4685, 2.0250, 3.8978, 3.7675, 4.0843, 3.8912],
+ device='cuda:1'), covar=tensor([0.0531, 0.3650, 0.0441, 0.2685, 0.1052, 0.0799, 0.0553, 0.0621],
+ device='cuda:1'), in_proj_covar=tensor([0.0377, 0.0526, 0.0475, 0.0456, 0.0528, 0.0435, 0.0437, 0.0497],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 03:51:39,925 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51023.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:51:45,169 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3255, 1.4600, 4.4549, 1.4885, 3.8728, 3.6972, 3.9919, 3.8156],
+ device='cuda:1'), covar=tensor([0.0450, 0.3845, 0.0367, 0.2998, 0.0970, 0.0713, 0.0521, 0.0624],
+ device='cuda:1'), in_proj_covar=tensor([0.0376, 0.0524, 0.0472, 0.0456, 0.0524, 0.0433, 0.0435, 0.0493],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 03:51:46,387 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.634e+02 3.421e+02 4.023e+02 8.503e+02, threshold=6.842e+02, percent-clipped=1.0
+2023-02-06 03:51:56,889 INFO [train.py:901] (1/4) Epoch 7, batch 2550, loss[loss=0.2532, simple_loss=0.3374, pruned_loss=0.08444, over 8555.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3355, pruned_loss=0.1006, over 1615030.82 frames. ], batch size: 31, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:52:18,133 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51080.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:52:31,079 INFO [train.py:901] (1/4) Epoch 7, batch 2600, loss[loss=0.3835, simple_loss=0.4033, pruned_loss=0.1819, over 6449.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3348, pruned_loss=0.1003, over 1610630.72 frames. ], batch size: 71, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:52:54,868 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.187e+02 3.140e+02 3.874e+02 4.757e+02 8.436e+02, threshold=7.747e+02, percent-clipped=5.0
+2023-02-06 03:53:02,023 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51144.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:53:05,174 INFO [train.py:901] (1/4) Epoch 7, batch 2650, loss[loss=0.3667, simple_loss=0.4029, pruned_loss=0.1653, over 8461.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3353, pruned_loss=0.1003, over 1610647.11 frames. ], batch size: 25, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:53:19,254 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51170.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:53:32,643 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51190.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:53:39,227 INFO [train.py:901] (1/4) Epoch 7, batch 2700, loss[loss=0.2885, simple_loss=0.3599, pruned_loss=0.1086, over 8487.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.335, pruned_loss=0.09998, over 1608468.13 frames. ], batch size: 39, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:53:58,088 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2493, 1.4741, 2.1771, 1.1395, 1.5344, 1.5534, 1.3254, 1.3853],
+ device='cuda:1'), covar=tensor([0.1678, 0.1860, 0.0751, 0.3316, 0.1345, 0.2506, 0.1686, 0.1776],
+ device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0471, 0.0540, 0.0553, 0.0591, 0.0534, 0.0453, 0.0596],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:1')
+2023-02-06 03:54:02,467 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 3.046e+02 3.584e+02 4.560e+02 9.753e+02, threshold=7.169e+02, percent-clipped=4.0
+2023-02-06 03:54:14,246 INFO [train.py:901] (1/4) Epoch 7, batch 2750, loss[loss=0.2639, simple_loss=0.3178, pruned_loss=0.105, over 7935.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3356, pruned_loss=0.1002, over 1614448.28 frames. ], batch size: 20, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:54:21,062 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51259.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:54:34,381 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51279.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:54:38,289 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51285.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:54:47,538 INFO [train.py:901] (1/4) Epoch 7, batch 2800, loss[loss=0.2447, simple_loss=0.31, pruned_loss=0.08972, over 7689.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.335, pruned_loss=0.09969, over 1618740.43 frames. ], batch size: 18, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:54:51,138 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51304.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:54:53,735 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51308.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:55:11,784 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.811e+02 3.563e+02 4.674e+02 6.809e+02, threshold=7.126e+02, percent-clipped=0.0
+2023-02-06 03:55:14,047 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0070, 2.0784, 1.8379, 2.6545, 1.0102, 1.5606, 1.6553, 2.0609],
+ device='cuda:1'), covar=tensor([0.0799, 0.1011, 0.1334, 0.0544, 0.1640, 0.1765, 0.1316, 0.0959],
+ device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0239, 0.0278, 0.0222, 0.0243, 0.0267, 0.0278, 0.0243],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 03:55:22,690 INFO [train.py:901] (1/4) Epoch 7, batch 2850, loss[loss=0.219, simple_loss=0.2936, pruned_loss=0.07217, over 7550.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.3326, pruned_loss=0.09812, over 1611399.10 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 16.0
+2023-02-06 03:55:57,259 INFO [train.py:901] (1/4) Epoch 7, batch 2900, loss[loss=0.2776, simple_loss=0.3358, pruned_loss=0.1097, over 8265.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3324, pruned_loss=0.09875, over 1608252.06 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 16.0
+2023-02-06 03:55:58,210 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51400.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:04,948 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:13,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51423.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:14,126 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51424.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:19,622 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 03:56:20,271 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.833e+02 3.577e+02 4.732e+02 1.075e+03, threshold=7.153e+02, percent-clipped=9.0
+2023-02-06 03:56:32,326 INFO [train.py:901] (1/4) Epoch 7, batch 2950, loss[loss=0.2751, simple_loss=0.3388, pruned_loss=0.1057, over 8243.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.3325, pruned_loss=0.09945, over 1606013.90 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 16.0
+2023-02-06 03:56:34,164 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0
+2023-02-06 03:56:38,234 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0
+2023-02-06 03:57:06,418 INFO [train.py:901] (1/4) Epoch 7, batch 3000, loss[loss=0.28, simple_loss=0.343, pruned_loss=0.1084, over 8516.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.333, pruned_loss=0.09998, over 1608440.89 frames. ], batch size: 28, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:57:06,419 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 03:57:21,704 INFO [train.py:935] (1/4) Epoch 7, validation: loss=0.2071, simple_loss=0.305, pruned_loss=0.05459, over 944034.00 frames.
+2023-02-06 03:57:21,706 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6607MB
+2023-02-06 03:57:31,203 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51513.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:32,577 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51515.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:37,987 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7464, 2.3141, 3.8596, 2.8168, 3.0870, 2.2540, 1.6774, 1.7278],
+ device='cuda:1'), covar=tensor([0.2546, 0.3101, 0.0642, 0.1694, 0.1396, 0.1445, 0.1336, 0.3309],
+ device='cuda:1'), in_proj_covar=tensor([0.0817, 0.0756, 0.0656, 0.0750, 0.0845, 0.0694, 0.0653, 0.0692],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 03:57:45,152 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.842e+02 3.422e+02 4.197e+02 1.269e+03, threshold=6.844e+02, percent-clipped=2.0
+2023-02-06 03:57:45,245 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51534.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:57:48,650 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51539.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:49,353 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51540.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:50,068 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51541.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:57:55,343 INFO [train.py:901] (1/4) Epoch 7, batch 3050, loss[loss=0.2866, simple_loss=0.3566, pruned_loss=0.1083, over 8296.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.333, pruned_loss=0.09934, over 1609693.11 frames. ], batch size: 23, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:58:06,943 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51566.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:58:12,896 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0280, 1.5890, 1.6093, 1.3060, 1.0742, 1.3904, 1.6936, 1.5018],
+ device='cuda:1'), covar=tensor([0.0543, 0.1197, 0.1664, 0.1350, 0.0595, 0.1465, 0.0681, 0.0560],
+ device='cuda:1'), in_proj_covar=tensor([0.0118, 0.0167, 0.0210, 0.0173, 0.0117, 0.0174, 0.0128, 0.0147],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:1')
+2023-02-06 03:58:29,887 INFO [train.py:901] (1/4) Epoch 7, batch 3100, loss[loss=0.2509, simple_loss=0.309, pruned_loss=0.09637, over 7253.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.334, pruned_loss=0.1002, over 1612929.53 frames. ], batch size: 16, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:58:54,840 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.035e+02 3.902e+02 5.145e+02 1.067e+03, threshold=7.804e+02, percent-clipped=7.0
+2023-02-06 03:59:05,332 INFO [train.py:901] (1/4) Epoch 7, batch 3150, loss[loss=0.2636, simple_loss=0.3334, pruned_loss=0.0969, over 8612.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3347, pruned_loss=0.1009, over 1609958.59 frames. ], batch size: 34, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:59:05,517 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51649.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:59:23,453 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6366, 4.7051, 4.0884, 1.8332, 4.0207, 4.1275, 4.3128, 3.6289],
+ device='cuda:1'), covar=tensor([0.0802, 0.0597, 0.1025, 0.4871, 0.0843, 0.0747, 0.1364, 0.0963],
+ device='cuda:1'), in_proj_covar=tensor([0.0421, 0.0320, 0.0346, 0.0429, 0.0334, 0.0313, 0.0325, 0.0282],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 03:59:26,411 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51679.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:59:33,466 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8702, 2.3299, 3.9370, 2.8586, 3.1407, 2.3181, 1.8178, 1.8206],
+ device='cuda:1'), covar=tensor([0.2360, 0.2909, 0.0645, 0.1693, 0.1469, 0.1465, 0.1340, 0.3203],
+ device='cuda:1'), in_proj_covar=tensor([0.0821, 0.0758, 0.0666, 0.0753, 0.0854, 0.0706, 0.0655, 0.0698],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 03:59:40,113 INFO [train.py:901] (1/4) Epoch 7, batch 3200, loss[loss=0.3204, simple_loss=0.3774, pruned_loss=0.1317, over 8547.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3345, pruned_loss=0.1004, over 1611925.39 frames. ], batch size: 34, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:59:43,600 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51704.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:05,271 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.946e+02 3.588e+02 4.680e+02 7.788e+02, threshold=7.176e+02, percent-clipped=0.0
+2023-02-06 04:00:12,048 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:16,041 INFO [train.py:901] (1/4) Epoch 7, batch 3250, loss[loss=0.2179, simple_loss=0.2931, pruned_loss=0.07136, over 8233.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3345, pruned_loss=0.1004, over 1610818.94 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:00:19,464 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51754.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:23,521 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8239, 1.5968, 1.6981, 1.3853, 1.3797, 1.5896, 2.1727, 1.9054],
+ device='cuda:1'), covar=tensor([0.0551, 0.1330, 0.1838, 0.1523, 0.0659, 0.1613, 0.0704, 0.0650],
+ device='cuda:1'), in_proj_covar=tensor([0.0119, 0.0166, 0.0210, 0.0172, 0.0117, 0.0174, 0.0127, 0.0147],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:1')
+2023-02-06 04:00:26,784 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51765.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:46,801 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:49,339 INFO [train.py:901] (1/4) Epoch 7, batch 3300, loss[loss=0.2353, simple_loss=0.3035, pruned_loss=0.08352, over 7812.00 frames. ], tot_loss[loss=0.2669, simple_loss=0.3336, pruned_loss=0.1001, over 1611207.41 frames. ], batch size: 20, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:01:03,665 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:14,384 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.988e+02 3.662e+02 4.246e+02 9.313e+02, threshold=7.324e+02, percent-clipped=2.0
+2023-02-06 04:01:24,626 INFO [train.py:901] (1/4) Epoch 7, batch 3350, loss[loss=0.2404, simple_loss=0.321, pruned_loss=0.07987, over 8102.00 frames. ], tot_loss[loss=0.266, simple_loss=0.3328, pruned_loss=0.09958, over 1611154.56 frames. ], batch size: 23, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:01:30,919 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51857.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:32,345 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51859.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:39,536 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51869.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:59,352 INFO [train.py:901] (1/4) Epoch 7, batch 3400, loss[loss=0.2775, simple_loss=0.3262, pruned_loss=0.1144, over 8131.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3315, pruned_loss=0.09908, over 1609533.44 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:02:03,642 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51905.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:02:10,928 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-02-06 04:02:16,129 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0763, 2.2641, 1.7812, 2.6311, 1.4914, 1.4978, 1.9555, 2.3020],
+ device='cuda:1'), covar=tensor([0.0785, 0.0851, 0.1138, 0.0508, 0.1205, 0.1573, 0.1112, 0.0764],
+ device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0233, 0.0275, 0.0219, 0.0239, 0.0263, 0.0272, 0.0237],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 04:02:20,935 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51930.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:02:23,259 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.723e+02 3.470e+02 4.144e+02 7.359e+02, threshold=6.940e+02, percent-clipped=1.0
+2023-02-06 04:02:24,205 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7168, 1.5062, 2.0072, 1.6962, 1.7716, 1.6425, 1.3076, 0.7041],
+ device='cuda:1'), covar=tensor([0.2443, 0.2354, 0.0709, 0.1264, 0.1129, 0.1357, 0.1247, 0.2290],
+ device='cuda:1'), in_proj_covar=tensor([0.0822, 0.0758, 0.0656, 0.0752, 0.0856, 0.0704, 0.0653, 0.0691],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:02:34,621 INFO [train.py:901] (1/4) Epoch 7, batch 3450, loss[loss=0.2552, simple_loss=0.3318, pruned_loss=0.08931, over 8482.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3337, pruned_loss=0.1004, over 1612454.54 frames. ], batch size: 27, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:02:50,955 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51972.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:03:04,798 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5337, 1.5771, 2.7559, 1.1015, 2.0850, 3.0586, 3.0385, 2.5618],
+ device='cuda:1'), covar=tensor([0.1100, 0.1284, 0.0450, 0.2190, 0.0715, 0.0307, 0.0512, 0.0742],
+ device='cuda:1'), in_proj_covar=tensor([0.0240, 0.0274, 0.0230, 0.0271, 0.0243, 0.0218, 0.0275, 0.0281],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 04:03:09,380 INFO [train.py:901] (1/4) Epoch 7, batch 3500, loss[loss=0.2624, simple_loss=0.3452, pruned_loss=0.08976, over 8315.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.334, pruned_loss=0.1001, over 1611456.17 frames. ], batch size: 25, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:03:22,435 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 04:03:27,422 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.58 vs. limit=5.0
+2023-02-06 04:03:33,496 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.822e+02 3.302e+02 4.435e+02 1.594e+03, threshold=6.604e+02, percent-clipped=5.0
+2023-02-06 04:03:42,067 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 04:03:43,734 INFO [train.py:901] (1/4) Epoch 7, batch 3550, loss[loss=0.2451, simple_loss=0.315, pruned_loss=0.08755, over 7969.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3338, pruned_loss=0.09976, over 1613612.07 frames. ], batch size: 21, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:03:48,709 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7709, 2.1391, 4.3148, 1.2792, 2.7149, 2.1457, 1.6679, 2.5480],
+ device='cuda:1'), covar=tensor([0.1577, 0.2247, 0.0690, 0.3625, 0.1654, 0.2691, 0.1736, 0.2492],
+ device='cuda:1'), in_proj_covar=tensor([0.0472, 0.0472, 0.0527, 0.0549, 0.0583, 0.0522, 0.0452, 0.0590],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:1')
+2023-02-06 04:03:50,008 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52058.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:19,844 INFO [train.py:901] (1/4) Epoch 7, batch 3600, loss[loss=0.2119, simple_loss=0.2939, pruned_loss=0.06496, over 7542.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.333, pruned_loss=0.09875, over 1614658.89 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:04:24,802 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1161, 1.6859, 1.4000, 1.7892, 1.3451, 1.2075, 1.1654, 1.5390],
+ device='cuda:1'), covar=tensor([0.0844, 0.0359, 0.0914, 0.0351, 0.0638, 0.1062, 0.0703, 0.0582],
+ device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0237, 0.0316, 0.0303, 0.0316, 0.0322, 0.0347, 0.0325],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 04:04:26,702 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:28,341 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-06 04:04:30,885 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:37,712 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:43,468 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.818e+02 3.176e+02 4.094e+02 8.086e+02, threshold=6.353e+02, percent-clipped=5.0
+2023-02-06 04:04:47,733 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:53,395 INFO [train.py:901] (1/4) Epoch 7, batch 3650, loss[loss=0.3627, simple_loss=0.4009, pruned_loss=0.1623, over 6948.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.3328, pruned_loss=0.09872, over 1613839.74 frames. ], batch size: 72, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:04:54,173 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52150.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:21,245 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.3024, 1.2828, 5.6125, 2.2657, 4.3314, 4.4292, 5.1407, 5.1194],
+ device='cuda:1'), covar=tensor([0.0849, 0.5849, 0.0651, 0.3272, 0.2116, 0.1186, 0.0730, 0.0723],
+ device='cuda:1'), in_proj_covar=tensor([0.0378, 0.0526, 0.0475, 0.0468, 0.0528, 0.0436, 0.0429, 0.0493],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 04:05:23,189 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 04:05:28,581 INFO [train.py:901] (1/4) Epoch 7, batch 3700, loss[loss=0.2861, simple_loss=0.3547, pruned_loss=0.1087, over 8558.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3341, pruned_loss=0.09921, over 1613983.74 frames. ], batch size: 39, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:05:36,879 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52211.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:47,064 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:49,831 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52228.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:53,723 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.602e+02 3.554e+02 4.404e+02 9.700e+02, threshold=7.108e+02, percent-clipped=5.0
+2023-02-06 04:06:04,133 INFO [train.py:901] (1/4) Epoch 7, batch 3750, loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.07494, over 8245.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3343, pruned_loss=0.09931, over 1615687.54 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:06:07,173 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52253.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:06:12,885 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7717, 2.0107, 1.6962, 2.5268, 1.2344, 1.4474, 1.8013, 1.9596],
+ device='cuda:1'), covar=tensor([0.0893, 0.1107, 0.1115, 0.0510, 0.1248, 0.1456, 0.1017, 0.0835],
+ device='cuda:1'), in_proj_covar=tensor([0.0254, 0.0238, 0.0280, 0.0223, 0.0241, 0.0266, 0.0274, 0.0241],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 04:06:24,074 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.51 vs. limit=5.0
+2023-02-06 04:06:38,742 INFO [train.py:901] (1/4) Epoch 7, batch 3800, loss[loss=0.2463, simple_loss=0.3239, pruned_loss=0.0843, over 8297.00 frames. ], tot_loss[loss=0.266, simple_loss=0.3331, pruned_loss=0.09948, over 1606044.85 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:07:01,885 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52330.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:07:04,421 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.780e+02 3.361e+02 4.228e+02 6.516e+02, threshold=6.722e+02, percent-clipped=0.0
+2023-02-06 04:07:15,844 INFO [train.py:901] (1/4) Epoch 7, batch 3850, loss[loss=0.2396, simple_loss=0.3107, pruned_loss=0.08423, over 7973.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3331, pruned_loss=0.09885, over 1610550.48 frames. ], batch size: 21, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:07:30,480 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 04:07:49,738 INFO [train.py:901] (1/4) Epoch 7, batch 3900, loss[loss=0.2355, simple_loss=0.301, pruned_loss=0.08502, over 7788.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.3335, pruned_loss=0.09916, over 1609036.15 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:07:51,996 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:08:15,069 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.697e+02 3.207e+02 4.225e+02 1.297e+03, threshold=6.414e+02, percent-clipped=5.0
+2023-02-06 04:08:25,215 INFO [train.py:901] (1/4) Epoch 7, batch 3950, loss[loss=0.2484, simple_loss=0.3239, pruned_loss=0.08644, over 8316.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3316, pruned_loss=0.09782, over 1612841.04 frames. ], batch size: 25, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:08:47,998 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:08:57,501 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0437, 1.0081, 4.1963, 1.5874, 3.6478, 3.4849, 3.7995, 3.6472],
+ device='cuda:1'), covar=tensor([0.0459, 0.4108, 0.0408, 0.2917, 0.1123, 0.0803, 0.0498, 0.0595],
+ device='cuda:1'), in_proj_covar=tensor([0.0383, 0.0527, 0.0479, 0.0469, 0.0533, 0.0443, 0.0433, 0.0496],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 04:09:00,793 INFO [train.py:901] (1/4) Epoch 7, batch 4000, loss[loss=0.25, simple_loss=0.3081, pruned_loss=0.0959, over 7694.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3312, pruned_loss=0.09793, over 1609308.78 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:09:04,720 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.75 vs. limit=5.0
+2023-02-06 04:09:05,167 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52505.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:09:13,173 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:09:24,184 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.935e+02 3.629e+02 4.693e+02 1.248e+03, threshold=7.258e+02, percent-clipped=9.0
+2023-02-06 04:09:35,733 INFO [train.py:901] (1/4) Epoch 7, batch 4050, loss[loss=0.2372, simple_loss=0.3146, pruned_loss=0.0799, over 8198.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.3316, pruned_loss=0.09838, over 1609546.30 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:09:39,788 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52555.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:09:52,444 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52573.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:10:10,474 INFO [train.py:901] (1/4) Epoch 7, batch 4100, loss[loss=0.2613, simple_loss=0.3328, pruned_loss=0.09491, over 8469.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3323, pruned_loss=0.09883, over 1612930.90 frames. ], batch size: 25, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:10:33,846 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.709e+02 3.346e+02 4.687e+02 1.096e+03, threshold=6.691e+02, percent-clipped=5.0
+2023-02-06 04:10:44,015 INFO [train.py:901] (1/4) Epoch 7, batch 4150, loss[loss=0.2652, simple_loss=0.348, pruned_loss=0.09116, over 8477.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.3321, pruned_loss=0.09833, over 1615222.00 frames. ], batch size: 29, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:10:59,827 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:11:02,381 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52674.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:11:20,451 INFO [train.py:901] (1/4) Epoch 7, batch 4200, loss[loss=0.375, simple_loss=0.416, pruned_loss=0.167, over 8342.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3327, pruned_loss=0.09863, over 1616215.84 frames. ], batch size: 26, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:11:30,522 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 04:11:43,886 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.675e+02 3.334e+02 4.108e+02 1.082e+03, threshold=6.669e+02, percent-clipped=4.0
+2023-02-06 04:11:53,187 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 04:11:53,854 INFO [train.py:901] (1/4) Epoch 7, batch 4250, loss[loss=0.2295, simple_loss=0.2968, pruned_loss=0.08115, over 7551.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.3328, pruned_loss=0.09835, over 1617222.21 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:12:05,291 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8664, 1.6880, 2.5382, 1.6291, 2.1111, 2.8032, 2.6662, 2.5741],
+ device='cuda:1'), covar=tensor([0.0702, 0.1037, 0.0623, 0.1437, 0.0939, 0.0288, 0.0605, 0.0476],
+ device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0274, 0.0229, 0.0271, 0.0241, 0.0216, 0.0274, 0.0278],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 04:12:08,145 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52770.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:10,304 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52773.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:22,419 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:28,472 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52798.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:12:28,947 INFO [train.py:901] (1/4) Epoch 7, batch 4300, loss[loss=0.2401, simple_loss=0.3177, pruned_loss=0.08128, over 8495.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3313, pruned_loss=0.09794, over 1612705.99 frames. ], batch size: 29, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:12:53,670 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.879e+02 3.462e+02 4.347e+02 1.112e+03, threshold=6.924e+02, percent-clipped=5.0
+2023-02-06 04:13:03,883 INFO [train.py:901] (1/4) Epoch 7, batch 4350, loss[loss=0.2297, simple_loss=0.3048, pruned_loss=0.07729, over 8108.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3306, pruned_loss=0.09714, over 1615000.54 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:13:24,436 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 04:13:38,502 INFO [train.py:901] (1/4) Epoch 7, batch 4400, loss[loss=0.2119, simple_loss=0.2877, pruned_loss=0.06804, over 7928.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.3306, pruned_loss=0.09687, over 1618422.51 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:13:50,775 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52917.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:13:56,757 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52926.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:14:02,505 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.715e+02 3.689e+02 4.508e+02 8.331e+02, threshold=7.379e+02, percent-clipped=6.0
+2023-02-06 04:14:06,680 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 04:14:13,199 INFO [train.py:901] (1/4) Epoch 7, batch 4450, loss[loss=0.3267, simple_loss=0.3646, pruned_loss=0.1444, over 7794.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3297, pruned_loss=0.09646, over 1616148.85 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:14:14,720 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52951.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:14:19,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2355, 1.7768, 2.7316, 2.1572, 2.2278, 1.8956, 1.5695, 0.9851],
+ device='cuda:1'), covar=tensor([0.2431, 0.2573, 0.0633, 0.1358, 0.1238, 0.1424, 0.1287, 0.2702],
+ device='cuda:1'), in_proj_covar=tensor([0.0825, 0.0770, 0.0674, 0.0762, 0.0857, 0.0708, 0.0658, 0.0708],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:14:46,592 INFO [train.py:901] (1/4) Epoch 7, batch 4500, loss[loss=0.2082, simple_loss=0.2831, pruned_loss=0.0666, over 7543.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3309, pruned_loss=0.09684, over 1618102.98 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:14:49,877 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-02-06 04:14:59,362 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 04:15:10,359 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53032.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:15:11,037 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8204, 1.9300, 2.1852, 1.6707, 1.1148, 2.2836, 0.4322, 1.4787],
+ device='cuda:1'), covar=tensor([0.3475, 0.1620, 0.0653, 0.2531, 0.5420, 0.0717, 0.4255, 0.2007],
+ device='cuda:1'), in_proj_covar=tensor([0.0144, 0.0143, 0.0086, 0.0193, 0.0233, 0.0089, 0.0150, 0.0143],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:15:11,488 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.236e+02 2.890e+02 3.405e+02 4.030e+02 1.067e+03, threshold=6.809e+02, percent-clipped=4.0
+2023-02-06 04:15:18,996 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:15:22,053 INFO [train.py:901] (1/4) Epoch 7, batch 4550, loss[loss=0.3094, simple_loss=0.3741, pruned_loss=0.1223, over 8599.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3309, pruned_loss=0.09697, over 1616603.49 frames. ], batch size: 31, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:15:36,992 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53070.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:15:39,629 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53074.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:15:56,219 INFO [train.py:901] (1/4) Epoch 7, batch 4600, loss[loss=0.23, simple_loss=0.3078, pruned_loss=0.07617, over 8084.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3307, pruned_loss=0.09674, over 1617553.99 frames. ], batch size: 21, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:16:06,504 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53114.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:16:20,975 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.951e+02 3.579e+02 4.375e+02 1.013e+03, threshold=7.158e+02, percent-clipped=5.0
+2023-02-06 04:16:31,857 INFO [train.py:901] (1/4) Epoch 7, batch 4650, loss[loss=0.2647, simple_loss=0.3364, pruned_loss=0.09653, over 7964.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3302, pruned_loss=0.09685, over 1615007.59 frames. ], batch size: 21, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:17:06,943 INFO [train.py:901] (1/4) Epoch 7, batch 4700, loss[loss=0.218, simple_loss=0.2986, pruned_loss=0.06871, over 7927.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.331, pruned_loss=0.09784, over 1608394.35 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:17:27,463 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:17:30,529 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.793e+02 3.469e+02 4.275e+02 9.300e+02, threshold=6.939e+02, percent-clipped=3.0
+2023-02-06 04:17:34,871 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6610, 2.3293, 4.7049, 1.3090, 3.1543, 2.0951, 1.7251, 2.9211],
+ device='cuda:1'), covar=tensor([0.1619, 0.1881, 0.0573, 0.3714, 0.1358, 0.2588, 0.1721, 0.2117],
+ device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0474, 0.0527, 0.0551, 0.0597, 0.0534, 0.0455, 0.0588],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:17:41,199 INFO [train.py:901] (1/4) Epoch 7, batch 4750, loss[loss=0.2605, simple_loss=0.3261, pruned_loss=0.09748, over 7196.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3314, pruned_loss=0.09834, over 1605088.66 frames. ], batch size: 16, lr: 1.09e-02, grad_scale: 8.0
+2023-02-06 04:17:48,716 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53259.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:17:52,100 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5898, 1.3509, 2.7603, 1.1529, 1.9006, 3.0154, 3.0092, 2.5684],
+ device='cuda:1'), covar=tensor([0.1027, 0.1368, 0.0440, 0.2019, 0.0799, 0.0301, 0.0517, 0.0624],
+ device='cuda:1'), in_proj_covar=tensor([0.0240, 0.0276, 0.0231, 0.0272, 0.0243, 0.0218, 0.0276, 0.0282],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 04:17:56,642 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 04:17:59,374 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 04:18:09,733 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53288.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:18:16,610 INFO [train.py:901] (1/4) Epoch 7, batch 4800, loss[loss=0.2551, simple_loss=0.3037, pruned_loss=0.1033, over 7411.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3328, pruned_loss=0.0991, over 1608751.45 frames. ], batch size: 17, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:18:26,454 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53313.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:18:30,764 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-02-06 04:18:32,423 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:18:33,780 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3114, 1.6571, 4.4228, 1.8475, 2.3281, 5.2287, 5.0957, 4.5034],
+ device='cuda:1'), covar=tensor([0.1001, 0.1476, 0.0248, 0.1931, 0.0925, 0.0153, 0.0234, 0.0455],
+ device='cuda:1'), in_proj_covar=tensor([0.0240, 0.0276, 0.0231, 0.0272, 0.0241, 0.0219, 0.0277, 0.0281],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 04:18:39,901 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53333.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:18:41,111 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.630e+02 3.191e+02 3.984e+02 9.617e+02, threshold=6.381e+02, percent-clipped=3.0
+2023-02-06 04:18:50,458 INFO [train.py:901] (1/4) Epoch 7, batch 4850, loss[loss=0.2361, simple_loss=0.3096, pruned_loss=0.08124, over 7965.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.3305, pruned_loss=0.09743, over 1599447.06 frames. ], batch size: 21, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:18:51,164 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 04:19:12,830 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.08 vs. limit=5.0
+2023-02-06 04:19:16,472 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53385.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:19:26,613 INFO [train.py:901] (1/4) Epoch 7, batch 4900, loss[loss=0.2806, simple_loss=0.3595, pruned_loss=0.1008, over 8189.00 frames. ], tot_loss[loss=0.2629, simple_loss=0.3308, pruned_loss=0.09756, over 1601516.58 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:19:38,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6468, 1.8025, 2.0324, 1.5980, 1.0444, 2.0863, 0.2272, 1.1637],
+ device='cuda:1'), covar=tensor([0.2943, 0.1521, 0.0624, 0.1948, 0.4844, 0.0563, 0.3674, 0.1904],
+ device='cuda:1'), in_proj_covar=tensor([0.0142, 0.0142, 0.0084, 0.0190, 0.0228, 0.0088, 0.0147, 0.0141],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:19:40,217 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53418.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:19:40,949 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8017, 1.4619, 5.9026, 1.9786, 5.2322, 5.0253, 5.4952, 5.3253],
+ device='cuda:1'), covar=tensor([0.0414, 0.4093, 0.0203, 0.2738, 0.0780, 0.0540, 0.0365, 0.0393],
+ device='cuda:1'), in_proj_covar=tensor([0.0385, 0.0528, 0.0476, 0.0464, 0.0526, 0.0441, 0.0430, 0.0493],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 04:19:51,450 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.811e+02 3.269e+02 4.328e+02 9.769e+02, threshold=6.539e+02, percent-clipped=6.0
+2023-02-06 04:19:51,636 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7949, 1.5752, 5.8005, 2.0176, 5.2398, 4.9139, 5.4519, 5.2562],
+ device='cuda:1'), covar=tensor([0.0391, 0.4214, 0.0267, 0.2939, 0.0922, 0.0562, 0.0400, 0.0447],
+ device='cuda:1'), in_proj_covar=tensor([0.0387, 0.0531, 0.0480, 0.0466, 0.0527, 0.0442, 0.0434, 0.0497],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 04:20:00,864 INFO [train.py:901] (1/4) Epoch 7, batch 4950, loss[loss=0.2912, simple_loss=0.357, pruned_loss=0.1127, over 8493.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3312, pruned_loss=0.09805, over 1604423.48 frames. ], batch size: 28, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:20:27,211 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53485.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:20:36,823 INFO [train.py:901] (1/4) Epoch 7, batch 5000, loss[loss=0.2522, simple_loss=0.3112, pruned_loss=0.09663, over 7433.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3309, pruned_loss=0.09739, over 1603772.21 frames. ], batch size: 17, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:20:45,268 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53510.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:21:01,965 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53533.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:21:03,200 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.612e+02 3.156e+02 4.000e+02 8.821e+02, threshold=6.312e+02, percent-clipped=7.0
+2023-02-06 04:21:12,890 INFO [train.py:901] (1/4) Epoch 7, batch 5050, loss[loss=0.247, simple_loss=0.3192, pruned_loss=0.08743, over 8439.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3299, pruned_loss=0.09635, over 1605440.26 frames. ], batch size: 29, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:21:32,025 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 04:21:46,781 INFO [train.py:901] (1/4) Epoch 7, batch 5100, loss[loss=0.294, simple_loss=0.3317, pruned_loss=0.1282, over 7784.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3299, pruned_loss=0.09707, over 1604401.96 frames. ], batch size: 19, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:21:51,220 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53603.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:21:56,055 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0694, 3.9672, 2.4646, 2.6613, 2.8525, 1.8681, 2.4753, 3.0091],
+ device='cuda:1'), covar=tensor([0.1524, 0.0269, 0.0887, 0.0723, 0.0650, 0.1174, 0.1060, 0.0884],
+ device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0241, 0.0316, 0.0304, 0.0315, 0.0322, 0.0348, 0.0324],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 04:22:13,233 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.889e+02 3.391e+02 4.238e+02 9.606e+02, threshold=6.783e+02, percent-clipped=10.0
+2023-02-06 04:22:23,450 INFO [train.py:901] (1/4) Epoch 7, batch 5150, loss[loss=0.3069, simple_loss=0.3594, pruned_loss=0.1272, over 7175.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.3303, pruned_loss=0.09707, over 1606335.74 frames. ], batch size: 74, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:22:34,955 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53666.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:22:42,266 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53677.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:22:56,582 INFO [train.py:901] (1/4) Epoch 7, batch 5200, loss[loss=0.2402, simple_loss=0.3282, pruned_loss=0.07611, over 8459.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3314, pruned_loss=0.09762, over 1614076.65 frames. ], batch size: 25, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:23:10,748 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:23:17,967 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53729.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:23:22,034 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.983e+02 3.078e+02 4.028e+02 5.378e+02 1.177e+03, threshold=8.056e+02, percent-clipped=8.0
+2023-02-06 04:23:28,940 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 04:23:32,186 INFO [train.py:901] (1/4) Epoch 7, batch 5250, loss[loss=0.2958, simple_loss=0.3527, pruned_loss=0.1195, over 8285.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3289, pruned_loss=0.09605, over 1611605.02 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:23:45,136 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0046, 2.2530, 3.7817, 1.6617, 2.9838, 2.4123, 2.0580, 2.6995],
+ device='cuda:1'), covar=tensor([0.1176, 0.1703, 0.0457, 0.2775, 0.0973, 0.1764, 0.1272, 0.1608],
+ device='cuda:1'), in_proj_covar=tensor([0.0480, 0.0477, 0.0529, 0.0554, 0.0603, 0.0531, 0.0451, 0.0591],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:1')
+2023-02-06 04:23:54,706 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53781.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:00,274 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:02,338 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53792.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:06,925 INFO [train.py:901] (1/4) Epoch 7, batch 5300, loss[loss=0.3135, simple_loss=0.3745, pruned_loss=0.1262, over 8340.00 frames. ], tot_loss[loss=0.2617, simple_loss=0.3299, pruned_loss=0.09674, over 1613371.08 frames. ], batch size: 26, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:24:17,554 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53814.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:29,722 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 04:24:31,356 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.647e+02 3.169e+02 3.870e+02 1.211e+03, threshold=6.339e+02, percent-clipped=2.0
+2023-02-06 04:24:39,055 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53844.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:24:42,222 INFO [train.py:901] (1/4) Epoch 7, batch 5350, loss[loss=0.2373, simple_loss=0.3288, pruned_loss=0.07287, over 8461.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3301, pruned_loss=0.09658, over 1615827.59 frames. ], batch size: 27, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:25:17,625 INFO [train.py:901] (1/4) Epoch 7, batch 5400, loss[loss=0.2138, simple_loss=0.2865, pruned_loss=0.07054, over 7975.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3272, pruned_loss=0.0954, over 1608376.84 frames. ], batch size: 21, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:25:28,186 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-02-06 04:25:41,926 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.728e+02 3.458e+02 4.119e+02 1.009e+03, threshold=6.915e+02, percent-clipped=3.0
+2023-02-06 04:25:51,251 INFO [train.py:901] (1/4) Epoch 7, batch 5450, loss[loss=0.2483, simple_loss=0.3195, pruned_loss=0.08858, over 8323.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.3289, pruned_loss=0.09637, over 1610014.71 frames. ], batch size: 25, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:26:08,815 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53974.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:26:18,684 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 04:26:26,954 INFO [train.py:901] (1/4) Epoch 7, batch 5500, loss[loss=0.3139, simple_loss=0.3753, pruned_loss=0.1262, over 8806.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3293, pruned_loss=0.09659, over 1611542.83 frames. ], batch size: 32, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:26:27,124 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53999.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:26:35,489 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0122, 1.5522, 3.2498, 1.3191, 2.0203, 3.5429, 3.5600, 3.0787],
+ device='cuda:1'), covar=tensor([0.0941, 0.1472, 0.0362, 0.2035, 0.0931, 0.0272, 0.0404, 0.0668],
+ device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0272, 0.0233, 0.0269, 0.0237, 0.0219, 0.0274, 0.0279],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 04:26:52,074 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.929e+02 2.794e+02 3.496e+02 4.646e+02 1.157e+03, threshold=6.993e+02, percent-clipped=7.0
+2023-02-06 04:26:53,580 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54037.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:01,157 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54048.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:01,618 INFO [train.py:901] (1/4) Epoch 7, batch 5550, loss[loss=0.2632, simple_loss=0.319, pruned_loss=0.1037, over 8245.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3289, pruned_loss=0.0965, over 1614175.00 frames. ], batch size: 22, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:27:10,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:11,404 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0
+2023-02-06 04:27:18,030 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54073.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:27,406 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3861, 1.9598, 4.6509, 1.1315, 2.6310, 2.0914, 1.3839, 2.6368],
+ device='cuda:1'), covar=tensor([0.2156, 0.2553, 0.0673, 0.4391, 0.1735, 0.2954, 0.2270, 0.2501],
+ device='cuda:1'), in_proj_covar=tensor([0.0481, 0.0477, 0.0530, 0.0554, 0.0604, 0.0535, 0.0455, 0.0593],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:1')
+2023-02-06 04:27:37,055 INFO [train.py:901] (1/4) Epoch 7, batch 5600, loss[loss=0.4122, simple_loss=0.4384, pruned_loss=0.193, over 6949.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3286, pruned_loss=0.09588, over 1613360.51 frames. ], batch size: 71, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:27:38,003 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54100.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:27:55,839 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:28:01,354 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3169, 1.9580, 3.0734, 2.5141, 2.6897, 1.9733, 1.5515, 1.3792],
+ device='cuda:1'), covar=tensor([0.2785, 0.3112, 0.0734, 0.1710, 0.1341, 0.1633, 0.1532, 0.3256],
+ device='cuda:1'), in_proj_covar=tensor([0.0828, 0.0779, 0.0670, 0.0769, 0.0858, 0.0709, 0.0657, 0.0707],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:28:02,460 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.954e+02 2.795e+02 3.455e+02 4.516e+02 9.788e+02, threshold=6.911e+02, percent-clipped=3.0
+2023-02-06 04:28:12,211 INFO [train.py:901] (1/4) Epoch 7, batch 5650, loss[loss=0.2453, simple_loss=0.3085, pruned_loss=0.09106, over 7426.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3292, pruned_loss=0.09607, over 1617648.86 frames. ], batch size: 17, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:28:25,370 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 04:28:25,791 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. limit=5.0
+2023-02-06 04:28:26,283 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4007, 1.9856, 3.1200, 2.4227, 2.5977, 2.0048, 1.5493, 1.4041],
+ device='cuda:1'), covar=tensor([0.2357, 0.2748, 0.0701, 0.1602, 0.1322, 0.1404, 0.1296, 0.2823],
+ device='cuda:1'), in_proj_covar=tensor([0.0834, 0.0785, 0.0673, 0.0772, 0.0863, 0.0712, 0.0661, 0.0708],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:28:47,273 INFO [train.py:901] (1/4) Epoch 7, batch 5700, loss[loss=0.2719, simple_loss=0.3311, pruned_loss=0.1064, over 7920.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.3287, pruned_loss=0.09593, over 1617757.82 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:28:54,428 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7749, 2.9740, 3.1893, 1.9050, 1.4780, 3.5397, 0.6795, 2.1227],
+ device='cuda:1'), covar=tensor([0.3252, 0.1423, 0.0465, 0.3299, 0.6259, 0.0301, 0.5165, 0.1998],
+ device='cuda:1'), in_proj_covar=tensor([0.0143, 0.0143, 0.0086, 0.0192, 0.0228, 0.0088, 0.0151, 0.0140],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:29:12,993 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.786e+02 3.155e+02 4.023e+02 8.991e+02, threshold=6.311e+02, percent-clipped=4.0
+2023-02-06 04:29:22,469 INFO [train.py:901] (1/4) Epoch 7, batch 5750, loss[loss=0.2543, simple_loss=0.3268, pruned_loss=0.09087, over 8512.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3295, pruned_loss=0.09658, over 1613688.37 frames. ], batch size: 26, lr: 1.08e-02, grad_scale: 8.0
+2023-02-06 04:29:31,472 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 04:29:40,474 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2464, 1.8443, 2.9484, 2.3008, 2.5332, 1.9700, 1.5815, 1.2032],
+ device='cuda:1'), covar=tensor([0.2587, 0.2792, 0.0645, 0.1601, 0.1254, 0.1506, 0.1337, 0.3060],
+ device='cuda:1'), in_proj_covar=tensor([0.0822, 0.0771, 0.0660, 0.0760, 0.0852, 0.0703, 0.0653, 0.0700],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:29:54,386 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0375, 1.6728, 1.6827, 1.5664, 1.3322, 1.6535, 2.2685, 1.9376],
+ device='cuda:1'), covar=tensor([0.0450, 0.1226, 0.1817, 0.1391, 0.0560, 0.1465, 0.0621, 0.0566],
+ device='cuda:1'), in_proj_covar=tensor([0.0116, 0.0167, 0.0209, 0.0171, 0.0116, 0.0174, 0.0128, 0.0146],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:1')
+2023-02-06 04:29:56,269 INFO [train.py:901] (1/4) Epoch 7, batch 5800, loss[loss=0.2362, simple_loss=0.3318, pruned_loss=0.07028, over 8290.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3294, pruned_loss=0.09669, over 1611814.82 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:30:18,113 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7850, 1.9677, 1.6114, 2.3347, 1.0633, 1.3891, 1.6471, 1.9593],
+ device='cuda:1'), covar=tensor([0.0798, 0.0856, 0.1207, 0.0583, 0.1254, 0.1698, 0.1042, 0.0865],
+ device='cuda:1'), in_proj_covar=tensor([0.0249, 0.0237, 0.0272, 0.0224, 0.0235, 0.0265, 0.0275, 0.0238],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 04:30:22,037 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.127e+02 2.882e+02 3.737e+02 4.385e+02 9.194e+02, threshold=7.474e+02, percent-clipped=5.0
+2023-02-06 04:30:32,284 INFO [train.py:901] (1/4) Epoch 7, batch 5850, loss[loss=0.2575, simple_loss=0.3306, pruned_loss=0.0922, over 8135.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3296, pruned_loss=0.09681, over 1611279.00 frames. ], batch size: 22, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:30:40,665 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9686, 3.1126, 3.2353, 2.1513, 1.6843, 3.4993, 0.6979, 2.1756],
+ device='cuda:1'), covar=tensor([0.2677, 0.1150, 0.0469, 0.2994, 0.5149, 0.0374, 0.4933, 0.1870],
+ device='cuda:1'), in_proj_covar=tensor([0.0145, 0.0144, 0.0086, 0.0193, 0.0230, 0.0088, 0.0152, 0.0142],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:31:06,118 INFO [train.py:901] (1/4) Epoch 7, batch 5900, loss[loss=0.2772, simple_loss=0.3476, pruned_loss=0.1034, over 8106.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3292, pruned_loss=0.09662, over 1611991.66 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:31:30,659 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.634e+02 3.151e+02 3.851e+02 7.879e+02, threshold=6.301e+02, percent-clipped=2.0
+2023-02-06 04:31:40,696 INFO [train.py:901] (1/4) Epoch 7, batch 5950, loss[loss=0.1853, simple_loss=0.2568, pruned_loss=0.05693, over 7711.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3292, pruned_loss=0.09689, over 1605649.49 frames. ], batch size: 18, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:32:13,860 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4946, 2.0430, 2.0746, 1.1977, 2.1712, 1.5785, 0.5789, 1.8545],
+ device='cuda:1'), covar=tensor([0.0266, 0.0129, 0.0098, 0.0204, 0.0152, 0.0385, 0.0349, 0.0108],
+ device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0262, 0.0223, 0.0325, 0.0263, 0.0414, 0.0322, 0.0300],
+ device='cuda:1'), out_proj_covar=tensor([1.1091e-04, 8.1536e-05, 6.8065e-05, 1.0005e-04, 8.2443e-05, 1.3947e-04,
+ 1.0222e-04, 9.3704e-05], device='cuda:1')
+2023-02-06 04:32:14,309 INFO [train.py:901] (1/4) Epoch 7, batch 6000, loss[loss=0.233, simple_loss=0.2986, pruned_loss=0.08373, over 7932.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3293, pruned_loss=0.09666, over 1607196.25 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:32:14,309 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 04:32:26,542 INFO [train.py:935] (1/4) Epoch 7, validation: loss=0.2048, simple_loss=0.3036, pruned_loss=0.05298, over 944034.00 frames.
+2023-02-06 04:32:26,543 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6607MB
+2023-02-06 04:32:41,083 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3264, 1.6617, 1.6762, 1.3545, 0.9667, 1.4778, 1.7511, 1.8797],
+ device='cuda:1'), covar=tensor([0.0530, 0.1252, 0.1763, 0.1387, 0.0630, 0.1508, 0.0712, 0.0573],
+ device='cuda:1'), in_proj_covar=tensor([0.0115, 0.0165, 0.0206, 0.0168, 0.0115, 0.0171, 0.0127, 0.0145],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:1')
+2023-02-06 04:32:50,868 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.687e+02 3.524e+02 4.445e+02 8.914e+02, threshold=7.048e+02, percent-clipped=8.0
+2023-02-06 04:33:00,127 INFO [train.py:901] (1/4) Epoch 7, batch 6050, loss[loss=0.2668, simple_loss=0.3219, pruned_loss=0.1058, over 7923.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3288, pruned_loss=0.0965, over 1607395.07 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:33:04,706 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 04:33:17,078 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4050, 2.2624, 1.6488, 2.0203, 1.9473, 1.3544, 1.7172, 1.7918],
+ device='cuda:1'), covar=tensor([0.0957, 0.0280, 0.0873, 0.0400, 0.0569, 0.1163, 0.0686, 0.0618],
+ device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0234, 0.0309, 0.0299, 0.0313, 0.0314, 0.0334, 0.0317],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 04:33:36,271 INFO [train.py:901] (1/4) Epoch 7, batch 6100, loss[loss=0.2432, simple_loss=0.3116, pruned_loss=0.08739, over 7825.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3292, pruned_loss=0.09684, over 1611169.41 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:33:44,373 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 04:34:00,453 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 04:34:01,818 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.824e+02 3.447e+02 4.351e+02 1.012e+03, threshold=6.894e+02, percent-clipped=2.0
+2023-02-06 04:34:08,855 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3327, 1.5775, 2.2454, 1.1997, 1.5903, 1.6414, 1.3705, 1.5220],
+ device='cuda:1'), covar=tensor([0.1480, 0.1607, 0.0636, 0.3001, 0.1261, 0.2263, 0.1576, 0.1561],
+ device='cuda:1'), in_proj_covar=tensor([0.0478, 0.0473, 0.0526, 0.0558, 0.0599, 0.0530, 0.0453, 0.0588],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 04:34:11,167 INFO [train.py:901] (1/4) Epoch 7, batch 6150, loss[loss=0.21, simple_loss=0.2844, pruned_loss=0.06777, over 7411.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3298, pruned_loss=0.09715, over 1614938.32 frames. ], batch size: 17, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:34:34,031 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54682.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:34:46,669 INFO [train.py:901] (1/4) Epoch 7, batch 6200, loss[loss=0.2725, simple_loss=0.3444, pruned_loss=0.1003, over 8434.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3306, pruned_loss=0.09749, over 1616484.26 frames. ], batch size: 27, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:34:49,810 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0
+2023-02-06 04:35:12,147 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.928e+02 3.624e+02 4.953e+02 9.267e+02, threshold=7.248e+02, percent-clipped=4.0
+2023-02-06 04:35:21,786 INFO [train.py:901] (1/4) Epoch 7, batch 6250, loss[loss=0.2571, simple_loss=0.326, pruned_loss=0.09408, over 7974.00 frames. ], tot_loss[loss=0.2626, simple_loss=0.3308, pruned_loss=0.09721, over 1617730.19 frames. ], batch size: 21, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:35:40,874 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1149, 2.3773, 1.8010, 2.7539, 1.3861, 1.6009, 1.7195, 2.2706],
+ device='cuda:1'), covar=tensor([0.0694, 0.0707, 0.1216, 0.0487, 0.1219, 0.1514, 0.1313, 0.0767],
+ device='cuda:1'), in_proj_covar=tensor([0.0249, 0.0235, 0.0274, 0.0225, 0.0235, 0.0264, 0.0273, 0.0238],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 04:35:55,506 INFO [train.py:901] (1/4) Epoch 7, batch 6300, loss[loss=0.3319, simple_loss=0.3872, pruned_loss=0.1382, over 8340.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3298, pruned_loss=0.09717, over 1608662.33 frames. ], batch size: 25, lr: 1.07e-02, grad_scale: 8.0
+2023-02-06 04:36:22,283 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 3.001e+02 3.662e+02 4.451e+02 9.002e+02, threshold=7.325e+02, percent-clipped=3.0
+2023-02-06 04:36:32,313 INFO [train.py:901] (1/4) Epoch 7, batch 6350, loss[loss=0.2453, simple_loss=0.3348, pruned_loss=0.07791, over 8566.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3294, pruned_loss=0.09743, over 1605701.60 frames.
], batch size: 31, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:36:53,641 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:37:06,004 INFO [train.py:901] (1/4) Epoch 7, batch 6400, loss[loss=0.2744, simple_loss=0.3504, pruned_loss=0.09918, over 8658.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3292, pruned_loss=0.09655, over 1609269.29 frames. ], batch size: 34, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:37:15,525 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.52 vs. limit=5.0 +2023-02-06 04:37:31,198 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.661e+02 3.281e+02 3.949e+02 1.010e+03, threshold=6.562e+02, percent-clipped=2.0 +2023-02-06 04:37:40,720 INFO [train.py:901] (1/4) Epoch 7, batch 6450, loss[loss=0.2088, simple_loss=0.2862, pruned_loss=0.06564, over 7925.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3295, pruned_loss=0.09664, over 1609311.41 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:37:47,759 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0575, 1.2807, 1.1462, 0.4369, 1.1593, 0.9017, 0.2068, 0.9993], + device='cuda:1'), covar=tensor([0.0228, 0.0162, 0.0154, 0.0286, 0.0204, 0.0494, 0.0359, 0.0163], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0261, 0.0221, 0.0322, 0.0261, 0.0411, 0.0320, 0.0299], + device='cuda:1'), out_proj_covar=tensor([1.1000e-04, 8.0842e-05, 6.7190e-05, 9.8893e-05, 8.1659e-05, 1.3821e-04, + 1.0117e-04, 9.3070e-05], device='cuda:1') +2023-02-06 04:37:50,649 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 04:37:57,799 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0124, 1.8913, 1.7337, 1.4956, 1.1464, 1.5185, 2.3125, 2.1777], + device='cuda:1'), covar=tensor([0.0470, 0.1119, 0.1705, 0.1371, 0.0589, 0.1443, 0.0632, 0.0527], + device='cuda:1'), in_proj_covar=tensor([0.0115, 0.0165, 0.0206, 0.0168, 0.0115, 0.0171, 0.0126, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 04:38:15,593 INFO [train.py:901] (1/4) Epoch 7, batch 6500, loss[loss=0.2382, simple_loss=0.3224, pruned_loss=0.07693, over 8369.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3284, pruned_loss=0.09528, over 1610535.83 frames. ], batch size: 24, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:38:33,806 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55026.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:38:39,665 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.546e+02 3.271e+02 4.197e+02 5.859e+02, threshold=6.542e+02, percent-clipped=0.0 +2023-02-06 04:38:49,577 INFO [train.py:901] (1/4) Epoch 7, batch 6550, loss[loss=0.2615, simple_loss=0.3342, pruned_loss=0.09438, over 8677.00 frames. ], tot_loss[loss=0.2609, simple_loss=0.3293, pruned_loss=0.09618, over 1610632.62 frames. 
], batch size: 39, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:39:04,762 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9841, 1.8368, 3.3337, 1.3522, 2.1753, 3.6940, 3.5894, 3.1260], + device='cuda:1'), covar=tensor([0.0981, 0.1359, 0.0336, 0.2141, 0.0945, 0.0272, 0.0445, 0.0594], + device='cuda:1'), in_proj_covar=tensor([0.0245, 0.0278, 0.0233, 0.0272, 0.0245, 0.0219, 0.0279, 0.0283], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 04:39:12,186 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 04:39:25,807 INFO [train.py:901] (1/4) Epoch 7, batch 6600, loss[loss=0.2544, simple_loss=0.322, pruned_loss=0.09337, over 8520.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3296, pruned_loss=0.09591, over 1611122.20 frames. ], batch size: 26, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:39:32,339 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 04:39:49,418 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.699e+02 3.503e+02 4.413e+02 7.218e+02, threshold=7.007e+02, percent-clipped=4.0 +2023-02-06 04:39:53,564 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55141.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:39:58,774 INFO [train.py:901] (1/4) Epoch 7, batch 6650, loss[loss=0.2609, simple_loss=0.3426, pruned_loss=0.08966, over 8489.00 frames. ], tot_loss[loss=0.2598, simple_loss=0.3286, pruned_loss=0.09552, over 1606715.62 frames. ], batch size: 39, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:40:10,779 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:16,890 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2843, 1.4489, 4.3235, 1.9002, 2.4746, 4.9709, 4.9725, 4.3595], + device='cuda:1'), covar=tensor([0.1031, 0.1618, 0.0272, 0.1959, 0.0893, 0.0214, 0.0360, 0.0593], + device='cuda:1'), in_proj_covar=tensor([0.0242, 0.0275, 0.0233, 0.0272, 0.0242, 0.0216, 0.0275, 0.0280], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 04:40:34,123 INFO [train.py:901] (1/4) Epoch 7, batch 6700, loss[loss=0.2137, simple_loss=0.2855, pruned_loss=0.071, over 7935.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.3298, pruned_loss=0.09594, over 1611488.20 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:40:42,738 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 04:40:51,581 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:55,052 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:58,796 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 3.031e+02 3.759e+02 4.673e+02 1.170e+03, threshold=7.519e+02, percent-clipped=9.0 +2023-02-06 04:41:07,986 INFO [train.py:901] (1/4) Epoch 7, batch 6750, loss[loss=0.2878, simple_loss=0.3419, pruned_loss=0.1168, over 7536.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3301, pruned_loss=0.09649, over 1608851.90 frames. 
], batch size: 18, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:41:42,535 INFO [train.py:901] (1/4) Epoch 7, batch 6800, loss[loss=0.2865, simple_loss=0.3685, pruned_loss=0.1022, over 8473.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3306, pruned_loss=0.0968, over 1607039.22 frames. ], batch size: 25, lr: 1.07e-02, grad_scale: 16.0 +2023-02-06 04:41:47,245 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 04:41:57,717 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1796, 3.0710, 2.8572, 1.6085, 2.7534, 2.8144, 2.8713, 2.6427], + device='cuda:1'), covar=tensor([0.1181, 0.0911, 0.1320, 0.4416, 0.1059, 0.1162, 0.1446, 0.1331], + device='cuda:1'), in_proj_covar=tensor([0.0414, 0.0323, 0.0356, 0.0434, 0.0340, 0.0320, 0.0328, 0.0280], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:42:08,557 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.859e+02 3.364e+02 4.161e+02 9.626e+02, threshold=6.728e+02, percent-clipped=3.0 +2023-02-06 04:42:11,503 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:42:18,201 INFO [train.py:901] (1/4) Epoch 7, batch 6850, loss[loss=0.2711, simple_loss=0.3368, pruned_loss=0.1027, over 8280.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3297, pruned_loss=0.09635, over 1610210.69 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 16.0 +2023-02-06 04:42:24,371 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7386, 3.1834, 2.2815, 4.0775, 1.9282, 2.2172, 2.3407, 2.9137], + device='cuda:1'), covar=tensor([0.0841, 0.0867, 0.1275, 0.0294, 0.1244, 0.1605, 0.1423, 0.1149], + device='cuda:1'), in_proj_covar=tensor([0.0246, 0.0233, 0.0272, 0.0221, 0.0230, 0.0265, 0.0274, 0.0236], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 04:42:34,179 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 04:42:50,630 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55397.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:42:52,432 INFO [train.py:901] (1/4) Epoch 7, batch 6900, loss[loss=0.3099, simple_loss=0.3564, pruned_loss=0.1317, over 7970.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.3293, pruned_loss=0.09641, over 1609512.94 frames. ], batch size: 21, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:43:04,239 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. 
limit=2.0 +2023-02-06 04:43:06,187 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1999, 1.8161, 1.5419, 1.4511, 1.3304, 1.6713, 2.1726, 1.8057], + device='cuda:1'), covar=tensor([0.0446, 0.1349, 0.1854, 0.1498, 0.0605, 0.1551, 0.0690, 0.0608], + device='cuda:1'), in_proj_covar=tensor([0.0111, 0.0166, 0.0204, 0.0168, 0.0114, 0.0170, 0.0125, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 04:43:09,642 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55422.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:43:19,279 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.767e+02 3.318e+02 4.413e+02 7.718e+02, threshold=6.635e+02, percent-clipped=1.0 +2023-02-06 04:43:19,733 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 04:43:28,924 INFO [train.py:901] (1/4) Epoch 7, batch 6950, loss[loss=0.2828, simple_loss=0.3526, pruned_loss=0.1065, over 8137.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3287, pruned_loss=0.09579, over 1608373.42 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:43:42,918 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 04:43:46,600 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 04:43:48,202 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 04:44:02,247 INFO [train.py:901] (1/4) Epoch 7, batch 7000, loss[loss=0.2242, simple_loss=0.2898, pruned_loss=0.07933, over 7231.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3275, pruned_loss=0.09557, over 1602150.99 frames. ], batch size: 16, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:44:04,367 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5279, 2.7683, 1.5858, 2.1979, 2.0993, 1.3254, 1.9645, 2.0825], + device='cuda:1'), covar=tensor([0.1222, 0.0240, 0.0983, 0.0540, 0.0619, 0.1262, 0.0855, 0.0821], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0232, 0.0308, 0.0297, 0.0308, 0.0317, 0.0335, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 04:44:09,453 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:44:28,238 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.942e+02 3.699e+02 4.542e+02 1.220e+03, threshold=7.399e+02, percent-clipped=11.0 +2023-02-06 04:44:37,061 INFO [train.py:901] (1/4) Epoch 7, batch 7050, loss[loss=0.2388, simple_loss=0.3259, pruned_loss=0.07586, over 8506.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3279, pruned_loss=0.09545, over 1607335.08 frames. ], batch size: 26, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:44:43,035 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-02-06 04:44:54,228 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:09,303 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:11,744 INFO [train.py:901] (1/4) Epoch 7, batch 7100, loss[loss=0.229, simple_loss=0.302, pruned_loss=0.07798, over 7977.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3278, pruned_loss=0.09509, over 1608055.08 frames. ], batch size: 21, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:45:26,282 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:29,489 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:30,152 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7019, 1.4650, 3.0519, 1.1556, 2.2138, 3.2896, 3.2961, 2.8006], + device='cuda:1'), covar=tensor([0.1032, 0.1322, 0.0367, 0.1920, 0.0785, 0.0290, 0.0392, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0243, 0.0270, 0.0229, 0.0268, 0.0239, 0.0212, 0.0277, 0.0277], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 04:45:36,697 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.720e+02 3.297e+02 4.008e+02 7.250e+02, threshold=6.594e+02, percent-clipped=0.0 +2023-02-06 04:45:45,958 INFO [train.py:901] (1/4) Epoch 7, batch 7150, loss[loss=0.2513, simple_loss=0.3151, pruned_loss=0.09381, over 7250.00 frames. ], tot_loss[loss=0.2583, simple_loss=0.3272, pruned_loss=0.09472, over 1605008.45 frames. ], batch size: 16, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:45:57,609 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6081, 1.2756, 4.7671, 1.9263, 4.0945, 3.9939, 4.3579, 4.2295], + device='cuda:1'), covar=tensor([0.0424, 0.3905, 0.0348, 0.2652, 0.1085, 0.0654, 0.0397, 0.0483], + device='cuda:1'), in_proj_covar=tensor([0.0390, 0.0531, 0.0481, 0.0464, 0.0540, 0.0442, 0.0445, 0.0503], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 04:46:14,244 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:46:21,671 INFO [train.py:901] (1/4) Epoch 7, batch 7200, loss[loss=0.2676, simple_loss=0.3379, pruned_loss=0.0987, over 8730.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3258, pruned_loss=0.09392, over 1604114.56 frames. 
], batch size: 49, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:46:21,923 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5679, 2.1638, 3.3957, 1.2940, 2.3240, 1.9953, 1.6397, 2.2176], + device='cuda:1'), covar=tensor([0.1545, 0.1822, 0.0500, 0.3447, 0.1352, 0.2411, 0.1574, 0.1999], + device='cuda:1'), in_proj_covar=tensor([0.0476, 0.0478, 0.0534, 0.0561, 0.0599, 0.0535, 0.0455, 0.0594], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:46:33,577 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6969, 1.8544, 2.1458, 1.7283, 1.0191, 2.2528, 0.3150, 1.2557], + device='cuda:1'), covar=tensor([0.2755, 0.1662, 0.0558, 0.1817, 0.5011, 0.0643, 0.4027, 0.2025], + device='cuda:1'), in_proj_covar=tensor([0.0144, 0.0145, 0.0085, 0.0192, 0.0232, 0.0090, 0.0146, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:46:38,473 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9181, 4.1290, 2.3320, 2.6304, 2.8531, 2.1285, 2.6919, 2.9839], + device='cuda:1'), covar=tensor([0.1514, 0.0187, 0.0941, 0.0704, 0.0666, 0.1171, 0.0974, 0.0937], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0232, 0.0307, 0.0298, 0.0309, 0.0316, 0.0337, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 04:46:47,138 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.857e+02 3.487e+02 4.455e+02 1.230e+03, threshold=6.974e+02, percent-clipped=5.0 +2023-02-06 04:46:55,812 INFO [train.py:901] (1/4) Epoch 7, batch 7250, loss[loss=0.2973, simple_loss=0.3569, pruned_loss=0.1189, over 8137.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.328, pruned_loss=0.09519, over 1607047.45 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:47:07,787 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:47:31,018 INFO [train.py:901] (1/4) Epoch 7, batch 7300, loss[loss=0.2258, simple_loss=0.3011, pruned_loss=0.07523, over 8134.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3285, pruned_loss=0.09592, over 1604715.57 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:47:55,727 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.887e+02 3.402e+02 4.424e+02 1.529e+03, threshold=6.804e+02, percent-clipped=7.0 +2023-02-06 04:48:04,224 INFO [train.py:901] (1/4) Epoch 7, batch 7350, loss[loss=0.2411, simple_loss=0.3132, pruned_loss=0.08452, over 8238.00 frames. ], tot_loss[loss=0.2608, simple_loss=0.329, pruned_loss=0.0963, over 1608353.10 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:48:15,834 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:26,624 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55881.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:27,824 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 04:48:40,240 INFO [train.py:901] (1/4) Epoch 7, batch 7400, loss[loss=0.2651, simple_loss=0.3496, pruned_loss=0.09031, over 8103.00 frames. 
], tot_loss[loss=0.2621, simple_loss=0.3301, pruned_loss=0.09704, over 1608088.71 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:48:45,329 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:50,034 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 04:49:01,383 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9042, 1.5123, 1.4944, 1.3177, 1.1289, 1.3075, 1.5659, 1.6235], + device='cuda:1'), covar=tensor([0.0535, 0.1257, 0.1636, 0.1352, 0.0552, 0.1520, 0.0712, 0.0578], + device='cuda:1'), in_proj_covar=tensor([0.0114, 0.0166, 0.0204, 0.0168, 0.0113, 0.0172, 0.0127, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 04:49:05,902 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.646e+02 3.471e+02 4.467e+02 1.348e+03, threshold=6.942e+02, percent-clipped=5.0 +2023-02-06 04:49:11,671 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:49:14,923 INFO [train.py:901] (1/4) Epoch 7, batch 7450, loss[loss=0.2529, simple_loss=0.3232, pruned_loss=0.09128, over 7816.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.3305, pruned_loss=0.09695, over 1609511.79 frames. ], batch size: 20, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:49:25,422 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6538, 2.8475, 1.7913, 2.2720, 2.4472, 1.5609, 2.0525, 2.1903], + device='cuda:1'), covar=tensor([0.1174, 0.0266, 0.0915, 0.0566, 0.0577, 0.1181, 0.0888, 0.0806], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0233, 0.0309, 0.0301, 0.0310, 0.0319, 0.0334, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 04:49:25,911 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 04:49:28,832 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:49:38,426 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6730, 1.7741, 2.0841, 1.7149, 1.2212, 2.2199, 0.2109, 1.2484], + device='cuda:1'), covar=tensor([0.3160, 0.1715, 0.0670, 0.2025, 0.4799, 0.0615, 0.4545, 0.2253], + device='cuda:1'), in_proj_covar=tensor([0.0142, 0.0144, 0.0085, 0.0191, 0.0229, 0.0088, 0.0145, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:49:50,667 INFO [train.py:901] (1/4) Epoch 7, batch 7500, loss[loss=0.2292, simple_loss=0.2972, pruned_loss=0.08067, over 8025.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3315, pruned_loss=0.09745, over 1613764.78 frames. 
], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:50:17,129 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.816e+02 3.537e+02 4.737e+02 9.745e+02, threshold=7.074e+02, percent-clipped=6.0 +2023-02-06 04:50:23,239 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5218, 4.5027, 4.0261, 1.9210, 3.9292, 4.0133, 4.0749, 3.5202], + device='cuda:1'), covar=tensor([0.0769, 0.0616, 0.1044, 0.4971, 0.0803, 0.0798, 0.1318, 0.0992], + device='cuda:1'), in_proj_covar=tensor([0.0425, 0.0330, 0.0361, 0.0447, 0.0345, 0.0320, 0.0335, 0.0287], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:50:25,625 INFO [train.py:901] (1/4) Epoch 7, batch 7550, loss[loss=0.2978, simple_loss=0.3573, pruned_loss=0.1191, over 8284.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.331, pruned_loss=0.09702, over 1614308.93 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:50:30,389 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1069, 2.5169, 1.8914, 2.9989, 1.6213, 1.6699, 2.3265, 2.4592], + device='cuda:1'), covar=tensor([0.0846, 0.0933, 0.1278, 0.0377, 0.1220, 0.1679, 0.1005, 0.0880], + device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0238, 0.0278, 0.0222, 0.0233, 0.0270, 0.0274, 0.0238], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 04:50:58,638 INFO [train.py:901] (1/4) Epoch 7, batch 7600, loss[loss=0.2907, simple_loss=0.3496, pruned_loss=0.1159, over 8344.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3314, pruned_loss=0.09788, over 1615577.58 frames. ], batch size: 26, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:51:06,779 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:51:25,295 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.750e+02 3.495e+02 4.537e+02 9.121e+02, threshold=6.990e+02, percent-clipped=3.0 +2023-02-06 04:51:34,926 INFO [train.py:901] (1/4) Epoch 7, batch 7650, loss[loss=0.2585, simple_loss=0.3295, pruned_loss=0.09375, over 8466.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.3317, pruned_loss=0.09832, over 1617941.80 frames. ], batch size: 29, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:51:44,425 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:51:47,916 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4820, 2.0070, 3.1264, 2.3867, 2.6105, 2.0154, 1.4858, 1.3933], + device='cuda:1'), covar=tensor([0.2644, 0.3195, 0.0732, 0.1912, 0.1489, 0.1633, 0.1659, 0.3092], + device='cuda:1'), in_proj_covar=tensor([0.0838, 0.0784, 0.0674, 0.0785, 0.0869, 0.0725, 0.0677, 0.0713], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:52:08,628 INFO [train.py:901] (1/4) Epoch 7, batch 7700, loss[loss=0.3351, simple_loss=0.385, pruned_loss=0.1426, over 7089.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3314, pruned_loss=0.09794, over 1617665.14 frames. 
], batch size: 71, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:52:16,094 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:26,762 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:34,688 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.791e+02 3.394e+02 3.978e+02 9.035e+02, threshold=6.788e+02, percent-clipped=3.0 +2023-02-06 04:52:34,717 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 04:52:44,019 INFO [train.py:901] (1/4) Epoch 7, batch 7750, loss[loss=0.2966, simple_loss=0.3679, pruned_loss=0.1127, over 8325.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3311, pruned_loss=0.09747, over 1619376.76 frames. ], batch size: 25, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:52:54,605 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-06 04:53:18,264 INFO [train.py:901] (1/4) Epoch 7, batch 7800, loss[loss=0.2271, simple_loss=0.3025, pruned_loss=0.07585, over 7813.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3319, pruned_loss=0.09765, over 1622888.63 frames. ], batch size: 20, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:53:22,037 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 04:53:35,675 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:53:36,868 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8727, 3.7985, 3.4620, 1.4895, 3.3483, 3.1630, 3.4760, 2.8725], + device='cuda:1'), covar=tensor([0.0967, 0.0797, 0.1132, 0.5271, 0.0974, 0.1172, 0.1703, 0.1162], + device='cuda:1'), in_proj_covar=tensor([0.0426, 0.0331, 0.0364, 0.0455, 0.0351, 0.0324, 0.0336, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:53:42,612 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.702e+02 3.307e+02 4.383e+02 8.490e+02, threshold=6.613e+02, percent-clipped=4.0 +2023-02-06 04:53:51,371 INFO [train.py:901] (1/4) Epoch 7, batch 7850, loss[loss=0.2819, simple_loss=0.3588, pruned_loss=0.1025, over 8510.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3331, pruned_loss=0.09838, over 1620893.85 frames. ], batch size: 26, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:53:54,171 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1246, 3.0052, 2.8447, 1.5441, 2.7348, 2.7932, 2.9067, 2.5630], + device='cuda:1'), covar=tensor([0.1438, 0.0993, 0.1428, 0.5266, 0.1208, 0.1407, 0.1668, 0.1363], + device='cuda:1'), in_proj_covar=tensor([0.0426, 0.0331, 0.0364, 0.0454, 0.0349, 0.0326, 0.0337, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:54:09,406 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 04:54:24,869 INFO [train.py:901] (1/4) Epoch 7, batch 7900, loss[loss=0.3059, simple_loss=0.3826, pruned_loss=0.1146, over 8331.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3355, pruned_loss=0.1, over 1616310.70 frames. 
], batch size: 25, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:54:27,218 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3521, 2.0412, 3.2035, 2.5101, 2.7914, 2.0529, 1.5532, 1.3466], + device='cuda:1'), covar=tensor([0.2860, 0.3062, 0.0722, 0.1762, 0.1429, 0.1679, 0.1394, 0.3216], + device='cuda:1'), in_proj_covar=tensor([0.0841, 0.0786, 0.0677, 0.0786, 0.0876, 0.0732, 0.0677, 0.0713], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:54:49,425 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.758e+02 3.520e+02 4.424e+02 1.197e+03, threshold=7.039e+02, percent-clipped=9.0 +2023-02-06 04:54:58,048 INFO [train.py:901] (1/4) Epoch 7, batch 7950, loss[loss=0.2414, simple_loss=0.3131, pruned_loss=0.08483, over 8229.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3345, pruned_loss=0.09926, over 1612099.55 frames. ], batch size: 22, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:55:19,800 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:31,645 INFO [train.py:901] (1/4) Epoch 7, batch 8000, loss[loss=0.2624, simple_loss=0.3412, pruned_loss=0.0918, over 8108.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3336, pruned_loss=0.09847, over 1613807.64 frames. ], batch size: 23, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:55:36,306 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:36,850 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:56,108 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.809e+02 3.378e+02 4.457e+02 7.052e+02, threshold=6.755e+02, percent-clipped=1.0 +2023-02-06 04:55:59,635 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56541.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:56:04,837 INFO [train.py:901] (1/4) Epoch 7, batch 8050, loss[loss=0.3269, simple_loss=0.3734, pruned_loss=0.1402, over 6891.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3324, pruned_loss=0.09877, over 1602126.78 frames. ], batch size: 71, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:56:37,708 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 04:56:42,633 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:56:43,099 INFO [train.py:901] (1/4) Epoch 8, batch 0, loss[loss=0.2558, simple_loss=0.327, pruned_loss=0.09228, over 8558.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.327, pruned_loss=0.09228, over 8558.00 frames. ], batch size: 31, lr: 9.92e-03, grad_scale: 8.0 +2023-02-06 04:56:43,099 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 04:56:54,074 INFO [train.py:935] (1/4) Epoch 8, validation: loss=0.205, simple_loss=0.3028, pruned_loss=0.05355, over 944034.00 frames. 
+2023-02-06 04:56:54,074 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6607MB +2023-02-06 04:56:54,916 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2884, 4.2647, 3.8675, 1.4982, 3.7870, 3.8979, 3.9411, 3.6281], + device='cuda:1'), covar=tensor([0.1045, 0.0698, 0.1212, 0.6179, 0.0843, 0.1099, 0.1356, 0.0951], + device='cuda:1'), in_proj_covar=tensor([0.0419, 0.0327, 0.0354, 0.0451, 0.0343, 0.0322, 0.0334, 0.0287], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:57:08,614 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 04:57:10,771 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:57:22,349 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:57:28,998 INFO [train.py:901] (1/4) Epoch 8, batch 50, loss[loss=0.3288, simple_loss=0.3639, pruned_loss=0.1468, over 6981.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3358, pruned_loss=0.09957, over 368996.56 frames. ], batch size: 72, lr: 9.92e-03, grad_scale: 8.0 +2023-02-06 04:57:31,767 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.831e+02 3.488e+02 4.265e+02 1.069e+03, threshold=6.975e+02, percent-clipped=2.0 +2023-02-06 04:57:39,013 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-06 04:57:43,161 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 04:58:03,657 INFO [train.py:901] (1/4) Epoch 8, batch 100, loss[loss=0.2787, simple_loss=0.3399, pruned_loss=0.1087, over 8640.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3342, pruned_loss=0.09946, over 646599.27 frames. ], batch size: 31, lr: 9.91e-03, grad_scale: 8.0 +2023-02-06 04:58:05,727 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 04:58:38,295 INFO [train.py:901] (1/4) Epoch 8, batch 150, loss[loss=0.2768, simple_loss=0.3585, pruned_loss=0.09753, over 8323.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3318, pruned_loss=0.09755, over 860551.26 frames. ], batch size: 25, lr: 9.91e-03, grad_scale: 8.0 +2023-02-06 04:58:40,473 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.0922, 1.6577, 6.1218, 1.9657, 5.4751, 5.2252, 5.7090, 5.4935], + device='cuda:1'), covar=tensor([0.0322, 0.3591, 0.0238, 0.2988, 0.0857, 0.0681, 0.0301, 0.0420], + device='cuda:1'), in_proj_covar=tensor([0.0402, 0.0531, 0.0489, 0.0473, 0.0538, 0.0450, 0.0448, 0.0507], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 04:58:40,997 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.710e+02 3.372e+02 4.105e+02 8.611e+02, threshold=6.744e+02, percent-clipped=2.0 +2023-02-06 04:59:12,801 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 04:59:13,819 INFO [train.py:901] (1/4) Epoch 8, batch 200, loss[loss=0.2575, simple_loss=0.3304, pruned_loss=0.09226, over 8077.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3322, pruned_loss=0.09712, over 1032506.37 frames. 
], batch size: 21, lr: 9.90e-03, grad_scale: 8.0 +2023-02-06 04:59:41,309 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56821.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:59:43,306 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8461, 5.9470, 4.9467, 2.6188, 5.0614, 5.6564, 5.4684, 5.1491], + device='cuda:1'), covar=tensor([0.0532, 0.0372, 0.0857, 0.4330, 0.0713, 0.0592, 0.0911, 0.0574], + device='cuda:1'), in_proj_covar=tensor([0.0417, 0.0324, 0.0356, 0.0446, 0.0347, 0.0322, 0.0334, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 04:59:48,621 INFO [train.py:901] (1/4) Epoch 8, batch 250, loss[loss=0.2942, simple_loss=0.3664, pruned_loss=0.1109, over 8246.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3307, pruned_loss=0.09581, over 1163874.54 frames. ], batch size: 24, lr: 9.90e-03, grad_scale: 8.0 +2023-02-06 04:59:51,337 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.703e+02 3.318e+02 4.204e+02 1.022e+03, threshold=6.636e+02, percent-clipped=1.0 +2023-02-06 04:59:56,868 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 05:00:06,243 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 05:00:21,369 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:23,889 INFO [train.py:901] (1/4) Epoch 8, batch 300, loss[loss=0.2738, simple_loss=0.3413, pruned_loss=0.1031, over 8759.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.331, pruned_loss=0.09609, over 1263733.77 frames. ], batch size: 30, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:00:26,003 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56885.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 05:00:28,633 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4117, 1.2272, 1.3352, 1.1038, 0.8252, 1.1552, 1.1400, 1.0546], + device='cuda:1'), covar=tensor([0.0613, 0.1292, 0.1905, 0.1414, 0.0582, 0.1582, 0.0732, 0.0653], + device='cuda:1'), in_proj_covar=tensor([0.0112, 0.0162, 0.0201, 0.0164, 0.0112, 0.0169, 0.0124, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:00:38,052 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:54,955 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:58,777 INFO [train.py:901] (1/4) Epoch 8, batch 350, loss[loss=0.2381, simple_loss=0.303, pruned_loss=0.08664, over 7983.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3317, pruned_loss=0.09692, over 1342846.27 frames. 
], batch size: 21, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:01,452 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.612e+02 3.168e+02 3.951e+02 1.059e+03, threshold=6.336e+02, percent-clipped=3.0 +2023-02-06 05:01:02,328 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3596, 1.2641, 1.5090, 1.1679, 0.8723, 1.3259, 1.2709, 1.2275], + device='cuda:1'), covar=tensor([0.0588, 0.1296, 0.1800, 0.1404, 0.0564, 0.1555, 0.0658, 0.0575], + device='cuda:1'), in_proj_covar=tensor([0.0111, 0.0162, 0.0200, 0.0163, 0.0112, 0.0168, 0.0123, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:01:33,168 INFO [train.py:901] (1/4) Epoch 8, batch 400, loss[loss=0.2074, simple_loss=0.2915, pruned_loss=0.06168, over 8088.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3294, pruned_loss=0.09544, over 1405336.86 frames. ], batch size: 21, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:45,438 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57000.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:01:52,567 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:01:53,814 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:02:07,050 INFO [train.py:901] (1/4) Epoch 8, batch 450, loss[loss=0.2883, simple_loss=0.3594, pruned_loss=0.1085, over 8398.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3295, pruned_loss=0.09588, over 1449130.28 frames. ], batch size: 49, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:02:10,304 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 2.769e+02 3.532e+02 4.551e+02 9.004e+02, threshold=7.064e+02, percent-clipped=7.0 +2023-02-06 05:02:19,152 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3936, 1.9787, 3.2013, 2.5276, 2.6717, 2.0974, 1.5814, 1.3610], + device='cuda:1'), covar=tensor([0.2827, 0.3008, 0.0754, 0.1798, 0.1518, 0.1510, 0.1367, 0.3059], + device='cuda:1'), in_proj_covar=tensor([0.0839, 0.0790, 0.0679, 0.0791, 0.0880, 0.0728, 0.0680, 0.0718], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:02:27,140 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8327, 1.2411, 5.8986, 2.1328, 5.1258, 4.9448, 5.4070, 5.3261], + device='cuda:1'), covar=tensor([0.0333, 0.4596, 0.0322, 0.2830, 0.1019, 0.0689, 0.0414, 0.0447], + device='cuda:1'), in_proj_covar=tensor([0.0404, 0.0532, 0.0494, 0.0479, 0.0544, 0.0456, 0.0455, 0.0509], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 05:02:41,864 INFO [train.py:901] (1/4) Epoch 8, batch 500, loss[loss=0.2311, simple_loss=0.3008, pruned_loss=0.08069, over 7810.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3303, pruned_loss=0.09633, over 1484855.33 frames. 
], batch size: 19, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:02:56,786 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8943, 2.1570, 1.8055, 2.6974, 1.3538, 1.5201, 1.9842, 2.2876], + device='cuda:1'), covar=tensor([0.0877, 0.0838, 0.1243, 0.0467, 0.1156, 0.1644, 0.0911, 0.0742], + device='cuda:1'), in_proj_covar=tensor([0.0250, 0.0233, 0.0272, 0.0219, 0.0229, 0.0266, 0.0271, 0.0237], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 05:03:15,889 INFO [train.py:901] (1/4) Epoch 8, batch 550, loss[loss=0.2909, simple_loss=0.3662, pruned_loss=0.1079, over 8504.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3294, pruned_loss=0.09575, over 1512233.57 frames. ], batch size: 26, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:03:18,519 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.804e+02 2.761e+02 3.532e+02 4.192e+02 1.400e+03, threshold=7.064e+02, percent-clipped=6.0 +2023-02-06 05:03:39,524 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:03:50,876 INFO [train.py:901] (1/4) Epoch 8, batch 600, loss[loss=0.2455, simple_loss=0.3288, pruned_loss=0.08112, over 8132.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3277, pruned_loss=0.09451, over 1535467.48 frames. ], batch size: 22, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:04:03,002 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 05:04:06,349 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57204.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:13,917 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:25,747 INFO [train.py:901] (1/4) Epoch 8, batch 650, loss[loss=0.3072, simple_loss=0.3677, pruned_loss=0.1234, over 8738.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.3268, pruned_loss=0.09427, over 1553620.25 frames. ], batch size: 39, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:04:28,382 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.474e+02 3.242e+02 4.284e+02 1.059e+03, threshold=6.484e+02, percent-clipped=6.0 +2023-02-06 05:04:41,995 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57256.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 05:04:46,650 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:51,982 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:59,505 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57280.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:00,218 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57281.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 05:05:00,683 INFO [train.py:901] (1/4) Epoch 8, batch 700, loss[loss=0.2086, simple_loss=0.2724, pruned_loss=0.0724, over 7705.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3257, pruned_loss=0.09341, over 1566828.09 frames. 
], batch size: 18, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:34,910 INFO [train.py:901] (1/4) Epoch 8, batch 750, loss[loss=0.2879, simple_loss=0.3405, pruned_loss=0.1176, over 8081.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3256, pruned_loss=0.09361, over 1576334.47 frames. ], batch size: 21, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:38,340 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.846e+02 3.371e+02 4.091e+02 7.333e+02, threshold=6.742e+02, percent-clipped=1.0 +2023-02-06 05:05:49,702 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 05:05:51,162 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:52,589 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:57,731 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 05:06:09,704 INFO [train.py:901] (1/4) Epoch 8, batch 800, loss[loss=0.2948, simple_loss=0.3544, pruned_loss=0.1176, over 8440.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3255, pruned_loss=0.09334, over 1585283.41 frames. ], batch size: 29, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:11,937 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:22,048 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1840, 2.5554, 1.9642, 2.9755, 1.3806, 1.6560, 1.9976, 2.5664], + device='cuda:1'), covar=tensor([0.0745, 0.0718, 0.1144, 0.0451, 0.1352, 0.1557, 0.1154, 0.0720], + device='cuda:1'), in_proj_covar=tensor([0.0248, 0.0231, 0.0269, 0.0214, 0.0227, 0.0262, 0.0266, 0.0234], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 05:06:33,602 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:44,182 INFO [train.py:901] (1/4) Epoch 8, batch 850, loss[loss=0.218, simple_loss=0.2887, pruned_loss=0.07369, over 7813.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3251, pruned_loss=0.09294, over 1591621.54 frames. ], batch size: 20, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:46,897 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.664e+02 3.287e+02 4.255e+02 8.769e+02, threshold=6.575e+02, percent-clipped=4.0 +2023-02-06 05:07:11,112 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:12,464 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:17,828 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:19,067 INFO [train.py:901] (1/4) Epoch 8, batch 900, loss[loss=0.2936, simple_loss=0.3632, pruned_loss=0.112, over 8316.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3264, pruned_loss=0.09352, over 1598955.42 frames. 
], batch size: 25, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:40,918 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:53,666 INFO [train.py:901] (1/4) Epoch 8, batch 950, loss[loss=0.3255, simple_loss=0.3788, pruned_loss=0.1361, over 8248.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3265, pruned_loss=0.0933, over 1604869.40 frames. ], batch size: 24, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:56,421 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.713e+02 3.197e+02 4.416e+02 7.629e+02, threshold=6.394e+02, percent-clipped=6.0 +2023-02-06 05:07:56,665 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57536.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:56,700 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2612, 1.7764, 2.9695, 2.3118, 2.5173, 2.0114, 1.6451, 1.1336], + device='cuda:1'), covar=tensor([0.2893, 0.3295, 0.0762, 0.1876, 0.1453, 0.1702, 0.1416, 0.3341], + device='cuda:1'), in_proj_covar=tensor([0.0827, 0.0779, 0.0675, 0.0778, 0.0863, 0.0720, 0.0667, 0.0706], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:08:04,584 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:13,024 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:14,317 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 05:08:14,516 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:24,649 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3251, 1.9607, 2.8895, 2.3899, 2.6384, 2.0683, 1.8483, 1.7716], + device='cuda:1'), covar=tensor([0.2033, 0.2506, 0.0609, 0.1376, 0.1085, 0.1353, 0.1115, 0.2233], + device='cuda:1'), in_proj_covar=tensor([0.0827, 0.0779, 0.0677, 0.0778, 0.0864, 0.0722, 0.0670, 0.0709], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:08:29,126 INFO [train.py:901] (1/4) Epoch 8, batch 1000, loss[loss=0.1838, simple_loss=0.2658, pruned_loss=0.05091, over 7543.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3268, pruned_loss=0.09327, over 1605519.00 frames. ], batch size: 18, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:08:45,898 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:47,892 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 05:09:00,599 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 05:09:03,903 INFO [train.py:901] (1/4) Epoch 8, batch 1050, loss[loss=0.2422, simple_loss=0.3245, pruned_loss=0.08, over 8338.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3275, pruned_loss=0.09341, over 1608075.03 frames. 
], batch size: 25, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:09:06,634 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.778e+02 2.733e+02 3.382e+02 4.210e+02 1.523e+03, threshold=6.765e+02, percent-clipped=11.0 +2023-02-06 05:09:10,051 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:22,119 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 05:09:24,434 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57663.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:26,522 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:26,667 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 05:09:31,687 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:37,592 INFO [train.py:901] (1/4) Epoch 8, batch 1100, loss[loss=0.233, simple_loss=0.2957, pruned_loss=0.08519, over 7667.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3284, pruned_loss=0.09448, over 1609042.36 frames. ], batch size: 19, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:09:57,897 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2034, 2.6251, 2.8984, 1.2772, 3.2665, 1.8926, 1.5058, 1.7441], + device='cuda:1'), covar=tensor([0.0391, 0.0164, 0.0158, 0.0348, 0.0226, 0.0495, 0.0508, 0.0278], + device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0277, 0.0226, 0.0334, 0.0271, 0.0420, 0.0328, 0.0308], + device='cuda:1'), out_proj_covar=tensor([1.1122e-04, 8.4474e-05, 6.8003e-05, 1.0109e-04, 8.3864e-05, 1.3921e-04, + 1.0172e-04, 9.4731e-05], device='cuda:1') +2023-02-06 05:10:05,399 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:08,897 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,255 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57728.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,705 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 05:10:12,775 INFO [train.py:901] (1/4) Epoch 8, batch 1150, loss[loss=0.3028, simple_loss=0.3814, pruned_loss=0.1121, over 8596.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3275, pruned_loss=0.0935, over 1614681.61 frames. 
], batch size: 39, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:10:15,551 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.752e+02 3.349e+02 4.211e+02 1.172e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 05:10:21,932 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9907, 1.4463, 6.0883, 1.8630, 5.3924, 5.2766, 5.7844, 5.6051], + device='cuda:1'), covar=tensor([0.0514, 0.3941, 0.0294, 0.2952, 0.0985, 0.0584, 0.0383, 0.0410], + device='cuda:1'), in_proj_covar=tensor([0.0400, 0.0527, 0.0491, 0.0473, 0.0534, 0.0449, 0.0447, 0.0507], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 05:10:25,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2431, 2.5277, 1.7553, 2.0666, 1.8897, 1.3222, 1.7332, 2.0318], + device='cuda:1'), covar=tensor([0.1312, 0.0302, 0.0985, 0.0526, 0.0693, 0.1309, 0.0865, 0.0725], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0234, 0.0313, 0.0300, 0.0306, 0.0314, 0.0334, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 05:10:26,728 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:26,795 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:28,154 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:32,811 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:47,638 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2150, 1.6568, 1.6002, 1.3299, 1.1495, 1.4752, 1.7557, 1.8427], + device='cuda:1'), covar=tensor([0.0506, 0.1133, 0.1752, 0.1326, 0.0567, 0.1536, 0.0665, 0.0563], + device='cuda:1'), in_proj_covar=tensor([0.0110, 0.0163, 0.0200, 0.0165, 0.0111, 0.0170, 0.0124, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:10:48,150 INFO [train.py:901] (1/4) Epoch 8, batch 1200, loss[loss=0.2285, simple_loss=0.2819, pruned_loss=0.08755, over 7534.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3268, pruned_loss=0.09334, over 1614086.27 frames. ], batch size: 18, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:10:58,922 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9577, 1.6460, 4.1032, 1.4924, 2.1999, 4.5917, 4.8219, 3.4937], + device='cuda:1'), covar=tensor([0.1710, 0.2028, 0.0383, 0.2707, 0.1294, 0.0464, 0.0426, 0.1271], + device='cuda:1'), in_proj_covar=tensor([0.0250, 0.0281, 0.0239, 0.0272, 0.0247, 0.0218, 0.0288, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 05:11:17,882 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:23,251 INFO [train.py:901] (1/4) Epoch 8, batch 1250, loss[loss=0.2675, simple_loss=0.3371, pruned_loss=0.09897, over 8493.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3272, pruned_loss=0.09387, over 1612741.18 frames. 
], batch size: 28, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:11:25,903 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.817e+02 3.577e+02 4.191e+02 8.690e+02, threshold=7.155e+02, percent-clipped=5.0 +2023-02-06 05:11:41,388 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:53,673 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:58,371 INFO [train.py:901] (1/4) Epoch 8, batch 1300, loss[loss=0.2733, simple_loss=0.343, pruned_loss=0.1018, over 8187.00 frames. ], tot_loss[loss=0.258, simple_loss=0.3279, pruned_loss=0.09407, over 1615852.12 frames. ], batch size: 23, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:12:24,246 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57919.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:31,597 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2129, 1.9715, 3.0434, 2.4263, 2.5667, 2.0821, 1.6742, 1.2848], + device='cuda:1'), covar=tensor([0.3282, 0.3189, 0.0805, 0.1778, 0.1551, 0.1700, 0.1475, 0.3284], + device='cuda:1'), in_proj_covar=tensor([0.0836, 0.0784, 0.0676, 0.0779, 0.0863, 0.0720, 0.0666, 0.0709], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:12:32,236 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:33,380 INFO [train.py:901] (1/4) Epoch 8, batch 1350, loss[loss=0.2111, simple_loss=0.2981, pruned_loss=0.06211, over 8251.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3278, pruned_loss=0.09418, over 1614842.48 frames. ], batch size: 22, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:12:35,571 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4516, 4.5716, 4.0757, 2.1045, 3.9808, 4.0975, 4.2307, 3.6214], + device='cuda:1'), covar=tensor([0.0767, 0.0434, 0.0871, 0.4149, 0.0723, 0.0817, 0.1050, 0.0758], + device='cuda:1'), in_proj_covar=tensor([0.0423, 0.0325, 0.0359, 0.0447, 0.0353, 0.0329, 0.0335, 0.0286], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:12:36,127 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.787e+02 3.281e+02 4.089e+02 1.129e+03, threshold=6.562e+02, percent-clipped=4.0 +2023-02-06 05:12:38,227 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:41,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:49,718 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:01,727 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57972.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:05,179 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:08,423 INFO [train.py:901] (1/4) Epoch 8, batch 1400, loss[loss=0.2369, simple_loss=0.321, pruned_loss=0.07638, over 8362.00 frames. 
], tot_loss[loss=0.2591, simple_loss=0.3286, pruned_loss=0.0948, over 1617763.48 frames. ], batch size: 24, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:23,094 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:41,319 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 05:13:42,543 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 05:13:43,360 INFO [train.py:901] (1/4) Epoch 8, batch 1450, loss[loss=0.2725, simple_loss=0.3411, pruned_loss=0.1019, over 7808.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3295, pruned_loss=0.09454, over 1621475.17 frames. ], batch size: 20, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:46,043 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.668e+02 3.298e+02 4.223e+02 1.032e+03, threshold=6.596e+02, percent-clipped=5.0 +2023-02-06 05:14:18,669 INFO [train.py:901] (1/4) Epoch 8, batch 1500, loss[loss=0.2781, simple_loss=0.3421, pruned_loss=0.107, over 8584.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3305, pruned_loss=0.09545, over 1623204.60 frames. ], batch size: 34, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:28,078 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:31,216 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 05:14:52,815 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:53,267 INFO [train.py:901] (1/4) Epoch 8, batch 1550, loss[loss=0.2386, simple_loss=0.3163, pruned_loss=0.08043, over 8601.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3288, pruned_loss=0.09463, over 1618611.77 frames. ], batch size: 49, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:56,006 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.601e+02 3.218e+02 3.979e+02 6.246e+02, threshold=6.435e+02, percent-clipped=0.0 +2023-02-06 05:15:10,024 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:12,036 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58159.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:14,756 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:27,769 INFO [train.py:901] (1/4) Epoch 8, batch 1600, loss[loss=0.2996, simple_loss=0.3626, pruned_loss=0.1183, over 8357.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.3299, pruned_loss=0.0958, over 1616443.21 frames. 
], batch size: 24, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:15:37,317 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:47,383 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:54,054 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:00,056 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58228.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:02,612 INFO [train.py:901] (1/4) Epoch 8, batch 1650, loss[loss=0.3217, simple_loss=0.3667, pruned_loss=0.1383, over 6781.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3298, pruned_loss=0.09561, over 1615228.60 frames. ], batch size: 71, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:16:05,269 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.785e+02 3.241e+02 4.331e+02 1.468e+03, threshold=6.482e+02, percent-clipped=4.0 +2023-02-06 05:16:16,840 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:37,572 INFO [train.py:901] (1/4) Epoch 8, batch 1700, loss[loss=0.2357, simple_loss=0.3072, pruned_loss=0.08207, over 7963.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3286, pruned_loss=0.09496, over 1614127.54 frames. ], batch size: 21, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:17:00,580 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.4574, 5.4887, 4.9113, 2.0451, 4.9321, 5.1986, 5.2038, 4.7307], + device='cuda:1'), covar=tensor([0.0574, 0.0462, 0.0828, 0.4630, 0.0637, 0.0593, 0.1002, 0.0571], + device='cuda:1'), in_proj_covar=tensor([0.0419, 0.0324, 0.0355, 0.0444, 0.0349, 0.0325, 0.0335, 0.0284], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:17:11,722 INFO [train.py:901] (1/4) Epoch 8, batch 1750, loss[loss=0.2555, simple_loss=0.3214, pruned_loss=0.09482, over 8239.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3277, pruned_loss=0.09365, over 1614929.50 frames. ], batch size: 22, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:17:15,040 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.752e+02 3.204e+02 3.949e+02 8.384e+02, threshold=6.409e+02, percent-clipped=4.0 +2023-02-06 05:17:41,591 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 05:17:43,449 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 05:17:45,746 INFO [train.py:901] (1/4) Epoch 8, batch 1800, loss[loss=0.2463, simple_loss=0.3245, pruned_loss=0.08402, over 8526.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3265, pruned_loss=0.092, over 1617845.19 frames. ], batch size: 31, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:18:06,234 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 05:18:21,312 INFO [train.py:901] (1/4) Epoch 8, batch 1850, loss[loss=0.2536, simple_loss=0.3259, pruned_loss=0.09065, over 8191.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3267, pruned_loss=0.0921, over 1620875.01 frames. 
], batch size: 23, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:18:24,004 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.956e+02 3.603e+02 4.636e+02 8.044e+02, threshold=7.207e+02, percent-clipped=5.0 +2023-02-06 05:18:45,039 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58466.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:18:55,854 INFO [train.py:901] (1/4) Epoch 8, batch 1900, loss[loss=0.2627, simple_loss=0.3361, pruned_loss=0.0946, over 8348.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3258, pruned_loss=0.09205, over 1619216.29 frames. ], batch size: 26, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:19:01,998 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:10,026 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:12,313 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-06 05:19:12,698 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:19,280 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 05:19:30,166 INFO [train.py:901] (1/4) Epoch 8, batch 1950, loss[loss=0.2475, simple_loss=0.3207, pruned_loss=0.08719, over 7641.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3261, pruned_loss=0.09253, over 1619102.58 frames. ], batch size: 19, lr: 9.75e-03, grad_scale: 16.0 +2023-02-06 05:19:30,817 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 05:19:32,606 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-06 05:19:32,812 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.699e+02 3.417e+02 4.103e+02 8.210e+02, threshold=6.834e+02, percent-clipped=5.0 +2023-02-06 05:19:50,870 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 05:20:04,902 INFO [train.py:901] (1/4) Epoch 8, batch 2000, loss[loss=0.2649, simple_loss=0.3409, pruned_loss=0.0944, over 8638.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3257, pruned_loss=0.09211, over 1617949.02 frames. ], batch size: 34, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:29,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58618.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:31,398 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:36,253 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 05:20:39,664 INFO [train.py:901] (1/4) Epoch 8, batch 2050, loss[loss=0.3133, simple_loss=0.3645, pruned_loss=0.1311, over 8772.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3263, pruned_loss=0.0925, over 1620260.53 frames. 
], batch size: 30, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:42,943 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.785e+02 3.396e+02 4.687e+02 1.585e+03, threshold=6.792e+02, percent-clipped=4.0 +2023-02-06 05:21:13,661 INFO [train.py:901] (1/4) Epoch 8, batch 2100, loss[loss=0.2375, simple_loss=0.314, pruned_loss=0.0805, over 8181.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3268, pruned_loss=0.09308, over 1617681.92 frames. ], batch size: 23, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:25,816 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:21:47,831 INFO [train.py:901] (1/4) Epoch 8, batch 2150, loss[loss=0.2256, simple_loss=0.2939, pruned_loss=0.07868, over 7688.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3231, pruned_loss=0.09131, over 1609460.00 frames. ], batch size: 18, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:51,088 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.818e+02 3.372e+02 4.104e+02 8.704e+02, threshold=6.743e+02, percent-clipped=2.0 +2023-02-06 05:21:51,247 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0885, 1.2744, 4.2258, 1.5017, 3.6560, 3.4415, 3.7754, 3.6303], + device='cuda:1'), covar=tensor([0.0449, 0.4195, 0.0464, 0.3239, 0.1056, 0.0755, 0.0480, 0.0629], + device='cuda:1'), in_proj_covar=tensor([0.0403, 0.0528, 0.0503, 0.0473, 0.0540, 0.0457, 0.0454, 0.0510], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 05:22:23,675 INFO [train.py:901] (1/4) Epoch 8, batch 2200, loss[loss=0.3244, simple_loss=0.3677, pruned_loss=0.1405, over 7053.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3247, pruned_loss=0.09237, over 1608630.81 frames. ], batch size: 72, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:22:31,531 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58793.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:22:49,412 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5539, 2.8008, 1.8141, 2.0173, 2.3241, 1.5671, 1.9407, 2.1964], + device='cuda:1'), covar=tensor([0.1276, 0.0277, 0.0872, 0.0621, 0.0567, 0.1069, 0.0880, 0.0749], + device='cuda:1'), in_proj_covar=tensor([0.0344, 0.0231, 0.0308, 0.0294, 0.0300, 0.0313, 0.0334, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 05:22:53,124 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 05:22:58,789 INFO [train.py:901] (1/4) Epoch 8, batch 2250, loss[loss=0.229, simple_loss=0.3168, pruned_loss=0.07059, over 8322.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3262, pruned_loss=0.0928, over 1614787.03 frames. ], batch size: 26, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:02,309 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.600e+02 3.138e+02 4.259e+02 8.800e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-06 05:23:08,952 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. 
limit=2.0 +2023-02-06 05:23:29,266 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:31,271 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:34,472 INFO [train.py:901] (1/4) Epoch 8, batch 2300, loss[loss=0.218, simple_loss=0.3018, pruned_loss=0.06705, over 8028.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3253, pruned_loss=0.09277, over 1611467.89 frames. ], batch size: 22, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:46,510 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:48,578 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:03,549 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:09,482 INFO [train.py:901] (1/4) Epoch 8, batch 2350, loss[loss=0.2645, simple_loss=0.3447, pruned_loss=0.09215, over 8249.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3263, pruned_loss=0.09306, over 1614269.57 frames. ], batch size: 24, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:24:12,942 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.611e+02 3.221e+02 3.780e+02 8.999e+02, threshold=6.441e+02, percent-clipped=2.0 +2023-02-06 05:24:44,047 INFO [train.py:901] (1/4) Epoch 8, batch 2400, loss[loss=0.2404, simple_loss=0.3014, pruned_loss=0.08966, over 7555.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3257, pruned_loss=0.09267, over 1612041.59 frames. ], batch size: 18, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:25:18,661 INFO [train.py:901] (1/4) Epoch 8, batch 2450, loss[loss=0.3225, simple_loss=0.384, pruned_loss=0.1305, over 8441.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3264, pruned_loss=0.09278, over 1613618.61 frames. ], batch size: 29, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:25:21,882 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 3.001e+02 3.706e+02 4.542e+02 9.599e+02, threshold=7.413e+02, percent-clipped=3.0 +2023-02-06 05:25:26,104 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:25:52,508 INFO [train.py:901] (1/4) Epoch 8, batch 2500, loss[loss=0.273, simple_loss=0.3547, pruned_loss=0.09561, over 8097.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3257, pruned_loss=0.09259, over 1614462.97 frames. ], batch size: 23, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:27,546 INFO [train.py:901] (1/4) Epoch 8, batch 2550, loss[loss=0.2879, simple_loss=0.3529, pruned_loss=0.1114, over 8606.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3255, pruned_loss=0.09331, over 1614076.47 frames. 
], batch size: 34, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:29,740 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0434, 1.4516, 1.5075, 1.3194, 1.1150, 1.3779, 1.4985, 1.6617], + device='cuda:1'), covar=tensor([0.0560, 0.1280, 0.1835, 0.1431, 0.0619, 0.1555, 0.0739, 0.0568], + device='cuda:1'), in_proj_covar=tensor([0.0111, 0.0161, 0.0200, 0.0165, 0.0111, 0.0170, 0.0124, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:26:30,876 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.676e+02 3.180e+02 4.175e+02 9.807e+02, threshold=6.360e+02, percent-clipped=4.0 +2023-02-06 05:26:30,966 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:26:45,980 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:03,045 INFO [train.py:901] (1/4) Epoch 8, batch 2600, loss[loss=0.2363, simple_loss=0.3092, pruned_loss=0.08172, over 7651.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3248, pruned_loss=0.09276, over 1609227.47 frames. ], batch size: 19, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:03,179 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:18,803 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4437, 1.4017, 4.4375, 1.7943, 2.4166, 5.1297, 4.9981, 4.4865], + device='cuda:1'), covar=tensor([0.1037, 0.1596, 0.0230, 0.1916, 0.0983, 0.0175, 0.0292, 0.0466], + device='cuda:1'), in_proj_covar=tensor([0.0251, 0.0285, 0.0243, 0.0281, 0.0249, 0.0226, 0.0296, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 05:27:38,220 INFO [train.py:901] (1/4) Epoch 8, batch 2650, loss[loss=0.2552, simple_loss=0.332, pruned_loss=0.0892, over 8467.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3255, pruned_loss=0.09291, over 1612248.52 frames. ], batch size: 29, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:41,643 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.757e+02 3.213e+02 4.207e+02 1.360e+03, threshold=6.426e+02, percent-clipped=6.0 +2023-02-06 05:27:52,003 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:03,086 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:12,228 INFO [train.py:901] (1/4) Epoch 8, batch 2700, loss[loss=0.267, simple_loss=0.3086, pruned_loss=0.1126, over 7922.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.3244, pruned_loss=0.09236, over 1614189.48 frames. ], batch size: 20, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:46,767 INFO [train.py:901] (1/4) Epoch 8, batch 2750, loss[loss=0.2228, simple_loss=0.3069, pruned_loss=0.06929, over 8242.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3243, pruned_loss=0.09201, over 1616411.07 frames. 
], batch size: 22, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:50,099 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.846e+02 3.367e+02 4.274e+02 9.837e+02, threshold=6.735e+02, percent-clipped=6.0 +2023-02-06 05:29:14,891 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-02-06 05:29:22,662 INFO [train.py:901] (1/4) Epoch 8, batch 2800, loss[loss=0.2864, simple_loss=0.346, pruned_loss=0.1134, over 7974.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3244, pruned_loss=0.09152, over 1619507.72 frames. ], batch size: 21, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:29:23,476 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:44,991 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:50,442 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 05:29:56,663 INFO [train.py:901] (1/4) Epoch 8, batch 2850, loss[loss=0.2761, simple_loss=0.3336, pruned_loss=0.1093, over 8082.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3252, pruned_loss=0.09181, over 1619772.21 frames. ], batch size: 21, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:00,150 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.577e+02 2.974e+02 3.773e+02 5.956e+02, threshold=5.948e+02, percent-clipped=0.0 +2023-02-06 05:30:01,814 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:16,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7569, 1.2045, 3.9075, 1.4304, 3.4596, 3.2136, 3.4723, 3.4202], + device='cuda:1'), covar=tensor([0.0528, 0.3996, 0.0478, 0.2867, 0.1053, 0.0777, 0.0582, 0.0577], + device='cuda:1'), in_proj_covar=tensor([0.0408, 0.0531, 0.0508, 0.0473, 0.0549, 0.0460, 0.0458, 0.0508], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 05:30:32,530 INFO [train.py:901] (1/4) Epoch 8, batch 2900, loss[loss=0.257, simple_loss=0.3286, pruned_loss=0.09266, over 8332.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3254, pruned_loss=0.0922, over 1616842.37 frames. 
], batch size: 26, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:39,660 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6776, 2.2308, 3.6604, 2.8308, 2.9113, 2.2752, 1.9138, 1.9038], + device='cuda:1'), covar=tensor([0.2888, 0.3383, 0.0871, 0.2131, 0.1836, 0.1750, 0.1384, 0.3583], + device='cuda:1'), in_proj_covar=tensor([0.0844, 0.0801, 0.0685, 0.0787, 0.0885, 0.0740, 0.0673, 0.0722], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:30:47,128 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2107, 1.1474, 1.1735, 1.1754, 0.8857, 1.2897, 0.0563, 0.9027], + device='cuda:1'), covar=tensor([0.2616, 0.1867, 0.0750, 0.1409, 0.4406, 0.0771, 0.3822, 0.1805], + device='cuda:1'), in_proj_covar=tensor([0.0146, 0.0146, 0.0083, 0.0193, 0.0230, 0.0088, 0.0151, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:30:51,219 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59508.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:59,190 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:03,060 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:04,349 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 05:31:07,543 INFO [train.py:901] (1/4) Epoch 8, batch 2950, loss[loss=0.2898, simple_loss=0.3655, pruned_loss=0.1071, over 8023.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3269, pruned_loss=0.09351, over 1611746.91 frames. ], batch size: 22, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:08,345 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:10,778 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.847e+02 3.468e+02 5.057e+02 9.591e+02, threshold=6.936e+02, percent-clipped=13.0 +2023-02-06 05:31:42,183 INFO [train.py:901] (1/4) Epoch 8, batch 3000, loss[loss=0.2534, simple_loss=0.3279, pruned_loss=0.0894, over 8350.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3256, pruned_loss=0.09289, over 1610391.66 frames. ], batch size: 24, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:42,183 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 05:31:54,427 INFO [train.py:935] (1/4) Epoch 8, validation: loss=0.2021, simple_loss=0.3001, pruned_loss=0.05199, over 944034.00 frames. +2023-02-06 05:31:54,428 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 05:32:30,902 INFO [train.py:901] (1/4) Epoch 8, batch 3050, loss[loss=0.2656, simple_loss=0.3376, pruned_loss=0.09677, over 8463.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3248, pruned_loss=0.09286, over 1608805.61 frames. 
], batch size: 25, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:32:34,251 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.623e+02 3.324e+02 4.059e+02 7.396e+02, threshold=6.648e+02, percent-clipped=1.0 +2023-02-06 05:32:35,816 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:37,140 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:52,395 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:33:05,113 INFO [train.py:901] (1/4) Epoch 8, batch 3100, loss[loss=0.2254, simple_loss=0.311, pruned_loss=0.06994, over 8293.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3256, pruned_loss=0.09327, over 1611145.45 frames. ], batch size: 23, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:40,028 INFO [train.py:901] (1/4) Epoch 8, batch 3150, loss[loss=0.2443, simple_loss=0.309, pruned_loss=0.08983, over 7929.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.326, pruned_loss=0.09372, over 1611396.61 frames. ], batch size: 20, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:43,232 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.052e+02 2.894e+02 3.427e+02 4.526e+02 8.691e+02, threshold=6.853e+02, percent-clipped=4.0 +2023-02-06 05:33:57,517 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 05:34:14,631 INFO [train.py:901] (1/4) Epoch 8, batch 3200, loss[loss=0.2881, simple_loss=0.3635, pruned_loss=0.1064, over 8502.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.325, pruned_loss=0.09297, over 1613003.08 frames. ], batch size: 49, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:23,260 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 05:34:50,973 INFO [train.py:901] (1/4) Epoch 8, batch 3250, loss[loss=0.249, simple_loss=0.3113, pruned_loss=0.09337, over 7237.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3263, pruned_loss=0.09317, over 1615066.67 frames. ], batch size: 16, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:54,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.545e+02 3.201e+02 4.295e+02 9.179e+02, threshold=6.402e+02, percent-clipped=6.0 +2023-02-06 05:35:13,090 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:14,536 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:25,226 INFO [train.py:901] (1/4) Epoch 8, batch 3300, loss[loss=0.2055, simple_loss=0.2755, pruned_loss=0.06776, over 7425.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3261, pruned_loss=0.0935, over 1613899.33 frames. ], batch size: 17, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:35:35,299 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:37,375 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. 
limit=5.0 +2023-02-06 05:35:41,750 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:52,438 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:53,730 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4124, 1.5372, 1.4383, 1.4053, 1.1613, 1.3909, 1.8922, 1.7940], + device='cuda:1'), covar=tensor([0.0517, 0.1183, 0.1803, 0.1395, 0.0564, 0.1555, 0.0672, 0.0575], + device='cuda:1'), in_proj_covar=tensor([0.0109, 0.0160, 0.0199, 0.0164, 0.0109, 0.0169, 0.0123, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:35:59,535 INFO [train.py:901] (1/4) Epoch 8, batch 3350, loss[loss=0.233, simple_loss=0.2905, pruned_loss=0.08778, over 7194.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.327, pruned_loss=0.09396, over 1611667.55 frames. ], batch size: 16, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:02,906 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.795e+02 3.400e+02 4.166e+02 8.824e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 05:36:25,158 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2557, 1.6151, 1.4863, 1.4000, 1.0742, 1.3132, 1.6502, 1.5780], + device='cuda:1'), covar=tensor([0.0486, 0.1191, 0.1744, 0.1352, 0.0583, 0.1561, 0.0684, 0.0597], + device='cuda:1'), in_proj_covar=tensor([0.0110, 0.0162, 0.0200, 0.0165, 0.0111, 0.0170, 0.0124, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:36:31,821 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:36:33,704 INFO [train.py:901] (1/4) Epoch 8, batch 3400, loss[loss=0.303, simple_loss=0.3595, pruned_loss=0.1232, over 8495.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3256, pruned_loss=0.09298, over 1609988.98 frames. ], batch size: 26, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:51,164 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 05:37:08,461 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9377, 1.5813, 2.2502, 1.8914, 2.0038, 1.7955, 1.4742, 0.6673], + device='cuda:1'), covar=tensor([0.3062, 0.2965, 0.0900, 0.1718, 0.1381, 0.1714, 0.1469, 0.2833], + device='cuda:1'), in_proj_covar=tensor([0.0839, 0.0805, 0.0683, 0.0784, 0.0886, 0.0735, 0.0675, 0.0719], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:37:09,550 INFO [train.py:901] (1/4) Epoch 8, batch 3450, loss[loss=0.2278, simple_loss=0.2941, pruned_loss=0.08072, over 5963.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3248, pruned_loss=0.09242, over 1608669.15 frames. 
], batch size: 13, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:12,877 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.668e+02 3.106e+02 3.891e+02 9.201e+02, threshold=6.211e+02, percent-clipped=2.0 +2023-02-06 05:37:27,911 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:36,417 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3219, 1.2723, 4.5358, 1.7373, 4.0008, 3.7901, 4.0830, 3.9459], + device='cuda:1'), covar=tensor([0.0489, 0.3729, 0.0385, 0.2770, 0.1004, 0.0745, 0.0460, 0.0526], + device='cuda:1'), in_proj_covar=tensor([0.0415, 0.0532, 0.0512, 0.0474, 0.0549, 0.0463, 0.0458, 0.0509], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 05:37:36,426 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:44,253 INFO [train.py:901] (1/4) Epoch 8, batch 3500, loss[loss=0.2289, simple_loss=0.2962, pruned_loss=0.08081, over 7532.00 frames. ], tot_loss[loss=0.254, simple_loss=0.324, pruned_loss=0.09198, over 1609181.63 frames. ], batch size: 18, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:45,065 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:53,893 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60096.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:38:02,913 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 05:38:18,648 INFO [train.py:901] (1/4) Epoch 8, batch 3550, loss[loss=0.2245, simple_loss=0.2925, pruned_loss=0.07825, over 7231.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3243, pruned_loss=0.09207, over 1609707.60 frames. ], batch size: 16, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:38:22,097 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.844e+02 3.449e+02 4.512e+02 7.529e+02, threshold=6.898e+02, percent-clipped=5.0 +2023-02-06 05:38:43,014 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-06 05:38:54,366 INFO [train.py:901] (1/4) Epoch 8, batch 3600, loss[loss=0.2532, simple_loss=0.3357, pruned_loss=0.08529, over 8522.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.325, pruned_loss=0.09258, over 1606652.26 frames. ], batch size: 28, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:14,697 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:18,777 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:29,810 INFO [train.py:901] (1/4) Epoch 8, batch 3650, loss[loss=0.2948, simple_loss=0.3565, pruned_loss=0.1166, over 8039.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3251, pruned_loss=0.09282, over 1608888.88 frames. 
], batch size: 22, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:32,067 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:33,129 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.875e+02 2.702e+02 3.457e+02 4.155e+02 9.631e+02, threshold=6.915e+02, percent-clipped=4.0 +2023-02-06 05:39:42,738 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:48,807 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:02,757 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 05:40:04,766 INFO [train.py:901] (1/4) Epoch 8, batch 3700, loss[loss=0.231, simple_loss=0.2909, pruned_loss=0.08554, over 7555.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3257, pruned_loss=0.09279, over 1610873.10 frames. ], batch size: 18, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:34,567 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:39,110 INFO [train.py:901] (1/4) Epoch 8, batch 3750, loss[loss=0.2695, simple_loss=0.3477, pruned_loss=0.09562, over 8463.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3248, pruned_loss=0.09184, over 1613258.92 frames. ], batch size: 27, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:43,054 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.685e+02 3.295e+02 3.882e+02 8.274e+02, threshold=6.589e+02, percent-clipped=2.0 +2023-02-06 05:41:02,724 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:03,425 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4722, 1.8644, 1.9685, 1.2423, 2.0752, 1.4954, 0.5076, 1.6466], + device='cuda:1'), covar=tensor([0.0352, 0.0201, 0.0131, 0.0251, 0.0227, 0.0481, 0.0492, 0.0155], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0279, 0.0228, 0.0335, 0.0267, 0.0424, 0.0330, 0.0307], + device='cuda:1'), out_proj_covar=tensor([1.0899e-04, 8.3723e-05, 6.7981e-05, 1.0004e-04, 8.1914e-05, 1.3918e-04, + 1.0114e-04, 9.3205e-05], device='cuda:1') +2023-02-06 05:41:13,771 INFO [train.py:901] (1/4) Epoch 8, batch 3800, loss[loss=0.3307, simple_loss=0.3769, pruned_loss=0.1422, over 8546.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3241, pruned_loss=0.09132, over 1609598.05 frames. ], batch size: 49, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:41:28,150 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:36,471 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:45,874 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:49,063 INFO [train.py:901] (1/4) Epoch 8, batch 3850, loss[loss=0.2841, simple_loss=0.3631, pruned_loss=0.1025, over 8297.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.3254, pruned_loss=0.09189, over 1616261.70 frames. 
], batch size: 23, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:41:52,292 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.691e+02 3.271e+02 4.212e+02 1.032e+03, threshold=6.541e+02, percent-clipped=5.0 +2023-02-06 05:41:54,201 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:08,493 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 05:42:22,366 INFO [train.py:901] (1/4) Epoch 8, batch 3900, loss[loss=0.2802, simple_loss=0.353, pruned_loss=0.1037, over 8560.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3256, pruned_loss=0.09272, over 1615299.25 frames. ], batch size: 31, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:42:46,597 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:53,421 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6661, 1.7520, 1.9871, 1.6442, 1.0833, 2.0708, 0.1930, 1.2460], + device='cuda:1'), covar=tensor([0.3185, 0.2036, 0.0622, 0.2043, 0.6050, 0.0618, 0.4869, 0.2543], + device='cuda:1'), in_proj_covar=tensor([0.0151, 0.0147, 0.0085, 0.0196, 0.0238, 0.0091, 0.0155, 0.0149], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:42:55,286 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:57,130 INFO [train.py:901] (1/4) Epoch 8, batch 3950, loss[loss=0.2566, simple_loss=0.3415, pruned_loss=0.08587, over 8260.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3243, pruned_loss=0.09157, over 1611848.86 frames. ], batch size: 24, lr: 9.59e-03, grad_scale: 8.0 +2023-02-06 05:43:00,412 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.562e+02 3.362e+02 4.082e+02 8.516e+02, threshold=6.724e+02, percent-clipped=2.0 +2023-02-06 05:43:03,868 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:13,333 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:16,391 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:28,571 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3569, 1.7319, 1.5756, 1.4178, 1.2144, 1.4651, 1.8722, 1.5745], + device='cuda:1'), covar=tensor([0.0442, 0.1098, 0.1591, 0.1251, 0.0531, 0.1377, 0.0626, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0109, 0.0161, 0.0198, 0.0163, 0.0111, 0.0169, 0.0123, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 05:43:31,325 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:31,761 INFO [train.py:901] (1/4) Epoch 8, batch 4000, loss[loss=0.2587, simple_loss=0.3353, pruned_loss=0.09109, over 8476.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3247, pruned_loss=0.09173, over 1610609.35 frames. 
], batch size: 28, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:43:48,271 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:59,774 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:06,267 INFO [train.py:901] (1/4) Epoch 8, batch 4050, loss[loss=0.2596, simple_loss=0.3256, pruned_loss=0.09675, over 8012.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3248, pruned_loss=0.09148, over 1614015.40 frames. ], batch size: 22, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:44:09,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.835e+02 3.722e+02 4.462e+02 8.493e+02, threshold=7.445e+02, percent-clipped=1.0 +2023-02-06 05:44:17,202 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:24,713 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8763, 1.9480, 2.1766, 1.7696, 1.3231, 2.1428, 0.6189, 1.4988], + device='cuda:1'), covar=tensor([0.2491, 0.1749, 0.0668, 0.2083, 0.4516, 0.0613, 0.3957, 0.2181], + device='cuda:1'), in_proj_covar=tensor([0.0148, 0.0145, 0.0085, 0.0193, 0.0232, 0.0090, 0.0151, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:44:30,905 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8062, 1.5070, 2.0328, 1.7480, 1.8101, 1.7158, 1.3664, 0.7184], + device='cuda:1'), covar=tensor([0.2548, 0.2546, 0.0782, 0.1462, 0.1206, 0.1334, 0.1207, 0.2447], + device='cuda:1'), in_proj_covar=tensor([0.0829, 0.0802, 0.0681, 0.0780, 0.0879, 0.0734, 0.0675, 0.0718], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:44:36,979 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:41,540 INFO [train.py:901] (1/4) Epoch 8, batch 4100, loss[loss=0.2442, simple_loss=0.3048, pruned_loss=0.09178, over 7440.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3243, pruned_loss=0.09151, over 1608657.73 frames. ], batch size: 17, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:45:16,312 INFO [train.py:901] (1/4) Epoch 8, batch 4150, loss[loss=0.3475, simple_loss=0.3871, pruned_loss=0.1539, over 7346.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3256, pruned_loss=0.09255, over 1609581.11 frames. ], batch size: 72, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:45:19,089 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:19,618 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.904e+02 3.574e+02 4.093e+02 8.234e+02, threshold=7.147e+02, percent-clipped=2.0 +2023-02-06 05:45:45,332 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:51,066 INFO [train.py:901] (1/4) Epoch 8, batch 4200, loss[loss=0.2833, simple_loss=0.348, pruned_loss=0.1093, over 8481.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3268, pruned_loss=0.09335, over 1606990.12 frames. 
], batch size: 27, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:45:54,024 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,586 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,606 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:07,981 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 05:46:10,874 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:11,535 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60811.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:20,183 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:25,969 INFO [train.py:901] (1/4) Epoch 8, batch 4250, loss[loss=0.3508, simple_loss=0.3995, pruned_loss=0.151, over 6713.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3284, pruned_loss=0.09436, over 1606584.35 frames. ], batch size: 72, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:46:26,076 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6226, 5.8591, 4.8781, 2.0167, 5.0778, 5.4535, 5.4003, 4.8648], + device='cuda:1'), covar=tensor([0.0703, 0.0478, 0.1034, 0.5300, 0.0670, 0.0674, 0.1118, 0.0759], + device='cuda:1'), in_proj_covar=tensor([0.0421, 0.0331, 0.0354, 0.0453, 0.0350, 0.0328, 0.0338, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:46:28,893 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:30,101 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.807e+02 3.546e+02 4.515e+02 1.213e+03, threshold=7.092e+02, percent-clipped=3.0 +2023-02-06 05:46:30,847 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 05:47:01,077 INFO [train.py:901] (1/4) Epoch 8, batch 4300, loss[loss=0.2732, simple_loss=0.3547, pruned_loss=0.09592, over 8104.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3277, pruned_loss=0.09373, over 1611278.36 frames. ], batch size: 23, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:47:35,210 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:35,682 INFO [train.py:901] (1/4) Epoch 8, batch 4350, loss[loss=0.2615, simple_loss=0.3242, pruned_loss=0.09937, over 7791.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.327, pruned_loss=0.09361, over 1609129.32 frames. ], batch size: 19, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:47:39,637 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.751e+02 3.442e+02 4.335e+02 7.709e+02, threshold=6.884e+02, percent-clipped=1.0 +2023-02-06 05:47:51,973 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:59,293 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 05:48:07,094 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4480, 2.6506, 1.7070, 2.0176, 2.0758, 1.4251, 1.7733, 1.9955], + device='cuda:1'), covar=tensor([0.1241, 0.0278, 0.0902, 0.0558, 0.0636, 0.1172, 0.0941, 0.0873], + device='cuda:1'), in_proj_covar=tensor([0.0340, 0.0231, 0.0306, 0.0290, 0.0300, 0.0308, 0.0333, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 05:48:10,332 INFO [train.py:901] (1/4) Epoch 8, batch 4400, loss[loss=0.2057, simple_loss=0.2873, pruned_loss=0.06203, over 8235.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3249, pruned_loss=0.0919, over 1612063.82 frames. ], batch size: 22, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:48:42,561 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 05:48:45,134 INFO [train.py:901] (1/4) Epoch 8, batch 4450, loss[loss=0.2297, simple_loss=0.3182, pruned_loss=0.07059, over 8486.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3235, pruned_loss=0.09098, over 1609755.85 frames. ], batch size: 48, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:48:49,126 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.403e+02 3.130e+02 3.948e+02 8.767e+02, threshold=6.260e+02, percent-clipped=3.0 +2023-02-06 05:49:17,370 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:17,969 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:19,304 INFO [train.py:901] (1/4) Epoch 8, batch 4500, loss[loss=0.2324, simple_loss=0.312, pruned_loss=0.07643, over 8464.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3228, pruned_loss=0.09097, over 1609361.02 frames. ], batch size: 29, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:36,534 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 05:49:53,484 INFO [train.py:901] (1/4) Epoch 8, batch 4550, loss[loss=0.2538, simple_loss=0.3243, pruned_loss=0.09171, over 7436.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3241, pruned_loss=0.09233, over 1611510.71 frames. ], batch size: 17, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:58,150 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.768e+02 3.493e+02 4.645e+02 1.007e+03, threshold=6.986e+02, percent-clipped=6.0 +2023-02-06 05:50:29,333 INFO [train.py:901] (1/4) Epoch 8, batch 4600, loss[loss=0.2142, simple_loss=0.2963, pruned_loss=0.06608, over 7928.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3232, pruned_loss=0.09139, over 1611009.87 frames. ], batch size: 20, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:50:38,244 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:51:03,415 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 05:51:04,234 INFO [train.py:901] (1/4) Epoch 8, batch 4650, loss[loss=0.2426, simple_loss=0.3186, pruned_loss=0.08329, over 7912.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3233, pruned_loss=0.09142, over 1610032.08 frames. 
], batch size: 20, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:51:08,278 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 2.693e+02 3.115e+02 3.876e+02 8.832e+02, threshold=6.229e+02, percent-clipped=3.0 +2023-02-06 05:51:13,729 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1483, 1.6770, 1.2423, 1.7735, 1.4116, 1.0138, 1.3016, 1.6045], + device='cuda:1'), covar=tensor([0.0832, 0.0396, 0.1129, 0.0401, 0.0585, 0.1341, 0.0679, 0.0593], + device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0234, 0.0311, 0.0297, 0.0305, 0.0316, 0.0340, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 05:51:38,690 INFO [train.py:901] (1/4) Epoch 8, batch 4700, loss[loss=0.2747, simple_loss=0.3464, pruned_loss=0.1015, over 8192.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3229, pruned_loss=0.09095, over 1610505.07 frames. ], batch size: 23, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:52:13,892 INFO [train.py:901] (1/4) Epoch 8, batch 4750, loss[loss=0.2256, simple_loss=0.3162, pruned_loss=0.06748, over 8354.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3247, pruned_loss=0.09135, over 1613031.79 frames. ], batch size: 24, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:52:17,856 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.870e+02 3.425e+02 4.672e+02 9.837e+02, threshold=6.850e+02, percent-clipped=8.0 +2023-02-06 05:52:36,777 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 05:52:39,424 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 05:52:48,203 INFO [train.py:901] (1/4) Epoch 8, batch 4800, loss[loss=0.2212, simple_loss=0.294, pruned_loss=0.07421, over 8765.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3246, pruned_loss=0.09172, over 1613957.60 frames. ], batch size: 30, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:53:16,793 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:22,688 INFO [train.py:901] (1/4) Epoch 8, batch 4850, loss[loss=0.2466, simple_loss=0.3293, pruned_loss=0.08197, over 8295.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3231, pruned_loss=0.09059, over 1612955.42 frames. ], batch size: 23, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:53:26,648 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.780e+02 3.448e+02 4.323e+02 7.771e+02, threshold=6.895e+02, percent-clipped=1.0 +2023-02-06 05:53:28,677 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 05:53:36,187 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61451.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:53,270 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:56,863 INFO [train.py:901] (1/4) Epoch 8, batch 4900, loss[loss=0.2248, simple_loss=0.2951, pruned_loss=0.07727, over 7533.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3247, pruned_loss=0.0916, over 1614826.86 frames. ], batch size: 18, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:31,293 INFO [train.py:901] (1/4) Epoch 8, batch 4950, loss[loss=0.2379, simple_loss=0.3111, pruned_loss=0.08241, over 8039.00 frames. 
], tot_loss[loss=0.2548, simple_loss=0.3252, pruned_loss=0.09216, over 1612759.70 frames. ], batch size: 22, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:35,319 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.701e+02 3.325e+02 4.582e+02 7.633e+02, threshold=6.649e+02, percent-clipped=1.0 +2023-02-06 05:54:35,525 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61538.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:55:07,170 INFO [train.py:901] (1/4) Epoch 8, batch 5000, loss[loss=0.2487, simple_loss=0.3242, pruned_loss=0.08654, over 8322.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3254, pruned_loss=0.09241, over 1610538.71 frames. ], batch size: 26, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:20,035 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 05:55:42,157 INFO [train.py:901] (1/4) Epoch 8, batch 5050, loss[loss=0.2541, simple_loss=0.3356, pruned_loss=0.08629, over 8550.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3263, pruned_loss=0.09274, over 1611761.02 frames. ], batch size: 31, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:46,799 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.771e+02 3.459e+02 4.924e+02 1.310e+03, threshold=6.919e+02, percent-clipped=9.0 +2023-02-06 05:56:07,690 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 05:56:17,395 INFO [train.py:901] (1/4) Epoch 8, batch 5100, loss[loss=0.2112, simple_loss=0.2937, pruned_loss=0.06437, over 8666.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3261, pruned_loss=0.09278, over 1613954.62 frames. ], batch size: 39, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:52,939 INFO [train.py:901] (1/4) Epoch 8, batch 5150, loss[loss=0.2673, simple_loss=0.3392, pruned_loss=0.09771, over 8531.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.326, pruned_loss=0.09277, over 1614434.45 frames. ], batch size: 34, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:57,120 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.584e+02 3.190e+02 4.018e+02 8.337e+02, threshold=6.381e+02, percent-clipped=2.0 +2023-02-06 05:57:02,454 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-06 05:57:06,181 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:13,688 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:27,556 INFO [train.py:901] (1/4) Epoch 8, batch 5200, loss[loss=0.2215, simple_loss=0.3064, pruned_loss=0.06824, over 8285.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3243, pruned_loss=0.09184, over 1606616.94 frames. ], batch size: 23, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:57:35,700 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:53,056 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:58:02,400 INFO [train.py:901] (1/4) Epoch 8, batch 5250, loss[loss=0.2918, simple_loss=0.3641, pruned_loss=0.1097, over 8458.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3247, pruned_loss=0.09183, over 1608742.92 frames. 
], batch size: 27, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:58:06,458 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.711e+02 3.309e+02 4.013e+02 1.150e+03, threshold=6.618e+02, percent-clipped=3.0 +2023-02-06 05:58:07,796 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 05:58:37,169 INFO [train.py:901] (1/4) Epoch 8, batch 5300, loss[loss=0.2345, simple_loss=0.3155, pruned_loss=0.07676, over 8456.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3254, pruned_loss=0.09269, over 1610720.31 frames. ], batch size: 27, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:04,765 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2894, 2.2698, 1.9613, 2.8036, 1.3048, 1.6317, 1.8184, 2.2483], + device='cuda:1'), covar=tensor([0.0670, 0.0830, 0.1074, 0.0372, 0.1352, 0.1567, 0.1194, 0.0832], + device='cuda:1'), in_proj_covar=tensor([0.0248, 0.0226, 0.0267, 0.0216, 0.0228, 0.0260, 0.0264, 0.0234], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 05:59:12,105 INFO [train.py:901] (1/4) Epoch 8, batch 5350, loss[loss=0.2601, simple_loss=0.337, pruned_loss=0.0916, over 8372.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.324, pruned_loss=0.0919, over 1608327.58 frames. ], batch size: 24, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:12,262 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:59:16,961 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.785e+02 2.596e+02 3.249e+02 3.983e+02 1.109e+03, threshold=6.498e+02, percent-clipped=6.0 +2023-02-06 05:59:17,996 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0036, 1.6254, 2.2991, 1.8987, 1.9586, 1.8911, 1.5102, 0.6763], + device='cuda:1'), covar=tensor([0.2886, 0.2862, 0.0822, 0.1714, 0.1352, 0.1584, 0.1286, 0.2802], + device='cuda:1'), in_proj_covar=tensor([0.0846, 0.0802, 0.0677, 0.0789, 0.0889, 0.0742, 0.0670, 0.0723], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 05:59:47,814 INFO [train.py:901] (1/4) Epoch 8, batch 5400, loss[loss=0.2158, simple_loss=0.2954, pruned_loss=0.0681, over 8084.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3247, pruned_loss=0.09252, over 1607421.11 frames. ], batch size: 21, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:23,964 INFO [train.py:901] (1/4) Epoch 8, batch 5450, loss[loss=0.2658, simple_loss=0.3462, pruned_loss=0.09266, over 8512.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3246, pruned_loss=0.09209, over 1609688.13 frames. ], batch size: 28, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:28,683 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.625e+02 3.240e+02 4.068e+02 8.471e+02, threshold=6.479e+02, percent-clipped=5.0 +2023-02-06 06:01:00,410 INFO [train.py:901] (1/4) Epoch 8, batch 5500, loss[loss=0.2359, simple_loss=0.308, pruned_loss=0.08196, over 8291.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3244, pruned_loss=0.09164, over 1610749.10 frames. ], batch size: 23, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:01,792 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 06:01:07,838 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3387, 1.5239, 4.4588, 1.6997, 3.8624, 3.7277, 4.0288, 3.9079], + device='cuda:1'), covar=tensor([0.0480, 0.3672, 0.0393, 0.2892, 0.1015, 0.0736, 0.0552, 0.0539], + device='cuda:1'), in_proj_covar=tensor([0.0427, 0.0532, 0.0517, 0.0489, 0.0555, 0.0467, 0.0471, 0.0519], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 06:01:08,451 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:16,548 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:34,824 INFO [train.py:901] (1/4) Epoch 8, batch 5550, loss[loss=0.2358, simple_loss=0.3253, pruned_loss=0.07319, over 8351.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3244, pruned_loss=0.09169, over 1606612.38 frames. ], batch size: 25, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:38,602 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.724e+02 3.277e+02 4.222e+02 9.983e+02, threshold=6.553e+02, percent-clipped=5.0 +2023-02-06 06:01:40,086 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:09,237 INFO [train.py:901] (1/4) Epoch 8, batch 5600, loss[loss=0.2881, simple_loss=0.3509, pruned_loss=0.1126, over 5938.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3249, pruned_loss=0.09232, over 1606028.79 frames. ], batch size: 13, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:02:27,945 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:35,390 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:44,121 INFO [train.py:901] (1/4) Epoch 8, batch 5650, loss[loss=0.2768, simple_loss=0.3619, pruned_loss=0.09584, over 8325.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3259, pruned_loss=0.09287, over 1607293.75 frames. ], batch size: 25, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:02:48,208 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.893e+02 3.442e+02 4.058e+02 7.819e+02, threshold=6.884e+02, percent-clipped=2.0 +2023-02-06 06:03:01,734 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 06:03:03,308 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 06:03:09,918 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 06:03:14,935 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:19,759 INFO [train.py:901] (1/4) Epoch 8, batch 5700, loss[loss=0.2309, simple_loss=0.3153, pruned_loss=0.07332, over 8552.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3247, pruned_loss=0.09236, over 1606734.67 frames. ], batch size: 31, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:03:24,177 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. 
limit=5.0 +2023-02-06 06:03:26,022 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:44,792 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4120, 1.9084, 3.3347, 1.2261, 2.4562, 1.9161, 1.6055, 2.1947], + device='cuda:1'), covar=tensor([0.1681, 0.2041, 0.0765, 0.3639, 0.1528, 0.2577, 0.1653, 0.2208], + device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0487, 0.0532, 0.0563, 0.0605, 0.0537, 0.0456, 0.0597], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:03:53,764 INFO [train.py:901] (1/4) Epoch 8, batch 5750, loss[loss=0.2565, simple_loss=0.3125, pruned_loss=0.1002, over 8137.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3244, pruned_loss=0.09204, over 1605281.74 frames. ], batch size: 22, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:03:58,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.702e+02 3.342e+02 4.214e+02 1.406e+03, threshold=6.684e+02, percent-clipped=3.0 +2023-02-06 06:04:07,822 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 06:04:28,274 INFO [train.py:901] (1/4) Epoch 8, batch 5800, loss[loss=0.2993, simple_loss=0.3568, pruned_loss=0.1209, over 7035.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3243, pruned_loss=0.09171, over 1610907.57 frames. ], batch size: 71, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:04:35,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62391.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:04:48,166 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:04,094 INFO [train.py:901] (1/4) Epoch 8, batch 5850, loss[loss=0.2543, simple_loss=0.3243, pruned_loss=0.09216, over 8609.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3238, pruned_loss=0.09138, over 1611011.84 frames. ], batch size: 34, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:05:08,211 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.970e+02 2.690e+02 3.286e+02 4.000e+02 6.740e+02, threshold=6.571e+02, percent-clipped=1.0 +2023-02-06 06:05:15,060 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2164, 1.1874, 1.2189, 1.1443, 0.8185, 1.3317, 0.0627, 1.0364], + device='cuda:1'), covar=tensor([0.2538, 0.1902, 0.0630, 0.1428, 0.4845, 0.0643, 0.3831, 0.1589], + device='cuda:1'), in_proj_covar=tensor([0.0150, 0.0152, 0.0087, 0.0199, 0.0238, 0.0092, 0.0159, 0.0152], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 06:05:27,198 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:34,611 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:38,455 INFO [train.py:901] (1/4) Epoch 8, batch 5900, loss[loss=0.2976, simple_loss=0.3644, pruned_loss=0.1154, over 8457.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3239, pruned_loss=0.09158, over 1612128.38 frames. 
], batch size: 29, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:05:39,872 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:44,017 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:51,361 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:54,884 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.77 vs. limit=5.0 +2023-02-06 06:06:06,966 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-06 06:06:13,413 INFO [train.py:901] (1/4) Epoch 8, batch 5950, loss[loss=0.1917, simple_loss=0.2697, pruned_loss=0.05688, over 7435.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3229, pruned_loss=0.09086, over 1610468.68 frames. ], batch size: 17, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:17,413 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 2.652e+02 3.220e+02 3.904e+02 8.315e+02, threshold=6.439e+02, percent-clipped=2.0 +2023-02-06 06:06:31,784 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6585, 2.0242, 3.6667, 1.3872, 2.7100, 2.1688, 1.7583, 2.3386], + device='cuda:1'), covar=tensor([0.1530, 0.2002, 0.0602, 0.3534, 0.1244, 0.2320, 0.1534, 0.2033], + device='cuda:1'), in_proj_covar=tensor([0.0479, 0.0490, 0.0535, 0.0568, 0.0604, 0.0540, 0.0460, 0.0603], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:06:34,795 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:06:47,824 INFO [train.py:901] (1/4) Epoch 8, batch 6000, loss[loss=0.2573, simple_loss=0.3333, pruned_loss=0.09071, over 8195.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3226, pruned_loss=0.09103, over 1608899.97 frames. ], batch size: 23, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:47,824 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 06:07:00,014 INFO [train.py:935] (1/4) Epoch 8, validation: loss=0.1996, simple_loss=0.2985, pruned_loss=0.05037, over 944034.00 frames. +2023-02-06 06:07:00,014 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 06:07:12,287 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62599.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:14,963 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:33,818 INFO [train.py:901] (1/4) Epoch 8, batch 6050, loss[loss=0.2441, simple_loss=0.3187, pruned_loss=0.08472, over 8454.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3216, pruned_loss=0.09034, over 1612198.81 frames. 
], batch size: 25, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:07:35,913 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:35,987 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1691, 1.3153, 4.2450, 1.5743, 3.8490, 3.4910, 3.7992, 3.6672], + device='cuda:1'), covar=tensor([0.0424, 0.3579, 0.0437, 0.2788, 0.0861, 0.0733, 0.0486, 0.0585], + device='cuda:1'), in_proj_covar=tensor([0.0438, 0.0549, 0.0530, 0.0500, 0.0563, 0.0475, 0.0480, 0.0534], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 06:07:37,858 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.575e+02 3.268e+02 4.071e+02 9.720e+02, threshold=6.536e+02, percent-clipped=3.0 +2023-02-06 06:07:39,343 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3026, 1.6708, 3.3079, 1.4525, 2.2190, 3.6475, 3.6053, 3.1570], + device='cuda:1'), covar=tensor([0.0857, 0.1440, 0.0385, 0.1896, 0.0952, 0.0290, 0.0489, 0.0623], + device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0286, 0.0246, 0.0274, 0.0257, 0.0227, 0.0295, 0.0289], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 06:07:44,177 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:02,398 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:08,762 INFO [train.py:901] (1/4) Epoch 8, batch 6100, loss[loss=0.2326, simple_loss=0.3106, pruned_loss=0.07735, over 8513.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3233, pruned_loss=0.09084, over 1616206.29 frames. 
], batch size: 26, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:08:10,158 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:12,272 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6281, 2.1638, 3.6719, 2.7322, 2.9282, 2.2501, 1.7550, 1.6063], + device='cuda:1'), covar=tensor([0.3007, 0.3728, 0.0843, 0.2283, 0.1951, 0.1892, 0.1516, 0.3965], + device='cuda:1'), in_proj_covar=tensor([0.0848, 0.0800, 0.0681, 0.0785, 0.0880, 0.0739, 0.0671, 0.0721], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:08:14,894 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7546, 1.8700, 2.0685, 1.7041, 1.1885, 2.1524, 0.3593, 1.3163], + device='cuda:1'), covar=tensor([0.3398, 0.1774, 0.0724, 0.2196, 0.5292, 0.0646, 0.4514, 0.2185], + device='cuda:1'), in_proj_covar=tensor([0.0152, 0.0153, 0.0088, 0.0202, 0.0241, 0.0094, 0.0159, 0.0155], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 06:08:30,095 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0261, 1.3122, 1.3812, 1.1337, 1.0067, 1.3041, 1.6143, 1.8128], + device='cuda:1'), covar=tensor([0.0526, 0.1243, 0.1750, 0.1505, 0.0605, 0.1461, 0.0671, 0.0514], + device='cuda:1'), in_proj_covar=tensor([0.0110, 0.0160, 0.0200, 0.0165, 0.0112, 0.0168, 0.0123, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:08:33,472 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3674, 1.7309, 1.6516, 0.7834, 1.7835, 1.3307, 0.3211, 1.5563], + device='cuda:1'), covar=tensor([0.0235, 0.0145, 0.0150, 0.0242, 0.0170, 0.0445, 0.0375, 0.0128], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0282, 0.0232, 0.0337, 0.0271, 0.0429, 0.0329, 0.0310], + device='cuda:1'), out_proj_covar=tensor([1.0717e-04, 8.3397e-05, 6.8682e-05, 1.0028e-04, 8.2210e-05, 1.3959e-04, + 9.9998e-05, 9.2975e-05], device='cuda:1') +2023-02-06 06:08:37,242 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 06:08:38,228 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-06 06:08:42,660 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1932, 1.7606, 2.7084, 2.1487, 2.4279, 1.9707, 1.5385, 0.8925], + device='cuda:1'), covar=tensor([0.3266, 0.3352, 0.0791, 0.1872, 0.1276, 0.1763, 0.1509, 0.3437], + device='cuda:1'), in_proj_covar=tensor([0.0857, 0.0806, 0.0684, 0.0786, 0.0885, 0.0742, 0.0675, 0.0725], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:08:43,085 INFO [train.py:901] (1/4) Epoch 8, batch 6150, loss[loss=0.2327, simple_loss=0.3115, pruned_loss=0.07692, over 8288.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3232, pruned_loss=0.09094, over 1615530.58 frames. 
], batch size: 23, lr: 9.42e-03, grad_scale: 8.0 +2023-02-06 06:08:47,080 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.667e+02 3.544e+02 4.037e+02 8.376e+02, threshold=7.087e+02, percent-clipped=5.0 +2023-02-06 06:08:52,369 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8658, 1.2924, 2.9730, 1.2132, 2.0021, 3.1775, 3.3071, 2.5765], + device='cuda:1'), covar=tensor([0.1107, 0.1686, 0.0485, 0.2133, 0.1075, 0.0414, 0.0598, 0.0894], + device='cuda:1'), in_proj_covar=tensor([0.0251, 0.0284, 0.0243, 0.0272, 0.0255, 0.0224, 0.0292, 0.0284], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 06:08:55,074 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:56,501 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3602, 1.6795, 2.7275, 1.1914, 1.9994, 1.7296, 1.4179, 1.7431], + device='cuda:1'), covar=tensor([0.1611, 0.1936, 0.0683, 0.3421, 0.1429, 0.2502, 0.1681, 0.1953], + device='cuda:1'), in_proj_covar=tensor([0.0478, 0.0491, 0.0535, 0.0564, 0.0603, 0.0539, 0.0458, 0.0602], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:08:57,036 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:17,709 INFO [train.py:901] (1/4) Epoch 8, batch 6200, loss[loss=0.2825, simple_loss=0.3541, pruned_loss=0.1055, over 8622.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3228, pruned_loss=0.09018, over 1617558.08 frames. ], batch size: 34, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:23,750 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:49,075 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-06 06:09:52,744 INFO [train.py:901] (1/4) Epoch 8, batch 6250, loss[loss=0.2416, simple_loss=0.3048, pruned_loss=0.08922, over 7660.00 frames. ], tot_loss[loss=0.2521, simple_loss=0.3231, pruned_loss=0.09054, over 1618942.53 frames. ], batch size: 19, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:56,741 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.905e+02 2.717e+02 3.222e+02 4.596e+02 9.217e+02, threshold=6.445e+02, percent-clipped=3.0 +2023-02-06 06:09:57,875 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 06:10:08,421 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62855.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:16,909 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:24,986 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:26,044 INFO [train.py:901] (1/4) Epoch 8, batch 6300, loss[loss=0.271, simple_loss=0.3488, pruned_loss=0.0966, over 8352.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3237, pruned_loss=0.09131, over 1617271.20 frames. 
], batch size: 24, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:10:44,086 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:44,273 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1908, 1.7144, 2.5917, 2.0857, 2.3271, 1.9854, 1.5844, 0.9380], + device='cuda:1'), covar=tensor([0.2942, 0.3209, 0.0817, 0.1791, 0.1423, 0.1769, 0.1508, 0.3215], + device='cuda:1'), in_proj_covar=tensor([0.0862, 0.0815, 0.0688, 0.0797, 0.0898, 0.0749, 0.0678, 0.0734], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:10:58,980 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0114, 2.5045, 3.0302, 1.0750, 3.1989, 1.6390, 1.3268, 1.5993], + device='cuda:1'), covar=tensor([0.0487, 0.0240, 0.0174, 0.0434, 0.0202, 0.0479, 0.0537, 0.0338], + device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0286, 0.0234, 0.0341, 0.0274, 0.0434, 0.0335, 0.0315], + device='cuda:1'), out_proj_covar=tensor([1.0864e-04, 8.4565e-05, 6.8971e-05, 1.0117e-04, 8.2937e-05, 1.4138e-04, + 1.0202e-04, 9.4461e-05], device='cuda:1') +2023-02-06 06:11:01,296 INFO [train.py:901] (1/4) Epoch 8, batch 6350, loss[loss=0.2524, simple_loss=0.3113, pruned_loss=0.09678, over 7931.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3232, pruned_loss=0.09082, over 1618638.80 frames. ], batch size: 20, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:05,347 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.824e+02 3.504e+02 4.161e+02 7.437e+02, threshold=7.007e+02, percent-clipped=2.0 +2023-02-06 06:11:12,103 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:35,243 INFO [train.py:901] (1/4) Epoch 8, batch 6400, loss[loss=0.2445, simple_loss=0.3327, pruned_loss=0.0782, over 8252.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3225, pruned_loss=0.09022, over 1612307.99 frames. ], batch size: 24, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:52,121 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:03,492 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:07,444 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:09,582 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:10,063 INFO [train.py:901] (1/4) Epoch 8, batch 6450, loss[loss=0.1794, simple_loss=0.2539, pruned_loss=0.0525, over 7789.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3227, pruned_loss=0.0903, over 1610774.44 frames. 
], batch size: 20, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:12:14,129 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 2.983e+02 3.820e+02 5.218e+02 9.633e+02, threshold=7.640e+02, percent-clipped=4.0 +2023-02-06 06:12:26,417 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9192, 1.7145, 1.5761, 1.3294, 1.3531, 1.4843, 2.2838, 2.0072], + device='cuda:1'), covar=tensor([0.0512, 0.1242, 0.1808, 0.1437, 0.0615, 0.1489, 0.0628, 0.0546], + device='cuda:1'), in_proj_covar=tensor([0.0109, 0.0160, 0.0198, 0.0166, 0.0112, 0.0168, 0.0122, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:12:31,369 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:45,222 INFO [train.py:901] (1/4) Epoch 8, batch 6500, loss[loss=0.2486, simple_loss=0.3233, pruned_loss=0.08695, over 7548.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3235, pruned_loss=0.09079, over 1610818.08 frames. ], batch size: 18, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:14,221 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:20,113 INFO [train.py:901] (1/4) Epoch 8, batch 6550, loss[loss=0.262, simple_loss=0.3358, pruned_loss=0.0941, over 8588.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3242, pruned_loss=0.09121, over 1613779.12 frames. ], batch size: 31, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:21,925 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 06:13:22,301 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:24,225 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.645e+02 3.116e+02 3.905e+02 9.747e+02, threshold=6.232e+02, percent-clipped=3.0 +2023-02-06 06:13:27,764 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:31,975 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:49,400 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 06:13:54,676 INFO [train.py:901] (1/4) Epoch 8, batch 6600, loss[loss=0.3403, simple_loss=0.3818, pruned_loss=0.1494, over 6426.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3246, pruned_loss=0.09151, over 1614089.46 frames. ], batch size: 71, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:14:07,399 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 06:14:29,751 INFO [train.py:901] (1/4) Epoch 8, batch 6650, loss[loss=0.2854, simple_loss=0.3353, pruned_loss=0.1178, over 8327.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3239, pruned_loss=0.09144, over 1614403.67 frames. 
], batch size: 26, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:14:33,634 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 2.943e+02 3.528e+02 4.449e+02 1.178e+03, threshold=7.055e+02, percent-clipped=8.0 +2023-02-06 06:14:34,509 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:14:42,440 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:01,181 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:03,732 INFO [train.py:901] (1/4) Epoch 8, batch 6700, loss[loss=0.2604, simple_loss=0.3414, pruned_loss=0.08972, over 8193.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3243, pruned_loss=0.09108, over 1620183.84 frames. ], batch size: 23, lr: 9.38e-03, grad_scale: 16.0 +2023-02-06 06:15:19,273 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:29,353 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:38,600 INFO [train.py:901] (1/4) Epoch 8, batch 6750, loss[loss=0.2671, simple_loss=0.3375, pruned_loss=0.09836, over 8108.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3243, pruned_loss=0.09169, over 1612772.67 frames. ], batch size: 23, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:15:43,297 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.603e+02 3.068e+02 3.707e+02 1.416e+03, threshold=6.136e+02, percent-clipped=3.0 +2023-02-06 06:15:46,218 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:13,027 INFO [train.py:901] (1/4) Epoch 8, batch 6800, loss[loss=0.2589, simple_loss=0.3367, pruned_loss=0.09054, over 8335.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3252, pruned_loss=0.09229, over 1615148.20 frames. ], batch size: 25, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:16:21,016 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 06:16:24,480 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63399.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:24,626 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.60 vs. limit=5.0 +2023-02-06 06:16:42,719 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:47,930 INFO [train.py:901] (1/4) Epoch 8, batch 6850, loss[loss=0.2014, simple_loss=0.2774, pruned_loss=0.06267, over 8030.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3244, pruned_loss=0.09208, over 1612490.93 frames. 
], batch size: 22, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:16:52,294 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:52,779 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.669e+02 3.418e+02 4.059e+02 7.847e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 06:16:53,660 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:11,112 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 06:17:22,262 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 06:17:22,530 INFO [train.py:901] (1/4) Epoch 8, batch 6900, loss[loss=0.2842, simple_loss=0.3556, pruned_loss=0.1064, over 7936.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3243, pruned_loss=0.09226, over 1608241.43 frames. ], batch size: 20, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:17:39,556 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,287 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,755 INFO [train.py:901] (1/4) Epoch 8, batch 6950, loss[loss=0.2161, simple_loss=0.2906, pruned_loss=0.07077, over 7639.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3244, pruned_loss=0.09182, over 1612993.22 frames. ], batch size: 19, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:18:03,569 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.671e+02 3.369e+02 4.495e+02 9.890e+02, threshold=6.738e+02, percent-clipped=4.0 +2023-02-06 06:18:18,620 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 06:18:32,018 INFO [train.py:901] (1/4) Epoch 8, batch 7000, loss[loss=0.2403, simple_loss=0.3227, pruned_loss=0.07894, over 8575.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3238, pruned_loss=0.09135, over 1612838.14 frames. ], batch size: 39, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:18:32,816 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:36,333 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:48,879 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:19:08,082 INFO [train.py:901] (1/4) Epoch 8, batch 7050, loss[loss=0.2422, simple_loss=0.3263, pruned_loss=0.0791, over 8189.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3224, pruned_loss=0.09033, over 1607732.61 frames. ], batch size: 23, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:19:12,569 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.720e+02 3.242e+02 3.930e+02 7.648e+02, threshold=6.484e+02, percent-clipped=3.0 +2023-02-06 06:19:42,435 INFO [train.py:901] (1/4) Epoch 8, batch 7100, loss[loss=0.2825, simple_loss=0.3419, pruned_loss=0.1115, over 8457.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3227, pruned_loss=0.09066, over 1610173.19 frames. 
], batch size: 25, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:19:53,538 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:17,190 INFO [train.py:901] (1/4) Epoch 8, batch 7150, loss[loss=0.2596, simple_loss=0.3138, pruned_loss=0.1027, over 8054.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3222, pruned_loss=0.09063, over 1608766.20 frames. ], batch size: 20, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:21,748 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.778e+02 3.549e+02 4.516e+02 1.097e+03, threshold=7.098e+02, percent-clipped=7.0 +2023-02-06 06:20:36,024 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 06:20:51,752 INFO [train.py:901] (1/4) Epoch 8, batch 7200, loss[loss=0.2383, simple_loss=0.316, pruned_loss=0.0803, over 8136.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3217, pruned_loss=0.09046, over 1608717.95 frames. ], batch size: 22, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:51,835 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:53,162 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:59,479 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2440, 1.4522, 1.5619, 1.3775, 1.0243, 1.6239, 1.5702, 1.6551], + device='cuda:1'), covar=tensor([0.0508, 0.1285, 0.1825, 0.1414, 0.0614, 0.1382, 0.0735, 0.0595], + device='cuda:1'), in_proj_covar=tensor([0.0107, 0.0160, 0.0198, 0.0163, 0.0111, 0.0167, 0.0122, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:21:23,980 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:26,386 INFO [train.py:901] (1/4) Epoch 8, batch 7250, loss[loss=0.2862, simple_loss=0.3514, pruned_loss=0.1105, over 8100.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3222, pruned_loss=0.0909, over 1610492.05 frames. ], batch size: 23, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:21:30,981 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.694e+02 3.202e+02 4.148e+02 8.009e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 06:21:47,991 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:00,780 INFO [train.py:901] (1/4) Epoch 8, batch 7300, loss[loss=0.1933, simple_loss=0.2682, pruned_loss=0.05918, over 7200.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3211, pruned_loss=0.09025, over 1604262.94 frames. ], batch size: 16, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:12,956 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:14,359 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:36,720 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:37,312 INFO [train.py:901] (1/4) Epoch 8, batch 7350, loss[loss=0.275, simple_loss=0.3515, pruned_loss=0.09921, over 8328.00 frames. 
], tot_loss[loss=0.2509, simple_loss=0.3216, pruned_loss=0.09012, over 1606688.30 frames. ], batch size: 25, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:38,891 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:42,243 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.551e+02 3.183e+02 3.767e+02 5.416e+02, threshold=6.365e+02, percent-clipped=0.0 +2023-02-06 06:22:48,939 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63948.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:52,632 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-06 06:22:53,083 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5927, 4.6321, 4.1556, 1.7531, 4.0489, 3.9332, 4.2218, 3.7782], + device='cuda:1'), covar=tensor([0.0711, 0.0511, 0.1043, 0.4923, 0.0881, 0.0805, 0.1264, 0.0709], + device='cuda:1'), in_proj_covar=tensor([0.0437, 0.0349, 0.0366, 0.0461, 0.0359, 0.0339, 0.0353, 0.0299], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:22:53,943 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:05,949 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 06:23:10,851 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:12,560 INFO [train.py:901] (1/4) Epoch 8, batch 7400, loss[loss=0.2687, simple_loss=0.3382, pruned_loss=0.09954, over 8023.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3225, pruned_loss=0.09073, over 1607594.45 frames. ], batch size: 22, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:24,825 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 06:23:48,438 INFO [train.py:901] (1/4) Epoch 8, batch 7450, loss[loss=0.2798, simple_loss=0.3438, pruned_loss=0.1079, over 8554.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3225, pruned_loss=0.09122, over 1608191.36 frames. ], batch size: 31, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:50,937 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 06:23:53,163 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.827e+02 3.358e+02 3.935e+02 9.777e+02, threshold=6.715e+02, percent-clipped=5.0 +2023-02-06 06:23:54,697 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:58,109 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:05,483 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 06:24:10,172 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:23,174 INFO [train.py:901] (1/4) Epoch 8, batch 7500, loss[loss=0.2514, simple_loss=0.3206, pruned_loss=0.09112, over 8192.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3224, pruned_loss=0.09057, over 1611609.57 frames. 
], batch size: 23, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:24:30,452 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2670, 1.7072, 1.5838, 1.5927, 1.2901, 1.6423, 2.0970, 2.0426], + device='cuda:1'), covar=tensor([0.0456, 0.1241, 0.1864, 0.1357, 0.0659, 0.1588, 0.0694, 0.0593], + device='cuda:1'), in_proj_covar=tensor([0.0107, 0.0160, 0.0199, 0.0164, 0.0111, 0.0168, 0.0122, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:24:55,913 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 06:24:57,464 INFO [train.py:901] (1/4) Epoch 8, batch 7550, loss[loss=0.2553, simple_loss=0.3372, pruned_loss=0.08671, over 8294.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3243, pruned_loss=0.09171, over 1614661.36 frames. ], batch size: 23, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:02,130 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.846e+02 3.017e+02 3.905e+02 4.969e+02 7.546e+02, threshold=7.810e+02, percent-clipped=1.0 +2023-02-06 06:25:11,681 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:13,076 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:24,182 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:28,963 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:30,994 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:32,164 INFO [train.py:901] (1/4) Epoch 8, batch 7600, loss[loss=0.2596, simple_loss=0.3303, pruned_loss=0.09446, over 8323.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3234, pruned_loss=0.091, over 1617138.09 frames. ], batch size: 25, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:49,061 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64207.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:07,009 INFO [train.py:901] (1/4) Epoch 8, batch 7650, loss[loss=0.2506, simple_loss=0.3259, pruned_loss=0.08772, over 8351.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3236, pruned_loss=0.09109, over 1613543.49 frames. ], batch size: 24, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:11,826 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.778e+02 3.467e+02 5.154e+02 1.113e+03, threshold=6.933e+02, percent-clipped=3.0 +2023-02-06 06:26:19,378 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:38,169 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:40,891 INFO [train.py:901] (1/4) Epoch 8, batch 7700, loss[loss=0.2555, simple_loss=0.3261, pruned_loss=0.09249, over 8469.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3221, pruned_loss=0.09058, over 1612038.07 frames. 
], batch size: 27, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:45,250 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:56,495 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:08,131 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,004 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,460 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 06:27:13,351 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:16,533 INFO [train.py:901] (1/4) Epoch 8, batch 7750, loss[loss=0.2303, simple_loss=0.3048, pruned_loss=0.07787, over 7963.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3215, pruned_loss=0.09058, over 1607725.16 frames. ], batch size: 21, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:27:21,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.516e+02 3.070e+02 3.996e+02 6.859e+02, threshold=6.139e+02, percent-clipped=0.0 +2023-02-06 06:27:25,266 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:38,600 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:43,238 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8964, 1.2493, 1.5300, 1.2651, 0.8917, 1.2995, 1.5107, 1.4708], + device='cuda:1'), covar=tensor([0.0505, 0.1284, 0.1633, 0.1385, 0.0600, 0.1501, 0.0656, 0.0579], + device='cuda:1'), in_proj_covar=tensor([0.0107, 0.0160, 0.0198, 0.0165, 0.0111, 0.0168, 0.0121, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:27:51,130 INFO [train.py:901] (1/4) Epoch 8, batch 7800, loss[loss=0.2572, simple_loss=0.3313, pruned_loss=0.09152, over 8035.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3224, pruned_loss=0.09085, over 1605465.88 frames. ], batch size: 22, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:27:53,240 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:58,818 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:28:25,447 INFO [train.py:901] (1/4) Epoch 8, batch 7850, loss[loss=0.2975, simple_loss=0.357, pruned_loss=0.119, over 8256.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3235, pruned_loss=0.09081, over 1613928.95 frames. ], batch size: 24, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:27,099 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.84 vs. 
limit=2.0 +2023-02-06 06:28:30,098 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.075e+02 2.873e+02 3.519e+02 4.505e+02 1.254e+03, threshold=7.037e+02, percent-clipped=6.0 +2023-02-06 06:28:31,631 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7644, 2.0047, 1.9779, 1.5246, 2.0821, 1.5626, 1.1637, 1.7711], + device='cuda:1'), covar=tensor([0.0260, 0.0147, 0.0108, 0.0228, 0.0154, 0.0351, 0.0366, 0.0137], + device='cuda:1'), in_proj_covar=tensor([0.0364, 0.0286, 0.0237, 0.0350, 0.0276, 0.0444, 0.0338, 0.0321], + device='cuda:1'), out_proj_covar=tensor([1.0975e-04, 8.3635e-05, 6.9924e-05, 1.0379e-04, 8.2563e-05, 1.4448e-04, + 1.0234e-04, 9.5831e-05], device='cuda:1') +2023-02-06 06:28:58,103 INFO [train.py:901] (1/4) Epoch 8, batch 7900, loss[loss=0.3247, simple_loss=0.3688, pruned_loss=0.1403, over 6536.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3237, pruned_loss=0.09158, over 1610567.22 frames. ], batch size: 71, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:58,917 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:10,202 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:32,352 INFO [train.py:901] (1/4) Epoch 8, batch 7950, loss[loss=0.2477, simple_loss=0.3284, pruned_loss=0.08355, over 8588.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3258, pruned_loss=0.09237, over 1616492.12 frames. ], batch size: 39, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:29:37,068 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.689e+02 3.383e+02 4.341e+02 8.251e+02, threshold=6.766e+02, percent-clipped=4.0 +2023-02-06 06:29:40,082 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:54,374 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-06 06:29:56,713 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:03,530 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:06,048 INFO [train.py:901] (1/4) Epoch 8, batch 8000, loss[loss=0.2283, simple_loss=0.3121, pruned_loss=0.07223, over 8468.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3254, pruned_loss=0.09219, over 1618944.06 frames. 
], batch size: 29, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:14,266 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:17,199 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6029, 2.0933, 3.5236, 1.2493, 2.3903, 1.9739, 1.6985, 2.1014], + device='cuda:1'), covar=tensor([0.1499, 0.1762, 0.0535, 0.3378, 0.1339, 0.2359, 0.1516, 0.2041], + device='cuda:1'), in_proj_covar=tensor([0.0469, 0.0478, 0.0516, 0.0553, 0.0591, 0.0527, 0.0451, 0.0588], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:30:20,524 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:23,051 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:26,235 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9921, 1.7648, 3.4475, 1.4264, 2.4833, 3.8889, 3.9668, 3.3752], + device='cuda:1'), covar=tensor([0.1037, 0.1415, 0.0371, 0.1916, 0.0818, 0.0226, 0.0362, 0.0558], + device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0284, 0.0242, 0.0274, 0.0251, 0.0225, 0.0295, 0.0283], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 06:30:39,928 INFO [train.py:901] (1/4) Epoch 8, batch 8050, loss[loss=0.3024, simple_loss=0.357, pruned_loss=0.1239, over 6840.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3241, pruned_loss=0.09203, over 1606469.61 frames. ], batch size: 71, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:41,681 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 06:30:44,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.434e+02 2.955e+02 3.616e+02 6.730e+02, threshold=5.909e+02, percent-clipped=0.0 +2023-02-06 06:30:51,580 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:13,265 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 06:31:17,613 INFO [train.py:901] (1/4) Epoch 9, batch 0, loss[loss=0.255, simple_loss=0.3368, pruned_loss=0.08663, over 8462.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3368, pruned_loss=0.08663, over 8462.00 frames. ], batch size: 25, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:31:17,614 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 06:31:28,851 INFO [train.py:935] (1/4) Epoch 9, validation: loss=0.1983, simple_loss=0.2974, pruned_loss=0.04961, over 944034.00 frames. +2023-02-06 06:31:28,852 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 06:31:29,659 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,196 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,994 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-06 06:31:43,423 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-06 06:31:56,868 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:58,420 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:02,438 INFO [train.py:901] (1/4) Epoch 9, batch 50, loss[loss=0.2125, simple_loss=0.2986, pruned_loss=0.06322, over 8472.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3241, pruned_loss=0.0901, over 368025.82 frames. ], batch size: 25, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:32:06,627 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:07,944 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4535, 1.9880, 3.3585, 1.1441, 2.3137, 1.7517, 1.7421, 1.9378], + device='cuda:1'), covar=tensor([0.1966, 0.2100, 0.0754, 0.4194, 0.1732, 0.3105, 0.1823, 0.2545], + device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0482, 0.0522, 0.0562, 0.0595, 0.0532, 0.0457, 0.0595], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:32:16,341 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 06:32:18,980 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.818e+02 3.347e+02 4.122e+02 1.189e+03, threshold=6.695e+02, percent-clipped=9.0 +2023-02-06 06:32:30,208 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4365, 1.8580, 1.8557, 1.0305, 1.9620, 1.3329, 0.5078, 1.6189], + device='cuda:1'), covar=tensor([0.0287, 0.0161, 0.0122, 0.0289, 0.0181, 0.0459, 0.0423, 0.0143], + device='cuda:1'), in_proj_covar=tensor([0.0372, 0.0288, 0.0239, 0.0353, 0.0281, 0.0447, 0.0340, 0.0324], + device='cuda:1'), out_proj_covar=tensor([1.1223e-04, 8.4071e-05, 7.0473e-05, 1.0408e-04, 8.4099e-05, 1.4525e-04, + 1.0258e-04, 9.6760e-05], device='cuda:1') +2023-02-06 06:32:31,576 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:36,073 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:37,300 INFO [train.py:901] (1/4) Epoch 9, batch 100, loss[loss=0.241, simple_loss=0.3237, pruned_loss=0.07911, over 8493.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3227, pruned_loss=0.09003, over 641960.15 frames. ], batch size: 28, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:32:41,535 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:42,071 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 06:32:49,741 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:11,715 INFO [train.py:901] (1/4) Epoch 9, batch 150, loss[loss=0.3035, simple_loss=0.3481, pruned_loss=0.1295, over 7342.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3237, pruned_loss=0.09126, over 859657.98 frames. 
], batch size: 71, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:33:16,753 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:20,058 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:27,775 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.896e+02 2.577e+02 3.213e+02 3.848e+02 9.281e+02, threshold=6.425e+02, percent-clipped=3.0 +2023-02-06 06:33:45,631 INFO [train.py:901] (1/4) Epoch 9, batch 200, loss[loss=0.2878, simple_loss=0.3484, pruned_loss=0.1136, over 8367.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3247, pruned_loss=0.09139, over 1030523.14 frames. ], batch size: 24, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:34:21,132 INFO [train.py:901] (1/4) Epoch 9, batch 250, loss[loss=0.231, simple_loss=0.2963, pruned_loss=0.08283, over 7547.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3245, pruned_loss=0.0913, over 1154124.43 frames. ], batch size: 18, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:34,285 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 06:34:36,819 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 2.841e+02 3.295e+02 4.179e+02 1.029e+03, threshold=6.590e+02, percent-clipped=5.0 +2023-02-06 06:34:39,012 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:42,833 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 06:34:44,900 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:54,260 INFO [train.py:901] (1/4) Epoch 9, batch 300, loss[loss=0.2023, simple_loss=0.2954, pruned_loss=0.05456, over 8461.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3245, pruned_loss=0.0906, over 1259448.03 frames. ], batch size: 25, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:54,455 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:11,900 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:15,052 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:26,440 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:30,327 INFO [train.py:901] (1/4) Epoch 9, batch 350, loss[loss=0.2752, simple_loss=0.3447, pruned_loss=0.1029, over 8633.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3243, pruned_loss=0.09057, over 1337216.88 frames. ], batch size: 49, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:35:31,388 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 06:35:46,490 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.892e+02 2.570e+02 3.183e+02 3.796e+02 1.000e+03, threshold=6.367e+02, percent-clipped=4.0 +2023-02-06 06:36:03,872 INFO [train.py:901] (1/4) Epoch 9, batch 400, loss[loss=0.1928, simple_loss=0.276, pruned_loss=0.05482, over 8027.00 frames. 
], tot_loss[loss=0.2497, simple_loss=0.3219, pruned_loss=0.08881, over 1399334.74 frames. ], batch size: 22, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:03,940 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65065.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:04,747 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:09,398 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:12,871 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:30,518 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:33,062 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:37,774 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:38,389 INFO [train.py:901] (1/4) Epoch 9, batch 450, loss[loss=0.2452, simple_loss=0.3122, pruned_loss=0.08906, over 7644.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3219, pruned_loss=0.08879, over 1449241.48 frames. ], batch size: 19, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:46,200 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:51,803 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4382, 1.8564, 1.8529, 0.9880, 2.0268, 1.3885, 0.4042, 1.6585], + device='cuda:1'), covar=tensor([0.0285, 0.0161, 0.0140, 0.0271, 0.0194, 0.0518, 0.0440, 0.0132], + device='cuda:1'), in_proj_covar=tensor([0.0366, 0.0291, 0.0240, 0.0349, 0.0281, 0.0445, 0.0338, 0.0321], + device='cuda:1'), out_proj_covar=tensor([1.1022e-04, 8.5274e-05, 7.0688e-05, 1.0267e-04, 8.4168e-05, 1.4412e-04, + 1.0199e-04, 9.5694e-05], device='cuda:1') +2023-02-06 06:36:53,165 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0736, 1.3058, 4.2155, 1.4515, 3.7025, 3.4086, 3.8133, 3.6152], + device='cuda:1'), covar=tensor([0.0566, 0.4481, 0.0490, 0.3371, 0.1125, 0.0859, 0.0561, 0.0724], + device='cuda:1'), in_proj_covar=tensor([0.0428, 0.0548, 0.0538, 0.0496, 0.0562, 0.0476, 0.0479, 0.0535], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 06:36:56,313 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.705e+02 3.323e+02 3.920e+02 9.407e+02, threshold=6.647e+02, percent-clipped=6.0 +2023-02-06 06:37:08,207 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7694, 1.3783, 3.9024, 1.3458, 3.4562, 3.2078, 3.4985, 3.3457], + device='cuda:1'), covar=tensor([0.0539, 0.3780, 0.0593, 0.3186, 0.1133, 0.0856, 0.0592, 0.0692], + device='cuda:1'), in_proj_covar=tensor([0.0426, 0.0547, 0.0535, 0.0494, 0.0560, 0.0476, 0.0477, 0.0532], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 06:37:13,479 INFO [train.py:901] (1/4) Epoch 9, batch 500, loss[loss=0.2286, simple_loss=0.3134, pruned_loss=0.07188, over 8191.00 frames. 
], tot_loss[loss=0.2499, simple_loss=0.3226, pruned_loss=0.08864, over 1486926.40 frames. ], batch size: 23, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:37:23,274 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:35,046 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:45,904 INFO [train.py:901] (1/4) Epoch 9, batch 550, loss[loss=0.2718, simple_loss=0.3441, pruned_loss=0.09975, over 8452.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3232, pruned_loss=0.0903, over 1510981.72 frames. ], batch size: 27, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:37:51,955 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:52,663 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65223.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:52,681 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0942, 1.2931, 1.2474, 0.7548, 1.3338, 1.0466, 0.4041, 1.1644], + device='cuda:1'), covar=tensor([0.0180, 0.0132, 0.0107, 0.0195, 0.0118, 0.0307, 0.0299, 0.0108], + device='cuda:1'), in_proj_covar=tensor([0.0363, 0.0290, 0.0239, 0.0348, 0.0279, 0.0441, 0.0336, 0.0320], + device='cuda:1'), out_proj_covar=tensor([1.0949e-04, 8.5071e-05, 7.0429e-05, 1.0219e-04, 8.3463e-05, 1.4277e-04, + 1.0133e-04, 9.5413e-05], device='cuda:1') +2023-02-06 06:37:56,727 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:03,152 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.829e+02 3.496e+02 4.355e+02 8.306e+02, threshold=6.991e+02, percent-clipped=2.0 +2023-02-06 06:38:21,250 INFO [train.py:901] (1/4) Epoch 9, batch 600, loss[loss=0.2372, simple_loss=0.3062, pruned_loss=0.08415, over 7809.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3221, pruned_loss=0.0896, over 1533085.63 frames. ], batch size: 20, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:38:38,352 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 06:38:43,860 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1166, 2.7219, 3.1092, 1.4244, 3.2682, 1.9589, 1.5391, 2.0359], + device='cuda:1'), covar=tensor([0.0486, 0.0180, 0.0192, 0.0411, 0.0227, 0.0483, 0.0546, 0.0275], + device='cuda:1'), in_proj_covar=tensor([0.0360, 0.0286, 0.0237, 0.0345, 0.0277, 0.0440, 0.0333, 0.0315], + device='cuda:1'), out_proj_covar=tensor([1.0834e-04, 8.3912e-05, 6.9865e-05, 1.0129e-04, 8.3041e-05, 1.4269e-04, + 1.0025e-04, 9.3990e-05], device='cuda:1') +2023-02-06 06:38:46,500 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:54,293 INFO [train.py:901] (1/4) Epoch 9, batch 650, loss[loss=0.2485, simple_loss=0.3279, pruned_loss=0.08452, over 8471.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3233, pruned_loss=0.0907, over 1545863.21 frames. ], batch size: 25, lr: 8.75e-03, grad_scale: 16.0 +2023-02-06 06:38:58,620 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0 +2023-02-06 06:38:59,108 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,303 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,871 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.656e+02 3.252e+02 4.080e+02 6.220e+02, threshold=6.503e+02, percent-clipped=0.0 +2023-02-06 06:39:17,065 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:29,074 INFO [train.py:901] (1/4) Epoch 9, batch 700, loss[loss=0.2224, simple_loss=0.297, pruned_loss=0.07396, over 7665.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3212, pruned_loss=0.08908, over 1560465.75 frames. ], batch size: 19, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:39:41,003 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65381.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:57,977 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:03,854 INFO [train.py:901] (1/4) Epoch 9, batch 750, loss[loss=0.239, simple_loss=0.3225, pruned_loss=0.07779, over 8452.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3226, pruned_loss=0.09004, over 1569089.02 frames. ], batch size: 27, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:05,336 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:10,998 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-06 06:40:18,170 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:19,959 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.803e+02 3.527e+02 4.474e+02 1.505e+03, threshold=7.053e+02, percent-clipped=7.0 +2023-02-06 06:40:21,305 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 06:40:25,581 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.50 vs. limit=5.0 +2023-02-06 06:40:30,033 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 06:40:30,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:36,146 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65461.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:38,585 INFO [train.py:901] (1/4) Epoch 9, batch 800, loss[loss=0.1912, simple_loss=0.2703, pruned_loss=0.05602, over 7804.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.323, pruned_loss=0.08966, over 1583854.85 frames. 
], batch size: 19, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:47,458 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:53,603 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65485.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:58,219 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1121, 2.3097, 1.8711, 2.9768, 1.3960, 1.5023, 2.0000, 2.4720], + device='cuda:1'), covar=tensor([0.0835, 0.0971, 0.1106, 0.0397, 0.1283, 0.1808, 0.1148, 0.0760], + device='cuda:1'), in_proj_covar=tensor([0.0247, 0.0226, 0.0270, 0.0219, 0.0226, 0.0263, 0.0266, 0.0228], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 06:41:05,668 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:09,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3763, 1.9361, 3.1437, 2.3365, 2.6251, 2.1399, 1.6440, 1.3375], + device='cuda:1'), covar=tensor([0.3234, 0.3345, 0.0842, 0.2220, 0.1769, 0.1690, 0.1480, 0.3616], + device='cuda:1'), in_proj_covar=tensor([0.0855, 0.0810, 0.0700, 0.0803, 0.0895, 0.0751, 0.0685, 0.0729], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:41:10,492 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:13,591 INFO [train.py:901] (1/4) Epoch 9, batch 850, loss[loss=0.3084, simple_loss=0.3626, pruned_loss=0.1272, over 8466.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3222, pruned_loss=0.08967, over 1585371.43 frames. ], batch size: 25, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:41:17,141 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1201, 1.2534, 1.1407, 0.6278, 1.2484, 0.9734, 0.1536, 1.2131], + device='cuda:1'), covar=tensor([0.0168, 0.0147, 0.0132, 0.0218, 0.0169, 0.0415, 0.0341, 0.0139], + device='cuda:1'), in_proj_covar=tensor([0.0365, 0.0290, 0.0241, 0.0350, 0.0282, 0.0447, 0.0336, 0.0319], + device='cuda:1'), out_proj_covar=tensor([1.0988e-04, 8.4788e-05, 7.1140e-05, 1.0286e-04, 8.4558e-05, 1.4460e-04, + 1.0110e-04, 9.5098e-05], device='cuda:1') +2023-02-06 06:41:25,161 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:29,556 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.736e+02 3.271e+02 4.209e+02 1.110e+03, threshold=6.542e+02, percent-clipped=5.0 +2023-02-06 06:41:37,086 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65550.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:41:47,588 INFO [train.py:901] (1/4) Epoch 9, batch 900, loss[loss=0.2416, simple_loss=0.3238, pruned_loss=0.07971, over 8197.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3232, pruned_loss=0.08968, over 1593737.69 frames. 
], batch size: 23, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:42:12,150 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7406, 2.4377, 1.8878, 2.0493, 2.0981, 1.6471, 1.9430, 2.0117], + device='cuda:1'), covar=tensor([0.0898, 0.0291, 0.0661, 0.0427, 0.0452, 0.0951, 0.0597, 0.0698], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0237, 0.0312, 0.0300, 0.0304, 0.0320, 0.0341, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 06:42:23,173 INFO [train.py:901] (1/4) Epoch 9, batch 950, loss[loss=0.2351, simple_loss=0.3044, pruned_loss=0.08291, over 8372.00 frames. ], tot_loss[loss=0.251, simple_loss=0.323, pruned_loss=0.08945, over 1598351.92 frames. ], batch size: 24, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:42:39,187 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.498e+02 3.047e+02 4.041e+02 6.463e+02, threshold=6.094e+02, percent-clipped=0.0 +2023-02-06 06:42:44,700 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:42:50,355 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 06:42:56,309 INFO [train.py:901] (1/4) Epoch 9, batch 1000, loss[loss=0.2553, simple_loss=0.3365, pruned_loss=0.08709, over 8277.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3225, pruned_loss=0.08947, over 1601086.81 frames. ], batch size: 23, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:23,104 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 06:43:27,427 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:31,856 INFO [train.py:901] (1/4) Epoch 9, batch 1050, loss[loss=0.2271, simple_loss=0.2887, pruned_loss=0.08269, over 7685.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3213, pruned_loss=0.08862, over 1600768.95 frames. ], batch size: 18, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:35,709 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 06:43:44,967 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:47,993 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 2.877e+02 3.398e+02 4.338e+02 8.070e+02, threshold=6.796e+02, percent-clipped=6.0 +2023-02-06 06:44:00,654 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8031, 3.6989, 3.3995, 1.5347, 3.3679, 3.3798, 3.3847, 3.0744], + device='cuda:1'), covar=tensor([0.1002, 0.0826, 0.1143, 0.5133, 0.0964, 0.1208, 0.1507, 0.1129], + device='cuda:1'), in_proj_covar=tensor([0.0444, 0.0354, 0.0368, 0.0465, 0.0363, 0.0345, 0.0361, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 06:44:03,475 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:05,257 INFO [train.py:901] (1/4) Epoch 9, batch 1100, loss[loss=0.2617, simple_loss=0.3219, pruned_loss=0.1007, over 7973.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3219, pruned_loss=0.08902, over 1605497.21 frames. 
], batch size: 21, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:44:20,619 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:38,698 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:39,897 INFO [train.py:901] (1/4) Epoch 9, batch 1150, loss[loss=0.258, simple_loss=0.32, pruned_loss=0.09803, over 8236.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.322, pruned_loss=0.08973, over 1607587.03 frames. ], batch size: 22, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:44:44,633 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 06:44:56,767 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.552e+02 3.121e+02 3.966e+02 8.304e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-06 06:45:09,004 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65856.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:45:14,590 INFO [train.py:901] (1/4) Epoch 9, batch 1200, loss[loss=0.2681, simple_loss=0.3354, pruned_loss=0.1003, over 8446.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.323, pruned_loss=0.08995, over 1613402.30 frames. ], batch size: 27, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:45:33,719 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65894.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:45:47,642 INFO [train.py:901] (1/4) Epoch 9, batch 1250, loss[loss=0.2911, simple_loss=0.3529, pruned_loss=0.1147, over 8468.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3228, pruned_loss=0.08992, over 1614334.31 frames. ], batch size: 48, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:46:05,032 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.844e+02 3.477e+02 4.312e+02 8.167e+02, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 06:46:23,818 INFO [train.py:901] (1/4) Epoch 9, batch 1300, loss[loss=0.253, simple_loss=0.3229, pruned_loss=0.09161, over 8638.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3229, pruned_loss=0.09014, over 1616559.21 frames. 
], batch size: 34, lr: 8.70e-03, grad_scale: 16.0 +2023-02-06 06:46:25,399 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7025, 1.3386, 1.4989, 1.1819, 0.9308, 1.2871, 1.3750, 1.5745], + device='cuda:1'), covar=tensor([0.0541, 0.1222, 0.1608, 0.1375, 0.0567, 0.1476, 0.0726, 0.0600], + device='cuda:1'), in_proj_covar=tensor([0.0107, 0.0159, 0.0198, 0.0164, 0.0111, 0.0168, 0.0122, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:46:26,118 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5709, 1.9527, 2.0602, 1.2777, 2.2131, 1.4591, 0.7204, 1.7406], + device='cuda:1'), covar=tensor([0.0307, 0.0171, 0.0146, 0.0294, 0.0207, 0.0512, 0.0428, 0.0161], + device='cuda:1'), in_proj_covar=tensor([0.0364, 0.0293, 0.0241, 0.0354, 0.0281, 0.0449, 0.0337, 0.0323], + device='cuda:1'), out_proj_covar=tensor([1.0895e-04, 8.5781e-05, 7.0990e-05, 1.0369e-04, 8.4104e-05, 1.4484e-04, + 1.0126e-04, 9.6215e-05], device='cuda:1') +2023-02-06 06:46:43,342 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7104, 2.6284, 2.8453, 2.0349, 1.5468, 2.8425, 0.7715, 1.9064], + device='cuda:1'), covar=tensor([0.2310, 0.1508, 0.0649, 0.2629, 0.5018, 0.0559, 0.4594, 0.2633], + device='cuda:1'), in_proj_covar=tensor([0.0152, 0.0151, 0.0090, 0.0199, 0.0239, 0.0093, 0.0155, 0.0153], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 06:46:55,288 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66009.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:46:59,203 INFO [train.py:901] (1/4) Epoch 9, batch 1350, loss[loss=0.2737, simple_loss=0.3383, pruned_loss=0.1046, over 8591.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3227, pruned_loss=0.09025, over 1613241.64 frames. ], batch size: 31, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:47:01,384 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:17,553 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 2.557e+02 3.336e+02 4.233e+02 1.201e+03, threshold=6.672e+02, percent-clipped=8.0 +2023-02-06 06:47:19,768 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:35,102 INFO [train.py:901] (1/4) Epoch 9, batch 1400, loss[loss=0.2299, simple_loss=0.311, pruned_loss=0.0744, over 8132.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3229, pruned_loss=0.09029, over 1614671.30 frames. ], batch size: 22, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:47:57,581 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8068, 1.7528, 2.4058, 1.3628, 2.0763, 2.6553, 2.7004, 2.1665], + device='cuda:1'), covar=tensor([0.0974, 0.1216, 0.0956, 0.1882, 0.1511, 0.0456, 0.0674, 0.0908], + device='cuda:1'), in_proj_covar=tensor([0.0254, 0.0284, 0.0244, 0.0277, 0.0259, 0.0226, 0.0302, 0.0289], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 06:48:09,442 INFO [train.py:901] (1/4) Epoch 9, batch 1450, loss[loss=0.262, simple_loss=0.3268, pruned_loss=0.09866, over 8612.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3241, pruned_loss=0.09105, over 1615971.78 frames. 
], batch size: 34, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:12,160 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 06:48:21,933 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 06:48:26,154 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.633e+02 3.463e+02 4.686e+02 9.003e+02, threshold=6.925e+02, percent-clipped=5.0 +2023-02-06 06:48:42,245 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:48:44,177 INFO [train.py:901] (1/4) Epoch 9, batch 1500, loss[loss=0.2645, simple_loss=0.336, pruned_loss=0.09653, over 8615.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3234, pruned_loss=0.09003, over 1619364.61 frames. ], batch size: 31, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:52,880 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66178.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:49:08,955 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66200.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:49:18,750 INFO [train.py:901] (1/4) Epoch 9, batch 1550, loss[loss=0.2184, simple_loss=0.2972, pruned_loss=0.06976, over 7968.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3212, pruned_loss=0.08889, over 1616200.58 frames. ], batch size: 21, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:49:35,629 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.559e+02 2.942e+02 3.565e+02 7.942e+02, threshold=5.885e+02, percent-clipped=2.0 +2023-02-06 06:49:53,203 INFO [train.py:901] (1/4) Epoch 9, batch 1600, loss[loss=0.2066, simple_loss=0.2794, pruned_loss=0.06688, over 7710.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3228, pruned_loss=0.09, over 1620022.98 frames. ], batch size: 18, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:49:53,437 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66265.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:02,306 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:50:11,777 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66290.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:29,749 INFO [train.py:901] (1/4) Epoch 9, batch 1650, loss[loss=0.2429, simple_loss=0.3241, pruned_loss=0.08084, over 8201.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3215, pruned_loss=0.08853, over 1618990.20 frames. ], batch size: 23, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:50:29,934 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66315.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:46,542 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.536e+02 3.360e+02 4.258e+02 7.701e+02, threshold=6.719e+02, percent-clipped=5.0 +2023-02-06 06:50:53,612 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 06:51:03,473 INFO [train.py:901] (1/4) Epoch 9, batch 1700, loss[loss=0.2864, simple_loss=0.3577, pruned_loss=0.1075, over 8389.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3233, pruned_loss=0.08975, over 1621337.82 frames. 
], batch size: 49, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:51:12,076 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 06:51:18,054 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4776, 2.8535, 1.8194, 2.1340, 2.1548, 1.4225, 2.1128, 2.0081], + device='cuda:1'), covar=tensor([0.1360, 0.0279, 0.0970, 0.0693, 0.0737, 0.1319, 0.0842, 0.0903], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0232, 0.0312, 0.0299, 0.0305, 0.0318, 0.0341, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 06:51:26,496 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-06 06:51:39,916 INFO [train.py:901] (1/4) Epoch 9, batch 1750, loss[loss=0.2689, simple_loss=0.3551, pruned_loss=0.09141, over 8483.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3235, pruned_loss=0.08933, over 1623558.63 frames. ], batch size: 27, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:51:53,592 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2802, 1.2581, 1.4827, 1.2396, 0.8116, 1.3005, 1.2371, 1.1312], + device='cuda:1'), covar=tensor([0.0554, 0.1260, 0.1779, 0.1394, 0.0587, 0.1513, 0.0669, 0.0609], + device='cuda:1'), in_proj_covar=tensor([0.0106, 0.0159, 0.0200, 0.0164, 0.0111, 0.0169, 0.0123, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:51:57,456 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.942e+02 3.542e+02 4.261e+02 7.419e+02, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 06:52:13,916 INFO [train.py:901] (1/4) Epoch 9, batch 1800, loss[loss=0.1988, simple_loss=0.28, pruned_loss=0.05885, over 8094.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3233, pruned_loss=0.08942, over 1621407.38 frames. ], batch size: 21, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:42,187 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:52:48,995 INFO [train.py:901] (1/4) Epoch 9, batch 1850, loss[loss=0.2508, simple_loss=0.3023, pruned_loss=0.09961, over 7724.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.3214, pruned_loss=0.08837, over 1619366.92 frames. ], batch size: 18, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:53,698 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66522.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:05,495 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66539.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:05,989 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.848e+02 3.228e+02 4.154e+02 1.120e+03, threshold=6.457e+02, percent-clipped=1.0 +2023-02-06 06:53:22,984 INFO [train.py:901] (1/4) Epoch 9, batch 1900, loss[loss=0.2106, simple_loss=0.2818, pruned_loss=0.06966, over 7932.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3224, pruned_loss=0.0888, over 1624558.92 frames. 
], batch size: 20, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:27,127 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66571.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:43,854 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66596.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:47,139 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 06:53:56,561 INFO [train.py:901] (1/4) Epoch 9, batch 1950, loss[loss=0.2651, simple_loss=0.3406, pruned_loss=0.09478, over 8725.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3229, pruned_loss=0.0891, over 1623294.31 frames. ], batch size: 34, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:58,601 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 06:54:00,032 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:00,843 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:12,904 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66637.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:54:14,628 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.852e+02 3.410e+02 4.369e+02 9.021e+02, threshold=6.820e+02, percent-clipped=7.0 +2023-02-06 06:54:20,056 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 06:54:32,099 INFO [train.py:901] (1/4) Epoch 9, batch 2000, loss[loss=0.2104, simple_loss=0.2861, pruned_loss=0.06737, over 7811.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.321, pruned_loss=0.08811, over 1619237.25 frames. ], batch size: 20, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:55:06,976 INFO [train.py:901] (1/4) Epoch 9, batch 2050, loss[loss=0.2058, simple_loss=0.2937, pruned_loss=0.059, over 8478.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.321, pruned_loss=0.08808, over 1619963.37 frames. ], batch size: 25, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:55:08,490 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.51 vs. limit=5.0 +2023-02-06 06:55:11,895 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 06:55:20,375 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:55:23,644 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.024e+02 2.763e+02 3.349e+02 4.333e+02 1.017e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 06:55:31,185 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66749.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:55:34,842 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-06 06:55:42,441 INFO [train.py:901] (1/4) Epoch 9, batch 2100, loss[loss=0.2716, simple_loss=0.3406, pruned_loss=0.1013, over 8042.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3226, pruned_loss=0.08898, over 1620297.70 frames. 
], batch size: 22, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:17,439 INFO [train.py:901] (1/4) Epoch 9, batch 2150, loss[loss=0.2563, simple_loss=0.3088, pruned_loss=0.1019, over 7532.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3216, pruned_loss=0.08883, over 1616012.20 frames. ], batch size: 18, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:25,854 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7284, 1.4644, 1.5377, 1.3038, 1.2700, 1.5297, 2.2044, 2.2984], + device='cuda:1'), covar=tensor([0.0611, 0.1763, 0.2681, 0.1963, 0.0709, 0.2116, 0.0765, 0.0649], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0158, 0.0199, 0.0162, 0.0109, 0.0167, 0.0121, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:56:26,603 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 06:56:34,548 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.810e+02 3.362e+02 4.511e+02 1.000e+03, threshold=6.724e+02, percent-clipped=7.0 +2023-02-06 06:56:40,142 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9866, 2.1691, 1.8830, 2.7589, 1.3081, 1.4320, 1.9033, 2.2260], + device='cuda:1'), covar=tensor([0.0831, 0.0943, 0.1075, 0.0449, 0.1342, 0.1653, 0.1028, 0.0822], + device='cuda:1'), in_proj_covar=tensor([0.0247, 0.0223, 0.0264, 0.0218, 0.0224, 0.0260, 0.0264, 0.0229], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 06:56:49,315 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1879, 2.1836, 1.5713, 2.0144, 1.7467, 1.2728, 1.7520, 1.8660], + device='cuda:1'), covar=tensor([0.1134, 0.0329, 0.1126, 0.0453, 0.0693, 0.1408, 0.0786, 0.0696], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0236, 0.0316, 0.0300, 0.0308, 0.0323, 0.0344, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 06:56:53,078 INFO [train.py:901] (1/4) Epoch 9, batch 2200, loss[loss=0.264, simple_loss=0.3334, pruned_loss=0.09735, over 8196.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3221, pruned_loss=0.08949, over 1615837.16 frames. ], batch size: 23, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:01,068 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:05,627 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66883.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:05,935 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 06:57:12,258 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66893.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:18,133 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:18,322 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 06:57:24,871 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.01 vs. limit=5.0 +2023-02-06 06:57:27,004 INFO [train.py:901] (1/4) Epoch 9, batch 2250, loss[loss=0.2186, simple_loss=0.2839, pruned_loss=0.07665, over 7545.00 frames. 
], tot_loss[loss=0.2518, simple_loss=0.3227, pruned_loss=0.0904, over 1614762.70 frames. ], batch size: 18, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:29,191 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2584, 1.3767, 1.4360, 1.3053, 0.7652, 1.3764, 1.2274, 1.0168], + device='cuda:1'), covar=tensor([0.0556, 0.1155, 0.1637, 0.1300, 0.0548, 0.1421, 0.0600, 0.0632], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0158, 0.0198, 0.0162, 0.0109, 0.0167, 0.0120, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 06:57:29,219 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66918.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:57:43,696 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.716e+02 3.375e+02 4.203e+02 7.579e+02, threshold=6.750e+02, percent-clipped=1.0 +2023-02-06 06:58:00,326 INFO [train.py:901] (1/4) Epoch 9, batch 2300, loss[loss=0.251, simple_loss=0.3347, pruned_loss=0.08363, over 8337.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3213, pruned_loss=0.08928, over 1615723.68 frames. ], batch size: 26, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:58:01,250 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0999, 2.2096, 1.6151, 1.9838, 1.7049, 1.3015, 1.6955, 1.7491], + device='cuda:1'), covar=tensor([0.0968, 0.0347, 0.0966, 0.0410, 0.0564, 0.1242, 0.0749, 0.0587], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0236, 0.0314, 0.0299, 0.0305, 0.0319, 0.0342, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 06:58:07,517 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:08,933 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5478, 3.9391, 2.2982, 2.4713, 2.7527, 1.9453, 2.7581, 2.5934], + device='cuda:1'), covar=tensor([0.1488, 0.0223, 0.0809, 0.0774, 0.0617, 0.1143, 0.0838, 0.0975], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0237, 0.0314, 0.0300, 0.0306, 0.0320, 0.0343, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 06:58:19,750 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:24,436 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66998.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:58:36,101 INFO [train.py:901] (1/4) Epoch 9, batch 2350, loss[loss=0.2247, simple_loss=0.3019, pruned_loss=0.07375, over 7978.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3195, pruned_loss=0.08808, over 1615220.71 frames. ], batch size: 21, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:58:36,997 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:53,557 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.755e+02 3.236e+02 4.430e+02 1.005e+03, threshold=6.472e+02, percent-clipped=3.0 +2023-02-06 06:59:10,055 INFO [train.py:901] (1/4) Epoch 9, batch 2400, loss[loss=0.2472, simple_loss=0.3197, pruned_loss=0.08734, over 8235.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3202, pruned_loss=0.08834, over 1619707.28 frames. 
], batch size: 22, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:59:28,524 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67093.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:59:34,528 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:59:44,443 INFO [train.py:901] (1/4) Epoch 9, batch 2450, loss[loss=0.2341, simple_loss=0.311, pruned_loss=0.07856, over 8344.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3188, pruned_loss=0.08725, over 1617129.36 frames. ], batch size: 26, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 07:00:02,010 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.801e+02 2.787e+02 3.467e+02 4.148e+02 8.119e+02, threshold=6.934e+02, percent-clipped=3.0 +2023-02-06 07:00:18,498 INFO [train.py:901] (1/4) Epoch 9, batch 2500, loss[loss=0.2318, simple_loss=0.3163, pruned_loss=0.07365, over 8446.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3214, pruned_loss=0.0892, over 1619948.94 frames. ], batch size: 27, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:00:18,669 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1306, 1.6603, 1.5490, 1.3065, 1.0983, 1.3623, 1.7329, 1.7647], + device='cuda:1'), covar=tensor([0.0526, 0.1136, 0.1749, 0.1391, 0.0619, 0.1539, 0.0719, 0.0548], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0157, 0.0196, 0.0161, 0.0108, 0.0167, 0.0120, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 07:00:23,019 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.32 vs. limit=5.0 +2023-02-06 07:00:48,186 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67208.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:00:52,871 INFO [train.py:901] (1/4) Epoch 9, batch 2550, loss[loss=0.2339, simple_loss=0.3221, pruned_loss=0.07288, over 8480.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3217, pruned_loss=0.08885, over 1620045.67 frames. ], batch size: 29, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:12,346 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.679e+02 3.405e+02 4.203e+02 8.726e+02, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 07:01:21,884 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67254.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:01:29,617 INFO [train.py:901] (1/4) Epoch 9, batch 2600, loss[loss=0.2453, simple_loss=0.3092, pruned_loss=0.09069, over 7520.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3208, pruned_loss=0.0884, over 1617224.08 frames. 
], batch size: 18, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:36,607 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5405, 1.5884, 4.4155, 1.9962, 2.4216, 5.0379, 4.9651, 4.2906], + device='cuda:1'), covar=tensor([0.1073, 0.1714, 0.0255, 0.1946, 0.1007, 0.0184, 0.0346, 0.0591], + device='cuda:1'), in_proj_covar=tensor([0.0251, 0.0281, 0.0244, 0.0274, 0.0256, 0.0227, 0.0302, 0.0286], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:01:39,252 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67279.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:02:03,430 INFO [train.py:901] (1/4) Epoch 9, batch 2650, loss[loss=0.2276, simple_loss=0.2975, pruned_loss=0.07889, over 7806.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.32, pruned_loss=0.08812, over 1616745.14 frames. ], batch size: 19, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:02:06,282 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:02:11,466 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.02 vs. limit=5.0 +2023-02-06 07:02:19,993 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2828, 1.8219, 2.8709, 2.2572, 2.4082, 2.1166, 1.5590, 1.1379], + device='cuda:1'), covar=tensor([0.3294, 0.3555, 0.0909, 0.2137, 0.1727, 0.1888, 0.1620, 0.3663], + device='cuda:1'), in_proj_covar=tensor([0.0853, 0.0818, 0.0698, 0.0810, 0.0900, 0.0761, 0.0686, 0.0732], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:02:21,820 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.739e+02 3.376e+02 4.238e+02 9.756e+02, threshold=6.752e+02, percent-clipped=4.0 +2023-02-06 07:02:38,172 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4931, 1.4455, 1.6735, 1.3371, 0.9781, 1.7097, 0.1651, 1.1645], + device='cuda:1'), covar=tensor([0.2852, 0.1957, 0.0737, 0.2119, 0.5446, 0.0638, 0.4027, 0.2194], + device='cuda:1'), in_proj_covar=tensor([0.0155, 0.0154, 0.0093, 0.0205, 0.0244, 0.0095, 0.0154, 0.0155], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 07:02:39,314 INFO [train.py:901] (1/4) Epoch 9, batch 2700, loss[loss=0.2266, simple_loss=0.3022, pruned_loss=0.07545, over 8133.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3195, pruned_loss=0.08771, over 1617592.75 frames. ], batch size: 22, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:02:49,626 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3744, 1.6845, 1.7206, 1.0095, 1.7389, 1.2928, 0.3099, 1.5973], + device='cuda:1'), covar=tensor([0.0342, 0.0186, 0.0214, 0.0292, 0.0311, 0.0620, 0.0534, 0.0160], + device='cuda:1'), in_proj_covar=tensor([0.0370, 0.0294, 0.0246, 0.0359, 0.0285, 0.0446, 0.0339, 0.0323], + device='cuda:1'), out_proj_covar=tensor([1.1038e-04, 8.5350e-05, 7.2293e-05, 1.0518e-04, 8.4636e-05, 1.4294e-04, + 1.0141e-04, 9.5693e-05], device='cuda:1') +2023-02-06 07:03:13,103 INFO [train.py:901] (1/4) Epoch 9, batch 2750, loss[loss=0.2753, simple_loss=0.3345, pruned_loss=0.1081, over 7525.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3195, pruned_loss=0.08831, over 1614183.70 frames. 
], batch size: 18, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:03:15,954 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1557, 1.2845, 4.2290, 1.6847, 2.3039, 4.7856, 4.8936, 3.8464], + device='cuda:1'), covar=tensor([0.1224, 0.1880, 0.0347, 0.2108, 0.1056, 0.0290, 0.0449, 0.0851], + device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0283, 0.0244, 0.0276, 0.0257, 0.0226, 0.0303, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:03:26,091 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:29,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.867e+02 3.446e+02 4.196e+02 9.783e+02, threshold=6.892e+02, percent-clipped=3.0 +2023-02-06 07:03:34,270 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67445.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:48,020 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67464.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:03:48,380 INFO [train.py:901] (1/4) Epoch 9, batch 2800, loss[loss=0.3119, simple_loss=0.3748, pruned_loss=0.1245, over 8454.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3208, pruned_loss=0.08911, over 1614619.18 frames. ], batch size: 27, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:04:05,350 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67489.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:04:23,689 INFO [train.py:901] (1/4) Epoch 9, batch 2850, loss[loss=0.2238, simple_loss=0.3158, pruned_loss=0.0659, over 8322.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.321, pruned_loss=0.08943, over 1614768.03 frames. ], batch size: 25, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:04:34,554 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:40,481 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.765e+02 3.269e+02 4.105e+02 6.649e+02, threshold=6.538e+02, percent-clipped=0.0 +2023-02-06 07:04:55,076 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:58,183 INFO [train.py:901] (1/4) Epoch 9, batch 2900, loss[loss=0.2192, simple_loss=0.2892, pruned_loss=0.07466, over 7505.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3209, pruned_loss=0.08896, over 1618267.05 frames. ], batch size: 18, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:02,203 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 07:05:24,258 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 07:05:33,887 INFO [train.py:901] (1/4) Epoch 9, batch 2950, loss[loss=0.2514, simple_loss=0.323, pruned_loss=0.08988, over 8367.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3208, pruned_loss=0.08831, over 1616813.02 frames. 
], batch size: 48, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:39,065 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2621, 2.2089, 1.4953, 1.9384, 1.8631, 1.2809, 1.5774, 1.8100], + device='cuda:1'), covar=tensor([0.1029, 0.0291, 0.1008, 0.0440, 0.0530, 0.1240, 0.0861, 0.0760], + device='cuda:1'), in_proj_covar=tensor([0.0343, 0.0232, 0.0311, 0.0296, 0.0303, 0.0317, 0.0340, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 07:05:51,259 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.115e+02 2.827e+02 3.390e+02 4.435e+02 7.404e+02, threshold=6.780e+02, percent-clipped=4.0 +2023-02-06 07:06:08,216 INFO [train.py:901] (1/4) Epoch 9, batch 3000, loss[loss=0.263, simple_loss=0.3413, pruned_loss=0.09231, over 8322.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.321, pruned_loss=0.08772, over 1621039.05 frames. ], batch size: 25, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:08,217 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 07:06:20,341 INFO [train.py:935] (1/4) Epoch 9, validation: loss=0.1965, simple_loss=0.2957, pruned_loss=0.04864, over 944034.00 frames. +2023-02-06 07:06:20,342 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 07:06:37,422 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:43,349 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:52,180 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:55,505 INFO [train.py:901] (1/4) Epoch 9, batch 3050, loss[loss=0.2281, simple_loss=0.2962, pruned_loss=0.07998, over 7249.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3209, pruned_loss=0.08784, over 1617491.77 frames. ], batch size: 16, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:55,723 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:07:13,238 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.629e+02 3.194e+02 3.976e+02 7.575e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:07:29,777 INFO [train.py:901] (1/4) Epoch 9, batch 3100, loss[loss=0.2467, simple_loss=0.325, pruned_loss=0.08422, over 8503.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.3199, pruned_loss=0.08726, over 1617340.93 frames. ], batch size: 28, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:07:52,369 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.0155, 6.1261, 5.2382, 2.3677, 5.2254, 5.6342, 5.5467, 5.3106], + device='cuda:1'), covar=tensor([0.0565, 0.0366, 0.0802, 0.4252, 0.0682, 0.0638, 0.1083, 0.0634], + device='cuda:1'), in_proj_covar=tensor([0.0434, 0.0343, 0.0361, 0.0447, 0.0356, 0.0335, 0.0354, 0.0299], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:08:04,172 INFO [train.py:901] (1/4) Epoch 9, batch 3150, loss[loss=0.2209, simple_loss=0.3195, pruned_loss=0.06117, over 8509.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.3217, pruned_loss=0.08822, over 1619837.18 frames. 
], batch size: 26, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:05,040 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:21,147 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.202e+02 2.768e+02 3.401e+02 4.235e+02 8.418e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 07:08:21,987 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:37,988 INFO [train.py:901] (1/4) Epoch 9, batch 3200, loss[loss=0.2979, simple_loss=0.3507, pruned_loss=0.1226, over 8524.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3228, pruned_loss=0.08902, over 1621085.57 frames. ], batch size: 28, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:42,800 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2037, 1.4267, 1.5113, 1.3863, 1.0943, 1.3697, 1.6192, 1.5598], + device='cuda:1'), covar=tensor([0.0470, 0.1238, 0.1703, 0.1318, 0.0564, 0.1482, 0.0721, 0.0590], + device='cuda:1'), in_proj_covar=tensor([0.0106, 0.0158, 0.0197, 0.0162, 0.0110, 0.0168, 0.0121, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 07:08:44,730 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:09:12,191 INFO [train.py:901] (1/4) Epoch 9, batch 3250, loss[loss=0.3532, simple_loss=0.414, pruned_loss=0.1462, over 8481.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3216, pruned_loss=0.08881, over 1618192.29 frames. ], batch size: 27, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:09:29,470 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.709e+02 3.362e+02 4.203e+02 8.128e+02, threshold=6.724e+02, percent-clipped=5.0 +2023-02-06 07:09:46,666 INFO [train.py:901] (1/4) Epoch 9, batch 3300, loss[loss=0.2378, simple_loss=0.3074, pruned_loss=0.08411, over 7964.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3218, pruned_loss=0.08909, over 1617912.72 frames. ], batch size: 21, lr: 8.57e-03, grad_scale: 8.0 +2023-02-06 07:09:50,091 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7070, 5.8995, 5.0095, 2.0684, 5.0270, 5.5481, 5.4591, 4.9927], + device='cuda:1'), covar=tensor([0.0525, 0.0338, 0.0737, 0.4553, 0.0648, 0.0576, 0.0700, 0.0689], + device='cuda:1'), in_proj_covar=tensor([0.0439, 0.0347, 0.0360, 0.0454, 0.0360, 0.0340, 0.0356, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:10:04,401 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:10,457 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:22,541 INFO [train.py:901] (1/4) Epoch 9, batch 3350, loss[loss=0.2443, simple_loss=0.3201, pruned_loss=0.08427, over 8322.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3219, pruned_loss=0.08903, over 1616526.27 frames. 
], batch size: 25, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:10:39,231 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.493e+02 3.108e+02 4.287e+02 1.101e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-06 07:10:40,592 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:46,026 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.66 vs. limit=5.0 +2023-02-06 07:10:49,087 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:56,401 INFO [train.py:901] (1/4) Epoch 9, batch 3400, loss[loss=0.2524, simple_loss=0.3246, pruned_loss=0.09007, over 8359.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3215, pruned_loss=0.08857, over 1618852.90 frames. ], batch size: 24, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:11:01,258 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 07:11:24,317 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68105.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:11:25,610 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5224, 2.9929, 1.8140, 2.2137, 2.3860, 1.4828, 1.9977, 2.1609], + device='cuda:1'), covar=tensor([0.1367, 0.0288, 0.0985, 0.0653, 0.0587, 0.1400, 0.0976, 0.0987], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0234, 0.0310, 0.0295, 0.0305, 0.0320, 0.0342, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 07:11:30,827 INFO [train.py:901] (1/4) Epoch 9, batch 3450, loss[loss=0.2885, simple_loss=0.36, pruned_loss=0.1085, over 8195.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3219, pruned_loss=0.08865, over 1622834.03 frames. ], batch size: 23, lr: 8.56e-03, grad_scale: 16.0 +2023-02-06 07:11:48,136 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.585e+02 3.242e+02 3.955e+02 1.617e+03, threshold=6.484e+02, percent-clipped=7.0 +2023-02-06 07:11:59,844 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:05,792 INFO [train.py:901] (1/4) Epoch 9, batch 3500, loss[loss=0.2945, simple_loss=0.3673, pruned_loss=0.1109, over 8513.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3229, pruned_loss=0.0891, over 1623119.41 frames. ], batch size: 28, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:08,693 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:17,615 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.09 vs. limit=5.0 +2023-02-06 07:12:17,936 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 07:12:40,958 INFO [train.py:901] (1/4) Epoch 9, batch 3550, loss[loss=0.2104, simple_loss=0.2774, pruned_loss=0.07169, over 7930.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3218, pruned_loss=0.08848, over 1620546.54 frames. ], batch size: 20, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:48,010 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.23 vs. 
limit=2.0 +2023-02-06 07:12:58,946 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.838e+02 3.387e+02 4.304e+02 7.616e+02, threshold=6.774e+02, percent-clipped=6.0 +2023-02-06 07:13:02,583 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:14,900 INFO [train.py:901] (1/4) Epoch 9, batch 3600, loss[loss=0.3091, simple_loss=0.3682, pruned_loss=0.125, over 8492.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3231, pruned_loss=0.08949, over 1620115.11 frames. ], batch size: 28, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:13:19,091 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:49,592 INFO [train.py:901] (1/4) Epoch 9, batch 3650, loss[loss=0.2302, simple_loss=0.2967, pruned_loss=0.08186, over 7650.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3226, pruned_loss=0.08926, over 1621565.11 frames. ], batch size: 19, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:14:08,229 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.637e+02 3.214e+02 4.100e+02 7.421e+02, threshold=6.428e+02, percent-clipped=2.0 +2023-02-06 07:14:09,681 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:14:18,271 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 07:14:23,153 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3973, 2.5686, 1.7773, 2.1814, 2.0937, 1.5160, 1.8141, 1.9824], + device='cuda:1'), covar=tensor([0.1165, 0.0335, 0.0853, 0.0471, 0.0595, 0.1197, 0.0886, 0.0708], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0234, 0.0312, 0.0297, 0.0308, 0.0324, 0.0345, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 07:14:25,007 INFO [train.py:901] (1/4) Epoch 9, batch 3700, loss[loss=0.2108, simple_loss=0.2942, pruned_loss=0.06369, over 7973.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3206, pruned_loss=0.08834, over 1615455.67 frames. ], batch size: 21, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:14:40,538 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0031, 2.2944, 3.7908, 2.9393, 3.2260, 2.5484, 2.0345, 1.9349], + device='cuda:1'), covar=tensor([0.3001, 0.3725, 0.0882, 0.2090, 0.1778, 0.1785, 0.1452, 0.3712], + device='cuda:1'), in_proj_covar=tensor([0.0852, 0.0808, 0.0686, 0.0802, 0.0898, 0.0754, 0.0682, 0.0731], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 07:14:57,592 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:14:58,734 INFO [train.py:901] (1/4) Epoch 9, batch 3750, loss[loss=0.2467, simple_loss=0.3054, pruned_loss=0.09396, over 7232.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3204, pruned_loss=0.08857, over 1611503.15 frames. 
], batch size: 16, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:15:00,178 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:06,051 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:14,922 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:16,805 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.868e+02 3.639e+02 4.960e+02 1.282e+03, threshold=7.278e+02, percent-clipped=8.0 +2023-02-06 07:15:22,903 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68449.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:15:23,652 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:29,219 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:33,828 INFO [train.py:901] (1/4) Epoch 9, batch 3800, loss[loss=0.2707, simple_loss=0.3337, pruned_loss=0.1038, over 8607.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3215, pruned_loss=0.08926, over 1618773.90 frames. ], batch size: 31, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:16:07,820 INFO [train.py:901] (1/4) Epoch 9, batch 3850, loss[loss=0.2053, simple_loss=0.2701, pruned_loss=0.07023, over 7695.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3218, pruned_loss=0.08985, over 1616367.03 frames. ], batch size: 18, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:16:15,346 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5439, 1.8457, 2.0989, 1.2364, 2.2140, 1.4500, 0.7733, 1.7195], + device='cuda:1'), covar=tensor([0.0408, 0.0197, 0.0138, 0.0346, 0.0203, 0.0518, 0.0492, 0.0197], + device='cuda:1'), in_proj_covar=tensor([0.0372, 0.0295, 0.0249, 0.0358, 0.0288, 0.0445, 0.0340, 0.0321], + device='cuda:1'), out_proj_covar=tensor([1.1072e-04, 8.5281e-05, 7.3058e-05, 1.0468e-04, 8.5559e-05, 1.4219e-04, + 1.0140e-04, 9.4757e-05], device='cuda:1') +2023-02-06 07:16:25,012 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 07:16:25,654 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.582e+02 3.048e+02 3.724e+02 6.674e+02, threshold=6.096e+02, percent-clipped=0.0 +2023-02-06 07:16:27,990 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0621, 1.3396, 4.2635, 1.5119, 3.6718, 3.5588, 3.8087, 3.6847], + device='cuda:1'), covar=tensor([0.0532, 0.4276, 0.0468, 0.3450, 0.1164, 0.0838, 0.0526, 0.0622], + device='cuda:1'), in_proj_covar=tensor([0.0443, 0.0546, 0.0542, 0.0509, 0.0574, 0.0479, 0.0476, 0.0539], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 07:16:42,279 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68564.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:16:42,694 INFO [train.py:901] (1/4) Epoch 9, batch 3900, loss[loss=0.2746, simple_loss=0.3354, pruned_loss=0.1069, over 7644.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3216, pruned_loss=0.08961, over 1619235.47 frames. 
], batch size: 19, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:17:02,946 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1648, 2.1411, 2.8471, 1.7328, 2.4030, 3.1742, 3.0662, 2.8430], + device='cuda:1'), covar=tensor([0.0787, 0.0976, 0.0607, 0.1559, 0.1238, 0.0259, 0.0562, 0.0491], + device='cuda:1'), in_proj_covar=tensor([0.0255, 0.0285, 0.0246, 0.0276, 0.0262, 0.0228, 0.0304, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:17:04,304 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:17:17,702 INFO [train.py:901] (1/4) Epoch 9, batch 3950, loss[loss=0.2617, simple_loss=0.3421, pruned_loss=0.0907, over 8576.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3216, pruned_loss=0.08977, over 1615740.09 frames. ], batch size: 39, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:17:35,330 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.589e+02 3.045e+02 4.133e+02 1.084e+03, threshold=6.090e+02, percent-clipped=3.0 +2023-02-06 07:17:51,773 INFO [train.py:901] (1/4) Epoch 9, batch 4000, loss[loss=0.2476, simple_loss=0.3226, pruned_loss=0.08625, over 8100.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3215, pruned_loss=0.08999, over 1611278.50 frames. ], batch size: 23, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:18:00,059 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0 +2023-02-06 07:18:13,682 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.07 vs. limit=5.0 +2023-02-06 07:18:16,919 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 07:18:17,936 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68703.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:25,237 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68714.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:25,676 INFO [train.py:901] (1/4) Epoch 9, batch 4050, loss[loss=0.258, simple_loss=0.3284, pruned_loss=0.09385, over 7981.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.322, pruned_loss=0.09012, over 1613236.06 frames. ], batch size: 21, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:18:28,563 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0097, 2.2274, 4.0318, 2.7704, 3.2876, 2.4204, 1.9691, 1.6518], + device='cuda:1'), covar=tensor([0.3097, 0.3996, 0.0794, 0.2268, 0.1770, 0.2145, 0.1641, 0.4059], + device='cuda:1'), in_proj_covar=tensor([0.0853, 0.0814, 0.0697, 0.0808, 0.0904, 0.0758, 0.0687, 0.0731], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:18:42,617 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68739.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:43,696 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.478e+02 3.133e+02 3.692e+02 8.585e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 07:18:57,742 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:19:00,369 INFO [train.py:901] (1/4) Epoch 9, batch 4100, loss[loss=0.2091, simple_loss=0.286, pruned_loss=0.06614, over 7647.00 frames. 
], tot_loss[loss=0.2495, simple_loss=0.3211, pruned_loss=0.0889, over 1611661.47 frames. ], batch size: 19, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:19:34,811 INFO [train.py:901] (1/4) Epoch 9, batch 4150, loss[loss=0.2263, simple_loss=0.2999, pruned_loss=0.07637, over 7526.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3206, pruned_loss=0.08899, over 1609818.04 frames. ], batch size: 18, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:19:38,351 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68820.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:19:52,100 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.558e+02 3.576e+02 4.352e+02 8.740e+02, threshold=7.151e+02, percent-clipped=5.0 +2023-02-06 07:19:55,822 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68845.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:20:02,868 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 07:20:09,494 INFO [train.py:901] (1/4) Epoch 9, batch 4200, loss[loss=0.2268, simple_loss=0.2955, pruned_loss=0.07912, over 7690.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3203, pruned_loss=0.08881, over 1612872.95 frames. ], batch size: 18, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:20:12,586 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 07:20:17,185 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:20:23,114 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 07:20:44,838 INFO [train.py:901] (1/4) Epoch 9, batch 4250, loss[loss=0.2269, simple_loss=0.3007, pruned_loss=0.07652, over 7222.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3194, pruned_loss=0.08868, over 1609163.02 frames. ], batch size: 16, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:20:45,559 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 07:21:02,845 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:21:03,455 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.856e+02 3.701e+02 4.402e+02 9.379e+02, threshold=7.403e+02, percent-clipped=2.0 +2023-02-06 07:21:20,740 INFO [train.py:901] (1/4) Epoch 9, batch 4300, loss[loss=0.2204, simple_loss=0.2901, pruned_loss=0.07541, over 7670.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.32, pruned_loss=0.08909, over 1613407.08 frames. ], batch size: 19, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:21:55,039 INFO [train.py:901] (1/4) Epoch 9, batch 4350, loss[loss=0.2498, simple_loss=0.3133, pruned_loss=0.09315, over 7931.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3218, pruned_loss=0.09011, over 1618415.80 frames. ], batch size: 20, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:22:12,980 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.761e+02 3.203e+02 3.985e+02 6.558e+02, threshold=6.405e+02, percent-clipped=0.0 +2023-02-06 07:22:16,277 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 07:22:17,703 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:23,136 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:27,142 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:29,683 INFO [train.py:901] (1/4) Epoch 9, batch 4400, loss[loss=0.2909, simple_loss=0.3614, pruned_loss=0.1101, over 8427.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3222, pruned_loss=0.09046, over 1616449.43 frames. ], batch size: 49, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:22:30,553 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2959, 1.4093, 1.2096, 1.8665, 0.7079, 1.0841, 1.2022, 1.4322], + device='cuda:1'), covar=tensor([0.1038, 0.0925, 0.1250, 0.0569, 0.1353, 0.1703, 0.1026, 0.0829], + device='cuda:1'), in_proj_covar=tensor([0.0249, 0.0226, 0.0267, 0.0222, 0.0228, 0.0263, 0.0268, 0.0228], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 07:22:40,715 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0288, 1.9278, 2.8637, 1.7440, 2.3850, 3.1309, 3.0661, 2.7035], + device='cuda:1'), covar=tensor([0.0839, 0.1143, 0.0700, 0.1536, 0.1283, 0.0299, 0.0599, 0.0562], + device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0285, 0.0246, 0.0275, 0.0260, 0.0229, 0.0301, 0.0284], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:22:55,799 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 07:23:04,531 INFO [train.py:901] (1/4) Epoch 9, batch 4450, loss[loss=0.2095, simple_loss=0.2786, pruned_loss=0.07016, over 7533.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3211, pruned_loss=0.08966, over 1612911.39 frames. ], batch size: 18, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:23:16,661 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:22,374 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.746e+02 3.298e+02 3.852e+02 8.052e+02, threshold=6.596e+02, percent-clipped=4.0 +2023-02-06 07:23:33,872 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:37,258 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:39,014 INFO [train.py:901] (1/4) Epoch 9, batch 4500, loss[loss=0.2294, simple_loss=0.3192, pruned_loss=0.06977, over 8454.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3213, pruned_loss=0.08937, over 1613393.65 frames. ], batch size: 27, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:23:49,135 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 07:23:49,292 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:53,178 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:24:13,193 INFO [train.py:901] (1/4) Epoch 9, batch 4550, loss[loss=0.2403, simple_loss=0.3135, pruned_loss=0.08357, over 8356.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3202, pruned_loss=0.08871, over 1609067.82 frames. ], batch size: 24, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:24:31,950 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.522e+02 2.943e+02 3.743e+02 5.945e+02, threshold=5.886e+02, percent-clipped=0.0 +2023-02-06 07:24:33,347 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:24:33,984 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0839, 1.7897, 3.7461, 1.6437, 2.5203, 4.0219, 4.2867, 3.1446], + device='cuda:1'), covar=tensor([0.1446, 0.1781, 0.0488, 0.2493, 0.1128, 0.0471, 0.0453, 0.1110], + device='cuda:1'), in_proj_covar=tensor([0.0260, 0.0290, 0.0252, 0.0283, 0.0267, 0.0236, 0.0310, 0.0292], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:24:47,713 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69263.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:24:48,927 INFO [train.py:901] (1/4) Epoch 9, batch 4600, loss[loss=0.2116, simple_loss=0.2796, pruned_loss=0.07184, over 7172.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3201, pruned_loss=0.0885, over 1609850.49 frames. ], batch size: 16, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:25:07,550 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:22,106 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:24,593 INFO [train.py:901] (1/4) Epoch 9, batch 4650, loss[loss=0.2735, simple_loss=0.3501, pruned_loss=0.09847, over 8245.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3196, pruned_loss=0.08747, over 1612755.19 frames. ], batch size: 24, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:25:38,787 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:40,114 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:42,602 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.666e+02 3.298e+02 3.900e+02 8.712e+02, threshold=6.595e+02, percent-clipped=8.0 +2023-02-06 07:25:58,525 INFO [train.py:901] (1/4) Epoch 9, batch 4700, loss[loss=0.228, simple_loss=0.2937, pruned_loss=0.08114, over 7698.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3193, pruned_loss=0.08767, over 1612770.94 frames. 
], batch size: 18, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:26:14,067 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:26,888 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:31,849 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:33,682 INFO [train.py:901] (1/4) Epoch 9, batch 4750, loss[loss=0.227, simple_loss=0.3005, pruned_loss=0.07676, over 8292.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3187, pruned_loss=0.0873, over 1614068.23 frames. ], batch size: 23, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:26:35,879 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:43,651 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 07:26:44,850 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 07:26:47,353 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0585, 2.6842, 2.9543, 1.2454, 3.0446, 1.9503, 1.3470, 1.9741], + device='cuda:1'), covar=tensor([0.0509, 0.0187, 0.0167, 0.0449, 0.0270, 0.0502, 0.0528, 0.0255], + device='cuda:1'), in_proj_covar=tensor([0.0374, 0.0297, 0.0249, 0.0360, 0.0289, 0.0448, 0.0340, 0.0323], + device='cuda:1'), out_proj_covar=tensor([1.1117e-04, 8.5849e-05, 7.2697e-05, 1.0501e-04, 8.5473e-05, 1.4261e-04, + 1.0118e-04, 9.5352e-05], device='cuda:1') +2023-02-06 07:26:49,139 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 07:26:50,997 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 07:26:51,649 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.564e+02 3.173e+02 4.227e+02 9.736e+02, threshold=6.346e+02, percent-clipped=4.0 +2023-02-06 07:26:53,215 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:08,536 INFO [train.py:901] (1/4) Epoch 9, batch 4800, loss[loss=0.2147, simple_loss=0.3065, pruned_loss=0.06151, over 8500.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3203, pruned_loss=0.08815, over 1618814.69 frames. ], batch size: 26, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:27:38,647 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4166, 1.1903, 4.6710, 1.7787, 3.9899, 3.8103, 4.1583, 4.0612], + device='cuda:1'), covar=tensor([0.0687, 0.4552, 0.0451, 0.3144, 0.1171, 0.0814, 0.0569, 0.0683], + device='cuda:1'), in_proj_covar=tensor([0.0447, 0.0549, 0.0545, 0.0505, 0.0576, 0.0488, 0.0481, 0.0547], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 07:27:41,292 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 07:27:43,207 INFO [train.py:901] (1/4) Epoch 9, batch 4850, loss[loss=0.24, simple_loss=0.311, pruned_loss=0.08449, over 7970.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3211, pruned_loss=0.08849, over 1620792.96 frames. 
], batch size: 21, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:27:46,750 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:49,410 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:53,332 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69530.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:55,750 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 07:28:00,630 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.717e+02 3.193e+02 3.973e+02 8.915e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:28:17,597 INFO [train.py:901] (1/4) Epoch 9, batch 4900, loss[loss=0.2555, simple_loss=0.3326, pruned_loss=0.08921, over 8249.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.3209, pruned_loss=0.0887, over 1617074.06 frames. ], batch size: 24, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:28:33,124 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:28:47,471 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69607.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:28:53,430 INFO [train.py:901] (1/4) Epoch 9, batch 4950, loss[loss=0.2261, simple_loss=0.311, pruned_loss=0.07064, over 8765.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3197, pruned_loss=0.08855, over 1615971.20 frames. ], batch size: 30, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:29:06,609 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:09,357 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:10,455 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.734e+02 3.225e+02 4.131e+02 8.295e+02, threshold=6.450e+02, percent-clipped=5.0 +2023-02-06 07:29:13,216 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:27,256 INFO [train.py:901] (1/4) Epoch 9, batch 5000, loss[loss=0.2777, simple_loss=0.3279, pruned_loss=0.1138, over 7924.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3193, pruned_loss=0.08837, over 1615639.68 frames. ], batch size: 20, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:29:39,118 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:46,885 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69692.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:29:53,587 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:02,818 INFO [train.py:901] (1/4) Epoch 9, batch 5050, loss[loss=0.253, simple_loss=0.327, pruned_loss=0.08949, over 8465.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3194, pruned_loss=0.08803, over 1618072.01 frames. 
], batch size: 25, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:30:07,759 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69722.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:30:12,767 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:18,111 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 07:30:20,811 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.703e+02 3.249e+02 3.895e+02 8.845e+02, threshold=6.498e+02, percent-clipped=2.0 +2023-02-06 07:30:26,982 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:30,972 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:36,716 INFO [train.py:901] (1/4) Epoch 9, batch 5100, loss[loss=0.2125, simple_loss=0.2968, pruned_loss=0.06406, over 8289.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3212, pruned_loss=0.0887, over 1622468.60 frames. ], batch size: 23, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:30:44,225 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69776.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:59,038 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:01,898 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:11,534 INFO [train.py:901] (1/4) Epoch 9, batch 5150, loss[loss=0.1965, simple_loss=0.2725, pruned_loss=0.06023, over 7426.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3211, pruned_loss=0.08863, over 1621795.39 frames. ], batch size: 17, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:31:29,718 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 2.410e+02 3.240e+02 3.896e+02 9.119e+02, threshold=6.481e+02, percent-clipped=3.0 +2023-02-06 07:31:32,781 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69845.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:46,668 INFO [train.py:901] (1/4) Epoch 9, batch 5200, loss[loss=0.2168, simple_loss=0.2939, pruned_loss=0.06987, over 7931.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3203, pruned_loss=0.08807, over 1620736.15 frames. ], batch size: 20, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:31:50,870 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:06,754 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69895.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:11,378 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:12,139 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.44 vs. limit=5.0 +2023-02-06 07:32:17,775 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 07:32:20,229 INFO [train.py:901] (1/4) Epoch 9, batch 5250, loss[loss=0.2061, simple_loss=0.2854, pruned_loss=0.06339, over 7548.00 frames. 
], tot_loss[loss=0.2482, simple_loss=0.3199, pruned_loss=0.08827, over 1617152.66 frames. ], batch size: 18, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:32:23,774 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69920.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:23,901 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 07:32:27,862 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:38,260 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 2.909e+02 3.504e+02 4.160e+02 7.603e+02, threshold=7.007e+02, percent-clipped=5.0 +2023-02-06 07:32:50,718 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:55,306 INFO [train.py:901] (1/4) Epoch 9, batch 5300, loss[loss=0.2509, simple_loss=0.3309, pruned_loss=0.08551, over 8437.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3204, pruned_loss=0.08804, over 1621574.15 frames. ], batch size: 27, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:33:04,972 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69978.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:33:08,277 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:22,903 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70003.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:33:24,836 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:31,217 INFO [train.py:901] (1/4) Epoch 9, batch 5350, loss[loss=0.278, simple_loss=0.3508, pruned_loss=0.1026, over 8658.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3192, pruned_loss=0.08765, over 1619441.92 frames. ], batch size: 39, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:33:42,006 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:45,282 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70036.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:33:48,396 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.965e+02 3.484e+02 4.155e+02 9.515e+02, threshold=6.968e+02, percent-clipped=2.0 +2023-02-06 07:33:57,308 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:59,443 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-06 07:34:05,194 INFO [train.py:901] (1/4) Epoch 9, batch 5400, loss[loss=0.2493, simple_loss=0.3334, pruned_loss=0.08262, over 8509.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3207, pruned_loss=0.08886, over 1616176.76 frames. 
], batch size: 26, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:34:14,841 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:31,586 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:40,958 INFO [train.py:901] (1/4) Epoch 9, batch 5450, loss[loss=0.2548, simple_loss=0.3165, pruned_loss=0.09652, over 8338.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3208, pruned_loss=0.08849, over 1621195.68 frames. ], batch size: 26, lr: 8.44e-03, grad_scale: 8.0 +2023-02-06 07:34:48,364 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:49,039 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70127.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:49,622 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:58,807 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.682e+02 3.191e+02 4.046e+02 1.028e+03, threshold=6.382e+02, percent-clipped=4.0 +2023-02-06 07:35:04,353 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 07:35:05,827 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70151.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:35:06,516 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70152.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:35:15,866 INFO [train.py:901] (1/4) Epoch 9, batch 5500, loss[loss=0.2158, simple_loss=0.2926, pruned_loss=0.06948, over 8490.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3223, pruned_loss=0.08981, over 1622418.25 frames. ], batch size: 28, lr: 8.44e-03, grad_scale: 16.0 +2023-02-06 07:35:50,278 INFO [train.py:901] (1/4) Epoch 9, batch 5550, loss[loss=0.3076, simple_loss=0.3527, pruned_loss=0.1313, over 7792.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3221, pruned_loss=0.08967, over 1619230.21 frames. ], batch size: 19, lr: 8.44e-03, grad_scale: 16.0 +2023-02-06 07:36:07,873 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.484e+02 3.031e+02 3.937e+02 9.276e+02, threshold=6.062e+02, percent-clipped=2.0 +2023-02-06 07:36:14,989 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 07:36:24,758 INFO [train.py:901] (1/4) Epoch 9, batch 5600, loss[loss=0.2738, simple_loss=0.3548, pruned_loss=0.09635, over 8475.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3224, pruned_loss=0.08974, over 1622807.46 frames. ], batch size: 25, lr: 8.43e-03, grad_scale: 16.0 +2023-02-06 07:36:34,075 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:36:59,380 INFO [train.py:901] (1/4) Epoch 9, batch 5650, loss[loss=0.2383, simple_loss=0.3164, pruned_loss=0.08012, over 8299.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3225, pruned_loss=0.08968, over 1620723.14 frames. ], batch size: 23, lr: 8.43e-03, grad_scale: 8.0 +2023-02-06 07:37:08,094 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 07:37:18,049 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.609e+02 3.248e+02 4.005e+02 8.106e+02, threshold=6.497e+02, percent-clipped=5.0 +2023-02-06 07:37:32,959 INFO [train.py:901] (1/4) Epoch 9, batch 5700, loss[loss=0.2339, simple_loss=0.3184, pruned_loss=0.07473, over 7665.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3217, pruned_loss=0.08966, over 1611918.12 frames. ], batch size: 19, lr: 8.43e-03, grad_scale: 8.0 +2023-02-06 07:37:38,233 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8216, 3.0753, 2.1953, 3.8429, 1.8948, 1.8365, 2.2517, 3.3233], + device='cuda:1'), covar=tensor([0.0672, 0.0779, 0.1077, 0.0286, 0.1146, 0.1569, 0.1305, 0.0789], + device='cuda:1'), in_proj_covar=tensor([0.0244, 0.0221, 0.0262, 0.0217, 0.0222, 0.0259, 0.0266, 0.0227], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 07:37:55,589 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8151, 1.7340, 1.8773, 1.7802, 1.3165, 1.7952, 2.3148, 1.8648], + device='cuda:1'), covar=tensor([0.0442, 0.1112, 0.1540, 0.1241, 0.0559, 0.1315, 0.0572, 0.0548], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0159, 0.0198, 0.0163, 0.0108, 0.0167, 0.0121, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 07:38:02,682 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70407.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:38:08,022 INFO [train.py:901] (1/4) Epoch 9, batch 5750, loss[loss=0.2478, simple_loss=0.3075, pruned_loss=0.09406, over 7797.00 frames. ], tot_loss[loss=0.2505, simple_loss=0.3218, pruned_loss=0.08959, over 1614415.65 frames. ], batch size: 20, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:38:12,127 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 07:38:20,262 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70432.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:38:27,528 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.028e+02 2.898e+02 3.376e+02 4.229e+02 8.555e+02, threshold=6.753e+02, percent-clipped=3.0 +2023-02-06 07:38:31,872 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3711, 2.7297, 1.8342, 3.7315, 1.6986, 1.6296, 2.2559, 3.1269], + device='cuda:1'), covar=tensor([0.0882, 0.1009, 0.1324, 0.0259, 0.1427, 0.1682, 0.1229, 0.0841], + device='cuda:1'), in_proj_covar=tensor([0.0244, 0.0220, 0.0263, 0.0217, 0.0224, 0.0260, 0.0264, 0.0227], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 07:38:43,379 INFO [train.py:901] (1/4) Epoch 9, batch 5800, loss[loss=0.2755, simple_loss=0.3471, pruned_loss=0.102, over 8475.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3215, pruned_loss=0.08864, over 1616398.98 frames. ], batch size: 29, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:38:48,299 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:39:18,643 INFO [train.py:901] (1/4) Epoch 9, batch 5850, loss[loss=0.2572, simple_loss=0.3263, pruned_loss=0.09406, over 8079.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3189, pruned_loss=0.08721, over 1613138.44 frames. 
], batch size: 21, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:39:37,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.477e+02 3.501e+02 4.376e+02 8.995e+02, threshold=7.001e+02, percent-clipped=4.0 +2023-02-06 07:39:48,211 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5637, 2.1070, 3.7028, 2.6301, 2.9531, 2.4128, 1.8793, 1.5006], + device='cuda:1'), covar=tensor([0.3373, 0.4003, 0.0949, 0.2371, 0.1970, 0.1926, 0.1582, 0.4386], + device='cuda:1'), in_proj_covar=tensor([0.0853, 0.0819, 0.0703, 0.0805, 0.0900, 0.0761, 0.0683, 0.0733], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 07:39:53,321 INFO [train.py:901] (1/4) Epoch 9, batch 5900, loss[loss=0.2475, simple_loss=0.3247, pruned_loss=0.08517, over 7813.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3202, pruned_loss=0.08722, over 1618082.42 frames. ], batch size: 20, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:40:08,009 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:40:23,561 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9539, 1.9543, 2.3280, 1.7044, 1.2293, 2.4390, 0.3877, 1.5445], + device='cuda:1'), covar=tensor([0.2363, 0.1785, 0.0472, 0.2023, 0.5186, 0.0447, 0.4055, 0.1855], + device='cuda:1'), in_proj_covar=tensor([0.0159, 0.0160, 0.0091, 0.0210, 0.0248, 0.0096, 0.0161, 0.0158], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:40:27,233 INFO [train.py:901] (1/4) Epoch 9, batch 5950, loss[loss=0.2326, simple_loss=0.2892, pruned_loss=0.08799, over 7677.00 frames. ], tot_loss[loss=0.247, simple_loss=0.3199, pruned_loss=0.08709, over 1619106.96 frames. ], batch size: 18, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:40:32,699 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:40:45,950 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.730e+02 3.193e+02 3.849e+02 7.953e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 07:41:02,076 INFO [train.py:901] (1/4) Epoch 9, batch 6000, loss[loss=0.2788, simple_loss=0.3479, pruned_loss=0.1049, over 8336.00 frames. ], tot_loss[loss=0.247, simple_loss=0.32, pruned_loss=0.08702, over 1619452.98 frames. ], batch size: 25, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:41:02,077 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 07:41:08,245 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3266, 1.0964, 1.1670, 0.9714, 0.7347, 1.0809, 1.2010, 1.1620], + device='cuda:1'), covar=tensor([0.0468, 0.1089, 0.1473, 0.1137, 0.0508, 0.1243, 0.0566, 0.0485], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0158, 0.0198, 0.0162, 0.0108, 0.0166, 0.0121, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 07:41:14,592 INFO [train.py:935] (1/4) Epoch 9, validation: loss=0.1952, simple_loss=0.2947, pruned_loss=0.0479, over 944034.00 frames. 
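(Note: the two `train.py:935` entries in this log — at batch 3000 above and batch 6000 here — show the recipe pausing periodically, apparently every 3000 batches on this rank, to compute a held-out validation loss over the same 944034 frames; these entries are the easiest way to track convergence in logs like this one. Below is a minimal sketch for pulling them out of a saved log file. It is illustrative only, not part of icefall: the regex simply mirrors the line format visible above, and the function name and log path are hypothetical.

import re

# Matches e.g. "... INFO [train.py:935] (1/4) Epoch 9, validation:
#   loss=0.1965, simple_loss=0.2957, pruned_loss=0.04864, over 944034.00 frames."
VAL_RE = re.compile(
    r"Epoch (?P<epoch>\d+), validation: loss=(?P<loss>[\d.]+), "
    r"simple_loss=(?P<simple>[\d.]+), pruned_loss=(?P<pruned>[\d.]+)"
)

def validation_history(log_path):
    """Yield (epoch, loss, simple_loss, pruned_loss) for each validation pass."""
    with open(log_path) as f:
        for line in f:
            m = VAL_RE.search(line)
            if m:
                yield (int(m["epoch"]), float(m["loss"]),
                       float(m["simple"]), float(m["pruned"]))

# On this section of the log it would yield (9, 0.1965, 0.2957, 0.04864)
# at batch 3000 and (9, 0.1952, 0.2947, 0.0479) at batch 6000.
)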
+2023-02-06 07:41:14,593 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 07:41:33,920 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3060, 1.3891, 2.2631, 1.0774, 2.1833, 2.4280, 2.5217, 2.0510], + device='cuda:1'), covar=tensor([0.1070, 0.1181, 0.0521, 0.2020, 0.0646, 0.0410, 0.0674, 0.0822], + device='cuda:1'), in_proj_covar=tensor([0.0251, 0.0286, 0.0250, 0.0273, 0.0265, 0.0229, 0.0312, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:41:49,702 INFO [train.py:901] (1/4) Epoch 9, batch 6050, loss[loss=0.2314, simple_loss=0.302, pruned_loss=0.08042, over 8142.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.3185, pruned_loss=0.08686, over 1615516.10 frames. ], batch size: 22, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:41:55,399 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0 +2023-02-06 07:42:04,730 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:42:07,993 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.822e+02 3.602e+02 4.348e+02 1.269e+03, threshold=7.203e+02, percent-clipped=6.0 +2023-02-06 07:42:24,360 INFO [train.py:901] (1/4) Epoch 9, batch 6100, loss[loss=0.2613, simple_loss=0.3324, pruned_loss=0.09504, over 8450.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3196, pruned_loss=0.0879, over 1613460.96 frames. ], batch size: 27, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:42:25,259 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3482, 1.1520, 1.4317, 1.0667, 0.7939, 1.2078, 1.1881, 0.9029], + device='cuda:1'), covar=tensor([0.0575, 0.1318, 0.1808, 0.1525, 0.0634, 0.1616, 0.0711, 0.0734], + device='cuda:1'), in_proj_covar=tensor([0.0104, 0.0158, 0.0197, 0.0162, 0.0108, 0.0167, 0.0121, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 07:42:37,602 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2456, 1.5930, 1.5752, 0.7647, 1.6563, 1.2880, 0.2680, 1.4459], + device='cuda:1'), covar=tensor([0.0406, 0.0240, 0.0195, 0.0349, 0.0281, 0.0623, 0.0541, 0.0178], + device='cuda:1'), in_proj_covar=tensor([0.0382, 0.0304, 0.0250, 0.0363, 0.0291, 0.0455, 0.0347, 0.0331], + device='cuda:1'), out_proj_covar=tensor([1.1315e-04, 8.7876e-05, 7.2327e-05, 1.0547e-04, 8.5479e-05, 1.4409e-04, + 1.0294e-04, 9.7344e-05], device='cuda:1') +2023-02-06 07:42:42,094 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-06 07:42:55,120 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7115, 1.4853, 2.8073, 1.2135, 2.1170, 3.0493, 3.0226, 2.5827], + device='cuda:1'), covar=tensor([0.1011, 0.1380, 0.0403, 0.1920, 0.0783, 0.0284, 0.0518, 0.0693], + device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0288, 0.0251, 0.0275, 0.0267, 0.0230, 0.0313, 0.0292], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:42:56,500 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0783, 2.5503, 2.8965, 1.1767, 3.1823, 1.8169, 1.5906, 1.8476], + device='cuda:1'), covar=tensor([0.0448, 0.0191, 0.0165, 0.0431, 0.0212, 0.0491, 0.0465, 0.0291], + device='cuda:1'), in_proj_covar=tensor([0.0376, 0.0300, 0.0246, 0.0360, 0.0287, 0.0449, 0.0342, 0.0327], + device='cuda:1'), out_proj_covar=tensor([1.1129e-04, 8.6525e-05, 7.1326e-05, 1.0444e-04, 8.4251e-05, 1.4203e-04, + 1.0153e-04, 9.6391e-05], device='cuda:1') +2023-02-06 07:43:00,373 INFO [train.py:901] (1/4) Epoch 9, batch 6150, loss[loss=0.2298, simple_loss=0.288, pruned_loss=0.08581, over 7532.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3192, pruned_loss=0.08772, over 1614461.25 frames. ], batch size: 18, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:43:18,322 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.692e+02 3.232e+02 3.879e+02 7.941e+02, threshold=6.463e+02, percent-clipped=1.0 +2023-02-06 07:43:19,251 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:43:22,705 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2268, 1.9171, 1.7561, 1.7544, 1.2457, 1.7245, 2.3300, 2.1375], + device='cuda:1'), covar=tensor([0.0410, 0.1080, 0.1684, 0.1345, 0.0575, 0.1398, 0.0589, 0.0563], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0158, 0.0198, 0.0163, 0.0108, 0.0167, 0.0121, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 07:43:33,935 INFO [train.py:901] (1/4) Epoch 9, batch 6200, loss[loss=0.2557, simple_loss=0.3221, pruned_loss=0.09463, over 7813.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3198, pruned_loss=0.08803, over 1610424.05 frames. ], batch size: 20, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:43:36,218 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:44:04,565 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-06 07:44:09,711 INFO [train.py:901] (1/4) Epoch 9, batch 6250, loss[loss=0.2945, simple_loss=0.3615, pruned_loss=0.1137, over 8102.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.3192, pruned_loss=0.08778, over 1607039.77 frames. ], batch size: 23, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:44:28,171 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 07:44:28,454 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.792e+02 3.423e+02 4.432e+02 1.474e+03, threshold=6.847e+02, percent-clipped=7.0 +2023-02-06 07:44:42,861 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-06 07:44:43,938 INFO [train.py:901] (1/4) Epoch 9, batch 6300, loss[loss=0.2295, simple_loss=0.3106, pruned_loss=0.07421, over 8107.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3194, pruned_loss=0.0878, over 1602158.99 frames. ], batch size: 23, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:45:03,885 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:11,480 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9899, 2.3350, 1.6913, 3.0080, 1.3932, 1.5326, 1.9952, 2.5149], + device='cuda:1'), covar=tensor([0.0949, 0.0972, 0.1408, 0.0394, 0.1368, 0.1677, 0.1125, 0.0799], + device='cuda:1'), in_proj_covar=tensor([0.0249, 0.0225, 0.0266, 0.0220, 0.0227, 0.0262, 0.0268, 0.0227], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 07:45:16,121 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:19,447 INFO [train.py:901] (1/4) Epoch 9, batch 6350, loss[loss=0.1788, simple_loss=0.2556, pruned_loss=0.05096, over 7542.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3195, pruned_loss=0.0878, over 1601584.88 frames. ], batch size: 18, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:45:21,648 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:38,849 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.814e+02 3.293e+02 4.210e+02 8.338e+02, threshold=6.585e+02, percent-clipped=5.0 +2023-02-06 07:45:42,976 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:54,194 INFO [train.py:901] (1/4) Epoch 9, batch 6400, loss[loss=0.2496, simple_loss=0.3099, pruned_loss=0.09462, over 8087.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3202, pruned_loss=0.08845, over 1604886.48 frames. ], batch size: 21, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:46:28,849 INFO [train.py:901] (1/4) Epoch 9, batch 6450, loss[loss=0.249, simple_loss=0.3207, pruned_loss=0.08865, over 8369.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3212, pruned_loss=0.08908, over 1609609.63 frames. ], batch size: 24, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:46:48,393 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.655e+02 3.350e+02 4.272e+02 1.011e+03, threshold=6.701e+02, percent-clipped=3.0 +2023-02-06 07:47:03,601 INFO [train.py:901] (1/4) Epoch 9, batch 6500, loss[loss=0.2818, simple_loss=0.3535, pruned_loss=0.1051, over 8249.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3204, pruned_loss=0.08845, over 1611433.83 frames. ], batch size: 24, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:47:37,717 INFO [train.py:901] (1/4) Epoch 9, batch 6550, loss[loss=0.2622, simple_loss=0.3443, pruned_loss=0.09012, over 8325.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3197, pruned_loss=0.08787, over 1604811.45 frames. ], batch size: 25, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:47:50,050 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 07:47:58,022 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.500e+02 3.444e+02 4.178e+02 7.414e+02, threshold=6.887e+02, percent-clipped=1.0 +2023-02-06 07:48:10,532 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 07:48:13,164 INFO [train.py:901] (1/4) Epoch 9, batch 6600, loss[loss=0.2016, simple_loss=0.2782, pruned_loss=0.0625, over 7925.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.3191, pruned_loss=0.08778, over 1607437.70 frames. ], batch size: 20, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:48:22,791 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7745, 1.5215, 3.3660, 1.3428, 2.3204, 3.6066, 3.6149, 3.0533], + device='cuda:1'), covar=tensor([0.1081, 0.1501, 0.0298, 0.2062, 0.0895, 0.0237, 0.0342, 0.0611], + device='cuda:1'), in_proj_covar=tensor([0.0254, 0.0285, 0.0248, 0.0279, 0.0263, 0.0229, 0.0308, 0.0289], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:48:32,873 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.48 vs. limit=5.0 +2023-02-06 07:48:47,048 INFO [train.py:901] (1/4) Epoch 9, batch 6650, loss[loss=0.2434, simple_loss=0.3153, pruned_loss=0.08576, over 8099.00 frames. ], tot_loss[loss=0.2462, simple_loss=0.3186, pruned_loss=0.08694, over 1610194.93 frames. ], batch size: 23, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:49:05,719 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.641e+02 3.214e+02 4.234e+02 1.005e+03, threshold=6.427e+02, percent-clipped=4.0 +2023-02-06 07:49:12,673 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3978, 4.3076, 3.8725, 2.3615, 3.7893, 3.8850, 4.0654, 3.4867], + device='cuda:1'), covar=tensor([0.0753, 0.0631, 0.1032, 0.4344, 0.0750, 0.0905, 0.1231, 0.0975], + device='cuda:1'), in_proj_covar=tensor([0.0451, 0.0359, 0.0366, 0.0470, 0.0372, 0.0353, 0.0364, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:49:13,931 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:49:21,687 INFO [train.py:901] (1/4) Epoch 9, batch 6700, loss[loss=0.2568, simple_loss=0.3305, pruned_loss=0.09154, over 8338.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3194, pruned_loss=0.08761, over 1609313.75 frames. ], batch size: 26, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:49:41,043 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:49:56,528 INFO [train.py:901] (1/4) Epoch 9, batch 6750, loss[loss=0.2211, simple_loss=0.2869, pruned_loss=0.07769, over 7527.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3189, pruned_loss=0.08716, over 1608552.07 frames. ], batch size: 18, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:50:15,374 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 3.032e+02 3.821e+02 4.704e+02 1.129e+03, threshold=7.641e+02, percent-clipped=7.0 +2023-02-06 07:50:23,372 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-06 07:50:30,405 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2969, 1.5173, 2.2367, 1.2052, 1.5319, 1.5410, 1.4453, 1.4187], + device='cuda:1'), covar=tensor([0.1718, 0.1933, 0.0677, 0.3388, 0.1481, 0.2713, 0.1652, 0.1803], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0500, 0.0529, 0.0575, 0.0611, 0.0548, 0.0469, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:50:30,842 INFO [train.py:901] (1/4) Epoch 9, batch 6800, loss[loss=0.2716, simple_loss=0.3371, pruned_loss=0.103, over 8630.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3188, pruned_loss=0.08716, over 1609913.20 frames. ], batch size: 31, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:50:33,613 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:51:01,250 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:51:06,464 INFO [train.py:901] (1/4) Epoch 9, batch 6850, loss[loss=0.2497, simple_loss=0.3253, pruned_loss=0.0871, over 8475.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3197, pruned_loss=0.08825, over 1611101.04 frames. ], batch size: 25, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:51:14,494 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 07:51:25,240 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.628e+02 3.217e+02 4.054e+02 6.964e+02, threshold=6.433e+02, percent-clipped=0.0 +2023-02-06 07:51:40,035 INFO [train.py:901] (1/4) Epoch 9, batch 6900, loss[loss=0.2606, simple_loss=0.3436, pruned_loss=0.08877, over 8028.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3196, pruned_loss=0.08771, over 1613722.57 frames. ], batch size: 22, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:51:48,539 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2272, 1.4168, 3.0927, 1.1621, 2.1745, 3.3852, 3.6340, 2.4697], + device='cuda:1'), covar=tensor([0.1009, 0.1793, 0.0512, 0.2590, 0.1213, 0.0410, 0.0549, 0.1245], + device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0282, 0.0249, 0.0278, 0.0261, 0.0226, 0.0305, 0.0287], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:52:15,382 INFO [train.py:901] (1/4) Epoch 9, batch 6950, loss[loss=0.2262, simple_loss=0.2962, pruned_loss=0.07813, over 7661.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3205, pruned_loss=0.08846, over 1616175.53 frames. ], batch size: 19, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:52:23,475 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 07:52:29,644 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:52:35,519 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.622e+02 3.284e+02 3.978e+02 8.428e+02, threshold=6.567e+02, percent-clipped=2.0 +2023-02-06 07:52:50,437 INFO [train.py:901] (1/4) Epoch 9, batch 7000, loss[loss=0.2566, simple_loss=0.3298, pruned_loss=0.0917, over 8243.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3204, pruned_loss=0.08767, over 1619996.64 frames. 
], batch size: 24, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:53:19,913 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 07:53:21,191 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0 +2023-02-06 07:53:24,874 INFO [train.py:901] (1/4) Epoch 9, batch 7050, loss[loss=0.2534, simple_loss=0.3273, pruned_loss=0.08973, over 8581.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3202, pruned_loss=0.08742, over 1617098.80 frames. ], batch size: 39, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:53:32,436 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71725.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:53:45,154 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.886e+02 3.338e+02 4.007e+02 6.250e+02, threshold=6.676e+02, percent-clipped=0.0 +2023-02-06 07:53:48,781 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9119, 1.5895, 6.0263, 2.1329, 5.2433, 5.0324, 5.6399, 5.3887], + device='cuda:1'), covar=tensor([0.0522, 0.4609, 0.0413, 0.3231, 0.1189, 0.0849, 0.0448, 0.0533], + device='cuda:1'), in_proj_covar=tensor([0.0447, 0.0552, 0.0553, 0.0513, 0.0582, 0.0495, 0.0485, 0.0545], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 07:53:50,707 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:53:59,284 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:54:00,448 INFO [train.py:901] (1/4) Epoch 9, batch 7100, loss[loss=0.2609, simple_loss=0.3286, pruned_loss=0.09659, over 8287.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.3192, pruned_loss=0.08651, over 1616165.55 frames. ], batch size: 23, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:54:08,143 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 07:54:16,149 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:54:34,523 INFO [train.py:901] (1/4) Epoch 9, batch 7150, loss[loss=0.2372, simple_loss=0.3207, pruned_loss=0.07686, over 8178.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3186, pruned_loss=0.08609, over 1616015.13 frames. ], batch size: 23, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:54:54,748 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.560e+02 3.246e+02 4.043e+02 1.359e+03, threshold=6.493e+02, percent-clipped=7.0 +2023-02-06 07:55:10,759 INFO [train.py:901] (1/4) Epoch 9, batch 7200, loss[loss=0.2384, simple_loss=0.3043, pruned_loss=0.08624, over 7647.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3196, pruned_loss=0.08696, over 1614825.41 frames. ], batch size: 19, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:55:37,710 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-06 07:55:40,054 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4222, 1.4783, 1.6622, 1.3209, 0.8938, 1.6569, 0.1940, 1.1318], + device='cuda:1'), covar=tensor([0.2664, 0.1775, 0.0616, 0.1662, 0.4503, 0.0599, 0.3521, 0.1879], + device='cuda:1'), in_proj_covar=tensor([0.0157, 0.0157, 0.0092, 0.0205, 0.0244, 0.0095, 0.0154, 0.0155], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 07:55:41,084 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.85 vs. limit=5.0 +2023-02-06 07:55:43,959 INFO [train.py:901] (1/4) Epoch 9, batch 7250, loss[loss=0.1736, simple_loss=0.2547, pruned_loss=0.04625, over 7314.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.3199, pruned_loss=0.0871, over 1617743.00 frames. ], batch size: 16, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:55:58,954 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71937.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:56:02,879 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.597e+02 3.277e+02 3.984e+02 9.565e+02, threshold=6.554e+02, percent-clipped=6.0 +2023-02-06 07:56:19,573 INFO [train.py:901] (1/4) Epoch 9, batch 7300, loss[loss=0.3104, simple_loss=0.36, pruned_loss=0.1304, over 6757.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3195, pruned_loss=0.0868, over 1619012.33 frames. ], batch size: 71, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:56:28,820 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:56:54,487 INFO [train.py:901] (1/4) Epoch 9, batch 7350, loss[loss=0.258, simple_loss=0.3318, pruned_loss=0.09211, over 8359.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3206, pruned_loss=0.08743, over 1623717.33 frames. ], batch size: 24, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:57:02,503 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 07:57:04,267 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-06 07:57:13,529 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.925e+02 3.749e+02 4.804e+02 1.068e+03, threshold=7.499e+02, percent-clipped=9.0 +2023-02-06 07:57:22,479 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 07:57:30,035 INFO [train.py:901] (1/4) Epoch 9, batch 7400, loss[loss=0.3034, simple_loss=0.3651, pruned_loss=0.1208, over 8546.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.32, pruned_loss=0.08659, over 1625126.08 frames. 
], batch size: 49, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:57:40,384 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4709, 1.7882, 2.9040, 1.3194, 1.9899, 1.9004, 1.5411, 1.7925], + device='cuda:1'), covar=tensor([0.1597, 0.1886, 0.0563, 0.3459, 0.1455, 0.2499, 0.1619, 0.1902], + device='cuda:1'), in_proj_covar=tensor([0.0481, 0.0494, 0.0530, 0.0569, 0.0608, 0.0539, 0.0463, 0.0609], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 07:57:50,406 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:58:03,870 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 07:58:05,139 INFO [train.py:901] (1/4) Epoch 9, batch 7450, loss[loss=0.2813, simple_loss=0.3573, pruned_loss=0.1027, over 8472.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3203, pruned_loss=0.08714, over 1625218.82 frames. ], batch size: 25, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:58:23,972 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.410e+02 3.229e+02 3.860e+02 9.903e+02, threshold=6.459e+02, percent-clipped=1.0 +2023-02-06 07:58:26,826 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:58:38,697 INFO [train.py:901] (1/4) Epoch 9, batch 7500, loss[loss=0.2082, simple_loss=0.2807, pruned_loss=0.0679, over 7789.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3192, pruned_loss=0.0868, over 1623941.07 frames. ], batch size: 19, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:15,024 INFO [train.py:901] (1/4) Epoch 9, batch 7550, loss[loss=0.2534, simple_loss=0.3221, pruned_loss=0.09237, over 8071.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3192, pruned_loss=0.08682, over 1621418.65 frames. ], batch size: 21, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:33,698 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.819e+02 3.433e+02 4.309e+02 8.597e+02, threshold=6.865e+02, percent-clipped=4.0 +2023-02-06 07:59:48,157 INFO [train.py:901] (1/4) Epoch 9, batch 7600, loss[loss=0.2822, simple_loss=0.3491, pruned_loss=0.1077, over 8360.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3206, pruned_loss=0.08782, over 1617889.25 frames. ], batch size: 24, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:48,738 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 07:59:54,369 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-06 07:59:58,697 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:22,667 INFO [train.py:901] (1/4) Epoch 9, batch 7650, loss[loss=0.2355, simple_loss=0.3168, pruned_loss=0.07709, over 8189.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3205, pruned_loss=0.08783, over 1619011.73 frames. 
], batch size: 23, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:00:42,776 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.706e+02 3.178e+02 3.983e+02 6.818e+02, threshold=6.357e+02, percent-clipped=0.0 +2023-02-06 08:00:43,593 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:47,001 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:57,587 INFO [train.py:901] (1/4) Epoch 9, batch 7700, loss[loss=0.3303, simple_loss=0.3769, pruned_loss=0.1419, over 6851.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.319, pruned_loss=0.08705, over 1616520.53 frames. ], batch size: 71, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:03,922 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:09,242 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9008, 1.5682, 3.1407, 1.3176, 2.2954, 3.2642, 3.4047, 2.8099], + device='cuda:1'), covar=tensor([0.1002, 0.1486, 0.0339, 0.2098, 0.0839, 0.0333, 0.0541, 0.0717], + device='cuda:1'), in_proj_covar=tensor([0.0258, 0.0293, 0.0252, 0.0283, 0.0268, 0.0234, 0.0316, 0.0292], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:01:09,787 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 08:01:18,755 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:32,214 INFO [train.py:901] (1/4) Epoch 9, batch 7750, loss[loss=0.2422, simple_loss=0.3244, pruned_loss=0.07999, over 8468.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.319, pruned_loss=0.08658, over 1617841.16 frames. ], batch size: 29, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:42,786 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 08:01:53,031 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.670e+02 3.267e+02 4.054e+02 1.108e+03, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 08:02:08,196 INFO [train.py:901] (1/4) Epoch 9, batch 7800, loss[loss=0.2, simple_loss=0.2848, pruned_loss=0.05763, over 8240.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3189, pruned_loss=0.08648, over 1615535.97 frames. ], batch size: 22, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:02:12,663 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 08:02:22,373 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1850, 1.8750, 2.7996, 2.1966, 2.4388, 2.0508, 1.6140, 1.1950], + device='cuda:1'), covar=tensor([0.3706, 0.3715, 0.0958, 0.2285, 0.1808, 0.1997, 0.1641, 0.3848], + device='cuda:1'), in_proj_covar=tensor([0.0861, 0.0827, 0.0699, 0.0815, 0.0902, 0.0763, 0.0690, 0.0745], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:02:25,493 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:02:41,270 INFO [train.py:901] (1/4) Epoch 9, batch 7850, loss[loss=0.2375, simple_loss=0.3072, pruned_loss=0.08387, over 7922.00 frames. 
], tot_loss[loss=0.2444, simple_loss=0.3178, pruned_loss=0.08547, over 1614389.24 frames. ], batch size: 20, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:02:59,530 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.530e+02 3.201e+02 3.890e+02 8.475e+02, threshold=6.403e+02, percent-clipped=6.0 +2023-02-06 08:03:14,039 INFO [train.py:901] (1/4) Epoch 9, batch 7900, loss[loss=0.241, simple_loss=0.3173, pruned_loss=0.08239, over 7809.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.3173, pruned_loss=0.08575, over 1610438.86 frames. ], batch size: 20, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:03:14,273 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4693, 1.8376, 3.4510, 1.2181, 2.3881, 1.8364, 1.4729, 2.2502], + device='cuda:1'), covar=tensor([0.1710, 0.2301, 0.0788, 0.3919, 0.1754, 0.2902, 0.1889, 0.2482], + device='cuda:1'), in_proj_covar=tensor([0.0477, 0.0493, 0.0527, 0.0568, 0.0605, 0.0539, 0.0464, 0.0609], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:03:31,539 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7568, 3.7401, 3.4226, 1.5679, 3.3464, 3.3034, 3.4876, 3.1013], + device='cuda:1'), covar=tensor([0.0888, 0.0667, 0.1028, 0.5242, 0.0858, 0.1016, 0.1246, 0.0937], + device='cuda:1'), in_proj_covar=tensor([0.0449, 0.0353, 0.0367, 0.0467, 0.0368, 0.0349, 0.0360, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:03:39,489 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0556, 1.4467, 3.3555, 1.4110, 2.1675, 3.6949, 3.7372, 3.1059], + device='cuda:1'), covar=tensor([0.0966, 0.1500, 0.0373, 0.2098, 0.1098, 0.0272, 0.0490, 0.0706], + device='cuda:1'), in_proj_covar=tensor([0.0253, 0.0290, 0.0252, 0.0281, 0.0264, 0.0231, 0.0314, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:03:41,420 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:03:47,073 INFO [train.py:901] (1/4) Epoch 9, batch 7950, loss[loss=0.2318, simple_loss=0.3074, pruned_loss=0.07805, over 7911.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3179, pruned_loss=0.08576, over 1615918.52 frames. ], batch size: 20, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:04:05,347 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.498e+02 3.176e+02 4.184e+02 8.861e+02, threshold=6.353e+02, percent-clipped=6.0 +2023-02-06 08:04:11,439 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72652.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:19,757 INFO [train.py:901] (1/4) Epoch 9, batch 8000, loss[loss=0.2416, simple_loss=0.3227, pruned_loss=0.08021, over 8369.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3178, pruned_loss=0.08595, over 1615331.31 frames. 
], batch size: 49, lr: 8.29e-03, grad_scale: 8.0 +2023-02-06 08:04:27,790 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:34,986 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:52,985 INFO [train.py:901] (1/4) Epoch 9, batch 8050, loss[loss=0.2924, simple_loss=0.349, pruned_loss=0.1179, over 6895.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3167, pruned_loss=0.08654, over 1590111.34 frames. ], batch size: 71, lr: 8.29e-03, grad_scale: 8.0 +2023-02-06 08:05:11,531 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.635e+02 3.102e+02 3.711e+02 7.462e+02, threshold=6.205e+02, percent-clipped=1.0 +2023-02-06 08:05:25,695 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 08:05:31,303 INFO [train.py:901] (1/4) Epoch 10, batch 0, loss[loss=0.22, simple_loss=0.2946, pruned_loss=0.07268, over 8246.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2946, pruned_loss=0.07268, over 8246.00 frames. ], batch size: 22, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:05:31,303 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 08:05:43,259 INFO [train.py:935] (1/4) Epoch 10, validation: loss=0.1954, simple_loss=0.295, pruned_loss=0.0479, over 944034.00 frames. +2023-02-06 08:05:43,259 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 08:05:57,157 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 08:06:07,500 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 08:06:17,953 INFO [train.py:901] (1/4) Epoch 10, batch 50, loss[loss=0.2902, simple_loss=0.3449, pruned_loss=0.1178, over 7925.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3192, pruned_loss=0.08828, over 366393.45 frames. ], batch size: 20, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:06:21,761 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:06:31,232 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 08:06:49,408 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.716e+02 3.124e+02 3.887e+02 7.160e+02, threshold=6.248e+02, percent-clipped=5.0 +2023-02-06 08:06:52,302 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 08:06:52,960 INFO [train.py:901] (1/4) Epoch 10, batch 100, loss[loss=0.247, simple_loss=0.3212, pruned_loss=0.08642, over 8239.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3206, pruned_loss=0.08807, over 645289.54 frames. ], batch size: 24, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:07:03,813 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:07:22,311 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:07:30,273 INFO [train.py:901] (1/4) Epoch 10, batch 150, loss[loss=0.2419, simple_loss=0.3188, pruned_loss=0.08247, over 8140.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.3189, pruned_loss=0.08682, over 858361.05 frames. 
], batch size: 22, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:07:33,249 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2499, 1.8797, 3.0509, 2.4117, 2.7002, 2.0419, 1.6016, 1.3971], + device='cuda:1'), covar=tensor([0.3538, 0.3775, 0.0888, 0.2267, 0.1801, 0.2096, 0.1744, 0.3730], + device='cuda:1'), in_proj_covar=tensor([0.0865, 0.0828, 0.0700, 0.0817, 0.0905, 0.0763, 0.0689, 0.0744], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:07:36,683 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9315, 1.5806, 3.1608, 1.3776, 2.2149, 3.5330, 3.4893, 3.0086], + device='cuda:1'), covar=tensor([0.1125, 0.1564, 0.0421, 0.2221, 0.1010, 0.0270, 0.0536, 0.0666], + device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0289, 0.0251, 0.0279, 0.0264, 0.0230, 0.0312, 0.0285], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:07:40,079 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:08:01,152 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.670e+02 3.307e+02 4.288e+02 9.841e+02, threshold=6.614e+02, percent-clipped=3.0 +2023-02-06 08:08:04,550 INFO [train.py:901] (1/4) Epoch 10, batch 200, loss[loss=0.2825, simple_loss=0.3538, pruned_loss=0.1056, over 8775.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3185, pruned_loss=0.08667, over 1024182.55 frames. ], batch size: 34, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:08:29,432 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72982.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:08:41,007 INFO [train.py:901] (1/4) Epoch 10, batch 250, loss[loss=0.2884, simple_loss=0.3465, pruned_loss=0.1151, over 8248.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3183, pruned_loss=0.08607, over 1158039.68 frames. ], batch size: 22, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:08:47,831 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 08:08:56,851 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 08:09:02,436 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7115, 1.4168, 1.4906, 1.3325, 0.8998, 1.2967, 1.5014, 1.2216], + device='cuda:1'), covar=tensor([0.0571, 0.1216, 0.1715, 0.1349, 0.0584, 0.1521, 0.0699, 0.0649], + device='cuda:1'), in_proj_covar=tensor([0.0106, 0.0155, 0.0196, 0.0160, 0.0107, 0.0167, 0.0120, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 08:09:07,478 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 08:09:12,563 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.688e+02 3.158e+02 3.760e+02 5.735e+02, threshold=6.316e+02, percent-clipped=0.0 +2023-02-06 08:09:16,038 INFO [train.py:901] (1/4) Epoch 10, batch 300, loss[loss=0.2181, simple_loss=0.2811, pruned_loss=0.07754, over 8084.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3189, pruned_loss=0.08623, over 1261071.37 frames. 
], batch size: 21, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:09:23,775 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:09:40,902 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:09:51,612 INFO [train.py:901] (1/4) Epoch 10, batch 350, loss[loss=0.2263, simple_loss=0.3046, pruned_loss=0.07395, over 8101.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3206, pruned_loss=0.08698, over 1344765.89 frames. ], batch size: 23, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:09:55,111 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5642, 2.0000, 3.2447, 1.3059, 2.3411, 1.9516, 1.5724, 2.1558], + device='cuda:1'), covar=tensor([0.1600, 0.1995, 0.0766, 0.3673, 0.1489, 0.2576, 0.1726, 0.2164], + device='cuda:1'), in_proj_covar=tensor([0.0481, 0.0495, 0.0534, 0.0567, 0.0607, 0.0540, 0.0466, 0.0605], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:10:23,499 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 2.632e+02 3.058e+02 3.924e+02 7.931e+02, threshold=6.116e+02, percent-clipped=5.0 +2023-02-06 08:10:26,909 INFO [train.py:901] (1/4) Epoch 10, batch 400, loss[loss=0.301, simple_loss=0.3682, pruned_loss=0.117, over 8462.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3218, pruned_loss=0.0876, over 1407336.87 frames. ], batch size: 27, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:10:49,313 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5532, 2.7657, 1.8246, 2.1699, 2.1426, 1.5116, 1.9788, 2.1363], + device='cuda:1'), covar=tensor([0.1439, 0.0339, 0.1108, 0.0622, 0.0616, 0.1348, 0.0971, 0.0983], + device='cuda:1'), in_proj_covar=tensor([0.0343, 0.0231, 0.0310, 0.0297, 0.0300, 0.0318, 0.0335, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 08:11:01,347 INFO [train.py:901] (1/4) Epoch 10, batch 450, loss[loss=0.1895, simple_loss=0.2525, pruned_loss=0.06323, over 7699.00 frames. ], tot_loss[loss=0.249, simple_loss=0.322, pruned_loss=0.08798, over 1452227.97 frames. ], batch size: 18, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:11:33,881 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.629e+02 3.140e+02 3.877e+02 8.143e+02, threshold=6.279e+02, percent-clipped=4.0 +2023-02-06 08:11:35,981 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.3639, 1.3668, 5.7374, 2.2170, 4.5285, 4.6563, 5.3490, 5.2444], + device='cuda:1'), covar=tensor([0.0955, 0.7136, 0.0743, 0.4437, 0.2126, 0.1569, 0.0836, 0.0707], + device='cuda:1'), in_proj_covar=tensor([0.0452, 0.0566, 0.0562, 0.0524, 0.0594, 0.0510, 0.0496, 0.0559], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:11:37,159 INFO [train.py:901] (1/4) Epoch 10, batch 500, loss[loss=0.2719, simple_loss=0.3404, pruned_loss=0.1017, over 8337.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3209, pruned_loss=0.0877, over 1489094.03 frames. 
], batch size: 26, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:11:42,548 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:12:10,501 INFO [train.py:901] (1/4) Epoch 10, batch 550, loss[loss=0.2148, simple_loss=0.2935, pruned_loss=0.0681, over 7945.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.3199, pruned_loss=0.0869, over 1518608.27 frames. ], batch size: 20, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:12:18,663 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7399, 1.5236, 3.0544, 1.4343, 2.1328, 3.2604, 3.3146, 2.8060], + device='cuda:1'), covar=tensor([0.1081, 0.1457, 0.0428, 0.1917, 0.1015, 0.0286, 0.0554, 0.0638], + device='cuda:1'), in_proj_covar=tensor([0.0255, 0.0290, 0.0252, 0.0282, 0.0265, 0.0231, 0.0314, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:12:19,366 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:12:19,486 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5872, 1.9272, 3.0425, 2.3866, 2.7311, 2.2159, 1.8063, 1.3797], + device='cuda:1'), covar=tensor([0.2963, 0.3513, 0.0842, 0.2219, 0.1603, 0.1866, 0.1550, 0.3468], + device='cuda:1'), in_proj_covar=tensor([0.0860, 0.0831, 0.0701, 0.0817, 0.0907, 0.0763, 0.0690, 0.0744], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:12:23,932 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1351, 1.4565, 1.4836, 1.3115, 1.1145, 1.3680, 1.7093, 1.6311], + device='cuda:1'), covar=tensor([0.0505, 0.1240, 0.1828, 0.1437, 0.0594, 0.1539, 0.0707, 0.0594], + device='cuda:1'), in_proj_covar=tensor([0.0105, 0.0157, 0.0197, 0.0161, 0.0108, 0.0168, 0.0120, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 08:12:29,157 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73326.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:12:41,607 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.475e+02 3.100e+02 3.629e+02 1.040e+03, threshold=6.201e+02, percent-clipped=3.0 +2023-02-06 08:12:44,825 INFO [train.py:901] (1/4) Epoch 10, batch 600, loss[loss=0.2563, simple_loss=0.3165, pruned_loss=0.09808, over 7805.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3173, pruned_loss=0.08585, over 1536988.66 frames. ], batch size: 20, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:12:56,198 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 08:13:01,726 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73371.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:13:20,007 INFO [train.py:901] (1/4) Epoch 10, batch 650, loss[loss=0.2307, simple_loss=0.3085, pruned_loss=0.07651, over 8105.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3174, pruned_loss=0.08608, over 1550066.82 frames. 
], batch size: 23, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:13:50,088 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73441.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:13:51,167 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.848e+02 2.469e+02 3.040e+02 3.840e+02 6.530e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 08:13:54,587 INFO [train.py:901] (1/4) Epoch 10, batch 700, loss[loss=0.2401, simple_loss=0.3216, pruned_loss=0.0793, over 8546.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3168, pruned_loss=0.08546, over 1562854.18 frames. ], batch size: 28, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:14:31,475 INFO [train.py:901] (1/4) Epoch 10, batch 750, loss[loss=0.2503, simple_loss=0.327, pruned_loss=0.08678, over 8323.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3171, pruned_loss=0.08554, over 1574533.55 frames. ], batch size: 25, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:14:45,797 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 08:14:54,757 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 08:15:02,253 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.756e+02 3.307e+02 3.958e+02 8.111e+02, threshold=6.615e+02, percent-clipped=6.0 +2023-02-06 08:15:02,470 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0108, 2.4234, 1.9284, 2.8933, 1.4446, 1.6119, 1.7595, 2.4711], + device='cuda:1'), covar=tensor([0.0836, 0.0892, 0.1029, 0.0397, 0.1249, 0.1606, 0.1287, 0.0784], + device='cuda:1'), in_proj_covar=tensor([0.0246, 0.0221, 0.0264, 0.0220, 0.0223, 0.0260, 0.0267, 0.0228], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 08:15:05,716 INFO [train.py:901] (1/4) Epoch 10, batch 800, loss[loss=0.2456, simple_loss=0.3227, pruned_loss=0.08425, over 8464.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3174, pruned_loss=0.08545, over 1586140.99 frames. ], batch size: 27, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:15:17,220 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7375, 1.3494, 3.9537, 1.3770, 3.4775, 3.3801, 3.5679, 3.4788], + device='cuda:1'), covar=tensor([0.0651, 0.3858, 0.0613, 0.3313, 0.1350, 0.0942, 0.0612, 0.0703], + device='cuda:1'), in_proj_covar=tensor([0.0444, 0.0556, 0.0557, 0.0510, 0.0585, 0.0496, 0.0488, 0.0552], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:15:41,986 INFO [train.py:901] (1/4) Epoch 10, batch 850, loss[loss=0.262, simple_loss=0.3351, pruned_loss=0.0944, over 8735.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3177, pruned_loss=0.08583, over 1597564.35 frames. 
], batch size: 40, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:15:49,887 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:03,000 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73627.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:13,765 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.847e+02 3.470e+02 4.482e+02 1.720e+03, threshold=6.940e+02, percent-clipped=10.0 +2023-02-06 08:16:17,262 INFO [train.py:901] (1/4) Epoch 10, batch 900, loss[loss=0.2238, simple_loss=0.3083, pruned_loss=0.06965, over 8095.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3174, pruned_loss=0.08563, over 1594883.58 frames. ], batch size: 23, lr: 7.83e-03, grad_scale: 16.0 +2023-02-06 08:16:20,225 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73652.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:22,222 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73655.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:52,136 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73697.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:16:52,447 INFO [train.py:901] (1/4) Epoch 10, batch 950, loss[loss=0.3178, simple_loss=0.3446, pruned_loss=0.1455, over 7682.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.318, pruned_loss=0.08624, over 1601369.38 frames. ], batch size: 18, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:17:06,245 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.5677, 1.6429, 3.6618, 1.9830, 3.3209, 3.1307, 3.4060, 3.3159], + device='cuda:1'), covar=tensor([0.0534, 0.3270, 0.0722, 0.2696, 0.0948, 0.0809, 0.0484, 0.0558], + device='cuda:1'), in_proj_covar=tensor([0.0443, 0.0553, 0.0555, 0.0507, 0.0585, 0.0496, 0.0486, 0.0549], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:17:10,352 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73722.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:17:18,334 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 08:17:24,921 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.751e+02 3.323e+02 4.211e+02 1.163e+03, threshold=6.645e+02, percent-clipped=9.0 +2023-02-06 08:17:27,463 INFO [train.py:901] (1/4) Epoch 10, batch 1000, loss[loss=0.2233, simple_loss=0.3015, pruned_loss=0.07252, over 7926.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3172, pruned_loss=0.08627, over 1599451.77 frames. 
], batch size: 20, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:17:28,906 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:17:35,435 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3454, 1.3669, 4.4991, 1.7696, 3.9708, 3.7486, 4.0862, 3.9171], + device='cuda:1'), covar=tensor([0.0496, 0.4139, 0.0470, 0.3120, 0.1083, 0.0807, 0.0511, 0.0586], + device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0563, 0.0564, 0.0511, 0.0591, 0.0504, 0.0492, 0.0557], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:17:36,340 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.17 vs. limit=5.0 +2023-02-06 08:17:42,252 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:17:50,830 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 08:18:00,668 INFO [train.py:901] (1/4) Epoch 10, batch 1050, loss[loss=0.2932, simple_loss=0.3507, pruned_loss=0.1179, over 8423.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3174, pruned_loss=0.08651, over 1605832.51 frames. ], batch size: 48, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:18:01,396 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 08:18:34,181 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.634e+02 3.058e+02 3.903e+02 1.179e+03, threshold=6.116e+02, percent-clipped=2.0 +2023-02-06 08:18:36,807 INFO [train.py:901] (1/4) Epoch 10, batch 1100, loss[loss=0.2997, simple_loss=0.3666, pruned_loss=0.1164, over 8606.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3167, pruned_loss=0.08565, over 1611832.57 frames. ], batch size: 31, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:09,836 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 08:19:10,510 INFO [train.py:901] (1/4) Epoch 10, batch 1150, loss[loss=0.2463, simple_loss=0.3207, pruned_loss=0.08598, over 8139.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3171, pruned_loss=0.08522, over 1614600.99 frames. ], batch size: 22, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:42,447 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.356e+02 2.791e+02 3.726e+02 1.227e+03, threshold=5.583e+02, percent-clipped=4.0 +2023-02-06 08:19:45,139 INFO [train.py:901] (1/4) Epoch 10, batch 1200, loss[loss=0.2298, simple_loss=0.3054, pruned_loss=0.07709, over 8262.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3154, pruned_loss=0.08355, over 1612730.31 frames. ], batch size: 22, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:48,561 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73952.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:20,063 INFO [train.py:901] (1/4) Epoch 10, batch 1250, loss[loss=0.2707, simple_loss=0.3344, pruned_loss=0.1035, over 7927.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.316, pruned_loss=0.08425, over 1614481.02 frames. 
], batch size: 20, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:20:39,874 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:46,794 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0 +2023-02-06 08:20:51,572 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.519e+02 3.075e+02 3.983e+02 7.817e+02, threshold=6.150e+02, percent-clipped=4.0 +2023-02-06 08:20:54,942 INFO [train.py:901] (1/4) Epoch 10, batch 1300, loss[loss=0.2775, simple_loss=0.3483, pruned_loss=0.1033, over 7149.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3155, pruned_loss=0.0843, over 1611571.88 frames. ], batch size: 72, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:20:57,208 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:58,728 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 08:21:07,834 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:19,111 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:21,530 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.85 vs. limit=5.0 +2023-02-06 08:21:26,841 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:29,354 INFO [train.py:901] (1/4) Epoch 10, batch 1350, loss[loss=0.2159, simple_loss=0.2904, pruned_loss=0.07071, over 7704.00 frames. ], tot_loss[loss=0.243, simple_loss=0.3163, pruned_loss=0.08488, over 1612123.63 frames. ], batch size: 18, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:21:59,537 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.773e+02 3.448e+02 4.052e+02 8.675e+02, threshold=6.895e+02, percent-clipped=5.0 +2023-02-06 08:22:02,261 INFO [train.py:901] (1/4) Epoch 10, batch 1400, loss[loss=0.2363, simple_loss=0.3061, pruned_loss=0.08321, over 8137.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3164, pruned_loss=0.08512, over 1613194.86 frames. ], batch size: 22, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:22:25,376 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.48 vs. limit=5.0 +2023-02-06 08:22:38,034 INFO [train.py:901] (1/4) Epoch 10, batch 1450, loss[loss=0.2123, simple_loss=0.282, pruned_loss=0.07133, over 7799.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3154, pruned_loss=0.08398, over 1617248.31 frames. ], batch size: 19, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:22:41,677 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 08:22:45,869 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:23:09,188 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.838e+02 2.525e+02 3.045e+02 3.954e+02 1.310e+03, threshold=6.089e+02, percent-clipped=4.0 +2023-02-06 08:23:11,856 INFO [train.py:901] (1/4) Epoch 10, batch 1500, loss[loss=0.253, simple_loss=0.3301, pruned_loss=0.08798, over 8364.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3146, pruned_loss=0.08376, over 1613686.78 frames. 
], batch size: 24, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:23:18,344 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:23:18,741 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 08:23:19,294 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.50 vs. limit=5.0 +2023-02-06 08:23:19,838 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 08:23:46,603 INFO [train.py:901] (1/4) Epoch 10, batch 1550, loss[loss=0.2093, simple_loss=0.2812, pruned_loss=0.06875, over 7660.00 frames. ], tot_loss[loss=0.24, simple_loss=0.3137, pruned_loss=0.08316, over 1612928.77 frames. ], batch size: 19, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:24:05,702 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:24:15,645 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 08:24:19,871 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.540e+02 3.095e+02 3.981e+02 6.537e+02, threshold=6.190e+02, percent-clipped=3.0 +2023-02-06 08:24:22,714 INFO [train.py:901] (1/4) Epoch 10, batch 1600, loss[loss=0.2099, simple_loss=0.2807, pruned_loss=0.06956, over 7274.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3153, pruned_loss=0.08396, over 1616258.11 frames. ], batch size: 16, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:24:22,924 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:24:56,950 INFO [train.py:901] (1/4) Epoch 10, batch 1650, loss[loss=0.2609, simple_loss=0.336, pruned_loss=0.09289, over 8317.00 frames. ], tot_loss[loss=0.241, simple_loss=0.315, pruned_loss=0.08353, over 1614241.20 frames. ], batch size: 49, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:25:18,080 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74426.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:25:30,269 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.508e+02 3.008e+02 3.971e+02 8.483e+02, threshold=6.016e+02, percent-clipped=6.0 +2023-02-06 08:25:31,723 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7980, 2.2402, 1.6543, 2.6414, 1.1793, 1.2654, 1.7410, 2.0087], + device='cuda:1'), covar=tensor([0.0851, 0.0695, 0.1213, 0.0449, 0.1222, 0.1790, 0.1033, 0.0786], + device='cuda:1'), in_proj_covar=tensor([0.0241, 0.0216, 0.0261, 0.0217, 0.0221, 0.0255, 0.0261, 0.0222], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 08:25:32,864 INFO [train.py:901] (1/4) Epoch 10, batch 1700, loss[loss=0.2308, simple_loss=0.3014, pruned_loss=0.08006, over 7549.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3148, pruned_loss=0.08385, over 1610834.05 frames. 
], batch size: 18, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:25:44,197 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:00,777 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:05,899 INFO [train.py:901] (1/4) Epoch 10, batch 1750, loss[loss=0.2631, simple_loss=0.3389, pruned_loss=0.09367, over 8316.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3159, pruned_loss=0.08444, over 1613637.43 frames. ], batch size: 25, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:26:36,855 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74541.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:38,766 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.804e+02 3.517e+02 4.698e+02 1.546e+03, threshold=7.034e+02, percent-clipped=7.0 +2023-02-06 08:26:41,533 INFO [train.py:901] (1/4) Epoch 10, batch 1800, loss[loss=0.2325, simple_loss=0.303, pruned_loss=0.08099, over 7963.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.3159, pruned_loss=0.08455, over 1611798.24 frames. ], batch size: 21, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:27:14,907 INFO [train.py:901] (1/4) Epoch 10, batch 1850, loss[loss=0.233, simple_loss=0.3065, pruned_loss=0.07971, over 8362.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3151, pruned_loss=0.08451, over 1610602.40 frames. ], batch size: 24, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:27:17,786 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:27:25,276 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74613.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:27:46,972 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.251e+02 2.722e+02 3.219e+02 4.226e+02 1.097e+03, threshold=6.437e+02, percent-clipped=2.0 +2023-02-06 08:27:50,407 INFO [train.py:901] (1/4) Epoch 10, batch 1900, loss[loss=0.2705, simple_loss=0.3373, pruned_loss=0.1018, over 8669.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3161, pruned_loss=0.08487, over 1611227.67 frames. ], batch size: 39, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:28:13,881 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 08:28:25,266 INFO [train.py:901] (1/4) Epoch 10, batch 1950, loss[loss=0.2746, simple_loss=0.3406, pruned_loss=0.1043, over 6794.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3167, pruned_loss=0.08533, over 1613937.10 frames. ], batch size: 71, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:28:25,962 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 08:28:34,313 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.58 vs. limit=5.0 +2023-02-06 08:28:38,209 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:28:43,960 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 08:28:56,735 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.465e+02 3.030e+02 3.717e+02 6.494e+02, threshold=6.060e+02, percent-clipped=3.0 +2023-02-06 08:28:59,502 INFO [train.py:901] (1/4) Epoch 10, batch 2000, loss[loss=0.2786, simple_loss=0.3431, pruned_loss=0.107, over 8557.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.316, pruned_loss=0.08447, over 1615962.03 frames. ], batch size: 49, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:29:04,983 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9182, 3.9337, 2.3729, 2.5318, 2.6047, 1.8491, 2.7732, 2.9731], + device='cuda:1'), covar=tensor([0.1497, 0.0237, 0.0920, 0.0796, 0.0712, 0.1255, 0.0943, 0.0994], + device='cuda:1'), in_proj_covar=tensor([0.0344, 0.0233, 0.0312, 0.0298, 0.0305, 0.0318, 0.0337, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 08:29:34,151 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:29:34,591 INFO [train.py:901] (1/4) Epoch 10, batch 2050, loss[loss=0.2342, simple_loss=0.3101, pruned_loss=0.07914, over 8502.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3147, pruned_loss=0.08367, over 1614410.87 frames. ], batch size: 28, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:29:40,112 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5128, 1.8783, 1.8712, 1.2401, 1.9995, 1.3491, 0.4707, 1.5847], + device='cuda:1'), covar=tensor([0.0302, 0.0166, 0.0150, 0.0270, 0.0194, 0.0518, 0.0481, 0.0153], + device='cuda:1'), in_proj_covar=tensor([0.0376, 0.0309, 0.0266, 0.0367, 0.0296, 0.0456, 0.0349, 0.0334], + device='cuda:1'), out_proj_covar=tensor([1.1003e-04, 8.8041e-05, 7.6442e-05, 1.0546e-04, 8.6185e-05, 1.4293e-04, + 1.0245e-04, 9.7077e-05], device='cuda:1') +2023-02-06 08:29:50,349 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:30:04,620 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.471e+02 3.084e+02 4.282e+02 1.276e+03, threshold=6.169e+02, percent-clipped=5.0 +2023-02-06 08:30:07,347 INFO [train.py:901] (1/4) Epoch 10, batch 2100, loss[loss=0.2096, simple_loss=0.2977, pruned_loss=0.06078, over 8456.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3131, pruned_loss=0.08303, over 1616130.77 frames. ], batch size: 25, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:30:42,984 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 08:30:43,219 INFO [train.py:901] (1/4) Epoch 10, batch 2150, loss[loss=0.2221, simple_loss=0.2897, pruned_loss=0.07724, over 7532.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3127, pruned_loss=0.08281, over 1611211.61 frames. ], batch size: 18, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:31:02,634 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:13,920 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.626e+02 3.226e+02 3.775e+02 6.882e+02, threshold=6.451e+02, percent-clipped=1.0 +2023-02-06 08:31:16,699 INFO [train.py:901] (1/4) Epoch 10, batch 2200, loss[loss=0.2584, simple_loss=0.3209, pruned_loss=0.0979, over 7694.00 frames. 
], tot_loss[loss=0.24, simple_loss=0.3135, pruned_loss=0.08323, over 1613163.28 frames. ], batch size: 18, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:31:22,705 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:29,470 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4834, 2.0029, 3.1004, 2.4431, 2.7131, 2.1941, 1.7225, 1.4538], + device='cuda:1'), covar=tensor([0.3408, 0.3655, 0.1031, 0.2252, 0.1753, 0.2001, 0.1636, 0.3854], + device='cuda:1'), in_proj_covar=tensor([0.0868, 0.0841, 0.0718, 0.0825, 0.0917, 0.0775, 0.0696, 0.0756], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:31:33,572 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:39,088 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.57 vs. limit=5.0 +2023-02-06 08:31:39,394 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:50,364 INFO [train.py:901] (1/4) Epoch 10, batch 2250, loss[loss=0.3015, simple_loss=0.366, pruned_loss=0.1186, over 8251.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3133, pruned_loss=0.083, over 1612169.37 frames. ], batch size: 24, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:31:50,514 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1486, 1.1363, 3.3211, 1.0213, 2.8269, 2.8114, 2.9969, 2.8893], + device='cuda:1'), covar=tensor([0.0685, 0.3885, 0.0727, 0.3316, 0.1434, 0.0949, 0.0665, 0.0808], + device='cuda:1'), in_proj_covar=tensor([0.0447, 0.0560, 0.0560, 0.0514, 0.0587, 0.0506, 0.0493, 0.0557], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:31:50,579 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:32:04,484 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1846, 1.4523, 1.5642, 1.3391, 1.1108, 1.3306, 1.7952, 1.6858], + device='cuda:1'), covar=tensor([0.0512, 0.1353, 0.1802, 0.1478, 0.0573, 0.1615, 0.0704, 0.0608], + device='cuda:1'), in_proj_covar=tensor([0.0104, 0.0156, 0.0196, 0.0161, 0.0106, 0.0166, 0.0118, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 08:32:23,118 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.748e+02 3.468e+02 4.709e+02 1.048e+03, threshold=6.936e+02, percent-clipped=3.0 +2023-02-06 08:32:25,869 INFO [train.py:901] (1/4) Epoch 10, batch 2300, loss[loss=0.2096, simple_loss=0.2872, pruned_loss=0.06601, over 7258.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3134, pruned_loss=0.08304, over 1611988.30 frames. ], batch size: 16, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:32:42,307 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:32:59,565 INFO [train.py:901] (1/4) Epoch 10, batch 2350, loss[loss=0.2365, simple_loss=0.3237, pruned_loss=0.07468, over 8327.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3141, pruned_loss=0.08333, over 1610947.95 frames. 
], batch size: 25, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:33:01,993 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 08:33:33,026 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.526e+02 3.215e+02 4.182e+02 1.054e+03, threshold=6.430e+02, percent-clipped=5.0 +2023-02-06 08:33:35,803 INFO [train.py:901] (1/4) Epoch 10, batch 2400, loss[loss=0.22, simple_loss=0.2835, pruned_loss=0.07829, over 7783.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3153, pruned_loss=0.08406, over 1614306.27 frames. ], batch size: 19, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:34:08,760 INFO [train.py:901] (1/4) Epoch 10, batch 2450, loss[loss=0.1981, simple_loss=0.2749, pruned_loss=0.06066, over 7528.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.315, pruned_loss=0.08433, over 1610719.33 frames. ], batch size: 18, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:34:40,821 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.613e+02 3.092e+02 4.227e+02 1.037e+03, threshold=6.184e+02, percent-clipped=5.0 +2023-02-06 08:34:43,535 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:34:44,711 INFO [train.py:901] (1/4) Epoch 10, batch 2500, loss[loss=0.1932, simple_loss=0.2705, pruned_loss=0.05796, over 7792.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3144, pruned_loss=0.08376, over 1610098.57 frames. ], batch size: 19, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:00,231 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:11,667 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:18,083 INFO [train.py:901] (1/4) Epoch 10, batch 2550, loss[loss=0.239, simple_loss=0.3202, pruned_loss=0.07891, over 8029.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3152, pruned_loss=0.08418, over 1613711.06 frames. ], batch size: 22, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:33,987 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0633, 1.1940, 4.2999, 1.5906, 3.7166, 3.4886, 3.8271, 3.6874], + device='cuda:1'), covar=tensor([0.0495, 0.4200, 0.0452, 0.3238, 0.1141, 0.0802, 0.0553, 0.0630], + device='cuda:1'), in_proj_covar=tensor([0.0448, 0.0559, 0.0566, 0.0516, 0.0589, 0.0506, 0.0492, 0.0555], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:35:36,578 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:38,119 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:49,133 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.664e+02 3.245e+02 3.791e+02 6.757e+02, threshold=6.490e+02, percent-clipped=2.0 +2023-02-06 08:35:51,833 INFO [train.py:901] (1/4) Epoch 10, batch 2600, loss[loss=0.2282, simple_loss=0.3178, pruned_loss=0.06934, over 8456.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3155, pruned_loss=0.08469, over 1609253.91 frames. 
], batch size: 25, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:55,398 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:01,374 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7789, 4.2057, 2.6445, 2.7941, 2.7774, 1.8526, 2.8662, 3.2035], + device='cuda:1'), covar=tensor([0.1550, 0.0244, 0.0892, 0.0710, 0.0806, 0.1532, 0.1064, 0.0834], + device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0236, 0.0314, 0.0299, 0.0308, 0.0322, 0.0341, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 08:36:08,167 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8295, 2.4571, 4.7858, 1.3809, 3.1194, 2.3676, 1.8918, 2.9872], + device='cuda:1'), covar=tensor([0.1506, 0.2082, 0.0514, 0.3558, 0.1432, 0.2530, 0.1525, 0.1975], + device='cuda:1'), in_proj_covar=tensor([0.0479, 0.0498, 0.0532, 0.0566, 0.0605, 0.0544, 0.0464, 0.0605], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:36:19,401 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:27,353 INFO [train.py:901] (1/4) Epoch 10, batch 2650, loss[loss=0.2887, simple_loss=0.3586, pruned_loss=0.1094, over 8625.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3163, pruned_loss=0.08536, over 1607998.16 frames. ], batch size: 31, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:36:56,762 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:58,553 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.723e+02 3.413e+02 4.384e+02 8.455e+02, threshold=6.827e+02, percent-clipped=3.0 +2023-02-06 08:37:01,358 INFO [train.py:901] (1/4) Epoch 10, batch 2700, loss[loss=0.2553, simple_loss=0.3336, pruned_loss=0.08849, over 8716.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3155, pruned_loss=0.08462, over 1604851.44 frames. ], batch size: 30, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:37:37,687 INFO [train.py:901] (1/4) Epoch 10, batch 2750, loss[loss=0.2839, simple_loss=0.3487, pruned_loss=0.1096, over 8502.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3173, pruned_loss=0.08515, over 1611217.63 frames. 
], batch size: 26, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:37:47,101 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5272, 1.5584, 1.7280, 1.4580, 1.0896, 1.7884, 0.0693, 1.1376], + device='cuda:1'), covar=tensor([0.3100, 0.1640, 0.0564, 0.1554, 0.4377, 0.0555, 0.3474, 0.2235], + device='cuda:1'), in_proj_covar=tensor([0.0164, 0.0165, 0.0094, 0.0217, 0.0258, 0.0099, 0.0164, 0.0161], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:37:55,834 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2937, 1.9638, 2.9877, 2.3352, 2.6534, 2.0765, 1.6397, 1.4666], + device='cuda:1'), covar=tensor([0.3454, 0.3678, 0.1003, 0.2220, 0.1716, 0.2119, 0.1692, 0.3993], + device='cuda:1'), in_proj_covar=tensor([0.0863, 0.0840, 0.0711, 0.0823, 0.0920, 0.0779, 0.0697, 0.0755], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:38:01,676 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7806, 2.0334, 1.7481, 2.6600, 1.2581, 1.3849, 1.7683, 1.9889], + device='cuda:1'), covar=tensor([0.0931, 0.0984, 0.1219, 0.0408, 0.1349, 0.1688, 0.1061, 0.0875], + device='cuda:1'), in_proj_covar=tensor([0.0245, 0.0220, 0.0265, 0.0220, 0.0225, 0.0257, 0.0265, 0.0226], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 08:38:08,174 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.609e+02 3.111e+02 3.957e+02 1.084e+03, threshold=6.223e+02, percent-clipped=3.0 +2023-02-06 08:38:10,722 INFO [train.py:901] (1/4) Epoch 10, batch 2800, loss[loss=0.2763, simple_loss=0.3494, pruned_loss=0.1016, over 8498.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.3174, pruned_loss=0.08556, over 1612237.46 frames. ], batch size: 26, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:38:13,273 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 08:38:39,098 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:38:44,121 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.68 vs. limit=5.0 +2023-02-06 08:38:45,064 INFO [train.py:901] (1/4) Epoch 10, batch 2850, loss[loss=0.2338, simple_loss=0.3107, pruned_loss=0.07842, over 8127.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.317, pruned_loss=0.0849, over 1612672.17 frames. ], batch size: 22, lr: 7.73e-03, grad_scale: 8.0 +2023-02-06 08:39:04,152 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-06 08:39:09,363 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:16,267 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:17,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.670e+02 3.172e+02 3.749e+02 6.038e+02, threshold=6.343e+02, percent-clipped=0.0 +2023-02-06 08:39:20,068 INFO [train.py:901] (1/4) Epoch 10, batch 2900, loss[loss=0.2479, simple_loss=0.3331, pruned_loss=0.08128, over 8358.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.3176, pruned_loss=0.08557, over 1614469.66 frames. 
], batch size: 24, lr: 7.73e-03, grad_scale: 8.0 +2023-02-06 08:39:32,781 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:49,788 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 08:39:53,283 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:53,745 INFO [train.py:901] (1/4) Epoch 10, batch 2950, loss[loss=0.2427, simple_loss=0.3271, pruned_loss=0.07912, over 8203.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3167, pruned_loss=0.08506, over 1615443.09 frames. ], batch size: 23, lr: 7.73e-03, grad_scale: 16.0 +2023-02-06 08:39:58,777 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:11,379 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:26,685 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.480e+02 3.030e+02 3.596e+02 1.304e+03, threshold=6.060e+02, percent-clipped=4.0 +2023-02-06 08:40:28,962 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:29,496 INFO [train.py:901] (1/4) Epoch 10, batch 3000, loss[loss=0.2136, simple_loss=0.2868, pruned_loss=0.0702, over 8144.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3152, pruned_loss=0.08421, over 1614012.68 frames. ], batch size: 22, lr: 7.73e-03, grad_scale: 16.0 +2023-02-06 08:40:29,497 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 08:40:41,877 INFO [train.py:935] (1/4) Epoch 10, validation: loss=0.1918, simple_loss=0.2916, pruned_loss=0.04599, over 944034.00 frames. +2023-02-06 08:40:41,878 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 08:41:15,547 INFO [train.py:901] (1/4) Epoch 10, batch 3050, loss[loss=0.2179, simple_loss=0.2952, pruned_loss=0.07031, over 6332.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3154, pruned_loss=0.08445, over 1608253.47 frames. ], batch size: 14, lr: 7.72e-03, grad_scale: 16.0 +2023-02-06 08:41:47,924 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.518e+02 3.138e+02 4.468e+02 1.006e+03, threshold=6.276e+02, percent-clipped=13.0 +2023-02-06 08:41:50,032 INFO [train.py:901] (1/4) Epoch 10, batch 3100, loss[loss=0.1979, simple_loss=0.2821, pruned_loss=0.05683, over 8042.00 frames. ], tot_loss[loss=0.242, simple_loss=0.315, pruned_loss=0.08448, over 1603066.99 frames. ], batch size: 22, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:41:52,187 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2804, 1.5852, 4.4959, 1.6824, 3.8291, 3.7284, 4.0096, 3.8649], + device='cuda:1'), covar=tensor([0.0583, 0.3923, 0.0497, 0.3207, 0.1287, 0.0797, 0.0556, 0.0645], + device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0564, 0.0578, 0.0525, 0.0602, 0.0512, 0.0499, 0.0565], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:42:25,561 INFO [train.py:901] (1/4) Epoch 10, batch 3150, loss[loss=0.2446, simple_loss=0.3166, pruned_loss=0.08632, over 8034.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3169, pruned_loss=0.08568, over 1608365.25 frames. 
], batch size: 22, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:42:48,061 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6378, 1.3286, 4.8119, 1.7787, 4.1992, 3.9804, 4.3359, 4.2214], + device='cuda:1'), covar=tensor([0.0459, 0.4198, 0.0389, 0.3128, 0.0959, 0.0745, 0.0462, 0.0506], + device='cuda:1'), in_proj_covar=tensor([0.0455, 0.0562, 0.0572, 0.0521, 0.0599, 0.0510, 0.0497, 0.0560], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:42:57,481 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.584e+02 3.323e+02 3.941e+02 8.938e+02, threshold=6.646e+02, percent-clipped=3.0 +2023-02-06 08:42:59,544 INFO [train.py:901] (1/4) Epoch 10, batch 3200, loss[loss=0.2428, simple_loss=0.3238, pruned_loss=0.08089, over 8475.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3163, pruned_loss=0.08533, over 1604589.59 frames. ], batch size: 29, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:43:09,326 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75961.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:28,331 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:28,980 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8563, 3.8554, 2.3852, 2.6520, 2.8999, 1.8977, 2.8386, 2.9571], + device='cuda:1'), covar=tensor([0.1695, 0.0333, 0.0982, 0.0782, 0.0716, 0.1342, 0.1032, 0.1136], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0237, 0.0314, 0.0301, 0.0312, 0.0326, 0.0343, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 08:43:36,371 INFO [train.py:901] (1/4) Epoch 10, batch 3250, loss[loss=0.2256, simple_loss=0.3143, pruned_loss=0.06846, over 8110.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3159, pruned_loss=0.08462, over 1608241.17 frames. ], batch size: 23, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:43:41,259 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:57,878 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:44:04,546 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7292, 1.5511, 2.6400, 1.1691, 2.0014, 2.7998, 3.0712, 1.9929], + device='cuda:1'), covar=tensor([0.1252, 0.1394, 0.0585, 0.2403, 0.0951, 0.0500, 0.0646, 0.1290], + device='cuda:1'), in_proj_covar=tensor([0.0256, 0.0294, 0.0254, 0.0284, 0.0270, 0.0234, 0.0319, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:44:09,017 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.688e+02 3.300e+02 3.989e+02 9.835e+02, threshold=6.601e+02, percent-clipped=4.0 +2023-02-06 08:44:11,020 INFO [train.py:901] (1/4) Epoch 10, batch 3300, loss[loss=0.2341, simple_loss=0.3076, pruned_loss=0.08037, over 7706.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3154, pruned_loss=0.08442, over 1607341.34 frames. 
], batch size: 18, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:44:11,218 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3564, 2.6255, 1.8660, 2.1717, 2.2484, 1.4595, 2.0910, 2.2126], + device='cuda:1'), covar=tensor([0.1357, 0.0330, 0.0993, 0.0517, 0.0626, 0.1474, 0.0853, 0.0814], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0235, 0.0313, 0.0298, 0.0310, 0.0324, 0.0338, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 08:44:47,579 INFO [train.py:901] (1/4) Epoch 10, batch 3350, loss[loss=0.2476, simple_loss=0.3055, pruned_loss=0.0948, over 7527.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3154, pruned_loss=0.08427, over 1608681.01 frames. ], batch size: 18, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:44:58,400 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4802, 1.5052, 4.2515, 1.9525, 2.3222, 4.8177, 4.7341, 4.0719], + device='cuda:1'), covar=tensor([0.0949, 0.1600, 0.0312, 0.1871, 0.1134, 0.0180, 0.0348, 0.0588], + device='cuda:1'), in_proj_covar=tensor([0.0255, 0.0293, 0.0252, 0.0283, 0.0268, 0.0233, 0.0318, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:45:18,634 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.652e+02 3.239e+02 4.192e+02 7.352e+02, threshold=6.477e+02, percent-clipped=1.0 +2023-02-06 08:45:20,670 INFO [train.py:901] (1/4) Epoch 10, batch 3400, loss[loss=0.2662, simple_loss=0.33, pruned_loss=0.1012, over 8193.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.3155, pruned_loss=0.08451, over 1608553.51 frames. ], batch size: 23, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:45:55,814 INFO [train.py:901] (1/4) Epoch 10, batch 3450, loss[loss=0.2793, simple_loss=0.3381, pruned_loss=0.1103, over 8566.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3156, pruned_loss=0.08478, over 1607338.84 frames. ], batch size: 31, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:46:30,138 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.394e+02 3.045e+02 3.881e+02 9.338e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-06 08:46:32,227 INFO [train.py:901] (1/4) Epoch 10, batch 3500, loss[loss=0.2622, simple_loss=0.3326, pruned_loss=0.09589, over 8362.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3156, pruned_loss=0.0844, over 1612994.98 frames. ], batch size: 24, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:46:48,064 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 08:46:59,315 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76287.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:47:06,734 INFO [train.py:901] (1/4) Epoch 10, batch 3550, loss[loss=0.2377, simple_loss=0.2868, pruned_loss=0.09425, over 7712.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3158, pruned_loss=0.08485, over 1608426.94 frames. 
], batch size: 18, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:47:17,273 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:47:39,382 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76341.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:47:42,024 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 2.725e+02 3.470e+02 4.316e+02 7.747e+02, threshold=6.941e+02, percent-clipped=6.0 +2023-02-06 08:47:44,159 INFO [train.py:901] (1/4) Epoch 10, batch 3600, loss[loss=0.2557, simple_loss=0.33, pruned_loss=0.0907, over 8457.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3148, pruned_loss=0.08404, over 1613342.46 frames. ], batch size: 27, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:47:44,401 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5253, 1.9601, 2.0319, 1.2353, 2.1470, 1.3274, 0.7276, 1.7079], + device='cuda:1'), covar=tensor([0.0493, 0.0243, 0.0216, 0.0404, 0.0291, 0.0671, 0.0588, 0.0221], + device='cuda:1'), in_proj_covar=tensor([0.0375, 0.0308, 0.0264, 0.0370, 0.0298, 0.0458, 0.0353, 0.0336], + device='cuda:1'), out_proj_covar=tensor([1.0895e-04, 8.7400e-05, 7.5535e-05, 1.0599e-04, 8.6524e-05, 1.4287e-04, + 1.0330e-04, 9.7498e-05], device='cuda:1') +2023-02-06 08:48:18,378 INFO [train.py:901] (1/4) Epoch 10, batch 3650, loss[loss=0.1968, simple_loss=0.2821, pruned_loss=0.05573, over 7976.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3146, pruned_loss=0.08329, over 1618082.52 frames. ], batch size: 21, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:50,989 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.530e+02 3.057e+02 3.624e+02 8.995e+02, threshold=6.114e+02, percent-clipped=3.0 +2023-02-06 08:48:52,986 INFO [train.py:901] (1/4) Epoch 10, batch 3700, loss[loss=0.2171, simple_loss=0.3041, pruned_loss=0.06508, over 7803.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3142, pruned_loss=0.08312, over 1620679.70 frames. ], batch size: 20, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:54,935 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 08:49:11,482 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2257, 1.7813, 2.8854, 2.2901, 2.5403, 2.0740, 1.6294, 1.1244], + device='cuda:1'), covar=tensor([0.3791, 0.4027, 0.0949, 0.2380, 0.1824, 0.2241, 0.1796, 0.4149], + device='cuda:1'), in_proj_covar=tensor([0.0873, 0.0846, 0.0711, 0.0829, 0.0920, 0.0779, 0.0699, 0.0757], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:49:28,892 INFO [train.py:901] (1/4) Epoch 10, batch 3750, loss[loss=0.2397, simple_loss=0.316, pruned_loss=0.08171, over 8079.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3151, pruned_loss=0.08299, over 1623679.37 frames. ], batch size: 21, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:00,300 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 2.765e+02 3.416e+02 4.278e+02 1.031e+03, threshold=6.832e+02, percent-clipped=4.0 +2023-02-06 08:50:02,988 INFO [train.py:901] (1/4) Epoch 10, batch 3800, loss[loss=0.2859, simple_loss=0.3544, pruned_loss=0.1087, over 8126.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3146, pruned_loss=0.08286, over 1618901.52 frames. 
], batch size: 22, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:38,444 INFO [train.py:901] (1/4) Epoch 10, batch 3850, loss[loss=0.2206, simple_loss=0.3103, pruned_loss=0.06548, over 8261.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3146, pruned_loss=0.08321, over 1617718.85 frames. ], batch size: 24, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:50:53,209 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9582, 1.6769, 1.3550, 1.6158, 1.2541, 1.1256, 1.2371, 1.3783], + device='cuda:1'), covar=tensor([0.0989, 0.0434, 0.1068, 0.0473, 0.0719, 0.1381, 0.0845, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0341, 0.0234, 0.0309, 0.0295, 0.0303, 0.0322, 0.0337, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 08:50:59,040 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 08:51:00,491 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76631.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:51:09,918 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.563e+02 3.093e+02 4.191e+02 1.151e+03, threshold=6.187e+02, percent-clipped=5.0 +2023-02-06 08:51:11,982 INFO [train.py:901] (1/4) Epoch 10, batch 3900, loss[loss=0.2063, simple_loss=0.276, pruned_loss=0.06827, over 7645.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3134, pruned_loss=0.08268, over 1618257.44 frames. ], batch size: 19, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:51:17,435 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:51:38,044 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76685.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:51:48,063 INFO [train.py:901] (1/4) Epoch 10, batch 3950, loss[loss=0.2446, simple_loss=0.3103, pruned_loss=0.08948, over 7811.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3145, pruned_loss=0.08399, over 1614728.80 frames. ], batch size: 20, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:52:19,558 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.430e+02 3.097e+02 3.693e+02 7.444e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-06 08:52:20,445 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76746.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:52:21,615 INFO [train.py:901] (1/4) Epoch 10, batch 4000, loss[loss=0.2382, simple_loss=0.3174, pruned_loss=0.07955, over 8623.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3143, pruned_loss=0.08396, over 1610347.33 frames. ], batch size: 34, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:52:32,221 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-06 08:52:37,417 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:52:51,544 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1689, 1.7973, 2.7063, 2.1317, 2.3482, 2.0593, 1.6669, 0.9460], + device='cuda:1'), covar=tensor([0.3470, 0.3510, 0.0941, 0.2148, 0.1714, 0.2067, 0.1599, 0.3756], + device='cuda:1'), in_proj_covar=tensor([0.0865, 0.0838, 0.0707, 0.0823, 0.0912, 0.0773, 0.0693, 0.0750], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:52:56,078 INFO [train.py:901] (1/4) Epoch 10, batch 4050, loss[loss=0.2407, simple_loss=0.315, pruned_loss=0.08318, over 8443.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.3155, pruned_loss=0.08479, over 1609634.85 frames. ], batch size: 49, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:52:57,682 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76800.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:53:27,753 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.17 vs. limit=2.0 +2023-02-06 08:53:29,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.637e+02 3.294e+02 4.061e+02 9.505e+02, threshold=6.587e+02, percent-clipped=7.0 +2023-02-06 08:53:31,229 INFO [train.py:901] (1/4) Epoch 10, batch 4100, loss[loss=0.2385, simple_loss=0.3192, pruned_loss=0.07889, over 8332.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3167, pruned_loss=0.0855, over 1611277.40 frames. ], batch size: 25, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:53:31,386 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6611, 1.4256, 4.8352, 1.7381, 4.2888, 4.0772, 4.4289, 4.1951], + device='cuda:1'), covar=tensor([0.0422, 0.3913, 0.0333, 0.2922, 0.0842, 0.0718, 0.0400, 0.0554], + device='cuda:1'), in_proj_covar=tensor([0.0448, 0.0552, 0.0556, 0.0513, 0.0582, 0.0499, 0.0489, 0.0552], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:53:37,247 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:54:04,759 INFO [train.py:901] (1/4) Epoch 10, batch 4150, loss[loss=0.2674, simple_loss=0.3449, pruned_loss=0.09493, over 8498.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3147, pruned_loss=0.08429, over 1608914.32 frames. 
], batch size: 26, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:54:15,625 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1921, 1.1344, 3.3377, 0.9878, 2.8849, 2.8329, 3.0365, 2.9044], + device='cuda:1'), covar=tensor([0.0699, 0.3625, 0.0702, 0.3132, 0.1396, 0.1018, 0.0668, 0.0904], + device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0555, 0.0559, 0.0515, 0.0585, 0.0501, 0.0490, 0.0557], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 08:54:38,768 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.505e+02 2.967e+02 3.617e+02 8.554e+02, threshold=5.933e+02, percent-clipped=2.0 +2023-02-06 08:54:39,589 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7944, 3.6751, 3.4206, 1.8236, 3.2978, 3.3231, 3.4773, 3.0633], + device='cuda:1'), covar=tensor([0.0987, 0.0828, 0.1174, 0.4938, 0.1018, 0.1078, 0.1446, 0.1090], + device='cuda:1'), in_proj_covar=tensor([0.0451, 0.0362, 0.0372, 0.0471, 0.0366, 0.0357, 0.0364, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:54:40,856 INFO [train.py:901] (1/4) Epoch 10, batch 4200, loss[loss=0.2038, simple_loss=0.2779, pruned_loss=0.06482, over 7423.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3136, pruned_loss=0.08407, over 1602992.93 frames. ], batch size: 17, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:54:47,338 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.56 vs. limit=5.0 +2023-02-06 08:55:00,918 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 08:55:14,236 INFO [train.py:901] (1/4) Epoch 10, batch 4250, loss[loss=0.2596, simple_loss=0.3425, pruned_loss=0.0884, over 8527.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3135, pruned_loss=0.08353, over 1606676.16 frames. ], batch size: 28, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:17,204 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77002.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:55:23,834 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 08:55:29,245 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9896, 6.1187, 5.3553, 2.8341, 5.4267, 5.7561, 5.6253, 5.2740], + device='cuda:1'), covar=tensor([0.0552, 0.0440, 0.0847, 0.3974, 0.0644, 0.0653, 0.1165, 0.0592], + device='cuda:1'), in_proj_covar=tensor([0.0456, 0.0364, 0.0373, 0.0473, 0.0370, 0.0360, 0.0367, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:55:34,061 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77027.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:55:34,071 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:46,497 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.530e+02 3.131e+02 3.743e+02 6.568e+02, threshold=6.262e+02, percent-clipped=1.0 +2023-02-06 08:55:48,443 INFO [train.py:901] (1/4) Epoch 10, batch 4300, loss[loss=0.271, simple_loss=0.3359, pruned_loss=0.103, over 8286.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3146, pruned_loss=0.08397, over 1610538.89 frames. 
], batch size: 23, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:52,732 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:55,460 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77056.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:55:55,973 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2082, 4.1824, 3.7687, 1.8736, 3.7507, 3.6256, 3.8825, 3.3460], + device='cuda:1'), covar=tensor([0.0811, 0.0625, 0.1011, 0.4967, 0.0890, 0.0907, 0.1185, 0.0997], + device='cuda:1'), in_proj_covar=tensor([0.0455, 0.0362, 0.0371, 0.0473, 0.0368, 0.0359, 0.0367, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 08:56:12,954 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77081.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:56:23,977 INFO [train.py:901] (1/4) Epoch 10, batch 4350, loss[loss=0.2207, simple_loss=0.2879, pruned_loss=0.07676, over 7789.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.314, pruned_loss=0.08338, over 1610134.97 frames. ], batch size: 19, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:56:33,992 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:56:53,833 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 08:56:54,997 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.751e+02 3.331e+02 4.491e+02 1.022e+03, threshold=6.663e+02, percent-clipped=8.0 +2023-02-06 08:56:57,038 INFO [train.py:901] (1/4) Epoch 10, batch 4400, loss[loss=0.2391, simple_loss=0.3227, pruned_loss=0.07777, over 8112.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3134, pruned_loss=0.08253, over 1606923.40 frames. ], batch size: 23, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:57:27,970 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5555, 1.8955, 2.0788, 1.0904, 2.2034, 1.5201, 0.5259, 1.7637], + device='cuda:1'), covar=tensor([0.0383, 0.0211, 0.0173, 0.0347, 0.0215, 0.0598, 0.0558, 0.0168], + device='cuda:1'), in_proj_covar=tensor([0.0368, 0.0303, 0.0261, 0.0367, 0.0298, 0.0452, 0.0345, 0.0335], + device='cuda:1'), out_proj_covar=tensor([1.0685e-04, 8.5883e-05, 7.4300e-05, 1.0485e-04, 8.6320e-05, 1.4065e-04, + 1.0040e-04, 9.7029e-05], device='cuda:1') +2023-02-06 08:57:33,157 INFO [train.py:901] (1/4) Epoch 10, batch 4450, loss[loss=0.1629, simple_loss=0.2437, pruned_loss=0.041, over 7201.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3127, pruned_loss=0.08248, over 1603850.34 frames. ], batch size: 16, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:57:35,380 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:57:36,010 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-06 08:57:54,213 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:04,752 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.784e+02 3.289e+02 4.035e+02 8.452e+02, threshold=6.579e+02, percent-clipped=2.0 +2023-02-06 08:58:06,786 INFO [train.py:901] (1/4) Epoch 10, batch 4500, loss[loss=0.2994, simple_loss=0.3526, pruned_loss=0.1231, over 8364.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3125, pruned_loss=0.08237, over 1606766.39 frames. ], batch size: 24, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:27,559 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 08:58:37,991 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:39,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2001, 1.5812, 3.1995, 1.4228, 2.2354, 3.5558, 3.6019, 3.0115], + device='cuda:1'), covar=tensor([0.0797, 0.1273, 0.0339, 0.1785, 0.0865, 0.0221, 0.0428, 0.0582], + device='cuda:1'), in_proj_covar=tensor([0.0252, 0.0291, 0.0251, 0.0279, 0.0265, 0.0232, 0.0321, 0.0284], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 08:58:41,442 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:43,370 INFO [train.py:901] (1/4) Epoch 10, batch 4550, loss[loss=0.1984, simple_loss=0.2793, pruned_loss=0.0587, over 7939.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3119, pruned_loss=0.08258, over 1601581.03 frames. ], batch size: 20, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:55,667 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:59:14,841 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.639e+02 3.213e+02 4.072e+02 8.769e+02, threshold=6.426e+02, percent-clipped=3.0 +2023-02-06 08:59:16,947 INFO [train.py:901] (1/4) Epoch 10, batch 4600, loss[loss=0.2305, simple_loss=0.3058, pruned_loss=0.07757, over 8293.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3126, pruned_loss=0.08309, over 1608410.38 frames. ], batch size: 23, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:59:42,598 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 08:59:50,937 INFO [train.py:901] (1/4) Epoch 10, batch 4650, loss[loss=0.2085, simple_loss=0.2974, pruned_loss=0.05974, over 7811.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.314, pruned_loss=0.08337, over 1610954.19 frames. 
], batch size: 20, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:12,900 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1972, 3.1426, 2.8980, 1.4861, 2.8223, 2.7855, 2.8948, 2.6511], + device='cuda:1'), covar=tensor([0.1317, 0.0891, 0.1411, 0.4889, 0.1146, 0.1330, 0.1619, 0.1069], + device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0357, 0.0375, 0.0471, 0.0369, 0.0358, 0.0369, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:00:20,728 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9328, 1.6666, 1.7065, 1.5049, 1.1265, 1.5918, 2.2131, 1.8762], + device='cuda:1'), covar=tensor([0.0450, 0.1203, 0.1717, 0.1471, 0.0606, 0.1492, 0.0655, 0.0634], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0195, 0.0161, 0.0106, 0.0165, 0.0119, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:00:25,447 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 3.048e+02 3.591e+02 4.434e+02 8.168e+02, threshold=7.182e+02, percent-clipped=8.0 +2023-02-06 09:00:27,558 INFO [train.py:901] (1/4) Epoch 10, batch 4700, loss[loss=0.2657, simple_loss=0.3268, pruned_loss=0.1023, over 8804.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3139, pruned_loss=0.08378, over 1606551.91 frames. ], batch size: 40, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:33,883 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:00:58,287 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4640, 2.7295, 1.8657, 2.2343, 1.9977, 1.4707, 1.8849, 2.2059], + device='cuda:1'), covar=tensor([0.1253, 0.0315, 0.1007, 0.0511, 0.0703, 0.1457, 0.0914, 0.0797], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0235, 0.0313, 0.0297, 0.0301, 0.0325, 0.0340, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 09:01:02,744 INFO [train.py:901] (1/4) Epoch 10, batch 4750, loss[loss=0.2303, simple_loss=0.2904, pruned_loss=0.08513, over 7539.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3143, pruned_loss=0.08436, over 1605813.46 frames. ], batch size: 18, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:28,523 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 09:01:30,525 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 09:01:35,879 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.648e+02 3.312e+02 4.103e+02 1.054e+03, threshold=6.623e+02, percent-clipped=5.0 +2023-02-06 09:01:37,936 INFO [train.py:901] (1/4) Epoch 10, batch 4800, loss[loss=0.2602, simple_loss=0.333, pruned_loss=0.09371, over 8293.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3163, pruned_loss=0.08515, over 1612531.32 frames. 
], batch size: 23, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:54,069 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,110 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,628 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:10,895 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:11,381 INFO [train.py:901] (1/4) Epoch 10, batch 4850, loss[loss=0.2452, simple_loss=0.3126, pruned_loss=0.08892, over 8032.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3168, pruned_loss=0.08516, over 1617064.70 frames. ], batch size: 22, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:16,292 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 09:02:38,004 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:38,637 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:42,019 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:46,049 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.616e+02 3.128e+02 3.870e+02 7.279e+02, threshold=6.256e+02, percent-clipped=1.0 +2023-02-06 09:02:48,055 INFO [train.py:901] (1/4) Epoch 10, batch 4900, loss[loss=0.2522, simple_loss=0.3218, pruned_loss=0.09128, over 8698.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3164, pruned_loss=0.08509, over 1609763.54 frames. ], batch size: 34, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:55,137 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1917, 1.3989, 4.1512, 1.8239, 2.4376, 4.7647, 4.7273, 4.0603], + device='cuda:1'), covar=tensor([0.1048, 0.1668, 0.0305, 0.1900, 0.1050, 0.0176, 0.0424, 0.0620], + device='cuda:1'), in_proj_covar=tensor([0.0250, 0.0286, 0.0248, 0.0279, 0.0261, 0.0228, 0.0317, 0.0281], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 09:03:09,345 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4483, 1.4441, 2.8804, 1.2216, 2.0536, 3.0707, 3.1234, 2.5944], + device='cuda:1'), covar=tensor([0.1144, 0.1409, 0.0395, 0.2071, 0.0836, 0.0277, 0.0539, 0.0715], + device='cuda:1'), in_proj_covar=tensor([0.0250, 0.0287, 0.0247, 0.0278, 0.0260, 0.0227, 0.0316, 0.0281], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 09:03:15,422 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:19,988 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:21,730 INFO [train.py:901] (1/4) Epoch 10, batch 4950, loss[loss=0.2749, simple_loss=0.3302, pruned_loss=0.1098, over 7544.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3163, pruned_loss=0.08575, over 1605776.16 frames. 
], batch size: 18, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:54,524 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 2.775e+02 3.348e+02 4.012e+02 9.680e+02, threshold=6.695e+02, percent-clipped=4.0 +2023-02-06 09:03:57,188 INFO [train.py:901] (1/4) Epoch 10, batch 5000, loss[loss=0.2767, simple_loss=0.3526, pruned_loss=0.1004, over 8515.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3179, pruned_loss=0.0869, over 1603694.93 frames. ], batch size: 28, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:58,711 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:59,989 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5018, 2.8595, 1.9533, 2.1811, 2.2049, 1.5125, 2.0121, 2.1385], + device='cuda:1'), covar=tensor([0.1686, 0.0317, 0.1034, 0.0740, 0.0704, 0.1502, 0.1109, 0.1116], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0234, 0.0312, 0.0299, 0.0305, 0.0326, 0.0339, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 09:04:01,945 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:30,717 INFO [train.py:901] (1/4) Epoch 10, batch 5050, loss[loss=0.254, simple_loss=0.3321, pruned_loss=0.08797, over 8470.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3178, pruned_loss=0.08654, over 1608322.19 frames. ], batch size: 25, lr: 7.62e-03, grad_scale: 8.0 +2023-02-06 09:04:51,127 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:52,883 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 09:05:02,965 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.592e+02 3.051e+02 4.098e+02 9.089e+02, threshold=6.102e+02, percent-clipped=4.0 +2023-02-06 09:05:05,650 INFO [train.py:901] (1/4) Epoch 10, batch 5100, loss[loss=0.3114, simple_loss=0.3561, pruned_loss=0.1333, over 6921.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3175, pruned_loss=0.08646, over 1610379.88 frames. ], batch size: 72, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:05:09,145 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:05:40,071 INFO [train.py:901] (1/4) Epoch 10, batch 5150, loss[loss=0.2116, simple_loss=0.2982, pruned_loss=0.06245, over 7963.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3165, pruned_loss=0.08542, over 1616054.92 frames. ], batch size: 21, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:05:43,179 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. 
limit=2.0 +2023-02-06 09:06:07,894 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9007, 2.2278, 4.7380, 1.3270, 3.4207, 2.2717, 1.7514, 2.8800], + device='cuda:1'), covar=tensor([0.1500, 0.2130, 0.0606, 0.3746, 0.1189, 0.2557, 0.1674, 0.2095], + device='cuda:1'), in_proj_covar=tensor([0.0482, 0.0502, 0.0526, 0.0567, 0.0604, 0.0545, 0.0460, 0.0600], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:06:11,279 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:11,738 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.805e+02 3.349e+02 3.898e+02 8.134e+02, threshold=6.697e+02, percent-clipped=4.0 +2023-02-06 09:06:13,812 INFO [train.py:901] (1/4) Epoch 10, batch 5200, loss[loss=0.2606, simple_loss=0.3295, pruned_loss=0.09583, over 8451.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3151, pruned_loss=0.08434, over 1616517.72 frames. ], batch size: 49, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:06:29,782 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:35,818 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:50,976 INFO [train.py:901] (1/4) Epoch 10, batch 5250, loss[loss=0.2201, simple_loss=0.2938, pruned_loss=0.07316, over 7814.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3139, pruned_loss=0.0836, over 1618096.86 frames. ], batch size: 20, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:06:56,860 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 09:06:57,815 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:00,644 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:05,372 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7955, 2.2204, 3.6884, 2.6465, 3.0695, 2.3647, 1.9312, 1.8502], + device='cuda:1'), covar=tensor([0.3383, 0.4225, 0.0980, 0.2869, 0.2090, 0.2133, 0.1611, 0.4405], + device='cuda:1'), in_proj_covar=tensor([0.0871, 0.0846, 0.0710, 0.0823, 0.0919, 0.0778, 0.0694, 0.0752], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:07:14,856 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:17,402 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:19,992 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:23,912 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.672e+02 3.378e+02 4.041e+02 9.848e+02, threshold=6.756e+02, percent-clipped=3.0 +2023-02-06 09:07:25,981 INFO [train.py:901] (1/4) Epoch 10, batch 5300, loss[loss=0.2642, simple_loss=0.3383, pruned_loss=0.09505, over 8293.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3131, pruned_loss=0.08295, over 1617488.96 frames. 
], batch size: 23, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:07:57,639 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:08:00,803 INFO [train.py:901] (1/4) Epoch 10, batch 5350, loss[loss=0.2432, simple_loss=0.3183, pruned_loss=0.0841, over 8020.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3135, pruned_loss=0.08286, over 1618589.27 frames. ], batch size: 22, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:08:06,683 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2248, 1.8862, 2.7919, 2.3119, 2.5372, 2.1442, 1.6797, 1.2645], + device='cuda:1'), covar=tensor([0.3732, 0.3754, 0.1075, 0.2232, 0.1655, 0.2118, 0.1798, 0.3953], + device='cuda:1'), in_proj_covar=tensor([0.0869, 0.0846, 0.0708, 0.0819, 0.0916, 0.0777, 0.0692, 0.0748], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:08:34,365 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.713e+02 3.238e+02 4.266e+02 6.892e+02, threshold=6.476e+02, percent-clipped=1.0 +2023-02-06 09:08:35,741 INFO [train.py:901] (1/4) Epoch 10, batch 5400, loss[loss=0.2475, simple_loss=0.3243, pruned_loss=0.08534, over 8519.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3147, pruned_loss=0.08352, over 1622575.76 frames. ], batch size: 39, lr: 7.61e-03, grad_scale: 8.0 +2023-02-06 09:08:40,007 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:09:08,707 INFO [train.py:901] (1/4) Epoch 10, batch 5450, loss[loss=0.2176, simple_loss=0.2982, pruned_loss=0.06846, over 8451.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3145, pruned_loss=0.08361, over 1619606.35 frames. ], batch size: 27, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:43,437 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.697e+02 3.396e+02 4.413e+02 8.943e+02, threshold=6.791e+02, percent-clipped=7.0 +2023-02-06 09:09:44,799 INFO [train.py:901] (1/4) Epoch 10, batch 5500, loss[loss=0.1749, simple_loss=0.2548, pruned_loss=0.04748, over 7543.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3148, pruned_loss=0.08379, over 1615951.22 frames. ], batch size: 18, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:46,331 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:09:46,835 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 09:10:03,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6510, 2.4714, 4.5706, 1.3395, 3.2117, 2.2493, 1.7868, 2.7962], + device='cuda:1'), covar=tensor([0.1696, 0.2029, 0.0760, 0.4055, 0.1412, 0.2692, 0.1776, 0.2255], + device='cuda:1'), in_proj_covar=tensor([0.0482, 0.0501, 0.0524, 0.0570, 0.0602, 0.0544, 0.0460, 0.0599], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:10:17,484 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3214, 1.2137, 1.4649, 1.1363, 0.7703, 1.2845, 1.1458, 1.1437], + device='cuda:1'), covar=tensor([0.0527, 0.1357, 0.1692, 0.1478, 0.0555, 0.1601, 0.0739, 0.0633], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0155, 0.0194, 0.0160, 0.0105, 0.0164, 0.0119, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 09:10:18,554 INFO [train.py:901] (1/4) Epoch 10, batch 5550, loss[loss=0.247, simple_loss=0.3245, pruned_loss=0.08478, over 8431.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3141, pruned_loss=0.08311, over 1615948.56 frames. ], batch size: 27, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:22,039 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4371, 2.9265, 1.7720, 2.1664, 2.0541, 1.5119, 1.8437, 2.2938], + device='cuda:1'), covar=tensor([0.1409, 0.0361, 0.1083, 0.0647, 0.0760, 0.1547, 0.1167, 0.0813], + device='cuda:1'), in_proj_covar=tensor([0.0343, 0.0231, 0.0310, 0.0297, 0.0303, 0.0323, 0.0337, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 09:10:52,568 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.563e+02 3.102e+02 4.076e+02 7.679e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 09:10:54,680 INFO [train.py:901] (1/4) Epoch 10, batch 5600, loss[loss=0.2202, simple_loss=0.2963, pruned_loss=0.07203, over 7661.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3137, pruned_loss=0.08307, over 1614094.91 frames. ], batch size: 19, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:55,582 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:12,346 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:28,352 INFO [train.py:901] (1/4) Epoch 10, batch 5650, loss[loss=0.1995, simple_loss=0.2773, pruned_loss=0.06081, over 7255.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3137, pruned_loss=0.08311, over 1611127.73 frames. ], batch size: 16, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:11:36,903 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:48,304 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 09:11:54,461 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:12:02,394 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.443e+02 2.913e+02 3.480e+02 5.594e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-06 09:12:03,751 INFO [train.py:901] (1/4) Epoch 10, batch 5700, loss[loss=0.2733, simple_loss=0.3342, pruned_loss=0.1062, over 8086.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3145, pruned_loss=0.08335, over 1616000.66 frames. ], batch size: 21, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:12:25,607 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:12:36,575 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2463, 2.4693, 1.9673, 2.9253, 1.2488, 1.5411, 1.9074, 2.2827], + device='cuda:1'), covar=tensor([0.0694, 0.0782, 0.1108, 0.0352, 0.1276, 0.1514, 0.1062, 0.0866], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0209, 0.0253, 0.0214, 0.0215, 0.0250, 0.0255, 0.0218], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 09:12:39,152 INFO [train.py:901] (1/4) Epoch 10, batch 5750, loss[loss=0.1927, simple_loss=0.2577, pruned_loss=0.06386, over 7531.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3137, pruned_loss=0.08323, over 1612512.65 frames. ], batch size: 18, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:12:53,267 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 09:13:11,252 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.793e+02 3.478e+02 4.404e+02 1.244e+03, threshold=6.955e+02, percent-clipped=11.0 +2023-02-06 09:13:12,614 INFO [train.py:901] (1/4) Epoch 10, batch 5800, loss[loss=0.2191, simple_loss=0.296, pruned_loss=0.07109, over 8252.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3119, pruned_loss=0.08249, over 1609047.87 frames. ], batch size: 22, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:13:31,641 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:13:45,424 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:13:48,025 INFO [train.py:901] (1/4) Epoch 10, batch 5850, loss[loss=0.2289, simple_loss=0.2995, pruned_loss=0.07914, over 8015.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3128, pruned_loss=0.08268, over 1611555.06 frames. 
], batch size: 22, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:14:12,162 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7060, 1.9304, 2.1595, 1.4819, 2.2640, 1.5837, 0.9293, 1.7408], + device='cuda:1'), covar=tensor([0.0392, 0.0191, 0.0124, 0.0320, 0.0218, 0.0475, 0.0516, 0.0202], + device='cuda:1'), in_proj_covar=tensor([0.0380, 0.0308, 0.0265, 0.0376, 0.0305, 0.0462, 0.0352, 0.0337], + device='cuda:1'), out_proj_covar=tensor([1.1013e-04, 8.7315e-05, 7.5397e-05, 1.0727e-04, 8.8303e-05, 1.4328e-04, + 1.0199e-04, 9.6771e-05], device='cuda:1') +2023-02-06 09:14:19,904 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.589e+02 3.164e+02 4.281e+02 9.296e+02, threshold=6.329e+02, percent-clipped=5.0 +2023-02-06 09:14:21,265 INFO [train.py:901] (1/4) Epoch 10, batch 5900, loss[loss=0.2146, simple_loss=0.2989, pruned_loss=0.06521, over 8099.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3124, pruned_loss=0.08275, over 1610390.83 frames. ], batch size: 23, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:14:22,174 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7696, 1.4834, 1.9708, 1.6193, 1.8266, 1.6753, 1.3910, 0.7172], + device='cuda:1'), covar=tensor([0.3231, 0.2993, 0.1023, 0.1874, 0.1407, 0.1827, 0.1487, 0.2955], + device='cuda:1'), in_proj_covar=tensor([0.0872, 0.0849, 0.0710, 0.0827, 0.0917, 0.0776, 0.0692, 0.0745], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:14:47,571 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9594, 1.6556, 1.7603, 1.5701, 1.0717, 1.7647, 2.2422, 1.9962], + device='cuda:1'), covar=tensor([0.0431, 0.1205, 0.1667, 0.1327, 0.0600, 0.1392, 0.0610, 0.0557], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0155, 0.0195, 0.0159, 0.0106, 0.0165, 0.0118, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:14:57,566 INFO [train.py:901] (1/4) Epoch 10, batch 5950, loss[loss=0.2411, simple_loss=0.3191, pruned_loss=0.08158, over 8442.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3128, pruned_loss=0.08295, over 1611699.36 frames. ], batch size: 27, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:15:05,423 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:15:30,046 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.430e+02 2.939e+02 3.954e+02 7.661e+02, threshold=5.878e+02, percent-clipped=3.0 +2023-02-06 09:15:30,239 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3124, 1.3090, 2.2865, 1.2172, 2.2239, 2.4766, 2.5228, 2.1018], + device='cuda:1'), covar=tensor([0.0941, 0.1161, 0.0465, 0.1742, 0.0543, 0.0373, 0.0555, 0.0771], + device='cuda:1'), in_proj_covar=tensor([0.0256, 0.0293, 0.0254, 0.0285, 0.0267, 0.0232, 0.0323, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 09:15:31,440 INFO [train.py:901] (1/4) Epoch 10, batch 6000, loss[loss=0.1998, simple_loss=0.2657, pruned_loss=0.06689, over 6365.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3125, pruned_loss=0.08325, over 1604084.05 frames. 
], batch size: 14, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:15:31,441 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 09:15:43,951 INFO [train.py:935] (1/4) Epoch 10, validation: loss=0.1914, simple_loss=0.2907, pruned_loss=0.04604, over 944034.00 frames. +2023-02-06 09:15:43,952 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 09:16:18,420 INFO [train.py:901] (1/4) Epoch 10, batch 6050, loss[loss=0.2383, simple_loss=0.3011, pruned_loss=0.08775, over 7253.00 frames. ], tot_loss[loss=0.238, simple_loss=0.3109, pruned_loss=0.08255, over 1595892.35 frames. ], batch size: 16, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:16:35,967 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:16:52,866 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.842e+02 3.348e+02 4.641e+02 9.072e+02, threshold=6.696e+02, percent-clipped=15.0 +2023-02-06 09:16:54,182 INFO [train.py:901] (1/4) Epoch 10, batch 6100, loss[loss=0.2141, simple_loss=0.2794, pruned_loss=0.07438, over 7698.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3113, pruned_loss=0.08241, over 1598126.58 frames. ], batch size: 18, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:17:24,370 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 09:17:27,730 INFO [train.py:901] (1/4) Epoch 10, batch 6150, loss[loss=0.2244, simple_loss=0.3027, pruned_loss=0.07302, over 7419.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3098, pruned_loss=0.08136, over 1599435.86 frames. ], batch size: 17, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:17:41,304 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:17:54,630 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78937.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:01,037 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.486e+02 3.076e+02 3.632e+02 7.166e+02, threshold=6.152e+02, percent-clipped=1.0 +2023-02-06 09:18:02,465 INFO [train.py:901] (1/4) Epoch 10, batch 6200, loss[loss=0.2361, simple_loss=0.308, pruned_loss=0.08214, over 7437.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3117, pruned_loss=0.08169, over 1606760.68 frames. ], batch size: 17, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:18:15,663 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:28,058 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:32,912 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:38,064 INFO [train.py:901] (1/4) Epoch 10, batch 6250, loss[loss=0.2387, simple_loss=0.3154, pruned_loss=0.08097, over 8360.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3116, pruned_loss=0.08212, over 1606400.28 frames. 
], batch size: 24, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:19:01,794 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79033.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:19:10,145 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.509e+02 3.177e+02 4.128e+02 1.006e+03, threshold=6.355e+02, percent-clipped=7.0 +2023-02-06 09:19:11,558 INFO [train.py:901] (1/4) Epoch 10, batch 6300, loss[loss=0.2433, simple_loss=0.3208, pruned_loss=0.08287, over 8040.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3112, pruned_loss=0.08188, over 1599922.51 frames. ], batch size: 22, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:19:47,655 INFO [train.py:901] (1/4) Epoch 10, batch 6350, loss[loss=0.305, simple_loss=0.3412, pruned_loss=0.1345, over 7549.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3113, pruned_loss=0.08226, over 1599455.42 frames. ], batch size: 18, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:20:20,619 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.998e+02 3.636e+02 4.667e+02 1.201e+03, threshold=7.271e+02, percent-clipped=11.0 +2023-02-06 09:20:21,298 INFO [train.py:901] (1/4) Epoch 10, batch 6400, loss[loss=0.2399, simple_loss=0.3125, pruned_loss=0.08367, over 8473.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3139, pruned_loss=0.08387, over 1606650.01 frames. ], batch size: 25, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:20:54,163 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:20:57,437 INFO [train.py:901] (1/4) Epoch 10, batch 6450, loss[loss=0.2502, simple_loss=0.3268, pruned_loss=0.08676, over 8765.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.313, pruned_loss=0.08257, over 1610042.41 frames. ], batch size: 30, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:21:12,224 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79218.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:21:31,632 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.577e+02 3.130e+02 4.050e+02 7.383e+02, threshold=6.260e+02, percent-clipped=1.0 +2023-02-06 09:21:32,338 INFO [train.py:901] (1/4) Epoch 10, batch 6500, loss[loss=0.2126, simple_loss=0.2946, pruned_loss=0.06527, over 8204.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.313, pruned_loss=0.08226, over 1613606.81 frames. ], batch size: 23, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:21:37,198 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5702, 1.9430, 2.0759, 1.3026, 2.2276, 1.3909, 0.7276, 1.7820], + device='cuda:1'), covar=tensor([0.0380, 0.0203, 0.0151, 0.0300, 0.0212, 0.0590, 0.0475, 0.0183], + device='cuda:1'), in_proj_covar=tensor([0.0375, 0.0308, 0.0262, 0.0374, 0.0298, 0.0460, 0.0349, 0.0334], + device='cuda:1'), out_proj_covar=tensor([1.0847e-04, 8.7017e-05, 7.4642e-05, 1.0684e-04, 8.5972e-05, 1.4267e-04, + 1.0098e-04, 9.6060e-05], device='cuda:1') +2023-02-06 09:21:59,879 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79289.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:06,996 INFO [train.py:901] (1/4) Epoch 10, batch 6550, loss[loss=0.2506, simple_loss=0.3268, pruned_loss=0.08726, over 8111.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3116, pruned_loss=0.08153, over 1612206.64 frames. 
], batch size: 23, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:22:11,145 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79303.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:22:18,364 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79314.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:27,871 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:35,417 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4722, 1.7947, 2.7453, 1.2355, 1.9358, 1.8683, 1.5524, 1.7626], + device='cuda:1'), covar=tensor([0.1706, 0.1975, 0.0764, 0.3767, 0.1539, 0.2690, 0.1787, 0.2141], + device='cuda:1'), in_proj_covar=tensor([0.0482, 0.0502, 0.0527, 0.0569, 0.0612, 0.0545, 0.0463, 0.0603], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:22:36,078 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5069, 1.8618, 1.9141, 1.0896, 2.0989, 1.4281, 0.4755, 1.7974], + device='cuda:1'), covar=tensor([0.0329, 0.0214, 0.0163, 0.0284, 0.0201, 0.0556, 0.0496, 0.0143], + device='cuda:1'), in_proj_covar=tensor([0.0379, 0.0310, 0.0266, 0.0377, 0.0301, 0.0464, 0.0353, 0.0337], + device='cuda:1'), out_proj_covar=tensor([1.0953e-04, 8.7883e-05, 7.5710e-05, 1.0731e-04, 8.6790e-05, 1.4378e-04, + 1.0216e-04, 9.6991e-05], device='cuda:1') +2023-02-06 09:22:36,562 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 09:22:41,242 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.767e+02 3.312e+02 4.239e+02 1.073e+03, threshold=6.623e+02, percent-clipped=3.0 +2023-02-06 09:22:41,949 INFO [train.py:901] (1/4) Epoch 10, batch 6600, loss[loss=0.2494, simple_loss=0.3243, pruned_loss=0.08719, over 8660.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3121, pruned_loss=0.08202, over 1612557.36 frames. ], batch size: 39, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:22:44,151 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:53,887 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 09:23:04,704 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:23:11,890 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79393.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:23:15,197 INFO [train.py:901] (1/4) Epoch 10, batch 6650, loss[loss=0.243, simple_loss=0.2982, pruned_loss=0.09396, over 7206.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3134, pruned_loss=0.08299, over 1609133.05 frames. 
], batch size: 16, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:23:15,338 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.4864, 1.5233, 5.5471, 2.1146, 4.9659, 4.6933, 5.1164, 4.9771], + device='cuda:1'), covar=tensor([0.0378, 0.4078, 0.0375, 0.3076, 0.0864, 0.0673, 0.0379, 0.0467], + device='cuda:1'), in_proj_covar=tensor([0.0469, 0.0564, 0.0570, 0.0521, 0.0596, 0.0505, 0.0496, 0.0571], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:23:47,673 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:23:47,686 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8991, 2.0830, 1.7575, 2.6425, 1.1568, 1.4613, 1.8378, 2.0980], + device='cuda:1'), covar=tensor([0.0792, 0.0929, 0.1084, 0.0413, 0.1184, 0.1572, 0.0882, 0.0774], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0212, 0.0254, 0.0216, 0.0218, 0.0252, 0.0258, 0.0223], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 09:23:50,857 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.666e+02 3.220e+02 4.193e+02 8.839e+02, threshold=6.440e+02, percent-clipped=3.0 +2023-02-06 09:23:51,582 INFO [train.py:901] (1/4) Epoch 10, batch 6700, loss[loss=0.2382, simple_loss=0.322, pruned_loss=0.07722, over 8198.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3148, pruned_loss=0.08357, over 1613485.52 frames. ], batch size: 23, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:24:11,621 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.5790, 1.8485, 3.7080, 1.9674, 3.3287, 3.1652, 3.4258, 3.3390], + device='cuda:1'), covar=tensor([0.0601, 0.2951, 0.0782, 0.2863, 0.0960, 0.0872, 0.0530, 0.0627], + device='cuda:1'), in_proj_covar=tensor([0.0469, 0.0565, 0.0572, 0.0523, 0.0595, 0.0507, 0.0497, 0.0569], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:24:24,671 INFO [train.py:901] (1/4) Epoch 10, batch 6750, loss[loss=0.2491, simple_loss=0.3224, pruned_loss=0.08795, over 8342.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3154, pruned_loss=0.08435, over 1611585.88 frames. ], batch size: 26, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:00,368 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.662e+02 3.188e+02 4.113e+02 8.575e+02, threshold=6.376e+02, percent-clipped=4.0 +2023-02-06 09:25:01,059 INFO [train.py:901] (1/4) Epoch 10, batch 6800, loss[loss=0.218, simple_loss=0.3014, pruned_loss=0.06735, over 8362.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.315, pruned_loss=0.08398, over 1614244.25 frames. ], batch size: 24, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:11,666 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 09:25:35,820 INFO [train.py:901] (1/4) Epoch 10, batch 6850, loss[loss=0.2654, simple_loss=0.344, pruned_loss=0.09339, over 8480.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3141, pruned_loss=0.08384, over 1608390.76 frames. 
], batch size: 29, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:39,355 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:25:59,652 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 09:26:10,492 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.480e+02 2.958e+02 3.519e+02 6.592e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-06 09:26:10,580 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79647.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:26:11,121 INFO [train.py:901] (1/4) Epoch 10, batch 6900, loss[loss=0.2396, simple_loss=0.3236, pruned_loss=0.07779, over 8026.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3133, pruned_loss=0.08324, over 1607423.82 frames. ], batch size: 22, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:26:19,350 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 09:26:26,448 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0543, 1.2632, 1.2021, 0.6510, 1.2670, 1.0317, 0.2231, 1.1466], + device='cuda:1'), covar=tensor([0.0228, 0.0211, 0.0197, 0.0314, 0.0250, 0.0570, 0.0479, 0.0186], + device='cuda:1'), in_proj_covar=tensor([0.0375, 0.0305, 0.0261, 0.0370, 0.0296, 0.0456, 0.0349, 0.0333], + device='cuda:1'), out_proj_covar=tensor([1.0844e-04, 8.6142e-05, 7.4195e-05, 1.0539e-04, 8.5244e-05, 1.4116e-04, + 1.0118e-04, 9.5813e-05], device='cuda:1') +2023-02-06 09:26:30,985 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:26:44,404 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:26:46,401 INFO [train.py:901] (1/4) Epoch 10, batch 6950, loss[loss=0.2084, simple_loss=0.2996, pruned_loss=0.05863, over 8473.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3124, pruned_loss=0.08195, over 1614465.73 frames. ], batch size: 25, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:26:46,615 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:01,842 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 09:27:03,563 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:05,484 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:10,661 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 09:27:12,697 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79737.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:27:19,259 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.770e+02 3.379e+02 4.019e+02 1.115e+03, threshold=6.759e+02, percent-clipped=8.0 +2023-02-06 09:27:19,985 INFO [train.py:901] (1/4) Epoch 10, batch 7000, loss[loss=0.1963, simple_loss=0.2676, pruned_loss=0.0625, over 7433.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3122, pruned_loss=0.08198, over 1611912.63 frames. 
], batch size: 17, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:27:28,518 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 09:27:30,359 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79762.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:27:43,454 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79780.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:55,338 INFO [train.py:901] (1/4) Epoch 10, batch 7050, loss[loss=0.2691, simple_loss=0.3411, pruned_loss=0.09857, over 8288.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3119, pruned_loss=0.08166, over 1611955.07 frames. ], batch size: 23, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:27:58,325 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2525, 1.2920, 2.3242, 1.1858, 2.0829, 2.4711, 2.5497, 2.1388], + device='cuda:1'), covar=tensor([0.1078, 0.1300, 0.0489, 0.2033, 0.0724, 0.0461, 0.0677, 0.0829], + device='cuda:1'), in_proj_covar=tensor([0.0257, 0.0293, 0.0251, 0.0284, 0.0268, 0.0233, 0.0328, 0.0289], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 09:28:04,455 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:28:18,695 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2424, 1.5271, 1.6615, 1.3872, 1.0044, 1.5254, 1.7571, 1.8529], + device='cuda:1'), covar=tensor([0.0507, 0.1274, 0.1708, 0.1414, 0.0651, 0.1556, 0.0735, 0.0537], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0152, 0.0193, 0.0159, 0.0105, 0.0164, 0.0117, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:1') +2023-02-06 09:28:25,535 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:28:29,363 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.704e+02 3.361e+02 4.306e+02 1.362e+03, threshold=6.722e+02, percent-clipped=5.0 +2023-02-06 09:28:29,796 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 09:28:30,079 INFO [train.py:901] (1/4) Epoch 10, batch 7100, loss[loss=0.2511, simple_loss=0.3307, pruned_loss=0.0857, over 8454.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3123, pruned_loss=0.08194, over 1612627.95 frames. ], batch size: 27, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:28:32,891 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79852.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:29:06,028 INFO [train.py:901] (1/4) Epoch 10, batch 7150, loss[loss=0.2321, simple_loss=0.3031, pruned_loss=0.08057, over 8182.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3127, pruned_loss=0.08159, over 1616037.01 frames. ], batch size: 23, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:29:33,297 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. 
limit=5.0 +2023-02-06 09:29:39,477 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.702e+02 3.262e+02 4.332e+02 1.613e+03, threshold=6.525e+02, percent-clipped=3.0 +2023-02-06 09:29:39,575 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:29:40,184 INFO [train.py:901] (1/4) Epoch 10, batch 7200, loss[loss=0.2482, simple_loss=0.3311, pruned_loss=0.08268, over 8546.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3119, pruned_loss=0.08108, over 1611119.72 frames. ], batch size: 31, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:30:13,866 INFO [train.py:901] (1/4) Epoch 10, batch 7250, loss[loss=0.2315, simple_loss=0.3067, pruned_loss=0.07813, over 8315.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3124, pruned_loss=0.08187, over 1611405.41 frames. ], batch size: 25, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:30:30,522 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80018.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:30:31,018 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80019.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:30:47,885 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80043.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:30:50,296 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.755e+02 3.243e+02 3.993e+02 1.489e+03, threshold=6.485e+02, percent-clipped=9.0 +2023-02-06 09:30:50,951 INFO [train.py:901] (1/4) Epoch 10, batch 7300, loss[loss=0.2545, simple_loss=0.3392, pruned_loss=0.08491, over 8357.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3135, pruned_loss=0.08301, over 1608164.51 frames. ], batch size: 26, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:31:00,594 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:03,354 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:04,718 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8171, 2.0163, 1.6861, 2.6049, 1.0968, 1.3991, 1.6874, 2.1667], + device='cuda:1'), covar=tensor([0.0871, 0.0946, 0.1111, 0.0455, 0.1285, 0.1554, 0.1109, 0.0711], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0212, 0.0253, 0.0216, 0.0218, 0.0250, 0.0257, 0.0226], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 09:31:20,006 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80091.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:24,178 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:24,600 INFO [train.py:901] (1/4) Epoch 10, batch 7350, loss[loss=0.291, simple_loss=0.3602, pruned_loss=0.1109, over 8331.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3136, pruned_loss=0.08356, over 1609470.84 frames. 
], batch size: 25, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:31:31,679 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80108.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:31:42,410 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:44,304 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:50,388 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80133.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:31:50,994 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:56,728 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 09:31:59,434 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.519e+02 3.343e+02 4.224e+02 9.659e+02, threshold=6.686e+02, percent-clipped=6.0 +2023-02-06 09:32:00,144 INFO [train.py:901] (1/4) Epoch 10, batch 7400, loss[loss=0.1911, simple_loss=0.2731, pruned_loss=0.05456, over 7925.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3148, pruned_loss=0.08442, over 1609701.25 frames. ], batch size: 20, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:32:16,199 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 09:32:19,815 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7267, 2.2963, 4.2788, 1.4425, 2.9739, 2.1371, 1.8331, 2.5435], + device='cuda:1'), covar=tensor([0.1834, 0.2221, 0.0722, 0.4179, 0.1768, 0.3129, 0.1823, 0.2618], + device='cuda:1'), in_proj_covar=tensor([0.0490, 0.0512, 0.0536, 0.0576, 0.0617, 0.0558, 0.0468, 0.0608], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:32:34,304 INFO [train.py:901] (1/4) Epoch 10, batch 7450, loss[loss=0.2311, simple_loss=0.3101, pruned_loss=0.07601, over 8360.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3154, pruned_loss=0.08424, over 1614351.95 frames. ], batch size: 24, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:32:34,500 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9448, 1.6301, 1.7126, 1.5316, 1.1566, 1.6022, 1.7042, 1.6235], + device='cuda:1'), covar=tensor([0.0544, 0.0995, 0.1375, 0.1109, 0.0601, 0.1211, 0.0685, 0.0474], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0153, 0.0195, 0.0159, 0.0105, 0.0164, 0.0118, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:32:47,421 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 09:32:54,358 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 09:33:02,478 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:33:07,266 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9172, 1.0386, 3.0072, 0.9209, 2.6432, 2.5221, 2.7967, 2.6921], + device='cuda:1'), covar=tensor([0.0632, 0.3438, 0.0692, 0.2884, 0.1254, 0.0916, 0.0597, 0.0722], + device='cuda:1'), in_proj_covar=tensor([0.0460, 0.0554, 0.0558, 0.0508, 0.0582, 0.0490, 0.0484, 0.0551], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:33:09,144 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.637e+02 3.217e+02 3.901e+02 6.824e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 09:33:09,864 INFO [train.py:901] (1/4) Epoch 10, batch 7500, loss[loss=0.3119, simple_loss=0.3697, pruned_loss=0.1271, over 8525.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3148, pruned_loss=0.08396, over 1611351.33 frames. ], batch size: 26, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:33:43,946 INFO [train.py:901] (1/4) Epoch 10, batch 7550, loss[loss=0.273, simple_loss=0.3515, pruned_loss=0.0972, over 8450.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3134, pruned_loss=0.08261, over 1613890.42 frames. ], batch size: 27, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:33:46,245 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:33:57,962 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:07,069 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-06 09:34:07,862 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0 +2023-02-06 09:34:15,033 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:17,454 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.399e+02 2.908e+02 3.933e+02 1.078e+03, threshold=5.816e+02, percent-clipped=3.0 +2023-02-06 09:34:18,142 INFO [train.py:901] (1/4) Epoch 10, batch 7600, loss[loss=0.2789, simple_loss=0.3624, pruned_loss=0.09773, over 8698.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3149, pruned_loss=0.0832, over 1613610.42 frames. ], batch size: 49, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:34:32,353 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2145, 1.1345, 4.4199, 1.9935, 2.6610, 5.1054, 5.0292, 4.4180], + device='cuda:1'), covar=tensor([0.1072, 0.1999, 0.0298, 0.1845, 0.1003, 0.0182, 0.0409, 0.0580], + device='cuda:1'), in_proj_covar=tensor([0.0258, 0.0294, 0.0253, 0.0286, 0.0268, 0.0231, 0.0329, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 09:34:49,202 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80390.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:54,346 INFO [train.py:901] (1/4) Epoch 10, batch 7650, loss[loss=0.2542, simple_loss=0.3237, pruned_loss=0.09234, over 8709.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3132, pruned_loss=0.08263, over 1611701.54 frames. 
], batch size: 39, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:35:03,331 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:05,493 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4468, 1.9532, 2.9847, 2.2783, 2.5714, 2.2699, 1.8387, 1.2129], + device='cuda:1'), covar=tensor([0.3533, 0.3820, 0.1117, 0.2684, 0.2025, 0.2083, 0.1662, 0.4089], + device='cuda:1'), in_proj_covar=tensor([0.0882, 0.0856, 0.0723, 0.0830, 0.0930, 0.0790, 0.0700, 0.0761], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:35:06,117 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:10,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4408, 1.8015, 1.8955, 1.0442, 1.9913, 1.4749, 0.3929, 1.5613], + device='cuda:1'), covar=tensor([0.0336, 0.0200, 0.0186, 0.0298, 0.0280, 0.0518, 0.0494, 0.0150], + device='cuda:1'), in_proj_covar=tensor([0.0373, 0.0305, 0.0262, 0.0374, 0.0295, 0.0453, 0.0345, 0.0337], + device='cuda:1'), out_proj_covar=tensor([1.0765e-04, 8.6215e-05, 7.4130e-05, 1.0662e-04, 8.4942e-05, 1.3962e-04, + 9.9863e-05, 9.6723e-05], device='cuda:1') +2023-02-06 09:35:27,278 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.621e+02 3.149e+02 3.913e+02 9.838e+02, threshold=6.298e+02, percent-clipped=6.0 +2023-02-06 09:35:27,989 INFO [train.py:901] (1/4) Epoch 10, batch 7700, loss[loss=0.2322, simple_loss=0.3155, pruned_loss=0.07448, over 8465.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3123, pruned_loss=0.08229, over 1607431.46 frames. ], batch size: 25, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:35:51,538 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:54,393 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0 +2023-02-06 09:36:01,536 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:01,919 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 09:36:03,344 INFO [train.py:901] (1/4) Epoch 10, batch 7750, loss[loss=0.2259, simple_loss=0.292, pruned_loss=0.07992, over 7803.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3118, pruned_loss=0.08217, over 1608168.78 frames. ], batch size: 19, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:36:06,754 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 09:36:13,282 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:13,358 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7723, 2.0517, 2.3399, 1.4553, 2.4041, 1.7242, 0.8655, 1.9241], + device='cuda:1'), covar=tensor([0.0406, 0.0227, 0.0172, 0.0354, 0.0249, 0.0579, 0.0516, 0.0186], + device='cuda:1'), in_proj_covar=tensor([0.0373, 0.0303, 0.0261, 0.0373, 0.0294, 0.0451, 0.0344, 0.0336], + device='cuda:1'), out_proj_covar=tensor([1.0755e-04, 8.5373e-05, 7.3923e-05, 1.0623e-04, 8.4408e-05, 1.3903e-04, + 9.9296e-05, 9.6469e-05], device='cuda:1') +2023-02-06 09:36:18,623 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:36,346 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.713e+02 3.406e+02 4.090e+02 8.759e+02, threshold=6.812e+02, percent-clipped=3.0 +2023-02-06 09:36:37,056 INFO [train.py:901] (1/4) Epoch 10, batch 7800, loss[loss=0.2211, simple_loss=0.3032, pruned_loss=0.06949, over 8108.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3127, pruned_loss=0.08286, over 1606624.47 frames. ], batch size: 23, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:36:49,780 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7860, 3.8309, 3.3955, 1.9117, 3.3112, 3.3666, 3.4429, 3.0611], + device='cuda:1'), covar=tensor([0.0888, 0.0600, 0.1074, 0.4431, 0.1028, 0.0988, 0.1333, 0.0947], + device='cuda:1'), in_proj_covar=tensor([0.0460, 0.0357, 0.0379, 0.0477, 0.0377, 0.0363, 0.0367, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:37:09,857 INFO [train.py:901] (1/4) Epoch 10, batch 7850, loss[loss=0.2685, simple_loss=0.3463, pruned_loss=0.09538, over 8456.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3123, pruned_loss=0.08244, over 1604100.03 frames. ], batch size: 27, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:37:40,874 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:37:42,726 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.556e+02 3.372e+02 4.255e+02 7.191e+02, threshold=6.744e+02, percent-clipped=1.0 +2023-02-06 09:37:43,427 INFO [train.py:901] (1/4) Epoch 10, batch 7900, loss[loss=0.2597, simple_loss=0.3332, pruned_loss=0.09309, over 8438.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3126, pruned_loss=0.08297, over 1605857.82 frames. ], batch size: 27, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:37:56,467 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 09:37:57,225 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.39 vs. 
limit=5.0 +2023-02-06 09:38:11,818 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9810, 1.6651, 1.9144, 1.5933, 1.1284, 1.9649, 2.1485, 2.0715], + device='cuda:1'), covar=tensor([0.0457, 0.1203, 0.1628, 0.1337, 0.0597, 0.1368, 0.0670, 0.0551], + device='cuda:1'), in_proj_covar=tensor([0.0103, 0.0155, 0.0195, 0.0159, 0.0106, 0.0165, 0.0119, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:38:16,929 INFO [train.py:901] (1/4) Epoch 10, batch 7950, loss[loss=0.2277, simple_loss=0.2906, pruned_loss=0.08247, over 7657.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3121, pruned_loss=0.08245, over 1610742.29 frames. ], batch size: 19, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:38:50,732 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 2.660e+02 3.023e+02 3.700e+02 9.606e+02, threshold=6.046e+02, percent-clipped=2.0 +2023-02-06 09:38:51,446 INFO [train.py:901] (1/4) Epoch 10, batch 8000, loss[loss=0.2407, simple_loss=0.314, pruned_loss=0.08369, over 8476.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3129, pruned_loss=0.08301, over 1607401.70 frames. ], batch size: 49, lr: 7.48e-03, grad_scale: 8.0 +2023-02-06 09:38:55,690 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:38:56,306 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:38:59,808 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:14,662 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 09:39:20,370 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:24,988 INFO [train.py:901] (1/4) Epoch 10, batch 8050, loss[loss=0.2204, simple_loss=0.2921, pruned_loss=0.07439, over 7285.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3133, pruned_loss=0.0837, over 1602007.09 frames. ], batch size: 16, lr: 7.48e-03, grad_scale: 8.0 +2023-02-06 09:39:43,567 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:58,221 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 09:40:01,794 INFO [train.py:901] (1/4) Epoch 11, batch 0, loss[loss=0.2893, simple_loss=0.3533, pruned_loss=0.1127, over 8524.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3533, pruned_loss=0.1127, over 8524.00 frames. ], batch size: 49, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:40:01,795 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 09:40:13,090 INFO [train.py:935] (1/4) Epoch 11, validation: loss=0.1907, simple_loss=0.2907, pruned_loss=0.04534, over 944034.00 frames. +2023-02-06 09:40:13,091 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 09:40:23,915 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.899e+02 3.439e+02 4.416e+02 1.589e+03, threshold=6.879e+02, percent-clipped=9.0 +2023-02-06 09:40:27,457 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-06 09:40:30,151 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:40:39,870 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80870.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:40:47,942 INFO [train.py:901] (1/4) Epoch 11, batch 50, loss[loss=0.2359, simple_loss=0.3145, pruned_loss=0.0787, over 8194.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3181, pruned_loss=0.08411, over 367693.05 frames. ], batch size: 23, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:40:54,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5229, 1.3901, 2.7726, 1.2380, 1.9830, 3.0658, 3.1065, 2.5847], + device='cuda:1'), covar=tensor([0.1182, 0.1552, 0.0449, 0.2082, 0.0933, 0.0296, 0.0546, 0.0678], + device='cuda:1'), in_proj_covar=tensor([0.0257, 0.0291, 0.0252, 0.0283, 0.0268, 0.0231, 0.0326, 0.0285], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 09:40:57,788 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3251, 1.3276, 2.3172, 1.2070, 2.1140, 2.4940, 2.5864, 2.1336], + device='cuda:1'), covar=tensor([0.0995, 0.1231, 0.0460, 0.1976, 0.0685, 0.0387, 0.0600, 0.0706], + device='cuda:1'), in_proj_covar=tensor([0.0256, 0.0291, 0.0251, 0.0282, 0.0268, 0.0230, 0.0325, 0.0284], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 09:41:03,890 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 09:41:15,730 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7622, 2.6398, 1.9979, 4.3515, 1.6179, 1.6683, 2.7622, 2.8882], + device='cuda:1'), covar=tensor([0.1786, 0.1447, 0.2028, 0.0201, 0.1738, 0.2410, 0.1154, 0.1181], + device='cuda:1'), in_proj_covar=tensor([0.0241, 0.0217, 0.0259, 0.0220, 0.0222, 0.0259, 0.0259, 0.0229], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 09:41:24,350 INFO [train.py:901] (1/4) Epoch 11, batch 100, loss[loss=0.2287, simple_loss=0.2816, pruned_loss=0.08792, over 7447.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3137, pruned_loss=0.08177, over 640371.14 frames. ], batch size: 17, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:41:29,239 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 09:41:30,713 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:41:35,314 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.679e+02 3.187e+02 3.933e+02 1.063e+03, threshold=6.374e+02, percent-clipped=2.0 +2023-02-06 09:41:50,783 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 09:41:51,851 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:41:58,397 INFO [train.py:901] (1/4) Epoch 11, batch 150, loss[loss=0.2875, simple_loss=0.362, pruned_loss=0.1065, over 8569.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3142, pruned_loss=0.0823, over 858359.17 frames. 
], batch size: 39, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:42:23,729 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:42:34,633 INFO [train.py:901] (1/4) Epoch 11, batch 200, loss[loss=0.219, simple_loss=0.3007, pruned_loss=0.06868, over 8230.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3156, pruned_loss=0.08408, over 1025395.43 frames. ], batch size: 22, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:42:42,993 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:42:47,013 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.935e+02 2.662e+02 3.186e+02 4.005e+02 8.686e+02, threshold=6.371e+02, percent-clipped=5.0 +2023-02-06 09:43:10,553 INFO [train.py:901] (1/4) Epoch 11, batch 250, loss[loss=0.2153, simple_loss=0.3008, pruned_loss=0.06491, over 8608.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3136, pruned_loss=0.08297, over 1156483.34 frames. ], batch size: 49, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:43:21,546 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 09:43:22,296 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:31,348 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 09:43:36,941 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2307, 1.5506, 1.6582, 1.4329, 1.0623, 1.5418, 1.9468, 1.6614], + device='cuda:1'), covar=tensor([0.0470, 0.1255, 0.1691, 0.1404, 0.0616, 0.1539, 0.0658, 0.0603], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0159, 0.0107, 0.0164, 0.0118, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:43:42,713 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:46,009 INFO [train.py:901] (1/4) Epoch 11, batch 300, loss[loss=0.2432, simple_loss=0.3004, pruned_loss=0.093, over 7815.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3116, pruned_loss=0.08174, over 1258573.19 frames. ], batch size: 19, lr: 7.13e-03, grad_scale: 16.0 +2023-02-06 09:43:48,260 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:48,875 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:57,127 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.697e+02 3.136e+02 4.054e+02 9.565e+02, threshold=6.271e+02, percent-clipped=1.0 +2023-02-06 09:44:00,775 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:22,514 INFO [train.py:901] (1/4) Epoch 11, batch 350, loss[loss=0.2394, simple_loss=0.3091, pruned_loss=0.08489, over 7990.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.312, pruned_loss=0.08158, over 1340853.97 frames. ], batch size: 21, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:44:32,494 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. 
limit=2.0 +2023-02-06 09:44:33,016 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:39,210 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0227, 1.6506, 2.2307, 1.8805, 2.0472, 1.9721, 1.6957, 0.6676], + device='cuda:1'), covar=tensor([0.4000, 0.3603, 0.1187, 0.2110, 0.1603, 0.1966, 0.1597, 0.3637], + device='cuda:1'), in_proj_covar=tensor([0.0892, 0.0863, 0.0720, 0.0833, 0.0935, 0.0794, 0.0704, 0.0762], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:44:44,357 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:46,355 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9233, 1.7479, 1.8620, 1.6974, 1.2489, 1.7366, 2.2949, 1.7517], + device='cuda:1'), covar=tensor([0.0452, 0.1172, 0.1599, 0.1290, 0.0564, 0.1390, 0.0630, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0155, 0.0195, 0.0159, 0.0106, 0.0164, 0.0118, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:44:49,728 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:53,755 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:56,281 INFO [train.py:901] (1/4) Epoch 11, batch 400, loss[loss=0.2629, simple_loss=0.3363, pruned_loss=0.09475, over 8702.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3128, pruned_loss=0.08223, over 1401447.65 frames. ], batch size: 34, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:45:00,801 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. 
limit=2.0 +2023-02-06 09:45:08,651 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.601e+02 3.216e+02 4.274e+02 6.931e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 09:45:10,287 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:11,747 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:23,643 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4845, 1.7567, 1.8952, 1.0800, 1.9494, 1.2748, 0.4021, 1.6752], + device='cuda:1'), covar=tensor([0.0375, 0.0220, 0.0165, 0.0365, 0.0228, 0.0689, 0.0565, 0.0167], + device='cuda:1'), in_proj_covar=tensor([0.0373, 0.0304, 0.0259, 0.0372, 0.0293, 0.0457, 0.0345, 0.0339], + device='cuda:1'), out_proj_covar=tensor([1.0753e-04, 8.5703e-05, 7.3022e-05, 1.0561e-04, 8.3944e-05, 1.4109e-04, + 9.9503e-05, 9.6983e-05], device='cuda:1') +2023-02-06 09:45:29,085 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9147, 3.8123, 2.6992, 2.5997, 2.9691, 2.1601, 3.0239, 2.9284], + device='cuda:1'), covar=tensor([0.1394, 0.0286, 0.0767, 0.0631, 0.0511, 0.1102, 0.0774, 0.0887], + device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0233, 0.0308, 0.0296, 0.0303, 0.0325, 0.0338, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 09:45:32,911 INFO [train.py:901] (1/4) Epoch 11, batch 450, loss[loss=0.218, simple_loss=0.3014, pruned_loss=0.06733, over 8361.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3134, pruned_loss=0.08195, over 1453868.09 frames. ], batch size: 24, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:45:57,089 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:58,459 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:46:06,253 INFO [train.py:901] (1/4) Epoch 11, batch 500, loss[loss=0.2103, simple_loss=0.2917, pruned_loss=0.06446, over 8028.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3126, pruned_loss=0.08117, over 1491791.37 frames. ], batch size: 22, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:46:07,904 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-06 09:46:17,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.501e+02 3.364e+02 4.069e+02 6.845e+02, threshold=6.728e+02, percent-clipped=2.0 +2023-02-06 09:46:40,091 INFO [train.py:901] (1/4) Epoch 11, batch 550, loss[loss=0.1891, simple_loss=0.2736, pruned_loss=0.05234, over 7807.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3119, pruned_loss=0.08091, over 1519246.41 frames. 
], batch size: 20, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:47:04,541 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2552, 1.9004, 2.7393, 2.1705, 2.5520, 2.1107, 1.7646, 1.1479], + device='cuda:1'), covar=tensor([0.3963, 0.3748, 0.1134, 0.2384, 0.1694, 0.2228, 0.1772, 0.3862], + device='cuda:1'), in_proj_covar=tensor([0.0886, 0.0853, 0.0717, 0.0828, 0.0927, 0.0789, 0.0701, 0.0759], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:47:15,699 INFO [train.py:901] (1/4) Epoch 11, batch 600, loss[loss=0.273, simple_loss=0.3448, pruned_loss=0.1006, over 7810.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3119, pruned_loss=0.08121, over 1539222.46 frames. ], batch size: 20, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:47:27,353 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.633e+02 3.080e+02 3.885e+02 6.931e+02, threshold=6.160e+02, percent-clipped=1.0 +2023-02-06 09:47:27,526 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8182, 1.5674, 3.1882, 1.2624, 2.1869, 3.4426, 3.4650, 2.9070], + device='cuda:1'), covar=tensor([0.1089, 0.1409, 0.0343, 0.1965, 0.0795, 0.0255, 0.0519, 0.0651], + device='cuda:1'), in_proj_covar=tensor([0.0257, 0.0286, 0.0247, 0.0278, 0.0264, 0.0226, 0.0322, 0.0280], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:1') +2023-02-06 09:47:27,548 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8616, 1.3725, 1.6694, 1.2448, 1.0814, 1.4952, 1.7583, 1.6098], + device='cuda:1'), covar=tensor([0.0523, 0.1326, 0.1747, 0.1460, 0.0612, 0.1515, 0.0703, 0.0600], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0155, 0.0196, 0.0159, 0.0106, 0.0165, 0.0118, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:47:27,967 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 09:47:35,471 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 09:47:42,475 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:47:48,472 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:47:50,444 INFO [train.py:901] (1/4) Epoch 11, batch 650, loss[loss=0.2225, simple_loss=0.306, pruned_loss=0.06945, over 8034.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3124, pruned_loss=0.08151, over 1559219.54 frames. 
], batch size: 22, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:47:59,446 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:04,899 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1510, 2.2276, 1.6970, 2.0414, 1.7511, 1.4367, 1.7466, 1.7138], + device='cuda:1'), covar=tensor([0.1119, 0.0330, 0.0944, 0.0410, 0.0585, 0.1292, 0.0788, 0.0671], + device='cuda:1'), in_proj_covar=tensor([0.0340, 0.0228, 0.0304, 0.0295, 0.0297, 0.0318, 0.0332, 0.0300], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 09:48:08,337 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:26,933 INFO [train.py:901] (1/4) Epoch 11, batch 700, loss[loss=0.2602, simple_loss=0.3353, pruned_loss=0.09261, over 8700.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3109, pruned_loss=0.08078, over 1569934.06 frames. ], batch size: 40, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:48:27,091 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:28,472 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0167, 2.2168, 3.5948, 1.5530, 2.9339, 2.3271, 2.0955, 2.6112], + device='cuda:1'), covar=tensor([0.1455, 0.2292, 0.0630, 0.3844, 0.1414, 0.2472, 0.1586, 0.2290], + device='cuda:1'), in_proj_covar=tensor([0.0480, 0.0504, 0.0526, 0.0567, 0.0609, 0.0541, 0.0466, 0.0608], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:48:38,762 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.541e+02 3.049e+02 3.626e+02 6.264e+02, threshold=6.097e+02, percent-clipped=1.0 +2023-02-06 09:49:01,395 INFO [train.py:901] (1/4) Epoch 11, batch 750, loss[loss=0.2535, simple_loss=0.3196, pruned_loss=0.09364, over 7696.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3108, pruned_loss=0.08033, over 1579982.79 frames. ], batch size: 18, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:49:07,835 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 09:49:10,212 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81593.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:49:10,586 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 09:49:21,636 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3230, 1.8914, 3.0207, 2.2879, 2.6518, 2.1567, 1.6580, 1.3022], + device='cuda:1'), covar=tensor([0.3754, 0.4105, 0.1001, 0.2446, 0.1868, 0.2120, 0.1822, 0.3880], + device='cuda:1'), in_proj_covar=tensor([0.0885, 0.0855, 0.0718, 0.0833, 0.0931, 0.0790, 0.0701, 0.0761], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:49:24,850 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-06 09:49:25,673 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3427, 1.5293, 1.3708, 1.8170, 0.8024, 1.2413, 1.2887, 1.5210], + device='cuda:1'), covar=tensor([0.0914, 0.0768, 0.1122, 0.0524, 0.1142, 0.1421, 0.0856, 0.0730], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0212, 0.0254, 0.0216, 0.0216, 0.0254, 0.0257, 0.0226], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 09:49:29,019 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7743, 5.9901, 5.0311, 2.2676, 5.2147, 5.5389, 5.6156, 5.3263], + device='cuda:1'), covar=tensor([0.0621, 0.0436, 0.1039, 0.5184, 0.0736, 0.0638, 0.1050, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0368, 0.0379, 0.0479, 0.0373, 0.0365, 0.0367, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:49:33,722 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 09:49:36,455 INFO [train.py:901] (1/4) Epoch 11, batch 800, loss[loss=0.2375, simple_loss=0.3008, pruned_loss=0.08707, over 7811.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3098, pruned_loss=0.08019, over 1585505.67 frames. ], batch size: 20, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:49:49,270 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.494e+02 2.971e+02 3.970e+02 9.403e+02, threshold=5.941e+02, percent-clipped=2.0 +2023-02-06 09:49:58,265 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:49:59,646 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81663.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:50:11,555 INFO [train.py:901] (1/4) Epoch 11, batch 850, loss[loss=0.2205, simple_loss=0.2917, pruned_loss=0.07466, over 7228.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3114, pruned_loss=0.08056, over 1594570.83 frames. ], batch size: 16, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:50:16,503 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:50:46,088 INFO [train.py:901] (1/4) Epoch 11, batch 900, loss[loss=0.2466, simple_loss=0.3352, pruned_loss=0.07898, over 8525.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3115, pruned_loss=0.08043, over 1603397.27 frames. 
], batch size: 29, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:50:58,849 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.543e+02 3.289e+02 4.286e+02 9.063e+02, threshold=6.577e+02, percent-clipped=7.0 +2023-02-06 09:51:08,752 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0399, 1.4591, 1.4615, 1.2408, 1.1149, 1.2979, 1.7382, 1.3554], + device='cuda:1'), covar=tensor([0.0541, 0.1280, 0.1791, 0.1492, 0.0593, 0.1595, 0.0734, 0.0655], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0159, 0.0106, 0.0165, 0.0118, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:51:18,680 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81776.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:51:20,092 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:51:21,851 INFO [train.py:901] (1/4) Epoch 11, batch 950, loss[loss=0.2174, simple_loss=0.2832, pruned_loss=0.07582, over 7807.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3125, pruned_loss=0.08159, over 1609703.83 frames. ], batch size: 20, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:51:37,583 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1026, 0.9926, 1.0983, 1.0944, 0.7641, 1.2036, 0.0395, 0.8939], + device='cuda:1'), covar=tensor([0.2452, 0.1638, 0.0610, 0.1105, 0.3663, 0.0633, 0.3311, 0.1653], + device='cuda:1'), in_proj_covar=tensor([0.0163, 0.0164, 0.0094, 0.0210, 0.0253, 0.0102, 0.0162, 0.0159], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 09:51:51,854 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 09:51:56,016 INFO [train.py:901] (1/4) Epoch 11, batch 1000, loss[loss=0.2415, simple_loss=0.3151, pruned_loss=0.0839, over 7914.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3124, pruned_loss=0.08126, over 1609464.27 frames. ], batch size: 20, lr: 7.10e-03, grad_scale: 8.0 +2023-02-06 09:52:06,321 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4982, 1.8680, 3.1050, 1.2928, 2.2701, 1.8697, 1.5412, 2.1231], + device='cuda:1'), covar=tensor([0.1686, 0.2073, 0.0716, 0.3904, 0.1464, 0.2774, 0.1816, 0.2027], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0515, 0.0537, 0.0582, 0.0618, 0.0552, 0.0474, 0.0614], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 09:52:07,440 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.713e+02 3.211e+02 4.023e+02 7.481e+02, threshold=6.422e+02, percent-clipped=3.0 +2023-02-06 09:52:08,391 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:52:13,880 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 09:52:22,680 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 09:52:27,198 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-06 09:52:27,395 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:52:31,957 INFO [train.py:901] (1/4) Epoch 11, batch 1050, loss[loss=0.1963, simple_loss=0.2783, pruned_loss=0.0571, over 7787.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3124, pruned_loss=0.08116, over 1610091.47 frames. ], batch size: 19, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:52:39,044 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 09:53:06,144 INFO [train.py:901] (1/4) Epoch 11, batch 1100, loss[loss=0.2332, simple_loss=0.3039, pruned_loss=0.08128, over 7984.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.312, pruned_loss=0.08112, over 1609724.27 frames. ], batch size: 21, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:53:18,507 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.436e+02 2.887e+02 3.709e+02 9.106e+02, threshold=5.774e+02, percent-clipped=2.0 +2023-02-06 09:53:36,920 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4062, 1.4508, 2.2847, 1.1372, 2.0264, 2.4994, 2.5474, 2.0925], + device='cuda:1'), covar=tensor([0.0936, 0.1098, 0.0477, 0.1955, 0.0666, 0.0367, 0.0582, 0.0759], + device='cuda:1'), in_proj_covar=tensor([0.0262, 0.0297, 0.0255, 0.0287, 0.0271, 0.0232, 0.0332, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 09:53:36,945 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2231, 1.2579, 1.4408, 1.1859, 0.8587, 1.2055, 1.2918, 1.0817], + device='cuda:1'), covar=tensor([0.0542, 0.1220, 0.1667, 0.1320, 0.0544, 0.1525, 0.0660, 0.0611], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0153, 0.0193, 0.0157, 0.0104, 0.0164, 0.0117, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 09:53:41,609 INFO [train.py:901] (1/4) Epoch 11, batch 1150, loss[loss=0.2438, simple_loss=0.3136, pruned_loss=0.08702, over 8611.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3119, pruned_loss=0.08111, over 1612728.57 frames. ], batch size: 49, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:53:51,761 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 09:54:00,452 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:12,086 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2130, 1.8885, 2.7700, 2.2194, 2.5442, 2.1065, 1.6900, 1.2972], + device='cuda:1'), covar=tensor([0.3701, 0.3821, 0.1058, 0.2432, 0.1711, 0.2132, 0.1644, 0.3984], + device='cuda:1'), in_proj_covar=tensor([0.0869, 0.0840, 0.0710, 0.0819, 0.0907, 0.0778, 0.0690, 0.0749], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 09:54:17,961 INFO [train.py:901] (1/4) Epoch 11, batch 1200, loss[loss=0.1882, simple_loss=0.2575, pruned_loss=0.05952, over 7642.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3118, pruned_loss=0.08092, over 1615345.34 frames. 
], batch size: 19, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:54:18,728 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:18,847 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:20,244 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:26,899 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3251, 1.8005, 3.0540, 1.3400, 2.1360, 3.3372, 3.3437, 2.8055], + device='cuda:1'), covar=tensor([0.0762, 0.1293, 0.0386, 0.2004, 0.0975, 0.0262, 0.0509, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0262, 0.0298, 0.0255, 0.0287, 0.0273, 0.0232, 0.0332, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 09:54:29,502 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.664e+02 3.172e+02 3.772e+02 1.117e+03, threshold=6.345e+02, percent-clipped=5.0 +2023-02-06 09:54:36,794 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:38,233 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:45,481 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 09:54:47,999 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:54:53,463 INFO [train.py:901] (1/4) Epoch 11, batch 1250, loss[loss=0.3083, simple_loss=0.3753, pruned_loss=0.1206, over 8097.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3117, pruned_loss=0.08073, over 1617425.33 frames. ], batch size: 23, lr: 7.09e-03, grad_scale: 8.0 +2023-02-06 09:55:29,355 INFO [train.py:901] (1/4) Epoch 11, batch 1300, loss[loss=0.2403, simple_loss=0.3181, pruned_loss=0.08129, over 8496.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3121, pruned_loss=0.08097, over 1617317.93 frames. ], batch size: 26, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:55:40,416 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:55:40,861 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.583e+02 3.223e+02 4.179e+02 7.623e+02, threshold=6.447e+02, percent-clipped=2.0 +2023-02-06 09:55:57,032 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1524, 1.0916, 1.2824, 1.0931, 0.8863, 1.3122, 0.2048, 0.8997], + device='cuda:1'), covar=tensor([0.2283, 0.1677, 0.0517, 0.1556, 0.3125, 0.0576, 0.3102, 0.1791], + device='cuda:1'), in_proj_covar=tensor([0.0162, 0.0163, 0.0095, 0.0211, 0.0253, 0.0102, 0.0161, 0.0159], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 09:56:03,693 INFO [train.py:901] (1/4) Epoch 11, batch 1350, loss[loss=0.2432, simple_loss=0.3123, pruned_loss=0.08705, over 7648.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3141, pruned_loss=0.08256, over 1618825.57 frames. 
], batch size: 19, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:56:38,846 INFO [train.py:901] (1/4) Epoch 11, batch 1400, loss[loss=0.276, simple_loss=0.3493, pruned_loss=0.1014, over 8249.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3136, pruned_loss=0.08224, over 1618592.80 frames. ], batch size: 24, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:56:51,063 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.627e+02 3.119e+02 3.954e+02 1.224e+03, threshold=6.238e+02, percent-clipped=1.0 +2023-02-06 09:56:55,576 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. limit=2.0 +2023-02-06 09:57:13,605 INFO [train.py:901] (1/4) Epoch 11, batch 1450, loss[loss=0.2215, simple_loss=0.3019, pruned_loss=0.07059, over 7925.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3123, pruned_loss=0.08157, over 1615724.80 frames. ], batch size: 20, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:57:27,730 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 09:57:28,847 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.78 vs. limit=5.0 +2023-02-06 09:57:42,966 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 09:57:48,805 INFO [train.py:901] (1/4) Epoch 11, batch 1500, loss[loss=0.2821, simple_loss=0.3498, pruned_loss=0.1072, over 8696.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3137, pruned_loss=0.08297, over 1608928.83 frames. ], batch size: 34, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:58:01,391 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.943e+02 2.743e+02 3.193e+02 4.270e+02 9.879e+02, threshold=6.387e+02, percent-clipped=7.0 +2023-02-06 09:58:02,230 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:13,191 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:16,175 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 09:58:24,635 INFO [train.py:901] (1/4) Epoch 11, batch 1550, loss[loss=0.2523, simple_loss=0.3258, pruned_loss=0.08943, over 8473.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3136, pruned_loss=0.08283, over 1612869.51 frames. 
], batch size: 27, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 09:58:39,990 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82403.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:50,113 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:50,233 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3125, 2.9046, 2.3574, 4.0212, 1.6480, 2.1040, 2.2359, 3.0441], + device='cuda:1'), covar=tensor([0.0837, 0.0884, 0.0946, 0.0241, 0.1232, 0.1323, 0.1168, 0.0765], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0212, 0.0251, 0.0216, 0.0216, 0.0252, 0.0253, 0.0223], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 09:58:57,860 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:59,750 INFO [train.py:901] (1/4) Epoch 11, batch 1600, loss[loss=0.2306, simple_loss=0.3185, pruned_loss=0.07133, over 8100.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.3137, pruned_loss=0.08199, over 1617236.35 frames. ], batch size: 23, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 09:59:12,998 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.328e+02 2.878e+02 3.468e+02 7.869e+02, threshold=5.757e+02, percent-clipped=2.0 +2023-02-06 09:59:24,134 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:59:36,407 INFO [train.py:901] (1/4) Epoch 11, batch 1650, loss[loss=0.2406, simple_loss=0.3216, pruned_loss=0.07983, over 8194.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3134, pruned_loss=0.0818, over 1618066.97 frames. ], batch size: 23, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 09:59:57,313 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82511.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:00:11,613 INFO [train.py:901] (1/4) Epoch 11, batch 1700, loss[loss=0.2552, simple_loss=0.3326, pruned_loss=0.08892, over 8529.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3138, pruned_loss=0.08227, over 1619571.48 frames. ], batch size: 49, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 10:00:12,454 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:00:17,650 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 10:00:23,206 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.517e+02 3.185e+02 4.066e+02 8.085e+02, threshold=6.370e+02, percent-clipped=5.0 +2023-02-06 10:00:47,528 INFO [train.py:901] (1/4) Epoch 11, batch 1750, loss[loss=0.238, simple_loss=0.3116, pruned_loss=0.08214, over 8499.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3128, pruned_loss=0.08198, over 1613068.97 frames. ], batch size: 26, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:00:52,548 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:01:23,306 INFO [train.py:901] (1/4) Epoch 11, batch 1800, loss[loss=0.2375, simple_loss=0.315, pruned_loss=0.07998, over 8469.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3117, pruned_loss=0.08141, over 1609251.41 frames. 
], batch size: 25, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:01:35,823 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.598e+02 3.107e+02 4.193e+02 1.199e+03, threshold=6.213e+02, percent-clipped=8.0 +2023-02-06 10:01:58,592 INFO [train.py:901] (1/4) Epoch 11, batch 1850, loss[loss=0.2326, simple_loss=0.3041, pruned_loss=0.08053, over 7279.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3114, pruned_loss=0.08147, over 1607750.86 frames. ], batch size: 16, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:02:18,371 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:02:26,596 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:02:34,013 INFO [train.py:901] (1/4) Epoch 11, batch 1900, loss[loss=0.2494, simple_loss=0.3301, pruned_loss=0.08431, over 8339.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3107, pruned_loss=0.08082, over 1608626.97 frames. ], batch size: 26, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:02:43,694 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:02:45,528 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.442e+02 3.142e+02 3.936e+02 6.780e+02, threshold=6.284e+02, percent-clipped=1.0 +2023-02-06 10:03:08,879 INFO [train.py:901] (1/4) Epoch 11, batch 1950, loss[loss=0.2322, simple_loss=0.3131, pruned_loss=0.07569, over 8479.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3107, pruned_loss=0.0809, over 1613752.00 frames. ], batch size: 25, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:03:12,343 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 10:03:13,853 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:03:26,471 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 10:03:32,231 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:03:39,051 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:03:44,652 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 10:03:45,035 INFO [train.py:901] (1/4) Epoch 11, batch 2000, loss[loss=0.173, simple_loss=0.2562, pruned_loss=0.04491, over 7536.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3108, pruned_loss=0.08061, over 1615134.37 frames. ], batch size: 18, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:03:47,024 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 10:03:56,672 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.675e+02 3.279e+02 3.987e+02 1.082e+03, threshold=6.559e+02, percent-clipped=7.0 +2023-02-06 10:04:01,553 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82855.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:04:10,616 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0023, 1.2517, 1.1947, 0.5133, 1.2109, 1.0223, 0.0764, 1.1291], + device='cuda:1'), covar=tensor([0.0263, 0.0225, 0.0191, 0.0347, 0.0257, 0.0625, 0.0506, 0.0194], + device='cuda:1'), in_proj_covar=tensor([0.0383, 0.0317, 0.0266, 0.0377, 0.0303, 0.0468, 0.0351, 0.0347], + device='cuda:1'), out_proj_covar=tensor([1.1015e-04, 8.9331e-05, 7.4800e-05, 1.0639e-04, 8.6493e-05, 1.4422e-04, + 1.0102e-04, 9.9094e-05], device='cuda:1') +2023-02-06 10:04:19,314 INFO [train.py:901] (1/4) Epoch 11, batch 2050, loss[loss=0.1898, simple_loss=0.2642, pruned_loss=0.05772, over 7237.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.31, pruned_loss=0.08022, over 1612537.85 frames. ], batch size: 16, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:04:55,307 INFO [train.py:901] (1/4) Epoch 11, batch 2100, loss[loss=0.1932, simple_loss=0.2788, pruned_loss=0.05381, over 8145.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3113, pruned_loss=0.08043, over 1618488.13 frames. ], batch size: 22, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:04:55,377 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:05:07,190 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.489e+02 3.174e+02 3.706e+02 9.083e+02, threshold=6.348e+02, percent-clipped=2.0 +2023-02-06 10:05:13,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8258, 1.6751, 1.8314, 1.6197, 0.9941, 1.7988, 2.3065, 1.9656], + device='cuda:1'), covar=tensor([0.0445, 0.1182, 0.1614, 0.1318, 0.0622, 0.1382, 0.0630, 0.0576], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0160, 0.0105, 0.0166, 0.0118, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 10:05:22,088 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82970.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:05:29,198 INFO [train.py:901] (1/4) Epoch 11, batch 2150, loss[loss=0.2346, simple_loss=0.2966, pruned_loss=0.08632, over 7790.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3121, pruned_loss=0.08078, over 1621549.72 frames. ], batch size: 19, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:05:29,371 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82981.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:05:49,414 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 10:06:04,068 INFO [train.py:901] (1/4) Epoch 11, batch 2200, loss[loss=0.2666, simple_loss=0.34, pruned_loss=0.09664, over 8109.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3116, pruned_loss=0.08069, over 1620549.32 frames. 
], batch size: 23, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:06:15,769 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:06:16,953 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.510e+02 3.092e+02 4.104e+02 1.639e+03, threshold=6.185e+02, percent-clipped=4.0 +2023-02-06 10:06:38,935 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:06:40,017 INFO [train.py:901] (1/4) Epoch 11, batch 2250, loss[loss=0.1936, simple_loss=0.2721, pruned_loss=0.05755, over 7911.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3102, pruned_loss=0.08003, over 1616684.80 frames. ], batch size: 20, lr: 7.04e-03, grad_scale: 8.0 +2023-02-06 10:06:46,977 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0001, 1.5362, 1.3823, 1.6499, 1.3283, 1.2352, 1.3327, 1.3914], + device='cuda:1'), covar=tensor([0.1006, 0.0428, 0.1088, 0.0455, 0.0611, 0.1273, 0.0723, 0.0619], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0236, 0.0315, 0.0297, 0.0304, 0.0326, 0.0341, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 10:06:49,943 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 10:06:55,778 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:07:14,108 INFO [train.py:901] (1/4) Epoch 11, batch 2300, loss[loss=0.223, simple_loss=0.2959, pruned_loss=0.07504, over 8149.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3092, pruned_loss=0.08003, over 1611880.08 frames. ], batch size: 22, lr: 7.04e-03, grad_scale: 8.0 +2023-02-06 10:07:25,651 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.480e+02 3.199e+02 4.275e+02 9.806e+02, threshold=6.398e+02, percent-clipped=6.0 +2023-02-06 10:07:48,928 INFO [train.py:901] (1/4) Epoch 11, batch 2350, loss[loss=0.2179, simple_loss=0.308, pruned_loss=0.06391, over 8453.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3095, pruned_loss=0.0796, over 1616325.23 frames. ], batch size: 27, lr: 7.04e-03, grad_scale: 16.0 +2023-02-06 10:08:12,069 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83214.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:08:20,036 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83226.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:08:23,031 INFO [train.py:901] (1/4) Epoch 11, batch 2400, loss[loss=0.1942, simple_loss=0.2653, pruned_loss=0.06155, over 7267.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3096, pruned_loss=0.07993, over 1610735.43 frames. ], batch size: 16, lr: 7.04e-03, grad_scale: 16.0 +2023-02-06 10:08:35,100 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.359e+02 2.853e+02 3.666e+02 7.740e+02, threshold=5.706e+02, percent-clipped=1.0 +2023-02-06 10:08:37,261 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83251.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:08:58,651 INFO [train.py:901] (1/4) Epoch 11, batch 2450, loss[loss=0.2393, simple_loss=0.3123, pruned_loss=0.08313, over 8287.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.308, pruned_loss=0.07945, over 1611906.76 frames. 
], batch size: 23, lr: 7.04e-03, grad_scale: 16.0 +2023-02-06 10:09:13,729 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:13,799 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:29,004 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:30,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:32,767 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 10:09:33,056 INFO [train.py:901] (1/4) Epoch 11, batch 2500, loss[loss=0.2909, simple_loss=0.3441, pruned_loss=0.1189, over 6897.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3093, pruned_loss=0.08042, over 1613897.79 frames. ], batch size: 72, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:09:44,609 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.634e+02 3.143e+02 3.904e+02 7.323e+02, threshold=6.285e+02, percent-clipped=4.0 +2023-02-06 10:10:00,399 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1854, 1.8015, 2.5449, 2.0934, 2.3049, 2.0659, 1.7048, 1.0643], + device='cuda:1'), covar=tensor([0.3852, 0.3691, 0.1072, 0.2222, 0.1668, 0.1892, 0.1568, 0.3798], + device='cuda:1'), in_proj_covar=tensor([0.0878, 0.0859, 0.0727, 0.0832, 0.0932, 0.0785, 0.0700, 0.0758], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:10:07,376 INFO [train.py:901] (1/4) Epoch 11, batch 2550, loss[loss=0.2248, simple_loss=0.2998, pruned_loss=0.0749, over 7804.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3102, pruned_loss=0.08078, over 1612639.10 frames. ], batch size: 19, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:10:43,105 INFO [train.py:901] (1/4) Epoch 11, batch 2600, loss[loss=0.2126, simple_loss=0.2989, pruned_loss=0.06315, over 8289.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3101, pruned_loss=0.08016, over 1617423.57 frames. ], batch size: 23, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:10:49,495 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:10:54,713 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.607e+02 3.192e+02 4.372e+02 8.439e+02, threshold=6.384e+02, percent-clipped=10.0 +2023-02-06 10:11:17,508 INFO [train.py:901] (1/4) Epoch 11, batch 2650, loss[loss=0.2358, simple_loss=0.311, pruned_loss=0.0803, over 8291.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3105, pruned_loss=0.08084, over 1617321.17 frames. ], batch size: 23, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:11:52,414 INFO [train.py:901] (1/4) Epoch 11, batch 2700, loss[loss=0.2216, simple_loss=0.3043, pruned_loss=0.06949, over 8651.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3103, pruned_loss=0.08093, over 1616503.99 frames. 
], batch size: 27, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:12:04,667 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.578e+02 3.131e+02 4.095e+02 6.916e+02, threshold=6.263e+02, percent-clipped=2.0 +2023-02-06 10:12:06,306 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7281, 1.9177, 1.6423, 2.2955, 1.0914, 1.3893, 1.6033, 2.0033], + device='cuda:1'), covar=tensor([0.0833, 0.0869, 0.1116, 0.0496, 0.1181, 0.1608, 0.0961, 0.0779], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0215, 0.0252, 0.0217, 0.0217, 0.0254, 0.0257, 0.0224], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:12:11,630 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:12:17,332 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.11 vs. limit=5.0 +2023-02-06 10:12:27,325 INFO [train.py:901] (1/4) Epoch 11, batch 2750, loss[loss=0.215, simple_loss=0.3003, pruned_loss=0.0648, over 8282.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3104, pruned_loss=0.08075, over 1609795.65 frames. ], batch size: 23, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:12:49,818 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.1570, 1.7241, 6.2463, 2.0881, 5.7335, 5.3236, 5.8973, 5.7548], + device='cuda:1'), covar=tensor([0.0328, 0.3693, 0.0227, 0.2923, 0.0744, 0.0604, 0.0273, 0.0337], + device='cuda:1'), in_proj_covar=tensor([0.0478, 0.0567, 0.0577, 0.0524, 0.0596, 0.0498, 0.0501, 0.0571], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:12:51,398 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 10:13:03,333 INFO [train.py:901] (1/4) Epoch 11, batch 2800, loss[loss=0.1959, simple_loss=0.2807, pruned_loss=0.05553, over 8034.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3104, pruned_loss=0.08045, over 1611138.47 frames. ], batch size: 22, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:13:03,709 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 10:13:13,810 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:13:15,055 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.535e+02 3.136e+02 3.769e+02 1.201e+03, threshold=6.273e+02, percent-clipped=3.0 +2023-02-06 10:13:32,686 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83673.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:13:37,613 INFO [train.py:901] (1/4) Epoch 11, batch 2850, loss[loss=0.3307, simple_loss=0.3817, pruned_loss=0.1399, over 8505.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.309, pruned_loss=0.08022, over 1606325.80 frames. 
], batch size: 28, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:13:47,918 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83696.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:13:49,189 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0474, 1.3974, 1.6166, 1.2829, 1.1070, 1.3904, 1.6864, 1.8337], + device='cuda:1'), covar=tensor([0.0510, 0.1230, 0.1780, 0.1398, 0.0571, 0.1491, 0.0694, 0.0545], + device='cuda:1'), in_proj_covar=tensor([0.0102, 0.0154, 0.0194, 0.0159, 0.0105, 0.0164, 0.0117, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 10:14:05,650 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:12,966 INFO [train.py:901] (1/4) Epoch 11, batch 2900, loss[loss=0.2168, simple_loss=0.3008, pruned_loss=0.06647, over 8682.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3098, pruned_loss=0.08029, over 1607511.76 frames. ], batch size: 34, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:14:25,269 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.545e+02 3.159e+02 4.165e+02 9.643e+02, threshold=6.318e+02, percent-clipped=5.0 +2023-02-06 10:14:34,286 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:46,314 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:48,167 INFO [train.py:901] (1/4) Epoch 11, batch 2950, loss[loss=0.2421, simple_loss=0.3062, pruned_loss=0.08896, over 7262.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3108, pruned_loss=0.08096, over 1605897.02 frames. ], batch size: 16, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:14:53,616 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 10:15:09,067 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 10:15:18,756 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.78 vs. limit=5.0 +2023-02-06 10:15:22,299 INFO [train.py:901] (1/4) Epoch 11, batch 3000, loss[loss=0.2265, simple_loss=0.3024, pruned_loss=0.07531, over 7935.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3114, pruned_loss=0.08143, over 1606715.58 frames. ], batch size: 20, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:15:22,299 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 10:15:34,555 INFO [train.py:935] (1/4) Epoch 11, validation: loss=0.1889, simple_loss=0.2886, pruned_loss=0.04461, over 944034.00 frames. +2023-02-06 10:15:34,556 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 10:15:46,618 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.511e+02 2.977e+02 3.600e+02 5.313e+02, threshold=5.953e+02, percent-clipped=0.0 +2023-02-06 10:16:10,359 INFO [train.py:901] (1/4) Epoch 11, batch 3050, loss[loss=0.2228, simple_loss=0.3124, pruned_loss=0.06665, over 8353.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3122, pruned_loss=0.08217, over 1606028.46 frames. 
], batch size: 24, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:16:21,308 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9178, 1.6139, 1.3382, 1.5861, 1.3772, 1.2009, 1.2330, 1.2817], + device='cuda:1'), covar=tensor([0.1042, 0.0409, 0.1146, 0.0518, 0.0593, 0.1279, 0.0878, 0.0691], + device='cuda:1'), in_proj_covar=tensor([0.0344, 0.0234, 0.0314, 0.0295, 0.0299, 0.0320, 0.0338, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 10:16:43,140 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:16:44,272 INFO [train.py:901] (1/4) Epoch 11, batch 3100, loss[loss=0.1743, simple_loss=0.2598, pruned_loss=0.04442, over 7638.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3114, pruned_loss=0.08129, over 1607248.80 frames. ], batch size: 19, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:16:55,415 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 2.748e+02 3.262e+02 3.755e+02 7.942e+02, threshold=6.525e+02, percent-clipped=1.0 +2023-02-06 10:17:00,081 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:08,763 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:18,428 INFO [train.py:901] (1/4) Epoch 11, batch 3150, loss[loss=0.1815, simple_loss=0.2561, pruned_loss=0.05347, over 7545.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.311, pruned_loss=0.08113, over 1604901.10 frames. ], batch size: 18, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:17:34,199 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:42,318 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4384, 2.0993, 2.9183, 2.3589, 2.7236, 2.2800, 1.8584, 1.3032], + device='cuda:1'), covar=tensor([0.3728, 0.3786, 0.1116, 0.2630, 0.1979, 0.2108, 0.1595, 0.4362], + device='cuda:1'), in_proj_covar=tensor([0.0884, 0.0865, 0.0727, 0.0837, 0.0934, 0.0793, 0.0700, 0.0763], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:17:44,278 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:53,258 INFO [train.py:901] (1/4) Epoch 11, batch 3200, loss[loss=0.2384, simple_loss=0.3134, pruned_loss=0.08175, over 8429.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.312, pruned_loss=0.08162, over 1606849.63 frames. ], batch size: 49, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:18:01,393 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:18:05,770 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.726e+02 3.369e+02 4.220e+02 9.302e+02, threshold=6.739e+02, percent-clipped=4.0 +2023-02-06 10:18:27,189 INFO [train.py:901] (1/4) Epoch 11, batch 3250, loss[loss=0.2266, simple_loss=0.2997, pruned_loss=0.0768, over 7971.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3136, pruned_loss=0.08257, over 1613686.75 frames. 
], batch size: 21, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:18:50,436 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84115.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:18:55,080 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:19:01,839 INFO [train.py:901] (1/4) Epoch 11, batch 3300, loss[loss=0.2656, simple_loss=0.3364, pruned_loss=0.09742, over 8688.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3134, pruned_loss=0.08208, over 1612428.70 frames. ], batch size: 34, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:19:08,603 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9138, 1.9720, 2.3623, 1.6045, 1.2073, 2.5569, 0.3991, 1.5633], + device='cuda:1'), covar=tensor([0.2161, 0.1635, 0.0520, 0.2230, 0.4322, 0.0422, 0.3355, 0.1778], + device='cuda:1'), in_proj_covar=tensor([0.0164, 0.0167, 0.0097, 0.0215, 0.0253, 0.0104, 0.0166, 0.0165], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 10:19:13,378 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.729e+02 3.101e+02 4.103e+02 8.191e+02, threshold=6.202e+02, percent-clipped=3.0 +2023-02-06 10:19:35,418 INFO [train.py:901] (1/4) Epoch 11, batch 3350, loss[loss=0.2216, simple_loss=0.3103, pruned_loss=0.06645, over 8103.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3148, pruned_loss=0.08331, over 1611969.81 frames. ], batch size: 23, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:19:37,882 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 10:20:01,020 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84217.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:20:10,209 INFO [train.py:901] (1/4) Epoch 11, batch 3400, loss[loss=0.1967, simple_loss=0.2856, pruned_loss=0.05391, over 8251.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3147, pruned_loss=0.08325, over 1615675.28 frames. ], batch size: 24, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:20:15,228 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84237.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:20:23,165 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 2.553e+02 3.068e+02 3.977e+02 7.727e+02, threshold=6.137e+02, percent-clipped=2.0 +2023-02-06 10:20:26,658 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:20:45,376 INFO [train.py:901] (1/4) Epoch 11, batch 3450, loss[loss=0.2279, simple_loss=0.3096, pruned_loss=0.07311, over 8450.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3137, pruned_loss=0.08234, over 1612259.61 frames. ], batch size: 27, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:21:06,407 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:21:20,277 INFO [train.py:901] (1/4) Epoch 11, batch 3500, loss[loss=0.234, simple_loss=0.3209, pruned_loss=0.07354, over 8202.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3133, pruned_loss=0.08202, over 1614527.06 frames. 
], batch size: 23, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:21:31,076 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:21:32,267 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.912e+02 2.703e+02 3.166e+02 4.187e+02 8.001e+02, threshold=6.332e+02, percent-clipped=6.0 +2023-02-06 10:21:36,449 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:21:48,766 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 10:21:51,923 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 10:21:54,137 INFO [train.py:901] (1/4) Epoch 11, batch 3550, loss[loss=0.2419, simple_loss=0.3231, pruned_loss=0.0803, over 8338.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3132, pruned_loss=0.08207, over 1613221.67 frames. ], batch size: 26, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:22:25,864 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84426.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:22:28,968 INFO [train.py:901] (1/4) Epoch 11, batch 3600, loss[loss=0.2521, simple_loss=0.3236, pruned_loss=0.09031, over 8081.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3143, pruned_loss=0.08313, over 1609302.00 frames. ], batch size: 21, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:22:37,194 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3131, 1.8997, 2.7705, 2.3393, 2.5758, 2.0906, 1.7068, 1.3412], + device='cuda:1'), covar=tensor([0.3764, 0.3744, 0.1238, 0.2360, 0.1853, 0.2132, 0.1604, 0.3831], + device='cuda:1'), in_proj_covar=tensor([0.0881, 0.0861, 0.0725, 0.0841, 0.0935, 0.0795, 0.0702, 0.0761], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:22:41,783 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.788e+02 3.447e+02 4.179e+02 1.001e+03, threshold=6.895e+02, percent-clipped=4.0 +2023-02-06 10:22:48,584 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:22:50,731 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:03,532 INFO [train.py:901] (1/4) Epoch 11, batch 3650, loss[loss=0.2601, simple_loss=0.3092, pruned_loss=0.1055, over 7974.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3123, pruned_loss=0.08208, over 1607250.54 frames. 
], batch size: 21, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:23:11,597 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84493.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:12,298 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5329, 2.1578, 4.4725, 1.2748, 3.1004, 2.2803, 1.5853, 2.7084], + device='cuda:1'), covar=tensor([0.1716, 0.2247, 0.0577, 0.3847, 0.1435, 0.2606, 0.1859, 0.2333], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0514, 0.0530, 0.0575, 0.0613, 0.0548, 0.0471, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:23:28,891 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84518.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:37,213 INFO [train.py:901] (1/4) Epoch 11, batch 3700, loss[loss=0.2215, simple_loss=0.2849, pruned_loss=0.07904, over 7659.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3126, pruned_loss=0.08221, over 1609726.18 frames. ], batch size: 19, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:23:46,126 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:48,582 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 10:23:49,860 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.648e+02 3.219e+02 3.938e+02 7.332e+02, threshold=6.437e+02, percent-clipped=1.0 +2023-02-06 10:23:58,479 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:24:07,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:24:11,829 INFO [train.py:901] (1/4) Epoch 11, batch 3750, loss[loss=0.2619, simple_loss=0.3406, pruned_loss=0.09162, over 8658.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.313, pruned_loss=0.08175, over 1616105.25 frames. ], batch size: 39, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:24:23,993 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:24:46,862 INFO [train.py:901] (1/4) Epoch 11, batch 3800, loss[loss=0.3195, simple_loss=0.375, pruned_loss=0.132, over 8724.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3138, pruned_loss=0.08227, over 1612652.66 frames. ], batch size: 39, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:24:53,230 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-06 10:24:58,768 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.607e+02 3.118e+02 4.251e+02 1.041e+03, threshold=6.237e+02, percent-clipped=4.0 +2023-02-06 10:25:00,926 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:18,144 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84676.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:21,363 INFO [train.py:901] (1/4) Epoch 11, batch 3850, loss[loss=0.2574, simple_loss=0.3302, pruned_loss=0.09232, over 7057.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.3134, pruned_loss=0.08218, over 1608853.39 frames. 
], batch size: 72, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:25:22,265 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:25,124 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-06 10:25:25,853 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 10:25:32,819 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:39,748 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:43,784 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:47,047 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:51,556 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 10:25:55,495 INFO [train.py:901] (1/4) Epoch 11, batch 3900, loss[loss=0.2602, simple_loss=0.3367, pruned_loss=0.09185, over 8030.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.313, pruned_loss=0.08198, over 1607366.22 frames. ], batch size: 22, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:26:03,709 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:26:08,301 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.619e+02 3.238e+02 3.926e+02 9.069e+02, threshold=6.476e+02, percent-clipped=5.0 +2023-02-06 10:26:19,960 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5365, 1.8670, 2.0285, 1.1145, 2.0546, 1.2608, 0.5882, 1.6930], + device='cuda:1'), covar=tensor([0.0531, 0.0301, 0.0202, 0.0440, 0.0313, 0.0737, 0.0689, 0.0251], + device='cuda:1'), in_proj_covar=tensor([0.0386, 0.0322, 0.0267, 0.0380, 0.0307, 0.0470, 0.0355, 0.0350], + device='cuda:1'), out_proj_covar=tensor([1.1065e-04, 9.0333e-05, 7.4837e-05, 1.0719e-04, 8.7397e-05, 1.4397e-04, + 1.0181e-04, 9.9744e-05], device='cuda:1') +2023-02-06 10:26:30,326 INFO [train.py:901] (1/4) Epoch 11, batch 3950, loss[loss=0.2149, simple_loss=0.285, pruned_loss=0.0724, over 7255.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3136, pruned_loss=0.08231, over 1609955.03 frames. ], batch size: 16, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:26:52,780 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:26:56,202 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84818.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:27:04,809 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:27:05,277 INFO [train.py:901] (1/4) Epoch 11, batch 4000, loss[loss=0.2218, simple_loss=0.2908, pruned_loss=0.07636, over 7522.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3135, pruned_loss=0.08196, over 1613408.39 frames. ], batch size: 18, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:27:07,061 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. 
limit=2.0 +2023-02-06 10:27:17,173 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.608e+02 2.990e+02 3.694e+02 8.393e+02, threshold=5.981e+02, percent-clipped=2.0 +2023-02-06 10:27:21,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84855.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:27:39,667 INFO [train.py:901] (1/4) Epoch 11, batch 4050, loss[loss=0.1911, simple_loss=0.2698, pruned_loss=0.05621, over 7697.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3126, pruned_loss=0.08132, over 1610915.29 frames. ], batch size: 18, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:27:43,790 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:15,559 INFO [train.py:901] (1/4) Epoch 11, batch 4100, loss[loss=0.2641, simple_loss=0.3331, pruned_loss=0.09754, over 6933.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3123, pruned_loss=0.08145, over 1610032.25 frames. ], batch size: 72, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:28:16,466 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:26,691 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-06 10:28:27,736 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.518e+02 2.978e+02 3.788e+02 7.594e+02, threshold=5.956e+02, percent-clipped=4.0 +2023-02-06 10:28:33,385 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:34,132 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4330, 1.7278, 2.7340, 1.2543, 1.9179, 1.9517, 1.5304, 1.8646], + device='cuda:1'), covar=tensor([0.1788, 0.2155, 0.0825, 0.3690, 0.1615, 0.2673, 0.1889, 0.2079], + device='cuda:1'), in_proj_covar=tensor([0.0493, 0.0520, 0.0536, 0.0583, 0.0619, 0.0558, 0.0477, 0.0619], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:28:41,347 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:41,425 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:43,325 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0283, 2.3571, 1.9435, 2.9564, 1.3405, 1.7106, 1.8253, 2.3998], + device='cuda:1'), covar=tensor([0.0878, 0.0975, 0.1030, 0.0432, 0.1248, 0.1540, 0.1202, 0.0874], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0216, 0.0256, 0.0220, 0.0217, 0.0253, 0.0255, 0.0222], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:28:49,615 INFO [train.py:901] (1/4) Epoch 11, batch 4150, loss[loss=0.2215, simple_loss=0.278, pruned_loss=0.08256, over 7406.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3114, pruned_loss=0.08074, over 1613726.10 frames. 
], batch size: 17, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:28:58,527 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:59,076 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84995.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:29:04,350 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:29:23,817 INFO [train.py:901] (1/4) Epoch 11, batch 4200, loss[loss=0.2738, simple_loss=0.3378, pruned_loss=0.1049, over 8466.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3118, pruned_loss=0.08098, over 1612690.95 frames. ], batch size: 25, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:29:36,444 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.581e+02 3.261e+02 3.967e+02 9.417e+02, threshold=6.523e+02, percent-clipped=7.0 +2023-02-06 10:29:47,919 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 10:29:50,141 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:29:58,029 INFO [train.py:901] (1/4) Epoch 11, batch 4250, loss[loss=0.2767, simple_loss=0.3548, pruned_loss=0.09928, over 8358.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3108, pruned_loss=0.08055, over 1611980.42 frames. ], batch size: 24, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:30:06,842 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:10,060 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 10:30:18,069 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:25,268 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8858, 3.7840, 3.5161, 1.7365, 3.4228, 3.3684, 3.5664, 3.1098], + device='cuda:1'), covar=tensor([0.0827, 0.0609, 0.0927, 0.4590, 0.0807, 0.1056, 0.1210, 0.1062], + device='cuda:1'), in_proj_covar=tensor([0.0459, 0.0365, 0.0370, 0.0473, 0.0369, 0.0368, 0.0367, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:30:28,613 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:32,505 INFO [train.py:901] (1/4) Epoch 11, batch 4300, loss[loss=0.241, simple_loss=0.306, pruned_loss=0.08802, over 7807.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3107, pruned_loss=0.08067, over 1613427.05 frames. 
], batch size: 19, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:30:33,963 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:45,182 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.616e+02 3.014e+02 4.154e+02 7.931e+02, threshold=6.027e+02, percent-clipped=5.0 +2023-02-06 10:30:54,030 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:31:06,971 INFO [train.py:901] (1/4) Epoch 11, batch 4350, loss[loss=0.244, simple_loss=0.306, pruned_loss=0.09103, over 7782.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3118, pruned_loss=0.08156, over 1614619.45 frames. ], batch size: 19, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:31:36,560 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4925, 2.1665, 4.1254, 1.3509, 2.7880, 1.9954, 1.7086, 2.5419], + device='cuda:1'), covar=tensor([0.1924, 0.2486, 0.0722, 0.4235, 0.1774, 0.3259, 0.2020, 0.2761], + device='cuda:1'), in_proj_covar=tensor([0.0493, 0.0520, 0.0538, 0.0582, 0.0621, 0.0562, 0.0476, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:31:40,296 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 10:31:41,573 INFO [train.py:901] (1/4) Epoch 11, batch 4400, loss[loss=0.2396, simple_loss=0.3107, pruned_loss=0.08427, over 7931.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3128, pruned_loss=0.08216, over 1616518.61 frames. ], batch size: 20, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:31:54,340 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.553e+02 3.172e+02 3.669e+02 6.483e+02, threshold=6.345e+02, percent-clipped=4.0 +2023-02-06 10:32:01,427 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:14,074 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:16,485 INFO [train.py:901] (1/4) Epoch 11, batch 4450, loss[loss=0.2372, simple_loss=0.3152, pruned_loss=0.07955, over 8292.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3115, pruned_loss=0.08079, over 1615800.78 frames. ], batch size: 23, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:32:18,770 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:22,679 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 10:32:38,766 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:50,699 INFO [train.py:901] (1/4) Epoch 11, batch 4500, loss[loss=0.2198, simple_loss=0.3045, pruned_loss=0.06761, over 8694.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3109, pruned_loss=0.08039, over 1615702.34 frames. 
], batch size: 34, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:33:00,922 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9349, 1.6502, 1.6747, 1.5461, 1.1177, 1.5481, 1.8209, 1.6972], + device='cuda:1'), covar=tensor([0.0515, 0.1122, 0.1570, 0.1257, 0.0581, 0.1405, 0.0666, 0.0556], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0154, 0.0193, 0.0159, 0.0105, 0.0164, 0.0118, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 10:33:03,403 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.629e+02 3.227e+02 4.085e+02 1.162e+03, threshold=6.455e+02, percent-clipped=2.0 +2023-02-06 10:33:15,858 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 10:33:16,083 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:33:26,545 INFO [train.py:901] (1/4) Epoch 11, batch 4550, loss[loss=0.1861, simple_loss=0.2857, pruned_loss=0.04324, over 8248.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.311, pruned_loss=0.08068, over 1618658.00 frames. ], batch size: 24, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:33:27,819 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 10:33:33,530 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85391.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:33:59,385 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:01,131 INFO [train.py:901] (1/4) Epoch 11, batch 4600, loss[loss=0.2409, simple_loss=0.3182, pruned_loss=0.08175, over 8455.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3105, pruned_loss=0.08056, over 1617403.07 frames. ], batch size: 25, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:34:11,928 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:13,797 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.573e+02 3.214e+02 4.149e+02 1.527e+03, threshold=6.427e+02, percent-clipped=2.0 +2023-02-06 10:34:14,010 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1434, 2.2989, 1.9197, 2.7340, 1.4332, 1.6912, 1.9806, 2.3493], + device='cuda:1'), covar=tensor([0.0703, 0.0765, 0.0971, 0.0394, 0.1105, 0.1304, 0.0886, 0.0753], + device='cuda:1'), in_proj_covar=tensor([0.0241, 0.0217, 0.0259, 0.0219, 0.0220, 0.0257, 0.0259, 0.0226], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:34:27,393 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:33,579 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:36,247 INFO [train.py:901] (1/4) Epoch 11, batch 4650, loss[loss=0.2336, simple_loss=0.3055, pruned_loss=0.08086, over 7425.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3116, pruned_loss=0.08129, over 1615468.22 frames. ], batch size: 17, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:34:49,881 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.74 vs. 
limit=5.0 +2023-02-06 10:34:50,387 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:51,873 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9461, 2.2695, 3.6895, 1.7313, 2.9840, 2.3608, 2.0955, 2.7611], + device='cuda:1'), covar=tensor([0.1342, 0.1883, 0.0604, 0.3010, 0.1214, 0.2030, 0.1486, 0.1848], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0517, 0.0531, 0.0576, 0.0618, 0.0554, 0.0474, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:35:11,112 INFO [train.py:901] (1/4) Epoch 11, batch 4700, loss[loss=0.2835, simple_loss=0.3497, pruned_loss=0.1087, over 8518.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3125, pruned_loss=0.08204, over 1612414.01 frames. ], batch size: 26, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:35:12,715 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:22,561 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:23,054 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 2.022e+02 2.812e+02 3.491e+02 4.674e+02 1.006e+03, threshold=6.983e+02, percent-clipped=9.0 +2023-02-06 10:35:30,008 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:45,766 INFO [train.py:901] (1/4) Epoch 11, batch 4750, loss[loss=0.1946, simple_loss=0.2632, pruned_loss=0.06301, over 7787.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3126, pruned_loss=0.08191, over 1618449.49 frames. ], batch size: 19, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:35:47,966 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:52,336 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 10:35:53,283 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:36:09,834 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 10:36:11,808 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 10:36:20,655 INFO [train.py:901] (1/4) Epoch 11, batch 4800, loss[loss=0.2267, simple_loss=0.3152, pruned_loss=0.06908, over 8104.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3137, pruned_loss=0.08232, over 1621225.56 frames. 
], batch size: 23, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:36:28,779 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85643.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:36:32,782 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.628e+02 3.255e+02 4.281e+02 8.051e+02, threshold=6.510e+02, percent-clipped=3.0 +2023-02-06 10:36:35,795 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5488, 2.1199, 3.1024, 2.5582, 2.7705, 2.2680, 1.9989, 2.0259], + device='cuda:1'), covar=tensor([0.2819, 0.3419, 0.0939, 0.1858, 0.1506, 0.1835, 0.1457, 0.2991], + device='cuda:1'), in_proj_covar=tensor([0.0877, 0.0863, 0.0729, 0.0827, 0.0932, 0.0792, 0.0700, 0.0766], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:36:40,524 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1446, 2.2035, 1.7647, 2.0757, 1.7363, 1.3765, 1.6418, 1.7578], + device='cuda:1'), covar=tensor([0.1265, 0.0355, 0.0994, 0.0416, 0.0580, 0.1356, 0.0897, 0.0777], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0235, 0.0316, 0.0295, 0.0303, 0.0322, 0.0340, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 10:36:42,175 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 10:36:55,312 INFO [train.py:901] (1/4) Epoch 11, batch 4850, loss[loss=0.257, simple_loss=0.3144, pruned_loss=0.09984, over 8083.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3117, pruned_loss=0.081, over 1617338.17 frames. ], batch size: 21, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:36:57,567 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:37:01,293 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 10:37:14,640 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:37:30,196 INFO [train.py:901] (1/4) Epoch 11, batch 4900, loss[loss=0.2404, simple_loss=0.3257, pruned_loss=0.0776, over 8359.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3118, pruned_loss=0.08083, over 1614946.08 frames. ], batch size: 24, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:37:42,889 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.544e+02 3.151e+02 4.004e+02 8.063e+02, threshold=6.301e+02, percent-clipped=5.0 +2023-02-06 10:38:04,653 INFO [train.py:901] (1/4) Epoch 11, batch 4950, loss[loss=0.2307, simple_loss=0.316, pruned_loss=0.07273, over 8031.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3119, pruned_loss=0.08073, over 1615061.36 frames. 
], batch size: 22, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:38:11,377 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85790.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:17,561 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.3741, 5.3917, 4.8142, 2.2599, 4.8257, 5.0007, 5.0833, 4.5281], + device='cuda:1'), covar=tensor([0.0617, 0.0421, 0.0808, 0.4572, 0.0769, 0.0748, 0.0967, 0.0728], + device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0374, 0.0379, 0.0484, 0.0378, 0.0378, 0.0375, 0.0334], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:38:28,576 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6048, 1.9008, 1.9468, 1.0990, 2.0708, 1.4970, 0.3516, 1.7113], + device='cuda:1'), covar=tensor([0.0319, 0.0184, 0.0159, 0.0315, 0.0208, 0.0554, 0.0546, 0.0148], + device='cuda:1'), in_proj_covar=tensor([0.0387, 0.0322, 0.0269, 0.0380, 0.0308, 0.0470, 0.0355, 0.0347], + device='cuda:1'), out_proj_covar=tensor([1.1049e-04, 8.9705e-05, 7.5298e-05, 1.0694e-04, 8.7683e-05, 1.4383e-04, + 1.0160e-04, 9.8904e-05], device='cuda:1') +2023-02-06 10:38:39,599 INFO [train.py:901] (1/4) Epoch 11, batch 5000, loss[loss=0.1961, simple_loss=0.2821, pruned_loss=0.05503, over 8468.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3105, pruned_loss=0.08009, over 1614928.17 frames. ], batch size: 25, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:38:46,500 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85840.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:49,801 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85845.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:51,926 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:52,299 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.585e+02 3.219e+02 4.097e+02 8.363e+02, threshold=6.438e+02, percent-clipped=6.0 +2023-02-06 10:39:03,557 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:08,905 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:11,146 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 10:39:13,948 INFO [train.py:901] (1/4) Epoch 11, batch 5050, loss[loss=0.2222, simple_loss=0.2899, pruned_loss=0.07726, over 7639.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3112, pruned_loss=0.08054, over 1612697.65 frames. ], batch size: 19, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:39:21,267 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:22,011 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85893.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:30,139 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:39,818 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-06 10:39:48,589 INFO [train.py:901] (1/4) Epoch 11, batch 5100, loss[loss=0.2044, simple_loss=0.2789, pruned_loss=0.06495, over 7650.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3102, pruned_loss=0.07964, over 1612814.93 frames. ], batch size: 19, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:39:48,846 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8612, 1.9082, 2.2827, 1.6561, 1.1146, 2.5041, 0.2061, 1.3095], + device='cuda:1'), covar=tensor([0.2497, 0.1602, 0.0555, 0.2084, 0.4674, 0.0499, 0.3863, 0.2496], + device='cuda:1'), in_proj_covar=tensor([0.0164, 0.0168, 0.0100, 0.0215, 0.0256, 0.0104, 0.0167, 0.0166], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 10:40:00,812 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85948.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:40:01,252 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.570e+02 3.113e+02 3.980e+02 6.838e+02, threshold=6.226e+02, percent-clipped=2.0 +2023-02-06 10:40:08,944 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85960.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:19,544 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:23,496 INFO [train.py:901] (1/4) Epoch 11, batch 5150, loss[loss=0.2695, simple_loss=0.3415, pruned_loss=0.09878, over 8283.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3121, pruned_loss=0.08052, over 1611355.44 frames. ], batch size: 23, lr: 6.92e-03, grad_scale: 8.0 +2023-02-06 10:40:26,411 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7701, 1.3809, 1.5157, 1.2723, 0.9804, 1.2408, 1.5970, 1.3738], + device='cuda:1'), covar=tensor([0.0525, 0.1279, 0.1752, 0.1453, 0.0578, 0.1554, 0.0688, 0.0619], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0151, 0.0191, 0.0158, 0.0103, 0.0163, 0.0116, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 10:40:27,614 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:42,275 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:51,391 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:59,434 INFO [train.py:901] (1/4) Epoch 11, batch 5200, loss[loss=0.2568, simple_loss=0.337, pruned_loss=0.08827, over 8495.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3113, pruned_loss=0.07975, over 1614434.14 frames. ], batch size: 26, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:12,339 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.648e+02 3.082e+02 3.913e+02 1.007e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-06 10:41:27,743 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:35,330 INFO [train.py:901] (1/4) Epoch 11, batch 5250, loss[loss=0.2525, simple_loss=0.3328, pruned_loss=0.08607, over 8323.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3098, pruned_loss=0.07906, over 1614779.32 frames. 
], batch size: 25, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:39,719 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:40,975 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 10:41:50,756 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:10,639 INFO [train.py:901] (1/4) Epoch 11, batch 5300, loss[loss=0.236, simple_loss=0.3123, pruned_loss=0.07986, over 8324.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3108, pruned_loss=0.07929, over 1618393.19 frames. ], batch size: 26, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:23,758 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.570e+02 3.118e+02 4.195e+02 8.045e+02, threshold=6.237e+02, percent-clipped=4.0 +2023-02-06 10:42:32,839 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:46,496 INFO [train.py:901] (1/4) Epoch 11, batch 5350, loss[loss=0.1909, simple_loss=0.2725, pruned_loss=0.05462, over 7797.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3105, pruned_loss=0.07971, over 1612791.94 frames. ], batch size: 19, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:50,703 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:12,424 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:22,274 INFO [train.py:901] (1/4) Epoch 11, batch 5400, loss[loss=0.2752, simple_loss=0.3521, pruned_loss=0.09919, over 8029.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3116, pruned_loss=0.0807, over 1613061.48 frames. ], batch size: 22, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:43:26,576 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86237.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:29,462 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:34,672 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.471e+02 3.223e+02 4.268e+02 9.619e+02, threshold=6.446e+02, percent-clipped=7.0 +2023-02-06 10:43:44,506 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:45,131 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:57,396 INFO [train.py:901] (1/4) Epoch 11, batch 5450, loss[loss=0.2733, simple_loss=0.3391, pruned_loss=0.1037, over 8504.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3117, pruned_loss=0.08081, over 1617304.12 frames. ], batch size: 28, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:03,077 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:05,824 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86292.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:44:23,287 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. 
limit=5.0 +2023-02-06 10:44:25,144 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:34,087 INFO [train.py:901] (1/4) Epoch 11, batch 5500, loss[loss=0.2441, simple_loss=0.3257, pruned_loss=0.0812, over 8333.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3109, pruned_loss=0.08026, over 1613510.30 frames. ], batch size: 25, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:34,728 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 10:44:37,215 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 10:44:46,146 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.494e+02 3.013e+02 3.770e+02 8.759e+02, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 10:44:48,475 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:52,710 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:56,787 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:58,664 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 10:45:09,177 INFO [train.py:901] (1/4) Epoch 11, batch 5550, loss[loss=0.2283, simple_loss=0.2967, pruned_loss=0.07993, over 7932.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3116, pruned_loss=0.08039, over 1614173.14 frames. ], batch size: 20, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:10,784 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:27,857 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86407.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:45:33,158 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:44,323 INFO [train.py:901] (1/4) Epoch 11, batch 5600, loss[loss=0.2455, simple_loss=0.321, pruned_loss=0.08505, over 8758.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3113, pruned_loss=0.0803, over 1614992.90 frames. 
], batch size: 30, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:44,401 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:46,511 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:51,968 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0195, 1.4717, 3.2498, 1.2045, 2.3104, 3.5977, 3.6052, 3.0422], + device='cuda:1'), covar=tensor([0.1121, 0.1688, 0.0371, 0.2282, 0.0942, 0.0233, 0.0492, 0.0618], + device='cuda:1'), in_proj_covar=tensor([0.0262, 0.0296, 0.0257, 0.0288, 0.0269, 0.0235, 0.0337, 0.0290], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 10:45:57,241 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.527e+02 3.003e+02 3.802e+02 9.548e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-06 10:46:03,247 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:06,580 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:17,335 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:18,539 INFO [train.py:901] (1/4) Epoch 11, batch 5650, loss[loss=0.1927, simple_loss=0.2813, pruned_loss=0.05207, over 8194.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3113, pruned_loss=0.08071, over 1612355.67 frames. ], batch size: 23, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:46:39,890 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 10:46:48,926 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3921, 1.9727, 2.9242, 2.3003, 2.5224, 2.2347, 1.8370, 1.2606], + device='cuda:1'), covar=tensor([0.3888, 0.4004, 0.1147, 0.2466, 0.1976, 0.2152, 0.1709, 0.4233], + device='cuda:1'), in_proj_covar=tensor([0.0885, 0.0862, 0.0724, 0.0831, 0.0929, 0.0792, 0.0698, 0.0762], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:46:52,197 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:53,980 INFO [train.py:901] (1/4) Epoch 11, batch 5700, loss[loss=0.2776, simple_loss=0.3432, pruned_loss=0.106, over 8521.00 frames. ], tot_loss[loss=0.238, simple_loss=0.3125, pruned_loss=0.08169, over 1614497.60 frames. ], batch size: 26, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:04,209 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:06,038 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.473e+02 3.032e+02 3.837e+02 8.433e+02, threshold=6.065e+02, percent-clipped=5.0 +2023-02-06 10:47:28,602 INFO [train.py:901] (1/4) Epoch 11, batch 5750, loss[loss=0.2117, simple_loss=0.2772, pruned_loss=0.07313, over 7817.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3114, pruned_loss=0.08082, over 1613366.55 frames. 
], batch size: 20, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:40,221 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:42,139 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 10:47:47,614 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:47,759 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:54,772 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 10:48:00,607 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2145, 1.7509, 2.5698, 1.9864, 2.2795, 2.0484, 1.7036, 0.9410], + device='cuda:1'), covar=tensor([0.3815, 0.3661, 0.1095, 0.2464, 0.1749, 0.2238, 0.1680, 0.3853], + device='cuda:1'), in_proj_covar=tensor([0.0884, 0.0866, 0.0727, 0.0835, 0.0930, 0.0794, 0.0699, 0.0764], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:48:03,810 INFO [train.py:901] (1/4) Epoch 11, batch 5800, loss[loss=0.2233, simple_loss=0.2872, pruned_loss=0.07972, over 7216.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3095, pruned_loss=0.08024, over 1606702.50 frames. ], batch size: 16, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:05,399 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:13,803 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3284, 2.9025, 2.3981, 3.7513, 1.6977, 2.0558, 2.2233, 2.9701], + device='cuda:1'), covar=tensor([0.0794, 0.0731, 0.0890, 0.0300, 0.1162, 0.1384, 0.1201, 0.0798], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0214, 0.0252, 0.0217, 0.0218, 0.0254, 0.0254, 0.0222], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:48:17,062 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.625e+02 3.434e+02 4.363e+02 1.044e+03, threshold=6.867e+02, percent-clipped=16.0 +2023-02-06 10:48:26,845 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86663.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:48:39,359 INFO [train.py:901] (1/4) Epoch 11, batch 5850, loss[loss=0.2578, simple_loss=0.3361, pruned_loss=0.0897, over 8246.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3094, pruned_loss=0.08026, over 1602168.40 frames. ], batch size: 24, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:44,282 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86688.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:48:45,640 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:02,117 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:07,940 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:08,185 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. 
limit=2.0 +2023-02-06 10:49:13,215 INFO [train.py:901] (1/4) Epoch 11, batch 5900, loss[loss=0.2189, simple_loss=0.2826, pruned_loss=0.07761, over 7203.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3104, pruned_loss=0.08096, over 1603445.37 frames. ], batch size: 16, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:16,680 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:25,724 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.878e+02 2.650e+02 3.002e+02 3.837e+02 8.505e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-06 10:49:33,355 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:48,259 INFO [train.py:901] (1/4) Epoch 11, batch 5950, loss[loss=0.2213, simple_loss=0.2934, pruned_loss=0.07459, over 7982.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3102, pruned_loss=0.08066, over 1606070.58 frames. ], batch size: 21, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:51,984 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:57,466 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4608, 2.0016, 3.1620, 2.4142, 2.7546, 2.2856, 1.7954, 1.4073], + device='cuda:1'), covar=tensor([0.3987, 0.4010, 0.1092, 0.2565, 0.1872, 0.2132, 0.1634, 0.4281], + device='cuda:1'), in_proj_covar=tensor([0.0887, 0.0869, 0.0728, 0.0839, 0.0930, 0.0798, 0.0700, 0.0765], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:50:03,635 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:03,784 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:06,870 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:09,065 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:10,679 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 10:50:20,474 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:22,932 INFO [train.py:901] (1/4) Epoch 11, batch 6000, loss[loss=0.2624, simple_loss=0.3359, pruned_loss=0.09442, over 8477.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3093, pruned_loss=0.0802, over 1604617.49 frames. ], batch size: 25, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:50:22,932 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 10:50:35,331 INFO [train.py:935] (1/4) Epoch 11, validation: loss=0.1887, simple_loss=0.2887, pruned_loss=0.04439, over 944034.00 frames. 
+2023-02-06 10:50:35,332 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 10:50:36,204 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86832.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:47,363 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.431e+02 2.934e+02 3.566e+02 7.044e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-06 10:51:10,323 INFO [train.py:901] (1/4) Epoch 11, batch 6050, loss[loss=0.2137, simple_loss=0.2781, pruned_loss=0.07463, over 8028.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3104, pruned_loss=0.08116, over 1608556.97 frames. ], batch size: 22, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:20,528 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:35,592 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:39,111 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:45,230 INFO [train.py:901] (1/4) Epoch 11, batch 6100, loss[loss=0.2261, simple_loss=0.3054, pruned_loss=0.07339, over 8537.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.31, pruned_loss=0.08062, over 1610942.72 frames. ], batch size: 50, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:53,492 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:58,241 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.604e+02 3.114e+02 3.901e+02 9.212e+02, threshold=6.229e+02, percent-clipped=4.0 +2023-02-06 10:52:06,861 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 10:52:19,160 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:20,335 INFO [train.py:901] (1/4) Epoch 11, batch 6150, loss[loss=0.2722, simple_loss=0.3444, pruned_loss=0.09996, over 8501.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3094, pruned_loss=0.07962, over 1612398.98 frames. ], batch size: 26, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:52:36,919 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:47,743 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87020.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:52:55,885 INFO [train.py:901] (1/4) Epoch 11, batch 6200, loss[loss=0.1857, simple_loss=0.2711, pruned_loss=0.05013, over 8462.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.309, pruned_loss=0.07935, over 1610676.94 frames. 
], batch size: 25, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:53:06,093 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8283, 2.0950, 1.6522, 2.6801, 1.3186, 1.4723, 1.7683, 2.2248], + device='cuda:1'), covar=tensor([0.0844, 0.0940, 0.1182, 0.0391, 0.1104, 0.1502, 0.0991, 0.0888], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0215, 0.0256, 0.0217, 0.0218, 0.0253, 0.0253, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:53:07,901 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.592e+02 3.192e+02 4.476e+02 1.804e+03, threshold=6.384e+02, percent-clipped=5.0 +2023-02-06 10:53:14,474 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:53:30,924 INFO [train.py:901] (1/4) Epoch 11, batch 6250, loss[loss=0.2231, simple_loss=0.309, pruned_loss=0.06865, over 5064.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3103, pruned_loss=0.07996, over 1611655.88 frames. ], batch size: 11, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:06,553 INFO [train.py:901] (1/4) Epoch 11, batch 6300, loss[loss=0.2278, simple_loss=0.3066, pruned_loss=0.07449, over 8595.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.3094, pruned_loss=0.0798, over 1607274.50 frames. ], batch size: 49, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:19,292 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.563e+02 3.017e+02 3.734e+02 8.364e+02, threshold=6.034e+02, percent-clipped=3.0 +2023-02-06 10:54:36,433 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:38,306 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87176.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:39,802 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:41,629 INFO [train.py:901] (1/4) Epoch 11, batch 6350, loss[loss=0.2336, simple_loss=0.2891, pruned_loss=0.08908, over 7705.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3104, pruned_loss=0.08043, over 1609195.30 frames. ], batch size: 18, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:44,729 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 10:54:53,189 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:57,275 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:16,801 INFO [train.py:901] (1/4) Epoch 11, batch 6400, loss[loss=0.1979, simple_loss=0.2722, pruned_loss=0.06178, over 7707.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3102, pruned_loss=0.08062, over 1610252.94 frames. 
], batch size: 18, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:23,183 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:28,866 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:29,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.577e+02 3.020e+02 3.786e+02 7.428e+02, threshold=6.041e+02, percent-clipped=2.0 +2023-02-06 10:55:37,016 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3888, 2.0602, 3.3132, 1.2178, 2.6349, 1.8588, 1.5709, 2.3845], + device='cuda:1'), covar=tensor([0.1858, 0.2257, 0.0873, 0.4086, 0.1541, 0.2924, 0.2077, 0.2171], + device='cuda:1'), in_proj_covar=tensor([0.0486, 0.0523, 0.0541, 0.0580, 0.0619, 0.0557, 0.0473, 0.0616], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:55:51,527 INFO [train.py:901] (1/4) Epoch 11, batch 6450, loss[loss=0.2443, simple_loss=0.319, pruned_loss=0.08477, over 8025.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3104, pruned_loss=0.0808, over 1610690.86 frames. ], batch size: 22, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:59,185 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:14,144 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:14,840 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3175, 2.4849, 1.6726, 2.0106, 1.8732, 1.3067, 1.7837, 1.8722], + device='cuda:1'), covar=tensor([0.1330, 0.0325, 0.1092, 0.0627, 0.0655, 0.1391, 0.0932, 0.0858], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0235, 0.0316, 0.0296, 0.0302, 0.0320, 0.0340, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 10:56:17,607 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3004, 1.7997, 2.7615, 2.1751, 2.4645, 2.1040, 1.7484, 1.2855], + device='cuda:1'), covar=tensor([0.3757, 0.4094, 0.1170, 0.2504, 0.1799, 0.2244, 0.1648, 0.4030], + device='cuda:1'), in_proj_covar=tensor([0.0884, 0.0866, 0.0726, 0.0840, 0.0925, 0.0794, 0.0698, 0.0761], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 10:56:27,327 INFO [train.py:901] (1/4) Epoch 11, batch 6500, loss[loss=0.2617, simple_loss=0.3395, pruned_loss=0.09196, over 8497.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3106, pruned_loss=0.08063, over 1613012.13 frames. 
], batch size: 26, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:56:32,359 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:39,861 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.605e+02 3.245e+02 4.169e+02 7.875e+02, threshold=6.489e+02, percent-clipped=5.0 +2023-02-06 10:56:44,232 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:50,430 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87364.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:57:01,903 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:57:02,442 INFO [train.py:901] (1/4) Epoch 11, batch 6550, loss[loss=0.2293, simple_loss=0.2964, pruned_loss=0.08107, over 7801.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3109, pruned_loss=0.08017, over 1619609.55 frames. ], batch size: 20, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:17,764 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 10:57:36,586 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1570, 2.4216, 1.9360, 3.0246, 1.5283, 1.6200, 1.9693, 2.4960], + device='cuda:1'), covar=tensor([0.0750, 0.0839, 0.0988, 0.0308, 0.1065, 0.1408, 0.0903, 0.0820], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0215, 0.0258, 0.0218, 0.0220, 0.0257, 0.0258, 0.0224], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:57:37,077 INFO [train.py:901] (1/4) Epoch 11, batch 6600, loss[loss=0.1878, simple_loss=0.2493, pruned_loss=0.06316, over 7524.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3109, pruned_loss=0.08015, over 1618084.36 frames. ], batch size: 18, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:37,783 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 10:57:41,254 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4218, 4.3112, 4.0149, 1.7537, 3.8420, 3.9817, 3.9734, 3.7320], + device='cuda:1'), covar=tensor([0.0774, 0.0606, 0.0967, 0.5259, 0.0792, 0.0985, 0.1262, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0461, 0.0372, 0.0382, 0.0481, 0.0379, 0.0375, 0.0377, 0.0333], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:57:46,876 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9055, 1.9046, 2.2950, 1.7256, 1.2833, 2.5722, 0.6110, 1.4939], + device='cuda:1'), covar=tensor([0.2379, 0.1912, 0.0554, 0.2094, 0.4275, 0.0384, 0.3412, 0.2005], + device='cuda:1'), in_proj_covar=tensor([0.0161, 0.0167, 0.0099, 0.0213, 0.0250, 0.0101, 0.0160, 0.0163], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 10:57:47,567 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4787, 1.8343, 1.7515, 1.1213, 1.8940, 1.3345, 0.5548, 1.6819], + device='cuda:1'), covar=tensor([0.0330, 0.0192, 0.0159, 0.0321, 0.0221, 0.0550, 0.0483, 0.0163], + device='cuda:1'), in_proj_covar=tensor([0.0388, 0.0323, 0.0266, 0.0381, 0.0308, 0.0466, 0.0349, 0.0344], + device='cuda:1'), out_proj_covar=tensor([1.1076e-04, 9.0009e-05, 7.4450e-05, 1.0723e-04, 8.7297e-05, 1.4185e-04, + 9.9638e-05, 9.7645e-05], device='cuda:1') +2023-02-06 10:57:50,083 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.293e+02 2.790e+02 3.732e+02 8.562e+02, threshold=5.581e+02, percent-clipped=1.0 +2023-02-06 10:57:55,854 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 10:58:11,498 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:11,537 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87479.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:58:12,692 INFO [train.py:901] (1/4) Epoch 11, batch 6650, loss[loss=0.205, simple_loss=0.2801, pruned_loss=0.06496, over 7692.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3104, pruned_loss=0.0798, over 1619873.40 frames. ], batch size: 18, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:18,142 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8894, 2.0489, 1.6772, 2.5150, 1.2427, 1.5597, 1.6819, 2.0061], + device='cuda:1'), covar=tensor([0.0715, 0.0797, 0.1040, 0.0413, 0.1186, 0.1306, 0.0933, 0.0829], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0214, 0.0257, 0.0217, 0.0219, 0.0254, 0.0257, 0.0222], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 10:58:41,497 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:47,482 INFO [train.py:901] (1/4) Epoch 11, batch 6700, loss[loss=0.2402, simple_loss=0.298, pruned_loss=0.09114, over 7795.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3098, pruned_loss=0.07949, over 1619345.74 frames. 
], batch size: 19, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:58,342 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:59,459 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.493e+02 3.158e+02 4.170e+02 8.693e+02, threshold=6.316e+02, percent-clipped=8.0 +2023-02-06 10:59:16,931 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:22,976 INFO [train.py:901] (1/4) Epoch 11, batch 6750, loss[loss=0.1991, simple_loss=0.2867, pruned_loss=0.05579, over 8234.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3098, pruned_loss=0.07994, over 1618314.93 frames. ], batch size: 22, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:59:30,584 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:37,575 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:39,609 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4570, 1.6335, 2.4062, 1.3120, 1.6738, 1.7643, 1.4334, 1.5455], + device='cuda:1'), covar=tensor([0.1595, 0.1924, 0.0600, 0.3592, 0.1315, 0.2625, 0.1869, 0.1559], + device='cuda:1'), in_proj_covar=tensor([0.0479, 0.0515, 0.0525, 0.0570, 0.0608, 0.0544, 0.0464, 0.0602], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 10:59:40,789 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:43,430 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:52,275 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:56,919 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 10:59:57,597 INFO [train.py:901] (1/4) Epoch 11, batch 6800, loss[loss=0.2974, simple_loss=0.3522, pruned_loss=0.1213, over 6734.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3097, pruned_loss=0.08012, over 1616514.13 frames. 
], batch size: 72, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:01,245 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:00:01,931 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9193, 2.7290, 3.4598, 1.7772, 1.5539, 3.7168, 0.4874, 1.9510], + device='cuda:1'), covar=tensor([0.2104, 0.1618, 0.0455, 0.3511, 0.4633, 0.0270, 0.3830, 0.2348], + device='cuda:1'), in_proj_covar=tensor([0.0159, 0.0163, 0.0097, 0.0209, 0.0244, 0.0100, 0.0157, 0.0160], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:00:10,522 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.375e+02 2.980e+02 3.798e+02 7.616e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 11:00:23,987 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8437, 2.6900, 3.1842, 2.1868, 1.6749, 3.4013, 0.5317, 2.1155], + device='cuda:1'), covar=tensor([0.2160, 0.1263, 0.0438, 0.2346, 0.3932, 0.0429, 0.3558, 0.1867], + device='cuda:1'), in_proj_covar=tensor([0.0162, 0.0165, 0.0098, 0.0212, 0.0248, 0.0101, 0.0159, 0.0162], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:00:32,370 INFO [train.py:901] (1/4) Epoch 11, batch 6850, loss[loss=0.2021, simple_loss=0.2818, pruned_loss=0.06123, over 7933.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3098, pruned_loss=0.08046, over 1618578.91 frames. ], batch size: 20, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:45,129 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 11:00:50,747 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:00:51,408 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6697, 1.3858, 4.8064, 1.6752, 4.2288, 3.9794, 4.3518, 4.1673], + device='cuda:1'), covar=tensor([0.0457, 0.4422, 0.0413, 0.3517, 0.0994, 0.0828, 0.0460, 0.0589], + device='cuda:1'), in_proj_covar=tensor([0.0486, 0.0571, 0.0579, 0.0529, 0.0596, 0.0514, 0.0505, 0.0572], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:01:01,872 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:01:06,419 INFO [train.py:901] (1/4) Epoch 11, batch 6900, loss[loss=0.2325, simple_loss=0.3069, pruned_loss=0.07903, over 8032.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3103, pruned_loss=0.08054, over 1616931.54 frames. 
], batch size: 22, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:01:10,021 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87735.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:01:19,188 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.628e+02 3.043e+02 4.130e+02 7.700e+02, threshold=6.086e+02, percent-clipped=2.0 +2023-02-06 11:01:22,687 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7387, 3.6855, 3.3948, 1.7325, 3.3143, 3.2891, 3.4205, 3.0491], + device='cuda:1'), covar=tensor([0.1115, 0.0798, 0.1244, 0.4991, 0.1021, 0.1187, 0.1620, 0.1124], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0374, 0.0384, 0.0480, 0.0377, 0.0376, 0.0377, 0.0336], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:01:26,779 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8393, 1.4258, 5.9919, 2.1038, 5.3077, 4.9611, 5.5515, 5.3901], + device='cuda:1'), covar=tensor([0.0478, 0.4982, 0.0324, 0.3416, 0.0969, 0.0827, 0.0444, 0.0490], + device='cuda:1'), in_proj_covar=tensor([0.0490, 0.0576, 0.0584, 0.0533, 0.0602, 0.0517, 0.0507, 0.0577], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:01:26,848 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87760.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:01:41,613 INFO [train.py:901] (1/4) Epoch 11, batch 6950, loss[loss=0.1972, simple_loss=0.2673, pruned_loss=0.06355, over 7694.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.3094, pruned_loss=0.07981, over 1611802.63 frames. ], batch size: 18, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:01:52,557 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 11:02:11,622 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:16,826 INFO [train.py:901] (1/4) Epoch 11, batch 7000, loss[loss=0.2222, simple_loss=0.2983, pruned_loss=0.07307, over 7975.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.3094, pruned_loss=0.0798, over 1612728.57 frames. ], batch size: 21, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:02:22,315 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:29,505 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.548e+02 3.185e+02 4.052e+02 9.283e+02, threshold=6.369e+02, percent-clipped=6.0 +2023-02-06 11:02:41,530 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:51,622 INFO [train.py:901] (1/4) Epoch 11, batch 7050, loss[loss=0.2652, simple_loss=0.3243, pruned_loss=0.103, over 7915.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3099, pruned_loss=0.07984, over 1614412.62 frames. 
], batch size: 20, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:03:12,208 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5721, 1.6443, 2.0138, 1.7055, 1.0540, 2.0754, 0.2250, 1.2916], + device='cuda:1'), covar=tensor([0.2590, 0.1818, 0.0433, 0.1457, 0.4334, 0.0424, 0.3225, 0.1674], + device='cuda:1'), in_proj_covar=tensor([0.0161, 0.0165, 0.0098, 0.0211, 0.0247, 0.0100, 0.0159, 0.0162], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:03:26,708 INFO [train.py:901] (1/4) Epoch 11, batch 7100, loss[loss=0.2449, simple_loss=0.3177, pruned_loss=0.08611, over 8301.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3103, pruned_loss=0.08004, over 1612444.56 frames. ], batch size: 23, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:03:31,616 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87938.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:36,825 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:38,774 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.732e+02 3.356e+02 4.654e+02 1.650e+03, threshold=6.712e+02, percent-clipped=12.0 +2023-02-06 11:03:40,145 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:48,276 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:51,539 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:00,737 INFO [train.py:901] (1/4) Epoch 11, batch 7150, loss[loss=0.2524, simple_loss=0.323, pruned_loss=0.09093, over 7166.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.31, pruned_loss=0.07982, over 1612891.43 frames. ], batch size: 16, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:04:01,614 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:05,825 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:36,666 INFO [train.py:901] (1/4) Epoch 11, batch 7200, loss[loss=0.2397, simple_loss=0.3152, pruned_loss=0.08209, over 6864.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3103, pruned_loss=0.08029, over 1611128.19 frames. ], batch size: 72, lr: 6.84e-03, grad_scale: 32.0 +2023-02-06 11:04:49,438 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.591e+02 3.086e+02 3.706e+02 9.715e+02, threshold=6.172e+02, percent-clipped=2.0 +2023-02-06 11:04:57,787 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:01,268 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:11,817 INFO [train.py:901] (1/4) Epoch 11, batch 7250, loss[loss=0.18, simple_loss=0.2558, pruned_loss=0.05211, over 7188.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3114, pruned_loss=0.08118, over 1608840.23 frames. 
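], batch size: 16, lr: 6.84e-03, grad_scale: 32.0

A note on the recurring `optim.py:369` lines: each reports the (min, 25%, 50%, 75%, max) quantiles of recent gradient norms, and the logged threshold tracks `Clipping_scale` times the median (e.g. 2.0 × 3.356e+02 ≈ 6.712e+02 in the 11:03:38 entry above). The sketch below reproduces that bookkeeping; it is a minimal illustration, not icefall's actual `optim.py`, and the `GradNormClipper` class, its interface, and the window size are hypothetical.

```python
import torch
from collections import deque

class GradNormClipper:
    """Hypothetical sketch of quantile-based gradient clipping: track a
    window of recent gradient norms, clip at clipping_scale * median,
    and report quantiles plus how often clipping fired."""

    def __init__(self, clipping_scale: float = 2.0, window: int = 128):
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=window)   # recent total grad norms
        self.fired = deque(maxlen=window)   # 1.0 where clipping engaged

    def clip_(self, params) -> str:
        grads = [p.grad for p in params if p.grad is not None]
        norm = torch.norm(torch.stack([g.detach().norm() for g in grads]))
        self.norms.append(norm.item())

        hist = torch.tensor(list(self.norms))
        q = torch.quantile(hist, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        threshold = self.clipping_scale * q[2].item()   # scale * median

        self.fired.append(1.0 if norm.item() > threshold else 0.0)
        if norm.item() > threshold:
            for g in grads:
                g.mul_(threshold / norm)                # rescale in place

        pct = 100.0 * sum(self.fired) / len(self.fired)
        quart = " ".join(f"{v:.3e}" for v in q.tolist())
        return (f"Clipping_scale={self.clipping_scale}, grad-norm quartiles "
                f"{quart}, threshold={threshold:.3e}, percent-clipped={pct:.1f}")
```

Clipping against a moving median rather than a fixed constant keeps the threshold meaningful as gradient magnitudes drift over training; calling `clipper.clip_(model.parameters())` just before `optimizer.step()` and logging the returned string would reproduce the format seen here.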
+2023-02-06 11:05:12,647 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:21,491 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:37,918 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88118.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:39,343 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88120.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:40,258 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0
+2023-02-06 11:05:46,910 INFO [train.py:901] (1/4) Epoch 11, batch 7300, loss[loss=0.2178, simple_loss=0.3084, pruned_loss=0.0636, over 8142.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3106, pruned_loss=0.08089, over 1608089.67 frames. ], batch size: 22, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:06:00,692 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.414e+02 2.958e+02 3.757e+02 7.369e+02, threshold=5.915e+02, percent-clipped=2.0
+2023-02-06 11:06:10,194 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 11:06:22,812 INFO [train.py:901] (1/4) Epoch 11, batch 7350, loss[loss=0.2321, simple_loss=0.3162, pruned_loss=0.07393, over 8361.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3102, pruned_loss=0.08024, over 1608253.61 frames. ], batch size: 24, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:06:32,263 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 11:06:32,470 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88194.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:06:50,354 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:06:51,484 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 11:06:58,209 INFO [train.py:901] (1/4) Epoch 11, batch 7400, loss[loss=0.2371, simple_loss=0.3144, pruned_loss=0.07989, over 7980.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.31, pruned_loss=0.07959, over 1611083.66 frames. ], batch size: 21, lr: 6.84e-03, grad_scale: 16.0
+2023-02-06 11:07:03,173 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:07:11,891 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.577e+02 3.074e+02 3.691e+02 9.024e+02, threshold=6.148e+02, percent-clipped=4.0
+2023-02-06 11:07:20,895 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:07:26,471 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs.
limit=2.0 +2023-02-06 11:07:31,794 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6227, 2.0101, 3.4187, 1.3623, 2.5503, 2.0809, 1.7405, 2.2487], + device='cuda:1'), covar=tensor([0.1670, 0.2164, 0.0761, 0.3761, 0.1458, 0.2615, 0.1757, 0.2231], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0522, 0.0529, 0.0576, 0.0618, 0.0558, 0.0472, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:07:32,914 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 11:07:33,599 INFO [train.py:901] (1/4) Epoch 11, batch 7450, loss[loss=0.2532, simple_loss=0.3277, pruned_loss=0.08939, over 7300.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.31, pruned_loss=0.07968, over 1611598.19 frames. ], batch size: 72, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:07:52,585 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88309.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:07:58,568 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:01,846 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:07,739 INFO [train.py:901] (1/4) Epoch 11, batch 7500, loss[loss=0.1946, simple_loss=0.2797, pruned_loss=0.05477, over 8103.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3096, pruned_loss=0.07961, over 1612068.25 frames. ], batch size: 23, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:08:13,329 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:15,895 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:19,206 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:20,937 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.827e+02 3.509e+02 4.304e+02 1.282e+03, threshold=7.018e+02, percent-clipped=8.0 +2023-02-06 11:08:30,007 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:42,578 INFO [train.py:901] (1/4) Epoch 11, batch 7550, loss[loss=0.2072, simple_loss=0.2809, pruned_loss=0.06678, over 7654.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3093, pruned_loss=0.08, over 1610335.27 frames. ], batch size: 19, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:08:48,011 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9994, 1.6049, 1.6668, 1.6178, 1.0802, 1.7006, 2.1438, 1.9147], + device='cuda:1'), covar=tensor([0.0414, 0.1238, 0.1746, 0.1340, 0.0632, 0.1473, 0.0659, 0.0550], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0150, 0.0190, 0.0157, 0.0103, 0.0164, 0.0115, 0.0136], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 11:09:03,019 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-06 11:09:13,612 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5854, 1.4108, 2.7712, 1.2363, 1.9998, 3.0237, 3.1378, 2.5937], + device='cuda:1'), covar=tensor([0.1087, 0.1458, 0.0425, 0.2012, 0.0948, 0.0297, 0.0546, 0.0599], + device='cuda:1'), in_proj_covar=tensor([0.0263, 0.0298, 0.0261, 0.0292, 0.0273, 0.0236, 0.0338, 0.0287], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 11:09:17,367 INFO [train.py:901] (1/4) Epoch 11, batch 7600, loss[loss=0.2638, simple_loss=0.3338, pruned_loss=0.09687, over 8137.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.31, pruned_loss=0.08056, over 1613067.15 frames. ], batch size: 22, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:09:31,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 2.441e+02 2.975e+02 3.888e+02 6.138e+02, threshold=5.951e+02, percent-clipped=0.0 +2023-02-06 11:09:39,145 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:09:51,499 INFO [train.py:901] (1/4) Epoch 11, batch 7650, loss[loss=0.2683, simple_loss=0.3396, pruned_loss=0.09846, over 8486.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3107, pruned_loss=0.08038, over 1614809.40 frames. ], batch size: 28, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:10:12,791 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 11:10:26,446 INFO [train.py:901] (1/4) Epoch 11, batch 7700, loss[loss=0.1924, simple_loss=0.2787, pruned_loss=0.05301, over 8081.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3109, pruned_loss=0.0807, over 1613299.17 frames. ], batch size: 21, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:10:38,717 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7075, 2.1449, 2.2704, 1.3756, 2.3958, 1.5962, 0.7395, 1.8064], + device='cuda:1'), covar=tensor([0.0448, 0.0196, 0.0160, 0.0382, 0.0245, 0.0571, 0.0559, 0.0219], + device='cuda:1'), in_proj_covar=tensor([0.0386, 0.0324, 0.0269, 0.0378, 0.0308, 0.0468, 0.0355, 0.0348], + device='cuda:1'), out_proj_covar=tensor([1.0997e-04, 9.0117e-05, 7.4909e-05, 1.0604e-04, 8.6932e-05, 1.4251e-04, + 1.0098e-04, 9.8654e-05], device='cuda:1') +2023-02-06 11:10:39,159 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 11:10:39,711 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.472e+02 3.053e+02 3.571e+02 8.603e+02, threshold=6.105e+02, percent-clipped=3.0 +2023-02-06 11:10:58,414 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88577.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:11:00,812 INFO [train.py:901] (1/4) Epoch 11, batch 7750, loss[loss=0.1908, simple_loss=0.276, pruned_loss=0.05281, over 7430.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.311, pruned_loss=0.08038, over 1613997.24 frames. 
], batch size: 17, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:11:20,952 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2390, 1.7995, 2.6345, 2.0676, 2.3333, 2.0695, 1.7297, 1.0096], + device='cuda:1'), covar=tensor([0.3955, 0.3898, 0.1190, 0.2399, 0.1710, 0.2130, 0.1644, 0.3884], + device='cuda:1'), in_proj_covar=tensor([0.0884, 0.0864, 0.0730, 0.0842, 0.0927, 0.0800, 0.0701, 0.0763], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:11:36,342 INFO [train.py:901] (1/4) Epoch 11, batch 7800, loss[loss=0.2257, simple_loss=0.3107, pruned_loss=0.07037, over 8467.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.3094, pruned_loss=0.07984, over 1613451.92 frames. ], batch size: 25, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:11:42,970 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7460, 1.4403, 5.8573, 2.0967, 5.1842, 4.9368, 5.4238, 5.2014], + device='cuda:1'), covar=tensor([0.0464, 0.4524, 0.0350, 0.3180, 0.0889, 0.0680, 0.0389, 0.0467], + device='cuda:1'), in_proj_covar=tensor([0.0490, 0.0576, 0.0586, 0.0528, 0.0603, 0.0514, 0.0507, 0.0577], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:11:48,834 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.685e+02 3.345e+02 4.152e+02 1.012e+03, threshold=6.690e+02, percent-clipped=6.0 +2023-02-06 11:11:50,872 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88653.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:12:09,483 INFO [train.py:901] (1/4) Epoch 11, batch 7850, loss[loss=0.266, simple_loss=0.3457, pruned_loss=0.09316, over 8263.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3111, pruned_loss=0.08093, over 1613916.43 frames. ], batch size: 24, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:12:11,209 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-06 11:12:42,909 INFO [train.py:901] (1/4) Epoch 11, batch 7900, loss[loss=0.2582, simple_loss=0.3289, pruned_loss=0.09375, over 8190.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3108, pruned_loss=0.08073, over 1612856.57 frames. ], batch size: 23, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:12:55,426 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.490e+02 3.060e+02 3.735e+02 6.734e+02, threshold=6.120e+02, percent-clipped=1.0 +2023-02-06 11:12:58,825 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8314, 1.8292, 2.3615, 1.7060, 1.1850, 2.5513, 0.4113, 1.5186], + device='cuda:1'), covar=tensor([0.2889, 0.2436, 0.0572, 0.2551, 0.4908, 0.0557, 0.4025, 0.2072], + device='cuda:1'), in_proj_covar=tensor([0.0162, 0.0168, 0.0101, 0.0212, 0.0252, 0.0103, 0.0163, 0.0165], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:13:07,261 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88768.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:13:15,811 INFO [train.py:901] (1/4) Epoch 11, batch 7950, loss[loss=0.2019, simple_loss=0.2836, pruned_loss=0.06008, over 7791.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3119, pruned_loss=0.08121, over 1613239.22 frames. 
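], batch size: 19, lr: 6.81e-03, grad_scale: 16.0

The `lr:` field in the `train.py:901` records decays slowly within an epoch (6.89e-03 → 6.81e-03 across epoch 11 here) and steps down at the epoch boundary (6.52e-03 at the start of epoch 12, just below). This is consistent with icefall's Eden-style schedule, which discounts the base rate by both batch index and epoch; a sketch follows, where the constants (`base_lr`, `lr_batches=5000`, `lr_epochs=3.5`) are assumed defaults rather than values read from this run.

```python
def eden_lr(base_lr: float, batch: int, epoch: float,
            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
    """Eden-style schedule: decay smoothly with both the global batch
    index and the (possibly fractional) epoch."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor

# Within an epoch the batch term dominates, giving the slow drift; crossing
# an epoch boundary bumps the epoch term, giving the visible step-down.
```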
+2023-02-06 11:13:49,359 INFO [train.py:901] (1/4) Epoch 11, batch 8000, loss[loss=0.2388, simple_loss=0.3118, pruned_loss=0.08285, over 7967.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3103, pruned_loss=0.08036, over 1611234.92 frames. ], batch size: 21, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:13:50,906 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:14:02,005 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.603e+02 3.071e+02 3.730e+02 8.421e+02, threshold=6.141e+02, percent-clipped=3.0
+2023-02-06 11:14:07,246 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88858.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:14:22,240 INFO [train.py:901] (1/4) Epoch 11, batch 8050, loss[loss=0.1961, simple_loss=0.2722, pruned_loss=0.06003, over 7215.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3084, pruned_loss=0.07986, over 1599023.27 frames. ], batch size: 16, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:14:54,022 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 11:14:58,683 INFO [train.py:901] (1/4) Epoch 12, batch 0, loss[loss=0.2134, simple_loss=0.2801, pruned_loss=0.07332, over 7699.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2801, pruned_loss=0.07332, over 7699.00 frames. ], batch size: 18, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:14:58,683 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 11:15:09,785 INFO [train.py:935] (1/4) Epoch 12, validation: loss=0.1897, simple_loss=0.2896, pruned_loss=0.04486, over 944034.00 frames.
+2023-02-06 11:15:09,787 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB
+2023-02-06 11:15:23,306 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 11:15:35,201 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.690e+02 3.540e+02 4.339e+02 7.249e+02, threshold=7.080e+02, percent-clipped=5.0
+2023-02-06 11:15:44,678 INFO [train.py:901] (1/4) Epoch 12, batch 50, loss[loss=0.1773, simple_loss=0.258, pruned_loss=0.04827, over 7213.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3154, pruned_loss=0.08223, over 366956.02 frames. ], batch size: 16, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:15:57,421 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 11:16:19,049 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 11:16:19,762 INFO [train.py:901] (1/4) Epoch 12, batch 100, loss[loss=0.1978, simple_loss=0.2901, pruned_loss=0.0527, over 8537.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3133, pruned_loss=0.08172, over 641890.29 frames. ], batch size: 28, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:16:26,519 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89024.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:16:33,362 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs.
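limit=5.0

The `scaling.py:679` "Whitening" lines, like the 11:16:33 entry just above (metric=2.57 vs. limit=5.0), are a diagnostic on how isotropic a layer's feature covariance is: a metric of 1.0 means the eigenvalues of the per-group covariance are all equal, larger values mean energy is concentrating in a few directions, and a corrective penalty would engage only when the metric exceeds the logged limit. One plausible formulation of that metric is sketched below; the exact computation in icefall's `scaling.py` may differ, so treat this as an assumption.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Eigenvalue-spread diagnostic: 1.0 for a perfectly white
    (isotropic) per-group covariance, larger as energy concentrates
    in fewer directions. x has shape (num_frames, num_channels)."""
    num_frames, num_channels = x.shape
    assert num_channels % num_groups == 0
    x = x.reshape(num_frames, num_groups, -1).transpose(0, 1)  # (g, n, c/g)
    x = x - x.mean(dim=1, keepdim=True)                        # zero-mean
    cov = x.transpose(1, 2) @ x / num_frames                   # (g, c/g, c/g)
    eigs = torch.linalg.eigvalsh(cov)                          # symmetric -> real
    return (eigs ** 2).mean() / (eigs.mean() ** 2 + 1e-20)

# e.g. whitening_metric(feats, num_groups=8) compared against limit=2.0,
# as in the log lines here; below the limit the statistic is only logged.
```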
+2023-02-06 11:16:40,643 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:16:43,463 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89049.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:16:43,905 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.771e+02 3.256e+02 4.152e+02 1.357e+03, threshold=6.512e+02, percent-clipped=1.0
+2023-02-06 11:16:54,729 INFO [train.py:901] (1/4) Epoch 12, batch 150, loss[loss=0.1647, simple_loss=0.2372, pruned_loss=0.04612, over 7222.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3099, pruned_loss=0.07889, over 856206.81 frames. ], batch size: 16, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:17:24,726 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 11:17:29,011 INFO [train.py:901] (1/4) Epoch 12, batch 200, loss[loss=0.2347, simple_loss=0.3096, pruned_loss=0.07988, over 8490.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3097, pruned_loss=0.07866, over 1024675.60 frames. ], batch size: 28, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:17:53,942 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.712e+02 3.423e+02 4.383e+02 1.008e+03, threshold=6.845e+02, percent-clipped=3.0
+2023-02-06 11:18:03,563 INFO [train.py:901] (1/4) Epoch 12, batch 250, loss[loss=0.1907, simple_loss=0.2813, pruned_loss=0.05004, over 8302.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3101, pruned_loss=0.07894, over 1155497.99 frames. ], batch size: 23, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:18:13,242 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 11:18:17,195 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0
+2023-02-06 11:18:20,887 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:18:22,861 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 11:18:40,049 INFO [train.py:901] (1/4) Epoch 12, batch 300, loss[loss=0.2048, simple_loss=0.2773, pruned_loss=0.06614, over 7692.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3103, pruned_loss=0.07921, over 1260634.17 frames. ], batch size: 18, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:19:05,001 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.536e+02 3.052e+02 3.921e+02 6.584e+02, threshold=6.103e+02, percent-clipped=0.0
+2023-02-06 11:19:14,503 INFO [train.py:901] (1/4) Epoch 12, batch 350, loss[loss=0.2834, simple_loss=0.3469, pruned_loss=0.11, over 8424.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3103, pruned_loss=0.07915, over 1343046.37 frames. ], batch size: 49, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:19:49,366 INFO [train.py:901] (1/4) Epoch 12, batch 400, loss[loss=0.2202, simple_loss=0.297, pruned_loss=0.07167, over 8074.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3106, pruned_loss=0.07935, over 1404475.29 frames.
], batch size: 21, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:20:14,267 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.417e+02 2.965e+02 3.513e+02 5.511e+02, threshold=5.929e+02, percent-clipped=0.0 +2023-02-06 11:20:24,227 INFO [train.py:901] (1/4) Epoch 12, batch 450, loss[loss=0.212, simple_loss=0.2904, pruned_loss=0.06684, over 8137.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3106, pruned_loss=0.07951, over 1450528.40 frames. ], batch size: 22, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:20:25,042 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6904, 1.4666, 5.8051, 2.1503, 5.2045, 4.9196, 5.3759, 5.2040], + device='cuda:1'), covar=tensor([0.0437, 0.4551, 0.0297, 0.3153, 0.0879, 0.0664, 0.0425, 0.0481], + device='cuda:1'), in_proj_covar=tensor([0.0490, 0.0571, 0.0579, 0.0529, 0.0606, 0.0511, 0.0507, 0.0575], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:20:40,991 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:20:43,189 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:20:58,868 INFO [train.py:901] (1/4) Epoch 12, batch 500, loss[loss=0.1793, simple_loss=0.2537, pruned_loss=0.0525, over 7920.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3098, pruned_loss=0.07905, over 1485763.48 frames. ], batch size: 20, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:21:19,379 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:21:22,225 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5193, 1.7444, 2.8010, 1.3281, 2.0752, 1.8942, 1.6053, 1.9172], + device='cuda:1'), covar=tensor([0.1615, 0.1998, 0.0714, 0.3603, 0.1439, 0.2681, 0.1715, 0.1819], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0521, 0.0534, 0.0580, 0.0619, 0.0555, 0.0473, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:21:24,107 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.539e+02 3.031e+02 3.696e+02 8.346e+02, threshold=6.063e+02, percent-clipped=3.0 +2023-02-06 11:21:29,672 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89457.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:21:34,341 INFO [train.py:901] (1/4) Epoch 12, batch 550, loss[loss=0.2208, simple_loss=0.2817, pruned_loss=0.07997, over 7659.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3106, pruned_loss=0.08025, over 1514428.92 frames. 
], batch size: 19, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:21:53,122 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6006, 1.8986, 1.6653, 2.3168, 0.9657, 1.4475, 1.5524, 1.9149], + device='cuda:1'), covar=tensor([0.0906, 0.0801, 0.1041, 0.0435, 0.1220, 0.1410, 0.0891, 0.0756], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0213, 0.0255, 0.0214, 0.0218, 0.0251, 0.0257, 0.0220], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 11:22:02,583 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:22:09,198 INFO [train.py:901] (1/4) Epoch 12, batch 600, loss[loss=0.2304, simple_loss=0.3162, pruned_loss=0.0723, over 8516.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.311, pruned_loss=0.08026, over 1540257.39 frames. ], batch size: 26, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:22:17,833 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:22:20,461 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:22:26,550 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 11:22:34,513 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.630e+02 3.047e+02 3.733e+02 1.036e+03, threshold=6.094e+02, percent-clipped=2.0 +2023-02-06 11:22:44,045 INFO [train.py:901] (1/4) Epoch 12, batch 650, loss[loss=0.2783, simple_loss=0.3271, pruned_loss=0.1148, over 6820.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3105, pruned_loss=0.08023, over 1554058.65 frames. ], batch size: 15, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:23:18,870 INFO [train.py:901] (1/4) Epoch 12, batch 700, loss[loss=0.2593, simple_loss=0.3371, pruned_loss=0.09074, over 8335.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3103, pruned_loss=0.08008, over 1568926.12 frames. 
], batch size: 25, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:23:40,433 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:23:43,631 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.821e+02 3.296e+02 4.031e+02 9.579e+02, threshold=6.593e+02, percent-clipped=5.0 +2023-02-06 11:23:47,395 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5678, 1.9930, 3.3348, 1.3328, 2.3409, 2.0170, 1.6828, 2.2380], + device='cuda:1'), covar=tensor([0.1636, 0.2257, 0.0636, 0.3928, 0.1658, 0.2828, 0.1755, 0.2251], + device='cuda:1'), in_proj_covar=tensor([0.0483, 0.0516, 0.0528, 0.0578, 0.0614, 0.0551, 0.0468, 0.0607], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:23:53,354 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9577, 1.6072, 1.3163, 1.5233, 1.2568, 1.1270, 1.1889, 1.2791], + device='cuda:1'), covar=tensor([0.1035, 0.0424, 0.1198, 0.0513, 0.0671, 0.1329, 0.0790, 0.0731], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0241, 0.0320, 0.0300, 0.0303, 0.0325, 0.0340, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 11:23:53,837 INFO [train.py:901] (1/4) Epoch 12, batch 750, loss[loss=0.261, simple_loss=0.3376, pruned_loss=0.09219, over 8341.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3108, pruned_loss=0.08077, over 1579016.42 frames. ], batch size: 26, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:23:55,356 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1441, 1.4110, 4.3126, 1.6009, 3.8456, 3.5940, 3.8986, 3.7342], + device='cuda:1'), covar=tensor([0.0506, 0.4121, 0.0457, 0.3188, 0.0927, 0.0830, 0.0504, 0.0605], + device='cuda:1'), in_proj_covar=tensor([0.0495, 0.0576, 0.0582, 0.0530, 0.0606, 0.0516, 0.0509, 0.0579], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:24:02,992 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.41 vs. limit=5.0 +2023-02-06 11:24:11,504 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 11:24:17,637 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:24:20,260 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 11:24:28,200 INFO [train.py:901] (1/4) Epoch 12, batch 800, loss[loss=0.2491, simple_loss=0.3426, pruned_loss=0.07781, over 8094.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3113, pruned_loss=0.08047, over 1590170.64 frames. 
], batch size: 23, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:24:37,596 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4430, 1.4204, 1.7115, 1.3417, 1.0015, 1.7216, 0.1737, 1.2148], + device='cuda:1'), covar=tensor([0.3014, 0.1917, 0.0546, 0.1757, 0.4207, 0.0584, 0.3429, 0.2129], + device='cuda:1'), in_proj_covar=tensor([0.0170, 0.0171, 0.0102, 0.0218, 0.0259, 0.0107, 0.0166, 0.0169], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:24:43,681 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:24:53,399 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.628e+02 3.285e+02 4.121e+02 9.349e+02, threshold=6.571e+02, percent-clipped=6.0 +2023-02-06 11:24:59,656 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:25:02,846 INFO [train.py:901] (1/4) Epoch 12, batch 850, loss[loss=0.2178, simple_loss=0.2979, pruned_loss=0.06892, over 8501.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.311, pruned_loss=0.0806, over 1595393.51 frames. ], batch size: 31, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:25:18,000 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:25:19,284 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:25:28,670 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89801.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:25:31,255 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8388, 5.9855, 5.1686, 2.3850, 5.2506, 5.6058, 5.3778, 5.3142], + device='cuda:1'), covar=tensor([0.0681, 0.0442, 0.0872, 0.4594, 0.0758, 0.0700, 0.1114, 0.0591], + device='cuda:1'), in_proj_covar=tensor([0.0473, 0.0383, 0.0388, 0.0491, 0.0390, 0.0386, 0.0380, 0.0338], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:25:37,800 INFO [train.py:901] (1/4) Epoch 12, batch 900, loss[loss=0.2032, simple_loss=0.2869, pruned_loss=0.05972, over 8191.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3102, pruned_loss=0.07931, over 1605862.91 frames. ], batch size: 23, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:26:03,293 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.446e+02 3.021e+02 3.729e+02 6.397e+02, threshold=6.041e+02, percent-clipped=0.0 +2023-02-06 11:26:03,491 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:26:11,263 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6119, 1.5050, 5.6521, 2.2671, 5.0213, 4.7954, 5.2760, 5.0402], + device='cuda:1'), covar=tensor([0.0401, 0.4638, 0.0364, 0.3309, 0.0950, 0.0660, 0.0390, 0.0485], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0586, 0.0593, 0.0542, 0.0619, 0.0526, 0.0519, 0.0593], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:26:11,800 INFO [train.py:901] (1/4) Epoch 12, batch 950, loss[loss=0.2501, simple_loss=0.3303, pruned_loss=0.08497, over 8524.00 frames. 
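], tot_loss[loss=0.2342, simple_loss=0.3101, pruned_loss=0.07914, over 1605389.37 frames. ], batch size: 34, lr: 6.49e-03, grad_scale: 8.0

The `train.py:1067` WARNING lines drop utterances whose duration falls outside what the training configuration tolerates: in this log both very short cuts (≈0.92-0.98 s, often `_sp0.9`/`_sp1.1` speed-perturbed variants) and very long ones (≈25-30 s) are excluded. A minimal sketch of such a predicate over a lhotse `CutSet` follows; the exact bounds used here (1.0 s and 25.0 s) are inferred from the log above, not read from the recipe, and the constant names are hypothetical.

```python
import logging

MIN_SECS, MAX_SECS = 1.0, 25.0  # assumed bounds, inferred from this log

def remove_short_and_long_utt(cut) -> bool:
    """Predicate for lhotse's CutSet.filter(): keep a cut only if its
    duration is inside the tolerated range, logging exclusions in the
    same shape as the WARNING lines here."""
    keep = MIN_SECS <= cut.duration <= MAX_SECS
    if not keep:
        logging.warning(
            "Exclude cut with ID %s from training. Duration: %s",
            cut.id, cut.duration,
        )
    return keep

# Applied as: train_cuts = train_cuts.filter(remove_short_and_long_utt)
```

Filtering on the `CutSet` rather than editing the manifest keeps the data on disk untouched; excluded cuts are simply skipped when batches are drawn.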
+2023-02-06 11:26:16,404 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:24,520 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89883.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:30,578 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-06 11:26:38,567 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:38,607 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:39,096 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 11:26:46,288 INFO [train.py:901] (1/4) Epoch 12, batch 1000, loss[loss=0.2793, simple_loss=0.3408, pruned_loss=0.1089, over 8190.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.311, pruned_loss=0.07989, over 1607141.80 frames. ], batch size: 23, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:47,815 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89916.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:26:55,678 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:27:11,391 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.648e+02 3.254e+02 4.081e+02 9.414e+02, threshold=6.507e+02, percent-clipped=7.0
+2023-02-06 11:27:11,417 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 11:27:20,829 INFO [train.py:901] (1/4) Epoch 12, batch 1050, loss[loss=0.2308, simple_loss=0.3067, pruned_loss=0.07748, over 8467.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3105, pruned_loss=0.07955, over 1609810.85 frames. ], batch size: 25, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:27:24,329 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 11:27:35,822 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89986.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:27:56,236 INFO [train.py:901] (1/4) Epoch 12, batch 1100, loss[loss=0.1973, simple_loss=0.285, pruned_loss=0.05476, over 7926.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3108, pruned_loss=0.07989, over 1609214.09 frames.
], batch size: 20, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:27:57,787 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4820, 1.2904, 2.3608, 1.1276, 2.0695, 2.4882, 2.6235, 2.1451], + device='cuda:1'), covar=tensor([0.0871, 0.1173, 0.0429, 0.1922, 0.0717, 0.0356, 0.0561, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0266, 0.0296, 0.0260, 0.0292, 0.0274, 0.0236, 0.0345, 0.0289], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 11:28:15,782 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:28:22,906 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.536e+02 3.046e+02 3.976e+02 6.882e+02, threshold=6.092e+02, percent-clipped=1.0 +2023-02-06 11:28:31,018 INFO [train.py:901] (1/4) Epoch 12, batch 1150, loss[loss=0.2447, simple_loss=0.3297, pruned_loss=0.07991, over 8496.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3092, pruned_loss=0.07907, over 1607020.57 frames. ], batch size: 26, lr: 6.48e-03, grad_scale: 4.0 +2023-02-06 11:28:34,440 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 11:29:01,090 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:05,597 INFO [train.py:901] (1/4) Epoch 12, batch 1200, loss[loss=0.2789, simple_loss=0.3405, pruned_loss=0.1086, over 8187.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3103, pruned_loss=0.07955, over 1611330.87 frames. ], batch size: 23, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:29:19,573 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:30,361 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90148.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:32,957 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.449e+02 3.099e+02 4.282e+02 6.791e+02, threshold=6.197e+02, percent-clipped=4.0 +2023-02-06 11:29:36,548 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:37,309 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:41,828 INFO [train.py:901] (1/4) Epoch 12, batch 1250, loss[loss=0.1873, simple_loss=0.263, pruned_loss=0.05579, over 7814.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3101, pruned_loss=0.07909, over 1615725.54 frames. ], batch size: 20, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:29:47,517 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90172.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:29:55,698 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:05,420 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90197.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:30:17,140 INFO [train.py:901] (1/4) Epoch 12, batch 1300, loss[loss=0.2213, simple_loss=0.3009, pruned_loss=0.07086, over 8198.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3103, pruned_loss=0.07886, over 1615961.42 frames. 
], batch size: 23, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:30:26,149 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:37,504 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:43,679 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:44,948 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.408e+02 3.209e+02 4.069e+02 1.568e+03, threshold=6.418e+02, percent-clipped=9.0 +2023-02-06 11:30:53,231 INFO [train.py:901] (1/4) Epoch 12, batch 1350, loss[loss=0.3019, simple_loss=0.3525, pruned_loss=0.1256, over 6659.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3101, pruned_loss=0.07853, over 1613379.83 frames. ], batch size: 71, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:30:55,580 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:31:09,750 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0848, 1.6196, 1.3630, 1.6333, 1.3599, 1.2188, 1.2858, 1.3874], + device='cuda:1'), covar=tensor([0.1007, 0.0426, 0.1203, 0.0465, 0.0601, 0.1307, 0.0795, 0.0655], + device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0240, 0.0318, 0.0299, 0.0301, 0.0325, 0.0339, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 11:31:28,597 INFO [train.py:901] (1/4) Epoch 12, batch 1400, loss[loss=0.2679, simple_loss=0.3451, pruned_loss=0.0954, over 8287.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3101, pruned_loss=0.0792, over 1615178.30 frames. ], batch size: 23, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:31:30,297 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 11:31:47,887 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:31:54,627 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.398e+02 2.808e+02 3.540e+02 8.131e+02, threshold=5.617e+02, percent-clipped=1.0 +2023-02-06 11:32:03,614 INFO [train.py:901] (1/4) Epoch 12, batch 1450, loss[loss=0.2806, simple_loss=0.3415, pruned_loss=0.1098, over 8107.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3094, pruned_loss=0.07905, over 1613269.05 frames. ], batch size: 23, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:32:07,898 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:32:08,434 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 11:32:38,144 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:32:38,593 INFO [train.py:901] (1/4) Epoch 12, batch 1500, loss[loss=0.2122, simple_loss=0.2917, pruned_loss=0.06636, over 8284.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3081, pruned_loss=0.079, over 1612834.48 frames. 
], batch size: 23, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:32:55,147 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:33:04,320 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.462e+02 2.993e+02 3.898e+02 9.256e+02, threshold=5.985e+02, percent-clipped=2.0 +2023-02-06 11:33:12,495 INFO [train.py:901] (1/4) Epoch 12, batch 1550, loss[loss=0.2469, simple_loss=0.3134, pruned_loss=0.09022, over 8133.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.309, pruned_loss=0.07888, over 1617082.15 frames. ], batch size: 22, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:33:21,467 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:33:33,069 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90492.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:33:48,245 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0483, 2.4242, 1.7789, 2.7813, 1.5337, 1.5150, 2.0724, 2.3359], + device='cuda:1'), covar=tensor([0.0721, 0.0821, 0.1057, 0.0389, 0.1139, 0.1407, 0.0924, 0.0831], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0215, 0.0258, 0.0218, 0.0221, 0.0252, 0.0259, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 11:33:48,745 INFO [train.py:901] (1/4) Epoch 12, batch 1600, loss[loss=0.252, simple_loss=0.3383, pruned_loss=0.08289, over 8513.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3088, pruned_loss=0.0781, over 1620417.03 frames. ], batch size: 26, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:33:48,905 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:15,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.712e+02 3.378e+02 4.197e+02 8.231e+02, threshold=6.755e+02, percent-clipped=6.0 +2023-02-06 11:34:23,538 INFO [train.py:901] (1/4) Epoch 12, batch 1650, loss[loss=0.2163, simple_loss=0.3016, pruned_loss=0.06545, over 7974.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.308, pruned_loss=0.07781, over 1618711.71 frames. ], batch size: 21, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:34:43,461 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:44,973 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7148, 1.3259, 1.5904, 1.2361, 0.9727, 1.3753, 1.4640, 1.3472], + device='cuda:1'), covar=tensor([0.0467, 0.1224, 0.1636, 0.1378, 0.0540, 0.1470, 0.0685, 0.0620], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0152, 0.0191, 0.0158, 0.0103, 0.0163, 0.0117, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 11:34:47,040 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:53,818 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:58,208 INFO [train.py:901] (1/4) Epoch 12, batch 1700, loss[loss=0.3033, simple_loss=0.3585, pruned_loss=0.1241, over 8035.00 frames. 
], tot_loss[loss=0.2331, simple_loss=0.3089, pruned_loss=0.07862, over 1617882.45 frames. ], batch size: 22, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:35:04,319 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:35:12,050 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.16 vs. limit=2.0 +2023-02-06 11:35:18,595 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3835, 2.0208, 3.4044, 1.2617, 2.5182, 1.9459, 1.5919, 2.4185], + device='cuda:1'), covar=tensor([0.1749, 0.2115, 0.0667, 0.3885, 0.1419, 0.2782, 0.1834, 0.2076], + device='cuda:1'), in_proj_covar=tensor([0.0484, 0.0518, 0.0533, 0.0581, 0.0617, 0.0556, 0.0470, 0.0614], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:35:24,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.486e+02 2.952e+02 3.646e+02 6.764e+02, threshold=5.904e+02, percent-clipped=1.0 +2023-02-06 11:35:33,331 INFO [train.py:901] (1/4) Epoch 12, batch 1750, loss[loss=0.206, simple_loss=0.2934, pruned_loss=0.05931, over 7975.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3084, pruned_loss=0.07827, over 1612624.29 frames. ], batch size: 21, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:36:04,022 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:36:07,404 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:36:08,032 INFO [train.py:901] (1/4) Epoch 12, batch 1800, loss[loss=0.2485, simple_loss=0.3135, pruned_loss=0.09178, over 7703.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3084, pruned_loss=0.07821, over 1614360.51 frames. ], batch size: 18, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:36:35,308 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.625e+02 3.119e+02 3.569e+02 7.012e+02, threshold=6.239e+02, percent-clipped=2.0 +2023-02-06 11:36:43,320 INFO [train.py:901] (1/4) Epoch 12, batch 1850, loss[loss=0.2074, simple_loss=0.2876, pruned_loss=0.06358, over 8291.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3091, pruned_loss=0.07872, over 1612417.96 frames. ], batch size: 23, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:37:17,706 INFO [train.py:901] (1/4) Epoch 12, batch 1900, loss[loss=0.2539, simple_loss=0.3342, pruned_loss=0.08675, over 8022.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3096, pruned_loss=0.07887, over 1613642.41 frames. ], batch size: 22, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:37:22,469 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90821.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:27,322 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:44,438 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.569e+02 3.031e+02 3.632e+02 7.649e+02, threshold=6.063e+02, percent-clipped=2.0 +2023-02-06 11:37:47,238 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-06 11:37:48,679 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90858.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:52,154 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:52,632 INFO [train.py:901] (1/4) Epoch 12, batch 1950, loss[loss=0.2084, simple_loss=0.2873, pruned_loss=0.06477, over 8037.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3105, pruned_loss=0.07976, over 1618232.53 frames. ], batch size: 22, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:37:54,799 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:59,342 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 11:38:10,331 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:38:19,030 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 11:38:27,972 INFO [train.py:901] (1/4) Epoch 12, batch 2000, loss[loss=0.2301, simple_loss=0.2995, pruned_loss=0.08037, over 8275.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.31, pruned_loss=0.0792, over 1619907.51 frames. ], batch size: 23, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:38:36,468 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9460, 2.3655, 3.6582, 1.7560, 3.0283, 2.2970, 2.0825, 2.7035], + device='cuda:1'), covar=tensor([0.1285, 0.1873, 0.0528, 0.3127, 0.1112, 0.2384, 0.1420, 0.1892], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0518, 0.0531, 0.0582, 0.0616, 0.0558, 0.0471, 0.0610], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:38:43,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90936.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:38:54,918 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.641e+02 3.163e+02 4.034e+02 9.087e+02, threshold=6.326e+02, percent-clipped=9.0 +2023-02-06 11:39:02,897 INFO [train.py:901] (1/4) Epoch 12, batch 2050, loss[loss=0.2459, simple_loss=0.3099, pruned_loss=0.09094, over 7917.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3078, pruned_loss=0.07805, over 1614766.51 frames. ], batch size: 20, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:39:03,755 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:39:09,924 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:39:21,820 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:39:38,681 INFO [train.py:901] (1/4) Epoch 12, batch 2100, loss[loss=0.1899, simple_loss=0.2707, pruned_loss=0.05457, over 8079.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3086, pruned_loss=0.07808, over 1617448.58 frames. ], batch size: 21, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:39:40,484 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-02-06 11:40:04,173 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 2.659e+02 3.265e+02 4.247e+02 8.349e+02, threshold=6.531e+02, percent-clipped=2.0 +2023-02-06 11:40:12,099 INFO [train.py:901] (1/4) Epoch 12, batch 2150, loss[loss=0.233, simple_loss=0.3092, pruned_loss=0.07842, over 8252.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3079, pruned_loss=0.07771, over 1615672.79 frames. ], batch size: 24, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:40:26,821 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:40:44,037 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:40:47,215 INFO [train.py:901] (1/4) Epoch 12, batch 2200, loss[loss=0.2332, simple_loss=0.2998, pruned_loss=0.08333, over 8036.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3081, pruned_loss=0.0783, over 1614756.14 frames. ], batch size: 22, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:41:13,716 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.751e+02 3.546e+02 4.173e+02 9.054e+02, threshold=7.092e+02, percent-clipped=3.0 +2023-02-06 11:41:21,758 INFO [train.py:901] (1/4) Epoch 12, batch 2250, loss[loss=0.2348, simple_loss=0.2955, pruned_loss=0.08703, over 7780.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3081, pruned_loss=0.07819, over 1614347.04 frames. ], batch size: 19, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:41:41,116 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:41:44,211 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 11:41:54,520 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91211.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:41:56,402 INFO [train.py:901] (1/4) Epoch 12, batch 2300, loss[loss=0.2192, simple_loss=0.2857, pruned_loss=0.07638, over 7264.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3077, pruned_loss=0.07846, over 1611356.54 frames. 
], batch size: 16, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:41:58,490 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91217.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:42:01,936 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0203, 1.5599, 1.6486, 1.4791, 0.9079, 1.4666, 1.6799, 1.6076], + device='cuda:1'), covar=tensor([0.0474, 0.1215, 0.1591, 0.1303, 0.0598, 0.1427, 0.0655, 0.0566], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0152, 0.0190, 0.0159, 0.0103, 0.0162, 0.0115, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 11:42:07,343 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:42:15,377 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5300, 2.6677, 1.9321, 2.2585, 2.2573, 1.4640, 2.0612, 2.2783], + device='cuda:1'), covar=tensor([0.1528, 0.0374, 0.1068, 0.0696, 0.0692, 0.1590, 0.0984, 0.0843], + device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0237, 0.0318, 0.0295, 0.0300, 0.0325, 0.0341, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 11:42:23,423 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.635e+02 3.142e+02 4.194e+02 9.102e+02, threshold=6.284e+02, percent-clipped=2.0 +2023-02-06 11:42:25,004 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:42:31,176 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3506, 1.5339, 1.3847, 1.9048, 0.7307, 1.2204, 1.3210, 1.5577], + device='cuda:1'), covar=tensor([0.0896, 0.0795, 0.1180, 0.0524, 0.1220, 0.1378, 0.0792, 0.0839], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0211, 0.0256, 0.0215, 0.0218, 0.0253, 0.0258, 0.0220], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 11:42:31,680 INFO [train.py:901] (1/4) Epoch 12, batch 2350, loss[loss=0.2391, simple_loss=0.3003, pruned_loss=0.08896, over 7439.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3077, pruned_loss=0.07826, over 1614908.14 frames. 
], batch size: 17, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:42:40,965 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8013, 5.8127, 5.0339, 2.4314, 5.1398, 5.4676, 5.3685, 5.2020], + device='cuda:1'), covar=tensor([0.0490, 0.0364, 0.0830, 0.4558, 0.0601, 0.0677, 0.1023, 0.0508], + device='cuda:1'), in_proj_covar=tensor([0.0462, 0.0372, 0.0385, 0.0484, 0.0378, 0.0382, 0.0377, 0.0330], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:42:47,155 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3348, 2.0766, 1.6543, 1.9612, 1.7655, 1.3697, 1.6356, 1.7983], + device='cuda:1'), covar=tensor([0.1247, 0.0386, 0.1093, 0.0550, 0.0658, 0.1405, 0.0870, 0.0756], + device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0237, 0.0318, 0.0296, 0.0300, 0.0325, 0.0342, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 11:42:49,223 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4188, 1.8083, 3.1015, 1.1517, 2.0999, 1.6844, 1.5192, 1.8565], + device='cuda:1'), covar=tensor([0.1729, 0.2085, 0.0680, 0.3925, 0.1656, 0.2938, 0.1801, 0.2380], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0521, 0.0534, 0.0583, 0.0619, 0.0558, 0.0471, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:42:57,738 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:43:02,771 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 11:43:05,258 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6218, 2.2693, 4.4292, 1.3595, 3.0167, 2.2245, 1.8636, 2.8649], + device='cuda:1'), covar=tensor([0.1709, 0.2251, 0.0629, 0.3945, 0.1563, 0.2761, 0.1716, 0.2194], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0522, 0.0535, 0.0582, 0.0619, 0.0557, 0.0471, 0.0611], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:43:05,699 INFO [train.py:901] (1/4) Epoch 12, batch 2400, loss[loss=0.2184, simple_loss=0.2996, pruned_loss=0.06859, over 8292.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3076, pruned_loss=0.07835, over 1613126.74 frames. ], batch size: 23, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:43:14,307 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:43:32,237 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.547e+02 3.046e+02 3.774e+02 7.420e+02, threshold=6.092e+02, percent-clipped=3.0 +2023-02-06 11:43:41,055 INFO [train.py:901] (1/4) Epoch 12, batch 2450, loss[loss=0.1812, simple_loss=0.2557, pruned_loss=0.05338, over 7696.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3079, pruned_loss=0.07875, over 1614351.62 frames. ], batch size: 18, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:44:15,047 INFO [train.py:901] (1/4) Epoch 12, batch 2500, loss[loss=0.2616, simple_loss=0.3321, pruned_loss=0.09556, over 8465.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3084, pruned_loss=0.07907, over 1616305.17 frames. 
], batch size: 25, lr: 6.43e-03, grad_scale: 8.0 +2023-02-06 11:44:27,373 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4565, 1.8157, 4.5257, 1.7465, 2.6152, 5.0486, 5.1747, 3.9649], + device='cuda:1'), covar=tensor([0.1194, 0.1734, 0.0275, 0.2339, 0.1010, 0.0268, 0.0371, 0.0891], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0301, 0.0264, 0.0296, 0.0280, 0.0237, 0.0351, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 11:44:41,745 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.576e+02 3.186e+02 4.386e+02 8.083e+02, threshold=6.372e+02, percent-clipped=11.0 +2023-02-06 11:44:50,281 INFO [train.py:901] (1/4) Epoch 12, batch 2550, loss[loss=0.2358, simple_loss=0.3153, pruned_loss=0.07819, over 8446.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3087, pruned_loss=0.07928, over 1618227.98 frames. ], batch size: 27, lr: 6.43e-03, grad_scale: 8.0 +2023-02-06 11:45:24,410 INFO [train.py:901] (1/4) Epoch 12, batch 2600, loss[loss=0.2012, simple_loss=0.2652, pruned_loss=0.06861, over 7460.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3088, pruned_loss=0.07984, over 1615039.70 frames. ], batch size: 17, lr: 6.43e-03, grad_scale: 8.0 +2023-02-06 11:45:45,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4412, 2.0499, 3.4149, 1.2597, 2.4983, 1.8312, 1.5871, 2.3689], + device='cuda:1'), covar=tensor([0.1773, 0.2122, 0.0746, 0.3952, 0.1646, 0.2903, 0.1804, 0.2237], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0523, 0.0536, 0.0584, 0.0620, 0.0556, 0.0472, 0.0614], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:45:50,013 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.880e+02 3.430e+02 4.544e+02 8.443e+02, threshold=6.860e+02, percent-clipped=9.0 +2023-02-06 11:45:56,362 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91560.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:45:58,895 INFO [train.py:901] (1/4) Epoch 12, batch 2650, loss[loss=0.2084, simple_loss=0.2844, pruned_loss=0.06618, over 8141.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3089, pruned_loss=0.07961, over 1615214.59 frames. ], batch size: 22, lr: 6.43e-03, grad_scale: 8.0 +2023-02-06 11:46:11,917 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91582.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:46:21,317 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6334, 2.1718, 4.4158, 1.3442, 3.0710, 2.1457, 1.6727, 2.9688], + device='cuda:1'), covar=tensor([0.1687, 0.2374, 0.0519, 0.4080, 0.1494, 0.2850, 0.1871, 0.2140], + device='cuda:1'), in_proj_covar=tensor([0.0488, 0.0527, 0.0539, 0.0587, 0.0623, 0.0560, 0.0475, 0.0617], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:46:29,372 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:46:33,855 INFO [train.py:901] (1/4) Epoch 12, batch 2700, loss[loss=0.2043, simple_loss=0.2877, pruned_loss=0.06049, over 7962.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3097, pruned_loss=0.08016, over 1611814.11 frames. 
], batch size: 21, lr: 6.43e-03, grad_scale: 8.0 +2023-02-06 11:46:42,003 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5184, 1.9291, 3.0644, 1.2980, 2.1764, 1.8184, 1.6622, 2.0222], + device='cuda:1'), covar=tensor([0.1610, 0.1946, 0.0689, 0.3729, 0.1501, 0.2724, 0.1627, 0.2004], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0523, 0.0536, 0.0583, 0.0617, 0.0558, 0.0472, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:46:55,931 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:46:59,290 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.691e+02 3.205e+02 3.908e+02 7.628e+02, threshold=6.410e+02, percent-clipped=2.0 +2023-02-06 11:47:08,031 INFO [train.py:901] (1/4) Epoch 12, batch 2750, loss[loss=0.1961, simple_loss=0.2846, pruned_loss=0.05379, over 8317.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3079, pruned_loss=0.0785, over 1612792.03 frames. ], batch size: 25, lr: 6.43e-03, grad_scale: 8.0 +2023-02-06 11:47:10,606 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3812, 2.6720, 2.1354, 3.7673, 1.5955, 1.8859, 2.1536, 3.0557], + device='cuda:1'), covar=tensor([0.0753, 0.1004, 0.1050, 0.0337, 0.1351, 0.1470, 0.1329, 0.0768], + device='cuda:1'), in_proj_covar=tensor([0.0239, 0.0214, 0.0258, 0.0220, 0.0220, 0.0256, 0.0262, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 11:47:23,579 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4851, 1.4328, 1.7892, 1.4559, 1.1008, 1.8229, 0.0936, 1.1590], + device='cuda:1'), covar=tensor([0.2422, 0.1935, 0.0513, 0.1291, 0.3982, 0.0548, 0.3309, 0.1754], + device='cuda:1'), in_proj_covar=tensor([0.0169, 0.0174, 0.0103, 0.0218, 0.0258, 0.0107, 0.0167, 0.0168], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:47:27,141 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 11:47:38,296 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.2471, 5.1426, 4.6023, 2.5089, 4.6308, 4.8572, 4.8988, 4.5255], + device='cuda:1'), covar=tensor([0.0489, 0.0399, 0.0842, 0.4008, 0.0638, 0.0841, 0.1021, 0.0663], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0377, 0.0389, 0.0486, 0.0380, 0.0386, 0.0375, 0.0332], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:47:43,511 INFO [train.py:901] (1/4) Epoch 12, batch 2800, loss[loss=0.211, simple_loss=0.288, pruned_loss=0.06701, over 7214.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3082, pruned_loss=0.07859, over 1615055.33 frames. ], batch size: 16, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:48:08,847 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.634e+02 3.181e+02 3.784e+02 9.192e+02, threshold=6.362e+02, percent-clipped=3.0 +2023-02-06 11:48:15,802 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:48:16,915 INFO [train.py:901] (1/4) Epoch 12, batch 2850, loss[loss=0.2634, simple_loss=0.3141, pruned_loss=0.1063, over 7532.00 frames. 
], tot_loss[loss=0.2341, simple_loss=0.3094, pruned_loss=0.07938, over 1615185.85 frames. ], batch size: 18, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:48:48,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3447, 2.4060, 1.9234, 2.9503, 1.5659, 1.7589, 2.2090, 2.5242], + device='cuda:1'), covar=tensor([0.0577, 0.0721, 0.0883, 0.0355, 0.1020, 0.1208, 0.0844, 0.0608], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0214, 0.0258, 0.0219, 0.0218, 0.0254, 0.0260, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 11:48:52,944 INFO [train.py:901] (1/4) Epoch 12, batch 2900, loss[loss=0.2117, simple_loss=0.2762, pruned_loss=0.07361, over 7698.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3082, pruned_loss=0.07874, over 1610253.00 frames. ], batch size: 18, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:49:18,828 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.538e+02 3.175e+02 3.875e+02 8.885e+02, threshold=6.349e+02, percent-clipped=4.0 +2023-02-06 11:49:22,156 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 11:49:26,762 INFO [train.py:901] (1/4) Epoch 12, batch 2950, loss[loss=0.234, simple_loss=0.3071, pruned_loss=0.08046, over 8334.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3084, pruned_loss=0.07883, over 1608226.13 frames. ], batch size: 26, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:49:54,042 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91904.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:49:57,393 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:50:00,613 INFO [train.py:901] (1/4) Epoch 12, batch 3000, loss[loss=0.2033, simple_loss=0.283, pruned_loss=0.06177, over 8231.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3092, pruned_loss=0.07956, over 1608342.14 frames. ], batch size: 22, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:50:00,613 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 11:50:13,630 INFO [train.py:935] (1/4) Epoch 12, validation: loss=0.1868, simple_loss=0.2871, pruned_loss=0.04323, over 944034.00 frames. +2023-02-06 11:50:13,632 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6608MB +2023-02-06 11:50:26,349 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0205, 3.0736, 2.8143, 4.1868, 1.8019, 2.5107, 2.6129, 3.5395], + device='cuda:1'), covar=tensor([0.0610, 0.0874, 0.0823, 0.0270, 0.1253, 0.1218, 0.1147, 0.0727], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0212, 0.0259, 0.0218, 0.0219, 0.0253, 0.0260, 0.0221], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 11:50:40,664 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.361e+02 2.883e+02 3.802e+02 7.578e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-06 11:50:49,085 INFO [train.py:901] (1/4) Epoch 12, batch 3050, loss[loss=0.2228, simple_loss=0.2867, pruned_loss=0.07942, over 7443.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3089, pruned_loss=0.07934, over 1608248.70 frames. 
], batch size: 17, lr: 6.41e-03, grad_scale: 8.0 +2023-02-06 11:50:56,132 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:14,066 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91999.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:51:25,121 INFO [train.py:901] (1/4) Epoch 12, batch 3100, loss[loss=0.2119, simple_loss=0.2984, pruned_loss=0.0627, over 8604.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3092, pruned_loss=0.07933, over 1610112.86 frames. ], batch size: 31, lr: 6.41e-03, grad_scale: 8.0 +2023-02-06 11:51:28,126 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:28,811 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92019.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:51:45,784 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:51,726 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.663e+02 3.347e+02 4.142e+02 7.838e+02, threshold=6.695e+02, percent-clipped=5.0 +2023-02-06 11:52:01,130 INFO [train.py:901] (1/4) Epoch 12, batch 3150, loss[loss=0.229, simple_loss=0.2989, pruned_loss=0.07955, over 8656.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3098, pruned_loss=0.07985, over 1615297.10 frames. ], batch size: 34, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:52:26,881 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 11:52:35,761 INFO [train.py:901] (1/4) Epoch 12, batch 3200, loss[loss=0.1841, simple_loss=0.2596, pruned_loss=0.05428, over 6854.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3096, pruned_loss=0.07925, over 1612438.85 frames. ], batch size: 15, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:02,009 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.674e+02 3.226e+02 3.971e+02 7.397e+02, threshold=6.453e+02, percent-clipped=3.0 +2023-02-06 11:53:10,368 INFO [train.py:901] (1/4) Epoch 12, batch 3250, loss[loss=0.2336, simple_loss=0.3034, pruned_loss=0.08187, over 8088.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3086, pruned_loss=0.07831, over 1618741.96 frames. ], batch size: 21, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:16,098 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1954, 1.6798, 1.8085, 1.5498, 1.1105, 1.6891, 1.8366, 1.9595], + device='cuda:1'), covar=tensor([0.0446, 0.1148, 0.1563, 0.1245, 0.0550, 0.1366, 0.0634, 0.0512], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0158, 0.0102, 0.0162, 0.0115, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:1') +2023-02-06 11:53:16,528 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 11:53:46,146 INFO [train.py:901] (1/4) Epoch 12, batch 3300, loss[loss=0.1989, simple_loss=0.2777, pruned_loss=0.06005, over 7980.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3076, pruned_loss=0.07755, over 1615092.51 frames. 
], batch size: 21, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:53,631 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2961, 1.5420, 4.5307, 1.6924, 3.9269, 3.8209, 4.0785, 3.9509], + device='cuda:1'), covar=tensor([0.0583, 0.3779, 0.0504, 0.3231, 0.1164, 0.0866, 0.0617, 0.0708], + device='cuda:1'), in_proj_covar=tensor([0.0501, 0.0579, 0.0593, 0.0540, 0.0622, 0.0531, 0.0522, 0.0586], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:54:11,040 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.357e+02 2.935e+02 3.680e+02 6.719e+02, threshold=5.870e+02, percent-clipped=1.0 +2023-02-06 11:54:11,768 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:54:19,108 INFO [train.py:901] (1/4) Epoch 12, batch 3350, loss[loss=0.1947, simple_loss=0.2661, pruned_loss=0.0616, over 7717.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3087, pruned_loss=0.07872, over 1618785.66 frames. ], batch size: 18, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:54:27,349 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92275.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:54:45,174 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92300.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:54:55,326 INFO [train.py:901] (1/4) Epoch 12, batch 3400, loss[loss=0.3059, simple_loss=0.3529, pruned_loss=0.1294, over 8200.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3093, pruned_loss=0.07884, over 1618163.22 frames. ], batch size: 23, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:54:57,464 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:55:15,991 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92343.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:55:21,866 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.364e+02 2.893e+02 3.659e+02 6.777e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 11:55:29,927 INFO [train.py:901] (1/4) Epoch 12, batch 3450, loss[loss=0.2176, simple_loss=0.3024, pruned_loss=0.06636, over 8468.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3093, pruned_loss=0.0786, over 1622022.61 frames. 
], batch size: 25, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:55:32,847 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92368.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:55:39,753 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4623, 1.8184, 1.8856, 0.9838, 1.9504, 1.3367, 0.4465, 1.7184], + device='cuda:1'), covar=tensor([0.0401, 0.0230, 0.0177, 0.0406, 0.0251, 0.0634, 0.0615, 0.0178], + device='cuda:1'), in_proj_covar=tensor([0.0404, 0.0334, 0.0286, 0.0397, 0.0326, 0.0486, 0.0364, 0.0365], + device='cuda:1'), out_proj_covar=tensor([1.1477e-04, 9.2415e-05, 7.9310e-05, 1.1088e-04, 9.1650e-05, 1.4679e-04, + 1.0322e-04, 1.0268e-04], device='cuda:1') +2023-02-06 11:56:00,344 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2186, 1.0419, 1.2739, 1.0967, 0.9835, 1.3039, 0.0390, 0.8691], + device='cuda:1'), covar=tensor([0.2325, 0.1928, 0.0615, 0.1198, 0.3560, 0.0654, 0.3093, 0.1839], + device='cuda:1'), in_proj_covar=tensor([0.0170, 0.0174, 0.0104, 0.0219, 0.0256, 0.0107, 0.0165, 0.0167], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 11:56:04,889 INFO [train.py:901] (1/4) Epoch 12, batch 3500, loss[loss=0.2175, simple_loss=0.2922, pruned_loss=0.07137, over 7780.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3081, pruned_loss=0.07816, over 1619530.11 frames. ], batch size: 19, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:56:18,393 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92432.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:56:27,916 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7823, 1.5050, 3.9316, 1.3207, 3.4443, 3.2686, 3.5608, 3.4175], + device='cuda:1'), covar=tensor([0.0620, 0.3981, 0.0581, 0.3648, 0.1241, 0.0900, 0.0629, 0.0745], + device='cuda:1'), in_proj_covar=tensor([0.0498, 0.0574, 0.0595, 0.0538, 0.0621, 0.0532, 0.0522, 0.0586], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:56:29,149 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 11:56:33,046 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.438e+02 2.928e+02 3.742e+02 8.211e+02, threshold=5.856e+02, percent-clipped=5.0 +2023-02-06 11:56:36,508 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92458.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:56:38,508 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4983, 1.8488, 1.8588, 1.0180, 1.9531, 1.3800, 0.4345, 1.7038], + device='cuda:1'), covar=tensor([0.0337, 0.0217, 0.0198, 0.0388, 0.0251, 0.0685, 0.0568, 0.0176], + device='cuda:1'), in_proj_covar=tensor([0.0399, 0.0330, 0.0281, 0.0392, 0.0323, 0.0480, 0.0359, 0.0362], + device='cuda:1'), out_proj_covar=tensor([1.1321e-04, 9.1216e-05, 7.7866e-05, 1.0948e-04, 9.0566e-05, 1.4500e-04, + 1.0185e-04, 1.0176e-04], device='cuda:1') +2023-02-06 11:56:40,253 INFO [train.py:901] (1/4) Epoch 12, batch 3550, loss[loss=0.2118, simple_loss=0.3051, pruned_loss=0.05927, over 8097.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3082, pruned_loss=0.07796, over 1622270.60 frames. ], batch size: 23, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:57:11,660 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. 
limit=2.0 +2023-02-06 11:57:14,698 INFO [train.py:901] (1/4) Epoch 12, batch 3600, loss[loss=0.2331, simple_loss=0.3001, pruned_loss=0.08303, over 5157.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3075, pruned_loss=0.07791, over 1616035.28 frames. ], batch size: 11, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:57:29,921 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-06 11:57:38,949 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0 +2023-02-06 11:57:42,454 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.628e+02 3.055e+02 4.234e+02 9.851e+02, threshold=6.109e+02, percent-clipped=7.0 +2023-02-06 11:57:50,882 INFO [train.py:901] (1/4) Epoch 12, batch 3650, loss[loss=0.2167, simple_loss=0.2985, pruned_loss=0.06748, over 8237.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3065, pruned_loss=0.07738, over 1609207.98 frames. ], batch size: 22, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:58:10,860 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6876, 4.6987, 4.1192, 2.1471, 4.1725, 4.2618, 4.2554, 4.0610], + device='cuda:1'), covar=tensor([0.0711, 0.0517, 0.1076, 0.5019, 0.0729, 0.0993, 0.1291, 0.0771], + device='cuda:1'), in_proj_covar=tensor([0.0468, 0.0377, 0.0388, 0.0488, 0.0380, 0.0383, 0.0379, 0.0332], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 11:58:23,907 INFO [train.py:901] (1/4) Epoch 12, batch 3700, loss[loss=0.2715, simple_loss=0.3348, pruned_loss=0.1041, over 7809.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3063, pruned_loss=0.07769, over 1603936.57 frames. ], batch size: 20, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:58:28,535 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 11:58:30,702 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92624.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:44,261 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92643.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:48,467 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:50,894 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.476e+02 3.116e+02 4.152e+02 8.400e+02, threshold=6.233e+02, percent-clipped=9.0 +2023-02-06 11:58:54,984 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3337, 1.9364, 3.3310, 1.6628, 2.5692, 3.7314, 3.6659, 3.2533], + device='cuda:1'), covar=tensor([0.0959, 0.1453, 0.0438, 0.2087, 0.1061, 0.0239, 0.0560, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0302, 0.0264, 0.0298, 0.0280, 0.0240, 0.0354, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 11:58:57,877 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7456, 2.2109, 3.5249, 2.5805, 3.0995, 2.3444, 2.0214, 1.8893], + device='cuda:1'), covar=tensor([0.4024, 0.4557, 0.1298, 0.3026, 0.2227, 0.2512, 0.1778, 0.4716], + device='cuda:1'), in_proj_covar=tensor([0.0896, 0.0874, 0.0731, 0.0854, 0.0939, 0.0809, 0.0706, 0.0770], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:58:59,710 INFO [train.py:901] (1/4) Epoch 12, batch 3750, loss[loss=0.2498, simple_loss=0.3204, pruned_loss=0.0896, over 8523.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3064, pruned_loss=0.07737, over 1608024.67 frames. ], batch size: 28, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:59:01,852 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1436, 1.5265, 4.2625, 1.4489, 3.7435, 3.5097, 3.8085, 3.6798], + device='cuda:1'), covar=tensor([0.0510, 0.4318, 0.0526, 0.3876, 0.1131, 0.0927, 0.0603, 0.0707], + device='cuda:1'), in_proj_covar=tensor([0.0503, 0.0579, 0.0600, 0.0540, 0.0620, 0.0533, 0.0524, 0.0585], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 11:59:17,097 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:28,224 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3003, 1.6399, 1.6318, 0.9589, 1.7071, 1.2410, 0.2419, 1.4769], + device='cuda:1'), covar=tensor([0.0343, 0.0218, 0.0189, 0.0349, 0.0239, 0.0679, 0.0560, 0.0169], + device='cuda:1'), in_proj_covar=tensor([0.0408, 0.0337, 0.0286, 0.0400, 0.0328, 0.0489, 0.0365, 0.0367], + device='cuda:1'), out_proj_covar=tensor([1.1572e-04, 9.3237e-05, 7.9089e-05, 1.1163e-04, 9.1948e-05, 1.4776e-04, + 1.0352e-04, 1.0316e-04], device='cuda:1') +2023-02-06 11:59:34,453 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:34,943 INFO [train.py:901] (1/4) Epoch 12, batch 3800, loss[loss=0.2811, simple_loss=0.3482, pruned_loss=0.107, over 6856.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3056, pruned_loss=0.07664, over 1611208.06 frames. 
], batch size: 71, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:59:35,189 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92714.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:59:52,022 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:52,766 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92739.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:59:59,547 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8788, 1.6687, 5.9865, 2.1907, 5.4444, 5.0097, 5.5726, 5.4924], + device='cuda:1'), covar=tensor([0.0418, 0.4182, 0.0335, 0.3303, 0.0859, 0.0682, 0.0407, 0.0410], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0577, 0.0597, 0.0536, 0.0615, 0.0528, 0.0521, 0.0581], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:00:02,126 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.558e+02 2.972e+02 3.756e+02 9.318e+02, threshold=5.944e+02, percent-clipped=5.0 +2023-02-06 12:00:09,490 INFO [train.py:901] (1/4) Epoch 12, batch 3850, loss[loss=0.2504, simple_loss=0.3347, pruned_loss=0.08302, over 8251.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3068, pruned_loss=0.07749, over 1608294.21 frames. ], batch size: 24, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 12:00:33,554 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 12:00:43,232 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0678, 1.7885, 3.3109, 1.3970, 2.3235, 3.6511, 3.6469, 3.0036], + device='cuda:1'), covar=tensor([0.0977, 0.1395, 0.0362, 0.2074, 0.0925, 0.0245, 0.0471, 0.0671], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0299, 0.0265, 0.0296, 0.0277, 0.0239, 0.0354, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 12:00:45,108 INFO [train.py:901] (1/4) Epoch 12, batch 3900, loss[loss=0.2224, simple_loss=0.3043, pruned_loss=0.07019, over 8457.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3071, pruned_loss=0.07748, over 1615319.48 frames. ], batch size: 25, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 12:01:08,874 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:01:11,295 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.538e+02 2.989e+02 3.922e+02 7.912e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 12:01:18,296 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6404, 2.4341, 4.4329, 1.3684, 2.9942, 2.2570, 1.7817, 2.7030], + device='cuda:1'), covar=tensor([0.1716, 0.2057, 0.0768, 0.3875, 0.1568, 0.2727, 0.1732, 0.2389], + device='cuda:1'), in_proj_covar=tensor([0.0492, 0.0529, 0.0539, 0.0583, 0.0622, 0.0564, 0.0477, 0.0615], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:01:19,449 INFO [train.py:901] (1/4) Epoch 12, batch 3950, loss[loss=0.2176, simple_loss=0.3116, pruned_loss=0.06174, over 8195.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3076, pruned_loss=0.07806, over 1612250.02 frames. 
], batch size: 23, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:01:54,553 INFO [train.py:901] (1/4) Epoch 12, batch 4000, loss[loss=0.2105, simple_loss=0.2985, pruned_loss=0.06127, over 8299.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3072, pruned_loss=0.07789, over 1612200.50 frames. ], batch size: 23, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:01:56,841 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:18,344 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:20,893 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.373e+02 3.059e+02 3.649e+02 8.513e+02, threshold=6.118e+02, percent-clipped=6.0 +2023-02-06 12:02:28,381 INFO [train.py:901] (1/4) Epoch 12, batch 4050, loss[loss=0.2076, simple_loss=0.2879, pruned_loss=0.06369, over 7707.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3072, pruned_loss=0.07752, over 1611793.95 frames. ], batch size: 18, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:02:44,203 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:48,264 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:57,122 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1186, 2.7644, 3.4807, 2.2350, 2.0305, 3.5006, 0.6116, 2.1222], + device='cuda:1'), covar=tensor([0.2443, 0.1637, 0.0507, 0.2667, 0.3521, 0.0410, 0.4241, 0.2142], + device='cuda:1'), in_proj_covar=tensor([0.0170, 0.0174, 0.0103, 0.0216, 0.0253, 0.0107, 0.0162, 0.0168], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 12:03:03,723 INFO [train.py:901] (1/4) Epoch 12, batch 4100, loss[loss=0.2522, simple_loss=0.3246, pruned_loss=0.08993, over 7127.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3074, pruned_loss=0.07756, over 1610729.79 frames. ], batch size: 72, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:03:04,562 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8056, 1.3592, 1.5140, 1.1553, 0.8434, 1.2792, 1.5375, 1.3896], + device='cuda:1'), covar=tensor([0.0534, 0.1284, 0.1766, 0.1478, 0.0645, 0.1519, 0.0743, 0.0638], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0153, 0.0194, 0.0160, 0.0104, 0.0164, 0.0117, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 12:03:13,912 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:03:21,019 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 12:03:30,614 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.418e+02 3.048e+02 3.757e+02 7.047e+02, threshold=6.097e+02, percent-clipped=3.0 +2023-02-06 12:03:31,424 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:03:37,938 INFO [train.py:901] (1/4) Epoch 12, batch 4150, loss[loss=0.2208, simple_loss=0.3114, pruned_loss=0.0651, over 8491.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3078, pruned_loss=0.07772, over 1614988.27 frames. 
], batch size: 26, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:03:50,875 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:02,150 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 12:04:04,581 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:12,239 INFO [train.py:901] (1/4) Epoch 12, batch 4200, loss[loss=0.2209, simple_loss=0.2983, pruned_loss=0.07173, over 7649.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3067, pruned_loss=0.07758, over 1611235.16 frames. ], batch size: 19, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:04:24,919 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 12:04:26,410 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:40,114 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.563e+02 2.943e+02 3.717e+02 8.503e+02, threshold=5.885e+02, percent-clipped=3.0 +2023-02-06 12:04:47,434 INFO [train.py:901] (1/4) Epoch 12, batch 4250, loss[loss=0.2689, simple_loss=0.338, pruned_loss=0.0999, over 8449.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3051, pruned_loss=0.07674, over 1607836.13 frames. ], batch size: 27, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:04:48,800 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 12:04:53,301 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 12:05:06,764 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:09,354 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93197.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:21,301 INFO [train.py:901] (1/4) Epoch 12, batch 4300, loss[loss=0.1868, simple_loss=0.275, pruned_loss=0.04929, over 8250.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3049, pruned_loss=0.07616, over 1609709.02 frames. ], batch size: 24, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:05:22,139 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6288, 1.3983, 4.7761, 1.7424, 4.3000, 3.9673, 4.3109, 4.1399], + device='cuda:1'), covar=tensor([0.0374, 0.3995, 0.0380, 0.3072, 0.0839, 0.0734, 0.0425, 0.0502], + device='cuda:1'), in_proj_covar=tensor([0.0501, 0.0580, 0.0594, 0.0537, 0.0615, 0.0527, 0.0519, 0.0581], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:05:48,566 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.718e+02 3.236e+02 4.116e+02 1.260e+03, threshold=6.473e+02, percent-clipped=7.0 +2023-02-06 12:05:54,513 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93261.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:56,509 INFO [train.py:901] (1/4) Epoch 12, batch 4350, loss[loss=0.2792, simple_loss=0.3509, pruned_loss=0.1038, over 8482.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3074, pruned_loss=0.07739, over 1613232.62 frames. 
], batch size: 29, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:06:15,791 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:16,412 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 12:06:25,929 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:29,908 INFO [train.py:901] (1/4) Epoch 12, batch 4400, loss[loss=0.1832, simple_loss=0.2606, pruned_loss=0.0529, over 7698.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3078, pruned_loss=0.07773, over 1607097.26 frames. ], batch size: 18, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:06:46,137 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:58,348 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.551e+02 2.995e+02 3.715e+02 7.484e+02, threshold=5.990e+02, percent-clipped=1.0 +2023-02-06 12:06:58,373 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 12:07:01,790 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:05,648 INFO [train.py:901] (1/4) Epoch 12, batch 4450, loss[loss=0.228, simple_loss=0.3084, pruned_loss=0.07382, over 8101.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3068, pruned_loss=0.07726, over 1607193.21 frames. ], batch size: 23, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:07:11,752 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:14,528 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:19,342 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:29,363 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:36,061 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93408.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:38,776 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1938, 2.5454, 2.9450, 1.3396, 3.0164, 1.8107, 1.6156, 1.7431], + device='cuda:1'), covar=tensor([0.0590, 0.0308, 0.0248, 0.0605, 0.0368, 0.0657, 0.0689, 0.0482], + device='cuda:1'), in_proj_covar=tensor([0.0392, 0.0328, 0.0273, 0.0387, 0.0320, 0.0474, 0.0356, 0.0357], + device='cuda:1'), out_proj_covar=tensor([1.1110e-04, 9.0557e-05, 7.5531e-05, 1.0777e-04, 8.9835e-05, 1.4287e-04, + 1.0068e-04, 1.0019e-04], device='cuda:1') +2023-02-06 12:07:39,490 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5692, 2.0626, 3.0417, 2.3693, 2.9632, 2.2809, 1.9470, 1.3950], + device='cuda:1'), covar=tensor([0.3560, 0.3749, 0.1306, 0.2771, 0.1654, 0.2113, 0.1584, 0.4278], + device='cuda:1'), in_proj_covar=tensor([0.0892, 0.0871, 0.0730, 0.0852, 0.0931, 0.0803, 0.0705, 0.0768], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:07:39,925 INFO [train.py:901] (1/4) Epoch 12, batch 4500, 
loss[loss=0.1626, simple_loss=0.2445, pruned_loss=0.0403, over 7416.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3063, pruned_loss=0.07754, over 1604257.92 frames. ], batch size: 17, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:07:44,325 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.89 vs. limit=5.0 +2023-02-06 12:07:50,737 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 12:08:06,003 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:06,475 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.576e+02 3.193e+02 4.187e+02 6.619e+02, threshold=6.386e+02, percent-clipped=4.0 +2023-02-06 12:08:06,709 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:13,866 INFO [train.py:901] (1/4) Epoch 12, batch 4550, loss[loss=0.2418, simple_loss=0.3169, pruned_loss=0.08333, over 7925.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3059, pruned_loss=0.07734, over 1605913.16 frames. ], batch size: 20, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:08:24,178 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:25,008 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:31,261 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6539, 1.9126, 2.0810, 1.1713, 2.1512, 1.4112, 0.5583, 1.7478], + device='cuda:1'), covar=tensor([0.0355, 0.0208, 0.0165, 0.0329, 0.0272, 0.0599, 0.0564, 0.0191], + device='cuda:1'), in_proj_covar=tensor([0.0395, 0.0331, 0.0276, 0.0390, 0.0322, 0.0476, 0.0359, 0.0359], + device='cuda:1'), out_proj_covar=tensor([1.1197e-04, 9.1400e-05, 7.6214e-05, 1.0852e-04, 9.0395e-05, 1.4353e-04, + 1.0154e-04, 1.0079e-04], device='cuda:1') +2023-02-06 12:08:31,907 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93487.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:49,683 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:50,198 INFO [train.py:901] (1/4) Epoch 12, batch 4600, loss[loss=0.2076, simple_loss=0.2823, pruned_loss=0.06643, over 7799.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3063, pruned_loss=0.0775, over 1604666.85 frames. ], batch size: 19, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:08:53,906 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 12:09:16,615 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.455e+02 3.020e+02 4.052e+02 9.299e+02, threshold=6.041e+02, percent-clipped=5.0 +2023-02-06 12:09:18,236 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7034, 1.4468, 1.5192, 1.2217, 0.8646, 1.2609, 1.4910, 1.2500], + device='cuda:1'), covar=tensor([0.0545, 0.1262, 0.1774, 0.1448, 0.0621, 0.1618, 0.0711, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0158, 0.0104, 0.0163, 0.0116, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 12:09:24,877 INFO [train.py:901] (1/4) Epoch 12, batch 4650, loss[loss=0.2833, simple_loss=0.3338, pruned_loss=0.1164, over 7233.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3065, pruned_loss=0.0774, over 1609594.43 frames. ], batch size: 16, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:09:25,092 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:41,890 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:45,159 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:59,883 INFO [train.py:901] (1/4) Epoch 12, batch 4700, loss[loss=0.2407, simple_loss=0.3111, pruned_loss=0.08514, over 8597.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3062, pruned_loss=0.07706, over 1610489.14 frames. ], batch size: 31, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:10:12,704 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:21,477 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3183, 1.8498, 2.7212, 2.1671, 2.4930, 2.1580, 1.7097, 1.1736], + device='cuda:1'), covar=tensor([0.3909, 0.3956, 0.1064, 0.2474, 0.1724, 0.2069, 0.1797, 0.3786], + device='cuda:1'), in_proj_covar=tensor([0.0893, 0.0871, 0.0725, 0.0851, 0.0934, 0.0802, 0.0699, 0.0767], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:10:26,597 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.370e+02 2.939e+02 3.568e+02 8.447e+02, threshold=5.879e+02, percent-clipped=4.0 +2023-02-06 12:10:29,424 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93657.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:33,905 INFO [train.py:901] (1/4) Epoch 12, batch 4750, loss[loss=0.2155, simple_loss=0.2892, pruned_loss=0.07093, over 7818.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3062, pruned_loss=0.07774, over 1611067.71 frames. ], batch size: 20, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:10:34,142 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:51,742 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93689.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:00,453 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 12:11:02,146 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. 
limit=2.0 +2023-02-06 12:11:02,482 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 12:11:04,757 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:09,466 INFO [train.py:901] (1/4) Epoch 12, batch 4800, loss[loss=0.2276, simple_loss=0.3094, pruned_loss=0.07291, over 8088.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3052, pruned_loss=0.07714, over 1610960.40 frames. ], batch size: 21, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:11:23,305 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:30,151 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:36,833 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.519e+02 2.967e+02 3.635e+02 7.460e+02, threshold=5.934e+02, percent-clipped=2.0 +2023-02-06 12:11:44,128 INFO [train.py:901] (1/4) Epoch 12, batch 4850, loss[loss=0.2949, simple_loss=0.3635, pruned_loss=0.1131, over 8103.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3053, pruned_loss=0.07717, over 1610601.19 frames. ], batch size: 23, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:11:44,993 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7931, 1.7242, 2.6537, 1.3943, 2.1547, 2.9213, 2.9305, 2.4916], + device='cuda:1'), covar=tensor([0.0930, 0.1214, 0.0533, 0.1898, 0.1056, 0.0306, 0.0692, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0299, 0.0265, 0.0295, 0.0279, 0.0240, 0.0354, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 12:11:47,093 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93768.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:47,777 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:53,107 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 12:12:00,552 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:04,721 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:18,633 INFO [train.py:901] (1/4) Epoch 12, batch 4900, loss[loss=0.2262, simple_loss=0.2954, pruned_loss=0.07846, over 7973.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3057, pruned_loss=0.07698, over 1611368.03 frames. ], batch size: 21, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:12:42,679 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:45,718 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.389e+02 2.920e+02 3.679e+02 7.315e+02, threshold=5.841e+02, percent-clipped=3.0 +2023-02-06 12:12:53,902 INFO [train.py:901] (1/4) Epoch 12, batch 4950, loss[loss=0.2138, simple_loss=0.2918, pruned_loss=0.06796, over 8276.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3064, pruned_loss=0.07745, over 1610721.56 frames. 
], batch size: 23, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:13:00,015 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:13:27,063 INFO [train.py:901] (1/4) Epoch 12, batch 5000, loss[loss=0.2406, simple_loss=0.3142, pruned_loss=0.08352, over 8339.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3073, pruned_loss=0.07765, over 1610825.03 frames. ], batch size: 26, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:13:55,386 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.574e+02 3.082e+02 3.748e+02 7.333e+02, threshold=6.165e+02, percent-clipped=4.0 +2023-02-06 12:14:02,944 INFO [train.py:901] (1/4) Epoch 12, batch 5050, loss[loss=0.294, simple_loss=0.3475, pruned_loss=0.1203, over 8089.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3074, pruned_loss=0.07806, over 1611000.07 frames. ], batch size: 21, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:14:27,666 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:14:31,162 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 12:14:38,585 INFO [train.py:901] (1/4) Epoch 12, batch 5100, loss[loss=0.219, simple_loss=0.2932, pruned_loss=0.07244, over 7980.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3075, pruned_loss=0.0782, over 1611882.58 frames. ], batch size: 21, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:05,336 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.576e+02 2.962e+02 4.029e+02 5.912e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-06 12:15:13,500 INFO [train.py:901] (1/4) Epoch 12, batch 5150, loss[loss=0.2218, simple_loss=0.2832, pruned_loss=0.08018, over 7515.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3064, pruned_loss=0.07788, over 1611930.79 frames. ], batch size: 18, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:33,466 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 12:15:46,359 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9109, 2.2940, 3.6455, 1.6212, 2.9750, 2.3293, 2.0680, 2.6842], + device='cuda:1'), covar=tensor([0.1509, 0.2207, 0.0649, 0.3782, 0.1271, 0.2463, 0.1709, 0.2039], + device='cuda:1'), in_proj_covar=tensor([0.0492, 0.0526, 0.0535, 0.0587, 0.0618, 0.0558, 0.0478, 0.0617], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:15:47,487 INFO [train.py:901] (1/4) Epoch 12, batch 5200, loss[loss=0.206, simple_loss=0.2739, pruned_loss=0.06905, over 7217.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3073, pruned_loss=0.07802, over 1611794.34 frames. 
], batch size: 16, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:58,685 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3513, 1.4588, 1.2994, 1.8788, 0.7750, 1.1874, 1.2320, 1.4711], + device='cuda:1'), covar=tensor([0.0926, 0.0909, 0.1165, 0.0486, 0.1216, 0.1586, 0.0917, 0.0796], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0216, 0.0258, 0.0219, 0.0220, 0.0256, 0.0263, 0.0224], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 12:15:59,925 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:16:00,737 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8296, 1.7354, 2.9029, 1.3498, 2.2181, 3.0909, 3.1445, 2.6564], + device='cuda:1'), covar=tensor([0.0871, 0.1214, 0.0363, 0.1801, 0.0781, 0.0278, 0.0542, 0.0614], + device='cuda:1'), in_proj_covar=tensor([0.0267, 0.0296, 0.0262, 0.0291, 0.0275, 0.0238, 0.0349, 0.0292], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 12:16:14,668 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.571e+02 3.074e+02 4.467e+02 8.286e+02, threshold=6.149e+02, percent-clipped=7.0 +2023-02-06 12:16:21,922 INFO [train.py:901] (1/4) Epoch 12, batch 5250, loss[loss=0.1992, simple_loss=0.2643, pruned_loss=0.06704, over 7552.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3075, pruned_loss=0.07812, over 1612385.43 frames. ], batch size: 18, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:16:25,911 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 12:16:57,674 INFO [train.py:901] (1/4) Epoch 12, batch 5300, loss[loss=0.223, simple_loss=0.3047, pruned_loss=0.07061, over 8043.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3064, pruned_loss=0.07694, over 1611523.04 frames. ], batch size: 22, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:17:13,241 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 12:17:15,484 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94241.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:17:19,507 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:17:23,399 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.628e+02 3.237e+02 4.138e+02 9.258e+02, threshold=6.473e+02, percent-clipped=5.0 +2023-02-06 12:17:31,605 INFO [train.py:901] (1/4) Epoch 12, batch 5350, loss[loss=0.2617, simple_loss=0.3355, pruned_loss=0.0939, over 8497.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3058, pruned_loss=0.07639, over 1613789.11 frames. 
], batch size: 29, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:17:34,488 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3204, 1.9528, 2.7410, 2.3071, 2.5889, 2.0982, 1.9457, 1.8484], + device='cuda:1'), covar=tensor([0.3339, 0.3585, 0.1206, 0.2208, 0.1530, 0.2274, 0.1578, 0.3127], + device='cuda:1'), in_proj_covar=tensor([0.0886, 0.0863, 0.0722, 0.0844, 0.0929, 0.0795, 0.0697, 0.0758], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:18:05,212 INFO [train.py:901] (1/4) Epoch 12, batch 5400, loss[loss=0.2261, simple_loss=0.3086, pruned_loss=0.07179, over 8467.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3062, pruned_loss=0.07657, over 1613718.67 frames. ], batch size: 25, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:18:23,610 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2536, 3.0953, 2.8471, 1.6084, 2.8850, 2.8009, 2.8121, 2.6981], + device='cuda:1'), covar=tensor([0.1328, 0.1051, 0.1622, 0.5355, 0.1308, 0.1448, 0.1955, 0.1220], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0379, 0.0389, 0.0484, 0.0379, 0.0386, 0.0381, 0.0333], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:18:25,506 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:18:32,244 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.485e+02 2.978e+02 4.110e+02 9.009e+02, threshold=5.957e+02, percent-clipped=6.0 +2023-02-06 12:18:34,961 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 12:18:39,987 INFO [train.py:901] (1/4) Epoch 12, batch 5450, loss[loss=0.2207, simple_loss=0.2985, pruned_loss=0.07148, over 5127.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3061, pruned_loss=0.07684, over 1609936.06 frames. ], batch size: 11, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:18:41,434 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:18:47,042 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.14 vs. limit=5.0 +2023-02-06 12:18:57,715 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.3671, 1.2096, 3.6585, 1.4185, 2.7635, 2.7770, 3.2509, 3.2337], + device='cuda:1'), covar=tensor([0.1648, 0.7093, 0.1531, 0.5369, 0.3178, 0.2227, 0.1553, 0.1487], + device='cuda:1'), in_proj_covar=tensor([0.0505, 0.0580, 0.0593, 0.0537, 0.0619, 0.0529, 0.0516, 0.0578], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:19:12,399 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 12:19:15,765 INFO [train.py:901] (1/4) Epoch 12, batch 5500, loss[loss=0.2537, simple_loss=0.3315, pruned_loss=0.0879, over 8327.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3061, pruned_loss=0.07648, over 1609412.68 frames. ], batch size: 26, lr: 6.33e-03, grad_scale: 16.0 +2023-02-06 12:19:30,249 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.03 vs. 
limit=5.0 +2023-02-06 12:19:43,164 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.406e+02 2.798e+02 3.361e+02 6.650e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-06 12:19:45,380 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:19:49,115 INFO [train.py:901] (1/4) Epoch 12, batch 5550, loss[loss=0.2806, simple_loss=0.3371, pruned_loss=0.112, over 8541.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3064, pruned_loss=0.07709, over 1609694.40 frames. ], batch size: 28, lr: 6.33e-03, grad_scale: 4.0 +2023-02-06 12:20:16,694 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:20:24,052 INFO [train.py:901] (1/4) Epoch 12, batch 5600, loss[loss=0.1878, simple_loss=0.2707, pruned_loss=0.05246, over 7816.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3056, pruned_loss=0.07684, over 1611072.75 frames. ], batch size: 20, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:20:35,266 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:20:54,480 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.675e+02 3.313e+02 4.214e+02 1.006e+03, threshold=6.626e+02, percent-clipped=7.0 +2023-02-06 12:21:00,674 INFO [train.py:901] (1/4) Epoch 12, batch 5650, loss[loss=0.218, simple_loss=0.3051, pruned_loss=0.06547, over 8032.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.305, pruned_loss=0.07612, over 1610713.76 frames. ], batch size: 22, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:21:15,257 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94585.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:21:21,336 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 12:21:35,767 INFO [train.py:901] (1/4) Epoch 12, batch 5700, loss[loss=0.2085, simple_loss=0.2894, pruned_loss=0.06382, over 7968.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3048, pruned_loss=0.076, over 1607172.29 frames. ], batch size: 21, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:04,739 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.327e+02 3.036e+02 3.801e+02 7.493e+02, threshold=6.072e+02, percent-clipped=2.0 +2023-02-06 12:22:10,812 INFO [train.py:901] (1/4) Epoch 12, batch 5750, loss[loss=0.2196, simple_loss=0.3069, pruned_loss=0.06612, over 8365.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3038, pruned_loss=0.07576, over 1609035.33 frames. ], batch size: 24, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:15,197 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8672, 1.7734, 2.3059, 1.6956, 1.2239, 2.3782, 0.4840, 1.3611], + device='cuda:1'), covar=tensor([0.2317, 0.1727, 0.0496, 0.1965, 0.4035, 0.0428, 0.3263, 0.1877], + device='cuda:1'), in_proj_covar=tensor([0.0166, 0.0169, 0.0100, 0.0213, 0.0252, 0.0104, 0.0163, 0.0164], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 12:22:26,195 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-06 12:22:35,724 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94700.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:22:42,477 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:22:45,141 INFO [train.py:901] (1/4) Epoch 12, batch 5800, loss[loss=0.258, simple_loss=0.3485, pruned_loss=0.08374, over 8761.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3043, pruned_loss=0.07573, over 1609495.62 frames. ], batch size: 30, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:45,332 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94714.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:23:02,548 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94739.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:23:13,645 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.575e+02 3.288e+02 4.021e+02 7.847e+02, threshold=6.576e+02, percent-clipped=2.0 +2023-02-06 12:23:19,972 INFO [train.py:901] (1/4) Epoch 12, batch 5850, loss[loss=0.3504, simple_loss=0.3878, pruned_loss=0.1565, over 6802.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3043, pruned_loss=0.07593, over 1608586.91 frames. ], batch size: 72, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:23:26,199 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3800, 1.6609, 1.7625, 0.9561, 1.7023, 1.4066, 0.2618, 1.5991], + device='cuda:1'), covar=tensor([0.0314, 0.0223, 0.0184, 0.0321, 0.0289, 0.0618, 0.0581, 0.0164], + device='cuda:1'), in_proj_covar=tensor([0.0394, 0.0328, 0.0275, 0.0386, 0.0320, 0.0475, 0.0356, 0.0357], + device='cuda:1'), out_proj_covar=tensor([1.1132e-04, 9.0097e-05, 7.6090e-05, 1.0710e-04, 8.9457e-05, 1.4311e-04, + 1.0049e-04, 1.0016e-04], device='cuda:1') +2023-02-06 12:23:54,266 INFO [train.py:901] (1/4) Epoch 12, batch 5900, loss[loss=0.2229, simple_loss=0.2824, pruned_loss=0.08163, over 7796.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3046, pruned_loss=0.07598, over 1607149.80 frames. ], batch size: 19, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:24:01,747 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:24:22,270 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.604e+02 3.248e+02 4.213e+02 6.479e+02, threshold=6.496e+02, percent-clipped=0.0 +2023-02-06 12:24:28,372 INFO [train.py:901] (1/4) Epoch 12, batch 5950, loss[loss=0.2365, simple_loss=0.3279, pruned_loss=0.07253, over 8355.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3049, pruned_loss=0.07629, over 1610350.50 frames. ], batch size: 24, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:25:03,798 INFO [train.py:901] (1/4) Epoch 12, batch 6000, loss[loss=0.2492, simple_loss=0.3278, pruned_loss=0.08525, over 8563.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3051, pruned_loss=0.07637, over 1606121.02 frames. ], batch size: 34, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:25:03,799 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 12:25:16,952 INFO [train.py:935] (1/4) Epoch 12, validation: loss=0.1862, simple_loss=0.286, pruned_loss=0.04318, over 944034.00 frames. 
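> Editor's note for readers scanning these logs: the recurring `WARNING [train.py:1067] Exclude cut with ID ... from training. Duration: ...` entries come from a duration filter applied to the training cuts before batching, so outlier utterances never reach the model. The sketch below is a minimal illustration of that pattern, not the recipe's exact code: the `Cut` stand-in class and the 1.0 s / 20.0 s bounds are assumptions; the log itself only shows that cuts of roughly 0.83–0.97 s and 25–33 s are rejected.

```python
from dataclasses import dataclass
from typing import Iterable, Iterator
import logging

logging.basicConfig(level=logging.INFO)


@dataclass
class Cut:
    """Stand-in for a lhotse-style cut; only the fields the filter needs."""
    id: str
    duration: float  # seconds


def keep_cut(cut: Cut, min_dur: float = 1.0, max_dur: float = 20.0) -> bool:
    """Return True if the cut's duration lies inside the allowed range.

    The exact bounds are assumptions chosen to be consistent with the
    exclusions logged above; only out-of-range cuts produce a warning,
    mirroring the message format seen in this log.
    """
    if min_dur <= cut.duration <= max_dur:
        return True
    logging.warning(
        "Exclude cut with ID %s from training. Duration: %s", cut.id, cut.duration
    )
    return False


def filter_cuts(cuts: Iterable[Cut]) -> Iterator[Cut]:
    # Lazily drop out-of-range cuts, analogous to CutSet.filter(keep_cut).
    return (c for c in cuts if keep_cut(c))


if __name__ == "__main__":
    demo = [
        Cut("7255-291500-0005-9467", 25.035),  # excluded (too long, per the log)
        Cut("1089-134686-0000-1733", 7.4),     # hypothetical in-range cut, kept
    ]
    kept = list(filter_cuts(demo))
    assert [c.id for c in kept] == ["1089-134686-0000-1733"]
```

Filtering before batching (rather than inside the training loop) keeps very long utterances from blowing up GPU memory and keeps sub-second fragments from contributing degenerate frames; the `Maximum memory allocated` line logged above is the figure such a cap is meant to keep bounded.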
+2023-02-06 12:25:16,953 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 12:25:17,910 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2788, 2.5684, 3.1026, 1.4450, 3.2273, 1.8505, 1.4250, 2.0926], + device='cuda:1'), covar=tensor([0.0600, 0.0304, 0.0164, 0.0540, 0.0292, 0.0666, 0.0704, 0.0402], + device='cuda:1'), in_proj_covar=tensor([0.0394, 0.0327, 0.0276, 0.0386, 0.0320, 0.0474, 0.0357, 0.0356], + device='cuda:1'), out_proj_covar=tensor([1.1110e-04, 9.0045e-05, 7.6154e-05, 1.0725e-04, 8.9410e-05, 1.4274e-04, + 1.0059e-04, 9.9550e-05], device='cuda:1') +2023-02-06 12:25:44,732 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.442e+02 2.970e+02 3.787e+02 9.017e+02, threshold=5.940e+02, percent-clipped=3.0 +2023-02-06 12:25:45,512 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94956.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:25:50,745 INFO [train.py:901] (1/4) Epoch 12, batch 6050, loss[loss=0.2375, simple_loss=0.3142, pruned_loss=0.08037, over 8081.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3046, pruned_loss=0.07585, over 1603879.23 frames. ], batch size: 21, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:26:02,527 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94981.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:26:03,821 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9596, 1.7365, 1.8315, 1.5815, 1.0359, 1.5952, 1.9485, 2.0644], + device='cuda:1'), covar=tensor([0.0454, 0.1173, 0.1679, 0.1323, 0.0629, 0.1456, 0.0674, 0.0549], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0192, 0.0158, 0.0103, 0.0162, 0.0115, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 12:26:25,547 INFO [train.py:901] (1/4) Epoch 12, batch 6100, loss[loss=0.2043, simple_loss=0.2874, pruned_loss=0.06059, over 7915.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3055, pruned_loss=0.07641, over 1604279.60 frames. ], batch size: 20, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:26:38,777 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7300, 2.0158, 2.2654, 1.3510, 2.3537, 1.4323, 0.6659, 1.9839], + device='cuda:1'), covar=tensor([0.0442, 0.0234, 0.0171, 0.0390, 0.0306, 0.0688, 0.0623, 0.0202], + device='cuda:1'), in_proj_covar=tensor([0.0398, 0.0330, 0.0280, 0.0390, 0.0324, 0.0479, 0.0360, 0.0359], + device='cuda:1'), out_proj_covar=tensor([1.1226e-04, 9.0735e-05, 7.7408e-05, 1.0852e-04, 9.0699e-05, 1.4404e-04, + 1.0164e-04, 1.0065e-04], device='cuda:1') +2023-02-06 12:26:54,026 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 12:26:54,678 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.590e+02 3.216e+02 4.301e+02 8.648e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 12:27:00,775 INFO [train.py:901] (1/4) Epoch 12, batch 6150, loss[loss=0.2556, simple_loss=0.3329, pruned_loss=0.08919, over 8188.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3086, pruned_loss=0.07822, over 1612494.99 frames. 
], batch size: 23, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:27:12,216 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95081.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:27:29,629 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95106.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:27:34,674 INFO [train.py:901] (1/4) Epoch 12, batch 6200, loss[loss=0.2633, simple_loss=0.3385, pruned_loss=0.09409, over 8108.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.309, pruned_loss=0.07903, over 1614834.89 frames. ], batch size: 23, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:27:41,092 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95123.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:28:04,335 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.519e+02 2.980e+02 3.798e+02 7.393e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 12:28:10,299 INFO [train.py:901] (1/4) Epoch 12, batch 6250, loss[loss=0.2226, simple_loss=0.2965, pruned_loss=0.0744, over 8218.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3088, pruned_loss=0.07872, over 1614255.17 frames. ], batch size: 49, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:28:43,837 INFO [train.py:901] (1/4) Epoch 12, batch 6300, loss[loss=0.251, simple_loss=0.327, pruned_loss=0.08752, over 8362.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3092, pruned_loss=0.07928, over 1613025.97 frames. ], batch size: 24, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:28:46,417 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.84 vs. limit=5.0 +2023-02-06 12:29:13,416 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.657e+02 3.224e+02 4.358e+02 1.571e+03, threshold=6.448e+02, percent-clipped=5.0 +2023-02-06 12:29:20,995 INFO [train.py:901] (1/4) Epoch 12, batch 6350, loss[loss=0.2465, simple_loss=0.3189, pruned_loss=0.08707, over 8496.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.3081, pruned_loss=0.0794, over 1609878.48 frames. ], batch size: 29, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:29:30,734 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:36,286 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:50,041 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7364, 2.1021, 4.1722, 1.4695, 2.9501, 2.2081, 1.7442, 2.7151], + device='cuda:1'), covar=tensor([0.1760, 0.2603, 0.0606, 0.4034, 0.1695, 0.2847, 0.1948, 0.2348], + device='cuda:1'), in_proj_covar=tensor([0.0493, 0.0531, 0.0537, 0.0586, 0.0621, 0.0559, 0.0478, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:29:55,408 INFO [train.py:901] (1/4) Epoch 12, batch 6400, loss[loss=0.2275, simple_loss=0.3131, pruned_loss=0.07093, over 8284.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3077, pruned_loss=0.07827, over 1613042.01 frames. 
], batch size: 23, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:30:23,572 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.403e+02 2.937e+02 3.904e+02 6.682e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 12:30:25,102 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:30:27,149 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3968, 1.5744, 2.2621, 1.3067, 1.5586, 1.6848, 1.5252, 1.3449], + device='cuda:1'), covar=tensor([0.1665, 0.2068, 0.0738, 0.3704, 0.1567, 0.2687, 0.1757, 0.1839], + device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0526, 0.0533, 0.0579, 0.0617, 0.0555, 0.0472, 0.0610], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:30:29,661 INFO [train.py:901] (1/4) Epoch 12, batch 6450, loss[loss=0.2287, simple_loss=0.2876, pruned_loss=0.08494, over 7780.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3074, pruned_loss=0.07853, over 1612345.60 frames. ], batch size: 19, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:05,020 INFO [train.py:901] (1/4) Epoch 12, batch 6500, loss[loss=0.272, simple_loss=0.3453, pruned_loss=0.09931, over 8031.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3059, pruned_loss=0.07755, over 1611284.39 frames. ], batch size: 22, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:14,614 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4448, 2.0125, 3.4436, 1.3096, 2.4117, 1.8424, 1.6526, 2.3586], + device='cuda:1'), covar=tensor([0.1745, 0.2198, 0.0665, 0.3888, 0.1690, 0.2950, 0.1873, 0.2174], + device='cuda:1'), in_proj_covar=tensor([0.0489, 0.0527, 0.0533, 0.0580, 0.0618, 0.0555, 0.0473, 0.0611], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:31:31,886 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.431e+02 2.857e+02 3.846e+02 1.801e+03, threshold=5.713e+02, percent-clipped=8.0 +2023-02-06 12:31:35,001 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 12:31:37,952 INFO [train.py:901] (1/4) Epoch 12, batch 6550, loss[loss=0.1936, simple_loss=0.2762, pruned_loss=0.05543, over 7658.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.307, pruned_loss=0.07812, over 1613711.77 frames. ], batch size: 19, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:40,440 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.34 vs. limit=5.0 +2023-02-06 12:31:40,699 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95467.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:31:59,679 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6384, 2.3475, 4.8304, 2.8307, 4.3714, 4.1827, 4.5376, 4.4463], + device='cuda:1'), covar=tensor([0.0571, 0.3345, 0.0454, 0.2729, 0.0857, 0.0716, 0.0457, 0.0448], + device='cuda:1'), in_proj_covar=tensor([0.0512, 0.0581, 0.0594, 0.0544, 0.0622, 0.0532, 0.0520, 0.0582], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 12:32:06,855 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 12:32:13,527 INFO [train.py:901] (1/4) Epoch 12, batch 6600, loss[loss=0.2069, simple_loss=0.2838, pruned_loss=0.065, over 7656.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3059, pruned_loss=0.07763, over 1612703.24 frames. ], batch size: 19, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:25,471 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 12:32:25,784 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 12:32:40,269 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.522e+02 3.078e+02 3.913e+02 8.021e+02, threshold=6.157e+02, percent-clipped=7.0 +2023-02-06 12:32:43,793 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6853, 1.7516, 2.3350, 1.3791, 1.1106, 2.3795, 0.3341, 1.3155], + device='cuda:1'), covar=tensor([0.2523, 0.1461, 0.0416, 0.2731, 0.3925, 0.0349, 0.3062, 0.2067], + device='cuda:1'), in_proj_covar=tensor([0.0171, 0.0173, 0.0103, 0.0216, 0.0256, 0.0108, 0.0163, 0.0166], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 12:32:46,211 INFO [train.py:901] (1/4) Epoch 12, batch 6650, loss[loss=0.2122, simple_loss=0.2972, pruned_loss=0.06362, over 8242.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3063, pruned_loss=0.0775, over 1615751.16 frames. ], batch size: 24, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:59,154 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95582.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:33:15,569 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-02-06 12:33:21,233 INFO [train.py:901] (1/4) Epoch 12, batch 6700, loss[loss=0.2353, simple_loss=0.3062, pruned_loss=0.08215, over 7438.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3059, pruned_loss=0.07698, over 1612526.21 frames. ], batch size: 17, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:33:27,483 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:33,688 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:35,025 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7255, 1.4826, 2.7795, 1.1576, 2.1028, 3.0276, 3.1211, 2.5903], + device='cuda:1'), covar=tensor([0.1075, 0.1450, 0.0427, 0.2206, 0.0881, 0.0303, 0.0569, 0.0637], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0297, 0.0261, 0.0291, 0.0272, 0.0236, 0.0347, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 12:33:37,106 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:50,484 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 2.656e+02 3.142e+02 4.011e+02 7.522e+02, threshold=6.284e+02, percent-clipped=4.0 +2023-02-06 12:33:56,566 INFO [train.py:901] (1/4) Epoch 12, batch 6750, loss[loss=0.2241, simple_loss=0.3033, pruned_loss=0.0725, over 8190.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3068, pruned_loss=0.07728, over 1613538.26 frames. 
], batch size: 23, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:33:59,366 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6635, 1.6179, 2.0223, 1.6013, 1.1630, 2.0429, 0.3097, 1.1920], + device='cuda:1'), covar=tensor([0.1978, 0.1407, 0.0447, 0.1407, 0.3267, 0.0402, 0.3113, 0.1641], + device='cuda:1'), in_proj_covar=tensor([0.0168, 0.0171, 0.0102, 0.0213, 0.0253, 0.0106, 0.0161, 0.0164], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 12:34:06,791 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4866, 1.8467, 1.9044, 1.0925, 1.9643, 1.3130, 0.4246, 1.6425], + device='cuda:1'), covar=tensor([0.0347, 0.0187, 0.0182, 0.0351, 0.0230, 0.0654, 0.0557, 0.0181], + device='cuda:1'), in_proj_covar=tensor([0.0396, 0.0329, 0.0281, 0.0391, 0.0325, 0.0482, 0.0360, 0.0358], + device='cuda:1'), out_proj_covar=tensor([1.1175e-04, 9.0035e-05, 7.7683e-05, 1.0853e-04, 9.0945e-05, 1.4494e-04, + 1.0150e-04, 1.0027e-04], device='cuda:1') +2023-02-06 12:34:22,169 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95701.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:30,621 INFO [train.py:901] (1/4) Epoch 12, batch 6800, loss[loss=0.212, simple_loss=0.2773, pruned_loss=0.07337, over 7185.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3068, pruned_loss=0.0771, over 1616663.96 frames. ], batch size: 16, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:40,704 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 12:34:44,337 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5746, 1.3967, 2.7736, 1.2607, 2.1608, 3.0295, 3.1095, 2.5755], + device='cuda:1'), covar=tensor([0.1176, 0.1664, 0.0425, 0.2241, 0.0885, 0.0316, 0.0640, 0.0703], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0298, 0.0261, 0.0292, 0.0273, 0.0236, 0.0349, 0.0288], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 12:34:47,117 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:53,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:00,265 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.510e+02 2.822e+02 3.564e+02 9.162e+02, threshold=5.644e+02, percent-clipped=3.0 +2023-02-06 12:35:06,297 INFO [train.py:901] (1/4) Epoch 12, batch 6850, loss[loss=0.2534, simple_loss=0.318, pruned_loss=0.09443, over 8476.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3058, pruned_loss=0.07598, over 1617714.70 frames. ], batch size: 27, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:21,827 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1325, 1.4794, 4.1507, 1.6869, 2.4912, 4.7213, 4.6809, 4.0275], + device='cuda:1'), covar=tensor([0.1190, 0.1862, 0.0314, 0.2179, 0.1127, 0.0199, 0.0443, 0.0576], + device='cuda:1'), in_proj_covar=tensor([0.0267, 0.0297, 0.0259, 0.0291, 0.0271, 0.0235, 0.0347, 0.0286], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 12:35:26,873 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 12:35:40,360 INFO [train.py:901] (1/4) Epoch 12, batch 6900, loss[loss=0.2121, simple_loss=0.2769, pruned_loss=0.07368, over 7814.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3067, pruned_loss=0.07655, over 1620473.41 frames. ], batch size: 20, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:41,925 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:51,213 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95830.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:35:57,297 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95838.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:36:05,035 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2050, 3.0990, 2.8610, 1.4085, 2.8056, 2.9519, 2.9091, 2.6140], + device='cuda:1'), covar=tensor([0.1124, 0.0901, 0.1353, 0.4863, 0.1086, 0.1135, 0.1496, 0.1122], + device='cuda:1'), in_proj_covar=tensor([0.0472, 0.0384, 0.0394, 0.0488, 0.0388, 0.0389, 0.0387, 0.0334], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:36:05,634 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9413, 1.5886, 1.6636, 1.4623, 1.1489, 1.5806, 1.7544, 1.5743], + device='cuda:1'), covar=tensor([0.0510, 0.1161, 0.1591, 0.1340, 0.0622, 0.1399, 0.0710, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0151, 0.0190, 0.0158, 0.0103, 0.0162, 0.0114, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 12:36:08,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.693e+02 3.422e+02 4.342e+02 1.062e+03, threshold=6.843e+02, percent-clipped=12.0 +2023-02-06 12:36:14,241 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95863.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:36:14,702 INFO [train.py:901] (1/4) Epoch 12, batch 6950, loss[loss=0.2294, simple_loss=0.3005, pruned_loss=0.07912, over 7941.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3075, pruned_loss=0.07702, over 1620836.37 frames. ], batch size: 20, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:36:15,460 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:36:20,070 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8471, 1.4741, 3.3852, 1.3444, 2.3707, 3.7771, 3.8862, 3.0648], + device='cuda:1'), covar=tensor([0.1153, 0.1730, 0.0394, 0.2275, 0.0984, 0.0286, 0.0440, 0.0735], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0298, 0.0261, 0.0292, 0.0273, 0.0237, 0.0350, 0.0287], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 12:36:34,624 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 12:36:48,738 INFO [train.py:901] (1/4) Epoch 12, batch 7000, loss[loss=0.2209, simple_loss=0.2994, pruned_loss=0.0712, over 8506.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3081, pruned_loss=0.07774, over 1620147.21 frames. ], batch size: 39, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:36:58,642 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-02-06 12:37:17,431 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.501e+02 3.116e+02 3.850e+02 8.001e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-06 12:37:22,085 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6325, 4.5992, 4.2075, 1.9388, 4.1141, 4.1777, 4.1488, 3.8672], + device='cuda:1'), covar=tensor([0.0786, 0.0610, 0.1146, 0.4667, 0.0890, 0.0742, 0.1374, 0.0603], + device='cuda:1'), in_proj_covar=tensor([0.0473, 0.0383, 0.0394, 0.0486, 0.0386, 0.0388, 0.0386, 0.0335], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 12:37:23,288 INFO [train.py:901] (1/4) Epoch 12, batch 7050, loss[loss=0.2444, simple_loss=0.3138, pruned_loss=0.08748, over 7817.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3076, pruned_loss=0.07791, over 1619323.43 frames. ], batch size: 20, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:34,014 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:44,071 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:47,395 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:50,531 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:58,505 INFO [train.py:901] (1/4) Epoch 12, batch 7100, loss[loss=0.2806, simple_loss=0.346, pruned_loss=0.1076, over 8106.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3088, pruned_loss=0.07881, over 1620574.58 frames. ], batch size: 23, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:01,398 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:06,474 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0 +2023-02-06 12:38:07,001 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:20,456 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8817, 1.8517, 2.4000, 1.6584, 1.2945, 2.4260, 0.4119, 1.4527], + device='cuda:1'), covar=tensor([0.2674, 0.1671, 0.0415, 0.1874, 0.4103, 0.0461, 0.3503, 0.1950], + device='cuda:1'), in_proj_covar=tensor([0.0172, 0.0176, 0.0104, 0.0219, 0.0257, 0.0110, 0.0166, 0.0168], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 12:38:26,975 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.539e+02 3.029e+02 4.080e+02 8.783e+02, threshold=6.058e+02, percent-clipped=4.0 +2023-02-06 12:38:33,144 INFO [train.py:901] (1/4) Epoch 12, batch 7150, loss[loss=0.219, simple_loss=0.287, pruned_loss=0.07551, over 7540.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3088, pruned_loss=0.07872, over 1619343.62 frames. 
], batch size: 18, lr: 6.28e-03, grad_scale: 8.0
+[per-batch training log condensed: Epoch 12, batches 7200-7900, GPU rank 1 of 4: tot_loss ≈ 0.230-0.233 (simple_loss ≈ 0.306-0.309, pruned_loss ≈ 0.077-0.079, over ≈ 1.61M frames); lr decays 6.28e-03 to 6.25e-03; grad_scale raised from 8.0 to 16.0 at batch 7550; optim.py grad-norm clipping thresholds ≈ 6.0e+02-6.7e+02 with percent-clipped 1.0-8.0; cuts 7357-94126-0026-44402_sp0.9 (25.06 s), 774-127930-0014-48411_sp1.1 (0.95 s), 7699-105389-0094-102071_sp0.9 (26.62 s) and 7357-94126-0009-44385_sp0.9 (27.02 s) excluded from training for duration; interleaved zipformer.py warmup/layer-drop notices, attn_weights_entropy tensor dumps and scaling.py whitening diagnostics omitted.]
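The optim.py entries condensed above report grad-norm quartiles (min, 25%, median, 75%, max), a clipping threshold, and the share of recent batches clipped; in every case visible here the threshold is roughly 2x the logged median, matching Clipping_scale=2.0. Below is a minimal PyTorch sketch of that idea; it is not icefall's actual optimizer code, and the history length is an assumption:

```python
import torch
from collections import deque


class MedianGradClipper:
    """Clip the global grad norm to clipping_scale * median of recent norms.

    Illustrative sketch of the behaviour suggested by the optim.py log
    lines (threshold ~= 2 * median); icefall folds this into its optimizer.
    """

    def __init__(self, clipping_scale: float = 2.0, history: int = 1024):
        self.clipping_scale = clipping_scale  # the logged Clipping_scale
        self.norms = deque(maxlen=history)    # recent grad norms (assumed size)

    def __call__(self, parameters) -> float:
        grads = [p.grad for p in parameters if p.grad is not None]
        total_norm = torch.norm(torch.stack([g.norm() for g in grads])).item()
        self.norms.append(total_norm)
        q = sorted(self.norms)
        n = len(q)
        # the five logged quartiles: min, 25%, median, 75%, max
        quartiles = [q[0], q[n // 4], q[n // 2], q[(3 * n) // 4], q[-1]]
        threshold = self.clipping_scale * quartiles[2]
        if total_norm > threshold:
            for g in grads:
                g.mul_(threshold / total_norm)  # scale gradients in place
        return total_norm
```

Tying the threshold to the median of the model's own recent gradient norms, rather than to a fixed constant, is why the logged thresholds drift from batch to batch.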
+[Epoch 12 finishes at batch 8050 (tot_loss 0.2303-0.2322, lr 6.25e-03); cut 3488-85273-0017-111273_sp0.9 (27.48 s) excluded. Epoch 13 opens with a validation pass: validation loss=0.1867, simple_loss=0.2865, pruned_loss=0.04345, over 944034.00 frames; maximum memory allocated so far is 6717MB. Epoch 13 batches 0-600: tot_loss falls from 0.2379 at batch 0 to ≈ 0.227-0.230, lr 6.00e-03 to 5.98e-03, clipping thresholds ≈ 5.7e+02-6.7e+02; cuts 8291-282929-0007-9590_sp0.9 (28.72 s), 6709-74022-0004-57021_sp1.1 (0.94 s), 497-129325-0061-9566_sp1.1 (0.98 s), 6951-79737-0043-83149_sp0.9 (28.09 s), 4278-13270-0009-62705_sp0.9 (25.45 s) and 7255-291500-0003-9465_sp0.9 (29.82 s) excluded for duration.]
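Each tot_loss entry above is quoted "over N frames": losses are averaged with frame counts as weights, and N climbs from 8341.00 at Epoch 13, batch 0 toward a plateau near 1.6e6. The plateau suggests a decayed window rather than a plain cumulative mean: at roughly 8,000 frames per batch, a decay of about 0.995 settles near 8000 / 0.005 = 1.6M frames. A sketch under those assumptions (hypothetical class; icefall's own tracking may differ):

```python
class FrameWeightedLoss:
    """Exponentially decayed, frame-weighted running loss.

    Sketch of the 'tot_loss[... over N frames]' bookkeeping; the decay
    value is an assumption chosen to match the observed ~1.6M-frame plateau.
    """

    def __init__(self, decay: float = 0.995) -> None:
        self.decay = decay
        self.weighted_sum = 0.0  # decayed sum of loss * frames
        self.num_frames = 0.0    # decayed frame count, the logged N

    def update(self, loss: float, frames: float) -> None:
        # older batches fade out; recent batches dominate the average
        self.weighted_sum = self.decay * self.weighted_sum + loss * frames
        self.num_frames = self.decay * self.num_frames + frames

    def average(self) -> float:
        return self.weighted_sum / max(self.num_frames, 1.0)


tracker = FrameWeightedLoss()
for _ in range(2000):                       # ~2000 batches of ~8000 frames
    tracker.update(loss=0.23, frames=8000.0)
print(f"tot_loss={tracker.average():.4f} over {tracker.num_frames:.2f} frames")
```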
+[Epoch 13, batches 650-1650: tot_loss steady at ≈ 0.227-0.231 (simple_loss ≈ 0.303-0.307, pruned_loss ≈ 0.075-0.077); lr 5.98e-03 to 5.95e-03; grad_scale drops back from 16.0 to 8.0 by batch 900; clipping thresholds ≈ 5.6e+02-6.6e+02, percent-clipped 0.0-8.0; cuts 5239-32139-0047-92994_sp0.9 (30.16 s), 8631-249866-0030-64025_sp0.9 (26.33 s), 7357-94126-0014-44390_sp1.1 (25.38 s), 6951-79737-0043-83149 (25.29 s), 2929-85685-0079-61403 (29.74 s), 7255-291500-0005-9467_sp0.9 (27.82 s) and 8291-282929-0007-9590 (25.85 s) excluded for duration; zipformer warmup and attention-entropy diagnostics omitted.]
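Every "Exclude cut with ID ..." warning in this log involves an utterance longer than about 25 s or shorter than about 1 s: over-long cuts risk running out of GPU memory and near-empty cuts carry almost no supervision, so recipes drop both before batching. A sketch of such a filter with lhotse, the data library icefall recipes build on (the helper name and exact bounds are assumptions; the recipe's train.py has the real ones):

```python
import logging

from lhotse import CutSet


def remove_short_and_long_utt(cuts: CutSet,
                              min_duration: float = 1.0,
                              max_duration: float = 25.0) -> CutSet:
    """Keep only cuts with min_duration <= duration <= max_duration (seconds).

    Sketch of the filtering suggested by the WARNING lines; bounds assumed.
    """

    def keep(cut) -> bool:
        if min_duration <= cut.duration <= max_duration:
            return True
        logging.warning(
            f"Exclude cut with ID {cut.id} from training. Duration: {cut.duration}"
        )
        return False

    return cuts.filter(keep)
```

With a lazily opened CutSet the predicate runs as data is drawn, which would explain why these warnings appear scattered through the training log rather than all at startup.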
+[Epoch 13, batches 1700-2450: tot_loss ≈ 0.2265-0.2297; lr 5.95e-03 to 5.93e-03; clipping thresholds ≈ 5.5e+02-6.8e+02; cuts 7357-94126-0021-44397_sp0.9 (27.51 s), 7357-94126-0014-44390 (27.92 s) and 4964-30587-0040-138716_sp0.9 (25.09 s) excluded for duration; remaining zipformer.py and scaling.py diagnostics omitted.]
], tot_loss[loss=0.2269, simple_loss=0.3032, pruned_loss=0.07529, over 1601783.31 frames. ], batch size: 72, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:45,256 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9704, 2.7284, 3.6738, 2.2787, 1.9078, 3.5297, 0.7671, 2.2027], + device='cuda:1'), covar=tensor([0.2114, 0.1348, 0.0239, 0.2351, 0.3698, 0.0484, 0.3327, 0.1783], + device='cuda:1'), in_proj_covar=tensor([0.0172, 0.0176, 0.0104, 0.0220, 0.0257, 0.0111, 0.0165, 0.0167], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 13:18:57,692 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:10,748 INFO [train.py:901] (1/4) Epoch 13, batch 2550, loss[loss=0.2458, simple_loss=0.3102, pruned_loss=0.0907, over 7707.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3029, pruned_loss=0.0752, over 1602494.57 frames. ], batch size: 18, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:19:17,199 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.420e+02 2.977e+02 3.875e+02 7.325e+02, threshold=5.954e+02, percent-clipped=4.0 +2023-02-06 13:19:19,406 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6568, 2.3317, 3.4490, 2.6668, 2.9815, 2.3741, 2.0144, 1.8526], + device='cuda:1'), covar=tensor([0.3872, 0.4153, 0.1357, 0.2838, 0.2156, 0.2372, 0.1631, 0.4603], + device='cuda:1'), in_proj_covar=tensor([0.0893, 0.0887, 0.0738, 0.0862, 0.0943, 0.0812, 0.0703, 0.0774], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:19:20,014 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7018, 1.8258, 2.5468, 1.6769, 1.1482, 2.4366, 0.3956, 1.3671], + device='cuda:1'), covar=tensor([0.2653, 0.1592, 0.0412, 0.2378, 0.4636, 0.0496, 0.3524, 0.2097], + device='cuda:1'), in_proj_covar=tensor([0.0172, 0.0176, 0.0103, 0.0219, 0.0257, 0.0110, 0.0165, 0.0167], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 13:19:21,127 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6653, 2.6483, 2.0053, 2.2204, 2.2717, 1.6541, 1.9396, 2.2283], + device='cuda:1'), covar=tensor([0.1229, 0.0340, 0.0826, 0.0526, 0.0551, 0.1147, 0.0903, 0.0724], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0240, 0.0320, 0.0300, 0.0301, 0.0326, 0.0344, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:19:37,733 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99586.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:41,078 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:45,635 INFO [train.py:901] (1/4) Epoch 13, batch 2600, loss[loss=0.2591, simple_loss=0.3272, pruned_loss=0.09551, over 8695.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3041, pruned_loss=0.07546, over 1611186.03 frames. 
], batch size: 49, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:19:52,602 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:03,510 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:06,924 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:08,162 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:19,551 INFO [train.py:901] (1/4) Epoch 13, batch 2650, loss[loss=0.251, simple_loss=0.3376, pruned_loss=0.08222, over 8556.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3043, pruned_loss=0.07509, over 1614300.66 frames. ], batch size: 31, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:20,458 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:23,636 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:25,434 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.403e+02 3.099e+02 4.031e+02 8.160e+02, threshold=6.198e+02, percent-clipped=1.0 +2023-02-06 13:20:47,382 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:54,454 INFO [train.py:901] (1/4) Epoch 13, batch 2700, loss[loss=0.2154, simple_loss=0.2933, pruned_loss=0.06868, over 7659.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3045, pruned_loss=0.07487, over 1618536.19 frames. ], batch size: 19, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:55,210 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:59,153 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8291, 1.4967, 1.8011, 1.4774, 1.1183, 1.6579, 2.2025, 2.1767], + device='cuda:1'), covar=tensor([0.0420, 0.1358, 0.1793, 0.1462, 0.0591, 0.1561, 0.0625, 0.0603], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 13:20:59,201 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:00,475 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:12,257 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 13:21:16,796 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:27,694 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:28,869 INFO [train.py:901] (1/4) Epoch 13, batch 2750, loss[loss=0.2268, simple_loss=0.3099, pruned_loss=0.07186, over 8254.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3041, pruned_loss=0.07468, over 1616024.89 frames. 
], batch size: 24, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:21:34,776 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.508e+02 3.194e+02 3.866e+02 8.318e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 13:21:52,456 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:53,978 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:55,002 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 13:21:57,553 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 13:22:03,191 INFO [train.py:901] (1/4) Epoch 13, batch 2800, loss[loss=0.2386, simple_loss=0.3263, pruned_loss=0.07544, over 8491.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3055, pruned_loss=0.07557, over 1617059.67 frames. ], batch size: 26, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:06,170 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:11,449 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:26,105 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99830.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:22:37,606 INFO [train.py:901] (1/4) Epoch 13, batch 2850, loss[loss=0.2106, simple_loss=0.2831, pruned_loss=0.06908, over 7662.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3046, pruned_loss=0.0753, over 1615877.49 frames. ], batch size: 19, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:43,877 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.486e+02 2.909e+02 3.673e+02 9.445e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-06 13:23:11,478 INFO [train.py:901] (1/4) Epoch 13, batch 2900, loss[loss=0.2204, simple_loss=0.3029, pruned_loss=0.06896, over 8034.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.305, pruned_loss=0.07579, over 1612949.00 frames. ], batch size: 22, lr: 5.92e-03, grad_scale: 16.0 +2023-02-06 13:23:11,680 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:34,894 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:44,996 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99945.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:23:46,141 INFO [train.py:901] (1/4) Epoch 13, batch 2950, loss[loss=0.221, simple_loss=0.2833, pruned_loss=0.07937, over 7971.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3055, pruned_loss=0.0763, over 1612364.57 frames. ], batch size: 21, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:23:48,968 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:52,246 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-06 13:23:52,911 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.762e+02 3.280e+02 4.150e+02 8.176e+02, threshold=6.560e+02, percent-clipped=12.0 +2023-02-06 13:23:57,303 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99962.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:14,501 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:21,176 INFO [train.py:901] (1/4) Epoch 13, batch 3000, loss[loss=0.2194, simple_loss=0.2899, pruned_loss=0.07452, over 7801.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3047, pruned_loss=0.07555, over 1608323.02 frames. ], batch size: 19, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:24:21,176 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 13:24:33,566 INFO [train.py:935] (1/4) Epoch 13, validation: loss=0.1841, simple_loss=0.2841, pruned_loss=0.04204, over 944034.00 frames. +2023-02-06 13:24:33,567 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 13:24:37,722 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:42,513 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3255, 1.1799, 1.4386, 1.1919, 0.7342, 1.2745, 1.2878, 1.1196], + device='cuda:1'), covar=tensor([0.0514, 0.1362, 0.1843, 0.1444, 0.0600, 0.1568, 0.0681, 0.0657], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 13:24:53,937 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100025.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:24:55,300 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:05,693 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:07,828 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:09,025 INFO [train.py:901] (1/4) Epoch 13, batch 3050, loss[loss=0.2242, simple_loss=0.31, pruned_loss=0.06926, over 8337.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3055, pruned_loss=0.07588, over 1613194.37 frames. 
], batch size: 26, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:25:15,849 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.522e+02 3.008e+02 4.207e+02 1.157e+03, threshold=6.017e+02, percent-clipped=6.0 +2023-02-06 13:25:16,792 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:22,900 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:24,259 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1501, 1.2132, 4.2759, 1.6369, 3.7506, 3.5111, 3.8548, 3.7766], + device='cuda:1'), covar=tensor([0.0526, 0.4481, 0.0520, 0.3641, 0.1145, 0.1065, 0.0578, 0.0620], + device='cuda:1'), in_proj_covar=tensor([0.0516, 0.0591, 0.0609, 0.0558, 0.0634, 0.0546, 0.0538, 0.0591], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:25:34,609 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:41,469 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9925, 2.7930, 3.4789, 2.0803, 1.8598, 3.5279, 0.8726, 2.2527], + device='cuda:1'), covar=tensor([0.2226, 0.1445, 0.0330, 0.2569, 0.3965, 0.0384, 0.3299, 0.1836], + device='cuda:1'), in_proj_covar=tensor([0.0173, 0.0177, 0.0106, 0.0222, 0.0260, 0.0112, 0.0166, 0.0170], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 13:25:44,733 INFO [train.py:901] (1/4) Epoch 13, batch 3100, loss[loss=0.2624, simple_loss=0.3409, pruned_loss=0.09196, over 8359.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3062, pruned_loss=0.07614, over 1616132.83 frames. ], batch size: 24, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:19,849 INFO [train.py:901] (1/4) Epoch 13, batch 3150, loss[loss=0.1893, simple_loss=0.27, pruned_loss=0.05433, over 8136.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3055, pruned_loss=0.07551, over 1618001.84 frames. ], batch size: 22, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:24,151 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:25,932 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.491e+02 3.036e+02 4.077e+02 6.258e+02, threshold=6.072e+02, percent-clipped=1.0 +2023-02-06 13:26:26,823 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:41,745 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:55,151 INFO [train.py:901] (1/4) Epoch 13, batch 3200, loss[loss=0.2024, simple_loss=0.2877, pruned_loss=0.05854, over 7810.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3046, pruned_loss=0.07509, over 1617233.31 frames. 
], batch size: 20, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:58,247 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100201.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:27:05,241 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0660, 1.4868, 1.5837, 1.3782, 1.0188, 1.4238, 1.7094, 1.8365], + device='cuda:1'), covar=tensor([0.0527, 0.1281, 0.1692, 0.1421, 0.0624, 0.1527, 0.0711, 0.0554], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0191, 0.0158, 0.0102, 0.0163, 0.0114, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 13:27:15,349 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100226.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:27:29,408 INFO [train.py:901] (1/4) Epoch 13, batch 3250, loss[loss=0.2488, simple_loss=0.3222, pruned_loss=0.08772, over 8588.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3049, pruned_loss=0.07537, over 1620426.27 frames. ], batch size: 34, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:27:34,918 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8355, 3.7877, 3.4199, 1.7765, 3.4084, 3.3881, 3.4026, 3.1134], + device='cuda:1'), covar=tensor([0.0889, 0.0684, 0.1109, 0.4137, 0.0926, 0.0968, 0.1400, 0.1002], + device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0387, 0.0391, 0.0490, 0.0388, 0.0392, 0.0385, 0.0342], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 13:27:35,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.430e+02 2.991e+02 3.670e+02 7.489e+02, threshold=5.982e+02, percent-clipped=4.0 +2023-02-06 13:27:35,627 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:04,522 INFO [train.py:901] (1/4) Epoch 13, batch 3300, loss[loss=0.2601, simple_loss=0.3337, pruned_loss=0.09323, over 8517.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3047, pruned_loss=0.07543, over 1613530.46 frames. ], batch size: 26, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:28:07,523 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:22,245 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:24,889 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:39,548 INFO [train.py:901] (1/4) Epoch 13, batch 3350, loss[loss=0.1937, simple_loss=0.261, pruned_loss=0.06317, over 7687.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3039, pruned_loss=0.07471, over 1614180.78 frames. 
], batch size: 18, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:28:39,750 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:45,562 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.599e+02 3.166e+02 3.997e+02 7.990e+02, threshold=6.333e+02, percent-clipped=6.0 +2023-02-06 13:28:54,364 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100369.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:29:13,503 INFO [train.py:901] (1/4) Epoch 13, batch 3400, loss[loss=0.1871, simple_loss=0.2598, pruned_loss=0.05724, over 7700.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3034, pruned_loss=0.07453, over 1613578.92 frames. ], batch size: 18, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:29:25,268 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:29,914 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:42,785 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:48,792 INFO [train.py:901] (1/4) Epoch 13, batch 3450, loss[loss=0.2711, simple_loss=0.3302, pruned_loss=0.106, over 8532.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3016, pruned_loss=0.0731, over 1611156.41 frames. ], batch size: 39, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:29:54,821 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.523e+02 3.011e+02 4.006e+02 7.808e+02, threshold=6.023e+02, percent-clipped=2.0 +2023-02-06 13:30:14,314 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100484.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:30:23,422 INFO [train.py:901] (1/4) Epoch 13, batch 3500, loss[loss=0.223, simple_loss=0.2955, pruned_loss=0.07522, over 7929.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3027, pruned_loss=0.07398, over 1611297.78 frames. ], batch size: 20, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:30:26,341 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9739, 2.4599, 3.6357, 2.7233, 3.1012, 2.6523, 2.2589, 2.0314], + device='cuda:1'), covar=tensor([0.3395, 0.3947, 0.1231, 0.2707, 0.2167, 0.2050, 0.1534, 0.4277], + device='cuda:1'), in_proj_covar=tensor([0.0882, 0.0881, 0.0727, 0.0849, 0.0932, 0.0808, 0.0699, 0.0768], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:30:53,031 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 13:30:58,419 INFO [train.py:901] (1/4) Epoch 13, batch 3550, loss[loss=0.2131, simple_loss=0.2914, pruned_loss=0.06741, over 8462.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3031, pruned_loss=0.07453, over 1601823.82 frames. ], batch size: 25, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:31:04,446 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.564e+02 3.091e+02 3.906e+02 9.185e+02, threshold=6.182e+02, percent-clipped=3.0 +2023-02-06 13:31:33,167 INFO [train.py:901] (1/4) Epoch 13, batch 3600, loss[loss=0.3194, simple_loss=0.3685, pruned_loss=0.1351, over 7071.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3025, pruned_loss=0.07439, over 1601058.99 frames. 
], batch size: 71, lr: 5.89e-03, grad_scale: 16.0 +2023-02-06 13:31:35,333 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:31:40,865 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2402, 1.5470, 1.6757, 1.3951, 1.0997, 1.4745, 1.9573, 1.9142], + device='cuda:1'), covar=tensor([0.0471, 0.1188, 0.1645, 0.1316, 0.0603, 0.1463, 0.0602, 0.0569], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0151, 0.0189, 0.0156, 0.0101, 0.0162, 0.0114, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 13:32:08,079 INFO [train.py:901] (1/4) Epoch 13, batch 3650, loss[loss=0.2569, simple_loss=0.333, pruned_loss=0.09044, over 8265.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3025, pruned_loss=0.07465, over 1599469.78 frames. ], batch size: 24, lr: 5.89e-03, grad_scale: 16.0 +2023-02-06 13:32:12,473 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-06 13:32:14,110 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.362e+02 3.080e+02 3.827e+02 7.938e+02, threshold=6.161e+02, percent-clipped=3.0 +2023-02-06 13:32:38,642 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5753, 1.3319, 4.7282, 1.7449, 4.1825, 3.9566, 4.2862, 4.1359], + device='cuda:1'), covar=tensor([0.0476, 0.4562, 0.0397, 0.3429, 0.0904, 0.0774, 0.0490, 0.0542], + device='cuda:1'), in_proj_covar=tensor([0.0518, 0.0599, 0.0616, 0.0560, 0.0637, 0.0548, 0.0537, 0.0597], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:32:43,156 INFO [train.py:901] (1/4) Epoch 13, batch 3700, loss[loss=0.216, simple_loss=0.2953, pruned_loss=0.06836, over 8449.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3019, pruned_loss=0.07444, over 1601517.47 frames. ], batch size: 27, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:32:55,265 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:32:57,153 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 13:33:09,354 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 13:33:12,128 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 13:33:13,302 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100740.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:33:17,859 INFO [train.py:901] (1/4) Epoch 13, batch 3750, loss[loss=0.2563, simple_loss=0.3297, pruned_loss=0.09143, over 8116.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3025, pruned_loss=0.0745, over 1603076.16 frames. 
], batch size: 23, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:33:18,692 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100748.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:33:24,692 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.543e+02 3.029e+02 3.909e+02 6.778e+02, threshold=6.059e+02, percent-clipped=2.0 +2023-02-06 13:33:28,257 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5287, 1.9717, 2.2477, 1.1123, 2.3287, 1.3222, 0.6795, 1.6998], + device='cuda:1'), covar=tensor([0.0582, 0.0273, 0.0207, 0.0525, 0.0268, 0.0743, 0.0735, 0.0319], + device='cuda:1'), in_proj_covar=tensor([0.0400, 0.0341, 0.0290, 0.0398, 0.0327, 0.0489, 0.0363, 0.0367], + device='cuda:1'), out_proj_covar=tensor([1.1185e-04, 9.3302e-05, 7.9554e-05, 1.0965e-04, 9.0508e-05, 1.4573e-04, + 1.0217e-04, 1.0191e-04], device='cuda:1') +2023-02-06 13:33:30,145 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:33:31,007 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100765.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:33:52,963 INFO [train.py:901] (1/4) Epoch 13, batch 3800, loss[loss=0.197, simple_loss=0.2634, pruned_loss=0.06532, over 7766.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.303, pruned_loss=0.0746, over 1606565.21 frames. ], batch size: 19, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:34:22,774 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 13:34:27,591 INFO [train.py:901] (1/4) Epoch 13, batch 3850, loss[loss=0.2445, simple_loss=0.3256, pruned_loss=0.08174, over 8546.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3031, pruned_loss=0.07465, over 1604893.09 frames. ], batch size: 31, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:34:34,506 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.985e+02 2.830e+02 3.312e+02 3.730e+02 7.453e+02, threshold=6.624e+02, percent-clipped=3.0 +2023-02-06 13:34:50,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100879.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:35:01,746 INFO [train.py:901] (1/4) Epoch 13, batch 3900, loss[loss=0.2024, simple_loss=0.2889, pruned_loss=0.05792, over 8279.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3045, pruned_loss=0.07545, over 1610436.54 frames. ], batch size: 23, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:35:01,756 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 13:35:12,828 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 13:35:18,420 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.46 vs. limit=5.0 +2023-02-06 13:35:33,879 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0037, 1.8444, 4.2279, 1.7330, 2.4702, 4.6102, 4.7737, 3.8435], + device='cuda:1'), covar=tensor([0.1345, 0.1627, 0.0296, 0.2259, 0.1123, 0.0287, 0.0423, 0.0819], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0301, 0.0265, 0.0293, 0.0277, 0.0239, 0.0359, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:35:37,127 INFO [train.py:901] (1/4) Epoch 13, batch 3950, loss[loss=0.2227, simple_loss=0.3075, pruned_loss=0.06891, over 8241.00 frames. 
], tot_loss[loss=0.2276, simple_loss=0.3047, pruned_loss=0.0753, over 1612580.56 frames. ], batch size: 22, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:35:44,007 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.489e+02 3.011e+02 3.855e+02 9.802e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-06 13:35:54,143 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:36:11,866 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:36:12,368 INFO [train.py:901] (1/4) Epoch 13, batch 4000, loss[loss=0.2516, simple_loss=0.3282, pruned_loss=0.08747, over 8614.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3061, pruned_loss=0.07585, over 1616804.36 frames. ], batch size: 49, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:36:47,681 INFO [train.py:901] (1/4) Epoch 13, batch 4050, loss[loss=0.2241, simple_loss=0.3104, pruned_loss=0.06896, over 8285.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3062, pruned_loss=0.07571, over 1618891.09 frames. ], batch size: 23, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:36:54,252 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.645e+02 3.184e+02 3.816e+02 9.518e+02, threshold=6.368e+02, percent-clipped=3.0 +2023-02-06 13:37:17,940 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-06 13:37:18,351 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101092.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:37:18,940 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5885, 4.6206, 4.1408, 1.9556, 4.1253, 4.1394, 4.1955, 3.9284], + device='cuda:1'), covar=tensor([0.0884, 0.0613, 0.1204, 0.4843, 0.0834, 0.0861, 0.1400, 0.0810], + device='cuda:1'), in_proj_covar=tensor([0.0480, 0.0388, 0.0399, 0.0497, 0.0392, 0.0393, 0.0387, 0.0341], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 13:37:21,159 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6837, 1.9810, 2.1112, 1.2703, 2.2508, 1.4824, 0.7440, 1.8374], + device='cuda:1'), covar=tensor([0.0408, 0.0198, 0.0159, 0.0384, 0.0229, 0.0562, 0.0542, 0.0193], + device='cuda:1'), in_proj_covar=tensor([0.0399, 0.0338, 0.0290, 0.0400, 0.0328, 0.0487, 0.0366, 0.0367], + device='cuda:1'), out_proj_covar=tensor([1.1169e-04, 9.2409e-05, 7.9217e-05, 1.1059e-04, 9.0656e-05, 1.4486e-04, + 1.0278e-04, 1.0186e-04], device='cuda:1') +2023-02-06 13:37:21,621 INFO [train.py:901] (1/4) Epoch 13, batch 4100, loss[loss=0.2614, simple_loss=0.3347, pruned_loss=0.09401, over 8553.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3052, pruned_loss=0.07514, over 1618150.50 frames. 
], batch size: 39, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:37:41,405 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:37:47,943 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:37:50,602 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5662, 1.5299, 4.6953, 1.6682, 4.1096, 3.8707, 4.2396, 4.1275], + device='cuda:1'), covar=tensor([0.0470, 0.4480, 0.0434, 0.3774, 0.1085, 0.0932, 0.0540, 0.0610], + device='cuda:1'), in_proj_covar=tensor([0.0515, 0.0591, 0.0619, 0.0556, 0.0634, 0.0547, 0.0536, 0.0600], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:37:55,653 INFO [train.py:901] (1/4) Epoch 13, batch 4150, loss[loss=0.2419, simple_loss=0.3433, pruned_loss=0.07023, over 8102.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3067, pruned_loss=0.07632, over 1615040.43 frames. ], batch size: 23, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:38:02,999 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.783e+02 3.401e+02 4.642e+02 1.010e+03, threshold=6.803e+02, percent-clipped=7.0 +2023-02-06 13:38:05,271 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:38:30,233 INFO [train.py:901] (1/4) Epoch 13, batch 4200, loss[loss=0.2025, simple_loss=0.2856, pruned_loss=0.05974, over 8103.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3072, pruned_loss=0.07676, over 1613507.26 frames. ], batch size: 23, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:38:37,994 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101207.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:38:54,601 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 13:39:05,853 INFO [train.py:901] (1/4) Epoch 13, batch 4250, loss[loss=0.2179, simple_loss=0.2945, pruned_loss=0.07067, over 8233.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3069, pruned_loss=0.07665, over 1616776.37 frames. ], batch size: 22, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:39:12,480 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 2.514e+02 3.154e+02 3.992e+02 7.648e+02, threshold=6.307e+02, percent-clipped=3.0 +2023-02-06 13:39:16,514 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 13:39:40,007 INFO [train.py:901] (1/4) Epoch 13, batch 4300, loss[loss=0.1951, simple_loss=0.2648, pruned_loss=0.06266, over 7429.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3068, pruned_loss=0.07657, over 1617547.18 frames. ], batch size: 17, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:14,576 INFO [train.py:901] (1/4) Epoch 13, batch 4350, loss[loss=0.1988, simple_loss=0.2637, pruned_loss=0.06697, over 7234.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3059, pruned_loss=0.07599, over 1616840.93 frames. 
], batch size: 16, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:21,344 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.682e+02 3.184e+02 4.441e+02 9.358e+02, threshold=6.368e+02, percent-clipped=11.0 +2023-02-06 13:40:30,790 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7122, 1.3794, 3.8754, 1.4683, 3.3945, 3.2270, 3.5065, 3.4218], + device='cuda:1'), covar=tensor([0.0588, 0.4064, 0.0652, 0.3562, 0.1120, 0.0888, 0.0577, 0.0648], + device='cuda:1'), in_proj_covar=tensor([0.0510, 0.0584, 0.0615, 0.0551, 0.0623, 0.0537, 0.0531, 0.0590], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:40:49,428 INFO [train.py:901] (1/4) Epoch 13, batch 4400, loss[loss=0.2435, simple_loss=0.317, pruned_loss=0.08498, over 8449.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3068, pruned_loss=0.07598, over 1620716.89 frames. ], batch size: 29, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:49,436 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 13:41:20,991 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9981, 2.2396, 1.9292, 2.7911, 1.0694, 1.5558, 1.8177, 2.3005], + device='cuda:1'), covar=tensor([0.0774, 0.0855, 0.0940, 0.0413, 0.1257, 0.1405, 0.1022, 0.0800], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0212, 0.0256, 0.0214, 0.0216, 0.0254, 0.0260, 0.0219], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 13:41:22,953 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9950, 1.7311, 3.4029, 1.4127, 2.2329, 3.7390, 3.8077, 3.1647], + device='cuda:1'), covar=tensor([0.1079, 0.1522, 0.0307, 0.2173, 0.1039, 0.0211, 0.0437, 0.0585], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0302, 0.0266, 0.0296, 0.0278, 0.0240, 0.0360, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:41:23,440 INFO [train.py:901] (1/4) Epoch 13, batch 4450, loss[loss=0.1956, simple_loss=0.2745, pruned_loss=0.05836, over 7800.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3069, pruned_loss=0.07587, over 1619634.38 frames. ], batch size: 20, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:41:28,826 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 13:41:30,664 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 2.700e+02 3.319e+02 4.103e+02 1.285e+03, threshold=6.638e+02, percent-clipped=3.0 +2023-02-06 13:41:34,860 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101463.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:41:38,376 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-06 13:41:38,578 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:41:52,078 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101488.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:41:57,888 INFO [train.py:901] (1/4) Epoch 13, batch 4500, loss[loss=0.2623, simple_loss=0.3277, pruned_loss=0.09846, over 7968.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.307, pruned_loss=0.07582, over 1618552.51 frames. 
], batch size: 21, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:42:04,780 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 13:42:22,190 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 13:42:30,588 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1010, 1.7739, 2.4624, 1.9131, 2.3322, 1.9547, 1.6812, 1.1381], + device='cuda:1'), covar=tensor([0.3723, 0.3733, 0.1318, 0.2657, 0.1656, 0.2371, 0.1705, 0.3769], + device='cuda:1'), in_proj_covar=tensor([0.0893, 0.0893, 0.0746, 0.0868, 0.0944, 0.0818, 0.0709, 0.0778], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:42:33,108 INFO [train.py:901] (1/4) Epoch 13, batch 4550, loss[loss=0.2338, simple_loss=0.3127, pruned_loss=0.07746, over 8143.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3062, pruned_loss=0.07546, over 1616549.19 frames. ], batch size: 22, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:42:39,101 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 13:42:39,884 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.403e+02 2.986e+02 3.546e+02 6.918e+02, threshold=5.973e+02, percent-clipped=1.0 +2023-02-06 13:42:58,931 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:43:08,196 INFO [train.py:901] (1/4) Epoch 13, batch 4600, loss[loss=0.2487, simple_loss=0.3289, pruned_loss=0.08421, over 8467.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.305, pruned_loss=0.07443, over 1619643.98 frames. ], batch size: 49, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:43:42,608 INFO [train.py:901] (1/4) Epoch 13, batch 4650, loss[loss=0.2009, simple_loss=0.282, pruned_loss=0.05989, over 7542.00 frames. ], tot_loss[loss=0.227, simple_loss=0.305, pruned_loss=0.07451, over 1620848.55 frames. ], batch size: 18, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:43:49,456 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.500e+02 2.989e+02 3.844e+02 7.619e+02, threshold=5.978e+02, percent-clipped=4.0 +2023-02-06 13:44:00,410 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2830, 1.5657, 4.4836, 1.9236, 3.9358, 3.7098, 4.0370, 3.9016], + device='cuda:1'), covar=tensor([0.0557, 0.4384, 0.0467, 0.3311, 0.0992, 0.0781, 0.0524, 0.0607], + device='cuda:1'), in_proj_covar=tensor([0.0510, 0.0581, 0.0613, 0.0547, 0.0622, 0.0534, 0.0526, 0.0589], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:44:17,686 INFO [train.py:901] (1/4) Epoch 13, batch 4700, loss[loss=0.2153, simple_loss=0.2865, pruned_loss=0.0721, over 7659.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3044, pruned_loss=0.07443, over 1620067.93 frames. ], batch size: 19, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:44:21,754 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:44:52,087 INFO [train.py:901] (1/4) Epoch 13, batch 4750, loss[loss=0.2067, simple_loss=0.2809, pruned_loss=0.0662, over 7791.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3027, pruned_loss=0.07372, over 1614314.35 frames. 
], batch size: 19, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:44:55,560 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0982, 1.6370, 3.3015, 1.5041, 2.2859, 3.6123, 3.7212, 3.0602], + device='cuda:1'), covar=tensor([0.0993, 0.1484, 0.0315, 0.1975, 0.0992, 0.0238, 0.0499, 0.0593], + device='cuda:1'), in_proj_covar=tensor([0.0267, 0.0299, 0.0264, 0.0294, 0.0276, 0.0237, 0.0358, 0.0294], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:44:59,502 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.558e+02 3.081e+02 3.778e+02 8.564e+02, threshold=6.162e+02, percent-clipped=2.0 +2023-02-06 13:45:21,975 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 13:45:24,390 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 13:45:27,102 INFO [train.py:901] (1/4) Epoch 13, batch 4800, loss[loss=0.2252, simple_loss=0.3017, pruned_loss=0.07435, over 8370.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3026, pruned_loss=0.07381, over 1613240.01 frames. ], batch size: 48, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:45:57,593 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101840.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:46:01,954 INFO [train.py:901] (1/4) Epoch 13, batch 4850, loss[loss=0.2447, simple_loss=0.3291, pruned_loss=0.08019, over 8331.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3031, pruned_loss=0.07427, over 1610170.91 frames. ], batch size: 26, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:46:08,646 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.589e+02 3.137e+02 3.918e+02 7.572e+02, threshold=6.274e+02, percent-clipped=4.0 +2023-02-06 13:46:14,101 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 13:46:14,946 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:46:36,888 INFO [train.py:901] (1/4) Epoch 13, batch 4900, loss[loss=0.2033, simple_loss=0.2997, pruned_loss=0.05349, over 8453.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3036, pruned_loss=0.07418, over 1614912.18 frames. ], batch size: 29, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:46:45,576 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 13:47:06,381 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101938.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:47:12,292 INFO [train.py:901] (1/4) Epoch 13, batch 4950, loss[loss=0.2376, simple_loss=0.3254, pruned_loss=0.07493, over 8327.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3031, pruned_loss=0.07393, over 1609966.69 frames. 
], batch size: 25, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:47:19,104 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.432e+02 3.023e+02 3.670e+02 7.494e+02, threshold=6.046e+02, percent-clipped=3.0 +2023-02-06 13:47:30,006 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6959, 1.4555, 1.6040, 1.3173, 0.8994, 1.3466, 1.4601, 1.4330], + device='cuda:1'), covar=tensor([0.0491, 0.1202, 0.1619, 0.1338, 0.0567, 0.1507, 0.0687, 0.0597], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0163, 0.0114, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 13:47:46,750 INFO [train.py:901] (1/4) Epoch 13, batch 5000, loss[loss=0.2282, simple_loss=0.3085, pruned_loss=0.07397, over 8337.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3021, pruned_loss=0.07363, over 1607733.29 frames. ], batch size: 26, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:47:53,355 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:48:16,611 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0180, 1.6559, 1.7420, 1.6447, 1.1955, 1.8377, 2.1206, 2.0976], + device='cuda:1'), covar=tensor([0.0391, 0.1214, 0.1634, 0.1321, 0.0583, 0.1429, 0.0613, 0.0534], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0158, 0.0102, 0.0163, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 13:48:22,481 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:48:23,084 INFO [train.py:901] (1/4) Epoch 13, batch 5050, loss[loss=0.1864, simple_loss=0.2631, pruned_loss=0.05485, over 8038.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3016, pruned_loss=0.07382, over 1606529.74 frames. ], batch size: 20, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:48:29,968 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.626e+02 3.300e+02 4.185e+02 9.088e+02, threshold=6.599e+02, percent-clipped=3.0 +2023-02-06 13:48:54,021 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 13:48:56,649 INFO [train.py:901] (1/4) Epoch 13, batch 5100, loss[loss=0.2246, simple_loss=0.3131, pruned_loss=0.068, over 7964.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3035, pruned_loss=0.07449, over 1604956.06 frames. ], batch size: 21, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:49:31,561 INFO [train.py:901] (1/4) Epoch 13, batch 5150, loss[loss=0.1911, simple_loss=0.2693, pruned_loss=0.05643, over 8240.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3048, pruned_loss=0.0755, over 1608391.44 frames. ], batch size: 22, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:49:38,306 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.413e+02 2.853e+02 3.425e+02 7.647e+02, threshold=5.706e+02, percent-clipped=3.0 +2023-02-06 13:49:41,794 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:50:06,648 INFO [train.py:901] (1/4) Epoch 13, batch 5200, loss[loss=0.2242, simple_loss=0.307, pruned_loss=0.07067, over 8474.00 frames. 
], tot_loss[loss=0.2276, simple_loss=0.3042, pruned_loss=0.07553, over 1606617.81 frames. ], batch size: 25, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:50:41,853 INFO [train.py:901] (1/4) Epoch 13, batch 5250, loss[loss=0.2391, simple_loss=0.3277, pruned_loss=0.07527, over 8496.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3046, pruned_loss=0.07538, over 1608237.64 frames. ], batch size: 29, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:50:48,575 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.565e+02 3.047e+02 3.925e+02 1.157e+03, threshold=6.094e+02, percent-clipped=6.0 +2023-02-06 13:50:51,018 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.79 vs. limit=5.0 +2023-02-06 13:50:53,901 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 13:51:06,839 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102282.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:51:16,478 INFO [train.py:901] (1/4) Epoch 13, batch 5300, loss[loss=0.2034, simple_loss=0.2863, pruned_loss=0.06025, over 8602.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3033, pruned_loss=0.07469, over 1606391.99 frames. ], batch size: 31, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:51:51,008 INFO [train.py:901] (1/4) Epoch 13, batch 5350, loss[loss=0.2477, simple_loss=0.3251, pruned_loss=0.08515, over 8506.00 frames. ], tot_loss[loss=0.227, simple_loss=0.304, pruned_loss=0.075, over 1611509.84 frames. ], batch size: 26, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:51:52,483 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:51:52,614 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5955, 2.7796, 1.7947, 2.1871, 2.2138, 1.4933, 2.0296, 2.1930], + device='cuda:1'), covar=tensor([0.1474, 0.0395, 0.1234, 0.0707, 0.0700, 0.1493, 0.1075, 0.1090], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0236, 0.0318, 0.0297, 0.0298, 0.0324, 0.0345, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:51:57,799 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.535e+02 3.049e+02 3.805e+02 7.372e+02, threshold=6.098e+02, percent-clipped=2.0 +2023-02-06 13:52:08,205 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. limit=2.0 +2023-02-06 13:52:26,065 INFO [train.py:901] (1/4) Epoch 13, batch 5400, loss[loss=0.2384, simple_loss=0.3119, pruned_loss=0.08246, over 8743.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3048, pruned_loss=0.07536, over 1616189.70 frames. ], batch size: 39, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:52:26,257 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102397.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:52:40,305 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:52:56,882 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:53:00,140 INFO [train.py:901] (1/4) Epoch 13, batch 5450, loss[loss=0.2169, simple_loss=0.3041, pruned_loss=0.06489, over 8356.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3056, pruned_loss=0.0758, over 1614639.34 frames. 
], batch size: 24, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:53:07,658 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.724e+02 3.222e+02 3.900e+02 7.023e+02, threshold=6.444e+02, percent-clipped=3.0 +2023-02-06 13:53:12,594 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:53:34,976 INFO [train.py:901] (1/4) Epoch 13, batch 5500, loss[loss=0.2024, simple_loss=0.2786, pruned_loss=0.06313, over 7534.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3049, pruned_loss=0.07549, over 1612773.96 frames. ], batch size: 18, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:53:41,586 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 13:54:03,099 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0110, 1.7628, 3.3235, 1.3184, 2.2711, 3.7226, 3.8154, 3.1283], + device='cuda:1'), covar=tensor([0.1117, 0.1473, 0.0350, 0.2328, 0.1014, 0.0214, 0.0397, 0.0597], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0300, 0.0265, 0.0296, 0.0277, 0.0237, 0.0360, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:54:09,472 INFO [train.py:901] (1/4) Epoch 13, batch 5550, loss[loss=0.1678, simple_loss=0.2497, pruned_loss=0.04293, over 7228.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3045, pruned_loss=0.0761, over 1608001.59 frames. ], batch size: 16, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:54:15,943 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.279e+02 3.010e+02 3.933e+02 6.976e+02, threshold=6.019e+02, percent-clipped=1.0 +2023-02-06 13:54:43,196 INFO [train.py:901] (1/4) Epoch 13, batch 5600, loss[loss=0.202, simple_loss=0.2717, pruned_loss=0.06609, over 7810.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3042, pruned_loss=0.07542, over 1611624.29 frames. ], batch size: 19, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:54:44,043 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3003, 1.6131, 4.3682, 2.0847, 2.3727, 5.0583, 5.0747, 4.3232], + device='cuda:1'), covar=tensor([0.1103, 0.1651, 0.0273, 0.1939, 0.1112, 0.0195, 0.0346, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0269, 0.0302, 0.0266, 0.0296, 0.0278, 0.0239, 0.0360, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 13:55:04,663 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2566, 2.5714, 2.9102, 1.4749, 2.9047, 1.8874, 1.5811, 2.1077], + device='cuda:1'), covar=tensor([0.0575, 0.0269, 0.0213, 0.0555, 0.0414, 0.0574, 0.0650, 0.0361], + device='cuda:1'), in_proj_covar=tensor([0.0406, 0.0341, 0.0293, 0.0401, 0.0332, 0.0490, 0.0368, 0.0373], + device='cuda:1'), out_proj_covar=tensor([1.1347e-04, 9.3025e-05, 7.9922e-05, 1.1031e-04, 9.1562e-05, 1.4528e-04, + 1.0327e-04, 1.0324e-04], device='cuda:1') +2023-02-06 13:55:18,285 INFO [train.py:901] (1/4) Epoch 13, batch 5650, loss[loss=0.2189, simple_loss=0.2924, pruned_loss=0.07267, over 8240.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3051, pruned_loss=0.07575, over 1611632.84 frames. 
], batch size: 22, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:55:22,473 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102653.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:55:24,858 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.730e+02 3.267e+02 4.266e+02 8.129e+02, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 13:55:39,201 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102678.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:55:43,646 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 13:55:52,506 INFO [train.py:901] (1/4) Epoch 13, batch 5700, loss[loss=0.2263, simple_loss=0.2986, pruned_loss=0.07698, over 8563.00 frames. ], tot_loss[loss=0.227, simple_loss=0.304, pruned_loss=0.07501, over 1614597.29 frames. ], batch size: 39, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:56:08,740 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:56:25,991 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:56:27,755 INFO [train.py:901] (1/4) Epoch 13, batch 5750, loss[loss=0.2056, simple_loss=0.2856, pruned_loss=0.06282, over 8246.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.304, pruned_loss=0.07441, over 1614126.42 frames. ], batch size: 22, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:56:34,417 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 2.514e+02 3.075e+02 4.012e+02 7.214e+02, threshold=6.150e+02, percent-clipped=2.0 +2023-02-06 13:56:45,826 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.77 vs. limit=5.0 +2023-02-06 13:56:47,305 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 13:56:49,609 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5441, 2.1768, 3.2814, 2.5676, 2.8602, 2.3339, 2.0730, 1.7960], + device='cuda:1'), covar=tensor([0.3959, 0.4336, 0.1515, 0.3168, 0.2553, 0.2521, 0.1696, 0.4821], + device='cuda:1'), in_proj_covar=tensor([0.0894, 0.0889, 0.0744, 0.0870, 0.0943, 0.0815, 0.0705, 0.0778], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:56:52,946 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8150, 2.1034, 2.3131, 1.2134, 2.4206, 1.6840, 0.6765, 2.0057], + device='cuda:1'), covar=tensor([0.0477, 0.0243, 0.0181, 0.0509, 0.0243, 0.0633, 0.0680, 0.0229], + device='cuda:1'), in_proj_covar=tensor([0.0407, 0.0342, 0.0295, 0.0403, 0.0334, 0.0492, 0.0369, 0.0374], + device='cuda:1'), out_proj_covar=tensor([1.1393e-04, 9.3301e-05, 8.0550e-05, 1.1094e-04, 9.2128e-05, 1.4583e-04, + 1.0362e-04, 1.0363e-04], device='cuda:1') +2023-02-06 13:57:01,390 INFO [train.py:901] (1/4) Epoch 13, batch 5800, loss[loss=0.2379, simple_loss=0.2899, pruned_loss=0.09301, over 7252.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3035, pruned_loss=0.07399, over 1615695.62 frames. ], batch size: 16, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:57:36,617 INFO [train.py:901] (1/4) Epoch 13, batch 5850, loss[loss=0.2137, simple_loss=0.2853, pruned_loss=0.07099, over 7800.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3026, pruned_loss=0.07367, over 1615994.23 frames. 
], batch size: 20, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:57:43,168 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.218e+02 2.874e+02 3.517e+02 7.476e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-06 13:57:52,589 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102869.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:58:04,221 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6090, 1.7163, 2.2394, 1.4188, 1.1717, 2.2070, 0.3776, 1.3992], + device='cuda:1'), covar=tensor([0.2813, 0.1626, 0.0422, 0.1848, 0.3834, 0.0432, 0.3001, 0.1763], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0174, 0.0106, 0.0219, 0.0255, 0.0110, 0.0164, 0.0171], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 13:58:11,326 INFO [train.py:901] (1/4) Epoch 13, batch 5900, loss[loss=0.2817, simple_loss=0.3372, pruned_loss=0.1131, over 8122.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.303, pruned_loss=0.07399, over 1617921.81 frames. ], batch size: 22, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:58:16,843 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:58:17,966 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-06 13:58:46,019 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 13:58:46,265 INFO [train.py:901] (1/4) Epoch 13, batch 5950, loss[loss=0.2378, simple_loss=0.3144, pruned_loss=0.08062, over 8021.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3029, pruned_loss=0.07396, over 1616709.15 frames. ], batch size: 22, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:58:52,891 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.537e+02 3.124e+02 4.010e+02 1.248e+03, threshold=6.247e+02, percent-clipped=9.0 +2023-02-06 13:59:01,045 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6139, 1.6691, 2.2862, 1.5704, 0.9962, 2.1909, 0.4121, 1.3873], + device='cuda:1'), covar=tensor([0.2508, 0.1563, 0.0447, 0.1791, 0.3808, 0.0491, 0.2935, 0.1725], + device='cuda:1'), in_proj_covar=tensor([0.0173, 0.0175, 0.0107, 0.0221, 0.0256, 0.0111, 0.0166, 0.0172], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 13:59:21,490 INFO [train.py:901] (1/4) Epoch 13, batch 6000, loss[loss=0.1955, simple_loss=0.2855, pruned_loss=0.0528, over 8233.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3041, pruned_loss=0.07478, over 1621243.01 frames. ], batch size: 24, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:59:21,490 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 13:59:30,390 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8459, 1.0161, 2.9810, 1.0743, 2.5896, 2.4539, 2.6255, 2.5370], + device='cuda:1'), covar=tensor([0.0472, 0.3475, 0.0383, 0.3206, 0.0910, 0.0749, 0.0471, 0.0618], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0581, 0.0604, 0.0552, 0.0624, 0.0529, 0.0524, 0.0582], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 13:59:36,606 INFO [train.py:935] (1/4) Epoch 13, validation: loss=0.1836, simple_loss=0.2836, pruned_loss=0.04176, over 944034.00 frames. 
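
The recurring `optim.py` lines above ("Clipping_scale=2.0, grad-norm quartiles … threshold=… percent-clipped=…") indicate that the clipping threshold is derived from recent gradient-norm statistics rather than a fixed constant, with `percent-clipped` reporting how often a batch exceeded it. Below is a minimal illustrative sketch of that idea; the class name `QuartileClipper` and parameters such as `window` are assumptions for illustration, not icefall's actual `optim.py` implementation, which differs in detail.

```python
import torch
from collections import deque

class QuartileClipper:
    """Illustrative sketch only: clip gradients against a threshold derived
    from the recent history of gradient norms, as suggested by the
    'grad-norm quartiles ... threshold=... percent-clipped=...' log lines.
    Names and details here are assumptions, not icefall's real code."""

    def __init__(self, clipping_scale=2.0, window=128):
        self.clipping_scale = clipping_scale      # matches Clipping_scale=2.0 in the log
        self.history = deque(maxlen=window)       # recent total grad norms
        self.num_clipped = 0
        self.num_steps = 0

    def clip_(self, parameters):
        params = [p for p in parameters if p.grad is not None]
        # Total 2-norm over all parameter gradients for this step.
        total_norm = torch.norm(
            torch.stack([p.grad.detach().norm(2) for p in params]), 2
        )
        self.history.append(total_norm.item())
        self.num_steps += 1
        # Threshold = scale * median of recent norms (the log also reports
        # the 25%/50%/75% quartiles of this history).
        norms = sorted(self.history)
        median = norms[len(norms) // 2]
        threshold = self.clipping_scale * median
        if total_norm > threshold:
            self.num_clipped += 1
            scale = threshold / (total_norm + 1e-6)
            for p in params:
                p.grad.detach().mul_(scale)
        return total_norm, threshold

    def percent_clipped(self):
        # Corresponds to the 'percent-clipped' figure in the log lines.
        return 100.0 * self.num_clipped / max(1, self.num_steps)
```

The point of tying the threshold to a running median rather than a constant is that it adapts automatically as the loss scale and gradient magnitudes drift over training, so only genuine outlier batches (a small `percent-clipped`) are rescaled.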
+2023-02-06 13:59:36,607 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 14:00:11,030 INFO [train.py:901] (1/4) Epoch 13, batch 6050, loss[loss=0.192, simple_loss=0.2732, pruned_loss=0.05543, over 7797.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3024, pruned_loss=0.07382, over 1617379.92 frames. ], batch size: 19, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:00:18,306 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.480e+02 3.014e+02 3.999e+02 8.436e+02, threshold=6.027e+02, percent-clipped=4.0 +2023-02-06 14:00:25,996 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6739, 1.8056, 1.6763, 2.3433, 1.1820, 1.4549, 1.6940, 1.9414], + device='cuda:1'), covar=tensor([0.0721, 0.0802, 0.0899, 0.0407, 0.1011, 0.1274, 0.0735, 0.0668], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0209, 0.0252, 0.0211, 0.0216, 0.0251, 0.0256, 0.0215], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:00:45,637 INFO [train.py:901] (1/4) Epoch 13, batch 6100, loss[loss=0.2628, simple_loss=0.3391, pruned_loss=0.09328, over 8367.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.304, pruned_loss=0.07437, over 1617006.31 frames. ], batch size: 49, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:00:58,586 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103116.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:01:14,163 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 14:01:19,578 INFO [train.py:901] (1/4) Epoch 13, batch 6150, loss[loss=0.1956, simple_loss=0.2728, pruned_loss=0.05921, over 7929.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3036, pruned_loss=0.07443, over 1618252.01 frames. ], batch size: 20, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:01:21,559 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:01:26,205 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.434e+02 3.117e+02 4.172e+02 7.466e+02, threshold=6.235e+02, percent-clipped=2.0 +2023-02-06 14:01:46,186 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6754, 1.5788, 2.2026, 1.2340, 1.8259, 2.4129, 2.3704, 2.1247], + device='cuda:1'), covar=tensor([0.0798, 0.1082, 0.0641, 0.1697, 0.1230, 0.0308, 0.0780, 0.0547], + device='cuda:1'), in_proj_covar=tensor([0.0268, 0.0303, 0.0265, 0.0297, 0.0278, 0.0239, 0.0361, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:01:51,050 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4909, 1.8152, 4.4264, 1.9393, 2.3672, 5.0597, 5.0814, 4.4041], + device='cuda:1'), covar=tensor([0.1038, 0.1511, 0.0280, 0.1918, 0.1177, 0.0168, 0.0404, 0.0528], + device='cuda:1'), in_proj_covar=tensor([0.0267, 0.0301, 0.0264, 0.0296, 0.0277, 0.0239, 0.0359, 0.0294], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:01:55,081 INFO [train.py:901] (1/4) Epoch 13, batch 6200, loss[loss=0.2595, simple_loss=0.3376, pruned_loss=0.09066, over 8508.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3043, pruned_loss=0.07516, over 1617944.44 frames. 
], batch size: 26, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:02:06,145 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:02:31,003 INFO [train.py:901] (1/4) Epoch 13, batch 6250, loss[loss=0.2751, simple_loss=0.3394, pruned_loss=0.1054, over 6622.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3032, pruned_loss=0.07501, over 1611738.03 frames. ], batch size: 71, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:02:32,368 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103249.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:02:37,850 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.482e+02 2.950e+02 3.630e+02 6.819e+02, threshold=5.900e+02, percent-clipped=4.0 +2023-02-06 14:03:06,048 INFO [train.py:901] (1/4) Epoch 13, batch 6300, loss[loss=0.2327, simple_loss=0.3137, pruned_loss=0.07586, over 8094.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3036, pruned_loss=0.07461, over 1615179.96 frames. ], batch size: 21, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:03:27,932 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:03:40,543 INFO [train.py:901] (1/4) Epoch 13, batch 6350, loss[loss=0.1863, simple_loss=0.2658, pruned_loss=0.05336, over 7777.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.305, pruned_loss=0.0757, over 1616704.81 frames. ], batch size: 19, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:03:48,232 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.547e+02 3.093e+02 3.716e+02 8.603e+02, threshold=6.185e+02, percent-clipped=3.0 +2023-02-06 14:03:52,991 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:04:02,165 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3838, 4.3382, 3.9687, 1.6962, 3.9419, 3.9485, 4.0660, 3.5600], + device='cuda:1'), covar=tensor([0.0793, 0.0595, 0.1055, 0.5221, 0.0844, 0.0877, 0.1118, 0.1003], + device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0386, 0.0395, 0.0491, 0.0390, 0.0388, 0.0380, 0.0340], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:04:06,343 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103384.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:04:11,086 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9697, 2.2509, 1.8889, 2.7645, 1.2699, 1.5918, 1.8914, 2.2349], + device='cuda:1'), covar=tensor([0.0737, 0.0738, 0.0880, 0.0337, 0.1207, 0.1409, 0.0917, 0.0682], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0208, 0.0252, 0.0212, 0.0216, 0.0252, 0.0256, 0.0215], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:04:14,877 INFO [train.py:901] (1/4) Epoch 13, batch 6400, loss[loss=0.2598, simple_loss=0.3157, pruned_loss=0.102, over 7919.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3051, pruned_loss=0.07579, over 1613534.99 frames. 
], batch size: 20, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:04:36,360 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7991, 1.9951, 2.3125, 1.5292, 2.3556, 1.6402, 0.8105, 1.9605], + device='cuda:1'), covar=tensor([0.0443, 0.0243, 0.0176, 0.0367, 0.0292, 0.0595, 0.0588, 0.0216], + device='cuda:1'), in_proj_covar=tensor([0.0407, 0.0341, 0.0293, 0.0402, 0.0333, 0.0491, 0.0365, 0.0370], + device='cuda:1'), out_proj_covar=tensor([1.1384e-04, 9.3002e-05, 7.9762e-05, 1.1053e-04, 9.1891e-05, 1.4560e-04, + 1.0235e-04, 1.0221e-04], device='cuda:1') +2023-02-06 14:04:49,452 INFO [train.py:901] (1/4) Epoch 13, batch 6450, loss[loss=0.3192, simple_loss=0.3779, pruned_loss=0.1303, over 7096.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.304, pruned_loss=0.07567, over 1609893.69 frames. ], batch size: 74, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:04:56,176 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.528e+02 3.186e+02 3.863e+02 6.544e+02, threshold=6.372e+02, percent-clipped=1.0 +2023-02-06 14:04:58,239 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103460.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:05:22,249 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:05:24,240 INFO [train.py:901] (1/4) Epoch 13, batch 6500, loss[loss=0.2008, simple_loss=0.2896, pruned_loss=0.05599, over 8331.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3037, pruned_loss=0.07462, over 1614106.17 frames. ], batch size: 25, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:05:30,977 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6463, 1.9122, 2.1609, 1.3052, 2.2481, 1.4486, 0.6415, 1.8771], + device='cuda:1'), covar=tensor([0.0497, 0.0298, 0.0207, 0.0447, 0.0284, 0.0738, 0.0666, 0.0220], + device='cuda:1'), in_proj_covar=tensor([0.0410, 0.0343, 0.0295, 0.0404, 0.0333, 0.0494, 0.0367, 0.0372], + device='cuda:1'), out_proj_covar=tensor([1.1472e-04, 9.3462e-05, 8.0153e-05, 1.1115e-04, 9.1904e-05, 1.4647e-04, + 1.0306e-04, 1.0281e-04], device='cuda:1') +2023-02-06 14:05:32,164 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6819, 1.3071, 1.5687, 1.2443, 0.9085, 1.3607, 1.4652, 1.3977], + device='cuda:1'), covar=tensor([0.0542, 0.1308, 0.1700, 0.1481, 0.0595, 0.1513, 0.0698, 0.0645], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0155, 0.0100, 0.0161, 0.0113, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 14:05:58,666 INFO [train.py:901] (1/4) Epoch 13, batch 6550, loss[loss=0.176, simple_loss=0.2592, pruned_loss=0.04644, over 7975.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3041, pruned_loss=0.07448, over 1616088.10 frames. ], batch size: 21, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:06:05,488 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.442e+02 3.089e+02 4.027e+02 9.292e+02, threshold=6.177e+02, percent-clipped=8.0 +2023-02-06 14:06:18,304 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103575.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:24,184 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 14:06:24,389 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:34,121 INFO [train.py:901] (1/4) Epoch 13, batch 6600, loss[loss=0.1976, simple_loss=0.2703, pruned_loss=0.06246, over 7530.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3032, pruned_loss=0.07426, over 1613306.88 frames. ], batch size: 18, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:06:42,481 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:42,507 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:43,685 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 14:06:49,823 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:07:07,869 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:07:09,058 INFO [train.py:901] (1/4) Epoch 13, batch 6650, loss[loss=0.2411, simple_loss=0.3093, pruned_loss=0.08645, over 7812.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.304, pruned_loss=0.07457, over 1611676.88 frames. ], batch size: 20, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:07:16,597 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.459e+02 2.800e+02 3.637e+02 6.016e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-06 14:07:38,061 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4329, 2.6167, 1.8429, 2.1709, 2.1553, 1.4925, 1.9926, 2.0275], + device='cuda:1'), covar=tensor([0.1396, 0.0331, 0.0973, 0.0662, 0.0681, 0.1426, 0.0897, 0.0991], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0236, 0.0321, 0.0298, 0.0298, 0.0326, 0.0343, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:07:43,986 INFO [train.py:901] (1/4) Epoch 13, batch 6700, loss[loss=0.2771, simple_loss=0.3324, pruned_loss=0.111, over 6817.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3035, pruned_loss=0.07424, over 1610721.83 frames. ], batch size: 15, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:07:54,502 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9846, 2.3485, 1.8198, 3.0278, 1.3705, 1.7007, 2.0000, 2.3629], + device='cuda:1'), covar=tensor([0.0800, 0.0851, 0.1047, 0.0338, 0.1225, 0.1428, 0.0921, 0.0801], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0205, 0.0250, 0.0209, 0.0214, 0.0249, 0.0253, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:08:06,482 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103728.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:08:19,449 INFO [train.py:901] (1/4) Epoch 13, batch 6750, loss[loss=0.2439, simple_loss=0.3249, pruned_loss=0.08142, over 8475.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3035, pruned_loss=0.07436, over 1613693.00 frames. 
], batch size: 28, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:08:26,130 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.551e+02 3.234e+02 3.983e+02 1.044e+03, threshold=6.469e+02, percent-clipped=6.0 +2023-02-06 14:08:26,993 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:08:44,329 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:08:53,553 INFO [train.py:901] (1/4) Epoch 13, batch 6800, loss[loss=0.2687, simple_loss=0.3525, pruned_loss=0.09242, over 8487.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3054, pruned_loss=0.07581, over 1613003.67 frames. ], batch size: 28, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:08:56,499 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9889, 2.1443, 1.7376, 2.5871, 1.2581, 1.6266, 1.7631, 2.1174], + device='cuda:1'), covar=tensor([0.0696, 0.0778, 0.0886, 0.0475, 0.1165, 0.1227, 0.0922, 0.0829], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0205, 0.0249, 0.0208, 0.0212, 0.0248, 0.0252, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:08:58,345 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 14:09:17,173 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103831.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:25,225 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103843.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:09:27,548 INFO [train.py:901] (1/4) Epoch 13, batch 6850, loss[loss=0.2452, simple_loss=0.3357, pruned_loss=0.07735, over 8605.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.305, pruned_loss=0.07521, over 1612265.08 frames. ], batch size: 31, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:09:33,737 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:34,148 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.955e+02 2.670e+02 3.153e+02 3.957e+02 9.275e+02, threshold=6.306e+02, percent-clipped=2.0 +2023-02-06 14:09:40,247 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:44,812 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 14:09:51,041 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0260, 4.0290, 2.7405, 2.9161, 2.9276, 2.3365, 3.0129, 3.2046], + device='cuda:1'), covar=tensor([0.1687, 0.0352, 0.0834, 0.0684, 0.0656, 0.1236, 0.0898, 0.1066], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0234, 0.0318, 0.0295, 0.0298, 0.0324, 0.0340, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:09:57,527 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:10:00,233 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103894.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:10:02,103 INFO [train.py:901] (1/4) Epoch 13, batch 6900, loss[loss=0.1866, simple_loss=0.2741, pruned_loss=0.04961, over 7809.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3046, pruned_loss=0.07514, over 1610967.94 frames. ], batch size: 20, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:10:35,828 INFO [train.py:901] (1/4) Epoch 13, batch 6950, loss[loss=0.184, simple_loss=0.2558, pruned_loss=0.05606, over 7265.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3037, pruned_loss=0.07428, over 1612360.16 frames. ], batch size: 16, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:10:43,024 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.539e+02 3.074e+02 3.917e+02 9.810e+02, threshold=6.147e+02, percent-clipped=9.0 +2023-02-06 14:10:53,178 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 14:11:09,724 INFO [train.py:901] (1/4) Epoch 13, batch 7000, loss[loss=0.2328, simple_loss=0.3136, pruned_loss=0.07606, over 8291.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.303, pruned_loss=0.07413, over 1611955.82 frames. ], batch size: 23, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:11:18,192 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104008.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:11:23,553 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:11:39,557 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8401, 1.3894, 5.9069, 2.0986, 5.3379, 5.0311, 5.4906, 5.3996], + device='cuda:1'), covar=tensor([0.0474, 0.4770, 0.0379, 0.3469, 0.0944, 0.0711, 0.0417, 0.0520], + device='cuda:1'), in_proj_covar=tensor([0.0522, 0.0589, 0.0613, 0.0560, 0.0635, 0.0538, 0.0530, 0.0595], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 14:11:44,820 INFO [train.py:901] (1/4) Epoch 13, batch 7050, loss[loss=0.2341, simple_loss=0.3156, pruned_loss=0.07626, over 8458.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3036, pruned_loss=0.07452, over 1611346.67 frames. 
], batch size: 27, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:11:51,571 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1868, 4.1097, 3.7686, 1.8833, 3.7301, 3.7817, 3.7930, 3.5607], + device='cuda:1'), covar=tensor([0.0708, 0.0621, 0.1031, 0.4338, 0.0839, 0.0835, 0.1317, 0.0791], + device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0389, 0.0400, 0.0491, 0.0393, 0.0393, 0.0385, 0.0342], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:11:52,789 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.385e+02 2.879e+02 3.637e+02 6.044e+02, threshold=5.759e+02, percent-clipped=0.0 +2023-02-06 14:11:58,855 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:18,829 INFO [train.py:901] (1/4) Epoch 13, batch 7100, loss[loss=0.2103, simple_loss=0.2949, pruned_loss=0.06288, over 8329.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3047, pruned_loss=0.07474, over 1612168.72 frames. ], batch size: 25, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:12:21,108 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104099.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:12:22,946 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:37,506 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104124.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:12:39,433 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104127.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:45,125 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8414, 1.7509, 2.5431, 1.7767, 1.3940, 2.5535, 0.6404, 1.4691], + device='cuda:1'), covar=tensor([0.2592, 0.1410, 0.0327, 0.1784, 0.3191, 0.0348, 0.2653, 0.1713], + device='cuda:1'), in_proj_covar=tensor([0.0172, 0.0175, 0.0107, 0.0220, 0.0255, 0.0110, 0.0164, 0.0171], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 14:12:52,701 INFO [train.py:901] (1/4) Epoch 13, batch 7150, loss[loss=0.2367, simple_loss=0.3283, pruned_loss=0.07256, over 8342.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3048, pruned_loss=0.07474, over 1614169.87 frames. ], batch size: 26, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:13:00,089 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.541e+02 2.991e+02 4.071e+02 7.912e+02, threshold=5.982e+02, percent-clipped=4.0 +2023-02-06 14:13:16,456 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6012, 4.5992, 4.1487, 2.2819, 4.0964, 4.3742, 4.2217, 4.0912], + device='cuda:1'), covar=tensor([0.0681, 0.0482, 0.0920, 0.4085, 0.0770, 0.0851, 0.1154, 0.0742], + device='cuda:1'), in_proj_covar=tensor([0.0476, 0.0391, 0.0404, 0.0493, 0.0394, 0.0395, 0.0388, 0.0344], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:13:27,756 INFO [train.py:901] (1/4) Epoch 13, batch 7200, loss[loss=0.2185, simple_loss=0.303, pruned_loss=0.06699, over 8532.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3042, pruned_loss=0.07451, over 1615503.22 frames. 
], batch size: 28, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:13:42,128 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104217.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:13:55,690 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104238.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:13:56,859 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 14:13:58,506 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:14:01,688 INFO [train.py:901] (1/4) Epoch 13, batch 7250, loss[loss=0.2751, simple_loss=0.3254, pruned_loss=0.1124, over 7120.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3052, pruned_loss=0.07555, over 1612730.58 frames. ], batch size: 71, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:14:09,625 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.469e+02 3.063e+02 3.939e+02 8.277e+02, threshold=6.126e+02, percent-clipped=7.0 +2023-02-06 14:14:37,316 INFO [train.py:901] (1/4) Epoch 13, batch 7300, loss[loss=0.2621, simple_loss=0.3364, pruned_loss=0.09392, over 8339.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.305, pruned_loss=0.07502, over 1610109.10 frames. ], batch size: 26, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:14:38,126 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1017, 1.6455, 1.6997, 1.5474, 0.8465, 1.5148, 1.7270, 1.5707], + device='cuda:1'), covar=tensor([0.0484, 0.1191, 0.1672, 0.1313, 0.0635, 0.1499, 0.0701, 0.0634], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 14:15:11,542 INFO [train.py:901] (1/4) Epoch 13, batch 7350, loss[loss=0.2003, simple_loss=0.2801, pruned_loss=0.06022, over 7649.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3036, pruned_loss=0.07391, over 1608934.93 frames. ], batch size: 19, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:14,979 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:15,793 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:19,099 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.439e+02 3.043e+02 3.823e+02 6.373e+02, threshold=6.086e+02, percent-clipped=2.0 +2023-02-06 14:15:19,892 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:33,234 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 14:15:46,158 INFO [train.py:901] (1/4) Epoch 13, batch 7400, loss[loss=0.2551, simple_loss=0.3373, pruned_loss=0.0864, over 8452.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3049, pruned_loss=0.07465, over 1611586.02 frames. ], batch size: 29, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:53,245 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-06 14:15:56,725 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:21,099 INFO [train.py:901] (1/4) Epoch 13, batch 7450, loss[loss=0.225, simple_loss=0.302, pruned_loss=0.07402, over 8333.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3036, pruned_loss=0.07424, over 1612570.36 frames. ], batch size: 25, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:16:29,260 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 2.494e+02 3.000e+02 3.814e+02 1.100e+03, threshold=5.999e+02, percent-clipped=4.0 +2023-02-06 14:16:33,244 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 14:16:35,445 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104467.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:39,565 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:40,155 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:48,678 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4991, 2.9039, 1.7808, 2.2095, 2.2294, 1.6217, 2.1082, 2.2376], + device='cuda:1'), covar=tensor([0.1330, 0.0248, 0.1060, 0.0649, 0.0711, 0.1267, 0.0861, 0.0911], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0237, 0.0322, 0.0297, 0.0301, 0.0322, 0.0342, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:16:55,884 INFO [train.py:901] (1/4) Epoch 13, batch 7500, loss[loss=0.214, simple_loss=0.2959, pruned_loss=0.06605, over 8329.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3023, pruned_loss=0.0739, over 1612790.22 frames. ], batch size: 25, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:16:56,750 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:56,770 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:57,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1862, 2.3188, 2.0109, 2.9944, 1.4132, 1.6270, 2.1054, 2.5404], + device='cuda:1'), covar=tensor([0.0715, 0.0934, 0.0946, 0.0312, 0.1198, 0.1484, 0.1028, 0.0749], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0209, 0.0253, 0.0210, 0.0215, 0.0253, 0.0256, 0.0216], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:17:14,590 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:16,624 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:25,703 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-06 14:17:28,999 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 14:17:30,657 INFO [train.py:901] (1/4) Epoch 13, batch 7550, loss[loss=0.2832, simple_loss=0.3465, pruned_loss=0.11, over 8477.00 frames. 
], tot_loss[loss=0.2253, simple_loss=0.3023, pruned_loss=0.07419, over 1615460.61 frames. ], batch size: 25, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:17:37,882 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.482e+02 3.042e+02 4.105e+02 9.709e+02, threshold=6.085e+02, percent-clipped=7.0 +2023-02-06 14:18:05,094 INFO [train.py:901] (1/4) Epoch 13, batch 7600, loss[loss=0.2569, simple_loss=0.3299, pruned_loss=0.09198, over 8665.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3035, pruned_loss=0.07511, over 1615289.11 frames. ], batch size: 34, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:13,224 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:30,775 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:40,157 INFO [train.py:901] (1/4) Epoch 13, batch 7650, loss[loss=0.2851, simple_loss=0.3481, pruned_loss=0.111, over 8596.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3043, pruned_loss=0.07551, over 1612597.84 frames. ], batch size: 34, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:47,611 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 2.638e+02 3.280e+02 4.340e+02 1.130e+03, threshold=6.560e+02, percent-clipped=9.0 +2023-02-06 14:19:14,840 INFO [train.py:901] (1/4) Epoch 13, batch 7700, loss[loss=0.2249, simple_loss=0.3051, pruned_loss=0.07238, over 8460.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3041, pruned_loss=0.07482, over 1616675.52 frames. ], batch size: 25, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:33,199 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:37,759 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 14:19:37,981 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:49,822 INFO [train.py:901] (1/4) Epoch 13, batch 7750, loss[loss=0.2378, simple_loss=0.3207, pruned_loss=0.07747, over 8242.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3027, pruned_loss=0.0737, over 1617076.12 frames. ], batch size: 22, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:50,599 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104748.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:55,981 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:57,814 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.530e+02 2.944e+02 3.392e+02 9.198e+02, threshold=5.888e+02, percent-clipped=3.0 +2023-02-06 14:20:14,007 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:24,829 INFO [train.py:901] (1/4) Epoch 13, batch 7800, loss[loss=0.2052, simple_loss=0.2784, pruned_loss=0.06604, over 7193.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3016, pruned_loss=0.07272, over 1614752.14 frames. 
], batch size: 16, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:20:31,892 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:57,827 INFO [train.py:901] (1/4) Epoch 13, batch 7850, loss[loss=0.2086, simple_loss=0.2982, pruned_loss=0.05951, over 8334.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3005, pruned_loss=0.07251, over 1612943.72 frames. ], batch size: 25, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:05,226 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.619e+02 3.074e+02 4.074e+02 1.012e+03, threshold=6.148e+02, percent-clipped=5.0 +2023-02-06 14:21:30,904 INFO [train.py:901] (1/4) Epoch 13, batch 7900, loss[loss=0.1958, simple_loss=0.2786, pruned_loss=0.05649, over 8242.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3008, pruned_loss=0.07241, over 1616187.71 frames. ], batch size: 22, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:46,719 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 14:22:04,224 INFO [train.py:901] (1/4) Epoch 13, batch 7950, loss[loss=0.2752, simple_loss=0.3392, pruned_loss=0.1056, over 8532.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3021, pruned_loss=0.07308, over 1617901.23 frames. ], batch size: 39, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:11,310 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.521e+02 3.027e+02 3.866e+02 6.555e+02, threshold=6.053e+02, percent-clipped=2.0 +2023-02-06 14:22:20,625 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6383, 1.4659, 4.8953, 1.7922, 4.3113, 4.0358, 4.3884, 4.2646], + device='cuda:1'), covar=tensor([0.0485, 0.4243, 0.0365, 0.3405, 0.0905, 0.0738, 0.0484, 0.0571], + device='cuda:1'), in_proj_covar=tensor([0.0522, 0.0586, 0.0609, 0.0553, 0.0634, 0.0539, 0.0527, 0.0592], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 14:22:37,769 INFO [train.py:901] (1/4) Epoch 13, batch 8000, loss[loss=0.2305, simple_loss=0.3078, pruned_loss=0.07664, over 8259.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3022, pruned_loss=0.0734, over 1618746.97 frames. ], batch size: 24, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:49,399 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 14:23:10,573 INFO [train.py:901] (1/4) Epoch 13, batch 8050, loss[loss=0.199, simple_loss=0.2772, pruned_loss=0.06035, over 7920.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.302, pruned_loss=0.07452, over 1604250.89 frames. ], batch size: 20, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:23:18,080 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.417e+02 2.946e+02 3.621e+02 6.025e+02, threshold=5.892e+02, percent-clipped=0.0 +2023-02-06 14:23:50,380 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 14:23:54,150 INFO [train.py:901] (1/4) Epoch 14, batch 0, loss[loss=0.233, simple_loss=0.3125, pruned_loss=0.07672, over 8514.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3125, pruned_loss=0.07672, over 8514.00 frames. 
], batch size: 26, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:23:54,150 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 14:24:01,446 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3024, 1.4680, 2.2207, 1.0707, 1.6859, 1.5601, 1.3112, 1.6648], + device='cuda:1'), covar=tensor([0.1569, 0.2373, 0.0676, 0.3839, 0.1545, 0.2718, 0.1901, 0.1817], + device='cuda:1'), in_proj_covar=tensor([0.0499, 0.0545, 0.0539, 0.0600, 0.0623, 0.0566, 0.0493, 0.0619], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:24:05,201 INFO [train.py:935] (1/4) Epoch 14, validation: loss=0.184, simple_loss=0.2839, pruned_loss=0.04201, over 944034.00 frames. +2023-02-06 14:24:05,202 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 14:24:16,398 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 14:24:21,242 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 14:24:22,037 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.1102, 1.7713, 6.1070, 2.1963, 5.5557, 5.2153, 5.7232, 5.6111], + device='cuda:1'), covar=tensor([0.0383, 0.4288, 0.0313, 0.3153, 0.0825, 0.0698, 0.0378, 0.0417], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0580, 0.0608, 0.0551, 0.0632, 0.0536, 0.0523, 0.0589], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 14:24:38,586 INFO [train.py:901] (1/4) Epoch 14, batch 50, loss[loss=0.2062, simple_loss=0.274, pruned_loss=0.06918, over 7226.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.306, pruned_loss=0.07744, over 362828.66 frames. ], batch size: 16, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:24:54,214 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9785, 1.5214, 3.3754, 1.4167, 2.1604, 3.7332, 3.8094, 3.1415], + device='cuda:1'), covar=tensor([0.1094, 0.1640, 0.0326, 0.2056, 0.1071, 0.0219, 0.0440, 0.0592], + device='cuda:1'), in_proj_covar=tensor([0.0272, 0.0304, 0.0265, 0.0295, 0.0281, 0.0241, 0.0362, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:24:54,762 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 14:24:58,151 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.684e+02 3.092e+02 3.835e+02 7.852e+02, threshold=6.183e+02, percent-clipped=3.0 +2023-02-06 14:25:02,497 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6063, 1.9684, 3.2762, 1.3832, 2.2449, 2.0726, 1.7088, 2.2644], + device='cuda:1'), covar=tensor([0.1703, 0.2291, 0.0667, 0.4002, 0.1716, 0.2898, 0.1960, 0.2076], + device='cuda:1'), in_proj_covar=tensor([0.0498, 0.0541, 0.0535, 0.0597, 0.0623, 0.0565, 0.0490, 0.0615], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:25:14,421 INFO [train.py:901] (1/4) Epoch 14, batch 100, loss[loss=0.2409, simple_loss=0.3137, pruned_loss=0.08406, over 8588.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3072, pruned_loss=0.07637, over 645941.16 frames. 
], batch size: 34, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:25:17,790 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 14:25:48,643 INFO [train.py:901] (1/4) Epoch 14, batch 150, loss[loss=0.1746, simple_loss=0.244, pruned_loss=0.0526, over 7432.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3048, pruned_loss=0.07569, over 855095.22 frames. ], batch size: 17, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:08,291 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.384e+02 2.990e+02 3.742e+02 5.781e+02, threshold=5.980e+02, percent-clipped=0.0 +2023-02-06 14:26:14,013 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 14:26:22,055 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 14:26:23,055 INFO [train.py:901] (1/4) Epoch 14, batch 200, loss[loss=0.1783, simple_loss=0.2587, pruned_loss=0.04898, over 8151.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3053, pruned_loss=0.07533, over 1029338.56 frames. ], batch size: 22, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:44,064 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 14:26:47,400 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1924, 1.3000, 1.5792, 1.2285, 0.7354, 1.3957, 1.2066, 0.9124], + device='cuda:1'), covar=tensor([0.0548, 0.1205, 0.1605, 0.1399, 0.0555, 0.1423, 0.0648, 0.0711], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0150, 0.0188, 0.0155, 0.0100, 0.0160, 0.0112, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 14:26:58,947 INFO [train.py:901] (1/4) Epoch 14, batch 250, loss[loss=0.2038, simple_loss=0.2886, pruned_loss=0.05952, over 7646.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.07495, over 1156875.40 frames. ], batch size: 19, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:07,611 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 14:27:15,945 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 14:27:18,051 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.546e+02 3.157e+02 4.204e+02 9.163e+02, threshold=6.313e+02, percent-clipped=6.0 +2023-02-06 14:27:31,870 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7758, 1.9105, 2.1584, 1.3780, 2.2536, 1.5188, 0.6931, 1.8973], + device='cuda:1'), covar=tensor([0.0406, 0.0260, 0.0168, 0.0397, 0.0255, 0.0647, 0.0626, 0.0209], + device='cuda:1'), in_proj_covar=tensor([0.0406, 0.0344, 0.0296, 0.0400, 0.0330, 0.0488, 0.0365, 0.0372], + device='cuda:1'), out_proj_covar=tensor([1.1341e-04, 9.3573e-05, 8.0427e-05, 1.0972e-04, 9.0763e-05, 1.4409e-04, + 1.0227e-04, 1.0270e-04], device='cuda:1') +2023-02-06 14:27:33,664 INFO [train.py:901] (1/4) Epoch 14, batch 300, loss[loss=0.2402, simple_loss=0.3197, pruned_loss=0.08037, over 8547.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3058, pruned_loss=0.0762, over 1255229.59 frames. 
], batch size: 31, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:52,817 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:28:00,866 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7035, 1.7422, 1.5637, 1.9939, 1.3317, 1.4824, 1.6765, 1.9042], + device='cuda:1'), covar=tensor([0.0627, 0.0724, 0.0776, 0.0565, 0.0975, 0.1037, 0.0673, 0.0565], + device='cuda:1'), in_proj_covar=tensor([0.0237, 0.0210, 0.0255, 0.0214, 0.0215, 0.0256, 0.0261, 0.0218], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:28:06,568 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 14:28:09,631 INFO [train.py:901] (1/4) Epoch 14, batch 350, loss[loss=0.2295, simple_loss=0.3132, pruned_loss=0.07289, over 8820.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.305, pruned_loss=0.07504, over 1337775.00 frames. ], batch size: 39, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:28:28,606 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 2.437e+02 2.818e+02 3.446e+02 5.751e+02, threshold=5.636e+02, percent-clipped=0.0 +2023-02-06 14:28:43,600 INFO [train.py:901] (1/4) Epoch 14, batch 400, loss[loss=0.2132, simple_loss=0.2739, pruned_loss=0.07625, over 7406.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3041, pruned_loss=0.07423, over 1398194.16 frames. ], batch size: 17, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:00,989 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:09,894 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5470, 2.7884, 1.9003, 2.1918, 2.2964, 1.5642, 2.0387, 2.1020], + device='cuda:1'), covar=tensor([0.1481, 0.0354, 0.1031, 0.0648, 0.0684, 0.1419, 0.0942, 0.1100], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0240, 0.0322, 0.0299, 0.0303, 0.0325, 0.0343, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:29:13,242 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:20,742 INFO [train.py:901] (1/4) Epoch 14, batch 450, loss[loss=0.1835, simple_loss=0.271, pruned_loss=0.04799, over 8238.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3033, pruned_loss=0.07376, over 1450177.27 frames. ], batch size: 22, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:40,049 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.497e+02 2.804e+02 3.770e+02 6.336e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 14:29:55,212 INFO [train.py:901] (1/4) Epoch 14, batch 500, loss[loss=0.2206, simple_loss=0.2977, pruned_loss=0.07174, over 8615.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3034, pruned_loss=0.07367, over 1490785.12 frames. ], batch size: 34, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:29,387 INFO [train.py:901] (1/4) Epoch 14, batch 550, loss[loss=0.2523, simple_loss=0.3207, pruned_loss=0.09197, over 7990.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3031, pruned_loss=0.0743, over 1517702.56 frames. 
], batch size: 21, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:50,299 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.442e+02 2.933e+02 3.700e+02 8.163e+02, threshold=5.867e+02, percent-clipped=3.0 +2023-02-06 14:31:05,192 INFO [train.py:901] (1/4) Epoch 14, batch 600, loss[loss=0.2202, simple_loss=0.2993, pruned_loss=0.07057, over 8239.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3029, pruned_loss=0.07371, over 1537580.09 frames. ], batch size: 22, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:18,452 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 14:31:39,813 INFO [train.py:901] (1/4) Epoch 14, batch 650, loss[loss=0.2408, simple_loss=0.3064, pruned_loss=0.08759, over 7650.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3021, pruned_loss=0.07287, over 1556194.93 frames. ], batch size: 19, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:54,386 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:01,339 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.402e+02 3.000e+02 3.711e+02 7.109e+02, threshold=6.000e+02, percent-clipped=4.0 +2023-02-06 14:32:17,053 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.05 vs. limit=5.0 +2023-02-06 14:32:17,354 INFO [train.py:901] (1/4) Epoch 14, batch 700, loss[loss=0.2255, simple_loss=0.3048, pruned_loss=0.07306, over 8483.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.302, pruned_loss=0.0727, over 1571424.09 frames. ], batch size: 26, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:32:37,865 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:42,735 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105817.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:51,384 INFO [train.py:901] (1/4) Epoch 14, batch 750, loss[loss=0.2318, simple_loss=0.3071, pruned_loss=0.07819, over 8087.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3019, pruned_loss=0.07331, over 1583720.85 frames. ], batch size: 21, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:32:51,591 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4877, 2.8506, 1.8330, 2.1555, 2.1165, 1.5774, 1.8796, 2.3504], + device='cuda:1'), covar=tensor([0.1694, 0.0484, 0.1318, 0.0830, 0.0913, 0.1657, 0.1453, 0.0981], + device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0236, 0.0318, 0.0295, 0.0299, 0.0320, 0.0337, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:33:03,871 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:06,441 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 14:33:11,302 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.459e+02 2.898e+02 3.725e+02 7.154e+02, threshold=5.796e+02, percent-clipped=4.0 +2023-02-06 14:33:15,486 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:16,040 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. 
Duration: 26.32775 +2023-02-06 14:33:16,238 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:27,181 INFO [train.py:901] (1/4) Epoch 14, batch 800, loss[loss=0.223, simple_loss=0.3105, pruned_loss=0.0678, over 8467.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3016, pruned_loss=0.07299, over 1590389.87 frames. ], batch size: 29, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:34:02,180 INFO [train.py:901] (1/4) Epoch 14, batch 850, loss[loss=0.1704, simple_loss=0.2481, pruned_loss=0.04633, over 7976.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3005, pruned_loss=0.07249, over 1594180.84 frames. ], batch size: 21, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:34:20,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.477e+02 2.961e+02 4.061e+02 6.411e+02, threshold=5.921e+02, percent-clipped=4.0 +2023-02-06 14:34:24,609 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:36,585 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:37,077 INFO [train.py:901] (1/4) Epoch 14, batch 900, loss[loss=0.1814, simple_loss=0.2539, pruned_loss=0.05446, over 7538.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3004, pruned_loss=0.07236, over 1596793.22 frames. ], batch size: 18, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:03,393 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1808, 1.0435, 1.2460, 1.1029, 0.9742, 1.3143, 0.0262, 0.9547], + device='cuda:1'), covar=tensor([0.2172, 0.1594, 0.0620, 0.1048, 0.3256, 0.0569, 0.2852, 0.1615], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0177, 0.0108, 0.0220, 0.0256, 0.0111, 0.0164, 0.0174], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 14:35:14,900 INFO [train.py:901] (1/4) Epoch 14, batch 950, loss[loss=0.2176, simple_loss=0.3019, pruned_loss=0.06666, over 8190.00 frames. ], tot_loss[loss=0.224, simple_loss=0.302, pruned_loss=0.07303, over 1604727.31 frames. ], batch size: 23, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:33,973 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.617e+02 3.202e+02 4.119e+02 6.844e+02, threshold=6.403e+02, percent-clipped=3.0 +2023-02-06 14:35:38,929 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 14:35:49,306 INFO [train.py:901] (1/4) Epoch 14, batch 1000, loss[loss=0.2385, simple_loss=0.3137, pruned_loss=0.08164, over 7983.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3018, pruned_loss=0.07319, over 1608731.87 frames. ], batch size: 21, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:00,603 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:14,292 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 14:36:20,062 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:26,961 INFO [train.py:901] (1/4) Epoch 14, batch 1050, loss[loss=0.1939, simple_loss=0.262, pruned_loss=0.06286, over 7707.00 frames. 
], tot_loss[loss=0.2239, simple_loss=0.3017, pruned_loss=0.07305, over 1608683.58 frames. ], batch size: 18, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:26,975 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 14:36:37,960 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:43,490 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:46,240 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.417e+02 2.951e+02 3.593e+02 9.096e+02, threshold=5.903e+02, percent-clipped=2.0 +2023-02-06 14:36:48,433 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:01,608 INFO [train.py:901] (1/4) Epoch 14, batch 1100, loss[loss=0.1858, simple_loss=0.265, pruned_loss=0.05331, over 7800.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3013, pruned_loss=0.07288, over 1611066.47 frames. ], batch size: 19, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:29,717 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:35,896 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 14:37:38,713 INFO [train.py:901] (1/4) Epoch 14, batch 1150, loss[loss=0.1847, simple_loss=0.2702, pruned_loss=0.0496, over 8031.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3005, pruned_loss=0.07232, over 1609560.37 frames. ], batch size: 22, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:42,400 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:46,538 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. limit=2.0 +2023-02-06 14:37:49,104 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:58,401 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.475e+02 3.133e+02 3.919e+02 6.906e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 14:38:00,001 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:06,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106269.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:10,868 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:13,353 INFO [train.py:901] (1/4) Epoch 14, batch 1200, loss[loss=0.2331, simple_loss=0.3119, pruned_loss=0.07715, over 8105.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.301, pruned_loss=0.0729, over 1611484.92 frames. 
], batch size: 23, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:38:17,553 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:42,805 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3012, 1.5717, 4.1594, 1.7866, 2.3592, 4.7226, 4.6881, 4.0111], + device='cuda:1'), covar=tensor([0.1011, 0.1769, 0.0303, 0.1992, 0.1209, 0.0185, 0.0398, 0.0576], + device='cuda:1'), in_proj_covar=tensor([0.0275, 0.0307, 0.0267, 0.0296, 0.0284, 0.0245, 0.0365, 0.0298], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:38:47,948 INFO [train.py:901] (1/4) Epoch 14, batch 1250, loss[loss=0.2498, simple_loss=0.3159, pruned_loss=0.09185, over 7309.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3028, pruned_loss=0.0739, over 1614631.26 frames. ], batch size: 71, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:39:05,959 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:39:08,474 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.539e+02 3.303e+02 4.386e+02 1.450e+03, threshold=6.607e+02, percent-clipped=4.0 +2023-02-06 14:39:24,633 INFO [train.py:901] (1/4) Epoch 14, batch 1300, loss[loss=0.2278, simple_loss=0.3077, pruned_loss=0.07393, over 8569.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3022, pruned_loss=0.07362, over 1615309.55 frames. ], batch size: 31, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:39:55,727 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6775, 1.9104, 2.0812, 1.3381, 2.1841, 1.5056, 0.5367, 1.8136], + device='cuda:1'), covar=tensor([0.0437, 0.0230, 0.0206, 0.0373, 0.0242, 0.0600, 0.0579, 0.0183], + device='cuda:1'), in_proj_covar=tensor([0.0410, 0.0346, 0.0302, 0.0404, 0.0337, 0.0491, 0.0366, 0.0376], + device='cuda:1'), out_proj_covar=tensor([1.1432e-04, 9.3765e-05, 8.2173e-05, 1.1042e-04, 9.2543e-05, 1.4467e-04, + 1.0230e-04, 1.0355e-04], device='cuda:1') +2023-02-06 14:39:58,990 INFO [train.py:901] (1/4) Epoch 14, batch 1350, loss[loss=0.2198, simple_loss=0.3069, pruned_loss=0.06638, over 8466.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3015, pruned_loss=0.07289, over 1618157.73 frames. ], batch size: 25, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:40:05,432 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:19,201 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.550e+02 3.060e+02 3.665e+02 8.767e+02, threshold=6.121e+02, percent-clipped=1.0 +2023-02-06 14:40:29,773 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:35,138 INFO [train.py:901] (1/4) Epoch 14, batch 1400, loss[loss=0.1916, simple_loss=0.2637, pruned_loss=0.0598, over 7553.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3016, pruned_loss=0.07303, over 1615121.44 frames. ], batch size: 18, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:05,456 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-02-06 14:41:07,391 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:10,539 INFO [train.py:901] (1/4) Epoch 14, batch 1450, loss[loss=0.2697, simple_loss=0.344, pruned_loss=0.0977, over 8538.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3024, pruned_loss=0.0734, over 1618269.21 frames. ], batch size: 39, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:11,252 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 14:41:12,179 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:24,668 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106550.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:27,429 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,526 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,959 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.546e+02 3.123e+02 4.151e+02 8.254e+02, threshold=6.246e+02, percent-clipped=6.0 +2023-02-06 14:41:47,568 INFO [train.py:901] (1/4) Epoch 14, batch 1500, loss[loss=0.256, simple_loss=0.3275, pruned_loss=0.09221, over 8717.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3025, pruned_loss=0.07391, over 1613805.52 frames. ], batch size: 34, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,567 INFO [train.py:901] (1/4) Epoch 14, batch 1550, loss[loss=0.2327, simple_loss=0.3215, pruned_loss=0.07194, over 8545.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3029, pruned_loss=0.07407, over 1618927.98 frames. ], batch size: 31, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,643 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:42:41,337 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.593e+02 3.196e+02 4.114e+02 8.054e+02, threshold=6.391e+02, percent-clipped=4.0 +2023-02-06 14:42:52,894 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9790, 1.5144, 3.5327, 1.6430, 2.3846, 3.7970, 3.8848, 3.3130], + device='cuda:1'), covar=tensor([0.1027, 0.1595, 0.0272, 0.1842, 0.0956, 0.0211, 0.0496, 0.0545], + device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0303, 0.0263, 0.0293, 0.0281, 0.0242, 0.0362, 0.0293], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 14:42:56,719 INFO [train.py:901] (1/4) Epoch 14, batch 1600, loss[loss=0.2913, simple_loss=0.355, pruned_loss=0.1138, over 8436.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3037, pruned_loss=0.07441, over 1617879.39 frames. ], batch size: 39, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:43:10,336 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:32,335 INFO [train.py:901] (1/4) Epoch 14, batch 1650, loss[loss=0.2055, simple_loss=0.2813, pruned_loss=0.06485, over 7977.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3031, pruned_loss=0.07419, over 1615698.57 frames. 
], batch size: 21, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:43:35,237 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7700, 1.7800, 2.1008, 1.8024, 1.0537, 1.7995, 2.3832, 2.1605], + device='cuda:1'), covar=tensor([0.0430, 0.1176, 0.1582, 0.1242, 0.0567, 0.1425, 0.0557, 0.0556], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0101, 0.0162, 0.0113, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 14:43:42,577 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:51,909 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.493e+02 3.038e+02 4.078e+02 1.080e+03, threshold=6.076e+02, percent-clipped=3.0 +2023-02-06 14:43:52,351 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.62 vs. limit=5.0 +2023-02-06 14:43:53,535 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6352, 1.8921, 2.0627, 1.2631, 2.1401, 1.4325, 0.5292, 1.8428], + device='cuda:1'), covar=tensor([0.0417, 0.0283, 0.0228, 0.0399, 0.0291, 0.0685, 0.0595, 0.0207], + device='cuda:1'), in_proj_covar=tensor([0.0412, 0.0349, 0.0304, 0.0406, 0.0338, 0.0492, 0.0370, 0.0375], + device='cuda:1'), out_proj_covar=tensor([1.1478e-04, 9.4922e-05, 8.2619e-05, 1.1079e-04, 9.2894e-05, 1.4505e-04, + 1.0328e-04, 1.0334e-04], device='cuda:1') +2023-02-06 14:44:06,421 INFO [train.py:901] (1/4) Epoch 14, batch 1700, loss[loss=0.2273, simple_loss=0.3164, pruned_loss=0.06915, over 8495.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3037, pruned_loss=0.07396, over 1617309.03 frames. ], batch size: 28, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:28,260 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:31,567 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:33,567 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:43,433 INFO [train.py:901] (1/4) Epoch 14, batch 1750, loss[loss=0.2281, simple_loss=0.3177, pruned_loss=0.06931, over 8258.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3029, pruned_loss=0.07338, over 1618845.08 frames. ], batch size: 24, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:47,853 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:45:04,128 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.358e+02 2.865e+02 3.554e+02 7.426e+02, threshold=5.730e+02, percent-clipped=3.0 +2023-02-06 14:45:18,432 INFO [train.py:901] (1/4) Epoch 14, batch 1800, loss[loss=0.1794, simple_loss=0.2613, pruned_loss=0.04872, over 7927.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3002, pruned_loss=0.07226, over 1611413.07 frames. ], batch size: 20, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:45:54,583 INFO [train.py:901] (1/4) Epoch 14, batch 1850, loss[loss=0.2275, simple_loss=0.3088, pruned_loss=0.07313, over 8501.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3016, pruned_loss=0.07281, over 1615881.85 frames. 
], batch size: 26, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:45:55,505 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:16,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.570e+02 3.068e+02 3.847e+02 1.325e+03, threshold=6.136e+02, percent-clipped=4.0 +2023-02-06 14:46:29,529 INFO [train.py:901] (1/4) Epoch 14, batch 1900, loss[loss=0.1959, simple_loss=0.2731, pruned_loss=0.05932, over 7913.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3037, pruned_loss=0.07428, over 1618785.22 frames. ], batch size: 20, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:46:43,885 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:47,082 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 14:46:59,783 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 14:47:01,262 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:03,727 INFO [train.py:901] (1/4) Epoch 14, batch 1950, loss[loss=0.2258, simple_loss=0.3111, pruned_loss=0.07029, over 8327.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3031, pruned_loss=0.0735, over 1620337.14 frames. ], batch size: 25, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:47:19,881 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 14:47:26,063 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.386e+02 2.840e+02 3.483e+02 6.138e+02, threshold=5.681e+02, percent-clipped=1.0 +2023-02-06 14:47:31,983 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:39,105 INFO [train.py:901] (1/4) Epoch 14, batch 2000, loss[loss=0.2224, simple_loss=0.3005, pruned_loss=0.0721, over 7655.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3037, pruned_loss=0.07395, over 1618692.12 frames. ], batch size: 19, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:47:48,656 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:12,599 INFO [train.py:901] (1/4) Epoch 14, batch 2050, loss[loss=0.258, simple_loss=0.3297, pruned_loss=0.09314, over 8526.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3036, pruned_loss=0.07372, over 1618907.16 frames. 
], batch size: 28, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:19,570 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4131, 1.5864, 1.3642, 1.8973, 0.8550, 1.2622, 1.2452, 1.5221], + device='cuda:1'), covar=tensor([0.0801, 0.0701, 0.1019, 0.0480, 0.1106, 0.1356, 0.0794, 0.0734], + device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0203, 0.0245, 0.0207, 0.0211, 0.0247, 0.0251, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:48:23,060 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6422, 1.6218, 2.8064, 1.1638, 2.1518, 2.9683, 3.1137, 2.5039], + device='cuda:1'), covar=tensor([0.1206, 0.1403, 0.0408, 0.2253, 0.0926, 0.0323, 0.0575, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0271, 0.0304, 0.0264, 0.0292, 0.0279, 0.0241, 0.0362, 0.0293], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 14:48:26,611 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9465, 1.6271, 2.1053, 1.8192, 2.0072, 1.9300, 1.6725, 0.7742], + device='cuda:1'), covar=tensor([0.4743, 0.4067, 0.1530, 0.2809, 0.2079, 0.2690, 0.1930, 0.4385], + device='cuda:1'), in_proj_covar=tensor([0.0897, 0.0898, 0.0746, 0.0868, 0.0956, 0.0825, 0.0710, 0.0779], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 14:48:27,933 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1975, 2.4022, 2.0435, 2.9950, 1.3839, 1.8232, 2.0246, 2.5735], + device='cuda:1'), covar=tensor([0.0637, 0.0869, 0.0804, 0.0334, 0.1148, 0.1180, 0.0995, 0.0761], + device='cuda:1'), in_proj_covar=tensor([0.0226, 0.0202, 0.0245, 0.0207, 0.0210, 0.0247, 0.0251, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:48:32,521 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107158.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:48:34,367 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.411e+02 3.055e+02 3.713e+02 7.642e+02, threshold=6.109e+02, percent-clipped=4.0 +2023-02-06 14:48:42,096 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107170.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:49,408 INFO [train.py:901] (1/4) Epoch 14, batch 2100, loss[loss=0.3015, simple_loss=0.3716, pruned_loss=0.1157, over 8640.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3029, pruned_loss=0.07305, over 1621329.06 frames. 
], batch size: 39, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:54,367 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:57,046 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6730, 2.2614, 4.1515, 1.4528, 2.9556, 2.2585, 1.8849, 2.7914], + device='cuda:1'), covar=tensor([0.1737, 0.2141, 0.0694, 0.3808, 0.1547, 0.2727, 0.1821, 0.2204], + device='cuda:1'), in_proj_covar=tensor([0.0496, 0.0542, 0.0535, 0.0592, 0.0618, 0.0559, 0.0487, 0.0614], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:49:00,233 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6389, 1.4330, 2.8568, 1.3460, 2.0695, 3.0888, 3.1448, 2.6019], + device='cuda:1'), covar=tensor([0.1125, 0.1448, 0.0418, 0.2005, 0.0933, 0.0288, 0.0672, 0.0628], + device='cuda:1'), in_proj_covar=tensor([0.0272, 0.0305, 0.0266, 0.0293, 0.0281, 0.0242, 0.0363, 0.0293], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 14:49:11,188 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:49:23,204 INFO [train.py:901] (1/4) Epoch 14, batch 2150, loss[loss=0.2458, simple_loss=0.3286, pruned_loss=0.08152, over 8495.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3016, pruned_loss=0.07242, over 1622343.86 frames. ], batch size: 28, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:49:44,434 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.615e+02 3.041e+02 3.823e+02 8.460e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 14:49:58,912 INFO [train.py:901] (1/4) Epoch 14, batch 2200, loss[loss=0.208, simple_loss=0.2883, pruned_loss=0.0638, over 7978.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.302, pruned_loss=0.07317, over 1616804.10 frames. ], batch size: 21, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:23,273 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6796, 1.9768, 2.1500, 1.2760, 2.3174, 1.4604, 0.7046, 1.9029], + device='cuda:1'), covar=tensor([0.0483, 0.0283, 0.0194, 0.0436, 0.0293, 0.0719, 0.0672, 0.0251], + device='cuda:1'), in_proj_covar=tensor([0.0411, 0.0351, 0.0306, 0.0407, 0.0337, 0.0496, 0.0372, 0.0378], + device='cuda:1'), out_proj_covar=tensor([1.1456e-04, 9.5359e-05, 8.3165e-05, 1.1110e-04, 9.2411e-05, 1.4607e-04, + 1.0394e-04, 1.0421e-04], device='cuda:1') +2023-02-06 14:50:34,524 INFO [train.py:901] (1/4) Epoch 14, batch 2250, loss[loss=0.1862, simple_loss=0.2617, pruned_loss=0.0554, over 7437.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3015, pruned_loss=0.07305, over 1616920.95 frames. ], batch size: 17, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:54,555 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.599e+02 3.319e+02 4.071e+02 1.027e+03, threshold=6.637e+02, percent-clipped=7.0 +2023-02-06 14:51:08,891 INFO [train.py:901] (1/4) Epoch 14, batch 2300, loss[loss=0.1558, simple_loss=0.2393, pruned_loss=0.03619, over 7932.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3005, pruned_loss=0.07215, over 1613857.15 frames. ], batch size: 20, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:51:44,729 INFO [train.py:901] (1/4) Epoch 14, batch 2350, loss[loss=0.2724, simple_loss=0.3418, pruned_loss=0.1015, over 8644.00 frames. 
], tot_loss[loss=0.2233, simple_loss=0.3015, pruned_loss=0.07256, over 1618860.24 frames. ], batch size: 34, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:04,938 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.358e+02 2.889e+02 3.449e+02 7.134e+02, threshold=5.779e+02, percent-clipped=1.0 +2023-02-06 14:52:15,943 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8248, 5.8760, 5.1069, 2.3958, 5.1716, 5.5514, 5.4191, 5.2453], + device='cuda:1'), covar=tensor([0.0484, 0.0360, 0.0875, 0.4218, 0.0636, 0.0638, 0.0931, 0.0670], + device='cuda:1'), in_proj_covar=tensor([0.0483, 0.0400, 0.0404, 0.0496, 0.0398, 0.0400, 0.0384, 0.0345], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:52:18,380 INFO [train.py:901] (1/4) Epoch 14, batch 2400, loss[loss=0.273, simple_loss=0.3262, pruned_loss=0.1099, over 7961.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3017, pruned_loss=0.07302, over 1617841.12 frames. ], batch size: 21, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:34,429 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107502.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:52:43,465 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:53,760 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:54,997 INFO [train.py:901] (1/4) Epoch 14, batch 2450, loss[loss=0.2141, simple_loss=0.3034, pruned_loss=0.06238, over 8529.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3032, pruned_loss=0.07431, over 1617039.67 frames. ], batch size: 28, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:16,529 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.477e+02 3.089e+02 4.011e+02 1.178e+03, threshold=6.179e+02, percent-clipped=8.0 +2023-02-06 14:53:29,785 INFO [train.py:901] (1/4) Epoch 14, batch 2500, loss[loss=0.2196, simple_loss=0.3011, pruned_loss=0.06901, over 8732.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3041, pruned_loss=0.0749, over 1616099.90 frames. ], batch size: 34, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:55,582 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107617.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:54:03,476 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:03,967 INFO [train.py:901] (1/4) Epoch 14, batch 2550, loss[loss=0.2123, simple_loss=0.2978, pruned_loss=0.06339, over 8469.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3027, pruned_loss=0.07374, over 1616498.41 frames. 
], batch size: 25, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:26,274 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.642e+02 3.253e+02 4.518e+02 1.030e+03, threshold=6.506e+02, percent-clipped=5.0 +2023-02-06 14:54:31,308 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2024, 2.4345, 2.0125, 2.9658, 1.2626, 1.7012, 2.0175, 2.4697], + device='cuda:1'), covar=tensor([0.0634, 0.0734, 0.0921, 0.0344, 0.1195, 0.1337, 0.0881, 0.0833], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0206, 0.0251, 0.0212, 0.0216, 0.0253, 0.0255, 0.0216], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:54:37,782 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:39,570 INFO [train.py:901] (1/4) Epoch 14, batch 2600, loss[loss=0.2275, simple_loss=0.2894, pruned_loss=0.08279, over 7667.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3021, pruned_loss=0.0738, over 1614418.39 frames. ], batch size: 19, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:48,870 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 14:55:12,844 INFO [train.py:901] (1/4) Epoch 14, batch 2650, loss[loss=0.2066, simple_loss=0.2744, pruned_loss=0.06937, over 7426.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3021, pruned_loss=0.07404, over 1610517.36 frames. ], batch size: 17, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:55:30,857 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:55:34,866 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.443e+02 2.980e+02 3.881e+02 9.981e+02, threshold=5.960e+02, percent-clipped=6.0 +2023-02-06 14:55:38,604 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5531, 1.6896, 1.7598, 1.2102, 1.8200, 1.4130, 0.8446, 1.6811], + device='cuda:1'), covar=tensor([0.0355, 0.0219, 0.0160, 0.0345, 0.0244, 0.0479, 0.0508, 0.0195], + device='cuda:1'), in_proj_covar=tensor([0.0415, 0.0353, 0.0308, 0.0411, 0.0341, 0.0499, 0.0376, 0.0381], + device='cuda:1'), out_proj_covar=tensor([1.1548e-04, 9.5921e-05, 8.3857e-05, 1.1200e-04, 9.3445e-05, 1.4695e-04, + 1.0487e-04, 1.0510e-04], device='cuda:1') +2023-02-06 14:55:49,934 INFO [train.py:901] (1/4) Epoch 14, batch 2700, loss[loss=0.243, simple_loss=0.3145, pruned_loss=0.08568, over 8245.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3011, pruned_loss=0.07372, over 1607385.21 frames. ], batch size: 22, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:04,448 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0595, 4.1590, 2.8069, 3.0046, 3.1407, 2.2656, 2.7357, 3.1471], + device='cuda:1'), covar=tensor([0.1467, 0.0258, 0.0751, 0.0653, 0.0642, 0.1120, 0.1005, 0.0784], + device='cuda:1'), in_proj_covar=tensor([0.0343, 0.0230, 0.0316, 0.0295, 0.0296, 0.0319, 0.0336, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:56:23,702 INFO [train.py:901] (1/4) Epoch 14, batch 2750, loss[loss=0.1977, simple_loss=0.2853, pruned_loss=0.05507, over 7658.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3029, pruned_loss=0.07383, over 1616655.40 frames. 
], batch size: 19, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:44,693 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.404e+02 2.918e+02 3.592e+02 1.217e+03, threshold=5.837e+02, percent-clipped=4.0 +2023-02-06 14:56:53,310 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107872.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:56:54,154 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107873.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:56:59,460 INFO [train.py:901] (1/4) Epoch 14, batch 2800, loss[loss=0.2378, simple_loss=0.315, pruned_loss=0.08026, over 8669.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3031, pruned_loss=0.07419, over 1613947.50 frames. ], batch size: 39, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:57:03,900 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:12,673 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107898.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:57:20,767 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:33,811 INFO [train.py:901] (1/4) Epoch 14, batch 2850, loss[loss=0.2112, simple_loss=0.2947, pruned_loss=0.06391, over 8248.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3025, pruned_loss=0.07367, over 1610500.08 frames. ], batch size: 22, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:57:54,109 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.478e+02 3.087e+02 3.919e+02 8.173e+02, threshold=6.173e+02, percent-clipped=5.0 +2023-02-06 14:58:01,786 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2031, 2.2694, 1.7220, 2.0389, 1.8446, 1.3740, 1.6160, 1.6933], + device='cuda:1'), covar=tensor([0.1126, 0.0316, 0.0958, 0.0462, 0.0641, 0.1280, 0.0927, 0.0787], + device='cuda:1'), in_proj_covar=tensor([0.0340, 0.0227, 0.0315, 0.0293, 0.0293, 0.0317, 0.0337, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 14:58:03,816 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1391, 2.4503, 3.6761, 1.9058, 2.8600, 2.4418, 2.2410, 2.6405], + device='cuda:1'), covar=tensor([0.1138, 0.1585, 0.0503, 0.2487, 0.1189, 0.1919, 0.1229, 0.1793], + device='cuda:1'), in_proj_covar=tensor([0.0495, 0.0543, 0.0535, 0.0589, 0.0619, 0.0555, 0.0486, 0.0612], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 14:58:05,775 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9350, 2.2156, 1.8087, 2.7875, 1.3495, 1.5994, 1.9499, 2.2151], + device='cuda:1'), covar=tensor([0.0715, 0.0858, 0.0978, 0.0387, 0.1200, 0.1410, 0.0908, 0.0813], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0204, 0.0248, 0.0210, 0.0212, 0.0248, 0.0252, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 14:58:08,144 INFO [train.py:901] (1/4) Epoch 14, batch 2900, loss[loss=0.2353, simple_loss=0.3135, pruned_loss=0.07852, over 8353.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3017, pruned_loss=0.07347, over 1607902.35 frames. 
], batch size: 26, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:58:12,782 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:27,891 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 14:58:38,640 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:44,583 INFO [train.py:901] (1/4) Epoch 14, batch 2950, loss[loss=0.2462, simple_loss=0.3141, pruned_loss=0.08918, over 8205.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3008, pruned_loss=0.07344, over 1602102.65 frames. ], batch size: 23, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:59:04,840 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.696e+02 3.199e+02 4.019e+02 8.231e+02, threshold=6.398e+02, percent-clipped=3.0 +2023-02-06 14:59:18,166 INFO [train.py:901] (1/4) Epoch 14, batch 3000, loss[loss=0.2274, simple_loss=0.3175, pruned_loss=0.0686, over 8491.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3001, pruned_loss=0.07275, over 1603511.63 frames. ], batch size: 25, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:59:18,166 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 14:59:30,508 INFO [train.py:935] (1/4) Epoch 14, validation: loss=0.1827, simple_loss=0.283, pruned_loss=0.04121, over 944034.00 frames. +2023-02-06 14:59:30,510 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 14:59:43,701 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:05,762 INFO [train.py:901] (1/4) Epoch 14, batch 3050, loss[loss=0.2368, simple_loss=0.3124, pruned_loss=0.0806, over 8396.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3006, pruned_loss=0.07323, over 1603251.80 frames. ], batch size: 49, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:00:10,186 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.42 vs. limit=5.0 +2023-02-06 15:00:10,682 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:28,076 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.670e+02 3.118e+02 3.835e+02 7.160e+02, threshold=6.236e+02, percent-clipped=1.0 +2023-02-06 15:00:41,679 INFO [train.py:901] (1/4) Epoch 14, batch 3100, loss[loss=0.1927, simple_loss=0.2604, pruned_loss=0.06247, over 7697.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.301, pruned_loss=0.07332, over 1605743.87 frames. ], batch size: 18, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:00:49,460 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 15:01:04,702 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108214.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:15,963 INFO [train.py:901] (1/4) Epoch 14, batch 3150, loss[loss=0.249, simple_loss=0.3352, pruned_loss=0.08136, over 8198.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3014, pruned_loss=0.07348, over 1604923.79 frames. 
], batch size: 23, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:24,679 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:37,167 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.570e+02 3.163e+02 4.155e+02 7.848e+02, threshold=6.326e+02, percent-clipped=5.0 +2023-02-06 15:01:43,492 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:46,379 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 15:01:51,511 INFO [train.py:901] (1/4) Epoch 14, batch 3200, loss[loss=0.2061, simple_loss=0.2877, pruned_loss=0.06222, over 8719.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.302, pruned_loss=0.07368, over 1609556.13 frames. ], batch size: 30, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:51,735 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5183, 1.4672, 1.7656, 1.3076, 1.1421, 1.7859, 0.1278, 1.0647], + device='cuda:1'), covar=tensor([0.2445, 0.1884, 0.0587, 0.1457, 0.3987, 0.0564, 0.2918, 0.1735], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0177, 0.0109, 0.0220, 0.0261, 0.0111, 0.0163, 0.0174], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 15:02:06,043 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5501, 2.2281, 3.3905, 2.5985, 2.9496, 2.3912, 1.9723, 1.9802], + device='cuda:1'), covar=tensor([0.3969, 0.4113, 0.1366, 0.2714, 0.2131, 0.2181, 0.1627, 0.4078], + device='cuda:1'), in_proj_covar=tensor([0.0902, 0.0901, 0.0740, 0.0868, 0.0957, 0.0831, 0.0712, 0.0778], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 15:02:25,763 INFO [train.py:901] (1/4) Epoch 14, batch 3250, loss[loss=0.1968, simple_loss=0.2778, pruned_loss=0.05792, over 8459.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3033, pruned_loss=0.07428, over 1608973.05 frames. ], batch size: 27, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:02:35,708 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:02:47,022 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.638e+02 3.239e+02 4.086e+02 1.012e+03, threshold=6.478e+02, percent-clipped=4.0 +2023-02-06 15:03:02,204 INFO [train.py:901] (1/4) Epoch 14, batch 3300, loss[loss=0.2353, simple_loss=0.3221, pruned_loss=0.07427, over 8030.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3028, pruned_loss=0.07363, over 1612470.19 frames. ], batch size: 22, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:11,260 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:27,903 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:36,466 INFO [train.py:901] (1/4) Epoch 14, batch 3350, loss[loss=0.1959, simple_loss=0.2811, pruned_loss=0.05538, over 8127.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3027, pruned_loss=0.07336, over 1616313.45 frames. 
], batch size: 22, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:49,808 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:57,188 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.656e+02 3.299e+02 4.467e+02 8.781e+02, threshold=6.597e+02, percent-clipped=5.0 +2023-02-06 15:04:04,263 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:10,796 INFO [train.py:901] (1/4) Epoch 14, batch 3400, loss[loss=0.2212, simple_loss=0.3129, pruned_loss=0.06471, over 8328.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3041, pruned_loss=0.07418, over 1618322.82 frames. ], batch size: 25, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:04:22,161 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:46,713 INFO [train.py:901] (1/4) Epoch 14, batch 3450, loss[loss=0.2715, simple_loss=0.326, pruned_loss=0.1085, over 7055.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3036, pruned_loss=0.07405, over 1614803.28 frames. ], batch size: 71, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:07,901 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.481e+02 3.055e+02 3.627e+02 7.933e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-06 15:05:21,984 INFO [train.py:901] (1/4) Epoch 14, batch 3500, loss[loss=0.2831, simple_loss=0.3556, pruned_loss=0.1053, over 8460.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3042, pruned_loss=0.07438, over 1614618.42 frames. ], batch size: 27, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:29,151 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 15:05:31,989 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:05:57,729 INFO [train.py:901] (1/4) Epoch 14, batch 3550, loss[loss=0.2186, simple_loss=0.3063, pruned_loss=0.06547, over 8334.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3034, pruned_loss=0.07417, over 1615535.75 frames. ], batch size: 25, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:17,929 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.417e+02 3.151e+02 4.175e+02 8.210e+02, threshold=6.301e+02, percent-clipped=3.0 +2023-02-06 15:06:28,318 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2092, 1.3526, 3.3459, 1.0399, 2.9314, 2.7743, 3.0131, 2.9510], + device='cuda:1'), covar=tensor([0.0801, 0.4080, 0.0858, 0.4044, 0.1504, 0.1233, 0.0765, 0.0880], + device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0598, 0.0614, 0.0560, 0.0636, 0.0545, 0.0534, 0.0598], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 15:06:31,420 INFO [train.py:901] (1/4) Epoch 14, batch 3600, loss[loss=0.1942, simple_loss=0.2688, pruned_loss=0.0598, over 7800.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.303, pruned_loss=0.07386, over 1615770.46 frames. 
], batch size: 20, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:36,125 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108687.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:07,041 INFO [train.py:901] (1/4) Epoch 14, batch 3650, loss[loss=0.2226, simple_loss=0.3034, pruned_loss=0.07089, over 8458.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3019, pruned_loss=0.07324, over 1612950.93 frames. ], batch size: 27, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:17,347 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2090, 2.5610, 3.5734, 2.1240, 1.7568, 3.7208, 0.6612, 2.2245], + device='cuda:1'), covar=tensor([0.1527, 0.1110, 0.0379, 0.2262, 0.3249, 0.0197, 0.3155, 0.1724], + device='cuda:1'), in_proj_covar=tensor([0.0171, 0.0174, 0.0106, 0.0215, 0.0256, 0.0109, 0.0161, 0.0169], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 15:07:27,807 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 2.655e+02 3.191e+02 3.880e+02 8.243e+02, threshold=6.382e+02, percent-clipped=2.0 +2023-02-06 15:07:30,611 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 15:07:39,031 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5430, 1.9891, 2.9056, 2.2759, 2.7570, 2.3303, 2.0004, 1.4271], + device='cuda:1'), covar=tensor([0.4051, 0.4505, 0.1470, 0.3277, 0.2232, 0.2561, 0.1767, 0.4897], + device='cuda:1'), in_proj_covar=tensor([0.0895, 0.0896, 0.0742, 0.0869, 0.0949, 0.0827, 0.0710, 0.0778], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 15:07:41,535 INFO [train.py:901] (1/4) Epoch 14, batch 3700, loss[loss=0.2131, simple_loss=0.2992, pruned_loss=0.06353, over 8105.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3015, pruned_loss=0.07298, over 1612996.10 frames. ], batch size: 23, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:50,763 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:56,409 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:01,069 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:14,687 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6443, 1.4749, 1.5686, 1.2871, 0.9618, 1.3248, 1.6171, 1.3351], + device='cuda:1'), covar=tensor([0.0524, 0.1141, 0.1635, 0.1343, 0.0545, 0.1377, 0.0643, 0.0611], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0157, 0.0100, 0.0161, 0.0113, 0.0137], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 15:08:15,867 INFO [train.py:901] (1/4) Epoch 14, batch 3750, loss[loss=0.2175, simple_loss=0.2881, pruned_loss=0.07347, over 7797.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.301, pruned_loss=0.07296, over 1610893.71 frames. 
], batch size: 19, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:08:37,495 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.414e+02 2.846e+02 3.664e+02 8.039e+02, threshold=5.692e+02, percent-clipped=5.0 +2023-02-06 15:08:51,949 INFO [train.py:901] (1/4) Epoch 14, batch 3800, loss[loss=0.2509, simple_loss=0.3283, pruned_loss=0.08669, over 8587.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3, pruned_loss=0.07222, over 1603249.46 frames. ], batch size: 31, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:09:12,257 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:21,323 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0648, 1.5955, 3.5454, 1.5000, 2.3774, 3.8409, 3.9421, 3.2952], + device='cuda:1'), covar=tensor([0.1041, 0.1675, 0.0285, 0.2142, 0.1059, 0.0277, 0.0479, 0.0607], + device='cuda:1'), in_proj_covar=tensor([0.0272, 0.0303, 0.0266, 0.0296, 0.0283, 0.0246, 0.0367, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 15:09:26,888 INFO [train.py:901] (1/4) Epoch 14, batch 3850, loss[loss=0.2136, simple_loss=0.2984, pruned_loss=0.06439, over 8324.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2995, pruned_loss=0.07213, over 1599041.49 frames. ], batch size: 25, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:09:33,935 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:35,490 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2835, 1.9332, 2.8605, 2.2708, 2.7294, 2.2186, 1.8846, 1.3531], + device='cuda:1'), covar=tensor([0.4635, 0.4454, 0.1470, 0.3051, 0.2076, 0.2492, 0.1755, 0.4875], + device='cuda:1'), in_proj_covar=tensor([0.0892, 0.0896, 0.0741, 0.0869, 0.0948, 0.0825, 0.0708, 0.0777], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 15:09:35,935 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 15:09:49,066 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.574e+02 3.020e+02 4.517e+02 9.725e+02, threshold=6.039e+02, percent-clipped=15.0 +2023-02-06 15:09:51,343 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7212, 2.0816, 1.6886, 2.5489, 1.1763, 1.3731, 1.7504, 2.2030], + device='cuda:1'), covar=tensor([0.0905, 0.0790, 0.1048, 0.0405, 0.1208, 0.1649, 0.1023, 0.0748], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0207, 0.0252, 0.0214, 0.0216, 0.0252, 0.0257, 0.0214], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 15:10:04,288 INFO [train.py:901] (1/4) Epoch 14, batch 3900, loss[loss=0.2031, simple_loss=0.2751, pruned_loss=0.06562, over 7932.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2986, pruned_loss=0.0716, over 1598282.64 frames. 
], batch size: 20, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:07,765 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1695, 1.2147, 4.5234, 1.7160, 3.5677, 3.5268, 4.0578, 3.9786], + device='cuda:1'), covar=tensor([0.1020, 0.6822, 0.0833, 0.4502, 0.1987, 0.1670, 0.0961, 0.0901], + device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0608, 0.0623, 0.0570, 0.0645, 0.0552, 0.0543, 0.0606], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 15:10:39,032 INFO [train.py:901] (1/4) Epoch 14, batch 3950, loss[loss=0.2039, simple_loss=0.2818, pruned_loss=0.06303, over 7184.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2995, pruned_loss=0.07201, over 1600284.04 frames. ], batch size: 16, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:56,309 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:10:56,952 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109055.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:10:59,103 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:00,258 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.458e+02 2.966e+02 3.777e+02 8.079e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-06 15:11:14,754 INFO [train.py:901] (1/4) Epoch 14, batch 4000, loss[loss=0.1896, simple_loss=0.2717, pruned_loss=0.05372, over 7655.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3008, pruned_loss=0.07255, over 1603842.35 frames. ], batch size: 19, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:11:17,682 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:50,550 INFO [train.py:901] (1/4) Epoch 14, batch 4050, loss[loss=0.2258, simple_loss=0.3134, pruned_loss=0.06912, over 8335.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3017, pruned_loss=0.0726, over 1608311.04 frames. ], batch size: 26, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:06,806 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:11,580 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 15:12:11,649 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.362e+02 2.684e+02 3.543e+02 7.215e+02, threshold=5.369e+02, percent-clipped=4.0 +2023-02-06 15:12:16,040 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:26,284 INFO [train.py:901] (1/4) Epoch 14, batch 4100, loss[loss=0.2234, simple_loss=0.3021, pruned_loss=0.07239, over 8553.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3022, pruned_loss=0.07244, over 1610530.06 frames. 
], batch size: 31, lr: 5.45e-03, grad_scale: 16.0
+2023-02-06 15:12:33,980 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109190.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:12:50,498 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109212.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:13:02,496 INFO [train.py:901] (1/4) Epoch 14, batch 4150, loss[loss=0.2797, simple_loss=0.3346, pruned_loss=0.1124, over 6675.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3028, pruned_loss=0.07326, over 1610001.84 frames. ], batch size: 71, lr: 5.45e-03, grad_scale: 8.0
+2023-02-06 15:13:23,981 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.677e+02 3.078e+02 3.893e+02 8.547e+02, threshold=6.157e+02, percent-clipped=10.0
+2023-02-06 15:13:28,945 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109268.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:13:35,738 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 15:13:37,136 INFO [train.py:901] (1/4) Epoch 14, batch 4200, loss[loss=0.2379, simple_loss=0.3183, pruned_loss=0.07875, over 8575.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3023, pruned_loss=0.07288, over 1608800.91 frames. ], batch size: 31, lr: 5.45e-03, grad_scale: 8.0
+2023-02-06 15:13:44,010 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0
+2023-02-06 15:13:59,678 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109310.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:14:00,998 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109312.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:14:01,570 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 15:14:14,547 INFO [train.py:901] (1/4) Epoch 14, batch 4250, loss[loss=0.2078, simple_loss=0.2981, pruned_loss=0.05876, over 8623.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3021, pruned_loss=0.07261, over 1607671.28 frames. ], batch size: 31, lr: 5.45e-03, grad_scale: 8.0
+2023-02-06 15:14:18,160 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109335.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:14:18,815 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:14:35,152 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.2583, 1.9012, 5.3923, 2.5116, 4.8282, 4.5514, 4.9796, 4.8502],
+ device='cuda:1'), covar=tensor([0.0512, 0.4498, 0.0456, 0.3163, 0.0970, 0.0803, 0.0454, 0.0471],
+ device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0599, 0.0617, 0.0559, 0.0633, 0.0544, 0.0535, 0.0596],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 15:14:35,685 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.488e+02 3.016e+02 3.845e+02 8.299e+02, threshold=6.033e+02, percent-clipped=4.0
+2023-02-06 15:14:48,673 INFO [train.py:901] (1/4) Epoch 14, batch 4300, loss[loss=0.2272, simple_loss=0.304, pruned_loss=0.07519, over 8232.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3022, pruned_loss=0.07276, over 1611010.28 frames. ], batch size: 22, lr: 5.45e-03, grad_scale: 8.0
+2023-02-06 15:14:56,019 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 15:14:58,772 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 15:15:01,894 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109399.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:15:24,494 INFO [train.py:901] (1/4) Epoch 14, batch 4350, loss[loss=0.2083, simple_loss=0.2828, pruned_loss=0.06689, over 7780.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3022, pruned_loss=0.0729, over 1610352.87 frames. ], batch size: 19, lr: 5.45e-03, grad_scale: 8.0
+2023-02-06 15:15:34,087 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 15:15:47,273 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.678e+02 3.270e+02 4.253e+02 1.326e+03, threshold=6.540e+02, percent-clipped=8.0
+2023-02-06 15:16:00,544 INFO [train.py:901] (1/4) Epoch 14, batch 4400, loss[loss=0.2819, simple_loss=0.347, pruned_loss=0.1084, over 8602.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3024, pruned_loss=0.07326, over 1615520.78 frames. ], batch size: 34, lr: 5.45e-03, grad_scale: 8.0
+2023-02-06 15:16:15,760 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 15:16:24,168 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109514.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:16:31,889 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109524.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:16:35,897 INFO [train.py:901] (1/4) Epoch 14, batch 4450, loss[loss=0.2861, simple_loss=0.3473, pruned_loss=0.1125, over 8455.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3009, pruned_loss=0.07284, over 1612758.78 frames. ], batch size: 27, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:16:49,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109549.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:16:55,365 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109556.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:16:58,638 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.456e+02 2.864e+02 3.608e+02 1.087e+03, threshold=5.728e+02, percent-clipped=4.0
+2023-02-06 15:17:11,132 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 15:17:12,488 INFO [train.py:901] (1/4) Epoch 14, batch 4500, loss[loss=0.191, simple_loss=0.2762, pruned_loss=0.05294, over 8101.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3011, pruned_loss=0.07295, over 1615534.96 frames. ], batch size: 21, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:17:24,282 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109597.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:17:47,001 INFO [train.py:901] (1/4) Epoch 14, batch 4550, loss[loss=0.2081, simple_loss=0.2757, pruned_loss=0.0702, over 7183.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3021, pruned_loss=0.07351, over 1614520.33 frames. ], batch size: 16, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:18:05,732 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109656.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:18:09,030 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.640e+02 3.232e+02 4.162e+02 9.021e+02, threshold=6.464e+02, percent-clipped=8.0
+2023-02-06 15:18:16,730 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109671.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:18:21,369 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109677.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:18:23,261 INFO [train.py:901] (1/4) Epoch 14, batch 4600, loss[loss=0.2314, simple_loss=0.3053, pruned_loss=0.07872, over 8181.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2996, pruned_loss=0.07196, over 1611497.55 frames. ], batch size: 23, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:18:23,335 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109680.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:18:56,948 INFO [train.py:901] (1/4) Epoch 14, batch 4650, loss[loss=0.2516, simple_loss=0.324, pruned_loss=0.08961, over 8288.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3009, pruned_loss=0.07362, over 1605190.35 frames. ], batch size: 23, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:19:18,714 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.556e+02 3.032e+02 3.907e+02 9.020e+02, threshold=6.065e+02, percent-clipped=4.0
+2023-02-06 15:19:24,824 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109770.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 15:19:25,417 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109771.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:19:31,314 INFO [train.py:901] (1/4) Epoch 14, batch 4700, loss[loss=0.2613, simple_loss=0.3297, pruned_loss=0.09647, over 8318.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.2996, pruned_loss=0.07296, over 1602594.89 frames. ], batch size: 26, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:19:42,825 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:19:42,849 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109795.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 15:20:06,516 INFO [train.py:901] (1/4) Epoch 14, batch 4750, loss[loss=0.254, simple_loss=0.3271, pruned_loss=0.09049, over 8344.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3005, pruned_loss=0.07334, over 1604574.96 frames. ], batch size: 25, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:20:10,485 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 15:20:12,436 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 15:20:26,957 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.643e+02 3.166e+02 4.371e+02 1.104e+03, threshold=6.332e+02, percent-clipped=5.0
+2023-02-06 15:20:40,298 INFO [train.py:901] (1/4) Epoch 14, batch 4800, loss[loss=0.2704, simple_loss=0.3391, pruned_loss=0.1009, over 8364.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3009, pruned_loss=0.07358, over 1607947.83 frames. ], batch size: 24, lr: 5.44e-03, grad_scale: 8.0
+2023-02-06 15:20:56,506 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7336, 1.8151, 1.7173, 2.3252, 0.9703, 1.4895, 1.7869, 1.9455],
+ device='cuda:1'), covar=tensor([0.0728, 0.0865, 0.0916, 0.0378, 0.1196, 0.1400, 0.0756, 0.0759],
+ device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0207, 0.0253, 0.0212, 0.0214, 0.0251, 0.0256, 0.0214],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 15:21:03,935 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 15:21:14,269 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:21:16,045 INFO [train.py:901] (1/4) Epoch 14, batch 4850, loss[loss=0.181, simple_loss=0.2535, pruned_loss=0.05422, over 7425.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3, pruned_loss=0.07258, over 1605338.55 frames. ], batch size: 17, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:21:23,511 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109941.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:21:31,142 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109952.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:21:37,038 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.854e+02 3.344e+02 7.947e+02, threshold=5.708e+02, percent-clipped=2.0
+2023-02-06 15:21:38,878 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0
+2023-02-06 15:21:46,631 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0181, 2.1608, 1.7852, 2.7891, 1.1554, 1.6628, 2.0120, 2.3301],
+ device='cuda:1'), covar=tensor([0.0757, 0.0893, 0.1033, 0.0368, 0.1243, 0.1402, 0.1021, 0.0800],
+ device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0208, 0.0254, 0.0212, 0.0214, 0.0252, 0.0256, 0.0215],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 15:21:49,882 INFO [train.py:901] (1/4) Epoch 14, batch 4900, loss[loss=0.1876, simple_loss=0.2582, pruned_loss=0.05856, over 7443.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3012, pruned_loss=0.07277, over 1608510.00 frames. ], batch size: 17, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:22:07,941 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9059, 1.8233, 2.9850, 1.5758, 2.3061, 3.2769, 3.2018, 2.8461],
+ device='cuda:1'), covar=tensor([0.0972, 0.1268, 0.0380, 0.1796, 0.0994, 0.0257, 0.0622, 0.0524],
+ device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0302, 0.0266, 0.0296, 0.0282, 0.0244, 0.0366, 0.0291],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-06 15:22:18,843 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0473, 1.5314, 3.4505, 1.5072, 2.3006, 3.8211, 3.7693, 3.2586],
+ device='cuda:1'), covar=tensor([0.0990, 0.1619, 0.0311, 0.2081, 0.1078, 0.0241, 0.0558, 0.0612],
+ device='cuda:1'), in_proj_covar=tensor([0.0271, 0.0303, 0.0267, 0.0296, 0.0283, 0.0245, 0.0367, 0.0292],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-06 15:22:19,455 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110021.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:20,956 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3926, 2.7606, 3.2568, 1.6455, 3.4607, 1.9720, 1.5517, 2.1793],
+ device='cuda:1'), covar=tensor([0.0639, 0.0317, 0.0231, 0.0575, 0.0279, 0.0675, 0.0704, 0.0379],
+ device='cuda:1'), in_proj_covar=tensor([0.0419, 0.0356, 0.0305, 0.0411, 0.0343, 0.0503, 0.0376, 0.0381],
+ device='cuda:1'), out_proj_covar=tensor([1.1645e-04, 9.6243e-05, 8.2705e-05, 1.1197e-04, 9.3788e-05, 1.4787e-04,
+ 1.0478e-04, 1.0453e-04], device='cuda:1')
+2023-02-06 15:22:24,322 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110027.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:26,032 INFO [train.py:901] (1/4) Epoch 14, batch 4950, loss[loss=0.1927, simple_loss=0.2758, pruned_loss=0.05481, over 8298.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3014, pruned_loss=0.07295, over 1608672.32 frames. ], batch size: 23, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:22:30,965 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110035.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:41,741 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110051.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:42,418 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110052.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:45,147 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110056.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:48,328 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.585e+02 3.180e+02 4.032e+02 7.448e+02, threshold=6.360e+02, percent-clipped=3.0
+2023-02-06 15:22:49,148 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:22:58,267 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110076.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:23:00,768 INFO [train.py:901] (1/4) Epoch 14, batch 5000, loss[loss=0.2172, simple_loss=0.2982, pruned_loss=0.06803, over 8640.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3008, pruned_loss=0.0723, over 1610871.56 frames. ], batch size: 31, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:23:33,488 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110128.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:23:34,638 INFO [train.py:901] (1/4) Epoch 14, batch 5050, loss[loss=0.3106, simple_loss=0.3543, pruned_loss=0.1335, over 6660.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3013, pruned_loss=0.0732, over 1612036.39 frames. ], batch size: 71, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:23:36,756 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110133.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:23:38,745 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110136.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:23:43,180 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 15:23:57,272 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.889e+02 3.601e+02 4.263e+02 9.587e+02, threshold=7.203e+02, percent-clipped=6.0
+2023-02-06 15:24:09,950 INFO [train.py:901] (1/4) Epoch 14, batch 5100, loss[loss=0.2922, simple_loss=0.3551, pruned_loss=0.1147, over 7271.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3023, pruned_loss=0.07351, over 1617687.52 frames. ], batch size: 71, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:24:42,800 INFO [train.py:901] (1/4) Epoch 14, batch 5150, loss[loss=0.2047, simple_loss=0.2828, pruned_loss=0.06332, over 8281.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3025, pruned_loss=0.07378, over 1614343.84 frames. ], batch size: 23, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:25:05,059 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.455e+02 3.012e+02 3.817e+02 9.599e+02, threshold=6.024e+02, percent-clipped=2.0
+2023-02-06 15:25:17,476 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6635, 1.6980, 2.0960, 1.3408, 1.1872, 2.0918, 0.2815, 1.2534],
+ device='cuda:1'), covar=tensor([0.1771, 0.1271, 0.0419, 0.1800, 0.3521, 0.0442, 0.2444, 0.1837],
+ device='cuda:1'), in_proj_covar=tensor([0.0169, 0.0173, 0.0105, 0.0214, 0.0258, 0.0111, 0.0160, 0.0170],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 15:25:19,324 INFO [train.py:901] (1/4) Epoch 14, batch 5200, loss[loss=0.2179, simple_loss=0.2934, pruned_loss=0.07116, over 8077.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.302, pruned_loss=0.07329, over 1613459.82 frames. ], batch size: 21, lr: 5.43e-03, grad_scale: 8.0
+2023-02-06 15:25:20,643 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1660, 4.1235, 3.7793, 1.7570, 3.7122, 3.7504, 3.7255, 3.3883],
+ device='cuda:1'), covar=tensor([0.0859, 0.0604, 0.1092, 0.5026, 0.0852, 0.1090, 0.1387, 0.0952],
+ device='cuda:1'), in_proj_covar=tensor([0.0480, 0.0397, 0.0401, 0.0495, 0.0393, 0.0398, 0.0387, 0.0346],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 15:25:25,792 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-02-06 15:25:27,240 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0
+2023-02-06 15:25:33,781 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:25:39,477 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 15:25:41,058 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110312.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:25:47,126 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110321.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:25:53,178 INFO [train.py:901] (1/4) Epoch 14, batch 5250, loss[loss=0.2244, simple_loss=0.3063, pruned_loss=0.07125, over 8445.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3027, pruned_loss=0.07359, over 1617467.51 frames. ], batch size: 27, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:25:57,548 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:25:58,343 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110337.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:26:15,521 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.532e+02 3.204e+02 3.879e+02 8.466e+02, threshold=6.409e+02, percent-clipped=5.0
+2023-02-06 15:26:28,848 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110379.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:26:29,491 INFO [train.py:901] (1/4) Epoch 14, batch 5300, loss[loss=0.2689, simple_loss=0.3348, pruned_loss=0.1015, over 8346.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.303, pruned_loss=0.07425, over 1613440.52 frames. ], batch size: 26, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:26:37,622 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0
+2023-02-06 15:26:39,498 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:26:48,919 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110406.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:26:56,332 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:27:03,329 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0
+2023-02-06 15:27:05,004 INFO [train.py:901] (1/4) Epoch 14, batch 5350, loss[loss=0.237, simple_loss=0.3206, pruned_loss=0.07669, over 8506.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3028, pruned_loss=0.07389, over 1616764.95 frames. ], batch size: 28, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:27:25,506 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.452e+02 3.047e+02 3.791e+02 6.566e+02, threshold=6.094e+02, percent-clipped=2.0
+2023-02-06 15:27:32,802 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110472.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:27:36,875 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:27:38,761 INFO [train.py:901] (1/4) Epoch 14, batch 5400, loss[loss=0.2212, simple_loss=0.3036, pruned_loss=0.06939, over 8759.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3027, pruned_loss=0.07359, over 1619477.63 frames. ], batch size: 39, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:27:48,336 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110494.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:28:08,456 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110521.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:28:14,356 INFO [train.py:901] (1/4) Epoch 14, batch 5450, loss[loss=0.1969, simple_loss=0.2724, pruned_loss=0.06073, over 7920.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3019, pruned_loss=0.07307, over 1614540.42 frames. ], batch size: 20, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:28:30,474 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 15:28:34,934 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.429e+02 2.846e+02 3.589e+02 7.640e+02, threshold=5.692e+02, percent-clipped=1.0
+2023-02-06 15:28:39,770 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1573, 2.1254, 1.5811, 1.8501, 1.8187, 1.3618, 1.6551, 1.6052],
+ device='cuda:1'), covar=tensor([0.1430, 0.0424, 0.1299, 0.0564, 0.0724, 0.1573, 0.0946, 0.0892],
+ device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0235, 0.0322, 0.0297, 0.0298, 0.0328, 0.0344, 0.0311],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:28:47,544 INFO [train.py:901] (1/4) Epoch 14, batch 5500, loss[loss=0.2169, simple_loss=0.2999, pruned_loss=0.06695, over 8453.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.303, pruned_loss=0.07367, over 1617394.79 frames. ], batch size: 25, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:28:50,377 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4821, 2.8236, 1.9573, 2.2218, 2.3276, 1.7261, 2.1350, 2.1516],
+ device='cuda:1'), covar=tensor([0.1520, 0.0399, 0.1106, 0.0664, 0.0617, 0.1401, 0.0963, 0.0910],
+ device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0234, 0.0322, 0.0297, 0.0297, 0.0328, 0.0343, 0.0310],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:28:52,322 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:28:55,814 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110592.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:23,306 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110629.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:23,843 INFO [train.py:901] (1/4) Epoch 14, batch 5550, loss[loss=0.2107, simple_loss=0.2859, pruned_loss=0.06782, over 8241.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3018, pruned_loss=0.07333, over 1609730.94 frames. ], batch size: 22, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:29:28,292 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.94 vs. limit=5.0
+2023-02-06 15:29:34,006 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:35,490 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0966, 4.1948, 2.6163, 2.7562, 3.0842, 2.2581, 2.8959, 3.0442],
+ device='cuda:1'), covar=tensor([0.1546, 0.0239, 0.0841, 0.0686, 0.0696, 0.1241, 0.0987, 0.0996],
+ device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0235, 0.0323, 0.0298, 0.0298, 0.0329, 0.0345, 0.0312],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:29:44,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.421e+02 3.120e+02 3.692e+02 1.093e+03, threshold=6.240e+02, percent-clipped=9.0
+2023-02-06 15:29:47,077 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110665.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:56,916 INFO [train.py:901] (1/4) Epoch 14, batch 5600, loss[loss=0.2221, simple_loss=0.2993, pruned_loss=0.07242, over 8485.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3027, pruned_loss=0.07412, over 1613780.49 frames. ], batch size: 28, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:29:56,986 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110680.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:30,943 INFO [train.py:901] (1/4) Epoch 14, batch 5650, loss[loss=0.2635, simple_loss=0.3343, pruned_loss=0.09637, over 7180.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3022, pruned_loss=0.07345, over 1615686.52 frames. ], batch size: 71, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:30:33,768 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 15:30:46,525 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9618, 1.5786, 1.2024, 1.4286, 1.3109, 1.0615, 1.1631, 1.2015],
+ device='cuda:1'), covar=tensor([0.1050, 0.0512, 0.1265, 0.0612, 0.0792, 0.1585, 0.0992, 0.0775],
+ device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0233, 0.0319, 0.0294, 0.0294, 0.0324, 0.0340, 0.0307],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:30:47,219 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:49,877 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110754.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:54,024 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110760.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:54,499 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.502e+02 3.092e+02 3.638e+02 5.778e+02, threshold=6.185e+02, percent-clipped=0.0
+2023-02-06 15:30:55,475 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4622, 1.7064, 2.7491, 1.3052, 1.9589, 1.7882, 1.5203, 1.9648],
+ device='cuda:1'), covar=tensor([0.1904, 0.2476, 0.0800, 0.4288, 0.1737, 0.3187, 0.2115, 0.2174],
+ device='cuda:1'), in_proj_covar=tensor([0.0499, 0.0552, 0.0540, 0.0598, 0.0621, 0.0565, 0.0490, 0.0618],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 15:30:56,821 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2995, 1.4400, 1.3403, 1.8213, 0.7487, 1.1007, 1.3448, 1.4941],
+ device='cuda:1'), covar=tensor([0.0891, 0.0821, 0.1101, 0.0540, 0.1140, 0.1534, 0.0789, 0.0777],
+ device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0204, 0.0250, 0.0211, 0.0211, 0.0248, 0.0252, 0.0211],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 15:31:04,326 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110775.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:04,551 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0
+2023-02-06 15:31:05,711 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110777.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:07,496 INFO [train.py:901] (1/4) Epoch 14, batch 5700, loss[loss=0.2335, simple_loss=0.3163, pruned_loss=0.07535, over 8359.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3041, pruned_loss=0.07403, over 1622803.11 frames. ], batch size: 24, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:31:07,673 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110780.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:12,399 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2858, 1.6937, 3.3591, 1.6012, 2.4603, 3.7326, 3.6857, 3.2286],
+ device='cuda:1'), covar=tensor([0.0922, 0.1633, 0.0369, 0.2063, 0.1111, 0.0231, 0.0563, 0.0528],
+ device='cuda:1'), in_proj_covar=tensor([0.0270, 0.0303, 0.0268, 0.0295, 0.0282, 0.0243, 0.0368, 0.0290],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-06 15:31:12,424 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2192, 1.6703, 1.7115, 1.5494, 1.0345, 1.6031, 1.8004, 1.9027],
+ device='cuda:1'), covar=tensor([0.0487, 0.1171, 0.1656, 0.1330, 0.0652, 0.1468, 0.0697, 0.0532],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0102, 0.0162, 0.0114, 0.0137],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 15:31:17,893 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:22,879 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110802.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:40,944 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 15:31:41,628 INFO [train.py:901] (1/4) Epoch 14, batch 5750, loss[loss=0.1802, simple_loss=0.2774, pruned_loss=0.04153, over 8189.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3021, pruned_loss=0.07273, over 1619090.32 frames. ], batch size: 23, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:31:51,512 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110843.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:54,986 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:04,403 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.447e+02 3.019e+02 3.853e+02 7.521e+02, threshold=6.038e+02, percent-clipped=3.0
+2023-02-06 15:32:10,139 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110868.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:14,045 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110873.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:18,669 INFO [train.py:901] (1/4) Epoch 14, batch 5800, loss[loss=0.2241, simple_loss=0.3025, pruned_loss=0.07284, over 8244.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.301, pruned_loss=0.07211, over 1619567.25 frames. ], batch size: 22, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:32:22,958 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:53,262 INFO [train.py:901] (1/4) Epoch 14, batch 5850, loss[loss=0.1887, simple_loss=0.2607, pruned_loss=0.05834, over 7536.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3004, pruned_loss=0.07166, over 1621600.13 frames. ], batch size: 18, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:33:14,140 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.498e+02 3.098e+02 4.112e+02 1.106e+03, threshold=6.195e+02, percent-clipped=10.0
+2023-02-06 15:33:15,331 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 15:33:22,880 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:33:25,739 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110976.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:33:28,977 INFO [train.py:901] (1/4) Epoch 14, batch 5900, loss[loss=0.274, simple_loss=0.3397, pruned_loss=0.1041, over 8087.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2997, pruned_loss=0.07179, over 1616238.16 frames. ], batch size: 21, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:33:40,957 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 15:33:51,006 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7881, 1.7586, 2.4745, 1.6922, 1.2286, 2.5912, 0.4896, 1.3198],
+ device='cuda:1'), covar=tensor([0.2194, 0.1427, 0.0409, 0.1635, 0.3465, 0.0263, 0.2638, 0.1951],
+ device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0175, 0.0107, 0.0216, 0.0260, 0.0112, 0.0161, 0.0172],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 15:33:54,429 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111016.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:03,738 INFO [train.py:901] (1/4) Epoch 14, batch 5950, loss[loss=0.2696, simple_loss=0.3292, pruned_loss=0.105, over 6822.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3011, pruned_loss=0.07225, over 1613786.98 frames. ], batch size: 71, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:34:07,788 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111036.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:08,695 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.18 vs. limit=5.0
+2023-02-06 15:34:11,072 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111041.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:17,718 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111051.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:20,640 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 15:34:24,231 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.648e+02 3.047e+02 4.016e+02 7.772e+02, threshold=6.093e+02, percent-clipped=5.0
+2023-02-06 15:34:24,456 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:34,726 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111076.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:38,002 INFO [train.py:901] (1/4) Epoch 14, batch 6000, loss[loss=0.1972, simple_loss=0.27, pruned_loss=0.06225, over 7928.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3007, pruned_loss=0.07226, over 1614362.34 frames. ], batch size: 20, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:34:38,002 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 15:34:50,553 INFO [train.py:935] (1/4) Epoch 14, validation: loss=0.1818, simple_loss=0.2816, pruned_loss=0.04094, over 944034.00 frames.
+2023-02-06 15:34:50,554 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-06 15:34:56,289 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111088.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:35:03,697 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111098.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:35:27,233 INFO [train.py:901] (1/4) Epoch 14, batch 6050, loss[loss=0.2326, simple_loss=0.3103, pruned_loss=0.07745, over 8075.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3012, pruned_loss=0.07211, over 1615685.25 frames. ], batch size: 21, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:35:45,547 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6232, 1.4815, 2.8675, 1.3368, 2.1221, 3.1028, 3.1678, 2.6197],
+ device='cuda:1'), covar=tensor([0.1120, 0.1513, 0.0366, 0.2038, 0.0857, 0.0284, 0.0557, 0.0666],
+ device='cuda:1'), in_proj_covar=tensor([0.0271, 0.0304, 0.0266, 0.0294, 0.0282, 0.0243, 0.0369, 0.0291],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-06 15:35:49,431 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.432e+02 2.876e+02 3.526e+02 5.542e+02, threshold=5.752e+02, percent-clipped=0.0
+2023-02-06 15:36:01,587 INFO [train.py:901] (1/4) Epoch 14, batch 6100, loss[loss=0.2175, simple_loss=0.282, pruned_loss=0.07644, over 7793.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3012, pruned_loss=0.07282, over 1614885.39 frames. ], batch size: 19, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:36:15,956 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 15:36:24,938 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111213.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:36:37,111 INFO [train.py:901] (1/4) Epoch 14, batch 6150, loss[loss=0.1932, simple_loss=0.2771, pruned_loss=0.05466, over 7933.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3014, pruned_loss=0.07307, over 1617227.60 frames. ], batch size: 20, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:36:37,203 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111230.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:36:46,726 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111243.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:36:59,331 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.474e+02 3.213e+02 4.029e+02 8.079e+02, threshold=6.426e+02, percent-clipped=5.0
+2023-02-06 15:37:11,858 INFO [train.py:901] (1/4) Epoch 14, batch 6200, loss[loss=0.205, simple_loss=0.2934, pruned_loss=0.05826, over 8561.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3021, pruned_loss=0.07369, over 1618140.44 frames. ], batch size: 39, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:37:13,480 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=5.19 vs. limit=5.0
+2023-02-06 15:37:38,513 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111320.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:37:45,101 INFO [train.py:901] (1/4) Epoch 14, batch 6250, loss[loss=0.1687, simple_loss=0.2562, pruned_loss=0.04061, over 7579.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.301, pruned_loss=0.07304, over 1612742.43 frames. ], batch size: 18, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:37:54,003 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 15:37:55,172 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111344.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:37:55,812 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111345.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:38:08,436 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.286e+02 2.818e+02 3.691e+02 1.208e+03, threshold=5.637e+02, percent-clipped=2.0
+2023-02-06 15:38:13,378 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111369.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:38:20,506 INFO [train.py:901] (1/4) Epoch 14, batch 6300, loss[loss=0.2016, simple_loss=0.286, pruned_loss=0.05858, over 8090.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3014, pruned_loss=0.0736, over 1611834.85 frames. ], batch size: 21, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:38:55,235 INFO [train.py:901] (1/4) Epoch 14, batch 6350, loss[loss=0.1986, simple_loss=0.2789, pruned_loss=0.05916, over 7663.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3013, pruned_loss=0.07365, over 1611740.60 frames. ], batch size: 19, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:38:56,130 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0888, 3.7109, 2.1826, 2.7871, 2.7555, 1.9809, 2.6969, 3.0304],
+ device='cuda:1'), covar=tensor([0.1577, 0.0321, 0.1093, 0.0806, 0.0723, 0.1293, 0.1039, 0.1110],
+ device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0232, 0.0323, 0.0293, 0.0298, 0.0326, 0.0341, 0.0307],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:38:58,925 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:38:58,978 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5398, 2.0717, 3.4275, 1.2496, 2.6827, 2.0278, 1.5125, 2.4271],
+ device='cuda:1'), covar=tensor([0.1783, 0.2535, 0.0808, 0.4380, 0.1619, 0.3075, 0.2182, 0.2467],
+ device='cuda:1'), in_proj_covar=tensor([0.0499, 0.0553, 0.0539, 0.0601, 0.0625, 0.0566, 0.0495, 0.0621],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 15:39:17,455 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.340e+02 2.891e+02 3.552e+02 9.934e+02, threshold=5.783e+02, percent-clipped=8.0
+2023-02-06 15:39:23,086 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:39:30,957 INFO [train.py:901] (1/4) Epoch 14, batch 6400, loss[loss=0.2035, simple_loss=0.2752, pruned_loss=0.06594, over 7517.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3004, pruned_loss=0.07305, over 1613458.58 frames. ], batch size: 18, lr: 5.40e-03, grad_scale: 8.0
+2023-02-06 15:39:32,195 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 15:39:35,213 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111486.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:39:40,674 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111494.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:40:05,256 INFO [train.py:901] (1/4) Epoch 14, batch 6450, loss[loss=0.2151, simple_loss=0.2891, pruned_loss=0.07049, over 8195.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2996, pruned_loss=0.07202, over 1613062.21 frames. ], batch size: 23, lr: 5.40e-03, grad_scale: 8.0
+2023-02-06 15:40:26,315 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.418e+02 3.184e+02 3.807e+02 1.482e+03, threshold=6.367e+02, percent-clipped=8.0
+2023-02-06 15:40:39,068 INFO [train.py:901] (1/4) Epoch 14, batch 6500, loss[loss=0.1681, simple_loss=0.2437, pruned_loss=0.04625, over 7688.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2999, pruned_loss=0.07186, over 1613899.63 frames. ], batch size: 18, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:40:44,376 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:40:55,099 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111601.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:41:12,089 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111626.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:41:14,602 INFO [train.py:901] (1/4) Epoch 14, batch 6550, loss[loss=0.2445, simple_loss=0.3286, pruned_loss=0.08024, over 8372.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3015, pruned_loss=0.07279, over 1614155.12 frames. ], batch size: 24, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:41:24,412 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 15:41:35,853 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 2.515e+02 3.055e+02 3.900e+02 7.605e+02, threshold=6.110e+02, percent-clipped=3.0
+2023-02-06 15:41:43,337 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 15:41:47,960 INFO [train.py:901] (1/4) Epoch 14, batch 6600, loss[loss=0.1904, simple_loss=0.2786, pruned_loss=0.05108, over 8255.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3032, pruned_loss=0.07383, over 1615420.33 frames. ], batch size: 24, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:41:55,628 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:42:03,622 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111702.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:42:14,397 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111716.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:42:23,465 INFO [train.py:901] (1/4) Epoch 14, batch 6650, loss[loss=0.2151, simple_loss=0.2926, pruned_loss=0.06878, over 8237.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3024, pruned_loss=0.07338, over 1614866.62 frames. ], batch size: 22, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:42:45,229 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.391e+02 3.105e+02 3.860e+02 7.189e+02, threshold=6.209e+02, percent-clipped=3.0
+2023-02-06 15:42:48,091 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9162, 2.2339, 1.8141, 2.8003, 1.3688, 1.4515, 1.8983, 2.3579],
+ device='cuda:1'), covar=tensor([0.0815, 0.0932, 0.1022, 0.0389, 0.1253, 0.1636, 0.1068, 0.0880],
+ device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0207, 0.0256, 0.0215, 0.0216, 0.0253, 0.0261, 0.0217],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 15:42:49,653 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 15:42:51,483 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0336, 2.4893, 2.8764, 1.4277, 3.0557, 1.7183, 1.4557, 1.8851],
+ device='cuda:1'), covar=tensor([0.0755, 0.0328, 0.0255, 0.0655, 0.0362, 0.0706, 0.0789, 0.0485],
+ device='cuda:1'), in_proj_covar=tensor([0.0409, 0.0354, 0.0302, 0.0405, 0.0339, 0.0493, 0.0369, 0.0376],
+ device='cuda:1'), out_proj_covar=tensor([1.1356e-04, 9.5802e-05, 8.1746e-05, 1.1006e-04, 9.2653e-05, 1.4440e-04,
+ 1.0266e-04, 1.0282e-04], device='cuda:1')
+2023-02-06 15:42:57,304 INFO [train.py:901] (1/4) Epoch 14, batch 6700, loss[loss=0.2359, simple_loss=0.3269, pruned_loss=0.0725, over 8315.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3024, pruned_loss=0.07378, over 1614176.35 frames. ], batch size: 25, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:43:16,620 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111809.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:43:32,519 INFO [train.py:901] (1/4) Epoch 14, batch 6750, loss[loss=0.2714, simple_loss=0.3478, pruned_loss=0.09749, over 8573.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3013, pruned_loss=0.07311, over 1612045.63 frames. ], batch size: 49, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:43:32,588 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:43:54,030 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.559e+02 3.020e+02 4.182e+02 1.269e+03, threshold=6.039e+02, percent-clipped=6.0
+2023-02-06 15:44:02,412 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 15:44:02,884 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 15:44:07,192 INFO [train.py:901] (1/4) Epoch 14, batch 6800, loss[loss=0.2423, simple_loss=0.3228, pruned_loss=0.08084, over 8288.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3016, pruned_loss=0.07328, over 1608309.07 frames. ], batch size: 23, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:44:40,418 INFO [train.py:901] (1/4) Epoch 14, batch 6850, loss[loss=0.2379, simple_loss=0.2932, pruned_loss=0.09125, over 7723.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3016, pruned_loss=0.07341, over 1609538.65 frames. ], batch size: 18, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:44:51,192 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 15:44:52,060 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111945.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:45:01,413 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111958.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:45:03,875 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.542e+02 3.126e+02 4.226e+02 8.027e+02, threshold=6.251e+02, percent-clipped=7.0
+2023-02-06 15:45:16,602 INFO [train.py:901] (1/4) Epoch 14, batch 6900, loss[loss=0.2702, simple_loss=0.3239, pruned_loss=0.1083, over 8254.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.302, pruned_loss=0.07337, over 1609867.65 frames. ], batch size: 22, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:45:18,708 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:45:50,586 INFO [train.py:901] (1/4) Epoch 14, batch 6950, loss[loss=0.2159, simple_loss=0.2927, pruned_loss=0.06951, over 7801.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3028, pruned_loss=0.07386, over 1612910.89 frames. ], batch size: 20, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:45:58,672 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 15:46:13,926 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.419e+02 2.987e+02 3.531e+02 6.552e+02, threshold=5.974e+02, percent-clipped=1.0
+2023-02-06 15:46:25,966 INFO [train.py:901] (1/4) Epoch 14, batch 7000, loss[loss=0.2104, simple_loss=0.2798, pruned_loss=0.07046, over 6897.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3022, pruned_loss=0.07373, over 1604280.54 frames. ], batch size: 15, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:46:28,779 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112084.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:46:28,799 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9539, 1.5890, 3.3172, 1.3962, 2.3390, 3.6124, 3.6892, 3.0862],
+ device='cuda:1'), covar=tensor([0.1036, 0.1541, 0.0324, 0.2058, 0.0935, 0.0226, 0.0429, 0.0604],
+ device='cuda:1'), in_proj_covar=tensor([0.0277, 0.0306, 0.0268, 0.0298, 0.0284, 0.0244, 0.0369, 0.0295],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:46:41,425 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8101, 3.8134, 3.4496, 1.8089, 3.3826, 3.4063, 3.4331, 3.1963],
+ device='cuda:1'), covar=tensor([0.1007, 0.0786, 0.1207, 0.4880, 0.1103, 0.1154, 0.1530, 0.0883],
+ device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0397, 0.0403, 0.0500, 0.0396, 0.0400, 0.0390, 0.0346],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 15:46:59,921 INFO [train.py:901] (1/4) Epoch 14, batch 7050, loss[loss=0.2196, simple_loss=0.2952, pruned_loss=0.07197, over 8445.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.302, pruned_loss=0.07356, over 1608476.19 frames. ], batch size: 27, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:47:15,926 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112153.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 15:47:21,802 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.608e+02 3.153e+02 4.211e+02 1.237e+03, threshold=6.307e+02, percent-clipped=12.0
+2023-02-06 15:47:35,266 INFO [train.py:901] (1/4) Epoch 14, batch 7100, loss[loss=0.2515, simple_loss=0.3204, pruned_loss=0.09135, over 8527.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3025, pruned_loss=0.07356, over 1613547.11 frames. ], batch size: 39, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:47:50,288 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112201.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:47:59,152 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-06 15:48:07,018 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5715, 2.6663, 1.9294, 2.3601, 2.1976, 1.4543, 2.0765, 2.3070],
+ device='cuda:1'), covar=tensor([0.1496, 0.0426, 0.1000, 0.0588, 0.0735, 0.1528, 0.1012, 0.0916],
+ device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0234, 0.0323, 0.0298, 0.0301, 0.0329, 0.0345, 0.0309],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:48:07,693 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112226.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:48:10,128 INFO [train.py:901] (1/4) Epoch 14, batch 7150, loss[loss=0.228, simple_loss=0.314, pruned_loss=0.07103, over 8188.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3019, pruned_loss=0.07292, over 1612850.55 frames. ], batch size: 23, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:48:16,272 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3412, 1.6236, 4.5418, 1.7808, 3.9738, 3.7229, 4.0737, 3.9631],
+ device='cuda:1'), covar=tensor([0.0561, 0.4137, 0.0525, 0.3617, 0.1108, 0.0978, 0.0560, 0.0608],
+ device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0599, 0.0625, 0.0566, 0.0638, 0.0550, 0.0542, 0.0603],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 15:48:31,544 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.371e+02 2.859e+02 3.664e+02 7.587e+02, threshold=5.717e+02, percent-clipped=3.0
+2023-02-06 15:48:35,315 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0
+2023-02-06 15:48:35,572 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112268.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:48:43,212 INFO [train.py:901] (1/4) Epoch 14, batch 7200, loss[loss=0.221, simple_loss=0.3023, pruned_loss=0.06979, over 8559.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.301, pruned_loss=0.07242, over 1614574.66 frames. ], batch size: 31, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:49:18,426 INFO [train.py:901] (1/4) Epoch 14, batch 7250, loss[loss=0.1927, simple_loss=0.261, pruned_loss=0.06222, over 7546.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3013, pruned_loss=0.07258, over 1617509.57 frames. ], batch size: 18, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:49:39,912 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.708e+02 3.212e+02 3.989e+02 8.387e+02, threshold=6.424e+02, percent-clipped=5.0
+2023-02-06 15:49:52,045 INFO [train.py:901] (1/4) Epoch 14, batch 7300, loss[loss=0.2028, simple_loss=0.2922, pruned_loss=0.05667, over 8460.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3021, pruned_loss=0.07337, over 1620852.39 frames. ], batch size: 25, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:49:54,806 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8267, 1.8413, 2.4489, 1.5775, 1.2155, 2.4534, 0.3290, 1.2474],
+ device='cuda:1'), covar=tensor([0.2319, 0.1541, 0.0353, 0.1954, 0.3962, 0.0359, 0.3135, 0.1967],
+ device='cuda:1'), in_proj_covar=tensor([0.0172, 0.0175, 0.0107, 0.0215, 0.0256, 0.0111, 0.0161, 0.0173],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 15:50:11,924 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112407.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:50:15,499 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 15:50:24,101 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5007, 2.7760, 1.8122, 2.2071, 2.2919, 1.4731, 2.0218, 2.1858],
+ device='cuda:1'), covar=tensor([0.1502, 0.0340, 0.1148, 0.0653, 0.0719, 0.1541, 0.1032, 0.0900],
+ device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0235, 0.0326, 0.0299, 0.0301, 0.0329, 0.0346, 0.0309],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:50:25,903 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-02-06 15:50:26,701 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112428.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:50:28,013 INFO [train.py:901] (1/4) Epoch 14, batch 7350, loss[loss=0.2378, simple_loss=0.3115, pruned_loss=0.08202, over 8463.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3022, pruned_loss=0.07372, over 1616688.05 frames. ], batch size: 27, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:50:35,536 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2112, 1.5032, 4.6307, 2.2456, 2.5690, 5.1649, 5.2920, 4.4820],
+ device='cuda:1'), covar=tensor([0.1094, 0.1707, 0.0226, 0.1706, 0.0964, 0.0160, 0.0367, 0.0556],
+ device='cuda:1'), in_proj_covar=tensor([0.0278, 0.0309, 0.0270, 0.0300, 0.0288, 0.0247, 0.0373, 0.0296],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 15:50:40,030 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 15:50:40,253 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5176, 1.9627, 2.9346, 1.3066, 2.2294, 1.6917, 1.7153, 1.9860],
+ device='cuda:1'), covar=tensor([0.1830, 0.2241, 0.0846, 0.4164, 0.1644, 0.3161, 0.1945, 0.2316],
+ device='cuda:1'), in_proj_covar=tensor([0.0496, 0.0552, 0.0535, 0.0597, 0.0621, 0.0565, 0.0491, 0.0618],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 15:50:49,907 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.462e+02 2.972e+02 3.682e+02 1.093e+03, threshold=5.943e+02, percent-clipped=5.0
+2023-02-06 15:50:59,946 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 15:51:02,056 INFO [train.py:901] (1/4) Epoch 14, batch 7400, loss[loss=0.2041, simple_loss=0.2963, pruned_loss=0.05589, over 8333.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3019, pruned_loss=0.07322, over 1617069.23 frames. ], batch size: 25, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:51:10,094 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.74 vs. limit=2.0
+2023-02-06 15:51:32,629 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112524.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 15:51:37,189 INFO [train.py:901] (1/4) Epoch 14, batch 7450, loss[loss=0.1796, simple_loss=0.2698, pruned_loss=0.04467, over 8137.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3025, pruned_loss=0.0738, over 1609674.94 frames. ], batch size: 22, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:51:41,785 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 15:51:46,958 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112543.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:51:51,013 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112549.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:51:59,933 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.473e+02 3.112e+02 3.710e+02 6.215e+02, threshold=6.224e+02, percent-clipped=1.0
+2023-02-06 15:52:13,223 INFO [train.py:901] (1/4) Epoch 14, batch 7500, loss[loss=0.225, simple_loss=0.3189, pruned_loss=0.0655, over 8637.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3016, pruned_loss=0.07307, over 1604767.60 frames. ], batch size: 34, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:52:13,407 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112580.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:52:23,802 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:52:47,712 INFO [train.py:901] (1/4) Epoch 14, batch 7550, loss[loss=0.2224, simple_loss=0.3019, pruned_loss=0.07145, over 8759.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3008, pruned_loss=0.07278, over 1606762.31 frames. ], batch size: 30, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:52:51,319 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112635.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:53:11,225 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.402e+02 2.890e+02 3.643e+02 7.164e+02, threshold=5.781e+02, percent-clipped=3.0
+2023-02-06 15:53:23,631 INFO [train.py:901] (1/4) Epoch 14, batch 7600, loss[loss=0.2237, simple_loss=0.307, pruned_loss=0.07015, over 8024.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3002, pruned_loss=0.0718, over 1610928.73 frames. ], batch size: 22, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:53:40,424 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1414, 1.7937, 2.0203, 1.8518, 1.2325, 1.8327, 2.4931, 2.4800],
+ device='cuda:1'), covar=tensor([0.0383, 0.1167, 0.1581, 0.1266, 0.0559, 0.1336, 0.0565, 0.0521],
+ device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0155, 0.0101, 0.0161, 0.0115, 0.0137],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 15:53:57,110 INFO [train.py:901] (1/4) Epoch 14, batch 7650, loss[loss=0.2072, simple_loss=0.2816, pruned_loss=0.06635, over 7251.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3006, pruned_loss=0.07188, over 1609813.06 frames. ], batch size: 16, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:54:11,086 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112751.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:54:18,467 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.546e+02 2.941e+02 3.649e+02 7.123e+02, threshold=5.882e+02, percent-clipped=5.0
+2023-02-06 15:54:32,364 INFO [train.py:901] (1/4) Epoch 14, batch 7700, loss[loss=0.1806, simple_loss=0.2747, pruned_loss=0.04327, over 7925.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3, pruned_loss=0.07203, over 1605032.79 frames. ], batch size: 20, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:54:44,372 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 15:54:45,559 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112799.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:54:52,888 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 15:55:03,122 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112824.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:55:06,903 INFO [train.py:901] (1/4) Epoch 14, batch 7750, loss[loss=0.2297, simple_loss=0.3074, pruned_loss=0.07603, over 8249.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3005, pruned_loss=0.07184, over 1605756.69 frames. ], batch size: 24, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:55:28,266 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.453e+02 3.172e+02 4.245e+02 8.131e+02, threshold=6.343e+02, percent-clipped=10.0
+2023-02-06 15:55:30,962 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112866.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:55:41,030 INFO [train.py:901] (1/4) Epoch 14, batch 7800, loss[loss=0.2581, simple_loss=0.3361, pruned_loss=0.09001, over 8551.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3002, pruned_loss=0.07136, over 1609521.15 frames. ], batch size: 39, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:56:12,169 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112924.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:16,243 INFO [train.py:901] (1/4) Epoch 14, batch 7850, loss[loss=0.2055, simple_loss=0.2861, pruned_loss=0.06246, over 8028.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3012, pruned_loss=0.07219, over 1612038.90 frames. ], batch size: 22, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:56:22,361 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112939.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:22,439 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112939.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:29,119 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5588, 1.9305, 2.1039, 1.2645, 2.1096, 1.4255, 0.5309, 1.8439],
+ device='cuda:1'), covar=tensor([0.0442, 0.0260, 0.0212, 0.0411, 0.0317, 0.0710, 0.0669, 0.0219],
+ device='cuda:1'), in_proj_covar=tensor([0.0415, 0.0355, 0.0302, 0.0407, 0.0342, 0.0495, 0.0368, 0.0380],
+ device='cuda:1'), out_proj_covar=tensor([1.1508e-04, 9.5920e-05, 8.1591e-05, 1.1046e-04, 9.3161e-05, 1.4483e-04,
+ 1.0230e-04, 1.0379e-04], device='cuda:1')
+2023-02-06 15:56:37,784 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.504e+02 3.067e+02 3.726e+02 7.698e+02, threshold=6.135e+02, percent-clipped=2.0
+2023-02-06 15:56:49,065 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:49,649 INFO [train.py:901] (1/4) Epoch 14, batch 7900, loss[loss=0.232, simple_loss=0.316, pruned_loss=0.07396, over 8322.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3017, pruned_loss=0.07217, over 1615074.87 frames. ], batch size: 25, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:57:22,239 INFO [train.py:901] (1/4) Epoch 14, batch 7950, loss[loss=0.2264, simple_loss=0.3166, pruned_loss=0.06806, over 8351.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3023, pruned_loss=0.07241, over 1621247.29 frames. ], batch size: 24, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:57:28,310 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113039.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:57:38,045 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113054.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:57:43,246 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.654e+02 3.191e+02 4.041e+02 1.304e+03, threshold=6.382e+02, percent-clipped=5.0
+2023-02-06 15:57:55,254 INFO [train.py:901] (1/4) Epoch 14, batch 8000, loss[loss=0.227, simple_loss=0.3028, pruned_loss=0.07561, over 7930.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3029, pruned_loss=0.0732, over 1623149.19 frames. ], batch size: 20, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:58:04,698 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:58:23,452 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:58:28,564 INFO [train.py:901] (1/4) Epoch 14, batch 8050, loss[loss=0.2104, simple_loss=0.2773, pruned_loss=0.07174, over 7263.00 frames.
], tot_loss[loss=0.2241, simple_loss=0.3013, pruned_loss=0.07343, over 1594109.26 frames. ], batch size: 16, lr: 5.36e-03, grad_scale: 16.0 +2023-02-06 15:58:38,807 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7565, 2.2651, 4.5144, 1.4450, 3.3167, 2.3262, 1.9481, 3.1380], + device='cuda:1'), covar=tensor([0.1774, 0.2389, 0.0751, 0.4377, 0.1523, 0.2914, 0.2057, 0.2134], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0554, 0.0538, 0.0606, 0.0626, 0.0569, 0.0495, 0.0625], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 15:58:40,125 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:49,778 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.323e+02 2.856e+02 3.288e+02 8.076e+02, threshold=5.712e+02, percent-clipped=1.0 +2023-02-06 15:59:01,666 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 15:59:06,246 INFO [train.py:901] (1/4) Epoch 15, batch 0, loss[loss=0.242, simple_loss=0.3087, pruned_loss=0.08762, over 8241.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3087, pruned_loss=0.08762, over 8241.00 frames. ], batch size: 22, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 15:59:06,246 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 15:59:17,269 INFO [train.py:935] (1/4) Epoch 15, validation: loss=0.1825, simple_loss=0.283, pruned_loss=0.04098, over 944034.00 frames. +2023-02-06 15:59:17,270 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 15:59:32,304 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 15:59:48,307 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.54 vs. limit=2.0 +2023-02-06 15:59:49,085 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 15:59:51,456 INFO [train.py:901] (1/4) Epoch 15, batch 50, loss[loss=0.225, simple_loss=0.3089, pruned_loss=0.07053, over 8244.00 frames. ], tot_loss[loss=0.22, simple_loss=0.3019, pruned_loss=0.06903, over 370578.86 frames. ], batch size: 22, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:00:08,699 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 16:00:21,171 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:00:27,789 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.549e+02 3.077e+02 3.582e+02 9.445e+02, threshold=6.153e+02, percent-clipped=5.0 +2023-02-06 16:00:28,497 INFO [train.py:901] (1/4) Epoch 15, batch 100, loss[loss=0.2043, simple_loss=0.273, pruned_loss=0.0678, over 7556.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3031, pruned_loss=0.0729, over 647668.76 frames. ], batch size: 18, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:00:29,918 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 16:00:39,303 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6265, 5.6566, 5.0115, 2.1460, 5.0179, 5.4562, 5.2898, 5.2610], + device='cuda:1'), covar=tensor([0.0583, 0.0416, 0.1034, 0.5160, 0.0827, 0.0785, 0.1057, 0.0553], + device='cuda:1'), in_proj_covar=tensor([0.0484, 0.0404, 0.0403, 0.0503, 0.0402, 0.0400, 0.0393, 0.0352], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 16:00:41,966 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:00:50,249 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:00,334 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113310.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:02,086 INFO [train.py:901] (1/4) Epoch 15, batch 150, loss[loss=0.2302, simple_loss=0.3016, pruned_loss=0.07936, over 8084.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3022, pruned_loss=0.07236, over 865066.56 frames. ], batch size: 21, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:01:06,868 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:17,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:28,778 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:32,820 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4890, 1.6681, 1.8570, 1.0889, 1.9758, 1.3095, 0.5509, 1.6965], + device='cuda:1'), covar=tensor([0.0435, 0.0266, 0.0212, 0.0404, 0.0261, 0.0653, 0.0627, 0.0194], + device='cuda:1'), in_proj_covar=tensor([0.0416, 0.0358, 0.0304, 0.0408, 0.0343, 0.0497, 0.0370, 0.0380], + device='cuda:1'), out_proj_covar=tensor([1.1544e-04, 9.6722e-05, 8.2157e-05, 1.1080e-04, 9.3424e-05, 1.4533e-04, + 1.0253e-04, 1.0406e-04], device='cuda:1') +2023-02-06 16:01:37,328 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.511e+02 3.032e+02 4.146e+02 1.005e+03, threshold=6.064e+02, percent-clipped=3.0 +2023-02-06 16:01:38,024 INFO [train.py:901] (1/4) Epoch 15, batch 200, loss[loss=0.2311, simple_loss=0.3106, pruned_loss=0.07579, over 8351.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.303, pruned_loss=0.07292, over 1033025.01 frames. ], batch size: 26, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:01:46,249 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:02:01,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:02:11,079 INFO [train.py:901] (1/4) Epoch 15, batch 250, loss[loss=0.2231, simple_loss=0.2959, pruned_loss=0.07513, over 7979.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3014, pruned_loss=0.07302, over 1160634.87 frames. ], batch size: 21, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:02:19,376 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-06 16:02:28,582 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 16:02:43,773 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.666e+02 3.062e+02 4.026e+02 8.735e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 16:02:44,420 INFO [train.py:901] (1/4) Epoch 15, batch 300, loss[loss=0.2653, simple_loss=0.3425, pruned_loss=0.094, over 8509.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3014, pruned_loss=0.0736, over 1258210.92 frames. ], batch size: 28, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:03:15,220 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 16:03:19,315 INFO [train.py:901] (1/4) Epoch 15, batch 350, loss[loss=0.1978, simple_loss=0.286, pruned_loss=0.05486, over 8474.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3024, pruned_loss=0.07353, over 1336931.70 frames. ], batch size: 25, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:03:52,038 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.415e+02 3.115e+02 3.728e+02 6.919e+02, threshold=6.229e+02, percent-clipped=2.0 +2023-02-06 16:03:52,738 INFO [train.py:901] (1/4) Epoch 15, batch 400, loss[loss=0.1882, simple_loss=0.2699, pruned_loss=0.05324, over 8090.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3023, pruned_loss=0.07363, over 1399321.81 frames. ], batch size: 21, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:04:17,470 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:04:28,817 INFO [train.py:901] (1/4) Epoch 15, batch 450, loss[loss=0.2138, simple_loss=0.2974, pruned_loss=0.06505, over 8516.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3008, pruned_loss=0.07218, over 1451830.91 frames. ], batch size: 26, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:04:30,184 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1542, 1.5032, 1.7044, 1.3576, 0.8740, 1.5017, 1.7574, 1.6388], + device='cuda:1'), covar=tensor([0.0465, 0.1159, 0.1645, 0.1358, 0.0626, 0.1431, 0.0666, 0.0604], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0155, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 16:04:43,277 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:04:56,096 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113654.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:01,032 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.616e+02 3.268e+02 4.141e+02 9.119e+02, threshold=6.536e+02, percent-clipped=2.0 +2023-02-06 16:05:01,751 INFO [train.py:901] (1/4) Epoch 15, batch 500, loss[loss=0.2001, simple_loss=0.2774, pruned_loss=0.06144, over 7534.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3001, pruned_loss=0.0713, over 1489879.07 frames. 
], batch size: 18, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:05:02,554 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:12,315 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113679.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:35,423 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113711.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:36,632 INFO [train.py:901] (1/4) Epoch 15, batch 550, loss[loss=0.2007, simple_loss=0.2711, pruned_loss=0.06516, over 7686.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3013, pruned_loss=0.07242, over 1514816.94 frames. ], batch size: 18, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:06:09,825 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.516e+02 3.119e+02 4.209e+02 9.524e+02, threshold=6.239e+02, percent-clipped=4.0 +2023-02-06 16:06:10,535 INFO [train.py:901] (1/4) Epoch 15, batch 600, loss[loss=0.2081, simple_loss=0.2979, pruned_loss=0.05918, over 8293.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3017, pruned_loss=0.07244, over 1536157.71 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:06:17,859 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0010, 1.6059, 3.3661, 1.4181, 2.4345, 3.6289, 3.6683, 3.1768], + device='cuda:1'), covar=tensor([0.0996, 0.1525, 0.0299, 0.1970, 0.0877, 0.0229, 0.0470, 0.0524], + device='cuda:1'), in_proj_covar=tensor([0.0278, 0.0308, 0.0273, 0.0301, 0.0288, 0.0247, 0.0377, 0.0299], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 16:06:24,211 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 16:06:44,318 INFO [train.py:901] (1/4) Epoch 15, batch 650, loss[loss=0.2155, simple_loss=0.2906, pruned_loss=0.07021, over 8568.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3017, pruned_loss=0.07253, over 1554845.79 frames. ], batch size: 31, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:07:01,571 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-06 16:07:19,223 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.270e+02 2.767e+02 3.649e+02 9.673e+02, threshold=5.535e+02, percent-clipped=4.0 +2023-02-06 16:07:19,890 INFO [train.py:901] (1/4) Epoch 15, batch 700, loss[loss=0.2371, simple_loss=0.3226, pruned_loss=0.07582, over 8331.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3006, pruned_loss=0.07157, over 1571460.62 frames. ], batch size: 25, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:07:28,851 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4068, 2.0180, 2.8468, 2.2688, 2.6332, 2.2537, 1.8983, 1.4390], + device='cuda:1'), covar=tensor([0.4414, 0.4363, 0.1524, 0.3072, 0.2262, 0.2570, 0.1902, 0.4698], + device='cuda:1'), in_proj_covar=tensor([0.0898, 0.0911, 0.0750, 0.0880, 0.0949, 0.0837, 0.0719, 0.0790], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 16:07:53,445 INFO [train.py:901] (1/4) Epoch 15, batch 750, loss[loss=0.212, simple_loss=0.2934, pruned_loss=0.06525, over 8457.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2995, pruned_loss=0.07166, over 1578964.21 frames. 
], batch size: 25, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:08:11,154 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 16:08:20,445 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 16:08:28,023 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6319, 1.8805, 1.5790, 2.1866, 1.0562, 1.4324, 1.7509, 1.8916], + device='cuda:1'), covar=tensor([0.0813, 0.0664, 0.0969, 0.0508, 0.1117, 0.1367, 0.0714, 0.0699], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0203, 0.0250, 0.0211, 0.0211, 0.0248, 0.0252, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 16:08:29,183 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 2.237e+02 2.791e+02 3.511e+02 6.350e+02, threshold=5.582e+02, percent-clipped=4.0 +2023-02-06 16:08:29,879 INFO [train.py:901] (1/4) Epoch 15, batch 800, loss[loss=0.221, simple_loss=0.3066, pruned_loss=0.06773, over 8105.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2984, pruned_loss=0.07099, over 1586145.65 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:08:32,837 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:08:40,773 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:08:50,090 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:09:01,972 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114008.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:09:05,305 INFO [train.py:901] (1/4) Epoch 15, batch 850, loss[loss=0.2063, simple_loss=0.2896, pruned_loss=0.06146, over 7978.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2982, pruned_loss=0.07045, over 1595477.96 frames. ], batch size: 21, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:09:12,127 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9658, 2.4542, 2.8273, 1.3863, 2.9638, 1.7139, 1.5239, 1.9479], + device='cuda:1'), covar=tensor([0.0773, 0.0308, 0.0206, 0.0655, 0.0300, 0.0732, 0.0792, 0.0472], + device='cuda:1'), in_proj_covar=tensor([0.0419, 0.0359, 0.0305, 0.0411, 0.0342, 0.0499, 0.0370, 0.0381], + device='cuda:1'), out_proj_covar=tensor([1.1633e-04, 9.6711e-05, 8.2020e-05, 1.1161e-04, 9.3094e-05, 1.4561e-04, + 1.0267e-04, 1.0432e-04], device='cuda:1') +2023-02-06 16:09:32,153 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0451, 1.4466, 1.6421, 1.3169, 0.9938, 1.4297, 1.6479, 1.3505], + device='cuda:1'), covar=tensor([0.0475, 0.1297, 0.1707, 0.1417, 0.0638, 0.1557, 0.0710, 0.0697], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0156, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 16:09:39,407 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.394e+02 2.826e+02 3.443e+02 6.296e+02, threshold=5.653e+02, percent-clipped=1.0 +2023-02-06 16:09:40,794 INFO [train.py:901] (1/4) Epoch 15, batch 900, loss[loss=0.2217, simple_loss=0.2997, pruned_loss=0.07191, over 8348.00 frames. 
], tot_loss[loss=0.2204, simple_loss=0.2991, pruned_loss=0.07089, over 1603354.98 frames. ], batch size: 26, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:09:45,805 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 16:09:54,020 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3649, 1.9548, 2.8720, 2.2629, 2.7709, 2.1792, 1.8922, 1.4878], + device='cuda:1'), covar=tensor([0.4566, 0.4738, 0.1399, 0.2996, 0.2132, 0.2701, 0.1834, 0.4795], + device='cuda:1'), in_proj_covar=tensor([0.0890, 0.0903, 0.0742, 0.0872, 0.0938, 0.0828, 0.0711, 0.0781], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 16:09:58,192 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.67 vs. limit=2.0 +2023-02-06 16:10:02,633 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:10:15,168 INFO [train.py:901] (1/4) Epoch 15, batch 950, loss[loss=0.1903, simple_loss=0.2761, pruned_loss=0.05227, over 8138.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2997, pruned_loss=0.0708, over 1608459.20 frames. ], batch size: 22, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:10:21,910 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:10:36,822 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114145.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:10:38,929 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4896, 1.4268, 1.7804, 1.3384, 1.0745, 1.8296, 0.1353, 1.0346], + device='cuda:1'), covar=tensor([0.2210, 0.1458, 0.0524, 0.1299, 0.3418, 0.0458, 0.2946, 0.1902], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0179, 0.0111, 0.0217, 0.0259, 0.0114, 0.0165, 0.0175], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 16:10:39,443 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 16:10:49,210 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.449e+02 2.913e+02 3.851e+02 8.356e+02, threshold=5.826e+02, percent-clipped=3.0 +2023-02-06 16:10:49,928 INFO [train.py:901] (1/4) Epoch 15, batch 1000, loss[loss=0.2376, simple_loss=0.3161, pruned_loss=0.07954, over 8099.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3008, pruned_loss=0.07099, over 1612251.81 frames. ], batch size: 23, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:11:04,286 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4978, 2.7923, 2.0121, 2.2121, 2.2400, 1.5524, 2.0730, 2.1477], + device='cuda:1'), covar=tensor([0.1527, 0.0328, 0.1054, 0.0671, 0.0701, 0.1400, 0.1010, 0.0922], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0233, 0.0326, 0.0299, 0.0303, 0.0327, 0.0343, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 16:11:14,202 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 16:11:25,584 INFO [train.py:901] (1/4) Epoch 15, batch 1050, loss[loss=0.2232, simple_loss=0.2935, pruned_loss=0.07646, over 7802.00 frames. 
], tot_loss[loss=0.221, simple_loss=0.3001, pruned_loss=0.07096, over 1607845.76 frames. ], batch size: 19, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:11:25,599 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 16:11:57,612 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.504e+02 3.058e+02 3.938e+02 1.189e+03, threshold=6.116e+02, percent-clipped=4.0 +2023-02-06 16:11:58,324 INFO [train.py:901] (1/4) Epoch 15, batch 1100, loss[loss=0.2019, simple_loss=0.2884, pruned_loss=0.05771, over 8474.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3004, pruned_loss=0.07149, over 1611592.46 frames. ], batch size: 49, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:12:11,164 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6747, 2.0378, 3.2687, 1.4196, 2.5451, 1.8873, 1.9150, 2.1881], + device='cuda:1'), covar=tensor([0.1856, 0.2497, 0.0890, 0.4672, 0.1703, 0.3380, 0.2091, 0.2506], + device='cuda:1'), in_proj_covar=tensor([0.0499, 0.0552, 0.0540, 0.0604, 0.0624, 0.0568, 0.0496, 0.0621], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 16:12:33,901 INFO [train.py:901] (1/4) Epoch 15, batch 1150, loss[loss=0.1872, simple_loss=0.2676, pruned_loss=0.05344, over 7537.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2976, pruned_loss=0.06997, over 1608966.77 frames. ], batch size: 18, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:12:38,620 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 16:12:59,528 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:07,347 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.463e+02 3.139e+02 3.955e+02 6.139e+02, threshold=6.277e+02, percent-clipped=1.0 +2023-02-06 16:13:07,972 INFO [train.py:901] (1/4) Epoch 15, batch 1200, loss[loss=0.191, simple_loss=0.2715, pruned_loss=0.05527, over 7539.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2974, pruned_loss=0.06971, over 1614360.34 frames. ], batch size: 18, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:13:16,131 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:18,735 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114379.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:30,065 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7160, 5.7888, 5.0654, 2.5543, 5.1288, 5.5381, 5.4434, 5.2599], + device='cuda:1'), covar=tensor([0.0542, 0.0376, 0.0892, 0.4060, 0.0641, 0.0706, 0.0898, 0.0565], + device='cuda:1'), in_proj_covar=tensor([0.0480, 0.0398, 0.0397, 0.0495, 0.0395, 0.0398, 0.0382, 0.0348], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 16:13:36,376 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114404.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:42,792 INFO [train.py:901] (1/4) Epoch 15, batch 1250, loss[loss=0.2588, simple_loss=0.3189, pruned_loss=0.09935, over 7653.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2984, pruned_loss=0.07019, over 1615606.86 frames. 
], batch size: 19, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:14:16,862 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.591e+02 3.148e+02 4.129e+02 1.085e+03, threshold=6.295e+02, percent-clipped=6.0 +2023-02-06 16:14:17,475 INFO [train.py:901] (1/4) Epoch 15, batch 1300, loss[loss=0.2376, simple_loss=0.3205, pruned_loss=0.07732, over 8187.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2977, pruned_loss=0.07009, over 1613052.71 frames. ], batch size: 23, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:14:35,224 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114489.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:14:51,246 INFO [train.py:901] (1/4) Epoch 15, batch 1350, loss[loss=0.1976, simple_loss=0.2654, pruned_loss=0.0649, over 7811.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.298, pruned_loss=0.0705, over 1614298.19 frames. ], batch size: 20, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:15:26,462 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.434e+02 2.903e+02 3.628e+02 5.826e+02, threshold=5.807e+02, percent-clipped=0.0 +2023-02-06 16:15:27,128 INFO [train.py:901] (1/4) Epoch 15, batch 1400, loss[loss=0.2261, simple_loss=0.3079, pruned_loss=0.07212, over 8513.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2988, pruned_loss=0.07124, over 1613204.70 frames. ], batch size: 26, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:15:54,552 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:16:00,687 INFO [train.py:901] (1/4) Epoch 15, batch 1450, loss[loss=0.1991, simple_loss=0.2836, pruned_loss=0.05723, over 8125.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2994, pruned_loss=0.07109, over 1613622.07 frames. ], batch size: 22, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:16:08,827 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 16:16:36,178 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.414e+02 3.068e+02 3.744e+02 6.619e+02, threshold=6.136e+02, percent-clipped=3.0 +2023-02-06 16:16:36,890 INFO [train.py:901] (1/4) Epoch 15, batch 1500, loss[loss=0.2198, simple_loss=0.2991, pruned_loss=0.07027, over 7920.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3005, pruned_loss=0.07155, over 1621004.65 frames. ], batch size: 20, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:17:11,514 INFO [train.py:901] (1/4) Epoch 15, batch 1550, loss[loss=0.2253, simple_loss=0.3095, pruned_loss=0.07056, over 8468.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3001, pruned_loss=0.07123, over 1625087.15 frames. ], batch size: 28, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:17:15,984 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. 
limit=2.0 +2023-02-06 16:17:20,035 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4637, 2.2929, 4.1311, 1.3632, 2.8628, 2.0425, 1.7931, 2.4813], + device='cuda:1'), covar=tensor([0.2214, 0.2865, 0.0931, 0.4944, 0.1925, 0.3579, 0.2370, 0.3160], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0551, 0.0539, 0.0602, 0.0623, 0.0566, 0.0494, 0.0624], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 16:17:26,187 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:17:45,705 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.278e+02 2.828e+02 3.736e+02 6.971e+02, threshold=5.655e+02, percent-clipped=1.0 +2023-02-06 16:17:46,444 INFO [train.py:901] (1/4) Epoch 15, batch 1600, loss[loss=0.2342, simple_loss=0.3091, pruned_loss=0.07969, over 8652.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2998, pruned_loss=0.0707, over 1627564.78 frames. ], batch size: 34, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:17:47,284 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6791, 2.2058, 3.1009, 1.9265, 1.7021, 3.1910, 0.7720, 2.0945], + device='cuda:1'), covar=tensor([0.1987, 0.1506, 0.0343, 0.2316, 0.3360, 0.0399, 0.2859, 0.1812], + device='cuda:1'), in_proj_covar=tensor([0.0173, 0.0178, 0.0110, 0.0217, 0.0260, 0.0114, 0.0164, 0.0175], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 16:18:22,445 INFO [train.py:901] (1/4) Epoch 15, batch 1650, loss[loss=0.2261, simple_loss=0.3059, pruned_loss=0.07321, over 8516.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2994, pruned_loss=0.07044, over 1627143.82 frames. ], batch size: 28, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:18:55,128 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:18:56,281 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 2.429e+02 2.845e+02 3.384e+02 6.803e+02, threshold=5.691e+02, percent-clipped=1.0 +2023-02-06 16:18:56,976 INFO [train.py:901] (1/4) Epoch 15, batch 1700, loss[loss=0.2213, simple_loss=0.2951, pruned_loss=0.07372, over 8243.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2986, pruned_loss=0.07007, over 1624323.33 frames. ], batch size: 24, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:19:12,814 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:19:16,114 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:19:32,924 INFO [train.py:901] (1/4) Epoch 15, batch 1750, loss[loss=0.2353, simple_loss=0.3114, pruned_loss=0.07965, over 8479.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2999, pruned_loss=0.07063, over 1623335.42 frames. 
], batch size: 25, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:19:45,240 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8316, 1.2816, 3.9685, 1.4379, 3.5088, 3.2491, 3.5630, 3.4450], + device='cuda:1'), covar=tensor([0.0633, 0.4492, 0.0610, 0.3806, 0.1181, 0.0969, 0.0647, 0.0739], + device='cuda:1'), in_proj_covar=tensor([0.0553, 0.0611, 0.0639, 0.0581, 0.0653, 0.0560, 0.0553, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 16:20:06,960 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.435e+02 3.025e+02 3.758e+02 7.531e+02, threshold=6.050e+02, percent-clipped=3.0 +2023-02-06 16:20:07,576 INFO [train.py:901] (1/4) Epoch 15, batch 1800, loss[loss=0.2751, simple_loss=0.3473, pruned_loss=0.1014, over 8455.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3014, pruned_loss=0.07115, over 1627171.72 frames. ], batch size: 27, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:20:43,789 INFO [train.py:901] (1/4) Epoch 15, batch 1850, loss[loss=0.1913, simple_loss=0.2683, pruned_loss=0.05717, over 6762.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3014, pruned_loss=0.07145, over 1623625.64 frames. ], batch size: 15, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:21:17,803 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.659e+02 3.189e+02 4.139e+02 1.250e+03, threshold=6.379e+02, percent-clipped=4.0 +2023-02-06 16:21:18,508 INFO [train.py:901] (1/4) Epoch 15, batch 1900, loss[loss=0.2021, simple_loss=0.2767, pruned_loss=0.06376, over 7777.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2995, pruned_loss=0.07049, over 1620620.54 frames. ], batch size: 19, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:21:28,839 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:21:46,754 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:21:50,093 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 16:21:53,560 INFO [train.py:901] (1/4) Epoch 15, batch 1950, loss[loss=0.1864, simple_loss=0.27, pruned_loss=0.05138, over 7926.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2984, pruned_loss=0.07023, over 1615500.70 frames. ], batch size: 20, lr: 5.13e-03, grad_scale: 32.0 +2023-02-06 16:22:04,500 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 16:22:11,847 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0646, 1.8186, 3.3986, 1.4365, 2.2921, 3.7105, 4.0059, 2.9646], + device='cuda:1'), covar=tensor([0.1134, 0.1706, 0.0382, 0.2268, 0.1200, 0.0296, 0.0456, 0.0759], + device='cuda:1'), in_proj_covar=tensor([0.0277, 0.0305, 0.0270, 0.0297, 0.0285, 0.0245, 0.0373, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 16:22:23,206 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 16:22:28,430 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.421e+02 3.112e+02 3.916e+02 6.433e+02, threshold=6.224e+02, percent-clipped=1.0 +2023-02-06 16:22:29,136 INFO [train.py:901] (1/4) Epoch 15, batch 2000, loss[loss=0.2453, simple_loss=0.3225, pruned_loss=0.08407, over 8462.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2983, pruned_loss=0.07016, over 1613800.83 frames. ], batch size: 27, lr: 5.13e-03, grad_scale: 32.0 +2023-02-06 16:22:47,916 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-06 16:22:49,753 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:22:54,588 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3599, 2.4275, 1.7377, 1.9310, 2.0222, 1.4698, 1.8332, 1.8476], + device='cuda:1'), covar=tensor([0.1398, 0.0346, 0.1212, 0.0615, 0.0641, 0.1510, 0.0928, 0.0894], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0229, 0.0324, 0.0302, 0.0300, 0.0330, 0.0345, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 16:22:58,872 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9524, 2.0684, 1.8498, 2.5282, 1.2012, 1.5635, 1.9340, 2.1173], + device='cuda:1'), covar=tensor([0.0707, 0.0809, 0.0890, 0.0423, 0.1123, 0.1193, 0.0769, 0.0758], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0203, 0.0248, 0.0211, 0.0211, 0.0247, 0.0253, 0.0212], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 16:23:03,503 INFO [train.py:901] (1/4) Epoch 15, batch 2050, loss[loss=0.2316, simple_loss=0.3034, pruned_loss=0.07989, over 8472.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2976, pruned_loss=0.07042, over 1612839.25 frames. ], batch size: 25, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:23:18,039 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115233.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:23:36,042 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 16:23:39,482 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.382e+02 2.963e+02 3.753e+02 6.860e+02, threshold=5.925e+02, percent-clipped=2.0 +2023-02-06 16:23:39,503 INFO [train.py:901] (1/4) Epoch 15, batch 2100, loss[loss=0.2292, simple_loss=0.3169, pruned_loss=0.07076, over 8201.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2982, pruned_loss=0.07036, over 1613505.39 frames. ], batch size: 23, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:24:05,867 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9377, 1.3217, 6.0141, 2.2670, 5.4822, 5.0296, 5.5460, 5.3386], + device='cuda:1'), covar=tensor([0.0400, 0.4800, 0.0332, 0.3263, 0.0823, 0.0804, 0.0431, 0.0478], + device='cuda:1'), in_proj_covar=tensor([0.0543, 0.0598, 0.0628, 0.0568, 0.0643, 0.0552, 0.0542, 0.0609], + device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 16:24:13,859 INFO [train.py:901] (1/4) Epoch 15, batch 2150, loss[loss=0.2212, simple_loss=0.3091, pruned_loss=0.06664, over 8564.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2996, pruned_loss=0.07079, over 1618845.37 frames. 
], batch size: 31, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:24:29,067 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5333, 1.9504, 2.0533, 1.1352, 2.1329, 1.4184, 0.5285, 1.8458], + device='cuda:1'), covar=tensor([0.0515, 0.0260, 0.0224, 0.0483, 0.0322, 0.0772, 0.0761, 0.0237], + device='cuda:1'), in_proj_covar=tensor([0.0427, 0.0365, 0.0312, 0.0420, 0.0349, 0.0507, 0.0380, 0.0392], + device='cuda:1'), out_proj_covar=tensor([1.1842e-04, 9.8345e-05, 8.3735e-05, 1.1414e-04, 9.4909e-05, 1.4778e-04, + 1.0527e-04, 1.0719e-04], device='cuda:1') +2023-02-06 16:24:37,891 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:24:49,119 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.532e+02 3.093e+02 4.065e+02 1.254e+03, threshold=6.185e+02, percent-clipped=7.0 +2023-02-06 16:24:49,139 INFO [train.py:901] (1/4) Epoch 15, batch 2200, loss[loss=0.2079, simple_loss=0.2804, pruned_loss=0.06772, over 8247.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2987, pruned_loss=0.07047, over 1620323.83 frames. ], batch size: 22, lr: 5.12e-03, grad_scale: 16.0 +2023-02-06 16:25:07,044 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:07,722 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:12,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0742, 1.7247, 3.0621, 1.4977, 2.2799, 3.2191, 3.3445, 2.7748], + device='cuda:1'), covar=tensor([0.0897, 0.1465, 0.0301, 0.1887, 0.0896, 0.0281, 0.0504, 0.0610], + device='cuda:1'), in_proj_covar=tensor([0.0278, 0.0309, 0.0272, 0.0300, 0.0287, 0.0248, 0.0378, 0.0300], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 16:25:24,252 INFO [train.py:901] (1/4) Epoch 15, batch 2250, loss[loss=0.169, simple_loss=0.2434, pruned_loss=0.04727, over 7440.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2983, pruned_loss=0.07072, over 1614673.83 frames. ], batch size: 17, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:25:48,107 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115448.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:48,945 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:58,281 INFO [train.py:901] (1/4) Epoch 15, batch 2300, loss[loss=0.2162, simple_loss=0.2924, pruned_loss=0.07001, over 8244.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2988, pruned_loss=0.07093, over 1608338.88 frames. 
], batch size: 22, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:25:58,960 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 2.502e+02 3.175e+02 3.927e+02 9.067e+02, threshold=6.350e+02, percent-clipped=5.0 +2023-02-06 16:26:04,775 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6048, 4.5951, 4.1090, 1.9978, 4.0804, 4.1663, 4.1431, 3.9231], + device='cuda:1'), covar=tensor([0.0714, 0.0513, 0.1086, 0.4617, 0.0907, 0.0998, 0.1190, 0.0823], + device='cuda:1'), in_proj_covar=tensor([0.0491, 0.0405, 0.0407, 0.0506, 0.0406, 0.0405, 0.0392, 0.0353], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 16:26:07,554 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:26:17,336 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6364, 1.4284, 1.6281, 1.2349, 0.8363, 1.3944, 1.4281, 1.2553], + device='cuda:1'), covar=tensor([0.0511, 0.1196, 0.1653, 0.1411, 0.0595, 0.1452, 0.0722, 0.0662], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0156, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 16:26:27,609 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6232, 2.0141, 3.2150, 1.3088, 2.2585, 2.0771, 1.6593, 2.2310], + device='cuda:1'), covar=tensor([0.1785, 0.2441, 0.0700, 0.4190, 0.1947, 0.2836, 0.2022, 0.2312], + device='cuda:1'), in_proj_covar=tensor([0.0495, 0.0548, 0.0539, 0.0601, 0.0621, 0.0564, 0.0493, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 16:26:34,675 INFO [train.py:901] (1/4) Epoch 15, batch 2350, loss[loss=0.1981, simple_loss=0.2765, pruned_loss=0.05987, over 7544.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.299, pruned_loss=0.07085, over 1610664.95 frames. ], batch size: 18, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:09,321 INFO [train.py:901] (1/4) Epoch 15, batch 2400, loss[loss=0.2094, simple_loss=0.3053, pruned_loss=0.05677, over 8504.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2988, pruned_loss=0.07117, over 1608831.26 frames. ], batch size: 26, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:09,509 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:10,002 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.542e+02 3.047e+02 3.524e+02 9.073e+02, threshold=6.095e+02, percent-clipped=1.0 +2023-02-06 16:27:39,713 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:45,574 INFO [train.py:901] (1/4) Epoch 15, batch 2450, loss[loss=0.2072, simple_loss=0.2923, pruned_loss=0.06108, over 8512.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2986, pruned_loss=0.07022, over 1612547.83 frames. ], batch size: 28, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:56,554 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:28:19,908 INFO [train.py:901] (1/4) Epoch 15, batch 2500, loss[loss=0.261, simple_loss=0.3258, pruned_loss=0.09816, over 8425.00 frames. 
], tot_loss[loss=0.2184, simple_loss=0.297, pruned_loss=0.06988, over 1611588.49 frames. ], batch size: 27, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:28:20,562 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.367e+02 2.686e+02 3.697e+02 9.165e+02, threshold=5.372e+02, percent-clipped=5.0
+2023-02-06 16:28:40,516 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9915, 1.4503, 1.5909, 1.2447, 0.8508, 1.3997, 1.7072, 1.7057],
+ device='cuda:1'), covar=tensor([0.0509, 0.1329, 0.1798, 0.1503, 0.0629, 0.1586, 0.0728, 0.0603],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0157, 0.0102, 0.0162, 0.0115, 0.0138],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 16:28:55,237 INFO [train.py:901] (1/4) Epoch 15, batch 2550, loss[loss=0.1884, simple_loss=0.2573, pruned_loss=0.0598, over 7710.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2978, pruned_loss=0.07035, over 1610836.43 frames. ], batch size: 18, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:29:05,128 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0
+2023-02-06 16:29:08,941 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115732.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:29:09,605 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115733.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:29:14,879 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7055, 1.7552, 2.2459, 1.5860, 1.0262, 2.2507, 0.2753, 1.3377],
+ device='cuda:1'), covar=tensor([0.2175, 0.1431, 0.0410, 0.1707, 0.4068, 0.0400, 0.3052, 0.1806],
+ device='cuda:1'), in_proj_covar=tensor([0.0171, 0.0176, 0.0110, 0.0214, 0.0258, 0.0115, 0.0163, 0.0174],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 16:29:30,403 INFO [train.py:901] (1/4) Epoch 15, batch 2600, loss[loss=0.203, simple_loss=0.2858, pruned_loss=0.06017, over 8296.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.298, pruned_loss=0.07052, over 1611858.31 frames. ], batch size: 23, lr: 5.12e-03, grad_scale: 8.0
+2023-02-06 16:29:31,074 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.427e+02 3.148e+02 3.839e+02 8.607e+02, threshold=6.296e+02, percent-clipped=3.0
+2023-02-06 16:30:04,163 INFO [train.py:901] (1/4) Epoch 15, batch 2650, loss[loss=0.1856, simple_loss=0.2612, pruned_loss=0.05498, over 7441.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2984, pruned_loss=0.07092, over 1607568.21 frames. ], batch size: 17, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:30:08,492 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115819.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:27,410 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115844.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:29,376 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115847.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:30,057 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:30:39,733 INFO [train.py:901] (1/4) Epoch 15, batch 2700, loss[loss=0.2582, simple_loss=0.3222, pruned_loss=0.09711, over 5559.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2978, pruned_loss=0.07032, over 1605496.91 frames. ], batch size: 12, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:30:40,394 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.337e+02 2.718e+02 3.606e+02 6.832e+02, threshold=5.436e+02, percent-clipped=3.0
+2023-02-06 16:31:12,012 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115910.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 16:31:13,854 INFO [train.py:901] (1/4) Epoch 15, batch 2750, loss[loss=0.2008, simple_loss=0.2869, pruned_loss=0.05736, over 8715.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2979, pruned_loss=0.06998, over 1611874.31 frames. ], batch size: 39, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:31:49,502 INFO [train.py:901] (1/4) Epoch 15, batch 2800, loss[loss=0.2128, simple_loss=0.283, pruned_loss=0.07133, over 8222.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2985, pruned_loss=0.07083, over 1606863.70 frames. ], batch size: 22, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:31:50,149 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.517e+02 2.986e+02 3.677e+02 9.071e+02, threshold=5.972e+02, percent-clipped=5.0
+2023-02-06 16:31:53,143 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5805, 2.0173, 3.3448, 1.4000, 2.3209, 2.0504, 1.5931, 2.4133],
+ device='cuda:1'), covar=tensor([0.1871, 0.2306, 0.0833, 0.4204, 0.1831, 0.2894, 0.2161, 0.2106],
+ device='cuda:1'), in_proj_covar=tensor([0.0496, 0.0551, 0.0541, 0.0601, 0.0624, 0.0567, 0.0496, 0.0619],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:32:16,425 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.05 vs. limit=5.0
+2023-02-06 16:32:24,935 INFO [train.py:901] (1/4) Epoch 15, batch 2850, loss[loss=0.2492, simple_loss=0.3322, pruned_loss=0.08312, over 8733.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2989, pruned_loss=0.07098, over 1609873.23 frames. ], batch size: 30, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:32:38,104 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:00,842 INFO [train.py:901] (1/4) Epoch 15, batch 2900, loss[loss=0.183, simple_loss=0.2632, pruned_loss=0.05141, over 7523.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2985, pruned_loss=0.07099, over 1609541.56 frames. ], batch size: 18, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:33:01,416 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.452e+02 2.959e+02 3.782e+02 6.842e+02, threshold=5.917e+02, percent-clipped=3.0
+2023-02-06 16:33:29,123 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116103.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:29,801 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116104.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:35,325 INFO [train.py:901] (1/4) Epoch 15, batch 2950, loss[loss=0.1967, simple_loss=0.2645, pruned_loss=0.06442, over 7441.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2987, pruned_loss=0.07082, over 1612852.30 frames. ], batch size: 17, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:33:36,699 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 16:33:42,121 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116123.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:44,354 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0
+2023-02-06 16:33:45,593 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116128.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:46,297 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116129.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:33:49,699 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:34:01,862 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1455, 4.0346, 2.5531, 2.7167, 2.8525, 2.0448, 2.6072, 2.9566],
+ device='cuda:1'), covar=tensor([0.1766, 0.0343, 0.0983, 0.0850, 0.0765, 0.1373, 0.1198, 0.1146],
+ device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0230, 0.0325, 0.0302, 0.0300, 0.0330, 0.0344, 0.0309],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 16:34:08,982 INFO [train.py:901] (1/4) Epoch 15, batch 3000, loss[loss=0.208, simple_loss=0.2849, pruned_loss=0.06555, over 7914.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2989, pruned_loss=0.07127, over 1611721.29 frames. ], batch size: 20, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:34:08,982 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 16:34:21,680 INFO [train.py:935] (1/4) Epoch 15, validation: loss=0.1808, simple_loss=0.2809, pruned_loss=0.04034, over 944034.00 frames.
+2023-02-06 16:34:21,681 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-06 16:34:22,362 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.534e+02 3.127e+02 3.845e+02 7.463e+02, threshold=6.253e+02, percent-clipped=8.0
+2023-02-06 16:34:29,498 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 16:34:55,445 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6700, 1.9508, 3.1544, 1.3597, 2.3870, 2.0106, 1.6212, 2.3312],
+ device='cuda:1'), covar=tensor([0.1685, 0.2200, 0.0843, 0.3988, 0.1551, 0.2754, 0.2031, 0.2072],
+ device='cuda:1'), in_proj_covar=tensor([0.0494, 0.0550, 0.0539, 0.0598, 0.0621, 0.0565, 0.0493, 0.0616],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:34:57,896 INFO [train.py:901] (1/4) Epoch 15, batch 3050, loss[loss=0.1748, simple_loss=0.2554, pruned_loss=0.0471, over 7540.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.298, pruned_loss=0.07089, over 1608058.47 frames. ], batch size: 18, lr: 5.11e-03, grad_scale: 8.0
+2023-02-06 16:35:09,549 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5226, 1.9689, 3.1947, 1.3193, 2.3394, 1.9374, 1.5898, 2.2987],
+ device='cuda:1'), covar=tensor([0.1805, 0.2396, 0.0770, 0.4159, 0.1613, 0.3008, 0.2033, 0.2234],
+ device='cuda:1'), in_proj_covar=tensor([0.0497, 0.0553, 0.0542, 0.0600, 0.0624, 0.0567, 0.0496, 0.0619],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:35:15,120 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 16:35:26,167 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116254.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 16:35:31,951 INFO [train.py:901] (1/4) Epoch 15, batch 3100, loss[loss=0.2484, simple_loss=0.329, pruned_loss=0.08387, over 8787.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2986, pruned_loss=0.07098, over 1612738.06 frames. ], batch size: 40, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:35:32,579 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.573e+02 3.095e+02 3.865e+02 1.142e+03, threshold=6.190e+02, percent-clipped=3.0
+2023-02-06 16:35:37,003 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0
+2023-02-06 16:35:46,421 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4659, 1.9107, 4.5060, 2.0459, 2.3007, 5.1121, 5.0819, 4.4746],
+ device='cuda:1'), covar=tensor([0.1034, 0.1548, 0.0262, 0.1775, 0.1219, 0.0171, 0.0393, 0.0515],
+ device='cuda:1'), in_proj_covar=tensor([0.0276, 0.0304, 0.0270, 0.0297, 0.0286, 0.0248, 0.0376, 0.0298],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 16:35:59,165 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0
+2023-02-06 16:36:06,932 INFO [train.py:901] (1/4) Epoch 15, batch 3150, loss[loss=0.2231, simple_loss=0.311, pruned_loss=0.06763, over 8185.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2975, pruned_loss=0.07036, over 1606687.87 frames. ], batch size: 23, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:36:15,222 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1973, 1.1454, 1.2928, 1.0765, 0.9552, 1.2923, 0.0224, 0.8623],
+ device='cuda:1'), covar=tensor([0.2035, 0.1464, 0.0546, 0.1080, 0.3218, 0.0575, 0.2950, 0.1699],
+ device='cuda:1'), in_proj_covar=tensor([0.0170, 0.0175, 0.0109, 0.0212, 0.0254, 0.0113, 0.0160, 0.0174],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 16:36:15,822 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2946, 2.2432, 2.1076, 1.2612, 2.0712, 2.0819, 2.0895, 1.9343],
+ device='cuda:1'), covar=tensor([0.1072, 0.0903, 0.1069, 0.3543, 0.0953, 0.1222, 0.1346, 0.1081],
+ device='cuda:1'), in_proj_covar=tensor([0.0483, 0.0398, 0.0402, 0.0497, 0.0394, 0.0396, 0.0387, 0.0347],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:36:27,180 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116341.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:36:30,072 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1946, 1.5325, 1.5295, 1.3683, 0.8781, 1.3422, 1.7394, 1.9387],
+ device='cuda:1'), covar=tensor([0.0469, 0.1274, 0.1745, 0.1436, 0.0625, 0.1551, 0.0715, 0.0522],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0156, 0.0101, 0.0162, 0.0115, 0.0139],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 16:36:41,971 INFO [train.py:901] (1/4) Epoch 15, batch 3200, loss[loss=0.23, simple_loss=0.3099, pruned_loss=0.07509, over 8097.00 frames. ], tot_loss[loss=0.221, simple_loss=0.299, pruned_loss=0.07147, over 1606494.63 frames. ], batch size: 23, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:36:43,343 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.524e+02 3.304e+02 3.942e+02 1.206e+03, threshold=6.608e+02, percent-clipped=2.0
+2023-02-06 16:36:46,721 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116369.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 16:36:51,222 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116376.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:36:53,705 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-06 16:37:06,193 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6100, 2.9906, 2.4804, 4.0902, 1.7512, 2.3072, 2.2569, 3.0796],
+ device='cuda:1'), covar=tensor([0.0662, 0.0781, 0.0861, 0.0226, 0.1146, 0.1273, 0.1131, 0.0811],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0203, 0.0250, 0.0212, 0.0211, 0.0250, 0.0257, 0.0214],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 16:37:12,413 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9824, 1.7804, 3.2975, 1.3937, 2.1652, 3.5780, 3.7020, 3.0843],
+ device='cuda:1'), covar=tensor([0.1023, 0.1449, 0.0308, 0.2009, 0.1024, 0.0238, 0.0508, 0.0568],
+ device='cuda:1'), in_proj_covar=tensor([0.0277, 0.0306, 0.0272, 0.0299, 0.0288, 0.0248, 0.0377, 0.0299],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 16:37:16,499 INFO [train.py:901] (1/4) Epoch 15, batch 3250, loss[loss=0.2157, simple_loss=0.3005, pruned_loss=0.0655, over 8133.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2992, pruned_loss=0.07131, over 1611106.77 frames. ], batch size: 22, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:37:52,484 INFO [train.py:901] (1/4) Epoch 15, batch 3300, loss[loss=0.2299, simple_loss=0.3032, pruned_loss=0.07828, over 7932.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2992, pruned_loss=0.07113, over 1612915.87 frames. ], batch size: 20, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:37:53,148 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.388e+02 2.875e+02 3.716e+02 9.209e+02, threshold=5.750e+02, percent-clipped=3.0
+2023-02-06 16:37:53,292 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:37:55,221 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116467.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:02,543 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116478.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:12,009 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116491.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:38:26,442 INFO [train.py:901] (1/4) Epoch 15, batch 3350, loss[loss=0.2236, simple_loss=0.3097, pruned_loss=0.06876, over 8183.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.299, pruned_loss=0.07111, over 1613010.87 frames. ], batch size: 23, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:38:33,231 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116523.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:39:02,049 INFO [train.py:901] (1/4) Epoch 15, batch 3400, loss[loss=0.1994, simple_loss=0.2705, pruned_loss=0.06418, over 7703.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2979, pruned_loss=0.07042, over 1610730.75 frames. ], batch size: 18, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:39:02,716 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.566e+02 3.149e+02 4.104e+02 8.501e+02, threshold=6.298e+02, percent-clipped=7.0
+2023-02-06 16:39:14,872 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116582.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:39:22,204 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116593.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:39:36,209 INFO [train.py:901] (1/4) Epoch 15, batch 3450, loss[loss=0.2464, simple_loss=0.3126, pruned_loss=0.09009, over 8472.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.299, pruned_loss=0.07066, over 1614383.91 frames. ], batch size: 27, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:39:44,410 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116625.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 16:39:51,719 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116636.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:40:01,117 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116650.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 16:40:03,771 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0742, 1.2464, 1.1832, 0.6207, 1.2209, 0.9716, 0.1267, 1.2272],
+ device='cuda:1'), covar=tensor([0.0355, 0.0286, 0.0269, 0.0439, 0.0320, 0.0779, 0.0655, 0.0245],
+ device='cuda:1'), in_proj_covar=tensor([0.0424, 0.0363, 0.0311, 0.0419, 0.0351, 0.0507, 0.0375, 0.0385],
+ device='cuda:1'), out_proj_covar=tensor([1.1716e-04, 9.7643e-05, 8.3560e-05, 1.1352e-04, 9.5440e-05, 1.4759e-04,
+ 1.0361e-04, 1.0489e-04], device='cuda:1')
+2023-02-06 16:40:10,038 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.07 vs. limit=5.0
+2023-02-06 16:40:10,175 INFO [train.py:901] (1/4) Epoch 15, batch 3500, loss[loss=0.2018, simple_loss=0.2967, pruned_loss=0.05348, over 7966.00 frames. ], tot_loss[loss=0.221, simple_loss=0.3002, pruned_loss=0.07097, over 1620113.90 frames. ], batch size: 21, lr: 5.10e-03, grad_scale: 8.0
+2023-02-06 16:40:10,857 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.398e+02 2.936e+02 3.935e+02 9.560e+02, threshold=5.871e+02, percent-clipped=3.0
+2023-02-06 16:40:13,168 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5818, 1.9800, 3.3212, 1.3536, 2.5446, 1.9774, 1.6976, 2.3496],
+ device='cuda:1'), covar=tensor([0.1942, 0.2500, 0.0820, 0.4502, 0.1765, 0.3148, 0.2151, 0.2419],
+ device='cuda:1'), in_proj_covar=tensor([0.0503, 0.0557, 0.0547, 0.0609, 0.0628, 0.0571, 0.0502, 0.0627],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:40:26,407 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116685.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:40:35,640 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 16:40:44,877 INFO [train.py:901] (1/4) Epoch 15, batch 3550, loss[loss=0.2211, simple_loss=0.3046, pruned_loss=0.06878, over 8327.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2996, pruned_loss=0.07084, over 1618790.56 frames. ], batch size: 25, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:40:50,941 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116722.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:06,498 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9870, 1.4599, 4.4746, 2.0760, 2.3707, 5.0574, 5.1163, 4.3893],
+ device='cuda:1'), covar=tensor([0.1179, 0.1771, 0.0249, 0.1881, 0.1160, 0.0180, 0.0355, 0.0512],
+ device='cuda:1'), in_proj_covar=tensor([0.0280, 0.0308, 0.0272, 0.0304, 0.0291, 0.0251, 0.0380, 0.0302],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 16:41:08,579 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116747.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:17,775 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 16:41:19,406 INFO [train.py:901] (1/4) Epoch 15, batch 3600, loss[loss=0.2219, simple_loss=0.2984, pruned_loss=0.07273, over 8627.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3011, pruned_loss=0.07162, over 1619970.97 frames. ], batch size: 39, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:41:20,116 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.627e+02 3.005e+02 3.918e+02 8.490e+02, threshold=6.010e+02, percent-clipped=4.0
+2023-02-06 16:41:25,803 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116772.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:45,870 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6733, 1.5909, 3.1920, 1.1453, 2.3017, 3.4961, 3.7822, 2.6344],
+ device='cuda:1'), covar=tensor([0.1607, 0.1934, 0.0494, 0.2873, 0.1146, 0.0454, 0.0627, 0.1203],
+ device='cuda:1'), in_proj_covar=tensor([0.0279, 0.0307, 0.0273, 0.0303, 0.0291, 0.0251, 0.0379, 0.0301],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 16:41:47,429 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116800.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:52,897 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116808.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:41:56,181 INFO [train.py:901] (1/4) Epoch 15, batch 3650, loss[loss=0.2271, simple_loss=0.2976, pruned_loss=0.07827, over 7702.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.299, pruned_loss=0.0706, over 1616069.71 frames. ], batch size: 18, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:41:56,581 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 16:42:00,909 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:13,591 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116838.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:21,044 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116849.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:30,282 INFO [train.py:901] (1/4) Epoch 15, batch 3700, loss[loss=0.2275, simple_loss=0.3034, pruned_loss=0.07578, over 8105.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.299, pruned_loss=0.07096, over 1612518.45 frames. ], batch size: 23, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:42:30,497 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:30,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.301e+02 2.797e+02 3.414e+02 8.630e+02, threshold=5.595e+02, percent-clipped=3.0
+2023-02-06 16:42:33,144 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116867.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:42:36,596 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 16:42:38,095 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:43:06,649 INFO [train.py:901] (1/4) Epoch 15, batch 3750, loss[loss=0.1609, simple_loss=0.2354, pruned_loss=0.04317, over 7412.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2977, pruned_loss=0.07031, over 1610282.09 frames. ], batch size: 17, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:43:13,675 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116923.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:43:14,068 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-06 16:43:40,818 INFO [train.py:901] (1/4) Epoch 15, batch 3800, loss[loss=0.2161, simple_loss=0.2848, pruned_loss=0.07365, over 7252.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2972, pruned_loss=0.07018, over 1608910.27 frames. ], batch size: 16, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:43:41,468 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.512e+02 2.989e+02 3.697e+02 7.171e+02, threshold=5.977e+02, percent-clipped=7.0
+2023-02-06 16:43:52,422 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116980.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:43:53,919 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116982.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:44:15,608 INFO [train.py:901] (1/4) Epoch 15, batch 3850, loss[loss=0.2475, simple_loss=0.3164, pruned_loss=0.08931, over 8624.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2972, pruned_loss=0.06967, over 1612079.46 frames. ], batch size: 49, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:44:42,564 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 16:44:46,214 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117056.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:44:46,552 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.72 vs. limit=5.0
+2023-02-06 16:44:50,972 INFO [train.py:901] (1/4) Epoch 15, batch 3900, loss[loss=0.1791, simple_loss=0.262, pruned_loss=0.04816, over 8135.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2984, pruned_loss=0.06993, over 1618358.94 frames. ], batch size: 22, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:44:51,622 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 2.428e+02 3.027e+02 3.797e+02 6.654e+02, threshold=6.053e+02, percent-clipped=2.0
+2023-02-06 16:44:53,045 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:03,688 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117081.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:12,955 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:17,722 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5068, 2.7120, 1.9365, 2.2300, 2.2314, 1.6163, 1.9414, 2.1430],
+ device='cuda:1'), covar=tensor([0.1484, 0.0368, 0.1129, 0.0663, 0.0717, 0.1415, 0.1115, 0.0923],
+ device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0233, 0.0327, 0.0306, 0.0303, 0.0333, 0.0349, 0.0311],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 16:45:24,920 INFO [train.py:901] (1/4) Epoch 15, batch 3950, loss[loss=0.1879, simple_loss=0.2637, pruned_loss=0.05609, over 7555.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2971, pruned_loss=0.06872, over 1617339.97 frames. ], batch size: 18, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:45:34,223 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 16:46:01,091 INFO [train.py:901] (1/4) Epoch 15, batch 4000, loss[loss=0.2364, simple_loss=0.3233, pruned_loss=0.07477, over 8322.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2996, pruned_loss=0.07036, over 1616541.17 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:46:01,784 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.473e+02 2.992e+02 3.534e+02 5.115e+02, threshold=5.984e+02, percent-clipped=0.0
+2023-02-06 16:46:01,906 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117164.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:12,549 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117179.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:13,870 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117181.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:15,175 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7425, 1.5404, 3.8916, 1.5459, 3.3647, 3.2044, 3.5345, 3.3895],
+ device='cuda:1'), covar=tensor([0.0730, 0.4229, 0.0797, 0.3819, 0.1440, 0.1118, 0.0678, 0.0835],
+ device='cuda:1'), in_proj_covar=tensor([0.0554, 0.0602, 0.0633, 0.0571, 0.0654, 0.0556, 0.0548, 0.0613],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 16:46:17,463 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-02-06 16:46:29,841 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117204.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:35,577 INFO [train.py:901] (1/4) Epoch 15, batch 4050, loss[loss=0.219, simple_loss=0.2875, pruned_loss=0.07521, over 8238.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2989, pruned_loss=0.07029, over 1613358.61 frames. ], batch size: 22, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:46:53,310 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:53,896 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117239.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:11,623 INFO [train.py:901] (1/4) Epoch 15, batch 4100, loss[loss=0.2276, simple_loss=0.307, pruned_loss=0.07407, over 8466.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.299, pruned_loss=0.07027, over 1611558.79 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:47:11,821 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:12,282 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.506e+02 3.096e+02 3.742e+02 9.544e+02, threshold=6.191e+02, percent-clipped=4.0
+2023-02-06 16:47:22,916 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117279.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:46,643 INFO [train.py:901] (1/4) Epoch 15, batch 4150, loss[loss=0.2348, simple_loss=0.316, pruned_loss=0.07683, over 8475.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2998, pruned_loss=0.07081, over 1609563.90 frames. ], batch size: 50, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:48:10,091 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:12,975 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117351.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:21,502 INFO [train.py:901] (1/4) Epoch 15, batch 4200, loss[loss=0.2061, simple_loss=0.2968, pruned_loss=0.05765, over 8284.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2986, pruned_loss=0.07044, over 1606795.39 frames. ], batch size: 23, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:48:22,820 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.404e+02 2.907e+02 3.383e+02 1.073e+03, threshold=5.814e+02, percent-clipped=1.0
+2023-02-06 16:48:31,972 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117376.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:40,573 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 16:48:48,359 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8669, 1.6394, 5.9938, 2.3516, 5.4031, 5.0655, 5.5290, 5.4032],
+ device='cuda:1'), covar=tensor([0.0456, 0.4761, 0.0395, 0.3475, 0.0890, 0.0797, 0.0471, 0.0480],
+ device='cuda:1'), in_proj_covar=tensor([0.0550, 0.0605, 0.0628, 0.0571, 0.0650, 0.0552, 0.0546, 0.0610],
+ device='cuda:1'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 16:48:57,042 INFO [train.py:901] (1/4) Epoch 15, batch 4250, loss[loss=0.2143, simple_loss=0.3017, pruned_loss=0.06345, over 8022.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2996, pruned_loss=0.071, over 1610892.39 frames. ], batch size: 22, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:49:03,725 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 16:49:14,102 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:49:30,900 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:49:31,426 INFO [train.py:901] (1/4) Epoch 15, batch 4300, loss[loss=0.2597, simple_loss=0.3415, pruned_loss=0.08891, over 8316.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3004, pruned_loss=0.07164, over 1615324.68 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:49:32,090 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.479e+02 3.115e+02 3.892e+02 7.815e+02, threshold=6.229e+02, percent-clipped=5.0
+2023-02-06 16:50:05,920 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 16:50:07,599 INFO [train.py:901] (1/4) Epoch 15, batch 4350, loss[loss=0.1972, simple_loss=0.2816, pruned_loss=0.05644, over 8082.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3006, pruned_loss=0.07128, over 1619034.84 frames. ], batch size: 21, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:50:23,045 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117535.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:36,326 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 16:50:40,556 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117560.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:42,361 INFO [train.py:901] (1/4) Epoch 15, batch 4400, loss[loss=0.2146, simple_loss=0.2915, pruned_loss=0.06886, over 7257.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3008, pruned_loss=0.07167, over 1611700.37 frames. ], batch size: 16, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:50:43,035 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.383e+02 3.124e+02 3.901e+02 9.506e+02, threshold=6.248e+02, percent-clipped=7.0
+2023-02-06 16:50:55,789 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:51:17,967 INFO [train.py:901] (1/4) Epoch 15, batch 4450, loss[loss=0.1883, simple_loss=0.2749, pruned_loss=0.05085, over 7984.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2998, pruned_loss=0.07113, over 1611792.40 frames. ], batch size: 21, lr: 5.07e-03, grad_scale: 16.0
+2023-02-06 16:51:17,986 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 16:51:52,096 INFO [train.py:901] (1/4) Epoch 15, batch 4500, loss[loss=0.2171, simple_loss=0.3033, pruned_loss=0.06547, over 8613.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3012, pruned_loss=0.0727, over 1611653.85 frames. ], batch size: 34, lr: 5.07e-03, grad_scale: 16.0
+2023-02-06 16:51:52,740 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.480e+02 2.963e+02 4.043e+02 1.091e+03, threshold=5.927e+02, percent-clipped=5.0
+2023-02-06 16:52:11,217 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:52:11,859 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 16:52:16,201 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:52:27,012 INFO [train.py:901] (1/4) Epoch 15, batch 4550, loss[loss=0.2087, simple_loss=0.3025, pruned_loss=0.05739, over 8499.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3001, pruned_loss=0.07168, over 1609434.70 frames. ], batch size: 29, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:52:32,133 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 16:52:42,013 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0951, 4.0021, 3.6188, 1.9104, 3.5374, 3.6551, 3.7098, 3.4613],
+ device='cuda:1'), covar=tensor([0.0849, 0.0705, 0.1077, 0.5183, 0.0917, 0.1159, 0.1343, 0.0942],
+ device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0403, 0.0405, 0.0503, 0.0396, 0.0405, 0.0387, 0.0350],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:53:02,114 INFO [train.py:901] (1/4) Epoch 15, batch 4600, loss[loss=0.236, simple_loss=0.3207, pruned_loss=0.07568, over 8252.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2996, pruned_loss=0.07169, over 1610386.17 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:53:03,483 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.311e+02 2.848e+02 3.671e+02 5.923e+02, threshold=5.697e+02, percent-clipped=0.0
+2023-02-06 16:53:26,848 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6431, 1.8857, 2.0251, 1.2095, 2.1277, 1.4019, 0.5072, 1.8609],
+ device='cuda:1'), covar=tensor([0.0467, 0.0290, 0.0244, 0.0469, 0.0325, 0.0791, 0.0694, 0.0221],
+ device='cuda:1'), in_proj_covar=tensor([0.0419, 0.0360, 0.0311, 0.0415, 0.0345, 0.0506, 0.0375, 0.0382],
+ device='cuda:1'), out_proj_covar=tensor([1.1561e-04, 9.6923e-05, 8.3245e-05, 1.1243e-04, 9.3747e-05, 1.4756e-04,
+ 1.0351e-04, 1.0386e-04], device='cuda:1')
+2023-02-06 16:53:31,663 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117806.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:53:36,046 INFO [train.py:901] (1/4) Epoch 15, batch 4650, loss[loss=0.2121, simple_loss=0.2821, pruned_loss=0.07105, over 7443.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2985, pruned_loss=0.07122, over 1611076.60 frames. ], batch size: 17, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:53:58,334 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1522, 4.0863, 3.7176, 1.7991, 3.6179, 3.7434, 3.7589, 3.5029],
+ device='cuda:1'), covar=tensor([0.0854, 0.0652, 0.1195, 0.5196, 0.0955, 0.0970, 0.1337, 0.0864],
+ device='cuda:1'), in_proj_covar=tensor([0.0490, 0.0407, 0.0411, 0.0510, 0.0402, 0.0409, 0.0391, 0.0353],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:54:11,643 INFO [train.py:901] (1/4) Epoch 15, batch 4700, loss[loss=0.2219, simple_loss=0.3036, pruned_loss=0.07012, over 8240.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2979, pruned_loss=0.07075, over 1610243.35 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:54:11,861 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5151, 1.9031, 3.3094, 1.3653, 2.4614, 2.0160, 1.5628, 2.4178],
+ device='cuda:1'), covar=tensor([0.1835, 0.2261, 0.0650, 0.3792, 0.1631, 0.2726, 0.2042, 0.2102],
+ device='cuda:1'), in_proj_covar=tensor([0.0496, 0.0549, 0.0534, 0.0597, 0.0620, 0.0563, 0.0491, 0.0616],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:54:12,891 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.509e+02 3.109e+02 4.231e+02 8.316e+02, threshold=6.217e+02, percent-clipped=12.0
+2023-02-06 16:54:21,295 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0
+2023-02-06 16:54:46,558 INFO [train.py:901] (1/4) Epoch 15, batch 4750, loss[loss=0.2121, simple_loss=0.2919, pruned_loss=0.06613, over 8240.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2988, pruned_loss=0.07147, over 1608870.38 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:55:11,978 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 16:55:15,302 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 16:55:16,047 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:55:22,513 INFO [train.py:901] (1/4) Epoch 15, batch 4800, loss[loss=0.2593, simple_loss=0.3228, pruned_loss=0.09795, over 7923.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2993, pruned_loss=0.07151, over 1607548.33 frames. ], batch size: 20, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:55:23,940 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.482e+02 3.121e+02 4.555e+02 1.692e+03, threshold=6.242e+02, percent-clipped=8.0
+2023-02-06 16:55:33,842 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:55:57,731 INFO [train.py:901] (1/4) Epoch 15, batch 4850, loss[loss=0.2026, simple_loss=0.2998, pruned_loss=0.05265, over 8247.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2996, pruned_loss=0.07161, over 1608927.11 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:56:07,039 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 16:56:29,206 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118058.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:56:29,474 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 16:56:31,981 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:56:32,444 INFO [train.py:901] (1/4) Epoch 15, batch 4900, loss[loss=0.2052, simple_loss=0.284, pruned_loss=0.06318, over 7424.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2986, pruned_loss=0.07089, over 1611293.73 frames. ], batch size: 17, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:56:33,723 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.453e+02 2.951e+02 3.688e+02 9.605e+02, threshold=5.903e+02, percent-clipped=5.0
+2023-02-06 16:56:50,260 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:56:56,421 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8876, 1.6068, 2.0412, 1.7672, 1.9452, 1.8630, 1.6348, 0.7563],
+ device='cuda:1'), covar=tensor([0.4928, 0.4161, 0.1560, 0.3055, 0.2133, 0.2586, 0.1871, 0.4482],
+ device='cuda:1'), in_proj_covar=tensor([0.0908, 0.0914, 0.0752, 0.0882, 0.0950, 0.0838, 0.0717, 0.0793],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 16:57:07,609 INFO [train.py:901] (1/4) Epoch 15, batch 4950, loss[loss=0.2035, simple_loss=0.277, pruned_loss=0.06497, over 8243.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2984, pruned_loss=0.07072, over 1609305.36 frames. ], batch size: 22, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:57:33,427 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2422, 4.1733, 3.7615, 2.2043, 3.6858, 3.8488, 3.9000, 3.5307],
+ device='cuda:1'), covar=tensor([0.0856, 0.0696, 0.1081, 0.4317, 0.0919, 0.1028, 0.1259, 0.0914],
+ device='cuda:1'), in_proj_covar=tensor([0.0484, 0.0405, 0.0408, 0.0507, 0.0399, 0.0407, 0.0388, 0.0351],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 16:57:42,121 INFO [train.py:901] (1/4) Epoch 15, batch 5000, loss[loss=0.2171, simple_loss=0.297, pruned_loss=0.06858, over 8440.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2987, pruned_loss=0.0706, over 1607222.49 frames. ], batch size: 27, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:57:43,371 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.421e+02 2.910e+02 3.813e+02 6.624e+02, threshold=5.820e+02, percent-clipped=4.0
+2023-02-06 16:57:58,592 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118186.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:58:17,622 INFO [train.py:901] (1/4) Epoch 15, batch 5050, loss[loss=0.2121, simple_loss=0.3064, pruned_loss=0.05895, over 8510.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2986, pruned_loss=0.07075, over 1606328.76 frames. ], batch size: 28, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:58:23,569 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 16:58:43,446 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 16:58:52,552 INFO [train.py:901] (1/4) Epoch 15, batch 5100, loss[loss=0.2051, simple_loss=0.2892, pruned_loss=0.06044, over 7976.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2996, pruned_loss=0.0711, over 1611982.55 frames. ], batch size: 21, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:58:53,829 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.591e+02 3.125e+02 3.877e+02 7.785e+02, threshold=6.249e+02, percent-clipped=4.0
+2023-02-06 16:58:57,514 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-06 16:59:09,116 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118287.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:59:23,820 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118307.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:59:27,781 INFO [train.py:901] (1/4) Epoch 15, batch 5150, loss[loss=0.218, simple_loss=0.2985, pruned_loss=0.06874, over 8292.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2994, pruned_loss=0.07132, over 1607774.76 frames. ], batch size: 23, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:59:51,112 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:00:02,420 INFO [train.py:901] (1/4) Epoch 15, batch 5200, loss[loss=0.1864, simple_loss=0.2554, pruned_loss=0.05869, over 7526.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3, pruned_loss=0.07188, over 1607583.23 frames. ], batch size: 18, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:00:03,701 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.269e+02 2.811e+02 3.673e+02 9.088e+02, threshold=5.623e+02, percent-clipped=2.0
+2023-02-06 17:00:29,669 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:00:37,916 INFO [train.py:901] (1/4) Epoch 15, batch 5250, loss[loss=0.246, simple_loss=0.3211, pruned_loss=0.08545, over 8611.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2988, pruned_loss=0.07118, over 1607396.34 frames. ], batch size: 31, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:00:46,149 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 17:00:56,282 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5515, 2.0139, 3.2511, 1.3641, 2.3310, 1.9018, 1.5825, 2.4133],
+ device='cuda:1'), covar=tensor([0.1847, 0.2284, 0.0701, 0.4206, 0.1704, 0.2946, 0.2104, 0.2078],
+ device='cuda:1'), in_proj_covar=tensor([0.0496, 0.0550, 0.0534, 0.0603, 0.0623, 0.0563, 0.0493, 0.0614],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 17:01:02,215 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3178, 1.7535, 1.2762, 2.6984, 1.3260, 1.0850, 1.9088, 1.9620],
+ device='cuda:1'), covar=tensor([0.1866, 0.1499, 0.2270, 0.0453, 0.1526, 0.2384, 0.1247, 0.1266],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0203, 0.0248, 0.0211, 0.0211, 0.0245, 0.0253, 0.0212],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 17:01:08,330 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8411, 3.7657, 3.4476, 1.8120, 3.3882, 3.4261, 3.5122, 3.2323],
+ device='cuda:1'), covar=tensor([0.0886, 0.0601, 0.1047, 0.4499, 0.0889, 0.1170, 0.1209, 0.0955],
+ device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0400, 0.0407, 0.0503, 0.0399, 0.0409, 0.0387, 0.0352],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 17:01:12,972 INFO [train.py:901] (1/4) Epoch 15, batch 5300, loss[loss=0.1952, simple_loss=0.2742, pruned_loss=0.0581, over 7664.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3003, pruned_loss=0.07194, over 1609946.31 frames. ], batch size: 19, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:01:14,346 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.534e+02 2.995e+02 3.765e+02 8.916e+02, threshold=5.991e+02, percent-clipped=4.0
+2023-02-06 17:01:28,654 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5499, 2.7625, 1.7753, 2.1685, 2.2235, 1.5602, 2.1094, 2.0882],
+ device='cuda:1'), covar=tensor([0.1568, 0.0360, 0.1159, 0.0753, 0.0763, 0.1463, 0.1023, 0.1081],
+ device='cuda:1'), in_proj_covar=tensor([0.0345, 0.0228, 0.0323, 0.0298, 0.0299, 0.0327, 0.0342, 0.0308],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 17:01:47,933 INFO [train.py:901] (1/4) Epoch 15, batch 5350, loss[loss=0.2026, simple_loss=0.2741, pruned_loss=0.06554, over 7543.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2997, pruned_loss=0.07183, over 1608899.16 frames. ], batch size: 18, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:01:50,891 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:02:01,058 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118530.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:02:24,459 INFO [train.py:901] (1/4) Epoch 15, batch 5400, loss[loss=0.2225, simple_loss=0.2927, pruned_loss=0.07615, over 7804.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2993, pruned_loss=0.07151, over 1611614.21 frames. ], batch size: 19, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:02:25,794 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.478e+02 2.903e+02 3.717e+02 8.291e+02, threshold=5.806e+02, percent-clipped=5.0
+2023-02-06 17:02:58,963 INFO [train.py:901] (1/4) Epoch 15, batch 5450, loss[loss=0.2499, simple_loss=0.322, pruned_loss=0.08888, over 8488.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2997, pruned_loss=0.07187, over 1610736.32 frames. ], batch size: 29, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:03:11,227 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118631.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:21,561 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:23,679 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6856, 1.8243, 1.6110, 2.2944, 1.0585, 1.3757, 1.6769, 1.9088],
+ device='cuda:1'), covar=tensor([0.0751, 0.0714, 0.0948, 0.0421, 0.1017, 0.1344, 0.0755, 0.0656],
+ device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0204, 0.0250, 0.0213, 0.0212, 0.0247, 0.0254, 0.0214],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 17:03:24,943 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118649.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:26,161 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:34,867 INFO [train.py:901] (1/4) Epoch 15, batch 5500, loss[loss=0.2334, simple_loss=0.317, pruned_loss=0.07496, over 8701.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3008, pruned_loss=0.07236, over 1615461.23 frames. ], batch size: 34, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:03:36,245 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.592e+02 3.113e+02 3.610e+02 8.755e+02, threshold=6.227e+02, percent-clipped=2.0
+2023-02-06 17:03:38,394 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 17:03:54,399 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:09,097 INFO [train.py:901] (1/4) Epoch 15, batch 5550, loss[loss=0.1758, simple_loss=0.2562, pruned_loss=0.04768, over 7795.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2994, pruned_loss=0.07155, over 1617728.62 frames. ], batch size: 19, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:04:26,173 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.11 vs. limit=5.0
+2023-02-06 17:04:29,567 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7213, 2.0870, 2.2661, 1.2867, 2.3180, 1.5898, 0.7049, 1.9104],
+ device='cuda:1'), covar=tensor([0.0506, 0.0236, 0.0239, 0.0526, 0.0335, 0.0690, 0.0715, 0.0264],
+ device='cuda:1'), in_proj_covar=tensor([0.0414, 0.0357, 0.0307, 0.0412, 0.0342, 0.0501, 0.0370, 0.0379],
+ device='cuda:1'), out_proj_covar=tensor([1.1430e-04, 9.5911e-05, 8.2284e-05, 1.1148e-04, 9.2555e-05, 1.4585e-04,
+ 1.0180e-04, 1.0298e-04], device='cuda:1')
+2023-02-06 17:04:32,286 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118746.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:44,988 INFO [train.py:901] (1/4) Epoch 15, batch 5600, loss[loss=0.2502, simple_loss=0.3265, pruned_loss=0.08693, over 8337.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3002, pruned_loss=0.07127, over 1621938.91 frames. ], batch size: 25, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:04:46,299 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.537e+02 3.218e+02 3.925e+02 9.216e+02, threshold=6.435e+02, percent-clipped=4.0
+2023-02-06 17:04:47,203 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118766.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:52,581 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118773.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:56,549 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4842, 2.3037, 3.2736, 2.1033, 2.8397, 3.5845, 3.4916, 3.2603],
+ device='cuda:1'), covar=tensor([0.0754, 0.1099, 0.0483, 0.1506, 0.1026, 0.0187, 0.0567, 0.0373],
+ device='cuda:1'), in_proj_covar=tensor([0.0276, 0.0306, 0.0269, 0.0300, 0.0287, 0.0247, 0.0374, 0.0298],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 17:05:09,079 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118798.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:05:14,494 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118806.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:05:19,155 INFO [train.py:901] (1/4) Epoch 15, batch 5650, loss[loss=0.2571, simple_loss=0.3263, pruned_loss=0.09397, over 8452.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3007, pruned_loss=0.07186, over 1622656.19 frames. ], batch size: 27, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:05:43,439 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 17:05:53,491 INFO [train.py:901] (1/4) Epoch 15, batch 5700, loss[loss=0.1896, simple_loss=0.2605, pruned_loss=0.05939, over 7538.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2995, pruned_loss=0.07132, over 1617099.82 frames. ], batch size: 18, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:05:54,817 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.491e+02 2.972e+02 3.726e+02 7.690e+02, threshold=5.944e+02, percent-clipped=5.0
+2023-02-06 17:05:57,427 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0
+2023-02-06 17:06:21,157 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118901.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:06:29,112 INFO [train.py:901] (1/4) Epoch 15, batch 5750, loss[loss=0.2096, simple_loss=0.2826, pruned_loss=0.06832, over 7810.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.299, pruned_loss=0.07113, over 1617104.96 frames. ], batch size: 20, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:06:38,212 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118926.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:06:46,363 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 17:06:51,335 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6601, 1.5664, 2.0674, 1.3951, 1.0927, 2.0774, 0.4057, 1.1867],
+ device='cuda:1'), covar=tensor([0.2048, 0.1403, 0.0472, 0.1450, 0.3514, 0.0449, 0.2668, 0.1658],
+ device='cuda:1'), in_proj_covar=tensor([0.0170, 0.0175, 0.0110, 0.0210, 0.0254, 0.0113, 0.0160, 0.0171],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 17:07:04,191 INFO [train.py:901] (1/4) Epoch 15, batch 5800, loss[loss=0.1751, simple_loss=0.2444, pruned_loss=0.05292, over 7710.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2981, pruned_loss=0.07021, over 1616747.84 frames. ], batch size: 18, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:07:05,538 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.317e+02 2.944e+02 4.100e+02 6.996e+02, threshold=5.887e+02, percent-clipped=4.0
+2023-02-06 17:07:26,177 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118993.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:07:32,104 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119002.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:07:39,877 INFO [train.py:901] (1/4) Epoch 15, batch 5850, loss[loss=0.2256, simple_loss=0.3083, pruned_loss=0.07151, over 8355.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2981, pruned_loss=0.06997, over 1619192.21 frames. ], batch size: 24, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:07:46,177 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119022.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:07:49,460 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119027.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:02,801 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:13,677 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:14,146 INFO [train.py:901] (1/4) Epoch 15, batch 5900, loss[loss=0.2207, simple_loss=0.3064, pruned_loss=0.06747, over 8360.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2987, pruned_loss=0.07035, over 1620681.16 frames. ], batch size: 24, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:08:15,366 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.486e+02 2.938e+02 3.942e+02 7.909e+02, threshold=5.877e+02, percent-clipped=6.0
+2023-02-06 17:08:30,156 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:34,144 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119093.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:44,788 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:46,724 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119111.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:47,875 INFO [train.py:901] (1/4) Epoch 15, batch 5950, loss[loss=0.1832, simple_loss=0.2696, pruned_loss=0.04836, over 8246.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2981, pruned_loss=0.07027, over 1622616.55 frames. ], batch size: 22, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:09:21,119 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4840, 1.3665, 2.2865, 1.1938, 1.9127, 2.4377, 2.5558, 2.0706],
+ device='cuda:1'), covar=tensor([0.0917, 0.1276, 0.0482, 0.2104, 0.0859, 0.0403, 0.0681, 0.0782],
+ device='cuda:1'), in_proj_covar=tensor([0.0276, 0.0306, 0.0269, 0.0299, 0.0286, 0.0247, 0.0374, 0.0298],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 17:09:22,903 INFO [train.py:901] (1/4) Epoch 15, batch 6000, loss[loss=0.2619, simple_loss=0.3298, pruned_loss=0.09697, over 7439.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2978, pruned_loss=0.07019, over 1615457.11 frames. ], batch size: 17, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:09:22,903 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 17:09:35,678 INFO [train.py:935] (1/4) Epoch 15, validation: loss=0.181, simple_loss=0.2808, pruned_loss=0.04056, over 944034.00 frames.
+2023-02-06 17:09:35,679 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-06 17:09:37,104 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.578e+02 3.120e+02 3.956e+02 1.218e+03, threshold=6.240e+02, percent-clipped=5.0
+2023-02-06 17:10:10,481 INFO [train.py:901] (1/4) Epoch 15, batch 6050, loss[loss=0.2912, simple_loss=0.3496, pruned_loss=0.1164, over 8573.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2971, pruned_loss=0.06982, over 1609977.12 frames. ], batch size: 49, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:10:35,628 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.51 vs. limit=5.0
+2023-02-06 17:10:44,316 INFO [train.py:901] (1/4) Epoch 15, batch 6100, loss[loss=0.193, simple_loss=0.2714, pruned_loss=0.05728, over 8071.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2974, pruned_loss=0.07001, over 1610937.32 frames. ], batch size: 21, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:10:45,651 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.463e+02 3.114e+02 4.132e+02 8.492e+02, threshold=6.229e+02, percent-clipped=7.0
+2023-02-06 17:10:58,745 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0216, 2.3099, 1.9852, 2.9137, 1.3837, 1.7514, 2.2295, 2.4526],
+ device='cuda:1'), covar=tensor([0.0764, 0.0816, 0.0864, 0.0338, 0.1139, 0.1298, 0.0846, 0.0721],
+ device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0204, 0.0249, 0.0213, 0.0212, 0.0250, 0.0255, 0.0215],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 17:11:18,272 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 17:11:20,332 INFO [train.py:901] (1/4) Epoch 15, batch 6150, loss[loss=0.2523, simple_loss=0.3397, pruned_loss=0.08243, over 8706.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2977, pruned_loss=0.07009, over 1612006.41 frames. ], batch size: 34, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:11:21,267 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3144, 2.8970, 2.3113, 3.9331, 1.8030, 2.0551, 2.6192, 3.1681],
+ device='cuda:1'), covar=tensor([0.0771, 0.0811, 0.0892, 0.0238, 0.1115, 0.1299, 0.0917, 0.0756],
+ device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0204, 0.0249, 0.0213, 0.0213, 0.0250, 0.0256, 0.0215],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 17:11:41,511 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 17:11:54,697 INFO [train.py:901] (1/4) Epoch 15, batch 6200, loss[loss=0.2681, simple_loss=0.3379, pruned_loss=0.09915, over 8461.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2973, pruned_loss=0.06974, over 1613379.57 frames. ], batch size: 49, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:11:55,639 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119364.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:11:56,078 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.347e+02 3.204e+02 3.871e+02 7.576e+02, threshold=6.408e+02, percent-clipped=2.0
+2023-02-06 17:12:10,072 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0
+2023-02-06 17:12:14,471 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:12:30,389 INFO [train.py:901] (1/4) Epoch 15, batch 6250, loss[loss=0.2105, simple_loss=0.2829, pruned_loss=0.06904, over 7930.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2974, pruned_loss=0.06988, over 1613666.82 frames. ], batch size: 20, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:12:37,235 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2392, 2.2257, 1.6407, 1.9452, 1.7922, 1.4292, 1.6450, 1.6776],
+ device='cuda:1'), covar=tensor([0.1107, 0.0320, 0.1060, 0.0502, 0.0707, 0.1290, 0.0886, 0.0683],
+ device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0231, 0.0326, 0.0302, 0.0302, 0.0331, 0.0346, 0.0314],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 17:12:47,167 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:12:59,472 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119455.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:13:04,854 INFO [train.py:901] (1/4) Epoch 15, batch 6300, loss[loss=0.1771, simple_loss=0.2576, pruned_loss=0.04826, over 7804.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2976, pruned_loss=0.07011, over 1614490.15 frames. ], batch size: 20, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:13:06,146 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.517e+02 3.087e+02 3.932e+02 1.134e+03, threshold=6.173e+02, percent-clipped=3.0
+2023-02-06 17:13:41,045 INFO [train.py:901] (1/4) Epoch 15, batch 6350, loss[loss=0.2574, simple_loss=0.3445, pruned_loss=0.08514, over 8252.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2985, pruned_loss=0.07069, over 1612442.25 frames. ], batch size: 24, lr: 5.03e-03, grad_scale: 8.0
+2023-02-06 17:13:53,782 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:13:57,576 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.41 vs. limit=5.0
+2023-02-06 17:14:07,838 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119552.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:14:15,091 INFO [train.py:901] (1/4) Epoch 15, batch 6400, loss[loss=0.2024, simple_loss=0.2775, pruned_loss=0.06363, over 8356.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2988, pruned_loss=0.07128, over 1610513.78 frames. ], batch size: 24, lr: 5.03e-03, grad_scale: 8.0
+2023-02-06 17:14:16,446 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 3.023e+02 3.752e+02 7.818e+02, threshold=6.047e+02, percent-clipped=4.0
+2023-02-06 17:14:20,028 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119570.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:14:45,443 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6663, 5.8391, 5.0614, 2.3194, 5.0655, 5.4028, 5.3545, 5.0764],
+ device='cuda:1'), covar=tensor([0.0532, 0.0365, 0.0931, 0.4353, 0.0734, 0.0826, 0.1003, 0.0593],
+ device='cuda:1'), in_proj_covar=tensor([0.0489, 0.0405, 0.0408, 0.0502, 0.0398, 0.0411, 0.0389, 0.0354],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 17:14:49,984 INFO [train.py:901] (1/4) Epoch 15, batch 6450, loss[loss=0.2101, simple_loss=0.299, pruned_loss=0.06058, over 8458.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2987, pruned_loss=0.07103, over 1611966.43 frames.
], batch size: 27, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:15:24,247 INFO [train.py:901] (1/4) Epoch 15, batch 6500, loss[loss=0.2111, simple_loss=0.2728, pruned_loss=0.07465, over 7548.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.299, pruned_loss=0.0709, over 1615813.01 frames. ], batch size: 18, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:15:25,571 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.561e+02 2.888e+02 3.578e+02 6.995e+02, threshold=5.776e+02, percent-clipped=4.0 +2023-02-06 17:15:38,583 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:15:55,014 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 17:15:58,717 INFO [train.py:901] (1/4) Epoch 15, batch 6550, loss[loss=0.2163, simple_loss=0.2995, pruned_loss=0.06657, over 7675.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2984, pruned_loss=0.0703, over 1615739.82 frames. ], batch size: 19, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:16:29,740 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 17:16:34,450 INFO [train.py:901] (1/4) Epoch 15, batch 6600, loss[loss=0.2328, simple_loss=0.3119, pruned_loss=0.07686, over 8446.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.299, pruned_loss=0.06976, over 1620644.73 frames. ], batch size: 27, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:16:35,799 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.456e+02 2.938e+02 3.854e+02 9.901e+02, threshold=5.877e+02, percent-clipped=5.0 +2023-02-06 17:16:47,919 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 17:17:05,449 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119808.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:08,629 INFO [train.py:901] (1/4) Epoch 15, batch 6650, loss[loss=0.2049, simple_loss=0.2853, pruned_loss=0.0622, over 7912.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2988, pruned_loss=0.06976, over 1617160.45 frames. ], batch size: 20, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:17:15,023 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0479, 1.2712, 1.1986, 0.7032, 1.2157, 1.0422, 0.1002, 1.2082], + device='cuda:1'), covar=tensor([0.0328, 0.0281, 0.0239, 0.0404, 0.0310, 0.0738, 0.0639, 0.0250], + device='cuda:1'), in_proj_covar=tensor([0.0421, 0.0364, 0.0314, 0.0421, 0.0348, 0.0506, 0.0376, 0.0387], + device='cuda:1'), out_proj_covar=tensor([1.1614e-04, 9.7785e-05, 8.4111e-05, 1.1365e-04, 9.4181e-05, 1.4720e-04, + 1.0351e-04, 1.0515e-04], device='cuda:1') +2023-02-06 17:17:17,671 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119826.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:22,315 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:36,355 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:44,400 INFO [train.py:901] (1/4) Epoch 15, batch 6700, loss[loss=0.1665, simple_loss=0.2551, pruned_loss=0.03899, over 7548.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2994, pruned_loss=0.0699, over 1622549.46 frames. 
], batch size: 18, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:17:45,218 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7785, 5.8618, 5.1026, 2.4854, 5.2106, 5.6258, 5.4364, 5.3645], + device='cuda:1'), covar=tensor([0.0522, 0.0359, 0.0837, 0.4109, 0.0657, 0.0860, 0.0970, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0489, 0.0405, 0.0412, 0.0504, 0.0402, 0.0409, 0.0392, 0.0355], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:17:45,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.601e+02 2.951e+02 3.516e+02 8.618e+02, threshold=5.902e+02, percent-clipped=2.0 +2023-02-06 17:17:52,175 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8872, 1.6794, 3.2724, 1.5395, 2.3907, 3.6029, 3.6973, 3.0204], + device='cuda:1'), covar=tensor([0.1192, 0.1585, 0.0311, 0.2008, 0.0914, 0.0233, 0.0535, 0.0587], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0309, 0.0272, 0.0302, 0.0288, 0.0250, 0.0380, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:17:53,450 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:18:19,548 INFO [train.py:901] (1/4) Epoch 15, batch 6750, loss[loss=0.2331, simple_loss=0.3085, pruned_loss=0.07884, over 8300.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2987, pruned_loss=0.0697, over 1620373.92 frames. ], batch size: 23, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:18:19,807 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5182, 1.9602, 2.9953, 1.3189, 2.2650, 1.8933, 1.5992, 2.1903], + device='cuda:1'), covar=tensor([0.1916, 0.2288, 0.0837, 0.4251, 0.1622, 0.3052, 0.2114, 0.2141], + device='cuda:1'), in_proj_covar=tensor([0.0502, 0.0558, 0.0539, 0.0607, 0.0629, 0.0569, 0.0499, 0.0621], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:18:55,246 INFO [train.py:901] (1/4) Epoch 15, batch 6800, loss[loss=0.2048, simple_loss=0.2855, pruned_loss=0.06208, over 8241.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2986, pruned_loss=0.06934, over 1620244.13 frames. ], batch size: 22, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:18:57,360 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.559e+02 3.032e+02 3.835e+02 7.300e+02, threshold=6.064e+02, percent-clipped=2.0 +2023-02-06 17:19:03,570 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 17:19:15,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:19:32,093 INFO [train.py:901] (1/4) Epoch 15, batch 6850, loss[loss=0.2314, simple_loss=0.3227, pruned_loss=0.07003, over 8035.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.298, pruned_loss=0.06876, over 1619004.57 frames. ], batch size: 22, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:19:41,652 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=120027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:19:53,396 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 17:20:06,203 INFO [train.py:901] (1/4) Epoch 15, batch 6900, loss[loss=0.2005, simple_loss=0.2785, pruned_loss=0.06127, over 8092.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2967, pruned_loss=0.06808, over 1617990.11 frames. ], batch size: 21, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:20:07,530 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.397e+02 2.973e+02 3.506e+02 9.980e+02, threshold=5.947e+02, percent-clipped=2.0 +2023-02-06 17:20:42,260 INFO [train.py:901] (1/4) Epoch 15, batch 6950, loss[loss=0.2356, simple_loss=0.3105, pruned_loss=0.08033, over 8247.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2972, pruned_loss=0.06854, over 1617800.78 frames. ], batch size: 24, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:21:02,382 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=120142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:21:03,577 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 17:21:16,268 INFO [train.py:901] (1/4) Epoch 15, batch 7000, loss[loss=0.2048, simple_loss=0.2827, pruned_loss=0.06344, over 8237.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2977, pruned_loss=0.06926, over 1616812.78 frames. ], batch size: 22, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:21:17,612 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.303e+02 2.879e+02 3.620e+02 6.461e+02, threshold=5.757e+02, percent-clipped=3.0 +2023-02-06 17:21:51,892 INFO [train.py:901] (1/4) Epoch 15, batch 7050, loss[loss=0.2502, simple_loss=0.3401, pruned_loss=0.0801, over 8582.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2984, pruned_loss=0.06995, over 1614972.78 frames. ], batch size: 31, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:22:11,821 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 17:22:15,019 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:22:26,161 INFO [train.py:901] (1/4) Epoch 15, batch 7100, loss[loss=0.2217, simple_loss=0.3006, pruned_loss=0.07139, over 8189.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2978, pruned_loss=0.06972, over 1619074.86 frames. ], batch size: 23, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:22:27,488 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.428e+02 3.078e+02 4.147e+02 9.225e+02, threshold=6.156e+02, percent-clipped=10.0 +2023-02-06 17:22:32,365 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:22:57,827 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. limit=5.0 +2023-02-06 17:23:00,855 INFO [train.py:901] (1/4) Epoch 15, batch 7150, loss[loss=0.2268, simple_loss=0.3129, pruned_loss=0.07034, over 8475.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2988, pruned_loss=0.07002, over 1621836.96 frames. 
], batch size: 25, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:23:27,589 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6404, 1.3732, 1.6093, 1.2252, 0.8730, 1.4749, 1.4750, 1.2335], + device='cuda:1'), covar=tensor([0.0509, 0.1232, 0.1623, 0.1447, 0.0600, 0.1462, 0.0685, 0.0657], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 17:23:27,658 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5115, 1.8622, 2.7434, 1.3786, 2.0274, 1.8798, 1.6218, 1.9170], + device='cuda:1'), covar=tensor([0.1734, 0.2193, 0.0798, 0.4196, 0.1540, 0.2836, 0.1891, 0.2006], + device='cuda:1'), in_proj_covar=tensor([0.0499, 0.0552, 0.0537, 0.0603, 0.0626, 0.0565, 0.0495, 0.0619], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:23:31,187 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 17:23:35,462 INFO [train.py:901] (1/4) Epoch 15, batch 7200, loss[loss=0.237, simple_loss=0.3194, pruned_loss=0.07733, over 8335.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2988, pruned_loss=0.07011, over 1621641.50 frames. ], batch size: 25, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:23:36,809 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.418e+02 2.853e+02 3.692e+02 6.645e+02, threshold=5.707e+02, percent-clipped=2.0 +2023-02-06 17:24:00,217 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:24:10,187 INFO [train.py:901] (1/4) Epoch 15, batch 7250, loss[loss=0.2278, simple_loss=0.3108, pruned_loss=0.07239, over 8581.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2984, pruned_loss=0.07005, over 1620435.41 frames. ], batch size: 39, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:24:17,873 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:24:29,239 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4643, 1.8387, 2.7164, 1.2994, 1.8113, 1.7786, 1.5633, 1.8661], + device='cuda:1'), covar=tensor([0.1808, 0.2271, 0.0788, 0.4291, 0.1834, 0.3059, 0.2086, 0.2173], + device='cuda:1'), in_proj_covar=tensor([0.0501, 0.0554, 0.0539, 0.0609, 0.0629, 0.0568, 0.0498, 0.0621], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:24:45,978 INFO [train.py:901] (1/4) Epoch 15, batch 7300, loss[loss=0.2139, simple_loss=0.3003, pruned_loss=0.06372, over 8510.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2989, pruned_loss=0.07007, over 1620851.55 frames. 
], batch size: 28, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:24:47,334 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.423e+02 2.925e+02 3.483e+02 5.889e+02, threshold=5.849e+02, percent-clipped=3.0 +2023-02-06 17:25:18,671 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3409, 1.5550, 4.4158, 1.8565, 2.2344, 5.1214, 5.0906, 4.3455], + device='cuda:1'), covar=tensor([0.1070, 0.1799, 0.0253, 0.1855, 0.1208, 0.0155, 0.0347, 0.0544], + device='cuda:1'), in_proj_covar=tensor([0.0280, 0.0306, 0.0272, 0.0301, 0.0285, 0.0248, 0.0379, 0.0298], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:25:20,528 INFO [train.py:901] (1/4) Epoch 15, batch 7350, loss[loss=0.1792, simple_loss=0.2626, pruned_loss=0.04784, over 7803.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2989, pruned_loss=0.07008, over 1621910.99 frames. ], batch size: 19, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:25:27,942 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0087, 1.7065, 3.4272, 1.4492, 2.2906, 3.8793, 3.8876, 3.2817], + device='cuda:1'), covar=tensor([0.1113, 0.1582, 0.0349, 0.2030, 0.1018, 0.0208, 0.0490, 0.0576], + device='cuda:1'), in_proj_covar=tensor([0.0280, 0.0306, 0.0271, 0.0301, 0.0285, 0.0248, 0.0379, 0.0298], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:25:45,373 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 17:25:56,247 INFO [train.py:901] (1/4) Epoch 15, batch 7400, loss[loss=0.22, simple_loss=0.2933, pruned_loss=0.07332, over 7982.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2999, pruned_loss=0.07101, over 1618677.54 frames. ], batch size: 21, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:25:57,546 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.487e+02 3.190e+02 4.160e+02 9.613e+02, threshold=6.380e+02, percent-clipped=9.0 +2023-02-06 17:26:04,623 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 17:26:18,149 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2568, 1.4299, 1.5331, 1.2108, 0.8933, 1.4068, 1.7928, 1.7456], + device='cuda:1'), covar=tensor([0.0445, 0.1295, 0.1771, 0.1501, 0.0592, 0.1573, 0.0637, 0.0600], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0099, 0.0161, 0.0113, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 17:26:30,893 INFO [train.py:901] (1/4) Epoch 15, batch 7450, loss[loss=0.2022, simple_loss=0.2944, pruned_loss=0.05496, over 8017.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2992, pruned_loss=0.07084, over 1616912.25 frames. ], batch size: 22, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:26:42,795 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 17:27:06,396 INFO [train.py:901] (1/4) Epoch 15, batch 7500, loss[loss=0.1912, simple_loss=0.2818, pruned_loss=0.05028, over 8635.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2984, pruned_loss=0.0707, over 1612834.43 frames. 
], batch size: 31, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:27:07,754 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.388e+02 2.853e+02 3.831e+02 7.536e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 17:27:27,369 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=120694.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:27:37,317 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-06 17:27:40,251 INFO [train.py:901] (1/4) Epoch 15, batch 7550, loss[loss=0.2178, simple_loss=0.295, pruned_loss=0.07029, over 8228.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2995, pruned_loss=0.07122, over 1619823.19 frames. ], batch size: 22, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:28:14,835 INFO [train.py:901] (1/4) Epoch 15, batch 7600, loss[loss=0.1912, simple_loss=0.2658, pruned_loss=0.05834, over 7437.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2975, pruned_loss=0.0703, over 1613283.14 frames. ], batch size: 17, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:28:16,204 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.426e+02 3.048e+02 3.965e+02 8.844e+02, threshold=6.096e+02, percent-clipped=6.0 +2023-02-06 17:28:50,148 INFO [train.py:901] (1/4) Epoch 15, batch 7650, loss[loss=0.1965, simple_loss=0.2896, pruned_loss=0.05165, over 8187.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.299, pruned_loss=0.07111, over 1615516.01 frames. ], batch size: 23, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:29:25,339 INFO [train.py:901] (1/4) Epoch 15, batch 7700, loss[loss=0.2361, simple_loss=0.313, pruned_loss=0.07964, over 8515.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2981, pruned_loss=0.07074, over 1612962.91 frames. ], batch size: 28, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:29:27,394 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.503e+02 3.087e+02 4.175e+02 9.539e+02, threshold=6.174e+02, percent-clipped=7.0 +2023-02-06 17:29:40,156 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 17:29:52,764 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 17:30:01,558 INFO [train.py:901] (1/4) Epoch 15, batch 7750, loss[loss=0.1723, simple_loss=0.2495, pruned_loss=0.04756, over 7541.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2962, pruned_loss=0.0691, over 1612152.45 frames. ], batch size: 18, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:30:36,087 INFO [train.py:901] (1/4) Epoch 15, batch 7800, loss[loss=0.2053, simple_loss=0.2962, pruned_loss=0.05715, over 8657.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2965, pruned_loss=0.06912, over 1613952.68 frames. 
], batch size: 39, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:30:38,109 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 2.376e+02 2.783e+02 3.266e+02 5.993e+02, threshold=5.565e+02, percent-clipped=0.0 +2023-02-06 17:30:55,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5978, 1.8723, 3.0054, 1.3512, 2.2495, 1.8906, 1.6792, 2.0723], + device='cuda:1'), covar=tensor([0.1568, 0.2042, 0.0670, 0.3707, 0.1389, 0.2735, 0.1746, 0.2103], + device='cuda:1'), in_proj_covar=tensor([0.0498, 0.0551, 0.0536, 0.0604, 0.0624, 0.0567, 0.0497, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:31:09,465 INFO [train.py:901] (1/4) Epoch 15, batch 7850, loss[loss=0.2059, simple_loss=0.2938, pruned_loss=0.05904, over 8468.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.299, pruned_loss=0.07077, over 1617713.68 frames. ], batch size: 25, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:31:14,866 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:31:26,042 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121038.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:31:42,572 INFO [train.py:901] (1/4) Epoch 15, batch 7900, loss[loss=0.1929, simple_loss=0.272, pruned_loss=0.05697, over 7666.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3002, pruned_loss=0.07168, over 1619384.06 frames. ], batch size: 19, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:31:44,513 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 3.139e+02 4.114e+02 1.036e+03, threshold=6.279e+02, percent-clipped=8.0 +2023-02-06 17:31:45,339 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9778, 1.8538, 6.0651, 2.3275, 5.5406, 5.1706, 5.6264, 5.5925], + device='cuda:1'), covar=tensor([0.0462, 0.4372, 0.0312, 0.3220, 0.0855, 0.0779, 0.0473, 0.0437], + device='cuda:1'), in_proj_covar=tensor([0.0558, 0.0610, 0.0630, 0.0579, 0.0652, 0.0564, 0.0555, 0.0613], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 17:32:05,284 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 17:32:15,966 INFO [train.py:901] (1/4) Epoch 15, batch 7950, loss[loss=0.2104, simple_loss=0.2831, pruned_loss=0.06882, over 8087.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2982, pruned_loss=0.07033, over 1617570.77 frames. ], batch size: 21, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:32:40,729 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4282, 2.3860, 1.6081, 2.0013, 1.9717, 1.4786, 1.8047, 1.8571], + device='cuda:1'), covar=tensor([0.1389, 0.0367, 0.1263, 0.0583, 0.0655, 0.1401, 0.1009, 0.0866], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0232, 0.0322, 0.0302, 0.0300, 0.0328, 0.0339, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:32:42,026 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:32:48,346 INFO [train.py:901] (1/4) Epoch 15, batch 8000, loss[loss=0.2198, simple_loss=0.2935, pruned_loss=0.07302, over 7646.00 frames. 
], tot_loss[loss=0.219, simple_loss=0.298, pruned_loss=0.07004, over 1613903.04 frames. ], batch size: 19, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:32:50,378 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.992e+02 3.696e+02 7.694e+02, threshold=5.984e+02, percent-clipped=2.0 +2023-02-06 17:33:01,512 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:33:22,852 INFO [train.py:901] (1/4) Epoch 15, batch 8050, loss[loss=0.1907, simple_loss=0.2691, pruned_loss=0.05614, over 8074.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2963, pruned_loss=0.0702, over 1599456.27 frames. ], batch size: 21, lr: 5.00e-03, grad_scale: 8.0 +2023-02-06 17:33:34,505 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1105, 1.3981, 1.7446, 1.2512, 1.0748, 1.4244, 1.7792, 1.3979], + device='cuda:1'), covar=tensor([0.0457, 0.1228, 0.1606, 0.1377, 0.0564, 0.1468, 0.0645, 0.0708], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0100, 0.0162, 0.0113, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 17:33:55,806 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 17:34:00,933 INFO [train.py:901] (1/4) Epoch 16, batch 0, loss[loss=0.2322, simple_loss=0.3085, pruned_loss=0.0779, over 7283.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3085, pruned_loss=0.0779, over 7283.00 frames. ], batch size: 71, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:34:00,934 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 17:34:11,350 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5955, 1.5610, 2.6884, 1.2832, 1.9959, 2.8710, 3.0526, 2.4475], + device='cuda:1'), covar=tensor([0.1262, 0.1554, 0.0450, 0.2407, 0.0900, 0.0403, 0.0826, 0.0812], + device='cuda:1'), in_proj_covar=tensor([0.0280, 0.0309, 0.0271, 0.0301, 0.0288, 0.0248, 0.0377, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:34:11,915 INFO [train.py:935] (1/4) Epoch 16, validation: loss=0.1795, simple_loss=0.2801, pruned_loss=0.03944, over 944034.00 frames. +2023-02-06 17:34:11,916 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 17:34:17,798 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4994, 1.4235, 4.6868, 1.7023, 4.0936, 3.8633, 4.1505, 4.0700], + device='cuda:1'), covar=tensor([0.0504, 0.4903, 0.0452, 0.4032, 0.1121, 0.1044, 0.0610, 0.0666], + device='cuda:1'), in_proj_covar=tensor([0.0558, 0.0611, 0.0632, 0.0582, 0.0653, 0.0565, 0.0555, 0.0614], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 17:34:24,910 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.543e+02 3.194e+02 4.084e+02 8.334e+02, threshold=6.389e+02, percent-clipped=7.0 +2023-02-06 17:34:26,234 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 17:34:41,509 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-06 17:34:47,407 INFO [train.py:901] (1/4) Epoch 16, batch 50, loss[loss=0.2155, simple_loss=0.2961, pruned_loss=0.06745, over 8246.00 frames. 
], tot_loss[loss=0.2174, simple_loss=0.2958, pruned_loss=0.06951, over 361301.50 frames. ], batch size: 22, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:35:00,270 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 17:35:02,266 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 17:35:09,789 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121329.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:35:20,724 INFO [train.py:901] (1/4) Epoch 16, batch 100, loss[loss=0.2028, simple_loss=0.2769, pruned_loss=0.06435, over 8034.00 frames. ], tot_loss[loss=0.221, simple_loss=0.3002, pruned_loss=0.07091, over 637782.19 frames. ], batch size: 22, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:35:24,733 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 17:35:33,278 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:35:33,867 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.470e+02 2.913e+02 3.674e+02 6.203e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-06 17:35:52,048 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5755, 2.1076, 3.4385, 1.3985, 2.5428, 2.0290, 1.6714, 2.5940], + device='cuda:1'), covar=tensor([0.1791, 0.2403, 0.0715, 0.4115, 0.1671, 0.3094, 0.2101, 0.2085], + device='cuda:1'), in_proj_covar=tensor([0.0508, 0.0560, 0.0542, 0.0614, 0.0637, 0.0580, 0.0507, 0.0627], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:35:53,856 INFO [train.py:901] (1/4) Epoch 16, batch 150, loss[loss=0.2541, simple_loss=0.3423, pruned_loss=0.08293, over 8493.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2995, pruned_loss=0.06991, over 857519.53 frames. ], batch size: 28, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:36:00,976 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8193, 1.6480, 2.3865, 1.6677, 1.2440, 2.3931, 0.3064, 1.4171], + device='cuda:1'), covar=tensor([0.2246, 0.1743, 0.0521, 0.1725, 0.3711, 0.0496, 0.3041, 0.1784], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0180, 0.0112, 0.0215, 0.0257, 0.0116, 0.0163, 0.0176], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 17:36:04,319 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:15,650 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:21,672 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:30,080 INFO [train.py:901] (1/4) Epoch 16, batch 200, loss[loss=0.195, simple_loss=0.2827, pruned_loss=0.05367, over 8283.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.3001, pruned_loss=0.07019, over 1028455.03 frames. ], batch size: 23, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:36:33,180 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.88 vs. 
limit=5.0 +2023-02-06 17:36:43,676 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.385e+02 2.940e+02 3.661e+02 7.455e+02, threshold=5.881e+02, percent-clipped=4.0 +2023-02-06 17:36:53,373 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:56,049 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4986, 2.7970, 2.2490, 3.6912, 1.6762, 1.8448, 2.2090, 3.0059], + device='cuda:1'), covar=tensor([0.0623, 0.0776, 0.0847, 0.0318, 0.1178, 0.1291, 0.1078, 0.0693], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0206, 0.0252, 0.0215, 0.0216, 0.0252, 0.0257, 0.0214], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 17:36:59,471 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3927, 2.4951, 1.7449, 2.0741, 2.0643, 1.5323, 1.9093, 1.8123], + device='cuda:1'), covar=tensor([0.1387, 0.0339, 0.1237, 0.0561, 0.0608, 0.1375, 0.0912, 0.0939], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0233, 0.0326, 0.0303, 0.0301, 0.0330, 0.0341, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:37:04,015 INFO [train.py:901] (1/4) Epoch 16, batch 250, loss[loss=0.2294, simple_loss=0.3084, pruned_loss=0.07524, over 7803.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2992, pruned_loss=0.07039, over 1157534.87 frames. ], batch size: 20, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:37:07,580 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6406, 2.3097, 4.3834, 1.5314, 3.0181, 2.3676, 1.7479, 2.7754], + device='cuda:1'), covar=tensor([0.1900, 0.2493, 0.0691, 0.4344, 0.1707, 0.2976, 0.2135, 0.2477], + device='cuda:1'), in_proj_covar=tensor([0.0507, 0.0559, 0.0542, 0.0612, 0.0636, 0.0578, 0.0505, 0.0627], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 17:37:18,651 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 17:37:24,808 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:37:28,152 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 17:37:39,707 INFO [train.py:901] (1/4) Epoch 16, batch 300, loss[loss=0.2173, simple_loss=0.2916, pruned_loss=0.0715, over 7813.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.3, pruned_loss=0.0706, over 1261683.93 frames. 
], batch size: 20, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:37:54,072 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.529e+02 3.079e+02 3.820e+02 7.739e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-06 17:38:14,118 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8166, 1.8955, 1.6454, 2.2469, 1.0019, 1.4192, 1.6460, 1.9060], + device='cuda:1'), covar=tensor([0.0756, 0.0747, 0.0994, 0.0461, 0.1254, 0.1474, 0.0870, 0.0760], + device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0205, 0.0250, 0.0214, 0.0214, 0.0251, 0.0254, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 17:38:14,590 INFO [train.py:901] (1/4) Epoch 16, batch 350, loss[loss=0.2279, simple_loss=0.3097, pruned_loss=0.07303, over 8261.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2988, pruned_loss=0.06971, over 1341250.27 frames. ], batch size: 24, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:38:45,978 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:38:49,789 INFO [train.py:901] (1/4) Epoch 16, batch 400, loss[loss=0.2403, simple_loss=0.3241, pruned_loss=0.07821, over 8450.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2988, pruned_loss=0.07017, over 1397511.31 frames. ], batch size: 29, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:39:04,292 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.467e+02 3.087e+02 3.761e+02 6.357e+02, threshold=6.175e+02, percent-clipped=1.0 +2023-02-06 17:39:09,180 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121673.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:39:25,132 INFO [train.py:901] (1/4) Epoch 16, batch 450, loss[loss=0.2471, simple_loss=0.3227, pruned_loss=0.08572, over 8236.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2978, pruned_loss=0.06943, over 1445370.78 frames. ], batch size: 22, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:39:52,406 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:39:59,044 INFO [train.py:901] (1/4) Epoch 16, batch 500, loss[loss=0.2348, simple_loss=0.2954, pruned_loss=0.08716, over 7257.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2986, pruned_loss=0.06958, over 1483758.34 frames. ], batch size: 16, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:40:00,105 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.00 vs. limit=5.0 +2023-02-06 17:40:10,963 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:40:14,769 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.435e+02 2.838e+02 3.555e+02 6.989e+02, threshold=5.677e+02, percent-clipped=1.0 +2023-02-06 17:40:17,013 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:40:29,902 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121788.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 17:40:35,812 INFO [train.py:901] (1/4) Epoch 16, batch 550, loss[loss=0.2323, simple_loss=0.3107, pruned_loss=0.0769, over 8323.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2985, pruned_loss=0.06962, over 1512187.58 frames. 
], batch size: 25, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:02,682 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9201, 1.3509, 3.2750, 1.3718, 2.2937, 3.5766, 3.6990, 3.0439], + device='cuda:1'), covar=tensor([0.1138, 0.1879, 0.0355, 0.2203, 0.1050, 0.0238, 0.0579, 0.0652], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0310, 0.0273, 0.0301, 0.0292, 0.0248, 0.0381, 0.0298], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 17:41:09,168 INFO [train.py:901] (1/4) Epoch 16, batch 600, loss[loss=0.2012, simple_loss=0.2661, pruned_loss=0.06815, over 7240.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2976, pruned_loss=0.06899, over 1536578.89 frames. ], batch size: 16, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:15,570 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 17:41:22,430 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.425e+02 3.086e+02 4.175e+02 1.417e+03, threshold=6.173e+02, percent-clipped=9.0 +2023-02-06 17:41:26,593 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 17:41:36,804 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:41:44,746 INFO [train.py:901] (1/4) Epoch 16, batch 650, loss[loss=0.2318, simple_loss=0.3087, pruned_loss=0.0774, over 8505.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.297, pruned_loss=0.06839, over 1552590.39 frames. ], batch size: 28, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:45,635 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:41:48,936 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 17:42:02,836 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:18,673 INFO [train.py:901] (1/4) Epoch 16, batch 700, loss[loss=0.2186, simple_loss=0.2934, pruned_loss=0.07187, over 8245.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2974, pruned_loss=0.06873, over 1568973.22 frames. ], batch size: 22, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:42:32,102 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.496e+02 2.978e+02 3.542e+02 1.118e+03, threshold=5.957e+02, percent-clipped=1.0 +2023-02-06 17:42:33,596 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:41,116 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 17:42:53,714 INFO [train.py:901] (1/4) Epoch 16, batch 750, loss[loss=0.1422, simple_loss=0.2207, pruned_loss=0.03182, over 7445.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2963, pruned_loss=0.06816, over 1574937.52 frames. ], batch size: 17, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:43:14,269 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-06 17:43:14,507 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0496, 1.2536, 1.1631, 0.5839, 1.2408, 1.0349, 0.0900, 1.1723], + device='cuda:1'), covar=tensor([0.0352, 0.0314, 0.0265, 0.0523, 0.0350, 0.0811, 0.0671, 0.0290], + device='cuda:1'), in_proj_covar=tensor([0.0411, 0.0355, 0.0306, 0.0409, 0.0341, 0.0495, 0.0361, 0.0377], + device='cuda:1'), out_proj_covar=tensor([1.1340e-04, 9.4978e-05, 8.1782e-05, 1.1026e-04, 9.1961e-05, 1.4363e-04, + 9.9360e-05, 1.0221e-04], device='cuda:1') +2023-02-06 17:43:23,870 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 17:43:28,672 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122044.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:43:29,777 INFO [train.py:901] (1/4) Epoch 16, batch 800, loss[loss=0.1845, simple_loss=0.2504, pruned_loss=0.05933, over 7441.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2968, pruned_loss=0.06897, over 1586342.86 frames. ], batch size: 17, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:43:43,081 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.422e+02 2.925e+02 3.576e+02 6.712e+02, threshold=5.851e+02, percent-clipped=2.0 +2023-02-06 17:43:45,459 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122069.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:44:03,130 INFO [train.py:901] (1/4) Epoch 16, batch 850, loss[loss=0.1906, simple_loss=0.2706, pruned_loss=0.05532, over 7808.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2974, pruned_loss=0.06902, over 1595202.53 frames. ], batch size: 20, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:44:16,338 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9393, 2.3921, 3.5291, 2.1545, 1.8044, 3.4943, 0.8776, 2.0844], + device='cuda:1'), covar=tensor([0.1603, 0.1447, 0.0309, 0.1787, 0.2947, 0.0333, 0.2666, 0.1832], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0179, 0.0112, 0.0212, 0.0255, 0.0115, 0.0161, 0.0176], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 17:44:31,616 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:34,938 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:38,752 INFO [train.py:901] (1/4) Epoch 16, batch 900, loss[loss=0.244, simple_loss=0.3202, pruned_loss=0.08389, over 8129.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2971, pruned_loss=0.06881, over 1598158.05 frames. 
], batch size: 22, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:44:48,951 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6617, 2.2952, 3.3346, 1.9341, 1.6000, 3.2542, 0.8072, 2.0377], + device='cuda:1'), covar=tensor([0.1754, 0.1408, 0.0292, 0.1953, 0.3185, 0.0345, 0.2743, 0.1521], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0179, 0.0112, 0.0212, 0.0255, 0.0115, 0.0161, 0.0176], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 17:44:52,336 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:52,793 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.482e+02 3.085e+02 4.013e+02 7.148e+02, threshold=6.170e+02, percent-clipped=4.0 +2023-02-06 17:45:01,231 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 17:45:12,889 INFO [train.py:901] (1/4) Epoch 16, batch 950, loss[loss=0.2391, simple_loss=0.3155, pruned_loss=0.0814, over 8681.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2968, pruned_loss=0.06844, over 1604579.06 frames. ], batch size: 39, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:45:24,854 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:45:40,117 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 17:45:49,040 INFO [train.py:901] (1/4) Epoch 16, batch 1000, loss[loss=0.1906, simple_loss=0.2741, pruned_loss=0.05352, over 8341.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2968, pruned_loss=0.06818, over 1611459.99 frames. ], batch size: 25, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:46:03,407 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.462e+02 3.004e+02 3.600e+02 8.525e+02, threshold=6.009e+02, percent-clipped=4.0 +2023-02-06 17:46:14,159 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 17:46:23,684 INFO [train.py:901] (1/4) Epoch 16, batch 1050, loss[loss=0.2166, simple_loss=0.3052, pruned_loss=0.06394, over 8348.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2966, pruned_loss=0.06777, over 1616777.83 frames. ], batch size: 26, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:46:26,430 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 17:46:34,620 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:46:51,299 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:46:57,875 INFO [train.py:901] (1/4) Epoch 16, batch 1100, loss[loss=0.1787, simple_loss=0.2636, pruned_loss=0.04692, over 7932.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2963, pruned_loss=0.06752, over 1616989.05 frames. ], batch size: 20, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:47:12,623 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.699e+02 3.204e+02 3.982e+02 8.590e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 17:47:33,544 INFO [train.py:901] (1/4) Epoch 16, batch 1150, loss[loss=0.2085, simple_loss=0.2815, pruned_loss=0.06775, over 7222.00 frames. 
], tot_loss[loss=0.2145, simple_loss=0.295, pruned_loss=0.06704, over 1615512.79 frames. ], batch size: 16, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:47:38,300 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 17:47:54,848 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122427.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:48:07,544 INFO [train.py:901] (1/4) Epoch 16, batch 1200, loss[loss=0.218, simple_loss=0.2991, pruned_loss=0.06841, over 8587.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2958, pruned_loss=0.06836, over 1614984.24 frames. ], batch size: 31, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:48:21,992 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.417e+02 3.007e+02 3.779e+02 1.089e+03, threshold=6.013e+02, percent-clipped=2.0
+2023-02-06 17:48:31,788 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:48:36,690 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122486.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:48:43,455 INFO [train.py:901] (1/4) Epoch 16, batch 1250, loss[loss=0.2418, simple_loss=0.3097, pruned_loss=0.0869, over 7915.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2956, pruned_loss=0.0685, over 1618450.77 frames. ], batch size: 20, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:49:19,086 INFO [train.py:901] (1/4) Epoch 16, batch 1300, loss[loss=0.1963, simple_loss=0.2817, pruned_loss=0.05546, over 8248.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2957, pruned_loss=0.06883, over 1617056.59 frames. ], batch size: 24, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:49:26,973 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122557.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:49:33,313 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 3.105e+02 3.703e+02 6.719e+02, threshold=6.210e+02, percent-clipped=4.0
+2023-02-06 17:49:54,912 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122594.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:49:56,060 INFO [train.py:901] (1/4) Epoch 16, batch 1350, loss[loss=0.2459, simple_loss=0.3182, pruned_loss=0.08676, over 8139.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2972, pruned_loss=0.06967, over 1616943.31 frames. ], batch size: 22, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:50:08,290 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0251, 3.8795, 2.2881, 2.7271, 2.7272, 2.0887, 2.8114, 2.8710],
+ device='cuda:1'), covar=tensor([0.1604, 0.0327, 0.1096, 0.0719, 0.0750, 0.1420, 0.1030, 0.1019],
+ device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0238, 0.0332, 0.0307, 0.0305, 0.0332, 0.0348, 0.0316],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 17:50:31,463 INFO [train.py:901] (1/4) Epoch 16, batch 1400, loss[loss=0.1876, simple_loss=0.2651, pruned_loss=0.05507, over 7421.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2968, pruned_loss=0.06936, over 1617306.44 frames. ], batch size: 17, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:50:34,437 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5336, 2.3598, 4.2631, 1.3565, 3.0069, 2.2382, 1.6733, 2.7916],
+ device='cuda:1'), covar=tensor([0.1976, 0.2491, 0.0768, 0.4533, 0.1655, 0.3150, 0.2220, 0.2465],
+ device='cuda:1'), in_proj_covar=tensor([0.0508, 0.0560, 0.0542, 0.0610, 0.0631, 0.0575, 0.0503, 0.0624],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 17:50:45,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.319e+02 2.799e+02 3.491e+02 7.123e+02, threshold=5.597e+02, percent-clipped=1.0
+2023-02-06 17:50:49,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122672.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:50:55,487 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122681.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:50:57,007 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122683.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:51:05,788 INFO [train.py:901] (1/4) Epoch 16, batch 1450, loss[loss=0.2396, simple_loss=0.3151, pruned_loss=0.08204, over 8016.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2964, pruned_loss=0.0691, over 1614989.65 frames. ], batch size: 22, lr: 4.81e-03, grad_scale: 4.0
+2023-02-06 17:51:13,476 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 17:51:15,674 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:51:42,664 INFO [train.py:901] (1/4) Epoch 16, batch 1500, loss[loss=0.1668, simple_loss=0.2423, pruned_loss=0.04562, over 7440.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.296, pruned_loss=0.06871, over 1619430.15 frames. ], batch size: 17, lr: 4.81e-03, grad_scale: 4.0
+2023-02-06 17:51:56,876 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 2.515e+02 3.024e+02 4.111e+02 8.238e+02, threshold=6.047e+02, percent-clipped=9.0
+2023-02-06 17:52:05,879 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8049, 1.5581, 2.0277, 1.7486, 1.9100, 1.7714, 1.5587, 1.1728],
+ device='cuda:1'), covar=tensor([0.3270, 0.3329, 0.1234, 0.2173, 0.1659, 0.1983, 0.1416, 0.3151],
+ device='cuda:1'), in_proj_covar=tensor([0.0909, 0.0923, 0.0758, 0.0893, 0.0952, 0.0840, 0.0720, 0.0796],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 17:52:16,427 INFO [train.py:901] (1/4) Epoch 16, batch 1550, loss[loss=0.2202, simple_loss=0.2992, pruned_loss=0.07064, over 8517.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2965, pruned_loss=0.06923, over 1619851.18 frames. ], batch size: 28, lr: 4.81e-03, grad_scale: 4.0
+2023-02-06 17:52:16,637 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122796.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:52:21,765 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 17:52:41,664 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:52:41,848 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3157, 1.9835, 2.7103, 2.2431, 2.5663, 2.2328, 1.9514, 1.4068],
+ device='cuda:1'), covar=tensor([0.4271, 0.4324, 0.1537, 0.2890, 0.2146, 0.2525, 0.1716, 0.4506],
+ device='cuda:1'), in_proj_covar=tensor([0.0907, 0.0921, 0.0758, 0.0893, 0.0949, 0.0838, 0.0717, 0.0794],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 17:52:52,396 INFO [train.py:901] (1/4) Epoch 16, batch 1600, loss[loss=0.196, simple_loss=0.2879, pruned_loss=0.05201, over 8454.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2967, pruned_loss=0.06885, over 1618800.57 frames. ], batch size: 27, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:52:55,444 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:53:07,650 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.555e+02 3.178e+02 4.067e+02 1.179e+03, threshold=6.355e+02, percent-clipped=12.0
+2023-02-06 17:53:13,373 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:53:23,678 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122890.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:53:27,662 INFO [train.py:901] (1/4) Epoch 16, batch 1650, loss[loss=0.198, simple_loss=0.2716, pruned_loss=0.06216, over 7791.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.06865, over 1617413.56 frames. ], batch size: 19, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:53:28,087 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 17:53:28,529 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6293, 1.4038, 1.6398, 1.2084, 0.9382, 1.3129, 1.4396, 1.1994],
+ device='cuda:1'), covar=tensor([0.0566, 0.1268, 0.1735, 0.1510, 0.0631, 0.1586, 0.0747, 0.0704],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0150, 0.0188, 0.0155, 0.0099, 0.0161, 0.0113, 0.0138],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 17:53:49,589 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122928.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:54:02,346 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122945.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:54:02,856 INFO [train.py:901] (1/4) Epoch 16, batch 1700, loss[loss=0.1996, simple_loss=0.276, pruned_loss=0.06159, over 8140.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2967, pruned_loss=0.06892, over 1617871.00 frames. ], batch size: 22, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:54:08,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122953.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:54:11,198 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4167, 1.6408, 1.6524, 0.9791, 1.7025, 1.2667, 0.2465, 1.5694],
+ device='cuda:1'), covar=tensor([0.0322, 0.0261, 0.0230, 0.0377, 0.0266, 0.0715, 0.0632, 0.0221],
+ device='cuda:1'), in_proj_covar=tensor([0.0417, 0.0360, 0.0313, 0.0413, 0.0346, 0.0504, 0.0368, 0.0386],
+ device='cuda:1'), out_proj_covar=tensor([1.1497e-04, 9.6637e-05, 8.3694e-05, 1.1138e-04, 9.3238e-05, 1.4601e-04,
+ 1.0098e-04, 1.0457e-04], device='cuda:1')
+2023-02-06 17:54:17,609 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.342e+02 2.881e+02 3.479e+02 7.679e+02, threshold=5.763e+02, percent-clipped=3.0
+2023-02-06 17:54:38,078 INFO [train.py:901] (1/4) Epoch 16, batch 1750, loss[loss=0.2353, simple_loss=0.3178, pruned_loss=0.07637, over 8364.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2967, pruned_loss=0.06954, over 1611688.15 frames. ], batch size: 24, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:55:12,114 INFO [train.py:901] (1/4) Epoch 16, batch 1800, loss[loss=0.1978, simple_loss=0.2798, pruned_loss=0.05795, over 7812.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2967, pruned_loss=0.06927, over 1611538.95 frames. ], batch size: 19, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:55:16,365 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123052.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:55:25,147 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0344, 3.8158, 2.2755, 2.7576, 2.9532, 2.1788, 2.8733, 2.9604],
+ device='cuda:1'), covar=tensor([0.1447, 0.0264, 0.1002, 0.0741, 0.0623, 0.1196, 0.0911, 0.1001],
+ device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0234, 0.0327, 0.0303, 0.0302, 0.0331, 0.0345, 0.0311],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 17:55:27,703 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.489e+02 2.922e+02 3.750e+02 7.056e+02, threshold=5.843e+02, percent-clipped=4.0
+2023-02-06 17:55:35,410 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123077.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:55:48,778 INFO [train.py:901] (1/4) Epoch 16, batch 1850, loss[loss=0.1977, simple_loss=0.2783, pruned_loss=0.05856, over 8239.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.06856, over 1613497.44 frames. ], batch size: 22, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:56:22,234 INFO [train.py:901] (1/4) Epoch 16, batch 1900, loss[loss=0.237, simple_loss=0.3301, pruned_loss=0.07193, over 8468.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2964, pruned_loss=0.06895, over 1616842.67 frames. ], batch size: 29, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:56:36,280 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.569e+02 3.077e+02 4.069e+02 9.708e+02, threshold=6.154e+02, percent-clipped=7.0
+2023-02-06 17:56:57,738 INFO [train.py:901] (1/4) Epoch 16, batch 1950, loss[loss=0.2149, simple_loss=0.3038, pruned_loss=0.06301, over 8199.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2986, pruned_loss=0.07032, over 1618585.33 frames. ], batch size: 23, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:56:59,124 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 17:57:01,389 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123201.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:57:11,332 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 17:57:18,801 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123226.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:57:24,080 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123234.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:57:30,741 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 17:57:32,102 INFO [train.py:901] (1/4) Epoch 16, batch 2000, loss[loss=0.2003, simple_loss=0.2709, pruned_loss=0.06487, over 7535.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2982, pruned_loss=0.07045, over 1613829.36 frames. ], batch size: 18, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:57:46,355 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.202e+02 2.631e+02 3.355e+02 6.225e+02, threshold=5.263e+02, percent-clipped=1.0
+2023-02-06 17:58:05,871 INFO [train.py:901] (1/4) Epoch 16, batch 2050, loss[loss=0.2052, simple_loss=0.29, pruned_loss=0.06018, over 8531.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2966, pruned_loss=0.06915, over 1612557.73 frames. ], batch size: 31, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:58:31,864 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123332.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:58:41,130 INFO [train.py:901] (1/4) Epoch 16, batch 2100, loss[loss=0.2003, simple_loss=0.2722, pruned_loss=0.06415, over 7321.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2973, pruned_loss=0.06953, over 1614674.27 frames. ], batch size: 16, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:58:43,354 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:58:54,988 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.517e+02 3.000e+02 3.631e+02 1.037e+03, threshold=6.000e+02, percent-clipped=6.0
+2023-02-06 17:59:14,280 INFO [train.py:901] (1/4) Epoch 16, batch 2150, loss[loss=0.2085, simple_loss=0.2913, pruned_loss=0.06282, over 8509.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2979, pruned_loss=0.06964, over 1616041.07 frames. ], batch size: 26, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:59:19,800 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2260, 2.0168, 2.7538, 2.2769, 2.7202, 2.2368, 1.9009, 1.2997],
+ device='cuda:1'), covar=tensor([0.4923, 0.4774, 0.1633, 0.3184, 0.2237, 0.2921, 0.1985, 0.5170],
+ device='cuda:1'), in_proj_covar=tensor([0.0920, 0.0928, 0.0763, 0.0900, 0.0964, 0.0846, 0.0720, 0.0803],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 17:59:22,900 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1632, 4.1027, 3.8197, 2.6670, 3.7564, 3.7678, 3.9060, 3.4552],
+ device='cuda:1'), covar=tensor([0.0835, 0.0615, 0.0928, 0.3455, 0.0795, 0.1064, 0.1112, 0.1041],
+ device='cuda:1'), in_proj_covar=tensor([0.0493, 0.0410, 0.0410, 0.0511, 0.0404, 0.0410, 0.0397, 0.0358],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 17:59:50,134 INFO [train.py:901] (1/4) Epoch 16, batch 2200, loss[loss=0.2063, simple_loss=0.2822, pruned_loss=0.06518, over 7532.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2996, pruned_loss=0.07081, over 1614151.32 frames. ], batch size: 18, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 17:59:50,269 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:00:04,134 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.694e+02 3.295e+02 4.036e+02 1.292e+03, threshold=6.590e+02, percent-clipped=6.0
+2023-02-06 18:00:23,381 INFO [train.py:901] (1/4) Epoch 16, batch 2250, loss[loss=0.1773, simple_loss=0.2699, pruned_loss=0.04238, over 8457.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2985, pruned_loss=0.06999, over 1615311.80 frames. ], batch size: 25, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:00:46,373 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-02-06 18:00:58,620 INFO [train.py:901] (1/4) Epoch 16, batch 2300, loss[loss=0.2455, simple_loss=0.3141, pruned_loss=0.0884, over 8467.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2986, pruned_loss=0.0703, over 1612156.92 frames. ], batch size: 29, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:01:13,226 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.374e+02 2.935e+02 3.719e+02 2.594e+03, threshold=5.871e+02, percent-clipped=2.0
+2023-02-06 18:01:22,768 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.8955, 1.0921, 1.0542, 0.5647, 1.0795, 0.9026, 0.0834, 1.0449],
+ device='cuda:1'), covar=tensor([0.0311, 0.0264, 0.0233, 0.0416, 0.0298, 0.0670, 0.0574, 0.0235],
+ device='cuda:1'), in_proj_covar=tensor([0.0423, 0.0364, 0.0313, 0.0416, 0.0350, 0.0507, 0.0372, 0.0389],
+ device='cuda:1'), out_proj_covar=tensor([1.1640e-04, 9.7522e-05, 8.3455e-05, 1.1214e-04, 9.4408e-05, 1.4701e-04,
+ 1.0201e-04, 1.0529e-04], device='cuda:1')
+2023-02-06 18:01:32,627 INFO [train.py:901] (1/4) Epoch 16, batch 2350, loss[loss=0.3289, simple_loss=0.3846, pruned_loss=0.1366, over 7300.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2988, pruned_loss=0.0705, over 1613449.44 frames. ], batch size: 72, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:01:38,852 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:01:55,681 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123630.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:02:06,124 INFO [train.py:901] (1/4) Epoch 16, batch 2400, loss[loss=0.1582, simple_loss=0.2381, pruned_loss=0.03916, over 7535.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2979, pruned_loss=0.07037, over 1611837.66 frames. ], batch size: 18, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:02:22,334 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.321e+02 3.011e+02 3.485e+02 7.740e+02, threshold=6.021e+02, percent-clipped=5.0
+2023-02-06 18:02:28,432 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123676.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:02:42,472 INFO [train.py:901] (1/4) Epoch 16, batch 2450, loss[loss=0.2167, simple_loss=0.2852, pruned_loss=0.0741, over 6836.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2973, pruned_loss=0.06998, over 1609430.16 frames. ], batch size: 15, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:03:15,350 INFO [train.py:901] (1/4) Epoch 16, batch 2500, loss[loss=0.2368, simple_loss=0.3172, pruned_loss=0.07819, over 8360.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2962, pruned_loss=0.06971, over 1610266.29 frames. ], batch size: 24, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:03:25,449 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8191, 1.6896, 1.9705, 1.6020, 1.1732, 1.8085, 2.0899, 1.9574],
+ device='cuda:1'), covar=tensor([0.0481, 0.1141, 0.1529, 0.1375, 0.0609, 0.1325, 0.0664, 0.0568],
+ device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0157, 0.0100, 0.0163, 0.0114, 0.0140],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 18:03:29,366 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.388e+02 3.009e+02 3.987e+02 1.163e+03, threshold=6.019e+02, percent-clipped=7.0
+2023-02-06 18:03:46,949 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123790.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:03:47,737 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123791.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:03:50,918 INFO [train.py:901] (1/4) Epoch 16, batch 2550, loss[loss=0.1834, simple_loss=0.2739, pruned_loss=0.04646, over 8029.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2968, pruned_loss=0.06981, over 1609487.09 frames. ], batch size: 22, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:04:24,937 INFO [train.py:901] (1/4) Epoch 16, batch 2600, loss[loss=0.1992, simple_loss=0.2918, pruned_loss=0.05329, over 8465.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2955, pruned_loss=0.06874, over 1612216.70 frames. ], batch size: 25, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:04:38,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.447e+02 2.814e+02 3.524e+02 5.517e+02, threshold=5.629e+02, percent-clipped=0.0
+2023-02-06 18:04:54,348 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123890.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:04:58,762 INFO [train.py:901] (1/4) Epoch 16, batch 2650, loss[loss=0.2301, simple_loss=0.3015, pruned_loss=0.07934, over 7663.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2965, pruned_loss=0.06924, over 1612073.53 frames. ], batch size: 19, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:05:06,338 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123905.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:05:34,184 INFO [train.py:901] (1/4) Epoch 16, batch 2700, loss[loss=0.2268, simple_loss=0.3061, pruned_loss=0.07371, over 8292.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2972, pruned_loss=0.06968, over 1614115.60 frames. ], batch size: 23, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:05:48,215 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.455e+02 3.188e+02 4.135e+02 8.908e+02, threshold=6.377e+02, percent-clipped=7.0
+2023-02-06 18:06:07,604 INFO [train.py:901] (1/4) Epoch 16, batch 2750, loss[loss=0.1595, simple_loss=0.2406, pruned_loss=0.03922, over 7795.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2958, pruned_loss=0.06905, over 1612018.36 frames. ], batch size: 19, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:06:45,100 INFO [train.py:901] (1/4) Epoch 16, batch 2800, loss[loss=0.1985, simple_loss=0.2754, pruned_loss=0.06081, over 8137.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2951, pruned_loss=0.06832, over 1611738.89 frames. ], batch size: 22, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:06:46,030 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:06:50,855 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124054.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:06:59,430 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 2.578e+02 3.039e+02 4.001e+02 1.196e+03, threshold=6.079e+02, percent-clipped=5.0
+2023-02-06 18:07:03,053 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124072.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:07:08,255 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124080.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:07:18,993 INFO [train.py:901] (1/4) Epoch 16, batch 2850, loss[loss=0.1905, simple_loss=0.2801, pruned_loss=0.0505, over 8355.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2968, pruned_loss=0.06906, over 1616402.67 frames. ], batch size: 24, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:07:22,873 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-06 18:07:55,341 INFO [train.py:901] (1/4) Epoch 16, batch 2900, loss[loss=0.1902, simple_loss=0.2794, pruned_loss=0.05057, over 8358.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2966, pruned_loss=0.06898, over 1607310.91 frames. ], batch size: 24, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:08:06,263 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124161.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:08:06,299 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5562, 2.3125, 3.2074, 2.5719, 2.9324, 2.5148, 2.0987, 1.7709],
+ device='cuda:1'), covar=tensor([0.4712, 0.4853, 0.1613, 0.3263, 0.2690, 0.2647, 0.1864, 0.5206],
+ device='cuda:1'), in_proj_covar=tensor([0.0914, 0.0924, 0.0758, 0.0894, 0.0961, 0.0848, 0.0723, 0.0797],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 18:08:10,037 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.456e+02 3.206e+02 4.387e+02 8.191e+02, threshold=6.412e+02, percent-clipped=4.0
+2023-02-06 18:08:22,881 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124186.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:08:29,509 INFO [train.py:901] (1/4) Epoch 16, batch 2950, loss[loss=0.1943, simple_loss=0.2652, pruned_loss=0.06164, over 7542.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2976, pruned_loss=0.06986, over 1607666.58 frames. ], batch size: 18, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:08:29,742 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0376, 2.4639, 2.7931, 1.4929, 3.0306, 1.7616, 1.3914, 1.9053],
+ device='cuda:1'), covar=tensor([0.0678, 0.0321, 0.0203, 0.0636, 0.0327, 0.0681, 0.0800, 0.0486],
+ device='cuda:1'), in_proj_covar=tensor([0.0425, 0.0363, 0.0311, 0.0417, 0.0350, 0.0508, 0.0371, 0.0387],
+ device='cuda:1'), out_proj_covar=tensor([1.1693e-04, 9.7248e-05, 8.2672e-05, 1.1219e-04, 9.4535e-05, 1.4726e-04,
+ 1.0189e-04, 1.0494e-04], device='cuda:1')
+2023-02-06 18:08:35,666 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 18:08:55,342 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124234.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:09:01,454 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0
+2023-02-06 18:09:03,840 INFO [train.py:901] (1/4) Epoch 16, batch 3000, loss[loss=0.2278, simple_loss=0.311, pruned_loss=0.07227, over 8495.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2979, pruned_loss=0.07009, over 1608773.49 frames. ], batch size: 28, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:09:03,840 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-06 18:09:16,275 INFO [train.py:935] (1/4) Epoch 16, validation: loss=0.1794, simple_loss=0.2796, pruned_loss=0.03958, over 944034.00 frames.
+2023-02-06 18:09:16,277 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-06 18:09:32,708 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.393e+02 2.939e+02 3.627e+02 1.404e+03, threshold=5.877e+02, percent-clipped=2.0
+2023-02-06 18:09:49,103 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124290.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:09:52,919 INFO [train.py:901] (1/4) Epoch 16, batch 3050, loss[loss=0.2025, simple_loss=0.2834, pruned_loss=0.06081, over 7292.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.298, pruned_loss=0.06975, over 1613398.37 frames. ], batch size: 16, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:10:25,965 INFO [train.py:901] (1/4) Epoch 16, batch 3100, loss[loss=0.2554, simple_loss=0.3324, pruned_loss=0.08917, over 8342.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2981, pruned_loss=0.07004, over 1613697.46 frames. ], batch size: 26, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:10:28,059 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:10:39,312 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124366.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 18:10:39,810 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.464e+02 2.975e+02 4.095e+02 1.383e+03, threshold=5.950e+02, percent-clipped=6.0
+2023-02-06 18:11:01,474 INFO [train.py:901] (1/4) Epoch 16, batch 3150, loss[loss=0.2241, simple_loss=0.307, pruned_loss=0.0706, over 8318.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2986, pruned_loss=0.0703, over 1617433.22 frames. ], batch size: 25, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:11:02,941 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124398.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:11:21,450 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124424.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:11:36,587 INFO [train.py:901] (1/4) Epoch 16, batch 3200, loss[loss=0.2045, simple_loss=0.2787, pruned_loss=0.06512, over 7705.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2972, pruned_loss=0.06973, over 1616303.04 frames. ], batch size: 18, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:11:39,401 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124450.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:11:50,425 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.552e+02 3.102e+02 3.772e+02 6.284e+02, threshold=6.205e+02, percent-clipped=3.0
+2023-02-06 18:12:09,966 INFO [train.py:901] (1/4) Epoch 16, batch 3250, loss[loss=0.2221, simple_loss=0.3101, pruned_loss=0.06704, over 8464.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2967, pruned_loss=0.06953, over 1615083.35 frames. ], batch size: 25, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:12:23,026 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124513.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:12:40,935 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124539.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:12:45,404 INFO [train.py:901] (1/4) Epoch 16, batch 3300, loss[loss=0.217, simple_loss=0.2978, pruned_loss=0.06812, over 8139.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2974, pruned_loss=0.06958, over 1613426.36 frames. ], batch size: 22, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:12:59,420 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.425e+02 2.919e+02 3.659e+02 6.879e+02, threshold=5.837e+02, percent-clipped=1.0
+2023-02-06 18:13:18,842 INFO [train.py:901] (1/4) Epoch 16, batch 3350, loss[loss=0.1817, simple_loss=0.2676, pruned_loss=0.0479, over 8245.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2962, pruned_loss=0.06875, over 1611873.70 frames. ], batch size: 22, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:13:25,351 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:13:40,971 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. limit=2.0
+2023-02-06 18:13:43,479 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124630.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:13:46,020 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124634.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:13:54,715 INFO [train.py:901] (1/4) Epoch 16, batch 3400, loss[loss=0.1961, simple_loss=0.2849, pruned_loss=0.05361, over 8554.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2964, pruned_loss=0.06846, over 1613630.23 frames. ], batch size: 34, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:14:01,601 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124656.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:14:08,833 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.420e+02 3.011e+02 3.525e+02 7.222e+02, threshold=6.022e+02, percent-clipped=3.0
+2023-02-06 18:14:18,588 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124681.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:14:28,866 INFO [train.py:901] (1/4) Epoch 16, batch 3450, loss[loss=0.1908, simple_loss=0.2782, pruned_loss=0.05167, over 7935.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.297, pruned_loss=0.06875, over 1613773.32 frames. ], batch size: 20, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:14:38,558 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124710.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 18:14:48,196 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7185, 5.7847, 5.0728, 2.4876, 5.1049, 5.7031, 5.3321, 5.1774],
+ device='cuda:1'), covar=tensor([0.0471, 0.0405, 0.0806, 0.4187, 0.0667, 0.0631, 0.0918, 0.0535],
+ device='cuda:1'), in_proj_covar=tensor([0.0489, 0.0408, 0.0410, 0.0511, 0.0401, 0.0408, 0.0396, 0.0355],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 18:15:04,146 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3221, 1.5621, 1.6366, 0.9905, 1.6876, 1.2214, 0.3140, 1.5009],
+ device='cuda:1'), covar=tensor([0.0408, 0.0313, 0.0232, 0.0439, 0.0336, 0.0829, 0.0691, 0.0234],
+ device='cuda:1'), in_proj_covar=tensor([0.0424, 0.0363, 0.0310, 0.0415, 0.0351, 0.0508, 0.0371, 0.0387],
+ device='cuda:1'), out_proj_covar=tensor([1.1649e-04, 9.7225e-05, 8.2362e-05, 1.1162e-04, 9.4818e-05, 1.4724e-04,
+ 1.0156e-04, 1.0472e-04], device='cuda:1')
+2023-02-06 18:15:05,363 INFO [train.py:901] (1/4) Epoch 16, batch 3500, loss[loss=0.2188, simple_loss=0.3012, pruned_loss=0.06816, over 8453.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2951, pruned_loss=0.06753, over 1613971.47 frames. ], batch size: 27, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:15:07,658 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124749.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:09,711 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8730, 5.9730, 5.2610, 2.3176, 5.3410, 5.6856, 5.5419, 5.2486],
+ device='cuda:1'), covar=tensor([0.0509, 0.0414, 0.0889, 0.4335, 0.0684, 0.0633, 0.0963, 0.0520],
+ device='cuda:1'), in_proj_covar=tensor([0.0487, 0.0406, 0.0409, 0.0509, 0.0400, 0.0407, 0.0395, 0.0354],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 18:15:20,551 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.534e+02 3.082e+02 3.894e+02 7.146e+02, threshold=6.164e+02, percent-clipped=3.0
+2023-02-06 18:15:22,114 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124769.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:38,223 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 18:15:38,977 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:39,096 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:39,719 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:40,182 INFO [train.py:901] (1/4) Epoch 16, batch 3550, loss[loss=0.1818, simple_loss=0.266, pruned_loss=0.04875, over 8133.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2944, pruned_loss=0.06727, over 1616629.25 frames. ], batch size: 22, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:15:56,878 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:16:00,149 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124825.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 18:16:04,090 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8427, 1.5772, 6.0108, 2.0220, 5.3413, 5.1682, 5.5754, 5.4132],
+ device='cuda:1'), covar=tensor([0.0456, 0.4666, 0.0366, 0.3799, 0.1001, 0.0798, 0.0452, 0.0510],
+ device='cuda:1'), in_proj_covar=tensor([0.0561, 0.0613, 0.0637, 0.0589, 0.0662, 0.0569, 0.0561, 0.0625],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 18:16:08,893 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124838.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:16:14,226 INFO [train.py:901] (1/4) Epoch 16, batch 3600, loss[loss=0.1881, simple_loss=0.2583, pruned_loss=0.05898, over 7804.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2944, pruned_loss=0.06766, over 1614878.73 frames. ], batch size: 19, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:16:30,802 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.338e+02 2.977e+02 3.463e+02 8.977e+02, threshold=5.954e+02, percent-clipped=2.0
+2023-02-06 18:16:50,928 INFO [train.py:901] (1/4) Epoch 16, batch 3650, loss[loss=0.2273, simple_loss=0.2954, pruned_loss=0.07964, over 7923.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2961, pruned_loss=0.06876, over 1616284.64 frames. ], batch size: 20, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:16:59,893 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:17:24,965 INFO [train.py:901] (1/4) Epoch 16, batch 3700, loss[loss=0.2613, simple_loss=0.3341, pruned_loss=0.09432, over 6995.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2966, pruned_loss=0.06937, over 1615154.85 frames. ], batch size: 71, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:17:38,858 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 18:17:40,140 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.643e+02 3.299e+02 4.315e+02 1.525e+03, threshold=6.598e+02, percent-clipped=10.0
+2023-02-06 18:18:01,619 INFO [train.py:901] (1/4) Epoch 16, batch 3750, loss[loss=0.2152, simple_loss=0.301, pruned_loss=0.06467, over 8350.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2951, pruned_loss=0.06814, over 1612423.66 frames. ], batch size: 24, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:18:04,453 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125000.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:07,882 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125005.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:20,985 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125025.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:24,558 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125030.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:35,169 INFO [train.py:901] (1/4) Epoch 16, batch 3800, loss[loss=0.198, simple_loss=0.2892, pruned_loss=0.05347, over 7806.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2966, pruned_loss=0.06903, over 1615985.55 frames. ], batch size: 20, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:18:49,284 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.284e+02 2.854e+02 3.651e+02 7.015e+02, threshold=5.709e+02, percent-clipped=3.0
+2023-02-06 18:18:58,957 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125081.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 18:19:10,734 INFO [train.py:901] (1/4) Epoch 16, batch 3850, loss[loss=0.2266, simple_loss=0.311, pruned_loss=0.07116, over 8125.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2956, pruned_loss=0.06879, over 1609903.64 frames. ], batch size: 22, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:19:18,418 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125106.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 18:19:24,394 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:41,022 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:45,083 INFO [train.py:901] (1/4) Epoch 16, batch 3900, loss[loss=0.2192, simple_loss=0.3102, pruned_loss=0.06409, over 8189.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2952, pruned_loss=0.06878, over 1608734.17 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:19:45,101 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 18:19:45,238 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125146.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:58,185 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125165.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:59,305 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.507e+02 2.888e+02 3.601e+02 7.393e+02, threshold=5.777e+02, percent-clipped=3.0
+2023-02-06 18:20:09,607 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125182.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:20:15,217 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125190.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:20:19,100 INFO [train.py:901] (1/4) Epoch 16, batch 3950, loss[loss=0.2223, simple_loss=0.2995, pruned_loss=0.0725, over 8283.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2942, pruned_loss=0.06798, over 1608820.09 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:20:55,593 INFO [train.py:901] (1/4) Epoch 16, batch 4000, loss[loss=0.2071, simple_loss=0.2819, pruned_loss=0.0661, over 7719.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2962, pruned_loss=0.06927, over 1606547.80 frames. ], batch size: 18, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:21:08,104 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0918, 1.8783, 1.9135, 1.7617, 1.2997, 1.8785, 1.9793, 1.9224],
+ device='cuda:1'), covar=tensor([0.0549, 0.0859, 0.1262, 0.1061, 0.0599, 0.1084, 0.0652, 0.0470],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0156, 0.0100, 0.0162, 0.0113, 0.0140],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 18:21:09,913 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.424e+02 2.747e+02 3.530e+02 7.172e+02, threshold=5.495e+02, percent-clipped=3.0
+2023-02-06 18:21:22,772 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3852, 1.4250, 1.3068, 1.8190, 0.6688, 1.2033, 1.1883, 1.4410],
+ device='cuda:1'), covar=tensor([0.0819, 0.0781, 0.1084, 0.0484, 0.1199, 0.1429, 0.0890, 0.0730],
+ device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0203, 0.0251, 0.0212, 0.0210, 0.0248, 0.0256, 0.0214],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 18:21:29,135 INFO [train.py:901] (1/4) Epoch 16, batch 4050, loss[loss=0.2204, simple_loss=0.3149, pruned_loss=0.06297, over 8357.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2979, pruned_loss=0.06985, over 1606891.55 frames. ], batch size: 24, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:21:29,950 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125297.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:21:41,126 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4670, 4.4603, 3.9716, 1.9904, 3.9806, 4.0492, 4.0316, 3.8018],
+ device='cuda:1'), covar=tensor([0.0710, 0.0552, 0.1201, 0.4741, 0.0823, 0.1014, 0.1204, 0.0827],
+ device='cuda:1'), in_proj_covar=tensor([0.0495, 0.0412, 0.0410, 0.0516, 0.0405, 0.0411, 0.0402, 0.0358],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 18:22:05,146 INFO [train.py:901] (1/4) Epoch 16, batch 4100, loss[loss=0.226, simple_loss=0.3131, pruned_loss=0.06944, over 8463.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.299, pruned_loss=0.07026, over 1611358.16 frames. ], batch size: 39, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:22:19,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.458e+02 2.941e+02 3.398e+02 7.943e+02, threshold=5.881e+02, percent-clipped=6.0
+2023-02-06 18:22:22,354 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125371.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:22:24,211 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125374.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:22:37,725 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125394.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:22:38,928 INFO [train.py:901] (1/4) Epoch 16, batch 4150, loss[loss=0.2091, simple_loss=0.3076, pruned_loss=0.05529, over 8196.00 frames. ], tot_loss[loss=0.221, simple_loss=0.3001, pruned_loss=0.07094, over 1614292.86 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:22:39,124 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125396.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:22:39,142 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125396.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:22:42,588 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1532, 2.8078, 3.8616, 1.9460, 2.0585, 3.9736, 1.0174, 2.4391],
+ device='cuda:1'), covar=tensor([0.1683, 0.1362, 0.0216, 0.2440, 0.3053, 0.0231, 0.2543, 0.1299],
+ device='cuda:1'), in_proj_covar=tensor([0.0172, 0.0179, 0.0112, 0.0212, 0.0255, 0.0116, 0.0162, 0.0175],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-06 18:22:43,282 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9117, 2.3901, 2.5336, 1.3982, 2.5385, 1.5612, 1.4860, 1.9803],
+ device='cuda:1'), covar=tensor([0.0796, 0.0385, 0.0249, 0.0742, 0.0507, 0.0782, 0.0764, 0.0462],
+ device='cuda:1'), in_proj_covar=tensor([0.0427, 0.0362, 0.0311, 0.0419, 0.0354, 0.0510, 0.0372, 0.0392],
+ device='cuda:1'), out_proj_covar=tensor([1.1741e-04, 9.6606e-05, 8.2775e-05, 1.1276e-04, 9.5365e-05, 1.4761e-04,
+ 1.0191e-04, 1.0613e-04], device='cuda:1')
+2023-02-06 18:22:55,759 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125421.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:23:09,065 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125439.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:23:14,194 INFO [train.py:901] (1/4) Epoch 16, batch 4200, loss[loss=0.1977, simple_loss=0.2841, pruned_loss=0.05568, over 8346.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2982, pruned_loss=0.07035, over 1611626.21 frames. ], batch size: 24, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:23:29,128 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.508e+02 2.881e+02 3.373e+02 7.881e+02, threshold=5.761e+02, percent-clipped=2.0
+2023-02-06 18:23:39,903 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 18:23:42,027 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7029, 1.3613, 1.6084, 1.1591, 0.9144, 1.4109, 1.5039, 1.5772],
+ device='cuda:1'), covar=tensor([0.0478, 0.1247, 0.1621, 0.1472, 0.0588, 0.1477, 0.0677, 0.0567],
+ device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0100, 0.0162, 0.0113, 0.0140],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 18:23:44,591 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:23:48,645 INFO [train.py:901] (1/4) Epoch 16, batch 4250, loss[loss=0.2184, simple_loss=0.2918, pruned_loss=0.07247, over 8603.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2969, pruned_loss=0.06929, over 1611047.45 frames. ], batch size: 31, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:24:01,592 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 18:24:09,734 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4704, 2.0370, 2.8511, 2.2934, 2.7010, 2.4079, 2.1340, 1.5507],
+ device='cuda:1'), covar=tensor([0.4543, 0.4490, 0.1662, 0.3334, 0.2361, 0.2551, 0.1653, 0.4914],
+ device='cuda:1'), in_proj_covar=tensor([0.0913, 0.0921, 0.0758, 0.0898, 0.0961, 0.0849, 0.0720, 0.0795],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 18:24:11,893 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 18:24:23,120 INFO [train.py:901] (1/4) Epoch 16, batch 4300, loss[loss=0.1944, simple_loss=0.2731, pruned_loss=0.05779, over 8241.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2982, pruned_loss=0.06989, over 1617132.08 frames. ], batch size: 22, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:24:28,629 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125553.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:24:38,395 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.516e+02 3.115e+02 4.119e+02 8.810e+02, threshold=6.231e+02, percent-clipped=6.0
+2023-02-06 18:24:46,751 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125578.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:24:52,791 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6455, 2.5188, 1.8335, 2.1945, 2.1464, 1.5467, 2.0182, 2.0437],
+ device='cuda:1'), covar=tensor([0.1266, 0.0298, 0.1029, 0.0609, 0.0635, 0.1348, 0.0948, 0.0966],
+ device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0232, 0.0327, 0.0304, 0.0300, 0.0333, 0.0346, 0.0316],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-06 18:24:58,867 INFO [train.py:901] (1/4) Epoch 16, batch 4350, loss[loss=0.2052, simple_loss=0.2908, pruned_loss=0.0598, over 8246.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2971, pruned_loss=0.06879, over 1616601.58 frames. ], batch size: 24, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:25:02,856 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 18:25:05,371 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:25:16,541 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125621.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:25:33,361 INFO [train.py:901] (1/4) Epoch 16, batch 4400, loss[loss=0.2043, simple_loss=0.2941, pruned_loss=0.05723, over 8292.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2964, pruned_loss=0.06862, over 1615451.48 frames. ], batch size: 23, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:25:34,038 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 18:25:48,659 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.489e+02 3.156e+02 3.927e+02 6.760e+02, threshold=6.312e+02, percent-clipped=2.0
+2023-02-06 18:26:09,565 INFO [train.py:901] (1/4) Epoch 16, batch 4450, loss[loss=0.2381, simple_loss=0.3042, pruned_loss=0.08597, over 7961.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2962, pruned_loss=0.06902, over 1611666.06 frames. ], batch size: 21, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:26:14,203 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 18:26:23,713 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6212, 1.8979, 2.0246, 1.3726, 2.1618, 1.4214, 0.5451, 1.8405],
+ device='cuda:1'), covar=tensor([0.0516, 0.0280, 0.0206, 0.0447, 0.0327, 0.0826, 0.0754, 0.0242],
+ device='cuda:1'), in_proj_covar=tensor([0.0430, 0.0361, 0.0314, 0.0420, 0.0354, 0.0511, 0.0372, 0.0391],
+ device='cuda:1'), out_proj_covar=tensor([1.1825e-04, 9.6388e-05, 8.3468e-05, 1.1270e-04, 9.5577e-05, 1.4802e-04,
+ 1.0204e-04, 1.0591e-04], device='cuda:1')
+2023-02-06 18:26:24,207 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:26:38,219 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125738.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:26:41,747 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125743.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:26:43,640 INFO [train.py:901] (1/4) Epoch 16, batch 4500, loss[loss=0.2438, simple_loss=0.3099, pruned_loss=0.08883, over 7934.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2965, pruned_loss=0.06914, over 1611121.51 frames. ], batch size: 20, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:26:57,818 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.361e+02 2.740e+02 3.373e+02 6.169e+02, threshold=5.479e+02, percent-clipped=0.0
+2023-02-06 18:27:04,102 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 18:27:10,713 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125783.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:27:19,278 INFO [train.py:901] (1/4) Epoch 16, batch 4550, loss[loss=0.1662, simple_loss=0.2472, pruned_loss=0.04265, over 7656.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2966, pruned_loss=0.06905, over 1610150.00 frames. ], batch size: 19, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:27:20,086 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8645, 1.4029, 6.0028, 2.1129, 5.4341, 4.9930, 5.5204, 5.4076],
+ device='cuda:1'), covar=tensor([0.0426, 0.4692, 0.0361, 0.3661, 0.0938, 0.0889, 0.0465, 0.0435],
+ device='cuda:1'), in_proj_covar=tensor([0.0560, 0.0619, 0.0641, 0.0590, 0.0660, 0.0573, 0.0563, 0.0630],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 18:27:45,705 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:27:54,456 INFO [train.py:901] (1/4) Epoch 16, batch 4600, loss[loss=0.1891, simple_loss=0.2678, pruned_loss=0.05516, over 8298.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2968, pruned_loss=0.0691, over 1610442.63 frames. ], batch size: 23, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:27:59,460 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125853.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:28:03,905 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0
+2023-02-06 18:28:05,126 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:28:08,980 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.490e+02 3.040e+02 3.897e+02 1.241e+03, threshold=6.080e+02, percent-clipped=8.0
+2023-02-06 18:28:22,168 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:28:30,021 INFO [train.py:901] (1/4) Epoch 16, batch 4650, loss[loss=0.2362, simple_loss=0.3264, pruned_loss=0.07301, over 8029.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2963, pruned_loss=0.06867, over 1611598.21 frames. ], batch size: 22, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:28:31,586 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125898.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:29:06,101 INFO [train.py:901] (1/4) Epoch 16, batch 4700, loss[loss=0.2103, simple_loss=0.2845, pruned_loss=0.06806, over 7807.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2965, pruned_loss=0.06887, over 1610402.95 frames. ], batch size: 20, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:29:18,971 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125965.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:29:20,231 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.580e+02 3.138e+02 4.127e+02 1.212e+03, threshold=6.277e+02, percent-clipped=5.0
+2023-02-06 18:29:34,299 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0
+2023-02-06 18:29:39,519 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 18:29:39,834 INFO [train.py:901] (1/4) Epoch 16, batch 4750, loss[loss=0.1838, simple_loss=0.281, pruned_loss=0.04326, over 8558.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2964, pruned_loss=0.06868, over 1609858.25 frames. ], batch size: 31, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:29:55,976 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126016.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:30:11,192 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 18:30:13,728 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 18:30:15,807 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1961, 1.6657, 3.3940, 1.4692, 2.3066, 3.7190, 3.7532, 3.2519],
+ device='cuda:1'), covar=tensor([0.0953, 0.1612, 0.0335, 0.2068, 0.1089, 0.0226, 0.0510, 0.0538],
+ device='cuda:1'), in_proj_covar=tensor([0.0279, 0.0310, 0.0275, 0.0300, 0.0292, 0.0250, 0.0384, 0.0296],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-06 18:30:16,331 INFO [train.py:901] (1/4) Epoch 16, batch 4800, loss[loss=0.1998, simple_loss=0.2872, pruned_loss=0.05624, over 8344.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2955, pruned_loss=0.06822, over 1608473.74 frames. ], batch size: 26, lr: 4.75e-03, grad_scale: 16.0
+2023-02-06 18:30:31,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.301e+02 2.788e+02 3.330e+02 6.705e+02, threshold=5.575e+02, percent-clipped=2.0
+2023-02-06 18:30:37,059 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3903, 1.5759, 1.3456, 2.0836, 1.0472, 1.1649, 1.4812, 1.7023],
+ device='cuda:1'), covar=tensor([0.0827, 0.0782, 0.0911, 0.0366, 0.0897, 0.1288, 0.0751, 0.0672],
+ device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0204, 0.0250, 0.0214, 0.0211, 0.0249, 0.0257, 0.0213],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-06 18:30:40,380 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126080.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:30:45,002 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:30:46,461 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126089.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:30:51,047 INFO [train.py:901] (1/4) Epoch 16, batch 4850, loss[loss=0.1951, simple_loss=0.287, pruned_loss=0.05159, over 8246.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.0685, over 1605749.23 frames. ], batch size: 24, lr: 4.74e-03, grad_scale: 16.0
+2023-02-06 18:30:59,315 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6626, 1.9742, 2.0775, 1.2874, 2.2513, 1.3993, 0.6425, 1.9120],
+ device='cuda:1'), covar=tensor([0.0526, 0.0313, 0.0236, 0.0549, 0.0318, 0.0905, 0.0750, 0.0258],
+ device='cuda:1'), in_proj_covar=tensor([0.0427, 0.0362, 0.0314, 0.0421, 0.0353, 0.0511, 0.0371, 0.0390],
+ device='cuda:1'), out_proj_covar=tensor([1.1717e-04, 9.6436e-05, 8.3497e-05, 1.1313e-04, 9.5198e-05, 1.4780e-04,
+ 1.0152e-04, 1.0561e-04], device='cuda:1')
+2023-02-06 18:30:59,983 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:31:01,795 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 18:31:03,310 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126114.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:31:19,043 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:31:26,987 INFO [train.py:901] (1/4) Epoch 16, batch 4900, loss[loss=0.1828, simple_loss=0.2589, pruned_loss=0.05336, over 7209.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2947, pruned_loss=0.06784, over 1604750.32 frames. ], batch size: 16, lr: 4.74e-03, grad_scale: 16.0
+2023-02-06 18:31:32,564 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126154.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:31:41,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.384e+02 3.140e+02 3.836e+02 7.587e+02, threshold=6.281e+02, percent-clipped=5.0
+2023-02-06 18:31:50,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126179.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:32:01,584 INFO [train.py:901] (1/4) Epoch 16, batch 4950, loss[loss=0.1773, simple_loss=0.2627, pruned_loss=0.04597, over 8199.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2947, pruned_loss=0.06831, over 1606207.69 frames. ], batch size: 23, lr: 4.74e-03, grad_scale: 16.0
+2023-02-06 18:32:06,034 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126202.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:32:35,840 INFO [train.py:901] (1/4) Epoch 16, batch 5000, loss[loss=0.1994, simple_loss=0.2779, pruned_loss=0.06042, over 8475.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2955, pruned_loss=0.06796, over 1610571.17 frames. ], batch size: 25, lr: 4.74e-03, grad_scale: 16.0
+2023-02-06 18:32:37,272 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0913, 1.5449, 4.3243, 1.6159, 3.7793, 3.5958, 3.8806, 3.7422],
+ device='cuda:1'), covar=tensor([0.0637, 0.4204, 0.0575, 0.3901, 0.1293, 0.1024, 0.0621, 0.0757],
+ device='cuda:1'), in_proj_covar=tensor([0.0564, 0.0622, 0.0647, 0.0593, 0.0669, 0.0576, 0.0565, 0.0633],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-06 18:32:50,294 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.421e+02 2.802e+02 3.540e+02 7.456e+02, threshold=5.603e+02, percent-clipped=2.0
+2023-02-06 18:33:10,452 INFO [train.py:901] (1/4) Epoch 16, batch 5050, loss[loss=0.2166, simple_loss=0.3008, pruned_loss=0.06618, over 8030.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2952, pruned_loss=0.06758, over 1614579.07 frames. ], batch size: 22, lr: 4.74e-03, grad_scale: 8.0
+2023-02-06 18:33:38,223 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:33:39,107 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 18:33:41,489 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 18:33:44,853 INFO [train.py:901] (1/4) Epoch 16, batch 5100, loss[loss=0.2552, simple_loss=0.3081, pruned_loss=0.1012, over 7931.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2964, pruned_loss=0.06835, over 1615928.33 frames. ], batch size: 20, lr: 4.74e-03, grad_scale: 8.0
+2023-02-06 18:33:55,147 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126360.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:33:55,980 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126361.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:34:01,121 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.574e+02 2.967e+02 3.773e+02 8.448e+02, threshold=5.934e+02, percent-clipped=7.0
+2023-02-06 18:34:20,681 INFO [train.py:901] (1/4) Epoch 16, batch 5150, loss[loss=0.2404, simple_loss=0.3109, pruned_loss=0.08495, over 8457.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2963, pruned_loss=0.06884, over 1615703.91 frames. ], batch size: 27, lr: 4.74e-03, grad_scale: 8.0
+2023-02-06 18:34:22,771 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126398.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:34:54,884 INFO [train.py:901] (1/4) Epoch 16, batch 5200, loss[loss=0.2546, simple_loss=0.3255, pruned_loss=0.09181, over 8478.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.296, pruned_loss=0.06879, over 1614410.81 frames. ], batch size: 28, lr: 4.74e-03, grad_scale: 8.0
+2023-02-06 18:35:03,396 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126458.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:35:10,025 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.450e+02 2.961e+02 4.009e+02 9.502e+02, threshold=5.923e+02, percent-clipped=8.0
+2023-02-06 18:35:10,198 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126468.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:35:15,126 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126475.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:35:21,833 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126483.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:35:30,966 INFO [train.py:901] (1/4) Epoch 16, batch 5250, loss[loss=0.2909, simple_loss=0.3467, pruned_loss=0.1176, over 8525.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2957, pruned_loss=0.06854, over 1613800.25 frames. ], batch size: 28, lr: 4.74e-03, grad_scale: 8.0
+2023-02-06 18:35:35,230 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5575, 1.9893, 3.3274, 1.3875, 2.4450, 1.9561, 1.7300, 2.4449],
+ device='cuda:1'), covar=tensor([0.1765, 0.2392, 0.0755, 0.4134, 0.1676, 0.2887, 0.1953, 0.2143],
+ device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0553, 0.0540, 0.0610, 0.0627, 0.0566, 0.0496, 0.0615],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-06 18:35:39,844 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 18:36:05,601 INFO [train.py:901] (1/4) Epoch 16, batch 5300, loss[loss=0.2113, simple_loss=0.2934, pruned_loss=0.06458, over 8581.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2965, pruned_loss=0.0694, over 1612624.48 frames. ], batch size: 31, lr: 4.74e-03, grad_scale: 8.0
+2023-02-06 18:36:20,899 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.415e+02 2.951e+02 3.953e+02 1.148e+03, threshold=5.902e+02, percent-clipped=4.0
+2023-02-06 18:36:34,266 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7213, 1.5062, 1.8943, 1.4834, 0.9225, 1.7164, 2.1286, 2.0235],
+ device='cuda:1'), covar=tensor([0.0483, 0.1248, 0.1631, 0.1446, 0.0607, 0.1399, 0.0655, 0.0562],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0162, 0.0114, 0.0140],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-06 18:36:41,568 INFO [train.py:901] (1/4) Epoch 16, batch 5350, loss[loss=0.2118, simple_loss=0.2989, pruned_loss=0.06231, over 8105.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2971, pruned_loss=0.0696, over 1612619.64 frames.
], batch size: 23, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:36:50,006 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7963, 1.6494, 2.8003, 1.4000, 2.2117, 3.0192, 3.0946, 2.6123], + device='cuda:1'), covar=tensor([0.1012, 0.1407, 0.0451, 0.1977, 0.1010, 0.0285, 0.0613, 0.0534], + device='cuda:1'), in_proj_covar=tensor([0.0279, 0.0311, 0.0276, 0.0299, 0.0292, 0.0250, 0.0384, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 18:36:55,927 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6997, 2.0518, 3.1739, 1.5348, 2.5735, 2.0829, 1.7813, 2.4630], + device='cuda:1'), covar=tensor([0.1661, 0.2057, 0.0763, 0.3834, 0.1477, 0.2577, 0.1845, 0.1895], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0553, 0.0539, 0.0610, 0.0626, 0.0565, 0.0497, 0.0616], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 18:37:08,154 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5910, 2.1030, 3.2498, 1.4635, 1.4107, 3.0969, 0.7731, 1.9678], + device='cuda:1'), covar=tensor([0.1953, 0.1470, 0.0302, 0.3096, 0.3669, 0.0352, 0.2843, 0.2066], + device='cuda:1'), in_proj_covar=tensor([0.0176, 0.0183, 0.0114, 0.0214, 0.0260, 0.0119, 0.0166, 0.0179], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 18:37:16,915 INFO [train.py:901] (1/4) Epoch 16, batch 5400, loss[loss=0.2402, simple_loss=0.3166, pruned_loss=0.08189, over 8463.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2976, pruned_loss=0.07009, over 1614968.89 frames. ], batch size: 25, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:32,198 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.413e+02 2.875e+02 3.758e+02 9.843e+02, threshold=5.751e+02, percent-clipped=6.0 +2023-02-06 18:37:48,665 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 18:37:51,443 INFO [train.py:901] (1/4) Epoch 16, batch 5450, loss[loss=0.2158, simple_loss=0.2907, pruned_loss=0.07042, over 8142.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2977, pruned_loss=0.06978, over 1616767.77 frames. ], batch size: 22, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:17,609 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126731.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:24,939 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:28,283 INFO [train.py:901] (1/4) Epoch 16, batch 5500, loss[loss=0.2138, simple_loss=0.3039, pruned_loss=0.06179, over 8450.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2971, pruned_loss=0.06974, over 1614813.95 frames. ], batch size: 29, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:28,996 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 18:38:35,365 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:44,224 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.451e+02 2.886e+02 3.496e+02 8.391e+02, threshold=5.772e+02, percent-clipped=4.0 +2023-02-06 18:39:02,249 INFO [train.py:901] (1/4) Epoch 16, batch 5550, loss[loss=0.2786, simple_loss=0.35, pruned_loss=0.1037, over 8244.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2973, pruned_loss=0.06964, over 1617767.36 frames. ], batch size: 22, lr: 4.73e-03, grad_scale: 4.0 +2023-02-06 18:39:13,435 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:30,332 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:38,324 INFO [train.py:901] (1/4) Epoch 16, batch 5600, loss[loss=0.2225, simple_loss=0.3102, pruned_loss=0.06741, over 8198.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.297, pruned_loss=0.06937, over 1617809.13 frames. ], batch size: 23, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:39:45,809 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:54,359 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.374e+02 2.959e+02 4.088e+02 8.002e+02, threshold=5.917e+02, percent-clipped=4.0 +2023-02-06 18:40:09,610 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1965, 1.9344, 2.6323, 2.1439, 2.5147, 2.1796, 1.8690, 1.1963], + device='cuda:1'), covar=tensor([0.4981, 0.4235, 0.1522, 0.3199, 0.2146, 0.2597, 0.1854, 0.4627], + device='cuda:1'), in_proj_covar=tensor([0.0916, 0.0924, 0.0762, 0.0897, 0.0961, 0.0846, 0.0721, 0.0797], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 18:40:12,811 INFO [train.py:901] (1/4) Epoch 16, batch 5650, loss[loss=0.227, simple_loss=0.2969, pruned_loss=0.07859, over 7986.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2978, pruned_loss=0.06999, over 1618209.73 frames. ], batch size: 21, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:40:33,384 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 18:40:33,506 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:40:48,555 INFO [train.py:901] (1/4) Epoch 16, batch 5700, loss[loss=0.2154, simple_loss=0.2968, pruned_loss=0.06698, over 8494.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2972, pruned_loss=0.06989, over 1613813.19 frames. 
], batch size: 29, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:04,180 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.583e+02 3.205e+02 4.543e+02 7.570e+02, threshold=6.410e+02, percent-clipped=11.0 +2023-02-06 18:41:14,333 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8907, 1.5939, 2.0640, 1.7777, 1.9641, 1.8837, 1.6432, 0.7661], + device='cuda:1'), covar=tensor([0.4702, 0.3976, 0.1524, 0.2768, 0.1939, 0.2526, 0.1752, 0.4080], + device='cuda:1'), in_proj_covar=tensor([0.0914, 0.0926, 0.0765, 0.0897, 0.0963, 0.0847, 0.0723, 0.0800], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 18:41:17,568 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9914, 1.6277, 1.3857, 1.5401, 1.4174, 1.2693, 1.2848, 1.2999], + device='cuda:1'), covar=tensor([0.1177, 0.0502, 0.1260, 0.0554, 0.0776, 0.1527, 0.0927, 0.0795], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0234, 0.0327, 0.0304, 0.0301, 0.0336, 0.0346, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 18:41:22,819 INFO [train.py:901] (1/4) Epoch 16, batch 5750, loss[loss=0.208, simple_loss=0.2967, pruned_loss=0.05967, over 8558.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2949, pruned_loss=0.06849, over 1614362.58 frames. ], batch size: 31, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:39,579 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 18:41:56,553 INFO [train.py:901] (1/4) Epoch 16, batch 5800, loss[loss=0.2441, simple_loss=0.314, pruned_loss=0.08715, over 8471.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2949, pruned_loss=0.0683, over 1613779.95 frames. ], batch size: 29, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:14,367 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.425e+02 2.951e+02 3.537e+02 6.549e+02, threshold=5.902e+02, percent-clipped=1.0 +2023-02-06 18:42:27,503 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 18:42:33,212 INFO [train.py:901] (1/4) Epoch 16, batch 5850, loss[loss=0.174, simple_loss=0.2684, pruned_loss=0.03976, over 8357.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2956, pruned_loss=0.06827, over 1620648.56 frames. ], batch size: 24, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:45,185 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:02,066 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:07,120 INFO [train.py:901] (1/4) Epoch 16, batch 5900, loss[loss=0.1875, simple_loss=0.2611, pruned_loss=0.05695, over 8099.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2973, pruned_loss=0.06929, over 1622104.59 frames. 
], batch size: 21, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:09,478 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2693, 1.9629, 2.6963, 2.2607, 2.7038, 2.2175, 1.9082, 1.3072], + device='cuda:1'), covar=tensor([0.4764, 0.4618, 0.1521, 0.2977, 0.1975, 0.2515, 0.1810, 0.4740], + device='cuda:1'), in_proj_covar=tensor([0.0911, 0.0921, 0.0764, 0.0893, 0.0959, 0.0842, 0.0720, 0.0796], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 18:43:12,309 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1892, 1.8802, 2.5157, 2.0897, 2.5050, 2.1598, 1.8443, 1.2881], + device='cuda:1'), covar=tensor([0.5325, 0.4915, 0.1771, 0.3516, 0.2490, 0.3061, 0.2195, 0.5027], + device='cuda:1'), in_proj_covar=tensor([0.0911, 0.0922, 0.0764, 0.0894, 0.0960, 0.0843, 0.0721, 0.0796], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 18:43:22,996 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.337e+02 2.920e+02 3.581e+02 1.365e+03, threshold=5.840e+02, percent-clipped=5.0 +2023-02-06 18:43:30,612 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:34,125 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:42,723 INFO [train.py:901] (1/4) Epoch 16, batch 5950, loss[loss=0.243, simple_loss=0.3159, pruned_loss=0.08506, over 8097.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2965, pruned_loss=0.06913, over 1617349.73 frames. ], batch size: 23, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:51,310 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127208.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:17,703 INFO [train.py:901] (1/4) Epoch 16, batch 6000, loss[loss=0.2504, simple_loss=0.316, pruned_loss=0.09243, over 7642.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2975, pruned_loss=0.06982, over 1620886.35 frames. ], batch size: 19, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:44:17,703 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 18:44:29,970 INFO [train.py:935] (1/4) Epoch 16, validation: loss=0.1793, simple_loss=0.2799, pruned_loss=0.03935, over 944034.00 frames. 
+2023-02-06 18:44:29,972 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 18:44:44,471 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:45,670 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.282e+02 2.976e+02 3.659e+02 8.304e+02, threshold=5.951e+02, percent-clipped=2.0 +2023-02-06 18:44:47,121 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3312, 2.1070, 3.3029, 1.2115, 2.5372, 1.7993, 1.6256, 2.3853], + device='cuda:1'), covar=tensor([0.2181, 0.2541, 0.0842, 0.4779, 0.1689, 0.3472, 0.2294, 0.2436], + device='cuda:1'), in_proj_covar=tensor([0.0502, 0.0554, 0.0538, 0.0609, 0.0622, 0.0562, 0.0498, 0.0616], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 18:45:01,816 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:45:03,667 INFO [train.py:901] (1/4) Epoch 16, batch 6050, loss[loss=0.26, simple_loss=0.348, pruned_loss=0.08601, over 8462.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.298, pruned_loss=0.06986, over 1620144.76 frames. ], batch size: 25, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:39,308 INFO [train.py:901] (1/4) Epoch 16, batch 6100, loss[loss=0.2302, simple_loss=0.3076, pruned_loss=0.07641, over 8123.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2973, pruned_loss=0.06925, over 1617492.21 frames. ], batch size: 22, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:55,488 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.555e+02 2.947e+02 3.627e+02 8.036e+02, threshold=5.895e+02, percent-clipped=1.0 +2023-02-06 18:45:59,287 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 18:46:09,096 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 18:46:13,726 INFO [train.py:901] (1/4) Epoch 16, batch 6150, loss[loss=0.2061, simple_loss=0.2877, pruned_loss=0.06224, over 8140.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2972, pruned_loss=0.06924, over 1621340.19 frames. ], batch size: 22, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:28,957 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8824, 6.0148, 5.1500, 2.3830, 5.2864, 5.6150, 5.4798, 5.2724], + device='cuda:1'), covar=tensor([0.0477, 0.0356, 0.0966, 0.4343, 0.0761, 0.0820, 0.1108, 0.0579], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0413, 0.0415, 0.0518, 0.0407, 0.0415, 0.0407, 0.0358], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 18:46:48,829 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127445.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:46:49,325 INFO [train.py:901] (1/4) Epoch 16, batch 6200, loss[loss=0.1918, simple_loss=0.2638, pruned_loss=0.05984, over 7694.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2966, pruned_loss=0.06894, over 1614666.68 frames. 
], batch size: 18, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:52,899 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1290, 1.5801, 1.7203, 1.3895, 0.9225, 1.5637, 1.8072, 1.7235], + device='cuda:1'), covar=tensor([0.0459, 0.1150, 0.1669, 0.1367, 0.0594, 0.1430, 0.0654, 0.0566], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0190, 0.0156, 0.0100, 0.0162, 0.0113, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 18:47:04,720 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.656e+02 3.320e+02 4.256e+02 8.643e+02, threshold=6.639e+02, percent-clipped=4.0 +2023-02-06 18:47:23,440 INFO [train.py:901] (1/4) Epoch 16, batch 6250, loss[loss=0.2048, simple_loss=0.2846, pruned_loss=0.06254, over 7714.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2953, pruned_loss=0.06852, over 1612366.45 frames. ], batch size: 18, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:57,828 INFO [train.py:901] (1/4) Epoch 16, batch 6300, loss[loss=0.1943, simple_loss=0.2766, pruned_loss=0.05601, over 7425.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2963, pruned_loss=0.06882, over 1616096.98 frames. ], batch size: 17, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:58,587 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:47:59,970 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:08,822 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1763, 1.8704, 2.5776, 2.1057, 2.5390, 2.1778, 1.9022, 1.2256], + device='cuda:1'), covar=tensor([0.4513, 0.4258, 0.1468, 0.3102, 0.2002, 0.2616, 0.1693, 0.4586], + device='cuda:1'), in_proj_covar=tensor([0.0913, 0.0921, 0.0766, 0.0896, 0.0964, 0.0846, 0.0722, 0.0796], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 18:48:13,472 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2962, 2.6720, 3.0068, 1.5472, 3.1158, 1.9719, 1.4985, 2.1581], + device='cuda:1'), covar=tensor([0.0527, 0.0255, 0.0186, 0.0576, 0.0339, 0.0546, 0.0615, 0.0405], + device='cuda:1'), in_proj_covar=tensor([0.0420, 0.0362, 0.0311, 0.0416, 0.0349, 0.0507, 0.0367, 0.0389], + device='cuda:1'), out_proj_covar=tensor([1.1525e-04, 9.6553e-05, 8.2719e-05, 1.1171e-04, 9.3875e-05, 1.4659e-04, + 1.0041e-04, 1.0499e-04], device='cuda:1') +2023-02-06 18:48:14,536 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.653e+02 3.258e+02 3.936e+02 6.732e+02, threshold=6.516e+02, percent-clipped=2.0 +2023-02-06 18:48:17,991 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:32,719 INFO [train.py:901] (1/4) Epoch 16, batch 6350, loss[loss=0.2025, simple_loss=0.2945, pruned_loss=0.05527, over 8334.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2952, pruned_loss=0.06804, over 1616082.19 frames. 
], batch size: 25, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:48:43,690 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:01,230 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4214, 2.4402, 1.4828, 2.2008, 2.0885, 1.2094, 1.9307, 2.0058], + device='cuda:1'), covar=tensor([0.1614, 0.0545, 0.1490, 0.0655, 0.0821, 0.2134, 0.1183, 0.0952], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0233, 0.0323, 0.0300, 0.0297, 0.0330, 0.0340, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 18:49:03,419 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-06 18:49:07,014 INFO [train.py:901] (1/4) Epoch 16, batch 6400, loss[loss=0.2259, simple_loss=0.3098, pruned_loss=0.07103, over 8341.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2972, pruned_loss=0.06906, over 1621681.92 frames. ], batch size: 26, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:49:18,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0444, 3.9125, 2.3405, 2.9072, 3.0321, 2.0012, 3.0328, 3.0956], + device='cuda:1'), covar=tensor([0.1754, 0.0314, 0.1051, 0.0785, 0.0622, 0.1463, 0.0919, 0.0981], + device='cuda:1'), in_proj_covar=tensor([0.0346, 0.0232, 0.0323, 0.0299, 0.0297, 0.0330, 0.0339, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 18:49:24,190 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.402e+02 3.034e+02 3.710e+02 8.847e+02, threshold=6.069e+02, percent-clipped=1.0 +2023-02-06 18:49:32,558 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:43,222 INFO [train.py:901] (1/4) Epoch 16, batch 6450, loss[loss=0.1922, simple_loss=0.2785, pruned_loss=0.05302, over 8028.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2951, pruned_loss=0.06806, over 1616170.10 frames. ], batch size: 22, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:04,137 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:50:17,017 INFO [train.py:901] (1/4) Epoch 16, batch 6500, loss[loss=0.2649, simple_loss=0.3425, pruned_loss=0.09365, over 8336.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2967, pruned_loss=0.06896, over 1613267.43 frames. ], batch size: 25, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:32,626 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.427e+02 3.150e+02 4.006e+02 1.604e+03, threshold=6.301e+02, percent-clipped=4.0 +2023-02-06 18:50:48,406 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127789.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:50:52,953 INFO [train.py:901] (1/4) Epoch 16, batch 6550, loss[loss=0.1967, simple_loss=0.2745, pruned_loss=0.05948, over 7205.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2955, pruned_loss=0.06835, over 1610702.83 frames. ], batch size: 16, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:53,791 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:17,263 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 18:51:27,458 INFO [train.py:901] (1/4) Epoch 16, batch 6600, loss[loss=0.2107, simple_loss=0.291, pruned_loss=0.06519, over 8651.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.295, pruned_loss=0.0681, over 1610669.23 frames. ], batch size: 34, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:51:36,814 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 18:51:42,275 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:42,773 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.405e+02 2.899e+02 3.574e+02 1.034e+03, threshold=5.799e+02, percent-clipped=3.0 +2023-02-06 18:51:57,078 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:57,612 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127891.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:52:00,756 INFO [train.py:901] (1/4) Epoch 16, batch 6650, loss[loss=0.1709, simple_loss=0.2476, pruned_loss=0.04708, over 7651.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2943, pruned_loss=0.0678, over 1609397.95 frames. ], batch size: 19, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:07,585 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127904.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:52:36,164 INFO [train.py:901] (1/4) Epoch 16, batch 6700, loss[loss=0.1896, simple_loss=0.2651, pruned_loss=0.05708, over 7709.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2955, pruned_loss=0.06825, over 1611605.08 frames. ], batch size: 18, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:41,760 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5091, 1.4847, 1.8376, 1.3311, 1.1495, 1.8153, 0.1633, 1.1849], + device='cuda:1'), covar=tensor([0.1894, 0.1415, 0.0433, 0.1043, 0.3032, 0.0485, 0.2471, 0.1361], + device='cuda:1'), in_proj_covar=tensor([0.0174, 0.0184, 0.0114, 0.0212, 0.0259, 0.0118, 0.0165, 0.0179], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 18:52:52,430 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.543e+02 2.898e+02 3.564e+02 8.195e+02, threshold=5.796e+02, percent-clipped=3.0 +2023-02-06 18:53:01,214 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:10,440 INFO [train.py:901] (1/4) Epoch 16, batch 6750, loss[loss=0.2077, simple_loss=0.2679, pruned_loss=0.07377, over 7540.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2949, pruned_loss=0.06835, over 1609309.13 frames. ], batch size: 18, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:15,090 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0107, 2.0320, 1.8377, 2.5902, 1.1906, 1.5030, 1.8850, 2.0542], + device='cuda:1'), covar=tensor([0.0693, 0.0797, 0.0927, 0.0370, 0.1000, 0.1370, 0.0771, 0.0792], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0201, 0.0247, 0.0211, 0.0208, 0.0247, 0.0252, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 18:53:15,267 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-02-06 18:53:18,487 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:19,174 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:32,878 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128024.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:47,639 INFO [train.py:901] (1/4) Epoch 16, batch 6800, loss[loss=0.2002, simple_loss=0.2875, pruned_loss=0.05648, over 8339.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2946, pruned_loss=0.06794, over 1607122.79 frames. ], batch size: 25, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:51,073 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 18:54:04,017 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.604e+02 3.143e+02 4.008e+02 8.483e+02, threshold=6.287e+02, percent-clipped=3.0 +2023-02-06 18:54:22,245 INFO [train.py:901] (1/4) Epoch 16, batch 6850, loss[loss=0.2437, simple_loss=0.3236, pruned_loss=0.08185, over 8103.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2954, pruned_loss=0.06835, over 1607890.28 frames. ], batch size: 23, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:54:40,675 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 18:54:53,394 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128139.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:54,709 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128141.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:56,196 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6930, 1.8294, 2.6019, 1.4968, 1.1733, 2.4580, 0.3377, 1.4585], + device='cuda:1'), covar=tensor([0.2151, 0.1540, 0.0438, 0.2194, 0.3588, 0.0468, 0.2784, 0.1911], + device='cuda:1'), in_proj_covar=tensor([0.0175, 0.0183, 0.0114, 0.0212, 0.0258, 0.0118, 0.0164, 0.0178], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 18:54:58,084 INFO [train.py:901] (1/4) Epoch 16, batch 6900, loss[loss=0.2428, simple_loss=0.3306, pruned_loss=0.07752, over 8025.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2944, pruned_loss=0.06755, over 1608487.93 frames. ], batch size: 22, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:08,137 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128160.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:55:14,249 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.605e+02 3.172e+02 3.868e+02 9.306e+02, threshold=6.344e+02, percent-clipped=5.0 +2023-02-06 18:55:25,857 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128185.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:55:32,787 INFO [train.py:901] (1/4) Epoch 16, batch 6950, loss[loss=0.2582, simple_loss=0.3312, pruned_loss=0.09257, over 8587.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2948, pruned_loss=0.0675, over 1612053.58 frames. ], batch size: 34, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:34,511 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-06 18:55:43,518 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:55:48,029 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 18:55:58,348 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:07,210 INFO [train.py:901] (1/4) Epoch 16, batch 7000, loss[loss=0.1843, simple_loss=0.2561, pruned_loss=0.05621, over 7260.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.295, pruned_loss=0.06769, over 1615458.70 frames. ], batch size: 16, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:56:15,595 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:19,645 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:24,008 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.690e+02 3.457e+02 5.056e+02 8.270e+02, threshold=6.915e+02, percent-clipped=6.0 +2023-02-06 18:56:36,190 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:42,585 INFO [train.py:901] (1/4) Epoch 16, batch 7050, loss[loss=0.2549, simple_loss=0.3234, pruned_loss=0.09318, over 8240.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.06862, over 1614204.93 frames. ], batch size: 22, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:03,871 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:11,329 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:16,468 INFO [train.py:901] (1/4) Epoch 16, batch 7100, loss[loss=0.1837, simple_loss=0.262, pruned_loss=0.05272, over 7813.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2963, pruned_loss=0.06863, over 1612417.36 frames. ], batch size: 20, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:18,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:33,888 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.456e+02 3.083e+02 3.766e+02 8.441e+02, threshold=6.166e+02, percent-clipped=2.0 +2023-02-06 18:57:52,032 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128395.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:52,512 INFO [train.py:901] (1/4) Epoch 16, batch 7150, loss[loss=0.2492, simple_loss=0.3193, pruned_loss=0.0896, over 8478.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2954, pruned_loss=0.06776, over 1610615.30 frames. 
], batch size: 49, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:08,447 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1969, 2.2083, 1.7255, 1.9277, 1.7751, 1.3948, 1.6112, 1.6757], + device='cuda:1'), covar=tensor([0.1205, 0.0343, 0.1033, 0.0558, 0.0731, 0.1427, 0.0967, 0.0811], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0233, 0.0325, 0.0301, 0.0302, 0.0330, 0.0342, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 18:58:09,831 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:17,229 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:27,263 INFO [train.py:901] (1/4) Epoch 16, batch 7200, loss[loss=0.2575, simple_loss=0.3307, pruned_loss=0.09211, over 8622.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2956, pruned_loss=0.06829, over 1603577.83 frames. ], batch size: 50, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:42,533 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.378e+02 2.905e+02 3.370e+02 6.119e+02, threshold=5.810e+02, percent-clipped=0.0 +2023-02-06 18:59:02,793 INFO [train.py:901] (1/4) Epoch 16, batch 7250, loss[loss=0.2331, simple_loss=0.3096, pruned_loss=0.07826, over 8288.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2959, pruned_loss=0.06849, over 1606075.88 frames. ], batch size: 23, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:13,764 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:26,460 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1735, 1.3359, 4.3455, 1.7075, 3.8359, 3.5874, 3.9428, 3.7910], + device='cuda:1'), covar=tensor([0.0466, 0.4703, 0.0518, 0.3634, 0.1116, 0.0937, 0.0486, 0.0631], + device='cuda:1'), in_proj_covar=tensor([0.0561, 0.0623, 0.0643, 0.0591, 0.0670, 0.0573, 0.0564, 0.0632], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 18:59:31,352 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:32,910 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 18:59:37,230 INFO [train.py:901] (1/4) Epoch 16, batch 7300, loss[loss=0.2, simple_loss=0.2877, pruned_loss=0.05616, over 8562.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2955, pruned_loss=0.06809, over 1607363.29 frames. ], batch size: 31, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:52,609 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.470e+02 2.980e+02 3.722e+02 1.252e+03, threshold=5.960e+02, percent-clipped=4.0 +2023-02-06 19:00:02,295 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:12,482 INFO [train.py:901] (1/4) Epoch 16, batch 7350, loss[loss=0.2273, simple_loss=0.3063, pruned_loss=0.07417, over 8358.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2954, pruned_loss=0.06827, over 1608027.73 frames. 
], batch size: 24, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:19,507 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:21,498 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:31,402 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 19:00:36,122 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:47,435 INFO [train.py:901] (1/4) Epoch 16, batch 7400, loss[loss=0.1968, simple_loss=0.2746, pruned_loss=0.05949, over 7556.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2963, pruned_loss=0.06864, over 1607873.52 frames. ], batch size: 18, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:49,533 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 19:01:00,966 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5141, 1.3752, 1.7201, 1.3102, 0.9380, 1.5594, 1.5568, 1.4324], + device='cuda:1'), covar=tensor([0.0533, 0.1240, 0.1640, 0.1388, 0.0595, 0.1455, 0.0672, 0.0632], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 19:01:02,829 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.610e+02 3.305e+02 3.788e+02 1.058e+03, threshold=6.610e+02, percent-clipped=7.0 +2023-02-06 19:01:11,760 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:01:21,080 INFO [train.py:901] (1/4) Epoch 16, batch 7450, loss[loss=0.2281, simple_loss=0.3099, pruned_loss=0.07316, over 8643.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2957, pruned_loss=0.06845, over 1609573.95 frames. ], batch size: 34, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:01:30,639 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 19:01:52,242 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8919, 6.0701, 5.2458, 2.2978, 5.3215, 5.7662, 5.5147, 5.4532], + device='cuda:1'), covar=tensor([0.0737, 0.0433, 0.1038, 0.5368, 0.0747, 0.0722, 0.1237, 0.0594], + device='cuda:1'), in_proj_covar=tensor([0.0503, 0.0411, 0.0416, 0.0516, 0.0403, 0.0412, 0.0405, 0.0357], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:01:56,805 INFO [train.py:901] (1/4) Epoch 16, batch 7500, loss[loss=0.2122, simple_loss=0.2972, pruned_loss=0.06359, over 8489.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.295, pruned_loss=0.06803, over 1610134.48 frames. ], batch size: 29, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:02:12,296 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. 
limit=2.0 +2023-02-06 19:02:13,137 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.417e+02 2.923e+02 3.614e+02 6.549e+02, threshold=5.847e+02, percent-clipped=0.0 +2023-02-06 19:02:17,186 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:31,137 INFO [train.py:901] (1/4) Epoch 16, batch 7550, loss[loss=0.2254, simple_loss=0.2871, pruned_loss=0.08185, over 6833.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2941, pruned_loss=0.06753, over 1608482.33 frames. ], batch size: 15, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:02:32,034 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:33,331 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:49,736 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5814, 1.7576, 2.6662, 1.3650, 1.9786, 1.9238, 1.5414, 1.9017], + device='cuda:1'), covar=tensor([0.1768, 0.2459, 0.0959, 0.4215, 0.1761, 0.2940, 0.2174, 0.2107], + device='cuda:1'), in_proj_covar=tensor([0.0507, 0.0562, 0.0546, 0.0613, 0.0630, 0.0570, 0.0504, 0.0620], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:03:04,845 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2045, 1.1061, 1.2817, 1.0407, 0.9842, 1.3046, 0.0459, 0.8809], + device='cuda:1'), covar=tensor([0.2126, 0.1590, 0.0531, 0.0992, 0.3132, 0.0626, 0.2535, 0.1406], + device='cuda:1'), in_proj_covar=tensor([0.0177, 0.0185, 0.0115, 0.0215, 0.0261, 0.0120, 0.0165, 0.0179], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 19:03:07,389 INFO [train.py:901] (1/4) Epoch 16, batch 7600, loss[loss=0.1965, simple_loss=0.2779, pruned_loss=0.05753, over 8185.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2943, pruned_loss=0.06758, over 1607862.81 frames. ], batch size: 23, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:23,266 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.439e+02 3.123e+02 4.017e+02 8.994e+02, threshold=6.245e+02, percent-clipped=5.0 +2023-02-06 19:03:38,585 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:03:42,461 INFO [train.py:901] (1/4) Epoch 16, batch 7650, loss[loss=0.1994, simple_loss=0.2862, pruned_loss=0.05631, over 8115.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2949, pruned_loss=0.06767, over 1610763.26 frames. 
], batch size: 23, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:50,523 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128908.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:04:08,860 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9381, 6.1241, 5.3248, 2.3723, 5.3571, 5.7358, 5.4823, 5.3904], + device='cuda:1'), covar=tensor([0.0588, 0.0401, 0.1050, 0.4381, 0.0716, 0.0733, 0.1202, 0.0394], + device='cuda:1'), in_proj_covar=tensor([0.0497, 0.0406, 0.0411, 0.0508, 0.0400, 0.0408, 0.0400, 0.0353], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:04:17,495 INFO [train.py:901] (1/4) Epoch 16, batch 7700, loss[loss=0.205, simple_loss=0.2819, pruned_loss=0.06409, over 8236.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2955, pruned_loss=0.06832, over 1612415.82 frames. ], batch size: 22, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:04:34,542 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.361e+02 3.016e+02 3.880e+02 7.767e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 19:04:42,144 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 19:04:49,725 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4944, 1.9253, 3.2273, 1.3162, 2.5837, 1.8867, 1.6388, 2.3354], + device='cuda:1'), covar=tensor([0.1913, 0.2502, 0.0869, 0.4312, 0.1622, 0.3028, 0.2160, 0.2283], + device='cuda:1'), in_proj_covar=tensor([0.0504, 0.0561, 0.0544, 0.0612, 0.0628, 0.0570, 0.0502, 0.0617], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:04:52,877 INFO [train.py:901] (1/4) Epoch 16, batch 7750, loss[loss=0.1769, simple_loss=0.2479, pruned_loss=0.05296, over 7705.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2946, pruned_loss=0.06749, over 1611984.55 frames. ], batch size: 18, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:04:56,379 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6571, 1.8218, 2.7083, 1.4719, 1.9941, 1.9703, 1.7527, 1.7493], + device='cuda:1'), covar=tensor([0.1763, 0.2665, 0.0858, 0.4398, 0.1788, 0.3044, 0.2116, 0.2311], + device='cuda:1'), in_proj_covar=tensor([0.0505, 0.0562, 0.0545, 0.0613, 0.0629, 0.0571, 0.0503, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:05:21,214 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5915, 1.6624, 1.7295, 1.3301, 1.8082, 1.4281, 0.9153, 1.5823], + device='cuda:1'), covar=tensor([0.0426, 0.0264, 0.0181, 0.0391, 0.0305, 0.0572, 0.0633, 0.0241], + device='cuda:1'), in_proj_covar=tensor([0.0426, 0.0364, 0.0314, 0.0421, 0.0350, 0.0510, 0.0373, 0.0393], + device='cuda:1'), out_proj_covar=tensor([1.1664e-04, 9.6988e-05, 8.3333e-05, 1.1296e-04, 9.4072e-05, 1.4731e-04, + 1.0217e-04, 1.0608e-04], device='cuda:1') +2023-02-06 19:05:24,791 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 19:05:26,241 INFO [train.py:901] (1/4) Epoch 16, batch 7800, loss[loss=0.2419, simple_loss=0.3188, pruned_loss=0.08253, over 7971.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2965, pruned_loss=0.06886, over 1610010.88 frames. 
], batch size: 21, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:05:28,997 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1465, 2.1399, 1.5764, 1.8916, 1.8200, 1.2790, 1.5006, 1.7040], + device='cuda:1'), covar=tensor([0.1330, 0.0353, 0.1135, 0.0558, 0.0720, 0.1554, 0.0982, 0.0874], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0234, 0.0327, 0.0300, 0.0299, 0.0332, 0.0341, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:05:31,087 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:05:41,944 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.423e+02 2.949e+02 3.975e+02 9.373e+02, threshold=5.898e+02, percent-clipped=5.0 +2023-02-06 19:05:45,474 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3029, 1.2686, 2.2816, 1.0941, 2.1141, 2.4749, 2.5859, 1.9392], + device='cuda:1'), covar=tensor([0.1172, 0.1456, 0.0569, 0.2360, 0.0852, 0.0451, 0.0786, 0.0974], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0309, 0.0273, 0.0301, 0.0293, 0.0251, 0.0385, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 19:05:48,209 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:00,669 INFO [train.py:901] (1/4) Epoch 16, batch 7850, loss[loss=0.2167, simple_loss=0.2916, pruned_loss=0.07094, over 7966.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2976, pruned_loss=0.06971, over 1614184.62 frames. ], batch size: 21, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:09,643 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8790, 3.8116, 3.4419, 1.6740, 3.3740, 3.4183, 3.5245, 3.1762], + device='cuda:1'), covar=tensor([0.0844, 0.0604, 0.1169, 0.4589, 0.0932, 0.1161, 0.1261, 0.0958], + device='cuda:1'), in_proj_covar=tensor([0.0505, 0.0413, 0.0420, 0.0516, 0.0407, 0.0415, 0.0407, 0.0360], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:06:32,323 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:34,271 INFO [train.py:901] (1/4) Epoch 16, batch 7900, loss[loss=0.1925, simple_loss=0.2625, pruned_loss=0.06121, over 6788.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2969, pruned_loss=0.06908, over 1611482.10 frames. ], batch size: 15, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:34,504 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:51,032 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.289e+02 2.786e+02 3.620e+02 6.776e+02, threshold=5.572e+02, percent-clipped=2.0 +2023-02-06 19:06:51,878 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:08,441 INFO [train.py:901] (1/4) Epoch 16, batch 7950, loss[loss=0.232, simple_loss=0.3122, pruned_loss=0.07588, over 8421.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2958, pruned_loss=0.06835, over 1609779.50 frames. 
], batch size: 29, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:42,911 INFO [train.py:901] (1/4) Epoch 16, batch 8000, loss[loss=0.1917, simple_loss=0.2687, pruned_loss=0.05737, over 7420.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2951, pruned_loss=0.06758, over 1613149.23 frames. ], batch size: 17, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:47,178 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129252.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:07:48,166 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-06 19:07:51,187 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:59,068 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.511e+02 2.964e+02 3.601e+02 8.820e+02, threshold=5.927e+02, percent-clipped=6.0 +2023-02-06 19:08:16,585 INFO [train.py:901] (1/4) Epoch 16, batch 8050, loss[loss=0.284, simple_loss=0.3404, pruned_loss=0.1139, over 6659.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.294, pruned_loss=0.0684, over 1595449.24 frames. ], batch size: 72, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:08:53,103 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 19:08:56,566 INFO [train.py:901] (1/4) Epoch 17, batch 0, loss[loss=0.2459, simple_loss=0.3319, pruned_loss=0.07995, over 8187.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3319, pruned_loss=0.07995, over 8187.00 frames. ], batch size: 23, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:08:56,566 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 19:09:04,778 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5326, 1.7732, 2.6447, 1.3627, 1.9236, 1.8299, 1.6015, 1.8623], + device='cuda:1'), covar=tensor([0.1739, 0.2474, 0.0884, 0.4321, 0.1796, 0.3066, 0.2173, 0.2181], + device='cuda:1'), in_proj_covar=tensor([0.0504, 0.0561, 0.0543, 0.0612, 0.0631, 0.0570, 0.0501, 0.0620], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:09:07,566 INFO [train.py:935] (1/4) Epoch 17, validation: loss=0.1792, simple_loss=0.2794, pruned_loss=0.03944, over 944034.00 frames. +2023-02-06 19:09:07,567 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 19:09:19,460 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 19:09:21,132 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-06 19:09:29,754 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3436, 1.9656, 2.5991, 2.1121, 2.4055, 2.2874, 2.0033, 1.2242], + device='cuda:1'), covar=tensor([0.4235, 0.4200, 0.1585, 0.3107, 0.2219, 0.2607, 0.1722, 0.4936], + device='cuda:1'), in_proj_covar=tensor([0.0906, 0.0918, 0.0758, 0.0892, 0.0954, 0.0845, 0.0719, 0.0792], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 19:09:33,854 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129367.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:09:35,632 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.551e+02 3.127e+02 3.678e+02 8.568e+02, threshold=6.254e+02, percent-clipped=4.0 +2023-02-06 19:09:41,817 INFO [train.py:901] (1/4) Epoch 17, batch 50, loss[loss=0.2156, simple_loss=0.3028, pruned_loss=0.06419, over 8249.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2985, pruned_loss=0.07018, over 365293.48 frames. ], batch size: 24, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:09:54,009 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 19:10:18,428 INFO [train.py:901] (1/4) Epoch 17, batch 100, loss[loss=0.2154, simple_loss=0.298, pruned_loss=0.06639, over 8461.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2985, pruned_loss=0.07026, over 641695.60 frames. ], batch size: 25, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:10:18,446 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 19:10:19,955 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129431.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:10:32,096 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7764, 3.2261, 2.0513, 2.4554, 2.3557, 1.6589, 2.4086, 2.7312], + device='cuda:1'), covar=tensor([0.1750, 0.0405, 0.1263, 0.0795, 0.0841, 0.1724, 0.1121, 0.1035], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0235, 0.0328, 0.0301, 0.0299, 0.0335, 0.0342, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:10:44,164 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6644, 1.9220, 2.0179, 1.3073, 2.2164, 1.4452, 0.6674, 1.7828], + device='cuda:1'), covar=tensor([0.0552, 0.0317, 0.0269, 0.0493, 0.0358, 0.0776, 0.0803, 0.0304], + device='cuda:1'), in_proj_covar=tensor([0.0425, 0.0365, 0.0315, 0.0422, 0.0348, 0.0513, 0.0374, 0.0390], + device='cuda:1'), out_proj_covar=tensor([1.1630e-04, 9.7486e-05, 8.3561e-05, 1.1323e-04, 9.3624e-05, 1.4825e-04, + 1.0240e-04, 1.0518e-04], device='cuda:1') +2023-02-06 19:10:46,022 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.489e+02 3.062e+02 3.657e+02 7.822e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 19:10:52,181 INFO [train.py:901] (1/4) Epoch 17, batch 150, loss[loss=0.244, simple_loss=0.3162, pruned_loss=0.08589, over 8323.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2972, pruned_loss=0.06895, over 857518.66 frames. 
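The `Computing validation loss` block above pauses training at the start of the epoch, evaluates the same fixed dev set each time (always `over 944034.00 frames`), then reports peak GPU memory. A sketch of the frame-weighted validation average under those observations; `compute_loss` here is an assumed helper returning a loss and a frame count, not a named icefall function:

```python
import torch

def compute_validation_loss(model, valid_loader, device="cuda:1"):
    """Frame-weighted average loss over the fixed dev set
    (the log shows the same 944034 frames on every validation pass)."""
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            # compute_loss is an assumed helper: (scalar loss, num frames)
            loss, num_frames = compute_loss(model, batch, device)
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()
    return tot_loss / tot_frames
```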
], batch size: 25, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:18,275 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:29,032 INFO [train.py:901] (1/4) Epoch 17, batch 200, loss[loss=0.2502, simple_loss=0.3343, pruned_loss=0.08304, over 8617.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2987, pruned_loss=0.07003, over 1022811.86 frames. ], batch size: 31, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:36,219 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:57,069 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.455e+02 2.902e+02 3.926e+02 7.649e+02, threshold=5.804e+02, percent-clipped=5.0 +2023-02-06 19:12:03,425 INFO [train.py:901] (1/4) Epoch 17, batch 250, loss[loss=0.218, simple_loss=0.3065, pruned_loss=0.06476, over 8499.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2977, pruned_loss=0.06861, over 1158740.98 frames. ], batch size: 28, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:09,649 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 19:12:11,838 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9312, 1.5990, 1.3488, 1.5490, 1.2725, 1.2181, 1.1816, 1.2823], + device='cuda:1'), covar=tensor([0.1136, 0.0408, 0.1212, 0.0493, 0.0721, 0.1480, 0.0917, 0.0718], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0236, 0.0330, 0.0303, 0.0301, 0.0337, 0.0344, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:12:18,387 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 19:12:33,727 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129623.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:12:38,235 INFO [train.py:901] (1/4) Epoch 17, batch 300, loss[loss=0.2087, simple_loss=0.3024, pruned_loss=0.05748, over 8298.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2982, pruned_loss=0.06981, over 1255016.98 frames. ], batch size: 23, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:39,081 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:12:53,545 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129648.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:13:08,138 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.453e+02 3.064e+02 3.747e+02 1.027e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-06 19:13:14,335 INFO [train.py:901] (1/4) Epoch 17, batch 350, loss[loss=0.2622, simple_loss=0.3424, pruned_loss=0.09105, over 8603.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2969, pruned_loss=0.06912, over 1329793.56 frames. ], batch size: 34, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:13:47,829 INFO [train.py:901] (1/4) Epoch 17, batch 400, loss[loss=0.1997, simple_loss=0.2709, pruned_loss=0.06419, over 7661.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2956, pruned_loss=0.06854, over 1396312.95 frames. 
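On the `train.py:1067` warnings: every excluded duration logged in this stretch is either below about 1 s (0.94, 0.98) or above 25 s (25.09 up to 30.16), so the trainer is evidently filtering utterances outside a duration window. A plausible filter reproducing that behaviour; the exact bounds are inferred from these logs, not read from the recipe:

```python
import logging

MIN_SECS, MAX_SECS = 1.0, 25.0  # inferred from the excluded durations above

def keep_cut(cut) -> bool:
    """Mimic the train.py:1067 warnings: drop too-short / too-long cuts."""
    if MIN_SECS <= cut.duration <= MAX_SECS:
        return True
    logging.warning(
        f"Exclude cut with ID {cut.id} from training. Duration: {cut.duration}"
    )
    return False

# With a lhotse CutSet this would be applied lazily:
# train_cuts = train_cuts.filter(keep_cut)
```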
], batch size: 19, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:08,512 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0356, 1.5219, 1.7677, 1.4560, 0.9509, 1.5624, 1.8088, 1.6651], + device='cuda:1'), covar=tensor([0.0493, 0.1210, 0.1624, 0.1354, 0.0586, 0.1423, 0.0670, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0155, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 19:14:18,000 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.355e+02 2.898e+02 3.830e+02 8.224e+02, threshold=5.797e+02, percent-clipped=7.0 +2023-02-06 19:14:21,452 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129775.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:14:24,069 INFO [train.py:901] (1/4) Epoch 17, batch 450, loss[loss=0.1908, simple_loss=0.2622, pruned_loss=0.05973, over 7518.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2953, pruned_loss=0.06843, over 1444315.25 frames. ], batch size: 18, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:26,519 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-06 19:14:39,896 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.07 vs. limit=5.0 +2023-02-06 19:14:44,133 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 19:14:44,497 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:14:58,034 INFO [train.py:901] (1/4) Epoch 17, batch 500, loss[loss=0.2112, simple_loss=0.2909, pruned_loss=0.0657, over 8138.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2964, pruned_loss=0.06911, over 1485708.32 frames. ], batch size: 22, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:28,004 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.377e+02 2.910e+02 3.862e+02 1.132e+03, threshold=5.820e+02, percent-clipped=8.0 +2023-02-06 19:15:35,664 INFO [train.py:901] (1/4) Epoch 17, batch 550, loss[loss=0.2378, simple_loss=0.3142, pruned_loss=0.08066, over 8108.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2966, pruned_loss=0.06835, over 1516411.33 frames. ], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:43,349 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129890.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:16:10,106 INFO [train.py:901] (1/4) Epoch 17, batch 600, loss[loss=0.2147, simple_loss=0.2838, pruned_loss=0.07283, over 7437.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2961, pruned_loss=0.06839, over 1536145.71 frames. ], batch size: 17, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:16:19,710 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 19:16:24,102 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5640, 1.3915, 1.5486, 1.3007, 0.9192, 1.3426, 1.5240, 1.3568], + device='cuda:1'), covar=tensor([0.0549, 0.1258, 0.1695, 0.1400, 0.0575, 0.1516, 0.0699, 0.0655], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0190, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 19:16:38,513 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 2.576e+02 2.936e+02 3.639e+02 7.352e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-06 19:16:41,357 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:16:44,765 INFO [train.py:901] (1/4) Epoch 17, batch 650, loss[loss=0.2029, simple_loss=0.2884, pruned_loss=0.0587, over 8461.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2957, pruned_loss=0.06856, over 1556290.96 frames. ], batch size: 25, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:09,803 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0888, 2.5893, 3.6738, 1.9250, 1.9220, 3.5349, 0.7450, 2.1957], + device='cuda:1'), covar=tensor([0.1504, 0.1297, 0.0266, 0.2023, 0.3023, 0.0373, 0.2446, 0.1391], + device='cuda:1'), in_proj_covar=tensor([0.0178, 0.0185, 0.0115, 0.0217, 0.0265, 0.0121, 0.0165, 0.0179], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 19:17:11,722 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4886, 1.3749, 4.3151, 1.9607, 2.4905, 4.8575, 4.8485, 4.1942], + device='cuda:1'), covar=tensor([0.0963, 0.1881, 0.0283, 0.2004, 0.1112, 0.0195, 0.0589, 0.0543], + device='cuda:1'), in_proj_covar=tensor([0.0279, 0.0306, 0.0270, 0.0298, 0.0291, 0.0251, 0.0384, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 19:17:16,419 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:16,517 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7962, 2.3676, 4.3878, 1.5926, 3.2896, 2.3452, 1.9252, 3.0867], + device='cuda:1'), covar=tensor([0.1702, 0.2290, 0.0705, 0.3995, 0.1412, 0.2773, 0.1880, 0.2129], + device='cuda:1'), in_proj_covar=tensor([0.0505, 0.0560, 0.0542, 0.0610, 0.0628, 0.0568, 0.0502, 0.0617], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:17:17,843 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:23,794 INFO [train.py:901] (1/4) Epoch 17, batch 700, loss[loss=0.2351, simple_loss=0.3139, pruned_loss=0.07819, over 8481.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2953, pruned_loss=0.06859, over 1569221.19 frames. ], batch size: 25, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:51,873 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.350e+02 2.811e+02 3.683e+02 1.098e+03, threshold=5.622e+02, percent-clipped=6.0 +2023-02-06 19:17:58,276 INFO [train.py:901] (1/4) Epoch 17, batch 750, loss[loss=0.1579, simple_loss=0.2528, pruned_loss=0.03153, over 7812.00 frames. 
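The `zipformer.py:1185` lines are layer-dropout bookkeeping: each encoder stack has its own staggered warm-up window in batches (666.7-1333.3, 1333.3-2000.0, and so on up to 4000.0), and on each step a set of whole layers may be skipped. At `batch_count` near 130k, far past warm-up, most lines show `num_to_drop=0` with an occasional `num_to_drop=1`. A toy sketch of such a schedule; the probabilities and the linear ramp are assumptions, since the log only shows the post-warm-up behaviour:

```python
import random

def pick_layers_to_drop(batch_count, num_layers, warmup_begin, warmup_end,
                        initial_p=0.5, final_p=0.05):
    """Stochastically skip whole encoder layers: aggressively inside the
    per-stack warm-up window, rarely afterwards (assumed constants)."""
    if batch_count >= warmup_end:
        p = final_p
    elif batch_count <= warmup_begin:
        p = initial_p
    else:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = initial_p + frac * (final_p - initial_p)
    # Expected number of dropped layers is ~p; usually set(), sometimes {0}.
    return {i for i in range(num_layers) if random.random() < p / num_layers}
```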
], tot_loss[loss=0.2167, simple_loss=0.2963, pruned_loss=0.0685, over 1581140.80 frames. ], batch size: 20, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:04,872 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7090, 1.9911, 2.0933, 1.3894, 2.2350, 1.5726, 0.7433, 1.8906], + device='cuda:1'), covar=tensor([0.0444, 0.0261, 0.0208, 0.0429, 0.0295, 0.0663, 0.0685, 0.0238], + device='cuda:1'), in_proj_covar=tensor([0.0421, 0.0362, 0.0309, 0.0418, 0.0346, 0.0507, 0.0370, 0.0388], + device='cuda:1'), out_proj_covar=tensor([1.1516e-04, 9.6632e-05, 8.1938e-05, 1.1221e-04, 9.3132e-05, 1.4651e-04, + 1.0125e-04, 1.0452e-04], device='cuda:1') +2023-02-06 19:18:05,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:18:08,227 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 19:18:19,441 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 19:18:36,021 INFO [train.py:901] (1/4) Epoch 17, batch 800, loss[loss=0.2404, simple_loss=0.3198, pruned_loss=0.08054, over 8336.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2966, pruned_loss=0.06875, over 1590553.25 frames. ], batch size: 25, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:46,641 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7636, 1.8548, 2.4385, 1.7330, 1.3934, 2.3965, 0.4227, 1.4543], + device='cuda:1'), covar=tensor([0.2154, 0.1216, 0.0396, 0.1681, 0.3157, 0.0484, 0.2581, 0.1869], + device='cuda:1'), in_proj_covar=tensor([0.0178, 0.0186, 0.0116, 0.0218, 0.0265, 0.0122, 0.0167, 0.0181], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 19:18:48,061 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130146.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:18:52,756 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:19:04,215 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.363e+02 2.676e+02 3.408e+02 8.560e+02, threshold=5.353e+02, percent-clipped=3.0 +2023-02-06 19:19:05,147 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130171.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:19:10,476 INFO [train.py:901] (1/4) Epoch 17, batch 850, loss[loss=0.2214, simple_loss=0.3059, pruned_loss=0.06845, over 8829.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2966, pruned_loss=0.06887, over 1598139.08 frames. ], batch size: 40, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:19:47,568 INFO [train.py:901] (1/4) Epoch 17, batch 900, loss[loss=0.169, simple_loss=0.2404, pruned_loss=0.04882, over 7407.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2968, pruned_loss=0.06871, over 1599660.47 frames. 
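A note on `tot_loss`: it is a frame-weighted running average of the per-batch losses. At `batch 0` of epoch 17 it coincides with the batch loss (same 8187 frames), and its `over N frames` count climbs with successive batches but plateaus around 1.6M rather than growing linearly (at roughly 7.2k frames per batch a straight sum would pass 1.8M by batch 250, yet the log shows 1.16M), so older batches are evidently down-weighted. A sketch with an assumed exponential decay:

```python
class FrameWeightedAverage:
    """Decayed, frame-weighted running loss. The decay constant is an
    assumption, chosen so the frame count plateaus near the ~1.6M seen above."""

    def __init__(self, decay=0.9955):
        self.decay = decay
        self.weighted_loss = 0.0
        self.frames = 0.0

    def update(self, batch_loss, batch_frames):
        self.weighted_loss = self.decay * self.weighted_loss + batch_loss * batch_frames
        self.frames = self.decay * self.frames + batch_frames
        # Returned pair matches the log: tot_loss and its 'over N frames' count.
        return self.weighted_loss / self.frames, self.frames
```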
], batch size: 17, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:15,370 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:20:16,500 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.489e+02 3.023e+02 3.878e+02 8.176e+02, threshold=6.045e+02, percent-clipped=7.0 +2023-02-06 19:20:22,807 INFO [train.py:901] (1/4) Epoch 17, batch 950, loss[loss=0.1883, simple_loss=0.2694, pruned_loss=0.05355, over 7260.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2965, pruned_loss=0.06882, over 1601326.09 frames. ], batch size: 16, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:29,182 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6267, 2.0793, 3.4998, 1.4116, 2.6465, 2.1485, 1.6470, 2.5414], + device='cuda:1'), covar=tensor([0.1820, 0.2438, 0.0736, 0.4390, 0.1567, 0.2842, 0.2114, 0.2140], + device='cuda:1'), in_proj_covar=tensor([0.0507, 0.0564, 0.0545, 0.0612, 0.0632, 0.0570, 0.0504, 0.0619], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:20:43,381 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 19:20:57,172 INFO [train.py:901] (1/4) Epoch 17, batch 1000, loss[loss=0.1757, simple_loss=0.2447, pruned_loss=0.05339, over 7420.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2966, pruned_loss=0.06881, over 1604953.03 frames. ], batch size: 17, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:21:04,859 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0498, 2.2964, 1.8162, 2.9518, 1.5113, 1.6468, 1.9934, 2.3471], + device='cuda:1'), covar=tensor([0.0725, 0.0756, 0.1075, 0.0344, 0.1122, 0.1418, 0.0981, 0.0803], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0202, 0.0252, 0.0213, 0.0210, 0.0250, 0.0255, 0.0214], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 19:21:09,225 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:20,030 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 19:21:21,949 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:23,312 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:27,492 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.680e+02 3.059e+02 3.924e+02 8.380e+02, threshold=6.118e+02, percent-clipped=2.0 +2023-02-06 19:21:27,758 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:33,150 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 19:21:33,589 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 19:21:33,835 INFO [train.py:901] (1/4) Epoch 17, batch 1050, loss[loss=0.1799, simple_loss=0.2613, pruned_loss=0.04922, over 7697.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.296, pruned_loss=0.06842, over 1604441.89 frames. 
], batch size: 18, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:21:49,933 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:08,459 INFO [train.py:901] (1/4) Epoch 17, batch 1100, loss[loss=0.2382, simple_loss=0.3145, pruned_loss=0.08095, over 7969.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2967, pruned_loss=0.06909, over 1606855.47 frames. ], batch size: 21, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:22:14,699 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0882, 1.6826, 3.4274, 1.4106, 2.4614, 3.7906, 3.8338, 3.2176], + device='cuda:1'), covar=tensor([0.1012, 0.1603, 0.0352, 0.2070, 0.0962, 0.0222, 0.0543, 0.0564], + device='cuda:1'), in_proj_covar=tensor([0.0282, 0.0307, 0.0272, 0.0301, 0.0293, 0.0252, 0.0386, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 19:22:23,046 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:27,196 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:38,663 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.545e+02 2.978e+02 3.676e+02 6.168e+02, threshold=5.956e+02, percent-clipped=1.0 +2023-02-06 19:22:44,133 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:45,339 INFO [train.py:901] (1/4) Epoch 17, batch 1150, loss[loss=0.2794, simple_loss=0.343, pruned_loss=0.1079, over 6500.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2958, pruned_loss=0.06909, over 1604405.14 frames. ], batch size: 71, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:22:45,510 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:45,990 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 19:23:16,164 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:19,418 INFO [train.py:901] (1/4) Epoch 17, batch 1200, loss[loss=0.2376, simple_loss=0.3134, pruned_loss=0.08092, over 8508.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2948, pruned_loss=0.06832, over 1607718.98 frames. ], batch size: 26, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:23:33,411 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:45,144 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130566.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:47,781 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.314e+02 2.862e+02 3.617e+02 1.013e+03, threshold=5.724e+02, percent-clipped=2.0 +2023-02-06 19:23:53,882 INFO [train.py:901] (1/4) Epoch 17, batch 1250, loss[loss=0.2278, simple_loss=0.3163, pruned_loss=0.06967, over 7972.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2946, pruned_loss=0.06761, over 1611569.20 frames. 
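The periodic `zipformer.py:2431` dumps summarize `attn_weights_entropy`, apparently one value per attention head (or stack), together with covariance statistics of the attention projections; low entropy means a head attends very peakily, high entropy means it spreads over many frames. The exact tensor being summarized is internal to `zipformer.py`; a generic per-head attention entropy, under an assumed layout, looks like:

```python
import torch

def attn_weights_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """attn_weights: (num_heads, batch, tgt_len, src_len), rows summing to 1.
    Returns one mean entropy (in nats) per head, like the 8-value vectors
    in the log. The tensor layout here is an assumption."""
    eps = 1.0e-20
    entropy = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return entropy.mean(dim=(1, 2))  # average over batch and target positions
```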
], batch size: 21, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:23:57,433 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:24:07,971 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7598, 1.9778, 2.1913, 1.3307, 2.2929, 1.4252, 0.7159, 1.9625], + device='cuda:1'), covar=tensor([0.0615, 0.0307, 0.0228, 0.0525, 0.0363, 0.0829, 0.0770, 0.0303], + device='cuda:1'), in_proj_covar=tensor([0.0427, 0.0365, 0.0312, 0.0424, 0.0350, 0.0516, 0.0377, 0.0394], + device='cuda:1'), out_proj_covar=tensor([1.1675e-04, 9.7352e-05, 8.2703e-05, 1.1359e-04, 9.3933e-05, 1.4911e-04, + 1.0301e-04, 1.0598e-04], device='cuda:1') +2023-02-06 19:24:30,844 INFO [train.py:901] (1/4) Epoch 17, batch 1300, loss[loss=0.2583, simple_loss=0.3344, pruned_loss=0.09113, over 8315.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2963, pruned_loss=0.0688, over 1615182.66 frames. ], batch size: 25, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:24:35,121 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4323, 2.6709, 3.0556, 1.6008, 3.2573, 1.8737, 1.6024, 2.3383], + device='cuda:1'), covar=tensor([0.0644, 0.0345, 0.0218, 0.0677, 0.0390, 0.0832, 0.0793, 0.0456], + device='cuda:1'), in_proj_covar=tensor([0.0426, 0.0365, 0.0313, 0.0424, 0.0348, 0.0516, 0.0376, 0.0393], + device='cuda:1'), out_proj_covar=tensor([1.1640e-04, 9.7338e-05, 8.2955e-05, 1.1361e-04, 9.3577e-05, 1.4901e-04, + 1.0280e-04, 1.0569e-04], device='cuda:1') +2023-02-06 19:24:59,331 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.380e+02 3.126e+02 3.675e+02 7.509e+02, threshold=6.253e+02, percent-clipped=2.0 +2023-02-06 19:25:05,695 INFO [train.py:901] (1/4) Epoch 17, batch 1350, loss[loss=0.2134, simple_loss=0.2918, pruned_loss=0.06746, over 7924.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2964, pruned_loss=0.06878, over 1614806.11 frames. ], batch size: 20, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:25:06,300 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-06 19:25:43,059 INFO [train.py:901] (1/4) Epoch 17, batch 1400, loss[loss=0.1962, simple_loss=0.2724, pruned_loss=0.05997, over 7929.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2973, pruned_loss=0.06939, over 1615013.88 frames. ], batch size: 20, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:25:46,006 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:25:47,358 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:25:54,826 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130746.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:03,092 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:04,424 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:11,013 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.607e+02 3.260e+02 4.191e+02 1.113e+03, threshold=6.520e+02, percent-clipped=3.0 +2023-02-06 19:26:17,366 INFO [train.py:901] (1/4) Epoch 17, batch 1450, loss[loss=0.2024, simple_loss=0.2954, pruned_loss=0.05467, over 8460.00 frames. 
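About the `scaling.py:679` lines: they compare a measured whitening metric of some layer's activations (per channel group: `num_groups=1, num_channels=256` or `num_groups=8, num_channels=96`) against a limit (5.0 and 2.0 respectively), presumably triggering a corrective penalty when the limit is exceeded. One standard way to quantify how far a feature covariance is from white is the ratio of the mean squared eigenvalue to the squared mean eigenvalue, which is 1.0 for perfectly white features and grows with anisotropy; that matches the logged values sitting a little above 1. A sketch under that assumption (my reading, not necessarily icefall's exact formula):

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    """x: (num_frames, num_channels). Returns E[lambda^2] / E[lambda]^2 of the
    per-group feature covariance, averaged over groups; 1.0 means white
    (assumed formula, chosen to match the logged value ranges)."""
    num_channels = x.shape[-1]
    x = x.reshape(-1, num_groups, num_channels // num_groups).transpose(0, 1)
    x = x - x.mean(dim=1, keepdim=True)
    cov = torch.matmul(x.transpose(1, 2), x) / x.shape[1]  # (groups, d, d)
    d = cov.shape[-1]
    num = (cov ** 2).sum(dim=(1, 2))                      # tr(C^2), C symmetric
    den = cov.diagonal(dim1=1, dim2=2).sum(dim=1) ** 2    # tr(C)^2
    return (d * num / den).mean().item()
```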
], tot_loss[loss=0.2173, simple_loss=0.2967, pruned_loss=0.06893, over 1616183.61 frames. ], batch size: 29, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:26:20,729 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 19:26:27,819 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:32,210 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130800.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:34,989 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1450, 1.4886, 4.3397, 1.7002, 3.8937, 3.6137, 3.9970, 3.8310], + device='cuda:1'), covar=tensor([0.0570, 0.4343, 0.0500, 0.3488, 0.1054, 0.0929, 0.0517, 0.0666], + device='cuda:1'), in_proj_covar=tensor([0.0570, 0.0627, 0.0650, 0.0595, 0.0676, 0.0580, 0.0573, 0.0637], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 19:26:54,254 INFO [train.py:901] (1/4) Epoch 17, batch 1500, loss[loss=0.1974, simple_loss=0.2784, pruned_loss=0.0582, over 8089.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2958, pruned_loss=0.06854, over 1617678.81 frames. ], batch size: 21, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:27:17,116 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:27:22,922 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.370e+02 2.974e+02 3.638e+02 1.375e+03, threshold=5.949e+02, percent-clipped=1.0 +2023-02-06 19:27:29,124 INFO [train.py:901] (1/4) Epoch 17, batch 1550, loss[loss=0.2188, simple_loss=0.2768, pruned_loss=0.08039, over 7701.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2959, pruned_loss=0.06849, over 1618171.12 frames. ], batch size: 18, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:27:50,132 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:27:50,701 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:27:54,296 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130915.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:28:02,600 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:28:03,810 INFO [train.py:901] (1/4) Epoch 17, batch 1600, loss[loss=0.2756, simple_loss=0.3382, pruned_loss=0.1065, over 7056.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.296, pruned_loss=0.06879, over 1611112.33 frames. ], batch size: 71, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:28:34,758 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.345e+02 2.992e+02 3.546e+02 8.486e+02, threshold=5.983e+02, percent-clipped=5.0 +2023-02-06 19:28:40,949 INFO [train.py:901] (1/4) Epoch 17, batch 1650, loss[loss=0.3223, simple_loss=0.3635, pruned_loss=0.1406, over 6982.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2959, pruned_loss=0.06903, over 1609062.51 frames. ], batch size: 73, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:28:57,389 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-02-06 19:29:13,495 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131025.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:29:16,139 INFO [train.py:901] (1/4) Epoch 17, batch 1700, loss[loss=0.2264, simple_loss=0.3013, pruned_loss=0.07574, over 8032.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2964, pruned_loss=0.06896, over 1613697.66 frames. ], batch size: 22, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:29:25,394 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:29:46,946 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.451e+02 3.155e+02 3.823e+02 7.811e+02, threshold=6.311e+02, percent-clipped=3.0 +2023-02-06 19:29:53,062 INFO [train.py:901] (1/4) Epoch 17, batch 1750, loss[loss=0.2023, simple_loss=0.2865, pruned_loss=0.05909, over 8472.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2972, pruned_loss=0.06961, over 1610612.48 frames. ], batch size: 27, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:29:58,721 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1028, 1.8126, 2.0625, 1.8840, 1.4336, 1.8395, 2.6766, 2.3860], + device='cuda:1'), covar=tensor([0.0406, 0.1079, 0.1536, 0.1263, 0.0529, 0.1328, 0.0493, 0.0515], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0164, 0.0115, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 19:30:19,608 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131117.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:30:27,910 INFO [train.py:901] (1/4) Epoch 17, batch 1800, loss[loss=0.1883, simple_loss=0.2606, pruned_loss=0.058, over 8235.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2965, pruned_loss=0.0696, over 1610204.25 frames. ], batch size: 22, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:30:37,098 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:30:52,694 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:30:53,338 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2239, 2.1879, 1.6515, 1.9733, 1.7987, 1.4723, 1.6024, 1.6940], + device='cuda:1'), covar=tensor([0.1282, 0.0321, 0.1128, 0.0493, 0.0602, 0.1366, 0.0943, 0.0851], + device='cuda:1'), in_proj_covar=tensor([0.0347, 0.0230, 0.0324, 0.0298, 0.0296, 0.0326, 0.0338, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:30:55,957 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.745e+02 3.356e+02 4.683e+02 1.105e+03, threshold=6.712e+02, percent-clipped=11.0 +2023-02-06 19:30:56,918 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:03,697 INFO [train.py:901] (1/4) Epoch 17, batch 1850, loss[loss=0.2361, simple_loss=0.3201, pruned_loss=0.07599, over 8661.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2962, pruned_loss=0.06925, over 1612773.89 frames. 
], batch size: 39, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:31:12,470 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:13,824 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:16,714 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:37,448 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0640, 2.7281, 3.6398, 1.9712, 2.0113, 3.5626, 0.8305, 2.3013], + device='cuda:1'), covar=tensor([0.1491, 0.1110, 0.0239, 0.2130, 0.2786, 0.0283, 0.2570, 0.1326], + device='cuda:1'), in_proj_covar=tensor([0.0179, 0.0183, 0.0115, 0.0216, 0.0261, 0.0122, 0.0166, 0.0179], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 19:31:39,956 INFO [train.py:901] (1/4) Epoch 17, batch 1900, loss[loss=0.2047, simple_loss=0.2836, pruned_loss=0.06288, over 8089.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2967, pruned_loss=0.06933, over 1612634.80 frames. ], batch size: 21, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:32:08,105 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.313e+02 2.955e+02 3.582e+02 5.685e+02, threshold=5.910e+02, percent-clipped=0.0 +2023-02-06 19:32:08,141 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 19:32:14,123 INFO [train.py:901] (1/4) Epoch 17, batch 1950, loss[loss=0.2083, simple_loss=0.2987, pruned_loss=0.05897, over 8112.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2964, pruned_loss=0.06896, over 1613190.64 frames. ], batch size: 23, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:32:15,750 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:19,619 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 19:32:28,913 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:35,143 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131306.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:39,941 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 19:32:47,535 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:51,200 INFO [train.py:901] (1/4) Epoch 17, batch 2000, loss[loss=0.2541, simple_loss=0.3319, pruned_loss=0.08818, over 8248.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2971, pruned_loss=0.0694, over 1612732.58 frames. ], batch size: 24, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:33:19,856 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.510e+02 3.128e+02 3.622e+02 6.098e+02, threshold=6.257e+02, percent-clipped=1.0 +2023-02-06 19:33:25,355 INFO [train.py:901] (1/4) Epoch 17, batch 2050, loss[loss=0.2047, simple_loss=0.2847, pruned_loss=0.06234, over 8281.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2963, pruned_loss=0.06851, over 1615086.41 frames. 
], batch size: 23, lr: 4.51e-03, grad_scale: 8.0 +2023-02-06 19:34:00,659 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:34:01,963 INFO [train.py:901] (1/4) Epoch 17, batch 2100, loss[loss=0.1678, simple_loss=0.2459, pruned_loss=0.04482, over 7690.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2953, pruned_loss=0.06764, over 1614031.08 frames. ], batch size: 18, lr: 4.51e-03, grad_scale: 8.0 +2023-02-06 19:34:06,118 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:34:31,408 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.457e+02 2.884e+02 3.530e+02 8.686e+02, threshold=5.767e+02, percent-clipped=1.0 +2023-02-06 19:34:36,970 INFO [train.py:901] (1/4) Epoch 17, batch 2150, loss[loss=0.2093, simple_loss=0.271, pruned_loss=0.07378, over 7696.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2956, pruned_loss=0.06792, over 1614480.26 frames. ], batch size: 18, lr: 4.51e-03, grad_scale: 8.0 +2023-02-06 19:34:58,678 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:35:12,357 INFO [train.py:901] (1/4) Epoch 17, batch 2200, loss[loss=0.2243, simple_loss=0.306, pruned_loss=0.0713, over 8337.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2947, pruned_loss=0.06771, over 1612917.97 frames. ], batch size: 26, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:35:17,213 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131536.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:35:36,120 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3149, 1.9151, 1.4063, 3.0907, 1.4086, 1.2067, 2.0863, 2.1828], + device='cuda:1'), covar=tensor([0.1720, 0.1277, 0.2103, 0.0366, 0.1390, 0.2312, 0.1038, 0.0967], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0198, 0.0247, 0.0211, 0.0208, 0.0246, 0.0253, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 19:35:43,531 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.550e+02 3.248e+02 4.465e+02 1.208e+03, threshold=6.496e+02, percent-clipped=6.0 +2023-02-06 19:35:49,219 INFO [train.py:901] (1/4) Epoch 17, batch 2250, loss[loss=0.2176, simple_loss=0.2811, pruned_loss=0.07704, over 7532.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2961, pruned_loss=0.06854, over 1613427.62 frames. ], batch size: 18, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:36:23,882 INFO [train.py:901] (1/4) Epoch 17, batch 2300, loss[loss=0.2235, simple_loss=0.3003, pruned_loss=0.07337, over 8440.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2965, pruned_loss=0.06879, over 1616756.16 frames. 
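Note the `grad_scale` column: it is the mixed-precision loss-scaling factor. It doubles from 8.0 to 16.0 at batch 1650 above and is back at 8.0 by batch 2050, the classic dynamic-loss-scaling pattern (grow after a run of finite gradients, halve when an overflow is hit). A minimal equivalent with PyTorch's own scaler; the constants are illustrative, and `model`, `optimizer`, `loader` are assumed to exist:

```python
from torch.cuda.amp import GradScaler, autocast

def train_with_amp(model, optimizer, loader):
    # Illustrative constants; the log is consistent with scale 8 -> 16 -> 8.
    scaler = GradScaler(init_scale=8.0, growth_factor=2.0,
                        backoff_factor=0.5, growth_interval=2000)
    for batch in loader:
        optimizer.zero_grad()
        with autocast():
            loss = model(batch)
        scaler.scale(loss).backward()
        scaler.step(optimizer)  # on inf/NaN grads: skip the step, halve scale
        scaler.update()         # after growth_interval clean steps: double it
```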
], batch size: 49, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:36:24,071 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2925, 1.3874, 1.2704, 1.8513, 0.7750, 1.1416, 1.2324, 1.4374], + device='cuda:1'), covar=tensor([0.0894, 0.0917, 0.1119, 0.0508, 0.1172, 0.1491, 0.0851, 0.0822], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0199, 0.0249, 0.0212, 0.0209, 0.0247, 0.0254, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 19:36:40,751 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:36:55,870 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 2.553e+02 3.001e+02 3.824e+02 6.268e+02, threshold=6.003e+02, percent-clipped=0.0 +2023-02-06 19:37:01,540 INFO [train.py:901] (1/4) Epoch 17, batch 2350, loss[loss=0.2093, simple_loss=0.277, pruned_loss=0.07075, over 7788.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2979, pruned_loss=0.06989, over 1617168.03 frames. ], batch size: 19, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:37:16,046 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8001, 1.9064, 2.4038, 1.6724, 1.3727, 2.3527, 0.4215, 1.4686], + device='cuda:1'), covar=tensor([0.2216, 0.1224, 0.0383, 0.1444, 0.2976, 0.0462, 0.2580, 0.1372], + device='cuda:1'), in_proj_covar=tensor([0.0179, 0.0183, 0.0114, 0.0216, 0.0260, 0.0121, 0.0166, 0.0179], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 19:37:21,843 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 19:37:35,913 INFO [train.py:901] (1/4) Epoch 17, batch 2400, loss[loss=0.2185, simple_loss=0.2899, pruned_loss=0.07353, over 7923.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2971, pruned_loss=0.06936, over 1613888.64 frames. ], batch size: 20, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:37:40,380 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0907, 3.5153, 2.2091, 2.7125, 2.6804, 2.0601, 2.7926, 2.9555], + device='cuda:1'), covar=tensor([0.1510, 0.0380, 0.1143, 0.0768, 0.0727, 0.1341, 0.0983, 0.1031], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0233, 0.0324, 0.0299, 0.0297, 0.0328, 0.0340, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:38:06,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.467e+02 3.155e+02 3.892e+02 8.269e+02, threshold=6.310e+02, percent-clipped=4.0 +2023-02-06 19:38:06,491 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:38:12,193 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:38:12,830 INFO [train.py:901] (1/4) Epoch 17, batch 2450, loss[loss=0.2299, simple_loss=0.3152, pruned_loss=0.07234, over 8103.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2956, pruned_loss=0.06815, over 1612908.51 frames. ], batch size: 23, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:38:47,881 INFO [train.py:901] (1/4) Epoch 17, batch 2500, loss[loss=0.2086, simple_loss=0.2794, pruned_loss=0.06891, over 7796.00 frames. 
], tot_loss[loss=0.2166, simple_loss=0.2961, pruned_loss=0.06853, over 1613822.83 frames. ], batch size: 19, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:38:57,326 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 19:39:05,482 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131854.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:17,123 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.481e+02 2.929e+02 3.320e+02 7.417e+02, threshold=5.858e+02, percent-clipped=2.0 +2023-02-06 19:39:20,130 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:22,845 INFO [train.py:901] (1/4) Epoch 17, batch 2550, loss[loss=0.1773, simple_loss=0.2538, pruned_loss=0.05038, over 7441.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.295, pruned_loss=0.06824, over 1614618.10 frames. ], batch size: 17, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:39:29,624 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:34,619 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131893.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:45,579 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:40:00,843 INFO [train.py:901] (1/4) Epoch 17, batch 2600, loss[loss=0.2076, simple_loss=0.3002, pruned_loss=0.05751, over 8466.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.295, pruned_loss=0.06793, over 1615899.55 frames. ], batch size: 25, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:40:03,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:40:11,295 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7930, 1.9934, 2.2810, 1.2564, 2.3917, 1.6158, 0.7072, 1.9267], + device='cuda:1'), covar=tensor([0.0483, 0.0302, 0.0208, 0.0469, 0.0289, 0.0729, 0.0710, 0.0287], + device='cuda:1'), in_proj_covar=tensor([0.0426, 0.0368, 0.0314, 0.0423, 0.0351, 0.0510, 0.0375, 0.0391], + device='cuda:1'), out_proj_covar=tensor([1.1642e-04, 9.7987e-05, 8.3426e-05, 1.1333e-04, 9.4234e-05, 1.4691e-04, + 1.0232e-04, 1.0501e-04], device='cuda:1') +2023-02-06 19:40:28,773 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:40:29,954 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.887e+02 3.716e+02 6.826e+02, threshold=5.774e+02, percent-clipped=1.0 +2023-02-06 19:40:35,451 INFO [train.py:901] (1/4) Epoch 17, batch 2650, loss[loss=0.2294, simple_loss=0.3051, pruned_loss=0.07691, over 8465.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2956, pruned_loss=0.0679, over 1618399.02 frames. ], batch size: 27, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:41:09,809 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.50 vs. limit=5.0 +2023-02-06 19:41:13,498 INFO [train.py:901] (1/4) Epoch 17, batch 2700, loss[loss=0.2092, simple_loss=0.2832, pruned_loss=0.06757, over 7817.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2946, pruned_loss=0.06749, over 1611829.04 frames. 
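On the learning rate: it drifts slowly within the epoch (4.54e-03 at batch 0 of epoch 17 down to 4.50e-03 by batch 2200) and dropped more sharply at the epoch 16 to 17 boundary (4.69e-03 to 4.54e-03). That shape is consistent with an Eden-style scheduler as used in icefall, which decays as an inverse quarter power in both batch count and epoch; a sketch with assumed default constants:

```python
def eden_lr(base_lr, batch, epoch, lr_batches=5000.0, lr_epochs=3.5):
    """Eden-style learning rate: smooth inverse-quarter-power decay in
    batches plus a per-epoch decay. The constants are assumptions; the
    logged lr's slow within-epoch drift and the drop at the epoch
    boundary match this general shape, not an exact fit."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor
```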
], batch size: 20, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:41:27,363 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132049.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:41:42,496 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.450e+02 3.248e+02 4.102e+02 1.137e+03, threshold=6.496e+02, percent-clipped=12.0 +2023-02-06 19:41:43,387 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:41:48,260 INFO [train.py:901] (1/4) Epoch 17, batch 2750, loss[loss=0.2241, simple_loss=0.312, pruned_loss=0.06805, over 8519.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2955, pruned_loss=0.06772, over 1611063.22 frames. ], batch size: 28, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:42:25,035 INFO [train.py:901] (1/4) Epoch 17, batch 2800, loss[loss=0.2181, simple_loss=0.3013, pruned_loss=0.06743, over 8622.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2948, pruned_loss=0.06774, over 1608244.71 frames. ], batch size: 49, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:42:35,384 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:42:40,216 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:42:52,683 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:42:55,272 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.217e+02 2.865e+02 3.623e+02 1.020e+03, threshold=5.730e+02, percent-clipped=3.0 +2023-02-06 19:42:57,349 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:43:00,623 INFO [train.py:901] (1/4) Epoch 17, batch 2850, loss[loss=0.207, simple_loss=0.2714, pruned_loss=0.07134, over 7197.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2945, pruned_loss=0.06732, over 1611422.40 frames. ], batch size: 16, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:43:11,910 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 19:43:29,198 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:43:33,403 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:43:35,753 INFO [train.py:901] (1/4) Epoch 17, batch 2900, loss[loss=0.2543, simple_loss=0.3299, pruned_loss=0.08939, over 8348.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2951, pruned_loss=0.06846, over 1608762.61 frames. ], batch size: 24, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:43:52,923 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:44:08,364 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.407e+02 2.887e+02 3.454e+02 7.005e+02, threshold=5.774e+02, percent-clipped=2.0 +2023-02-06 19:44:09,839 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-06 19:44:13,740 INFO [train.py:901] (1/4) Epoch 17, batch 2950, loss[loss=0.2093, simple_loss=0.2924, pruned_loss=0.06306, over 8361.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2956, pruned_loss=0.06862, over 1608594.68 frames. ], batch size: 24, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:44:36,063 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 19:44:48,313 INFO [train.py:901] (1/4) Epoch 17, batch 3000, loss[loss=0.2068, simple_loss=0.2987, pruned_loss=0.0574, over 8559.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2945, pruned_loss=0.06797, over 1605547.20 frames. ], batch size: 39, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:44:48,313 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 19:44:56,176 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4822, 1.7250, 2.5224, 1.2980, 1.8678, 1.7881, 1.5887, 1.8383], + device='cuda:1'), covar=tensor([0.1787, 0.2571, 0.0953, 0.4475, 0.1848, 0.3214, 0.2115, 0.2293], + device='cuda:1'), in_proj_covar=tensor([0.0509, 0.0566, 0.0546, 0.0619, 0.0634, 0.0574, 0.0510, 0.0623], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:45:00,597 INFO [train.py:935] (1/4) Epoch 17, validation: loss=0.1786, simple_loss=0.2786, pruned_loss=0.03928, over 944034.00 frames. +2023-02-06 19:45:00,598 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 19:45:04,441 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132334.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:45:07,385 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3619, 2.0802, 2.8559, 2.3292, 2.7271, 2.3587, 2.0359, 1.4779], + device='cuda:1'), covar=tensor([0.4640, 0.4663, 0.1590, 0.3297, 0.2295, 0.2717, 0.1747, 0.5013], + device='cuda:1'), in_proj_covar=tensor([0.0931, 0.0938, 0.0776, 0.0908, 0.0973, 0.0858, 0.0727, 0.0809], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 19:45:12,171 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3994, 1.6110, 1.7984, 1.4987, 1.0645, 1.4834, 1.9701, 1.7962], + device='cuda:1'), covar=tensor([0.0456, 0.1224, 0.1604, 0.1375, 0.0564, 0.1505, 0.0621, 0.0612], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0163, 0.0115, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 19:45:31,448 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.492e+02 3.005e+02 3.786e+02 8.313e+02, threshold=6.010e+02, percent-clipped=11.0 +2023-02-06 19:45:37,100 INFO [train.py:901] (1/4) Epoch 17, batch 3050, loss[loss=0.2174, simple_loss=0.304, pruned_loss=0.06542, over 8459.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2957, pruned_loss=0.06819, over 1610812.38 frames. 
], batch size: 27, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:45:48,262 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:46:04,203 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:46:12,931 INFO [train.py:901] (1/4) Epoch 17, batch 3100, loss[loss=0.1798, simple_loss=0.2586, pruned_loss=0.05056, over 7795.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2958, pruned_loss=0.0679, over 1614023.36 frames. ], batch size: 19, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:46:41,880 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.340e+02 2.843e+02 3.195e+02 7.960e+02, threshold=5.685e+02, percent-clipped=6.0 +2023-02-06 19:46:47,319 INFO [train.py:901] (1/4) Epoch 17, batch 3150, loss[loss=0.2682, simple_loss=0.3372, pruned_loss=0.09954, over 8335.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2955, pruned_loss=0.06796, over 1612264.21 frames. ], batch size: 26, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:46:48,918 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5021, 2.6745, 2.0309, 2.3952, 2.4577, 1.7388, 2.3102, 2.3163], + device='cuda:1'), covar=tensor([0.1458, 0.0383, 0.1040, 0.0563, 0.0680, 0.1369, 0.0828, 0.0910], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0233, 0.0324, 0.0300, 0.0297, 0.0328, 0.0339, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:47:09,719 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132508.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:47:23,941 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. limit=5.0 +2023-02-06 19:47:24,992 INFO [train.py:901] (1/4) Epoch 17, batch 3200, loss[loss=0.2058, simple_loss=0.297, pruned_loss=0.05729, over 8513.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2944, pruned_loss=0.06748, over 1613597.19 frames. ], batch size: 28, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:47:26,580 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:47:54,169 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.495e+02 3.112e+02 3.824e+02 1.248e+03, threshold=6.223e+02, percent-clipped=6.0 +2023-02-06 19:47:59,503 INFO [train.py:901] (1/4) Epoch 17, batch 3250, loss[loss=0.2327, simple_loss=0.31, pruned_loss=0.07775, over 8419.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2952, pruned_loss=0.06778, over 1611667.37 frames. 
], batch size: 49, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:48:07,396 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:48:11,622 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4679, 2.5738, 1.8606, 2.3269, 2.3317, 1.5909, 2.1260, 2.2284], + device='cuda:1'), covar=tensor([0.1694, 0.0380, 0.1170, 0.0627, 0.0673, 0.1583, 0.0938, 0.0965], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0233, 0.0324, 0.0301, 0.0298, 0.0330, 0.0341, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:48:26,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132615.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:48:37,513 INFO [train.py:901] (1/4) Epoch 17, batch 3300, loss[loss=0.2445, simple_loss=0.3133, pruned_loss=0.08782, over 6741.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.296, pruned_loss=0.06789, over 1611193.04 frames. ], batch size: 71, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:49:06,785 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 2.532e+02 2.971e+02 3.744e+02 7.972e+02, threshold=5.942e+02, percent-clipped=3.0 +2023-02-06 19:49:11,875 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132678.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:49:12,450 INFO [train.py:901] (1/4) Epoch 17, batch 3350, loss[loss=0.1703, simple_loss=0.2595, pruned_loss=0.04058, over 8292.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2957, pruned_loss=0.06725, over 1614501.74 frames. ], batch size: 23, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:49:49,256 INFO [train.py:901] (1/4) Epoch 17, batch 3400, loss[loss=0.2393, simple_loss=0.3138, pruned_loss=0.08235, over 8498.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.296, pruned_loss=0.0675, over 1612661.66 frames. ], batch size: 28, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:49:55,919 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6703, 5.7494, 5.0868, 2.4586, 5.1339, 5.5306, 5.2900, 5.1772], + device='cuda:1'), covar=tensor([0.0535, 0.0352, 0.0901, 0.4551, 0.0699, 0.0771, 0.0998, 0.0596], + device='cuda:1'), in_proj_covar=tensor([0.0501, 0.0414, 0.0421, 0.0513, 0.0406, 0.0414, 0.0400, 0.0358], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:50:01,001 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 19:50:04,442 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.21 vs. limit=5.0 +2023-02-06 19:50:14,686 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:14,765 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:19,431 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.442e+02 2.969e+02 4.012e+02 9.663e+02, threshold=5.937e+02, percent-clipped=5.0 +2023-02-06 19:50:24,937 INFO [train.py:901] (1/4) Epoch 17, batch 3450, loss[loss=0.2242, simple_loss=0.3089, pruned_loss=0.0698, over 7636.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2962, pruned_loss=0.06763, over 1616361.31 frames. 
], batch size: 19, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:50:30,900 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:32,218 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:33,515 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5907, 1.9341, 1.9350, 1.2280, 1.9971, 1.5072, 0.4686, 1.7573], + device='cuda:1'), covar=tensor([0.0412, 0.0296, 0.0195, 0.0429, 0.0351, 0.0727, 0.0757, 0.0236], + device='cuda:1'), in_proj_covar=tensor([0.0431, 0.0370, 0.0315, 0.0427, 0.0353, 0.0510, 0.0378, 0.0392], + device='cuda:1'), out_proj_covar=tensor([1.1769e-04, 9.8406e-05, 8.3563e-05, 1.1444e-04, 9.4984e-05, 1.4697e-04, + 1.0310e-04, 1.0527e-04], device='cuda:1') +2023-02-06 19:50:47,676 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:59,803 INFO [train.py:901] (1/4) Epoch 17, batch 3500, loss[loss=0.2083, simple_loss=0.2889, pruned_loss=0.06385, over 8089.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2968, pruned_loss=0.06834, over 1616711.43 frames. ], batch size: 21, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:51:13,854 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 19:51:31,535 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.401e+02 3.009e+02 3.970e+02 8.620e+02, threshold=6.019e+02, percent-clipped=6.0 +2023-02-06 19:51:37,017 INFO [train.py:901] (1/4) Epoch 17, batch 3550, loss[loss=0.2204, simple_loss=0.3042, pruned_loss=0.06827, over 8443.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2968, pruned_loss=0.06825, over 1617122.60 frames. ], batch size: 29, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:52:11,133 INFO [train.py:901] (1/4) Epoch 17, batch 3600, loss[loss=0.1625, simple_loss=0.2483, pruned_loss=0.03838, over 7539.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2956, pruned_loss=0.06765, over 1613899.32 frames. 
], batch size: 18, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:52:11,368 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9482, 1.1439, 1.0851, 0.5664, 1.1304, 0.9269, 0.0580, 1.1089], + device='cuda:1'), covar=tensor([0.0333, 0.0314, 0.0268, 0.0450, 0.0322, 0.0784, 0.0652, 0.0256], + device='cuda:1'), in_proj_covar=tensor([0.0434, 0.0375, 0.0319, 0.0431, 0.0358, 0.0515, 0.0380, 0.0396], + device='cuda:1'), out_proj_covar=tensor([1.1865e-04, 9.9762e-05, 8.4682e-05, 1.1547e-04, 9.6214e-05, 1.4857e-04, + 1.0381e-04, 1.0618e-04], device='cuda:1') +2023-02-06 19:52:40,001 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8518, 1.9765, 2.0817, 1.5938, 2.1656, 1.5787, 0.9828, 1.9127], + device='cuda:1'), covar=tensor([0.0478, 0.0291, 0.0245, 0.0422, 0.0325, 0.0672, 0.0704, 0.0269], + device='cuda:1'), in_proj_covar=tensor([0.0432, 0.0373, 0.0318, 0.0429, 0.0356, 0.0512, 0.0378, 0.0394], + device='cuda:1'), out_proj_covar=tensor([1.1802e-04, 9.9351e-05, 8.4339e-05, 1.1490e-04, 9.5670e-05, 1.4765e-04, + 1.0323e-04, 1.0562e-04], device='cuda:1') +2023-02-06 19:52:41,874 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.442e+02 2.775e+02 3.418e+02 6.006e+02, threshold=5.549e+02, percent-clipped=0.0 +2023-02-06 19:52:48,328 INFO [train.py:901] (1/4) Epoch 17, batch 3650, loss[loss=0.2144, simple_loss=0.2769, pruned_loss=0.07599, over 7418.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2946, pruned_loss=0.06712, over 1614999.53 frames. ], batch size: 17, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:53:18,534 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:53:21,775 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 19:53:23,086 INFO [train.py:901] (1/4) Epoch 17, batch 3700, loss[loss=0.2306, simple_loss=0.2999, pruned_loss=0.08063, over 8075.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2934, pruned_loss=0.06655, over 1614310.80 frames. ], batch size: 21, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:53:37,039 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 19:53:53,567 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.419e+02 3.081e+02 4.194e+02 7.364e+02, threshold=6.162e+02, percent-clipped=6.0 +2023-02-06 19:53:59,117 INFO [train.py:901] (1/4) Epoch 17, batch 3750, loss[loss=0.2069, simple_loss=0.2858, pruned_loss=0.064, over 7665.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.294, pruned_loss=0.06671, over 1612834.27 frames. ], batch size: 19, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:54:10,363 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8229, 5.9704, 5.1321, 2.5538, 5.2198, 5.6338, 5.3933, 5.3950], + device='cuda:1'), covar=tensor([0.0555, 0.0371, 0.0915, 0.4484, 0.0758, 0.0664, 0.1104, 0.0581], + device='cuda:1'), in_proj_covar=tensor([0.0504, 0.0412, 0.0419, 0.0514, 0.0408, 0.0416, 0.0400, 0.0360], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 19:54:21,510 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133108.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:54:35,500 INFO [train.py:901] (1/4) Epoch 17, batch 3800, loss[loss=0.2236, simple_loss=0.3047, pruned_loss=0.07128, over 8664.00 frames. 
], tot_loss[loss=0.2142, simple_loss=0.2947, pruned_loss=0.06689, over 1615313.00 frames. ], batch size: 34, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:54:41,265 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:55:04,561 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.594e+02 3.054e+02 3.718e+02 6.772e+02, threshold=6.108e+02, percent-clipped=5.0 +2023-02-06 19:55:09,936 INFO [train.py:901] (1/4) Epoch 17, batch 3850, loss[loss=0.1968, simple_loss=0.2783, pruned_loss=0.05765, over 7931.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2955, pruned_loss=0.06704, over 1619083.14 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:55:31,162 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 19:55:39,010 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4357, 2.3424, 3.1745, 2.5177, 2.8576, 2.4225, 2.1848, 1.7699], + device='cuda:1'), covar=tensor([0.4650, 0.4539, 0.1676, 0.3397, 0.2635, 0.2695, 0.1739, 0.5108], + device='cuda:1'), in_proj_covar=tensor([0.0918, 0.0931, 0.0770, 0.0902, 0.0968, 0.0850, 0.0721, 0.0800], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 19:55:43,012 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133223.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:55:46,904 INFO [train.py:901] (1/4) Epoch 17, batch 3900, loss[loss=0.2027, simple_loss=0.2848, pruned_loss=0.0603, over 7811.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2946, pruned_loss=0.06687, over 1619853.36 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:56:15,757 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.486e+02 2.968e+02 4.028e+02 1.073e+03, threshold=5.936e+02, percent-clipped=5.0 +2023-02-06 19:56:21,114 INFO [train.py:901] (1/4) Epoch 17, batch 3950, loss[loss=0.1768, simple_loss=0.2616, pruned_loss=0.04605, over 7436.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2942, pruned_loss=0.06658, over 1616570.37 frames. ], batch size: 17, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:56:53,890 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0 +2023-02-06 19:56:56,946 INFO [train.py:901] (1/4) Epoch 17, batch 4000, loss[loss=0.2016, simple_loss=0.2829, pruned_loss=0.06013, over 8191.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2939, pruned_loss=0.06662, over 1611492.41 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:57:27,413 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.526e+02 3.333e+02 3.995e+02 7.649e+02, threshold=6.666e+02, percent-clipped=5.0 +2023-02-06 19:57:32,336 INFO [train.py:901] (1/4) Epoch 17, batch 4050, loss[loss=0.1994, simple_loss=0.2786, pruned_loss=0.06007, over 8562.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2942, pruned_loss=0.06711, over 1606000.18 frames. ], batch size: 31, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:57:38,097 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 19:57:41,370 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133392.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:57:42,105 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:57:42,763 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:57:59,642 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:58:07,778 INFO [train.py:901] (1/4) Epoch 17, batch 4100, loss[loss=0.2221, simple_loss=0.311, pruned_loss=0.06658, over 8535.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2936, pruned_loss=0.0667, over 1609509.17 frames. ], batch size: 31, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:58:40,076 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.513e+02 2.919e+02 3.658e+02 1.440e+03, threshold=5.839e+02, percent-clipped=2.0 +2023-02-06 19:58:45,042 INFO [train.py:901] (1/4) Epoch 17, batch 4150, loss[loss=0.1694, simple_loss=0.2443, pruned_loss=0.04725, over 7549.00 frames. ], tot_loss[loss=0.214, simple_loss=0.294, pruned_loss=0.06705, over 1604781.89 frames. ], batch size: 18, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:58:45,257 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:58:54,024 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133492.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:59:00,147 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4794, 2.4096, 1.5668, 2.2612, 2.1268, 1.3679, 2.0528, 2.1384], + device='cuda:1'), covar=tensor([0.1389, 0.0458, 0.1387, 0.0604, 0.0736, 0.1716, 0.0918, 0.0907], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0231, 0.0321, 0.0298, 0.0296, 0.0327, 0.0338, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 19:59:02,283 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:59:19,506 INFO [train.py:901] (1/4) Epoch 17, batch 4200, loss[loss=0.2079, simple_loss=0.2928, pruned_loss=0.06151, over 8509.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.295, pruned_loss=0.06718, over 1605458.02 frames. ], batch size: 49, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:59:32,483 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 19:59:51,070 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.565e+02 3.135e+02 3.827e+02 1.180e+03, threshold=6.269e+02, percent-clipped=6.0 +2023-02-06 19:59:56,756 INFO [train.py:901] (1/4) Epoch 17, batch 4250, loss[loss=0.198, simple_loss=0.2917, pruned_loss=0.05215, over 8321.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2954, pruned_loss=0.06747, over 1603915.70 frames. ], batch size: 26, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:59:57,436 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 20:00:22,922 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2940, 4.1936, 3.8933, 1.9848, 3.9142, 3.8380, 3.7954, 3.6113], + device='cuda:1'), covar=tensor([0.0687, 0.0551, 0.1035, 0.4200, 0.0818, 0.0855, 0.1395, 0.0714], + device='cuda:1'), in_proj_covar=tensor([0.0496, 0.0408, 0.0417, 0.0509, 0.0404, 0.0408, 0.0397, 0.0356], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:00:30,986 INFO [train.py:901] (1/4) Epoch 17, batch 4300, loss[loss=0.1918, simple_loss=0.278, pruned_loss=0.0528, over 8139.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2939, pruned_loss=0.0669, over 1607074.81 frames. ], batch size: 22, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:01:00,707 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133670.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:01:01,926 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.551e+02 3.118e+02 3.976e+02 6.360e+02, threshold=6.236e+02, percent-clipped=1.0 +2023-02-06 20:01:06,893 INFO [train.py:901] (1/4) Epoch 17, batch 4350, loss[loss=0.228, simple_loss=0.3121, pruned_loss=0.07198, over 8660.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2936, pruned_loss=0.06687, over 1605576.77 frames. ], batch size: 34, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:01:31,278 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 20:01:43,144 INFO [train.py:901] (1/4) Epoch 17, batch 4400, loss[loss=0.1843, simple_loss=0.2687, pruned_loss=0.04997, over 7798.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2919, pruned_loss=0.06604, over 1603436.51 frames. ], batch size: 19, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:01:48,111 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133736.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 20:01:49,429 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:02:12,877 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.579e+02 3.148e+02 3.884e+02 8.584e+02, threshold=6.297e+02, percent-clipped=6.0 +2023-02-06 20:02:12,923 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 20:02:18,530 INFO [train.py:901] (1/4) Epoch 17, batch 4450, loss[loss=0.2056, simple_loss=0.279, pruned_loss=0.06614, over 7798.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2911, pruned_loss=0.06533, over 1605232.79 frames. ], batch size: 20, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:02:25,362 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8938, 5.9240, 5.2155, 2.4641, 5.3792, 5.6782, 5.4259, 5.3754], + device='cuda:1'), covar=tensor([0.0540, 0.0396, 0.0921, 0.4583, 0.0650, 0.0680, 0.1008, 0.0474], + device='cuda:1'), in_proj_covar=tensor([0.0503, 0.0412, 0.0421, 0.0515, 0.0406, 0.0413, 0.0400, 0.0359], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:02:55,023 INFO [train.py:901] (1/4) Epoch 17, batch 4500, loss[loss=0.2083, simple_loss=0.283, pruned_loss=0.06685, over 8143.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2918, pruned_loss=0.06596, over 1606647.08 frames. 
], batch size: 22, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:03:00,076 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:03:10,420 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133851.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 20:03:10,902 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 20:03:11,728 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:03:24,314 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.330e+02 2.856e+02 3.592e+02 8.327e+02, threshold=5.711e+02, percent-clipped=1.0 +2023-02-06 20:03:29,184 INFO [train.py:901] (1/4) Epoch 17, batch 4550, loss[loss=0.2601, simple_loss=0.3175, pruned_loss=0.1013, over 6863.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2938, pruned_loss=0.06719, over 1606936.30 frames. ], batch size: 71, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:04:04,548 INFO [train.py:901] (1/4) Epoch 17, batch 4600, loss[loss=0.2623, simple_loss=0.3126, pruned_loss=0.106, over 7539.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2932, pruned_loss=0.06691, over 1608163.88 frames. ], batch size: 18, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:04:21,357 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:04:23,721 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 20:04:35,425 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.386e+02 2.834e+02 3.772e+02 7.696e+02, threshold=5.668e+02, percent-clipped=3.0 +2023-02-06 20:04:40,245 INFO [train.py:901] (1/4) Epoch 17, batch 4650, loss[loss=0.2585, simple_loss=0.3352, pruned_loss=0.09086, over 8360.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2941, pruned_loss=0.06737, over 1613246.31 frames. 
], batch size: 26, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:04:50,869 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1481, 1.4046, 4.3301, 1.5705, 3.8249, 3.5559, 3.8788, 3.7432], + device='cuda:1'), covar=tensor([0.0565, 0.4725, 0.0579, 0.4176, 0.1139, 0.1020, 0.0639, 0.0745], + device='cuda:1'), in_proj_covar=tensor([0.0575, 0.0618, 0.0661, 0.0591, 0.0672, 0.0580, 0.0572, 0.0636], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:04:50,938 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3037, 2.4180, 1.7274, 2.0848, 2.0128, 1.4502, 1.8823, 1.9269], + device='cuda:1'), covar=tensor([0.1641, 0.0411, 0.1160, 0.0622, 0.0698, 0.1564, 0.1025, 0.0967], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0233, 0.0325, 0.0301, 0.0297, 0.0331, 0.0341, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 20:05:00,934 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1888, 1.6778, 4.4126, 1.7874, 2.3133, 5.0437, 5.0461, 4.3208], + device='cuda:1'), covar=tensor([0.1235, 0.1858, 0.0289, 0.2150, 0.1308, 0.0166, 0.0363, 0.0543], + device='cuda:1'), in_proj_covar=tensor([0.0283, 0.0311, 0.0275, 0.0304, 0.0295, 0.0253, 0.0392, 0.0298], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 20:05:06,470 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:05:07,217 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:05:13,074 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.02 vs. limit=5.0 +2023-02-06 20:05:16,660 INFO [train.py:901] (1/4) Epoch 17, batch 4700, loss[loss=0.1664, simple_loss=0.2504, pruned_loss=0.04117, over 7565.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2942, pruned_loss=0.0675, over 1612157.01 frames. ], batch size: 18, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:05:48,978 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.331e+02 2.674e+02 3.349e+02 6.559e+02, threshold=5.348e+02, percent-clipped=3.0 +2023-02-06 20:05:53,953 INFO [train.py:901] (1/4) Epoch 17, batch 4750, loss[loss=0.2243, simple_loss=0.3009, pruned_loss=0.07389, over 8511.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2932, pruned_loss=0.06683, over 1609739.85 frames. 
], batch size: 26, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:06:13,290 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134107.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 20:06:13,882 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4872, 1.1683, 4.6890, 1.8605, 4.1273, 3.8702, 4.1790, 4.0778], + device='cuda:1'), covar=tensor([0.0663, 0.5206, 0.0537, 0.3796, 0.1158, 0.1000, 0.0655, 0.0736], + device='cuda:1'), in_proj_covar=tensor([0.0576, 0.0620, 0.0662, 0.0591, 0.0675, 0.0580, 0.0575, 0.0640], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:06:14,652 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:06:17,884 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 20:06:20,576 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 20:06:28,172 INFO [train.py:901] (1/4) Epoch 17, batch 4800, loss[loss=0.2228, simple_loss=0.3087, pruned_loss=0.06843, over 8190.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2943, pruned_loss=0.06695, over 1615446.88 frames. ], batch size: 23, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:06:28,371 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:06:31,287 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134132.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 20:06:32,686 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:06:38,305 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3076, 1.7400, 1.8659, 1.1220, 1.8831, 1.2499, 0.3453, 1.5303], + device='cuda:1'), covar=tensor([0.0576, 0.0357, 0.0256, 0.0527, 0.0398, 0.0949, 0.0832, 0.0297], + device='cuda:1'), in_proj_covar=tensor([0.0432, 0.0372, 0.0316, 0.0427, 0.0354, 0.0512, 0.0377, 0.0395], + device='cuda:1'), out_proj_covar=tensor([1.1799e-04, 9.9152e-05, 8.3567e-05, 1.1411e-04, 9.4922e-05, 1.4763e-04, + 1.0279e-04, 1.0596e-04], device='cuda:1') +2023-02-06 20:06:46,856 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3529, 1.7585, 1.8724, 1.1307, 1.8524, 1.3307, 0.3736, 1.5542], + device='cuda:1'), covar=tensor([0.0607, 0.0398, 0.0320, 0.0562, 0.0476, 0.0878, 0.0854, 0.0325], + device='cuda:1'), in_proj_covar=tensor([0.0434, 0.0373, 0.0317, 0.0429, 0.0355, 0.0514, 0.0379, 0.0397], + device='cuda:1'), out_proj_covar=tensor([1.1836e-04, 9.9603e-05, 8.3850e-05, 1.1452e-04, 9.5190e-05, 1.4809e-04, + 1.0316e-04, 1.0638e-04], device='cuda:1') +2023-02-06 20:07:00,736 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.352e+02 2.869e+02 3.488e+02 8.440e+02, threshold=5.739e+02, percent-clipped=9.0 +2023-02-06 20:07:06,353 INFO [train.py:901] (1/4) Epoch 17, batch 4850, loss[loss=0.2584, simple_loss=0.3343, pruned_loss=0.09126, over 8464.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.293, pruned_loss=0.06588, over 1616638.54 frames. ], batch size: 29, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:07:14,643 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 20:07:26,239 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134207.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:07:41,130 INFO [train.py:901] (1/4) Epoch 17, batch 4900, loss[loss=0.2173, simple_loss=0.3093, pruned_loss=0.06262, over 8246.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2948, pruned_loss=0.0668, over 1617090.19 frames. ], batch size: 24, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:07:43,518 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134232.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:08:10,409 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3392, 1.7366, 4.0591, 1.9587, 2.6192, 4.5696, 4.6424, 3.9444], + device='cuda:1'), covar=tensor([0.1056, 0.1773, 0.0393, 0.1927, 0.1250, 0.0195, 0.0391, 0.0570], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0310, 0.0275, 0.0302, 0.0294, 0.0254, 0.0390, 0.0298], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 20:08:11,581 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 20:08:13,115 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.657e+02 3.351e+02 4.707e+02 1.168e+03, threshold=6.701e+02, percent-clipped=12.0 +2023-02-06 20:08:17,821 INFO [train.py:901] (1/4) Epoch 17, batch 4950, loss[loss=0.2113, simple_loss=0.2978, pruned_loss=0.06234, over 8717.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2942, pruned_loss=0.0665, over 1616881.87 frames. ], batch size: 39, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:08:18,374 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-06 20:08:49,557 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:08:52,968 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:08:54,220 INFO [train.py:901] (1/4) Epoch 17, batch 5000, loss[loss=0.2158, simple_loss=0.2976, pruned_loss=0.06701, over 8239.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2945, pruned_loss=0.0666, over 1617879.12 frames. ], batch size: 22, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:09:15,211 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:09:24,842 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.361e+02 2.656e+02 3.405e+02 6.362e+02, threshold=5.311e+02, percent-clipped=0.0 +2023-02-06 20:09:30,487 INFO [train.py:901] (1/4) Epoch 17, batch 5050, loss[loss=0.2054, simple_loss=0.2966, pruned_loss=0.05708, over 8136.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2962, pruned_loss=0.06824, over 1619927.20 frames. 
], batch size: 22, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:09:32,845 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3389, 1.3711, 4.5641, 1.6624, 4.0036, 3.8318, 4.1389, 3.9733], + device='cuda:1'), covar=tensor([0.0598, 0.4484, 0.0487, 0.3729, 0.1103, 0.0910, 0.0559, 0.0699], + device='cuda:1'), in_proj_covar=tensor([0.0578, 0.0620, 0.0664, 0.0591, 0.0676, 0.0580, 0.0575, 0.0640], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:09:35,086 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:09:54,075 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:09:58,691 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 20:10:07,172 INFO [train.py:901] (1/4) Epoch 17, batch 5100, loss[loss=0.2338, simple_loss=0.3052, pruned_loss=0.08121, over 8651.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2948, pruned_loss=0.06739, over 1619307.90 frames. ], batch size: 34, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:10:36,990 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.384e+02 2.769e+02 3.675e+02 1.185e+03, threshold=5.538e+02, percent-clipped=9.0 +2023-02-06 20:10:38,540 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:10:42,687 INFO [train.py:901] (1/4) Epoch 17, batch 5150, loss[loss=0.2158, simple_loss=0.2932, pruned_loss=0.06915, over 7925.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2943, pruned_loss=0.06742, over 1614024.93 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:10:45,525 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8010, 5.9311, 4.9876, 2.4901, 5.1606, 5.5677, 5.4152, 5.3318], + device='cuda:1'), covar=tensor([0.0473, 0.0367, 0.0948, 0.4508, 0.0622, 0.0585, 0.0975, 0.0458], + device='cuda:1'), in_proj_covar=tensor([0.0500, 0.0412, 0.0418, 0.0514, 0.0404, 0.0412, 0.0399, 0.0359], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:11:20,277 INFO [train.py:901] (1/4) Epoch 17, batch 5200, loss[loss=0.2078, simple_loss=0.2857, pruned_loss=0.06494, over 7694.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2927, pruned_loss=0.06654, over 1613426.16 frames. ], batch size: 18, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:11:49,976 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.225e+02 2.783e+02 3.706e+02 1.482e+03, threshold=5.567e+02, percent-clipped=8.0 +2023-02-06 20:11:54,882 INFO [train.py:901] (1/4) Epoch 17, batch 5250, loss[loss=0.198, simple_loss=0.2783, pruned_loss=0.05891, over 8229.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2937, pruned_loss=0.06736, over 1614528.65 frames. ], batch size: 22, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:11:57,586 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 20:12:31,033 INFO [train.py:901] (1/4) Epoch 17, batch 5300, loss[loss=0.2284, simple_loss=0.3058, pruned_loss=0.07548, over 8134.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2943, pruned_loss=0.06781, over 1615533.47 frames. 
], batch size: 22, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:12:48,113 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5583, 1.9730, 2.0479, 1.1004, 2.1486, 1.4640, 0.4797, 1.8364], + device='cuda:1'), covar=tensor([0.0570, 0.0279, 0.0231, 0.0573, 0.0345, 0.0851, 0.0742, 0.0246], + device='cuda:1'), in_proj_covar=tensor([0.0438, 0.0377, 0.0320, 0.0433, 0.0361, 0.0522, 0.0381, 0.0398], + device='cuda:1'), out_proj_covar=tensor([1.1968e-04, 1.0055e-04, 8.4848e-05, 1.1596e-04, 9.6692e-05, 1.5060e-04, + 1.0387e-04, 1.0658e-04], device='cuda:1') +2023-02-06 20:12:58,383 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:13:01,795 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134671.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:13:02,357 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.333e+02 2.884e+02 3.429e+02 1.143e+03, threshold=5.769e+02, percent-clipped=6.0 +2023-02-06 20:13:07,135 INFO [train.py:901] (1/4) Epoch 17, batch 5350, loss[loss=0.2161, simple_loss=0.2973, pruned_loss=0.06742, over 8332.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2948, pruned_loss=0.06764, over 1614059.31 frames. ], batch size: 26, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:13:43,348 INFO [train.py:901] (1/4) Epoch 17, batch 5400, loss[loss=0.2275, simple_loss=0.3007, pruned_loss=0.07712, over 7648.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2961, pruned_loss=0.06839, over 1618606.79 frames. ], batch size: 19, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:13:44,283 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:01,965 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:14,300 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.337e+02 2.988e+02 3.635e+02 1.067e+03, threshold=5.976e+02, percent-clipped=7.0 +2023-02-06 20:14:18,980 INFO [train.py:901] (1/4) Epoch 17, batch 5450, loss[loss=0.2524, simple_loss=0.3328, pruned_loss=0.08598, over 8292.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2956, pruned_loss=0.06796, over 1615331.75 frames. ], batch size: 23, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:14:20,483 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:23,983 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134786.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:25,739 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 20:14:47,256 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. 
limit=2.0 +2023-02-06 20:14:52,690 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5791, 1.3445, 1.6037, 1.2586, 0.8988, 1.3641, 1.5878, 1.3394], + device='cuda:1'), covar=tensor([0.0524, 0.1304, 0.1697, 0.1477, 0.0606, 0.1614, 0.0691, 0.0658], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0100, 0.0162, 0.0114, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 20:14:54,786 INFO [train.py:901] (1/4) Epoch 17, batch 5500, loss[loss=0.1901, simple_loss=0.2679, pruned_loss=0.05616, over 7653.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2953, pruned_loss=0.06756, over 1618128.04 frames. ], batch size: 19, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:14:55,412 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 20:15:25,527 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.381e+02 2.895e+02 3.783e+02 8.489e+02, threshold=5.790e+02, percent-clipped=3.0 +2023-02-06 20:15:31,374 INFO [train.py:901] (1/4) Epoch 17, batch 5550, loss[loss=0.2286, simple_loss=0.3145, pruned_loss=0.07137, over 8392.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2952, pruned_loss=0.0672, over 1620092.42 frames. ], batch size: 49, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:15:32,210 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:15:42,718 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3545, 1.4886, 2.1747, 1.2226, 1.4211, 1.6290, 1.3725, 1.4900], + device='cuda:1'), covar=tensor([0.1857, 0.2557, 0.0902, 0.4440, 0.1915, 0.3172, 0.2287, 0.2133], + device='cuda:1'), in_proj_covar=tensor([0.0509, 0.0567, 0.0544, 0.0615, 0.0633, 0.0572, 0.0508, 0.0619], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:16:06,822 INFO [train.py:901] (1/4) Epoch 17, batch 5600, loss[loss=0.2736, simple_loss=0.3541, pruned_loss=0.09655, over 8587.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2945, pruned_loss=0.0668, over 1617840.23 frames. ], batch size: 34, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:16:38,754 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.724e+02 3.292e+02 4.135e+02 9.276e+02, threshold=6.584e+02, percent-clipped=7.0 +2023-02-06 20:16:42,892 INFO [train.py:901] (1/4) Epoch 17, batch 5650, loss[loss=0.2075, simple_loss=0.2992, pruned_loss=0.05783, over 7819.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2954, pruned_loss=0.06764, over 1616690.86 frames. ], batch size: 20, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:16:47,882 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7601, 1.4807, 5.8831, 2.2159, 5.2478, 4.8570, 5.4407, 5.2896], + device='cuda:1'), covar=tensor([0.0488, 0.5006, 0.0340, 0.3598, 0.0988, 0.0815, 0.0512, 0.0523], + device='cuda:1'), in_proj_covar=tensor([0.0587, 0.0628, 0.0670, 0.0601, 0.0680, 0.0584, 0.0579, 0.0647], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:17:04,362 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 20:17:18,735 INFO [train.py:901] (1/4) Epoch 17, batch 5700, loss[loss=0.2067, simple_loss=0.2796, pruned_loss=0.06687, over 7965.00 frames. ], tot_loss[loss=0.215, simple_loss=0.295, pruned_loss=0.06749, over 1615738.51 frames. ], batch size: 21, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:17:24,526 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135037.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:27,998 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:42,576 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:45,934 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:49,723 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.516e+02 3.214e+02 3.973e+02 1.283e+03, threshold=6.427e+02, percent-clipped=6.0 +2023-02-06 20:17:53,703 INFO [train.py:901] (1/4) Epoch 17, batch 5750, loss[loss=0.2403, simple_loss=0.3118, pruned_loss=0.08441, over 8292.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2948, pruned_loss=0.06745, over 1620054.42 frames. ], batch size: 23, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:18:11,549 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 20:18:14,104 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 20:18:30,180 INFO [train.py:901] (1/4) Epoch 17, batch 5800, loss[loss=0.2647, simple_loss=0.3488, pruned_loss=0.09029, over 8654.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2947, pruned_loss=0.06702, over 1619714.49 frames. ], batch size: 39, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:19:00,425 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.84 vs. limit=5.0 +2023-02-06 20:19:00,545 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.313e+02 2.882e+02 3.681e+02 6.576e+02, threshold=5.764e+02, percent-clipped=1.0 +2023-02-06 20:19:04,575 INFO [train.py:901] (1/4) Epoch 17, batch 5850, loss[loss=0.2638, simple_loss=0.3123, pruned_loss=0.1077, over 7703.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2946, pruned_loss=0.06701, over 1619353.12 frames. ], batch size: 18, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:19:12,155 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135189.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:37,622 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:41,091 INFO [train.py:901] (1/4) Epoch 17, batch 5900, loss[loss=0.1754, simple_loss=0.2596, pruned_loss=0.04562, over 7810.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2949, pruned_loss=0.06731, over 1619111.43 frames. ], batch size: 19, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:12,437 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.357e+02 3.084e+02 3.660e+02 6.807e+02, threshold=6.167e+02, percent-clipped=2.0 +2023-02-06 20:20:16,629 INFO [train.py:901] (1/4) Epoch 17, batch 5950, loss[loss=0.1777, simple_loss=0.2453, pruned_loss=0.05504, over 7683.00 frames. 
], tot_loss[loss=0.2146, simple_loss=0.2947, pruned_loss=0.06726, over 1620845.37 frames. ], batch size: 18, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:52,280 INFO [train.py:901] (1/4) Epoch 17, batch 6000, loss[loss=0.2237, simple_loss=0.2998, pruned_loss=0.07385, over 7814.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2952, pruned_loss=0.06833, over 1615581.63 frames. ], batch size: 20, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:20:52,280 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 20:21:02,381 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7755, 3.7388, 3.4645, 2.1090, 3.3343, 3.4206, 3.4460, 3.1698], + device='cuda:1'), covar=tensor([0.0981, 0.0552, 0.0920, 0.4684, 0.1013, 0.0920, 0.1166, 0.0918], + device='cuda:1'), in_proj_covar=tensor([0.0501, 0.0412, 0.0417, 0.0518, 0.0406, 0.0410, 0.0401, 0.0360], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:21:05,425 INFO [train.py:935] (1/4) Epoch 17, validation: loss=0.1774, simple_loss=0.2777, pruned_loss=0.03857, over 944034.00 frames. +2023-02-06 20:21:05,426 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 20:21:12,597 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:21:16,159 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0229, 2.6177, 3.7084, 2.0187, 1.8684, 3.6746, 0.8088, 2.0747], + device='cuda:1'), covar=tensor([0.1423, 0.1134, 0.0193, 0.1990, 0.3091, 0.0400, 0.2564, 0.1879], + device='cuda:1'), in_proj_covar=tensor([0.0176, 0.0184, 0.0115, 0.0216, 0.0260, 0.0123, 0.0165, 0.0180], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 20:21:36,688 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.411e+02 3.026e+02 3.580e+02 8.983e+02, threshold=6.051e+02, percent-clipped=2.0 +2023-02-06 20:21:40,883 INFO [train.py:901] (1/4) Epoch 17, batch 6050, loss[loss=0.1752, simple_loss=0.2559, pruned_loss=0.04725, over 7635.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.295, pruned_loss=0.06824, over 1615266.16 frames. ], batch size: 19, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:21:41,749 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6567, 4.5993, 4.1167, 2.1774, 4.0407, 4.1948, 4.2320, 3.8872], + device='cuda:1'), covar=tensor([0.0716, 0.0507, 0.0966, 0.4694, 0.0868, 0.0931, 0.1246, 0.0860], + device='cuda:1'), in_proj_covar=tensor([0.0501, 0.0412, 0.0416, 0.0517, 0.0406, 0.0410, 0.0400, 0.0359], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:22:16,312 INFO [train.py:901] (1/4) Epoch 17, batch 6100, loss[loss=0.2144, simple_loss=0.3053, pruned_loss=0.06172, over 8340.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2948, pruned_loss=0.06827, over 1610202.78 frames. ], batch size: 24, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:22:47,556 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.459e+02 2.890e+02 3.783e+02 6.848e+02, threshold=5.780e+02, percent-clipped=3.0 +2023-02-06 20:22:49,626 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-06 20:22:52,257 INFO [train.py:901] (1/4) Epoch 17, batch 6150, loss[loss=0.1891, simple_loss=0.2892, pruned_loss=0.04451, over 8462.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2949, pruned_loss=0.06797, over 1610211.28 frames. ], batch size: 25, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:20,219 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 20:23:26,589 INFO [train.py:901] (1/4) Epoch 17, batch 6200, loss[loss=0.2202, simple_loss=0.3065, pruned_loss=0.06692, over 8296.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2939, pruned_loss=0.06701, over 1610747.59 frames. ], batch size: 23, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:29,343 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:23:57,602 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.470e+02 3.035e+02 3.550e+02 6.137e+02, threshold=6.070e+02, percent-clipped=1.0 +2023-02-06 20:24:01,730 INFO [train.py:901] (1/4) Epoch 17, batch 6250, loss[loss=0.1814, simple_loss=0.2732, pruned_loss=0.04481, over 8331.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2933, pruned_loss=0.06651, over 1610900.86 frames. ], batch size: 25, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:13,152 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:29,699 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 20:24:30,230 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:36,952 INFO [train.py:901] (1/4) Epoch 17, batch 6300, loss[loss=0.2042, simple_loss=0.2861, pruned_loss=0.06115, over 8348.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2932, pruned_loss=0.06652, over 1612931.06 frames. ], batch size: 26, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:49,741 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:49,777 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:25:07,390 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.700e+02 3.426e+02 4.477e+02 8.691e+02, threshold=6.853e+02, percent-clipped=8.0 +2023-02-06 20:25:11,447 INFO [train.py:901] (1/4) Epoch 17, batch 6350, loss[loss=0.1906, simple_loss=0.267, pruned_loss=0.05712, over 7815.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.295, pruned_loss=0.06693, over 1616215.36 frames. ], batch size: 20, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:25:46,617 INFO [train.py:901] (1/4) Epoch 17, batch 6400, loss[loss=0.1539, simple_loss=0.2338, pruned_loss=0.037, over 7702.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2946, pruned_loss=0.06693, over 1615084.56 frames. ], batch size: 18, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:16,682 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.216e+02 2.648e+02 3.143e+02 6.334e+02, threshold=5.295e+02, percent-clipped=0.0 +2023-02-06 20:26:20,491 INFO [train.py:901] (1/4) Epoch 17, batch 6450, loss[loss=0.2557, simple_loss=0.3226, pruned_loss=0.09435, over 8070.00 frames. 
], tot_loss[loss=0.2151, simple_loss=0.2952, pruned_loss=0.0675, over 1617448.73 frames. ], batch size: 21, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:39,795 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2069, 2.2582, 1.9419, 2.9915, 1.4152, 1.6966, 1.8433, 2.3390], + device='cuda:1'), covar=tensor([0.0647, 0.0819, 0.0923, 0.0307, 0.1127, 0.1326, 0.1052, 0.0743], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0198, 0.0248, 0.0211, 0.0209, 0.0248, 0.0256, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 20:26:56,410 INFO [train.py:901] (1/4) Epoch 17, batch 6500, loss[loss=0.2083, simple_loss=0.2974, pruned_loss=0.0596, over 8290.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2957, pruned_loss=0.06771, over 1619935.52 frames. ], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:27,344 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.443e+02 3.095e+02 4.367e+02 8.897e+02, threshold=6.190e+02, percent-clipped=12.0 +2023-02-06 20:27:31,529 INFO [train.py:901] (1/4) Epoch 17, batch 6550, loss[loss=0.2157, simple_loss=0.3032, pruned_loss=0.0641, over 8300.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2944, pruned_loss=0.06693, over 1623015.12 frames. ], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:44,297 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5644, 1.9388, 2.1675, 1.3564, 2.1566, 1.4207, 0.7022, 1.8254], + device='cuda:1'), covar=tensor([0.0572, 0.0301, 0.0231, 0.0514, 0.0379, 0.0769, 0.0693, 0.0290], + device='cuda:1'), in_proj_covar=tensor([0.0430, 0.0372, 0.0320, 0.0425, 0.0353, 0.0512, 0.0374, 0.0395], + device='cuda:1'), out_proj_covar=tensor([1.1723e-04, 9.8929e-05, 8.4846e-05, 1.1351e-04, 9.4554e-05, 1.4739e-04, + 1.0180e-04, 1.0568e-04], device='cuda:1') +2023-02-06 20:27:48,292 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135904.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:27:56,455 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 20:28:06,535 INFO [train.py:901] (1/4) Epoch 17, batch 6600, loss[loss=0.2296, simple_loss=0.3201, pruned_loss=0.0695, over 8108.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2947, pruned_loss=0.06721, over 1618522.41 frames. ], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:06,744 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:16,345 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 20:28:36,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.554e+02 2.943e+02 3.634e+02 1.271e+03, threshold=5.887e+02, percent-clipped=2.0 +2023-02-06 20:28:40,558 INFO [train.py:901] (1/4) Epoch 17, batch 6650, loss[loss=0.2218, simple_loss=0.3115, pruned_loss=0.06605, over 8197.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2931, pruned_loss=0.06631, over 1614286.10 frames. 
], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:49,898 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:56,440 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136000.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:04,529 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136012.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:12,172 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.17 vs. limit=5.0 +2023-02-06 20:29:16,446 INFO [train.py:901] (1/4) Epoch 17, batch 6700, loss[loss=0.1659, simple_loss=0.2438, pruned_loss=0.04394, over 7810.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2933, pruned_loss=0.06691, over 1614029.10 frames. ], batch size: 19, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:29:45,902 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1051, 1.5126, 1.7576, 1.5120, 0.9425, 1.5590, 1.7323, 1.4275], + device='cuda:1'), covar=tensor([0.0490, 0.1243, 0.1630, 0.1372, 0.0619, 0.1457, 0.0689, 0.0668], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0100, 0.0162, 0.0114, 0.0138], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 20:29:47,782 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.437e+02 3.090e+02 3.837e+02 8.578e+02, threshold=6.181e+02, percent-clipped=4.0 +2023-02-06 20:29:50,646 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7024, 2.1775, 4.2701, 1.5637, 3.0179, 2.3499, 1.7541, 3.0116], + device='cuda:1'), covar=tensor([0.1864, 0.2581, 0.0579, 0.4184, 0.1739, 0.2927, 0.2217, 0.2210], + device='cuda:1'), in_proj_covar=tensor([0.0507, 0.0568, 0.0542, 0.0612, 0.0632, 0.0572, 0.0507, 0.0618], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:29:51,768 INFO [train.py:901] (1/4) Epoch 17, batch 6750, loss[loss=0.2129, simple_loss=0.2971, pruned_loss=0.06431, over 8242.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2947, pruned_loss=0.06759, over 1614624.45 frames. ], batch size: 24, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:09,237 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 20:30:11,742 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:26,346 INFO [train.py:901] (1/4) Epoch 17, batch 6800, loss[loss=0.1902, simple_loss=0.2745, pruned_loss=0.05295, over 8107.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2934, pruned_loss=0.0668, over 1609137.52 frames. ], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:35,099 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-06 20:30:48,375 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:57,860 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.691e+02 3.204e+02 3.737e+02 8.793e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 20:31:01,823 INFO [train.py:901] (1/4) Epoch 17, batch 6850, loss[loss=0.2126, simple_loss=0.2844, pruned_loss=0.07044, over 7412.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2954, pruned_loss=0.06785, over 1609889.55 frames. ], batch size: 17, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:31:22,738 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 20:31:37,004 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 20:31:37,309 INFO [train.py:901] (1/4) Epoch 17, batch 6900, loss[loss=0.1968, simple_loss=0.2731, pruned_loss=0.06022, over 7650.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.294, pruned_loss=0.06714, over 1608282.30 frames. ], batch size: 19, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:08,492 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.868e+02 2.541e+02 3.415e+02 4.318e+02 7.722e+02, threshold=6.831e+02, percent-clipped=4.0 +2023-02-06 20:32:12,495 INFO [train.py:901] (1/4) Epoch 17, batch 6950, loss[loss=0.2356, simple_loss=0.3211, pruned_loss=0.07503, over 8242.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.294, pruned_loss=0.06722, over 1606274.72 frames. ], batch size: 24, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:32,958 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 20:32:48,141 INFO [train.py:901] (1/4) Epoch 17, batch 7000, loss[loss=0.2237, simple_loss=0.304, pruned_loss=0.07167, over 8549.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.293, pruned_loss=0.06642, over 1608230.13 frames. ], batch size: 49, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:54,375 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1220, 1.7272, 4.4119, 2.0211, 2.4144, 4.9601, 5.0676, 4.3119], + device='cuda:1'), covar=tensor([0.1367, 0.1875, 0.0280, 0.2041, 0.1368, 0.0198, 0.0469, 0.0575], + device='cuda:1'), in_proj_covar=tensor([0.0286, 0.0311, 0.0277, 0.0306, 0.0296, 0.0254, 0.0392, 0.0300], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 20:32:58,256 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:04,331 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:06,277 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:11,084 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:18,224 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.499e+02 2.956e+02 3.583e+02 7.307e+02, threshold=5.911e+02, percent-clipped=2.0 +2023-02-06 20:33:22,380 INFO [train.py:901] (1/4) Epoch 17, batch 7050, loss[loss=0.2055, simple_loss=0.2887, pruned_loss=0.06113, over 8499.00 frames. 
], tot_loss[loss=0.2136, simple_loss=0.2934, pruned_loss=0.0669, over 1609073.64 frames. ], batch size: 26, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:33:29,395 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:41,804 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0270, 1.6347, 1.3509, 1.5042, 1.3439, 1.1478, 1.2296, 1.2205], + device='cuda:1'), covar=tensor([0.1092, 0.0439, 0.1190, 0.0543, 0.0703, 0.1513, 0.0884, 0.0830], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0229, 0.0322, 0.0297, 0.0293, 0.0327, 0.0338, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 20:33:57,947 INFO [train.py:901] (1/4) Epoch 17, batch 7100, loss[loss=0.2332, simple_loss=0.3165, pruned_loss=0.07498, over 8604.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2941, pruned_loss=0.06705, over 1615102.69 frames. ], batch size: 34, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:18,673 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:26,516 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:27,626 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.503e+02 2.917e+02 3.905e+02 1.004e+03, threshold=5.834e+02, percent-clipped=4.0 +2023-02-06 20:34:31,733 INFO [train.py:901] (1/4) Epoch 17, batch 7150, loss[loss=0.1894, simple_loss=0.2709, pruned_loss=0.05393, over 8292.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2945, pruned_loss=0.06751, over 1618008.25 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:50,070 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:35:07,627 INFO [train.py:901] (1/4) Epoch 17, batch 7200, loss[loss=0.2045, simple_loss=0.2879, pruned_loss=0.06053, over 8489.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2934, pruned_loss=0.06689, over 1616269.88 frames. ], batch size: 26, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:35:09,675 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.5161, 1.1927, 3.8427, 1.5200, 3.0895, 3.0612, 3.4166, 3.3858], + device='cuda:1'), covar=tensor([0.1151, 0.6292, 0.1071, 0.4730, 0.2032, 0.1636, 0.1086, 0.1106], + device='cuda:1'), in_proj_covar=tensor([0.0570, 0.0615, 0.0648, 0.0586, 0.0667, 0.0565, 0.0567, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:35:20,775 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-06 20:35:21,979 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6615, 2.0007, 2.2051, 1.2997, 2.2683, 1.5669, 0.6500, 1.8865], + device='cuda:1'), covar=tensor([0.0514, 0.0341, 0.0270, 0.0536, 0.0330, 0.0773, 0.0679, 0.0265], + device='cuda:1'), in_proj_covar=tensor([0.0427, 0.0368, 0.0317, 0.0420, 0.0349, 0.0513, 0.0372, 0.0392], + device='cuda:1'), out_proj_covar=tensor([1.1636e-04, 9.7651e-05, 8.3902e-05, 1.1178e-04, 9.3374e-05, 1.4780e-04, + 1.0136e-04, 1.0500e-04], device='cuda:1') +2023-02-06 20:35:37,959 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.498e+02 3.072e+02 3.698e+02 8.742e+02, threshold=6.145e+02, percent-clipped=2.0 +2023-02-06 20:35:42,152 INFO [train.py:901] (1/4) Epoch 17, batch 7250, loss[loss=0.2386, simple_loss=0.3202, pruned_loss=0.07847, over 8291.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2931, pruned_loss=0.06656, over 1617104.14 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:11,359 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136619.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:17,806 INFO [train.py:901] (1/4) Epoch 17, batch 7300, loss[loss=0.2384, simple_loss=0.3248, pruned_loss=0.07599, over 7972.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2929, pruned_loss=0.06613, over 1617711.31 frames. ], batch size: 21, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:40,630 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:48,512 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.506e+02 2.969e+02 3.762e+02 7.100e+02, threshold=5.939e+02, percent-clipped=2.0 +2023-02-06 20:36:52,578 INFO [train.py:901] (1/4) Epoch 17, batch 7350, loss[loss=0.1989, simple_loss=0.2884, pruned_loss=0.05468, over 8110.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2937, pruned_loss=0.06696, over 1612648.64 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:05,620 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:16,518 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 20:37:17,971 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:27,017 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136727.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:28,129 INFO [train.py:901] (1/4) Epoch 17, batch 7400, loss[loss=0.2397, simple_loss=0.3172, pruned_loss=0.08107, over 8644.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2952, pruned_loss=0.06734, over 1616741.73 frames. ], batch size: 39, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:35,010 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
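
The `Exclude cut with ID ... from training. Duration: ...` warnings in this log come from the recipe's duration filter: before batching, cuts whose duration falls outside a safe range are dropped, since very long utterances (26.4 s above) blow up memory while very short ones (0.95 s below) carry almost no supervision. A minimal sketch of such a filter, assuming lhotse-style cuts and illustrative bounds of 1.0 s and 20.0 s (the recipe's exact limits are not recorded in this log):

```python
import logging

# Hypothetical bounds for illustration; the actual limits used by the
# recipe are not shown in this log.
MIN_DURATION = 1.0   # seconds
MAX_DURATION = 20.0  # seconds

def remove_short_and_long_utt(cut) -> bool:
    """Return True to keep the cut, False to drop it."""
    if cut.duration < MIN_DURATION or cut.duration > MAX_DURATION:
        logging.warning(
            f"Exclude cut with ID {cut.id} from training. "
            f"Duration: {cut.duration}"
        )
        return False
    return True

# Applied lazily to the training cuts, e.g.:
#   train_cuts = train_cuts.filter(remove_short_and_long_utt)
```
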
Duration: 0.95 +2023-02-06 20:37:36,563 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136740.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:45,390 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:59,293 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.354e+02 2.898e+02 3.777e+02 7.037e+02, threshold=5.795e+02, percent-clipped=3.0 +2023-02-06 20:38:03,298 INFO [train.py:901] (1/4) Epoch 17, batch 7450, loss[loss=0.1764, simple_loss=0.2568, pruned_loss=0.04793, over 7801.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2951, pruned_loss=0.06736, over 1619462.18 frames. ], batch size: 20, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:38:16,576 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 20:38:26,058 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:34,161 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8232, 1.2856, 3.9423, 1.3901, 3.5018, 3.2117, 3.5529, 3.4219], + device='cuda:1'), covar=tensor([0.0603, 0.4530, 0.0563, 0.4332, 0.1132, 0.1021, 0.0645, 0.0778], + device='cuda:1'), in_proj_covar=tensor([0.0577, 0.0622, 0.0656, 0.0596, 0.0673, 0.0574, 0.0574, 0.0634], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:38:34,170 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:37,330 INFO [train.py:901] (1/4) Epoch 17, batch 7500, loss[loss=0.2068, simple_loss=0.2939, pruned_loss=0.05988, over 8558.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2953, pruned_loss=0.06723, over 1625423.13 frames. ], batch size: 34, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:38:41,765 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 20:39:09,456 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.462e+02 2.866e+02 3.948e+02 7.787e+02, threshold=5.732e+02, percent-clipped=6.0 +2023-02-06 20:39:11,049 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:13,443 INFO [train.py:901] (1/4) Epoch 17, batch 7550, loss[loss=0.2161, simple_loss=0.2867, pruned_loss=0.07278, over 8580.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2943, pruned_loss=0.06679, over 1618932.86 frames. ], batch size: 34, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:39:28,636 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:49,028 INFO [train.py:901] (1/4) Epoch 17, batch 7600, loss[loss=0.1994, simple_loss=0.2884, pruned_loss=0.05524, over 8317.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2943, pruned_loss=0.06725, over 1622659.88 frames. ], batch size: 25, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:40:03,457 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. 
limit=2.0 +2023-02-06 20:40:14,525 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6361, 2.2160, 4.1917, 1.3669, 2.9826, 2.1113, 1.6359, 2.9183], + device='cuda:1'), covar=tensor([0.1806, 0.2545, 0.0676, 0.4379, 0.1744, 0.3111, 0.2216, 0.2271], + device='cuda:1'), in_proj_covar=tensor([0.0509, 0.0571, 0.0545, 0.0614, 0.0635, 0.0576, 0.0509, 0.0619], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:40:21,094 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.516e+02 2.945e+02 3.717e+02 7.457e+02, threshold=5.891e+02, percent-clipped=6.0 +2023-02-06 20:40:25,201 INFO [train.py:901] (1/4) Epoch 17, batch 7650, loss[loss=0.2333, simple_loss=0.3078, pruned_loss=0.07937, over 8246.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2948, pruned_loss=0.06733, over 1624810.86 frames. ], batch size: 24, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:40:27,414 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1257, 1.8460, 2.5450, 2.0541, 2.3999, 2.2008, 1.8803, 1.2008], + device='cuda:1'), covar=tensor([0.5066, 0.4882, 0.1618, 0.3201, 0.2508, 0.2764, 0.1887, 0.5160], + device='cuda:1'), in_proj_covar=tensor([0.0920, 0.0937, 0.0775, 0.0902, 0.0969, 0.0853, 0.0723, 0.0798], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:40:38,677 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7059, 1.6420, 2.4760, 1.9166, 2.1608, 1.6909, 1.4553, 0.9347], + device='cuda:1'), covar=tensor([0.6887, 0.5640, 0.1862, 0.3594, 0.2844, 0.4185, 0.2964, 0.5443], + device='cuda:1'), in_proj_covar=tensor([0.0918, 0.0934, 0.0773, 0.0900, 0.0966, 0.0850, 0.0720, 0.0796], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:40:43,283 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:40:58,549 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 20:41:00,048 INFO [train.py:901] (1/4) Epoch 17, batch 7700, loss[loss=0.2173, simple_loss=0.2947, pruned_loss=0.06994, over 8525.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2956, pruned_loss=0.06776, over 1628139.34 frames. ], batch size: 28, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:26,488 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 20:41:26,686 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:41:30,556 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.598e+02 3.111e+02 3.900e+02 8.834e+02, threshold=6.222e+02, percent-clipped=1.0 +2023-02-06 20:41:34,727 INFO [train.py:901] (1/4) Epoch 17, batch 7750, loss[loss=0.206, simple_loss=0.2886, pruned_loss=0.06173, over 8134.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2952, pruned_loss=0.06776, over 1623435.44 frames. 
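
The `Whitening: num_groups=..., num_channels=..., metric=... vs. limit=...` lines report a regularizer diagnostic: for each channel group the model measures how far the activations' covariance is from isotropic. The metric equals 1.0 for perfectly "white" features and grows as the covariance becomes ill-conditioned; a penalty applies only when it exceeds the configured limit (2.0 or 5.0 in this log). A rough sketch of one way to compute such a metric, as an approximation rather than the exact icefall code:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Approximate whitening metric, averaged over channel groups.

    x: (..., num_channels). Returns a scalar >= 1.0 that is 1.0 when
    each group's covariance is proportional to the identity and grows
    as the eigenvalue spread widens.
    """
    num_channels = x.shape[-1]
    group_size = num_channels // num_groups
    x = x.reshape(-1, num_groups, group_size).transpose(0, 1)  # (G, N, C/G)
    covar = torch.matmul(x.transpose(1, 2), x) / x.shape[1]    # (G, C/G, C/G)
    trace = covar.diagonal(dim1=1, dim2=2).sum(-1)             # tr(C) per group
    frob_sq = (covar ** 2).sum(dim=(1, 2))                     # tr(C^2) per group
    # tr(C^2) * d / tr(C)^2 == mean(eig^2) / mean(eig)^2 >= 1.0
    return (frob_sq * group_size / trace ** 2).mean()

# E.g. whitening_metric(acts, num_groups=8) compared against limit=2.0,
# matching the "metric=1.25 vs. limit=2.0" lines above.
```
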
], batch size: 22, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:35,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7035, 1.6366, 2.2317, 1.5126, 1.0984, 2.3343, 0.3744, 1.3954], + device='cuda:1'), covar=tensor([0.1683, 0.1286, 0.0396, 0.1451, 0.3430, 0.0438, 0.2610, 0.1645], + device='cuda:1'), in_proj_covar=tensor([0.0176, 0.0183, 0.0116, 0.0216, 0.0263, 0.0123, 0.0166, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 20:41:44,965 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:03,588 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:09,492 INFO [train.py:901] (1/4) Epoch 17, batch 7800, loss[loss=0.2584, simple_loss=0.3244, pruned_loss=0.09624, over 7125.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2953, pruned_loss=0.06814, over 1620106.09 frames. ], batch size: 71, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:42:36,424 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:39,575 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.454e+02 2.768e+02 3.488e+02 7.043e+02, threshold=5.537e+02, percent-clipped=4.0 +2023-02-06 20:42:41,431 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 20:42:43,620 INFO [train.py:901] (1/4) Epoch 17, batch 7850, loss[loss=0.2951, simple_loss=0.3593, pruned_loss=0.1155, over 7129.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2966, pruned_loss=0.06866, over 1621118.55 frames. ], batch size: 71, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:16,605 INFO [train.py:901] (1/4) Epoch 17, batch 7900, loss[loss=0.2369, simple_loss=0.3174, pruned_loss=0.07822, over 8644.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2972, pruned_loss=0.06923, over 1620882.21 frames. ], batch size: 39, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:45,783 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.478e+02 3.005e+02 3.961e+02 6.905e+02, threshold=6.010e+02, percent-clipped=7.0 +2023-02-06 20:43:49,876 INFO [train.py:901] (1/4) Epoch 17, batch 7950, loss[loss=0.1919, simple_loss=0.2813, pruned_loss=0.05125, over 8491.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2964, pruned_loss=0.06862, over 1619045.60 frames. ], batch size: 26, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:52,850 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:23,211 INFO [train.py:901] (1/4) Epoch 17, batch 8000, loss[loss=0.2709, simple_loss=0.3294, pruned_loss=0.1062, over 7143.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2966, pruned_loss=0.06865, over 1614925.63 frames. 
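
The `optim.py` lines (`Clipping_scale=2.0, grad-norm quartiles ... threshold=... percent-clipped=...`) summarize adaptive gradient clipping: the optimizer keeps a history of recent gradient norms, prints their quartiles (min, 25%, median, 75%, max), and derives the clipping threshold from that history rather than using a fixed constant; `percent-clipped` is the share of recent batches whose gradients were scaled down. A minimal sketch of this idea, with the window size and the threshold rule (`clipping_scale` times the median) as assumptions:

```python
import torch

class AdaptiveGradClipper:
    """Clip gradients at clipping_scale * median of recent grad norms.

    The window size and the exact threshold rule are assumptions for
    illustration; the log only shows the quartiles, the resulting
    threshold, and the percent of batches clipped.
    """

    def __init__(self, clipping_scale: float = 2.0, window: int = 1000):
        self.clipping_scale = clipping_scale
        self.window = window
        self.history: list[float] = []

    def __call__(self, parameters) -> None:
        params = [p for p in parameters if p.grad is not None]
        norm = torch.norm(torch.stack([p.grad.norm() for p in params])).item()
        self.history = (self.history + [norm])[-self.window:]
        quartiles = torch.quantile(
            torch.tensor(self.history),
            torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]),
        )
        threshold = self.clipping_scale * quartiles[2].item()  # scale * median
        if norm > threshold:  # counted toward "percent-clipped"
            for p in params:
                p.grad.mul_(threshold / norm)
```
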
], batch size: 71, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:44:52,962 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.583e+02 3.026e+02 3.684e+02 1.341e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-06 20:44:55,370 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:57,246 INFO [train.py:901] (1/4) Epoch 17, batch 8050, loss[loss=0.2171, simple_loss=0.2994, pruned_loss=0.06733, over 7919.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2953, pruned_loss=0.0687, over 1604278.08 frames. ], batch size: 20, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:45:12,518 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:45:14,065 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.77 vs. limit=5.0 +2023-02-06 20:45:29,702 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 20:45:34,949 INFO [train.py:901] (1/4) Epoch 18, batch 0, loss[loss=0.2273, simple_loss=0.3044, pruned_loss=0.07512, over 8371.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3044, pruned_loss=0.07512, over 8371.00 frames. ], batch size: 24, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:45:34,949 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 20:45:46,126 INFO [train.py:935] (1/4) Epoch 18, validation: loss=0.1783, simple_loss=0.2784, pruned_loss=0.03907, over 944034.00 frames. +2023-02-06 20:45:46,128 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 20:46:00,871 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 20:46:20,796 INFO [train.py:901] (1/4) Epoch 18, batch 50, loss[loss=0.2046, simple_loss=0.2802, pruned_loss=0.06446, over 7440.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2918, pruned_loss=0.06506, over 363008.00 frames. ], batch size: 17, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:28,994 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 2.698e+02 3.585e+02 4.414e+02 8.769e+02, threshold=7.169e+02, percent-clipped=9.0 +2023-02-06 20:46:35,875 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 20:46:42,986 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6738, 2.2123, 4.1828, 1.4629, 3.1904, 2.1811, 1.6836, 3.0387], + device='cuda:1'), covar=tensor([0.1781, 0.2535, 0.0718, 0.4260, 0.1554, 0.3120, 0.2289, 0.2198], + device='cuda:1'), in_proj_covar=tensor([0.0512, 0.0577, 0.0548, 0.0620, 0.0642, 0.0584, 0.0514, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:46:56,057 INFO [train.py:901] (1/4) Epoch 18, batch 100, loss[loss=0.1988, simple_loss=0.2846, pruned_loss=0.05654, over 8323.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2968, pruned_loss=0.0673, over 644923.62 frames. ], batch size: 25, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:58,846 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 20:47:16,434 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:31,280 INFO [train.py:901] (1/4) Epoch 18, batch 150, loss[loss=0.2355, simple_loss=0.3136, pruned_loss=0.07868, over 8674.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2967, pruned_loss=0.06776, over 862960.65 frames. ], batch size: 40, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:47:33,470 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:39,694 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.369e+02 2.797e+02 3.885e+02 6.122e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-06 20:48:07,690 INFO [train.py:901] (1/4) Epoch 18, batch 200, loss[loss=0.2169, simple_loss=0.3106, pruned_loss=0.06156, over 8444.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2947, pruned_loss=0.06687, over 1028948.66 frames. ], batch size: 49, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:44,087 INFO [train.py:901] (1/4) Epoch 18, batch 250, loss[loss=0.2683, simple_loss=0.3286, pruned_loss=0.104, over 8145.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2972, pruned_loss=0.06808, over 1159308.42 frames. ], batch size: 22, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:51,260 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6944, 1.8860, 1.7447, 2.3007, 0.9555, 1.4358, 1.7207, 1.9219], + device='cuda:1'), covar=tensor([0.0849, 0.0765, 0.0990, 0.0450, 0.1079, 0.1353, 0.0785, 0.0660], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0198, 0.0251, 0.0211, 0.0208, 0.0248, 0.0254, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 20:48:52,398 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.467e+02 3.008e+02 3.586e+02 6.135e+02, threshold=6.015e+02, percent-clipped=1.0 +2023-02-06 20:48:55,926 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 20:49:03,701 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 20:49:19,870 INFO [train.py:901] (1/4) Epoch 18, batch 300, loss[loss=0.1698, simple_loss=0.2543, pruned_loss=0.04269, over 7658.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2947, pruned_loss=0.06713, over 1254296.97 frames. ], batch size: 19, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:49:55,786 INFO [train.py:901] (1/4) Epoch 18, batch 350, loss[loss=0.249, simple_loss=0.3222, pruned_loss=0.08789, over 8343.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2966, pruned_loss=0.06837, over 1338313.04 frames. 
], batch size: 24, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:03,096 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2717, 2.2775, 2.0549, 2.8769, 1.3036, 1.7653, 1.9755, 2.4014], + device='cuda:1'), covar=tensor([0.0631, 0.0745, 0.0908, 0.0313, 0.1088, 0.1220, 0.0946, 0.0730], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0251, 0.0211, 0.0208, 0.0249, 0.0255, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 20:50:05,697 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.555e+02 3.034e+02 3.752e+02 7.695e+02, threshold=6.069e+02, percent-clipped=3.0 +2023-02-06 20:50:14,084 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2225, 1.3333, 3.3673, 1.0406, 2.9969, 2.8924, 3.1090, 3.0204], + device='cuda:1'), covar=tensor([0.0817, 0.4150, 0.0784, 0.4104, 0.1342, 0.0991, 0.0777, 0.0891], + device='cuda:1'), in_proj_covar=tensor([0.0578, 0.0622, 0.0660, 0.0594, 0.0673, 0.0576, 0.0574, 0.0639], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:50:32,304 INFO [train.py:901] (1/4) Epoch 18, batch 400, loss[loss=0.1808, simple_loss=0.2647, pruned_loss=0.04849, over 7650.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2948, pruned_loss=0.06708, over 1399056.43 frames. ], batch size: 19, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:33,937 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6764, 1.6546, 2.0978, 1.4926, 1.1606, 2.0888, 0.3014, 1.2754], + device='cuda:1'), covar=tensor([0.1798, 0.1545, 0.0388, 0.1278, 0.3302, 0.0475, 0.2487, 0.1496], + device='cuda:1'), in_proj_covar=tensor([0.0178, 0.0184, 0.0118, 0.0219, 0.0263, 0.0124, 0.0166, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 20:51:08,173 INFO [train.py:901] (1/4) Epoch 18, batch 450, loss[loss=0.2058, simple_loss=0.2796, pruned_loss=0.06601, over 7634.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2937, pruned_loss=0.06694, over 1446314.93 frames. ], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:16,921 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.504e+02 3.016e+02 3.557e+02 6.367e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 20:51:35,830 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.1247, 1.8456, 6.2461, 2.3946, 5.7769, 5.3746, 5.7984, 5.7370], + device='cuda:1'), covar=tensor([0.0420, 0.4195, 0.0274, 0.3267, 0.0814, 0.0792, 0.0458, 0.0421], + device='cuda:1'), in_proj_covar=tensor([0.0576, 0.0617, 0.0656, 0.0591, 0.0670, 0.0574, 0.0570, 0.0634], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 20:51:43,037 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:43,596 INFO [train.py:901] (1/4) Epoch 18, batch 500, loss[loss=0.1894, simple_loss=0.2731, pruned_loss=0.05282, over 7806.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.293, pruned_loss=0.0664, over 1485010.29 frames. 
], batch size: 20, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:45,174 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:50,427 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 20:52:20,562 INFO [train.py:901] (1/4) Epoch 18, batch 550, loss[loss=0.3221, simple_loss=0.3803, pruned_loss=0.1319, over 8451.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.294, pruned_loss=0.06681, over 1513438.08 frames. ], batch size: 27, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:52:29,489 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.606e+02 3.197e+02 3.974e+02 7.545e+02, threshold=6.394e+02, percent-clipped=3.0 +2023-02-06 20:52:35,967 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 20:52:56,986 INFO [train.py:901] (1/4) Epoch 18, batch 600, loss[loss=0.2236, simple_loss=0.3074, pruned_loss=0.0699, over 7806.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2934, pruned_loss=0.06662, over 1534419.10 frames. ], batch size: 20, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:11,901 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 20:53:32,792 INFO [train.py:901] (1/4) Epoch 18, batch 650, loss[loss=0.1936, simple_loss=0.278, pruned_loss=0.05465, over 8148.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2917, pruned_loss=0.06551, over 1549881.53 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:43,407 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.461e+02 2.865e+02 3.365e+02 7.739e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-06 20:54:00,541 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 20:54:09,457 INFO [train.py:901] (1/4) Epoch 18, batch 700, loss[loss=0.236, simple_loss=0.3059, pruned_loss=0.08307, over 8077.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2923, pruned_loss=0.06571, over 1564071.48 frames. ], batch size: 21, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:29,538 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 20:54:30,792 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7066, 1.7049, 2.0172, 1.4595, 1.2784, 2.1493, 0.2877, 1.3200], + device='cuda:1'), covar=tensor([0.1922, 0.1314, 0.0498, 0.1299, 0.3019, 0.0457, 0.2321, 0.1586], + device='cuda:1'), in_proj_covar=tensor([0.0178, 0.0184, 0.0117, 0.0217, 0.0262, 0.0123, 0.0165, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 20:54:44,136 INFO [train.py:901] (1/4) Epoch 18, batch 750, loss[loss=0.2083, simple_loss=0.297, pruned_loss=0.05987, over 8109.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2925, pruned_loss=0.06556, over 1578503.89 frames. ], batch size: 23, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:53,226 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.332e+02 3.041e+02 3.730e+02 6.216e+02, threshold=6.081e+02, percent-clipped=3.0 +2023-02-06 20:54:58,171 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-06 20:55:08,082 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 20:55:19,816 INFO [train.py:901] (1/4) Epoch 18, batch 800, loss[loss=0.2109, simple_loss=0.296, pruned_loss=0.06293, over 8128.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2923, pruned_loss=0.06535, over 1586081.80 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:55:49,564 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:51,610 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:54,216 INFO [train.py:901] (1/4) Epoch 18, batch 850, loss[loss=0.227, simple_loss=0.3077, pruned_loss=0.07316, over 8448.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2931, pruned_loss=0.06652, over 1591201.25 frames. ], batch size: 27, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:03,044 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.308e+02 2.906e+02 3.562e+02 8.427e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 20:56:13,564 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:56:30,845 INFO [train.py:901] (1/4) Epoch 18, batch 900, loss[loss=0.2418, simple_loss=0.3133, pruned_loss=0.08518, over 7660.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.294, pruned_loss=0.06649, over 1598393.20 frames. ], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:35,140 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1105, 1.7289, 2.1578, 1.7869, 1.3351, 1.7187, 2.3923, 2.4754], + device='cuda:1'), covar=tensor([0.0394, 0.1184, 0.1534, 0.1293, 0.0518, 0.1386, 0.0557, 0.0496], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 20:56:50,623 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:05,377 INFO [train.py:901] (1/4) Epoch 18, batch 950, loss[loss=0.174, simple_loss=0.2545, pruned_loss=0.04675, over 7407.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2929, pruned_loss=0.06591, over 1600596.86 frames. 
], batch size: 17, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:10,959 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:13,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:14,180 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.844e+02 2.531e+02 3.020e+02 3.937e+02 8.991e+02, threshold=6.039e+02, percent-clipped=7.0 +2023-02-06 20:57:14,402 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8716, 2.1046, 1.8513, 2.7568, 1.2473, 1.6140, 1.9767, 2.3395], + device='cuda:1'), covar=tensor([0.0751, 0.0748, 0.0862, 0.0314, 0.1044, 0.1254, 0.0840, 0.0667], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0200, 0.0251, 0.0212, 0.0207, 0.0249, 0.0253, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 20:57:29,241 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 20:57:40,381 INFO [train.py:901] (1/4) Epoch 18, batch 1000, loss[loss=0.2275, simple_loss=0.3242, pruned_loss=0.06536, over 8454.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2936, pruned_loss=0.06663, over 1604917.76 frames. ], batch size: 27, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:56,198 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5944, 1.7965, 2.6833, 1.4096, 1.8338, 1.9201, 1.6272, 1.7620], + device='cuda:1'), covar=tensor([0.1930, 0.2695, 0.0930, 0.4278, 0.1980, 0.3145, 0.2194, 0.2348], + device='cuda:1'), in_proj_covar=tensor([0.0511, 0.0574, 0.0544, 0.0618, 0.0636, 0.0577, 0.0510, 0.0624], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:58:05,490 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 20:58:16,697 INFO [train.py:901] (1/4) Epoch 18, batch 1050, loss[loss=0.2111, simple_loss=0.296, pruned_loss=0.06307, over 8134.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2923, pruned_loss=0.06589, over 1611140.96 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:58:18,829 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 20:58:25,548 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.454e+02 3.228e+02 4.133e+02 8.765e+02, threshold=6.456e+02, percent-clipped=4.0 +2023-02-06 20:58:51,059 INFO [train.py:901] (1/4) Epoch 18, batch 1100, loss[loss=0.2411, simple_loss=0.3224, pruned_loss=0.07992, over 8498.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2923, pruned_loss=0.06562, over 1613509.51 frames. 
], batch size: 28, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:58:56,193 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2339, 3.1474, 2.9574, 1.7020, 2.8437, 2.8779, 2.8640, 2.7454], + device='cuda:1'), covar=tensor([0.1221, 0.0898, 0.1345, 0.4465, 0.1104, 0.1369, 0.1724, 0.1083], + device='cuda:1'), in_proj_covar=tensor([0.0507, 0.0419, 0.0419, 0.0517, 0.0409, 0.0422, 0.0407, 0.0365], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 20:59:26,911 INFO [train.py:901] (1/4) Epoch 18, batch 1150, loss[loss=0.1903, simple_loss=0.2631, pruned_loss=0.05877, over 7807.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2923, pruned_loss=0.06563, over 1613804.06 frames. ], batch size: 20, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:59:29,609 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 20:59:35,880 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 2.366e+02 2.909e+02 3.553e+02 5.350e+02, threshold=5.817e+02, percent-clipped=0.0 +2023-02-06 20:59:44,953 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5003, 1.8994, 2.9489, 1.2515, 2.1482, 1.7246, 1.5903, 2.0681], + device='cuda:1'), covar=tensor([0.2161, 0.2716, 0.0920, 0.4962, 0.2110, 0.3697, 0.2472, 0.2691], + device='cuda:1'), in_proj_covar=tensor([0.0516, 0.0577, 0.0550, 0.0625, 0.0642, 0.0583, 0.0515, 0.0630], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:00:02,034 INFO [train.py:901] (1/4) Epoch 18, batch 1200, loss[loss=0.1843, simple_loss=0.2631, pruned_loss=0.05272, over 7652.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2915, pruned_loss=0.06563, over 1611770.62 frames. ], batch size: 19, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:04,277 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2322, 1.2350, 3.3744, 1.0811, 2.9485, 2.8368, 3.0776, 2.9875], + device='cuda:1'), covar=tensor([0.0905, 0.4161, 0.0762, 0.4109, 0.1468, 0.1022, 0.0795, 0.0932], + device='cuda:1'), in_proj_covar=tensor([0.0582, 0.0618, 0.0662, 0.0591, 0.0674, 0.0577, 0.0569, 0.0642], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:00:11,841 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:13,947 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:16,608 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:29,704 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:31,811 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:37,163 INFO [train.py:901] (1/4) Epoch 18, batch 1250, loss[loss=0.2113, simple_loss=0.2864, pruned_loss=0.06811, over 8086.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2935, pruned_loss=0.06628, over 1617358.92 frames. 
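
The recurring `zipformer.py` lines (`warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=0, layers_to_drop=set()`) trace the encoder's stochastic layer-skipping schedule: each encoder stack has its own warmup window measured in batches, and on every step the model decides how many layers to bypass and which ones. Throughout this log `num_to_drop` is 0, i.e. this late in training no layers are actually skipped. A toy sketch of such a schedule; the probabilities and the decay rule are assumptions, since the log shows only the inputs and the (empty) result:

```python
import random

def choose_layers_to_drop(
    batch_count: float,
    warmup_begin: float,
    warmup_end: float,
    num_layers: int,
    max_drop_prob: float = 0.075,  # assumed value, not shown in the log
) -> set[int]:
    """Pick encoder layers to bypass on this training step.

    Skipping is strongest inside the warmup window and disappears
    afterwards, so with batch_count in the 130000s (as in this log)
    the returned set is empty, matching num_to_drop=0.
    """
    if batch_count < warmup_begin:
        drop_prob = max_drop_prob
    elif batch_count < warmup_end:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        drop_prob = max_drop_prob * (1.0 - frac)
    else:
        drop_prob = 0.0  # past the warmup window: keep every layer
    return {i for i in range(num_layers) if random.random() < drop_prob}
```
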
], batch size: 21, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:46,118 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4074, 4.3864, 4.0423, 1.9004, 3.9950, 4.0225, 4.0080, 3.8372], + device='cuda:1'), covar=tensor([0.0785, 0.0569, 0.1037, 0.4669, 0.0783, 0.0967, 0.1307, 0.0683], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0420, 0.0421, 0.0521, 0.0412, 0.0424, 0.0409, 0.0367], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:00:47,267 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.310e+02 2.834e+02 3.613e+02 5.274e+02, threshold=5.668e+02, percent-clipped=0.0 +2023-02-06 21:00:54,239 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:04,836 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:08,259 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:12,852 INFO [train.py:901] (1/4) Epoch 18, batch 1300, loss[loss=0.1673, simple_loss=0.2453, pruned_loss=0.04463, over 7807.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2933, pruned_loss=0.06583, over 1621625.97 frames. ], batch size: 20, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:15,828 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:34,961 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4094, 1.6681, 2.7252, 1.2614, 1.9421, 1.7713, 1.4283, 1.9075], + device='cuda:1'), covar=tensor([0.1819, 0.2327, 0.0859, 0.4357, 0.1778, 0.3156, 0.2275, 0.2107], + device='cuda:1'), in_proj_covar=tensor([0.0514, 0.0577, 0.0549, 0.0624, 0.0643, 0.0583, 0.0513, 0.0630], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:01:37,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:38,939 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8228, 1.9794, 1.7562, 2.5818, 1.1232, 1.5067, 1.7309, 2.1446], + device='cuda:1'), covar=tensor([0.0785, 0.0849, 0.0964, 0.0380, 0.1138, 0.1413, 0.0973, 0.0771], + device='cuda:1'), in_proj_covar=tensor([0.0236, 0.0202, 0.0254, 0.0214, 0.0210, 0.0251, 0.0256, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 21:01:47,850 INFO [train.py:901] (1/4) Epoch 18, batch 1350, loss[loss=0.1948, simple_loss=0.282, pruned_loss=0.05384, over 8066.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2928, pruned_loss=0.06534, over 1625857.99 frames. ], batch size: 21, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:56,591 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.404e+02 2.906e+02 3.545e+02 6.613e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 21:02:15,438 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:02:23,342 INFO [train.py:901] (1/4) Epoch 18, batch 1400, loss[loss=0.238, simple_loss=0.3013, pruned_loss=0.08738, over 7800.00 frames. 
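
The periodic `attn_weights_entropy` dumps are a health check on the attention heads: each row holds the entropy of a head's softmaxed attention weights, averaged over positions, with low values marking heads that focus sharply on a few frames and high values marking diffuse heads; the accompanying `covar`/`in_proj_covar`/`out_proj_covar` tensors summarize the spread of the corresponding projection activations. A sketch of the entropy computation itself, with illustrative shapes:

```python
import torch

def attn_weights_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """Mean entropy per attention head.

    attn_weights: (num_heads, batch, tgt_len, src_len), already
    softmax-normalized over the last dimension.
    """
    eps = 1.0e-20
    entropy = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return entropy.mean(dim=(1, 2))  # average over batch and positions

# A head attending uniformly over 100 frames has entropy log(100) ~ 4.6;
# a head locked onto a single frame has entropy near 0.
uniform = torch.full((8, 4, 50, 100), 1.0 / 100)
print(attn_weights_entropy(uniform))  # ~4.6 for all 8 heads
```
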
], tot_loss[loss=0.2137, simple_loss=0.2942, pruned_loss=0.06654, over 1622195.42 frames. ], batch size: 19, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:02:43,933 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0 +2023-02-06 21:02:57,619 INFO [train.py:901] (1/4) Epoch 18, batch 1450, loss[loss=0.2105, simple_loss=0.3032, pruned_loss=0.0589, over 8450.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2929, pruned_loss=0.06591, over 1619762.10 frames. ], batch size: 27, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:06,388 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.491e+02 3.050e+02 4.246e+02 7.467e+02, threshold=6.100e+02, percent-clipped=3.0 +2023-02-06 21:03:07,085 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 21:03:33,396 INFO [train.py:901] (1/4) Epoch 18, batch 1500, loss[loss=0.192, simple_loss=0.2719, pruned_loss=0.05604, over 8085.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2938, pruned_loss=0.06603, over 1620799.83 frames. ], batch size: 21, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:33,624 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7847, 2.1002, 2.2620, 1.3604, 2.3342, 1.5929, 0.7063, 1.9320], + device='cuda:1'), covar=tensor([0.0563, 0.0314, 0.0232, 0.0544, 0.0343, 0.0787, 0.0786, 0.0305], + device='cuda:1'), in_proj_covar=tensor([0.0434, 0.0373, 0.0321, 0.0428, 0.0358, 0.0514, 0.0379, 0.0400], + device='cuda:1'), out_proj_covar=tensor([1.1840e-04, 9.9095e-05, 8.5226e-05, 1.1405e-04, 9.5537e-05, 1.4746e-04, + 1.0318e-04, 1.0699e-04], device='cuda:1') +2023-02-06 21:03:40,046 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4857, 2.0078, 3.1487, 1.2282, 2.3266, 1.8155, 1.5908, 2.2617], + device='cuda:1'), covar=tensor([0.2104, 0.2380, 0.0842, 0.4784, 0.1921, 0.3311, 0.2340, 0.2448], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0577, 0.0549, 0.0622, 0.0643, 0.0581, 0.0512, 0.0630], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:03:40,186 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 21:03:44,796 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:01,170 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2666, 1.3226, 1.5675, 1.2543, 0.7400, 1.3568, 1.2062, 1.0473], + device='cuda:1'), covar=tensor([0.0542, 0.1256, 0.1592, 0.1441, 0.0553, 0.1482, 0.0697, 0.0701], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 21:04:07,342 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9989, 2.6740, 2.1405, 2.3283, 2.3287, 2.0455, 2.1873, 2.4054], + device='cuda:1'), covar=tensor([0.1075, 0.0323, 0.0879, 0.0573, 0.0585, 0.1049, 0.0746, 0.0809], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0236, 0.0328, 0.0303, 0.0298, 0.0330, 0.0342, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 21:04:08,396 INFO [train.py:901] (1/4) Epoch 18, batch 1550, loss[loss=0.2184, simple_loss=0.2837, pruned_loss=0.07649, over 7784.00 frames. 
], tot_loss[loss=0.2142, simple_loss=0.2945, pruned_loss=0.06693, over 1614682.59 frames. ], batch size: 19, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:17,342 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.366e+02 2.933e+02 3.736e+02 6.367e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-06 21:04:38,172 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:43,436 INFO [train.py:901] (1/4) Epoch 18, batch 1600, loss[loss=0.1732, simple_loss=0.2592, pruned_loss=0.04359, over 8242.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2944, pruned_loss=0.06658, over 1616497.05 frames. ], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:56,940 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:07,021 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:10,356 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:15,374 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0244, 2.4103, 3.5929, 1.9825, 1.8614, 3.5356, 0.6786, 2.1732], + device='cuda:1'), covar=tensor([0.1484, 0.1407, 0.0240, 0.1881, 0.3048, 0.0424, 0.2480, 0.1593], + device='cuda:1'), in_proj_covar=tensor([0.0180, 0.0188, 0.0118, 0.0217, 0.0264, 0.0126, 0.0164, 0.0184], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 21:05:15,387 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:18,035 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:20,007 INFO [train.py:901] (1/4) Epoch 18, batch 1650, loss[loss=0.1954, simple_loss=0.2696, pruned_loss=0.06053, over 7275.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2932, pruned_loss=0.06593, over 1614506.71 frames. ], batch size: 16, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:05:28,837 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.375e+02 2.907e+02 3.508e+02 7.626e+02, threshold=5.813e+02, percent-clipped=3.0 +2023-02-06 21:05:33,239 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:41,376 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 21:05:42,208 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 21:05:54,415 INFO [train.py:901] (1/4) Epoch 18, batch 1700, loss[loss=0.186, simple_loss=0.2619, pruned_loss=0.05506, over 8091.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2917, pruned_loss=0.06512, over 1613547.47 frames. 
], batch size: 21, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:06:02,371 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0236, 1.9034, 3.2985, 1.6437, 2.4358, 3.6490, 3.6179, 3.2045], + device='cuda:1'), covar=tensor([0.1070, 0.1558, 0.0373, 0.1930, 0.1176, 0.0203, 0.0565, 0.0486], + device='cuda:1'), in_proj_covar=tensor([0.0286, 0.0318, 0.0279, 0.0310, 0.0301, 0.0257, 0.0399, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 21:06:28,686 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:29,301 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1976, 4.1648, 3.7314, 1.8476, 3.6732, 3.7571, 3.8454, 3.4514], + device='cuda:1'), covar=tensor([0.0742, 0.0527, 0.1064, 0.4764, 0.0923, 0.0897, 0.1210, 0.0848], + device='cuda:1'), in_proj_covar=tensor([0.0505, 0.0414, 0.0417, 0.0521, 0.0412, 0.0423, 0.0408, 0.0364], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:06:30,624 INFO [train.py:901] (1/4) Epoch 18, batch 1750, loss[loss=0.1828, simple_loss=0.2612, pruned_loss=0.05219, over 7794.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2919, pruned_loss=0.06548, over 1615111.44 frames. ], batch size: 20, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:06:32,186 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:39,595 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.310e+02 2.913e+02 3.912e+02 7.750e+02, threshold=5.826e+02, percent-clipped=6.0 +2023-02-06 21:06:39,810 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:05,729 INFO [train.py:901] (1/4) Epoch 18, batch 1800, loss[loss=0.208, simple_loss=0.3043, pruned_loss=0.05583, over 8333.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2918, pruned_loss=0.06538, over 1614522.44 frames. ], batch size: 25, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:37,621 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:40,849 INFO [train.py:901] (1/4) Epoch 18, batch 1850, loss[loss=0.2132, simple_loss=0.2816, pruned_loss=0.07235, over 7644.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2919, pruned_loss=0.06539, over 1614150.85 frames. 
], batch size: 19, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:49,485 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:49,612 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6945, 1.6586, 2.2169, 1.4416, 1.2312, 2.2730, 0.2929, 1.3461], + device='cuda:1'), covar=tensor([0.1749, 0.1313, 0.0342, 0.1477, 0.3201, 0.0409, 0.2700, 0.1512], + device='cuda:1'), in_proj_covar=tensor([0.0179, 0.0187, 0.0117, 0.0216, 0.0261, 0.0125, 0.0164, 0.0183], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 21:07:51,404 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.274e+02 2.776e+02 3.369e+02 8.658e+02, threshold=5.552e+02, percent-clipped=2.0 +2023-02-06 21:08:05,265 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:14,572 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139307.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:17,142 INFO [train.py:901] (1/4) Epoch 18, batch 1900, loss[loss=0.2174, simple_loss=0.3076, pruned_loss=0.06365, over 8464.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2931, pruned_loss=0.06607, over 1615083.69 frames. ], batch size: 27, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:29,929 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 21:08:52,414 INFO [train.py:901] (1/4) Epoch 18, batch 1950, loss[loss=0.2083, simple_loss=0.2946, pruned_loss=0.06098, over 8239.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2942, pruned_loss=0.06635, over 1618473.05 frames. ], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:55,254 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 21:09:01,254 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.421e+02 2.964e+02 3.877e+02 7.962e+02, threshold=5.927e+02, percent-clipped=5.0 +2023-02-06 21:09:08,113 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 21:09:11,170 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:28,238 INFO [train.py:901] (1/4) Epoch 18, batch 2000, loss[loss=0.2336, simple_loss=0.3162, pruned_loss=0.07556, over 8522.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2948, pruned_loss=0.06672, over 1617046.66 frames. ], batch size: 26, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:09:28,243 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 21:09:30,565 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:33,881 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139419.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:42,124 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139430.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:48,227 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:49,574 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1076, 2.2850, 1.9684, 2.9241, 1.2883, 1.6494, 1.9405, 2.2873], + device='cuda:1'), covar=tensor([0.0748, 0.0824, 0.0952, 0.0376, 0.1182, 0.1357, 0.0942, 0.0811], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0200, 0.0250, 0.0213, 0.0207, 0.0249, 0.0255, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 21:09:51,650 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139444.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:59,137 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139455.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:10:02,970 INFO [train.py:901] (1/4) Epoch 18, batch 2050, loss[loss=0.2279, simple_loss=0.3138, pruned_loss=0.07098, over 8322.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2933, pruned_loss=0.06617, over 1611838.47 frames. ], batch size: 26, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:10:12,679 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.515e+02 3.080e+02 3.592e+02 7.733e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 21:10:39,819 INFO [train.py:901] (1/4) Epoch 18, batch 2100, loss[loss=0.2471, simple_loss=0.3086, pruned_loss=0.0928, over 7973.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.294, pruned_loss=0.06638, over 1617304.82 frames. ], batch size: 21, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:15,316 INFO [train.py:901] (1/4) Epoch 18, batch 2150, loss[loss=0.1958, simple_loss=0.2892, pruned_loss=0.05116, over 8239.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2943, pruned_loss=0.06643, over 1618343.90 frames. ], batch size: 24, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:24,961 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.487e+02 3.024e+02 3.808e+02 9.008e+02, threshold=6.048e+02, percent-clipped=4.0 +2023-02-06 21:11:34,718 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,090 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,167 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:50,324 INFO [train.py:901] (1/4) Epoch 18, batch 2200, loss[loss=0.2311, simple_loss=0.307, pruned_loss=0.0776, over 8106.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2932, pruned_loss=0.06631, over 1617422.26 frames. 
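
The learning rate in these lines depends on both the global batch index and the epoch: it drifts down slowly within an epoch (4.43e-03 early in epoch 17, 4.41e-03 by its end) and steps down when the epoch counter increments (4.28e-03 at the start of epoch 18, reaching 4.24e-03 here). A sketch of an Eden-style schedule of the kind icefall uses, with `lr_batches` and `lr_epochs` as assumed hyperparameters, since their actual values are not in this log:

```python
def eden_lr(
    base_lr: float,
    batch: int,
    epoch: int,
    lr_batches: float = 5000.0,  # assumed, not shown in the log
    lr_epochs: float = 6.0,      # assumed, not shown in the log
) -> float:
    """Eden-style schedule: the lr decays with batch count and epoch."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor

# With batch around 139000 the batch factor changes very slowly, which
# is why the lr only creeps from 4.25e-03 to 4.24e-03 across hundreds
# of batches here, while the epoch 17 -> 18 increment moves it in one step.
```
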
], batch size: 23, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:10,495 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:13,211 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:19,372 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:19,555 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5667, 2.0210, 3.2822, 1.2714, 2.3937, 2.0617, 1.6269, 2.4425], + device='cuda:1'), covar=tensor([0.1939, 0.2583, 0.0805, 0.4749, 0.1887, 0.3138, 0.2265, 0.2300], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0577, 0.0554, 0.0622, 0.0640, 0.0577, 0.0513, 0.0627], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:12:26,631 INFO [train.py:901] (1/4) Epoch 18, batch 2250, loss[loss=0.2739, simple_loss=0.3396, pruned_loss=0.1041, over 8131.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2934, pruned_loss=0.06659, over 1617448.88 frames. ], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:31,128 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:36,169 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.519e+02 3.270e+02 4.475e+02 8.912e+02, threshold=6.540e+02, percent-clipped=11.0 +2023-02-06 21:13:01,638 INFO [train.py:901] (1/4) Epoch 18, batch 2300, loss[loss=0.246, simple_loss=0.3261, pruned_loss=0.0829, over 8496.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2942, pruned_loss=0.06717, over 1620789.93 frames. ], batch size: 26, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:04,658 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:32,010 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:37,197 INFO [train.py:901] (1/4) Epoch 18, batch 2350, loss[loss=0.2181, simple_loss=0.3032, pruned_loss=0.06653, over 8653.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2939, pruned_loss=0.0671, over 1618877.92 frames. 
], batch size: 34, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:40,613 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:44,706 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2396, 1.2789, 3.3678, 1.1185, 2.9545, 2.8089, 3.0461, 2.9663], + device='cuda:1'), covar=tensor([0.0835, 0.4435, 0.0828, 0.4123, 0.1466, 0.1161, 0.0849, 0.0942], + device='cuda:1'), in_proj_covar=tensor([0.0585, 0.0618, 0.0668, 0.0594, 0.0673, 0.0578, 0.0574, 0.0646], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:13:47,225 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.439e+02 2.945e+02 3.859e+02 6.515e+02, threshold=5.891e+02, percent-clipped=0.0 +2023-02-06 21:13:49,999 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9913, 1.5362, 3.5574, 1.5514, 2.3382, 3.9549, 3.9766, 3.4002], + device='cuda:1'), covar=tensor([0.1084, 0.1738, 0.0295, 0.1956, 0.1077, 0.0188, 0.0391, 0.0491], + device='cuda:1'), in_proj_covar=tensor([0.0283, 0.0314, 0.0274, 0.0306, 0.0295, 0.0254, 0.0394, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 21:13:51,083 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 21:13:55,510 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:09,008 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:11,589 INFO [train.py:901] (1/4) Epoch 18, batch 2400, loss[loss=0.2219, simple_loss=0.2969, pruned_loss=0.07349, over 7926.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2948, pruned_loss=0.06788, over 1621725.87 frames. ], batch size: 20, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:20,250 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:21,630 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:48,489 INFO [train.py:901] (1/4) Epoch 18, batch 2450, loss[loss=0.2152, simple_loss=0.2884, pruned_loss=0.07099, over 7194.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2946, pruned_loss=0.06806, over 1612498.05 frames. ], batch size: 72, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:53,470 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:58,171 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.359e+02 2.854e+02 3.442e+02 8.627e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-06 21:15:23,626 INFO [train.py:901] (1/4) Epoch 18, batch 2500, loss[loss=0.1832, simple_loss=0.262, pruned_loss=0.05223, over 7441.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2953, pruned_loss=0.06849, over 1608251.55 frames. 
], batch size: 17, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:15:39,669 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:15:46,142 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-06 21:15:47,235 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:00,244 INFO [train.py:901] (1/4) Epoch 18, batch 2550, loss[loss=0.1925, simple_loss=0.273, pruned_loss=0.056, over 8856.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2942, pruned_loss=0.06782, over 1614217.92 frames. ], batch size: 40, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:07,327 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:09,813 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.329e+02 2.906e+02 3.594e+02 7.294e+02, threshold=5.811e+02, percent-clipped=3.0 +2023-02-06 21:16:25,058 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:32,756 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6418, 1.9540, 2.0089, 1.2935, 2.1444, 1.4481, 0.6286, 1.9421], + device='cuda:1'), covar=tensor([0.0398, 0.0239, 0.0174, 0.0420, 0.0238, 0.0628, 0.0641, 0.0195], + device='cuda:1'), in_proj_covar=tensor([0.0430, 0.0371, 0.0321, 0.0426, 0.0357, 0.0517, 0.0378, 0.0398], + device='cuda:1'), out_proj_covar=tensor([1.1712e-04, 9.8174e-05, 8.5056e-05, 1.1353e-04, 9.5178e-05, 1.4837e-04, + 1.0278e-04, 1.0658e-04], device='cuda:1') +2023-02-06 21:16:35,505 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:36,614 INFO [train.py:901] (1/4) Epoch 18, batch 2600, loss[loss=0.2534, simple_loss=0.324, pruned_loss=0.09144, over 8563.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2942, pruned_loss=0.06773, over 1617019.59 frames. 
], batch size: 31, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:40,965 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0812, 1.3201, 4.3266, 1.5772, 3.7512, 3.5534, 3.8987, 3.7574], + device='cuda:1'), covar=tensor([0.0730, 0.4701, 0.0516, 0.3957, 0.1268, 0.0918, 0.0640, 0.0768], + device='cuda:1'), in_proj_covar=tensor([0.0585, 0.0619, 0.0668, 0.0595, 0.0674, 0.0578, 0.0574, 0.0647], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:16:44,583 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:44,601 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5184, 1.7822, 2.5707, 1.3756, 1.9956, 1.8255, 1.5965, 1.8878], + device='cuda:1'), covar=tensor([0.1617, 0.2168, 0.0775, 0.3928, 0.1580, 0.2767, 0.1964, 0.2049], + device='cuda:1'), in_proj_covar=tensor([0.0511, 0.0575, 0.0552, 0.0620, 0.0637, 0.0578, 0.0513, 0.0626], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:16:52,709 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:01,563 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:03,000 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:05,689 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0682, 2.3633, 1.8822, 2.8098, 1.3726, 1.5413, 1.9932, 2.3027], + device='cuda:1'), covar=tensor([0.0719, 0.0675, 0.0935, 0.0368, 0.1104, 0.1359, 0.0944, 0.0773], + device='cuda:1'), in_proj_covar=tensor([0.0238, 0.0202, 0.0254, 0.0215, 0.0209, 0.0253, 0.0258, 0.0214], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 21:17:10,537 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:11,787 INFO [train.py:901] (1/4) Epoch 18, batch 2650, loss[loss=0.1734, simple_loss=0.269, pruned_loss=0.03896, over 7794.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2957, pruned_loss=0.06804, over 1619413.06 frames. ], batch size: 19, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:22,343 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 2.973e+02 3.666e+02 6.732e+02, threshold=5.945e+02, percent-clipped=3.0 +2023-02-06 21:17:47,905 INFO [train.py:901] (1/4) Epoch 18, batch 2700, loss[loss=0.1946, simple_loss=0.2837, pruned_loss=0.05278, over 8290.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2958, pruned_loss=0.06837, over 1617639.23 frames. 
], batch size: 23, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:55,831 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:00,555 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:02,538 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:16,408 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:23,215 INFO [train.py:901] (1/4) Epoch 18, batch 2750, loss[loss=0.2009, simple_loss=0.2799, pruned_loss=0.06093, over 8441.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2954, pruned_loss=0.06817, over 1618762.71 frames. ], batch size: 49, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:18:27,487 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:28,872 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:33,791 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.336e+02 2.919e+02 3.807e+02 8.313e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-06 21:19:00,768 INFO [train.py:901] (1/4) Epoch 18, batch 2800, loss[loss=0.1941, simple_loss=0.271, pruned_loss=0.05864, over 8242.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.295, pruned_loss=0.06799, over 1614926.29 frames. ], batch size: 22, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:01,489 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:02,304 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1154, 1.8171, 2.3882, 2.0103, 2.2650, 2.0942, 1.8349, 1.0497], + device='cuda:1'), covar=tensor([0.4626, 0.4335, 0.1618, 0.3178, 0.2113, 0.2795, 0.1888, 0.4823], + device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0949, 0.0785, 0.0914, 0.0981, 0.0867, 0.0732, 0.0812], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:19:25,724 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:32,578 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1951, 1.4810, 4.3476, 1.7100, 3.8906, 3.6493, 3.9114, 3.7689], + device='cuda:1'), covar=tensor([0.0524, 0.4493, 0.0520, 0.3556, 0.1075, 0.0873, 0.0587, 0.0667], + device='cuda:1'), in_proj_covar=tensor([0.0585, 0.0618, 0.0664, 0.0594, 0.0675, 0.0579, 0.0574, 0.0644], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:19:34,250 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-06 21:19:35,894 INFO [train.py:901] (1/4) Epoch 18, batch 2850, loss[loss=0.2141, simple_loss=0.2797, pruned_loss=0.07423, over 7548.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2944, pruned_loss=0.06728, over 1615277.77 frames. 
], batch size: 18, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:39,558 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140266.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:45,685 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.447e+02 2.919e+02 3.574e+02 5.806e+02, threshold=5.838e+02, percent-clipped=0.0 +2023-02-06 21:19:47,302 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:50,899 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:52,284 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:53,087 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7514, 2.0760, 2.1917, 1.2637, 2.2486, 1.6164, 0.6628, 1.9176], + device='cuda:1'), covar=tensor([0.0556, 0.0355, 0.0278, 0.0573, 0.0384, 0.0854, 0.0827, 0.0301], + device='cuda:1'), in_proj_covar=tensor([0.0432, 0.0373, 0.0321, 0.0430, 0.0359, 0.0519, 0.0380, 0.0400], + device='cuda:1'), out_proj_covar=tensor([1.1765e-04, 9.8841e-05, 8.4977e-05, 1.1459e-04, 9.5747e-05, 1.4865e-04, + 1.0347e-04, 1.0696e-04], device='cuda:1') +2023-02-06 21:20:07,403 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140304.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:11,940 INFO [train.py:901] (1/4) Epoch 18, batch 2900, loss[loss=0.2154, simple_loss=0.3056, pruned_loss=0.06257, over 8447.00 frames. ], tot_loss[loss=0.215, simple_loss=0.295, pruned_loss=0.06752, over 1613695.60 frames. ], batch size: 27, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:14,966 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:23,758 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:25,183 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140329.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:32,759 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:44,375 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 21:20:47,994 INFO [train.py:901] (1/4) Epoch 18, batch 2950, loss[loss=0.2073, simple_loss=0.274, pruned_loss=0.07031, over 7534.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.295, pruned_loss=0.06785, over 1613094.09 frames. 
], batch size: 18, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:57,349 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.671e+02 3.280e+02 4.327e+02 7.160e+02, threshold=6.561e+02, percent-clipped=5.0 +2023-02-06 21:21:02,944 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6249, 1.6616, 4.7915, 1.8315, 4.3617, 3.9773, 4.3154, 4.1908], + device='cuda:1'), covar=tensor([0.0491, 0.4076, 0.0417, 0.3496, 0.0885, 0.0893, 0.0511, 0.0579], + device='cuda:1'), in_proj_covar=tensor([0.0586, 0.0618, 0.0665, 0.0596, 0.0675, 0.0578, 0.0575, 0.0646], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:21:18,951 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:21:23,798 INFO [train.py:901] (1/4) Epoch 18, batch 3000, loss[loss=0.2414, simple_loss=0.3167, pruned_loss=0.08311, over 6229.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2959, pruned_loss=0.06861, over 1611296.01 frames. ], batch size: 71, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:21:23,798 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 21:21:37,690 INFO [train.py:935] (1/4) Epoch 18, validation: loss=0.1773, simple_loss=0.2774, pruned_loss=0.03861, over 944034.00 frames. +2023-02-06 21:21:37,691 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 21:22:14,080 INFO [train.py:901] (1/4) Epoch 18, batch 3050, loss[loss=0.2428, simple_loss=0.3226, pruned_loss=0.08147, over 8490.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2973, pruned_loss=0.06959, over 1616042.32 frames. ], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:16,898 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:21,665 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:24,219 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.663e+02 3.172e+02 4.119e+02 9.916e+02, threshold=6.345e+02, percent-clipped=7.0 +2023-02-06 21:22:40,219 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1798, 1.6210, 4.3153, 1.9997, 2.3727, 4.9154, 4.9758, 4.2393], + device='cuda:1'), covar=tensor([0.1165, 0.1789, 0.0285, 0.1862, 0.1262, 0.0190, 0.0460, 0.0589], + device='cuda:1'), in_proj_covar=tensor([0.0286, 0.0316, 0.0280, 0.0310, 0.0300, 0.0258, 0.0402, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 21:22:42,890 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140502.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:46,273 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:48,880 INFO [train.py:901] (1/4) Epoch 18, batch 3100, loss[loss=0.1968, simple_loss=0.2974, pruned_loss=0.0481, over 8335.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2982, pruned_loss=0.06972, over 1614084.38 frames. 
], batch size: 25, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:56,952 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:00,963 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:01,023 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:01,872 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 21:23:07,862 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:09,255 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:15,571 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:25,507 INFO [train.py:901] (1/4) Epoch 18, batch 3150, loss[loss=0.2457, simple_loss=0.3269, pruned_loss=0.08225, over 8363.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2984, pruned_loss=0.06958, over 1617750.70 frames. ], batch size: 24, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:23:26,371 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140562.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:27,686 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:34,940 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.438e+02 2.948e+02 4.263e+02 1.019e+03, threshold=5.895e+02, percent-clipped=4.0 +2023-02-06 21:23:38,565 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:40,650 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:43,430 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:59,140 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:01,007 INFO [train.py:901] (1/4) Epoch 18, batch 3200, loss[loss=0.2385, simple_loss=0.3087, pruned_loss=0.0842, over 8460.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2969, pruned_loss=0.06874, over 1617599.34 frames. 
], batch size: 27, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:06,670 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2404, 2.7277, 3.0636, 1.5754, 3.3102, 2.1223, 1.4844, 2.3515], + device='cuda:1'), covar=tensor([0.0713, 0.0341, 0.0219, 0.0737, 0.0377, 0.0682, 0.0831, 0.0432], + device='cuda:1'), in_proj_covar=tensor([0.0434, 0.0375, 0.0321, 0.0430, 0.0360, 0.0520, 0.0379, 0.0399], + device='cuda:1'), out_proj_covar=tensor([1.1818e-04, 9.9388e-05, 8.5057e-05, 1.1472e-04, 9.6150e-05, 1.4908e-04, + 1.0321e-04, 1.0654e-04], device='cuda:1') +2023-02-06 21:24:07,873 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:09,395 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4787, 2.6909, 1.9615, 2.3245, 2.4007, 1.6992, 2.2458, 2.2396], + device='cuda:1'), covar=tensor([0.1545, 0.0398, 0.1153, 0.0602, 0.0632, 0.1474, 0.0947, 0.1032], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0233, 0.0323, 0.0302, 0.0296, 0.0330, 0.0339, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 21:24:30,744 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3905, 1.6955, 1.7062, 1.0193, 1.7039, 1.3340, 0.2663, 1.6036], + device='cuda:1'), covar=tensor([0.0436, 0.0346, 0.0260, 0.0423, 0.0421, 0.0817, 0.0772, 0.0260], + device='cuda:1'), in_proj_covar=tensor([0.0437, 0.0377, 0.0322, 0.0432, 0.0361, 0.0522, 0.0380, 0.0400], + device='cuda:1'), out_proj_covar=tensor([1.1879e-04, 9.9887e-05, 8.5326e-05, 1.1498e-04, 9.6401e-05, 1.4973e-04, + 1.0349e-04, 1.0701e-04], device='cuda:1') +2023-02-06 21:24:36,814 INFO [train.py:901] (1/4) Epoch 18, batch 3250, loss[loss=0.2099, simple_loss=0.2801, pruned_loss=0.06982, over 7651.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2956, pruned_loss=0.06798, over 1614317.87 frames. ], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:46,453 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.256e+02 2.889e+02 3.448e+02 6.536e+02, threshold=5.777e+02, percent-clipped=1.0 +2023-02-06 21:25:13,081 INFO [train.py:901] (1/4) Epoch 18, batch 3300, loss[loss=0.2453, simple_loss=0.3208, pruned_loss=0.08485, over 8034.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06793, over 1614317.86 frames. 
], batch size: 22, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:22,137 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1894, 1.3687, 1.7190, 1.3400, 0.6576, 1.4536, 1.2174, 1.0984], + device='cuda:1'), covar=tensor([0.0559, 0.1189, 0.1595, 0.1344, 0.0557, 0.1397, 0.0667, 0.0647], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0113, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 21:25:30,570 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:33,922 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:39,313 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:47,455 INFO [train.py:901] (1/4) Epoch 18, batch 3350, loss[loss=0.244, simple_loss=0.3144, pruned_loss=0.08675, over 8697.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2958, pruned_loss=0.06842, over 1615035.10 frames. ], batch size: 34, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:57,593 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.504e+02 2.969e+02 3.727e+02 7.020e+02, threshold=5.938e+02, percent-clipped=2.0 +2023-02-06 21:26:02,376 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 21:26:23,938 INFO [train.py:901] (1/4) Epoch 18, batch 3400, loss[loss=0.2075, simple_loss=0.2811, pruned_loss=0.06696, over 8086.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2954, pruned_loss=0.06803, over 1615548.73 frames. ], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:35,805 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:42,133 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:46,749 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:52,076 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:58,805 INFO [train.py:901] (1/4) Epoch 18, batch 3450, loss[loss=0.2108, simple_loss=0.2804, pruned_loss=0.0706, over 7706.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2953, pruned_loss=0.06791, over 1611206.96 frames. 
], batch size: 18, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:59,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:01,046 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:03,745 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:05,719 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:08,266 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.419e+02 3.065e+02 3.703e+02 6.567e+02, threshold=6.131e+02, percent-clipped=3.0 +2023-02-06 21:27:34,146 INFO [train.py:901] (1/4) Epoch 18, batch 3500, loss[loss=0.1851, simple_loss=0.2489, pruned_loss=0.06069, over 7818.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2957, pruned_loss=0.06804, over 1611794.86 frames. ], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:27:51,077 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 21:27:51,205 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:09,212 INFO [train.py:901] (1/4) Epoch 18, batch 3550, loss[loss=0.2252, simple_loss=0.2933, pruned_loss=0.07852, over 7925.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.296, pruned_loss=0.06852, over 1611770.57 frames. ], batch size: 20, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:12,011 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:12,766 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140966.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:18,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.456e+02 3.083e+02 3.681e+02 6.081e+02, threshold=6.167e+02, percent-clipped=0.0 +2023-02-06 21:28:26,454 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:30,648 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:44,277 INFO [train.py:901] (1/4) Epoch 18, batch 3600, loss[loss=0.2158, simple_loss=0.2946, pruned_loss=0.0685, over 8076.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2951, pruned_loss=0.06822, over 1610121.70 frames. ], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:49,258 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:20,372 INFO [train.py:901] (1/4) Epoch 18, batch 3650, loss[loss=0.2454, simple_loss=0.3073, pruned_loss=0.09173, over 7794.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2943, pruned_loss=0.06783, over 1609832.84 frames. 
], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:30,812 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.345e+02 2.956e+02 3.633e+02 6.454e+02, threshold=5.912e+02, percent-clipped=1.0 +2023-02-06 21:29:37,716 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:55,728 INFO [train.py:901] (1/4) Epoch 18, batch 3700, loss[loss=0.2894, simple_loss=0.3613, pruned_loss=0.1087, over 7484.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.293, pruned_loss=0.06738, over 1608814.36 frames. ], batch size: 72, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:57,129 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 21:30:02,962 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:20,682 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141145.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:32,113 INFO [train.py:901] (1/4) Epoch 18, batch 3750, loss[loss=0.1871, simple_loss=0.2772, pruned_loss=0.04845, over 8497.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2937, pruned_loss=0.06737, over 1609613.28 frames. ], batch size: 29, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:30:32,261 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:39,107 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:41,863 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.679e+02 3.309e+02 4.099e+02 7.455e+02, threshold=6.618e+02, percent-clipped=7.0 +2023-02-06 21:31:00,278 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:07,419 INFO [train.py:901] (1/4) Epoch 18, batch 3800, loss[loss=0.2205, simple_loss=0.2866, pruned_loss=0.07719, over 8086.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2936, pruned_loss=0.06729, over 1610525.20 frames. ], batch size: 21, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:15,006 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:29,273 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:32,639 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:42,609 INFO [train.py:901] (1/4) Epoch 18, batch 3850, loss[loss=0.2336, simple_loss=0.323, pruned_loss=0.07215, over 8197.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2938, pruned_loss=0.06711, over 1610489.53 frames. 
], batch size: 23, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:46,905 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:52,713 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.500e+02 3.018e+02 3.684e+02 7.912e+02, threshold=6.036e+02, percent-clipped=1.0 +2023-02-06 21:31:55,474 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:00,514 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:03,897 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 21:32:17,056 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141309.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:18,347 INFO [train.py:901] (1/4) Epoch 18, batch 3900, loss[loss=0.1882, simple_loss=0.2655, pruned_loss=0.05546, over 7247.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2942, pruned_loss=0.06725, over 1611517.99 frames. ], batch size: 16, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:32:42,386 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:51,252 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1202, 1.3126, 3.2207, 1.0382, 2.8435, 2.6784, 2.9370, 2.8874], + device='cuda:1'), covar=tensor([0.0810, 0.3881, 0.0855, 0.4072, 0.1370, 0.1155, 0.0764, 0.0840], + device='cuda:1'), in_proj_covar=tensor([0.0589, 0.0620, 0.0666, 0.0597, 0.0681, 0.0581, 0.0573, 0.0640], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:32:52,480 INFO [train.py:901] (1/4) Epoch 18, batch 3950, loss[loss=0.2231, simple_loss=0.3041, pruned_loss=0.07104, over 8075.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2937, pruned_loss=0.06719, over 1606190.46 frames. ], batch size: 21, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:33:02,714 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.421e+02 2.990e+02 3.795e+02 7.053e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 21:33:08,820 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9702, 2.5319, 3.5829, 1.6141, 1.9347, 3.6481, 0.6217, 2.1612], + device='cuda:1'), covar=tensor([0.1430, 0.1268, 0.0351, 0.2314, 0.2845, 0.0240, 0.2653, 0.1525], + device='cuda:1'), in_proj_covar=tensor([0.0180, 0.0189, 0.0120, 0.0217, 0.0261, 0.0128, 0.0164, 0.0183], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 21:33:15,883 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:27,610 INFO [train.py:901] (1/4) Epoch 18, batch 4000, loss[loss=0.228, simple_loss=0.3115, pruned_loss=0.07226, over 8453.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2949, pruned_loss=0.0675, over 1610934.88 frames. 
], batch size: 27, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:33:34,617 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4184, 1.3628, 2.7690, 1.3781, 2.0193, 2.9835, 3.1409, 2.4647], + device='cuda:1'), covar=tensor([0.1352, 0.1725, 0.0478, 0.2147, 0.1001, 0.0376, 0.0550, 0.0733], + device='cuda:1'), in_proj_covar=tensor([0.0282, 0.0310, 0.0274, 0.0305, 0.0294, 0.0253, 0.0393, 0.0295], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 21:33:37,281 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:44,494 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141435.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:33:53,880 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9182, 1.8412, 3.5207, 1.4809, 2.3729, 3.9395, 3.9846, 3.4336], + device='cuda:1'), covar=tensor([0.1146, 0.1510, 0.0330, 0.1933, 0.1012, 0.0191, 0.0360, 0.0491], + device='cuda:1'), in_proj_covar=tensor([0.0283, 0.0311, 0.0274, 0.0305, 0.0295, 0.0253, 0.0394, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 21:33:58,735 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:00,782 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:02,057 INFO [train.py:901] (1/4) Epoch 18, batch 4050, loss[loss=0.2592, simple_loss=0.3355, pruned_loss=0.09149, over 8351.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2959, pruned_loss=0.06782, over 1616670.18 frames. ], batch size: 26, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:34:12,709 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.514e+02 3.146e+02 4.229e+02 8.641e+02, threshold=6.293e+02, percent-clipped=9.0 +2023-02-06 21:34:16,954 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:31,499 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8945, 1.7866, 1.9843, 1.8626, 0.9999, 1.7051, 2.3909, 2.4207], + device='cuda:1'), covar=tensor([0.0413, 0.1096, 0.1507, 0.1216, 0.0529, 0.1327, 0.0553, 0.0473], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0157, 0.0099, 0.0162, 0.0114, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 21:34:34,612 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141505.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:38,490 INFO [train.py:901] (1/4) Epoch 18, batch 4100, loss[loss=0.2166, simple_loss=0.3002, pruned_loss=0.06645, over 8601.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2956, pruned_loss=0.06736, over 1619586.01 frames. ], batch size: 34, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:00,220 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:13,075 INFO [train.py:901] (1/4) Epoch 18, batch 4150, loss[loss=0.1918, simple_loss=0.2701, pruned_loss=0.0568, over 7977.00 frames. 
], tot_loss[loss=0.2143, simple_loss=0.2953, pruned_loss=0.06661, over 1619626.44 frames. ], batch size: 21, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:17,351 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141567.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:22,679 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.507e+02 2.964e+02 3.952e+02 7.900e+02, threshold=5.928e+02, percent-clipped=3.0 +2023-02-06 21:35:48,917 INFO [train.py:901] (1/4) Epoch 18, batch 4200, loss[loss=0.1974, simple_loss=0.2742, pruned_loss=0.06023, over 8084.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2943, pruned_loss=0.06624, over 1618830.47 frames. ], batch size: 21, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:55,109 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:02,352 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 21:36:15,991 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:23,890 INFO [train.py:901] (1/4) Epoch 18, batch 4250, loss[loss=0.1932, simple_loss=0.2662, pruned_loss=0.0601, over 7715.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2937, pruned_loss=0.06602, over 1616229.44 frames. ], batch size: 18, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:36:24,588 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 21:36:33,264 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.491e+02 2.994e+02 3.932e+02 8.485e+02, threshold=5.988e+02, percent-clipped=6.0 +2023-02-06 21:36:33,474 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:36,880 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:44,000 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:54,152 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:58,037 INFO [train.py:901] (1/4) Epoch 18, batch 4300, loss[loss=0.2277, simple_loss=0.3085, pruned_loss=0.07348, over 8506.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2942, pruned_loss=0.0661, over 1618267.85 frames. ], batch size: 26, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:01,163 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.47 vs. limit=5.0 +2023-02-06 21:37:32,940 INFO [train.py:901] (1/4) Epoch 18, batch 4350, loss[loss=0.2244, simple_loss=0.3027, pruned_loss=0.073, over 8383.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2937, pruned_loss=0.06619, over 1613702.34 frames. 
], batch size: 49, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:43,209 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.620e+02 3.197e+02 4.150e+02 9.266e+02, threshold=6.393e+02, percent-clipped=5.0 +2023-02-06 21:37:46,110 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141779.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:37:54,229 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 21:38:02,428 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:04,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:07,892 INFO [train.py:901] (1/4) Epoch 18, batch 4400, loss[loss=0.2047, simple_loss=0.2855, pruned_loss=0.06193, over 8614.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.294, pruned_loss=0.06635, over 1613107.79 frames. ], batch size: 39, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:36,597 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 21:38:43,844 INFO [train.py:901] (1/4) Epoch 18, batch 4450, loss[loss=0.1732, simple_loss=0.2417, pruned_loss=0.05241, over 7431.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.294, pruned_loss=0.06723, over 1608084.55 frames. ], batch size: 17, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:53,329 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.507e+02 2.868e+02 3.524e+02 7.777e+02, threshold=5.735e+02, percent-clipped=2.0 +2023-02-06 21:38:54,254 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:56,857 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2025, 3.1242, 2.8937, 1.5298, 2.8200, 2.8556, 2.8438, 2.7137], + device='cuda:1'), covar=tensor([0.1293, 0.0980, 0.1411, 0.4931, 0.1184, 0.1394, 0.1751, 0.1125], + device='cuda:1'), in_proj_covar=tensor([0.0508, 0.0423, 0.0422, 0.0522, 0.0414, 0.0420, 0.0405, 0.0369], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:39:07,111 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141894.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:39:11,825 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:18,281 INFO [train.py:901] (1/4) Epoch 18, batch 4500, loss[loss=0.2618, simple_loss=0.3474, pruned_loss=0.08804, over 8436.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2939, pruned_loss=0.06669, over 1610791.79 frames. ], batch size: 27, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:39:23,246 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:26,144 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.00 vs. limit=5.0 +2023-02-06 21:39:27,816 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 21:39:34,712 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:42,681 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:53,392 INFO [train.py:901] (1/4) Epoch 18, batch 4550, loss[loss=0.2389, simple_loss=0.3153, pruned_loss=0.08122, over 8452.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2944, pruned_loss=0.0674, over 1608909.85 frames. ], batch size: 27, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:40:03,507 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.488e+02 2.920e+02 3.454e+02 6.371e+02, threshold=5.840e+02, percent-clipped=2.0 +2023-02-06 21:40:29,735 INFO [train.py:901] (1/4) Epoch 18, batch 4600, loss[loss=0.2156, simple_loss=0.2965, pruned_loss=0.06734, over 8469.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2942, pruned_loss=0.06696, over 1612953.31 frames. ], batch size: 29, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:40:31,401 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1868, 1.8971, 2.6372, 2.2470, 2.5868, 2.1965, 1.9223, 1.3123], + device='cuda:1'), covar=tensor([0.4871, 0.4694, 0.1756, 0.3222, 0.2214, 0.2797, 0.1868, 0.4931], + device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0946, 0.0783, 0.0911, 0.0982, 0.0863, 0.0729, 0.0810], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:40:59,842 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-06 21:41:04,190 INFO [train.py:901] (1/4) Epoch 18, batch 4650, loss[loss=0.234, simple_loss=0.3114, pruned_loss=0.0783, over 8523.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2938, pruned_loss=0.0666, over 1618720.46 frames. ], batch size: 28, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:05,114 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:06,000 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 21:41:13,898 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.389e+02 2.901e+02 3.503e+02 7.256e+02, threshold=5.801e+02, percent-clipped=3.0 +2023-02-06 21:41:23,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:39,476 INFO [train.py:901] (1/4) Epoch 18, batch 4700, loss[loss=0.1858, simple_loss=0.2819, pruned_loss=0.0449, over 8252.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2948, pruned_loss=0.06652, over 1619325.70 frames. ], batch size: 24, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:06,019 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142150.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:42:13,365 INFO [train.py:901] (1/4) Epoch 18, batch 4750, loss[loss=0.216, simple_loss=0.2857, pruned_loss=0.07316, over 7247.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2946, pruned_loss=0.06653, over 1618119.65 frames. 
], batch size: 16, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:23,433 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:23,892 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.398e+02 2.792e+02 3.541e+02 9.190e+02, threshold=5.585e+02, percent-clipped=4.0 +2023-02-06 21:42:24,096 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142175.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:42:30,586 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 21:42:32,638 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 21:42:41,673 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:49,355 INFO [train.py:901] (1/4) Epoch 18, batch 4800, loss[loss=0.2196, simple_loss=0.308, pruned_loss=0.06556, over 8480.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2946, pruned_loss=0.06645, over 1618061.27 frames. ], batch size: 27, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:23,853 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 21:43:24,504 INFO [train.py:901] (1/4) Epoch 18, batch 4850, loss[loss=0.1961, simple_loss=0.2849, pruned_loss=0.05364, over 8678.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2944, pruned_loss=0.06642, over 1616960.35 frames. ], batch size: 34, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:33,973 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.522e+02 3.053e+02 3.876e+02 6.315e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-06 21:43:36,094 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:45,102 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:59,068 INFO [train.py:901] (1/4) Epoch 18, batch 4900, loss[loss=0.2361, simple_loss=0.299, pruned_loss=0.08662, over 7974.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2945, pruned_loss=0.06645, over 1617522.54 frames. ], batch size: 21, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:28,577 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1673, 4.1278, 3.8241, 1.9651, 3.7148, 3.7622, 3.7424, 3.5984], + device='cuda:1'), covar=tensor([0.0888, 0.0658, 0.1149, 0.4821, 0.0957, 0.1094, 0.1294, 0.0887], + device='cuda:1'), in_proj_covar=tensor([0.0509, 0.0423, 0.0421, 0.0521, 0.0412, 0.0418, 0.0402, 0.0369], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:44:34,495 INFO [train.py:901] (1/4) Epoch 18, batch 4950, loss[loss=0.2207, simple_loss=0.3143, pruned_loss=0.06356, over 8340.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2947, pruned_loss=0.06668, over 1615213.63 frames. 
], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:43,723 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5911, 1.3239, 4.7366, 1.8137, 4.1764, 3.9786, 4.2899, 4.1509], + device='cuda:1'), covar=tensor([0.0528, 0.4772, 0.0480, 0.3890, 0.1083, 0.0972, 0.0512, 0.0631], + device='cuda:1'), in_proj_covar=tensor([0.0588, 0.0620, 0.0665, 0.0597, 0.0676, 0.0579, 0.0571, 0.0639], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:44:44,958 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.451e+02 2.943e+02 3.789e+02 7.945e+02, threshold=5.886e+02, percent-clipped=1.0 +2023-02-06 21:44:57,222 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:05,906 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:07,281 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6865, 1.6759, 2.2277, 1.5746, 1.1908, 2.2583, 0.3509, 1.3604], + device='cuda:1'), covar=tensor([0.1982, 0.1445, 0.0404, 0.1346, 0.3230, 0.0449, 0.2472, 0.1455], + device='cuda:1'), in_proj_covar=tensor([0.0183, 0.0190, 0.0121, 0.0218, 0.0266, 0.0129, 0.0165, 0.0184], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 21:45:09,741 INFO [train.py:901] (1/4) Epoch 18, batch 5000, loss[loss=0.2378, simple_loss=0.3051, pruned_loss=0.08521, over 7516.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2947, pruned_loss=0.06686, over 1614229.91 frames. ], batch size: 18, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:44,307 INFO [train.py:901] (1/4) Epoch 18, batch 5050, loss[loss=0.2217, simple_loss=0.2996, pruned_loss=0.07192, over 8459.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2954, pruned_loss=0.06685, over 1622744.65 frames. ], batch size: 29, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:54,481 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.384e+02 2.804e+02 3.417e+02 5.925e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 21:46:04,051 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 21:46:06,029 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 21:46:19,171 INFO [train.py:901] (1/4) Epoch 18, batch 5100, loss[loss=0.2177, simple_loss=0.2911, pruned_loss=0.07217, over 8497.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2959, pruned_loss=0.06771, over 1615039.34 frames. ], batch size: 26, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:46:54,248 INFO [train.py:901] (1/4) Epoch 18, batch 5150, loss[loss=0.2382, simple_loss=0.3179, pruned_loss=0.07923, over 8474.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2954, pruned_loss=0.06728, over 1618058.22 frames. 
], batch size: 25, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:04,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.481e+02 3.004e+02 4.323e+02 1.197e+03, threshold=6.009e+02, percent-clipped=7.0 +2023-02-06 21:47:07,363 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7001, 2.7427, 2.0161, 2.4940, 2.4084, 1.7952, 2.2809, 2.3468], + device='cuda:1'), covar=tensor([0.1388, 0.0422, 0.1105, 0.0588, 0.0690, 0.1360, 0.0927, 0.0977], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0237, 0.0327, 0.0306, 0.0300, 0.0332, 0.0346, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 21:47:14,692 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:47:29,005 INFO [train.py:901] (1/4) Epoch 18, batch 5200, loss[loss=0.192, simple_loss=0.2711, pruned_loss=0.05641, over 7667.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2952, pruned_loss=0.06721, over 1615244.92 frames. ], batch size: 19, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:37,468 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2639, 1.8622, 2.4046, 2.0066, 2.2459, 2.1995, 1.9606, 1.1297], + device='cuda:1'), covar=tensor([0.4631, 0.4203, 0.1712, 0.3148, 0.2321, 0.2681, 0.1783, 0.4788], + device='cuda:1'), in_proj_covar=tensor([0.0926, 0.0942, 0.0778, 0.0908, 0.0980, 0.0861, 0.0731, 0.0809], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:47:55,319 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:02,984 INFO [train.py:901] (1/4) Epoch 18, batch 5250, loss[loss=0.223, simple_loss=0.3062, pruned_loss=0.06991, over 8327.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2949, pruned_loss=0.06717, over 1615862.92 frames. ], batch size: 25, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:03,213 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:03,671 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 21:48:11,774 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1111, 1.3571, 4.3122, 1.6494, 3.7596, 3.5577, 3.8237, 3.7490], + device='cuda:1'), covar=tensor([0.0660, 0.4763, 0.0509, 0.3948, 0.1185, 0.0967, 0.0712, 0.0670], + device='cuda:1'), in_proj_covar=tensor([0.0589, 0.0618, 0.0666, 0.0594, 0.0674, 0.0579, 0.0572, 0.0639], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:48:13,216 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:14,366 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.491e+02 3.102e+02 3.692e+02 6.533e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 21:48:21,233 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:37,892 INFO [train.py:901] (1/4) Epoch 18, batch 5300, loss[loss=0.2409, simple_loss=0.329, pruned_loss=0.07641, over 8529.00 frames. 
], tot_loss[loss=0.2149, simple_loss=0.2951, pruned_loss=0.06735, over 1619472.89 frames. ], batch size: 28, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:39,192 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 21:48:52,079 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4705, 1.8990, 2.9290, 1.3197, 2.1770, 1.8502, 1.6085, 2.0700], + device='cuda:1'), covar=tensor([0.2141, 0.2666, 0.0966, 0.5000, 0.1972, 0.3547, 0.2499, 0.2546], + device='cuda:1'), in_proj_covar=tensor([0.0514, 0.0577, 0.0550, 0.0620, 0.0635, 0.0584, 0.0513, 0.0627], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:48:59,489 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:49:12,896 INFO [train.py:901] (1/4) Epoch 18, batch 5350, loss[loss=0.2501, simple_loss=0.3188, pruned_loss=0.09068, over 7287.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2949, pruned_loss=0.06735, over 1618853.87 frames. ], batch size: 73, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:49:22,771 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.581e+02 3.011e+02 3.651e+02 7.168e+02, threshold=6.023e+02, percent-clipped=3.0 +2023-02-06 21:49:48,114 INFO [train.py:901] (1/4) Epoch 18, batch 5400, loss[loss=0.1871, simple_loss=0.2639, pruned_loss=0.05515, over 7440.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2935, pruned_loss=0.06667, over 1611998.49 frames. ], batch size: 17, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:11,410 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5568, 4.5244, 4.1385, 1.9105, 4.0152, 4.1555, 4.0813, 4.0146], + device='cuda:1'), covar=tensor([0.0665, 0.0501, 0.0866, 0.4716, 0.0855, 0.0866, 0.1153, 0.0730], + device='cuda:1'), in_proj_covar=tensor([0.0507, 0.0422, 0.0420, 0.0521, 0.0412, 0.0420, 0.0404, 0.0369], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:50:22,756 INFO [train.py:901] (1/4) Epoch 18, batch 5450, loss[loss=0.216, simple_loss=0.3074, pruned_loss=0.0623, over 8357.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2939, pruned_loss=0.06634, over 1617273.12 frames. ], batch size: 24, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:33,556 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.381e+02 3.003e+02 4.378e+02 7.690e+02, threshold=6.006e+02, percent-clipped=4.0 +2023-02-06 21:50:33,771 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2747, 2.6508, 2.2030, 3.6176, 1.8034, 2.0879, 2.1976, 2.8763], + device='cuda:1'), covar=tensor([0.0713, 0.0761, 0.0881, 0.0339, 0.1052, 0.1138, 0.0995, 0.0708], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0199, 0.0252, 0.0213, 0.0207, 0.0247, 0.0254, 0.0212], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 21:50:44,189 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-06 21:50:44,889 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0 +2023-02-06 21:50:50,020 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 21:50:51,828 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 21:50:58,892 INFO [train.py:901] (1/4) Epoch 18, batch 5500, loss[loss=0.209, simple_loss=0.2919, pruned_loss=0.06303, over 8245.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2934, pruned_loss=0.06584, over 1617451.82 frames. ], batch size: 22, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:00,980 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6910, 2.0926, 3.2751, 1.5099, 2.5211, 2.0673, 1.8597, 2.4815], + device='cuda:1'), covar=tensor([0.1753, 0.2323, 0.0800, 0.4127, 0.1800, 0.2944, 0.1998, 0.2221], + device='cuda:1'), in_proj_covar=tensor([0.0510, 0.0572, 0.0548, 0.0616, 0.0631, 0.0579, 0.0510, 0.0623], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:51:08,975 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9321, 1.4699, 1.7857, 1.5106, 1.0423, 1.5292, 2.2933, 2.0009], + device='cuda:1'), covar=tensor([0.0450, 0.1354, 0.1804, 0.1494, 0.0619, 0.1589, 0.0626, 0.0622], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0158, 0.0100, 0.0162, 0.0114, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 21:51:14,989 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:51:33,201 INFO [train.py:901] (1/4) Epoch 18, batch 5550, loss[loss=0.1915, simple_loss=0.2807, pruned_loss=0.0512, over 8185.00 frames. ], tot_loss[loss=0.212, simple_loss=0.293, pruned_loss=0.06544, over 1620006.28 frames. ], batch size: 23, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:43,334 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.398e+02 2.938e+02 3.826e+02 1.126e+03, threshold=5.876e+02, percent-clipped=10.0 +2023-02-06 21:52:08,271 INFO [train.py:901] (1/4) Epoch 18, batch 5600, loss[loss=0.2455, simple_loss=0.325, pruned_loss=0.08307, over 8576.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2932, pruned_loss=0.06585, over 1614363.35 frames. ], batch size: 31, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:36,265 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:52:43,528 INFO [train.py:901] (1/4) Epoch 18, batch 5650, loss[loss=0.1802, simple_loss=0.2547, pruned_loss=0.05283, over 7701.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2929, pruned_loss=0.06558, over 1615071.36 frames. ], batch size: 18, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:54,554 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.315e+02 3.071e+02 3.627e+02 7.364e+02, threshold=6.141e+02, percent-clipped=4.0 +2023-02-06 21:53:00,418 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 21:53:01,153 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143086.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:53:18,751 INFO [train.py:901] (1/4) Epoch 18, batch 5700, loss[loss=0.1982, simple_loss=0.2907, pruned_loss=0.05283, over 8721.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2935, pruned_loss=0.06614, over 1616186.70 frames. 
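The `Exclude cut with ID ... Duration: ...` warnings in this part of the log all involve utterances longer than roughly 25 s (and, further down, shorter than roughly 1 s). That is consistent with the duration filter used by the icefall recipes; a sketch in which the exact bounds are inferred from this log rather than copied from the recipe source:

```python
def remove_short_and_long_utt(c) -> bool:
    # Keep a cut only if its duration fits the training window.  The
    # 1.0 s / 25.0 s bounds are inferred from the excluded durations in
    # this log (e.g. 0.92225 s and 31.02225 s), not read from train.py.
    return 1.0 <= c.duration <= 25.0

train_cuts = train_cuts.filter(remove_short_and_long_utt)  # lhotse CutSet.filter
```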
], batch size: 39, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:53:21,077 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-06 21:53:53,727 INFO [train.py:901] (1/4) Epoch 18, batch 5750, loss[loss=0.2454, simple_loss=0.335, pruned_loss=0.07796, over 8487.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2927, pruned_loss=0.06594, over 1614514.83 frames. ], batch size: 27, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:54:04,019 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.423e+02 2.839e+02 3.621e+02 5.889e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 21:54:04,725 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 21:54:21,874 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:54:28,669 INFO [train.py:901] (1/4) Epoch 18, batch 5800, loss[loss=0.2235, simple_loss=0.2958, pruned_loss=0.07564, over 7807.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2932, pruned_loss=0.06628, over 1610795.81 frames. ], batch size: 20, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:54:29,535 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5413, 1.3030, 4.7697, 1.7765, 4.2469, 3.9877, 4.2778, 4.1799], + device='cuda:1'), covar=tensor([0.0587, 0.4427, 0.0450, 0.3792, 0.0993, 0.0977, 0.0549, 0.0575], + device='cuda:1'), in_proj_covar=tensor([0.0595, 0.0623, 0.0672, 0.0604, 0.0680, 0.0584, 0.0576, 0.0646], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 21:54:59,278 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0354, 2.3363, 3.4970, 1.8786, 2.9612, 2.4068, 2.1905, 2.7976], + device='cuda:1'), covar=tensor([0.1515, 0.2215, 0.0800, 0.3660, 0.1460, 0.2494, 0.1826, 0.2125], + device='cuda:1'), in_proj_covar=tensor([0.0510, 0.0572, 0.0547, 0.0618, 0.0631, 0.0577, 0.0511, 0.0622], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 21:55:04,397 INFO [train.py:901] (1/4) Epoch 18, batch 5850, loss[loss=0.177, simple_loss=0.2542, pruned_loss=0.04988, over 7419.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2939, pruned_loss=0.06635, over 1614830.11 frames. ], batch size: 17, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:15,560 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.467e+02 2.892e+02 3.630e+02 6.628e+02, threshold=5.783e+02, percent-clipped=2.0 +2023-02-06 21:55:21,404 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.65 vs. limit=5.0 +2023-02-06 21:55:25,402 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 21:55:36,559 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143306.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:55:39,803 INFO [train.py:901] (1/4) Epoch 18, batch 5900, loss[loss=0.2015, simple_loss=0.2793, pruned_loss=0.06185, over 8085.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2933, pruned_loss=0.06603, over 1611908.69 frames. 
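Each `loss[...]` entry reports the combined objective alongside its two pruned-RNN-T components, and the relation `loss = 0.5 * simple_loss + pruned_loss` holds throughout (for example 0.5 * 0.2542 + 0.04988 ≈ 0.177 in the batch-5850 entry above). A sketch of that combination; the scale names follow icefall conventions and the values are inferred from this log:

```python
def combine_transducer_losses(
    simple_loss: float,
    pruned_loss: float,
    simple_loss_scale: float = 0.5,
    pruned_loss_scale: float = 1.0,
) -> float:
    # Reproduces the printed totals, e.g. 0.5 * 0.2542 + 0.04988 = 0.17698.
    return simple_loss_scale * simple_loss + pruned_loss_scale * pruned_loss
```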
], batch size: 21, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:53,322 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143331.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:56:10,806 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 21:56:14,464 INFO [train.py:901] (1/4) Epoch 18, batch 5950, loss[loss=0.2257, simple_loss=0.3055, pruned_loss=0.07297, over 8373.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.294, pruned_loss=0.06665, over 1612195.92 frames. ], batch size: 24, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:25,158 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.388e+02 2.875e+02 3.741e+02 7.794e+02, threshold=5.749e+02, percent-clipped=3.0 +2023-02-06 21:56:37,556 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3803, 2.5417, 1.8011, 2.2361, 2.0713, 1.5086, 1.9692, 2.0882], + device='cuda:1'), covar=tensor([0.1467, 0.0449, 0.1189, 0.0622, 0.0788, 0.1540, 0.1005, 0.0850], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0236, 0.0328, 0.0306, 0.0298, 0.0333, 0.0344, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 21:56:49,478 INFO [train.py:901] (1/4) Epoch 18, batch 6000, loss[loss=0.2076, simple_loss=0.2867, pruned_loss=0.06421, over 7234.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2936, pruned_loss=0.06635, over 1613417.97 frames. ], batch size: 16, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:49,478 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 21:57:03,436 INFO [train.py:935] (1/4) Epoch 18, validation: loss=0.1765, simple_loss=0.2767, pruned_loss=0.03814, over 944034.00 frames. +2023-02-06 21:57:03,437 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 21:57:08,526 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143418.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:57:35,797 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:38,431 INFO [train.py:901] (1/4) Epoch 18, batch 6050, loss[loss=0.2134, simple_loss=0.3002, pruned_loss=0.06332, over 8489.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2935, pruned_loss=0.06619, over 1610894.80 frames. ], batch size: 26, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:57:48,577 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.412e+02 3.060e+02 4.409e+02 1.030e+03, threshold=6.120e+02, percent-clipped=9.0 +2023-02-06 21:57:53,572 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143482.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:59,600 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143491.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:58:13,406 INFO [train.py:901] (1/4) Epoch 18, batch 6100, loss[loss=0.191, simple_loss=0.2718, pruned_loss=0.05513, over 8041.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.294, pruned_loss=0.06607, over 1618671.38 frames. ], batch size: 22, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:58:39,427 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-06 21:58:49,839 INFO [train.py:901] (1/4) Epoch 18, batch 6150, loss[loss=0.1919, simple_loss=0.2712, pruned_loss=0.05626, over 8023.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2936, pruned_loss=0.06612, over 1616678.68 frames. ], batch size: 22, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:00,212 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.359e+02 3.030e+02 3.820e+02 7.737e+02, threshold=6.061e+02, percent-clipped=3.0 +2023-02-06 21:59:02,577 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0755, 1.0148, 1.2935, 0.9806, 0.8484, 1.2945, 0.2955, 0.9567], + device='cuda:1'), covar=tensor([0.1616, 0.0992, 0.0360, 0.0782, 0.2282, 0.0467, 0.1916, 0.1169], + device='cuda:1'), in_proj_covar=tensor([0.0181, 0.0188, 0.0120, 0.0216, 0.0263, 0.0128, 0.0165, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 21:59:25,641 INFO [train.py:901] (1/4) Epoch 18, batch 6200, loss[loss=0.2152, simple_loss=0.2942, pruned_loss=0.06806, over 8240.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2935, pruned_loss=0.06601, over 1618412.55 frames. ], batch size: 22, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:33,676 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0894, 3.0793, 2.2254, 2.5109, 2.4772, 2.1128, 2.3860, 2.6696], + device='cuda:1'), covar=tensor([0.1224, 0.0310, 0.0848, 0.0634, 0.0576, 0.1098, 0.0799, 0.0823], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0234, 0.0325, 0.0305, 0.0296, 0.0330, 0.0342, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 22:00:01,324 INFO [train.py:901] (1/4) Epoch 18, batch 6250, loss[loss=0.1823, simple_loss=0.2659, pruned_loss=0.04931, over 7967.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2938, pruned_loss=0.06607, over 1620172.73 frames. ], batch size: 21, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:00:12,414 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.460e+02 3.089e+02 4.040e+02 1.017e+03, threshold=6.178e+02, percent-clipped=5.0 +2023-02-06 22:00:37,038 INFO [train.py:901] (1/4) Epoch 18, batch 6300, loss[loss=0.3063, simple_loss=0.3568, pruned_loss=0.1279, over 7197.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2939, pruned_loss=0.06608, over 1620259.87 frames. 
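The `Computing validation loss` block a few entries above (epoch 18, batch 6000) shows the trainer pausing for a full pass over the held-out set and then reporting peak GPU memory. A minimal sketch of that step; `compute_loss` is a hypothetical helper standing in for the recipe's loss function:

```python
import logging
import torch

def run_validation(model, valid_dl, device, compute_loss) -> None:
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_dl:
            loss, num_frames = compute_loss(model, batch, device)
            tot_loss += loss.item()
            tot_frames += num_frames
    model.train()
    logging.info(f"validation: loss={tot_loss / tot_frames:.4f}, "
                 f"over {tot_frames:.2f} frames.")
    logging.info("Maximum memory allocated so far is "
                 f"{torch.cuda.max_memory_allocated(device) // (1024 ** 2)}MB")
```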
], batch size: 72, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:00:38,579 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5558, 4.5609, 4.1143, 1.9879, 4.0292, 4.1901, 4.1468, 3.9054], + device='cuda:1'), covar=tensor([0.0725, 0.0575, 0.1114, 0.4948, 0.0886, 0.0920, 0.1382, 0.0773], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0425, 0.0425, 0.0524, 0.0415, 0.0421, 0.0408, 0.0374], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:00:54,435 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1830, 2.5396, 2.8659, 1.5676, 3.1499, 1.8065, 1.5076, 2.0414], + device='cuda:1'), covar=tensor([0.0720, 0.0324, 0.0254, 0.0694, 0.0364, 0.0802, 0.0895, 0.0470], + device='cuda:1'), in_proj_covar=tensor([0.0435, 0.0372, 0.0321, 0.0432, 0.0361, 0.0522, 0.0378, 0.0397], + device='cuda:1'), out_proj_covar=tensor([1.1797e-04, 9.8280e-05, 8.5080e-05, 1.1498e-04, 9.6080e-05, 1.4946e-04, + 1.0271e-04, 1.0566e-04], device='cuda:1') +2023-02-06 22:01:11,904 INFO [train.py:901] (1/4) Epoch 18, batch 6350, loss[loss=0.1885, simple_loss=0.2728, pruned_loss=0.0521, over 7939.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2934, pruned_loss=0.06569, over 1619247.58 frames. ], batch size: 20, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:13,343 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143762.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:01:22,540 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.204e+02 2.882e+02 3.589e+02 6.333e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 22:01:47,364 INFO [train.py:901] (1/4) Epoch 18, batch 6400, loss[loss=0.2311, simple_loss=0.3036, pruned_loss=0.07925, over 7786.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2939, pruned_loss=0.06648, over 1614098.41 frames. ], batch size: 19, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:02:04,437 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143835.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:02:21,085 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 22:02:21,890 INFO [train.py:901] (1/4) Epoch 18, batch 6450, loss[loss=0.1952, simple_loss=0.2771, pruned_loss=0.05666, over 7817.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2939, pruned_loss=0.06629, over 1616056.83 frames. ], batch size: 20, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:02:33,448 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.452e+02 2.973e+02 3.704e+02 1.405e+03, threshold=5.946e+02, percent-clipped=1.0 +2023-02-06 22:02:34,290 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143877.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:02:57,286 INFO [train.py:901] (1/4) Epoch 18, batch 6500, loss[loss=0.22, simple_loss=0.3125, pruned_loss=0.06374, over 8501.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.295, pruned_loss=0.06729, over 1618828.19 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:24,076 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143950.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:03:31,368 INFO [train.py:901] (1/4) Epoch 18, batch 6550, loss[loss=0.2331, simple_loss=0.3171, pruned_loss=0.07453, over 8477.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2952, pruned_loss=0.06764, over 1620034.01 frames. 
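The `zipformer.py:1185` entries above track each encoder stack's warmup window (`warmup_begin`/`warmup_end`, in batches) and occasionally report `num_to_drop=1` with a `layers_to_drop` set, i.e. stochastic layer skipping. A heavily hedged sketch of the sampling step only; the drop probability and its relationship to the warmup window are assumptions:

```python
import random

def pick_layers_to_drop(num_layers: int, drop_prob: float = 0.075) -> set:
    # Independently skip each encoder layer with a small probability:
    # most batches above log num_to_drop=0, a few log num_to_drop=1
    # with e.g. layers_to_drop={1}.  The 0.075 value is an assumption.
    return {i for i in range(num_layers) if random.random() < drop_prob}
```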
], batch size: 29, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:41,858 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.926e+02 2.526e+02 3.154e+02 3.765e+02 8.734e+02, threshold=6.308e+02, percent-clipped=5.0 +2023-02-06 22:03:48,839 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 22:04:08,871 INFO [train.py:901] (1/4) Epoch 18, batch 6600, loss[loss=0.1714, simple_loss=0.2546, pruned_loss=0.04416, over 7542.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2953, pruned_loss=0.06763, over 1619461.80 frames. ], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:10,899 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 22:04:37,996 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 22:04:43,667 INFO [train.py:901] (1/4) Epoch 18, batch 6650, loss[loss=0.211, simple_loss=0.2973, pruned_loss=0.06234, over 8340.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2947, pruned_loss=0.06718, over 1619671.99 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:54,721 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.298e+02 3.022e+02 3.555e+02 7.360e+02, threshold=6.043e+02, percent-clipped=4.0 +2023-02-06 22:05:19,671 INFO [train.py:901] (1/4) Epoch 18, batch 6700, loss[loss=0.176, simple_loss=0.2642, pruned_loss=0.04391, over 7972.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2937, pruned_loss=0.06651, over 1617167.42 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:05:34,397 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144133.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:05:51,957 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144158.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:05:53,839 INFO [train.py:901] (1/4) Epoch 18, batch 6750, loss[loss=0.1799, simple_loss=0.2682, pruned_loss=0.04581, over 8621.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2934, pruned_loss=0.06645, over 1615664.02 frames. ], batch size: 39, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:00,259 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. 
limit=2.0 +2023-02-06 22:06:03,938 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.293e+02 3.003e+02 3.717e+02 7.578e+02, threshold=6.007e+02, percent-clipped=1.0 +2023-02-06 22:06:09,033 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9983, 2.2512, 1.8983, 2.7706, 1.3255, 1.6627, 1.9700, 2.2444], + device='cuda:1'), covar=tensor([0.0776, 0.0727, 0.0998, 0.0416, 0.1127, 0.1341, 0.0933, 0.0761], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0200, 0.0253, 0.0214, 0.0209, 0.0250, 0.0254, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:06:17,309 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6185, 1.8345, 2.0360, 1.3158, 2.0999, 1.4727, 0.5127, 1.8077], + device='cuda:1'), covar=tensor([0.0538, 0.0354, 0.0236, 0.0525, 0.0358, 0.0868, 0.0799, 0.0301], + device='cuda:1'), in_proj_covar=tensor([0.0440, 0.0377, 0.0325, 0.0437, 0.0366, 0.0526, 0.0382, 0.0403], + device='cuda:1'), out_proj_covar=tensor([1.1951e-04, 9.9599e-05, 8.6145e-05, 1.1622e-04, 9.7435e-05, 1.5044e-04, + 1.0377e-04, 1.0759e-04], device='cuda:1') +2023-02-06 22:06:25,446 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144206.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:06:27,267 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 22:06:28,568 INFO [train.py:901] (1/4) Epoch 18, batch 6800, loss[loss=0.2034, simple_loss=0.2874, pruned_loss=0.05972, over 8611.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2919, pruned_loss=0.06583, over 1614929.57 frames. ], batch size: 49, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:31,424 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3196, 1.9187, 1.3456, 3.0212, 1.4059, 1.1681, 2.0777, 2.0112], + device='cuda:1'), covar=tensor([0.1702, 0.1141, 0.2191, 0.0379, 0.1270, 0.2069, 0.0929, 0.0927], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0200, 0.0252, 0.0213, 0.0208, 0.0249, 0.0253, 0.0212], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:06:42,854 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144231.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:07:03,971 INFO [train.py:901] (1/4) Epoch 18, batch 6850, loss[loss=0.2319, simple_loss=0.3098, pruned_loss=0.07705, over 8497.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2932, pruned_loss=0.06645, over 1613581.22 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:07:13,993 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.379e+02 2.937e+02 3.634e+02 6.722e+02, threshold=5.873e+02, percent-clipped=2.0 +2023-02-06 22:07:17,336 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 22:07:25,243 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 22:07:38,061 INFO [train.py:901] (1/4) Epoch 18, batch 6900, loss[loss=0.2327, simple_loss=0.297, pruned_loss=0.08416, over 7716.00 frames. ], tot_loss[loss=0.214, simple_loss=0.294, pruned_loss=0.06694, over 1616267.56 frames. 
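The `scaling.py:679` entries above compare a per-module whitening `metric` against a `limit` (2.0 for the grouped checks, 5.0 for the `num_groups=1` full-channel ones). One way to compute such a metric, equal to 1.0 when the grouped feature covariance is proportional to the identity and growing as channels become correlated; this is a sketch, not necessarily the recipe's exact implementation:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """x: (num_frames, num_channels) activations from one module."""
    num_frames, num_channels = x.shape
    x = x.reshape(num_frames, num_groups, num_channels // num_groups)
    x = x.transpose(0, 1)                      # (groups, frames, chans)
    x = x - x.mean(dim=1, keepdim=True)
    cov = x.transpose(1, 2) @ x / num_frames   # per-group covariance
    eigs = torch.linalg.eigvalsh(cov)
    # mean(eig^2) / mean(eig)^2 == 1.0 iff all eigenvalues are equal.
    return (eigs ** 2).mean() / eigs.mean() ** 2
```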
], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:13,482 INFO [train.py:901] (1/4) Epoch 18, batch 6950, loss[loss=0.2064, simple_loss=0.294, pruned_loss=0.05933, over 8045.00 frames. ], tot_loss[loss=0.214, simple_loss=0.294, pruned_loss=0.06699, over 1616876.73 frames. ], batch size: 22, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:21,217 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 22:08:24,083 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.398e+02 2.919e+02 3.864e+02 7.610e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-06 22:08:25,486 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 22:08:47,777 INFO [train.py:901] (1/4) Epoch 18, batch 7000, loss[loss=0.1583, simple_loss=0.2467, pruned_loss=0.03491, over 7799.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2925, pruned_loss=0.06674, over 1607183.52 frames. ], batch size: 19, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:09:01,099 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=144429.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:09:22,580 INFO [train.py:901] (1/4) Epoch 18, batch 7050, loss[loss=0.2136, simple_loss=0.2918, pruned_loss=0.06771, over 7437.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2925, pruned_loss=0.06668, over 1607673.40 frames. ], batch size: 17, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:09:32,284 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1334, 1.2872, 1.6316, 1.2485, 0.7123, 1.3929, 1.2278, 1.0185], + device='cuda:1'), covar=tensor([0.0638, 0.1278, 0.1605, 0.1501, 0.0568, 0.1437, 0.0695, 0.0733], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0162, 0.0113, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 22:09:34,230 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 2.534e+02 2.937e+02 3.689e+02 8.247e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 22:09:37,836 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6052, 2.0947, 3.2625, 1.4300, 2.5439, 2.0867, 1.7640, 2.4252], + device='cuda:1'), covar=tensor([0.1804, 0.2440, 0.0857, 0.4357, 0.1884, 0.3116, 0.2092, 0.2398], + device='cuda:1'), in_proj_covar=tensor([0.0512, 0.0577, 0.0549, 0.0620, 0.0637, 0.0581, 0.0513, 0.0626], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:09:58,439 INFO [train.py:901] (1/4) Epoch 18, batch 7100, loss[loss=0.2204, simple_loss=0.2979, pruned_loss=0.07143, over 8339.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2938, pruned_loss=0.06696, over 1609205.59 frames. 
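The `grad_scale` field flips between 8.0 and 16.0 around batch 7000 above, which is the signature of dynamic loss scaling in mixed-precision training: the scale doubles after a run of overflow-free steps and halves when an overflow is detected. A sketch using PyTorch's stock scaler; the init and growth settings are assumptions:

```python
import torch

def train_with_amp(model, optimizer, train_dl, compute_loss):
    scaler = torch.cuda.amp.GradScaler(init_scale=8.0, growth_interval=1000)
    for batch in train_dl:
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            loss = compute_loss(model, batch)
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()  # doubles the scale (8.0 -> 16.0) when stable,
                         # halves it again if a step overflows
```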
], batch size: 26, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:10:12,489 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9787, 6.0214, 5.3409, 2.8596, 5.3553, 5.7367, 5.6151, 5.4055], + device='cuda:1'), covar=tensor([0.0447, 0.0330, 0.0859, 0.3632, 0.0710, 0.0650, 0.0989, 0.0482], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0426, 0.0428, 0.0528, 0.0418, 0.0423, 0.0411, 0.0372], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:10:30,417 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9631, 3.7430, 2.3495, 2.8885, 2.7341, 2.0802, 2.7706, 2.9235], + device='cuda:1'), covar=tensor([0.1721, 0.0379, 0.0991, 0.0778, 0.0696, 0.1314, 0.1004, 0.0979], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0233, 0.0324, 0.0303, 0.0296, 0.0330, 0.0341, 0.0314], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 22:10:33,593 INFO [train.py:901] (1/4) Epoch 18, batch 7150, loss[loss=0.2328, simple_loss=0.3111, pruned_loss=0.07725, over 8453.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2939, pruned_loss=0.0672, over 1606946.04 frames. ], batch size: 27, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:10:43,979 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.263e+02 2.906e+02 3.662e+02 1.305e+03, threshold=5.813e+02, percent-clipped=7.0 +2023-02-06 22:11:10,033 INFO [train.py:901] (1/4) Epoch 18, batch 7200, loss[loss=0.2336, simple_loss=0.2995, pruned_loss=0.08383, over 8303.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2934, pruned_loss=0.06681, over 1610075.55 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:24,807 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1890, 1.3653, 1.6505, 1.2680, 0.7657, 1.4026, 1.2224, 1.0709], + device='cuda:1'), covar=tensor([0.0568, 0.1225, 0.1637, 0.1431, 0.0550, 0.1423, 0.0677, 0.0678], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0157, 0.0099, 0.0162, 0.0113, 0.0139], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 22:11:35,702 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0300, 2.2145, 1.8615, 2.7114, 1.3721, 1.6902, 1.8409, 2.3145], + device='cuda:1'), covar=tensor([0.0633, 0.0724, 0.0831, 0.0331, 0.1093, 0.1158, 0.0908, 0.0666], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0197, 0.0247, 0.0209, 0.0205, 0.0245, 0.0250, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:11:44,464 INFO [train.py:901] (1/4) Epoch 18, batch 7250, loss[loss=0.2244, simple_loss=0.2853, pruned_loss=0.0818, over 7804.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2944, pruned_loss=0.0671, over 1614643.26 frames. ], batch size: 19, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:54,473 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.379e+02 2.816e+02 3.627e+02 9.857e+02, threshold=5.632e+02, percent-clipped=4.0 +2023-02-06 22:12:19,764 INFO [train.py:901] (1/4) Epoch 18, batch 7300, loss[loss=0.1703, simple_loss=0.2543, pruned_loss=0.04316, over 7793.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2936, pruned_loss=0.06632, over 1615713.48 frames. 
], batch size: 19, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:12:53,982 INFO [train.py:901] (1/4) Epoch 18, batch 7350, loss[loss=0.2101, simple_loss=0.2841, pruned_loss=0.06805, over 8713.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2934, pruned_loss=0.06626, over 1621385.50 frames. ], batch size: 39, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:13:02,925 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=144773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:13:04,749 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.484e+02 2.992e+02 3.514e+02 8.978e+02, threshold=5.985e+02, percent-clipped=6.0 +2023-02-06 22:13:08,084 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 22:13:26,891 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 22:13:28,903 INFO [train.py:901] (1/4) Epoch 18, batch 7400, loss[loss=0.2256, simple_loss=0.2992, pruned_loss=0.07604, over 7531.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2932, pruned_loss=0.06611, over 1623123.84 frames. ], batch size: 74, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:04,298 INFO [train.py:901] (1/4) Epoch 18, batch 7450, loss[loss=0.2603, simple_loss=0.3349, pruned_loss=0.09286, over 8425.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2917, pruned_loss=0.06528, over 1621074.17 frames. ], batch size: 48, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:07,790 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 22:14:14,578 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 2.433e+02 3.083e+02 4.140e+02 9.921e+02, threshold=6.167e+02, percent-clipped=3.0 +2023-02-06 22:14:23,398 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=144888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:14:30,666 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9336, 2.0513, 1.8417, 2.5900, 1.2526, 1.5355, 1.8046, 2.1016], + device='cuda:1'), covar=tensor([0.0691, 0.0807, 0.0838, 0.0382, 0.1106, 0.1311, 0.0924, 0.0704], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0197, 0.0246, 0.0210, 0.0205, 0.0244, 0.0250, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:14:38,536 INFO [train.py:901] (1/4) Epoch 18, batch 7500, loss[loss=0.2086, simple_loss=0.2967, pruned_loss=0.06022, over 8363.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.292, pruned_loss=0.06551, over 1622475.00 frames. ], batch size: 24, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:12,908 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7877, 1.4594, 2.8559, 1.3963, 2.1856, 3.0318, 3.2026, 2.6246], + device='cuda:1'), covar=tensor([0.1048, 0.1616, 0.0401, 0.2085, 0.0946, 0.0309, 0.0571, 0.0621], + device='cuda:1'), in_proj_covar=tensor([0.0284, 0.0317, 0.0277, 0.0308, 0.0296, 0.0256, 0.0401, 0.0300], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 22:15:14,116 INFO [train.py:901] (1/4) Epoch 18, batch 7550, loss[loss=0.1615, simple_loss=0.2422, pruned_loss=0.04042, over 7430.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2924, pruned_loss=0.06568, over 1622739.94 frames. 
], batch size: 17, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:24,764 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.361e+02 2.893e+02 3.293e+02 8.578e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 22:15:48,829 INFO [train.py:901] (1/4) Epoch 18, batch 7600, loss[loss=0.2067, simple_loss=0.2958, pruned_loss=0.05881, over 8329.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2927, pruned_loss=0.06558, over 1623780.66 frames. ], batch size: 25, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:51,069 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:12,832 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:24,428 INFO [train.py:901] (1/4) Epoch 18, batch 7650, loss[loss=0.2416, simple_loss=0.3157, pruned_loss=0.08373, over 8453.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2913, pruned_loss=0.06485, over 1619678.46 frames. ], batch size: 25, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:16:35,692 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.409e+02 3.204e+02 3.806e+02 7.453e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 22:16:40,595 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:58,568 INFO [train.py:901] (1/4) Epoch 18, batch 7700, loss[loss=0.2334, simple_loss=0.3086, pruned_loss=0.07913, over 8019.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2929, pruned_loss=0.06549, over 1623429.45 frames. ], batch size: 22, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:16,299 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 22:17:21,897 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145144.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:33,661 INFO [train.py:901] (1/4) Epoch 18, batch 7750, loss[loss=0.2037, simple_loss=0.2985, pruned_loss=0.05448, over 8249.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2928, pruned_loss=0.06567, over 1618878.89 frames. ], batch size: 24, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:40,034 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:44,036 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4628, 1.6460, 4.5572, 2.1057, 2.5697, 5.1843, 5.2521, 4.5773], + device='cuda:1'), covar=tensor([0.1097, 0.1811, 0.0240, 0.1835, 0.1124, 0.0164, 0.0261, 0.0519], + device='cuda:1'), in_proj_covar=tensor([0.0283, 0.0314, 0.0276, 0.0306, 0.0294, 0.0255, 0.0399, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 22:17:45,202 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.617e+02 3.101e+02 3.765e+02 9.296e+02, threshold=6.202e+02, percent-clipped=3.0 +2023-02-06 22:18:08,805 INFO [train.py:901] (1/4) Epoch 18, batch 7800, loss[loss=0.1827, simple_loss=0.2798, pruned_loss=0.04278, over 8245.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2919, pruned_loss=0.06555, over 1614987.57 frames. 
], batch size: 24, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:18:30,125 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6322, 1.5859, 2.1485, 1.5386, 1.2098, 2.0541, 0.3638, 1.3603], + device='cuda:1'), covar=tensor([0.1648, 0.1389, 0.0348, 0.1128, 0.2905, 0.0466, 0.2112, 0.1299], + device='cuda:1'), in_proj_covar=tensor([0.0181, 0.0190, 0.0121, 0.0215, 0.0266, 0.0129, 0.0165, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 22:18:36,372 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6254, 1.5603, 2.0660, 1.3888, 1.2248, 2.0326, 0.3601, 1.2510], + device='cuda:1'), covar=tensor([0.1646, 0.1407, 0.0349, 0.1183, 0.2830, 0.0472, 0.2184, 0.1473], + device='cuda:1'), in_proj_covar=tensor([0.0181, 0.0190, 0.0121, 0.0215, 0.0266, 0.0129, 0.0165, 0.0182], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 22:18:42,841 INFO [train.py:901] (1/4) Epoch 18, batch 7850, loss[loss=0.2494, simple_loss=0.3259, pruned_loss=0.08644, over 8516.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2927, pruned_loss=0.06596, over 1616880.25 frames. ], batch size: 28, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:18:53,261 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.477e+02 2.948e+02 3.643e+02 1.044e+03, threshold=5.895e+02, percent-clipped=9.0 +2023-02-06 22:19:03,116 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.34 vs. limit=5.0 +2023-02-06 22:19:16,121 INFO [train.py:901] (1/4) Epoch 18, batch 7900, loss[loss=0.2099, simple_loss=0.3046, pruned_loss=0.05758, over 8562.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2933, pruned_loss=0.0665, over 1616879.22 frames. ], batch size: 34, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:28,931 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5824, 1.8097, 2.0101, 1.2684, 2.0788, 1.3960, 0.5416, 1.7847], + device='cuda:1'), covar=tensor([0.0514, 0.0329, 0.0230, 0.0503, 0.0321, 0.0825, 0.0801, 0.0248], + device='cuda:1'), in_proj_covar=tensor([0.0439, 0.0380, 0.0324, 0.0433, 0.0362, 0.0526, 0.0382, 0.0404], + device='cuda:1'), out_proj_covar=tensor([1.1925e-04, 1.0051e-04, 8.5804e-05, 1.1502e-04, 9.6359e-05, 1.5044e-04, + 1.0373e-04, 1.0803e-04], device='cuda:1') +2023-02-06 22:19:44,169 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4779, 2.3437, 3.2568, 2.4817, 3.0164, 2.4962, 2.1718, 1.7751], + device='cuda:1'), covar=tensor([0.5185, 0.5014, 0.1796, 0.3680, 0.2521, 0.2929, 0.1932, 0.5568], + device='cuda:1'), in_proj_covar=tensor([0.0939, 0.0956, 0.0783, 0.0921, 0.0988, 0.0876, 0.0737, 0.0817], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 22:19:47,150 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:19:49,086 INFO [train.py:901] (1/4) Epoch 18, batch 7950, loss[loss=0.2348, simple_loss=0.3214, pruned_loss=0.07409, over 8478.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.293, pruned_loss=0.06606, over 1611670.59 frames. 
], batch size: 29, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:58,001 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7958, 2.0761, 1.6279, 2.5842, 1.2196, 1.4744, 1.9038, 2.0264], + device='cuda:1'), covar=tensor([0.0771, 0.0748, 0.0931, 0.0347, 0.1077, 0.1305, 0.0815, 0.0760], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0197, 0.0248, 0.0210, 0.0206, 0.0244, 0.0250, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:19:59,836 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.389e+02 3.012e+02 3.869e+02 1.111e+03, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 22:20:07,941 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:08,011 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:23,114 INFO [train.py:901] (1/4) Epoch 18, batch 8000, loss[loss=0.2426, simple_loss=0.3169, pruned_loss=0.08417, over 8683.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2917, pruned_loss=0.06522, over 1614510.45 frames. ], batch size: 39, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:20:34,647 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:57,108 INFO [train.py:901] (1/4) Epoch 18, batch 8050, loss[loss=0.2247, simple_loss=0.292, pruned_loss=0.0787, over 7549.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2899, pruned_loss=0.06528, over 1590885.98 frames. ], batch size: 18, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:21:05,674 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:21:08,160 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.350e+02 2.866e+02 3.408e+02 5.747e+02, threshold=5.732e+02, percent-clipped=0.0 +2023-02-06 22:21:29,502 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 22:21:34,891 INFO [train.py:901] (1/4) Epoch 19, batch 0, loss[loss=0.2417, simple_loss=0.3217, pruned_loss=0.08084, over 8700.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3217, pruned_loss=0.08084, over 8700.00 frames. ], batch size: 34, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:21:34,891 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 22:21:46,551 INFO [train.py:935] (1/4) Epoch 19, validation: loss=0.1782, simple_loss=0.2779, pruned_loss=0.03928, over 944034.00 frames. +2023-02-06 22:21:46,552 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 22:21:54,194 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:03,055 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 22:22:22,467 INFO [train.py:901] (1/4) Epoch 19, batch 50, loss[loss=0.1984, simple_loss=0.2856, pruned_loss=0.05564, over 8353.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2937, pruned_loss=0.06684, over 362714.64 frames. 
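The learning rate steps down from 4.16e-03 to 4.05e-03 exactly at the epoch 18 → 19 boundary above while decaying only slowly within an epoch. That shape matches icefall's Eden schedule; a sketch with the usual recipe defaults, which only approximately reproduce the logged values and should be treated as assumptions:

```python
def eden_lr(base_lr: float, batch: float, epoch: float,
            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
    # Decays with both the global batch count and the epoch, giving the
    # small per-epoch step seen above.  base_lr / lr_batches / lr_epochs
    # are assumed defaults, not values read from this log.
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor
```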
], batch size: 24, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:22,664 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:23,341 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8206, 1.4514, 2.8276, 1.3919, 2.1287, 3.0293, 3.1765, 2.5634], + device='cuda:1'), covar=tensor([0.1025, 0.1647, 0.0444, 0.2139, 0.0996, 0.0304, 0.0643, 0.0612], + device='cuda:1'), in_proj_covar=tensor([0.0281, 0.0312, 0.0275, 0.0303, 0.0293, 0.0253, 0.0396, 0.0296], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 22:22:25,795 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 22:22:40,530 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 22:22:42,317 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 22:22:45,196 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.340e+02 2.977e+02 3.641e+02 7.952e+02, threshold=5.953e+02, percent-clipped=6.0 +2023-02-06 22:22:56,255 INFO [train.py:901] (1/4) Epoch 19, batch 100, loss[loss=0.1941, simple_loss=0.2676, pruned_loss=0.06035, over 7305.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2949, pruned_loss=0.06676, over 640186.66 frames. ], batch size: 16, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:59,330 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7508, 2.2637, 4.2002, 1.5351, 3.0005, 2.2907, 1.8378, 2.8245], + device='cuda:1'), covar=tensor([0.1889, 0.2712, 0.0818, 0.4657, 0.1945, 0.3185, 0.2288, 0.2582], + device='cuda:1'), in_proj_covar=tensor([0.0515, 0.0578, 0.0548, 0.0628, 0.0635, 0.0585, 0.0517, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:23:01,906 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 22:23:10,101 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:20,693 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7284, 4.7064, 4.2140, 2.2590, 4.1561, 4.3504, 4.3300, 4.0467], + device='cuda:1'), covar=tensor([0.0679, 0.0480, 0.0880, 0.4248, 0.0748, 0.0896, 0.1049, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0426, 0.0429, 0.0527, 0.0414, 0.0426, 0.0408, 0.0373], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:23:32,313 INFO [train.py:901] (1/4) Epoch 19, batch 150, loss[loss=0.1784, simple_loss=0.2483, pruned_loss=0.05424, over 7689.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2952, pruned_loss=0.0669, over 859877.67 frames. 
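The `_sp0.9`/`_sp1.1` suffixes on the excluded cut IDs above mark copies of the corpus produced by lhotse speed perturbation, which triples the training data before feature extraction. A minimal sketch; the manifest path is a placeholder:

```python
from lhotse import CutSet

cuts = CutSet.from_file("data/fbank/cuts_train.jsonl.gz")  # placeholder path
# 0.9x and 1.1x copies; their cut IDs gain the _sp0.9 / _sp1.1
# suffixes seen in the exclusion warnings above.
cuts_train = cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1)
```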
], batch size: 18, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:23:40,879 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9753, 1.7458, 2.3615, 2.0175, 2.2920, 1.9746, 1.7385, 1.2433], + device='cuda:1'), covar=tensor([0.5288, 0.4764, 0.1702, 0.3045, 0.2156, 0.2955, 0.2062, 0.4657], + device='cuda:1'), in_proj_covar=tensor([0.0925, 0.0945, 0.0773, 0.0911, 0.0976, 0.0864, 0.0728, 0.0808], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 22:23:46,188 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:57,038 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.454e+02 2.969e+02 3.777e+02 1.176e+03, threshold=5.938e+02, percent-clipped=4.0 +2023-02-06 22:24:07,984 INFO [train.py:901] (1/4) Epoch 19, batch 200, loss[loss=0.2118, simple_loss=0.2929, pruned_loss=0.06531, over 8344.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2944, pruned_loss=0.06658, over 1027778.06 frames. ], batch size: 26, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:24:33,099 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4630, 2.5273, 1.8006, 2.2031, 2.0766, 1.4971, 2.0719, 1.9556], + device='cuda:1'), covar=tensor([0.1488, 0.0392, 0.1090, 0.0553, 0.0663, 0.1400, 0.0839, 0.0965], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0234, 0.0323, 0.0301, 0.0296, 0.0328, 0.0339, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 22:24:33,115 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:35,725 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:43,207 INFO [train.py:901] (1/4) Epoch 19, batch 250, loss[loss=0.1688, simple_loss=0.2505, pruned_loss=0.04355, over 7689.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2936, pruned_loss=0.06552, over 1164728.25 frames. ], batch size: 18, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:24:51,124 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:55,239 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:58,385 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 22:25:06,966 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 22:25:07,541 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.432e+02 3.022e+02 3.893e+02 7.688e+02, threshold=6.043e+02, percent-clipped=6.0 +2023-02-06 22:25:13,282 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:18,657 INFO [train.py:901] (1/4) Epoch 19, batch 300, loss[loss=0.2015, simple_loss=0.2747, pruned_loss=0.06417, over 5620.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2944, pruned_loss=0.06619, over 1261555.60 frames. 
], batch size: 12, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:22,936 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:39,979 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:50,255 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145839.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:25:53,700 INFO [train.py:901] (1/4) Epoch 19, batch 350, loss[loss=0.1896, simple_loss=0.2855, pruned_loss=0.04688, over 8096.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2951, pruned_loss=0.06651, over 1343511.58 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:57,437 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:26:17,675 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.383e+02 2.952e+02 3.795e+02 9.100e+02, threshold=5.904e+02, percent-clipped=6.0 +2023-02-06 22:26:30,025 INFO [train.py:901] (1/4) Epoch 19, batch 400, loss[loss=0.1615, simple_loss=0.2393, pruned_loss=0.0418, over 7529.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2942, pruned_loss=0.06555, over 1407846.24 frames. ], batch size: 18, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:26:32,906 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4578, 4.4775, 4.0250, 1.9145, 3.9783, 4.0936, 3.9563, 3.8319], + device='cuda:1'), covar=tensor([0.0773, 0.0525, 0.1162, 0.5217, 0.0873, 0.0997, 0.1273, 0.0810], + device='cuda:1'), in_proj_covar=tensor([0.0511, 0.0422, 0.0425, 0.0524, 0.0411, 0.0424, 0.0402, 0.0370], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:26:44,748 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2697, 1.5873, 1.7973, 1.4441, 1.2097, 1.6461, 2.0862, 1.9880], + device='cuda:1'), covar=tensor([0.0526, 0.1245, 0.1617, 0.1456, 0.0616, 0.1423, 0.0650, 0.0556], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0161, 0.0112, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 22:27:04,035 INFO [train.py:901] (1/4) Epoch 19, batch 450, loss[loss=0.1679, simple_loss=0.2507, pruned_loss=0.04252, over 8365.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2957, pruned_loss=0.06678, over 1456966.19 frames. ], batch size: 24, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:12,891 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:27:23,357 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.20 vs. limit=5.0 +2023-02-06 22:27:28,526 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.474e+02 2.839e+02 3.457e+02 5.406e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 22:27:40,183 INFO [train.py:901] (1/4) Epoch 19, batch 500, loss[loss=0.1895, simple_loss=0.2632, pruned_loss=0.05786, over 7982.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2935, pruned_loss=0.06613, over 1488719.93 frames. 
], batch size: 21, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:50,094 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:03,401 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5710, 1.3915, 4.7434, 1.7784, 4.2137, 3.8954, 4.2972, 4.1226], + device='cuda:1'), covar=tensor([0.0570, 0.4932, 0.0409, 0.3971, 0.0998, 0.0926, 0.0561, 0.0646], + device='cuda:1'), in_proj_covar=tensor([0.0597, 0.0627, 0.0673, 0.0605, 0.0687, 0.0591, 0.0585, 0.0649], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:28:15,841 INFO [train.py:901] (1/4) Epoch 19, batch 550, loss[loss=0.2071, simple_loss=0.2898, pruned_loss=0.06219, over 8524.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2923, pruned_loss=0.06513, over 1516839.15 frames. ], batch size: 39, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:19,297 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7064, 4.7183, 4.2677, 1.8156, 4.2075, 4.3123, 4.3906, 4.0287], + device='cuda:1'), covar=tensor([0.0625, 0.0468, 0.0941, 0.4593, 0.0743, 0.0916, 0.1053, 0.0710], + device='cuda:1'), in_proj_covar=tensor([0.0519, 0.0428, 0.0430, 0.0530, 0.0416, 0.0430, 0.0411, 0.0375], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:28:35,104 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146071.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:38,934 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.557e+02 3.049e+02 4.000e+02 8.642e+02, threshold=6.099e+02, percent-clipped=4.0 +2023-02-06 22:28:50,773 INFO [train.py:901] (1/4) Epoch 19, batch 600, loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05621, over 8658.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2939, pruned_loss=0.06588, over 1537246.40 frames. ], batch size: 34, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:54,549 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4431, 1.2818, 2.5470, 0.9591, 2.2805, 2.1384, 2.3258, 2.2776], + device='cuda:1'), covar=tensor([0.0737, 0.2930, 0.0986, 0.3432, 0.1142, 0.1080, 0.0670, 0.0757], + device='cuda:1'), in_proj_covar=tensor([0.0594, 0.0624, 0.0670, 0.0600, 0.0684, 0.0588, 0.0582, 0.0646], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 22:28:59,454 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:03,469 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3733, 4.2899, 3.9130, 2.0367, 3.8768, 3.9815, 3.9809, 3.7188], + device='cuda:1'), covar=tensor([0.0770, 0.0563, 0.1035, 0.4750, 0.0816, 0.1071, 0.1260, 0.0812], + device='cuda:1'), in_proj_covar=tensor([0.0517, 0.0425, 0.0427, 0.0526, 0.0413, 0.0427, 0.0407, 0.0373], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:29:11,428 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 22:29:11,608 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:17,642 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:26,663 INFO [train.py:901] (1/4) Epoch 19, batch 650, loss[loss=0.2183, simple_loss=0.3046, pruned_loss=0.06599, over 8021.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2938, pruned_loss=0.06569, over 1558214.07 frames. ], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:29:42,927 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:49,753 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.628e+02 2.995e+02 3.912e+02 8.872e+02, threshold=5.991e+02, percent-clipped=7.0 +2023-02-06 22:29:53,934 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146183.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:30:00,618 INFO [train.py:901] (1/4) Epoch 19, batch 700, loss[loss=0.2443, simple_loss=0.3182, pruned_loss=0.08524, over 8328.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.295, pruned_loss=0.06642, over 1572025.73 frames. ], batch size: 25, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:37,723 INFO [train.py:901] (1/4) Epoch 19, batch 750, loss[loss=0.239, simple_loss=0.3227, pruned_loss=0.07767, over 8495.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2937, pruned_loss=0.0654, over 1586531.66 frames. ], batch size: 26, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:58,076 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 22:31:00,739 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.187e+02 2.733e+02 3.387e+02 1.037e+03, threshold=5.466e+02, percent-clipped=4.0 +2023-02-06 22:31:06,863 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 22:31:11,486 INFO [train.py:901] (1/4) Epoch 19, batch 800, loss[loss=0.2092, simple_loss=0.2839, pruned_loss=0.06723, over 7964.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2943, pruned_loss=0.06611, over 1594955.29 frames. ], batch size: 21, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:14,847 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146298.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:31:15,519 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0349, 1.4527, 1.6441, 1.3550, 0.9987, 1.4658, 1.7496, 1.5376], + device='cuda:1'), covar=tensor([0.0487, 0.1295, 0.1717, 0.1479, 0.0588, 0.1535, 0.0677, 0.0696], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0099, 0.0162, 0.0113, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 22:31:17,172 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.41 vs. limit=5.0 +2023-02-06 22:31:35,743 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:31:47,234 INFO [train.py:901] (1/4) Epoch 19, batch 850, loss[loss=0.2036, simple_loss=0.2916, pruned_loss=0.0578, over 8290.00 frames. 
], tot_loss[loss=0.2129, simple_loss=0.294, pruned_loss=0.0659, over 1601380.67 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:54,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:10,846 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:11,292 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.470e+02 3.071e+02 3.941e+02 1.675e+03, threshold=6.141e+02, percent-clipped=6.0 +2023-02-06 22:32:22,254 INFO [train.py:901] (1/4) Epoch 19, batch 900, loss[loss=0.1937, simple_loss=0.2728, pruned_loss=0.05733, over 8515.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.294, pruned_loss=0.06618, over 1601572.16 frames. ], batch size: 28, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:32:27,826 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:41,200 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8208, 2.2532, 4.0353, 1.5560, 2.9932, 2.1967, 1.8680, 2.6409], + device='cuda:1'), covar=tensor([0.1646, 0.2227, 0.0699, 0.3916, 0.1467, 0.2816, 0.1913, 0.2244], + device='cuda:1'), in_proj_covar=tensor([0.0516, 0.0580, 0.0550, 0.0630, 0.0639, 0.0584, 0.0518, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:32:56,365 INFO [train.py:901] (1/4) Epoch 19, batch 950, loss[loss=0.2148, simple_loss=0.2916, pruned_loss=0.06897, over 8628.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2948, pruned_loss=0.06634, over 1607568.80 frames. ], batch size: 34, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:33:09,722 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.47 vs. limit=5.0 +2023-02-06 22:33:20,835 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1515, 1.3087, 1.5822, 1.2088, 0.9145, 1.3080, 1.5737, 1.5461], + device='cuda:1'), covar=tensor([0.0479, 0.1313, 0.1670, 0.1499, 0.0621, 0.1556, 0.0706, 0.0652], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0112, 0.0140], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 22:33:21,316 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.324e+02 2.987e+02 4.077e+02 9.877e+02, threshold=5.974e+02, percent-clipped=4.0 +2023-02-06 22:33:22,714 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 22:33:32,154 INFO [train.py:901] (1/4) Epoch 19, batch 1000, loss[loss=0.2414, simple_loss=0.3187, pruned_loss=0.08207, over 8410.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2938, pruned_loss=0.06574, over 1608029.56 frames. ], batch size: 48, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:33:44,466 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146511.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:33:54,570 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 22:34:06,365 INFO [train.py:901] (1/4) Epoch 19, batch 1050, loss[loss=0.219, simple_loss=0.3016, pruned_loss=0.0682, over 8364.00 frames. 
], tot_loss[loss=0.2126, simple_loss=0.294, pruned_loss=0.06559, over 1607523.31 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:06,379 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 22:34:14,963 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146554.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:34:31,585 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.403e+02 2.837e+02 3.508e+02 6.242e+02, threshold=5.674e+02, percent-clipped=1.0 +2023-02-06 22:34:32,762 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-06 22:34:33,867 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146579.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:34:44,087 INFO [train.py:901] (1/4) Epoch 19, batch 1100, loss[loss=0.1983, simple_loss=0.2897, pruned_loss=0.05342, over 8361.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2923, pruned_loss=0.06535, over 1607312.86 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:55,173 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:06,980 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:18,380 INFO [train.py:901] (1/4) Epoch 19, batch 1150, loss[loss=0.2098, simple_loss=0.2781, pruned_loss=0.07071, over 7430.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2918, pruned_loss=0.065, over 1609797.37 frames. ], batch size: 17, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:35:19,122 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 22:35:19,277 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:42,416 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.484e+02 2.879e+02 3.755e+02 5.922e+02, threshold=5.758e+02, percent-clipped=3.0 +2023-02-06 22:35:53,863 INFO [train.py:901] (1/4) Epoch 19, batch 1200, loss[loss=0.2106, simple_loss=0.2838, pruned_loss=0.06869, over 7814.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2923, pruned_loss=0.06552, over 1614730.46 frames. ], batch size: 20, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:24,675 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9262, 1.5484, 3.2936, 1.4576, 2.1852, 3.5613, 3.6460, 3.0279], + device='cuda:1'), covar=tensor([0.1093, 0.1660, 0.0335, 0.2071, 0.1094, 0.0240, 0.0655, 0.0550], + device='cuda:1'), in_proj_covar=tensor([0.0283, 0.0314, 0.0280, 0.0307, 0.0297, 0.0256, 0.0400, 0.0297], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 22:36:29,002 INFO [train.py:901] (1/4) Epoch 19, batch 1250, loss[loss=0.1808, simple_loss=0.2569, pruned_loss=0.0524, over 7701.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2932, pruned_loss=0.06594, over 1614225.89 frames. 
], batch size: 18, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:34,072 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2891, 1.9113, 2.5680, 2.0959, 2.4600, 2.2657, 2.0222, 1.3466], + device='cuda:1'), covar=tensor([0.5141, 0.4809, 0.1893, 0.3666, 0.2337, 0.2824, 0.1930, 0.5041], + device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0947, 0.0781, 0.0912, 0.0978, 0.0864, 0.0726, 0.0807], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 22:36:52,652 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 2.471e+02 2.976e+02 4.092e+02 7.603e+02, threshold=5.951e+02, percent-clipped=4.0 +2023-02-06 22:37:04,305 INFO [train.py:901] (1/4) Epoch 19, batch 1300, loss[loss=0.1904, simple_loss=0.278, pruned_loss=0.05134, over 8375.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.293, pruned_loss=0.06556, over 1614747.18 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:40,719 INFO [train.py:901] (1/4) Epoch 19, batch 1350, loss[loss=0.1972, simple_loss=0.2821, pruned_loss=0.05613, over 8081.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2934, pruned_loss=0.06552, over 1615302.56 frames. ], batch size: 21, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:53,728 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:37:58,975 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 22:38:03,894 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.302e+02 2.844e+02 3.659e+02 6.626e+02, threshold=5.688e+02, percent-clipped=1.0 +2023-02-06 22:38:07,775 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146882.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:15,229 INFO [train.py:901] (1/4) Epoch 19, batch 1400, loss[loss=0.1871, simple_loss=0.2737, pruned_loss=0.05029, over 7810.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2918, pruned_loss=0.06476, over 1612316.36 frames. ], batch size: 20, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:38:25,961 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:43,245 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 22:38:52,627 INFO [train.py:901] (1/4) Epoch 19, batch 1450, loss[loss=0.2365, simple_loss=0.3205, pruned_loss=0.07621, over 8243.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2924, pruned_loss=0.06489, over 1616857.69 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:38:56,691 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 22:38:59,396 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:16,186 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.362e+02 2.962e+02 3.993e+02 1.525e+03, threshold=5.923e+02, percent-clipped=6.0 +2023-02-06 22:39:22,647 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6057, 1.3274, 4.8696, 1.7794, 4.2414, 4.0737, 4.3485, 4.2639], + device='cuda:1'), covar=tensor([0.0702, 0.5073, 0.0574, 0.4227, 0.1277, 0.1038, 0.0705, 0.0713], + device='cuda:1'), in_proj_covar=tensor([0.0602, 0.0635, 0.0677, 0.0609, 0.0692, 0.0594, 0.0590, 0.0651], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:39:23,969 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:27,292 INFO [train.py:901] (1/4) Epoch 19, batch 1500, loss[loss=0.1892, simple_loss=0.2632, pruned_loss=0.05755, over 7437.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2912, pruned_loss=0.0644, over 1613930.35 frames. ], batch size: 17, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:03,272 INFO [train.py:901] (1/4) Epoch 19, batch 1550, loss[loss=0.1913, simple_loss=0.2856, pruned_loss=0.04853, over 8104.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2913, pruned_loss=0.06395, over 1614169.51 frames. ], batch size: 23, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:22,634 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:40:28,388 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.434e+02 2.984e+02 3.600e+02 8.495e+02, threshold=5.968e+02, percent-clipped=1.0 +2023-02-06 22:40:39,458 INFO [train.py:901] (1/4) Epoch 19, batch 1600, loss[loss=0.1683, simple_loss=0.2568, pruned_loss=0.03995, over 7542.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2916, pruned_loss=0.06378, over 1617136.95 frames. ], batch size: 18, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:46,387 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:41:14,549 INFO [train.py:901] (1/4) Epoch 19, batch 1650, loss[loss=0.1876, simple_loss=0.2618, pruned_loss=0.05674, over 7444.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2911, pruned_loss=0.06412, over 1614394.24 frames. ], batch size: 17, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:41:40,971 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.354e+02 2.709e+02 3.474e+02 7.081e+02, threshold=5.418e+02, percent-clipped=1.0 +2023-02-06 22:41:51,243 INFO [train.py:901] (1/4) Epoch 19, batch 1700, loss[loss=0.2374, simple_loss=0.3119, pruned_loss=0.08147, over 8582.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2901, pruned_loss=0.06369, over 1616118.35 frames. 
], batch size: 34, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:41:52,266 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0857, 1.8117, 2.3475, 1.9647, 2.3053, 2.1213, 1.8884, 1.1316], + device='cuda:1'), covar=tensor([0.5138, 0.4573, 0.1816, 0.3609, 0.2403, 0.2910, 0.1818, 0.4881], + device='cuda:1'), in_proj_covar=tensor([0.0936, 0.0953, 0.0788, 0.0917, 0.0984, 0.0869, 0.0730, 0.0810], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 22:42:00,574 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147206.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:25,938 INFO [train.py:901] (1/4) Epoch 19, batch 1750, loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.06163, over 8318.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2903, pruned_loss=0.06337, over 1614595.14 frames. ], batch size: 25, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:42:37,957 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:41,406 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3600, 1.5899, 4.5623, 1.6819, 3.9810, 3.7949, 4.0827, 3.9685], + device='cuda:1'), covar=tensor([0.0651, 0.4741, 0.0518, 0.4351, 0.1167, 0.1032, 0.0658, 0.0704], + device='cuda:1'), in_proj_covar=tensor([0.0601, 0.0636, 0.0677, 0.0612, 0.0692, 0.0596, 0.0591, 0.0653], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:42:46,385 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3957, 4.4194, 3.9720, 2.1516, 3.8962, 4.0056, 3.9524, 3.7968], + device='cuda:1'), covar=tensor([0.0705, 0.0509, 0.0955, 0.4273, 0.0810, 0.1019, 0.1348, 0.0785], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0423, 0.0428, 0.0526, 0.0415, 0.0427, 0.0410, 0.0375], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:42:51,049 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.529e+02 3.043e+02 3.569e+02 7.736e+02, threshold=6.085e+02, percent-clipped=5.0 +2023-02-06 22:43:03,013 INFO [train.py:901] (1/4) Epoch 19, batch 1800, loss[loss=0.1835, simple_loss=0.2582, pruned_loss=0.0544, over 7705.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2916, pruned_loss=0.06431, over 1614899.50 frames. ], batch size: 18, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:22,536 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:24,599 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,343 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,657 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 22:43:37,466 INFO [train.py:901] (1/4) Epoch 19, batch 1850, loss[loss=0.2229, simple_loss=0.3118, pruned_loss=0.06694, over 8564.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2917, pruned_loss=0.06445, over 1616607.39 frames. 
], batch size: 34, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:41,699 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:48,548 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:02,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.300e+02 2.823e+02 3.606e+02 1.006e+03, threshold=5.645e+02, percent-clipped=2.0 +2023-02-06 22:44:02,738 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 22:44:06,650 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:12,523 INFO [train.py:901] (1/4) Epoch 19, batch 1900, loss[loss=0.1965, simple_loss=0.2851, pruned_loss=0.05393, over 8331.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2906, pruned_loss=0.06418, over 1612564.32 frames. ], batch size: 26, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:37,275 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:44,950 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 22:44:49,667 INFO [train.py:901] (1/4) Epoch 19, batch 1950, loss[loss=0.256, simple_loss=0.3314, pruned_loss=0.09027, over 8030.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2905, pruned_loss=0.06438, over 1607730.47 frames. ], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:55,957 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:56,507 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 22:45:13,742 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.289e+02 2.862e+02 3.830e+02 8.439e+02, threshold=5.724e+02, percent-clipped=6.0 +2023-02-06 22:45:15,255 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 22:45:24,879 INFO [train.py:901] (1/4) Epoch 19, batch 2000, loss[loss=0.2109, simple_loss=0.2871, pruned_loss=0.0673, over 7822.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2917, pruned_loss=0.06499, over 1612180.38 frames. ], batch size: 20, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:01,778 INFO [train.py:901] (1/4) Epoch 19, batch 2050, loss[loss=0.1868, simple_loss=0.2848, pruned_loss=0.04437, over 8334.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2902, pruned_loss=0.06495, over 1604939.24 frames. ], batch size: 26, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:09,084 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.02 vs. limit=5.0 +2023-02-06 22:46:25,331 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147577.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:25,778 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.500e+02 2.918e+02 3.445e+02 6.516e+02, threshold=5.836e+02, percent-clipped=2.0 +2023-02-06 22:46:36,262 INFO [train.py:901] (1/4) Epoch 19, batch 2100, loss[loss=0.2038, simple_loss=0.2949, pruned_loss=0.05641, over 8301.00 frames. 
], tot_loss[loss=0.21, simple_loss=0.2902, pruned_loss=0.06487, over 1606888.50 frames. ], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:42,944 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:43,436 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:47,318 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-06 22:47:12,101 INFO [train.py:901] (1/4) Epoch 19, batch 2150, loss[loss=0.2346, simple_loss=0.3225, pruned_loss=0.07341, over 8514.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2899, pruned_loss=0.06461, over 1612279.31 frames. ], batch size: 26, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:47:28,882 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0185, 3.5909, 2.0898, 2.5490, 2.6623, 2.0042, 2.6232, 2.8096], + device='cuda:1'), covar=tensor([0.1668, 0.0330, 0.1172, 0.0879, 0.0846, 0.1357, 0.1040, 0.1073], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0234, 0.0325, 0.0303, 0.0301, 0.0331, 0.0341, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 22:47:31,900 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 22:47:33,545 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:36,908 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:37,423 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.398e+02 3.174e+02 3.852e+02 9.466e+02, threshold=6.348e+02, percent-clipped=6.0 +2023-02-06 22:47:47,724 INFO [train.py:901] (1/4) Epoch 19, batch 2200, loss[loss=0.1917, simple_loss=0.2829, pruned_loss=0.05028, over 8202.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2906, pruned_loss=0.06481, over 1614805.61 frames. ], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:04,720 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:21,943 INFO [train.py:901] (1/4) Epoch 19, batch 2250, loss[loss=0.1978, simple_loss=0.2869, pruned_loss=0.05431, over 8530.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2915, pruned_loss=0.06544, over 1614737.54 frames. ], batch size: 39, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:41,107 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:47,103 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.392e+02 3.089e+02 3.849e+02 9.613e+02, threshold=6.179e+02, percent-clipped=2.0 +2023-02-06 22:48:53,334 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:56,992 INFO [train.py:901] (1/4) Epoch 19, batch 2300, loss[loss=0.2157, simple_loss=0.2894, pruned_loss=0.07099, over 8137.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2911, pruned_loss=0.06499, over 1614251.06 frames. 
], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:58,973 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147796.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:24,342 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:30,996 INFO [train.py:901] (1/4) Epoch 19, batch 2350, loss[loss=0.2197, simple_loss=0.3061, pruned_loss=0.06662, over 8519.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2909, pruned_loss=0.06461, over 1614309.88 frames. ], batch size: 28, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:49:53,251 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:55,896 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.451e+02 2.984e+02 3.607e+02 1.132e+03, threshold=5.968e+02, percent-clipped=4.0 +2023-02-06 22:50:01,541 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:05,004 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0967, 2.2688, 1.9712, 2.8831, 1.4514, 1.7242, 2.1709, 2.3893], + device='cuda:1'), covar=tensor([0.0675, 0.0838, 0.0855, 0.0320, 0.1112, 0.1233, 0.0836, 0.0643], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0197, 0.0249, 0.0212, 0.0207, 0.0246, 0.0253, 0.0212], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:50:07,522 INFO [train.py:901] (1/4) Epoch 19, batch 2400, loss[loss=0.1992, simple_loss=0.2846, pruned_loss=0.05688, over 8239.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2911, pruned_loss=0.06405, over 1620235.72 frames. ], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:50:17,624 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.63 vs. limit=5.0 +2023-02-06 22:50:20,048 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147911.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:32,259 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:41,460 INFO [train.py:901] (1/4) Epoch 19, batch 2450, loss[loss=0.2023, simple_loss=0.2854, pruned_loss=0.05958, over 7658.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2918, pruned_loss=0.0648, over 1616783.13 frames. ], batch size: 19, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:03,066 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:05,437 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.599e+02 2.990e+02 3.557e+02 6.406e+02, threshold=5.981e+02, percent-clipped=1.0 +2023-02-06 22:51:15,597 INFO [train.py:901] (1/4) Epoch 19, batch 2500, loss[loss=0.2271, simple_loss=0.3057, pruned_loss=0.07428, over 8191.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2914, pruned_loss=0.06526, over 1615662.21 frames. 
], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:20,624 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:37,735 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:41,235 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4644, 2.0120, 3.2213, 1.2723, 2.5431, 1.9345, 1.6404, 2.4261], + device='cuda:1'), covar=tensor([0.2100, 0.2487, 0.0862, 0.4721, 0.1857, 0.3362, 0.2293, 0.2403], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0577, 0.0548, 0.0626, 0.0634, 0.0583, 0.0518, 0.0627], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 22:51:52,322 INFO [train.py:901] (1/4) Epoch 19, batch 2550, loss[loss=0.2216, simple_loss=0.298, pruned_loss=0.07261, over 8284.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2921, pruned_loss=0.06576, over 1612994.31 frames. ], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:52,574 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:09,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:15,626 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.379e+02 2.867e+02 3.516e+02 7.047e+02, threshold=5.734e+02, percent-clipped=3.0 +2023-02-06 22:52:17,126 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5261, 1.8393, 2.0660, 1.2738, 2.0598, 1.3447, 0.6310, 1.7748], + device='cuda:1'), covar=tensor([0.0667, 0.0369, 0.0241, 0.0610, 0.0431, 0.0895, 0.0826, 0.0296], + device='cuda:1'), in_proj_covar=tensor([0.0441, 0.0380, 0.0329, 0.0435, 0.0363, 0.0526, 0.0383, 0.0404], + device='cuda:1'), out_proj_covar=tensor([1.1934e-04, 1.0026e-04, 8.7018e-05, 1.1568e-04, 9.6228e-05, 1.5023e-04, + 1.0354e-04, 1.0795e-04], device='cuda:1') +2023-02-06 22:52:24,513 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0911, 1.6326, 1.3846, 1.6109, 1.3335, 1.2174, 1.3230, 1.3967], + device='cuda:1'), covar=tensor([0.0991, 0.0461, 0.1249, 0.0494, 0.0812, 0.1486, 0.0865, 0.0712], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0236, 0.0326, 0.0305, 0.0302, 0.0333, 0.0343, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 22:52:26,280 INFO [train.py:901] (1/4) Epoch 19, batch 2600, loss[loss=0.2183, simple_loss=0.2994, pruned_loss=0.06864, over 8361.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2923, pruned_loss=0.06555, over 1616077.82 frames. ], batch size: 24, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:52:57,239 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:00,041 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:01,854 INFO [train.py:901] (1/4) Epoch 19, batch 2650, loss[loss=0.2035, simple_loss=0.2887, pruned_loss=0.05918, over 8466.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2933, pruned_loss=0.06589, over 1616222.44 frames. 
], batch size: 25, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:16,796 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:18,192 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:24,873 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:25,454 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 2.384e+02 2.853e+02 3.529e+02 7.126e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 22:53:27,178 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-06 22:53:35,175 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:35,670 INFO [train.py:901] (1/4) Epoch 19, batch 2700, loss[loss=0.1607, simple_loss=0.2417, pruned_loss=0.03982, over 7544.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2917, pruned_loss=0.06527, over 1608690.19 frames. ], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:38,569 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5759, 2.7901, 1.9139, 2.3653, 2.3006, 1.7128, 2.2310, 2.2127], + device='cuda:1'), covar=tensor([0.1541, 0.0384, 0.1143, 0.0640, 0.0711, 0.1377, 0.0935, 0.0999], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0237, 0.0327, 0.0306, 0.0302, 0.0333, 0.0344, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 22:53:54,170 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:03,913 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8748, 1.9624, 1.7071, 2.3211, 1.1250, 1.5631, 1.7236, 1.9154], + device='cuda:1'), covar=tensor([0.0661, 0.0704, 0.0950, 0.0398, 0.1063, 0.1334, 0.0802, 0.0702], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0198, 0.0252, 0.0214, 0.0208, 0.0248, 0.0256, 0.0213], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 22:54:11,924 INFO [train.py:901] (1/4) Epoch 19, batch 2750, loss[loss=0.232, simple_loss=0.3078, pruned_loss=0.07813, over 8185.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2909, pruned_loss=0.06486, over 1609506.74 frames. ], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:32,993 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:36,065 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.484e+02 2.895e+02 4.098e+02 9.310e+02, threshold=5.790e+02, percent-clipped=8.0 +2023-02-06 22:54:45,411 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:45,920 INFO [train.py:901] (1/4) Epoch 19, batch 2800, loss[loss=0.1951, simple_loss=0.282, pruned_loss=0.05408, over 8243.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2909, pruned_loss=0.06501, over 1614452.48 frames. 
], batch size: 22, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:54,049 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148305.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:13,969 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148334.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:19,833 INFO [train.py:901] (1/4) Epoch 19, batch 2850, loss[loss=0.2912, simple_loss=0.336, pruned_loss=0.1232, over 7981.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2912, pruned_loss=0.06559, over 1612429.74 frames. ], batch size: 21, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:55:29,882 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.90 vs. limit=5.0 +2023-02-06 22:55:46,065 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.512e+02 2.931e+02 3.824e+02 7.566e+02, threshold=5.862e+02, percent-clipped=4.0 +2023-02-06 22:55:52,986 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:55,652 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:56,116 INFO [train.py:901] (1/4) Epoch 19, batch 2900, loss[loss=0.2649, simple_loss=0.3339, pruned_loss=0.09796, over 7008.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2897, pruned_loss=0.06438, over 1606786.98 frames. ], batch size: 71, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:10,226 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 22:56:12,658 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:29,358 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 22:56:29,949 INFO [train.py:901] (1/4) Epoch 19, batch 2950, loss[loss=0.2387, simple_loss=0.3178, pruned_loss=0.07978, over 8554.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2903, pruned_loss=0.06514, over 1605597.90 frames. ], batch size: 34, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:32,825 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148447.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:52,428 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0194, 1.5289, 1.6399, 1.3652, 1.0242, 1.4514, 1.7474, 1.5306], + device='cuda:1'), covar=tensor([0.0514, 0.1285, 0.1764, 0.1483, 0.0590, 0.1557, 0.0695, 0.0670], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 22:56:54,932 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.514e+02 3.009e+02 3.973e+02 7.443e+02, threshold=6.017e+02, percent-clipped=3.0 +2023-02-06 22:57:06,341 INFO [train.py:901] (1/4) Epoch 19, batch 3000, loss[loss=0.2486, simple_loss=0.3345, pruned_loss=0.08139, over 8325.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2925, pruned_loss=0.06588, over 1607311.53 frames. 
], batch size: 25, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:57:06,342 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 22:57:22,672 INFO [train.py:935] (1/4) Epoch 19, validation: loss=0.1752, simple_loss=0.2756, pruned_loss=0.03738, over 944034.00 frames. +2023-02-06 22:57:22,673 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 22:57:36,057 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.81 vs. limit=5.0 +2023-02-06 22:57:38,590 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148516.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:57:56,934 INFO [train.py:901] (1/4) Epoch 19, batch 3050, loss[loss=0.2342, simple_loss=0.2961, pruned_loss=0.08619, over 8234.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2931, pruned_loss=0.06605, over 1611911.30 frames. ], batch size: 22, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:00,751 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:17,724 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:21,052 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.398e+02 2.811e+02 3.727e+02 6.995e+02, threshold=5.622e+02, percent-clipped=3.0 +2023-02-06 22:58:30,254 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:32,165 INFO [train.py:901] (1/4) Epoch 19, batch 3100, loss[loss=0.2279, simple_loss=0.3134, pruned_loss=0.07117, over 8446.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.06502, over 1608637.74 frames. ], batch size: 27, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:49,347 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148615.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:09,452 INFO [train.py:901] (1/4) Epoch 19, batch 3150, loss[loss=0.2476, simple_loss=0.3241, pruned_loss=0.08559, over 8560.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2918, pruned_loss=0.0647, over 1603364.54 frames. ], batch size: 31, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:59:10,338 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:13,422 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,361 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,422 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:32,308 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.358e+02 3.073e+02 3.824e+02 9.523e+02, threshold=6.146e+02, percent-clipped=8.0 +2023-02-06 22:59:42,402 INFO [train.py:901] (1/4) Epoch 19, batch 3200, loss[loss=0.2212, simple_loss=0.3056, pruned_loss=0.0684, over 8556.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2928, pruned_loss=0.06538, over 1607965.82 frames. 
], batch size: 31, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:12,967 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:19,662 INFO [train.py:901] (1/4) Epoch 19, batch 3250, loss[loss=0.228, simple_loss=0.3131, pruned_loss=0.07144, over 8589.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2914, pruned_loss=0.06487, over 1607930.29 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:34,077 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:43,253 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.443e+02 3.073e+02 4.112e+02 8.183e+02, threshold=6.146e+02, percent-clipped=4.0 +2023-02-06 23:00:52,317 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:53,595 INFO [train.py:901] (1/4) Epoch 19, batch 3300, loss[loss=0.2837, simple_loss=0.3552, pruned_loss=0.1061, over 6837.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2903, pruned_loss=0.06424, over 1604570.36 frames. ], batch size: 72, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:14,546 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 23:01:28,247 INFO [train.py:901] (1/4) Epoch 19, batch 3350, loss[loss=0.227, simple_loss=0.3097, pruned_loss=0.07218, over 8102.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2905, pruned_loss=0.06429, over 1607729.81 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:33,141 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8684, 1.4190, 2.7977, 1.3116, 2.0741, 2.9737, 3.1272, 2.3946], + device='cuda:1'), covar=tensor([0.1063, 0.1802, 0.0494, 0.2255, 0.1023, 0.0403, 0.0661, 0.0881], + device='cuda:1'), in_proj_covar=tensor([0.0289, 0.0318, 0.0285, 0.0311, 0.0302, 0.0262, 0.0405, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-06 23:01:41,954 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:01:49,408 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7489, 2.0900, 2.2892, 1.5068, 2.2231, 1.6397, 0.7391, 2.0503], + device='cuda:1'), covar=tensor([0.0588, 0.0346, 0.0251, 0.0548, 0.0417, 0.0775, 0.0769, 0.0290], + device='cuda:1'), in_proj_covar=tensor([0.0439, 0.0378, 0.0327, 0.0435, 0.0362, 0.0527, 0.0381, 0.0404], + device='cuda:1'), out_proj_covar=tensor([1.1907e-04, 9.9730e-05, 8.6526e-05, 1.1550e-04, 9.6168e-05, 1.5065e-04, + 1.0315e-04, 1.0799e-04], device='cuda:1') +2023-02-06 23:01:53,951 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.291e+02 2.864e+02 3.449e+02 6.722e+02, threshold=5.728e+02, percent-clipped=1.0 +2023-02-06 23:02:04,175 INFO [train.py:901] (1/4) Epoch 19, batch 3400, loss[loss=0.1915, simple_loss=0.2743, pruned_loss=0.05437, over 8115.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2904, pruned_loss=0.0644, over 1605617.98 frames. 
], batch size: 23, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:02:13,154 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:02:35,643 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8310, 3.7579, 2.2646, 2.7069, 2.8418, 1.8818, 2.7080, 2.8031], + device='cuda:1'), covar=tensor([0.1939, 0.0343, 0.1123, 0.0833, 0.0774, 0.1478, 0.1147, 0.1202], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0239, 0.0330, 0.0306, 0.0301, 0.0334, 0.0345, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:02:38,068 INFO [train.py:901] (1/4) Epoch 19, batch 3450, loss[loss=0.2226, simple_loss=0.3057, pruned_loss=0.06972, over 8475.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.29, pruned_loss=0.0639, over 1604310.95 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:01,928 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:04,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.340e+02 2.956e+02 3.727e+02 1.104e+03, threshold=5.912e+02, percent-clipped=3.0 +2023-02-06 23:03:14,135 INFO [train.py:901] (1/4) Epoch 19, batch 3500, loss[loss=0.199, simple_loss=0.281, pruned_loss=0.05854, over 8084.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2912, pruned_loss=0.0642, over 1609739.19 frames. ], batch size: 21, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:14,733 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 23:03:28,308 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:33,360 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:35,952 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 23:03:38,850 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6642, 1.9107, 2.0888, 1.3486, 2.1754, 1.5102, 0.6499, 1.9009], + device='cuda:1'), covar=tensor([0.0605, 0.0362, 0.0282, 0.0566, 0.0384, 0.0787, 0.0807, 0.0297], + device='cuda:1'), in_proj_covar=tensor([0.0439, 0.0379, 0.0328, 0.0436, 0.0361, 0.0526, 0.0379, 0.0406], + device='cuda:1'), out_proj_covar=tensor([1.1895e-04, 9.9959e-05, 8.6799e-05, 1.1606e-04, 9.5725e-05, 1.5048e-04, + 1.0273e-04, 1.0838e-04], device='cuda:1') +2023-02-06 23:03:48,890 INFO [train.py:901] (1/4) Epoch 19, batch 3550, loss[loss=0.261, simple_loss=0.3297, pruned_loss=0.09613, over 8239.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2901, pruned_loss=0.06371, over 1607998.81 frames. 
], batch size: 24, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:50,365 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:51,746 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5406, 1.9701, 3.2032, 1.3264, 2.3182, 2.0623, 1.6445, 2.3272], + device='cuda:1'), covar=tensor([0.1938, 0.2571, 0.0896, 0.4593, 0.1879, 0.3014, 0.2326, 0.2319], + device='cuda:1'), in_proj_covar=tensor([0.0514, 0.0580, 0.0550, 0.0628, 0.0635, 0.0586, 0.0520, 0.0626], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:04:13,072 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,638 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.461e+02 3.087e+02 3.824e+02 7.251e+02, threshold=6.175e+02, percent-clipped=6.0 +2023-02-06 23:04:25,654 INFO [train.py:901] (1/4) Epoch 19, batch 3600, loss[loss=0.1958, simple_loss=0.2784, pruned_loss=0.05657, over 8232.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2897, pruned_loss=0.06356, over 1602979.36 frames. ], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:04:49,826 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:59,730 INFO [train.py:901] (1/4) Epoch 19, batch 3650, loss[loss=0.212, simple_loss=0.2822, pruned_loss=0.07085, over 7698.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2903, pruned_loss=0.06373, over 1604772.39 frames. ], batch size: 18, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:13,230 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:24,391 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.296e+02 2.731e+02 3.488e+02 6.725e+02, threshold=5.462e+02, percent-clipped=1.0 +2023-02-06 23:05:30,751 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:35,239 INFO [train.py:901] (1/4) Epoch 19, batch 3700, loss[loss=0.2132, simple_loss=0.2886, pruned_loss=0.06888, over 7541.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2909, pruned_loss=0.06437, over 1604323.92 frames. ], batch size: 18, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:35,425 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:38,923 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:06:02,794 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149231.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:10,472 INFO [train.py:901] (1/4) Epoch 19, batch 3750, loss[loss=0.2411, simple_loss=0.3159, pruned_loss=0.08316, over 8706.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2921, pruned_loss=0.06518, over 1609178.48 frames. 
], batch size: 34, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:06:16,636 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3615, 1.4894, 4.5934, 1.8190, 4.0575, 3.8019, 4.1030, 3.9771], + device='cuda:1'), covar=tensor([0.0578, 0.4657, 0.0519, 0.3976, 0.1169, 0.0998, 0.0579, 0.0697], + device='cuda:1'), in_proj_covar=tensor([0.0598, 0.0633, 0.0672, 0.0606, 0.0684, 0.0593, 0.0584, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 23:06:19,380 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:34,560 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 2.530e+02 3.028e+02 3.831e+02 7.632e+02, threshold=6.056e+02, percent-clipped=6.0 +2023-02-06 23:06:44,214 INFO [train.py:901] (1/4) Epoch 19, batch 3800, loss[loss=0.2201, simple_loss=0.3044, pruned_loss=0.06788, over 8460.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2919, pruned_loss=0.06482, over 1613737.52 frames. ], batch size: 27, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:20,715 INFO [train.py:901] (1/4) Epoch 19, batch 3850, loss[loss=0.1789, simple_loss=0.2642, pruned_loss=0.04682, over 8108.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.06506, over 1614760.63 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:25,676 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0315, 2.2705, 1.8834, 2.8787, 1.5675, 1.6042, 2.1391, 2.3031], + device='cuda:1'), covar=tensor([0.0771, 0.0774, 0.0931, 0.0372, 0.1027, 0.1316, 0.0869, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0248, 0.0212, 0.0205, 0.0245, 0.0251, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 23:07:42,390 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 23:07:45,095 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.409e+02 2.948e+02 3.728e+02 6.848e+02, threshold=5.896e+02, percent-clipped=3.0 +2023-02-06 23:07:48,726 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:07:54,628 INFO [train.py:901] (1/4) Epoch 19, batch 3900, loss[loss=0.1729, simple_loss=0.2525, pruned_loss=0.04671, over 7539.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2905, pruned_loss=0.06421, over 1614893.90 frames. ], batch size: 18, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:08:06,569 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:31,934 INFO [train.py:901] (1/4) Epoch 19, batch 3950, loss[loss=0.1771, simple_loss=0.2529, pruned_loss=0.05069, over 7700.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2907, pruned_loss=0.06417, over 1614325.45 frames. 
], batch size: 18, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:08:36,308 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:39,079 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9766, 2.1688, 1.8050, 2.7527, 1.4584, 1.6127, 1.9096, 2.2052], + device='cuda:1'), covar=tensor([0.0725, 0.0739, 0.1014, 0.0422, 0.1042, 0.1312, 0.0920, 0.0755], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0250, 0.0214, 0.0206, 0.0247, 0.0254, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 23:08:53,034 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:56,239 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.497e+02 2.881e+02 4.050e+02 6.266e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 23:09:05,731 INFO [train.py:901] (1/4) Epoch 19, batch 4000, loss[loss=0.2223, simple_loss=0.3001, pruned_loss=0.07221, over 8629.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.291, pruned_loss=0.06423, over 1618363.53 frames. ], batch size: 50, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:09:32,423 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:09:40,133 INFO [train.py:901] (1/4) Epoch 19, batch 4050, loss[loss=0.2109, simple_loss=0.3021, pruned_loss=0.0599, over 8525.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2908, pruned_loss=0.06439, over 1612853.82 frames. ], batch size: 28, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:09:41,068 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4724, 1.7362, 1.8964, 1.2151, 1.9181, 1.3806, 0.4239, 1.7100], + device='cuda:1'), covar=tensor([0.0496, 0.0370, 0.0311, 0.0495, 0.0413, 0.0841, 0.0754, 0.0252], + device='cuda:1'), in_proj_covar=tensor([0.0442, 0.0382, 0.0330, 0.0439, 0.0364, 0.0530, 0.0384, 0.0408], + device='cuda:1'), out_proj_covar=tensor([1.1971e-04, 1.0084e-04, 8.7247e-05, 1.1675e-04, 9.6446e-05, 1.5159e-04, + 1.0401e-04, 1.0912e-04], device='cuda:1') +2023-02-06 23:10:05,803 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.470e+02 3.003e+02 4.246e+02 8.728e+02, threshold=6.007e+02, percent-clipped=8.0 +2023-02-06 23:10:08,585 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:10:15,178 INFO [train.py:901] (1/4) Epoch 19, batch 4100, loss[loss=0.202, simple_loss=0.2954, pruned_loss=0.05434, over 8096.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2917, pruned_loss=0.06484, over 1613931.44 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:49,882 INFO [train.py:901] (1/4) Epoch 19, batch 4150, loss[loss=0.1989, simple_loss=0.2842, pruned_loss=0.05681, over 7966.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2922, pruned_loss=0.06528, over 1613112.44 frames. 
], batch size: 21, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:51,538 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5100, 2.2203, 3.1888, 2.3912, 3.0095, 2.4102, 2.2938, 1.7344], + device='cuda:1'), covar=tensor([0.5005, 0.5115, 0.1941, 0.3767, 0.2477, 0.2970, 0.1734, 0.5609], + device='cuda:1'), in_proj_covar=tensor([0.0924, 0.0953, 0.0788, 0.0917, 0.0979, 0.0870, 0.0726, 0.0810], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 23:11:00,706 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-06 23:11:16,655 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.273e+02 2.791e+02 3.594e+02 5.057e+02, threshold=5.582e+02, percent-clipped=0.0 +2023-02-06 23:11:26,110 INFO [train.py:901] (1/4) Epoch 19, batch 4200, loss[loss=0.211, simple_loss=0.2849, pruned_loss=0.06853, over 7966.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2922, pruned_loss=0.06536, over 1611256.59 frames. ], batch size: 21, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:11:36,595 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 23:11:59,504 INFO [train.py:901] (1/4) Epoch 19, batch 4250, loss[loss=0.2045, simple_loss=0.2838, pruned_loss=0.06263, over 8081.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.293, pruned_loss=0.06602, over 1614768.31 frames. ], batch size: 21, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:12:00,930 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 23:12:14,481 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149764.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:12:25,320 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 2.444e+02 3.025e+02 3.928e+02 1.033e+03, threshold=6.050e+02, percent-clipped=5.0 +2023-02-06 23:12:35,591 INFO [train.py:901] (1/4) Epoch 19, batch 4300, loss[loss=0.2252, simple_loss=0.2932, pruned_loss=0.07856, over 6014.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2917, pruned_loss=0.06498, over 1612529.88 frames. ], batch size: 13, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:08,872 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9916, 2.0333, 1.7797, 2.2413, 1.7054, 1.7741, 1.9158, 2.0998], + device='cuda:1'), covar=tensor([0.0622, 0.0663, 0.0788, 0.0602, 0.0813, 0.0969, 0.0657, 0.0638], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0199, 0.0248, 0.0213, 0.0206, 0.0247, 0.0253, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 23:13:10,096 INFO [train.py:901] (1/4) Epoch 19, batch 4350, loss[loss=0.1754, simple_loss=0.2485, pruned_loss=0.05122, over 7447.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2907, pruned_loss=0.06451, over 1612666.40 frames. ], batch size: 17, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:33,182 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 23:13:33,236 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:13:33,384 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.7567, 0.8504, 0.8084, 0.4588, 0.8279, 0.6685, 0.0789, 0.8322], + device='cuda:1'), covar=tensor([0.0281, 0.0264, 0.0220, 0.0401, 0.0254, 0.0614, 0.0566, 0.0220], + device='cuda:1'), in_proj_covar=tensor([0.0442, 0.0383, 0.0330, 0.0438, 0.0365, 0.0530, 0.0384, 0.0408], + device='cuda:1'), out_proj_covar=tensor([1.1955e-04, 1.0115e-04, 8.7273e-05, 1.1623e-04, 9.6675e-05, 1.5135e-04, + 1.0400e-04, 1.0905e-04], device='cuda:1') +2023-02-06 23:13:35,197 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.416e+02 2.972e+02 3.761e+02 1.184e+03, threshold=5.944e+02, percent-clipped=4.0 +2023-02-06 23:13:44,577 INFO [train.py:901] (1/4) Epoch 19, batch 4400, loss[loss=0.2301, simple_loss=0.3073, pruned_loss=0.0764, over 8533.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2905, pruned_loss=0.06449, over 1607226.14 frames. ], batch size: 49, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:09,979 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:14,583 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 23:14:20,892 INFO [train.py:901] (1/4) Epoch 19, batch 4450, loss[loss=0.2393, simple_loss=0.3121, pruned_loss=0.08327, over 8674.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2913, pruned_loss=0.0653, over 1605435.51 frames. ], batch size: 31, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:44,886 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.522e+02 2.925e+02 4.193e+02 1.036e+03, threshold=5.849e+02, percent-clipped=7.0 +2023-02-06 23:14:53,307 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:54,478 INFO [train.py:901] (1/4) Epoch 19, batch 4500, loss[loss=0.1847, simple_loss=0.2642, pruned_loss=0.05259, over 7816.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2903, pruned_loss=0.06432, over 1609230.14 frames. ], batch size: 20, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:08,410 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 23:15:11,309 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9186, 1.3400, 3.0851, 1.3121, 2.0504, 3.3225, 3.5126, 2.7932], + device='cuda:1'), covar=tensor([0.1138, 0.1888, 0.0386, 0.2353, 0.1144, 0.0273, 0.0469, 0.0633], + device='cuda:1'), in_proj_covar=tensor([0.0290, 0.0317, 0.0287, 0.0313, 0.0302, 0.0265, 0.0407, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:15:31,633 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:15:32,129 INFO [train.py:901] (1/4) Epoch 19, batch 4550, loss[loss=0.2153, simple_loss=0.2822, pruned_loss=0.07419, over 7238.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.291, pruned_loss=0.06485, over 1612617.97 frames. 
], batch size: 16, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:56,361 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.399e+02 2.811e+02 3.428e+02 5.502e+02, threshold=5.622e+02, percent-clipped=0.0 +2023-02-06 23:16:05,766 INFO [train.py:901] (1/4) Epoch 19, batch 4600, loss[loss=0.1902, simple_loss=0.2573, pruned_loss=0.06151, over 7542.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2903, pruned_loss=0.06431, over 1611309.22 frames. ], batch size: 18, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:16:08,470 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:16:10,591 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9999, 2.1286, 1.8665, 2.7656, 1.3299, 1.6822, 1.9378, 2.1437], + device='cuda:1'), covar=tensor([0.0777, 0.0825, 0.0963, 0.0378, 0.1078, 0.1258, 0.0896, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0248, 0.0213, 0.0206, 0.0248, 0.0254, 0.0211], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 23:16:12,113 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.26 vs. limit=5.0 +2023-02-06 23:16:15,961 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150108.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:16:40,343 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5868, 1.8441, 3.0739, 1.4231, 2.2470, 1.9888, 1.6181, 2.1645], + device='cuda:1'), covar=tensor([0.1864, 0.2675, 0.0748, 0.4564, 0.1843, 0.3036, 0.2293, 0.2257], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0587, 0.0557, 0.0634, 0.0643, 0.0592, 0.0528, 0.0635], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:16:41,502 INFO [train.py:901] (1/4) Epoch 19, batch 4650, loss[loss=0.2303, simple_loss=0.3139, pruned_loss=0.0733, over 8501.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2898, pruned_loss=0.06424, over 1610001.25 frames. ], batch size: 26, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:17:06,564 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.474e+02 2.856e+02 3.464e+02 8.049e+02, threshold=5.712e+02, percent-clipped=3.0 +2023-02-06 23:17:16,089 INFO [train.py:901] (1/4) Epoch 19, batch 4700, loss[loss=0.2388, simple_loss=0.3255, pruned_loss=0.07607, over 8333.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2893, pruned_loss=0.06412, over 1609757.85 frames. 
], batch size: 26, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:21,222 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0983, 2.1690, 1.9566, 2.8647, 1.4110, 1.6968, 1.9530, 2.2711], + device='cuda:1'), covar=tensor([0.0717, 0.0780, 0.0854, 0.0352, 0.1072, 0.1268, 0.0949, 0.0740], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0197, 0.0246, 0.0212, 0.0205, 0.0247, 0.0253, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 23:17:36,661 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150223.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:17:49,661 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3814, 4.3315, 3.9000, 1.9365, 3.8313, 4.0971, 3.9601, 3.7262], + device='cuda:1'), covar=tensor([0.0847, 0.0597, 0.1277, 0.5231, 0.0978, 0.1057, 0.1363, 0.0932], + device='cuda:1'), in_proj_covar=tensor([0.0517, 0.0428, 0.0426, 0.0528, 0.0416, 0.0433, 0.0411, 0.0375], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:17:50,825 INFO [train.py:901] (1/4) Epoch 19, batch 4750, loss[loss=0.1853, simple_loss=0.2647, pruned_loss=0.05297, over 7791.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2892, pruned_loss=0.06389, over 1608571.86 frames. ], batch size: 19, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:53,798 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:12,320 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:13,465 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 23:18:15,513 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 23:18:16,855 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.315e+02 2.829e+02 3.523e+02 6.730e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-06 23:18:25,810 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:26,369 INFO [train.py:901] (1/4) Epoch 19, batch 4800, loss[loss=0.1847, simple_loss=0.28, pruned_loss=0.04471, over 8423.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2888, pruned_loss=0.06322, over 1610217.59 frames. ], batch size: 27, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:18:29,938 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:46,562 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:19:00,012 INFO [train.py:901] (1/4) Epoch 19, batch 4850, loss[loss=0.2142, simple_loss=0.2976, pruned_loss=0.06545, over 8562.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06283, over 1614056.42 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:19:05,355 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 23:19:27,018 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.476e+02 2.899e+02 3.621e+02 6.951e+02, threshold=5.799e+02, percent-clipped=6.0 +2023-02-06 23:19:36,191 INFO [train.py:901] (1/4) Epoch 19, batch 4900, loss[loss=0.2423, simple_loss=0.3208, pruned_loss=0.08184, over 8429.00 frames. ], tot_loss[loss=0.208, simple_loss=0.289, pruned_loss=0.06347, over 1615662.63 frames. ], batch size: 29, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:19:39,302 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.85 vs. limit=2.0 +2023-02-06 23:20:07,720 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:20:08,504 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0640, 2.3931, 1.8385, 2.8795, 1.3957, 1.6648, 2.0102, 2.3844], + device='cuda:1'), covar=tensor([0.0725, 0.0678, 0.0939, 0.0368, 0.1120, 0.1318, 0.0927, 0.0774], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0198, 0.0248, 0.0213, 0.0205, 0.0248, 0.0254, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-06 23:20:09,004 INFO [train.py:901] (1/4) Epoch 19, batch 4950, loss[loss=0.2071, simple_loss=0.2867, pruned_loss=0.06377, over 8479.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2903, pruned_loss=0.0645, over 1616872.40 frames. ], batch size: 29, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:33,699 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.356e+02 2.775e+02 3.573e+02 1.033e+03, threshold=5.550e+02, percent-clipped=4.0 +2023-02-06 23:20:33,939 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150479.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:20:41,454 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5232, 1.7903, 4.4050, 2.0654, 2.4446, 4.9572, 5.0501, 4.2844], + device='cuda:1'), covar=tensor([0.1067, 0.1646, 0.0241, 0.1901, 0.1191, 0.0180, 0.0479, 0.0538], + device='cuda:1'), in_proj_covar=tensor([0.0292, 0.0319, 0.0289, 0.0313, 0.0304, 0.0267, 0.0408, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:20:43,985 INFO [train.py:901] (1/4) Epoch 19, batch 5000, loss[loss=0.2392, simple_loss=0.3275, pruned_loss=0.07543, over 8438.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2906, pruned_loss=0.06426, over 1619376.15 frames. ], batch size: 29, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:50,110 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6143, 2.5570, 1.9301, 2.2056, 2.0541, 1.6592, 2.0137, 2.1756], + device='cuda:1'), covar=tensor([0.1564, 0.0389, 0.1122, 0.0633, 0.0769, 0.1478, 0.1062, 0.1037], + device='cuda:1'), in_proj_covar=tensor([0.0349, 0.0237, 0.0326, 0.0305, 0.0301, 0.0331, 0.0341, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:20:52,071 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150504.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:21:17,801 INFO [train.py:901] (1/4) Epoch 19, batch 5050, loss[loss=0.219, simple_loss=0.2945, pruned_loss=0.07172, over 8609.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.291, pruned_loss=0.06491, over 1619262.64 frames. 
], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:21:25,912 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:26,618 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:40,937 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 23:21:41,604 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.501e+02 3.000e+02 3.972e+02 7.212e+02, threshold=5.999e+02, percent-clipped=3.0 +2023-02-06 23:21:51,786 INFO [train.py:901] (1/4) Epoch 19, batch 5100, loss[loss=0.2986, simple_loss=0.353, pruned_loss=0.1221, over 6835.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2914, pruned_loss=0.06531, over 1617027.67 frames. ], batch size: 72, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:23,318 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:22:27,868 INFO [train.py:901] (1/4) Epoch 19, batch 5150, loss[loss=0.1995, simple_loss=0.2892, pruned_loss=0.05491, over 7810.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2903, pruned_loss=0.06477, over 1612344.28 frames. ], batch size: 20, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:51,867 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.510e+02 3.215e+02 4.688e+02 9.098e+02, threshold=6.429e+02, percent-clipped=11.0 +2023-02-06 23:23:01,331 INFO [train.py:901] (1/4) Epoch 19, batch 5200, loss[loss=0.2243, simple_loss=0.3161, pruned_loss=0.06625, over 8593.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2895, pruned_loss=0.06394, over 1614911.86 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:28,756 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6159, 1.6402, 2.2610, 1.4697, 1.1751, 2.3055, 0.4399, 1.3859], + device='cuda:1'), covar=tensor([0.2295, 0.1463, 0.0391, 0.1721, 0.3004, 0.0387, 0.2254, 0.1619], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0220, 0.0268, 0.0132, 0.0169, 0.0187], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:23:38,104 INFO [train.py:901] (1/4) Epoch 19, batch 5250, loss[loss=0.2014, simple_loss=0.2898, pruned_loss=0.05653, over 8472.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2898, pruned_loss=0.06395, over 1613362.81 frames. ], batch size: 29, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:40,271 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7653, 1.8238, 2.2508, 1.6521, 1.3067, 2.2792, 0.5225, 1.4377], + device='cuda:1'), covar=tensor([0.1464, 0.1039, 0.0386, 0.1114, 0.2457, 0.0353, 0.1896, 0.1186], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0220, 0.0267, 0.0132, 0.0169, 0.0187], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:23:40,650 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 23:23:42,656 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:23:43,374 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:01,540 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.565e+02 3.080e+02 4.191e+02 1.354e+03, threshold=6.160e+02, percent-clipped=9.0 +2023-02-06 23:24:10,893 INFO [train.py:901] (1/4) Epoch 19, batch 5300, loss[loss=0.243, simple_loss=0.3257, pruned_loss=0.08013, over 8741.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2908, pruned_loss=0.0651, over 1611586.34 frames. ], batch size: 30, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:24:23,655 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:41,664 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:46,253 INFO [train.py:901] (1/4) Epoch 19, batch 5350, loss[loss=0.2287, simple_loss=0.2961, pruned_loss=0.0807, over 8076.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2907, pruned_loss=0.06506, over 1605443.64 frames. ], batch size: 21, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:24:58,168 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 23:25:10,967 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.499e+02 2.979e+02 3.723e+02 8.863e+02, threshold=5.959e+02, percent-clipped=1.0 +2023-02-06 23:25:20,520 INFO [train.py:901] (1/4) Epoch 19, batch 5400, loss[loss=0.216, simple_loss=0.3039, pruned_loss=0.06401, over 8328.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2925, pruned_loss=0.06582, over 1610196.43 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 16.0 +2023-02-06 23:25:24,750 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:25:27,896 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 23:25:37,927 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:25:55,457 INFO [train.py:901] (1/4) Epoch 19, batch 5450, loss[loss=0.2453, simple_loss=0.3231, pruned_loss=0.08371, over 8691.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2919, pruned_loss=0.06546, over 1611779.95 frames. ], batch size: 34, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:26:22,623 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.347e+02 2.658e+02 3.430e+02 7.604e+02, threshold=5.316e+02, percent-clipped=2.0 +2023-02-06 23:26:26,887 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-06 23:26:28,462 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 23:26:31,932 INFO [train.py:901] (1/4) Epoch 19, batch 5500, loss[loss=0.2103, simple_loss=0.2917, pruned_loss=0.06448, over 8143.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.291, pruned_loss=0.06504, over 1611778.56 frames. 
], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:26:41,796 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:26:46,662 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:26:48,801 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.41 vs. limit=5.0 +2023-02-06 23:26:58,985 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:27:06,223 INFO [train.py:901] (1/4) Epoch 19, batch 5550, loss[loss=0.2215, simple_loss=0.2879, pruned_loss=0.07761, over 7652.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2897, pruned_loss=0.06418, over 1611996.79 frames. ], batch size: 19, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:27:27,974 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7053, 1.6686, 2.3462, 1.5546, 1.2908, 2.3109, 0.4971, 1.3927], + device='cuda:1'), covar=tensor([0.1967, 0.1356, 0.0404, 0.1573, 0.2868, 0.0430, 0.2405, 0.1628], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0194, 0.0124, 0.0222, 0.0269, 0.0133, 0.0169, 0.0188], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:27:32,496 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.454e+02 3.027e+02 4.195e+02 6.901e+02, threshold=6.054e+02, percent-clipped=7.0 +2023-02-06 23:27:42,411 INFO [train.py:901] (1/4) Epoch 19, batch 5600, loss[loss=0.1887, simple_loss=0.2786, pruned_loss=0.0494, over 8093.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2903, pruned_loss=0.06457, over 1608554.17 frames. ], batch size: 21, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:27:43,136 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:27:47,848 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7112, 1.7109, 1.9993, 1.6052, 1.2155, 1.7954, 2.3288, 2.1415], + device='cuda:1'), covar=tensor([0.0515, 0.1560, 0.2021, 0.1797, 0.0727, 0.1829, 0.0692, 0.0645], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 23:28:15,312 INFO [train.py:901] (1/4) Epoch 19, batch 5650, loss[loss=0.1986, simple_loss=0.28, pruned_loss=0.05858, over 7239.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2912, pruned_loss=0.06485, over 1607929.66 frames. ], batch size: 16, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:28:25,140 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4356, 2.1664, 2.8821, 2.3725, 2.8048, 2.4193, 2.1761, 1.7763], + device='cuda:1'), covar=tensor([0.4308, 0.4233, 0.1607, 0.2975, 0.1984, 0.2471, 0.1677, 0.4324], + device='cuda:1'), in_proj_covar=tensor([0.0934, 0.0960, 0.0789, 0.0924, 0.0984, 0.0874, 0.0740, 0.0815], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 23:28:31,571 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 23:28:39,668 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.685e+02 3.149e+02 3.866e+02 8.044e+02, threshold=6.298e+02, percent-clipped=3.0 +2023-02-06 23:28:50,395 INFO [train.py:901] (1/4) Epoch 19, batch 5700, loss[loss=0.1775, simple_loss=0.2584, pruned_loss=0.04827, over 8081.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2899, pruned_loss=0.06463, over 1605890.73 frames. ], batch size: 21, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:28:57,633 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151202.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:02,316 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:11,910 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-06 23:29:24,756 INFO [train.py:901] (1/4) Epoch 19, batch 5750, loss[loss=0.2133, simple_loss=0.3021, pruned_loss=0.06223, over 8565.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2888, pruned_loss=0.06433, over 1603144.27 frames. ], batch size: 39, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:29:36,097 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 23:29:37,494 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:41,748 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6902, 2.2933, 2.9557, 2.4827, 2.9245, 2.5225, 2.4202, 2.1691], + device='cuda:1'), covar=tensor([0.3296, 0.3779, 0.1432, 0.2683, 0.1751, 0.2359, 0.1418, 0.3594], + device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0956, 0.0786, 0.0919, 0.0979, 0.0870, 0.0735, 0.0811], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 23:29:43,023 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:48,610 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.389e+02 2.918e+02 3.727e+02 7.769e+02, threshold=5.836e+02, percent-clipped=3.0 +2023-02-06 23:29:58,863 INFO [train.py:901] (1/4) Epoch 19, batch 5800, loss[loss=0.24, simple_loss=0.3239, pruned_loss=0.0781, over 8480.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2896, pruned_loss=0.06473, over 1599155.64 frames. 
], batch size: 25, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:30:00,355 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:04,359 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:05,086 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5050, 2.0278, 3.0763, 1.3367, 2.3341, 1.8561, 1.6618, 2.1456], + device='cuda:1'), covar=tensor([0.2139, 0.2417, 0.0879, 0.4725, 0.1953, 0.3422, 0.2441, 0.2509], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0580, 0.0552, 0.0629, 0.0637, 0.0587, 0.0521, 0.0626], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:30:16,600 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:34,589 INFO [train.py:901] (1/4) Epoch 19, batch 5850, loss[loss=0.2067, simple_loss=0.2948, pruned_loss=0.05932, over 8256.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2908, pruned_loss=0.06496, over 1604292.96 frames. ], batch size: 24, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:30:57,562 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:58,661 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.176e+02 2.714e+02 3.221e+02 1.387e+03, threshold=5.429e+02, percent-clipped=3.0 +2023-02-06 23:31:08,074 INFO [train.py:901] (1/4) Epoch 19, batch 5900, loss[loss=0.1927, simple_loss=0.2718, pruned_loss=0.05682, over 7799.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2908, pruned_loss=0.06475, over 1607426.44 frames. ], batch size: 20, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:31:15,414 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 23:31:44,452 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5936, 1.9819, 3.3188, 1.4009, 2.4268, 1.9704, 1.6982, 2.4004], + device='cuda:1'), covar=tensor([0.1824, 0.2386, 0.0807, 0.4361, 0.1756, 0.3092, 0.2145, 0.2288], + device='cuda:1'), in_proj_covar=tensor([0.0516, 0.0583, 0.0555, 0.0632, 0.0640, 0.0590, 0.0523, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:31:44,896 INFO [train.py:901] (1/4) Epoch 19, batch 5950, loss[loss=0.2601, simple_loss=0.3351, pruned_loss=0.09255, over 8355.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2907, pruned_loss=0.06449, over 1607090.72 frames. ], batch size: 24, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:31:59,862 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:32:09,190 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.424e+02 3.104e+02 3.851e+02 8.156e+02, threshold=6.208e+02, percent-clipped=3.0 +2023-02-06 23:32:16,803 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:32:18,556 INFO [train.py:901] (1/4) Epoch 19, batch 6000, loss[loss=0.1878, simple_loss=0.2605, pruned_loss=0.05754, over 7731.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2913, pruned_loss=0.06495, over 1612389.56 frames. 
], batch size: 18, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:32:18,557 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 23:32:32,009 INFO [train.py:935] (1/4) Epoch 19, validation: loss=0.1763, simple_loss=0.2764, pruned_loss=0.03805, over 944034.00 frames. +2023-02-06 23:32:32,012 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 23:32:56,730 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6454, 1.9292, 2.1008, 1.2775, 2.1550, 1.5180, 0.7175, 1.8311], + device='cuda:1'), covar=tensor([0.0684, 0.0407, 0.0340, 0.0683, 0.0450, 0.0951, 0.0971, 0.0358], + device='cuda:1'), in_proj_covar=tensor([0.0444, 0.0380, 0.0332, 0.0439, 0.0368, 0.0527, 0.0386, 0.0406], + device='cuda:1'), out_proj_covar=tensor([1.2020e-04, 1.0041e-04, 8.7768e-05, 1.1665e-04, 9.7573e-05, 1.5037e-04, + 1.0445e-04, 1.0841e-04], device='cuda:1') +2023-02-06 23:32:59,489 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9173, 2.3122, 3.6255, 2.0601, 1.8054, 3.4662, 0.8088, 2.1126], + device='cuda:1'), covar=tensor([0.1271, 0.1341, 0.0207, 0.1718, 0.2787, 0.0321, 0.2297, 0.1455], + device='cuda:1'), in_proj_covar=tensor([0.0185, 0.0193, 0.0123, 0.0221, 0.0268, 0.0133, 0.0168, 0.0187], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:33:06,951 INFO [train.py:901] (1/4) Epoch 19, batch 6050, loss[loss=0.2041, simple_loss=0.2894, pruned_loss=0.05945, over 8146.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2926, pruned_loss=0.06597, over 1607989.76 frames. ], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:33:09,101 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:33:32,578 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.535e+02 3.172e+02 3.888e+02 8.825e+02, threshold=6.343e+02, percent-clipped=4.0 +2023-02-06 23:33:42,769 INFO [train.py:901] (1/4) Epoch 19, batch 6100, loss[loss=0.1876, simple_loss=0.2767, pruned_loss=0.04928, over 8250.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2924, pruned_loss=0.06541, over 1612246.32 frames. ], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:33:56,036 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2005, 1.4605, 3.4917, 1.4071, 2.2932, 3.8262, 3.8957, 3.3000], + device='cuda:1'), covar=tensor([0.0940, 0.1693, 0.0286, 0.2019, 0.1020, 0.0194, 0.0418, 0.0506], + device='cuda:1'), in_proj_covar=tensor([0.0290, 0.0319, 0.0286, 0.0311, 0.0303, 0.0262, 0.0405, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:34:07,689 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 23:34:10,860 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:17,587 INFO [train.py:901] (1/4) Epoch 19, batch 6150, loss[loss=0.1986, simple_loss=0.2875, pruned_loss=0.05484, over 8227.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2909, pruned_loss=0.06481, over 1611554.32 frames. 
], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:34:18,368 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:28,851 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:30,136 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151660.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:30,975 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:43,585 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.320e+02 2.846e+02 3.654e+02 5.745e+02, threshold=5.693e+02, percent-clipped=0.0 +2023-02-06 23:34:51,571 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 23:34:53,948 INFO [train.py:901] (1/4) Epoch 19, batch 6200, loss[loss=0.2271, simple_loss=0.3029, pruned_loss=0.07566, over 8327.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2905, pruned_loss=0.06464, over 1612498.80 frames. ], batch size: 26, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:35:02,701 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:28,542 INFO [train.py:901] (1/4) Epoch 19, batch 6250, loss[loss=0.2243, simple_loss=0.2996, pruned_loss=0.07452, over 8623.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2909, pruned_loss=0.06501, over 1611676.39 frames. ], batch size: 39, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:35:31,304 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8474, 1.6348, 5.8921, 2.2320, 5.3707, 5.0007, 5.4511, 5.3251], + device='cuda:1'), covar=tensor([0.0314, 0.4575, 0.0383, 0.3538, 0.0798, 0.0847, 0.0441, 0.0426], + device='cuda:1'), in_proj_covar=tensor([0.0593, 0.0627, 0.0669, 0.0602, 0.0678, 0.0583, 0.0584, 0.0643], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:35:39,356 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151759.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:50,915 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:53,502 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.555e+02 3.246e+02 4.070e+02 8.549e+02, threshold=6.492e+02, percent-clipped=6.0 +2023-02-06 23:36:03,713 INFO [train.py:901] (1/4) Epoch 19, batch 6300, loss[loss=0.2291, simple_loss=0.3104, pruned_loss=0.07391, over 8360.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2925, pruned_loss=0.06548, over 1615424.25 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:36:22,223 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:36:27,269 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 23:36:39,103 INFO [train.py:901] (1/4) Epoch 19, batch 6350, loss[loss=0.2141, simple_loss=0.2888, pruned_loss=0.06973, over 7972.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2927, pruned_loss=0.06589, over 1615387.30 frames. 
], batch size: 21, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:37:03,145 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.376e+02 2.921e+02 3.593e+02 6.855e+02, threshold=5.841e+02, percent-clipped=1.0 +2023-02-06 23:37:10,746 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1219, 1.8846, 3.5231, 1.5836, 2.3065, 3.8841, 3.9801, 3.3612], + device='cuda:1'), covar=tensor([0.1032, 0.1450, 0.0315, 0.2035, 0.1128, 0.0230, 0.0623, 0.0553], + device='cuda:1'), in_proj_covar=tensor([0.0290, 0.0316, 0.0284, 0.0310, 0.0301, 0.0261, 0.0405, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:37:13,211 INFO [train.py:901] (1/4) Epoch 19, batch 6400, loss[loss=0.185, simple_loss=0.2768, pruned_loss=0.0466, over 8201.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2929, pruned_loss=0.06585, over 1615760.27 frames. ], batch size: 23, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:37:30,643 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:37:48,047 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:37:48,526 INFO [train.py:901] (1/4) Epoch 19, batch 6450, loss[loss=0.203, simple_loss=0.283, pruned_loss=0.0615, over 8082.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2924, pruned_loss=0.06575, over 1615385.87 frames. ], batch size: 21, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:38:13,510 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.409e+02 2.943e+02 3.710e+02 6.232e+02, threshold=5.887e+02, percent-clipped=1.0 +2023-02-06 23:38:23,089 INFO [train.py:901] (1/4) Epoch 19, batch 6500, loss[loss=0.1435, simple_loss=0.2287, pruned_loss=0.02917, over 5536.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2916, pruned_loss=0.06552, over 1608758.95 frames. ], batch size: 12, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:38:40,015 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:51,598 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:58,592 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152040.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:59,457 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 23:39:00,449 INFO [train.py:901] (1/4) Epoch 19, batch 6550, loss[loss=0.2065, simple_loss=0.2995, pruned_loss=0.05678, over 8254.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2918, pruned_loss=0.06538, over 1610089.38 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:39:04,937 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:09,227 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:21,623 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 23:39:24,907 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.379e+02 2.761e+02 3.695e+02 7.678e+02, threshold=5.522e+02, percent-clipped=3.0 +2023-02-06 23:39:34,313 INFO [train.py:901] (1/4) Epoch 19, batch 6600, loss[loss=0.1989, simple_loss=0.2718, pruned_loss=0.06301, over 7798.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2915, pruned_loss=0.06524, over 1606148.85 frames. ], batch size: 19, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:39:39,614 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:40:09,009 INFO [train.py:901] (1/4) Epoch 19, batch 6650, loss[loss=0.2228, simple_loss=0.3027, pruned_loss=0.07149, over 8038.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2931, pruned_loss=0.06611, over 1609876.97 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:40:23,465 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:40:24,953 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:40:34,183 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.686e+02 3.265e+02 3.895e+02 8.931e+02, threshold=6.531e+02, percent-clipped=7.0 +2023-02-06 23:40:44,526 INFO [train.py:901] (1/4) Epoch 19, batch 6700, loss[loss=0.2564, simple_loss=0.3326, pruned_loss=0.09006, over 8439.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2931, pruned_loss=0.06588, over 1614167.27 frames. ], batch size: 29, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:41:10,479 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9479, 1.5892, 3.4699, 1.5675, 2.4139, 3.8689, 3.8871, 3.3137], + device='cuda:1'), covar=tensor([0.1270, 0.1782, 0.0361, 0.2127, 0.1139, 0.0214, 0.0514, 0.0569], + device='cuda:1'), in_proj_covar=tensor([0.0290, 0.0318, 0.0284, 0.0310, 0.0301, 0.0260, 0.0405, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:41:16,176 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6144, 1.6968, 1.7344, 1.4252, 1.8417, 1.4283, 0.8791, 1.6813], + device='cuda:1'), covar=tensor([0.0471, 0.0354, 0.0224, 0.0424, 0.0338, 0.0644, 0.0681, 0.0220], + device='cuda:1'), in_proj_covar=tensor([0.0437, 0.0377, 0.0330, 0.0436, 0.0363, 0.0522, 0.0382, 0.0402], + device='cuda:1'), out_proj_covar=tensor([1.1824e-04, 9.9495e-05, 8.7267e-05, 1.1562e-04, 9.6266e-05, 1.4874e-04, + 1.0334e-04, 1.0728e-04], device='cuda:1') +2023-02-06 23:41:19,464 INFO [train.py:901] (1/4) Epoch 19, batch 6750, loss[loss=0.1734, simple_loss=0.2606, pruned_loss=0.04312, over 8124.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2926, pruned_loss=0.06541, over 1613370.74 frames. 
], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:41:19,611 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:38,483 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8369, 1.7632, 1.9341, 1.7712, 1.1405, 1.7992, 2.2863, 2.0514], + device='cuda:1'), covar=tensor([0.0448, 0.1198, 0.1573, 0.1319, 0.0638, 0.1346, 0.0609, 0.0588], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0157, 0.0099, 0.0160, 0.0112, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 23:41:44,685 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:45,124 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.378e+02 2.909e+02 3.491e+02 6.752e+02, threshold=5.817e+02, percent-clipped=2.0 +2023-02-06 23:41:53,502 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:54,060 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 23:41:54,743 INFO [train.py:901] (1/4) Epoch 19, batch 6800, loss[loss=0.1959, simple_loss=0.2787, pruned_loss=0.05653, over 8359.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2924, pruned_loss=0.06527, over 1614960.57 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:41:55,685 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6087, 1.8963, 3.0304, 1.4270, 2.2792, 2.0363, 1.6463, 2.3873], + device='cuda:1'), covar=tensor([0.1901, 0.2499, 0.0781, 0.4419, 0.1742, 0.3248, 0.2317, 0.1913], + device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0581, 0.0549, 0.0627, 0.0638, 0.0586, 0.0521, 0.0627], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:42:29,090 INFO [train.py:901] (1/4) Epoch 19, batch 6850, loss[loss=0.2109, simple_loss=0.2873, pruned_loss=0.06727, over 8035.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2919, pruned_loss=0.06485, over 1614513.87 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:42:43,979 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 23:42:54,742 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.344e+02 3.012e+02 3.839e+02 8.073e+02, threshold=6.025e+02, percent-clipped=5.0 +2023-02-06 23:42:56,514 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 23:43:00,375 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7998, 5.9471, 5.1293, 2.1691, 5.2278, 5.4652, 5.4581, 5.3340], + device='cuda:1'), covar=tensor([0.0486, 0.0381, 0.0933, 0.4752, 0.0694, 0.0875, 0.0993, 0.0517], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0428, 0.0431, 0.0535, 0.0418, 0.0433, 0.0414, 0.0376], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:43:02,848 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. 
limit=2.0 +2023-02-06 23:43:05,118 INFO [train.py:901] (1/4) Epoch 19, batch 6900, loss[loss=0.2049, simple_loss=0.2899, pruned_loss=0.05993, over 8369.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2916, pruned_loss=0.0643, over 1615840.94 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:43:18,941 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 23:43:25,518 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152421.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:43:40,396 INFO [train.py:901] (1/4) Epoch 19, batch 6950, loss[loss=0.2518, simple_loss=0.3189, pruned_loss=0.09237, over 6771.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2923, pruned_loss=0.06487, over 1613713.79 frames. ], batch size: 73, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:43:42,624 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:43:43,491 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 23:43:53,794 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 23:43:53,935 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:44:05,256 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.443e+02 3.132e+02 3.706e+02 6.613e+02, threshold=6.264e+02, percent-clipped=2.0 +2023-02-06 23:44:14,628 INFO [train.py:901] (1/4) Epoch 19, batch 7000, loss[loss=0.2286, simple_loss=0.3067, pruned_loss=0.07521, over 6775.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2928, pruned_loss=0.06537, over 1614823.36 frames. ], batch size: 71, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:44:24,958 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7372, 1.4424, 4.9042, 1.7026, 4.4305, 4.0245, 4.4459, 4.3519], + device='cuda:1'), covar=tensor([0.0444, 0.4284, 0.0394, 0.3817, 0.0970, 0.0929, 0.0438, 0.0543], + device='cuda:1'), in_proj_covar=tensor([0.0593, 0.0626, 0.0667, 0.0603, 0.0682, 0.0585, 0.0583, 0.0645], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 23:44:44,335 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152534.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:44:49,244 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1990, 2.0133, 2.7365, 2.2300, 2.6447, 2.2245, 2.0256, 1.4319], + device='cuda:1'), covar=tensor([0.5569, 0.5000, 0.1908, 0.3725, 0.2537, 0.3181, 0.2129, 0.5584], + device='cuda:1'), in_proj_covar=tensor([0.0930, 0.0960, 0.0786, 0.0922, 0.0981, 0.0874, 0.0738, 0.0816], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-06 23:44:51,104 INFO [train.py:901] (1/4) Epoch 19, batch 7050, loss[loss=0.2121, simple_loss=0.3023, pruned_loss=0.06101, over 8329.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2919, pruned_loss=0.06416, over 1618802.78 frames. 
], batch size: 26, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:44:59,579 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0768, 3.9420, 2.4967, 2.9506, 3.0669, 2.2403, 2.9912, 3.1694], + device='cuda:1'), covar=tensor([0.1629, 0.0298, 0.1163, 0.0661, 0.0694, 0.1382, 0.0991, 0.1066], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0235, 0.0330, 0.0305, 0.0303, 0.0334, 0.0344, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:45:01,133 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-06 23:45:02,279 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:15,726 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.420e+02 2.800e+02 3.429e+02 5.549e+02, threshold=5.599e+02, percent-clipped=0.0 +2023-02-06 23:45:21,280 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:25,399 INFO [train.py:901] (1/4) Epoch 19, batch 7100, loss[loss=0.1871, simple_loss=0.2559, pruned_loss=0.05914, over 7241.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.291, pruned_loss=0.06392, over 1617017.99 frames. ], batch size: 16, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:45:30,681 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:35,439 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:56,369 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:46:01,875 INFO [train.py:901] (1/4) Epoch 19, batch 7150, loss[loss=0.1597, simple_loss=0.2487, pruned_loss=0.03535, over 7815.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2926, pruned_loss=0.0648, over 1616059.42 frames. ], batch size: 20, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:46:26,711 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4197, 1.3735, 1.7489, 1.1939, 1.0990, 1.7159, 0.2070, 1.1203], + device='cuda:1'), covar=tensor([0.1966, 0.1549, 0.0402, 0.1067, 0.2747, 0.0509, 0.2278, 0.1406], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0194, 0.0123, 0.0220, 0.0266, 0.0132, 0.0168, 0.0186], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:46:27,177 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.485e+02 2.441e+02 2.885e+02 3.630e+02 1.043e+03, threshold=5.770e+02, percent-clipped=5.0 +2023-02-06 23:46:36,617 INFO [train.py:901] (1/4) Epoch 19, batch 7200, loss[loss=0.197, simple_loss=0.2854, pruned_loss=0.05429, over 8444.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2931, pruned_loss=0.06551, over 1618642.66 frames. ], batch size: 29, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:46:42,735 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:12,598 INFO [train.py:901] (1/4) Epoch 19, batch 7250, loss[loss=0.1661, simple_loss=0.2525, pruned_loss=0.03989, over 7435.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2927, pruned_loss=0.06536, over 1619213.67 frames. 
], batch size: 17, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:47:13,458 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152744.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:15,605 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6313, 1.5306, 2.1162, 1.3537, 1.1770, 2.0522, 0.3230, 1.2774], + device='cuda:1'), covar=tensor([0.1694, 0.1326, 0.0322, 0.1219, 0.2758, 0.0416, 0.2015, 0.1207], + device='cuda:1'), in_proj_covar=tensor([0.0185, 0.0193, 0.0123, 0.0221, 0.0267, 0.0132, 0.0168, 0.0185], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:47:17,696 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:21,920 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1185, 1.4324, 1.7104, 1.3461, 0.9266, 1.4437, 1.7668, 1.4720], + device='cuda:1'), covar=tensor([0.0538, 0.1314, 0.1667, 0.1471, 0.0640, 0.1538, 0.0707, 0.0689], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0157, 0.0099, 0.0161, 0.0112, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 23:47:37,387 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.392e+02 2.877e+02 3.488e+02 7.359e+02, threshold=5.753e+02, percent-clipped=2.0 +2023-02-06 23:47:47,609 INFO [train.py:901] (1/4) Epoch 19, batch 7300, loss[loss=0.2241, simple_loss=0.3057, pruned_loss=0.07123, over 8568.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2919, pruned_loss=0.0651, over 1618018.35 frames. ], batch size: 31, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:47:57,327 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:48:21,883 INFO [train.py:901] (1/4) Epoch 19, batch 7350, loss[loss=0.2417, simple_loss=0.3046, pruned_loss=0.08943, over 7528.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2925, pruned_loss=0.06584, over 1613953.22 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:48:33,057 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5148, 1.4719, 1.8415, 1.2800, 1.1419, 1.8277, 0.2265, 1.1617], + device='cuda:1'), covar=tensor([0.1686, 0.1311, 0.0379, 0.1030, 0.2818, 0.0465, 0.2262, 0.1287], + device='cuda:1'), in_proj_covar=tensor([0.0184, 0.0192, 0.0121, 0.0219, 0.0265, 0.0131, 0.0166, 0.0184], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:48:46,740 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 23:48:48,163 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.571e+02 3.070e+02 4.184e+02 8.940e+02, threshold=6.140e+02, percent-clipped=8.0 +2023-02-06 23:48:58,052 INFO [train.py:901] (1/4) Epoch 19, batch 7400, loss[loss=0.166, simple_loss=0.2419, pruned_loss=0.04511, over 7534.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2924, pruned_loss=0.06522, over 1616210.82 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 32.0 +2023-02-06 23:49:07,697 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-06 23:49:18,790 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:32,922 INFO [train.py:901] (1/4) Epoch 19, batch 7450, loss[loss=0.1886, simple_loss=0.2735, pruned_loss=0.05183, over 8249.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2912, pruned_loss=0.06447, over 1619560.74 frames. ], batch size: 24, lr: 3.95e-03, grad_scale: 32.0 +2023-02-06 23:49:33,699 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:38,521 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:39,907 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7057, 4.7337, 4.1922, 1.9157, 4.1327, 4.3712, 4.2111, 4.0756], + device='cuda:1'), covar=tensor([0.0681, 0.0470, 0.1042, 0.5064, 0.0827, 0.0709, 0.1115, 0.0599], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0429, 0.0429, 0.0535, 0.0419, 0.0430, 0.0413, 0.0374], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:49:44,018 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:46,567 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 23:49:58,932 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.506e+02 3.079e+02 4.075e+02 8.166e+02, threshold=6.159e+02, percent-clipped=5.0 +2023-02-06 23:50:01,986 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:08,330 INFO [train.py:901] (1/4) Epoch 19, batch 7500, loss[loss=0.2562, simple_loss=0.3313, pruned_loss=0.09056, over 8452.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2908, pruned_loss=0.06425, over 1619548.84 frames. ], batch size: 25, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:50:17,493 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:34,841 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:42,934 INFO [train.py:901] (1/4) Epoch 19, batch 7550, loss[loss=0.2403, simple_loss=0.3078, pruned_loss=0.08646, over 6803.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2906, pruned_loss=0.06443, over 1617733.82 frames. 
], batch size: 71, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:50:53,915 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:58,703 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:06,106 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1378, 1.8610, 2.1053, 1.9965, 1.2622, 1.7953, 2.6783, 2.4781], + device='cuda:1'), covar=tensor([0.0413, 0.1158, 0.1546, 0.1237, 0.0532, 0.1376, 0.0491, 0.0545], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0113, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 23:51:08,489 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.431e+02 2.980e+02 3.688e+02 7.634e+02, threshold=5.960e+02, percent-clipped=2.0 +2023-02-06 23:51:10,130 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2463, 1.1979, 1.5384, 1.1477, 0.6901, 1.3084, 1.2111, 1.1720], + device='cuda:1'), covar=tensor([0.0576, 0.1339, 0.1735, 0.1508, 0.0604, 0.1540, 0.0724, 0.0645], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0113, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-06 23:51:14,082 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:18,073 INFO [train.py:901] (1/4) Epoch 19, batch 7600, loss[loss=0.2123, simple_loss=0.2819, pruned_loss=0.07131, over 7436.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2913, pruned_loss=0.06484, over 1613202.31 frames. ], batch size: 17, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:51:47,782 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:53,104 INFO [train.py:901] (1/4) Epoch 19, batch 7650, loss[loss=0.2486, simple_loss=0.3118, pruned_loss=0.09265, over 7721.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2911, pruned_loss=0.06474, over 1613335.44 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:52:17,637 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:18,733 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.290e+02 2.780e+02 3.362e+02 7.829e+02, threshold=5.561e+02, percent-clipped=2.0 +2023-02-06 23:52:28,394 INFO [train.py:901] (1/4) Epoch 19, batch 7700, loss[loss=0.1905, simple_loss=0.2804, pruned_loss=0.05027, over 8255.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2921, pruned_loss=0.06541, over 1611319.38 frames. 
], batch size: 24, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:52:35,485 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:35,509 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:52,969 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4691, 1.8598, 4.5298, 1.8526, 2.5023, 5.1360, 5.2256, 4.4963], + device='cuda:1'), covar=tensor([0.1062, 0.1632, 0.0244, 0.2053, 0.1161, 0.0168, 0.0332, 0.0498], + device='cuda:1'), in_proj_covar=tensor([0.0292, 0.0320, 0.0286, 0.0314, 0.0303, 0.0264, 0.0407, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:52:57,461 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 23:53:03,343 INFO [train.py:901] (1/4) Epoch 19, batch 7750, loss[loss=0.2039, simple_loss=0.291, pruned_loss=0.05839, over 8493.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2918, pruned_loss=0.06534, over 1606160.11 frames. ], batch size: 28, lr: 3.94e-03, grad_scale: 16.0 +2023-02-06 23:53:28,914 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.456e+02 3.001e+02 3.725e+02 8.940e+02, threshold=6.003e+02, percent-clipped=11.0 +2023-02-06 23:53:37,743 INFO [train.py:901] (1/4) Epoch 19, batch 7800, loss[loss=0.2183, simple_loss=0.2957, pruned_loss=0.07047, over 8550.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2916, pruned_loss=0.06579, over 1606249.84 frames. ], batch size: 39, lr: 3.94e-03, grad_scale: 16.0 +2023-02-06 23:53:39,902 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:53:53,363 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:53:58,009 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:08,875 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2318, 4.2173, 3.8211, 2.0344, 3.7922, 3.7747, 3.7877, 3.6172], + device='cuda:1'), covar=tensor([0.0748, 0.0532, 0.1067, 0.4233, 0.0928, 0.0938, 0.1303, 0.0723], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0431, 0.0432, 0.0540, 0.0424, 0.0436, 0.0418, 0.0377], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:54:09,684 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:11,465 INFO [train.py:901] (1/4) Epoch 19, batch 7850, loss[loss=0.1856, simple_loss=0.2742, pruned_loss=0.04852, over 8195.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2918, pruned_loss=0.06545, over 1607235.88 frames. ], batch size: 23, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:54:14,369 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:30,516 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. 
limit=2.0 +2023-02-06 23:54:36,623 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.457e+02 2.874e+02 3.581e+02 1.670e+03, threshold=5.749e+02, percent-clipped=9.0 +2023-02-06 23:54:44,305 INFO [train.py:901] (1/4) Epoch 19, batch 7900, loss[loss=0.2117, simple_loss=0.3015, pruned_loss=0.061, over 8197.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2921, pruned_loss=0.06526, over 1610560.82 frames. ], batch size: 23, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:55:08,825 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 23:55:15,513 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:17,980 INFO [train.py:901] (1/4) Epoch 19, batch 7950, loss[loss=0.2237, simple_loss=0.305, pruned_loss=0.07115, over 8285.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2926, pruned_loss=0.06532, over 1615823.34 frames. ], batch size: 48, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:55:28,831 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:41,874 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:43,178 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.434e+02 3.034e+02 3.983e+02 8.510e+02, threshold=6.068e+02, percent-clipped=6.0 +2023-02-06 23:55:45,323 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:47,949 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9760, 1.6232, 1.4764, 1.6122, 1.3326, 1.3004, 1.2952, 1.3599], + device='cuda:1'), covar=tensor([0.1144, 0.0455, 0.1147, 0.0538, 0.0735, 0.1375, 0.0856, 0.0739], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0236, 0.0330, 0.0305, 0.0298, 0.0332, 0.0343, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-06 23:55:51,002 INFO [train.py:901] (1/4) Epoch 19, batch 8000, loss[loss=0.2264, simple_loss=0.3182, pruned_loss=0.06728, over 8331.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2931, pruned_loss=0.06558, over 1612006.35 frames. ], batch size: 26, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:56:10,304 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4847, 1.5062, 1.8661, 1.3183, 1.2205, 1.8428, 0.2761, 1.2324], + device='cuda:1'), covar=tensor([0.1703, 0.1281, 0.0358, 0.0949, 0.2832, 0.0408, 0.2093, 0.1220], + device='cuda:1'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0222, 0.0270, 0.0133, 0.0169, 0.0186], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-06 23:56:25,155 INFO [train.py:901] (1/4) Epoch 19, batch 8050, loss[loss=0.188, simple_loss=0.25, pruned_loss=0.06303, over 7536.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2928, pruned_loss=0.06601, over 1603259.36 frames. ], batch size: 18, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:56:59,001 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 23:57:04,933 INFO [train.py:901] (1/4) Epoch 20, batch 0, loss[loss=0.2719, simple_loss=0.3423, pruned_loss=0.1007, over 8388.00 frames. 
], tot_loss[loss=0.2719, simple_loss=0.3423, pruned_loss=0.1007, over 8388.00 frames. ], batch size: 48, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:57:04,933 INFO [train.py:926] (1/4) Computing validation loss +2023-02-06 23:57:16,943 INFO [train.py:935] (1/4) Epoch 20, validation: loss=0.1757, simple_loss=0.276, pruned_loss=0.03766, over 944034.00 frames. +2023-02-06 23:57:16,946 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-06 23:57:20,456 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.577e+02 3.496e+02 4.495e+02 1.164e+03, threshold=6.992e+02, percent-clipped=12.0 +2023-02-06 23:57:29,441 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:57:31,313 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 23:57:51,315 INFO [train.py:901] (1/4) Epoch 20, batch 50, loss[loss=0.208, simple_loss=0.2824, pruned_loss=0.06682, over 7796.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2967, pruned_loss=0.06733, over 365647.95 frames. ], batch size: 19, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:58:01,101 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153640.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:58:06,575 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 23:58:27,836 INFO [train.py:901] (1/4) Epoch 20, batch 100, loss[loss=0.2232, simple_loss=0.2994, pruned_loss=0.07355, over 7922.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2953, pruned_loss=0.06562, over 647534.65 frames. ], batch size: 20, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:58:29,238 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 23:58:31,349 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.446e+02 2.844e+02 3.351e+02 7.473e+02, threshold=5.688e+02, percent-clipped=2.0 +2023-02-06 23:58:58,569 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5990, 1.7129, 5.7461, 2.2379, 5.1285, 4.8869, 5.2775, 5.1902], + device='cuda:1'), covar=tensor([0.0524, 0.4918, 0.0423, 0.3752, 0.1014, 0.0866, 0.0514, 0.0532], + device='cuda:1'), in_proj_covar=tensor([0.0607, 0.0639, 0.0679, 0.0613, 0.0691, 0.0595, 0.0593, 0.0655], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:59:03,132 INFO [train.py:901] (1/4) Epoch 20, batch 150, loss[loss=0.2531, simple_loss=0.3257, pruned_loss=0.09021, over 8589.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2931, pruned_loss=0.06452, over 862779.74 frames. 
], batch size: 31, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:59:08,801 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3795, 1.5681, 4.5548, 1.7888, 4.0439, 3.8052, 4.1269, 4.0124], + device='cuda:1'), covar=tensor([0.0523, 0.4580, 0.0508, 0.3773, 0.1039, 0.0941, 0.0524, 0.0650], + device='cuda:1'), in_proj_covar=tensor([0.0607, 0.0639, 0.0679, 0.0613, 0.0691, 0.0595, 0.0593, 0.0656], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-06 23:59:23,338 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:59:39,296 INFO [train.py:901] (1/4) Epoch 20, batch 200, loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.05992, over 8513.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.291, pruned_loss=0.06387, over 1030295.22 frames. ], batch size: 26, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:59:42,519 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.177e+02 2.784e+02 3.416e+02 8.818e+02, threshold=5.569e+02, percent-clipped=1.0 +2023-02-06 23:59:43,903 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:59:51,028 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0 +2023-02-06 23:59:58,792 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 00:00:15,029 INFO [train.py:901] (1/4) Epoch 20, batch 250, loss[loss=0.231, simple_loss=0.3097, pruned_loss=0.0761, over 8404.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2914, pruned_loss=0.06414, over 1160146.55 frames. ], batch size: 49, lr: 3.84e-03, grad_scale: 8.0 +2023-02-07 00:00:26,546 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 00:00:31,635 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153850.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:34,736 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 00:00:48,276 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153875.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:48,774 INFO [train.py:901] (1/4) Epoch 20, batch 300, loss[loss=0.1827, simple_loss=0.2655, pruned_loss=0.04999, over 8094.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2921, pruned_loss=0.06479, over 1263757.76 frames. ], batch size: 21, lr: 3.84e-03, grad_scale: 8.0 +2023-02-07 00:00:51,993 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.425e+02 2.846e+02 3.739e+02 1.062e+03, threshold=5.691e+02, percent-clipped=2.0 +2023-02-07 00:01:05,166 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:01:24,551 INFO [train.py:901] (1/4) Epoch 20, batch 350, loss[loss=0.2387, simple_loss=0.3013, pruned_loss=0.08803, over 7977.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2929, pruned_loss=0.0644, over 1345264.79 frames. 
], batch size: 21, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:01:35,759 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153941.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:01:54,117 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5685, 2.3216, 1.6686, 2.2682, 2.1070, 1.3365, 2.0706, 2.2168], + device='cuda:1'), covar=tensor([0.1378, 0.0448, 0.1329, 0.0609, 0.0804, 0.1701, 0.0989, 0.0824], + device='cuda:1'), in_proj_covar=tensor([0.0348, 0.0233, 0.0329, 0.0303, 0.0297, 0.0331, 0.0342, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 00:01:54,352 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 00:01:59,294 INFO [train.py:901] (1/4) Epoch 20, batch 400, loss[loss=0.2182, simple_loss=0.302, pruned_loss=0.06726, over 8493.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2932, pruned_loss=0.06515, over 1403070.13 frames. ], batch size: 29, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:02:02,809 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 2.483e+02 2.937e+02 3.652e+02 9.410e+02, threshold=5.874e+02, percent-clipped=4.0 +2023-02-07 00:02:25,628 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:02:36,314 INFO [train.py:901] (1/4) Epoch 20, batch 450, loss[loss=0.2372, simple_loss=0.3159, pruned_loss=0.07926, over 8365.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2927, pruned_loss=0.0652, over 1450270.69 frames. ], batch size: 49, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:02:44,062 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154036.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:05,764 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154067.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:03:11,796 INFO [train.py:901] (1/4) Epoch 20, batch 500, loss[loss=0.2286, simple_loss=0.3282, pruned_loss=0.06457, over 8493.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2912, pruned_loss=0.065, over 1478922.61 frames. ], batch size: 26, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:03:13,450 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5022, 2.4203, 3.3192, 2.5618, 3.0961, 2.5631, 2.3786, 1.8285], + device='cuda:1'), covar=tensor([0.5214, 0.4948, 0.1766, 0.4010, 0.2632, 0.2814, 0.1754, 0.5436], + device='cuda:1'), in_proj_covar=tensor([0.0931, 0.0961, 0.0789, 0.0926, 0.0980, 0.0871, 0.0736, 0.0815], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 00:03:15,232 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 2.274e+02 2.685e+02 3.204e+02 7.760e+02, threshold=5.371e+02, percent-clipped=3.0 +2023-02-07 00:03:24,407 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:29,960 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:46,380 INFO [train.py:901] (1/4) Epoch 20, batch 550, loss[loss=0.199, simple_loss=0.2702, pruned_loss=0.06389, over 8089.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2912, pruned_loss=0.06502, over 1512258.83 frames. 
], batch size: 21, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:04:07,824 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:04:23,281 INFO [train.py:901] (1/4) Epoch 20, batch 600, loss[loss=0.1988, simple_loss=0.2744, pruned_loss=0.06157, over 7648.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2918, pruned_loss=0.06571, over 1531439.49 frames. ], batch size: 19, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:04:25,541 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:04:26,654 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.477e+02 2.962e+02 3.836e+02 8.919e+02, threshold=5.925e+02, percent-clipped=6.0 +2023-02-07 00:04:45,284 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 00:04:57,562 INFO [train.py:901] (1/4) Epoch 20, batch 650, loss[loss=0.2021, simple_loss=0.2895, pruned_loss=0.05735, over 8450.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2919, pruned_loss=0.06532, over 1554150.20 frames. ], batch size: 27, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:05:06,573 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:34,077 INFO [train.py:901] (1/4) Epoch 20, batch 700, loss[loss=0.2374, simple_loss=0.3196, pruned_loss=0.07759, over 8458.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2913, pruned_loss=0.06475, over 1570884.55 frames. ], batch size: 25, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:05:37,460 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.357e+02 2.958e+02 3.586e+02 6.466e+02, threshold=5.915e+02, percent-clipped=2.0 +2023-02-07 00:05:40,247 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154285.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:54,062 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154304.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:57,554 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:06:08,850 INFO [train.py:901] (1/4) Epoch 20, batch 750, loss[loss=0.1923, simple_loss=0.2788, pruned_loss=0.05287, over 7800.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2908, pruned_loss=0.06433, over 1582096.55 frames. ], batch size: 20, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:06:28,503 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154355.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:06:31,837 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1893, 2.1247, 1.6089, 1.8818, 1.7954, 1.2864, 1.6613, 1.6305], + device='cuda:1'), covar=tensor([0.1324, 0.0419, 0.1134, 0.0524, 0.0726, 0.1536, 0.0868, 0.0874], + device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0236, 0.0330, 0.0305, 0.0300, 0.0334, 0.0344, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 00:06:33,651 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-07 00:06:43,003 INFO [train.py:901] (1/4) Epoch 20, batch 800, loss[loss=0.1927, simple_loss=0.2845, pruned_loss=0.05045, over 8473.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2905, pruned_loss=0.06424, over 1592869.52 frames. ], batch size: 27, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:06:43,011 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 00:06:47,164 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.337e+02 2.441e+02 3.052e+02 3.711e+02 8.675e+02, threshold=6.104e+02, percent-clipped=3.0 +2023-02-07 00:07:01,167 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:07,113 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7216, 2.0193, 2.2243, 1.5691, 2.3787, 1.5163, 0.7646, 1.9103], + device='cuda:1'), covar=tensor([0.0540, 0.0290, 0.0196, 0.0429, 0.0286, 0.0670, 0.0727, 0.0259], + device='cuda:1'), in_proj_covar=tensor([0.0437, 0.0377, 0.0332, 0.0436, 0.0362, 0.0523, 0.0381, 0.0404], + device='cuda:1'), out_proj_covar=tensor([1.1821e-04, 9.9056e-05, 8.7847e-05, 1.1573e-04, 9.5808e-05, 1.4897e-04, + 1.0306e-04, 1.0783e-04], device='cuda:1') +2023-02-07 00:07:08,303 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154411.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:07:19,172 INFO [train.py:901] (1/4) Epoch 20, batch 850, loss[loss=0.2054, simple_loss=0.2884, pruned_loss=0.06122, over 8109.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2907, pruned_loss=0.06374, over 1601320.13 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:07:27,214 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:32,659 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:52,875 INFO [train.py:901] (1/4) Epoch 20, batch 900, loss[loss=0.1931, simple_loss=0.2721, pruned_loss=0.05703, over 7507.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.0636, over 1601853.45 frames. ], batch size: 18, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:07:56,214 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.439e+02 2.923e+02 3.686e+02 1.072e+03, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 00:08:00,439 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6621, 1.4579, 1.7241, 1.3547, 0.9076, 1.4469, 1.5432, 1.5413], + device='cuda:1'), covar=tensor([0.0568, 0.1247, 0.1584, 0.1479, 0.0597, 0.1454, 0.0667, 0.0611], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0113, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 00:08:04,405 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:08:29,161 INFO [train.py:901] (1/4) Epoch 20, batch 950, loss[loss=0.3014, simple_loss=0.3502, pruned_loss=0.1263, over 6712.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.291, pruned_loss=0.06429, over 1601911.68 frames. 
], batch size: 71, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:08:29,375 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154526.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:08:30,901 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.26 vs. limit=5.0 +2023-02-07 00:08:48,724 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154553.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:08:54,157 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:01,513 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 00:09:04,232 INFO [train.py:901] (1/4) Epoch 20, batch 1000, loss[loss=0.2232, simple_loss=0.3058, pruned_loss=0.07032, over 8439.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2907, pruned_loss=0.06408, over 1600725.74 frames. ], batch size: 27, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:09:07,494 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.511e+02 3.044e+02 3.807e+02 8.767e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 00:09:08,972 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:27,672 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 00:09:35,151 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 00:09:38,954 INFO [train.py:901] (1/4) Epoch 20, batch 1050, loss[loss=0.1608, simple_loss=0.2485, pruned_loss=0.03653, over 7795.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2897, pruned_loss=0.06385, over 1599691.16 frames. ], batch size: 20, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:09:41,228 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5773, 1.8213, 1.9690, 1.3222, 2.1308, 1.4209, 0.5623, 1.7914], + device='cuda:1'), covar=tensor([0.0600, 0.0359, 0.0303, 0.0569, 0.0415, 0.0836, 0.0815, 0.0317], + device='cuda:1'), in_proj_covar=tensor([0.0436, 0.0377, 0.0332, 0.0436, 0.0362, 0.0523, 0.0381, 0.0403], + device='cuda:1'), out_proj_covar=tensor([1.1771e-04, 9.9159e-05, 8.7781e-05, 1.1574e-04, 9.5813e-05, 1.4894e-04, + 1.0321e-04, 1.0761e-04], device='cuda:1') +2023-02-07 00:09:49,448 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 00:09:53,676 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154646.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:54,883 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:59,104 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:01,446 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154656.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:14,713 INFO [train.py:901] (1/4) Epoch 20, batch 1100, loss[loss=0.2115, simple_loss=0.2901, pruned_loss=0.06646, over 8651.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.289, pruned_loss=0.064, over 1598274.88 frames. 
], batch size: 34, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:10:18,094 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.486e+02 3.103e+02 3.988e+02 8.246e+02, threshold=6.206e+02, percent-clipped=6.0 +2023-02-07 00:10:18,321 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154681.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:29,761 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:30,329 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154699.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:10:48,875 INFO [train.py:901] (1/4) Epoch 20, batch 1150, loss[loss=0.1856, simple_loss=0.2785, pruned_loss=0.0463, over 7809.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2901, pruned_loss=0.06438, over 1603584.76 frames. ], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:10:57,842 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154738.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:59,088 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 00:11:16,280 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:19,778 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:25,027 INFO [train.py:901] (1/4) Epoch 20, batch 1200, loss[loss=0.2169, simple_loss=0.2883, pruned_loss=0.07281, over 7648.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2891, pruned_loss=0.06363, over 1605300.36 frames. ], batch size: 19, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:11:28,373 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.412e+02 2.746e+02 3.577e+02 9.067e+02, threshold=5.492e+02, percent-clipped=2.0 +2023-02-07 00:11:29,309 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154782.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:11:46,342 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154807.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:11:47,765 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154809.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:51,159 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154814.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:53,285 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154817.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:59,125 INFO [train.py:901] (1/4) Epoch 20, batch 1250, loss[loss=0.1947, simple_loss=0.2803, pruned_loss=0.05455, over 8247.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2907, pruned_loss=0.0642, over 1607021.65 frames. 
], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:05,337 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154834.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:06,468 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:11,367 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:26,513 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 00:12:34,977 INFO [train.py:901] (1/4) Epoch 20, batch 1300, loss[loss=0.2069, simple_loss=0.2752, pruned_loss=0.06924, over 7802.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2892, pruned_loss=0.0638, over 1606940.59 frames. ], batch size: 19, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:38,316 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.433e+02 3.191e+02 3.995e+02 7.235e+02, threshold=6.381e+02, percent-clipped=6.0 +2023-02-07 00:12:51,205 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7165, 1.9965, 2.1600, 1.3761, 2.3550, 1.5715, 0.7610, 1.9664], + device='cuda:1'), covar=tensor([0.0649, 0.0371, 0.0290, 0.0611, 0.0374, 0.0914, 0.0868, 0.0327], + device='cuda:1'), in_proj_covar=tensor([0.0441, 0.0383, 0.0335, 0.0442, 0.0367, 0.0528, 0.0387, 0.0408], + device='cuda:1'), out_proj_covar=tensor([1.1908e-04, 1.0093e-04, 8.8555e-05, 1.1715e-04, 9.7293e-05, 1.5050e-04, + 1.0489e-04, 1.0898e-04], device='cuda:1') +2023-02-07 00:12:52,158 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 00:13:05,460 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7629, 1.8183, 2.3366, 1.5873, 1.3525, 2.2984, 0.4782, 1.3628], + device='cuda:1'), covar=tensor([0.1885, 0.1237, 0.0337, 0.1324, 0.3010, 0.0417, 0.2244, 0.1476], + device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0192, 0.0123, 0.0220, 0.0268, 0.0133, 0.0169, 0.0187], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 00:13:09,371 INFO [train.py:901] (1/4) Epoch 20, batch 1350, loss[loss=0.17, simple_loss=0.2499, pruned_loss=0.04505, over 7429.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2894, pruned_loss=0.06378, over 1608087.88 frames. ], batch size: 17, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:27,091 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:29,048 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:40,767 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2246, 1.2611, 1.4711, 1.2467, 0.7251, 1.2599, 1.1643, 1.0361], + device='cuda:1'), covar=tensor([0.0560, 0.1241, 0.1634, 0.1359, 0.0566, 0.1486, 0.0716, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0157, 0.0100, 0.0160, 0.0112, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 00:13:44,756 INFO [train.py:901] (1/4) Epoch 20, batch 1400, loss[loss=0.1826, simple_loss=0.2625, pruned_loss=0.05138, over 7544.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2905, pruned_loss=0.0643, over 1610583.90 frames. 
], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:47,812 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:48,965 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.419e+02 2.969e+02 3.620e+02 8.609e+02, threshold=5.938e+02, percent-clipped=3.0 +2023-02-07 00:13:55,298 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:58,170 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:16,374 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:19,578 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:20,734 INFO [train.py:901] (1/4) Epoch 20, batch 1450, loss[loss=0.1888, simple_loss=0.2858, pruned_loss=0.04591, over 8717.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.291, pruned_loss=0.06465, over 1611076.63 frames. ], batch size: 34, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:29,172 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 00:14:33,302 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:36,493 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:50,887 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155070.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:14:55,251 INFO [train.py:901] (1/4) Epoch 20, batch 1500, loss[loss=0.19, simple_loss=0.2828, pruned_loss=0.04856, over 8252.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.06453, over 1615834.92 frames. ], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:58,584 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.482e+02 3.072e+02 3.822e+02 6.990e+02, threshold=6.143e+02, percent-clipped=2.0 +2023-02-07 00:14:59,297 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:02,788 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155087.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:09,002 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155095.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:15:15,565 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:30,481 INFO [train.py:901] (1/4) Epoch 20, batch 1550, loss[loss=0.2231, simple_loss=0.2976, pruned_loss=0.07428, over 8290.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2903, pruned_loss=0.064, over 1615127.21 frames. 
], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:15:37,001 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1980, 1.9838, 2.6763, 2.2052, 2.6984, 2.2388, 1.9575, 1.4023], + device='cuda:1'), covar=tensor([0.5537, 0.5039, 0.2091, 0.3956, 0.2643, 0.3110, 0.2002, 0.5668], + device='cuda:1'), in_proj_covar=tensor([0.0930, 0.0960, 0.0787, 0.0925, 0.0981, 0.0874, 0.0736, 0.0815], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 00:15:44,485 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 00:16:04,723 INFO [train.py:901] (1/4) Epoch 20, batch 1600, loss[loss=0.1986, simple_loss=0.2832, pruned_loss=0.05697, over 8100.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.06358, over 1615509.52 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:08,760 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.295e+02 2.863e+02 3.431e+02 6.352e+02, threshold=5.726e+02, percent-clipped=1.0 +2023-02-07 00:16:20,595 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:27,182 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:31,900 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5756, 1.8428, 2.0006, 1.4923, 2.1642, 1.4820, 0.6204, 1.8340], + device='cuda:1'), covar=tensor([0.0525, 0.0332, 0.0233, 0.0426, 0.0340, 0.0799, 0.0822, 0.0257], + device='cuda:1'), in_proj_covar=tensor([0.0440, 0.0382, 0.0336, 0.0440, 0.0368, 0.0527, 0.0387, 0.0407], + device='cuda:1'), out_proj_covar=tensor([1.1892e-04, 1.0037e-04, 8.8691e-05, 1.1646e-04, 9.7576e-05, 1.5007e-04, + 1.0492e-04, 1.0855e-04], device='cuda:1') +2023-02-07 00:16:40,723 INFO [train.py:901] (1/4) Epoch 20, batch 1650, loss[loss=0.1903, simple_loss=0.2805, pruned_loss=0.05008, over 7927.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2894, pruned_loss=0.06363, over 1611556.36 frames. ], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:45,155 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:17:15,943 INFO [train.py:901] (1/4) Epoch 20, batch 1700, loss[loss=0.2121, simple_loss=0.299, pruned_loss=0.06263, over 8462.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2904, pruned_loss=0.06419, over 1614644.87 frames. ], batch size: 27, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:17:19,373 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.383e+02 2.759e+02 3.259e+02 7.427e+02, threshold=5.517e+02, percent-clipped=3.0 +2023-02-07 00:17:51,289 INFO [train.py:901] (1/4) Epoch 20, batch 1750, loss[loss=0.1776, simple_loss=0.2648, pruned_loss=0.04524, over 7986.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2917, pruned_loss=0.06464, over 1617525.39 frames. 
], batch size: 21, lr: 3.82e-03, grad_scale: 16.0 +2023-02-07 00:18:00,381 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:17,135 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:27,001 INFO [train.py:901] (1/4) Epoch 20, batch 1800, loss[loss=0.2508, simple_loss=0.3297, pruned_loss=0.08596, over 8453.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2911, pruned_loss=0.06455, over 1614788.58 frames. ], batch size: 27, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:18:31,088 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.586e+02 2.965e+02 3.772e+02 7.314e+02, threshold=5.929e+02, percent-clipped=8.0 +2023-02-07 00:18:34,026 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:40,877 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 00:18:51,794 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-07 00:19:01,120 INFO [train.py:901] (1/4) Epoch 20, batch 1850, loss[loss=0.1705, simple_loss=0.2738, pruned_loss=0.03364, over 8351.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2914, pruned_loss=0.06458, over 1614770.14 frames. ], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:04,533 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,145 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,188 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:36,712 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155475.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:37,253 INFO [train.py:901] (1/4) Epoch 20, batch 1900, loss[loss=0.1995, simple_loss=0.2839, pruned_loss=0.05754, over 7927.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2907, pruned_loss=0.06409, over 1613865.01 frames. ], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:38,725 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2532, 1.1530, 3.3355, 1.0435, 2.9392, 2.7591, 3.0387, 2.8856], + device='cuda:1'), covar=tensor([0.0711, 0.4525, 0.0759, 0.4167, 0.1297, 0.1134, 0.0720, 0.0931], + device='cuda:1'), in_proj_covar=tensor([0.0602, 0.0624, 0.0673, 0.0604, 0.0684, 0.0588, 0.0587, 0.0656], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 00:19:38,784 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:41,336 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.441e+02 2.899e+02 3.473e+02 6.405e+02, threshold=5.799e+02, percent-clipped=1.0 +2023-02-07 00:20:11,851 INFO [train.py:901] (1/4) Epoch 20, batch 1950, loss[loss=0.2144, simple_loss=0.2894, pruned_loss=0.06969, over 7974.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2901, pruned_loss=0.06331, over 1615693.08 frames. 
], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:13,303 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 00:20:26,412 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:20:26,922 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 00:20:29,836 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1312, 2.4023, 1.9570, 3.0071, 1.4138, 1.7761, 2.2020, 2.3572], + device='cuda:1'), covar=tensor([0.0738, 0.0810, 0.0872, 0.0351, 0.1144, 0.1285, 0.0926, 0.0804], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0248, 0.0213, 0.0205, 0.0250, 0.0252, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 00:20:46,965 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 00:20:47,677 INFO [train.py:901] (1/4) Epoch 20, batch 2000, loss[loss=0.1899, simple_loss=0.2767, pruned_loss=0.05158, over 8472.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2897, pruned_loss=0.06324, over 1610331.55 frames. ], batch size: 25, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:51,752 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.363e+02 2.911e+02 3.881e+02 1.027e+03, threshold=5.822e+02, percent-clipped=2.0 +2023-02-07 00:21:20,956 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:22,875 INFO [train.py:901] (1/4) Epoch 20, batch 2050, loss[loss=0.222, simple_loss=0.3063, pruned_loss=0.06885, over 8455.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2905, pruned_loss=0.06393, over 1611558.46 frames. ], batch size: 27, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:21:56,395 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 00:21:57,539 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:58,087 INFO [train.py:901] (1/4) Epoch 20, batch 2100, loss[loss=0.2334, simple_loss=0.3123, pruned_loss=0.07727, over 8503.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2913, pruned_loss=0.0645, over 1613042.39 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:02,103 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.564e+02 2.968e+02 3.686e+02 8.256e+02, threshold=5.935e+02, percent-clipped=7.0 +2023-02-07 00:22:22,169 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:22:33,481 INFO [train.py:901] (1/4) Epoch 20, batch 2150, loss[loss=0.1954, simple_loss=0.2737, pruned_loss=0.05857, over 8236.00 frames. ], tot_loss[loss=0.211, simple_loss=0.292, pruned_loss=0.06499, over 1614774.34 frames. ], batch size: 22, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:39,028 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:07,320 INFO [train.py:901] (1/4) Epoch 20, batch 2200, loss[loss=0.207, simple_loss=0.2932, pruned_loss=0.06037, over 8505.00 frames. 
], tot_loss[loss=0.2096, simple_loss=0.2909, pruned_loss=0.06412, over 1618311.61 frames. ], batch size: 28, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:23:12,096 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.519e+02 2.939e+02 3.787e+02 7.175e+02, threshold=5.878e+02, percent-clipped=4.0
+2023-02-07 00:23:26,038 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:23:26,117 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:23:38,412 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155819.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:23:43,079 INFO [train.py:901] (1/4) Epoch 20, batch 2250, loss[loss=0.1903, simple_loss=0.2814, pruned_loss=0.04964, over 8593.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2905, pruned_loss=0.06374, over 1618798.53 frames. ], batch size: 34, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:23:44,828 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155827.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:24:17,875 INFO [train.py:901] (1/4) Epoch 20, batch 2300, loss[loss=0.2085, simple_loss=0.2872, pruned_loss=0.0649, over 7543.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2908, pruned_loss=0.06406, over 1614426.30 frames. ], batch size: 18, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:24:21,984 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.500e+02 2.966e+02 3.753e+02 6.656e+02, threshold=5.933e+02, percent-clipped=3.0
+2023-02-07 00:24:54,615 INFO [train.py:901] (1/4) Epoch 20, batch 2350, loss[loss=0.2056, simple_loss=0.2942, pruned_loss=0.05845, over 8026.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2903, pruned_loss=0.06327, over 1614346.05 frames. ], batch size: 22, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:25:00,025 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155934.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:25:23,233 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:25:29,309 INFO [train.py:901] (1/4) Epoch 20, batch 2400, loss[loss=0.1986, simple_loss=0.2697, pruned_loss=0.06375, over 8134.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.292, pruned_loss=0.06455, over 1614398.73 frames. ], batch size: 22, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:25:33,222 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.377e+02 2.729e+02 3.502e+02 6.388e+02, threshold=5.458e+02, percent-clipped=1.0
+2023-02-07 00:26:00,992 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156019.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:26:05,604 INFO [train.py:901] (1/4) Epoch 20, batch 2450, loss[loss=0.2027, simple_loss=0.2961, pruned_loss=0.05462, over 8249.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2923, pruned_loss=0.06466, over 1616101.93 frames. ], batch size: 24, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:26:35,053 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2499, 2.5378, 2.9008, 1.6863, 3.2386, 1.9453, 1.4798, 2.2295], device='cuda:1'), covar=tensor([0.0687, 0.0406, 0.0288, 0.0707, 0.0359, 0.0781, 0.0876, 0.0477], device='cuda:1'), in_proj_covar=tensor([0.0441, 0.0380, 0.0334, 0.0437, 0.0366, 0.0525, 0.0386, 0.0407], device='cuda:1'), out_proj_covar=tensor([1.1893e-04, 1.0001e-04, 8.8036e-05, 1.1564e-04, 9.6854e-05, 1.4930e-04, 1.0449e-04, 1.0845e-04], device='cuda:1')
+2023-02-07 00:26:40,969 INFO [train.py:901] (1/4) Epoch 20, batch 2500, loss[loss=0.2096, simple_loss=0.3017, pruned_loss=0.05877, over 8139.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2923, pruned_loss=0.06478, over 1610760.63 frames. ], batch size: 22, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:26:45,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.463e+02 3.105e+02 3.826e+02 1.382e+03, threshold=6.210e+02, percent-clipped=11.0
+2023-02-07 00:26:45,217 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:26:49,183 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156088.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:26:53,948 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3340, 1.2872, 2.3642, 1.3092, 2.1674, 2.5051, 2.6644, 2.1441], device='cuda:1'), covar=tensor([0.1171, 0.1445, 0.0495, 0.2139, 0.0796, 0.0404, 0.0738, 0.0717], device='cuda:1'), in_proj_covar=tensor([0.0288, 0.0318, 0.0284, 0.0311, 0.0300, 0.0260, 0.0405, 0.0301], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], device='cuda:1')
+2023-02-07 00:27:15,375 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4837, 2.4850, 1.7734, 2.1848, 2.0141, 1.5101, 1.8904, 2.0873], device='cuda:1'), covar=tensor([0.1421, 0.0425, 0.1189, 0.0619, 0.0688, 0.1570, 0.1007, 0.0957], device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0235, 0.0330, 0.0306, 0.0298, 0.0335, 0.0342, 0.0315], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:27:15,812 INFO [train.py:901] (1/4) Epoch 20, batch 2550, loss[loss=0.2321, simple_loss=0.3087, pruned_loss=0.07777, over 8026.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2927, pruned_loss=0.06504, over 1612235.34 frames. ], batch size: 22, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:27:21,374 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:27:29,846 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156146.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:27:44,239 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5752, 2.6659, 2.0573, 2.4256, 2.2987, 1.8294, 2.0572, 2.2605], device='cuda:1'), covar=tensor([0.1538, 0.0415, 0.1066, 0.0606, 0.0674, 0.1329, 0.1025, 0.0955], device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0236, 0.0331, 0.0306, 0.0298, 0.0336, 0.0343, 0.0317], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:27:50,759 INFO [train.py:901] (1/4) Epoch 20, batch 2600, loss[loss=0.2284, simple_loss=0.3047, pruned_loss=0.07604, over 8474.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2915, pruned_loss=0.06436, over 1613989.45 frames. ], batch size: 25, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:27:54,662 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.376e+02 3.118e+02 3.808e+02 9.704e+02, threshold=6.236e+02, percent-clipped=5.0
+2023-02-07 00:28:00,362 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156190.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:28:16,947 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0
+2023-02-07 00:28:17,477 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156215.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:28:24,598 INFO [train.py:901] (1/4) Epoch 20, batch 2650, loss[loss=0.2247, simple_loss=0.3094, pruned_loss=0.06997, over 8478.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2917, pruned_loss=0.0645, over 1616061.02 frames. ], batch size: 27, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:28:27,928 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8419, 3.7915, 3.4267, 1.7545, 3.3689, 3.4491, 3.3968, 3.2011], device='cuda:1'), covar=tensor([0.0855, 0.0647, 0.1108, 0.4734, 0.0903, 0.0966, 0.1494, 0.0961], device='cuda:1'), in_proj_covar=tensor([0.0513, 0.0425, 0.0429, 0.0526, 0.0419, 0.0431, 0.0417, 0.0372], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:28:28,689 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7588, 1.6700, 1.7854, 1.5766, 1.0215, 1.5976, 2.0586, 1.8841], device='cuda:1'), covar=tensor([0.0476, 0.1234, 0.1708, 0.1392, 0.0607, 0.1514, 0.0642, 0.0604], device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], device='cuda:1')
+2023-02-07 00:28:30,656 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156234.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:28:49,174 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156261.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:29:00,117 INFO [train.py:901] (1/4) Epoch 20, batch 2700, loss[loss=0.2186, simple_loss=0.2986, pruned_loss=0.06933, over 8484.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2919, pruned_loss=0.0647, over 1616604.41 frames. ], batch size: 28, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:29:02,512 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-07 00:29:03,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7061, 2.5655, 1.8222, 2.3244, 2.3411, 1.6112, 2.1648, 2.2330], device='cuda:1'), covar=tensor([0.1335, 0.0367, 0.1093, 0.0564, 0.0580, 0.1419, 0.0866, 0.0902], device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0236, 0.0333, 0.0307, 0.0300, 0.0338, 0.0344, 0.0319], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:29:04,092 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.401e+02 3.078e+02 3.829e+02 8.557e+02, threshold=6.156e+02, percent-clipped=4.0
+2023-02-07 00:29:23,253 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156308.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:29:35,129 INFO [train.py:901] (1/4) Epoch 20, batch 2750, loss[loss=0.2216, simple_loss=0.2912, pruned_loss=0.07598, over 7651.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2923, pruned_loss=0.06506, over 1615259.43 frames. ], batch size: 19, lr: 3.81e-03, grad_scale: 8.0
+2023-02-07 00:29:40,761 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7592, 2.6462, 1.8529, 2.3246, 2.3256, 1.6020, 2.1274, 2.2316], device='cuda:1'), covar=tensor([0.1517, 0.0414, 0.1227, 0.0717, 0.0761, 0.1543, 0.1120, 0.1168], device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0236, 0.0332, 0.0307, 0.0299, 0.0337, 0.0343, 0.0318], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:29:43,522 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156338.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:29:59,920 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-07 00:30:01,796 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156363.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:30:10,370 INFO [train.py:901] (1/4) Epoch 20, batch 2800, loss[loss=0.2697, simple_loss=0.3279, pruned_loss=0.1057, over 8535.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2926, pruned_loss=0.06576, over 1614959.56 frames. ], batch size: 49, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:30:15,863 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.534e+02 2.983e+02 3.648e+02 6.974e+02, threshold=5.966e+02, percent-clipped=1.0
+2023-02-07 00:30:20,855 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156390.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:30:38,823 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156415.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:30:46,250 INFO [train.py:901] (1/4) Epoch 20, batch 2850, loss[loss=0.1852, simple_loss=0.2774, pruned_loss=0.04651, over 8290.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2927, pruned_loss=0.06543, over 1617405.54 frames. ], batch size: 23, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:30:50,404 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156432.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:31:10,265 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-07 00:31:20,791 INFO [train.py:901] (1/4) Epoch 20, batch 2900, loss[loss=0.2442, simple_loss=0.322, pruned_loss=0.08313, over 8467.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2928, pruned_loss=0.06492, over 1617552.31 frames. ], batch size: 25, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:31:26,323 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.409e+02 2.783e+02 3.401e+02 8.568e+02, threshold=5.566e+02, percent-clipped=1.0
+2023-02-07 00:31:50,426 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:31:53,722 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-07 00:31:57,136 INFO [train.py:901] (1/4) Epoch 20, batch 2950, loss[loss=0.1666, simple_loss=0.25, pruned_loss=0.04162, over 5582.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2925, pruned_loss=0.06436, over 1616869.89 frames. ], batch size: 12, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:32:02,687 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6734, 1.4925, 4.8829, 1.7557, 4.3470, 4.0846, 4.4459, 4.2988], device='cuda:1'), covar=tensor([0.0514, 0.4503, 0.0450, 0.3840, 0.1004, 0.0877, 0.0532, 0.0601], device='cuda:1'), in_proj_covar=tensor([0.0607, 0.0627, 0.0675, 0.0609, 0.0691, 0.0597, 0.0593, 0.0659], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:32:08,262 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156542.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:32:11,758 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156547.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:32:17,784 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6983, 1.9332, 2.0078, 1.3257, 2.1761, 1.5201, 0.7090, 1.9156], device='cuda:1'), covar=tensor([0.0518, 0.0334, 0.0276, 0.0551, 0.0390, 0.0755, 0.0774, 0.0280], device='cuda:1'), in_proj_covar=tensor([0.0446, 0.0384, 0.0337, 0.0439, 0.0368, 0.0530, 0.0389, 0.0409], device='cuda:1'), out_proj_covar=tensor([1.2015e-04, 1.0112e-04, 8.8875e-05, 1.1625e-04, 9.7303e-05, 1.5098e-04, 1.0529e-04, 1.0906e-04], device='cuda:1')
+2023-02-07 00:32:31,033 INFO [train.py:901] (1/4) Epoch 20, batch 3000, loss[loss=0.2324, simple_loss=0.307, pruned_loss=0.07892, over 8352.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.293, pruned_loss=0.06479, over 1617959.79 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:32:31,034 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-07 00:32:44,429 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3656, 1.4579, 1.2987, 1.7345, 0.8165, 1.1299, 1.2046, 1.3697], device='cuda:1'), covar=tensor([0.0565, 0.0660, 0.0676, 0.0322, 0.0953, 0.1015, 0.0672, 0.0680], device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0211, 0.0206, 0.0247, 0.0251, 0.0209], device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], device='cuda:1')
+2023-02-07 00:32:46,840 INFO [train.py:935] (1/4) Epoch 20, validation: loss=0.1756, simple_loss=0.2756, pruned_loss=0.03779, over 944034.00 frames.
+2023-02-07 00:32:46,842 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-07 00:32:48,387 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156578.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:32:51,796 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 2.420e+02 3.007e+02 3.801e+02 6.408e+02, threshold=6.014e+02, percent-clipped=4.0
+2023-02-07 00:33:12,266 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8763, 1.6197, 1.7586, 1.4070, 0.9328, 1.5805, 1.6441, 1.6643], device='cuda:1'), covar=tensor([0.0511, 0.1132, 0.1518, 0.1346, 0.0546, 0.1310, 0.0656, 0.0564], device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0112, 0.0142], device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], device='cuda:1')
+2023-02-07 00:33:22,164 INFO [train.py:901] (1/4) Epoch 20, batch 3050, loss[loss=0.212, simple_loss=0.2972, pruned_loss=0.06346, over 8237.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.292, pruned_loss=0.06485, over 1614318.30 frames. ], batch size: 22, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:33:40,598 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156652.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:33:57,465 INFO [train.py:901] (1/4) Epoch 20, batch 3100, loss[loss=0.2011, simple_loss=0.288, pruned_loss=0.05712, over 8291.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2907, pruned_loss=0.06414, over 1615237.37 frames. ], batch size: 23, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:34:02,301 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.428e+02 2.992e+02 3.732e+02 8.006e+02, threshold=5.985e+02, percent-clipped=5.0
+2023-02-07 00:34:02,449 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.4802, 1.1325, 3.7926, 1.5426, 3.0043, 2.9568, 3.3896, 3.3963], device='cuda:1'), covar=tensor([0.1594, 0.7361, 0.1461, 0.5383, 0.2538, 0.2118, 0.1379, 0.1304], device='cuda:1'), in_proj_covar=tensor([0.0613, 0.0632, 0.0681, 0.0614, 0.0694, 0.0600, 0.0598, 0.0662], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:34:09,239 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156693.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:34:10,727 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0761, 1.8525, 2.3816, 1.9904, 2.3344, 2.1706, 1.8987, 1.0737], device='cuda:1'), covar=tensor([0.5246, 0.4428, 0.1786, 0.3427, 0.2324, 0.2859, 0.1840, 0.4982], device='cuda:1'), in_proj_covar=tensor([0.0933, 0.0963, 0.0790, 0.0929, 0.0986, 0.0881, 0.0740, 0.0815], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], device='cuda:1')
+2023-02-07 00:34:12,936 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0
+2023-02-07 00:34:28,114 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5352, 2.5559, 1.8686, 2.2407, 2.2352, 1.6077, 2.0467, 2.1458], device='cuda:1'), covar=tensor([0.1457, 0.0439, 0.1202, 0.0622, 0.0693, 0.1544, 0.0961, 0.0920], device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0237, 0.0331, 0.0307, 0.0299, 0.0336, 0.0344, 0.0316], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:34:31,976 INFO [train.py:901] (1/4) Epoch 20, batch 3150, loss[loss=0.18, simple_loss=0.2616, pruned_loss=0.04918, over 7640.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2906, pruned_loss=0.0642, over 1615870.11 frames. ], batch size: 19, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:34:36,352 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 00:34:58,561 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0136, 1.6964, 1.8176, 1.4716, 0.9557, 1.6194, 1.7603, 1.7141], device='cuda:1'), covar=tensor([0.0501, 0.1167, 0.1585, 0.1339, 0.0587, 0.1360, 0.0650, 0.0595], device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], device='cuda:1')
+2023-02-07 00:35:01,275 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156767.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:35:01,312 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156767.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:35:07,246 INFO [train.py:901] (1/4) Epoch 20, batch 3200, loss[loss=0.2161, simple_loss=0.2867, pruned_loss=0.07274, over 7184.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2913, pruned_loss=0.06464, over 1612608.92 frames. ], batch size: 16, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:35:11,882 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.338e+02 2.875e+02 3.612e+02 1.133e+03, threshold=5.749e+02, percent-clipped=4.0
+2023-02-07 00:35:21,727 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5592, 1.5410, 1.8448, 1.2890, 1.2658, 1.7981, 0.1840, 1.2650], device='cuda:1'), covar=tensor([0.1796, 0.1458, 0.0419, 0.1071, 0.2622, 0.0511, 0.2047, 0.1260], device='cuda:1'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0220, 0.0268, 0.0133, 0.0168, 0.0188], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], device='cuda:1')
+2023-02-07 00:35:26,512 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156803.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:35:41,820 INFO [train.py:901] (1/4) Epoch 20, batch 3250, loss[loss=0.1765, simple_loss=0.2534, pruned_loss=0.04975, over 7435.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2908, pruned_loss=0.06426, over 1611600.02 frames. ], batch size: 17, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:35:43,292 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156828.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:36:17,055 INFO [train.py:901] (1/4) Epoch 20, batch 3300, loss[loss=0.2031, simple_loss=0.2957, pruned_loss=0.05524, over 8251.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2915, pruned_loss=0.0646, over 1615628.39 frames. ], batch size: 24, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:36:21,773 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.341e+02 2.967e+02 3.887e+02 7.432e+02, threshold=5.934e+02, percent-clipped=7.0
+2023-02-07 00:36:35,544 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156903.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:36:51,531 INFO [train.py:901] (1/4) Epoch 20, batch 3350, loss[loss=0.1789, simple_loss=0.2605, pruned_loss=0.0487, over 7414.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2911, pruned_loss=0.06442, over 1612267.88 frames. ], batch size: 17, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:37:07,172 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156949.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:37:25,860 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156974.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:37:27,106 INFO [train.py:901] (1/4) Epoch 20, batch 3400, loss[loss=0.2166, simple_loss=0.3104, pruned_loss=0.06141, over 8355.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2907, pruned_loss=0.06425, over 1615533.14 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:37:31,900 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.508e+02 3.011e+02 3.882e+02 8.239e+02, threshold=6.022e+02, percent-clipped=6.0
+2023-02-07 00:38:01,311 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157023.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:38:03,928 INFO [train.py:901] (1/4) Epoch 20, batch 3450, loss[loss=0.2154, simple_loss=0.2866, pruned_loss=0.07205, over 8233.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2908, pruned_loss=0.06449, over 1615766.41 frames. ], batch size: 22, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:38:05,360 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157028.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:38:18,896 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157048.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:38:38,197 INFO [train.py:901] (1/4) Epoch 20, batch 3500, loss[loss=0.1998, simple_loss=0.2976, pruned_loss=0.05097, over 8101.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2919, pruned_loss=0.06527, over 1617537.81 frames. ], batch size: 23, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:38:43,591 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.548e+02 3.004e+02 3.939e+02 7.448e+02, threshold=6.007e+02, percent-clipped=9.0
+2023-02-07 00:39:02,221 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-07 00:39:03,067 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157111.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:39:13,060 INFO [train.py:901] (1/4) Epoch 20, batch 3550, loss[loss=0.2251, simple_loss=0.2969, pruned_loss=0.07662, over 8080.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2918, pruned_loss=0.06528, over 1620563.48 frames. ], batch size: 21, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:39:37,685 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157160.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:39:43,854 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1127, 1.3652, 4.3144, 1.5688, 3.7531, 3.6327, 3.9235, 3.7853], device='cuda:1'), covar=tensor([0.0714, 0.4832, 0.0550, 0.4022, 0.1225, 0.0958, 0.0631, 0.0724], device='cuda:1'), in_proj_covar=tensor([0.0608, 0.0631, 0.0680, 0.0610, 0.0692, 0.0598, 0.0595, 0.0659], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:39:48,307 INFO [train.py:901] (1/4) Epoch 20, batch 3600, loss[loss=0.2527, simple_loss=0.3297, pruned_loss=0.08787, over 8352.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2911, pruned_loss=0.06459, over 1620742.66 frames. ], batch size: 24, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:39:49,146 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9148, 1.4113, 1.7026, 1.3012, 0.9557, 1.4360, 1.7101, 1.3555], device='cuda:1'), covar=tensor([0.0514, 0.1296, 0.1637, 0.1478, 0.0591, 0.1515, 0.0687, 0.0689], device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], device='cuda:1')
+2023-02-07 00:39:53,036 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.446e+02 2.923e+02 3.668e+02 9.434e+02, threshold=5.847e+02, percent-clipped=4.0
+2023-02-07 00:40:24,187 INFO [train.py:901] (1/4) Epoch 20, batch 3650, loss[loss=0.2264, simple_loss=0.2948, pruned_loss=0.07898, over 7276.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2899, pruned_loss=0.06401, over 1615332.59 frames. ], batch size: 16, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:40:24,361 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157226.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:40:38,564 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:40:44,448 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1589, 3.8888, 2.4738, 2.7467, 3.2053, 2.0499, 3.2843, 3.1509], device='cuda:1'), covar=tensor([0.1491, 0.0314, 0.0992, 0.0756, 0.0575, 0.1377, 0.0846, 0.0934], device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0235, 0.0330, 0.0307, 0.0299, 0.0335, 0.0343, 0.0315], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:40:58,619 INFO [train.py:901] (1/4) Epoch 20, batch 3700, loss[loss=0.2152, simple_loss=0.2897, pruned_loss=0.07034, over 8339.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2895, pruned_loss=0.06358, over 1615372.12 frames. ], batch size: 26, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:41:03,187 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.545e+02 3.038e+02 3.849e+02 9.039e+02, threshold=6.076e+02, percent-clipped=6.0
+2023-02-07 00:41:05,243 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 00:41:33,573 INFO [train.py:901] (1/4) Epoch 20, batch 3750, loss[loss=0.191, simple_loss=0.2619, pruned_loss=0.06004, over 7804.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2895, pruned_loss=0.06412, over 1609337.44 frames. ], batch size: 19, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:41:58,945 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157362.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:05,451 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:06,810 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157374.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:07,964 INFO [train.py:901] (1/4) Epoch 20, batch 3800, loss[loss=0.2202, simple_loss=0.2987, pruned_loss=0.07085, over 8327.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2888, pruned_loss=0.06353, over 1610865.29 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:42:11,380 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3684, 1.6021, 4.5683, 1.8323, 4.0211, 3.8221, 4.1827, 4.0444], device='cuda:1'), covar=tensor([0.0552, 0.4310, 0.0483, 0.3986, 0.1031, 0.0922, 0.0517, 0.0609], device='cuda:1'), in_proj_covar=tensor([0.0612, 0.0634, 0.0684, 0.0614, 0.0694, 0.0604, 0.0598, 0.0663], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:42:12,512 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.302e+02 2.981e+02 3.884e+02 7.104e+02, threshold=5.962e+02, percent-clipped=4.0
+2023-02-07 00:42:21,034 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6056, 1.9380, 2.1569, 1.0956, 2.2512, 1.5013, 0.6398, 1.8891], device='cuda:1'), covar=tensor([0.0619, 0.0366, 0.0271, 0.0664, 0.0349, 0.0894, 0.0884, 0.0295], device='cuda:1'), in_proj_covar=tensor([0.0446, 0.0388, 0.0339, 0.0441, 0.0372, 0.0533, 0.0393, 0.0413], device='cuda:1'), out_proj_covar=tensor([1.2026e-04, 1.0208e-04, 8.9457e-05, 1.1672e-04, 9.8420e-05, 1.5163e-04, 1.0626e-04, 1.1019e-04], device='cuda:1')
+2023-02-07 00:42:25,029 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157400.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:39,605 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157422.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:42:42,777 INFO [train.py:901] (1/4) Epoch 20, batch 3850, loss[loss=0.1794, simple_loss=0.2693, pruned_loss=0.04479, over 8492.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2902, pruned_loss=0.06449, over 1612486.51 frames. ], batch size: 28, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:43:09,749 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 00:43:17,669 INFO [train.py:901] (1/4) Epoch 20, batch 3900, loss[loss=0.235, simple_loss=0.3132, pruned_loss=0.07836, over 8439.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2908, pruned_loss=0.06444, over 1614560.08 frames. ], batch size: 27, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:43:21,793 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157482.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:22,211 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.513e+02 3.153e+02 3.900e+02 7.255e+02, threshold=6.305e+02, percent-clipped=5.0
+2023-02-07 00:43:24,970 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157487.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:37,149 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:39,431 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:51,282 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157524.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:43:52,542 INFO [train.py:901] (1/4) Epoch 20, batch 3950, loss[loss=0.2301, simple_loss=0.3081, pruned_loss=0.07607, over 8681.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2903, pruned_loss=0.06417, over 1612184.82 frames. ], batch size: 34, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:44:21,478 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4314, 2.6518, 3.0646, 1.4960, 3.2535, 1.9918, 1.4624, 2.4044], device='cuda:1'), covar=tensor([0.0685, 0.0346, 0.0291, 0.0803, 0.0451, 0.0838, 0.0984, 0.0484], device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0390, 0.0341, 0.0444, 0.0373, 0.0535, 0.0395, 0.0414], device='cuda:1'), out_proj_covar=tensor([1.2124e-04, 1.0247e-04, 9.0116e-05, 1.1761e-04, 9.8803e-05, 1.5224e-04, 1.0693e-04, 1.1042e-04], device='cuda:1')
+2023-02-07 00:44:28,450 INFO [train.py:901] (1/4) Epoch 20, batch 4000, loss[loss=0.2112, simple_loss=0.2882, pruned_loss=0.06715, over 8465.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2895, pruned_loss=0.06298, over 1613816.06 frames. ], batch size: 27, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:44:33,892 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.441e+02 3.259e+02 3.960e+02 7.383e+02, threshold=6.518e+02, percent-clipped=3.0
+2023-02-07 00:44:34,867 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6479, 2.4639, 3.3928, 2.6263, 3.0637, 2.5884, 2.4038, 1.9627], device='cuda:1'), covar=tensor([0.5087, 0.4821, 0.1797, 0.3581, 0.2519, 0.2835, 0.1806, 0.5193], device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0959, 0.0784, 0.0924, 0.0980, 0.0878, 0.0733, 0.0811], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], device='cuda:1')
+2023-02-07 00:44:36,131 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157586.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:44:57,800 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157618.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:44:58,428 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157619.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:45:03,491 INFO [train.py:901] (1/4) Epoch 20, batch 4050, loss[loss=0.2377, simple_loss=0.3178, pruned_loss=0.07878, over 8250.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.06388, over 1612420.66 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:45:12,623 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-07 00:45:15,104 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:45:26,959 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7730, 1.8760, 1.6887, 2.2948, 0.9791, 1.4996, 1.6682, 1.8844], device='cuda:1'), covar=tensor([0.0783, 0.0807, 0.0936, 0.0443, 0.1322, 0.1393, 0.0876, 0.0754], device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0199, 0.0249, 0.0214, 0.0207, 0.0251, 0.0255, 0.0210], device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], device='cuda:1')
+2023-02-07 00:45:33,631 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4238, 1.4874, 1.4497, 1.8290, 0.7369, 1.3108, 1.2875, 1.4957], device='cuda:1'), covar=tensor([0.0881, 0.0819, 0.0969, 0.0511, 0.1238, 0.1431, 0.0822, 0.0720], device='cuda:1'), in_proj_covar=tensor([0.0235, 0.0199, 0.0249, 0.0214, 0.0207, 0.0251, 0.0255, 0.0210], device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], device='cuda:1')
+2023-02-07 00:45:38,035 INFO [train.py:901] (1/4) Epoch 20, batch 4100, loss[loss=0.23, simple_loss=0.3206, pruned_loss=0.06972, over 8038.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2919, pruned_loss=0.06506, over 1608903.95 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:45:42,595 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.468e+02 3.178e+02 4.268e+02 8.149e+02, threshold=6.355e+02, percent-clipped=4.0
+2023-02-07 00:45:52,658 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0
+2023-02-07 00:46:07,480 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:12,789 INFO [train.py:901] (1/4) Epoch 20, batch 4150, loss[loss=0.245, simple_loss=0.3139, pruned_loss=0.08803, over 6796.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2924, pruned_loss=0.06508, over 1610490.62 frames. ], batch size: 71, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:46:13,016 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7361, 1.6918, 2.3741, 1.6340, 1.2917, 2.3354, 0.5108, 1.3910], device='cuda:1'), covar=tensor([0.1851, 0.1498, 0.0334, 0.1258, 0.3133, 0.0448, 0.2547, 0.1610], device='cuda:1'), in_proj_covar=tensor([0.0187, 0.0195, 0.0124, 0.0219, 0.0268, 0.0133, 0.0168, 0.0189], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], device='cuda:1')
+2023-02-07 00:46:25,110 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157743.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:25,656 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:40,584 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157766.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:46:41,942 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157768.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:46,942 INFO [train.py:901] (1/4) Epoch 20, batch 4200, loss[loss=0.2533, simple_loss=0.3243, pruned_loss=0.09119, over 8460.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2918, pruned_loss=0.06485, over 1610613.40 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:46:52,351 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.385e+02 2.811e+02 3.577e+02 7.269e+02, threshold=5.621e+02, percent-clipped=2.0
+2023-02-07 00:47:08,781 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 00:47:23,328 INFO [train.py:901] (1/4) Epoch 20, batch 4250, loss[loss=0.2049, simple_loss=0.2827, pruned_loss=0.06353, over 8240.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.06445, over 1611498.40 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:47:28,295 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:32,309 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 00:47:46,378 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157859.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:53,368 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157868.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:47:55,490 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:58,150 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:58,653 INFO [train.py:901] (1/4) Epoch 20, batch 4300, loss[loss=0.202, simple_loss=0.2956, pruned_loss=0.05426, over 8336.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2909, pruned_loss=0.06402, over 1615827.78 frames. ], batch size: 26, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:48:02,061 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157881.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:48:03,190 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.270e+02 2.745e+02 3.400e+02 8.203e+02, threshold=5.491e+02, percent-clipped=7.0
+2023-02-07 00:48:15,330 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157900.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:48:20,878 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-07 00:48:33,490 INFO [train.py:901] (1/4) Epoch 20, batch 4350, loss[loss=0.17, simple_loss=0.2513, pruned_loss=0.0444, over 7794.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.29, pruned_loss=0.06389, over 1614737.52 frames. ], batch size: 19, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:48:36,276 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157930.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:49:00,924 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 00:49:04,113 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-07 00:49:08,270 INFO [train.py:901] (1/4) Epoch 20, batch 4400, loss[loss=0.1859, simple_loss=0.2641, pruned_loss=0.05389, over 7434.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2887, pruned_loss=0.06312, over 1611183.95 frames. ], batch size: 17, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:49:13,805 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.443e+02 2.894e+02 3.714e+02 1.238e+03, threshold=5.788e+02, percent-clipped=6.0
+2023-02-07 00:49:14,009 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157983.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:49:44,326 INFO [train.py:901] (1/4) Epoch 20, batch 4450, loss[loss=0.2078, simple_loss=0.281, pruned_loss=0.0673, over 7971.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2895, pruned_loss=0.06365, over 1608290.18 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:49:45,694 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-07 00:49:57,302 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:17,226 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0
+2023-02-07 00:50:18,896 INFO [train.py:901] (1/4) Epoch 20, batch 4500, loss[loss=0.1982, simple_loss=0.2749, pruned_loss=0.06073, over 7926.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2901, pruned_loss=0.06399, over 1609451.65 frames. ], batch size: 20, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:50:23,587 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.382e+02 2.908e+02 3.384e+02 7.082e+02, threshold=5.816e+02, percent-clipped=5.0
+2023-02-07 00:50:27,959 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158089.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:35,485 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1876, 1.3320, 4.3446, 1.7223, 3.9195, 3.6326, 3.9530, 3.8497], device='cuda:1'), covar=tensor([0.0514, 0.4540, 0.0552, 0.3708, 0.1049, 0.0915, 0.0519, 0.0609], device='cuda:1'), in_proj_covar=tensor([0.0611, 0.0633, 0.0684, 0.0615, 0.0695, 0.0603, 0.0597, 0.0663], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:50:39,142 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-07 00:50:45,177 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158114.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:45,857 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:53,562 INFO [train.py:901] (1/4) Epoch 20, batch 4550, loss[loss=0.1798, simple_loss=0.2656, pruned_loss=0.04701, over 7921.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2907, pruned_loss=0.06438, over 1610195.06 frames. ], batch size: 20, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:51:01,044 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158137.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:51:02,890 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:51:02,944 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:51:18,404 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158162.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:51:28,240 INFO [train.py:901] (1/4) Epoch 20, batch 4600, loss[loss=0.2011, simple_loss=0.2917, pruned_loss=0.05523, over 8196.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2908, pruned_loss=0.06433, over 1611511.14 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:51:29,052 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7892, 1.3572, 3.9754, 1.4843, 3.5155, 3.3151, 3.5998, 3.4842], device='cuda:1'), covar=tensor([0.0759, 0.4418, 0.0649, 0.4024, 0.1242, 0.0999, 0.0670, 0.0767], device='cuda:1'), in_proj_covar=tensor([0.0610, 0.0631, 0.0681, 0.0614, 0.0693, 0.0602, 0.0596, 0.0660], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:51:32,830 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.506e+02 3.217e+02 3.763e+02 8.986e+02, threshold=6.435e+02, percent-clipped=3.0
+2023-02-07 00:51:54,921 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158215.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:52:03,091 INFO [train.py:901] (1/4) Epoch 20, batch 4650, loss[loss=0.2052, simple_loss=0.2859, pruned_loss=0.0622, over 8456.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2901, pruned_loss=0.0643, over 1610755.93 frames. ], batch size: 49, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:52:12,074 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158239.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:52:29,426 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3861, 1.4058, 4.5703, 1.6911, 4.0492, 3.8484, 4.1788, 4.0355], device='cuda:1'), covar=tensor([0.0595, 0.4422, 0.0471, 0.3820, 0.1059, 0.0874, 0.0518, 0.0646], device='cuda:1'), in_proj_covar=tensor([0.0612, 0.0634, 0.0685, 0.0616, 0.0695, 0.0605, 0.0598, 0.0663], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:52:30,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158264.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:52:37,869 INFO [train.py:901] (1/4) Epoch 20, batch 4700, loss[loss=0.273, simple_loss=0.3423, pruned_loss=0.1018, over 6651.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2899, pruned_loss=0.06405, over 1610658.41 frames. ], batch size: 71, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:52:42,600 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.408e+02 3.012e+02 4.119e+02 1.091e+03, threshold=6.025e+02, percent-clipped=3.0
+2023-02-07 00:52:53,700 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2338, 3.1142, 2.9270, 1.6095, 2.8559, 2.9222, 2.8475, 2.8069], device='cuda:1'), covar=tensor([0.1109, 0.0811, 0.1291, 0.4581, 0.1164, 0.1414, 0.1565, 0.1145], device='cuda:1'), in_proj_covar=tensor([0.0519, 0.0422, 0.0429, 0.0528, 0.0418, 0.0430, 0.0418, 0.0373], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:52:55,084 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9210, 1.3352, 3.1740, 1.2254, 2.4710, 2.4923, 2.9078, 2.8907], device='cuda:1'), covar=tensor([0.1689, 0.5997, 0.1871, 0.5356, 0.3020, 0.2444, 0.1421, 0.1473], device='cuda:1'), in_proj_covar=tensor([0.0612, 0.0635, 0.0687, 0.0615, 0.0697, 0.0604, 0.0599, 0.0664], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:52:55,798 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:53:12,689 INFO [train.py:901] (1/4) Epoch 20, batch 4750, loss[loss=0.2472, simple_loss=0.324, pruned_loss=0.08513, over 8330.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2888, pruned_loss=0.06366, over 1610415.59 frames. ], batch size: 26, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:53:12,918 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:53:15,561 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158330.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:53:21,788 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4189, 2.3436, 1.5441, 2.2488, 2.0032, 1.2388, 1.9330, 2.1629], device='cuda:1'), covar=tensor([0.1831, 0.0587, 0.1596, 0.0731, 0.1031, 0.2238, 0.1342, 0.1084], device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0236, 0.0332, 0.0309, 0.0303, 0.0338, 0.0343, 0.0318], device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 00:53:24,190 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-07 00:53:40,946 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-07 00:53:43,681 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-07 00:53:48,277 INFO [train.py:901] (1/4) Epoch 20, batch 4800, loss[loss=0.2263, simple_loss=0.3023, pruned_loss=0.07514, over 8495.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2899, pruned_loss=0.06391, over 1610914.26 frames. ], batch size: 39, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:53:52,906 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.386e+02 2.729e+02 3.445e+02 7.258e+02, threshold=5.458e+02, percent-clipped=2.0
+2023-02-07 00:54:06,942 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:54:22,900 INFO [train.py:901] (1/4) Epoch 20, batch 4850, loss[loss=0.3195, simple_loss=0.3678, pruned_loss=0.1356, over 6664.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2907, pruned_loss=0.0647, over 1613231.07 frames. ], batch size: 71, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:54:33,551 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-07 00:54:57,241 INFO [train.py:901] (1/4) Epoch 20, batch 4900, loss[loss=0.2044, simple_loss=0.311, pruned_loss=0.04896, over 8191.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2913, pruned_loss=0.06444, over 1612816.19 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:55:02,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.481e+02 3.123e+02 4.208e+02 8.958e+02, threshold=6.246e+02, percent-clipped=7.0
+2023-02-07 00:55:03,229 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158484.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:55:28,957 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4666, 1.5848, 1.4986, 1.8437, 0.7714, 1.3544, 1.3907, 1.5782], device='cuda:1'), covar=tensor([0.0759, 0.0719, 0.0916, 0.0479, 0.1067, 0.1303, 0.0692, 0.0652], device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0213, 0.0204, 0.0248, 0.0251, 0.0208], device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], device='cuda:1')
+2023-02-07 00:55:32,873 INFO [train.py:901] (1/4) Epoch 20, batch 4950, loss[loss=0.2855, simple_loss=0.3632, pruned_loss=0.1039, over 8671.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2916, pruned_loss=0.06469, over 1609431.33 frames. ], batch size: 34, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:56:07,761 INFO [train.py:901] (1/4) Epoch 20, batch 5000, loss[loss=0.2243, simple_loss=0.306, pruned_loss=0.07136, over 8367.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2922, pruned_loss=0.06478, over 1614974.90 frames. ], batch size: 24, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:56:12,218 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.361e+02 2.881e+02 3.667e+02 7.563e+02, threshold=5.761e+02, percent-clipped=2.0
+2023-02-07 00:56:14,509 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158586.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:56:23,811 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158599.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:56:32,786 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158611.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:56:42,905 INFO [train.py:901] (1/4) Epoch 20, batch 5050, loss[loss=0.1987, simple_loss=0.2905, pruned_loss=0.05345, over 8325.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.292, pruned_loss=0.06482, over 1612578.88 frames. ], batch size: 25, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:57:10,200 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-07 00:57:17,769 INFO [train.py:901] (1/4) Epoch 20, batch 5100, loss[loss=0.2115, simple_loss=0.302, pruned_loss=0.06049, over 8094.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2927, pruned_loss=0.06492, over 1616471.64 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:57:23,332 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.670e+02 3.233e+02 3.910e+02 8.185e+02, threshold=6.466e+02, percent-clipped=7.0
+2023-02-07 00:57:38,618 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-07 00:57:53,857 INFO [train.py:901] (1/4) Epoch 20, batch 5150, loss[loss=0.1883, simple_loss=0.2554, pruned_loss=0.06065, over 7169.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2932, pruned_loss=0.06515, over 1620073.68 frames. ], batch size: 16, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:57:55,011 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-07 00:58:08,320 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158746.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:58:28,394 INFO [train.py:901] (1/4) Epoch 20, batch 5200, loss[loss=0.1894, simple_loss=0.2763, pruned_loss=0.05123, over 7963.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2924, pruned_loss=0.06475, over 1613818.44 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:58:30,739 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0753, 1.2425, 1.1801, 0.6880, 1.2095, 1.0260, 0.0665, 1.2098], device='cuda:1'), covar=tensor([0.0359, 0.0327, 0.0290, 0.0507, 0.0361, 0.0805, 0.0740, 0.0276], device='cuda:1'), in_proj_covar=tensor([0.0454, 0.0389, 0.0340, 0.0442, 0.0372, 0.0535, 0.0395, 0.0416], device='cuda:1'), out_proj_covar=tensor([1.2222e-04, 1.0224e-04, 8.9611e-05, 1.1686e-04, 9.8562e-05, 1.5211e-04, 1.0678e-04, 1.1088e-04], device='cuda:1')
+2023-02-07 00:58:31,274 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2248, 3.1334, 2.9067, 1.4823, 2.9045, 2.8715, 2.8901, 2.7940], device='cuda:1'), covar=tensor([0.1153, 0.0789, 0.1333, 0.4592, 0.1162, 0.1207, 0.1567, 0.1015], device='cuda:1'), in_proj_covar=tensor([0.0521, 0.0425, 0.0436, 0.0533, 0.0422, 0.0435, 0.0422, 0.0377], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 00:58:33,212 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.433e+02 2.837e+02 3.461e+02 7.505e+02, threshold=5.673e+02, percent-clipped=2.0
+2023-02-07 00:58:41,616 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:58:46,635 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158801.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:03,969 INFO [train.py:901] (1/4) Epoch 20, batch 5250, loss[loss=0.2261, simple_loss=0.3023, pruned_loss=0.07498, over 8246.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2918, pruned_loss=0.06423, over 1615727.86 frames. ], batch size: 24, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:59:06,915 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3291, 1.2961, 2.3801, 1.2533, 2.2025, 2.5132, 2.7115, 2.1202], device='cuda:1'), covar=tensor([0.1203, 0.1443, 0.0435, 0.2060, 0.0673, 0.0400, 0.0573, 0.0683], device='cuda:1'), in_proj_covar=tensor([0.0292, 0.0318, 0.0285, 0.0312, 0.0301, 0.0261, 0.0407, 0.0302], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], device='cuda:1')
+2023-02-07 00:59:11,275 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-07 00:59:22,844 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158853.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:59:24,328 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158855.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:28,241 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:38,589 INFO [train.py:901] (1/4) Epoch 20, batch 5300, loss[loss=0.2399, simple_loss=0.2993, pruned_loss=0.09029, over 7818.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2906, pruned_loss=0.06371, over 1614225.80 frames. ], batch size: 20, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 00:59:41,427 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:43,360 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.336e+02 2.792e+02 3.296e+02 7.091e+02, threshold=5.585e+02, percent-clipped=2.0
+2023-02-07 01:00:13,210 INFO [train.py:901] (1/4) Epoch 20, batch 5350, loss[loss=0.2248, simple_loss=0.3101, pruned_loss=0.06974, over 8463.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2902, pruned_loss=0.06382, over 1606863.21 frames. ], batch size: 29, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:00:22,118 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-07 01:00:48,005 INFO [train.py:901] (1/4) Epoch 20, batch 5400, loss[loss=0.2182, simple_loss=0.3054, pruned_loss=0.06556, over 8462.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.06448, over 1607533.23 frames. ], batch size: 25, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:00:52,647 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.394e+02 2.966e+02 3.887e+02 6.953e+02, threshold=5.932e+02, percent-clipped=4.0
+2023-02-07 01:01:22,913 INFO [train.py:901] (1/4) Epoch 20, batch 5450, loss[loss=0.1899, simple_loss=0.2791, pruned_loss=0.05036, over 8247.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2907, pruned_loss=0.06437, over 1611191.94 frames. ], batch size: 24, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:01:23,771 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2362, 1.3211, 3.3799, 1.0870, 3.0170, 2.8508, 3.0974, 2.9826], device='cuda:1'), covar=tensor([0.0789, 0.3818, 0.0756, 0.3800, 0.1286, 0.1044, 0.0743, 0.0885], device='cuda:1'), in_proj_covar=tensor([0.0610, 0.0630, 0.0677, 0.0612, 0.0691, 0.0596, 0.0596, 0.0664], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 01:01:57,430 INFO [train.py:901] (1/4) Epoch 20, batch 5500, loss[loss=0.2051, simple_loss=0.2927, pruned_loss=0.05877, over 8249.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2904, pruned_loss=0.06387, over 1611456.07 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:02:00,083 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-07 01:02:02,834 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.298e+02 2.656e+02 3.222e+02 6.486e+02, threshold=5.312e+02, percent-clipped=1.0
+2023-02-07 01:02:05,771 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:10,089 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3966, 1.5655, 2.1964, 1.3366, 1.5769, 1.6542, 1.4723, 1.6278], device='cuda:1'), covar=tensor([0.2054, 0.2584, 0.0908, 0.4517, 0.1964, 0.3456, 0.2427, 0.1983], device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0590, 0.0554, 0.0633, 0.0643, 0.0589, 0.0528, 0.0629], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 01:02:19,886 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0781, 1.8546, 2.4010, 2.0347, 2.3246, 2.0804, 1.9027, 1.5963], device='cuda:1'), covar=tensor([0.3775, 0.3935, 0.1506, 0.2629, 0.1769, 0.2377, 0.1565, 0.3769], device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0962, 0.0787, 0.0927, 0.0980, 0.0877, 0.0734, 0.0811], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], device='cuda:1')
+2023-02-07 01:02:27,309 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159117.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:32,435 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1852, 1.6710, 4.3806, 1.6437, 3.9488, 3.7287, 3.9897, 3.9065], device='cuda:1'), covar=tensor([0.0617, 0.4078, 0.0539, 0.4235, 0.1114, 0.0895, 0.0604, 0.0657], device='cuda:1'), in_proj_covar=tensor([0.0609, 0.0628, 0.0674, 0.0610, 0.0690, 0.0595, 0.0594, 0.0662], device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], device='cuda:1')
+2023-02-07 01:02:32,992 INFO [train.py:901] (1/4) Epoch 20, batch 5550, loss[loss=0.1792, simple_loss=0.2582, pruned_loss=0.05012, over 7724.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2903, pruned_loss=0.06358, over 1610082.28 frames. ], batch size: 18, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:02:41,928 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159139.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:44,149 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:46,023 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159145.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:59,834 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7687, 1.7028, 1.9304, 1.5923, 1.1120, 1.7851, 2.3806, 2.0021], device='cuda:1'), covar=tensor([0.0502, 0.1222, 0.1590, 0.1396, 0.0624, 0.1394, 0.0602, 0.0614], device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], device='cuda:1')
+2023-02-07 01:03:01,231 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0539, 2.3822, 3.6950, 1.7707, 1.6435, 3.6879, 0.8740, 2.0553], device='cuda:1'), covar=tensor([0.1417, 0.1125, 0.0194, 0.1833, 0.2848, 0.0253, 0.1952, 0.1291], device='cuda:1'), in_proj_covar=tensor([0.0187, 0.0195, 0.0125, 0.0219, 0.0269, 0.0133, 0.0167, 0.0189], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], device='cuda:1')
+2023-02-07 01:03:08,171 INFO [train.py:901] (1/4) Epoch 20, batch 5600, loss[loss=0.2355, simple_loss=0.2979, pruned_loss=0.08658, over 7207.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2898, pruned_loss=0.06356, over 1608065.63 frames. ], batch size: 16, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:03:09,020 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159177.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:03:12,919 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.419e+02 2.780e+02 3.445e+02 7.739e+02, threshold=5.561e+02, percent-clipped=2.0
+2023-02-07 01:03:15,215 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1849, 1.5111, 4.4563, 1.9549, 2.4758, 5.1095, 5.0706, 4.3724], device='cuda:1'), covar=tensor([0.1201, 0.1846, 0.0289, 0.1964, 0.1118, 0.0163, 0.0357, 0.0541], device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0320, 0.0288, 0.0315, 0.0305, 0.0263, 0.0412, 0.0305], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], device='cuda:1')
+2023-02-07 01:03:23,294 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159197.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 01:03:30,905 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8569, 1.7001, 2.4810, 1.4001, 1.1729, 2.4316, 0.3780, 1.3319], device='cuda:1'), covar=tensor([0.1888, 0.1379, 0.0321, 0.1457, 0.3100, 0.0331, 0.2396, 0.1551], device='cuda:1'), in_proj_covar=tensor([0.0187, 0.0196, 0.0125, 0.0219, 0.0270, 0.0134, 0.0168, 0.0190], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], device='cuda:1')
+2023-02-07 01:03:43,998 INFO [train.py:901] (1/4) Epoch 20, batch 5650, loss[loss=0.2199, simple_loss=0.2981, pruned_loss=0.07083, over 7807.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2908, pruned_loss=0.06409, over 1609258.87 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:04:03,415 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:04:04,635 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-07 01:04:07,512 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159260.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:04:19,061 INFO [train.py:901] (1/4) Epoch 20, batch 5700, loss[loss=0.184, simple_loss=0.2784, pruned_loss=0.04482, over 8495.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2915, pruned_loss=0.06414, over 1609371.34 frames. ], batch size: 26, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:04:25,334 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.576e+02 3.260e+02 4.013e+02 6.441e+02, threshold=6.520e+02, percent-clipped=4.0
+2023-02-07 01:04:42,294 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159308.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:04:45,035 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159312.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 01:04:50,568 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0325, 1.3967, 3.4670, 1.6499, 2.2533, 3.8766, 3.9501, 3.3160], device='cuda:1'), covar=tensor([0.1167, 0.1928, 0.0390, 0.1982, 0.1238, 0.0215, 0.0508, 0.0539], device='cuda:1'), in_proj_covar=tensor([0.0293, 0.0318, 0.0286, 0.0313, 0.0304, 0.0261, 0.0410, 0.0302], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], device='cuda:1')
+2023-02-07 01:04:54,518 INFO [train.py:901] (1/4) Epoch 20, batch 5750, loss[loss=0.1828, simple_loss=0.2593, pruned_loss=0.05319, over 7656.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.291, pruned_loss=0.06382, over 1610554.99 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:05:09,319 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-07 01:05:21,858 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9943, 1.5860, 3.4497, 1.5522, 2.3307, 3.8027, 3.9197, 3.3077], device='cuda:1'), covar=tensor([0.1083, 0.1646, 0.0342, 0.1988, 0.1008, 0.0214, 0.0499, 0.0481], device='cuda:1'), in_proj_covar=tensor([0.0291, 0.0316, 0.0284, 0.0311, 0.0302, 0.0260, 0.0407, 0.0301], device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], device='cuda:1')
+2023-02-07 01:05:29,354 INFO [train.py:901] (1/4) Epoch 20, batch 5800, loss[loss=0.2176, simple_loss=0.2974, pruned_loss=0.06894, over 8459.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.06364, over 1607160.64 frames. ], batch size: 27, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:05:35,563 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.438e+02 2.992e+02 3.849e+02 1.447e+03, threshold=5.984e+02, percent-clipped=4.0
+2023-02-07 01:06:04,883 INFO [train.py:901] (1/4) Epoch 20, batch 5850, loss[loss=0.1751, simple_loss=0.2624, pruned_loss=0.0439, over 7961.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2901, pruned_loss=0.06372, over 1608992.53 frames.
], batch size: 21, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:06:08,465 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:06:09,313 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5088, 1.9013, 3.0499, 1.3893, 2.2024, 1.9978, 1.6400, 2.2314], + device='cuda:1'), covar=tensor([0.2073, 0.2661, 0.0902, 0.4766, 0.2074, 0.3269, 0.2389, 0.2448], + device='cuda:1'), in_proj_covar=tensor([0.0522, 0.0593, 0.0556, 0.0636, 0.0647, 0.0592, 0.0531, 0.0632], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:06:39,993 INFO [train.py:901] (1/4) Epoch 20, batch 5900, loss[loss=0.1916, simple_loss=0.2699, pruned_loss=0.05669, over 8249.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2891, pruned_loss=0.06316, over 1608360.61 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:06:45,626 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.454e+02 2.951e+02 3.822e+02 7.063e+02, threshold=5.901e+02, percent-clipped=2.0 +2023-02-07 01:07:04,121 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:08,830 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159516.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:12,108 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:15,452 INFO [train.py:901] (1/4) Epoch 20, batch 5950, loss[loss=0.1939, simple_loss=0.2861, pruned_loss=0.05092, over 8107.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2903, pruned_loss=0.06374, over 1608245.10 frames. ], batch size: 23, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:07:21,779 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:26,380 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:29,667 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:45,569 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159568.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:07:50,936 INFO [train.py:901] (1/4) Epoch 20, batch 6000, loss[loss=0.1823, simple_loss=0.2736, pruned_loss=0.04547, over 8509.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2901, pruned_loss=0.06339, over 1612212.27 frames. ], batch size: 26, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:07:50,937 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 01:08:01,340 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1314, 1.8333, 2.3860, 1.9580, 2.2722, 2.1581, 1.9560, 1.1843], + device='cuda:1'), covar=tensor([0.5508, 0.4883, 0.1888, 0.3778, 0.2451, 0.3141, 0.1841, 0.5010], + device='cuda:1'), in_proj_covar=tensor([0.0938, 0.0970, 0.0795, 0.0936, 0.0987, 0.0882, 0.0741, 0.0820], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 01:08:04,192 INFO [train.py:935] (1/4) Epoch 20, validation: loss=0.175, simple_loss=0.275, pruned_loss=0.03755, over 944034.00 frames. 
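A brief aside on the recurring `optim.py:369` lines above and below. Judging purely from the logged numbers, the printed `threshold` equals `Clipping_scale` times the median grad-norm quartile: for example, `2.0 * 2.792e+02 ≈ 5.585e+02` in the first such entry, and `2.0 * 2.966e+02 = 5.932e+02` in the next. The sketch below reproduces that bookkeeping. It is a minimal illustration inferred from these log lines, not icefall's actual optimizer code; the function name `grad_norm_stats` and the synthetic norms are invented for the example.

```python
# Minimal sketch (inferred from the log lines, NOT icefall's real code) of
# the grad-norm bookkeeping printed as:
#   "Clipping_scale=2.0, grad-norm quartiles ... threshold=... percent-clipped=..."
import torch

def grad_norm_stats(recent_norms: torch.Tensor, clipping_scale: float = 2.0):
    """recent_norms: 1-D tensor of gradient norms from recent batches."""
    # Five-number summary: min, Q1, median, Q3, max -- the five values logged.
    quartiles = torch.quantile(
        recent_norms, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0])
    )
    median = quartiles[2]
    threshold = clipping_scale * median  # matches the logged "threshold="
    # Share of recent batches whose norm exceeded the threshold.
    percent_clipped = 100.0 * (recent_norms > threshold).float().mean()
    return quartiles, threshold, percent_clipped

norms = 150.0 + 600.0 * torch.rand(100)  # synthetic norms on the logged scale
q, thr, pct = grad_norm_stats(norms)
print(f"grad-norm quartiles {[f'{v:.3e}' for v in q.tolist()]}, "
      f"threshold={thr:.3e}, percent-clipped={pct:.1f}")
```

The `WARNING ... Exclude cut with ID ...` lines follow a similarly simple pattern: every utterance excluded in this log is either shorter than about 1 s or longer than about 25 s, i.e. outside the duration range the recipe keeps for training.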
+2023-02-07 01:08:04,193 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 01:08:09,555 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.504e+02 2.869e+02 3.482e+02 8.370e+02, threshold=5.739e+02, percent-clipped=5.0 +2023-02-07 01:08:15,887 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159593.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:08:38,920 INFO [train.py:901] (1/4) Epoch 20, batch 6050, loss[loss=0.2258, simple_loss=0.3012, pruned_loss=0.07521, over 7176.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2904, pruned_loss=0.06353, over 1612203.01 frames. ], batch size: 71, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:08:45,949 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:08:55,617 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159649.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:08:57,636 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:09:14,770 INFO [train.py:901] (1/4) Epoch 20, batch 6100, loss[loss=0.2612, simple_loss=0.3354, pruned_loss=0.0935, over 8621.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.06389, over 1613089.69 frames. ], batch size: 31, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:09:21,004 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.453e+02 2.842e+02 3.745e+02 1.322e+03, threshold=5.684e+02, percent-clipped=4.0 +2023-02-07 01:09:34,330 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9836, 1.6774, 2.0712, 1.8207, 1.9688, 2.0108, 1.8423, 0.7858], + device='cuda:1'), covar=tensor([0.5386, 0.4540, 0.1886, 0.3375, 0.2243, 0.3012, 0.1863, 0.4913], + device='cuda:1'), in_proj_covar=tensor([0.0937, 0.0971, 0.0795, 0.0937, 0.0987, 0.0883, 0.0741, 0.0820], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 01:09:39,605 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6879, 4.7022, 4.2368, 1.8993, 4.2246, 4.3566, 4.2819, 4.1312], + device='cuda:1'), covar=tensor([0.0744, 0.0542, 0.0960, 0.5124, 0.0844, 0.0909, 0.1131, 0.0676], + device='cuda:1'), in_proj_covar=tensor([0.0519, 0.0430, 0.0434, 0.0533, 0.0422, 0.0435, 0.0418, 0.0377], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:09:41,577 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 01:09:50,003 INFO [train.py:901] (1/4) Epoch 20, batch 6150, loss[loss=0.1819, simple_loss=0.2745, pruned_loss=0.04462, over 8457.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2895, pruned_loss=0.06334, over 1610606.51 frames. 
], batch size: 27, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:09:55,568 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0678, 2.3253, 1.9503, 2.8740, 1.3480, 1.6858, 2.0663, 2.2140], + device='cuda:1'), covar=tensor([0.0708, 0.0751, 0.0870, 0.0399, 0.1095, 0.1230, 0.0851, 0.0779], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0204, 0.0246, 0.0248, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 01:10:18,350 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:10:24,867 INFO [train.py:901] (1/4) Epoch 20, batch 6200, loss[loss=0.1886, simple_loss=0.2757, pruned_loss=0.05076, over 7971.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2893, pruned_loss=0.06324, over 1613830.88 frames. ], batch size: 21, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:10:30,198 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.429e+02 3.094e+02 3.753e+02 7.329e+02, threshold=6.188e+02, percent-clipped=3.0 +2023-02-07 01:10:43,487 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:00,325 INFO [train.py:901] (1/4) Epoch 20, batch 6250, loss[loss=0.1981, simple_loss=0.2778, pruned_loss=0.05925, over 8243.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06292, over 1615349.49 frames. ], batch size: 22, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:11:01,214 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159827.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:15,440 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-07 01:11:32,491 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159873.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:34,366 INFO [train.py:901] (1/4) Epoch 20, batch 6300, loss[loss=0.2084, simple_loss=0.3035, pruned_loss=0.05662, over 8242.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2883, pruned_loss=0.06256, over 1613625.94 frames. ], batch size: 24, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:11:40,336 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.354e+02 2.951e+02 3.644e+02 9.166e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 01:11:45,865 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:03,509 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159917.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:09,078 INFO [train.py:901] (1/4) Epoch 20, batch 6350, loss[loss=0.2523, simple_loss=0.3258, pruned_loss=0.08946, over 8521.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2893, pruned_loss=0.06318, over 1618846.28 frames. ], batch size: 28, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:12:10,588 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159928.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:43,479 INFO [train.py:901] (1/4) Epoch 20, batch 6400, loss[loss=0.1802, simple_loss=0.2605, pruned_loss=0.04991, over 7807.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2873, pruned_loss=0.06191, over 1617604.11 frames. 
], batch size: 20, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:12:48,765 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.436e+02 2.995e+02 3.881e+02 8.346e+02, threshold=5.989e+02, percent-clipped=6.0 +2023-02-07 01:12:55,755 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:16,776 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:18,631 INFO [train.py:901] (1/4) Epoch 20, batch 6450, loss[loss=0.2438, simple_loss=0.3303, pruned_loss=0.07868, over 8482.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2879, pruned_loss=0.06267, over 1613672.34 frames. ], batch size: 29, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:13:34,493 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:53,995 INFO [train.py:901] (1/4) Epoch 20, batch 6500, loss[loss=0.2382, simple_loss=0.3031, pruned_loss=0.08667, over 7934.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2876, pruned_loss=0.06259, over 1613013.53 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:13:59,465 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.613e+02 3.061e+02 4.120e+02 1.100e+03, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 01:14:16,515 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:14:25,335 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6178, 1.6087, 2.0990, 1.4679, 1.2296, 2.0327, 0.3719, 1.2786], + device='cuda:1'), covar=tensor([0.1878, 0.1334, 0.0362, 0.1102, 0.2744, 0.0457, 0.2162, 0.1360], + device='cuda:1'), in_proj_covar=tensor([0.0188, 0.0195, 0.0126, 0.0221, 0.0271, 0.0133, 0.0169, 0.0190], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 01:14:29,725 INFO [train.py:901] (1/4) Epoch 20, batch 6550, loss[loss=0.2748, simple_loss=0.3444, pruned_loss=0.1026, over 8511.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2885, pruned_loss=0.06328, over 1612276.78 frames. ], batch size: 28, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:14:44,611 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-07 01:14:53,089 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 01:15:05,565 INFO [train.py:901] (1/4) Epoch 20, batch 6600, loss[loss=0.2198, simple_loss=0.3059, pruned_loss=0.06688, over 8310.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2885, pruned_loss=0.06294, over 1610502.30 frames. ], batch size: 49, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:15:10,795 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.488e+02 3.067e+02 3.982e+02 8.719e+02, threshold=6.134e+02, percent-clipped=3.0 +2023-02-07 01:15:12,114 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-07 01:15:13,063 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6960, 2.0987, 3.4200, 1.4903, 2.5419, 2.2232, 1.8328, 2.5444], + device='cuda:1'), covar=tensor([0.1794, 0.2710, 0.0787, 0.4393, 0.1748, 0.2998, 0.2187, 0.2204], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0594, 0.0557, 0.0637, 0.0645, 0.0597, 0.0533, 0.0635], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:15:13,752 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8082, 2.3598, 4.3282, 1.5925, 3.0103, 2.4602, 1.9460, 3.0230], + device='cuda:1'), covar=tensor([0.1856, 0.2799, 0.0724, 0.4473, 0.1974, 0.3096, 0.2225, 0.2391], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0594, 0.0557, 0.0637, 0.0645, 0.0597, 0.0533, 0.0635], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:15:22,710 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6788, 1.6018, 3.2057, 1.4295, 2.1904, 3.5456, 3.6453, 2.8666], + device='cuda:1'), covar=tensor([0.1452, 0.1844, 0.0442, 0.2312, 0.1187, 0.0302, 0.0641, 0.0774], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0321, 0.0287, 0.0315, 0.0306, 0.0263, 0.0412, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 01:15:29,491 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6244, 1.3589, 1.6104, 1.2588, 0.8489, 1.3389, 1.4445, 1.4127], + device='cuda:1'), covar=tensor([0.0603, 0.1367, 0.1701, 0.1529, 0.0585, 0.1550, 0.0756, 0.0633], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0153, 0.0190, 0.0159, 0.0099, 0.0162, 0.0112, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 01:15:33,423 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160217.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:15:39,320 INFO [train.py:901] (1/4) Epoch 20, batch 6650, loss[loss=0.2279, simple_loss=0.3015, pruned_loss=0.07713, over 8435.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2888, pruned_loss=0.06352, over 1608956.45 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:15:49,669 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8378, 1.3263, 4.0149, 1.4734, 3.5670, 3.3401, 3.6761, 3.5526], + device='cuda:1'), covar=tensor([0.0679, 0.4599, 0.0590, 0.4148, 0.1149, 0.0970, 0.0637, 0.0721], + device='cuda:1'), in_proj_covar=tensor([0.0613, 0.0633, 0.0684, 0.0619, 0.0696, 0.0597, 0.0600, 0.0670], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:16:12,500 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:16:15,124 INFO [train.py:901] (1/4) Epoch 20, batch 6700, loss[loss=0.1864, simple_loss=0.2528, pruned_loss=0.06001, over 7294.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2888, pruned_loss=0.06341, over 1612226.72 frames. 
], batch size: 16, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:20,502 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.299e+02 2.819e+02 3.357e+02 8.975e+02, threshold=5.638e+02, percent-clipped=4.0 +2023-02-07 01:16:48,910 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160325.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:16:49,345 INFO [train.py:901] (1/4) Epoch 20, batch 6750, loss[loss=0.2347, simple_loss=0.2977, pruned_loss=0.0858, over 7650.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2887, pruned_loss=0.06316, over 1613175.73 frames. ], batch size: 19, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:53,607 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160332.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:06,675 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1341, 1.4364, 1.6327, 1.2735, 1.0474, 1.3628, 1.8624, 1.5404], + device='cuda:1'), covar=tensor([0.0537, 0.1287, 0.1680, 0.1517, 0.0603, 0.1523, 0.0695, 0.0650], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0159, 0.0099, 0.0162, 0.0113, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 01:17:06,882 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 01:17:16,338 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:23,986 INFO [train.py:901] (1/4) Epoch 20, batch 6800, loss[loss=0.2186, simple_loss=0.3088, pruned_loss=0.0642, over 8466.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2885, pruned_loss=0.06282, over 1612702.51 frames. ], batch size: 29, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:17:28,102 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 01:17:29,320 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.510e+02 3.096e+02 3.947e+02 9.727e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 01:17:31,586 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:33,656 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160389.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:37,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5980, 1.8047, 1.8741, 1.4192, 1.9778, 1.4336, 0.5625, 1.8220], + device='cuda:1'), covar=tensor([0.0447, 0.0309, 0.0269, 0.0417, 0.0340, 0.0757, 0.0782, 0.0234], + device='cuda:1'), in_proj_covar=tensor([0.0441, 0.0376, 0.0331, 0.0432, 0.0362, 0.0523, 0.0383, 0.0403], + device='cuda:1'), out_proj_covar=tensor([1.1873e-04, 9.8639e-05, 8.7278e-05, 1.1426e-04, 9.5605e-05, 1.4867e-04, + 1.0356e-04, 1.0729e-04], device='cuda:1') +2023-02-07 01:17:59,211 INFO [train.py:901] (1/4) Epoch 20, batch 6850, loss[loss=0.1952, simple_loss=0.2663, pruned_loss=0.06202, over 7562.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2879, pruned_loss=0.06276, over 1606644.19 frames. ], batch size: 18, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:18:19,446 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 01:18:34,197 INFO [train.py:901] (1/4) Epoch 20, batch 6900, loss[loss=0.1851, simple_loss=0.268, pruned_loss=0.05112, over 8297.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2881, pruned_loss=0.06255, over 1609739.45 frames. ], batch size: 23, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:18:39,573 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.333e+02 2.912e+02 3.495e+02 9.213e+02, threshold=5.824e+02, percent-clipped=3.0 +2023-02-07 01:18:54,841 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4075, 2.2136, 3.1518, 2.4445, 3.0371, 2.3737, 2.1153, 1.8186], + device='cuda:1'), covar=tensor([0.5404, 0.5061, 0.1964, 0.3893, 0.2510, 0.3124, 0.1999, 0.5682], + device='cuda:1'), in_proj_covar=tensor([0.0929, 0.0968, 0.0794, 0.0932, 0.0985, 0.0881, 0.0740, 0.0821], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 01:19:02,431 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-07 01:19:08,574 INFO [train.py:901] (1/4) Epoch 20, batch 6950, loss[loss=0.2473, simple_loss=0.3213, pruned_loss=0.08669, over 8315.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2882, pruned_loss=0.06258, over 1612169.45 frames. ], batch size: 25, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:19:30,214 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 01:19:42,961 INFO [train.py:901] (1/4) Epoch 20, batch 7000, loss[loss=0.1934, simple_loss=0.2884, pruned_loss=0.04915, over 8444.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2877, pruned_loss=0.06209, over 1612933.37 frames. ], batch size: 29, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:19:48,348 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.497e+02 2.987e+02 3.377e+02 5.985e+02, threshold=5.974e+02, percent-clipped=1.0 +2023-02-07 01:19:49,093 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7249, 4.7261, 4.2225, 2.0765, 4.1761, 4.3761, 4.1925, 4.2385], + device='cuda:1'), covar=tensor([0.0708, 0.0505, 0.1022, 0.4404, 0.0876, 0.0914, 0.1319, 0.0651], + device='cuda:1'), in_proj_covar=tensor([0.0522, 0.0434, 0.0437, 0.0538, 0.0424, 0.0440, 0.0423, 0.0383], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:19:52,051 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6405, 1.6440, 2.1026, 1.4742, 1.2986, 2.0875, 0.3762, 1.3899], + device='cuda:1'), covar=tensor([0.1636, 0.1274, 0.0458, 0.1192, 0.2546, 0.0414, 0.2067, 0.1236], + device='cuda:1'), in_proj_covar=tensor([0.0188, 0.0193, 0.0126, 0.0222, 0.0270, 0.0133, 0.0169, 0.0189], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 01:19:52,064 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:09,541 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160613.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:18,199 INFO [train.py:901] (1/4) Epoch 20, batch 7050, loss[loss=0.1993, simple_loss=0.2811, pruned_loss=0.05874, over 7820.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2888, pruned_loss=0.06275, over 1611475.47 frames. 
], batch size: 20, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:20:30,580 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:42,512 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7724, 2.3782, 4.3599, 1.6153, 3.0422, 2.4281, 1.8657, 3.1339], + device='cuda:1'), covar=tensor([0.1779, 0.2412, 0.0722, 0.4146, 0.1765, 0.2891, 0.2117, 0.2087], + device='cuda:1'), in_proj_covar=tensor([0.0516, 0.0585, 0.0548, 0.0627, 0.0636, 0.0587, 0.0523, 0.0623], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:20:48,791 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160668.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:49,342 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160669.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:20:53,993 INFO [train.py:901] (1/4) Epoch 20, batch 7100, loss[loss=0.2024, simple_loss=0.2778, pruned_loss=0.06344, over 8499.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06284, over 1609278.69 frames. ], batch size: 26, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:20:59,624 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.520e+02 2.814e+02 3.523e+02 7.232e+02, threshold=5.628e+02, percent-clipped=2.0 +2023-02-07 01:21:00,543 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4511, 2.8075, 2.3171, 3.8427, 1.7301, 2.1624, 2.3391, 2.8618], + device='cuda:1'), covar=tensor([0.0693, 0.0870, 0.0870, 0.0294, 0.1156, 0.1166, 0.1025, 0.0820], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0198, 0.0247, 0.0214, 0.0206, 0.0248, 0.0252, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 01:21:29,424 INFO [train.py:901] (1/4) Epoch 20, batch 7150, loss[loss=0.1885, simple_loss=0.2698, pruned_loss=0.05358, over 7821.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2894, pruned_loss=0.06333, over 1608834.48 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:21:36,643 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.9112, 3.8550, 3.5190, 1.9815, 3.4632, 3.5284, 3.3619, 3.4026], + device='cuda:1'), covar=tensor([0.0827, 0.0601, 0.1131, 0.3994, 0.0933, 0.0856, 0.1477, 0.0747], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0432, 0.0436, 0.0536, 0.0424, 0.0440, 0.0424, 0.0381], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:21:54,043 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 01:22:04,606 INFO [train.py:901] (1/4) Epoch 20, batch 7200, loss[loss=0.2559, simple_loss=0.3193, pruned_loss=0.09622, over 6933.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.29, pruned_loss=0.06336, over 1612964.91 frames. 
], batch size: 71, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:22:09,775 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.432e+02 3.066e+02 3.972e+02 8.502e+02, threshold=6.132e+02, percent-clipped=3.0 +2023-02-07 01:22:09,970 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160784.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:22:26,159 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 01:22:39,218 INFO [train.py:901] (1/4) Epoch 20, batch 7250, loss[loss=0.1921, simple_loss=0.2649, pruned_loss=0.05963, over 7665.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.06368, over 1610461.34 frames. ], batch size: 19, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:13,942 INFO [train.py:901] (1/4) Epoch 20, batch 7300, loss[loss=0.1939, simple_loss=0.2789, pruned_loss=0.0544, over 7816.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2896, pruned_loss=0.06304, over 1612882.69 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:14,055 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3970, 4.3968, 3.9466, 2.0179, 3.9013, 4.0243, 3.9752, 3.8163], + device='cuda:1'), covar=tensor([0.0711, 0.0513, 0.1067, 0.4523, 0.0838, 0.0920, 0.1178, 0.0780], + device='cuda:1'), in_proj_covar=tensor([0.0519, 0.0431, 0.0434, 0.0536, 0.0426, 0.0439, 0.0421, 0.0379], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:23:19,318 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.519e+02 2.885e+02 3.982e+02 8.183e+02, threshold=5.771e+02, percent-clipped=5.0 +2023-02-07 01:23:44,252 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160919.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:23:48,786 INFO [train.py:901] (1/4) Epoch 20, batch 7350, loss[loss=0.2011, simple_loss=0.283, pruned_loss=0.05954, over 8345.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2889, pruned_loss=0.06274, over 1611685.12 frames. ], batch size: 24, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:24:07,563 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0879, 2.2081, 1.8321, 2.7579, 1.2403, 1.5587, 1.9021, 2.1783], + device='cuda:1'), covar=tensor([0.0639, 0.0712, 0.0905, 0.0370, 0.1170, 0.1336, 0.0883, 0.0684], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0213, 0.0206, 0.0247, 0.0251, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 01:24:16,156 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 01:24:24,317 INFO [train.py:901] (1/4) Epoch 20, batch 7400, loss[loss=0.1885, simple_loss=0.2815, pruned_loss=0.04773, over 8075.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2883, pruned_loss=0.06232, over 1615182.50 frames. 
], batch size: 21, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:24:29,923 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:24:30,420 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.344e+02 3.002e+02 3.673e+02 6.079e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-07 01:24:31,262 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7842, 1.4111, 3.1222, 1.3108, 2.2813, 3.3409, 3.4816, 2.7971], + device='cuda:1'), covar=tensor([0.1274, 0.1882, 0.0375, 0.2298, 0.0916, 0.0273, 0.0642, 0.0622], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0321, 0.0285, 0.0315, 0.0305, 0.0262, 0.0412, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 01:24:37,302 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 01:24:59,972 INFO [train.py:901] (1/4) Epoch 20, batch 7450, loss[loss=0.1824, simple_loss=0.2686, pruned_loss=0.0481, over 7930.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2886, pruned_loss=0.0624, over 1616522.21 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:10,069 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161040.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:25:16,121 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 01:25:28,631 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161065.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:25:35,932 INFO [train.py:901] (1/4) Epoch 20, batch 7500, loss[loss=0.1797, simple_loss=0.258, pruned_loss=0.05074, over 8253.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2885, pruned_loss=0.06276, over 1616291.83 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:41,427 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.441e+02 3.010e+02 3.756e+02 8.900e+02, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:25:56,410 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7495, 1.9671, 2.0457, 1.3868, 2.2003, 1.6378, 0.6352, 1.8879], + device='cuda:1'), covar=tensor([0.0567, 0.0334, 0.0279, 0.0539, 0.0381, 0.0846, 0.0893, 0.0304], + device='cuda:1'), in_proj_covar=tensor([0.0451, 0.0385, 0.0339, 0.0440, 0.0370, 0.0534, 0.0391, 0.0413], + device='cuda:1'), out_proj_covar=tensor([1.2137e-04, 1.0108e-04, 8.9232e-05, 1.1615e-04, 9.7755e-05, 1.5176e-04, + 1.0577e-04, 1.0976e-04], device='cuda:1') +2023-02-07 01:26:11,154 INFO [train.py:901] (1/4) Epoch 20, batch 7550, loss[loss=0.2073, simple_loss=0.2897, pruned_loss=0.06241, over 7977.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2893, pruned_loss=0.06341, over 1615033.51 frames. 
], batch size: 21, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:32,389 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7714, 1.7436, 2.3980, 1.6107, 1.4213, 2.4572, 0.4663, 1.4671], + device='cuda:1'), covar=tensor([0.1827, 0.1233, 0.0376, 0.1326, 0.2655, 0.0413, 0.2301, 0.1352], + device='cuda:1'), in_proj_covar=tensor([0.0188, 0.0194, 0.0125, 0.0220, 0.0269, 0.0134, 0.0168, 0.0188], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 01:26:38,652 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.61 vs. limit=5.0 +2023-02-07 01:26:40,487 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5664, 1.8237, 1.8682, 1.2280, 1.9856, 1.4964, 0.4245, 1.7330], + device='cuda:1'), covar=tensor([0.0462, 0.0312, 0.0270, 0.0482, 0.0361, 0.0765, 0.0791, 0.0245], + device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0384, 0.0338, 0.0439, 0.0368, 0.0533, 0.0390, 0.0412], + device='cuda:1'), out_proj_covar=tensor([1.2109e-04, 1.0080e-04, 8.9035e-05, 1.1583e-04, 9.7149e-05, 1.5137e-04, + 1.0544e-04, 1.0964e-04], device='cuda:1') +2023-02-07 01:26:46,340 INFO [train.py:901] (1/4) Epoch 20, batch 7600, loss[loss=0.1997, simple_loss=0.2872, pruned_loss=0.05611, over 8332.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.0638, over 1618708.02 frames. ], batch size: 25, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:51,651 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.460e+02 3.037e+02 4.113e+02 9.859e+02, threshold=6.074e+02, percent-clipped=9.0 +2023-02-07 01:26:56,504 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 01:27:20,297 INFO [train.py:901] (1/4) Epoch 20, batch 7650, loss[loss=0.1828, simple_loss=0.2647, pruned_loss=0.05043, over 7789.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2902, pruned_loss=0.06387, over 1616837.48 frames. ], batch size: 19, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:27:25,750 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:36,297 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:45,610 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:54,281 INFO [train.py:901] (1/4) Epoch 20, batch 7700, loss[loss=0.1743, simple_loss=0.2619, pruned_loss=0.04339, over 7689.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2888, pruned_loss=0.06305, over 1609729.20 frames. ], batch size: 18, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:27:59,457 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.411e+02 2.987e+02 3.572e+02 6.786e+02, threshold=5.975e+02, percent-clipped=3.0 +2023-02-07 01:28:25,738 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 01:28:29,788 INFO [train.py:901] (1/4) Epoch 20, batch 7750, loss[loss=0.1918, simple_loss=0.2802, pruned_loss=0.05177, over 8522.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2889, pruned_loss=0.06352, over 1607746.72 frames. 
], batch size: 28, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:28:30,573 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:01,925 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5804, 4.6555, 4.1342, 2.0979, 4.1020, 4.1519, 4.1062, 3.9446], + device='cuda:1'), covar=tensor([0.0681, 0.0475, 0.1111, 0.4193, 0.0835, 0.0777, 0.1250, 0.0639], + device='cuda:1'), in_proj_covar=tensor([0.0510, 0.0425, 0.0428, 0.0526, 0.0419, 0.0429, 0.0413, 0.0375], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:29:05,270 INFO [train.py:901] (1/4) Epoch 20, batch 7800, loss[loss=0.1778, simple_loss=0.256, pruned_loss=0.04986, over 7213.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06287, over 1609897.37 frames. ], batch size: 16, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:29:06,858 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:10,615 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.429e+02 2.909e+02 3.732e+02 6.331e+02, threshold=5.818e+02, percent-clipped=2.0 +2023-02-07 01:29:34,441 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:39,043 INFO [train.py:901] (1/4) Epoch 20, batch 7850, loss[loss=0.1845, simple_loss=0.2631, pruned_loss=0.05292, over 7932.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.289, pruned_loss=0.06326, over 1609049.93 frames. ], batch size: 20, lr: 3.74e-03, grad_scale: 16.0 +2023-02-07 01:29:49,655 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:57,456 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1328, 1.8162, 2.0249, 1.8580, 0.9214, 1.8221, 2.3480, 2.2843], + device='cuda:1'), covar=tensor([0.0407, 0.1205, 0.1550, 0.1336, 0.0582, 0.1402, 0.0568, 0.0577], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 01:30:12,445 INFO [train.py:901] (1/4) Epoch 20, batch 7900, loss[loss=0.182, simple_loss=0.262, pruned_loss=0.05104, over 7221.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.06364, over 1602986.56 frames. ], batch size: 16, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:13,738 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:30:18,874 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 2.352e+02 2.923e+02 4.060e+02 8.940e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 01:30:45,503 INFO [train.py:901] (1/4) Epoch 20, batch 7950, loss[loss=0.2065, simple_loss=0.2976, pruned_loss=0.05767, over 8315.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2911, pruned_loss=0.06456, over 1599689.59 frames. ], batch size: 25, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:50,382 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 01:30:51,104 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-02-07 01:31:18,038 INFO [train.py:901] (1/4) Epoch 20, batch 8000, loss[loss=0.1898, simple_loss=0.2664, pruned_loss=0.05664, over 7411.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2905, pruned_loss=0.06437, over 1599207.11 frames. ], batch size: 17, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:19,441 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:23,850 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.449e+02 3.108e+02 3.740e+02 8.675e+02, threshold=6.215e+02, percent-clipped=6.0 +2023-02-07 01:31:24,608 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0013, 1.3943, 3.4231, 1.5444, 2.3082, 3.8087, 3.9369, 3.1811], + device='cuda:1'), covar=tensor([0.1147, 0.1871, 0.0334, 0.2114, 0.1079, 0.0223, 0.0483, 0.0562], + device='cuda:1'), in_proj_covar=tensor([0.0293, 0.0320, 0.0285, 0.0314, 0.0305, 0.0260, 0.0410, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 01:31:29,407 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:44,061 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8036, 2.7712, 2.0004, 2.4949, 2.4120, 1.6944, 2.3722, 2.5161], + device='cuda:1'), covar=tensor([0.1255, 0.0346, 0.1080, 0.0520, 0.0582, 0.1413, 0.0793, 0.0748], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0234, 0.0331, 0.0303, 0.0298, 0.0332, 0.0341, 0.0315], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 01:31:51,317 INFO [train.py:901] (1/4) Epoch 20, batch 8050, loss[loss=0.2854, simple_loss=0.3419, pruned_loss=0.1144, over 7208.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2887, pruned_loss=0.06387, over 1590174.81 frames. ], batch size: 72, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:52,873 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3738, 1.7739, 1.9312, 1.7554, 1.2565, 1.7834, 2.1964, 1.9449], + device='cuda:1'), covar=tensor([0.0619, 0.1424, 0.1914, 0.1573, 0.0777, 0.1650, 0.0762, 0.0643], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0162, 0.0112, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 01:31:57,072 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:11,404 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0523, 1.2686, 1.6669, 1.0051, 1.1753, 1.2353, 1.1299, 1.2039], + device='cuda:1'), covar=tensor([0.1400, 0.1797, 0.0679, 0.3128, 0.1448, 0.2415, 0.1632, 0.1897], + device='cuda:1'), in_proj_covar=tensor([0.0524, 0.0598, 0.0554, 0.0641, 0.0647, 0.0597, 0.0531, 0.0636], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 01:32:12,862 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 01:32:24,914 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. 
Duration: 27.47775
+2023-02-07 01:32:32,185 INFO [train.py:901] (1/4) Epoch 21, batch 0, loss[loss=0.1866, simple_loss=0.269, pruned_loss=0.05215, over 7918.00 frames. ], tot_loss[loss=0.1866, simple_loss=0.269, pruned_loss=0.05215, over 7918.00 frames. ], batch size: 20, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:32:32,186 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-07 01:32:44,214 INFO [train.py:935] (1/4) Epoch 21, validation: loss=0.1763, simple_loss=0.2762, pruned_loss=0.03818, over 944034.00 frames.
+2023-02-07 01:32:44,214 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-07 01:32:44,412 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161659.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:32:59,352 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-07 01:33:02,217 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.415e+02 2.918e+02 3.924e+02 7.413e+02, threshold=5.835e+02, percent-clipped=4.0
+2023-02-07 01:33:07,958 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161693.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:33:11,595 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:33:18,547 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6619, 1.3796, 4.8766, 1.8036, 4.2204, 4.0884, 4.4745, 4.3148],
+ device='cuda:1'), covar=tensor([0.0629, 0.4725, 0.0450, 0.3924, 0.1152, 0.0978, 0.0556, 0.0615],
+ device='cuda:1'), in_proj_covar=tensor([0.0610, 0.0623, 0.0673, 0.0608, 0.0690, 0.0590, 0.0591, 0.0656],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 01:33:18,592 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:33:19,110 INFO [train.py:901] (1/4) Epoch 21, batch 50, loss[loss=0.24, simple_loss=0.3111, pruned_loss=0.08447, over 8107.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2906, pruned_loss=0.06324, over 368507.23 frames. ], batch size: 23, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:33:29,241 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161723.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:33:32,472 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-07 01:33:56,008 INFO [train.py:901] (1/4) Epoch 21, batch 100, loss[loss=0.2066, simple_loss=0.2941, pruned_loss=0.05951, over 8507.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2882, pruned_loss=0.06145, over 644126.92 frames. ], batch size: 26, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:33:57,263 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-07 01:33:58,657 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161763.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:34:14,151 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.511e+02 2.964e+02 4.065e+02 7.207e+02, threshold=5.927e+02, percent-clipped=4.0
+2023-02-07 01:34:18,656 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 01:34:30,769 INFO [train.py:901] (1/4) Epoch 21, batch 150, loss[loss=0.2043, simple_loss=0.2812, pruned_loss=0.06369, over 7416.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2878, pruned_loss=0.06161, over 861123.39 frames. ], batch size: 17, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:34:33,278 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-07 01:34:39,728 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:34:47,237 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:35:06,328 INFO [train.py:901] (1/4) Epoch 21, batch 200, loss[loss=0.1901, simple_loss=0.2743, pruned_loss=0.05295, over 8478.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2903, pruned_loss=0.06342, over 1024129.77 frames. ], batch size: 48, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:35:19,123 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161878.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:35:23,721 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.494e+02 2.791e+02 3.613e+02 7.338e+02, threshold=5.582e+02, percent-clipped=1.0
+2023-02-07 01:35:41,064 INFO [train.py:901] (1/4) Epoch 21, batch 250, loss[loss=0.181, simple_loss=0.2612, pruned_loss=0.0504, over 7806.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2905, pruned_loss=0.06396, over 1154648.11 frames. ], batch size: 20, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:35:47,956 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-07 01:35:57,090 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-07 01:36:00,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161937.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:36:08,767 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161949.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:36:15,274 INFO [train.py:901] (1/4) Epoch 21, batch 300, loss[loss=0.2169, simple_loss=0.2796, pruned_loss=0.07713, over 7439.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2894, pruned_loss=0.06371, over 1253212.27 frames. ], batch size: 17, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:36:19,038 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161964.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:36:20,643 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-07 01:36:26,608 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161974.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:36:33,755 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.839e+02 3.558e+02 8.067e+02, threshold=5.678e+02, percent-clipped=5.0
+2023-02-07 01:36:36,650 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161989.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:36:51,865 INFO [train.py:901] (1/4) Epoch 21, batch 350, loss[loss=0.2075, simple_loss=0.2783, pruned_loss=0.06832, over 7811.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2892, pruned_loss=0.06313, over 1334308.22 frames. ], batch size: 20, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:36:57,888 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-07 01:37:25,811 INFO [train.py:901] (1/4) Epoch 21, batch 400, loss[loss=0.2291, simple_loss=0.3111, pruned_loss=0.07357, over 8499.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2902, pruned_loss=0.06371, over 1396037.14 frames. ], batch size: 26, lr: 3.65e-03, grad_scale: 8.0
+2023-02-07 01:37:44,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.323e+02 2.796e+02 3.394e+02 5.024e+02, threshold=5.592e+02, percent-clipped=0.0
+2023-02-07 01:37:52,936 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:38:02,164 INFO [train.py:901] (1/4) Epoch 21, batch 450, loss[loss=0.2368, simple_loss=0.3254, pruned_loss=0.07414, over 8240.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.29, pruned_loss=0.06374, over 1445798.62 frames. ], batch size: 24, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:38:15,018 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-07 01:38:20,151 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:38:37,311 INFO [train.py:901] (1/4) Epoch 21, batch 500, loss[loss=0.2072, simple_loss=0.278, pruned_loss=0.06819, over 8052.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2892, pruned_loss=0.06297, over 1485783.54 frames. ], batch size: 20, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:38:37,535 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162159.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:38:50,066 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162177.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:38:55,585 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.501e+02 2.975e+02 3.750e+02 9.376e+02, threshold=5.950e+02, percent-clipped=8.0
+2023-02-07 01:39:01,450 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162193.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:39:13,738 INFO [train.py:901] (1/4) Epoch 21, batch 550, loss[loss=0.2158, simple_loss=0.2923, pruned_loss=0.06959, over 7712.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2895, pruned_loss=0.06313, over 1514896.82 frames. ], batch size: 18, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:39:20,129 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162218.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:39:36,008 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8063, 3.7677, 3.4095, 1.6954, 3.3030, 3.4250, 3.4011, 3.2552],
+ device='cuda:1'), covar=tensor([0.0967, 0.0703, 0.1205, 0.5260, 0.1068, 0.1203, 0.1325, 0.1037],
+ device='cuda:1'), in_proj_covar=tensor([0.0515, 0.0432, 0.0431, 0.0532, 0.0421, 0.0435, 0.0414, 0.0377],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 01:39:42,289 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162249.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:39:48,779 INFO [train.py:901] (1/4) Epoch 21, batch 600, loss[loss=0.1859, simple_loss=0.2645, pruned_loss=0.05367, over 7435.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2904, pruned_loss=0.06383, over 1534661.02 frames. ], batch size: 17, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:39:49,641 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7476, 2.4449, 4.9218, 2.9046, 4.5486, 4.2957, 4.6286, 4.4998],
+ device='cuda:1'), covar=tensor([0.0552, 0.3344, 0.0465, 0.2846, 0.0748, 0.0793, 0.0469, 0.0499],
+ device='cuda:1'), in_proj_covar=tensor([0.0618, 0.0635, 0.0684, 0.0616, 0.0700, 0.0601, 0.0600, 0.0667],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 01:40:02,415 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-07 01:40:06,579 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.365e+02 2.932e+02 3.412e+02 7.385e+02, threshold=5.863e+02, percent-clipped=2.0
+2023-02-07 01:40:08,822 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5485, 1.8054, 1.8427, 1.2837, 1.9489, 1.4426, 0.5354, 1.6976],
+ device='cuda:1'), covar=tensor([0.0532, 0.0311, 0.0271, 0.0493, 0.0323, 0.0723, 0.0816, 0.0271],
+ device='cuda:1'), in_proj_covar=tensor([0.0447, 0.0383, 0.0335, 0.0437, 0.0367, 0.0530, 0.0387, 0.0411],
+ device='cuda:1'), out_proj_covar=tensor([1.2021e-04, 1.0044e-04, 8.8259e-05, 1.1543e-04, 9.6905e-05, 1.5057e-04,
+ 1.0474e-04, 1.0932e-04], device='cuda:1')
+2023-02-07 01:40:11,428 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162292.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:40:22,965 INFO [train.py:901] (1/4) Epoch 21, batch 650, loss[loss=0.2288, simple_loss=0.311, pruned_loss=0.07332, over 8500.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2912, pruned_loss=0.0642, over 1552840.57 frames. ], batch size: 26, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:40:46,286 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4592, 2.3873, 1.7920, 2.1024, 2.0166, 1.5235, 1.9463, 1.8863],
+ device='cuda:1'), covar=tensor([0.1476, 0.0440, 0.1219, 0.0581, 0.0730, 0.1515, 0.0910, 0.0975],
+ device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0236, 0.0334, 0.0307, 0.0300, 0.0334, 0.0345, 0.0319],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 01:40:59,244 INFO [train.py:901] (1/4) Epoch 21, batch 700, loss[loss=0.1878, simple_loss=0.2724, pruned_loss=0.05163, over 7974.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2909, pruned_loss=0.06392, over 1564656.87 frames. ], batch size: 21, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:41:17,770 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.443e+02 3.111e+02 4.032e+02 8.821e+02, threshold=6.222e+02, percent-clipped=5.0
+2023-02-07 01:41:34,567 INFO [train.py:901] (1/4) Epoch 21, batch 750, loss[loss=0.2023, simple_loss=0.2879, pruned_loss=0.05831, over 8342.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2914, pruned_loss=0.06404, over 1576574.40 frames. ], batch size: 49, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:41:40,440 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-07 01:41:45,483 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-07 01:41:54,317 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-07 01:41:56,360 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162439.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:42:01,037 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0
+2023-02-07 01:42:11,066 INFO [train.py:901] (1/4) Epoch 21, batch 800, loss[loss=0.1836, simple_loss=0.2544, pruned_loss=0.05641, over 7420.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2901, pruned_loss=0.06374, over 1584029.10 frames. ], batch size: 17, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:42:29,940 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.455e+02 2.861e+02 3.570e+02 7.084e+02, threshold=5.721e+02, percent-clipped=3.0
+2023-02-07 01:42:47,171 INFO [train.py:901] (1/4) Epoch 21, batch 850, loss[loss=0.1927, simple_loss=0.279, pruned_loss=0.05323, over 8621.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2901, pruned_loss=0.06406, over 1586799.06 frames. ], batch size: 39, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:43:01,479 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162529.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:43:07,194 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5097, 2.4757, 3.3299, 2.6153, 3.0505, 2.6017, 2.3284, 1.8306],
+ device='cuda:1'), covar=tensor([0.5126, 0.4851, 0.1714, 0.3380, 0.2484, 0.2662, 0.1721, 0.5244],
+ device='cuda:1'), in_proj_covar=tensor([0.0936, 0.0967, 0.0791, 0.0929, 0.0983, 0.0878, 0.0738, 0.0815],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 01:43:16,271 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:43:21,105 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162554.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:43:24,457 INFO [train.py:901] (1/4) Epoch 21, batch 900, loss[loss=0.2296, simple_loss=0.3167, pruned_loss=0.07121, over 8124.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2888, pruned_loss=0.06323, over 1589734.98 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:43:34,386 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162573.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:43:42,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.319e+02 2.838e+02 3.637e+02 1.203e+03, threshold=5.677e+02, percent-clipped=5.0
+2023-02-07 01:43:49,232 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162593.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:44:00,582 INFO [train.py:901] (1/4) Epoch 21, batch 950, loss[loss=0.2108, simple_loss=0.3061, pruned_loss=0.0577, over 8493.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2891, pruned_loss=0.06328, over 1597160.14 frames. ], batch size: 26, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:44:07,071 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8173, 1.6248, 2.5172, 1.9346, 2.1835, 1.8154, 1.5946, 1.1127],
+ device='cuda:1'), covar=tensor([0.6897, 0.5783, 0.1702, 0.3668, 0.2719, 0.4084, 0.2984, 0.4738],
+ device='cuda:1'), in_proj_covar=tensor([0.0937, 0.0969, 0.0791, 0.0930, 0.0983, 0.0878, 0.0739, 0.0817],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 01:44:14,244 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-07 01:44:18,634 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2966, 2.0639, 2.8074, 2.2732, 2.6712, 2.3147, 2.1124, 1.4714],
+ device='cuda:1'), covar=tensor([0.5165, 0.4686, 0.1845, 0.3608, 0.2430, 0.2876, 0.1820, 0.5161],
+ device='cuda:1'), in_proj_covar=tensor([0.0939, 0.0970, 0.0793, 0.0931, 0.0985, 0.0879, 0.0740, 0.0818],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 01:44:35,852 INFO [train.py:901] (1/4) Epoch 21, batch 1000, loss[loss=0.2407, simple_loss=0.3201, pruned_loss=0.08064, over 8480.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2899, pruned_loss=0.06385, over 1597597.76 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:44:48,965 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-07 01:44:55,195 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.438e+02 2.954e+02 4.014e+02 9.557e+02, threshold=5.908e+02, percent-clipped=3.0
+2023-02-07 01:45:01,392 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-07 01:45:11,699 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:45:12,214 INFO [train.py:901] (1/4) Epoch 21, batch 1050, loss[loss=0.2485, simple_loss=0.3198, pruned_loss=0.08858, over 8467.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2916, pruned_loss=0.06444, over 1608348.86 frames. ], batch size: 27, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:45:46,485 INFO [train.py:901] (1/4) Epoch 21, batch 1100, loss[loss=0.2058, simple_loss=0.2835, pruned_loss=0.06408, over 7930.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.065, over 1614028.50 frames. ], batch size: 20, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:46:06,010 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.501e+02 3.059e+02 3.494e+02 1.150e+03, threshold=6.119e+02, percent-clipped=4.0
+2023-02-07 01:46:14,532 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-07 01:46:23,780 INFO [train.py:901] (1/4) Epoch 21, batch 1150, loss[loss=0.1926, simple_loss=0.2628, pruned_loss=0.0612, over 7697.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2915, pruned_loss=0.06401, over 1617108.21 frames. ], batch size: 18, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:46:24,670 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162810.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:46:29,071 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-07 01:46:29,779 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-07 01:46:43,000 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162835.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:46:50,079 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162845.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:46:59,714 INFO [train.py:901] (1/4) Epoch 21, batch 1200, loss[loss=0.2167, simple_loss=0.3025, pruned_loss=0.06545, over 8334.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2912, pruned_loss=0.0638, over 1615941.34 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:47:09,544 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162873.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:47:17,453 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.368e+02 3.051e+02 3.779e+02 6.869e+02, threshold=6.103e+02, percent-clipped=3.0
+2023-02-07 01:47:36,400 INFO [train.py:901] (1/4) Epoch 21, batch 1250, loss[loss=0.2378, simple_loss=0.3176, pruned_loss=0.079, over 8607.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2909, pruned_loss=0.06365, over 1614703.83 frames. ], batch size: 39, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:48:11,274 INFO [train.py:901] (1/4) Epoch 21, batch 1300, loss[loss=0.1552, simple_loss=0.2284, pruned_loss=0.04105, over 7722.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2895, pruned_loss=0.06265, over 1614834.62 frames. ], batch size: 18, lr: 3.64e-03, grad_scale: 8.0
+2023-02-07 01:48:14,772 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162964.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:48:27,753 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-07 01:48:28,485 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.260e+02 2.727e+02 3.317e+02 5.773e+02, threshold=5.453e+02, percent-clipped=0.0
+2023-02-07 01:48:29,597 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.85 vs. limit=5.0
+2023-02-07 01:48:30,726 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162988.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:48:31,444 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162989.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:48:44,797 INFO [train.py:901] (1/4) Epoch 21, batch 1350, loss[loss=0.198, simple_loss=0.2877, pruned_loss=0.05413, over 8557.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.289, pruned_loss=0.06244, over 1612079.22 frames. ], batch size: 31, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:48:45,178 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-07 01:49:09,955 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-07 01:49:21,723 INFO [train.py:901] (1/4) Epoch 21, batch 1400, loss[loss=0.1791, simple_loss=0.2548, pruned_loss=0.05165, over 7514.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2879, pruned_loss=0.06238, over 1611431.78 frames. ], batch size: 18, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:49:34,612 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-07 01:49:38,380 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.26 vs. limit=5.0
+2023-02-07 01:49:39,404 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.469e+02 3.010e+02 4.050e+02 1.060e+03, threshold=6.020e+02, percent-clipped=5.0
+2023-02-07 01:49:46,325 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-07 01:49:55,428 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:49:55,918 INFO [train.py:901] (1/4) Epoch 21, batch 1450, loss[loss=0.2259, simple_loss=0.3028, pruned_loss=0.07454, over 8468.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2891, pruned_loss=0.06287, over 1613903.20 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:50:31,010 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6535, 1.6616, 2.3340, 1.5078, 1.2092, 2.3184, 0.4340, 1.3796],
+ device='cuda:1'), covar=tensor([0.1836, 0.1368, 0.0304, 0.1337, 0.3010, 0.0390, 0.2382, 0.1489],
+ device='cuda:1'), in_proj_covar=tensor([0.0186, 0.0192, 0.0125, 0.0219, 0.0269, 0.0132, 0.0167, 0.0188],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 01:50:32,141 INFO [train.py:901] (1/4) Epoch 21, batch 1500, loss[loss=0.2959, simple_loss=0.3482, pruned_loss=0.1218, over 7053.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2905, pruned_loss=0.06334, over 1616303.25 frames. ], batch size: 71, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:50:35,022 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1766, 2.5457, 3.0427, 1.6958, 3.3030, 1.8323, 1.5441, 2.0140],
+ device='cuda:1'), covar=tensor([0.0858, 0.0468, 0.0269, 0.0814, 0.0406, 0.0978, 0.1047, 0.0604],
+ device='cuda:1'), in_proj_covar=tensor([0.0450, 0.0389, 0.0337, 0.0440, 0.0371, 0.0534, 0.0389, 0.0418],
+ device='cuda:1'), out_proj_covar=tensor([1.2093e-04, 1.0215e-04, 8.8828e-05, 1.1620e-04, 9.7726e-05, 1.5150e-04,
+ 1.0536e-04, 1.1126e-04], device='cuda:1')
+2023-02-07 01:50:50,508 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.250e+02 2.722e+02 3.392e+02 6.898e+02, threshold=5.444e+02, percent-clipped=4.0
+2023-02-07 01:50:53,289 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163189.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:51:05,622 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9797, 2.0256, 1.7855, 2.6136, 1.2866, 1.6323, 1.9283, 2.0077],
+ device='cuda:1'), covar=tensor([0.0673, 0.0776, 0.0927, 0.0405, 0.1042, 0.1200, 0.0763, 0.0757],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0205, 0.0247, 0.0250, 0.0208],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 01:51:06,797 INFO [train.py:901] (1/4) Epoch 21, batch 1550, loss[loss=0.1762, simple_loss=0.2694, pruned_loss=0.04154, over 8108.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2892, pruned_loss=0.06298, over 1614994.12 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:51:31,325 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163244.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:51:42,279 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163258.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:51:42,818 INFO [train.py:901] (1/4) Epoch 21, batch 1600, loss[loss=0.1918, simple_loss=0.2718, pruned_loss=0.05584, over 8247.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2904, pruned_loss=0.06313, over 1619033.07 frames. ], batch size: 24, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:51:47,157 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5987, 1.4582, 1.6169, 1.3236, 0.9413, 1.4090, 1.4608, 1.2884],
+ device='cuda:1'), covar=tensor([0.0566, 0.1246, 0.1746, 0.1467, 0.0620, 0.1500, 0.0683, 0.0689],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0162, 0.0112, 0.0142],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 01:51:50,627 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163269.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:52:00,883 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.380e+02 3.009e+02 4.081e+02 9.131e+02, threshold=6.018e+02, percent-clipped=6.0
+2023-02-07 01:52:14,551 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163304.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:52:17,724 INFO [train.py:901] (1/4) Epoch 21, batch 1650, loss[loss=0.2236, simple_loss=0.3039, pruned_loss=0.07161, over 8436.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2903, pruned_loss=0.0634, over 1623061.84 frames. ], batch size: 39, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:52:51,352 INFO [train.py:901] (1/4) Epoch 21, batch 1700, loss[loss=0.2351, simple_loss=0.3043, pruned_loss=0.08297, over 6890.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2917, pruned_loss=0.06364, over 1626163.18 frames. ], batch size: 72, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:53:09,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.344e+02 2.897e+02 3.678e+02 1.033e+03, threshold=5.793e+02, percent-clipped=5.0
+2023-02-07 01:53:27,422 INFO [train.py:901] (1/4) Epoch 21, batch 1750, loss[loss=0.1966, simple_loss=0.2807, pruned_loss=0.05627, over 7923.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2909, pruned_loss=0.06326, over 1623351.12 frames. ], batch size: 20, lr: 3.63e-03, grad_scale: 8.0
+2023-02-07 01:53:51,719 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1269, 1.8760, 2.1428, 1.8199, 1.3427, 1.7435, 2.3606, 2.3052],
+ device='cuda:1'), covar=tensor([0.0375, 0.1113, 0.1486, 0.1305, 0.0539, 0.1362, 0.0572, 0.0551],
+ device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0158, 0.0098, 0.0161, 0.0112, 0.0142],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 01:53:56,338 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163452.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:54:01,071 INFO [train.py:901] (1/4) Epoch 21, batch 1800, loss[loss=0.1993, simple_loss=0.287, pruned_loss=0.05585, over 8461.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2901, pruned_loss=0.06292, over 1618309.03 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:54:18,725 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.661e+02 3.025e+02 4.067e+02 7.408e+02, threshold=6.049e+02, percent-clipped=6.0
+2023-02-07 01:54:36,582 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-02-07 01:54:37,335 INFO [train.py:901] (1/4) Epoch 21, batch 1850, loss[loss=0.2266, simple_loss=0.3023, pruned_loss=0.07551, over 8433.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2899, pruned_loss=0.0632, over 1614784.86 frames. ], batch size: 27, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:55:11,686 INFO [train.py:901] (1/4) Epoch 21, batch 1900, loss[loss=0.2308, simple_loss=0.3167, pruned_loss=0.07246, over 8323.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.29, pruned_loss=0.06341, over 1616512.88 frames. ], batch size: 26, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:55:12,592 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163560.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:55:17,237 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163567.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:55:26,423 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-07 01:55:29,002 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.410e+02 2.798e+02 3.588e+02 7.290e+02, threshold=5.595e+02, percent-clipped=1.0
+2023-02-07 01:55:29,220 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163585.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:55:37,723 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-07 01:55:40,620 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163602.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:55:43,572 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0
+2023-02-07 01:55:45,155 INFO [train.py:901] (1/4) Epoch 21, batch 1950, loss[loss=0.1748, simple_loss=0.2606, pruned_loss=0.04454, over 8079.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.06341, over 1614754.40 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:55:58,505 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-07 01:56:01,711 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-02-07 01:56:21,632 INFO [train.py:901] (1/4) Epoch 21, batch 2000, loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06821, over 8510.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2901, pruned_loss=0.0632, over 1616788.57 frames. ], batch size: 29, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:56:29,908 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8636, 2.0112, 1.9925, 1.6379, 2.0737, 1.6733, 1.2478, 1.8869],
+ device='cuda:1'), covar=tensor([0.0458, 0.0270, 0.0210, 0.0430, 0.0274, 0.0608, 0.0622, 0.0229],
+ device='cuda:1'), in_proj_covar=tensor([0.0446, 0.0384, 0.0336, 0.0438, 0.0367, 0.0528, 0.0387, 0.0414],
+ device='cuda:1'), out_proj_covar=tensor([1.2002e-04, 1.0088e-04, 8.8661e-05, 1.1577e-04, 9.6821e-05, 1.4969e-04,
+ 1.0456e-04, 1.1004e-04], device='cuda:1')
+2023-02-07 01:56:39,053 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.546e+02 3.013e+02 3.975e+02 6.874e+02, threshold=6.025e+02, percent-clipped=4.0
+2023-02-07 01:56:55,198 INFO [train.py:901] (1/4) Epoch 21, batch 2050, loss[loss=0.1712, simple_loss=0.2508, pruned_loss=0.04579, over 7967.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2896, pruned_loss=0.06321, over 1613843.97 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:57:00,606 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163717.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:57:05,684 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.4576, 1.7330, 5.5878, 2.3128, 5.0433, 4.7536, 5.1682, 5.0427],
+ device='cuda:1'), covar=tensor([0.0563, 0.4912, 0.0419, 0.3784, 0.0961, 0.0890, 0.0529, 0.0523],
+ device='cuda:1'), in_proj_covar=tensor([0.0624, 0.0640, 0.0685, 0.0622, 0.0701, 0.0603, 0.0602, 0.0670],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 01:57:30,310 INFO [train.py:901] (1/4) Epoch 21, batch 2100, loss[loss=0.2163, simple_loss=0.2987, pruned_loss=0.06699, over 8245.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2895, pruned_loss=0.06337, over 1609065.51 frames. ], batch size: 24, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:57:43,622 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0
+2023-02-07 01:57:48,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.575e+02 2.946e+02 3.630e+02 8.805e+02, threshold=5.893e+02, percent-clipped=3.0
+2023-02-07 01:58:04,870 INFO [train.py:901] (1/4) Epoch 21, batch 2150, loss[loss=0.2358, simple_loss=0.3231, pruned_loss=0.07426, over 8361.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2889, pruned_loss=0.0629, over 1612766.23 frames. ], batch size: 24, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:58:14,673 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:58:31,298 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:58:39,335 INFO [train.py:901] (1/4) Epoch 21, batch 2200, loss[loss=0.2111, simple_loss=0.2918, pruned_loss=0.06516, over 8248.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2878, pruned_loss=0.06231, over 1608332.20 frames. ], batch size: 22, lr: 3.63e-03, grad_scale: 16.0
+2023-02-07 01:58:58,248 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.475e+02 2.987e+02 3.670e+02 7.762e+02, threshold=5.973e+02, percent-clipped=3.0
+2023-02-07 01:59:15,125 INFO [train.py:901] (1/4) Epoch 21, batch 2250, loss[loss=0.2378, simple_loss=0.3112, pruned_loss=0.08214, over 6807.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2887, pruned_loss=0.06254, over 1611883.67 frames. ], batch size: 72, lr: 3.62e-03, grad_scale: 16.0
+2023-02-07 01:59:21,984 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2137, 4.2404, 3.7929, 2.0563, 3.7540, 3.7182, 3.7525, 3.6232],
+ device='cuda:1'), covar=tensor([0.0791, 0.0536, 0.1081, 0.4514, 0.0926, 0.1001, 0.1238, 0.0965],
+ device='cuda:1'), in_proj_covar=tensor([0.0519, 0.0433, 0.0435, 0.0538, 0.0426, 0.0441, 0.0421, 0.0382],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 01:59:30,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.4928, 5.6367, 4.8602, 2.5951, 4.9358, 5.2925, 5.1674, 5.0844],
+ device='cuda:1'), covar=tensor([0.0583, 0.0406, 0.0949, 0.4683, 0.0839, 0.0788, 0.1006, 0.0701],
+ device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0433, 0.0435, 0.0538, 0.0427, 0.0441, 0.0421, 0.0383],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 01:59:33,087 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6922, 1.9582, 2.1053, 1.4031, 2.2164, 1.5808, 0.6697, 1.9293],
+ device='cuda:1'), covar=tensor([0.0631, 0.0386, 0.0297, 0.0588, 0.0361, 0.0843, 0.0962, 0.0305],
+ device='cuda:1'), in_proj_covar=tensor([0.0448, 0.0384, 0.0336, 0.0439, 0.0369, 0.0526, 0.0387, 0.0413],
+ device='cuda:1'), out_proj_covar=tensor([1.2041e-04, 1.0092e-04, 8.8536e-05, 1.1599e-04, 9.7212e-05, 1.4906e-04,
+ 1.0461e-04, 1.0979e-04], device='cuda:1')
+2023-02-07 01:59:49,141 INFO [train.py:901] (1/4) Epoch 21, batch 2300, loss[loss=0.1984, simple_loss=0.2777, pruned_loss=0.05956, over 8101.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2883, pruned_loss=0.06223, over 1614689.47 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 16.0
+2023-02-07 01:59:58,900 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:00:08,071 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.361e+02 2.889e+02 3.736e+02 8.411e+02, threshold=5.778e+02, percent-clipped=4.0
+2023-02-07 02:00:17,861 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163998.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:00:20,568 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-07 02:00:26,181 INFO [train.py:901] (1/4) Epoch 21, batch 2350, loss[loss=0.1756, simple_loss=0.2678, pruned_loss=0.04174, over 8235.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2886, pruned_loss=0.06183, over 1616232.11 frames. ], batch size: 22, lr: 3.62e-03, grad_scale: 16.0
+2023-02-07 02:00:39,166 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-07 02:00:46,898 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1701, 1.3264, 1.3114, 1.0170, 1.3719, 1.1057, 0.3278, 1.2784],
+ device='cuda:1'), covar=tensor([0.0377, 0.0273, 0.0213, 0.0375, 0.0290, 0.0560, 0.0683, 0.0190],
+ device='cuda:1'), in_proj_covar=tensor([0.0447, 0.0383, 0.0334, 0.0437, 0.0368, 0.0525, 0.0386, 0.0412],
+ device='cuda:1'), out_proj_covar=tensor([1.2028e-04, 1.0044e-04, 8.8028e-05, 1.1556e-04, 9.7141e-05, 1.4874e-04,
+ 1.0434e-04, 1.0937e-04], device='cuda:1')
+2023-02-07 02:01:01,227 INFO [train.py:901] (1/4) Epoch 21, batch 2400, loss[loss=0.1998, simple_loss=0.2599, pruned_loss=0.06987, over 7533.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2893, pruned_loss=0.0626, over 1616302.88 frames. ], batch size: 18, lr: 3.62e-03, grad_scale: 16.0
+2023-02-07 02:01:19,285 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 2.419e+02 2.926e+02 3.800e+02 6.132e+02, threshold=5.852e+02, percent-clipped=4.0
+2023-02-07 02:01:37,288 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:01:37,736 INFO [train.py:901] (1/4) Epoch 21, batch 2450, loss[loss=0.2822, simple_loss=0.3453, pruned_loss=0.1095, over 8288.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2889, pruned_loss=0.06262, over 1607638.90 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 16.0
+2023-02-07 02:01:54,109 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-07 02:02:12,735 INFO [train.py:901] (1/4) Epoch 21, batch 2500, loss[loss=0.2176, simple_loss=0.3, pruned_loss=0.06759, over 8469.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06227, over 1610540.50 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:02:22,147 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164173.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:02:30,792 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.421e+02 3.174e+02 4.025e+02 1.090e+03, threshold=6.349e+02, percent-clipped=9.0
+2023-02-07 02:02:46,233 INFO [train.py:901] (1/4) Epoch 21, batch 2550, loss[loss=0.2259, simple_loss=0.3105, pruned_loss=0.07062, over 8456.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2894, pruned_loss=0.06323, over 1613157.66 frames. ], batch size: 29, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:02:55,028 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1695, 2.2332, 1.8348, 2.8618, 1.3221, 1.6917, 2.0027, 2.2569],
+ device='cuda:1'), covar=tensor([0.0667, 0.0803, 0.0900, 0.0362, 0.1189, 0.1304, 0.0947, 0.0835],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0195, 0.0244, 0.0212, 0.0204, 0.0244, 0.0250, 0.0208],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 02:03:22,649 INFO [train.py:901] (1/4) Epoch 21, batch 2600, loss[loss=0.1608, simple_loss=0.2421, pruned_loss=0.03978, over 7797.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.289, pruned_loss=0.06299, over 1612432.74 frames. ], batch size: 19, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:03:40,882 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.272e+02 2.670e+02 3.622e+02 6.852e+02, threshold=5.341e+02, percent-clipped=1.0
+2023-02-07 02:03:56,835 INFO [train.py:901] (1/4) Epoch 21, batch 2650, loss[loss=0.1991, simple_loss=0.2864, pruned_loss=0.05593, over 8030.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2886, pruned_loss=0.06273, over 1612327.06 frames. ], batch size: 22, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:04:33,159 INFO [train.py:901] (1/4) Epoch 21, batch 2700, loss[loss=0.2687, simple_loss=0.3341, pruned_loss=0.1017, over 7023.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2878, pruned_loss=0.06205, over 1610682.70 frames. ], batch size: 71, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:04:46,946 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164378.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:04:52,076 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.228e+02 2.697e+02 3.361e+02 7.045e+02, threshold=5.394e+02, percent-clipped=4.0
+2023-02-07 02:05:07,796 INFO [train.py:901] (1/4) Epoch 21, batch 2750, loss[loss=0.2386, simple_loss=0.319, pruned_loss=0.07908, over 8331.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2882, pruned_loss=0.06232, over 1608815.14 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:05:36,818 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164452.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:05:42,225 INFO [train.py:901] (1/4) Epoch 21, batch 2800, loss[loss=0.1638, simple_loss=0.2531, pruned_loss=0.03731, over 7928.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2879, pruned_loss=0.06219, over 1610576.26 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:06:02,577 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.305e+02 2.813e+02 3.760e+02 7.507e+02, threshold=5.625e+02, percent-clipped=3.0
+2023-02-07 02:06:18,042 INFO [train.py:901] (1/4) Epoch 21, batch 2850, loss[loss=0.1698, simple_loss=0.2567, pruned_loss=0.04149, over 8024.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2887, pruned_loss=0.06325, over 1604846.86 frames. ], batch size: 22, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:06:23,425 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:06:51,355 INFO [train.py:901] (1/4) Epoch 21, batch 2900, loss[loss=0.2812, simple_loss=0.364, pruned_loss=0.09921, over 8294.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.06386, over 1606183.63 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:06:56,992 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164567.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:07:09,738 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-07 02:07:11,676 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.599e+02 3.265e+02 4.069e+02 1.074e+03, threshold=6.531e+02, percent-clipped=8.0
+2023-02-07 02:07:11,869 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164586.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:07:19,714 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164596.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:07:28,723 INFO [train.py:901] (1/4) Epoch 21, batch 2950, loss[loss=0.2171, simple_loss=0.3063, pruned_loss=0.0639, over 8345.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2915, pruned_loss=0.06397, over 1612596.51 frames. ], batch size: 24, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:07:44,450 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:08:02,292 INFO [train.py:901] (1/4) Epoch 21, batch 3000, loss[loss=0.1876, simple_loss=0.2726, pruned_loss=0.05127, over 8291.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2918, pruned_loss=0.0639, over 1615027.42 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:08:02,293 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-07 02:08:15,069 INFO [train.py:935] (1/4) Epoch 21, validation: loss=0.1742, simple_loss=0.2744, pruned_loss=0.03706, over 944034.00 frames.
+2023-02-07 02:08:15,070 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-07 02:08:26,767 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164676.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:08:33,560 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.380e+02 2.886e+02 3.399e+02 6.002e+02, threshold=5.772e+02, percent-clipped=0.0
+2023-02-07 02:08:46,925 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-07 02:08:49,848 INFO [train.py:901] (1/4) Epoch 21, batch 3050, loss[loss=0.1935, simple_loss=0.268, pruned_loss=0.0595, over 7444.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2918, pruned_loss=0.06433, over 1614827.24 frames. ], batch size: 17, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:08:59,339 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164722.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:09:08,319 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6771, 5.8706, 5.0045, 2.5629, 5.1006, 5.6039, 5.4923, 5.3792],
+ device='cuda:1'), covar=tensor([0.0608, 0.0436, 0.1010, 0.4548, 0.0812, 0.0776, 0.1025, 0.0599],
+ device='cuda:1'), in_proj_covar=tensor([0.0512, 0.0428, 0.0428, 0.0529, 0.0420, 0.0432, 0.0411, 0.0377],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:09:25,492 INFO [train.py:901] (1/4) Epoch 21, batch 3100, loss[loss=0.188, simple_loss=0.2693, pruned_loss=0.05339, over 7932.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2912, pruned_loss=0.0642, over 1615195.67 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:09:29,031 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164764.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:09:38,547 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8127, 2.3979, 1.9365, 2.1677, 2.1467, 1.8385, 2.0776, 2.1498],
+ device='cuda:1'), covar=tensor([0.0994, 0.0356, 0.0813, 0.0438, 0.0542, 0.1043, 0.0709, 0.0694],
+ device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0234, 0.0333, 0.0306, 0.0296, 0.0331, 0.0341, 0.0316],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 02:09:39,120 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2128, 4.1848, 3.7762, 1.9858, 3.6875, 3.7754, 3.7126, 3.6130],
+ device='cuda:1'), covar=tensor([0.0832, 0.0628, 0.1208, 0.4705, 0.1018, 0.0970, 0.1389, 0.0931],
+ device='cuda:1'), in_proj_covar=tensor([0.0509, 0.0426, 0.0426, 0.0526, 0.0417, 0.0430, 0.0409, 0.0376],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:09:43,641 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.375e+02 2.980e+02 3.572e+02 8.800e+02, threshold=5.960e+02, percent-clipped=5.0
+2023-02-07 02:09:59,127 INFO [train.py:901] (1/4) Epoch 21, batch 3150, loss[loss=0.2219, simple_loss=0.3067, pruned_loss=0.0685, over 8244.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2909, pruned_loss=0.06425, over 1614200.27 frames. ], batch size: 24, lr: 3.62e-03, grad_scale: 8.0
+2023-02-07 02:10:08,647 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:10:12,645 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9600, 1.5519, 3.3909, 1.5628, 2.4964, 3.7752, 3.8642, 3.2363],
+ device='cuda:1'), covar=tensor([0.1143, 0.1846, 0.0351, 0.2166, 0.0943, 0.0217, 0.0366, 0.0554],
+ device='cuda:1'), in_proj_covar=tensor([0.0290, 0.0320, 0.0288, 0.0314, 0.0304, 0.0261, 0.0410, 0.0306],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 02:10:19,451 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164837.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:10:26,877 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:10:34,953 INFO [train.py:901] (1/4) Epoch 21, batch 3200, loss[loss=0.1807, simple_loss=0.2651, pruned_loss=0.04813, over 7799.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2903, pruned_loss=0.06415, over 1610469.48 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:10:54,110 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.324e+02 2.650e+02 3.384e+02 7.808e+02, threshold=5.299e+02, percent-clipped=1.0
+2023-02-07 02:10:55,685 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164888.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:11:09,475 INFO [train.py:901] (1/4) Epoch 21, batch 3250, loss[loss=0.1639, simple_loss=0.241, pruned_loss=0.04338, over 7664.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2905, pruned_loss=0.06393, over 1611863.57 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:11:12,444 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164913.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:11:23,942 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164930.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:11:30,775 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164940.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:11:44,786 INFO [train.py:901] (1/4) Epoch 21, batch 3300, loss[loss=0.2117, simple_loss=0.2983, pruned_loss=0.06255, over 8641.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2903, pruned_loss=0.0637, over 1617789.28 frames. ], batch size: 39, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:12:05,213 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.295e+02 2.742e+02 3.217e+02 7.829e+02, threshold=5.483e+02, percent-clipped=4.0
+2023-02-07 02:12:20,626 INFO [train.py:901] (1/4) Epoch 21, batch 3350, loss[loss=0.211, simple_loss=0.2973, pruned_loss=0.06235, over 8343.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2916, pruned_loss=0.06451, over 1613156.27 frames. ], batch size: 24, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:12:28,061 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165020.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:12:45,357 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:12:52,356 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165055.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:12:54,954 INFO [train.py:901] (1/4) Epoch 21, batch 3400, loss[loss=0.1943, simple_loss=0.2749, pruned_loss=0.05686, over 8664.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2911, pruned_loss=0.06407, over 1614255.90 frames. ], batch size: 39, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:12:59,414 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.64 vs. limit=5.0
+2023-02-07 02:13:15,620 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 2.300e+02 2.821e+02 3.884e+02 1.046e+03, threshold=5.643e+02, percent-clipped=8.0
+2023-02-07 02:13:20,661 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165093.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:13:31,317 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:13:31,884 INFO [train.py:901] (1/4) Epoch 21, batch 3450, loss[loss=0.3009, simple_loss=0.364, pruned_loss=0.1189, over 7226.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2902, pruned_loss=0.06348, over 1613510.29 frames. ], batch size: 71, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:13:38,107 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165118.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:13:49,300 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165135.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:13:53,299 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165141.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:14:05,183 INFO [train.py:901] (1/4) Epoch 21, batch 3500, loss[loss=0.1679, simple_loss=0.2535, pruned_loss=0.04118, over 7200.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2905, pruned_loss=0.06358, over 1616640.86 frames. ], batch size: 16, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:14:07,492 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.34 vs. limit=5.0
+2023-02-07 02:14:10,616 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-07 02:14:19,678 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7648, 1.6208, 2.5344, 1.6757, 1.3048, 2.4035, 0.5161, 1.5186],
+ device='cuda:1'), covar=tensor([0.1754, 0.1721, 0.0341, 0.1418, 0.2929, 0.0533, 0.2301, 0.1572],
+ device='cuda:1'), in_proj_covar=tensor([0.0187, 0.0196, 0.0128, 0.0222, 0.0272, 0.0135, 0.0170, 0.0191],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 02:14:24,648 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.436e+02 2.745e+02 3.695e+02 8.606e+02, threshold=5.490e+02, percent-clipped=3.0
+2023-02-07 02:14:31,313 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-07 02:14:41,283 INFO [train.py:901] (1/4) Epoch 21, batch 3550, loss[loss=0.2237, simple_loss=0.3067, pruned_loss=0.07034, over 8252.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2894, pruned_loss=0.06314, over 1615901.15 frames. ], batch size: 22, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:14:51,651 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165223.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:15:15,595 INFO [train.py:901] (1/4) Epoch 21, batch 3600, loss[loss=0.2087, simple_loss=0.2962, pruned_loss=0.06056, over 8324.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2907, pruned_loss=0.06399, over 1614565.03 frames. ], batch size: 26, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:15:34,165 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.347e+02 2.942e+02 3.699e+02 7.087e+02, threshold=5.884e+02, percent-clipped=2.0
+2023-02-07 02:15:36,935 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8515, 1.4788, 1.6329, 1.3274, 0.9132, 1.4290, 1.6020, 1.3764],
+ device='cuda:1'), covar=tensor([0.0533, 0.1283, 0.1683, 0.1444, 0.0633, 0.1517, 0.0715, 0.0694],
+ device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0158, 0.0099, 0.0161, 0.0112, 0.0142],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 02:15:44,469 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:15:51,143 INFO [train.py:901] (1/4) Epoch 21, batch 3650, loss[loss=0.1954, simple_loss=0.2779, pruned_loss=0.05642, over 8519.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2896, pruned_loss=0.06362, over 1613506.34 frames. ], batch size: 29, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:15:52,693 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165311.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:15:53,562 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-07 02:15:56,696 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3814, 2.7995, 2.3121, 3.7435, 1.6294, 2.1911, 2.1928, 2.6974],
+ device='cuda:1'), covar=tensor([0.0699, 0.0751, 0.0820, 0.0305, 0.1137, 0.1135, 0.1035, 0.0773],
+ device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0205, 0.0246, 0.0250, 0.0208],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 02:16:03,316 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:16:08,101 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6144, 2.3669, 4.2219, 1.4170, 3.0507, 2.2967, 1.8812, 3.0525],
+ device='cuda:1'), covar=tensor([0.2041, 0.2678, 0.0869, 0.4747, 0.1871, 0.3217, 0.2343, 0.2370],
+ device='cuda:1'), in_proj_covar=tensor([0.0523, 0.0596, 0.0557, 0.0639, 0.0645, 0.0592, 0.0535, 0.0634],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:16:10,832 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:16:16,763 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 02:16:25,985 INFO [train.py:901] (1/4) Epoch 21, batch 3700, loss[loss=0.324, simple_loss=0.3759, pruned_loss=0.136, over 6827.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2901, pruned_loss=0.06412, over 1614402.61 frames. ], batch size: 72, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:16:44,015 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.402e+02 2.885e+02 3.854e+02 8.848e+02, threshold=5.771e+02, percent-clipped=5.0
+2023-02-07 02:16:47,623 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165391.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:16:59,580 INFO [train.py:901] (1/4) Epoch 21, batch 3750, loss[loss=0.2388, simple_loss=0.3238, pruned_loss=0.07688, over 8570.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2903, pruned_loss=0.06429, over 1615041.84 frames. ], batch size: 39, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:17:04,495 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165416.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:17:16,250 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1039, 2.3045, 1.9330, 2.8554, 1.3644, 1.7509, 1.8956, 2.2563],
+ device='cuda:1'), covar=tensor([0.0714, 0.0734, 0.0878, 0.0348, 0.1134, 0.1270, 0.0923, 0.0832],
+ device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0205, 0.0246, 0.0251, 0.0208],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 02:17:36,645 INFO [train.py:901] (1/4) Epoch 21, batch 3800, loss[loss=0.2113, simple_loss=0.2951, pruned_loss=0.06382, over 8239.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2905, pruned_loss=0.0644, over 1617584.68 frames. ], batch size: 22, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:17:49,666 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6706, 1.4750, 1.8037, 1.2734, 0.9125, 1.5480, 1.6052, 1.5642],
+ device='cuda:1'), covar=tensor([0.0536, 0.1236, 0.1547, 0.1486, 0.0555, 0.1413, 0.0635, 0.0599],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0160, 0.0099, 0.0162, 0.0113, 0.0142],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 02:17:50,392 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:17:54,320 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165485.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:17:54,906 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.401e+02 2.925e+02 3.673e+02 6.793e+02, threshold=5.851e+02, percent-clipped=2.0
+2023-02-07 02:17:59,010 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7052, 1.5247, 3.1927, 1.3354, 2.3937, 3.4320, 3.5924, 2.9474],
+ device='cuda:1'), covar=tensor([0.1206, 0.1686, 0.0344, 0.2149, 0.0924, 0.0255, 0.0568, 0.0577],
+ device='cuda:1'), in_proj_covar=tensor([0.0292, 0.0321, 0.0287, 0.0315, 0.0308, 0.0263, 0.0412, 0.0308],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 02:18:07,150 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:18:10,365 INFO [train.py:901] (1/4) Epoch 21, batch 3850, loss[loss=0.1798, simple_loss=0.2621, pruned_loss=0.04877, over 5915.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2908, pruned_loss=0.06447, over 1613673.42 frames. ], batch size: 13, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:18:18,546 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 02:18:31,966 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3063, 2.1498, 1.6842, 1.8998, 1.8412, 1.4541, 1.7498, 1.6519],
+ device='cuda:1'), covar=tensor([0.1268, 0.0456, 0.1239, 0.0529, 0.0657, 0.1505, 0.0839, 0.0801],
+ device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0235, 0.0334, 0.0310, 0.0298, 0.0336, 0.0345, 0.0318],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 02:18:46,117 INFO [train.py:901] (1/4) Epoch 21, batch 3900, loss[loss=0.1988, simple_loss=0.2829, pruned_loss=0.05736, over 8495.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.064, over 1614014.65 frames. ], batch size: 26, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:18:53,038 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165569.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:19:05,130 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.182e+02 2.809e+02 3.459e+02 6.713e+02, threshold=5.619e+02, percent-clipped=4.0
+2023-02-07 02:19:14,928 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165600.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:19:20,732 INFO [train.py:901] (1/4) Epoch 21, batch 3950, loss[loss=0.1946, simple_loss=0.2529, pruned_loss=0.06817, over 7426.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2905, pruned_loss=0.06422, over 1610668.50 frames. ], batch size: 17, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:19:55,187 INFO [train.py:901] (1/4) Epoch 21, batch 4000, loss[loss=0.1961, simple_loss=0.2673, pruned_loss=0.06245, over 7430.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2903, pruned_loss=0.06383, over 1613302.30 frames. ], batch size: 17, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:20:15,749 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.370e+02 2.936e+02 3.785e+02 6.204e+02, threshold=5.872e+02, percent-clipped=2.0
+2023-02-07 02:20:31,250 INFO [train.py:901] (1/4) Epoch 21, batch 4050, loss[loss=0.2255, simple_loss=0.3101, pruned_loss=0.07041, over 8245.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2907, pruned_loss=0.06312, over 1615531.42 frames. ], batch size: 24, lr: 3.61e-03, grad_scale: 8.0
+2023-02-07 02:20:47,659 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-07 02:21:03,807 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0
+2023-02-07 02:21:04,769 INFO [train.py:901] (1/4) Epoch 21, batch 4100, loss[loss=0.1735, simple_loss=0.2435, pruned_loss=0.05174, over 7216.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2895, pruned_loss=0.06239, over 1617087.03 frames. ], batch size: 16, lr: 3.60e-03, grad_scale: 8.0
+2023-02-07 02:21:11,130 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165768.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:21:24,843 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.510e+02 3.105e+02 3.860e+02 6.931e+02, threshold=6.209e+02, percent-clipped=6.0
+2023-02-07 02:21:41,904 INFO [train.py:901] (1/4) Epoch 21, batch 4150, loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06177, over 8082.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2895, pruned_loss=0.06271, over 1616592.47 frames. ], batch size: 21, lr: 3.60e-03, grad_scale: 8.0
+2023-02-07 02:21:48,858 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6150, 1.5316, 2.1134, 1.4089, 1.2265, 2.0772, 0.3331, 1.2685],
+ device='cuda:1'), covar=tensor([0.1642, 0.1634, 0.0416, 0.1014, 0.2811, 0.0425, 0.2158, 0.1306],
+ device='cuda:1'), in_proj_covar=tensor([0.0188, 0.0195, 0.0128, 0.0221, 0.0272, 0.0135, 0.0171, 0.0191],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 02:21:50,126 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165821.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:22:13,875 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165856.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:22:15,091 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 02:22:15,757 INFO [train.py:901] (1/4) Epoch 21, batch 4200, loss[loss=0.2026, simple_loss=0.2911, pruned_loss=0.05707, over 8292.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2898, pruned_loss=0.06324, over 1614563.53 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 8.0
+2023-02-07 02:22:30,505 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165881.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:22:33,705 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.320e+02 2.907e+02 3.705e+02 7.802e+02, threshold=5.814e+02, percent-clipped=2.0
+2023-02-07 02:22:37,065 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 02:22:50,802 INFO [train.py:901] (1/4) Epoch 21, batch 4250, loss[loss=0.1659, simple_loss=0.2489, pruned_loss=0.04142, over 7924.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2898, pruned_loss=0.06262, over 1614861.34 frames. ], batch size: 20, lr: 3.60e-03, grad_scale: 8.0
+2023-02-07 02:22:53,703 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165913.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:23:14,530 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5933, 2.0125, 3.2322, 1.4213, 2.3984, 2.0393, 1.6663, 2.3458],
+ device='cuda:1'), covar=tensor([0.1914, 0.2553, 0.0805, 0.4473, 0.1764, 0.3193, 0.2342, 0.2315],
+ device='cuda:1'), in_proj_covar=tensor([0.0523, 0.0597, 0.0555, 0.0639, 0.0645, 0.0594, 0.0536, 0.0635],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:23:26,542 INFO [train.py:901] (1/4) Epoch 21, batch 4300, loss[loss=0.2121, simple_loss=0.2997, pruned_loss=0.06219, over 8353.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2895, pruned_loss=0.0626, over 1618786.35 frames. ], batch size: 24, lr: 3.60e-03, grad_scale: 8.0
+2023-02-07 02:23:44,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.284e+02 2.728e+02 3.396e+02 7.954e+02, threshold=5.457e+02, percent-clipped=4.0
+2023-02-07 02:23:50,846 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5516, 1.7966, 2.6791, 1.4229, 2.0088, 1.8126, 1.6601, 1.9640],
+ device='cuda:1'), covar=tensor([0.1814, 0.2439, 0.0823, 0.4246, 0.1767, 0.3159, 0.2192, 0.2176],
+ device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0595, 0.0551, 0.0636, 0.0642, 0.0591, 0.0534, 0.0632],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:24:01,653 INFO [train.py:901] (1/4) Epoch 21, batch 4350, loss[loss=0.2001, simple_loss=0.2763, pruned_loss=0.06199, over 7648.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06228, over 1618805.18 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 8.0
+2023-02-07 02:24:11,729 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training.
Duration: 28.638875 +2023-02-07 02:24:15,222 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:24:16,540 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5132, 1.6011, 2.2112, 1.4264, 1.6140, 1.7299, 1.5358, 1.5705], + device='cuda:1'), covar=tensor([0.1850, 0.2575, 0.0818, 0.4182, 0.1777, 0.3302, 0.2253, 0.2002], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0593, 0.0551, 0.0635, 0.0641, 0.0590, 0.0533, 0.0631], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:24:36,835 INFO [train.py:901] (1/4) Epoch 21, batch 4400, loss[loss=0.2333, simple_loss=0.3119, pruned_loss=0.07731, over 8598.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2877, pruned_loss=0.06199, over 1619655.80 frames. ], batch size: 34, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:45,739 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166072.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:24:54,411 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 02:24:55,055 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.482e+02 3.095e+02 3.863e+02 7.424e+02, threshold=6.191e+02, percent-clipped=10.0 +2023-02-07 02:25:10,665 INFO [train.py:901] (1/4) Epoch 21, batch 4450, loss[loss=0.2344, simple_loss=0.304, pruned_loss=0.08237, over 6697.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06076, over 1617793.63 frames. ], batch size: 71, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:25:12,802 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166112.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:22,690 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5362, 2.6966, 3.0163, 1.8819, 3.2827, 2.2221, 1.6203, 2.2166], + device='cuda:1'), covar=tensor([0.0764, 0.0420, 0.0289, 0.0770, 0.0486, 0.0791, 0.0993, 0.0547], + device='cuda:1'), in_proj_covar=tensor([0.0454, 0.0387, 0.0341, 0.0441, 0.0374, 0.0533, 0.0391, 0.0415], + device='cuda:1'), out_proj_covar=tensor([1.2196e-04, 1.0144e-04, 8.9743e-05, 1.1657e-04, 9.8624e-05, 1.5080e-04, + 1.0559e-04, 1.1017e-04], device='cuda:1') +2023-02-07 02:25:27,716 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 02:25:45,424 INFO [train.py:901] (1/4) Epoch 21, batch 4500, loss[loss=0.2213, simple_loss=0.3165, pruned_loss=0.06303, over 8469.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06124, over 1617792.22 frames. ], batch size: 25, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:25:50,222 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 02:25:50,296 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:50,366 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:05,003 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.688e+02 3.191e+02 4.415e+02 1.086e+03, threshold=6.382e+02, percent-clipped=9.0 +2023-02-07 02:26:20,604 INFO [train.py:901] (1/4) Epoch 21, batch 4550, loss[loss=0.2418, simple_loss=0.3178, pruned_loss=0.08284, over 8284.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2866, pruned_loss=0.06175, over 1613367.15 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:26:27,578 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2739, 2.0015, 2.7012, 2.1907, 2.6050, 2.3014, 2.0237, 1.4359], + device='cuda:1'), covar=tensor([0.5160, 0.5107, 0.1918, 0.3804, 0.2647, 0.3031, 0.1924, 0.5277], + device='cuda:1'), in_proj_covar=tensor([0.0935, 0.0972, 0.0794, 0.0932, 0.0989, 0.0885, 0.0741, 0.0820], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 02:26:33,047 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166227.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:54,689 INFO [train.py:901] (1/4) Epoch 21, batch 4600, loss[loss=0.1943, simple_loss=0.2831, pruned_loss=0.05269, over 8195.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2863, pruned_loss=0.06158, over 1613627.56 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:10,550 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166280.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:14,138 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166284.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:15,235 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.551e+02 3.310e+02 4.080e+02 7.820e+02, threshold=6.621e+02, percent-clipped=4.0 +2023-02-07 02:27:30,341 INFO [train.py:901] (1/4) Epoch 21, batch 4650, loss[loss=0.2061, simple_loss=0.2911, pruned_loss=0.06057, over 8591.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2882, pruned_loss=0.06255, over 1617394.96 frames. ], batch size: 34, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:30,541 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:41,198 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:28:03,640 INFO [train.py:901] (1/4) Epoch 21, batch 4700, loss[loss=0.226, simple_loss=0.3143, pruned_loss=0.06889, over 8198.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2882, pruned_loss=0.06243, over 1618290.67 frames. ], batch size: 48, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:28:23,899 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.420e+02 2.373e+02 2.801e+02 3.877e+02 1.145e+03, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 02:28:40,136 INFO [train.py:901] (1/4) Epoch 21, batch 4750, loss[loss=0.1665, simple_loss=0.2461, pruned_loss=0.04348, over 7692.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.289, pruned_loss=0.06262, over 1620793.17 frames. 
], batch size: 18, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:28:45,008 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166416.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:28:52,966 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 02:28:55,068 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 02:29:14,176 INFO [train.py:901] (1/4) Epoch 21, batch 4800, loss[loss=0.2618, simple_loss=0.3299, pruned_loss=0.09686, over 8332.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.06345, over 1619249.68 frames. ], batch size: 26, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:30,703 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166483.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:33,191 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.407e+02 2.819e+02 3.849e+02 8.316e+02, threshold=5.639e+02, percent-clipped=5.0 +2023-02-07 02:29:39,662 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0 +2023-02-07 02:29:42,736 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:43,961 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 02:29:48,835 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166508.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:49,314 INFO [train.py:901] (1/4) Epoch 21, batch 4850, loss[loss=0.168, simple_loss=0.2574, pruned_loss=0.03927, over 7799.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2893, pruned_loss=0.06317, over 1619223.98 frames. ], batch size: 20, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:49,390 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:03,257 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:05,913 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166531.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:30:09,302 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166536.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:17,926 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:21,057 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.34 vs. limit=5.0 +2023-02-07 02:30:24,625 INFO [train.py:901] (1/4) Epoch 21, batch 4900, loss[loss=0.1742, simple_loss=0.2579, pruned_loss=0.04522, over 7797.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.06357, over 1619142.30 frames. 
], batch size: 20, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:30:26,198 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:43,069 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.429e+02 3.059e+02 4.014e+02 7.599e+02, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 02:30:54,282 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6212, 1.5199, 2.1441, 1.4068, 1.2965, 2.0596, 0.3090, 1.1947], + device='cuda:1'), covar=tensor([0.1754, 0.1436, 0.0428, 0.1277, 0.2997, 0.0493, 0.2427, 0.1612], + device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0196, 0.0128, 0.0223, 0.0272, 0.0136, 0.0172, 0.0192], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 02:30:58,618 INFO [train.py:901] (1/4) Epoch 21, batch 4950, loss[loss=0.2173, simple_loss=0.2933, pruned_loss=0.07064, over 8464.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.289, pruned_loss=0.06326, over 1616586.58 frames. ], batch size: 25, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:31:09,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:34,523 INFO [train.py:901] (1/4) Epoch 21, batch 5000, loss[loss=0.2017, simple_loss=0.2946, pruned_loss=0.05439, over 8465.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2884, pruned_loss=0.06263, over 1620460.96 frames. ], batch size: 29, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:31:41,052 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:52,864 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.262e+02 2.770e+02 3.475e+02 7.586e+02, threshold=5.540e+02, percent-clipped=2.0 +2023-02-07 02:32:07,639 INFO [train.py:901] (1/4) Epoch 21, batch 5050, loss[loss=0.1663, simple_loss=0.2466, pruned_loss=0.04305, over 7654.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2879, pruned_loss=0.06247, over 1617053.18 frames. ], batch size: 19, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:23,026 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 02:32:38,361 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:32:42,946 INFO [train.py:901] (1/4) Epoch 21, batch 5100, loss[loss=0.1812, simple_loss=0.2594, pruned_loss=0.05149, over 7534.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2886, pruned_loss=0.06302, over 1611552.66 frames. 
], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:59,535 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:00,922 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:02,730 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.499e+02 3.045e+02 3.729e+02 1.083e+03, threshold=6.090e+02, percent-clipped=5.0 +2023-02-07 02:33:02,982 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166787.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:33:17,657 INFO [train.py:901] (1/4) Epoch 21, batch 5150, loss[loss=0.2031, simple_loss=0.2864, pruned_loss=0.05994, over 8341.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2891, pruned_loss=0.06309, over 1608911.97 frames. ], batch size: 26, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:19,983 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166812.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:33:35,606 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9066, 2.2524, 1.8074, 2.8140, 1.3013, 1.5130, 1.8718, 2.2425], + device='cuda:1'), covar=tensor([0.0830, 0.0735, 0.0940, 0.0419, 0.1162, 0.1379, 0.0986, 0.0791], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0194, 0.0241, 0.0210, 0.0203, 0.0241, 0.0249, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 02:33:40,944 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:52,915 INFO [train.py:901] (1/4) Epoch 21, batch 5200, loss[loss=0.2103, simple_loss=0.29, pruned_loss=0.06524, over 8345.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2886, pruned_loss=0.06276, over 1608229.66 frames. ], batch size: 24, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:54,691 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.00 vs. limit=5.0 +2023-02-07 02:34:01,085 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:08,931 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:13,421 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.417e+02 2.893e+02 3.464e+02 9.071e+02, threshold=5.787e+02, percent-clipped=3.0 +2023-02-07 02:34:17,471 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166893.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:22,859 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 02:34:25,856 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166905.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:28,364 INFO [train.py:901] (1/4) Epoch 21, batch 5250, loss[loss=0.1903, simple_loss=0.2757, pruned_loss=0.05241, over 8255.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2886, pruned_loss=0.06302, over 1608886.54 frames. 
], batch size: 24, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:34:53,837 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:00,977 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:01,476 INFO [train.py:901] (1/4) Epoch 21, batch 5300, loss[loss=0.2094, simple_loss=0.2951, pruned_loss=0.06183, over 8350.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2892, pruned_loss=0.06334, over 1614217.90 frames. ], batch size: 24, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:21,123 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:21,589 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.349e+02 2.996e+02 3.802e+02 6.845e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-07 02:35:22,371 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4250, 3.8297, 2.4758, 2.9205, 2.9082, 2.3871, 3.1285, 3.1697], + device='cuda:1'), covar=tensor([0.1408, 0.0310, 0.1070, 0.0749, 0.0683, 0.1220, 0.0882, 0.0981], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0235, 0.0335, 0.0308, 0.0298, 0.0334, 0.0344, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 02:35:37,478 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:37,990 INFO [train.py:901] (1/4) Epoch 21, batch 5350, loss[loss=0.164, simple_loss=0.2525, pruned_loss=0.03774, over 7557.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2889, pruned_loss=0.06286, over 1609235.08 frames. ], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:58,871 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167040.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:12,088 INFO [train.py:901] (1/4) Epoch 21, batch 5400, loss[loss=0.2276, simple_loss=0.3039, pruned_loss=0.0756, over 8526.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2904, pruned_loss=0.06299, over 1615751.62 frames. ], batch size: 28, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:16,470 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167065.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:32,170 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.459e+02 3.013e+02 3.547e+02 6.118e+02, threshold=6.026e+02, percent-clipped=1.0 +2023-02-07 02:36:39,934 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:47,948 INFO [train.py:901] (1/4) Epoch 21, batch 5450, loss[loss=0.1941, simple_loss=0.2823, pruned_loss=0.0529, over 8220.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2901, pruned_loss=0.06289, over 1616701.80 frames. 
], batch size: 22, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:55,798 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:58,550 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:01,214 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:12,874 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 02:37:19,298 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.4462, 1.6977, 5.6104, 2.5361, 5.0123, 4.7598, 5.1889, 5.0487], + device='cuda:1'), covar=tensor([0.0505, 0.5096, 0.0348, 0.3567, 0.1038, 0.0845, 0.0494, 0.0534], + device='cuda:1'), in_proj_covar=tensor([0.0630, 0.0643, 0.0695, 0.0629, 0.0707, 0.0605, 0.0605, 0.0677], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:37:24,077 INFO [train.py:901] (1/4) Epoch 21, batch 5500, loss[loss=0.2013, simple_loss=0.2747, pruned_loss=0.06394, over 7654.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.29, pruned_loss=0.06277, over 1616056.52 frames. ], batch size: 19, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:37:43,662 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.538e+02 3.099e+02 3.967e+02 8.838e+02, threshold=6.197e+02, percent-clipped=3.0 +2023-02-07 02:37:58,456 INFO [train.py:901] (1/4) Epoch 21, batch 5550, loss[loss=0.2141, simple_loss=0.2989, pruned_loss=0.06463, over 8512.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2882, pruned_loss=0.06226, over 1612475.63 frames. ], batch size: 39, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:38:01,329 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:02,582 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:21,139 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:22,456 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:23,133 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167242.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:34,484 INFO [train.py:901] (1/4) Epoch 21, batch 5600, loss[loss=0.1923, simple_loss=0.2743, pruned_loss=0.05517, over 7529.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06294, over 1615871.71 frames. ], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:38:34,956 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.31 vs. 
limit=5.0 +2023-02-07 02:38:36,020 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9861, 1.7986, 6.1296, 2.3082, 5.5141, 5.1611, 5.6883, 5.5353], + device='cuda:1'), covar=tensor([0.0488, 0.4715, 0.0320, 0.3842, 0.0910, 0.0743, 0.0471, 0.0458], + device='cuda:1'), in_proj_covar=tensor([0.0629, 0.0637, 0.0691, 0.0626, 0.0701, 0.0602, 0.0601, 0.0672], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:38:38,123 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167264.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:40,086 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167267.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:54,593 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.492e+02 3.097e+02 3.838e+02 7.086e+02, threshold=6.194e+02, percent-clipped=1.0 +2023-02-07 02:38:54,814 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:56,076 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:39:08,032 INFO [train.py:901] (1/4) Epoch 21, batch 5650, loss[loss=0.1994, simple_loss=0.2795, pruned_loss=0.05964, over 7810.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06278, over 1612640.78 frames. ], batch size: 20, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:39:18,773 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 02:39:21,792 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.47 vs. limit=2.0 +2023-02-07 02:39:44,508 INFO [train.py:901] (1/4) Epoch 21, batch 5700, loss[loss=0.2332, simple_loss=0.3176, pruned_loss=0.07445, over 8249.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2887, pruned_loss=0.0632, over 1606958.58 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:04,669 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.585e+02 3.206e+02 3.925e+02 8.506e+02, threshold=6.412e+02, percent-clipped=6.0 +2023-02-07 02:40:08,236 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6810, 4.6513, 4.2423, 2.0978, 4.1343, 4.3640, 4.2377, 4.1600], + device='cuda:1'), covar=tensor([0.0651, 0.0454, 0.0991, 0.4417, 0.0842, 0.0873, 0.1188, 0.0727], + device='cuda:1'), in_proj_covar=tensor([0.0523, 0.0430, 0.0430, 0.0534, 0.0425, 0.0440, 0.0420, 0.0382], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:40:16,558 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:17,430 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 02:40:18,419 INFO [train.py:901] (1/4) Epoch 21, batch 5750, loss[loss=0.1872, simple_loss=0.2856, pruned_loss=0.04438, over 8749.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2872, pruned_loss=0.06182, over 1606114.62 frames. ], batch size: 30, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:24,265 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 02:40:47,691 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167450.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:53,630 INFO [train.py:901] (1/4) Epoch 21, batch 5800, loss[loss=0.1481, simple_loss=0.2256, pruned_loss=0.03524, over 7191.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2877, pruned_loss=0.06192, over 1606095.09 frames. ], batch size: 16, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:55,798 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:58,561 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7087, 5.8644, 5.0571, 2.5596, 5.0942, 5.4855, 5.3660, 5.2568], + device='cuda:1'), covar=tensor([0.0523, 0.0393, 0.0886, 0.3947, 0.0746, 0.0754, 0.1025, 0.0613], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0427, 0.0427, 0.0530, 0.0422, 0.0436, 0.0416, 0.0380], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:40:59,181 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167466.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:59,916 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7566, 5.9438, 5.0946, 2.7668, 5.1253, 5.6559, 5.4273, 5.4036], + device='cuda:1'), covar=tensor([0.0674, 0.0406, 0.1004, 0.4428, 0.0772, 0.0671, 0.1185, 0.0573], + device='cuda:1'), in_proj_covar=tensor([0.0520, 0.0427, 0.0427, 0.0530, 0.0422, 0.0437, 0.0417, 0.0380], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:41:00,728 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:06,728 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3251, 2.7081, 3.0917, 1.4755, 3.1116, 1.9781, 1.6123, 2.2138], + device='cuda:1'), covar=tensor([0.0763, 0.0359, 0.0220, 0.0833, 0.0497, 0.0788, 0.0874, 0.0509], + device='cuda:1'), in_proj_covar=tensor([0.0451, 0.0387, 0.0339, 0.0443, 0.0375, 0.0533, 0.0389, 0.0415], + device='cuda:1'), out_proj_covar=tensor([1.2128e-04, 1.0155e-04, 8.9225e-05, 1.1699e-04, 9.8891e-05, 1.5069e-04, + 1.0509e-04, 1.1005e-04], device='cuda:1') +2023-02-07 02:41:15,047 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.466e+02 2.953e+02 3.603e+02 7.254e+02, threshold=5.907e+02, percent-clipped=1.0 +2023-02-07 02:41:17,997 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:20,749 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:28,628 INFO [train.py:901] (1/4) Epoch 21, batch 5850, loss[loss=0.1716, simple_loss=0.2476, pruned_loss=0.04782, over 7228.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2866, pruned_loss=0.06096, over 1609988.20 frames. 
], batch size: 16, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:41:37,829 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:00,639 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7337, 2.1473, 3.2759, 1.5932, 2.4420, 2.1862, 1.8478, 2.4843], + device='cuda:1'), covar=tensor([0.1855, 0.2294, 0.0779, 0.4175, 0.1809, 0.3015, 0.2087, 0.2048], + device='cuda:1'), in_proj_covar=tensor([0.0517, 0.0591, 0.0550, 0.0633, 0.0635, 0.0585, 0.0526, 0.0625], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:42:03,775 INFO [train.py:901] (1/4) Epoch 21, batch 5900, loss[loss=0.2306, simple_loss=0.3256, pruned_loss=0.06782, over 8473.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2858, pruned_loss=0.0607, over 1608640.69 frames. ], batch size: 25, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:42:16,872 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:19,702 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:25,235 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1492, 1.5879, 4.3295, 1.6687, 3.8228, 3.5973, 3.9516, 3.8386], + device='cuda:1'), covar=tensor([0.0622, 0.4166, 0.0572, 0.4052, 0.1239, 0.1030, 0.0635, 0.0656], + device='cuda:1'), in_proj_covar=tensor([0.0631, 0.0641, 0.0695, 0.0632, 0.0706, 0.0607, 0.0608, 0.0681], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:42:25,698 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.257e+02 2.859e+02 3.440e+02 7.059e+02, threshold=5.718e+02, percent-clipped=2.0 +2023-02-07 02:42:34,680 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-07 02:42:40,383 INFO [train.py:901] (1/4) Epoch 21, batch 5950, loss[loss=0.2656, simple_loss=0.3412, pruned_loss=0.09501, over 8574.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2858, pruned_loss=0.0615, over 1601196.14 frames. ], batch size: 34, lr: 3.58e-03, grad_scale: 4.0 +2023-02-07 02:43:14,061 INFO [train.py:901] (1/4) Epoch 21, batch 6000, loss[loss=0.1684, simple_loss=0.2437, pruned_loss=0.0465, over 7710.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2872, pruned_loss=0.06238, over 1607298.60 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:43:14,061 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 02:43:26,396 INFO [train.py:935] (1/4) Epoch 21, validation: loss=0.174, simple_loss=0.2741, pruned_loss=0.03692, over 944034.00 frames. 
+2023-02-07 02:43:26,397 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 02:43:28,714 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:45,660 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:47,403 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.382e+02 2.918e+02 3.609e+02 5.587e+02, threshold=5.837e+02, percent-clipped=0.0 +2023-02-07 02:44:01,967 INFO [train.py:901] (1/4) Epoch 21, batch 6050, loss[loss=0.222, simple_loss=0.2992, pruned_loss=0.0724, over 8026.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.288, pruned_loss=0.06258, over 1610855.85 frames. ], batch size: 22, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:06,715 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 02:44:22,066 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167737.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:38,028 INFO [train.py:901] (1/4) Epoch 21, batch 6100, loss[loss=0.2203, simple_loss=0.306, pruned_loss=0.0673, over 8467.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2879, pruned_loss=0.06218, over 1612636.10 frames. ], batch size: 29, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:56,062 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167785.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:57,220 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 02:44:58,532 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.390e+02 3.045e+02 3.849e+02 6.701e+02, threshold=6.089e+02, percent-clipped=2.0 +2023-02-07 02:45:02,108 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167794.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:13,115 INFO [train.py:901] (1/4) Epoch 21, batch 6150, loss[loss=0.1671, simple_loss=0.2525, pruned_loss=0.04081, over 7663.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2881, pruned_loss=0.06273, over 1610759.39 frames. ], batch size: 19, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:30,428 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:33,106 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,358 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,818 INFO [train.py:901] (1/4) Epoch 21, batch 6200, loss[loss=0.1693, simple_loss=0.2509, pruned_loss=0.04383, over 7454.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2871, pruned_loss=0.06219, over 1608800.83 frames. 
], batch size: 17, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:51,052 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:09,469 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.266e+02 2.776e+02 3.727e+02 8.167e+02, threshold=5.552e+02, percent-clipped=4.0 +2023-02-07 02:46:16,723 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 02:46:23,395 INFO [train.py:901] (1/4) Epoch 21, batch 6250, loss[loss=0.2057, simple_loss=0.2902, pruned_loss=0.06064, over 8670.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2859, pruned_loss=0.06175, over 1605804.05 frames. ], batch size: 34, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:46:23,602 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:57,214 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.35 vs. limit=5.0 +2023-02-07 02:46:58,910 INFO [train.py:901] (1/4) Epoch 21, batch 6300, loss[loss=0.2016, simple_loss=0.2825, pruned_loss=0.06033, over 8091.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2861, pruned_loss=0.06195, over 1609735.40 frames. ], batch size: 21, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:47:02,546 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 02:47:10,142 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-07 02:47:20,725 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.475e+02 2.869e+02 3.545e+02 9.430e+02, threshold=5.737e+02, percent-clipped=7.0 +2023-02-07 02:47:35,234 INFO [train.py:901] (1/4) Epoch 21, batch 6350, loss[loss=0.1761, simple_loss=0.2623, pruned_loss=0.04497, over 5938.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2859, pruned_loss=0.06164, over 1609316.08 frames. ], batch size: 13, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:06,026 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:09,825 INFO [train.py:901] (1/4) Epoch 21, batch 6400, loss[loss=0.1416, simple_loss=0.2293, pruned_loss=0.02698, over 7818.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2861, pruned_loss=0.06174, over 1609884.68 frames. ], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:25,122 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168081.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:30,484 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.236e+02 2.639e+02 3.603e+02 6.999e+02, threshold=5.279e+02, percent-clipped=2.0 +2023-02-07 02:48:45,442 INFO [train.py:901] (1/4) Epoch 21, batch 6450, loss[loss=0.2085, simple_loss=0.2807, pruned_loss=0.0682, over 7791.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2864, pruned_loss=0.06172, over 1612060.34 frames. ], batch size: 19, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:59,393 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:19,836 INFO [train.py:901] (1/4) Epoch 21, batch 6500, loss[loss=0.2188, simple_loss=0.3077, pruned_loss=0.06494, over 8107.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2866, pruned_loss=0.0617, over 1611793.81 frames. 
], batch size: 23, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:49:24,789 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:41,359 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.483e+02 3.129e+02 4.081e+02 1.148e+03, threshold=6.258e+02, percent-clipped=13.0 +2023-02-07 02:49:42,168 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:46,104 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:54,734 INFO [train.py:901] (1/4) Epoch 21, batch 6550, loss[loss=0.1817, simple_loss=0.2652, pruned_loss=0.04917, over 7929.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06199, over 1613223.60 frames. ], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:19,977 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:20,487 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 02:50:24,650 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:29,935 INFO [train.py:901] (1/4) Epoch 21, batch 6600, loss[loss=0.2146, simple_loss=0.3047, pruned_loss=0.06227, over 8513.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2879, pruned_loss=0.06195, over 1613916.44 frames. ], batch size: 29, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:32,319 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 02:50:38,741 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 02:50:50,803 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.414e+02 2.830e+02 3.481e+02 7.637e+02, threshold=5.659e+02, percent-clipped=3.0 +2023-02-07 02:51:05,095 INFO [train.py:901] (1/4) Epoch 21, batch 6650, loss[loss=0.1718, simple_loss=0.246, pruned_loss=0.04884, over 7432.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2876, pruned_loss=0.06208, over 1613584.28 frames. ], batch size: 17, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:51:40,097 INFO [train.py:901] (1/4) Epoch 21, batch 6700, loss[loss=0.1961, simple_loss=0.2625, pruned_loss=0.0649, over 7205.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2878, pruned_loss=0.06225, over 1611718.84 frames. 
], batch size: 16, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:00,447 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.306e+02 2.933e+02 3.476e+02 6.537e+02, threshold=5.866e+02, percent-clipped=2.0 +2023-02-07 02:52:04,742 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5617, 1.8936, 3.0743, 1.4006, 2.1828, 1.9997, 1.6007, 2.1667], + device='cuda:1'), covar=tensor([0.1929, 0.2633, 0.0890, 0.4701, 0.2000, 0.3208, 0.2339, 0.2629], + device='cuda:1'), in_proj_covar=tensor([0.0524, 0.0600, 0.0556, 0.0641, 0.0644, 0.0591, 0.0532, 0.0632], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 02:52:05,991 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:14,913 INFO [train.py:901] (1/4) Epoch 21, batch 6750, loss[loss=0.2208, simple_loss=0.2919, pruned_loss=0.0748, over 7916.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2869, pruned_loss=0.06185, over 1610027.74 frames. ], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:19,508 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 02:52:45,398 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:50,751 INFO [train.py:901] (1/4) Epoch 21, batch 6800, loss[loss=0.2159, simple_loss=0.3, pruned_loss=0.06593, over 8421.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.06172, over 1611644.35 frames. ], batch size: 49, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:58,512 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 02:53:04,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:12,368 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.333e+02 2.834e+02 3.373e+02 7.883e+02, threshold=5.669e+02, percent-clipped=5.0 +2023-02-07 02:53:20,352 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168500.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:26,307 INFO [train.py:901] (1/4) Epoch 21, batch 6850, loss[loss=0.209, simple_loss=0.2922, pruned_loss=0.06287, over 8252.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2873, pruned_loss=0.06216, over 1609673.19 frames. ], batch size: 22, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:53:28,542 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:37,581 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:45,926 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 02:53:54,104 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8875, 1.3062, 1.5690, 1.2926, 1.0129, 1.4675, 1.8186, 1.5008], + device='cuda:1'), covar=tensor([0.0548, 0.1291, 0.1704, 0.1541, 0.0618, 0.1530, 0.0665, 0.0697], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0158, 0.0099, 0.0162, 0.0112, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 02:54:00,744 INFO [train.py:901] (1/4) Epoch 21, batch 6900, loss[loss=0.2055, simple_loss=0.2942, pruned_loss=0.05844, over 8103.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2881, pruned_loss=0.06295, over 1611453.78 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:22,253 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.867e+02 3.613e+02 6.820e+02, threshold=5.733e+02, percent-clipped=1.0 +2023-02-07 02:54:26,393 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:54:35,740 INFO [train.py:901] (1/4) Epoch 21, batch 6950, loss[loss=0.2404, simple_loss=0.32, pruned_loss=0.08043, over 8493.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2892, pruned_loss=0.06336, over 1611645.28 frames. ], batch size: 29, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:35,962 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8166, 1.7480, 2.5151, 1.6435, 1.3813, 2.3826, 0.6845, 1.6017], + device='cuda:1'), covar=tensor([0.1646, 0.1218, 0.0311, 0.1276, 0.2707, 0.0493, 0.2272, 0.1321], + device='cuda:1'), in_proj_covar=tensor([0.0188, 0.0194, 0.0126, 0.0221, 0.0269, 0.0135, 0.0170, 0.0191], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 02:54:53,419 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 02:55:10,646 INFO [train.py:901] (1/4) Epoch 21, batch 7000, loss[loss=0.2303, simple_loss=0.3086, pruned_loss=0.07604, over 8252.00 frames. ], tot_loss[loss=0.208, simple_loss=0.289, pruned_loss=0.06347, over 1608042.55 frames. 
], batch size: 24, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:55:23,595 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8559, 1.3797, 3.9685, 1.4388, 3.5407, 3.3241, 3.6448, 3.5241],
+ device='cuda:1'), covar=tensor([0.0628, 0.4543, 0.0658, 0.4410, 0.1181, 0.1053, 0.0656, 0.0782],
+ device='cuda:1'), in_proj_covar=tensor([0.0634, 0.0646, 0.0701, 0.0637, 0.0716, 0.0612, 0.0616, 0.0685],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:55:31,362 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.444e+02 3.041e+02 3.968e+02 8.528e+02, threshold=6.083e+02, percent-clipped=8.0
+2023-02-07 02:55:41,046 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7926, 6.0520, 5.2107, 2.6301, 5.3503, 5.6342, 5.3609, 5.4075],
+ device='cuda:1'), covar=tensor([0.0523, 0.0329, 0.0764, 0.3765, 0.0724, 0.0773, 0.1064, 0.0570],
+ device='cuda:1'), in_proj_covar=tensor([0.0523, 0.0431, 0.0430, 0.0534, 0.0423, 0.0440, 0.0419, 0.0383],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:55:45,702 INFO [train.py:901] (1/4) Epoch 21, batch 7050, loss[loss=0.1963, simple_loss=0.287, pruned_loss=0.05276, over 8475.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2887, pruned_loss=0.06341, over 1608807.68 frames. ], batch size: 25, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:55:46,577 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168710.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:55:50,178 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. limit=5.0
+2023-02-07 02:56:19,950 INFO [train.py:901] (1/4) Epoch 21, batch 7100, loss[loss=0.2161, simple_loss=0.3013, pruned_loss=0.06547, over 8144.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2866, pruned_loss=0.0622, over 1610403.10 frames. ], batch size: 22, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:56:26,884 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168768.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:56:40,684 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.605e+02 3.011e+02 3.811e+02 1.077e+03, threshold=6.022e+02, percent-clipped=4.0
+2023-02-07 02:56:41,628 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5922, 2.1176, 3.2745, 1.7177, 1.6598, 3.1759, 0.9130, 2.1027],
+ device='cuda:1'), covar=tensor([0.1481, 0.1342, 0.0303, 0.1918, 0.2992, 0.0398, 0.2206, 0.1549],
+ device='cuda:1'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0222, 0.0271, 0.0136, 0.0172, 0.0192],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 02:56:43,692 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168793.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:56:48,512 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5577, 1.8724, 1.9887, 1.2124, 2.0558, 1.4675, 0.5308, 1.7901],
+ device='cuda:1'), covar=tensor([0.0638, 0.0382, 0.0284, 0.0607, 0.0437, 0.0932, 0.0865, 0.0307],
+ device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0391, 0.0342, 0.0443, 0.0375, 0.0534, 0.0389, 0.0417],
+ device='cuda:1'), out_proj_covar=tensor([1.2173e-04, 1.0258e-04, 9.0082e-05, 1.1684e-04, 9.8664e-05, 1.5105e-04,
+ 1.0518e-04, 1.1060e-04], device='cuda:1')
+2023-02-07 02:56:55,251 INFO [train.py:901] (1/4) Epoch 21, batch 7150, loss[loss=0.1406, simple_loss=0.2275, pruned_loss=0.02689, over 7787.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2868, pruned_loss=0.06238, over 1608702.48 frames. ], batch size: 19, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:57:07,097 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0
+2023-02-07 02:57:07,567 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2311, 2.0931, 2.9777, 2.4250, 2.7649, 2.0797, 1.9678, 2.0289],
+ device='cuda:1'), covar=tensor([0.5211, 0.5199, 0.1769, 0.3469, 0.2418, 0.3916, 0.2514, 0.4214],
+ device='cuda:1'), in_proj_covar=tensor([0.0939, 0.0973, 0.0800, 0.0941, 0.0997, 0.0890, 0.0746, 0.0820],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 02:57:29,809 INFO [train.py:901] (1/4) Epoch 21, batch 7200, loss[loss=0.1651, simple_loss=0.2562, pruned_loss=0.03705, over 7921.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2877, pruned_loss=0.06266, over 1610852.88 frames. ], batch size: 20, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:57:32,848 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.26 vs. limit=5.0
+2023-02-07 02:57:44,676 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0451, 2.2398, 1.8247, 2.7899, 1.3953, 1.6343, 1.9569, 2.1562],
+ device='cuda:1'), covar=tensor([0.0770, 0.0832, 0.0977, 0.0391, 0.1111, 0.1366, 0.0945, 0.0835],
+ device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0197, 0.0244, 0.0212, 0.0206, 0.0247, 0.0249, 0.0209],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 02:57:51,147 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 2.343e+02 3.196e+02 4.097e+02 7.456e+02, threshold=6.392e+02, percent-clipped=6.0
+2023-02-07 02:57:56,891 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-07 02:58:04,698 INFO [train.py:901] (1/4) Epoch 21, batch 7250, loss[loss=0.1775, simple_loss=0.258, pruned_loss=0.0485, over 7719.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.288, pruned_loss=0.06253, over 1611591.10 frames. ], batch size: 18, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:58:18,037 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5716, 2.4883, 1.8949, 2.2131, 2.1682, 1.5982, 1.9931, 2.0661],
+ device='cuda:1'), covar=tensor([0.1482, 0.0447, 0.1181, 0.0642, 0.0727, 0.1542, 0.0938, 0.0959],
+ device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0237, 0.0337, 0.0309, 0.0302, 0.0338, 0.0347, 0.0319],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 02:58:40,068 INFO [train.py:901] (1/4) Epoch 21, batch 7300, loss[loss=0.2203, simple_loss=0.3128, pruned_loss=0.0639, over 8328.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.288, pruned_loss=0.06232, over 1613653.58 frames. ], batch size: 25, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:58:44,984 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168966.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:58:58,201 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168985.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:58:59,432 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168987.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 02:58:59,462 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9756, 3.5274, 1.8276, 2.8222, 2.6229, 1.5286, 2.5930, 2.9940],
+ device='cuda:1'), covar=tensor([0.1647, 0.0483, 0.1514, 0.0785, 0.0835, 0.2015, 0.1215, 0.0895],
+ device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0237, 0.0337, 0.0309, 0.0303, 0.0338, 0.0348, 0.0320],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 02:59:00,602 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.375e+02 2.880e+02 4.111e+02 9.346e+02, threshold=5.760e+02, percent-clipped=6.0
+2023-02-07 02:59:02,083 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168991.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 02:59:14,679 INFO [train.py:901] (1/4) Epoch 21, batch 7350, loss[loss=0.1932, simple_loss=0.2824, pruned_loss=0.05196, over 8623.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2883, pruned_loss=0.0626, over 1616657.43 frames. ], batch size: 49, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:59:35,043 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-07 02:59:36,091 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0
+2023-02-07 02:59:44,847 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5604, 1.9329, 3.3186, 1.4003, 2.4024, 1.9373, 1.6417, 2.4183],
+ device='cuda:1'), covar=tensor([0.1970, 0.2671, 0.0818, 0.4550, 0.1939, 0.3150, 0.2385, 0.2328],
+ device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0603, 0.0558, 0.0644, 0.0645, 0.0596, 0.0534, 0.0632],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 02:59:49,824 INFO [train.py:901] (1/4) Epoch 21, batch 7400, loss[loss=0.2592, simple_loss=0.3172, pruned_loss=0.1006, over 8290.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2885, pruned_loss=0.06299, over 1613200.22 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 02:59:53,401 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-07 03:00:10,729 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 2.322e+02 3.020e+02 4.298e+02 1.187e+03, threshold=6.039e+02, percent-clipped=6.0
+2023-02-07 03:00:19,199 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169100.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:00:25,150 INFO [train.py:901] (1/4) Epoch 21, batch 7450, loss[loss=0.2248, simple_loss=0.3017, pruned_loss=0.07393, over 7019.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2877, pruned_loss=0.06227, over 1610901.25 frames. ], batch size: 72, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 03:00:33,888 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-07 03:00:42,572 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:01:01,173 INFO [train.py:901] (1/4) Epoch 21, batch 7500, loss[loss=0.2103, simple_loss=0.2906, pruned_loss=0.06495, over 7972.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2881, pruned_loss=0.06209, over 1614179.08 frames. ], batch size: 21, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 03:01:13,525 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169177.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:01:21,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.287e+02 2.739e+02 3.438e+02 5.948e+02, threshold=5.478e+02, percent-clipped=0.0
+2023-02-07 03:01:35,746 INFO [train.py:901] (1/4) Epoch 21, batch 7550, loss[loss=0.2, simple_loss=0.283, pruned_loss=0.05853, over 8256.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.06169, over 1615842.66 frames. ], batch size: 24, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 03:02:09,759 INFO [train.py:901] (1/4) Epoch 21, batch 7600, loss[loss=0.202, simple_loss=0.2782, pruned_loss=0.06289, over 7439.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2871, pruned_loss=0.06138, over 1613359.44 frames. ], batch size: 17, lr: 3.57e-03, grad_scale: 8.0
+2023-02-07 03:02:32,179 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.243e+02 2.742e+02 3.349e+02 1.012e+03, threshold=5.485e+02, percent-clipped=5.0
+2023-02-07 03:02:45,876 INFO [train.py:901] (1/4) Epoch 21, batch 7650, loss[loss=0.1939, simple_loss=0.2878, pruned_loss=0.05, over 7974.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06141, over 1611659.75 frames. ], batch size: 21, lr: 3.57e-03, grad_scale: 16.0
+2023-02-07 03:03:00,453 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169329.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:03:01,835 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169331.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 03:03:21,298 INFO [train.py:901] (1/4) Epoch 21, batch 7700, loss[loss=0.206, simple_loss=0.2897, pruned_loss=0.06115, over 7810.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2876, pruned_loss=0.0616, over 1614457.40 frames. ], batch size: 20, lr: 3.57e-03, grad_scale: 16.0
+2023-02-07 03:03:37,725 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.51 vs. limit=5.0
+2023-02-07 03:03:42,194 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.349e+02 2.901e+02 3.736e+02 6.675e+02, threshold=5.802e+02, percent-clipped=6.0
+2023-02-07 03:03:44,253 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-07 03:03:45,054 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2556, 2.1063, 1.6375, 1.9647, 1.7497, 1.4104, 1.6720, 1.6901],
+ device='cuda:1'), covar=tensor([0.1265, 0.0380, 0.1142, 0.0524, 0.0727, 0.1455, 0.0885, 0.0872],
+ device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0236, 0.0335, 0.0307, 0.0301, 0.0337, 0.0345, 0.0318],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 03:03:57,021 INFO [train.py:901] (1/4) Epoch 21, batch 7750, loss[loss=0.1762, simple_loss=0.2661, pruned_loss=0.04319, over 8187.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.06193, over 1614271.26 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 16.0
+2023-02-07 03:04:21,907 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169444.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:04:22,015 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169444.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:04:23,339 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169446.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 03:04:32,648 INFO [train.py:901] (1/4) Epoch 21, batch 7800, loss[loss=0.2205, simple_loss=0.2988, pruned_loss=0.07116, over 7714.00 frames. ], tot_loss[loss=0.206, simple_loss=0.288, pruned_loss=0.06199, over 1612553.48 frames. ], batch size: 18, lr: 3.57e-03, grad_scale: 16.0
+2023-02-07 03:04:45,392 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169478.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:04:52,675 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.145e+02 2.738e+02 3.428e+02 8.790e+02, threshold=5.476e+02, percent-clipped=3.0
+2023-02-07 03:05:06,025 INFO [train.py:901] (1/4) Epoch 21, batch 7850, loss[loss=0.1731, simple_loss=0.2581, pruned_loss=0.044, over 7925.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2875, pruned_loss=0.06149, over 1614576.40 frames. ], batch size: 20, lr: 3.56e-03, grad_scale: 16.0
+2023-02-07 03:05:14,138 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169521.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:05:39,266 INFO [train.py:901] (1/4) Epoch 21, batch 7900, loss[loss=0.1714, simple_loss=0.2478, pruned_loss=0.04746, over 7689.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2863, pruned_loss=0.06125, over 1610412.74 frames. ], batch size: 18, lr: 3.56e-03, grad_scale: 16.0
+2023-02-07 03:05:39,438 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169559.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:05:52,128 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2815, 1.2427, 3.4014, 1.0769, 3.0288, 2.8355, 3.1146, 3.0089],
+ device='cuda:1'), covar=tensor([0.0754, 0.4037, 0.0815, 0.4123, 0.1426, 0.1115, 0.0766, 0.0914],
+ device='cuda:1'), in_proj_covar=tensor([0.0631, 0.0641, 0.0695, 0.0627, 0.0711, 0.0612, 0.0612, 0.0676],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:05:59,284 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.405e+02 2.884e+02 3.520e+02 8.387e+02, threshold=5.767e+02, percent-clipped=5.0
+2023-02-07 03:06:02,045 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169593.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:06:11,880 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6411, 1.9302, 2.9421, 1.4991, 2.1334, 2.0388, 1.7018, 2.1887],
+ device='cuda:1'), covar=tensor([0.1810, 0.2573, 0.0895, 0.4573, 0.1901, 0.3112, 0.2357, 0.2222],
+ device='cuda:1'), in_proj_covar=tensor([0.0523, 0.0601, 0.0556, 0.0637, 0.0642, 0.0591, 0.0531, 0.0632],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:06:12,852 INFO [train.py:901] (1/4) Epoch 21, batch 7950, loss[loss=0.2009, simple_loss=0.2856, pruned_loss=0.05807, over 8462.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.288, pruned_loss=0.06212, over 1607076.70 frames. ], batch size: 25, lr: 3.56e-03, grad_scale: 16.0
+2023-02-07 03:06:31,343 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169636.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:06:33,349 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169639.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:06:46,591 INFO [train.py:901] (1/4) Epoch 21, batch 8000, loss[loss=0.203, simple_loss=0.2658, pruned_loss=0.07012, over 7691.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2879, pruned_loss=0.06216, over 1607677.58 frames. ], batch size: 18, lr: 3.56e-03, grad_scale: 16.0
+2023-02-07 03:07:06,442 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.194e+02 2.844e+02 3.383e+02 6.688e+02, threshold=5.687e+02, percent-clipped=2.0
+2023-02-07 03:07:07,194 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2855, 3.2055, 2.9642, 1.4243, 2.9006, 2.9324, 2.9633, 2.8523],
+ device='cuda:1'), covar=tensor([0.1146, 0.0757, 0.1335, 0.4682, 0.1060, 0.1148, 0.1542, 0.1044],
+ device='cuda:1'), in_proj_covar=tensor([0.0521, 0.0428, 0.0431, 0.0531, 0.0422, 0.0440, 0.0421, 0.0380],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:07:12,045 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7861, 2.5630, 3.4169, 2.6742, 3.3352, 2.6742, 2.5334, 2.0563],
+ device='cuda:1'), covar=tensor([0.5401, 0.5397, 0.1912, 0.3862, 0.2570, 0.2922, 0.1817, 0.5641],
+ device='cuda:1'), in_proj_covar=tensor([0.0945, 0.0977, 0.0803, 0.0946, 0.0998, 0.0892, 0.0746, 0.0823],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 03:07:12,148 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0
+2023-02-07 03:07:14,033 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169700.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:07:15,418 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169702.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 03:07:19,702 INFO [train.py:901] (1/4) Epoch 21, batch 8050, loss[loss=0.1992, simple_loss=0.2783, pruned_loss=0.06004, over 7244.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.286, pruned_loss=0.06206, over 1583637.07 frames. ], batch size: 16, lr: 3.56e-03, grad_scale: 16.0
+2023-02-07 03:07:30,628 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169725.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:07:32,003 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169727.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 03:07:53,204 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-07 03:07:58,218 INFO [train.py:901] (1/4) Epoch 22, batch 0, loss[loss=0.2471, simple_loss=0.3255, pruned_loss=0.08432, over 8358.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.3255, pruned_loss=0.08432, over 8358.00 frames. ], batch size: 24, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:07:58,218 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-07 03:08:05,225 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6391, 1.7508, 1.5866, 1.9207, 1.3092, 1.5188, 1.7035, 1.7670],
+ device='cuda:1'), covar=tensor([0.0729, 0.0798, 0.0806, 0.0589, 0.1029, 0.1095, 0.0633, 0.0716],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0207, 0.0246, 0.0250, 0.0209],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 03:08:09,347 INFO [train.py:935] (1/4) Epoch 22, validation: loss=0.1743, simple_loss=0.2746, pruned_loss=0.03702, over 944034.00 frames.
+2023-02-07 03:08:09,349 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-07 03:08:12,906 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169747.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:08:17,064 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9997, 1.4695, 1.6766, 1.4677, 0.8828, 1.4862, 1.7655, 1.6655],
+ device='cuda:1'), covar=tensor([0.0525, 0.1277, 0.1725, 0.1446, 0.0623, 0.1490, 0.0681, 0.0621],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0160, 0.0100, 0.0164, 0.0113, 0.0144],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 03:08:24,250 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-07 03:08:25,070 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169765.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:08:36,868 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5058, 2.4935, 1.9488, 2.2912, 2.2042, 1.6569, 2.0627, 2.1536],
+ device='cuda:1'), covar=tensor([0.1514, 0.0459, 0.1220, 0.0664, 0.0685, 0.1534, 0.0919, 0.0918],
+ device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0234, 0.0331, 0.0305, 0.0298, 0.0332, 0.0340, 0.0315],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 03:08:42,189 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.482e+02 2.980e+02 3.558e+02 1.069e+03, threshold=5.959e+02, percent-clipped=8.0
+2023-02-07 03:08:44,174 INFO [train.py:901] (1/4) Epoch 22, batch 50, loss[loss=0.1933, simple_loss=0.284, pruned_loss=0.05132, over 7964.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2928, pruned_loss=0.06392, over 364175.28 frames. ], batch size: 21, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:08:54,112 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169804.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:09:01,045 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-07 03:09:02,041 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169815.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:09:06,926 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2777, 2.0766, 2.6980, 2.2432, 2.6467, 2.3636, 2.1380, 1.4724],
+ device='cuda:1'), covar=tensor([0.5047, 0.4808, 0.1934, 0.3850, 0.2534, 0.3087, 0.1884, 0.5188],
+ device='cuda:1'), in_proj_covar=tensor([0.0943, 0.0975, 0.0801, 0.0943, 0.0997, 0.0889, 0.0744, 0.0822],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 03:09:19,148 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169840.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:09:20,323 INFO [train.py:901] (1/4) Epoch 22, batch 100, loss[loss=0.1985, simple_loss=0.2685, pruned_loss=0.06429, over 7938.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2917, pruned_loss=0.06333, over 643922.18 frames. ], batch size: 20, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:09:23,124 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-07 03:09:25,376 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169849.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:09:42,084 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:09:52,902 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.356e+02 3.069e+02 3.800e+02 7.981e+02, threshold=6.138e+02, percent-clipped=3.0
+2023-02-07 03:09:55,641 INFO [train.py:901] (1/4) Epoch 22, batch 150, loss[loss=0.2254, simple_loss=0.3148, pruned_loss=0.06802, over 8567.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2916, pruned_loss=0.06311, over 862414.69 frames. ], batch size: 31, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:09:55,879 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169892.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:10:12,773 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169917.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:10:27,472 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5508, 1.4605, 2.8541, 1.3466, 2.1392, 3.0396, 3.1844, 2.5933],
+ device='cuda:1'), covar=tensor([0.1313, 0.1655, 0.0400, 0.2235, 0.1010, 0.0322, 0.0692, 0.0597],
+ device='cuda:1'), in_proj_covar=tensor([0.0294, 0.0321, 0.0285, 0.0317, 0.0307, 0.0264, 0.0418, 0.0305],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 03:10:30,762 INFO [train.py:901] (1/4) Epoch 22, batch 200, loss[loss=0.195, simple_loss=0.2814, pruned_loss=0.05435, over 8354.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2903, pruned_loss=0.06216, over 1030705.19 frames. ], batch size: 24, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:10:53,647 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0
+2023-02-07 03:10:58,691 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:11:02,622 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.362e+02 2.871e+02 3.395e+02 8.094e+02, threshold=5.742e+02, percent-clipped=2.0
+2023-02-07 03:11:04,633 INFO [train.py:901] (1/4) Epoch 22, batch 250, loss[loss=0.2414, simple_loss=0.3164, pruned_loss=0.08318, over 8435.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2897, pruned_loss=0.06193, over 1159810.23 frames. ], batch size: 27, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:11:17,872 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-07 03:11:26,104 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-07 03:11:41,667 INFO [train.py:901] (1/4) Epoch 22, batch 300, loss[loss=0.2676, simple_loss=0.3301, pruned_loss=0.1026, over 8100.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2877, pruned_loss=0.06112, over 1262067.71 frames. ], batch size: 23, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:11:56,574 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170063.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:12:13,700 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.486e+02 2.821e+02 3.492e+02 6.452e+02, threshold=5.641e+02, percent-clipped=3.0
+2023-02-07 03:12:15,181 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170091.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:12:15,775 INFO [train.py:901] (1/4) Epoch 22, batch 350, loss[loss=0.2236, simple_loss=0.2993, pruned_loss=0.07397, over 8134.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2886, pruned_loss=0.062, over 1335642.63 frames. ], batch size: 22, lr: 3.48e-03, grad_scale: 16.0
+2023-02-07 03:12:19,956 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170098.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:12:27,049 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:12:49,716 INFO [train.py:901] (1/4) Epoch 22, batch 400, loss[loss=0.2135, simple_loss=0.3015, pruned_loss=0.06274, over 8606.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2878, pruned_loss=0.06143, over 1396804.01 frames. ], batch size: 34, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:12:53,693 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170148.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:13:22,572 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.277e+02 2.821e+02 3.460e+02 6.418e+02, threshold=5.643e+02, percent-clipped=3.0
+2023-02-07 03:13:24,661 INFO [train.py:901] (1/4) Epoch 22, batch 450, loss[loss=0.1919, simple_loss=0.2662, pruned_loss=0.05886, over 7689.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2881, pruned_loss=0.06158, over 1446951.56 frames. ], batch size: 18, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:13:34,381 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170206.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:13:46,334 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:13:58,272 INFO [train.py:901] (1/4) Epoch 22, batch 500, loss[loss=0.1693, simple_loss=0.2428, pruned_loss=0.04787, over 7420.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2885, pruned_loss=0.06162, over 1487645.88 frames. ], batch size: 17, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:14:13,733 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:14:31,688 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.263e+02 2.770e+02 3.716e+02 6.957e+02, threshold=5.540e+02, percent-clipped=5.0
+2023-02-07 03:14:34,527 INFO [train.py:901] (1/4) Epoch 22, batch 550, loss[loss=0.186, simple_loss=0.28, pruned_loss=0.04601, over 8478.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2887, pruned_loss=0.06248, over 1514854.28 frames. ], batch size: 28, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:15:00,823 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0143, 2.2607, 1.8518, 2.6694, 1.4924, 1.6714, 2.1082, 2.2067],
+ device='cuda:1'), covar=tensor([0.0741, 0.0783, 0.0956, 0.0487, 0.1156, 0.1317, 0.0799, 0.0797],
+ device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0196, 0.0243, 0.0213, 0.0206, 0.0245, 0.0248, 0.0208],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 03:15:08,214 INFO [train.py:901] (1/4) Epoch 22, batch 600, loss[loss=0.2017, simple_loss=0.2878, pruned_loss=0.05781, over 8545.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.06219, over 1541352.01 frames. ], batch size: 31, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:15:16,556 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170354.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:15:27,507 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-07 03:15:34,303 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170379.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:15:40,800 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.463e+02 3.010e+02 3.561e+02 9.437e+02, threshold=6.021e+02, percent-clipped=1.0
+2023-02-07 03:15:42,758 INFO [train.py:901] (1/4) Epoch 22, batch 650, loss[loss=0.1962, simple_loss=0.2793, pruned_loss=0.05651, over 8575.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2899, pruned_loss=0.0628, over 1558988.36 frames. ], batch size: 34, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:15:52,768 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.2468, 5.2527, 4.7383, 2.5481, 4.6054, 4.9639, 4.8895, 4.6986],
+ device='cuda:1'), covar=tensor([0.0562, 0.0393, 0.0790, 0.4262, 0.0782, 0.0926, 0.1109, 0.0679],
+ device='cuda:1'), in_proj_covar=tensor([0.0524, 0.0430, 0.0428, 0.0530, 0.0422, 0.0441, 0.0423, 0.0381],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:15:53,432 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170407.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:17,635 INFO [train.py:901] (1/4) Epoch 22, batch 700, loss[loss=0.1809, simple_loss=0.2609, pruned_loss=0.05045, over 7702.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.06169, over 1570588.86 frames. ], batch size: 18, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:16:31,480 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:42,963 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:43,712 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:49,127 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170487.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:16:50,917 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.347e+02 2.936e+02 3.672e+02 5.936e+02, threshold=5.871e+02, percent-clipped=0.0
+2023-02-07 03:16:52,903 INFO [train.py:901] (1/4) Epoch 22, batch 750, loss[loss=0.2038, simple_loss=0.2903, pruned_loss=0.05861, over 8353.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.06173, over 1580181.33 frames. ], batch size: 24, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:17:01,715 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170505.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:11,628 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170519.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:13,545 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170522.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:14,728 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-07 03:17:23,963 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-07 03:17:27,405 INFO [train.py:901] (1/4) Epoch 22, batch 800, loss[loss=0.2007, simple_loss=0.281, pruned_loss=0.06015, over 7967.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2874, pruned_loss=0.06186, over 1590613.63 frames. ], batch size: 21, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:17:28,966 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170544.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:30,549 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0
+2023-02-07 03:17:57,572 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:17:58,755 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.234e+02 2.598e+02 3.180e+02 6.753e+02, threshold=5.195e+02, percent-clipped=1.0
+2023-02-07 03:18:00,808 INFO [train.py:901] (1/4) Epoch 22, batch 850, loss[loss=0.1651, simple_loss=0.2584, pruned_loss=0.03594, over 8083.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2869, pruned_loss=0.06166, over 1597193.41 frames. ], batch size: 21, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:18:14,723 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8591, 2.4092, 3.6746, 1.9219, 1.8696, 3.7249, 0.7287, 2.1679],
+ device='cuda:1'), covar=tensor([0.1577, 0.1371, 0.0257, 0.1890, 0.2993, 0.0322, 0.2508, 0.1465],
+ device='cuda:1'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0222, 0.0270, 0.0135, 0.0171, 0.0193],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 03:18:31,335 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-02-07 03:18:36,994 INFO [train.py:901] (1/4) Epoch 22, batch 900, loss[loss=0.2164, simple_loss=0.3028, pruned_loss=0.06499, over 8506.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2857, pruned_loss=0.06092, over 1600196.55 frames. ], batch size: 28, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:18:53,614 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2512, 2.5153, 2.9899, 1.7001, 3.1308, 1.9449, 1.6264, 2.1069],
+ device='cuda:1'), covar=tensor([0.0720, 0.0432, 0.0269, 0.0729, 0.0406, 0.0843, 0.0883, 0.0529],
+ device='cuda:1'), in_proj_covar=tensor([0.0454, 0.0394, 0.0345, 0.0444, 0.0375, 0.0534, 0.0390, 0.0419],
+ device='cuda:1'), out_proj_covar=tensor([1.2187e-04, 1.0338e-04, 9.0653e-05, 1.1683e-04, 9.8477e-05, 1.5078e-04,
+ 1.0542e-04, 1.1114e-04], device='cuda:1')
+2023-02-07 03:19:09,381 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.381e+02 2.827e+02 3.296e+02 7.509e+02, threshold=5.655e+02, percent-clipped=4.0
+2023-02-07 03:19:11,448 INFO [train.py:901] (1/4) Epoch 22, batch 950, loss[loss=0.2178, simple_loss=0.3017, pruned_loss=0.06693, over 8482.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2856, pruned_loss=0.0608, over 1603122.86 frames. ], batch size: 29, lr: 3.47e-03, grad_scale: 16.0
+2023-02-07 03:19:13,662 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9379, 1.5737, 3.1366, 1.6085, 2.2849, 3.4094, 3.5001, 2.9492],
+ device='cuda:1'), covar=tensor([0.1087, 0.1635, 0.0323, 0.1892, 0.0897, 0.0236, 0.0511, 0.0514],
+ device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0323, 0.0286, 0.0317, 0.0310, 0.0265, 0.0420, 0.0306],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 03:19:43,603 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-07 03:19:46,359 INFO [train.py:901] (1/4) Epoch 22, batch 1000, loss[loss=0.2087, simple_loss=0.2821, pruned_loss=0.06767, over 8488.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2861, pruned_loss=0.06132, over 1605641.34 frames. ], batch size: 25, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:19:49,226 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2380, 2.1197, 1.6969, 1.9356, 1.7783, 1.4641, 1.6766, 1.6584],
+ device='cuda:1'), covar=tensor([0.1257, 0.0421, 0.1180, 0.0501, 0.0719, 0.1381, 0.0933, 0.0922],
+ device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0235, 0.0331, 0.0308, 0.0299, 0.0334, 0.0341, 0.0318],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 03:20:12,115 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:20:17,087 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-07 03:20:19,772 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 2.326e+02 2.890e+02 3.504e+02 6.405e+02, threshold=5.779e+02, percent-clipped=4.0
+2023-02-07 03:20:21,021 INFO [train.py:901] (1/4) Epoch 22, batch 1050, loss[loss=0.2237, simple_loss=0.3026, pruned_loss=0.07237, over 8506.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.286, pruned_loss=0.0614, over 1606797.38 frames. ], batch size: 26, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:20:28,506 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-07 03:20:28,713 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170803.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:20:41,785 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:20:55,958 INFO [train.py:901] (1/4) Epoch 22, batch 1100, loss[loss=0.2447, simple_loss=0.3343, pruned_loss=0.07752, over 8453.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.287, pruned_loss=0.06186, over 1609538.13 frames. ], batch size: 27, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:21:27,508 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2684, 2.1135, 1.6839, 1.8945, 1.8070, 1.4494, 1.6995, 1.6018],
+ device='cuda:1'), covar=tensor([0.1375, 0.0421, 0.1159, 0.0555, 0.0708, 0.1404, 0.0930, 0.0936],
+ device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0237, 0.0334, 0.0310, 0.0300, 0.0336, 0.0344, 0.0319],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 03:21:29,306 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.576e+02 3.127e+02 3.706e+02 1.049e+03, threshold=6.255e+02, percent-clipped=5.0
+2023-02-07 03:21:30,677 INFO [train.py:901] (1/4) Epoch 22, batch 1150, loss[loss=0.2213, simple_loss=0.3035, pruned_loss=0.06957, over 8506.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2884, pruned_loss=0.06242, over 1617329.61 frames. ], batch size: 26, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:21:37,421 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-07 03:21:45,384 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5561, 1.5132, 2.6349, 1.1268, 2.0772, 2.9031, 3.1532, 2.1037],
+ device='cuda:1'), covar=tensor([0.1558, 0.1842, 0.0578, 0.2826, 0.1158, 0.0417, 0.0719, 0.1086],
+ device='cuda:1'), in_proj_covar=tensor([0.0293, 0.0320, 0.0283, 0.0314, 0.0306, 0.0262, 0.0415, 0.0304],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 03:21:52,865 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0195, 2.1731, 1.8455, 2.7279, 1.3495, 1.6268, 1.9697, 2.2624],
+ device='cuda:1'), covar=tensor([0.0720, 0.0821, 0.0896, 0.0383, 0.1075, 0.1315, 0.0794, 0.0659],
+ device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0247, 0.0215, 0.0209, 0.0249, 0.0251, 0.0210],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 03:21:56,780 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170931.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:22:01,710 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170938.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:22:04,213 INFO [train.py:901] (1/4) Epoch 22, batch 1200, loss[loss=0.225, simple_loss=0.3011, pruned_loss=0.07445, over 8186.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2881, pruned_loss=0.0623, over 1619736.58 frames. ], batch size: 23, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:22:07,064 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170946.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:22:38,801 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.384e+02 2.807e+02 3.549e+02 5.873e+02, threshold=5.615e+02, percent-clipped=0.0
+2023-02-07 03:22:40,092 INFO [train.py:901] (1/4) Epoch 22, batch 1250, loss[loss=0.1839, simple_loss=0.2766, pruned_loss=0.04558, over 8352.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2871, pruned_loss=0.06143, over 1615037.45 frames. ], batch size: 24, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:22:57,667 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5125, 4.4737, 4.0141, 2.1417, 3.8498, 4.1398, 4.1789, 3.8919],
+ device='cuda:1'), covar=tensor([0.0771, 0.0589, 0.1059, 0.4750, 0.0976, 0.1077, 0.1152, 0.0796],
+ device='cuda:1'), in_proj_covar=tensor([0.0522, 0.0430, 0.0430, 0.0530, 0.0422, 0.0441, 0.0420, 0.0382],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:22:58,786 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-07 03:23:14,555 INFO [train.py:901] (1/4) Epoch 22, batch 1300, loss[loss=0.2578, simple_loss=0.3252, pruned_loss=0.09514, over 6760.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2868, pruned_loss=0.06085, over 1616737.61 frames. ], batch size: 71, lr: 3.47e-03, grad_scale: 8.0
+2023-02-07 03:23:17,502 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:23:47,495 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.382e+02 2.988e+02 3.753e+02 7.309e+02, threshold=5.975e+02, percent-clipped=5.0
+2023-02-07 03:23:48,841 INFO [train.py:901] (1/4) Epoch 22, batch 1350, loss[loss=0.2133, simple_loss=0.3024, pruned_loss=0.06208, over 8254.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.288, pruned_loss=0.06167, over 1620504.28 frames. ], batch size: 24, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:24:01,667 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171110.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:24:02,353 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1748, 1.3040, 3.3204, 1.0699, 2.9269, 2.7552, 3.0272, 2.9057],
+ device='cuda:1'), covar=tensor([0.0890, 0.4332, 0.0918, 0.4340, 0.1510, 0.1256, 0.0828, 0.1000],
+ device='cuda:1'), in_proj_covar=tensor([0.0625, 0.0637, 0.0688, 0.0620, 0.0704, 0.0604, 0.0606, 0.0672],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:24:23,452 INFO [train.py:901] (1/4) Epoch 22, batch 1400, loss[loss=0.2191, simple_loss=0.3006, pruned_loss=0.06883, over 8294.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2887, pruned_loss=0.06221, over 1621356.48 frames. ], batch size: 23, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:24:23,825 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.05 vs. limit=5.0
+2023-02-07 03:24:55,486 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 3.047e+02 3.835e+02 9.203e+02, threshold=6.094e+02, percent-clipped=3.0
+2023-02-07 03:24:57,482 INFO [train.py:901] (1/4) Epoch 22, batch 1450, loss[loss=0.217, simple_loss=0.291, pruned_loss=0.07145, over 8332.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2896, pruned_loss=0.06294, over 1618460.21 frames. ], batch size: 25, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:24:58,895 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171194.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:25:06,229 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-07 03:25:12,520 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171214.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:25:16,689 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:25:32,454 INFO [train.py:901] (1/4) Epoch 22, batch 1500, loss[loss=0.225, simple_loss=0.2929, pruned_loss=0.07855, over 7811.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.29, pruned_loss=0.06358, over 1618441.69 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:26:04,592 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.477e+02 2.962e+02 3.885e+02 1.079e+03, threshold=5.924e+02, percent-clipped=2.0
+2023-02-07 03:26:04,685 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171290.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:26:05,961 INFO [train.py:901] (1/4) Epoch 22, batch 1550, loss[loss=0.1902, simple_loss=0.2817, pruned_loss=0.04937, over 7251.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2908, pruned_loss=0.06392, over 1619739.90 frames. ], batch size: 16, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:26:12,941 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:26:25,491 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7606, 1.9356, 1.6332, 2.2656, 1.0117, 1.4695, 1.6354, 1.9641],
+ device='cuda:1'), covar=tensor([0.0730, 0.0727, 0.0936, 0.0442, 0.1157, 0.1313, 0.0823, 0.0669],
+ device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0214, 0.0207, 0.0247, 0.0250, 0.0211],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 03:26:30,108 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:26:37,455 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.0244, 1.5092, 6.2001, 2.3016, 5.6462, 5.2362, 5.7144, 5.5971],
+ device='cuda:1'), covar=tensor([0.0346, 0.4680, 0.0320, 0.3655, 0.0812, 0.0821, 0.0411, 0.0419],
+ device='cuda:1'), in_proj_covar=tensor([0.0629, 0.0642, 0.0693, 0.0623, 0.0708, 0.0608, 0.0611, 0.0676],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 03:26:40,682 INFO [train.py:901] (1/4) Epoch 22, batch 1600, loss[loss=0.1923, simple_loss=0.2785, pruned_loss=0.05311, over 8285.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2905, pruned_loss=0.06342, over 1623080.31 frames. ], batch size: 23, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:26:55,765 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171363.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:27:13,639 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 2.510e+02 3.045e+02 3.987e+02 6.104e+02, threshold=6.090e+02, percent-clipped=2.0
+2023-02-07 03:27:15,008 INFO [train.py:901] (1/4) Epoch 22, batch 1650, loss[loss=0.1957, simple_loss=0.2847, pruned_loss=0.05331, over 8102.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2914, pruned_loss=0.06353, over 1625997.78 frames. ], batch size: 23, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:27:24,118 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171405.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:27:51,035 INFO [train.py:901] (1/4) Epoch 22, batch 1700, loss[loss=0.2, simple_loss=0.2803, pruned_loss=0.05981, over 8665.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2906, pruned_loss=0.06313, over 1621930.91 frames. ], batch size: 34, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:27:59,275 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171454.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:28:18,869 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0
+2023-02-07 03:28:24,558 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.429e+02 3.050e+02 3.629e+02 7.357e+02, threshold=6.100e+02, percent-clipped=3.0
+2023-02-07 03:28:25,928 INFO [train.py:901] (1/4) Epoch 22, batch 1750, loss[loss=0.2195, simple_loss=0.3074, pruned_loss=0.06575, over 8465.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2906, pruned_loss=0.06287, over 1622478.53 frames. ], batch size: 27, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:28:42,131 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171516.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:29:00,471 INFO [train.py:901] (1/4) Epoch 22, batch 1800, loss[loss=0.2014, simple_loss=0.2879, pruned_loss=0.05751, over 8704.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2901, pruned_loss=0.06261, over 1625070.42 frames. ], batch size: 30, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:29:11,512 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:29:19,688 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171569.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:29:34,591 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.408e+02 2.801e+02 3.784e+02 7.831e+02, threshold=5.602e+02, percent-clipped=2.0
+2023-02-07 03:29:35,959 INFO [train.py:901] (1/4) Epoch 22, batch 1850, loss[loss=0.185, simple_loss=0.2615, pruned_loss=0.05428, over 8086.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2897, pruned_loss=0.06254, over 1621797.36 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:30:10,050 INFO [train.py:901] (1/4) Epoch 22, batch 1900, loss[loss=0.1668, simple_loss=0.2489, pruned_loss=0.04233, over 7797.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2896, pruned_loss=0.06233, over 1615742.66 frames. ], batch size: 19, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:30:24,230 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171661.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:30:32,195 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171673.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:30:36,782 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-07 03:30:41,590 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171686.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:30:44,068 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.518e+02 3.035e+02 3.649e+02 9.576e+02, threshold=6.070e+02, percent-clipped=4.0
+2023-02-07 03:30:45,460 INFO [train.py:901] (1/4) Epoch 22, batch 1950, loss[loss=0.185, simple_loss=0.2631, pruned_loss=0.05343, over 7974.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2897, pruned_loss=0.06243, over 1620229.78 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:30:48,014 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-07 03:30:56,295 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171707.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:31:07,796 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-07 03:31:20,038 INFO [train.py:901] (1/4) Epoch 22, batch 2000, loss[loss=0.187, simple_loss=0.2719, pruned_loss=0.05104, over 7932.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2885, pruned_loss=0.06202, over 1614514.30 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:31:43,119 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0720, 1.5745, 3.4401, 1.6514, 2.3616, 3.7944, 3.8678, 3.2127],
+ device='cuda:1'), covar=tensor([0.1155, 0.1822, 0.0353, 0.2141, 0.1153, 0.0207, 0.0589, 0.0574],
+ device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0323, 0.0285, 0.0318, 0.0307, 0.0264, 0.0419, 0.0305],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 03:31:53,990 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.301e+02 2.928e+02 3.706e+02 6.798e+02, threshold=5.855e+02, percent-clipped=1.0
+2023-02-07 03:31:55,401 INFO [train.py:901] (1/4) Epoch 22, batch 2050, loss[loss=0.2532, simple_loss=0.3318, pruned_loss=0.08729, over 8248.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2883, pruned_loss=0.06227, over 1607288.90 frames. ], batch size: 24, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:32:17,606 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:19,738 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171825.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:31,146 INFO [train.py:901] (1/4) Epoch 22, batch 2100, loss[loss=0.1748, simple_loss=0.2571, pruned_loss=0.04625, over 7778.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2881, pruned_loss=0.06197, over 1609001.04 frames. ], batch size: 19, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:32:36,878 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:43,494 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171860.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:32:45,802 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5245, 1.5454, 2.0860, 1.4382, 1.1734, 2.0612, 0.3649, 1.2546],
+ device='cuda:1'), covar=tensor([0.1838, 0.1187, 0.0413, 0.1125, 0.2646, 0.0416, 0.2153, 0.1293],
+ device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0198, 0.0127, 0.0223, 0.0272, 0.0137, 0.0171, 0.0194],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 03:33:05,589 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.505e+02 2.999e+02 3.749e+02 9.868e+02, threshold=5.998e+02, percent-clipped=7.0
+2023-02-07 03:33:06,896 INFO [train.py:901] (1/4) Epoch 22, batch 2150, loss[loss=0.2133, simple_loss=0.2924, pruned_loss=0.06711, over 7813.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2878, pruned_loss=0.0615, over 1609961.38 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:33:33,122 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171929.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:33:42,595 INFO [train.py:901] (1/4) Epoch 22, batch 2200, loss[loss=0.2273, simple_loss=0.3091, pruned_loss=0.0728, over 8226.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2881, pruned_loss=0.06191, over 1609731.35 frames. ], batch size: 22, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:33:51,045 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:33:51,683 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171955.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:34:04,374 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0
+2023-02-07 03:34:05,587 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171975.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:34:15,550 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.362e+02 2.812e+02 3.623e+02 6.076e+02, threshold=5.624e+02, percent-clipped=1.0
+2023-02-07 03:34:16,938 INFO [train.py:901] (1/4) Epoch 22, batch 2250, loss[loss=0.1706, simple_loss=0.2521, pruned_loss=0.04459, over 7815.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06135, over 1609549.05 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:34:17,296 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-02-07 03:34:54,268 INFO [train.py:901] (1/4) Epoch 22, batch 2300, loss[loss=0.2363, simple_loss=0.3206, pruned_loss=0.07601, over 8339.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2878, pruned_loss=0.06164, over 1605704.81 frames. ], batch size: 26, lr: 3.46e-03, grad_scale: 8.0
+2023-02-07 03:35:19,362 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6009, 2.4709, 1.8756, 2.3157, 2.1505, 1.6317, 2.0654, 2.1384],
+ device='cuda:1'), covar=tensor([0.1310, 0.0373, 0.1088, 0.0517, 0.0723, 0.1421, 0.0904, 0.0824],
+ device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0233, 0.0331, 0.0307, 0.0299, 0.0336, 0.0343, 0.0316],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 03:35:20,118 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172078.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:35:28,298 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.401e+02 3.005e+02 3.667e+02 7.010e+02, threshold=6.010e+02, percent-clipped=1.0
+2023-02-07 03:35:29,621 INFO [train.py:901] (1/4) Epoch 22, batch 2350, loss[loss=0.1963, simple_loss=0.2648, pruned_loss=0.0639, over 7714.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2869, pruned_loss=0.06125, over 1606186.35 frames. ], batch size: 18, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:35:37,284 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172103.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:36:01,302 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172136.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:36:05,211 INFO [train.py:901] (1/4) Epoch 22, batch 2400, loss[loss=0.1892, simple_loss=0.2838, pruned_loss=0.04724, over 8456.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2875, pruned_loss=0.06187, over 1605935.83 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:36:39,685 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.658e+02 3.455e+02 4.348e+02 7.809e+02, threshold=6.910e+02, percent-clipped=6.0
+2023-02-07 03:36:41,123 INFO [train.py:901] (1/4) Epoch 22, batch 2450, loss[loss=0.2304, simple_loss=0.3056, pruned_loss=0.07763, over 8526.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.288, pruned_loss=0.06235, over 1609310.56 frames. ], batch size: 49, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:36:45,750 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1862, 1.8419, 2.3585, 2.0868, 2.3908, 2.1470, 1.9364, 1.1900],
+ device='cuda:1'), covar=tensor([0.4837, 0.4665, 0.1865, 0.3323, 0.1977, 0.3143, 0.1841, 0.4775],
+ device='cuda:1'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0997, 0.0895, 0.0748, 0.0828],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 03:37:08,131 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172231.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:37:16,956 INFO [train.py:901] (1/4) Epoch 22, batch 2500, loss[loss=0.2067, simple_loss=0.3071, pruned_loss=0.05311, over 8526.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2874, pruned_loss=0.06143, over 1615174.70 frames. ], batch size: 49, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:37:26,696 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172256.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:37:37,003 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8294, 1.9671, 1.7670, 2.6155, 1.1899, 1.5663, 1.7906, 1.9784],
+ device='cuda:1'), covar=tensor([0.0764, 0.0766, 0.0889, 0.0367, 0.1049, 0.1270, 0.0851, 0.0809],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0197, 0.0243, 0.0215, 0.0206, 0.0248, 0.0251, 0.0210],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 03:37:50,845 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.288e+02 2.722e+02 3.540e+02 9.975e+02, threshold=5.443e+02, percent-clipped=1.0
+2023-02-07 03:37:52,252 INFO [train.py:901] (1/4) Epoch 22, batch 2550, loss[loss=0.1954, simple_loss=0.2749, pruned_loss=0.05791, over 8141.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06175, over 1614594.94 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:37:56,720 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172299.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:38:25,419 INFO [train.py:901] (1/4) Epoch 22, batch 2600, loss[loss=0.1982, simple_loss=0.2873, pruned_loss=0.0546, over 8383.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2862, pruned_loss=0.06128, over 1612761.78 frames. ], batch size: 48, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:38:58,398 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.484e+02 3.096e+02 3.957e+02 1.134e+03, threshold=6.191e+02, percent-clipped=6.0
+2023-02-07 03:39:00,470 INFO [train.py:901] (1/4) Epoch 22, batch 2650, loss[loss=0.1832, simple_loss=0.2683, pruned_loss=0.04901, over 8460.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.286, pruned_loss=0.06134, over 1617202.20 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:39:16,288 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172414.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:39:35,340 INFO [train.py:901] (1/4) Epoch 22, batch 2700, loss[loss=0.201, simple_loss=0.2904, pruned_loss=0.05577, over 8331.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2861, pruned_loss=0.06146, over 1616731.23 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:40:02,834 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:40:09,203 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-02-07 03:40:09,436 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.345e+02 2.798e+02 3.767e+02 1.133e+03, threshold=5.596e+02, percent-clipped=4.0
+2023-02-07 03:40:10,848 INFO [train.py:901] (1/4) Epoch 22, batch 2750, loss[loss=0.2027, simple_loss=0.2896, pruned_loss=0.05791, over 8133.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2861, pruned_loss=0.06182, over 1615069.72 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:40:45,669 INFO [train.py:901] (1/4) Epoch 22, batch 2800, loss[loss=0.2106, simple_loss=0.2812, pruned_loss=0.06995, over 7196.00 frames. ], tot_loss[loss=0.205, simple_loss=0.286, pruned_loss=0.06196, over 1609799.44 frames. ], batch size: 16, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:41:18,226 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 2.395e+02 2.840e+02 3.614e+02 7.820e+02, threshold=5.680e+02, percent-clipped=6.0
+2023-02-07 03:41:20,379 INFO [train.py:901] (1/4) Epoch 22, batch 2850, loss[loss=0.2411, simple_loss=0.3185, pruned_loss=0.08186, over 8301.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2866, pruned_loss=0.06203, over 1606579.40 frames. ], batch size: 23, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:41:23,220 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:41:37,768 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172616.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:41:56,013 INFO [train.py:901] (1/4) Epoch 22, batch 2900, loss[loss=0.1986, simple_loss=0.2932, pruned_loss=0.05203, over 8251.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2854, pruned_loss=0.06138, over 1602941.54 frames. ], batch size: 24, lr: 3.45e-03, grad_scale: 8.0
+2023-02-07 03:41:57,545 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:42:15,805 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 03:42:24,172 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-07 03:42:28,908 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.482e+02 2.975e+02 3.907e+02 6.756e+02, threshold=5.949e+02, percent-clipped=4.0
+2023-02-07 03:42:30,289 INFO [train.py:901] (1/4) Epoch 22, batch 2950, loss[loss=0.194, simple_loss=0.2791, pruned_loss=0.05448, over 7967.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2857, pruned_loss=0.06156, over 1605788.49 frames.
], batch size: 21, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:42:32,556 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:43:05,665 INFO [train.py:901] (1/4) Epoch 22, batch 3000, loss[loss=0.1782, simple_loss=0.2575, pruned_loss=0.0495, over 7645.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2854, pruned_loss=0.06133, over 1604961.10 frames. ], batch size: 19, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:43:05,665 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 03:43:17,970 INFO [train.py:935] (1/4) Epoch 22, validation: loss=0.1735, simple_loss=0.2739, pruned_loss=0.03659, over 944034.00 frames. +2023-02-07 03:43:17,971 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 03:43:25,618 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172752.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:43:51,446 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.191e+02 2.765e+02 3.574e+02 6.067e+02, threshold=5.530e+02, percent-clipped=1.0 +2023-02-07 03:43:52,760 INFO [train.py:901] (1/4) Epoch 22, batch 3050, loss[loss=0.1999, simple_loss=0.2812, pruned_loss=0.05927, over 8125.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2858, pruned_loss=0.06111, over 1606813.36 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:44:15,791 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7328, 1.3408, 3.3524, 1.4794, 2.2796, 3.7386, 3.9055, 3.1613], + device='cuda:1'), covar=tensor([0.1260, 0.1969, 0.0363, 0.2099, 0.1140, 0.0231, 0.0500, 0.0602], + device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0325, 0.0286, 0.0318, 0.0310, 0.0266, 0.0423, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 03:44:26,397 INFO [train.py:901] (1/4) Epoch 22, batch 3100, loss[loss=0.1844, simple_loss=0.2741, pruned_loss=0.0473, over 8196.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2849, pruned_loss=0.06082, over 1605899.67 frames. 
], batch size: 23, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:44:31,357 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0154, 2.4542, 2.6836, 1.5091, 3.0550, 1.6585, 1.5047, 2.0922], + device='cuda:1'), covar=tensor([0.0910, 0.0402, 0.0314, 0.0849, 0.0456, 0.1000, 0.0854, 0.0579], + device='cuda:1'), in_proj_covar=tensor([0.0447, 0.0386, 0.0340, 0.0440, 0.0370, 0.0528, 0.0385, 0.0412], + device='cuda:1'), out_proj_covar=tensor([1.1964e-04, 1.0113e-04, 8.9532e-05, 1.1579e-04, 9.7151e-05, 1.4874e-04, + 1.0389e-04, 1.0921e-04], device='cuda:1') +2023-02-07 03:44:32,686 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172851.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:40,603 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:44,821 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0541, 2.3042, 1.7611, 2.8844, 1.3553, 1.6739, 1.9948, 2.2796], + device='cuda:1'), covar=tensor([0.0717, 0.0652, 0.0906, 0.0328, 0.1011, 0.1155, 0.0751, 0.0756], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0217, 0.0208, 0.0249, 0.0252, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 03:44:47,321 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2455, 1.3031, 3.3492, 1.0997, 2.9370, 2.7920, 3.0658, 2.9613], + device='cuda:1'), covar=tensor([0.0762, 0.4433, 0.0801, 0.4223, 0.1420, 0.1160, 0.0825, 0.0879], + device='cuda:1'), in_proj_covar=tensor([0.0627, 0.0640, 0.0689, 0.0621, 0.0706, 0.0611, 0.0607, 0.0673], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:44:50,783 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:59,808 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.451e+02 3.163e+02 4.463e+02 7.617e+02, threshold=6.327e+02, percent-clipped=7.0 +2023-02-07 03:45:01,202 INFO [train.py:901] (1/4) Epoch 22, batch 3150, loss[loss=0.2341, simple_loss=0.3072, pruned_loss=0.08054, over 8333.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2868, pruned_loss=0.06191, over 1611160.86 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:45:27,822 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0 +2023-02-07 03:45:29,139 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.94 vs. limit=5.0 +2023-02-07 03:45:35,480 INFO [train.py:901] (1/4) Epoch 22, batch 3200, loss[loss=0.1475, simple_loss=0.2277, pruned_loss=0.03359, over 7442.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2867, pruned_loss=0.06159, over 1613664.97 frames. 
], batch size: 17, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:45:47,724 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:45:53,342 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6464, 3.1149, 2.5384, 4.1476, 1.8036, 2.1934, 2.4287, 3.0904], + device='cuda:1'), covar=tensor([0.0591, 0.0614, 0.0740, 0.0204, 0.1004, 0.1122, 0.0889, 0.0744], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0217, 0.0207, 0.0249, 0.0252, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 03:46:06,752 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:46:07,595 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9588, 1.7117, 2.0801, 1.8878, 2.0365, 1.9868, 1.8265, 0.8343], + device='cuda:1'), covar=tensor([0.5278, 0.4345, 0.1866, 0.3046, 0.2092, 0.2743, 0.1768, 0.4623], + device='cuda:1'), in_proj_covar=tensor([0.0945, 0.0978, 0.0805, 0.0944, 0.0997, 0.0896, 0.0748, 0.0828], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 03:46:09,262 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.554e+02 2.964e+02 3.773e+02 6.891e+02, threshold=5.928e+02, percent-clipped=2.0 +2023-02-07 03:46:10,572 INFO [train.py:901] (1/4) Epoch 22, batch 3250, loss[loss=0.2011, simple_loss=0.2749, pruned_loss=0.06366, over 7552.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2878, pruned_loss=0.062, over 1615229.46 frames. ], batch size: 18, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:46:19,610 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4163, 1.6642, 1.6649, 1.1925, 1.7297, 1.3423, 0.2942, 1.6285], + device='cuda:1'), covar=tensor([0.0428, 0.0332, 0.0275, 0.0440, 0.0323, 0.0763, 0.0859, 0.0225], + device='cuda:1'), in_proj_covar=tensor([0.0448, 0.0388, 0.0342, 0.0443, 0.0372, 0.0531, 0.0387, 0.0414], + device='cuda:1'), out_proj_covar=tensor([1.2008e-04, 1.0164e-04, 9.0147e-05, 1.1659e-04, 9.7624e-05, 1.4961e-04, + 1.0443e-04, 1.0972e-04], device='cuda:1') +2023-02-07 03:46:40,192 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6269, 1.4967, 1.7628, 1.4205, 0.9150, 1.5636, 1.5517, 1.3219], + device='cuda:1'), covar=tensor([0.0546, 0.1218, 0.1519, 0.1371, 0.0590, 0.1401, 0.0676, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 03:46:45,374 INFO [train.py:901] (1/4) Epoch 22, batch 3300, loss[loss=0.1862, simple_loss=0.2665, pruned_loss=0.05298, over 7978.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2878, pruned_loss=0.0622, over 1613696.92 frames. 
], batch size: 21, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:46:45,483 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8873, 6.1160, 5.3613, 2.6606, 5.4353, 5.7060, 5.5739, 5.4446], + device='cuda:1'), covar=tensor([0.0555, 0.0366, 0.0866, 0.4213, 0.0790, 0.0845, 0.1086, 0.0629], + device='cuda:1'), in_proj_covar=tensor([0.0524, 0.0434, 0.0431, 0.0538, 0.0426, 0.0446, 0.0426, 0.0385], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:47:07,560 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:17,931 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.506e+02 2.831e+02 3.669e+02 6.075e+02, threshold=5.662e+02, percent-clipped=1.0 +2023-02-07 03:47:18,592 INFO [train.py:901] (1/4) Epoch 22, batch 3350, loss[loss=0.2618, simple_loss=0.3331, pruned_loss=0.09528, over 8285.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2884, pruned_loss=0.0627, over 1612045.04 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:47:22,022 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:23,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7018, 5.8880, 5.1273, 2.6023, 5.0917, 5.5184, 5.4152, 5.2461], + device='cuda:1'), covar=tensor([0.0620, 0.0444, 0.0959, 0.4465, 0.0828, 0.0896, 0.1113, 0.0630], + device='cuda:1'), in_proj_covar=tensor([0.0524, 0.0434, 0.0432, 0.0536, 0.0426, 0.0446, 0.0425, 0.0385], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:47:26,075 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:43,687 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7218, 1.5060, 4.9110, 1.9350, 4.4127, 4.0809, 4.4665, 4.3068], + device='cuda:1'), covar=tensor([0.0490, 0.4617, 0.0448, 0.4019, 0.1031, 0.0907, 0.0517, 0.0585], + device='cuda:1'), in_proj_covar=tensor([0.0625, 0.0637, 0.0686, 0.0619, 0.0701, 0.0606, 0.0604, 0.0669], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:47:54,973 INFO [train.py:901] (1/4) Epoch 22, batch 3400, loss[loss=0.2191, simple_loss=0.3007, pruned_loss=0.06874, over 8490.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2875, pruned_loss=0.06221, over 1612220.17 frames. 
], batch size: 26, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:48:12,778 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173168.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:48:21,529 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4399, 1.4572, 4.5987, 1.7609, 4.1137, 3.8321, 4.1946, 4.0462], + device='cuda:1'), covar=tensor([0.0507, 0.4502, 0.0423, 0.3693, 0.0899, 0.0920, 0.0515, 0.0544], + device='cuda:1'), in_proj_covar=tensor([0.0625, 0.0636, 0.0685, 0.0618, 0.0700, 0.0604, 0.0603, 0.0669], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:48:28,295 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 2.494e+02 3.128e+02 3.771e+02 6.972e+02, threshold=6.255e+02, percent-clipped=4.0 +2023-02-07 03:48:28,968 INFO [train.py:901] (1/4) Epoch 22, batch 3450, loss[loss=0.2562, simple_loss=0.3376, pruned_loss=0.08744, over 8256.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2881, pruned_loss=0.06252, over 1614968.97 frames. ], batch size: 24, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:48:39,555 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:48:42,446 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173211.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:49:05,730 INFO [train.py:901] (1/4) Epoch 22, batch 3500, loss[loss=0.2333, simple_loss=0.3113, pruned_loss=0.07768, over 8321.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2867, pruned_loss=0.06147, over 1614621.05 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:49:24,783 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 03:49:38,939 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.642e+02 3.082e+02 3.788e+02 9.506e+02, threshold=6.164e+02, percent-clipped=4.0 +2023-02-07 03:49:39,651 INFO [train.py:901] (1/4) Epoch 22, batch 3550, loss[loss=0.233, simple_loss=0.3144, pruned_loss=0.07583, over 8535.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2874, pruned_loss=0.06156, over 1613327.31 frames. 
], batch size: 28, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:49:50,460 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7771, 4.7755, 4.2239, 2.0397, 4.2276, 4.2588, 4.3014, 4.1688], + device='cuda:1'), covar=tensor([0.0594, 0.0438, 0.1030, 0.4517, 0.0866, 0.0922, 0.1195, 0.0696], + device='cuda:1'), in_proj_covar=tensor([0.0523, 0.0434, 0.0432, 0.0536, 0.0427, 0.0447, 0.0426, 0.0386], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:50:00,037 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173322.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:03,364 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:06,893 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:10,304 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5622, 1.3735, 1.6552, 1.3018, 0.9284, 1.4244, 1.4695, 1.1477], + device='cuda:1'), covar=tensor([0.0580, 0.1276, 0.1640, 0.1460, 0.0598, 0.1492, 0.0735, 0.0726], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 03:50:14,763 INFO [train.py:901] (1/4) Epoch 22, batch 3600, loss[loss=0.1958, simple_loss=0.2914, pruned_loss=0.05012, over 8524.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2875, pruned_loss=0.06156, over 1617586.03 frames. ], batch size: 28, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:50:25,001 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173356.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:26,365 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:43,490 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:48,590 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.398e+02 3.034e+02 4.459e+02 8.281e+02, threshold=6.068e+02, percent-clipped=7.0 +2023-02-07 03:50:49,318 INFO [train.py:901] (1/4) Epoch 22, batch 3650, loss[loss=0.1946, simple_loss=0.2822, pruned_loss=0.05347, over 8309.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2888, pruned_loss=0.06216, over 1618688.44 frames. ], batch size: 25, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:23,353 INFO [train.py:901] (1/4) Epoch 22, batch 3700, loss[loss=0.2609, simple_loss=0.331, pruned_loss=0.0954, over 7368.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2892, pruned_loss=0.06242, over 1618503.33 frames. ], batch size: 71, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:24,741 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 03:51:42,307 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:51:51,707 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. 
limit=2.0 +2023-02-07 03:51:57,921 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.519e+02 2.931e+02 3.909e+02 7.363e+02, threshold=5.861e+02, percent-clipped=2.0 +2023-02-07 03:51:58,522 INFO [train.py:901] (1/4) Epoch 22, batch 3750, loss[loss=0.2662, simple_loss=0.3337, pruned_loss=0.09936, over 8358.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.289, pruned_loss=0.06287, over 1614953.20 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:58,726 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:52:11,076 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173509.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:52:12,776 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:52:18,135 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7209, 1.3985, 4.9318, 1.7796, 4.3983, 4.1320, 4.4959, 4.3433], + device='cuda:1'), covar=tensor([0.0556, 0.4679, 0.0407, 0.3862, 0.0959, 0.0903, 0.0532, 0.0579], + device='cuda:1'), in_proj_covar=tensor([0.0628, 0.0640, 0.0689, 0.0621, 0.0702, 0.0607, 0.0605, 0.0674], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:52:22,066 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9126, 1.4978, 1.7449, 1.3873, 0.9565, 1.4857, 1.7426, 1.5124], + device='cuda:1'), covar=tensor([0.0538, 0.1207, 0.1588, 0.1409, 0.0605, 0.1467, 0.0696, 0.0644], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 03:52:23,510 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 03:52:32,199 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1809, 1.5209, 1.7720, 1.4645, 1.0141, 1.5238, 1.9262, 1.7813], + device='cuda:1'), covar=tensor([0.0538, 0.1258, 0.1634, 0.1407, 0.0630, 0.1491, 0.0698, 0.0625], + device='cuda:1'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 03:52:32,727 INFO [train.py:901] (1/4) Epoch 22, batch 3800, loss[loss=0.1883, simple_loss=0.2741, pruned_loss=0.0512, over 8623.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.06347, over 1617574.69 frames. ], batch size: 39, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:52:35,962 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.34 vs. 
limit=5.0 +2023-02-07 03:52:41,038 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5686, 1.5594, 2.0944, 1.3911, 1.2464, 2.0987, 0.4079, 1.2124], + device='cuda:1'), covar=tensor([0.1579, 0.1263, 0.0336, 0.1061, 0.2552, 0.0367, 0.2118, 0.1286], + device='cuda:1'), in_proj_covar=tensor([0.0189, 0.0198, 0.0127, 0.0220, 0.0267, 0.0135, 0.0170, 0.0192], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 03:52:58,746 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:07,973 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.467e+02 3.140e+02 3.842e+02 8.904e+02, threshold=6.281e+02, percent-clipped=2.0 +2023-02-07 03:53:08,694 INFO [train.py:901] (1/4) Epoch 22, batch 3850, loss[loss=0.1826, simple_loss=0.255, pruned_loss=0.05511, over 7654.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2891, pruned_loss=0.06322, over 1609497.10 frames. ], batch size: 19, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:53:16,528 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173603.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:30,754 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 03:53:33,542 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:36,877 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:43,568 INFO [train.py:901] (1/4) Epoch 22, batch 3900, loss[loss=0.1947, simple_loss=0.2704, pruned_loss=0.0595, over 7675.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.289, pruned_loss=0.06313, over 1606536.05 frames. ], batch size: 18, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:54:02,783 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5190, 1.4832, 1.8604, 1.2704, 1.1921, 1.8637, 0.2618, 1.1664], + device='cuda:1'), covar=tensor([0.1720, 0.1260, 0.0431, 0.0952, 0.2916, 0.0483, 0.2241, 0.1276], + device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0198, 0.0128, 0.0221, 0.0269, 0.0136, 0.0171, 0.0193], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 03:54:03,324 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173671.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:54:17,229 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.938e+02 2.507e+02 2.945e+02 3.654e+02 8.206e+02, threshold=5.890e+02, percent-clipped=3.0 +2023-02-07 03:54:17,881 INFO [train.py:901] (1/4) Epoch 22, batch 3950, loss[loss=0.2063, simple_loss=0.2809, pruned_loss=0.06589, over 7819.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2892, pruned_loss=0.06351, over 1608145.05 frames. ], batch size: 20, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:54:53,286 INFO [train.py:901] (1/4) Epoch 22, batch 4000, loss[loss=0.2334, simple_loss=0.3157, pruned_loss=0.07552, over 8642.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2885, pruned_loss=0.06322, over 1607071.07 frames. ], batch size: 39, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:55:17,297 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.06 vs. 
limit=5.0 +2023-02-07 03:55:23,156 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:55:26,126 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.329e+02 2.821e+02 3.599e+02 1.045e+03, threshold=5.642e+02, percent-clipped=6.0 +2023-02-07 03:55:26,800 INFO [train.py:901] (1/4) Epoch 22, batch 4050, loss[loss=0.2133, simple_loss=0.2841, pruned_loss=0.07124, over 7809.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2885, pruned_loss=0.06284, over 1609348.14 frames. ], batch size: 20, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:02,760 INFO [train.py:901] (1/4) Epoch 22, batch 4100, loss[loss=0.211, simple_loss=0.2951, pruned_loss=0.06344, over 8561.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2882, pruned_loss=0.06299, over 1606409.74 frames. ], batch size: 49, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:10,366 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173853.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:56:23,169 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9216, 1.4831, 6.0909, 2.2893, 5.4759, 5.1007, 5.6534, 5.5084], + device='cuda:1'), covar=tensor([0.0496, 0.5002, 0.0320, 0.3684, 0.0987, 0.0913, 0.0531, 0.0555], + device='cuda:1'), in_proj_covar=tensor([0.0634, 0.0648, 0.0693, 0.0627, 0.0707, 0.0611, 0.0610, 0.0680], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:56:31,244 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173883.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:56:36,365 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.329e+02 2.764e+02 3.605e+02 7.317e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 03:56:37,021 INFO [train.py:901] (1/4) Epoch 22, batch 4150, loss[loss=0.1829, simple_loss=0.2659, pruned_loss=0.04994, over 7658.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2874, pruned_loss=0.06231, over 1607644.06 frames. ], batch size: 19, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:47,601 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:57:12,186 INFO [train.py:901] (1/4) Epoch 22, batch 4200, loss[loss=0.2404, simple_loss=0.3216, pruned_loss=0.07959, over 8197.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06283, over 1609964.83 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:57:27,001 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8417, 6.0917, 5.1601, 2.6031, 5.2966, 5.6141, 5.5469, 5.4137], + device='cuda:1'), covar=tensor([0.0556, 0.0389, 0.0925, 0.4610, 0.0782, 0.1006, 0.1239, 0.0691], + device='cuda:1'), in_proj_covar=tensor([0.0525, 0.0435, 0.0432, 0.0536, 0.0423, 0.0445, 0.0426, 0.0387], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:57:28,899 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-07 03:57:29,744 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173968.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:57:35,169 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:57:46,760 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.391e+02 3.056e+02 3.931e+02 9.713e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 03:57:46,780 INFO [train.py:901] (1/4) Epoch 22, batch 4250, loss[loss=0.2355, simple_loss=0.3206, pruned_loss=0.07525, over 8511.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2881, pruned_loss=0.06242, over 1611880.03 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 4.0 +2023-02-07 03:57:55,806 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 03:58:04,801 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6323, 4.6188, 4.1842, 2.2061, 4.1510, 4.1445, 4.2655, 4.0431], + device='cuda:1'), covar=tensor([0.0626, 0.0498, 0.1001, 0.4270, 0.0782, 0.0983, 0.1193, 0.0752], + device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0436, 0.0433, 0.0538, 0.0425, 0.0448, 0.0428, 0.0388], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 03:58:15,723 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174033.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:17,063 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174035.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:21,635 INFO [train.py:901] (1/4) Epoch 22, batch 4300, loss[loss=0.2015, simple_loss=0.2936, pruned_loss=0.05467, over 8260.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2869, pruned_loss=0.0617, over 1613345.32 frames. ], batch size: 24, lr: 3.44e-03, grad_scale: 4.0 +2023-02-07 03:58:21,843 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:25,980 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 03:58:40,404 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:56,913 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:57,380 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.427e+02 2.775e+02 3.458e+02 5.995e+02, threshold=5.550e+02, percent-clipped=0.0 +2023-02-07 03:58:57,401 INFO [train.py:901] (1/4) Epoch 22, batch 4350, loss[loss=0.1863, simple_loss=0.2726, pruned_loss=0.04998, over 8567.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.287, pruned_loss=0.06173, over 1613811.16 frames. ], batch size: 31, lr: 3.43e-03, grad_scale: 4.0 +2023-02-07 03:59:25,055 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 03:59:32,552 INFO [train.py:901] (1/4) Epoch 22, batch 4400, loss[loss=0.1622, simple_loss=0.2382, pruned_loss=0.04306, over 7685.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2861, pruned_loss=0.06137, over 1611470.78 frames. 
], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 03:59:36,062 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7610, 2.9688, 2.5079, 4.0816, 1.7009, 2.2734, 2.5613, 3.3796], + device='cuda:1'), covar=tensor([0.0619, 0.0767, 0.0832, 0.0238, 0.1121, 0.1112, 0.0941, 0.0663], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0199, 0.0246, 0.0216, 0.0208, 0.0247, 0.0250, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 03:59:42,814 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:00:06,463 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 04:00:07,767 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.623e+02 3.065e+02 3.902e+02 1.119e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-07 04:00:07,787 INFO [train.py:901] (1/4) Epoch 22, batch 4450, loss[loss=0.2123, simple_loss=0.294, pruned_loss=0.06529, over 8023.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2859, pruned_loss=0.0613, over 1610949.34 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:00:24,476 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-07 04:00:26,072 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5099, 1.8895, 2.9090, 1.4372, 2.2060, 1.9070, 1.6357, 2.0948], + device='cuda:1'), covar=tensor([0.1933, 0.2570, 0.0883, 0.4577, 0.1761, 0.3282, 0.2277, 0.2448], + device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0605, 0.0559, 0.0645, 0.0646, 0.0591, 0.0536, 0.0630], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:00:30,153 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174224.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:00:41,755 INFO [train.py:901] (1/4) Epoch 22, batch 4500, loss[loss=0.2141, simple_loss=0.2971, pruned_loss=0.0656, over 8479.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2864, pruned_loss=0.06149, over 1615705.01 frames. ], batch size: 28, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:00:46,572 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174249.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:00:56,995 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 04:01:17,050 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.524e+02 3.306e+02 4.354e+02 7.569e+02, threshold=6.612e+02, percent-clipped=6.0 +2023-02-07 04:01:17,070 INFO [train.py:901] (1/4) Epoch 22, batch 4550, loss[loss=0.243, simple_loss=0.3102, pruned_loss=0.08786, over 6971.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2877, pruned_loss=0.06222, over 1616949.70 frames. ], batch size: 73, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:23,308 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:01:28,573 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:01:44,046 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. 
limit=2.0 +2023-02-07 04:01:51,062 INFO [train.py:901] (1/4) Epoch 22, batch 4600, loss[loss=0.1769, simple_loss=0.2589, pruned_loss=0.04744, over 7693.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.287, pruned_loss=0.06167, over 1615342.65 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:54,750 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:12,033 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:15,443 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:16,890 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:25,988 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.375e+02 2.973e+02 3.873e+02 1.031e+03, threshold=5.946e+02, percent-clipped=3.0 +2023-02-07 04:02:26,008 INFO [train.py:901] (1/4) Epoch 22, batch 4650, loss[loss=0.1662, simple_loss=0.2525, pruned_loss=0.03991, over 7921.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2864, pruned_loss=0.06135, over 1614660.38 frames. ], batch size: 20, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:02:31,945 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 04:02:37,942 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:02,512 INFO [train.py:901] (1/4) Epoch 22, batch 4700, loss[loss=0.2367, simple_loss=0.3284, pruned_loss=0.07251, over 8334.00 frames. ], tot_loss[loss=0.204, simple_loss=0.286, pruned_loss=0.06097, over 1608879.52 frames. ], batch size: 26, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:21,332 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6299, 1.8415, 1.9478, 1.3238, 2.0407, 1.3903, 0.5887, 1.8491], + device='cuda:1'), covar=tensor([0.0597, 0.0368, 0.0325, 0.0565, 0.0422, 0.0858, 0.0887, 0.0317], + device='cuda:1'), in_proj_covar=tensor([0.0449, 0.0387, 0.0343, 0.0442, 0.0372, 0.0528, 0.0385, 0.0415], + device='cuda:1'), out_proj_covar=tensor([1.2022e-04, 1.0130e-04, 9.0373e-05, 1.1635e-04, 9.7755e-05, 1.4885e-04, + 1.0387e-04, 1.0982e-04], device='cuda:1') +2023-02-07 04:03:37,053 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.374e+02 2.923e+02 3.899e+02 9.329e+02, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 04:03:37,073 INFO [train.py:901] (1/4) Epoch 22, batch 4750, loss[loss=0.2009, simple_loss=0.287, pruned_loss=0.05744, over 7821.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2855, pruned_loss=0.06043, over 1608633.22 frames. 
], batch size: 20, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:37,263 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:38,614 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:43,328 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:59,886 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1865, 1.0512, 1.2618, 1.0113, 1.0073, 1.2930, 0.1087, 0.8851], + device='cuda:1'), covar=tensor([0.1679, 0.1507, 0.0505, 0.0888, 0.2884, 0.0593, 0.2222, 0.1348], + device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0198, 0.0128, 0.0220, 0.0269, 0.0136, 0.0171, 0.0194], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 04:04:04,468 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 04:04:06,505 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 04:04:12,381 INFO [train.py:901] (1/4) Epoch 22, batch 4800, loss[loss=0.2098, simple_loss=0.3014, pruned_loss=0.0591, over 8725.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2866, pruned_loss=0.06087, over 1605226.01 frames. ], batch size: 30, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:19,970 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.71 vs. limit=5.0 +2023-02-07 04:04:22,158 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7902, 1.7172, 2.5063, 1.5153, 1.2799, 2.4682, 0.5345, 1.4308], + device='cuda:1'), covar=tensor([0.1790, 0.1404, 0.0340, 0.1441, 0.2962, 0.0396, 0.2398, 0.1557], + device='cuda:1'), in_proj_covar=tensor([0.0191, 0.0198, 0.0129, 0.0221, 0.0270, 0.0137, 0.0172, 0.0195], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 04:04:27,165 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 04:04:46,095 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.398e+02 2.995e+02 3.860e+02 8.125e+02, threshold=5.990e+02, percent-clipped=3.0 +2023-02-07 04:04:46,115 INFO [train.py:901] (1/4) Epoch 22, batch 4850, loss[loss=0.1996, simple_loss=0.2742, pruned_loss=0.06251, over 7687.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06153, over 1606993.83 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:55,435 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 04:05:02,312 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:20,309 INFO [train.py:901] (1/4) Epoch 22, batch 4900, loss[loss=0.1791, simple_loss=0.2546, pruned_loss=0.05183, over 7430.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2871, pruned_loss=0.06139, over 1604715.01 frames. 
], batch size: 17, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:05:23,130 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:29,309 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:56,491 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.582e+02 3.121e+02 3.821e+02 7.682e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-07 04:05:56,518 INFO [train.py:901] (1/4) Epoch 22, batch 4950, loss[loss=0.1707, simple_loss=0.2499, pruned_loss=0.04582, over 7700.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2875, pruned_loss=0.0617, over 1601395.31 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:17,805 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:21,291 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3858, 2.2397, 1.7337, 2.1235, 1.8639, 1.2939, 1.8182, 1.9315], + device='cuda:1'), covar=tensor([0.1476, 0.0441, 0.1355, 0.0562, 0.0839, 0.1825, 0.1069, 0.0879], + device='cuda:1'), in_proj_covar=tensor([0.0350, 0.0231, 0.0331, 0.0305, 0.0297, 0.0336, 0.0339, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:06:27,439 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3669, 1.4898, 1.3880, 1.7914, 0.7132, 1.2633, 1.3047, 1.4602], + device='cuda:1'), covar=tensor([0.0851, 0.0724, 0.0951, 0.0484, 0.1109, 0.1234, 0.0706, 0.0714], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0198, 0.0245, 0.0216, 0.0208, 0.0248, 0.0250, 0.0210], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:06:30,666 INFO [train.py:901] (1/4) Epoch 22, batch 5000, loss[loss=0.1954, simple_loss=0.2808, pruned_loss=0.05506, over 8035.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2877, pruned_loss=0.06128, over 1609387.40 frames. 
], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:35,039 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,260 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,425 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:44,706 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:51,033 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:54,597 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174773.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:56,004 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:07:07,684 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.477e+02 2.928e+02 3.612e+02 7.754e+02, threshold=5.856e+02, percent-clipped=3.0 +2023-02-07 04:07:07,711 INFO [train.py:901] (1/4) Epoch 22, batch 5050, loss[loss=0.1695, simple_loss=0.2537, pruned_loss=0.04263, over 7531.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2876, pruned_loss=0.06133, over 1606719.71 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:16,964 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1571, 1.3263, 1.6109, 1.2363, 0.7393, 1.4082, 1.2513, 1.0188], + device='cuda:1'), covar=tensor([0.0598, 0.1271, 0.1570, 0.1453, 0.0576, 0.1463, 0.0674, 0.0725], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0164, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:07:27,839 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4149, 2.3223, 3.1683, 2.6031, 3.0394, 2.5413, 2.2045, 1.9115], + device='cuda:1'), covar=tensor([0.5511, 0.5080, 0.2010, 0.3759, 0.2467, 0.2754, 0.1852, 0.5359], + device='cuda:1'), in_proj_covar=tensor([0.0935, 0.0975, 0.0800, 0.0941, 0.0989, 0.0888, 0.0745, 0.0821], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 04:07:34,470 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 04:07:42,661 INFO [train.py:901] (1/4) Epoch 22, batch 5100, loss[loss=0.2317, simple_loss=0.316, pruned_loss=0.07372, over 8293.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.288, pruned_loss=0.06117, over 1612665.67 frames. 
], batch size: 23, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:58,167 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:03,624 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:17,820 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.498e+02 3.130e+02 3.757e+02 7.363e+02, threshold=6.259e+02, percent-clipped=3.0 +2023-02-07 04:08:17,841 INFO [train.py:901] (1/4) Epoch 22, batch 5150, loss[loss=0.2295, simple_loss=0.3037, pruned_loss=0.0777, over 8505.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.06106, over 1614380.83 frames. ], batch size: 28, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:21,142 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:52,070 INFO [train.py:901] (1/4) Epoch 22, batch 5200, loss[loss=0.2027, simple_loss=0.2777, pruned_loss=0.06386, over 6781.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2884, pruned_loss=0.06195, over 1613613.68 frames. ], batch size: 71, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:52,925 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2538, 1.7068, 4.3934, 2.0719, 3.9381, 3.6974, 4.0368, 3.8979], + device='cuda:1'), covar=tensor([0.0626, 0.4578, 0.0567, 0.3821, 0.1089, 0.1009, 0.0600, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0628, 0.0646, 0.0695, 0.0627, 0.0708, 0.0603, 0.0606, 0.0677], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:08:58,259 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2182, 4.1628, 3.7656, 1.9057, 3.6859, 3.7931, 3.8068, 3.6190], + device='cuda:1'), covar=tensor([0.0747, 0.0571, 0.1081, 0.5008, 0.0926, 0.1095, 0.1221, 0.0907], + device='cuda:1'), in_proj_covar=tensor([0.0522, 0.0433, 0.0432, 0.0536, 0.0424, 0.0444, 0.0424, 0.0387], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:09:26,985 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.432e+02 2.854e+02 3.739e+02 7.258e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-07 04:09:27,005 INFO [train.py:901] (1/4) Epoch 22, batch 5250, loss[loss=0.1901, simple_loss=0.273, pruned_loss=0.05361, over 7421.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2883, pruned_loss=0.06186, over 1609480.70 frames. ], batch size: 17, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:09:31,814 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 04:09:44,046 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:09:49,542 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:01,589 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:02,004 INFO [train.py:901] (1/4) Epoch 22, batch 5300, loss[loss=0.2042, simple_loss=0.2849, pruned_loss=0.06174, over 8026.00 frames. 
], tot_loss[loss=0.2068, simple_loss=0.2886, pruned_loss=0.06252, over 1612653.02 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:07,099 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:19,119 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:35,763 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.519e+02 3.149e+02 3.909e+02 1.075e+03, threshold=6.297e+02, percent-clipped=6.0 +2023-02-07 04:10:35,783 INFO [train.py:901] (1/4) Epoch 22, batch 5350, loss[loss=0.1775, simple_loss=0.2552, pruned_loss=0.04993, over 7540.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2883, pruned_loss=0.06243, over 1607328.12 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:57,900 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:11,832 INFO [train.py:901] (1/4) Epoch 22, batch 5400, loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.06172, over 8470.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2883, pruned_loss=0.06258, over 1611015.93 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:11:13,259 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8171, 3.7595, 3.4029, 1.9076, 3.3089, 3.5101, 3.3924, 3.4128], + device='cuda:1'), covar=tensor([0.0870, 0.0676, 0.1130, 0.4773, 0.0936, 0.1141, 0.1389, 0.0842], + device='cuda:1'), in_proj_covar=tensor([0.0521, 0.0432, 0.0430, 0.0533, 0.0422, 0.0442, 0.0422, 0.0384], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:11:14,644 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:22,727 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:39,695 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:46,267 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.426e+02 2.833e+02 4.034e+02 1.686e+03, threshold=5.665e+02, percent-clipped=5.0 +2023-02-07 04:11:46,288 INFO [train.py:901] (1/4) Epoch 22, batch 5450, loss[loss=0.1884, simple_loss=0.2615, pruned_loss=0.05761, over 8240.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2878, pruned_loss=0.06201, over 1610720.26 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:09,778 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:12:20,249 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 04:12:22,321 INFO [train.py:901] (1/4) Epoch 22, batch 5500, loss[loss=0.1766, simple_loss=0.2594, pruned_loss=0.04693, over 5124.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.06166, over 1607476.38 frames. 
], batch size: 11, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:22,568 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4480, 1.6024, 2.1039, 1.3692, 1.4756, 1.7087, 1.4658, 1.4556], + device='cuda:1'), covar=tensor([0.1822, 0.2295, 0.0997, 0.4319, 0.1848, 0.3134, 0.2289, 0.2046], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0605, 0.0555, 0.0643, 0.0646, 0.0591, 0.0535, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:12:40,056 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 04:12:56,620 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.376e+02 2.848e+02 3.508e+02 8.289e+02, threshold=5.697e+02, percent-clipped=6.0 +2023-02-07 04:12:56,641 INFO [train.py:901] (1/4) Epoch 22, batch 5550, loss[loss=0.1885, simple_loss=0.261, pruned_loss=0.05801, over 7438.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2874, pruned_loss=0.06204, over 1606403.27 frames. ], batch size: 17, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:31,799 INFO [train.py:901] (1/4) Epoch 22, batch 5600, loss[loss=0.1823, simple_loss=0.2661, pruned_loss=0.0492, over 8102.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2887, pruned_loss=0.06195, over 1610637.81 frames. ], batch size: 21, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:46,074 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8480, 2.2693, 3.7917, 1.6856, 2.8733, 2.3229, 1.8558, 2.8958], + device='cuda:1'), covar=tensor([0.1812, 0.2422, 0.0890, 0.4209, 0.1799, 0.2932, 0.2279, 0.2209], + device='cuda:1'), in_proj_covar=tensor([0.0525, 0.0604, 0.0554, 0.0641, 0.0645, 0.0589, 0.0535, 0.0629], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:13:49,361 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2003, 1.0936, 1.3040, 1.0826, 1.0469, 1.3271, 0.0750, 0.9600], + device='cuda:1'), covar=tensor([0.1555, 0.1276, 0.0473, 0.0665, 0.2510, 0.0541, 0.1960, 0.1206], + device='cuda:1'), in_proj_covar=tensor([0.0191, 0.0197, 0.0129, 0.0220, 0.0267, 0.0137, 0.0168, 0.0194], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 04:13:50,743 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.40 vs. limit=5.0 +2023-02-07 04:14:03,194 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4669, 1.5156, 1.4271, 1.8404, 0.7007, 1.3385, 1.2879, 1.5269], + device='cuda:1'), covar=tensor([0.0831, 0.0751, 0.1044, 0.0544, 0.1132, 0.1288, 0.0799, 0.0679], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0198, 0.0247, 0.0216, 0.0208, 0.0247, 0.0252, 0.0209], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:14:03,828 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:06,409 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.532e+02 3.141e+02 4.135e+02 1.836e+03, threshold=6.283e+02, percent-clipped=10.0 +2023-02-07 04:14:06,429 INFO [train.py:901] (1/4) Epoch 22, batch 5650, loss[loss=0.17, simple_loss=0.2562, pruned_loss=0.04186, over 8237.00 frames. 
], tot_loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.0616, over 1609302.10 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:09,947 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7032, 2.1504, 3.2485, 1.5722, 2.6243, 2.0483, 1.8060, 2.5843], + device='cuda:1'), covar=tensor([0.1953, 0.2565, 0.0889, 0.4452, 0.1778, 0.3264, 0.2435, 0.2248], + device='cuda:1'), in_proj_covar=tensor([0.0525, 0.0604, 0.0553, 0.0640, 0.0645, 0.0589, 0.0535, 0.0628], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:14:20,099 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:22,665 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 04:14:37,557 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:40,743 INFO [train.py:901] (1/4) Epoch 22, batch 5700, loss[loss=0.202, simple_loss=0.2817, pruned_loss=0.06115, over 8026.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06201, over 1610693.46 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:56,158 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:15,528 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.452e+02 2.878e+02 3.661e+02 5.836e+02, threshold=5.755e+02, percent-clipped=0.0 +2023-02-07 04:15:15,548 INFO [train.py:901] (1/4) Epoch 22, batch 5750, loss[loss=0.2151, simple_loss=0.3036, pruned_loss=0.06331, over 8187.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06186, over 1615317.89 frames. ], batch size: 23, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:15:20,411 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:22,436 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:27,232 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 04:15:50,252 INFO [train.py:901] (1/4) Epoch 22, batch 5800, loss[loss=0.1758, simple_loss=0.2464, pruned_loss=0.05259, over 7437.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2881, pruned_loss=0.06231, over 1612904.69 frames. ], batch size: 17, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:09,022 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:16:25,894 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 2.280e+02 2.739e+02 3.457e+02 6.413e+02, threshold=5.479e+02, percent-clipped=3.0 +2023-02-07 04:16:25,914 INFO [train.py:901] (1/4) Epoch 22, batch 5850, loss[loss=0.2113, simple_loss=0.3018, pruned_loss=0.0604, over 8516.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2873, pruned_loss=0.06182, over 1612236.50 frames. 
], batch size: 28, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:42,238 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:00,352 INFO [train.py:901] (1/4) Epoch 22, batch 5900, loss[loss=0.1672, simple_loss=0.2526, pruned_loss=0.04091, over 7801.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2882, pruned_loss=0.06217, over 1611737.13 frames. ], batch size: 20, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:17:29,304 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:35,263 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.520e+02 3.043e+02 3.699e+02 9.671e+02, threshold=6.086e+02, percent-clipped=7.0 +2023-02-07 04:17:35,284 INFO [train.py:901] (1/4) Epoch 22, batch 5950, loss[loss=0.2035, simple_loss=0.2925, pruned_loss=0.05725, over 8293.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2878, pruned_loss=0.06184, over 1611072.31 frames. ], batch size: 23, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:00,062 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175728.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:02,743 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:09,573 INFO [train.py:901] (1/4) Epoch 22, batch 6000, loss[loss=0.1963, simple_loss=0.2784, pruned_loss=0.05711, over 7656.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2873, pruned_loss=0.06164, over 1609779.45 frames. ], batch size: 19, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:09,573 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 04:18:21,635 INFO [train.py:935] (1/4) Epoch 22, validation: loss=0.1729, simple_loss=0.2732, pruned_loss=0.03632, over 944034.00 frames. +2023-02-07 04:18:21,635 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 04:18:28,878 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2364, 2.0950, 2.7489, 2.3454, 2.7478, 2.2534, 2.1088, 1.6716], + device='cuda:1'), covar=tensor([0.5538, 0.4931, 0.1997, 0.3506, 0.2344, 0.3104, 0.1919, 0.5268], + device='cuda:1'), in_proj_covar=tensor([0.0937, 0.0977, 0.0802, 0.0942, 0.0992, 0.0892, 0.0745, 0.0824], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 04:18:31,484 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:36,998 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9608, 2.0997, 1.7263, 2.6971, 1.1833, 1.6114, 1.8194, 2.0695], + device='cuda:1'), covar=tensor([0.0781, 0.0795, 0.0956, 0.0353, 0.1129, 0.1251, 0.0862, 0.0805], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0197, 0.0246, 0.0216, 0.0207, 0.0246, 0.0251, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:18:56,217 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.478e+02 2.934e+02 3.623e+02 7.032e+02, threshold=5.869e+02, percent-clipped=2.0 +2023-02-07 04:18:56,238 INFO [train.py:901] (1/4) Epoch 22, batch 6050, loss[loss=0.2237, simple_loss=0.3123, pruned_loss=0.06756, over 8474.00 frames. 
], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06193, over 1612184.44 frames. ], batch size: 49, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:06,541 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0046, 2.2716, 1.7612, 2.7845, 1.3042, 1.5902, 1.9167, 2.2405], + device='cuda:1'), covar=tensor([0.0710, 0.0703, 0.0919, 0.0358, 0.1094, 0.1275, 0.0887, 0.0699], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0197, 0.0246, 0.0216, 0.0206, 0.0246, 0.0251, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:19:31,868 INFO [train.py:901] (1/4) Epoch 22, batch 6100, loss[loss=0.212, simple_loss=0.2853, pruned_loss=0.06936, over 6848.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.06187, over 1614834.71 frames. ], batch size: 71, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:32,675 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:35,616 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:51,982 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:52,717 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:56,553 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 04:20:07,199 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.365e+02 2.974e+02 3.880e+02 6.577e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 04:20:07,219 INFO [train.py:901] (1/4) Epoch 22, batch 6150, loss[loss=0.2673, simple_loss=0.3391, pruned_loss=0.09778, over 8558.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06083, over 1614275.78 frames. ], batch size: 31, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:10,661 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:11,996 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:20,145 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2152, 2.0128, 2.6343, 2.1814, 2.5890, 2.2654, 2.0447, 1.3074], + device='cuda:1'), covar=tensor([0.5396, 0.4650, 0.1924, 0.3677, 0.2441, 0.2959, 0.1921, 0.5313], + device='cuda:1'), in_proj_covar=tensor([0.0943, 0.0981, 0.0806, 0.0946, 0.0997, 0.0896, 0.0748, 0.0826], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 04:20:40,362 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:41,485 INFO [train.py:901] (1/4) Epoch 22, batch 6200, loss[loss=0.226, simple_loss=0.3101, pruned_loss=0.07094, over 8448.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2856, pruned_loss=0.06017, over 1613323.27 frames. 
], batch size: 27, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:52,906 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:56,910 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:58,222 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175965.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:21:06,753 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3639, 2.2854, 1.6656, 2.1065, 1.9719, 1.4056, 1.7911, 1.7978], + device='cuda:1'), covar=tensor([0.1630, 0.0440, 0.1403, 0.0645, 0.0747, 0.1777, 0.1112, 0.1049], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0233, 0.0335, 0.0310, 0.0299, 0.0340, 0.0345, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:21:15,650 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.329e+02 2.882e+02 3.634e+02 1.217e+03, threshold=5.765e+02, percent-clipped=6.0 +2023-02-07 04:21:15,671 INFO [train.py:901] (1/4) Epoch 22, batch 6250, loss[loss=0.2205, simple_loss=0.3064, pruned_loss=0.06729, over 8245.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06106, over 1617261.67 frames. ], batch size: 24, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:21:51,347 INFO [train.py:901] (1/4) Epoch 22, batch 6300, loss[loss=0.2121, simple_loss=0.2901, pruned_loss=0.06703, over 7792.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2858, pruned_loss=0.06064, over 1617176.25 frames. ], batch size: 20, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:12,730 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176072.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:26,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.297e+02 2.795e+02 3.577e+02 6.374e+02, threshold=5.590e+02, percent-clipped=1.0 +2023-02-07 04:22:26,706 INFO [train.py:901] (1/4) Epoch 22, batch 6350, loss[loss=0.1994, simple_loss=0.2864, pruned_loss=0.05622, over 7975.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2856, pruned_loss=0.06015, over 1615571.03 frames. ], batch size: 21, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:34,421 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,215 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,826 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:01,405 INFO [train.py:901] (1/4) Epoch 22, batch 6400, loss[loss=0.183, simple_loss=0.2846, pruned_loss=0.04073, over 8641.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2867, pruned_loss=0.06079, over 1616034.85 frames. 
], batch size: 34, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:08,435 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:33,468 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176187.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:36,652 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.375e+02 2.869e+02 3.334e+02 7.002e+02, threshold=5.738e+02, percent-clipped=1.0 +2023-02-07 04:23:36,672 INFO [train.py:901] (1/4) Epoch 22, batch 6450, loss[loss=0.1735, simple_loss=0.2558, pruned_loss=0.04559, over 7804.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06054, over 1613449.13 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:52,615 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:54,745 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-07 04:24:05,442 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-07 04:24:09,913 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:11,687 INFO [train.py:901] (1/4) Epoch 22, batch 6500, loss[loss=0.2244, simple_loss=0.3066, pruned_loss=0.07108, over 8369.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2864, pruned_loss=0.0602, over 1619742.03 frames. ], batch size: 24, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:12,449 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:23,888 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176260.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:27,371 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2682, 2.1145, 1.7778, 1.9178, 1.6520, 1.4153, 1.6067, 1.6683], + device='cuda:1'), covar=tensor([0.1238, 0.0399, 0.1223, 0.0537, 0.0768, 0.1547, 0.0979, 0.0810], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0234, 0.0335, 0.0311, 0.0300, 0.0342, 0.0347, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:24:32,064 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5074, 2.4369, 1.7493, 2.2071, 1.9834, 1.4728, 1.8944, 2.0270], + device='cuda:1'), covar=tensor([0.1524, 0.0438, 0.1359, 0.0612, 0.0743, 0.1660, 0.1061, 0.1039], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0233, 0.0334, 0.0311, 0.0300, 0.0341, 0.0347, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:24:34,066 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:45,633 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.325e+02 2.725e+02 3.404e+02 5.159e+02, threshold=5.450e+02, percent-clipped=0.0 +2023-02-07 04:24:45,652 INFO [train.py:901] (1/4) Epoch 22, batch 6550, loss[loss=0.2385, simple_loss=0.2991, pruned_loss=0.08896, over 7973.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.286, pruned_loss=0.05967, over 1620382.40 frames. 
], batch size: 21, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:55,357 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6592, 2.1737, 4.2404, 1.5490, 3.2394, 2.2615, 1.6955, 3.2008], + device='cuda:1'), covar=tensor([0.1799, 0.2618, 0.0765, 0.4281, 0.1545, 0.2937, 0.2308, 0.1906], + device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0608, 0.0558, 0.0646, 0.0650, 0.0596, 0.0540, 0.0635], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:24:57,260 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176307.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:04,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2909, 1.9867, 2.5498, 2.0728, 2.4402, 2.2609, 2.1388, 1.3203], + device='cuda:1'), covar=tensor([0.5233, 0.4818, 0.1959, 0.3734, 0.2609, 0.2985, 0.1798, 0.5366], + device='cuda:1'), in_proj_covar=tensor([0.0941, 0.0979, 0.0806, 0.0945, 0.0997, 0.0896, 0.0749, 0.0828], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 04:25:09,353 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 04:25:21,083 INFO [train.py:901] (1/4) Epoch 22, batch 6600, loss[loss=0.2032, simple_loss=0.2781, pruned_loss=0.06416, over 7788.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2868, pruned_loss=0.0604, over 1617903.44 frames. ], batch size: 19, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:25:29,274 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 04:25:32,749 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:55,381 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.510e+02 3.110e+02 4.060e+02 7.968e+02, threshold=6.221e+02, percent-clipped=4.0 +2023-02-07 04:25:55,402 INFO [train.py:901] (1/4) Epoch 22, batch 6650, loss[loss=0.2157, simple_loss=0.3057, pruned_loss=0.0629, over 8619.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2869, pruned_loss=0.06029, over 1619122.67 frames. ], batch size: 34, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:25:56,917 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6986, 2.2817, 1.8132, 4.0275, 1.6398, 1.5786, 2.2934, 2.7202], + device='cuda:1'), covar=tensor([0.1597, 0.1272, 0.1923, 0.0282, 0.1367, 0.1814, 0.1246, 0.0929], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0243, 0.0214, 0.0205, 0.0245, 0.0249, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:26:17,167 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:31,230 INFO [train.py:901] (1/4) Epoch 22, batch 6700, loss[loss=0.2127, simple_loss=0.2965, pruned_loss=0.06444, over 8441.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2864, pruned_loss=0.06009, over 1616460.94 frames. 
], batch size: 27, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:26:32,111 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:49,612 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:00,364 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:05,580 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.672e+02 3.290e+02 4.002e+02 8.131e+02, threshold=6.579e+02, percent-clipped=6.0 +2023-02-07 04:27:05,600 INFO [train.py:901] (1/4) Epoch 22, batch 6750, loss[loss=0.1996, simple_loss=0.2802, pruned_loss=0.05951, over 8090.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2855, pruned_loss=0.05984, over 1613863.38 frames. ], batch size: 21, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:41,484 INFO [train.py:901] (1/4) Epoch 22, batch 6800, loss[loss=0.2229, simple_loss=0.3006, pruned_loss=0.07263, over 8464.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2869, pruned_loss=0.06087, over 1613960.20 frames. ], batch size: 27, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:44,996 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 04:28:10,349 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.46 vs. limit=2.0 +2023-02-07 04:28:16,782 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.317e+02 3.026e+02 3.783e+02 8.757e+02, threshold=6.052e+02, percent-clipped=1.0 +2023-02-07 04:28:16,802 INFO [train.py:901] (1/4) Epoch 22, batch 6850, loss[loss=0.1798, simple_loss=0.2499, pruned_loss=0.05488, over 7549.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2867, pruned_loss=0.06082, over 1615211.67 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:24,732 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:31,628 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:34,697 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 04:28:34,752 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176619.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:48,422 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:50,268 INFO [train.py:901] (1/4) Epoch 22, batch 6900, loss[loss=0.2337, simple_loss=0.318, pruned_loss=0.07468, over 8458.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06133, over 1614267.74 frames. 
], batch size: 27, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:51,045 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:17,275 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:20,584 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:26,754 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 2.439e+02 3.078e+02 3.806e+02 5.995e+02, threshold=6.157e+02, percent-clipped=0.0 +2023-02-07 04:29:26,774 INFO [train.py:901] (1/4) Epoch 22, batch 6950, loss[loss=0.1693, simple_loss=0.2513, pruned_loss=0.04359, over 8085.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.0607, over 1614177.50 frames. ], batch size: 21, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:29:35,488 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176703.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:44,270 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 04:29:46,552 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176719.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:48,145 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-02-07 04:29:56,849 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:30:02,057 INFO [train.py:901] (1/4) Epoch 22, batch 7000, loss[loss=0.1515, simple_loss=0.2345, pruned_loss=0.03419, over 7257.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06092, over 1609914.51 frames. ], batch size: 16, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:30:03,961 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 04:30:37,821 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.923e+02 3.703e+02 8.900e+02, threshold=5.847e+02, percent-clipped=5.0 +2023-02-07 04:30:37,842 INFO [train.py:901] (1/4) Epoch 22, batch 7050, loss[loss=0.211, simple_loss=0.2897, pruned_loss=0.06612, over 8727.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06065, over 1614336.27 frames. ], batch size: 30, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:03,290 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:11,273 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0831, 1.5785, 1.3998, 1.4950, 1.3312, 1.2096, 1.3041, 1.2694], + device='cuda:1'), covar=tensor([0.1228, 0.0473, 0.1326, 0.0608, 0.0835, 0.1628, 0.0964, 0.0864], + device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0233, 0.0337, 0.0312, 0.0300, 0.0344, 0.0347, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:31:12,380 INFO [train.py:901] (1/4) Epoch 22, batch 7100, loss[loss=0.1681, simple_loss=0.2529, pruned_loss=0.04162, over 7912.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2868, pruned_loss=0.06096, over 1615719.65 frames. 
], batch size: 20, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:31,798 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:46,090 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.316e+02 2.836e+02 3.633e+02 7.093e+02, threshold=5.673e+02, percent-clipped=3.0 +2023-02-07 04:31:46,110 INFO [train.py:901] (1/4) Epoch 22, batch 7150, loss[loss=0.2845, simple_loss=0.3536, pruned_loss=0.1077, over 8458.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2871, pruned_loss=0.06121, over 1615008.57 frames. ], batch size: 27, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:51,637 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 04:32:22,264 INFO [train.py:901] (1/4) Epoch 22, batch 7200, loss[loss=0.2277, simple_loss=0.3255, pruned_loss=0.06495, over 8189.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.06218, over 1613561.94 frames. ], batch size: 23, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:23,131 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:44,910 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176975.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:52,841 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:54,984 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:56,137 INFO [train.py:901] (1/4) Epoch 22, batch 7250, loss[loss=0.2676, simple_loss=0.3365, pruned_loss=0.09934, over 7267.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2889, pruned_loss=0.0622, over 1614413.16 frames. 
], batch size: 71, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:56,787 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.385e+02 2.852e+02 3.441e+02 7.839e+02, threshold=5.703e+02, percent-clipped=2.0 +2023-02-07 04:33:02,404 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:10,522 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1849, 4.1175, 3.7868, 2.1372, 3.6567, 3.7961, 3.7087, 3.6371], + device='cuda:1'), covar=tensor([0.0747, 0.0577, 0.1054, 0.4497, 0.0933, 0.1085, 0.1383, 0.0893], + device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0438, 0.0432, 0.0542, 0.0427, 0.0448, 0.0428, 0.0388], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:33:14,104 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177015.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:15,422 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8713, 1.5305, 3.5626, 1.7058, 2.4999, 3.8536, 3.9244, 3.3470], + device='cuda:1'), covar=tensor([0.1199, 0.1742, 0.0249, 0.1833, 0.0947, 0.0199, 0.0437, 0.0497], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0319, 0.0284, 0.0313, 0.0309, 0.0265, 0.0418, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 04:33:21,899 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177027.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:31,982 INFO [train.py:901] (1/4) Epoch 22, batch 7300, loss[loss=0.1879, simple_loss=0.2666, pruned_loss=0.05456, over 7520.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2884, pruned_loss=0.06194, over 1611207.15 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:06,489 INFO [train.py:901] (1/4) Epoch 22, batch 7350, loss[loss=0.2257, simple_loss=0.2995, pruned_loss=0.07592, over 7922.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2886, pruned_loss=0.06226, over 1605444.44 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:07,156 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.532e+02 3.310e+02 4.342e+02 9.656e+02, threshold=6.621e+02, percent-clipped=7.0 +2023-02-07 04:34:13,468 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:26,059 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 04:34:31,743 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:33,081 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:42,503 INFO [train.py:901] (1/4) Epoch 22, batch 7400, loss[loss=0.1921, simple_loss=0.2894, pruned_loss=0.04745, over 8471.00 frames. ], tot_loss[loss=0.206, simple_loss=0.288, pruned_loss=0.06206, over 1609140.00 frames. 
], batch size: 25, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:42,680 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:42,692 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7337, 1.9338, 1.6882, 2.3682, 1.0882, 1.5172, 1.7681, 1.8850], + device='cuda:1'), covar=tensor([0.0832, 0.0721, 0.0923, 0.0412, 0.1036, 0.1215, 0.0709, 0.0769], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0195, 0.0243, 0.0212, 0.0206, 0.0245, 0.0248, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:34:47,998 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 04:35:09,031 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1541, 3.0428, 2.8813, 1.6230, 2.7654, 2.8429, 2.8017, 2.7980], + device='cuda:1'), covar=tensor([0.1237, 0.0984, 0.1270, 0.4813, 0.1218, 0.1581, 0.1714, 0.1081], + device='cuda:1'), in_proj_covar=tensor([0.0530, 0.0440, 0.0433, 0.0542, 0.0428, 0.0449, 0.0429, 0.0389], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:35:16,518 INFO [train.py:901] (1/4) Epoch 22, batch 7450, loss[loss=0.1762, simple_loss=0.2582, pruned_loss=0.04705, over 7810.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2877, pruned_loss=0.0622, over 1608146.86 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:35:17,193 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.327e+02 2.972e+02 3.761e+02 7.589e+02, threshold=5.944e+02, percent-clipped=3.0 +2023-02-07 04:35:21,631 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:27,661 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 04:35:32,256 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:38,457 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:51,509 INFO [train.py:901] (1/4) Epoch 22, batch 7500, loss[loss=0.2006, simple_loss=0.2964, pruned_loss=0.05243, over 8779.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.288, pruned_loss=0.06208, over 1613385.34 frames. ], batch size: 50, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:35:58,950 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8667, 1.7164, 1.8780, 1.8024, 0.9798, 1.5662, 2.2093, 2.2216], + device='cuda:1'), covar=tensor([0.0453, 0.1222, 0.1674, 0.1326, 0.0626, 0.1500, 0.0627, 0.0565], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0162, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:36:23,738 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.80 vs. limit=5.0 +2023-02-07 04:36:25,359 INFO [train.py:901] (1/4) Epoch 22, batch 7550, loss[loss=0.1804, simple_loss=0.2793, pruned_loss=0.04076, over 8475.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06155, over 1611350.36 frames. 
], batch size: 25, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:36:26,049 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.508e+02 3.019e+02 3.781e+02 7.904e+02, threshold=6.039e+02, percent-clipped=4.0 +2023-02-07 04:36:43,676 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4128, 1.2496, 2.1328, 1.1742, 1.9976, 2.2940, 2.4000, 1.9606], + device='cuda:1'), covar=tensor([0.0972, 0.1342, 0.0487, 0.1900, 0.0904, 0.0340, 0.0648, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0321, 0.0285, 0.0314, 0.0309, 0.0266, 0.0420, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 04:36:51,646 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177330.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:36:59,624 INFO [train.py:901] (1/4) Epoch 22, batch 7600, loss[loss=0.1595, simple_loss=0.2426, pruned_loss=0.03822, over 7921.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.288, pruned_loss=0.06173, over 1615051.94 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:11,460 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:29,851 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:35,779 INFO [train.py:901] (1/4) Epoch 22, batch 7650, loss[loss=0.2059, simple_loss=0.2877, pruned_loss=0.06206, over 8498.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06141, over 1615188.02 frames. ], batch size: 28, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:36,440 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.559e+02 3.074e+02 4.315e+02 1.263e+03, threshold=6.148e+02, percent-clipped=10.0 +2023-02-07 04:37:39,975 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:57,390 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177423.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:00,823 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6679, 2.6917, 1.9387, 2.3711, 2.4001, 1.6648, 2.2345, 2.2903], + device='cuda:1'), covar=tensor([0.1603, 0.0408, 0.1199, 0.0730, 0.0760, 0.1565, 0.1094, 0.1132], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0232, 0.0335, 0.0310, 0.0299, 0.0341, 0.0345, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:38:09,936 INFO [train.py:901] (1/4) Epoch 22, batch 7700, loss[loss=0.1884, simple_loss=0.2601, pruned_loss=0.05836, over 7708.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2863, pruned_loss=0.06109, over 1612231.08 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:30,444 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:31,729 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177473.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:38,598 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-07 04:38:41,834 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-07 04:38:45,982 INFO [train.py:901] (1/4) Epoch 22, batch 7750, loss[loss=0.2124, simple_loss=0.3039, pruned_loss=0.06049, over 8257.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2858, pruned_loss=0.06074, over 1609360.91 frames. ], batch size: 24, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:46,657 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.486e+02 3.125e+02 4.090e+02 1.041e+03, threshold=6.251e+02, percent-clipped=8.0 +2023-02-07 04:39:10,513 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6198, 1.8575, 1.9909, 1.1982, 2.1004, 1.3536, 0.6389, 1.7583], + device='cuda:1'), covar=tensor([0.0598, 0.0387, 0.0319, 0.0708, 0.0370, 0.1072, 0.0901, 0.0334], + device='cuda:1'), in_proj_covar=tensor([0.0452, 0.0392, 0.0344, 0.0446, 0.0378, 0.0534, 0.0390, 0.0422], + device='cuda:1'), out_proj_covar=tensor([1.2108e-04, 1.0291e-04, 9.0359e-05, 1.1744e-04, 9.9402e-05, 1.5060e-04, + 1.0499e-04, 1.1163e-04], device='cuda:1') +2023-02-07 04:39:20,411 INFO [train.py:901] (1/4) Epoch 22, batch 7800, loss[loss=0.1671, simple_loss=0.2508, pruned_loss=0.04171, over 7777.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06116, over 1615154.50 frames. ], batch size: 19, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:39,773 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177571.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,858 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,892 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:50,725 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-07 04:39:51,145 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:53,646 INFO [train.py:901] (1/4) Epoch 22, batch 7850, loss[loss=0.1765, simple_loss=0.269, pruned_loss=0.04195, over 8031.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2859, pruned_loss=0.06066, over 1614607.84 frames. ], batch size: 22, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:54,299 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.387e+02 2.753e+02 3.373e+02 6.542e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 04:40:06,555 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177611.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:40:26,677 INFO [train.py:901] (1/4) Epoch 22, batch 7900, loss[loss=0.2267, simple_loss=0.3136, pruned_loss=0.06993, over 8291.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2858, pruned_loss=0.06074, over 1613458.40 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:40:53,550 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:41:00,029 INFO [train.py:901] (1/4) Epoch 22, batch 7950, loss[loss=0.1824, simple_loss=0.2665, pruned_loss=0.04911, over 7916.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06085, over 1616675.64 frames. 
], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:00,683 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.424e+02 2.966e+02 3.766e+02 9.319e+02, threshold=5.931e+02, percent-clipped=7.0 +2023-02-07 04:41:06,666 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7098, 5.7128, 5.1252, 2.7887, 5.1709, 5.4708, 5.2796, 5.2051], + device='cuda:1'), covar=tensor([0.0457, 0.0408, 0.0774, 0.3806, 0.0675, 0.0714, 0.1031, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0438, 0.0433, 0.0538, 0.0423, 0.0445, 0.0424, 0.0388], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:41:22,702 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8320, 1.4351, 1.6520, 1.3025, 0.8279, 1.4501, 1.5206, 1.5930], + device='cuda:1'), covar=tensor([0.0516, 0.1277, 0.1673, 0.1454, 0.0634, 0.1515, 0.0707, 0.0646], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0163, 0.0112, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:41:33,730 INFO [train.py:901] (1/4) Epoch 22, batch 8000, loss[loss=0.212, simple_loss=0.3067, pruned_loss=0.05866, over 8297.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2855, pruned_loss=0.06, over 1614682.40 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:42:06,690 INFO [train.py:901] (1/4) Epoch 22, batch 8050, loss[loss=0.1673, simple_loss=0.2503, pruned_loss=0.04211, over 7254.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2848, pruned_loss=0.06047, over 1600587.20 frames. ], batch size: 16, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:42:07,275 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.308e+02 2.923e+02 3.618e+02 1.070e+03, threshold=5.846e+02, percent-clipped=4.0 +2023-02-07 04:42:10,789 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:22,383 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:39,874 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 04:42:44,815 INFO [train.py:901] (1/4) Epoch 23, batch 0, loss[loss=0.2238, simple_loss=0.3032, pruned_loss=0.07221, over 8598.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3032, pruned_loss=0.07221, over 8598.00 frames. ], batch size: 31, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:42:44,816 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 04:42:56,157 INFO [train.py:935] (1/4) Epoch 23, validation: loss=0.1743, simple_loss=0.274, pruned_loss=0.0373, over 944034.00 frames. +2023-02-07 04:42:56,159 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 04:43:08,341 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:10,543 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177844.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:12,395 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 04:43:26,740 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:28,092 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:32,004 INFO [train.py:901] (1/4) Epoch 23, batch 50, loss[loss=0.2369, simple_loss=0.3145, pruned_loss=0.07965, over 8595.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2922, pruned_loss=0.06472, over 370800.27 frames. ], batch size: 39, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:43:42,573 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0451, 2.3649, 3.7032, 1.7839, 3.0542, 2.3080, 2.1510, 2.7324], + device='cuda:1'), covar=tensor([0.1600, 0.2106, 0.0834, 0.3812, 0.1499, 0.2890, 0.1834, 0.2342], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0648, 0.0648, 0.0594, 0.0538, 0.0633], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:43:45,276 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.650e+02 3.149e+02 3.939e+02 1.519e+03, threshold=6.298e+02, percent-clipped=14.0 +2023-02-07 04:43:46,682 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 04:44:01,101 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177915.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:44:02,080 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 04:44:07,959 INFO [train.py:901] (1/4) Epoch 23, batch 100, loss[loss=0.1799, simple_loss=0.2606, pruned_loss=0.04955, over 7793.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2901, pruned_loss=0.06266, over 646135.15 frames. ], batch size: 19, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:09,367 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 04:44:15,321 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0032, 2.3470, 1.9178, 2.9651, 1.4451, 1.6501, 2.1890, 2.2929], + device='cuda:1'), covar=tensor([0.0766, 0.0734, 0.0904, 0.0357, 0.1178, 0.1371, 0.0855, 0.0803], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0214, 0.0207, 0.0246, 0.0250, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:44:42,233 INFO [train.py:901] (1/4) Epoch 23, batch 150, loss[loss=0.1831, simple_loss=0.2789, pruned_loss=0.04364, over 8295.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2886, pruned_loss=0.06187, over 862789.72 frames. 
], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:49,532 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1896, 1.4341, 4.4015, 1.8497, 2.2191, 4.9235, 5.0549, 4.3051], + device='cuda:1'), covar=tensor([0.1246, 0.2138, 0.0267, 0.2219, 0.1532, 0.0176, 0.0351, 0.0556], + device='cuda:1'), in_proj_covar=tensor([0.0300, 0.0323, 0.0287, 0.0318, 0.0313, 0.0269, 0.0424, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 04:44:54,933 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.352e+02 3.015e+02 3.767e+02 5.945e+02, threshold=6.031e+02, percent-clipped=0.0 +2023-02-07 04:44:55,345 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 04:45:18,306 INFO [train.py:901] (1/4) Epoch 23, batch 200, loss[loss=0.1879, simple_loss=0.2673, pruned_loss=0.05425, over 8238.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2889, pruned_loss=0.06253, over 1029136.88 frames. ], batch size: 22, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:45:19,114 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:21,851 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178030.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:53,024 INFO [train.py:901] (1/4) Epoch 23, batch 250, loss[loss=0.2734, simple_loss=0.3434, pruned_loss=0.1018, over 8200.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2875, pruned_loss=0.06179, over 1159292.44 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:04,759 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 04:46:06,099 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.380e+02 2.804e+02 3.484e+02 6.736e+02, threshold=5.609e+02, percent-clipped=2.0 +2023-02-07 04:46:12,814 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 04:46:28,460 INFO [train.py:901] (1/4) Epoch 23, batch 300, loss[loss=0.1731, simple_loss=0.2647, pruned_loss=0.04077, over 8106.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2877, pruned_loss=0.06209, over 1261928.42 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:40,068 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:40,620 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:45,435 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1224, 3.6074, 2.3452, 2.6207, 2.6870, 1.9069, 2.6600, 2.9092], + device='cuda:1'), covar=tensor([0.1492, 0.0366, 0.1069, 0.0826, 0.0810, 0.1498, 0.1036, 0.0980], + device='cuda:1'), in_proj_covar=tensor([0.0352, 0.0231, 0.0332, 0.0308, 0.0297, 0.0337, 0.0342, 0.0313], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 04:46:52,875 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:53,826 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-07 04:46:54,583 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 04:46:59,072 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1362, 1.6863, 4.1329, 1.7896, 2.3931, 4.7180, 4.8299, 4.1093], + device='cuda:1'), covar=tensor([0.1307, 0.1890, 0.0303, 0.2150, 0.1335, 0.0196, 0.0397, 0.0546], + device='cuda:1'), in_proj_covar=tensor([0.0299, 0.0322, 0.0288, 0.0316, 0.0312, 0.0267, 0.0423, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 04:47:03,747 INFO [train.py:901] (1/4) Epoch 23, batch 350, loss[loss=0.2214, simple_loss=0.3017, pruned_loss=0.07051, over 8604.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06184, over 1340958.62 frames. ], batch size: 31, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:16,039 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.905e+02 3.451e+02 8.072e+02, threshold=5.809e+02, percent-clipped=5.0 +2023-02-07 04:47:23,332 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2700, 1.8703, 4.4463, 2.1711, 2.5901, 5.1145, 5.1284, 4.4719], + device='cuda:1'), covar=tensor([0.1185, 0.1641, 0.0253, 0.1750, 0.1100, 0.0173, 0.0428, 0.0522], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0320, 0.0286, 0.0315, 0.0311, 0.0266, 0.0421, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 04:47:38,679 INFO [train.py:901] (1/4) Epoch 23, batch 400, loss[loss=0.2095, simple_loss=0.3003, pruned_loss=0.0593, over 8034.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06191, over 1399593.31 frames. ], batch size: 22, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:43,337 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-07 04:47:51,120 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5305, 1.8296, 2.6912, 1.4601, 1.9569, 1.8570, 1.6801, 1.9567], + device='cuda:1'), covar=tensor([0.1959, 0.2669, 0.0987, 0.4606, 0.1887, 0.3385, 0.2304, 0.2286], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0645, 0.0647, 0.0592, 0.0537, 0.0633], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:48:02,280 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,034 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,486 INFO [train.py:901] (1/4) Epoch 23, batch 450, loss[loss=0.2072, simple_loss=0.2952, pruned_loss=0.05959, over 8087.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2876, pruned_loss=0.0617, over 1448100.35 frames. 
], batch size: 21, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:48:15,622 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8788, 6.0130, 5.2800, 2.8368, 5.3416, 5.6900, 5.5260, 5.4228], + device='cuda:1'), covar=tensor([0.0560, 0.0355, 0.0810, 0.3937, 0.0687, 0.0879, 0.1007, 0.0552], + device='cuda:1'), in_proj_covar=tensor([0.0530, 0.0439, 0.0434, 0.0542, 0.0426, 0.0446, 0.0427, 0.0390], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:48:17,119 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0767, 2.4110, 1.9460, 2.9499, 1.4099, 1.7356, 2.1944, 2.3104], + device='cuda:1'), covar=tensor([0.0716, 0.0623, 0.0832, 0.0323, 0.1033, 0.1200, 0.0766, 0.0686], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0197, 0.0245, 0.0215, 0.0207, 0.0246, 0.0250, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:48:23,156 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:27,622 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.308e+02 2.812e+02 3.532e+02 1.107e+03, threshold=5.624e+02, percent-clipped=2.0 +2023-02-07 04:48:40,238 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:50,181 INFO [train.py:901] (1/4) Epoch 23, batch 500, loss[loss=0.2306, simple_loss=0.3064, pruned_loss=0.07739, over 8726.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2871, pruned_loss=0.0615, over 1483487.48 frames. ], batch size: 30, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:25,960 INFO [train.py:901] (1/4) Epoch 23, batch 550, loss[loss=0.1968, simple_loss=0.2788, pruned_loss=0.05741, over 8330.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2875, pruned_loss=0.06227, over 1508719.78 frames. ], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:39,372 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.448e+02 3.105e+02 3.761e+02 9.562e+02, threshold=6.211e+02, percent-clipped=5.0 +2023-02-07 04:49:42,440 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:49:59,303 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:01,203 INFO [train.py:901] (1/4) Epoch 23, batch 600, loss[loss=0.2433, simple_loss=0.3294, pruned_loss=0.0786, over 8329.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.06161, over 1536610.59 frames. ], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:14,817 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 04:50:33,525 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178470.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:36,791 INFO [train.py:901] (1/4) Epoch 23, batch 650, loss[loss=0.2552, simple_loss=0.3367, pruned_loss=0.08691, over 8489.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2881, pruned_loss=0.06206, over 1552348.11 frames. 
], batch size: 29, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:49,793 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.230e+02 2.701e+02 3.368e+02 8.641e+02, threshold=5.402e+02, percent-clipped=2.0 +2023-02-07 04:51:04,347 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:12,432 INFO [train.py:901] (1/4) Epoch 23, batch 700, loss[loss=0.1746, simple_loss=0.257, pruned_loss=0.04604, over 7664.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2891, pruned_loss=0.06272, over 1564279.32 frames. ], batch size: 19, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:16,067 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:18,198 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0631, 1.7874, 2.3033, 1.9844, 2.2421, 2.0988, 1.9034, 1.1783], + device='cuda:1'), covar=tensor([0.5909, 0.4871, 0.2078, 0.3828, 0.2582, 0.3189, 0.2035, 0.5339], + device='cuda:1'), in_proj_covar=tensor([0.0941, 0.0986, 0.0813, 0.0952, 0.0997, 0.0899, 0.0754, 0.0831], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 04:51:21,533 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:33,980 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178555.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:47,514 INFO [train.py:901] (1/4) Epoch 23, batch 750, loss[loss=0.1982, simple_loss=0.2681, pruned_loss=0.06414, over 7675.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2881, pruned_loss=0.06213, over 1575280.86 frames. ], batch size: 18, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:49,820 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7409, 1.6321, 2.4588, 1.5446, 1.3045, 2.3888, 0.5161, 1.5001], + device='cuda:1'), covar=tensor([0.1849, 0.1424, 0.0322, 0.1326, 0.2648, 0.0449, 0.2083, 0.1432], + device='cuda:1'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0221, 0.0270, 0.0137, 0.0171, 0.0196], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 04:51:59,483 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5935, 1.8338, 2.6983, 1.4566, 1.9469, 1.9528, 1.6407, 2.0119], + device='cuda:1'), covar=tensor([0.1858, 0.2477, 0.0821, 0.4332, 0.1806, 0.3092, 0.2281, 0.2084], + device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0606, 0.0555, 0.0645, 0.0649, 0.0594, 0.0536, 0.0634], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:52:00,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.529e+02 2.988e+02 3.531e+02 9.866e+02, threshold=5.976e+02, percent-clipped=5.0 +2023-02-07 04:52:03,329 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 04:52:12,892 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 04:52:24,029 INFO [train.py:901] (1/4) Epoch 23, batch 800, loss[loss=0.187, simple_loss=0.2779, pruned_loss=0.04804, over 8287.00 frames. 
], tot_loss[loss=0.2064, simple_loss=0.2886, pruned_loss=0.06212, over 1588827.14 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:52:32,115 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178637.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:52:38,309 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2398, 2.2630, 2.0524, 2.6398, 1.8978, 1.9926, 2.1832, 2.3677], + device='cuda:1'), covar=tensor([0.0582, 0.0656, 0.0713, 0.0456, 0.0828, 0.0952, 0.0653, 0.0602], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0197, 0.0245, 0.0214, 0.0206, 0.0245, 0.0249, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 04:52:57,754 INFO [train.py:901] (1/4) Epoch 23, batch 850, loss[loss=0.191, simple_loss=0.2926, pruned_loss=0.04476, over 8335.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06227, over 1594393.55 frames. ], batch size: 25, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:10,567 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.561e+02 2.992e+02 3.918e+02 1.040e+03, threshold=5.984e+02, percent-clipped=6.0 +2023-02-07 04:53:24,466 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178712.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:53:26,475 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178715.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:53:31,368 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2295, 3.1366, 2.9200, 1.4521, 2.8193, 2.9492, 2.8088, 2.8833], + device='cuda:1'), covar=tensor([0.1065, 0.0756, 0.1153, 0.5010, 0.1074, 0.1133, 0.1571, 0.0913], + device='cuda:1'), in_proj_covar=tensor([0.0535, 0.0440, 0.0435, 0.0544, 0.0426, 0.0448, 0.0431, 0.0389], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:53:34,044 INFO [train.py:901] (1/4) Epoch 23, batch 900, loss[loss=0.2343, simple_loss=0.3145, pruned_loss=0.07704, over 8451.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2874, pruned_loss=0.06189, over 1596261.64 frames. ], batch size: 50, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:55,223 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-07 04:54:03,494 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8678, 1.4861, 3.1683, 1.3946, 2.2564, 3.4260, 3.5830, 2.9186], + device='cuda:1'), covar=tensor([0.1184, 0.1784, 0.0358, 0.2195, 0.0987, 0.0250, 0.0533, 0.0560], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0321, 0.0286, 0.0315, 0.0310, 0.0267, 0.0422, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 04:54:09,420 INFO [train.py:901] (1/4) Epoch 23, batch 950, loss[loss=0.2123, simple_loss=0.3048, pruned_loss=0.05994, over 8106.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06187, over 1606764.36 frames. 
], batch size: 23, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:54:18,540 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:21,847 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.330e+02 2.907e+02 3.544e+02 9.473e+02, threshold=5.814e+02, percent-clipped=4.0 +2023-02-07 04:54:22,772 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0217, 1.2254, 1.2048, 0.5546, 1.2342, 1.0227, 0.0758, 1.1495], + device='cuda:1'), covar=tensor([0.0492, 0.0428, 0.0389, 0.0672, 0.0432, 0.1117, 0.0866, 0.0368], + device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0395, 0.0347, 0.0448, 0.0380, 0.0536, 0.0393, 0.0425], + device='cuda:1'), out_proj_covar=tensor([1.2096e-04, 1.0363e-04, 9.1189e-05, 1.1773e-04, 9.9773e-05, 1.5107e-04, + 1.0597e-04, 1.1251e-04], device='cuda:1') +2023-02-07 04:54:35,788 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 04:54:37,113 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:43,576 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1823, 1.5775, 1.8350, 1.4387, 0.9969, 1.5373, 1.9662, 1.7773], + device='cuda:1'), covar=tensor([0.0517, 0.1278, 0.1640, 0.1447, 0.0621, 0.1485, 0.0632, 0.0635], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:54:45,361 INFO [train.py:901] (1/4) Epoch 23, batch 1000, loss[loss=0.228, simple_loss=0.3038, pruned_loss=0.07605, over 8500.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2878, pruned_loss=0.0619, over 1610919.94 frames. ], batch size: 26, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:54:49,035 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9317, 1.8040, 2.0337, 1.7557, 1.0100, 1.6567, 2.3437, 2.2029], + device='cuda:1'), covar=tensor([0.0448, 0.1156, 0.1595, 0.1350, 0.0581, 0.1393, 0.0562, 0.0575], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:55:12,368 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 04:55:21,365 INFO [train.py:901] (1/4) Epoch 23, batch 1050, loss[loss=0.1759, simple_loss=0.266, pruned_loss=0.04292, over 8483.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2875, pruned_loss=0.06149, over 1614048.28 frames. ], batch size: 25, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:25,394 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-07 04:55:33,395 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.332e+02 2.695e+02 3.454e+02 6.847e+02, threshold=5.390e+02, percent-clipped=5.0 +2023-02-07 04:55:46,652 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178912.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:55:50,005 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5292, 5.5799, 4.9890, 2.6199, 4.9901, 5.2538, 5.1941, 5.0767], + device='cuda:1'), covar=tensor([0.0574, 0.0363, 0.0874, 0.4271, 0.0726, 0.0779, 0.0999, 0.0622], + device='cuda:1'), in_proj_covar=tensor([0.0532, 0.0440, 0.0435, 0.0543, 0.0427, 0.0448, 0.0431, 0.0388], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:55:56,201 INFO [train.py:901] (1/4) Epoch 23, batch 1100, loss[loss=0.2226, simple_loss=0.2953, pruned_loss=0.07497, over 8506.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.0617, over 1615756.59 frames. ], batch size: 26, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:55:59,138 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:01,239 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2311, 1.2492, 3.3665, 1.0344, 2.9753, 2.8161, 3.0878, 2.9844], + device='cuda:1'), covar=tensor([0.0869, 0.4485, 0.0857, 0.4433, 0.1512, 0.1229, 0.0822, 0.1008], + device='cuda:1'), in_proj_covar=tensor([0.0634, 0.0644, 0.0700, 0.0630, 0.0711, 0.0604, 0.0603, 0.0683], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:56:32,124 INFO [train.py:901] (1/4) Epoch 23, batch 1150, loss[loss=0.184, simple_loss=0.268, pruned_loss=0.05004, over 7938.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2883, pruned_loss=0.06194, over 1617015.51 frames. ], batch size: 20, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:56:36,260 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 04:56:36,327 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:45,240 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.628e+02 3.162e+02 4.177e+02 1.087e+03, threshold=6.324e+02, percent-clipped=6.0 +2023-02-07 04:57:07,132 INFO [train.py:901] (1/4) Epoch 23, batch 1200, loss[loss=0.2559, simple_loss=0.322, pruned_loss=0.09491, over 7046.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2886, pruned_loss=0.06216, over 1613128.58 frames. 
], batch size: 72, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:13,472 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3603, 2.1229, 2.7998, 2.2333, 2.6609, 2.3656, 2.1850, 1.4969], + device='cuda:1'), covar=tensor([0.5257, 0.4782, 0.1994, 0.3543, 0.2509, 0.2926, 0.1832, 0.5095], + device='cuda:1'), in_proj_covar=tensor([0.0935, 0.0981, 0.0807, 0.0947, 0.0992, 0.0897, 0.0751, 0.0826], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 04:57:19,757 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6175, 1.3863, 1.6701, 1.2940, 0.8603, 1.4834, 1.4738, 1.5378], + device='cuda:1'), covar=tensor([0.0583, 0.1290, 0.1640, 0.1462, 0.0598, 0.1475, 0.0693, 0.0595], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0162, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:57:29,070 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:57:31,018 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179059.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:57:42,798 INFO [train.py:901] (1/4) Epoch 23, batch 1250, loss[loss=0.1919, simple_loss=0.2776, pruned_loss=0.05311, over 8327.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.06162, over 1615556.74 frames. ], batch size: 25, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:55,995 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.289e+02 2.896e+02 3.686e+02 5.954e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 04:57:58,269 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:19,004 INFO [train.py:901] (1/4) Epoch 23, batch 1300, loss[loss=0.1894, simple_loss=0.2786, pruned_loss=0.05009, over 8104.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2864, pruned_loss=0.06044, over 1616308.76 frames. ], batch size: 23, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:58:24,098 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:51,891 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179171.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:53,994 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179174.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:58:54,483 INFO [train.py:901] (1/4) Epoch 23, batch 1350, loss[loss=0.1795, simple_loss=0.262, pruned_loss=0.04851, over 7528.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2867, pruned_loss=0.06011, over 1619433.24 frames. 
], batch size: 18, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:01,704 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179185.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:07,800 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.184e+02 2.635e+02 3.098e+02 5.270e+02, threshold=5.271e+02, percent-clipped=0.0 +2023-02-07 04:59:20,397 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179210.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:30,634 INFO [train.py:901] (1/4) Epoch 23, batch 1400, loss[loss=0.2008, simple_loss=0.2942, pruned_loss=0.05373, over 8351.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2874, pruned_loss=0.06019, over 1618578.41 frames. ], batch size: 24, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:41,531 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 04:59:42,859 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6563, 2.2874, 4.1220, 1.5036, 3.0259, 2.3260, 1.7609, 2.8500], + device='cuda:1'), covar=tensor([0.1973, 0.2703, 0.0861, 0.4642, 0.1822, 0.3199, 0.2364, 0.2401], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0607, 0.0555, 0.0647, 0.0651, 0.0594, 0.0537, 0.0633], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:59:44,216 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9931, 1.4380, 1.7508, 1.3450, 1.0588, 1.5056, 1.7415, 1.7584], + device='cuda:1'), covar=tensor([0.0584, 0.1325, 0.1665, 0.1498, 0.0630, 0.1479, 0.0722, 0.0625], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0158, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 04:59:44,934 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5365, 1.6098, 4.7228, 1.7072, 4.1642, 3.9327, 4.2913, 4.1459], + device='cuda:1'), covar=tensor([0.0581, 0.4625, 0.0442, 0.3997, 0.1081, 0.0907, 0.0550, 0.0647], + device='cuda:1'), in_proj_covar=tensor([0.0630, 0.0638, 0.0697, 0.0629, 0.0707, 0.0604, 0.0601, 0.0677], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 04:59:47,170 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:52,290 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 04:59:53,422 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179256.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:00:06,546 INFO [train.py:901] (1/4) Epoch 23, batch 1450, loss[loss=0.2006, simple_loss=0.2785, pruned_loss=0.06138, over 7656.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2876, pruned_loss=0.06032, over 1619464.06 frames. ], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:00:16,912 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-07 05:00:18,543 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6917, 1.4168, 1.6144, 1.3578, 1.0034, 1.4317, 1.6207, 1.6817], + device='cuda:1'), covar=tensor([0.0557, 0.1286, 0.1605, 0.1387, 0.0607, 0.1477, 0.0686, 0.0573], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0158, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 05:00:19,763 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.293e+02 2.971e+02 3.774e+02 8.745e+02, threshold=5.941e+02, percent-clipped=9.0 +2023-02-07 05:00:43,625 INFO [train.py:901] (1/4) Epoch 23, batch 1500, loss[loss=0.2516, simple_loss=0.3268, pruned_loss=0.0882, over 8371.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.289, pruned_loss=0.06115, over 1621427.01 frames. ], batch size: 49, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:03,401 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:16,378 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179371.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:01:18,843 INFO [train.py:901] (1/4) Epoch 23, batch 1550, loss[loss=0.1922, simple_loss=0.2884, pruned_loss=0.04798, over 8359.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2896, pruned_loss=0.06166, over 1622583.03 frames. ], batch size: 24, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:20,471 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:21,725 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:23,199 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3106, 1.4575, 1.3174, 1.7702, 0.6570, 1.1753, 1.2691, 1.4411], + device='cuda:1'), covar=tensor([0.0933, 0.0804, 0.0940, 0.0521, 0.1186, 0.1401, 0.0826, 0.0793], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0198, 0.0244, 0.0214, 0.0206, 0.0247, 0.0250, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 05:01:28,617 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7274, 5.8222, 5.1640, 2.5830, 5.2087, 5.4866, 5.3837, 5.3330], + device='cuda:1'), covar=tensor([0.0566, 0.0371, 0.0826, 0.4265, 0.0735, 0.0924, 0.1105, 0.0573], + device='cuda:1'), in_proj_covar=tensor([0.0531, 0.0439, 0.0434, 0.0543, 0.0428, 0.0449, 0.0431, 0.0388], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 05:01:31,097 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.349e+02 2.958e+02 3.969e+02 7.808e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-07 05:01:54,009 INFO [train.py:901] (1/4) Epoch 23, batch 1600, loss[loss=0.2036, simple_loss=0.2898, pruned_loss=0.05867, over 8463.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2887, pruned_loss=0.0616, over 1623620.30 frames. 
], batch size: 25, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:56,465 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179427.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:58,507 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179430.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:14,218 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 05:02:14,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:02:16,577 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179455.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:21,379 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179462.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:31,191 INFO [train.py:901] (1/4) Epoch 23, batch 1650, loss[loss=0.219, simple_loss=0.3075, pruned_loss=0.06519, over 8105.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2882, pruned_loss=0.0609, over 1625525.02 frames. ], batch size: 23, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:02:43,578 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.367e+02 2.783e+02 3.381e+02 8.055e+02, threshold=5.566e+02, percent-clipped=4.0 +2023-02-07 05:02:50,700 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:06,279 INFO [train.py:901] (1/4) Epoch 23, batch 1700, loss[loss=0.1986, simple_loss=0.2801, pruned_loss=0.05859, over 8087.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2882, pruned_loss=0.06113, over 1625850.14 frames. ], batch size: 21, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:03:08,709 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:42,324 INFO [train.py:901] (1/4) Epoch 23, batch 1750, loss[loss=0.1942, simple_loss=0.2902, pruned_loss=0.04907, over 8752.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2886, pruned_loss=0.06153, over 1620066.43 frames. ], batch size: 30, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:03:48,660 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7642, 1.6283, 2.2812, 1.5983, 1.2946, 2.2589, 0.8263, 1.5805], + device='cuda:1'), covar=tensor([0.1737, 0.1158, 0.0360, 0.1028, 0.2473, 0.0420, 0.1693, 0.1208], + device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0198, 0.0129, 0.0219, 0.0267, 0.0136, 0.0169, 0.0192], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 05:03:56,238 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.481e+02 2.857e+02 3.517e+02 8.396e+02, threshold=5.713e+02, percent-clipped=3.0 +2023-02-07 05:04:17,975 INFO [train.py:901] (1/4) Epoch 23, batch 1800, loss[loss=0.2588, simple_loss=0.33, pruned_loss=0.09381, over 8491.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2874, pruned_loss=0.06108, over 1619844.97 frames. 
], batch size: 26, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:04:19,569 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179627.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:04:37,265 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179652.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:04:38,540 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179654.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:04:54,468 INFO [train.py:901] (1/4) Epoch 23, batch 1850, loss[loss=0.195, simple_loss=0.2827, pruned_loss=0.05359, over 8586.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06085, over 1617832.92 frames. ], batch size: 31, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:04:58,795 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5322, 1.9735, 2.9397, 1.3806, 2.2723, 1.8263, 1.7432, 2.1870], + device='cuda:1'), covar=tensor([0.1885, 0.2519, 0.1043, 0.4412, 0.1881, 0.3308, 0.2241, 0.2408], + device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0610, 0.0558, 0.0648, 0.0653, 0.0596, 0.0540, 0.0634], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 05:05:07,479 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.311e+02 2.831e+02 3.615e+02 8.108e+02, threshold=5.663e+02, percent-clipped=6.0 +2023-02-07 05:05:12,872 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 05:05:28,483 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:05:29,798 INFO [train.py:901] (1/4) Epoch 23, batch 1900, loss[loss=0.1898, simple_loss=0.2715, pruned_loss=0.05408, over 8245.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.287, pruned_loss=0.06145, over 1618524.91 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:41,138 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 05:05:59,886 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 05:06:05,579 INFO [train.py:901] (1/4) Epoch 23, batch 1950, loss[loss=0.2272, simple_loss=0.3087, pruned_loss=0.07282, over 8294.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.287, pruned_loss=0.06185, over 1617328.20 frames. ], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:06:12,661 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-07 05:06:19,490 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.457e+02 2.986e+02 3.643e+02 8.972e+02, threshold=5.972e+02, percent-clipped=4.0 +2023-02-07 05:06:22,396 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7371, 4.7654, 4.2850, 2.1143, 4.2275, 4.3469, 4.3019, 4.2614], + device='cuda:1'), covar=tensor([0.0651, 0.0474, 0.1028, 0.4331, 0.0847, 0.0743, 0.1139, 0.0601], + device='cuda:1'), in_proj_covar=tensor([0.0534, 0.0441, 0.0437, 0.0545, 0.0431, 0.0453, 0.0434, 0.0391], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 05:06:28,061 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179806.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:06:31,279 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 05:06:34,176 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:06:41,618 INFO [train.py:901] (1/4) Epoch 23, batch 2000, loss[loss=0.191, simple_loss=0.2728, pruned_loss=0.0546, over 8334.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2861, pruned_loss=0.06138, over 1615059.58 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:06:43,151 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1720, 1.4653, 1.8351, 1.4877, 0.9301, 1.5538, 1.8383, 1.6951], + device='cuda:1'), covar=tensor([0.0484, 0.1274, 0.1584, 0.1365, 0.0579, 0.1380, 0.0636, 0.0612], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0159, 0.0100, 0.0162, 0.0112, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 05:06:50,599 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179838.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:07:15,518 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 05:07:16,430 INFO [train.py:901] (1/4) Epoch 23, batch 2050, loss[loss=0.246, simple_loss=0.3202, pruned_loss=0.08592, over 6823.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2869, pruned_loss=0.06185, over 1614580.37 frames. ], batch size: 71, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:07:30,035 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.444e+02 2.856e+02 3.794e+02 1.051e+03, threshold=5.713e+02, percent-clipped=7.0 +2023-02-07 05:07:40,337 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 05:07:49,563 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179921.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:07:51,623 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179924.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:07:52,156 INFO [train.py:901] (1/4) Epoch 23, batch 2100, loss[loss=0.1857, simple_loss=0.2768, pruned_loss=0.04737, over 7649.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2876, pruned_loss=0.06202, over 1615998.33 frames. 
], batch size: 19, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:07:55,136 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7494, 1.8430, 1.6700, 2.3270, 0.9734, 1.4376, 1.7764, 1.9331], + device='cuda:1'), covar=tensor([0.0791, 0.0783, 0.0883, 0.0436, 0.1118, 0.1378, 0.0731, 0.0683], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0197, 0.0243, 0.0213, 0.0206, 0.0246, 0.0249, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 05:08:00,347 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.85 vs. limit=5.0 +2023-02-07 05:08:04,834 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179942.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:08:05,925 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-07 05:08:27,555 INFO [train.py:901] (1/4) Epoch 23, batch 2150, loss[loss=0.2163, simple_loss=0.2966, pruned_loss=0.06794, over 7818.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2879, pruned_loss=0.06193, over 1615581.75 frames. ], batch size: 20, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:08:41,574 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.311e+02 2.940e+02 3.642e+02 8.826e+02, threshold=5.880e+02, percent-clipped=6.0 +2023-02-07 05:08:44,583 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:09:05,721 INFO [train.py:901] (1/4) Epoch 23, batch 2200, loss[loss=0.216, simple_loss=0.3016, pruned_loss=0.06517, over 8021.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2867, pruned_loss=0.0613, over 1612993.74 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:09:13,519 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2733, 1.6104, 1.2836, 2.6436, 1.1043, 1.1860, 1.9103, 1.8802], + device='cuda:1'), covar=tensor([0.1536, 0.1310, 0.1945, 0.0383, 0.1406, 0.2011, 0.0935, 0.0977], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0196, 0.0242, 0.0213, 0.0205, 0.0245, 0.0249, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 05:09:18,800 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:09:40,651 INFO [train.py:901] (1/4) Epoch 23, batch 2250, loss[loss=0.2327, simple_loss=0.3049, pruned_loss=0.08026, over 8199.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2867, pruned_loss=0.06092, over 1615705.87 frames. 
], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:09:53,773 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.377e+02 2.815e+02 3.570e+02 6.536e+02, threshold=5.630e+02, percent-clipped=1.0 +2023-02-07 05:09:54,034 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:07,866 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180113.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:12,125 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180119.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:16,776 INFO [train.py:901] (1/4) Epoch 23, batch 2300, loss[loss=0.2525, simple_loss=0.3186, pruned_loss=0.09326, over 7189.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06097, over 1616462.64 frames. ], batch size: 71, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:10:40,109 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180158.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:41,670 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3739, 1.6363, 1.6747, 1.1425, 1.7046, 1.3733, 0.3316, 1.6390], + device='cuda:1'), covar=tensor([0.0476, 0.0357, 0.0282, 0.0481, 0.0433, 0.0910, 0.0824, 0.0258], + device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0391, 0.0346, 0.0446, 0.0380, 0.0534, 0.0392, 0.0420], + device='cuda:1'), out_proj_covar=tensor([1.2101e-04, 1.0256e-04, 9.0863e-05, 1.1726e-04, 9.9980e-05, 1.5037e-04, + 1.0566e-04, 1.1127e-04], device='cuda:1') +2023-02-07 05:10:52,673 INFO [train.py:901] (1/4) Epoch 23, batch 2350, loss[loss=0.2214, simple_loss=0.317, pruned_loss=0.06296, over 8357.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2868, pruned_loss=0.06086, over 1617687.49 frames. ], batch size: 24, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:10:54,342 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180177.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:11:05,883 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.451e+02 2.928e+02 3.544e+02 9.883e+02, threshold=5.856e+02, percent-clipped=4.0 +2023-02-07 05:11:11,595 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180202.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:11:25,826 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180223.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:11:27,102 INFO [train.py:901] (1/4) Epoch 23, batch 2400, loss[loss=0.1836, simple_loss=0.2581, pruned_loss=0.05456, over 7421.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2875, pruned_loss=0.06149, over 1618642.33 frames. 
], batch size: 17, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:11:57,640 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8750, 1.4823, 1.8258, 1.4662, 0.9139, 1.5791, 1.7543, 1.7281], + device='cuda:1'), covar=tensor([0.0503, 0.1215, 0.1570, 0.1380, 0.0583, 0.1370, 0.0650, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:1') +2023-02-07 05:11:59,472 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180268.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:12:02,929 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:12:04,119 INFO [train.py:901] (1/4) Epoch 23, batch 2450, loss[loss=0.2131, simple_loss=0.3023, pruned_loss=0.0619, over 8324.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2877, pruned_loss=0.06129, over 1621304.46 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:12:12,625 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:12:18,007 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.501e+02 2.918e+02 3.866e+02 1.157e+03, threshold=5.835e+02, percent-clipped=6.0 +2023-02-07 05:12:24,145 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 05:12:39,634 INFO [train.py:901] (1/4) Epoch 23, batch 2500, loss[loss=0.1916, simple_loss=0.2804, pruned_loss=0.05145, over 8203.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2878, pruned_loss=0.06112, over 1619761.26 frames. ], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:13:00,496 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180354.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:12,920 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:16,778 INFO [train.py:901] (1/4) Epoch 23, batch 2550, loss[loss=0.2129, simple_loss=0.2822, pruned_loss=0.07186, over 7195.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2871, pruned_loss=0.06078, over 1619022.32 frames. ], batch size: 16, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:13:21,734 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0254, 2.1788, 1.8665, 2.7681, 1.2693, 1.6365, 1.9869, 2.2328], + device='cuda:1'), covar=tensor([0.0703, 0.0760, 0.0860, 0.0375, 0.1103, 0.1262, 0.0767, 0.0747], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0198, 0.0243, 0.0214, 0.0206, 0.0246, 0.0249, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 05:13:22,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180383.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:13:22,606 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0 +2023-02-07 05:13:25,751 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:29,875 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.435e+02 3.031e+02 3.942e+02 1.076e+03, threshold=6.063e+02, percent-clipped=1.0 +2023-02-07 05:13:30,131 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:32,990 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2372, 2.1168, 2.7353, 2.1997, 2.6991, 2.3333, 2.1466, 1.4706], + device='cuda:1'), covar=tensor([0.5854, 0.5052, 0.2056, 0.4038, 0.2568, 0.3356, 0.2021, 0.5554], + device='cuda:1'), in_proj_covar=tensor([0.0948, 0.0993, 0.0818, 0.0954, 0.1003, 0.0905, 0.0756, 0.0835], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 05:13:35,752 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180401.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:35,901 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 05:13:48,409 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 05:13:51,963 INFO [train.py:901] (1/4) Epoch 23, batch 2600, loss[loss=0.1708, simple_loss=0.2534, pruned_loss=0.04413, over 7518.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2871, pruned_loss=0.06057, over 1613984.64 frames. ], batch size: 18, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:14:28,401 INFO [train.py:901] (1/4) Epoch 23, batch 2650, loss[loss=0.1756, simple_loss=0.2411, pruned_loss=0.05505, over 7228.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2863, pruned_loss=0.06019, over 1613327.61 frames. ], batch size: 16, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:14:42,183 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 2.331e+02 2.876e+02 3.734e+02 9.435e+02, threshold=5.753e+02, percent-clipped=4.0 +2023-02-07 05:14:48,440 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:04,100 INFO [train.py:901] (1/4) Epoch 23, batch 2700, loss[loss=0.2554, simple_loss=0.3276, pruned_loss=0.09157, over 8578.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2864, pruned_loss=0.0604, over 1614578.34 frames. ], batch size: 31, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:15:07,063 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180529.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:24,146 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:33,270 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:39,679 INFO [train.py:901] (1/4) Epoch 23, batch 2750, loss[loss=0.1809, simple_loss=0.2713, pruned_loss=0.04523, over 7816.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2873, pruned_loss=0.06067, over 1619754.51 frames. 
], batch size: 20, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:15:48,808 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:53,488 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.355e+02 2.814e+02 3.432e+02 9.125e+02, threshold=5.629e+02, percent-clipped=4.0 +2023-02-07 05:16:15,665 INFO [train.py:901] (1/4) Epoch 23, batch 2800, loss[loss=0.2165, simple_loss=0.3056, pruned_loss=0.06366, over 8500.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2868, pruned_loss=0.06093, over 1617190.37 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:16:25,073 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 05:16:26,282 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180639.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:16:38,613 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180657.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:16:43,305 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180664.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:16:50,738 INFO [train.py:901] (1/4) Epoch 23, batch 2850, loss[loss=0.2074, simple_loss=0.2946, pruned_loss=0.0601, over 8030.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.287, pruned_loss=0.06124, over 1615349.24 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:16:55,705 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:16:55,727 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:04,507 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.469e+02 3.037e+02 3.866e+02 9.714e+02, threshold=6.075e+02, percent-clipped=7.0 +2023-02-07 05:17:07,438 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:27,378 INFO [train.py:901] (1/4) Epoch 23, batch 2900, loss[loss=0.2079, simple_loss=0.288, pruned_loss=0.06395, over 8446.00 frames. ], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.06145, over 1611855.43 frames. ], batch size: 29, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:17:45,110 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:52,130 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180759.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:56,171 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5770, 1.8861, 2.0181, 1.1617, 2.1247, 1.4448, 0.5044, 1.8516], + device='cuda:1'), covar=tensor([0.0613, 0.0346, 0.0258, 0.0651, 0.0449, 0.0890, 0.0935, 0.0311], + device='cuda:1'), in_proj_covar=tensor([0.0455, 0.0393, 0.0347, 0.0446, 0.0379, 0.0535, 0.0393, 0.0422], + device='cuda:1'), out_proj_covar=tensor([1.2156e-04, 1.0301e-04, 9.1094e-05, 1.1721e-04, 9.9850e-05, 1.5056e-04, + 1.0574e-04, 1.1164e-04], device='cuda:1') +2023-02-07 05:17:59,461 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-07 05:18:02,964 INFO [train.py:901] (1/4) Epoch 23, batch 2950, loss[loss=0.2145, simple_loss=0.2989, pruned_loss=0.06508, over 8239.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.287, pruned_loss=0.06175, over 1605014.16 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:18:09,314 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:18:16,020 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.356e+02 2.925e+02 3.942e+02 6.480e+02, threshold=5.850e+02, percent-clipped=1.0 +2023-02-07 05:18:24,006 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7189, 2.3390, 3.8379, 1.8215, 1.6333, 3.6547, 0.6321, 2.1528], + device='cuda:1'), covar=tensor([0.1819, 0.1286, 0.0199, 0.1926, 0.2862, 0.0342, 0.2239, 0.1546], + device='cuda:1'), in_proj_covar=tensor([0.0193, 0.0199, 0.0129, 0.0222, 0.0271, 0.0137, 0.0171, 0.0195], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 05:18:30,325 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180813.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:18:34,562 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0 +2023-02-07 05:18:38,135 INFO [train.py:901] (1/4) Epoch 23, batch 3000, loss[loss=0.1722, simple_loss=0.2716, pruned_loss=0.03638, over 8240.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2873, pruned_loss=0.0616, over 1607784.73 frames. ], batch size: 24, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:18:38,135 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 05:18:50,537 INFO [train.py:935] (1/4) Epoch 23, validation: loss=0.1735, simple_loss=0.2731, pruned_loss=0.03696, over 944034.00 frames. +2023-02-07 05:18:50,538 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 05:19:03,707 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:19:26,990 INFO [train.py:901] (1/4) Epoch 23, batch 3050, loss[loss=0.2226, simple_loss=0.3089, pruned_loss=0.06816, over 8340.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06149, over 1608423.73 frames. ], batch size: 26, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:19:33,846 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 05:19:40,678 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.515e+02 3.107e+02 3.968e+02 1.139e+03, threshold=6.214e+02, percent-clipped=7.0 +2023-02-07 05:20:02,332 INFO [train.py:901] (1/4) Epoch 23, batch 3100, loss[loss=0.2189, simple_loss=0.2961, pruned_loss=0.07092, over 8788.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2877, pruned_loss=0.06156, over 1610747.51 frames. 
], batch size: 30, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:20:07,185 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:20:11,450 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180938.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:20:22,461 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9083, 5.9234, 5.2126, 2.8847, 5.3181, 5.6873, 5.4527, 5.4539],
+ device='cuda:1'), covar=tensor([0.0437, 0.0326, 0.0774, 0.3818, 0.0606, 0.0610, 0.0915, 0.0539],
+ device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0438, 0.0431, 0.0540, 0.0429, 0.0445, 0.0430, 0.0389],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:20:29,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180963.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:20:38,173 INFO [train.py:901] (1/4) Epoch 23, batch 3150, loss[loss=0.1874, simple_loss=0.288, pruned_loss=0.04342, over 8500.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.06153, over 1608036.56 frames. ], batch size: 26, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:20:51,967 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.467e+02 3.042e+02 3.660e+02 1.036e+03, threshold=6.084e+02, percent-clipped=2.0
+2023-02-07 05:20:53,523 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5491, 1.9204, 2.0027, 1.2952, 2.1413, 1.5857, 0.5610, 1.8751],
+ device='cuda:1'), covar=tensor([0.0628, 0.0353, 0.0253, 0.0610, 0.0387, 0.0876, 0.0914, 0.0282],
+ device='cuda:1'), in_proj_covar=tensor([0.0455, 0.0392, 0.0345, 0.0446, 0.0378, 0.0533, 0.0391, 0.0420],
+ device='cuda:1'), out_proj_covar=tensor([1.2148e-04, 1.0270e-04, 9.0483e-05, 1.1729e-04, 9.9306e-05, 1.4992e-04,
+ 1.0529e-04, 1.1097e-04], device='cuda:1')
+2023-02-07 05:21:14,460 INFO [train.py:901] (1/4) Epoch 23, batch 3200, loss[loss=0.2289, simple_loss=0.3087, pruned_loss=0.07462, over 8354.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2883, pruned_loss=0.06152, over 1614757.00 frames. ], batch size: 24, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:21:18,874 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3164, 1.5824, 1.6629, 1.1260, 1.6940, 1.3526, 0.3020, 1.6428],
+ device='cuda:1'), covar=tensor([0.0439, 0.0341, 0.0288, 0.0456, 0.0403, 0.0857, 0.0765, 0.0249],
+ device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0392, 0.0345, 0.0446, 0.0378, 0.0532, 0.0391, 0.0419],
+ device='cuda:1'), out_proj_covar=tensor([1.2117e-04, 1.0258e-04, 9.0443e-05, 1.1715e-04, 9.9282e-05, 1.4990e-04,
+ 1.0525e-04, 1.1091e-04], device='cuda:1')
+2023-02-07 05:21:29,942 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:21:45,859 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181069.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:21:49,893 INFO [train.py:901] (1/4) Epoch 23, batch 3250, loss[loss=0.1668, simple_loss=0.2614, pruned_loss=0.03608, over 8466.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06045, over 1612161.76 frames. ], batch size: 25, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:22:03,763 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.376e+02 2.917e+02 3.369e+02 6.745e+02, threshold=5.834e+02, percent-clipped=1.0
+2023-02-07 05:22:03,877 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:22:04,011 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:22:26,227 INFO [train.py:901] (1/4) Epoch 23, batch 3300, loss[loss=0.181, simple_loss=0.2504, pruned_loss=0.05577, over 7708.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2871, pruned_loss=0.06079, over 1613627.31 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:23:01,366 INFO [train.py:901] (1/4) Epoch 23, batch 3350, loss[loss=0.2375, simple_loss=0.313, pruned_loss=0.08103, over 8617.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2873, pruned_loss=0.06103, over 1614568.35 frames. ], batch size: 49, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:23:10,453 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:23:14,980 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.358e+02 3.053e+02 3.666e+02 9.674e+02, threshold=6.107e+02, percent-clipped=1.0
+2023-02-07 05:23:26,411 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181209.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:23:38,227 INFO [train.py:901] (1/4) Epoch 23, batch 3400, loss[loss=0.2131, simple_loss=0.3018, pruned_loss=0.06217, over 8108.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2878, pruned_loss=0.06171, over 1615371.55 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:23:55,608 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3642, 2.1088, 2.7779, 2.3249, 2.6058, 2.4109, 2.1935, 1.4884],
+ device='cuda:1'), covar=tensor([0.5328, 0.4759, 0.1746, 0.3373, 0.2396, 0.2779, 0.1785, 0.5086],
+ device='cuda:1'), in_proj_covar=tensor([0.0944, 0.0987, 0.0811, 0.0954, 0.0998, 0.0902, 0.0753, 0.0830],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 05:24:04,473 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7128, 1.9709, 2.1454, 1.4186, 2.2681, 1.5712, 0.6536, 1.9679],
+ device='cuda:1'), covar=tensor([0.0624, 0.0375, 0.0299, 0.0643, 0.0431, 0.0923, 0.0913, 0.0320],
+ device='cuda:1'), in_proj_covar=tensor([0.0453, 0.0392, 0.0345, 0.0448, 0.0379, 0.0534, 0.0391, 0.0421],
+ device='cuda:1'), out_proj_covar=tensor([1.2093e-04, 1.0258e-04, 9.0627e-05, 1.1771e-04, 9.9600e-05, 1.5055e-04,
+ 1.0540e-04, 1.1130e-04], device='cuda:1')
+2023-02-07 05:24:13,226 INFO [train.py:901] (1/4) Epoch 23, batch 3450, loss[loss=0.2464, simple_loss=0.3193, pruned_loss=0.08675, over 8509.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2881, pruned_loss=0.06256, over 1612234.84 frames. ], batch size: 26, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:24:27,416 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.466e+02 2.960e+02 3.783e+02 8.296e+02, threshold=5.920e+02, percent-clipped=4.0
+2023-02-07 05:24:29,052 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8675, 1.7566, 2.7410, 2.2289, 2.4296, 1.9243, 1.6259, 1.2888],
+ device='cuda:1'), covar=tensor([0.6701, 0.6057, 0.1901, 0.3597, 0.2800, 0.3972, 0.2925, 0.5232],
+ device='cuda:1'), in_proj_covar=tensor([0.0943, 0.0988, 0.0811, 0.0954, 0.0998, 0.0901, 0.0754, 0.0830],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 05:24:32,993 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:24:33,730 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181303.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:24:42,795 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181315.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:24:49,550 INFO [train.py:901] (1/4) Epoch 23, batch 3500, loss[loss=0.1936, simple_loss=0.2803, pruned_loss=0.05345, over 8467.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06191, over 1616434.72 frames. ], batch size: 25, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:24:52,759 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181328.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:25:04,317 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.0298, 1.6438, 6.0965, 2.2129, 5.5235, 5.0960, 5.6356, 5.5251],
+ device='cuda:1'), covar=tensor([0.0348, 0.4771, 0.0338, 0.3791, 0.0863, 0.0798, 0.0407, 0.0450],
+ device='cuda:1'), in_proj_covar=tensor([0.0639, 0.0648, 0.0705, 0.0637, 0.0715, 0.0612, 0.0609, 0.0685],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:25:07,625 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-07 05:25:25,296 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2044, 4.0026, 2.5114, 3.0954, 3.0421, 2.2031, 3.0388, 3.1930],
+ device='cuda:1'), covar=tensor([0.1606, 0.0290, 0.1073, 0.0704, 0.0773, 0.1507, 0.1011, 0.0998],
+ device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0232, 0.0335, 0.0308, 0.0299, 0.0337, 0.0343, 0.0315],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 05:25:25,798 INFO [train.py:901] (1/4) Epoch 23, batch 3550, loss[loss=0.171, simple_loss=0.2605, pruned_loss=0.04079, over 7253.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2881, pruned_loss=0.06181, over 1614380.23 frames. ], batch size: 16, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:25:39,016 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.351e+02 2.882e+02 3.469e+02 9.271e+02, threshold=5.765e+02, percent-clipped=2.0
+2023-02-07 05:26:01,200 INFO [train.py:901] (1/4) Epoch 23, batch 3600, loss[loss=0.1954, simple_loss=0.2837, pruned_loss=0.05358, over 7643.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.06175, over 1612932.22 frames. ], batch size: 19, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:26:30,512 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181465.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:26:37,787 INFO [train.py:901] (1/4) Epoch 23, batch 3650, loss[loss=0.1781, simple_loss=0.2546, pruned_loss=0.0508, over 7527.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2869, pruned_loss=0.06174, over 1605183.29 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:26:48,288 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:26:50,921 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.424e+02 2.919e+02 3.720e+02 6.119e+02, threshold=5.839e+02, percent-clipped=1.0
+2023-02-07 05:27:11,146 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 05:27:12,455 INFO [train.py:901] (1/4) Epoch 23, batch 3700, loss[loss=0.2203, simple_loss=0.3053, pruned_loss=0.06763, over 8108.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2888, pruned_loss=0.06246, over 1607629.78 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0
+2023-02-07 05:27:30,282 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:27:37,416 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:27:49,548 INFO [train.py:901] (1/4) Epoch 23, batch 3750, loss[loss=0.1703, simple_loss=0.2561, pruned_loss=0.04224, over 7816.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.287, pruned_loss=0.06171, over 1605789.43 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:27:55,389 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:28:02,824 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.354e+02 2.844e+02 3.677e+02 7.170e+02, threshold=5.688e+02, percent-clipped=4.0
+2023-02-07 05:28:24,874 INFO [train.py:901] (1/4) Epoch 23, batch 3800, loss[loss=0.2616, simple_loss=0.325, pruned_loss=0.09911, over 7091.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2857, pruned_loss=0.06101, over 1601753.41 frames. ], batch size: 71, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:28:49,195 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181659.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:29:00,794 INFO [train.py:901] (1/4) Epoch 23, batch 3850, loss[loss=0.2081, simple_loss=0.3018, pruned_loss=0.05719, over 8319.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2859, pruned_loss=0.061, over 1599656.10 frames. ], batch size: 26, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:29:14,851 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 2.361e+02 2.900e+02 3.650e+02 9.007e+02, threshold=5.800e+02, percent-clipped=7.0
+2023-02-07 05:29:22,382 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 05:29:25,354 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2212, 3.5709, 2.0872, 3.0323, 2.7774, 1.7247, 2.8326, 3.1895],
+ device='cuda:1'), covar=tensor([0.1726, 0.0440, 0.1441, 0.0761, 0.0868, 0.2064, 0.1287, 0.1079],
+ device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0232, 0.0337, 0.0310, 0.0300, 0.0338, 0.0343, 0.0317],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 05:29:34,173 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0397, 1.8269, 2.3383, 2.0465, 2.2396, 2.1222, 1.8820, 1.1147],
+ device='cuda:1'), covar=tensor([0.5199, 0.4334, 0.1792, 0.2976, 0.2086, 0.2690, 0.1839, 0.4637],
+ device='cuda:1'), in_proj_covar=tensor([0.0949, 0.0995, 0.0816, 0.0957, 0.1004, 0.0906, 0.0758, 0.0834],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 05:29:36,630 INFO [train.py:901] (1/4) Epoch 23, batch 3900, loss[loss=0.2092, simple_loss=0.2936, pruned_loss=0.0624, over 8097.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06117, over 1605010.42 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:30:06,235 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5939, 4.5900, 4.1627, 2.2741, 4.0296, 4.2370, 4.1939, 4.0889],
+ device='cuda:1'), covar=tensor([0.0675, 0.0501, 0.0902, 0.4605, 0.0836, 0.0941, 0.1089, 0.0645],
+ device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0441, 0.0430, 0.0540, 0.0429, 0.0444, 0.0428, 0.0388],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:30:07,660 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6652, 1.4565, 2.7992, 1.4431, 2.1895, 3.0333, 3.1721, 2.6004],
+ device='cuda:1'), covar=tensor([0.1234, 0.1722, 0.0413, 0.2018, 0.0919, 0.0328, 0.0675, 0.0579],
+ device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0321, 0.0286, 0.0315, 0.0310, 0.0268, 0.0422, 0.0303],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 05:30:10,464 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181774.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:30:10,998 INFO [train.py:901] (1/4) Epoch 23, batch 3950, loss[loss=0.2109, simple_loss=0.3005, pruned_loss=0.06065, over 8530.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2858, pruned_loss=0.06103, over 1606715.35 frames. ], batch size: 28, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:30:26,257 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.359e+02 2.788e+02 3.393e+02 6.824e+02, threshold=5.575e+02, percent-clipped=4.0
+2023-02-07 05:30:47,699 INFO [train.py:901] (1/4) Epoch 23, batch 4000, loss[loss=0.1889, simple_loss=0.2808, pruned_loss=0.04847, over 8455.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.06141, over 1613134.73 frames. ], batch size: 29, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:31:22,582 INFO [train.py:901] (1/4) Epoch 23, batch 4050, loss[loss=0.1986, simple_loss=0.2805, pruned_loss=0.05839, over 8196.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06075, over 1611244.06 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:31:26,927 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5871, 1.6254, 2.1247, 1.3892, 1.3392, 2.0868, 0.3907, 1.2913],
+ device='cuda:1'), covar=tensor([0.1834, 0.1299, 0.0418, 0.1137, 0.2437, 0.0411, 0.1872, 0.1276],
+ device='cuda:1'), in_proj_covar=tensor([0.0192, 0.0200, 0.0129, 0.0220, 0.0270, 0.0137, 0.0169, 0.0192],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 05:31:34,364 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181892.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:31:35,714 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.508e+02 2.885e+02 3.954e+02 8.020e+02, threshold=5.770e+02, percent-clipped=6.0
+2023-02-07 05:31:59,841 INFO [train.py:901] (1/4) Epoch 23, batch 4100, loss[loss=0.1921, simple_loss=0.2679, pruned_loss=0.05814, over 8089.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06137, over 1616079.49 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 16.0
+2023-02-07 05:32:31,903 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3377, 2.3333, 1.6106, 2.1035, 1.8915, 1.3763, 1.8177, 1.9079],
+ device='cuda:1'), covar=tensor([0.1666, 0.0422, 0.1421, 0.0672, 0.0786, 0.1709, 0.1114, 0.1060],
+ device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0235, 0.0339, 0.0311, 0.0302, 0.0339, 0.0346, 0.0319],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 05:32:34,925 INFO [train.py:901] (1/4) Epoch 23, batch 4150, loss[loss=0.1527, simple_loss=0.2323, pruned_loss=0.03657, over 7528.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2878, pruned_loss=0.06125, over 1616099.29 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:32:48,432 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.356e+02 2.929e+02 3.956e+02 6.697e+02, threshold=5.858e+02, percent-clipped=3.0
+2023-02-07 05:32:49,335 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181996.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:32:58,731 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:33:11,753 INFO [train.py:901] (1/4) Epoch 23, batch 4200, loss[loss=0.2018, simple_loss=0.2812, pruned_loss=0.06117, over 7425.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06133, over 1614447.83 frames. ], batch size: 17, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:33:16,193 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182030.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:33:25,773 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 05:33:33,427 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182055.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:33:47,594 INFO [train.py:901] (1/4) Epoch 23, batch 4250, loss[loss=0.2031, simple_loss=0.2933, pruned_loss=0.05646, over 8505.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2874, pruned_loss=0.06068, over 1619471.14 frames. ], batch size: 28, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:33:49,017 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 05:34:01,333 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.416e+02 2.989e+02 3.588e+02 6.339e+02, threshold=5.979e+02, percent-clipped=2.0
+2023-02-07 05:34:06,319 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5780, 2.4502, 3.1736, 2.4753, 3.1113, 2.5812, 2.3594, 1.8804],
+ device='cuda:1'), covar=tensor([0.5181, 0.5086, 0.2036, 0.4096, 0.2605, 0.3113, 0.1971, 0.5664],
+ device='cuda:1'), in_proj_covar=tensor([0.0942, 0.0989, 0.0811, 0.0950, 0.0996, 0.0899, 0.0750, 0.0826],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 05:34:22,741 INFO [train.py:901] (1/4) Epoch 23, batch 4300, loss[loss=0.2053, simple_loss=0.3001, pruned_loss=0.05527, over 8590.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2876, pruned_loss=0.06099, over 1614544.76 frames. ], batch size: 39, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:34:25,780 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8207, 1.5598, 3.3471, 1.4006, 2.4920, 3.6855, 3.8281, 3.1344],
+ device='cuda:1'), covar=tensor([0.1285, 0.1868, 0.0370, 0.2175, 0.1003, 0.0242, 0.0503, 0.0585],
+ device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0320, 0.0286, 0.0315, 0.0311, 0.0268, 0.0422, 0.0303],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 05:34:57,816 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0
+2023-02-07 05:34:58,715 INFO [train.py:901] (1/4) Epoch 23, batch 4350, loss[loss=0.1809, simple_loss=0.2608, pruned_loss=0.0505, over 7809.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2872, pruned_loss=0.06074, over 1613243.23 frames. ], batch size: 20, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:35:13,489 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.346e+02 2.960e+02 3.931e+02 9.702e+02, threshold=5.919e+02, percent-clipped=9.0
+2023-02-07 05:35:21,961 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-07 05:35:34,686 INFO [train.py:901] (1/4) Epoch 23, batch 4400, loss[loss=0.1926, simple_loss=0.2817, pruned_loss=0.0517, over 8328.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2868, pruned_loss=0.06047, over 1610501.15 frames. ], batch size: 25, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:35:46,106 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8335, 1.9702, 1.7674, 2.4912, 1.2767, 1.5192, 1.9156, 2.0310],
+ device='cuda:1'), covar=tensor([0.0754, 0.0770, 0.0884, 0.0428, 0.1055, 0.1280, 0.0746, 0.0758],
+ device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0199, 0.0247, 0.0215, 0.0208, 0.0249, 0.0252, 0.0210],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 05:35:49,489 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7660, 1.5343, 5.8988, 2.1540, 5.2567, 4.9360, 5.4342, 5.3374],
+ device='cuda:1'), covar=tensor([0.0468, 0.5373, 0.0346, 0.4134, 0.1007, 0.0859, 0.0479, 0.0516],
+ device='cuda:1'), in_proj_covar=tensor([0.0641, 0.0652, 0.0709, 0.0642, 0.0719, 0.0615, 0.0614, 0.0688],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:36:03,262 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:36:05,080 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-07 05:36:11,449 INFO [train.py:901] (1/4) Epoch 23, batch 4450, loss[loss=0.2072, simple_loss=0.2823, pruned_loss=0.06604, over 8354.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.06136, over 1610019.89 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:36:20,486 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182288.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:36:26,032 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.606e+02 3.225e+02 4.349e+02 9.132e+02, threshold=6.449e+02, percent-clipped=7.0
+2023-02-07 05:36:28,977 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182299.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:36:44,236 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0
+2023-02-07 05:36:47,059 INFO [train.py:901] (1/4) Epoch 23, batch 4500, loss[loss=0.2118, simple_loss=0.2934, pruned_loss=0.0651, over 6819.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2876, pruned_loss=0.06127, over 1611552.67 frames. ], batch size: 15, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:36:54,401 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-07 05:36:56,811 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-07 05:36:57,582 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182340.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:37:16,311 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0052, 2.3616, 1.9434, 3.0066, 1.5857, 1.7021, 2.3280, 2.4403],
+ device='cuda:1'), covar=tensor([0.0736, 0.0724, 0.0825, 0.0309, 0.1011, 0.1237, 0.0741, 0.0733],
+ device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0199, 0.0247, 0.0216, 0.0208, 0.0249, 0.0252, 0.0210],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 05:37:23,617 INFO [train.py:901] (1/4) Epoch 23, batch 4550, loss[loss=0.2591, simple_loss=0.3418, pruned_loss=0.08817, over 8110.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.288, pruned_loss=0.06123, over 1617070.62 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:37:34,406 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-07 05:37:37,490 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.347e+02 2.810e+02 3.651e+02 9.685e+02, threshold=5.619e+02, percent-clipped=2.0
+2023-02-07 05:37:59,265 INFO [train.py:901] (1/4) Epoch 23, batch 4600, loss[loss=0.1957, simple_loss=0.2854, pruned_loss=0.05299, over 8553.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2871, pruned_loss=0.06067, over 1612339.69 frames. ], batch size: 31, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:38:20,349 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182455.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:38:34,844 INFO [train.py:901] (1/4) Epoch 23, batch 4650, loss[loss=0.2049, simple_loss=0.2968, pruned_loss=0.05649, over 8443.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2856, pruned_loss=0.06042, over 1609604.87 frames. ], batch size: 27, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:38:49,709 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-02-07 05:38:50,627 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.204e+02 2.647e+02 3.638e+02 6.712e+02, threshold=5.294e+02, percent-clipped=7.0
+2023-02-07 05:38:54,471 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-07 05:39:12,429 INFO [train.py:901] (1/4) Epoch 23, batch 4700, loss[loss=0.1887, simple_loss=0.2726, pruned_loss=0.05238, over 8464.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.06078, over 1612816.67 frames. ], batch size: 49, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:39:17,386 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:39:47,032 INFO [train.py:901] (1/4) Epoch 23, batch 4750, loss[loss=0.163, simple_loss=0.2483, pruned_loss=0.0389, over 7656.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2855, pruned_loss=0.0601, over 1617656.73 frames. ], batch size: 19, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:40:01,599 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.297e+02 2.902e+02 3.418e+02 7.225e+02, threshold=5.805e+02, percent-clipped=3.0
+2023-02-07 05:40:05,946 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-07 05:40:08,834 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-07 05:40:24,093 INFO [train.py:901] (1/4) Epoch 23, batch 4800, loss[loss=0.2019, simple_loss=0.277, pruned_loss=0.06335, over 7925.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2847, pruned_loss=0.05958, over 1617298.60 frames. ], batch size: 20, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:40:36,434 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:40:59,178 INFO [train.py:901] (1/4) Epoch 23, batch 4850, loss[loss=0.2018, simple_loss=0.2936, pruned_loss=0.05504, over 8698.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2848, pruned_loss=0.05943, over 1612818.34 frames. ], batch size: 39, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:41:00,621 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-07 05:41:13,255 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.288e+02 2.781e+02 3.814e+02 7.165e+02, threshold=5.562e+02, percent-clipped=4.0
+2023-02-07 05:41:25,673 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182711.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:41:36,253 INFO [train.py:901] (1/4) Epoch 23, batch 4900, loss[loss=0.2117, simple_loss=0.3028, pruned_loss=0.06028, over 8102.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2852, pruned_loss=0.05986, over 1615300.33 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:41:40,414 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6260, 5.7300, 5.0325, 2.3131, 5.0346, 5.3626, 5.1865, 5.2411],
+ device='cuda:1'), covar=tensor([0.0554, 0.0358, 0.0819, 0.4773, 0.0661, 0.0651, 0.1033, 0.0539],
+ device='cuda:1'), in_proj_covar=tensor([0.0524, 0.0438, 0.0426, 0.0535, 0.0424, 0.0442, 0.0425, 0.0384],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:41:44,975 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182736.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:42:00,357 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182758.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:42:12,971 INFO [train.py:901] (1/4) Epoch 23, batch 4950, loss[loss=0.1809, simple_loss=0.2619, pruned_loss=0.04997, over 5964.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.05937, over 1614133.51 frames. ], batch size: 13, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:42:27,044 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.423e+02 2.989e+02 3.745e+02 1.524e+03, threshold=5.977e+02, percent-clipped=7.0
+2023-02-07 05:42:48,229 INFO [train.py:901] (1/4) Epoch 23, batch 5000, loss[loss=0.2148, simple_loss=0.2969, pruned_loss=0.06633, over 8501.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2841, pruned_loss=0.05938, over 1614824.89 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:43:13,253 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9065, 1.5999, 3.4970, 1.5482, 2.4915, 3.9205, 3.9709, 3.3557],
+ device='cuda:1'), covar=tensor([0.1162, 0.1695, 0.0318, 0.1969, 0.1069, 0.0203, 0.0420, 0.0491],
+ device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0320, 0.0286, 0.0315, 0.0312, 0.0268, 0.0423, 0.0303],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 05:43:25,214 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:43:25,712 INFO [train.py:901] (1/4) Epoch 23, batch 5050, loss[loss=0.1968, simple_loss=0.2574, pruned_loss=0.06814, over 7423.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2849, pruned_loss=0.05995, over 1615431.56 frames. ], batch size: 17, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:43:26,530 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:43:29,515 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:43:40,728 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.376e+02 2.932e+02 3.646e+02 6.966e+02, threshold=5.864e+02, percent-clipped=3.0
+2023-02-07 05:43:46,359 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-07 05:43:57,144 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2220, 1.9110, 2.5291, 2.1668, 2.5196, 2.2156, 2.0310, 1.3430],
+ device='cuda:1'), covar=tensor([0.5403, 0.4861, 0.2019, 0.3334, 0.2141, 0.2907, 0.1867, 0.5034],
+ device='cuda:1'), in_proj_covar=tensor([0.0943, 0.0989, 0.0809, 0.0951, 0.0997, 0.0900, 0.0752, 0.0831],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 05:44:01,709 INFO [train.py:901] (1/4) Epoch 23, batch 5100, loss[loss=0.1555, simple_loss=0.2306, pruned_loss=0.04017, over 7703.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2847, pruned_loss=0.0596, over 1615574.91 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:44:03,387 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-07 05:44:38,859 INFO [train.py:901] (1/4) Epoch 23, batch 5150, loss[loss=0.225, simple_loss=0.3095, pruned_loss=0.0702, over 8621.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2849, pruned_loss=0.06006, over 1617445.16 frames. ], batch size: 39, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:44:50,225 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182991.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:44:53,605 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 2.409e+02 2.843e+02 3.449e+02 6.604e+02, threshold=5.686e+02, percent-clipped=1.0
+2023-02-07 05:44:57,415 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6078, 1.4211, 1.5942, 1.3923, 0.8782, 1.4207, 1.4813, 1.3737],
+ device='cuda:1'), covar=tensor([0.0593, 0.1231, 0.1664, 0.1399, 0.0616, 0.1440, 0.0725, 0.0645],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0143],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 05:45:03,992 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-02-07 05:45:07,243 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183014.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:45:14,581 INFO [train.py:901] (1/4) Epoch 23, batch 5200, loss[loss=0.2486, simple_loss=0.3278, pruned_loss=0.08466, over 8100.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2859, pruned_loss=0.06056, over 1615151.69 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0
+2023-02-07 05:45:24,446 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183039.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:45:46,583 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-07 05:45:50,598 INFO [train.py:901] (1/4) Epoch 23, batch 5250, loss[loss=0.2334, simple_loss=0.3089, pruned_loss=0.07893, over 8197.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06124, over 1612956.23 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:45:53,706 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.78 vs. limit=5.0
+2023-02-07 05:46:05,167 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.492e+02 2.942e+02 3.798e+02 7.403e+02, threshold=5.885e+02, percent-clipped=3.0
+2023-02-07 05:46:27,071 INFO [train.py:901] (1/4) Epoch 23, batch 5300, loss[loss=0.2016, simple_loss=0.2766, pruned_loss=0.06337, over 8237.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2863, pruned_loss=0.06081, over 1613212.16 frames. ], batch size: 22, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:46:43,509 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0
+2023-02-07 05:47:02,889 INFO [train.py:901] (1/4) Epoch 23, batch 5350, loss[loss=0.2002, simple_loss=0.272, pruned_loss=0.06422, over 7292.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2844, pruned_loss=0.05988, over 1612857.32 frames. ], batch size: 16, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:47:03,829 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4914, 1.4552, 1.8480, 1.2360, 1.1998, 1.8458, 0.2606, 1.1285],
+ device='cuda:1'), covar=tensor([0.1633, 0.1142, 0.0357, 0.0954, 0.2428, 0.0374, 0.1847, 0.1129],
+ device='cuda:1'), in_proj_covar=tensor([0.0189, 0.0195, 0.0127, 0.0216, 0.0266, 0.0134, 0.0167, 0.0190],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 05:47:17,713 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.502e+02 3.193e+02 3.793e+02 7.809e+02, threshold=6.385e+02, percent-clipped=1.0
+2023-02-07 05:47:34,660 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183218.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:47:38,840 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:47:39,484 INFO [train.py:901] (1/4) Epoch 23, batch 5400, loss[loss=0.1989, simple_loss=0.2895, pruned_loss=0.05411, over 8530.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2857, pruned_loss=0.06082, over 1612790.57 frames. ], batch size: 28, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:47:55,892 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:48:13,010 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183272.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:48:14,972 INFO [train.py:901] (1/4) Epoch 23, batch 5450, loss[loss=0.1735, simple_loss=0.27, pruned_loss=0.03854, over 7986.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06081, over 1619768.11 frames. ], batch size: 21, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:48:15,142 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0931, 1.6122, 4.4462, 2.0774, 2.5881, 5.1154, 5.1486, 4.3618],
+ device='cuda:1'), covar=tensor([0.1240, 0.1838, 0.0268, 0.1845, 0.1125, 0.0174, 0.0305, 0.0537],
+ device='cuda:1'), in_proj_covar=tensor([0.0294, 0.0318, 0.0284, 0.0313, 0.0311, 0.0266, 0.0421, 0.0301],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 05:48:17,364 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3955, 1.6473, 2.1791, 1.2767, 1.4916, 1.6661, 1.4975, 1.4781],
+ device='cuda:1'), covar=tensor([0.1973, 0.2457, 0.0929, 0.4482, 0.1892, 0.3403, 0.2380, 0.2327],
+ device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0608, 0.0555, 0.0647, 0.0648, 0.0594, 0.0540, 0.0631],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:48:27,836 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4852, 2.7657, 2.1903, 3.7724, 1.4979, 2.0439, 2.3803, 2.7411],
+ device='cuda:1'), covar=tensor([0.0656, 0.0836, 0.0790, 0.0258, 0.1113, 0.1170, 0.0965, 0.0761],
+ device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0196, 0.0244, 0.0213, 0.0205, 0.0244, 0.0249, 0.0205],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 05:48:30,406 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.334e+02 2.819e+02 3.622e+02 6.725e+02, threshold=5.637e+02, percent-clipped=1.0
+2023-02-07 05:48:41,158 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-07 05:48:52,618 INFO [train.py:901] (1/4) Epoch 23, batch 5500, loss[loss=0.1879, simple_loss=0.267, pruned_loss=0.05442, over 7919.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2856, pruned_loss=0.06061, over 1614541.74 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:48:58,339 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183333.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:49:02,360 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183339.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:49:27,067 INFO [train.py:901] (1/4) Epoch 23, batch 5550, loss[loss=0.2029, simple_loss=0.2941, pruned_loss=0.05588, over 8107.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2863, pruned_loss=0.06132, over 1615418.64 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 4.0
+2023-02-07 05:49:41,517 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.428e+02 3.119e+02 4.010e+02 1.058e+03, threshold=6.238e+02, percent-clipped=9.0
+2023-02-07 05:50:03,271 INFO [train.py:901] (1/4) Epoch 23, batch 5600, loss[loss=0.2007, simple_loss=0.3052, pruned_loss=0.04811, over 8473.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.286, pruned_loss=0.0608, over 1614718.53 frames. ], batch size: 28, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:50:04,084 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:50:39,035 INFO [train.py:901] (1/4) Epoch 23, batch 5650, loss[loss=0.1875, simple_loss=0.2763, pruned_loss=0.04932, over 7654.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2864, pruned_loss=0.0614, over 1609735.66 frames. ], batch size: 19, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:50:51,436 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-07 05:50:53,299 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.300e+02 3.084e+02 3.921e+02 7.530e+02, threshold=6.168e+02, percent-clipped=4.0
+2023-02-07 05:51:14,120 INFO [train.py:901] (1/4) Epoch 23, batch 5700, loss[loss=0.1897, simple_loss=0.2706, pruned_loss=0.05443, over 8671.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2859, pruned_loss=0.06102, over 1611001.45 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:51:38,825 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1624, 2.2697, 1.8468, 2.7772, 1.3926, 1.7085, 2.1133, 2.3028],
+ device='cuda:1'), covar=tensor([0.0605, 0.0685, 0.0779, 0.0372, 0.1003, 0.1221, 0.0786, 0.0671],
+ device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0198, 0.0247, 0.0215, 0.0208, 0.0248, 0.0252, 0.0208],
+ device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:1')
+2023-02-07 05:51:50,330 INFO [train.py:901] (1/4) Epoch 23, batch 5750, loss[loss=0.2031, simple_loss=0.2806, pruned_loss=0.0628, over 8589.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.0616, over 1613910.74 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:51:57,138 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-07 05:52:00,832 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183589.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:03,077 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-07 05:52:05,021 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:05,469 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.334e+02 3.030e+02 3.740e+02 1.347e+03, threshold=6.060e+02, percent-clipped=7.0
+2023-02-07 05:52:17,390 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7034, 2.0359, 2.1206, 1.2283, 2.2074, 1.4760, 0.7174, 1.9813],
+ device='cuda:1'), covar=tensor([0.0595, 0.0335, 0.0295, 0.0615, 0.0424, 0.0855, 0.0848, 0.0288],
+ device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0397, 0.0349, 0.0451, 0.0384, 0.0538, 0.0395, 0.0426],
+ device='cuda:1'), out_proj_covar=tensor([1.2229e-04, 1.0367e-04, 9.1667e-05, 1.1846e-04, 1.0105e-04, 1.5145e-04,
+ 1.0654e-04, 1.1239e-04], device='cuda:1')
+2023-02-07 05:52:18,051 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183614.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:22,089 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183620.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:52:25,347 INFO [train.py:901] (1/4) Epoch 23, batch 5800, loss[loss=0.2121, simple_loss=0.2976, pruned_loss=0.06327, over 8245.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2868, pruned_loss=0.06111, over 1615904.47 frames. ], batch size: 24, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:52:30,169 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:53:01,092 INFO [train.py:901] (1/4) Epoch 23, batch 5850, loss[loss=0.198, simple_loss=0.2678, pruned_loss=0.06405, over 7978.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06122, over 1613893.21 frames. ], batch size: 21, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:53:16,205 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 2.367e+02 2.798e+02 3.640e+02 5.951e+02, threshold=5.597e+02, percent-clipped=0.0
+2023-02-07 05:53:36,759 INFO [train.py:901] (1/4) Epoch 23, batch 5900, loss[loss=0.1876, simple_loss=0.2721, pruned_loss=0.05159, over 8286.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2855, pruned_loss=0.06056, over 1613892.93 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:53:58,150 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0570, 1.6954, 1.8869, 1.5509, 1.0154, 1.6449, 1.8560, 1.8236],
+ device='cuda:1'), covar=tensor([0.0537, 0.1201, 0.1528, 0.1375, 0.0569, 0.1341, 0.0642, 0.0597],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0188, 0.0159, 0.0100, 0.0161, 0.0112, 0.0143],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:1')
+2023-02-07 05:54:08,425 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183770.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:54:11,757 INFO [train.py:901] (1/4) Epoch 23, batch 5950, loss[loss=0.1913, simple_loss=0.2726, pruned_loss=0.05496, over 7554.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2861, pruned_loss=0.06062, over 1612000.62 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:54:27,013 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 2.784e+02 3.423e+02 5.836e+02, threshold=5.567e+02, percent-clipped=2.0
+2023-02-07 05:54:47,616 INFO [train.py:901] (1/4) Epoch 23, batch 6000, loss[loss=0.1727, simple_loss=0.2592, pruned_loss=0.04311, over 7976.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2884, pruned_loss=0.06221, over 1614709.30 frames. ], batch size: 21, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:54:47,616 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-07 05:55:00,698 INFO [train.py:935] (1/4) Epoch 23, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03597, over 944034.00 frames.
+2023-02-07 05:55:00,699 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-07 05:55:25,807 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183860.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:55:36,139 INFO [train.py:901] (1/4) Epoch 23, batch 6050, loss[loss=0.1966, simple_loss=0.2863, pruned_loss=0.05344, over 8497.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2868, pruned_loss=0.06149, over 1612210.35 frames. ], batch size: 26, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:55:43,238 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183885.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:55:50,620 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.465e+02 3.097e+02 3.782e+02 8.398e+02, threshold=6.194e+02, percent-clipped=6.0
+2023-02-07 05:56:11,855 INFO [train.py:901] (1/4) Epoch 23, batch 6100, loss[loss=0.162, simple_loss=0.2496, pruned_loss=0.03719, over 8145.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2867, pruned_loss=0.06202, over 1609817.36 frames. ], batch size: 22, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:56:21,661 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-07 05:56:23,561 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8748, 3.8036, 3.5139, 1.8577, 3.4154, 3.4468, 3.4886, 3.3586],
+ device='cuda:1'), covar=tensor([0.0799, 0.0626, 0.1003, 0.4276, 0.0896, 0.1147, 0.1209, 0.0815],
+ device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0443, 0.0431, 0.0539, 0.0430, 0.0446, 0.0428, 0.0386],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:56:32,462 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-07 05:56:47,365 INFO [train.py:901] (1/4) Epoch 23, batch 6150, loss[loss=0.201, simple_loss=0.2815, pruned_loss=0.0602, over 7654.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2866, pruned_loss=0.06161, over 1610503.63 frames. ], batch size: 19, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:56:48,167 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183976.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:56:51,715 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9571, 1.6160, 3.4409, 1.6052, 2.4267, 3.7815, 3.9057, 3.2153],
+ device='cuda:1'), covar=tensor([0.1136, 0.1653, 0.0297, 0.1996, 0.0995, 0.0219, 0.0471, 0.0514],
+ device='cuda:1'), in_proj_covar=tensor([0.0293, 0.0317, 0.0282, 0.0312, 0.0308, 0.0265, 0.0419, 0.0300],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 05:56:59,934 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5399, 1.5383, 4.7718, 1.8009, 4.1950, 3.9326, 4.3185, 4.2067],
+ device='cuda:1'), covar=tensor([0.0590, 0.4971, 0.0469, 0.4341, 0.1049, 0.1054, 0.0536, 0.0621],
+ device='cuda:1'), in_proj_covar=tensor([0.0646, 0.0654, 0.0708, 0.0643, 0.0723, 0.0619, 0.0618, 0.0693],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:57:01,785 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.512e+02 2.876e+02 3.577e+02 6.799e+02, threshold=5.752e+02, percent-clipped=2.0
+2023-02-07 05:57:22,984 INFO [train.py:901] (1/4) Epoch 23, batch 6200, loss[loss=0.1753, simple_loss=0.2574, pruned_loss=0.04659, over 7804.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2863, pruned_loss=0.06171, over 1604405.41 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:57:23,224 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9698, 1.6131, 1.4881, 1.5602, 1.3583, 1.3222, 1.2908, 1.2847],
+ device='cuda:1'), covar=tensor([0.1315, 0.0502, 0.1362, 0.0629, 0.0851, 0.1641, 0.1023, 0.0850],
+ device='cuda:1'), in_proj_covar=tensor([0.0351, 0.0231, 0.0332, 0.0306, 0.0298, 0.0338, 0.0341, 0.0315],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-07 05:57:54,991 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184068.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:57:59,615 INFO [train.py:901] (1/4) Epoch 23, batch 6250, loss[loss=0.2016, simple_loss=0.2868, pruned_loss=0.05821, over 8522.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2856, pruned_loss=0.06055, over 1608443.23 frames. ], batch size: 28, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:58:04,010 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4747, 1.4138, 1.6407, 1.2766, 0.9549, 1.4012, 1.4555, 1.3332],
+ device='cuda:1'), covar=tensor([0.0633, 0.1263, 0.1619, 0.1497, 0.0597, 0.1510, 0.0749, 0.0696],
+ device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:1')
+2023-02-07 05:58:06,672 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5495, 1.8781, 2.7451, 1.3736, 2.0625, 1.8467, 1.5765, 1.9601],
+ device='cuda:1'), covar=tensor([0.2042, 0.2463, 0.0901, 0.4559, 0.1819, 0.3277, 0.2404, 0.2307],
+ device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0611, 0.0554, 0.0647, 0.0647, 0.0594, 0.0542, 0.0631],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 05:58:11,443 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184091.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:58:14,642 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.340e+02 2.866e+02 3.425e+02 5.984e+02, threshold=5.731e+02, percent-clipped=3.0
+2023-02-07 05:58:34,480 INFO [train.py:901] (1/4) Epoch 23, batch 6300, loss[loss=0.1601, simple_loss=0.2316, pruned_loss=0.04429, over 7436.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2849, pruned_loss=0.05985, over 1611282.23 frames. ], batch size: 17, lr: 3.27e-03, grad_scale: 8.0
+2023-02-07 05:58:42,583 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0
+2023-02-07 05:58:45,749 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184141.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:59:04,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184166.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:59:10,625 INFO [train.py:901] (1/4) Epoch 23, batch 6350, loss[loss=0.1966, simple_loss=0.2861, pruned_loss=0.05355, over 8550.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2849, pruned_loss=0.06003, over 1611195.61 frames. ], batch size: 31, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 05:59:25,790 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.298e+02 2.703e+02 3.593e+02 9.198e+02, threshold=5.406e+02, percent-clipped=6.0
+2023-02-07 05:59:32,344 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184204.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 05:59:46,835 INFO [train.py:901] (1/4) Epoch 23, batch 6400, loss[loss=0.2092, simple_loss=0.2913, pruned_loss=0.06351, over 8526.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2847, pruned_loss=0.06001, over 1612392.48 frames. ], batch size: 31, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:00:22,071 INFO [train.py:901] (1/4) Epoch 23, batch 6450, loss[loss=0.1711, simple_loss=0.2552, pruned_loss=0.04353, over 7938.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.285, pruned_loss=0.06011, over 1615952.75 frames. ], batch size: 20, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:00:31,217 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.4437, 1.6025, 3.7750, 1.5870, 3.0114, 2.9289, 3.3289, 3.3377],
+ device='cuda:1'), covar=tensor([0.1761, 0.6541, 0.1602, 0.5833, 0.2655, 0.2401, 0.1505, 0.1365],
+ device='cuda:1'), in_proj_covar=tensor([0.0642, 0.0649, 0.0706, 0.0640, 0.0716, 0.0616, 0.0616, 0.0691],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 06:00:37,219 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.429e+02 3.055e+02 3.904e+02 7.071e+02, threshold=6.109e+02, percent-clipped=5.0
+2023-02-07 06:00:54,557 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184319.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:00:58,473 INFO [train.py:901] (1/4) Epoch 23, batch 6500, loss[loss=0.1952, simple_loss=0.2859, pruned_loss=0.05223, over 8322.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2867, pruned_loss=0.06106, over 1618139.84 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:01:01,451 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0
+2023-02-07 06:01:13,635 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:01:30,735 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:01:32,650 INFO [train.py:901] (1/4) Epoch 23, batch 6550, loss[loss=0.2215, simple_loss=0.3073, pruned_loss=0.0679, over 8349.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2857, pruned_loss=0.0606, over 1616919.92 frames. ], batch size: 26, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:01:34,218 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184377.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:01:48,099 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.251e+02 2.720e+02 3.518e+02 7.175e+02, threshold=5.440e+02, percent-clipped=6.0
+2023-02-07 06:01:51,594 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-07 06:02:00,074 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184412.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:02:07,271 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1496, 2.1906, 2.2651, 1.7231, 2.4306, 1.8683, 1.7796, 2.0671],
+ device='cuda:1'), covar=tensor([0.0574, 0.0384, 0.0269, 0.0579, 0.0393, 0.0570, 0.0653, 0.0397],
+ device='cuda:1'), in_proj_covar=tensor([0.0455, 0.0396, 0.0349, 0.0448, 0.0381, 0.0536, 0.0392, 0.0425],
+ device='cuda:1'), out_proj_covar=tensor([1.2144e-04, 1.0353e-04, 9.1555e-05, 1.1783e-04, 1.0023e-04, 1.5082e-04,
+ 1.0563e-04, 1.1218e-04], device='cuda:1')
+2023-02-07 06:02:09,826 INFO [train.py:901] (1/4) Epoch 23, batch 6600, loss[loss=0.1896, simple_loss=0.2697, pruned_loss=0.05479, over 7661.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2869, pruned_loss=0.06078, over 1616437.31 frames. ], batch size: 19, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:02:09,857 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-07 06:02:20,774 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0
+2023-02-07 06:02:42,918 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-07 06:02:45,212 INFO [train.py:901] (1/4) Epoch 23, batch 6650, loss[loss=0.1949, simple_loss=0.2858, pruned_loss=0.05204, over 8239.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2862, pruned_loss=0.06006, over 1618275.49 frames. ], batch size: 24, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:02:52,260 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9396, 1.6189, 3.3500, 1.5583, 2.3874, 3.6807, 3.8249, 3.1320],
+ device='cuda:1'), covar=tensor([0.1228, 0.1736, 0.0360, 0.2163, 0.1106, 0.0247, 0.0550, 0.0600],
+ device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0320, 0.0286, 0.0315, 0.0313, 0.0269, 0.0425, 0.0306],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 06:03:00,392 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.187e+02 2.636e+02 3.150e+02 7.164e+02, threshold=5.273e+02, percent-clipped=1.0
+2023-02-07 06:03:06,015 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:03:21,185 INFO [train.py:901] (1/4) Epoch 23, batch 6700, loss[loss=0.2408, simple_loss=0.3245, pruned_loss=0.07856, over 8189.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.287, pruned_loss=0.06003, over 1623202.07 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:03:22,772 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184527.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:03:56,972 INFO [train.py:901] (1/4) Epoch 23, batch 6750, loss[loss=0.1999, simple_loss=0.291, pruned_loss=0.05444, over 8203.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2869, pruned_loss=0.06013, over 1623108.13 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:03:57,214 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184575.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:04:11,515 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.299e+02 2.705e+02 3.689e+02 1.087e+03, threshold=5.410e+02, percent-clipped=6.0
+2023-02-07 06:04:14,464 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184600.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:04:30,938 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-07 06:04:32,276 INFO [train.py:901] (1/4) Epoch 23, batch 6800, loss[loss=0.2275, simple_loss=0.3033, pruned_loss=0.0759, over 7658.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2861, pruned_loss=0.06006, over 1618830.86 frames. ], batch size: 19, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:04:55,642 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 06:05:08,931 INFO [train.py:901] (1/4) Epoch 23, batch 6850, loss[loss=0.1811, simple_loss=0.2633, pruned_loss=0.04938, over 7688.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.287, pruned_loss=0.06078, over 1619308.97 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:05:19,305 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-07 06:05:23,525 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.645e+02 3.100e+02 4.179e+02 7.238e+02, threshold=6.201e+02, percent-clipped=8.0
+2023-02-07 06:05:40,684 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184721.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:05:43,472 INFO [train.py:901] (1/4) Epoch 23, batch 6900, loss[loss=0.2183, simple_loss=0.2921, pruned_loss=0.07225, over 7972.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2865, pruned_loss=0.06091, over 1618254.29 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:06:21,332 INFO [train.py:901] (1/4) Epoch 23, batch 6950, loss[loss=0.1837, simple_loss=0.2712, pruned_loss=0.04806, over 8248.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2859, pruned_loss=0.06024, over 1616417.43 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:06:27,134 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184783.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:06:29,046 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-07 06:06:35,999 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.334e+02 2.863e+02 3.573e+02 6.345e+02, threshold=5.727e+02, percent-clipped=1.0
+2023-02-07 06:06:38,088 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4702, 4.4613, 3.9944, 1.8160, 4.0032, 3.9553, 4.0221, 3.9160],
+ device='cuda:1'), covar=tensor([0.0683, 0.0493, 0.1056, 0.4770, 0.0786, 0.1155, 0.1200, 0.0863],
+ device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0447, 0.0431, 0.0544, 0.0434, 0.0448, 0.0430, 0.0389],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-07 06:06:44,107 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.36 vs. limit=5.0
+2023-02-07 06:06:44,525 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184808.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:06:56,220 INFO [train.py:901] (1/4) Epoch 23, batch 7000, loss[loss=0.1968, simple_loss=0.2902, pruned_loss=0.05172, over 8111.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2862, pruned_loss=0.06029, over 1618721.74 frames. ], batch size: 23, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:07:03,938 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184836.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:07:12,024 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:07:32,118 INFO [train.py:901] (1/4) Epoch 23, batch 7050, loss[loss=0.2166, simple_loss=0.3022, pruned_loss=0.06551, over 8632.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2852, pruned_loss=0.05985, over 1615352.11 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:07:38,578 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184884.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:07:48,060 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.247e+02 2.854e+02 3.580e+02 1.056e+03, threshold=5.709e+02, percent-clipped=4.0
+2023-02-07 06:07:53,396 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.26 vs. limit=5.0
+2023-02-07 06:08:08,158 INFO [train.py:901] (1/4) Epoch 23, batch 7100, loss[loss=0.1634, simple_loss=0.2439, pruned_loss=0.04149, over 7274.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2846, pruned_loss=0.0593, over 1611506.54 frames. ], batch size: 16, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:08:30,163 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184957.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:08:34,369 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184963.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 06:08:42,479 INFO [train.py:901] (1/4) Epoch 23, batch 7150, loss[loss=0.2059, simple_loss=0.2738, pruned_loss=0.06904, over 8084.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2855, pruned_loss=0.05978, over 1611989.53 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:08:53,463 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1773, 2.0123, 2.4781, 2.0944, 2.4095, 2.2551, 2.0753, 1.4013],
+ device='cuda:1'), covar=tensor([0.5206, 0.4400, 0.1881, 0.3592, 0.2610, 0.2833, 0.1848, 0.5048],
+ device='cuda:1'), in_proj_covar=tensor([0.0942, 0.0991, 0.0806, 0.0950, 0.0997, 0.0896, 0.0753, 0.0829],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-07 06:08:58,831 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.323e+02 2.664e+02 3.243e+02 7.163e+02, threshold=5.329e+02, percent-clipped=2.0
+2023-02-07 06:09:20,376 INFO [train.py:901] (1/4) Epoch 23, batch 7200, loss[loss=0.1992, simple_loss=0.2875, pruned_loss=0.05544, over 8457.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2855, pruned_loss=0.05968, over 1615210.77 frames. ], batch size: 29, lr: 3.26e-03, grad_scale: 8.0
+2023-02-07 06:09:29,435 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7445, 1.4521, 1.6751, 1.3252, 0.9024, 1.4286, 1.6045, 1.3578],
+ device='cuda:1'), covar=tensor([0.0574, 0.1332, 0.1718, 0.1552, 0.0627, 0.1568, 0.0736, 0.0714],
+ device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0102, 0.0164, 0.0112, 0.0145],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:1')
+2023-02-07 06:09:35,042 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9967, 1.4921, 3.4512, 1.5348, 2.4145, 3.8086, 3.9003, 3.3196],
+ device='cuda:1'), covar=tensor([0.1184, 0.1947, 0.0361, 0.2101, 0.1125, 0.0227, 0.0534, 0.0528],
+ device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0323, 0.0287, 0.0315, 0.0314, 0.0271, 0.0426, 0.0305],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:1')
+2023-02-07 06:09:35,778 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8135, 1.7558, 2.4346, 1.5608, 1.3094, 2.3993, 0.4241, 1.4573],
+ device='cuda:1'), covar=tensor([0.1598, 0.1205, 0.0350, 0.1331, 0.2780, 0.0416, 0.2165, 0.1546],
+ device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0195, 0.0129, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 06:09:44,880 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9490, 1.5007, 1.7956, 1.3050, 1.0488, 1.4556, 1.8353, 1.5230],
+ device='cuda:1'), covar=tensor([0.0523, 0.1196, 0.1634, 0.1471, 0.0590, 0.1495, 0.0651, 0.0671],
+ device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144],
+ device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:1')
+2023-02-07 06:09:44,924 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7137, 1.6993, 2.3014, 1.5079, 1.2196, 2.2867, 0.4099, 1.4273],
+ device='cuda:1'), covar=tensor([0.1628, 0.1173, 0.0376, 0.1188, 0.2851, 0.0365, 0.1966, 0.1424],
+ device='cuda:1'), in_proj_covar=tensor([0.0190, 0.0196, 0.0129, 0.0220, 0.0268, 0.0136, 0.0169, 0.0191],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:1')
+2023-02-07 06:09:54,644 INFO [train.py:901] (1/4) Epoch 23, batch 7250, loss[loss=0.2406, simple_loss=0.3258, pruned_loss=0.07773, over 8502.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.05985, over 1614865.45 frames.
], batch size: 26, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:56,904 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185078.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:06,366 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:09,704 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.341e+02 2.701e+02 3.625e+02 6.528e+02, threshold=5.401e+02, percent-clipped=8.0 +2023-02-07 06:10:25,009 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:30,397 INFO [train.py:901] (1/4) Epoch 23, batch 7300, loss[loss=0.1876, simple_loss=0.2812, pruned_loss=0.04704, over 8251.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2855, pruned_loss=0.06008, over 1610375.54 frames. ], batch size: 24, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:10:45,741 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5230, 2.3684, 3.0993, 2.4997, 2.9458, 2.5377, 2.3520, 1.9366], + device='cuda:1'), covar=tensor([0.5090, 0.4786, 0.2026, 0.3755, 0.2522, 0.2778, 0.1749, 0.5136], + device='cuda:1'), in_proj_covar=tensor([0.0939, 0.0986, 0.0805, 0.0948, 0.0995, 0.0896, 0.0751, 0.0826], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 06:10:46,962 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:04,728 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.08 vs. limit=5.0 +2023-02-07 06:11:06,505 INFO [train.py:901] (1/4) Epoch 23, batch 7350, loss[loss=0.2055, simple_loss=0.2899, pruned_loss=0.06057, over 8635.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2858, pruned_loss=0.05991, over 1616735.45 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:11:19,727 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 06:11:21,058 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.380e+02 2.863e+02 3.556e+02 7.708e+02, threshold=5.726e+02, percent-clipped=6.0 +2023-02-07 06:11:38,637 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6533, 1.6886, 2.3991, 1.5299, 1.2162, 2.3436, 0.5158, 1.4442], + device='cuda:1'), covar=tensor([0.1707, 0.1274, 0.0344, 0.1312, 0.2808, 0.0412, 0.2041, 0.1340], + device='cuda:1'), in_proj_covar=tensor([0.0189, 0.0195, 0.0128, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 06:11:38,642 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185219.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:41,242 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 06:11:42,633 INFO [train.py:901] (1/4) Epoch 23, batch 7400, loss[loss=0.175, simple_loss=0.2569, pruned_loss=0.04661, over 8085.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2863, pruned_loss=0.06027, over 1616151.81 frames. 
], batch size: 21, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:11:44,813 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185228.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:48,317 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6578, 1.5113, 3.0894, 1.4970, 2.2216, 3.3590, 3.5193, 2.8467], + device='cuda:1'), covar=tensor([0.1383, 0.1849, 0.0369, 0.2107, 0.1044, 0.0277, 0.0500, 0.0583], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0322, 0.0287, 0.0315, 0.0314, 0.0269, 0.0424, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:11:56,709 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:04,423 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 06:12:18,690 INFO [train.py:901] (1/4) Epoch 23, batch 7450, loss[loss=0.1622, simple_loss=0.2408, pruned_loss=0.04181, over 7412.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.0599, over 1615747.14 frames. ], batch size: 17, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:12:18,941 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4540, 1.8399, 2.6113, 1.3287, 1.9419, 1.8182, 1.5751, 1.9613], + device='cuda:1'), covar=tensor([0.2072, 0.2595, 0.0968, 0.4695, 0.2018, 0.3479, 0.2423, 0.2358], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0611, 0.0554, 0.0646, 0.0647, 0.0595, 0.0541, 0.0632], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:12:21,573 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 06:12:33,469 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.310e+02 2.954e+02 3.827e+02 6.869e+02, threshold=5.908e+02, percent-clipped=4.0 +2023-02-07 06:12:37,076 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:53,819 INFO [train.py:901] (1/4) Epoch 23, batch 7500, loss[loss=0.19, simple_loss=0.2841, pruned_loss=0.04794, over 8770.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2854, pruned_loss=0.05984, over 1614843.10 frames. ], batch size: 30, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:05,839 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 06:13:08,257 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185343.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:13:22,311 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7490, 1.9051, 1.6520, 2.3197, 0.9102, 1.4426, 1.6913, 1.8536], + device='cuda:1'), covar=tensor([0.0748, 0.0801, 0.0925, 0.0455, 0.1164, 0.1342, 0.0767, 0.0722], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0199, 0.0244, 0.0214, 0.0206, 0.0247, 0.0250, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 06:13:31,741 INFO [train.py:901] (1/4) Epoch 23, batch 7550, loss[loss=0.2063, simple_loss=0.2948, pruned_loss=0.05887, over 8135.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.284, pruned_loss=0.05922, over 1610059.33 frames. 
], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:39,222 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 06:13:46,072 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.461e+02 3.059e+02 3.860e+02 7.244e+02, threshold=6.118e+02, percent-clipped=3.0 +2023-02-07 06:14:00,488 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:04,572 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:06,594 INFO [train.py:901] (1/4) Epoch 23, batch 7600, loss[loss=0.1907, simple_loss=0.2864, pruned_loss=0.0475, over 8528.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.05997, over 1614843.32 frames. ], batch size: 28, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:09,580 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0 +2023-02-07 06:14:41,950 INFO [train.py:901] (1/4) Epoch 23, batch 7650, loss[loss=0.2261, simple_loss=0.3109, pruned_loss=0.07067, over 8187.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2875, pruned_loss=0.06126, over 1612492.98 frames. ], batch size: 23, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:50,197 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:54,393 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:57,808 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.478e+02 3.100e+02 3.999e+02 8.387e+02, threshold=6.200e+02, percent-clipped=6.0 +2023-02-07 06:15:17,431 INFO [train.py:901] (1/4) Epoch 23, batch 7700, loss[loss=0.1932, simple_loss=0.2853, pruned_loss=0.05052, over 8484.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2879, pruned_loss=0.06161, over 1614391.73 frames. ], batch size: 26, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:15:25,723 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:15:37,332 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 06:15:53,097 INFO [train.py:901] (1/4) Epoch 23, batch 7750, loss[loss=0.1728, simple_loss=0.2469, pruned_loss=0.04935, over 7234.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.287, pruned_loss=0.06136, over 1613260.83 frames. ], batch size: 16, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:16:08,174 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.336e+02 2.905e+02 3.607e+02 6.527e+02, threshold=5.810e+02, percent-clipped=2.0 +2023-02-07 06:16:10,501 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185599.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:15,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,328 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,832 INFO [train.py:901] (1/4) Epoch 23, batch 7800, loss[loss=0.1781, simple_loss=0.2598, pruned_loss=0.04818, over 7632.00 frames. 
], tot_loss[loss=0.2054, simple_loss=0.2879, pruned_loss=0.06148, over 1614276.97 frames. ], batch size: 19, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:01,061 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185672.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:02,897 INFO [train.py:901] (1/4) Epoch 23, batch 7850, loss[loss=0.1878, simple_loss=0.277, pruned_loss=0.04929, over 8598.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2867, pruned_loss=0.06068, over 1615966.77 frames. ], batch size: 39, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:17,284 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 2.983e+02 3.607e+02 9.941e+02, threshold=5.966e+02, percent-clipped=5.0 +2023-02-07 06:17:18,191 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185697.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:37,220 INFO [train.py:901] (1/4) Epoch 23, batch 7900, loss[loss=0.1766, simple_loss=0.2617, pruned_loss=0.04569, over 8249.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06074, over 1616953.20 frames. ], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:00,629 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-07 06:18:11,079 INFO [train.py:901] (1/4) Epoch 23, batch 7950, loss[loss=0.1852, simple_loss=0.2641, pruned_loss=0.05313, over 7937.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.286, pruned_loss=0.06067, over 1614272.16 frames. ], batch size: 20, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:12,602 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185777.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:12,816 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0 +2023-02-07 06:18:23,319 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:25,078 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.254e+02 2.775e+02 3.427e+02 8.244e+02, threshold=5.550e+02, percent-clipped=2.0 +2023-02-07 06:18:39,377 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185817.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:18:40,124 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:44,666 INFO [train.py:901] (1/4) Epoch 23, batch 8000, loss[loss=0.1813, simple_loss=0.2597, pruned_loss=0.0514, over 7243.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06111, over 1611184.98 frames. ], batch size: 16, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:48,042 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:09,858 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:18,259 INFO [train.py:901] (1/4) Epoch 23, batch 8050, loss[loss=0.1906, simple_loss=0.2659, pruned_loss=0.0577, over 7186.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2847, pruned_loss=0.06101, over 1595863.28 frames. 
], batch size: 16, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:19:26,748 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:32,782 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.662e+02 3.318e+02 4.159e+02 9.358e+02, threshold=6.635e+02, percent-clipped=7.0 +2023-02-07 06:19:52,111 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 06:19:57,779 INFO [train.py:901] (1/4) Epoch 24, batch 0, loss[loss=0.1897, simple_loss=0.2687, pruned_loss=0.05531, over 7649.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2687, pruned_loss=0.05531, over 7649.00 frames. ], batch size: 19, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:19:57,779 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 06:20:01,759 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6080, 1.3301, 1.6010, 1.3133, 0.9141, 1.3521, 1.5892, 1.1963], + device='cuda:1'), covar=tensor([0.0662, 0.1378, 0.1785, 0.1571, 0.0646, 0.1592, 0.0714, 0.0755], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 06:20:09,069 INFO [train.py:935] (1/4) Epoch 24, validation: loss=0.1731, simple_loss=0.2733, pruned_loss=0.03644, over 944034.00 frames. +2023-02-07 06:20:09,070 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 06:20:23,906 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 06:20:35,523 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:20:44,023 INFO [train.py:901] (1/4) Epoch 24, batch 50, loss[loss=0.2098, simple_loss=0.2839, pruned_loss=0.06782, over 7654.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2939, pruned_loss=0.06255, over 370899.98 frames. ], batch size: 19, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:20:57,549 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 06:21:11,398 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.437e+02 2.851e+02 3.663e+02 1.155e+03, threshold=5.702e+02, percent-clipped=3.0 +2023-02-07 06:21:20,552 INFO [train.py:901] (1/4) Epoch 24, batch 100, loss[loss=0.2199, simple_loss=0.3021, pruned_loss=0.06883, over 8349.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2916, pruned_loss=0.06198, over 649948.09 frames. ], batch size: 24, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:21:22,592 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 06:21:41,726 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4151, 1.4028, 4.5939, 1.7490, 4.1582, 3.8287, 4.2037, 4.0903], + device='cuda:1'), covar=tensor([0.0499, 0.4783, 0.0467, 0.3939, 0.0895, 0.0935, 0.0500, 0.0593], + device='cuda:1'), in_proj_covar=tensor([0.0645, 0.0655, 0.0708, 0.0641, 0.0720, 0.0620, 0.0615, 0.0690], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:21:56,098 INFO [train.py:901] (1/4) Epoch 24, batch 150, loss[loss=0.1889, simple_loss=0.2749, pruned_loss=0.05145, over 8599.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2858, pruned_loss=0.05925, over 864495.97 frames. ], batch size: 39, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:00,569 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.61 vs. limit=5.0 +2023-02-07 06:22:03,342 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 06:22:07,742 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8248, 2.0346, 2.2324, 1.4549, 2.3485, 1.6933, 0.6974, 2.0159], + device='cuda:1'), covar=tensor([0.0645, 0.0396, 0.0300, 0.0624, 0.0441, 0.0836, 0.0976, 0.0321], + device='cuda:1'), in_proj_covar=tensor([0.0462, 0.0402, 0.0354, 0.0455, 0.0385, 0.0545, 0.0398, 0.0428], + device='cuda:1'), out_proj_covar=tensor([1.2331e-04, 1.0515e-04, 9.2994e-05, 1.1942e-04, 1.0126e-04, 1.5330e-04, + 1.0710e-04, 1.1294e-04], device='cuda:1') +2023-02-07 06:22:21,897 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.384e+02 2.880e+02 3.401e+02 7.597e+02, threshold=5.761e+02, percent-clipped=1.0 +2023-02-07 06:22:30,264 INFO [train.py:901] (1/4) Epoch 24, batch 200, loss[loss=0.1902, simple_loss=0.2838, pruned_loss=0.04834, over 8551.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2854, pruned_loss=0.05927, over 1028505.80 frames. ], batch size: 31, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:35,401 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4164, 2.0902, 3.1604, 2.0614, 2.7574, 3.5444, 3.4468, 3.2266], + device='cuda:1'), covar=tensor([0.0896, 0.1479, 0.0564, 0.1750, 0.1489, 0.0218, 0.0595, 0.0416], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0321, 0.0286, 0.0317, 0.0314, 0.0269, 0.0426, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:22:38,161 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:22:40,033 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:01,364 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-07 06:23:05,569 INFO [train.py:901] (1/4) Epoch 24, batch 250, loss[loss=0.2393, simple_loss=0.315, pruned_loss=0.08182, over 8242.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2854, pruned_loss=0.05915, over 1163397.34 frames. ], batch size: 24, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:07,696 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186161.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 06:23:16,524 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-07 06:23:18,803 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186176.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:25,615 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 06:23:32,345 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.402e+02 3.098e+02 3.972e+02 8.418e+02, threshold=6.197e+02, percent-clipped=5.0 +2023-02-07 06:23:36,038 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186201.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:40,511 INFO [train.py:901] (1/4) Epoch 24, batch 300, loss[loss=0.2042, simple_loss=0.2861, pruned_loss=0.06112, over 8774.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2852, pruned_loss=0.05912, over 1263739.60 frames. ], batch size: 30, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:53,001 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186226.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,862 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 06:24:13,919 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6153, 2.6918, 2.0325, 2.5115, 2.2200, 1.8442, 2.2762, 2.2916], + device='cuda:1'), covar=tensor([0.1539, 0.0423, 0.1191, 0.0600, 0.0773, 0.1454, 0.0995, 0.0979], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0235, 0.0337, 0.0311, 0.0303, 0.0342, 0.0350, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 06:24:15,080 INFO [train.py:901] (1/4) Epoch 24, batch 350, loss[loss=0.2069, simple_loss=0.2975, pruned_loss=0.0581, over 8451.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.0589, over 1342667.08 frames. 
], batch size: 25, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:24:24,623 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6764, 4.6407, 4.1935, 2.0638, 4.1422, 4.2792, 4.2356, 4.1756], + device='cuda:1'), covar=tensor([0.0635, 0.0506, 0.0926, 0.4400, 0.0785, 0.0935, 0.1076, 0.0737], + device='cuda:1'), in_proj_covar=tensor([0.0526, 0.0445, 0.0431, 0.0541, 0.0431, 0.0446, 0.0426, 0.0389], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:24:28,144 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186276.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:24:28,714 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:42,181 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.428e+02 2.971e+02 3.348e+02 5.777e+02, threshold=5.941e+02, percent-clipped=0.0 +2023-02-07 06:24:47,691 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.3111, 3.1805, 2.9791, 1.6594, 2.9085, 2.9583, 2.8925, 2.8177], + device='cuda:1'), covar=tensor([0.1050, 0.0766, 0.1289, 0.4198, 0.1167, 0.1277, 0.1500, 0.1144], + device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0445, 0.0431, 0.0541, 0.0431, 0.0446, 0.0426, 0.0389], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:24:50,336 INFO [train.py:901] (1/4) Epoch 24, batch 400, loss[loss=0.1994, simple_loss=0.2641, pruned_loss=0.06733, over 8113.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2853, pruned_loss=0.05955, over 1404274.69 frames. ], batch size: 23, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:26,091 INFO [train.py:901] (1/4) Epoch 24, batch 450, loss[loss=0.2029, simple_loss=0.2915, pruned_loss=0.05708, over 8509.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2863, pruned_loss=0.06026, over 1452744.95 frames. ], batch size: 26, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:52,934 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.487e+02 2.919e+02 3.580e+02 7.824e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-07 06:26:02,029 INFO [train.py:901] (1/4) Epoch 24, batch 500, loss[loss=0.2174, simple_loss=0.3034, pruned_loss=0.06575, over 8511.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2862, pruned_loss=0.06024, over 1489494.38 frames. ], batch size: 28, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:23,342 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:37,816 INFO [train.py:901] (1/4) Epoch 24, batch 550, loss[loss=0.2164, simple_loss=0.3047, pruned_loss=0.0641, over 8329.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2863, pruned_loss=0.06041, over 1518052.86 frames. 
], batch size: 26, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:40,765 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:55,561 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7653, 1.9327, 2.1133, 1.2759, 2.2192, 1.6391, 0.6881, 1.9832], + device='cuda:1'), covar=tensor([0.0652, 0.0397, 0.0299, 0.0653, 0.0463, 0.0956, 0.0943, 0.0332], + device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0399, 0.0351, 0.0449, 0.0382, 0.0539, 0.0393, 0.0426], + device='cuda:1'), out_proj_covar=tensor([1.2231e-04, 1.0434e-04, 9.2115e-05, 1.1794e-04, 1.0032e-04, 1.5167e-04, + 1.0568e-04, 1.1243e-04], device='cuda:1') +2023-02-07 06:27:01,112 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:01,711 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:03,579 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.350e+02 3.005e+02 3.846e+02 7.955e+02, threshold=6.011e+02, percent-clipped=1.0 +2023-02-07 06:27:12,588 INFO [train.py:901] (1/4) Epoch 24, batch 600, loss[loss=0.2432, simple_loss=0.3134, pruned_loss=0.08652, over 8032.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2886, pruned_loss=0.06188, over 1542699.89 frames. ], batch size: 22, lr: 3.17e-03, grad_scale: 16.0 +2023-02-07 06:27:19,620 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:21,528 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186520.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:23,334 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. limit=5.0 +2023-02-07 06:27:26,202 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 06:27:29,790 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186532.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:27:31,744 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:40,305 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7301, 1.3971, 2.8387, 1.4059, 2.1846, 3.0338, 3.1378, 2.5880], + device='cuda:1'), covar=tensor([0.1134, 0.1738, 0.0385, 0.2104, 0.0974, 0.0295, 0.0749, 0.0594], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0321, 0.0286, 0.0314, 0.0312, 0.0268, 0.0424, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:27:46,480 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186557.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:46,550 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186557.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:27:47,027 INFO [train.py:901] (1/4) Epoch 24, batch 650, loss[loss=0.2038, simple_loss=0.293, pruned_loss=0.05733, over 8524.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2876, pruned_loss=0.06159, over 1557437.52 frames. 
], batch size: 39, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:27:52,102 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9108, 1.7060, 3.5669, 1.5993, 2.4876, 3.9185, 3.9817, 3.3591], + device='cuda:1'), covar=tensor([0.1270, 0.1691, 0.0299, 0.1996, 0.0989, 0.0209, 0.0505, 0.0516], + device='cuda:1'), in_proj_covar=tensor([0.0296, 0.0321, 0.0285, 0.0314, 0.0311, 0.0268, 0.0423, 0.0302], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:28:01,234 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:07,495 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:13,142 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3225, 1.7941, 3.3775, 1.4486, 2.4281, 3.7346, 3.8406, 3.2023], + device='cuda:1'), covar=tensor([0.0979, 0.1695, 0.0407, 0.2237, 0.1215, 0.0241, 0.0534, 0.0557], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0320, 0.0284, 0.0312, 0.0310, 0.0267, 0.0422, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:28:15,661 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.377e+02 2.753e+02 3.513e+02 8.271e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 06:28:20,753 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:23,404 INFO [train.py:901] (1/4) Epoch 24, batch 700, loss[loss=0.2047, simple_loss=0.2888, pruned_loss=0.0603, over 8336.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2877, pruned_loss=0.06114, over 1575035.22 frames. ], batch size: 25, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:28:24,190 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2115, 4.1733, 3.8090, 1.9490, 3.7234, 3.7549, 3.7227, 3.6234], + device='cuda:1'), covar=tensor([0.0747, 0.0560, 0.0953, 0.4448, 0.0866, 0.0972, 0.1247, 0.0778], + device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0446, 0.0432, 0.0543, 0.0430, 0.0448, 0.0427, 0.0389], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:28:33,222 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186621.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:43,730 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186635.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:59,666 INFO [train.py:901] (1/4) Epoch 24, batch 750, loss[loss=0.203, simple_loss=0.2872, pruned_loss=0.05935, over 7819.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2867, pruned_loss=0.06084, over 1582556.48 frames. 
], batch size: 20, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:00,494 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5975, 1.9030, 2.0335, 1.1351, 2.1499, 1.3492, 0.7027, 1.7871], + device='cuda:1'), covar=tensor([0.0766, 0.0416, 0.0314, 0.0740, 0.0463, 0.1098, 0.1039, 0.0420], + device='cuda:1'), in_proj_covar=tensor([0.0457, 0.0397, 0.0350, 0.0447, 0.0379, 0.0536, 0.0391, 0.0425], + device='cuda:1'), out_proj_covar=tensor([1.2187e-04, 1.0376e-04, 9.1745e-05, 1.1728e-04, 9.9550e-05, 1.5077e-04, + 1.0520e-04, 1.1211e-04], device='cuda:1') +2023-02-07 06:29:11,910 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 06:29:16,223 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1752, 1.6548, 4.6794, 1.8964, 3.7272, 3.7608, 4.1681, 4.1771], + device='cuda:1'), covar=tensor([0.1376, 0.6218, 0.0931, 0.5071, 0.2129, 0.1573, 0.1023, 0.0964], + device='cuda:1'), in_proj_covar=tensor([0.0645, 0.0653, 0.0707, 0.0637, 0.0721, 0.0620, 0.0613, 0.0689], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:29:21,537 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 06:29:27,090 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.532e+02 3.077e+02 4.008e+02 9.294e+02, threshold=6.153e+02, percent-clipped=8.0 +2023-02-07 06:29:31,913 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 06:29:35,745 INFO [train.py:901] (1/4) Epoch 24, batch 800, loss[loss=0.2095, simple_loss=0.3068, pruned_loss=0.05609, over 8336.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2864, pruned_loss=0.06029, over 1593749.28 frames. ], batch size: 26, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:48,932 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:29:56,176 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186736.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:11,904 INFO [train.py:901] (1/4) Epoch 24, batch 850, loss[loss=0.2107, simple_loss=0.2951, pruned_loss=0.06318, over 7808.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2864, pruned_loss=0.06003, over 1602509.93 frames. ], batch size: 20, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:30:29,493 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:39,064 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.330e+02 2.764e+02 3.350e+02 7.186e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 06:30:47,640 INFO [train.py:901] (1/4) Epoch 24, batch 900, loss[loss=0.1862, simple_loss=0.2754, pruned_loss=0.0485, over 8451.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2864, pruned_loss=0.05995, over 1608600.99 frames. 
], batch size: 27, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:05,961 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:08,582 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:15,813 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6779, 2.4804, 3.2731, 2.5411, 3.2792, 2.7249, 2.5848, 2.0354], + device='cuda:1'), covar=tensor([0.5037, 0.4628, 0.1813, 0.4010, 0.2520, 0.2844, 0.1709, 0.5303], + device='cuda:1'), in_proj_covar=tensor([0.0948, 0.0992, 0.0812, 0.0958, 0.0998, 0.0901, 0.0756, 0.0830], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 06:31:24,266 INFO [train.py:901] (1/4) Epoch 24, batch 950, loss[loss=0.2508, simple_loss=0.3207, pruned_loss=0.09051, over 7269.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.05993, over 1604127.00 frames. ], batch size: 73, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:24,446 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:24,508 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:30,139 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2437, 1.5897, 1.7675, 1.4674, 1.0728, 1.5903, 1.8647, 1.9599], + device='cuda:1'), covar=tensor([0.0495, 0.1215, 0.1688, 0.1418, 0.0614, 0.1455, 0.0674, 0.0574], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 06:31:39,987 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186879.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:43,467 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 06:31:48,341 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186891.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:50,396 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3348, 2.1360, 1.7052, 1.9953, 1.7304, 1.4729, 1.7370, 1.7168], + device='cuda:1'), covar=tensor([0.1179, 0.0383, 0.1186, 0.0499, 0.0733, 0.1506, 0.0902, 0.0776], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0310, 0.0301, 0.0341, 0.0346, 0.0319], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 06:31:52,263 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.324e+02 2.850e+02 3.567e+02 7.043e+02, threshold=5.700e+02, percent-clipped=2.0 +2023-02-07 06:31:53,138 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:55,084 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:59,853 INFO [train.py:901] (1/4) Epoch 24, batch 1000, loss[loss=0.2115, simple_loss=0.3002, pruned_loss=0.0614, over 8350.00 frames. 
], tot_loss[loss=0.2028, simple_loss=0.2854, pruned_loss=0.06007, over 1604905.86 frames. ], batch size: 24, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:32:05,643 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,390 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,466 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:19,246 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5819, 2.6017, 1.7864, 2.3258, 2.1592, 1.5789, 2.1467, 2.2453], + device='cuda:1'), covar=tensor([0.1761, 0.0500, 0.1409, 0.0769, 0.0858, 0.1653, 0.1153, 0.1239], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0235, 0.0336, 0.0310, 0.0301, 0.0341, 0.0347, 0.0319], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 06:32:20,501 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 06:32:29,428 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186948.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:32,162 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:33,326 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 06:32:36,111 INFO [train.py:901] (1/4) Epoch 24, batch 1050, loss[loss=0.1819, simple_loss=0.2589, pruned_loss=0.0525, over 7805.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2852, pruned_loss=0.06012, over 1607049.31 frames. ], batch size: 20, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:00,036 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7837, 1.4876, 3.9751, 1.5258, 3.5144, 3.2896, 3.6111, 3.4901], + device='cuda:1'), covar=tensor([0.0717, 0.4510, 0.0742, 0.4123, 0.1310, 0.1151, 0.0672, 0.0759], + device='cuda:1'), in_proj_covar=tensor([0.0642, 0.0649, 0.0703, 0.0634, 0.0718, 0.0618, 0.0610, 0.0686], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:33:01,540 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186992.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:02,866 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:04,734 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.475e+02 2.949e+02 3.829e+02 9.793e+02, threshold=5.897e+02, percent-clipped=8.0 +2023-02-07 06:33:12,436 INFO [train.py:901] (1/4) Epoch 24, batch 1100, loss[loss=0.1785, simple_loss=0.2656, pruned_loss=0.04566, over 7555.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2861, pruned_loss=0.06051, over 1613073.41 frames. 
], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:12,699 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5579, 2.2559, 4.0425, 1.4382, 2.9064, 2.0432, 1.8306, 2.8512], + device='cuda:1'), covar=tensor([0.2087, 0.2779, 0.0828, 0.4962, 0.1913, 0.3443, 0.2493, 0.2553], + device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0615, 0.0556, 0.0650, 0.0653, 0.0600, 0.0544, 0.0634], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:33:18,326 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:19,043 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:37,509 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:38,291 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:45,711 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 06:33:47,123 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:48,353 INFO [train.py:901] (1/4) Epoch 24, batch 1150, loss[loss=0.1961, simple_loss=0.288, pruned_loss=0.05211, over 8493.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2871, pruned_loss=0.06098, over 1613117.44 frames. ], batch size: 28, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:49,926 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9199, 1.5305, 3.3764, 1.4808, 2.4006, 3.6290, 3.7449, 3.1037], + device='cuda:1'), covar=tensor([0.1179, 0.1771, 0.0307, 0.1964, 0.0967, 0.0231, 0.0569, 0.0526], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0324, 0.0288, 0.0317, 0.0315, 0.0270, 0.0427, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:33:52,032 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:57,505 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187071.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:34:02,538 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0459, 1.6086, 1.4163, 1.5556, 1.3269, 1.3107, 1.3242, 1.3064], + device='cuda:1'), covar=tensor([0.1052, 0.0492, 0.1266, 0.0545, 0.0737, 0.1438, 0.0872, 0.0801], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0309, 0.0301, 0.0341, 0.0346, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 06:34:16,148 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.205e+02 2.742e+02 3.279e+02 6.267e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 06:34:24,623 INFO [train.py:901] (1/4) Epoch 24, batch 1200, loss[loss=0.2356, simple_loss=0.313, pruned_loss=0.07904, over 7976.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2868, pruned_loss=0.06062, over 1617058.43 frames. 
], batch size: 21, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:34:57,398 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:00,007 INFO [train.py:901] (1/4) Epoch 24, batch 1250, loss[loss=0.1936, simple_loss=0.2813, pruned_loss=0.05301, over 8088.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2864, pruned_loss=0.06039, over 1618073.89 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:15,148 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:19,765 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187186.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:27,953 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.417e+02 2.916e+02 3.659e+02 9.833e+02, threshold=5.832e+02, percent-clipped=6.0 +2023-02-07 06:35:30,894 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187202.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:35,736 INFO [train.py:901] (1/4) Epoch 24, batch 1300, loss[loss=0.2122, simple_loss=0.3133, pruned_loss=0.05559, over 8196.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2858, pruned_loss=0.05986, over 1620177.96 frames. ], batch size: 23, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:35,964 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187208.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:51,426 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0 +2023-02-07 06:35:53,889 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187233.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:05,770 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187250.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:10,944 INFO [train.py:901] (1/4) Epoch 24, batch 1350, loss[loss=0.2144, simple_loss=0.2953, pruned_loss=0.06676, over 8508.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05945, over 1618899.58 frames. ], batch size: 28, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:20,761 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:21,318 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:22,816 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,304 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,750 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.391e+02 3.088e+02 3.702e+02 1.176e+03, threshold=6.175e+02, percent-clipped=8.0 +2023-02-07 06:36:41,384 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187300.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:46,710 INFO [train.py:901] (1/4) Epoch 24, batch 1400, loss[loss=0.1937, simple_loss=0.2675, pruned_loss=0.05993, over 7693.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2854, pruned_loss=0.05979, over 1620245.70 frames. 
], batch size: 18, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:52,960 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187317.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:54,421 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:59,292 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:03,362 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3843, 1.6197, 2.0963, 1.3456, 1.4975, 1.6665, 1.4953, 1.4287], + device='cuda:1'), covar=tensor([0.2010, 0.2409, 0.0973, 0.4487, 0.1923, 0.3424, 0.2420, 0.2215], + device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0617, 0.0558, 0.0652, 0.0653, 0.0600, 0.0545, 0.0636], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:37:05,353 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5948, 2.4761, 1.7855, 2.2578, 2.0325, 1.5266, 2.0604, 2.0741], + device='cuda:1'), covar=tensor([0.1537, 0.0421, 0.1232, 0.0624, 0.0785, 0.1563, 0.1062, 0.0967], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0233, 0.0335, 0.0308, 0.0300, 0.0338, 0.0346, 0.0316], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 06:37:12,877 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187344.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:16,708 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-07 06:37:21,717 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 06:37:22,426 INFO [train.py:901] (1/4) Epoch 24, batch 1450, loss[loss=0.2103, simple_loss=0.3023, pruned_loss=0.05909, over 8514.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06089, over 1620532.02 frames. ], batch size: 28, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:37:42,314 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:43,074 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:44,410 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:49,534 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.482e+02 2.870e+02 4.012e+02 8.494e+02, threshold=5.740e+02, percent-clipped=8.0 +2023-02-07 06:37:51,823 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:56,995 INFO [train.py:901] (1/4) Epoch 24, batch 1500, loss[loss=0.1731, simple_loss=0.2562, pruned_loss=0.04499, over 8247.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.286, pruned_loss=0.06075, over 1618739.97 frames. 
], batch size: 22, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:22,159 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:33,246 INFO [train.py:901] (1/4) Epoch 24, batch 1550, loss[loss=0.194, simple_loss=0.286, pruned_loss=0.05103, over 8515.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.06076, over 1620008.29 frames. ], batch size: 28, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:39,573 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:59,897 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.355e+02 2.764e+02 3.622e+02 7.454e+02, threshold=5.529e+02, percent-clipped=4.0 +2023-02-07 06:39:02,833 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:06,539 INFO [train.py:901] (1/4) Epoch 24, batch 1600, loss[loss=0.1736, simple_loss=0.2535, pruned_loss=0.04684, over 7715.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2872, pruned_loss=0.06059, over 1626001.38 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:11,516 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:22,051 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:27,624 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:30,381 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:41,438 INFO [train.py:901] (1/4) Epoch 24, batch 1650, loss[loss=0.2043, simple_loss=0.2925, pruned_loss=0.05804, over 8605.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2867, pruned_loss=0.06026, over 1626277.44 frames. ], batch size: 34, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:52,523 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:09,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.451e+02 2.921e+02 3.516e+02 7.853e+02, threshold=5.842e+02, percent-clipped=7.0 +2023-02-07 06:40:09,874 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187598.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:10,449 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9713, 1.3647, 4.4897, 2.1381, 2.4842, 5.0956, 5.1985, 4.4473], + device='cuda:1'), covar=tensor([0.1279, 0.1900, 0.0240, 0.1861, 0.1127, 0.0171, 0.0346, 0.0513], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0319, 0.0283, 0.0312, 0.0311, 0.0267, 0.0422, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:40:16,433 INFO [train.py:901] (1/4) Epoch 24, batch 1700, loss[loss=0.197, simple_loss=0.2829, pruned_loss=0.05555, over 8357.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2865, pruned_loss=0.06051, over 1623379.89 frames. 
], batch size: 24, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:40,843 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187644.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:50,898 INFO [train.py:901] (1/4) Epoch 24, batch 1750, loss[loss=0.2273, simple_loss=0.307, pruned_loss=0.07378, over 8443.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06105, over 1619705.38 frames. ], batch size: 49, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:58,584 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:18,565 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.502e+02 3.000e+02 3.757e+02 9.885e+02, threshold=5.999e+02, percent-clipped=2.0 +2023-02-07 06:41:25,257 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 06:41:26,166 INFO [train.py:901] (1/4) Epoch 24, batch 1800, loss[loss=0.1809, simple_loss=0.2554, pruned_loss=0.05313, over 7521.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2863, pruned_loss=0.06063, over 1621016.04 frames. ], batch size: 18, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:43,267 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:59,216 INFO [train.py:901] (1/4) Epoch 24, batch 1850, loss[loss=0.2222, simple_loss=0.2868, pruned_loss=0.07878, over 7517.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.06011, over 1618933.64 frames. ], batch size: 18, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:59,466 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187758.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:09,326 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:14,901 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6588, 1.4936, 2.8698, 1.4547, 2.2493, 3.1000, 3.2173, 2.6991], + device='cuda:1'), covar=tensor([0.1157, 0.1534, 0.0377, 0.2034, 0.0889, 0.0297, 0.0612, 0.0540], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0319, 0.0284, 0.0312, 0.0312, 0.0267, 0.0423, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:42:18,477 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:27,424 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:28,603 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 2.492e+02 2.912e+02 4.002e+02 8.326e+02, threshold=5.824e+02, percent-clipped=6.0 +2023-02-07 06:42:36,389 INFO [train.py:901] (1/4) Epoch 24, batch 1900, loss[loss=0.1979, simple_loss=0.2733, pruned_loss=0.06125, over 7811.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2851, pruned_loss=0.05991, over 1618968.41 frames. 
], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:42:42,667 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5705, 5.5813, 4.9133, 2.4566, 4.9621, 5.3877, 5.1912, 5.2402], + device='cuda:1'), covar=tensor([0.0580, 0.0413, 0.0923, 0.4780, 0.0822, 0.0964, 0.1067, 0.0649], + device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0543, 0.0435, 0.0448, 0.0428, 0.0391], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:42:49,918 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-07 06:43:02,032 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 06:43:05,644 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 06:43:05,819 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:11,794 INFO [train.py:901] (1/4) Epoch 24, batch 1950, loss[loss=0.1608, simple_loss=0.2496, pruned_loss=0.03595, over 8035.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.06002, over 1620711.10 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:43:18,398 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 06:43:22,598 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:27,989 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:30,734 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187885.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:39,199 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 2.375e+02 2.745e+02 3.412e+02 6.105e+02, threshold=5.491e+02, percent-clipped=1.0 +2023-02-07 06:43:39,235 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 06:43:42,665 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3984, 1.7349, 1.8149, 1.0751, 1.8579, 1.3328, 0.4300, 1.6412], + device='cuda:1'), covar=tensor([0.0768, 0.0449, 0.0422, 0.0708, 0.0565, 0.1179, 0.1082, 0.0393], + device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0401, 0.0355, 0.0451, 0.0385, 0.0541, 0.0394, 0.0426], + device='cuda:1'), out_proj_covar=tensor([1.2231e-04, 1.0479e-04, 9.3321e-05, 1.1851e-04, 1.0101e-04, 1.5216e-04, + 1.0605e-04, 1.1231e-04], device='cuda:1') +2023-02-07 06:43:46,287 INFO [train.py:901] (1/4) Epoch 24, batch 2000, loss[loss=0.2181, simple_loss=0.3042, pruned_loss=0.06607, over 8458.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2846, pruned_loss=0.05966, over 1619843.25 frames. ], batch size: 25, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:21,537 INFO [train.py:901] (1/4) Epoch 24, batch 2050, loss[loss=0.1955, simple_loss=0.2867, pruned_loss=0.05215, over 8473.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2847, pruned_loss=0.05924, over 1619117.00 frames. 
], batch size: 29, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:23,781 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7934, 2.0104, 2.2062, 1.3173, 2.2820, 1.6658, 0.7651, 1.8995], + device='cuda:1'), covar=tensor([0.0612, 0.0379, 0.0296, 0.0667, 0.0456, 0.0940, 0.0931, 0.0365], + device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0400, 0.0354, 0.0450, 0.0384, 0.0539, 0.0394, 0.0425], + device='cuda:1'), out_proj_covar=tensor([1.2219e-04, 1.0450e-04, 9.3072e-05, 1.1817e-04, 1.0087e-04, 1.5174e-04, + 1.0582e-04, 1.1198e-04], device='cuda:1') +2023-02-07 06:44:42,289 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:46,914 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:48,823 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.387e+02 2.958e+02 3.531e+02 6.524e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 06:44:51,434 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:56,709 INFO [train.py:901] (1/4) Epoch 24, batch 2100, loss[loss=0.2311, simple_loss=0.3025, pruned_loss=0.0798, over 8506.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05977, over 1619082.05 frames. ], batch size: 28, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:58,661 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.79 vs. limit=2.0 +2023-02-07 06:45:31,688 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8005, 2.1299, 3.5498, 1.8148, 1.6812, 3.5201, 0.6609, 2.1548], + device='cuda:1'), covar=tensor([0.1351, 0.1357, 0.0255, 0.1604, 0.2704, 0.0268, 0.2296, 0.1156], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0200, 0.0132, 0.0222, 0.0273, 0.0137, 0.0172, 0.0195], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 06:45:32,168 INFO [train.py:901] (1/4) Epoch 24, batch 2150, loss[loss=0.2138, simple_loss=0.2989, pruned_loss=0.06433, over 8426.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.286, pruned_loss=0.06028, over 1615209.57 frames. ], batch size: 27, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:45:47,939 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2855, 1.9061, 2.4480, 2.1335, 2.4183, 2.3318, 2.1518, 1.2625], + device='cuda:1'), covar=tensor([0.5737, 0.5138, 0.2072, 0.3526, 0.2393, 0.3033, 0.1873, 0.5267], + device='cuda:1'), in_proj_covar=tensor([0.0951, 0.1000, 0.0818, 0.0965, 0.1002, 0.0908, 0.0761, 0.0835], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 06:45:58,768 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.464e+02 3.048e+02 3.692e+02 7.821e+02, threshold=6.095e+02, percent-clipped=5.0 +2023-02-07 06:46:03,898 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:05,700 INFO [train.py:901] (1/4) Epoch 24, batch 2200, loss[loss=0.2193, simple_loss=0.3011, pruned_loss=0.06876, over 8507.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2858, pruned_loss=0.06001, over 1615947.21 frames. 
], batch size: 26, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:46:21,943 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188130.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:41,366 INFO [train.py:901] (1/4) Epoch 24, batch 2250, loss[loss=0.2149, simple_loss=0.2925, pruned_loss=0.06867, over 8640.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2852, pruned_loss=0.05923, over 1617320.59 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:09,411 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.349e+02 3.047e+02 3.898e+02 9.680e+02, threshold=6.095e+02, percent-clipped=4.0 +2023-02-07 06:47:16,302 INFO [train.py:901] (1/4) Epoch 24, batch 2300, loss[loss=0.177, simple_loss=0.2571, pruned_loss=0.04838, over 7805.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2854, pruned_loss=0.05929, over 1614865.06 frames. ], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:42,353 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:47,128 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:50,574 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:51,703 INFO [train.py:901] (1/4) Epoch 24, batch 2350, loss[loss=0.1916, simple_loss=0.2608, pruned_loss=0.0612, over 7236.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2849, pruned_loss=0.05921, over 1610361.34 frames. ], batch size: 16, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:00,161 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188270.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:05,481 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:08,165 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188281.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:20,112 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.514e+02 3.085e+02 3.939e+02 8.316e+02, threshold=6.171e+02, percent-clipped=4.0 +2023-02-07 06:48:22,397 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:27,100 INFO [train.py:901] (1/4) Epoch 24, batch 2400, loss[loss=0.223, simple_loss=0.3049, pruned_loss=0.07049, over 8325.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2858, pruned_loss=0.06018, over 1608789.33 frames. ], batch size: 25, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:30,048 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8144, 1.6166, 3.1107, 1.6301, 2.2474, 3.3417, 3.5078, 2.9287], + device='cuda:1'), covar=tensor([0.1235, 0.1634, 0.0364, 0.1951, 0.1085, 0.0275, 0.0522, 0.0514], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0323, 0.0285, 0.0315, 0.0314, 0.0270, 0.0425, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 06:49:00,630 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. 
limit=5.0 +2023-02-07 06:49:02,243 INFO [train.py:901] (1/4) Epoch 24, batch 2450, loss[loss=0.1931, simple_loss=0.2839, pruned_loss=0.05121, over 8291.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2864, pruned_loss=0.06039, over 1612383.82 frames. ], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:49:30,918 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.263e+02 2.982e+02 3.612e+02 7.179e+02, threshold=5.965e+02, percent-clipped=1.0 +2023-02-07 06:49:38,102 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 06:49:38,406 INFO [train.py:901] (1/4) Epoch 24, batch 2500, loss[loss=0.182, simple_loss=0.2719, pruned_loss=0.04607, over 8588.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2859, pruned_loss=0.06039, over 1613627.63 frames. ], batch size: 49, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:11,883 INFO [train.py:901] (1/4) Epoch 24, batch 2550, loss[loss=0.1629, simple_loss=0.2609, pruned_loss=0.03239, over 5536.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06079, over 1610729.40 frames. ], batch size: 12, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:40,453 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 2.538e+02 2.905e+02 3.766e+02 9.788e+02, threshold=5.809e+02, percent-clipped=4.0 +2023-02-07 06:50:41,315 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:50:47,916 INFO [train.py:901] (1/4) Epoch 24, batch 2600, loss[loss=0.2087, simple_loss=0.3011, pruned_loss=0.05819, over 8301.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2858, pruned_loss=0.06046, over 1612328.26 frames. ], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:21,918 INFO [train.py:901] (1/4) Epoch 24, batch 2650, loss[loss=0.1991, simple_loss=0.2837, pruned_loss=0.05729, over 8040.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2855, pruned_loss=0.06077, over 1611111.68 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:31,890 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0 +2023-02-07 06:51:41,776 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.85 vs. limit=5.0 +2023-02-07 06:51:48,605 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.338e+02 2.924e+02 3.924e+02 7.774e+02, threshold=5.847e+02, percent-clipped=4.0 +2023-02-07 06:51:55,411 INFO [train.py:901] (1/4) Epoch 24, batch 2700, loss[loss=0.161, simple_loss=0.2565, pruned_loss=0.03281, over 7807.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.286, pruned_loss=0.06069, over 1611820.12 frames. 
], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:02,966 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7971, 5.9724, 5.1625, 2.6812, 5.2820, 5.6633, 5.3406, 5.4744], + device='cuda:1'), covar=tensor([0.0571, 0.0368, 0.0869, 0.4082, 0.0709, 0.0711, 0.1062, 0.0446], + device='cuda:1'), in_proj_covar=tensor([0.0521, 0.0439, 0.0426, 0.0535, 0.0426, 0.0441, 0.0421, 0.0386], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:52:21,731 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:52:31,277 INFO [train.py:901] (1/4) Epoch 24, batch 2750, loss[loss=0.2309, simple_loss=0.3073, pruned_loss=0.07728, over 8539.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2852, pruned_loss=0.06061, over 1611758.74 frames. ], batch size: 39, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:57,784 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.375e+02 3.087e+02 4.139e+02 1.460e+03, threshold=6.174e+02, percent-clipped=4.0 +2023-02-07 06:53:05,388 INFO [train.py:901] (1/4) Epoch 24, batch 2800, loss[loss=0.2091, simple_loss=0.3046, pruned_loss=0.05677, over 8784.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2853, pruned_loss=0.06078, over 1612802.62 frames. ], batch size: 30, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:23,062 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9053, 1.6673, 2.0513, 1.7986, 2.0083, 1.9813, 1.8109, 0.8297], + device='cuda:1'), covar=tensor([0.5897, 0.4849, 0.2121, 0.3781, 0.2483, 0.3303, 0.2059, 0.5205], + device='cuda:1'), in_proj_covar=tensor([0.0947, 0.0992, 0.0814, 0.0956, 0.0995, 0.0905, 0.0756, 0.0832], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 06:53:40,615 INFO [train.py:901] (1/4) Epoch 24, batch 2850, loss[loss=0.1738, simple_loss=0.251, pruned_loss=0.0483, over 7203.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2862, pruned_loss=0.06105, over 1612475.08 frames. ], batch size: 16, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:42,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:07,931 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.346e+02 3.064e+02 3.754e+02 6.997e+02, threshold=6.129e+02, percent-clipped=3.0 +2023-02-07 06:54:14,861 INFO [train.py:901] (1/4) Epoch 24, batch 2900, loss[loss=0.1913, simple_loss=0.2764, pruned_loss=0.05308, over 7982.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2865, pruned_loss=0.06151, over 1611886.26 frames. 
], batch size: 21, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:54:39,482 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:48,810 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4468, 1.5639, 1.4238, 1.8082, 0.7363, 1.3373, 1.3281, 1.5054], + device='cuda:1'), covar=tensor([0.0860, 0.0726, 0.0978, 0.0505, 0.1105, 0.1297, 0.0703, 0.0760], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0197, 0.0242, 0.0214, 0.0205, 0.0246, 0.0249, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 06:54:51,404 INFO [train.py:901] (1/4) Epoch 24, batch 2950, loss[loss=0.2199, simple_loss=0.3073, pruned_loss=0.06623, over 8261.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2858, pruned_loss=0.0607, over 1611746.02 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:54:51,417 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 06:55:19,115 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.217e+02 2.685e+02 3.700e+02 9.567e+02, threshold=5.370e+02, percent-clipped=4.0 +2023-02-07 06:55:25,890 INFO [train.py:901] (1/4) Epoch 24, batch 3000, loss[loss=0.1786, simple_loss=0.2602, pruned_loss=0.04847, over 7974.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2856, pruned_loss=0.06082, over 1608214.64 frames. ], batch size: 21, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:55:25,890 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 06:55:39,550 INFO [train.py:935] (1/4) Epoch 24, validation: loss=0.1724, simple_loss=0.2726, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 06:55:39,551 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 06:56:05,990 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:07,365 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:13,963 INFO [train.py:901] (1/4) Epoch 24, batch 3050, loss[loss=0.2306, simple_loss=0.3196, pruned_loss=0.07078, over 8504.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2869, pruned_loss=0.06134, over 1609830.08 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:14,162 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:41,563 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.422e+02 3.010e+02 3.817e+02 9.746e+02, threshold=6.020e+02, percent-clipped=4.0 +2023-02-07 06:56:49,123 INFO [train.py:901] (1/4) Epoch 24, batch 3100, loss[loss=0.2368, simple_loss=0.332, pruned_loss=0.07083, over 8267.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06103, over 1611311.58 frames. 
], batch size: 24, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:54,800 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:58,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4968, 4.5153, 4.0048, 2.1686, 3.9794, 4.0356, 3.9988, 3.8677], + device='cuda:1'), covar=tensor([0.0674, 0.0473, 0.0934, 0.4529, 0.0889, 0.0930, 0.1197, 0.0613], + device='cuda:1'), in_proj_covar=tensor([0.0527, 0.0442, 0.0430, 0.0542, 0.0429, 0.0444, 0.0424, 0.0389], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 06:57:12,105 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:16,988 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:19,123 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6889, 1.6400, 2.4576, 1.5805, 1.3048, 2.3496, 0.6934, 1.5402], + device='cuda:1'), covar=tensor([0.1419, 0.1185, 0.0304, 0.1217, 0.2523, 0.0382, 0.1997, 0.1221], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0202, 0.0132, 0.0223, 0.0274, 0.0139, 0.0173, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 06:57:23,529 INFO [train.py:901] (1/4) Epoch 24, batch 3150, loss[loss=0.2168, simple_loss=0.3073, pruned_loss=0.06313, over 8468.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2871, pruned_loss=0.06121, over 1608096.96 frames. ], batch size: 25, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:57:26,636 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-07 06:57:40,665 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:50,853 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:51,369 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.394e+02 2.917e+02 3.565e+02 6.979e+02, threshold=5.834e+02, percent-clipped=3.0 +2023-02-07 06:57:59,734 INFO [train.py:901] (1/4) Epoch 24, batch 3200, loss[loss=0.193, simple_loss=0.2702, pruned_loss=0.05793, over 7656.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2876, pruned_loss=0.06073, over 1616914.65 frames. ], batch size: 19, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:58:33,951 INFO [train.py:901] (1/4) Epoch 24, batch 3250, loss[loss=0.2098, simple_loss=0.2878, pruned_loss=0.06597, over 7930.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2896, pruned_loss=0.06212, over 1620254.12 frames. ], batch size: 20, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:01,531 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.438e+02 3.003e+02 3.759e+02 6.490e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-07 06:59:08,523 INFO [train.py:901] (1/4) Epoch 24, batch 3300, loss[loss=0.2395, simple_loss=0.3067, pruned_loss=0.08613, over 7038.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2881, pruned_loss=0.0615, over 1614961.83 frames. 
], batch size: 73, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:12,860 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:30,915 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:39,008 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6749, 2.6864, 1.8379, 2.3593, 2.2632, 1.6136, 2.1428, 2.2483], + device='cuda:1'), covar=tensor([0.1559, 0.0379, 0.1276, 0.0679, 0.0706, 0.1613, 0.1107, 0.1001], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0235, 0.0334, 0.0310, 0.0299, 0.0341, 0.0345, 0.0317], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 06:59:44,159 INFO [train.py:901] (1/4) Epoch 24, batch 3350, loss[loss=0.2417, simple_loss=0.3128, pruned_loss=0.08529, over 8845.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2883, pruned_loss=0.06136, over 1619438.86 frames. ], batch size: 51, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:49,402 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189266.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:54,641 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8681, 1.9389, 3.2644, 2.3720, 2.8635, 1.8646, 1.6357, 1.7672], + device='cuda:1'), covar=tensor([0.8066, 0.6755, 0.2013, 0.4660, 0.3561, 0.5125, 0.3430, 0.5966], + device='cuda:1'), in_proj_covar=tensor([0.0945, 0.0991, 0.0811, 0.0955, 0.0994, 0.0902, 0.0754, 0.0828], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 07:00:05,778 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:07,042 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189293.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:10,334 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.367e+02 2.967e+02 3.575e+02 9.298e+02, threshold=5.934e+02, percent-clipped=5.0 +2023-02-07 07:00:17,739 INFO [train.py:901] (1/4) Epoch 24, batch 3400, loss[loss=0.2421, simple_loss=0.3312, pruned_loss=0.07653, over 8330.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.289, pruned_loss=0.06189, over 1620462.03 frames. 
], batch size: 25, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:33,355 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5940, 1.3326, 2.9106, 1.1511, 2.3041, 3.1360, 3.4445, 2.3329], + device='cuda:1'), covar=tensor([0.1681, 0.2196, 0.0550, 0.3037, 0.1192, 0.0468, 0.0676, 0.1101], + device='cuda:1'), in_proj_covar=tensor([0.0300, 0.0324, 0.0286, 0.0317, 0.0316, 0.0271, 0.0429, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:00:34,013 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0078, 1.6606, 1.7582, 1.5242, 0.8616, 1.6169, 1.7270, 1.6950], + device='cuda:1'), covar=tensor([0.0502, 0.1174, 0.1587, 0.1309, 0.0571, 0.1348, 0.0664, 0.0571], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 07:00:52,447 INFO [train.py:901] (1/4) Epoch 24, batch 3450, loss[loss=0.199, simple_loss=0.2747, pruned_loss=0.06168, over 8243.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2864, pruned_loss=0.06107, over 1615548.62 frames. ], batch size: 22, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:57,504 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:16,612 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:20,611 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.399e+02 2.884e+02 3.624e+02 7.571e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 07:01:26,256 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:27,409 INFO [train.py:901] (1/4) Epoch 24, batch 3500, loss[loss=0.2213, simple_loss=0.3061, pruned_loss=0.06826, over 8752.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2877, pruned_loss=0.06137, over 1617527.28 frames. ], batch size: 30, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:01:27,625 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:27,666 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7154, 1.9952, 2.0826, 1.4852, 2.2543, 1.5725, 0.7848, 1.9174], + device='cuda:1'), covar=tensor([0.0710, 0.0373, 0.0352, 0.0597, 0.0515, 0.0872, 0.0934, 0.0392], + device='cuda:1'), in_proj_covar=tensor([0.0461, 0.0401, 0.0356, 0.0454, 0.0387, 0.0542, 0.0398, 0.0431], + device='cuda:1'), out_proj_covar=tensor([1.2292e-04, 1.0469e-04, 9.3477e-05, 1.1928e-04, 1.0174e-04, 1.5223e-04, + 1.0689e-04, 1.1361e-04], device='cuda:1') +2023-02-07 07:01:40,504 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 07:01:40,597 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189426.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:50,793 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189441.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:56,339 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. 
limit=2.0 +2023-02-07 07:02:03,505 INFO [train.py:901] (1/4) Epoch 24, batch 3550, loss[loss=0.208, simple_loss=0.3008, pruned_loss=0.05759, over 8321.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2878, pruned_loss=0.06146, over 1613173.51 frames. ], batch size: 25, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:26,769 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1407, 1.4478, 4.3669, 1.6698, 3.8385, 3.6434, 3.9544, 3.8333], + device='cuda:1'), covar=tensor([0.0664, 0.4744, 0.0492, 0.4385, 0.1101, 0.0943, 0.0615, 0.0714], + device='cuda:1'), in_proj_covar=tensor([0.0647, 0.0653, 0.0712, 0.0643, 0.0721, 0.0619, 0.0615, 0.0694], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:02:31,307 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.509e+02 2.981e+02 3.708e+02 7.370e+02, threshold=5.962e+02, percent-clipped=4.0 +2023-02-07 07:02:37,646 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:02:38,145 INFO [train.py:901] (1/4) Epoch 24, batch 3600, loss[loss=0.1835, simple_loss=0.2568, pruned_loss=0.05508, over 7572.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2874, pruned_loss=0.06132, over 1613958.86 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:45,843 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189519.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:01,838 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:12,170 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189556.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:13,411 INFO [train.py:901] (1/4) Epoch 24, batch 3650, loss[loss=0.1764, simple_loss=0.2566, pruned_loss=0.0481, over 7656.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.0613, over 1613891.68 frames. ], batch size: 19, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:41,099 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 07:03:41,740 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.496e+02 2.930e+02 3.600e+02 6.319e+02, threshold=5.860e+02, percent-clipped=2.0 +2023-02-07 07:03:48,366 INFO [train.py:901] (1/4) Epoch 24, batch 3700, loss[loss=0.1992, simple_loss=0.2957, pruned_loss=0.0514, over 8353.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2869, pruned_loss=0.06058, over 1617286.90 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:49,803 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:23,135 INFO [train.py:901] (1/4) Epoch 24, batch 3750, loss[loss=0.1884, simple_loss=0.2692, pruned_loss=0.0538, over 7922.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06083, over 1613513.80 frames. 
], batch size: 20, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:23,302 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189658.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:26,016 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:27,230 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189664.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:42,769 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:43,973 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189689.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:51,123 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.609e+02 3.129e+02 4.249e+02 7.016e+02, threshold=6.258e+02, percent-clipped=8.0 +2023-02-07 07:04:57,806 INFO [train.py:901] (1/4) Epoch 24, batch 3800, loss[loss=0.2017, simple_loss=0.2813, pruned_loss=0.06104, over 8488.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06061, over 1617132.28 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:58,620 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:07,484 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:09,548 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:24,362 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189746.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:32,061 INFO [train.py:901] (1/4) Epoch 24, batch 3850, loss[loss=0.2049, simple_loss=0.2982, pruned_loss=0.05577, over 8506.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2856, pruned_loss=0.06063, over 1607449.41 frames. ], batch size: 29, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:05:35,659 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:47,650 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 07:05:53,008 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:53,956 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-07 07:05:59,108 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:59,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.407e+02 2.910e+02 3.432e+02 8.251e+02, threshold=5.819e+02, percent-clipped=1.0 +2023-02-07 07:06:06,327 INFO [train.py:901] (1/4) Epoch 24, batch 3900, loss[loss=0.203, simple_loss=0.2819, pruned_loss=0.06206, over 8497.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2861, pruned_loss=0.06056, over 1609918.53 frames. 
], batch size: 26, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:10,117 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189812.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:17,541 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:18,901 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189824.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:27,528 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:42,146 INFO [train.py:901] (1/4) Epoch 24, batch 3950, loss[loss=0.1711, simple_loss=0.2597, pruned_loss=0.04121, over 7972.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.06007, over 1609243.28 frames. ], batch size: 21, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:45,540 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:07:09,592 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.387e+02 3.217e+02 3.997e+02 8.874e+02, threshold=6.434e+02, percent-clipped=5.0 +2023-02-07 07:07:16,323 INFO [train.py:901] (1/4) Epoch 24, batch 4000, loss[loss=0.1885, simple_loss=0.2846, pruned_loss=0.04616, over 8503.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2848, pruned_loss=0.05954, over 1611044.44 frames. ], batch size: 39, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:07:51,151 INFO [train.py:901] (1/4) Epoch 24, batch 4050, loss[loss=0.2444, simple_loss=0.3215, pruned_loss=0.08366, over 8351.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2851, pruned_loss=0.05988, over 1610120.59 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:08:05,461 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:07,509 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:18,681 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.334e+02 2.770e+02 3.399e+02 1.124e+03, threshold=5.539e+02, percent-clipped=1.0 +2023-02-07 07:08:22,559 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:26,235 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:27,386 INFO [train.py:901] (1/4) Epoch 24, batch 4100, loss[loss=0.1903, simple_loss=0.2877, pruned_loss=0.04645, over 8252.00 frames. ], tot_loss[loss=0.202, simple_loss=0.285, pruned_loss=0.05949, over 1612114.64 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:02,440 INFO [train.py:901] (1/4) Epoch 24, batch 4150, loss[loss=0.1915, simple_loss=0.2696, pruned_loss=0.05668, over 8098.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05926, over 1615671.71 frames. 
], batch size: 21, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:08,094 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:17,894 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190080.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:25,159 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:30,579 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 2.377e+02 2.724e+02 3.400e+02 7.023e+02, threshold=5.448e+02, percent-clipped=3.0 +2023-02-07 07:09:35,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:37,386 INFO [train.py:901] (1/4) Epoch 24, batch 4200, loss[loss=0.1719, simple_loss=0.2619, pruned_loss=0.04096, over 8291.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.285, pruned_loss=0.05957, over 1612461.12 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:09:43,686 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:45,031 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 07:09:48,195 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 07:09:50,485 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-07 07:10:10,661 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 07:10:11,425 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190157.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:10:11,871 INFO [train.py:901] (1/4) Epoch 24, batch 4250, loss[loss=0.2302, simple_loss=0.3254, pruned_loss=0.06746, over 8323.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2852, pruned_loss=0.05948, over 1615413.23 frames. ], batch size: 25, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:19,344 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190169.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:28,699 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:40,141 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.309e+02 2.865e+02 3.517e+02 8.092e+02, threshold=5.730e+02, percent-clipped=6.0 +2023-02-07 07:10:45,776 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190205.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:47,630 INFO [train.py:901] (1/4) Epoch 24, batch 4300, loss[loss=0.2178, simple_loss=0.3031, pruned_loss=0.06627, over 8438.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2852, pruned_loss=0.05983, over 1615453.92 frames. 
], batch size: 27, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:56,467 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3404, 1.5725, 4.6240, 1.9684, 3.7588, 3.7205, 4.1939, 4.1173], + device='cuda:1'), covar=tensor([0.1224, 0.6515, 0.0991, 0.4882, 0.2074, 0.1472, 0.0993, 0.0986], + device='cuda:1'), in_proj_covar=tensor([0.0651, 0.0657, 0.0718, 0.0645, 0.0723, 0.0622, 0.0622, 0.0697], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:11:05,301 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:21,910 INFO [train.py:901] (1/4) Epoch 24, batch 4350, loss[loss=0.1735, simple_loss=0.2639, pruned_loss=0.04157, over 8247.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2855, pruned_loss=0.0599, over 1613896.80 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:11:22,815 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190259.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:40,177 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 07:11:50,408 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.430e+02 2.823e+02 3.493e+02 1.012e+03, threshold=5.646e+02, percent-clipped=3.0 +2023-02-07 07:11:57,334 INFO [train.py:901] (1/4) Epoch 24, batch 4400, loss[loss=0.2262, simple_loss=0.309, pruned_loss=0.07165, over 8191.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2867, pruned_loss=0.0605, over 1617381.57 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:15,563 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:12:23,134 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 07:12:32,982 INFO [train.py:901] (1/4) Epoch 24, batch 4450, loss[loss=0.2327, simple_loss=0.3, pruned_loss=0.08271, over 6922.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2854, pruned_loss=0.06016, over 1613741.49 frames. ], batch size: 71, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:43,427 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:00,241 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 2.201e+02 2.691e+02 3.403e+02 6.534e+02, threshold=5.381e+02, percent-clipped=2.0 +2023-02-07 07:13:00,469 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:07,768 INFO [train.py:901] (1/4) Epoch 24, batch 4500, loss[loss=0.2095, simple_loss=0.2962, pruned_loss=0.06144, over 8106.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2849, pruned_loss=0.0596, over 1616583.29 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:14,925 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:17,409 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 07:13:29,019 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190437.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:42,693 INFO [train.py:901] (1/4) Epoch 24, batch 4550, loss[loss=0.2056, simple_loss=0.2891, pruned_loss=0.06109, over 8499.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2847, pruned_loss=0.0598, over 1616416.25 frames. ], batch size: 28, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:44,873 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:45,495 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:47,435 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9606, 2.3113, 3.6944, 1.9343, 1.7262, 3.6862, 0.6238, 2.1263], + device='cuda:1'), covar=tensor([0.1293, 0.1344, 0.0217, 0.1637, 0.2636, 0.0260, 0.2167, 0.1384], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0201, 0.0130, 0.0222, 0.0273, 0.0138, 0.0171, 0.0196], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 07:14:02,627 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:10,718 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.430e+02 2.973e+02 3.981e+02 9.647e+02, threshold=5.946e+02, percent-clipped=9.0 +2023-02-07 07:14:12,831 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190501.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:14:17,564 INFO [train.py:901] (1/4) Epoch 24, batch 4600, loss[loss=0.1766, simple_loss=0.2564, pruned_loss=0.04835, over 7641.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2841, pruned_loss=0.0596, over 1613866.18 frames. ], batch size: 19, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:14:21,248 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:42,211 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-07 07:14:54,248 INFO [train.py:901] (1/4) Epoch 24, batch 4650, loss[loss=0.1637, simple_loss=0.2449, pruned_loss=0.04119, over 7660.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2831, pruned_loss=0.05913, over 1609441.07 frames. ], batch size: 19, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:22,380 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.469e+02 2.987e+02 3.787e+02 1.231e+03, threshold=5.974e+02, percent-clipped=5.0 +2023-02-07 07:15:29,195 INFO [train.py:901] (1/4) Epoch 24, batch 4700, loss[loss=0.1909, simple_loss=0.2753, pruned_loss=0.05319, over 7971.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2821, pruned_loss=0.05872, over 1604837.71 frames. 
], batch size: 21, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:29,946 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190609.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:15:34,427 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190616.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:15:41,739 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:15:42,471 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190628.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:04,016 INFO [train.py:901] (1/4) Epoch 24, batch 4750, loss[loss=0.2139, simple_loss=0.2949, pruned_loss=0.06648, over 8512.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2839, pruned_loss=0.05953, over 1608994.12 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:16:18,704 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:20,758 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 07:16:22,899 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 07:16:29,415 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:32,693 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.279e+02 2.789e+02 3.393e+02 7.815e+02, threshold=5.578e+02, percent-clipped=3.0 +2023-02-07 07:16:40,395 INFO [train.py:901] (1/4) Epoch 24, batch 4800, loss[loss=0.1747, simple_loss=0.2405, pruned_loss=0.05446, over 7428.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2831, pruned_loss=0.05858, over 1606704.00 frames. ], batch size: 17, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:07,852 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6995, 2.3063, 4.0264, 1.5132, 2.8760, 2.1361, 1.9408, 2.8332], + device='cuda:1'), covar=tensor([0.2190, 0.2677, 0.0969, 0.4988, 0.2088, 0.3593, 0.2526, 0.2651], + device='cuda:1'), in_proj_covar=tensor([0.0525, 0.0612, 0.0552, 0.0650, 0.0646, 0.0595, 0.0545, 0.0632], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:17:13,019 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 07:17:15,041 INFO [train.py:901] (1/4) Epoch 24, batch 4850, loss[loss=0.2218, simple_loss=0.3142, pruned_loss=0.06464, over 8101.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2828, pruned_loss=0.05845, over 1609699.97 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:17,937 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:27,638 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. 
limit=2.0 +2023-02-07 07:17:40,143 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:43,507 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.278e+02 2.775e+02 3.178e+02 7.824e+02, threshold=5.550e+02, percent-clipped=3.0 +2023-02-07 07:17:50,724 INFO [train.py:901] (1/4) Epoch 24, batch 4900, loss[loss=0.218, simple_loss=0.3043, pruned_loss=0.0658, over 8738.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2834, pruned_loss=0.05867, over 1616782.27 frames. ], batch size: 39, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:06,940 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0463, 1.6747, 3.4548, 1.5271, 2.5076, 3.7527, 3.8686, 3.2303], + device='cuda:1'), covar=tensor([0.1161, 0.1770, 0.0328, 0.2191, 0.0963, 0.0238, 0.0520, 0.0538], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0324, 0.0285, 0.0316, 0.0315, 0.0272, 0.0429, 0.0302], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:18:16,606 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.33 vs. limit=5.0 +2023-02-07 07:18:25,853 INFO [train.py:901] (1/4) Epoch 24, batch 4950, loss[loss=0.1984, simple_loss=0.2724, pruned_loss=0.06216, over 7792.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.0588, over 1613259.21 frames. ], batch size: 19, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:34,366 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 07:18:36,137 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190872.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:18:39,592 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:45,178 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:53,848 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190897.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:18:54,309 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.393e+02 2.890e+02 3.701e+02 7.772e+02, threshold=5.780e+02, percent-clipped=4.0 +2023-02-07 07:19:01,817 INFO [train.py:901] (1/4) Epoch 24, batch 5000, loss[loss=0.1736, simple_loss=0.2675, pruned_loss=0.03992, over 8462.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.059, over 1613322.24 frames. ], batch size: 27, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:02,714 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:33,480 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190953.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:19:36,594 INFO [train.py:901] (1/4) Epoch 24, batch 5050, loss[loss=0.1782, simple_loss=0.2673, pruned_loss=0.0446, over 8105.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2842, pruned_loss=0.05917, over 1614148.14 frames. 
], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:45,291 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190971.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:51,179 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 07:20:04,904 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.521e+02 3.221e+02 4.359e+02 8.705e+02, threshold=6.442e+02, percent-clipped=11.0 +2023-02-07 07:20:11,616 INFO [train.py:901] (1/4) Epoch 24, batch 5100, loss[loss=0.2197, simple_loss=0.2982, pruned_loss=0.07058, over 8284.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2845, pruned_loss=0.05944, over 1613854.97 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:31,903 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:37,437 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9907, 1.1741, 1.1517, 0.6956, 1.1600, 0.9581, 0.1206, 1.1297], + device='cuda:1'), covar=tensor([0.0456, 0.0373, 0.0363, 0.0548, 0.0479, 0.0951, 0.0821, 0.0338], + device='cuda:1'), in_proj_covar=tensor([0.0464, 0.0406, 0.0360, 0.0458, 0.0390, 0.0546, 0.0401, 0.0435], + device='cuda:1'), out_proj_covar=tensor([1.2377e-04, 1.0598e-04, 9.4450e-05, 1.2020e-04, 1.0260e-04, 1.5321e-04, + 1.0777e-04, 1.1456e-04], device='cuda:1') +2023-02-07 07:20:40,077 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:46,726 INFO [train.py:901] (1/4) Epoch 24, batch 5150, loss[loss=0.2231, simple_loss=0.3041, pruned_loss=0.07102, over 8656.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.0595, over 1614918.35 frames. ], batch size: 34, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:53,681 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191068.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:20:57,803 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:05,749 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:05,787 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:13,676 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.286e+02 2.691e+02 3.617e+02 7.196e+02, threshold=5.383e+02, percent-clipped=2.0 +2023-02-07 07:21:20,844 INFO [train.py:901] (1/4) Epoch 24, batch 5200, loss[loss=0.1954, simple_loss=0.2837, pruned_loss=0.05356, over 8472.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2851, pruned_loss=0.05975, over 1618391.04 frames. ], batch size: 28, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:21:38,127 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:50,039 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-07 07:21:50,822 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:52,268 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:55,605 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:56,104 INFO [train.py:901] (1/4) Epoch 24, batch 5250, loss[loss=0.2098, simple_loss=0.2947, pruned_loss=0.06245, over 8900.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2865, pruned_loss=0.06042, over 1623712.82 frames. ], batch size: 40, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:22:04,650 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8796, 3.4933, 2.1957, 2.8553, 2.7730, 2.0167, 2.6905, 2.8820], + device='cuda:1'), covar=tensor([0.1739, 0.0405, 0.1300, 0.0791, 0.0722, 0.1505, 0.1135, 0.1128], + device='cuda:1'), in_proj_covar=tensor([0.0355, 0.0234, 0.0337, 0.0309, 0.0299, 0.0341, 0.0346, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 07:22:25,866 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.847e+02 3.981e+02 6.971e+02, threshold=5.694e+02, percent-clipped=11.0 +2023-02-07 07:22:28,316 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191203.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:22:31,600 INFO [train.py:901] (1/4) Epoch 24, batch 5300, loss[loss=0.1924, simple_loss=0.2823, pruned_loss=0.05121, over 8423.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2868, pruned_loss=0.0602, over 1623284.64 frames. ], batch size: 49, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:07,062 INFO [train.py:901] (1/4) Epoch 24, batch 5350, loss[loss=0.1989, simple_loss=0.2968, pruned_loss=0.05052, over 8292.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2856, pruned_loss=0.05961, over 1611714.30 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:36,917 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.294e+02 2.754e+02 3.342e+02 1.056e+03, threshold=5.508e+02, percent-clipped=2.0 +2023-02-07 07:23:42,379 INFO [train.py:901] (1/4) Epoch 24, batch 5400, loss[loss=0.2192, simple_loss=0.299, pruned_loss=0.06972, over 8246.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2864, pruned_loss=0.05972, over 1616886.43 frames. ], batch size: 24, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:23:53,142 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191324.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:24:05,750 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191342.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:11,127 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191349.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:24:17,035 INFO [train.py:901] (1/4) Epoch 24, batch 5450, loss[loss=0.1615, simple_loss=0.2416, pruned_loss=0.04073, over 7724.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2859, pruned_loss=0.05928, over 1618952.32 frames. 
], batch size: 18, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:23,281 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:34,970 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:39,466 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 07:24:42,256 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4480, 1.7476, 1.7805, 1.1753, 1.8309, 1.3802, 0.4607, 1.6939], + device='cuda:1'), covar=tensor([0.0681, 0.0445, 0.0371, 0.0704, 0.0596, 0.1105, 0.1021, 0.0342], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0404, 0.0359, 0.0456, 0.0388, 0.0543, 0.0400, 0.0434], + device='cuda:1'), out_proj_covar=tensor([1.2354e-04, 1.0556e-04, 9.4182e-05, 1.1972e-04, 1.0197e-04, 1.5222e-04, + 1.0752e-04, 1.1447e-04], device='cuda:1') +2023-02-07 07:24:46,091 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.294e+02 2.951e+02 3.676e+02 7.135e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 07:24:52,418 INFO [train.py:901] (1/4) Epoch 24, batch 5500, loss[loss=0.2058, simple_loss=0.2885, pruned_loss=0.06155, over 6990.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2851, pruned_loss=0.0592, over 1615364.33 frames. ], batch size: 72, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:52,652 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:07,820 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:10,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:28,140 INFO [train.py:901] (1/4) Epoch 24, batch 5550, loss[loss=0.1732, simple_loss=0.2607, pruned_loss=0.0428, over 7804.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2836, pruned_loss=0.05802, over 1618948.73 frames. ], batch size: 20, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:25:29,825 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1260, 1.9591, 2.5115, 2.1116, 2.4393, 2.1869, 1.9868, 1.3871], + device='cuda:1'), covar=tensor([0.5379, 0.4664, 0.2052, 0.3922, 0.2676, 0.3289, 0.2027, 0.5375], + device='cuda:1'), in_proj_covar=tensor([0.0943, 0.0992, 0.0812, 0.0962, 0.1000, 0.0905, 0.0754, 0.0830], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 07:25:53,336 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:57,930 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.420e+02 3.039e+02 3.989e+02 7.925e+02, threshold=6.078e+02, percent-clipped=5.0 +2023-02-07 07:26:03,388 INFO [train.py:901] (1/4) Epoch 24, batch 5600, loss[loss=0.2047, simple_loss=0.2917, pruned_loss=0.05884, over 8103.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2836, pruned_loss=0.05838, over 1615492.74 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:05,898 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. 
limit=2.0 +2023-02-07 07:26:20,816 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1717, 1.6360, 4.2730, 1.8593, 2.3828, 4.8338, 4.9677, 4.1982], + device='cuda:1'), covar=tensor([0.1214, 0.1861, 0.0301, 0.2132, 0.1291, 0.0190, 0.0359, 0.0525], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0323, 0.0286, 0.0315, 0.0315, 0.0273, 0.0430, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:26:29,667 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191545.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:30,968 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:38,439 INFO [train.py:901] (1/4) Epoch 24, batch 5650, loss[loss=0.1785, simple_loss=0.2682, pruned_loss=0.04441, over 7975.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2846, pruned_loss=0.05922, over 1616692.46 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:39,697 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 07:26:45,400 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 07:27:08,570 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.481e+02 2.901e+02 3.753e+02 9.237e+02, threshold=5.802e+02, percent-clipped=5.0 +2023-02-07 07:27:14,231 INFO [train.py:901] (1/4) Epoch 24, batch 5700, loss[loss=0.1729, simple_loss=0.2625, pruned_loss=0.04163, over 8089.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2836, pruned_loss=0.05904, over 1612526.28 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:14,757 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 07:27:15,041 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:21,337 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-07 07:27:22,489 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8173, 1.6753, 1.9329, 1.6508, 0.9777, 1.7186, 2.2208, 1.9876], + device='cuda:1'), covar=tensor([0.0446, 0.1203, 0.1623, 0.1381, 0.0610, 0.1426, 0.0636, 0.0617], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 07:27:49,745 INFO [train.py:901] (1/4) Epoch 24, batch 5750, loss[loss=0.1769, simple_loss=0.2749, pruned_loss=0.03949, over 8326.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2841, pruned_loss=0.05906, over 1614880.55 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:52,733 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:53,269 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 07:28:19,026 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.394e+02 2.726e+02 3.524e+02 6.240e+02, threshold=5.452e+02, percent-clipped=4.0 +2023-02-07 07:28:25,092 INFO [train.py:901] (1/4) Epoch 24, batch 5800, loss[loss=0.2128, simple_loss=0.2743, pruned_loss=0.07568, over 7284.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2835, pruned_loss=0.05886, over 1614309.18 frames. ], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:28:38,086 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:28:39,805 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-07 07:28:40,460 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-07 07:28:59,640 INFO [train.py:901] (1/4) Epoch 24, batch 5850, loss[loss=0.1649, simple_loss=0.2494, pruned_loss=0.0402, over 7662.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2824, pruned_loss=0.05822, over 1616682.10 frames. ], batch size: 19, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:29,158 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.223e+02 2.821e+02 3.422e+02 9.012e+02, threshold=5.641e+02, percent-clipped=8.0 +2023-02-07 07:29:30,138 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:34,718 INFO [train.py:901] (1/4) Epoch 24, batch 5900, loss[loss=0.1801, simple_loss=0.2707, pruned_loss=0.04477, over 8022.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.0582, over 1613787.06 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:48,211 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191826.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:59,313 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:10,941 INFO [train.py:901] (1/4) Epoch 24, batch 5950, loss[loss=0.2098, simple_loss=0.2941, pruned_loss=0.06271, over 8460.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05849, over 1617641.38 frames. ], batch size: 27, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:16,037 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:33,612 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191890.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:40,234 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 2.338e+02 2.991e+02 3.628e+02 7.270e+02, threshold=5.982e+02, percent-clipped=3.0 +2023-02-07 07:30:45,682 INFO [train.py:901] (1/4) Epoch 24, batch 6000, loss[loss=0.1955, simple_loss=0.2703, pruned_loss=0.06036, over 7292.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.282, pruned_loss=0.0581, over 1612255.21 frames. 
], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:45,682 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 07:31:00,540 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7029, 1.4821, 3.8395, 1.4315, 3.4329, 3.1293, 3.5175, 3.3386], + device='cuda:1'), covar=tensor([0.0667, 0.4878, 0.0591, 0.4679, 0.1243, 0.1135, 0.0691, 0.0831], + device='cuda:1'), in_proj_covar=tensor([0.0648, 0.0653, 0.0710, 0.0643, 0.0720, 0.0617, 0.0618, 0.0690], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:31:01,027 INFO [train.py:935] (1/4) Epoch 24, validation: loss=0.1718, simple_loss=0.2718, pruned_loss=0.0359, over 944034.00 frames. +2023-02-07 07:31:01,027 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 07:31:08,203 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191918.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:12,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2074, 1.5454, 1.7644, 1.4692, 1.0409, 1.6401, 1.8972, 1.8202], + device='cuda:1'), covar=tensor([0.0508, 0.1253, 0.1704, 0.1450, 0.0606, 0.1424, 0.0670, 0.0581], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0101, 0.0163, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 07:31:24,853 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:26,286 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6506, 2.0283, 3.1753, 1.4760, 2.4785, 2.0795, 1.7156, 2.4500], + device='cuda:1'), covar=tensor([0.2150, 0.2923, 0.1001, 0.5024, 0.2158, 0.3516, 0.2662, 0.2596], + device='cuda:1'), in_proj_covar=tensor([0.0531, 0.0615, 0.0555, 0.0652, 0.0652, 0.0600, 0.0547, 0.0637], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:31:35,325 INFO [train.py:901] (1/4) Epoch 24, batch 6050, loss[loss=0.2113, simple_loss=0.3063, pruned_loss=0.0581, over 8295.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2831, pruned_loss=0.05827, over 1614115.83 frames. 
], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:31:41,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4854, 1.8354, 2.6054, 1.3831, 1.9220, 1.8565, 1.5954, 1.9567], + device='cuda:1'), covar=tensor([0.2015, 0.2604, 0.0965, 0.4843, 0.2068, 0.3382, 0.2560, 0.2422], + device='cuda:1'), in_proj_covar=tensor([0.0531, 0.0615, 0.0555, 0.0652, 0.0652, 0.0600, 0.0548, 0.0636], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:31:57,329 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7650, 1.3417, 1.6391, 1.2582, 0.9229, 1.4125, 1.6268, 1.4682], + device='cuda:1'), covar=tensor([0.0576, 0.1347, 0.1730, 0.1513, 0.0640, 0.1558, 0.0737, 0.0691], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0159, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 07:31:58,022 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5707, 3.0058, 2.5429, 4.2003, 1.8694, 2.1293, 2.8164, 3.0089], + device='cuda:1'), covar=tensor([0.0771, 0.0873, 0.0792, 0.0228, 0.1074, 0.1284, 0.0870, 0.0830], + device='cuda:1'), in_proj_covar=tensor([0.0234, 0.0199, 0.0245, 0.0216, 0.0205, 0.0247, 0.0253, 0.0208], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 07:32:04,602 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.415e+02 2.742e+02 3.441e+02 8.508e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 07:32:10,738 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7944, 2.4022, 4.1471, 1.5920, 3.2112, 2.3060, 1.9628, 3.0112], + device='cuda:1'), covar=tensor([0.2043, 0.2594, 0.0825, 0.4710, 0.1720, 0.3427, 0.2457, 0.2348], + device='cuda:1'), in_proj_covar=tensor([0.0528, 0.0613, 0.0552, 0.0649, 0.0649, 0.0598, 0.0545, 0.0634], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:32:11,923 INFO [train.py:901] (1/4) Epoch 24, batch 6100, loss[loss=0.1813, simple_loss=0.2778, pruned_loss=0.04245, over 8332.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05773, over 1615083.45 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:32:32,943 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0826, 3.1975, 2.2114, 2.6931, 2.5332, 1.9393, 2.4990, 2.8632], + device='cuda:1'), covar=tensor([0.1461, 0.0389, 0.1126, 0.0698, 0.0661, 0.1436, 0.1064, 0.0941], + device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0234, 0.0338, 0.0311, 0.0302, 0.0343, 0.0348, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 07:32:34,059 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 07:32:37,032 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-07 07:32:46,755 INFO [train.py:901] (1/4) Epoch 24, batch 6150, loss[loss=0.2172, simple_loss=0.3053, pruned_loss=0.06452, over 8188.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.282, pruned_loss=0.05756, over 1613000.88 frames. 
], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:33:06,709 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 07:33:15,571 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192098.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:16,711 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.367e+02 2.762e+02 3.348e+02 6.106e+02, threshold=5.524e+02, percent-clipped=2.0 +2023-02-07 07:33:22,018 INFO [train.py:901] (1/4) Epoch 24, batch 6200, loss[loss=0.2249, simple_loss=0.3084, pruned_loss=0.07068, over 8350.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2828, pruned_loss=0.05863, over 1612089.15 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:33:30,650 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192120.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:32,770 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192123.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:50,993 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 07:33:56,607 INFO [train.py:901] (1/4) Epoch 24, batch 6250, loss[loss=0.1683, simple_loss=0.2465, pruned_loss=0.04502, over 7919.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2831, pruned_loss=0.05889, over 1612407.18 frames. ], batch size: 20, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:34:07,847 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192173.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:26,652 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.364e+02 2.949e+02 3.646e+02 8.976e+02, threshold=5.898e+02, percent-clipped=7.0 +2023-02-07 07:34:31,430 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-02-07 07:34:33,021 INFO [train.py:901] (1/4) Epoch 24, batch 6300, loss[loss=0.2407, simple_loss=0.3314, pruned_loss=0.07494, over 8513.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2828, pruned_loss=0.05907, over 1606651.67 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:34:52,688 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192237.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:54,069 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:35:02,879 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8483, 1.4822, 3.5234, 1.5702, 2.5196, 3.9020, 4.0212, 3.3977], + device='cuda:1'), covar=tensor([0.1250, 0.1847, 0.0330, 0.2045, 0.1048, 0.0207, 0.0504, 0.0542], + device='cuda:1'), in_proj_covar=tensor([0.0295, 0.0321, 0.0284, 0.0312, 0.0312, 0.0270, 0.0427, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:35:07,367 INFO [train.py:901] (1/4) Epoch 24, batch 6350, loss[loss=0.2422, simple_loss=0.3165, pruned_loss=0.08392, over 6577.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2824, pruned_loss=0.05861, over 1608090.13 frames. ], batch size: 71, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:35:26,356 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. 
limit=2.0 +2023-02-07 07:35:36,835 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.329e+02 2.896e+02 3.640e+02 5.459e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 07:35:43,003 INFO [train.py:901] (1/4) Epoch 24, batch 6400, loss[loss=0.214, simple_loss=0.3079, pruned_loss=0.06011, over 8019.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2822, pruned_loss=0.0586, over 1607740.61 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:19,126 INFO [train.py:901] (1/4) Epoch 24, batch 6450, loss[loss=0.1954, simple_loss=0.2758, pruned_loss=0.05752, over 7968.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2835, pruned_loss=0.05942, over 1610887.76 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:22,558 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-07 07:36:49,106 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.491e+02 2.965e+02 3.858e+02 7.678e+02, threshold=5.930e+02, percent-clipped=7.0 +2023-02-07 07:36:54,683 INFO [train.py:901] (1/4) Epoch 24, batch 6500, loss[loss=0.2113, simple_loss=0.3087, pruned_loss=0.05692, over 8474.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2849, pruned_loss=0.0602, over 1610641.99 frames. ], batch size: 29, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:56,151 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192410.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:00,679 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:25,023 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192451.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:29,459 INFO [train.py:901] (1/4) Epoch 24, batch 6550, loss[loss=0.2246, simple_loss=0.3072, pruned_loss=0.07094, over 8340.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2854, pruned_loss=0.05995, over 1614043.37 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:37:33,653 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192464.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:48,312 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 07:37:53,276 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7409, 2.1181, 3.2200, 1.5628, 2.5716, 2.0853, 1.8685, 2.5923], + device='cuda:1'), covar=tensor([0.1874, 0.2580, 0.0769, 0.4479, 0.1682, 0.3150, 0.2271, 0.2125], + device='cuda:1'), in_proj_covar=tensor([0.0533, 0.0619, 0.0558, 0.0656, 0.0654, 0.0603, 0.0550, 0.0639], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:37:58,368 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.233e+02 2.734e+02 3.455e+02 6.558e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 07:38:03,875 INFO [train.py:901] (1/4) Epoch 24, batch 6600, loss[loss=0.1938, simple_loss=0.2712, pruned_loss=0.05825, over 7654.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2843, pruned_loss=0.05933, over 1615508.65 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:38:08,120 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-07 07:38:10,835 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:24,030 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-07 07:38:39,631 INFO [train.py:901] (1/4) Epoch 24, batch 6650, loss[loss=0.1764, simple_loss=0.2713, pruned_loss=0.04078, over 8188.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2842, pruned_loss=0.05919, over 1614135.52 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:38:53,942 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:55,127 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:56,510 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:56,613 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6907, 1.9798, 2.0391, 1.8889, 1.3187, 1.9715, 2.4134, 2.0991], + device='cuda:1'), covar=tensor([0.0497, 0.1070, 0.1506, 0.1222, 0.0599, 0.1225, 0.0607, 0.0525], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0159, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 07:39:08,643 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.365e+02 2.876e+02 3.746e+02 9.522e+02, threshold=5.752e+02, percent-clipped=3.0 +2023-02-07 07:39:14,185 INFO [train.py:901] (1/4) Epoch 24, batch 6700, loss[loss=0.1704, simple_loss=0.2677, pruned_loss=0.03654, over 8354.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2843, pruned_loss=0.05934, over 1611603.41 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:39:31,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:32,527 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:49,023 INFO [train.py:901] (1/4) Epoch 24, batch 6750, loss[loss=0.1662, simple_loss=0.251, pruned_loss=0.04067, over 7808.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2842, pruned_loss=0.05937, over 1613057.28 frames. 
], batch size: 20, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:39:59,811 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3085, 1.5237, 4.4256, 1.8898, 2.5909, 5.1359, 5.2080, 4.5116], + device='cuda:1'), covar=tensor([0.1124, 0.1922, 0.0272, 0.1922, 0.1152, 0.0163, 0.0393, 0.0504], + device='cuda:1'), in_proj_covar=tensor([0.0297, 0.0323, 0.0287, 0.0315, 0.0314, 0.0273, 0.0430, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:40:01,189 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3883, 2.3606, 1.9552, 2.2741, 2.0798, 1.5773, 1.9435, 1.9104], + device='cuda:1'), covar=tensor([0.1421, 0.0426, 0.1185, 0.0533, 0.0744, 0.1619, 0.0992, 0.0942], + device='cuda:1'), in_proj_covar=tensor([0.0354, 0.0235, 0.0336, 0.0307, 0.0301, 0.0341, 0.0346, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 07:40:12,032 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192691.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:15,248 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192696.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:16,605 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:17,824 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.601e+02 3.163e+02 3.841e+02 9.507e+02, threshold=6.325e+02, percent-clipped=3.0 +2023-02-07 07:40:21,125 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 07:40:23,402 INFO [train.py:901] (1/4) Epoch 24, batch 6800, loss[loss=0.2355, simple_loss=0.3184, pruned_loss=0.0763, over 8576.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2838, pruned_loss=0.05943, over 1607458.44 frames. ], batch size: 31, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:40:24,828 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 07:40:51,292 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2654, 1.9637, 2.5271, 1.3584, 1.5959, 2.5452, 1.3306, 2.0233], + device='cuda:1'), covar=tensor([0.1398, 0.0935, 0.0412, 0.1436, 0.1853, 0.0396, 0.1438, 0.1274], + device='cuda:1'), in_proj_covar=tensor([0.0193, 0.0199, 0.0129, 0.0220, 0.0270, 0.0138, 0.0171, 0.0196], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 07:40:56,190 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:56,803 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:59,526 INFO [train.py:901] (1/4) Epoch 24, batch 6850, loss[loss=0.1999, simple_loss=0.2844, pruned_loss=0.05772, over 8200.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05927, over 1610259.77 frames. 
], batch size: 23, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:41:01,649 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:41:15,304 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 07:41:26,178 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:41:29,430 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.230e+02 2.864e+02 3.611e+02 9.090e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-07 07:41:35,201 INFO [train.py:901] (1/4) Epoch 24, batch 6900, loss[loss=0.2263, simple_loss=0.2948, pruned_loss=0.07891, over 6814.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2845, pruned_loss=0.05948, over 1611590.41 frames. ], batch size: 15, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:41:54,067 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:03,108 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192849.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:42:08,879 INFO [train.py:901] (1/4) Epoch 24, batch 6950, loss[loss=0.2089, simple_loss=0.2798, pruned_loss=0.06896, over 7699.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2853, pruned_loss=0.06025, over 1609436.54 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:42:10,326 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:17,133 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:21,981 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:23,184 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 07:42:23,754 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-07 07:42:30,036 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192888.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:38,586 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.384e+02 2.955e+02 3.597e+02 9.319e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 07:42:44,709 INFO [train.py:901] (1/4) Epoch 24, batch 7000, loss[loss=0.2065, simple_loss=0.2945, pruned_loss=0.05928, over 8251.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2846, pruned_loss=0.05943, over 1613657.32 frames. 
], batch size: 24, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:42:46,242 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:48,264 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:48,881 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([6.0364, 1.8109, 6.2124, 2.2274, 5.6335, 5.2709, 5.7439, 5.6380], + device='cuda:1'), covar=tensor([0.0401, 0.4282, 0.0284, 0.3828, 0.0827, 0.0720, 0.0403, 0.0458], + device='cuda:1'), in_proj_covar=tensor([0.0641, 0.0651, 0.0707, 0.0637, 0.0712, 0.0612, 0.0614, 0.0683], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:43:15,544 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:16,861 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:19,400 INFO [train.py:901] (1/4) Epoch 24, batch 7050, loss[loss=0.1557, simple_loss=0.2438, pruned_loss=0.03377, over 7684.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2844, pruned_loss=0.05929, over 1612676.26 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:43:32,663 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:33,235 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:34,071 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:46,466 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4676, 1.3894, 1.7421, 1.1840, 1.1355, 1.7278, 0.2723, 1.1766], + device='cuda:1'), covar=tensor([0.1680, 0.1231, 0.0394, 0.0903, 0.2579, 0.0438, 0.1965, 0.1334], + device='cuda:1'), in_proj_covar=tensor([0.0195, 0.0200, 0.0130, 0.0221, 0.0273, 0.0139, 0.0171, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 07:43:48,981 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.407e+02 3.090e+02 3.925e+02 9.689e+02, threshold=6.179e+02, percent-clipped=7.0 +2023-02-07 07:43:54,448 INFO [train.py:901] (1/4) Epoch 24, batch 7100, loss[loss=0.2186, simple_loss=0.3082, pruned_loss=0.06449, over 8467.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.286, pruned_loss=0.05959, over 1614289.11 frames. 
], batch size: 25, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:44:14,218 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193035.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:15,044 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8578, 1.7947, 2.4840, 1.5904, 1.4236, 2.4513, 0.5670, 1.5197], + device='cuda:1'), covar=tensor([0.1735, 0.1258, 0.0314, 0.1207, 0.2584, 0.0359, 0.2046, 0.1333], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0200, 0.0129, 0.0221, 0.0273, 0.0139, 0.0171, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 07:44:29,518 INFO [train.py:901] (1/4) Epoch 24, batch 7150, loss[loss=0.1881, simple_loss=0.2666, pruned_loss=0.0548, over 8083.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2851, pruned_loss=0.05911, over 1612915.37 frames. ], batch size: 21, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:44:30,370 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193059.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:54,300 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:56,928 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:58,920 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.272e+02 2.945e+02 3.915e+02 7.728e+02, threshold=5.890e+02, percent-clipped=4.0 +2023-02-07 07:45:05,024 INFO [train.py:901] (1/4) Epoch 24, batch 7200, loss[loss=0.1886, simple_loss=0.2788, pruned_loss=0.04924, over 8470.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2853, pruned_loss=0.05901, over 1614127.51 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:45:16,871 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:21,519 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:34,270 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:34,293 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:39,614 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:40,079 INFO [train.py:901] (1/4) Epoch 24, batch 7250, loss[loss=0.2026, simple_loss=0.2881, pruned_loss=0.05853, over 8469.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.285, pruned_loss=0.05907, over 1612705.59 frames. 
], batch size: 27, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:45:45,716 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193166.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:03,181 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:04,337 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193193.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:46:08,901 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.389e+02 2.780e+02 3.377e+02 1.311e+03, threshold=5.561e+02, percent-clipped=2.0 +2023-02-07 07:46:14,404 INFO [train.py:901] (1/4) Epoch 24, batch 7300, loss[loss=0.2146, simple_loss=0.2902, pruned_loss=0.0695, over 8133.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2863, pruned_loss=0.05975, over 1614715.71 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:46:17,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:29,142 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193229.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:43,419 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 07:46:48,080 INFO [train.py:901] (1/4) Epoch 24, batch 7350, loss[loss=0.1702, simple_loss=0.2497, pruned_loss=0.04532, over 7233.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2859, pruned_loss=0.05978, over 1614592.75 frames. ], batch size: 16, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:47:00,601 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 07:47:11,639 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 07:47:17,800 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.477e+02 2.971e+02 3.853e+02 6.522e+02, threshold=5.942e+02, percent-clipped=4.0 +2023-02-07 07:47:19,302 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:47:23,214 INFO [train.py:901] (1/4) Epoch 24, batch 7400, loss[loss=0.193, simple_loss=0.2609, pruned_loss=0.06255, over 7234.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2846, pruned_loss=0.05926, over 1611308.93 frames. ], batch size: 16, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:47:23,404 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193308.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:47:31,926 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 07:47:52,386 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:47:58,367 INFO [train.py:901] (1/4) Epoch 24, batch 7450, loss[loss=0.1864, simple_loss=0.2772, pruned_loss=0.04773, over 8641.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2846, pruned_loss=0.05954, over 1611257.47 frames. ], batch size: 49, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:48:09,543 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-07 07:48:10,327 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193374.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:21,035 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2079, 4.1901, 3.7539, 1.8193, 3.7290, 3.8071, 3.6524, 3.6148], + device='cuda:1'), covar=tensor([0.0784, 0.0584, 0.1080, 0.5124, 0.0935, 0.1039, 0.1401, 0.0815], + device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0455, 0.0443, 0.0557, 0.0442, 0.0456, 0.0437, 0.0397], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:48:26,201 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.13 vs. limit=5.0 +2023-02-07 07:48:28,445 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.340e+02 2.930e+02 4.048e+02 8.147e+02, threshold=5.861e+02, percent-clipped=5.0 +2023-02-07 07:48:30,641 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193403.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:32,909 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:34,038 INFO [train.py:901] (1/4) Epoch 24, batch 7500, loss[loss=0.1812, simple_loss=0.2668, pruned_loss=0.04785, over 7662.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.06143, over 1615713.30 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:48:50,723 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:56,215 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:09,712 INFO [train.py:901] (1/4) Epoch 24, batch 7550, loss[loss=0.1911, simple_loss=0.2678, pruned_loss=0.05718, over 7445.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.285, pruned_loss=0.06034, over 1607510.28 frames. ], batch size: 17, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:49:16,841 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:18,144 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9168, 1.6206, 3.4552, 1.5632, 2.3542, 3.7826, 3.9825, 3.2065], + device='cuda:1'), covar=tensor([0.1302, 0.1781, 0.0326, 0.2094, 0.1158, 0.0247, 0.0510, 0.0525], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0322, 0.0286, 0.0314, 0.0314, 0.0272, 0.0428, 0.0302], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:49:19,406 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193472.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:49:32,473 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. 
limit=5.0 +2023-02-07 07:49:34,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:39,067 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.474e+02 3.046e+02 3.751e+02 6.843e+02, threshold=6.092e+02, percent-clipped=3.0 +2023-02-07 07:49:45,271 INFO [train.py:901] (1/4) Epoch 24, batch 7600, loss[loss=0.208, simple_loss=0.288, pruned_loss=0.06402, over 7129.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2851, pruned_loss=0.06076, over 1608649.71 frames. ], batch size: 72, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:49:51,993 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193518.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:50:19,354 INFO [train.py:901] (1/4) Epoch 24, batch 7650, loss[loss=0.1574, simple_loss=0.2398, pruned_loss=0.03752, over 7818.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2855, pruned_loss=0.06132, over 1609216.98 frames. ], batch size: 20, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:50:24,424 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193564.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:50:30,315 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:50:41,144 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193589.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:50:48,583 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.626e+02 3.196e+02 4.372e+02 7.437e+02, threshold=6.392e+02, percent-clipped=4.0 +2023-02-07 07:50:53,961 INFO [train.py:901] (1/4) Epoch 24, batch 7700, loss[loss=0.2617, simple_loss=0.3296, pruned_loss=0.09685, over 7340.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2867, pruned_loss=0.06139, over 1610348.22 frames. ], batch size: 71, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:51:11,796 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193633.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:14,902 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 07:51:20,931 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193646.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:27,965 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1090, 1.7908, 2.3029, 1.9376, 2.2379, 2.1382, 1.9937, 1.1300], + device='cuda:1'), covar=tensor([0.5424, 0.4907, 0.1991, 0.3916, 0.2524, 0.3081, 0.1966, 0.5146], + device='cuda:1'), in_proj_covar=tensor([0.0950, 0.1001, 0.0824, 0.0969, 0.1011, 0.0912, 0.0763, 0.0836], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 07:51:29,074 INFO [train.py:901] (1/4) Epoch 24, batch 7750, loss[loss=0.2162, simple_loss=0.3016, pruned_loss=0.06544, over 8501.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2873, pruned_loss=0.06139, over 1614562.83 frames. 
], batch size: 26, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:51:50,184 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193688.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:58,105 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.679e+02 3.147e+02 3.999e+02 8.742e+02, threshold=6.294e+02, percent-clipped=3.0 +2023-02-07 07:52:03,353 INFO [train.py:901] (1/4) Epoch 24, batch 7800, loss[loss=0.2185, simple_loss=0.3046, pruned_loss=0.06621, over 8475.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2867, pruned_loss=0.06096, over 1613480.31 frames. ], batch size: 29, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:52:37,263 INFO [train.py:901] (1/4) Epoch 24, batch 7850, loss[loss=0.2018, simple_loss=0.2743, pruned_loss=0.06467, over 7643.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2866, pruned_loss=0.06069, over 1616428.93 frames. ], batch size: 19, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:52:39,491 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:52:47,566 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5014, 2.0633, 3.2613, 1.4357, 2.4362, 1.9751, 1.6111, 2.4940], + device='cuda:1'), covar=tensor([0.2159, 0.2810, 0.0910, 0.4846, 0.2118, 0.3475, 0.2652, 0.2367], + device='cuda:1'), in_proj_covar=tensor([0.0530, 0.0617, 0.0556, 0.0651, 0.0651, 0.0599, 0.0548, 0.0636], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 07:52:48,222 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:52:54,181 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:53:05,055 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193799.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:53:05,511 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.471e+02 2.801e+02 3.652e+02 8.352e+02, threshold=5.603e+02, percent-clipped=2.0 +2023-02-07 07:53:10,835 INFO [train.py:901] (1/4) Epoch 24, batch 7900, loss[loss=0.1746, simple_loss=0.2647, pruned_loss=0.04227, over 7808.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06082, over 1619135.31 frames. ], batch size: 20, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:53:16,267 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193816.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:53:23,768 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.28 vs. limit=5.0 +2023-02-07 07:53:28,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2228, 2.1046, 1.6358, 1.9561, 1.7377, 1.3948, 1.5921, 1.6346], + device='cuda:1'), covar=tensor([0.1421, 0.0444, 0.1366, 0.0590, 0.0795, 0.1751, 0.1121, 0.0976], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0236, 0.0341, 0.0311, 0.0303, 0.0345, 0.0350, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 07:53:43,830 INFO [train.py:901] (1/4) Epoch 24, batch 7950, loss[loss=0.1978, simple_loss=0.2795, pruned_loss=0.05805, over 8346.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.06143, over 1617817.79 frames. 
], batch size: 26, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:11,269 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:12,511 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.494e+02 3.061e+02 3.521e+02 6.741e+02, threshold=6.122e+02, percent-clipped=2.0 +2023-02-07 07:54:13,969 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193902.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:17,826 INFO [train.py:901] (1/4) Epoch 24, batch 8000, loss[loss=0.1941, simple_loss=0.2708, pruned_loss=0.05869, over 7652.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2877, pruned_loss=0.06127, over 1619287.30 frames. ], batch size: 19, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:33,384 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193931.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:54:42,139 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:43,444 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0352, 2.1981, 1.8464, 2.4918, 1.6865, 1.7896, 1.9775, 2.2269], + device='cuda:1'), covar=tensor([0.0653, 0.0632, 0.0819, 0.0450, 0.0893, 0.1061, 0.0709, 0.0672], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0197, 0.0245, 0.0214, 0.0205, 0.0247, 0.0251, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 07:54:51,055 INFO [train.py:901] (1/4) Epoch 24, batch 8050, loss[loss=0.2318, simple_loss=0.3067, pruned_loss=0.07849, over 7304.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2861, pruned_loss=0.06127, over 1600025.46 frames. ], batch size: 75, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:58,127 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:55:03,440 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:55:23,284 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 07:55:28,455 INFO [train.py:901] (1/4) Epoch 25, batch 0, loss[loss=0.2415, simple_loss=0.3022, pruned_loss=0.09038, over 8247.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3022, pruned_loss=0.09038, over 8247.00 frames. ], batch size: 24, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:55:28,456 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 07:55:39,676 INFO [train.py:935] (1/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 07:55:39,677 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 07:55:46,485 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.577e+02 3.086e+02 3.975e+02 9.885e+02, threshold=6.172e+02, percent-clipped=3.0 +2023-02-07 07:55:57,042 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
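The `WARNING [train.py:1067] Exclude cut with ID ... from training. Duration: ...` records show the recipe's utterance-length filter at work: cuts shorter than roughly 1 second or longer than roughly 20 seconds are dropped before batching (every excluded duration in this log is either under 1 s or in the 25-30 s range). A minimal sketch of such a filter on a Lhotse `CutSet`; the exact bounds are assumptions taken from typical icefall recipes, not values printed here:

```python
# Hypothetical re-creation of the duration filter; the 1.0 s / 20.0 s
# bounds are illustrative assumptions, not read from this log.
import logging


def remove_short_and_long_utt(cuts, min_dur: float = 1.0, max_dur: float = 20.0):
    """Keep only cuts whose duration lies within [min_dur, max_dur] seconds."""

    def is_in_range(c) -> bool:
        if c.duration < min_dur or c.duration > max_dur:
            logging.warning(
                f"Exclude cut with ID {c.id} from training. "
                f"Duration: {c.duration}"
            )
            return False
        return True

    return cuts.filter(is_in_range)  # lazy filter on a lhotse CutSet
```

Note that speed-perturbed copies (the `_sp0.9` / `_sp1.1` suffixes on cut IDs) are filtered on their perturbed duration: a 0.94 s cut produced by 1.1x speed-up can be excluded even though the original was long enough, and a ~25 s original becomes a >27 s exclusion after 0.9x slow-down.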
Duration: 28.72225 +2023-02-07 07:56:00,101 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:15,956 INFO [train.py:901] (1/4) Epoch 25, batch 50, loss[loss=0.2663, simple_loss=0.3293, pruned_loss=0.1016, over 6930.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2846, pruned_loss=0.05953, over 364272.69 frames. ], batch size: 72, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:56:17,544 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:32,521 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 07:56:51,135 INFO [train.py:901] (1/4) Epoch 25, batch 100, loss[loss=0.1649, simple_loss=0.2509, pruned_loss=0.0394, over 7420.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2852, pruned_loss=0.05933, over 643337.17 frames. ], batch size: 17, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:56:51,286 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:52,672 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:55,700 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 07:56:57,728 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.655e+02 3.251e+02 4.247e+02 7.218e+02, threshold=6.502e+02, percent-clipped=2.0 +2023-02-07 07:57:19,065 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 07:57:22,495 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 07:57:25,373 INFO [train.py:901] (1/4) Epoch 25, batch 150, loss[loss=0.1951, simple_loss=0.2754, pruned_loss=0.05736, over 7968.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.286, pruned_loss=0.06034, over 863209.20 frames. ], batch size: 21, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:57:35,092 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:57:52,133 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:57:58,197 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194187.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:58:00,647 INFO [train.py:901] (1/4) Epoch 25, batch 200, loss[loss=0.2202, simple_loss=0.3068, pruned_loss=0.06681, over 8444.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2858, pruned_loss=0.05953, over 1033325.69 frames. ], batch size: 29, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:58:07,397 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.343e+02 2.842e+02 3.543e+02 5.999e+02, threshold=5.685e+02, percent-clipped=0.0 +2023-02-07 07:58:16,692 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194212.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:58:35,370 INFO [train.py:901] (1/4) Epoch 25, batch 250, loss[loss=0.2108, simple_loss=0.2988, pruned_loss=0.06146, over 8373.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2865, pruned_loss=0.05968, over 1167797.73 frames. 
], batch size: 24, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:58:39,450 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194246.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:58:49,469 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 07:58:58,139 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 07:59:08,685 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-07 07:59:09,584 INFO [train.py:901] (1/4) Epoch 25, batch 300, loss[loss=0.1614, simple_loss=0.2475, pruned_loss=0.03766, over 7695.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2848, pruned_loss=0.05898, over 1265161.46 frames. ], batch size: 18, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:59:17,103 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.353e+02 2.857e+02 3.504e+02 7.851e+02, threshold=5.715e+02, percent-clipped=2.0 +2023-02-07 07:59:19,404 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7572, 1.5307, 3.2280, 1.3998, 2.3856, 3.3932, 3.5757, 2.9145], + device='cuda:1'), covar=tensor([0.1278, 0.1664, 0.0304, 0.2179, 0.0856, 0.0267, 0.0571, 0.0535], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0322, 0.0287, 0.0316, 0.0316, 0.0273, 0.0429, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 07:59:43,419 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:59:45,908 INFO [train.py:901] (1/4) Epoch 25, batch 350, loss[loss=0.1967, simple_loss=0.2797, pruned_loss=0.05687, over 7544.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2851, pruned_loss=0.05938, over 1341544.82 frames. ], batch size: 18, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:59:51,517 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194348.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:00,445 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:08,055 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194371.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:09,434 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:14,131 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194380.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:00:20,654 INFO [train.py:901] (1/4) Epoch 25, batch 400, loss[loss=0.2143, simple_loss=0.2858, pruned_loss=0.07137, over 7232.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2859, pruned_loss=0.05995, over 1401464.36 frames. ], batch size: 16, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 08:00:27,619 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.445e+02 3.013e+02 3.982e+02 8.525e+02, threshold=6.027e+02, percent-clipped=7.0 +2023-02-07 08:00:29,489 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. 
limit=2.0 +2023-02-07 08:00:52,196 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194434.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:56,970 INFO [train.py:901] (1/4) Epoch 25, batch 450, loss[loss=0.2734, simple_loss=0.3418, pruned_loss=0.1025, over 7217.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2845, pruned_loss=0.05904, over 1448401.78 frames. ], batch size: 73, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 08:01:30,918 INFO [train.py:901] (1/4) Epoch 25, batch 500, loss[loss=0.1937, simple_loss=0.2678, pruned_loss=0.0598, over 7283.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2859, pruned_loss=0.05997, over 1487048.17 frames. ], batch size: 16, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:01:37,839 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.459e+02 3.156e+02 4.025e+02 7.800e+02, threshold=6.312e+02, percent-clipped=3.0 +2023-02-07 08:02:06,155 INFO [train.py:901] (1/4) Epoch 25, batch 550, loss[loss=0.1734, simple_loss=0.2557, pruned_loss=0.04552, over 7660.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2851, pruned_loss=0.05968, over 1513702.87 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:02:13,446 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:02:30,146 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2075, 2.5276, 2.8508, 1.5905, 3.1437, 1.6561, 1.5506, 2.2063], + device='cuda:1'), covar=tensor([0.0792, 0.0360, 0.0283, 0.0730, 0.0417, 0.0989, 0.0865, 0.0521], + device='cuda:1'), in_proj_covar=tensor([0.0459, 0.0401, 0.0355, 0.0450, 0.0387, 0.0537, 0.0398, 0.0431], + device='cuda:1'), out_proj_covar=tensor([1.2215e-04, 1.0461e-04, 9.3020e-05, 1.1807e-04, 1.0144e-04, 1.5059e-04, + 1.0673e-04, 1.1351e-04], device='cuda:1') +2023-02-07 08:02:42,128 INFO [train.py:901] (1/4) Epoch 25, batch 600, loss[loss=0.169, simple_loss=0.2479, pruned_loss=0.04503, over 7792.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.286, pruned_loss=0.06009, over 1544282.26 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:02:48,737 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.361e+02 2.970e+02 3.663e+02 1.001e+03, threshold=5.941e+02, percent-clipped=3.0 +2023-02-07 08:03:01,145 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 08:03:01,345 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:10,844 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5628, 1.3370, 1.7069, 1.3069, 0.9123, 1.4390, 1.5161, 1.4510], + device='cuda:1'), covar=tensor([0.0561, 0.1276, 0.1626, 0.1485, 0.0595, 0.1474, 0.0710, 0.0657], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 08:03:16,780 INFO [train.py:901] (1/4) Epoch 25, batch 650, loss[loss=0.2105, simple_loss=0.2928, pruned_loss=0.06412, over 7919.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.285, pruned_loss=0.05968, over 1556266.02 frames. 
], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:03:18,067 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194642.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:23,709 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0881, 1.9083, 2.3921, 2.1124, 2.4008, 2.0901, 1.9713, 1.6252], + device='cuda:1'), covar=tensor([0.4073, 0.3868, 0.1652, 0.2891, 0.1843, 0.2670, 0.1457, 0.3709], + device='cuda:1'), in_proj_covar=tensor([0.0942, 0.0995, 0.0814, 0.0961, 0.1001, 0.0904, 0.0754, 0.0830], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:03:25,636 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:45,809 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194680.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:52,562 INFO [train.py:901] (1/4) Epoch 25, batch 700, loss[loss=0.1724, simple_loss=0.2555, pruned_loss=0.04464, over 7544.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2852, pruned_loss=0.06014, over 1567943.10 frames. ], batch size: 18, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:04:00,047 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.448e+02 2.849e+02 3.638e+02 5.412e+02, threshold=5.698e+02, percent-clipped=0.0 +2023-02-07 08:04:09,640 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194715.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:04:16,394 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194724.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:04:27,404 INFO [train.py:901] (1/4) Epoch 25, batch 750, loss[loss=0.1904, simple_loss=0.2737, pruned_loss=0.05357, over 8195.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2844, pruned_loss=0.06004, over 1576864.61 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:04:49,463 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 08:04:58,546 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 08:05:03,382 INFO [train.py:901] (1/4) Epoch 25, batch 800, loss[loss=0.176, simple_loss=0.253, pruned_loss=0.04947, over 7248.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2841, pruned_loss=0.05962, over 1587883.37 frames. 
], batch size: 16, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:05:07,603 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:11,589 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 2.417e+02 2.991e+02 3.771e+02 6.788e+02, threshold=5.982e+02, percent-clipped=2.0 +2023-02-07 08:05:14,533 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194805.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:31,778 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:31,810 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:37,855 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194839.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:05:38,299 INFO [train.py:901] (1/4) Epoch 25, batch 850, loss[loss=0.2697, simple_loss=0.3298, pruned_loss=0.1048, over 8437.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2849, pruned_loss=0.05993, over 1593851.25 frames. ], batch size: 27, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:05:49,038 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3966, 2.3002, 1.8467, 2.1576, 2.0192, 1.5056, 1.8994, 1.8353], + device='cuda:1'), covar=tensor([0.1506, 0.0464, 0.1210, 0.0568, 0.0781, 0.1686, 0.1071, 0.1055], + device='cuda:1'), in_proj_covar=tensor([0.0353, 0.0234, 0.0335, 0.0307, 0.0298, 0.0339, 0.0344, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 08:05:56,595 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:06:14,007 INFO [train.py:901] (1/4) Epoch 25, batch 900, loss[loss=0.1754, simple_loss=0.26, pruned_loss=0.04539, over 7980.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2837, pruned_loss=0.05943, over 1596840.54 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:06:22,178 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.491e+02 2.923e+02 3.701e+02 8.623e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 08:06:49,617 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 08:06:49,722 INFO [train.py:901] (1/4) Epoch 25, batch 950, loss[loss=0.238, simple_loss=0.3209, pruned_loss=0.07754, over 8189.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2832, pruned_loss=0.05859, over 1601626.84 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:07:02,579 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0905, 1.6531, 1.8477, 1.6241, 1.1136, 1.6817, 1.9654, 1.8278], + device='cuda:1'), covar=tensor([0.0594, 0.1177, 0.1625, 0.1357, 0.0649, 0.1375, 0.0701, 0.0605], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0160, 0.0101, 0.0164, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 08:07:19,512 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
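The `INFO [optim.py:369] Clipping_scale=2.0, grad-norm quartiles ... threshold=... percent-clipped=...` records summarize adaptive gradient clipping: the optimizer tracks recent global gradient norms, and the clipping threshold is `Clipping_scale` times their median (in the records above the printed threshold is consistently 2.0 times the middle quartile, e.g. 2.0 x 2.991e+02 = 5.982e+02). A simplified sketch of this bookkeeping; the window size and reporting cadence are assumptions, and ScaledAdam's actual implementation differs in detail:

```python
# Hedged sketch of median-based gradient clipping; window size and the
# percent-clipped accounting are illustrative assumptions.
from collections import deque

import torch


class MedianGradClipper:
    """Clip the global grad norm at clipping_scale * median of recent norms."""

    def __init__(self, clipping_scale: float = 2.0, window: int = 128):
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=window)  # recent global grad norms
        self.clipped = 0
        self.seen = 0

    def __call__(self, params) -> None:
        grads = [p.grad for p in params if p.grad is not None]
        norm = torch.norm(torch.stack([g.norm() for g in grads])).item()
        self.norms.append(norm)
        self.seen += 1
        median = sorted(self.norms)[len(self.norms) // 2]
        threshold = self.clipping_scale * median
        if norm > threshold:
            self.clipped += 1
            for g in grads:
                g.mul_(threshold / norm)  # rescale in place

    def log_stats(self) -> str:
        q = torch.quantile(
            torch.tensor(list(self.norms)),
            torch.tensor([0.0, 0.25, 0.50, 0.75, 1.00]),
        )
        pct = 100.0 * self.clipped / max(self.seen, 1)
        return (
            f"Clipping_scale={self.clipping_scale}, grad-norm quartiles "
            + " ".join(f"{v:.3e}" for v in q.tolist())
            + f", threshold={self.clipping_scale * q[2]:.3e}, "
            f"percent-clipped={pct:.1f}"
        )
```

The payoff of a median-relative threshold is visible in the log itself: as the typical gradient norm drifts over training, the threshold drifts with it, so only genuine outlier batches (a few percent) get clipped.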
Duration: 25.3818125 +2023-02-07 08:07:24,289 INFO [train.py:901] (1/4) Epoch 25, batch 1000, loss[loss=0.1989, simple_loss=0.2815, pruned_loss=0.05821, over 7934.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2838, pruned_loss=0.05889, over 1606480.57 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:07:29,065 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:07:32,163 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.651e+02 3.101e+02 3.894e+02 6.477e+02, threshold=6.202e+02, percent-clipped=4.0 +2023-02-07 08:07:51,784 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195029.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:07:54,457 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 08:07:59,735 INFO [train.py:901] (1/4) Epoch 25, batch 1050, loss[loss=0.2149, simple_loss=0.2994, pruned_loss=0.06517, over 8493.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05928, over 1611746.58 frames. ], batch size: 49, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:08:06,381 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 08:08:07,162 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195051.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:14,480 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195062.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:08:23,829 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195076.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:23,884 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4827, 1.3076, 1.5591, 1.4083, 1.4659, 1.4764, 1.3364, 0.7460], + device='cuda:1'), covar=tensor([0.3653, 0.3149, 0.1498, 0.2202, 0.1682, 0.2248, 0.1387, 0.3456], + device='cuda:1'), in_proj_covar=tensor([0.0950, 0.1003, 0.0824, 0.0968, 0.1009, 0.0914, 0.0761, 0.0836], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:08:31,305 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:33,294 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:33,849 INFO [train.py:901] (1/4) Epoch 25, batch 1100, loss[loss=0.1711, simple_loss=0.2524, pruned_loss=0.04494, over 7937.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2849, pruned_loss=0.0598, over 1607521.93 frames. 
], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:08:36,607 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8381, 1.6900, 2.5892, 1.5251, 2.1348, 2.8771, 2.8636, 2.5913], + device='cuda:1'), covar=tensor([0.1024, 0.1450, 0.0688, 0.1882, 0.1885, 0.0300, 0.0726, 0.0454], + device='cuda:1'), in_proj_covar=tensor([0.0300, 0.0324, 0.0288, 0.0317, 0.0316, 0.0275, 0.0430, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 08:08:37,355 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195095.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:08:41,229 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.550e+02 3.152e+02 4.111e+02 6.650e+02, threshold=6.304e+02, percent-clipped=3.0 +2023-02-07 08:08:48,154 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:48,177 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:53,476 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8411, 5.9161, 5.1817, 2.7069, 5.1796, 5.6158, 5.4262, 5.4014], + device='cuda:1'), covar=tensor([0.0505, 0.0375, 0.0910, 0.4002, 0.0717, 0.0715, 0.1094, 0.0518], + device='cuda:1'), in_proj_covar=tensor([0.0531, 0.0450, 0.0432, 0.0545, 0.0434, 0.0452, 0.0426, 0.0396], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:08:54,921 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195120.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:09:09,218 INFO [train.py:901] (1/4) Epoch 25, batch 1150, loss[loss=0.2069, simple_loss=0.2817, pruned_loss=0.06606, over 7936.00 frames. ], tot_loss[loss=0.201, simple_loss=0.284, pruned_loss=0.059, over 1609899.39 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:09:16,846 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 08:09:43,684 INFO [train.py:901] (1/4) Epoch 25, batch 1200, loss[loss=0.2019, simple_loss=0.2819, pruned_loss=0.06098, over 8499.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2852, pruned_loss=0.05897, over 1613459.72 frames. ], batch size: 49, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:09:44,707 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-02-07 08:09:51,813 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.229e+02 2.843e+02 3.492e+02 1.399e+03, threshold=5.685e+02, percent-clipped=2.0 +2023-02-07 08:09:57,123 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:10:18,374 INFO [train.py:901] (1/4) Epoch 25, batch 1250, loss[loss=0.1865, simple_loss=0.2653, pruned_loss=0.05379, over 8060.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2849, pruned_loss=0.05919, over 1609536.79 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:10:34,136 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. 
limit=5.0 +2023-02-07 08:10:53,103 INFO [train.py:901] (1/4) Epoch 25, batch 1300, loss[loss=0.1646, simple_loss=0.2577, pruned_loss=0.0358, over 7974.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2849, pruned_loss=0.05896, over 1609377.39 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:11:00,272 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.452e+02 2.850e+02 4.025e+02 1.071e+03, threshold=5.700e+02, percent-clipped=7.0 +2023-02-07 08:11:16,286 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:26,830 INFO [train.py:901] (1/4) Epoch 25, batch 1350, loss[loss=0.2184, simple_loss=0.2883, pruned_loss=0.07425, over 6831.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2852, pruned_loss=0.05935, over 1607827.50 frames. ], batch size: 15, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:11:45,672 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:50,112 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:57,386 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-07 08:12:02,401 INFO [train.py:901] (1/4) Epoch 25, batch 1400, loss[loss=0.1909, simple_loss=0.2522, pruned_loss=0.06477, over 7424.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2855, pruned_loss=0.05942, over 1612398.62 frames. ], batch size: 17, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:12:04,000 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:10,473 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.570e+02 2.915e+02 3.833e+02 8.465e+02, threshold=5.831e+02, percent-clipped=6.0 +2023-02-07 08:12:13,226 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195406.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:12:15,768 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195410.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:31,396 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:36,127 INFO [train.py:901] (1/4) Epoch 25, batch 1450, loss[loss=0.2255, simple_loss=0.3033, pruned_loss=0.07385, over 8498.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2861, pruned_loss=0.06039, over 1614504.34 frames. ], batch size: 26, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:12:36,343 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5252, 1.4839, 1.8279, 1.2218, 1.2539, 1.8464, 0.2078, 1.2234], + device='cuda:1'), covar=tensor([0.1508, 0.1200, 0.0402, 0.0947, 0.2257, 0.0446, 0.1924, 0.1248], + device='cuda:1'), in_proj_covar=tensor([0.0195, 0.0201, 0.0130, 0.0221, 0.0273, 0.0139, 0.0171, 0.0196], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 08:12:44,157 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
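The `INFO [zipformer.py:1185] warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop=...` records trace Zipformer's stochastic layer skipping: each encoder stack has its own warmup window in batch counts (666.7-1333.3, 1333.3-2000.0, and so on for successive stacks), and on a given batch a small random subset of layers may be bypassed entirely as a regularizer; here, ~193k batches in, a single layer is still dropped now and then. A much-simplified sketch of the per-batch decision; the real schedule in `zipformer.py` modulates the probability across the warmup window, and the drop probability below is an assumption:

```python
# Illustrative sketch only; drop_prob is assumed, not taken from this log.
import logging
import random


def pick_layers_to_drop(
    batch_count: float,
    num_layers: int,
    warmup_begin: float,
    warmup_end: float,
    drop_prob: float = 0.075,
) -> set:
    """Decide which encoder layers to bypass on this batch."""
    # Drop at most one layer, and never before this stack's warmup begins.
    num_to_drop = (
        1 if batch_count >= warmup_begin and random.random() < drop_prob else 0
    )
    layers_to_drop = set(random.sample(range(num_layers), num_to_drop))
    logging.info(
        f"warmup_begin={warmup_begin}, warmup_end={warmup_end}, "
        f"batch_count={batch_count}, num_to_drop={num_to_drop}, "
        f"layers_to_drop={layers_to_drop}"
    )
    return layers_to_drop


# Inside the encoder's forward pass, skipped layers pass the input through:
# for i, layer in enumerate(self.layers):
#     x = x if i in layers_to_drop else layer(x)
```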
Duration: 25.85 +2023-02-07 08:13:10,183 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195488.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:13:11,409 INFO [train.py:901] (1/4) Epoch 25, batch 1500, loss[loss=0.2093, simple_loss=0.2949, pruned_loss=0.06187, over 8267.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.285, pruned_loss=0.05998, over 1612335.07 frames. ], batch size: 24, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:13:19,806 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.401e+02 3.375e+02 4.255e+02 1.024e+03, threshold=6.749e+02, percent-clipped=12.0 +2023-02-07 08:13:33,927 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195521.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:13:46,249 INFO [train.py:901] (1/4) Epoch 25, batch 1550, loss[loss=0.2162, simple_loss=0.2872, pruned_loss=0.07258, over 7980.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2849, pruned_loss=0.05984, over 1613416.42 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:13:51,909 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:14,116 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:21,362 INFO [train.py:901] (1/4) Epoch 25, batch 1600, loss[loss=0.1964, simple_loss=0.2866, pruned_loss=0.05311, over 7810.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2836, pruned_loss=0.05922, over 1612253.42 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:14:29,486 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.317e+02 3.105e+02 3.813e+02 7.132e+02, threshold=6.211e+02, percent-clipped=3.0 +2023-02-07 08:14:32,329 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:37,017 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0990, 1.8350, 2.2729, 1.9170, 2.1658, 2.1932, 2.0183, 1.1030], + device='cuda:1'), covar=tensor([0.5653, 0.4793, 0.2120, 0.3982, 0.2896, 0.3265, 0.1894, 0.5436], + device='cuda:1'), in_proj_covar=tensor([0.0954, 0.1009, 0.0826, 0.0977, 0.1019, 0.0919, 0.0765, 0.0842], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:14:55,981 INFO [train.py:901] (1/4) Epoch 25, batch 1650, loss[loss=0.1959, simple_loss=0.2774, pruned_loss=0.05721, over 8292.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2837, pruned_loss=0.05908, over 1616631.68 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:15:29,744 INFO [train.py:901] (1/4) Epoch 25, batch 1700, loss[loss=0.2226, simple_loss=0.3051, pruned_loss=0.07001, over 8326.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05921, over 1616669.28 frames. ], batch size: 25, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:15:38,029 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.590e+02 3.116e+02 3.996e+02 7.880e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-07 08:16:05,348 INFO [train.py:901] (1/4) Epoch 25, batch 1750, loss[loss=0.1641, simple_loss=0.2477, pruned_loss=0.04026, over 7256.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.05936, over 1616516.83 frames. 
], batch size: 16, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:16:06,322 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5673, 2.9117, 2.4481, 3.9827, 1.6051, 2.0561, 2.3217, 2.8120], + device='cuda:1'), covar=tensor([0.0637, 0.0697, 0.0760, 0.0243, 0.1074, 0.1235, 0.0972, 0.0796], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0213, 0.0205, 0.0245, 0.0249, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 08:16:09,114 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195744.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:15,708 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:17,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1074, 1.9738, 2.4318, 2.0261, 2.4916, 2.2358, 1.9902, 1.3577], + device='cuda:1'), covar=tensor([0.5551, 0.4881, 0.1958, 0.3931, 0.2401, 0.3144, 0.2036, 0.5287], + device='cuda:1'), in_proj_covar=tensor([0.0953, 0.1008, 0.0825, 0.0976, 0.1016, 0.0917, 0.0764, 0.0841], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:16:26,012 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195769.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:31,522 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195777.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:16:40,098 INFO [train.py:901] (1/4) Epoch 25, batch 1800, loss[loss=0.1879, simple_loss=0.2728, pruned_loss=0.05149, over 8309.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2851, pruned_loss=0.05956, over 1616044.90 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:16:48,985 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.456e+02 2.857e+02 3.484e+02 7.816e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-07 08:16:49,207 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:16:50,558 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:07,910 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:15,239 INFO [train.py:901] (1/4) Epoch 25, batch 1850, loss[loss=0.243, simple_loss=0.3233, pruned_loss=0.08141, over 8332.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.05934, over 1618632.51 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:36,404 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:50,148 INFO [train.py:901] (1/4) Epoch 25, batch 1900, loss[loss=0.1773, simple_loss=0.264, pruned_loss=0.04534, over 8632.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.284, pruned_loss=0.05938, over 1616060.02 frames. 
], batch size: 49, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:58,368 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.686e+02 3.045e+02 3.689e+02 8.196e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-07 08:18:24,464 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 08:18:25,126 INFO [train.py:901] (1/4) Epoch 25, batch 1950, loss[loss=0.1998, simple_loss=0.2855, pruned_loss=0.05708, over 8602.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2843, pruned_loss=0.05949, over 1612979.59 frames. ], batch size: 31, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:18:37,848 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 08:18:57,294 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 08:19:00,594 INFO [train.py:901] (1/4) Epoch 25, batch 2000, loss[loss=0.1631, simple_loss=0.2498, pruned_loss=0.03822, over 7227.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2844, pruned_loss=0.05951, over 1613604.71 frames. ], batch size: 16, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:19:09,746 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.344e+02 2.823e+02 3.287e+02 7.423e+02, threshold=5.646e+02, percent-clipped=4.0 +2023-02-07 08:19:11,917 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5962, 1.9756, 2.9137, 1.5077, 2.0471, 1.9412, 1.6673, 2.1553], + device='cuda:1'), covar=tensor([0.1956, 0.2538, 0.0829, 0.4534, 0.2094, 0.3205, 0.2440, 0.2311], + device='cuda:1'), in_proj_covar=tensor([0.0532, 0.0622, 0.0556, 0.0658, 0.0655, 0.0602, 0.0549, 0.0638], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:19:33,236 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-07 08:19:36,109 INFO [train.py:901] (1/4) Epoch 25, batch 2050, loss[loss=0.1935, simple_loss=0.2893, pruned_loss=0.04885, over 8472.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05877, over 1614589.78 frames. ], batch size: 29, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:00,445 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.97 vs. limit=5.0 +2023-02-07 08:20:11,114 INFO [train.py:901] (1/4) Epoch 25, batch 2100, loss[loss=0.1925, simple_loss=0.2697, pruned_loss=0.05762, over 8049.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2854, pruned_loss=0.06012, over 1615846.64 frames. 
], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:20,388 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.403e+02 2.946e+02 3.659e+02 8.101e+02, threshold=5.892e+02, percent-clipped=3.0 +2023-02-07 08:20:23,911 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5837, 2.7042, 2.4359, 4.0762, 1.5248, 2.0347, 2.4206, 2.8033], + device='cuda:1'), covar=tensor([0.0684, 0.0854, 0.0792, 0.0250, 0.1127, 0.1250, 0.0973, 0.0837], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0212, 0.0204, 0.0244, 0.0248, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 08:20:25,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3723, 1.3591, 4.7460, 1.9662, 3.8076, 3.7549, 4.2393, 4.2011], + device='cuda:1'), covar=tensor([0.1192, 0.7374, 0.0842, 0.5310, 0.1869, 0.1626, 0.1064, 0.0997], + device='cuda:1'), in_proj_covar=tensor([0.0649, 0.0655, 0.0712, 0.0645, 0.0724, 0.0617, 0.0621, 0.0695], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:20:35,907 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=196125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:20:46,054 INFO [train.py:901] (1/4) Epoch 25, batch 2150, loss[loss=0.1666, simple_loss=0.2442, pruned_loss=0.04447, over 7444.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2858, pruned_loss=0.0601, over 1617594.10 frames. ], batch size: 17, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:54,040 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=196150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:21:22,077 INFO [train.py:901] (1/4) Epoch 25, batch 2200, loss[loss=0.1828, simple_loss=0.276, pruned_loss=0.04486, over 8242.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2845, pruned_loss=0.05959, over 1615917.15 frames. ], batch size: 24, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:21:30,653 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.555e+02 3.213e+02 4.289e+02 6.887e+02, threshold=6.426e+02, percent-clipped=5.0 +2023-02-07 08:21:37,545 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7137, 4.7466, 4.1774, 2.1715, 4.1630, 4.3145, 4.1023, 4.1350], + device='cuda:1'), covar=tensor([0.0742, 0.0503, 0.1084, 0.4720, 0.0931, 0.0999, 0.1456, 0.0712], + device='cuda:1'), in_proj_covar=tensor([0.0532, 0.0449, 0.0435, 0.0546, 0.0433, 0.0452, 0.0427, 0.0396], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:21:56,962 INFO [train.py:901] (1/4) Epoch 25, batch 2250, loss[loss=0.2229, simple_loss=0.3076, pruned_loss=0.06909, over 8315.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2836, pruned_loss=0.05983, over 1608526.79 frames. 
], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:07,573 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1950, 1.6977, 1.8287, 1.6877, 1.4086, 1.7131, 2.1036, 1.9765], + device='cuda:1'), covar=tensor([0.0571, 0.1375, 0.1965, 0.1523, 0.0710, 0.1684, 0.0849, 0.0597], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0160, 0.0100, 0.0164, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 08:22:12,518 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6993, 2.4323, 1.8227, 2.2587, 2.2051, 1.6795, 2.0775, 2.1466], + device='cuda:1'), covar=tensor([0.1363, 0.0441, 0.1179, 0.0620, 0.0698, 0.1526, 0.0980, 0.0937], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0238, 0.0341, 0.0313, 0.0303, 0.0345, 0.0350, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 08:22:32,058 INFO [train.py:901] (1/4) Epoch 25, batch 2300, loss[loss=0.1842, simple_loss=0.2749, pruned_loss=0.04677, over 8468.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.285, pruned_loss=0.06037, over 1608382.81 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:40,955 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.794e+02 3.530e+02 9.865e+02, threshold=5.587e+02, percent-clipped=2.0 +2023-02-07 08:23:04,008 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0846, 2.2617, 1.8282, 2.9519, 1.3239, 1.6821, 2.0023, 2.2498], + device='cuda:1'), covar=tensor([0.0672, 0.0675, 0.0900, 0.0288, 0.1072, 0.1231, 0.0830, 0.0781], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0194, 0.0242, 0.0210, 0.0203, 0.0243, 0.0247, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 08:23:07,217 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:07,753 INFO [train.py:901] (1/4) Epoch 25, batch 2350, loss[loss=0.1822, simple_loss=0.2617, pruned_loss=0.05137, over 7265.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2852, pruned_loss=0.0601, over 1609789.14 frames. ], batch size: 16, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:34,292 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:42,086 INFO [train.py:901] (1/4) Epoch 25, batch 2400, loss[loss=0.1893, simple_loss=0.2668, pruned_loss=0.05593, over 8503.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2852, pruned_loss=0.0603, over 1607445.16 frames. ], batch size: 26, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:50,280 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.902e+02 3.432e+02 7.434e+02, threshold=5.805e+02, percent-clipped=2.0 +2023-02-07 08:24:17,357 INFO [train.py:901] (1/4) Epoch 25, batch 2450, loss[loss=0.254, simple_loss=0.3329, pruned_loss=0.08752, over 8488.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06018, over 1609942.37 frames. ], batch size: 49, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:24:51,935 INFO [train.py:901] (1/4) Epoch 25, batch 2500, loss[loss=0.1962, simple_loss=0.2961, pruned_loss=0.04817, over 8108.00 frames. 
], tot_loss[loss=0.2036, simple_loss=0.2868, pruned_loss=0.06018, over 1612358.58 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:00,802 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.398e+02 2.858e+02 3.242e+02 5.404e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 08:25:18,290 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6491, 1.4408, 1.7525, 1.3728, 0.9987, 1.4779, 1.5042, 1.3681], + device='cuda:1'), covar=tensor([0.0535, 0.1170, 0.1613, 0.1468, 0.0566, 0.1392, 0.0680, 0.0691], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0101, 0.0164, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 08:25:26,994 INFO [train.py:901] (1/4) Epoch 25, batch 2550, loss[loss=0.1619, simple_loss=0.2443, pruned_loss=0.03976, over 5521.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2856, pruned_loss=0.05948, over 1605530.86 frames. ], batch size: 12, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:54,367 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7739, 1.6489, 2.0311, 1.8009, 2.0518, 1.8181, 1.6901, 1.2089], + device='cuda:1'), covar=tensor([0.4523, 0.3942, 0.1766, 0.2928, 0.1949, 0.2855, 0.1656, 0.4217], + device='cuda:1'), in_proj_covar=tensor([0.0952, 0.1004, 0.0824, 0.0974, 0.1014, 0.0915, 0.0761, 0.0837], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:26:02,159 INFO [train.py:901] (1/4) Epoch 25, batch 2600, loss[loss=0.1689, simple_loss=0.2532, pruned_loss=0.04226, over 8132.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2843, pruned_loss=0.05879, over 1607183.92 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:26:06,429 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:26:07,757 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8669, 3.8365, 3.4750, 1.8980, 3.3984, 3.4889, 3.4181, 3.3383], + device='cuda:1'), covar=tensor([0.0943, 0.0666, 0.1262, 0.4306, 0.1058, 0.1067, 0.1394, 0.0901], + device='cuda:1'), in_proj_covar=tensor([0.0530, 0.0447, 0.0433, 0.0542, 0.0434, 0.0450, 0.0424, 0.0396], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:26:10,247 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.366e+02 2.911e+02 3.287e+02 8.101e+02, threshold=5.822e+02, percent-clipped=1.0 +2023-02-07 08:26:13,504 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-07 08:26:37,071 INFO [train.py:901] (1/4) Epoch 25, batch 2650, loss[loss=0.1916, simple_loss=0.2722, pruned_loss=0.05545, over 7434.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05844, over 1607010.29 frames. ], batch size: 17, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:08,210 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:12,709 INFO [train.py:901] (1/4) Epoch 25, batch 2700, loss[loss=0.2158, simple_loss=0.2974, pruned_loss=0.06707, over 8480.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2827, pruned_loss=0.05847, over 1603910.15 frames. 
], batch size: 28, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:20,586 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.452e+02 2.909e+02 3.648e+02 8.771e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-07 08:27:33,979 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:46,998 INFO [train.py:901] (1/4) Epoch 25, batch 2750, loss[loss=0.1874, simple_loss=0.277, pruned_loss=0.04888, over 8460.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2828, pruned_loss=0.0588, over 1608135.04 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:55,331 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7655, 1.9087, 2.0406, 1.4427, 2.1095, 1.5243, 0.7799, 1.9297], + device='cuda:1'), covar=tensor([0.0640, 0.0419, 0.0318, 0.0639, 0.0571, 0.0924, 0.0986, 0.0350], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0404, 0.0360, 0.0456, 0.0388, 0.0543, 0.0402, 0.0432], + device='cuda:1'), out_proj_covar=tensor([1.2356e-04, 1.0535e-04, 9.4351e-05, 1.1950e-04, 1.0162e-04, 1.5227e-04, + 1.0762e-04, 1.1362e-04], device='cuda:1') +2023-02-07 08:28:11,179 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.00 vs. limit=5.0 +2023-02-07 08:28:22,175 INFO [train.py:901] (1/4) Epoch 25, batch 2800, loss[loss=0.1914, simple_loss=0.2603, pruned_loss=0.06128, over 7531.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2837, pruned_loss=0.0589, over 1611947.63 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:28:27,858 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:28,543 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:31,145 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.445e+02 2.946e+02 3.604e+02 6.151e+02, threshold=5.892e+02, percent-clipped=2.0 +2023-02-07 08:28:52,423 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 08:28:54,961 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:56,679 INFO [train.py:901] (1/4) Epoch 25, batch 2850, loss[loss=0.2225, simple_loss=0.3005, pruned_loss=0.07229, over 7971.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05928, over 1611553.18 frames. ], batch size: 21, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:19,406 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:32,401 INFO [train.py:901] (1/4) Epoch 25, batch 2900, loss[loss=0.2076, simple_loss=0.3209, pruned_loss=0.04708, over 8236.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2839, pruned_loss=0.05844, over 1614065.02 frames. 
], batch size: 24, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:39,447 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:41,313 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.504e+02 3.053e+02 3.742e+02 6.617e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-07 08:30:08,127 INFO [train.py:901] (1/4) Epoch 25, batch 2950, loss[loss=0.1788, simple_loss=0.2587, pruned_loss=0.04942, over 8248.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.284, pruned_loss=0.0584, over 1617266.93 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:08,205 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:30:08,829 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 08:30:19,843 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 08:30:41,608 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 08:30:42,486 INFO [train.py:901] (1/4) Epoch 25, batch 3000, loss[loss=0.2239, simple_loss=0.3176, pruned_loss=0.06512, over 8515.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.05867, over 1620842.98 frames. ], batch size: 28, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:42,487 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 08:30:55,644 INFO [train.py:935] (1/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2721, pruned_loss=0.03618, over 944034.00 frames. +2023-02-07 08:30:55,645 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 08:31:03,959 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 2.477e+02 2.955e+02 3.925e+02 7.788e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 08:31:30,698 INFO [train.py:901] (1/4) Epoch 25, batch 3050, loss[loss=0.1858, simple_loss=0.2681, pruned_loss=0.05178, over 7652.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.283, pruned_loss=0.05894, over 1612988.81 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:31:40,519 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197054.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:41,157 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:57,862 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197079.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:05,195 INFO [train.py:901] (1/4) Epoch 25, batch 3100, loss[loss=0.2175, simple_loss=0.3008, pruned_loss=0.06711, over 8205.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.284, pruned_loss=0.05939, over 1611666.66 frames. 
], batch size: 48, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:07,487 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:13,235 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.425e+02 3.089e+02 3.818e+02 7.102e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-07 08:32:24,976 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:40,204 INFO [train.py:901] (1/4) Epoch 25, batch 3150, loss[loss=0.1707, simple_loss=0.2659, pruned_loss=0.03771, over 8036.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2841, pruned_loss=0.05899, over 1618549.62 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:40,984 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:33:15,297 INFO [train.py:901] (1/4) Epoch 25, batch 3200, loss[loss=0.193, simple_loss=0.2859, pruned_loss=0.04999, over 8536.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05848, over 1616454.23 frames. ], batch size: 31, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:23,539 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.400e+02 2.739e+02 3.315e+02 1.024e+03, threshold=5.479e+02, percent-clipped=5.0 +2023-02-07 08:33:33,159 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:33:50,303 INFO [train.py:901] (1/4) Epoch 25, batch 3250, loss[loss=0.2079, simple_loss=0.2901, pruned_loss=0.0628, over 8251.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2839, pruned_loss=0.05852, over 1614197.54 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:52,456 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:02,152 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:25,384 INFO [train.py:901] (1/4) Epoch 25, batch 3300, loss[loss=0.1562, simple_loss=0.2355, pruned_loss=0.03845, over 7793.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2841, pruned_loss=0.05915, over 1610571.29 frames. ], batch size: 19, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:34:34,249 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.560e+02 3.230e+02 4.212e+02 8.703e+02, threshold=6.460e+02, percent-clipped=10.0 +2023-02-07 08:34:40,505 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:46,281 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-07 08:34:54,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:57,732 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:00,243 INFO [train.py:901] (1/4) Epoch 25, batch 3350, loss[loss=0.2217, simple_loss=0.3077, pruned_loss=0.06788, over 8722.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2847, pruned_loss=0.05955, over 1610168.61 frames. 
], batch size: 40, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:35:13,348 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:17,466 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8107, 1.7248, 2.4173, 1.5260, 1.2711, 2.4094, 0.3842, 1.4524], + device='cuda:1'), covar=tensor([0.1584, 0.1289, 0.0363, 0.1129, 0.2517, 0.0392, 0.2089, 0.1296], + device='cuda:1'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0220, 0.0271, 0.0139, 0.0170, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 08:35:36,172 INFO [train.py:901] (1/4) Epoch 25, batch 3400, loss[loss=0.2005, simple_loss=0.2941, pruned_loss=0.05346, over 8516.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2859, pruned_loss=0.05989, over 1612495.14 frames. ], batch size: 28, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:35:39,767 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5196, 2.1636, 3.5579, 1.6029, 1.4334, 3.5915, 0.4560, 2.0473], + device='cuda:1'), covar=tensor([0.1458, 0.1258, 0.0207, 0.1932, 0.2751, 0.0200, 0.2168, 0.1279], + device='cuda:1'), in_proj_covar=tensor([0.0193, 0.0199, 0.0130, 0.0220, 0.0271, 0.0138, 0.0170, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 08:35:44,270 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.489e+02 3.044e+02 3.734e+02 7.163e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 08:36:06,317 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197433.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:36:11,060 INFO [train.py:901] (1/4) Epoch 25, batch 3450, loss[loss=0.1899, simple_loss=0.2787, pruned_loss=0.05052, over 8100.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2859, pruned_loss=0.05985, over 1613559.93 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:16,731 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3857, 2.1699, 1.7278, 2.0094, 1.7643, 1.5146, 1.7474, 1.7481], + device='cuda:1'), covar=tensor([0.1261, 0.0398, 0.1246, 0.0515, 0.0735, 0.1525, 0.0953, 0.0848], + device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0236, 0.0340, 0.0311, 0.0300, 0.0345, 0.0350, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 08:36:46,261 INFO [train.py:901] (1/4) Epoch 25, batch 3500, loss[loss=0.1907, simple_loss=0.2727, pruned_loss=0.05433, over 7809.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2846, pruned_loss=0.05912, over 1608531.21 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:54,913 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.387e+02 2.953e+02 3.537e+02 5.869e+02, threshold=5.907e+02, percent-clipped=0.0 +2023-02-07 08:37:02,040 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:07,264 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-07 08:37:17,733 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:19,765 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:21,600 INFO [train.py:901] (1/4) Epoch 25, batch 3550, loss[loss=0.1614, simple_loss=0.2389, pruned_loss=0.04193, over 7542.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2846, pruned_loss=0.05913, over 1608251.86 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:37:22,464 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1969, 1.0281, 1.2968, 1.0403, 0.9231, 1.3313, 0.0485, 0.9502], + device='cuda:1'), covar=tensor([0.1391, 0.1263, 0.0465, 0.0673, 0.2503, 0.0511, 0.2019, 0.1120], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0221, 0.0273, 0.0139, 0.0172, 0.0198], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 08:37:24,422 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4068, 2.2329, 3.1946, 2.4796, 3.0973, 2.5083, 2.3088, 1.8497], + device='cuda:1'), covar=tensor([0.5716, 0.5361, 0.2081, 0.3956, 0.2417, 0.2980, 0.1878, 0.5529], + device='cuda:1'), in_proj_covar=tensor([0.0951, 0.1006, 0.0822, 0.0974, 0.1014, 0.0914, 0.0763, 0.0840], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:37:37,368 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197563.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:52,450 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3223, 2.5397, 2.0454, 2.9758, 1.5656, 1.9238, 2.2912, 2.4912], + device='cuda:1'), covar=tensor([0.0635, 0.0692, 0.0856, 0.0382, 0.0977, 0.1194, 0.0691, 0.0698], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0194, 0.0245, 0.0212, 0.0204, 0.0247, 0.0248, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 08:37:54,484 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:56,271 INFO [train.py:901] (1/4) Epoch 25, batch 3600, loss[loss=0.1814, simple_loss=0.2695, pruned_loss=0.04664, over 7932.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2839, pruned_loss=0.05857, over 1608834.85 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:05,202 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.311e+02 2.881e+02 3.803e+02 6.346e+02, threshold=5.762e+02, percent-clipped=1.0 +2023-02-07 08:38:12,112 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197612.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:13,511 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,215 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,672 INFO [train.py:901] (1/4) Epoch 25, batch 3650, loss[loss=0.2067, simple_loss=0.2843, pruned_loss=0.06456, over 8084.00 frames. 
], tot_loss[loss=0.2002, simple_loss=0.2836, pruned_loss=0.05838, over 1611137.80 frames. ], batch size: 21, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:40,978 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7867, 2.3420, 3.6320, 1.8849, 1.9377, 3.6474, 0.6284, 2.1433], + device='cuda:1'), covar=tensor([0.1651, 0.1174, 0.0221, 0.1621, 0.2425, 0.0282, 0.2218, 0.1518], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0221, 0.0273, 0.0139, 0.0171, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 08:38:54,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4534, 1.7088, 2.1069, 1.3981, 1.5015, 1.7405, 1.5459, 1.5372], + device='cuda:1'), covar=tensor([0.1848, 0.2407, 0.1007, 0.4277, 0.2001, 0.3072, 0.2262, 0.2085], + device='cuda:1'), in_proj_covar=tensor([0.0532, 0.0621, 0.0557, 0.0659, 0.0654, 0.0603, 0.0549, 0.0637], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:39:06,721 INFO [train.py:901] (1/4) Epoch 25, batch 3700, loss[loss=0.1923, simple_loss=0.2825, pruned_loss=0.05104, over 8098.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05818, over 1610139.82 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:39:09,552 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 08:39:15,742 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.484e+02 2.942e+02 3.783e+02 7.174e+02, threshold=5.884e+02, percent-clipped=5.0 +2023-02-07 08:39:35,845 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1613, 1.5097, 4.3771, 1.6313, 3.9162, 3.6723, 3.9887, 3.8833], + device='cuda:1'), covar=tensor([0.0615, 0.4543, 0.0591, 0.4193, 0.1189, 0.0995, 0.0587, 0.0717], + device='cuda:1'), in_proj_covar=tensor([0.0655, 0.0660, 0.0721, 0.0650, 0.0730, 0.0624, 0.0626, 0.0701], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:39:40,724 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 08:39:43,105 INFO [train.py:901] (1/4) Epoch 25, batch 3750, loss[loss=0.2078, simple_loss=0.2884, pruned_loss=0.06359, over 8751.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2857, pruned_loss=0.05968, over 1616309.57 frames. ], batch size: 39, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:09,409 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197777.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:40:18,198 INFO [train.py:901] (1/4) Epoch 25, batch 3800, loss[loss=0.2119, simple_loss=0.2976, pruned_loss=0.06313, over 8540.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.285, pruned_loss=0.05986, over 1611188.71 frames. ], batch size: 49, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:26,490 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.549e+02 3.044e+02 3.681e+02 9.424e+02, threshold=6.087e+02, percent-clipped=5.0 +2023-02-07 08:40:34,308 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 08:40:43,132 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. 
limit=2.0 +2023-02-07 08:40:53,469 INFO [train.py:901] (1/4) Epoch 25, batch 3850, loss[loss=0.2153, simple_loss=0.3044, pruned_loss=0.06309, over 8090.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2848, pruned_loss=0.05993, over 1611883.33 frames. ], batch size: 21, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:57,614 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9826, 2.0511, 1.7636, 2.5975, 1.2905, 1.6094, 1.9471, 2.0146], + device='cuda:1'), covar=tensor([0.0689, 0.0730, 0.0912, 0.0404, 0.0990, 0.1308, 0.0760, 0.0769], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0211, 0.0205, 0.0247, 0.0247, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 08:41:12,906 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 08:41:19,586 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:28,516 INFO [train.py:901] (1/4) Epoch 25, batch 3900, loss[loss=0.1739, simple_loss=0.257, pruned_loss=0.04538, over 7805.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.284, pruned_loss=0.05951, over 1609933.33 frames. ], batch size: 19, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:41:29,963 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197892.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:41:36,382 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.445e+02 2.982e+02 3.609e+02 8.629e+02, threshold=5.963e+02, percent-clipped=3.0 +2023-02-07 08:41:39,826 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197907.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:41,929 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:02,723 INFO [train.py:901] (1/4) Epoch 25, batch 3950, loss[loss=0.1838, simple_loss=0.2751, pruned_loss=0.04623, over 8135.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.284, pruned_loss=0.0597, over 1609379.52 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:24,241 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1652, 1.3056, 4.3414, 1.6603, 3.9085, 3.6089, 3.9580, 3.8471], + device='cuda:1'), covar=tensor([0.0596, 0.4843, 0.0539, 0.3939, 0.1078, 0.0923, 0.0568, 0.0664], + device='cuda:1'), in_proj_covar=tensor([0.0648, 0.0653, 0.0714, 0.0642, 0.0723, 0.0618, 0.0621, 0.0692], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:42:37,824 INFO [train.py:901] (1/4) Epoch 25, batch 4000, loss[loss=0.2177, simple_loss=0.298, pruned_loss=0.06866, over 8606.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2842, pruned_loss=0.05962, over 1611754.41 frames. 
], batch size: 34, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:40,144 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:47,779 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.312e+02 2.768e+02 3.562e+02 7.475e+02, threshold=5.536e+02, percent-clipped=2.0 +2023-02-07 08:43:01,547 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198022.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:14,008 INFO [train.py:901] (1/4) Epoch 25, batch 4050, loss[loss=0.1906, simple_loss=0.2853, pruned_loss=0.04796, over 8318.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2854, pruned_loss=0.06006, over 1619197.68 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:26,785 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-07 08:43:41,815 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 08:43:48,793 INFO [train.py:901] (1/4) Epoch 25, batch 4100, loss[loss=0.2471, simple_loss=0.331, pruned_loss=0.08163, over 8474.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2855, pruned_loss=0.06018, over 1616271.71 frames. ], batch size: 29, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:55,128 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198099.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:57,001 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.413e+02 2.876e+02 3.434e+02 5.292e+02, threshold=5.752e+02, percent-clipped=1.0 +2023-02-07 08:44:24,280 INFO [train.py:901] (1/4) Epoch 25, batch 4150, loss[loss=0.2362, simple_loss=0.3099, pruned_loss=0.08127, over 8020.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2856, pruned_loss=0.05998, over 1615285.49 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:44:29,938 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198148.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:44:47,408 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198173.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:44:58,967 INFO [train.py:901] (1/4) Epoch 25, batch 4200, loss[loss=0.1735, simple_loss=0.251, pruned_loss=0.04797, over 7644.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2852, pruned_loss=0.05936, over 1618967.59 frames. ], batch size: 19, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:08,033 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.351e+02 3.091e+02 3.845e+02 7.201e+02, threshold=6.182e+02, percent-clipped=4.0 +2023-02-07 08:45:09,432 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 08:45:33,126 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 08:45:35,171 INFO [train.py:901] (1/4) Epoch 25, batch 4250, loss[loss=0.1876, simple_loss=0.2742, pruned_loss=0.05052, over 8251.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2845, pruned_loss=0.05968, over 1608646.26 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:40,414 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. 
limit=2.0 +2023-02-07 08:45:41,594 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:44,713 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198254.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:59,329 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:02,106 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198278.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:07,395 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5542, 1.6558, 2.0956, 1.3842, 1.5570, 1.7121, 1.6174, 1.4194], + device='cuda:1'), covar=tensor([0.2170, 0.2780, 0.1156, 0.5068, 0.2256, 0.3802, 0.2666, 0.2482], + device='cuda:1'), in_proj_covar=tensor([0.0531, 0.0621, 0.0555, 0.0658, 0.0653, 0.0604, 0.0550, 0.0636], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:46:09,926 INFO [train.py:901] (1/4) Epoch 25, batch 4300, loss[loss=0.1772, simple_loss=0.2725, pruned_loss=0.04091, over 8028.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2843, pruned_loss=0.05957, over 1611185.01 frames. ], batch size: 22, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:46:18,872 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.314e+02 2.735e+02 3.533e+02 6.805e+02, threshold=5.471e+02, percent-clipped=1.0 +2023-02-07 08:46:19,821 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198303.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:45,626 INFO [train.py:901] (1/4) Epoch 25, batch 4350, loss[loss=0.1705, simple_loss=0.2671, pruned_loss=0.03691, over 8458.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2834, pruned_loss=0.05892, over 1611031.08 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:47:04,272 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 08:47:06,479 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:47:08,431 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7353, 1.8396, 1.5590, 2.2667, 1.0826, 1.4421, 1.7497, 1.8447], + device='cuda:1'), covar=tensor([0.0811, 0.0793, 0.0976, 0.0445, 0.1065, 0.1382, 0.0786, 0.0799], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0245, 0.0212, 0.0204, 0.0247, 0.0248, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 08:47:21,573 INFO [train.py:901] (1/4) Epoch 25, batch 4400, loss[loss=0.2063, simple_loss=0.2858, pruned_loss=0.0634, over 8075.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2821, pruned_loss=0.0583, over 1610046.21 frames. ], batch size: 21, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:29,520 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.496e+02 2.935e+02 3.768e+02 7.665e+02, threshold=5.870e+02, percent-clipped=6.0 +2023-02-07 08:47:45,265 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-07 08:47:56,744 INFO [train.py:901] (1/4) Epoch 25, batch 4450, loss[loss=0.2176, simple_loss=0.3076, pruned_loss=0.06377, over 8467.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2828, pruned_loss=0.05843, over 1611996.92 frames. ], batch size: 27, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:58,934 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:48:04,421 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8436, 6.0370, 5.1317, 2.9567, 5.2842, 5.6360, 5.3567, 5.4120], + device='cuda:1'), covar=tensor([0.0462, 0.0308, 0.0795, 0.3787, 0.0705, 0.0711, 0.1042, 0.0586], + device='cuda:1'), in_proj_covar=tensor([0.0535, 0.0451, 0.0438, 0.0549, 0.0436, 0.0456, 0.0427, 0.0400], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:48:31,978 INFO [train.py:901] (1/4) Epoch 25, batch 4500, loss[loss=0.1991, simple_loss=0.2905, pruned_loss=0.05391, over 8644.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2812, pruned_loss=0.05785, over 1603463.46 frames. ], batch size: 39, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:48:40,442 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.274e+02 2.771e+02 3.541e+02 5.802e+02, threshold=5.543e+02, percent-clipped=0.0 +2023-02-07 08:48:40,491 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 08:48:51,917 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:08,839 INFO [train.py:901] (1/4) Epoch 25, batch 4550, loss[loss=0.1843, simple_loss=0.2752, pruned_loss=0.0467, over 8484.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2819, pruned_loss=0.05784, over 1608432.66 frames. ], batch size: 29, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:14,807 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 08:49:22,073 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:44,786 INFO [train.py:901] (1/4) Epoch 25, batch 4600, loss[loss=0.1929, simple_loss=0.2763, pruned_loss=0.05475, over 8233.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2829, pruned_loss=0.05867, over 1607894.88 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:52,976 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.449e+02 2.940e+02 3.432e+02 8.422e+02, threshold=5.881e+02, percent-clipped=6.0 +2023-02-07 08:50:09,324 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198625.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:14,651 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198633.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:19,152 INFO [train.py:901] (1/4) Epoch 25, batch 4650, loss[loss=0.2183, simple_loss=0.3011, pruned_loss=0.06772, over 8576.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.284, pruned_loss=0.05962, over 1605848.34 frames. 
], batch size: 39, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:50:26,821 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:54,510 INFO [train.py:901] (1/4) Epoch 25, batch 4700, loss[loss=0.1843, simple_loss=0.2779, pruned_loss=0.04538, over 8478.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2839, pruned_loss=0.05908, over 1609765.04 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:51:03,365 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.164e+02 2.735e+02 3.323e+02 7.623e+02, threshold=5.470e+02, percent-clipped=2.0 +2023-02-07 08:51:29,729 INFO [train.py:901] (1/4) Epoch 25, batch 4750, loss[loss=0.1596, simple_loss=0.2433, pruned_loss=0.03797, over 7987.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2839, pruned_loss=0.0591, over 1608668.90 frames. ], batch size: 21, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:51:38,026 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3775, 1.3980, 4.5942, 1.8187, 4.0674, 3.8075, 4.1477, 3.9908], + device='cuda:1'), covar=tensor([0.0568, 0.4758, 0.0514, 0.3831, 0.1102, 0.0940, 0.0579, 0.0721], + device='cuda:1'), in_proj_covar=tensor([0.0646, 0.0650, 0.0709, 0.0640, 0.0723, 0.0616, 0.0617, 0.0690], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:51:39,046 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 08:51:42,014 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 08:51:45,384 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 08:52:05,184 INFO [train.py:901] (1/4) Epoch 25, batch 4800, loss[loss=0.1844, simple_loss=0.2511, pruned_loss=0.05888, over 6781.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.282, pruned_loss=0.05819, over 1604221.87 frames. ], batch size: 15, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:52:13,388 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 2.392e+02 2.917e+02 3.409e+02 6.169e+02, threshold=5.835e+02, percent-clipped=3.0 +2023-02-07 08:52:22,061 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:36,134 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 08:52:39,627 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:40,098 INFO [train.py:901] (1/4) Epoch 25, batch 4850, loss[loss=0.2269, simple_loss=0.3093, pruned_loss=0.07223, over 8364.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2841, pruned_loss=0.05934, over 1612943.23 frames. ], batch size: 24, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:52:55,413 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198861.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:01,716 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198870.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:15,867 INFO [train.py:901] (1/4) Epoch 25, batch 4900, loss[loss=0.2215, simple_loss=0.3001, pruned_loss=0.07146, over 8583.00 frames. 
], tot_loss[loss=0.2016, simple_loss=0.2838, pruned_loss=0.05972, over 1611272.42 frames. ], batch size: 31, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:53:24,157 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:24,665 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.376e+02 2.954e+02 3.660e+02 6.336e+02, threshold=5.908e+02, percent-clipped=3.0 +2023-02-07 08:53:41,105 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 08:53:43,155 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 08:53:50,022 INFO [train.py:901] (1/4) Epoch 25, batch 4950, loss[loss=0.2359, simple_loss=0.3182, pruned_loss=0.07681, over 8522.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.05935, over 1616945.65 frames. ], batch size: 39, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:53:54,413 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:15,937 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:16,534 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:24,178 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1927, 1.9987, 2.6361, 2.1370, 2.5259, 2.2859, 2.0576, 1.4663], + device='cuda:1'), covar=tensor([0.5676, 0.5095, 0.2068, 0.3985, 0.2616, 0.3196, 0.2084, 0.5451], + device='cuda:1'), in_proj_covar=tensor([0.0949, 0.1002, 0.0817, 0.0969, 0.1011, 0.0914, 0.0758, 0.0835], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:54:25,251 INFO [train.py:901] (1/4) Epoch 25, batch 5000, loss[loss=0.2054, simple_loss=0.2959, pruned_loss=0.05747, over 8485.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2847, pruned_loss=0.05962, over 1614438.25 frames. ], batch size: 28, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:54:33,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 2.360e+02 2.883e+02 3.509e+02 6.136e+02, threshold=5.766e+02, percent-clipped=1.0 +2023-02-07 08:54:38,391 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.64 vs. limit=5.0 +2023-02-07 08:54:59,855 INFO [train.py:901] (1/4) Epoch 25, batch 5050, loss[loss=0.2149, simple_loss=0.3048, pruned_loss=0.0625, over 8452.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2846, pruned_loss=0.05976, over 1615778.84 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:55:14,363 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 08:55:35,776 INFO [train.py:901] (1/4) Epoch 25, batch 5100, loss[loss=0.1971, simple_loss=0.2934, pruned_loss=0.05038, over 8012.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.283, pruned_loss=0.05874, over 1617143.14 frames. 
], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:55:37,421 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:55:44,130 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.470e+02 3.005e+02 3.768e+02 7.063e+02, threshold=6.010e+02, percent-clipped=5.0 +2023-02-07 08:56:11,863 INFO [train.py:901] (1/4) Epoch 25, batch 5150, loss[loss=0.1938, simple_loss=0.293, pruned_loss=0.04735, over 8340.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.05822, over 1618171.82 frames. ], batch size: 49, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:56:12,472 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 08:56:47,048 INFO [train.py:901] (1/4) Epoch 25, batch 5200, loss[loss=0.2609, simple_loss=0.3427, pruned_loss=0.08955, over 8325.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2836, pruned_loss=0.05828, over 1617886.68 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:56:49,910 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:56:55,026 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 2.381e+02 2.894e+02 3.514e+02 1.206e+03, threshold=5.788e+02, percent-clipped=6.0 +2023-02-07 08:57:03,627 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 08:57:04,082 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:12,754 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 08:57:17,164 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:19,826 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0786, 1.6369, 1.6790, 1.4672, 0.9424, 1.5498, 1.7610, 1.5072], + device='cuda:1'), covar=tensor([0.0541, 0.1235, 0.1714, 0.1468, 0.0643, 0.1504, 0.0714, 0.0681], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0164, 0.0113, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 08:57:22,321 INFO [train.py:901] (1/4) Epoch 25, batch 5250, loss[loss=0.18, simple_loss=0.2579, pruned_loss=0.05106, over 7530.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2842, pruned_loss=0.05864, over 1620517.89 frames. 
], batch size: 18, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:57:25,800 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:34,818 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:41,280 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2552, 3.0813, 2.8880, 1.6269, 2.8560, 2.9429, 2.7868, 2.8305], + device='cuda:1'), covar=tensor([0.1163, 0.0842, 0.1309, 0.4698, 0.1214, 0.1551, 0.1705, 0.1151], + device='cuda:1'), in_proj_covar=tensor([0.0542, 0.0456, 0.0442, 0.0555, 0.0439, 0.0460, 0.0433, 0.0404], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 08:57:44,764 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7871, 2.1897, 3.4986, 1.8330, 1.7918, 3.5023, 0.5425, 2.1282], + device='cuda:1'), covar=tensor([0.1206, 0.1115, 0.0213, 0.1565, 0.2474, 0.0239, 0.2156, 0.1357], + device='cuda:1'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0222, 0.0277, 0.0142, 0.0173, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 08:57:56,836 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:57,414 INFO [train.py:901] (1/4) Epoch 25, batch 5300, loss[loss=0.2122, simple_loss=0.3029, pruned_loss=0.06078, over 8326.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2833, pruned_loss=0.05786, over 1620413.97 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:58:05,706 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.313e+02 2.718e+02 3.488e+02 6.386e+02, threshold=5.437e+02, percent-clipped=3.0 +2023-02-07 08:58:25,244 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199329.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:32,743 INFO [train.py:901] (1/4) Epoch 25, batch 5350, loss[loss=0.2229, simple_loss=0.3059, pruned_loss=0.06999, over 8497.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2837, pruned_loss=0.05797, over 1623945.94 frames. ], batch size: 28, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:58:38,560 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199348.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:47,638 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199360.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:57,377 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:08,738 INFO [train.py:901] (1/4) Epoch 25, batch 5400, loss[loss=0.1806, simple_loss=0.2683, pruned_loss=0.04644, over 8539.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2838, pruned_loss=0.0581, over 1625146.03 frames. 
], batch size: 28, lr: 3.01e-03, grad_scale: 32.0 +2023-02-07 08:59:18,156 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 2.292e+02 2.858e+02 3.757e+02 5.815e+02, threshold=5.716e+02, percent-clipped=3.0 +2023-02-07 08:59:18,354 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199404.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:22,502 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8423, 1.7511, 2.6913, 2.0823, 2.4527, 1.8983, 1.6673, 1.2400], + device='cuda:1'), covar=tensor([0.7496, 0.6200, 0.2193, 0.4158, 0.3075, 0.4340, 0.3007, 0.5711], + device='cuda:1'), in_proj_covar=tensor([0.0951, 0.1002, 0.0818, 0.0972, 0.1014, 0.0916, 0.0760, 0.0838], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 08:59:28,360 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:43,202 INFO [train.py:901] (1/4) Epoch 25, batch 5450, loss[loss=0.1732, simple_loss=0.2626, pruned_loss=0.04187, over 8246.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2837, pruned_loss=0.05827, over 1623328.99 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:00:08,099 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 09:00:08,226 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199476.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:00:17,974 INFO [train.py:901] (1/4) Epoch 25, batch 5500, loss[loss=0.2415, simple_loss=0.3032, pruned_loss=0.08992, over 7653.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.0586, over 1624161.15 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:00:28,261 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.278e+02 2.767e+02 3.622e+02 8.817e+02, threshold=5.534e+02, percent-clipped=3.0 +2023-02-07 09:00:33,821 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199512.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:00:52,113 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:00:53,324 INFO [train.py:901] (1/4) Epoch 25, batch 5550, loss[loss=0.2355, simple_loss=0.3088, pruned_loss=0.08112, over 8336.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2862, pruned_loss=0.05988, over 1625688.46 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:01:02,096 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199553.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:01:13,073 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.95 vs. limit=5.0 +2023-02-07 09:01:24,426 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:01:27,626 INFO [train.py:901] (1/4) Epoch 25, batch 5600, loss[loss=0.1968, simple_loss=0.281, pruned_loss=0.05631, over 8194.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2851, pruned_loss=0.05902, over 1622754.11 frames. 
], batch size: 23, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:01:38,053 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.538e+02 3.116e+02 4.016e+02 1.228e+03, threshold=6.232e+02, percent-clipped=11.0 +2023-02-07 09:01:43,152 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:01:47,345 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:03,348 INFO [train.py:901] (1/4) Epoch 25, batch 5650, loss[loss=0.2673, simple_loss=0.3325, pruned_loss=0.101, over 7450.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2847, pruned_loss=0.05898, over 1617585.92 frames. ], batch size: 72, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:02:04,233 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199641.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:13,537 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:14,013 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 09:02:18,327 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:36,577 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199685.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:39,932 INFO [train.py:901] (1/4) Epoch 25, batch 5700, loss[loss=0.2053, simple_loss=0.2901, pruned_loss=0.06024, over 8353.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2841, pruned_loss=0.05889, over 1618628.83 frames. ], batch size: 24, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:02:49,771 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.200e+02 2.647e+02 3.419e+02 7.306e+02, threshold=5.294e+02, percent-clipped=3.0 +2023-02-07 09:03:12,405 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 09:03:16,064 INFO [train.py:901] (1/4) Epoch 25, batch 5750, loss[loss=0.184, simple_loss=0.2751, pruned_loss=0.04647, over 8032.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05834, over 1613421.88 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:03:16,266 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5709, 2.8816, 2.5183, 4.0827, 1.7096, 2.1550, 2.8696, 2.8556], + device='cuda:1'), covar=tensor([0.0732, 0.0719, 0.0743, 0.0234, 0.1143, 0.1233, 0.0825, 0.0852], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0193, 0.0243, 0.0209, 0.0204, 0.0245, 0.0247, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 09:03:21,540 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 09:03:30,823 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:03:39,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8336, 1.4044, 4.0322, 1.4474, 3.5263, 3.3118, 3.6470, 3.5118], + device='cuda:1'), covar=tensor([0.0705, 0.4598, 0.0584, 0.4014, 0.1329, 0.0964, 0.0651, 0.0817], + device='cuda:1'), in_proj_covar=tensor([0.0656, 0.0657, 0.0724, 0.0647, 0.0731, 0.0621, 0.0622, 0.0699], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:03:50,502 INFO [train.py:901] (1/4) Epoch 25, batch 5800, loss[loss=0.1998, simple_loss=0.284, pruned_loss=0.05782, over 8092.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2833, pruned_loss=0.05866, over 1611453.02 frames. ], batch size: 21, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:04:00,801 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.348e+02 2.869e+02 3.742e+02 6.332e+02, threshold=5.738e+02, percent-clipped=6.0 +2023-02-07 09:04:11,739 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199820.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:04:12,724 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 09:04:26,571 INFO [train.py:901] (1/4) Epoch 25, batch 5850, loss[loss=0.2572, simple_loss=0.3327, pruned_loss=0.09084, over 7150.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2841, pruned_loss=0.05921, over 1613373.61 frames. ], batch size: 71, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:04:37,361 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199856.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:04:51,703 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199877.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:00,999 INFO [train.py:901] (1/4) Epoch 25, batch 5900, loss[loss=0.2689, simple_loss=0.3314, pruned_loss=0.1032, over 7812.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2848, pruned_loss=0.05952, over 1615826.93 frames. ], batch size: 20, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:05:05,794 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199897.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:05:10,363 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.352e+02 2.828e+02 3.481e+02 7.421e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-07 09:05:13,997 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:26,457 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:31,322 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:32,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:35,262 INFO [train.py:901] (1/4) Epoch 25, batch 5950, loss[loss=0.1955, simple_loss=0.2863, pruned_loss=0.05237, over 8570.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06049, over 1617294.01 frames. 
], batch size: 31, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:05:54,662 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5157, 1.5295, 4.7721, 1.8565, 4.2652, 3.8971, 4.3159, 4.1498], + device='cuda:1'), covar=tensor([0.0554, 0.4642, 0.0445, 0.4158, 0.1100, 0.0949, 0.0548, 0.0665], + device='cuda:1'), in_proj_covar=tensor([0.0656, 0.0656, 0.0723, 0.0647, 0.0731, 0.0620, 0.0622, 0.0698], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:05:58,238 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199971.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:06:08,497 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3150, 2.0701, 2.7297, 2.2938, 2.7741, 2.3856, 2.2060, 1.5684], + device='cuda:1'), covar=tensor([0.5873, 0.5196, 0.2124, 0.4249, 0.2773, 0.3578, 0.2046, 0.6144], + device='cuda:1'), in_proj_covar=tensor([0.0955, 0.1004, 0.0822, 0.0976, 0.1017, 0.0917, 0.0763, 0.0839], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 09:06:11,076 INFO [train.py:901] (1/4) Epoch 25, batch 6000, loss[loss=0.2116, simple_loss=0.2868, pruned_loss=0.06822, over 8036.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05971, over 1610821.47 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:06:11,076 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 09:06:23,703 INFO [train.py:935] (1/4) Epoch 25, validation: loss=0.1725, simple_loss=0.2721, pruned_loss=0.03643, over 944034.00 frames. +2023-02-07 09:06:23,705 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 09:06:34,586 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.373e+02 2.952e+02 3.581e+02 7.260e+02, threshold=5.903e+02, percent-clipped=4.0 +2023-02-07 09:06:40,187 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200012.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:06:59,670 INFO [train.py:901] (1/4) Epoch 25, batch 6050, loss[loss=0.1695, simple_loss=0.2502, pruned_loss=0.04435, over 7984.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2851, pruned_loss=0.06021, over 1605554.03 frames. ], batch size: 21, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:07:35,033 INFO [train.py:901] (1/4) Epoch 25, batch 6100, loss[loss=0.1848, simple_loss=0.2709, pruned_loss=0.04938, over 8128.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2844, pruned_loss=0.05968, over 1606067.18 frames. 
], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:07:36,614 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6767, 1.5752, 4.9198, 1.8433, 4.3356, 4.0602, 4.4384, 4.3094], + device='cuda:1'), covar=tensor([0.0619, 0.4636, 0.0413, 0.4112, 0.1061, 0.0863, 0.0564, 0.0638], + device='cuda:1'), in_proj_covar=tensor([0.0657, 0.0656, 0.0724, 0.0647, 0.0732, 0.0621, 0.0624, 0.0699], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:07:38,670 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7182, 1.8235, 1.6084, 2.2619, 1.0815, 1.4281, 1.7064, 1.7870], + device='cuda:1'), covar=tensor([0.0732, 0.0741, 0.0877, 0.0469, 0.1069, 0.1340, 0.0755, 0.0741], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0211, 0.0205, 0.0247, 0.0249, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 09:07:45,359 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.345e+02 2.959e+02 3.596e+02 7.197e+02, threshold=5.919e+02, percent-clipped=3.0 +2023-02-07 09:07:54,353 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 09:08:05,861 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200133.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:11,247 INFO [train.py:901] (1/4) Epoch 25, batch 6150, loss[loss=0.2143, simple_loss=0.2864, pruned_loss=0.07106, over 8351.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2849, pruned_loss=0.06022, over 1610281.06 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:08:23,407 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200158.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:46,125 INFO [train.py:901] (1/4) Epoch 25, batch 6200, loss[loss=0.231, simple_loss=0.3178, pruned_loss=0.07207, over 8619.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2839, pruned_loss=0.05947, over 1612505.35 frames. 
], batch size: 39, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:08:47,074 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:49,575 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200195.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:08:52,369 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6679, 1.5928, 5.8240, 2.3425, 5.2065, 4.8444, 5.4053, 5.2631], + device='cuda:1'), covar=tensor([0.0473, 0.5190, 0.0414, 0.4038, 0.1019, 0.0919, 0.0507, 0.0495], + device='cuda:1'), in_proj_covar=tensor([0.0659, 0.0656, 0.0725, 0.0649, 0.0733, 0.0623, 0.0625, 0.0701], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:08:55,700 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.314e+02 2.821e+02 3.535e+02 6.331e+02, threshold=5.643e+02, percent-clipped=2.0 +2023-02-07 09:09:04,326 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:09:13,093 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200227.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:09:21,657 INFO [train.py:901] (1/4) Epoch 25, batch 6250, loss[loss=0.1883, simple_loss=0.2638, pruned_loss=0.05643, over 7562.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05925, over 1612715.58 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:09:29,778 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200252.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:09:41,372 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200268.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:09:43,282 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:09:46,148 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9646, 1.4577, 1.6957, 1.2967, 0.8994, 1.4306, 1.6006, 1.5728], + device='cuda:1'), covar=tensor([0.0500, 0.1278, 0.1617, 0.1470, 0.0587, 0.1453, 0.0715, 0.0650], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 09:09:56,027 INFO [train.py:901] (1/4) Epoch 25, batch 6300, loss[loss=0.1636, simple_loss=0.2418, pruned_loss=0.04264, over 7243.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2838, pruned_loss=0.05896, over 1609785.41 frames. ], batch size: 16, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:09:58,167 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200293.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:10:00,759 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:10:05,879 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-02-07 09:10:06,124 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.537e+02 3.046e+02 4.211e+02 7.306e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 09:10:18,421 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.0455, 1.3706, 3.2791, 1.2388, 2.6456, 2.5596, 3.0023, 3.0044], + device='cuda:1'), covar=tensor([0.1425, 0.5570, 0.1466, 0.5341, 0.2634, 0.2334, 0.1169, 0.1262], + device='cuda:1'), in_proj_covar=tensor([0.0659, 0.0655, 0.0723, 0.0647, 0.0732, 0.0623, 0.0624, 0.0700], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:10:31,232 INFO [train.py:901] (1/4) Epoch 25, batch 6350, loss[loss=0.199, simple_loss=0.2846, pruned_loss=0.0567, over 7810.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.05878, over 1607395.75 frames. ], batch size: 20, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:10:40,293 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8124, 5.8710, 5.1531, 2.4938, 5.2622, 5.6203, 5.3603, 5.3362], + device='cuda:1'), covar=tensor([0.0522, 0.0315, 0.0795, 0.4074, 0.0615, 0.0599, 0.1086, 0.0613], + device='cuda:1'), in_proj_covar=tensor([0.0531, 0.0448, 0.0434, 0.0546, 0.0432, 0.0451, 0.0427, 0.0397], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:11:03,771 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:11:06,377 INFO [train.py:901] (1/4) Epoch 25, batch 6400, loss[loss=0.1981, simple_loss=0.2924, pruned_loss=0.05187, over 8185.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05884, over 1613835.09 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:11:15,860 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.247e+02 2.600e+02 3.696e+02 8.014e+02, threshold=5.200e+02, percent-clipped=2.0 +2023-02-07 09:11:40,859 INFO [train.py:901] (1/4) Epoch 25, batch 6450, loss[loss=0.2083, simple_loss=0.2922, pruned_loss=0.06224, over 8345.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2832, pruned_loss=0.05847, over 1614084.82 frames. ], batch size: 48, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:11:49,291 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 09:11:55,915 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8008, 1.4268, 3.1059, 1.5639, 2.2899, 3.3664, 3.5187, 2.8652], + device='cuda:1'), covar=tensor([0.1190, 0.1774, 0.0335, 0.1925, 0.0999, 0.0253, 0.0498, 0.0540], + device='cuda:1'), in_proj_covar=tensor([0.0299, 0.0320, 0.0285, 0.0314, 0.0314, 0.0271, 0.0429, 0.0302], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 09:12:16,063 INFO [train.py:901] (1/4) Epoch 25, batch 6500, loss[loss=0.2029, simple_loss=0.28, pruned_loss=0.0629, over 8445.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05803, over 1614072.08 frames. 
], batch size: 29, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:12:26,030 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.181e+02 2.613e+02 3.190e+02 4.719e+02, threshold=5.226e+02, percent-clipped=0.0 +2023-02-07 09:12:49,391 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200539.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:12:49,923 INFO [train.py:901] (1/4) Epoch 25, batch 6550, loss[loss=0.227, simple_loss=0.2929, pruned_loss=0.08053, over 7262.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.05823, over 1616147.69 frames. ], batch size: 16, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:13:08,861 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 09:13:09,862 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 09:13:10,555 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4067, 4.3146, 3.9358, 2.3876, 3.8835, 3.9803, 3.9609, 3.7634], + device='cuda:1'), covar=tensor([0.0754, 0.0585, 0.1064, 0.4141, 0.0869, 0.1008, 0.1266, 0.0775], + device='cuda:1'), in_proj_covar=tensor([0.0534, 0.0452, 0.0438, 0.0552, 0.0437, 0.0457, 0.0431, 0.0400], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:13:26,032 INFO [train.py:901] (1/4) Epoch 25, batch 6600, loss[loss=0.222, simple_loss=0.2991, pruned_loss=0.07249, over 8340.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05857, over 1614250.86 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:13:30,831 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 09:13:35,560 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.321e+02 2.722e+02 3.541e+02 8.507e+02, threshold=5.445e+02, percent-clipped=6.0 +2023-02-07 09:14:00,778 INFO [train.py:901] (1/4) Epoch 25, batch 6650, loss[loss=0.2109, simple_loss=0.2776, pruned_loss=0.07212, over 7527.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2835, pruned_loss=0.05833, over 1616457.77 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:14:01,601 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200641.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:02,410 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200642.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:10,431 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200654.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:14:16,372 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200663.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:19,886 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200667.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:35,348 INFO [train.py:901] (1/4) Epoch 25, batch 6700, loss[loss=0.2043, simple_loss=0.2898, pruned_loss=0.05936, over 8454.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2833, pruned_loss=0.05841, over 1610745.61 frames. 
], batch size: 25, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:14:45,627 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.443e+02 2.859e+02 3.397e+02 5.440e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 09:15:01,824 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.96 vs. limit=5.0 +2023-02-07 09:15:10,911 INFO [train.py:901] (1/4) Epoch 25, batch 6750, loss[loss=0.1613, simple_loss=0.2408, pruned_loss=0.04092, over 7718.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2821, pruned_loss=0.05787, over 1613177.50 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:15:22,780 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:15:24,218 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7176, 2.3870, 3.8892, 1.5489, 3.0013, 2.2821, 1.9594, 2.8086], + device='cuda:1'), covar=tensor([0.1989, 0.2700, 0.0940, 0.4786, 0.1894, 0.3246, 0.2351, 0.2564], + device='cuda:1'), in_proj_covar=tensor([0.0535, 0.0625, 0.0558, 0.0662, 0.0661, 0.0605, 0.0554, 0.0644], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:15:47,012 INFO [train.py:901] (1/4) Epoch 25, batch 6800, loss[loss=0.2153, simple_loss=0.3018, pruned_loss=0.06444, over 8192.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2815, pruned_loss=0.05766, over 1617859.50 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:15:51,879 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 09:15:56,785 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.322e+02 2.853e+02 3.502e+02 6.162e+02, threshold=5.706e+02, percent-clipped=1.0 +2023-02-07 09:16:03,490 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 09:16:21,861 INFO [train.py:901] (1/4) Epoch 25, batch 6850, loss[loss=0.1496, simple_loss=0.2251, pruned_loss=0.037, over 6405.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05811, over 1621380.50 frames. ], batch size: 14, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:16:40,952 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 09:16:56,586 INFO [train.py:901] (1/4) Epoch 25, batch 6900, loss[loss=0.1625, simple_loss=0.252, pruned_loss=0.03648, over 7199.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2831, pruned_loss=0.0585, over 1615118.24 frames. 
], batch size: 16, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:17:02,707 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3670, 4.1977, 3.8465, 2.9096, 3.7658, 3.9660, 3.9435, 3.7567], + device='cuda:1'), covar=tensor([0.0604, 0.0657, 0.0948, 0.3276, 0.0922, 0.1217, 0.1120, 0.0882], + device='cuda:1'), in_proj_covar=tensor([0.0533, 0.0452, 0.0439, 0.0551, 0.0437, 0.0457, 0.0432, 0.0400], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:17:06,816 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.244e+02 2.770e+02 3.533e+02 6.127e+02, threshold=5.541e+02, percent-clipped=2.0 +2023-02-07 09:17:11,243 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200910.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:17:20,893 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 09:17:28,199 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200935.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:17:31,360 INFO [train.py:901] (1/4) Epoch 25, batch 6950, loss[loss=0.2045, simple_loss=0.2958, pruned_loss=0.0566, over 8328.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2829, pruned_loss=0.05842, over 1611566.43 frames. ], batch size: 25, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:17:50,991 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 09:17:58,863 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5674, 1.4986, 1.8558, 1.2405, 1.2084, 1.8320, 0.2326, 1.2360], + device='cuda:1'), covar=tensor([0.1645, 0.1199, 0.0432, 0.0837, 0.2375, 0.0545, 0.1804, 0.1138], + device='cuda:1'), in_proj_covar=tensor([0.0194, 0.0199, 0.0130, 0.0219, 0.0273, 0.0140, 0.0170, 0.0196], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 09:18:07,790 INFO [train.py:901] (1/4) Epoch 25, batch 7000, loss[loss=0.1363, simple_loss=0.2166, pruned_loss=0.02797, over 7401.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2831, pruned_loss=0.05862, over 1612734.53 frames. 
], batch size: 17, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:18:17,567 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.593e+02 3.026e+02 3.851e+02 8.547e+02, threshold=6.052e+02, percent-clipped=7.0 +2023-02-07 09:18:19,774 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:23,071 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2388, 4.1826, 3.8388, 1.9263, 3.7354, 3.7416, 3.7323, 3.5857], + device='cuda:1'), covar=tensor([0.0834, 0.0560, 0.1010, 0.4303, 0.0975, 0.1079, 0.1373, 0.0818], + device='cuda:1'), in_proj_covar=tensor([0.0534, 0.0452, 0.0438, 0.0551, 0.0438, 0.0457, 0.0431, 0.0399], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:18:23,197 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201012.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:40,486 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:42,299 INFO [train.py:901] (1/4) Epoch 25, batch 7050, loss[loss=0.1695, simple_loss=0.2466, pruned_loss=0.0462, over 7208.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2824, pruned_loss=0.05816, over 1608893.39 frames. ], batch size: 16, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:16,835 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:19:17,378 INFO [train.py:901] (1/4) Epoch 25, batch 7100, loss[loss=0.1976, simple_loss=0.2766, pruned_loss=0.05928, over 7930.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2821, pruned_loss=0.05792, over 1609185.39 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:26,866 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.246e+02 2.728e+02 3.277e+02 5.322e+02, threshold=5.456e+02, percent-clipped=0.0 +2023-02-07 09:19:40,020 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:19:52,255 INFO [train.py:901] (1/4) Epoch 25, batch 7150, loss[loss=0.151, simple_loss=0.236, pruned_loss=0.03301, over 7787.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.282, pruned_loss=0.0584, over 1609216.98 frames. ], batch size: 19, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:53,558 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.97 vs. limit=5.0 +2023-02-07 09:20:28,435 INFO [train.py:901] (1/4) Epoch 25, batch 7200, loss[loss=0.2344, simple_loss=0.3273, pruned_loss=0.07075, over 8296.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05925, over 1611748.55 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:20:35,003 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 09:20:38,231 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.475e+02 3.123e+02 4.294e+02 9.608e+02, threshold=6.246e+02, percent-clipped=8.0 +2023-02-07 09:20:56,017 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 09:21:03,482 INFO [train.py:901] (1/4) Epoch 25, batch 7250, loss[loss=0.163, simple_loss=0.2537, pruned_loss=0.03612, over 7808.00 frames. 
], tot_loss[loss=0.1998, simple_loss=0.2827, pruned_loss=0.05847, over 1609957.35 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:21:25,202 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:21:37,433 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0616, 1.5038, 3.4161, 1.5379, 2.2810, 3.7515, 3.8839, 3.1526], + device='cuda:1'), covar=tensor([0.1134, 0.1936, 0.0367, 0.2128, 0.1193, 0.0221, 0.0500, 0.0567], + device='cuda:1'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0319, 0.0318, 0.0276, 0.0436, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 09:21:37,959 INFO [train.py:901] (1/4) Epoch 25, batch 7300, loss[loss=0.1841, simple_loss=0.2774, pruned_loss=0.04541, over 8459.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.283, pruned_loss=0.05881, over 1604766.19 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:21:39,378 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201292.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:21:48,706 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.341e+02 2.809e+02 3.464e+02 9.506e+02, threshold=5.617e+02, percent-clipped=4.0 +2023-02-07 09:21:50,352 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8533, 1.6398, 1.9844, 1.7766, 1.9197, 1.9593, 1.7928, 0.8797], + device='cuda:1'), covar=tensor([0.5966, 0.4940, 0.2249, 0.3845, 0.2647, 0.3291, 0.2149, 0.5347], + device='cuda:1'), in_proj_covar=tensor([0.0957, 0.1008, 0.0826, 0.0979, 0.1021, 0.0918, 0.0767, 0.0844], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 09:22:13,162 INFO [train.py:901] (1/4) Epoch 25, batch 7350, loss[loss=0.226, simple_loss=0.3002, pruned_loss=0.07587, over 8098.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2834, pruned_loss=0.05882, over 1607459.53 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:22:27,893 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-07 09:22:39,014 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 09:22:40,519 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:22:48,512 INFO [train.py:901] (1/4) Epoch 25, batch 7400, loss[loss=0.1592, simple_loss=0.2327, pruned_loss=0.04288, over 7218.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2827, pruned_loss=0.05874, over 1607161.86 frames. ], batch size: 16, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:22:57,494 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201403.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:22:57,970 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.231e+02 2.880e+02 3.857e+02 7.685e+02, threshold=5.759e+02, percent-clipped=5.0 +2023-02-07 09:22:58,695 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-07 09:23:19,301 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:23:23,998 INFO [train.py:901] (1/4) Epoch 25, batch 7450, loss[loss=0.2173, simple_loss=0.2933, pruned_loss=0.07067, over 8036.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2843, pruned_loss=0.05977, over 1605730.92 frames. ], batch size: 22, lr: 2.99e-03, grad_scale: 16.0 +2023-02-07 09:23:37,760 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 09:23:59,782 INFO [train.py:901] (1/4) Epoch 25, batch 7500, loss[loss=0.1604, simple_loss=0.2401, pruned_loss=0.0403, over 8087.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2845, pruned_loss=0.05974, over 1609228.77 frames. ], batch size: 21, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:24:09,798 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.281e+02 2.758e+02 3.564e+02 6.593e+02, threshold=5.515e+02, percent-clipped=6.0 +2023-02-07 09:24:12,066 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6756, 2.4733, 1.6816, 2.3375, 2.1304, 1.4399, 2.0997, 2.2921], + device='cuda:1'), covar=tensor([0.1565, 0.0462, 0.1578, 0.0635, 0.0810, 0.1895, 0.1114, 0.0984], + device='cuda:1'), in_proj_covar=tensor([0.0360, 0.0239, 0.0345, 0.0316, 0.0304, 0.0348, 0.0352, 0.0324], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 09:24:34,700 INFO [train.py:901] (1/4) Epoch 25, batch 7550, loss[loss=0.1992, simple_loss=0.2855, pruned_loss=0.05646, over 8450.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2845, pruned_loss=0.05969, over 1611860.70 frames. ], batch size: 29, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:24:40,276 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:09,425 INFO [train.py:901] (1/4) Epoch 25, batch 7600, loss[loss=0.1597, simple_loss=0.2349, pruned_loss=0.04227, over 7430.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2849, pruned_loss=0.05964, over 1611069.36 frames. ], batch size: 17, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:25:20,530 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.467e+02 2.939e+02 3.909e+02 7.265e+02, threshold=5.878e+02, percent-clipped=5.0 +2023-02-07 09:25:27,306 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201615.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:41,304 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:43,929 INFO [train.py:901] (1/4) Epoch 25, batch 7650, loss[loss=0.1868, simple_loss=0.2785, pruned_loss=0.04752, over 7986.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2846, pruned_loss=0.05956, over 1609846.48 frames. ], batch size: 21, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:00,385 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:12,135 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:19,111 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.62 vs. 
limit=5.0 +2023-02-07 09:26:19,453 INFO [train.py:901] (1/4) Epoch 25, batch 7700, loss[loss=0.1946, simple_loss=0.2788, pruned_loss=0.05521, over 7813.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2844, pruned_loss=0.05934, over 1605418.05 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:20,539 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 09:26:20,995 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9064, 2.1157, 6.0372, 2.1583, 5.4270, 5.0833, 5.5562, 5.4706], + device='cuda:1'), covar=tensor([0.0433, 0.4467, 0.0392, 0.4202, 0.0959, 0.0772, 0.0498, 0.0486], + device='cuda:1'), in_proj_covar=tensor([0.0658, 0.0656, 0.0722, 0.0648, 0.0736, 0.0627, 0.0626, 0.0699], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:26:30,382 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.306e+02 2.805e+02 3.732e+02 7.115e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-07 09:26:43,955 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201724.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:48,007 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201730.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:49,245 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 09:26:54,628 INFO [train.py:901] (1/4) Epoch 25, batch 7750, loss[loss=0.1916, simple_loss=0.284, pruned_loss=0.0496, over 8340.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2845, pruned_loss=0.05886, over 1610714.76 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:58,895 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6760, 1.9420, 2.0460, 1.4123, 2.0987, 1.5470, 0.5598, 1.9287], + device='cuda:1'), covar=tensor([0.0639, 0.0380, 0.0335, 0.0636, 0.0508, 0.0962, 0.1008, 0.0338], + device='cuda:1'), in_proj_covar=tensor([0.0463, 0.0402, 0.0360, 0.0458, 0.0391, 0.0543, 0.0402, 0.0431], + device='cuda:1'), out_proj_covar=tensor([1.2294e-04, 1.0476e-04, 9.3949e-05, 1.1998e-04, 1.0235e-04, 1.5191e-04, + 1.0748e-04, 1.1315e-04], device='cuda:1') +2023-02-07 09:27:02,274 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201751.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:27:10,572 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2332, 1.0936, 1.3186, 1.0434, 1.0085, 1.3433, 0.0619, 0.9318], + device='cuda:1'), covar=tensor([0.1434, 0.1261, 0.0471, 0.0687, 0.2300, 0.0489, 0.1923, 0.1109], + device='cuda:1'), in_proj_covar=tensor([0.0197, 0.0201, 0.0130, 0.0221, 0.0274, 0.0141, 0.0171, 0.0198], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 09:27:29,977 INFO [train.py:901] (1/4) Epoch 25, batch 7800, loss[loss=0.244, simple_loss=0.322, pruned_loss=0.083, over 8462.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05878, over 1610827.62 frames. 
], batch size: 25, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:27:40,030 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:27:40,475 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.301e+02 2.955e+02 3.831e+02 1.047e+03, threshold=5.910e+02, percent-clipped=5.0 +2023-02-07 09:27:40,890 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 09:27:56,321 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:28:03,032 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.26 vs. limit=5.0 +2023-02-07 09:28:03,377 INFO [train.py:901] (1/4) Epoch 25, batch 7850, loss[loss=0.2112, simple_loss=0.3044, pruned_loss=0.05901, over 8481.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05847, over 1614370.95 frames. ], batch size: 28, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:28:23,070 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1698, 1.2809, 4.3937, 2.0470, 2.6047, 5.0509, 5.1042, 4.3730], + device='cuda:1'), covar=tensor([0.1218, 0.2116, 0.0254, 0.1871, 0.1178, 0.0149, 0.0337, 0.0534], + device='cuda:1'), in_proj_covar=tensor([0.0302, 0.0322, 0.0289, 0.0318, 0.0316, 0.0273, 0.0433, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 09:28:36,527 INFO [train.py:901] (1/4) Epoch 25, batch 7900, loss[loss=0.1982, simple_loss=0.2862, pruned_loss=0.05506, over 8332.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2844, pruned_loss=0.05981, over 1611839.08 frames. ], batch size: 26, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:28:47,146 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.505e+02 3.187e+02 3.787e+02 7.491e+02, threshold=6.375e+02, percent-clipped=2.0 +2023-02-07 09:29:09,582 INFO [train.py:901] (1/4) Epoch 25, batch 7950, loss[loss=0.1982, simple_loss=0.2886, pruned_loss=0.05385, over 8338.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05928, over 1611023.47 frames. ], batch size: 26, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:29:18,131 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-07 09:29:40,346 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:42,796 INFO [train.py:901] (1/4) Epoch 25, batch 8000, loss[loss=0.1933, simple_loss=0.2715, pruned_loss=0.05751, over 8444.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2852, pruned_loss=0.06014, over 1615474.36 frames. 
], batch size: 27, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:29:54,397 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 2.298e+02 3.131e+02 3.789e+02 6.155e+02, threshold=6.263e+02, percent-clipped=0.0 +2023-02-07 09:29:54,488 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:55,329 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:55,931 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:58,062 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:06,543 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:12,411 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202032.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:17,702 INFO [train.py:901] (1/4) Epoch 25, batch 8050, loss[loss=0.2219, simple_loss=0.2798, pruned_loss=0.08202, over 7544.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2838, pruned_loss=0.06018, over 1599077.08 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:30:36,918 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:50,422 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 09:30:55,056 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-02-07 09:30:55,320 INFO [train.py:901] (1/4) Epoch 26, batch 0, loss[loss=0.2045, simple_loss=0.2841, pruned_loss=0.06243, over 7654.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2841, pruned_loss=0.06243, over 7654.00 frames. ], batch size: 19, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:30:55,320 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 09:31:04,640 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3147, 2.1390, 1.6702, 1.8825, 1.7846, 1.5152, 1.7373, 1.7196], + device='cuda:1'), covar=tensor([0.1459, 0.0479, 0.1267, 0.0587, 0.0762, 0.1664, 0.1024, 0.0989], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0239, 0.0341, 0.0314, 0.0303, 0.0346, 0.0350, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 09:31:06,904 INFO [train.py:935] (1/4) Epoch 26, validation: loss=0.1717, simple_loss=0.2716, pruned_loss=0.03591, over 944034.00 frames. +2023-02-07 09:31:06,905 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 09:31:21,607 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 09:31:29,809 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.411e+02 2.993e+02 3.956e+02 9.314e+02, threshold=5.987e+02, percent-clipped=4.0 +2023-02-07 09:31:40,822 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:31:41,314 INFO [train.py:901] (1/4) Epoch 26, batch 50, loss[loss=0.2006, simple_loss=0.283, pruned_loss=0.05903, over 8523.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2866, pruned_loss=0.05892, over 368267.22 frames. ], batch size: 39, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:31:52,551 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202138.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:31:55,752 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 09:32:15,980 INFO [train.py:901] (1/4) Epoch 26, batch 100, loss[loss=0.1755, simple_loss=0.2559, pruned_loss=0.0475, over 7804.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2856, pruned_loss=0.0587, over 648336.14 frames. ], batch size: 20, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:32:18,604 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 09:32:23,609 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:32:40,575 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.435e+02 2.962e+02 3.649e+02 8.375e+02, threshold=5.925e+02, percent-clipped=4.0 +2023-02-07 09:32:51,108 INFO [train.py:901] (1/4) Epoch 26, batch 150, loss[loss=0.1835, simple_loss=0.2761, pruned_loss=0.04545, over 7976.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2857, pruned_loss=0.06003, over 862609.09 frames. ], batch size: 21, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:33:22,417 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5666, 1.4164, 4.7685, 1.7512, 4.2076, 3.9720, 4.3628, 4.2282], + device='cuda:1'), covar=tensor([0.0594, 0.4963, 0.0518, 0.4247, 0.1123, 0.0988, 0.0492, 0.0667], + device='cuda:1'), in_proj_covar=tensor([0.0660, 0.0653, 0.0721, 0.0646, 0.0730, 0.0626, 0.0625, 0.0699], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:33:26,394 INFO [train.py:901] (1/4) Epoch 26, batch 200, loss[loss=0.1844, simple_loss=0.277, pruned_loss=0.04589, over 8028.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.285, pruned_loss=0.05921, over 1029933.95 frames. ], batch size: 22, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:33:49,941 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.408e+02 2.928e+02 3.669e+02 9.390e+02, threshold=5.857e+02, percent-clipped=3.0 +2023-02-07 09:34:01,574 INFO [train.py:901] (1/4) Epoch 26, batch 250, loss[loss=0.2276, simple_loss=0.3259, pruned_loss=0.06465, over 8317.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2864, pruned_loss=0.05919, over 1167651.31 frames. ], batch size: 25, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:34:09,714 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-07 09:34:11,248 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3024, 2.6015, 2.8328, 1.8088, 3.1840, 1.9003, 1.6001, 2.2519], + device='cuda:1'), covar=tensor([0.0891, 0.0461, 0.0405, 0.0852, 0.0628, 0.0928, 0.1027, 0.0617], + device='cuda:1'), in_proj_covar=tensor([0.0458, 0.0398, 0.0355, 0.0451, 0.0384, 0.0536, 0.0395, 0.0426], + device='cuda:1'), out_proj_covar=tensor([1.2172e-04, 1.0358e-04, 9.2619e-05, 1.1821e-04, 1.0042e-04, 1.5006e-04, + 1.0588e-04, 1.1196e-04], device='cuda:1') +2023-02-07 09:34:19,974 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 09:34:22,428 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 09:34:22,770 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:30,088 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 09:34:36,400 INFO [train.py:901] (1/4) Epoch 26, batch 300, loss[loss=0.1835, simple_loss=0.2525, pruned_loss=0.0573, over 7434.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2862, pruned_loss=0.05936, over 1265457.84 frames. ], batch size: 17, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:34:37,952 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7306, 1.7142, 2.3162, 1.4212, 1.3332, 2.3016, 0.3612, 1.4003], + device='cuda:1'), covar=tensor([0.1730, 0.1086, 0.0316, 0.1049, 0.2282, 0.0338, 0.1868, 0.1365], + device='cuda:1'), in_proj_covar=tensor([0.0197, 0.0202, 0.0131, 0.0222, 0.0276, 0.0142, 0.0171, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 09:34:40,015 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:52,240 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:57,262 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202402.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:59,609 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.510e+02 3.033e+02 3.572e+02 1.183e+03, threshold=6.066e+02, percent-clipped=2.0 +2023-02-07 09:35:08,718 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:10,549 INFO [train.py:901] (1/4) Epoch 26, batch 350, loss[loss=0.1873, simple_loss=0.2792, pruned_loss=0.04771, over 8195.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2855, pruned_loss=0.05868, over 1348003.68 frames. ], batch size: 23, lr: 2.93e-03, grad_scale: 4.0 +2023-02-07 09:35:23,411 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:41,018 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202464.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:43,113 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:46,431 INFO [train.py:901] (1/4) Epoch 26, batch 400, loss[loss=0.2189, simple_loss=0.297, pruned_loss=0.07035, over 8470.00 frames. 
], tot_loss[loss=0.2002, simple_loss=0.2845, pruned_loss=0.05796, over 1409459.51 frames. ], batch size: 25, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:36:11,056 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 2.504e+02 3.071e+02 3.633e+02 8.131e+02, threshold=6.142e+02, percent-clipped=3.0 +2023-02-07 09:36:21,078 INFO [train.py:901] (1/4) Epoch 26, batch 450, loss[loss=0.2053, simple_loss=0.2982, pruned_loss=0.05624, over 8297.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2842, pruned_loss=0.05826, over 1453502.52 frames. ], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:36:55,499 INFO [train.py:901] (1/4) Epoch 26, batch 500, loss[loss=0.2077, simple_loss=0.2942, pruned_loss=0.06057, over 8505.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2852, pruned_loss=0.0593, over 1484300.36 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:11,283 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8522, 1.8398, 2.0384, 1.7157, 0.8972, 1.7866, 2.3283, 2.3793], + device='cuda:1'), covar=tensor([0.0458, 0.1134, 0.1543, 0.1332, 0.0610, 0.1356, 0.0577, 0.0540], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 09:37:19,246 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.396e+02 2.962e+02 4.085e+02 8.069e+02, threshold=5.924e+02, percent-clipped=6.0 +2023-02-07 09:37:29,369 INFO [train.py:901] (1/4) Epoch 26, batch 550, loss[loss=0.1953, simple_loss=0.2723, pruned_loss=0.05914, over 8130.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2848, pruned_loss=0.05944, over 1511657.31 frames. ], batch size: 22, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:45,557 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-07 09:38:00,898 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6077, 2.0502, 3.2128, 1.3696, 2.4157, 2.0702, 1.6946, 2.4922], + device='cuda:1'), covar=tensor([0.1937, 0.2575, 0.0869, 0.4832, 0.1945, 0.3231, 0.2444, 0.2257], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0629, 0.0559, 0.0663, 0.0661, 0.0609, 0.0557, 0.0645], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:38:05,142 INFO [train.py:901] (1/4) Epoch 26, batch 600, loss[loss=0.1754, simple_loss=0.2448, pruned_loss=0.05299, over 6815.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05892, over 1532296.43 frames. ], batch size: 15, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:21,711 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 09:38:29,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.445e+02 2.916e+02 3.512e+02 6.749e+02, threshold=5.833e+02, percent-clipped=3.0 +2023-02-07 09:38:38,959 INFO [train.py:901] (1/4) Epoch 26, batch 650, loss[loss=0.1778, simple_loss=0.2571, pruned_loss=0.04928, over 7929.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05889, over 1556516.52 frames. 
], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:39,868 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:38:57,385 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:39:14,894 INFO [train.py:901] (1/4) Epoch 26, batch 700, loss[loss=0.1878, simple_loss=0.283, pruned_loss=0.04627, over 8509.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2843, pruned_loss=0.0593, over 1564352.27 frames. ], batch size: 28, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:39:38,615 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.498e+02 3.029e+02 3.750e+02 8.351e+02, threshold=6.058e+02, percent-clipped=3.0 +2023-02-07 09:39:49,871 INFO [train.py:901] (1/4) Epoch 26, batch 750, loss[loss=0.2287, simple_loss=0.3079, pruned_loss=0.07475, over 8340.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05893, over 1578024.76 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:05,060 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 09:40:08,014 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202848.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:40:13,840 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 09:40:24,780 INFO [train.py:901] (1/4) Epoch 26, batch 800, loss[loss=0.2084, simple_loss=0.2956, pruned_loss=0.06063, over 8340.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2842, pruned_loss=0.05992, over 1580659.07 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:49,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.489e+02 2.818e+02 3.827e+02 7.280e+02, threshold=5.635e+02, percent-clipped=3.0 +2023-02-07 09:40:59,907 INFO [train.py:901] (1/4) Epoch 26, batch 850, loss[loss=0.1801, simple_loss=0.2713, pruned_loss=0.04448, over 8327.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2838, pruned_loss=0.0596, over 1589434.27 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:33,338 INFO [train.py:901] (1/4) Epoch 26, batch 900, loss[loss=0.2033, simple_loss=0.2979, pruned_loss=0.05437, over 8434.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2837, pruned_loss=0.05944, over 1592447.85 frames. ], batch size: 27, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:58,968 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.742e+02 3.265e+02 4.005e+02 6.934e+02, threshold=6.531e+02, percent-clipped=5.0 +2023-02-07 09:42:08,851 INFO [train.py:901] (1/4) Epoch 26, batch 950, loss[loss=0.2116, simple_loss=0.296, pruned_loss=0.0636, over 8494.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2845, pruned_loss=0.0601, over 1595470.61 frames. 
], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:16,930 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.1548, 3.0940, 2.8700, 1.9584, 2.8184, 2.7991, 2.8166, 2.6952], + device='cuda:1'), covar=tensor([0.0862, 0.0772, 0.1043, 0.3547, 0.0884, 0.1279, 0.1324, 0.0990], + device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0449, 0.0437, 0.0546, 0.0434, 0.0452, 0.0427, 0.0394], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:42:31,713 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 09:42:32,526 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:42:43,150 INFO [train.py:901] (1/4) Epoch 26, batch 1000, loss[loss=0.2066, simple_loss=0.2783, pruned_loss=0.06747, over 7254.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2852, pruned_loss=0.06064, over 1593509.68 frames. ], batch size: 16, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:57,331 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9039, 2.0522, 1.8657, 2.6361, 1.2514, 1.6516, 1.9582, 2.1030], + device='cuda:1'), covar=tensor([0.0765, 0.0845, 0.0936, 0.0371, 0.1086, 0.1309, 0.0743, 0.0699], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0245, 0.0212, 0.0205, 0.0247, 0.0249, 0.0207], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 09:43:04,713 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:43:05,261 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 09:43:07,153 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.348e+02 2.857e+02 3.355e+02 6.976e+02, threshold=5.714e+02, percent-clipped=1.0 +2023-02-07 09:43:17,239 INFO [train.py:901] (1/4) Epoch 26, batch 1050, loss[loss=0.1821, simple_loss=0.2725, pruned_loss=0.04581, over 8570.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2852, pruned_loss=0.06012, over 1601066.81 frames. ], batch size: 39, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:43:18,711 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 09:43:38,367 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2808, 1.7945, 4.3391, 1.9183, 2.5018, 4.9555, 5.0075, 4.2198], + device='cuda:1'), covar=tensor([0.1236, 0.1882, 0.0273, 0.2121, 0.1305, 0.0180, 0.0404, 0.0576], + device='cuda:1'), in_proj_covar=tensor([0.0300, 0.0323, 0.0288, 0.0317, 0.0316, 0.0273, 0.0432, 0.0304], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 09:43:53,221 INFO [train.py:901] (1/4) Epoch 26, batch 1100, loss[loss=0.1794, simple_loss=0.2609, pruned_loss=0.04897, over 7657.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2848, pruned_loss=0.05962, over 1598198.19 frames. 
], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:06,850 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203192.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:44:09,472 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8769, 1.6066, 1.7567, 1.5139, 1.0203, 1.6336, 1.7028, 1.4965], + device='cuda:1'), covar=tensor([0.0544, 0.1201, 0.1638, 0.1382, 0.0599, 0.1408, 0.0689, 0.0669], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 09:44:12,284 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7828, 1.4626, 3.3376, 1.5428, 2.3132, 3.6337, 3.6586, 3.1336], + device='cuda:1'), covar=tensor([0.1275, 0.1873, 0.0316, 0.1968, 0.1087, 0.0209, 0.0570, 0.0510], + device='cuda:1'), in_proj_covar=tensor([0.0299, 0.0323, 0.0287, 0.0315, 0.0315, 0.0272, 0.0430, 0.0303], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 09:44:16,728 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.511e+02 2.912e+02 3.711e+02 8.666e+02, threshold=5.824e+02, percent-clipped=4.0 +2023-02-07 09:44:27,590 INFO [train.py:901] (1/4) Epoch 26, batch 1150, loss[loss=0.2177, simple_loss=0.3131, pruned_loss=0.06113, over 8327.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2854, pruned_loss=0.06021, over 1602368.69 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:27,626 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 09:45:02,743 INFO [train.py:901] (1/4) Epoch 26, batch 1200, loss[loss=0.1669, simple_loss=0.2518, pruned_loss=0.04093, over 7813.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2846, pruned_loss=0.05953, over 1604084.58 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:45:27,265 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203307.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:45:27,739 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.402e+02 2.806e+02 3.306e+02 6.331e+02, threshold=5.612e+02, percent-clipped=2.0 +2023-02-07 09:45:37,091 INFO [train.py:901] (1/4) Epoch 26, batch 1250, loss[loss=0.1808, simple_loss=0.2684, pruned_loss=0.04662, over 6902.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05949, over 1609281.81 frames. ], batch size: 74, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:45:49,713 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 09:46:12,725 INFO [train.py:901] (1/4) Epoch 26, batch 1300, loss[loss=0.164, simple_loss=0.2507, pruned_loss=0.03863, over 7916.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05894, over 1608155.88 frames. 
], batch size: 20, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:46:31,897 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:46:37,069 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.437e+02 2.919e+02 3.429e+02 9.499e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-07 09:46:46,434 INFO [train.py:901] (1/4) Epoch 26, batch 1350, loss[loss=0.2012, simple_loss=0.2897, pruned_loss=0.0563, over 8313.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2872, pruned_loss=0.06051, over 1614326.83 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:04,180 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203447.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:22,495 INFO [train.py:901] (1/4) Epoch 26, batch 1400, loss[loss=0.169, simple_loss=0.2644, pruned_loss=0.03685, over 8509.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2865, pruned_loss=0.05994, over 1616340.04 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:27,699 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-07 09:47:31,494 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:47,756 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.419e+02 2.906e+02 3.589e+02 5.599e+02, threshold=5.812e+02, percent-clipped=0.0 +2023-02-07 09:47:52,604 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:54,389 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 09:47:57,073 INFO [train.py:901] (1/4) Epoch 26, batch 1450, loss[loss=0.172, simple_loss=0.2558, pruned_loss=0.0441, over 8142.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2861, pruned_loss=0.05975, over 1617910.00 frames. ], batch size: 22, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:24,032 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:48:24,759 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203563.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:48:31,363 INFO [train.py:901] (1/4) Epoch 26, batch 1500, loss[loss=0.2357, simple_loss=0.3242, pruned_loss=0.07357, over 8105.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2855, pruned_loss=0.05978, over 1616926.77 frames. ], batch size: 23, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:32,172 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7554, 1.5819, 1.9443, 1.6839, 1.9239, 1.8023, 1.6812, 1.1555], + device='cuda:1'), covar=tensor([0.4357, 0.4038, 0.1768, 0.2980, 0.2043, 0.2580, 0.1601, 0.4146], + device='cuda:1'), in_proj_covar=tensor([0.0962, 0.1018, 0.0830, 0.0987, 0.1024, 0.0926, 0.0771, 0.0848], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 09:48:42,966 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203588.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:48:47,857 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. 
limit=2.0 +2023-02-07 09:48:56,857 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.262e+02 2.668e+02 3.517e+02 8.500e+02, threshold=5.335e+02, percent-clipped=2.0 +2023-02-07 09:49:06,820 INFO [train.py:901] (1/4) Epoch 26, batch 1550, loss[loss=0.2019, simple_loss=0.2906, pruned_loss=0.05655, over 8316.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2855, pruned_loss=0.06009, over 1618225.09 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:49:40,405 INFO [train.py:901] (1/4) Epoch 26, batch 1600, loss[loss=0.1938, simple_loss=0.27, pruned_loss=0.05879, over 7934.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05942, over 1619964.65 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:49:41,261 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7908, 2.5592, 3.7107, 1.6373, 2.7508, 1.9683, 2.1098, 2.4685], + device='cuda:1'), covar=tensor([0.2039, 0.2304, 0.1101, 0.4902, 0.2051, 0.3882, 0.2312, 0.2826], + device='cuda:1'), in_proj_covar=tensor([0.0536, 0.0627, 0.0556, 0.0662, 0.0657, 0.0606, 0.0555, 0.0643], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:50:05,088 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.410e+02 3.021e+02 3.901e+02 1.362e+03, threshold=6.042e+02, percent-clipped=8.0 +2023-02-07 09:50:15,009 INFO [train.py:901] (1/4) Epoch 26, batch 1650, loss[loss=0.1747, simple_loss=0.2552, pruned_loss=0.0471, over 7662.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2855, pruned_loss=0.05981, over 1617145.50 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:18,603 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6897, 1.9679, 2.0393, 1.3366, 2.0731, 1.5850, 0.5670, 1.8801], + device='cuda:1'), covar=tensor([0.0606, 0.0363, 0.0339, 0.0624, 0.0482, 0.0955, 0.0920, 0.0320], + device='cuda:1'), in_proj_covar=tensor([0.0464, 0.0403, 0.0357, 0.0455, 0.0390, 0.0543, 0.0397, 0.0430], + device='cuda:1'), out_proj_covar=tensor([1.2327e-04, 1.0497e-04, 9.3347e-05, 1.1920e-04, 1.0216e-04, 1.5188e-04, + 1.0625e-04, 1.1293e-04], device='cuda:1') +2023-02-07 09:50:44,781 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:48,702 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:49,156 INFO [train.py:901] (1/4) Epoch 26, batch 1700, loss[loss=0.1774, simple_loss=0.2639, pruned_loss=0.04547, over 7549.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2856, pruned_loss=0.05996, over 1618857.04 frames. 
], batch size: 18, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:57,394 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:05,481 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:14,291 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.478e+02 3.017e+02 3.791e+02 8.735e+02, threshold=6.035e+02, percent-clipped=4.0 +2023-02-07 09:51:20,896 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:23,319 INFO [train.py:901] (1/4) Epoch 26, batch 1750, loss[loss=0.1783, simple_loss=0.2687, pruned_loss=0.04393, over 8094.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2851, pruned_loss=0.05925, over 1615542.75 frames. ], batch size: 21, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:51:28,103 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:38,332 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:54,070 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:58,821 INFO [train.py:901] (1/4) Epoch 26, batch 1800, loss[loss=0.1606, simple_loss=0.2425, pruned_loss=0.03939, over 7547.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2834, pruned_loss=0.0585, over 1612136.99 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:52:06,722 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.53 vs. limit=5.0 +2023-02-07 09:52:15,983 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0595, 1.8575, 2.3578, 1.9832, 2.3008, 2.0509, 1.9458, 1.5726], + device='cuda:1'), covar=tensor([0.4294, 0.4141, 0.1714, 0.3086, 0.1984, 0.2703, 0.1652, 0.4044], + device='cuda:1'), in_proj_covar=tensor([0.0957, 0.1013, 0.0826, 0.0984, 0.1021, 0.0922, 0.0767, 0.0844], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 09:52:23,002 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.441e+02 2.799e+02 3.336e+02 4.977e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 09:52:29,709 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203918.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:52:32,264 INFO [train.py:901] (1/4) Epoch 26, batch 1850, loss[loss=0.2237, simple_loss=0.3085, pruned_loss=0.0695, over 8469.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.05823, over 1608887.21 frames. ], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:52:47,699 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:53:07,707 INFO [train.py:901] (1/4) Epoch 26, batch 1900, loss[loss=0.1771, simple_loss=0.2644, pruned_loss=0.0449, over 7817.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2807, pruned_loss=0.0575, over 1603692.29 frames. 
], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:33,466 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.507e+02 3.073e+02 4.108e+02 9.647e+02, threshold=6.146e+02, percent-clipped=9.0 +2023-02-07 09:53:36,907 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 09:53:42,921 INFO [train.py:901] (1/4) Epoch 26, batch 1950, loss[loss=0.1928, simple_loss=0.2697, pruned_loss=0.05798, over 7655.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2804, pruned_loss=0.05713, over 1607137.23 frames. ], batch size: 19, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:49,445 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 09:54:07,151 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 09:54:11,412 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5152, 2.4950, 3.2693, 2.6537, 3.1292, 2.6051, 2.4433, 1.9484], + device='cuda:1'), covar=tensor([0.5459, 0.4980, 0.1982, 0.3657, 0.2511, 0.2896, 0.1789, 0.5444], + device='cuda:1'), in_proj_covar=tensor([0.0955, 0.1011, 0.0824, 0.0981, 0.1020, 0.0919, 0.0766, 0.0845], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 09:54:17,200 INFO [train.py:901] (1/4) Epoch 26, batch 2000, loss[loss=0.1824, simple_loss=0.2765, pruned_loss=0.04415, over 8807.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2808, pruned_loss=0.05708, over 1611175.72 frames. ], batch size: 40, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:34,357 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:43,742 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.388e+02 3.050e+02 3.690e+02 7.171e+02, threshold=6.101e+02, percent-clipped=4.0 +2023-02-07 09:54:44,463 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:53,076 INFO [train.py:901] (1/4) Epoch 26, batch 2050, loss[loss=0.1988, simple_loss=0.2749, pruned_loss=0.06141, over 7978.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2816, pruned_loss=0.05777, over 1606061.44 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:57,138 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:18,567 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8419, 3.7915, 3.5051, 1.7902, 3.4455, 3.5190, 3.3668, 3.3339], + device='cuda:1'), covar=tensor([0.0883, 0.0721, 0.1195, 0.4841, 0.0917, 0.1056, 0.1521, 0.0820], + device='cuda:1'), in_proj_covar=tensor([0.0536, 0.0453, 0.0437, 0.0551, 0.0436, 0.0457, 0.0432, 0.0397], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:55:26,603 INFO [train.py:901] (1/4) Epoch 26, batch 2100, loss[loss=0.1903, simple_loss=0.2802, pruned_loss=0.05017, over 8290.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2817, pruned_loss=0.05764, over 1606564.00 frames. 
], batch size: 23, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:55:33,647 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:47,790 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:52,930 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.312e+02 2.797e+02 3.552e+02 6.063e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-07 09:55:53,722 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:02,442 INFO [train.py:901] (1/4) Epoch 26, batch 2150, loss[loss=0.2098, simple_loss=0.2892, pruned_loss=0.06518, over 8340.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2813, pruned_loss=0.05723, over 1606097.90 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:56:03,976 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:04,625 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:17,184 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:29,891 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204262.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:56:36,294 INFO [train.py:901] (1/4) Epoch 26, batch 2200, loss[loss=0.1926, simple_loss=0.2734, pruned_loss=0.05588, over 8321.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2814, pruned_loss=0.05746, over 1609114.41 frames. ], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:56:40,594 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.0487, 3.9491, 3.6927, 2.2513, 3.5337, 3.6848, 3.5791, 3.5157], + device='cuda:1'), covar=tensor([0.0816, 0.0681, 0.1003, 0.4223, 0.0916, 0.1107, 0.1364, 0.0882], + device='cuda:1'), in_proj_covar=tensor([0.0534, 0.0451, 0.0434, 0.0550, 0.0435, 0.0455, 0.0432, 0.0395], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 09:57:01,390 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.362e+02 3.099e+02 4.074e+02 1.599e+03, threshold=6.197e+02, percent-clipped=8.0 +2023-02-07 09:57:11,827 INFO [train.py:901] (1/4) Epoch 26, batch 2250, loss[loss=0.1941, simple_loss=0.2772, pruned_loss=0.05553, over 7412.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2823, pruned_loss=0.05789, over 1608445.65 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:13,374 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:57:29,318 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 09:57:46,491 INFO [train.py:901] (1/4) Epoch 26, batch 2300, loss[loss=0.1729, simple_loss=0.2625, pruned_loss=0.04166, over 7976.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2817, pruned_loss=0.05763, over 1607489.92 frames. 
], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:50,090 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204377.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:58:10,722 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.304e+02 2.813e+02 3.713e+02 7.684e+02, threshold=5.626e+02, percent-clipped=3.0 +2023-02-07 09:58:21,054 INFO [train.py:901] (1/4) Epoch 26, batch 2350, loss[loss=0.1836, simple_loss=0.2713, pruned_loss=0.04797, over 8246.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.05824, over 1609482.61 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:58:33,859 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:58:57,219 INFO [train.py:901] (1/4) Epoch 26, batch 2400, loss[loss=0.1996, simple_loss=0.2868, pruned_loss=0.05623, over 8688.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2832, pruned_loss=0.05849, over 1612729.75 frames. ], batch size: 39, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:02,902 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:16,120 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:20,420 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:22,323 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.551e+02 2.904e+02 3.805e+02 7.023e+02, threshold=5.807e+02, percent-clipped=3.0 +2023-02-07 09:59:32,151 INFO [train.py:901] (1/4) Epoch 26, batch 2450, loss[loss=0.1958, simple_loss=0.2693, pruned_loss=0.06114, over 7929.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.05819, over 1615035.52 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:33,770 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204524.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:34,351 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:56,610 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:08,715 INFO [train.py:901] (1/4) Epoch 26, batch 2500, loss[loss=0.1994, simple_loss=0.2873, pruned_loss=0.0558, over 8560.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2832, pruned_loss=0.05834, over 1615976.36 frames. 
], batch size: 31, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:15,133 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:22,357 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7598, 5.8680, 5.1876, 2.3784, 5.2440, 5.5878, 5.3109, 5.3797], + device='cuda:1'), covar=tensor([0.0482, 0.0339, 0.0974, 0.4447, 0.0729, 0.0657, 0.1013, 0.0548], + device='cuda:1'), in_proj_covar=tensor([0.0532, 0.0449, 0.0434, 0.0547, 0.0433, 0.0454, 0.0432, 0.0394], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:00:31,998 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:33,808 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.483e+02 3.074e+02 3.585e+02 8.993e+02, threshold=6.148e+02, percent-clipped=7.0 +2023-02-07 10:00:35,164 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-07 10:00:43,171 INFO [train.py:901] (1/4) Epoch 26, batch 2550, loss[loss=0.2047, simple_loss=0.284, pruned_loss=0.06273, over 7978.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.05901, over 1611074.89 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:50,562 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204633.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 10:00:54,541 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4272, 2.7725, 3.0584, 1.9859, 3.2821, 2.1279, 1.7363, 2.4740], + device='cuda:1'), covar=tensor([0.0940, 0.0439, 0.0394, 0.0767, 0.0488, 0.0889, 0.0935, 0.0558], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0403, 0.0359, 0.0453, 0.0389, 0.0544, 0.0397, 0.0431], + device='cuda:1'), out_proj_covar=tensor([1.2365e-04, 1.0503e-04, 9.3730e-05, 1.1852e-04, 1.0189e-04, 1.5219e-04, + 1.0617e-04, 1.1351e-04], device='cuda:1') +2023-02-07 10:00:55,139 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204640.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:01:07,871 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204658.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 10:01:11,217 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5806, 1.9636, 2.9622, 1.4284, 2.2624, 1.8673, 1.6724, 2.2750], + device='cuda:1'), covar=tensor([0.1921, 0.2528, 0.0897, 0.4682, 0.1823, 0.3317, 0.2434, 0.2200], + device='cuda:1'), in_proj_covar=tensor([0.0535, 0.0623, 0.0556, 0.0659, 0.0655, 0.0605, 0.0552, 0.0640], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:01:18,440 INFO [train.py:901] (1/4) Epoch 26, batch 2600, loss[loss=0.2193, simple_loss=0.3036, pruned_loss=0.06755, over 8363.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2845, pruned_loss=0.05921, over 1612637.10 frames. 
], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:01:29,263 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1781, 1.5120, 1.7036, 1.4497, 1.0201, 1.5068, 1.7581, 1.6364], + device='cuda:1'), covar=tensor([0.0469, 0.1286, 0.1642, 0.1442, 0.0586, 0.1498, 0.0711, 0.0661], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0191, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 10:01:43,344 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.465e+02 3.094e+02 3.874e+02 9.576e+02, threshold=6.187e+02, percent-clipped=4.0 +2023-02-07 10:01:52,896 INFO [train.py:901] (1/4) Epoch 26, batch 2650, loss[loss=0.1862, simple_loss=0.2713, pruned_loss=0.05057, over 7802.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.285, pruned_loss=0.05898, over 1616895.07 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:01:57,927 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2100, 2.0866, 2.6236, 2.1553, 2.5750, 2.2868, 2.0969, 1.3887], + device='cuda:1'), covar=tensor([0.6008, 0.5086, 0.2050, 0.4284, 0.2812, 0.3383, 0.2202, 0.5835], + device='cuda:1'), in_proj_covar=tensor([0.0958, 0.1012, 0.0824, 0.0981, 0.1018, 0.0920, 0.0767, 0.0844], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:02:19,919 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0322, 1.9196, 2.4342, 2.0306, 2.3306, 2.1292, 1.9759, 1.2233], + device='cuda:1'), covar=tensor([0.6415, 0.5312, 0.1937, 0.3876, 0.2760, 0.3449, 0.2300, 0.5316], + device='cuda:1'), in_proj_covar=tensor([0.0955, 0.1009, 0.0821, 0.0977, 0.1015, 0.0918, 0.0764, 0.0841], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:02:28,025 INFO [train.py:901] (1/4) Epoch 26, batch 2700, loss[loss=0.219, simple_loss=0.3088, pruned_loss=0.06458, over 8346.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2847, pruned_loss=0.05904, over 1616152.82 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:36,921 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0670, 1.4543, 1.7521, 1.3370, 0.9703, 1.5160, 1.7397, 1.8571], + device='cuda:1'), covar=tensor([0.0493, 0.1258, 0.1674, 0.1447, 0.0589, 0.1439, 0.0679, 0.0588], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 10:02:53,797 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.359e+02 2.865e+02 3.674e+02 6.992e+02, threshold=5.730e+02, percent-clipped=1.0 +2023-02-07 10:02:55,417 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:04,000 INFO [train.py:901] (1/4) Epoch 26, batch 2750, loss[loss=0.2233, simple_loss=0.303, pruned_loss=0.07178, over 7673.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.05866, over 1616958.47 frames. 
], batch size: 19, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:13,266 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:16,006 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:38,129 INFO [train.py:901] (1/4) Epoch 26, batch 2800, loss[loss=0.2357, simple_loss=0.312, pruned_loss=0.07964, over 8334.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2844, pruned_loss=0.05871, over 1616875.72 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:55,281 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204896.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:04,626 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.285e+02 3.108e+02 3.828e+02 9.944e+02, threshold=6.216e+02, percent-clipped=6.0 +2023-02-07 10:04:13,928 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204921.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:14,420 INFO [train.py:901] (1/4) Epoch 26, batch 2850, loss[loss=0.1916, simple_loss=0.2818, pruned_loss=0.05067, over 8484.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2839, pruned_loss=0.0586, over 1616738.28 frames. ], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:04:23,876 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9751, 1.4525, 3.4590, 1.7197, 2.4756, 3.7564, 3.8521, 3.2008], + device='cuda:1'), covar=tensor([0.1238, 0.1962, 0.0280, 0.1925, 0.1008, 0.0223, 0.0538, 0.0542], + device='cuda:1'), in_proj_covar=tensor([0.0303, 0.0327, 0.0290, 0.0319, 0.0321, 0.0276, 0.0435, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 10:04:48,513 INFO [train.py:901] (1/4) Epoch 26, batch 2900, loss[loss=0.2, simple_loss=0.2913, pruned_loss=0.05441, over 8135.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2845, pruned_loss=0.05885, over 1615200.73 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:04:59,572 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.30 vs. limit=5.0 +2023-02-07 10:05:00,980 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 10:05:13,373 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.393e+02 3.052e+02 3.991e+02 9.487e+02, threshold=6.105e+02, percent-clipped=5.0 +2023-02-07 10:05:20,484 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 10:05:23,876 INFO [train.py:901] (1/4) Epoch 26, batch 2950, loss[loss=0.222, simple_loss=0.3107, pruned_loss=0.06661, over 8489.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2844, pruned_loss=0.05885, over 1616461.47 frames. ], batch size: 29, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:58,484 INFO [train.py:901] (1/4) Epoch 26, batch 3000, loss[loss=0.1713, simple_loss=0.2516, pruned_loss=0.04547, over 7437.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.05853, over 1613808.37 frames. 
], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:58,484 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 10:06:08,067 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7555, 1.4852, 3.9059, 1.6528, 3.4709, 3.2676, 3.6006, 3.5164], + device='cuda:1'), covar=tensor([0.0760, 0.4715, 0.0564, 0.4283, 0.1156, 0.1095, 0.0702, 0.0714], + device='cuda:1'), in_proj_covar=tensor([0.0665, 0.0662, 0.0728, 0.0650, 0.0742, 0.0629, 0.0629, 0.0706], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:06:11,420 INFO [train.py:935] (1/4) Epoch 26, validation: loss=0.1716, simple_loss=0.2713, pruned_loss=0.03593, over 944034.00 frames. +2023-02-07 10:06:11,421 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 10:06:31,073 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-07 10:06:36,703 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.246e+02 2.785e+02 3.735e+02 7.523e+02, threshold=5.571e+02, percent-clipped=3.0 +2023-02-07 10:06:46,005 INFO [train.py:901] (1/4) Epoch 26, batch 3050, loss[loss=0.1723, simple_loss=0.262, pruned_loss=0.04124, over 8693.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2839, pruned_loss=0.05817, over 1616558.76 frames. ], batch size: 39, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:06:49,437 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:22,793 INFO [train.py:901] (1/4) Epoch 26, batch 3100, loss[loss=0.2313, simple_loss=0.3053, pruned_loss=0.07861, over 8447.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2837, pruned_loss=0.05792, over 1615988.13 frames. ], batch size: 29, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:07:30,228 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:48,159 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.327e+02 2.997e+02 4.038e+02 1.256e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 10:07:53,680 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:57,552 INFO [train.py:901] (1/4) Epoch 26, batch 3150, loss[loss=0.1818, simple_loss=0.2606, pruned_loss=0.0515, over 7451.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2843, pruned_loss=0.05845, over 1617133.55 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:33,479 INFO [train.py:901] (1/4) Epoch 26, batch 3200, loss[loss=0.2372, simple_loss=0.3161, pruned_loss=0.0792, over 8473.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.05794, over 1614526.26 frames. 
], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:52,245 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:08:57,569 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9729, 6.1705, 5.3802, 2.8264, 5.4871, 5.7065, 5.6220, 5.5612], + device='cuda:1'), covar=tensor([0.0506, 0.0298, 0.0863, 0.3954, 0.0707, 0.0669, 0.1089, 0.0505], + device='cuda:1'), in_proj_covar=tensor([0.0534, 0.0449, 0.0437, 0.0549, 0.0432, 0.0455, 0.0431, 0.0394], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:08:58,813 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.527e+02 3.010e+02 3.735e+02 6.895e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-07 10:09:09,105 INFO [train.py:901] (1/4) Epoch 26, batch 3250, loss[loss=0.2036, simple_loss=0.2902, pruned_loss=0.05852, over 8598.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05782, over 1610640.45 frames. ], batch size: 39, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:09:43,189 INFO [train.py:901] (1/4) Epoch 26, batch 3300, loss[loss=0.206, simple_loss=0.2931, pruned_loss=0.05948, over 8509.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2831, pruned_loss=0.05793, over 1610409.57 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:09:52,943 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9533, 1.9678, 1.8160, 2.5582, 1.0543, 1.5511, 1.9136, 2.1064], + device='cuda:1'), covar=tensor([0.0758, 0.0788, 0.0887, 0.0439, 0.1153, 0.1360, 0.0795, 0.0717], + device='cuda:1'), in_proj_covar=tensor([0.0228, 0.0192, 0.0243, 0.0210, 0.0203, 0.0245, 0.0248, 0.0204], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 10:10:10,316 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.313e+02 2.653e+02 3.358e+02 9.214e+02, threshold=5.305e+02, percent-clipped=4.0 +2023-02-07 10:10:16,828 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4392, 1.8292, 2.6332, 1.3425, 2.0037, 1.8836, 1.4638, 2.1101], + device='cuda:1'), covar=tensor([0.1906, 0.2571, 0.0868, 0.4603, 0.1917, 0.3146, 0.2584, 0.2054], + device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0626, 0.0559, 0.0663, 0.0658, 0.0607, 0.0557, 0.0640], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:10:20,068 INFO [train.py:901] (1/4) Epoch 26, batch 3350, loss[loss=0.1893, simple_loss=0.272, pruned_loss=0.05326, over 8293.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.05768, over 1608964.69 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:10:54,102 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:10:54,720 INFO [train.py:901] (1/4) Epoch 26, batch 3400, loss[loss=0.2411, simple_loss=0.3133, pruned_loss=0.08443, over 8514.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2827, pruned_loss=0.05751, over 1608307.18 frames. ], batch size: 31, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:02,813 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. 
limit=2.0 +2023-02-07 10:11:03,621 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2142, 1.5538, 4.4080, 2.1204, 2.4605, 5.1038, 5.2175, 4.4212], + device='cuda:1'), covar=tensor([0.1256, 0.1949, 0.0228, 0.1857, 0.1246, 0.0185, 0.0342, 0.0597], + device='cuda:1'), in_proj_covar=tensor([0.0303, 0.0327, 0.0290, 0.0319, 0.0321, 0.0276, 0.0435, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 10:11:20,313 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.409e+02 2.883e+02 3.635e+02 7.106e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 10:11:30,468 INFO [train.py:901] (1/4) Epoch 26, batch 3450, loss[loss=0.2123, simple_loss=0.2988, pruned_loss=0.06291, over 8486.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2827, pruned_loss=0.05761, over 1607958.23 frames. ], batch size: 28, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:53,164 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:11:57,167 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:05,234 INFO [train.py:901] (1/4) Epoch 26, batch 3500, loss[loss=0.177, simple_loss=0.2563, pruned_loss=0.04889, over 8031.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2826, pruned_loss=0.05759, over 1606364.14 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:12:10,360 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:15,154 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:21,534 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6101, 2.0768, 3.0779, 1.4567, 2.2593, 1.9938, 1.6686, 2.4055], + device='cuda:1'), covar=tensor([0.1912, 0.2632, 0.0975, 0.4788, 0.2118, 0.3352, 0.2467, 0.2457], + device='cuda:1'), in_proj_covar=tensor([0.0536, 0.0626, 0.0558, 0.0662, 0.0658, 0.0605, 0.0557, 0.0640], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:12:24,733 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 10:12:30,113 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.208e+02 2.714e+02 3.358e+02 5.744e+02, threshold=5.428e+02, percent-clipped=0.0 +2023-02-07 10:12:35,786 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 10:12:39,510 INFO [train.py:901] (1/4) Epoch 26, batch 3550, loss[loss=0.1658, simple_loss=0.2577, pruned_loss=0.03698, over 7970.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2823, pruned_loss=0.05727, over 1610058.30 frames. ], batch size: 21, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:15,373 INFO [train.py:901] (1/4) Epoch 26, batch 3600, loss[loss=0.1725, simple_loss=0.2509, pruned_loss=0.04699, over 7247.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.0576, over 1611458.30 frames. 
], batch size: 16, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:16,199 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205673.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:17,473 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:39,661 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.295e+02 2.882e+02 3.730e+02 8.207e+02, threshold=5.763e+02, percent-clipped=6.0 +2023-02-07 10:13:49,105 INFO [train.py:901] (1/4) Epoch 26, batch 3650, loss[loss=0.1977, simple_loss=0.2721, pruned_loss=0.06164, over 7966.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05744, over 1610662.65 frames. ], batch size: 21, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:18,433 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:14:24,801 INFO [train.py:901] (1/4) Epoch 26, batch 3700, loss[loss=0.1816, simple_loss=0.2738, pruned_loss=0.04471, over 8504.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2832, pruned_loss=0.05807, over 1616570.52 frames. ], batch size: 29, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:27,615 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:14:49,669 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.377e+02 2.968e+02 3.727e+02 1.221e+03, threshold=5.937e+02, percent-clipped=5.0 +2023-02-07 10:14:59,197 INFO [train.py:901] (1/4) Epoch 26, batch 3750, loss[loss=0.17, simple_loss=0.2677, pruned_loss=0.03614, over 8308.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.05824, over 1614364.45 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:12,920 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:31,310 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:34,019 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5132, 1.3964, 1.8474, 1.3109, 1.1273, 1.8071, 0.1904, 1.1789], + device='cuda:1'), covar=tensor([0.1291, 0.1166, 0.0349, 0.0809, 0.2458, 0.0422, 0.1816, 0.1186], + device='cuda:1'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0221, 0.0274, 0.0144, 0.0171, 0.0198], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 10:15:34,439 INFO [train.py:901] (1/4) Epoch 26, batch 3800, loss[loss=0.2189, simple_loss=0.3013, pruned_loss=0.06819, over 8369.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2817, pruned_loss=0.05794, over 1611253.51 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:59,216 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.381e+02 2.847e+02 3.364e+02 6.986e+02, threshold=5.694e+02, percent-clipped=1.0 +2023-02-07 10:15:59,375 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:08,803 INFO [train.py:901] (1/4) Epoch 26, batch 3850, loss[loss=0.1954, simple_loss=0.275, pruned_loss=0.05788, over 8290.00 frames. 
], tot_loss[loss=0.1986, simple_loss=0.2817, pruned_loss=0.05775, over 1614691.25 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:16:15,133 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205931.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:20,582 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3748, 1.6052, 4.3330, 2.0534, 2.6217, 5.0326, 5.1242, 4.3113], + device='cuda:1'), covar=tensor([0.1240, 0.2007, 0.0290, 0.2029, 0.1211, 0.0177, 0.0475, 0.0552], + device='cuda:1'), in_proj_covar=tensor([0.0305, 0.0328, 0.0291, 0.0321, 0.0322, 0.0279, 0.0438, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 10:16:23,272 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9464, 2.6706, 4.0816, 1.7106, 3.1932, 2.3773, 2.0718, 3.0596], + device='cuda:1'), covar=tensor([0.1816, 0.2334, 0.0840, 0.4410, 0.1693, 0.3089, 0.2262, 0.2240], + device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0626, 0.0558, 0.0662, 0.0657, 0.0605, 0.0557, 0.0641], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:16:29,158 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 10:16:32,101 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205956.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:42,785 INFO [train.py:901] (1/4) Epoch 26, batch 3900, loss[loss=0.1948, simple_loss=0.2803, pruned_loss=0.05461, over 8135.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2821, pruned_loss=0.05853, over 1610794.59 frames. ], batch size: 22, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:09,986 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.441e+02 2.892e+02 3.706e+02 7.796e+02, threshold=5.785e+02, percent-clipped=3.0 +2023-02-07 10:17:16,700 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:17:18,466 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 10:17:20,038 INFO [train.py:901] (1/4) Epoch 26, batch 3950, loss[loss=0.174, simple_loss=0.2507, pruned_loss=0.04868, over 7260.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2822, pruned_loss=0.05844, over 1609518.26 frames. ], batch size: 16, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:53,922 INFO [train.py:901] (1/4) Epoch 26, batch 4000, loss[loss=0.1606, simple_loss=0.2538, pruned_loss=0.03373, over 8149.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05852, over 1614342.51 frames. ], batch size: 22, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:55,452 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:18,672 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206106.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:19,947 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.407e+02 2.986e+02 3.556e+02 8.558e+02, threshold=5.971e+02, percent-clipped=6.0 +2023-02-07 10:18:29,512 INFO [train.py:901] (1/4) Epoch 26, batch 4050, loss[loss=0.2361, simple_loss=0.3264, pruned_loss=0.07287, over 8524.00 frames. 
], tot_loss[loss=0.2001, simple_loss=0.2833, pruned_loss=0.05843, over 1609854.14 frames. ], batch size: 48, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:18:37,175 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:03,800 INFO [train.py:901] (1/4) Epoch 26, batch 4100, loss[loss=0.1644, simple_loss=0.2443, pruned_loss=0.04225, over 7928.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2822, pruned_loss=0.05722, over 1612157.06 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:28,870 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.376e+02 2.755e+02 3.418e+02 9.873e+02, threshold=5.510e+02, percent-clipped=4.0 +2023-02-07 10:19:31,067 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6811, 1.8908, 2.0515, 1.8392, 1.2397, 1.8915, 2.2657, 2.1658], + device='cuda:1'), covar=tensor([0.0513, 0.1142, 0.1616, 0.1348, 0.0627, 0.1333, 0.0670, 0.0556], + device='cuda:1'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0100, 0.0162, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 10:19:34,493 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,251 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206221.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,711 INFO [train.py:901] (1/4) Epoch 26, batch 4150, loss[loss=0.189, simple_loss=0.2678, pruned_loss=0.05511, over 7812.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2819, pruned_loss=0.05722, over 1615473.61 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:52,402 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.47 vs. limit=5.0 +2023-02-07 10:20:00,463 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 10:20:00,779 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:20:10,905 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8359, 3.8069, 3.4547, 1.9813, 3.4174, 3.4625, 3.3690, 3.3524], + device='cuda:1'), covar=tensor([0.0902, 0.0666, 0.1230, 0.4470, 0.1026, 0.1184, 0.1512, 0.0931], + device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0455, 0.0441, 0.0553, 0.0436, 0.0459, 0.0436, 0.0400], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:20:14,202 INFO [train.py:901] (1/4) Epoch 26, batch 4200, loss[loss=0.1854, simple_loss=0.2692, pruned_loss=0.05076, over 7906.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2803, pruned_loss=0.05656, over 1611365.40 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:20:22,977 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 10:20:38,388 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.335e+02 2.968e+02 3.755e+02 9.805e+02, threshold=5.936e+02, percent-clipped=3.0 +2023-02-07 10:20:44,922 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 10:20:49,138 INFO [train.py:901] (1/4) Epoch 26, batch 4250, loss[loss=0.2188, simple_loss=0.2991, pruned_loss=0.06924, over 8462.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05661, over 1612775.77 frames. ], batch size: 27, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:20:54,076 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8283, 3.7981, 3.4560, 1.7794, 3.3513, 3.5020, 3.3954, 3.3651], + device='cuda:1'), covar=tensor([0.0933, 0.0688, 0.1203, 0.5048, 0.1095, 0.1297, 0.1492, 0.0936], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0457, 0.0443, 0.0556, 0.0439, 0.0461, 0.0438, 0.0401], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:21:21,338 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:24,640 INFO [train.py:901] (1/4) Epoch 26, batch 4300, loss[loss=0.1888, simple_loss=0.2752, pruned_loss=0.05118, over 7972.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05679, over 1613072.00 frames. ], batch size: 21, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:21:31,160 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 10:21:35,882 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:50,294 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.331e+02 2.890e+02 3.800e+02 6.492e+02, threshold=5.781e+02, percent-clipped=2.0 +2023-02-07 10:21:53,273 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206413.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:56,606 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:59,348 INFO [train.py:901] (1/4) Epoch 26, batch 4350, loss[loss=0.1753, simple_loss=0.2706, pruned_loss=0.04005, over 8596.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2814, pruned_loss=0.05689, over 1615128.22 frames. ], batch size: 31, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:18,982 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 10:22:34,790 INFO [train.py:901] (1/4) Epoch 26, batch 4400, loss[loss=0.2207, simple_loss=0.3031, pruned_loss=0.06909, over 8113.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2835, pruned_loss=0.05767, over 1619537.96 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:38,347 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:22:55,677 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:00,163 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.619e+02 3.000e+02 3.925e+02 8.429e+02, threshold=6.000e+02, percent-clipped=7.0 +2023-02-07 10:23:00,196 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 10:23:08,813 INFO [train.py:901] (1/4) Epoch 26, batch 4450, loss[loss=0.1848, simple_loss=0.274, pruned_loss=0.04776, over 8088.00 frames. 
], tot_loss[loss=0.1991, simple_loss=0.283, pruned_loss=0.05763, over 1619132.73 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:16,247 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206533.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:16,908 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5853, 1.5511, 4.7633, 1.7815, 4.2388, 4.0460, 4.3282, 4.1948], + device='cuda:1'), covar=tensor([0.0566, 0.4868, 0.0504, 0.4098, 0.1126, 0.0879, 0.0551, 0.0652], + device='cuda:1'), in_proj_covar=tensor([0.0661, 0.0655, 0.0724, 0.0648, 0.0735, 0.0622, 0.0623, 0.0696], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:23:20,342 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 10:23:34,157 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:44,263 INFO [train.py:901] (1/4) Epoch 26, batch 4500, loss[loss=0.2043, simple_loss=0.2934, pruned_loss=0.05759, over 8246.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2829, pruned_loss=0.058, over 1617267.83 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:55,206 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 10:23:59,317 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1140, 2.2005, 1.9743, 2.7155, 1.2599, 1.7282, 2.0107, 2.1935], + device='cuda:1'), covar=tensor([0.0743, 0.0802, 0.0803, 0.0383, 0.1070, 0.1248, 0.0804, 0.0899], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0194, 0.0244, 0.0212, 0.0203, 0.0246, 0.0249, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 10:24:10,073 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.363e+02 2.961e+02 3.499e+02 6.135e+02, threshold=5.921e+02, percent-clipped=1.0 +2023-02-07 10:24:18,689 INFO [train.py:901] (1/4) Epoch 26, batch 4550, loss[loss=0.1882, simple_loss=0.2715, pruned_loss=0.05243, over 7658.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05804, over 1614022.87 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:19,486 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:35,788 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:51,534 INFO [train.py:901] (1/4) Epoch 26, batch 4600, loss[loss=0.2123, simple_loss=0.2911, pruned_loss=0.06675, over 8597.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2825, pruned_loss=0.05855, over 1610691.11 frames. 
], batch size: 31, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:53,057 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206674.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:25:18,508 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.342e+02 2.811e+02 3.625e+02 9.770e+02, threshold=5.622e+02, percent-clipped=5.0 +2023-02-07 10:25:20,162 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6313, 2.6998, 1.8867, 2.4200, 2.1762, 1.6543, 2.1202, 2.2681], + device='cuda:1'), covar=tensor([0.1586, 0.0405, 0.1232, 0.0682, 0.0811, 0.1590, 0.1190, 0.1067], + device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0241, 0.0342, 0.0314, 0.0304, 0.0348, 0.0350, 0.0325], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 10:25:23,279 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 10:25:28,328 INFO [train.py:901] (1/4) Epoch 26, batch 4650, loss[loss=0.201, simple_loss=0.2878, pruned_loss=0.05709, over 7668.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2826, pruned_loss=0.05862, over 1609756.82 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:25:40,977 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 10:26:02,070 INFO [train.py:901] (1/4) Epoch 26, batch 4700, loss[loss=0.2214, simple_loss=0.3126, pruned_loss=0.06511, over 8318.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2823, pruned_loss=0.0585, over 1610291.84 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:08,424 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0451, 1.2658, 1.1991, 0.7332, 1.1944, 1.0976, 0.0625, 1.2042], + device='cuda:1'), covar=tensor([0.0514, 0.0441, 0.0417, 0.0703, 0.0512, 0.1070, 0.0997, 0.0428], + device='cuda:1'), in_proj_covar=tensor([0.0466, 0.0402, 0.0360, 0.0455, 0.0389, 0.0547, 0.0399, 0.0434], + device='cuda:1'), out_proj_covar=tensor([1.2370e-04, 1.0477e-04, 9.4011e-05, 1.1921e-04, 1.0177e-04, 1.5321e-04, + 1.0686e-04, 1.1408e-04], device='cuda:1') +2023-02-07 10:26:13,620 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206789.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:28,922 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.506e+02 2.890e+02 3.298e+02 6.611e+02, threshold=5.779e+02, percent-clipped=3.0 +2023-02-07 10:26:32,508 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:34,595 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2501, 2.0833, 2.7680, 2.2919, 2.7177, 2.2985, 2.1192, 1.5875], + device='cuda:1'), covar=tensor([0.5675, 0.5294, 0.2062, 0.4122, 0.2744, 0.3320, 0.2068, 0.5900], + device='cuda:1'), in_proj_covar=tensor([0.0952, 0.1003, 0.0824, 0.0979, 0.1014, 0.0917, 0.0764, 0.0838], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:26:37,690 INFO [train.py:901] (1/4) Epoch 26, batch 4750, loss[loss=0.164, simple_loss=0.25, pruned_loss=0.03898, over 8090.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.0588, over 1608961.79 frames. 
], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:53,378 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 10:26:55,377 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 10:26:58,849 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206852.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:27:00,991 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 10:27:12,097 INFO [train.py:901] (1/4) Epoch 26, batch 4800, loss[loss=0.17, simple_loss=0.248, pruned_loss=0.04604, over 7547.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2821, pruned_loss=0.05874, over 1602440.76 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:21,153 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 10:27:37,291 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.410e+02 2.886e+02 3.541e+02 7.542e+02, threshold=5.772e+02, percent-clipped=6.0 +2023-02-07 10:27:46,701 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 10:27:47,384 INFO [train.py:901] (1/4) Epoch 26, batch 4850, loss[loss=0.2375, simple_loss=0.3109, pruned_loss=0.08212, over 8487.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2831, pruned_loss=0.05888, over 1609188.03 frames. ], batch size: 28, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:52,975 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:10,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206955.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:21,655 INFO [train.py:901] (1/4) Epoch 26, batch 4900, loss[loss=0.2034, simple_loss=0.2885, pruned_loss=0.05916, over 8604.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05929, over 1611926.18 frames. ], batch size: 49, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:28:25,971 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.05 vs. limit=5.0 +2023-02-07 10:28:46,039 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.535e+02 3.142e+02 3.836e+02 8.051e+02, threshold=6.285e+02, percent-clipped=2.0 +2023-02-07 10:28:55,279 INFO [train.py:901] (1/4) Epoch 26, batch 4950, loss[loss=0.2149, simple_loss=0.2886, pruned_loss=0.07057, over 6828.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2847, pruned_loss=0.05987, over 1608511.56 frames. ], batch size: 71, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:29,754 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8521, 1.7347, 2.4595, 1.4290, 1.3240, 2.3788, 0.4270, 1.4583], + device='cuda:1'), covar=tensor([0.1378, 0.1127, 0.0304, 0.1300, 0.2407, 0.0390, 0.1953, 0.1419], + device='cuda:1'), in_proj_covar=tensor([0.0195, 0.0202, 0.0131, 0.0220, 0.0273, 0.0142, 0.0170, 0.0195], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 10:29:32,342 INFO [train.py:901] (1/4) Epoch 26, batch 5000, loss[loss=0.1943, simple_loss=0.2727, pruned_loss=0.05802, over 8283.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2847, pruned_loss=0.05972, over 1607458.16 frames. 
], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:42,773 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1205, 2.2835, 1.9898, 2.9335, 1.3150, 1.7045, 2.1123, 2.2400], + device='cuda:1'), covar=tensor([0.0694, 0.0732, 0.0769, 0.0324, 0.1119, 0.1250, 0.0818, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0213, 0.0204, 0.0247, 0.0250, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 10:29:57,379 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.413e+02 2.985e+02 3.933e+02 1.062e+03, threshold=5.970e+02, percent-clipped=3.0 +2023-02-07 10:30:06,454 INFO [train.py:901] (1/4) Epoch 26, batch 5050, loss[loss=0.1777, simple_loss=0.2806, pruned_loss=0.0374, over 8534.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2839, pruned_loss=0.05942, over 1607283.65 frames. ], batch size: 28, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:22,941 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3926, 2.0783, 2.6497, 2.2229, 2.6214, 2.3804, 2.1949, 1.5439], + device='cuda:1'), covar=tensor([0.5571, 0.5159, 0.1993, 0.4071, 0.2713, 0.3058, 0.1930, 0.5581], + device='cuda:1'), in_proj_covar=tensor([0.0957, 0.1005, 0.0825, 0.0982, 0.1016, 0.0919, 0.0764, 0.0843], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:30:24,762 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 10:30:42,628 INFO [train.py:901] (1/4) Epoch 26, batch 5100, loss[loss=0.1563, simple_loss=0.2408, pruned_loss=0.03585, over 7529.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2838, pruned_loss=0.05921, over 1606639.05 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:58,149 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=207194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:30:59,446 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:31:08,240 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.087e+02 2.633e+02 3.622e+02 6.552e+02, threshold=5.265e+02, percent-clipped=1.0 +2023-02-07 10:31:16,944 INFO [train.py:901] (1/4) Epoch 26, batch 5150, loss[loss=0.1677, simple_loss=0.2451, pruned_loss=0.04519, over 7523.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05928, over 1601496.59 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:31:52,800 INFO [train.py:901] (1/4) Epoch 26, batch 5200, loss[loss=0.1836, simple_loss=0.2757, pruned_loss=0.04577, over 8487.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05911, over 1606185.17 frames. ], batch size: 49, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:17,988 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.585e+02 3.464e+02 4.468e+02 1.375e+03, threshold=6.928e+02, percent-clipped=16.0 +2023-02-07 10:32:18,903 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.17 vs. 
limit=5.0 +2023-02-07 10:32:19,451 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:32:19,962 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 10:32:26,696 INFO [train.py:901] (1/4) Epoch 26, batch 5250, loss[loss=0.187, simple_loss=0.2845, pruned_loss=0.04477, over 8338.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2834, pruned_loss=0.05878, over 1604264.26 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:48,497 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0639, 1.7885, 3.3726, 1.4498, 2.3913, 3.6848, 3.8618, 3.1693], + device='cuda:1'), covar=tensor([0.1222, 0.1772, 0.0335, 0.2316, 0.1108, 0.0255, 0.0563, 0.0563], + device='cuda:1'), in_proj_covar=tensor([0.0298, 0.0321, 0.0286, 0.0314, 0.0314, 0.0273, 0.0429, 0.0301], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 10:33:00,346 INFO [train.py:901] (1/4) Epoch 26, batch 5300, loss[loss=0.2358, simple_loss=0.3185, pruned_loss=0.0765, over 8195.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05816, over 1610061.01 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:33:27,787 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.392e+02 2.913e+02 3.782e+02 6.658e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-07 10:33:36,852 INFO [train.py:901] (1/4) Epoch 26, batch 5350, loss[loss=0.2082, simple_loss=0.298, pruned_loss=0.05918, over 8353.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.0579, over 1611460.88 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:10,290 INFO [train.py:901] (1/4) Epoch 26, batch 5400, loss[loss=0.2093, simple_loss=0.3048, pruned_loss=0.05687, over 8296.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2843, pruned_loss=0.05834, over 1619414.50 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:37,323 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.344e+02 3.061e+02 4.157e+02 9.885e+02, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 10:34:46,158 INFO [train.py:901] (1/4) Epoch 26, batch 5450, loss[loss=0.2118, simple_loss=0.2998, pruned_loss=0.06189, over 8322.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2837, pruned_loss=0.05824, over 1615300.87 frames. 
], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:47,014 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8029, 1.5515, 1.7144, 1.4212, 0.9717, 1.5232, 1.6871, 1.4300], + device='cuda:1'), covar=tensor([0.0580, 0.1207, 0.1663, 0.1489, 0.0606, 0.1448, 0.0726, 0.0673], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 10:34:57,839 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:00,781 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8697, 1.6767, 1.9994, 1.7964, 1.8966, 1.9451, 1.8054, 0.8182], + device='cuda:1'), covar=tensor([0.6068, 0.4824, 0.2176, 0.3790, 0.2771, 0.3399, 0.2041, 0.5598], + device='cuda:1'), in_proj_covar=tensor([0.0959, 0.1009, 0.0827, 0.0984, 0.1019, 0.0921, 0.0765, 0.0844], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:35:06,590 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 10:35:08,192 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6004, 2.9152, 3.2500, 1.8801, 3.4471, 2.3037, 1.6657, 2.4629], + device='cuda:1'), covar=tensor([0.0762, 0.0366, 0.0265, 0.0808, 0.0467, 0.0777, 0.0964, 0.0561], + device='cuda:1'), in_proj_covar=tensor([0.0465, 0.0404, 0.0359, 0.0456, 0.0390, 0.0548, 0.0401, 0.0434], + device='cuda:1'), out_proj_covar=tensor([1.2354e-04, 1.0516e-04, 9.3747e-05, 1.1948e-04, 1.0224e-04, 1.5327e-04, + 1.0743e-04, 1.1392e-04], device='cuda:1') +2023-02-07 10:35:17,344 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:20,581 INFO [train.py:901] (1/4) Epoch 26, batch 5500, loss[loss=0.1966, simple_loss=0.2741, pruned_loss=0.0595, over 7969.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.284, pruned_loss=0.0584, over 1609985.33 frames. ], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:35:34,396 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207592.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:47,182 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.395e+02 2.975e+02 3.460e+02 7.775e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 10:35:56,713 INFO [train.py:901] (1/4) Epoch 26, batch 5550, loss[loss=0.2029, simple_loss=0.2907, pruned_loss=0.05759, over 8361.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2838, pruned_loss=0.05798, over 1608000.20 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:17,884 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:36:30,422 INFO [train.py:901] (1/4) Epoch 26, batch 5600, loss[loss=0.1685, simple_loss=0.2597, pruned_loss=0.03864, over 8186.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2839, pruned_loss=0.05836, over 1608471.50 frames. 
], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:55,068 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.412e+02 3.079e+02 3.750e+02 8.490e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-07 10:37:04,589 INFO [train.py:901] (1/4) Epoch 26, batch 5650, loss[loss=0.2457, simple_loss=0.3378, pruned_loss=0.07681, over 8202.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2841, pruned_loss=0.05823, over 1610818.18 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:37:12,978 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 10:37:26,295 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.35 vs. limit=5.0 +2023-02-07 10:37:40,859 INFO [train.py:901] (1/4) Epoch 26, batch 5700, loss[loss=0.1683, simple_loss=0.2514, pruned_loss=0.04263, over 7926.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05772, over 1610521.47 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:05,995 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.232e+02 2.912e+02 3.330e+02 6.698e+02, threshold=5.824e+02, percent-clipped=1.0 +2023-02-07 10:38:14,804 INFO [train.py:901] (1/4) Epoch 26, batch 5750, loss[loss=0.2032, simple_loss=0.2863, pruned_loss=0.06002, over 8438.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2833, pruned_loss=0.05789, over 1609117.93 frames. ], batch size: 27, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:16,900 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 10:38:50,531 INFO [train.py:901] (1/4) Epoch 26, batch 5800, loss[loss=0.1958, simple_loss=0.2858, pruned_loss=0.05291, over 8291.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.282, pruned_loss=0.05731, over 1608579.66 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:15,995 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.614e+02 3.148e+02 4.020e+02 8.026e+02, threshold=6.297e+02, percent-clipped=4.0 +2023-02-07 10:39:16,251 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:24,803 INFO [train.py:901] (1/4) Epoch 26, batch 5850, loss[loss=0.2251, simple_loss=0.3126, pruned_loss=0.06883, over 8320.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2822, pruned_loss=0.0576, over 1608026.54 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:32,993 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:59,794 INFO [train.py:901] (1/4) Epoch 26, batch 5900, loss[loss=0.1659, simple_loss=0.2644, pruned_loss=0.03376, over 8282.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2833, pruned_loss=0.05821, over 1611775.29 frames. 
], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:15,813 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7197, 1.4682, 2.8377, 1.4835, 2.2023, 3.0768, 3.2035, 2.6281], + device='cuda:1'), covar=tensor([0.1134, 0.1537, 0.0371, 0.1911, 0.0856, 0.0293, 0.0574, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0300, 0.0324, 0.0290, 0.0316, 0.0317, 0.0275, 0.0435, 0.0305], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 10:40:26,852 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.451e+02 2.961e+02 3.656e+02 5.483e+02, threshold=5.923e+02, percent-clipped=0.0 +2023-02-07 10:40:35,637 INFO [train.py:901] (1/4) Epoch 26, batch 5950, loss[loss=0.1668, simple_loss=0.2437, pruned_loss=0.04495, over 7802.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2828, pruned_loss=0.05819, over 1614585.22 frames. ], batch size: 19, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:54,933 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:41:09,813 INFO [train.py:901] (1/4) Epoch 26, batch 6000, loss[loss=0.1875, simple_loss=0.2667, pruned_loss=0.0541, over 7805.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.0583, over 1613381.12 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:41:09,813 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 10:41:24,448 INFO [train.py:935] (1/4) Epoch 26, validation: loss=0.1721, simple_loss=0.2717, pruned_loss=0.03627, over 944034.00 frames. +2023-02-07 10:41:24,449 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 10:41:32,221 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2333, 1.0656, 1.3213, 1.0426, 1.0010, 1.3274, 0.0729, 0.9647], + device='cuda:1'), covar=tensor([0.1370, 0.1331, 0.0507, 0.0682, 0.2228, 0.0555, 0.1842, 0.1069], + device='cuda:1'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0222, 0.0276, 0.0144, 0.0170, 0.0197], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 10:41:51,019 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.308e+02 2.837e+02 3.630e+02 6.769e+02, threshold=5.675e+02, percent-clipped=2.0 +2023-02-07 10:42:00,859 INFO [train.py:901] (1/4) Epoch 26, batch 6050, loss[loss=0.186, simple_loss=0.2655, pruned_loss=0.05328, over 7800.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.05805, over 1614514.44 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:42:25,845 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2208, 4.1948, 3.7969, 1.9999, 3.6420, 3.7843, 3.7046, 3.5815], + device='cuda:1'), covar=tensor([0.0658, 0.0528, 0.0954, 0.4114, 0.0867, 0.1008, 0.1260, 0.0789], + device='cuda:1'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0541, 0.0427, 0.0451, 0.0427, 0.0395], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:42:36,036 INFO [train.py:901] (1/4) Epoch 26, batch 6100, loss[loss=0.2172, simple_loss=0.2952, pruned_loss=0.06955, over 8464.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2819, pruned_loss=0.05759, over 1615994.16 frames. 
], batch size: 28, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:42:48,603 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 10:43:01,339 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.422e+02 2.947e+02 3.994e+02 1.088e+03, threshold=5.894e+02, percent-clipped=8.0 +2023-02-07 10:43:06,118 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5508, 4.6323, 4.0911, 2.0302, 3.9774, 4.1655, 4.1113, 3.9577], + device='cuda:1'), covar=tensor([0.0717, 0.0505, 0.1109, 0.4645, 0.0897, 0.0967, 0.1194, 0.0813], + device='cuda:1'), in_proj_covar=tensor([0.0532, 0.0450, 0.0438, 0.0545, 0.0429, 0.0454, 0.0429, 0.0397], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:43:10,773 INFO [train.py:901] (1/4) Epoch 26, batch 6150, loss[loss=0.2279, simple_loss=0.3004, pruned_loss=0.07776, over 8100.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2831, pruned_loss=0.05849, over 1616410.38 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:45,589 INFO [train.py:901] (1/4) Epoch 26, batch 6200, loss[loss=0.2158, simple_loss=0.309, pruned_loss=0.06135, over 7972.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2841, pruned_loss=0.05906, over 1613394.67 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:53,058 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208283.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:44:10,211 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 2.196e+02 2.837e+02 3.308e+02 7.178e+02, threshold=5.674e+02, percent-clipped=2.0 +2023-02-07 10:44:18,995 INFO [train.py:901] (1/4) Epoch 26, batch 6250, loss[loss=0.2239, simple_loss=0.3099, pruned_loss=0.06892, over 8353.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2841, pruned_loss=0.0594, over 1614402.53 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:44:55,647 INFO [train.py:901] (1/4) Epoch 26, batch 6300, loss[loss=0.2211, simple_loss=0.3031, pruned_loss=0.06959, over 8360.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2836, pruned_loss=0.05942, over 1607988.29 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:45:10,476 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:45:20,552 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.443e+02 2.919e+02 3.618e+02 1.192e+03, threshold=5.838e+02, percent-clipped=3.0 +2023-02-07 10:45:29,139 INFO [train.py:901] (1/4) Epoch 26, batch 6350, loss[loss=0.1782, simple_loss=0.2666, pruned_loss=0.04486, over 8470.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2831, pruned_loss=0.05898, over 1607360.98 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:04,931 INFO [train.py:901] (1/4) Epoch 26, batch 6400, loss[loss=0.1915, simple_loss=0.2741, pruned_loss=0.05446, over 8102.00 frames. ], tot_loss[loss=0.199, simple_loss=0.282, pruned_loss=0.05797, over 1605507.71 frames. 
], batch size: 21, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:30,394 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1753, 2.0187, 2.5771, 2.1575, 2.5351, 2.3035, 2.1379, 1.4000], + device='cuda:1'), covar=tensor([0.5890, 0.4984, 0.2086, 0.4084, 0.2847, 0.3191, 0.2015, 0.5608], + device='cuda:1'), in_proj_covar=tensor([0.0962, 0.1013, 0.0831, 0.0985, 0.1022, 0.0923, 0.0770, 0.0847], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:46:30,799 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.580e+02 3.188e+02 3.813e+02 6.849e+02, threshold=6.376e+02, percent-clipped=3.0 +2023-02-07 10:46:31,000 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:46:39,660 INFO [train.py:901] (1/4) Epoch 26, batch 6450, loss[loss=0.2018, simple_loss=0.2858, pruned_loss=0.05894, over 7798.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2817, pruned_loss=0.05788, over 1608097.04 frames. ], batch size: 20, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:13,750 INFO [train.py:901] (1/4) Epoch 26, batch 6500, loss[loss=0.1792, simple_loss=0.2731, pruned_loss=0.04261, over 8104.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2821, pruned_loss=0.05782, over 1606461.55 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:29,486 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208594.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:39,232 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.371e+02 2.869e+02 3.528e+02 8.936e+02, threshold=5.738e+02, percent-clipped=3.0 +2023-02-07 10:47:44,302 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6075, 2.4053, 3.1162, 2.5570, 3.0939, 2.5440, 2.4283, 2.0950], + device='cuda:1'), covar=tensor([0.4949, 0.5044, 0.2114, 0.3856, 0.2424, 0.2944, 0.1842, 0.5121], + device='cuda:1'), in_proj_covar=tensor([0.0968, 0.1019, 0.0835, 0.0991, 0.1025, 0.0928, 0.0773, 0.0852], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:47:48,763 INFO [train.py:901] (1/4) Epoch 26, batch 6550, loss[loss=0.2173, simple_loss=0.294, pruned_loss=0.07029, over 7917.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2829, pruned_loss=0.05824, over 1608015.09 frames. ], batch size: 20, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:52,200 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:56,158 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 10:48:12,882 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:48:22,503 INFO [train.py:901] (1/4) Epoch 26, batch 6600, loss[loss=0.1867, simple_loss=0.279, pruned_loss=0.04718, over 8344.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05847, over 1608527.38 frames. 
], batch size: 24, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:48:49,203 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.581e+02 2.930e+02 3.571e+02 6.165e+02, threshold=5.859e+02, percent-clipped=2.0 +2023-02-07 10:48:58,727 INFO [train.py:901] (1/4) Epoch 26, batch 6650, loss[loss=0.1756, simple_loss=0.254, pruned_loss=0.04866, over 7788.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2831, pruned_loss=0.05848, over 1608080.07 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:12,771 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:28,886 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:29,622 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0849, 1.9444, 2.5046, 2.0997, 2.5845, 2.1675, 1.9448, 1.4302], + device='cuda:1'), covar=tensor([0.6398, 0.5282, 0.2119, 0.4288, 0.2875, 0.3535, 0.2346, 0.5742], + device='cuda:1'), in_proj_covar=tensor([0.0967, 0.1017, 0.0834, 0.0989, 0.1024, 0.0927, 0.0771, 0.0849], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:49:33,544 INFO [train.py:901] (1/4) Epoch 26, batch 6700, loss[loss=0.2676, simple_loss=0.3323, pruned_loss=0.1015, over 8471.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05887, over 1611136.86 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:46,224 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=208790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:59,643 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.530e+02 3.053e+02 4.076e+02 9.744e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-07 10:50:03,310 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8437, 1.8888, 1.7553, 2.3139, 1.0206, 1.5797, 1.7372, 1.8598], + device='cuda:1'), covar=tensor([0.0729, 0.0715, 0.0897, 0.0389, 0.1113, 0.1218, 0.0720, 0.0804], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0212, 0.0203, 0.0246, 0.0250, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 10:50:09,371 INFO [train.py:901] (1/4) Epoch 26, batch 6750, loss[loss=0.1871, simple_loss=0.2608, pruned_loss=0.05667, over 7447.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2828, pruned_loss=0.05842, over 1610375.52 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:30,943 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 10:50:43,653 INFO [train.py:901] (1/4) Epoch 26, batch 6800, loss[loss=0.1868, simple_loss=0.2601, pruned_loss=0.05677, over 7539.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2842, pruned_loss=0.05885, over 1612829.60 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:57,054 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-02-07 10:51:08,807 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.376e+02 2.847e+02 3.449e+02 1.016e+03, threshold=5.694e+02, percent-clipped=2.0 +2023-02-07 10:51:18,814 INFO [train.py:901] (1/4) Epoch 26, batch 6850, loss[loss=0.2016, simple_loss=0.2923, pruned_loss=0.05546, over 8465.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2853, pruned_loss=0.05926, over 1619025.58 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:51:19,504 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 10:51:30,533 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:51:33,384 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9110, 1.9233, 1.7128, 2.5562, 1.1556, 1.5706, 1.9141, 1.9919], + device='cuda:1'), covar=tensor([0.0730, 0.0870, 0.0916, 0.0396, 0.1124, 0.1293, 0.0848, 0.0819], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0212, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 10:51:37,663 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3975, 2.1801, 2.6987, 2.2583, 2.7936, 2.4549, 2.2919, 1.6994], + device='cuda:1'), covar=tensor([0.5631, 0.5141, 0.2238, 0.4033, 0.2642, 0.3071, 0.1877, 0.5387], + device='cuda:1'), in_proj_covar=tensor([0.0961, 0.1014, 0.0830, 0.0986, 0.1020, 0.0924, 0.0769, 0.0847], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:51:42,402 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5376, 2.3314, 3.1221, 2.4592, 3.0285, 2.5789, 2.4390, 1.8379], + device='cuda:1'), covar=tensor([0.5550, 0.5400, 0.2074, 0.4045, 0.2640, 0.3141, 0.2065, 0.5805], + device='cuda:1'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1019, 0.0924, 0.0769, 0.0846], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:51:45,310 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 10:51:54,559 INFO [train.py:901] (1/4) Epoch 26, batch 6900, loss[loss=0.1849, simple_loss=0.2492, pruned_loss=0.06036, over 7711.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05907, over 1615232.22 frames. 
], batch size: 18, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:10,630 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0527, 3.4645, 2.0649, 2.7825, 2.5807, 2.0380, 2.5756, 2.8955], + device='cuda:1'), covar=tensor([0.1724, 0.0399, 0.1314, 0.0818, 0.0846, 0.1440, 0.1163, 0.1127], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0239, 0.0339, 0.0310, 0.0302, 0.0345, 0.0347, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 10:52:12,020 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:12,736 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9753, 1.7718, 2.0643, 1.8473, 2.0352, 2.0129, 1.8743, 0.8468], + device='cuda:1'), covar=tensor([0.6423, 0.4972, 0.2283, 0.4033, 0.2714, 0.3428, 0.2224, 0.5785], + device='cuda:1'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1018, 0.0923, 0.0769, 0.0846], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:52:20,225 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.430e+02 2.933e+02 3.890e+02 9.541e+02, threshold=5.866e+02, percent-clipped=7.0 +2023-02-07 10:52:23,035 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 10:52:23,719 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4530, 4.4044, 3.9985, 2.5645, 3.8605, 4.0393, 4.0389, 3.8535], + device='cuda:1'), covar=tensor([0.0641, 0.0578, 0.1051, 0.3713, 0.0895, 0.1075, 0.1138, 0.0873], + device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0456, 0.0442, 0.0552, 0.0436, 0.0460, 0.0436, 0.0402], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:52:28,982 INFO [train.py:901] (1/4) Epoch 26, batch 6950, loss[loss=0.1895, simple_loss=0.2589, pruned_loss=0.06006, over 7455.00 frames. ], tot_loss[loss=0.201, simple_loss=0.284, pruned_loss=0.05897, over 1615573.84 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:29,848 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:51,124 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:51,873 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8970, 1.7897, 2.8074, 2.2059, 2.6456, 1.9454, 1.7262, 1.4495], + device='cuda:1'), covar=tensor([0.7524, 0.6713, 0.2244, 0.4677, 0.3197, 0.4711, 0.3053, 0.6209], + device='cuda:1'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1017, 0.0922, 0.0770, 0.0847], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 10:53:04,217 INFO [train.py:901] (1/4) Epoch 26, batch 7000, loss[loss=0.2117, simple_loss=0.296, pruned_loss=0.06371, over 8512.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05883, over 1616263.50 frames. 
], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:30,442 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.543e+02 3.075e+02 4.223e+02 1.225e+03, threshold=6.150e+02, percent-clipped=4.0 +2023-02-07 10:53:38,407 INFO [train.py:901] (1/4) Epoch 26, batch 7050, loss[loss=0.203, simple_loss=0.266, pruned_loss=0.07005, over 7797.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2839, pruned_loss=0.05917, over 1610734.18 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:44,279 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7329, 2.0906, 3.1409, 1.6360, 2.5497, 2.1796, 1.7710, 2.5156], + device='cuda:1'), covar=tensor([0.1861, 0.2679, 0.0950, 0.4457, 0.1816, 0.3141, 0.2405, 0.2289], + device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0633, 0.0563, 0.0668, 0.0660, 0.0610, 0.0561, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:54:15,165 INFO [train.py:901] (1/4) Epoch 26, batch 7100, loss[loss=0.1685, simple_loss=0.2443, pruned_loss=0.04634, over 7553.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2828, pruned_loss=0.05878, over 1609311.35 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:54:42,202 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.538e+02 3.057e+02 3.964e+02 1.199e+03, threshold=6.114e+02, percent-clipped=9.0 +2023-02-07 10:54:49,195 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209220.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:54:50,427 INFO [train.py:901] (1/4) Epoch 26, batch 7150, loss[loss=0.2141, simple_loss=0.3027, pruned_loss=0.0628, over 8037.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2832, pruned_loss=0.0593, over 1607083.87 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:25,278 INFO [train.py:901] (1/4) Epoch 26, batch 7200, loss[loss=0.2048, simple_loss=0.2869, pruned_loss=0.06139, over 8517.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2833, pruned_loss=0.05893, over 1613096.68 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:26,110 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:51,833 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:52,284 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.329e+02 2.733e+02 3.562e+02 6.414e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 10:55:56,509 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209316.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:00,335 INFO [train.py:901] (1/4) Epoch 26, batch 7250, loss[loss=0.1632, simple_loss=0.2426, pruned_loss=0.04192, over 7223.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2832, pruned_loss=0.0588, over 1612984.47 frames. ], batch size: 16, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:08,438 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:33,562 INFO [train.py:901] (1/4) Epoch 26, batch 7300, loss[loss=0.1827, simple_loss=0.268, pruned_loss=0.04868, over 8325.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2835, pruned_loss=0.05863, over 1616375.34 frames. 
], batch size: 25, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:56,000 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2586, 4.2311, 3.8897, 1.9756, 3.8182, 3.8104, 3.7845, 3.6900], + device='cuda:1'), covar=tensor([0.0714, 0.0525, 0.1027, 0.4299, 0.0824, 0.1010, 0.1233, 0.0913], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0458, 0.0445, 0.0556, 0.0439, 0.0464, 0.0439, 0.0404], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 10:56:59,846 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.463e+02 3.055e+02 3.934e+02 7.151e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 10:57:06,738 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 10:57:09,437 INFO [train.py:901] (1/4) Epoch 26, batch 7350, loss[loss=0.2221, simple_loss=0.2958, pruned_loss=0.07418, over 8083.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.05929, over 1612567.49 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:26,304 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 10:57:42,849 INFO [train.py:901] (1/4) Epoch 26, batch 7400, loss[loss=0.2055, simple_loss=0.2873, pruned_loss=0.06182, over 8350.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.06003, over 1613550.14 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:50,999 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:04,813 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 10:58:09,479 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.534e+02 3.042e+02 3.812e+02 9.347e+02, threshold=6.084e+02, percent-clipped=5.0 +2023-02-07 10:58:17,131 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8496, 2.2568, 3.7000, 1.8778, 1.7562, 3.5287, 0.6879, 2.1392], + device='cuda:1'), covar=tensor([0.1481, 0.1434, 0.0317, 0.1823, 0.2645, 0.0410, 0.2226, 0.1555], + device='cuda:1'), in_proj_covar=tensor([0.0197, 0.0205, 0.0133, 0.0224, 0.0276, 0.0144, 0.0172, 0.0198], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 10:58:17,541 INFO [train.py:901] (1/4) Epoch 26, batch 7450, loss[loss=0.185, simple_loss=0.2662, pruned_loss=0.05194, over 7419.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2854, pruned_loss=0.0599, over 1606527.88 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:58:22,478 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:47,408 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209564.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:52,736 INFO [train.py:901] (1/4) Epoch 26, batch 7500, loss[loss=0.1789, simple_loss=0.2498, pruned_loss=0.05396, over 7529.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2847, pruned_loss=0.0596, over 1605715.64 frames. 
], batch size: 18, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:59:18,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.436e+02 2.983e+02 3.503e+02 8.056e+02, threshold=5.967e+02, percent-clipped=5.0 +2023-02-07 10:59:24,168 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:27,346 INFO [train.py:901] (1/4) Epoch 26, batch 7550, loss[loss=0.1783, simple_loss=0.269, pruned_loss=0.04378, over 8293.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05925, over 1608262.44 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 10:59:28,767 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:54,106 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:55,495 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2403, 3.1803, 2.9535, 1.6273, 2.8603, 2.9392, 2.8567, 2.8045], + device='cuda:1'), covar=tensor([0.1139, 0.0830, 0.1347, 0.4468, 0.1067, 0.1423, 0.1495, 0.1149], + device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0456, 0.0443, 0.0554, 0.0436, 0.0462, 0.0436, 0.0403], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 11:00:01,950 INFO [train.py:901] (1/4) Epoch 26, batch 7600, loss[loss=0.1891, simple_loss=0.2757, pruned_loss=0.05119, over 8322.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2828, pruned_loss=0.05909, over 1601499.20 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:07,023 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:25,098 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209706.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:27,679 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.404e+02 2.880e+02 3.478e+02 6.437e+02, threshold=5.761e+02, percent-clipped=3.0 +2023-02-07 11:00:35,718 INFO [train.py:901] (1/4) Epoch 26, batch 7650, loss[loss=0.2215, simple_loss=0.3012, pruned_loss=0.07088, over 8684.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2819, pruned_loss=0.05806, over 1607632.55 frames. ], batch size: 34, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:43,081 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:00,268 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. limit=5.0 +2023-02-07 11:01:10,731 INFO [train.py:901] (1/4) Epoch 26, batch 7700, loss[loss=0.1898, simple_loss=0.2819, pruned_loss=0.04889, over 8195.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2822, pruned_loss=0.05833, over 1606301.39 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:12,901 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:14,130 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 11:01:23,009 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. 
limit=2.0 +2023-02-07 11:01:36,858 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.688e+02 3.083e+02 3.850e+02 9.382e+02, threshold=6.167e+02, percent-clipped=8.0 +2023-02-07 11:01:39,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6000, 2.0644, 3.2880, 1.4981, 2.4906, 2.0346, 1.6859, 2.4718], + device='cuda:1'), covar=tensor([0.2008, 0.2720, 0.0909, 0.4843, 0.1969, 0.3493, 0.2601, 0.2435], + device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0631, 0.0560, 0.0666, 0.0659, 0.0610, 0.0559, 0.0644], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 11:01:44,902 INFO [train.py:901] (1/4) Epoch 26, batch 7750, loss[loss=0.209, simple_loss=0.298, pruned_loss=0.05999, over 8328.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2828, pruned_loss=0.05863, over 1608964.27 frames. ], batch size: 26, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:48,919 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:19,759 INFO [train.py:901] (1/4) Epoch 26, batch 7800, loss[loss=0.1945, simple_loss=0.2881, pruned_loss=0.05048, over 8461.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2829, pruned_loss=0.05883, over 1606537.07 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:02:19,817 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:44,108 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2811, 1.3473, 3.4028, 1.1706, 3.0096, 2.9091, 3.1221, 3.0477], + device='cuda:1'), covar=tensor([0.0897, 0.4483, 0.0924, 0.4144, 0.1452, 0.1151, 0.0830, 0.0919], + device='cuda:1'), in_proj_covar=tensor([0.0672, 0.0666, 0.0734, 0.0658, 0.0742, 0.0633, 0.0633, 0.0709], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 11:02:45,323 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.465e+02 2.962e+02 3.430e+02 5.705e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-07 11:02:53,239 INFO [train.py:901] (1/4) Epoch 26, batch 7850, loss[loss=0.1639, simple_loss=0.2476, pruned_loss=0.0401, over 8092.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2842, pruned_loss=0.05921, over 1608413.26 frames. ], batch size: 21, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:01,889 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:07,204 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:18,181 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:23,279 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209968.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:25,900 INFO [train.py:901] (1/4) Epoch 26, batch 7900, loss[loss=0.1907, simple_loss=0.2666, pruned_loss=0.05747, over 7654.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2841, pruned_loss=0.05912, over 1613093.69 frames. 
], batch size: 19, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:35,883 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:36,568 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:51,904 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.305e+02 2.795e+02 3.387e+02 5.942e+02, threshold=5.591e+02, percent-clipped=1.0 +2023-02-07 11:03:54,075 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210013.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:59,749 INFO [train.py:901] (1/4) Epoch 26, batch 7950, loss[loss=0.2109, simple_loss=0.2933, pruned_loss=0.06425, over 8257.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2852, pruned_loss=0.0599, over 1612811.64 frames. ], batch size: 24, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:06,009 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210031.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:18,572 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:22,765 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:33,086 INFO [train.py:901] (1/4) Epoch 26, batch 8000, loss[loss=0.1752, simple_loss=0.2586, pruned_loss=0.04586, over 7805.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2836, pruned_loss=0.05929, over 1605458.49 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:35,217 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:40,668 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210083.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:58,002 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.316e+02 2.819e+02 3.710e+02 9.270e+02, threshold=5.638e+02, percent-clipped=7.0 +2023-02-07 11:05:05,834 INFO [train.py:901] (1/4) Epoch 26, batch 8050, loss[loss=0.2088, simple_loss=0.2826, pruned_loss=0.0675, over 7921.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2804, pruned_loss=0.0584, over 1585281.69 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:05:38,157 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 11:05:43,121 INFO [train.py:901] (1/4) Epoch 27, batch 0, loss[loss=0.1783, simple_loss=0.2504, pruned_loss=0.05316, over 7252.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2504, pruned_loss=0.05316, over 7252.00 frames. ], batch size: 16, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:05:43,121 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 11:05:54,193 INFO [train.py:935] (1/4) Epoch 27, validation: loss=0.172, simple_loss=0.2713, pruned_loss=0.03628, over 944034.00 frames. +2023-02-07 11:05:54,195 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 11:06:01,165 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:08,368 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 11:06:24,777 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:28,647 INFO [train.py:901] (1/4) Epoch 27, batch 50, loss[loss=0.1946, simple_loss=0.265, pruned_loss=0.06205, over 7539.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2828, pruned_loss=0.05972, over 361780.31 frames. ], batch size: 18, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:06:33,573 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.417e+02 2.930e+02 3.516e+02 7.088e+02, threshold=5.860e+02, percent-clipped=5.0 +2023-02-07 11:06:41,899 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 11:06:43,309 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:56,868 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:04,542 INFO [train.py:901] (1/4) Epoch 27, batch 100, loss[loss=0.2043, simple_loss=0.2838, pruned_loss=0.06239, over 7639.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2826, pruned_loss=0.05891, over 637334.43 frames. ], batch size: 19, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:05,184 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 11:07:13,381 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210268.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:37,991 INFO [train.py:901] (1/4) Epoch 27, batch 150, loss[loss=0.1725, simple_loss=0.2415, pruned_loss=0.05173, over 4945.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2844, pruned_loss=0.05922, over 855270.38 frames. ], batch size: 11, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:41,158 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.352e+02 2.905e+02 3.661e+02 1.089e+03, threshold=5.811e+02, percent-clipped=3.0 +2023-02-07 11:07:41,641 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 11:08:02,857 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:14,086 INFO [train.py:901] (1/4) Epoch 27, batch 200, loss[loss=0.2122, simple_loss=0.2921, pruned_loss=0.06613, over 8605.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.059, over 1024424.00 frames. ], batch size: 34, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:08:20,492 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:48,349 INFO [train.py:901] (1/4) Epoch 27, batch 250, loss[loss=0.1786, simple_loss=0.2668, pruned_loss=0.04518, over 8254.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2838, pruned_loss=0.05854, over 1157542.17 frames. ], batch size: 24, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:08:49,370 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 11:08:51,589 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.304e+02 2.819e+02 3.559e+02 6.263e+02, threshold=5.638e+02, percent-clipped=1.0 +2023-02-07 11:08:57,573 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 11:08:57,625 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:59,061 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210421.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:06,476 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 11:09:10,762 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1579, 2.0223, 2.6738, 2.2558, 2.7452, 2.2545, 2.1528, 1.5413], + device='cuda:1'), covar=tensor([0.6253, 0.5226, 0.2212, 0.4176, 0.2731, 0.3337, 0.2040, 0.5844], + device='cuda:1'), in_proj_covar=tensor([0.0965, 0.1017, 0.0828, 0.0985, 0.1021, 0.0926, 0.0772, 0.0847], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 11:09:15,930 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:23,015 INFO [train.py:901] (1/4) Epoch 27, batch 300, loss[loss=0.1779, simple_loss=0.2614, pruned_loss=0.04715, over 7917.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.285, pruned_loss=0.05879, over 1265464.35 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:09:57,348 INFO [train.py:901] (1/4) Epoch 27, batch 350, loss[loss=0.1838, simple_loss=0.2793, pruned_loss=0.04409, over 8080.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2832, pruned_loss=0.05729, over 1340474.10 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:10:00,691 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.321e+02 2.740e+02 3.479e+02 7.751e+02, threshold=5.481e+02, percent-clipped=4.0 +2023-02-07 11:10:06,929 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4301, 1.7904, 1.3686, 3.0587, 1.4193, 1.4694, 2.2034, 2.0165], + device='cuda:1'), covar=tensor([0.1680, 0.1380, 0.2011, 0.0326, 0.1335, 0.1839, 0.0929, 0.1044], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0194, 0.0246, 0.0211, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 11:10:16,900 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:10:29,881 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 11:10:30,766 INFO [train.py:901] (1/4) Epoch 27, batch 400, loss[loss=0.1986, simple_loss=0.2914, pruned_loss=0.05286, over 8501.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2832, pruned_loss=0.05745, over 1404708.88 frames. ], batch size: 29, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:06,891 INFO [train.py:901] (1/4) Epoch 27, batch 450, loss[loss=0.2037, simple_loss=0.2937, pruned_loss=0.05689, over 8467.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.283, pruned_loss=0.05775, over 1453086.73 frames. 
], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:10,233 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.445e+02 3.096e+02 3.744e+02 6.670e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 11:11:28,567 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4493, 4.3997, 3.9842, 2.3056, 3.8543, 4.0768, 3.9574, 3.9322], + device='cuda:1'), covar=tensor([0.0681, 0.0558, 0.0963, 0.4098, 0.0819, 0.1014, 0.1226, 0.0819], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0458, 0.0442, 0.0555, 0.0437, 0.0460, 0.0436, 0.0405], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 11:11:37,202 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:11:40,169 INFO [train.py:901] (1/4) Epoch 27, batch 500, loss[loss=0.2368, simple_loss=0.3186, pruned_loss=0.07749, over 8567.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.05845, over 1492111.16 frames. ], batch size: 31, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:55,130 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4628, 2.7272, 3.0002, 1.8524, 3.1594, 2.0270, 1.6633, 2.4617], + device='cuda:1'), covar=tensor([0.0819, 0.0458, 0.0405, 0.0851, 0.0584, 0.0943, 0.0997, 0.0559], + device='cuda:1'), in_proj_covar=tensor([0.0469, 0.0406, 0.0360, 0.0458, 0.0392, 0.0548, 0.0402, 0.0436], + device='cuda:1'), out_proj_covar=tensor([1.2454e-04, 1.0560e-04, 9.4168e-05, 1.2007e-04, 1.0262e-04, 1.5318e-04, + 1.0753e-04, 1.1439e-04], device='cuda:1') +2023-02-07 11:12:01,202 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:12,486 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 11:12:12,810 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:15,969 INFO [train.py:901] (1/4) Epoch 27, batch 550, loss[loss=0.2183, simple_loss=0.2939, pruned_loss=0.07139, over 8131.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05822, over 1521009.90 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:12:19,365 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.336e+02 2.792e+02 3.793e+02 8.487e+02, threshold=5.584e+02, percent-clipped=3.0 +2023-02-07 11:12:50,315 INFO [train.py:901] (1/4) Epoch 27, batch 600, loss[loss=0.2284, simple_loss=0.3074, pruned_loss=0.0747, over 8467.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2833, pruned_loss=0.05828, over 1543230.11 frames. ], batch size: 29, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:08,465 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:11,587 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 11:13:13,656 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:23,584 INFO [train.py:901] (1/4) Epoch 27, batch 650, loss[loss=0.2271, simple_loss=0.3086, pruned_loss=0.07285, over 8582.00 frames. 
], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05808, over 1559936.53 frames. ], batch size: 39, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:28,267 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.351e+02 2.894e+02 3.474e+02 6.032e+02, threshold=5.788e+02, percent-clipped=3.0 +2023-02-07 11:13:32,521 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:59,775 INFO [train.py:901] (1/4) Epoch 27, batch 700, loss[loss=0.1541, simple_loss=0.2412, pruned_loss=0.03347, over 7701.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2821, pruned_loss=0.05749, over 1571727.84 frames. ], batch size: 18, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:28,999 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9118, 3.3060, 1.8508, 2.7104, 2.5257, 1.6123, 2.3609, 2.9757], + device='cuda:1'), covar=tensor([0.1857, 0.0552, 0.1612, 0.0795, 0.1048, 0.2157, 0.1460, 0.1025], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0239, 0.0338, 0.0309, 0.0301, 0.0343, 0.0345, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 11:14:32,720 INFO [train.py:901] (1/4) Epoch 27, batch 750, loss[loss=0.1846, simple_loss=0.2597, pruned_loss=0.05472, over 7202.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2824, pruned_loss=0.05824, over 1581394.37 frames. ], batch size: 16, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:35,973 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.536e+02 2.996e+02 3.960e+02 1.304e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 11:14:54,973 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 11:15:04,237 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 11:15:08,850 INFO [train.py:901] (1/4) Epoch 27, batch 800, loss[loss=0.1757, simple_loss=0.2647, pruned_loss=0.04337, over 8244.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2827, pruned_loss=0.05798, over 1592450.72 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:15:11,731 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-07 11:15:34,998 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:38,617 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 11:15:40,388 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:42,228 INFO [train.py:901] (1/4) Epoch 27, batch 850, loss[loss=0.1916, simple_loss=0.286, pruned_loss=0.0486, over 7802.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05766, over 1600529.76 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:15:45,636 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.265e+02 2.725e+02 3.482e+02 8.151e+02, threshold=5.450e+02, percent-clipped=2.0 +2023-02-07 11:15:46,884 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0 +2023-02-07 11:15:57,715 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:10,423 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:17,676 INFO [train.py:901] (1/4) Epoch 27, batch 900, loss[loss=0.2123, simple_loss=0.2959, pruned_loss=0.06434, over 8247.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2846, pruned_loss=0.05884, over 1602070.04 frames. ], batch size: 24, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:51,933 INFO [train.py:901] (1/4) Epoch 27, batch 950, loss[loss=0.1886, simple_loss=0.2744, pruned_loss=0.05141, over 8512.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2835, pruned_loss=0.05804, over 1606086.27 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:54,833 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:55,251 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.483e+02 2.981e+02 4.008e+02 9.530e+02, threshold=5.961e+02, percent-clipped=10.0 +2023-02-07 11:17:06,151 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:11,860 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 11:17:17,580 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211143.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:18,070 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 11:17:26,828 INFO [train.py:901] (1/4) Epoch 27, batch 1000, loss[loss=0.2429, simple_loss=0.3287, pruned_loss=0.07855, over 8739.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2829, pruned_loss=0.05771, over 1606335.07 frames. ], batch size: 30, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:17:30,481 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:51,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9938, 1.6110, 1.3310, 1.6091, 1.2824, 1.1556, 1.3236, 1.3020], + device='cuda:1'), covar=tensor([0.1259, 0.0543, 0.1478, 0.0572, 0.0975, 0.1773, 0.0975, 0.0823], + device='cuda:1'), in_proj_covar=tensor([0.0360, 0.0239, 0.0340, 0.0311, 0.0304, 0.0344, 0.0346, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 11:17:53,343 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 11:18:03,293 INFO [train.py:901] (1/4) Epoch 27, batch 1050, loss[loss=0.2108, simple_loss=0.274, pruned_loss=0.07377, over 7724.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2822, pruned_loss=0.05745, over 1606600.26 frames. ], batch size: 18, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:18:05,225 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
+2023-02-07 11:18:07,260 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.494e+02 3.070e+02 3.818e+02 8.233e+02, threshold=6.140e+02, percent-clipped=4.0
+2023-02-07 11:18:08,732 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3475, 2.0777, 2.5963, 2.2028, 2.5621, 2.3617, 2.2343, 1.5791],
+       device='cuda:1'), covar=tensor([0.5580, 0.4947, 0.2272, 0.4269, 0.2876, 0.3346, 0.1920, 0.5694],
+       device='cuda:1'), in_proj_covar=tensor([0.0964, 0.1018, 0.0831, 0.0988, 0.1021, 0.0928, 0.0771, 0.0850],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-07 11:18:27,160 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211241.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:18:36,463 INFO [train.py:901] (1/4) Epoch 27, batch 1100, loss[loss=0.211, simple_loss=0.2848, pruned_loss=0.06857, over 7795.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05769, over 1607283.16 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:19:13,344 INFO [train.py:901] (1/4) Epoch 27, batch 1150, loss[loss=0.1662, simple_loss=0.251, pruned_loss=0.0407, over 7782.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05774, over 1609802.37 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:19:15,965 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-07 11:19:17,147 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-02-07 11:19:17,280 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.371e+02 2.782e+02 3.549e+02 6.262e+02, threshold=5.564e+02, percent-clipped=1.0
+2023-02-07 11:19:40,860 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211346.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:19:46,762 INFO [train.py:901] (1/4) Epoch 27, batch 1200, loss[loss=0.21, simple_loss=0.3023, pruned_loss=0.0589, over 8440.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2836, pruned_loss=0.05872, over 1609957.26 frames. ], batch size: 27, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:19:53,750 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211365.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:08,485 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0970, 2.2709, 1.8280, 2.8380, 1.3948, 1.6356, 2.1063, 2.1846],
+       device='cuda:1'), covar=tensor([0.0681, 0.0777, 0.0831, 0.0338, 0.1085, 0.1281, 0.0800, 0.0779],
+       device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0211, 0.0202, 0.0244, 0.0249, 0.0204],
+       device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:1')
+2023-02-07 11:20:11,156 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211390.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:18,556 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211399.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:21,056 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6722, 4.7330, 4.2158, 2.1126, 4.1231, 4.3632, 4.3068, 4.1413],
+       device='cuda:1'), covar=tensor([0.0725, 0.0495, 0.1084, 0.4654, 0.0885, 0.0956, 0.1188, 0.0734],
+       device='cuda:1'), in_proj_covar=tensor([0.0543, 0.0459, 0.0446, 0.0557, 0.0440, 0.0463, 0.0437, 0.0407],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:20:21,765 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211404.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:22,040 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-07 11:20:22,323 INFO [train.py:901] (1/4) Epoch 27, batch 1250, loss[loss=0.1963, simple_loss=0.2914, pruned_loss=0.05054, over 8191.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.0585, over 1613738.26 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:20:26,159 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.344e+02 2.922e+02 3.484e+02 6.390e+02, threshold=5.843e+02, percent-clipped=2.0
+2023-02-07 11:20:29,708 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211415.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:35,518 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211424.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:39,475 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211430.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:46,223 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211440.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:20:49,955 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-07 11:20:56,233 INFO [train.py:901] (1/4) Epoch 27, batch 1300, loss[loss=0.1446, simple_loss=0.2241, pruned_loss=0.03257, over 7435.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2825, pruned_loss=0.05798, over 1609807.84 frames. ], batch size: 17, lr: 2.81e-03, grad_scale: 8.0
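The periodic `attn_weights_entropy` dumps above are a diagnostic: for each attention head the entropy of its weight distribution is logged, which makes it easy to spot heads that collapse to near-deterministic attention (entropy near 0) or stay near-uniform. A small sketch of how such a per-head statistic can be computed; this is illustrative and not the exact code at zipformer.py:2431:

```python
import torch

def attention_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """Entropy of attention weights, averaged over batch and query positions.

    attn_weights: (num_heads, batch, query_len, key_len), rows sum to 1.
    Returns one entropy value per head, like the logged tensors.
    """
    eps = 1.0e-20  # guard against log(0)
    ent = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return ent.mean(dim=(1, 2))  # -> (num_heads,)
```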
+2023-02-07 11:20:59,839 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6022, 2.1120, 3.2898, 1.4775, 2.3251, 2.0223, 1.7564, 2.5060],
+       device='cuda:1'), covar=tensor([0.1871, 0.2453, 0.0973, 0.4679, 0.2032, 0.3248, 0.2311, 0.2268],
+       device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0632, 0.0564, 0.0669, 0.0659, 0.0610, 0.0560, 0.0644],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:21:00,455 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211461.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:21:23,172 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9700, 1.6807, 3.3117, 1.4349, 2.3113, 3.6074, 3.7316, 3.0919],
+       device='cuda:1'), covar=tensor([0.1235, 0.1704, 0.0307, 0.2191, 0.0946, 0.0246, 0.0537, 0.0522],
+       device='cuda:1'), in_proj_covar=tensor([0.0307, 0.0327, 0.0293, 0.0321, 0.0320, 0.0278, 0.0437, 0.0307],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+       device='cuda:1')
+2023-02-07 11:21:24,574 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211497.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:21:30,516 INFO [train.py:901] (1/4) Epoch 27, batch 1350, loss[loss=0.1742, simple_loss=0.2522, pruned_loss=0.04809, over 7660.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2812, pruned_loss=0.05687, over 1610131.64 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:21:34,485 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.433e+02 2.859e+02 3.519e+02 6.900e+02, threshold=5.717e+02, percent-clipped=5.0
+2023-02-07 11:21:43,599 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211522.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:22:05,946 INFO [train.py:901] (1/4) Epoch 27, batch 1400, loss[loss=0.2124, simple_loss=0.285, pruned_loss=0.06994, over 8233.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.05698, over 1608594.06 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:22:39,557 INFO [train.py:901] (1/4) Epoch 27, batch 1450, loss[loss=0.1933, simple_loss=0.2648, pruned_loss=0.0609, over 7212.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05704, over 1611086.15 frames. ], batch size: 16, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:22:43,650 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.739e+02 3.417e+02 5.363e+02 1.739e+03, threshold=6.835e+02, percent-clipped=22.0
+2023-02-07 11:22:45,702 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-07 11:23:15,882 INFO [train.py:901] (1/4) Epoch 27, batch 1500, loss[loss=0.1803, simple_loss=0.255, pruned_loss=0.05275, over 7437.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2811, pruned_loss=0.05705, over 1605362.17 frames. ], batch size: 17, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:23:42,394 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6053, 1.9186, 5.8018, 2.5943, 4.9093, 4.8054, 5.3844, 5.3670],
+       device='cuda:1'), covar=tensor([0.1265, 0.6937, 0.0712, 0.4535, 0.1961, 0.1562, 0.1118, 0.0817],
+       device='cuda:1'), in_proj_covar=tensor([0.0673, 0.0667, 0.0734, 0.0656, 0.0744, 0.0633, 0.0633, 0.0713],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:23:49,659 INFO [train.py:901] (1/4) Epoch 27, batch 1550, loss[loss=0.1848, simple_loss=0.2547, pruned_loss=0.05744, over 7436.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2808, pruned_loss=0.05712, over 1602697.05 frames. ], batch size: 17, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:23:53,681 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.372e+02 3.027e+02 3.476e+02 5.786e+02, threshold=6.054e+02, percent-clipped=0.0
+2023-02-07 11:23:57,834 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211717.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:24:15,283 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211742.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:24:19,434 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211748.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:24:24,775 INFO [train.py:901] (1/4) Epoch 27, batch 1600, loss[loss=0.2445, simple_loss=0.3181, pruned_loss=0.08546, over 8529.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2807, pruned_loss=0.05709, over 1606141.23 frames. ], batch size: 28, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:24:38,876 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211774.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:24:59,721 INFO [train.py:901] (1/4) Epoch 27, batch 1650, loss[loss=0.192, simple_loss=0.2864, pruned_loss=0.04874, over 8485.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2804, pruned_loss=0.05686, over 1610471.38 frames. ], batch size: 29, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:25:03,803 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.547e+02 3.089e+02 3.889e+02 1.356e+03, threshold=6.177e+02, percent-clipped=3.0
+2023-02-07 11:25:15,707 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4419, 1.8601, 2.8948, 1.3239, 2.1210, 1.8023, 1.5669, 2.1581],
+       device='cuda:1'), covar=tensor([0.2425, 0.3040, 0.1133, 0.5477, 0.2365, 0.4013, 0.2976, 0.2870],
+       device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0634, 0.0566, 0.0670, 0.0659, 0.0610, 0.0562, 0.0648],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:25:18,306 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7661, 5.9538, 5.1944, 2.8262, 5.1873, 5.6884, 5.3298, 5.4350],
+       device='cuda:1'), covar=tensor([0.0516, 0.0404, 0.0916, 0.3902, 0.0780, 0.0631, 0.1095, 0.0456],
+       device='cuda:1'), in_proj_covar=tensor([0.0543, 0.0459, 0.0445, 0.0557, 0.0441, 0.0463, 0.0437, 0.0406],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:25:34,300 INFO [train.py:901] (1/4) Epoch 27, batch 1700, loss[loss=0.2014, simple_loss=0.289, pruned_loss=0.05692, over 8552.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2796, pruned_loss=0.05566, over 1613032.93 frames. ], batch size: 31, lr: 2.81e-03, grad_scale: 8.0
+2023-02-07 11:25:39,905 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:25:57,119 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9663, 1.6612, 3.4385, 1.4406, 2.2727, 3.8426, 3.9705, 3.3060],
+       device='cuda:1'), covar=tensor([0.1252, 0.1790, 0.0337, 0.2312, 0.1215, 0.0248, 0.0601, 0.0525],
+       device='cuda:1'), in_proj_covar=tensor([0.0307, 0.0329, 0.0295, 0.0322, 0.0323, 0.0279, 0.0440, 0.0308],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+       device='cuda:1')
+2023-02-07 11:25:59,114 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211889.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:26:09,570 INFO [train.py:901] (1/4) Epoch 27, batch 1750, loss[loss=0.197, simple_loss=0.2783, pruned_loss=0.05783, over 7700.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2803, pruned_loss=0.05577, over 1613738.80 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:26:13,486 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 2.460e+02 2.972e+02 3.773e+02 5.726e+02, threshold=5.944e+02, percent-clipped=0.0
+2023-02-07 11:26:24,664 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.81 vs. limit=5.0
+2023-02-07 11:26:43,427 INFO [train.py:901] (1/4) Epoch 27, batch 1800, loss[loss=0.1749, simple_loss=0.2598, pruned_loss=0.045, over 7824.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2794, pruned_loss=0.05551, over 1609290.15 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:27:03,805 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-07 11:27:20,494 INFO [train.py:901] (1/4) Epoch 27, batch 1850, loss[loss=0.159, simple_loss=0.2283, pruned_loss=0.04479, over 7704.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2793, pruned_loss=0.05557, over 1608875.87 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:27:24,580 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.252e+02 2.767e+02 3.484e+02 5.487e+02, threshold=5.534e+02, percent-clipped=0.0
+2023-02-07 11:27:54,169 INFO [train.py:901] (1/4) Epoch 27, batch 1900, loss[loss=0.1604, simple_loss=0.243, pruned_loss=0.03889, over 8286.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2792, pruned_loss=0.05576, over 1609571.49 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:28:23,230 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.6699, 5.7336, 5.0472, 2.7792, 5.0263, 5.4630, 5.1964, 5.2863],
+       device='cuda:1'), covar=tensor([0.0511, 0.0418, 0.0951, 0.3970, 0.0778, 0.0891, 0.1104, 0.0517],
+       device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0456, 0.0442, 0.0552, 0.0438, 0.0460, 0.0437, 0.0402],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:28:25,184 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-07 11:28:28,384 INFO [train.py:901] (1/4) Epoch 27, batch 1950, loss[loss=0.1833, simple_loss=0.2618, pruned_loss=0.05239, over 8228.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2795, pruned_loss=0.05562, over 1610920.55 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0
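The `scaling.py ... Whitening: num_groups=..., metric=... vs. limit=...` lines above track how far the covariance of a group of activation channels is from being "white" (all eigenvalues equal); when the metric exceeds the limit, icefall's Whiten module applies a corrective gradient. The function below is a rough, assumed proxy for that statistic, useful only to convey the idea; the real definition lives in icefall's scaling.py and may differ:

```python
import torch

def whiteness_metric(x: torch.Tensor, num_groups: int) -> float:
    # x: (num_frames, num_channels). For a perfectly white covariance all
    # eigenvalues are equal and this metric is 1.0; the log compares such a
    # statistic against a limit (2.0 or 5.0 above). Illustrative proxy only.
    metrics = []
    for g in x.chunk(num_groups, dim=-1):
        cov = (g.T @ g) / g.shape[0]
        eigs = torch.linalg.eigvalsh(cov)
        metrics.append((eigs.pow(2).mean() / eigs.mean().pow(2)).item())
    return max(metrics)
```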
+2023-02-07 11:28:33,122 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.484e+02 3.059e+02 3.727e+02 7.478e+02, threshold=6.119e+02, percent-clipped=3.0
+2023-02-07 11:28:39,154 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-07 11:28:39,365 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212119.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:28:41,242 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212122.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 11:28:56,768 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212144.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:28:57,280 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-07 11:28:57,489 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212145.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:29:03,971 INFO [train.py:901] (1/4) Epoch 27, batch 2000, loss[loss=0.201, simple_loss=0.2944, pruned_loss=0.05379, over 8197.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2815, pruned_loss=0.05644, over 1619595.25 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:29:14,165 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212170.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:29:37,633 INFO [train.py:901] (1/4) Epoch 27, batch 2050, loss[loss=0.224, simple_loss=0.3063, pruned_loss=0.07089, over 8648.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2817, pruned_loss=0.05674, over 1621818.29 frames. ], batch size: 49, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:29:41,738 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.364e+02 2.966e+02 3.655e+02 9.314e+02, threshold=5.932e+02, percent-clipped=4.0
+2023-02-07 11:29:53,462 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5801, 1.3535, 1.5483, 1.3314, 0.9748, 1.3994, 1.3997, 1.2434],
+       device='cuda:1'), covar=tensor([0.0617, 0.1310, 0.1761, 0.1540, 0.0612, 0.1510, 0.0778, 0.0730],
+       device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0163, 0.0102, 0.0164, 0.0113, 0.0146],
+       device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+       device='cuda:1')
+2023-02-07 11:30:13,815 INFO [train.py:901] (1/4) Epoch 27, batch 2100, loss[loss=0.1629, simple_loss=0.2435, pruned_loss=0.04112, over 7235.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.05686, over 1622268.70 frames. ], batch size: 16, lr: 2.80e-03, grad_scale: 8.0
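The zipformer `warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop=...` lines come from layer-skipping regularization: each encoder stack has its own warmup window, and records such as `num_to_drop=1, layers_to_drop={0}` show a whole layer being skipped for that batch. A hedged sketch of the bookkeeping; the probabilities and schedule below are assumptions for illustration, the real schedule in zipformer.py is more involved:

```python
import random

def pick_layers_to_drop(batch_count: float, warmup_end: float,
                        num_layers: int) -> set:
    # Illustrative: drop a random layer more often while the stack is still
    # inside its warmup window, and with a small residual probability
    # afterwards (the log shows occasional num_to_drop=1 long after warmup).
    drop_prob = 0.5 if batch_count < warmup_end else 0.05  # assumed values
    if random.random() < drop_prob:
        return {random.randrange(num_layers)}
    return set()
```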
+2023-02-07 11:30:24,389 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2786, 3.1172, 2.9793, 1.4808, 2.8868, 2.9338, 2.8660, 2.8572],
+       device='cuda:1'), covar=tensor([0.1067, 0.0825, 0.1258, 0.4345, 0.1011, 0.1300, 0.1544, 0.1092],
+       device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0456, 0.0442, 0.0554, 0.0438, 0.0459, 0.0437, 0.0404],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:30:25,126 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212271.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:30:42,129 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-07 11:30:47,851 INFO [train.py:901] (1/4) Epoch 27, batch 2150, loss[loss=0.1783, simple_loss=0.2584, pruned_loss=0.04916, over 8240.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2814, pruned_loss=0.05649, over 1620476.24 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:30:48,019 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0268, 1.6461, 1.4405, 1.5653, 1.3694, 1.3074, 1.3058, 1.2648],
+       device='cuda:1'), covar=tensor([0.1185, 0.0564, 0.1368, 0.0615, 0.0749, 0.1614, 0.0942, 0.0867],
+       device='cuda:1'), in_proj_covar=tensor([0.0365, 0.0240, 0.0341, 0.0316, 0.0304, 0.0349, 0.0350, 0.0323],
+       device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+       device='cuda:1')
+2023-02-07 11:30:51,761 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.395e+02 2.764e+02 3.582e+02 6.444e+02, threshold=5.527e+02, percent-clipped=1.0
+2023-02-07 11:31:22,778 INFO [train.py:901] (1/4) Epoch 27, batch 2200, loss[loss=0.2251, simple_loss=0.3151, pruned_loss=0.06755, over 8472.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2823, pruned_loss=0.0574, over 1621667.94 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:31:57,347 INFO [train.py:901] (1/4) Epoch 27, batch 2250, loss[loss=0.2276, simple_loss=0.3195, pruned_loss=0.06787, over 8474.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2818, pruned_loss=0.05742, over 1618312.56 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:32:01,575 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.301e+02 2.815e+02 3.457e+02 5.141e+02, threshold=5.631e+02, percent-clipped=0.0
+2023-02-07 11:32:31,467 INFO [train.py:901] (1/4) Epoch 27, batch 2300, loss[loss=0.1831, simple_loss=0.2715, pruned_loss=0.04733, over 8477.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.05736, over 1621954.00 frames. ], batch size: 29, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:32:32,833 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212457.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:32:39,376 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212466.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 11:33:07,938 INFO [train.py:901] (1/4) Epoch 27, batch 2350, loss[loss=0.2559, simple_loss=0.3263, pruned_loss=0.09273, over 6920.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05734, over 1618716.99 frames. ], batch size: 71, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:33:12,148 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.366e+02 2.801e+02 3.492e+02 6.818e+02, threshold=5.601e+02, percent-clipped=4.0
+2023-02-07 11:33:42,849 INFO [train.py:901] (1/4) Epoch 27, batch 2400, loss[loss=0.195, simple_loss=0.2898, pruned_loss=0.05007, over 8033.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.0576, over 1620399.20 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:33:46,056 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-07 11:34:01,674 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212581.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 11:34:19,818 INFO [train.py:901] (1/4) Epoch 27, batch 2450, loss[loss=0.204, simple_loss=0.2891, pruned_loss=0.05948, over 8442.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2832, pruned_loss=0.05758, over 1623166.41 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:34:23,905 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.406e+02 2.879e+02 3.948e+02 9.646e+02, threshold=5.757e+02, percent-clipped=9.0
+2023-02-07 11:34:26,762 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212615.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:34:41,434 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0
+2023-02-07 11:34:54,051 INFO [train.py:901] (1/4) Epoch 27, batch 2500, loss[loss=0.2842, simple_loss=0.3637, pruned_loss=0.1023, over 6940.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05838, over 1622105.44 frames. ], batch size: 71, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:35:15,631 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-07 11:35:21,446 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4308, 2.1641, 2.6997, 2.2656, 2.8053, 2.4525, 2.3103, 1.5742],
+       device='cuda:1'), covar=tensor([0.5683, 0.4909, 0.2119, 0.4129, 0.2429, 0.3199, 0.1826, 0.5458],
+       device='cuda:1'), in_proj_covar=tensor([0.0966, 0.1018, 0.0830, 0.0986, 0.1023, 0.0926, 0.0770, 0.0847],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-07 11:35:28,282 INFO [train.py:901] (1/4) Epoch 27, batch 2550, loss[loss=0.2082, simple_loss=0.2993, pruned_loss=0.0586, over 8190.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2835, pruned_loss=0.05867, over 1620220.14 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:35:33,073 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.333e+02 2.985e+02 3.926e+02 7.498e+02, threshold=5.971e+02, percent-clipped=4.0
+2023-02-07 11:35:37,331 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212716.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:35:46,856 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212729.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:35:47,588 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212730.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:36:01,350 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5674, 1.8585, 1.9785, 1.2810, 2.0274, 1.4221, 0.6234, 1.7306],
+       device='cuda:1'), covar=tensor([0.0743, 0.0421, 0.0331, 0.0731, 0.0434, 0.1058, 0.1045, 0.0426],
+       device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0410, 0.0364, 0.0461, 0.0394, 0.0552, 0.0404, 0.0442],
+       device='cuda:1'), out_proj_covar=tensor([1.2576e-04, 1.0662e-04, 9.5043e-05, 1.2078e-04, 1.0322e-04, 1.5429e-04,
+       1.0796e-04, 1.1593e-04], device='cuda:1')
+2023-02-07 11:36:04,558 INFO [train.py:901] (1/4) Epoch 27, batch 2600, loss[loss=0.1815, simple_loss=0.2709, pruned_loss=0.04608, over 8433.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.283, pruned_loss=0.05783, over 1623638.19 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:36:05,343 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7288, 1.6459, 2.1096, 1.4470, 1.2801, 2.1315, 0.3397, 1.3712],
+       device='cuda:1'), covar=tensor([0.1459, 0.1065, 0.0375, 0.1022, 0.2347, 0.0366, 0.1789, 0.1206],
+       device='cuda:1'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0277, 0.0144, 0.0172, 0.0198],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-07 11:36:29,683 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212792.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:36:35,820 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212801.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:36:38,499 INFO [train.py:901] (1/4) Epoch 27, batch 2650, loss[loss=0.1587, simple_loss=0.2496, pruned_loss=0.03387, over 7795.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2836, pruned_loss=0.05846, over 1621329.08 frames. ], batch size: 19, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:36:43,310 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.517e+02 2.957e+02 3.589e+02 7.428e+02, threshold=5.913e+02, percent-clipped=3.0
+2023-02-07 11:37:02,117 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212837.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 11:37:03,989 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5675, 4.5476, 4.1280, 2.3167, 4.0594, 4.2870, 4.0567, 4.0476],
+       device='cuda:1'), covar=tensor([0.0712, 0.0486, 0.0965, 0.4196, 0.0866, 0.0767, 0.1243, 0.0638],
+       device='cuda:1'), in_proj_covar=tensor([0.0542, 0.0458, 0.0444, 0.0555, 0.0439, 0.0463, 0.0437, 0.0404],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:37:14,963 INFO [train.py:901] (1/4) Epoch 27, batch 2700, loss[loss=0.2123, simple_loss=0.3145, pruned_loss=0.05511, over 8747.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.284, pruned_loss=0.0586, over 1618868.15 frames. ], batch size: 30, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:37:19,862 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212862.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 11:37:49,376 INFO [train.py:901] (1/4) Epoch 27, batch 2750, loss[loss=0.1666, simple_loss=0.2562, pruned_loss=0.03849, over 7544.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2832, pruned_loss=0.05848, over 1614148.69 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:37:53,340 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.387e+02 2.942e+02 3.576e+02 8.277e+02, threshold=5.883e+02, percent-clipped=4.0
+2023-02-07 11:37:56,789 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212916.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:38:09,049 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5397, 2.1348, 3.1327, 1.4023, 2.3264, 1.9202, 1.6774, 2.3469],
+       device='cuda:1'), covar=tensor([0.1994, 0.2532, 0.0888, 0.4809, 0.2166, 0.3458, 0.2608, 0.2576],
+       device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0636, 0.0569, 0.0672, 0.0661, 0.0612, 0.0563, 0.0649],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:38:25,581 INFO [train.py:901] (1/4) Epoch 27, batch 2800, loss[loss=0.183, simple_loss=0.2823, pruned_loss=0.04185, over 8105.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05858, over 1615463.52 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:38:29,013 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6342, 1.8758, 1.9287, 1.8559, 1.1481, 1.7439, 2.2876, 1.9153],
+       device='cuda:1'), covar=tensor([0.0494, 0.1122, 0.1574, 0.1303, 0.0666, 0.1374, 0.0696, 0.0617],
+       device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0163, 0.0102, 0.0164, 0.0113, 0.0147],
+       device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+       device='cuda:1')
+2023-02-07 11:38:42,835 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9190, 2.0321, 1.7129, 2.5899, 1.1598, 1.5323, 1.9240, 2.0644],
+       device='cuda:1'), covar=tensor([0.0679, 0.0746, 0.0843, 0.0333, 0.1097, 0.1234, 0.0740, 0.0732],
+       device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0195, 0.0244, 0.0213, 0.0203, 0.0245, 0.0249, 0.0204],
+       device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:1')
+2023-02-07 11:38:46,069 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212986.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:38:58,529 INFO [train.py:901] (1/4) Epoch 27, batch 2850, loss[loss=0.2254, simple_loss=0.305, pruned_loss=0.07293, over 7197.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2832, pruned_loss=0.05895, over 1611766.26 frames. ], batch size: 16, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:39:02,633 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 2.420e+02 3.040e+02 3.738e+02 9.771e+02, threshold=6.080e+02, percent-clipped=4.0
+2023-02-07 11:39:02,868 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213011.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:39:19,518 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2680, 2.5566, 2.7920, 1.6409, 3.0787, 1.8608, 1.4552, 2.1823],
+       device='cuda:1'), covar=tensor([0.0914, 0.0465, 0.0307, 0.0872, 0.0517, 0.0921, 0.1161, 0.0651],
+       device='cuda:1'), in_proj_covar=tensor([0.0471, 0.0408, 0.0361, 0.0459, 0.0394, 0.0548, 0.0402, 0.0439],
+       device='cuda:1'), out_proj_covar=tensor([1.2487e-04, 1.0615e-04, 9.4338e-05, 1.2007e-04, 1.0310e-04, 1.5314e-04,
+       1.0733e-04, 1.1538e-04], device='cuda:1')
+2023-02-07 11:39:33,423 INFO [train.py:901] (1/4) Epoch 27, batch 2900, loss[loss=0.1846, simple_loss=0.279, pruned_loss=0.04506, over 8335.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2823, pruned_loss=0.05847, over 1614315.75 frames. ], batch size: 26, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:39:33,647 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4326, 1.8671, 3.2682, 1.3096, 2.4671, 1.9666, 1.4762, 2.4312],
+       device='cuda:1'), covar=tensor([0.2323, 0.2827, 0.0829, 0.5218, 0.2049, 0.3318, 0.2800, 0.2421],
+       device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0635, 0.0568, 0.0670, 0.0659, 0.0610, 0.0562, 0.0648],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:39:36,853 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213060.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:39:37,639 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2403, 2.1532, 1.7251, 1.9383, 1.7402, 1.4622, 1.7123, 1.6038],
+       device='cuda:1'), covar=tensor([0.1296, 0.0414, 0.1175, 0.0545, 0.0750, 0.1505, 0.0913, 0.0913],
+       device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0241, 0.0340, 0.0315, 0.0304, 0.0347, 0.0349, 0.0323],
+       device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+       device='cuda:1')
+2023-02-07 11:39:46,825 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213073.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:40:09,155 INFO [train.py:901] (1/4) Epoch 27, batch 2950, loss[loss=0.2533, simple_loss=0.3197, pruned_loss=0.09347, over 7214.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2835, pruned_loss=0.05906, over 1617256.83 frames. ], batch size: 71, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:40:12,519 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-07 11:40:13,187 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.292e+02 2.734e+02 3.601e+02 6.803e+02, threshold=5.467e+02, percent-clipped=1.0
+2023-02-07 11:40:30,259 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213136.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:40:42,822 INFO [train.py:901] (1/4) Epoch 27, batch 3000, loss[loss=0.1727, simple_loss=0.2424, pruned_loss=0.05152, over 7927.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.283, pruned_loss=0.05834, over 1618484.02 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 8.0
+2023-02-07 11:40:42,822 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-07 11:40:51,047 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7791, 1.7434, 1.5844, 2.2451, 1.1178, 1.4881, 1.7885, 1.7988],
+       device='cuda:1'), covar=tensor([0.0733, 0.0906, 0.0923, 0.0447, 0.1154, 0.1364, 0.0736, 0.0817],
+       device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0212, 0.0203, 0.0245, 0.0249, 0.0203],
+       device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:1')
+2023-02-07 11:40:56,478 INFO [train.py:935] (1/4) Epoch 27, validation: loss=0.171, simple_loss=0.2706, pruned_loss=0.03572, over 944034.00 frames.
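At batch 3000 the loop pauses to compute validation loss ("Computing validation loss", then the "validation: loss=0.171 ... over 944034.00 frames" summary) and reports peak GPU memory. A sketch of this periodic pattern under the assumption of a generic PyTorch loop; `compute_loss` and its `(loss, num_frames)` return value are hypothetical stand-ins, not icefall's actual signature:

```python
import torch

def maybe_validate(model, valid_loader, batch_idx, compute_loss,
                   valid_interval=3000):
    # Every `valid_interval` batches: switch to eval mode, average the loss
    # over the validation set, and report peak memory, mirroring the
    # "Computing validation loss" / "Maximum memory" log lines.
    if batch_idx == 0 or batch_idx % valid_interval != 0:
        return
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = compute_loss(model, batch)
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    print(f"validation: loss={tot_loss / tot_frames:.4g}, "
          f"over {tot_frames:.2f} frames.")
    print(f"Maximum memory allocated so far is "
          f"{torch.cuda.max_memory_allocated() // 2**20}MB")
    model.train()
```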
+2023-02-07 11:40:56,478 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB
+2023-02-07 11:41:08,336 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213172.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:41:10,344 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213175.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:41:19,864 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213188.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:41:20,565 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5407, 1.3457, 4.7335, 1.6072, 4.1940, 3.8714, 4.2727, 4.1419],
+       device='cuda:1'), covar=tensor([0.0618, 0.5045, 0.0463, 0.4653, 0.1054, 0.1022, 0.0595, 0.0679],
+       device='cuda:1'), in_proj_covar=tensor([0.0676, 0.0668, 0.0736, 0.0660, 0.0749, 0.0638, 0.0638, 0.0719],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:41:25,953 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213197.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:41:31,706 INFO [train.py:901] (1/4) Epoch 27, batch 3050, loss[loss=0.1916, simple_loss=0.2872, pruned_loss=0.04793, over 8517.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2843, pruned_loss=0.0588, over 1619844.97 frames. ], batch size: 28, lr: 2.80e-03, grad_scale: 16.0
+2023-02-07 11:41:36,535 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.283e+02 2.877e+02 3.649e+02 6.604e+02, threshold=5.754e+02, percent-clipped=7.0
+2023-02-07 11:41:46,123 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7592, 1.5963, 2.2264, 1.3711, 1.3033, 2.2246, 0.5635, 1.4126],
+       device='cuda:1'), covar=tensor([0.1411, 0.1126, 0.0370, 0.1166, 0.2276, 0.0373, 0.1825, 0.1320],
+       device='cuda:1'), in_proj_covar=tensor([0.0199, 0.0203, 0.0134, 0.0223, 0.0275, 0.0144, 0.0171, 0.0198],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-07 11:42:01,589 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-07 11:42:03,697 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0
+2023-02-07 11:42:04,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213251.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:42:06,027 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9583, 1.4209, 1.7152, 1.3470, 0.9868, 1.4642, 1.8522, 1.4540],
+       device='cuda:1'), covar=tensor([0.0546, 0.1299, 0.1708, 0.1489, 0.0602, 0.1587, 0.0674, 0.0723],
+       device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0164, 0.0112, 0.0146],
+       device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+       device='cuda:1')
+2023-02-07 11:42:06,437 INFO [train.py:901] (1/4) Epoch 27, batch 3100, loss[loss=0.2504, simple_loss=0.3184, pruned_loss=0.0912, over 7802.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2838, pruned_loss=0.05877, over 1618672.44 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 16.0
+2023-02-07 11:42:07,838 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1004, 1.2217, 4.3239, 2.0548, 2.6308, 4.8972, 4.9791, 4.2220],
+       device='cuda:1'), covar=tensor([0.1227, 0.2136, 0.0293, 0.1917, 0.1114, 0.0171, 0.0380, 0.0550],
+       device='cuda:1'), in_proj_covar=tensor([0.0306, 0.0327, 0.0293, 0.0321, 0.0321, 0.0279, 0.0438, 0.0309],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+       device='cuda:1')
+2023-02-07 11:42:18,672 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0
+2023-02-07 11:42:39,679 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9358, 2.0629, 1.6857, 2.7513, 1.1882, 1.5437, 2.0368, 2.1150],
+       device='cuda:1'), covar=tensor([0.0745, 0.0789, 0.0929, 0.0332, 0.1225, 0.1386, 0.0800, 0.0828],
+       device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0197, 0.0246, 0.0214, 0.0205, 0.0248, 0.0252, 0.0205],
+       device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:1')
+2023-02-07 11:42:40,174 INFO [train.py:901] (1/4) Epoch 27, batch 3150, loss[loss=0.1802, simple_loss=0.2763, pruned_loss=0.04202, over 8470.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2846, pruned_loss=0.0589, over 1619355.91 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 16.0
+2023-02-07 11:42:44,227 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.557e+02 3.186e+02 3.836e+02 1.080e+03, threshold=6.372e+02, percent-clipped=6.0
+2023-02-07 11:43:15,308 INFO [train.py:901] (1/4) Epoch 27, batch 3200, loss[loss=0.2074, simple_loss=0.2804, pruned_loss=0.06719, over 7971.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2862, pruned_loss=0.05937, over 1625255.53 frames. ], batch size: 21, lr: 2.80e-03, grad_scale: 16.0
+2023-02-07 11:43:48,887 INFO [train.py:901] (1/4) Epoch 27, batch 3250, loss[loss=0.2096, simple_loss=0.2975, pruned_loss=0.06085, over 8497.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2841, pruned_loss=0.05841, over 1619579.30 frames. ], batch size: 29, lr: 2.80e-03, grad_scale: 16.0
+2023-02-07 11:43:52,812 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.478e+02 2.885e+02 3.413e+02 5.983e+02, threshold=5.770e+02, percent-clipped=0.0
+2023-02-07 11:44:07,276 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213431.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:44:09,272 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8699, 1.4487, 3.1416, 1.4792, 2.4133, 3.3899, 3.5060, 2.8881],
+       device='cuda:1'), covar=tensor([0.1241, 0.1908, 0.0335, 0.2244, 0.0928, 0.0259, 0.0691, 0.0556],
+       device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0328, 0.0294, 0.0322, 0.0323, 0.0280, 0.0440, 0.0310],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+       device='cuda:1')
+2023-02-07 11:44:17,674 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213444.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:44:25,458 INFO [train.py:901] (1/4) Epoch 27, batch 3300, loss[loss=0.2327, simple_loss=0.3073, pruned_loss=0.07907, over 8460.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2847, pruned_loss=0.05884, over 1615384.67 frames. ], batch size: 27, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:44:26,271 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213456.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:44:35,226 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:44:52,790 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0191, 1.7529, 2.5047, 1.7745, 1.4616, 2.5336, 0.5979, 1.5853],
+       device='cuda:1'), covar=tensor([0.1286, 0.1232, 0.0309, 0.1018, 0.2241, 0.0311, 0.1726, 0.1217],
+       device='cuda:1'), in_proj_covar=tensor([0.0199, 0.0203, 0.0134, 0.0223, 0.0275, 0.0144, 0.0171, 0.0198],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-07 11:44:59,357 INFO [train.py:901] (1/4) Epoch 27, batch 3350, loss[loss=0.2422, simple_loss=0.3326, pruned_loss=0.07592, over 8192.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2848, pruned_loss=0.0588, over 1618712.11 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:45:00,954 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:45:03,449 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.606e+02 3.102e+02 3.998e+02 8.787e+02, threshold=6.203e+02, percent-clipped=8.0
+2023-02-07 11:45:18,451 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:45:34,517 INFO [train.py:901] (1/4) Epoch 27, batch 3400, loss[loss=0.1684, simple_loss=0.2493, pruned_loss=0.04372, over 7688.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.284, pruned_loss=0.05835, over 1616506.75 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:45:55,323 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=213584.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:46:09,626 INFO [train.py:901] (1/4) Epoch 27, batch 3450, loss[loss=0.1929, simple_loss=0.2816, pruned_loss=0.05212, over 8328.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.283, pruned_loss=0.05769, over 1616575.90 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:46:13,703 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.297e+02 2.616e+02 3.439e+02 9.820e+02, threshold=5.232e+02, percent-clipped=1.0
+2023-02-07 11:46:44,526 INFO [train.py:901] (1/4) Epoch 27, batch 3500, loss[loss=0.1793, simple_loss=0.2533, pruned_loss=0.05262, over 7440.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05791, over 1614698.03 frames. ], batch size: 17, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:46:46,128 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4989, 1.6481, 2.1723, 1.4074, 1.5222, 1.7373, 1.5339, 1.5415],
+       device='cuda:1'), covar=tensor([0.2078, 0.2684, 0.1041, 0.4790, 0.2078, 0.3671, 0.2612, 0.2257],
+       device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0634, 0.0568, 0.0671, 0.0660, 0.0609, 0.0562, 0.0647],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:47:08,392 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5560, 5.5550, 4.9708, 2.7779, 4.9241, 5.2975, 5.0338, 5.0370],
+       device='cuda:1'), covar=tensor([0.0504, 0.0386, 0.0768, 0.3543, 0.0731, 0.0894, 0.1079, 0.0593],
+       device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0459, 0.0446, 0.0558, 0.0441, 0.0465, 0.0439, 0.0405],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:47:08,657 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. limit=2.0
+2023-02-07 11:47:11,033 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-07 11:47:20,454 INFO [train.py:901] (1/4) Epoch 27, batch 3550, loss[loss=0.2369, simple_loss=0.3173, pruned_loss=0.07825, over 8191.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2832, pruned_loss=0.05844, over 1610340.49 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:47:24,349 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.484e+02 3.157e+02 3.893e+02 8.912e+02, threshold=6.313e+02, percent-clipped=7.0
+2023-02-07 11:47:51,925 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7781, 1.5868, 2.3986, 1.5625, 1.2961, 2.3880, 0.4985, 1.4651],
+       device='cuda:1'), covar=tensor([0.1467, 0.1359, 0.0319, 0.1083, 0.2643, 0.0347, 0.1944, 0.1312],
+       device='cuda:1'), in_proj_covar=tensor([0.0200, 0.0206, 0.0135, 0.0224, 0.0277, 0.0145, 0.0172, 0.0200],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-07 11:47:55,110 INFO [train.py:901] (1/4) Epoch 27, batch 3600, loss[loss=0.1515, simple_loss=0.2339, pruned_loss=0.03452, over 7722.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2829, pruned_loss=0.05823, over 1612949.98 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:48:31,566 INFO [train.py:901] (1/4) Epoch 27, batch 3650, loss[loss=0.1782, simple_loss=0.2542, pruned_loss=0.05113, over 7541.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2821, pruned_loss=0.05776, over 1612444.77 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:48:35,659 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.435e+02 3.005e+02 4.000e+02 1.001e+03, threshold=6.009e+02, percent-clipped=1.0
+2023-02-07 11:49:05,215 INFO [train.py:901] (1/4) Epoch 27, batch 3700, loss[loss=0.225, simple_loss=0.3042, pruned_loss=0.07292, over 8596.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2822, pruned_loss=0.05757, over 1612281.10 frames. ], batch size: 31, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:49:11,355 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 11:49:27,281 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-07 11:49:40,754 INFO [train.py:901] (1/4) Epoch 27, batch 3750, loss[loss=0.189, simple_loss=0.2752, pruned_loss=0.05139, over 8241.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2833, pruned_loss=0.05789, over 1613647.95 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:49:44,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.240e+02 2.670e+02 3.453e+02 6.024e+02, threshold=5.340e+02, percent-clipped=1.0
+2023-02-07 11:49:57,382 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213928.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:50:07,450 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3346, 2.0771, 2.1296, 1.9900, 1.4672, 2.0628, 2.2577, 2.0445],
+       device='cuda:1'), covar=tensor([0.0596, 0.0956, 0.1356, 0.1188, 0.0639, 0.1170, 0.0748, 0.0539],
+       device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0162, 0.0101, 0.0164, 0.0113, 0.0146],
+       device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+       device='cuda:1')
+2023-02-07 11:50:15,109 INFO [train.py:901] (1/4) Epoch 27, batch 3800, loss[loss=0.1969, simple_loss=0.2901, pruned_loss=0.05182, over 8341.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2822, pruned_loss=0.05783, over 1609507.90 frames. ], batch size: 26, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:50:18,681 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6579, 2.1116, 3.1535, 1.5111, 2.3168, 2.1124, 1.7972, 2.4191],
+       device='cuda:1'), covar=tensor([0.2005, 0.2546, 0.0926, 0.4679, 0.2031, 0.3200, 0.2418, 0.2288],
+       device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0632, 0.0565, 0.0667, 0.0659, 0.0606, 0.0561, 0.0644],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:50:51,703 INFO [train.py:901] (1/4) Epoch 27, batch 3850, loss[loss=0.1592, simple_loss=0.2521, pruned_loss=0.03316, over 7527.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.05746, over 1609367.52 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:50:55,678 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.325e+02 2.987e+02 3.815e+02 9.366e+02, threshold=5.974e+02, percent-clipped=6.0
+2023-02-07 11:51:14,749 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6992, 2.5326, 3.3120, 2.6810, 3.2848, 2.7309, 2.6035, 2.0374],
+       device='cuda:1'), covar=tensor([0.5559, 0.5353, 0.1956, 0.4027, 0.2537, 0.3100, 0.1765, 0.5808],
+       device='cuda:1'), in_proj_covar=tensor([0.0961, 0.1014, 0.0828, 0.0985, 0.1017, 0.0923, 0.0765, 0.0845],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:1')
+2023-02-07 11:51:19,438 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214043.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:51:21,269 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 11:51:27,208 INFO [train.py:901] (1/4) Epoch 27, batch 3900, loss[loss=0.1964, simple_loss=0.2893, pruned_loss=0.05172, over 8481.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05746, over 1613639.42 frames. ], batch size: 27, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:51:39,612 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-07 11:52:00,518 INFO [train.py:901] (1/4) Epoch 27, batch 3950, loss[loss=0.1759, simple_loss=0.252, pruned_loss=0.04988, over 7442.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2821, pruned_loss=0.05734, over 1615929.30 frames. ], batch size: 17, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:52:04,373 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.377e+02 2.717e+02 3.364e+02 5.097e+02, threshold=5.435e+02, percent-clipped=0.0
+2023-02-07 11:52:13,417 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-02-07 11:52:13,847 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8594, 2.0900, 3.5487, 1.8399, 1.9450, 3.5608, 0.7296, 2.0346],
+       device='cuda:1'), covar=tensor([0.1145, 0.1202, 0.0238, 0.1519, 0.2140, 0.0258, 0.2029, 0.1327],
+       device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0206, 0.0136, 0.0225, 0.0279, 0.0146, 0.0173, 0.0200],
+       device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:1')
+2023-02-07 11:52:30,743 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214147.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:52:35,674 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.27 vs. limit=5.0
+2023-02-07 11:52:36,018 INFO [train.py:901] (1/4) Epoch 27, batch 4000, loss[loss=0.1714, simple_loss=0.2687, pruned_loss=0.03709, over 8026.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.282, pruned_loss=0.05716, over 1617239.31 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:53:10,789 INFO [train.py:901] (1/4) Epoch 27, batch 4050, loss[loss=0.2019, simple_loss=0.2808, pruned_loss=0.06152, over 7789.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2813, pruned_loss=0.05721, over 1613924.76 frames. ], batch size: 19, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:53:14,931 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.379e+02 2.958e+02 3.648e+02 7.596e+02, threshold=5.915e+02, percent-clipped=3.0
+2023-02-07 11:53:31,871 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214236.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:53:45,409 INFO [train.py:901] (1/4) Epoch 27, batch 4100, loss[loss=0.2269, simple_loss=0.3111, pruned_loss=0.07137, over 8600.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05776, over 1616750.85 frames. ], batch size: 31, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:54:17,263 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214299.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:54:21,106 INFO [train.py:901] (1/4) Epoch 27, batch 4150, loss[loss=0.1648, simple_loss=0.2492, pruned_loss=0.04019, over 8085.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05778, over 1613814.61 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:54:25,184 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.522e+02 2.957e+02 3.518e+02 6.524e+02, threshold=5.913e+02, percent-clipped=2.0
+2023-02-07 11:54:34,220 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214324.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:54:55,406 INFO [train.py:901] (1/4) Epoch 27, batch 4200, loss[loss=0.1937, simple_loss=0.2701, pruned_loss=0.05863, over 7919.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05719, over 1608301.35 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:55:15,711 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 11:55:30,770 INFO [train.py:901] (1/4) Epoch 27, batch 4250, loss[loss=0.1829, simple_loss=0.2709, pruned_loss=0.0474, over 8241.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.05766, over 1611210.27 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:55:34,791 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.351e+02 2.930e+02 3.605e+02 8.966e+02, threshold=5.860e+02, percent-clipped=4.0
+2023-02-07 11:55:40,136 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 11:56:04,680 INFO [train.py:901] (1/4) Epoch 27, batch 4300, loss[loss=0.1977, simple_loss=0.2836, pruned_loss=0.05594, over 8501.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2827, pruned_loss=0.0585, over 1611451.56 frames. ], batch size: 26, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:56:19,497 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6348, 2.1594, 3.2659, 1.5293, 2.4691, 2.0728, 1.7466, 2.4281],
+       device='cuda:1'), covar=tensor([0.2003, 0.2643, 0.0908, 0.4793, 0.2012, 0.3399, 0.2593, 0.2467],
+       device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0635, 0.0566, 0.0671, 0.0662, 0.0611, 0.0564, 0.0647],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:56:29,281 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214491.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:56:40,083 INFO [train.py:901] (1/4) Epoch 27, batch 4350, loss[loss=0.1678, simple_loss=0.2441, pruned_loss=0.04575, over 7701.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2837, pruned_loss=0.05861, over 1615063.57 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:56:44,887 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.457e+02 2.989e+02 4.041e+02 8.697e+02, threshold=5.978e+02, percent-clipped=4.0
+2023-02-07 11:57:11,488 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-07 11:57:14,874 INFO [train.py:901] (1/4) Epoch 27, batch 4400, loss[loss=0.2133, simple_loss=0.3035, pruned_loss=0.06159, over 7969.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2828, pruned_loss=0.05821, over 1611433.45 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0
+2023-02-07 11:57:25,505 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6746, 2.1404, 3.3154, 1.6259, 2.4203, 2.1600, 1.8026, 2.4893],
+       device='cuda:1'), covar=tensor([0.2042, 0.2755, 0.0920, 0.4698, 0.2043, 0.3280, 0.2596, 0.2514],
+       device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0632, 0.0563, 0.0668, 0.0658, 0.0607, 0.0562, 0.0643],
+       device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:1')
+2023-02-07 11:57:32,006 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214580.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 11:57:39,612 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
limit=2.0 +2023-02-07 11:57:43,110 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. limit=2.0 +2023-02-07 11:57:47,096 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8269, 2.0516, 2.1513, 1.5759, 2.3482, 1.5861, 0.7571, 2.1532], + device='cuda:1'), covar=tensor([0.0742, 0.0434, 0.0345, 0.0633, 0.0414, 0.0988, 0.1067, 0.0301], + device='cuda:1'), in_proj_covar=tensor([0.0472, 0.0409, 0.0362, 0.0459, 0.0394, 0.0549, 0.0402, 0.0438], + device='cuda:1'), out_proj_covar=tensor([1.2508e-04, 1.0637e-04, 9.4530e-05, 1.2029e-04, 1.0328e-04, 1.5321e-04, + 1.0741e-04, 1.1485e-04], device='cuda:1') +2023-02-07 11:57:49,419 INFO [train.py:901] (1/4) Epoch 27, batch 4450, loss[loss=0.2631, simple_loss=0.3471, pruned_loss=0.08952, over 8528.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2831, pruned_loss=0.0582, over 1618145.40 frames. ], batch size: 49, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:50,290 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:51,445 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 11:57:53,331 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.435e+02 2.910e+02 3.675e+02 1.096e+03, threshold=5.821e+02, percent-clipped=3.0 +2023-02-07 11:58:25,066 INFO [train.py:901] (1/4) Epoch 27, batch 4500, loss[loss=0.2078, simple_loss=0.298, pruned_loss=0.05877, over 8472.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2829, pruned_loss=0.05793, over 1619304.37 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:58:49,089 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 11:58:51,965 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:58:58,563 INFO [train.py:901] (1/4) Epoch 27, batch 4550, loss[loss=0.2038, simple_loss=0.2928, pruned_loss=0.05741, over 8290.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2824, pruned_loss=0.05757, over 1617175.68 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:59:03,195 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.259e+02 2.795e+02 3.667e+02 7.490e+02, threshold=5.591e+02, percent-clipped=6.0 +2023-02-07 11:59:34,575 INFO [train.py:901] (1/4) Epoch 27, batch 4600, loss[loss=0.191, simple_loss=0.2727, pruned_loss=0.05462, over 8184.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2822, pruned_loss=0.05786, over 1610825.66 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:07,865 INFO [train.py:901] (1/4) Epoch 27, batch 4650, loss[loss=0.1952, simple_loss=0.2823, pruned_loss=0.05402, over 8569.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2826, pruned_loss=0.05794, over 1609592.98 frames. ], batch size: 31, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:11,922 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.452e+02 3.083e+02 3.974e+02 1.018e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-07 12:00:43,658 INFO [train.py:901] (1/4) Epoch 27, batch 4700, loss[loss=0.1991, simple_loss=0.2923, pruned_loss=0.05294, over 8186.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.283, pruned_loss=0.05862, over 1607604.03 frames. 
], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:48,572 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:05,464 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:17,180 INFO [train.py:901] (1/4) Epoch 27, batch 4750, loss[loss=0.2331, simple_loss=0.3102, pruned_loss=0.078, over 8253.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2841, pruned_loss=0.05907, over 1610012.73 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:01:21,121 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.444e+02 3.016e+02 3.790e+02 1.117e+03, threshold=6.032e+02, percent-clipped=6.0 +2023-02-07 12:01:42,269 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 12:01:45,086 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 12:01:49,024 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:51,313 INFO [train.py:901] (1/4) Epoch 27, batch 4800, loss[loss=0.1735, simple_loss=0.2567, pruned_loss=0.04518, over 7640.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2826, pruned_loss=0.05866, over 1611297.04 frames. ], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:07,893 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:02:27,034 INFO [train.py:901] (1/4) Epoch 27, batch 4850, loss[loss=0.2094, simple_loss=0.2832, pruned_loss=0.06784, over 7806.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05817, over 1617808.69 frames. ], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:31,201 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.294e+02 2.705e+02 3.274e+02 6.085e+02, threshold=5.409e+02, percent-clipped=1.0 +2023-02-07 12:02:36,670 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 12:03:00,130 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5758, 2.1446, 3.1935, 1.5119, 2.4888, 2.0320, 1.6865, 2.5303], + device='cuda:1'), covar=tensor([0.2082, 0.2614, 0.0902, 0.4898, 0.1889, 0.3391, 0.2639, 0.2287], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0636, 0.0567, 0.0671, 0.0663, 0.0613, 0.0566, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:03:01,830 INFO [train.py:901] (1/4) Epoch 27, batch 4900, loss[loss=0.254, simple_loss=0.3156, pruned_loss=0.09619, over 8464.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2829, pruned_loss=0.05745, over 1621088.61 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:37,419 INFO [train.py:901] (1/4) Epoch 27, batch 4950, loss[loss=0.2101, simple_loss=0.2949, pruned_loss=0.06262, over 8503.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2824, pruned_loss=0.05699, over 1613886.56 frames. 
], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:41,324 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.323e+02 2.858e+02 3.502e+02 9.819e+02, threshold=5.716e+02, percent-clipped=5.0 +2023-02-07 12:04:10,560 INFO [train.py:901] (1/4) Epoch 27, batch 5000, loss[loss=0.2049, simple_loss=0.2932, pruned_loss=0.05834, over 8476.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2838, pruned_loss=0.0577, over 1613998.87 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:04:47,122 INFO [train.py:901] (1/4) Epoch 27, batch 5050, loss[loss=0.2183, simple_loss=0.3017, pruned_loss=0.06745, over 8365.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2833, pruned_loss=0.05753, over 1615512.07 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:04:50,998 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.415e+02 2.920e+02 3.667e+02 5.760e+02, threshold=5.840e+02, percent-clipped=1.0 +2023-02-07 12:05:10,163 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 12:05:15,552 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215248.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:05:20,048 INFO [train.py:901] (1/4) Epoch 27, batch 5100, loss[loss=0.2159, simple_loss=0.2896, pruned_loss=0.07114, over 8249.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2846, pruned_loss=0.05856, over 1616799.95 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:28,223 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8746, 1.6814, 2.3598, 1.5022, 1.4392, 2.3232, 0.8116, 1.6516], + device='cuda:1'), covar=tensor([0.1429, 0.1099, 0.0295, 0.1025, 0.2144, 0.0330, 0.1766, 0.1211], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0206, 0.0137, 0.0225, 0.0279, 0.0146, 0.0174, 0.0201], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 12:05:32,898 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5182, 1.3853, 1.8277, 1.3052, 1.1464, 1.7895, 0.2923, 1.2305], + device='cuda:1'), covar=tensor([0.1540, 0.1205, 0.0380, 0.0746, 0.2438, 0.0441, 0.1941, 0.1237], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0207, 0.0137, 0.0225, 0.0279, 0.0147, 0.0174, 0.0201], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 12:05:54,329 INFO [train.py:901] (1/4) Epoch 27, batch 5150, loss[loss=0.1827, simple_loss=0.2666, pruned_loss=0.04934, over 8298.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2824, pruned_loss=0.05728, over 1617410.51 frames. ], batch size: 23, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:59,267 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.449e+02 2.868e+02 3.492e+02 6.640e+02, threshold=5.736e+02, percent-clipped=1.0 +2023-02-07 12:06:22,713 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215343.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:06:28,157 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215351.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:06:30,696 INFO [train.py:901] (1/4) Epoch 27, batch 5200, loss[loss=0.2275, simple_loss=0.3164, pruned_loss=0.06928, over 8493.00 frames. 
], tot_loss[loss=0.1989, simple_loss=0.283, pruned_loss=0.05738, over 1620497.53 frames. ], batch size: 28, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:07:05,198 INFO [train.py:901] (1/4) Epoch 27, batch 5250, loss[loss=0.1784, simple_loss=0.2617, pruned_loss=0.04754, over 7929.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2829, pruned_loss=0.05702, over 1617326.69 frames. ], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:07:09,825 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.358e+02 2.790e+02 3.638e+02 8.125e+02, threshold=5.579e+02, percent-clipped=3.0 +2023-02-07 12:07:12,624 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 12:07:40,339 INFO [train.py:901] (1/4) Epoch 27, batch 5300, loss[loss=0.2123, simple_loss=0.2963, pruned_loss=0.0641, over 8232.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2828, pruned_loss=0.05705, over 1617617.25 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:13,771 INFO [train.py:901] (1/4) Epoch 27, batch 5350, loss[loss=0.1989, simple_loss=0.2844, pruned_loss=0.05664, over 8587.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.05735, over 1609252.13 frames. ], batch size: 49, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:18,670 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.346e+02 2.455e+02 2.847e+02 3.988e+02 1.267e+03, threshold=5.693e+02, percent-clipped=12.0 +2023-02-07 12:08:25,729 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:08:48,875 INFO [train.py:901] (1/4) Epoch 27, batch 5400, loss[loss=0.2557, simple_loss=0.3278, pruned_loss=0.09183, over 8499.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05741, over 1612686.31 frames. ], batch size: 29, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:52,411 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4966, 2.4910, 1.8236, 2.2189, 2.2094, 1.4453, 2.0527, 2.0407], + device='cuda:1'), covar=tensor([0.1573, 0.0400, 0.1201, 0.0656, 0.0732, 0.1667, 0.1063, 0.1069], + device='cuda:1'), in_proj_covar=tensor([0.0361, 0.0241, 0.0343, 0.0312, 0.0303, 0.0347, 0.0350, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 12:08:57,736 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215566.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:09:08,905 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:09:14,846 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215592.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:09:23,341 INFO [train.py:901] (1/4) Epoch 27, batch 5450, loss[loss=0.1563, simple_loss=0.2411, pruned_loss=0.03579, over 7922.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2833, pruned_loss=0.05783, over 1618839.75 frames. 
], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:27,901 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.540e+02 3.136e+02 3.819e+02 8.555e+02, threshold=6.272e+02, percent-clipped=5.0 +2023-02-07 12:09:47,129 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8650, 1.5088, 1.7319, 1.3746, 0.8915, 1.5518, 1.6271, 1.6144], + device='cuda:1'), covar=tensor([0.0572, 0.1213, 0.1577, 0.1441, 0.0603, 0.1418, 0.0695, 0.0613], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 12:09:56,879 INFO [train.py:901] (1/4) Epoch 27, batch 5500, loss[loss=0.2292, simple_loss=0.3018, pruned_loss=0.07833, over 7101.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2843, pruned_loss=0.05868, over 1616060.83 frames. ], batch size: 72, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:56,892 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 12:10:20,608 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215687.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:10:25,967 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:10:32,545 INFO [train.py:901] (1/4) Epoch 27, batch 5550, loss[loss=0.1945, simple_loss=0.2755, pruned_loss=0.05675, over 8239.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2838, pruned_loss=0.0581, over 1617727.92 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:10:34,102 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215707.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:10:37,242 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.445e+02 2.973e+02 3.969e+02 8.778e+02, threshold=5.947e+02, percent-clipped=4.0 +2023-02-07 12:11:06,809 INFO [train.py:901] (1/4) Epoch 27, batch 5600, loss[loss=0.185, simple_loss=0.2879, pruned_loss=0.04104, over 8469.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2827, pruned_loss=0.05691, over 1619747.24 frames. ], batch size: 39, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:40,034 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:11:41,822 INFO [train.py:901] (1/4) Epoch 27, batch 5650, loss[loss=0.2095, simple_loss=0.2976, pruned_loss=0.06073, over 8322.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2832, pruned_loss=0.05694, over 1619962.67 frames. 
], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:46,130 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:11:47,195 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.361e+02 2.799e+02 3.308e+02 5.877e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 12:11:50,151 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0563, 1.1910, 1.1610, 0.8340, 1.2105, 0.9908, 0.1764, 1.1858], + device='cuda:1'), covar=tensor([0.0652, 0.0517, 0.0539, 0.0678, 0.0623, 0.1279, 0.1160, 0.0435], + device='cuda:1'), in_proj_covar=tensor([0.0473, 0.0410, 0.0363, 0.0459, 0.0394, 0.0552, 0.0403, 0.0439], + device='cuda:1'), out_proj_covar=tensor([1.2555e-04, 1.0640e-04, 9.4739e-05, 1.2018e-04, 1.0317e-04, 1.5401e-04, + 1.0772e-04, 1.1511e-04], device='cuda:1') +2023-02-07 12:12:04,981 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 12:12:11,224 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:14,102 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6835, 2.2311, 3.7040, 1.5926, 2.6632, 2.2026, 1.8667, 2.8174], + device='cuda:1'), covar=tensor([0.2023, 0.2742, 0.1062, 0.4746, 0.2189, 0.3394, 0.2527, 0.2586], + device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0633, 0.0565, 0.0667, 0.0660, 0.0609, 0.0563, 0.0642], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:12:16,646 INFO [train.py:901] (1/4) Epoch 27, batch 5700, loss[loss=0.2076, simple_loss=0.2987, pruned_loss=0.05823, over 8248.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2835, pruned_loss=0.05744, over 1615623.74 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:23,687 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:51,905 INFO [train.py:901] (1/4) Epoch 27, batch 5750, loss[loss=0.1906, simple_loss=0.2654, pruned_loss=0.05783, over 7220.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.05789, over 1615117.72 frames. ], batch size: 16, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:56,026 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215910.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:12:57,190 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.395e+02 2.899e+02 3.864e+02 7.116e+02, threshold=5.798e+02, percent-clipped=7.0 +2023-02-07 12:13:07,810 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:10,343 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 12:13:26,393 INFO [train.py:901] (1/4) Epoch 27, batch 5800, loss[loss=0.1905, simple_loss=0.2824, pruned_loss=0.04934, over 8526.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2825, pruned_loss=0.05795, over 1610478.00 frames. 
], batch size: 28, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:13:27,183 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2532, 3.1884, 2.9868, 1.5617, 2.8886, 2.9479, 2.8230, 2.8346], + device='cuda:1'), covar=tensor([0.1100, 0.0773, 0.1147, 0.4509, 0.1100, 0.1347, 0.1608, 0.1054], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0462, 0.0441, 0.0553, 0.0441, 0.0464, 0.0441, 0.0403], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:13:31,934 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=215963.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:13:43,178 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215980.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:48,230 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=215988.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:13:56,129 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5923, 1.9051, 2.0359, 1.3188, 2.0801, 1.4213, 0.6358, 1.7782], + device='cuda:1'), covar=tensor([0.0765, 0.0434, 0.0369, 0.0748, 0.0529, 0.1222, 0.1139, 0.0418], + device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0411, 0.0364, 0.0460, 0.0395, 0.0554, 0.0404, 0.0440], + device='cuda:1'), out_proj_covar=tensor([1.2582e-04, 1.0678e-04, 9.4899e-05, 1.2046e-04, 1.0343e-04, 1.5464e-04, + 1.0788e-04, 1.1542e-04], device='cuda:1') +2023-02-07 12:14:01,070 INFO [train.py:901] (1/4) Epoch 27, batch 5850, loss[loss=0.1872, simple_loss=0.2588, pruned_loss=0.05785, over 7433.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2826, pruned_loss=0.0583, over 1611506.44 frames. ], batch size: 17, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:05,651 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.428e+02 2.871e+02 3.760e+02 7.078e+02, threshold=5.742e+02, percent-clipped=9.0 +2023-02-07 12:14:13,528 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-02-07 12:14:15,361 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216025.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:27,450 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:28,169 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7129, 1.9558, 2.0555, 1.3575, 2.1396, 1.6245, 0.5874, 1.8856], + device='cuda:1'), covar=tensor([0.0659, 0.0415, 0.0357, 0.0622, 0.0456, 0.1010, 0.1008, 0.0335], + device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0411, 0.0364, 0.0460, 0.0395, 0.0553, 0.0404, 0.0440], + device='cuda:1'), out_proj_covar=tensor([1.2583e-04, 1.0675e-04, 9.4870e-05, 1.2045e-04, 1.0353e-04, 1.5446e-04, + 1.0791e-04, 1.1531e-04], device='cuda:1') +2023-02-07 12:14:30,134 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216046.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:34,947 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0671, 1.8831, 2.2700, 1.9749, 2.2548, 2.1766, 1.9676, 1.2677], + device='cuda:1'), covar=tensor([0.6124, 0.5040, 0.2249, 0.4016, 0.2671, 0.3055, 0.2006, 0.5416], + device='cuda:1'), in_proj_covar=tensor([0.0963, 0.1017, 0.0831, 0.0990, 0.1025, 0.0927, 0.0769, 0.0850], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 12:14:35,103 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 12:14:36,736 INFO [train.py:901] (1/4) Epoch 27, batch 5900, loss[loss=0.1862, simple_loss=0.279, pruned_loss=0.04665, over 8330.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2822, pruned_loss=0.05777, over 1618030.15 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:38,990 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216058.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:44,146 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:55,474 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216083.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:15:00,973 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:15:10,165 INFO [train.py:901] (1/4) Epoch 27, batch 5950, loss[loss=0.2231, simple_loss=0.2967, pruned_loss=0.0747, over 7927.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2812, pruned_loss=0.05738, over 1615946.49 frames. ], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:15,770 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.411e+02 2.864e+02 3.625e+02 8.908e+02, threshold=5.728e+02, percent-clipped=5.0 +2023-02-07 12:15:46,874 INFO [train.py:901] (1/4) Epoch 27, batch 6000, loss[loss=0.1984, simple_loss=0.2799, pruned_loss=0.05852, over 7975.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2809, pruned_loss=0.05724, over 1613935.01 frames. ], batch size: 21, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:46,874 INFO [train.py:926] (1/4) Computing validation loss +2023-02-07 12:15:59,965 INFO [train.py:935] (1/4) Epoch 27, validation: loss=0.1711, simple_loss=0.2711, pruned_loss=0.03554, over 944034.00 frames. 
+2023-02-07 12:15:59,966 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6717MB +2023-02-07 12:16:25,737 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:16:35,309 INFO [train.py:901] (1/4) Epoch 27, batch 6050, loss[loss=0.197, simple_loss=0.2714, pruned_loss=0.06127, over 7790.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.0567, over 1615729.40 frames. ], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:16:40,123 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.565e+02 3.207e+02 4.227e+02 9.285e+02, threshold=6.415e+02, percent-clipped=9.0 +2023-02-07 12:16:56,666 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:09,865 INFO [train.py:901] (1/4) Epoch 27, batch 6100, loss[loss=0.2155, simple_loss=0.2954, pruned_loss=0.06784, over 7047.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05709, over 1618650.12 frames. ], batch size: 71, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:14,012 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:28,454 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216281.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:17:39,621 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:44,974 INFO [train.py:901] (1/4) Epoch 27, batch 6150, loss[loss=0.2216, simple_loss=0.2998, pruned_loss=0.07174, over 8460.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.281, pruned_loss=0.05689, over 1617382.12 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:44,985 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 12:17:45,829 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216306.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:45,852 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216306.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:17:49,771 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.311e+02 2.985e+02 4.036e+02 8.594e+02, threshold=5.970e+02, percent-clipped=2.0 +2023-02-07 12:17:57,193 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216323.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:18,525 INFO [train.py:901] (1/4) Epoch 27, batch 6200, loss[loss=0.1828, simple_loss=0.2631, pruned_loss=0.05123, over 8339.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2801, pruned_loss=0.05651, over 1613897.54 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:28,744 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-07 12:18:42,452 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:45,892 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5347, 1.4642, 1.8323, 1.2357, 1.1801, 1.7886, 0.1934, 1.2293], + device='cuda:1'), covar=tensor([0.1558, 0.1354, 0.0419, 0.0877, 0.2305, 0.0471, 0.1950, 0.1250], + device='cuda:1'), in_proj_covar=tensor([0.0200, 0.0205, 0.0136, 0.0222, 0.0275, 0.0145, 0.0171, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-07 12:18:53,897 INFO [train.py:901] (1/4) Epoch 27, batch 6250, loss[loss=0.2175, simple_loss=0.3006, pruned_loss=0.06721, over 8619.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2799, pruned_loss=0.05641, over 1610355.97 frames. ], batch size: 31, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:58,454 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.470e+02 2.901e+02 3.405e+02 7.374e+02, threshold=5.803e+02, percent-clipped=1.0 +2023-02-07 12:19:27,767 INFO [train.py:901] (1/4) Epoch 27, batch 6300, loss[loss=0.1544, simple_loss=0.2355, pruned_loss=0.03663, over 7552.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2803, pruned_loss=0.05693, over 1611609.44 frames. ], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:19:54,188 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9559, 1.5629, 1.6977, 1.4196, 1.0407, 1.5016, 1.7814, 1.4141], + device='cuda:1'), covar=tensor([0.0554, 0.1277, 0.1731, 0.1511, 0.0630, 0.1528, 0.0728, 0.0702], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0152, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 12:19:56,913 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.18 vs. limit=5.0 +2023-02-07 12:20:01,959 INFO [train.py:901] (1/4) Epoch 27, batch 6350, loss[loss=0.2388, simple_loss=0.3018, pruned_loss=0.0879, over 8031.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2806, pruned_loss=0.05719, over 1614479.80 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:02,155 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:07,862 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.511e+02 2.994e+02 4.018e+02 7.521e+02, threshold=5.987e+02, percent-clipped=5.0 +2023-02-07 12:20:23,599 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0 +2023-02-07 12:20:30,861 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7145, 1.3908, 2.9402, 1.4685, 2.3344, 3.1115, 3.2605, 2.6916], + device='cuda:1'), covar=tensor([0.1135, 0.1671, 0.0332, 0.2005, 0.0782, 0.0311, 0.0708, 0.0541], + device='cuda:1'), in_proj_covar=tensor([0.0305, 0.0325, 0.0291, 0.0318, 0.0321, 0.0276, 0.0437, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 12:20:36,668 INFO [train.py:901] (1/4) Epoch 27, batch 6400, loss[loss=0.1702, simple_loss=0.2566, pruned_loss=0.04184, over 7807.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2813, pruned_loss=0.05791, over 1612391.69 frames. 
], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:41,589 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:53,522 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7908, 1.8350, 1.8170, 2.3357, 1.0687, 1.5345, 1.7862, 1.8734], + device='cuda:1'), covar=tensor([0.0746, 0.0777, 0.0775, 0.0376, 0.1021, 0.1295, 0.0646, 0.0701], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0243, 0.0211, 0.0203, 0.0245, 0.0248, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 12:20:58,197 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:10,669 INFO [train.py:901] (1/4) Epoch 27, batch 6450, loss[loss=0.1706, simple_loss=0.2554, pruned_loss=0.0429, over 7429.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2804, pruned_loss=0.05701, over 1612954.62 frames. ], batch size: 17, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:13,554 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:16,164 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.469e+02 2.882e+02 3.609e+02 7.919e+02, threshold=5.765e+02, percent-clipped=2.0 +2023-02-07 12:21:46,032 INFO [train.py:901] (1/4) Epoch 27, batch 6500, loss[loss=0.1948, simple_loss=0.292, pruned_loss=0.04881, over 8606.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2811, pruned_loss=0.05758, over 1612468.26 frames. ], batch size: 39, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:58,515 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216673.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:22:19,975 INFO [train.py:901] (1/4) Epoch 27, batch 6550, loss[loss=0.1739, simple_loss=0.2697, pruned_loss=0.03904, over 8250.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2817, pruned_loss=0.05756, over 1611240.62 frames. ], batch size: 24, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:25,120 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.544e+02 2.876e+02 3.743e+02 6.730e+02, threshold=5.752e+02, percent-clipped=5.0 +2023-02-07 12:22:32,730 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:22:48,282 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0 +2023-02-07 12:22:52,672 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5304, 1.7276, 2.1139, 1.7284, 1.0797, 1.8579, 2.1585, 2.0382], + device='cuda:1'), covar=tensor([0.0557, 0.1180, 0.1500, 0.1369, 0.0632, 0.1369, 0.0673, 0.0591], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0113, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-07 12:22:54,583 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 12:22:55,725 INFO [train.py:901] (1/4) Epoch 27, batch 6600, loss[loss=0.2821, simple_loss=0.3482, pruned_loss=0.108, over 8456.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2828, pruned_loss=0.05834, over 1609169.84 frames. 
], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:59,344 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0957, 1.9340, 2.3352, 2.0434, 2.3002, 2.0942, 2.0055, 1.5935], + device='cuda:1'), covar=tensor([0.4189, 0.3763, 0.1768, 0.3155, 0.2207, 0.2683, 0.1585, 0.4285], + device='cuda:1'), in_proj_covar=tensor([0.0966, 0.1021, 0.0832, 0.0992, 0.1029, 0.0930, 0.0773, 0.0852], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 12:22:59,994 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:12,955 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 12:23:17,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:29,549 INFO [train.py:901] (1/4) Epoch 27, batch 6650, loss[loss=0.1721, simple_loss=0.2562, pruned_loss=0.044, over 7708.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.05821, over 1610615.76 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:23:34,796 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.571e+02 3.099e+02 3.859e+02 9.745e+02, threshold=6.199e+02, percent-clipped=7.0 +2023-02-07 12:24:03,767 INFO [train.py:901] (1/4) Epoch 27, batch 6700, loss[loss=0.2244, simple_loss=0.2983, pruned_loss=0.07524, over 8316.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2828, pruned_loss=0.05823, over 1613331.47 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:38,627 INFO [train.py:901] (1/4) Epoch 27, batch 6750, loss[loss=0.2157, simple_loss=0.3131, pruned_loss=0.05913, over 8598.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2844, pruned_loss=0.05863, over 1615079.05 frames. ], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:43,899 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.479e+02 3.006e+02 3.687e+02 6.813e+02, threshold=6.012e+02, percent-clipped=1.0 +2023-02-07 12:25:11,172 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216953.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:12,359 INFO [train.py:901] (1/4) Epoch 27, batch 6800, loss[loss=0.2023, simple_loss=0.29, pruned_loss=0.05731, over 8101.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2857, pruned_loss=0.05919, over 1613324.35 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:24,107 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 12:25:32,325 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:41,048 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-07 12:25:47,362 INFO [train.py:901] (1/4) Epoch 27, batch 6850, loss[loss=0.1952, simple_loss=0.2874, pruned_loss=0.05146, over 8033.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2846, pruned_loss=0.05871, over 1614221.38 frames. 
], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:52,574 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.408e+02 3.097e+02 3.751e+02 9.876e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-07 12:25:55,363 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217017.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:26:10,981 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 12:26:21,087 INFO [train.py:901] (1/4) Epoch 27, batch 6900, loss[loss=0.2212, simple_loss=0.317, pruned_loss=0.06269, over 8286.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2846, pruned_loss=0.05842, over 1612628.40 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:26:29,751 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:30,521 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:40,219 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 12:26:56,825 INFO [train.py:901] (1/4) Epoch 27, batch 6950, loss[loss=0.1948, simple_loss=0.2878, pruned_loss=0.0509, over 8335.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2845, pruned_loss=0.05826, over 1614673.91 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:26:58,367 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1888, 2.0894, 2.6545, 2.2083, 2.7985, 2.2926, 2.0947, 1.6965], + device='cuda:1'), covar=tensor([0.6143, 0.5218, 0.2123, 0.4436, 0.2661, 0.3287, 0.2050, 0.5647], + device='cuda:1'), in_proj_covar=tensor([0.0966, 0.1020, 0.0833, 0.0992, 0.1030, 0.0928, 0.0772, 0.0850], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-07 12:27:02,039 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.303e+02 2.670e+02 3.410e+02 6.861e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 12:27:15,657 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217132.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:27:19,468 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 12:27:30,825 INFO [train.py:901] (1/4) Epoch 27, batch 7000, loss[loss=0.25, simple_loss=0.327, pruned_loss=0.08646, over 8500.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2839, pruned_loss=0.0581, over 1610613.84 frames. 
], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:31,047 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8897, 2.4920, 4.1384, 1.7154, 3.0284, 2.4606, 1.9739, 3.0800], + device='cuda:1'), covar=tensor([0.1849, 0.2499, 0.0821, 0.4620, 0.1795, 0.3109, 0.2327, 0.2287], + device='cuda:1'), in_proj_covar=tensor([0.0536, 0.0630, 0.0561, 0.0664, 0.0655, 0.0607, 0.0561, 0.0641], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:27:49,479 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:27:54,192 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1524, 1.3677, 4.3532, 1.6623, 3.8566, 3.6687, 3.9922, 3.8737], + device='cuda:1'), covar=tensor([0.0695, 0.5025, 0.0552, 0.4317, 0.1148, 0.0954, 0.0610, 0.0653], + device='cuda:1'), in_proj_covar=tensor([0.0673, 0.0661, 0.0730, 0.0653, 0.0743, 0.0627, 0.0632, 0.0713], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:28:05,136 INFO [train.py:901] (1/4) Epoch 27, batch 7050, loss[loss=0.2377, simple_loss=0.3139, pruned_loss=0.08071, over 8314.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2849, pruned_loss=0.05915, over 1608087.32 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:28:11,286 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.356e+02 3.046e+02 3.591e+02 8.726e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 12:28:40,066 INFO [train.py:901] (1/4) Epoch 27, batch 7100, loss[loss=0.2033, simple_loss=0.2917, pruned_loss=0.0575, over 8326.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05891, over 1610365.93 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:10,625 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5400, 2.0517, 2.8696, 1.5120, 2.1904, 1.9734, 1.7536, 2.1837], + device='cuda:1'), covar=tensor([0.2038, 0.2550, 0.0958, 0.4685, 0.1963, 0.3381, 0.2395, 0.2463], + device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0633, 0.0564, 0.0667, 0.0657, 0.0610, 0.0564, 0.0643], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:29:14,484 INFO [train.py:901] (1/4) Epoch 27, batch 7150, loss[loss=0.1984, simple_loss=0.2849, pruned_loss=0.05595, over 8583.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2842, pruned_loss=0.05841, over 1615343.98 frames. ], batch size: 39, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:19,666 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.444e+02 3.123e+02 4.113e+02 1.134e+03, threshold=6.246e+02, percent-clipped=7.0 +2023-02-07 12:29:27,888 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:29,712 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:45,942 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:49,821 INFO [train.py:901] (1/4) Epoch 27, batch 7200, loss[loss=0.1877, simple_loss=0.2696, pruned_loss=0.05287, over 7792.00 frames. 
], tot_loss[loss=0.1995, simple_loss=0.2837, pruned_loss=0.05766, over 1613683.76 frames. ], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:11,976 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217388.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:30:23,029 INFO [train.py:901] (1/4) Epoch 27, batch 7250, loss[loss=0.2038, simple_loss=0.2941, pruned_loss=0.05672, over 8359.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2842, pruned_loss=0.05799, over 1612540.99 frames. ], batch size: 24, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:23,857 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:28,396 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.296e+02 2.784e+02 3.610e+02 7.832e+02, threshold=5.568e+02, percent-clipped=2.0 +2023-02-07 12:30:28,621 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217413.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:30:45,935 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:49,265 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:58,441 INFO [train.py:901] (1/4) Epoch 27, batch 7300, loss[loss=0.1593, simple_loss=0.2495, pruned_loss=0.03459, over 8131.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.284, pruned_loss=0.05795, over 1616130.32 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:04,048 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:33,102 INFO [train.py:901] (1/4) Epoch 27, batch 7350, loss[loss=0.1875, simple_loss=0.2804, pruned_loss=0.04728, over 8238.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2834, pruned_loss=0.05772, over 1611738.26 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:36,625 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:38,501 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.322e+02 2.888e+02 3.768e+02 6.651e+02, threshold=5.777e+02, percent-clipped=4.0 +2023-02-07 12:31:59,847 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 12:32:07,063 INFO [train.py:901] (1/4) Epoch 27, batch 7400, loss[loss=0.209, simple_loss=0.2926, pruned_loss=0.06265, over 8205.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05699, over 1611061.37 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:19,152 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 12:32:42,460 INFO [train.py:901] (1/4) Epoch 27, batch 7450, loss[loss=0.2021, simple_loss=0.2787, pruned_loss=0.06269, over 7793.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.283, pruned_loss=0.05784, over 1612209.31 frames. 
], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:43,319 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8659, 1.3801, 3.5494, 1.6748, 2.5177, 3.8230, 3.9807, 3.2841], + device='cuda:1'), covar=tensor([0.1279, 0.1942, 0.0270, 0.1929, 0.0905, 0.0221, 0.0576, 0.0557], + device='cuda:1'), in_proj_covar=tensor([0.0306, 0.0322, 0.0291, 0.0318, 0.0320, 0.0276, 0.0437, 0.0306], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-07 12:32:47,778 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.478e+02 3.262e+02 4.062e+02 8.102e+02, threshold=6.523e+02, percent-clipped=5.0 +2023-02-07 12:32:58,342 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 12:33:05,996 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 12:33:16,123 INFO [train.py:901] (1/4) Epoch 27, batch 7500, loss[loss=0.2205, simple_loss=0.3045, pruned_loss=0.06828, over 8613.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2832, pruned_loss=0.05753, over 1618611.61 frames. ], batch size: 31, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:34,969 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:46,549 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:51,450 INFO [train.py:901] (1/4) Epoch 27, batch 7550, loss[loss=0.2415, simple_loss=0.3219, pruned_loss=0.08055, over 8660.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2829, pruned_loss=0.05758, over 1618002.52 frames. ], batch size: 34, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:56,752 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.428e+02 3.024e+02 3.911e+02 8.560e+02, threshold=6.047e+02, percent-clipped=1.0 +2023-02-07 12:34:03,669 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:21,849 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:25,196 INFO [train.py:901] (1/4) Epoch 27, batch 7600, loss[loss=0.199, simple_loss=0.2839, pruned_loss=0.05705, over 8296.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2827, pruned_loss=0.05732, over 1614160.90 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:01,505 INFO [train.py:901] (1/4) Epoch 27, batch 7650, loss[loss=0.1695, simple_loss=0.259, pruned_loss=0.04002, over 7969.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2829, pruned_loss=0.05739, over 1613211.29 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:06,795 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.541e+02 2.896e+02 3.920e+02 6.720e+02, threshold=5.793e+02, percent-clipped=4.0 +2023-02-07 12:35:35,073 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217854.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:35:35,687 INFO [train.py:901] (1/4) Epoch 27, batch 7700, loss[loss=0.1915, simple_loss=0.2837, pruned_loss=0.04971, over 8496.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2831, pruned_loss=0.05756, over 1616492.28 frames. 
], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:42,369 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:35:54,057 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 12:36:05,167 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 12:36:10,561 INFO [train.py:901] (1/4) Epoch 27, batch 7750, loss[loss=0.1848, simple_loss=0.2767, pruned_loss=0.04645, over 8328.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2831, pruned_loss=0.0578, over 1617105.15 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:15,958 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.515e+02 3.033e+02 3.634e+02 8.452e+02, threshold=6.066e+02, percent-clipped=4.0 +2023-02-07 12:36:18,821 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217916.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:36:45,567 INFO [train.py:901] (1/4) Epoch 27, batch 7800, loss[loss=0.2392, simple_loss=0.3124, pruned_loss=0.08299, over 8511.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.282, pruned_loss=0.05681, over 1617914.05 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:55,073 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:08,175 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7950, 2.6743, 1.8659, 2.3552, 2.2131, 1.7171, 2.1957, 2.4174], + device='cuda:1'), covar=tensor([0.1527, 0.0383, 0.1203, 0.0720, 0.0812, 0.1517, 0.1133, 0.0999], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0241, 0.0341, 0.0312, 0.0303, 0.0346, 0.0349, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-07 12:37:17,814 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4000, 1.4413, 1.3475, 1.8413, 0.7002, 1.2431, 1.3447, 1.4530], + device='cuda:1'), covar=tensor([0.0857, 0.0790, 0.0994, 0.0513, 0.1114, 0.1364, 0.0716, 0.0705], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0193, 0.0245, 0.0212, 0.0202, 0.0245, 0.0249, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-07 12:37:19,657 INFO [train.py:901] (1/4) Epoch 27, batch 7850, loss[loss=0.2168, simple_loss=0.2976, pruned_loss=0.06799, over 7525.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2826, pruned_loss=0.05742, over 1616971.05 frames. ], batch size: 71, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:37:24,964 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.277e+02 2.828e+02 3.912e+02 8.712e+02, threshold=5.655e+02, percent-clipped=7.0 +2023-02-07 12:37:33,506 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:52,845 INFO [train.py:901] (1/4) Epoch 27, batch 7900, loss[loss=0.1553, simple_loss=0.243, pruned_loss=0.03383, over 7926.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2813, pruned_loss=0.05701, over 1612983.99 frames. 
], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:38:26,204 INFO [train.py:901] (1/4) Epoch 27, batch 7950, loss[loss=0.2137, simple_loss=0.3027, pruned_loss=0.0623, over 8334.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2821, pruned_loss=0.0575, over 1612794.86 frames. ], batch size: 26, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:38:31,700 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.553e+02 3.230e+02 4.059e+02 8.354e+02, threshold=6.459e+02, percent-clipped=5.0 +2023-02-07 12:38:35,248 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:37,406 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:50,509 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:53,612 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:58,211 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8262, 6.0254, 5.1958, 2.1805, 5.2848, 5.6439, 5.4834, 5.4146], + device='cuda:1'), covar=tensor([0.0486, 0.0382, 0.1042, 0.4830, 0.0759, 0.0938, 0.1037, 0.0566], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0462, 0.0447, 0.0557, 0.0442, 0.0464, 0.0440, 0.0407], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-07 12:38:59,474 INFO [train.py:901] (1/4) Epoch 27, batch 8000, loss[loss=0.2148, simple_loss=0.2972, pruned_loss=0.06616, over 8659.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2821, pruned_loss=0.0573, over 1615890.20 frames. ], batch size: 34, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:29,224 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:32,352 INFO [train.py:901] (1/4) Epoch 27, batch 8050, loss[loss=0.173, simple_loss=0.2483, pruned_loss=0.04884, over 7428.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2811, pruned_loss=0.05688, over 1611211.90 frames. 
], batch size: 17, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:38,065 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.285e+02 2.948e+02 3.498e+02 7.136e+02, threshold=5.897e+02, percent-clipped=2.0 +2023-02-07 12:39:46,230 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:48,249 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218228.0, num_to_drop=0, layers_to_drop=set() diff --git a/log/log-train-2023-02-05-17-58-35-2 b/log/log-train-2023-02-05-17-58-35-2 new file mode 100644 index 0000000000000000000000000000000000000000..009e68a70838b2665a97a2adb47ffe9942f6bb8f --- /dev/null +++ b/log/log-train-2023-02-05-17-58-35-2 @@ -0,0 +1,25111 @@ +2023-02-05 17:58:35,364 INFO [train.py:973] (2/4) Training started +2023-02-05 17:58:35,365 INFO [train.py:983] (2/4) Device: cuda:2 +2023-02-05 17:58:35,412 INFO [train.py:992] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n07', 'IP address': '10.1.7.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-05 17:58:35,412 INFO [train.py:994] (2/4) About to create model +2023-02-05 17:58:36,048 INFO [zipformer.py:402] (2/4) At encoder stack 4, which has downsampling_factor=2, we will combine the 
outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-05 17:58:36,060 INFO [train.py:998] (2/4) Number of model parameters: 20697573 +2023-02-05 17:58:51,148 INFO [train.py:1013] (2/4) Using DDP +2023-02-05 17:58:51,427 INFO [asr_datamodule.py:420] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-05 17:58:52,644 INFO [asr_datamodule.py:224] (2/4) Enable MUSAN +2023-02-05 17:58:52,645 INFO [asr_datamodule.py:225] (2/4) About to get Musan cuts +2023-02-05 17:58:54,523 INFO [asr_datamodule.py:249] (2/4) Enable SpecAugment +2023-02-05 17:58:54,523 INFO [asr_datamodule.py:250] (2/4) Time warp factor: 80 +2023-02-05 17:58:54,523 INFO [asr_datamodule.py:260] (2/4) Num frame mask: 10 +2023-02-05 17:58:54,523 INFO [asr_datamodule.py:273] (2/4) About to create train dataset +2023-02-05 17:58:54,523 INFO [asr_datamodule.py:300] (2/4) Using DynamicBucketingSampler. +2023-02-05 17:58:54,545 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:58:57,031 INFO [asr_datamodule.py:316] (2/4) About to create train dataloader +2023-02-05 17:58:57,031 INFO [asr_datamodule.py:430] (2/4) About to get dev-clean cuts +2023-02-05 17:58:57,033 INFO [asr_datamodule.py:437] (2/4) About to get dev-other cuts +2023-02-05 17:58:57,033 INFO [asr_datamodule.py:347] (2/4) About to create dev dataset +2023-02-05 17:58:57,380 INFO [asr_datamodule.py:364] (2/4) About to create dev dataloader +2023-02-05 17:59:06,521 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:59:11,987 INFO [train.py:901] (2/4) Epoch 1, batch 0, loss[loss=7.164, simple_loss=6.476, pruned_loss=6.868, over 7655.00 frames. ], tot_loss[loss=7.164, simple_loss=6.476, pruned_loss=6.868, over 7655.00 frames. ], batch size: 19, lr: 2.50e-02, grad_scale: 2.0 +2023-02-05 17:59:11,988 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 17:59:24,181 INFO [train.py:935] (2/4) Epoch 1, validation: loss=6.888, simple_loss=6.229, pruned_loss=6.575, over 944034.00 frames. +2023-02-05 17:59:24,182 INFO [train.py:936] (2/4) Maximum memory allocated so far is 4557MB +2023-02-05 17:59:31,367 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=4.10 vs. limit=2.0 +2023-02-05 17:59:37,738 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 17:59:48,696 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=5.29 vs. limit=2.0 +2023-02-05 17:59:55,489 INFO [train.py:901] (2/4) Epoch 1, batch 50, loss[loss=1.453, simple_loss=1.287, pruned_loss=1.483, over 8366.00 frames. ], tot_loss[loss=2.149, simple_loss=1.943, pruned_loss=1.971, over 359040.72 frames. ], batch size: 24, lr: 2.75e-02, grad_scale: 0.25 +2023-02-05 17:59:56,162 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:00:11,289 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 18:00:13,743 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:28,699 INFO [train.py:901] (2/4) Epoch 1, batch 100, loss[loss=1.087, simple_loss=0.9315, pruned_loss=1.231, over 7800.00 frames. 
], tot_loss[loss=1.64, simple_loss=1.46, pruned_loss=1.614, over 641797.83 frames. ], batch size: 19, lr: 3.00e-02, grad_scale: 0.0625 +2023-02-05 18:00:28,821 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:32,052 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 18:00:32,809 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.087e+01 6.689e+01 1.862e+02 6.030e+02 6.185e+04, threshold=3.723e+02, percent-clipped=0.0 +2023-02-05 18:01:00,488 INFO [train.py:901] (2/4) Epoch 1, batch 150, loss[loss=1.113, simple_loss=0.95, pruned_loss=1.183, over 8342.00 frames. ], tot_loss[loss=1.408, simple_loss=1.238, pruned_loss=1.438, over 861772.80 frames. ], batch size: 26, lr: 3.25e-02, grad_scale: 0.0625 +2023-02-05 18:01:14,169 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=44.96 vs. limit=5.0 +2023-02-05 18:01:34,365 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=6.87 vs. limit=2.0 +2023-02-05 18:01:34,586 INFO [train.py:901] (2/4) Epoch 1, batch 200, loss[loss=1.017, simple_loss=0.8697, pruned_loss=1.003, over 8367.00 frames. ], tot_loss[loss=1.266, simple_loss=1.103, pruned_loss=1.303, over 1028310.46 frames. ], batch size: 49, lr: 3.50e-02, grad_scale: 0.125 +2023-02-05 18:01:36,296 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=31.05 vs. limit=5.0 +2023-02-05 18:01:37,983 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.848e+01 5.119e+01 6.630e+01 8.708e+01 3.236e+02, threshold=1.326e+02, percent-clipped=1.0 +2023-02-05 18:01:39,357 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=4.37 vs. limit=2.0 +2023-02-05 18:01:40,247 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6301, 4.6300, 4.6300, 4.6301, 4.6301, 4.6301, 4.6301, 4.6301], + device='cuda:2'), covar=tensor([2.6490e-05, 2.1035e-05, 5.3192e-05, 4.7522e-05, 3.6533e-05, 4.2642e-05, + 2.0418e-05, 3.2286e-05], device='cuda:2'), in_proj_covar=tensor([0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014, 0.0014], + device='cuda:2'), out_proj_covar=tensor([9.3120e-06, 9.4909e-06, 9.3863e-06, 9.1082e-06, 9.5719e-06, 9.2246e-06, + 9.5521e-06, 9.3924e-06], device='cuda:2') +2023-02-05 18:02:05,438 INFO [train.py:901] (2/4) Epoch 1, batch 250, loss[loss=0.9433, simple_loss=0.7969, pruned_loss=0.9224, over 8583.00 frames. ], tot_loss[loss=1.179, simple_loss=1.019, pruned_loss=1.205, over 1158044.96 frames. ], batch size: 31, lr: 3.75e-02, grad_scale: 0.125 +2023-02-05 18:02:14,822 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 18:02:14,929 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9630, 2.9630, 2.9630, 2.9630, 2.9630, 2.9630, 2.9630, 2.9630], + device='cuda:2'), covar=tensor([3.2164e-05, 3.3923e-05, 5.4673e-05, 4.8642e-05, 3.3072e-05, 3.6856e-05, + 5.7300e-05, 3.8083e-05], device='cuda:2'), in_proj_covar=tensor([0.0013, 0.0013, 0.0013, 0.0014, 0.0013, 0.0013, 0.0014, 0.0013], + device='cuda:2'), out_proj_covar=tensor([9.1469e-06, 9.0843e-06, 9.1827e-06, 8.9071e-06, 9.1140e-06, 8.9794e-06, + 8.9708e-06, 8.8985e-06], device='cuda:2') +2023-02-05 18:02:17,543 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=8.68 vs. 
limit=2.0 +2023-02-05 18:02:22,941 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 18:02:37,911 INFO [train.py:901] (2/4) Epoch 1, batch 300, loss[loss=1.009, simple_loss=0.8393, pruned_loss=0.987, over 8287.00 frames. ], tot_loss[loss=1.12, simple_loss=0.9605, pruned_loss=1.135, over 1259241.78 frames. ], batch size: 23, lr: 4.00e-02, grad_scale: 0.25 +2023-02-05 18:02:42,331 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=306.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:02:42,690 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.041e+01 5.570e+01 7.201e+01 9.677e+01 1.807e+02, threshold=1.440e+02, percent-clipped=6.0 +2023-02-05 18:02:47,402 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=314.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:02:48,870 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0 +2023-02-05 18:03:10,257 INFO [train.py:901] (2/4) Epoch 1, batch 350, loss[loss=1.078, simple_loss=0.8901, pruned_loss=1.031, over 8322.00 frames. ], tot_loss[loss=1.084, simple_loss=0.9222, pruned_loss=1.084, over 1344327.74 frames. ], batch size: 25, lr: 4.25e-02, grad_scale: 0.25 +2023-02-05 18:03:42,314 INFO [train.py:901] (2/4) Epoch 1, batch 400, loss[loss=0.8752, simple_loss=0.7162, pruned_loss=0.8232, over 7981.00 frames. ], tot_loss[loss=1.053, simple_loss=0.8887, pruned_loss=1.04, over 1402798.25 frames. ], batch size: 21, lr: 4.50e-02, grad_scale: 0.5 +2023-02-05 18:03:44,610 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=405.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:03:45,466 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 4.847e+01 5.714e+01 6.661e+01 8.261e+01 1.252e+02, threshold=1.332e+02, percent-clipped=0.0 +2023-02-05 18:03:55,288 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=421.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:04:11,519 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=445.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:04:15,508 INFO [train.py:901] (2/4) Epoch 1, batch 450, loss[loss=0.9806, simple_loss=0.8007, pruned_loss=0.8916, over 8511.00 frames. ], tot_loss[loss=1.03, simple_loss=0.8627, pruned_loss=1.002, over 1449653.42 frames. ], batch size: 28, lr: 4.75e-02, grad_scale: 0.5 +2023-02-05 18:04:36,885 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=5.64 vs. limit=5.0 +2023-02-05 18:04:45,722 INFO [train.py:901] (2/4) Epoch 1, batch 500, loss[loss=0.9982, simple_loss=0.8112, pruned_loss=0.8869, over 8352.00 frames. ], tot_loss[loss=1.012, simple_loss=0.8426, pruned_loss=0.9674, over 1487974.45 frames. ], batch size: 24, lr: 4.99e-02, grad_scale: 1.0 +2023-02-05 18:04:47,282 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=7.51 vs. limit=2.0 +2023-02-05 18:04:49,478 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.283e+01 6.268e+01 7.626e+01 9.977e+01 2.238e+02, threshold=1.525e+02, percent-clipped=10.0 +2023-02-05 18:05:16,926 INFO [train.py:901] (2/4) Epoch 1, batch 550, loss[loss=0.8864, simple_loss=0.726, pruned_loss=0.7485, over 8099.00 frames. ], tot_loss[loss=0.9934, simple_loss=0.8237, pruned_loss=0.9289, over 1514485.22 frames. 
], batch size: 23, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:22,229 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=560.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:30,975 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=8.94 vs. limit=5.0 +2023-02-05 18:05:34,657 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=580.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:39,265 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=586.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:47,855 INFO [train.py:901] (2/4) Epoch 1, batch 600, loss[loss=0.8791, simple_loss=0.7289, pruned_loss=0.7012, over 8093.00 frames. ], tot_loss[loss=0.9781, simple_loss=0.8099, pruned_loss=0.8897, over 1534892.54 frames. ], batch size: 21, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:51,146 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 5.986e+01 8.101e+01 1.064e+02 1.512e+02 3.340e+02, threshold=2.128e+02, percent-clipped=22.0 +2023-02-05 18:05:51,944 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=608.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:57,565 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 18:06:15,545 INFO [train.py:901] (2/4) Epoch 1, batch 650, loss[loss=0.7856, simple_loss=0.656, pruned_loss=0.6015, over 7651.00 frames. ], tot_loss[loss=0.958, simple_loss=0.7941, pruned_loss=0.8449, over 1554479.62 frames. ], batch size: 19, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:16,980 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=5.07 vs. limit=2.0 +2023-02-05 18:06:20,641 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=658.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:06:25,234 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=8.69 vs. limit=5.0 +2023-02-05 18:06:31,053 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=677.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:06:35,971 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=2.62 vs. limit=2.0 +2023-02-05 18:06:44,409 INFO [train.py:901] (2/4) Epoch 1, batch 700, loss[loss=0.8922, simple_loss=0.7397, pruned_loss=0.6791, over 8091.00 frames. ], tot_loss[loss=0.9333, simple_loss=0.7755, pruned_loss=0.7976, over 1566963.43 frames. ], batch size: 21, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:45,066 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=702.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:48,201 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 2.109e+02 3.132e+02 4.412e+02 1.990e+03, threshold=6.264e+02, percent-clipped=73.0 +2023-02-05 18:07:14,475 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=749.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:07:15,369 INFO [train.py:901] (2/4) Epoch 1, batch 750, loss[loss=0.6782, simple_loss=0.5795, pruned_loss=0.4738, over 7427.00 frames. ], tot_loss[loss=0.9072, simple_loss=0.7563, pruned_loss=0.7508, over 1581831.75 frames. ], batch size: 17, lr: 4.97e-02, grad_scale: 1.0 +2023-02-05 18:07:25,623 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-05 18:07:26,856 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=773.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:07:32,325 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 18:07:43,624 INFO [train.py:901] (2/4) Epoch 1, batch 800, loss[loss=0.7198, simple_loss=0.6115, pruned_loss=0.5011, over 7689.00 frames. ], tot_loss[loss=0.8789, simple_loss=0.7356, pruned_loss=0.705, over 1588781.46 frames. ], batch size: 18, lr: 4.97e-02, grad_scale: 2.0 +2023-02-05 18:07:46,599 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.528e+02 3.354e+02 4.455e+02 1.086e+03, threshold=6.708e+02, percent-clipped=4.0 +2023-02-05 18:07:51,303 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=816.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:07:58,528 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2958, 1.3372, 1.0677, 1.2178, 1.1082, 1.0115, 1.1676, 1.2563], + device='cuda:2'), covar=tensor([0.6132, 0.7841, 0.9596, 0.7309, 0.9089, 0.9021, 0.7759, 0.9574], + device='cuda:2'), in_proj_covar=tensor([0.0058, 0.0067, 0.0073, 0.0067, 0.0073, 0.0072, 0.0066, 0.0077], + device='cuda:2'), out_proj_covar=tensor([4.1819e-05, 4.9480e-05, 4.9706e-05, 4.6487e-05, 4.9191e-05, 4.6920e-05, + 4.3869e-05, 5.3112e-05], device='cuda:2') +2023-02-05 18:08:05,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=841.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:11,180 INFO [train.py:901] (2/4) Epoch 1, batch 850, loss[loss=0.7833, simple_loss=0.675, pruned_loss=0.5215, over 8458.00 frames. ], tot_loss[loss=0.8563, simple_loss=0.7195, pruned_loss=0.6662, over 1594074.66 frames. ], batch size: 27, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:11,882 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7108, 1.0789, 2.0276, 1.5175, 1.2097, 1.1850, 1.1840, 1.7279], + device='cuda:2'), covar=tensor([1.0189, 3.1751, 0.8547, 1.0032, 1.8385, 2.0228, 2.1747, 0.8871], + device='cuda:2'), in_proj_covar=tensor([0.0044, 0.0060, 0.0043, 0.0044, 0.0054, 0.0067, 0.0067, 0.0040], + device='cuda:2'), out_proj_covar=tensor([2.5911e-05, 4.3421e-05, 2.4613e-05, 2.3377e-05, 3.1656e-05, 4.4629e-05, + 3.9660e-05, 2.2601e-05], device='cuda:2') +2023-02-05 18:08:22,426 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=864.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:22,899 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=865.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:42,859 INFO [train.py:901] (2/4) Epoch 1, batch 900, loss[loss=0.7653, simple_loss=0.6598, pruned_loss=0.5028, over 8528.00 frames. ], tot_loss[loss=0.8305, simple_loss=0.7009, pruned_loss=0.6277, over 1597467.70 frames. 
], batch size: 28, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:46,407 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 3.070e+02 3.818e+02 4.702e+02 7.623e+02, threshold=7.636e+02, percent-clipped=5.0 +2023-02-05 18:08:55,931 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=924.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:58,989 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=930.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:09:10,090 INFO [train.py:901] (2/4) Epoch 1, batch 950, loss[loss=0.7806, simple_loss=0.6694, pruned_loss=0.512, over 8465.00 frames. ], tot_loss[loss=0.8084, simple_loss=0.685, pruned_loss=0.5945, over 1602868.80 frames. ], batch size: 27, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:09:10,749 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=952.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:26,428 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 18:09:36,685 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4710, 1.6700, 3.2811, 2.1683, 2.6066, 3.1310, 1.4641, 1.3278], + device='cuda:2'), covar=tensor([1.3165, 2.2754, 0.3017, 0.9729, 1.1421, 0.6313, 1.8099, 1.8365], + device='cuda:2'), in_proj_covar=tensor([0.0060, 0.0068, 0.0038, 0.0055, 0.0060, 0.0048, 0.0073, 0.0065], + device='cuda:2'), out_proj_covar=tensor([3.9192e-05, 4.7741e-05, 2.1334e-05, 3.4574e-05, 3.9925e-05, 2.8983e-05, + 4.7570e-05, 4.4730e-05], device='cuda:2') +2023-02-05 18:09:37,672 INFO [train.py:901] (2/4) Epoch 1, batch 1000, loss[loss=0.7542, simple_loss=0.6571, pruned_loss=0.4752, over 8355.00 frames. ], tot_loss[loss=0.7845, simple_loss=0.668, pruned_loss=0.5616, over 1606355.82 frames. ], batch size: 24, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:09:40,951 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 3.215e+02 4.159e+02 4.799e+02 1.770e+03, threshold=8.319e+02, percent-clipped=6.0 +2023-02-05 18:09:52,911 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1029.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:53,904 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 18:09:59,186 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1039.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:02,615 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1045.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:05,084 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 18:10:05,582 INFO [train.py:901] (2/4) Epoch 1, batch 1050, loss[loss=0.6874, simple_loss=0.5967, pruned_loss=0.432, over 8141.00 frames. ], tot_loss[loss=0.7644, simple_loss=0.6535, pruned_loss=0.5342, over 1602933.40 frames. ], batch size: 22, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:10:07,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1054.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:14,061 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1067.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:33,046 INFO [train.py:901] (2/4) Epoch 1, batch 1100, loss[loss=0.6044, simple_loss=0.5278, pruned_loss=0.3727, over 7536.00 frames. 
], tot_loss[loss=0.7451, simple_loss=0.6398, pruned_loss=0.5085, over 1601890.91 frames. ], batch size: 18, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:10:36,093 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.463e+02 4.480e+02 5.452e+02 1.232e+03, threshold=8.959e+02, percent-clipped=3.0 +2023-02-05 18:10:43,739 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1120.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:56,880 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1145.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:59,912 INFO [train.py:901] (2/4) Epoch 1, batch 1150, loss[loss=0.703, simple_loss=0.6215, pruned_loss=0.4212, over 8499.00 frames. ], tot_loss[loss=0.7284, simple_loss=0.6281, pruned_loss=0.4863, over 1604852.76 frames. ], batch size: 29, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:11:01,584 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 18:11:11,048 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.07 vs. limit=2.0 +2023-02-05 18:11:11,758 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1171.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:11:27,723 INFO [train.py:901] (2/4) Epoch 1, batch 1200, loss[loss=0.6869, simple_loss=0.594, pruned_loss=0.4235, over 8497.00 frames. ], tot_loss[loss=0.7137, simple_loss=0.6181, pruned_loss=0.4667, over 1611427.83 frames. ], batch size: 26, lr: 4.93e-02, grad_scale: 4.0 +2023-02-05 18:11:30,964 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.424e+02 4.173e+02 5.178e+02 8.029e+02, threshold=8.346e+02, percent-clipped=0.0 +2023-02-05 18:11:32,133 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1209.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:11:45,904 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0807, 1.3445, 1.3680, 1.0871, 1.1982, 1.3888, 0.7115, 0.9102], + device='cuda:2'), covar=tensor([0.5114, 0.4500, 0.4305, 0.6412, 0.7333, 0.3541, 0.8490, 0.6987], + device='cuda:2'), in_proj_covar=tensor([0.0069, 0.0072, 0.0068, 0.0070, 0.0088, 0.0066, 0.0083, 0.0079], + device='cuda:2'), out_proj_covar=tensor([4.7009e-05, 4.7788e-05, 4.4192e-05, 4.6527e-05, 6.3388e-05, 3.9561e-05, + 5.5886e-05, 5.1384e-05], device='cuda:2') +2023-02-05 18:11:54,243 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9688, 1.3314, 2.5612, 1.3441, 1.8403, 1.8713, 1.1259, 1.5104], + device='cuda:2'), covar=tensor([1.3166, 1.7315, 0.3142, 0.9488, 1.1208, 0.6498, 1.2656, 1.3139], + device='cuda:2'), in_proj_covar=tensor([0.0069, 0.0074, 0.0043, 0.0061, 0.0073, 0.0055, 0.0078, 0.0077], + device='cuda:2'), out_proj_covar=tensor([4.6758e-05, 5.1502e-05, 2.4205e-05, 3.9120e-05, 5.0068e-05, 3.4794e-05, + 4.9914e-05, 5.3807e-05], device='cuda:2') +2023-02-05 18:11:56,782 INFO [train.py:901] (2/4) Epoch 1, batch 1250, loss[loss=0.6093, simple_loss=0.5432, pruned_loss=0.356, over 8250.00 frames. ], tot_loss[loss=0.7012, simple_loss=0.6093, pruned_loss=0.4505, over 1606753.31 frames. 
], batch size: 22, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:21,163 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1295.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:24,272 INFO [train.py:901] (2/4) Epoch 1, batch 1300, loss[loss=0.6638, simple_loss=0.5914, pruned_loss=0.3862, over 7819.00 frames. ], tot_loss[loss=0.6901, simple_loss=0.602, pruned_loss=0.4355, over 1610130.10 frames. ], batch size: 20, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:24,479 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1301.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:27,427 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 3.917e+02 4.747e+02 6.152e+02 9.080e+02, threshold=9.493e+02, percent-clipped=1.0 +2023-02-05 18:12:30,301 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0625, 0.8300, 0.9138, 1.1842, 0.6064, 0.8213, 0.7952, 1.0749], + device='cuda:2'), covar=tensor([1.0489, 1.3925, 1.0122, 0.5149, 1.2366, 1.3617, 1.1965, 1.1750], + device='cuda:2'), in_proj_covar=tensor([0.0126, 0.0123, 0.0112, 0.0097, 0.0134, 0.0137, 0.0127, 0.0133], + device='cuda:2'), out_proj_covar=tensor([8.6420e-05, 8.6967e-05, 8.0697e-05, 5.8041e-05, 9.5128e-05, 9.3522e-05, + 9.0672e-05, 9.1660e-05], device='cuda:2') +2023-02-05 18:12:34,731 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1320.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:36,272 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1323.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:36,784 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1324.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:37,937 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1326.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:51,930 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1348.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:53,374 INFO [train.py:901] (2/4) Epoch 1, batch 1350, loss[loss=0.6161, simple_loss=0.5506, pruned_loss=0.3552, over 8078.00 frames. ], tot_loss[loss=0.6774, simple_loss=0.5934, pruned_loss=0.4206, over 1607357.92 frames. ], batch size: 21, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:22,445 INFO [train.py:901] (2/4) Epoch 1, batch 1400, loss[loss=0.6686, simple_loss=0.5952, pruned_loss=0.3859, over 8251.00 frames. ], tot_loss[loss=0.6652, simple_loss=0.5854, pruned_loss=0.4065, over 1612873.53 frames. ], batch size: 24, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:25,824 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.466e+02 4.520e+02 5.912e+02 1.396e+03, threshold=9.040e+02, percent-clipped=6.0 +2023-02-05 18:13:50,939 INFO [train.py:901] (2/4) Epoch 1, batch 1450, loss[loss=0.6044, simple_loss=0.5534, pruned_loss=0.3343, over 8616.00 frames. ], tot_loss[loss=0.6548, simple_loss=0.5783, pruned_loss=0.3947, over 1611459.68 frames. ], batch size: 39, lr: 4.90e-02, grad_scale: 4.0 +2023-02-05 18:13:51,590 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1452.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:13:54,963 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-05 18:14:03,117 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1149, 1.9115, 1.3579, 3.4861, 2.3641, 2.5654, 2.0757, 3.3724], + device='cuda:2'), covar=tensor([0.5288, 0.9685, 1.8939, 0.1409, 0.7151, 0.5328, 0.8154, 0.1918], + device='cuda:2'), in_proj_covar=tensor([0.0135, 0.0157, 0.0225, 0.0099, 0.0151, 0.0144, 0.0177, 0.0131], + device='cuda:2'), out_proj_covar=tensor([8.6278e-05, 1.1000e-04, 1.4765e-04, 6.2294e-05, 1.0497e-04, 8.9922e-05, + 1.1418e-04, 7.6896e-05], device='cuda:2') +2023-02-05 18:14:21,297 INFO [train.py:901] (2/4) Epoch 1, batch 1500, loss[loss=0.4871, simple_loss=0.4435, pruned_loss=0.2709, over 7430.00 frames. ], tot_loss[loss=0.6437, simple_loss=0.5706, pruned_loss=0.3832, over 1609638.73 frames. ], batch size: 17, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:14:22,376 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.12 vs. limit=2.0 +2023-02-05 18:14:24,733 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.084e+02 4.059e+02 4.884e+02 5.820e+02 1.191e+03, threshold=9.769e+02, percent-clipped=4.0 +2023-02-05 18:14:29,250 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1515.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:14:50,474 INFO [train.py:901] (2/4) Epoch 1, batch 1550, loss[loss=0.6331, simple_loss=0.5639, pruned_loss=0.3607, over 8312.00 frames. ], tot_loss[loss=0.6371, simple_loss=0.5659, pruned_loss=0.3754, over 1610204.72 frames. ], batch size: 25, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:15:08,649 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1580.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:10,861 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1584.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:20,750 INFO [train.py:901] (2/4) Epoch 1, batch 1600, loss[loss=0.6757, simple_loss=0.598, pruned_loss=0.3865, over 8615.00 frames. ], tot_loss[loss=0.6322, simple_loss=0.5626, pruned_loss=0.3692, over 1612563.73 frames. ], batch size: 39, lr: 4.88e-02, grad_scale: 8.0 +2023-02-05 18:15:23,989 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1605.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:24,958 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.844e+02 4.893e+02 6.465e+02 8.597e+02 2.177e+03, threshold=1.293e+03, percent-clipped=12.0 +2023-02-05 18:15:37,781 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1629.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:38,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1630.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:50,682 INFO [train.py:901] (2/4) Epoch 1, batch 1650, loss[loss=0.6347, simple_loss=0.5735, pruned_loss=0.3535, over 8191.00 frames. ], tot_loss[loss=0.6269, simple_loss=0.5598, pruned_loss=0.3625, over 1616981.38 frames. ], batch size: 23, lr: 4.87e-02, grad_scale: 8.0 +2023-02-05 18:15:54,304 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. 
limit=2.0 +2023-02-05 18:16:13,744 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8345, 2.3321, 3.1848, 2.4130, 2.5359, 3.4083, 3.4891, 3.2347], + device='cuda:2'), covar=tensor([0.2115, 0.3258, 0.0435, 0.2043, 0.1620, 0.0400, 0.0288, 0.0547], + device='cuda:2'), in_proj_covar=tensor([0.0125, 0.0137, 0.0075, 0.0120, 0.0104, 0.0067, 0.0063, 0.0083], + device='cuda:2'), out_proj_covar=tensor([8.9322e-05, 1.0254e-04, 4.2933e-05, 7.8463e-05, 7.1411e-05, 3.8906e-05, + 3.5956e-05, 4.9323e-05], device='cuda:2') +2023-02-05 18:16:21,965 INFO [train.py:901] (2/4) Epoch 1, batch 1700, loss[loss=0.5989, simple_loss=0.5377, pruned_loss=0.3349, over 8352.00 frames. ], tot_loss[loss=0.6192, simple_loss=0.5555, pruned_loss=0.3543, over 1617660.71 frames. ], batch size: 24, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:16:25,354 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.633e+02 4.287e+02 5.230e+02 6.455e+02 2.107e+03, threshold=1.046e+03, percent-clipped=2.0 +2023-02-05 18:16:47,459 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-05 18:16:51,240 INFO [train.py:901] (2/4) Epoch 1, batch 1750, loss[loss=0.4947, simple_loss=0.4636, pruned_loss=0.2636, over 6788.00 frames. ], tot_loss[loss=0.6138, simple_loss=0.5527, pruned_loss=0.3481, over 1618518.39 frames. ], batch size: 15, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:17:11,148 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4938, 1.6440, 4.2318, 2.9136, 4.1564, 3.9189, 3.8618, 3.7726], + device='cuda:2'), covar=tensor([0.0173, 0.2616, 0.0230, 0.0627, 0.0264, 0.0217, 0.0242, 0.0368], + device='cuda:2'), in_proj_covar=tensor([0.0048, 0.0134, 0.0064, 0.0078, 0.0065, 0.0060, 0.0071, 0.0081], + device='cuda:2'), out_proj_covar=tensor([2.8501e-05, 8.5486e-05, 3.7921e-05, 5.1943e-05, 3.6545e-05, 3.4185e-05, + 4.2278e-05, 4.8152e-05], device='cuda:2') +2023-02-05 18:17:18,071 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1796.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:17:21,115 INFO [train.py:901] (2/4) Epoch 1, batch 1800, loss[loss=0.5591, simple_loss=0.5284, pruned_loss=0.295, over 8673.00 frames. ], tot_loss[loss=0.6039, simple_loss=0.5462, pruned_loss=0.3396, over 1615340.67 frames. ], batch size: 34, lr: 4.85e-02, grad_scale: 8.0 +2023-02-05 18:17:24,721 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.688e+02 4.554e+02 5.596e+02 6.733e+02 1.418e+03, threshold=1.119e+03, percent-clipped=4.0 +2023-02-05 18:17:52,119 INFO [train.py:901] (2/4) Epoch 1, batch 1850, loss[loss=0.5874, simple_loss=0.5335, pruned_loss=0.3224, over 8365.00 frames. ], tot_loss[loss=0.5973, simple_loss=0.5418, pruned_loss=0.3335, over 1616639.27 frames. 
], batch size: 24, lr: 4.84e-02, grad_scale: 8.0 +2023-02-05 18:17:55,088 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1856.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:06,738 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1875.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:13,295 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1886.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:14,343 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:21,896 INFO [train.py:901] (2/4) Epoch 1, batch 1900, loss[loss=0.5729, simple_loss=0.5403, pruned_loss=0.3028, over 8454.00 frames. ], tot_loss[loss=0.5896, simple_loss=0.5373, pruned_loss=0.3267, over 1616792.05 frames. ], batch size: 25, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:25,476 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.326e+02 4.483e+02 5.242e+02 7.443e+02 2.270e+03, threshold=1.048e+03, percent-clipped=7.0 +2023-02-05 18:18:27,930 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1911.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:27,945 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1911.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:37,733 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:45,005 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 18:18:52,609 INFO [train.py:901] (2/4) Epoch 1, batch 1950, loss[loss=0.5976, simple_loss=0.5504, pruned_loss=0.3227, over 8328.00 frames. ], tot_loss[loss=0.5892, simple_loss=0.5371, pruned_loss=0.3252, over 1615701.81 frames. ], batch size: 25, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:55,539 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 18:19:05,749 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:19:11,336 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 18:19:12,085 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8047, 1.8299, 1.0994, 1.5557, 1.7272, 1.0125, 1.6067, 2.2253], + device='cuda:2'), covar=tensor([0.3789, 0.4028, 0.5655, 0.4000, 0.3799, 0.6380, 0.3388, 0.2639], + device='cuda:2'), in_proj_covar=tensor([0.0133, 0.0124, 0.0112, 0.0117, 0.0143, 0.0129, 0.0113, 0.0118], + device='cuda:2'), out_proj_covar=tensor([1.0037e-04, 9.0365e-05, 8.5071e-05, 8.8396e-05, 1.0665e-04, 9.4161e-05, + 8.7248e-05, 8.9818e-05], device='cuda:2') +2023-02-05 18:19:23,723 INFO [train.py:901] (2/4) Epoch 1, batch 2000, loss[loss=0.5661, simple_loss=0.5401, pruned_loss=0.2961, over 8619.00 frames. ], tot_loss[loss=0.5828, simple_loss=0.5334, pruned_loss=0.3197, over 1613653.06 frames. 
], batch size: 31, lr: 4.82e-02, grad_scale: 8.0 +2023-02-05 18:19:27,549 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.734e+02 4.600e+02 5.655e+02 7.771e+02 1.691e+03, threshold=1.131e+03, percent-clipped=5.0 +2023-02-05 18:19:50,359 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2043.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:19:56,663 INFO [train.py:901] (2/4) Epoch 1, batch 2050, loss[loss=0.5672, simple_loss=0.5329, pruned_loss=0.3007, over 8286.00 frames. ], tot_loss[loss=0.5751, simple_loss=0.5295, pruned_loss=0.3132, over 1613646.71 frames. ], batch size: 23, lr: 4.81e-02, grad_scale: 8.0 +2023-02-05 18:20:17,890 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5776, 1.5264, 2.5126, 1.4633, 2.0870, 2.8933, 2.9682, 2.4555], + device='cuda:2'), covar=tensor([0.2911, 0.3557, 0.0492, 0.3073, 0.1418, 0.0328, 0.0307, 0.0454], + device='cuda:2'), in_proj_covar=tensor([0.0168, 0.0180, 0.0096, 0.0165, 0.0144, 0.0084, 0.0079, 0.0096], + device='cuda:2'), out_proj_covar=tensor([1.1817e-04, 1.2844e-04, 5.8833e-05, 1.1005e-04, 1.0260e-04, 5.3408e-05, + 4.6832e-05, 6.1049e-05], device='cuda:2') +2023-02-05 18:20:21,051 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2088.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:20:29,072 INFO [train.py:901] (2/4) Epoch 1, batch 2100, loss[loss=0.5522, simple_loss=0.5284, pruned_loss=0.288, over 8343.00 frames. ], tot_loss[loss=0.5707, simple_loss=0.5277, pruned_loss=0.3091, over 1611808.85 frames. ], batch size: 26, lr: 4.80e-02, grad_scale: 16.0 +2023-02-05 18:20:32,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.532e+02 4.654e+02 5.875e+02 8.240e+02 2.515e+03, threshold=1.175e+03, percent-clipped=11.0 +2023-02-05 18:21:01,648 INFO [train.py:901] (2/4) Epoch 1, batch 2150, loss[loss=0.4429, simple_loss=0.4299, pruned_loss=0.2279, over 7935.00 frames. ], tot_loss[loss=0.5608, simple_loss=0.5218, pruned_loss=0.3016, over 1611613.52 frames. ], batch size: 20, lr: 4.79e-02, grad_scale: 16.0 +2023-02-05 18:21:11,759 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2167.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:21:29,905 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2192.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:21:35,018 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:35,574 INFO [train.py:901] (2/4) Epoch 1, batch 2200, loss[loss=0.485, simple_loss=0.4651, pruned_loss=0.2525, over 7803.00 frames. ], tot_loss[loss=0.5565, simple_loss=0.5199, pruned_loss=0.2979, over 1615110.02 frames. ], batch size: 19, lr: 4.78e-02, grad_scale: 16.0 +2023-02-05 18:21:37,175 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. 
limit=2.0 +2023-02-05 18:21:39,335 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.979e+02 3.885e+02 5.100e+02 6.280e+02 1.293e+03, threshold=1.020e+03, percent-clipped=3.0 +2023-02-05 18:21:46,995 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:55,782 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:22:07,870 INFO [train.py:901] (2/4) Epoch 1, batch 2250, loss[loss=0.5949, simple_loss=0.5616, pruned_loss=0.3141, over 8334.00 frames. ], tot_loss[loss=0.5519, simple_loss=0.5178, pruned_loss=0.294, over 1615207.91 frames. ], batch size: 25, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:41,040 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2299.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:22:42,091 INFO [train.py:901] (2/4) Epoch 1, batch 2300, loss[loss=0.4525, simple_loss=0.4323, pruned_loss=0.2364, over 7554.00 frames. ], tot_loss[loss=0.5448, simple_loss=0.5133, pruned_loss=0.289, over 1611272.47 frames. ], batch size: 18, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:45,952 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.442e+02 5.272e+02 6.513e+02 7.975e+02 1.884e+03, threshold=1.303e+03, percent-clipped=9.0 +2023-02-05 18:22:51,196 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2315.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:22:56,966 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2324.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:03,177 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2334.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:09,695 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2344.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:12,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2347.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:14,708 INFO [train.py:901] (2/4) Epoch 1, batch 2350, loss[loss=0.4633, simple_loss=0.4707, pruned_loss=0.2279, over 8330.00 frames. ], tot_loss[loss=0.5392, simple_loss=0.5102, pruned_loss=0.2847, over 1606326.26 frames. ], batch size: 25, lr: 4.76e-02, grad_scale: 16.0 +2023-02-05 18:23:19,250 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2358.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:21,088 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5141, 1.3793, 1.4629, 1.6497, 1.2357, 1.1624, 0.9843, 1.6211], + device='cuda:2'), covar=tensor([0.2647, 0.2618, 0.2100, 0.0985, 0.2899, 0.3164, 0.3620, 0.2316], + device='cuda:2'), in_proj_covar=tensor([0.0183, 0.0177, 0.0157, 0.0128, 0.0222, 0.0198, 0.0219, 0.0184], + device='cuda:2'), out_proj_covar=tensor([1.3583e-04, 1.3226e-04, 1.2538e-04, 8.9005e-05, 1.6174e-04, 1.4613e-04, + 1.6180e-04, 1.3970e-04], device='cuda:2') +2023-02-05 18:23:26,041 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2369.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:46,437 INFO [train.py:901] (2/4) Epoch 1, batch 2400, loss[loss=0.5104, simple_loss=0.4952, pruned_loss=0.2628, over 8526.00 frames. ], tot_loss[loss=0.5354, simple_loss=0.5085, pruned_loss=0.2816, over 1609826.72 frames. 
], batch size: 28, lr: 4.75e-02, grad_scale: 16.0 +2023-02-05 18:23:50,348 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.591e+02 4.467e+02 5.905e+02 7.151e+02 1.301e+03, threshold=1.181e+03, percent-clipped=0.0 +2023-02-05 18:24:16,548 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. limit=2.0 +2023-02-05 18:24:20,801 INFO [train.py:901] (2/4) Epoch 1, batch 2450, loss[loss=0.5304, simple_loss=0.5139, pruned_loss=0.2735, over 8132.00 frames. ], tot_loss[loss=0.5332, simple_loss=0.5079, pruned_loss=0.2797, over 1614520.76 frames. ], batch size: 22, lr: 4.74e-02, grad_scale: 16.0 +2023-02-05 18:24:52,767 INFO [train.py:901] (2/4) Epoch 1, batch 2500, loss[loss=0.6053, simple_loss=0.5438, pruned_loss=0.3334, over 6977.00 frames. ], tot_loss[loss=0.5277, simple_loss=0.5042, pruned_loss=0.2759, over 1611009.03 frames. ], batch size: 71, lr: 4.73e-02, grad_scale: 16.0 +2023-02-05 18:24:54,530 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 18:24:56,548 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.099e+02 5.238e+02 6.448e+02 8.237e+02 1.660e+03, threshold=1.290e+03, percent-clipped=6.0 +2023-02-05 18:24:58,276 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 18:25:25,592 INFO [train.py:901] (2/4) Epoch 1, batch 2550, loss[loss=0.4743, simple_loss=0.4834, pruned_loss=0.2326, over 7650.00 frames. ], tot_loss[loss=0.5252, simple_loss=0.5025, pruned_loss=0.2741, over 1614790.43 frames. ], batch size: 19, lr: 4.72e-02, grad_scale: 16.0 +2023-02-05 18:25:38,472 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2571.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:25:51,085 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2590.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:25:52,533 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.83 vs. limit=2.0 +2023-02-05 18:25:54,860 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2596.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:25:57,891 INFO [train.py:901] (2/4) Epoch 1, batch 2600, loss[loss=0.4654, simple_loss=0.4657, pruned_loss=0.2326, over 8237.00 frames. ], tot_loss[loss=0.5192, simple_loss=0.4989, pruned_loss=0.2699, over 1614168.11 frames. 
], batch size: 22, lr: 4.71e-02, grad_scale: 16.0 +2023-02-05 18:25:59,372 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2603.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:26:01,609 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.388e+02 4.352e+02 5.534e+02 7.344e+02 1.370e+03, threshold=1.107e+03, percent-clipped=3.0 +2023-02-05 18:26:06,886 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2615.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:26:15,228 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2628.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:26:24,305 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3763, 2.1351, 4.0811, 4.1128, 2.7067, 1.2097, 1.9856, 2.8596], + device='cuda:2'), covar=tensor([0.3256, 0.2769, 0.0205, 0.0323, 0.1856, 0.2902, 0.2261, 0.1817], + device='cuda:2'), in_proj_covar=tensor([0.0167, 0.0129, 0.0061, 0.0079, 0.0145, 0.0135, 0.0133, 0.0147], + device='cuda:2'), out_proj_covar=tensor([1.0886e-04, 8.2293e-05, 3.5962e-05, 4.5528e-05, 8.9661e-05, 8.3390e-05, + 8.2727e-05, 8.8429e-05], device='cuda:2') +2023-02-05 18:26:31,161 INFO [train.py:901] (2/4) Epoch 1, batch 2650, loss[loss=0.4574, simple_loss=0.4768, pruned_loss=0.219, over 8199.00 frames. ], tot_loss[loss=0.515, simple_loss=0.4969, pruned_loss=0.2667, over 1613026.59 frames. ], batch size: 23, lr: 4.70e-02, grad_scale: 16.0 +2023-02-05 18:26:54,440 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 18:27:03,471 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-05 18:27:03,816 INFO [train.py:901] (2/4) Epoch 1, batch 2700, loss[loss=0.4226, simple_loss=0.4388, pruned_loss=0.2032, over 8079.00 frames. ], tot_loss[loss=0.513, simple_loss=0.496, pruned_loss=0.2651, over 1614713.32 frames. ], batch size: 21, lr: 4.69e-02, grad_scale: 16.0 +2023-02-05 18:27:04,568 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2702.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:27:05,224 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2703.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:27:08,306 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.214e+02 4.351e+02 5.311e+02 6.408e+02 1.471e+03, threshold=1.062e+03, percent-clipped=4.0 +2023-02-05 18:27:15,001 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6314, 1.7586, 1.5639, 1.7922, 1.6312, 1.7748, 1.3882, 1.8914], + device='cuda:2'), covar=tensor([0.1427, 0.1580, 0.2307, 0.0858, 0.2224, 0.1604, 0.2686, 0.1355], + device='cuda:2'), in_proj_covar=tensor([0.0107, 0.0103, 0.0139, 0.0084, 0.0122, 0.0104, 0.0147, 0.0109], + device='cuda:2'), out_proj_covar=tensor([7.5862e-05, 7.1819e-05, 9.3531e-05, 5.8992e-05, 8.7493e-05, 7.1684e-05, + 1.0255e-04, 7.2005e-05], device='cuda:2') +2023-02-05 18:27:37,286 INFO [train.py:901] (2/4) Epoch 1, batch 2750, loss[loss=0.5237, simple_loss=0.5009, pruned_loss=0.2733, over 7011.00 frames. ], tot_loss[loss=0.509, simple_loss=0.4941, pruned_loss=0.262, over 1614165.88 frames. ], batch size: 71, lr: 4.68e-02, grad_scale: 16.0 +2023-02-05 18:28:11,565 INFO [train.py:901] (2/4) Epoch 1, batch 2800, loss[loss=0.4534, simple_loss=0.4503, pruned_loss=0.2282, over 7649.00 frames. ], tot_loss[loss=0.5044, simple_loss=0.4912, pruned_loss=0.2589, over 1613126.36 frames. 
], batch size: 19, lr: 4.67e-02, grad_scale: 16.0 +2023-02-05 18:28:15,265 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.916e+02 4.898e+02 6.530e+02 2.276e+03, threshold=9.797e+02, percent-clipped=2.0 +2023-02-05 18:28:21,891 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2817.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:28:38,539 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9241, 1.4245, 5.2981, 2.6250, 5.4342, 4.8987, 4.9499, 4.9941], + device='cuda:2'), covar=tensor([0.0156, 0.4279, 0.0189, 0.1101, 0.0238, 0.0200, 0.0365, 0.0296], + device='cuda:2'), in_proj_covar=tensor([0.0079, 0.0232, 0.0100, 0.0125, 0.0110, 0.0111, 0.0118, 0.0127], + device='cuda:2'), out_proj_covar=tensor([5.0212e-05, 1.4106e-04, 6.6818e-05, 8.4086e-05, 6.4305e-05, 6.4503e-05, + 7.3779e-05, 7.7152e-05], device='cuda:2') +2023-02-05 18:28:38,548 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2842.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:28:44,179 INFO [train.py:901] (2/4) Epoch 1, batch 2850, loss[loss=0.4516, simple_loss=0.4466, pruned_loss=0.2283, over 7818.00 frames. ], tot_loss[loss=0.5012, simple_loss=0.4891, pruned_loss=0.2567, over 1610956.00 frames. ], batch size: 20, lr: 4.66e-02, grad_scale: 16.0 +2023-02-05 18:29:18,784 INFO [train.py:901] (2/4) Epoch 1, batch 2900, loss[loss=0.3887, simple_loss=0.4083, pruned_loss=0.1845, over 7426.00 frames. ], tot_loss[loss=0.5003, simple_loss=0.4886, pruned_loss=0.256, over 1608796.63 frames. ], batch size: 17, lr: 4.65e-02, grad_scale: 16.0 +2023-02-05 18:29:22,667 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.417e+02 4.413e+02 5.664e+02 7.338e+02 1.737e+03, threshold=1.133e+03, percent-clipped=8.0 +2023-02-05 18:29:48,929 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 18:29:52,148 INFO [train.py:901] (2/4) Epoch 1, batch 2950, loss[loss=0.402, simple_loss=0.4211, pruned_loss=0.1915, over 7544.00 frames. ], tot_loss[loss=0.4975, simple_loss=0.4871, pruned_loss=0.254, over 1609439.06 frames. ], batch size: 18, lr: 4.64e-02, grad_scale: 16.0 +2023-02-05 18:29:54,925 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2955.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:30:25,896 INFO [train.py:901] (2/4) Epoch 1, batch 3000, loss[loss=0.4192, simple_loss=0.4393, pruned_loss=0.1995, over 8578.00 frames. ], tot_loss[loss=0.4961, simple_loss=0.4864, pruned_loss=0.2529, over 1613129.39 frames. ], batch size: 34, lr: 4.63e-02, grad_scale: 16.0 +2023-02-05 18:30:25,897 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 18:30:40,788 INFO [train.py:935] (2/4) Epoch 1, validation: loss=0.4518, simple_loss=0.5106, pruned_loss=0.1966, over 944034.00 frames. 
+2023-02-05 18:30:40,788 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6115MB +2023-02-05 18:30:44,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.692e+02 4.264e+02 5.642e+02 7.781e+02 1.743e+03, threshold=1.128e+03, percent-clipped=6.0 +2023-02-05 18:31:07,324 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3037.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:31:13,909 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3047.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:31:16,509 INFO [train.py:901] (2/4) Epoch 1, batch 3050, loss[loss=0.5479, simple_loss=0.5357, pruned_loss=0.28, over 8350.00 frames. ], tot_loss[loss=0.4946, simple_loss=0.486, pruned_loss=0.2516, over 1611625.04 frames. ], batch size: 26, lr: 4.62e-02, grad_scale: 16.0 +2023-02-05 18:31:30,841 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3073.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:31:47,505 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3098.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:31:49,292 INFO [train.py:901] (2/4) Epoch 1, batch 3100, loss[loss=0.5595, simple_loss=0.5355, pruned_loss=0.2918, over 8555.00 frames. ], tot_loss[loss=0.4982, simple_loss=0.4879, pruned_loss=0.2542, over 1613310.70 frames. ], batch size: 31, lr: 4.61e-02, grad_scale: 16.0 +2023-02-05 18:31:53,106 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.570e+02 4.257e+02 6.045e+02 8.311e+02 2.838e+03, threshold=1.209e+03, percent-clipped=13.0 +2023-02-05 18:32:24,455 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.85 vs. limit=2.0 +2023-02-05 18:32:24,769 INFO [train.py:901] (2/4) Epoch 1, batch 3150, loss[loss=0.5046, simple_loss=0.5037, pruned_loss=0.2527, over 8293.00 frames. ], tot_loss[loss=0.4984, simple_loss=0.4887, pruned_loss=0.2541, over 1615510.93 frames. ], batch size: 23, lr: 4.60e-02, grad_scale: 16.0 +2023-02-05 18:32:32,227 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3162.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:32:47,642 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:32:57,065 INFO [train.py:901] (2/4) Epoch 1, batch 3200, loss[loss=0.4646, simple_loss=0.4696, pruned_loss=0.2298, over 8256.00 frames. ], tot_loss[loss=0.4941, simple_loss=0.4859, pruned_loss=0.2512, over 1614507.72 frames. ], batch size: 24, lr: 4.59e-02, grad_scale: 16.0 +2023-02-05 18:33:00,911 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 4.232e+02 5.266e+02 6.948e+02 2.778e+03, threshold=1.053e+03, percent-clipped=2.0 +2023-02-05 18:33:32,107 INFO [train.py:901] (2/4) Epoch 1, batch 3250, loss[loss=0.5349, simple_loss=0.5155, pruned_loss=0.2772, over 8030.00 frames. ], tot_loss[loss=0.4945, simple_loss=0.4859, pruned_loss=0.2515, over 1614118.96 frames. 
], batch size: 22, lr: 4.58e-02, grad_scale: 16.0 +2023-02-05 18:33:47,473 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1886, 1.0719, 3.0741, 1.3571, 2.7689, 2.5644, 2.6246, 2.5718], + device='cuda:2'), covar=tensor([0.0221, 0.3097, 0.0322, 0.1188, 0.0388, 0.0394, 0.0466, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0080, 0.0242, 0.0103, 0.0135, 0.0119, 0.0121, 0.0119, 0.0134], + device='cuda:2'), out_proj_covar=tensor([5.0069e-05, 1.4327e-04, 6.7418e-05, 9.0741e-05, 7.2105e-05, 7.2046e-05, + 7.5567e-05, 8.4699e-05], device='cuda:2') +2023-02-05 18:33:48,111 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9692, 4.4180, 3.5573, 1.3440, 3.3146, 3.7767, 3.7886, 3.2156], + device='cuda:2'), covar=tensor([0.1041, 0.0401, 0.0713, 0.3969, 0.0542, 0.0459, 0.0971, 0.0607], + device='cuda:2'), in_proj_covar=tensor([0.0158, 0.0121, 0.0143, 0.0200, 0.0110, 0.0092, 0.0144, 0.0103], + device='cuda:2'), out_proj_covar=tensor([1.1819e-04, 9.8272e-05, 9.5450e-05, 1.3694e-04, 7.4376e-05, 6.6610e-05, + 1.1293e-04, 7.1706e-05], device='cuda:2') +2023-02-05 18:33:57,953 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8476, 1.2865, 5.0142, 2.5378, 5.1985, 4.4977, 4.7893, 4.8133], + device='cuda:2'), covar=tensor([0.0107, 0.3652, 0.0187, 0.1026, 0.0181, 0.0194, 0.0301, 0.0245], + device='cuda:2'), in_proj_covar=tensor([0.0079, 0.0244, 0.0104, 0.0135, 0.0120, 0.0121, 0.0120, 0.0133], + device='cuda:2'), out_proj_covar=tensor([4.9538e-05, 1.4432e-04, 6.8518e-05, 9.1354e-05, 7.2619e-05, 7.2649e-05, + 7.5828e-05, 8.4223e-05], device='cuda:2') +2023-02-05 18:34:04,442 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3299.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:34:05,630 INFO [train.py:901] (2/4) Epoch 1, batch 3300, loss[loss=0.5965, simple_loss=0.5429, pruned_loss=0.325, over 7244.00 frames. ], tot_loss[loss=0.4943, simple_loss=0.4866, pruned_loss=0.251, over 1614843.89 frames. ], batch size: 71, lr: 4.57e-02, grad_scale: 16.0 +2023-02-05 18:34:05,853 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3301.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:34:06,488 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7524, 2.5058, 1.3749, 1.5842, 2.2580, 1.9431, 1.4862, 2.1616], + device='cuda:2'), covar=tensor([0.1887, 0.1329, 0.2801, 0.1327, 0.2240, 0.1473, 0.4175, 0.1536], + device='cuda:2'), in_proj_covar=tensor([0.0157, 0.0129, 0.0191, 0.0112, 0.0175, 0.0135, 0.0212, 0.0148], + device='cuda:2'), out_proj_covar=tensor([1.1224e-04, 9.2370e-05, 1.3281e-04, 8.3220e-05, 1.2822e-04, 9.8638e-05, + 1.4975e-04, 1.0509e-04], device='cuda:2') +2023-02-05 18:34:08,973 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3306.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:34:09,426 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 4.334e+02 5.638e+02 7.160e+02 2.697e+03, threshold=1.128e+03, percent-clipped=10.0 +2023-02-05 18:34:28,315 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.08 vs. limit=2.0 +2023-02-05 18:34:39,416 INFO [train.py:901] (2/4) Epoch 1, batch 3350, loss[loss=0.4783, simple_loss=0.4668, pruned_loss=0.2449, over 8040.00 frames. ], tot_loss[loss=0.4866, simple_loss=0.4811, pruned_loss=0.2461, over 1612398.56 frames. 
], batch size: 20, lr: 4.56e-02, grad_scale: 16.0 +2023-02-05 18:35:01,941 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3381.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:14,988 INFO [train.py:901] (2/4) Epoch 1, batch 3400, loss[loss=0.5244, simple_loss=0.5159, pruned_loss=0.2665, over 8587.00 frames. ], tot_loss[loss=0.4845, simple_loss=0.48, pruned_loss=0.2445, over 1617446.77 frames. ], batch size: 31, lr: 4.55e-02, grad_scale: 16.0 +2023-02-05 18:35:19,028 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.486e+02 3.960e+02 5.068e+02 6.311e+02 1.481e+03, threshold=1.014e+03, percent-clipped=3.0 +2023-02-05 18:35:23,816 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:26,554 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3418.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:35:43,703 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3443.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:35:48,737 INFO [train.py:901] (2/4) Epoch 1, batch 3450, loss[loss=0.4272, simple_loss=0.4322, pruned_loss=0.2111, over 7808.00 frames. ], tot_loss[loss=0.4834, simple_loss=0.4792, pruned_loss=0.2438, over 1615659.69 frames. ], batch size: 20, lr: 4.54e-02, grad_scale: 16.0 +2023-02-05 18:36:21,031 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:36:24,211 INFO [train.py:901] (2/4) Epoch 1, batch 3500, loss[loss=0.3858, simple_loss=0.4045, pruned_loss=0.1835, over 7255.00 frames. ], tot_loss[loss=0.4827, simple_loss=0.4788, pruned_loss=0.2433, over 1615504.84 frames. ], batch size: 16, lr: 4.53e-02, grad_scale: 16.0 +2023-02-05 18:36:28,198 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.405e+02 5.773e+02 7.537e+02 2.537e+03, threshold=1.155e+03, percent-clipped=7.0 +2023-02-05 18:36:36,229 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 18:36:57,804 INFO [train.py:901] (2/4) Epoch 1, batch 3550, loss[loss=0.4734, simple_loss=0.4701, pruned_loss=0.2384, over 8249.00 frames. ], tot_loss[loss=0.4809, simple_loss=0.4775, pruned_loss=0.2421, over 1614737.37 frames. 
], batch size: 24, lr: 4.51e-02, grad_scale: 16.0 +2023-02-05 18:37:02,106 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3557.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:37:07,150 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3564.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:37:19,181 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3582.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:37:22,793 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1284, 4.3868, 3.8645, 1.4882, 3.6094, 3.8880, 3.8940, 3.4229], + device='cuda:2'), covar=tensor([0.0711, 0.0307, 0.0542, 0.3263, 0.0400, 0.0415, 0.0712, 0.0484], + device='cuda:2'), in_proj_covar=tensor([0.0182, 0.0138, 0.0162, 0.0215, 0.0121, 0.0104, 0.0166, 0.0113], + device='cuda:2'), out_proj_covar=tensor([1.3900e-04, 1.0937e-04, 1.0719e-04, 1.4812e-04, 8.2449e-05, 7.5362e-05, + 1.2759e-04, 7.9803e-05], device='cuda:2') +2023-02-05 18:37:33,298 INFO [train.py:901] (2/4) Epoch 1, batch 3600, loss[loss=0.4789, simple_loss=0.4775, pruned_loss=0.2401, over 8259.00 frames. ], tot_loss[loss=0.4843, simple_loss=0.4791, pruned_loss=0.2447, over 1610019.17 frames. ], batch size: 24, lr: 4.50e-02, grad_scale: 16.0 +2023-02-05 18:37:33,625 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.89 vs. limit=2.0 +2023-02-05 18:37:37,961 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.853e+02 4.660e+02 6.337e+02 8.772e+02 4.832e+03, threshold=1.267e+03, percent-clipped=11.0 +2023-02-05 18:38:06,596 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3650.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:38:06,677 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4546, 1.6179, 1.0510, 1.5138, 1.5539, 1.3908, 1.3216, 2.1162], + device='cuda:2'), covar=tensor([0.1505, 0.1233, 0.2712, 0.0715, 0.1898, 0.1447, 0.2624, 0.0832], + device='cuda:2'), in_proj_covar=tensor([0.0195, 0.0151, 0.0229, 0.0134, 0.0203, 0.0169, 0.0239, 0.0180], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0001, 0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 18:38:07,049 INFO [train.py:901] (2/4) Epoch 1, batch 3650, loss[loss=0.5549, simple_loss=0.5268, pruned_loss=0.2915, over 7972.00 frames. ], tot_loss[loss=0.4794, simple_loss=0.4759, pruned_loss=0.2415, over 1608574.46 frames. 
], batch size: 21, lr: 4.49e-02, grad_scale: 16.0 +2023-02-05 18:38:19,613 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.7531, 1.0942, 1.2028, 1.0812, 0.8705, 1.1900, 0.2056, 0.8236], + device='cuda:2'), covar=tensor([0.0616, 0.0627, 0.0498, 0.0407, 0.0710, 0.0348, 0.1493, 0.0825], + device='cuda:2'), in_proj_covar=tensor([0.0129, 0.0125, 0.0106, 0.0112, 0.0125, 0.0095, 0.0162, 0.0127], + device='cuda:2'), out_proj_covar=tensor([9.2613e-05, 9.6194e-05, 7.6196e-05, 8.1479e-05, 9.3805e-05, 6.5106e-05, + 1.2417e-04, 1.0064e-04], device='cuda:2') +2023-02-05 18:38:19,628 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3670.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:38:36,829 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3694.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:38:37,586 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3695.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:38:40,432 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 18:38:41,136 INFO [train.py:901] (2/4) Epoch 1, batch 3700, loss[loss=0.4501, simple_loss=0.4558, pruned_loss=0.2222, over 7936.00 frames. ], tot_loss[loss=0.4834, simple_loss=0.4794, pruned_loss=0.2437, over 1616929.86 frames. ], batch size: 20, lr: 4.48e-02, grad_scale: 16.0 +2023-02-05 18:38:45,140 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.178e+02 4.586e+02 6.278e+02 1.050e+03 3.437e+03, threshold=1.256e+03, percent-clipped=14.0 +2023-02-05 18:39:17,454 INFO [train.py:901] (2/4) Epoch 1, batch 3750, loss[loss=0.5377, simple_loss=0.5322, pruned_loss=0.2716, over 8359.00 frames. ], tot_loss[loss=0.4828, simple_loss=0.4798, pruned_loss=0.2429, over 1621562.39 frames. ], batch size: 48, lr: 4.47e-02, grad_scale: 16.0 +2023-02-05 18:39:18,333 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3752.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:39:27,123 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3765.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:39:32,484 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 18:39:35,219 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3777.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:39:38,644 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9431, 2.0628, 1.8825, 3.0307, 1.9609, 1.7249, 2.0953, 2.0482], + device='cuda:2'), covar=tensor([0.1929, 0.1850, 0.1659, 0.0276, 0.2463, 0.1946, 0.2526, 0.1955], + device='cuda:2'), in_proj_covar=tensor([0.0248, 0.0256, 0.0242, 0.0156, 0.0314, 0.0290, 0.0339, 0.0248], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-05 18:39:51,681 INFO [train.py:901] (2/4) Epoch 1, batch 3800, loss[loss=0.4504, simple_loss=0.4454, pruned_loss=0.2277, over 8249.00 frames. ], tot_loss[loss=0.4788, simple_loss=0.4767, pruned_loss=0.2404, over 1618437.81 frames. 
], batch size: 24, lr: 4.46e-02, grad_scale: 16.0 +2023-02-05 18:39:55,874 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.457e+02 5.389e+02 6.979e+02 9.091e+02 1.609e+03, threshold=1.396e+03, percent-clipped=5.0 +2023-02-05 18:40:24,140 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2844, 1.9301, 2.0272, 1.6997, 1.2884, 2.0726, 0.4958, 1.3136], + device='cuda:2'), covar=tensor([0.0846, 0.0547, 0.0359, 0.0552, 0.0812, 0.0387, 0.1791, 0.0926], + device='cuda:2'), in_proj_covar=tensor([0.0137, 0.0128, 0.0112, 0.0121, 0.0131, 0.0099, 0.0170, 0.0137], + device='cuda:2'), out_proj_covar=tensor([9.9139e-05, 9.9916e-05, 8.0945e-05, 8.7123e-05, 9.9756e-05, 6.8807e-05, + 1.3293e-04, 1.1100e-04], device='cuda:2') +2023-02-05 18:40:27,870 INFO [train.py:901] (2/4) Epoch 1, batch 3850, loss[loss=0.5425, simple_loss=0.5343, pruned_loss=0.2754, over 8562.00 frames. ], tot_loss[loss=0.4788, simple_loss=0.4767, pruned_loss=0.2404, over 1616838.12 frames. ], batch size: 39, lr: 4.45e-02, grad_scale: 16.0 +2023-02-05 18:40:40,587 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.86 vs. limit=2.0 +2023-02-05 18:40:46,554 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 18:40:56,729 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 18:41:00,999 INFO [train.py:901] (2/4) Epoch 1, batch 3900, loss[loss=0.4638, simple_loss=0.4652, pruned_loss=0.2312, over 8291.00 frames. ], tot_loss[loss=0.4779, simple_loss=0.4764, pruned_loss=0.2397, over 1621623.71 frames. ], batch size: 23, lr: 4.44e-02, grad_scale: 16.0 +2023-02-05 18:41:04,025 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-05 18:41:04,999 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.102e+02 5.552e+02 7.100e+02 9.321e+02 1.906e+03, threshold=1.420e+03, percent-clipped=2.0 +2023-02-05 18:41:05,743 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3908.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:41:29,951 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:41:35,339 INFO [train.py:901] (2/4) Epoch 1, batch 3950, loss[loss=0.5847, simple_loss=0.5569, pruned_loss=0.3062, over 7049.00 frames. ], tot_loss[loss=0.4754, simple_loss=0.4754, pruned_loss=0.2377, over 1623444.83 frames. ], batch size: 71, lr: 4.43e-02, grad_scale: 16.0 +2023-02-05 18:41:46,993 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7714, 2.2444, 2.3694, 2.4412, 1.9568, 1.0162, 2.1528, 2.1402], + device='cuda:2'), covar=tensor([0.1743, 0.0857, 0.0664, 0.0479, 0.1052, 0.1642, 0.0409, 0.0696], + device='cuda:2'), in_proj_covar=tensor([0.0130, 0.0088, 0.0066, 0.0077, 0.0094, 0.0106, 0.0107, 0.0104], + device='cuda:2'), out_proj_covar=tensor([7.8475e-05, 5.0052e-05, 3.6312e-05, 4.4707e-05, 5.5055e-05, 5.5944e-05, + 5.6340e-05, 5.6760e-05], device='cuda:2') +2023-02-05 18:42:10,919 INFO [train.py:901] (2/4) Epoch 1, batch 4000, loss[loss=0.3896, simple_loss=0.4098, pruned_loss=0.1847, over 7433.00 frames. ], tot_loss[loss=0.4748, simple_loss=0.4752, pruned_loss=0.2372, over 1624922.46 frames. 
], batch size: 17, lr: 4.42e-02, grad_scale: 8.0 +2023-02-05 18:42:15,518 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.262e+02 4.572e+02 5.687e+02 7.371e+02 1.820e+03, threshold=1.137e+03, percent-clipped=4.0 +2023-02-05 18:42:15,728 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3684, 1.7238, 1.3901, 1.5564, 1.7652, 1.4237, 1.5775, 1.7915], + device='cuda:2'), covar=tensor([0.2668, 0.2743, 0.3060, 0.2734, 0.1785, 0.2991, 0.2204, 0.2055], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0257, 0.0240, 0.0248, 0.0256, 0.0238, 0.0250, 0.0243], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-05 18:42:24,563 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4021.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:25,855 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4023.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:36,382 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4038.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:42:42,709 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4046.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:42:46,085 INFO [train.py:901] (2/4) Epoch 1, batch 4050, loss[loss=0.4471, simple_loss=0.4619, pruned_loss=0.2161, over 8507.00 frames. ], tot_loss[loss=0.4758, simple_loss=0.4759, pruned_loss=0.2378, over 1621497.76 frames. ], batch size: 26, lr: 4.41e-02, grad_scale: 8.0 +2023-02-05 18:43:13,072 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-05 18:43:22,338 INFO [train.py:901] (2/4) Epoch 1, batch 4100, loss[loss=0.5816, simple_loss=0.556, pruned_loss=0.3035, over 8576.00 frames. ], tot_loss[loss=0.4746, simple_loss=0.4748, pruned_loss=0.2371, over 1620928.94 frames. ], batch size: 31, lr: 4.40e-02, grad_scale: 8.0 +2023-02-05 18:43:26,883 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.479e+02 4.889e+02 6.474e+02 8.616e+02 2.054e+03, threshold=1.295e+03, percent-clipped=5.0 +2023-02-05 18:43:46,473 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6873, 3.1273, 1.7038, 2.4843, 2.5502, 2.6278, 1.8212, 2.5440], + device='cuda:2'), covar=tensor([0.1561, 0.0758, 0.2325, 0.0839, 0.1700, 0.1557, 0.2943, 0.1257], + device='cuda:2'), in_proj_covar=tensor([0.0222, 0.0164, 0.0264, 0.0161, 0.0237, 0.0196, 0.0267, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 18:43:56,549 INFO [train.py:901] (2/4) Epoch 1, batch 4150, loss[loss=0.42, simple_loss=0.4486, pruned_loss=0.1957, over 8469.00 frames. ], tot_loss[loss=0.469, simple_loss=0.4714, pruned_loss=0.2333, over 1621056.17 frames. ], batch size: 25, lr: 4.39e-02, grad_scale: 8.0 +2023-02-05 18:43:58,155 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4153.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:44:02,263 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4159.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:44:33,532 INFO [train.py:901] (2/4) Epoch 1, batch 4200, loss[loss=0.5268, simple_loss=0.5115, pruned_loss=0.2711, over 7130.00 frames. ], tot_loss[loss=0.4671, simple_loss=0.4702, pruned_loss=0.232, over 1617783.58 frames. 
], batch size: 73, lr: 4.38e-02, grad_scale: 8.0 +2023-02-05 18:44:38,312 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 4.057e+02 5.109e+02 6.409e+02 1.525e+03, threshold=1.022e+03, percent-clipped=2.0 +2023-02-05 18:44:44,325 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 18:44:50,725 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.70 vs. limit=5.0 +2023-02-05 18:45:05,002 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 18:45:07,075 INFO [train.py:901] (2/4) Epoch 1, batch 4250, loss[loss=0.4038, simple_loss=0.431, pruned_loss=0.1883, over 8467.00 frames. ], tot_loss[loss=0.4664, simple_loss=0.4693, pruned_loss=0.2318, over 1619991.30 frames. ], batch size: 25, lr: 4.36e-02, grad_scale: 8.0 +2023-02-05 18:45:14,147 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6325, 2.0766, 2.1410, 2.3865, 1.5522, 1.2192, 1.9985, 2.0212], + device='cuda:2'), covar=tensor([0.2286, 0.0981, 0.0631, 0.0528, 0.1029, 0.1546, 0.0938, 0.1047], + device='cuda:2'), in_proj_covar=tensor([0.0145, 0.0094, 0.0070, 0.0076, 0.0098, 0.0112, 0.0119, 0.0110], + device='cuda:2'), out_proj_covar=tensor([8.5845e-05, 5.3153e-05, 3.8652e-05, 4.3017e-05, 5.5241e-05, 6.1308e-05, + 6.6512e-05, 5.9167e-05], device='cuda:2') +2023-02-05 18:45:25,182 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5850, 2.1941, 1.6647, 1.8560, 1.9380, 1.9465, 2.5789, 2.6205], + device='cuda:2'), covar=tensor([0.2135, 0.2960, 0.2894, 0.2558, 0.2472, 0.2981, 0.2058, 0.1808], + device='cuda:2'), in_proj_covar=tensor([0.0257, 0.0262, 0.0243, 0.0253, 0.0265, 0.0242, 0.0258, 0.0244], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 18:45:26,606 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4279.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:45:33,325 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4288.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:45:42,872 INFO [train.py:901] (2/4) Epoch 1, batch 4300, loss[loss=0.5123, simple_loss=0.5103, pruned_loss=0.2571, over 8353.00 frames. ], tot_loss[loss=0.4652, simple_loss=0.4685, pruned_loss=0.2309, over 1619079.61 frames. ], batch size: 26, lr: 4.35e-02, grad_scale: 8.0 +2023-02-05 18:45:45,725 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4304.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:45:47,020 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4306.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:45:48,889 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.647e+02 4.666e+02 6.207e+02 8.078e+02 1.600e+03, threshold=1.241e+03, percent-clipped=6.0 +2023-02-05 18:46:03,750 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. 
limit=2.0 +2023-02-05 18:46:08,342 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1891, 1.6788, 3.3003, 1.0019, 2.0098, 1.7250, 1.1207, 1.9478], + device='cuda:2'), covar=tensor([0.1404, 0.1484, 0.0236, 0.1544, 0.1244, 0.1952, 0.1481, 0.1189], + device='cuda:2'), in_proj_covar=tensor([0.0195, 0.0207, 0.0149, 0.0206, 0.0243, 0.0268, 0.0206, 0.0236], + device='cuda:2'), out_proj_covar=tensor([1.3432e-04, 1.4800e-04, 9.7854e-05, 1.3998e-04, 1.5791e-04, 1.9178e-04, + 1.3279e-04, 1.5358e-04], device='cuda:2') +2023-02-05 18:46:18,301 INFO [train.py:901] (2/4) Epoch 1, batch 4350, loss[loss=0.4664, simple_loss=0.465, pruned_loss=0.2339, over 7797.00 frames. ], tot_loss[loss=0.4629, simple_loss=0.4667, pruned_loss=0.2296, over 1612849.84 frames. ], batch size: 20, lr: 4.34e-02, grad_scale: 8.0 +2023-02-05 18:46:37,371 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 18:46:52,944 INFO [train.py:901] (2/4) Epoch 1, batch 4400, loss[loss=0.3859, simple_loss=0.41, pruned_loss=0.1809, over 7525.00 frames. ], tot_loss[loss=0.4592, simple_loss=0.4639, pruned_loss=0.2272, over 1607430.15 frames. ], batch size: 18, lr: 4.33e-02, grad_scale: 8.0 +2023-02-05 18:46:54,533 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4403.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:46:57,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 4.338e+02 5.789e+02 7.262e+02 1.136e+03, threshold=1.158e+03, percent-clipped=0.0 +2023-02-05 18:46:58,934 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4409.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:47:18,600 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4434.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:47:21,208 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 18:47:25,469 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1282, 1.4098, 1.5980, 1.6412, 1.4894, 1.1588, 1.3984, 1.5585], + device='cuda:2'), covar=tensor([0.2598, 0.1050, 0.0686, 0.0750, 0.0957, 0.1518, 0.1185, 0.0952], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0107, 0.0078, 0.0088, 0.0112, 0.0125, 0.0131, 0.0127], + device='cuda:2'), out_proj_covar=tensor([1.0117e-04, 6.1551e-05, 4.2938e-05, 5.0339e-05, 6.2559e-05, 6.8881e-05, + 7.3920e-05, 6.8342e-05], device='cuda:2') +2023-02-05 18:47:29,979 INFO [train.py:901] (2/4) Epoch 1, batch 4450, loss[loss=0.3938, simple_loss=0.4048, pruned_loss=0.1914, over 7224.00 frames. ], tot_loss[loss=0.4584, simple_loss=0.4635, pruned_loss=0.2267, over 1609859.49 frames. ], batch size: 16, lr: 4.32e-02, grad_scale: 8.0 +2023-02-05 18:48:04,125 INFO [train.py:901] (2/4) Epoch 1, batch 4500, loss[loss=0.4435, simple_loss=0.4654, pruned_loss=0.2108, over 8246.00 frames. ], tot_loss[loss=0.4567, simple_loss=0.4621, pruned_loss=0.2256, over 1609661.73 frames. 
], batch size: 24, lr: 4.31e-02, grad_scale: 8.0 +2023-02-05 18:48:04,970 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5872, 1.3709, 2.8554, 1.5395, 2.1410, 3.2303, 3.0966, 2.8541], + device='cuda:2'), covar=tensor([0.2125, 0.2420, 0.0384, 0.2470, 0.1095, 0.0253, 0.0328, 0.0441], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0248, 0.0135, 0.0238, 0.0181, 0.0103, 0.0104, 0.0149], + device='cuda:2'), out_proj_covar=tensor([1.8068e-04, 1.9319e-04, 1.2062e-04, 1.7884e-04, 1.6182e-04, 8.4974e-05, + 9.4653e-05, 1.2548e-04], device='cuda:2') +2023-02-05 18:48:05,597 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4503.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:48:09,057 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.251e+02 4.383e+02 5.863e+02 8.313e+02 2.632e+03, threshold=1.173e+03, percent-clipped=9.0 +2023-02-05 18:48:15,323 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 18:48:41,811 INFO [train.py:901] (2/4) Epoch 1, batch 4550, loss[loss=0.4245, simple_loss=0.444, pruned_loss=0.2025, over 8790.00 frames. ], tot_loss[loss=0.4516, simple_loss=0.4592, pruned_loss=0.222, over 1613261.10 frames. ], batch size: 32, lr: 4.30e-02, grad_scale: 8.0 +2023-02-05 18:48:47,550 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0412, 2.0814, 1.5364, 1.4062, 1.8910, 1.7400, 2.0376, 2.3106], + device='cuda:2'), covar=tensor([0.2060, 0.2534, 0.2591, 0.2696, 0.1893, 0.2412, 0.1968, 0.1603], + device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0281, 0.0261, 0.0267, 0.0279, 0.0254, 0.0263, 0.0259], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 18:48:58,681 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0022, 1.3988, 3.9020, 2.1643, 3.5868, 3.2659, 3.3540, 3.3552], + device='cuda:2'), covar=tensor([0.0146, 0.3026, 0.0200, 0.1056, 0.0303, 0.0280, 0.0280, 0.0313], + device='cuda:2'), in_proj_covar=tensor([0.0103, 0.0292, 0.0134, 0.0168, 0.0149, 0.0149, 0.0144, 0.0163], + device='cuda:2'), out_proj_covar=tensor([6.5236e-05, 1.6856e-04, 8.5137e-05, 1.1351e-04, 8.8889e-05, 9.3388e-05, + 9.1333e-05, 1.0776e-04], device='cuda:2') +2023-02-05 18:49:16,696 INFO [train.py:901] (2/4) Epoch 1, batch 4600, loss[loss=0.5059, simple_loss=0.4858, pruned_loss=0.263, over 7981.00 frames. ], tot_loss[loss=0.4536, simple_loss=0.4602, pruned_loss=0.2235, over 1612171.89 frames. ], batch size: 21, lr: 4.29e-02, grad_scale: 8.0 +2023-02-05 18:49:21,483 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.983e+02 5.037e+02 6.922e+02 1.236e+03, threshold=1.007e+03, percent-clipped=2.0 +2023-02-05 18:49:28,473 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4618.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:49:51,612 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4650.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:49:52,226 INFO [train.py:901] (2/4) Epoch 1, batch 4650, loss[loss=0.4739, simple_loss=0.481, pruned_loss=0.2334, over 8440.00 frames. ], tot_loss[loss=0.4523, simple_loss=0.4591, pruned_loss=0.2228, over 1609879.06 frames. 
], batch size: 27, lr: 4.28e-02, grad_scale: 8.0 +2023-02-05 18:49:59,118 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4659.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:50:16,195 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4684.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:50:27,574 INFO [train.py:901] (2/4) Epoch 1, batch 4700, loss[loss=0.4776, simple_loss=0.496, pruned_loss=0.2296, over 8450.00 frames. ], tot_loss[loss=0.4482, simple_loss=0.4568, pruned_loss=0.2199, over 1613615.04 frames. ], batch size: 27, lr: 4.27e-02, grad_scale: 8.0 +2023-02-05 18:50:32,365 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.254e+02 4.576e+02 5.443e+02 6.674e+02 1.320e+03, threshold=1.089e+03, percent-clipped=4.0 +2023-02-05 18:51:01,881 INFO [train.py:901] (2/4) Epoch 1, batch 4750, loss[loss=0.3784, simple_loss=0.3913, pruned_loss=0.1827, over 7682.00 frames. ], tot_loss[loss=0.4452, simple_loss=0.4549, pruned_loss=0.2177, over 1619036.15 frames. ], batch size: 18, lr: 4.26e-02, grad_scale: 8.0 +2023-02-05 18:51:12,268 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4765.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:51:21,687 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 18:51:23,819 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 18:51:37,814 INFO [train.py:901] (2/4) Epoch 1, batch 4800, loss[loss=0.4089, simple_loss=0.4212, pruned_loss=0.1983, over 7259.00 frames. ], tot_loss[loss=0.4457, simple_loss=0.4551, pruned_loss=0.2181, over 1620927.19 frames. ], batch size: 16, lr: 4.25e-02, grad_scale: 8.0 +2023-02-05 18:51:42,622 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.690e+02 4.367e+02 5.327e+02 7.244e+02 1.939e+03, threshold=1.065e+03, percent-clipped=6.0 +2023-02-05 18:52:11,409 INFO [train.py:901] (2/4) Epoch 1, batch 4850, loss[loss=0.4086, simple_loss=0.4257, pruned_loss=0.1958, over 7965.00 frames. ], tot_loss[loss=0.4465, simple_loss=0.4554, pruned_loss=0.2188, over 1619949.48 frames. ], batch size: 21, lr: 4.24e-02, grad_scale: 8.0 +2023-02-05 18:52:13,503 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 18:52:18,628 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-05 18:52:27,489 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4874.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:52:47,400 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4899.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:52:48,546 INFO [train.py:901] (2/4) Epoch 1, batch 4900, loss[loss=0.453, simple_loss=0.4688, pruned_loss=0.2186, over 8751.00 frames. ], tot_loss[loss=0.4437, simple_loss=0.4537, pruned_loss=0.2168, over 1622333.13 frames. ], batch size: 39, lr: 4.23e-02, grad_scale: 8.0 +2023-02-05 18:52:53,380 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.332e+02 4.394e+02 5.447e+02 6.722e+02 1.310e+03, threshold=1.089e+03, percent-clipped=5.0 +2023-02-05 18:53:22,702 INFO [train.py:901] (2/4) Epoch 1, batch 4950, loss[loss=0.5518, simple_loss=0.5064, pruned_loss=0.2986, over 7232.00 frames. ], tot_loss[loss=0.4416, simple_loss=0.4519, pruned_loss=0.2156, over 1618576.08 frames. 
], batch size: 16, lr: 4.21e-02, grad_scale: 8.0 +2023-02-05 18:53:59,108 INFO [train.py:901] (2/4) Epoch 1, batch 5000, loss[loss=0.4609, simple_loss=0.4695, pruned_loss=0.2262, over 8507.00 frames. ], tot_loss[loss=0.4419, simple_loss=0.4521, pruned_loss=0.2158, over 1617287.07 frames. ], batch size: 26, lr: 4.20e-02, grad_scale: 8.0 +2023-02-05 18:54:04,640 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.658e+02 4.358e+02 5.438e+02 7.182e+02 1.797e+03, threshold=1.088e+03, percent-clipped=3.0 +2023-02-05 18:54:13,645 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5021.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:54:30,644 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5046.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:54:33,890 INFO [train.py:901] (2/4) Epoch 1, batch 5050, loss[loss=0.4102, simple_loss=0.4373, pruned_loss=0.1916, over 8506.00 frames. ], tot_loss[loss=0.4387, simple_loss=0.4497, pruned_loss=0.2139, over 1612316.44 frames. ], batch size: 26, lr: 4.19e-02, grad_scale: 8.0 +2023-02-05 18:54:50,690 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 18:55:08,921 INFO [train.py:901] (2/4) Epoch 1, batch 5100, loss[loss=0.4272, simple_loss=0.4203, pruned_loss=0.2171, over 7791.00 frames. ], tot_loss[loss=0.4396, simple_loss=0.4504, pruned_loss=0.2144, over 1609015.81 frames. ], batch size: 19, lr: 4.18e-02, grad_scale: 8.0 +2023-02-05 18:55:13,360 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.82 vs. limit=2.0 +2023-02-05 18:55:13,597 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.507e+02 4.431e+02 5.257e+02 6.582e+02 1.311e+03, threshold=1.051e+03, percent-clipped=2.0 +2023-02-05 18:55:45,841 INFO [train.py:901] (2/4) Epoch 1, batch 5150, loss[loss=0.4781, simple_loss=0.4955, pruned_loss=0.2304, over 8444.00 frames. ], tot_loss[loss=0.4422, simple_loss=0.4524, pruned_loss=0.216, over 1609892.01 frames. ], batch size: 27, lr: 4.17e-02, grad_scale: 8.0 +2023-02-05 18:56:19,014 INFO [train.py:901] (2/4) Epoch 1, batch 5200, loss[loss=0.4503, simple_loss=0.4585, pruned_loss=0.221, over 8036.00 frames. ], tot_loss[loss=0.4417, simple_loss=0.4517, pruned_loss=0.2159, over 1606498.06 frames. ], batch size: 22, lr: 4.16e-02, grad_scale: 8.0 +2023-02-05 18:56:23,576 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.039e+02 3.937e+02 5.264e+02 6.479e+02 1.558e+03, threshold=1.053e+03, percent-clipped=7.0 +2023-02-05 18:56:51,633 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 18:56:55,106 INFO [train.py:901] (2/4) Epoch 1, batch 5250, loss[loss=0.448, simple_loss=0.4418, pruned_loss=0.227, over 6008.00 frames. ], tot_loss[loss=0.4389, simple_loss=0.45, pruned_loss=0.2139, over 1605941.76 frames. ], batch size: 13, lr: 4.15e-02, grad_scale: 8.0 +2023-02-05 18:57:28,852 INFO [train.py:901] (2/4) Epoch 1, batch 5300, loss[loss=0.4444, simple_loss=0.463, pruned_loss=0.2129, over 8486.00 frames. ], tot_loss[loss=0.4413, simple_loss=0.4517, pruned_loss=0.2155, over 1605390.83 frames. 
], batch size: 29, lr: 4.14e-02, grad_scale: 8.0 +2023-02-05 18:57:33,640 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.076e+02 4.278e+02 4.955e+02 6.641e+02 1.586e+03, threshold=9.909e+02, percent-clipped=4.0 +2023-02-05 18:57:34,876 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.76 vs. limit=2.0 +2023-02-05 18:58:04,007 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=5.13 vs. limit=5.0 +2023-02-05 18:58:04,342 INFO [train.py:901] (2/4) Epoch 1, batch 5350, loss[loss=0.4835, simple_loss=0.4571, pruned_loss=0.255, over 7693.00 frames. ], tot_loss[loss=0.4428, simple_loss=0.4528, pruned_loss=0.2165, over 1606023.76 frames. ], batch size: 18, lr: 4.13e-02, grad_scale: 8.0 +2023-02-05 18:58:13,884 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7144, 1.8607, 1.8519, 2.7527, 1.6128, 1.2182, 1.7633, 2.0003], + device='cuda:2'), covar=tensor([0.1380, 0.1511, 0.1216, 0.0227, 0.1907, 0.2243, 0.1868, 0.1201], + device='cuda:2'), in_proj_covar=tensor([0.0278, 0.0298, 0.0281, 0.0167, 0.0334, 0.0321, 0.0373, 0.0278], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 18:58:39,818 INFO [train.py:901] (2/4) Epoch 1, batch 5400, loss[loss=0.4673, simple_loss=0.4834, pruned_loss=0.2256, over 8351.00 frames. ], tot_loss[loss=0.4401, simple_loss=0.4506, pruned_loss=0.2148, over 1611284.98 frames. ], batch size: 24, lr: 4.12e-02, grad_scale: 8.0 +2023-02-05 18:58:44,298 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.977e+02 4.515e+02 5.788e+02 7.308e+02 1.362e+03, threshold=1.158e+03, percent-clipped=5.0 +2023-02-05 18:59:13,401 INFO [train.py:901] (2/4) Epoch 1, batch 5450, loss[loss=0.4047, simple_loss=0.4344, pruned_loss=0.1875, over 8248.00 frames. ], tot_loss[loss=0.4396, simple_loss=0.4502, pruned_loss=0.2145, over 1612048.72 frames. ], batch size: 22, lr: 4.11e-02, grad_scale: 8.0 +2023-02-05 18:59:41,792 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 18:59:49,949 INFO [train.py:901] (2/4) Epoch 1, batch 5500, loss[loss=0.4907, simple_loss=0.4968, pruned_loss=0.2423, over 8504.00 frames. ], tot_loss[loss=0.4371, simple_loss=0.4487, pruned_loss=0.2127, over 1614030.06 frames. ], batch size: 28, lr: 4.10e-02, grad_scale: 8.0 +2023-02-05 18:59:54,517 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.397e+02 4.451e+02 5.295e+02 6.340e+02 1.239e+03, threshold=1.059e+03, percent-clipped=2.0 +2023-02-05 19:00:23,628 INFO [train.py:901] (2/4) Epoch 1, batch 5550, loss[loss=0.4758, simple_loss=0.4721, pruned_loss=0.2398, over 8123.00 frames. ], tot_loss[loss=0.4376, simple_loss=0.4493, pruned_loss=0.213, over 1617561.77 frames. ], batch size: 22, lr: 4.09e-02, grad_scale: 8.0 +2023-02-05 19:01:00,923 INFO [train.py:901] (2/4) Epoch 1, batch 5600, loss[loss=0.4322, simple_loss=0.4642, pruned_loss=0.2001, over 8455.00 frames. ], tot_loss[loss=0.4337, simple_loss=0.4466, pruned_loss=0.2104, over 1611139.69 frames. ], batch size: 27, lr: 4.08e-02, grad_scale: 8.0 +2023-02-05 19:01:05,771 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 3.916e+02 5.301e+02 6.582e+02 1.340e+03, threshold=1.060e+03, percent-clipped=3.0 +2023-02-05 19:01:34,541 INFO [train.py:901] (2/4) Epoch 1, batch 5650, loss[loss=0.3311, simple_loss=0.3582, pruned_loss=0.152, over 7685.00 frames. 
], tot_loss[loss=0.433, simple_loss=0.446, pruned_loss=0.21, over 1608537.34 frames. ], batch size: 18, lr: 4.07e-02, grad_scale: 8.0 +2023-02-05 19:01:35,630 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 19:01:45,693 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 19:01:45,826 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5668.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:02:09,329 INFO [train.py:901] (2/4) Epoch 1, batch 5700, loss[loss=0.4667, simple_loss=0.4804, pruned_loss=0.2265, over 8489.00 frames. ], tot_loss[loss=0.4358, simple_loss=0.4479, pruned_loss=0.2119, over 1611451.45 frames. ], batch size: 28, lr: 4.06e-02, grad_scale: 8.0 +2023-02-05 19:02:15,264 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.140e+02 4.740e+02 5.744e+02 8.008e+02 1.790e+03, threshold=1.149e+03, percent-clipped=10.0 +2023-02-05 19:02:44,478 INFO [train.py:901] (2/4) Epoch 1, batch 5750, loss[loss=0.3805, simple_loss=0.4061, pruned_loss=0.1774, over 7780.00 frames. ], tot_loss[loss=0.4316, simple_loss=0.4449, pruned_loss=0.2091, over 1608981.46 frames. ], batch size: 19, lr: 4.05e-02, grad_scale: 8.0 +2023-02-05 19:02:51,395 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 19:02:51,839 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 19:02:54,503 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 19:02:59,859 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5773.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:03:19,619 INFO [train.py:901] (2/4) Epoch 1, batch 5800, loss[loss=0.4244, simple_loss=0.4386, pruned_loss=0.2051, over 8086.00 frames. ], tot_loss[loss=0.4293, simple_loss=0.4432, pruned_loss=0.2077, over 1605198.22 frames. ], batch size: 21, lr: 4.04e-02, grad_scale: 8.0 +2023-02-05 19:03:24,537 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.671e+02 4.595e+02 5.667e+02 1.405e+03, threshold=9.190e+02, percent-clipped=2.0 +2023-02-05 19:03:55,603 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.29 vs. limit=5.0 +2023-02-05 19:03:57,239 INFO [train.py:901] (2/4) Epoch 1, batch 5850, loss[loss=0.4226, simple_loss=0.4086, pruned_loss=0.2183, over 7540.00 frames. ], tot_loss[loss=0.4303, simple_loss=0.4438, pruned_loss=0.2084, over 1606018.02 frames. ], batch size: 18, lr: 4.03e-02, grad_scale: 8.0 +2023-02-05 19:04:15,201 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:04:32,485 INFO [train.py:901] (2/4) Epoch 1, batch 5900, loss[loss=0.4258, simple_loss=0.4263, pruned_loss=0.2126, over 7652.00 frames. ], tot_loss[loss=0.4282, simple_loss=0.4424, pruned_loss=0.2069, over 1607849.64 frames. ], batch size: 19, lr: 4.02e-02, grad_scale: 8.0 +2023-02-05 19:04:37,231 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 3.095e+02 4.155e+02 5.559e+02 6.668e+02 2.372e+03, threshold=1.112e+03, percent-clipped=6.0 +2023-02-05 19:04:54,075 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. 
limit=2.0 +2023-02-05 19:05:09,347 INFO [train.py:901] (2/4) Epoch 1, batch 5950, loss[loss=0.4502, simple_loss=0.4651, pruned_loss=0.2177, over 8255.00 frames. ], tot_loss[loss=0.4284, simple_loss=0.4424, pruned_loss=0.2072, over 1605632.77 frames. ], batch size: 24, lr: 4.01e-02, grad_scale: 8.0 +2023-02-05 19:05:44,547 INFO [train.py:901] (2/4) Epoch 1, batch 6000, loss[loss=0.3853, simple_loss=0.414, pruned_loss=0.1783, over 8484.00 frames. ], tot_loss[loss=0.4286, simple_loss=0.4426, pruned_loss=0.2073, over 1607308.74 frames. ], batch size: 28, lr: 4.00e-02, grad_scale: 16.0 +2023-02-05 19:05:44,548 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 19:06:02,004 INFO [train.py:935] (2/4) Epoch 1, validation: loss=0.3351, simple_loss=0.4011, pruned_loss=0.1346, over 944034.00 frames. +2023-02-05 19:06:02,005 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 19:06:02,815 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.7432, 1.6809, 1.5357, 0.2610, 1.1541, 1.1077, 0.2178, 1.3994], + device='cuda:2'), covar=tensor([0.0707, 0.0569, 0.0375, 0.1045, 0.0527, 0.0643, 0.0912, 0.0281], + device='cuda:2'), in_proj_covar=tensor([0.0149, 0.0112, 0.0093, 0.0152, 0.0113, 0.0156, 0.0152, 0.0120], + device='cuda:2'), out_proj_covar=tensor([1.0927e-04, 8.0912e-05, 7.1124e-05, 1.1967e-04, 9.3822e-05, 1.1756e-04, + 1.1941e-04, 8.7026e-05], device='cuda:2') +2023-02-05 19:06:06,794 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 3.694e+02 4.999e+02 6.330e+02 1.596e+03, threshold=9.998e+02, percent-clipped=5.0 +2023-02-05 19:06:06,984 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:06:09,470 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6012.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:06:35,735 INFO [train.py:901] (2/4) Epoch 1, batch 6050, loss[loss=0.4491, simple_loss=0.4272, pruned_loss=0.2355, over 7242.00 frames. ], tot_loss[loss=0.4336, simple_loss=0.4454, pruned_loss=0.2109, over 1608102.89 frames. ], batch size: 16, lr: 3.99e-02, grad_scale: 8.0 +2023-02-05 19:06:42,863 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:12,055 INFO [train.py:901] (2/4) Epoch 1, batch 6100, loss[loss=0.5123, simple_loss=0.5102, pruned_loss=0.2572, over 8027.00 frames. ], tot_loss[loss=0.4324, simple_loss=0.4451, pruned_loss=0.2099, over 1608829.34 frames. 
], batch size: 22, lr: 3.98e-02, grad_scale: 8.0 +2023-02-05 19:07:17,503 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.508e+02 4.942e+02 6.048e+02 7.564e+02 1.774e+03, threshold=1.210e+03, percent-clipped=15.0 +2023-02-05 19:07:18,495 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8636, 2.1217, 2.8566, 3.4118, 2.1148, 1.5777, 1.9631, 2.3400], + device='cuda:2'), covar=tensor([0.1535, 0.0792, 0.0272, 0.0189, 0.0572, 0.0707, 0.0620, 0.0685], + device='cuda:2'), in_proj_covar=tensor([0.0278, 0.0177, 0.0125, 0.0145, 0.0179, 0.0192, 0.0198, 0.0214], + device='cuda:2'), out_proj_covar=tensor([1.6539e-04, 1.0798e-04, 7.5347e-05, 8.4724e-05, 1.0224e-04, 1.1456e-04, + 1.1366e-04, 1.2117e-04], device='cuda:2') +2023-02-05 19:07:23,139 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6117.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:28,996 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 19:07:29,787 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6127.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:07:45,967 INFO [train.py:901] (2/4) Epoch 1, batch 6150, loss[loss=0.3821, simple_loss=0.4162, pruned_loss=0.174, over 8441.00 frames. ], tot_loss[loss=0.4293, simple_loss=0.4435, pruned_loss=0.2075, over 1611466.19 frames. ], batch size: 27, lr: 3.97e-02, grad_scale: 8.0 +2023-02-05 19:07:47,408 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6153.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:07:56,772 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.71 vs. limit=5.0 +2023-02-05 19:08:12,166 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6188.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:22,933 INFO [train.py:901] (2/4) Epoch 1, batch 6200, loss[loss=0.4104, simple_loss=0.4425, pruned_loss=0.1892, over 8132.00 frames. ], tot_loss[loss=0.4296, simple_loss=0.4442, pruned_loss=0.2075, over 1611241.28 frames. 
], batch size: 22, lr: 3.96e-02, grad_scale: 8.0 +2023-02-05 19:08:26,614 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0615, 0.9692, 4.0949, 1.8361, 3.5400, 3.3095, 3.4687, 3.4928], + device='cuda:2'), covar=tensor([0.0272, 0.3863, 0.0252, 0.1428, 0.0580, 0.0391, 0.0279, 0.0339], + device='cuda:2'), in_proj_covar=tensor([0.0124, 0.0318, 0.0160, 0.0195, 0.0189, 0.0178, 0.0151, 0.0174], + device='cuda:2'), out_proj_covar=tensor([7.6942e-05, 1.7727e-04, 1.0038e-04, 1.2605e-04, 1.0853e-04, 1.0798e-04, + 9.2981e-05, 1.1110e-04], device='cuda:2') +2023-02-05 19:08:28,567 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.743e+02 4.155e+02 5.130e+02 7.106e+02 1.864e+03, threshold=1.026e+03, percent-clipped=2.0 +2023-02-05 19:08:36,385 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:37,192 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:42,650 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6229.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:44,640 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:08:48,070 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6020, 1.8373, 3.3817, 1.0140, 2.6766, 2.0241, 1.5414, 2.2691], + device='cuda:2'), covar=tensor([0.1241, 0.1632, 0.0286, 0.1731, 0.1137, 0.1751, 0.1237, 0.1444], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0267, 0.0241, 0.0294, 0.0342, 0.0344, 0.0288, 0.0329], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:08:57,420 INFO [train.py:901] (2/4) Epoch 1, batch 6250, loss[loss=0.398, simple_loss=0.4146, pruned_loss=0.1907, over 7808.00 frames. ], tot_loss[loss=0.4261, simple_loss=0.4419, pruned_loss=0.2051, over 1605911.79 frames. ], batch size: 20, lr: 3.95e-02, grad_scale: 8.0 +2023-02-05 19:09:07,172 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5498, 1.8248, 2.6505, 2.5476, 1.9837, 1.4477, 1.5077, 1.9303], + device='cuda:2'), covar=tensor([0.1710, 0.0713, 0.0234, 0.0226, 0.0376, 0.0701, 0.0748, 0.0652], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0191, 0.0131, 0.0153, 0.0195, 0.0205, 0.0214, 0.0232], + device='cuda:2'), out_proj_covar=tensor([1.8059e-04, 1.1765e-04, 7.9702e-05, 8.8577e-05, 1.1087e-04, 1.2308e-04, + 1.2391e-04, 1.3132e-04], device='cuda:2') +2023-02-05 19:09:20,019 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:09:32,702 INFO [train.py:901] (2/4) Epoch 1, batch 6300, loss[loss=0.4121, simple_loss=0.4255, pruned_loss=0.1993, over 7207.00 frames. ], tot_loss[loss=0.4269, simple_loss=0.4428, pruned_loss=0.2055, over 1612128.34 frames. 
], batch size: 16, lr: 3.94e-02, grad_scale: 8.0 +2023-02-05 19:09:38,777 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.783e+02 4.352e+02 5.159e+02 6.362e+02 1.735e+03, threshold=1.032e+03, percent-clipped=4.0 +2023-02-05 19:09:56,812 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6335.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:07,345 INFO [train.py:901] (2/4) Epoch 1, batch 6350, loss[loss=0.3744, simple_loss=0.4073, pruned_loss=0.1707, over 8030.00 frames. ], tot_loss[loss=0.4275, simple_loss=0.4429, pruned_loss=0.2061, over 1610152.93 frames. ], batch size: 22, lr: 3.93e-02, grad_scale: 8.0 +2023-02-05 19:10:08,103 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6352.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:28,923 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6383.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:10:40,794 INFO [train.py:901] (2/4) Epoch 1, batch 6400, loss[loss=0.3879, simple_loss=0.4034, pruned_loss=0.1862, over 7544.00 frames. ], tot_loss[loss=0.4272, simple_loss=0.4435, pruned_loss=0.2055, over 1614013.05 frames. ], batch size: 18, lr: 3.92e-02, grad_scale: 8.0 +2023-02-05 19:10:43,627 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6405.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:10:45,697 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.3543, 5.7443, 4.6109, 2.0624, 4.7005, 4.8152, 5.0302, 4.0620], + device='cuda:2'), covar=tensor([0.0874, 0.0271, 0.0723, 0.3563, 0.0393, 0.0561, 0.0723, 0.0456], + device='cuda:2'), in_proj_covar=tensor([0.0249, 0.0180, 0.0211, 0.0271, 0.0165, 0.0129, 0.0196, 0.0121], + device='cuda:2'), out_proj_covar=tensor([1.8835e-04, 1.2957e-04, 1.3808e-04, 1.7722e-04, 1.0664e-04, 9.3323e-05, + 1.3682e-04, 8.8519e-05], device='cuda:2') +2023-02-05 19:10:45,803 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6408.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:10:46,252 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.785e+02 4.017e+02 4.991e+02 6.603e+02 1.156e+03, threshold=9.981e+02, percent-clipped=3.0 +2023-02-05 19:11:09,690 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5968, 1.9536, 3.3702, 1.2112, 2.4039, 2.0034, 1.7763, 2.2025], + device='cuda:2'), covar=tensor([0.0969, 0.1190, 0.0292, 0.1289, 0.0969, 0.1467, 0.0969, 0.1054], + device='cuda:2'), in_proj_covar=tensor([0.0279, 0.0275, 0.0254, 0.0301, 0.0358, 0.0345, 0.0293, 0.0338], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:11:16,786 INFO [train.py:901] (2/4) Epoch 1, batch 6450, loss[loss=0.5176, simple_loss=0.5002, pruned_loss=0.2675, over 8100.00 frames. ], tot_loss[loss=0.4258, simple_loss=0.4426, pruned_loss=0.2045, over 1617186.22 frames. 
], batch size: 23, lr: 3.91e-02, grad_scale: 8.0 +2023-02-05 19:11:27,830 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6467.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:36,487 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:39,744 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6485.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:41,921 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:47,716 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:11:50,292 INFO [train.py:901] (2/4) Epoch 1, batch 6500, loss[loss=0.4221, simple_loss=0.4512, pruned_loss=0.1965, over 8451.00 frames. ], tot_loss[loss=0.4227, simple_loss=0.4404, pruned_loss=0.2025, over 1616330.64 frames. ], batch size: 25, lr: 3.90e-02, grad_scale: 8.0 +2023-02-05 19:11:55,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 4.204e+02 5.270e+02 6.161e+02 1.286e+03, threshold=1.054e+03, percent-clipped=6.0 +2023-02-05 19:11:58,504 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6513.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:03,288 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6520.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:11,141 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6532.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:25,067 INFO [train.py:901] (2/4) Epoch 1, batch 6550, loss[loss=0.3823, simple_loss=0.3997, pruned_loss=0.1824, over 7787.00 frames. ], tot_loss[loss=0.4195, simple_loss=0.4383, pruned_loss=0.2003, over 1613471.32 frames. ], batch size: 19, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:12:35,938 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6565.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:37,935 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 19:12:41,472 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6573.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:53,795 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:57,646 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 19:13:00,152 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 19:13:00,390 INFO [train.py:901] (2/4) Epoch 1, batch 6600, loss[loss=0.4031, simple_loss=0.4149, pruned_loss=0.1957, over 7543.00 frames. ], tot_loss[loss=0.4197, simple_loss=0.438, pruned_loss=0.2007, over 1613668.03 frames. 
], batch size: 18, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:13:05,684 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.999e+02 4.035e+02 4.985e+02 6.404e+02 1.328e+03, threshold=9.970e+02, percent-clipped=3.0 +2023-02-05 19:13:07,907 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:10,618 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:18,642 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6628.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:23,004 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.19 vs. limit=5.0 +2023-02-05 19:13:23,446 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4069, 2.0365, 1.6310, 1.6690, 2.0865, 1.7326, 2.1585, 2.3411], + device='cuda:2'), covar=tensor([0.1390, 0.1944, 0.2208, 0.2133, 0.1253, 0.1932, 0.1429, 0.1098], + device='cuda:2'), in_proj_covar=tensor([0.0278, 0.0289, 0.0291, 0.0284, 0.0278, 0.0262, 0.0266, 0.0262], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004], + device='cuda:2') +2023-02-05 19:13:31,558 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,594 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:34,155 INFO [train.py:901] (2/4) Epoch 1, batch 6650, loss[loss=0.343, simple_loss=0.3679, pruned_loss=0.1591, over 5585.00 frames. ], tot_loss[loss=0.4182, simple_loss=0.437, pruned_loss=0.1997, over 1606485.04 frames. ], batch size: 12, lr: 3.88e-02, grad_scale: 8.0 +2023-02-05 19:13:56,263 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:01,297 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:09,931 INFO [train.py:901] (2/4) Epoch 1, batch 6700, loss[loss=0.4228, simple_loss=0.4247, pruned_loss=0.2105, over 7714.00 frames. ], tot_loss[loss=0.4166, simple_loss=0.4357, pruned_loss=0.1988, over 1608214.52 frames. ], batch size: 18, lr: 3.87e-02, grad_scale: 8.0 +2023-02-05 19:14:15,398 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.351e+02 4.140e+02 4.960e+02 6.260e+02 1.494e+03, threshold=9.921e+02, percent-clipped=3.0 +2023-02-05 19:14:25,029 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6723.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:38,640 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6743.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:38,875 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 19:14:42,150 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:44,021 INFO [train.py:901] (2/4) Epoch 1, batch 6750, loss[loss=0.4093, simple_loss=0.4341, pruned_loss=0.1923, over 8503.00 frames. ], tot_loss[loss=0.4143, simple_loss=0.434, pruned_loss=0.1973, over 1607737.38 frames. 
], batch size: 28, lr: 3.86e-02, grad_scale: 8.0 +2023-02-05 19:15:00,943 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:14,379 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 19:15:19,964 INFO [train.py:901] (2/4) Epoch 1, batch 6800, loss[loss=0.4865, simple_loss=0.4927, pruned_loss=0.2401, over 8348.00 frames. ], tot_loss[loss=0.416, simple_loss=0.4353, pruned_loss=0.1983, over 1605933.06 frames. ], batch size: 26, lr: 3.85e-02, grad_scale: 8.0 +2023-02-05 19:15:20,161 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6801.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:25,326 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 4.226e+02 5.434e+02 7.341e+02 1.725e+03, threshold=1.087e+03, percent-clipped=4.0 +2023-02-05 19:15:35,602 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6824.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:39,016 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6829.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:54,376 INFO [train.py:901] (2/4) Epoch 1, batch 6850, loss[loss=0.4261, simple_loss=0.4409, pruned_loss=0.2057, over 8031.00 frames. ], tot_loss[loss=0.4171, simple_loss=0.4366, pruned_loss=0.1988, over 1608196.97 frames. ], batch size: 22, lr: 3.84e-02, grad_scale: 8.0 +2023-02-05 19:15:57,382 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6251, 1.1717, 3.2746, 1.3562, 1.8762, 3.5950, 3.2016, 3.1866], + device='cuda:2'), covar=tensor([0.1802, 0.2330, 0.0336, 0.2720, 0.1143, 0.0278, 0.0360, 0.0529], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0258, 0.0156, 0.0251, 0.0188, 0.0121, 0.0124, 0.0175], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 19:16:04,830 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 19:16:06,395 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:23,446 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6893.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:25,434 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6896.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:16:29,305 INFO [train.py:901] (2/4) Epoch 1, batch 6900, loss[loss=0.3463, simple_loss=0.3855, pruned_loss=0.1535, over 8087.00 frames. ], tot_loss[loss=0.4155, simple_loss=0.435, pruned_loss=0.198, over 1604986.78 frames. ], batch size: 21, lr: 3.83e-02, grad_scale: 8.0 +2023-02-05 19:16:31,390 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6903.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:35,803 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.796e+02 4.754e+02 6.076e+02 1.448e+03, threshold=9.507e+02, percent-clipped=2.0 +2023-02-05 19:16:40,306 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.11 vs. 
limit=2.0 +2023-02-05 19:16:48,743 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6927.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:49,399 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:54,887 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6936.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:56,886 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6939.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,412 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,457 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:05,062 INFO [train.py:901] (2/4) Epoch 1, batch 6950, loss[loss=0.441, simple_loss=0.4626, pruned_loss=0.2097, over 8530.00 frames. ], tot_loss[loss=0.4168, simple_loss=0.4365, pruned_loss=0.1986, over 1611676.81 frames. ], batch size: 39, lr: 3.82e-02, grad_scale: 8.0 +2023-02-05 19:17:11,204 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 19:17:11,443 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0019, 1.9555, 3.3666, 1.4720, 2.5018, 2.3363, 1.9695, 2.4172], + device='cuda:2'), covar=tensor([0.0721, 0.1022, 0.0186, 0.1114, 0.0783, 0.0891, 0.0686, 0.0802], + device='cuda:2'), in_proj_covar=tensor([0.0291, 0.0293, 0.0270, 0.0319, 0.0375, 0.0345, 0.0300, 0.0354], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:17:12,145 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6961.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:17,893 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:32,944 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6991.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:38,573 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6999.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:39,833 INFO [train.py:901] (2/4) Epoch 1, batch 7000, loss[loss=0.4244, simple_loss=0.4496, pruned_loss=0.1996, over 8479.00 frames. ], tot_loss[loss=0.4146, simple_loss=0.4344, pruned_loss=0.1974, over 1610219.84 frames. 
], batch size: 29, lr: 3.81e-02, grad_scale: 8.0 +2023-02-05 19:17:45,243 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.380e+02 4.090e+02 4.918e+02 6.048e+02 1.151e+03, threshold=9.836e+02, percent-clipped=6.0 +2023-02-05 19:17:57,721 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:58,275 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1610, 3.2655, 2.7782, 1.1205, 2.7265, 2.7348, 2.8754, 2.3271], + device='cuda:2'), covar=tensor([0.0970, 0.0522, 0.0967, 0.3801, 0.0549, 0.0619, 0.1029, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0258, 0.0187, 0.0216, 0.0290, 0.0168, 0.0133, 0.0202, 0.0130], + device='cuda:2'), out_proj_covar=tensor([1.9296e-04, 1.3380e-04, 1.4189e-04, 1.8658e-04, 1.0993e-04, 9.3640e-05, + 1.3958e-04, 9.3973e-05], device='cuda:2') +2023-02-05 19:18:16,023 INFO [train.py:901] (2/4) Epoch 1, batch 7050, loss[loss=0.4486, simple_loss=0.4665, pruned_loss=0.2153, over 8459.00 frames. ], tot_loss[loss=0.4154, simple_loss=0.435, pruned_loss=0.198, over 1613419.82 frames. ], batch size: 27, lr: 3.80e-02, grad_scale: 8.0 +2023-02-05 19:18:26,091 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7066.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:18:50,228 INFO [train.py:901] (2/4) Epoch 1, batch 7100, loss[loss=0.3676, simple_loss=0.4006, pruned_loss=0.1672, over 7814.00 frames. ], tot_loss[loss=0.4159, simple_loss=0.4359, pruned_loss=0.1979, over 1614697.48 frames. ], batch size: 20, lr: 3.79e-02, grad_scale: 8.0 +2023-02-05 19:18:53,881 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:55,757 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.791e+02 4.613e+02 6.150e+02 1.722e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 19:19:25,907 INFO [train.py:901] (2/4) Epoch 1, batch 7150, loss[loss=0.5054, simple_loss=0.4905, pruned_loss=0.2601, over 7826.00 frames. ], tot_loss[loss=0.4138, simple_loss=0.4345, pruned_loss=0.1966, over 1613884.61 frames. ], batch size: 20, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:19:46,604 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7181.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:56,063 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7195.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,475 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,951 INFO [train.py:901] (2/4) Epoch 1, batch 7200, loss[loss=0.3397, simple_loss=0.3864, pruned_loss=0.1465, over 8132.00 frames. ], tot_loss[loss=0.4139, simple_loss=0.4342, pruned_loss=0.1968, over 1610319.80 frames. 
], batch size: 22, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:20:05,328 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.325e+02 4.231e+02 5.262e+02 7.053e+02 1.685e+03, threshold=1.052e+03, percent-clipped=7.0 +2023-02-05 19:20:13,056 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:16,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:24,760 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7193, 1.1832, 5.2130, 2.4781, 3.9697, 4.6037, 5.0921, 5.0195], + device='cuda:2'), covar=tensor([0.0775, 0.4518, 0.0477, 0.1257, 0.1537, 0.0475, 0.0304, 0.0510], + device='cuda:2'), in_proj_covar=tensor([0.0155, 0.0340, 0.0179, 0.0214, 0.0225, 0.0200, 0.0170, 0.0203], + device='cuda:2'), out_proj_covar=tensor([9.5622e-05, 1.8661e-04, 1.0917e-04, 1.3572e-04, 1.2867e-04, 1.1939e-04, + 1.0304e-04, 1.2815e-04], device='cuda:2') +2023-02-05 19:20:25,922 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7240.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:20:33,003 INFO [train.py:901] (2/4) Epoch 1, batch 7250, loss[loss=0.4045, simple_loss=0.4287, pruned_loss=0.1902, over 7656.00 frames. ], tot_loss[loss=0.4149, simple_loss=0.4345, pruned_loss=0.1977, over 1612109.00 frames. ], batch size: 19, lr: 3.77e-02, grad_scale: 8.0 +2023-02-05 19:20:33,214 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4886, 1.9760, 1.1139, 1.9897, 1.7721, 1.4535, 1.5258, 2.1967], + device='cuda:2'), covar=tensor([0.1421, 0.0744, 0.1523, 0.0709, 0.1146, 0.1108, 0.1608, 0.0801], + device='cuda:2'), in_proj_covar=tensor([0.0334, 0.0234, 0.0342, 0.0274, 0.0319, 0.0276, 0.0330, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 19:20:49,155 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:03,983 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:09,003 INFO [train.py:901] (2/4) Epoch 1, batch 7300, loss[loss=0.3889, simple_loss=0.4094, pruned_loss=0.1842, over 7669.00 frames. ], tot_loss[loss=0.4151, simple_loss=0.4347, pruned_loss=0.1977, over 1611192.11 frames. ], batch size: 19, lr: 3.76e-02, grad_scale: 8.0 +2023-02-05 19:21:14,304 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.413e+02 4.263e+02 5.448e+02 6.514e+02 1.215e+03, threshold=1.090e+03, percent-clipped=2.0 +2023-02-05 19:21:42,628 INFO [train.py:901] (2/4) Epoch 1, batch 7350, loss[loss=0.3589, simple_loss=0.3777, pruned_loss=0.1701, over 7791.00 frames. ], tot_loss[loss=0.4152, simple_loss=0.4346, pruned_loss=0.1979, over 1611213.70 frames. ], batch size: 19, lr: 3.75e-02, grad_scale: 8.0 +2023-02-05 19:21:45,537 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7355.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:21:50,110 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7362.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:56,016 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-05 19:22:03,009 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1531, 1.6142, 1.0406, 1.8403, 1.3357, 1.0706, 1.0615, 1.9658], + device='cuda:2'), covar=tensor([0.1290, 0.0859, 0.2258, 0.0741, 0.1885, 0.1668, 0.1817, 0.0745], + device='cuda:2'), in_proj_covar=tensor([0.0342, 0.0246, 0.0367, 0.0286, 0.0333, 0.0289, 0.0349, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 19:22:08,387 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9069, 3.0949, 2.6061, 1.0909, 2.5008, 2.6621, 2.7648, 2.1475], + device='cuda:2'), covar=tensor([0.1341, 0.0645, 0.1128, 0.4240, 0.0728, 0.0671, 0.1182, 0.0791], + device='cuda:2'), in_proj_covar=tensor([0.0265, 0.0185, 0.0230, 0.0294, 0.0177, 0.0140, 0.0213, 0.0134], + device='cuda:2'), out_proj_covar=tensor([1.9670e-04, 1.3098e-04, 1.5045e-04, 1.8912e-04, 1.1403e-04, 1.0044e-04, + 1.4600e-04, 9.3769e-05], device='cuda:2') +2023-02-05 19:22:08,467 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7386.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:09,133 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7387.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:18,186 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 19:22:18,974 INFO [train.py:901] (2/4) Epoch 1, batch 7400, loss[loss=0.3591, simple_loss=0.3925, pruned_loss=0.1628, over 7649.00 frames. ], tot_loss[loss=0.4134, simple_loss=0.4333, pruned_loss=0.1967, over 1609237.52 frames. ], batch size: 19, lr: 3.74e-02, grad_scale: 8.0 +2023-02-05 19:22:24,408 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.824e+02 4.270e+02 5.603e+02 6.704e+02 2.452e+03, threshold=1.121e+03, percent-clipped=4.0 +2023-02-05 19:22:25,141 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7410.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:22:35,071 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:52,554 INFO [train.py:901] (2/4) Epoch 1, batch 7450, loss[loss=0.4557, simple_loss=0.4646, pruned_loss=0.2233, over 8243.00 frames. ], tot_loss[loss=0.4128, simple_loss=0.4332, pruned_loss=0.1962, over 1611261.04 frames. ], batch size: 22, lr: 3.73e-02, grad_scale: 8.0 +2023-02-05 19:22:53,051 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-05 19:22:56,078 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 19:23:27,513 INFO [train.py:901] (2/4) Epoch 1, batch 7500, loss[loss=0.4377, simple_loss=0.4695, pruned_loss=0.203, over 8458.00 frames. ], tot_loss[loss=0.4148, simple_loss=0.4347, pruned_loss=0.1975, over 1612780.12 frames. 
], batch size: 29, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:23:34,187 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 4.060e+02 5.044e+02 6.934e+02 1.457e+03, threshold=1.009e+03, percent-clipped=3.0 +2023-02-05 19:23:45,044 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7525.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:23:45,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7525.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:24:02,105 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-05 19:24:02,230 INFO [train.py:901] (2/4) Epoch 1, batch 7550, loss[loss=0.3303, simple_loss=0.3557, pruned_loss=0.1525, over 7695.00 frames. ], tot_loss[loss=0.4141, simple_loss=0.434, pruned_loss=0.1971, over 1613594.58 frames. ], batch size: 18, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:24:36,293 INFO [train.py:901] (2/4) Epoch 1, batch 7600, loss[loss=0.3435, simple_loss=0.3643, pruned_loss=0.1614, over 7923.00 frames. ], tot_loss[loss=0.4132, simple_loss=0.4328, pruned_loss=0.1968, over 1608464.32 frames. ], batch size: 20, lr: 3.71e-02, grad_scale: 8.0 +2023-02-05 19:24:41,738 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.765e+02 4.361e+02 5.460e+02 6.853e+02 1.164e+03, threshold=1.092e+03, percent-clipped=2.0 +2023-02-05 19:24:43,938 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7611.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:25:03,352 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7636.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:25:03,853 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7637.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:05,958 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7640.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:06,827 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=5.04 vs. limit=5.0 +2023-02-05 19:25:07,273 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,351 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:13,187 INFO [train.py:901] (2/4) Epoch 1, batch 7650, loss[loss=0.3755, simple_loss=0.4165, pruned_loss=0.1672, over 8327.00 frames. ], tot_loss[loss=0.4108, simple_loss=0.4315, pruned_loss=0.195, over 1607552.57 frames. ], batch size: 25, lr: 3.70e-02, grad_scale: 8.0 +2023-02-05 19:25:23,893 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7667.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:46,144 INFO [train.py:901] (2/4) Epoch 1, batch 7700, loss[loss=0.5156, simple_loss=0.5122, pruned_loss=0.2595, over 8348.00 frames. ], tot_loss[loss=0.4092, simple_loss=0.4308, pruned_loss=0.1937, over 1613273.23 frames. ], batch size: 26, lr: 3.69e-02, grad_scale: 8.0 +2023-02-05 19:25:51,307 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.517e+02 4.083e+02 4.742e+02 6.161e+02 2.101e+03, threshold=9.483e+02, percent-clipped=6.0 +2023-02-05 19:26:07,592 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-05 19:26:14,379 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4737, 1.8799, 3.2077, 1.1496, 2.1277, 1.9325, 1.4136, 2.2233], + device='cuda:2'), covar=tensor([0.1086, 0.1294, 0.0274, 0.1493, 0.1059, 0.1427, 0.1042, 0.1056], + device='cuda:2'), in_proj_covar=tensor([0.0322, 0.0318, 0.0306, 0.0340, 0.0399, 0.0369, 0.0318, 0.0386], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:26:21,659 INFO [train.py:901] (2/4) Epoch 1, batch 7750, loss[loss=0.418, simple_loss=0.4553, pruned_loss=0.1904, over 8358.00 frames. ], tot_loss[loss=0.4088, simple_loss=0.4307, pruned_loss=0.1935, over 1612770.67 frames. ], batch size: 24, lr: 3.68e-02, grad_scale: 8.0 +2023-02-05 19:26:23,165 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7752.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:29,120 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:34,416 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7769.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:42,677 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7781.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:26:56,367 INFO [train.py:901] (2/4) Epoch 1, batch 7800, loss[loss=0.428, simple_loss=0.4527, pruned_loss=0.2016, over 8528.00 frames. ], tot_loss[loss=0.4072, simple_loss=0.4299, pruned_loss=0.1923, over 1615872.58 frames. ], batch size: 28, lr: 3.67e-02, grad_scale: 8.0 +2023-02-05 19:26:59,825 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7806.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:27:01,643 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.720e+02 4.585e+02 5.523e+02 1.290e+03, threshold=9.170e+02, percent-clipped=3.0 +2023-02-05 19:27:07,211 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9586, 3.0596, 1.8203, 2.4712, 2.7740, 1.8730, 2.1206, 2.9091], + device='cuda:2'), covar=tensor([0.1606, 0.0764, 0.1175, 0.1065, 0.1061, 0.1301, 0.1586, 0.0806], + device='cuda:2'), in_proj_covar=tensor([0.0326, 0.0231, 0.0346, 0.0274, 0.0319, 0.0281, 0.0326, 0.0269], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002], + device='cuda:2') +2023-02-05 19:27:09,238 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:11,967 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8347, 2.0160, 1.9511, 2.6324, 1.4457, 1.1985, 1.8922, 1.9493], + device='cuda:2'), covar=tensor([0.1221, 0.1361, 0.1255, 0.0397, 0.1829, 0.2050, 0.1672, 0.1155], + device='cuda:2'), in_proj_covar=tensor([0.0300, 0.0329, 0.0312, 0.0201, 0.0343, 0.0332, 0.0393, 0.0291], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0004, 0.0003, 0.0002, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 19:27:29,719 INFO [train.py:901] (2/4) Epoch 1, batch 7850, loss[loss=0.4551, simple_loss=0.4745, pruned_loss=0.2179, over 8626.00 frames. ], tot_loss[loss=0.4069, simple_loss=0.4295, pruned_loss=0.1922, over 1617868.75 frames. 
], batch size: 34, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:27:52,023 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7884.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:59,879 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7896.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:03,018 INFO [train.py:901] (2/4) Epoch 1, batch 7900, loss[loss=0.4539, simple_loss=0.461, pruned_loss=0.2234, over 6642.00 frames. ], tot_loss[loss=0.406, simple_loss=0.4286, pruned_loss=0.1916, over 1612900.23 frames. ], batch size: 72, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:28:08,433 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.732e+02 4.923e+02 6.190e+02 1.863e+03, threshold=9.845e+02, percent-clipped=5.0 +2023-02-05 19:28:16,374 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:16,631 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-05 19:28:35,812 INFO [train.py:901] (2/4) Epoch 1, batch 7950, loss[loss=0.4412, simple_loss=0.4711, pruned_loss=0.2056, over 8339.00 frames. ], tot_loss[loss=0.4074, simple_loss=0.4299, pruned_loss=0.1924, over 1613344.87 frames. ], batch size: 25, lr: 3.65e-02, grad_scale: 8.0 +2023-02-05 19:28:45,404 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-05 19:28:59,119 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7986.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:10,062 INFO [train.py:901] (2/4) Epoch 1, batch 8000, loss[loss=0.5517, simple_loss=0.5169, pruned_loss=0.2933, over 6615.00 frames. ], tot_loss[loss=0.4055, simple_loss=0.4284, pruned_loss=0.1913, over 1610131.70 frames. ], batch size: 71, lr: 3.64e-02, grad_scale: 8.0 +2023-02-05 19:29:15,098 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:15,548 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.650e+02 3.959e+02 4.934e+02 6.403e+02 1.426e+03, threshold=9.868e+02, percent-clipped=4.0 +2023-02-05 19:29:29,569 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4159, 2.0298, 1.2013, 2.0030, 1.9489, 1.2772, 1.7432, 2.4443], + device='cuda:2'), covar=tensor([0.1454, 0.0649, 0.1508, 0.0907, 0.1067, 0.1434, 0.1310, 0.0793], + device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0227, 0.0356, 0.0288, 0.0334, 0.0304, 0.0342, 0.0287], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 19:29:31,430 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8033.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:43,012 INFO [train.py:901] (2/4) Epoch 1, batch 8050, loss[loss=0.4386, simple_loss=0.4441, pruned_loss=0.2165, over 6831.00 frames. ], tot_loss[loss=0.4064, simple_loss=0.4284, pruned_loss=0.1922, over 1596063.85 frames. 
], batch size: 71, lr: 3.63e-02, grad_scale: 16.0 +2023-02-05 19:29:53,046 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0026, 1.8516, 3.1078, 1.1702, 1.8527, 2.3540, 0.5054, 1.5992], + device='cuda:2'), covar=tensor([0.0667, 0.0575, 0.0275, 0.0399, 0.0435, 0.0317, 0.1497, 0.0642], + device='cuda:2'), in_proj_covar=tensor([0.0142, 0.0114, 0.0095, 0.0133, 0.0115, 0.0083, 0.0163, 0.0132], + device='cuda:2'), out_proj_covar=tensor([1.1974e-04, 1.0761e-04, 8.3139e-05, 1.1257e-04, 1.0679e-04, 7.3238e-05, + 1.4342e-04, 1.1716e-04], device='cuda:2') +2023-02-05 19:30:16,951 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 19:30:20,852 INFO [train.py:901] (2/4) Epoch 2, batch 0, loss[loss=0.3829, simple_loss=0.4024, pruned_loss=0.1816, over 7713.00 frames. ], tot_loss[loss=0.3829, simple_loss=0.4024, pruned_loss=0.1816, over 7713.00 frames. ], batch size: 18, lr: 3.56e-02, grad_scale: 8.0 +2023-02-05 19:30:20,853 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 19:30:32,397 INFO [train.py:935] (2/4) Epoch 2, validation: loss=0.3107, simple_loss=0.3861, pruned_loss=0.1176, over 944034.00 frames. +2023-02-05 19:30:32,398 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 19:30:44,062 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8101.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:46,623 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 19:30:46,681 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:49,928 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 3.846e+02 4.676e+02 6.027e+02 1.450e+03, threshold=9.352e+02, percent-clipped=5.0 +2023-02-05 19:30:56,768 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7788, 1.8838, 1.8499, 2.5534, 0.9706, 1.0852, 1.6427, 1.8515], + device='cuda:2'), covar=tensor([0.1222, 0.1525, 0.1394, 0.0376, 0.2416, 0.2550, 0.2023, 0.1308], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0323, 0.0300, 0.0196, 0.0331, 0.0343, 0.0380, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0004, 0.0003, 0.0002, 0.0004, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 19:31:06,753 INFO [train.py:901] (2/4) Epoch 2, batch 50, loss[loss=0.4761, simple_loss=0.4615, pruned_loss=0.2453, over 8240.00 frames. ], tot_loss[loss=0.4027, simple_loss=0.4274, pruned_loss=0.189, over 364121.76 frames. ], batch size: 22, lr: 3.55e-02, grad_scale: 8.0 +2023-02-05 19:31:11,116 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8140.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:20,787 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 19:31:28,341 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:29,177 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8165.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:41,604 INFO [train.py:901] (2/4) Epoch 2, batch 100, loss[loss=0.4268, simple_loss=0.4365, pruned_loss=0.2086, over 8035.00 frames. ], tot_loss[loss=0.406, simple_loss=0.4302, pruned_loss=0.1909, over 644360.20 frames. 
], batch size: 22, lr: 3.54e-02, grad_scale: 8.0 +2023-02-05 19:31:44,283 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 19:31:59,434 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.246e+02 4.943e+02 6.491e+02 9.375e+02, threshold=9.885e+02, percent-clipped=1.0 +2023-02-05 19:32:06,342 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:07,758 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5228, 1.9354, 3.0883, 0.8696, 2.2812, 1.7347, 1.4636, 2.0157], + device='cuda:2'), covar=tensor([0.1199, 0.1271, 0.0401, 0.1931, 0.1036, 0.1752, 0.1051, 0.1401], + device='cuda:2'), in_proj_covar=tensor([0.0324, 0.0316, 0.0312, 0.0356, 0.0403, 0.0372, 0.0324, 0.0388], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:32:15,464 INFO [train.py:901] (2/4) Epoch 2, batch 150, loss[loss=0.3343, simple_loss=0.3745, pruned_loss=0.147, over 7693.00 frames. ], tot_loss[loss=0.4055, simple_loss=0.4306, pruned_loss=0.1902, over 863622.01 frames. ], batch size: 18, lr: 3.53e-02, grad_scale: 8.0 +2023-02-05 19:32:47,787 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,393 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8283.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,923 INFO [train.py:901] (2/4) Epoch 2, batch 200, loss[loss=0.3713, simple_loss=0.4049, pruned_loss=0.1689, over 8468.00 frames. ], tot_loss[loss=0.4022, simple_loss=0.4276, pruned_loss=0.1884, over 1029688.39 frames. ], batch size: 25, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:32:53,835 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.74 vs. limit=2.0 +2023-02-05 19:33:08,598 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.581e+02 3.727e+02 4.975e+02 6.903e+02 1.681e+03, threshold=9.950e+02, percent-clipped=7.0 +2023-02-05 19:33:24,845 INFO [train.py:901] (2/4) Epoch 2, batch 250, loss[loss=0.4005, simple_loss=0.4331, pruned_loss=0.184, over 7980.00 frames. ], tot_loss[loss=0.4011, simple_loss=0.427, pruned_loss=0.1877, over 1161484.74 frames. ], batch size: 21, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:33:36,306 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 19:33:40,657 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:45,997 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 19:33:58,466 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:59,607 INFO [train.py:901] (2/4) Epoch 2, batch 300, loss[loss=0.3802, simple_loss=0.4375, pruned_loss=0.1615, over 8251.00 frames. ], tot_loss[loss=0.4027, simple_loss=0.4283, pruned_loss=0.1885, over 1264525.68 frames. 
], batch size: 24, lr: 3.51e-02, grad_scale: 8.0 +2023-02-05 19:34:18,662 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 4.043e+02 4.737e+02 5.583e+02 9.957e+02, threshold=9.474e+02, percent-clipped=1.0 +2023-02-05 19:34:19,482 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0731, 2.7201, 4.9477, 1.0727, 2.8216, 2.4383, 2.0344, 2.3323], + device='cuda:2'), covar=tensor([0.1038, 0.1161, 0.0183, 0.1633, 0.1063, 0.1409, 0.0921, 0.1528], + device='cuda:2'), in_proj_covar=tensor([0.0331, 0.0308, 0.0310, 0.0350, 0.0402, 0.0370, 0.0324, 0.0390], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:34:35,500 INFO [train.py:901] (2/4) Epoch 2, batch 350, loss[loss=0.3169, simple_loss=0.3545, pruned_loss=0.1397, over 7706.00 frames. ], tot_loss[loss=0.4006, simple_loss=0.4269, pruned_loss=0.1871, over 1343388.97 frames. ], batch size: 18, lr: 3.50e-02, grad_scale: 8.0 +2023-02-05 19:34:36,952 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.5898, 1.0330, 3.7178, 1.4639, 3.0583, 3.1067, 3.1760, 3.2028], + device='cuda:2'), covar=tensor([0.0402, 0.3525, 0.0319, 0.1564, 0.1028, 0.0440, 0.0364, 0.0494], + device='cuda:2'), in_proj_covar=tensor([0.0166, 0.0349, 0.0193, 0.0220, 0.0252, 0.0212, 0.0177, 0.0222], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 19:34:42,215 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2999, 1.3207, 1.5424, 1.1102, 0.9581, 1.4457, 0.1561, 0.9121], + device='cuda:2'), covar=tensor([0.0575, 0.0530, 0.0266, 0.0372, 0.0507, 0.0276, 0.1465, 0.0647], + device='cuda:2'), in_proj_covar=tensor([0.0143, 0.0119, 0.0102, 0.0145, 0.0118, 0.0089, 0.0171, 0.0142], + device='cuda:2'), out_proj_covar=tensor([1.2182e-04, 1.1227e-04, 9.1913e-05, 1.2420e-04, 1.1222e-04, 8.1590e-05, + 1.4938e-04, 1.2798e-04], device='cuda:2') +2023-02-05 19:35:03,563 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:09,447 INFO [train.py:901] (2/4) Epoch 2, batch 400, loss[loss=0.377, simple_loss=0.4055, pruned_loss=0.1742, over 8140.00 frames. ], tot_loss[loss=0.4001, simple_loss=0.4263, pruned_loss=0.187, over 1405704.73 frames. ], batch size: 22, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:13,313 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-05 19:35:20,909 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:27,438 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.339e+02 4.887e+02 6.099e+02 1.134e+03, threshold=9.773e+02, percent-clipped=6.0 +2023-02-05 19:35:43,481 INFO [train.py:901] (2/4) Epoch 2, batch 450, loss[loss=0.3456, simple_loss=0.3932, pruned_loss=0.149, over 8099.00 frames. ], tot_loss[loss=0.401, simple_loss=0.4268, pruned_loss=0.1877, over 1451341.83 frames. 
], batch size: 23, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:44,344 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:01,873 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:13,318 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4780, 1.4363, 3.1697, 1.5868, 2.2987, 3.6588, 3.3380, 3.3107], + device='cuda:2'), covar=tensor([0.1639, 0.1886, 0.0351, 0.2057, 0.0714, 0.0202, 0.0243, 0.0395], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0260, 0.0170, 0.0253, 0.0184, 0.0138, 0.0131, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 19:36:18,002 INFO [train.py:901] (2/4) Epoch 2, batch 500, loss[loss=0.3554, simple_loss=0.3865, pruned_loss=0.1622, over 7635.00 frames. ], tot_loss[loss=0.3984, simple_loss=0.4249, pruned_loss=0.186, over 1488710.60 frames. ], batch size: 19, lr: 3.48e-02, grad_scale: 8.0 +2023-02-05 19:36:26,943 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.3502, 5.6069, 4.5388, 1.7783, 4.6647, 4.8734, 5.0307, 4.2619], + device='cuda:2'), covar=tensor([0.0652, 0.0313, 0.0713, 0.3531, 0.0285, 0.0290, 0.0959, 0.0348], + device='cuda:2'), in_proj_covar=tensor([0.0280, 0.0185, 0.0229, 0.0303, 0.0193, 0.0144, 0.0204, 0.0137], + device='cuda:2'), out_proj_covar=tensor([2.0096e-04, 1.2857e-04, 1.4659e-04, 1.9091e-04, 1.2387e-04, 1.0357e-04, + 1.3701e-04, 9.6944e-05], device='cuda:2') +2023-02-05 19:36:36,157 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.361e+02 3.910e+02 4.803e+02 5.619e+02 9.699e+02, threshold=9.605e+02, percent-clipped=0.0 +2023-02-05 19:36:47,558 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8627.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:52,675 INFO [train.py:901] (2/4) Epoch 2, batch 550, loss[loss=0.434, simple_loss=0.4582, pruned_loss=0.2049, over 8324.00 frames. ], tot_loss[loss=0.3988, simple_loss=0.4255, pruned_loss=0.1861, over 1517622.11 frames. ], batch size: 25, lr: 3.47e-02, grad_scale: 8.0 +2023-02-05 19:37:24,170 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8084, 2.2485, 1.8078, 2.8343, 1.3320, 1.0781, 1.7886, 2.0606], + device='cuda:2'), covar=tensor([0.1066, 0.1105, 0.1144, 0.0265, 0.1882, 0.2316, 0.1978, 0.1274], + device='cuda:2'), in_proj_covar=tensor([0.0306, 0.0329, 0.0317, 0.0209, 0.0343, 0.0352, 0.0395, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:2') +2023-02-05 19:37:26,524 INFO [train.py:901] (2/4) Epoch 2, batch 600, loss[loss=0.4575, simple_loss=0.4519, pruned_loss=0.2315, over 8074.00 frames. ], tot_loss[loss=0.3996, simple_loss=0.4256, pruned_loss=0.1868, over 1537610.90 frames. ], batch size: 21, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:37:43,320 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.752e+02 3.934e+02 5.073e+02 6.758e+02 1.500e+03, threshold=1.015e+03, percent-clipped=5.0 +2023-02-05 19:37:44,752 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 19:37:59,735 INFO [train.py:901] (2/4) Epoch 2, batch 650, loss[loss=0.4907, simple_loss=0.4715, pruned_loss=0.255, over 8533.00 frames. 
], tot_loss[loss=0.3983, simple_loss=0.4247, pruned_loss=0.186, over 1555707.93 frames. ], batch size: 49, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:38:05,378 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:31,196 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:32,083 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=2.00 vs. limit=2.0 +2023-02-05 19:38:35,550 INFO [train.py:901] (2/4) Epoch 2, batch 700, loss[loss=0.4477, simple_loss=0.4539, pruned_loss=0.2207, over 8467.00 frames. ], tot_loss[loss=0.395, simple_loss=0.4221, pruned_loss=0.184, over 1568470.09 frames. ], batch size: 25, lr: 3.45e-02, grad_scale: 8.0 +2023-02-05 19:38:53,117 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.421e+02 3.759e+02 4.676e+02 6.060e+02 1.461e+03, threshold=9.352e+02, percent-clipped=1.0 +2023-02-05 19:39:09,177 INFO [train.py:901] (2/4) Epoch 2, batch 750, loss[loss=0.4535, simple_loss=0.4628, pruned_loss=0.2221, over 8454.00 frames. ], tot_loss[loss=0.3955, simple_loss=0.4222, pruned_loss=0.1844, over 1574651.53 frames. ], batch size: 27, lr: 3.44e-02, grad_scale: 8.0 +2023-02-05 19:39:26,413 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 19:39:35,571 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 19:39:42,526 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3476, 1.8328, 1.4828, 1.1817, 1.9969, 1.5978, 1.9084, 2.1006], + device='cuda:2'), covar=tensor([0.1217, 0.2050, 0.2370, 0.2283, 0.1148, 0.2083, 0.1453, 0.1137], + device='cuda:2'), in_proj_covar=tensor([0.0257, 0.0279, 0.0292, 0.0275, 0.0256, 0.0256, 0.0251, 0.0245], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:2') +2023-02-05 19:39:44,339 INFO [train.py:901] (2/4) Epoch 2, batch 800, loss[loss=0.3304, simple_loss=0.3648, pruned_loss=0.148, over 7437.00 frames. ], tot_loss[loss=0.3989, simple_loss=0.4243, pruned_loss=0.1867, over 1584741.25 frames. ], batch size: 17, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:02,285 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 4.043e+02 5.225e+02 6.708e+02 1.302e+03, threshold=1.045e+03, percent-clipped=9.0 +2023-02-05 19:40:18,495 INFO [train.py:901] (2/4) Epoch 2, batch 850, loss[loss=0.3706, simple_loss=0.3947, pruned_loss=0.1732, over 7812.00 frames. ], tot_loss[loss=0.3978, simple_loss=0.4243, pruned_loss=0.1856, over 1596403.37 frames. 
], batch size: 20, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:26,052 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8945.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:40:32,050 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4872, 1.9593, 1.9189, 0.5696, 1.9664, 1.3513, 0.5034, 1.3631], + device='cuda:2'), covar=tensor([0.0197, 0.0122, 0.0157, 0.0426, 0.0199, 0.0436, 0.0532, 0.0201], + device='cuda:2'), in_proj_covar=tensor([0.0161, 0.0123, 0.0105, 0.0164, 0.0119, 0.0203, 0.0171, 0.0143], + device='cuda:2'), out_proj_covar=tensor([1.1400e-04, 8.7058e-05, 8.0642e-05, 1.1866e-04, 9.2732e-05, 1.5596e-04, + 1.2641e-04, 1.0579e-04], device='cuda:2') +2023-02-05 19:40:42,766 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-05 19:40:52,653 INFO [train.py:901] (2/4) Epoch 2, batch 900, loss[loss=0.4446, simple_loss=0.4576, pruned_loss=0.2158, over 8139.00 frames. ], tot_loss[loss=0.3959, simple_loss=0.4234, pruned_loss=0.1842, over 1604380.43 frames. ], batch size: 22, lr: 3.42e-02, grad_scale: 8.0 +2023-02-05 19:41:03,939 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8998.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:08,132 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9004.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:12,012 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.317e+02 3.660e+02 4.402e+02 6.333e+02 1.420e+03, threshold=8.805e+02, percent-clipped=4.0 +2023-02-05 19:41:21,850 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9023.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:27,228 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9031.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:41:29,113 INFO [train.py:901] (2/4) Epoch 2, batch 950, loss[loss=0.4266, simple_loss=0.4535, pruned_loss=0.1998, over 8343.00 frames. ], tot_loss[loss=0.3949, simple_loss=0.4233, pruned_loss=0.1832, over 1610552.64 frames. ], batch size: 24, lr: 3.41e-02, grad_scale: 8.0 +2023-02-05 19:41:57,091 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 19:42:04,017 INFO [train.py:901] (2/4) Epoch 2, batch 1000, loss[loss=0.3726, simple_loss=0.3981, pruned_loss=0.1735, over 7643.00 frames. ], tot_loss[loss=0.395, simple_loss=0.4226, pruned_loss=0.1837, over 1604017.94 frames. ], batch size: 19, lr: 3.40e-02, grad_scale: 8.0 +2023-02-05 19:42:22,612 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.505e+02 3.676e+02 4.681e+02 5.718e+02 9.745e+02, threshold=9.362e+02, percent-clipped=2.0 +2023-02-05 19:42:30,647 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9122.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:42:31,277 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 19:42:39,163 INFO [train.py:901] (2/4) Epoch 2, batch 1050, loss[loss=0.3853, simple_loss=0.4131, pruned_loss=0.1788, over 7977.00 frames. ], tot_loss[loss=0.3959, simple_loss=0.4227, pruned_loss=0.1846, over 1604430.27 frames. ], batch size: 21, lr: 3.40e-02, grad_scale: 8.0 +2023-02-05 19:42:43,232 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-05 19:43:12,160 INFO [train.py:901] (2/4) Epoch 2, batch 1100, loss[loss=0.409, simple_loss=0.4468, pruned_loss=0.1857, over 8632.00 frames. ], tot_loss[loss=0.3945, simple_loss=0.4217, pruned_loss=0.1836, over 1610236.82 frames. ], batch size: 39, lr: 3.39e-02, grad_scale: 8.0 +2023-02-05 19:43:25,819 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.9653, 1.2069, 5.1880, 2.4207, 3.8908, 4.5145, 4.8101, 4.7724], + device='cuda:2'), covar=tensor([0.0816, 0.4456, 0.0544, 0.1393, 0.1979, 0.0445, 0.0466, 0.0674], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0353, 0.0204, 0.0240, 0.0274, 0.0219, 0.0201, 0.0236], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 19:43:30,062 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.895e+02 4.986e+02 6.293e+02 1.172e+03, threshold=9.973e+02, percent-clipped=2.0 +2023-02-05 19:43:47,492 INFO [train.py:901] (2/4) Epoch 2, batch 1150, loss[loss=0.3532, simple_loss=0.3999, pruned_loss=0.1533, over 8453.00 frames. ], tot_loss[loss=0.395, simple_loss=0.4223, pruned_loss=0.1838, over 1610764.94 frames. ], batch size: 29, lr: 3.38e-02, grad_scale: 8.0 +2023-02-05 19:43:49,761 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9237.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:43:50,987 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 19:44:11,412 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2580, 4.5551, 3.9148, 1.7215, 3.7246, 3.6812, 4.1057, 3.2629], + device='cuda:2'), covar=tensor([0.0763, 0.0216, 0.0525, 0.3584, 0.0408, 0.0607, 0.0556, 0.0554], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0199, 0.0241, 0.0316, 0.0201, 0.0154, 0.0219, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 19:44:22,131 INFO [train.py:901] (2/4) Epoch 2, batch 1200, loss[loss=0.4481, simple_loss=0.4703, pruned_loss=0.213, over 8467.00 frames. ], tot_loss[loss=0.3933, simple_loss=0.4216, pruned_loss=0.1825, over 1609362.32 frames. ], batch size: 25, lr: 3.38e-02, grad_scale: 8.0 +2023-02-05 19:44:23,917 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 19:44:25,538 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9289.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:44:41,020 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.810e+02 4.160e+02 4.885e+02 6.720e+02 4.965e+03, threshold=9.769e+02, percent-clipped=5.0 +2023-02-05 19:44:56,723 INFO [train.py:901] (2/4) Epoch 2, batch 1250, loss[loss=0.3875, simple_loss=0.4252, pruned_loss=0.1749, over 8096.00 frames. ], tot_loss[loss=0.3965, simple_loss=0.4235, pruned_loss=0.1847, over 1610866.07 frames. ], batch size: 23, lr: 3.37e-02, grad_scale: 4.0 +2023-02-05 19:45:07,486 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9348.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:25,818 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9375.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:31,820 INFO [train.py:901] (2/4) Epoch 2, batch 1300, loss[loss=0.3839, simple_loss=0.4253, pruned_loss=0.1713, over 8524.00 frames. 
], tot_loss[loss=0.3956, simple_loss=0.423, pruned_loss=0.1841, over 1610277.76 frames. ], batch size: 31, lr: 3.36e-02, grad_scale: 4.0 +2023-02-05 19:45:45,735 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9404.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:45:50,303 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.161e+02 4.162e+02 5.656e+02 7.688e+02 2.529e+03, threshold=1.131e+03, percent-clipped=11.0 +2023-02-05 19:45:52,204 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 19:45:57,231 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0032, 1.8875, 1.3662, 1.2799, 1.9480, 1.4134, 1.3821, 2.0457], + device='cuda:2'), covar=tensor([0.1297, 0.1701, 0.2519, 0.2259, 0.1047, 0.1919, 0.1524, 0.0966], + device='cuda:2'), in_proj_covar=tensor([0.0256, 0.0272, 0.0295, 0.0271, 0.0253, 0.0250, 0.0245, 0.0234], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:2') +2023-02-05 19:46:05,050 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:06,285 INFO [train.py:901] (2/4) Epoch 2, batch 1350, loss[loss=0.4649, simple_loss=0.4786, pruned_loss=0.2256, over 8677.00 frames. ], tot_loss[loss=0.3953, simple_loss=0.4227, pruned_loss=0.184, over 1612450.98 frames. ], batch size: 34, lr: 3.36e-02, grad_scale: 4.0 +2023-02-05 19:46:09,322 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0 +2023-02-05 19:46:27,277 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:27,301 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6587, 2.1999, 1.1792, 1.9080, 1.7596, 1.3007, 1.3433, 2.0329], + device='cuda:2'), covar=tensor([0.1487, 0.0589, 0.1483, 0.1036, 0.1196, 0.1461, 0.1732, 0.1018], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0241, 0.0352, 0.0295, 0.0341, 0.0307, 0.0359, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 19:46:41,365 INFO [train.py:901] (2/4) Epoch 2, batch 1400, loss[loss=0.4184, simple_loss=0.4626, pruned_loss=0.1871, over 8322.00 frames. ], tot_loss[loss=0.3953, simple_loss=0.4228, pruned_loss=0.1839, over 1613584.35 frames. ], batch size: 25, lr: 3.35e-02, grad_scale: 4.0 +2023-02-05 19:46:45,510 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9490.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:47,576 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9493.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:46:59,486 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.192e+02 3.889e+02 4.981e+02 6.326e+02 1.555e+03, threshold=9.962e+02, percent-clipped=1.0 +2023-02-05 19:47:04,255 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9518.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:47:06,512 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 19:47:15,069 INFO [train.py:901] (2/4) Epoch 2, batch 1450, loss[loss=0.3953, simple_loss=0.4335, pruned_loss=0.1786, over 8114.00 frames. 
], tot_loss[loss=0.3952, simple_loss=0.4222, pruned_loss=0.1841, over 1613202.01 frames. ], batch size: 23, lr: 3.34e-02, grad_scale: 4.0 +2023-02-05 19:47:19,042 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 19:47:49,261 INFO [train.py:901] (2/4) Epoch 2, batch 1500, loss[loss=0.3635, simple_loss=0.4058, pruned_loss=0.1607, over 8240.00 frames. ], tot_loss[loss=0.3915, simple_loss=0.42, pruned_loss=0.1814, over 1615710.51 frames. ], batch size: 22, lr: 3.33e-02, grad_scale: 4.0 +2023-02-05 19:48:01,346 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:07,902 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.496e+02 4.006e+02 4.905e+02 6.157e+02 1.300e+03, threshold=9.811e+02, percent-clipped=3.0 +2023-02-05 19:48:23,387 INFO [train.py:901] (2/4) Epoch 2, batch 1550, loss[loss=0.3511, simple_loss=0.394, pruned_loss=0.1541, over 7808.00 frames. ], tot_loss[loss=0.3899, simple_loss=0.4192, pruned_loss=0.1803, over 1619092.97 frames. ], batch size: 20, lr: 3.33e-02, grad_scale: 4.0 +2023-02-05 19:48:41,696 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9660.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:45,214 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-05 19:48:50,894 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. limit=2.0 +2023-02-05 19:48:52,341 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9676.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:48:57,567 INFO [train.py:901] (2/4) Epoch 2, batch 1600, loss[loss=0.3373, simple_loss=0.3871, pruned_loss=0.1438, over 8032.00 frames. ], tot_loss[loss=0.392, simple_loss=0.4208, pruned_loss=0.1816, over 1613209.07 frames. ], batch size: 22, lr: 3.32e-02, grad_scale: 8.0 +2023-02-05 19:48:58,392 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9685.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:17,085 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.192e+02 5.177e+02 6.492e+02 1.266e+03, threshold=1.035e+03, percent-clipped=2.0 +2023-02-05 19:49:22,888 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9719.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:33,627 INFO [train.py:901] (2/4) Epoch 2, batch 1650, loss[loss=0.3685, simple_loss=0.403, pruned_loss=0.167, over 8075.00 frames. ], tot_loss[loss=0.3886, simple_loss=0.4184, pruned_loss=0.1794, over 1614202.70 frames. ], batch size: 21, lr: 3.31e-02, grad_scale: 8.0 +2023-02-05 19:49:40,254 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9744.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:41,550 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9746.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:47,790 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-05 19:49:51,527 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:49:58,956 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9771.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:50:02,237 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:50:03,227 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.17 vs. limit=2.0 +2023-02-05 19:50:07,343 INFO [train.py:901] (2/4) Epoch 2, batch 1700, loss[loss=0.4222, simple_loss=0.4528, pruned_loss=0.1958, over 8502.00 frames. ], tot_loss[loss=0.3903, simple_loss=0.4197, pruned_loss=0.1805, over 1616226.92 frames. ], batch size: 28, lr: 3.31e-02, grad_scale: 8.0 +2023-02-05 19:50:24,477 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2023, 2.1911, 1.9616, 0.5015, 1.9098, 1.4238, 0.3970, 2.0077], + device='cuda:2'), covar=tensor([0.0239, 0.0068, 0.0145, 0.0335, 0.0145, 0.0279, 0.0369, 0.0091], + device='cuda:2'), in_proj_covar=tensor([0.0168, 0.0123, 0.0110, 0.0173, 0.0121, 0.0207, 0.0175, 0.0143], + device='cuda:2'), out_proj_covar=tensor([1.1481e-04, 8.5389e-05, 8.3123e-05, 1.2257e-04, 9.1607e-05, 1.5285e-04, + 1.2569e-04, 1.0142e-04], device='cuda:2') +2023-02-05 19:50:26,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.640e+02 4.068e+02 5.098e+02 6.535e+02 1.207e+03, threshold=1.020e+03, percent-clipped=5.0 +2023-02-05 19:50:42,243 INFO [train.py:901] (2/4) Epoch 2, batch 1750, loss[loss=0.4101, simple_loss=0.4425, pruned_loss=0.1888, over 8322.00 frames. ], tot_loss[loss=0.3893, simple_loss=0.4189, pruned_loss=0.1798, over 1617709.55 frames. ], batch size: 25, lr: 3.30e-02, grad_scale: 8.0 +2023-02-05 19:50:44,397 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8239, 2.2589, 4.5746, 1.2491, 3.0334, 2.2529, 1.9116, 2.3507], + device='cuda:2'), covar=tensor([0.1044, 0.1372, 0.0241, 0.1672, 0.0947, 0.1522, 0.0897, 0.1566], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0337, 0.0347, 0.0382, 0.0430, 0.0403, 0.0343, 0.0423], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 19:51:16,319 INFO [train.py:901] (2/4) Epoch 2, batch 1800, loss[loss=0.4002, simple_loss=0.4161, pruned_loss=0.1922, over 8326.00 frames. ], tot_loss[loss=0.3901, simple_loss=0.4194, pruned_loss=0.1804, over 1622942.53 frames. ], batch size: 25, lr: 3.29e-02, grad_scale: 8.0 +2023-02-05 19:51:21,270 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:51:34,083 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.365e+02 4.111e+02 5.198e+02 6.626e+02 1.120e+03, threshold=1.040e+03, percent-clipped=3.0 +2023-02-05 19:51:49,954 INFO [train.py:901] (2/4) Epoch 2, batch 1850, loss[loss=0.4157, simple_loss=0.4438, pruned_loss=0.1938, over 8641.00 frames. ], tot_loss[loss=0.3874, simple_loss=0.4174, pruned_loss=0.1787, over 1619014.06 frames. 
], batch size: 49, lr: 3.29e-02, grad_scale: 8.0 +2023-02-05 19:51:58,649 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9946.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:52:01,473 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0558, 1.7301, 1.3944, 1.1372, 1.9990, 1.5886, 1.5625, 2.0100], + device='cuda:2'), covar=tensor([0.1162, 0.1781, 0.2504, 0.2153, 0.0870, 0.1890, 0.1285, 0.0840], + device='cuda:2'), in_proj_covar=tensor([0.0247, 0.0267, 0.0290, 0.0258, 0.0240, 0.0250, 0.0234, 0.0227], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:2') +2023-02-05 19:52:08,259 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-05 19:52:13,469 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3849, 1.5459, 2.2579, 0.9768, 1.8512, 1.5340, 1.3488, 1.5809], + device='cuda:2'), covar=tensor([0.1039, 0.1052, 0.0440, 0.1648, 0.0871, 0.1569, 0.0971, 0.1007], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0341, 0.0359, 0.0385, 0.0437, 0.0400, 0.0346, 0.0427], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 19:52:24,359 INFO [train.py:901] (2/4) Epoch 2, batch 1900, loss[loss=0.4002, simple_loss=0.4348, pruned_loss=0.1828, over 8449.00 frames. ], tot_loss[loss=0.3849, simple_loss=0.4158, pruned_loss=0.177, over 1617412.44 frames. ], batch size: 29, lr: 3.28e-02, grad_scale: 8.0 +2023-02-05 19:52:36,761 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3960, 1.4470, 1.2303, 1.4954, 1.3553, 1.1686, 1.1382, 1.7996], + device='cuda:2'), covar=tensor([0.0949, 0.0591, 0.1300, 0.0664, 0.0974, 0.1248, 0.1111, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0369, 0.0250, 0.0370, 0.0298, 0.0354, 0.0314, 0.0369, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 19:52:39,962 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4050, 0.9354, 4.5199, 2.0111, 3.8878, 3.6244, 4.0105, 4.0155], + device='cuda:2'), covar=tensor([0.0326, 0.3613, 0.0217, 0.1390, 0.0805, 0.0334, 0.0279, 0.0338], + device='cuda:2'), in_proj_covar=tensor([0.0179, 0.0361, 0.0209, 0.0252, 0.0285, 0.0235, 0.0202, 0.0240], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 19:52:43,800 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.513e+02 4.327e+02 5.785e+02 1.080e+03, threshold=8.653e+02, percent-clipped=1.0 +2023-02-05 19:52:49,917 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:52:54,607 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-05 19:52:57,444 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9163, 2.2817, 1.8806, 2.7669, 1.1635, 1.3314, 1.8089, 2.2439], + device='cuda:2'), covar=tensor([0.1169, 0.1506, 0.1672, 0.0385, 0.2505, 0.2446, 0.2198, 0.1402], + device='cuda:2'), in_proj_covar=tensor([0.0307, 0.0326, 0.0319, 0.0221, 0.0334, 0.0349, 0.0383, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:2') +2023-02-05 19:52:59,199 INFO [train.py:901] (2/4) Epoch 2, batch 1950, loss[loss=0.4023, simple_loss=0.433, pruned_loss=0.1858, over 8342.00 frames. ], tot_loss[loss=0.3885, simple_loss=0.4183, pruned_loss=0.1794, over 1617293.36 frames. ], batch size: 26, lr: 3.27e-02, grad_scale: 8.0 +2023-02-05 19:53:06,844 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 19:53:15,911 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10057.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:19,396 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:25,587 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 19:53:33,042 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10080.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:53:33,717 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4033, 2.0820, 3.3599, 3.1256, 2.8009, 2.0168, 1.5446, 1.9102], + device='cuda:2'), covar=tensor([0.0894, 0.0990, 0.0208, 0.0282, 0.0394, 0.0434, 0.0601, 0.0847], + device='cuda:2'), in_proj_covar=tensor([0.0444, 0.0353, 0.0256, 0.0293, 0.0373, 0.0328, 0.0349, 0.0387], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:53:35,461 INFO [train.py:901] (2/4) Epoch 2, batch 2000, loss[loss=0.3437, simple_loss=0.3757, pruned_loss=0.1558, over 7978.00 frames. ], tot_loss[loss=0.3883, simple_loss=0.4181, pruned_loss=0.1793, over 1617823.30 frames. ], batch size: 21, lr: 3.27e-02, grad_scale: 8.0 +2023-02-05 19:53:50,416 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:53:55,727 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.648e+02 4.167e+02 5.413e+02 6.926e+02 6.671e+03, threshold=1.083e+03, percent-clipped=14.0 +2023-02-05 19:54:10,561 INFO [train.py:901] (2/4) Epoch 2, batch 2050, loss[loss=0.3545, simple_loss=0.3952, pruned_loss=0.1569, over 8133.00 frames. ], tot_loss[loss=0.3864, simple_loss=0.4159, pruned_loss=0.1784, over 1616483.04 frames. ], batch size: 22, lr: 3.26e-02, grad_scale: 4.0 +2023-02-05 19:54:11,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10135.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:16,423 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-02-05 19:54:16,760 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.4249, 1.4231, 5.2435, 2.3122, 4.7862, 4.5565, 4.8102, 4.9938], + device='cuda:2'), covar=tensor([0.0293, 0.3427, 0.0189, 0.1422, 0.0737, 0.0279, 0.0260, 0.0276], + device='cuda:2'), in_proj_covar=tensor([0.0177, 0.0358, 0.0213, 0.0246, 0.0289, 0.0229, 0.0207, 0.0244], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 19:54:19,453 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:36,885 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:54:45,486 INFO [train.py:901] (2/4) Epoch 2, batch 2100, loss[loss=0.3794, simple_loss=0.415, pruned_loss=0.1719, over 8085.00 frames. ], tot_loss[loss=0.3843, simple_loss=0.4148, pruned_loss=0.1769, over 1612942.51 frames. ], batch size: 21, lr: 3.25e-02, grad_scale: 4.0 +2023-02-05 19:55:06,153 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.788e+02 4.646e+02 5.840e+02 1.328e+03, threshold=9.292e+02, percent-clipped=3.0 +2023-02-05 19:55:11,268 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:55:20,260 INFO [train.py:901] (2/4) Epoch 2, batch 2150, loss[loss=0.3923, simple_loss=0.4294, pruned_loss=0.1776, over 8250.00 frames. ], tot_loss[loss=0.3819, simple_loss=0.4132, pruned_loss=0.1753, over 1609033.03 frames. ], batch size: 24, lr: 3.25e-02, grad_scale: 4.0 +2023-02-05 19:55:50,895 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 19:55:53,988 INFO [train.py:901] (2/4) Epoch 2, batch 2200, loss[loss=0.3849, simple_loss=0.4141, pruned_loss=0.1779, over 8104.00 frames. ], tot_loss[loss=0.383, simple_loss=0.4139, pruned_loss=0.1761, over 1612611.80 frames. ], batch size: 23, lr: 3.24e-02, grad_scale: 4.0 +2023-02-05 19:56:13,115 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. limit=2.0 +2023-02-05 19:56:14,593 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.803e+02 4.971e+02 6.310e+02 1.458e+03, threshold=9.942e+02, percent-clipped=6.0 +2023-02-05 19:56:18,160 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10317.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:56:29,272 INFO [train.py:901] (2/4) Epoch 2, batch 2250, loss[loss=0.4924, simple_loss=0.4834, pruned_loss=0.2507, over 7316.00 frames. ], tot_loss[loss=0.3858, simple_loss=0.4157, pruned_loss=0.1779, over 1613953.78 frames. ], batch size: 71, lr: 3.24e-02, grad_scale: 4.0 +2023-02-05 19:56:34,612 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10342.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:56:44,802 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 19:57:03,211 INFO [train.py:901] (2/4) Epoch 2, batch 2300, loss[loss=0.3445, simple_loss=0.3935, pruned_loss=0.1478, over 8609.00 frames. ], tot_loss[loss=0.3857, simple_loss=0.4155, pruned_loss=0.178, over 1614980.19 frames. 
], batch size: 34, lr: 3.23e-02, grad_scale: 4.0 +2023-02-05 19:57:08,295 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10391.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:15,022 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10401.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:18,756 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0 +2023-02-05 19:57:23,810 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.389e+02 3.989e+02 5.161e+02 7.086e+02 1.471e+03, threshold=1.032e+03, percent-clipped=7.0 +2023-02-05 19:57:25,917 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10416.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:31,805 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10424.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:57:33,873 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:57:39,136 INFO [train.py:901] (2/4) Epoch 2, batch 2350, loss[loss=0.3765, simple_loss=0.4054, pruned_loss=0.1739, over 8358.00 frames. ], tot_loss[loss=0.385, simple_loss=0.4149, pruned_loss=0.1775, over 1616469.40 frames. ], batch size: 24, lr: 3.22e-02, grad_scale: 4.0 +2023-02-05 19:57:39,514 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-05 19:58:05,152 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10472.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:07,808 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:12,888 INFO [train.py:901] (2/4) Epoch 2, batch 2400, loss[loss=0.3463, simple_loss=0.3974, pruned_loss=0.1475, over 8459.00 frames. ], tot_loss[loss=0.3835, simple_loss=0.4138, pruned_loss=0.1766, over 1614948.63 frames. ], batch size: 27, lr: 3.22e-02, grad_scale: 8.0 +2023-02-05 19:58:21,309 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7973, 1.0927, 3.2226, 1.0601, 2.0409, 3.8237, 3.3594, 3.3385], + device='cuda:2'), covar=tensor([0.1402, 0.1951, 0.0336, 0.2270, 0.0845, 0.0181, 0.0285, 0.0405], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0263, 0.0180, 0.0254, 0.0195, 0.0149, 0.0137, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 19:58:24,669 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:32,502 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.364e+02 3.956e+02 5.047e+02 6.263e+02 1.564e+03, threshold=1.009e+03, percent-clipped=2.0 +2023-02-05 19:58:34,740 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10516.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:58:47,327 INFO [train.py:901] (2/4) Epoch 2, batch 2450, loss[loss=0.3974, simple_loss=0.4337, pruned_loss=0.1806, over 8553.00 frames. ], tot_loss[loss=0.3844, simple_loss=0.4151, pruned_loss=0.1769, over 1618996.30 frames. 
], batch size: 49, lr: 3.21e-02, grad_scale: 8.0 +2023-02-05 19:58:50,860 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10539.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:59:22,254 INFO [train.py:901] (2/4) Epoch 2, batch 2500, loss[loss=0.3689, simple_loss=0.3947, pruned_loss=0.1715, over 7811.00 frames. ], tot_loss[loss=0.382, simple_loss=0.4132, pruned_loss=0.1755, over 1618846.60 frames. ], batch size: 20, lr: 3.20e-02, grad_scale: 8.0 +2023-02-05 19:59:36,326 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1463, 1.1126, 4.0945, 1.6459, 3.4926, 3.5457, 3.6758, 3.6650], + device='cuda:2'), covar=tensor([0.0234, 0.2926, 0.0235, 0.1207, 0.0878, 0.0304, 0.0259, 0.0355], + device='cuda:2'), in_proj_covar=tensor([0.0177, 0.0355, 0.0216, 0.0241, 0.0286, 0.0228, 0.0215, 0.0244], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 19:59:42,154 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.520e+02 3.522e+02 4.438e+02 6.473e+02 1.354e+03, threshold=8.876e+02, percent-clipped=4.0 +2023-02-05 19:59:54,190 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4677, 2.0938, 3.2439, 3.0263, 2.9317, 2.0215, 1.6402, 2.4449], + device='cuda:2'), covar=tensor([0.0755, 0.0887, 0.0147, 0.0201, 0.0272, 0.0425, 0.0612, 0.0565], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0372, 0.0256, 0.0299, 0.0383, 0.0336, 0.0359, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 19:59:55,943 INFO [train.py:901] (2/4) Epoch 2, batch 2550, loss[loss=0.3289, simple_loss=0.3814, pruned_loss=0.1382, over 7930.00 frames. ], tot_loss[loss=0.3822, simple_loss=0.413, pruned_loss=0.1757, over 1617305.37 frames. ], batch size: 20, lr: 3.20e-02, grad_scale: 8.0 +2023-02-05 20:00:31,344 INFO [train.py:901] (2/4) Epoch 2, batch 2600, loss[loss=0.3952, simple_loss=0.43, pruned_loss=0.1802, over 8551.00 frames. ], tot_loss[loss=0.3815, simple_loss=0.4123, pruned_loss=0.1753, over 1613263.59 frames. ], batch size: 31, lr: 3.19e-02, grad_scale: 8.0 +2023-02-05 20:00:34,223 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5017, 2.0122, 1.5037, 2.0730, 1.9919, 1.3061, 1.5443, 2.1713], + device='cuda:2'), covar=tensor([0.1232, 0.0746, 0.1231, 0.0793, 0.1003, 0.1394, 0.1245, 0.0705], + device='cuda:2'), in_proj_covar=tensor([0.0362, 0.0237, 0.0356, 0.0304, 0.0350, 0.0314, 0.0352, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 20:00:44,745 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2756, 2.3787, 2.1084, 2.6333, 1.8501, 1.8621, 1.9755, 2.3972], + device='cuda:2'), covar=tensor([0.0844, 0.1066, 0.1231, 0.0419, 0.1574, 0.1426, 0.1487, 0.0844], + device='cuda:2'), in_proj_covar=tensor([0.0310, 0.0331, 0.0322, 0.0217, 0.0331, 0.0340, 0.0383, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:00:50,490 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 4.188e+02 4.914e+02 6.333e+02 1.432e+03, threshold=9.828e+02, percent-clipped=6.0 +2023-02-05 20:01:05,101 INFO [train.py:901] (2/4) Epoch 2, batch 2650, loss[loss=0.3803, simple_loss=0.3987, pruned_loss=0.1809, over 7978.00 frames. 
], tot_loss[loss=0.3805, simple_loss=0.412, pruned_loss=0.1744, over 1610838.54 frames. ], batch size: 21, lr: 3.19e-02, grad_scale: 8.0 +2023-02-05 20:01:24,204 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10762.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:30,879 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10771.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:31,718 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10772.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:40,305 INFO [train.py:901] (2/4) Epoch 2, batch 2700, loss[loss=0.4327, simple_loss=0.4658, pruned_loss=0.1998, over 8757.00 frames. ], tot_loss[loss=0.3789, simple_loss=0.4107, pruned_loss=0.1735, over 1608343.37 frames. ], batch size: 30, lr: 3.18e-02, grad_scale: 8.0 +2023-02-05 20:01:46,658 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10792.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:01:48,791 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10795.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:01:50,095 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10797.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:02:01,027 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.290e+02 4.005e+02 5.458e+02 7.000e+02 2.619e+03, threshold=1.092e+03, percent-clipped=7.0 +2023-02-05 20:02:03,279 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10816.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:02:06,059 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10820.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:02:15,171 INFO [train.py:901] (2/4) Epoch 2, batch 2750, loss[loss=0.4329, simple_loss=0.4565, pruned_loss=0.2047, over 8320.00 frames. ], tot_loss[loss=0.3773, simple_loss=0.4095, pruned_loss=0.1726, over 1605287.11 frames. ], batch size: 26, lr: 3.17e-02, grad_scale: 8.0 +2023-02-05 20:02:49,756 INFO [train.py:901] (2/4) Epoch 2, batch 2800, loss[loss=0.4238, simple_loss=0.4348, pruned_loss=0.2064, over 7539.00 frames. ], tot_loss[loss=0.3789, simple_loss=0.4109, pruned_loss=0.1734, over 1607923.91 frames. ], batch size: 18, lr: 3.17e-02, grad_scale: 8.0 +2023-02-05 20:02:51,260 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10886.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:03:03,319 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10903.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:03:10,621 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.367e+02 3.535e+02 4.531e+02 6.001e+02 1.335e+03, threshold=9.062e+02, percent-clipped=2.0 +2023-02-05 20:03:23,137 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10931.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:03:25,018 INFO [train.py:901] (2/4) Epoch 2, batch 2850, loss[loss=0.4037, simple_loss=0.4539, pruned_loss=0.1767, over 8647.00 frames. ], tot_loss[loss=0.3776, simple_loss=0.4105, pruned_loss=0.1723, over 1608844.50 frames. ], batch size: 34, lr: 3.16e-02, grad_scale: 8.0 +2023-02-05 20:03:40,880 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. 
limit=5.0 +2023-02-05 20:03:59,105 INFO [train.py:901] (2/4) Epoch 2, batch 2900, loss[loss=0.3313, simple_loss=0.3962, pruned_loss=0.1332, over 7807.00 frames. ], tot_loss[loss=0.3782, simple_loss=0.4111, pruned_loss=0.1727, over 1602820.78 frames. ], batch size: 20, lr: 3.16e-02, grad_scale: 8.0 +2023-02-05 20:04:18,979 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0117, 2.3707, 1.8584, 2.8176, 1.1710, 1.3303, 1.5907, 2.0444], + device='cuda:2'), covar=tensor([0.1015, 0.0947, 0.1434, 0.0344, 0.2004, 0.2148, 0.2232, 0.1148], + device='cuda:2'), in_proj_covar=tensor([0.0307, 0.0320, 0.0316, 0.0208, 0.0325, 0.0329, 0.0378, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:04:19,454 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.551e+02 4.216e+02 5.196e+02 6.845e+02 2.226e+03, threshold=1.039e+03, percent-clipped=10.0 +2023-02-05 20:04:34,448 INFO [train.py:901] (2/4) Epoch 2, batch 2950, loss[loss=0.5775, simple_loss=0.5428, pruned_loss=0.3061, over 8629.00 frames. ], tot_loss[loss=0.3793, simple_loss=0.4108, pruned_loss=0.1739, over 1601661.66 frames. ], batch size: 39, lr: 3.15e-02, grad_scale: 8.0 +2023-02-05 20:04:39,268 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 20:04:41,043 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 20:05:08,646 INFO [train.py:901] (2/4) Epoch 2, batch 3000, loss[loss=0.333, simple_loss=0.3747, pruned_loss=0.1456, over 7649.00 frames. ], tot_loss[loss=0.3793, simple_loss=0.4114, pruned_loss=0.1737, over 1605900.41 frames. ], batch size: 19, lr: 3.14e-02, grad_scale: 8.0 +2023-02-05 20:05:08,646 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 20:05:18,635 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4863, 1.6152, 1.5266, 2.3043, 1.1066, 1.0762, 1.5614, 1.5726], + device='cuda:2'), covar=tensor([0.1496, 0.1671, 0.1785, 0.0535, 0.2258, 0.2648, 0.2011, 0.1435], + device='cuda:2'), in_proj_covar=tensor([0.0307, 0.0324, 0.0313, 0.0213, 0.0320, 0.0331, 0.0372, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:05:24,855 INFO [train.py:935] (2/4) Epoch 2, validation: loss=0.2878, simple_loss=0.369, pruned_loss=0.1033, over 944034.00 frames. +2023-02-05 20:05:24,856 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 20:05:40,501 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:05:45,160 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.542e+02 3.795e+02 4.955e+02 6.193e+02 1.384e+03, threshold=9.910e+02, percent-clipped=4.0 +2023-02-05 20:06:00,078 INFO [train.py:901] (2/4) Epoch 2, batch 3050, loss[loss=0.391, simple_loss=0.4317, pruned_loss=0.1752, over 8478.00 frames. ], tot_loss[loss=0.3801, simple_loss=0.4122, pruned_loss=0.174, over 1609913.67 frames. 
], batch size: 25, lr: 3.14e-02, grad_scale: 8.0 +2023-02-05 20:06:01,582 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:05,919 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11142.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:24,327 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:35,406 INFO [train.py:901] (2/4) Epoch 2, batch 3100, loss[loss=0.3254, simple_loss=0.3742, pruned_loss=0.1384, over 8082.00 frames. ], tot_loss[loss=0.378, simple_loss=0.4105, pruned_loss=0.1728, over 1609017.22 frames. ], batch size: 21, lr: 3.13e-02, grad_scale: 8.0 +2023-02-05 20:06:37,614 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11187.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:40,867 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:55,397 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:06:55,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.930e+02 4.987e+02 6.652e+02 1.229e+03, threshold=9.974e+02, percent-clipped=5.0 +2023-02-05 20:06:58,267 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4639, 2.4919, 2.6664, 0.4062, 2.5494, 1.8421, 1.0658, 1.6419], + device='cuda:2'), covar=tensor([0.0205, 0.0074, 0.0108, 0.0317, 0.0131, 0.0280, 0.0317, 0.0164], + device='cuda:2'), in_proj_covar=tensor([0.0176, 0.0130, 0.0122, 0.0182, 0.0133, 0.0227, 0.0189, 0.0157], + device='cuda:2'), out_proj_covar=tensor([1.1370e-04, 8.5132e-05, 8.6157e-05, 1.2077e-04, 9.3833e-05, 1.5867e-04, + 1.2916e-04, 1.0555e-04], device='cuda:2') +2023-02-05 20:07:01,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:07:10,344 INFO [train.py:901] (2/4) Epoch 2, batch 3150, loss[loss=0.3711, simple_loss=0.4138, pruned_loss=0.1642, over 8346.00 frames. ], tot_loss[loss=0.3778, simple_loss=0.411, pruned_loss=0.1723, over 1614598.65 frames. ], batch size: 26, lr: 3.13e-02, grad_scale: 8.0 +2023-02-05 20:07:20,132 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11247.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:07:22,966 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11251.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:07:35,649 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.32 vs. limit=2.0 +2023-02-05 20:07:40,490 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.18 vs. limit=5.0 +2023-02-05 20:07:46,079 INFO [train.py:901] (2/4) Epoch 2, batch 3200, loss[loss=0.3089, simple_loss=0.3588, pruned_loss=0.1295, over 7980.00 frames. ], tot_loss[loss=0.3751, simple_loss=0.4089, pruned_loss=0.1707, over 1608843.02 frames. 
], batch size: 21, lr: 3.12e-02, grad_scale: 8.0 +2023-02-05 20:07:48,949 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9401, 1.3284, 2.0778, 0.7977, 2.1909, 2.1855, 2.1515, 1.9046], + device='cuda:2'), covar=tensor([0.1737, 0.1716, 0.0642, 0.2751, 0.0648, 0.0612, 0.0609, 0.0924], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0258, 0.0171, 0.0242, 0.0190, 0.0148, 0.0137, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:07:56,013 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.75 vs. limit=2.0 +2023-02-05 20:08:06,205 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 3.889e+02 4.508e+02 6.050e+02 1.565e+03, threshold=9.016e+02, percent-clipped=4.0 +2023-02-05 20:08:07,294 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. limit=5.0 +2023-02-05 20:08:21,234 INFO [train.py:901] (2/4) Epoch 2, batch 3250, loss[loss=0.374, simple_loss=0.4161, pruned_loss=0.1659, over 8111.00 frames. ], tot_loss[loss=0.3714, simple_loss=0.4061, pruned_loss=0.1683, over 1608499.03 frames. ], batch size: 23, lr: 3.11e-02, grad_scale: 8.0 +2023-02-05 20:08:39,992 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11362.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:08:55,029 INFO [train.py:901] (2/4) Epoch 2, batch 3300, loss[loss=0.3557, simple_loss=0.4035, pruned_loss=0.154, over 7906.00 frames. ], tot_loss[loss=0.3719, simple_loss=0.4066, pruned_loss=0.1686, over 1610900.33 frames. ], batch size: 20, lr: 3.11e-02, grad_scale: 8.0 +2023-02-05 20:09:16,011 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.686e+02 3.650e+02 4.417e+02 5.589e+02 1.513e+03, threshold=8.834e+02, percent-clipped=8.0 +2023-02-05 20:09:30,176 INFO [train.py:901] (2/4) Epoch 2, batch 3350, loss[loss=0.3582, simple_loss=0.3887, pruned_loss=0.1638, over 7809.00 frames. ], tot_loss[loss=0.3725, simple_loss=0.4068, pruned_loss=0.1691, over 1609208.03 frames. ], batch size: 20, lr: 3.10e-02, grad_scale: 8.0 +2023-02-05 20:10:00,609 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11477.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:04,735 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2846, 1.8814, 1.4173, 1.1923, 1.9363, 1.5762, 2.0331, 1.7453], + device='cuda:2'), covar=tensor([0.1086, 0.1639, 0.2358, 0.2128, 0.1075, 0.1994, 0.1165, 0.0979], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0262, 0.0289, 0.0259, 0.0233, 0.0251, 0.0230, 0.0222], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:10:05,251 INFO [train.py:901] (2/4) Epoch 2, batch 3400, loss[loss=0.3528, simple_loss=0.4039, pruned_loss=0.1509, over 8362.00 frames. ], tot_loss[loss=0.376, simple_loss=0.41, pruned_loss=0.171, over 1616508.90 frames. 
], batch size: 24, lr: 3.10e-02, grad_scale: 8.0 +2023-02-05 20:10:17,706 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11502.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:21,204 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11507.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:25,774 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.120e+02 3.730e+02 4.591e+02 5.662e+02 1.223e+03, threshold=9.181e+02, percent-clipped=5.0 +2023-02-05 20:10:39,501 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11532.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:40,654 INFO [train.py:901] (2/4) Epoch 2, batch 3450, loss[loss=0.3188, simple_loss=0.3582, pruned_loss=0.1397, over 7523.00 frames. ], tot_loss[loss=0.3766, simple_loss=0.41, pruned_loss=0.1715, over 1619050.79 frames. ], batch size: 18, lr: 3.09e-02, grad_scale: 8.0 +2023-02-05 20:10:42,062 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:10:51,626 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11550.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:11:15,640 INFO [train.py:901] (2/4) Epoch 2, batch 3500, loss[loss=0.3388, simple_loss=0.398, pruned_loss=0.1398, over 8364.00 frames. ], tot_loss[loss=0.3751, simple_loss=0.4093, pruned_loss=0.1704, over 1620444.98 frames. ], batch size: 24, lr: 3.09e-02, grad_scale: 8.0 +2023-02-05 20:11:35,936 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 4.071e+02 4.877e+02 6.297e+02 1.257e+03, threshold=9.753e+02, percent-clipped=3.0 +2023-02-05 20:11:39,547 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11618.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:11:40,719 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 20:11:50,751 INFO [train.py:901] (2/4) Epoch 2, batch 3550, loss[loss=0.3787, simple_loss=0.4288, pruned_loss=0.1643, over 8459.00 frames. ], tot_loss[loss=0.3751, simple_loss=0.4091, pruned_loss=0.1706, over 1619186.62 frames. ], batch size: 29, lr: 3.08e-02, grad_scale: 8.0 +2023-02-05 20:11:57,576 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11643.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:12:02,958 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:04,329 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:12,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1735, 1.4320, 1.2645, 1.9175, 1.1307, 0.9616, 1.2007, 1.4676], + device='cuda:2'), covar=tensor([0.1632, 0.1476, 0.1848, 0.0602, 0.1634, 0.2600, 0.1650, 0.1144], + device='cuda:2'), in_proj_covar=tensor([0.0315, 0.0331, 0.0332, 0.0215, 0.0323, 0.0338, 0.0371, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:12:25,619 INFO [train.py:901] (2/4) Epoch 2, batch 3600, loss[loss=0.3814, simple_loss=0.4057, pruned_loss=0.1785, over 7791.00 frames. ], tot_loss[loss=0.3763, simple_loss=0.4097, pruned_loss=0.1714, over 1619225.70 frames. 
], batch size: 19, lr: 3.08e-02, grad_scale: 8.0 +2023-02-05 20:12:45,381 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.676e+02 3.688e+02 4.691e+02 6.662e+02 1.491e+03, threshold=9.383e+02, percent-clipped=3.0 +2023-02-05 20:12:48,294 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11717.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:12:59,433 INFO [train.py:901] (2/4) Epoch 2, batch 3650, loss[loss=0.3069, simple_loss=0.3478, pruned_loss=0.133, over 7652.00 frames. ], tot_loss[loss=0.3755, simple_loss=0.409, pruned_loss=0.171, over 1618393.87 frames. ], batch size: 19, lr: 3.07e-02, grad_scale: 8.0 +2023-02-05 20:13:25,959 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4989, 1.1253, 2.8990, 1.3218, 2.2572, 3.1656, 2.8646, 2.8247], + device='cuda:2'), covar=tensor([0.1473, 0.1687, 0.0422, 0.1938, 0.0667, 0.0269, 0.0396, 0.0531], + device='cuda:2'), in_proj_covar=tensor([0.0240, 0.0263, 0.0175, 0.0250, 0.0201, 0.0149, 0.0145, 0.0213], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:13:33,832 INFO [train.py:901] (2/4) Epoch 2, batch 3700, loss[loss=0.3646, simple_loss=0.3952, pruned_loss=0.167, over 7981.00 frames. ], tot_loss[loss=0.3759, simple_loss=0.4091, pruned_loss=0.1714, over 1619202.49 frames. ], batch size: 21, lr: 3.06e-02, grad_scale: 8.0 +2023-02-05 20:13:44,421 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:13:45,468 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0506, 1.8704, 2.9611, 2.5767, 2.4929, 1.8318, 1.4806, 1.5654], + device='cuda:2'), covar=tensor([0.0722, 0.0632, 0.0127, 0.0215, 0.0248, 0.0321, 0.0455, 0.0532], + device='cuda:2'), in_proj_covar=tensor([0.0470, 0.0384, 0.0280, 0.0314, 0.0404, 0.0351, 0.0373, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:13:53,762 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.910e+02 4.224e+02 5.211e+02 6.213e+02 2.304e+03, threshold=1.042e+03, percent-clipped=10.0 +2023-02-05 20:13:58,270 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.84 vs. limit=2.0 +2023-02-05 20:14:08,519 INFO [train.py:901] (2/4) Epoch 2, batch 3750, loss[loss=0.3467, simple_loss=0.3927, pruned_loss=0.1504, over 8662.00 frames. ], tot_loss[loss=0.3758, simple_loss=0.4087, pruned_loss=0.1715, over 1614824.06 frames. 
], batch size: 34, lr: 3.06e-02, grad_scale: 8.0 +2023-02-05 20:14:08,633 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11834.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:28,578 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11864.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:33,029 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5845, 1.2356, 4.5360, 1.9071, 3.9661, 3.6762, 3.9794, 3.9014], + device='cuda:2'), covar=tensor([0.0207, 0.2725, 0.0174, 0.1285, 0.0608, 0.0319, 0.0255, 0.0295], + device='cuda:2'), in_proj_covar=tensor([0.0180, 0.0360, 0.0218, 0.0258, 0.0302, 0.0236, 0.0218, 0.0248], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:14:43,033 INFO [train.py:901] (2/4) Epoch 2, batch 3800, loss[loss=0.4234, simple_loss=0.4086, pruned_loss=0.2191, over 7540.00 frames. ], tot_loss[loss=0.3756, simple_loss=0.4086, pruned_loss=0.1712, over 1614979.95 frames. ], batch size: 18, lr: 3.05e-02, grad_scale: 8.0 +2023-02-05 20:14:46,610 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 20:14:49,684 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11894.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:14:54,224 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-05 20:14:58,776 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:02,634 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.750e+02 4.056e+02 4.773e+02 6.198e+02 1.391e+03, threshold=9.546e+02, percent-clipped=3.0 +2023-02-05 20:15:16,340 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:17,493 INFO [train.py:901] (2/4) Epoch 2, batch 3850, loss[loss=0.3915, simple_loss=0.4214, pruned_loss=0.1808, over 8642.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.4079, pruned_loss=0.1705, over 1615567.49 frames. ], batch size: 39, lr: 3.05e-02, grad_scale: 8.0 +2023-02-05 20:15:20,311 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11938.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:25,285 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1705, 1.6375, 2.8100, 0.9321, 1.9869, 1.3148, 1.3119, 1.4040], + device='cuda:2'), covar=tensor([0.1372, 0.1375, 0.0457, 0.2143, 0.1064, 0.1913, 0.1208, 0.1440], + device='cuda:2'), in_proj_covar=tensor([0.0364, 0.0353, 0.0388, 0.0412, 0.0453, 0.0414, 0.0369, 0.0468], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 20:15:39,750 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11966.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:15:42,734 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-05 20:15:47,061 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 20:15:51,652 INFO [train.py:901] (2/4) Epoch 2, batch 3900, loss[loss=0.3758, simple_loss=0.4166, pruned_loss=0.1675, over 8516.00 frames. 
], tot_loss[loss=0.3748, simple_loss=0.4084, pruned_loss=0.1706, over 1617547.84 frames. ], batch size: 26, lr: 3.04e-02, grad_scale: 8.0 +2023-02-05 20:16:01,108 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11997.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:06,095 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12002.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:10,862 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12009.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:13,165 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.113e+02 3.926e+02 4.686e+02 5.678e+02 1.222e+03, threshold=9.373e+02, percent-clipped=4.0 +2023-02-05 20:16:19,135 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 20:16:28,132 INFO [train.py:901] (2/4) Epoch 2, batch 3950, loss[loss=0.3349, simple_loss=0.3654, pruned_loss=0.1521, over 7716.00 frames. ], tot_loss[loss=0.3755, simple_loss=0.4088, pruned_loss=0.1711, over 1613608.06 frames. ], batch size: 18, lr: 3.04e-02, grad_scale: 8.0 +2023-02-05 20:16:36,270 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2209, 2.1283, 3.1773, 2.8075, 2.6261, 1.8086, 1.5058, 1.8413], + device='cuda:2'), covar=tensor([0.0706, 0.0640, 0.0129, 0.0213, 0.0283, 0.0353, 0.0477, 0.0536], + device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0393, 0.0294, 0.0329, 0.0415, 0.0363, 0.0373, 0.0413], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:16:46,990 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:16:56,012 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6081, 2.4064, 3.5326, 3.2889, 2.8834, 2.1795, 1.7429, 2.2158], + device='cuda:2'), covar=tensor([0.0587, 0.0634, 0.0124, 0.0180, 0.0256, 0.0316, 0.0414, 0.0522], + device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0400, 0.0303, 0.0331, 0.0422, 0.0369, 0.0378, 0.0420], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:17:02,479 INFO [train.py:901] (2/4) Epoch 2, batch 4000, loss[loss=0.3235, simple_loss=0.3561, pruned_loss=0.1455, over 7531.00 frames. ], tot_loss[loss=0.3758, simple_loss=0.409, pruned_loss=0.1713, over 1608440.43 frames. 
], batch size: 18, lr: 3.03e-02, grad_scale: 8.0 +2023-02-05 20:17:09,212 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12094.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:22,650 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12112.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:17:23,108 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.955e+02 4.453e+02 5.904e+02 7.845e+02 2.502e+03, threshold=1.181e+03, percent-clipped=13.0 +2023-02-05 20:17:32,963 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3113, 1.9821, 1.4587, 2.0121, 1.7167, 1.3745, 1.5867, 2.0157], + device='cuda:2'), covar=tensor([0.1100, 0.0547, 0.0977, 0.0595, 0.0798, 0.0995, 0.0900, 0.0558], + device='cuda:2'), in_proj_covar=tensor([0.0368, 0.0249, 0.0352, 0.0313, 0.0364, 0.0317, 0.0360, 0.0327], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 20:17:36,859 INFO [train.py:901] (2/4) Epoch 2, batch 4050, loss[loss=0.3027, simple_loss=0.3331, pruned_loss=0.1362, over 7462.00 frames. ], tot_loss[loss=0.3763, simple_loss=0.4097, pruned_loss=0.1715, over 1609382.84 frames. ], batch size: 17, lr: 3.03e-02, grad_scale: 16.0 +2023-02-05 20:18:06,022 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:07,289 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12178.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:11,164 INFO [train.py:901] (2/4) Epoch 2, batch 4100, loss[loss=0.367, simple_loss=0.3927, pruned_loss=0.1707, over 7408.00 frames. ], tot_loss[loss=0.3766, simple_loss=0.4096, pruned_loss=0.1718, over 1609405.68 frames. ], batch size: 17, lr: 3.02e-02, grad_scale: 16.0 +2023-02-05 20:18:27,598 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12208.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:18:30,904 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.728e+02 4.672e+02 5.863e+02 2.072e+03, threshold=9.344e+02, percent-clipped=1.0 +2023-02-05 20:18:47,039 INFO [train.py:901] (2/4) Epoch 2, batch 4150, loss[loss=0.3659, simple_loss=0.4003, pruned_loss=0.1658, over 8131.00 frames. ], tot_loss[loss=0.3742, simple_loss=0.4079, pruned_loss=0.1702, over 1602930.85 frames. ], batch size: 22, lr: 3.02e-02, grad_scale: 16.0 +2023-02-05 20:19:08,924 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12265.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:20,419 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12282.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:21,722 INFO [train.py:901] (2/4) Epoch 2, batch 4200, loss[loss=0.3425, simple_loss=0.4033, pruned_loss=0.1408, over 8446.00 frames. ], tot_loss[loss=0.3714, simple_loss=0.4054, pruned_loss=0.1687, over 1601638.42 frames. 
], batch size: 27, lr: 3.01e-02, grad_scale: 16.0 +2023-02-05 20:19:25,928 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12290.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:28,635 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:38,116 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1885, 1.7723, 1.8522, 1.3376, 1.0553, 1.9109, 0.3588, 0.8622], + device='cuda:2'), covar=tensor([0.1149, 0.0553, 0.0292, 0.0596, 0.0915, 0.0327, 0.1633, 0.0925], + device='cuda:2'), in_proj_covar=tensor([0.0111, 0.0094, 0.0084, 0.0140, 0.0121, 0.0079, 0.0148, 0.0117], + device='cuda:2'), out_proj_covar=tensor([1.1063e-04, 9.9779e-05, 8.3575e-05, 1.3434e-04, 1.2477e-04, 7.9035e-05, + 1.4442e-04, 1.2076e-04], device='cuda:2') +2023-02-05 20:19:40,058 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12310.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:42,058 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.324e+02 3.573e+02 4.694e+02 5.833e+02 1.413e+03, threshold=9.388e+02, percent-clipped=6.0 +2023-02-05 20:19:43,526 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 20:19:49,246 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12323.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:19:57,044 INFO [train.py:901] (2/4) Epoch 2, batch 4250, loss[loss=0.38, simple_loss=0.4171, pruned_loss=0.1715, over 8249.00 frames. ], tot_loss[loss=0.3731, simple_loss=0.4067, pruned_loss=0.1698, over 1605582.62 frames. ], batch size: 24, lr: 3.01e-02, grad_scale: 16.0 +2023-02-05 20:20:02,188 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3773, 1.5599, 2.3625, 1.0749, 1.9115, 1.6524, 1.4299, 1.6264], + device='cuda:2'), covar=tensor([0.1065, 0.1167, 0.0454, 0.1696, 0.0792, 0.1430, 0.0951, 0.1038], + device='cuda:2'), in_proj_covar=tensor([0.0371, 0.0362, 0.0390, 0.0418, 0.0475, 0.0422, 0.0371, 0.0458], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 20:20:06,019 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:06,619 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 20:20:17,106 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.14 vs. limit=2.0 +2023-02-05 20:20:20,975 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12368.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:32,224 INFO [train.py:901] (2/4) Epoch 2, batch 4300, loss[loss=0.4125, simple_loss=0.4439, pruned_loss=0.1906, over 8487.00 frames. ], tot_loss[loss=0.3714, simple_loss=0.4062, pruned_loss=0.1682, over 1606222.54 frames. 
], batch size: 26, lr: 3.00e-02, grad_scale: 16.0 +2023-02-05 20:20:38,551 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12393.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:41,192 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12397.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:20:53,214 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.244e+02 3.864e+02 4.648e+02 5.983e+02 1.525e+03, threshold=9.296e+02, percent-clipped=6.0 +2023-02-05 20:21:00,843 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:05,819 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:06,902 INFO [train.py:901] (2/4) Epoch 2, batch 4350, loss[loss=0.438, simple_loss=0.4458, pruned_loss=0.2151, over 8473.00 frames. ], tot_loss[loss=0.3693, simple_loss=0.4052, pruned_loss=0.1667, over 1608544.18 frames. ], batch size: 29, lr: 2.99e-02, grad_scale: 8.0 +2023-02-05 20:21:09,737 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12438.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:22,839 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3375, 1.7413, 2.0613, 1.5478, 0.9986, 1.9354, 0.5229, 1.1127], + device='cuda:2'), covar=tensor([0.1196, 0.0700, 0.0219, 0.0635, 0.1457, 0.0410, 0.2084, 0.0944], + device='cuda:2'), in_proj_covar=tensor([0.0117, 0.0096, 0.0086, 0.0144, 0.0126, 0.0081, 0.0155, 0.0119], + device='cuda:2'), out_proj_covar=tensor([1.1647e-04, 1.0284e-04, 8.7467e-05, 1.3871e-04, 1.2978e-04, 8.1588e-05, + 1.5059e-04, 1.2420e-04], device='cuda:2') +2023-02-05 20:21:23,526 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12457.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:26,039 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12461.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:28,052 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:21:38,126 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 20:21:42,135 INFO [train.py:901] (2/4) Epoch 2, batch 4400, loss[loss=0.39, simple_loss=0.4162, pruned_loss=0.1819, over 7919.00 frames. ], tot_loss[loss=0.3699, simple_loss=0.4055, pruned_loss=0.1672, over 1611190.46 frames. ], batch size: 20, lr: 2.99e-02, grad_scale: 8.0 +2023-02-05 20:22:02,398 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.494e+02 4.041e+02 4.964e+02 6.742e+02 1.213e+03, threshold=9.928e+02, percent-clipped=4.0 +2023-02-05 20:22:16,720 INFO [train.py:901] (2/4) Epoch 2, batch 4450, loss[loss=0.3848, simple_loss=0.4274, pruned_loss=0.1711, over 8346.00 frames. ], tot_loss[loss=0.3695, simple_loss=0.4051, pruned_loss=0.167, over 1610794.29 frames. ], batch size: 26, lr: 2.98e-02, grad_scale: 8.0 +2023-02-05 20:22:17,400 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-05 20:22:27,253 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:29,911 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12553.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:45,079 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12574.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:49,161 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:22:52,394 INFO [train.py:901] (2/4) Epoch 2, batch 4500, loss[loss=0.3454, simple_loss=0.3987, pruned_loss=0.1461, over 8323.00 frames. ], tot_loss[loss=0.3701, simple_loss=0.4057, pruned_loss=0.1673, over 1611843.87 frames. ], batch size: 25, lr: 2.98e-02, grad_scale: 8.0 +2023-02-05 20:23:06,030 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12604.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:12,558 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 20:23:13,220 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.312e+02 4.309e+02 5.092e+02 6.256e+02 1.421e+03, threshold=1.018e+03, percent-clipped=5.0 +2023-02-05 20:23:27,083 INFO [train.py:901] (2/4) Epoch 2, batch 4550, loss[loss=0.3405, simple_loss=0.3821, pruned_loss=0.1495, over 7793.00 frames. ], tot_loss[loss=0.3685, simple_loss=0.4046, pruned_loss=0.1662, over 1611221.94 frames. ], batch size: 19, lr: 2.97e-02, grad_scale: 8.0 +2023-02-05 20:23:29,978 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.4125, 2.5420, 4.2569, 4.2177, 3.1349, 2.6133, 1.8219, 2.5414], + device='cuda:2'), covar=tensor([0.0515, 0.0746, 0.0108, 0.0201, 0.0299, 0.0281, 0.0448, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0494, 0.0404, 0.0294, 0.0339, 0.0420, 0.0365, 0.0387, 0.0420], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:23:40,670 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:57,695 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12678.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:23:59,781 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:01,608 INFO [train.py:901] (2/4) Epoch 2, batch 4600, loss[loss=0.3701, simple_loss=0.4116, pruned_loss=0.1643, over 8693.00 frames. ], tot_loss[loss=0.3691, simple_loss=0.4049, pruned_loss=0.1667, over 1607136.44 frames. ], batch size: 39, lr: 2.97e-02, grad_scale: 8.0 +2023-02-05 20:24:17,921 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:23,151 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 3.817e+02 4.647e+02 5.826e+02 1.354e+03, threshold=9.293e+02, percent-clipped=3.0 +2023-02-05 20:24:25,442 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12717.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:24:37,082 INFO [train.py:901] (2/4) Epoch 2, batch 4650, loss[loss=0.3833, simple_loss=0.4131, pruned_loss=0.1767, over 8300.00 frames. 
], tot_loss[loss=0.3695, simple_loss=0.4049, pruned_loss=0.1671, over 1607458.67 frames. ], batch size: 23, lr: 2.96e-02, grad_scale: 8.0 +2023-02-05 20:24:42,619 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:09,736 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:11,634 INFO [train.py:901] (2/4) Epoch 2, batch 4700, loss[loss=0.3656, simple_loss=0.4172, pruned_loss=0.157, over 8337.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.4051, pruned_loss=0.167, over 1608307.73 frames. ], batch size: 25, lr: 2.96e-02, grad_scale: 8.0 +2023-02-05 20:25:15,192 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9866, 3.1171, 2.7074, 1.3487, 2.7276, 2.7043, 2.7722, 2.4656], + device='cuda:2'), covar=tensor([0.1215, 0.0731, 0.1102, 0.4144, 0.0731, 0.0833, 0.1327, 0.0767], + device='cuda:2'), in_proj_covar=tensor([0.0313, 0.0209, 0.0255, 0.0335, 0.0230, 0.0178, 0.0237, 0.0162], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:25:28,728 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12808.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:29,549 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12809.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:32,791 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.540e+02 4.122e+02 5.358e+02 6.927e+02 1.344e+03, threshold=1.072e+03, percent-clipped=8.0 +2023-02-05 20:25:47,170 INFO [train.py:901] (2/4) Epoch 2, batch 4750, loss[loss=0.3417, simple_loss=0.3691, pruned_loss=0.1571, over 7439.00 frames. ], tot_loss[loss=0.3695, simple_loss=0.4048, pruned_loss=0.167, over 1609829.39 frames. ], batch size: 17, lr: 2.95e-02, grad_scale: 8.0 +2023-02-05 20:25:47,386 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12834.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:25:59,155 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=5.61 vs. limit=5.0 +2023-02-05 20:26:17,700 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.40 vs. limit=5.0 +2023-02-05 20:26:18,679 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 20:26:20,732 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 20:26:22,726 INFO [train.py:901] (2/4) Epoch 2, batch 4800, loss[loss=0.3324, simple_loss=0.365, pruned_loss=0.1499, over 7238.00 frames. ], tot_loss[loss=0.3675, simple_loss=0.4036, pruned_loss=0.1657, over 1610289.31 frames. 
], batch size: 16, lr: 2.95e-02, grad_scale: 8.0 +2023-02-05 20:26:25,715 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4006, 2.0427, 3.3548, 3.1039, 2.7846, 2.0541, 1.3019, 1.8328], + device='cuda:2'), covar=tensor([0.0639, 0.0741, 0.0142, 0.0257, 0.0298, 0.0346, 0.0553, 0.0594], + device='cuda:2'), in_proj_covar=tensor([0.0503, 0.0413, 0.0307, 0.0354, 0.0431, 0.0377, 0.0401, 0.0435], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:26:42,251 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6582, 3.4488, 2.9520, 4.2237, 1.7471, 2.0442, 2.1117, 3.1753], + device='cuda:2'), covar=tensor([0.1117, 0.1190, 0.1206, 0.0216, 0.2273, 0.2041, 0.2719, 0.1072], + device='cuda:2'), in_proj_covar=tensor([0.0323, 0.0340, 0.0332, 0.0227, 0.0327, 0.0338, 0.0392, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:26:43,405 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.314e+02 3.678e+02 4.471e+02 5.888e+02 1.234e+03, threshold=8.941e+02, percent-clipped=3.0 +2023-02-05 20:26:49,695 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12923.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:26:57,685 INFO [train.py:901] (2/4) Epoch 2, batch 4850, loss[loss=0.5133, simple_loss=0.4893, pruned_loss=0.2687, over 6701.00 frames. ], tot_loss[loss=0.3663, simple_loss=0.4025, pruned_loss=0.165, over 1607262.35 frames. ], batch size: 71, lr: 2.94e-02, grad_scale: 8.0 +2023-02-05 20:27:12,733 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 20:27:32,108 INFO [train.py:901] (2/4) Epoch 2, batch 4900, loss[loss=0.3677, simple_loss=0.4125, pruned_loss=0.1615, over 8395.00 frames. ], tot_loss[loss=0.3706, simple_loss=0.4057, pruned_loss=0.1677, over 1610356.76 frames. ], batch size: 49, lr: 2.94e-02, grad_scale: 8.0 +2023-02-05 20:27:37,246 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-05 20:27:53,286 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 4.170e+02 5.532e+02 7.452e+02 1.588e+03, threshold=1.106e+03, percent-clipped=9.0 +2023-02-05 20:28:06,717 INFO [train.py:901] (2/4) Epoch 2, batch 4950, loss[loss=0.3662, simple_loss=0.3929, pruned_loss=0.1698, over 8244.00 frames. ], tot_loss[loss=0.3702, simple_loss=0.4058, pruned_loss=0.1673, over 1611471.10 frames. ], batch size: 22, lr: 2.93e-02, grad_scale: 8.0 +2023-02-05 20:28:41,844 INFO [train.py:901] (2/4) Epoch 2, batch 5000, loss[loss=0.3338, simple_loss=0.3716, pruned_loss=0.148, over 7822.00 frames. ], tot_loss[loss=0.3715, simple_loss=0.4068, pruned_loss=0.1681, over 1606215.03 frames. ], batch size: 20, lr: 2.93e-02, grad_scale: 8.0 +2023-02-05 20:29:02,460 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.193e+02 4.113e+02 5.050e+02 6.511e+02 1.788e+03, threshold=1.010e+03, percent-clipped=5.0 +2023-02-05 20:29:09,704 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=13125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:15,837 INFO [train.py:901] (2/4) Epoch 2, batch 5050, loss[loss=0.3131, simple_loss=0.3719, pruned_loss=0.1272, over 8034.00 frames. ], tot_loss[loss=0.3688, simple_loss=0.405, pruned_loss=0.1663, over 1604683.69 frames. 
], batch size: 22, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:29:35,420 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-05 20:29:47,478 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13179.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:47,969 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 20:29:50,616 INFO [train.py:901] (2/4) Epoch 2, batch 5100, loss[loss=0.3986, simple_loss=0.4108, pruned_loss=0.1932, over 7233.00 frames. ], tot_loss[loss=0.3688, simple_loss=0.4052, pruned_loss=0.1662, over 1604624.65 frames. ], batch size: 16, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:29:51,469 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2922, 3.5411, 2.7355, 4.0211, 1.9111, 1.6192, 1.8596, 3.1673], + device='cuda:2'), covar=tensor([0.1262, 0.0966, 0.1542, 0.0281, 0.1887, 0.2275, 0.2237, 0.0793], + device='cuda:2'), in_proj_covar=tensor([0.0318, 0.0341, 0.0328, 0.0231, 0.0325, 0.0339, 0.0381, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:29:54,083 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0079, 1.2946, 5.6877, 2.2733, 5.2828, 5.0805, 5.4033, 5.4055], + device='cuda:2'), covar=tensor([0.0158, 0.3121, 0.0184, 0.1351, 0.0592, 0.0220, 0.0187, 0.0211], + device='cuda:2'), in_proj_covar=tensor([0.0188, 0.0382, 0.0242, 0.0276, 0.0327, 0.0260, 0.0244, 0.0265], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:30:04,628 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13204.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:11,535 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.930e+02 4.883e+02 5.892e+02 1.355e+03, threshold=9.766e+02, percent-clipped=3.0 +2023-02-05 20:30:15,661 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4962, 2.2060, 1.5644, 2.0295, 1.7568, 1.1639, 1.6979, 2.2729], + device='cuda:2'), covar=tensor([0.1259, 0.0405, 0.1034, 0.0772, 0.0856, 0.1443, 0.1150, 0.0717], + device='cuda:2'), in_proj_covar=tensor([0.0382, 0.0250, 0.0362, 0.0322, 0.0367, 0.0342, 0.0383, 0.0340], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 20:30:24,602 INFO [train.py:901] (2/4) Epoch 2, batch 5150, loss[loss=0.3501, simple_loss=0.3755, pruned_loss=0.1623, over 7269.00 frames. ], tot_loss[loss=0.3686, simple_loss=0.4046, pruned_loss=0.1663, over 1605335.23 frames. ], batch size: 16, lr: 2.91e-02, grad_scale: 4.0 +2023-02-05 20:30:28,700 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=13240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:59,010 INFO [train.py:901] (2/4) Epoch 2, batch 5200, loss[loss=0.4419, simple_loss=0.4609, pruned_loss=0.2114, over 8355.00 frames. ], tot_loss[loss=0.3672, simple_loss=0.4038, pruned_loss=0.1654, over 1611148.00 frames. ], batch size: 24, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:06,110 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.76 vs. 
limit=2.0 +2023-02-05 20:31:16,381 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5513, 2.0314, 2.3469, 0.3638, 2.1783, 1.3874, 0.4871, 1.8175], + device='cuda:2'), covar=tensor([0.0117, 0.0062, 0.0070, 0.0185, 0.0101, 0.0221, 0.0219, 0.0075], + device='cuda:2'), in_proj_covar=tensor([0.0192, 0.0139, 0.0128, 0.0185, 0.0137, 0.0240, 0.0192, 0.0167], + device='cuda:2'), out_proj_covar=tensor([1.1591e-04, 8.4414e-05, 8.0699e-05, 1.0965e-04, 8.7943e-05, 1.5584e-04, + 1.1968e-04, 1.0186e-04], device='cuda:2') +2023-02-05 20:31:20,912 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 4.339e+02 5.206e+02 6.705e+02 1.063e+03, threshold=1.041e+03, percent-clipped=3.0 +2023-02-05 20:31:33,617 INFO [train.py:901] (2/4) Epoch 2, batch 5250, loss[loss=0.408, simple_loss=0.4399, pruned_loss=0.1881, over 8465.00 frames. ], tot_loss[loss=0.3699, simple_loss=0.4061, pruned_loss=0.1669, over 1614762.47 frames. ], batch size: 25, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:42,986 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 20:31:44,699 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 20:31:54,415 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4565, 1.9620, 1.9056, 0.5381, 1.8947, 1.3942, 0.3208, 1.9933], + device='cuda:2'), covar=tensor([0.0095, 0.0052, 0.0083, 0.0143, 0.0095, 0.0197, 0.0196, 0.0045], + device='cuda:2'), in_proj_covar=tensor([0.0191, 0.0138, 0.0127, 0.0186, 0.0138, 0.0238, 0.0190, 0.0167], + device='cuda:2'), out_proj_covar=tensor([1.1478e-04, 8.3747e-05, 8.0332e-05, 1.1020e-04, 8.8782e-05, 1.5401e-04, + 1.1824e-04, 1.0183e-04], device='cuda:2') +2023-02-05 20:32:07,576 INFO [train.py:901] (2/4) Epoch 2, batch 5300, loss[loss=0.3176, simple_loss=0.374, pruned_loss=0.1306, over 8294.00 frames. ], tot_loss[loss=0.3684, simple_loss=0.4051, pruned_loss=0.1659, over 1614034.39 frames. ], batch size: 23, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:32:19,194 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-05 20:32:29,092 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.093e+02 3.821e+02 4.884e+02 6.417e+02 1.823e+03, threshold=9.767e+02, percent-clipped=6.0 +2023-02-05 20:32:42,522 INFO [train.py:901] (2/4) Epoch 2, batch 5350, loss[loss=0.3164, simple_loss=0.3527, pruned_loss=0.14, over 7798.00 frames. ], tot_loss[loss=0.3694, simple_loss=0.4049, pruned_loss=0.1669, over 1612949.53 frames. ], batch size: 19, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:32:54,622 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0756, 1.1578, 4.1611, 1.7526, 3.5822, 3.4848, 3.6101, 3.6309], + device='cuda:2'), covar=tensor([0.0303, 0.2871, 0.0287, 0.1372, 0.0967, 0.0405, 0.0292, 0.0332], + device='cuda:2'), in_proj_covar=tensor([0.0198, 0.0379, 0.0240, 0.0273, 0.0327, 0.0262, 0.0241, 0.0267], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:33:16,588 INFO [train.py:901] (2/4) Epoch 2, batch 5400, loss[loss=0.3479, simple_loss=0.4031, pruned_loss=0.1463, over 8194.00 frames. ], tot_loss[loss=0.3708, simple_loss=0.4063, pruned_loss=0.1677, over 1618560.28 frames. 
], batch size: 23, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:33:24,899 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:38,013 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.355e+02 3.820e+02 4.559e+02 5.766e+02 1.205e+03, threshold=9.119e+02, percent-clipped=6.0 +2023-02-05 20:33:43,050 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:51,329 INFO [train.py:901] (2/4) Epoch 2, batch 5450, loss[loss=0.3755, simple_loss=0.4161, pruned_loss=0.1674, over 7963.00 frames. ], tot_loss[loss=0.3719, simple_loss=0.407, pruned_loss=0.1684, over 1617725.90 frames. ], batch size: 21, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:34:25,974 INFO [train.py:901] (2/4) Epoch 2, batch 5500, loss[loss=0.3436, simple_loss=0.3959, pruned_loss=0.1457, over 8526.00 frames. ], tot_loss[loss=0.3702, simple_loss=0.4057, pruned_loss=0.1674, over 1615247.34 frames. ], batch size: 28, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:34:28,060 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 20:34:46,540 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.726e+02 4.817e+02 6.308e+02 1.682e+03, threshold=9.635e+02, percent-clipped=6.0 +2023-02-05 20:34:53,639 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-05 20:34:59,986 INFO [train.py:901] (2/4) Epoch 2, batch 5550, loss[loss=0.3806, simple_loss=0.4267, pruned_loss=0.1673, over 8473.00 frames. ], tot_loss[loss=0.3683, simple_loss=0.405, pruned_loss=0.1658, over 1617141.63 frames. ], batch size: 27, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:35:14,237 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 20:35:35,316 INFO [train.py:901] (2/4) Epoch 2, batch 5600, loss[loss=0.3918, simple_loss=0.4237, pruned_loss=0.18, over 8449.00 frames. ], tot_loss[loss=0.3681, simple_loss=0.4049, pruned_loss=0.1657, over 1610160.95 frames. ], batch size: 25, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:35:55,770 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 3.833e+02 4.619e+02 6.071e+02 1.383e+03, threshold=9.238e+02, percent-clipped=5.0 +2023-02-05 20:36:04,604 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6436, 1.5648, 3.4338, 1.0613, 2.3689, 4.0136, 3.5100, 3.4709], + device='cuda:2'), covar=tensor([0.1083, 0.1237, 0.0281, 0.1747, 0.0561, 0.0149, 0.0190, 0.0406], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0258, 0.0178, 0.0252, 0.0190, 0.0153, 0.0142, 0.0220], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:36:08,577 INFO [train.py:901] (2/4) Epoch 2, batch 5650, loss[loss=0.3148, simple_loss=0.35, pruned_loss=0.1398, over 7813.00 frames. ], tot_loss[loss=0.3631, simple_loss=0.4005, pruned_loss=0.1629, over 1605757.66 frames. 
], batch size: 20, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:36:23,373 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=13755.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:36:29,332 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0318, 2.2052, 4.7388, 1.2503, 2.9516, 2.2835, 1.9668, 2.2585], + device='cuda:2'), covar=tensor([0.0871, 0.1049, 0.0290, 0.1508, 0.0852, 0.1236, 0.0795, 0.1409], + device='cuda:2'), in_proj_covar=tensor([0.0386, 0.0369, 0.0403, 0.0430, 0.0490, 0.0423, 0.0386, 0.0484], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 20:36:34,197 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 20:36:43,547 INFO [train.py:901] (2/4) Epoch 2, batch 5700, loss[loss=0.2865, simple_loss=0.3398, pruned_loss=0.1165, over 7708.00 frames. ], tot_loss[loss=0.361, simple_loss=0.399, pruned_loss=0.1615, over 1606956.37 frames. ], batch size: 18, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:00,588 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-05 20:37:05,590 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.337e+02 4.261e+02 5.123e+02 6.631e+02 2.352e+03, threshold=1.025e+03, percent-clipped=5.0 +2023-02-05 20:37:18,906 INFO [train.py:901] (2/4) Epoch 2, batch 5750, loss[loss=0.3795, simple_loss=0.411, pruned_loss=0.1739, over 8561.00 frames. ], tot_loss[loss=0.3616, simple_loss=0.3996, pruned_loss=0.1618, over 1609634.38 frames. ], batch size: 34, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:38,953 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 20:37:54,460 INFO [train.py:901] (2/4) Epoch 2, batch 5800, loss[loss=0.4696, simple_loss=0.468, pruned_loss=0.2356, over 6914.00 frames. ], tot_loss[loss=0.3618, simple_loss=0.3998, pruned_loss=0.1619, over 1602541.40 frames. ], batch size: 71, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:15,740 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.601e+02 3.784e+02 4.729e+02 6.225e+02 2.390e+03, threshold=9.458e+02, percent-clipped=5.0 +2023-02-05 20:38:22,765 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-05 20:38:29,067 INFO [train.py:901] (2/4) Epoch 2, batch 5850, loss[loss=0.4047, simple_loss=0.4352, pruned_loss=0.1871, over 8327.00 frames. ], tot_loss[loss=0.3648, simple_loss=0.4025, pruned_loss=0.1636, over 1609007.68 frames. ], batch size: 25, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:48,631 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-05 20:39:03,909 INFO [train.py:901] (2/4) Epoch 2, batch 5900, loss[loss=0.3848, simple_loss=0.4326, pruned_loss=0.1685, over 8781.00 frames. ], tot_loss[loss=0.3636, simple_loss=0.4013, pruned_loss=0.163, over 1613218.17 frames. ], batch size: 40, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:39:27,076 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.452e+02 3.946e+02 4.724e+02 6.297e+02 1.551e+03, threshold=9.448e+02, percent-clipped=7.0 +2023-02-05 20:39:40,157 INFO [train.py:901] (2/4) Epoch 2, batch 5950, loss[loss=0.3489, simple_loss=0.4091, pruned_loss=0.1444, over 8512.00 frames. 
], tot_loss[loss=0.3662, simple_loss=0.4032, pruned_loss=0.1646, over 1615087.89 frames. ], batch size: 28, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,637 INFO [train.py:901] (2/4) Epoch 2, batch 6000, loss[loss=0.4416, simple_loss=0.451, pruned_loss=0.2161, over 7349.00 frames. ], tot_loss[loss=0.3684, simple_loss=0.405, pruned_loss=0.166, over 1619615.86 frames. ], batch size: 71, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,637 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 20:40:27,828 INFO [train.py:935] (2/4) Epoch 2, validation: loss=0.2758, simple_loss=0.3606, pruned_loss=0.0955, over 944034.00 frames. +2023-02-05 20:40:27,829 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 20:40:32,705 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:38,744 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14099.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:41,500 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5452, 4.6649, 4.1054, 1.6097, 4.0782, 3.6711, 4.2033, 3.1128], + device='cuda:2'), covar=tensor([0.0529, 0.0364, 0.0682, 0.4001, 0.0367, 0.0596, 0.0904, 0.0634], + device='cuda:2'), in_proj_covar=tensor([0.0315, 0.0213, 0.0258, 0.0343, 0.0228, 0.0185, 0.0237, 0.0162], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:40:49,504 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 3.733e+02 4.780e+02 6.772e+02 2.203e+03, threshold=9.561e+02, percent-clipped=10.0 +2023-02-05 20:40:53,729 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:02,697 INFO [train.py:901] (2/4) Epoch 2, batch 6050, loss[loss=0.3924, simple_loss=0.4137, pruned_loss=0.1856, over 7971.00 frames. ], tot_loss[loss=0.3651, simple_loss=0.4023, pruned_loss=0.164, over 1618807.59 frames. ], batch size: 21, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:05,432 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14138.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:41:34,583 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0604, 1.1425, 4.1026, 1.6773, 2.2205, 4.9383, 4.3459, 4.3783], + device='cuda:2'), covar=tensor([0.1164, 0.1804, 0.0285, 0.1803, 0.0700, 0.0167, 0.0222, 0.0401], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0255, 0.0180, 0.0253, 0.0191, 0.0159, 0.0146, 0.0225], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:41:37,174 INFO [train.py:901] (2/4) Epoch 2, batch 6100, loss[loss=0.3916, simple_loss=0.4312, pruned_loss=0.176, over 8344.00 frames. ], tot_loss[loss=0.3666, simple_loss=0.4035, pruned_loss=0.1648, over 1617873.47 frames. ], batch size: 26, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:58,436 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14214.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:58,921 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 3.920e+02 4.920e+02 6.492e+02 2.677e+03, threshold=9.840e+02, percent-clipped=6.0 +2023-02-05 20:42:08,076 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-05 20:42:09,016 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6913, 1.5715, 2.4592, 2.0380, 2.0469, 1.5152, 1.2181, 1.0290], + device='cuda:2'), covar=tensor([0.0716, 0.0597, 0.0165, 0.0221, 0.0272, 0.0348, 0.0408, 0.0540], + device='cuda:2'), in_proj_covar=tensor([0.0507, 0.0427, 0.0325, 0.0365, 0.0463, 0.0397, 0.0413, 0.0440], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:42:11,468 INFO [train.py:901] (2/4) Epoch 2, batch 6150, loss[loss=0.3925, simple_loss=0.4287, pruned_loss=0.1781, over 8411.00 frames. ], tot_loss[loss=0.3661, simple_loss=0.4032, pruned_loss=0.1645, over 1620689.06 frames. ], batch size: 49, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:42:25,026 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4998, 2.1532, 1.3654, 2.0597, 1.7538, 1.1435, 1.3735, 1.8326], + device='cuda:2'), covar=tensor([0.1132, 0.0382, 0.1179, 0.0540, 0.0753, 0.1390, 0.1236, 0.0617], + device='cuda:2'), in_proj_covar=tensor([0.0372, 0.0249, 0.0360, 0.0321, 0.0355, 0.0331, 0.0364, 0.0333], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 20:42:29,273 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 20:42:46,421 INFO [train.py:901] (2/4) Epoch 2, batch 6200, loss[loss=0.3432, simple_loss=0.38, pruned_loss=0.1533, over 7432.00 frames. ], tot_loss[loss=0.3645, simple_loss=0.4024, pruned_loss=0.1633, over 1620152.80 frames. ], batch size: 17, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:43:08,135 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 3.453e+02 4.846e+02 6.394e+02 2.249e+03, threshold=9.691e+02, percent-clipped=6.0 +2023-02-05 20:43:21,529 INFO [train.py:901] (2/4) Epoch 2, batch 6250, loss[loss=0.3989, simple_loss=0.4121, pruned_loss=0.1929, over 7535.00 frames. ], tot_loss[loss=0.3625, simple_loss=0.4006, pruned_loss=0.1622, over 1615153.77 frames. ], batch size: 18, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:43:22,060 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 20:43:42,213 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3762, 1.8279, 1.3862, 1.4885, 2.0446, 1.7828, 2.1057, 1.9002], + device='cuda:2'), covar=tensor([0.0954, 0.1452, 0.2292, 0.1623, 0.0771, 0.1622, 0.0959, 0.0788], + device='cuda:2'), in_proj_covar=tensor([0.0218, 0.0247, 0.0276, 0.0242, 0.0215, 0.0240, 0.0209, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:43:55,847 INFO [train.py:901] (2/4) Epoch 2, batch 6300, loss[loss=0.4118, simple_loss=0.4447, pruned_loss=0.1894, over 7960.00 frames. ], tot_loss[loss=0.3639, simple_loss=0.4018, pruned_loss=0.163, over 1617923.41 frames. ], batch size: 21, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:17,495 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.643e+02 3.823e+02 4.655e+02 5.877e+02 1.568e+03, threshold=9.309e+02, percent-clipped=4.0 +2023-02-05 20:44:28,391 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14431.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:30,244 INFO [train.py:901] (2/4) Epoch 2, batch 6350, loss[loss=0.4201, simple_loss=0.4399, pruned_loss=0.2002, over 7521.00 frames. 
], tot_loss[loss=0.3617, simple_loss=0.4005, pruned_loss=0.1615, over 1615765.86 frames. ], batch size: 71, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:30,318 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14434.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:51,463 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14465.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:54,887 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:03,108 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14482.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:45:04,273 INFO [train.py:901] (2/4) Epoch 2, batch 6400, loss[loss=0.2981, simple_loss=0.3595, pruned_loss=0.1183, over 8084.00 frames. ], tot_loss[loss=0.3628, simple_loss=0.4013, pruned_loss=0.1622, over 1619838.25 frames. ], batch size: 21, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:12,405 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14495.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:17,905 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3139, 1.9177, 1.9850, 0.6587, 1.8987, 1.3043, 0.3311, 1.8597], + device='cuda:2'), covar=tensor([0.0090, 0.0048, 0.0063, 0.0129, 0.0072, 0.0190, 0.0176, 0.0044], + device='cuda:2'), in_proj_covar=tensor([0.0191, 0.0137, 0.0125, 0.0190, 0.0133, 0.0242, 0.0196, 0.0176], + device='cuda:2'), out_proj_covar=tensor([1.1004e-04, 7.8344e-05, 7.5238e-05, 1.0894e-04, 7.9981e-05, 1.4923e-04, + 1.1457e-04, 1.0304e-04], device='cuda:2') +2023-02-05 20:45:19,142 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14505.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:25,558 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.974e+02 5.065e+02 7.362e+02 1.328e+03, threshold=1.013e+03, percent-clipped=8.0 +2023-02-05 20:45:38,723 INFO [train.py:901] (2/4) Epoch 2, batch 6450, loss[loss=0.3722, simple_loss=0.4245, pruned_loss=0.1599, over 8284.00 frames. ], tot_loss[loss=0.3631, simple_loss=0.4019, pruned_loss=0.1622, over 1624469.95 frames. ], batch size: 23, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:48,962 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:10,579 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14580.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:13,166 INFO [train.py:901] (2/4) Epoch 2, batch 6500, loss[loss=0.2447, simple_loss=0.3116, pruned_loss=0.08892, over 6830.00 frames. ], tot_loss[loss=0.3614, simple_loss=0.4005, pruned_loss=0.1612, over 1618307.30 frames. ], batch size: 15, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:46:22,640 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14597.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:46:35,355 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.999e+02 5.009e+02 6.288e+02 1.522e+03, threshold=1.002e+03, percent-clipped=8.0 +2023-02-05 20:46:48,433 INFO [train.py:901] (2/4) Epoch 2, batch 6550, loss[loss=0.3392, simple_loss=0.39, pruned_loss=0.1442, over 8250.00 frames. ], tot_loss[loss=0.364, simple_loss=0.4026, pruned_loss=0.1627, over 1618362.82 frames. 
], batch size: 24, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:47:16,630 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 20:47:23,548 INFO [train.py:901] (2/4) Epoch 2, batch 6600, loss[loss=0.3245, simple_loss=0.3635, pruned_loss=0.1427, over 7924.00 frames. ], tot_loss[loss=0.3628, simple_loss=0.4018, pruned_loss=0.1619, over 1616648.72 frames. ], batch size: 20, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:47:36,561 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:47:45,898 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.289e+02 3.681e+02 4.457e+02 5.556e+02 1.208e+03, threshold=8.913e+02, percent-clipped=4.0 +2023-02-05 20:47:58,943 INFO [train.py:901] (2/4) Epoch 2, batch 6650, loss[loss=0.3714, simple_loss=0.4152, pruned_loss=0.1638, over 8491.00 frames. ], tot_loss[loss=0.3632, simple_loss=0.4017, pruned_loss=0.1624, over 1613267.83 frames. ], batch size: 28, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:16,399 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14758.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:26,143 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4803, 1.8749, 1.9088, 1.6929, 1.1256, 1.8873, 0.4125, 1.0939], + device='cuda:2'), covar=tensor([0.1630, 0.0847, 0.0618, 0.0842, 0.2033, 0.0581, 0.2858, 0.1292], + device='cuda:2'), in_proj_covar=tensor([0.0113, 0.0101, 0.0086, 0.0140, 0.0139, 0.0083, 0.0161, 0.0120], + device='cuda:2'), out_proj_covar=tensor([1.2294e-04, 1.1777e-04, 9.4944e-05, 1.4791e-04, 1.5243e-04, 9.3962e-05, + 1.7045e-04, 1.3307e-04], device='cuda:2') +2023-02-05 20:48:28,648 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14775.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:34,790 INFO [train.py:901] (2/4) Epoch 2, batch 6700, loss[loss=0.3338, simple_loss=0.3784, pruned_loss=0.1447, over 7078.00 frames. ], tot_loss[loss=0.3593, simple_loss=0.3986, pruned_loss=0.16, over 1609420.48 frames. 
], batch size: 71, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:39,096 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2934, 1.1833, 1.3301, 1.1076, 1.4372, 1.2680, 1.3325, 1.2426], + device='cuda:2'), covar=tensor([0.1067, 0.1799, 0.2277, 0.1826, 0.0781, 0.1789, 0.0970, 0.0823], + device='cuda:2'), in_proj_covar=tensor([0.0213, 0.0240, 0.0276, 0.0239, 0.0208, 0.0239, 0.0203, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:48:45,397 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3288, 1.7597, 2.6963, 0.9440, 2.0081, 1.4674, 1.3519, 1.8871], + device='cuda:2'), covar=tensor([0.1286, 0.1274, 0.0472, 0.2273, 0.1115, 0.1829, 0.1138, 0.1342], + device='cuda:2'), in_proj_covar=tensor([0.0406, 0.0382, 0.0436, 0.0451, 0.0508, 0.0446, 0.0401, 0.0503], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 20:48:48,208 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6260, 2.3213, 1.5120, 2.0172, 1.8992, 1.4044, 1.7489, 1.9610], + device='cuda:2'), covar=tensor([0.1207, 0.0372, 0.1124, 0.0703, 0.0740, 0.1282, 0.0985, 0.0733], + device='cuda:2'), in_proj_covar=tensor([0.0381, 0.0246, 0.0369, 0.0322, 0.0360, 0.0341, 0.0363, 0.0331], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 20:48:50,186 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:53,130 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.62 vs. limit=5.0 +2023-02-05 20:48:56,685 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.056e+02 3.873e+02 4.634e+02 6.203e+02 1.536e+03, threshold=9.268e+02, percent-clipped=6.0 +2023-02-05 20:49:04,584 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-05 20:49:07,134 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14830.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:09,756 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1463, 4.2939, 3.6404, 1.6273, 3.6043, 3.6953, 3.8502, 3.1891], + device='cuda:2'), covar=tensor([0.0926, 0.0461, 0.1014, 0.4623, 0.0668, 0.0562, 0.1151, 0.0655], + device='cuda:2'), in_proj_covar=tensor([0.0330, 0.0225, 0.0262, 0.0357, 0.0239, 0.0190, 0.0253, 0.0172], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 20:49:10,351 INFO [train.py:901] (2/4) Epoch 2, batch 6750, loss[loss=0.3053, simple_loss=0.3556, pruned_loss=0.1275, over 8077.00 frames. ], tot_loss[loss=0.3626, simple_loss=0.4009, pruned_loss=0.1622, over 1609973.38 frames. 
], batch size: 21, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:11,909 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14836.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:17,972 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2172, 3.1757, 2.7857, 1.3824, 2.8218, 2.6397, 2.9405, 2.4559], + device='cuda:2'), covar=tensor([0.0854, 0.0552, 0.0913, 0.3630, 0.0640, 0.0829, 0.0971, 0.0687], + device='cuda:2'), in_proj_covar=tensor([0.0333, 0.0225, 0.0264, 0.0358, 0.0241, 0.0192, 0.0253, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 20:49:21,261 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14849.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:24,080 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14853.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:49:29,649 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14861.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:41,490 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14878.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:49:45,988 INFO [train.py:901] (2/4) Epoch 2, batch 6800, loss[loss=0.3056, simple_loss=0.3764, pruned_loss=0.1174, over 8245.00 frames. ], tot_loss[loss=0.3627, simple_loss=0.4013, pruned_loss=0.162, over 1609774.00 frames. ], batch size: 24, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:50,348 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14890.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:54,304 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 20:50:00,276 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 20:50:07,690 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.663e+02 4.715e+02 6.092e+02 1.805e+03, threshold=9.431e+02, percent-clipped=7.0 +2023-02-05 20:50:10,657 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4695, 1.2732, 1.3323, 1.1901, 1.4909, 1.3674, 1.0501, 1.3616], + device='cuda:2'), covar=tensor([0.1035, 0.1624, 0.2317, 0.1798, 0.0709, 0.1700, 0.1018, 0.0765], + device='cuda:2'), in_proj_covar=tensor([0.0213, 0.0240, 0.0276, 0.0240, 0.0208, 0.0241, 0.0202, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:50:21,310 INFO [train.py:901] (2/4) Epoch 2, batch 6850, loss[loss=0.3144, simple_loss=0.3524, pruned_loss=0.1382, over 7545.00 frames. ], tot_loss[loss=0.3634, simple_loss=0.4021, pruned_loss=0.1624, over 1612680.21 frames. ], batch size: 18, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:50:42,734 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14964.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:50:45,334 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-05 20:50:50,558 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2684, 1.4711, 2.3336, 0.9388, 1.7681, 1.4394, 1.3168, 1.4431], + device='cuda:2'), covar=tensor([0.1137, 0.1158, 0.0416, 0.1809, 0.0888, 0.1794, 0.0966, 0.1213], + device='cuda:2'), in_proj_covar=tensor([0.0415, 0.0389, 0.0450, 0.0464, 0.0513, 0.0467, 0.0410, 0.0519], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 20:50:57,102 INFO [train.py:901] (2/4) Epoch 2, batch 6900, loss[loss=0.3375, simple_loss=0.4031, pruned_loss=0.136, over 8466.00 frames. ], tot_loss[loss=0.3635, simple_loss=0.4019, pruned_loss=0.1626, over 1611890.24 frames. ], batch size: 25, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:50:58,971 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 20:51:19,285 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.011e+02 4.191e+02 5.097e+02 7.005e+02 1.700e+03, threshold=1.019e+03, percent-clipped=5.0 +2023-02-05 20:51:32,576 INFO [train.py:901] (2/4) Epoch 2, batch 6950, loss[loss=0.391, simple_loss=0.4388, pruned_loss=0.1716, over 8109.00 frames. ], tot_loss[loss=0.3626, simple_loss=0.4011, pruned_loss=0.1621, over 1612367.21 frames. ], batch size: 23, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:51:56,461 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 20:52:08,404 INFO [train.py:901] (2/4) Epoch 2, batch 7000, loss[loss=0.4045, simple_loss=0.4505, pruned_loss=0.1793, over 8104.00 frames. ], tot_loss[loss=0.3588, simple_loss=0.3986, pruned_loss=0.1595, over 1613172.38 frames. ], batch size: 23, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:52:14,182 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.00 vs. limit=5.0 +2023-02-05 20:52:21,506 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15102.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:52:30,559 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.233e+02 3.928e+02 4.810e+02 5.818e+02 1.410e+03, threshold=9.621e+02, percent-clipped=1.0 +2023-02-05 20:52:35,507 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0427, 2.3559, 2.0095, 2.7475, 1.8290, 1.6078, 2.0682, 2.2976], + device='cuda:2'), covar=tensor([0.0924, 0.0940, 0.1235, 0.0450, 0.1380, 0.1829, 0.1390, 0.0889], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0332, 0.0320, 0.0231, 0.0313, 0.0335, 0.0354, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:2') +2023-02-05 20:52:44,327 INFO [train.py:901] (2/4) Epoch 2, batch 7050, loss[loss=0.3111, simple_loss=0.3633, pruned_loss=0.1295, over 7657.00 frames. ], tot_loss[loss=0.3585, simple_loss=0.3983, pruned_loss=0.1594, over 1611064.42 frames. 
], batch size: 19, lr: 2.75e-02, grad_scale: 16.0 +2023-02-05 20:52:52,901 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:52:58,260 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.3138, 0.9523, 3.5823, 1.4454, 2.6510, 3.0817, 3.3378, 3.4023], + device='cuda:2'), covar=tensor([0.0611, 0.4047, 0.0571, 0.2086, 0.2019, 0.0690, 0.0570, 0.0675], + device='cuda:2'), in_proj_covar=tensor([0.0211, 0.0394, 0.0250, 0.0291, 0.0355, 0.0282, 0.0266, 0.0286], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:53:06,394 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9307, 2.0882, 4.0602, 3.7695, 2.8600, 2.2742, 1.5896, 1.8398], + device='cuda:2'), covar=tensor([0.0740, 0.1063, 0.0195, 0.0258, 0.0517, 0.0387, 0.0528, 0.0936], + device='cuda:2'), in_proj_covar=tensor([0.0532, 0.0444, 0.0343, 0.0386, 0.0496, 0.0418, 0.0437, 0.0450], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:53:10,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15171.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:18,766 INFO [train.py:901] (2/4) Epoch 2, batch 7100, loss[loss=0.4032, simple_loss=0.4454, pruned_loss=0.1805, over 8523.00 frames. ], tot_loss[loss=0.3584, simple_loss=0.3982, pruned_loss=0.1592, over 1614464.50 frames. ], batch size: 28, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:53:23,894 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3747, 1.9406, 3.2649, 2.6981, 2.4355, 1.9523, 1.4975, 1.6328], + device='cuda:2'), covar=tensor([0.0691, 0.0828, 0.0148, 0.0310, 0.0407, 0.0391, 0.0527, 0.0773], + device='cuda:2'), in_proj_covar=tensor([0.0534, 0.0451, 0.0347, 0.0389, 0.0501, 0.0422, 0.0442, 0.0454], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 20:53:26,482 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9771, 4.0176, 3.6329, 1.5845, 3.6006, 3.5005, 3.6232, 2.9936], + device='cuda:2'), covar=tensor([0.0844, 0.0580, 0.0908, 0.4282, 0.0580, 0.0640, 0.1335, 0.0728], + device='cuda:2'), in_proj_covar=tensor([0.0321, 0.0228, 0.0258, 0.0344, 0.0237, 0.0190, 0.0247, 0.0170], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:53:39,782 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15213.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:40,997 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.718e+02 4.413e+02 5.855e+02 1.165e+03, threshold=8.826e+02, percent-clipped=3.0 +2023-02-05 20:53:42,524 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15217.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:44,502 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:53,628 INFO [train.py:901] (2/4) Epoch 2, batch 7150, loss[loss=0.3657, simple_loss=0.4138, pruned_loss=0.1588, over 8520.00 frames. ], tot_loss[loss=0.3586, simple_loss=0.3986, pruned_loss=0.1593, over 1616944.27 frames. 
], batch size: 28, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:54:02,059 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:54:17,101 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4778, 2.0052, 1.9825, 0.5823, 2.0922, 1.4059, 0.4227, 1.8970], + device='cuda:2'), covar=tensor([0.0142, 0.0057, 0.0068, 0.0170, 0.0067, 0.0241, 0.0224, 0.0070], + device='cuda:2'), in_proj_covar=tensor([0.0201, 0.0135, 0.0124, 0.0186, 0.0139, 0.0251, 0.0201, 0.0175], + device='cuda:2'), out_proj_covar=tensor([1.1355e-04, 7.5475e-05, 7.1644e-05, 1.0169e-04, 8.2241e-05, 1.5283e-04, + 1.1501e-04, 9.9122e-05], device='cuda:2') +2023-02-05 20:54:29,183 INFO [train.py:901] (2/4) Epoch 2, batch 7200, loss[loss=0.4204, simple_loss=0.4175, pruned_loss=0.2117, over 5973.00 frames. ], tot_loss[loss=0.3572, simple_loss=0.3976, pruned_loss=0.1584, over 1613144.58 frames. ], batch size: 13, lr: 2.73e-02, grad_scale: 16.0 +2023-02-05 20:54:51,169 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.528e+02 3.704e+02 4.905e+02 6.625e+02 1.855e+03, threshold=9.809e+02, percent-clipped=12.0 +2023-02-05 20:55:04,885 INFO [train.py:901] (2/4) Epoch 2, batch 7250, loss[loss=0.438, simple_loss=0.4658, pruned_loss=0.2051, over 8045.00 frames. ], tot_loss[loss=0.3579, simple_loss=0.3981, pruned_loss=0.1588, over 1617907.13 frames. ], batch size: 22, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:55:25,881 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.12 vs. limit=2.0 +2023-02-05 20:55:39,918 INFO [train.py:901] (2/4) Epoch 2, batch 7300, loss[loss=0.3076, simple_loss=0.3721, pruned_loss=0.1215, over 8521.00 frames. ], tot_loss[loss=0.3586, simple_loss=0.3985, pruned_loss=0.1594, over 1618141.98 frames. ], batch size: 26, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:56:02,317 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.194e+02 3.434e+02 4.292e+02 5.923e+02 1.449e+03, threshold=8.584e+02, percent-clipped=5.0 +2023-02-05 20:56:05,230 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5872, 2.5762, 1.6894, 2.3583, 2.1659, 1.3619, 1.4369, 2.0321], + device='cuda:2'), covar=tensor([0.1403, 0.0427, 0.0873, 0.0593, 0.0740, 0.1164, 0.1234, 0.0781], + device='cuda:2'), in_proj_covar=tensor([0.0386, 0.0251, 0.0355, 0.0324, 0.0362, 0.0330, 0.0364, 0.0330], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 20:56:14,878 INFO [train.py:901] (2/4) Epoch 2, batch 7350, loss[loss=0.3013, simple_loss=0.3545, pruned_loss=0.1241, over 7797.00 frames. ], tot_loss[loss=0.3591, simple_loss=0.3987, pruned_loss=0.1598, over 1619186.88 frames. ], batch size: 19, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:42,773 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15473.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:56:43,905 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 20:56:49,847 INFO [train.py:901] (2/4) Epoch 2, batch 7400, loss[loss=0.3454, simple_loss=0.392, pruned_loss=0.1494, over 8467.00 frames. ], tot_loss[loss=0.3576, simple_loss=0.3976, pruned_loss=0.1588, over 1618761.01 frames. 
], batch size: 25, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:59,515 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:01,981 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 20:57:04,160 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6316, 2.0016, 2.1248, 0.6277, 2.0909, 1.3316, 0.4878, 1.9042], + device='cuda:2'), covar=tensor([0.0105, 0.0038, 0.0050, 0.0135, 0.0066, 0.0195, 0.0176, 0.0051], + device='cuda:2'), in_proj_covar=tensor([0.0200, 0.0134, 0.0124, 0.0185, 0.0130, 0.0249, 0.0201, 0.0176], + device='cuda:2'), out_proj_covar=tensor([1.1159e-04, 7.3433e-05, 7.0393e-05, 1.0050e-04, 7.5858e-05, 1.5033e-04, + 1.1429e-04, 9.8739e-05], device='cuda:2') +2023-02-05 20:57:11,815 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.940e+02 4.956e+02 6.362e+02 1.377e+03, threshold=9.912e+02, percent-clipped=7.0 +2023-02-05 20:57:24,678 INFO [train.py:901] (2/4) Epoch 2, batch 7450, loss[loss=0.3401, simple_loss=0.3902, pruned_loss=0.145, over 7982.00 frames. ], tot_loss[loss=0.3585, simple_loss=0.3977, pruned_loss=0.1596, over 1615262.58 frames. ], batch size: 21, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:57:30,904 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9388, 1.8822, 1.7639, 2.3629, 1.1677, 1.2468, 1.3955, 1.9274], + device='cuda:2'), covar=tensor([0.1194, 0.1433, 0.1566, 0.0650, 0.1887, 0.2657, 0.2144, 0.1122], + device='cuda:2'), in_proj_covar=tensor([0.0311, 0.0324, 0.0328, 0.0224, 0.0308, 0.0329, 0.0359, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0003, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 20:57:40,476 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15557.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:41,773 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 20:57:49,494 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 20:57:50,501 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3220, 1.4820, 1.2770, 1.6850, 1.2218, 1.0602, 1.0837, 1.4678], + device='cuda:2'), covar=tensor([0.0830, 0.0515, 0.0997, 0.0607, 0.0780, 0.1100, 0.0941, 0.0650], + device='cuda:2'), in_proj_covar=tensor([0.0377, 0.0246, 0.0348, 0.0321, 0.0345, 0.0323, 0.0354, 0.0324], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003], + device='cuda:2') +2023-02-05 20:57:51,016 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8271, 2.8669, 2.5040, 1.3783, 2.4951, 2.3954, 2.5931, 2.1675], + device='cuda:2'), covar=tensor([0.1143, 0.0757, 0.1104, 0.4070, 0.0767, 0.0981, 0.1370, 0.0916], + device='cuda:2'), in_proj_covar=tensor([0.0330, 0.0221, 0.0259, 0.0348, 0.0233, 0.0198, 0.0247, 0.0175], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:57:59,034 INFO [train.py:901] (2/4) Epoch 2, batch 7500, loss[loss=0.321, simple_loss=0.3754, pruned_loss=0.1333, over 8599.00 frames. ], tot_loss[loss=0.3579, simple_loss=0.3976, pruned_loss=0.1591, over 1620575.91 frames. 
], batch size: 34, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:58:21,355 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.662e+02 4.519e+02 5.678e+02 1.466e+03, threshold=9.038e+02, percent-clipped=6.0 +2023-02-05 20:58:26,262 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9314, 1.0930, 3.0660, 0.9753, 2.4305, 2.5604, 2.6426, 2.6723], + device='cuda:2'), covar=tensor([0.0528, 0.3237, 0.0380, 0.2063, 0.1362, 0.0589, 0.0574, 0.0644], + device='cuda:2'), in_proj_covar=tensor([0.0213, 0.0397, 0.0252, 0.0294, 0.0353, 0.0278, 0.0270, 0.0282], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 20:58:34,053 INFO [train.py:901] (2/4) Epoch 2, batch 7550, loss[loss=0.3504, simple_loss=0.4037, pruned_loss=0.1486, over 8338.00 frames. ], tot_loss[loss=0.3561, simple_loss=0.3964, pruned_loss=0.1579, over 1622062.82 frames. ], batch size: 26, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:59:00,923 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15672.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:59:08,584 INFO [train.py:901] (2/4) Epoch 2, batch 7600, loss[loss=0.3756, simple_loss=0.4156, pruned_loss=0.1679, over 8634.00 frames. ], tot_loss[loss=0.3561, simple_loss=0.3964, pruned_loss=0.1579, over 1621737.69 frames. ], batch size: 34, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 20:59:31,058 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.634e+02 4.473e+02 6.191e+02 1.516e+03, threshold=8.946e+02, percent-clipped=5.0 +2023-02-05 20:59:43,083 INFO [train.py:901] (2/4) Epoch 2, batch 7650, loss[loss=0.3159, simple_loss=0.3557, pruned_loss=0.138, over 7704.00 frames. ], tot_loss[loss=0.356, simple_loss=0.3961, pruned_loss=0.158, over 1619511.56 frames. ], batch size: 18, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 21:00:19,422 INFO [train.py:901] (2/4) Epoch 2, batch 7700, loss[loss=0.3438, simple_loss=0.3779, pruned_loss=0.1549, over 6444.00 frames. ], tot_loss[loss=0.3521, simple_loss=0.3932, pruned_loss=0.1555, over 1615082.24 frames. ], batch size: 14, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:00:41,051 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 3.880e+02 4.902e+02 6.175e+02 1.322e+03, threshold=9.805e+02, percent-clipped=4.0 +2023-02-05 21:00:42,376 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 21:00:51,197 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 21:00:53,924 INFO [train.py:901] (2/4) Epoch 2, batch 7750, loss[loss=0.3417, simple_loss=0.3903, pruned_loss=0.1465, over 8102.00 frames. ], tot_loss[loss=0.3514, simple_loss=0.393, pruned_loss=0.1549, over 1616612.37 frames. ], batch size: 23, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:28,170 INFO [train.py:901] (2/4) Epoch 2, batch 7800, loss[loss=0.3443, simple_loss=0.3744, pruned_loss=0.1571, over 7419.00 frames. ], tot_loss[loss=0.3496, simple_loss=0.3912, pruned_loss=0.154, over 1611137.29 frames. 
], batch size: 17, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:40,343 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:01:50,917 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.569e+02 4.742e+02 5.990e+02 9.896e+02, threshold=9.484e+02, percent-clipped=1.0 +2023-02-05 21:01:59,057 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:02,905 INFO [train.py:901] (2/4) Epoch 2, batch 7850, loss[loss=0.3475, simple_loss=0.3992, pruned_loss=0.1479, over 8336.00 frames. ], tot_loss[loss=0.3484, simple_loss=0.3904, pruned_loss=0.1532, over 1608015.87 frames. ], batch size: 25, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:15,679 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15953.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:36,218 INFO [train.py:901] (2/4) Epoch 2, batch 7900, loss[loss=0.3557, simple_loss=0.3965, pruned_loss=0.1575, over 8464.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3917, pruned_loss=0.1535, over 1614522.06 frames. ], batch size: 27, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:39,746 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7835, 1.9813, 2.3096, 0.7233, 2.1991, 1.3201, 0.5473, 1.5270], + device='cuda:2'), covar=tensor([0.0119, 0.0063, 0.0066, 0.0161, 0.0083, 0.0256, 0.0227, 0.0104], + device='cuda:2'), in_proj_covar=tensor([0.0205, 0.0141, 0.0125, 0.0190, 0.0138, 0.0253, 0.0203, 0.0179], + device='cuda:2'), out_proj_covar=tensor([1.1241e-04, 7.5918e-05, 6.9377e-05, 1.0293e-04, 7.9174e-05, 1.5025e-04, + 1.1324e-04, 9.9462e-05], device='cuda:2') +2023-02-05 21:02:58,236 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.808e+02 4.602e+02 5.936e+02 1.299e+03, threshold=9.205e+02, percent-clipped=9.0 +2023-02-05 21:03:00,387 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16019.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:03:10,210 INFO [train.py:901] (2/4) Epoch 2, batch 7950, loss[loss=0.3495, simple_loss=0.3947, pruned_loss=0.1522, over 8650.00 frames. ], tot_loss[loss=0.3507, simple_loss=0.3932, pruned_loss=0.1542, over 1616064.70 frames. ], batch size: 27, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:03:43,323 INFO [train.py:901] (2/4) Epoch 2, batch 8000, loss[loss=0.3644, simple_loss=0.4034, pruned_loss=0.1627, over 8194.00 frames. ], tot_loss[loss=0.354, simple_loss=0.3952, pruned_loss=0.1564, over 1614360.32 frames. ], batch size: 23, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:03:56,071 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16103.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:04:04,534 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.336e+02 4.123e+02 4.991e+02 6.647e+02 1.461e+03, threshold=9.983e+02, percent-clipped=10.0 +2023-02-05 21:04:16,503 INFO [train.py:901] (2/4) Epoch 2, batch 8050, loss[loss=0.3042, simple_loss=0.3542, pruned_loss=0.1271, over 7529.00 frames. ], tot_loss[loss=0.3507, simple_loss=0.3918, pruned_loss=0.1548, over 1598533.46 frames. 
], batch size: 18, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:04:27,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9195, 1.0108, 1.1046, 0.8440, 0.6964, 1.2339, 0.2374, 0.6125], + device='cuda:2'), covar=tensor([0.1506, 0.1665, 0.0813, 0.1481, 0.2160, 0.0579, 0.3270, 0.1928], + device='cuda:2'), in_proj_covar=tensor([0.0109, 0.0096, 0.0082, 0.0146, 0.0145, 0.0080, 0.0150, 0.0110], + device='cuda:2'), out_proj_covar=tensor([1.2640e-04, 1.1616e-04, 9.4297e-05, 1.5998e-04, 1.6178e-04, 9.5338e-05, + 1.6892e-04, 1.3118e-04], device='cuda:2') +2023-02-05 21:04:51,313 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 21:04:55,113 INFO [train.py:901] (2/4) Epoch 3, batch 0, loss[loss=0.3868, simple_loss=0.4165, pruned_loss=0.1785, over 8497.00 frames. ], tot_loss[loss=0.3868, simple_loss=0.4165, pruned_loss=0.1785, over 8497.00 frames. ], batch size: 49, lr: 2.53e-02, grad_scale: 8.0 +2023-02-05 21:04:55,114 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 21:05:06,962 INFO [train.py:935] (2/4) Epoch 3, validation: loss=0.2731, simple_loss=0.3579, pruned_loss=0.09417, over 944034.00 frames. +2023-02-05 21:05:06,963 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 21:05:07,103 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:05:14,140 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2574, 2.6282, 4.1171, 3.8018, 3.2511, 2.7121, 1.7577, 2.0637], + device='cuda:2'), covar=tensor([0.0700, 0.0863, 0.0144, 0.0277, 0.0368, 0.0370, 0.0563, 0.0782], + device='cuda:2'), in_proj_covar=tensor([0.0540, 0.0461, 0.0352, 0.0404, 0.0496, 0.0430, 0.0455, 0.0464], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:05:23,570 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 21:05:42,767 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.402e+02 4.065e+02 5.070e+02 6.931e+02 1.670e+03, threshold=1.014e+03, percent-clipped=5.0 +2023-02-05 21:05:42,788 INFO [train.py:901] (2/4) Epoch 3, batch 50, loss[loss=0.3457, simple_loss=0.3911, pruned_loss=0.1501, over 8472.00 frames. ], tot_loss[loss=0.3542, simple_loss=0.3937, pruned_loss=0.1574, over 365506.55 frames. ], batch size: 25, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:05:58,800 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 21:06:02,997 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:06:18,204 INFO [train.py:901] (2/4) Epoch 3, batch 100, loss[loss=0.4973, simple_loss=0.4797, pruned_loss=0.2575, over 6681.00 frames. ], tot_loss[loss=0.3568, simple_loss=0.3957, pruned_loss=0.1589, over 637404.75 frames. ], batch size: 72, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:06:18,929 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 21:06:20,108 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. 
limit=5.0 +2023-02-05 21:06:53,416 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.291e+02 3.520e+02 4.471e+02 5.811e+02 1.196e+03, threshold=8.942e+02, percent-clipped=3.0 +2023-02-05 21:06:53,436 INFO [train.py:901] (2/4) Epoch 3, batch 150, loss[loss=0.3028, simple_loss=0.3628, pruned_loss=0.1214, over 8496.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3917, pruned_loss=0.1535, over 851880.94 frames. ], batch size: 28, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:23,168 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16360.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:24,938 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16363.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:27,430 INFO [train.py:901] (2/4) Epoch 3, batch 200, loss[loss=0.324, simple_loss=0.3674, pruned_loss=0.1402, over 7914.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3924, pruned_loss=0.1532, over 1020458.04 frames. ], batch size: 20, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:27,654 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4997, 2.0219, 2.2897, 0.6817, 2.3039, 1.5856, 0.5649, 1.8375], + device='cuda:2'), covar=tensor([0.0142, 0.0061, 0.0096, 0.0188, 0.0096, 0.0239, 0.0238, 0.0079], + device='cuda:2'), in_proj_covar=tensor([0.0206, 0.0145, 0.0125, 0.0192, 0.0134, 0.0254, 0.0210, 0.0178], + device='cuda:2'), out_proj_covar=tensor([1.1189e-04, 7.7989e-05, 6.8373e-05, 1.0217e-04, 7.4906e-05, 1.4812e-04, + 1.1632e-04, 9.7388e-05], device='cuda:2') +2023-02-05 21:07:42,151 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16389.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:01,474 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 3.609e+02 4.419e+02 5.456e+02 1.161e+03, threshold=8.837e+02, percent-clipped=3.0 +2023-02-05 21:08:01,494 INFO [train.py:901] (2/4) Epoch 3, batch 250, loss[loss=0.4054, simple_loss=0.4373, pruned_loss=0.1868, over 8030.00 frames. ], tot_loss[loss=0.3497, simple_loss=0.3931, pruned_loss=0.1531, over 1155405.59 frames. ], batch size: 22, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:13,913 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 21:08:15,455 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0813, 3.0842, 2.0539, 2.3589, 2.5597, 2.0014, 2.1615, 2.7397], + device='cuda:2'), covar=tensor([0.0915, 0.0394, 0.0724, 0.0707, 0.0549, 0.0862, 0.0972, 0.0709], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0246, 0.0340, 0.0305, 0.0350, 0.0313, 0.0352, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:08:22,557 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-05 21:08:22,628 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:27,798 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6736, 2.2422, 4.6700, 2.7782, 4.2579, 4.1717, 4.3521, 4.3910], + device='cuda:2'), covar=tensor([0.0243, 0.2031, 0.0232, 0.1205, 0.0640, 0.0292, 0.0269, 0.0258], + device='cuda:2'), in_proj_covar=tensor([0.0214, 0.0403, 0.0262, 0.0303, 0.0370, 0.0290, 0.0276, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 21:08:35,575 INFO [train.py:901] (2/4) Epoch 3, batch 300, loss[loss=0.3435, simple_loss=0.3932, pruned_loss=0.1469, over 7979.00 frames. ], tot_loss[loss=0.3503, simple_loss=0.3935, pruned_loss=0.1536, over 1258969.13 frames. ], batch size: 21, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:43,558 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16478.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:05,163 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16511.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:09,088 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.982e+02 3.752e+02 4.774e+02 5.919e+02 1.248e+03, threshold=9.549e+02, percent-clipped=6.0 +2023-02-05 21:09:09,108 INFO [train.py:901] (2/4) Epoch 3, batch 350, loss[loss=0.3085, simple_loss=0.3743, pruned_loss=0.1214, over 8248.00 frames. ], tot_loss[loss=0.3508, simple_loss=0.3933, pruned_loss=0.1541, over 1342244.78 frames. ], batch size: 24, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:09:14,648 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1799, 1.1493, 4.2943, 1.7999, 3.5746, 3.5648, 3.8125, 3.7704], + device='cuda:2'), covar=tensor([0.0320, 0.3270, 0.0244, 0.1649, 0.0983, 0.0427, 0.0374, 0.0411], + device='cuda:2'), in_proj_covar=tensor([0.0215, 0.0405, 0.0271, 0.0309, 0.0375, 0.0292, 0.0282, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 21:09:22,196 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-05 21:09:34,321 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.4147, 2.1242, 1.8411, 1.4664, 2.6487, 1.9191, 2.4978, 2.2090], + device='cuda:2'), covar=tensor([0.0854, 0.1491, 0.2092, 0.1774, 0.0764, 0.1721, 0.0875, 0.0788], + device='cuda:2'), in_proj_covar=tensor([0.0207, 0.0233, 0.0267, 0.0230, 0.0203, 0.0229, 0.0197, 0.0196], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:09:40,931 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16562.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:44,001 INFO [train.py:901] (2/4) Epoch 3, batch 400, loss[loss=0.3233, simple_loss=0.3857, pruned_loss=0.1304, over 8359.00 frames. ], tot_loss[loss=0.3512, simple_loss=0.3934, pruned_loss=0.1545, over 1400851.77 frames. 
], batch size: 24, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:18,119 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:18,545 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.210e+02 3.588e+02 4.493e+02 6.059e+02 1.047e+03, threshold=8.987e+02, percent-clipped=2.0 +2023-02-05 21:10:18,565 INFO [train.py:901] (2/4) Epoch 3, batch 450, loss[loss=0.3311, simple_loss=0.3882, pruned_loss=0.137, over 8768.00 frames. ], tot_loss[loss=0.3506, simple_loss=0.3931, pruned_loss=0.154, over 1446955.29 frames. ], batch size: 30, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:24,823 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16626.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:35,584 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16641.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:53,050 INFO [train.py:901] (2/4) Epoch 3, batch 500, loss[loss=0.3492, simple_loss=0.3943, pruned_loss=0.1521, over 8191.00 frames. ], tot_loss[loss=0.3502, simple_loss=0.3932, pruned_loss=0.1536, over 1488938.22 frames. ], batch size: 23, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:11:27,933 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 3.547e+02 4.664e+02 6.145e+02 2.246e+03, threshold=9.327e+02, percent-clipped=7.0 +2023-02-05 21:11:27,953 INFO [train.py:901] (2/4) Epoch 3, batch 550, loss[loss=0.2917, simple_loss=0.3599, pruned_loss=0.1118, over 8081.00 frames. ], tot_loss[loss=0.3465, simple_loss=0.3904, pruned_loss=0.1513, over 1511594.36 frames. ], batch size: 21, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:11:38,639 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16733.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:39,412 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16734.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:56,556 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16759.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:01,682 INFO [train.py:901] (2/4) Epoch 3, batch 600, loss[loss=0.3535, simple_loss=0.397, pruned_loss=0.155, over 8251.00 frames. ], tot_loss[loss=0.3476, simple_loss=0.3911, pruned_loss=0.152, over 1538533.85 frames. ], batch size: 24, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:16,376 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-05 21:12:20,620 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3300, 1.5410, 2.8793, 0.9352, 2.0308, 1.6511, 1.4178, 1.6648], + device='cuda:2'), covar=tensor([0.1308, 0.1711, 0.0419, 0.2581, 0.1125, 0.1877, 0.1106, 0.1671], + device='cuda:2'), in_proj_covar=tensor([0.0427, 0.0401, 0.0460, 0.0488, 0.0535, 0.0468, 0.0429, 0.0534], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 21:12:21,238 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3969, 2.1589, 1.5793, 1.5151, 1.7147, 1.7607, 2.0609, 1.8499], + device='cuda:2'), covar=tensor([0.0830, 0.1202, 0.1918, 0.1567, 0.0863, 0.1482, 0.0938, 0.0634], + device='cuda:2'), in_proj_covar=tensor([0.0208, 0.0237, 0.0270, 0.0236, 0.0207, 0.0234, 0.0201, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:12:22,377 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7138, 1.7649, 4.2316, 2.0182, 2.5059, 5.1036, 4.4843, 4.5760], + device='cuda:2'), covar=tensor([0.0926, 0.1443, 0.0228, 0.1750, 0.0705, 0.0136, 0.0240, 0.0385], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0267, 0.0200, 0.0268, 0.0200, 0.0167, 0.0164, 0.0241], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:12:36,656 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.715e+02 4.834e+02 5.984e+02 1.404e+03, threshold=9.668e+02, percent-clipped=7.0 +2023-02-05 21:12:36,676 INFO [train.py:901] (2/4) Epoch 3, batch 650, loss[loss=0.3438, simple_loss=0.3912, pruned_loss=0.1482, over 8343.00 frames. ], tot_loss[loss=0.347, simple_loss=0.3899, pruned_loss=0.1521, over 1550364.40 frames. ], batch size: 26, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:37,560 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16818.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:54,030 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:57,234 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:10,252 INFO [train.py:901] (2/4) Epoch 3, batch 700, loss[loss=0.3465, simple_loss=0.3983, pruned_loss=0.1474, over 8251.00 frames. ], tot_loss[loss=0.3495, simple_loss=0.3915, pruned_loss=0.1538, over 1564797.02 frames. ], batch size: 24, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:20,420 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16882.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:23,045 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16886.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:13:38,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:44,828 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.086e+02 3.932e+02 4.613e+02 6.231e+02 2.383e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 21:13:44,850 INFO [train.py:901] (2/4) Epoch 3, batch 750, loss[loss=0.4173, simple_loss=0.4392, pruned_loss=0.1977, over 8667.00 frames. 
], tot_loss[loss=0.3501, simple_loss=0.3924, pruned_loss=0.1539, over 1579189.35 frames. ], batch size: 49, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:49,809 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16924.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:57,352 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 21:13:59,079 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 21:14:07,691 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 21:14:19,211 INFO [train.py:901] (2/4) Epoch 3, batch 800, loss[loss=0.3278, simple_loss=0.3768, pruned_loss=0.1394, over 8134.00 frames. ], tot_loss[loss=0.3478, simple_loss=0.3917, pruned_loss=0.1519, over 1590978.83 frames. ], batch size: 22, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:14:53,628 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.211e+02 3.452e+02 4.368e+02 5.287e+02 1.393e+03, threshold=8.735e+02, percent-clipped=4.0 +2023-02-05 21:14:53,649 INFO [train.py:901] (2/4) Epoch 3, batch 850, loss[loss=0.3083, simple_loss=0.3602, pruned_loss=0.1282, over 7671.00 frames. ], tot_loss[loss=0.3468, simple_loss=0.3907, pruned_loss=0.1515, over 1599862.25 frames. ], batch size: 19, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:01,681 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2797, 2.7884, 2.2983, 3.2744, 1.9316, 2.0091, 2.1444, 2.7969], + device='cuda:2'), covar=tensor([0.0901, 0.0877, 0.1006, 0.0360, 0.1476, 0.1605, 0.1409, 0.0731], + device='cuda:2'), in_proj_covar=tensor([0.0316, 0.0330, 0.0314, 0.0229, 0.0301, 0.0330, 0.0354, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:15:19,772 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5327, 2.4621, 1.5560, 2.0267, 2.0377, 1.0831, 1.5298, 1.9586], + device='cuda:2'), covar=tensor([0.1296, 0.0420, 0.0980, 0.0582, 0.0703, 0.1245, 0.1003, 0.0773], + device='cuda:2'), in_proj_covar=tensor([0.0364, 0.0249, 0.0346, 0.0305, 0.0350, 0.0315, 0.0355, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:15:28,355 INFO [train.py:901] (2/4) Epoch 3, batch 900, loss[loss=0.3613, simple_loss=0.403, pruned_loss=0.1598, over 8500.00 frames. ], tot_loss[loss=0.3474, simple_loss=0.3911, pruned_loss=0.1518, over 1604536.98 frames. ], batch size: 26, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:53,797 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17104.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:02,281 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.375e+02 3.695e+02 4.540e+02 5.760e+02 9.795e+02, threshold=9.080e+02, percent-clipped=3.0 +2023-02-05 21:16:02,301 INFO [train.py:901] (2/4) Epoch 3, batch 950, loss[loss=0.2867, simple_loss=0.347, pruned_loss=0.1133, over 7930.00 frames. ], tot_loss[loss=0.3466, simple_loss=0.3906, pruned_loss=0.1513, over 1606181.19 frames. 
], batch size: 20, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:16:10,473 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17129.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:25,706 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 21:16:36,857 INFO [train.py:901] (2/4) Epoch 3, batch 1000, loss[loss=0.3981, simple_loss=0.426, pruned_loss=0.1851, over 8458.00 frames. ], tot_loss[loss=0.3493, simple_loss=0.3925, pruned_loss=0.1531, over 1605285.89 frames. ], batch size: 50, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:16:57,949 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 21:17:03,570 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:17:07,058 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5670, 2.5036, 1.5752, 1.9535, 2.1263, 1.2231, 1.5105, 2.1599], + device='cuda:2'), covar=tensor([0.1549, 0.0515, 0.1106, 0.0724, 0.0700, 0.1383, 0.1339, 0.0999], + device='cuda:2'), in_proj_covar=tensor([0.0368, 0.0250, 0.0349, 0.0311, 0.0346, 0.0320, 0.0361, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:17:08,327 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5284, 1.2859, 1.3429, 1.1889, 1.1734, 1.3472, 1.1675, 1.0993], + device='cuda:2'), covar=tensor([0.0842, 0.1605, 0.2191, 0.1661, 0.0821, 0.1731, 0.0998, 0.0758], + device='cuda:2'), in_proj_covar=tensor([0.0200, 0.0231, 0.0265, 0.0230, 0.0201, 0.0231, 0.0198, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:17:10,140 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 4.093e+02 4.952e+02 6.088e+02 1.030e+03, threshold=9.904e+02, percent-clipped=7.0 +2023-02-05 21:17:10,160 INFO [train.py:901] (2/4) Epoch 3, batch 1050, loss[loss=0.3539, simple_loss=0.3889, pruned_loss=0.1595, over 8078.00 frames. ], tot_loss[loss=0.3501, simple_loss=0.3932, pruned_loss=0.1535, over 1608544.28 frames. ], batch size: 21, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:10,171 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 21:17:11,624 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2568, 4.3499, 3.7648, 1.9395, 3.7620, 3.8816, 3.9415, 3.2561], + device='cuda:2'), covar=tensor([0.0843, 0.0494, 0.0824, 0.4312, 0.0591, 0.0748, 0.0997, 0.0716], + device='cuda:2'), in_proj_covar=tensor([0.0342, 0.0228, 0.0267, 0.0358, 0.0251, 0.0199, 0.0259, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:17:19,697 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17230.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:17:45,190 INFO [train.py:901] (2/4) Epoch 3, batch 1100, loss[loss=0.3523, simple_loss=0.4029, pruned_loss=0.1508, over 8132.00 frames. ], tot_loss[loss=0.3484, simple_loss=0.3921, pruned_loss=0.1523, over 1612415.92 frames. 
], batch size: 22, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:45,923 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:18:01,487 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-05 21:18:19,093 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.272e+02 3.840e+02 4.434e+02 5.714e+02 1.415e+03, threshold=8.869e+02, percent-clipped=3.0 +2023-02-05 21:18:19,113 INFO [train.py:901] (2/4) Epoch 3, batch 1150, loss[loss=0.295, simple_loss=0.3392, pruned_loss=0.1254, over 7256.00 frames. ], tot_loss[loss=0.3475, simple_loss=0.3917, pruned_loss=0.1516, over 1615890.31 frames. ], batch size: 16, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:18:22,484 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 21:18:38,635 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17345.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:18:52,840 INFO [train.py:901] (2/4) Epoch 3, batch 1200, loss[loss=0.4143, simple_loss=0.443, pruned_loss=0.1928, over 8192.00 frames. ], tot_loss[loss=0.3489, simple_loss=0.3927, pruned_loss=0.1526, over 1618697.57 frames. ], batch size: 23, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:19:01,229 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. limit=2.0 +2023-02-05 21:19:02,176 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17380.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:04,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17383.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:17,666 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17401.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:19:21,582 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1621, 4.2432, 3.6845, 1.4596, 3.6571, 3.5095, 3.8795, 3.1211], + device='cuda:2'), covar=tensor([0.0757, 0.0551, 0.0925, 0.4198, 0.0544, 0.0586, 0.1201, 0.0593], + device='cuda:2'), in_proj_covar=tensor([0.0338, 0.0227, 0.0269, 0.0352, 0.0245, 0.0196, 0.0252, 0.0184], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:19:28,364 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 3.772e+02 4.989e+02 5.905e+02 9.785e+02, threshold=9.978e+02, percent-clipped=4.0 +2023-02-05 21:19:28,384 INFO [train.py:901] (2/4) Epoch 3, batch 1250, loss[loss=0.3271, simple_loss=0.3704, pruned_loss=0.1419, over 7922.00 frames. ], tot_loss[loss=0.3466, simple_loss=0.3908, pruned_loss=0.1512, over 1618157.85 frames. ], batch size: 20, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:20:02,610 INFO [train.py:901] (2/4) Epoch 3, batch 1300, loss[loss=0.3617, simple_loss=0.4072, pruned_loss=0.1581, over 8503.00 frames. ], tot_loss[loss=0.3493, simple_loss=0.3933, pruned_loss=0.1527, over 1620908.40 frames. ], batch size: 28, lr: 2.44e-02, grad_scale: 8.0 +2023-02-05 21:20:19,165 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=2.00 vs. limit=2.0 +2023-02-05 21:20:37,547 INFO [train.py:901] (2/4) Epoch 3, batch 1350, loss[loss=0.3215, simple_loss=0.3857, pruned_loss=0.1287, over 8312.00 frames. 
], tot_loss[loss=0.3496, simple_loss=0.3926, pruned_loss=0.1532, over 1614728.05 frames. ], batch size: 25, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:20:38,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 4.258e+02 5.812e+02 8.345e+02 8.746e+03, threshold=1.162e+03, percent-clipped=16.0 +2023-02-05 21:21:00,236 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17551.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:21:11,045 INFO [train.py:901] (2/4) Epoch 3, batch 1400, loss[loss=0.2801, simple_loss=0.3302, pruned_loss=0.115, over 7538.00 frames. ], tot_loss[loss=0.3473, simple_loss=0.3906, pruned_loss=0.152, over 1612862.66 frames. ], batch size: 18, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:21:34,757 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17601.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:21:46,905 INFO [train.py:901] (2/4) Epoch 3, batch 1450, loss[loss=0.2959, simple_loss=0.3541, pruned_loss=0.1188, over 8355.00 frames. ], tot_loss[loss=0.3463, simple_loss=0.3898, pruned_loss=0.1514, over 1612722.39 frames. ], batch size: 24, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:21:47,590 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.138e+02 3.309e+02 4.161e+02 5.035e+02 1.114e+03, threshold=8.322e+02, percent-clipped=0.0 +2023-02-05 21:21:48,907 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 21:21:53,236 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17626.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:22:02,526 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17639.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:19,041 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17664.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,427 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17666.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,937 INFO [train.py:901] (2/4) Epoch 3, batch 1500, loss[loss=0.3558, simple_loss=0.4122, pruned_loss=0.1497, over 8646.00 frames. ], tot_loss[loss=0.3465, simple_loss=0.3898, pruned_loss=0.1516, over 1612444.05 frames. ], batch size: 34, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,184 INFO [train.py:901] (2/4) Epoch 3, batch 1550, loss[loss=0.3554, simple_loss=0.3989, pruned_loss=0.156, over 8078.00 frames. ], tot_loss[loss=0.3448, simple_loss=0.3885, pruned_loss=0.1505, over 1609115.67 frames. 
], batch size: 21, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,831 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.415e+02 3.678e+02 4.620e+02 5.892e+02 1.697e+03, threshold=9.239e+02, percent-clipped=9.0 +2023-02-05 21:22:57,023 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:01,086 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17724.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:16,060 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17745.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:23:17,957 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:30,932 INFO [train.py:901] (2/4) Epoch 3, batch 1600, loss[loss=0.3308, simple_loss=0.3755, pruned_loss=0.143, over 7675.00 frames. ], tot_loss[loss=0.3442, simple_loss=0.3882, pruned_loss=0.1501, over 1607795.46 frames. ], batch size: 19, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:05,145 INFO [train.py:901] (2/4) Epoch 3, batch 1650, loss[loss=0.3152, simple_loss=0.3864, pruned_loss=0.122, over 8195.00 frames. ], tot_loss[loss=0.3428, simple_loss=0.3878, pruned_loss=0.1489, over 1614862.01 frames. ], batch size: 23, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:05,806 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.309e+02 4.132e+02 5.477e+02 8.650e+02, threshold=8.264e+02, percent-clipped=0.0 +2023-02-05 21:24:20,681 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17839.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:24:21,857 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5664, 5.4627, 4.7746, 1.5514, 4.9307, 4.9875, 5.0699, 4.4192], + device='cuda:2'), covar=tensor([0.0683, 0.0489, 0.1015, 0.5330, 0.0417, 0.0601, 0.1069, 0.0597], + device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0232, 0.0278, 0.0364, 0.0257, 0.0210, 0.0269, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:24:35,253 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17860.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:24:39,697 INFO [train.py:901] (2/4) Epoch 3, batch 1700, loss[loss=0.3891, simple_loss=0.417, pruned_loss=0.1806, over 8581.00 frames. ], tot_loss[loss=0.3431, simple_loss=0.3881, pruned_loss=0.149, over 1616384.14 frames. ], batch size: 34, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:42,706 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 21:25:09,505 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-05 21:25:13,896 INFO [train.py:901] (2/4) Epoch 3, batch 1750, loss[loss=0.3689, simple_loss=0.4122, pruned_loss=0.1628, over 8289.00 frames. ], tot_loss[loss=0.3421, simple_loss=0.3872, pruned_loss=0.1485, over 1616948.04 frames. 
], batch size: 23, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:14,598 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.998e+02 5.161e+02 6.686e+02 1.470e+03, threshold=1.032e+03, percent-clipped=12.0 +2023-02-05 21:25:17,622 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17922.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:35,809 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17947.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:48,896 INFO [train.py:901] (2/4) Epoch 3, batch 1800, loss[loss=0.3301, simple_loss=0.3889, pruned_loss=0.1357, over 8341.00 frames. ], tot_loss[loss=0.3432, simple_loss=0.3877, pruned_loss=0.1493, over 1616373.22 frames. ], batch size: 26, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:25:57,025 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17978.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:25,065 INFO [train.py:901] (2/4) Epoch 3, batch 1850, loss[loss=0.2961, simple_loss=0.3482, pruned_loss=0.1219, over 7647.00 frames. ], tot_loss[loss=0.3452, simple_loss=0.3892, pruned_loss=0.1506, over 1616689.37 frames. ], batch size: 19, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:26:25,636 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.564e+02 4.327e+02 5.819e+02 2.228e+03, threshold=8.654e+02, percent-clipped=8.0 +2023-02-05 21:26:39,173 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9617, 4.0756, 3.5775, 1.5986, 3.4744, 3.2775, 3.7085, 3.0696], + device='cuda:2'), covar=tensor([0.0884, 0.0563, 0.0964, 0.4797, 0.0694, 0.0847, 0.0994, 0.0770], + device='cuda:2'), in_proj_covar=tensor([0.0340, 0.0232, 0.0281, 0.0365, 0.0246, 0.0204, 0.0258, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:26:55,942 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18062.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:59,906 INFO [train.py:901] (2/4) Epoch 3, batch 1900, loss[loss=0.3443, simple_loss=0.4003, pruned_loss=0.1441, over 8509.00 frames. ], tot_loss[loss=0.3443, simple_loss=0.3885, pruned_loss=0.1501, over 1615758.01 frames. ], batch size: 28, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:27:17,421 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18092.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:19,613 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:24,169 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 21:27:32,751 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 21:27:34,376 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18116.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:27:34,800 INFO [train.py:901] (2/4) Epoch 3, batch 1950, loss[loss=0.3197, simple_loss=0.3756, pruned_loss=0.1319, over 8039.00 frames. ], tot_loss[loss=0.3429, simple_loss=0.3875, pruned_loss=0.1491, over 1615296.34 frames. 
], batch size: 22, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:27:35,484 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.131e+02 3.385e+02 4.094e+02 5.586e+02 1.173e+03, threshold=8.188e+02, percent-clipped=3.0 +2023-02-05 21:27:36,197 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 21:27:37,027 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:39,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1692, 4.3007, 3.6948, 1.6964, 3.7001, 3.4570, 4.0261, 3.0502], + device='cuda:2'), covar=tensor([0.1040, 0.0585, 0.1071, 0.5133, 0.0652, 0.0747, 0.1222, 0.0798], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0240, 0.0287, 0.0378, 0.0252, 0.0215, 0.0270, 0.0196], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:27:51,205 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18141.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:27:55,040 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 21:28:02,893 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-05 21:28:07,359 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1411, 1.5761, 1.3102, 1.6648, 1.3894, 1.1024, 1.1266, 1.4628], + device='cuda:2'), covar=tensor([0.0957, 0.0512, 0.0970, 0.0550, 0.0779, 0.1138, 0.0951, 0.0702], + device='cuda:2'), in_proj_covar=tensor([0.0365, 0.0251, 0.0343, 0.0308, 0.0347, 0.0308, 0.0356, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:28:09,116 INFO [train.py:901] (2/4) Epoch 3, batch 2000, loss[loss=0.4709, simple_loss=0.482, pruned_loss=0.2299, over 8396.00 frames. ], tot_loss[loss=0.3438, simple_loss=0.3879, pruned_loss=0.1499, over 1617748.92 frames. ], batch size: 48, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:17,041 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18177.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:27,336 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:28,012 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18193.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:38,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:44,799 INFO [train.py:901] (2/4) Epoch 3, batch 2050, loss[loss=0.2619, simple_loss=0.3097, pruned_loss=0.107, over 7682.00 frames. ], tot_loss[loss=0.3436, simple_loss=0.3876, pruned_loss=0.1497, over 1619471.65 frames. ], batch size: 18, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:46,138 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.474e+02 3.817e+02 4.995e+02 6.129e+02 1.664e+03, threshold=9.991e+02, percent-clipped=7.0 +2023-02-05 21:29:19,889 INFO [train.py:901] (2/4) Epoch 3, batch 2100, loss[loss=0.3824, simple_loss=0.4135, pruned_loss=0.1756, over 8332.00 frames. ], tot_loss[loss=0.3447, simple_loss=0.3886, pruned_loss=0.1504, over 1622639.56 frames. 
], batch size: 26, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,197 INFO [train.py:901] (2/4) Epoch 3, batch 2150, loss[loss=0.3342, simple_loss=0.3963, pruned_loss=0.136, over 8339.00 frames. ], tot_loss[loss=0.3451, simple_loss=0.3892, pruned_loss=0.1505, over 1622368.64 frames. ], batch size: 26, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,882 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.297e+02 3.744e+02 4.718e+02 5.936e+02 1.452e+03, threshold=9.436e+02, percent-clipped=4.0 +2023-02-05 21:29:59,202 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18322.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:30:13,276 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2952, 1.5688, 2.3630, 0.9873, 1.7345, 1.5349, 1.3361, 1.6377], + device='cuda:2'), covar=tensor([0.1094, 0.1205, 0.0434, 0.2096, 0.0885, 0.1663, 0.1037, 0.1119], + device='cuda:2'), in_proj_covar=tensor([0.0436, 0.0398, 0.0470, 0.0487, 0.0535, 0.0478, 0.0426, 0.0540], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 21:30:31,069 INFO [train.py:901] (2/4) Epoch 3, batch 2200, loss[loss=0.4312, simple_loss=0.444, pruned_loss=0.2093, over 8137.00 frames. ], tot_loss[loss=0.3459, simple_loss=0.3901, pruned_loss=0.1509, over 1620904.12 frames. ], batch size: 22, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:30:38,558 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 21:31:06,829 INFO [train.py:901] (2/4) Epoch 3, batch 2250, loss[loss=0.2884, simple_loss=0.3328, pruned_loss=0.122, over 7529.00 frames. ], tot_loss[loss=0.3461, simple_loss=0.3903, pruned_loss=0.151, over 1621700.74 frames. ], batch size: 18, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:07,500 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.379e+02 3.424e+02 4.222e+02 5.561e+02 1.530e+03, threshold=8.445e+02, percent-clipped=2.0 +2023-02-05 21:31:07,729 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8164, 2.2974, 1.5895, 2.0915, 1.7372, 1.2803, 1.5982, 2.0171], + device='cuda:2'), covar=tensor([0.1126, 0.0539, 0.0976, 0.0632, 0.0845, 0.1203, 0.1100, 0.0746], + device='cuda:2'), in_proj_covar=tensor([0.0368, 0.0254, 0.0340, 0.0313, 0.0348, 0.0308, 0.0358, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:31:18,177 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18433.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:21,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18437.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:36,360 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18458.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:39,797 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:42,291 INFO [train.py:901] (2/4) Epoch 3, batch 2300, loss[loss=0.39, simple_loss=0.4269, pruned_loss=0.1766, over 8191.00 frames. ], tot_loss[loss=0.3436, simple_loss=0.3886, pruned_loss=0.1493, over 1620234.84 frames. 
], batch size: 23, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:56,792 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:11,144 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18508.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:17,111 INFO [train.py:901] (2/4) Epoch 3, batch 2350, loss[loss=0.2818, simple_loss=0.3371, pruned_loss=0.1133, over 8093.00 frames. ], tot_loss[loss=0.3424, simple_loss=0.3872, pruned_loss=0.1488, over 1621784.06 frames. ], batch size: 21, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:32:17,761 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.449e+02 3.759e+02 4.661e+02 5.652e+02 9.227e+02, threshold=9.323e+02, percent-clipped=1.0 +2023-02-05 21:32:23,349 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18526.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:32:30,480 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:31,168 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18537.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:37,151 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3790, 1.6686, 2.1834, 1.3286, 1.0320, 1.9216, 0.2971, 1.0209], + device='cuda:2'), covar=tensor([0.2970, 0.2171, 0.0748, 0.2212, 0.5121, 0.1089, 0.6199, 0.2139], + device='cuda:2'), in_proj_covar=tensor([0.0110, 0.0102, 0.0081, 0.0153, 0.0160, 0.0078, 0.0152, 0.0111], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:32:51,314 INFO [train.py:901] (2/4) Epoch 3, batch 2400, loss[loss=0.4109, simple_loss=0.4305, pruned_loss=0.1957, over 7973.00 frames. ], tot_loss[loss=0.3432, simple_loss=0.3875, pruned_loss=0.1495, over 1623929.42 frames. ], batch size: 21, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:32:59,586 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-05 21:33:03,868 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2023, 1.4080, 2.2916, 0.9981, 2.1271, 2.3691, 2.3801, 1.9458], + device='cuda:2'), covar=tensor([0.1219, 0.1137, 0.0469, 0.2047, 0.0538, 0.0465, 0.0419, 0.0863], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0261, 0.0198, 0.0259, 0.0200, 0.0168, 0.0167, 0.0246], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:33:20,500 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-05 21:33:24,822 INFO [train.py:901] (2/4) Epoch 3, batch 2450, loss[loss=0.3436, simple_loss=0.3618, pruned_loss=0.1627, over 7536.00 frames. ], tot_loss[loss=0.3442, simple_loss=0.3882, pruned_loss=0.1501, over 1622622.07 frames. 
], batch size: 18, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:33:25,539 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 3.618e+02 4.763e+02 6.456e+02 1.024e+03, threshold=9.527e+02, percent-clipped=2.0 +2023-02-05 21:33:49,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:49,832 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18652.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:59,418 INFO [train.py:901] (2/4) Epoch 3, batch 2500, loss[loss=0.3269, simple_loss=0.3782, pruned_loss=0.1378, over 8582.00 frames. ], tot_loss[loss=0.3416, simple_loss=0.3865, pruned_loss=0.1484, over 1617446.87 frames. ], batch size: 39, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:17,682 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:18,343 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18693.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:28,091 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8083, 2.6522, 1.8314, 2.1732, 2.0540, 1.4471, 1.7517, 2.2284], + device='cuda:2'), covar=tensor([0.1233, 0.0403, 0.0859, 0.0649, 0.0721, 0.1141, 0.1018, 0.0826], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0243, 0.0337, 0.0308, 0.0331, 0.0309, 0.0349, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:34:33,853 INFO [train.py:901] (2/4) Epoch 3, batch 2550, loss[loss=0.3455, simple_loss=0.3911, pruned_loss=0.15, over 8193.00 frames. ], tot_loss[loss=0.3431, simple_loss=0.3876, pruned_loss=0.1493, over 1617584.65 frames. ], batch size: 23, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:34,506 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.889e+02 4.529e+02 5.619e+02 1.309e+03, threshold=9.058e+02, percent-clipped=5.0 +2023-02-05 21:34:34,733 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:35:08,025 INFO [train.py:901] (2/4) Epoch 3, batch 2600, loss[loss=0.3459, simple_loss=0.3904, pruned_loss=0.1507, over 8436.00 frames. ], tot_loss[loss=0.3405, simple_loss=0.3854, pruned_loss=0.1478, over 1610692.61 frames. ], batch size: 27, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:28,075 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0 +2023-02-05 21:35:35,642 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1185, 1.2242, 4.0960, 1.5347, 2.1931, 4.8024, 4.4458, 4.3569], + device='cuda:2'), covar=tensor([0.1381, 0.1818, 0.0277, 0.2101, 0.0826, 0.0317, 0.0309, 0.0529], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0264, 0.0205, 0.0264, 0.0206, 0.0173, 0.0174, 0.0250], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:35:44,450 INFO [train.py:901] (2/4) Epoch 3, batch 2650, loss[loss=0.3089, simple_loss=0.3579, pruned_loss=0.1299, over 7809.00 frames. ], tot_loss[loss=0.3413, simple_loss=0.3861, pruned_loss=0.1482, over 1614860.55 frames. 
], batch size: 19, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:45,136 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.426e+02 4.272e+02 5.708e+02 1.020e+03, threshold=8.544e+02, percent-clipped=5.0 +2023-02-05 21:36:08,384 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18852.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:12,057 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.6743, 2.9007, 4.3118, 3.8412, 3.5216, 2.8222, 2.1101, 2.4835], + device='cuda:2'), covar=tensor([0.0617, 0.0980, 0.0209, 0.0318, 0.0396, 0.0392, 0.0526, 0.0872], + device='cuda:2'), in_proj_covar=tensor([0.0571, 0.0498, 0.0400, 0.0449, 0.0546, 0.0459, 0.0486, 0.0483], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:36:19,356 INFO [train.py:901] (2/4) Epoch 3, batch 2700, loss[loss=0.3828, simple_loss=0.4047, pruned_loss=0.1805, over 6829.00 frames. ], tot_loss[loss=0.3409, simple_loss=0.386, pruned_loss=0.1479, over 1610362.16 frames. ], batch size: 71, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:21,520 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18870.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:36:29,668 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8110, 2.1208, 1.7477, 2.6671, 1.2392, 1.2884, 2.0409, 2.2403], + device='cuda:2'), covar=tensor([0.1113, 0.1500, 0.1741, 0.0624, 0.2068, 0.2690, 0.1889, 0.1147], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0312, 0.0310, 0.0230, 0.0290, 0.0316, 0.0336, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:36:29,702 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4480, 1.9573, 1.8447, 0.5780, 1.7926, 1.3787, 0.4328, 1.9586], + device='cuda:2'), covar=tensor([0.0130, 0.0071, 0.0073, 0.0150, 0.0119, 0.0224, 0.0205, 0.0054], + device='cuda:2'), in_proj_covar=tensor([0.0217, 0.0153, 0.0131, 0.0199, 0.0150, 0.0269, 0.0211, 0.0179], + device='cuda:2'), out_proj_covar=tensor([1.0840e-04, 7.6736e-05, 6.4520e-05, 9.6488e-05, 7.7425e-05, 1.4440e-04, + 1.0793e-04, 9.0180e-05], device='cuda:2') +2023-02-05 21:36:40,055 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-05 21:36:42,538 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9905, 1.6293, 3.3515, 1.0254, 1.9711, 3.5933, 3.4078, 3.1188], + device='cuda:2'), covar=tensor([0.1075, 0.1352, 0.0287, 0.2203, 0.0794, 0.0267, 0.0312, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0258, 0.0200, 0.0258, 0.0205, 0.0170, 0.0173, 0.0246], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:36:47,163 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:47,843 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18908.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:53,561 INFO [train.py:901] (2/4) Epoch 3, batch 2750, loss[loss=0.3392, simple_loss=0.3638, pruned_loss=0.1573, over 7422.00 frames. ], tot_loss[loss=0.3414, simple_loss=0.3863, pruned_loss=0.1482, over 1611024.92 frames. 
], batch size: 17, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:54,225 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.973e+02 3.360e+02 4.052e+02 5.079e+02 9.265e+02, threshold=8.105e+02, percent-clipped=2.0 +2023-02-05 21:37:05,008 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:05,660 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18933.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:15,749 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:28,077 INFO [train.py:901] (2/4) Epoch 3, batch 2800, loss[loss=0.339, simple_loss=0.39, pruned_loss=0.1439, over 8595.00 frames. ], tot_loss[loss=0.341, simple_loss=0.3863, pruned_loss=0.1479, over 1615446.26 frames. ], batch size: 39, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:37:28,265 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:31,702 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1972, 3.5989, 1.8218, 2.0820, 2.6103, 1.4368, 1.7274, 2.8791], + device='cuda:2'), covar=tensor([0.1251, 0.0314, 0.1131, 0.0954, 0.0894, 0.1369, 0.1387, 0.0997], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0241, 0.0336, 0.0303, 0.0340, 0.0317, 0.0351, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:37:41,140 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18985.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:37:49,287 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7201, 2.0361, 3.4092, 2.7864, 2.6446, 1.9214, 1.3819, 1.4011], + device='cuda:2'), covar=tensor([0.0738, 0.1059, 0.0189, 0.0383, 0.0429, 0.0492, 0.0653, 0.0968], + device='cuda:2'), in_proj_covar=tensor([0.0569, 0.0499, 0.0397, 0.0447, 0.0550, 0.0462, 0.0485, 0.0490], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:38:03,342 INFO [train.py:901] (2/4) Epoch 3, batch 2850, loss[loss=0.3488, simple_loss=0.3826, pruned_loss=0.1574, over 7979.00 frames. ], tot_loss[loss=0.3395, simple_loss=0.385, pruned_loss=0.147, over 1607822.56 frames. ], batch size: 21, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:38:03,917 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 3.511e+02 4.402e+02 5.555e+02 1.104e+03, threshold=8.804e+02, percent-clipped=5.0 +2023-02-05 21:38:04,210 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6593, 2.1484, 3.3011, 2.9373, 2.8634, 2.2026, 1.5611, 2.1291], + device='cuda:2'), covar=tensor([0.0553, 0.0888, 0.0153, 0.0282, 0.0334, 0.0371, 0.0522, 0.0668], + device='cuda:2'), in_proj_covar=tensor([0.0575, 0.0502, 0.0399, 0.0451, 0.0555, 0.0463, 0.0485, 0.0493], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:38:15,997 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19036.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:38:37,619 INFO [train.py:901] (2/4) Epoch 3, batch 2900, loss[loss=0.3171, simple_loss=0.3662, pruned_loss=0.134, over 8355.00 frames. 
], tot_loss[loss=0.3413, simple_loss=0.3866, pruned_loss=0.148, over 1608159.67 frames. ], batch size: 24, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:39:00,010 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5466, 1.9226, 2.0280, 1.6073, 1.1570, 2.1454, 0.3729, 1.2952], + device='cuda:2'), covar=tensor([0.2756, 0.2212, 0.1541, 0.2293, 0.4740, 0.0654, 0.5949, 0.2214], + device='cuda:2'), in_proj_covar=tensor([0.0111, 0.0100, 0.0080, 0.0147, 0.0163, 0.0077, 0.0149, 0.0109], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:39:02,476 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 21:39:04,034 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0741, 3.5006, 2.0819, 2.3250, 2.7767, 1.9749, 1.7832, 2.6272], + device='cuda:2'), covar=tensor([0.1219, 0.0406, 0.0819, 0.0841, 0.0562, 0.0952, 0.1254, 0.0940], + device='cuda:2'), in_proj_covar=tensor([0.0360, 0.0243, 0.0334, 0.0307, 0.0340, 0.0315, 0.0346, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:39:11,756 INFO [train.py:901] (2/4) Epoch 3, batch 2950, loss[loss=0.3708, simple_loss=0.4117, pruned_loss=0.1649, over 8185.00 frames. ], tot_loss[loss=0.3404, simple_loss=0.3859, pruned_loss=0.1475, over 1608472.81 frames. ], batch size: 23, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:12,415 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.613e+02 4.498e+02 5.900e+02 1.326e+03, threshold=8.996e+02, percent-clipped=8.0 +2023-02-05 21:39:32,495 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19147.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:39:35,236 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:39:46,226 INFO [train.py:901] (2/4) Epoch 3, batch 3000, loss[loss=0.3521, simple_loss=0.3832, pruned_loss=0.1606, over 7798.00 frames. ], tot_loss[loss=0.3406, simple_loss=0.3861, pruned_loss=0.1475, over 1614511.96 frames. ], batch size: 19, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:46,226 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 21:39:58,667 INFO [train.py:935] (2/4) Epoch 3, validation: loss=0.2584, simple_loss=0.3473, pruned_loss=0.08481, over 944034.00 frames. +2023-02-05 21:39:58,667 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 21:40:29,494 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0 +2023-02-05 21:40:33,769 INFO [train.py:901] (2/4) Epoch 3, batch 3050, loss[loss=0.2731, simple_loss=0.3448, pruned_loss=0.1007, over 7805.00 frames. ], tot_loss[loss=0.3396, simple_loss=0.3855, pruned_loss=0.1469, over 1617244.04 frames. ], batch size: 20, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:40:34,454 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.016e+02 3.526e+02 4.458e+02 6.217e+02 1.354e+03, threshold=8.917e+02, percent-clipped=3.0 +2023-02-05 21:40:37,621 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. 
limit=2.0 +2023-02-05 21:40:38,079 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:40:50,812 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19241.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:40:55,430 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19248.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:07,578 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19266.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:41:07,999 INFO [train.py:901] (2/4) Epoch 3, batch 3100, loss[loss=0.3384, simple_loss=0.3766, pruned_loss=0.1501, over 8037.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.384, pruned_loss=0.1459, over 1613006.69 frames. ], batch size: 20, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:41:25,129 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-05 21:41:26,059 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19292.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:43,833 INFO [train.py:901] (2/4) Epoch 3, batch 3150, loss[loss=0.2924, simple_loss=0.3324, pruned_loss=0.1262, over 8088.00 frames. ], tot_loss[loss=0.3388, simple_loss=0.3848, pruned_loss=0.1464, over 1612711.21 frames. ], batch size: 21, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:41:44,474 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.556e+02 3.507e+02 4.387e+02 6.193e+02 1.521e+03, threshold=8.773e+02, percent-clipped=4.0 +2023-02-05 21:42:00,580 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8886, 1.3499, 3.3864, 1.2093, 2.3913, 3.7491, 3.5149, 3.2702], + device='cuda:2'), covar=tensor([0.1063, 0.1539, 0.0324, 0.1892, 0.0716, 0.0208, 0.0300, 0.0521], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0264, 0.0204, 0.0268, 0.0206, 0.0172, 0.0175, 0.0250], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:42:11,539 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.68 vs. limit=5.0 +2023-02-05 21:42:17,836 INFO [train.py:901] (2/4) Epoch 3, batch 3200, loss[loss=0.2973, simple_loss=0.342, pruned_loss=0.1263, over 7525.00 frames. ], tot_loss[loss=0.3385, simple_loss=0.3843, pruned_loss=0.1464, over 1607013.47 frames. ], batch size: 18, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:45,881 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:45,922 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:49,237 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19412.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:53,531 INFO [train.py:901] (2/4) Epoch 3, batch 3250, loss[loss=0.3204, simple_loss=0.3902, pruned_loss=0.1253, over 8104.00 frames. ], tot_loss[loss=0.3389, simple_loss=0.3848, pruned_loss=0.1465, over 1609515.72 frames. 
], batch size: 23, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:54,129 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 3.440e+02 4.583e+02 5.736e+02 1.373e+03, threshold=9.167e+02, percent-clipped=8.0 +2023-02-05 21:43:03,730 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:43:26,821 INFO [train.py:901] (2/4) Epoch 3, batch 3300, loss[loss=0.3318, simple_loss=0.3877, pruned_loss=0.138, over 8022.00 frames. ], tot_loss[loss=0.3381, simple_loss=0.3839, pruned_loss=0.1461, over 1610494.57 frames. ], batch size: 22, lr: 2.32e-02, grad_scale: 8.0 +2023-02-05 21:43:27,954 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-05 21:43:43,378 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19491.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:43:49,040 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-05 21:44:01,025 INFO [train.py:901] (2/4) Epoch 3, batch 3350, loss[loss=0.3586, simple_loss=0.4161, pruned_loss=0.1505, over 8261.00 frames. ], tot_loss[loss=0.3381, simple_loss=0.3838, pruned_loss=0.1462, over 1611208.03 frames. ], batch size: 24, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:44:01,692 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 3.690e+02 4.650e+02 5.581e+02 1.223e+03, threshold=9.300e+02, percent-clipped=5.0 +2023-02-05 21:44:35,822 INFO [train.py:901] (2/4) Epoch 3, batch 3400, loss[loss=0.4033, simple_loss=0.4156, pruned_loss=0.1955, over 6832.00 frames. ], tot_loss[loss=0.3386, simple_loss=0.3845, pruned_loss=0.1463, over 1610675.78 frames. ], batch size: 71, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:02,613 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19606.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:45:09,816 INFO [train.py:901] (2/4) Epoch 3, batch 3450, loss[loss=0.2712, simple_loss=0.3261, pruned_loss=0.1081, over 8245.00 frames. ], tot_loss[loss=0.3378, simple_loss=0.3835, pruned_loss=0.146, over 1609872.12 frames. ], batch size: 22, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:10,436 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.543e+02 3.801e+02 4.733e+02 6.108e+02 1.526e+03, threshold=9.466e+02, percent-clipped=4.0 +2023-02-05 21:45:23,376 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19636.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:30,105 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7523, 1.5719, 2.5657, 1.3934, 2.0985, 2.7991, 2.6509, 2.5477], + device='cuda:2'), covar=tensor([0.0959, 0.1231, 0.0693, 0.1557, 0.0822, 0.0293, 0.0349, 0.0497], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0262, 0.0200, 0.0260, 0.0205, 0.0171, 0.0174, 0.0245], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 21:45:42,898 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19663.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:45,374 INFO [train.py:901] (2/4) Epoch 3, batch 3500, loss[loss=0.3256, simple_loss=0.3753, pruned_loss=0.138, over 8133.00 frames. ], tot_loss[loss=0.3399, simple_loss=0.3848, pruned_loss=0.1475, over 1608377.60 frames. 
], batch size: 22, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:45:58,035 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 21:45:59,527 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:00,151 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0629, 1.1469, 3.1688, 0.9666, 2.7229, 2.6771, 2.7883, 2.7351], + device='cuda:2'), covar=tensor([0.0449, 0.2778, 0.0393, 0.1840, 0.1146, 0.0580, 0.0457, 0.0577], + device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0412, 0.0285, 0.0315, 0.0385, 0.0309, 0.0301, 0.0325], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 21:46:00,220 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1699, 1.5868, 1.3092, 1.7406, 1.3344, 1.0608, 1.2972, 1.5346], + device='cuda:2'), covar=tensor([0.0947, 0.0504, 0.1006, 0.0504, 0.0714, 0.1142, 0.0792, 0.0665], + device='cuda:2'), in_proj_covar=tensor([0.0363, 0.0246, 0.0335, 0.0311, 0.0339, 0.0317, 0.0351, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 21:46:07,364 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3383, 1.6330, 1.2678, 1.9261, 0.8890, 1.0991, 1.3655, 1.6122], + device='cuda:2'), covar=tensor([0.1388, 0.1153, 0.1663, 0.0755, 0.1760, 0.2491, 0.1505, 0.1180], + device='cuda:2'), in_proj_covar=tensor([0.0299, 0.0304, 0.0309, 0.0233, 0.0291, 0.0313, 0.0330, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:46:15,476 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.8922, 1.1231, 0.8433, 1.3087, 0.6747, 0.6883, 0.9340, 1.1539], + device='cuda:2'), covar=tensor([0.0995, 0.0943, 0.1313, 0.0598, 0.1309, 0.1751, 0.1055, 0.0803], + device='cuda:2'), in_proj_covar=tensor([0.0300, 0.0304, 0.0310, 0.0233, 0.0293, 0.0313, 0.0331, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:46:19,299 INFO [train.py:901] (2/4) Epoch 3, batch 3550, loss[loss=0.3137, simple_loss=0.3742, pruned_loss=0.1266, over 8245.00 frames. ], tot_loss[loss=0.3425, simple_loss=0.387, pruned_loss=0.149, over 1609757.30 frames. 
], batch size: 24, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:46:19,960 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.944e+02 3.514e+02 4.193e+02 5.166e+02 1.109e+03, threshold=8.387e+02, percent-clipped=2.0 +2023-02-05 21:46:37,151 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6920, 2.2593, 4.4685, 1.0902, 2.8096, 2.1683, 1.6314, 2.3394], + device='cuda:2'), covar=tensor([0.1277, 0.1362, 0.0469, 0.2465, 0.1182, 0.1806, 0.1167, 0.1982], + device='cuda:2'), in_proj_covar=tensor([0.0431, 0.0401, 0.0476, 0.0488, 0.0540, 0.0477, 0.0423, 0.0535], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 21:46:40,592 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5656, 1.8653, 2.2015, 1.5760, 0.9741, 1.9172, 0.3250, 1.3160], + device='cuda:2'), covar=tensor([0.2569, 0.1547, 0.0857, 0.1867, 0.4911, 0.0668, 0.4823, 0.1773], + device='cuda:2'), in_proj_covar=tensor([0.0103, 0.0090, 0.0078, 0.0145, 0.0159, 0.0075, 0.0144, 0.0104], + device='cuda:2'), out_proj_covar=tensor([1.2990e-04, 1.1744e-04, 1.0077e-04, 1.7279e-04, 1.8760e-04, 9.8662e-05, + 1.7485e-04, 1.3789e-04], device='cuda:2') +2023-02-05 21:46:46,364 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19756.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:54,372 INFO [train.py:901] (2/4) Epoch 3, batch 3600, loss[loss=0.3619, simple_loss=0.4033, pruned_loss=0.1603, over 8531.00 frames. ], tot_loss[loss=0.3397, simple_loss=0.3849, pruned_loss=0.1472, over 1607624.84 frames. ], batch size: 49, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:47:28,224 INFO [train.py:901] (2/4) Epoch 3, batch 3650, loss[loss=0.316, simple_loss=0.3794, pruned_loss=0.1263, over 8034.00 frames. ], tot_loss[loss=0.3392, simple_loss=0.3848, pruned_loss=0.1468, over 1608813.39 frames. ], batch size: 22, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:47:28,895 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 3.610e+02 4.497e+02 5.952e+02 1.837e+03, threshold=8.994e+02, percent-clipped=7.0 +2023-02-05 21:47:58,279 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3544, 1.6158, 1.9192, 1.3240, 0.7912, 1.8140, 0.2444, 1.2384], + device='cuda:2'), covar=tensor([0.2707, 0.1262, 0.0855, 0.1956, 0.3951, 0.0670, 0.5212, 0.1821], + device='cuda:2'), in_proj_covar=tensor([0.0107, 0.0093, 0.0078, 0.0150, 0.0166, 0.0077, 0.0151, 0.0106], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:47:58,726 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 21:47:59,619 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19862.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:48:02,764 INFO [train.py:901] (2/4) Epoch 3, batch 3700, loss[loss=0.395, simple_loss=0.433, pruned_loss=0.1786, over 8361.00 frames. ], tot_loss[loss=0.3381, simple_loss=0.3839, pruned_loss=0.1462, over 1608072.16 frames. 
], batch size: 26, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:05,648 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19871.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:48:14,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.5023, 3.5649, 3.0770, 1.6496, 3.0438, 2.9820, 3.2694, 2.5585], + device='cuda:2'), covar=tensor([0.0929, 0.0703, 0.1008, 0.4018, 0.0670, 0.0769, 0.1137, 0.0868], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0241, 0.0283, 0.0367, 0.0257, 0.0213, 0.0263, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 21:48:17,451 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19887.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:48:37,847 INFO [train.py:901] (2/4) Epoch 3, batch 3750, loss[loss=0.3237, simple_loss=0.3461, pruned_loss=0.1507, over 7643.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3831, pruned_loss=0.1457, over 1606832.03 frames. ], batch size: 19, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:38,369 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 3.342e+02 4.116e+02 5.480e+02 1.463e+03, threshold=8.233e+02, percent-clipped=1.0 +2023-02-05 21:48:45,548 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.57 vs. limit=5.0 +2023-02-05 21:48:52,341 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 21:49:12,027 INFO [train.py:901] (2/4) Epoch 3, batch 3800, loss[loss=0.3784, simple_loss=0.4258, pruned_loss=0.1655, over 8517.00 frames. ], tot_loss[loss=0.3385, simple_loss=0.3842, pruned_loss=0.1464, over 1607345.85 frames. ], batch size: 28, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:49:20,776 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19980.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:49:29,965 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-05 21:49:48,451 INFO [train.py:901] (2/4) Epoch 3, batch 3850, loss[loss=0.3475, simple_loss=0.3941, pruned_loss=0.1504, over 8130.00 frames. ], tot_loss[loss=0.339, simple_loss=0.3852, pruned_loss=0.1464, over 1614089.66 frames. ], batch size: 22, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:49:49,086 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.536e+02 4.444e+02 5.257e+02 1.055e+03, threshold=8.889e+02, percent-clipped=4.0 +2023-02-05 21:49:57,544 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 21:50:01,547 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 21:50:01,930 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 21:50:02,728 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20038.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:19,034 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-05 21:50:22,596 INFO [train.py:901] (2/4) Epoch 3, batch 3900, loss[loss=0.2579, simple_loss=0.3199, pruned_loss=0.09796, over 7819.00 frames. ], tot_loss[loss=0.3402, simple_loss=0.3864, pruned_loss=0.147, over 1620042.64 frames. 
], batch size: 20, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:41,536 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:50,429 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20107.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:51,159 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3450, 1.6520, 2.0659, 1.7607, 0.8249, 1.8786, 0.2949, 1.4239], + device='cuda:2'), covar=tensor([0.3024, 0.1566, 0.1114, 0.1647, 0.4366, 0.0740, 0.5983, 0.2115], + device='cuda:2'), in_proj_covar=tensor([0.0108, 0.0093, 0.0081, 0.0142, 0.0164, 0.0075, 0.0150, 0.0108], + device='cuda:2'), out_proj_covar=tensor([1.3734e-04, 1.2106e-04, 1.0481e-04, 1.7228e-04, 1.9372e-04, 9.8992e-05, + 1.8253e-04, 1.4211e-04], device='cuda:2') +2023-02-05 21:50:56,844 INFO [train.py:901] (2/4) Epoch 3, batch 3950, loss[loss=0.3549, simple_loss=0.4058, pruned_loss=0.152, over 8502.00 frames. ], tot_loss[loss=0.3388, simple_loss=0.3854, pruned_loss=0.1461, over 1619394.44 frames. ], batch size: 28, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:57,405 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.492e+02 4.461e+02 6.032e+02 1.371e+03, threshold=8.922e+02, percent-clipped=4.0 +2023-02-05 21:51:05,052 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:06,976 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20130.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:21,460 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20152.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:31,209 INFO [train.py:901] (2/4) Epoch 3, batch 4000, loss[loss=0.3048, simple_loss=0.3461, pruned_loss=0.1317, over 7570.00 frames. ], tot_loss[loss=0.3365, simple_loss=0.3838, pruned_loss=0.1447, over 1618387.39 frames. ], batch size: 18, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:52:05,183 INFO [train.py:901] (2/4) Epoch 3, batch 4050, loss[loss=0.2823, simple_loss=0.3432, pruned_loss=0.1107, over 7802.00 frames. ], tot_loss[loss=0.3349, simple_loss=0.3828, pruned_loss=0.1435, over 1621755.18 frames. ], batch size: 20, lr: 2.28e-02, grad_scale: 16.0 +2023-02-05 21:52:05,854 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.226e+02 3.505e+02 4.242e+02 5.307e+02 1.364e+03, threshold=8.485e+02, percent-clipped=4.0 +2023-02-05 21:52:09,350 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20222.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:52:40,358 INFO [train.py:901] (2/4) Epoch 3, batch 4100, loss[loss=0.2805, simple_loss=0.3272, pruned_loss=0.1169, over 7263.00 frames. ], tot_loss[loss=0.3352, simple_loss=0.3826, pruned_loss=0.1439, over 1617262.76 frames. 
], batch size: 16, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:52:41,887 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0236, 1.2181, 3.1803, 0.9257, 2.5602, 2.6644, 2.7837, 2.7506], + device='cuda:2'), covar=tensor([0.0495, 0.2911, 0.0438, 0.2040, 0.1298, 0.0560, 0.0541, 0.0713], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0420, 0.0299, 0.0327, 0.0397, 0.0308, 0.0306, 0.0340], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 21:53:14,414 INFO [train.py:901] (2/4) Epoch 3, batch 4150, loss[loss=0.4247, simple_loss=0.4506, pruned_loss=0.1995, over 8562.00 frames. ], tot_loss[loss=0.3339, simple_loss=0.3818, pruned_loss=0.143, over 1617210.17 frames. ], batch size: 49, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:53:15,797 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.167e+02 3.849e+02 4.660e+02 5.932e+02 1.097e+03, threshold=9.320e+02, percent-clipped=6.0 +2023-02-05 21:53:38,373 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20351.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:53:49,990 INFO [train.py:901] (2/4) Epoch 3, batch 4200, loss[loss=0.3059, simple_loss=0.3678, pruned_loss=0.122, over 8285.00 frames. ], tot_loss[loss=0.3341, simple_loss=0.3821, pruned_loss=0.143, over 1619214.69 frames. ], batch size: 23, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:53:55,432 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 21:53:56,307 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20376.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:00,118 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:16,269 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20406.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:16,803 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 21:54:21,047 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0 +2023-02-05 21:54:24,063 INFO [train.py:901] (2/4) Epoch 3, batch 4250, loss[loss=0.3482, simple_loss=0.3973, pruned_loss=0.1495, over 8296.00 frames. ], tot_loss[loss=0.3362, simple_loss=0.3831, pruned_loss=0.1446, over 1614946.27 frames. 
], batch size: 23, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:54:25,370 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.360e+02 3.627e+02 5.036e+02 6.332e+02 1.636e+03, threshold=1.007e+03, percent-clipped=4.0 +2023-02-05 21:54:37,009 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20436.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:47,726 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20451.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:49,942 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6510, 1.9345, 3.4879, 1.1631, 1.9959, 1.8440, 1.7194, 1.8500], + device='cuda:2'), covar=tensor([0.1255, 0.1416, 0.0454, 0.2315, 0.1248, 0.1866, 0.1033, 0.1819], + device='cuda:2'), in_proj_covar=tensor([0.0440, 0.0404, 0.0479, 0.0496, 0.0539, 0.0482, 0.0426, 0.0541], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 21:54:59,263 INFO [train.py:901] (2/4) Epoch 3, batch 4300, loss[loss=0.3592, simple_loss=0.4177, pruned_loss=0.1503, over 8788.00 frames. ], tot_loss[loss=0.3361, simple_loss=0.3834, pruned_loss=0.1444, over 1615971.31 frames. ], batch size: 30, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:04,853 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20474.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:20,176 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:33,531 INFO [train.py:901] (2/4) Epoch 3, batch 4350, loss[loss=0.3109, simple_loss=0.3713, pruned_loss=0.1253, over 8255.00 frames. ], tot_loss[loss=0.3351, simple_loss=0.3825, pruned_loss=0.1439, over 1611032.93 frames. ], batch size: 24, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:34,898 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.452e+02 4.356e+02 5.638e+02 1.577e+03, threshold=8.711e+02, percent-clipped=2.0 +2023-02-05 21:55:46,525 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 21:55:50,083 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=2.07 vs. limit=2.0 +2023-02-05 21:56:05,194 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-05 21:56:06,880 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:06,996 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:07,507 INFO [train.py:901] (2/4) Epoch 3, batch 4400, loss[loss=0.3036, simple_loss=0.3534, pruned_loss=0.1269, over 8084.00 frames. ], tot_loss[loss=0.3352, simple_loss=0.3821, pruned_loss=0.1442, over 1609299.96 frames. ], batch size: 21, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:24,130 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20589.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:27,495 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-05 21:56:36,783 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:36,819 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1403, 2.0490, 1.4766, 1.2252, 1.6840, 1.5994, 2.2657, 2.1028], + device='cuda:2'), covar=tensor([0.0799, 0.1397, 0.2406, 0.1801, 0.0926, 0.1864, 0.0956, 0.0764], + device='cuda:2'), in_proj_covar=tensor([0.0186, 0.0219, 0.0256, 0.0218, 0.0186, 0.0220, 0.0181, 0.0183], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 21:56:44,139 INFO [train.py:901] (2/4) Epoch 3, batch 4450, loss[loss=0.3012, simple_loss=0.3523, pruned_loss=0.1251, over 8092.00 frames. ], tot_loss[loss=0.3315, simple_loss=0.3793, pruned_loss=0.1418, over 1609318.72 frames. ], batch size: 21, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:45,438 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.404e+02 4.420e+02 6.069e+02 1.310e+03, threshold=8.839e+02, percent-clipped=8.0 +2023-02-05 21:57:06,677 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4110, 1.9798, 3.4903, 0.9611, 2.1373, 1.6743, 1.4803, 1.9100], + device='cuda:2'), covar=tensor([0.1375, 0.1499, 0.0603, 0.2591, 0.1438, 0.2037, 0.1209, 0.2067], + device='cuda:2'), in_proj_covar=tensor([0.0436, 0.0408, 0.0478, 0.0494, 0.0545, 0.0480, 0.0424, 0.0543], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 21:57:18,492 INFO [train.py:901] (2/4) Epoch 3, batch 4500, loss[loss=0.3138, simple_loss=0.3677, pruned_loss=0.1299, over 7974.00 frames. ], tot_loss[loss=0.3309, simple_loss=0.3791, pruned_loss=0.1414, over 1610526.01 frames. ], batch size: 21, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:20,854 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 21:57:27,456 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:57:46,864 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-02-05 21:57:53,170 INFO [train.py:901] (2/4) Epoch 3, batch 4550, loss[loss=0.3484, simple_loss=0.3912, pruned_loss=0.1528, over 8353.00 frames. ], tot_loss[loss=0.3301, simple_loss=0.3787, pruned_loss=0.1408, over 1610735.71 frames. ], batch size: 49, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:54,484 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.207e+02 3.483e+02 4.570e+02 6.300e+02 1.347e+03, threshold=9.139e+02, percent-clipped=2.0 +2023-02-05 21:58:14,828 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20750.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:17,080 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:26,243 INFO [train.py:901] (2/4) Epoch 3, batch 4600, loss[loss=0.3695, simple_loss=0.4175, pruned_loss=0.1608, over 8567.00 frames. ], tot_loss[loss=0.3312, simple_loss=0.3796, pruned_loss=0.1414, over 1614550.48 frames. 
], batch size: 31, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:58:34,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:35,797 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20780.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:00,497 INFO [train.py:901] (2/4) Epoch 3, batch 4650, loss[loss=0.3351, simple_loss=0.3754, pruned_loss=0.1474, over 8043.00 frames. ], tot_loss[loss=0.3325, simple_loss=0.3809, pruned_loss=0.1421, over 1617293.29 frames. ], batch size: 22, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:02,525 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 3.299e+02 4.239e+02 5.426e+02 9.400e+02, threshold=8.478e+02, percent-clipped=1.0 +2023-02-05 21:59:04,810 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20822.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:21,398 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20845.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:22,738 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20847.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:34,642 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20865.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:35,793 INFO [train.py:901] (2/4) Epoch 3, batch 4700, loss[loss=0.2789, simple_loss=0.338, pruned_loss=0.1099, over 7509.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.3811, pruned_loss=0.1425, over 1619807.93 frames. ], batch size: 18, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:37,911 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20870.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:39,888 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3838, 1.7393, 2.9394, 1.0633, 1.9513, 1.7706, 1.3909, 1.7748], + device='cuda:2'), covar=tensor([0.1315, 0.1450, 0.0531, 0.2520, 0.1185, 0.1989, 0.1288, 0.1648], + device='cuda:2'), in_proj_covar=tensor([0.0439, 0.0413, 0.0475, 0.0501, 0.0549, 0.0481, 0.0427, 0.0547], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 21:59:54,902 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20895.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:08,494 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1074, 3.8930, 2.0988, 1.9853, 2.8398, 1.9533, 2.5019, 2.8016], + device='cuda:2'), covar=tensor([0.1241, 0.0331, 0.0730, 0.0913, 0.0570, 0.0944, 0.1017, 0.0887], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0244, 0.0328, 0.0314, 0.0339, 0.0324, 0.0350, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 22:00:08,978 INFO [train.py:901] (2/4) Epoch 3, batch 4750, loss[loss=0.2954, simple_loss=0.3692, pruned_loss=0.1108, over 8435.00 frames. ], tot_loss[loss=0.3344, simple_loss=0.3821, pruned_loss=0.1433, over 1618413.50 frames. 
], batch size: 29, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 22:00:10,297 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.634e+02 4.432e+02 5.821e+02 1.296e+03, threshold=8.863e+02, percent-clipped=5.0 +2023-02-05 22:00:23,262 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20937.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:24,393 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 22:00:26,467 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 22:00:32,658 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:41,459 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:44,560 INFO [train.py:901] (2/4) Epoch 3, batch 4800, loss[loss=0.3313, simple_loss=0.373, pruned_loss=0.1448, over 8089.00 frames. ], tot_loss[loss=0.3334, simple_loss=0.3816, pruned_loss=0.1426, over 1616135.75 frames. ], batch size: 21, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:15,904 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.34 vs. limit=5.0 +2023-02-05 22:01:18,149 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 22:01:18,803 INFO [train.py:901] (2/4) Epoch 3, batch 4850, loss[loss=0.461, simple_loss=0.4673, pruned_loss=0.2274, over 6752.00 frames. ], tot_loss[loss=0.3334, simple_loss=0.3814, pruned_loss=0.1427, over 1614291.05 frames. ], batch size: 72, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:20,192 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.459e+02 3.687e+02 4.412e+02 5.668e+02 1.155e+03, threshold=8.825e+02, percent-clipped=6.0 +2023-02-05 22:01:21,142 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3936, 1.5986, 1.6232, 0.7439, 1.5858, 1.2492, 0.2971, 1.6231], + device='cuda:2'), covar=tensor([0.0102, 0.0079, 0.0086, 0.0128, 0.0090, 0.0258, 0.0213, 0.0050], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0165, 0.0138, 0.0214, 0.0159, 0.0282, 0.0224, 0.0194], + device='cuda:2'), out_proj_covar=tensor([1.1142e-04, 7.8030e-05, 6.3686e-05, 9.7799e-05, 7.6079e-05, 1.4329e-04, + 1.0844e-04, 9.0591e-05], device='cuda:2') +2023-02-05 22:01:42,126 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4653, 2.1205, 2.3074, 1.6664, 1.1286, 2.1866, 0.3238, 1.5115], + device='cuda:2'), covar=tensor([0.2738, 0.1176, 0.0610, 0.1947, 0.3779, 0.0663, 0.4856, 0.1669], + device='cuda:2'), in_proj_covar=tensor([0.0107, 0.0101, 0.0080, 0.0147, 0.0164, 0.0081, 0.0146, 0.0109], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:01:51,979 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21065.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:01:53,048 INFO [train.py:901] (2/4) Epoch 3, batch 4900, loss[loss=0.4778, simple_loss=0.4809, pruned_loss=0.2373, over 8484.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3826, pruned_loss=0.1438, over 1615127.43 frames. 
], batch size: 28, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:55,915 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:27,712 INFO [train.py:901] (2/4) Epoch 3, batch 4950, loss[loss=0.3551, simple_loss=0.4017, pruned_loss=0.1542, over 8539.00 frames. ], tot_loss[loss=0.3337, simple_loss=0.3816, pruned_loss=0.1429, over 1612729.58 frames. ], batch size: 28, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:02:29,094 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 3.569e+02 4.502e+02 6.229e+02 1.133e+03, threshold=9.004e+02, percent-clipped=2.0 +2023-02-05 22:02:30,685 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:41,327 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:48,064 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:51,482 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:01,832 INFO [train.py:901] (2/4) Epoch 3, batch 5000, loss[loss=0.2955, simple_loss=0.3465, pruned_loss=0.1222, over 7641.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3782, pruned_loss=0.1411, over 1608975.69 frames. ], batch size: 19, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:03,404 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-05 22:03:08,657 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:37,220 INFO [train.py:901] (2/4) Epoch 3, batch 5050, loss[loss=0.3646, simple_loss=0.4142, pruned_loss=0.1574, over 8437.00 frames. ], tot_loss[loss=0.3319, simple_loss=0.3791, pruned_loss=0.1423, over 1607764.11 frames. ], batch size: 27, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:38,541 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.043e+02 3.325e+02 4.224e+02 5.254e+02 1.187e+03, threshold=8.447e+02, percent-clipped=3.0 +2023-02-05 22:03:57,071 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 22:04:11,245 INFO [train.py:901] (2/4) Epoch 3, batch 5100, loss[loss=0.3111, simple_loss=0.3633, pruned_loss=0.1295, over 8520.00 frames. ], tot_loss[loss=0.3334, simple_loss=0.3806, pruned_loss=0.1431, over 1608505.82 frames. ], batch size: 28, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:16,072 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7332, 2.2070, 1.6596, 2.6919, 1.2699, 1.3770, 1.6646, 2.0983], + device='cuda:2'), covar=tensor([0.1297, 0.1293, 0.2044, 0.0503, 0.2124, 0.2627, 0.1972, 0.1415], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0308, 0.0303, 0.0229, 0.0293, 0.0316, 0.0329, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 22:04:46,361 INFO [train.py:901] (2/4) Epoch 3, batch 5150, loss[loss=0.4027, simple_loss=0.4357, pruned_loss=0.1848, over 8369.00 frames. ], tot_loss[loss=0.331, simple_loss=0.3787, pruned_loss=0.1416, over 1606350.73 frames. 
], batch size: 24, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:47,675 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.142e+02 3.453e+02 4.061e+02 5.332e+02 1.278e+03, threshold=8.122e+02, percent-clipped=4.0 +2023-02-05 22:04:50,023 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21321.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:04:56,024 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21330.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,474 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,535 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:19,994 INFO [train.py:901] (2/4) Epoch 3, batch 5200, loss[loss=0.3906, simple_loss=0.4047, pruned_loss=0.1882, over 7929.00 frames. ], tot_loss[loss=0.3312, simple_loss=0.3792, pruned_loss=0.1416, over 1612731.99 frames. ], batch size: 20, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:21,044 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 22:05:52,848 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:54,862 INFO [train.py:901] (2/4) Epoch 3, batch 5250, loss[loss=0.3636, simple_loss=0.4013, pruned_loss=0.163, over 7795.00 frames. ], tot_loss[loss=0.3327, simple_loss=0.3804, pruned_loss=0.1425, over 1615463.36 frames. ], batch size: 20, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:54,877 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 22:05:56,261 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 3.353e+02 4.281e+02 5.765e+02 2.364e+03, threshold=8.563e+02, percent-clipped=11.0 +2023-02-05 22:06:30,401 INFO [train.py:901] (2/4) Epoch 3, batch 5300, loss[loss=0.3289, simple_loss=0.3889, pruned_loss=0.1344, over 8188.00 frames. ], tot_loss[loss=0.3323, simple_loss=0.3801, pruned_loss=0.1422, over 1613636.26 frames. ], batch size: 23, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:06:39,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:04,804 INFO [train.py:901] (2/4) Epoch 3, batch 5350, loss[loss=0.3187, simple_loss=0.3606, pruned_loss=0.1384, over 7970.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3798, pruned_loss=0.1417, over 1609394.43 frames. ], batch size: 21, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:07:06,084 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.338e+02 4.128e+02 5.460e+02 1.129e+03, threshold=8.255e+02, percent-clipped=3.0 +2023-02-05 22:07:13,626 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21529.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:38,553 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 22:07:40,164 INFO [train.py:901] (2/4) Epoch 3, batch 5400, loss[loss=0.3791, simple_loss=0.4218, pruned_loss=0.1682, over 8462.00 frames. ], tot_loss[loss=0.3311, simple_loss=0.3791, pruned_loss=0.1415, over 1604972.00 frames. 
], batch size: 27, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:07:59,341 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21595.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:12,095 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21613.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:14,745 INFO [train.py:901] (2/4) Epoch 3, batch 5450, loss[loss=0.3465, simple_loss=0.3946, pruned_loss=0.1492, over 8509.00 frames. ], tot_loss[loss=0.3293, simple_loss=0.3777, pruned_loss=0.1404, over 1605349.87 frames. ], batch size: 26, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:16,069 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 3.746e+02 4.366e+02 5.874e+02 2.172e+03, threshold=8.732e+02, percent-clipped=6.0 +2023-02-05 22:08:24,279 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21631.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:27,054 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9280, 1.3509, 3.4463, 1.4628, 2.2749, 3.8013, 3.4113, 3.3295], + device='cuda:2'), covar=tensor([0.1024, 0.1541, 0.0326, 0.1811, 0.0683, 0.0236, 0.0374, 0.0493], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0262, 0.0200, 0.0255, 0.0207, 0.0181, 0.0180, 0.0252], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:08:41,814 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 22:08:49,812 INFO [train.py:901] (2/4) Epoch 3, batch 5500, loss[loss=0.3317, simple_loss=0.3829, pruned_loss=0.1402, over 8192.00 frames. ], tot_loss[loss=0.3312, simple_loss=0.3796, pruned_loss=0.1414, over 1605301.79 frames. ], batch size: 23, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:55,223 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21674.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:56,633 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.4468, 1.8858, 5.4293, 2.2466, 4.7463, 4.6477, 5.0146, 4.9252], + device='cuda:2'), covar=tensor([0.0313, 0.2871, 0.0232, 0.1488, 0.0855, 0.0372, 0.0282, 0.0338], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0427, 0.0308, 0.0339, 0.0399, 0.0322, 0.0309, 0.0349], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 22:09:05,908 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21690.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:09,887 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0577, 3.1287, 2.7353, 1.7332, 2.7083, 2.6298, 2.9022, 2.2736], + device='cuda:2'), covar=tensor([0.1008, 0.0729, 0.1009, 0.3466, 0.0757, 0.0888, 0.1369, 0.0858], + device='cuda:2'), in_proj_covar=tensor([0.0360, 0.0240, 0.0288, 0.0370, 0.0266, 0.0215, 0.0265, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:09:23,546 INFO [train.py:901] (2/4) Epoch 3, batch 5550, loss[loss=0.3657, simple_loss=0.4112, pruned_loss=0.1601, over 8604.00 frames. ], tot_loss[loss=0.3323, simple_loss=0.381, pruned_loss=0.1418, over 1611475.70 frames. 
], batch size: 31, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:09:23,780 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5998, 1.8739, 2.2486, 1.7490, 1.0346, 2.1141, 0.3488, 1.2970], + device='cuda:2'), covar=tensor([0.3193, 0.2389, 0.1248, 0.2110, 0.5373, 0.0916, 0.6385, 0.2223], + device='cuda:2'), in_proj_covar=tensor([0.0117, 0.0107, 0.0083, 0.0150, 0.0177, 0.0084, 0.0150, 0.0115], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:09:24,912 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 3.296e+02 4.063e+02 5.206e+02 8.291e+02, threshold=8.125e+02, percent-clipped=0.0 +2023-02-05 22:09:58,432 INFO [train.py:901] (2/4) Epoch 3, batch 5600, loss[loss=0.3642, simple_loss=0.4061, pruned_loss=0.1611, over 8716.00 frames. ], tot_loss[loss=0.3317, simple_loss=0.3815, pruned_loss=0.141, over 1615609.79 frames. ], batch size: 49, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:08,495 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:11,828 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21785.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:14,461 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21789.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:25,389 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:28,851 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21810.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:33,430 INFO [train.py:901] (2/4) Epoch 3, batch 5650, loss[loss=0.3039, simple_loss=0.3598, pruned_loss=0.124, over 8111.00 frames. ], tot_loss[loss=0.3322, simple_loss=0.3815, pruned_loss=0.1415, over 1617473.77 frames. ], batch size: 23, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:34,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.984e+02 3.614e+02 4.526e+02 5.980e+02 8.654e+02, threshold=9.051e+02, percent-clipped=4.0 +2023-02-05 22:10:45,265 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 22:10:56,879 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21851.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:02,289 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-05 22:11:07,115 INFO [train.py:901] (2/4) Epoch 3, batch 5700, loss[loss=0.3493, simple_loss=0.3789, pruned_loss=0.1599, over 8075.00 frames. ], tot_loss[loss=0.3338, simple_loss=0.3821, pruned_loss=0.1428, over 1615940.83 frames. ], batch size: 21, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:07,935 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:13,370 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:39,355 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-05 22:11:42,902 INFO [train.py:901] (2/4) Epoch 3, batch 5750, loss[loss=0.2374, simple_loss=0.3028, pruned_loss=0.086, over 7787.00 frames. 
], tot_loss[loss=0.3322, simple_loss=0.3803, pruned_loss=0.142, over 1609793.14 frames. ], batch size: 19, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:44,221 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.339e+02 3.657e+02 4.422e+02 5.345e+02 1.248e+03, threshold=8.845e+02, percent-clipped=3.0 +2023-02-05 22:11:49,705 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 22:12:10,247 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21957.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:11,339 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-05 22:12:16,965 INFO [train.py:901] (2/4) Epoch 3, batch 5800, loss[loss=0.3548, simple_loss=0.3918, pruned_loss=0.1589, over 8655.00 frames. ], tot_loss[loss=0.332, simple_loss=0.3802, pruned_loss=0.1419, over 1609468.72 frames. ], batch size: 39, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:19,906 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1505, 1.3601, 2.3107, 1.0752, 2.2328, 2.4601, 2.2803, 2.0714], + device='cuda:2'), covar=tensor([0.1115, 0.1035, 0.0396, 0.1652, 0.0441, 0.0338, 0.0449, 0.0689], + device='cuda:2'), in_proj_covar=tensor([0.0228, 0.0254, 0.0190, 0.0247, 0.0199, 0.0174, 0.0170, 0.0246], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:12:22,435 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:25,859 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0111, 1.8529, 2.8476, 2.3532, 2.3038, 1.8671, 1.3838, 1.0088], + device='cuda:2'), covar=tensor([0.1092, 0.1001, 0.0239, 0.0456, 0.0450, 0.0534, 0.0687, 0.1040], + device='cuda:2'), in_proj_covar=tensor([0.0622, 0.0534, 0.0453, 0.0489, 0.0603, 0.0499, 0.0509, 0.0517], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:12:30,996 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21988.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:52,162 INFO [train.py:901] (2/4) Epoch 3, batch 5850, loss[loss=0.2978, simple_loss=0.3628, pruned_loss=0.1164, over 8501.00 frames. ], tot_loss[loss=0.3311, simple_loss=0.3798, pruned_loss=0.1412, over 1613734.28 frames. ], batch size: 28, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:53,405 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.662e+02 4.461e+02 5.594e+02 1.608e+03, threshold=8.923e+02, percent-clipped=8.0 +2023-02-05 22:13:11,817 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:22,187 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:25,941 INFO [train.py:901] (2/4) Epoch 3, batch 5900, loss[loss=0.4242, simple_loss=0.4411, pruned_loss=0.2036, over 7417.00 frames. ], tot_loss[loss=0.3298, simple_loss=0.3786, pruned_loss=0.1406, over 1608007.15 frames. 
], batch size: 72, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:13:28,808 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:30,183 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22072.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:39,565 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22086.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:42,233 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:57,695 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3333, 1.5881, 1.4892, 1.2226, 1.5759, 1.4306, 1.6816, 1.6560], + device='cuda:2'), covar=tensor([0.0646, 0.1345, 0.1979, 0.1616, 0.0764, 0.1741, 0.0870, 0.0677], + device='cuda:2'), in_proj_covar=tensor([0.0181, 0.0216, 0.0256, 0.0216, 0.0184, 0.0218, 0.0179, 0.0180], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:14:00,338 INFO [train.py:901] (2/4) Epoch 3, batch 5950, loss[loss=0.3197, simple_loss=0.3712, pruned_loss=0.1341, over 8500.00 frames. ], tot_loss[loss=0.3288, simple_loss=0.3777, pruned_loss=0.14, over 1607219.34 frames. ], batch size: 26, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:02,405 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.045e+02 3.353e+02 4.485e+02 5.691e+02 1.558e+03, threshold=8.970e+02, percent-clipped=6.0 +2023-02-05 22:14:07,180 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:23,954 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22148.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:36,950 INFO [train.py:901] (2/4) Epoch 3, batch 6000, loss[loss=0.3636, simple_loss=0.409, pruned_loss=0.159, over 8505.00 frames. ], tot_loss[loss=0.3283, simple_loss=0.3776, pruned_loss=0.1395, over 1611060.81 frames. ], batch size: 39, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:36,951 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 22:14:49,936 INFO [train.py:935] (2/4) Epoch 3, validation: loss=0.2472, simple_loss=0.3383, pruned_loss=0.07805, over 944034.00 frames. +2023-02-05 22:14:49,937 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 22:15:08,336 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22194.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:12,103 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-05 22:15:21,642 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:25,116 INFO [train.py:901] (2/4) Epoch 3, batch 6050, loss[loss=0.2928, simple_loss=0.3504, pruned_loss=0.1176, over 7241.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3786, pruned_loss=0.1407, over 1616588.32 frames. ], batch size: 16, lr: 2.18e-02, grad_scale: 8.0 +2023-02-05 22:15:26,473 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.554e+02 3.417e+02 4.364e+02 5.364e+02 3.571e+03, threshold=8.727e+02, percent-clipped=6.0 +2023-02-05 22:15:33,728 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. 
limit=2.0 +2023-02-05 22:15:36,171 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22233.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:40,935 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:51,050 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22255.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:59,773 INFO [train.py:901] (2/4) Epoch 3, batch 6100, loss[loss=0.3227, simple_loss=0.3831, pruned_loss=0.1311, over 8513.00 frames. ], tot_loss[loss=0.3297, simple_loss=0.3788, pruned_loss=0.1403, over 1616655.71 frames. ], batch size: 26, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:16:18,445 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 22:16:35,144 INFO [train.py:901] (2/4) Epoch 3, batch 6150, loss[loss=0.3623, simple_loss=0.3982, pruned_loss=0.1632, over 8492.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3788, pruned_loss=0.1409, over 1616512.68 frames. ], batch size: 26, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:16:36,466 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.615e+02 4.380e+02 5.688e+02 1.525e+03, threshold=8.759e+02, percent-clipped=2.0 +2023-02-05 22:16:41,827 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22327.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:42,487 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22328.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:45,079 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22332.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:54,538 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:59,259 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22353.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:04,952 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 22:17:08,710 INFO [train.py:901] (2/4) Epoch 3, batch 6200, loss[loss=0.3329, simple_loss=0.3797, pruned_loss=0.143, over 8070.00 frames. ], tot_loss[loss=0.3293, simple_loss=0.3782, pruned_loss=0.1402, over 1612090.02 frames. ], batch size: 21, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:17:09,125 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-05 22:17:11,706 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22371.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:19,684 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9125, 0.9831, 1.0714, 0.8872, 0.6535, 1.1082, 0.0551, 0.7855], + device='cuda:2'), covar=tensor([0.1999, 0.1657, 0.1063, 0.1539, 0.3641, 0.0633, 0.4170, 0.1910], + device='cuda:2'), in_proj_covar=tensor([0.0115, 0.0108, 0.0079, 0.0150, 0.0169, 0.0075, 0.0144, 0.0115], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:17:27,294 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. 
limit=2.0 +2023-02-05 22:17:34,529 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22403.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:40,520 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1411, 1.2964, 1.2314, 0.1980, 1.1813, 0.9631, 0.0788, 1.1711], + device='cuda:2'), covar=tensor([0.0088, 0.0068, 0.0069, 0.0152, 0.0079, 0.0237, 0.0192, 0.0077], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0159, 0.0139, 0.0213, 0.0157, 0.0283, 0.0226, 0.0188], + device='cuda:2'), out_proj_covar=tensor([1.0651e-04, 7.1615e-05, 6.2392e-05, 9.5690e-05, 7.3334e-05, 1.3844e-04, + 1.0564e-04, 8.4073e-05], device='cuda:2') +2023-02-05 22:17:44,417 INFO [train.py:901] (2/4) Epoch 3, batch 6250, loss[loss=0.2899, simple_loss=0.3395, pruned_loss=0.1202, over 7281.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3787, pruned_loss=0.1409, over 1610927.22 frames. ], batch size: 16, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:17:45,753 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.506e+02 4.308e+02 5.585e+02 1.214e+03, threshold=8.617e+02, percent-clipped=6.0 +2023-02-05 22:18:05,793 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:19,126 INFO [train.py:901] (2/4) Epoch 3, batch 6300, loss[loss=0.3837, simple_loss=0.4251, pruned_loss=0.1711, over 8259.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3788, pruned_loss=0.1408, over 1612973.57 frames. ], batch size: 24, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:18:20,234 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.39 vs. limit=5.0 +2023-02-05 22:18:25,230 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1957, 1.7925, 2.9274, 2.3707, 2.3231, 1.7709, 1.3844, 1.0706], + device='cuda:2'), covar=tensor([0.1045, 0.1072, 0.0243, 0.0484, 0.0470, 0.0594, 0.0755, 0.1124], + device='cuda:2'), in_proj_covar=tensor([0.0618, 0.0528, 0.0453, 0.0488, 0.0603, 0.0495, 0.0511, 0.0518], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:18:36,439 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22492.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:39,274 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7906, 2.0545, 1.5915, 1.4430, 1.8945, 1.8073, 2.3120, 2.3473], + device='cuda:2'), covar=tensor([0.0544, 0.1362, 0.1987, 0.1692, 0.0748, 0.1646, 0.0859, 0.0600], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0215, 0.0252, 0.0214, 0.0179, 0.0217, 0.0178, 0.0179], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:18:39,316 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:54,107 INFO [train.py:901] (2/4) Epoch 3, batch 6350, loss[loss=0.274, simple_loss=0.3286, pruned_loss=0.1097, over 7813.00 frames. ], tot_loss[loss=0.3288, simple_loss=0.3776, pruned_loss=0.14, over 1612513.71 frames. 
], batch size: 20, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:18:55,438 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.104e+02 3.537e+02 4.368e+02 5.315e+02 1.494e+03, threshold=8.736e+02, percent-clipped=5.0 +2023-02-05 22:18:57,019 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:08,840 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22538.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:28,747 INFO [train.py:901] (2/4) Epoch 3, batch 6400, loss[loss=0.3433, simple_loss=0.4063, pruned_loss=0.1401, over 8024.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.3763, pruned_loss=0.1385, over 1612947.79 frames. ], batch size: 22, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:19:35,384 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22577.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:39,442 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22583.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:40,140 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7924, 2.6095, 4.6987, 1.1717, 2.6343, 2.4809, 1.8263, 2.3728], + device='cuda:2'), covar=tensor([0.1238, 0.1434, 0.0501, 0.2584, 0.1307, 0.1721, 0.1160, 0.2053], + device='cuda:2'), in_proj_covar=tensor([0.0454, 0.0419, 0.0501, 0.0512, 0.0556, 0.0492, 0.0445, 0.0562], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:19:50,049 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22599.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:55,833 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22607.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:56,537 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22608.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:03,200 INFO [train.py:901] (2/4) Epoch 3, batch 6450, loss[loss=0.3801, simple_loss=0.4263, pruned_loss=0.167, over 8039.00 frames. ], tot_loss[loss=0.3266, simple_loss=0.3768, pruned_loss=0.1382, over 1618696.52 frames. ], batch size: 22, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:20:04,480 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 3.557e+02 4.436e+02 5.729e+02 1.082e+03, threshold=8.871e+02, percent-clipped=7.0 +2023-02-05 22:20:21,543 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-05 22:20:28,490 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:31,150 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22657.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:37,604 INFO [train.py:901] (2/4) Epoch 3, batch 6500, loss[loss=0.2758, simple_loss=0.3191, pruned_loss=0.1162, over 5524.00 frames. ], tot_loss[loss=0.3277, simple_loss=0.3779, pruned_loss=0.1388, over 1619915.37 frames. 
], batch size: 12, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:20:37,825 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7094, 3.1728, 3.2262, 1.9698, 1.6360, 3.0202, 0.7633, 2.5907], + device='cuda:2'), covar=tensor([0.2591, 0.1891, 0.0754, 0.2732, 0.4903, 0.0793, 0.5960, 0.1371], + device='cuda:2'), in_proj_covar=tensor([0.0116, 0.0110, 0.0082, 0.0153, 0.0175, 0.0077, 0.0145, 0.0111], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:20:55,236 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:01,238 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4388, 2.1345, 2.0963, 0.8258, 2.0157, 1.4550, 0.5280, 1.6224], + device='cuda:2'), covar=tensor([0.0158, 0.0084, 0.0099, 0.0194, 0.0116, 0.0292, 0.0234, 0.0089], + device='cuda:2'), in_proj_covar=tensor([0.0246, 0.0168, 0.0143, 0.0221, 0.0164, 0.0297, 0.0234, 0.0197], + device='cuda:2'), out_proj_covar=tensor([1.1229e-04, 7.5322e-05, 6.4148e-05, 9.8566e-05, 7.6339e-05, 1.4518e-04, + 1.0787e-04, 8.6885e-05], device='cuda:2') +2023-02-05 22:21:02,666 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22703.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:03,420 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.06 vs. limit=5.0 +2023-02-05 22:21:09,917 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22714.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:11,757 INFO [train.py:901] (2/4) Epoch 3, batch 6550, loss[loss=0.3895, simple_loss=0.4286, pruned_loss=0.1752, over 8496.00 frames. ], tot_loss[loss=0.3278, simple_loss=0.3777, pruned_loss=0.1389, over 1616235.53 frames. ], batch size: 26, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:21:13,164 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.155e+02 3.258e+02 3.883e+02 5.357e+02 1.264e+03, threshold=7.766e+02, percent-clipped=3.0 +2023-02-05 22:21:19,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:28,616 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 22:21:32,835 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22747.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:47,206 INFO [train.py:901] (2/4) Epoch 3, batch 6600, loss[loss=0.3811, simple_loss=0.4335, pruned_loss=0.1644, over 8475.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.377, pruned_loss=0.1382, over 1618275.15 frames. ], batch size: 25, lr: 2.16e-02, grad_scale: 8.0 +2023-02-05 22:21:47,903 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 22:22:19,031 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22812.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:22,347 INFO [train.py:901] (2/4) Epoch 3, batch 6650, loss[loss=0.2858, simple_loss=0.3384, pruned_loss=0.1166, over 7924.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.3774, pruned_loss=0.138, over 1619311.07 frames. 
], batch size: 20, lr: 2.16e-02, grad_scale: 8.0 +2023-02-05 22:22:24,341 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.373e+02 3.456e+02 4.169e+02 5.335e+02 9.931e+02, threshold=8.339e+02, percent-clipped=8.0 +2023-02-05 22:22:34,130 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4527, 2.4714, 1.7365, 2.2196, 1.9535, 1.2751, 1.8645, 1.9867], + device='cuda:2'), covar=tensor([0.1049, 0.0401, 0.0896, 0.0571, 0.0674, 0.1206, 0.0906, 0.0630], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0230, 0.0323, 0.0313, 0.0327, 0.0313, 0.0337, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 22:22:40,066 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:41,434 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3942, 1.2686, 3.0124, 1.0501, 2.1952, 3.2957, 3.1550, 2.8219], + device='cuda:2'), covar=tensor([0.1280, 0.1492, 0.0371, 0.2083, 0.0656, 0.0238, 0.0383, 0.0605], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0262, 0.0199, 0.0260, 0.0206, 0.0178, 0.0177, 0.0249], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:22:53,739 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22862.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:54,496 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22863.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:57,072 INFO [train.py:901] (2/4) Epoch 3, batch 6700, loss[loss=0.2968, simple_loss=0.3652, pruned_loss=0.1142, over 8037.00 frames. ], tot_loss[loss=0.3243, simple_loss=0.3751, pruned_loss=0.1368, over 1615906.40 frames. ], batch size: 22, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:23:04,028 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5608, 2.0624, 3.5429, 1.0630, 2.2440, 1.8028, 1.5223, 1.9593], + device='cuda:2'), covar=tensor([0.1203, 0.1524, 0.0526, 0.2573, 0.1288, 0.1974, 0.1248, 0.1964], + device='cuda:2'), in_proj_covar=tensor([0.0445, 0.0413, 0.0490, 0.0507, 0.0546, 0.0478, 0.0432, 0.0552], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:23:12,645 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:17,363 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7513, 2.1260, 1.6461, 2.6205, 1.0687, 1.4376, 1.8178, 2.2723], + device='cuda:2'), covar=tensor([0.1175, 0.1308, 0.1817, 0.0596, 0.1986, 0.2288, 0.1529, 0.1043], + device='cuda:2'), in_proj_covar=tensor([0.0299, 0.0304, 0.0300, 0.0227, 0.0272, 0.0310, 0.0313, 0.0293], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:2') +2023-02-05 22:23:26,901 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22909.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:32,721 INFO [train.py:901] (2/4) Epoch 3, batch 6750, loss[loss=0.3721, simple_loss=0.4144, pruned_loss=0.165, over 8533.00 frames. ], tot_loss[loss=0.3253, simple_loss=0.3758, pruned_loss=0.1374, over 1618634.50 frames. 
], batch size: 49, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:23:34,753 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.597e+02 4.402e+02 5.483e+02 1.400e+03, threshold=8.804e+02, percent-clipped=7.0 +2023-02-05 22:23:44,514 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22934.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:53,624 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:05,896 INFO [train.py:901] (2/4) Epoch 3, batch 6800, loss[loss=0.3252, simple_loss=0.3737, pruned_loss=0.1383, over 7245.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3762, pruned_loss=0.1381, over 1613547.49 frames. ], batch size: 16, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:24:05,905 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 22:24:08,774 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22970.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:10,828 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:26,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22995.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:30,242 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23001.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:35,032 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4357, 2.1928, 1.4637, 1.8057, 1.9365, 1.1709, 1.5077, 1.9137], + device='cuda:2'), covar=tensor([0.0963, 0.0341, 0.0883, 0.0573, 0.0525, 0.1045, 0.0837, 0.0598], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0235, 0.0329, 0.0314, 0.0331, 0.0312, 0.0340, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 22:24:41,423 INFO [train.py:901] (2/4) Epoch 3, batch 6850, loss[loss=0.3383, simple_loss=0.4036, pruned_loss=0.1365, over 8457.00 frames. ], tot_loss[loss=0.3256, simple_loss=0.376, pruned_loss=0.1376, over 1615448.66 frames. ], batch size: 25, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:24:43,431 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.425e+02 4.505e+02 5.413e+02 1.323e+03, threshold=9.011e+02, percent-clipped=6.0 +2023-02-05 22:24:54,848 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 22:25:15,252 INFO [train.py:901] (2/4) Epoch 3, batch 6900, loss[loss=0.32, simple_loss=0.3811, pruned_loss=0.1295, over 8199.00 frames. ], tot_loss[loss=0.3284, simple_loss=0.378, pruned_loss=0.1394, over 1615182.86 frames. ], batch size: 23, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:25:50,201 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23116.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:25:50,684 INFO [train.py:901] (2/4) Epoch 3, batch 6950, loss[loss=0.2701, simple_loss=0.333, pruned_loss=0.1035, over 7920.00 frames. ], tot_loss[loss=0.3287, simple_loss=0.3782, pruned_loss=0.1395, over 1613917.13 frames. 
], batch size: 20, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:25:51,596 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:25:52,722 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.525e+02 4.440e+02 6.025e+02 1.140e+03, threshold=8.880e+02, percent-clipped=3.0 +2023-02-05 22:25:57,666 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23126.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:02,144 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 22:26:09,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:14,266 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 22:26:18,629 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23156.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:26,188 INFO [train.py:901] (2/4) Epoch 3, batch 7000, loss[loss=0.2882, simple_loss=0.3544, pruned_loss=0.111, over 7964.00 frames. ], tot_loss[loss=0.3264, simple_loss=0.3765, pruned_loss=0.1381, over 1613900.79 frames. ], batch size: 21, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:26:33,858 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5121, 1.7964, 2.0553, 1.7497, 1.0059, 1.8909, 0.4232, 1.3197], + device='cuda:2'), covar=tensor([0.3294, 0.1750, 0.1005, 0.1363, 0.5172, 0.0810, 0.5118, 0.1826], + device='cuda:2'), in_proj_covar=tensor([0.0120, 0.0116, 0.0088, 0.0158, 0.0185, 0.0081, 0.0151, 0.0116], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:26:39,926 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23187.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:01,199 INFO [train.py:901] (2/4) Epoch 3, batch 7050, loss[loss=0.3497, simple_loss=0.391, pruned_loss=0.1542, over 8257.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3759, pruned_loss=0.1382, over 1610028.17 frames. ], batch size: 48, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:27:03,862 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 3.682e+02 4.488e+02 5.424e+02 1.788e+03, threshold=8.977e+02, percent-clipped=6.0 +2023-02-05 22:27:14,102 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23235.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:18,794 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3336, 1.4440, 4.4868, 1.8831, 3.7737, 3.6977, 3.9600, 3.9024], + device='cuda:2'), covar=tensor([0.0388, 0.3175, 0.0291, 0.1818, 0.1021, 0.0506, 0.0381, 0.0465], + device='cuda:2'), in_proj_covar=tensor([0.0263, 0.0429, 0.0321, 0.0340, 0.0405, 0.0334, 0.0316, 0.0356], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 22:27:22,741 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:36,329 INFO [train.py:901] (2/4) Epoch 3, batch 7100, loss[loss=0.3406, simple_loss=0.3901, pruned_loss=0.1455, over 8298.00 frames. ], tot_loss[loss=0.3257, simple_loss=0.3754, pruned_loss=0.138, over 1609888.31 frames. 
], batch size: 23, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:27:39,094 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:59,149 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23302.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:28:08,849 INFO [train.py:901] (2/4) Epoch 3, batch 7150, loss[loss=0.3742, simple_loss=0.4144, pruned_loss=0.167, over 7971.00 frames. ], tot_loss[loss=0.3264, simple_loss=0.3757, pruned_loss=0.1385, over 1607928.34 frames. ], batch size: 21, lr: 2.13e-02, grad_scale: 8.0 +2023-02-05 22:28:10,871 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.845e+02 4.572e+02 5.960e+02 1.048e+03, threshold=9.143e+02, percent-clipped=2.0 +2023-02-05 22:28:43,301 INFO [train.py:901] (2/4) Epoch 3, batch 7200, loss[loss=0.3152, simple_loss=0.3556, pruned_loss=0.1373, over 7420.00 frames. ], tot_loss[loss=0.3241, simple_loss=0.3741, pruned_loss=0.1371, over 1608585.78 frames. ], batch size: 17, lr: 2.13e-02, grad_scale: 8.0 +2023-02-05 22:28:47,566 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23372.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:29:04,753 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23397.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:29:17,775 INFO [train.py:901] (2/4) Epoch 3, batch 7250, loss[loss=0.2975, simple_loss=0.3373, pruned_loss=0.1288, over 7250.00 frames. ], tot_loss[loss=0.3248, simple_loss=0.3743, pruned_loss=0.1376, over 1606714.98 frames. ], batch size: 16, lr: 2.13e-02, grad_scale: 4.0 +2023-02-05 22:29:20,311 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.518e+02 3.505e+02 4.323e+02 5.847e+02 9.851e+02, threshold=8.646e+02, percent-clipped=2.0 +2023-02-05 22:29:52,887 INFO [train.py:901] (2/4) Epoch 3, batch 7300, loss[loss=0.3481, simple_loss=0.3941, pruned_loss=0.1511, over 8252.00 frames. ], tot_loss[loss=0.3252, simple_loss=0.3749, pruned_loss=0.1378, over 1610457.10 frames. 
], batch size: 49, lr: 2.13e-02, grad_scale: 4.0 +2023-02-05 22:29:55,076 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:02,050 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7357, 2.7862, 1.6830, 2.1452, 2.3297, 1.4994, 2.0740, 2.2517], + device='cuda:2'), covar=tensor([0.1281, 0.0432, 0.0967, 0.0676, 0.0579, 0.1139, 0.0829, 0.0735], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0236, 0.0324, 0.0318, 0.0339, 0.0319, 0.0341, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 22:30:08,364 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4040, 1.3897, 4.5357, 1.7156, 3.7150, 3.6726, 3.9828, 3.9384], + device='cuda:2'), covar=tensor([0.0388, 0.3341, 0.0254, 0.1973, 0.1158, 0.0493, 0.0389, 0.0565], + device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0439, 0.0321, 0.0349, 0.0417, 0.0346, 0.0320, 0.0368], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 22:30:14,541 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1491, 3.1375, 2.7675, 1.4301, 2.7535, 2.6483, 2.8770, 2.5307], + device='cuda:2'), covar=tensor([0.1273, 0.0800, 0.1161, 0.4543, 0.0904, 0.1212, 0.1516, 0.0994], + device='cuda:2'), in_proj_covar=tensor([0.0363, 0.0249, 0.0276, 0.0365, 0.0269, 0.0223, 0.0267, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:30:20,013 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4745, 1.6478, 1.4607, 1.2217, 1.4995, 1.3590, 1.8361, 1.7031], + device='cuda:2'), covar=tensor([0.0638, 0.1208, 0.1985, 0.1638, 0.0714, 0.1639, 0.0835, 0.0616], + device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0208, 0.0243, 0.0210, 0.0171, 0.0211, 0.0174, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:30:21,343 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23506.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:28,673 INFO [train.py:901] (2/4) Epoch 3, batch 7350, loss[loss=0.3204, simple_loss=0.3717, pruned_loss=0.1345, over 8237.00 frames. ], tot_loss[loss=0.325, simple_loss=0.3748, pruned_loss=0.1376, over 1611603.18 frames. ], batch size: 22, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:30:31,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.204e+02 3.295e+02 4.174e+02 5.897e+02 1.266e+03, threshold=8.348e+02, percent-clipped=6.0 +2023-02-05 22:30:35,723 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23527.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:45,675 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 22:30:52,325 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23552.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:56,354 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23558.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:03,073 INFO [train.py:901] (2/4) Epoch 3, batch 7400, loss[loss=0.3172, simple_loss=0.3624, pruned_loss=0.136, over 5952.00 frames. 
], tot_loss[loss=0.3271, simple_loss=0.3764, pruned_loss=0.1389, over 1608166.45 frames. ], batch size: 13, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:31:03,948 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9639, 1.1986, 1.7757, 0.8506, 1.3700, 1.1639, 1.0446, 1.1818], + device='cuda:2'), covar=tensor([0.0942, 0.1120, 0.0399, 0.1802, 0.0765, 0.1393, 0.0943, 0.1167], + device='cuda:2'), in_proj_covar=tensor([0.0444, 0.0413, 0.0490, 0.0502, 0.0550, 0.0488, 0.0435, 0.0556], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:31:05,768 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 22:31:11,871 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:14,783 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23583.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:16,181 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23585.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:20,318 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:38,767 INFO [train.py:901] (2/4) Epoch 3, batch 7450, loss[loss=0.3401, simple_loss=0.4043, pruned_loss=0.1379, over 8479.00 frames. ], tot_loss[loss=0.3279, simple_loss=0.3765, pruned_loss=0.1397, over 1605341.28 frames. ], batch size: 27, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:31:41,482 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.560e+02 4.542e+02 5.434e+02 8.209e+02, threshold=9.083e+02, percent-clipped=0.0 +2023-02-05 22:31:44,206 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 22:32:11,863 INFO [train.py:901] (2/4) Epoch 3, batch 7500, loss[loss=0.3469, simple_loss=0.3996, pruned_loss=0.1471, over 8571.00 frames. ], tot_loss[loss=0.3286, simple_loss=0.3774, pruned_loss=0.1399, over 1611750.49 frames. ], batch size: 31, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:32:23,287 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4554, 1.8849, 1.8400, 0.8191, 1.7982, 1.3744, 0.4665, 1.7838], + device='cuda:2'), covar=tensor([0.0122, 0.0078, 0.0087, 0.0143, 0.0110, 0.0230, 0.0194, 0.0057], + device='cuda:2'), in_proj_covar=tensor([0.0245, 0.0175, 0.0144, 0.0223, 0.0170, 0.0294, 0.0238, 0.0196], + device='cuda:2'), out_proj_covar=tensor([1.0878e-04, 7.6709e-05, 6.2373e-05, 9.6665e-05, 7.7073e-05, 1.3988e-04, + 1.0696e-04, 8.5106e-05], device='cuda:2') +2023-02-05 22:32:28,861 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-05 22:32:31,249 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23694.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:32:39,264 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:32:47,010 INFO [train.py:901] (2/4) Epoch 3, batch 7550, loss[loss=0.2915, simple_loss=0.3545, pruned_loss=0.1143, over 8243.00 frames. ], tot_loss[loss=0.3286, simple_loss=0.378, pruned_loss=0.1396, over 1615639.04 frames. 
], batch size: 22, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:32:49,788 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 3.573e+02 4.120e+02 5.568e+02 9.909e+02, threshold=8.240e+02, percent-clipped=1.0 +2023-02-05 22:33:21,016 INFO [train.py:901] (2/4) Epoch 3, batch 7600, loss[loss=0.3514, simple_loss=0.4081, pruned_loss=0.1474, over 8320.00 frames. ], tot_loss[loss=0.3283, simple_loss=0.3777, pruned_loss=0.1395, over 1611783.16 frames. ], batch size: 25, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:33:21,256 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4435, 1.8664, 2.1130, 1.0626, 1.9960, 1.3739, 0.7501, 1.7674], + device='cuda:2'), covar=tensor([0.0154, 0.0084, 0.0086, 0.0171, 0.0125, 0.0289, 0.0227, 0.0079], + device='cuda:2'), in_proj_covar=tensor([0.0246, 0.0176, 0.0145, 0.0226, 0.0173, 0.0300, 0.0240, 0.0202], + device='cuda:2'), out_proj_covar=tensor([1.0904e-04, 7.7271e-05, 6.2765e-05, 9.7671e-05, 7.8611e-05, 1.4334e-04, + 1.0812e-04, 8.7947e-05], device='cuda:2') +2023-02-05 22:33:25,982 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.91 vs. limit=2.0 +2023-02-05 22:33:55,880 INFO [train.py:901] (2/4) Epoch 3, batch 7650, loss[loss=0.2751, simple_loss=0.3454, pruned_loss=0.1024, over 8462.00 frames. ], tot_loss[loss=0.3269, simple_loss=0.3763, pruned_loss=0.1388, over 1614282.62 frames. ], batch size: 25, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:33:58,394 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.349e+02 3.333e+02 4.379e+02 5.791e+02 1.321e+03, threshold=8.759e+02, percent-clipped=7.0 +2023-02-05 22:34:12,402 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-05 22:34:13,553 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23841.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:19,388 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23850.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:29,028 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 22:34:30,200 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23866.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:30,645 INFO [train.py:901] (2/4) Epoch 3, batch 7700, loss[loss=0.3377, simple_loss=0.3959, pruned_loss=0.1398, over 8512.00 frames. ], tot_loss[loss=0.3263, simple_loss=0.3757, pruned_loss=0.1385, over 1616919.03 frames. ], batch size: 26, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:34:44,965 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7431, 1.9660, 3.0255, 1.2792, 2.5285, 1.9708, 1.7835, 2.2123], + device='cuda:2'), covar=tensor([0.0970, 0.1261, 0.0434, 0.1952, 0.0756, 0.1397, 0.0912, 0.1320], + device='cuda:2'), in_proj_covar=tensor([0.0449, 0.0412, 0.0498, 0.0506, 0.0549, 0.0489, 0.0431, 0.0562], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:34:50,582 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 22:34:50,859 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 22:35:04,773 INFO [train.py:901] (2/4) Epoch 3, batch 7750, loss[loss=0.3372, simple_loss=0.3913, pruned_loss=0.1416, over 8509.00 frames. 
], tot_loss[loss=0.3248, simple_loss=0.3747, pruned_loss=0.1375, over 1618350.54 frames. ], batch size: 26, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:35:08,100 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.458e+02 4.167e+02 5.729e+02 1.393e+03, threshold=8.335e+02, percent-clipped=8.0 +2023-02-05 22:35:27,666 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:37,131 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:39,048 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23965.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:40,188 INFO [train.py:901] (2/4) Epoch 3, batch 7800, loss[loss=0.3238, simple_loss=0.3586, pruned_loss=0.1445, over 8088.00 frames. ], tot_loss[loss=0.3238, simple_loss=0.3738, pruned_loss=0.1369, over 1615855.38 frames. ], batch size: 21, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:35:45,559 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:53,490 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:36:14,028 INFO [train.py:901] (2/4) Epoch 3, batch 7850, loss[loss=0.239, simple_loss=0.3065, pruned_loss=0.08577, over 7793.00 frames. ], tot_loss[loss=0.3244, simple_loss=0.3746, pruned_loss=0.1371, over 1614619.40 frames. ], batch size: 19, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:36:16,553 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.230e+02 3.608e+02 4.565e+02 5.801e+02 1.089e+03, threshold=9.129e+02, percent-clipped=5.0 +2023-02-05 22:36:18,831 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3799, 1.9866, 3.3372, 2.6294, 2.6462, 2.0062, 1.3868, 1.2561], + device='cuda:2'), covar=tensor([0.1198, 0.1487, 0.0271, 0.0628, 0.0615, 0.0627, 0.0830, 0.1414], + device='cuda:2'), in_proj_covar=tensor([0.0637, 0.0556, 0.0463, 0.0519, 0.0630, 0.0512, 0.0529, 0.0536], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:36:39,257 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24055.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:36:47,366 INFO [train.py:901] (2/4) Epoch 3, batch 7900, loss[loss=0.4175, simple_loss=0.4433, pruned_loss=0.1958, over 6804.00 frames. ], tot_loss[loss=0.324, simple_loss=0.3744, pruned_loss=0.1368, over 1617189.51 frames. ], batch size: 71, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:20,406 INFO [train.py:901] (2/4) Epoch 3, batch 7950, loss[loss=0.3536, simple_loss=0.3945, pruned_loss=0.1563, over 8362.00 frames. ], tot_loss[loss=0.3243, simple_loss=0.3749, pruned_loss=0.1369, over 1616148.48 frames. ], batch size: 24, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:23,166 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.080e+02 3.295e+02 4.369e+02 5.897e+02 2.335e+03, threshold=8.738e+02, percent-clipped=5.0 +2023-02-05 22:37:45,312 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-05 22:37:54,048 INFO [train.py:901] (2/4) Epoch 3, batch 8000, loss[loss=0.3301, simple_loss=0.3667, pruned_loss=0.1467, over 7452.00 frames. 
], tot_loss[loss=0.3243, simple_loss=0.3745, pruned_loss=0.137, over 1614930.49 frames. ], batch size: 17, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:54,334 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0922, 1.1614, 4.2038, 1.6394, 3.6567, 3.5261, 3.7254, 3.6168], + device='cuda:2'), covar=tensor([0.0313, 0.3215, 0.0313, 0.1968, 0.0944, 0.0518, 0.0395, 0.0500], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0439, 0.0334, 0.0356, 0.0425, 0.0347, 0.0333, 0.0372], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 22:37:59,711 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.6729, 3.7470, 3.3094, 1.5643, 3.3547, 3.2449, 3.5037, 2.8711], + device='cuda:2'), covar=tensor([0.1031, 0.0654, 0.1019, 0.4208, 0.0693, 0.0767, 0.1172, 0.0744], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0248, 0.0290, 0.0366, 0.0273, 0.0234, 0.0263, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:38:02,053 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-05 22:38:25,113 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. limit=5.0 +2023-02-05 22:38:27,963 INFO [train.py:901] (2/4) Epoch 3, batch 8050, loss[loss=0.4157, simple_loss=0.4356, pruned_loss=0.1979, over 6955.00 frames. ], tot_loss[loss=0.3238, simple_loss=0.3727, pruned_loss=0.1375, over 1597158.95 frames. ], batch size: 71, lr: 2.09e-02, grad_scale: 8.0 +2023-02-05 22:38:30,754 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.328e+02 4.149e+02 5.404e+02 3.135e+03, threshold=8.298e+02, percent-clipped=6.0 +2023-02-05 22:38:30,985 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:38:44,820 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1263, 2.7771, 3.9348, 3.2146, 3.0575, 2.2642, 1.4843, 1.9689], + device='cuda:2'), covar=tensor([0.0871, 0.1158, 0.0237, 0.0505, 0.0590, 0.0581, 0.0813, 0.1136], + device='cuda:2'), in_proj_covar=tensor([0.0630, 0.0549, 0.0468, 0.0516, 0.0622, 0.0510, 0.0523, 0.0528], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:38:48,118 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24246.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:39:03,944 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 22:39:07,722 INFO [train.py:901] (2/4) Epoch 4, batch 0, loss[loss=0.3011, simple_loss=0.3579, pruned_loss=0.1222, over 8026.00 frames. ], tot_loss[loss=0.3011, simple_loss=0.3579, pruned_loss=0.1222, over 8026.00 frames. ], batch size: 22, lr: 1.96e-02, grad_scale: 8.0 +2023-02-05 22:39:07,722 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 22:39:18,719 INFO [train.py:935] (2/4) Epoch 4, validation: loss=0.2476, simple_loss=0.3384, pruned_loss=0.07836, over 944034.00 frames. 
+2023-02-05 22:39:18,719 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6564MB +2023-02-05 22:39:32,880 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.6978, 3.6689, 3.2487, 1.7623, 3.1514, 3.0984, 3.4193, 2.9311], + device='cuda:2'), covar=tensor([0.1171, 0.0666, 0.1092, 0.5078, 0.0967, 0.1239, 0.1271, 0.1101], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0247, 0.0288, 0.0371, 0.0276, 0.0233, 0.0268, 0.0216], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:39:34,134 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 22:39:52,985 INFO [train.py:901] (2/4) Epoch 4, batch 50, loss[loss=0.2869, simple_loss=0.3583, pruned_loss=0.1077, over 8491.00 frames. ], tot_loss[loss=0.3231, simple_loss=0.375, pruned_loss=0.1356, over 365837.07 frames. ], batch size: 26, lr: 1.96e-02, grad_scale: 8.0 +2023-02-05 22:40:03,957 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4029, 1.7756, 3.2740, 2.6655, 2.5747, 1.8999, 1.3475, 1.2670], + device='cuda:2'), covar=tensor([0.1165, 0.1665, 0.0267, 0.0559, 0.0610, 0.0682, 0.0871, 0.1426], + device='cuda:2'), in_proj_covar=tensor([0.0631, 0.0555, 0.0470, 0.0516, 0.0634, 0.0513, 0.0532, 0.0535], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:40:07,592 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.017e+02 3.527e+02 4.250e+02 5.116e+02 9.987e+02, threshold=8.500e+02, percent-clipped=2.0 +2023-02-05 22:40:08,999 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 22:40:27,954 INFO [train.py:901] (2/4) Epoch 4, batch 100, loss[loss=0.379, simple_loss=0.4239, pruned_loss=0.1671, over 8360.00 frames. ], tot_loss[loss=0.3244, simple_loss=0.3738, pruned_loss=0.1375, over 637599.97 frames. ], batch size: 24, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:40:31,335 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 22:40:48,362 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9156, 1.3866, 2.9918, 1.5036, 2.5771, 2.5658, 2.7345, 2.7140], + device='cuda:2'), covar=tensor([0.0346, 0.2389, 0.0476, 0.1827, 0.0921, 0.0518, 0.0366, 0.0433], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0433, 0.0324, 0.0351, 0.0413, 0.0343, 0.0325, 0.0365], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 22:41:01,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24399.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:41:02,095 INFO [train.py:901] (2/4) Epoch 4, batch 150, loss[loss=0.2991, simple_loss=0.3557, pruned_loss=0.1213, over 8141.00 frames. ], tot_loss[loss=0.3218, simple_loss=0.372, pruned_loss=0.1358, over 854350.89 frames. ], batch size: 22, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:41:17,163 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.490e+02 4.203e+02 5.614e+02 1.653e+03, threshold=8.406e+02, percent-clipped=4.0 +2023-02-05 22:41:37,211 INFO [train.py:901] (2/4) Epoch 4, batch 200, loss[loss=0.364, simple_loss=0.4092, pruned_loss=0.1594, over 8019.00 frames. 
], tot_loss[loss=0.3203, simple_loss=0.3713, pruned_loss=0.1346, over 1020946.82 frames. ], batch size: 22, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:02,656 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-05 22:42:03,947 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0130, 2.4003, 4.8065, 1.3269, 2.8339, 2.3523, 1.8669, 2.6899], + device='cuda:2'), covar=tensor([0.1118, 0.1592, 0.0454, 0.2452, 0.1175, 0.1857, 0.1147, 0.1888], + device='cuda:2'), in_proj_covar=tensor([0.0468, 0.0424, 0.0511, 0.0519, 0.0567, 0.0498, 0.0451, 0.0577], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:42:11,040 INFO [train.py:901] (2/4) Epoch 4, batch 250, loss[loss=0.3639, simple_loss=0.4059, pruned_loss=0.1609, over 8622.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3697, pruned_loss=0.1329, over 1153564.52 frames. ], batch size: 49, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:12,491 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4537, 1.5421, 4.2051, 2.0050, 2.4697, 4.7966, 4.6641, 4.0719], + device='cuda:2'), covar=tensor([0.1106, 0.1539, 0.0282, 0.1700, 0.0778, 0.0344, 0.0254, 0.0636], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0266, 0.0213, 0.0266, 0.0207, 0.0185, 0.0191, 0.0269], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:42:16,536 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5109, 2.0366, 1.3666, 2.6755, 1.3118, 1.2232, 1.7503, 2.1948], + device='cuda:2'), covar=tensor([0.2021, 0.1707, 0.3222, 0.0535, 0.2113, 0.3184, 0.1848, 0.1252], + device='cuda:2'), in_proj_covar=tensor([0.0294, 0.0296, 0.0305, 0.0224, 0.0285, 0.0306, 0.0320, 0.0291], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:42:20,371 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=24514.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:42:23,557 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 22:42:24,837 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.156e+02 3.531e+02 4.434e+02 5.277e+02 1.190e+03, threshold=8.868e+02, percent-clipped=4.0 +2023-02-05 22:42:31,623 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 22:42:46,001 INFO [train.py:901] (2/4) Epoch 4, batch 300, loss[loss=0.3167, simple_loss=0.381, pruned_loss=0.1262, over 8283.00 frames. ], tot_loss[loss=0.3196, simple_loss=0.3714, pruned_loss=0.134, over 1256769.72 frames. 
], batch size: 23, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:57,003 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24565.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:43:12,327 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24587.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:43:16,435 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4696, 2.0942, 2.1202, 0.7628, 2.0289, 1.3109, 0.5689, 1.7895], + device='cuda:2'), covar=tensor([0.0161, 0.0061, 0.0060, 0.0161, 0.0102, 0.0259, 0.0229, 0.0068], + device='cuda:2'), in_proj_covar=tensor([0.0256, 0.0175, 0.0143, 0.0225, 0.0170, 0.0302, 0.0239, 0.0206], + device='cuda:2'), out_proj_covar=tensor([1.1207e-04, 7.5289e-05, 6.1270e-05, 9.5059e-05, 7.5759e-05, 1.4152e-04, + 1.0535e-04, 8.8042e-05], device='cuda:2') +2023-02-05 22:43:21,552 INFO [train.py:901] (2/4) Epoch 4, batch 350, loss[loss=0.2605, simple_loss=0.3114, pruned_loss=0.1048, over 7915.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3698, pruned_loss=0.1323, over 1336906.19 frames. ], batch size: 20, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:43:31,764 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5210, 2.0137, 2.1217, 0.6280, 1.9808, 1.4557, 0.6019, 1.7378], + device='cuda:2'), covar=tensor([0.0161, 0.0060, 0.0057, 0.0163, 0.0111, 0.0237, 0.0212, 0.0079], + device='cuda:2'), in_proj_covar=tensor([0.0259, 0.0178, 0.0146, 0.0229, 0.0172, 0.0306, 0.0242, 0.0210], + device='cuda:2'), out_proj_covar=tensor([1.1364e-04, 7.6424e-05, 6.2785e-05, 9.6814e-05, 7.6290e-05, 1.4319e-04, + 1.0660e-04, 8.9545e-05], device='cuda:2') +2023-02-05 22:43:34,090 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 22:43:35,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 3.300e+02 4.421e+02 5.071e+02 1.044e+03, threshold=8.841e+02, percent-clipped=4.0 +2023-02-05 22:43:52,422 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 22:43:56,477 INFO [train.py:901] (2/4) Epoch 4, batch 400, loss[loss=0.3465, simple_loss=0.3954, pruned_loss=0.1488, over 8553.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.3708, pruned_loss=0.1328, over 1396852.00 frames. ], batch size: 39, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:44:04,951 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-05 22:44:30,026 INFO [train.py:901] (2/4) Epoch 4, batch 450, loss[loss=0.3246, simple_loss=0.3724, pruned_loss=0.1384, over 7930.00 frames. ], tot_loss[loss=0.3199, simple_loss=0.3726, pruned_loss=0.1336, over 1447405.54 frames. ], batch size: 20, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:44:44,808 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.414e+02 4.548e+02 5.600e+02 1.007e+03, threshold=9.096e+02, percent-clipped=5.0 +2023-02-05 22:45:04,969 INFO [train.py:901] (2/4) Epoch 4, batch 500, loss[loss=0.301, simple_loss=0.3566, pruned_loss=0.1227, over 8242.00 frames. ], tot_loss[loss=0.321, simple_loss=0.3737, pruned_loss=0.1341, over 1486425.30 frames. 
], batch size: 22, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:45:19,918 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24770.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:28,219 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24783.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:36,834 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24795.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:40,053 INFO [train.py:901] (2/4) Epoch 4, batch 550, loss[loss=0.3306, simple_loss=0.382, pruned_loss=0.1397, over 8129.00 frames. ], tot_loss[loss=0.321, simple_loss=0.3743, pruned_loss=0.1338, over 1520617.20 frames. ], batch size: 22, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:45:50,772 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7140, 2.0395, 2.9213, 1.2130, 2.7887, 1.5980, 1.4149, 1.8563], + device='cuda:2'), covar=tensor([0.0231, 0.0102, 0.0086, 0.0186, 0.0110, 0.0275, 0.0240, 0.0125], + device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0174, 0.0142, 0.0222, 0.0167, 0.0300, 0.0235, 0.0207], + device='cuda:2'), out_proj_covar=tensor([1.1079e-04, 7.4644e-05, 6.0301e-05, 9.3264e-05, 7.3447e-05, 1.4024e-04, + 1.0275e-04, 8.7701e-05], device='cuda:2') +2023-02-05 22:45:53,858 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 3.369e+02 4.426e+02 5.591e+02 8.767e+02, threshold=8.852e+02, percent-clipped=0.0 +2023-02-05 22:46:08,701 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5704, 1.8408, 1.6031, 2.3752, 1.2623, 1.2209, 1.5462, 2.0627], + device='cuda:2'), covar=tensor([0.1257, 0.1411, 0.1787, 0.0609, 0.1848, 0.2683, 0.1917, 0.1015], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0299, 0.0309, 0.0227, 0.0278, 0.0308, 0.0323, 0.0294], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:46:13,959 INFO [train.py:901] (2/4) Epoch 4, batch 600, loss[loss=0.3192, simple_loss=0.3717, pruned_loss=0.1333, over 8447.00 frames. ], tot_loss[loss=0.3201, simple_loss=0.374, pruned_loss=0.1331, over 1547852.35 frames. ], batch size: 49, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:46:16,993 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 22:46:24,941 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24866.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:46:28,959 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 22:46:43,929 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9019, 2.3406, 1.8630, 2.7374, 1.6016, 1.3826, 1.7767, 2.3080], + device='cuda:2'), covar=tensor([0.1253, 0.1239, 0.1598, 0.0543, 0.1741, 0.2597, 0.1874, 0.1148], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0300, 0.0306, 0.0226, 0.0275, 0.0310, 0.0317, 0.0294], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:46:49,153 INFO [train.py:901] (2/4) Epoch 4, batch 650, loss[loss=0.3044, simple_loss=0.369, pruned_loss=0.1199, over 8475.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3717, pruned_loss=0.132, over 1560888.10 frames. 
], batch size: 25, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:46:55,190 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24909.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:47:03,759 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 3.310e+02 4.230e+02 5.108e+02 1.167e+03, threshold=8.459e+02, percent-clipped=4.0 +2023-02-05 22:47:10,602 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24931.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:47:24,033 INFO [train.py:901] (2/4) Epoch 4, batch 700, loss[loss=0.3146, simple_loss=0.3764, pruned_loss=0.1264, over 8774.00 frames. ], tot_loss[loss=0.3188, simple_loss=0.3727, pruned_loss=0.1324, over 1576635.04 frames. ], batch size: 40, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:47:59,149 INFO [train.py:901] (2/4) Epoch 4, batch 750, loss[loss=0.3419, simple_loss=0.395, pruned_loss=0.1444, over 8658.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3714, pruned_loss=0.1313, over 1589679.47 frames. ], batch size: 34, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:48:08,254 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25013.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:13,342 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.175e+02 4.108e+02 5.247e+02 1.235e+03, threshold=8.217e+02, percent-clipped=4.0 +2023-02-05 22:48:14,035 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 22:48:15,517 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:22,616 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 22:48:30,669 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25046.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:33,225 INFO [train.py:901] (2/4) Epoch 4, batch 800, loss[loss=0.3102, simple_loss=0.3685, pruned_loss=0.1259, over 8462.00 frames. ], tot_loss[loss=0.3174, simple_loss=0.3712, pruned_loss=0.1318, over 1594933.26 frames. ], batch size: 27, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:48:50,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1825, 4.1975, 3.6835, 1.8535, 3.6939, 3.6124, 3.9127, 3.3450], + device='cuda:2'), covar=tensor([0.0654, 0.0469, 0.0931, 0.3969, 0.0699, 0.0820, 0.1066, 0.0798], + device='cuda:2'), in_proj_covar=tensor([0.0365, 0.0243, 0.0289, 0.0372, 0.0280, 0.0228, 0.0263, 0.0213], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:49:01,544 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-05 22:49:06,962 INFO [train.py:901] (2/4) Epoch 4, batch 850, loss[loss=0.2899, simple_loss=0.3424, pruned_loss=0.1187, over 7448.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3702, pruned_loss=0.1319, over 1597413.04 frames. 
], batch size: 17, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:49:11,901 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4012, 2.1070, 2.3053, 1.0462, 2.2790, 1.5868, 0.7790, 1.8753], + device='cuda:2'), covar=tensor([0.0174, 0.0062, 0.0057, 0.0145, 0.0099, 0.0215, 0.0213, 0.0082], + device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0176, 0.0143, 0.0220, 0.0168, 0.0297, 0.0240, 0.0208], + device='cuda:2'), out_proj_covar=tensor([1.0997e-04, 7.4906e-05, 6.0257e-05, 9.1788e-05, 7.3460e-05, 1.3828e-04, + 1.0417e-04, 8.7632e-05], device='cuda:2') +2023-02-05 22:49:22,453 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 3.301e+02 4.277e+02 5.478e+02 1.022e+03, threshold=8.554e+02, percent-clipped=4.0 +2023-02-05 22:49:26,600 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:49:28,074 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0664, 1.5213, 1.4254, 1.1412, 1.3559, 1.3016, 1.4335, 1.4933], + device='cuda:2'), covar=tensor([0.0753, 0.1473, 0.2231, 0.1765, 0.0796, 0.1966, 0.0944, 0.0711], + device='cuda:2'), in_proj_covar=tensor([0.0169, 0.0204, 0.0247, 0.0206, 0.0167, 0.0212, 0.0171, 0.0175], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:49:42,455 INFO [train.py:901] (2/4) Epoch 4, batch 900, loss[loss=0.3238, simple_loss=0.3793, pruned_loss=0.1342, over 8645.00 frames. ], tot_loss[loss=0.317, simple_loss=0.3702, pruned_loss=0.1319, over 1601826.08 frames. ], batch size: 49, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:49:42,805 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-05 22:50:16,545 INFO [train.py:901] (2/4) Epoch 4, batch 950, loss[loss=0.3221, simple_loss=0.3669, pruned_loss=0.1386, over 8084.00 frames. ], tot_loss[loss=0.314, simple_loss=0.3675, pruned_loss=0.1302, over 1600090.65 frames. ], batch size: 21, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:50:23,515 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25210.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:50:28,346 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6427, 1.2599, 5.5178, 2.2324, 4.9657, 4.6779, 5.3139, 5.1450], + device='cuda:2'), covar=tensor([0.0236, 0.3847, 0.0245, 0.1890, 0.0813, 0.0474, 0.0242, 0.0339], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0439, 0.0338, 0.0353, 0.0413, 0.0353, 0.0322, 0.0370], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 22:50:30,879 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.215e+02 3.501e+02 4.488e+02 5.717e+02 1.063e+03, threshold=8.976e+02, percent-clipped=5.0 +2023-02-05 22:50:40,883 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-05 22:50:46,576 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25242.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:50:49,882 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4610, 1.3992, 4.6132, 1.8823, 3.9620, 3.7771, 4.1587, 4.0733], + device='cuda:2'), covar=tensor([0.0378, 0.3784, 0.0308, 0.2168, 0.1044, 0.0586, 0.0392, 0.0482], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0442, 0.0341, 0.0355, 0.0413, 0.0355, 0.0326, 0.0372], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 22:50:51,550 INFO [train.py:901] (2/4) Epoch 4, batch 1000, loss[loss=0.3901, simple_loss=0.4213, pruned_loss=0.1794, over 8319.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.3692, pruned_loss=0.1313, over 1607376.30 frames. ], batch size: 25, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:51:12,236 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25280.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:13,325 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 22:51:25,837 INFO [train.py:901] (2/4) Epoch 4, batch 1050, loss[loss=0.3166, simple_loss=0.3779, pruned_loss=0.1276, over 8310.00 frames. ], tot_loss[loss=0.3166, simple_loss=0.3695, pruned_loss=0.1318, over 1605759.63 frames. ], batch size: 25, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:51:26,415 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 22:51:27,148 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25302.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:28,968 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:31,081 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.88 vs. 
limit=5.0 +2023-02-05 22:51:39,506 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 3.519e+02 4.399e+02 5.664e+02 1.146e+03, threshold=8.797e+02, percent-clipped=2.0 +2023-02-05 22:51:41,049 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2445, 1.5371, 1.4543, 1.1264, 1.5807, 1.3031, 1.6224, 1.5339], + device='cuda:2'), covar=tensor([0.0688, 0.1210, 0.2017, 0.1565, 0.0704, 0.1736, 0.0945, 0.0667], + device='cuda:2'), in_proj_covar=tensor([0.0166, 0.0202, 0.0243, 0.0202, 0.0164, 0.0209, 0.0171, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:51:42,398 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25325.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:43,771 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25327.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:53,723 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7881, 2.1036, 1.8467, 2.7739, 1.3718, 1.2673, 1.9082, 2.2976], + device='cuda:2'), covar=tensor([0.1104, 0.1238, 0.1460, 0.0362, 0.1523, 0.2184, 0.1339, 0.1012], + device='cuda:2'), in_proj_covar=tensor([0.0288, 0.0299, 0.0307, 0.0221, 0.0277, 0.0309, 0.0315, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 22:51:58,863 INFO [train.py:901] (2/4) Epoch 4, batch 1100, loss[loss=0.2619, simple_loss=0.3177, pruned_loss=0.103, over 7526.00 frames. ], tot_loss[loss=0.3185, simple_loss=0.3714, pruned_loss=0.1328, over 1612306.99 frames. ], batch size: 18, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:52:05,139 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:52:09,895 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6566, 1.7205, 2.0226, 1.6430, 1.2801, 2.0393, 0.4642, 1.1095], + device='cuda:2'), covar=tensor([0.3578, 0.2670, 0.2064, 0.3845, 0.6284, 0.1603, 0.7435, 0.2982], + device='cuda:2'), in_proj_covar=tensor([0.0114, 0.0110, 0.0082, 0.0156, 0.0188, 0.0080, 0.0153, 0.0115], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:52:27,703 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5190, 4.6084, 3.9865, 1.8057, 3.9643, 3.9865, 4.2010, 3.5742], + device='cuda:2'), covar=tensor([0.0874, 0.0456, 0.0885, 0.4714, 0.0752, 0.0787, 0.1408, 0.0797], + device='cuda:2'), in_proj_covar=tensor([0.0369, 0.0249, 0.0292, 0.0382, 0.0292, 0.0232, 0.0272, 0.0222], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 22:52:34,557 INFO [train.py:901] (2/4) Epoch 4, batch 1150, loss[loss=0.3212, simple_loss=0.3736, pruned_loss=0.1344, over 8462.00 frames. ], tot_loss[loss=0.3182, simple_loss=0.371, pruned_loss=0.1327, over 1608618.05 frames. ], batch size: 49, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:52:37,395 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-05 22:52:49,211 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.195e+02 3.278e+02 3.972e+02 4.649e+02 8.065e+02, threshold=7.944e+02, percent-clipped=0.0 +2023-02-05 22:53:06,104 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25446.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:53:08,496 INFO [train.py:901] (2/4) Epoch 4, batch 1200, loss[loss=0.2584, simple_loss=0.3292, pruned_loss=0.09382, over 7799.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3703, pruned_loss=0.1325, over 1608664.76 frames. ], batch size: 19, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:23,226 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25472.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:53:36,619 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5596, 1.7998, 1.9332, 1.5071, 1.0104, 2.0597, 0.3926, 0.9168], + device='cuda:2'), covar=tensor([0.4158, 0.2623, 0.1488, 0.2860, 0.7601, 0.1078, 0.6953, 0.2991], + device='cuda:2'), in_proj_covar=tensor([0.0119, 0.0113, 0.0086, 0.0163, 0.0194, 0.0083, 0.0158, 0.0119], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 22:53:42,019 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:53:43,163 INFO [train.py:901] (2/4) Epoch 4, batch 1250, loss[loss=0.2776, simple_loss=0.327, pruned_loss=0.1141, over 7924.00 frames. ], tot_loss[loss=0.3176, simple_loss=0.3699, pruned_loss=0.1327, over 1611679.63 frames. ], batch size: 20, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:57,794 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 3.538e+02 4.328e+02 6.105e+02 1.271e+03, threshold=8.657e+02, percent-clipped=4.0 +2023-02-05 22:53:59,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25523.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:06,838 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-05 22:54:18,017 INFO [train.py:901] (2/4) Epoch 4, batch 1300, loss[loss=0.3877, simple_loss=0.4299, pruned_loss=0.1728, over 8540.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3698, pruned_loss=0.1324, over 1611551.30 frames. ], batch size: 31, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:39,386 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25581.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:53,160 INFO [train.py:901] (2/4) Epoch 4, batch 1350, loss[loss=0.3494, simple_loss=0.3972, pruned_loss=0.1508, over 8592.00 frames. ], tot_loss[loss=0.3179, simple_loss=0.371, pruned_loss=0.1324, over 1613095.85 frames. ], batch size: 31, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:57,488 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:55:08,861 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.283e+02 4.098e+02 5.393e+02 1.175e+03, threshold=8.196e+02, percent-clipped=3.0 +2023-02-05 22:55:28,873 INFO [train.py:901] (2/4) Epoch 4, batch 1400, loss[loss=0.3071, simple_loss=0.3619, pruned_loss=0.1261, over 8493.00 frames. ], tot_loss[loss=0.3173, simple_loss=0.3704, pruned_loss=0.1321, over 1614641.34 frames. 
], batch size: 49, lr: 1.91e-02, grad_scale: 8.0 +2023-02-05 22:55:29,807 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2964, 1.8262, 1.8097, 0.5523, 1.8603, 1.2238, 0.4792, 1.5941], + device='cuda:2'), covar=tensor([0.0152, 0.0072, 0.0070, 0.0168, 0.0079, 0.0286, 0.0228, 0.0071], + device='cuda:2'), in_proj_covar=tensor([0.0256, 0.0184, 0.0146, 0.0226, 0.0169, 0.0301, 0.0252, 0.0211], + device='cuda:2'), out_proj_covar=tensor([1.0935e-04, 7.7768e-05, 6.0936e-05, 9.3158e-05, 7.2259e-05, 1.3835e-04, + 1.0928e-04, 8.8565e-05], device='cuda:2') +2023-02-05 22:56:03,160 INFO [train.py:901] (2/4) Epoch 4, batch 1450, loss[loss=0.2987, simple_loss=0.3486, pruned_loss=0.1244, over 7788.00 frames. ], tot_loss[loss=0.3154, simple_loss=0.3687, pruned_loss=0.131, over 1613631.09 frames. ], batch size: 19, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:05,847 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 22:56:14,685 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 22:56:18,904 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.243e+02 3.964e+02 4.847e+02 1.034e+03, threshold=7.929e+02, percent-clipped=2.0 +2023-02-05 22:56:23,195 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:56:38,595 INFO [train.py:901] (2/4) Epoch 4, batch 1500, loss[loss=0.2873, simple_loss=0.35, pruned_loss=0.1123, over 8077.00 frames. ], tot_loss[loss=0.3153, simple_loss=0.3686, pruned_loss=0.131, over 1615469.80 frames. ], batch size: 21, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:40,765 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:57:05,989 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25790.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:57:12,574 INFO [train.py:901] (2/4) Epoch 4, batch 1550, loss[loss=0.2707, simple_loss=0.3416, pruned_loss=0.09994, over 8498.00 frames. ], tot_loss[loss=0.3139, simple_loss=0.3677, pruned_loss=0.13, over 1612763.09 frames. ], batch size: 28, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:57:20,794 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.71 vs. limit=5.0 +2023-02-05 22:57:27,006 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 3.100e+02 3.836e+02 5.066e+02 1.009e+03, threshold=7.672e+02, percent-clipped=5.0 +2023-02-05 22:57:46,740 INFO [train.py:901] (2/4) Epoch 4, batch 1600, loss[loss=0.3198, simple_loss=0.3676, pruned_loss=0.136, over 7821.00 frames. ], tot_loss[loss=0.3153, simple_loss=0.3688, pruned_loss=0.1309, over 1614571.71 frames. ], batch size: 20, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:04,753 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25876.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:58:21,019 INFO [train.py:901] (2/4) Epoch 4, batch 1650, loss[loss=0.3951, simple_loss=0.4226, pruned_loss=0.1838, over 7149.00 frames. ], tot_loss[loss=0.3149, simple_loss=0.3679, pruned_loss=0.1309, over 1606248.51 frames. 
], batch size: 71, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:24,588 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25905.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:58:35,953 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.823e+02 4.768e+02 5.766e+02 1.707e+03, threshold=9.535e+02, percent-clipped=9.0 +2023-02-05 22:58:56,117 INFO [train.py:901] (2/4) Epoch 4, batch 1700, loss[loss=0.3178, simple_loss=0.3689, pruned_loss=0.1333, over 8495.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3683, pruned_loss=0.1309, over 1600160.83 frames. ], batch size: 26, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:59:14,220 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.46 vs. limit=5.0 +2023-02-05 22:59:31,188 INFO [train.py:901] (2/4) Epoch 4, batch 1750, loss[loss=0.2703, simple_loss=0.3302, pruned_loss=0.1052, over 7824.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3683, pruned_loss=0.1308, over 1606282.16 frames. ], batch size: 20, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 22:59:35,850 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1015, 1.6169, 3.2868, 1.3897, 2.1769, 3.7145, 3.5391, 3.2163], + device='cuda:2'), covar=tensor([0.1089, 0.1458, 0.0391, 0.2071, 0.0863, 0.0266, 0.0326, 0.0599], + device='cuda:2'), in_proj_covar=tensor([0.0236, 0.0266, 0.0212, 0.0277, 0.0217, 0.0194, 0.0196, 0.0266], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 22:59:47,002 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.187e+02 3.816e+02 4.801e+02 8.317e+02, threshold=7.632e+02, percent-clipped=0.0 +2023-02-05 23:00:06,097 INFO [train.py:901] (2/4) Epoch 4, batch 1800, loss[loss=0.2887, simple_loss=0.3349, pruned_loss=0.1213, over 7704.00 frames. ], tot_loss[loss=0.3141, simple_loss=0.3672, pruned_loss=0.1305, over 1604689.85 frames. ], batch size: 18, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:41,282 INFO [train.py:901] (2/4) Epoch 4, batch 1850, loss[loss=0.3179, simple_loss=0.3796, pruned_loss=0.1281, over 8503.00 frames. ], tot_loss[loss=0.3126, simple_loss=0.3664, pruned_loss=0.1294, over 1607075.80 frames. ], batch size: 26, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:55,439 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:00:56,608 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.260e+02 3.379e+02 4.261e+02 5.084e+02 1.608e+03, threshold=8.521e+02, percent-clipped=6.0 +2023-02-05 23:01:15,412 INFO [train.py:901] (2/4) Epoch 4, batch 1900, loss[loss=0.3001, simple_loss=0.3399, pruned_loss=0.1301, over 7787.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.3656, pruned_loss=0.129, over 1610720.67 frames. ], batch size: 19, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:22,879 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26161.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:01:29,033 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. 
limit=5.0 +2023-02-05 23:01:40,600 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:01:40,662 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26186.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:01:41,083 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 23:01:49,628 INFO [train.py:901] (2/4) Epoch 4, batch 1950, loss[loss=0.3514, simple_loss=0.4022, pruned_loss=0.1503, over 8545.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3662, pruned_loss=0.1294, over 1613397.18 frames. ], batch size: 49, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:52,431 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 23:02:04,048 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26220.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:02:05,152 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.303e+02 3.684e+02 4.572e+02 6.046e+02 1.247e+03, threshold=9.144e+02, percent-clipped=2.0 +2023-02-05 23:02:10,406 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 23:02:22,285 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 23:02:24,334 INFO [train.py:901] (2/4) Epoch 4, batch 2000, loss[loss=0.3698, simple_loss=0.4134, pruned_loss=0.1631, over 8030.00 frames. ], tot_loss[loss=0.3127, simple_loss=0.3665, pruned_loss=0.1295, over 1615926.87 frames. ], batch size: 22, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:02:36,631 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:02:59,488 INFO [train.py:901] (2/4) Epoch 4, batch 2050, loss[loss=0.2981, simple_loss=0.3627, pruned_loss=0.1167, over 8103.00 frames. ], tot_loss[loss=0.3141, simple_loss=0.3675, pruned_loss=0.1304, over 1618516.98 frames. ], batch size: 23, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:03:14,377 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.223e+02 3.433e+02 4.198e+02 5.260e+02 1.263e+03, threshold=8.396e+02, percent-clipped=5.0 +2023-02-05 23:03:14,623 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6531, 1.7920, 2.0379, 1.5471, 0.8512, 1.9652, 0.3664, 1.2768], + device='cuda:2'), covar=tensor([0.3874, 0.2025, 0.0875, 0.2384, 0.7274, 0.0978, 0.6407, 0.2540], + device='cuda:2'), in_proj_covar=tensor([0.0121, 0.0110, 0.0082, 0.0159, 0.0191, 0.0085, 0.0150, 0.0119], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:03:24,280 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26335.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:03:34,743 INFO [train.py:901] (2/4) Epoch 4, batch 2100, loss[loss=0.2548, simple_loss=0.3253, pruned_loss=0.0922, over 8089.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3682, pruned_loss=0.1309, over 1617834.26 frames. ], batch size: 21, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:08,106 INFO [train.py:901] (2/4) Epoch 4, batch 2150, loss[loss=0.3058, simple_loss=0.3697, pruned_loss=0.1209, over 8025.00 frames. ], tot_loss[loss=0.3148, simple_loss=0.3683, pruned_loss=0.1307, over 1618645.20 frames. 
], batch size: 22, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:24,393 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.431e+02 3.407e+02 4.210e+02 5.616e+02 1.521e+03, threshold=8.419e+02, percent-clipped=4.0 +2023-02-05 23:04:31,146 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:04:43,605 INFO [train.py:901] (2/4) Epoch 4, batch 2200, loss[loss=0.2715, simple_loss=0.3407, pruned_loss=0.1012, over 8517.00 frames. ], tot_loss[loss=0.3138, simple_loss=0.3679, pruned_loss=0.1299, over 1621762.02 frames. ], batch size: 28, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:53,163 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:18,123 INFO [train.py:901] (2/4) Epoch 4, batch 2250, loss[loss=0.3501, simple_loss=0.3973, pruned_loss=0.1514, over 8508.00 frames. ], tot_loss[loss=0.3144, simple_loss=0.3678, pruned_loss=0.1305, over 1615511.80 frames. ], batch size: 28, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:05:25,219 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3849, 1.9794, 2.1826, 0.8263, 2.1937, 1.5225, 0.6542, 1.7672], + device='cuda:2'), covar=tensor([0.0188, 0.0077, 0.0081, 0.0162, 0.0095, 0.0249, 0.0221, 0.0081], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0193, 0.0156, 0.0231, 0.0183, 0.0311, 0.0254, 0.0218], + device='cuda:2'), out_proj_covar=tensor([1.1417e-04, 8.0643e-05, 6.3799e-05, 9.4011e-05, 7.7814e-05, 1.4057e-04, + 1.0803e-04, 8.9445e-05], device='cuda:2') +2023-02-05 23:05:33,090 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.188e+02 3.857e+02 4.748e+02 9.287e+02, threshold=7.714e+02, percent-clipped=1.0 +2023-02-05 23:05:38,936 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26530.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:52,824 INFO [train.py:901] (2/4) Epoch 4, batch 2300, loss[loss=0.2805, simple_loss=0.3324, pruned_loss=0.1143, over 6789.00 frames. ], tot_loss[loss=0.3146, simple_loss=0.3679, pruned_loss=0.1306, over 1613745.33 frames. ], batch size: 15, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:12,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:21,882 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26591.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:06:27,546 INFO [train.py:901] (2/4) Epoch 4, batch 2350, loss[loss=0.4086, simple_loss=0.4307, pruned_loss=0.1933, over 7210.00 frames. ], tot_loss[loss=0.3139, simple_loss=0.3674, pruned_loss=0.1302, over 1610550.18 frames. 
], batch size: 72, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:35,960 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:38,760 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26616.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:06:42,410 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.505e+02 4.841e+02 5.770e+02 1.247e+03, threshold=9.683e+02, percent-clipped=6.0 +2023-02-05 23:06:58,652 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26645.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:07:00,513 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2517, 1.3127, 2.2810, 1.1360, 2.1547, 2.4682, 2.4547, 2.0758], + device='cuda:2'), covar=tensor([0.0912, 0.1072, 0.0451, 0.1683, 0.0479, 0.0333, 0.0371, 0.0659], + device='cuda:2'), in_proj_covar=tensor([0.0222, 0.0257, 0.0205, 0.0262, 0.0208, 0.0186, 0.0194, 0.0259], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:07:01,512 INFO [train.py:901] (2/4) Epoch 4, batch 2400, loss[loss=0.3169, simple_loss=0.3697, pruned_loss=0.1321, over 7929.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3701, pruned_loss=0.1319, over 1615571.09 frames. ], batch size: 20, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:26,619 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1534, 1.1903, 4.3761, 1.6370, 3.6203, 3.5542, 3.8171, 3.7129], + device='cuda:2'), covar=tensor([0.0378, 0.3695, 0.0317, 0.2459, 0.0937, 0.0563, 0.0467, 0.0503], + device='cuda:2'), in_proj_covar=tensor([0.0274, 0.0450, 0.0351, 0.0368, 0.0433, 0.0366, 0.0347, 0.0384], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:07:37,131 INFO [train.py:901] (2/4) Epoch 4, batch 2450, loss[loss=0.3072, simple_loss=0.3739, pruned_loss=0.1202, over 8335.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3699, pruned_loss=0.1317, over 1615814.21 frames. ], batch size: 25, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:48,166 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-05 23:07:51,856 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.211e+02 4.300e+02 5.616e+02 1.854e+03, threshold=8.599e+02, percent-clipped=7.0 +2023-02-05 23:07:55,325 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26727.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:10,583 INFO [train.py:901] (2/4) Epoch 4, batch 2500, loss[loss=0.3255, simple_loss=0.3792, pruned_loss=0.1359, over 8030.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.3688, pruned_loss=0.1308, over 1612083.40 frames. 
], batch size: 22, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:08:24,858 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1543, 1.9079, 2.9441, 2.4621, 2.4096, 1.8856, 1.3670, 1.0491], + device='cuda:2'), covar=tensor([0.1455, 0.1491, 0.0343, 0.0671, 0.0700, 0.0804, 0.0936, 0.1586], + device='cuda:2'), in_proj_covar=tensor([0.0663, 0.0590, 0.0489, 0.0565, 0.0675, 0.0546, 0.0546, 0.0556], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:08:29,250 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:45,150 INFO [train.py:901] (2/4) Epoch 4, batch 2550, loss[loss=0.2528, simple_loss=0.3093, pruned_loss=0.09817, over 7434.00 frames. ], tot_loss[loss=0.3149, simple_loss=0.3683, pruned_loss=0.1308, over 1613364.23 frames. ], batch size: 17, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:09:01,300 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.235e+02 3.301e+02 4.146e+02 5.074e+02 1.055e+03, threshold=8.293e+02, percent-clipped=2.0 +2023-02-05 23:09:10,582 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26835.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:16,260 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-05 23:09:20,514 INFO [train.py:901] (2/4) Epoch 4, batch 2600, loss[loss=0.251, simple_loss=0.322, pruned_loss=0.09, over 7800.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3677, pruned_loss=0.1298, over 1614236.57 frames. ], batch size: 20, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:27,765 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26860.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:50,369 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:56,423 INFO [train.py:901] (2/4) Epoch 4, batch 2650, loss[loss=0.2871, simple_loss=0.3573, pruned_loss=0.1084, over 7640.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3658, pruned_loss=0.1284, over 1609728.97 frames. ], batch size: 19, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:57,280 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:12,341 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 3.245e+02 3.916e+02 5.024e+02 1.006e+03, threshold=7.831e+02, percent-clipped=3.0 +2023-02-05 23:10:12,506 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26922.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:10:15,318 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26926.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:32,094 INFO [train.py:901] (2/4) Epoch 4, batch 2700, loss[loss=0.3496, simple_loss=0.3851, pruned_loss=0.1571, over 8354.00 frames. ], tot_loss[loss=0.3129, simple_loss=0.3667, pruned_loss=0.1296, over 1608789.96 frames. 
], batch size: 24, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:10:44,345 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26968.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:45,657 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5975, 1.9706, 5.7493, 2.1141, 5.0463, 4.6955, 5.2789, 5.1508], + device='cuda:2'), covar=tensor([0.0404, 0.3152, 0.0183, 0.2151, 0.0813, 0.0433, 0.0296, 0.0342], + device='cuda:2'), in_proj_covar=tensor([0.0280, 0.0458, 0.0356, 0.0372, 0.0445, 0.0368, 0.0352, 0.0394], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:10:47,100 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8779, 2.2649, 3.9214, 3.0702, 3.1346, 2.0422, 1.4919, 1.6842], + device='cuda:2'), covar=tensor([0.1202, 0.1661, 0.0321, 0.0704, 0.0727, 0.0785, 0.0824, 0.1625], + device='cuda:2'), in_proj_covar=tensor([0.0668, 0.0594, 0.0494, 0.0564, 0.0679, 0.0546, 0.0547, 0.0561], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:10:54,314 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26983.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:05,897 INFO [train.py:901] (2/4) Epoch 4, batch 2750, loss[loss=0.3148, simple_loss=0.3552, pruned_loss=0.1372, over 7800.00 frames. ], tot_loss[loss=0.3128, simple_loss=0.3665, pruned_loss=0.1295, over 1605739.83 frames. ], batch size: 19, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:12,363 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:21,382 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 3.589e+02 4.354e+02 5.460e+02 1.197e+03, threshold=8.707e+02, percent-clipped=9.0 +2023-02-05 23:11:40,845 INFO [train.py:901] (2/4) Epoch 4, batch 2800, loss[loss=0.3061, simple_loss=0.3709, pruned_loss=0.1207, over 8258.00 frames. ], tot_loss[loss=0.3135, simple_loss=0.3672, pruned_loss=0.1299, over 1609820.39 frames. ], batch size: 24, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:52,397 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3625, 1.5401, 1.5879, 1.2883, 0.8860, 1.6307, 0.0636, 1.0711], + device='cuda:2'), covar=tensor([0.4288, 0.2391, 0.1564, 0.2043, 0.6177, 0.1197, 0.6025, 0.2548], + device='cuda:2'), in_proj_covar=tensor([0.0125, 0.0114, 0.0083, 0.0162, 0.0200, 0.0084, 0.0155, 0.0121], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:12:04,130 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 23:12:14,851 INFO [train.py:901] (2/4) Epoch 4, batch 2850, loss[loss=0.3494, simple_loss=0.3932, pruned_loss=0.1528, over 8506.00 frames. ], tot_loss[loss=0.314, simple_loss=0.3671, pruned_loss=0.1304, over 1607061.07 frames. 
], batch size: 26, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:12:30,250 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 3.374e+02 4.464e+02 5.831e+02 1.992e+03, threshold=8.927e+02, percent-clipped=6.0 +2023-02-05 23:12:30,479 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2869, 1.6706, 1.5154, 0.4988, 1.5397, 1.2692, 0.2454, 1.5608], + device='cuda:2'), covar=tensor([0.0178, 0.0090, 0.0080, 0.0158, 0.0115, 0.0283, 0.0251, 0.0077], + device='cuda:2'), in_proj_covar=tensor([0.0265, 0.0191, 0.0158, 0.0225, 0.0183, 0.0309, 0.0255, 0.0214], + device='cuda:2'), out_proj_covar=tensor([1.1074e-04, 7.8909e-05, 6.3577e-05, 9.0078e-05, 7.7392e-05, 1.3781e-04, + 1.0779e-04, 8.6775e-05], device='cuda:2') +2023-02-05 23:12:47,569 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:12:49,277 INFO [train.py:901] (2/4) Epoch 4, batch 2900, loss[loss=0.3301, simple_loss=0.3763, pruned_loss=0.1419, over 7429.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3676, pruned_loss=0.1312, over 1603831.89 frames. ], batch size: 17, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:00,624 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27166.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:05,282 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:11,805 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 23:13:24,074 INFO [train.py:901] (2/4) Epoch 4, batch 2950, loss[loss=0.2905, simple_loss=0.3586, pruned_loss=0.1112, over 8082.00 frames. ], tot_loss[loss=0.3146, simple_loss=0.3672, pruned_loss=0.131, over 1605416.53 frames. ], batch size: 21, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:38,737 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 3.092e+02 3.649e+02 5.055e+02 1.216e+03, threshold=7.299e+02, percent-clipped=3.0 +2023-02-05 23:13:58,832 INFO [train.py:901] (2/4) Epoch 4, batch 3000, loss[loss=0.4599, simple_loss=0.4529, pruned_loss=0.2335, over 7477.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3662, pruned_loss=0.1299, over 1604771.34 frames. ], batch size: 73, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:58,832 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 23:14:11,273 INFO [train.py:935] (2/4) Epoch 4, validation: loss=0.2374, simple_loss=0.3304, pruned_loss=0.07225, over 944034.00 frames. +2023-02-05 23:14:11,274 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6599MB +2023-02-05 23:14:23,016 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27266.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:14:45,721 INFO [train.py:901] (2/4) Epoch 4, batch 3050, loss[loss=0.3566, simple_loss=0.4055, pruned_loss=0.1539, over 8581.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.3655, pruned_loss=0.1289, over 1608633.68 frames. 
], batch size: 31, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:14:54,658 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27312.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:14:55,443 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0828, 1.2443, 3.1642, 0.9101, 2.7101, 2.6911, 2.9253, 2.8413], + device='cuda:2'), covar=tensor([0.0509, 0.2840, 0.0575, 0.2336, 0.1352, 0.0768, 0.0576, 0.0694], + device='cuda:2'), in_proj_covar=tensor([0.0282, 0.0458, 0.0357, 0.0371, 0.0447, 0.0368, 0.0364, 0.0402], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:15:01,945 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.415e+02 4.317e+02 5.768e+02 1.933e+03, threshold=8.634e+02, percent-clipped=10.0 +2023-02-05 23:15:20,630 INFO [train.py:901] (2/4) Epoch 4, batch 3100, loss[loss=0.3306, simple_loss=0.3775, pruned_loss=0.1418, over 7695.00 frames. ], tot_loss[loss=0.3117, simple_loss=0.3656, pruned_loss=0.1289, over 1611171.28 frames. ], batch size: 76, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:15:41,815 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27381.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:15:54,817 INFO [train.py:901] (2/4) Epoch 4, batch 3150, loss[loss=0.29, simple_loss=0.3412, pruned_loss=0.1194, over 7177.00 frames. ], tot_loss[loss=0.3114, simple_loss=0.3654, pruned_loss=0.1287, over 1607839.92 frames. ], batch size: 16, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:16:09,489 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 3.237e+02 4.041e+02 5.193e+02 1.210e+03, threshold=8.082e+02, percent-clipped=3.0 +2023-02-05 23:16:13,659 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:16:16,356 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8755, 2.1231, 3.4101, 1.3178, 2.5876, 2.1650, 1.9705, 2.2932], + device='cuda:2'), covar=tensor([0.0876, 0.1229, 0.0309, 0.2104, 0.0835, 0.1352, 0.0852, 0.1254], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0417, 0.0499, 0.0512, 0.0560, 0.0494, 0.0430, 0.0560], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:16:29,612 INFO [train.py:901] (2/4) Epoch 4, batch 3200, loss[loss=0.2361, simple_loss=0.3011, pruned_loss=0.08549, over 7326.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3658, pruned_loss=0.1291, over 1608985.94 frames. ], batch size: 16, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:16:50,835 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8420, 2.7484, 2.4928, 1.5484, 2.4825, 2.4693, 2.5743, 2.2075], + device='cuda:2'), covar=tensor([0.1245, 0.1026, 0.1225, 0.4062, 0.1074, 0.1091, 0.1518, 0.1046], + device='cuda:2'), in_proj_covar=tensor([0.0366, 0.0260, 0.0301, 0.0383, 0.0290, 0.0238, 0.0282, 0.0221], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 23:17:03,119 INFO [train.py:901] (2/4) Epoch 4, batch 3250, loss[loss=0.3038, simple_loss=0.3543, pruned_loss=0.1266, over 7430.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3667, pruned_loss=0.1291, over 1615289.42 frames. 
], batch size: 17, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:17:07,514 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 23:17:10,580 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:17:18,422 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 3.449e+02 4.059e+02 4.930e+02 7.939e+02, threshold=8.117e+02, percent-clipped=0.0 +2023-02-05 23:17:37,477 INFO [train.py:901] (2/4) Epoch 4, batch 3300, loss[loss=0.2972, simple_loss=0.3606, pruned_loss=0.1169, over 8294.00 frames. ], tot_loss[loss=0.3103, simple_loss=0.3647, pruned_loss=0.1279, over 1609533.93 frames. ], batch size: 23, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:12,287 INFO [train.py:901] (2/4) Epoch 4, batch 3350, loss[loss=0.2735, simple_loss=0.3252, pruned_loss=0.1109, over 7810.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3644, pruned_loss=0.1274, over 1612580.35 frames. ], batch size: 20, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:12,670 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2144, 1.9550, 2.8706, 2.3484, 2.3728, 1.9950, 1.4369, 1.0170], + device='cuda:2'), covar=tensor([0.1307, 0.1365, 0.0324, 0.0672, 0.0607, 0.0660, 0.0835, 0.1396], + device='cuda:2'), in_proj_covar=tensor([0.0670, 0.0605, 0.0510, 0.0570, 0.0678, 0.0556, 0.0557, 0.0564], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:18:28,393 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 3.326e+02 4.176e+02 5.439e+02 1.733e+03, threshold=8.353e+02, percent-clipped=9.0 +2023-02-05 23:18:30,492 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27625.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:18:38,368 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27637.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:18:46,470 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4252, 1.8724, 1.8743, 0.8781, 1.8809, 1.3511, 0.3836, 1.6839], + device='cuda:2'), covar=tensor([0.0186, 0.0101, 0.0091, 0.0151, 0.0098, 0.0305, 0.0266, 0.0073], + device='cuda:2'), in_proj_covar=tensor([0.0267, 0.0196, 0.0157, 0.0227, 0.0182, 0.0313, 0.0255, 0.0217], + device='cuda:2'), out_proj_covar=tensor([1.1082e-04, 8.0562e-05, 6.2599e-05, 9.0173e-05, 7.5884e-05, 1.3866e-04, + 1.0689e-04, 8.7278e-05], device='cuda:2') +2023-02-05 23:18:46,885 INFO [train.py:901] (2/4) Epoch 4, batch 3400, loss[loss=0.3182, simple_loss=0.378, pruned_loss=0.1292, over 7935.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.365, pruned_loss=0.1281, over 1611997.90 frames. 
], batch size: 20, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:18:55,799 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27662.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:19:10,318 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27683.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:10,336 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4564, 1.8425, 3.2107, 1.0596, 2.2843, 1.8246, 1.5606, 1.8441], + device='cuda:2'), covar=tensor([0.1388, 0.1639, 0.0604, 0.2774, 0.1285, 0.2048, 0.1378, 0.1879], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0428, 0.0512, 0.0518, 0.0567, 0.0495, 0.0443, 0.0568], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:19:21,470 INFO [train.py:901] (2/4) Epoch 4, batch 3450, loss[loss=0.2952, simple_loss=0.3551, pruned_loss=0.1177, over 8285.00 frames. ], tot_loss[loss=0.3108, simple_loss=0.3652, pruned_loss=0.1282, over 1613750.03 frames. ], batch size: 23, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:19:26,946 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27708.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:36,059 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.499e+02 3.357e+02 4.072e+02 5.275e+02 9.264e+02, threshold=8.144e+02, percent-clipped=1.0 +2023-02-05 23:19:40,426 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 23:19:43,549 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27732.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:19:46,192 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.3829, 5.3356, 4.8680, 2.1195, 4.7839, 4.9752, 4.9231, 4.3181], + device='cuda:2'), covar=tensor([0.0637, 0.0389, 0.0631, 0.4155, 0.0610, 0.0596, 0.0955, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0364, 0.0257, 0.0299, 0.0383, 0.0283, 0.0233, 0.0275, 0.0216], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 23:19:55,909 INFO [train.py:901] (2/4) Epoch 4, batch 3500, loss[loss=0.2779, simple_loss=0.3324, pruned_loss=0.1117, over 7782.00 frames. ], tot_loss[loss=0.3101, simple_loss=0.3649, pruned_loss=0.1276, over 1613360.29 frames. ], batch size: 19, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:10,690 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 23:20:31,114 INFO [train.py:901] (2/4) Epoch 4, batch 3550, loss[loss=0.2927, simple_loss=0.3431, pruned_loss=0.1212, over 7639.00 frames. ], tot_loss[loss=0.3091, simple_loss=0.3641, pruned_loss=0.1271, over 1614607.14 frames. ], batch size: 19, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:46,085 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.262e+02 3.955e+02 5.254e+02 1.114e+03, threshold=7.909e+02, percent-clipped=8.0 +2023-02-05 23:21:05,473 INFO [train.py:901] (2/4) Epoch 4, batch 3600, loss[loss=0.3037, simple_loss=0.3714, pruned_loss=0.1179, over 8342.00 frames. ], tot_loss[loss=0.3098, simple_loss=0.3646, pruned_loss=0.1275, over 1609612.21 frames. 
], batch size: 26, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:27,351 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27881.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:32,203 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-05 23:21:33,264 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5920, 5.5407, 4.5843, 2.3203, 4.7807, 5.1870, 5.0568, 4.6766], + device='cuda:2'), covar=tensor([0.0755, 0.0608, 0.1002, 0.4309, 0.0668, 0.0735, 0.1133, 0.0790], + device='cuda:2'), in_proj_covar=tensor([0.0364, 0.0262, 0.0294, 0.0384, 0.0287, 0.0240, 0.0281, 0.0217], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 23:21:39,940 INFO [train.py:901] (2/4) Epoch 4, batch 3650, loss[loss=0.3041, simple_loss=0.363, pruned_loss=0.1226, over 8460.00 frames. ], tot_loss[loss=0.3092, simple_loss=0.3642, pruned_loss=0.1271, over 1611308.29 frames. ], batch size: 25, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:44,920 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27906.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:56,097 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.998e+02 3.334e+02 3.945e+02 4.811e+02 1.062e+03, threshold=7.891e+02, percent-clipped=4.0 +2023-02-05 23:22:13,494 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:22:14,786 INFO [train.py:901] (2/4) Epoch 4, batch 3700, loss[loss=0.3544, simple_loss=0.3999, pruned_loss=0.1545, over 8634.00 frames. ], tot_loss[loss=0.3109, simple_loss=0.3652, pruned_loss=0.1283, over 1607250.22 frames. ], batch size: 34, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:22:49,604 INFO [train.py:901] (2/4) Epoch 4, batch 3750, loss[loss=0.3563, simple_loss=0.4097, pruned_loss=0.1514, over 8483.00 frames. ], tot_loss[loss=0.3122, simple_loss=0.3664, pruned_loss=0.1289, over 1610502.25 frames. ], batch size: 29, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:05,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.245e+02 3.553e+02 4.442e+02 6.055e+02 1.985e+03, threshold=8.883e+02, percent-clipped=11.0 +2023-02-05 23:23:24,893 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 23:23:25,245 INFO [train.py:901] (2/4) Epoch 4, batch 3800, loss[loss=0.2972, simple_loss=0.3682, pruned_loss=0.113, over 8293.00 frames. ], tot_loss[loss=0.311, simple_loss=0.3654, pruned_loss=0.1283, over 1608394.58 frames. ], batch size: 23, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:42,796 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28076.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:24:00,305 INFO [train.py:901] (2/4) Epoch 4, batch 3850, loss[loss=0.2872, simple_loss=0.3435, pruned_loss=0.1155, over 8096.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3641, pruned_loss=0.1275, over 1606321.76 frames. ], batch size: 21, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:24:15,224 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 3.238e+02 4.124e+02 5.182e+02 9.210e+02, threshold=8.247e+02, percent-clipped=1.0 +2023-02-05 23:24:17,311 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-05 23:24:34,689 INFO [train.py:901] (2/4) Epoch 4, batch 3900, loss[loss=0.264, simple_loss=0.3171, pruned_loss=0.1054, over 7697.00 frames. ], tot_loss[loss=0.3101, simple_loss=0.3648, pruned_loss=0.1277, over 1606153.61 frames. ], batch size: 18, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:02,727 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28191.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:25:08,591 INFO [train.py:901] (2/4) Epoch 4, batch 3950, loss[loss=0.3299, simple_loss=0.3788, pruned_loss=0.1405, over 8512.00 frames. ], tot_loss[loss=0.3099, simple_loss=0.3641, pruned_loss=0.1278, over 1601837.18 frames. ], batch size: 26, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:17,220 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 23:25:24,837 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.357e+02 4.080e+02 5.453e+02 1.389e+03, threshold=8.161e+02, percent-clipped=8.0 +2023-02-05 23:25:41,193 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:25:43,104 INFO [train.py:901] (2/4) Epoch 4, batch 4000, loss[loss=0.2743, simple_loss=0.3223, pruned_loss=0.1132, over 7532.00 frames. ], tot_loss[loss=0.3103, simple_loss=0.3644, pruned_loss=0.1281, over 1603018.43 frames. ], batch size: 18, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:59,931 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28273.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:26:17,616 INFO [train.py:901] (2/4) Epoch 4, batch 4050, loss[loss=0.3013, simple_loss=0.3574, pruned_loss=0.1226, over 8126.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3642, pruned_loss=0.1275, over 1609295.22 frames. ], batch size: 22, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:26:21,158 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:26:34,456 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.482e+02 4.201e+02 5.400e+02 1.078e+03, threshold=8.403e+02, percent-clipped=4.0 +2023-02-05 23:26:52,366 INFO [train.py:901] (2/4) Epoch 4, batch 4100, loss[loss=0.2923, simple_loss=0.3681, pruned_loss=0.1083, over 8524.00 frames. ], tot_loss[loss=0.3094, simple_loss=0.3645, pruned_loss=0.1271, over 1612662.18 frames. ], batch size: 28, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:27:27,345 INFO [train.py:901] (2/4) Epoch 4, batch 4150, loss[loss=0.4092, simple_loss=0.4221, pruned_loss=0.1982, over 7098.00 frames. ], tot_loss[loss=0.3093, simple_loss=0.3644, pruned_loss=0.1271, over 1611063.33 frames. ], batch size: 72, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:27:43,618 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.222e+02 3.372e+02 4.170e+02 5.520e+02 1.384e+03, threshold=8.341e+02, percent-clipped=6.0 +2023-02-05 23:28:00,677 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28447.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:28:02,477 INFO [train.py:901] (2/4) Epoch 4, batch 4200, loss[loss=0.2592, simple_loss=0.3278, pruned_loss=0.09533, over 8018.00 frames. ], tot_loss[loss=0.3094, simple_loss=0.3647, pruned_loss=0.127, over 1610069.15 frames. 
], batch size: 22, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:07,669 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 23:28:12,245 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-05 23:28:17,491 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28472.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:28:29,056 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 23:28:36,443 INFO [train.py:901] (2/4) Epoch 4, batch 4250, loss[loss=0.3343, simple_loss=0.3881, pruned_loss=0.1402, over 8464.00 frames. ], tot_loss[loss=0.3103, simple_loss=0.3653, pruned_loss=0.1276, over 1611254.64 frames. ], batch size: 27, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:39,210 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28504.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:43,325 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:51,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 3.170e+02 4.105e+02 5.662e+02 1.430e+03, threshold=8.210e+02, percent-clipped=9.0 +2023-02-05 23:29:10,385 INFO [train.py:901] (2/4) Epoch 4, batch 4300, loss[loss=0.3003, simple_loss=0.3591, pruned_loss=0.1207, over 8738.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3648, pruned_loss=0.1272, over 1610353.14 frames. ], batch size: 39, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:38,464 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:29:45,232 INFO [train.py:901] (2/4) Epoch 4, batch 4350, loss[loss=0.3266, simple_loss=0.3563, pruned_loss=0.1484, over 7794.00 frames. ], tot_loss[loss=0.3087, simple_loss=0.364, pruned_loss=0.1267, over 1610409.11 frames. ], batch size: 19, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:57,580 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28617.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:29:58,761 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 23:30:01,427 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 3.285e+02 3.917e+02 4.771e+02 1.131e+03, threshold=7.833e+02, percent-clipped=1.0 +2023-02-05 23:30:19,077 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28649.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:30:19,682 INFO [train.py:901] (2/4) Epoch 4, batch 4400, loss[loss=0.2615, simple_loss=0.3233, pruned_loss=0.09981, over 7785.00 frames. ], tot_loss[loss=0.3059, simple_loss=0.3619, pruned_loss=0.125, over 1612134.87 frames. ], batch size: 19, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:30:41,088 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 23:30:54,262 INFO [train.py:901] (2/4) Epoch 4, batch 4450, loss[loss=0.2608, simple_loss=0.3229, pruned_loss=0.09938, over 7544.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.3623, pruned_loss=0.1252, over 1611008.58 frames. 
], batch size: 18, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:30:58,485 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:31:10,734 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.181e+02 3.229e+02 4.056e+02 4.786e+02 8.259e+02, threshold=8.113e+02, percent-clipped=1.0 +2023-02-05 23:31:17,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28732.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:31:30,205 INFO [train.py:901] (2/4) Epoch 4, batch 4500, loss[loss=0.3, simple_loss=0.3671, pruned_loss=0.1164, over 8335.00 frames. ], tot_loss[loss=0.3064, simple_loss=0.3626, pruned_loss=0.125, over 1613799.80 frames. ], batch size: 25, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:31:36,214 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 23:31:39,879 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28764.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:05,450 INFO [train.py:901] (2/4) Epoch 4, batch 4550, loss[loss=0.2766, simple_loss=0.3477, pruned_loss=0.1028, over 7802.00 frames. ], tot_loss[loss=0.3062, simple_loss=0.3624, pruned_loss=0.125, over 1614188.38 frames. ], batch size: 19, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:21,335 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.133e+02 4.046e+02 5.517e+02 1.256e+03, threshold=8.093e+02, percent-clipped=3.0 +2023-02-05 23:32:39,633 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:40,898 INFO [train.py:901] (2/4) Epoch 4, batch 4600, loss[loss=0.2928, simple_loss=0.3634, pruned_loss=0.1111, over 8491.00 frames. ], tot_loss[loss=0.3048, simple_loss=0.3608, pruned_loss=0.1244, over 1608491.16 frames. ], batch size: 29, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:43,601 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28854.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:00,447 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28879.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:10,535 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 23:33:14,777 INFO [train.py:901] (2/4) Epoch 4, batch 4650, loss[loss=0.3085, simple_loss=0.3625, pruned_loss=0.1273, over 8589.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3607, pruned_loss=0.1246, over 1609407.66 frames. ], batch size: 34, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:30,675 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 3.425e+02 4.570e+02 5.631e+02 1.457e+03, threshold=9.141e+02, percent-clipped=7.0 +2023-02-05 23:33:49,337 INFO [train.py:901] (2/4) Epoch 4, batch 4700, loss[loss=0.3279, simple_loss=0.3761, pruned_loss=0.1399, over 7050.00 frames. ], tot_loss[loss=0.3067, simple_loss=0.3625, pruned_loss=0.1255, over 1615442.82 frames. 
], batch size: 72, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:58,491 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:59,159 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28963.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:03,899 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:15,889 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:16,552 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28988.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:34:24,389 INFO [train.py:901] (2/4) Epoch 4, batch 4750, loss[loss=0.3222, simple_loss=0.378, pruned_loss=0.1332, over 8617.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3614, pruned_loss=0.1245, over 1609667.12 frames. ], batch size: 50, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:34:32,591 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4230, 1.8282, 1.5192, 1.4376, 1.4075, 1.5014, 1.6826, 1.7739], + device='cuda:2'), covar=tensor([0.0658, 0.1237, 0.1847, 0.1453, 0.0729, 0.1604, 0.0892, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0153, 0.0196, 0.0232, 0.0196, 0.0152, 0.0201, 0.0161, 0.0164], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:34:33,284 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29013.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:34:38,670 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:38,753 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:40,432 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.078e+02 3.145e+02 3.754e+02 5.040e+02 8.107e+02, threshold=7.508e+02, percent-clipped=0.0 +2023-02-05 23:34:40,461 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 23:34:42,486 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 23:34:56,216 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:58,838 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3640, 1.5128, 1.6113, 1.3022, 0.7753, 1.6621, 0.1155, 1.0166], + device='cuda:2'), covar=tensor([0.3140, 0.2466, 0.1066, 0.2384, 0.6242, 0.0911, 0.5484, 0.2401], + device='cuda:2'), in_proj_covar=tensor([0.0126, 0.0110, 0.0079, 0.0157, 0.0198, 0.0082, 0.0140, 0.0118], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:34:59,298 INFO [train.py:901] (2/4) Epoch 4, batch 4800, loss[loss=0.3535, simple_loss=0.3929, pruned_loss=0.157, over 8578.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3611, pruned_loss=0.1247, over 1609564.05 frames. 
], batch size: 34, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:33,998 INFO [train.py:901] (2/4) Epoch 4, batch 4850, loss[loss=0.3329, simple_loss=0.3602, pruned_loss=0.1528, over 7968.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3605, pruned_loss=0.1244, over 1608414.51 frames. ], batch size: 21, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:34,009 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 23:35:41,372 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-05 23:35:49,588 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.099e+02 3.374e+02 4.405e+02 6.016e+02 1.134e+03, threshold=8.810e+02, percent-clipped=7.0 +2023-02-05 23:36:08,337 INFO [train.py:901] (2/4) Epoch 4, batch 4900, loss[loss=0.3283, simple_loss=0.3719, pruned_loss=0.1424, over 8240.00 frames. ], tot_loss[loss=0.3041, simple_loss=0.3605, pruned_loss=0.1238, over 1611897.86 frames. ], batch size: 22, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:08,612 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7718, 2.3507, 3.9158, 2.9962, 2.9341, 2.2471, 1.6795, 1.7431], + device='cuda:2'), covar=tensor([0.1522, 0.1942, 0.0335, 0.0969, 0.1016, 0.0812, 0.0954, 0.1984], + device='cuda:2'), in_proj_covar=tensor([0.0685, 0.0614, 0.0523, 0.0580, 0.0695, 0.0572, 0.0562, 0.0573], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:36:41,947 INFO [train.py:901] (2/4) Epoch 4, batch 4950, loss[loss=0.2808, simple_loss=0.3577, pruned_loss=0.102, over 8245.00 frames. ], tot_loss[loss=0.3051, simple_loss=0.3613, pruned_loss=0.1244, over 1611663.61 frames. ], batch size: 24, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:56,272 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:36:58,772 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.208e+02 3.912e+02 5.596e+02 9.849e+02, threshold=7.824e+02, percent-clipped=2.0 +2023-02-05 23:36:58,865 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:00,343 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:12,962 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29244.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:16,652 INFO [train.py:901] (2/4) Epoch 4, batch 5000, loss[loss=0.3407, simple_loss=0.3836, pruned_loss=0.1489, over 8296.00 frames. ], tot_loss[loss=0.3059, simple_loss=0.3625, pruned_loss=0.1246, over 1616944.16 frames. 
], batch size: 23, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:37:16,810 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9529, 1.5624, 4.0982, 1.7571, 3.6123, 3.4789, 3.6599, 3.6073], + device='cuda:2'), covar=tensor([0.0371, 0.3037, 0.0338, 0.2096, 0.0889, 0.0552, 0.0410, 0.0455], + device='cuda:2'), in_proj_covar=tensor([0.0288, 0.0460, 0.0356, 0.0373, 0.0439, 0.0370, 0.0357, 0.0398], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:37:16,879 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29250.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:18,900 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2285, 1.4524, 4.3951, 1.5365, 3.7624, 3.6403, 3.9007, 3.8445], + device='cuda:2'), covar=tensor([0.0383, 0.3385, 0.0335, 0.2430, 0.1203, 0.0620, 0.0412, 0.0486], + device='cuda:2'), in_proj_covar=tensor([0.0288, 0.0460, 0.0356, 0.0373, 0.0439, 0.0370, 0.0357, 0.0398], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:37:19,516 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29254.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:35,452 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5870, 5.6475, 5.0248, 2.1991, 4.9053, 5.3100, 5.1684, 4.7035], + device='cuda:2'), covar=tensor([0.0651, 0.0416, 0.0816, 0.4169, 0.0572, 0.0416, 0.1000, 0.0572], + device='cuda:2'), in_proj_covar=tensor([0.0369, 0.0268, 0.0293, 0.0381, 0.0291, 0.0238, 0.0288, 0.0223], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 23:37:51,640 INFO [train.py:901] (2/4) Epoch 4, batch 5050, loss[loss=0.2958, simple_loss=0.3598, pruned_loss=0.1159, over 7919.00 frames. ], tot_loss[loss=0.3053, simple_loss=0.3625, pruned_loss=0.124, over 1614817.70 frames. ], batch size: 20, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:07,700 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 3.436e+02 4.072e+02 5.001e+02 1.022e+03, threshold=8.144e+02, percent-clipped=3.0 +2023-02-05 23:38:14,946 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 23:38:18,459 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29338.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:26,649 INFO [train.py:901] (2/4) Epoch 4, batch 5100, loss[loss=0.3068, simple_loss=0.3417, pruned_loss=0.136, over 7720.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3617, pruned_loss=0.1244, over 1613470.64 frames. 
], batch size: 18, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:36,225 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29364.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:44,636 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8328, 3.8379, 3.3675, 1.6473, 3.3452, 3.0285, 3.4847, 2.9574], + device='cuda:2'), covar=tensor([0.0897, 0.0695, 0.1008, 0.4907, 0.0905, 0.0942, 0.1330, 0.0887], + device='cuda:2'), in_proj_covar=tensor([0.0372, 0.0270, 0.0296, 0.0390, 0.0298, 0.0243, 0.0293, 0.0223], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-05 23:38:57,977 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0384, 2.4049, 3.9247, 3.2335, 3.0781, 2.4129, 1.6163, 1.8254], + device='cuda:2'), covar=tensor([0.1280, 0.1818, 0.0404, 0.0738, 0.0860, 0.0720, 0.0893, 0.1743], + device='cuda:2'), in_proj_covar=tensor([0.0695, 0.0616, 0.0528, 0.0574, 0.0698, 0.0572, 0.0564, 0.0573], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-05 23:39:00,484 INFO [train.py:901] (2/4) Epoch 4, batch 5150, loss[loss=0.2792, simple_loss=0.3513, pruned_loss=0.1035, over 8464.00 frames. ], tot_loss[loss=0.3047, simple_loss=0.3614, pruned_loss=0.124, over 1615706.03 frames. ], batch size: 27, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:16,241 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.145e+02 3.888e+02 4.871e+02 1.199e+03, threshold=7.777e+02, percent-clipped=1.0 +2023-02-05 23:39:35,373 INFO [train.py:901] (2/4) Epoch 4, batch 5200, loss[loss=0.312, simple_loss=0.3771, pruned_loss=0.1234, over 8478.00 frames. ], tot_loss[loss=0.3082, simple_loss=0.3637, pruned_loss=0.1263, over 1613269.63 frames. ], batch size: 29, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:54,955 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29479.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:40:09,487 INFO [train.py:901] (2/4) Epoch 4, batch 5250, loss[loss=0.3392, simple_loss=0.3926, pruned_loss=0.1428, over 7802.00 frames. ], tot_loss[loss=0.3076, simple_loss=0.3633, pruned_loss=0.126, over 1614464.33 frames. ], batch size: 20, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:12,203 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 23:40:25,985 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.346e+02 3.507e+02 4.371e+02 5.555e+02 1.318e+03, threshold=8.742e+02, percent-clipped=11.0 +2023-02-05 23:40:39,165 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 23:40:43,419 INFO [train.py:901] (2/4) Epoch 4, batch 5300, loss[loss=0.3275, simple_loss=0.3774, pruned_loss=0.1388, over 8631.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3631, pruned_loss=0.1258, over 1610913.14 frames. 
], batch size: 34, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:51,103 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2480, 1.6415, 3.3769, 0.9675, 2.5304, 1.5513, 1.2993, 2.0186], + device='cuda:2'), covar=tensor([0.1852, 0.1946, 0.0515, 0.3508, 0.1125, 0.2521, 0.1851, 0.1971], + device='cuda:2'), in_proj_covar=tensor([0.0455, 0.0429, 0.0505, 0.0517, 0.0560, 0.0500, 0.0437, 0.0576], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:40:57,670 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29569.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:02,754 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-05 23:41:14,744 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29594.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:17,286 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29598.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:18,612 INFO [train.py:901] (2/4) Epoch 4, batch 5350, loss[loss=0.3094, simple_loss=0.364, pruned_loss=0.1274, over 7935.00 frames. ], tot_loss[loss=0.3082, simple_loss=0.3644, pruned_loss=0.126, over 1618452.86 frames. ], batch size: 20, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:41:32,363 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29619.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:35,509 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 3.127e+02 4.006e+02 4.952e+02 2.682e+03, threshold=8.012e+02, percent-clipped=7.0 +2023-02-05 23:41:50,011 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 23:41:53,621 INFO [train.py:901] (2/4) Epoch 4, batch 5400, loss[loss=0.3108, simple_loss=0.3638, pruned_loss=0.1289, over 8595.00 frames. ], tot_loss[loss=0.3105, simple_loss=0.366, pruned_loss=0.1275, over 1616693.92 frames. ], batch size: 49, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:14,527 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:28,608 INFO [train.py:901] (2/4) Epoch 4, batch 5450, loss[loss=0.2339, simple_loss=0.3036, pruned_loss=0.08204, over 7549.00 frames. ], tot_loss[loss=0.3075, simple_loss=0.3638, pruned_loss=0.1256, over 1615367.36 frames. ], batch size: 18, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:37,304 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29713.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:44,948 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.180e+02 3.089e+02 4.007e+02 5.016e+02 9.074e+02, threshold=8.014e+02, percent-clipped=4.0 +2023-02-05 23:42:52,701 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29735.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:57,993 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 23:43:02,828 INFO [train.py:901] (2/4) Epoch 4, batch 5500, loss[loss=0.2953, simple_loss=0.3646, pruned_loss=0.113, over 8184.00 frames. ], tot_loss[loss=0.3071, simple_loss=0.3632, pruned_loss=0.1254, over 1610364.91 frames. 
], batch size: 23, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:10,333 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29760.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:12,460 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 23:43:20,938 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6340, 1.4395, 5.6025, 2.2588, 4.8577, 4.7332, 5.1886, 5.0539], + device='cuda:2'), covar=tensor([0.0346, 0.3786, 0.0221, 0.2107, 0.0931, 0.0445, 0.0342, 0.0418], + device='cuda:2'), in_proj_covar=tensor([0.0284, 0.0452, 0.0348, 0.0374, 0.0434, 0.0371, 0.0355, 0.0391], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-05 23:43:24,177 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-05 23:43:38,373 INFO [train.py:901] (2/4) Epoch 4, batch 5550, loss[loss=0.2514, simple_loss=0.3282, pruned_loss=0.08728, over 7817.00 frames. ], tot_loss[loss=0.3073, simple_loss=0.3634, pruned_loss=0.1256, over 1614423.66 frames. ], batch size: 19, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:38,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4995, 1.3314, 1.4073, 1.1904, 1.0276, 1.3306, 1.2410, 1.3341], + device='cuda:2'), covar=tensor([0.0653, 0.1186, 0.2005, 0.1437, 0.0668, 0.1547, 0.0784, 0.0551], + device='cuda:2'), in_proj_covar=tensor([0.0152, 0.0196, 0.0232, 0.0196, 0.0152, 0.0198, 0.0160, 0.0163], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:43:51,783 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:53,127 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6566, 1.9932, 2.3179, 0.9692, 2.2584, 1.5170, 0.7439, 1.8161], + device='cuda:2'), covar=tensor([0.0182, 0.0090, 0.0068, 0.0166, 0.0113, 0.0285, 0.0248, 0.0092], + device='cuda:2'), in_proj_covar=tensor([0.0281, 0.0198, 0.0157, 0.0238, 0.0191, 0.0326, 0.0261, 0.0225], + device='cuda:2'), out_proj_covar=tensor([1.1202e-04, 7.8135e-05, 5.9470e-05, 9.1674e-05, 7.6732e-05, 1.3897e-04, + 1.0497e-04, 8.8034e-05], device='cuda:2') +2023-02-05 23:43:54,237 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.211e+02 3.931e+02 4.808e+02 9.688e+02, threshold=7.861e+02, percent-clipped=2.0 +2023-02-05 23:44:12,158 INFO [train.py:901] (2/4) Epoch 4, batch 5600, loss[loss=0.2971, simple_loss=0.3564, pruned_loss=0.1189, over 8294.00 frames. ], tot_loss[loss=0.3069, simple_loss=0.363, pruned_loss=0.1253, over 1614532.78 frames. 
], batch size: 23, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:31,748 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8157, 2.4705, 4.6485, 1.2985, 3.1876, 2.0824, 1.9245, 2.5633], + device='cuda:2'), covar=tensor([0.1313, 0.1619, 0.0584, 0.2729, 0.1172, 0.2188, 0.1224, 0.2083], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0422, 0.0496, 0.0510, 0.0551, 0.0493, 0.0433, 0.0554], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:44:39,041 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6438, 1.7820, 1.5031, 1.1490, 1.5017, 1.5566, 1.7317, 1.6765], + device='cuda:2'), covar=tensor([0.0605, 0.1250, 0.1934, 0.1657, 0.0756, 0.1675, 0.0936, 0.0666], + device='cuda:2'), in_proj_covar=tensor([0.0152, 0.0195, 0.0230, 0.0195, 0.0152, 0.0198, 0.0160, 0.0165], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:44:46,060 INFO [train.py:901] (2/4) Epoch 4, batch 5650, loss[loss=0.3656, simple_loss=0.4072, pruned_loss=0.1621, over 8179.00 frames. ], tot_loss[loss=0.3077, simple_loss=0.3635, pruned_loss=0.126, over 1617947.88 frames. ], batch size: 48, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:55,377 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29913.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:03,295 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.236e+02 4.025e+02 5.119e+02 8.732e+02, threshold=8.050e+02, percent-clipped=2.0 +2023-02-05 23:45:03,336 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 23:45:10,182 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4099, 1.1998, 4.5044, 1.7076, 3.8605, 3.7195, 4.0652, 3.9920], + device='cuda:2'), covar=tensor([0.0433, 0.3706, 0.0319, 0.2299, 0.1038, 0.0523, 0.0416, 0.0457], + device='cuda:2'), in_proj_covar=tensor([0.0294, 0.0464, 0.0365, 0.0383, 0.0451, 0.0378, 0.0366, 0.0401], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:45:10,970 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1118, 1.9891, 3.3268, 0.8307, 2.1464, 1.3406, 1.4736, 1.9360], + device='cuda:2'), covar=tensor([0.2306, 0.1874, 0.0791, 0.4316, 0.1765, 0.3040, 0.1869, 0.2530], + device='cuda:2'), in_proj_covar=tensor([0.0452, 0.0427, 0.0499, 0.0516, 0.0559, 0.0492, 0.0435, 0.0561], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:45:18,368 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2537, 1.5725, 1.2803, 1.8954, 0.8059, 1.1213, 1.1908, 1.5228], + device='cuda:2'), covar=tensor([0.1310, 0.1020, 0.1485, 0.0699, 0.1627, 0.2105, 0.1349, 0.1065], + device='cuda:2'), in_proj_covar=tensor([0.0280, 0.0287, 0.0298, 0.0222, 0.0264, 0.0293, 0.0306, 0.0275], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:45:20,771 INFO [train.py:901] (2/4) Epoch 4, batch 5700, loss[loss=0.2726, simple_loss=0.3379, pruned_loss=0.1036, over 8019.00 frames. ], tot_loss[loss=0.3069, simple_loss=0.3626, pruned_loss=0.1256, over 1615327.13 frames. 
], batch size: 22, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:45:34,467 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:52,113 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29994.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:55,950 INFO [train.py:901] (2/4) Epoch 4, batch 5750, loss[loss=0.2759, simple_loss=0.3456, pruned_loss=0.1031, over 8511.00 frames. ], tot_loss[loss=0.3047, simple_loss=0.3612, pruned_loss=0.1241, over 1614984.26 frames. ], batch size: 26, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:46:07,130 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 23:46:13,252 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.352e+02 3.278e+02 4.024e+02 4.787e+02 1.009e+03, threshold=8.047e+02, percent-clipped=4.0 +2023-02-05 23:46:13,360 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:17,493 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30028.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:25,007 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3605, 1.6697, 2.8401, 1.0707, 1.9916, 1.6175, 1.4281, 1.7189], + device='cuda:2'), covar=tensor([0.1579, 0.1642, 0.0530, 0.3093, 0.1270, 0.2212, 0.1381, 0.1763], + device='cuda:2'), in_proj_covar=tensor([0.0451, 0.0423, 0.0502, 0.0512, 0.0557, 0.0492, 0.0436, 0.0561], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:46:32,285 INFO [train.py:901] (2/4) Epoch 4, batch 5800, loss[loss=0.2686, simple_loss=0.3249, pruned_loss=0.1062, over 7428.00 frames. ], tot_loss[loss=0.3023, simple_loss=0.3589, pruned_loss=0.1228, over 1609990.58 frames. ], batch size: 17, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:47:02,997 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-05 23:47:06,526 INFO [train.py:901] (2/4) Epoch 4, batch 5850, loss[loss=0.2782, simple_loss=0.3369, pruned_loss=0.1097, over 8093.00 frames. ], tot_loss[loss=0.3032, simple_loss=0.3594, pruned_loss=0.1234, over 1610899.29 frames. ], batch size: 21, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:13,369 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7653, 1.6922, 5.7152, 2.0663, 5.0148, 4.8142, 5.3330, 5.2688], + device='cuda:2'), covar=tensor([0.0341, 0.3216, 0.0230, 0.2171, 0.0820, 0.0437, 0.0318, 0.0350], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0457, 0.0364, 0.0374, 0.0450, 0.0379, 0.0361, 0.0404], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-05 23:47:23,087 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 3.427e+02 4.657e+02 5.932e+02 9.223e+02, threshold=9.314e+02, percent-clipped=4.0 +2023-02-05 23:47:33,285 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30139.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:33,521 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-05 23:47:35,875 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:41,675 INFO [train.py:901] (2/4) Epoch 4, batch 5900, loss[loss=0.2877, simple_loss=0.3576, pruned_loss=0.1089, over 8573.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3601, pruned_loss=0.1238, over 1616103.71 frames. ], batch size: 31, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:51,360 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:15,960 INFO [train.py:901] (2/4) Epoch 4, batch 5950, loss[loss=0.3506, simple_loss=0.4005, pruned_loss=0.1503, over 8652.00 frames. ], tot_loss[loss=0.3043, simple_loss=0.3607, pruned_loss=0.124, over 1618165.45 frames. ], batch size: 34, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:32,430 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.869e+02 3.143e+02 3.968e+02 4.977e+02 1.070e+03, threshold=7.937e+02, percent-clipped=1.0 +2023-02-05 23:48:35,342 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30227.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:50,175 INFO [train.py:901] (2/4) Epoch 4, batch 6000, loss[loss=0.2519, simple_loss=0.3053, pruned_loss=0.09923, over 7778.00 frames. ], tot_loss[loss=0.3049, simple_loss=0.3614, pruned_loss=0.1242, over 1618615.54 frames. ], batch size: 19, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:50,175 INFO [train.py:926] (2/4) Computing validation loss +2023-02-05 23:48:58,490 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4411, 1.7435, 2.7208, 1.1491, 2.0493, 1.7198, 1.5251, 1.8164], + device='cuda:2'), covar=tensor([0.1295, 0.1718, 0.0507, 0.2844, 0.1164, 0.2027, 0.1375, 0.1797], + device='cuda:2'), in_proj_covar=tensor([0.0460, 0.0435, 0.0512, 0.0523, 0.0563, 0.0499, 0.0447, 0.0574], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-05 23:49:02,857 INFO [train.py:935] (2/4) Epoch 4, validation: loss=0.2338, simple_loss=0.3275, pruned_loss=0.07005, over 944034.00 frames. +2023-02-05 23:49:02,858 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6599MB +2023-02-05 23:49:10,944 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.12 vs. limit=5.0 +2023-02-05 23:49:22,555 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:25,897 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:27,951 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6633, 2.0908, 1.6715, 2.1895, 1.4802, 1.4488, 1.7094, 2.0378], + device='cuda:2'), covar=tensor([0.0962, 0.0806, 0.1289, 0.0548, 0.1330, 0.1580, 0.1044, 0.0787], + device='cuda:2'), in_proj_covar=tensor([0.0274, 0.0280, 0.0295, 0.0223, 0.0262, 0.0288, 0.0299, 0.0274], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:49:37,708 INFO [train.py:901] (2/4) Epoch 4, batch 6050, loss[loss=0.274, simple_loss=0.3314, pruned_loss=0.1083, over 7553.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.3622, pruned_loss=0.1252, over 1616590.61 frames. 
], batch size: 18, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:49:44,056 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30309.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:53,935 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 3.338e+02 3.992e+02 4.649e+02 1.183e+03, threshold=7.984e+02, percent-clipped=3.0 +2023-02-05 23:50:12,471 INFO [train.py:901] (2/4) Epoch 4, batch 6100, loss[loss=0.311, simple_loss=0.3744, pruned_loss=0.1239, over 8140.00 frames. ], tot_loss[loss=0.3055, simple_loss=0.362, pruned_loss=0.1245, over 1616361.33 frames. ], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:50:14,756 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4075, 2.7541, 1.6351, 1.9166, 2.0006, 1.4289, 1.6751, 1.9637], + device='cuda:2'), covar=tensor([0.1319, 0.0302, 0.0873, 0.0628, 0.0656, 0.1172, 0.1052, 0.0738], + device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0234, 0.0306, 0.0302, 0.0330, 0.0311, 0.0334, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 23:50:24,482 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4930, 2.7253, 1.5717, 2.0689, 2.0148, 1.2929, 1.8000, 2.0560], + device='cuda:2'), covar=tensor([0.1301, 0.0408, 0.0965, 0.0575, 0.0639, 0.1269, 0.0995, 0.0849], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0235, 0.0307, 0.0304, 0.0331, 0.0313, 0.0336, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 23:50:32,436 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30378.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:39,277 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 23:50:44,131 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30395.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:47,296 INFO [train.py:901] (2/4) Epoch 4, batch 6150, loss[loss=0.3077, simple_loss=0.3571, pruned_loss=0.1292, over 7805.00 frames. ], tot_loss[loss=0.3053, simple_loss=0.3619, pruned_loss=0.1244, over 1615562.29 frames. ], batch size: 19, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:51:02,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30420.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:05,068 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.511e+02 4.267e+02 5.249e+02 1.089e+03, threshold=8.535e+02, percent-clipped=6.0 +2023-02-05 23:51:23,134 INFO [train.py:901] (2/4) Epoch 4, batch 6200, loss[loss=0.3335, simple_loss=0.3859, pruned_loss=0.1406, over 8247.00 frames. ], tot_loss[loss=0.3066, simple_loss=0.363, pruned_loss=0.1251, over 1615184.87 frames. 
], batch size: 22, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:51:47,746 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7484, 2.4657, 3.1109, 1.0401, 3.2020, 1.8928, 1.2973, 1.6534], + device='cuda:2'), covar=tensor([0.0312, 0.0093, 0.0085, 0.0268, 0.0112, 0.0284, 0.0360, 0.0179], + device='cuda:2'), in_proj_covar=tensor([0.0285, 0.0201, 0.0166, 0.0248, 0.0198, 0.0332, 0.0266, 0.0230], + device='cuda:2'), out_proj_covar=tensor([1.1253e-04, 7.8713e-05, 6.2006e-05, 9.4146e-05, 7.8766e-05, 1.3988e-04, + 1.0582e-04, 8.8970e-05], device='cuda:2') +2023-02-05 23:51:48,270 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30487.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:57,460 INFO [train.py:901] (2/4) Epoch 4, batch 6250, loss[loss=0.2824, simple_loss=0.3491, pruned_loss=0.1078, over 8495.00 frames. ], tot_loss[loss=0.3063, simple_loss=0.3626, pruned_loss=0.125, over 1612983.33 frames. ], batch size: 39, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:14,459 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 3.291e+02 3.933e+02 5.014e+02 1.132e+03, threshold=7.866e+02, percent-clipped=4.0 +2023-02-05 23:52:17,389 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2500, 2.6540, 1.5234, 1.8995, 1.7426, 1.1709, 1.6273, 1.7406], + device='cuda:2'), covar=tensor([0.1550, 0.0359, 0.1103, 0.0736, 0.0914, 0.1573, 0.1277, 0.0966], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0242, 0.0311, 0.0311, 0.0335, 0.0317, 0.0340, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-05 23:52:22,911 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:32,703 INFO [train.py:901] (2/4) Epoch 4, batch 6300, loss[loss=0.281, simple_loss=0.333, pruned_loss=0.1145, over 7437.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3615, pruned_loss=0.1245, over 1611772.09 frames. ], batch size: 17, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:39,499 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:47,315 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30571.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:06,768 INFO [train.py:901] (2/4) Epoch 4, batch 6350, loss[loss=0.2664, simple_loss=0.3423, pruned_loss=0.09524, over 8470.00 frames. ], tot_loss[loss=0.3046, simple_loss=0.3614, pruned_loss=0.1239, over 1610274.00 frames. 
], batch size: 31, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:08,354 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:19,901 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8960, 2.0806, 1.6739, 2.6068, 1.2227, 1.3652, 1.7005, 2.2164], + device='cuda:2'), covar=tensor([0.0961, 0.1243, 0.1587, 0.0565, 0.1706, 0.2142, 0.1630, 0.0903], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0280, 0.0289, 0.0221, 0.0260, 0.0285, 0.0298, 0.0269], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:53:23,786 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.165e+02 3.849e+02 5.077e+02 1.430e+03, threshold=7.697e+02, percent-clipped=4.0 +2023-02-05 23:53:42,465 INFO [train.py:901] (2/4) Epoch 4, batch 6400, loss[loss=0.2488, simple_loss=0.3234, pruned_loss=0.08714, over 8027.00 frames. ], tot_loss[loss=0.3048, simple_loss=0.3615, pruned_loss=0.124, over 1614310.37 frames. ], batch size: 22, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:58,098 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30673.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:07,693 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30686.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:16,879 INFO [train.py:901] (2/4) Epoch 4, batch 6450, loss[loss=0.2828, simple_loss=0.3604, pruned_loss=0.1026, over 8362.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3609, pruned_loss=0.1231, over 1615365.57 frames. ], batch size: 24, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:54:31,540 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30722.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:32,830 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.022e+02 3.987e+02 5.645e+02 1.412e+03, threshold=7.975e+02, percent-clipped=10.0 +2023-02-05 23:54:50,958 INFO [train.py:901] (2/4) Epoch 4, batch 6500, loss[loss=0.2831, simple_loss=0.3649, pruned_loss=0.1007, over 8245.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3605, pruned_loss=0.1226, over 1616285.54 frames. ], batch size: 24, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:55:01,899 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8993, 2.2142, 3.0702, 1.2162, 3.0201, 1.9484, 1.4081, 1.9042], + device='cuda:2'), covar=tensor([0.0262, 0.0111, 0.0082, 0.0206, 0.0123, 0.0277, 0.0326, 0.0133], + device='cuda:2'), in_proj_covar=tensor([0.0281, 0.0200, 0.0162, 0.0239, 0.0195, 0.0332, 0.0261, 0.0224], + device='cuda:2'), out_proj_covar=tensor([1.1033e-04, 7.7747e-05, 6.0131e-05, 9.0085e-05, 7.7270e-05, 1.3957e-04, + 1.0364e-04, 8.6134e-05], device='cuda:2') +2023-02-05 23:55:26,186 INFO [train.py:901] (2/4) Epoch 4, batch 6550, loss[loss=0.278, simple_loss=0.3504, pruned_loss=0.1028, over 8470.00 frames. ], tot_loss[loss=0.3006, simple_loss=0.3584, pruned_loss=0.1214, over 1614947.30 frames. ], batch size: 25, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:55:42,638 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.539e+02 4.251e+02 5.114e+02 1.135e+03, threshold=8.501e+02, percent-clipped=1.0 +2023-02-05 23:55:50,034 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-05 23:55:51,484 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30837.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:55:54,909 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.82 vs. limit=2.0 +2023-02-05 23:56:00,637 INFO [train.py:901] (2/4) Epoch 4, batch 6600, loss[loss=0.3099, simple_loss=0.3759, pruned_loss=0.122, over 8494.00 frames. ], tot_loss[loss=0.3022, simple_loss=0.3597, pruned_loss=0.1224, over 1616200.90 frames. ], batch size: 28, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:06,245 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30858.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:08,701 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:56:24,233 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30883.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:35,345 INFO [train.py:901] (2/4) Epoch 4, batch 6650, loss[loss=0.2984, simple_loss=0.3463, pruned_loss=0.1253, over 7784.00 frames. ], tot_loss[loss=0.3003, simple_loss=0.3579, pruned_loss=0.1214, over 1613930.04 frames. ], batch size: 19, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:50,088 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:51,871 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 3.362e+02 4.352e+02 5.461e+02 1.446e+03, threshold=8.703e+02, percent-clipped=3.0 +2023-02-05 23:57:04,064 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30942.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:04,651 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30943.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:09,163 INFO [train.py:901] (2/4) Epoch 4, batch 6700, loss[loss=0.2856, simple_loss=0.3476, pruned_loss=0.1118, over 8477.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.3584, pruned_loss=0.1222, over 1614571.80 frames. ], batch size: 25, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:10,676 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30952.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:13,688 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 23:57:21,644 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:45,059 INFO [train.py:901] (2/4) Epoch 4, batch 6750, loss[loss=0.3135, simple_loss=0.3695, pruned_loss=0.1288, over 8329.00 frames. ], tot_loss[loss=0.303, simple_loss=0.3595, pruned_loss=0.1232, over 1614934.39 frames. 
], batch size: 25, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:56,392 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31017.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:00,825 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.225e+02 3.317e+02 4.136e+02 5.252e+02 1.678e+03, threshold=8.272e+02, percent-clipped=4.0 +2023-02-05 23:58:04,873 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31029.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:58:18,933 INFO [train.py:901] (2/4) Epoch 4, batch 6800, loss[loss=0.2559, simple_loss=0.3384, pruned_loss=0.08664, over 8247.00 frames. ], tot_loss[loss=0.3042, simple_loss=0.3598, pruned_loss=0.1243, over 1613581.81 frames. ], batch size: 24, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:58:19,597 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 23:58:19,773 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1553, 1.6840, 1.4609, 1.1262, 1.3996, 1.4242, 1.5187, 1.7485], + device='cuda:2'), covar=tensor([0.0616, 0.1219, 0.1944, 0.1615, 0.0652, 0.1627, 0.0849, 0.0562], + device='cuda:2'), in_proj_covar=tensor([0.0151, 0.0193, 0.0232, 0.0195, 0.0148, 0.0198, 0.0160, 0.0159], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:58:48,706 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31093.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:53,201 INFO [train.py:901] (2/4) Epoch 4, batch 6850, loss[loss=0.2514, simple_loss=0.312, pruned_loss=0.09539, over 7811.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3593, pruned_loss=0.123, over 1614993.38 frames. ], batch size: 19, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:59:06,897 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:10,006 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 23:59:10,567 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.178e+02 3.797e+02 5.313e+02 1.260e+03, threshold=7.594e+02, percent-clipped=4.0 +2023-02-05 23:59:13,446 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6946, 3.0508, 2.4501, 4.0332, 1.7540, 1.7601, 2.1858, 3.2101], + device='cuda:2'), covar=tensor([0.0945, 0.1397, 0.1377, 0.0311, 0.1945, 0.2367, 0.2063, 0.1294], + device='cuda:2'), in_proj_covar=tensor([0.0275, 0.0284, 0.0299, 0.0223, 0.0272, 0.0295, 0.0308, 0.0276], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-05 23:59:16,201 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31132.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:28,756 INFO [train.py:901] (2/4) Epoch 4, batch 6900, loss[loss=0.2991, simple_loss=0.3675, pruned_loss=0.1153, over 8625.00 frames. ], tot_loss[loss=0.3037, simple_loss=0.3601, pruned_loss=0.1237, over 1610824.46 frames. ], batch size: 34, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:03,327 INFO [train.py:901] (2/4) Epoch 4, batch 6950, loss[loss=0.2515, simple_loss=0.3111, pruned_loss=0.09592, over 7801.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3606, pruned_loss=0.1235, over 1609915.57 frames. 
], batch size: 19, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:18,141 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 00:00:20,075 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 3.425e+02 4.122e+02 5.302e+02 9.579e+02, threshold=8.244e+02, percent-clipped=6.0 +2023-02-06 00:00:23,107 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.71 vs. limit=5.0 +2023-02-06 00:00:38,134 INFO [train.py:901] (2/4) Epoch 4, batch 7000, loss[loss=0.2872, simple_loss=0.359, pruned_loss=0.1077, over 8292.00 frames. ], tot_loss[loss=0.3049, simple_loss=0.3613, pruned_loss=0.1242, over 1613335.07 frames. ], batch size: 23, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:48,782 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:00:53,584 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0394, 2.2856, 3.8466, 3.0713, 3.1938, 2.2299, 1.6267, 1.5466], + device='cuda:2'), covar=tensor([0.1384, 0.1987, 0.0394, 0.0906, 0.0891, 0.0816, 0.0953, 0.2035], + device='cuda:2'), in_proj_covar=tensor([0.0709, 0.0625, 0.0529, 0.0609, 0.0727, 0.0595, 0.0574, 0.0592], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:00:54,246 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2265, 1.7285, 1.7168, 0.5871, 1.7479, 1.1930, 0.2781, 1.5480], + device='cuda:2'), covar=tensor([0.0186, 0.0076, 0.0080, 0.0162, 0.0104, 0.0320, 0.0276, 0.0080], + device='cuda:2'), in_proj_covar=tensor([0.0280, 0.0200, 0.0165, 0.0244, 0.0194, 0.0326, 0.0267, 0.0231], + device='cuda:2'), out_proj_covar=tensor([1.0915e-04, 7.6842e-05, 6.1419e-05, 9.1168e-05, 7.5797e-05, 1.3587e-04, + 1.0511e-04, 8.8277e-05], device='cuda:2') +2023-02-06 00:00:56,885 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2296, 2.2509, 1.6349, 1.9591, 1.8569, 1.3120, 1.6796, 1.9855], + device='cuda:2'), covar=tensor([0.1040, 0.0323, 0.0817, 0.0408, 0.0463, 0.1037, 0.0708, 0.0536], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0241, 0.0309, 0.0305, 0.0323, 0.0317, 0.0338, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:01:03,281 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:09,175 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:11,662 INFO [train.py:901] (2/4) Epoch 4, batch 7050, loss[loss=0.3136, simple_loss=0.379, pruned_loss=0.1241, over 8334.00 frames. ], tot_loss[loss=0.3061, simple_loss=0.3627, pruned_loss=0.1247, over 1615898.82 frames. 
], batch size: 25, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:17,359 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7458, 2.2960, 1.8217, 2.9638, 1.2075, 1.4131, 1.8196, 2.3669], + device='cuda:2'), covar=tensor([0.1285, 0.1211, 0.1619, 0.0522, 0.1987, 0.2347, 0.1714, 0.1200], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0280, 0.0294, 0.0217, 0.0264, 0.0294, 0.0296, 0.0272], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 00:01:28,375 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 3.221e+02 3.873e+02 5.326e+02 1.178e+03, threshold=7.746e+02, percent-clipped=8.0 +2023-02-06 00:01:47,447 INFO [train.py:901] (2/4) Epoch 4, batch 7100, loss[loss=0.3715, simple_loss=0.4045, pruned_loss=0.1693, over 8504.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3632, pruned_loss=0.1252, over 1615592.65 frames. ], batch size: 26, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:57,592 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4522, 1.9826, 3.1433, 2.5376, 2.6549, 2.1344, 1.4671, 1.1232], + device='cuda:2'), covar=tensor([0.1314, 0.1571, 0.0323, 0.0786, 0.0724, 0.0809, 0.0949, 0.1660], + device='cuda:2'), in_proj_covar=tensor([0.0704, 0.0629, 0.0528, 0.0606, 0.0718, 0.0593, 0.0574, 0.0591], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:02:02,610 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31373.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:02:08,043 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:13,373 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:21,059 INFO [train.py:901] (2/4) Epoch 4, batch 7150, loss[loss=0.3061, simple_loss=0.3524, pruned_loss=0.1299, over 7232.00 frames. ], tot_loss[loss=0.3039, simple_loss=0.3612, pruned_loss=0.1233, over 1616077.70 frames. 
], batch size: 16, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:02:22,722 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:28,579 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:28,656 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7612, 1.4935, 2.2813, 1.8460, 2.0260, 1.6269, 1.2380, 0.7068], + device='cuda:2'), covar=tensor([0.1625, 0.1561, 0.0419, 0.0817, 0.0659, 0.0892, 0.0951, 0.1511], + device='cuda:2'), in_proj_covar=tensor([0.0706, 0.0630, 0.0533, 0.0606, 0.0719, 0.0595, 0.0574, 0.0590], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:02:29,932 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:37,019 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.187e+02 3.955e+02 5.000e+02 8.847e+02, threshold=7.910e+02, percent-clipped=2.0 +2023-02-06 00:02:39,207 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:56,011 INFO [train.py:901] (2/4) Epoch 4, batch 7200, loss[loss=0.3491, simple_loss=0.3907, pruned_loss=0.1538, over 6740.00 frames. ], tot_loss[loss=0.3034, simple_loss=0.3608, pruned_loss=0.123, over 1614624.57 frames. ], batch size: 71, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:03:22,242 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31488.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:03:28,007 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 00:03:30,966 INFO [train.py:901] (2/4) Epoch 4, batch 7250, loss[loss=0.2539, simple_loss=0.3095, pruned_loss=0.09909, over 7695.00 frames. ], tot_loss[loss=0.3022, simple_loss=0.3595, pruned_loss=0.1225, over 1613500.94 frames. 
], batch size: 18, lr: 1.73e-02, grad_scale: 16.0 +2023-02-06 00:03:32,538 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1496, 2.0937, 3.4400, 2.1969, 2.8058, 3.7921, 3.6122, 3.5810], + device='cuda:2'), covar=tensor([0.0956, 0.1089, 0.0667, 0.1418, 0.0820, 0.0241, 0.0331, 0.0402], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0263, 0.0216, 0.0263, 0.0220, 0.0197, 0.0208, 0.0268], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:03:35,400 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5840, 1.9679, 3.3556, 1.1012, 2.4454, 1.8278, 1.5759, 2.0138], + device='cuda:2'), covar=tensor([0.1429, 0.1774, 0.0636, 0.3015, 0.1223, 0.2284, 0.1308, 0.2063], + device='cuda:2'), in_proj_covar=tensor([0.0454, 0.0429, 0.0506, 0.0510, 0.0553, 0.0503, 0.0436, 0.0571], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:03:37,360 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31509.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:03:47,334 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.150e+02 3.858e+02 4.938e+02 9.845e+02, threshold=7.715e+02, percent-clipped=4.0 +2023-02-06 00:03:57,286 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.73 vs. limit=5.0 +2023-02-06 00:04:05,046 INFO [train.py:901] (2/4) Epoch 4, batch 7300, loss[loss=0.3061, simple_loss=0.3668, pruned_loss=0.1227, over 8620.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3583, pruned_loss=0.1218, over 1615511.84 frames. ], batch size: 34, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:40,415 INFO [train.py:901] (2/4) Epoch 4, batch 7350, loss[loss=0.3139, simple_loss=0.373, pruned_loss=0.1274, over 8451.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.3585, pruned_loss=0.1221, over 1614310.51 frames. ], batch size: 27, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:57,282 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.182e+02 2.774e+02 3.613e+02 4.483e+02 1.102e+03, threshold=7.227e+02, percent-clipped=2.0 +2023-02-06 00:04:59,997 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 00:05:05,415 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:14,568 INFO [train.py:901] (2/4) Epoch 4, batch 7400, loss[loss=0.2738, simple_loss=0.3339, pruned_loss=0.1068, over 8205.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3592, pruned_loss=0.1229, over 1611822.19 frames. ], batch size: 23, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:19,259 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-06 00:05:20,117 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:21,987 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:26,686 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:37,008 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:43,497 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:48,578 INFO [train.py:901] (2/4) Epoch 4, batch 7450, loss[loss=0.2738, simple_loss=0.345, pruned_loss=0.1013, over 8297.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3607, pruned_loss=0.1235, over 1615009.00 frames. ], batch size: 23, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:58,000 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 00:06:04,224 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:05,435 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.933e+02 3.216e+02 3.933e+02 5.503e+02 1.387e+03, threshold=7.866e+02, percent-clipped=9.0 +2023-02-06 00:06:19,851 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31744.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:06:23,709 INFO [train.py:901] (2/4) Epoch 4, batch 7500, loss[loss=0.335, simple_loss=0.3847, pruned_loss=0.1427, over 8463.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.361, pruned_loss=0.123, over 1620453.63 frames. ], batch size: 25, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:06:36,764 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31769.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:06:37,959 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:40,259 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 00:06:57,583 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1761, 1.2116, 3.2005, 0.9674, 2.7357, 2.6696, 2.9595, 2.8627], + device='cuda:2'), covar=tensor([0.0510, 0.3276, 0.0546, 0.2496, 0.1321, 0.0897, 0.0586, 0.0698], + device='cuda:2'), in_proj_covar=tensor([0.0304, 0.0467, 0.0378, 0.0390, 0.0450, 0.0387, 0.0374, 0.0417], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 00:06:58,136 INFO [train.py:901] (2/4) Epoch 4, batch 7550, loss[loss=0.3784, simple_loss=0.4139, pruned_loss=0.1714, over 8502.00 frames. ], tot_loss[loss=0.3044, simple_loss=0.3611, pruned_loss=0.1239, over 1620856.69 frames. 
], batch size: 26, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:02,985 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:16,184 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.990e+02 2.842e+02 3.963e+02 5.244e+02 1.193e+03, threshold=7.926e+02, percent-clipped=8.0 +2023-02-06 00:07:27,220 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:34,061 INFO [train.py:901] (2/4) Epoch 4, batch 7600, loss[loss=0.379, simple_loss=0.4118, pruned_loss=0.1731, over 8577.00 frames. ], tot_loss[loss=0.3042, simple_loss=0.361, pruned_loss=0.1237, over 1622129.15 frames. ], batch size: 49, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:36,151 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31853.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:07:58,613 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:05,425 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:07,983 INFO [train.py:901] (2/4) Epoch 4, batch 7650, loss[loss=0.2802, simple_loss=0.3543, pruned_loss=0.1031, over 8334.00 frames. ], tot_loss[loss=0.3029, simple_loss=0.3601, pruned_loss=0.1228, over 1622875.85 frames. ], batch size: 26, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:26,156 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.122e+02 3.181e+02 3.860e+02 4.828e+02 9.649e+02, threshold=7.720e+02, percent-clipped=2.0 +2023-02-06 00:08:31,516 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:43,637 INFO [train.py:901] (2/4) Epoch 4, batch 7700, loss[loss=0.2669, simple_loss=0.3154, pruned_loss=0.1092, over 7513.00 frames. ], tot_loss[loss=0.3011, simple_loss=0.3589, pruned_loss=0.1216, over 1622949.69 frames. ], batch size: 18, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:55,986 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:56,022 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31968.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:09:06,798 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 00:09:17,470 INFO [train.py:901] (2/4) Epoch 4, batch 7750, loss[loss=0.3055, simple_loss=0.3723, pruned_loss=0.1194, over 8196.00 frames. ], tot_loss[loss=0.302, simple_loss=0.3598, pruned_loss=0.1221, over 1623633.47 frames. ], batch size: 23, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:09:35,964 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.165e+02 3.163e+02 3.927e+02 5.355e+02 1.239e+03, threshold=7.853e+02, percent-clipped=4.0 +2023-02-06 00:09:53,603 INFO [train.py:901] (2/4) Epoch 4, batch 7800, loss[loss=0.3393, simple_loss=0.389, pruned_loss=0.1448, over 8474.00 frames. ], tot_loss[loss=0.3025, simple_loss=0.3604, pruned_loss=0.1223, over 1623642.65 frames. 
], batch size: 25, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:05,168 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:10:27,395 INFO [train.py:901] (2/4) Epoch 4, batch 7850, loss[loss=0.3969, simple_loss=0.4365, pruned_loss=0.1787, over 8625.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.3595, pruned_loss=0.1215, over 1622289.28 frames. ], batch size: 34, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:43,942 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.460e+02 3.521e+02 4.480e+02 6.179e+02 1.308e+03, threshold=8.960e+02, percent-clipped=13.0 +2023-02-06 00:10:55,615 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:00,787 INFO [train.py:901] (2/4) Epoch 4, batch 7900, loss[loss=0.3347, simple_loss=0.3877, pruned_loss=0.1409, over 8622.00 frames. ], tot_loss[loss=0.3012, simple_loss=0.3594, pruned_loss=0.1215, over 1623963.72 frames. ], batch size: 50, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:00,854 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:12,544 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:21,562 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:24,163 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:34,316 INFO [train.py:901] (2/4) Epoch 4, batch 7950, loss[loss=0.3433, simple_loss=0.3957, pruned_loss=0.1454, over 8331.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3607, pruned_loss=0.1224, over 1624542.83 frames. ], batch size: 26, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:42,539 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 00:11:51,236 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32224.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:11:51,629 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.248e+02 3.536e+02 4.226e+02 5.315e+02 1.259e+03, threshold=8.452e+02, percent-clipped=4.0 +2023-02-06 00:12:01,777 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:08,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32249.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:12:08,749 INFO [train.py:901] (2/4) Epoch 4, batch 8000, loss[loss=0.257, simple_loss=0.3179, pruned_loss=0.09802, over 7436.00 frames. ], tot_loss[loss=0.3006, simple_loss=0.359, pruned_loss=0.1212, over 1622283.73 frames. ], batch size: 17, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:12:13,076 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 00:12:19,085 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:22,646 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. 
limit=2.0 +2023-02-06 00:12:26,800 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:37,101 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3975, 2.0740, 1.4158, 1.7757, 1.6702, 1.2352, 1.4721, 1.8076], + device='cuda:2'), covar=tensor([0.0936, 0.0310, 0.0842, 0.0480, 0.0629, 0.1139, 0.0816, 0.0665], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0241, 0.0316, 0.0309, 0.0324, 0.0316, 0.0341, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:12:42,459 INFO [train.py:901] (2/4) Epoch 4, batch 8050, loss[loss=0.2434, simple_loss=0.314, pruned_loss=0.08645, over 7227.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3575, pruned_loss=0.1222, over 1599171.16 frames. ], batch size: 16, lr: 1.70e-02, grad_scale: 8.0 +2023-02-06 00:12:42,657 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:50,705 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:58,916 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.697e+02 3.496e+02 4.220e+02 5.135e+02 1.064e+03, threshold=8.441e+02, percent-clipped=2.0 +2023-02-06 00:13:15,884 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 00:13:19,660 INFO [train.py:901] (2/4) Epoch 5, batch 0, loss[loss=0.3705, simple_loss=0.4112, pruned_loss=0.1649, over 8715.00 frames. ], tot_loss[loss=0.3705, simple_loss=0.4112, pruned_loss=0.1649, over 8715.00 frames. ], batch size: 34, lr: 1.59e-02, grad_scale: 8.0 +2023-02-06 00:13:19,660 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 00:13:31,617 INFO [train.py:935] (2/4) Epoch 5, validation: loss=0.2309, simple_loss=0.3254, pruned_loss=0.06822, over 944034.00 frames. +2023-02-06 00:13:31,618 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6599MB +2023-02-06 00:13:46,441 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 00:13:46,611 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:06,998 INFO [train.py:901] (2/4) Epoch 5, batch 50, loss[loss=0.3227, simple_loss=0.3775, pruned_loss=0.1339, over 8564.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3653, pruned_loss=0.1242, over 369329.72 frames. ], batch size: 31, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:14,083 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:22,921 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 00:14:36,523 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.148e+02 3.721e+02 4.839e+02 1.477e+03, threshold=7.442e+02, percent-clipped=1.0 +2023-02-06 00:14:38,052 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:41,775 INFO [train.py:901] (2/4) Epoch 5, batch 100, loss[loss=0.3893, simple_loss=0.4212, pruned_loss=0.1787, over 8715.00 frames. 
], tot_loss[loss=0.3012, simple_loss=0.3595, pruned_loss=0.1215, over 648626.42 frames. ], batch size: 34, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:44,566 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:45,035 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 00:14:50,603 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9268, 3.8867, 3.5286, 1.7914, 3.4906, 3.6469, 3.6970, 3.2017], + device='cuda:2'), covar=tensor([0.1310, 0.0836, 0.1084, 0.5199, 0.0880, 0.0906, 0.1454, 0.0875], + device='cuda:2'), in_proj_covar=tensor([0.0380, 0.0282, 0.0301, 0.0402, 0.0306, 0.0263, 0.0294, 0.0233], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 00:15:02,119 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:15,812 INFO [train.py:901] (2/4) Epoch 5, batch 150, loss[loss=0.2457, simple_loss=0.3277, pruned_loss=0.08187, over 8111.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3577, pruned_loss=0.1198, over 862437.31 frames. ], batch size: 23, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:43,038 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:45,458 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.082e+02 3.007e+02 3.818e+02 4.644e+02 8.323e+02, threshold=7.636e+02, percent-clipped=1.0 +2023-02-06 00:15:50,791 INFO [train.py:901] (2/4) Epoch 5, batch 200, loss[loss=0.3033, simple_loss=0.3582, pruned_loss=0.1242, over 8254.00 frames. ], tot_loss[loss=0.2979, simple_loss=0.3558, pruned_loss=0.12, over 1029899.50 frames. ], batch size: 22, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:59,818 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:06,415 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:09,713 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0536, 1.1600, 1.0799, 0.2270, 1.1398, 0.9115, 0.1400, 1.0812], + device='cuda:2'), covar=tensor([0.0138, 0.0100, 0.0079, 0.0177, 0.0097, 0.0295, 0.0232, 0.0094], + device='cuda:2'), in_proj_covar=tensor([0.0291, 0.0204, 0.0164, 0.0251, 0.0200, 0.0328, 0.0268, 0.0232], + device='cuda:2'), out_proj_covar=tensor([1.1034e-04, 7.7110e-05, 6.0176e-05, 9.2228e-05, 7.6379e-05, 1.3345e-04, + 1.0301e-04, 8.6821e-05], device='cuda:2') +2023-02-06 00:16:23,703 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:24,858 INFO [train.py:901] (2/4) Epoch 5, batch 250, loss[loss=0.2894, simple_loss=0.3498, pruned_loss=0.1145, over 8356.00 frames. ], tot_loss[loss=0.2982, simple_loss=0.3556, pruned_loss=0.1204, over 1156375.59 frames. ], batch size: 24, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:16:36,163 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-06 00:16:45,160 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:46,291 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 00:16:54,483 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 3.241e+02 4.131e+02 4.869e+02 1.219e+03, threshold=8.263e+02, percent-clipped=9.0 +2023-02-06 00:17:00,673 INFO [train.py:901] (2/4) Epoch 5, batch 300, loss[loss=0.3234, simple_loss=0.381, pruned_loss=0.133, over 8376.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.3578, pruned_loss=0.1219, over 1257844.03 frames. ], batch size: 49, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:03,006 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:10,885 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:24,181 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7561, 2.3459, 4.5918, 1.1956, 2.9962, 2.4300, 1.7410, 2.4904], + device='cuda:2'), covar=tensor([0.1442, 0.1802, 0.0609, 0.3273, 0.1227, 0.2060, 0.1399, 0.2286], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0435, 0.0517, 0.0525, 0.0565, 0.0497, 0.0445, 0.0577], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:17:27,573 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32673.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:34,177 INFO [train.py:901] (2/4) Epoch 5, batch 350, loss[loss=0.2364, simple_loss=0.3028, pruned_loss=0.085, over 7800.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3583, pruned_loss=0.1218, over 1342656.99 frames. ], batch size: 19, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:34,412 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:41,060 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. limit=5.0 +2023-02-06 00:17:51,720 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:18:04,032 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.189e+02 4.031e+02 4.810e+02 8.158e+02, threshold=8.062e+02, percent-clipped=0.0 +2023-02-06 00:18:09,327 INFO [train.py:901] (2/4) Epoch 5, batch 400, loss[loss=0.3371, simple_loss=0.3878, pruned_loss=0.1432, over 8688.00 frames. ], tot_loss[loss=0.3, simple_loss=0.3584, pruned_loss=0.1208, over 1404086.24 frames. ], batch size: 34, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:18:30,397 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6502, 2.7547, 1.8207, 2.2364, 2.3004, 1.5161, 2.1210, 2.1765], + device='cuda:2'), covar=tensor([0.1105, 0.0432, 0.0824, 0.0510, 0.0541, 0.1040, 0.0739, 0.0790], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0240, 0.0308, 0.0304, 0.0316, 0.0312, 0.0336, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:18:43,757 INFO [train.py:901] (2/4) Epoch 5, batch 450, loss[loss=0.2754, simple_loss=0.349, pruned_loss=0.1008, over 8459.00 frames. 
], tot_loss[loss=0.298, simple_loss=0.3573, pruned_loss=0.1194, over 1452948.47 frames. ], batch size: 29, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:19:05,621 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.12 vs. limit=5.0 +2023-02-06 00:19:12,442 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.263e+02 3.122e+02 4.068e+02 4.898e+02 9.897e+02, threshold=8.137e+02, percent-clipped=5.0 +2023-02-06 00:19:17,686 INFO [train.py:901] (2/4) Epoch 5, batch 500, loss[loss=0.3144, simple_loss=0.3636, pruned_loss=0.1326, over 7978.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3561, pruned_loss=0.1187, over 1489196.20 frames. ], batch size: 21, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:19:38,471 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3592, 2.7049, 1.7494, 1.9907, 2.1399, 1.2862, 1.8817, 2.2776], + device='cuda:2'), covar=tensor([0.1504, 0.0332, 0.0926, 0.0685, 0.0677, 0.1226, 0.1008, 0.0799], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0239, 0.0310, 0.0303, 0.0320, 0.0315, 0.0337, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:19:52,905 INFO [train.py:901] (2/4) Epoch 5, batch 550, loss[loss=0.2999, simple_loss=0.3628, pruned_loss=0.1185, over 8312.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.3556, pruned_loss=0.1188, over 1516623.21 frames. ], batch size: 23, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:20:21,235 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.184e+02 3.133e+02 3.697e+02 5.126e+02 1.321e+03, threshold=7.393e+02, percent-clipped=4.0 +2023-02-06 00:20:21,442 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9634, 2.2049, 1.7163, 2.8238, 1.3089, 1.2475, 1.6535, 2.2997], + device='cuda:2'), covar=tensor([0.1004, 0.1280, 0.1728, 0.0465, 0.1888, 0.2717, 0.2046, 0.1359], + device='cuda:2'), in_proj_covar=tensor([0.0274, 0.0272, 0.0294, 0.0218, 0.0255, 0.0284, 0.0290, 0.0269], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 00:20:26,722 INFO [train.py:901] (2/4) Epoch 5, batch 600, loss[loss=0.2401, simple_loss=0.2986, pruned_loss=0.09077, over 7431.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3534, pruned_loss=0.1175, over 1537015.84 frames. ], batch size: 17, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:20:36,137 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.6645, 1.2546, 3.7852, 1.3437, 3.2555, 3.1395, 3.3761, 3.3434], + device='cuda:2'), covar=tensor([0.0418, 0.3507, 0.0469, 0.2528, 0.1159, 0.0758, 0.0503, 0.0556], + device='cuda:2'), in_proj_covar=tensor([0.0310, 0.0478, 0.0381, 0.0393, 0.0460, 0.0381, 0.0382, 0.0421], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 00:20:50,124 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 00:21:02,154 INFO [train.py:901] (2/4) Epoch 5, batch 650, loss[loss=0.2854, simple_loss=0.3323, pruned_loss=0.1193, over 7544.00 frames. ], tot_loss[loss=0.2965, simple_loss=0.3549, pruned_loss=0.119, over 1551534.56 frames. 
], batch size: 18, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:21:02,983 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:21:21,553 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1643, 3.1060, 2.8528, 1.4263, 2.8250, 2.8422, 2.9051, 2.6086], + device='cuda:2'), covar=tensor([0.1265, 0.0883, 0.1278, 0.4944, 0.0972, 0.0974, 0.1542, 0.0925], + device='cuda:2'), in_proj_covar=tensor([0.0381, 0.0280, 0.0315, 0.0409, 0.0303, 0.0266, 0.0296, 0.0238], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 00:21:30,782 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 3.090e+02 3.854e+02 5.024e+02 8.355e+02, threshold=7.708e+02, percent-clipped=4.0 +2023-02-06 00:21:36,138 INFO [train.py:901] (2/4) Epoch 5, batch 700, loss[loss=0.3237, simple_loss=0.3623, pruned_loss=0.1426, over 7926.00 frames. ], tot_loss[loss=0.2966, simple_loss=0.355, pruned_loss=0.1191, over 1564778.23 frames. ], batch size: 20, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:21:44,894 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4213, 1.6393, 1.4643, 1.3272, 1.5274, 1.3358, 1.8160, 1.5699], + device='cuda:2'), covar=tensor([0.0577, 0.1244, 0.1889, 0.1444, 0.0624, 0.1584, 0.0744, 0.0590], + device='cuda:2'), in_proj_covar=tensor([0.0143, 0.0187, 0.0228, 0.0187, 0.0142, 0.0196, 0.0152, 0.0157], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 00:22:11,096 INFO [train.py:901] (2/4) Epoch 5, batch 750, loss[loss=0.3219, simple_loss=0.3802, pruned_loss=0.1319, over 8587.00 frames. ], tot_loss[loss=0.2973, simple_loss=0.3553, pruned_loss=0.1196, over 1572958.27 frames. ], batch size: 31, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:14,422 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:22:36,868 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 00:22:40,967 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 3.306e+02 4.079e+02 5.042e+02 1.499e+03, threshold=8.159e+02, percent-clipped=7.0 +2023-02-06 00:22:45,500 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 00:22:46,149 INFO [train.py:901] (2/4) Epoch 5, batch 800, loss[loss=0.2903, simple_loss=0.3592, pruned_loss=0.1106, over 8322.00 frames. ], tot_loss[loss=0.2959, simple_loss=0.3542, pruned_loss=0.1188, over 1579681.82 frames. ], batch size: 25, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:13,721 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:23:19,948 INFO [train.py:901] (2/4) Epoch 5, batch 850, loss[loss=0.3127, simple_loss=0.3784, pruned_loss=0.1235, over 8514.00 frames. ], tot_loss[loss=0.2987, simple_loss=0.3568, pruned_loss=0.1203, over 1593164.32 frames. 
], batch size: 26, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:49,970 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.888e+02 3.855e+02 5.468e+02 1.103e+03, threshold=7.709e+02, percent-clipped=2.0 +2023-02-06 00:23:56,030 INFO [train.py:901] (2/4) Epoch 5, batch 900, loss[loss=0.2914, simple_loss=0.3429, pruned_loss=0.1199, over 7199.00 frames. ], tot_loss[loss=0.2974, simple_loss=0.3555, pruned_loss=0.1197, over 1593739.72 frames. ], batch size: 16, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:24:06,311 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3180, 2.7814, 1.6508, 1.9617, 2.0366, 1.3141, 1.8485, 2.0099], + device='cuda:2'), covar=tensor([0.1258, 0.0235, 0.0883, 0.0587, 0.0630, 0.1165, 0.0906, 0.0773], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0240, 0.0312, 0.0305, 0.0322, 0.0313, 0.0338, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:24:29,752 INFO [train.py:901] (2/4) Epoch 5, batch 950, loss[loss=0.2873, simple_loss=0.3618, pruned_loss=0.1064, over 8202.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3551, pruned_loss=0.1187, over 1597258.93 frames. ], batch size: 23, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:00,483 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8385, 1.3877, 3.2460, 1.1803, 2.2004, 3.4133, 3.4665, 3.0307], + device='cuda:2'), covar=tensor([0.1016, 0.1501, 0.0351, 0.2065, 0.0749, 0.0287, 0.0336, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0262, 0.0219, 0.0260, 0.0221, 0.0195, 0.0213, 0.0268], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:25:01,024 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.004e+02 3.759e+02 4.642e+02 8.675e+02, threshold=7.519e+02, percent-clipped=2.0 +2023-02-06 00:25:03,068 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:25:05,105 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 00:25:06,446 INFO [train.py:901] (2/4) Epoch 5, batch 1000, loss[loss=0.2946, simple_loss=0.3542, pruned_loss=0.1176, over 8502.00 frames. ], tot_loss[loss=0.296, simple_loss=0.355, pruned_loss=0.1185, over 1602352.53 frames. ], batch size: 26, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:40,439 INFO [train.py:901] (2/4) Epoch 5, batch 1050, loss[loss=0.2375, simple_loss=0.295, pruned_loss=0.08996, over 7704.00 frames. ], tot_loss[loss=0.2962, simple_loss=0.3548, pruned_loss=0.1188, over 1602712.05 frames. ], batch size: 18, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:40,443 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 00:25:47,549 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2573, 1.2436, 3.6318, 1.4196, 2.6256, 2.9230, 3.2067, 3.2275], + device='cuda:2'), covar=tensor([0.0994, 0.4872, 0.0981, 0.3253, 0.2579, 0.1425, 0.0950, 0.1133], + device='cuda:2'), in_proj_covar=tensor([0.0314, 0.0476, 0.0383, 0.0392, 0.0466, 0.0383, 0.0380, 0.0423], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 00:25:52,110 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-06 00:26:08,784 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.885e+02 3.252e+02 3.786e+02 4.850e+02 9.380e+02, threshold=7.572e+02, percent-clipped=3.0 +2023-02-06 00:26:13,583 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:14,851 INFO [train.py:901] (2/4) Epoch 5, batch 1100, loss[loss=0.2643, simple_loss=0.336, pruned_loss=0.09623, over 7965.00 frames. ], tot_loss[loss=0.2959, simple_loss=0.3544, pruned_loss=0.1187, over 1606192.93 frames. ], batch size: 21, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:23,004 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:50,044 INFO [train.py:901] (2/4) Epoch 5, batch 1150, loss[loss=0.2798, simple_loss=0.3308, pruned_loss=0.1144, over 7975.00 frames. ], tot_loss[loss=0.2952, simple_loss=0.3541, pruned_loss=0.1181, over 1609713.32 frames. ], batch size: 21, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:54,937 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:59,859 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 00:27:02,054 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 00:27:13,081 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:18,265 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 3.101e+02 4.052e+02 5.357e+02 1.331e+03, threshold=8.105e+02, percent-clipped=11.0 +2023-02-06 00:27:23,596 INFO [train.py:901] (2/4) Epoch 5, batch 1200, loss[loss=0.2942, simple_loss=0.3668, pruned_loss=0.1108, over 8243.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3538, pruned_loss=0.117, over 1615373.38 frames. ], batch size: 24, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:27:32,563 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:34,633 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0960, 1.2695, 1.1956, 0.2991, 1.2620, 0.9907, 0.2180, 1.1756], + device='cuda:2'), covar=tensor([0.0124, 0.0096, 0.0087, 0.0157, 0.0086, 0.0322, 0.0231, 0.0088], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0209, 0.0174, 0.0254, 0.0209, 0.0351, 0.0282, 0.0243], + device='cuda:2'), out_proj_covar=tensor([1.1046e-04, 7.7795e-05, 6.2877e-05, 9.1910e-05, 7.8908e-05, 1.4121e-04, + 1.0697e-04, 8.9627e-05], device='cuda:2') +2023-02-06 00:27:38,026 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5878, 1.9538, 2.3298, 1.0084, 2.4471, 1.5221, 0.8096, 1.8437], + device='cuda:2'), covar=tensor([0.0194, 0.0094, 0.0063, 0.0181, 0.0070, 0.0279, 0.0290, 0.0087], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0209, 0.0174, 0.0254, 0.0208, 0.0349, 0.0281, 0.0243], + device='cuda:2'), out_proj_covar=tensor([1.1037e-04, 7.7643e-05, 6.2731e-05, 9.1967e-05, 7.8688e-05, 1.4054e-04, + 1.0671e-04, 8.9448e-05], device='cuda:2') +2023-02-06 00:28:00,122 INFO [train.py:901] (2/4) Epoch 5, batch 1250, loss[loss=0.3365, simple_loss=0.3961, pruned_loss=0.1384, over 8650.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3534, pruned_loss=0.1177, over 1610638.06 frames. 
], batch size: 34, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:09,238 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5226, 1.6439, 1.4975, 1.1980, 1.5047, 1.3952, 1.7372, 1.8544], + device='cuda:2'), covar=tensor([0.0597, 0.1293, 0.1849, 0.1482, 0.0657, 0.1754, 0.0830, 0.0589], + device='cuda:2'), in_proj_covar=tensor([0.0145, 0.0186, 0.0226, 0.0187, 0.0141, 0.0197, 0.0154, 0.0158], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 00:28:29,019 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.057e+02 3.737e+02 5.343e+02 1.068e+03, threshold=7.474e+02, percent-clipped=1.0 +2023-02-06 00:28:34,046 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:28:34,551 INFO [train.py:901] (2/4) Epoch 5, batch 1300, loss[loss=0.2793, simple_loss=0.3489, pruned_loss=0.1048, over 8490.00 frames. ], tot_loss[loss=0.2937, simple_loss=0.353, pruned_loss=0.1172, over 1612616.69 frames. ], batch size: 28, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:48,194 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2215, 2.2912, 1.6105, 1.9156, 1.7695, 1.2919, 1.4698, 1.8449], + device='cuda:2'), covar=tensor([0.0971, 0.0330, 0.0880, 0.0460, 0.0521, 0.1102, 0.0863, 0.0639], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0239, 0.0310, 0.0301, 0.0318, 0.0309, 0.0336, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:29:10,153 INFO [train.py:901] (2/4) Epoch 5, batch 1350, loss[loss=0.2626, simple_loss=0.3358, pruned_loss=0.09469, over 8661.00 frames. ], tot_loss[loss=0.2933, simple_loss=0.3523, pruned_loss=0.1172, over 1611698.74 frames. ], batch size: 34, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:29:18,826 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:21,575 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:38,304 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:39,349 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 3.141e+02 3.942e+02 4.566e+02 9.800e+02, threshold=7.885e+02, percent-clipped=1.0 +2023-02-06 00:29:44,190 INFO [train.py:901] (2/4) Epoch 5, batch 1400, loss[loss=0.317, simple_loss=0.3733, pruned_loss=0.1304, over 8202.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3534, pruned_loss=0.1175, over 1615035.18 frames. ], batch size: 23, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:02,698 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 00:30:18,171 INFO [train.py:901] (2/4) Epoch 5, batch 1450, loss[loss=0.3202, simple_loss=0.3729, pruned_loss=0.1337, over 8478.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3539, pruned_loss=0.118, over 1615953.03 frames. ], batch size: 25, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:32,272 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 00:30:32,497 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:35,063 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0495, 1.6839, 3.8761, 1.9131, 2.3866, 4.3445, 4.0264, 3.7642], + device='cuda:2'), covar=tensor([0.1115, 0.1507, 0.0433, 0.1678, 0.0983, 0.0215, 0.0430, 0.0527], + device='cuda:2'), in_proj_covar=tensor([0.0239, 0.0273, 0.0223, 0.0262, 0.0228, 0.0201, 0.0226, 0.0280], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 00:30:49,117 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.073e+02 3.068e+02 3.705e+02 5.190e+02 1.303e+03, threshold=7.410e+02, percent-clipped=4.0 +2023-02-06 00:30:50,032 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:54,029 INFO [train.py:901] (2/4) Epoch 5, batch 1500, loss[loss=0.2586, simple_loss=0.3168, pruned_loss=0.1002, over 7691.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3538, pruned_loss=0.1177, over 1617803.73 frames. ], batch size: 18, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:54,780 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:12,656 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.82 vs. limit=2.0 +2023-02-06 00:31:27,611 INFO [train.py:901] (2/4) Epoch 5, batch 1550, loss[loss=0.2715, simple_loss=0.3201, pruned_loss=0.1114, over 6821.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3527, pruned_loss=0.1174, over 1612219.89 frames. ], batch size: 15, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:31:29,110 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9734, 2.0916, 2.3990, 1.3074, 2.2877, 1.7700, 1.5397, 1.8883], + device='cuda:2'), covar=tensor([0.0211, 0.0099, 0.0062, 0.0180, 0.0096, 0.0199, 0.0248, 0.0106], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0210, 0.0177, 0.0259, 0.0209, 0.0348, 0.0279, 0.0244], + device='cuda:2'), out_proj_covar=tensor([1.1086e-04, 7.7402e-05, 6.3712e-05, 9.3764e-05, 7.8667e-05, 1.3926e-04, + 1.0551e-04, 8.9919e-05], device='cuda:2') +2023-02-06 00:31:31,091 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:31,251 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.83 vs. limit=2.0 +2023-02-06 00:31:48,413 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:58,126 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.067e+02 3.419e+02 4.027e+02 4.998e+02 8.696e+02, threshold=8.054e+02, percent-clipped=2.0 +2023-02-06 00:32:03,146 INFO [train.py:901] (2/4) Epoch 5, batch 1600, loss[loss=0.2739, simple_loss=0.3469, pruned_loss=0.1004, over 8301.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.3522, pruned_loss=0.117, over 1615462.42 frames. 
], batch size: 23, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:32:14,803 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:20,124 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:37,058 INFO [train.py:901] (2/4) Epoch 5, batch 1650, loss[loss=0.3019, simple_loss=0.3707, pruned_loss=0.1166, over 8026.00 frames. ], tot_loss[loss=0.2916, simple_loss=0.3517, pruned_loss=0.1158, over 1618884.92 frames. ], batch size: 22, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:00,643 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.84 vs. limit=2.0 +2023-02-06 00:33:07,304 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.926e+02 3.722e+02 4.611e+02 9.053e+02, threshold=7.444e+02, percent-clipped=4.0 +2023-02-06 00:33:11,843 INFO [train.py:901] (2/4) Epoch 5, batch 1700, loss[loss=0.2279, simple_loss=0.2893, pruned_loss=0.08325, over 7714.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.354, pruned_loss=0.1169, over 1621944.15 frames. ], batch size: 18, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:17,289 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:33,625 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:36,450 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8899, 2.2676, 4.5686, 1.2540, 2.8977, 2.4600, 1.7582, 2.6021], + device='cuda:2'), covar=tensor([0.1326, 0.1748, 0.0537, 0.3079, 0.1250, 0.2107, 0.1367, 0.1981], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0451, 0.0527, 0.0538, 0.0578, 0.0518, 0.0450, 0.0586], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:33:45,392 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3810, 2.0435, 3.1168, 2.6654, 2.6633, 2.0462, 1.4891, 1.3183], + device='cuda:2'), covar=tensor([0.1442, 0.1670, 0.0373, 0.0750, 0.0754, 0.0845, 0.0916, 0.1744], + device='cuda:2'), in_proj_covar=tensor([0.0713, 0.0657, 0.0564, 0.0640, 0.0750, 0.0608, 0.0591, 0.0616], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:33:47,846 INFO [train.py:901] (2/4) Epoch 5, batch 1750, loss[loss=0.2144, simple_loss=0.2853, pruned_loss=0.07179, over 7700.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3523, pruned_loss=0.1155, over 1623699.01 frames. ], batch size: 18, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:34:16,524 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.177e+02 3.118e+02 3.687e+02 4.787e+02 9.448e+02, threshold=7.373e+02, percent-clipped=7.0 +2023-02-06 00:34:21,853 INFO [train.py:901] (2/4) Epoch 5, batch 1800, loss[loss=0.2578, simple_loss=0.3251, pruned_loss=0.09521, over 7649.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3526, pruned_loss=0.1156, over 1621342.28 frames. 
], batch size: 19, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:34:36,711 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:43,249 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:57,300 INFO [train.py:901] (2/4) Epoch 5, batch 1850, loss[loss=0.2348, simple_loss=0.3176, pruned_loss=0.07597, over 8230.00 frames. ], tot_loss[loss=0.2916, simple_loss=0.3519, pruned_loss=0.1156, over 1618947.18 frames. ], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:35:12,359 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34205.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:26,214 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.489e+02 4.150e+02 5.670e+02 1.027e+03, threshold=8.299e+02, percent-clipped=7.0 +2023-02-06 00:35:29,069 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34230.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:30,903 INFO [train.py:901] (2/4) Epoch 5, batch 1900, loss[loss=0.2628, simple_loss=0.3313, pruned_loss=0.09713, over 8253.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3523, pruned_loss=0.1157, over 1618599.70 frames. ], batch size: 24, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:05,945 INFO [train.py:901] (2/4) Epoch 5, batch 1950, loss[loss=0.3396, simple_loss=0.3927, pruned_loss=0.1432, over 8579.00 frames. ], tot_loss[loss=0.2916, simple_loss=0.3519, pruned_loss=0.1157, over 1617453.68 frames. ], batch size: 39, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:09,863 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 00:36:18,719 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:36:23,388 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 00:36:35,388 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.967e+02 3.945e+02 4.927e+02 1.257e+03, threshold=7.890e+02, percent-clipped=2.0 +2023-02-06 00:36:39,083 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4530, 2.2947, 2.5481, 2.1317, 1.5305, 2.6190, 0.8697, 1.9499], + device='cuda:2'), covar=tensor([0.3309, 0.2791, 0.1117, 0.3496, 0.7540, 0.1056, 0.7161, 0.2759], + device='cuda:2'), in_proj_covar=tensor([0.0130, 0.0123, 0.0084, 0.0167, 0.0217, 0.0085, 0.0149, 0.0125], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:36:40,168 INFO [train.py:901] (2/4) Epoch 5, batch 2000, loss[loss=0.3461, simple_loss=0.3906, pruned_loss=0.1507, over 8027.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.3522, pruned_loss=0.1161, over 1616881.56 frames. ], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:42,286 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 00:37:14,366 INFO [train.py:901] (2/4) Epoch 5, batch 2050, loss[loss=0.3248, simple_loss=0.3801, pruned_loss=0.1347, over 8662.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3524, pruned_loss=0.1165, over 1615616.00 frames. 
], batch size: 34, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:37:17,170 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5295, 1.8106, 1.9898, 1.7850, 1.0903, 2.0836, 0.3620, 1.2614], + device='cuda:2'), covar=tensor([0.4257, 0.2737, 0.1425, 0.2651, 0.6797, 0.1099, 0.6874, 0.2970], + device='cuda:2'), in_proj_covar=tensor([0.0130, 0.0122, 0.0083, 0.0167, 0.0213, 0.0084, 0.0147, 0.0123], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:37:31,121 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:33,924 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:38,677 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:45,186 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 3.245e+02 4.066e+02 4.898e+02 1.293e+03, threshold=8.132e+02, percent-clipped=4.0 +2023-02-06 00:37:49,847 INFO [train.py:901] (2/4) Epoch 5, batch 2100, loss[loss=0.2145, simple_loss=0.2854, pruned_loss=0.07183, over 7664.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.3526, pruned_loss=0.1168, over 1615585.35 frames. ], batch size: 19, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:37:51,414 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:52,722 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7166, 2.0145, 1.5013, 2.3573, 0.9737, 1.4167, 1.5064, 1.9338], + device='cuda:2'), covar=tensor([0.0963, 0.1151, 0.1698, 0.0643, 0.1933, 0.2239, 0.1587, 0.1101], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0268, 0.0287, 0.0223, 0.0260, 0.0288, 0.0290, 0.0264], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 00:38:23,173 INFO [train.py:901] (2/4) Epoch 5, batch 2150, loss[loss=0.3058, simple_loss=0.3783, pruned_loss=0.1166, over 8239.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3535, pruned_loss=0.117, over 1617895.16 frames. 
], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:38:30,768 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6441, 1.9198, 3.3721, 1.1944, 2.3518, 1.7788, 1.6085, 2.1157], + device='cuda:2'), covar=tensor([0.1312, 0.1701, 0.0622, 0.2755, 0.1246, 0.2251, 0.1292, 0.1889], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0448, 0.0532, 0.0530, 0.0577, 0.0520, 0.0445, 0.0581], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:38:39,926 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:50,620 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:51,960 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1846, 1.4443, 4.3763, 1.7706, 3.7815, 3.6025, 3.9078, 3.7908], + device='cuda:2'), covar=tensor([0.0490, 0.3793, 0.0398, 0.2466, 0.1142, 0.0659, 0.0521, 0.0639], + device='cuda:2'), in_proj_covar=tensor([0.0321, 0.0489, 0.0399, 0.0408, 0.0484, 0.0393, 0.0398, 0.0440], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 00:38:53,806 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.929e+02 3.753e+02 4.663e+02 1.529e+03, threshold=7.506e+02, percent-clipped=2.0 +2023-02-06 00:38:53,933 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8629, 5.9102, 5.1298, 2.4727, 5.1898, 5.4757, 5.4198, 5.0124], + device='cuda:2'), covar=tensor([0.0518, 0.0380, 0.0711, 0.4141, 0.0550, 0.0539, 0.1014, 0.0573], + device='cuda:2'), in_proj_covar=tensor([0.0391, 0.0289, 0.0310, 0.0401, 0.0309, 0.0270, 0.0300, 0.0244], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 00:38:59,149 INFO [train.py:901] (2/4) Epoch 5, batch 2200, loss[loss=0.3045, simple_loss=0.3665, pruned_loss=0.1212, over 8243.00 frames. ], tot_loss[loss=0.2933, simple_loss=0.3531, pruned_loss=0.1167, over 1619436.48 frames. ], batch size: 24, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:39:09,611 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.84 vs. limit=5.0 +2023-02-06 00:39:20,089 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3397, 1.2305, 4.4342, 1.6561, 3.8619, 3.6957, 3.8894, 3.8772], + device='cuda:2'), covar=tensor([0.0356, 0.3797, 0.0351, 0.2441, 0.0998, 0.0594, 0.0520, 0.0500], + device='cuda:2'), in_proj_covar=tensor([0.0322, 0.0489, 0.0400, 0.0411, 0.0483, 0.0395, 0.0403, 0.0440], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 00:39:32,427 INFO [train.py:901] (2/4) Epoch 5, batch 2250, loss[loss=0.3093, simple_loss=0.3726, pruned_loss=0.123, over 8464.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3531, pruned_loss=0.1174, over 1618850.29 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:39:39,440 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.02 vs. 
limit=5.0 +2023-02-06 00:39:52,223 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:39:59,511 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:02,635 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.996e+02 3.688e+02 4.883e+02 6.349e+02 4.437e+03, threshold=9.766e+02, percent-clipped=16.0 +2023-02-06 00:40:07,909 INFO [train.py:901] (2/4) Epoch 5, batch 2300, loss[loss=0.3049, simple_loss=0.3655, pruned_loss=0.1221, over 8193.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3519, pruned_loss=0.1167, over 1615001.92 frames. ], batch size: 23, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:40:12,727 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34640.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:16,071 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:34,981 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:42,248 INFO [train.py:901] (2/4) Epoch 5, batch 2350, loss[loss=0.2958, simple_loss=0.3665, pruned_loss=0.1125, over 8505.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3534, pruned_loss=0.1172, over 1618207.50 frames. ], batch size: 28, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:40:51,695 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:41:11,430 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 3.189e+02 4.018e+02 4.942e+02 1.178e+03, threshold=8.036e+02, percent-clipped=1.0 +2023-02-06 00:41:12,934 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34728.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:41:16,102 INFO [train.py:901] (2/4) Epoch 5, batch 2400, loss[loss=0.3197, simple_loss=0.374, pruned_loss=0.1327, over 8510.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3534, pruned_loss=0.1177, over 1616868.42 frames. ], batch size: 26, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:41:47,415 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34777.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:41:51,215 INFO [train.py:901] (2/4) Epoch 5, batch 2450, loss[loss=0.2828, simple_loss=0.3478, pruned_loss=0.1089, over 8734.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.3542, pruned_loss=0.1186, over 1613644.38 frames. 
], batch size: 34, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:42:04,208 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:42:14,155 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1720, 1.8298, 2.8919, 2.2981, 2.3161, 1.9143, 1.4528, 1.0054], + device='cuda:2'), covar=tensor([0.1498, 0.1492, 0.0345, 0.0738, 0.0723, 0.0790, 0.0928, 0.1551], + device='cuda:2'), in_proj_covar=tensor([0.0726, 0.0661, 0.0564, 0.0639, 0.0744, 0.0609, 0.0593, 0.0617], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:42:19,462 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.991e+02 3.791e+02 4.954e+02 1.109e+03, threshold=7.583e+02, percent-clipped=3.0 +2023-02-06 00:42:24,154 INFO [train.py:901] (2/4) Epoch 5, batch 2500, loss[loss=0.2735, simple_loss=0.3426, pruned_loss=0.1022, over 8197.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3535, pruned_loss=0.1182, over 1617482.10 frames. ], batch size: 23, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:42:55,975 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:42:59,761 INFO [train.py:901] (2/4) Epoch 5, batch 2550, loss[loss=0.2764, simple_loss=0.3355, pruned_loss=0.1086, over 8235.00 frames. ], tot_loss[loss=0.2925, simple_loss=0.3517, pruned_loss=0.1166, over 1616454.45 frames. ], batch size: 22, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:43:13,486 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:43:29,138 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 2.947e+02 3.618e+02 4.736e+02 1.253e+03, threshold=7.237e+02, percent-clipped=4.0 +2023-02-06 00:43:33,913 INFO [train.py:901] (2/4) Epoch 5, batch 2600, loss[loss=0.3373, simple_loss=0.3903, pruned_loss=0.1421, over 8369.00 frames. ], tot_loss[loss=0.2957, simple_loss=0.3545, pruned_loss=0.1184, over 1619250.16 frames. ], batch size: 24, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:43:49,010 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:09,526 INFO [train.py:901] (2/4) Epoch 5, batch 2650, loss[loss=0.2813, simple_loss=0.3383, pruned_loss=0.1121, over 7820.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3549, pruned_loss=0.1188, over 1620806.48 frames. 
], batch size: 20, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:44:10,293 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:13,019 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:39,307 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.922e+02 3.827e+02 4.980e+02 8.274e+02, threshold=7.654e+02, percent-clipped=5.0 +2023-02-06 00:44:41,488 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3077, 1.4882, 1.5317, 0.8240, 1.5746, 1.1234, 0.6827, 1.4233], + device='cuda:2'), covar=tensor([0.0162, 0.0087, 0.0059, 0.0155, 0.0091, 0.0254, 0.0212, 0.0065], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0216, 0.0177, 0.0259, 0.0203, 0.0347, 0.0270, 0.0246], + device='cuda:2'), out_proj_covar=tensor([1.1211e-04, 7.8453e-05, 6.2863e-05, 9.3077e-05, 7.5833e-05, 1.3713e-04, + 9.9953e-05, 8.9828e-05], device='cuda:2') +2023-02-06 00:44:43,766 INFO [train.py:901] (2/4) Epoch 5, batch 2700, loss[loss=0.3468, simple_loss=0.3856, pruned_loss=0.1539, over 8498.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.3537, pruned_loss=0.1175, over 1623190.24 frames. ], batch size: 26, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:44:44,126 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.52 vs. limit=5.0 +2023-02-06 00:45:09,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:10,728 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35072.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:45:18,323 INFO [train.py:901] (2/4) Epoch 5, batch 2750, loss[loss=0.2983, simple_loss=0.3616, pruned_loss=0.1175, over 8181.00 frames. ], tot_loss[loss=0.2946, simple_loss=0.3538, pruned_loss=0.1177, over 1619691.90 frames. 
], batch size: 23, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:45:29,196 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4036, 1.8614, 3.3191, 2.5844, 2.6773, 1.9161, 1.4033, 1.3165], + device='cuda:2'), covar=tensor([0.1703, 0.2051, 0.0436, 0.1010, 0.0973, 0.0926, 0.0985, 0.2009], + device='cuda:2'), in_proj_covar=tensor([0.0738, 0.0666, 0.0564, 0.0649, 0.0747, 0.0619, 0.0594, 0.0621], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:45:29,757 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:32,516 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:37,951 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:39,343 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7726, 2.9122, 1.8413, 2.0867, 2.4404, 1.5716, 2.2082, 2.2969], + device='cuda:2'), covar=tensor([0.1123, 0.0209, 0.0833, 0.0589, 0.0462, 0.1028, 0.0775, 0.0769], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0240, 0.0313, 0.0308, 0.0321, 0.0304, 0.0336, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:45:45,352 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8099, 2.3000, 1.7019, 2.7335, 1.6347, 1.4968, 1.8634, 2.2436], + device='cuda:2'), covar=tensor([0.1094, 0.0979, 0.1365, 0.0507, 0.1308, 0.1928, 0.1436, 0.0921], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0272, 0.0294, 0.0227, 0.0255, 0.0289, 0.0288, 0.0263], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 00:45:48,687 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.004e+02 3.039e+02 3.659e+02 5.251e+02 1.248e+03, threshold=7.317e+02, percent-clipped=8.0 +2023-02-06 00:45:53,667 INFO [train.py:901] (2/4) Epoch 5, batch 2800, loss[loss=0.2491, simple_loss=0.3193, pruned_loss=0.08946, over 7914.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3533, pruned_loss=0.1172, over 1619498.94 frames. ], batch size: 20, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:46:16,473 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-06 00:46:27,512 INFO [train.py:901] (2/4) Epoch 5, batch 2850, loss[loss=0.2782, simple_loss=0.3374, pruned_loss=0.1096, over 8022.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3531, pruned_loss=0.1179, over 1613735.84 frames. 
], batch size: 22, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:46:30,531 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35187.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:46:34,577 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1752, 1.8337, 1.6046, 1.4350, 1.6327, 1.6733, 2.1908, 1.7184], + device='cuda:2'), covar=tensor([0.0590, 0.1250, 0.1811, 0.1502, 0.0670, 0.1625, 0.0822, 0.0656], + device='cuda:2'), in_proj_covar=tensor([0.0143, 0.0184, 0.0225, 0.0187, 0.0139, 0.0194, 0.0150, 0.0158], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 00:46:46,215 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4964, 1.6055, 2.3726, 1.0883, 1.7572, 1.7419, 1.4547, 1.4214], + device='cuda:2'), covar=tensor([0.1252, 0.1507, 0.0592, 0.2693, 0.1025, 0.2046, 0.1301, 0.1492], + device='cuda:2'), in_proj_covar=tensor([0.0462, 0.0437, 0.0519, 0.0526, 0.0576, 0.0508, 0.0447, 0.0581], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:46:58,369 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.990e+02 3.598e+02 4.675e+02 1.498e+03, threshold=7.197e+02, percent-clipped=4.0 +2023-02-06 00:47:03,665 INFO [train.py:901] (2/4) Epoch 5, batch 2900, loss[loss=0.3125, simple_loss=0.3711, pruned_loss=0.1269, over 8659.00 frames. ], tot_loss[loss=0.2953, simple_loss=0.3535, pruned_loss=0.1186, over 1614036.76 frames. ], batch size: 34, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:47:10,554 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2081, 1.7422, 2.6996, 2.2969, 2.3088, 1.9542, 1.4519, 1.3932], + device='cuda:2'), covar=tensor([0.1148, 0.1620, 0.0353, 0.0673, 0.0628, 0.0665, 0.0772, 0.1337], + device='cuda:2'), in_proj_covar=tensor([0.0729, 0.0665, 0.0559, 0.0645, 0.0748, 0.0612, 0.0593, 0.0611], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:47:36,539 INFO [train.py:901] (2/4) Epoch 5, batch 2950, loss[loss=0.3443, simple_loss=0.3931, pruned_loss=0.1477, over 8407.00 frames. ], tot_loss[loss=0.2945, simple_loss=0.3531, pruned_loss=0.1179, over 1609054.61 frames. ], batch size: 49, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:47:41,853 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 00:47:54,166 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5429, 4.5887, 4.0044, 1.8127, 4.0764, 3.9962, 4.2315, 3.6249], + device='cuda:2'), covar=tensor([0.0714, 0.0553, 0.0916, 0.4690, 0.0666, 0.0826, 0.1191, 0.0715], + device='cuda:2'), in_proj_covar=tensor([0.0383, 0.0279, 0.0309, 0.0393, 0.0302, 0.0267, 0.0292, 0.0236], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 00:48:06,832 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 3.185e+02 3.825e+02 4.988e+02 1.295e+03, threshold=7.649e+02, percent-clipped=4.0 +2023-02-06 00:48:07,076 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:12,109 INFO [train.py:901] (2/4) Epoch 5, batch 3000, loss[loss=0.3211, simple_loss=0.3784, pruned_loss=0.1319, over 8664.00 frames. 
], tot_loss[loss=0.2937, simple_loss=0.3527, pruned_loss=0.1174, over 1607296.12 frames. ], batch size: 39, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:48:12,109 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 00:48:25,509 INFO [train.py:935] (2/4) Epoch 5, validation: loss=0.2228, simple_loss=0.319, pruned_loss=0.0633, over 944034.00 frames. +2023-02-06 00:48:25,509 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 00:48:34,488 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5193, 1.5792, 1.4836, 1.3641, 0.7791, 1.6212, 0.0843, 1.0144], + device='cuda:2'), covar=tensor([0.3225, 0.2284, 0.1114, 0.2253, 0.6226, 0.0974, 0.5533, 0.2258], + device='cuda:2'), in_proj_covar=tensor([0.0123, 0.0124, 0.0078, 0.0163, 0.0202, 0.0080, 0.0142, 0.0119], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:48:39,261 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:41,986 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:44,693 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:47,340 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35363.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:48:59,229 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:01,098 INFO [train.py:901] (2/4) Epoch 5, batch 3050, loss[loss=0.2839, simple_loss=0.3515, pruned_loss=0.1081, over 8645.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3535, pruned_loss=0.1175, over 1615156.85 frames. ], batch size: 34, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:49:01,954 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:07,210 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:21,413 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1442, 1.6249, 1.2411, 1.6533, 1.4496, 1.0833, 1.1097, 1.4335], + device='cuda:2'), covar=tensor([0.0766, 0.0332, 0.0779, 0.0384, 0.0525, 0.0892, 0.0689, 0.0502], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0240, 0.0313, 0.0308, 0.0322, 0.0307, 0.0344, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:49:29,669 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.238e+02 3.010e+02 3.735e+02 4.816e+02 9.592e+02, threshold=7.471e+02, percent-clipped=3.0 +2023-02-06 00:49:34,199 INFO [train.py:901] (2/4) Epoch 5, batch 3100, loss[loss=0.3456, simple_loss=0.4, pruned_loss=0.1456, over 8697.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3529, pruned_loss=0.1173, over 1612464.48 frames. 
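
The `Computing validation loss` block above shows the loop pausing every few thousand batches to score the full validation set in eval mode (here `loss=0.2228` over 944034 frames) and to report peak GPU memory. A compact sketch of that pass; `compute_loss` and the surrounding names are assumed helpers, not the recipe's exact code:

```python
import torch

def run_validation(model, valid_loader, compute_loss, device="cuda:0"):
    """Average the loss over the validation set, then restore train mode."""
    print("Computing validation loss")
    model.eval()
    tot_loss, tot_frames = 0.0, 0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = compute_loss(model, batch)  # assumed helper
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()
    print(f"validation: loss={tot_loss / tot_frames:.4f}, "
          f"over {tot_frames:.2f} frames.")
    peak_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
    print(f"Maximum memory allocated so far is {peak_mb}MB")
```
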
], batch size: 34, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:49:36,283 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1551, 1.3344, 2.2968, 1.0400, 2.3448, 2.4341, 2.4256, 2.0881], + device='cuda:2'), covar=tensor([0.1091, 0.1171, 0.0499, 0.2007, 0.0487, 0.0367, 0.0535, 0.0786], + device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0265, 0.0225, 0.0264, 0.0221, 0.0198, 0.0229, 0.0276], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 00:49:41,039 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35443.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:49:48,740 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35454.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:59,648 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35468.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:50:09,986 INFO [train.py:901] (2/4) Epoch 5, batch 3150, loss[loss=0.3054, simple_loss=0.3691, pruned_loss=0.1208, over 8193.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3532, pruned_loss=0.117, over 1610995.41 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:50:39,622 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.108e+02 3.249e+02 4.087e+02 5.030e+02 9.472e+02, threshold=8.174e+02, percent-clipped=3.0 +2023-02-06 00:50:43,440 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 00:50:44,419 INFO [train.py:901] (2/4) Epoch 5, batch 3200, loss[loss=0.3147, simple_loss=0.3619, pruned_loss=0.1338, over 8031.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3541, pruned_loss=0.1179, over 1610285.94 frames. ], batch size: 22, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:50:54,843 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:51:09,461 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35569.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:51:20,160 INFO [train.py:901] (2/4) Epoch 5, batch 3250, loss[loss=0.2882, simple_loss=0.3639, pruned_loss=0.1062, over 8256.00 frames. ], tot_loss[loss=0.2942, simple_loss=0.3537, pruned_loss=0.1173, over 1616424.11 frames. 
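
The `[zipformer.py:1185] warmup_begin=... num_to_drop=... layers_to_drop={0}` entries record a LayerDrop-style regularizer: on some batches the encoder bypasses whole layers, and each layer carries its own warmup window, which is why several different `warmup_begin`/`warmup_end` pairs appear side by side at nearly the same batch count. The actual schedule lives in the zipformer code; the sketch below only illustrates the mechanism, with a flat drop probability as a stand-in:

```python
import random

def pick_layers_to_drop(num_layers: int, drop_prob: float = 0.05) -> set:
    """Choose encoder layers to bypass (identity shortcut) for one batch."""
    layers_to_drop = {i for i in range(num_layers)
                      if random.random() < drop_prob}
    print(f"num_to_drop={len(layers_to_drop)}, "
          f"layers_to_drop={layers_to_drop}")
    return layers_to_drop
```
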
], batch size: 24, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:51:35,383 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5381, 1.5059, 1.6992, 1.5575, 1.2085, 1.6790, 0.8322, 1.3322], + device='cuda:2'), covar=tensor([0.2778, 0.1556, 0.0887, 0.1455, 0.3830, 0.0735, 0.3652, 0.1735], + device='cuda:2'), in_proj_covar=tensor([0.0126, 0.0125, 0.0080, 0.0165, 0.0204, 0.0081, 0.0140, 0.0120], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:51:43,807 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2992, 1.9945, 3.0845, 2.5049, 2.6102, 2.0060, 1.4036, 1.4387], + device='cuda:2'), covar=tensor([0.1895, 0.2057, 0.0484, 0.0954, 0.0860, 0.1011, 0.1147, 0.1972], + device='cuda:2'), in_proj_covar=tensor([0.0738, 0.0667, 0.0571, 0.0652, 0.0760, 0.0623, 0.0605, 0.0617], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:51:50,492 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.378e+02 4.149e+02 5.121e+02 1.146e+03, threshold=8.298e+02, percent-clipped=3.0 +2023-02-06 00:51:55,280 INFO [train.py:901] (2/4) Epoch 5, batch 3300, loss[loss=0.2926, simple_loss=0.3561, pruned_loss=0.1145, over 8318.00 frames. ], tot_loss[loss=0.295, simple_loss=0.3547, pruned_loss=0.1177, over 1613704.43 frames. ], batch size: 25, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:52:04,544 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0899, 1.6540, 1.1371, 1.4769, 1.2087, 0.9542, 1.2289, 1.4106], + device='cuda:2'), covar=tensor([0.1058, 0.0426, 0.1315, 0.0638, 0.0854, 0.1528, 0.0971, 0.0779], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0236, 0.0312, 0.0302, 0.0318, 0.0307, 0.0344, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 00:52:12,160 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1541, 1.1992, 1.0666, 1.0112, 0.7948, 1.1944, 0.0121, 0.8895], + device='cuda:2'), covar=tensor([0.3825, 0.2337, 0.1360, 0.2314, 0.5847, 0.1034, 0.5168, 0.2555], + device='cuda:2'), in_proj_covar=tensor([0.0125, 0.0124, 0.0080, 0.0164, 0.0204, 0.0081, 0.0140, 0.0121], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:52:30,151 INFO [train.py:901] (2/4) Epoch 5, batch 3350, loss[loss=0.319, simple_loss=0.3636, pruned_loss=0.1372, over 8232.00 frames. ], tot_loss[loss=0.296, simple_loss=0.3557, pruned_loss=0.1181, over 1616864.88 frames. ], batch size: 22, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:52:47,835 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35707.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:53:01,469 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.966e+02 3.555e+02 4.125e+02 4.946e+02 1.065e+03, threshold=8.250e+02, percent-clipped=5.0 +2023-02-06 00:53:06,230 INFO [train.py:901] (2/4) Epoch 5, batch 3400, loss[loss=0.2854, simple_loss=0.3476, pruned_loss=0.1116, over 8511.00 frames. ], tot_loss[loss=0.2959, simple_loss=0.3555, pruned_loss=0.1182, over 1619401.18 frames. 
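
The `attn_weights_entropy` tensors dumped above are a health check on the attention heads: each value is the entropy of one head's weight distribution, so values near 0 flag a head that has collapsed onto a single key while large values flag a near-uniform head. One plausible way to compute it, assuming weights of shape `(num_heads, query_len, key_len)`; the exact reduction in zipformer.py may differ:

```python
import torch

def attn_weights_entropy(attn_weights: torch.Tensor) -> torch.Tensor:
    """attn_weights: (num_heads, query_len, key_len), rows summing to 1.
    Returns one averaged entropy value per head."""
    eps = 1.0e-20
    ent = -(attn_weights * (attn_weights + eps).log()).sum(dim=-1)
    return ent.mean(dim=-1)  # average over query positions
```
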
], batch size: 26, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:53:08,409 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:53:09,277 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4735, 1.9298, 3.0909, 1.1243, 2.2264, 1.7011, 1.5706, 1.8636], + device='cuda:2'), covar=tensor([0.1438, 0.1539, 0.0567, 0.2931, 0.1194, 0.2246, 0.1357, 0.1990], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0443, 0.0525, 0.0531, 0.0573, 0.0513, 0.0446, 0.0590], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:53:28,169 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6462, 1.9002, 1.9039, 1.3574, 0.9078, 2.0132, 0.2754, 1.2517], + device='cuda:2'), covar=tensor([0.3136, 0.1839, 0.1132, 0.2946, 0.7359, 0.0961, 0.5482, 0.2059], + device='cuda:2'), in_proj_covar=tensor([0.0123, 0.0125, 0.0080, 0.0164, 0.0206, 0.0083, 0.0141, 0.0119], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 00:53:28,797 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1901, 1.4084, 1.4022, 1.2474, 1.3729, 1.3059, 1.6840, 1.6744], + device='cuda:2'), covar=tensor([0.0659, 0.1311, 0.1847, 0.1566, 0.0658, 0.1691, 0.0795, 0.0596], + device='cuda:2'), in_proj_covar=tensor([0.0142, 0.0185, 0.0223, 0.0186, 0.0138, 0.0196, 0.0148, 0.0159], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 00:53:36,086 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 00:53:39,718 INFO [train.py:901] (2/4) Epoch 5, batch 3450, loss[loss=0.2562, simple_loss=0.3304, pruned_loss=0.09096, over 8466.00 frames. ], tot_loss[loss=0.2958, simple_loss=0.3554, pruned_loss=0.1181, over 1618389.19 frames. 
], batch size: 25, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:53:43,893 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:53:56,605 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1376, 1.2509, 3.3692, 1.2187, 2.3084, 3.8931, 3.7331, 3.3224], + device='cuda:2'), covar=tensor([0.0930, 0.1658, 0.0356, 0.2054, 0.0835, 0.0191, 0.0312, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0227, 0.0263, 0.0218, 0.0259, 0.0217, 0.0195, 0.0225, 0.0268], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 00:54:05,362 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8251, 2.5160, 4.5419, 1.2864, 2.6679, 2.1919, 2.0155, 2.3103], + device='cuda:2'), covar=tensor([0.1543, 0.1735, 0.0608, 0.3387, 0.1725, 0.2473, 0.1356, 0.2758], + device='cuda:2'), in_proj_covar=tensor([0.0468, 0.0443, 0.0525, 0.0537, 0.0579, 0.0516, 0.0447, 0.0594], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 00:54:07,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35822.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:54:09,940 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:10,372 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.123e+02 3.051e+02 3.738e+02 4.571e+02 6.690e+02, threshold=7.475e+02, percent-clipped=0.0 +2023-02-06 00:54:12,584 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35829.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:15,091 INFO [train.py:901] (2/4) Epoch 5, batch 3500, loss[loss=0.297, simple_loss=0.3562, pruned_loss=0.119, over 8106.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3531, pruned_loss=0.1163, over 1614461.58 frames. ], batch size: 23, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:54:27,023 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35850.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:27,675 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:40,687 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 00:54:48,819 INFO [train.py:901] (2/4) Epoch 5, batch 3550, loss[loss=0.2729, simple_loss=0.3401, pruned_loss=0.1028, over 8338.00 frames. ], tot_loss[loss=0.293, simple_loss=0.3528, pruned_loss=0.1166, over 1610845.35 frames. ], batch size: 25, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:54:54,910 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:55:19,532 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.232e+02 3.318e+02 3.882e+02 4.908e+02 1.221e+03, threshold=7.763e+02, percent-clipped=6.0 +2023-02-06 00:55:19,958 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0 +2023-02-06 00:55:23,997 INFO [train.py:901] (2/4) Epoch 5, batch 3600, loss[loss=0.3259, simple_loss=0.3653, pruned_loss=0.1433, over 7921.00 frames. ], tot_loss[loss=0.2937, simple_loss=0.3533, pruned_loss=0.117, over 1611294.50 frames. 
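
The `[scaling.py:679] Whitening: ... metric=1.88 vs. limit=2.0` entries come from a module that nudges activations toward a white (identity-like) covariance and intervenes only when the metric crosses the limit. The sketch below assumes one common definition of such a metric, the mean squared eigenvalue of the feature covariance divided by the squared mean eigenvalue, which is exactly 1.0 for perfectly white features; the authoritative formula is in icefall's scaling.py:

```python
import torch

def whitening_metric(x: torch.Tensor) -> torch.Tensor:
    """x: (num_frames, num_channels). Returns >= 1.0, equal to 1.0 when the
    covariance is proportional to the identity. The logged num_groups splits
    channels into groups scored separately; this sketch scores one group."""
    x = x - x.mean(dim=0)
    cov = (x.t() @ x) / x.shape[0]
    eigs = torch.linalg.eigvalsh(cov)  # real eigenvalues, ascending
    return (eigs ** 2).mean() / eigs.mean() ** 2
```
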
], batch size: 20, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:55:31,761 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 00:55:52,653 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 00:55:57,697 INFO [train.py:901] (2/4) Epoch 5, batch 3650, loss[loss=0.277, simple_loss=0.3586, pruned_loss=0.09768, over 8487.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3537, pruned_loss=0.1172, over 1617321.84 frames. ], batch size: 28, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:56:00,559 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8246, 1.4462, 3.1974, 1.3826, 2.2263, 3.5715, 3.3498, 2.9954], + device='cuda:2'), covar=tensor([0.1097, 0.1320, 0.0402, 0.1789, 0.0680, 0.0210, 0.0351, 0.0591], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0264, 0.0221, 0.0259, 0.0218, 0.0197, 0.0226, 0.0269], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 00:56:15,074 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:56:27,680 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.237e+02 3.345e+02 4.197e+02 5.280e+02 9.599e+02, threshold=8.394e+02, percent-clipped=10.0 +2023-02-06 00:56:32,338 INFO [train.py:901] (2/4) Epoch 5, batch 3700, loss[loss=0.3066, simple_loss=0.3626, pruned_loss=0.1253, over 8448.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.3535, pruned_loss=0.1171, over 1613864.70 frames. ], batch size: 27, lr: 1.50e-02, grad_scale: 16.0 +2023-02-06 00:56:40,794 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 00:57:04,793 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36078.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:57:07,929 INFO [train.py:901] (2/4) Epoch 5, batch 3750, loss[loss=0.2784, simple_loss=0.3401, pruned_loss=0.1083, over 8493.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3532, pruned_loss=0.117, over 1614360.89 frames. 
], batch size: 49, lr: 1.50e-02, grad_scale: 16.0 +2023-02-06 00:57:21,391 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36103.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:57:24,067 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7259, 1.2483, 3.8815, 1.3838, 3.3239, 3.2258, 3.4194, 3.3799], + device='cuda:2'), covar=tensor([0.0468, 0.3311, 0.0415, 0.2523, 0.1065, 0.0665, 0.0478, 0.0576], + device='cuda:2'), in_proj_covar=tensor([0.0337, 0.0494, 0.0410, 0.0421, 0.0489, 0.0404, 0.0397, 0.0450], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 00:57:24,154 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:36,067 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7189, 2.0748, 2.1979, 0.9880, 2.2433, 1.5648, 0.6259, 1.7956], + device='cuda:2'), covar=tensor([0.0168, 0.0107, 0.0091, 0.0201, 0.0118, 0.0294, 0.0288, 0.0097], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0221, 0.0176, 0.0264, 0.0203, 0.0349, 0.0274, 0.0246], + device='cuda:2'), out_proj_covar=tensor([1.0850e-04, 7.9083e-05, 6.2023e-05, 9.3622e-05, 7.4231e-05, 1.3604e-04, + 1.0045e-04, 8.8113e-05], device='cuda:2') +2023-02-06 00:57:37,191 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 3.033e+02 3.704e+02 4.599e+02 1.470e+03, threshold=7.408e+02, percent-clipped=9.0 +2023-02-06 00:57:40,783 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:41,247 INFO [train.py:901] (2/4) Epoch 5, batch 3800, loss[loss=0.266, simple_loss=0.3326, pruned_loss=0.09973, over 8349.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.352, pruned_loss=0.1157, over 1617855.83 frames. ], batch size: 24, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:57:41,318 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:09,342 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:15,719 INFO [train.py:901] (2/4) Epoch 5, batch 3850, loss[loss=0.4042, simple_loss=0.4168, pruned_loss=0.1958, over 6554.00 frames. ], tot_loss[loss=0.2921, simple_loss=0.3521, pruned_loss=0.1161, over 1618745.12 frames. ], batch size: 71, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:58:27,233 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:29,596 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.86 vs. limit=5.0 +2023-02-06 00:58:41,067 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 00:58:44,299 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. limit=5.0 +2023-02-06 00:58:45,741 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 3.284e+02 4.097e+02 5.243e+02 1.380e+03, threshold=8.194e+02, percent-clipped=10.0 +2023-02-06 00:58:49,706 INFO [train.py:901] (2/4) Epoch 5, batch 3900, loss[loss=0.2502, simple_loss=0.3025, pruned_loss=0.09897, over 7230.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.352, pruned_loss=0.1156, over 1619226.21 frames. 
], batch size: 16, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:58:59,605 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:09,645 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:24,818 INFO [train.py:901] (2/4) Epoch 5, batch 3950, loss[loss=0.2771, simple_loss=0.3435, pruned_loss=0.1053, over 7818.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3511, pruned_loss=0.1151, over 1616310.63 frames. ], batch size: 20, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:59:28,399 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:28,422 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:54,567 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.894e+02 2.959e+02 3.540e+02 4.519e+02 1.633e+03, threshold=7.079e+02, percent-clipped=6.0 +2023-02-06 00:59:58,368 INFO [train.py:901] (2/4) Epoch 5, batch 4000, loss[loss=0.2765, simple_loss=0.3278, pruned_loss=0.1126, over 7830.00 frames. ], tot_loss[loss=0.2908, simple_loss=0.3512, pruned_loss=0.1152, over 1618803.85 frames. ], batch size: 20, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:00:22,607 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1130, 4.0985, 3.6605, 2.1052, 3.6243, 3.7200, 3.7566, 3.3492], + device='cuda:2'), covar=tensor([0.0858, 0.0608, 0.1089, 0.4109, 0.0798, 0.0735, 0.1315, 0.0848], + device='cuda:2'), in_proj_covar=tensor([0.0387, 0.0280, 0.0314, 0.0399, 0.0312, 0.0270, 0.0298, 0.0241], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 01:00:31,740 INFO [train.py:901] (2/4) Epoch 5, batch 4050, loss[loss=0.2791, simple_loss=0.3299, pruned_loss=0.1141, over 7245.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3513, pruned_loss=0.1151, over 1615915.69 frames. ], batch size: 16, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:03,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 3.036e+02 3.577e+02 4.439e+02 7.437e+02, threshold=7.154e+02, percent-clipped=1.0 +2023-02-06 01:01:07,957 INFO [train.py:901] (2/4) Epoch 5, batch 4100, loss[loss=0.2759, simple_loss=0.3535, pruned_loss=0.09918, over 8441.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3512, pruned_loss=0.1151, over 1617041.63 frames. ], batch size: 27, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:41,918 INFO [train.py:901] (2/4) Epoch 5, batch 4150, loss[loss=0.2551, simple_loss=0.3197, pruned_loss=0.09527, over 8030.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3498, pruned_loss=0.1141, over 1615466.52 frames. 
], batch size: 22, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:56,692 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:09,789 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7941, 2.1183, 3.7351, 3.0538, 3.1518, 2.2549, 1.6850, 2.0739], + device='cuda:2'), covar=tensor([0.1515, 0.2191, 0.0447, 0.0891, 0.0796, 0.0923, 0.0947, 0.1816], + device='cuda:2'), in_proj_covar=tensor([0.0745, 0.0668, 0.0567, 0.0654, 0.0754, 0.0621, 0.0601, 0.0614], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:02:13,614 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.751e+02 3.740e+02 4.679e+02 9.033e+02, threshold=7.480e+02, percent-clipped=3.0 +2023-02-06 01:02:15,168 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:17,736 INFO [train.py:901] (2/4) Epoch 5, batch 4200, loss[loss=0.288, simple_loss=0.3635, pruned_loss=0.1062, over 8308.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3489, pruned_loss=0.113, over 1613820.41 frames. ], batch size: 25, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:02:24,837 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:25,675 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36544.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:43,320 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 01:02:43,514 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36569.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:52,964 INFO [train.py:901] (2/4) Epoch 5, batch 4250, loss[loss=0.2958, simple_loss=0.3438, pruned_loss=0.1239, over 5566.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.3501, pruned_loss=0.1142, over 1607388.09 frames. ], batch size: 12, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:02:53,363 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 01:02:58,784 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0803, 1.2928, 1.2061, 0.2034, 1.1588, 0.9146, 0.1063, 1.1216], + device='cuda:2'), covar=tensor([0.0134, 0.0108, 0.0088, 0.0199, 0.0127, 0.0334, 0.0255, 0.0100], + device='cuda:2'), in_proj_covar=tensor([0.0304, 0.0226, 0.0177, 0.0263, 0.0209, 0.0352, 0.0277, 0.0251], + device='cuda:2'), out_proj_covar=tensor([1.1029e-04, 8.0523e-05, 6.1631e-05, 9.2824e-05, 7.6212e-05, 1.3625e-04, + 1.0114e-04, 8.9913e-05], device='cuda:2') +2023-02-06 01:03:05,699 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 01:03:19,528 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.6778, 1.3992, 3.8286, 1.3777, 3.2439, 3.2056, 3.3438, 3.2788], + device='cuda:2'), covar=tensor([0.0506, 0.3419, 0.0469, 0.2686, 0.1306, 0.0740, 0.0582, 0.0662], + device='cuda:2'), in_proj_covar=tensor([0.0323, 0.0481, 0.0405, 0.0415, 0.0476, 0.0398, 0.0388, 0.0442], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 01:03:20,912 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:24,276 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:24,817 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.959e+02 3.120e+02 3.802e+02 4.654e+02 9.583e+02, threshold=7.605e+02, percent-clipped=3.0 +2023-02-06 01:03:27,106 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:28,971 INFO [train.py:901] (2/4) Epoch 5, batch 4300, loss[loss=0.2516, simple_loss=0.3053, pruned_loss=0.09894, over 7286.00 frames. ], tot_loss[loss=0.2909, simple_loss=0.3518, pruned_loss=0.1149, over 1610615.26 frames. ], batch size: 16, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:03:42,648 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0 +2023-02-06 01:03:47,180 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:52,222 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 01:04:05,000 INFO [train.py:901] (2/4) Epoch 5, batch 4350, loss[loss=0.3142, simple_loss=0.3513, pruned_loss=0.1386, over 7536.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3518, pruned_loss=0.1148, over 1607169.75 frames. ], batch size: 18, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:04:28,889 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36718.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:04:34,836 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.000e+02 3.265e+02 4.032e+02 4.973e+02 1.053e+03, threshold=8.064e+02, percent-clipped=5.0 +2023-02-06 01:04:36,256 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 01:04:38,954 INFO [train.py:901] (2/4) Epoch 5, batch 4400, loss[loss=0.2504, simple_loss=0.3082, pruned_loss=0.09636, over 7282.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3514, pruned_loss=0.1149, over 1605546.59 frames. ], batch size: 16, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:04:56,225 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 01:05:15,304 INFO [train.py:901] (2/4) Epoch 5, batch 4450, loss[loss=0.2689, simple_loss=0.3325, pruned_loss=0.1027, over 8328.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3514, pruned_loss=0.1149, over 1606220.02 frames. ], batch size: 25, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:05:18,074 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
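
The recurring `WARNING [train.py:1067] Exclude cut with ID ... Duration: ...` entries come from a duration filter applied to the training cuts before batching: every utterance excluded in this log is either shorter than about one second or longer than twenty seconds (25.2444375 s in the entry above, sub-second cuts elsewhere). A sketch in the style of the icefall LibriSpeech recipes, with the exact bounds treated as assumptions:

```python
MIN_DURATION, MAX_DURATION = 1.0, 20.0  # seconds; assumed bounds

def remove_short_and_long_utt(cut) -> bool:
    """Keep a lhotse cut only if its duration is inside the window."""
    keep = MIN_DURATION <= cut.duration <= MAX_DURATION
    if not keep:
        print(f"Exclude cut with ID {cut.id} from training. "
              f"Duration: {cut.duration}")
    return keep

# Applied to a lhotse CutSet before the sampler is built:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```
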
Duration: 33.038875 +2023-02-06 01:05:43,195 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36823.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:05:45,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 3.029e+02 3.648e+02 4.687e+02 9.435e+02, threshold=7.296e+02, percent-clipped=4.0 +2023-02-06 01:05:49,708 INFO [train.py:901] (2/4) Epoch 5, batch 4500, loss[loss=0.2918, simple_loss=0.3436, pruned_loss=0.1201, over 7694.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.3503, pruned_loss=0.1141, over 1605875.23 frames. ], batch size: 18, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:06:14,972 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 01:06:25,763 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.67 vs. limit=5.0 +2023-02-06 01:06:26,110 INFO [train.py:901] (2/4) Epoch 5, batch 4550, loss[loss=0.2822, simple_loss=0.3607, pruned_loss=0.1018, over 8455.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3499, pruned_loss=0.1136, over 1608133.05 frames. ], batch size: 25, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:06:47,951 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36914.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:06:56,558 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 3.174e+02 3.779e+02 4.790e+02 8.988e+02, threshold=7.559e+02, percent-clipped=4.0 +2023-02-06 01:06:58,109 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:00,569 INFO [train.py:901] (2/4) Epoch 5, batch 4600, loss[loss=0.347, simple_loss=0.3829, pruned_loss=0.1556, over 8509.00 frames. ], tot_loss[loss=0.2885, simple_loss=0.3503, pruned_loss=0.1134, over 1613203.47 frames. ], batch size: 28, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:07:04,717 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:23,548 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:25,629 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36970.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:29,034 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:30,332 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:35,633 INFO [train.py:901] (2/4) Epoch 5, batch 4650, loss[loss=0.3363, simple_loss=0.3841, pruned_loss=0.1443, over 8475.00 frames. ], tot_loss[loss=0.2896, simple_loss=0.3504, pruned_loss=0.1144, over 1605553.08 frames. ], batch size: 49, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:08:02,861 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:06,067 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 3.207e+02 3.974e+02 5.163e+02 9.904e+02, threshold=7.949e+02, percent-clipped=4.0 +2023-02-06 01:08:10,735 INFO [train.py:901] (2/4) Epoch 5, batch 4700, loss[loss=0.1808, simple_loss=0.2634, pruned_loss=0.04907, over 7418.00 frames. 
], tot_loss[loss=0.2895, simple_loss=0.3501, pruned_loss=0.1144, over 1603125.91 frames. ], batch size: 17, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:08:29,881 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37062.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:08:43,597 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:44,118 INFO [train.py:901] (2/4) Epoch 5, batch 4750, loss[loss=0.2726, simple_loss=0.3478, pruned_loss=0.09866, over 8484.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3509, pruned_loss=0.1151, over 1606045.08 frames. ], batch size: 26, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:08:45,691 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:49,139 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:09:08,550 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.4091, 5.4158, 4.8348, 2.1624, 4.7617, 5.0409, 5.0727, 4.5045], + device='cuda:2'), covar=tensor([0.0594, 0.0345, 0.0725, 0.4399, 0.0663, 0.0500, 0.0841, 0.0505], + device='cuda:2'), in_proj_covar=tensor([0.0399, 0.0290, 0.0324, 0.0406, 0.0323, 0.0270, 0.0305, 0.0247], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:09:15,970 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 3.010e+02 3.846e+02 4.879e+02 1.523e+03, threshold=7.692e+02, percent-clipped=5.0 +2023-02-06 01:09:17,400 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 01:09:20,175 INFO [train.py:901] (2/4) Epoch 5, batch 4800, loss[loss=0.3066, simple_loss=0.3637, pruned_loss=0.1247, over 8570.00 frames. ], tot_loss[loss=0.2908, simple_loss=0.3511, pruned_loss=0.1152, over 1611440.11 frames. ], batch size: 31, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:09:20,180 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 01:09:44,353 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37167.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:09:51,279 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37177.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:09:51,932 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3775, 1.4345, 2.2243, 1.0578, 2.1841, 2.4081, 2.3979, 2.0381], + device='cuda:2'), covar=tensor([0.1053, 0.1085, 0.0552, 0.1996, 0.0549, 0.0383, 0.0591, 0.0833], + device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0270, 0.0225, 0.0262, 0.0225, 0.0198, 0.0231, 0.0273], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 01:09:55,226 INFO [train.py:901] (2/4) Epoch 5, batch 4850, loss[loss=0.3262, simple_loss=0.3769, pruned_loss=0.1377, over 8414.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3502, pruned_loss=0.1144, over 1613742.72 frames. ], batch size: 48, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:10:10,652 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 01:10:12,995 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. limit=2.0 +2023-02-06 01:10:27,460 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.956e+02 3.581e+02 4.871e+02 1.087e+03, threshold=7.163e+02, percent-clipped=6.0 +2023-02-06 01:10:31,481 INFO [train.py:901] (2/4) Epoch 5, batch 4900, loss[loss=0.3863, simple_loss=0.4125, pruned_loss=0.1801, over 6752.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3503, pruned_loss=0.1147, over 1610056.49 frames. ], batch size: 72, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:10:59,368 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:04,934 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9463, 3.7934, 2.4936, 2.1489, 2.6222, 2.2620, 2.2209, 2.7478], + device='cuda:2'), covar=tensor([0.1298, 0.0291, 0.0760, 0.0708, 0.0663, 0.0922, 0.1020, 0.0938], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0243, 0.0305, 0.0298, 0.0319, 0.0304, 0.0339, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 01:11:05,621 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37282.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:11:06,073 INFO [train.py:901] (2/4) Epoch 5, batch 4950, loss[loss=0.2874, simple_loss=0.3428, pruned_loss=0.116, over 8331.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3496, pruned_loss=0.1141, over 1606031.97 frames. ], batch size: 25, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:11:08,195 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:31,043 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:35,592 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 3.052e+02 3.616e+02 4.696e+02 1.143e+03, threshold=7.231e+02, percent-clipped=5.0 +2023-02-06 01:11:40,305 INFO [train.py:901] (2/4) Epoch 5, batch 5000, loss[loss=0.2345, simple_loss=0.3045, pruned_loss=0.08227, over 7805.00 frames. ], tot_loss[loss=0.2885, simple_loss=0.3497, pruned_loss=0.1136, over 1609943.77 frames. 
], batch size: 19, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:11:43,901 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:45,945 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:49,268 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:01,327 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:03,196 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:03,313 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:05,952 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:14,485 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:15,020 INFO [train.py:901] (2/4) Epoch 5, batch 5050, loss[loss=0.2765, simple_loss=0.3349, pruned_loss=0.1091, over 7801.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3505, pruned_loss=0.1144, over 1614325.20 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:12:18,411 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:44,491 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 3.496e+02 4.122e+02 5.072e+02 9.522e+02, threshold=8.245e+02, percent-clipped=6.0 +2023-02-06 01:12:48,503 INFO [train.py:901] (2/4) Epoch 5, batch 5100, loss[loss=0.3242, simple_loss=0.3767, pruned_loss=0.1358, over 8425.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3514, pruned_loss=0.1149, over 1613704.31 frames. ], batch size: 49, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:12:48,515 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-06 01:12:48,718 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37433.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:12:49,925 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:13:06,546 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37458.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:13:11,708 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7869, 1.4222, 3.3763, 1.3746, 2.5047, 3.7443, 3.5916, 3.2364], + device='cuda:2'), covar=tensor([0.1110, 0.1412, 0.0342, 0.1862, 0.0612, 0.0211, 0.0345, 0.0577], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0274, 0.0227, 0.0264, 0.0226, 0.0199, 0.0232, 0.0279], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 01:13:22,246 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:13:23,387 INFO [train.py:901] (2/4) Epoch 5, batch 5150, loss[loss=0.3044, simple_loss=0.3578, pruned_loss=0.1255, over 7816.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3494, pruned_loss=0.1141, over 1613149.00 frames. ], batch size: 20, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:13:53,504 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.138e+02 3.879e+02 5.454e+02 1.167e+03, threshold=7.757e+02, percent-clipped=4.0 +2023-02-06 01:13:57,568 INFO [train.py:901] (2/4) Epoch 5, batch 5200, loss[loss=0.3599, simple_loss=0.3957, pruned_loss=0.162, over 6717.00 frames. ], tot_loss[loss=0.2887, simple_loss=0.3494, pruned_loss=0.114, over 1608978.33 frames. ], batch size: 71, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:14:01,148 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37538.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:14:19,393 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37563.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:14:33,076 INFO [train.py:901] (2/4) Epoch 5, batch 5250, loss[loss=0.2624, simple_loss=0.3396, pruned_loss=0.09259, over 8133.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.35, pruned_loss=0.1141, over 1603641.99 frames. ], batch size: 22, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:14:40,789 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8626, 2.1190, 2.2596, 1.7328, 1.1775, 2.2159, 0.3775, 1.1931], + device='cuda:2'), covar=tensor([0.3867, 0.1888, 0.0965, 0.3172, 0.6800, 0.1048, 0.6850, 0.2978], + device='cuda:2'), in_proj_covar=tensor([0.0129, 0.0123, 0.0079, 0.0172, 0.0209, 0.0083, 0.0149, 0.0127], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:14:45,218 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 01:15:03,494 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 3.205e+02 3.781e+02 5.298e+02 9.083e+02, threshold=7.562e+02, percent-clipped=4.0 +2023-02-06 01:15:05,550 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:07,535 INFO [train.py:901] (2/4) Epoch 5, batch 5300, loss[loss=0.2922, simple_loss=0.3538, pruned_loss=0.1153, over 6823.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3491, pruned_loss=0.1128, over 1607822.16 frames. ], batch size: 15, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:15:15,193 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:32,585 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:42,453 INFO [train.py:901] (2/4) Epoch 5, batch 5350, loss[loss=0.3462, simple_loss=0.3906, pruned_loss=0.1508, over 8732.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3499, pruned_loss=0.1136, over 1612684.51 frames. ], batch size: 49, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:15:47,816 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:05,030 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37716.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:11,798 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:12,347 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 3.472e+02 4.206e+02 5.536e+02 1.524e+03, threshold=8.412e+02, percent-clipped=7.0 +2023-02-06 01:16:17,027 INFO [train.py:901] (2/4) Epoch 5, batch 5400, loss[loss=0.3083, simple_loss=0.3609, pruned_loss=0.1278, over 8499.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3517, pruned_loss=0.1155, over 1612699.56 frames. ], batch size: 26, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:16:19,920 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:25,230 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:36,758 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:50,838 INFO [train.py:901] (2/4) Epoch 5, batch 5450, loss[loss=0.2707, simple_loss=0.3153, pruned_loss=0.113, over 7696.00 frames. ], tot_loss[loss=0.287, simple_loss=0.3486, pruned_loss=0.1127, over 1607838.91 frames. ], batch size: 18, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:17:22,464 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 2.936e+02 3.882e+02 5.021e+02 1.156e+03, threshold=7.764e+02, percent-clipped=3.0 +2023-02-06 01:17:26,498 INFO [train.py:901] (2/4) Epoch 5, batch 5500, loss[loss=0.3139, simple_loss=0.3798, pruned_loss=0.124, over 8476.00 frames. ], tot_loss[loss=0.2874, simple_loss=0.3496, pruned_loss=0.1126, over 1611808.78 frames. ], batch size: 25, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:17:30,497 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 01:17:32,018 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:18:00,732 INFO [train.py:901] (2/4) Epoch 5, batch 5550, loss[loss=0.3861, simple_loss=0.4318, pruned_loss=0.1702, over 8464.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.3499, pruned_loss=0.1134, over 1609741.76 frames. ], batch size: 27, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:32,131 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 3.124e+02 3.941e+02 5.093e+02 9.977e+02, threshold=7.882e+02, percent-clipped=4.0 +2023-02-06 01:18:36,201 INFO [train.py:901] (2/4) Epoch 5, batch 5600, loss[loss=0.2917, simple_loss=0.3433, pruned_loss=0.12, over 7823.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.3504, pruned_loss=0.114, over 1613593.54 frames. ], batch size: 20, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:39,364 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 01:19:10,749 INFO [train.py:901] (2/4) Epoch 5, batch 5650, loss[loss=0.288, simple_loss=0.3481, pruned_loss=0.1139, over 8238.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3488, pruned_loss=0.113, over 1611346.32 frames. ], batch size: 22, lr: 1.47e-02, grad_scale: 4.0 +2023-02-06 01:19:20,217 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37997.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:23,903 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:34,403 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 01:19:41,409 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:42,550 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 3.005e+02 3.717e+02 4.758e+02 1.120e+03, threshold=7.434e+02, percent-clipped=3.0 +2023-02-06 01:19:45,907 INFO [train.py:901] (2/4) Epoch 5, batch 5700, loss[loss=0.2466, simple_loss=0.3148, pruned_loss=0.08919, over 7204.00 frames. ], tot_loss[loss=0.2883, simple_loss=0.349, pruned_loss=0.1138, over 1607092.40 frames. ], batch size: 16, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:20,736 INFO [train.py:901] (2/4) Epoch 5, batch 5750, loss[loss=0.2842, simple_loss=0.3395, pruned_loss=0.1144, over 8243.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3502, pruned_loss=0.1144, over 1607233.47 frames. ], batch size: 22, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:20,968 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4300, 1.8084, 1.9554, 1.3111, 0.9500, 1.9720, 0.2127, 1.0927], + device='cuda:2'), covar=tensor([0.4668, 0.1813, 0.0929, 0.3247, 0.6763, 0.0770, 0.6319, 0.2573], + device='cuda:2'), in_proj_covar=tensor([0.0130, 0.0120, 0.0079, 0.0171, 0.0210, 0.0079, 0.0145, 0.0124], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:20:30,238 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:37,194 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-06 01:20:41,417 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0248, 1.0497, 3.2734, 0.8666, 2.7250, 2.7266, 2.8508, 2.7704], + device='cuda:2'), covar=tensor([0.0652, 0.3440, 0.0553, 0.2690, 0.1427, 0.0785, 0.0645, 0.0798], + device='cuda:2'), in_proj_covar=tensor([0.0340, 0.0498, 0.0417, 0.0433, 0.0491, 0.0414, 0.0406, 0.0453], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 01:20:45,514 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5570, 1.7964, 3.4252, 1.1259, 2.2221, 1.8345, 1.5029, 1.9347], + device='cuda:2'), covar=tensor([0.1526, 0.1874, 0.0611, 0.3356, 0.1441, 0.2408, 0.1546, 0.2245], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0445, 0.0523, 0.0538, 0.0576, 0.0517, 0.0446, 0.0589], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 01:20:46,797 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:50,581 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.053e+02 3.876e+02 4.925e+02 1.023e+03, threshold=7.752e+02, percent-clipped=4.0 +2023-02-06 01:20:54,528 INFO [train.py:901] (2/4) Epoch 5, batch 5800, loss[loss=0.2536, simple_loss=0.3314, pruned_loss=0.08794, over 8293.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3492, pruned_loss=0.1132, over 1610191.51 frames. ], batch size: 23, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:21:29,404 INFO [train.py:901] (2/4) Epoch 5, batch 5850, loss[loss=0.3158, simple_loss=0.3781, pruned_loss=0.1268, over 8293.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3494, pruned_loss=0.1134, over 1612480.88 frames. ], batch size: 49, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:00,285 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 01:22:01,294 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.005e+02 3.016e+02 3.759e+02 4.889e+02 1.185e+03, threshold=7.518e+02, percent-clipped=2.0 +2023-02-06 01:22:02,502 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 01:22:04,818 INFO [train.py:901] (2/4) Epoch 5, batch 5900, loss[loss=0.2444, simple_loss=0.328, pruned_loss=0.08045, over 8439.00 frames. ], tot_loss[loss=0.287, simple_loss=0.349, pruned_loss=0.1125, over 1609737.10 frames. ], batch size: 27, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:40,895 INFO [train.py:901] (2/4) Epoch 5, batch 5950, loss[loss=0.2627, simple_loss=0.3185, pruned_loss=0.1034, over 7254.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.349, pruned_loss=0.1124, over 1612827.24 frames. ], batch size: 16, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:23:04,141 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. 
limit=2.0 +2023-02-06 01:23:12,021 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.191e+02 3.790e+02 5.332e+02 1.075e+03, threshold=7.580e+02, percent-clipped=7.0 +2023-02-06 01:23:14,240 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0110, 2.5288, 1.9172, 2.9868, 1.4930, 1.6865, 1.7357, 2.3544], + device='cuda:2'), covar=tensor([0.0947, 0.0844, 0.1236, 0.0490, 0.1561, 0.1713, 0.1679, 0.1153], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0256, 0.0278, 0.0219, 0.0246, 0.0278, 0.0290, 0.0262], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 01:23:15,478 INFO [train.py:901] (2/4) Epoch 5, batch 6000, loss[loss=0.2818, simple_loss=0.3459, pruned_loss=0.1088, over 7969.00 frames. ], tot_loss[loss=0.287, simple_loss=0.3494, pruned_loss=0.1123, over 1618832.42 frames. ], batch size: 21, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:23:15,478 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 01:23:28,274 INFO [train.py:935] (2/4) Epoch 5, validation: loss=0.2196, simple_loss=0.3162, pruned_loss=0.06146, over 944034.00 frames. +2023-02-06 01:23:28,275 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 01:23:33,797 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:23:58,477 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38378.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:24:01,709 INFO [train.py:901] (2/4) Epoch 5, batch 6050, loss[loss=0.2921, simple_loss=0.3317, pruned_loss=0.1263, over 6822.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.348, pruned_loss=0.1117, over 1617246.16 frames. ], batch size: 15, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:33,770 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 3.108e+02 3.868e+02 4.827e+02 8.119e+02, threshold=7.737e+02, percent-clipped=1.0 +2023-02-06 01:24:37,062 INFO [train.py:901] (2/4) Epoch 5, batch 6100, loss[loss=0.2657, simple_loss=0.329, pruned_loss=0.1012, over 8231.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3486, pruned_loss=0.1121, over 1619274.59 frames. ], batch size: 22, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:40,778 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. 
limit=2.0 +2023-02-06 01:24:53,393 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:24:54,054 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4377, 2.4369, 1.4411, 3.1499, 1.4932, 1.2381, 2.0167, 2.2393], + device='cuda:2'), covar=tensor([0.2386, 0.1796, 0.3004, 0.0439, 0.2279, 0.3321, 0.1961, 0.1536], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0254, 0.0280, 0.0219, 0.0249, 0.0281, 0.0291, 0.0268], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 01:24:57,956 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7503, 1.5041, 3.2083, 1.2603, 2.1563, 3.5729, 3.4278, 3.0728], + device='cuda:2'), covar=tensor([0.1152, 0.1460, 0.0371, 0.2063, 0.0778, 0.0290, 0.0394, 0.0628], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0269, 0.0224, 0.0263, 0.0225, 0.0201, 0.0232, 0.0276], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 01:25:09,662 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 01:25:10,932 INFO [train.py:901] (2/4) Epoch 5, batch 6150, loss[loss=0.2663, simple_loss=0.3401, pruned_loss=0.09622, over 8476.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3481, pruned_loss=0.112, over 1620259.62 frames. ], batch size: 25, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:25:32,112 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.29 vs. limit=5.0 +2023-02-06 01:25:42,282 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 3.089e+02 4.008e+02 5.119e+02 1.011e+03, threshold=8.016e+02, percent-clipped=7.0 +2023-02-06 01:25:45,617 INFO [train.py:901] (2/4) Epoch 5, batch 6200, loss[loss=0.3141, simple_loss=0.3696, pruned_loss=0.1294, over 8800.00 frames. ], tot_loss[loss=0.2844, simple_loss=0.3465, pruned_loss=0.1111, over 1617467.25 frames. ], batch size: 40, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:26:20,117 INFO [train.py:901] (2/4) Epoch 5, batch 6250, loss[loss=0.2957, simple_loss=0.3539, pruned_loss=0.1188, over 8589.00 frames. ], tot_loss[loss=0.284, simple_loss=0.3458, pruned_loss=0.1111, over 1617450.57 frames. ], batch size: 31, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:26:51,177 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.239e+02 3.994e+02 4.997e+02 1.061e+03, threshold=7.988e+02, percent-clipped=3.0 +2023-02-06 01:26:54,599 INFO [train.py:901] (2/4) Epoch 5, batch 6300, loss[loss=0.279, simple_loss=0.3357, pruned_loss=0.1112, over 8088.00 frames. ], tot_loss[loss=0.2849, simple_loss=0.3467, pruned_loss=0.1116, over 1616296.64 frames. ], batch size: 21, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:29,613 INFO [train.py:901] (2/4) Epoch 5, batch 6350, loss[loss=0.2889, simple_loss=0.3375, pruned_loss=0.1201, over 8074.00 frames. ], tot_loss[loss=0.2837, simple_loss=0.3453, pruned_loss=0.111, over 1611710.91 frames. 
], batch size: 21, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:49,930 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38712.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:27:56,422 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:00,326 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.862e+02 3.826e+02 4.732e+02 1.596e+03, threshold=7.652e+02, percent-clipped=5.0 +2023-02-06 01:28:03,611 INFO [train.py:901] (2/4) Epoch 5, batch 6400, loss[loss=0.3929, simple_loss=0.4177, pruned_loss=0.1841, over 6874.00 frames. ], tot_loss[loss=0.2831, simple_loss=0.345, pruned_loss=0.1106, over 1613758.15 frames. ], batch size: 71, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:06,533 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:22,356 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4437, 4.3965, 3.9500, 1.6525, 3.8878, 3.9693, 4.2072, 3.6925], + device='cuda:2'), covar=tensor([0.0870, 0.0552, 0.0947, 0.5152, 0.0722, 0.0933, 0.0964, 0.0761], + device='cuda:2'), in_proj_covar=tensor([0.0399, 0.0294, 0.0322, 0.0406, 0.0310, 0.0272, 0.0303, 0.0253], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:28:38,892 INFO [train.py:901] (2/4) Epoch 5, batch 6450, loss[loss=0.3068, simple_loss=0.3665, pruned_loss=0.1235, over 8425.00 frames. ], tot_loss[loss=0.2852, simple_loss=0.3471, pruned_loss=0.1116, over 1618536.23 frames. ], batch size: 27, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:29:09,816 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.470e+02 3.536e+02 4.141e+02 5.010e+02 9.096e+02, threshold=8.281e+02, percent-clipped=4.0 +2023-02-06 01:29:11,283 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9232, 3.4507, 2.7055, 4.3660, 1.9228, 2.1786, 2.6657, 3.7336], + device='cuda:2'), covar=tensor([0.0845, 0.0983, 0.1190, 0.0231, 0.1591, 0.1794, 0.1443, 0.0697], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0252, 0.0282, 0.0220, 0.0243, 0.0277, 0.0282, 0.0254], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 01:29:13,134 INFO [train.py:901] (2/4) Epoch 5, batch 6500, loss[loss=0.2643, simple_loss=0.3373, pruned_loss=0.09564, over 8450.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3473, pruned_loss=0.1123, over 1614844.00 frames. 
], batch size: 25, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:29:16,037 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:18,076 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4501, 2.0606, 3.3164, 2.7674, 2.6455, 2.1361, 1.5185, 1.3394], + device='cuda:2'), covar=tensor([0.1948, 0.2172, 0.0452, 0.1067, 0.1124, 0.1081, 0.1161, 0.2221], + device='cuda:2'), in_proj_covar=tensor([0.0745, 0.0674, 0.0577, 0.0660, 0.0766, 0.0635, 0.0604, 0.0625], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:29:18,660 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:20,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1115, 1.1151, 1.0517, 1.0715, 0.8195, 1.1907, 0.0785, 0.8410], + device='cuda:2'), covar=tensor([0.3097, 0.2269, 0.1060, 0.1857, 0.5375, 0.0848, 0.4470, 0.2478], + device='cuda:2'), in_proj_covar=tensor([0.0128, 0.0124, 0.0080, 0.0166, 0.0209, 0.0078, 0.0140, 0.0125], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:29:48,036 INFO [train.py:901] (2/4) Epoch 5, batch 6550, loss[loss=0.2904, simple_loss=0.3691, pruned_loss=0.1058, over 8334.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3476, pruned_loss=0.1126, over 1612181.77 frames. ], batch size: 25, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:29:49,535 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0713, 1.2247, 4.2352, 1.6192, 3.6956, 3.5549, 3.8199, 3.7151], + device='cuda:2'), covar=tensor([0.0453, 0.3706, 0.0421, 0.2435, 0.1032, 0.0593, 0.0431, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0332, 0.0481, 0.0416, 0.0422, 0.0481, 0.0401, 0.0399, 0.0442], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-06 01:30:06,593 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9826, 1.4298, 1.3352, 1.0755, 1.1372, 1.3230, 1.5178, 1.4728], + device='cuda:2'), covar=tensor([0.0598, 0.1252, 0.1952, 0.1579, 0.0627, 0.1531, 0.0744, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0135, 0.0178, 0.0220, 0.0183, 0.0130, 0.0187, 0.0142, 0.0153], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 01:30:19,275 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.239e+02 3.737e+02 4.952e+02 1.438e+03, threshold=7.474e+02, percent-clipped=4.0 +2023-02-06 01:30:22,012 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 01:30:22,676 INFO [train.py:901] (2/4) Epoch 5, batch 6600, loss[loss=0.2855, simple_loss=0.3496, pruned_loss=0.1107, over 8327.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3484, pruned_loss=0.1138, over 1613202.23 frames. ], batch size: 26, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:39,818 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 01:30:51,529 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5834, 2.4245, 1.5581, 2.1251, 1.9352, 1.2992, 1.7736, 1.8729], + device='cuda:2'), covar=tensor([0.0902, 0.0267, 0.0924, 0.0443, 0.0624, 0.1219, 0.0730, 0.0695], + device='cuda:2'), in_proj_covar=tensor([0.0341, 0.0235, 0.0305, 0.0299, 0.0315, 0.0309, 0.0335, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 01:30:58,095 INFO [train.py:901] (2/4) Epoch 5, batch 6650, loss[loss=0.284, simple_loss=0.3539, pruned_loss=0.1071, over 8031.00 frames. ], tot_loss[loss=0.2863, simple_loss=0.3478, pruned_loss=0.1124, over 1616385.15 frames. ], batch size: 22, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:31:29,853 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.068e+02 3.660e+02 4.252e+02 1.265e+03, threshold=7.321e+02, percent-clipped=3.0 +2023-02-06 01:31:33,318 INFO [train.py:901] (2/4) Epoch 5, batch 6700, loss[loss=0.2514, simple_loss=0.3242, pruned_loss=0.08925, over 8140.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3473, pruned_loss=0.1117, over 1617823.59 frames. ], batch size: 22, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:31:49,152 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0110, 2.4435, 3.9997, 3.1168, 3.3226, 2.3161, 1.7759, 1.9957], + device='cuda:2'), covar=tensor([0.1669, 0.2039, 0.0479, 0.1101, 0.0927, 0.1012, 0.1038, 0.2179], + device='cuda:2'), in_proj_covar=tensor([0.0753, 0.0683, 0.0582, 0.0675, 0.0786, 0.0648, 0.0615, 0.0638], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:32:08,096 INFO [train.py:901] (2/4) Epoch 5, batch 6750, loss[loss=0.3047, simple_loss=0.3601, pruned_loss=0.1247, over 8462.00 frames. ], tot_loss[loss=0.285, simple_loss=0.3468, pruned_loss=0.1116, over 1615341.43 frames. ], batch size: 27, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:32:15,841 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:26,943 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4290, 1.9214, 3.6853, 1.0743, 2.5999, 1.7326, 1.4573, 2.0980], + device='cuda:2'), covar=tensor([0.1719, 0.1827, 0.0546, 0.3411, 0.1294, 0.2417, 0.1642, 0.2285], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0448, 0.0516, 0.0533, 0.0570, 0.0509, 0.0442, 0.0587], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 01:32:33,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39118.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:40,320 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.802e+02 3.437e+02 4.300e+02 8.945e+02, threshold=6.874e+02, percent-clipped=2.0 +2023-02-06 01:32:43,709 INFO [train.py:901] (2/4) Epoch 5, batch 6800, loss[loss=0.2925, simple_loss=0.3411, pruned_loss=0.1219, over 7445.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.347, pruned_loss=0.112, over 1615449.87 frames. 
], batch size: 17, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:32:49,774 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5331, 1.7730, 2.1406, 1.0631, 2.2269, 1.3290, 0.6737, 1.5874], + device='cuda:2'), covar=tensor([0.0214, 0.0111, 0.0079, 0.0185, 0.0148, 0.0360, 0.0293, 0.0117], + device='cuda:2'), in_proj_covar=tensor([0.0317, 0.0226, 0.0194, 0.0273, 0.0220, 0.0363, 0.0286, 0.0268], + device='cuda:2'), out_proj_covar=tensor([1.1177e-04, 7.7858e-05, 6.6506e-05, 9.3822e-05, 7.7362e-05, 1.3607e-04, + 1.0147e-04, 9.3531e-05], device='cuda:2') +2023-02-06 01:32:52,269 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3078, 1.3710, 1.4540, 1.1433, 1.3539, 1.3285, 1.7211, 1.6401], + device='cuda:2'), covar=tensor([0.0543, 0.1300, 0.1803, 0.1442, 0.0588, 0.1600, 0.0699, 0.0533], + device='cuda:2'), in_proj_covar=tensor([0.0135, 0.0177, 0.0220, 0.0182, 0.0129, 0.0186, 0.0142, 0.0152], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 01:32:54,769 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 01:33:17,603 INFO [train.py:901] (2/4) Epoch 5, batch 6850, loss[loss=0.3004, simple_loss=0.3568, pruned_loss=0.122, over 7933.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.3495, pruned_loss=0.1136, over 1619619.59 frames. ], batch size: 20, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:33:19,089 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:31,225 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:43,505 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 01:33:48,728 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 3.284e+02 3.960e+02 5.468e+02 1.321e+03, threshold=7.919e+02, percent-clipped=11.0 +2023-02-06 01:33:52,162 INFO [train.py:901] (2/4) Epoch 5, batch 6900, loss[loss=0.2903, simple_loss=0.3681, pruned_loss=0.1063, over 8495.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3503, pruned_loss=0.1143, over 1619730.83 frames. ], batch size: 26, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:33:59,429 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1955, 4.2106, 3.7824, 1.5850, 3.7202, 3.4965, 3.8313, 3.2372], + device='cuda:2'), covar=tensor([0.0747, 0.0533, 0.0912, 0.4701, 0.0757, 0.0994, 0.1136, 0.0922], + device='cuda:2'), in_proj_covar=tensor([0.0392, 0.0290, 0.0317, 0.0399, 0.0305, 0.0277, 0.0302, 0.0247], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 01:34:12,819 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0306, 1.1820, 4.2111, 1.5779, 3.6762, 3.4824, 3.7612, 3.6988], + device='cuda:2'), covar=tensor([0.0492, 0.3948, 0.0494, 0.2653, 0.1258, 0.0789, 0.0488, 0.0555], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0499, 0.0426, 0.0428, 0.0498, 0.0409, 0.0404, 0.0457], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 01:34:26,651 INFO [train.py:901] (2/4) Epoch 5, batch 6950, loss[loss=0.3508, simple_loss=0.4021, pruned_loss=0.1498, over 8358.00 frames. 
], tot_loss[loss=0.2896, simple_loss=0.3509, pruned_loss=0.1141, over 1619854.26 frames. ], batch size: 24, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:38,150 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:39,409 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:49,520 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 01:34:51,219 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 01:34:58,303 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.231e+02 3.801e+02 5.196e+02 1.038e+03, threshold=7.603e+02, percent-clipped=4.0 +2023-02-06 01:35:01,662 INFO [train.py:901] (2/4) Epoch 5, batch 7000, loss[loss=0.2992, simple_loss=0.3715, pruned_loss=0.1135, over 8471.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3494, pruned_loss=0.1132, over 1616863.94 frames. ], batch size: 25, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:35,821 INFO [train.py:901] (2/4) Epoch 5, batch 7050, loss[loss=0.3119, simple_loss=0.3786, pruned_loss=0.1226, over 8248.00 frames. ], tot_loss[loss=0.2865, simple_loss=0.3485, pruned_loss=0.1122, over 1615662.30 frames. ], batch size: 24, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:58,318 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8433, 1.4641, 5.8037, 2.1309, 5.2647, 4.9002, 5.4377, 5.3783], + device='cuda:2'), covar=tensor([0.0242, 0.3612, 0.0184, 0.2081, 0.0774, 0.0519, 0.0305, 0.0357], + device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0501, 0.0436, 0.0432, 0.0507, 0.0420, 0.0415, 0.0467], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 01:36:06,897 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 2.867e+02 3.538e+02 4.706e+02 1.662e+03, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 01:36:10,285 INFO [train.py:901] (2/4) Epoch 5, batch 7100, loss[loss=0.2752, simple_loss=0.3251, pruned_loss=0.1127, over 7708.00 frames. ], tot_loss[loss=0.2863, simple_loss=0.3484, pruned_loss=0.1121, over 1616098.76 frames. ], batch size: 18, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:36:34,777 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6640, 2.0659, 3.6868, 1.0990, 2.7351, 1.9537, 1.6620, 2.1974], + device='cuda:2'), covar=tensor([0.1376, 0.1592, 0.0513, 0.3173, 0.1129, 0.2137, 0.1348, 0.1982], + device='cuda:2'), in_proj_covar=tensor([0.0465, 0.0447, 0.0515, 0.0526, 0.0565, 0.0505, 0.0442, 0.0584], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 01:36:44,799 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:36:45,999 INFO [train.py:901] (2/4) Epoch 5, batch 7150, loss[loss=0.2986, simple_loss=0.3623, pruned_loss=0.1174, over 8604.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3499, pruned_loss=0.113, over 1619399.75 frames. 
], batch size: 39, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:17,185 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.977e+02 2.912e+02 3.907e+02 4.774e+02 1.202e+03, threshold=7.813e+02, percent-clipped=7.0 +2023-02-06 01:37:20,760 INFO [train.py:901] (2/4) Epoch 5, batch 7200, loss[loss=0.3073, simple_loss=0.3666, pruned_loss=0.1239, over 8625.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3509, pruned_loss=0.1139, over 1618532.65 frames. ], batch size: 39, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:30,594 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:37,496 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:55,143 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:56,306 INFO [train.py:901] (2/4) Epoch 5, batch 7250, loss[loss=0.2382, simple_loss=0.3038, pruned_loss=0.08625, over 7679.00 frames. ], tot_loss[loss=0.2871, simple_loss=0.3487, pruned_loss=0.1127, over 1613170.49 frames. ], batch size: 18, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:13,473 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1570, 3.2654, 2.4570, 4.2173, 2.0503, 2.1025, 2.3496, 3.2345], + device='cuda:2'), covar=tensor([0.0747, 0.0980, 0.1314, 0.0279, 0.1580, 0.1790, 0.1864, 0.1013], + device='cuda:2'), in_proj_covar=tensor([0.0276, 0.0254, 0.0290, 0.0231, 0.0252, 0.0283, 0.0289, 0.0256], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 01:38:27,204 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 2.862e+02 3.679e+02 5.056e+02 1.142e+03, threshold=7.358e+02, percent-clipped=8.0 +2023-02-06 01:38:30,501 INFO [train.py:901] (2/4) Epoch 5, batch 7300, loss[loss=0.3206, simple_loss=0.3796, pruned_loss=0.1308, over 8653.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.3488, pruned_loss=0.1128, over 1612915.86 frames. ], batch size: 34, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:39,559 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:38:47,110 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39657.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:38:50,516 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39662.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:39:05,558 INFO [train.py:901] (2/4) Epoch 5, batch 7350, loss[loss=0.2534, simple_loss=0.3137, pruned_loss=0.09653, over 7711.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3498, pruned_loss=0.1139, over 1612740.69 frames. 
], batch size: 18, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:13,007 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1713, 1.6052, 1.6106, 1.1736, 1.1806, 1.4239, 1.6253, 1.5929], + device='cuda:2'), covar=tensor([0.0609, 0.1251, 0.1769, 0.1565, 0.0689, 0.1657, 0.0833, 0.0597], + device='cuda:2'), in_proj_covar=tensor([0.0134, 0.0177, 0.0218, 0.0182, 0.0131, 0.0188, 0.0143, 0.0151], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 01:39:15,966 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-06 01:39:16,374 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6767, 5.6475, 5.0564, 2.2453, 5.0353, 5.4727, 5.1733, 5.0050], + device='cuda:2'), covar=tensor([0.0550, 0.0451, 0.0824, 0.4451, 0.0626, 0.0504, 0.1030, 0.0523], + device='cuda:2'), in_proj_covar=tensor([0.0394, 0.0284, 0.0312, 0.0402, 0.0303, 0.0271, 0.0294, 0.0244], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 01:39:17,818 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0023, 2.3732, 1.8675, 2.8849, 1.5516, 1.4439, 1.6793, 2.3607], + device='cuda:2'), covar=tensor([0.1106, 0.1041, 0.1561, 0.0604, 0.1611, 0.2117, 0.1854, 0.1033], + device='cuda:2'), in_proj_covar=tensor([0.0276, 0.0254, 0.0288, 0.0228, 0.0249, 0.0282, 0.0289, 0.0255], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 01:39:27,328 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 01:39:33,464 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 01:39:36,162 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.846e+02 3.982e+02 4.999e+02 1.878e+03, threshold=7.964e+02, percent-clipped=11.0 +2023-02-06 01:39:39,632 INFO [train.py:901] (2/4) Epoch 5, batch 7400, loss[loss=0.2453, simple_loss=0.3142, pruned_loss=0.08817, over 7922.00 frames. ], tot_loss[loss=0.289, simple_loss=0.3502, pruned_loss=0.1139, over 1614042.31 frames. ], batch size: 20, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:52,999 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 01:39:55,398 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 01:39:59,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:13,745 INFO [train.py:901] (2/4) Epoch 5, batch 7450, loss[loss=0.2509, simple_loss=0.3269, pruned_loss=0.08748, over 8245.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3497, pruned_loss=0.1131, over 1612384.03 frames. 
], batch size: 24, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:15,958 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:28,289 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8433, 5.8414, 5.1236, 2.2412, 5.1013, 5.4809, 5.4629, 5.3616], + device='cuda:2'), covar=tensor([0.0553, 0.0490, 0.0888, 0.4730, 0.0681, 0.0468, 0.0937, 0.0585], + device='cuda:2'), in_proj_covar=tensor([0.0396, 0.0285, 0.0315, 0.0405, 0.0305, 0.0274, 0.0296, 0.0247], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:2') +2023-02-06 01:40:32,193 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 01:40:43,636 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:45,624 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 3.116e+02 3.779e+02 4.440e+02 1.107e+03, threshold=7.558e+02, percent-clipped=3.0 +2023-02-06 01:40:49,054 INFO [train.py:901] (2/4) Epoch 5, batch 7500, loss[loss=0.2735, simple_loss=0.3478, pruned_loss=0.09957, over 8620.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3497, pruned_loss=0.113, over 1614100.12 frames. ], batch size: 31, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:49,198 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:22,984 INFO [train.py:901] (2/4) Epoch 5, batch 7550, loss[loss=0.266, simple_loss=0.3197, pruned_loss=0.1061, over 7544.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3485, pruned_loss=0.113, over 1610727.57 frames. ], batch size: 18, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:41:48,010 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:54,547 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.144e+02 3.978e+02 5.379e+02 1.554e+03, threshold=7.957e+02, percent-clipped=6.0 +2023-02-06 01:41:57,848 INFO [train.py:901] (2/4) Epoch 5, batch 7600, loss[loss=0.2636, simple_loss=0.3259, pruned_loss=0.1006, over 7652.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.347, pruned_loss=0.1117, over 1607798.05 frames. ], batch size: 19, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:42:02,463 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:04,534 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39943.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:32,756 INFO [train.py:901] (2/4) Epoch 5, batch 7650, loss[loss=0.2848, simple_loss=0.3411, pruned_loss=0.1142, over 8359.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3467, pruned_loss=0.1122, over 1606024.46 frames. ], batch size: 24, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:42:33,867 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 01:42:45,886 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40001.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:42:49,489 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. 
limit=2.0 +2023-02-06 01:42:56,997 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:05,593 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.281e+02 3.028e+02 3.689e+02 4.703e+02 1.290e+03, threshold=7.379e+02, percent-clipped=1.0 +2023-02-06 01:43:08,870 INFO [train.py:901] (2/4) Epoch 5, batch 7700, loss[loss=0.3034, simple_loss=0.3653, pruned_loss=0.1207, over 8509.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3463, pruned_loss=0.1117, over 1610215.06 frames. ], batch size: 26, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:15,432 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:30,101 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 01:43:37,803 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 01:43:44,273 INFO [train.py:901] (2/4) Epoch 5, batch 7750, loss[loss=0.2737, simple_loss=0.3495, pruned_loss=0.09897, over 8331.00 frames. ], tot_loss[loss=0.2848, simple_loss=0.3463, pruned_loss=0.1116, over 1611169.88 frames. ], batch size: 26, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:44,300 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 01:43:48,564 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3508, 2.2095, 1.5427, 1.9220, 1.7717, 1.2813, 1.8092, 1.6687], + device='cuda:2'), covar=tensor([0.0959, 0.0290, 0.0829, 0.0405, 0.0562, 0.1069, 0.0624, 0.0619], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0248, 0.0316, 0.0305, 0.0320, 0.0319, 0.0339, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 01:44:07,435 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40116.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:44:08,908 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 01:44:15,426 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.050e+02 3.016e+02 3.638e+02 4.428e+02 8.911e+02, threshold=7.276e+02, percent-clipped=8.0 +2023-02-06 01:44:16,231 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:18,943 INFO [train.py:901] (2/4) Epoch 5, batch 7800, loss[loss=0.2864, simple_loss=0.3485, pruned_loss=0.1122, over 8561.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3491, pruned_loss=0.1133, over 1615931.26 frames. ], batch size: 31, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:44:50,153 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:51,119 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 01:44:54,262 INFO [train.py:901] (2/4) Epoch 5, batch 7850, loss[loss=0.3014, simple_loss=0.3645, pruned_loss=0.1192, over 8659.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3478, pruned_loss=0.1122, over 1613487.89 frames. 
], batch size: 39, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:45:03,202 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:14,679 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:20,118 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:24,747 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.315e+02 3.285e+02 3.978e+02 4.753e+02 1.108e+03, threshold=7.955e+02, percent-clipped=4.0 +2023-02-06 01:45:28,277 INFO [train.py:901] (2/4) Epoch 5, batch 7900, loss[loss=0.2562, simple_loss=0.3108, pruned_loss=0.1008, over 7799.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.348, pruned_loss=0.1127, over 1610365.16 frames. ], batch size: 19, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:45:30,495 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40236.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:35,934 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:41,990 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8245, 1.5188, 5.7363, 2.0561, 5.1582, 4.8605, 5.3342, 5.2270], + device='cuda:2'), covar=tensor([0.0317, 0.3550, 0.0257, 0.2338, 0.0907, 0.0593, 0.0371, 0.0377], + device='cuda:2'), in_proj_covar=tensor([0.0337, 0.0491, 0.0434, 0.0428, 0.0496, 0.0414, 0.0399, 0.0457], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 01:46:02,026 INFO [train.py:901] (2/4) Epoch 5, batch 7950, loss[loss=0.2501, simple_loss=0.3149, pruned_loss=0.09264, over 7708.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3478, pruned_loss=0.1122, over 1610583.08 frames. ], batch size: 18, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:07,558 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:09,028 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:33,072 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.942e+02 3.003e+02 3.931e+02 4.743e+02 9.937e+02, threshold=7.862e+02, percent-clipped=4.0 +2023-02-06 01:46:36,390 INFO [train.py:901] (2/4) Epoch 5, batch 8000, loss[loss=0.2782, simple_loss=0.3386, pruned_loss=0.1089, over 8031.00 frames. ], tot_loss[loss=0.2866, simple_loss=0.3482, pruned_loss=0.1125, over 1612445.96 frames. ], batch size: 22, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:46,581 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:47:03,145 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40372.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:47:10,475 INFO [train.py:901] (2/4) Epoch 5, batch 8050, loss[loss=0.2338, simple_loss=0.2938, pruned_loss=0.08691, over 7569.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3439, pruned_loss=0.1104, over 1604590.18 frames. 
], batch size: 18, lr: 1.42e-02, grad_scale: 8.0 +2023-02-06 01:47:20,234 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40397.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:47:23,377 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.09 vs. limit=2.0 +2023-02-06 01:47:44,060 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 01:47:48,097 INFO [train.py:901] (2/4) Epoch 6, batch 0, loss[loss=0.3028, simple_loss=0.3503, pruned_loss=0.1276, over 7928.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3503, pruned_loss=0.1276, over 7928.00 frames. ], batch size: 20, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:47:48,097 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 01:47:59,054 INFO [train.py:935] (2/4) Epoch 6, validation: loss=0.2203, simple_loss=0.3165, pruned_loss=0.06206, over 944034.00 frames. +2023-02-06 01:47:59,054 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 01:48:07,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.052e+02 3.992e+02 5.098e+02 1.227e+03, threshold=7.983e+02, percent-clipped=7.0 +2023-02-06 01:48:13,426 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 01:48:26,724 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 01:48:34,118 INFO [train.py:901] (2/4) Epoch 6, batch 50, loss[loss=0.2769, simple_loss=0.3549, pruned_loss=0.09941, over 8323.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3485, pruned_loss=0.1135, over 361333.23 frames. ], batch size: 25, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:48:38,478 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 01:48:48,496 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 01:48:57,360 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:04,768 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:08,799 INFO [train.py:901] (2/4) Epoch 6, batch 100, loss[loss=0.283, simple_loss=0.3504, pruned_loss=0.1078, over 8349.00 frames. ], tot_loss[loss=0.2819, simple_loss=0.3452, pruned_loss=0.1093, over 641303.44 frames. ], batch size: 24, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:49:13,091 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 01:49:15,330 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:17,934 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.877e+02 3.627e+02 4.294e+02 7.601e+02, threshold=7.253e+02, percent-clipped=0.0 +2023-02-06 01:49:31,500 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:37,659 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:42,465 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3279, 2.0225, 3.2719, 2.5182, 2.6063, 2.0438, 1.5086, 1.4230], + device='cuda:2'), covar=tensor([0.1909, 0.2069, 0.0465, 0.1116, 0.1014, 0.1083, 0.1100, 0.2065], + device='cuda:2'), in_proj_covar=tensor([0.0770, 0.0707, 0.0600, 0.0693, 0.0798, 0.0662, 0.0627, 0.0654], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 01:49:44,247 INFO [train.py:901] (2/4) Epoch 6, batch 150, loss[loss=0.2582, simple_loss=0.3283, pruned_loss=0.09403, over 8781.00 frames. ], tot_loss[loss=0.2798, simple_loss=0.3436, pruned_loss=0.108, over 857818.61 frames. ], batch size: 30, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:49:49,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:54,328 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:19,246 INFO [train.py:901] (2/4) Epoch 6, batch 200, loss[loss=0.2484, simple_loss=0.3332, pruned_loss=0.08178, over 8521.00 frames. ], tot_loss[loss=0.2795, simple_loss=0.3436, pruned_loss=0.1077, over 1031245.66 frames. ], batch size: 28, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:28,759 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 3.079e+02 3.898e+02 5.213e+02 9.157e+02, threshold=7.795e+02, percent-clipped=3.0 +2023-02-06 01:50:32,268 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:54,056 INFO [train.py:901] (2/4) Epoch 6, batch 250, loss[loss=0.2876, simple_loss=0.3566, pruned_loss=0.1093, over 8364.00 frames. ], tot_loss[loss=0.2803, simple_loss=0.3445, pruned_loss=0.1081, over 1164507.81 frames. ], batch size: 24, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:56,273 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:58,361 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:04,095 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 01:51:12,263 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-06 01:51:13,018 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:15,103 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:29,243 INFO [train.py:901] (2/4) Epoch 6, batch 300, loss[loss=0.2427, simple_loss=0.3155, pruned_loss=0.08493, over 7783.00 frames. ], tot_loss[loss=0.2814, simple_loss=0.3452, pruned_loss=0.1088, over 1265635.33 frames. ], batch size: 19, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:51:38,581 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 3.025e+02 3.729e+02 4.724e+02 9.863e+02, threshold=7.458e+02, percent-clipped=3.0 +2023-02-06 01:51:52,710 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:03,708 INFO [train.py:901] (2/4) Epoch 6, batch 350, loss[loss=0.2848, simple_loss=0.338, pruned_loss=0.1158, over 7971.00 frames. ], tot_loss[loss=0.2817, simple_loss=0.3453, pruned_loss=0.1091, over 1344824.38 frames. ], batch size: 21, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:14,009 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 01:52:17,242 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5454, 1.8820, 1.9379, 0.7131, 2.0108, 1.3430, 0.3866, 1.6569], + device='cuda:2'), covar=tensor([0.0181, 0.0116, 0.0080, 0.0208, 0.0115, 0.0345, 0.0304, 0.0096], + device='cuda:2'), in_proj_covar=tensor([0.0324, 0.0234, 0.0195, 0.0282, 0.0228, 0.0374, 0.0300, 0.0274], + device='cuda:2'), out_proj_covar=tensor([1.1224e-04, 7.8652e-05, 6.5579e-05, 9.5827e-05, 7.8811e-05, 1.3856e-04, + 1.0426e-04, 9.3391e-05], device='cuda:2') +2023-02-06 01:52:32,434 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:38,237 INFO [train.py:901] (2/4) Epoch 6, batch 400, loss[loss=0.2675, simple_loss=0.3413, pruned_loss=0.0968, over 8466.00 frames. ], tot_loss[loss=0.2823, simple_loss=0.3456, pruned_loss=0.1095, over 1405806.51 frames. ], batch size: 25, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:46,905 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.939e+02 3.080e+02 3.801e+02 5.022e+02 1.220e+03, threshold=7.601e+02, percent-clipped=4.0 +2023-02-06 01:53:04,908 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40854.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:12,737 INFO [train.py:901] (2/4) Epoch 6, batch 450, loss[loss=0.2781, simple_loss=0.3484, pruned_loss=0.1039, over 8450.00 frames. ], tot_loss[loss=0.2831, simple_loss=0.3466, pruned_loss=0.1098, over 1454759.41 frames. ], batch size: 27, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:24,455 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40883.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:47,833 INFO [train.py:901] (2/4) Epoch 6, batch 500, loss[loss=0.2805, simple_loss=0.3503, pruned_loss=0.1054, over 8287.00 frames. ], tot_loss[loss=0.281, simple_loss=0.3452, pruned_loss=0.1084, over 1490614.16 frames. 
], batch size: 23, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:49,256 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:56,749 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40928.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:57,236 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.058e+02 3.738e+02 5.288e+02 8.550e+02, threshold=7.476e+02, percent-clipped=3.0 +2023-02-06 01:54:12,298 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:13,627 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:22,949 INFO [train.py:901] (2/4) Epoch 6, batch 550, loss[loss=0.2414, simple_loss=0.3054, pruned_loss=0.08873, over 8089.00 frames. ], tot_loss[loss=0.2802, simple_loss=0.3445, pruned_loss=0.1079, over 1520697.60 frames. ], batch size: 21, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:54:25,215 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:30,604 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:49,828 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:55,069 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:56,494 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:57,039 INFO [train.py:901] (2/4) Epoch 6, batch 600, loss[loss=0.3477, simple_loss=0.3921, pruned_loss=0.1516, over 7127.00 frames. ], tot_loss[loss=0.2805, simple_loss=0.3444, pruned_loss=0.1083, over 1535303.85 frames. ], batch size: 71, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:06,090 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.839e+02 3.515e+02 4.292e+02 8.268e+02, threshold=7.031e+02, percent-clipped=4.0 +2023-02-06 01:55:06,996 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41030.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:09,479 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 01:55:29,303 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:29,382 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:31,088 INFO [train.py:901] (2/4) Epoch 6, batch 650, loss[loss=0.2576, simple_loss=0.333, pruned_loss=0.09109, over 8334.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3471, pruned_loss=0.1104, over 1556442.31 frames. ], batch size: 25, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:43,236 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. 
limit=5.0 +2023-02-06 01:55:46,217 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:05,822 INFO [train.py:901] (2/4) Epoch 6, batch 700, loss[loss=0.283, simple_loss=0.3512, pruned_loss=0.1074, over 8637.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.347, pruned_loss=0.1103, over 1571317.60 frames. ], batch size: 34, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:56:08,626 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,231 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,730 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.998e+02 3.776e+02 4.654e+02 1.221e+03, threshold=7.553e+02, percent-clipped=4.0 +2023-02-06 01:56:40,069 INFO [train.py:901] (2/4) Epoch 6, batch 750, loss[loss=0.2452, simple_loss=0.309, pruned_loss=0.09066, over 7793.00 frames. ], tot_loss[loss=0.2833, simple_loss=0.3462, pruned_loss=0.1102, over 1582511.42 frames. ], batch size: 19, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:56:52,788 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 01:57:00,440 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3155, 2.5639, 1.6238, 2.1055, 2.1475, 1.3909, 1.8639, 1.8827], + device='cuda:2'), covar=tensor([0.1127, 0.0265, 0.0856, 0.0537, 0.0554, 0.1138, 0.0773, 0.0744], + device='cuda:2'), in_proj_covar=tensor([0.0339, 0.0237, 0.0306, 0.0298, 0.0311, 0.0315, 0.0335, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 01:57:00,945 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 01:57:15,154 INFO [train.py:901] (2/4) Epoch 6, batch 800, loss[loss=0.2549, simple_loss=0.321, pruned_loss=0.09433, over 7817.00 frames. ], tot_loss[loss=0.2823, simple_loss=0.3454, pruned_loss=0.1096, over 1589966.09 frames. 
], batch size: 20, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:57:21,519 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:22,744 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:23,995 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.937e+02 3.578e+02 4.897e+02 8.076e+02, threshold=7.157e+02, percent-clipped=3.0 +2023-02-06 01:57:38,349 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:41,025 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6231, 2.5912, 4.6942, 1.2157, 3.4364, 2.1935, 1.7041, 2.8651], + device='cuda:2'), covar=tensor([0.1538, 0.1568, 0.0597, 0.3158, 0.1151, 0.2280, 0.1473, 0.2220], + device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0454, 0.0526, 0.0539, 0.0590, 0.0517, 0.0449, 0.0592], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 01:57:46,852 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:49,531 INFO [train.py:901] (2/4) Epoch 6, batch 850, loss[loss=0.2512, simple_loss=0.3255, pruned_loss=0.08841, over 7967.00 frames. ], tot_loss[loss=0.2835, simple_loss=0.3468, pruned_loss=0.1101, over 1598469.81 frames. ], batch size: 21, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:58:23,893 INFO [train.py:901] (2/4) Epoch 6, batch 900, loss[loss=0.3166, simple_loss=0.3786, pruned_loss=0.1273, over 8231.00 frames. ], tot_loss[loss=0.2838, simple_loss=0.3469, pruned_loss=0.1103, over 1607052.51 frames. ], batch size: 24, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:58:33,480 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.871e+02 3.405e+02 4.321e+02 1.147e+03, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 01:58:42,527 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:53,863 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:58,546 INFO [train.py:901] (2/4) Epoch 6, batch 950, loss[loss=0.3472, simple_loss=0.4, pruned_loss=0.1472, over 8624.00 frames. ], tot_loss[loss=0.2809, simple_loss=0.3447, pruned_loss=0.1086, over 1610569.18 frames. ], batch size: 39, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:06,367 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:11,061 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:21,425 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-06 01:59:26,997 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:28,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:32,969 INFO [train.py:901] (2/4) Epoch 6, batch 1000, loss[loss=0.3065, simple_loss=0.3659, pruned_loss=0.1236, over 8841.00 frames. ], tot_loss[loss=0.2815, simple_loss=0.3456, pruned_loss=0.1087, over 1615141.83 frames. ], batch size: 40, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:41,357 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.042e+02 3.293e+02 3.921e+02 5.074e+02 1.211e+03, threshold=7.843e+02, percent-clipped=6.0 +2023-02-06 01:59:55,288 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 02:00:06,252 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:07,520 INFO [train.py:901] (2/4) Epoch 6, batch 1050, loss[loss=0.2685, simple_loss=0.3345, pruned_loss=0.1012, over 7710.00 frames. ], tot_loss[loss=0.2819, simple_loss=0.3458, pruned_loss=0.109, over 1612900.61 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:08,220 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 02:00:13,144 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:42,275 INFO [train.py:901] (2/4) Epoch 6, batch 1100, loss[loss=0.326, simple_loss=0.379, pruned_loss=0.1365, over 8441.00 frames. ], tot_loss[loss=0.282, simple_loss=0.3458, pruned_loss=0.1091, over 1613472.10 frames. ], batch size: 49, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:46,701 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:51,107 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.937e+02 3.488e+02 4.376e+02 9.981e+02, threshold=6.976e+02, percent-clipped=3.0 +2023-02-06 02:01:13,000 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1797, 2.1446, 1.5123, 1.8958, 1.6880, 1.3066, 1.5088, 1.6569], + device='cuda:2'), covar=tensor([0.0940, 0.0267, 0.0897, 0.0466, 0.0584, 0.1124, 0.0797, 0.0698], + device='cuda:2'), in_proj_covar=tensor([0.0338, 0.0234, 0.0310, 0.0296, 0.0313, 0.0309, 0.0334, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 02:01:16,051 INFO [train.py:901] (2/4) Epoch 6, batch 1150, loss[loss=0.2894, simple_loss=0.3659, pruned_loss=0.1065, over 8522.00 frames. ], tot_loss[loss=0.2805, simple_loss=0.3446, pruned_loss=0.1083, over 1611927.27 frames. ], batch size: 39, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:18,818 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-06 02:01:25,451 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:38,066 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:50,379 INFO [train.py:901] (2/4) Epoch 6, batch 1200, loss[loss=0.2934, simple_loss=0.3523, pruned_loss=0.1173, over 8648.00 frames. ], tot_loss[loss=0.2796, simple_loss=0.3437, pruned_loss=0.1077, over 1614282.59 frames. ], batch size: 34, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:55,789 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:00,249 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.965e+02 3.060e+02 3.864e+02 4.910e+02 1.275e+03, threshold=7.729e+02, percent-clipped=9.0 +2023-02-06 02:02:02,461 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5965, 1.5164, 3.0240, 1.1311, 2.0396, 3.3231, 3.2140, 2.8356], + device='cuda:2'), covar=tensor([0.1059, 0.1288, 0.0405, 0.1987, 0.0772, 0.0275, 0.0498, 0.0615], + device='cuda:2'), in_proj_covar=tensor([0.0244, 0.0272, 0.0233, 0.0267, 0.0234, 0.0211, 0.0248, 0.0285], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 02:02:03,213 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:19,548 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:24,290 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7299, 2.3038, 3.7425, 2.8894, 2.8104, 2.2923, 1.6609, 1.6071], + device='cuda:2'), covar=tensor([0.2009, 0.2546, 0.0500, 0.1331, 0.1338, 0.1204, 0.1219, 0.2659], + device='cuda:2'), in_proj_covar=tensor([0.0772, 0.0716, 0.0606, 0.0709, 0.0799, 0.0661, 0.0630, 0.0658], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:02:24,697 INFO [train.py:901] (2/4) Epoch 6, batch 1250, loss[loss=0.2757, simple_loss=0.3506, pruned_loss=0.1004, over 8484.00 frames. ], tot_loss[loss=0.2818, simple_loss=0.3456, pruned_loss=0.109, over 1617422.97 frames. ], batch size: 28, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:02:29,690 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:59,806 INFO [train.py:901] (2/4) Epoch 6, batch 1300, loss[loss=0.2926, simple_loss=0.3575, pruned_loss=0.1139, over 7976.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3445, pruned_loss=0.1081, over 1614433.68 frames. 
], batch size: 21, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:08,606 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.137e+02 4.028e+02 4.813e+02 9.668e+02, threshold=8.056e+02, percent-clipped=5.0 +2023-02-06 02:03:09,499 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:27,427 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:34,646 INFO [train.py:901] (2/4) Epoch 6, batch 1350, loss[loss=0.2651, simple_loss=0.3346, pruned_loss=0.09781, over 8290.00 frames. ], tot_loss[loss=0.2795, simple_loss=0.3437, pruned_loss=0.1077, over 1609444.43 frames. ], batch size: 23, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:42,962 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:00,418 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:09,625 INFO [train.py:901] (2/4) Epoch 6, batch 1400, loss[loss=0.2133, simple_loss=0.282, pruned_loss=0.07232, over 7252.00 frames. ], tot_loss[loss=0.2785, simple_loss=0.3426, pruned_loss=0.1072, over 1609412.87 frames. ], batch size: 16, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:18,118 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.090e+02 3.079e+02 3.704e+02 4.589e+02 8.838e+02, threshold=7.407e+02, percent-clipped=2.0 +2023-02-06 02:04:22,375 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:39,963 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:44,592 INFO [train.py:901] (2/4) Epoch 6, batch 1450, loss[loss=0.2734, simple_loss=0.3369, pruned_loss=0.1049, over 7804.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.342, pruned_loss=0.1073, over 1608355.64 frames. ], batch size: 20, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:47,868 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 02:05:18,590 INFO [train.py:901] (2/4) Epoch 6, batch 1500, loss[loss=0.2392, simple_loss=0.315, pruned_loss=0.08166, over 7806.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3428, pruned_loss=0.1075, over 1610731.02 frames. ], batch size: 20, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:24,646 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:05:27,835 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 2.922e+02 3.542e+02 4.432e+02 1.007e+03, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 02:05:53,224 INFO [train.py:901] (2/4) Epoch 6, batch 1550, loss[loss=0.2279, simple_loss=0.2884, pruned_loss=0.08373, over 7447.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3418, pruned_loss=0.1062, over 1616188.09 frames. ], batch size: 17, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:53,365 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41966.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:06:28,450 INFO [train.py:901] (2/4) Epoch 6, batch 1600, loss[loss=0.3487, simple_loss=0.3894, pruned_loss=0.154, over 6999.00 frames. 
], tot_loss[loss=0.2775, simple_loss=0.342, pruned_loss=0.1065, over 1613010.41 frames. ], batch size: 72, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:06:28,516 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:06:37,879 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.135e+02 3.132e+02 3.836e+02 5.392e+02 3.005e+03, threshold=7.672e+02, percent-clipped=11.0 +2023-02-06 02:07:03,789 INFO [train.py:901] (2/4) Epoch 6, batch 1650, loss[loss=0.2663, simple_loss=0.3317, pruned_loss=0.1004, over 8354.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3409, pruned_loss=0.1055, over 1612589.46 frames. ], batch size: 24, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:39,088 INFO [train.py:901] (2/4) Epoch 6, batch 1700, loss[loss=0.262, simple_loss=0.3417, pruned_loss=0.09116, over 8513.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3408, pruned_loss=0.1059, over 1611995.42 frames. ], batch size: 26, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:47,893 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.826e+02 3.670e+02 4.452e+02 1.049e+03, threshold=7.339e+02, percent-clipped=2.0 +2023-02-06 02:07:49,313 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:07:52,036 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1006, 1.1038, 3.2521, 0.9398, 2.7808, 2.7568, 2.9302, 2.8530], + device='cuda:2'), covar=tensor([0.0629, 0.3634, 0.0747, 0.2865, 0.1502, 0.0885, 0.0655, 0.0772], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0504, 0.0442, 0.0435, 0.0500, 0.0414, 0.0412, 0.0463], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 02:08:05,229 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8494, 2.9244, 3.0766, 2.3707, 1.4474, 3.4812, 0.6916, 1.6625], + device='cuda:2'), covar=tensor([0.3325, 0.1599, 0.0675, 0.2471, 0.6924, 0.0424, 0.5907, 0.2882], + device='cuda:2'), in_proj_covar=tensor([0.0131, 0.0132, 0.0082, 0.0179, 0.0216, 0.0083, 0.0145, 0.0135], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:08:14,469 INFO [train.py:901] (2/4) Epoch 6, batch 1750, loss[loss=0.2538, simple_loss=0.3238, pruned_loss=0.09186, over 8146.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3413, pruned_loss=0.1067, over 1610977.99 frames. ], batch size: 22, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:49,384 INFO [train.py:901] (2/4) Epoch 6, batch 1800, loss[loss=0.2825, simple_loss=0.343, pruned_loss=0.111, over 7971.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3424, pruned_loss=0.1075, over 1610853.75 frames. ], batch size: 21, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:59,176 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 3.032e+02 3.540e+02 4.353e+02 2.015e+03, threshold=7.080e+02, percent-clipped=5.0 +2023-02-06 02:09:24,921 INFO [train.py:901] (2/4) Epoch 6, batch 1850, loss[loss=0.2712, simple_loss=0.3477, pruned_loss=0.09729, over 8489.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3428, pruned_loss=0.1073, over 1613818.36 frames. 
], batch size: 28, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:09:26,410 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:09:55,347 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42310.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:09:59,287 INFO [train.py:901] (2/4) Epoch 6, batch 1900, loss[loss=0.3067, simple_loss=0.362, pruned_loss=0.1257, over 8318.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3434, pruned_loss=0.1075, over 1614873.05 frames. ], batch size: 25, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:10:08,770 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.992e+02 2.715e+02 3.297e+02 4.142e+02 7.213e+02, threshold=6.594e+02, percent-clipped=2.0 +2023-02-06 02:10:23,937 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 02:10:34,058 INFO [train.py:901] (2/4) Epoch 6, batch 1950, loss[loss=0.3136, simple_loss=0.369, pruned_loss=0.1291, over 8198.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3433, pruned_loss=0.1073, over 1617865.64 frames. ], batch size: 23, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:10:36,639 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 02:10:46,220 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:48,987 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42387.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:56,181 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 02:10:56,318 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:06,406 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:08,979 INFO [train.py:901] (2/4) Epoch 6, batch 2000, loss[loss=0.2613, simple_loss=0.3189, pruned_loss=0.1018, over 7793.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3423, pruned_loss=0.1069, over 1615346.06 frames. ], batch size: 20, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:11:15,181 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42425.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:11:18,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.766e+02 3.581e+02 4.303e+02 8.011e+02, threshold=7.162e+02, percent-clipped=3.0 +2023-02-06 02:11:37,942 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3747, 1.5924, 1.5599, 1.2755, 1.3132, 1.3933, 1.9985, 1.6622], + device='cuda:2'), covar=tensor([0.0524, 0.1212, 0.1717, 0.1405, 0.0599, 0.1577, 0.0631, 0.0539], + device='cuda:2'), in_proj_covar=tensor([0.0128, 0.0173, 0.0211, 0.0178, 0.0125, 0.0183, 0.0138, 0.0149], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 02:11:43,879 INFO [train.py:901] (2/4) Epoch 6, batch 2050, loss[loss=0.2963, simple_loss=0.3597, pruned_loss=0.1165, over 8591.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3421, pruned_loss=0.1065, over 1619122.29 frames. 
], batch size: 49, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:17,698 INFO [train.py:901] (2/4) Epoch 6, batch 2100, loss[loss=0.2718, simple_loss=0.3219, pruned_loss=0.1108, over 7795.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3438, pruned_loss=0.1081, over 1618184.03 frames. ], batch size: 19, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:23,879 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:12:27,686 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.916e+02 3.481e+02 4.572e+02 1.310e+03, threshold=6.962e+02, percent-clipped=2.0 +2023-02-06 02:12:52,386 INFO [train.py:901] (2/4) Epoch 6, batch 2150, loss[loss=0.2818, simple_loss=0.3598, pruned_loss=0.1019, over 8623.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3444, pruned_loss=0.1082, over 1614941.94 frames. ], batch size: 31, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:54,238 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0 +2023-02-06 02:13:16,791 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-06 02:13:27,027 INFO [train.py:901] (2/4) Epoch 6, batch 2200, loss[loss=0.2906, simple_loss=0.3536, pruned_loss=0.1138, over 8023.00 frames. ], tot_loss[loss=0.2799, simple_loss=0.344, pruned_loss=0.108, over 1615866.33 frames. ], batch size: 22, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:13:36,143 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 3.020e+02 3.729e+02 5.072e+02 1.122e+03, threshold=7.459e+02, percent-clipped=5.0 +2023-02-06 02:13:43,112 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:13:59,702 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:00,826 INFO [train.py:901] (2/4) Epoch 6, batch 2250, loss[loss=0.3157, simple_loss=0.3635, pruned_loss=0.134, over 6745.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3423, pruned_loss=0.1071, over 1611028.25 frames. ], batch size: 71, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:01,631 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:11,906 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42681.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:14:29,254 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42706.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:14:35,833 INFO [train.py:901] (2/4) Epoch 6, batch 2300, loss[loss=0.2883, simple_loss=0.3422, pruned_loss=0.1172, over 7653.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.3431, pruned_loss=0.1076, over 1617058.23 frames. ], batch size: 19, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:45,246 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.977e+02 3.532e+02 4.435e+02 7.362e+02, threshold=7.063e+02, percent-clipped=0.0 +2023-02-06 02:14:53,236 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:15:11,251 INFO [train.py:901] (2/4) Epoch 6, batch 2350, loss[loss=0.315, simple_loss=0.3727, pruned_loss=0.1287, over 8529.00 frames. 
], tot_loss[loss=0.2804, simple_loss=0.3442, pruned_loss=0.1083, over 1616182.67 frames. ], batch size: 28, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:46,904 INFO [train.py:901] (2/4) Epoch 6, batch 2400, loss[loss=0.308, simple_loss=0.3743, pruned_loss=0.1209, over 8248.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3429, pruned_loss=0.1075, over 1615535.86 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:56,306 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.099e+02 3.712e+02 4.452e+02 1.076e+03, threshold=7.425e+02, percent-clipped=4.0 +2023-02-06 02:16:14,324 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:20,856 INFO [train.py:901] (2/4) Epoch 6, batch 2450, loss[loss=0.3236, simple_loss=0.3849, pruned_loss=0.1311, over 8504.00 frames. ], tot_loss[loss=0.2791, simple_loss=0.343, pruned_loss=0.1076, over 1615442.02 frames. ], batch size: 26, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:16:22,282 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:27,381 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 02:16:29,160 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:42,219 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:54,610 INFO [train.py:901] (2/4) Epoch 6, batch 2500, loss[loss=0.3004, simple_loss=0.3696, pruned_loss=0.1156, over 8323.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3428, pruned_loss=0.1069, over 1619263.75 frames. ], batch size: 25, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:05,200 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 3.094e+02 4.004e+02 4.995e+02 1.056e+03, threshold=8.009e+02, percent-clipped=4.0 +2023-02-06 02:17:15,514 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3211, 1.4792, 1.3971, 1.9471, 0.9709, 1.1554, 1.2643, 1.4138], + device='cuda:2'), covar=tensor([0.1130, 0.1027, 0.1333, 0.0593, 0.1230, 0.1925, 0.0997, 0.0938], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0253, 0.0285, 0.0228, 0.0247, 0.0283, 0.0284, 0.0258], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 02:17:29,431 INFO [train.py:901] (2/4) Epoch 6, batch 2550, loss[loss=0.2934, simple_loss=0.3392, pruned_loss=0.1238, over 7793.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3422, pruned_loss=0.107, over 1615355.03 frames. ], batch size: 19, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:41,648 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:01,224 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:04,439 INFO [train.py:901] (2/4) Epoch 6, batch 2600, loss[loss=0.2618, simple_loss=0.3278, pruned_loss=0.09789, over 8361.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3412, pruned_loss=0.1064, over 1614567.82 frames. 
], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:13,987 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.052e+02 3.779e+02 5.019e+02 1.784e+03, threshold=7.558e+02, percent-clipped=4.0 +2023-02-06 02:18:39,586 INFO [train.py:901] (2/4) Epoch 6, batch 2650, loss[loss=0.2769, simple_loss=0.3422, pruned_loss=0.1058, over 8111.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.3397, pruned_loss=0.1056, over 1607706.90 frames. ], batch size: 23, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:43,297 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4779, 1.7888, 3.1095, 1.1035, 2.2811, 1.8044, 1.3701, 1.8937], + device='cuda:2'), covar=tensor([0.1548, 0.2102, 0.0644, 0.3475, 0.1370, 0.2480, 0.1654, 0.2175], + device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0455, 0.0521, 0.0538, 0.0584, 0.0520, 0.0445, 0.0589], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:19:05,116 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1709, 1.0974, 1.1278, 1.1036, 0.8377, 1.2492, 0.0284, 0.7853], + device='cuda:2'), covar=tensor([0.3080, 0.2555, 0.1079, 0.2130, 0.5575, 0.0822, 0.5121, 0.2514], + device='cuda:2'), in_proj_covar=tensor([0.0134, 0.0133, 0.0083, 0.0182, 0.0219, 0.0084, 0.0144, 0.0136], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:19:11,157 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:14,347 INFO [train.py:901] (2/4) Epoch 6, batch 2700, loss[loss=0.2528, simple_loss=0.3319, pruned_loss=0.08679, over 8359.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3388, pruned_loss=0.1049, over 1608675.26 frames. ], batch size: 24, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:19:15,204 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7834, 2.1118, 3.5744, 1.2618, 2.6431, 1.9795, 1.7035, 2.1834], + device='cuda:2'), covar=tensor([0.1377, 0.1934, 0.0549, 0.3336, 0.1320, 0.2359, 0.1404, 0.2188], + device='cuda:2'), in_proj_covar=tensor([0.0472, 0.0462, 0.0529, 0.0542, 0.0594, 0.0528, 0.0449, 0.0601], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:19:20,977 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:23,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.935e+02 3.532e+02 4.548e+02 1.003e+03, threshold=7.064e+02, percent-clipped=2.0 +2023-02-06 02:19:28,348 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:45,823 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-06 02:19:47,959 INFO [train.py:901] (2/4) Epoch 6, batch 2750, loss[loss=0.2879, simple_loss=0.3567, pruned_loss=0.1095, over 8287.00 frames. ], tot_loss[loss=0.2746, simple_loss=0.3392, pruned_loss=0.105, over 1611019.40 frames. 
], batch size: 49, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:19:57,318 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4570, 1.9542, 2.1732, 0.9905, 2.3370, 1.3861, 0.6678, 1.8327], + device='cuda:2'), covar=tensor([0.0248, 0.0115, 0.0084, 0.0228, 0.0116, 0.0385, 0.0330, 0.0114], + device='cuda:2'), in_proj_covar=tensor([0.0321, 0.0235, 0.0206, 0.0288, 0.0227, 0.0381, 0.0298, 0.0273], + device='cuda:2'), out_proj_covar=tensor([1.0839e-04, 7.7382e-05, 6.8384e-05, 9.5718e-05, 7.6279e-05, 1.3795e-04, + 1.0173e-04, 9.1606e-05], device='cuda:2') +2023-02-06 02:20:02,630 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6565, 1.8257, 3.2679, 1.3187, 2.7864, 1.9649, 1.7410, 2.4896], + device='cuda:2'), covar=tensor([0.1712, 0.2103, 0.0570, 0.3352, 0.0995, 0.2234, 0.1758, 0.1600], + device='cuda:2'), in_proj_covar=tensor([0.0469, 0.0460, 0.0526, 0.0535, 0.0589, 0.0523, 0.0444, 0.0590], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:20:22,670 INFO [train.py:901] (2/4) Epoch 6, batch 2800, loss[loss=0.239, simple_loss=0.3051, pruned_loss=0.08645, over 7817.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3401, pruned_loss=0.1051, over 1614380.60 frames. ], batch size: 20, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:20:26,191 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:32,061 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.696e+02 3.315e+02 4.271e+02 8.534e+02, threshold=6.630e+02, percent-clipped=4.0 +2023-02-06 02:20:39,131 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:40,382 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:55,847 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:56,926 INFO [train.py:901] (2/4) Epoch 6, batch 2850, loss[loss=0.2729, simple_loss=0.3557, pruned_loss=0.0951, over 8320.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3419, pruned_loss=0.1063, over 1617440.42 frames. ], batch size: 25, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:11,142 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0534, 2.2364, 1.6649, 2.7790, 1.4320, 1.4231, 1.9094, 2.4604], + device='cuda:2'), covar=tensor([0.0986, 0.1221, 0.1480, 0.0541, 0.1474, 0.2053, 0.1316, 0.0847], + device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0254, 0.0282, 0.0229, 0.0247, 0.0284, 0.0286, 0.0254], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 02:21:31,678 INFO [train.py:901] (2/4) Epoch 6, batch 2900, loss[loss=0.2355, simple_loss=0.2939, pruned_loss=0.08858, over 7423.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3416, pruned_loss=0.1061, over 1614002.74 frames. 
], batch size: 17, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:41,568 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.996e+02 3.885e+02 4.976e+02 9.964e+02, threshold=7.771e+02, percent-clipped=9.0 +2023-02-06 02:21:46,021 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:00,470 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:01,679 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 02:22:07,125 INFO [train.py:901] (2/4) Epoch 6, batch 2950, loss[loss=0.2471, simple_loss=0.3013, pruned_loss=0.09646, over 7229.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3414, pruned_loss=0.1062, over 1612057.58 frames. ], batch size: 16, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:17,900 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:36,001 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:41,761 INFO [train.py:901] (2/4) Epoch 6, batch 3000, loss[loss=0.2907, simple_loss=0.3575, pruned_loss=0.112, over 8512.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3403, pruned_loss=0.1055, over 1611425.30 frames. ], batch size: 26, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:41,761 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 02:22:53,878 INFO [train.py:935] (2/4) Epoch 6, validation: loss=0.2158, simple_loss=0.3124, pruned_loss=0.05962, over 944034.00 frames. +2023-02-06 02:22:53,879 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 02:23:03,880 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.288e+02 4.080e+02 5.339e+02 1.082e+03, threshold=8.161e+02, percent-clipped=5.0 +2023-02-06 02:23:28,757 INFO [train.py:901] (2/4) Epoch 6, batch 3050, loss[loss=0.3169, simple_loss=0.364, pruned_loss=0.1349, over 7979.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3422, pruned_loss=0.1071, over 1610863.96 frames. ], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:03,331 INFO [train.py:901] (2/4) Epoch 6, batch 3100, loss[loss=0.2567, simple_loss=0.3268, pruned_loss=0.09328, over 7972.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3404, pruned_loss=0.1058, over 1612719.96 frames. ], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:12,759 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.871e+02 3.509e+02 4.582e+02 1.148e+03, threshold=7.017e+02, percent-clipped=4.0 +2023-02-06 02:24:14,871 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:24:38,261 INFO [train.py:901] (2/4) Epoch 6, batch 3150, loss[loss=0.2275, simple_loss=0.2932, pruned_loss=0.08091, over 7726.00 frames. ], tot_loss[loss=0.2776, simple_loss=0.3411, pruned_loss=0.107, over 1606225.89 frames. 
], batch size: 18, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:57,058 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:10,976 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:13,475 INFO [train.py:901] (2/4) Epoch 6, batch 3200, loss[loss=0.2486, simple_loss=0.3172, pruned_loss=0.08996, over 8024.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3417, pruned_loss=0.1073, over 1607159.12 frames. ], batch size: 22, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:25:14,372 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:23,589 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.828e+02 3.409e+02 4.222e+02 1.719e+03, threshold=6.818e+02, percent-clipped=4.0 +2023-02-06 02:25:28,581 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43637.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:29,270 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0064, 4.0926, 2.5033, 2.7308, 2.8803, 2.4438, 2.9435, 3.2798], + device='cuda:2'), covar=tensor([0.1289, 0.0254, 0.0740, 0.0669, 0.0563, 0.0858, 0.0671, 0.0709], + device='cuda:2'), in_proj_covar=tensor([0.0342, 0.0234, 0.0309, 0.0303, 0.0316, 0.0310, 0.0338, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 02:25:49,153 INFO [train.py:901] (2/4) Epoch 6, batch 3250, loss[loss=0.267, simple_loss=0.3314, pruned_loss=0.1013, over 7973.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3413, pruned_loss=0.1074, over 1606928.68 frames. ], batch size: 21, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:25:49,412 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1398, 1.9519, 3.0667, 2.3354, 2.4810, 1.9000, 1.3838, 1.1807], + device='cuda:2'), covar=tensor([0.2126, 0.2093, 0.0493, 0.1143, 0.1007, 0.1162, 0.1194, 0.2231], + device='cuda:2'), in_proj_covar=tensor([0.0792, 0.0722, 0.0624, 0.0712, 0.0801, 0.0670, 0.0635, 0.0663], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:25:57,411 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2493, 1.3037, 3.4358, 1.5500, 2.2211, 3.8195, 3.6985, 3.2082], + device='cuda:2'), covar=tensor([0.0918, 0.1655, 0.0346, 0.1954, 0.0930, 0.0230, 0.0384, 0.0659], + device='cuda:2'), in_proj_covar=tensor([0.0241, 0.0272, 0.0225, 0.0265, 0.0239, 0.0213, 0.0250, 0.0284], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 02:26:16,640 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8439, 3.7589, 3.4487, 1.8966, 3.3734, 3.3224, 3.5500, 2.9526], + device='cuda:2'), covar=tensor([0.0940, 0.0625, 0.0937, 0.4316, 0.0841, 0.0825, 0.1288, 0.0998], + device='cuda:2'), in_proj_covar=tensor([0.0405, 0.0297, 0.0334, 0.0411, 0.0321, 0.0285, 0.0316, 0.0260], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:26:17,077 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-02-06 02:26:23,414 INFO [train.py:901] (2/4) Epoch 6, batch 3300, loss[loss=0.2245, simple_loss=0.2868, pruned_loss=0.08107, over 7707.00 frames. ], tot_loss[loss=0.2777, simple_loss=0.3415, pruned_loss=0.1069, over 1613666.47 frames. ], batch size: 18, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:26:24,932 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3533, 2.1008, 1.4947, 1.9995, 1.8125, 1.2552, 1.5293, 1.8784], + device='cuda:2'), covar=tensor([0.0970, 0.0364, 0.0920, 0.0449, 0.0595, 0.1225, 0.0828, 0.0670], + device='cuda:2'), in_proj_covar=tensor([0.0334, 0.0230, 0.0304, 0.0297, 0.0310, 0.0304, 0.0330, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 02:26:33,006 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.968e+02 3.670e+02 5.054e+02 9.057e+02, threshold=7.341e+02, percent-clipped=6.0 +2023-02-06 02:26:43,374 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8405, 2.0899, 1.7408, 2.7053, 1.1035, 1.3448, 1.6287, 2.2672], + device='cuda:2'), covar=tensor([0.1106, 0.1019, 0.1358, 0.0577, 0.1572, 0.2092, 0.1517, 0.1057], + device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0251, 0.0277, 0.0225, 0.0243, 0.0276, 0.0282, 0.0251], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 02:26:58,035 INFO [train.py:901] (2/4) Epoch 6, batch 3350, loss[loss=0.2862, simple_loss=0.3447, pruned_loss=0.1139, over 8474.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3405, pruned_loss=0.106, over 1615812.17 frames. ], batch size: 29, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:27:25,486 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:27:33,330 INFO [train.py:901] (2/4) Epoch 6, batch 3400, loss[loss=0.3114, simple_loss=0.3663, pruned_loss=0.1283, over 7809.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3418, pruned_loss=0.1064, over 1618187.60 frames. ], batch size: 20, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:27:39,291 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7808, 5.8062, 4.9779, 2.2716, 5.1185, 5.4492, 5.4541, 4.7972], + device='cuda:2'), covar=tensor([0.0555, 0.0349, 0.0827, 0.4489, 0.0617, 0.0497, 0.0855, 0.0592], + device='cuda:2'), in_proj_covar=tensor([0.0407, 0.0303, 0.0339, 0.0421, 0.0325, 0.0290, 0.0324, 0.0266], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:27:42,441 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.693e+02 3.397e+02 4.441e+02 9.371e+02, threshold=6.793e+02, percent-clipped=2.0 +2023-02-06 02:27:46,358 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 02:27:47,659 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.78 vs. limit=5.0 +2023-02-06 02:28:07,549 INFO [train.py:901] (2/4) Epoch 6, batch 3450, loss[loss=0.2669, simple_loss=0.3213, pruned_loss=0.1063, over 7711.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3405, pruned_loss=0.1052, over 1619614.12 frames. 
], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:09,137 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3702, 2.4456, 3.1100, 1.2068, 3.1740, 1.9445, 1.4708, 1.9491], + device='cuda:2'), covar=tensor([0.0282, 0.0158, 0.0150, 0.0284, 0.0192, 0.0414, 0.0387, 0.0204], + device='cuda:2'), in_proj_covar=tensor([0.0332, 0.0244, 0.0206, 0.0297, 0.0239, 0.0394, 0.0306, 0.0279], + device='cuda:2'), out_proj_covar=tensor([1.1211e-04, 8.0431e-05, 6.7840e-05, 9.8214e-05, 8.0310e-05, 1.4249e-04, + 1.0373e-04, 9.3226e-05], device='cuda:2') +2023-02-06 02:28:14,344 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:24,929 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 02:28:42,228 INFO [train.py:901] (2/4) Epoch 6, batch 3500, loss[loss=0.2334, simple_loss=0.3, pruned_loss=0.0834, over 7656.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3399, pruned_loss=0.1045, over 1617600.67 frames. ], batch size: 19, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:50,530 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:52,404 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 3.111e+02 3.775e+02 4.956e+02 7.195e+02, threshold=7.550e+02, percent-clipped=1.0 +2023-02-06 02:28:59,179 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 02:29:16,409 INFO [train.py:901] (2/4) Epoch 6, batch 3550, loss[loss=0.3241, simple_loss=0.3609, pruned_loss=0.1437, over 8030.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3412, pruned_loss=0.106, over 1615640.09 frames. ], batch size: 22, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:29:23,547 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.52 vs. limit=5.0 +2023-02-06 02:29:24,742 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:34,248 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:52,632 INFO [train.py:901] (2/4) Epoch 6, batch 3600, loss[loss=0.2892, simple_loss=0.3496, pruned_loss=0.1144, over 8464.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.34, pruned_loss=0.1048, over 1617543.98 frames. ], batch size: 27, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:30:02,263 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.983e+02 3.632e+02 4.470e+02 1.452e+03, threshold=7.265e+02, percent-clipped=1.0 +2023-02-06 02:30:27,000 INFO [train.py:901] (2/4) Epoch 6, batch 3650, loss[loss=0.2525, simple_loss=0.3021, pruned_loss=0.1014, over 7806.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3404, pruned_loss=0.1056, over 1615007.06 frames. ], batch size: 20, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:00,588 INFO [train.py:901] (2/4) Epoch 6, batch 3700, loss[loss=0.2657, simple_loss=0.3405, pruned_loss=0.09546, over 8352.00 frames. ], tot_loss[loss=0.2757, simple_loss=0.3402, pruned_loss=0.1056, over 1614492.65 frames. ], batch size: 26, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:01,284 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 02:31:11,218 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 3.019e+02 3.651e+02 4.413e+02 8.839e+02, threshold=7.303e+02, percent-clipped=3.0 +2023-02-06 02:31:23,983 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:31:24,338 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 02:31:35,820 INFO [train.py:901] (2/4) Epoch 6, batch 3750, loss[loss=0.2501, simple_loss=0.314, pruned_loss=0.09311, over 8518.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3396, pruned_loss=0.105, over 1614563.39 frames. ], batch size: 28, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:09,331 INFO [train.py:901] (2/4) Epoch 6, batch 3800, loss[loss=0.2822, simple_loss=0.3628, pruned_loss=0.1008, over 8466.00 frames. ], tot_loss[loss=0.2779, simple_loss=0.342, pruned_loss=0.1069, over 1621075.09 frames. ], batch size: 29, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:19,582 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.008e+02 3.761e+02 4.930e+02 1.044e+03, threshold=7.521e+02, percent-clipped=7.0 +2023-02-06 02:32:32,434 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:44,256 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:45,454 INFO [train.py:901] (2/4) Epoch 6, batch 3850, loss[loss=0.2252, simple_loss=0.2888, pruned_loss=0.08076, over 7543.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3413, pruned_loss=0.1064, over 1619939.53 frames. ], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:48,967 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:49,771 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:02,820 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 02:33:20,475 INFO [train.py:901] (2/4) Epoch 6, batch 3900, loss[loss=0.2708, simple_loss=0.3422, pruned_loss=0.0997, over 8194.00 frames. ], tot_loss[loss=0.2776, simple_loss=0.3422, pruned_loss=0.1066, over 1617576.60 frames. ], batch size: 23, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:33:23,954 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:30,569 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 2.909e+02 3.535e+02 4.398e+02 8.405e+02, threshold=7.069e+02, percent-clipped=2.0 +2023-02-06 02:33:56,224 INFO [train.py:901] (2/4) Epoch 6, batch 3950, loss[loss=0.2857, simple_loss=0.3547, pruned_loss=0.1084, over 8142.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3418, pruned_loss=0.1061, over 1612914.70 frames. ], batch size: 22, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:09,756 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:34:30,790 INFO [train.py:901] (2/4) Epoch 6, batch 4000, loss[loss=0.233, simple_loss=0.3004, pruned_loss=0.08284, over 7542.00 frames. 
], tot_loss[loss=0.276, simple_loss=0.3404, pruned_loss=0.1058, over 1607438.20 frames. ], batch size: 18, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:40,321 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.014e+02 2.805e+02 3.702e+02 4.857e+02 8.487e+02, threshold=7.405e+02, percent-clipped=7.0 +2023-02-06 02:34:44,614 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:05,733 INFO [train.py:901] (2/4) Epoch 6, batch 4050, loss[loss=0.2999, simple_loss=0.3665, pruned_loss=0.1166, over 8464.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3409, pruned_loss=0.1057, over 1615200.15 frames. ], batch size: 29, lr: 1.27e-02, grad_scale: 16.0 +2023-02-06 02:35:08,219 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0 +2023-02-06 02:35:21,628 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2632, 1.8140, 3.0982, 1.1011, 2.2292, 1.6303, 1.4901, 1.7779], + device='cuda:2'), covar=tensor([0.1610, 0.1705, 0.0557, 0.3129, 0.1388, 0.2535, 0.1493, 0.2123], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0452, 0.0524, 0.0536, 0.0579, 0.0520, 0.0439, 0.0580], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:35:41,181 INFO [train.py:901] (2/4) Epoch 6, batch 4100, loss[loss=0.3286, simple_loss=0.3888, pruned_loss=0.1342, over 8500.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3414, pruned_loss=0.1053, over 1620365.57 frames. ], batch size: 26, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:35:44,017 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:50,484 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.131e+02 3.987e+02 5.314e+02 1.327e+03, threshold=7.973e+02, percent-clipped=4.0 +2023-02-06 02:36:00,536 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:00,598 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:14,412 INFO [train.py:901] (2/4) Epoch 6, batch 4150, loss[loss=0.2041, simple_loss=0.2785, pruned_loss=0.06484, over 7795.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.34, pruned_loss=0.1048, over 1616306.47 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:15,791 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:49,813 INFO [train.py:901] (2/4) Epoch 6, batch 4200, loss[loss=0.1915, simple_loss=0.2633, pruned_loss=0.05988, over 7806.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3387, pruned_loss=0.1044, over 1610649.90 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:58,994 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 2.806e+02 3.559e+02 4.787e+02 1.284e+03, threshold=7.119e+02, percent-clipped=4.0 +2023-02-06 02:37:05,643 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 02:37:07,980 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:23,783 INFO [train.py:901] (2/4) Epoch 6, batch 4250, loss[loss=0.3322, simple_loss=0.3882, pruned_loss=0.138, over 8487.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3382, pruned_loss=0.1044, over 1612300.87 frames. ], batch size: 28, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:37:24,685 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:29,278 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 02:37:41,582 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:58,489 INFO [train.py:901] (2/4) Epoch 6, batch 4300, loss[loss=0.2376, simple_loss=0.303, pruned_loss=0.08608, over 7662.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3395, pruned_loss=0.1048, over 1616771.88 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:38:00,027 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:38:08,662 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.771e+02 3.321e+02 4.102e+02 9.930e+02, threshold=6.641e+02, percent-clipped=2.0 +2023-02-06 02:38:17,337 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.3674, 1.5286, 5.4484, 2.0341, 4.8159, 4.5243, 5.0227, 4.9452], + device='cuda:2'), covar=tensor([0.0384, 0.4000, 0.0290, 0.2784, 0.0882, 0.0581, 0.0398, 0.0418], + device='cuda:2'), in_proj_covar=tensor([0.0363, 0.0504, 0.0456, 0.0442, 0.0512, 0.0421, 0.0422, 0.0478], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 02:38:33,190 INFO [train.py:901] (2/4) Epoch 6, batch 4350, loss[loss=0.2934, simple_loss=0.3542, pruned_loss=0.1163, over 8526.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.339, pruned_loss=0.1048, over 1611805.69 frames. ], batch size: 39, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:38:45,512 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4088, 1.8396, 2.8270, 1.0849, 1.8980, 1.6639, 1.5464, 1.7629], + device='cuda:2'), covar=tensor([0.1424, 0.1700, 0.0609, 0.3236, 0.1391, 0.2447, 0.1431, 0.1926], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0459, 0.0536, 0.0543, 0.0588, 0.0529, 0.0443, 0.0597], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:38:55,443 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6191, 1.8427, 1.8862, 1.6268, 0.9068, 2.0580, 0.2020, 1.2668], + device='cuda:2'), covar=tensor([0.2913, 0.2222, 0.0895, 0.2164, 0.6099, 0.0554, 0.5035, 0.2196], + device='cuda:2'), in_proj_covar=tensor([0.0137, 0.0137, 0.0086, 0.0183, 0.0229, 0.0083, 0.0147, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:39:00,035 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 02:39:06,569 INFO [train.py:901] (2/4) Epoch 6, batch 4400, loss[loss=0.2918, simple_loss=0.3584, pruned_loss=0.1126, over 8464.00 frames. ], tot_loss[loss=0.2746, simple_loss=0.3391, pruned_loss=0.1051, over 1610827.88 frames. ], batch size: 27, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:17,274 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 3.434e+02 4.206e+02 5.183e+02 1.151e+03, threshold=8.413e+02, percent-clipped=11.0 +2023-02-06 02:39:40,227 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 02:39:42,261 INFO [train.py:901] (2/4) Epoch 6, batch 4450, loss[loss=0.2786, simple_loss=0.3477, pruned_loss=0.1047, over 8448.00 frames. ], tot_loss[loss=0.275, simple_loss=0.339, pruned_loss=0.1055, over 1607199.09 frames. ], batch size: 27, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:45,222 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-06 02:39:58,545 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:13,883 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:16,359 INFO [train.py:901] (2/4) Epoch 6, batch 4500, loss[loss=0.3013, simple_loss=0.3461, pruned_loss=0.1283, over 7516.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3388, pruned_loss=0.105, over 1608019.31 frames. ], batch size: 18, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:40:26,435 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.100e+02 3.740e+02 5.266e+02 1.703e+03, threshold=7.479e+02, percent-clipped=4.0 +2023-02-06 02:40:31,753 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 02:40:48,245 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9173, 1.4148, 4.4168, 1.6364, 3.1580, 3.4246, 3.8484, 3.8763], + device='cuda:2'), covar=tensor([0.1116, 0.5565, 0.0842, 0.3740, 0.2320, 0.1315, 0.1064, 0.0995], + device='cuda:2'), in_proj_covar=tensor([0.0362, 0.0507, 0.0461, 0.0448, 0.0516, 0.0424, 0.0426, 0.0483], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 02:40:52,040 INFO [train.py:901] (2/4) Epoch 6, batch 4550, loss[loss=0.3109, simple_loss=0.3782, pruned_loss=0.1218, over 8316.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3392, pruned_loss=0.1051, over 1611154.98 frames. ], batch size: 25, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:18,800 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:27,059 INFO [train.py:901] (2/4) Epoch 6, batch 4600, loss[loss=0.3039, simple_loss=0.3566, pruned_loss=0.1256, over 6755.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3394, pruned_loss=0.1048, over 1613278.93 frames. 
], batch size: 73, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:34,832 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:36,658 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.977e+02 3.732e+02 4.434e+02 1.135e+03, threshold=7.465e+02, percent-clipped=1.0 +2023-02-06 02:41:50,227 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9134, 2.3380, 1.7418, 2.8675, 1.2251, 1.4785, 2.0004, 2.6801], + device='cuda:2'), covar=tensor([0.1117, 0.1158, 0.1435, 0.0498, 0.1736, 0.2160, 0.1459, 0.0788], + device='cuda:2'), in_proj_covar=tensor([0.0265, 0.0247, 0.0279, 0.0223, 0.0242, 0.0280, 0.0286, 0.0252], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 02:42:02,765 INFO [train.py:901] (2/4) Epoch 6, batch 4650, loss[loss=0.3126, simple_loss=0.3638, pruned_loss=0.1307, over 7817.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3378, pruned_loss=0.1036, over 1613283.73 frames. ], batch size: 20, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:08,404 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45074.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:13,626 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3540, 4.3277, 3.9710, 1.8352, 3.8617, 3.8237, 3.9727, 3.4945], + device='cuda:2'), covar=tensor([0.0849, 0.0613, 0.0961, 0.4373, 0.0795, 0.0746, 0.1329, 0.0751], + device='cuda:2'), in_proj_covar=tensor([0.0407, 0.0304, 0.0332, 0.0413, 0.0328, 0.0292, 0.0315, 0.0264], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:42:25,263 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:33,712 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2334, 3.1239, 2.9309, 1.4686, 2.8068, 2.8440, 2.9738, 2.6673], + device='cuda:2'), covar=tensor([0.1208, 0.0802, 0.1198, 0.4251, 0.1094, 0.0978, 0.1416, 0.1013], + device='cuda:2'), in_proj_covar=tensor([0.0407, 0.0303, 0.0334, 0.0412, 0.0329, 0.0293, 0.0314, 0.0263], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:42:36,969 INFO [train.py:901] (2/4) Epoch 6, batch 4700, loss[loss=0.3493, simple_loss=0.4055, pruned_loss=0.1466, over 8328.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3401, pruned_loss=0.1051, over 1614166.60 frames. ], batch size: 26, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:46,387 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.041e+02 3.187e+02 3.833e+02 4.569e+02 1.251e+03, threshold=7.667e+02, percent-clipped=2.0 +2023-02-06 02:43:02,585 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8319, 1.6187, 3.1469, 1.3261, 2.0291, 3.5557, 3.4129, 2.9677], + device='cuda:2'), covar=tensor([0.1115, 0.1412, 0.0423, 0.2164, 0.1049, 0.0276, 0.0475, 0.0720], + device='cuda:2'), in_proj_covar=tensor([0.0242, 0.0270, 0.0228, 0.0269, 0.0238, 0.0215, 0.0252, 0.0284], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 02:43:11,101 INFO [train.py:901] (2/4) Epoch 6, batch 4750, loss[loss=0.2448, simple_loss=0.3145, pruned_loss=0.08757, over 7801.00 frames. 
], tot_loss[loss=0.2756, simple_loss=0.3408, pruned_loss=0.1052, over 1615383.75 frames. ], batch size: 20, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:30,404 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 02:43:31,802 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 02:43:37,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6503, 4.1087, 2.3432, 2.5853, 2.9647, 1.9746, 2.2697, 3.0916], + device='cuda:2'), covar=tensor([0.1690, 0.0267, 0.0920, 0.0826, 0.0659, 0.1199, 0.1145, 0.0969], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0237, 0.0312, 0.0305, 0.0320, 0.0315, 0.0336, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 02:43:37,696 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7700, 2.4182, 4.6036, 1.3353, 3.0567, 2.1747, 1.7886, 2.4006], + device='cuda:2'), covar=tensor([0.1362, 0.1626, 0.0581, 0.2956, 0.1229, 0.2226, 0.1337, 0.2228], + device='cuda:2'), in_proj_covar=tensor([0.0463, 0.0460, 0.0527, 0.0539, 0.0582, 0.0525, 0.0437, 0.0589], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:43:41,057 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2932, 1.7005, 2.8602, 1.1816, 1.9288, 1.6391, 1.4432, 1.5556], + device='cuda:2'), covar=tensor([0.1774, 0.1897, 0.0724, 0.3422, 0.1526, 0.2727, 0.1693, 0.2196], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0462, 0.0530, 0.0541, 0.0584, 0.0526, 0.0439, 0.0591], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:43:46,230 INFO [train.py:901] (2/4) Epoch 6, batch 4800, loss[loss=0.279, simple_loss=0.3529, pruned_loss=0.1026, over 8341.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3401, pruned_loss=0.1052, over 1611328.85 frames. ], batch size: 24, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:55,769 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 3.021e+02 3.501e+02 4.623e+02 8.497e+02, threshold=7.001e+02, percent-clipped=1.0 +2023-02-06 02:44:16,232 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:16,880 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1587, 1.1176, 2.2459, 1.0623, 2.1059, 2.4623, 2.4569, 2.0593], + device='cuda:2'), covar=tensor([0.1020, 0.1177, 0.0475, 0.1846, 0.0580, 0.0379, 0.0536, 0.0744], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0264, 0.0224, 0.0265, 0.0232, 0.0211, 0.0247, 0.0279], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 02:44:20,163 INFO [train.py:901] (2/4) Epoch 6, batch 4850, loss[loss=0.3309, simple_loss=0.3738, pruned_loss=0.144, over 8519.00 frames. ], tot_loss[loss=0.275, simple_loss=0.3394, pruned_loss=0.1053, over 1611293.21 frames. ], batch size: 49, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:44:20,866 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 02:44:32,455 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3553, 1.3607, 1.4404, 1.2934, 0.8963, 1.4896, 0.4168, 1.1631], + device='cuda:2'), covar=tensor([0.2913, 0.1941, 0.0940, 0.2108, 0.5391, 0.0793, 0.5632, 0.2475], + device='cuda:2'), in_proj_covar=tensor([0.0138, 0.0136, 0.0088, 0.0185, 0.0228, 0.0083, 0.0147, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 02:44:32,469 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:35,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45285.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:50,596 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:56,001 INFO [train.py:901] (2/4) Epoch 6, batch 4900, loss[loss=0.2539, simple_loss=0.3249, pruned_loss=0.0915, over 8347.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3405, pruned_loss=0.1055, over 1615543.48 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:04,918 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7880, 1.4398, 5.8710, 2.2279, 5.1530, 4.8854, 5.3424, 5.2331], + device='cuda:2'), covar=tensor([0.0427, 0.4133, 0.0282, 0.2629, 0.0801, 0.0582, 0.0373, 0.0439], + device='cuda:2'), in_proj_covar=tensor([0.0366, 0.0514, 0.0456, 0.0444, 0.0510, 0.0416, 0.0427, 0.0477], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 02:45:05,437 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.896e+02 3.521e+02 4.501e+02 9.960e+02, threshold=7.042e+02, percent-clipped=7.0 +2023-02-06 02:45:30,240 INFO [train.py:901] (2/4) Epoch 6, batch 4950, loss[loss=0.2795, simple_loss=0.3619, pruned_loss=0.09855, over 8108.00 frames. ], tot_loss[loss=0.274, simple_loss=0.339, pruned_loss=0.1045, over 1614464.88 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:30,383 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45366.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:46:05,710 INFO [train.py:901] (2/4) Epoch 6, batch 5000, loss[loss=0.2638, simple_loss=0.3398, pruned_loss=0.09388, over 8316.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3395, pruned_loss=0.1049, over 1616265.47 frames. 
], batch size: 25, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:06,589 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6825, 1.7376, 3.2901, 1.4319, 2.1533, 3.7158, 3.6524, 3.1875], + device='cuda:2'), covar=tensor([0.1220, 0.1357, 0.0362, 0.1987, 0.0889, 0.0248, 0.0434, 0.0620], + device='cuda:2'), in_proj_covar=tensor([0.0236, 0.0261, 0.0222, 0.0264, 0.0230, 0.0210, 0.0246, 0.0275], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 02:46:07,202 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:15,104 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.255e+02 4.005e+02 4.887e+02 1.315e+03, threshold=8.009e+02, percent-clipped=7.0 +2023-02-06 02:46:16,063 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4650, 2.3723, 4.4668, 1.0509, 3.1684, 2.0515, 1.7671, 2.4412], + device='cuda:2'), covar=tensor([0.1719, 0.1881, 0.0681, 0.3690, 0.1301, 0.2614, 0.1611, 0.2340], + device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0463, 0.0525, 0.0540, 0.0586, 0.0524, 0.0444, 0.0591], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:46:24,042 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:40,007 INFO [train.py:901] (2/4) Epoch 6, batch 5050, loss[loss=0.2853, simple_loss=0.3599, pruned_loss=0.1054, over 8479.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3398, pruned_loss=0.1049, over 1616971.76 frames. ], batch size: 29, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:44,903 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0889, 3.4820, 2.3261, 2.4944, 2.8010, 1.8599, 2.5417, 2.8147], + device='cuda:2'), covar=tensor([0.1093, 0.0300, 0.0726, 0.0639, 0.0467, 0.1030, 0.0690, 0.0703], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0232, 0.0308, 0.0303, 0.0315, 0.0315, 0.0333, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 02:46:58,883 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 02:47:14,063 INFO [train.py:901] (2/4) Epoch 6, batch 5100, loss[loss=0.2093, simple_loss=0.2721, pruned_loss=0.07329, over 6766.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3401, pruned_loss=0.1051, over 1615120.49 frames. ], batch size: 15, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:47:24,714 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.842e+02 3.419e+02 4.219e+02 7.828e+02, threshold=6.837e+02, percent-clipped=0.0 +2023-02-06 02:47:26,968 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:43,445 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:49,315 INFO [train.py:901] (2/4) Epoch 6, batch 5150, loss[loss=0.3166, simple_loss=0.3693, pruned_loss=0.1319, over 8258.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3405, pruned_loss=0.1056, over 1611102.67 frames. 
], batch size: 24, lr: 1.25e-02, grad_scale: 16.0
+2023-02-06 02:48:23,749 INFO [train.py:901] (2/4) Epoch 6, batch 5200, loss[loss=0.2239, simple_loss=0.2925, pruned_loss=0.0777, over 5974.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3398, pruned_loss=0.1045, over 1612375.79 frames. ], batch size: 13, lr: 1.25e-02, grad_scale: 16.0
+2023-02-06 02:48:34,004 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.204e+02 4.015e+02 4.654e+02 8.708e+02, threshold=8.029e+02, percent-clipped=4.0
+2023-02-06 02:48:57,693 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 02:48:59,757 INFO [train.py:901] (2/4) Epoch 6, batch 5250, loss[loss=0.3028, simple_loss=0.3635, pruned_loss=0.1211, over 8485.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3417, pruned_loss=0.1057, over 1615746.36 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 16.0
+2023-02-06 02:49:30,224 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45710.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 02:49:34,102 INFO [train.py:901] (2/4) Epoch 6, batch 5300, loss[loss=0.2564, simple_loss=0.3083, pruned_loss=0.1023, over 7698.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3402, pruned_loss=0.1059, over 1610645.59 frames. ], batch size: 18, lr: 1.25e-02, grad_scale: 16.0
+2023-02-06 02:49:36,947 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45720.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:49:43,549 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.935e+02 3.437e+02 4.667e+02 1.283e+03, threshold=6.874e+02, percent-clipped=3.0
+2023-02-06 02:50:09,978 INFO [train.py:901] (2/4) Epoch 6, batch 5350, loss[loss=0.2472, simple_loss=0.3334, pruned_loss=0.08052, over 8350.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.3389, pruned_loss=0.1044, over 1611373.93 frames. ], batch size: 24, lr: 1.25e-02, grad_scale: 16.0
+2023-02-06 02:50:25,442 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:50:27,340 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45792.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:50:36,781 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3582, 1.8687, 3.2300, 2.5127, 2.6830, 2.0049, 1.5349, 1.3186],
+ device='cuda:2'), covar=tensor([0.2477, 0.2709, 0.0604, 0.1506, 0.1322, 0.1401, 0.1198, 0.2824],
+ device='cuda:2'), in_proj_covar=tensor([0.0803, 0.0739, 0.0628, 0.0725, 0.0837, 0.0675, 0.0638, 0.0673],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 02:50:42,757 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45814.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:50:42,777 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45814.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:50:43,910 INFO [train.py:901] (2/4) Epoch 6, batch 5400, loss[loss=0.2887, simple_loss=0.3556, pruned_loss=0.111, over 7242.00 frames. ], tot_loss[loss=0.2761, simple_loss=0.3407, pruned_loss=0.1057, over 1610119.59 frames.
], batch size: 71, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:49,968 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45825.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:50:53,703 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.842e+02 3.609e+02 4.644e+02 1.367e+03, threshold=7.218e+02, percent-clipped=2.0 +2023-02-06 02:50:59,048 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:17,300 INFO [train.py:901] (2/4) Epoch 6, batch 5450, loss[loss=0.2399, simple_loss=0.3023, pruned_loss=0.08873, over 7931.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.34, pruned_loss=0.1058, over 1605795.20 frames. ], batch size: 20, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:51:26,215 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:40,421 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.43 vs. limit=5.0 +2023-02-06 02:51:47,554 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 02:51:52,372 INFO [train.py:901] (2/4) Epoch 6, batch 5500, loss[loss=0.2886, simple_loss=0.3655, pruned_loss=0.1059, over 8348.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3399, pruned_loss=0.1052, over 1612125.25 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:51:54,758 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. limit=5.0 +2023-02-06 02:52:03,079 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.821e+02 3.418e+02 4.385e+02 9.516e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 02:52:25,879 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3466, 1.4675, 2.2777, 1.1757, 1.5879, 1.6457, 1.4134, 1.3819], + device='cuda:2'), covar=tensor([0.1433, 0.1969, 0.0720, 0.2976, 0.1289, 0.2423, 0.1511, 0.1663], + device='cuda:2'), in_proj_covar=tensor([0.0465, 0.0460, 0.0526, 0.0537, 0.0583, 0.0528, 0.0442, 0.0582], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:52:27,017 INFO [train.py:901] (2/4) Epoch 6, batch 5550, loss[loss=0.2377, simple_loss=0.307, pruned_loss=0.08415, over 8137.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3393, pruned_loss=0.1049, over 1609710.45 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:53:03,507 INFO [train.py:901] (2/4) Epoch 6, batch 5600, loss[loss=0.2318, simple_loss=0.3124, pruned_loss=0.07563, over 8295.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3395, pruned_loss=0.1045, over 1612667.41 frames. ], batch size: 23, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:13,368 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.809e+02 3.495e+02 4.670e+02 1.291e+03, threshold=6.989e+02, percent-clipped=6.0 +2023-02-06 02:53:36,015 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46064.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:53:37,306 INFO [train.py:901] (2/4) Epoch 6, batch 5650, loss[loss=0.2684, simple_loss=0.3339, pruned_loss=0.1014, over 8027.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3402, pruned_loss=0.1055, over 1613639.95 frames. 
], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:47,425 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46081.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:53:51,178 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 02:54:04,762 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46106.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:54:12,530 INFO [train.py:901] (2/4) Epoch 6, batch 5700, loss[loss=0.2588, simple_loss=0.3259, pruned_loss=0.09587, over 5094.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3393, pruned_loss=0.1047, over 1611246.23 frames. ], batch size: 11, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:22,543 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.829e+02 3.489e+02 4.392e+02 1.030e+03, threshold=6.978e+02, percent-clipped=3.0 +2023-02-06 02:54:25,977 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:54:35,703 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 02:54:46,331 INFO [train.py:901] (2/4) Epoch 6, batch 5750, loss[loss=0.2254, simple_loss=0.2922, pruned_loss=0.0793, over 8135.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3383, pruned_loss=0.1044, over 1608273.12 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:53,639 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 02:54:55,251 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:54:56,134 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 02:55:00,722 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0169, 1.5900, 1.3732, 1.2344, 1.0668, 1.2708, 1.6017, 1.2674], + device='cuda:2'), covar=tensor([0.0610, 0.1219, 0.1903, 0.1530, 0.0655, 0.1655, 0.0780, 0.0655], + device='cuda:2'), in_proj_covar=tensor([0.0122, 0.0170, 0.0212, 0.0174, 0.0121, 0.0179, 0.0136, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 02:55:21,181 INFO [train.py:901] (2/4) Epoch 6, batch 5800, loss[loss=0.3184, simple_loss=0.387, pruned_loss=0.1248, over 8506.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3395, pruned_loss=0.1049, over 1605597.27 frames. 
], batch size: 28, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:55:24,800 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:32,629 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.949e+02 3.358e+02 4.338e+02 9.471e+02, threshold=6.717e+02, percent-clipped=1.0 +2023-02-06 02:55:45,833 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:49,917 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2296, 1.4659, 2.2489, 1.1166, 1.5895, 1.5743, 1.4068, 1.4211], + device='cuda:2'), covar=tensor([0.1550, 0.1843, 0.0677, 0.2947, 0.1281, 0.2307, 0.1492, 0.1653], + device='cuda:2'), in_proj_covar=tensor([0.0462, 0.0460, 0.0525, 0.0535, 0.0580, 0.0517, 0.0436, 0.0581], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 02:55:55,786 INFO [train.py:901] (2/4) Epoch 6, batch 5850, loss[loss=0.2493, simple_loss=0.3089, pruned_loss=0.09485, over 7926.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3375, pruned_loss=0.1037, over 1606125.03 frames. ], batch size: 20, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:55:59,171 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0055, 1.5682, 4.1960, 1.4821, 3.4590, 3.3759, 3.6787, 3.6382], + device='cuda:2'), covar=tensor([0.0506, 0.3906, 0.0493, 0.3283, 0.1332, 0.0884, 0.0614, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0360, 0.0512, 0.0460, 0.0446, 0.0511, 0.0422, 0.0434, 0.0473], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 02:56:14,983 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:56:29,476 INFO [train.py:901] (2/4) Epoch 6, batch 5900, loss[loss=0.3348, simple_loss=0.3574, pruned_loss=0.1561, over 7658.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3364, pruned_loss=0.1028, over 1609122.38 frames. ], batch size: 19, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:39,482 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 3.022e+02 3.849e+02 5.141e+02 8.536e+02, threshold=7.697e+02, percent-clipped=7.0 +2023-02-06 02:56:43,703 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:57:04,663 INFO [train.py:901] (2/4) Epoch 6, batch 5950, loss[loss=0.3053, simple_loss=0.3611, pruned_loss=0.1247, over 8469.00 frames. ], tot_loss[loss=0.2713, simple_loss=0.3369, pruned_loss=0.1029, over 1610961.08 frames. ], batch size: 27, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:06,801 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4946, 1.7443, 1.5444, 1.3064, 1.1858, 1.3270, 1.8957, 1.7002], + device='cuda:2'), covar=tensor([0.0473, 0.1171, 0.1637, 0.1331, 0.0637, 0.1577, 0.0722, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0122, 0.0171, 0.0211, 0.0175, 0.0121, 0.0181, 0.0136, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 02:57:38,437 INFO [train.py:901] (2/4) Epoch 6, batch 6000, loss[loss=0.3002, simple_loss=0.3624, pruned_loss=0.1191, over 6826.00 frames. 
], tot_loss[loss=0.271, simple_loss=0.3366, pruned_loss=0.1027, over 1611417.33 frames. ], batch size: 15, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 02:57:38,437 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 02:57:50,764 INFO [train.py:935] (2/4) Epoch 6, validation: loss=0.2127, simple_loss=0.3094, pruned_loss=0.05799, over 944034.00 frames.
+2023-02-06 02:57:50,765 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB
+2023-02-06 02:58:01,258 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.683e+02 3.226e+02 4.100e+02 1.140e+03, threshold=6.453e+02, percent-clipped=1.0
+2023-02-06 02:58:04,340 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:58:21,767 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46460.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:58:25,653 INFO [train.py:901] (2/4) Epoch 6, batch 6050, loss[loss=0.2984, simple_loss=0.3391, pruned_loss=0.1289, over 7717.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3366, pruned_loss=0.1027, over 1611486.53 frames. ], batch size: 18, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 02:58:56,092 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:59:02,119 INFO [train.py:901] (2/4) Epoch 6, batch 6100, loss[loss=0.2476, simple_loss=0.3072, pruned_loss=0.09403, over 7939.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3366, pruned_loss=0.1027, over 1610855.19 frames. ], batch size: 20, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 02:59:12,643 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 3.046e+02 3.657e+02 4.398e+02 9.620e+02, threshold=7.315e+02, percent-clipped=4.0
+2023-02-06 02:59:13,539 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 02:59:24,604 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 02:59:29,768 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4198, 2.0046, 3.1396, 1.1930, 2.1559, 1.8956, 1.6190, 1.8380],
+ device='cuda:2'), covar=tensor([0.1518, 0.1649, 0.0588, 0.3219, 0.1320, 0.2251, 0.1362, 0.2016],
+ device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0474, 0.0540, 0.0560, 0.0597, 0.0533, 0.0446, 0.0597],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 02:59:37,763 INFO [train.py:901] (2/4) Epoch 6, batch 6150, loss[loss=0.2316, simple_loss=0.2828, pruned_loss=0.09017, over 6366.00 frames. ], tot_loss[loss=0.272, simple_loss=0.3373, pruned_loss=0.1034, over 1607979.05 frames. ], batch size: 14, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 02:59:56,265 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46592.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:00:11,863 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46613.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:00:13,806 INFO [train.py:901] (2/4) Epoch 6, batch 6200, loss[loss=0.2663, simple_loss=0.3252, pruned_loss=0.1037, over 8031.00 frames. ], tot_loss[loss=0.2731, simple_loss=0.3378, pruned_loss=0.1042, over 1605955.76 frames. ], batch size: 22, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 03:00:14,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46617.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:00:16,773 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46620.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:00:24,152 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.053e+02 3.051e+02 3.861e+02 4.926e+02 1.016e+03, threshold=7.722e+02, percent-clipped=3.0
+2023-02-06 03:00:28,972 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46638.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:00:48,695 INFO [train.py:901] (2/4) Epoch 6, batch 6250, loss[loss=0.2573, simple_loss=0.3402, pruned_loss=0.08725, over 8506.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.337, pruned_loss=0.1036, over 1606796.18 frames. ], batch size: 39, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 03:01:22,871 INFO [train.py:901] (2/4) Epoch 6, batch 6300, loss[loss=0.2699, simple_loss=0.3358, pruned_loss=0.102, over 7795.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3376, pruned_loss=0.1038, over 1608182.07 frames. ], batch size: 20, lr: 1.24e-02, grad_scale: 8.0
+2023-02-06 03:01:34,435 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.734e+02 3.399e+02 4.377e+02 1.449e+03, threshold=6.797e+02, percent-clipped=4.0
+2023-02-06 03:01:38,092 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1689, 1.1568, 1.2541, 1.1904, 0.8213, 1.2494, 0.1882, 0.9612],
+ device='cuda:2'), covar=tensor([0.3202, 0.2029, 0.1025, 0.1744, 0.5030, 0.0931, 0.4027, 0.1824],
+ device='cuda:2'), in_proj_covar=tensor([0.0135, 0.0132, 0.0082, 0.0182, 0.0223, 0.0082, 0.0144, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:01:49,382 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46753.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:01:57,776 INFO [train.py:901] (2/4) Epoch 6, batch 6350, loss[loss=0.3177, simple_loss=0.3829, pruned_loss=0.1262, over 8465.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3381, pruned_loss=0.1041, over 1610534.83 frames. ], batch size: 27, lr: 1.23e-02, grad_scale: 8.0
+2023-02-06 03:02:32,255 INFO [train.py:901] (2/4) Epoch 6, batch 6400, loss[loss=0.2736, simple_loss=0.3478, pruned_loss=0.09976, over 8458.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3378, pruned_loss=0.1041, over 1609082.77 frames. ], batch size: 27, lr: 1.23e-02, grad_scale: 8.0
+2023-02-06 03:02:35,378 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-06 03:02:41,187 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46828.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:02:43,104 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.744e+02 3.578e+02 4.396e+02 9.504e+02, threshold=7.157e+02, percent-clipped=5.0
+2023-02-06 03:03:07,334 INFO [train.py:901] (2/4) Epoch 6, batch 6450, loss[loss=0.2908, simple_loss=0.354, pruned_loss=0.1138, over 8451.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3375, pruned_loss=0.1034, over 1609641.86 frames.
], batch size: 27, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:40,297 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5898, 5.6445, 4.9054, 2.0435, 4.9556, 5.2170, 5.2684, 4.7987], + device='cuda:2'), covar=tensor([0.0667, 0.0394, 0.0804, 0.4954, 0.0592, 0.0539, 0.0842, 0.0548], + device='cuda:2'), in_proj_covar=tensor([0.0397, 0.0304, 0.0327, 0.0411, 0.0315, 0.0291, 0.0304, 0.0263], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:03:41,565 INFO [train.py:901] (2/4) Epoch 6, batch 6500, loss[loss=0.2912, simple_loss=0.3639, pruned_loss=0.1093, over 8466.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3375, pruned_loss=0.1032, over 1610088.72 frames. ], batch size: 25, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:51,600 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.994e+02 3.001e+02 3.759e+02 4.377e+02 1.086e+03, threshold=7.517e+02, percent-clipped=1.0 +2023-02-06 03:04:09,448 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:14,548 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46964.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:15,764 INFO [train.py:901] (2/4) Epoch 6, batch 6550, loss[loss=0.2843, simple_loss=0.3657, pruned_loss=0.1014, over 8450.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3372, pruned_loss=0.1029, over 1611121.84 frames. ], batch size: 29, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:37,526 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 03:04:45,809 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:50,976 INFO [train.py:901] (2/4) Epoch 6, batch 6600, loss[loss=0.2619, simple_loss=0.3379, pruned_loss=0.09299, over 8463.00 frames. ], tot_loss[loss=0.2729, simple_loss=0.3378, pruned_loss=0.104, over 1611406.45 frames. ], batch size: 25, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:56,456 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 03:05:01,160 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.927e+02 3.687e+02 4.772e+02 1.123e+03, threshold=7.374e+02, percent-clipped=4.0 +2023-02-06 03:05:03,298 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:08,620 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3614, 2.7181, 1.8213, 2.0352, 2.2348, 1.5225, 1.8829, 2.0997], + device='cuda:2'), covar=tensor([0.1194, 0.0252, 0.0823, 0.0536, 0.0501, 0.1041, 0.0856, 0.0746], + device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0233, 0.0309, 0.0302, 0.0312, 0.0315, 0.0340, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 03:05:25,535 INFO [train.py:901] (2/4) Epoch 6, batch 6650, loss[loss=0.2445, simple_loss=0.3085, pruned_loss=0.09022, over 7530.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3377, pruned_loss=0.1037, over 1608676.33 frames. 
], batch size: 18, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:05:30,596 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:35,435 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:39,577 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9277, 1.6001, 2.2564, 1.8872, 2.0059, 1.7235, 1.3826, 0.6603], + device='cuda:2'), covar=tensor([0.2398, 0.2482, 0.0650, 0.1272, 0.0992, 0.1251, 0.1178, 0.2400], + device='cuda:2'), in_proj_covar=tensor([0.0798, 0.0735, 0.0638, 0.0722, 0.0824, 0.0676, 0.0635, 0.0674], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:05:59,664 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7566, 2.0446, 1.6288, 2.5734, 1.3137, 1.3265, 1.6358, 2.0460], + device='cuda:2'), covar=tensor([0.1103, 0.1069, 0.1659, 0.0628, 0.1498, 0.2154, 0.1635, 0.1169], + device='cuda:2'), in_proj_covar=tensor([0.0267, 0.0247, 0.0283, 0.0227, 0.0246, 0.0274, 0.0282, 0.0247], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 03:06:00,868 INFO [train.py:901] (2/4) Epoch 6, batch 6700, loss[loss=0.2558, simple_loss=0.3177, pruned_loss=0.09701, over 8246.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3387, pruned_loss=0.1039, over 1612248.21 frames. ], batch size: 22, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:12,492 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.777e+02 3.640e+02 4.922e+02 1.093e+03, threshold=7.281e+02, percent-clipped=6.0 +2023-02-06 03:06:25,941 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 03:06:34,840 INFO [train.py:901] (2/4) Epoch 6, batch 6750, loss[loss=0.2843, simple_loss=0.3535, pruned_loss=0.1075, over 8501.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3385, pruned_loss=0.1034, over 1613272.38 frames. ], batch size: 28, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:38,950 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:10,709 INFO [train.py:901] (2/4) Epoch 6, batch 6800, loss[loss=0.2567, simple_loss=0.3214, pruned_loss=0.09601, over 7665.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.3381, pruned_loss=0.1033, over 1612089.62 frames. ], batch size: 19, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:12,676 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-06 03:07:12,931 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7075, 2.2069, 3.6891, 2.6741, 2.9644, 2.1579, 1.7361, 1.7751], + device='cuda:2'), covar=tensor([0.1992, 0.2380, 0.0556, 0.1443, 0.1311, 0.1245, 0.1141, 0.2581], + device='cuda:2'), in_proj_covar=tensor([0.0795, 0.0731, 0.0637, 0.0723, 0.0824, 0.0673, 0.0635, 0.0674], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:07:21,205 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.942e+02 3.591e+02 4.804e+02 1.528e+03, threshold=7.182e+02, percent-clipped=7.0 +2023-02-06 03:07:34,546 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7860, 3.7310, 3.3625, 1.5981, 3.2332, 3.2899, 3.4984, 2.9329], + device='cuda:2'), covar=tensor([0.0863, 0.0632, 0.0943, 0.4848, 0.0922, 0.0916, 0.1271, 0.0997], + device='cuda:2'), in_proj_covar=tensor([0.0400, 0.0303, 0.0326, 0.0414, 0.0319, 0.0291, 0.0304, 0.0264], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:07:40,019 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:44,568 INFO [train.py:901] (2/4) Epoch 6, batch 6850, loss[loss=0.3, simple_loss=0.3569, pruned_loss=0.1216, over 7650.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.339, pruned_loss=0.1037, over 1616129.97 frames. ], batch size: 19, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:52,848 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5948, 1.7935, 2.0161, 1.6308, 1.1053, 1.9683, 0.4314, 1.2187], + device='cuda:2'), covar=tensor([0.3174, 0.2424, 0.0752, 0.2044, 0.5440, 0.0825, 0.6752, 0.2690], + device='cuda:2'), in_proj_covar=tensor([0.0137, 0.0134, 0.0082, 0.0182, 0.0224, 0.0083, 0.0145, 0.0137], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:07:58,970 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:00,908 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 03:08:03,824 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3042, 2.4440, 1.9384, 3.0560, 1.4289, 1.4860, 2.0959, 2.5918], + device='cuda:2'), covar=tensor([0.0764, 0.1040, 0.1302, 0.0410, 0.1468, 0.1756, 0.1345, 0.0986], + device='cuda:2'), in_proj_covar=tensor([0.0265, 0.0249, 0.0278, 0.0224, 0.0247, 0.0272, 0.0279, 0.0247], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 03:08:11,541 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3984, 1.7717, 2.8862, 1.1847, 1.9997, 1.8008, 1.5113, 1.6944], + device='cuda:2'), covar=tensor([0.1491, 0.1702, 0.0635, 0.3058, 0.1294, 0.2180, 0.1500, 0.1927], + device='cuda:2'), in_proj_covar=tensor([0.0463, 0.0466, 0.0535, 0.0549, 0.0590, 0.0523, 0.0449, 0.0594], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 03:08:16,339 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. 
limit=2.0
+2023-02-06 03:08:19,247 INFO [train.py:901] (2/4) Epoch 6, batch 6900, loss[loss=0.2813, simple_loss=0.343, pruned_loss=0.1098, over 7983.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3394, pruned_loss=0.1039, over 1619563.48 frames. ], batch size: 21, lr: 1.23e-02, grad_scale: 8.0
+2023-02-06 03:08:22,399 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0
+2023-02-06 03:08:28,205 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47328.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:08:30,616 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 2.873e+02 3.537e+02 4.379e+02 9.664e+02, threshold=7.075e+02, percent-clipped=2.0
+2023-02-06 03:08:32,861 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47335.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:08:44,864 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47353.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:08:45,931 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-06 03:08:49,792 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47360.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:08:54,240 INFO [train.py:901] (2/4) Epoch 6, batch 6950, loss[loss=0.2834, simple_loss=0.351, pruned_loss=0.1079, over 8258.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.341, pruned_loss=0.1046, over 1618341.86 frames. ], batch size: 48, lr: 1.23e-02, grad_scale: 8.0
+2023-02-06 03:09:09,778 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 03:09:15,147 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47397.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:09:28,479 INFO [train.py:901] (2/4) Epoch 6, batch 7000, loss[loss=0.276, simple_loss=0.3466, pruned_loss=0.1027, over 8542.00 frames. ], tot_loss[loss=0.2725, simple_loss=0.3388, pruned_loss=0.1031, over 1615145.20 frames. ], batch size: 49, lr: 1.23e-02, grad_scale: 8.0
+2023-02-06 03:09:34,305 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 03:09:39,919 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.860e+02 2.784e+02 3.553e+02 4.437e+02 1.281e+03, threshold=7.106e+02, percent-clipped=4.0
+2023-02-06 03:10:03,567 INFO [train.py:901] (2/4) Epoch 6, batch 7050, loss[loss=0.205, simple_loss=0.2868, pruned_loss=0.0616, over 7931.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.3383, pruned_loss=0.1032, over 1612535.63 frames. ], batch size: 20, lr: 1.23e-02, grad_scale: 8.0
+2023-02-06 03:10:25,054 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 03:10:37,634 INFO [train.py:901] (2/4) Epoch 6, batch 7100, loss[loss=0.2547, simple_loss=0.3204, pruned_loss=0.09453, over 7816.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3379, pruned_loss=0.1032, over 1608984.85 frames.
], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:10:48,827 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.154e+02 3.207e+02 3.842e+02 5.073e+02 1.424e+03, threshold=7.684e+02, percent-clipped=2.0 +2023-02-06 03:10:56,276 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:12,602 INFO [train.py:901] (2/4) Epoch 6, batch 7150, loss[loss=0.2471, simple_loss=0.3153, pruned_loss=0.08944, over 8136.00 frames. ], tot_loss[loss=0.272, simple_loss=0.3381, pruned_loss=0.103, over 1611308.16 frames. ], batch size: 22, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:14,091 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:34,465 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1083, 2.3011, 1.8137, 2.6763, 1.2988, 1.5090, 1.7030, 2.4205], + device='cuda:2'), covar=tensor([0.0830, 0.0979, 0.1280, 0.0477, 0.1546, 0.1855, 0.1461, 0.0819], + device='cuda:2'), in_proj_covar=tensor([0.0267, 0.0250, 0.0278, 0.0226, 0.0248, 0.0270, 0.0279, 0.0245], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 03:11:37,948 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:46,767 INFO [train.py:901] (2/4) Epoch 6, batch 7200, loss[loss=0.2403, simple_loss=0.3175, pruned_loss=0.08154, over 8249.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3391, pruned_loss=0.1038, over 1615815.75 frames. ], batch size: 24, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:57,335 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6593, 1.9559, 2.1566, 1.5367, 0.9092, 2.1106, 0.2951, 1.3581], + device='cuda:2'), covar=tensor([0.3487, 0.2000, 0.0627, 0.2271, 0.6350, 0.0571, 0.4603, 0.1940], + device='cuda:2'), in_proj_covar=tensor([0.0135, 0.0132, 0.0080, 0.0176, 0.0220, 0.0080, 0.0140, 0.0134], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:11:57,791 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.983e+02 3.737e+02 4.630e+02 8.445e+02, threshold=7.473e+02, percent-clipped=4.0 +2023-02-06 03:12:22,028 INFO [train.py:901] (2/4) Epoch 6, batch 7250, loss[loss=0.275, simple_loss=0.343, pruned_loss=0.1035, over 8578.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.34, pruned_loss=0.1042, over 1619203.58 frames. ], batch size: 34, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:56,481 INFO [train.py:901] (2/4) Epoch 6, batch 7300, loss[loss=0.275, simple_loss=0.3459, pruned_loss=0.102, over 8182.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3399, pruned_loss=0.1039, over 1618279.59 frames. 
], batch size: 23, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:57,902 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:07,209 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.071e+02 3.696e+02 4.839e+02 1.031e+03, threshold=7.393e+02, percent-clipped=2.0 +2023-02-06 03:13:13,283 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:23,400 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:30,053 INFO [train.py:901] (2/4) Epoch 6, batch 7350, loss[loss=0.2896, simple_loss=0.3492, pruned_loss=0.115, over 8134.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3402, pruned_loss=0.1043, over 1617872.14 frames. ], batch size: 22, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:13:48,823 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 03:13:59,322 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:05,871 INFO [train.py:901] (2/4) Epoch 6, batch 7400, loss[loss=0.2657, simple_loss=0.3332, pruned_loss=0.09908, over 7191.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3401, pruned_loss=0.1048, over 1615431.56 frames. ], batch size: 72, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:08,012 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 03:14:13,519 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47827.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:14:17,406 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 3.124e+02 3.904e+02 4.877e+02 9.892e+02, threshold=7.808e+02, percent-clipped=5.0 +2023-02-06 03:14:33,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:35,676 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0074, 1.2468, 4.2514, 1.5331, 3.6463, 3.6007, 3.8745, 3.6952], + device='cuda:2'), covar=tensor([0.0455, 0.3882, 0.0384, 0.2847, 0.1100, 0.0695, 0.0489, 0.0551], + device='cuda:2'), in_proj_covar=tensor([0.0365, 0.0503, 0.0448, 0.0444, 0.0500, 0.0418, 0.0423, 0.0468], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-06 03:14:40,241 INFO [train.py:901] (2/4) Epoch 6, batch 7450, loss[loss=0.2576, simple_loss=0.3227, pruned_loss=0.09631, over 8288.00 frames. ], tot_loss[loss=0.2733, simple_loss=0.3392, pruned_loss=0.1037, over 1617233.18 frames. ], batch size: 23, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:46,243 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 03:15:15,316 INFO [train.py:901] (2/4) Epoch 6, batch 7500, loss[loss=0.2795, simple_loss=0.3494, pruned_loss=0.1048, over 8493.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3402, pruned_loss=0.1046, over 1618859.68 frames. 
], batch size: 29, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:25,968 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.098e+02 3.102e+02 3.706e+02 4.699e+02 1.511e+03, threshold=7.412e+02, percent-clipped=9.0 +2023-02-06 03:15:48,132 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4475, 1.7171, 1.8582, 1.5584, 1.0538, 1.8792, 0.3011, 1.2204], + device='cuda:2'), covar=tensor([0.3677, 0.1691, 0.0736, 0.1846, 0.5031, 0.0607, 0.3883, 0.2045], + device='cuda:2'), in_proj_covar=tensor([0.0138, 0.0133, 0.0081, 0.0182, 0.0227, 0.0082, 0.0143, 0.0137], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 03:15:49,281 INFO [train.py:901] (2/4) Epoch 6, batch 7550, loss[loss=0.2642, simple_loss=0.3285, pruned_loss=0.09996, over 7663.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.3381, pruned_loss=0.1038, over 1613895.29 frames. ], batch size: 19, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:54,925 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:11,855 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:24,742 INFO [train.py:901] (2/4) Epoch 6, batch 7600, loss[loss=0.3025, simple_loss=0.3637, pruned_loss=0.1206, over 7116.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3392, pruned_loss=0.1045, over 1613116.27 frames. ], batch size: 71, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:16:32,091 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.36 vs. limit=5.0 +2023-02-06 03:16:34,118 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.07 vs. limit=5.0 +2023-02-06 03:16:37,192 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.897e+02 3.536e+02 4.611e+02 2.294e+03, threshold=7.072e+02, percent-clipped=5.0 +2023-02-06 03:17:01,525 INFO [train.py:901] (2/4) Epoch 6, batch 7650, loss[loss=0.3069, simple_loss=0.3808, pruned_loss=0.1165, over 8495.00 frames. ], tot_loss[loss=0.2746, simple_loss=0.34, pruned_loss=0.1046, over 1615310.93 frames. ], batch size: 26, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:17,556 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48090.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:21,566 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5678, 2.0045, 2.2013, 1.0341, 2.2458, 1.3659, 0.5907, 1.7661], + device='cuda:2'), covar=tensor([0.0275, 0.0151, 0.0134, 0.0236, 0.0152, 0.0414, 0.0379, 0.0133], + device='cuda:2'), in_proj_covar=tensor([0.0337, 0.0248, 0.0205, 0.0300, 0.0240, 0.0383, 0.0314, 0.0286], + device='cuda:2'), out_proj_covar=tensor([1.1077e-04, 7.9298e-05, 6.4561e-05, 9.5806e-05, 7.7792e-05, 1.3366e-04, + 1.0300e-04, 9.2505e-05], device='cuda:2') +2023-02-06 03:17:24,183 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:32,465 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:34,938 INFO [train.py:901] (2/4) Epoch 6, batch 7700, loss[loss=0.2762, simple_loss=0.3468, pruned_loss=0.1028, over 8575.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.3392, pruned_loss=0.1044, over 1614094.04 frames. 
], batch size: 31, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:46,047 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.821e+02 3.617e+02 4.667e+02 9.808e+02, threshold=7.234e+02, percent-clipped=3.0 +2023-02-06 03:17:50,865 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:54,512 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 03:17:57,319 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 03:17:59,319 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:10,116 INFO [train.py:901] (2/4) Epoch 6, batch 7750, loss[loss=0.203, simple_loss=0.283, pruned_loss=0.06146, over 8082.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3385, pruned_loss=0.104, over 1616316.84 frames. ], batch size: 21, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:13,410 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48171.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:18:43,313 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:43,802 INFO [train.py:901] (2/4) Epoch 6, batch 7800, loss[loss=0.2298, simple_loss=0.2987, pruned_loss=0.08046, over 7922.00 frames. ], tot_loss[loss=0.2746, simple_loss=0.3398, pruned_loss=0.1047, over 1617348.54 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:53,444 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48230.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:18:54,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 3.053e+02 3.731e+02 4.789e+02 1.133e+03, threshold=7.462e+02, percent-clipped=3.0 +2023-02-06 03:19:04,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4787, 1.7952, 2.8291, 1.1482, 1.8963, 1.6771, 1.5217, 1.7031], + device='cuda:2'), covar=tensor([0.1408, 0.1733, 0.0589, 0.3184, 0.1380, 0.2401, 0.1428, 0.1896], + device='cuda:2'), in_proj_covar=tensor([0.0465, 0.0466, 0.0528, 0.0547, 0.0593, 0.0530, 0.0447, 0.0588], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 03:19:16,578 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:19:17,077 INFO [train.py:901] (2/4) Epoch 6, batch 7850, loss[loss=0.2736, simple_loss=0.3466, pruned_loss=0.1003, over 8357.00 frames. ], tot_loss[loss=0.2742, simple_loss=0.3396, pruned_loss=0.1044, over 1616119.00 frames. ], batch size: 24, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:19:30,594 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48286.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:19:51,008 INFO [train.py:901] (2/4) Epoch 6, batch 7900, loss[loss=0.2697, simple_loss=0.3401, pruned_loss=0.09961, over 8355.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.3384, pruned_loss=0.1032, over 1617128.01 frames. 
], batch size: 24, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:01,871 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.927e+02 3.494e+02 4.326e+02 7.205e+02, threshold=6.988e+02, percent-clipped=0.0 +2023-02-06 03:20:06,675 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3155, 1.4282, 1.2687, 1.9225, 0.9855, 1.1449, 1.2948, 1.4613], + device='cuda:2'), covar=tensor([0.1091, 0.1064, 0.1575, 0.0613, 0.1220, 0.1957, 0.1062, 0.0967], + device='cuda:2'), in_proj_covar=tensor([0.0263, 0.0241, 0.0275, 0.0223, 0.0239, 0.0267, 0.0273, 0.0243], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 03:20:09,322 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:25,105 INFO [train.py:901] (2/4) Epoch 6, batch 7950, loss[loss=0.3076, simple_loss=0.3753, pruned_loss=0.1199, over 8195.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3409, pruned_loss=0.1047, over 1619601.84 frames. ], batch size: 23, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:37,998 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7784, 1.4604, 3.2550, 1.3314, 2.2625, 3.5901, 3.4969, 3.0057], + device='cuda:2'), covar=tensor([0.1050, 0.1491, 0.0330, 0.2011, 0.0753, 0.0270, 0.0400, 0.0653], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0272, 0.0223, 0.0265, 0.0233, 0.0209, 0.0257, 0.0276], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 03:20:58,691 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:59,245 INFO [train.py:901] (2/4) Epoch 6, batch 8000, loss[loss=0.2442, simple_loss=0.3254, pruned_loss=0.08146, over 8356.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.3389, pruned_loss=0.1033, over 1618825.89 frames. ], batch size: 24, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:10,362 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.174e+02 2.873e+02 3.488e+02 4.217e+02 8.104e+02, threshold=6.977e+02, percent-clipped=2.0 +2023-02-06 03:21:11,764 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:16,772 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:33,916 INFO [train.py:901] (2/4) Epoch 6, batch 8050, loss[loss=0.2066, simple_loss=0.2824, pruned_loss=0.06542, over 7545.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3364, pruned_loss=0.103, over 1587867.38 frames. 
], batch size: 18, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:37,657 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:48,692 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5522, 1.9719, 2.0689, 1.0348, 2.2025, 1.3784, 0.4989, 1.7963], + device='cuda:2'), covar=tensor([0.0310, 0.0173, 0.0143, 0.0297, 0.0157, 0.0460, 0.0455, 0.0134], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0254, 0.0205, 0.0305, 0.0240, 0.0391, 0.0316, 0.0291], + device='cuda:2'), out_proj_covar=tensor([1.1342e-04, 8.1646e-05, 6.4531e-05, 9.7245e-05, 7.7309e-05, 1.3615e-04, + 1.0318e-04, 9.3782e-05], device='cuda:2') +2023-02-06 03:21:54,571 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48496.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:07,192 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 03:22:10,939 INFO [train.py:901] (2/4) Epoch 7, batch 0, loss[loss=0.2699, simple_loss=0.3259, pruned_loss=0.1069, over 7538.00 frames. ], tot_loss[loss=0.2699, simple_loss=0.3259, pruned_loss=0.1069, over 7538.00 frames. ], batch size: 18, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:10,940 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 03:22:22,761 INFO [train.py:935] (2/4) Epoch 7, validation: loss=0.2113, simple_loss=0.3091, pruned_loss=0.05678, over 944034.00 frames. +2023-02-06 03:22:22,763 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 03:22:28,413 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6705, 1.8911, 1.5661, 2.4143, 1.0188, 1.4724, 1.5769, 1.8919], + device='cuda:2'), covar=tensor([0.0942, 0.1020, 0.1361, 0.0500, 0.1362, 0.1635, 0.1183, 0.0950], + device='cuda:2'), in_proj_covar=tensor([0.0263, 0.0239, 0.0276, 0.0221, 0.0240, 0.0267, 0.0273, 0.0244], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 03:22:37,615 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:38,075 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225
+2023-02-06 03:22:39,696 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9919, 2.3085, 2.9876, 1.0234, 2.9609, 1.5682, 1.3707, 1.6258],
+ device='cuda:2'), covar=tensor([0.0386, 0.0195, 0.0143, 0.0337, 0.0192, 0.0440, 0.0464, 0.0269],
+ device='cuda:2'), in_proj_covar=tensor([0.0340, 0.0251, 0.0203, 0.0302, 0.0238, 0.0385, 0.0310, 0.0287],
+ device='cuda:2'), out_proj_covar=tensor([1.1150e-04, 8.0551e-05, 6.3816e-05, 9.6298e-05, 7.6935e-05, 1.3405e-04,
+ 1.0118e-04, 9.2305e-05], device='cuda:2')
+2023-02-06 03:22:41,674 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8283, 2.0357, 1.6763, 2.6967, 1.0545, 1.4340, 1.7468, 1.9018],
+ device='cuda:2'), covar=tensor([0.1128, 0.1282, 0.1654, 0.0502, 0.1824, 0.2070, 0.1455, 0.1378],
+ device='cuda:2'), in_proj_covar=tensor([0.0266, 0.0242, 0.0278, 0.0224, 0.0244, 0.0271, 0.0277, 0.0247],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 03:22:45,424 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.727e+02 3.570e+02 4.321e+02 1.428e+03, threshold=7.140e+02, percent-clipped=5.0
+2023-02-06 03:22:53,283 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48542.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:22:55,822 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48546.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:22:56,685 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 03:22:57,629 INFO [train.py:901] (2/4) Epoch 7, batch 50, loss[loss=0.2557, simple_loss=0.3361, pruned_loss=0.08763, over 8196.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3386, pruned_loss=0.1024, over 367158.58 frames. ], batch size: 23, lr: 1.14e-02, grad_scale: 8.0
+2023-02-06 03:22:57,785 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48549.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:23:09,791 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48567.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:23:12,934 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 03:23:14,279 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48574.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:23:31,384 INFO [train.py:901] (2/4) Epoch 7, batch 100, loss[loss=0.2682, simple_loss=0.342, pruned_loss=0.09716, over 8290.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3377, pruned_loss=0.1027, over 645090.83 frames. ], batch size: 23, lr: 1.14e-02, grad_scale: 8.0
+2023-02-06 03:23:35,003 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 03:23:44,209 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2690, 1.5189, 2.2709, 1.1321, 1.7282, 1.5847, 1.4109, 1.3311],
+ device='cuda:2'), covar=tensor([0.1557, 0.1768, 0.0649, 0.3044, 0.1199, 0.2441, 0.1540, 0.1672],
+ device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0473, 0.0526, 0.0547, 0.0594, 0.0534, 0.0451, 0.0592],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:23:54,579 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 2.936e+02 3.434e+02 4.642e+02 8.961e+02, threshold=6.868e+02, percent-clipped=3.0
+2023-02-06 03:24:06,741 INFO [train.py:901] (2/4) Epoch 7, batch 150, loss[loss=0.233, simple_loss=0.3054, pruned_loss=0.08029, over 8515.00 frames. ], tot_loss[loss=0.272, simple_loss=0.3377, pruned_loss=0.1032, over 859885.97 frames. ], batch size: 29, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:24:31,972 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48686.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:24:34,081 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48689.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:24:40,627 INFO [train.py:901] (2/4) Epoch 7, batch 200, loss[loss=0.2557, simple_loss=0.3316, pruned_loss=0.08986, over 8495.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3383, pruned_loss=0.1031, over 1029964.41 frames. ], batch size: 26, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:25:03,491 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.629e+02 3.306e+02 4.274e+02 1.004e+03, threshold=6.612e+02, percent-clipped=3.0
+2023-02-06 03:25:15,496 INFO [train.py:901] (2/4) Epoch 7, batch 250, loss[loss=0.2512, simple_loss=0.3269, pruned_loss=0.08778, over 8520.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.338, pruned_loss=0.1028, over 1158855.92 frames. ], batch size: 28, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:25:22,707 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48759.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:25:26,752 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 03:25:35,607 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 03:25:41,110 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48785.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:25:43,879 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5680, 1.4913, 3.1506, 1.3696, 2.0994, 3.3648, 3.3406, 2.7971],
+ device='cuda:2'), covar=tensor([0.1080, 0.1288, 0.0311, 0.1822, 0.0710, 0.0240, 0.0358, 0.0614],
+ device='cuda:2'), in_proj_covar=tensor([0.0239, 0.0272, 0.0226, 0.0269, 0.0235, 0.0213, 0.0264, 0.0278],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 03:25:50,541 INFO [train.py:901] (2/4) Epoch 7, batch 300, loss[loss=0.299, simple_loss=0.3649, pruned_loss=0.1165, over 8466.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3375, pruned_loss=0.1022, over 1262210.18 frames. ], batch size: 25, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:25:52,229 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48801.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:25:54,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48805.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:25:55,625 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7173, 3.0189, 2.2351, 3.6835, 1.9303, 1.7974, 2.4127, 3.0644],
+ device='cuda:2'), covar=tensor([0.0695, 0.0888, 0.1256, 0.0338, 0.1330, 0.1701, 0.1288, 0.0840],
+ device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0236, 0.0271, 0.0220, 0.0236, 0.0261, 0.0268, 0.0236],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 03:26:12,388 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:26:13,525 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.973e+02 3.476e+02 4.340e+02 1.124e+03, threshold=6.953e+02, percent-clipped=5.0
+2023-02-06 03:26:18,393 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48839.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:26:18,510 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4362, 1.9986, 3.1613, 2.4925, 2.7290, 2.1345, 1.5622, 1.3775],
+ device='cuda:2'), covar=tensor([0.2348, 0.2744, 0.0682, 0.1516, 0.1274, 0.1400, 0.1318, 0.2861],
+ device='cuda:2'), in_proj_covar=tensor([0.0800, 0.0742, 0.0640, 0.0731, 0.0829, 0.0683, 0.0638, 0.0672],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:26:25,097 INFO [train.py:901] (2/4) Epoch 7, batch 350, loss[loss=0.2571, simple_loss=0.3228, pruned_loss=0.09565, over 8246.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3371, pruned_loss=0.1021, over 1342098.44 frames. ], batch size: 22, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:26:30,610 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48856.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:26:43,227 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:27:00,371 INFO [train.py:901] (2/4) Epoch 7, batch 400, loss[loss=0.2901, simple_loss=0.3702, pruned_loss=0.105, over 8491.00 frames. ], tot_loss[loss=0.2728, simple_loss=0.339, pruned_loss=0.1033, over 1406099.76 frames. ], batch size: 29, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:27:01,277 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48900.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:27:06,124 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-06 03:27:22,466 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.855e+02 2.734e+02 3.619e+02 4.506e+02 1.679e+03, threshold=7.237e+02, percent-clipped=8.0
+2023-02-06 03:27:32,151 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48945.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:27:34,591 INFO [train.py:901] (2/4) Epoch 7, batch 450, loss[loss=0.2426, simple_loss=0.31, pruned_loss=0.0876, over 8098.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3387, pruned_loss=0.1021, over 1455325.31 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:27:49,649 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48970.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:28:10,025 INFO [train.py:901] (2/4) Epoch 7, batch 500, loss[loss=0.3092, simple_loss=0.3647, pruned_loss=0.1269, over 8108.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.338, pruned_loss=0.1013, over 1490278.32 frames. ], batch size: 23, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:28:24,469 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5305, 2.1860, 4.6791, 1.2110, 2.8495, 2.1151, 1.4784, 2.7694],
+ device='cuda:2'), covar=tensor([0.1672, 0.2050, 0.0594, 0.3371, 0.1523, 0.2521, 0.1728, 0.2271],
+ device='cuda:2'), in_proj_covar=tensor([0.0480, 0.0477, 0.0530, 0.0552, 0.0596, 0.0537, 0.0458, 0.0597],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:28:30,187 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0
+2023-02-06 03:28:32,336 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.572e+02 3.184e+02 4.227e+02 8.649e+02, threshold=6.369e+02, percent-clipped=1.0
+2023-02-06 03:28:32,518 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7297, 1.4262, 2.7334, 1.1462, 2.0420, 2.9409, 2.8911, 2.4727],
+ device='cuda:2'), covar=tensor([0.0928, 0.1356, 0.0467, 0.2137, 0.0757, 0.0319, 0.0558, 0.0698],
+ device='cuda:2'), in_proj_covar=tensor([0.0240, 0.0277, 0.0229, 0.0272, 0.0238, 0.0214, 0.0267, 0.0281],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 03:28:43,904 INFO [train.py:901] (2/4) Epoch 7, batch 550, loss[loss=0.238, simple_loss=0.3075, pruned_loss=0.08427, over 7798.00 frames. ], tot_loss[loss=0.2714, simple_loss=0.3387, pruned_loss=0.102, over 1517613.21 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 16.0
+2023-02-06 03:28:50,291 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49057.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:28:52,346 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4114, 2.4685, 1.6069, 1.9681, 2.0727, 1.2965, 1.6079, 1.9389],
+ device='cuda:2'), covar=tensor([0.1120, 0.0308, 0.0952, 0.0531, 0.0559, 0.1242, 0.0958, 0.0785],
+ device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0232, 0.0319, 0.0301, 0.0316, 0.0313, 0.0347, 0.0318],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 03:29:07,256 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:29:07,273 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1482, 2.5578, 3.2512, 1.0688, 3.0388, 1.9798, 1.5219, 1.8160],
+ device='cuda:2'), covar=tensor([0.0390, 0.0209, 0.0108, 0.0372, 0.0230, 0.0445, 0.0439, 0.0264],
+ device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0256, 0.0212, 0.0315, 0.0245, 0.0403, 0.0323, 0.0298],
+ device='cuda:2'), out_proj_covar=tensor([1.1616e-04, 8.1790e-05, 6.6471e-05, 1.0018e-04, 7.8685e-05, 1.3979e-04,
+ 1.0517e-04, 9.5776e-05], device='cuda:2')
+2023-02-06 03:29:07,958 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1023, 1.6603, 3.1538, 0.9573, 1.9408, 1.5598, 1.1478, 1.9016],
+ device='cuda:2'), covar=tensor([0.2133, 0.2104, 0.0630, 0.3951, 0.1704, 0.3025, 0.2214, 0.2330],
+ device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0469, 0.0523, 0.0541, 0.0587, 0.0528, 0.0452, 0.0590],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:29:19,499 INFO [train.py:901] (2/4) Epoch 7, batch 600, loss[loss=0.3052, simple_loss=0.3777, pruned_loss=0.1163, over 8464.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3384, pruned_loss=0.1024, over 1538317.57 frames. ], batch size: 25, lr: 1.13e-02, grad_scale: 16.0
+2023-02-06 03:29:31,450 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 03:29:41,663 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49130.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:29:42,811 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.845e+02 3.510e+02 4.694e+02 1.227e+03, threshold=7.020e+02, percent-clipped=5.0
+2023-02-06 03:29:54,583 INFO [train.py:901] (2/4) Epoch 7, batch 650, loss[loss=0.2546, simple_loss=0.3291, pruned_loss=0.09008, over 8630.00 frames. ], tot_loss[loss=0.2717, simple_loss=0.3384, pruned_loss=0.1024, over 1555048.41 frames. ], batch size: 34, lr: 1.13e-02, grad_scale: 16.0
+2023-02-06 03:29:58,984 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49155.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:29:59,706 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49156.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:30:17,665 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49181.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:30:18,990 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49183.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:30:24,026 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4102, 1.9001, 3.0530, 1.1807, 2.2334, 1.8687, 1.5113, 1.8058],
+ device='cuda:2'), covar=tensor([0.1756, 0.2089, 0.0717, 0.3915, 0.1367, 0.2586, 0.1814, 0.2272],
+ device='cuda:2'), in_proj_covar=tensor([0.0477, 0.0474, 0.0530, 0.0551, 0.0590, 0.0531, 0.0455, 0.0596],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:30:29,826 INFO [train.py:901] (2/4) Epoch 7, batch 700, loss[loss=0.2742, simple_loss=0.3314, pruned_loss=0.1085, over 7942.00 frames. ], tot_loss[loss=0.2723, simple_loss=0.3385, pruned_loss=0.1031, over 1565809.77 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 16.0
+2023-02-06 03:30:31,258 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49200.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:30:54,550 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 2.876e+02 3.436e+02 4.276e+02 6.994e+02, threshold=6.873e+02, percent-clipped=0.0
+2023-02-06 03:31:03,863 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 03:31:06,145 INFO [train.py:901] (2/4) Epoch 7, batch 750, loss[loss=0.3634, simple_loss=0.4091, pruned_loss=0.1588, over 8469.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3389, pruned_loss=0.1038, over 1578942.37 frames. ], batch size: 27, lr: 1.13e-02, grad_scale: 16.0
+2023-02-06 03:31:09,786 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:31:18,084 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 03:31:26,425 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 03:31:40,962 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49298.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:31:41,432 INFO [train.py:901] (2/4) Epoch 7, batch 800, loss[loss=0.2915, simple_loss=0.3491, pruned_loss=0.117, over 7925.00 frames. ], tot_loss[loss=0.272, simple_loss=0.3379, pruned_loss=0.1031, over 1590788.32 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 16.0
+2023-02-06 03:31:53,365 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49315.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:32:05,543 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.832e+02 3.318e+02 4.162e+02 1.224e+03, threshold=6.636e+02, percent-clipped=6.0
+2023-02-06 03:32:08,087 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.59 vs. limit=5.0
+2023-02-06 03:32:17,931 INFO [train.py:901] (2/4) Epoch 7, batch 850, loss[loss=0.2584, simple_loss=0.3256, pruned_loss=0.09562, over 7939.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3378, pruned_loss=0.1023, over 1602510.73 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:32:48,868 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 03:32:52,542 INFO [train.py:901] (2/4) Epoch 7, batch 900, loss[loss=0.2233, simple_loss=0.2924, pruned_loss=0.07712, over 7934.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.3367, pruned_loss=0.1018, over 1605093.18 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:32:58,995 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7131, 5.9017, 4.9066, 2.3477, 5.0886, 5.4384, 5.2793, 4.8704],
+ device='cuda:2'), covar=tensor([0.0588, 0.0439, 0.0949, 0.4620, 0.0595, 0.0728, 0.1275, 0.0664],
+ device='cuda:2'), in_proj_covar=tensor([0.0414, 0.0320, 0.0345, 0.0430, 0.0330, 0.0311, 0.0319, 0.0271],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:33:17,124 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.812e+02 3.278e+02 4.578e+02 1.649e+03, threshold=6.556e+02, percent-clipped=8.0
+2023-02-06 03:33:20,691 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0802, 1.4261, 1.5756, 1.2665, 1.0672, 1.3447, 1.6060, 1.4891],
+ device='cuda:2'), covar=tensor([0.0538, 0.1297, 0.1676, 0.1427, 0.0620, 0.1526, 0.0754, 0.0589],
+ device='cuda:2'), in_proj_covar=tensor([0.0118, 0.0166, 0.0207, 0.0169, 0.0118, 0.0174, 0.0129, 0.0145],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006],
+ device='cuda:2')
+2023-02-06 03:33:21,994 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49440.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:33:28,071 INFO [train.py:901] (2/4) Epoch 7, batch 950, loss[loss=0.3037, simple_loss=0.3763, pruned_loss=0.1155, over 8447.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3353, pruned_loss=0.1003, over 1606128.93 frames. ], batch size: 27, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:33:28,881 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49450.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:33:50,507 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 03:33:53,371 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1015, 4.0885, 3.6247, 1.8508, 3.5666, 3.6211, 3.8277, 3.2980],
+ device='cuda:2'), covar=tensor([0.0803, 0.0569, 0.0877, 0.4369, 0.0780, 0.0892, 0.0994, 0.0887],
+ device='cuda:2'), in_proj_covar=tensor([0.0410, 0.0315, 0.0342, 0.0424, 0.0325, 0.0309, 0.0314, 0.0270],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:34:03,657 INFO [train.py:901] (2/4) Epoch 7, batch 1000, loss[loss=0.2842, simple_loss=0.3527, pruned_loss=0.1078, over 8334.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3358, pruned_loss=0.1004, over 1611869.46 frames. ], batch size: 25, lr: 1.13e-02, grad_scale: 8.0
+2023-02-06 03:34:24,235 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 03:34:27,699 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.091e+02 3.599e+02 4.515e+02 1.445e+03, threshold=7.198e+02, percent-clipped=7.0
+2023-02-06 03:34:35,908 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 03:34:38,738 INFO [train.py:901] (2/4) Epoch 7, batch 1050, loss[loss=0.2655, simple_loss=0.3472, pruned_loss=0.09197, over 8102.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3352, pruned_loss=0.1007, over 1609452.78 frames. ], batch size: 23, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:34:43,050 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49554.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:34:54,543 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49571.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:35:00,748 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49579.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:35:13,256 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49596.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:35:14,560 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49598.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:35:15,147 INFO [train.py:901] (2/4) Epoch 7, batch 1100, loss[loss=0.2252, simple_loss=0.3007, pruned_loss=0.07489, over 7934.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3354, pruned_loss=0.1008, over 1610758.75 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:35:38,351 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.978e+02 2.766e+02 3.386e+02 4.310e+02 6.415e+02, threshold=6.771e+02, percent-clipped=0.0
+2023-02-06 03:35:46,088 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 03:35:49,462 INFO [train.py:901] (2/4) Epoch 7, batch 1150, loss[loss=0.2899, simple_loss=0.3469, pruned_loss=0.1165, over 8026.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3343, pruned_loss=0.1001, over 1609700.90 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:35:55,626 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1423, 1.0732, 1.1426, 1.1055, 0.7976, 1.2015, 0.0299, 0.8573],
+ device='cuda:2'), covar=tensor([0.2562, 0.1939, 0.0869, 0.1543, 0.4995, 0.0706, 0.3812, 0.1921],
+ device='cuda:2'), in_proj_covar=tensor([0.0140, 0.0130, 0.0083, 0.0182, 0.0224, 0.0084, 0.0141, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:36:13,940 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0
+2023-02-06 03:36:23,369 INFO [train.py:901] (2/4) Epoch 7, batch 1200, loss[loss=0.2751, simple_loss=0.3409, pruned_loss=0.1047, over 8511.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3348, pruned_loss=0.1005, over 1611912.44 frames. ], batch size: 49, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:36:33,699 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49713.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:36:47,174 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.915e+02 3.820e+02 5.048e+02 1.193e+03, threshold=7.640e+02, percent-clipped=11.0
+2023-02-06 03:36:57,970 INFO [train.py:901] (2/4) Epoch 7, batch 1250, loss[loss=0.2665, simple_loss=0.337, pruned_loss=0.09802, over 8392.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3329, pruned_loss=0.09995, over 1608436.72 frames. ], batch size: 49, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:37:09,616 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4144, 1.9059, 3.1012, 1.1532, 2.2477, 1.8535, 1.5654, 1.8612],
+ device='cuda:2'), covar=tensor([0.1653, 0.1898, 0.0717, 0.3419, 0.1460, 0.2556, 0.1581, 0.2190],
+ device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0480, 0.0538, 0.0552, 0.0601, 0.0534, 0.0455, 0.0599],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:37:10,636 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0
+2023-02-06 03:37:12,336 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3411, 2.2349, 3.5402, 1.1330, 2.5742, 1.8158, 1.7457, 1.9687],
+ device='cuda:2'), covar=tensor([0.1789, 0.2015, 0.0643, 0.3560, 0.1535, 0.2730, 0.1597, 0.2560],
+ device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0480, 0.0538, 0.0553, 0.0602, 0.0535, 0.0456, 0.0599],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:37:22,187 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49784.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:37:22,255 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49784.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:37:29,679 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49794.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:37:32,800 INFO [train.py:901] (2/4) Epoch 7, batch 1300, loss[loss=0.2974, simple_loss=0.3577, pruned_loss=0.1186, over 7936.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3329, pruned_loss=0.1, over 1604460.53 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:37:57,653 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.650e+02 3.390e+02 4.402e+02 9.600e+02, threshold=6.781e+02, percent-clipped=3.0
+2023-02-06 03:38:08,115 INFO [train.py:901] (2/4) Epoch 7, batch 1350, loss[loss=0.2532, simple_loss=0.331, pruned_loss=0.08769, over 8355.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3327, pruned_loss=0.0999, over 1608806.34 frames. ], batch size: 24, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:38:28,832 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1064, 1.3066, 4.2826, 1.5697, 3.7475, 3.5755, 3.8120, 3.6848],
+ device='cuda:2'), covar=tensor([0.0397, 0.3567, 0.0383, 0.2606, 0.0908, 0.0616, 0.0433, 0.0543],
+ device='cuda:2'), in_proj_covar=tensor([0.0366, 0.0509, 0.0461, 0.0447, 0.0508, 0.0423, 0.0426, 0.0481],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001],
+ device='cuda:2')
+2023-02-06 03:38:42,135 INFO [train.py:901] (2/4) Epoch 7, batch 1400, loss[loss=0.2708, simple_loss=0.351, pruned_loss=0.09535, over 8466.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.334, pruned_loss=0.1006, over 1609924.84 frames. ], batch size: 25, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:38:43,013 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49899.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:38:49,798 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49909.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:39:07,149 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 2.921e+02 3.790e+02 4.996e+02 8.997e+02, threshold=7.579e+02, percent-clipped=6.0
+2023-02-06 03:39:11,338 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 03:39:18,087 INFO [train.py:901] (2/4) Epoch 7, batch 1450, loss[loss=0.237, simple_loss=0.3053, pruned_loss=0.08439, over 7816.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3339, pruned_loss=0.1005, over 1613340.51 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:39:26,267 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2063, 1.3965, 2.3376, 1.1520, 2.0366, 2.5281, 2.5000, 2.1456],
+ device='cuda:2'), covar=tensor([0.0968, 0.1065, 0.0440, 0.1915, 0.0593, 0.0366, 0.0535, 0.0752],
+ device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0275, 0.0231, 0.0272, 0.0236, 0.0215, 0.0269, 0.0281],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 03:39:31,684 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:39:41,670 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.71 vs. limit=5.0
+2023-02-06 03:39:49,099 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49994.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:39:52,397 INFO [train.py:901] (2/4) Epoch 7, batch 1500, loss[loss=0.2615, simple_loss=0.3415, pruned_loss=0.09074, over 8331.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3338, pruned_loss=0.1005, over 1612168.10 frames. ], batch size: 25, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:40:07,867 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 03:40:16,574 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.823e+02 3.555e+02 4.038e+02 9.229e+02, threshold=7.110e+02, percent-clipped=3.0
+2023-02-06 03:40:27,887 INFO [train.py:901] (2/4) Epoch 7, batch 1550, loss[loss=0.3726, simple_loss=0.4146, pruned_loss=0.1653, over 8524.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3343, pruned_loss=0.1009, over 1607817.41 frames. ], batch size: 26, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:40:57,942 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0873, 3.0445, 3.3287, 2.4728, 1.7996, 3.2207, 0.5397, 2.1194],
+ device='cuda:2'), covar=tensor([0.2600, 0.1930, 0.0723, 0.2570, 0.6234, 0.0508, 0.5331, 0.2319],
+ device='cuda:2'), in_proj_covar=tensor([0.0141, 0.0134, 0.0082, 0.0186, 0.0225, 0.0084, 0.0144, 0.0140],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:41:02,311 INFO [train.py:901] (2/4) Epoch 7, batch 1600, loss[loss=0.321, simple_loss=0.3768, pruned_loss=0.1326, over 8031.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3349, pruned_loss=0.1012, over 1607621.45 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:41:22,665 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50128.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:41:25,916 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.849e+02 3.464e+02 4.418e+02 7.019e+02, threshold=6.928e+02, percent-clipped=0.0
+2023-02-06 03:41:35,387 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50146.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:41:35,570 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-06 03:41:37,156 INFO [train.py:901] (2/4) Epoch 7, batch 1650, loss[loss=0.2625, simple_loss=0.3358, pruned_loss=0.09455, over 8607.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3336, pruned_loss=0.09971, over 1608186.95 frames. ], batch size: 39, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:41:40,108 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1054, 1.3343, 1.2422, 0.7107, 1.3220, 1.0140, 0.4186, 1.2286],
+ device='cuda:2'), covar=tensor([0.0167, 0.0102, 0.0085, 0.0155, 0.0100, 0.0275, 0.0265, 0.0089],
+ device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0258, 0.0215, 0.0311, 0.0249, 0.0403, 0.0315, 0.0292],
+ device='cuda:2'), out_proj_covar=tensor([1.1342e-04, 8.1425e-05, 6.7136e-05, 9.8328e-05, 7.9676e-05, 1.3926e-04,
+ 1.0242e-04, 9.3340e-05], device='cuda:2')
+2023-02-06 03:41:41,429 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50155.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:41:48,658 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50165.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:41:59,247 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50180.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:42:05,844 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50190.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:42:11,948 INFO [train.py:901] (2/4) Epoch 7, batch 1700, loss[loss=0.2807, simple_loss=0.3428, pruned_loss=0.1093, over 8106.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3341, pruned_loss=0.1001, over 1611089.86 frames. ], batch size: 23, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:42:22,984 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50215.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:42:35,242 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.886e+02 3.481e+02 4.608e+02 1.233e+03, threshold=6.962e+02, percent-clipped=3.0
+2023-02-06 03:42:41,502 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4592, 2.0479, 3.3814, 1.1173, 2.5005, 1.6914, 1.6307, 2.1408],
+ device='cuda:2'), covar=tensor([0.1544, 0.1861, 0.0633, 0.3315, 0.1279, 0.2652, 0.1437, 0.2172],
+ device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0472, 0.0530, 0.0550, 0.0590, 0.0534, 0.0451, 0.0592],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:42:42,134 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50243.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:42:46,680 INFO [train.py:901] (2/4) Epoch 7, batch 1750, loss[loss=0.2956, simple_loss=0.3577, pruned_loss=0.1167, over 8282.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3353, pruned_loss=0.1006, over 1615540.72 frames. ], batch size: 23, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:43:14,405 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1905, 1.2827, 2.3361, 1.1834, 2.2042, 2.5401, 2.5420, 2.1026],
+ device='cuda:2'), covar=tensor([0.0910, 0.1059, 0.0417, 0.1803, 0.0478, 0.0331, 0.0474, 0.0778],
+ device='cuda:2'), in_proj_covar=tensor([0.0240, 0.0278, 0.0232, 0.0275, 0.0239, 0.0215, 0.0273, 0.0283],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 03:43:14,442 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3100, 1.5048, 1.4112, 1.9517, 0.6812, 1.1393, 1.4449, 1.5164],
+ device='cuda:2'), covar=tensor([0.1133, 0.0940, 0.1456, 0.0603, 0.1434, 0.1927, 0.0955, 0.0845],
+ device='cuda:2'), in_proj_covar=tensor([0.0257, 0.0239, 0.0281, 0.0227, 0.0240, 0.0270, 0.0278, 0.0242],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 03:43:21,550 INFO [train.py:901] (2/4) Epoch 7, batch 1800, loss[loss=0.2629, simple_loss=0.3428, pruned_loss=0.09151, over 8325.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3358, pruned_loss=0.1008, over 1618443.42 frames. ], batch size: 25, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:43:44,800 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.922e+02 3.562e+02 4.379e+02 1.030e+03, threshold=7.125e+02, percent-clipped=4.0
+2023-02-06 03:43:56,170 INFO [train.py:901] (2/4) Epoch 7, batch 1850, loss[loss=0.2761, simple_loss=0.3414, pruned_loss=0.1054, over 8188.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3352, pruned_loss=0.1006, over 1615873.29 frames. ], batch size: 23, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:44:30,593 INFO [train.py:901] (2/4) Epoch 7, batch 1900, loss[loss=0.3132, simple_loss=0.3664, pruned_loss=0.13, over 8585.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3338, pruned_loss=0.09963, over 1611039.27 frames. ], batch size: 49, lr: 1.12e-02, grad_scale: 8.0
+2023-02-06 03:44:43,991 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 03:44:53,923 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.786e+02 3.637e+02 4.614e+02 8.948e+02, threshold=7.273e+02, percent-clipped=3.0
+2023-02-06 03:44:56,013 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 03:45:04,598 INFO [train.py:901] (2/4) Epoch 7, batch 1950, loss[loss=0.3575, simple_loss=0.3893, pruned_loss=0.1629, over 8062.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3337, pruned_loss=0.09975, over 1613139.17 frames. ], batch size: 73, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:45:12,441 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0
+2023-02-06 03:45:15,311 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 03:45:33,288 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:45:39,204 INFO [train.py:901] (2/4) Epoch 7, batch 2000, loss[loss=0.2563, simple_loss=0.3321, pruned_loss=0.09028, over 7977.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3336, pruned_loss=0.09994, over 1610481.38 frames. ], batch size: 21, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:45:39,435 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50499.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:45:56,773 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50524.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:46:03,226 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.800e+02 3.583e+02 4.591e+02 1.075e+03, threshold=7.166e+02, percent-clipped=7.0
+2023-02-06 03:46:13,949 INFO [train.py:901] (2/4) Epoch 7, batch 2050, loss[loss=0.296, simple_loss=0.3602, pruned_loss=0.1159, over 8562.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3349, pruned_loss=0.1007, over 1615724.45 frames. ], batch size: 39, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:46:20,539 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50559.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:46:23,779 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50564.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:46:28,399 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3952, 2.0725, 3.6244, 1.1685, 2.5339, 1.9033, 1.6873, 2.2076],
+ device='cuda:2'), covar=tensor([0.1590, 0.1754, 0.0530, 0.3410, 0.1264, 0.2437, 0.1442, 0.2122],
+ device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0465, 0.0526, 0.0544, 0.0589, 0.0527, 0.0447, 0.0586],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:46:37,581 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5280, 1.2888, 4.6937, 1.7323, 4.0261, 3.9432, 4.2096, 4.0771],
+ device='cuda:2'), covar=tensor([0.0369, 0.3937, 0.0303, 0.2702, 0.0967, 0.0624, 0.0423, 0.0495],
+ device='cuda:2'), in_proj_covar=tensor([0.0370, 0.0509, 0.0464, 0.0456, 0.0515, 0.0429, 0.0432, 0.0493],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 03:46:47,297 INFO [train.py:901] (2/4) Epoch 7, batch 2100, loss[loss=0.3125, simple_loss=0.3778, pruned_loss=0.1236, over 8600.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3359, pruned_loss=0.1009, over 1619523.37 frames. ], batch size: 34, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:46:52,370 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:47:11,024 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 3.066e+02 3.697e+02 4.610e+02 1.063e+03, threshold=7.394e+02, percent-clipped=3.0
+2023-02-06 03:47:22,367 INFO [train.py:901] (2/4) Epoch 7, batch 2150, loss[loss=0.263, simple_loss=0.3323, pruned_loss=0.09685, over 8526.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3335, pruned_loss=0.1001, over 1610387.25 frames. ], batch size: 26, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:47:40,258 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50674.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:47:47,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2819, 1.4251, 1.6646, 1.3445, 1.2254, 1.3706, 1.8203, 1.8962],
+ device='cuda:2'), covar=tensor([0.0523, 0.1278, 0.1850, 0.1466, 0.0630, 0.1564, 0.0724, 0.0556],
+ device='cuda:2'), in_proj_covar=tensor([0.0117, 0.0167, 0.0208, 0.0171, 0.0118, 0.0176, 0.0130, 0.0147],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+ device='cuda:2')
+2023-02-06 03:47:56,608 INFO [train.py:901] (2/4) Epoch 7, batch 2200, loss[loss=0.2522, simple_loss=0.331, pruned_loss=0.08667, over 8105.00 frames. ], tot_loss[loss=0.2658, simple_loss=0.3328, pruned_loss=0.09941, over 1606621.17 frames. ], batch size: 23, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:48:05,939 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 03:48:20,824 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.939e+02 3.492e+02 4.230e+02 8.261e+02, threshold=6.983e+02, percent-clipped=2.0
+2023-02-06 03:48:30,767 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1382, 1.1054, 1.1385, 1.1270, 0.8776, 1.2491, 0.1132, 0.9026],
+ device='cuda:2'), covar=tensor([0.3378, 0.2010, 0.0853, 0.1534, 0.4922, 0.0782, 0.3930, 0.1878],
+ device='cuda:2'), in_proj_covar=tensor([0.0141, 0.0135, 0.0082, 0.0185, 0.0227, 0.0085, 0.0144, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:48:31,237 INFO [train.py:901] (2/4) Epoch 7, batch 2250, loss[loss=0.2849, simple_loss=0.3244, pruned_loss=0.1227, over 7684.00 frames. ], tot_loss[loss=0.2666, simple_loss=0.3338, pruned_loss=0.0997, over 1607993.60 frames. ], batch size: 18, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:49:05,383 INFO [train.py:901] (2/4) Epoch 7, batch 2300, loss[loss=0.275, simple_loss=0.3484, pruned_loss=0.1008, over 8029.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.3354, pruned_loss=0.1008, over 1607944.96 frames. ], batch size: 22, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:49:06,291 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50800.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:49:23,399 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50826.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:49:28,605 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.140e+02 4.073e+02 5.620e+02 1.608e+03, threshold=8.146e+02, percent-clipped=16.0
+2023-02-06 03:49:29,696 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 03:49:38,685 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4631, 1.8102, 3.1382, 1.1433, 2.2593, 1.8663, 1.4449, 1.9314],
+ device='cuda:2'), covar=tensor([0.1513, 0.1922, 0.0496, 0.3406, 0.1268, 0.2395, 0.1612, 0.1965],
+ device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0473, 0.0535, 0.0557, 0.0596, 0.0537, 0.0457, 0.0596],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 03:49:39,826 INFO [train.py:901] (2/4) Epoch 7, batch 2350, loss[loss=0.235, simple_loss=0.3125, pruned_loss=0.07868, over 8073.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3359, pruned_loss=0.101, over 1609575.57 frames. ], batch size: 21, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:49:44,715 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5691, 2.6793, 1.7792, 2.1730, 2.1536, 1.3710, 1.9584, 2.0397],
+ device='cuda:2'), covar=tensor([0.1262, 0.0244, 0.1008, 0.0519, 0.0607, 0.1300, 0.0912, 0.0860],
+ device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0229, 0.0313, 0.0299, 0.0307, 0.0316, 0.0339, 0.0316],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 03:49:47,971 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:49:55,891 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-02-06 03:50:05,009 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:50:05,743 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4075, 1.9372, 3.0511, 2.4139, 2.5016, 2.1001, 1.5566, 1.1316],
+ device='cuda:2'), covar=tensor([0.2501, 0.2722, 0.0634, 0.1544, 0.1383, 0.1430, 0.1392, 0.3076],
+ device='cuda:2'), in_proj_covar=tensor([0.0835, 0.0770, 0.0668, 0.0767, 0.0864, 0.0709, 0.0664, 0.0707],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:50:14,220 INFO [train.py:901] (2/4) Epoch 7, batch 2400, loss[loss=0.2925, simple_loss=0.3606, pruned_loss=0.1122, over 7802.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3359, pruned_loss=0.1009, over 1610774.35 frames. ], batch size: 19, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:50:20,198 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50908.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:50:35,186 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50930.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:50:36,977 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.900e+02 3.414e+02 4.196e+02 7.276e+02, threshold=6.828e+02, percent-clipped=0.0
+2023-02-06 03:50:47,532 INFO [train.py:901] (2/4) Epoch 7, batch 2450, loss[loss=0.2805, simple_loss=0.3488, pruned_loss=0.1061, over 8500.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3359, pruned_loss=0.1012, over 1613194.25 frames. ], batch size: 28, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:50:48,446 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0726, 1.2978, 1.1757, 0.4326, 1.2010, 0.9871, 0.1521, 1.1657],
+ device='cuda:2'), covar=tensor([0.0187, 0.0157, 0.0148, 0.0245, 0.0169, 0.0425, 0.0337, 0.0133],
+ device='cuda:2'), in_proj_covar=tensor([0.0343, 0.0251, 0.0213, 0.0305, 0.0249, 0.0394, 0.0310, 0.0286],
+ device='cuda:2'), out_proj_covar=tensor([1.1103e-04, 7.8587e-05, 6.6356e-05, 9.6048e-05, 7.9174e-05, 1.3551e-04,
+ 1.0040e-04, 9.0933e-05], device='cuda:2')
+2023-02-06 03:50:52,348 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50955.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:50:59,072 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50964.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:51:22,791 INFO [train.py:901] (2/4) Epoch 7, batch 2500, loss[loss=0.2903, simple_loss=0.3542, pruned_loss=0.1132, over 8506.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.335, pruned_loss=0.1003, over 1615345.44 frames. ], batch size: 28, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:51:39,933 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51023.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:51:46,386 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.634e+02 3.421e+02 4.023e+02 8.503e+02, threshold=6.842e+02, percent-clipped=1.0
+2023-02-06 03:51:56,889 INFO [train.py:901] (2/4) Epoch 7, batch 2550, loss[loss=0.267, simple_loss=0.3433, pruned_loss=0.09532, over 8462.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3357, pruned_loss=0.1009, over 1614830.89 frames. ], batch size: 25, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:51:58,478 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0
+2023-02-06 03:52:04,252 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3504, 1.5351, 1.2860, 1.8881, 0.6935, 1.0798, 1.2767, 1.4673],
+ device='cuda:2'), covar=tensor([0.1143, 0.0970, 0.1506, 0.0694, 0.1580, 0.2100, 0.1039, 0.1024],
+ device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0236, 0.0275, 0.0221, 0.0238, 0.0266, 0.0274, 0.0241],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 03:52:18,135 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51080.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:52:31,080 INFO [train.py:901] (2/4) Epoch 7, batch 2600, loss[loss=0.3208, simple_loss=0.3685, pruned_loss=0.1366, over 8611.00 frames. ], tot_loss[loss=0.2695, simple_loss=0.3365, pruned_loss=0.1013, over 1618537.43 frames. ], batch size: 34, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:52:37,432 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-06 03:52:45,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6478, 1.7563, 2.0462, 1.7878, 1.0425, 2.1407, 0.2893, 1.2133],
+ device='cuda:2'), covar=tensor([0.2829, 0.2167, 0.0737, 0.2108, 0.6345, 0.0724, 0.4665, 0.2604],
+ device='cuda:2'), in_proj_covar=tensor([0.0142, 0.0138, 0.0083, 0.0186, 0.0234, 0.0085, 0.0145, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 03:52:54,868 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.187e+02 3.140e+02 3.874e+02 4.757e+02 8.436e+02, threshold=7.747e+02, percent-clipped=5.0
+2023-02-06 03:53:02,030 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51144.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:53:05,172 INFO [train.py:901] (2/4) Epoch 7, batch 2650, loss[loss=0.359, simple_loss=0.3951, pruned_loss=0.1615, over 8346.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3365, pruned_loss=0.1018, over 1617595.27 frames. ], batch size: 26, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:53:19,257 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51170.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:53:22,246 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 03:53:23,605 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 03:53:32,645 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51190.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:53:39,229 INFO [train.py:901] (2/4) Epoch 7, batch 2700, loss[loss=0.2476, simple_loss=0.3174, pruned_loss=0.08886, over 7978.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3355, pruned_loss=0.101, over 1613901.32 frames. ], batch size: 21, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:54:02,457 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 3.046e+02 3.584e+02 4.560e+02 9.753e+02, threshold=7.169e+02, percent-clipped=4.0
+2023-02-06 03:54:12,325 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8433, 1.6582, 5.9156, 2.1272, 5.2945, 5.0580, 5.5259, 5.4265],
+ device='cuda:2'), covar=tensor([0.0322, 0.3706, 0.0215, 0.2541, 0.0860, 0.0620, 0.0321, 0.0344],
+ device='cuda:2'), in_proj_covar=tensor([0.0379, 0.0520, 0.0469, 0.0458, 0.0525, 0.0437, 0.0432, 0.0490],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 03:54:14,234 INFO [train.py:901] (2/4) Epoch 7, batch 2750, loss[loss=0.2417, simple_loss=0.3282, pruned_loss=0.07756, over 8475.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3347, pruned_loss=0.09978, over 1612327.99 frames. ], batch size: 25, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:54:19,286 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-02-06 03:54:21,062 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51259.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:54:34,382 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51279.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:54:38,285 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51285.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:54:47,539 INFO [train.py:901] (2/4) Epoch 7, batch 2800, loss[loss=0.2718, simple_loss=0.3414, pruned_loss=0.1011, over 8492.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3342, pruned_loss=0.09928, over 1616582.88 frames. ], batch size: 26, lr: 1.11e-02, grad_scale: 8.0
+2023-02-06 03:54:51,134 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51304.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:54:53,748 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51308.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:55:11,783 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.811e+02 3.563e+02 4.674e+02 6.809e+02, threshold=7.126e+02, percent-clipped=0.0
+2023-02-06 03:55:22,692 INFO [train.py:901] (2/4) Epoch 7, batch 2850, loss[loss=0.2361, simple_loss=0.2946, pruned_loss=0.08879, over 7813.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.3326, pruned_loss=0.09811, over 1614945.78 frames. ], batch size: 20, lr: 1.10e-02, grad_scale: 16.0
+2023-02-06 03:55:57,256 INFO [train.py:901] (2/4) Epoch 7, batch 2900, loss[loss=0.2483, simple_loss=0.3359, pruned_loss=0.08034, over 8294.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3348, pruned_loss=0.09992, over 1615119.19 frames. ], batch size: 23, lr: 1.10e-02, grad_scale: 16.0
+2023-02-06 03:55:58,211 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51400.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:04,947 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:13,542 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51423.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:14,145 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51424.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:56:19,650 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 03:56:20,272 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.833e+02 3.577e+02 4.732e+02 1.075e+03, threshold=7.153e+02, percent-clipped=9.0
+2023-02-06 03:56:32,325 INFO [train.py:901] (2/4) Epoch 7, batch 2950, loss[loss=0.2645, simple_loss=0.3317, pruned_loss=0.09863, over 8322.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3354, pruned_loss=0.0998, over 1615834.08 frames. ], batch size: 25, lr: 1.10e-02, grad_scale: 16.0
+2023-02-06 03:57:06,416 INFO [train.py:901] (2/4) Epoch 7, batch 3000, loss[loss=0.3211, simple_loss=0.3831, pruned_loss=0.1296, over 8519.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3364, pruned_loss=0.1011, over 1619514.01 frames. ], batch size: 50, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:57:06,416 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 03:57:21,709 INFO [train.py:935] (2/4) Epoch 7, validation: loss=0.2071, simple_loss=0.305, pruned_loss=0.05459, over 944034.00 frames.
+2023-02-06 03:57:21,710 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB
+2023-02-06 03:57:31,201 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51513.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:32,575 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51515.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:45,141 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.842e+02 3.422e+02 4.197e+02 1.269e+03, threshold=6.844e+02, percent-clipped=2.0
+2023-02-06 03:57:45,245 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51534.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:57:48,654 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51539.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:49,362 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51540.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:57:50,077 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51541.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:57:53,750 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-02-06 03:57:55,342 INFO [train.py:901] (2/4) Epoch 7, batch 3050, loss[loss=0.2524, simple_loss=0.3047, pruned_loss=0.1001, over 7719.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3358, pruned_loss=0.1008, over 1610909.79 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:58:06,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51566.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 03:58:29,894 INFO [train.py:901] (2/4) Epoch 7, batch 3100, loss[loss=0.2319, simple_loss=0.3125, pruned_loss=0.07558, over 8243.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3336, pruned_loss=0.09951, over 1610848.63 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:58:54,843 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.035e+02 3.902e+02 5.145e+02 1.067e+03, threshold=7.804e+02, percent-clipped=7.0
+2023-02-06 03:59:05,327 INFO [train.py:901] (2/4) Epoch 7, batch 3150, loss[loss=0.2204, simple_loss=0.2829, pruned_loss=0.079, over 7706.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3338, pruned_loss=0.1002, over 1613425.98 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:59:05,503 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51649.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 03:59:26,403 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51679.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 03:59:33,828 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-06 03:59:40,120 INFO [train.py:901] (2/4) Epoch 7, batch 3200, loss[loss=0.2597, simple_loss=0.3153, pruned_loss=0.102, over 7225.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3323, pruned_loss=0.09888, over 1613434.14 frames. ], batch size: 16, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 03:59:43,599 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51704.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:05,272 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.946e+02 3.588e+02 4.680e+02 7.788e+02, threshold=7.176e+02, percent-clipped=0.0
+2023-02-06 04:00:12,050 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:16,043 INFO [train.py:901] (2/4) Epoch 7, batch 3250, loss[loss=0.2855, simple_loss=0.3566, pruned_loss=0.1072, over 8507.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3335, pruned_loss=0.09976, over 1617896.36 frames. ], batch size: 29, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:00:19,467 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51754.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:26,781 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51765.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:46,822 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:00:49,337 INFO [train.py:901] (2/4) Epoch 7, batch 3300, loss[loss=0.2596, simple_loss=0.3359, pruned_loss=0.09166, over 8473.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3347, pruned_loss=0.1005, over 1618162.13 frames. ], batch size: 25, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:01:03,664 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:14,384 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.988e+02 3.662e+02 4.246e+02 9.313e+02, threshold=7.324e+02, percent-clipped=2.0
+2023-02-06 04:01:24,625 INFO [train.py:901] (2/4) Epoch 7, batch 3350, loss[loss=0.2558, simple_loss=0.3157, pruned_loss=0.09794, over 7925.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.334, pruned_loss=0.09973, over 1621952.33 frames. ], batch size: 20, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:01:30,918 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51857.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:32,346 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51859.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:39,540 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51869.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:01:59,346 INFO [train.py:901] (2/4) Epoch 7, batch 3400, loss[loss=0.2246, simple_loss=0.3077, pruned_loss=0.07076, over 8199.00 frames. ], tot_loss[loss=0.266, simple_loss=0.3329, pruned_loss=0.09955, over 1617810.83 frames. ], batch size: 23, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:02:03,623 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51905.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 04:02:10,931 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-06 04:02:16,116 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9588, 2.3734, 1.7901, 2.7239, 1.4432, 1.4501, 1.9741, 2.4152],
+ device='cuda:2'), covar=tensor([0.0850, 0.0823, 0.1200, 0.0452, 0.1328, 0.1731, 0.1170, 0.0762],
+ device='cuda:2'), in_proj_covar=tensor([0.0252, 0.0233, 0.0275, 0.0219, 0.0239, 0.0263, 0.0272, 0.0237],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 04:02:20,930 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51930.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 04:02:23,260 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.723e+02 3.470e+02 4.144e+02 7.359e+02, threshold=6.940e+02, percent-clipped=1.0
+2023-02-06 04:02:24,206 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5417, 2.0931, 3.1251, 2.3747, 2.5028, 2.1918, 1.6748, 1.3690],
+ device='cuda:2'), covar=tensor([0.2579, 0.2844, 0.0679, 0.1512, 0.1571, 0.1612, 0.1540, 0.2869],
+ device='cuda:2'), in_proj_covar=tensor([0.0822, 0.0758, 0.0656, 0.0752, 0.0856, 0.0704, 0.0653, 0.0691],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 04:02:34,620 INFO [train.py:901] (2/4) Epoch 7, batch 3450, loss[loss=0.2487, simple_loss=0.3071, pruned_loss=0.09511, over 7924.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.331, pruned_loss=0.09814, over 1613872.44 frames. ], batch size: 20, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:02:50,958 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51972.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:03:04,793 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1373, 1.2768, 2.2359, 1.1108, 2.0722, 2.4135, 2.4580, 2.0453],
+ device='cuda:2'), covar=tensor([0.1178, 0.1360, 0.0589, 0.2207, 0.0678, 0.0427, 0.0647, 0.0930],
+ device='cuda:2'), in_proj_covar=tensor([0.0240, 0.0274, 0.0230, 0.0271, 0.0243, 0.0218, 0.0275, 0.0281],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 04:03:09,378 INFO [train.py:901] (2/4) Epoch 7, batch 3500, loss[loss=0.2268, simple_loss=0.298, pruned_loss=0.07785, over 7705.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.331, pruned_loss=0.0983, over 1610024.86 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:03:22,433 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 04:03:33,488 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.822e+02 3.302e+02 4.435e+02 1.594e+03, threshold=6.604e+02, percent-clipped=5.0
+2023-02-06 04:03:43,724 INFO [train.py:901] (2/4) Epoch 7, batch 3550, loss[loss=0.2506, simple_loss=0.3162, pruned_loss=0.09255, over 8080.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.3314, pruned_loss=0.09823, over 1606880.93 frames. ], batch size: 21, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:03:50,004 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52058.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:19,842 INFO [train.py:901] (2/4) Epoch 7, batch 3600, loss[loss=0.2928, simple_loss=0.3593, pruned_loss=0.1131, over 8498.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.332, pruned_loss=0.09853, over 1611480.95 frames. ], batch size: 28, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:04:26,700 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:30,891 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:37,712 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:43,467 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.818e+02 3.176e+02 4.094e+02 8.086e+02, threshold=6.353e+02, percent-clipped=5.0
+2023-02-06 04:04:47,734 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:04:53,394 INFO [train.py:901] (2/4) Epoch 7, batch 3650, loss[loss=0.2482, simple_loss=0.3331, pruned_loss=0.08164, over 8517.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.3316, pruned_loss=0.09807, over 1609982.35 frames. ], batch size: 49, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:04:54,178 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52150.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:23,184 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 04:05:28,575 INFO [train.py:901] (2/4) Epoch 7, batch 3700, loss[loss=0.2626, simple_loss=0.3293, pruned_loss=0.09794, over 8366.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3312, pruned_loss=0.09758, over 1610218.75 frames. ], batch size: 26, lr: 1.10e-02, grad_scale: 8.0
+2023-02-06 04:05:36,879 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52211.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:41,685 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7605, 1.9912, 2.9033, 1.5090, 2.4684, 1.9727, 1.8218, 2.2716],
+ device='cuda:2'), covar=tensor([0.1160, 0.1553, 0.0487, 0.2595, 0.0897, 0.1906, 0.1288, 0.1557],
+ device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0469, 0.0530, 0.0550, 0.0586, 0.0523, 0.0455, 0.0597],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003],
+ device='cuda:2')
+2023-02-06 04:05:47,067 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:49,849 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52228.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 04:05:53,719 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.602e+02 3.554e+02 4.404e+02 9.700e+02, threshold=7.108e+02, percent-clipped=5.0
+2023-02-06 04:06:04,132 INFO [train.py:901] (2/4) Epoch 7, batch 3750, loss[loss=0.2496, simple_loss=0.3136, pruned_loss=0.09284, over 7665.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.3314, pruned_loss=0.0977, over 1609966.52 frames.
], batch size: 19, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:06:05,809 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3599, 1.5460, 2.2475, 1.1247, 1.5603, 1.5850, 1.4815, 1.5362], + device='cuda:2'), covar=tensor([0.1473, 0.1919, 0.0699, 0.3327, 0.1385, 0.2561, 0.1574, 0.1735], + device='cuda:2'), in_proj_covar=tensor([0.0473, 0.0471, 0.0532, 0.0550, 0.0585, 0.0525, 0.0456, 0.0596], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 04:06:07,176 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:06:12,881 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6633, 3.1500, 2.3484, 4.0644, 1.8325, 1.9669, 2.2280, 3.0014], + device='cuda:2'), covar=tensor([0.0827, 0.0962, 0.1175, 0.0281, 0.1519, 0.1635, 0.1488, 0.1020], + device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0238, 0.0280, 0.0223, 0.0241, 0.0266, 0.0274, 0.0241], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 04:06:24,074 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.66 vs. limit=5.0 +2023-02-06 04:06:38,743 INFO [train.py:901] (2/4) Epoch 7, batch 3800, loss[loss=0.2849, simple_loss=0.3497, pruned_loss=0.11, over 8525.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.3319, pruned_loss=0.09782, over 1611733.85 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:01,885 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52330.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:07:04,421 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.780e+02 3.361e+02 4.228e+02 6.516e+02, threshold=6.722e+02, percent-clipped=0.0 +2023-02-06 04:07:15,844 INFO [train.py:901] (2/4) Epoch 7, batch 3850, loss[loss=0.2205, simple_loss=0.2958, pruned_loss=0.07255, over 7642.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.3303, pruned_loss=0.09701, over 1611448.19 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:25,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3617, 1.5846, 2.3342, 1.2131, 1.6268, 1.6612, 1.5024, 1.5303], + device='cuda:2'), covar=tensor([0.1601, 0.1812, 0.0608, 0.3272, 0.1320, 0.2585, 0.1615, 0.1707], + device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0474, 0.0533, 0.0549, 0.0591, 0.0528, 0.0458, 0.0595], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 04:07:30,474 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 04:07:35,590 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.70 vs. limit=5.0 +2023-02-06 04:07:49,730 INFO [train.py:901] (2/4) Epoch 7, batch 3900, loss[loss=0.3023, simple_loss=0.351, pruned_loss=0.1268, over 7805.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3321, pruned_loss=0.09876, over 1607928.26 frames. 
], batch size: 20, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:51,994 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:08:15,068 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.697e+02 3.207e+02 4.225e+02 1.297e+03, threshold=6.414e+02, percent-clipped=5.0 +2023-02-06 04:08:25,215 INFO [train.py:901] (2/4) Epoch 7, batch 3950, loss[loss=0.2571, simple_loss=0.3327, pruned_loss=0.09075, over 8105.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3322, pruned_loss=0.09846, over 1609955.30 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:08:48,005 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:00,582 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 04:09:00,796 INFO [train.py:901] (2/4) Epoch 7, batch 4000, loss[loss=0.3386, simple_loss=0.3857, pruned_loss=0.1457, over 8567.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3317, pruned_loss=0.09848, over 1608284.67 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:09:04,524 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7990, 2.4446, 2.8107, 1.0866, 2.7439, 1.6399, 1.6003, 1.6730], + device='cuda:2'), covar=tensor([0.0437, 0.0166, 0.0102, 0.0371, 0.0179, 0.0430, 0.0378, 0.0247], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0257, 0.0215, 0.0317, 0.0258, 0.0403, 0.0311, 0.0292], + device='cuda:2'), out_proj_covar=tensor([1.1301e-04, 8.0308e-05, 6.6344e-05, 9.9245e-05, 8.1370e-05, 1.3735e-04, + 9.9871e-05, 9.1926e-05], device='cuda:2') +2023-02-06 04:09:05,165 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52505.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:13,175 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:23,937 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 04:09:24,189 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.935e+02 3.629e+02 4.693e+02 1.248e+03, threshold=7.258e+02, percent-clipped=9.0 +2023-02-06 04:09:28,447 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1213, 2.2860, 1.9311, 2.6950, 1.2943, 1.5397, 1.9859, 2.5017], + device='cuda:2'), covar=tensor([0.0867, 0.0951, 0.1319, 0.0615, 0.1518, 0.1695, 0.1191, 0.0734], + device='cuda:2'), in_proj_covar=tensor([0.0258, 0.0240, 0.0282, 0.0224, 0.0243, 0.0268, 0.0278, 0.0240], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 04:09:35,740 INFO [train.py:901] (2/4) Epoch 7, batch 4050, loss[loss=0.2517, simple_loss=0.323, pruned_loss=0.09017, over 8619.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3312, pruned_loss=0.09741, over 1608291.52 frames. 
], batch size: 39, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:09:39,787 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:52,443 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52573.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:10:10,473 INFO [train.py:901] (2/4) Epoch 7, batch 4100, loss[loss=0.2464, simple_loss=0.3212, pruned_loss=0.08578, over 8197.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3312, pruned_loss=0.09765, over 1608739.63 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:10:33,844 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.709e+02 3.346e+02 4.687e+02 1.096e+03, threshold=6.691e+02, percent-clipped=5.0 +2023-02-06 04:10:44,015 INFO [train.py:901] (2/4) Epoch 7, batch 4150, loss[loss=0.2422, simple_loss=0.3188, pruned_loss=0.08275, over 7975.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3326, pruned_loss=0.09849, over 1614684.16 frames. ], batch size: 21, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:10:59,829 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52670.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:11:02,381 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:11:20,448 INFO [train.py:901] (2/4) Epoch 7, batch 4200, loss[loss=0.3323, simple_loss=0.3687, pruned_loss=0.148, over 8129.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3328, pruned_loss=0.09882, over 1612138.47 frames. ], batch size: 22, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:11:27,420 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5557, 2.8017, 2.4354, 3.7685, 1.8110, 1.7828, 2.3880, 2.9066], + device='cuda:2'), covar=tensor([0.0801, 0.1028, 0.1221, 0.0339, 0.1448, 0.1727, 0.1298, 0.0906], + device='cuda:2'), in_proj_covar=tensor([0.0255, 0.0239, 0.0281, 0.0224, 0.0241, 0.0269, 0.0276, 0.0239], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 04:11:30,519 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 04:11:43,884 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.675e+02 3.334e+02 4.108e+02 1.082e+03, threshold=6.669e+02, percent-clipped=4.0 +2023-02-06 04:11:49,448 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5220, 1.9347, 1.9840, 1.1035, 2.1956, 1.3053, 0.6421, 1.8148], + device='cuda:2'), covar=tensor([0.0242, 0.0124, 0.0097, 0.0214, 0.0135, 0.0359, 0.0353, 0.0107], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0259, 0.0218, 0.0319, 0.0257, 0.0405, 0.0314, 0.0296], + device='cuda:2'), out_proj_covar=tensor([1.1252e-04, 8.0884e-05, 6.7414e-05, 9.9557e-05, 8.0962e-05, 1.3825e-04, + 1.0056e-04, 9.2977e-05], device='cuda:2') +2023-02-06 04:11:49,487 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3007, 1.7285, 3.1218, 2.3115, 2.5604, 1.8659, 1.4147, 1.4722], + device='cuda:2'), covar=tensor([0.2660, 0.3272, 0.0630, 0.1605, 0.1373, 0.1788, 0.1570, 0.2731], + device='cuda:2'), in_proj_covar=tensor([0.0830, 0.0774, 0.0667, 0.0762, 0.0858, 0.0711, 0.0658, 0.0706], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:11:53,177 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 04:11:53,286 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7438, 5.7763, 4.9637, 2.1293, 5.0400, 5.3475, 5.3472, 5.0005], + device='cuda:2'), covar=tensor([0.0548, 0.0374, 0.0758, 0.4373, 0.0625, 0.0708, 0.0927, 0.0554], + device='cuda:2'), in_proj_covar=tensor([0.0422, 0.0323, 0.0357, 0.0440, 0.0342, 0.0319, 0.0326, 0.0280], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:11:53,853 INFO [train.py:901] (2/4) Epoch 7, batch 4250, loss[loss=0.256, simple_loss=0.3321, pruned_loss=0.08996, over 8261.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3328, pruned_loss=0.099, over 1611608.55 frames. ], batch size: 24, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:12:08,140 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:10,301 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:17,100 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0724, 1.7144, 1.3719, 1.7304, 1.4418, 1.2320, 1.3157, 1.4304], + device='cuda:2'), covar=tensor([0.0893, 0.0399, 0.0899, 0.0391, 0.0549, 0.1022, 0.0690, 0.0629], + device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0236, 0.0305, 0.0298, 0.0307, 0.0314, 0.0335, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 04:12:22,418 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:28,470 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:28,933 INFO [train.py:901] (2/4) Epoch 7, batch 4300, loss[loss=0.2787, simple_loss=0.3544, pruned_loss=0.1015, over 8466.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3329, pruned_loss=0.09887, over 1611681.64 frames. 
], batch size: 27, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:12:53,660 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.879e+02 3.462e+02 4.347e+02 1.112e+03, threshold=6.924e+02, percent-clipped=5.0 +2023-02-06 04:13:03,872 INFO [train.py:901] (2/4) Epoch 7, batch 4350, loss[loss=0.2502, simple_loss=0.316, pruned_loss=0.09225, over 7802.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3306, pruned_loss=0.09752, over 1607173.28 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:13:14,670 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4921, 1.9206, 2.1296, 1.0150, 2.3931, 1.2725, 0.7280, 1.7940], + device='cuda:2'), covar=tensor([0.0360, 0.0171, 0.0124, 0.0310, 0.0153, 0.0489, 0.0471, 0.0159], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0263, 0.0220, 0.0320, 0.0260, 0.0408, 0.0317, 0.0298], + device='cuda:2'), out_proj_covar=tensor([1.1267e-04, 8.2178e-05, 6.8024e-05, 9.9893e-05, 8.2111e-05, 1.3864e-04, + 1.0155e-04, 9.3844e-05], device='cuda:2') +2023-02-06 04:13:24,425 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 04:13:38,493 INFO [train.py:901] (2/4) Epoch 7, batch 4400, loss[loss=0.2863, simple_loss=0.3417, pruned_loss=0.1155, over 6889.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3309, pruned_loss=0.09758, over 1606317.75 frames. ], batch size: 72, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:13:50,776 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52917.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:13:56,762 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:14:02,505 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.715e+02 3.689e+02 4.508e+02 8.331e+02, threshold=7.379e+02, percent-clipped=6.0 +2023-02-06 04:14:06,678 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 04:14:13,200 INFO [train.py:901] (2/4) Epoch 7, batch 4450, loss[loss=0.3228, simple_loss=0.3642, pruned_loss=0.1407, over 8088.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3309, pruned_loss=0.09737, over 1611198.56 frames. ], batch size: 21, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:14:14,728 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:14:38,155 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4446, 1.7775, 3.1528, 2.4416, 2.5792, 1.8873, 1.4754, 1.2788], + device='cuda:2'), covar=tensor([0.2669, 0.3258, 0.0690, 0.1666, 0.1466, 0.1744, 0.1656, 0.3104], + device='cuda:2'), in_proj_covar=tensor([0.0829, 0.0774, 0.0676, 0.0768, 0.0863, 0.0712, 0.0662, 0.0711], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:14:46,593 INFO [train.py:901] (2/4) Epoch 7, batch 4500, loss[loss=0.3048, simple_loss=0.3628, pruned_loss=0.1233, over 8505.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3335, pruned_loss=0.09963, over 1610728.26 frames. ], batch size: 28, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:14:59,361 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 04:15:10,357 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53032.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:15:11,488 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.236e+02 2.890e+02 3.405e+02 4.030e+02 1.067e+03, threshold=6.809e+02, percent-clipped=4.0 +2023-02-06 04:15:18,996 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:15:22,052 INFO [train.py:901] (2/4) Epoch 7, batch 4550, loss[loss=0.3101, simple_loss=0.3689, pruned_loss=0.1257, over 8521.00 frames. ], tot_loss[loss=0.2641, simple_loss=0.3319, pruned_loss=0.09816, over 1611910.01 frames. ], batch size: 28, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:15:33,123 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8002, 2.0608, 1.8534, 1.4310, 2.1148, 1.6320, 1.2251, 1.7359], + device='cuda:2'), covar=tensor([0.0244, 0.0142, 0.0122, 0.0207, 0.0136, 0.0256, 0.0312, 0.0129], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0264, 0.0221, 0.0323, 0.0262, 0.0409, 0.0319, 0.0298], + device='cuda:2'), out_proj_covar=tensor([1.1323e-04, 8.2176e-05, 6.8216e-05, 1.0069e-04, 8.2625e-05, 1.3878e-04, + 1.0222e-04, 9.3637e-05], device='cuda:2') +2023-02-06 04:15:36,990 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:15:39,628 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53074.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:15:56,227 INFO [train.py:901] (2/4) Epoch 7, batch 4600, loss[loss=0.242, simple_loss=0.3245, pruned_loss=0.07975, over 8289.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3322, pruned_loss=0.09789, over 1610957.17 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:16:06,495 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:16:20,974 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.951e+02 3.579e+02 4.375e+02 1.013e+03, threshold=7.158e+02, percent-clipped=5.0 +2023-02-06 04:16:31,864 INFO [train.py:901] (2/4) Epoch 7, batch 4650, loss[loss=0.2701, simple_loss=0.3417, pruned_loss=0.09929, over 8477.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3306, pruned_loss=0.09673, over 1611336.74 frames. ], batch size: 25, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:17:06,494 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3954, 1.9054, 3.4237, 1.0596, 2.4158, 1.8658, 1.4802, 2.2071], + device='cuda:2'), covar=tensor([0.1722, 0.2048, 0.0703, 0.3717, 0.1543, 0.2696, 0.1754, 0.2318], + device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0480, 0.0531, 0.0557, 0.0603, 0.0540, 0.0457, 0.0597], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 04:17:06,934 INFO [train.py:901] (2/4) Epoch 7, batch 4700, loss[loss=0.2459, simple_loss=0.3342, pruned_loss=0.07883, over 8200.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3312, pruned_loss=0.09716, over 1610900.43 frames. 
], batch size: 23, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:17:27,472 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:17:30,531 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.793e+02 3.469e+02 4.275e+02 9.300e+02, threshold=6.939e+02, percent-clipped=3.0 +2023-02-06 04:17:34,228 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3209, 1.7826, 2.9669, 2.2245, 2.4383, 2.0164, 1.5099, 1.0996], + device='cuda:2'), covar=tensor([0.2448, 0.2852, 0.0618, 0.1638, 0.1295, 0.1451, 0.1330, 0.2986], + device='cuda:2'), in_proj_covar=tensor([0.0819, 0.0764, 0.0659, 0.0757, 0.0844, 0.0700, 0.0651, 0.0696], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:17:41,203 INFO [train.py:901] (2/4) Epoch 7, batch 4750, loss[loss=0.2298, simple_loss=0.2877, pruned_loss=0.08598, over 7798.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3312, pruned_loss=0.09745, over 1609492.22 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:17:48,716 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:17:56,643 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 04:18:00,062 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 04:18:02,329 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7024, 1.9082, 1.6036, 2.3158, 0.9707, 1.3929, 1.6657, 1.8302], + device='cuda:2'), covar=tensor([0.0822, 0.0962, 0.1315, 0.0529, 0.1513, 0.1728, 0.1021, 0.0962], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0238, 0.0279, 0.0224, 0.0239, 0.0266, 0.0276, 0.0238], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 04:18:09,725 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53288.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:18:16,605 INFO [train.py:901] (2/4) Epoch 7, batch 4800, loss[loss=0.2588, simple_loss=0.3348, pruned_loss=0.09144, over 8320.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3309, pruned_loss=0.09658, over 1613413.45 frames. 
], batch size: 25, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:18:26,448 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53313.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:18:32,416 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:18:39,897 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53333.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:18:41,111 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.630e+02 3.191e+02 3.984e+02 9.617e+02, threshold=6.381e+02, percent-clipped=3.0 +2023-02-06 04:18:49,306 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8389, 1.9004, 4.2625, 1.9082, 2.2677, 4.9230, 4.9294, 4.3085], + device='cuda:2'), covar=tensor([0.0815, 0.1382, 0.0300, 0.1887, 0.0904, 0.0222, 0.0292, 0.0548], + device='cuda:2'), in_proj_covar=tensor([0.0240, 0.0278, 0.0233, 0.0274, 0.0244, 0.0220, 0.0279, 0.0283], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 04:18:50,454 INFO [train.py:901] (2/4) Epoch 7, batch 4850, loss[loss=0.2748, simple_loss=0.342, pruned_loss=0.1038, over 8416.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.332, pruned_loss=0.09773, over 1614145.31 frames. ], batch size: 27, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:18:51,160 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 04:19:16,477 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:19:26,608 INFO [train.py:901] (2/4) Epoch 7, batch 4900, loss[loss=0.2406, simple_loss=0.3145, pruned_loss=0.08331, over 8092.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3311, pruned_loss=0.0969, over 1616194.33 frames. ], batch size: 21, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:19:35,805 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 04:19:40,217 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:19:51,455 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.811e+02 3.269e+02 4.328e+02 9.769e+02, threshold=6.539e+02, percent-clipped=6.0 +2023-02-06 04:20:00,859 INFO [train.py:901] (2/4) Epoch 7, batch 4950, loss[loss=0.2954, simple_loss=0.3495, pruned_loss=0.1206, over 8334.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.3326, pruned_loss=0.0988, over 1608016.17 frames. 
], batch size: 26, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:20:20,045 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6658, 2.1983, 4.7299, 2.5943, 4.2811, 4.0504, 4.4552, 4.3021], + device='cuda:2'), covar=tensor([0.0461, 0.3313, 0.0483, 0.2621, 0.0806, 0.0636, 0.0406, 0.0472], + device='cuda:2'), in_proj_covar=tensor([0.0392, 0.0537, 0.0487, 0.0469, 0.0531, 0.0447, 0.0440, 0.0504], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 04:20:27,197 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53485.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:20:36,815 INFO [train.py:901] (2/4) Epoch 7, batch 5000, loss[loss=0.2698, simple_loss=0.3484, pruned_loss=0.09562, over 8434.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.332, pruned_loss=0.09819, over 1609752.56 frames. ], batch size: 29, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:20:45,259 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:20:46,626 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4233, 1.4739, 1.6344, 1.2977, 1.1065, 1.4478, 1.7503, 1.5766], + device='cuda:2'), covar=tensor([0.0552, 0.1249, 0.1955, 0.1510, 0.0639, 0.1583, 0.0707, 0.0614], + device='cuda:2'), in_proj_covar=tensor([0.0117, 0.0166, 0.0206, 0.0169, 0.0116, 0.0174, 0.0127, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 04:21:01,977 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:21:03,199 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.612e+02 3.156e+02 4.000e+02 8.821e+02, threshold=6.312e+02, percent-clipped=7.0 +2023-02-06 04:21:12,888 INFO [train.py:901] (2/4) Epoch 7, batch 5050, loss[loss=0.2573, simple_loss=0.3296, pruned_loss=0.0925, over 8736.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3306, pruned_loss=0.09709, over 1609507.85 frames. ], batch size: 30, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:21:31,984 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 04:21:46,773 INFO [train.py:901] (2/4) Epoch 7, batch 5100, loss[loss=0.3076, simple_loss=0.358, pruned_loss=0.1286, over 8369.00 frames. ], tot_loss[loss=0.2644, simple_loss=0.3321, pruned_loss=0.09834, over 1614187.53 frames. 
], batch size: 49, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:21:51,221 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:22:04,007 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.5564, 1.6956, 3.6814, 1.9160, 3.3343, 3.1668, 3.3696, 3.3076], + device='cuda:2'), covar=tensor([0.0483, 0.2800, 0.0594, 0.2554, 0.0912, 0.0684, 0.0467, 0.0511], + device='cuda:2'), in_proj_covar=tensor([0.0390, 0.0525, 0.0483, 0.0464, 0.0527, 0.0439, 0.0434, 0.0495], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 04:22:13,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.889e+02 3.391e+02 4.238e+02 9.606e+02, threshold=6.783e+02, percent-clipped=10.0 +2023-02-06 04:22:23,446 INFO [train.py:901] (2/4) Epoch 7, batch 5150, loss[loss=0.1993, simple_loss=0.2794, pruned_loss=0.05964, over 7821.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3306, pruned_loss=0.09694, over 1607985.19 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:22:34,956 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:22:40,557 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=6.40 vs. limit=5.0 +2023-02-06 04:22:42,267 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:22:56,582 INFO [train.py:901] (2/4) Epoch 7, batch 5200, loss[loss=0.2279, simple_loss=0.298, pruned_loss=0.07896, over 7809.00 frames. ], tot_loss[loss=0.262, simple_loss=0.33, pruned_loss=0.09698, over 1605143.61 frames. ], batch size: 19, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:23:10,747 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:23:17,975 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:23:22,034 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.983e+02 3.078e+02 4.028e+02 5.378e+02 1.177e+03, threshold=8.056e+02, percent-clipped=8.0 +2023-02-06 04:23:28,936 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 04:23:32,186 INFO [train.py:901] (2/4) Epoch 7, batch 5250, loss[loss=0.2883, simple_loss=0.3548, pruned_loss=0.1109, over 8493.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3299, pruned_loss=0.09684, over 1605416.79 frames. ], batch size: 26, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:23:54,701 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:00,279 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:02,337 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53792.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:06,924 INFO [train.py:901] (2/4) Epoch 7, batch 5300, loss[loss=0.2872, simple_loss=0.3568, pruned_loss=0.1089, over 8459.00 frames. ], tot_loss[loss=0.2619, simple_loss=0.3295, pruned_loss=0.0971, over 1603825.30 frames. 
], batch size: 25, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:24:17,554 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:31,353 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.647e+02 3.169e+02 3.870e+02 1.211e+03, threshold=6.339e+02, percent-clipped=2.0 +2023-02-06 04:24:39,056 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53844.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:42,222 INFO [train.py:901] (2/4) Epoch 7, batch 5350, loss[loss=0.2475, simple_loss=0.3178, pruned_loss=0.08863, over 7915.00 frames. ], tot_loss[loss=0.2626, simple_loss=0.3297, pruned_loss=0.09776, over 1604814.00 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:25:17,624 INFO [train.py:901] (2/4) Epoch 7, batch 5400, loss[loss=0.2495, simple_loss=0.321, pruned_loss=0.08903, over 8112.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3299, pruned_loss=0.09735, over 1608601.47 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:25:41,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.728e+02 3.458e+02 4.119e+02 1.009e+03, threshold=6.915e+02, percent-clipped=3.0 +2023-02-06 04:25:51,250 INFO [train.py:901] (2/4) Epoch 7, batch 5450, loss[loss=0.239, simple_loss=0.3079, pruned_loss=0.08507, over 7820.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3299, pruned_loss=0.09743, over 1606952.73 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:25:55,229 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-06 04:26:05,668 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. limit=5.0 +2023-02-06 04:26:09,514 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:26:18,658 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 04:26:21,819 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 04:26:26,957 INFO [train.py:901] (2/4) Epoch 7, batch 5500, loss[loss=0.2994, simple_loss=0.3485, pruned_loss=0.1251, over 7316.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.33, pruned_loss=0.09724, over 1609797.33 frames. ], batch size: 72, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:26:27,120 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:26:52,075 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.929e+02 2.794e+02 3.496e+02 4.646e+02 1.157e+03, threshold=6.993e+02, percent-clipped=7.0 +2023-02-06 04:26:53,576 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54037.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:01,154 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:01,615 INFO [train.py:901] (2/4) Epoch 7, batch 5550, loss[loss=0.2506, simple_loss=0.3273, pruned_loss=0.08699, over 8188.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3305, pruned_loss=0.097, over 1615206.29 frames. 
], batch size: 23, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:27:10,528 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:18,052 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:23,949 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7408, 5.7439, 5.1672, 2.0912, 5.1716, 5.4991, 5.4391, 5.2071], + device='cuda:2'), covar=tensor([0.0544, 0.0447, 0.0727, 0.4367, 0.0560, 0.0577, 0.0897, 0.0544], + device='cuda:2'), in_proj_covar=tensor([0.0413, 0.0322, 0.0348, 0.0430, 0.0337, 0.0314, 0.0324, 0.0273], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:27:37,055 INFO [train.py:901] (2/4) Epoch 7, batch 5600, loss[loss=0.299, simple_loss=0.3579, pruned_loss=0.1201, over 8264.00 frames. ], tot_loss[loss=0.2641, simple_loss=0.3319, pruned_loss=0.09816, over 1615726.27 frames. ], batch size: 49, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:27:37,987 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:55,819 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:28:02,448 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.954e+02 2.795e+02 3.455e+02 4.516e+02 9.788e+02, threshold=6.911e+02, percent-clipped=3.0 +2023-02-06 04:28:12,193 INFO [train.py:901] (2/4) Epoch 7, batch 5650, loss[loss=0.213, simple_loss=0.2862, pruned_loss=0.06985, over 7424.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.3313, pruned_loss=0.09701, over 1617513.78 frames. ], batch size: 17, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:28:25,363 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 04:28:47,264 INFO [train.py:901] (2/4) Epoch 7, batch 5700, loss[loss=0.2618, simple_loss=0.3387, pruned_loss=0.09249, over 8623.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3299, pruned_loss=0.09624, over 1615657.54 frames. ], batch size: 34, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:29:01,029 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0626, 1.7082, 1.3137, 1.6270, 1.4380, 1.1149, 1.0920, 1.3957], + device='cuda:2'), covar=tensor([0.1011, 0.0385, 0.1051, 0.0490, 0.0617, 0.1272, 0.0913, 0.0666], + device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0239, 0.0311, 0.0303, 0.0314, 0.0319, 0.0342, 0.0325], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 04:29:12,985 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.786e+02 3.155e+02 4.023e+02 8.991e+02, threshold=6.311e+02, percent-clipped=4.0 +2023-02-06 04:29:22,459 INFO [train.py:901] (2/4) Epoch 7, batch 5750, loss[loss=0.2641, simple_loss=0.3292, pruned_loss=0.09951, over 8235.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.3299, pruned_loss=0.09618, over 1611123.38 frames. ], batch size: 22, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:29:31,456 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 04:29:53,945 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-02-06 04:29:56,273 INFO [train.py:901] (2/4) Epoch 7, batch 5800, loss[loss=0.2402, simple_loss=0.3255, pruned_loss=0.07743, over 8493.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3292, pruned_loss=0.09559, over 1610385.04 frames. ], batch size: 28, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:30:22,027 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.127e+02 2.882e+02 3.737e+02 4.385e+02 9.194e+02, threshold=7.474e+02, percent-clipped=5.0 +2023-02-06 04:30:32,272 INFO [train.py:901] (2/4) Epoch 7, batch 5850, loss[loss=0.2532, simple_loss=0.3317, pruned_loss=0.08736, over 8350.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3292, pruned_loss=0.09529, over 1612145.49 frames. ], batch size: 26, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:30:58,370 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.48 vs. limit=5.0 +2023-02-06 04:31:06,107 INFO [train.py:901] (2/4) Epoch 7, batch 5900, loss[loss=0.2038, simple_loss=0.2697, pruned_loss=0.06892, over 7437.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3298, pruned_loss=0.0961, over 1610612.29 frames. ], batch size: 17, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:31:30,649 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.634e+02 3.151e+02 3.851e+02 7.879e+02, threshold=6.301e+02, percent-clipped=2.0 +2023-02-06 04:31:40,692 INFO [train.py:901] (2/4) Epoch 7, batch 5950, loss[loss=0.2333, simple_loss=0.3037, pruned_loss=0.08148, over 7434.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3288, pruned_loss=0.09569, over 1607797.69 frames. ], batch size: 17, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:32:14,299 INFO [train.py:901] (2/4) Epoch 7, batch 6000, loss[loss=0.2551, simple_loss=0.341, pruned_loss=0.08467, over 8109.00 frames. ], tot_loss[loss=0.2617, simple_loss=0.3301, pruned_loss=0.0966, over 1604740.36 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:32:14,299 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 04:32:26,544 INFO [train.py:935] (2/4) Epoch 7, validation: loss=0.2048, simple_loss=0.3036, pruned_loss=0.05298, over 944034.00 frames. +2023-02-06 04:32:26,544 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 04:32:49,996 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 04:32:50,868 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.687e+02 3.524e+02 4.445e+02 8.914e+02, threshold=7.048e+02, percent-clipped=8.0 +2023-02-06 04:33:00,125 INFO [train.py:901] (2/4) Epoch 7, batch 6050, loss[loss=0.2981, simple_loss=0.3728, pruned_loss=0.1117, over 8438.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3309, pruned_loss=0.09707, over 1612015.57 frames. ], batch size: 29, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:33:00,357 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3469, 1.7241, 1.5705, 0.9533, 1.6591, 1.2773, 0.2768, 1.5493], + device='cuda:2'), covar=tensor([0.0219, 0.0152, 0.0145, 0.0213, 0.0177, 0.0468, 0.0400, 0.0121], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0264, 0.0225, 0.0327, 0.0264, 0.0417, 0.0324, 0.0303], + device='cuda:2'), out_proj_covar=tensor([1.1182e-04, 8.1893e-05, 6.8761e-05, 1.0064e-04, 8.2835e-05, 1.4052e-04, + 1.0283e-04, 9.4368e-05], device='cuda:2') +2023-02-06 04:33:36,263 INFO [train.py:901] (2/4) Epoch 7, batch 6100, loss[loss=0.2462, simple_loss=0.3245, pruned_loss=0.08392, over 8502.00 frames. 
], tot_loss[loss=0.2631, simple_loss=0.3309, pruned_loss=0.09766, over 1611664.60 frames. ], batch size: 26, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:33:51,051 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2460, 1.7799, 2.7691, 2.1228, 2.2241, 1.9443, 1.5132, 0.9964], + device='cuda:2'), covar=tensor([0.2658, 0.2831, 0.0622, 0.1589, 0.1276, 0.1576, 0.1585, 0.2648], + device='cuda:2'), in_proj_covar=tensor([0.0834, 0.0777, 0.0661, 0.0768, 0.0857, 0.0713, 0.0664, 0.0704], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:34:00,446 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 04:34:01,817 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.824e+02 3.447e+02 4.351e+02 1.012e+03, threshold=6.894e+02, percent-clipped=2.0 +2023-02-06 04:34:11,158 INFO [train.py:901] (2/4) Epoch 7, batch 6150, loss[loss=0.325, simple_loss=0.3812, pruned_loss=0.1344, over 8483.00 frames. ], tot_loss[loss=0.2632, simple_loss=0.3308, pruned_loss=0.09777, over 1609961.34 frames. ], batch size: 49, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:34:27,174 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3303, 1.8611, 3.1139, 2.3717, 2.5682, 2.0936, 1.4955, 1.2249], + device='cuda:2'), covar=tensor([0.2856, 0.3110, 0.0652, 0.1614, 0.1348, 0.1435, 0.1408, 0.3018], + device='cuda:2'), in_proj_covar=tensor([0.0837, 0.0780, 0.0665, 0.0771, 0.0862, 0.0717, 0.0667, 0.0710], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:34:34,034 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54682.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:34:46,675 INFO [train.py:901] (2/4) Epoch 7, batch 6200, loss[loss=0.2878, simple_loss=0.3542, pruned_loss=0.1107, over 8428.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3308, pruned_loss=0.09693, over 1614681.96 frames. ], batch size: 29, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:35:12,146 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.928e+02 3.624e+02 4.953e+02 9.267e+02, threshold=7.248e+02, percent-clipped=4.0 +2023-02-06 04:35:12,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4315, 1.3837, 2.3114, 1.1256, 2.1704, 2.4626, 2.4939, 2.0769], + device='cuda:2'), covar=tensor([0.0859, 0.1091, 0.0465, 0.1791, 0.0598, 0.0353, 0.0519, 0.0692], + device='cuda:2'), in_proj_covar=tensor([0.0241, 0.0273, 0.0231, 0.0270, 0.0243, 0.0216, 0.0275, 0.0279], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 04:35:21,774 INFO [train.py:901] (2/4) Epoch 7, batch 6250, loss[loss=0.2528, simple_loss=0.3207, pruned_loss=0.09242, over 7655.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.332, pruned_loss=0.09837, over 1614188.49 frames. 
], batch size: 19, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:35:42,946 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7919, 3.8513, 2.3507, 2.4506, 2.9841, 2.0475, 2.5611, 2.7283], + device='cuda:2'), covar=tensor([0.1579, 0.0199, 0.0890, 0.0776, 0.0559, 0.1115, 0.0922, 0.0949], + device='cuda:2'), in_proj_covar=tensor([0.0339, 0.0229, 0.0307, 0.0294, 0.0307, 0.0309, 0.0332, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 04:35:55,498 INFO [train.py:901] (2/4) Epoch 7, batch 6300, loss[loss=0.2624, simple_loss=0.3301, pruned_loss=0.09735, over 7659.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3306, pruned_loss=0.0977, over 1613044.54 frames. ], batch size: 19, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:36:22,281 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 3.001e+02 3.662e+02 4.451e+02 9.002e+02, threshold=7.325e+02, percent-clipped=3.0 +2023-02-06 04:36:32,311 INFO [train.py:901] (2/4) Epoch 7, batch 6350, loss[loss=0.302, simple_loss=0.3706, pruned_loss=0.1167, over 8489.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3317, pruned_loss=0.09817, over 1616820.27 frames. ], batch size: 39, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:36:45,393 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2886, 2.2072, 1.6922, 2.0863, 1.7330, 1.2929, 1.6073, 1.7459], + device='cuda:2'), covar=tensor([0.1141, 0.0315, 0.0910, 0.0399, 0.0625, 0.1251, 0.0794, 0.0768], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0236, 0.0312, 0.0298, 0.0314, 0.0317, 0.0337, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 04:36:53,638 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:37:05,996 INFO [train.py:901] (2/4) Epoch 7, batch 6400, loss[loss=0.2383, simple_loss=0.3101, pruned_loss=0.08323, over 8251.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3317, pruned_loss=0.0984, over 1615826.27 frames. ], batch size: 22, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:37:31,193 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.661e+02 3.281e+02 3.949e+02 1.010e+03, threshold=6.562e+02, percent-clipped=2.0 +2023-02-06 04:37:40,710 INFO [train.py:901] (2/4) Epoch 7, batch 6450, loss[loss=0.2167, simple_loss=0.2878, pruned_loss=0.07276, over 7803.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.3305, pruned_loss=0.09785, over 1609404.63 frames. ], batch size: 19, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:38:15,584 INFO [train.py:901] (2/4) Epoch 7, batch 6500, loss[loss=0.2275, simple_loss=0.3073, pruned_loss=0.0739, over 7982.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3288, pruned_loss=0.09617, over 1611816.43 frames. ], batch size: 21, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:38:33,804 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55026.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:38:39,665 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.546e+02 3.271e+02 4.197e+02 5.859e+02, threshold=6.542e+02, percent-clipped=0.0 +2023-02-06 04:38:49,579 INFO [train.py:901] (2/4) Epoch 7, batch 6550, loss[loss=0.2922, simple_loss=0.3652, pruned_loss=0.1096, over 8312.00 frames. 
], tot_loss[loss=0.2623, simple_loss=0.3303, pruned_loss=0.09717, over 1612481.16 frames. ], batch size: 25, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:38:58,667 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6537, 1.9970, 2.1595, 1.5663, 1.1501, 2.2057, 0.3106, 1.4105], + device='cuda:2'), covar=tensor([0.2744, 0.1810, 0.0590, 0.2386, 0.5349, 0.0692, 0.4123, 0.2104], + device='cuda:2'), in_proj_covar=tensor([0.0142, 0.0143, 0.0084, 0.0191, 0.0228, 0.0089, 0.0147, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:39:12,198 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 04:39:25,813 INFO [train.py:901] (2/4) Epoch 7, batch 6600, loss[loss=0.2356, simple_loss=0.3098, pruned_loss=0.08074, over 8235.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3303, pruned_loss=0.09669, over 1616276.54 frames. ], batch size: 22, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:39:27,508 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 04:39:32,344 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 04:39:49,421 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.699e+02 3.503e+02 4.413e+02 7.218e+02, threshold=7.007e+02, percent-clipped=4.0 +2023-02-06 04:39:53,570 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55141.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:39:58,776 INFO [train.py:901] (2/4) Epoch 7, batch 6650, loss[loss=0.2219, simple_loss=0.297, pruned_loss=0.0734, over 7810.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.3307, pruned_loss=0.0973, over 1615427.57 frames. ], batch size: 19, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:40:10,778 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:32,844 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2678, 4.2221, 3.7800, 1.7954, 3.7119, 3.8502, 3.9255, 3.5049], + device='cuda:2'), covar=tensor([0.1020, 0.0655, 0.1171, 0.5146, 0.0915, 0.0907, 0.1392, 0.1089], + device='cuda:2'), in_proj_covar=tensor([0.0418, 0.0322, 0.0356, 0.0437, 0.0343, 0.0319, 0.0331, 0.0282], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:40:34,123 INFO [train.py:901] (2/4) Epoch 7, batch 6700, loss[loss=0.234, simple_loss=0.3052, pruned_loss=0.08135, over 7564.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3317, pruned_loss=0.09778, over 1615971.95 frames. 
], batch size: 18, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:40:38,422 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5504, 2.0406, 3.2263, 2.6289, 2.6723, 2.1102, 1.7036, 1.3920], + device='cuda:2'), covar=tensor([0.2544, 0.2886, 0.0787, 0.1662, 0.1557, 0.1577, 0.1361, 0.3148], + device='cuda:2'), in_proj_covar=tensor([0.0831, 0.0777, 0.0675, 0.0773, 0.0866, 0.0714, 0.0668, 0.0705], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:40:51,595 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:55,049 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:55,085 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8406, 1.4931, 1.5575, 1.2382, 1.0147, 1.3628, 1.4904, 1.4110], + device='cuda:2'), covar=tensor([0.0510, 0.1211, 0.1742, 0.1376, 0.0593, 0.1501, 0.0671, 0.0591], + device='cuda:2'), in_proj_covar=tensor([0.0113, 0.0166, 0.0206, 0.0169, 0.0115, 0.0172, 0.0126, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 04:40:58,789 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 3.031e+02 3.759e+02 4.673e+02 1.170e+03, threshold=7.519e+02, percent-clipped=9.0 +2023-02-06 04:41:07,993 INFO [train.py:901] (2/4) Epoch 7, batch 6750, loss[loss=0.2607, simple_loss=0.3193, pruned_loss=0.1011, over 7797.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.331, pruned_loss=0.09736, over 1615845.90 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:41:12,180 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 04:41:42,537 INFO [train.py:901] (2/4) Epoch 7, batch 6800, loss[loss=0.2464, simple_loss=0.3226, pruned_loss=0.08508, over 8239.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.3312, pruned_loss=0.09751, over 1612063.03 frames. ], batch size: 22, lr: 1.07e-02, grad_scale: 16.0 +2023-02-06 04:41:47,229 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 04:42:08,558 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.859e+02 3.364e+02 4.161e+02 9.626e+02, threshold=6.728e+02, percent-clipped=3.0 +2023-02-06 04:42:09,378 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2429, 4.1686, 3.8246, 1.9090, 3.6790, 3.6166, 3.8356, 3.3257], + device='cuda:2'), covar=tensor([0.0735, 0.0519, 0.0892, 0.4278, 0.0875, 0.0940, 0.1099, 0.0904], + device='cuda:2'), in_proj_covar=tensor([0.0414, 0.0323, 0.0355, 0.0434, 0.0339, 0.0320, 0.0328, 0.0281], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:42:11,504 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:42:18,203 INFO [train.py:901] (2/4) Epoch 7, batch 6850, loss[loss=0.3136, simple_loss=0.3802, pruned_loss=0.1235, over 8246.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3312, pruned_loss=0.09765, over 1608956.45 frames. 
], batch size: 24, lr: 1.06e-02, grad_scale: 16.0 +2023-02-06 04:42:19,222 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=5.28 vs. limit=5.0 +2023-02-06 04:42:25,226 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.24 vs. limit=2.0 +2023-02-06 04:42:34,174 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 04:42:50,635 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55397.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:42:52,432 INFO [train.py:901] (2/4) Epoch 7, batch 6900, loss[loss=0.2203, simple_loss=0.2966, pruned_loss=0.07199, over 8248.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3316, pruned_loss=0.09792, over 1608605.29 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:43:09,634 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55422.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:43:19,277 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.767e+02 3.318e+02 4.413e+02 7.718e+02, threshold=6.635e+02, percent-clipped=1.0 +2023-02-06 04:43:28,911 INFO [train.py:901] (2/4) Epoch 7, batch 6950, loss[loss=0.2464, simple_loss=0.3281, pruned_loss=0.08235, over 8482.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3315, pruned_loss=0.09755, over 1610460.66 frames. ], batch size: 28, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:43:34,097 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 04:43:46,611 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 04:44:02,255 INFO [train.py:901] (2/4) Epoch 7, batch 7000, loss[loss=0.2586, simple_loss=0.3158, pruned_loss=0.1007, over 7528.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3312, pruned_loss=0.09675, over 1614961.83 frames. ], batch size: 18, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:44:09,454 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:44:28,246 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.942e+02 3.699e+02 4.542e+02 1.220e+03, threshold=7.399e+02, percent-clipped=11.0 +2023-02-06 04:44:37,052 INFO [train.py:901] (2/4) Epoch 7, batch 7050, loss[loss=0.258, simple_loss=0.3363, pruned_loss=0.08984, over 6670.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3308, pruned_loss=0.09669, over 1611530.14 frames. 
], batch size: 73, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:44:54,229 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:09,300 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:11,225 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2327, 1.5896, 1.4539, 1.2675, 1.1273, 1.3728, 1.7183, 1.6213], + device='cuda:2'), covar=tensor([0.0531, 0.1186, 0.1758, 0.1441, 0.0554, 0.1476, 0.0709, 0.0566], + device='cuda:2'), in_proj_covar=tensor([0.0112, 0.0166, 0.0204, 0.0167, 0.0112, 0.0171, 0.0126, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 04:45:11,741 INFO [train.py:901] (2/4) Epoch 7, batch 7100, loss[loss=0.3103, simple_loss=0.3699, pruned_loss=0.1253, over 8581.00 frames. ], tot_loss[loss=0.2613, simple_loss=0.3299, pruned_loss=0.09631, over 1609922.41 frames. ], batch size: 31, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:45:26,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:29,035 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.86 vs. limit=2.0 +2023-02-06 04:45:29,489 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:36,689 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.720e+02 3.297e+02 4.008e+02 7.250e+02, threshold=6.594e+02, percent-clipped=0.0 +2023-02-06 04:45:45,950 INFO [train.py:901] (2/4) Epoch 7, batch 7150, loss[loss=0.2481, simple_loss=0.3198, pruned_loss=0.08822, over 8132.00 frames. ], tot_loss[loss=0.2606, simple_loss=0.3297, pruned_loss=0.09579, over 1612609.45 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:46:10,053 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9558, 1.4885, 3.4360, 1.5469, 2.3093, 3.9134, 3.8695, 3.3506], + device='cuda:2'), covar=tensor([0.1066, 0.1422, 0.0332, 0.1878, 0.0829, 0.0229, 0.0363, 0.0638], + device='cuda:2'), in_proj_covar=tensor([0.0244, 0.0273, 0.0231, 0.0270, 0.0242, 0.0214, 0.0278, 0.0280], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 04:46:14,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:46:21,652 INFO [train.py:901] (2/4) Epoch 7, batch 7200, loss[loss=0.2276, simple_loss=0.2911, pruned_loss=0.08205, over 5114.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.3296, pruned_loss=0.09631, over 1609877.96 frames. 
], batch size: 11, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:46:42,577 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6948, 1.8409, 2.2578, 1.7367, 1.1337, 2.3371, 0.4081, 1.2947], + device='cuda:2'), covar=tensor([0.3428, 0.1951, 0.0611, 0.2602, 0.5223, 0.0624, 0.4391, 0.2208], + device='cuda:2'), in_proj_covar=tensor([0.0142, 0.0144, 0.0084, 0.0191, 0.0230, 0.0089, 0.0145, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:46:47,138 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.857e+02 3.487e+02 4.455e+02 1.230e+03, threshold=6.974e+02, percent-clipped=5.0 +2023-02-06 04:46:49,878 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5888, 4.6304, 4.0372, 1.8942, 4.0627, 4.1665, 4.2462, 3.7098], + device='cuda:2'), covar=tensor([0.0874, 0.0681, 0.1163, 0.4967, 0.0861, 0.0744, 0.1445, 0.0872], + device='cuda:2'), in_proj_covar=tensor([0.0411, 0.0325, 0.0354, 0.0436, 0.0336, 0.0315, 0.0327, 0.0277], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:46:55,810 INFO [train.py:901] (2/4) Epoch 7, batch 7250, loss[loss=0.2435, simple_loss=0.3134, pruned_loss=0.08683, over 7942.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3312, pruned_loss=0.09719, over 1609238.36 frames. ], batch size: 20, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:47:07,795 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:47:31,018 INFO [train.py:901] (2/4) Epoch 7, batch 7300, loss[loss=0.2919, simple_loss=0.3585, pruned_loss=0.1127, over 8495.00 frames. ], tot_loss[loss=0.2641, simple_loss=0.3324, pruned_loss=0.09788, over 1610671.26 frames. ], batch size: 28, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:47:55,727 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.887e+02 3.402e+02 4.424e+02 1.529e+03, threshold=6.804e+02, percent-clipped=7.0 +2023-02-06 04:48:04,220 INFO [train.py:901] (2/4) Epoch 7, batch 7350, loss[loss=0.2608, simple_loss=0.3395, pruned_loss=0.09104, over 8598.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.3318, pruned_loss=0.09786, over 1613129.81 frames. ], batch size: 31, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:48:15,840 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:26,612 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55881.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:27,829 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 04:48:40,221 INFO [train.py:901] (2/4) Epoch 7, batch 7400, loss[loss=0.2849, simple_loss=0.3531, pruned_loss=0.1083, over 8449.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.332, pruned_loss=0.09752, over 1611385.45 frames. ], batch size: 27, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:48:45,313 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:50,021 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 04:49:01,547 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 04:49:05,903 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.646e+02 3.471e+02 4.467e+02 1.348e+03, threshold=6.942e+02, percent-clipped=5.0 +2023-02-06 04:49:11,666 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:49:14,916 INFO [train.py:901] (2/4) Epoch 7, batch 7450, loss[loss=0.2275, simple_loss=0.3106, pruned_loss=0.07213, over 8601.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.331, pruned_loss=0.09673, over 1613839.30 frames. ], batch size: 31, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:49:25,938 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 04:49:28,832 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:49:29,058 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 04:49:50,666 INFO [train.py:901] (2/4) Epoch 7, batch 7500, loss[loss=0.239, simple_loss=0.3112, pruned_loss=0.08345, over 8030.00 frames. ], tot_loss[loss=0.2605, simple_loss=0.3294, pruned_loss=0.09579, over 1612568.07 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:50:17,129 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.816e+02 3.537e+02 4.737e+02 9.745e+02, threshold=7.074e+02, percent-clipped=6.0 +2023-02-06 04:50:25,618 INFO [train.py:901] (2/4) Epoch 7, batch 7550, loss[loss=0.3643, simple_loss=0.3938, pruned_loss=0.1674, over 8325.00 frames. ], tot_loss[loss=0.2598, simple_loss=0.3287, pruned_loss=0.09543, over 1611642.21 frames. ], batch size: 25, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:50:55,597 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8969, 2.9237, 3.3883, 2.0817, 1.5989, 3.4126, 0.5735, 2.1130], + device='cuda:2'), covar=tensor([0.2241, 0.2098, 0.0525, 0.3494, 0.5768, 0.0854, 0.5085, 0.2513], + device='cuda:2'), in_proj_covar=tensor([0.0145, 0.0148, 0.0088, 0.0199, 0.0237, 0.0091, 0.0150, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:50:58,642 INFO [train.py:901] (2/4) Epoch 7, batch 7600, loss[loss=0.3341, simple_loss=0.3849, pruned_loss=0.1416, over 7236.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3288, pruned_loss=0.09579, over 1610707.76 frames. ], batch size: 71, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:51:06,788 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:51:25,286 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.750e+02 3.495e+02 4.537e+02 9.121e+02, threshold=6.990e+02, percent-clipped=3.0 +2023-02-06 04:51:34,923 INFO [train.py:901] (2/4) Epoch 7, batch 7650, loss[loss=0.2128, simple_loss=0.2795, pruned_loss=0.07305, over 7523.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3271, pruned_loss=0.09489, over 1610284.96 frames. ], batch size: 18, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:51:44,426 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:03,621 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. 
limit=2.0 +2023-02-06 04:52:08,618 INFO [train.py:901] (2/4) Epoch 7, batch 7700, loss[loss=0.2577, simple_loss=0.3337, pruned_loss=0.0908, over 7778.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3277, pruned_loss=0.09507, over 1610924.74 frames. ], batch size: 19, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:52:16,096 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:26,757 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:34,679 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.791e+02 3.394e+02 3.978e+02 9.035e+02, threshold=6.788e+02, percent-clipped=3.0 +2023-02-06 04:52:34,708 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 04:52:44,010 INFO [train.py:901] (2/4) Epoch 7, batch 7750, loss[loss=0.2887, simple_loss=0.3382, pruned_loss=0.1196, over 7271.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3286, pruned_loss=0.09504, over 1615016.60 frames. ], batch size: 16, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:53:11,820 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6643, 1.8676, 2.2866, 1.8920, 1.0880, 2.3880, 0.3987, 1.2662], + device='cuda:2'), covar=tensor([0.2917, 0.2285, 0.0514, 0.1825, 0.6073, 0.0549, 0.4482, 0.2030], + device='cuda:2'), in_proj_covar=tensor([0.0144, 0.0147, 0.0086, 0.0195, 0.0235, 0.0089, 0.0146, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:53:18,259 INFO [train.py:901] (2/4) Epoch 7, batch 7800, loss[loss=0.2347, simple_loss=0.2914, pruned_loss=0.08902, over 7691.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3291, pruned_loss=0.09543, over 1616469.64 frames. ], batch size: 18, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:53:35,707 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:53:36,356 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1296, 1.6244, 3.2280, 1.3283, 2.2144, 3.4100, 3.5399, 2.7542], + device='cuda:2'), covar=tensor([0.0977, 0.1508, 0.0415, 0.2212, 0.0927, 0.0358, 0.0428, 0.0786], + device='cuda:2'), in_proj_covar=tensor([0.0246, 0.0274, 0.0234, 0.0271, 0.0243, 0.0213, 0.0278, 0.0278], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 04:53:42,619 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.702e+02 3.307e+02 4.383e+02 8.490e+02, threshold=6.613e+02, percent-clipped=4.0 +2023-02-06 04:53:51,370 INFO [train.py:901] (2/4) Epoch 7, batch 7850, loss[loss=0.3167, simple_loss=0.3789, pruned_loss=0.1272, over 8340.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3305, pruned_loss=0.09652, over 1619589.86 frames. ], batch size: 26, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:54:24,870 INFO [train.py:901] (2/4) Epoch 7, batch 7900, loss[loss=0.2924, simple_loss=0.3607, pruned_loss=0.1121, over 8678.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3316, pruned_loss=0.09719, over 1616148.81 frames. 
], batch size: 39, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:54:41,689 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0383, 2.3282, 1.8448, 2.8536, 1.4687, 1.5034, 2.0254, 2.4088], + device='cuda:2'), covar=tensor([0.0774, 0.0898, 0.1145, 0.0429, 0.1247, 0.1665, 0.1058, 0.0835], + device='cuda:2'), in_proj_covar=tensor([0.0253, 0.0239, 0.0277, 0.0221, 0.0232, 0.0269, 0.0272, 0.0234], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 04:54:49,422 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.758e+02 3.520e+02 4.424e+02 1.197e+03, threshold=7.039e+02, percent-clipped=9.0 +2023-02-06 04:54:58,057 INFO [train.py:901] (2/4) Epoch 7, batch 7950, loss[loss=0.2428, simple_loss=0.322, pruned_loss=0.08177, over 8289.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3309, pruned_loss=0.09733, over 1610598.23 frames. ], batch size: 23, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:55:19,787 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9567, 4.1648, 2.7599, 2.7921, 3.1287, 2.2506, 2.9195, 2.8871], + device='cuda:2'), covar=tensor([0.1526, 0.0242, 0.0776, 0.0762, 0.0631, 0.1152, 0.0961, 0.0885], + device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0236, 0.0312, 0.0300, 0.0307, 0.0317, 0.0336, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 04:55:19,811 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:31,643 INFO [train.py:901] (2/4) Epoch 7, batch 8000, loss[loss=0.3046, simple_loss=0.368, pruned_loss=0.1206, over 8594.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3317, pruned_loss=0.09769, over 1607973.59 frames. 
], batch size: 39, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:55:36,302 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4382, 2.5026, 1.8287, 2.1243, 2.1328, 1.4621, 1.7850, 1.8653], + device='cuda:2'), covar=tensor([0.1148, 0.0333, 0.0817, 0.0452, 0.0492, 0.1139, 0.0803, 0.0799], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0239, 0.0312, 0.0301, 0.0308, 0.0317, 0.0337, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 04:55:36,307 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:36,855 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:56,103 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.809e+02 3.378e+02 4.457e+02 7.052e+02, threshold=6.755e+02, percent-clipped=1.0 +2023-02-06 04:55:59,642 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56541.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:56:03,027 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9420, 1.2514, 4.2324, 1.5390, 3.5640, 3.4200, 3.7164, 3.6082], + device='cuda:2'), covar=tensor([0.0638, 0.4521, 0.0530, 0.3355, 0.1362, 0.0872, 0.0657, 0.0746], + device='cuda:2'), in_proj_covar=tensor([0.0400, 0.0532, 0.0494, 0.0477, 0.0533, 0.0450, 0.0452, 0.0510], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 04:56:04,838 INFO [train.py:901] (2/4) Epoch 7, batch 8050, loss[loss=0.262, simple_loss=0.3237, pruned_loss=0.1001, over 7929.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.3288, pruned_loss=0.09669, over 1599358.09 frames. ], batch size: 20, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:56:05,401 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 04:56:37,916 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 04:56:42,615 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:56:43,091 INFO [train.py:901] (2/4) Epoch 8, batch 0, loss[loss=0.2522, simple_loss=0.3286, pruned_loss=0.0879, over 8254.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3286, pruned_loss=0.0879, over 8254.00 frames. ], batch size: 24, lr: 9.92e-03, grad_scale: 8.0 +2023-02-06 04:56:43,091 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 04:56:54,074 INFO [train.py:935] (2/4) Epoch 8, validation: loss=0.205, simple_loss=0.3028, pruned_loss=0.05355, over 944034.00 frames. 
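
A note on the recurring entry types in this log. The WARNING lines come from the recipe's utterance filter, which drops cuts that are too short or too long to batch efficiently before the sampler sees them. Below is a minimal sketch of such a filter; the roughly 1 s / 20 s bounds are assumptions inferred from the durations actually excluded in this log (0.92 s on the short side, 25-30 s on the long side), and the authoritative thresholds live in the recipe's train.py:

```python
import logging

# Sketch of the duration filter behind the "Exclude cut ..." warnings.
# The 1 s / 20 s bounds are assumptions inferred from this log;
# see the recipe's train.py for the real values.
def remove_short_and_long_utt(c) -> bool:
    if c.duration < 1.0 or c.duration > 20.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# Applied lazily to the lhotse CutSet before sampling, e.g.:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```

For the optimizer entries, the five numbers after "grad-norm quartiles" appear to be the min/25%/median/75%/max of recently observed gradient norms, and the clipping threshold is Clipping_scale times the median (e.g. threshold=7.007e+02 = 2.0 × 3.503e+02 earlier in this log), with percent-clipped the share of recent batches whose gradient norm exceeded that threshold. The grad_scale value is the automatic-mixed-precision loss scale, which the log shows doubling (8.0 to 16.0) and halving back as overflow statistics change.
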
+2023-02-06 04:56:54,075 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 04:56:54,911 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3510, 4.3434, 3.9303, 1.7898, 3.8450, 3.8421, 3.9993, 3.5948], + device='cuda:2'), covar=tensor([0.0928, 0.0658, 0.1237, 0.5355, 0.0828, 0.1085, 0.1276, 0.0988], + device='cuda:2'), in_proj_covar=tensor([0.0419, 0.0327, 0.0354, 0.0451, 0.0343, 0.0322, 0.0334, 0.0287], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 04:57:08,603 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 04:57:10,757 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:57:22,341 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:57:28,988 INFO [train.py:901] (2/4) Epoch 8, batch 50, loss[loss=0.305, simple_loss=0.3555, pruned_loss=0.1273, over 8582.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.335, pruned_loss=0.09765, over 369156.74 frames. ], batch size: 31, lr: 9.92e-03, grad_scale: 8.0 +2023-02-06 04:57:31,765 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.831e+02 3.488e+02 4.265e+02 1.069e+03, threshold=6.975e+02, percent-clipped=2.0 +2023-02-06 04:57:39,011 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 04:57:43,126 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 04:58:03,656 INFO [train.py:901] (2/4) Epoch 8, batch 100, loss[loss=0.2643, simple_loss=0.3392, pruned_loss=0.09466, over 8133.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3309, pruned_loss=0.09658, over 647841.72 frames. ], batch size: 22, lr: 9.91e-03, grad_scale: 8.0 +2023-02-06 04:58:05,729 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 04:58:38,289 INFO [train.py:901] (2/4) Epoch 8, batch 150, loss[loss=0.2432, simple_loss=0.3156, pruned_loss=0.08542, over 8292.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.3283, pruned_loss=0.0951, over 858079.46 frames. ], batch size: 23, lr: 9.91e-03, grad_scale: 8.0 +2023-02-06 04:58:40,461 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0897, 1.2358, 4.2744, 1.5109, 3.6993, 3.5862, 3.8516, 3.6680], + device='cuda:2'), covar=tensor([0.0451, 0.3967, 0.0472, 0.3203, 0.1134, 0.0813, 0.0510, 0.0694], + device='cuda:2'), in_proj_covar=tensor([0.0402, 0.0531, 0.0489, 0.0473, 0.0538, 0.0450, 0.0448, 0.0507], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 04:58:40,996 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.710e+02 3.372e+02 4.105e+02 8.611e+02, threshold=6.744e+02, percent-clipped=2.0 +2023-02-06 04:59:12,800 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 04:59:13,816 INFO [train.py:901] (2/4) Epoch 8, batch 200, loss[loss=0.2799, simple_loss=0.3453, pruned_loss=0.1073, over 8350.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3292, pruned_loss=0.0954, over 1030724.26 frames. 
], batch size: 26, lr: 9.90e-03, grad_scale: 8.0 +2023-02-06 04:59:41,302 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56821.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:59:48,621 INFO [train.py:901] (2/4) Epoch 8, batch 250, loss[loss=0.2409, simple_loss=0.3279, pruned_loss=0.07691, over 8359.00 frames. ], tot_loss[loss=0.2597, simple_loss=0.3285, pruned_loss=0.0955, over 1158870.91 frames. ], batch size: 24, lr: 9.90e-03, grad_scale: 8.0 +2023-02-06 04:59:51,336 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.703e+02 3.318e+02 4.204e+02 1.022e+03, threshold=6.636e+02, percent-clipped=1.0 +2023-02-06 04:59:56,872 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 05:00:06,240 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 05:00:21,365 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:23,888 INFO [train.py:901] (2/4) Epoch 8, batch 300, loss[loss=0.2947, simple_loss=0.3486, pruned_loss=0.1204, over 8028.00 frames. ], tot_loss[loss=0.2594, simple_loss=0.3286, pruned_loss=0.0951, over 1260911.62 frames. ], batch size: 22, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:00:26,000 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56885.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:00:28,630 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4416, 1.2310, 1.4445, 1.1126, 0.8579, 1.2406, 1.2018, 1.0279], + device='cuda:2'), covar=tensor([0.0608, 0.1284, 0.1851, 0.1425, 0.0566, 0.1556, 0.0697, 0.0668], + device='cuda:2'), in_proj_covar=tensor([0.0112, 0.0162, 0.0201, 0.0164, 0.0112, 0.0169, 0.0124, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:00:38,046 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:54,958 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:58,776 INFO [train.py:901] (2/4) Epoch 8, batch 350, loss[loss=0.3056, simple_loss=0.3615, pruned_loss=0.1249, over 8362.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.328, pruned_loss=0.09433, over 1341574.94 frames. ], batch size: 24, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:01,452 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.612e+02 3.168e+02 3.951e+02 1.059e+03, threshold=6.336e+02, percent-clipped=3.0 +2023-02-06 05:01:02,321 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0237, 1.6047, 1.6205, 1.4015, 1.2031, 1.4802, 1.6996, 1.7640], + device='cuda:2'), covar=tensor([0.0548, 0.1172, 0.1751, 0.1331, 0.0590, 0.1466, 0.0717, 0.0516], + device='cuda:2'), in_proj_covar=tensor([0.0111, 0.0162, 0.0200, 0.0163, 0.0112, 0.0168, 0.0123, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:01:33,167 INFO [train.py:901] (2/4) Epoch 8, batch 400, loss[loss=0.2552, simple_loss=0.3108, pruned_loss=0.09984, over 7257.00 frames. ], tot_loss[loss=0.2603, simple_loss=0.3293, pruned_loss=0.09566, over 1404709.30 frames. 
], batch size: 16, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:45,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57000.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:01:52,587 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:01:53,837 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:02:07,049 INFO [train.py:901] (2/4) Epoch 8, batch 450, loss[loss=0.273, simple_loss=0.3507, pruned_loss=0.09769, over 8641.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3297, pruned_loss=0.09552, over 1451745.84 frames. ], batch size: 49, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:02:10,304 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 2.769e+02 3.532e+02 4.551e+02 9.004e+02, threshold=7.064e+02, percent-clipped=7.0 +2023-02-06 05:02:21,405 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.58 vs. limit=5.0 +2023-02-06 05:02:33,934 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0005, 1.3322, 4.2097, 1.5721, 3.5234, 3.3768, 3.6425, 3.5994], + device='cuda:2'), covar=tensor([0.0604, 0.4491, 0.0517, 0.3138, 0.1413, 0.0906, 0.0698, 0.0767], + device='cuda:2'), in_proj_covar=tensor([0.0403, 0.0531, 0.0492, 0.0477, 0.0544, 0.0456, 0.0454, 0.0508], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 05:02:41,866 INFO [train.py:901] (2/4) Epoch 8, batch 500, loss[loss=0.2224, simple_loss=0.2979, pruned_loss=0.07348, over 8038.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3308, pruned_loss=0.09603, over 1490591.06 frames. ], batch size: 22, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:03:15,889 INFO [train.py:901] (2/4) Epoch 8, batch 550, loss[loss=0.2636, simple_loss=0.323, pruned_loss=0.1021, over 7185.00 frames. ], tot_loss[loss=0.2587, simple_loss=0.3281, pruned_loss=0.09464, over 1517108.03 frames. ], batch size: 16, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:03:18,519 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.804e+02 2.761e+02 3.532e+02 4.192e+02 1.400e+03, threshold=7.064e+02, percent-clipped=6.0 +2023-02-06 05:03:39,525 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:03:40,892 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5473, 5.7055, 4.9631, 2.3152, 5.0248, 5.3777, 5.2764, 4.9052], + device='cuda:2'), covar=tensor([0.0633, 0.0412, 0.0915, 0.4298, 0.0666, 0.0572, 0.0990, 0.0563], + device='cuda:2'), in_proj_covar=tensor([0.0418, 0.0324, 0.0354, 0.0441, 0.0344, 0.0322, 0.0332, 0.0286], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:03:50,883 INFO [train.py:901] (2/4) Epoch 8, batch 600, loss[loss=0.255, simple_loss=0.3142, pruned_loss=0.09786, over 7823.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3259, pruned_loss=0.09296, over 1536540.69 frames. ], batch size: 20, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:04:03,002 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 05:04:06,350 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57204.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:13,912 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:25,748 INFO [train.py:901] (2/4) Epoch 8, batch 650, loss[loss=0.231, simple_loss=0.3036, pruned_loss=0.07922, over 7927.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3255, pruned_loss=0.09294, over 1550880.03 frames. ], batch size: 20, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:04:28,378 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.474e+02 3.242e+02 4.284e+02 1.059e+03, threshold=6.484e+02, percent-clipped=6.0 +2023-02-06 05:04:41,995 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57256.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:04:46,653 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:50,069 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8440, 3.8253, 2.3600, 2.2778, 2.9921, 1.6869, 2.4475, 2.6079], + device='cuda:2'), covar=tensor([0.1407, 0.0243, 0.0781, 0.0778, 0.0531, 0.1173, 0.0990, 0.0977], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0232, 0.0309, 0.0298, 0.0306, 0.0312, 0.0335, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 05:04:51,987 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:59,517 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57280.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:00,217 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57281.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 05:05:00,683 INFO [train.py:901] (2/4) Epoch 8, batch 700, loss[loss=0.2561, simple_loss=0.3267, pruned_loss=0.09282, over 8135.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.326, pruned_loss=0.09233, over 1570712.87 frames. ], batch size: 22, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:34,918 INFO [train.py:901] (2/4) Epoch 8, batch 750, loss[loss=0.2194, simple_loss=0.2835, pruned_loss=0.07766, over 7695.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3263, pruned_loss=0.09341, over 1580057.12 frames. ], batch size: 18, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:38,348 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.846e+02 3.371e+02 4.091e+02 7.333e+02, threshold=6.742e+02, percent-clipped=1.0 +2023-02-06 05:05:49,677 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 05:05:51,156 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:52,579 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:57,731 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 05:06:09,705 INFO [train.py:901] (2/4) Epoch 8, batch 800, loss[loss=0.2807, simple_loss=0.3512, pruned_loss=0.1051, over 8512.00 frames. 
], tot_loss[loss=0.2573, simple_loss=0.327, pruned_loss=0.09377, over 1590319.22 frames. ], batch size: 28, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:11,944 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:33,596 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:44,183 INFO [train.py:901] (2/4) Epoch 8, batch 850, loss[loss=0.2376, simple_loss=0.3226, pruned_loss=0.0763, over 8500.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3269, pruned_loss=0.09324, over 1598341.29 frames. ], batch size: 26, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:46,892 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.664e+02 3.287e+02 4.255e+02 8.769e+02, threshold=6.575e+02, percent-clipped=4.0 +2023-02-06 05:07:11,115 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:12,475 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:17,825 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:19,065 INFO [train.py:901] (2/4) Epoch 8, batch 900, loss[loss=0.224, simple_loss=0.3031, pruned_loss=0.07248, over 8577.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3264, pruned_loss=0.09263, over 1605720.65 frames. ], batch size: 31, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:40,918 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:52,601 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8253, 2.0895, 2.3681, 1.9210, 1.1924, 2.3618, 0.5074, 1.4687], + device='cuda:2'), covar=tensor([0.3059, 0.1603, 0.0619, 0.1896, 0.4932, 0.0722, 0.3798, 0.2289], + device='cuda:2'), in_proj_covar=tensor([0.0144, 0.0144, 0.0085, 0.0191, 0.0233, 0.0090, 0.0145, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:07:53,660 INFO [train.py:901] (2/4) Epoch 8, batch 950, loss[loss=0.285, simple_loss=0.3473, pruned_loss=0.1113, over 8022.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.328, pruned_loss=0.09413, over 1608993.26 frames. ], batch size: 22, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:56,420 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.713e+02 3.197e+02 4.416e+02 7.629e+02, threshold=6.394e+02, percent-clipped=6.0 +2023-02-06 05:07:56,676 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57536.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:04,585 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:13,024 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:14,313 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-06 05:08:14,508 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:29,125 INFO [train.py:901] (2/4) Epoch 8, batch 1000, loss[loss=0.2339, simple_loss=0.2978, pruned_loss=0.08502, over 7434.00 frames. ], tot_loss[loss=0.256, simple_loss=0.326, pruned_loss=0.09297, over 1610087.72 frames. ], batch size: 17, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:08:45,896 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:47,899 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 05:09:00,600 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 05:09:03,908 INFO [train.py:901] (2/4) Epoch 8, batch 1050, loss[loss=0.2401, simple_loss=0.3144, pruned_loss=0.08285, over 8193.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3276, pruned_loss=0.09368, over 1618016.59 frames. ], batch size: 23, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:09:06,640 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.778e+02 2.733e+02 3.382e+02 4.210e+02 1.523e+03, threshold=6.765e+02, percent-clipped=11.0 +2023-02-06 05:09:10,049 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:24,434 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57663.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:26,526 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:31,690 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:37,588 INFO [train.py:901] (2/4) Epoch 8, batch 1100, loss[loss=0.3033, simple_loss=0.3592, pruned_loss=0.1237, over 7053.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3288, pruned_loss=0.09468, over 1618003.03 frames. ], batch size: 72, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:10:05,401 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:08,893 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,257 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57728.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,699 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 05:10:12,767 INFO [train.py:901] (2/4) Epoch 8, batch 1150, loss[loss=0.2503, simple_loss=0.3077, pruned_loss=0.09644, over 7274.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3285, pruned_loss=0.09463, over 1615421.55 frames. 
], batch size: 16, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:10:15,548 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.752e+02 3.349e+02 4.211e+02 1.172e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 05:10:26,719 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:26,782 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:28,140 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:32,805 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:48,142 INFO [train.py:901] (2/4) Epoch 8, batch 1200, loss[loss=0.2146, simple_loss=0.2965, pruned_loss=0.06637, over 7644.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3265, pruned_loss=0.09338, over 1612820.43 frames. ], batch size: 19, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:11:10,444 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7381, 5.6697, 5.0097, 2.3553, 5.1543, 5.5155, 5.3585, 5.2605], + device='cuda:2'), covar=tensor([0.0439, 0.0343, 0.0690, 0.4445, 0.0597, 0.0533, 0.0776, 0.0467], + device='cuda:2'), in_proj_covar=tensor([0.0417, 0.0325, 0.0360, 0.0446, 0.0350, 0.0325, 0.0337, 0.0284], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:11:17,881 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:23,243 INFO [train.py:901] (2/4) Epoch 8, batch 1250, loss[loss=0.2476, simple_loss=0.3222, pruned_loss=0.08648, over 8641.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.3269, pruned_loss=0.09361, over 1616308.35 frames. ], batch size: 34, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:11:25,902 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.817e+02 3.577e+02 4.191e+02 8.690e+02, threshold=7.155e+02, percent-clipped=5.0 +2023-02-06 05:11:41,409 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:53,669 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:58,364 INFO [train.py:901] (2/4) Epoch 8, batch 1300, loss[loss=0.2519, simple_loss=0.3221, pruned_loss=0.09089, over 7804.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3269, pruned_loss=0.09346, over 1615948.52 frames. ], batch size: 20, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:12:14,828 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 05:12:24,233 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57919.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:32,230 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:33,387 INFO [train.py:901] (2/4) Epoch 8, batch 1350, loss[loss=0.2511, simple_loss=0.333, pruned_loss=0.08462, over 8730.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3267, pruned_loss=0.09343, over 1617042.00 frames. 
], batch size: 30, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:12:36,120 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.787e+02 3.281e+02 4.089e+02 1.129e+03, threshold=6.562e+02, percent-clipped=4.0 +2023-02-06 05:12:38,227 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3999, 1.4479, 1.6540, 1.3872, 1.2778, 1.5076, 1.7827, 1.6615], + device='cuda:2'), covar=tensor([0.0485, 0.1250, 0.1715, 0.1387, 0.0538, 0.1485, 0.0631, 0.0587], + device='cuda:2'), in_proj_covar=tensor([0.0109, 0.0161, 0.0199, 0.0164, 0.0112, 0.0169, 0.0123, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:12:38,234 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:41,652 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:49,717 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:01,726 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57972.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:05,180 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:08,418 INFO [train.py:901] (2/4) Epoch 8, batch 1400, loss[loss=0.2261, simple_loss=0.2924, pruned_loss=0.07986, over 7439.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3275, pruned_loss=0.09469, over 1616690.80 frames. ], batch size: 17, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:23,095 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:41,345 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 05:13:42,226 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8499, 2.7955, 3.2120, 2.2322, 1.7086, 3.3185, 0.5616, 1.9467], + device='cuda:2'), covar=tensor([0.2530, 0.1458, 0.0453, 0.2559, 0.4311, 0.0568, 0.4194, 0.2218], + device='cuda:2'), in_proj_covar=tensor([0.0142, 0.0143, 0.0083, 0.0189, 0.0230, 0.0089, 0.0143, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:13:43,356 INFO [train.py:901] (2/4) Epoch 8, batch 1450, loss[loss=0.2459, simple_loss=0.303, pruned_loss=0.09441, over 7705.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3282, pruned_loss=0.09502, over 1615926.84 frames. ], batch size: 18, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:46,037 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.668e+02 3.298e+02 4.223e+02 1.032e+03, threshold=6.596e+02, percent-clipped=5.0 +2023-02-06 05:14:18,670 INFO [train.py:901] (2/4) Epoch 8, batch 1500, loss[loss=0.1875, simple_loss=0.2633, pruned_loss=0.05585, over 7694.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.3272, pruned_loss=0.09409, over 1614425.41 frames. 
], batch size: 18, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:28,078 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:39,241 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5258, 1.5740, 1.6807, 1.4802, 0.9859, 1.7313, 0.0962, 0.9817], + device='cuda:2'), covar=tensor([0.3062, 0.2073, 0.0735, 0.1292, 0.5457, 0.0817, 0.3511, 0.1981], + device='cuda:2'), in_proj_covar=tensor([0.0145, 0.0145, 0.0085, 0.0189, 0.0233, 0.0090, 0.0145, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:14:49,420 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4512, 1.9049, 2.1054, 1.1670, 2.2570, 1.3688, 0.6099, 1.6643], + device='cuda:2'), covar=tensor([0.0394, 0.0205, 0.0142, 0.0297, 0.0221, 0.0512, 0.0492, 0.0189], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0275, 0.0224, 0.0331, 0.0270, 0.0423, 0.0328, 0.0307], + device='cuda:2'), out_proj_covar=tensor([1.1037e-04, 8.3422e-05, 6.7244e-05, 9.9829e-05, 8.3318e-05, 1.3999e-04, + 1.0181e-04, 9.4163e-05], device='cuda:2') +2023-02-06 05:14:52,821 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:53,264 INFO [train.py:901] (2/4) Epoch 8, batch 1550, loss[loss=0.2104, simple_loss=0.2895, pruned_loss=0.06562, over 7787.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3265, pruned_loss=0.09339, over 1614897.28 frames. ], batch size: 19, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:54,854 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3749, 1.8527, 2.0249, 1.1769, 2.1936, 1.3925, 0.5486, 1.6169], + device='cuda:2'), covar=tensor([0.0381, 0.0168, 0.0122, 0.0257, 0.0190, 0.0464, 0.0438, 0.0160], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0275, 0.0225, 0.0332, 0.0271, 0.0423, 0.0329, 0.0308], + device='cuda:2'), out_proj_covar=tensor([1.1069e-04, 8.3546e-05, 6.7375e-05, 1.0023e-04, 8.3492e-05, 1.4010e-04, + 1.0201e-04, 9.4436e-05], device='cuda:2') +2023-02-06 05:14:56,007 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.601e+02 3.218e+02 3.979e+02 6.246e+02, threshold=6.435e+02, percent-clipped=0.0 +2023-02-06 05:15:08,670 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5893, 5.7902, 4.8809, 2.3740, 5.0495, 5.2848, 5.3444, 4.9812], + device='cuda:2'), covar=tensor([0.0706, 0.0442, 0.0993, 0.4478, 0.0640, 0.0638, 0.1177, 0.0597], + device='cuda:2'), in_proj_covar=tensor([0.0421, 0.0328, 0.0357, 0.0446, 0.0349, 0.0326, 0.0335, 0.0286], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:15:10,025 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:12,045 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58159.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:14,755 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:27,768 INFO [train.py:901] (2/4) Epoch 8, batch 1600, loss[loss=0.2544, simple_loss=0.3383, pruned_loss=0.08524, over 8481.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3273, pruned_loss=0.09419, over 1617621.25 frames. 
], batch size: 28, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:15:37,318 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:47,377 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:54,047 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:00,075 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58228.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:02,613 INFO [train.py:901] (2/4) Epoch 8, batch 1650, loss[loss=0.1928, simple_loss=0.2651, pruned_loss=0.06029, over 7449.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3272, pruned_loss=0.09379, over 1615428.07 frames. ], batch size: 17, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:16:05,269 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.785e+02 3.241e+02 4.331e+02 1.468e+03, threshold=6.482e+02, percent-clipped=4.0 +2023-02-06 05:16:16,839 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:37,568 INFO [train.py:901] (2/4) Epoch 8, batch 1700, loss[loss=0.246, simple_loss=0.3114, pruned_loss=0.09025, over 7813.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3256, pruned_loss=0.09313, over 1614446.76 frames. ], batch size: 20, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:17:00,581 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2765, 4.2106, 3.8170, 1.7505, 3.8082, 3.6877, 3.9389, 3.4913], + device='cuda:2'), covar=tensor([0.0810, 0.0651, 0.1037, 0.4954, 0.0828, 0.0926, 0.1201, 0.0895], + device='cuda:2'), in_proj_covar=tensor([0.0419, 0.0324, 0.0355, 0.0444, 0.0349, 0.0325, 0.0335, 0.0284], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:17:11,716 INFO [train.py:901] (2/4) Epoch 8, batch 1750, loss[loss=0.3043, simple_loss=0.3622, pruned_loss=0.1232, over 8595.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3268, pruned_loss=0.09409, over 1615040.92 frames. ], batch size: 31, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:17:15,046 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.752e+02 3.204e+02 3.949e+02 8.384e+02, threshold=6.409e+02, percent-clipped=4.0 +2023-02-06 05:17:41,591 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 05:17:43,448 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 05:17:45,739 INFO [train.py:901] (2/4) Epoch 8, batch 1800, loss[loss=0.2588, simple_loss=0.3327, pruned_loss=0.09248, over 8439.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3269, pruned_loss=0.09405, over 1619053.19 frames. ], batch size: 27, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:18:06,233 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 05:18:21,313 INFO [train.py:901] (2/4) Epoch 8, batch 1850, loss[loss=0.2659, simple_loss=0.34, pruned_loss=0.09592, over 8463.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3261, pruned_loss=0.09349, over 1618291.76 frames. 
], batch size: 27, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:18:24,003 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.956e+02 3.603e+02 4.636e+02 8.044e+02, threshold=7.207e+02, percent-clipped=5.0 +2023-02-06 05:18:45,040 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58466.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:18:55,852 INFO [train.py:901] (2/4) Epoch 8, batch 1900, loss[loss=0.2595, simple_loss=0.3378, pruned_loss=0.09061, over 8506.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3256, pruned_loss=0.09291, over 1619720.76 frames. ], batch size: 26, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:19:02,000 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:10,023 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:12,309 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-02-06 05:19:12,701 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:19,278 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 05:19:30,166 INFO [train.py:901] (2/4) Epoch 8, batch 1950, loss[loss=0.2427, simple_loss=0.3145, pruned_loss=0.08547, over 8241.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3272, pruned_loss=0.09383, over 1621538.73 frames. ], batch size: 24, lr: 9.75e-03, grad_scale: 16.0 +2023-02-06 05:19:30,816 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 05:19:32,607 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 05:19:32,812 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.699e+02 3.417e+02 4.103e+02 8.210e+02, threshold=6.834e+02, percent-clipped=5.0 +2023-02-06 05:19:50,861 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 05:20:04,896 INFO [train.py:901] (2/4) Epoch 8, batch 2000, loss[loss=0.2233, simple_loss=0.3069, pruned_loss=0.0699, over 7972.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3278, pruned_loss=0.09456, over 1615137.25 frames. ], batch size: 21, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:29,412 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58618.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:31,392 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:36,253 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 05:20:39,666 INFO [train.py:901] (2/4) Epoch 8, batch 2050, loss[loss=0.2469, simple_loss=0.3155, pruned_loss=0.08919, over 8085.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3266, pruned_loss=0.09391, over 1611988.50 frames. 
], batch size: 21, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:42,941 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.785e+02 3.396e+02 4.687e+02 1.585e+03, threshold=6.792e+02, percent-clipped=4.0 +2023-02-06 05:21:00,881 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1127, 1.3419, 4.2168, 1.5420, 3.6471, 3.4612, 3.8030, 3.6520], + device='cuda:2'), covar=tensor([0.0448, 0.4162, 0.0476, 0.2961, 0.1139, 0.0767, 0.0503, 0.0626], + device='cuda:2'), in_proj_covar=tensor([0.0402, 0.0524, 0.0500, 0.0472, 0.0535, 0.0452, 0.0451, 0.0506], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 05:21:13,673 INFO [train.py:901] (2/4) Epoch 8, batch 2100, loss[loss=0.2268, simple_loss=0.2974, pruned_loss=0.07811, over 7791.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.328, pruned_loss=0.09456, over 1615989.43 frames. ], batch size: 19, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:25,812 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:21:47,361 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5817, 2.0043, 2.1869, 1.0377, 2.3456, 1.4436, 0.6574, 1.6775], + device='cuda:2'), covar=tensor([0.0279, 0.0132, 0.0093, 0.0269, 0.0153, 0.0420, 0.0376, 0.0162], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0272, 0.0223, 0.0330, 0.0268, 0.0417, 0.0321, 0.0305], + device='cuda:2'), out_proj_covar=tensor([1.0852e-04, 8.2677e-05, 6.6972e-05, 9.9202e-05, 8.2319e-05, 1.3771e-04, + 9.8989e-05, 9.3451e-05], device='cuda:2') +2023-02-06 05:21:47,831 INFO [train.py:901] (2/4) Epoch 8, batch 2150, loss[loss=0.2758, simple_loss=0.3401, pruned_loss=0.1057, over 7973.00 frames. ], tot_loss[loss=0.2601, simple_loss=0.3289, pruned_loss=0.09564, over 1614943.92 frames. ], batch size: 21, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:51,082 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.818e+02 3.372e+02 4.104e+02 8.704e+02, threshold=6.743e+02, percent-clipped=2.0 +2023-02-06 05:22:23,672 INFO [train.py:901] (2/4) Epoch 8, batch 2200, loss[loss=0.2966, simple_loss=0.3437, pruned_loss=0.1247, over 7527.00 frames. ], tot_loss[loss=0.2581, simple_loss=0.3276, pruned_loss=0.09432, over 1617375.48 frames. ], batch size: 73, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:22:31,533 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58793.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:22:58,799 INFO [train.py:901] (2/4) Epoch 8, batch 2250, loss[loss=0.2607, simple_loss=0.3295, pruned_loss=0.09594, over 7797.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3268, pruned_loss=0.09354, over 1621272.02 frames. 
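The `zipformer.py:2431` dumps are periodic diagnostics of attention-weight entropy, printed together with covariance statistics of the attention projections (`covar`, `in_proj_covar`, `out_proj_covar`); how exactly the printed tensors are aggregated is not visible from the log alone. The underlying quantity is the ordinary Shannon entropy of each attention distribution, which is near zero when a head collapses onto a single position and large when attention is diffuse. A sketch of that computation (the function name is illustrative):

```python
import torch

def attn_entropy(attn: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of attention weights.

    attn: (..., num_queries, num_keys), rows summing to 1 (post-softmax).
    Returns one entropy value per attention distribution; higher means
    more diffuse attention, near zero means a collapsed head.
    """
    return -(attn * (attn + 1e-20).log()).sum(dim=-1)

weights = torch.softmax(torch.randn(2, 4, 8, 8), dim=-1)
print(attn_entropy(weights).mean())
```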
], batch size: 20, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:02,318 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.600e+02 3.138e+02 4.259e+02 8.800e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-06 05:23:11,292 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0817, 1.2815, 4.2604, 1.6396, 3.6962, 3.5146, 3.7507, 3.6497], + device='cuda:2'), covar=tensor([0.0486, 0.4214, 0.0465, 0.3038, 0.1205, 0.0914, 0.0540, 0.0627], + device='cuda:2'), in_proj_covar=tensor([0.0403, 0.0528, 0.0504, 0.0473, 0.0542, 0.0456, 0.0457, 0.0510], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 05:23:21,594 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7572, 1.4151, 3.9055, 1.3698, 3.4199, 3.2403, 3.5066, 3.3648], + device='cuda:2'), covar=tensor([0.0546, 0.3869, 0.0571, 0.3028, 0.1318, 0.0810, 0.0598, 0.0730], + device='cuda:2'), in_proj_covar=tensor([0.0402, 0.0529, 0.0504, 0.0473, 0.0542, 0.0456, 0.0455, 0.0510], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 05:23:29,270 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:31,269 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:34,473 INFO [train.py:901] (2/4) Epoch 8, batch 2300, loss[loss=0.2417, simple_loss=0.3229, pruned_loss=0.08026, over 8021.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.327, pruned_loss=0.09381, over 1621757.90 frames. ], batch size: 22, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:46,517 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:48,571 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:03,548 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:09,484 INFO [train.py:901] (2/4) Epoch 8, batch 2350, loss[loss=0.2185, simple_loss=0.2915, pruned_loss=0.0727, over 7538.00 frames. ], tot_loss[loss=0.258, simple_loss=0.3277, pruned_loss=0.09415, over 1619267.17 frames. ], batch size: 18, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:24:12,939 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.611e+02 3.221e+02 3.780e+02 8.999e+02, threshold=6.441e+02, percent-clipped=2.0 +2023-02-06 05:24:44,048 INFO [train.py:901] (2/4) Epoch 8, batch 2400, loss[loss=0.2807, simple_loss=0.3461, pruned_loss=0.1077, over 8510.00 frames. ], tot_loss[loss=0.258, simple_loss=0.3276, pruned_loss=0.09426, over 1618822.57 frames. 
], batch size: 26, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:24:44,255 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3436, 1.7546, 1.8186, 1.1404, 1.9829, 1.3411, 0.4643, 1.6631], + device='cuda:2'), covar=tensor([0.0287, 0.0150, 0.0140, 0.0210, 0.0189, 0.0431, 0.0403, 0.0136], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0273, 0.0227, 0.0332, 0.0271, 0.0422, 0.0325, 0.0306], + device='cuda:2'), out_proj_covar=tensor([1.0931e-04, 8.2739e-05, 6.8222e-05, 9.9816e-05, 8.3264e-05, 1.3915e-04, + 1.0044e-04, 9.3606e-05], device='cuda:2') +2023-02-06 05:25:18,654 INFO [train.py:901] (2/4) Epoch 8, batch 2450, loss[loss=0.2802, simple_loss=0.3651, pruned_loss=0.0977, over 8250.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3272, pruned_loss=0.0939, over 1615341.86 frames. ], batch size: 24, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:25:21,882 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 3.001e+02 3.706e+02 4.542e+02 9.599e+02, threshold=7.413e+02, percent-clipped=3.0 +2023-02-06 05:25:25,464 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6090, 4.6373, 4.1052, 1.9941, 4.0978, 4.0195, 4.2205, 3.8158], + device='cuda:2'), covar=tensor([0.0657, 0.0547, 0.0907, 0.4346, 0.0784, 0.0879, 0.1120, 0.0747], + device='cuda:2'), in_proj_covar=tensor([0.0423, 0.0327, 0.0351, 0.0446, 0.0346, 0.0325, 0.0337, 0.0285], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:25:26,106 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:25:52,506 INFO [train.py:901] (2/4) Epoch 8, batch 2500, loss[loss=0.2209, simple_loss=0.289, pruned_loss=0.07644, over 8086.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.327, pruned_loss=0.09393, over 1617177.71 frames. ], batch size: 21, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:27,546 INFO [train.py:901] (2/4) Epoch 8, batch 2550, loss[loss=0.266, simple_loss=0.3374, pruned_loss=0.09733, over 8569.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3265, pruned_loss=0.0939, over 1617288.29 frames. 
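The recurring `zipformer.py:1185` lines track a per-stack warmup window (`warmup_begin`/`warmup_end`, measured in batches) alongside how many encoder layers are being randomly bypassed at this step. Since `batch_count` (~59k here) is far past every warmup window, the log always shows `num_to_drop=0, layers_to_drop=set()`: random layer skipping is an early-training regulariser that has long since switched off by epoch 8. A toy sketch of that kind of schedule follows; the names and the exact rule are illustrative, not the zipformer's actual code:

```python
import random

def pick_layers_to_drop(num_layers: int, batch_count: float,
                        warmup_end: float, max_drop: int = 1) -> set:
    """Illustrative layer-skipping schedule: inside the warmup window,
    randomly bypass up to max_drop layers; afterwards drop nothing
    (matching the constant num_to_drop=0 in this log)."""
    if batch_count >= warmup_end:
        return set()
    num_to_drop = random.randint(0, max_drop)
    return set(random.sample(range(num_layers), num_to_drop))

print(pick_layers_to_drop(12, batch_count=59043.0, warmup_end=4000.0))  # set()
print(pick_layers_to_drop(12, batch_count=500.0, warmup_end=4000.0))    # may be non-empty
```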
], batch size: 49, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:29,721 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2433, 1.5981, 3.4716, 1.5173, 2.3090, 3.9161, 3.8675, 3.3159], + device='cuda:2'), covar=tensor([0.0917, 0.1466, 0.0342, 0.1850, 0.0949, 0.0246, 0.0403, 0.0559], + device='cuda:2'), in_proj_covar=tensor([0.0249, 0.0285, 0.0241, 0.0278, 0.0247, 0.0225, 0.0294, 0.0285], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 05:26:30,875 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.676e+02 3.180e+02 4.175e+02 9.807e+02, threshold=6.360e+02, percent-clipped=4.0 +2023-02-06 05:26:30,962 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:26:42,639 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3714, 2.0266, 3.1060, 2.4479, 2.7217, 2.0659, 1.6219, 1.3406], + device='cuda:2'), covar=tensor([0.2855, 0.3134, 0.0853, 0.2076, 0.1680, 0.1717, 0.1411, 0.3604], + device='cuda:2'), in_proj_covar=tensor([0.0844, 0.0793, 0.0683, 0.0787, 0.0882, 0.0735, 0.0671, 0.0719], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:26:42,857 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 05:26:45,981 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:03,040 INFO [train.py:901] (2/4) Epoch 8, batch 2600, loss[loss=0.2097, simple_loss=0.2793, pruned_loss=0.07003, over 7940.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3261, pruned_loss=0.09348, over 1616391.80 frames. ], batch size: 20, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:03,175 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:38,229 INFO [train.py:901] (2/4) Epoch 8, batch 2650, loss[loss=0.2105, simple_loss=0.2958, pruned_loss=0.06264, over 8133.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3259, pruned_loss=0.09323, over 1614448.97 frames. ], batch size: 22, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:41,652 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.757e+02 3.213e+02 4.207e+02 1.360e+03, threshold=6.426e+02, percent-clipped=6.0 +2023-02-06 05:27:42,696 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. 
limit=2.0 +2023-02-06 05:27:48,555 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6818, 5.7761, 5.0335, 2.3751, 5.0859, 5.4822, 5.2712, 4.9753], + device='cuda:2'), covar=tensor([0.0569, 0.0375, 0.0823, 0.4486, 0.0572, 0.0461, 0.1020, 0.0586], + device='cuda:2'), in_proj_covar=tensor([0.0423, 0.0327, 0.0351, 0.0447, 0.0346, 0.0325, 0.0338, 0.0287], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:27:51,996 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:03,087 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:06,560 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8541, 2.1479, 1.7028, 2.4672, 1.2899, 1.5166, 1.7916, 2.2191], + device='cuda:2'), covar=tensor([0.0800, 0.0865, 0.1088, 0.0580, 0.1364, 0.1481, 0.1044, 0.0719], + device='cuda:2'), in_proj_covar=tensor([0.0249, 0.0229, 0.0265, 0.0215, 0.0232, 0.0267, 0.0268, 0.0237], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 05:28:12,227 INFO [train.py:901] (2/4) Epoch 8, batch 2700, loss[loss=0.3882, simple_loss=0.4201, pruned_loss=0.1782, over 8334.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3273, pruned_loss=0.09371, over 1616210.57 frames. ], batch size: 25, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:45,925 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 05:28:46,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1417, 1.7496, 2.6612, 2.1071, 2.3076, 1.9115, 1.4949, 1.0043], + device='cuda:2'), covar=tensor([0.2841, 0.3029, 0.0785, 0.1839, 0.1403, 0.1738, 0.1494, 0.3141], + device='cuda:2'), in_proj_covar=tensor([0.0842, 0.0796, 0.0686, 0.0790, 0.0887, 0.0735, 0.0671, 0.0721], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:28:46,772 INFO [train.py:901] (2/4) Epoch 8, batch 2750, loss[loss=0.2648, simple_loss=0.3467, pruned_loss=0.09147, over 8360.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3277, pruned_loss=0.09391, over 1620924.09 frames. ], batch size: 24, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:50,099 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.846e+02 3.367e+02 4.274e+02 9.837e+02, threshold=6.735e+02, percent-clipped=6.0 +2023-02-06 05:29:07,219 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4939, 1.5553, 1.6718, 1.4674, 0.9788, 1.7982, 0.1093, 1.0810], + device='cuda:2'), covar=tensor([0.2467, 0.1618, 0.0738, 0.1788, 0.4939, 0.0598, 0.3710, 0.1991], + device='cuda:2'), in_proj_covar=tensor([0.0145, 0.0143, 0.0082, 0.0195, 0.0228, 0.0088, 0.0150, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:29:22,656 INFO [train.py:901] (2/4) Epoch 8, batch 2800, loss[loss=0.2819, simple_loss=0.3322, pruned_loss=0.1158, over 7544.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3276, pruned_loss=0.09345, over 1622900.83 frames. 
], batch size: 18, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:29:23,481 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:44,998 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:56,665 INFO [train.py:901] (2/4) Epoch 8, batch 2850, loss[loss=0.2546, simple_loss=0.3259, pruned_loss=0.09166, over 8518.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3267, pruned_loss=0.09285, over 1621002.40 frames. ], batch size: 28, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:00,148 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.577e+02 2.974e+02 3.773e+02 5.956e+02, threshold=5.948e+02, percent-clipped=0.0 +2023-02-06 05:30:01,806 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:32,530 INFO [train.py:901] (2/4) Epoch 8, batch 2900, loss[loss=0.2058, simple_loss=0.2749, pruned_loss=0.06835, over 7718.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3272, pruned_loss=0.09342, over 1617107.49 frames. ], batch size: 18, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:51,227 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59508.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:59,189 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:03,077 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:04,356 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 05:31:06,043 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0 +2023-02-06 05:31:07,544 INFO [train.py:901] (2/4) Epoch 8, batch 2950, loss[loss=0.2574, simple_loss=0.3376, pruned_loss=0.08857, over 8500.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3265, pruned_loss=0.09271, over 1614368.82 frames. ], batch size: 26, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:08,350 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:10,787 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.847e+02 3.468e+02 5.057e+02 9.591e+02, threshold=6.936e+02, percent-clipped=13.0 +2023-02-06 05:31:42,181 INFO [train.py:901] (2/4) Epoch 8, batch 3000, loss[loss=0.2292, simple_loss=0.3206, pruned_loss=0.06894, over 8288.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3277, pruned_loss=0.09342, over 1617896.57 frames. ], batch size: 23, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:42,181 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 05:31:54,428 INFO [train.py:935] (2/4) Epoch 8, validation: loss=0.2021, simple_loss=0.3001, pruned_loss=0.05199, over 944034.00 frames. +2023-02-06 05:31:54,429 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 05:32:10,778 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
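At batch 3000 the script pauses training, computes a validation loss over a held-out set (about 944k frames here), and reports the peak GPU memory used so far. The validation numbers obey the same decomposition as the training records (0.5 × 0.3001 + 0.05199 ≈ 0.2021), and the memory line looks like the statistic `torch.cuda.max_memory_allocated()` reports, converted to megabytes (an assumption; the icefall helper itself is not shown in this log):

```python
import torch

# Hedged sketch of the "Maximum memory allocated so far is 6620MB" line;
# assumes the report is derived from torch.cuda.max_memory_allocated().
if torch.cuda.is_available():
    peak_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
    print(f"Maximum memory allocated so far is {peak_mb}MB")
```

The gap between the validation loss (0.2021) and the training tot_loss (~0.257) is unsurprising, likely because augmentation and dropout are disabled at validation time.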
limit=2.0 +2023-02-06 05:32:29,771 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4301, 1.8541, 3.1025, 1.1998, 2.2756, 1.8269, 1.5298, 1.9524], + device='cuda:2'), covar=tensor([0.1605, 0.1982, 0.0631, 0.3497, 0.1398, 0.2519, 0.1657, 0.2095], + device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0479, 0.0530, 0.0558, 0.0601, 0.0535, 0.0457, 0.0591], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:32:30,901 INFO [train.py:901] (2/4) Epoch 8, batch 3050, loss[loss=0.2836, simple_loss=0.3563, pruned_loss=0.1054, over 8468.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3274, pruned_loss=0.09311, over 1619221.50 frames. ], batch size: 25, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:32:34,251 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.623e+02 3.324e+02 4.059e+02 7.396e+02, threshold=6.648e+02, percent-clipped=1.0 +2023-02-06 05:32:35,802 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:37,140 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:52,397 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:33:05,113 INFO [train.py:901] (2/4) Epoch 8, batch 3100, loss[loss=0.2233, simple_loss=0.3035, pruned_loss=0.07152, over 8036.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3269, pruned_loss=0.0926, over 1618841.66 frames. ], batch size: 22, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:40,028 INFO [train.py:901] (2/4) Epoch 8, batch 3150, loss[loss=0.2335, simple_loss=0.2979, pruned_loss=0.08453, over 7270.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.3282, pruned_loss=0.09353, over 1620202.33 frames. ], batch size: 16, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:43,231 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.052e+02 2.894e+02 3.427e+02 4.526e+02 8.691e+02, threshold=6.853e+02, percent-clipped=4.0 +2023-02-06 05:34:14,628 INFO [train.py:901] (2/4) Epoch 8, batch 3200, loss[loss=0.2555, simple_loss=0.3182, pruned_loss=0.09639, over 8090.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3289, pruned_loss=0.09408, over 1621935.13 frames. ], batch size: 21, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:21,173 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.52 vs. limit=5.0 +2023-02-06 05:34:30,939 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 05:34:50,981 INFO [train.py:901] (2/4) Epoch 8, batch 3250, loss[loss=0.2488, simple_loss=0.3112, pruned_loss=0.09326, over 7430.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3272, pruned_loss=0.09288, over 1620064.84 frames. ], batch size: 17, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:54,309 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.545e+02 3.201e+02 4.295e+02 9.179e+02, threshold=6.402e+02, percent-clipped=6.0 +2023-02-06 05:34:54,760 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. 
limit=2.0 +2023-02-06 05:35:13,094 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:14,530 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:25,218 INFO [train.py:901] (2/4) Epoch 8, batch 3300, loss[loss=0.2349, simple_loss=0.3161, pruned_loss=0.07683, over 8247.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3262, pruned_loss=0.0921, over 1619160.49 frames. ], batch size: 24, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:35:35,306 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:41,745 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:52,456 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:53,732 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1839, 1.9289, 1.7353, 1.9741, 1.2360, 1.6971, 2.3595, 2.2747], + device='cuda:2'), covar=tensor([0.0463, 0.1100, 0.1632, 0.1213, 0.0558, 0.1491, 0.0595, 0.0529], + device='cuda:2'), in_proj_covar=tensor([0.0109, 0.0160, 0.0199, 0.0164, 0.0109, 0.0169, 0.0123, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:35:59,535 INFO [train.py:901] (2/4) Epoch 8, batch 3350, loss[loss=0.2765, simple_loss=0.3547, pruned_loss=0.09917, over 8467.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3264, pruned_loss=0.09245, over 1618356.35 frames. ], batch size: 25, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:02,906 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.795e+02 3.400e+02 4.166e+02 8.824e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 05:36:25,155 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3074, 1.5627, 1.5021, 1.2811, 1.0494, 1.3183, 1.6558, 1.6325], + device='cuda:2'), covar=tensor([0.0518, 0.1262, 0.1861, 0.1469, 0.0622, 0.1602, 0.0753, 0.0602], + device='cuda:2'), in_proj_covar=tensor([0.0110, 0.0162, 0.0200, 0.0165, 0.0111, 0.0170, 0.0124, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:36:31,821 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:36:33,704 INFO [train.py:901] (2/4) Epoch 8, batch 3400, loss[loss=0.2326, simple_loss=0.3138, pruned_loss=0.07571, over 8348.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3249, pruned_loss=0.0916, over 1614535.65 frames. ], batch size: 24, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:51,164 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 05:37:09,557 INFO [train.py:901] (2/4) Epoch 8, batch 3450, loss[loss=0.2442, simple_loss=0.3293, pruned_loss=0.07954, over 8362.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3256, pruned_loss=0.09197, over 1619381.96 frames. 
], batch size: 24, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:12,878 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.668e+02 3.106e+02 3.891e+02 9.201e+02, threshold=6.211e+02, percent-clipped=2.0 +2023-02-06 05:37:27,911 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:36,406 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:44,252 INFO [train.py:901] (2/4) Epoch 8, batch 3500, loss[loss=0.2301, simple_loss=0.3111, pruned_loss=0.07456, over 8138.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3261, pruned_loss=0.09192, over 1616813.57 frames. ], batch size: 22, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:45,084 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:53,896 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60096.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:38:02,935 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 05:38:18,649 INFO [train.py:901] (2/4) Epoch 8, batch 3550, loss[loss=0.2864, simple_loss=0.3432, pruned_loss=0.1148, over 8507.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3264, pruned_loss=0.09235, over 1614192.05 frames. ], batch size: 26, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:38:22,095 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.844e+02 3.449e+02 4.512e+02 7.529e+02, threshold=6.898e+02, percent-clipped=5.0 +2023-02-06 05:38:43,017 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.79 vs. limit=2.0 +2023-02-06 05:38:54,373 INFO [train.py:901] (2/4) Epoch 8, batch 3600, loss[loss=0.2845, simple_loss=0.3438, pruned_loss=0.1126, over 8469.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3272, pruned_loss=0.09308, over 1613183.39 frames. ], batch size: 27, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:14,698 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:18,773 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:29,804 INFO [train.py:901] (2/4) Epoch 8, batch 3650, loss[loss=0.304, simple_loss=0.3526, pruned_loss=0.1277, over 7537.00 frames. ], tot_loss[loss=0.2582, simple_loss=0.3281, pruned_loss=0.09416, over 1607936.66 frames. ], batch size: 18, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:32,078 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:33,128 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.875e+02 2.702e+02 3.457e+02 4.155e+02 9.631e+02, threshold=6.915e+02, percent-clipped=4.0 +2023-02-06 05:39:42,751 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:48,810 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:03,446 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 05:40:04,774 INFO [train.py:901] (2/4) Epoch 8, batch 3700, loss[loss=0.2367, simple_loss=0.3142, pruned_loss=0.07963, over 8280.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3264, pruned_loss=0.09351, over 1605223.13 frames. ], batch size: 23, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:34,569 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:39,115 INFO [train.py:901] (2/4) Epoch 8, batch 3750, loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.0618, over 7666.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3259, pruned_loss=0.09308, over 1606568.21 frames. ], batch size: 19, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:43,054 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.685e+02 3.295e+02 3.882e+02 8.274e+02, threshold=6.589e+02, percent-clipped=2.0 +2023-02-06 05:41:02,726 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:13,776 INFO [train.py:901] (2/4) Epoch 8, batch 3800, loss[loss=0.1937, simple_loss=0.2918, pruned_loss=0.04778, over 8343.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3267, pruned_loss=0.09349, over 1609224.87 frames. ], batch size: 24, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:41:28,150 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:36,467 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:45,878 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:49,067 INFO [train.py:901] (2/4) Epoch 8, batch 3850, loss[loss=0.2669, simple_loss=0.3424, pruned_loss=0.09571, over 8466.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3267, pruned_loss=0.09317, over 1609086.16 frames. ], batch size: 27, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:41:52,292 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.691e+02 3.271e+02 4.212e+02 1.032e+03, threshold=6.541e+02, percent-clipped=5.0 +2023-02-06 05:41:54,203 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:08,490 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 05:42:22,375 INFO [train.py:901] (2/4) Epoch 8, batch 3900, loss[loss=0.2373, simple_loss=0.3116, pruned_loss=0.08148, over 8200.00 frames. ], tot_loss[loss=0.2576, simple_loss=0.327, pruned_loss=0.09406, over 1612480.54 frames. ], batch size: 23, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:42:46,595 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:55,308 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:57,130 INFO [train.py:901] (2/4) Epoch 8, batch 3950, loss[loss=0.223, simple_loss=0.306, pruned_loss=0.06996, over 8358.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3246, pruned_loss=0.09283, over 1608649.90 frames. 
], batch size: 26, lr: 9.59e-03, grad_scale: 8.0 +2023-02-06 05:43:00,413 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.562e+02 3.362e+02 4.082e+02 8.516e+02, threshold=6.724e+02, percent-clipped=2.0 +2023-02-06 05:43:03,877 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:13,334 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:16,391 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:31,325 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:31,760 INFO [train.py:901] (2/4) Epoch 8, batch 4000, loss[loss=0.2465, simple_loss=0.3176, pruned_loss=0.08774, over 8471.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3262, pruned_loss=0.09423, over 1612200.62 frames. ], batch size: 25, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:43:46,944 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3582, 1.5932, 1.5623, 0.8799, 1.6930, 1.2190, 0.2913, 1.5020], + device='cuda:2'), covar=tensor([0.0259, 0.0157, 0.0147, 0.0241, 0.0175, 0.0529, 0.0423, 0.0142], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0276, 0.0226, 0.0335, 0.0267, 0.0427, 0.0329, 0.0304], + device='cuda:2'), out_proj_covar=tensor([1.0801e-04, 8.2682e-05, 6.7501e-05, 9.9941e-05, 8.1928e-05, 1.4039e-04, + 1.0076e-04, 9.2206e-05], device='cuda:2') +2023-02-06 05:43:48,273 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:59,775 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:06,268 INFO [train.py:901] (2/4) Epoch 8, batch 4050, loss[loss=0.2005, simple_loss=0.2843, pruned_loss=0.05837, over 7975.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3257, pruned_loss=0.09327, over 1611677.33 frames. ], batch size: 21, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:44:09,650 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.835e+02 3.722e+02 4.462e+02 8.493e+02, threshold=7.445e+02, percent-clipped=1.0 +2023-02-06 05:44:17,204 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:36,995 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:41,543 INFO [train.py:901] (2/4) Epoch 8, batch 4100, loss[loss=0.2423, simple_loss=0.3211, pruned_loss=0.08177, over 8481.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3256, pruned_loss=0.09342, over 1610949.20 frames. 
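The `optim.py:369` lines summarise recent gradient norms and the clipping rule derived from them. The five numbers read naturally as min / 25% / 50% / 75% / max of a window of recent global grad norms, and in every record of this segment the threshold equals `Clipping_scale` (2.0) times the logged median, e.g. 2 × 3.241e+02 = 6.482e+02; `percent-clipped` is then the share of recent steps whose norm exceeded the threshold. A sketch of that bookkeeping, built on plain `torch.nn.utils.clip_grad_norm_` as a stand-in for the recipe's own optimiser:

```python
import torch
from collections import deque

recent_norms = deque(maxlen=128)  # sliding window of recent grad norms

def clip_with_median_threshold(params, clipping_scale: float = 2.0):
    """Clip gradients against clipping_scale * median(recent norms),
    mirroring the quartiles/threshold/percent-clipped stats in the log.
    A sketch under stated assumptions, not the recipe's optimizer."""
    params = [p for p in params if p.grad is not None]
    # Passing inf as max_norm measures the global norm without clipping.
    norm = torch.nn.utils.clip_grad_norm_(params, float("inf")).item()
    recent_norms.append(norm)
    q = torch.quantile(torch.tensor(list(recent_norms)),
                       torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
    threshold = clipping_scale * q[2].item()
    torch.nn.utils.clip_grad_norm_(params, threshold)
    return q.tolist(), threshold, norm > threshold
```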
], batch size: 29, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:44:51,186 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9947, 1.4644, 1.5664, 1.3418, 0.9405, 1.3716, 1.5110, 1.4968], + device='cuda:2'), covar=tensor([0.0539, 0.1178, 0.1673, 0.1335, 0.0580, 0.1443, 0.0713, 0.0598], + device='cuda:2'), in_proj_covar=tensor([0.0108, 0.0161, 0.0198, 0.0164, 0.0111, 0.0169, 0.0123, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:45:16,314 INFO [train.py:901] (2/4) Epoch 8, batch 4150, loss[loss=0.207, simple_loss=0.2856, pruned_loss=0.06424, over 7709.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3254, pruned_loss=0.09285, over 1614886.75 frames. ], batch size: 18, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:45:19,088 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:19,619 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.904e+02 3.574e+02 4.093e+02 8.234e+02, threshold=7.147e+02, percent-clipped=2.0 +2023-02-06 05:45:45,318 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:51,067 INFO [train.py:901] (2/4) Epoch 8, batch 4200, loss[loss=0.296, simple_loss=0.3487, pruned_loss=0.1216, over 6856.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3261, pruned_loss=0.09331, over 1611887.02 frames. ], batch size: 71, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:45:54,019 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,596 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,614 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:07,986 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 05:46:10,887 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:11,534 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60811.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:20,191 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:25,973 INFO [train.py:901] (2/4) Epoch 8, batch 4250, loss[loss=0.3123, simple_loss=0.3798, pruned_loss=0.1224, over 8506.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3256, pruned_loss=0.09317, over 1610403.32 frames. ], batch size: 49, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:46:28,902 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:30,099 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.807e+02 3.546e+02 4.515e+02 1.213e+03, threshold=7.092e+02, percent-clipped=3.0 +2023-02-06 05:46:30,813 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 05:46:56,667 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8880, 2.4390, 3.0316, 1.0907, 3.2754, 1.9971, 1.4828, 1.5947], + device='cuda:2'), covar=tensor([0.0459, 0.0231, 0.0123, 0.0423, 0.0199, 0.0433, 0.0543, 0.0305], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0276, 0.0225, 0.0337, 0.0264, 0.0424, 0.0330, 0.0304], + device='cuda:2'), out_proj_covar=tensor([1.0853e-04, 8.2391e-05, 6.6769e-05, 1.0081e-04, 8.0577e-05, 1.3922e-04, + 1.0109e-04, 9.1903e-05], device='cuda:2') +2023-02-06 05:47:01,076 INFO [train.py:901] (2/4) Epoch 8, batch 4300, loss[loss=0.2826, simple_loss=0.3534, pruned_loss=0.1059, over 8342.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3257, pruned_loss=0.09302, over 1617753.92 frames. ], batch size: 26, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:47:04,228 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 05:47:35,209 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:35,678 INFO [train.py:901] (2/4) Epoch 8, batch 4350, loss[loss=0.2598, simple_loss=0.3282, pruned_loss=0.09574, over 8464.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3265, pruned_loss=0.09364, over 1619339.59 frames. ], batch size: 29, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:47:39,641 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.751e+02 3.442e+02 4.335e+02 7.709e+02, threshold=6.884e+02, percent-clipped=1.0 +2023-02-06 05:47:40,103 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 05:47:51,989 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:59,289 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 05:48:10,331 INFO [train.py:901] (2/4) Epoch 8, batch 4400, loss[loss=0.241, simple_loss=0.3268, pruned_loss=0.07763, over 8353.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3248, pruned_loss=0.09253, over 1617916.38 frames. ], batch size: 26, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:48:34,694 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3643, 1.6676, 2.7579, 1.2053, 2.1005, 1.7348, 1.5689, 1.7182], + device='cuda:2'), covar=tensor([0.1941, 0.2286, 0.0756, 0.4135, 0.1494, 0.3071, 0.1904, 0.2307], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0486, 0.0522, 0.0559, 0.0598, 0.0541, 0.0455, 0.0593], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:48:40,683 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1442, 1.4173, 3.4679, 1.5167, 2.3146, 3.8877, 3.8829, 3.2929], + device='cuda:2'), covar=tensor([0.1003, 0.1552, 0.0340, 0.1974, 0.0859, 0.0229, 0.0394, 0.0605], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0283, 0.0245, 0.0277, 0.0253, 0.0224, 0.0290, 0.0288], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 05:48:42,527 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 05:48:45,123 INFO [train.py:901] (2/4) Epoch 8, batch 4450, loss[loss=0.3303, simple_loss=0.376, pruned_loss=0.1423, over 8501.00 frames. 
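The `train.py:1067` warnings trace a duration filter: every cut excluded in this log is either shorter than one second (0.7545625 s, 0.83 s, ...) or longer than about 25 s (25.035 s up to 33.038875 s). Very short cuts carry almost no supervision, and very long ones risk out-of-memory failures at these batch sizes. With lhotse, which icefall uses for data handling, such a filter can be written as below; the exact bounds and the manifest path used by this run are not visible in the log, so both are illustrative:

```python
import logging
from lhotse import CutSet

def remove_short_and_long_utt(c) -> bool:
    # Illustrative bounds: this log only shows that cuts under ~1 s and
    # over ~25 s were dropped, not the exact thresholds in the script.
    if c.duration < 1.0 or c.duration > 25.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# Hypothetical manifest path, for illustration only.
cuts = CutSet.from_file("data/fbank/cuts_train.jsonl.gz")
cuts = cuts.filter(remove_short_and_long_utt)
```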
], tot_loss[loss=0.2548, simple_loss=0.3247, pruned_loss=0.09248, over 1616728.04 frames. ], batch size: 26, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:48:49,119 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.403e+02 3.130e+02 3.948e+02 8.767e+02, threshold=6.260e+02, percent-clipped=3.0 +2023-02-06 05:49:17,374 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:17,968 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:19,295 INFO [train.py:901] (2/4) Epoch 8, batch 4500, loss[loss=0.2878, simple_loss=0.3345, pruned_loss=0.1206, over 8073.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3246, pruned_loss=0.092, over 1614067.38 frames. ], batch size: 21, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:36,537 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 05:49:53,490 INFO [train.py:901] (2/4) Epoch 8, batch 4550, loss[loss=0.264, simple_loss=0.3281, pruned_loss=0.09995, over 7930.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3255, pruned_loss=0.09303, over 1615854.36 frames. ], batch size: 20, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:58,149 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.768e+02 3.493e+02 4.645e+02 1.007e+03, threshold=6.986e+02, percent-clipped=6.0 +2023-02-06 05:50:26,108 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2716, 2.2377, 1.5520, 1.9229, 1.8139, 1.2620, 1.5957, 1.7663], + device='cuda:2'), covar=tensor([0.1193, 0.0353, 0.1073, 0.0467, 0.0580, 0.1302, 0.0840, 0.0809], + device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0235, 0.0313, 0.0297, 0.0305, 0.0317, 0.0340, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 05:50:29,339 INFO [train.py:901] (2/4) Epoch 8, batch 4600, loss[loss=0.2346, simple_loss=0.3089, pruned_loss=0.08012, over 7977.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3255, pruned_loss=0.09231, over 1617405.43 frames. ], batch size: 21, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:50:38,246 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:50:42,597 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 05:50:45,618 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6877, 2.1903, 4.8284, 2.6634, 4.3538, 4.2512, 4.5398, 4.4013], + device='cuda:2'), covar=tensor([0.0472, 0.3230, 0.0410, 0.2608, 0.0834, 0.0642, 0.0405, 0.0418], + device='cuda:2'), in_proj_covar=tensor([0.0417, 0.0532, 0.0514, 0.0484, 0.0543, 0.0461, 0.0461, 0.0510], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:2') +2023-02-06 05:51:04,224 INFO [train.py:901] (2/4) Epoch 8, batch 4650, loss[loss=0.2175, simple_loss=0.2983, pruned_loss=0.06834, over 8239.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.326, pruned_loss=0.09268, over 1617013.22 frames. 
], batch size: 22, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:51:08,278 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 2.693e+02 3.115e+02 3.876e+02 8.832e+02, threshold=6.229e+02, percent-clipped=3.0 +2023-02-06 05:51:38,689 INFO [train.py:901] (2/4) Epoch 8, batch 4700, loss[loss=0.2706, simple_loss=0.3443, pruned_loss=0.09848, over 8667.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3267, pruned_loss=0.09268, over 1616101.42 frames. ], batch size: 39, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:52:13,892 INFO [train.py:901] (2/4) Epoch 8, batch 4750, loss[loss=0.252, simple_loss=0.3242, pruned_loss=0.08993, over 8029.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3267, pruned_loss=0.09246, over 1616409.94 frames. ], batch size: 22, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:52:14,755 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1015, 2.2412, 1.8255, 2.8616, 1.3215, 1.4281, 1.9128, 2.2650], + device='cuda:2'), covar=tensor([0.0841, 0.0857, 0.1192, 0.0433, 0.1271, 0.1906, 0.1158, 0.0926], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0231, 0.0270, 0.0217, 0.0229, 0.0265, 0.0269, 0.0236], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 05:52:17,850 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.870e+02 3.425e+02 4.672e+02 9.837e+02, threshold=6.850e+02, percent-clipped=8.0 +2023-02-06 05:52:36,774 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 05:52:39,420 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 05:52:48,204 INFO [train.py:901] (2/4) Epoch 8, batch 4800, loss[loss=0.2301, simple_loss=0.2996, pruned_loss=0.08032, over 7931.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.326, pruned_loss=0.09236, over 1614126.12 frames. ], batch size: 20, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:53:16,792 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:22,678 INFO [train.py:901] (2/4) Epoch 8, batch 4850, loss[loss=0.2814, simple_loss=0.3524, pruned_loss=0.1053, over 8461.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.327, pruned_loss=0.09273, over 1619032.66 frames. ], batch size: 25, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:53:26,652 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.780e+02 3.448e+02 4.323e+02 7.771e+02, threshold=6.895e+02, percent-clipped=1.0 +2023-02-06 05:53:28,674 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 05:53:36,187 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61451.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:53,262 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:56,852 INFO [train.py:901] (2/4) Epoch 8, batch 4900, loss[loss=0.2341, simple_loss=0.3023, pruned_loss=0.08297, over 7435.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3268, pruned_loss=0.09254, over 1620298.01 frames. ], batch size: 17, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:07,875 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-06 05:54:11,125 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4474, 1.5414, 2.3212, 1.1983, 1.5874, 1.6801, 1.3874, 1.4299], + device='cuda:2'), covar=tensor([0.1535, 0.1986, 0.0627, 0.3349, 0.1394, 0.2562, 0.1702, 0.1823], + device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0491, 0.0533, 0.0567, 0.0607, 0.0544, 0.0459, 0.0604], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:2') +2023-02-06 05:54:31,292 INFO [train.py:901] (2/4) Epoch 8, batch 4950, loss[loss=0.3152, simple_loss=0.3764, pruned_loss=0.127, over 7125.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3273, pruned_loss=0.09333, over 1619033.95 frames. ], batch size: 71, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:35,325 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.701e+02 3.325e+02 4.582e+02 7.633e+02, threshold=6.649e+02, percent-clipped=1.0 +2023-02-06 05:54:35,519 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61538.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:54:36,262 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0823, 1.2235, 1.2215, 0.4656, 1.2343, 1.0357, 0.1430, 1.1673], + device='cuda:2'), covar=tensor([0.0181, 0.0165, 0.0149, 0.0262, 0.0182, 0.0509, 0.0384, 0.0154], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0285, 0.0234, 0.0340, 0.0273, 0.0434, 0.0337, 0.0314], + device='cuda:2'), out_proj_covar=tensor([1.0832e-04, 8.5266e-05, 6.9743e-05, 1.0126e-04, 8.3255e-05, 1.4241e-04, + 1.0312e-04, 9.4617e-05], device='cuda:2') +2023-02-06 05:55:07,163 INFO [train.py:901] (2/4) Epoch 8, batch 5000, loss[loss=0.2206, simple_loss=0.31, pruned_loss=0.06561, over 8508.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3262, pruned_loss=0.09291, over 1619688.01 frames. ], batch size: 26, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:24,083 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5892, 1.9756, 2.1383, 1.0202, 2.2478, 1.4086, 0.6566, 1.7695], + device='cuda:2'), covar=tensor([0.0356, 0.0169, 0.0137, 0.0333, 0.0186, 0.0540, 0.0508, 0.0169], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0287, 0.0235, 0.0342, 0.0275, 0.0437, 0.0338, 0.0315], + device='cuda:2'), out_proj_covar=tensor([1.0891e-04, 8.5657e-05, 7.0072e-05, 1.0190e-04, 8.3875e-05, 1.4321e-04, + 1.0342e-04, 9.5080e-05], device='cuda:2') +2023-02-06 05:55:42,155 INFO [train.py:901] (2/4) Epoch 8, batch 5050, loss[loss=0.2798, simple_loss=0.3526, pruned_loss=0.1035, over 8107.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3262, pruned_loss=0.09278, over 1620368.03 frames. ], batch size: 23, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:46,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.771e+02 3.459e+02 4.924e+02 1.310e+03, threshold=6.919e+02, percent-clipped=9.0 +2023-02-06 05:56:07,700 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 05:56:17,393 INFO [train.py:901] (2/4) Epoch 8, batch 5100, loss[loss=0.2684, simple_loss=0.3256, pruned_loss=0.1056, over 8107.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3254, pruned_loss=0.09246, over 1617698.46 frames. 
], batch size: 23, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:37,561 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8180, 2.0688, 2.1564, 1.9425, 1.5887, 2.1406, 2.5379, 2.1874], + device='cuda:2'), covar=tensor([0.0429, 0.0926, 0.1328, 0.1078, 0.0587, 0.1157, 0.0535, 0.0427], + device='cuda:2'), in_proj_covar=tensor([0.0108, 0.0161, 0.0200, 0.0164, 0.0112, 0.0170, 0.0123, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:56:52,939 INFO [train.py:901] (2/4) Epoch 8, batch 5150, loss[loss=0.2272, simple_loss=0.314, pruned_loss=0.07022, over 8138.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3266, pruned_loss=0.09346, over 1616730.68 frames. ], batch size: 22, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:57,119 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.584e+02 3.190e+02 4.018e+02 8.337e+02, threshold=6.381e+02, percent-clipped=2.0 +2023-02-06 05:57:06,176 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:11,023 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8409, 2.1212, 2.3898, 1.8680, 1.2168, 2.5459, 0.4488, 1.4832], + device='cuda:2'), covar=tensor([0.2992, 0.1848, 0.0704, 0.2268, 0.5934, 0.0538, 0.4573, 0.2336], + device='cuda:2'), in_proj_covar=tensor([0.0153, 0.0152, 0.0087, 0.0200, 0.0238, 0.0092, 0.0160, 0.0153], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 05:57:13,688 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:27,554 INFO [train.py:901] (2/4) Epoch 8, batch 5200, loss[loss=0.2787, simple_loss=0.3419, pruned_loss=0.1077, over 7976.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3251, pruned_loss=0.09288, over 1614254.13 frames. ], batch size: 21, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:57:35,725 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:53,737 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:58:00,556 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3722, 2.0851, 2.8828, 2.4741, 2.5655, 2.2012, 1.7458, 1.2619], + device='cuda:2'), covar=tensor([0.2871, 0.2978, 0.0740, 0.1793, 0.1548, 0.1627, 0.1582, 0.3096], + device='cuda:2'), in_proj_covar=tensor([0.0848, 0.0803, 0.0677, 0.0787, 0.0887, 0.0736, 0.0671, 0.0727], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 05:58:02,408 INFO [train.py:901] (2/4) Epoch 8, batch 5250, loss[loss=0.1887, simple_loss=0.2643, pruned_loss=0.05656, over 7416.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3253, pruned_loss=0.09312, over 1615212.43 frames. ], batch size: 17, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:58:06,467 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.711e+02 3.309e+02 4.013e+02 1.150e+03, threshold=6.618e+02, percent-clipped=3.0 +2023-02-06 05:58:07,805 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 05:58:12,702 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8393, 1.2420, 1.3611, 1.0917, 1.1651, 1.2220, 1.5332, 1.4533], + device='cuda:2'), covar=tensor([0.0597, 0.1751, 0.2567, 0.1879, 0.0654, 0.2152, 0.0842, 0.0717], + device='cuda:2'), in_proj_covar=tensor([0.0108, 0.0160, 0.0199, 0.0163, 0.0110, 0.0168, 0.0122, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 05:58:37,169 INFO [train.py:901] (2/4) Epoch 8, batch 5300, loss[loss=0.2522, simple_loss=0.3335, pruned_loss=0.08547, over 8198.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.325, pruned_loss=0.09237, over 1616134.63 frames. ], batch size: 23, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:08,401 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.68 vs. limit=5.0 +2023-02-06 05:59:12,097 INFO [train.py:901] (2/4) Epoch 8, batch 5350, loss[loss=0.2294, simple_loss=0.296, pruned_loss=0.08143, over 7265.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3243, pruned_loss=0.09217, over 1613605.92 frames. ], batch size: 16, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:12,256 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:59:16,964 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.785e+02 2.596e+02 3.249e+02 3.983e+02 1.109e+03, threshold=6.498e+02, percent-clipped=6.0 +2023-02-06 05:59:47,798 INFO [train.py:901] (2/4) Epoch 8, batch 5400, loss[loss=0.2394, simple_loss=0.3169, pruned_loss=0.08099, over 8422.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3239, pruned_loss=0.09165, over 1611932.75 frames. ], batch size: 39, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:23,965 INFO [train.py:901] (2/4) Epoch 8, batch 5450, loss[loss=0.2036, simple_loss=0.277, pruned_loss=0.06507, over 7426.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3236, pruned_loss=0.09158, over 1606204.47 frames. ], batch size: 17, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:28,689 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.625e+02 3.240e+02 4.068e+02 8.471e+02, threshold=6.479e+02, percent-clipped=5.0 +2023-02-06 06:00:48,385 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8726, 2.3690, 3.8764, 2.9135, 3.2274, 2.5309, 1.9703, 1.9657], + device='cuda:2'), covar=tensor([0.2775, 0.3472, 0.0839, 0.1969, 0.1563, 0.1559, 0.1334, 0.3460], + device='cuda:2'), in_proj_covar=tensor([0.0846, 0.0803, 0.0677, 0.0788, 0.0886, 0.0738, 0.0668, 0.0726], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 06:01:00,411 INFO [train.py:901] (2/4) Epoch 8, batch 5500, loss[loss=0.2197, simple_loss=0.2912, pruned_loss=0.07405, over 7927.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3238, pruned_loss=0.09137, over 1610057.33 frames. ], batch size: 20, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:01,783 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 06:01:08,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:16,546 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:34,828 INFO [train.py:901] (2/4) Epoch 8, batch 5550, loss[loss=0.2004, simple_loss=0.2694, pruned_loss=0.06573, over 7688.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3233, pruned_loss=0.09123, over 1609192.99 frames. ], batch size: 18, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:38,597 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.724e+02 3.277e+02 4.222e+02 9.983e+02, threshold=6.553e+02, percent-clipped=5.0 +2023-02-06 06:01:40,100 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:09,237 INFO [train.py:901] (2/4) Epoch 8, batch 5600, loss[loss=0.2628, simple_loss=0.3345, pruned_loss=0.09557, over 8335.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3238, pruned_loss=0.09177, over 1605829.72 frames. ], batch size: 25, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:02:27,948 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:35,378 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:44,108 INFO [train.py:901] (2/4) Epoch 8, batch 5650, loss[loss=0.3056, simple_loss=0.363, pruned_loss=0.1241, over 8516.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3259, pruned_loss=0.09257, over 1611615.34 frames. ], batch size: 26, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:02:48,206 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.893e+02 3.442e+02 4.058e+02 7.819e+02, threshold=6.884e+02, percent-clipped=2.0 +2023-02-06 06:03:03,294 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 06:03:14,926 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:19,751 INFO [train.py:901] (2/4) Epoch 8, batch 5700, loss[loss=0.305, simple_loss=0.3594, pruned_loss=0.1253, over 6983.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3259, pruned_loss=0.09289, over 1605432.21 frames. 
], batch size: 72, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:03:19,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8036, 2.0240, 2.3439, 1.8217, 1.2169, 2.3350, 0.4250, 1.6254], + device='cuda:2'), covar=tensor([0.3052, 0.1711, 0.0475, 0.1719, 0.5099, 0.0439, 0.4287, 0.1745], + device='cuda:2'), in_proj_covar=tensor([0.0152, 0.0152, 0.0087, 0.0199, 0.0240, 0.0091, 0.0159, 0.0151], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 06:03:24,800 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9533, 2.2856, 3.8144, 2.8842, 3.3063, 2.5142, 1.9116, 1.7995], + device='cuda:2'), covar=tensor([0.2544, 0.3379, 0.0740, 0.2055, 0.1349, 0.1667, 0.1347, 0.3638], + device='cuda:2'), in_proj_covar=tensor([0.0851, 0.0806, 0.0679, 0.0792, 0.0892, 0.0742, 0.0677, 0.0730], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:03:26,015 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:53,771 INFO [train.py:901] (2/4) Epoch 8, batch 5750, loss[loss=0.2913, simple_loss=0.3577, pruned_loss=0.1124, over 8646.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3268, pruned_loss=0.0935, over 1607268.97 frames. ], batch size: 34, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:03:58,446 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.702e+02 3.342e+02 4.214e+02 1.406e+03, threshold=6.684e+02, percent-clipped=3.0 +2023-02-06 06:04:07,823 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 06:04:28,273 INFO [train.py:901] (2/4) Epoch 8, batch 5800, loss[loss=0.2124, simple_loss=0.2811, pruned_loss=0.07183, over 7791.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3268, pruned_loss=0.09357, over 1605505.74 frames. ], batch size: 19, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:04:35,219 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62391.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:04:44,671 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0403, 3.0738, 3.1077, 2.0774, 1.7171, 3.2829, 0.7549, 2.1053], + device='cuda:2'), covar=tensor([0.2010, 0.1134, 0.0459, 0.2969, 0.5139, 0.0381, 0.4688, 0.2199], + device='cuda:2'), in_proj_covar=tensor([0.0151, 0.0153, 0.0088, 0.0200, 0.0239, 0.0092, 0.0159, 0.0153], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 06:04:48,170 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:04:59,503 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1418, 1.5786, 1.6039, 1.4928, 1.1010, 1.3983, 1.7151, 1.8307], + device='cuda:2'), covar=tensor([0.0505, 0.1208, 0.1768, 0.1349, 0.0592, 0.1529, 0.0663, 0.0532], + device='cuda:2'), in_proj_covar=tensor([0.0110, 0.0161, 0.0200, 0.0165, 0.0112, 0.0169, 0.0123, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 06:05:04,104 INFO [train.py:901] (2/4) Epoch 8, batch 5850, loss[loss=0.2205, simple_loss=0.2864, pruned_loss=0.07724, over 7525.00 frames. 
], tot_loss[loss=0.2567, simple_loss=0.3264, pruned_loss=0.09347, over 1607714.31 frames. ], batch size: 18, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:05:08,211 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.970e+02 2.690e+02 3.286e+02 4.000e+02 6.740e+02, threshold=6.571e+02, percent-clipped=1.0 +2023-02-06 06:05:27,198 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:34,614 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:38,456 INFO [train.py:901] (2/4) Epoch 8, batch 5900, loss[loss=0.2556, simple_loss=0.3238, pruned_loss=0.0937, over 7671.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3252, pruned_loss=0.093, over 1604048.21 frames. ], batch size: 19, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:05:39,870 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:44,020 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:51,358 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:54,035 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7975, 1.5080, 3.5140, 1.3574, 2.1985, 3.8031, 3.8625, 3.2846], + device='cuda:2'), covar=tensor([0.1101, 0.1446, 0.0293, 0.1885, 0.0962, 0.0250, 0.0338, 0.0601], + device='cuda:2'), in_proj_covar=tensor([0.0252, 0.0284, 0.0242, 0.0274, 0.0256, 0.0226, 0.0292, 0.0287], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 06:06:13,154 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 06:06:13,413 INFO [train.py:901] (2/4) Epoch 8, batch 5950, loss[loss=0.3148, simple_loss=0.3604, pruned_loss=0.1346, over 6895.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3254, pruned_loss=0.09284, over 1607888.87 frames. ], batch size: 71, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:16,364 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8836, 2.6804, 1.7854, 3.9779, 2.0921, 1.4834, 2.5777, 2.8047], + device='cuda:2'), covar=tensor([0.1895, 0.1575, 0.2652, 0.0339, 0.1553, 0.2683, 0.1399, 0.1166], + device='cuda:2'), in_proj_covar=tensor([0.0245, 0.0224, 0.0264, 0.0213, 0.0226, 0.0260, 0.0262, 0.0229], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:06:17,413 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 2.652e+02 3.220e+02 3.904e+02 8.315e+02, threshold=6.439e+02, percent-clipped=2.0 +2023-02-06 06:06:34,791 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:06:47,816 INFO [train.py:901] (2/4) Epoch 8, batch 6000, loss[loss=0.2255, simple_loss=0.286, pruned_loss=0.08254, over 7695.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3242, pruned_loss=0.09208, over 1603640.12 frames. 
], batch size: 18, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:47,816 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 06:07:00,014 INFO [train.py:935] (2/4) Epoch 8, validation: loss=0.1996, simple_loss=0.2985, pruned_loss=0.05037, over 944034.00 frames. +2023-02-06 06:07:00,015 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 06:07:12,285 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62599.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:14,968 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:33,814 INFO [train.py:901] (2/4) Epoch 8, batch 6050, loss[loss=0.2319, simple_loss=0.2988, pruned_loss=0.08249, over 7205.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3239, pruned_loss=0.09201, over 1604975.39 frames. ], batch size: 16, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:07:35,910 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:37,858 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.575e+02 3.268e+02 4.071e+02 9.720e+02, threshold=6.536e+02, percent-clipped=3.0 +2023-02-06 06:07:44,182 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:56,963 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2438, 1.8339, 1.9000, 1.9496, 1.0971, 2.0130, 2.4587, 2.2830], + device='cuda:2'), covar=tensor([0.0461, 0.1113, 0.1643, 0.1261, 0.0627, 0.1267, 0.0561, 0.0501], + device='cuda:2'), in_proj_covar=tensor([0.0110, 0.0161, 0.0201, 0.0165, 0.0113, 0.0169, 0.0123, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 06:08:02,399 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:08,760 INFO [train.py:901] (2/4) Epoch 8, batch 6100, loss[loss=0.2081, simple_loss=0.2881, pruned_loss=0.06401, over 7801.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3233, pruned_loss=0.09147, over 1605251.32 frames. ], batch size: 20, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:08:10,157 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:37,242 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 06:08:43,088 INFO [train.py:901] (2/4) Epoch 8, batch 6150, loss[loss=0.2271, simple_loss=0.2974, pruned_loss=0.07842, over 7798.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3241, pruned_loss=0.09156, over 1611687.30 frames. 
], batch size: 19, lr: 9.42e-03, grad_scale: 8.0 +2023-02-06 06:08:47,084 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.667e+02 3.544e+02 4.037e+02 8.376e+02, threshold=7.087e+02, percent-clipped=5.0 +2023-02-06 06:08:55,075 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:57,067 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:14,631 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.96 vs. limit=2.0 +2023-02-06 06:09:17,711 INFO [train.py:901] (2/4) Epoch 8, batch 6200, loss[loss=0.2118, simple_loss=0.2905, pruned_loss=0.06652, over 6873.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3242, pruned_loss=0.09227, over 1606237.24 frames. ], batch size: 15, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:23,759 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:52,742 INFO [train.py:901] (2/4) Epoch 8, batch 6250, loss[loss=0.234, simple_loss=0.3029, pruned_loss=0.0826, over 7788.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3236, pruned_loss=0.09193, over 1609631.07 frames. ], batch size: 19, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:56,740 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.905e+02 2.717e+02 3.222e+02 4.596e+02 9.217e+02, threshold=6.445e+02, percent-clipped=3.0 +2023-02-06 06:10:08,418 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62855.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:16,910 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:24,981 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:26,043 INFO [train.py:901] (2/4) Epoch 8, batch 6300, loss[loss=0.2295, simple_loss=0.3134, pruned_loss=0.07275, over 8358.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3237, pruned_loss=0.09204, over 1606366.74 frames. ], batch size: 24, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:10:28,481 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 06:10:44,098 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:01,294 INFO [train.py:901] (2/4) Epoch 8, batch 6350, loss[loss=0.301, simple_loss=0.3786, pruned_loss=0.1117, over 8585.00 frames. ], tot_loss[loss=0.257, simple_loss=0.3263, pruned_loss=0.09388, over 1602742.07 frames. 
], batch size: 34, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:05,344 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.824e+02 3.504e+02 4.161e+02 7.437e+02, threshold=7.007e+02, percent-clipped=2.0 +2023-02-06 06:11:12,100 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:22,820 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0611, 2.1947, 1.8198, 2.5391, 1.4587, 1.6424, 1.9309, 2.3071], + device='cuda:2'), covar=tensor([0.0850, 0.0898, 0.1221, 0.0501, 0.1106, 0.1457, 0.0955, 0.0768], + device='cuda:2'), in_proj_covar=tensor([0.0251, 0.0227, 0.0268, 0.0217, 0.0230, 0.0263, 0.0266, 0.0234], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:11:35,244 INFO [train.py:901] (2/4) Epoch 8, batch 6400, loss[loss=0.2989, simple_loss=0.3742, pruned_loss=0.1118, over 8467.00 frames. ], tot_loss[loss=0.2562, simple_loss=0.3259, pruned_loss=0.09325, over 1604723.50 frames. ], batch size: 25, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:52,126 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:54,035 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5397, 4.4913, 4.0147, 1.9058, 4.0259, 3.9571, 4.1716, 3.6564], + device='cuda:2'), covar=tensor([0.0707, 0.0554, 0.1092, 0.4737, 0.0862, 0.0889, 0.1347, 0.0764], + device='cuda:2'), in_proj_covar=tensor([0.0425, 0.0337, 0.0353, 0.0446, 0.0352, 0.0334, 0.0344, 0.0294], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:12:03,489 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:07,444 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:09,594 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:10,068 INFO [train.py:901] (2/4) Epoch 8, batch 6450, loss[loss=0.3628, simple_loss=0.3943, pruned_loss=0.1657, over 6693.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.3263, pruned_loss=0.09355, over 1605975.47 frames. ], batch size: 71, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:12:14,130 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 2.983e+02 3.820e+02 5.218e+02 9.633e+02, threshold=7.640e+02, percent-clipped=4.0 +2023-02-06 06:12:31,362 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:45,219 INFO [train.py:901] (2/4) Epoch 8, batch 6500, loss[loss=0.2821, simple_loss=0.3604, pruned_loss=0.1019, over 8338.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3265, pruned_loss=0.09339, over 1606413.09 frames. ], batch size: 26, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:14,194 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:20,106 INFO [train.py:901] (2/4) Epoch 8, batch 6550, loss[loss=0.2737, simple_loss=0.3557, pruned_loss=0.09589, over 8244.00 frames. 
], tot_loss[loss=0.2568, simple_loss=0.327, pruned_loss=0.09328, over 1611661.52 frames. ], batch size: 24, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:22,292 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:24,224 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.645e+02 3.116e+02 3.905e+02 9.747e+02, threshold=6.232e+02, percent-clipped=3.0 +2023-02-06 06:13:24,627 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 06:13:27,749 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:31,967 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:49,398 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 06:13:54,675 INFO [train.py:901] (2/4) Epoch 8, batch 6600, loss[loss=0.2862, simple_loss=0.3623, pruned_loss=0.105, over 8637.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.327, pruned_loss=0.09377, over 1613334.55 frames. ], batch size: 34, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:14:07,363 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 06:14:29,747 INFO [train.py:901] (2/4) Epoch 8, batch 6650, loss[loss=0.2533, simple_loss=0.3248, pruned_loss=0.09087, over 8466.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3254, pruned_loss=0.09309, over 1607335.54 frames. ], batch size: 27, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:14:33,643 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 2.943e+02 3.528e+02 4.449e+02 1.178e+03, threshold=7.055e+02, percent-clipped=8.0 +2023-02-06 06:14:34,520 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:14:42,438 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:01,174 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:03,728 INFO [train.py:901] (2/4) Epoch 8, batch 6700, loss[loss=0.2275, simple_loss=0.3059, pruned_loss=0.07449, over 8484.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3237, pruned_loss=0.0918, over 1606909.84 frames. 
], batch size: 25, lr: 9.38e-03, grad_scale: 16.0 +2023-02-06 06:15:07,926 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6837, 2.9977, 2.6705, 3.9571, 1.7016, 2.0297, 2.3750, 3.1706], + device='cuda:2'), covar=tensor([0.0753, 0.1001, 0.0905, 0.0258, 0.1362, 0.1530, 0.1248, 0.0847], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0225, 0.0269, 0.0217, 0.0229, 0.0262, 0.0266, 0.0232], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:15:19,259 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:23,927 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5897, 2.0454, 2.1381, 1.1783, 2.2339, 1.4590, 0.7031, 1.7808], + device='cuda:2'), covar=tensor([0.0287, 0.0162, 0.0134, 0.0284, 0.0187, 0.0478, 0.0399, 0.0133], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0284, 0.0232, 0.0338, 0.0271, 0.0434, 0.0333, 0.0311], + device='cuda:2'), out_proj_covar=tensor([1.0913e-04, 8.3731e-05, 6.8561e-05, 1.0041e-04, 8.1878e-05, 1.4112e-04, + 1.0095e-04, 9.3288e-05], device='cuda:2') +2023-02-06 06:15:29,350 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:38,594 INFO [train.py:901] (2/4) Epoch 8, batch 6750, loss[loss=0.2561, simple_loss=0.3102, pruned_loss=0.101, over 7529.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.323, pruned_loss=0.09125, over 1604523.65 frames. ], batch size: 18, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:15:43,298 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.603e+02 3.068e+02 3.707e+02 1.416e+03, threshold=6.136e+02, percent-clipped=3.0 +2023-02-06 06:15:46,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:13,021 INFO [train.py:901] (2/4) Epoch 8, batch 6800, loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05834, over 7648.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3236, pruned_loss=0.09129, over 1609040.78 frames. ], batch size: 19, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:16:19,355 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0 +2023-02-06 06:16:20,988 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 06:16:24,489 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63399.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:40,082 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5922, 2.4658, 4.5965, 1.2450, 2.9040, 2.0485, 1.7543, 2.5695], + device='cuda:2'), covar=tensor([0.1741, 0.1961, 0.0730, 0.4081, 0.1556, 0.2860, 0.1810, 0.2529], + device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0487, 0.0531, 0.0566, 0.0603, 0.0535, 0.0459, 0.0598], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:16:42,731 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:47,930 INFO [train.py:901] (2/4) Epoch 8, batch 6850, loss[loss=0.3152, simple_loss=0.3642, pruned_loss=0.1331, over 6894.00 frames. 
], tot_loss[loss=0.2533, simple_loss=0.3234, pruned_loss=0.09162, over 1604417.84 frames. ], batch size: 71, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:16:52,299 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:52,777 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.669e+02 3.418e+02 4.059e+02 7.847e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 06:16:53,655 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:11,111 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 06:17:22,528 INFO [train.py:901] (2/4) Epoch 8, batch 6900, loss[loss=0.2234, simple_loss=0.298, pruned_loss=0.07435, over 8240.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.323, pruned_loss=0.09174, over 1606309.25 frames. ], batch size: 22, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:17:31,380 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 06:17:39,557 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,288 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,757 INFO [train.py:901] (2/4) Epoch 8, batch 6950, loss[loss=0.2139, simple_loss=0.3035, pruned_loss=0.06214, over 8039.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3239, pruned_loss=0.09181, over 1605843.35 frames. ], batch size: 22, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:18:03,570 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.671e+02 3.369e+02 4.495e+02 9.890e+02, threshold=6.738e+02, percent-clipped=4.0 +2023-02-06 06:18:09,754 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1678, 1.2538, 4.3108, 1.7766, 2.2838, 4.7950, 4.9068, 3.9431], + device='cuda:2'), covar=tensor([0.1162, 0.1931, 0.0304, 0.1881, 0.1106, 0.0244, 0.0258, 0.0735], + device='cuda:2'), in_proj_covar=tensor([0.0247, 0.0278, 0.0239, 0.0265, 0.0250, 0.0221, 0.0289, 0.0278], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 06:18:18,577 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 06:18:32,008 INFO [train.py:901] (2/4) Epoch 8, batch 7000, loss[loss=0.3027, simple_loss=0.3664, pruned_loss=0.1195, over 8463.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3234, pruned_loss=0.0916, over 1607680.23 frames. ], batch size: 29, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:18:32,809 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:36,323 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:48,873 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:19:08,075 INFO [train.py:901] (2/4) Epoch 8, batch 7050, loss[loss=0.2082, simple_loss=0.2872, pruned_loss=0.06459, over 7647.00 frames. 
], tot_loss[loss=0.2539, simple_loss=0.3244, pruned_loss=0.09168, over 1608388.54 frames. ], batch size: 19, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:19:12,575 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.720e+02 3.242e+02 3.930e+02 7.648e+02, threshold=6.484e+02, percent-clipped=3.0 +2023-02-06 06:19:42,436 INFO [train.py:901] (2/4) Epoch 8, batch 7100, loss[loss=0.2747, simple_loss=0.3475, pruned_loss=0.101, over 8253.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3245, pruned_loss=0.09116, over 1610041.29 frames. ], batch size: 24, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:19:53,533 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:17,191 INFO [train.py:901] (2/4) Epoch 8, batch 7150, loss[loss=0.2348, simple_loss=0.3168, pruned_loss=0.07634, over 8197.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3254, pruned_loss=0.09202, over 1613203.12 frames. ], batch size: 23, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:21,741 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.778e+02 3.549e+02 4.516e+02 1.097e+03, threshold=7.098e+02, percent-clipped=7.0 +2023-02-06 06:20:51,748 INFO [train.py:901] (2/4) Epoch 8, batch 7200, loss[loss=0.2283, simple_loss=0.309, pruned_loss=0.0738, over 8192.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3248, pruned_loss=0.09156, over 1613973.88 frames. ], batch size: 23, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:51,831 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:53,156 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:21,048 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 06:21:21,571 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5517, 1.9182, 3.1741, 1.3205, 2.2741, 1.9689, 1.6191, 1.9109], + device='cuda:2'), covar=tensor([0.1520, 0.2012, 0.0634, 0.3460, 0.1341, 0.2486, 0.1654, 0.2108], + device='cuda:2'), in_proj_covar=tensor([0.0484, 0.0488, 0.0534, 0.0568, 0.0606, 0.0542, 0.0461, 0.0603], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:21:23,973 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:26,392 INFO [train.py:901] (2/4) Epoch 8, batch 7250, loss[loss=0.2367, simple_loss=0.3159, pruned_loss=0.07879, over 8755.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3244, pruned_loss=0.09181, over 1611382.32 frames. ], batch size: 30, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:21:30,981 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.694e+02 3.202e+02 4.148e+02 8.009e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 06:21:42,697 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.60 vs. limit=5.0 +2023-02-06 06:21:48,005 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:00,775 INFO [train.py:901] (2/4) Epoch 8, batch 7300, loss[loss=0.227, simple_loss=0.2985, pruned_loss=0.07777, over 8089.00 frames. 
], tot_loss[loss=0.2538, simple_loss=0.3248, pruned_loss=0.09138, over 1615495.47 frames. ], batch size: 21, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:12,945 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:14,330 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:36,710 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:37,304 INFO [train.py:901] (2/4) Epoch 8, batch 7350, loss[loss=0.2179, simple_loss=0.3045, pruned_loss=0.06563, over 8079.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3237, pruned_loss=0.09032, over 1615192.18 frames. ], batch size: 21, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:38,881 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:42,238 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.551e+02 3.183e+02 3.767e+02 5.416e+02, threshold=6.365e+02, percent-clipped=0.0 +2023-02-06 06:22:48,933 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63948.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:53,925 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:05,956 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 06:23:10,849 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:12,547 INFO [train.py:901] (2/4) Epoch 8, batch 7400, loss[loss=0.2082, simple_loss=0.284, pruned_loss=0.0662, over 7925.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3242, pruned_loss=0.09048, over 1616637.52 frames. ], batch size: 20, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:22,471 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0 +2023-02-06 06:23:24,819 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 06:23:38,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3613, 2.6077, 2.2805, 2.9854, 2.1444, 2.0496, 2.2936, 2.7541], + device='cuda:2'), covar=tensor([0.0678, 0.0701, 0.0888, 0.0410, 0.0886, 0.1090, 0.0793, 0.0604], + device='cuda:2'), in_proj_covar=tensor([0.0246, 0.0223, 0.0270, 0.0214, 0.0226, 0.0262, 0.0265, 0.0232], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:23:48,438 INFO [train.py:901] (2/4) Epoch 8, batch 7450, loss[loss=0.2406, simple_loss=0.3231, pruned_loss=0.07907, over 8679.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3234, pruned_loss=0.09092, over 1610313.65 frames. 
], batch size: 34, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:53,166 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.827e+02 3.358e+02 3.935e+02 9.777e+02, threshold=6.715e+02, percent-clipped=5.0 +2023-02-06 06:23:54,690 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:58,105 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:05,494 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 06:24:10,182 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:23,173 INFO [train.py:901] (2/4) Epoch 8, batch 7500, loss[loss=0.236, simple_loss=0.3037, pruned_loss=0.08418, over 7810.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3249, pruned_loss=0.09164, over 1615320.73 frames. ], batch size: 20, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:24:26,658 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 06:24:57,465 INFO [train.py:901] (2/4) Epoch 8, batch 7550, loss[loss=0.2466, simple_loss=0.3205, pruned_loss=0.08637, over 8458.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3262, pruned_loss=0.09253, over 1613542.32 frames. ], batch size: 29, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:02,136 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.846e+02 3.017e+02 3.905e+02 4.969e+02 7.546e+02, threshold=7.810e+02, percent-clipped=1.0 +2023-02-06 06:25:11,701 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:13,074 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:15,753 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9164, 1.5337, 2.1421, 1.8022, 1.9059, 1.7798, 1.4490, 0.5911], + device='cuda:2'), covar=tensor([0.3031, 0.3086, 0.0911, 0.1803, 0.1405, 0.1745, 0.1399, 0.2975], + device='cuda:2'), in_proj_covar=tensor([0.0847, 0.0808, 0.0689, 0.0803, 0.0893, 0.0746, 0.0678, 0.0723], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:25:24,180 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:28,950 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:30,999 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:32,166 INFO [train.py:901] (2/4) Epoch 8, batch 7600, loss[loss=0.2446, simple_loss=0.3188, pruned_loss=0.08522, over 8250.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.325, pruned_loss=0.09205, over 1610477.97 frames. 
], batch size: 22, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:49,063 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64207.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:07,006 INFO [train.py:901] (2/4) Epoch 8, batch 7650, loss[loss=0.2221, simple_loss=0.3025, pruned_loss=0.0708, over 8507.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3251, pruned_loss=0.09232, over 1611357.10 frames. ], batch size: 26, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:07,157 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6541, 1.4860, 4.7517, 1.7361, 4.1846, 3.8902, 4.3124, 4.1764], + device='cuda:2'), covar=tensor([0.0406, 0.3990, 0.0503, 0.3249, 0.1131, 0.0794, 0.0472, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0426, 0.0546, 0.0536, 0.0494, 0.0561, 0.0477, 0.0471, 0.0529], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 06:26:11,834 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.778e+02 3.467e+02 5.154e+02 1.113e+03, threshold=6.933e+02, percent-clipped=3.0 +2023-02-06 06:26:19,381 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:35,467 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6877, 2.9633, 2.3460, 3.9154, 1.8385, 2.2429, 2.5195, 2.9882], + device='cuda:2'), covar=tensor([0.0716, 0.0917, 0.1072, 0.0246, 0.1328, 0.1516, 0.1295, 0.0894], + device='cuda:2'), in_proj_covar=tensor([0.0249, 0.0227, 0.0270, 0.0215, 0.0228, 0.0265, 0.0266, 0.0233], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:26:38,167 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:40,894 INFO [train.py:901] (2/4) Epoch 8, batch 7700, loss[loss=0.2373, simple_loss=0.3156, pruned_loss=0.0795, over 8427.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3254, pruned_loss=0.09212, over 1618625.15 frames. ], batch size: 49, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:45,248 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:56,487 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:08,129 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:09,995 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,465 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
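The recurring `attn_weights_entropy` tensors are a per-head attention diagnostic. One plausible reading, consistent with the shape of the dumps, is the average entropy of each head's attention distribution: values near zero would mean a head attends to a single position, while larger values mean diffuse attention. A minimal sketch under that assumption (the `(batch, heads, query, key)` layout is hypothetical):

```python
import torch


def attn_weights_entropy(attn: torch.Tensor, eps: float = 1.0e-20) -> torch.Tensor:
    """Mean entropy per head for attention weights shaped (batch, heads, query, key).

    Each row attn[b, h, q, :] is assumed to be a probability distribution over
    key positions; low entropy indicates a sharply focused head.
    """
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (batch, heads, query)
    return ent.mean(dim=(0, 2))                     # one value per head


# usage sketch
weights = torch.softmax(torch.randn(4, 8, 50, 50), dim=-1)
print(attn_weights_entropy(weights))
```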
Duration: 27.02225 +2023-02-06 06:27:10,608 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1647, 1.8046, 3.4432, 1.4346, 2.1780, 3.7668, 3.8302, 3.2008], + device='cuda:2'), covar=tensor([0.1016, 0.1387, 0.0348, 0.1937, 0.0967, 0.0258, 0.0431, 0.0589], + device='cuda:2'), in_proj_covar=tensor([0.0256, 0.0287, 0.0244, 0.0274, 0.0257, 0.0227, 0.0298, 0.0286], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 06:27:13,362 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:16,534 INFO [train.py:901] (2/4) Epoch 8, batch 7750, loss[loss=0.1956, simple_loss=0.272, pruned_loss=0.05961, over 7434.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3245, pruned_loss=0.09137, over 1618720.92 frames. ], batch size: 17, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:27:21,027 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.516e+02 3.070e+02 3.996e+02 6.859e+02, threshold=6.139e+02, percent-clipped=0.0 +2023-02-06 06:27:25,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:38,598 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:43,247 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3427, 2.6901, 2.1556, 3.8021, 1.9151, 1.8810, 2.2938, 2.7707], + device='cuda:2'), covar=tensor([0.0862, 0.0957, 0.1133, 0.0313, 0.1217, 0.1522, 0.1222, 0.0875], + device='cuda:2'), in_proj_covar=tensor([0.0247, 0.0227, 0.0268, 0.0215, 0.0224, 0.0263, 0.0264, 0.0231], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:27:51,130 INFO [train.py:901] (2/4) Epoch 8, batch 7800, loss[loss=0.2571, simple_loss=0.3317, pruned_loss=0.09119, over 8477.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3253, pruned_loss=0.09134, over 1618714.09 frames. ], batch size: 27, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:27:53,240 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:58,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:28:25,437 INFO [train.py:901] (2/4) Epoch 8, batch 7850, loss[loss=0.3021, simple_loss=0.3703, pruned_loss=0.117, over 8242.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3252, pruned_loss=0.09157, over 1618654.77 frames. ], batch size: 24, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:30,088 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.075e+02 2.873e+02 3.519e+02 4.505e+02 1.254e+03, threshold=7.037e+02, percent-clipped=6.0 +2023-02-06 06:28:58,104 INFO [train.py:901] (2/4) Epoch 8, batch 7900, loss[loss=0.2499, simple_loss=0.3099, pruned_loss=0.09498, over 7435.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.3256, pruned_loss=0.09183, over 1618914.53 frames. 
], batch size: 17, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:58,915 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:10,198 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:32,361 INFO [train.py:901] (2/4) Epoch 8, batch 7950, loss[loss=0.2376, simple_loss=0.3054, pruned_loss=0.08493, over 7432.00 frames. ], tot_loss[loss=0.254, simple_loss=0.325, pruned_loss=0.0915, over 1617812.45 frames. ], batch size: 17, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:29:37,072 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.689e+02 3.383e+02 4.341e+02 8.251e+02, threshold=6.766e+02, percent-clipped=4.0 +2023-02-06 06:29:40,085 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:56,718 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:03,508 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:06,037 INFO [train.py:901] (2/4) Epoch 8, batch 8000, loss[loss=0.227, simple_loss=0.2932, pruned_loss=0.08041, over 8055.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3247, pruned_loss=0.09174, over 1616260.31 frames. ], batch size: 20, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:10,408 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2791, 1.5222, 2.2690, 1.0857, 1.7575, 1.4988, 1.4004, 1.5451], + device='cuda:2'), covar=tensor([0.1291, 0.1623, 0.0553, 0.2863, 0.1167, 0.2004, 0.1351, 0.1659], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0480, 0.0518, 0.0554, 0.0594, 0.0529, 0.0453, 0.0591], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:30:14,259 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:20,519 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:23,054 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:23,112 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6295, 2.9854, 2.0256, 2.3658, 2.3702, 1.5247, 2.3146, 2.3991], + device='cuda:2'), covar=tensor([0.1356, 0.0354, 0.0956, 0.0618, 0.0713, 0.1358, 0.0890, 0.0813], + device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0235, 0.0308, 0.0293, 0.0303, 0.0318, 0.0336, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 06:30:39,930 INFO [train.py:901] (2/4) Epoch 8, batch 8050, loss[loss=0.2185, simple_loss=0.2926, pruned_loss=0.07221, over 7942.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3235, pruned_loss=0.0914, over 1604067.36 frames. 
], batch size: 20, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:44,638 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.434e+02 2.955e+02 3.616e+02 6.730e+02, threshold=5.909e+02, percent-clipped=0.0 +2023-02-06 06:30:51,579 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:13,129 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 06:31:17,613 INFO [train.py:901] (2/4) Epoch 9, batch 0, loss[loss=0.2864, simple_loss=0.3456, pruned_loss=0.1136, over 8246.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3456, pruned_loss=0.1136, over 8246.00 frames. ], batch size: 24, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:31:17,613 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 06:31:28,851 INFO [train.py:935] (2/4) Epoch 9, validation: loss=0.1983, simple_loss=0.2974, pruned_loss=0.04961, over 944034.00 frames. +2023-02-06 06:31:28,852 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 06:31:29,665 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,196 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,991 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-06 06:31:43,422 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 06:31:56,868 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:58,428 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:02,442 INFO [train.py:901] (2/4) Epoch 9, batch 50, loss[loss=0.2364, simple_loss=0.3133, pruned_loss=0.07972, over 8321.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.3281, pruned_loss=0.09302, over 368190.11 frames. ], batch size: 25, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:32:06,625 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:07,942 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4177, 1.8923, 3.2332, 1.1299, 2.3346, 1.7131, 1.7069, 1.8614], + device='cuda:2'), covar=tensor([0.2025, 0.2260, 0.0848, 0.4242, 0.1737, 0.3124, 0.1877, 0.2746], + device='cuda:2'), in_proj_covar=tensor([0.0475, 0.0482, 0.0522, 0.0562, 0.0595, 0.0532, 0.0457, 0.0595], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:32:16,330 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
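The `Maximum memory allocated so far is 6620MB` lines printed after each validation pass can be produced directly from PyTorch's CUDA allocator statistics. `torch.cuda.max_memory_allocated` is the standard API for this high-water mark; the exact call site in this trainer is an assumption.

```python
import logging

import torch


def log_peak_cuda_memory(device: torch.device) -> None:
    """Log the high-water mark of CUDA memory allocated on `device`, in MB."""
    if torch.cuda.is_available():
        peak_mb = torch.cuda.max_memory_allocated(device) // (2 ** 20)
        logging.info(f"Maximum memory allocated so far is {peak_mb}MB")
```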
Duration: 0.9409375 +2023-02-06 06:32:18,980 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.818e+02 3.347e+02 4.122e+02 1.189e+03, threshold=6.695e+02, percent-clipped=9.0 +2023-02-06 06:32:30,209 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4947, 1.9379, 2.0774, 1.2049, 2.1982, 1.3041, 0.6339, 1.7030], + device='cuda:2'), covar=tensor([0.0371, 0.0190, 0.0148, 0.0341, 0.0208, 0.0617, 0.0505, 0.0185], + device='cuda:2'), in_proj_covar=tensor([0.0372, 0.0288, 0.0239, 0.0353, 0.0281, 0.0447, 0.0340, 0.0324], + device='cuda:2'), out_proj_covar=tensor([1.1223e-04, 8.4071e-05, 7.0473e-05, 1.0408e-04, 8.4099e-05, 1.4525e-04, + 1.0258e-04, 9.6760e-05], device='cuda:2') +2023-02-06 06:32:31,576 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:36,073 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:37,300 INFO [train.py:901] (2/4) Epoch 9, batch 100, loss[loss=0.2468, simple_loss=0.3297, pruned_loss=0.08195, over 8452.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3236, pruned_loss=0.0899, over 646475.57 frames. ], batch size: 25, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:32:41,536 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:42,074 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 06:32:49,740 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:11,714 INFO [train.py:901] (2/4) Epoch 9, batch 150, loss[loss=0.262, simple_loss=0.3116, pruned_loss=0.1061, over 8227.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3231, pruned_loss=0.0892, over 865017.51 frames. ], batch size: 22, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:33:16,751 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:20,060 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:27,774 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.896e+02 2.577e+02 3.213e+02 3.848e+02 9.281e+02, threshold=6.425e+02, percent-clipped=3.0 +2023-02-06 06:33:45,632 INFO [train.py:901] (2/4) Epoch 9, batch 200, loss[loss=0.2694, simple_loss=0.3362, pruned_loss=0.1012, over 7810.00 frames. ], tot_loss[loss=0.25, simple_loss=0.322, pruned_loss=0.089, over 1026989.43 frames. ], batch size: 20, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:34:21,132 INFO [train.py:901] (2/4) Epoch 9, batch 250, loss[loss=0.2579, simple_loss=0.3146, pruned_loss=0.1006, over 7922.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3204, pruned_loss=0.08764, over 1155514.51 frames. ], batch size: 20, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:34,285 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
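The `zipformer.py:1185` records pair each encoder layer's warmup window (`warmup_begin`, `warmup_end`, measured in batches) with the current `batch_count` and the set of layers selected for dropping; with `num_to_drop=0` throughout this run, no layers are actually being bypassed. A speculative sketch of such a stochastic layer-drop schedule is below; the linear annealing shape and the maximum drop probability are assumptions, not taken from the zipformer source.

```python
import random
from typing import Set


def pick_layers_to_drop(
    batch_count: float,
    warmup_begin: float,
    warmup_end: float,
    num_layers: int,
    max_drop_prob: float = 0.075,  # hypothetical ceiling on the drop probability
) -> Set[int]:
    """Randomly choose encoder layers to bypass, more aggressively early in warmup."""
    if batch_count >= warmup_end:
        return set()  # fully warmed up: consistent with num_to_drop=0 in this log
    # linearly anneal the drop probability to zero across the warmup window
    frac = (warmup_end - batch_count) / (warmup_end - warmup_begin)
    frac = max(0.0, min(1.0, frac))
    p = max_drop_prob * frac
    return {i for i in range(num_layers) if random.random() < p}
```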
Duration: 28.0944375 +2023-02-06 06:34:36,819 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 2.841e+02 3.295e+02 4.179e+02 1.029e+03, threshold=6.590e+02, percent-clipped=5.0 +2023-02-06 06:34:39,018 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:42,840 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 06:34:44,923 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:54,260 INFO [train.py:901] (2/4) Epoch 9, batch 300, loss[loss=0.2777, simple_loss=0.3464, pruned_loss=0.1045, over 8547.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3209, pruned_loss=0.0881, over 1257433.34 frames. ], batch size: 49, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:54,475 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:11,901 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:15,056 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:26,440 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:30,328 INFO [train.py:901] (2/4) Epoch 9, batch 350, loss[loss=0.2741, simple_loss=0.348, pruned_loss=0.1001, over 8033.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3197, pruned_loss=0.08826, over 1333077.91 frames. ], batch size: 22, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:35:31,387 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 06:35:46,488 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.892e+02 2.570e+02 3.183e+02 3.796e+02 1.000e+03, threshold=6.367e+02, percent-clipped=4.0 +2023-02-06 06:36:03,871 INFO [train.py:901] (2/4) Epoch 9, batch 400, loss[loss=0.1848, simple_loss=0.2607, pruned_loss=0.05449, over 7424.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3205, pruned_loss=0.08869, over 1395368.89 frames. 
], batch size: 17, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:03,965 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65065.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:04,748 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:09,406 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:12,870 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:30,516 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:33,062 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:37,775 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:38,389 INFO [train.py:901] (2/4) Epoch 9, batch 450, loss[loss=0.2343, simple_loss=0.3171, pruned_loss=0.07577, over 8508.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3218, pruned_loss=0.08902, over 1448163.03 frames. ], batch size: 26, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:46,195 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:56,307 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.705e+02 3.323e+02 3.920e+02 9.407e+02, threshold=6.647e+02, percent-clipped=6.0 +2023-02-06 06:37:13,469 INFO [train.py:901] (2/4) Epoch 9, batch 500, loss[loss=0.253, simple_loss=0.3265, pruned_loss=0.08976, over 8590.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3229, pruned_loss=0.08986, over 1486817.35 frames. ], batch size: 31, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:37:16,356 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1731, 2.0836, 1.1600, 2.9116, 1.3383, 1.0831, 2.0821, 2.1544], + device='cuda:2'), covar=tensor([0.2015, 0.1506, 0.2579, 0.0536, 0.1640, 0.2669, 0.1438, 0.1182], + device='cuda:2'), in_proj_covar=tensor([0.0246, 0.0225, 0.0269, 0.0217, 0.0225, 0.0260, 0.0268, 0.0230], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:37:23,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:35,047 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:45,912 INFO [train.py:901] (2/4) Epoch 9, batch 550, loss[loss=0.2586, simple_loss=0.3336, pruned_loss=0.09179, over 8482.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3232, pruned_loss=0.08983, over 1516932.15 frames. 
], batch size: 28, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:37:51,952 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:52,666 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65223.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:56,718 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:03,161 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.829e+02 3.496e+02 4.355e+02 8.306e+02, threshold=6.991e+02, percent-clipped=2.0 +2023-02-06 06:38:21,261 INFO [train.py:901] (2/4) Epoch 9, batch 600, loss[loss=0.2802, simple_loss=0.3528, pruned_loss=0.1038, over 8545.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3231, pruned_loss=0.08969, over 1543413.61 frames. ], batch size: 31, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:38:38,362 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 06:38:46,522 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:52,418 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3713, 4.3304, 3.8909, 1.9226, 3.8654, 3.9461, 4.1259, 3.5729], + device='cuda:2'), covar=tensor([0.0693, 0.0532, 0.0907, 0.4089, 0.0768, 0.0793, 0.0939, 0.0883], + device='cuda:2'), in_proj_covar=tensor([0.0435, 0.0348, 0.0364, 0.0454, 0.0357, 0.0338, 0.0352, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:38:54,302 INFO [train.py:901] (2/4) Epoch 9, batch 650, loss[loss=0.2736, simple_loss=0.3493, pruned_loss=0.09892, over 8504.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3235, pruned_loss=0.08994, over 1558642.58 frames. 
], batch size: 26, lr: 8.75e-03, grad_scale: 16.0 +2023-02-06 06:38:59,116 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,303 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,881 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.656e+02 3.252e+02 4.080e+02 6.220e+02, threshold=6.503e+02, percent-clipped=0.0 +2023-02-06 06:39:12,455 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3842, 1.7999, 3.1274, 1.1279, 2.1999, 1.7876, 1.5501, 1.9503], + device='cuda:2'), covar=tensor([0.1685, 0.2083, 0.0614, 0.3727, 0.1430, 0.2629, 0.1682, 0.2074], + device='cuda:2'), in_proj_covar=tensor([0.0476, 0.0490, 0.0525, 0.0565, 0.0600, 0.0537, 0.0461, 0.0600], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:39:17,065 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:19,635 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2119, 3.1107, 2.8271, 1.4182, 2.8405, 2.8521, 2.8302, 2.6386], + device='cuda:2'), covar=tensor([0.1228, 0.0930, 0.1371, 0.4630, 0.1139, 0.1285, 0.1558, 0.1174], + device='cuda:2'), in_proj_covar=tensor([0.0433, 0.0346, 0.0362, 0.0453, 0.0356, 0.0337, 0.0350, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:39:23,679 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8310, 5.8742, 5.0180, 2.3651, 5.1505, 5.4641, 5.3693, 5.0368], + device='cuda:2'), covar=tensor([0.0481, 0.0363, 0.0741, 0.4013, 0.0611, 0.0567, 0.0867, 0.0718], + device='cuda:2'), in_proj_covar=tensor([0.0432, 0.0345, 0.0361, 0.0451, 0.0355, 0.0336, 0.0349, 0.0300], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:39:29,082 INFO [train.py:901] (2/4) Epoch 9, batch 700, loss[loss=0.2118, simple_loss=0.277, pruned_loss=0.07332, over 7435.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3217, pruned_loss=0.08954, over 1570570.90 frames. ], batch size: 17, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:39:41,001 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65381.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:57,981 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:03,865 INFO [train.py:901] (2/4) Epoch 9, batch 750, loss[loss=0.2101, simple_loss=0.2959, pruned_loss=0.06215, over 8298.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.321, pruned_loss=0.0891, over 1575973.21 frames. 
], batch size: 23, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:05,346 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:18,167 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:19,958 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.803e+02 3.527e+02 4.474e+02 1.505e+03, threshold=7.053e+02, percent-clipped=7.0 +2023-02-06 06:40:21,301 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 06:40:30,030 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 06:40:30,193 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:30,516 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-06 06:40:36,165 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65461.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:38,589 INFO [train.py:901] (2/4) Epoch 9, batch 800, loss[loss=0.2338, simple_loss=0.2981, pruned_loss=0.08478, over 7243.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3205, pruned_loss=0.0892, over 1578895.65 frames. ], batch size: 16, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:47,458 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:53,606 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65485.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:58,222 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0564, 2.4151, 1.7988, 2.9540, 1.3288, 1.4251, 1.9866, 2.4502], + device='cuda:2'), covar=tensor([0.0885, 0.0946, 0.1132, 0.0436, 0.1433, 0.1894, 0.1248, 0.0802], + device='cuda:2'), in_proj_covar=tensor([0.0247, 0.0226, 0.0270, 0.0219, 0.0226, 0.0263, 0.0266, 0.0228], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:41:05,677 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:09,189 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3557, 1.8951, 2.8855, 2.2326, 2.3730, 2.1804, 1.6497, 1.1426], + device='cuda:2'), covar=tensor([0.3073, 0.3073, 0.0781, 0.1945, 0.1622, 0.1655, 0.1442, 0.3278], + device='cuda:2'), in_proj_covar=tensor([0.0855, 0.0810, 0.0700, 0.0803, 0.0895, 0.0751, 0.0685, 0.0729], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 06:41:10,502 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:13,593 INFO [train.py:901] (2/4) Epoch 9, batch 850, loss[loss=0.2899, simple_loss=0.3504, pruned_loss=0.1147, over 8318.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3215, pruned_loss=0.08918, over 1590030.98 frames. 
], batch size: 25, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:41:17,141 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0230, 1.2967, 1.2050, 0.4624, 1.2691, 1.0913, 0.0963, 1.1817], + device='cuda:2'), covar=tensor([0.0165, 0.0129, 0.0125, 0.0235, 0.0159, 0.0371, 0.0345, 0.0120], + device='cuda:2'), in_proj_covar=tensor([0.0365, 0.0290, 0.0241, 0.0350, 0.0282, 0.0447, 0.0336, 0.0319], + device='cuda:2'), out_proj_covar=tensor([1.0988e-04, 8.4788e-05, 7.1140e-05, 1.0286e-04, 8.4558e-05, 1.4460e-04, + 1.0110e-04, 9.5098e-05], device='cuda:2') +2023-02-06 06:41:25,164 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:29,557 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.736e+02 3.271e+02 4.209e+02 1.110e+03, threshold=6.542e+02, percent-clipped=5.0 +2023-02-06 06:41:37,089 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65550.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:41:47,587 INFO [train.py:901] (2/4) Epoch 9, batch 900, loss[loss=0.1947, simple_loss=0.2808, pruned_loss=0.05423, over 8457.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3226, pruned_loss=0.09015, over 1594618.08 frames. ], batch size: 27, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:42:12,162 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3553, 2.5188, 1.7072, 2.0124, 2.0481, 1.3276, 1.7521, 1.7531], + device='cuda:2'), covar=tensor([0.1376, 0.0353, 0.0952, 0.0558, 0.0587, 0.1359, 0.0896, 0.0915], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0237, 0.0312, 0.0300, 0.0304, 0.0320, 0.0341, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 06:42:23,181 INFO [train.py:901] (2/4) Epoch 9, batch 950, loss[loss=0.2024, simple_loss=0.2812, pruned_loss=0.06173, over 7538.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.323, pruned_loss=0.09016, over 1600373.44 frames. ], batch size: 18, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:42:39,187 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.498e+02 3.047e+02 4.041e+02 6.463e+02, threshold=6.094e+02, percent-clipped=0.0 +2023-02-06 06:42:44,699 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:42:50,379 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 06:42:56,309 INFO [train.py:901] (2/4) Epoch 9, batch 1000, loss[loss=0.2121, simple_loss=0.2982, pruned_loss=0.06303, over 8243.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3242, pruned_loss=0.09072, over 1607976.85 frames. ], batch size: 22, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:20,517 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9538, 4.1683, 2.2899, 2.7265, 2.8202, 1.8609, 2.7342, 2.8736], + device='cuda:2'), covar=tensor([0.1330, 0.0173, 0.0839, 0.0686, 0.0616, 0.1230, 0.0928, 0.0799], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0235, 0.0314, 0.0301, 0.0307, 0.0321, 0.0342, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 06:43:23,105 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-06 06:43:27,432 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:31,857 INFO [train.py:901] (2/4) Epoch 9, batch 1050, loss[loss=0.2621, simple_loss=0.3431, pruned_loss=0.09058, over 8605.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.324, pruned_loss=0.09091, over 1604331.94 frames. ], batch size: 34, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:35,713 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 06:43:44,970 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:47,995 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 2.877e+02 3.398e+02 4.338e+02 8.070e+02, threshold=6.796e+02, percent-clipped=6.0 +2023-02-06 06:43:57,422 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3558, 1.8814, 1.3291, 2.7754, 1.2263, 1.0940, 1.9238, 2.1081], + device='cuda:2'), covar=tensor([0.1779, 0.1369, 0.2299, 0.0507, 0.1602, 0.2462, 0.1173, 0.1046], + device='cuda:2'), in_proj_covar=tensor([0.0245, 0.0223, 0.0268, 0.0217, 0.0225, 0.0262, 0.0265, 0.0229], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:44:03,491 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:05,257 INFO [train.py:901] (2/4) Epoch 9, batch 1100, loss[loss=0.2348, simple_loss=0.307, pruned_loss=0.08132, over 8138.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3252, pruned_loss=0.09189, over 1609061.68 frames. ], batch size: 22, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:44:20,622 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:38,692 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:39,893 INFO [train.py:901] (2/4) Epoch 9, batch 1150, loss[loss=0.2431, simple_loss=0.3246, pruned_loss=0.08076, over 8522.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3235, pruned_loss=0.09082, over 1607043.49 frames. ], batch size: 28, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:44:44,631 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 06:44:56,767 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.552e+02 3.121e+02 3.966e+02 8.304e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-06 06:45:09,006 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65856.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:45:14,589 INFO [train.py:901] (2/4) Epoch 9, batch 1200, loss[loss=0.2942, simple_loss=0.3383, pruned_loss=0.125, over 7921.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3225, pruned_loss=0.08996, over 1605548.22 frames. ], batch size: 20, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:45:33,741 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65894.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:45:47,641 INFO [train.py:901] (2/4) Epoch 9, batch 1250, loss[loss=0.2642, simple_loss=0.3322, pruned_loss=0.09811, over 7933.00 frames. 
], tot_loss[loss=0.2527, simple_loss=0.324, pruned_loss=0.0907, over 1608594.53 frames. ], batch size: 20, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:46:05,032 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.844e+02 3.477e+02 4.312e+02 8.167e+02, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 06:46:20,605 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8699, 1.8196, 2.5815, 1.3990, 2.1952, 2.8782, 2.7971, 2.5588], + device='cuda:2'), covar=tensor([0.0925, 0.1133, 0.0707, 0.1748, 0.1096, 0.0302, 0.0673, 0.0570], + device='cuda:2'), in_proj_covar=tensor([0.0253, 0.0284, 0.0244, 0.0275, 0.0259, 0.0227, 0.0301, 0.0289], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 06:46:23,824 INFO [train.py:901] (2/4) Epoch 9, batch 1300, loss[loss=0.2312, simple_loss=0.2956, pruned_loss=0.08339, over 7700.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3245, pruned_loss=0.09086, over 1611873.78 frames. ], batch size: 18, lr: 8.70e-03, grad_scale: 16.0 +2023-02-06 06:46:44,911 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 06:46:55,296 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66009.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:46:59,216 INFO [train.py:901] (2/4) Epoch 9, batch 1350, loss[loss=0.2756, simple_loss=0.3415, pruned_loss=0.1049, over 8549.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3226, pruned_loss=0.08943, over 1614014.68 frames. ], batch size: 31, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:47:01,386 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:03,664 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 06:47:17,555 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 2.557e+02 3.336e+02 4.233e+02 1.201e+03, threshold=6.672e+02, percent-clipped=8.0 +2023-02-06 06:47:19,777 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:35,101 INFO [train.py:901] (2/4) Epoch 9, batch 1400, loss[loss=0.283, simple_loss=0.3445, pruned_loss=0.1108, over 8635.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3224, pruned_loss=0.08962, over 1612468.63 frames. ], batch size: 34, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:48:09,442 INFO [train.py:901] (2/4) Epoch 9, batch 1450, loss[loss=0.2882, simple_loss=0.3601, pruned_loss=0.1082, over 8644.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3226, pruned_loss=0.09016, over 1611215.23 frames. ], batch size: 34, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:12,191 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 06:48:26,154 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.633e+02 3.463e+02 4.686e+02 9.003e+02, threshold=6.925e+02, percent-clipped=5.0 +2023-02-06 06:48:42,245 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:48:44,184 INFO [train.py:901] (2/4) Epoch 9, batch 1500, loss[loss=0.1987, simple_loss=0.2929, pruned_loss=0.05227, over 8459.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3211, pruned_loss=0.08902, over 1609533.31 frames. 
], batch size: 25, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:52,883 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66178.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:49:08,955 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66200.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:49:12,623 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 06:49:18,740 INFO [train.py:901] (2/4) Epoch 9, batch 1550, loss[loss=0.2578, simple_loss=0.3461, pruned_loss=0.08481, over 8239.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3212, pruned_loss=0.08933, over 1608482.25 frames. ], batch size: 24, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:49:28,444 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9307, 1.4202, 1.4615, 1.1417, 1.0545, 1.3154, 1.6520, 1.5222], + device='cuda:2'), covar=tensor([0.0577, 0.1154, 0.1729, 0.1455, 0.0583, 0.1530, 0.0671, 0.0582], + device='cuda:2'), in_proj_covar=tensor([0.0107, 0.0158, 0.0198, 0.0163, 0.0109, 0.0167, 0.0122, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 06:49:35,629 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.559e+02 2.942e+02 3.565e+02 7.942e+02, threshold=5.885e+02, percent-clipped=2.0 +2023-02-06 06:49:36,160 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.50 vs. limit=5.0 +2023-02-06 06:49:53,203 INFO [train.py:901] (2/4) Epoch 9, batch 1600, loss[loss=0.1985, simple_loss=0.2917, pruned_loss=0.05266, over 8085.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3205, pruned_loss=0.08781, over 1614581.39 frames. ], batch size: 21, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:49:53,444 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66265.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:02,300 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:50:11,771 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66290.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:29,749 INFO [train.py:901] (2/4) Epoch 9, batch 1650, loss[loss=0.2761, simple_loss=0.343, pruned_loss=0.1046, over 8135.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3205, pruned_loss=0.08775, over 1612715.36 frames. ], batch size: 22, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:50:29,939 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66315.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:46,552 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.536e+02 3.360e+02 4.258e+02 7.701e+02, threshold=6.719e+02, percent-clipped=5.0 +2023-02-06 06:51:03,464 INFO [train.py:901] (2/4) Epoch 9, batch 1700, loss[loss=0.2693, simple_loss=0.3428, pruned_loss=0.09797, over 8296.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3208, pruned_loss=0.08829, over 1614815.84 frames. ], batch size: 23, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:51:39,896 INFO [train.py:901] (2/4) Epoch 9, batch 1750, loss[loss=0.2529, simple_loss=0.3345, pruned_loss=0.0856, over 8089.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3208, pruned_loss=0.08851, over 1617781.27 frames. 
], batch size: 21, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:51:57,454 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.942e+02 3.542e+02 4.261e+02 7.419e+02, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 06:52:13,915 INFO [train.py:901] (2/4) Epoch 9, batch 1800, loss[loss=0.2309, simple_loss=0.3092, pruned_loss=0.07626, over 8284.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3223, pruned_loss=0.08978, over 1615426.93 frames. ], batch size: 23, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:42,186 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:52:49,000 INFO [train.py:901] (2/4) Epoch 9, batch 1850, loss[loss=0.2475, simple_loss=0.3242, pruned_loss=0.0854, over 6806.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3227, pruned_loss=0.09002, over 1617540.66 frames. ], batch size: 15, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:53,697 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66522.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:05,499 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66539.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:05,997 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.848e+02 3.228e+02 4.154e+02 1.120e+03, threshold=6.457e+02, percent-clipped=1.0 +2023-02-06 06:53:22,983 INFO [train.py:901] (2/4) Epoch 9, batch 1900, loss[loss=0.2519, simple_loss=0.3147, pruned_loss=0.09453, over 7658.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3227, pruned_loss=0.09061, over 1614695.23 frames. ], batch size: 19, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:27,130 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66571.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:43,851 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66596.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:47,185 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 06:53:56,566 INFO [train.py:901] (2/4) Epoch 9, batch 1950, loss[loss=0.2491, simple_loss=0.3223, pruned_loss=0.08795, over 8488.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3236, pruned_loss=0.09102, over 1614583.72 frames. ], batch size: 28, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:58,610 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 06:54:00,034 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:00,834 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:03,721 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.04 vs. 
limit=5.0 +2023-02-06 06:54:12,896 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66637.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:54:14,189 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8895, 1.9861, 1.7565, 2.5841, 1.1518, 1.3453, 1.8621, 2.0643], + device='cuda:2'), covar=tensor([0.0809, 0.0945, 0.1126, 0.0392, 0.1248, 0.1592, 0.0905, 0.0806], + device='cuda:2'), in_proj_covar=tensor([0.0245, 0.0224, 0.0266, 0.0217, 0.0222, 0.0260, 0.0262, 0.0226], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 06:54:14,627 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.852e+02 3.410e+02 4.369e+02 9.021e+02, threshold=6.820e+02, percent-clipped=7.0 +2023-02-06 06:54:20,063 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 06:54:32,103 INFO [train.py:901] (2/4) Epoch 9, batch 2000, loss[loss=0.2635, simple_loss=0.3273, pruned_loss=0.09988, over 8132.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3223, pruned_loss=0.09014, over 1613853.97 frames. ], batch size: 22, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:55:06,982 INFO [train.py:901] (2/4) Epoch 9, batch 2050, loss[loss=0.2578, simple_loss=0.325, pruned_loss=0.09528, over 8130.00 frames. ], tot_loss[loss=0.2506, simple_loss=0.3217, pruned_loss=0.08978, over 1611928.63 frames. ], batch size: 22, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:55:20,386 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:55:23,653 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.024e+02 2.763e+02 3.349e+02 4.333e+02 1.017e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 06:55:31,186 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66749.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:55:33,260 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5328, 2.0009, 2.2196, 1.1555, 2.2423, 1.4357, 0.6982, 1.7910], + device='cuda:2'), covar=tensor([0.0424, 0.0227, 0.0149, 0.0397, 0.0269, 0.0659, 0.0551, 0.0204], + device='cuda:2'), in_proj_covar=tensor([0.0370, 0.0298, 0.0244, 0.0360, 0.0288, 0.0455, 0.0341, 0.0328], + device='cuda:2'), out_proj_covar=tensor([1.1046e-04, 8.7178e-05, 7.1478e-05, 1.0569e-04, 8.5958e-05, 1.4669e-04, + 1.0234e-04, 9.7499e-05], device='cuda:2') +2023-02-06 06:55:42,431 INFO [train.py:901] (2/4) Epoch 9, batch 2100, loss[loss=0.248, simple_loss=0.3294, pruned_loss=0.08325, over 8236.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3221, pruned_loss=0.09047, over 1610407.45 frames. ], batch size: 22, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:17,428 INFO [train.py:901] (2/4) Epoch 9, batch 2150, loss[loss=0.2705, simple_loss=0.3499, pruned_loss=0.09556, over 8204.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3222, pruned_loss=0.0903, over 1610467.57 frames. 
], batch size: 23, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:18,961 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1215, 1.7537, 3.4060, 1.4730, 2.4206, 3.8919, 3.8535, 3.3397], + device='cuda:2'), covar=tensor([0.0983, 0.1409, 0.0372, 0.1991, 0.0890, 0.0220, 0.0439, 0.0529], + device='cuda:2'), in_proj_covar=tensor([0.0252, 0.0284, 0.0244, 0.0276, 0.0259, 0.0227, 0.0302, 0.0287], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 06:56:34,545 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.810e+02 3.362e+02 4.511e+02 1.000e+03, threshold=6.724e+02, percent-clipped=7.0 +2023-02-06 06:56:53,076 INFO [train.py:901] (2/4) Epoch 9, batch 2200, loss[loss=0.2534, simple_loss=0.3169, pruned_loss=0.0949, over 7935.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3211, pruned_loss=0.08978, over 1612847.88 frames. ], batch size: 20, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:56:54,808 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-06 06:57:01,062 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:05,630 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66883.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:12,267 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66893.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:18,130 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:27,006 INFO [train.py:901] (2/4) Epoch 9, batch 2250, loss[loss=0.248, simple_loss=0.3218, pruned_loss=0.08713, over 8551.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3212, pruned_loss=0.08979, over 1614418.18 frames. ], batch size: 31, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:29,219 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66918.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:43,695 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.716e+02 3.375e+02 4.203e+02 7.579e+02, threshold=6.750e+02, percent-clipped=1.0 +2023-02-06 06:58:00,327 INFO [train.py:901] (2/4) Epoch 9, batch 2300, loss[loss=0.2147, simple_loss=0.3033, pruned_loss=0.06312, over 8249.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3216, pruned_loss=0.08956, over 1618902.59 frames. ], batch size: 24, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:58:07,510 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:19,737 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:24,436 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66998.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:58:36,110 INFO [train.py:901] (2/4) Epoch 9, batch 2350, loss[loss=0.2313, simple_loss=0.291, pruned_loss=0.08578, over 7518.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3208, pruned_loss=0.08948, over 1618322.77 frames. 
], batch size: 18, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:58:36,992 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:53,558 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.755e+02 3.236e+02 4.430e+02 1.005e+03, threshold=6.472e+02, percent-clipped=3.0 +2023-02-06 06:59:10,046 INFO [train.py:901] (2/4) Epoch 9, batch 2400, loss[loss=0.2063, simple_loss=0.2692, pruned_loss=0.07166, over 7708.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3203, pruned_loss=0.08931, over 1612874.49 frames. ], batch size: 18, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:59:24,782 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 06:59:28,535 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67093.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:59:35,220 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:59:42,858 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0 +2023-02-06 06:59:44,443 INFO [train.py:901] (2/4) Epoch 9, batch 2450, loss[loss=0.2848, simple_loss=0.3561, pruned_loss=0.1067, over 8243.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3218, pruned_loss=0.08984, over 1611793.71 frames. ], batch size: 22, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:59:45,250 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0111, 2.8577, 3.0234, 1.8771, 1.6229, 3.3222, 0.7374, 2.1397], + device='cuda:2'), covar=tensor([0.2290, 0.1535, 0.1241, 0.3929, 0.5650, 0.0588, 0.4659, 0.2537], + device='cuda:2'), in_proj_covar=tensor([0.0153, 0.0151, 0.0091, 0.0202, 0.0244, 0.0094, 0.0154, 0.0154], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 07:00:02,010 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.801e+02 2.787e+02 3.467e+02 4.148e+02 8.119e+02, threshold=6.934e+02, percent-clipped=3.0 +2023-02-06 07:00:18,497 INFO [train.py:901] (2/4) Epoch 9, batch 2500, loss[loss=0.2469, simple_loss=0.3285, pruned_loss=0.08265, over 8457.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3229, pruned_loss=0.09095, over 1609966.06 frames. ], batch size: 27, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:00:48,179 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67208.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:00:52,870 INFO [train.py:901] (2/4) Epoch 9, batch 2550, loss[loss=0.2134, simple_loss=0.2977, pruned_loss=0.06451, over 8262.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3215, pruned_loss=0.08955, over 1610586.98 frames. ], batch size: 24, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:12,353 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.679e+02 3.405e+02 4.203e+02 8.726e+02, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 07:01:21,875 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67254.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:01:29,612 INFO [train.py:901] (2/4) Epoch 9, batch 2600, loss[loss=0.2164, simple_loss=0.2807, pruned_loss=0.07604, over 7208.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3217, pruned_loss=0.0902, over 1604429.97 frames. 
], batch size: 16, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:39,248 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67279.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:02:03,433 INFO [train.py:901] (2/4) Epoch 9, batch 2650, loss[loss=0.2783, simple_loss=0.3444, pruned_loss=0.1061, over 8504.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3213, pruned_loss=0.08948, over 1610973.59 frames. ], batch size: 26, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:02:06,281 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:02:21,820 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.739e+02 3.376e+02 4.238e+02 9.756e+02, threshold=6.752e+02, percent-clipped=4.0 +2023-02-06 07:02:39,313 INFO [train.py:901] (2/4) Epoch 9, batch 2700, loss[loss=0.3144, simple_loss=0.3724, pruned_loss=0.1282, over 8514.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3229, pruned_loss=0.09002, over 1612140.60 frames. ], batch size: 26, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:02:44,769 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6958, 1.4418, 4.4528, 1.7402, 2.4683, 5.0218, 5.0064, 4.3462], + device='cuda:2'), covar=tensor([0.0936, 0.1611, 0.0257, 0.1950, 0.0919, 0.0198, 0.0368, 0.0555], + device='cuda:2'), in_proj_covar=tensor([0.0250, 0.0279, 0.0240, 0.0271, 0.0254, 0.0224, 0.0299, 0.0283], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 07:02:58,435 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7825, 1.8126, 2.1642, 1.5478, 1.0874, 2.1367, 0.3911, 1.3502], + device='cuda:2'), covar=tensor([0.2401, 0.1558, 0.0661, 0.2192, 0.4765, 0.0546, 0.3906, 0.2257], + device='cuda:2'), in_proj_covar=tensor([0.0155, 0.0154, 0.0092, 0.0204, 0.0243, 0.0095, 0.0153, 0.0154], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 07:03:06,866 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4414, 1.9754, 3.0538, 2.5508, 2.6019, 2.1713, 1.6493, 1.4689], + device='cuda:2'), covar=tensor([0.2939, 0.3391, 0.0928, 0.1848, 0.1610, 0.1750, 0.1507, 0.3436], + device='cuda:2'), in_proj_covar=tensor([0.0849, 0.0812, 0.0697, 0.0806, 0.0895, 0.0759, 0.0683, 0.0732], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 07:03:13,103 INFO [train.py:901] (2/4) Epoch 9, batch 2750, loss[loss=0.2897, simple_loss=0.3411, pruned_loss=0.1191, over 8086.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3229, pruned_loss=0.09031, over 1611842.52 frames. ], batch size: 21, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:03:26,081 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:29,957 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.867e+02 3.446e+02 4.196e+02 9.783e+02, threshold=6.892e+02, percent-clipped=3.0 +2023-02-06 07:03:34,268 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67445.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:38,822 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-02-06 07:03:48,010 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67464.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:03:48,379 INFO [train.py:901] (2/4) Epoch 9, batch 2800, loss[loss=0.3103, simple_loss=0.3773, pruned_loss=0.1217, over 8626.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3227, pruned_loss=0.09049, over 1612019.61 frames. ], batch size: 31, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:04:05,327 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67489.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:04:23,679 INFO [train.py:901] (2/4) Epoch 9, batch 2850, loss[loss=0.2568, simple_loss=0.3348, pruned_loss=0.08944, over 8591.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3228, pruned_loss=0.08996, over 1616263.34 frames. ], batch size: 34, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:04:34,544 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:40,478 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.765e+02 3.269e+02 4.105e+02 6.649e+02, threshold=6.538e+02, percent-clipped=0.0 +2023-02-06 07:04:53,911 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 07:04:55,077 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:58,190 INFO [train.py:901] (2/4) Epoch 9, batch 2900, loss[loss=0.2613, simple_loss=0.3312, pruned_loss=0.09571, over 8200.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3236, pruned_loss=0.0905, over 1615262.17 frames. ], batch size: 23, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:24,267 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 07:05:33,886 INFO [train.py:901] (2/4) Epoch 9, batch 2950, loss[loss=0.2432, simple_loss=0.3133, pruned_loss=0.08654, over 8036.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3238, pruned_loss=0.08998, over 1617887.66 frames. ], batch size: 22, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:43,266 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7254, 1.9925, 3.0174, 1.4270, 2.4763, 1.9490, 1.8415, 2.3278], + device='cuda:2'), covar=tensor([0.1270, 0.1772, 0.0560, 0.3163, 0.1067, 0.2067, 0.1231, 0.1651], + device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0498, 0.0530, 0.0572, 0.0605, 0.0541, 0.0462, 0.0608], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 07:05:51,259 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.115e+02 2.827e+02 3.390e+02 4.435e+02 7.404e+02, threshold=6.780e+02, percent-clipped=4.0 +2023-02-06 07:06:08,216 INFO [train.py:901] (2/4) Epoch 9, batch 3000, loss[loss=0.2519, simple_loss=0.3226, pruned_loss=0.09064, over 8475.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3239, pruned_loss=0.09003, over 1619202.80 frames. 
], batch size: 27, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:08,217 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 07:06:13,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7210, 1.6851, 2.6921, 1.3006, 2.1365, 2.8772, 2.8988, 2.5171], + device='cuda:2'), covar=tensor([0.1139, 0.1409, 0.0419, 0.2272, 0.0837, 0.0363, 0.0560, 0.0715], + device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0285, 0.0249, 0.0279, 0.0262, 0.0230, 0.0309, 0.0294], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 07:06:20,342 INFO [train.py:935] (2/4) Epoch 9, validation: loss=0.1965, simple_loss=0.2957, pruned_loss=0.04864, over 944034.00 frames. +2023-02-06 07:06:20,342 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6620MB +2023-02-06 07:06:37,436 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:43,347 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:52,182 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:55,496 INFO [train.py:901] (2/4) Epoch 9, batch 3050, loss[loss=0.2325, simple_loss=0.3063, pruned_loss=0.07939, over 7644.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.323, pruned_loss=0.0897, over 1621148.29 frames. ], batch size: 19, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:55,703 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:07:00,567 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2138, 1.7956, 1.9678, 2.0391, 1.3489, 1.7572, 2.2569, 2.3910], + device='cuda:2'), covar=tensor([0.0385, 0.1171, 0.1616, 0.1141, 0.0542, 0.1446, 0.0593, 0.0502], + device='cuda:2'), in_proj_covar=tensor([0.0105, 0.0158, 0.0196, 0.0161, 0.0108, 0.0166, 0.0121, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 07:07:13,238 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.629e+02 3.194e+02 3.976e+02 7.575e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:07:29,777 INFO [train.py:901] (2/4) Epoch 9, batch 3100, loss[loss=0.2483, simple_loss=0.3333, pruned_loss=0.08165, over 8442.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3226, pruned_loss=0.08884, over 1622251.00 frames. ], batch size: 27, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:07:57,812 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3481, 1.4191, 1.5900, 1.3099, 1.0479, 1.4054, 1.7320, 1.5747], + device='cuda:2'), covar=tensor([0.0485, 0.1279, 0.1647, 0.1403, 0.0618, 0.1466, 0.0742, 0.0611], + device='cuda:2'), in_proj_covar=tensor([0.0106, 0.0159, 0.0197, 0.0162, 0.0109, 0.0167, 0.0121, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 07:08:04,177 INFO [train.py:901] (2/4) Epoch 9, batch 3150, loss[loss=0.2418, simple_loss=0.3163, pruned_loss=0.08358, over 8324.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3232, pruned_loss=0.08978, over 1618958.35 frames. 
], batch size: 26, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:05,055 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:21,146 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.202e+02 2.768e+02 3.401e+02 4.235e+02 8.418e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 07:08:22,002 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:37,988 INFO [train.py:901] (2/4) Epoch 9, batch 3200, loss[loss=0.2352, simple_loss=0.3152, pruned_loss=0.07761, over 8518.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3218, pruned_loss=0.08937, over 1614162.52 frames. ], batch size: 26, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:38,989 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 07:08:44,735 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:47,156 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 07:09:12,192 INFO [train.py:901] (2/4) Epoch 9, batch 3250, loss[loss=0.3024, simple_loss=0.3698, pruned_loss=0.1175, over 8327.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3206, pruned_loss=0.08843, over 1611958.55 frames. ], batch size: 25, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:09:14,080 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 07:09:29,469 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.709e+02 3.362e+02 4.203e+02 8.128e+02, threshold=6.724e+02, percent-clipped=5.0 +2023-02-06 07:09:31,090 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3854, 1.8576, 2.7673, 1.1726, 2.0203, 1.7521, 1.5146, 1.9068], + device='cuda:2'), covar=tensor([0.1733, 0.2012, 0.0766, 0.3678, 0.1469, 0.2849, 0.1708, 0.2017], + device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0494, 0.0527, 0.0566, 0.0601, 0.0540, 0.0460, 0.0603], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 07:09:46,665 INFO [train.py:901] (2/4) Epoch 9, batch 3300, loss[loss=0.2056, simple_loss=0.2821, pruned_loss=0.06455, over 7554.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3224, pruned_loss=0.08973, over 1609215.10 frames. ], batch size: 18, lr: 8.57e-03, grad_scale: 8.0 +2023-02-06 07:10:04,401 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:10,480 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:22,535 INFO [train.py:901] (2/4) Epoch 9, batch 3350, loss[loss=0.2816, simple_loss=0.3403, pruned_loss=0.1114, over 8295.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3227, pruned_loss=0.08972, over 1610881.84 frames. 
], batch size: 23, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:10:39,236 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.493e+02 3.108e+02 4.287e+02 1.101e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-06 07:10:40,597 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:49,105 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:56,399 INFO [train.py:901] (2/4) Epoch 9, batch 3400, loss[loss=0.2558, simple_loss=0.3382, pruned_loss=0.08674, over 8109.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.3213, pruned_loss=0.08847, over 1611127.95 frames. ], batch size: 23, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:11:24,319 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68105.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:11:30,825 INFO [train.py:901] (2/4) Epoch 9, batch 3450, loss[loss=0.1809, simple_loss=0.261, pruned_loss=0.05042, over 7793.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3215, pruned_loss=0.08923, over 1610741.90 frames. ], batch size: 19, lr: 8.56e-03, grad_scale: 16.0 +2023-02-06 07:11:46,279 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1837, 2.4158, 1.9294, 2.8828, 1.5301, 1.5155, 1.9721, 2.4968], + device='cuda:2'), covar=tensor([0.0739, 0.0863, 0.1102, 0.0412, 0.1191, 0.1549, 0.1175, 0.0804], + device='cuda:2'), in_proj_covar=tensor([0.0244, 0.0225, 0.0263, 0.0219, 0.0223, 0.0262, 0.0266, 0.0227], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 07:11:48,140 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.585e+02 3.242e+02 3.955e+02 1.617e+03, threshold=6.484e+02, percent-clipped=7.0 +2023-02-06 07:11:59,840 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:05,798 INFO [train.py:901] (2/4) Epoch 9, batch 3500, loss[loss=0.276, simple_loss=0.3439, pruned_loss=0.104, over 7916.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3215, pruned_loss=0.08931, over 1611611.65 frames. ], batch size: 20, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:08,692 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:17,930 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 07:12:40,952 INFO [train.py:901] (2/4) Epoch 9, batch 3550, loss[loss=0.2315, simple_loss=0.2981, pruned_loss=0.0825, over 5144.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3209, pruned_loss=0.08873, over 1608587.27 frames. ], batch size: 11, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:58,945 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.838e+02 3.387e+02 4.304e+02 7.616e+02, threshold=6.774e+02, percent-clipped=6.0 +2023-02-06 07:13:02,582 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:14,901 INFO [train.py:901] (2/4) Epoch 9, batch 3600, loss[loss=0.2978, simple_loss=0.3662, pruned_loss=0.1147, over 8187.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3208, pruned_loss=0.0884, over 1608504.40 frames. 
], batch size: 23, lr: 8.56e-03, grad_scale: 8.0
+2023-02-06 07:13:19,106 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68271.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:13:32,120 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.61 vs. limit=5.0
+2023-02-06 07:13:49,586 INFO [train.py:901] (2/4) Epoch 9, batch 3650, loss[loss=0.2831, simple_loss=0.359, pruned_loss=0.1037, over 8522.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3208, pruned_loss=0.08842, over 1605101.92 frames. ], batch size: 28, lr: 8.55e-03, grad_scale: 8.0
+2023-02-06 07:14:08,230 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.637e+02 3.214e+02 4.100e+02 7.421e+02, threshold=6.428e+02, percent-clipped=2.0
+2023-02-06 07:14:09,680 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68343.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:14:18,267 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 07:14:25,009 INFO [train.py:901] (2/4) Epoch 9, batch 3700, loss[loss=0.213, simple_loss=0.2818, pruned_loss=0.07205, over 7177.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.3189, pruned_loss=0.08736, over 1604204.11 frames. ], batch size: 16, lr: 8.55e-03, grad_scale: 8.0
+2023-02-06 07:14:44,968 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6993, 1.7348, 1.6391, 1.4516, 0.9560, 1.5259, 1.6293, 1.8331],
+       device='cuda:2'), covar=tensor([0.0569, 0.1053, 0.1597, 0.1231, 0.0560, 0.1406, 0.0628, 0.0482],
+       device='cuda:2'), in_proj_covar=tensor([0.0106, 0.0157, 0.0195, 0.0159, 0.0108, 0.0165, 0.0119, 0.0139],
+       device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006],
+       device='cuda:2')
+2023-02-06 07:14:57,600 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68413.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:14:58,733 INFO [train.py:901] (2/4) Epoch 9, batch 3750, loss[loss=0.2481, simple_loss=0.3192, pruned_loss=0.08846, over 8238.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3195, pruned_loss=0.08782, over 1610390.68 frames. ], batch size: 24, lr: 8.55e-03, grad_scale: 8.0
+2023-02-06 07:15:00,178 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:15:06,058 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68425.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:15:14,905 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68438.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:15:16,796 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.868e+02 3.639e+02 4.960e+02 1.282e+03, threshold=7.278e+02, percent-clipped=8.0
+2023-02-06 07:15:22,898 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68449.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:15:23,629 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68450.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:15:29,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68458.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:15:33,817 INFO [train.py:901] (2/4) Epoch 9, batch 3800, loss[loss=0.257, simple_loss=0.3312, pruned_loss=0.09135, over 8447.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.319, pruned_loss=0.08734, over 1612583.68 frames. ], batch size: 49, lr: 8.54e-03, grad_scale: 8.0
+2023-02-06 07:15:37,777 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0
+2023-02-06 07:16:07,818 INFO [train.py:901] (2/4) Epoch 9, batch 3850, loss[loss=0.2039, simple_loss=0.2819, pruned_loss=0.06292, over 7652.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3177, pruned_loss=0.08659, over 1607369.04 frames. ], batch size: 19, lr: 8.54e-03, grad_scale: 8.0
+2023-02-06 07:16:24,983 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 07:16:25,653 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.582e+02 3.048e+02 3.724e+02 6.674e+02, threshold=6.096e+02, percent-clipped=0.0
+2023-02-06 07:16:42,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68564.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:16:42,695 INFO [train.py:901] (2/4) Epoch 9, batch 3900, loss[loss=0.272, simple_loss=0.3375, pruned_loss=0.1033, over 8133.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.3171, pruned_loss=0.08559, over 1612048.63 frames. ], batch size: 22, lr: 8.54e-03, grad_scale: 8.0
+2023-02-06 07:17:04,305 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68596.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:17:17,691 INFO [train.py:901] (2/4) Epoch 9, batch 3950, loss[loss=0.2488, simple_loss=0.3204, pruned_loss=0.08864, over 8246.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3163, pruned_loss=0.08519, over 1608623.48 frames. ], batch size: 22, lr: 8.53e-03, grad_scale: 8.0
+2023-02-06 07:17:35,327 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.589e+02 3.045e+02 4.133e+02 1.084e+03, threshold=6.090e+02, percent-clipped=3.0
+2023-02-06 07:17:51,771 INFO [train.py:901] (2/4) Epoch 9, batch 4000, loss[loss=0.2677, simple_loss=0.3412, pruned_loss=0.0971, over 8442.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.3185, pruned_loss=0.08682, over 1613292.01 frames. ], batch size: 27, lr: 8.53e-03, grad_scale: 8.0
+2023-02-06 07:18:17,944 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68703.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:18:25,233 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68714.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:18:25,677 INFO [train.py:901] (2/4) Epoch 9, batch 4050, loss[loss=0.2778, simple_loss=0.3516, pruned_loss=0.102, over 8507.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.3188, pruned_loss=0.0874, over 1613317.96 frames. ], batch size: 39, lr: 8.53e-03, grad_scale: 8.0
+2023-02-06 07:18:41,427 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0
+2023-02-06 07:18:42,621 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68739.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:18:43,691 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.478e+02 3.133e+02 3.692e+02 8.585e+02, threshold=6.266e+02, percent-clipped=3.0
+2023-02-06 07:18:57,741 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:18:58,666 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.12 vs. limit=5.0
+2023-02-06 07:19:00,367 INFO [train.py:901] (2/4) Epoch 9, batch 4100, loss[loss=0.2743, simple_loss=0.3524, pruned_loss=0.0981, over 8782.00 frames. ], tot_loss[loss=0.247, simple_loss=0.3192, pruned_loss=0.08741, over 1615999.23 frames. ], batch size: 30, lr: 8.52e-03, grad_scale: 8.0
+2023-02-06 07:19:14,704 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 07:19:18,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5099, 1.4317, 4.7198, 1.7495, 4.0680, 3.9148, 4.1479, 4.0507],
+       device='cuda:2'), covar=tensor([0.0528, 0.4213, 0.0382, 0.3022, 0.1075, 0.0701, 0.0526, 0.0606],
+       device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0555, 0.0552, 0.0512, 0.0578, 0.0488, 0.0491, 0.0553],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:19:34,810 INFO [train.py:901] (2/4) Epoch 9, batch 4150, loss[loss=0.2216, simple_loss=0.2969, pruned_loss=0.07314, over 8085.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.3194, pruned_loss=0.08752, over 1617175.13 frames. ], batch size: 21, lr: 8.52e-03, grad_scale: 8.0
+2023-02-06 07:19:38,350 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68820.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:19:52,100 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.558e+02 3.576e+02 4.352e+02 8.740e+02, threshold=7.151e+02, percent-clipped=5.0
+2023-02-06 07:19:55,806 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68845.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:20:09,485 INFO [train.py:901] (2/4) Epoch 9, batch 4200, loss[loss=0.2153, simple_loss=0.284, pruned_loss=0.07331, over 7815.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3188, pruned_loss=0.08658, over 1617774.37 frames. ], batch size: 19, lr: 8.52e-03, grad_scale: 8.0
+2023-02-06 07:20:17,179 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:20:23,075 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 07:20:44,846 INFO [train.py:901] (2/4) Epoch 9, batch 4250, loss[loss=0.2788, simple_loss=0.3557, pruned_loss=0.1009, over 8463.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3213, pruned_loss=0.08809, over 1617558.10 frames. ], batch size: 27, lr: 8.52e-03, grad_scale: 8.0
+2023-02-06 07:20:45,571 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 07:21:02,845 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68940.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:21:03,451 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.856e+02 3.701e+02 4.402e+02 9.379e+02, threshold=7.403e+02, percent-clipped=2.0
+2023-02-06 07:21:20,735 INFO [train.py:901] (2/4) Epoch 9, batch 4300, loss[loss=0.222, simple_loss=0.3003, pruned_loss=0.07187, over 7980.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3206, pruned_loss=0.08814, over 1613311.96 frames. ], batch size: 21, lr: 8.51e-03, grad_scale: 8.0
+2023-02-06 07:21:31,865 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2543, 3.0561, 3.4857, 2.1686, 1.8356, 3.5670, 0.5757, 2.2844],
+       device='cuda:2'), covar=tensor([0.1984, 0.1110, 0.0334, 0.2780, 0.4640, 0.0455, 0.4897, 0.2054],
+       device='cuda:2'), in_proj_covar=tensor([0.0158, 0.0158, 0.0091, 0.0211, 0.0248, 0.0094, 0.0159, 0.0155],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:21:54,538 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0839, 1.7089, 1.3885, 1.6125, 1.4549, 1.2837, 1.3446, 1.4183],
+       device='cuda:2'), covar=tensor([0.0860, 0.0335, 0.0971, 0.0459, 0.0567, 0.1092, 0.0753, 0.0683],
+       device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0236, 0.0310, 0.0294, 0.0304, 0.0319, 0.0341, 0.0311],
+       device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 07:21:55,035 INFO [train.py:901] (2/4) Epoch 9, batch 4350, loss[loss=0.2454, simple_loss=0.3103, pruned_loss=0.09029, over 7936.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3207, pruned_loss=0.08832, over 1613138.12 frames. ], batch size: 20, lr: 8.51e-03, grad_scale: 8.0
+2023-02-06 07:22:06,430 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9218, 6.1653, 5.2998, 2.3360, 5.3786, 5.6591, 5.6644, 5.3026],
+       device='cuda:2'), covar=tensor([0.0667, 0.0360, 0.0913, 0.4388, 0.0687, 0.0531, 0.0933, 0.0619],
+       device='cuda:2'), in_proj_covar=tensor([0.0440, 0.0348, 0.0366, 0.0456, 0.0357, 0.0342, 0.0357, 0.0309],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:22:12,971 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.761e+02 3.203e+02 3.985e+02 6.558e+02, threshold=6.405e+02, percent-clipped=0.0
+2023-02-06 07:22:16,298 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 07:22:17,725 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:22:23,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69055.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:22:23,213 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5248, 1.8743, 3.4240, 1.2228, 2.4333, 1.9413, 1.5525, 2.3619],
+       device='cuda:2'), covar=tensor([0.1630, 0.2194, 0.0675, 0.3752, 0.1479, 0.2693, 0.1749, 0.2070],
+       device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0491, 0.0524, 0.0561, 0.0599, 0.0538, 0.0461, 0.0600],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:22:27,153 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:22:29,684 INFO [train.py:901] (2/4) Epoch 9, batch 4400, loss[loss=0.2275, simple_loss=0.3103, pruned_loss=0.07237, over 8468.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.3207, pruned_loss=0.08829, over 1611037.39 frames. ], batch size: 25, lr: 8.51e-03, grad_scale: 8.0
+2023-02-06 07:22:32,740 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.04 vs. limit=5.0
+2023-02-06 07:22:45,417 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6405, 1.9623, 2.1823, 1.0737, 2.2726, 1.5161, 0.6018, 1.9232],
+       device='cuda:2'), covar=tensor([0.0372, 0.0204, 0.0179, 0.0351, 0.0257, 0.0556, 0.0499, 0.0157],
+       device='cuda:2'), in_proj_covar=tensor([0.0365, 0.0291, 0.0244, 0.0350, 0.0283, 0.0442, 0.0335, 0.0315],
+       device='cuda:2'), out_proj_covar=tensor([1.0849e-04, 8.4175e-05, 7.1314e-05, 1.0202e-04, 8.3654e-05, 1.4123e-04,
+       9.9800e-05, 9.2963e-05], device='cuda:2')
+2023-02-06 07:22:55,805 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 07:23:04,528 INFO [train.py:901] (2/4) Epoch 9, batch 4450, loss[loss=0.1831, simple_loss=0.255, pruned_loss=0.05566, over 7221.00 frames. ], tot_loss[loss=0.2469, simple_loss=0.3193, pruned_loss=0.08723, over 1611341.08 frames. ], batch size: 16, lr: 8.50e-03, grad_scale: 8.0
+2023-02-06 07:23:16,659 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69132.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:23:22,376 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.746e+02 3.298e+02 3.852e+02 8.052e+02, threshold=6.596e+02, percent-clipped=4.0
+2023-02-06 07:23:33,866 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69157.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:23:37,255 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69162.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:23:39,015 INFO [train.py:901] (2/4) Epoch 9, batch 4500, loss[loss=0.2234, simple_loss=0.3009, pruned_loss=0.073, over 8239.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3194, pruned_loss=0.08699, over 1614556.74 frames. ], batch size: 22, lr: 8.50e-03, grad_scale: 8.0
+2023-02-06 07:23:49,134 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 07:23:49,291 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69180.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:23:53,178 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69186.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:24:12,704 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1721, 1.6911, 1.2061, 2.4270, 1.0247, 1.1718, 1.7227, 1.8001],
+       device='cuda:2'), covar=tensor([0.2001, 0.1363, 0.2410, 0.0556, 0.1500, 0.2214, 0.1146, 0.1211],
+       device='cuda:2'), in_proj_covar=tensor([0.0248, 0.0223, 0.0264, 0.0222, 0.0226, 0.0263, 0.0268, 0.0230],
+       device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:2')
+2023-02-06 07:24:13,185 INFO [train.py:901] (2/4) Epoch 9, batch 4550, loss[loss=0.244, simple_loss=0.3222, pruned_loss=0.08292, over 8136.00 frames. ], tot_loss[loss=0.2462, simple_loss=0.3194, pruned_loss=0.08646, over 1615728.88 frames. ], batch size: 22, lr: 8.50e-03, grad_scale: 8.0
+2023-02-06 07:24:16,809 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8946, 1.5553, 3.3500, 1.2770, 2.3219, 3.6841, 3.6478, 3.0701],
+       device='cuda:2'), covar=tensor([0.1101, 0.1538, 0.0353, 0.2224, 0.0853, 0.0274, 0.0464, 0.0690],
+       device='cuda:2'), in_proj_covar=tensor([0.0258, 0.0290, 0.0251, 0.0280, 0.0266, 0.0235, 0.0309, 0.0291],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:24:31,949 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.522e+02 2.943e+02 3.743e+02 5.945e+02, threshold=5.886e+02, percent-clipped=0.0
+2023-02-06 07:24:33,338 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69243.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:24:47,701 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69263.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:24:48,920 INFO [train.py:901] (2/4) Epoch 9, batch 4600, loss[loss=0.2454, simple_loss=0.3133, pruned_loss=0.08881, over 8083.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3181, pruned_loss=0.08605, over 1615189.65 frames. ], batch size: 21, lr: 8.49e-03, grad_scale: 8.0
+2023-02-06 07:25:07,530 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69291.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:25:22,106 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69311.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:25:24,600 INFO [train.py:901] (2/4) Epoch 9, batch 4650, loss[loss=0.2315, simple_loss=0.3088, pruned_loss=0.0771, over 8134.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3185, pruned_loss=0.08651, over 1613938.45 frames. ], batch size: 22, lr: 8.49e-03, grad_scale: 8.0
+2023-02-06 07:25:38,791 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:25:40,120 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69338.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:25:42,602 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.666e+02 3.298e+02 3.900e+02 8.712e+02, threshold=6.595e+02, percent-clipped=8.0
+2023-02-06 07:25:58,526 INFO [train.py:901] (2/4) Epoch 9, batch 4700, loss[loss=0.2908, simple_loss=0.3642, pruned_loss=0.1087, over 8364.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.319, pruned_loss=0.08726, over 1615449.94 frames. ], batch size: 24, lr: 8.49e-03, grad_scale: 8.0
+2023-02-06 07:26:14,068 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69386.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:26:26,899 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69405.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:26:31,852 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69412.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:26:33,681 INFO [train.py:901] (2/4) Epoch 9, batch 4750, loss[loss=0.2374, simple_loss=0.3195, pruned_loss=0.07767, over 8464.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3182, pruned_loss=0.08661, over 1614968.56 frames. ], batch size: 29, lr: 8.48e-03, grad_scale: 8.0
+2023-02-06 07:26:35,878 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69418.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:26:43,658 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0
+2023-02-06 07:26:49,115 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 07:26:50,994 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 07:26:51,644 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.564e+02 3.173e+02 4.227e+02 9.736e+02, threshold=6.346e+02, percent-clipped=4.0
+2023-02-06 07:26:53,202 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69443.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:27:08,536 INFO [train.py:901] (2/4) Epoch 9, batch 4800, loss[loss=0.2336, simple_loss=0.3073, pruned_loss=0.07997, over 8281.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3187, pruned_loss=0.08707, over 1611782.60 frames. ], batch size: 23, lr: 8.48e-03, grad_scale: 8.0
+2023-02-06 07:27:38,648 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9330, 1.3165, 6.0229, 2.2113, 5.3738, 5.0397, 5.5913, 5.5299],
+       device='cuda:2'), covar=tensor([0.0419, 0.4362, 0.0306, 0.2722, 0.0885, 0.0671, 0.0358, 0.0398],
+       device='cuda:2'), in_proj_covar=tensor([0.0447, 0.0549, 0.0545, 0.0505, 0.0576, 0.0488, 0.0481, 0.0547],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:27:41,299 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 07:27:43,205 INFO [train.py:901] (2/4) Epoch 9, batch 4850, loss[loss=0.2242, simple_loss=0.2873, pruned_loss=0.08053, over 8231.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3191, pruned_loss=0.08791, over 1608165.66 frames. ], batch size: 22, lr: 8.48e-03, grad_scale: 8.0
+2023-02-06 07:27:46,755 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69520.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:27:49,410 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69524.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:27:53,335 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69530.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:27:55,751 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 07:28:00,637 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.717e+02 3.193e+02 3.973e+02 8.915e+02, threshold=6.387e+02, percent-clipped=1.0
+2023-02-06 07:28:17,599 INFO [train.py:901] (2/4) Epoch 9, batch 4900, loss[loss=0.219, simple_loss=0.306, pruned_loss=0.06598, over 8761.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3201, pruned_loss=0.08822, over 1610629.01 frames. ], batch size: 30, lr: 8.48e-03, grad_scale: 8.0
+2023-02-06 07:28:33,122 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:28:47,478 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69607.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:28:53,429 INFO [train.py:901] (2/4) Epoch 9, batch 4950, loss[loss=0.3128, simple_loss=0.3751, pruned_loss=0.1253, over 6900.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3206, pruned_loss=0.0879, over 1612030.93 frames. ], batch size: 71, lr: 8.47e-03, grad_scale: 8.0
+2023-02-06 07:29:06,611 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69635.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:29:09,346 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69639.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:29:10,455 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.734e+02 3.225e+02 4.131e+02 8.295e+02, threshold=6.450e+02, percent-clipped=5.0
+2023-02-06 07:29:13,212 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:29:27,248 INFO [train.py:901] (2/4) Epoch 9, batch 5000, loss[loss=0.2011, simple_loss=0.2794, pruned_loss=0.06142, over 7514.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3213, pruned_loss=0.08831, over 1611244.17 frames. ], batch size: 18, lr: 8.47e-03, grad_scale: 8.0
+2023-02-06 07:29:39,129 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69682.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:29:46,883 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69692.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:29:53,590 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69702.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:30:02,816 INFO [train.py:901] (2/4) Epoch 9, batch 5050, loss[loss=0.2477, simple_loss=0.3192, pruned_loss=0.08807, over 8477.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3202, pruned_loss=0.08751, over 1611454.55 frames. ], batch size: 25, lr: 8.47e-03, grad_scale: 8.0
+2023-02-06 07:30:07,762 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69722.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:30:12,770 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69730.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:30:17,058 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3575, 1.8843, 3.0137, 2.2793, 2.6103, 2.0537, 1.6215, 1.3317],
+       device='cuda:2'), covar=tensor([0.3198, 0.3460, 0.0940, 0.2132, 0.1698, 0.1796, 0.1514, 0.3494],
+       device='cuda:2'), in_proj_covar=tensor([0.0858, 0.0825, 0.0702, 0.0808, 0.0911, 0.0762, 0.0689, 0.0738],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:30:18,144 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 07:30:20,813 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.703e+02 3.249e+02 3.895e+02 8.845e+02, threshold=6.498e+02, percent-clipped=2.0
+2023-02-06 07:30:26,987 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:30:30,985 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69756.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:30:36,720 INFO [train.py:901] (2/4) Epoch 9, batch 5100, loss[loss=0.2524, simple_loss=0.3384, pruned_loss=0.08314, over 8494.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3212, pruned_loss=0.08835, over 1614244.91 frames. ], batch size: 26, lr: 8.46e-03, grad_scale: 8.0
+2023-02-06 07:30:44,225 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69776.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:30:59,029 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69797.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:31:01,166 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1334, 1.6223, 3.5191, 1.5430, 2.3575, 3.8645, 3.8861, 3.2849],
+       device='cuda:2'), covar=tensor([0.1113, 0.1669, 0.0356, 0.2044, 0.0970, 0.0270, 0.0470, 0.0681],
+       device='cuda:2'), in_proj_covar=tensor([0.0254, 0.0288, 0.0249, 0.0277, 0.0265, 0.0229, 0.0305, 0.0288],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:31:01,900 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69801.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:31:11,532 INFO [train.py:901] (2/4) Epoch 9, batch 5150, loss[loss=0.2646, simple_loss=0.3316, pruned_loss=0.09876, over 8613.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3211, pruned_loss=0.08884, over 1609223.35 frames. ], batch size: 39, lr: 8.46e-03, grad_scale: 8.0
+2023-02-06 07:31:29,711 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 2.410e+02 3.240e+02 3.896e+02 9.119e+02, threshold=6.481e+02, percent-clipped=3.0
+2023-02-06 07:31:32,779 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69845.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:31:43,082 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-06 07:31:46,671 INFO [train.py:901] (2/4) Epoch 9, batch 5200, loss[loss=0.2411, simple_loss=0.321, pruned_loss=0.0806, over 8033.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3197, pruned_loss=0.08767, over 1607689.05 frames. ], batch size: 22, lr: 8.46e-03, grad_scale: 8.0
+2023-02-06 07:31:50,849 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:32:02,099 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4152, 1.9893, 3.0532, 2.4514, 2.7685, 2.2268, 1.7697, 1.4523],
+       device='cuda:2'), covar=tensor([0.3219, 0.3824, 0.0941, 0.2153, 0.1673, 0.1862, 0.1499, 0.3672],
+       device='cuda:2'), in_proj_covar=tensor([0.0853, 0.0820, 0.0700, 0.0807, 0.0907, 0.0758, 0.0686, 0.0735],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:32:06,754 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69895.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:32:11,376 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69901.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:32:17,773 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 07:32:20,225 INFO [train.py:901] (2/4) Epoch 9, batch 5250, loss[loss=0.2619, simple_loss=0.3246, pruned_loss=0.09959, over 8503.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3211, pruned_loss=0.08844, over 1609974.54 frames. ], batch size: 28, lr: 8.45e-03, grad_scale: 8.0
+2023-02-06 07:32:23,772 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69920.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:32:27,863 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69926.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:32:38,259 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 2.909e+02 3.504e+02 4.160e+02 7.603e+02, threshold=7.007e+02, percent-clipped=5.0
+2023-02-06 07:32:50,717 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69958.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:32:55,303 INFO [train.py:901] (2/4) Epoch 9, batch 5300, loss[loss=0.2573, simple_loss=0.3218, pruned_loss=0.09644, over 6850.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3204, pruned_loss=0.08792, over 1608639.89 frames. ], batch size: 72, lr: 8.45e-03, grad_scale: 8.0
+2023-02-06 07:33:04,978 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69978.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:33:08,282 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:33:19,169 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3215, 1.4161, 1.5432, 1.3157, 1.1074, 1.4310, 1.8601, 1.9569],
+       device='cuda:2'), covar=tensor([0.0440, 0.1281, 0.1804, 0.1411, 0.0592, 0.1530, 0.0639, 0.0523],
+       device='cuda:2'), in_proj_covar=tensor([0.0104, 0.0159, 0.0199, 0.0162, 0.0109, 0.0167, 0.0121, 0.0140],
+       device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:2')
+2023-02-06 07:33:22,935 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70003.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:33:24,838 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70006.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:33:31,222 INFO [train.py:901] (2/4) Epoch 9, batch 5350, loss[loss=0.2418, simple_loss=0.3203, pruned_loss=0.08169, over 8521.00 frames. ], tot_loss[loss=0.2486, simple_loss=0.3206, pruned_loss=0.08831, over 1605489.64 frames. ], batch size: 28, lr: 8.45e-03, grad_scale: 8.0
+2023-02-06 07:33:36,940 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 07:33:42,007 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70031.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:33:45,286 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70036.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 07:33:48,397 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.965e+02 3.484e+02 4.155e+02 9.515e+02, threshold=6.968e+02, percent-clipped=2.0
+2023-02-06 07:33:57,319 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70053.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:34:05,194 INFO [train.py:901] (2/4) Epoch 9, batch 5400, loss[loss=0.293, simple_loss=0.3729, pruned_loss=0.1066, over 8601.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3211, pruned_loss=0.08818, over 1605699.38 frames. ], batch size: 31, lr: 8.45e-03, grad_scale: 8.0
+2023-02-06 07:34:14,818 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70078.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:34:31,571 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70101.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:34:40,958 INFO [train.py:901] (2/4) Epoch 9, batch 5450, loss[loss=0.2434, simple_loss=0.3333, pruned_loss=0.07674, over 8106.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3208, pruned_loss=0.08793, over 1608789.93 frames. ], batch size: 23, lr: 8.44e-03, grad_scale: 8.0
+2023-02-06 07:34:48,362 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70126.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:34:49,042 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70127.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:34:49,624 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70128.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:34:58,814 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.682e+02 3.191e+02 4.046e+02 1.028e+03, threshold=6.382e+02, percent-clipped=4.0
+2023-02-06 07:35:04,332 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 07:35:05,827 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70151.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:35:06,525 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70152.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:35:15,866 INFO [train.py:901] (2/4) Epoch 9, batch 5500, loss[loss=0.2183, simple_loss=0.2973, pruned_loss=0.06959, over 8246.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.32, pruned_loss=0.08735, over 1609048.84 frames. ], batch size: 24, lr: 8.44e-03, grad_scale: 16.0
+2023-02-06 07:35:31,379 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6704, 1.6690, 2.1493, 1.3161, 1.0721, 2.1527, 0.2932, 1.3081],
+       device='cuda:2'), covar=tensor([0.2942, 0.1810, 0.0552, 0.2787, 0.5313, 0.0471, 0.4117, 0.2306],
+       device='cuda:2'), in_proj_covar=tensor([0.0155, 0.0156, 0.0091, 0.0206, 0.0246, 0.0094, 0.0158, 0.0155],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:35:50,278 INFO [train.py:901] (2/4) Epoch 9, batch 5550, loss[loss=0.2823, simple_loss=0.3497, pruned_loss=0.1075, over 8098.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3196, pruned_loss=0.08777, over 1608377.11 frames. ], batch size: 23, lr: 8.44e-03, grad_scale: 16.0
+2023-02-06 07:36:07,873 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.484e+02 3.031e+02 3.937e+02 9.276e+02, threshold=6.062e+02, percent-clipped=2.0
+2023-02-06 07:36:14,988 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 07:36:24,752 INFO [train.py:901] (2/4) Epoch 9, batch 5600, loss[loss=0.2474, simple_loss=0.3242, pruned_loss=0.0853, over 8359.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3207, pruned_loss=0.08817, over 1612435.48 frames. ], batch size: 24, lr: 8.43e-03, grad_scale: 16.0
+2023-02-06 07:36:34,078 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70278.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:36:35,512 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5028, 2.0525, 3.4373, 1.2002, 2.6530, 1.9914, 1.7288, 2.3096],
+       device='cuda:2'), covar=tensor([0.1789, 0.1993, 0.0812, 0.3719, 0.1452, 0.2577, 0.1664, 0.2147],
+       device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0496, 0.0524, 0.0564, 0.0600, 0.0537, 0.0463, 0.0603],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:36:59,388 INFO [train.py:901] (2/4) Epoch 9, batch 5650, loss[loss=0.2733, simple_loss=0.3563, pruned_loss=0.09515, over 8192.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3203, pruned_loss=0.08814, over 1609017.05 frames. ], batch size: 23, lr: 8.43e-03, grad_scale: 8.0
+2023-02-06 07:37:08,097 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 07:37:18,047 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.609e+02 3.248e+02 4.005e+02 8.106e+02, threshold=6.497e+02, percent-clipped=5.0
+2023-02-06 07:37:32,963 INFO [train.py:901] (2/4) Epoch 9, batch 5700, loss[loss=0.2779, simple_loss=0.3606, pruned_loss=0.09755, over 8496.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3206, pruned_loss=0.08835, over 1607820.41 frames. ], batch size: 28, lr: 8.43e-03, grad_scale: 8.0
+2023-02-06 07:37:56,393 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4331, 1.9498, 3.1170, 2.5265, 2.7753, 2.2079, 1.7797, 1.3292],
+       device='cuda:2'), covar=tensor([0.3174, 0.3523, 0.0884, 0.2015, 0.1607, 0.1847, 0.1454, 0.3701],
+       device='cuda:2'), in_proj_covar=tensor([0.0861, 0.0829, 0.0707, 0.0806, 0.0905, 0.0765, 0.0686, 0.0737],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:38:02,548 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6746, 4.6890, 4.1800, 1.6809, 4.1259, 4.1925, 4.2617, 3.9384],
+       device='cuda:2'), covar=tensor([0.0982, 0.0693, 0.1284, 0.5835, 0.0906, 0.1016, 0.1470, 0.0839],
+       device='cuda:2'), in_proj_covar=tensor([0.0449, 0.0353, 0.0367, 0.0468, 0.0365, 0.0350, 0.0362, 0.0309],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:38:02,669 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70407.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:38:08,013 INFO [train.py:901] (2/4) Epoch 9, batch 5750, loss[loss=0.248, simple_loss=0.3279, pruned_loss=0.08403, over 8517.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3203, pruned_loss=0.08776, over 1615157.03 frames. ], batch size: 29, lr: 8.42e-03, grad_scale: 8.0
+2023-02-06 07:38:12,130 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 07:38:20,277 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70432.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 07:38:27,523 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.028e+02 2.898e+02 3.376e+02 4.229e+02 8.555e+02, threshold=6.753e+02, percent-clipped=3.0
+2023-02-06 07:38:43,380 INFO [train.py:901] (2/4) Epoch 9, batch 5800, loss[loss=0.268, simple_loss=0.337, pruned_loss=0.09953, over 8500.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3202, pruned_loss=0.08737, over 1615394.41 frames. ], batch size: 26, lr: 8.42e-03, grad_scale: 8.0
+2023-02-06 07:38:48,295 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70472.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:38:48,580 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.70 vs. limit=5.0
+2023-02-06 07:39:18,642 INFO [train.py:901] (2/4) Epoch 9, batch 5850, loss[loss=0.1944, simple_loss=0.2681, pruned_loss=0.06039, over 7787.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3188, pruned_loss=0.08702, over 1612963.77 frames. ], batch size: 19, lr: 8.42e-03, grad_scale: 8.0
+2023-02-06 07:39:37,361 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.477e+02 3.501e+02 4.376e+02 8.995e+02, threshold=7.001e+02, percent-clipped=4.0
+2023-02-06 07:39:48,208 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2859, 1.9519, 3.0793, 2.5058, 2.6875, 2.2270, 1.6964, 1.3785],
+       device='cuda:2'), covar=tensor([0.3291, 0.3509, 0.0954, 0.1932, 0.1630, 0.1802, 0.1552, 0.3728],
+       device='cuda:2'), in_proj_covar=tensor([0.0853, 0.0819, 0.0703, 0.0805, 0.0900, 0.0761, 0.0683, 0.0733],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:39:53,322 INFO [train.py:901] (2/4) Epoch 9, batch 5900, loss[loss=0.2274, simple_loss=0.2982, pruned_loss=0.07827, over 6808.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3195, pruned_loss=0.08782, over 1610286.49 frames. ], batch size: 15, lr: 8.42e-03, grad_scale: 8.0
+2023-02-06 07:40:08,006 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:40:23,563 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.3422, 3.0098, 3.4782, 2.2982, 1.9127, 3.5624, 0.5739, 2.4138],
+       device='cuda:2'), covar=tensor([0.1888, 0.1654, 0.0406, 0.2642, 0.4751, 0.0439, 0.4939, 0.1791],
+       device='cuda:2'), in_proj_covar=tensor([0.0159, 0.0160, 0.0091, 0.0210, 0.0248, 0.0096, 0.0161, 0.0158],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:40:27,231 INFO [train.py:901] (2/4) Epoch 9, batch 5950, loss[loss=0.2103, simple_loss=0.285, pruned_loss=0.06774, over 7548.00 frames. ], tot_loss[loss=0.2477, simple_loss=0.3196, pruned_loss=0.08788, over 1608836.56 frames. ], batch size: 18, lr: 8.41e-03, grad_scale: 8.0
+2023-02-06 07:40:32,699 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70622.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:40:45,949 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.730e+02 3.193e+02 3.849e+02 7.953e+02, threshold=6.387e+02, percent-clipped=3.0
+2023-02-06 07:41:02,077 INFO [train.py:901] (2/4) Epoch 9, batch 6000, loss[loss=0.2804, simple_loss=0.3496, pruned_loss=0.1056, over 8579.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.3196, pruned_loss=0.08766, over 1614547.39 frames. ], batch size: 34, lr: 8.41e-03, grad_scale: 8.0
+2023-02-06 07:41:02,078 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 07:41:08,106 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8421, 1.4247, 1.4583, 1.2532, 1.0448, 1.3559, 1.5346, 1.4734],
+       device='cuda:2'), covar=tensor([0.0599, 0.1412, 0.1911, 0.1519, 0.0645, 0.1684, 0.0750, 0.0638],
+       device='cuda:2'), in_proj_covar=tensor([0.0105, 0.0158, 0.0198, 0.0162, 0.0108, 0.0166, 0.0121, 0.0141],
+       device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:2')
+2023-02-06 07:41:14,591 INFO [train.py:935] (2/4) Epoch 9, validation: loss=0.1952, simple_loss=0.2947, pruned_loss=0.0479, over 944034.00 frames.
+2023-02-06 07:41:14,592 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 07:41:33,926 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0428, 1.6752, 3.1734, 1.3838, 2.2008, 3.4861, 3.4845, 2.9711],
+       device='cuda:2'), covar=tensor([0.1005, 0.1398, 0.0410, 0.1940, 0.0992, 0.0260, 0.0529, 0.0642],
+       device='cuda:2'), in_proj_covar=tensor([0.0251, 0.0286, 0.0250, 0.0273, 0.0265, 0.0229, 0.0312, 0.0290],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:41:49,711 INFO [train.py:901] (2/4) Epoch 9, batch 6050, loss[loss=0.2214, simple_loss=0.2901, pruned_loss=0.07633, over 7534.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.3181, pruned_loss=0.08701, over 1612320.31 frames. ], batch size: 18, lr: 8.41e-03, grad_scale: 8.0
+2023-02-06 07:41:55,399 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. limit=5.0
+2023-02-06 07:42:04,732 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70737.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:42:07,988 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.822e+02 3.602e+02 4.348e+02 1.269e+03, threshold=7.203e+02, percent-clipped=6.0
+2023-02-06 07:42:24,368 INFO [train.py:901] (2/4) Epoch 9, batch 6100, loss[loss=0.264, simple_loss=0.3393, pruned_loss=0.09436, over 8587.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3189, pruned_loss=0.08707, over 1613720.61 frames. ], batch size: 31, lr: 8.40e-03, grad_scale: 8.0
+2023-02-06 07:42:40,908 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9127, 1.6942, 3.1714, 1.4765, 2.3531, 3.4795, 3.4258, 2.9563],
+       device='cuda:2'), covar=tensor([0.1077, 0.1491, 0.0448, 0.1910, 0.0977, 0.0255, 0.0540, 0.0637],
+       device='cuda:2'), in_proj_covar=tensor([0.0251, 0.0287, 0.0250, 0.0274, 0.0267, 0.0229, 0.0311, 0.0290],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:42:42,822 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 07:43:00,373 INFO [train.py:901] (2/4) Epoch 9, batch 6150, loss[loss=0.2758, simple_loss=0.3423, pruned_loss=0.1047, over 6976.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3195, pruned_loss=0.08774, over 1610773.86 frames. ], batch size: 71, lr: 8.40e-03, grad_scale: 8.0
+2023-02-06 07:43:14,656 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-02-06 07:43:18,324 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.692e+02 3.232e+02 3.879e+02 7.941e+02, threshold=6.463e+02, percent-clipped=1.0
+2023-02-06 07:43:19,258 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70843.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:43:20,207 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0
+2023-02-06 07:43:33,935 INFO [train.py:901] (2/4) Epoch 9, batch 6200, loss[loss=0.2697, simple_loss=0.3448, pruned_loss=0.09734, over 8202.00 frames. ], tot_loss[loss=0.2474, simple_loss=0.3199, pruned_loss=0.08749, over 1615722.23 frames. ], batch size: 23, lr: 8.40e-03, grad_scale: 8.0
+2023-02-06 07:43:35,020 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0
+2023-02-06 07:43:36,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70868.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:44:05,850 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3098, 1.9056, 2.9527, 2.3081, 2.4694, 2.0295, 1.6427, 1.4153],
+       device='cuda:2'), covar=tensor([0.3205, 0.3487, 0.0901, 0.2071, 0.1709, 0.1911, 0.1619, 0.3439],
+       device='cuda:2'), in_proj_covar=tensor([0.0862, 0.0832, 0.0706, 0.0816, 0.0905, 0.0767, 0.0690, 0.0745],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:44:09,711 INFO [train.py:901] (2/4) Epoch 9, batch 6250, loss[loss=0.291, simple_loss=0.3567, pruned_loss=0.1127, over 8528.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3203, pruned_loss=0.08814, over 1612413.51 frames. ], batch size: 26, lr: 8.40e-03, grad_scale: 8.0
+2023-02-06 07:44:28,454 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.792e+02 3.423e+02 4.432e+02 1.474e+03, threshold=6.847e+02, percent-clipped=7.0
+2023-02-06 07:44:30,140 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2413, 1.4033, 2.0555, 1.1012, 1.4780, 1.5175, 1.2902, 1.4101],
+       device='cuda:2'), covar=tensor([0.1586, 0.2082, 0.0696, 0.3491, 0.1430, 0.2568, 0.1718, 0.1761],
+       device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0499, 0.0529, 0.0570, 0.0607, 0.0545, 0.0464, 0.0606],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:44:30,308 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 07:44:43,937 INFO [train.py:901] (2/4) Epoch 9, batch 6300, loss[loss=0.2297, simple_loss=0.3036, pruned_loss=0.0779, over 8229.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3205, pruned_loss=0.08853, over 1610319.80 frames. ], batch size: 22, lr: 8.39e-03, grad_scale: 8.0
+2023-02-06 07:45:03,882 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70993.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:45:16,145 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71010.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:45:19,448 INFO [train.py:901] (2/4) Epoch 9, batch 6350, loss[loss=0.2571, simple_loss=0.3289, pruned_loss=0.09267, over 8718.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3202, pruned_loss=0.08821, over 1613602.61 frames. ], batch size: 39, lr: 8.39e-03, grad_scale: 8.0
+2023-02-06 07:45:21,647 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71018.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:45:38,840 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.814e+02 3.293e+02 4.210e+02 8.338e+02, threshold=6.585e+02, percent-clipped=5.0
+2023-02-06 07:45:40,425 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5024, 2.0504, 3.3925, 1.3652, 2.5310, 1.9669, 1.7701, 2.1102],
+       device='cuda:2'), covar=tensor([0.1966, 0.2157, 0.0753, 0.4077, 0.1519, 0.2961, 0.1691, 0.2356],
+       device='cuda:2'), in_proj_covar=tensor([0.0484, 0.0500, 0.0526, 0.0572, 0.0609, 0.0545, 0.0464, 0.0608],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:45:42,976 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71048.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:45:54,184 INFO [train.py:901] (2/4) Epoch 9, batch 6400, loss[loss=0.2591, simple_loss=0.3407, pruned_loss=0.0888, over 8017.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3224, pruned_loss=0.08965, over 1613167.18 frames. ], batch size: 22, lr: 8.39e-03, grad_scale: 8.0
+2023-02-06 07:45:55,035 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2150, 1.2352, 3.3038, 0.9651, 2.9295, 2.7645, 3.0387, 2.9312],
+       device='cuda:2'), covar=tensor([0.0591, 0.3660, 0.0767, 0.3367, 0.1255, 0.1064, 0.0579, 0.0754],
+       device='cuda:2'), in_proj_covar=tensor([0.0447, 0.0560, 0.0554, 0.0514, 0.0584, 0.0495, 0.0485, 0.0548],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:46:28,849 INFO [train.py:901] (2/4) Epoch 9, batch 6450, loss[loss=0.2407, simple_loss=0.3108, pruned_loss=0.08531, over 7691.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3215, pruned_loss=0.08902, over 1610172.11 frames. ], batch size: 18, lr: 8.38e-03, grad_scale: 4.0
+2023-02-06 07:46:37,163 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8606, 1.5742, 3.2298, 1.4410, 2.2960, 3.5442, 3.5408, 2.9968],
+       device='cuda:2'), covar=tensor([0.1078, 0.1462, 0.0368, 0.1901, 0.0898, 0.0239, 0.0425, 0.0651],
+       device='cuda:2'), in_proj_covar=tensor([0.0251, 0.0284, 0.0247, 0.0275, 0.0263, 0.0228, 0.0306, 0.0288],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:46:48,400 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.655e+02 3.350e+02 4.272e+02 1.011e+03, threshold=6.701e+02, percent-clipped=3.0
+2023-02-06 07:47:03,136 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4551, 1.5043, 1.7264, 1.3294, 0.9015, 1.7323, 0.0963, 1.1322],
+       device='cuda:2'), covar=tensor([0.2615, 0.1805, 0.0594, 0.1873, 0.4987, 0.0627, 0.3892, 0.1862],
+       device='cuda:2'), in_proj_covar=tensor([0.0157, 0.0158, 0.0094, 0.0208, 0.0242, 0.0096, 0.0158, 0.0156],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 07:47:03,607 INFO [train.py:901] (2/4) Epoch 9, batch 6500, loss[loss=0.2847, simple_loss=0.362, pruned_loss=0.1037, over 8626.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.32, pruned_loss=0.08829, over 1607081.06 frames. ], batch size: 31, lr: 8.38e-03, grad_scale: 4.0
+2023-02-06 07:47:23,302 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0959, 2.4954, 3.0067, 1.2076, 3.1721, 1.8463, 1.4952, 1.8505],
+       device='cuda:2'), covar=tensor([0.0520, 0.0249, 0.0156, 0.0488, 0.0270, 0.0582, 0.0555, 0.0320],
+       device='cuda:2'), in_proj_covar=tensor([0.0374, 0.0300, 0.0247, 0.0356, 0.0287, 0.0446, 0.0339, 0.0323],
+       device='cuda:2'), out_proj_covar=tensor([1.1071e-04, 8.6471e-05, 7.1391e-05, 1.0291e-04, 8.4344e-05, 1.4103e-04,
+       1.0042e-04, 9.5095e-05], device='cuda:2')
+2023-02-06 07:47:37,713 INFO [train.py:901] (2/4) Epoch 9, batch 6550, loss[loss=0.227, simple_loss=0.2923, pruned_loss=0.08085, over 7450.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3201, pruned_loss=0.08771, over 1613047.18 frames. ], batch size: 17, lr: 8.38e-03, grad_scale: 4.0
+2023-02-06 07:47:50,023 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 07:47:52,884 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2975, 1.5812, 4.4385, 1.4940, 3.8431, 3.7068, 3.9669, 3.8020],
+       device='cuda:2'), covar=tensor([0.0488, 0.4103, 0.0506, 0.3508, 0.1161, 0.0821, 0.0567, 0.0596],
+       device='cuda:2'), in_proj_covar=tensor([0.0450, 0.0562, 0.0554, 0.0513, 0.0584, 0.0496, 0.0486, 0.0547],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:47:58,021 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.500e+02 3.444e+02 4.178e+02 7.414e+02, threshold=6.887e+02, percent-clipped=1.0
+2023-02-06 07:48:10,530 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 07:48:13,164 INFO [train.py:901] (2/4) Epoch 9, batch 6600, loss[loss=0.3293, simple_loss=0.3871, pruned_loss=0.1358, over 6792.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3206, pruned_loss=0.08783, over 1612156.92 frames. ], batch size: 71, lr: 8.37e-03, grad_scale: 4.0
+2023-02-06 07:48:47,047 INFO [train.py:901] (2/4) Epoch 9, batch 6650, loss[loss=0.2175, simple_loss=0.2975, pruned_loss=0.06871, over 8138.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3209, pruned_loss=0.08795, over 1615538.68 frames. ], batch size: 22, lr: 8.37e-03, grad_scale: 4.0
+2023-02-06 07:49:05,717 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.641e+02 3.214e+02 4.234e+02 1.005e+03, threshold=6.427e+02, percent-clipped=4.0
+2023-02-06 07:49:13,932 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71354.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:49:21,688 INFO [train.py:901] (2/4) Epoch 9, batch 6700, loss[loss=0.2595, simple_loss=0.316, pruned_loss=0.1015, over 7653.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3199, pruned_loss=0.08792, over 1611562.86 frames. ], batch size: 19, lr: 8.37e-03, grad_scale: 4.0
+2023-02-06 07:49:37,835 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7674, 1.4390, 4.0519, 1.6072, 3.1786, 3.1526, 3.5988, 3.5029],
+       device='cuda:2'), covar=tensor([0.1019, 0.5930, 0.1072, 0.4324, 0.2191, 0.1581, 0.1008, 0.1040],
+       device='cuda:2'), in_proj_covar=tensor([0.0445, 0.0555, 0.0548, 0.0512, 0.0583, 0.0494, 0.0484, 0.0545],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:49:41,042 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:49:56,537 INFO [train.py:901] (2/4) Epoch 9, batch 6750, loss[loss=0.2335, simple_loss=0.3183, pruned_loss=0.07434, over 8468.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3202, pruned_loss=0.08815, over 1612836.72 frames. ], batch size: 27, lr: 8.37e-03, grad_scale: 4.0
+2023-02-06 07:50:15,374 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 3.032e+02 3.821e+02 4.704e+02 1.129e+03, threshold=7.641e+02, percent-clipped=7.0
+2023-02-06 07:50:23,396 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 07:50:30,841 INFO [train.py:901] (2/4) Epoch 9, batch 6800, loss[loss=0.2936, simple_loss=0.357, pruned_loss=0.1151, over 8355.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.321, pruned_loss=0.08831, over 1613095.26 frames. ], batch size: 24, lr: 8.36e-03, grad_scale: 8.0
+2023-02-06 07:50:33,616 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:50:50,784 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-02-06 07:51:01,250 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:51:06,467 INFO [train.py:901] (2/4) Epoch 9, batch 6850, loss[loss=0.2242, simple_loss=0.3041, pruned_loss=0.07221, over 8520.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.3196, pruned_loss=0.08726, over 1611608.95 frames. ], batch size: 31, lr: 8.36e-03, grad_scale: 8.0
+2023-02-06 07:51:14,502 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 07:51:25,243 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.628e+02 3.217e+02 4.054e+02 6.964e+02, threshold=6.433e+02, percent-clipped=0.0
+2023-02-06 07:51:40,041 INFO [train.py:901] (2/4) Epoch 9, batch 6900, loss[loss=0.3097, simple_loss=0.3687, pruned_loss=0.1254, over 8561.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3206, pruned_loss=0.08801, over 1614059.98 frames. ], batch size: 34, lr: 8.36e-03, grad_scale: 8.0
+2023-02-06 07:52:15,379 INFO [train.py:901] (2/4) Epoch 9, batch 6950, loss[loss=0.2523, simple_loss=0.3243, pruned_loss=0.09015, over 8478.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3203, pruned_loss=0.08805, over 1615604.03 frames. ], batch size: 25, lr: 8.35e-03, grad_scale: 8.0
+2023-02-06 07:52:23,472 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 07:52:29,649 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71634.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:52:35,520 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.622e+02 3.284e+02 3.978e+02 8.428e+02, threshold=6.567e+02, percent-clipped=2.0
+2023-02-06 07:52:50,437 INFO [train.py:901] (2/4) Epoch 9, batch 7000, loss[loss=0.2135, simple_loss=0.2892, pruned_loss=0.06894, over 7679.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3198, pruned_loss=0.08767, over 1612777.63 frames. ], batch size: 18, lr: 8.35e-03, grad_scale: 8.0
+2023-02-06 07:52:57,329 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1248, 1.1558, 3.3064, 0.9204, 2.8373, 2.7978, 3.0160, 2.9075],
+       device='cuda:2'), covar=tensor([0.0815, 0.3715, 0.0741, 0.3375, 0.1521, 0.0990, 0.0735, 0.0882],
+       device='cuda:2'), in_proj_covar=tensor([0.0445, 0.0552, 0.0554, 0.0511, 0.0580, 0.0492, 0.0485, 0.0543],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 07:53:06,293 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9689, 1.0716, 1.0144, 0.4941, 1.0865, 0.8693, 0.1448, 0.9903],
+       device='cuda:2'), covar=tensor([0.0213, 0.0185, 0.0176, 0.0285, 0.0205, 0.0482, 0.0407, 0.0168],
+       device='cuda:2'), in_proj_covar=tensor([0.0375, 0.0301, 0.0250, 0.0360, 0.0291, 0.0450, 0.0344, 0.0326],
+       device='cuda:2'), out_proj_covar=tensor([1.1063e-04, 8.6493e-05, 7.2229e-05, 1.0388e-04, 8.5340e-05, 1.4201e-04,
+       1.0171e-04, 9.5733e-05], device='cuda:2')
+2023-02-06 07:53:24,883 INFO [train.py:901] (2/4) Epoch 9, batch 7050, loss[loss=0.2202, simple_loss=0.2945, pruned_loss=0.07294, over 7697.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3195, pruned_loss=0.08818, over 1610283.04 frames. ], batch size: 18, lr: 8.35e-03, grad_scale: 8.0
+2023-02-06 07:53:32,433 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71725.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:53:41,130 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8458, 5.9484, 5.0469, 2.0901, 5.2211, 5.4374, 5.3900, 5.1491],
+       device='cuda:2'), covar=tensor([0.0492, 0.0360, 0.0836, 0.4480, 0.0629, 0.0620, 0.1033, 0.0667],
+       device='cuda:2'), in_proj_covar=tensor([0.0444, 0.0354, 0.0363, 0.0463, 0.0363, 0.0349, 0.0361, 0.0307],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:53:45,153 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.886e+02 3.338e+02 4.007e+02 6.250e+02, threshold=6.676e+02, percent-clipped=0.0
+2023-02-06 07:53:50,715 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:53:59,294 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71763.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:54:00,447 INFO [train.py:901] (2/4) Epoch 9, batch 7100, loss[loss=0.2586, simple_loss=0.3319, pruned_loss=0.09264, over 8352.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3191, pruned_loss=0.08824, over 1603433.26 frames. ], batch size: 26, lr: 8.35e-03, grad_scale: 8.0
+2023-02-06 07:54:05,647 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0
+2023-02-06 07:54:06,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.8836, 0.8044, 0.9112, 0.8306, 0.5932, 0.9229, 0.1100, 0.7153],
+       device='cuda:2'), covar=tensor([0.1725, 0.1466, 0.0533, 0.1148, 0.3266, 0.0520, 0.3375, 0.1882],
+       device='cuda:2'), in_proj_covar=tensor([0.0157, 0.0156, 0.0092, 0.0205, 0.0242, 0.0095, 0.0155, 0.0153],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002],
+       device='cuda:2')
+2023-02-06 07:54:16,149 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71788.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:54:34,522 INFO [train.py:901] (2/4) Epoch 9, batch 7150, loss[loss=0.2253, simple_loss=0.295, pruned_loss=0.07779, over 8236.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3187, pruned_loss=0.08828, over 1601700.58 frames. ], batch size: 22, lr: 8.34e-03, grad_scale: 8.0
+2023-02-06 07:54:53,688 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.29 vs. limit=5.0
+2023-02-06 07:54:54,749 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.560e+02 3.246e+02 4.043e+02 1.359e+03, threshold=6.493e+02, percent-clipped=7.0
+2023-02-06 07:55:10,759 INFO [train.py:901] (2/4) Epoch 9, batch 7200, loss[loss=0.2536, simple_loss=0.3334, pruned_loss=0.0869, over 8638.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.319, pruned_loss=0.08904, over 1598393.55 frames. ], batch size: 39, lr: 8.34e-03, grad_scale: 8.0
+2023-02-06 07:55:34,215 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0
+2023-02-06 07:55:43,959 INFO [train.py:901] (2/4) Epoch 9, batch 7250, loss[loss=0.2605, simple_loss=0.344, pruned_loss=0.0885, over 8495.00 frames. ], tot_loss[loss=0.2493, simple_loss=0.3198, pruned_loss=0.08933, over 1602470.70 frames. ], batch size: 26, lr: 8.34e-03, grad_scale: 8.0
+2023-02-06 07:55:58,953 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71937.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:56:02,878 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.597e+02 3.277e+02 3.984e+02 9.565e+02, threshold=6.554e+02, percent-clipped=6.0
+2023-02-06 07:56:19,573 INFO [train.py:901] (2/4) Epoch 9, batch 7300, loss[loss=0.2433, simple_loss=0.3021, pruned_loss=0.09218, over 8082.00 frames. ], tot_loss[loss=0.248, simple_loss=0.3188, pruned_loss=0.08858, over 1603093.08 frames. ], batch size: 21, lr: 8.33e-03, grad_scale: 8.0
+2023-02-06 07:56:28,839 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71978.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:56:41,715 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4448, 2.0101, 3.3362, 1.2519, 2.5048, 1.8789, 1.4865, 2.3502],
+       device='cuda:2'), covar=tensor([0.1652, 0.1932, 0.0716, 0.3562, 0.1382, 0.2653, 0.1725, 0.1927],
+       device='cuda:2'), in_proj_covar=tensor([0.0480, 0.0493, 0.0527, 0.0570, 0.0605, 0.0541, 0.0462, 0.0606],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:56:47,427 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6934, 1.3355, 1.6488, 1.2052, 0.8892, 1.4103, 1.4962, 1.4186],
+       device='cuda:2'), covar=tensor([0.0541, 0.1282, 0.1740, 0.1500, 0.0613, 0.1508, 0.0700, 0.0635],
+       device='cuda:2'), in_proj_covar=tensor([0.0103, 0.0156, 0.0195, 0.0162, 0.0108, 0.0166, 0.0119, 0.0139],
+       device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+       device='cuda:2')
+2023-02-06 07:56:54,487 INFO [train.py:901] (2/4) Epoch 9, batch 7350, loss[loss=0.2359, simple_loss=0.3194, pruned_loss=0.07618, over 8294.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3188, pruned_loss=0.08817, over 1604888.95 frames. ], batch size: 23, lr: 8.33e-03, grad_scale: 8.0
+2023-02-06 07:56:59,696 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. limit=5.0
+2023-02-06 07:57:02,501 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 07:57:13,521 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.925e+02 3.749e+02 4.804e+02 1.068e+03, threshold=7.499e+02, percent-clipped=9.0
+2023-02-06 07:57:22,478 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 07:57:30,036 INFO [train.py:901] (2/4) Epoch 9, batch 7400, loss[loss=0.2744, simple_loss=0.3388, pruned_loss=0.105, over 7154.00 frames. ], tot_loss[loss=0.2468, simple_loss=0.3183, pruned_loss=0.0877, over 1608448.83 frames. ], batch size: 72, lr: 8.33e-03, grad_scale: 8.0
+2023-02-06 07:57:40,352 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8207, 2.8460, 3.3016, 2.2393, 1.5847, 3.3149, 0.7037, 1.8828],
+       device='cuda:2'), covar=tensor([0.1914, 0.1045, 0.0388, 0.2124, 0.4466, 0.0556, 0.3777, 0.1984],
+       device='cuda:2'), in_proj_covar=tensor([0.0156, 0.0155, 0.0091, 0.0204, 0.0240, 0.0093, 0.0152, 0.0154],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 07:57:50,403 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72093.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 07:58:03,869 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 07:58:05,138 INFO [train.py:901] (2/4) Epoch 9, batch 7450, loss[loss=0.2753, simple_loss=0.3395, pruned_loss=0.1055, over 8609.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3171, pruned_loss=0.08621, over 1612486.68 frames.
], batch size: 39, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:58:23,965 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.410e+02 3.229e+02 3.860e+02 9.903e+02, threshold=6.459e+02, percent-clipped=1.0 +2023-02-06 07:58:26,830 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:58:38,700 INFO [train.py:901] (2/4) Epoch 9, batch 7500, loss[loss=0.2359, simple_loss=0.3146, pruned_loss=0.07859, over 7940.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3166, pruned_loss=0.08564, over 1612439.20 frames. ], batch size: 20, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:15,028 INFO [train.py:901] (2/4) Epoch 9, batch 7550, loss[loss=0.2279, simple_loss=0.3019, pruned_loss=0.07696, over 7974.00 frames. ], tot_loss[loss=0.2444, simple_loss=0.3171, pruned_loss=0.08585, over 1613698.93 frames. ], batch size: 21, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:33,695 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.819e+02 3.433e+02 4.309e+02 8.597e+02, threshold=6.865e+02, percent-clipped=4.0 +2023-02-06 07:59:48,158 INFO [train.py:901] (2/4) Epoch 9, batch 7600, loss[loss=0.2418, simple_loss=0.3225, pruned_loss=0.08058, over 8498.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3184, pruned_loss=0.08652, over 1614543.71 frames. ], batch size: 26, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:58,716 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:22,185 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6536, 1.5515, 2.8295, 1.2129, 2.0532, 3.0260, 3.0696, 2.6061], + device='cuda:2'), covar=tensor([0.1033, 0.1365, 0.0394, 0.2063, 0.0893, 0.0314, 0.0544, 0.0666], + device='cuda:2'), in_proj_covar=tensor([0.0260, 0.0295, 0.0255, 0.0287, 0.0271, 0.0236, 0.0319, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:00:22,667 INFO [train.py:901] (2/4) Epoch 9, batch 7650, loss[loss=0.2639, simple_loss=0.339, pruned_loss=0.09444, over 8247.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3182, pruned_loss=0.08634, over 1614354.22 frames. ], batch size: 24, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:00:42,776 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.706e+02 3.178e+02 3.983e+02 6.818e+02, threshold=6.357e+02, percent-clipped=0.0 +2023-02-06 08:00:43,590 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:47,001 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:57,586 INFO [train.py:901] (2/4) Epoch 9, batch 7700, loss[loss=0.2541, simple_loss=0.3381, pruned_loss=0.08504, over 8467.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3173, pruned_loss=0.08601, over 1614275.80 frames. 
], batch size: 25, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:02,559 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7601, 3.7933, 2.5908, 2.4123, 2.7265, 1.7685, 2.7162, 2.7211], + device='cuda:2'), covar=tensor([0.1553, 0.0262, 0.0789, 0.0829, 0.0570, 0.1387, 0.0972, 0.0981], + device='cuda:2'), in_proj_covar=tensor([0.0340, 0.0235, 0.0311, 0.0295, 0.0301, 0.0320, 0.0334, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 08:01:03,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:09,770 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 08:01:18,754 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:32,217 INFO [train.py:901] (2/4) Epoch 9, batch 7750, loss[loss=0.2131, simple_loss=0.2841, pruned_loss=0.0711, over 7218.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.3166, pruned_loss=0.08589, over 1611130.91 frames. ], batch size: 16, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:53,035 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.670e+02 3.267e+02 4.054e+02 1.108e+03, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 08:02:08,197 INFO [train.py:901] (2/4) Epoch 9, batch 7800, loss[loss=0.2757, simple_loss=0.3369, pruned_loss=0.1073, over 8637.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3178, pruned_loss=0.08688, over 1613685.94 frames. ], batch size: 34, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:02:19,579 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7726, 4.6934, 4.1873, 1.8902, 4.2448, 4.1836, 4.2624, 3.8665], + device='cuda:2'), covar=tensor([0.0641, 0.0488, 0.1016, 0.5004, 0.0725, 0.0900, 0.1220, 0.0745], + device='cuda:2'), in_proj_covar=tensor([0.0441, 0.0350, 0.0365, 0.0462, 0.0362, 0.0345, 0.0357, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:02:25,493 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:02:41,268 INFO [train.py:901] (2/4) Epoch 9, batch 7850, loss[loss=0.2371, simple_loss=0.3089, pruned_loss=0.08263, over 7810.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.3184, pruned_loss=0.08713, over 1612770.53 frames. ], batch size: 20, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:02:59,522 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.530e+02 3.201e+02 3.890e+02 8.475e+02, threshold=6.403e+02, percent-clipped=6.0 +2023-02-06 08:03:14,038 INFO [train.py:901] (2/4) Epoch 9, batch 7900, loss[loss=0.2502, simple_loss=0.3319, pruned_loss=0.08427, over 8252.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3185, pruned_loss=0.08666, over 1614882.68 frames. ], batch size: 24, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:03:41,418 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:03:47,068 INFO [train.py:901] (2/4) Epoch 9, batch 7950, loss[loss=0.2301, simple_loss=0.3108, pruned_loss=0.07473, over 8650.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.317, pruned_loss=0.08566, over 1614722.47 frames. 
], batch size: 39, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:04:05,356 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.498e+02 3.176e+02 4.184e+02 8.861e+02, threshold=6.353e+02, percent-clipped=6.0 +2023-02-06 08:04:11,432 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72652.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:19,757 INFO [train.py:901] (2/4) Epoch 9, batch 8000, loss[loss=0.218, simple_loss=0.2784, pruned_loss=0.07882, over 7529.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3177, pruned_loss=0.08643, over 1617980.18 frames. ], batch size: 18, lr: 8.29e-03, grad_scale: 8.0 +2023-02-06 08:04:27,785 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:34,988 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:52,976 INFO [train.py:901] (2/4) Epoch 9, batch 8050, loss[loss=0.2066, simple_loss=0.2884, pruned_loss=0.06245, over 7552.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3163, pruned_loss=0.0867, over 1598197.07 frames. ], batch size: 18, lr: 8.29e-03, grad_scale: 8.0 +2023-02-06 08:05:05,108 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1294, 1.7355, 3.5120, 1.4271, 2.3955, 3.8794, 3.8555, 3.3519], + device='cuda:2'), covar=tensor([0.1021, 0.1478, 0.0321, 0.2105, 0.1015, 0.0221, 0.0440, 0.0571], + device='cuda:2'), in_proj_covar=tensor([0.0251, 0.0288, 0.0248, 0.0277, 0.0263, 0.0228, 0.0308, 0.0285], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:05:11,520 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.635e+02 3.102e+02 3.711e+02 7.462e+02, threshold=6.205e+02, percent-clipped=1.0 +2023-02-06 08:05:25,732 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 08:05:31,309 INFO [train.py:901] (2/4) Epoch 10, batch 0, loss[loss=0.2745, simple_loss=0.3505, pruned_loss=0.09926, over 8469.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3505, pruned_loss=0.09926, over 8469.00 frames. ], batch size: 25, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:05:31,309 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 08:05:43,260 INFO [train.py:935] (2/4) Epoch 10, validation: loss=0.1954, simple_loss=0.295, pruned_loss=0.0479, over 944034.00 frames. +2023-02-06 08:05:43,261 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 08:05:57,119 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 08:06:07,500 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 08:06:17,948 INFO [train.py:901] (2/4) Epoch 10, batch 50, loss[loss=0.2896, simple_loss=0.357, pruned_loss=0.1111, over 8529.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3199, pruned_loss=0.08768, over 365087.33 frames. ], batch size: 39, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:06:21,761 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:06:31,220 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 08:06:49,397 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.716e+02 3.124e+02 3.887e+02 7.160e+02, threshold=6.248e+02, percent-clipped=5.0 +2023-02-06 08:06:52,292 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 08:06:52,950 INFO [train.py:901] (2/4) Epoch 10, batch 100, loss[loss=0.2738, simple_loss=0.3435, pruned_loss=0.1021, over 8188.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3174, pruned_loss=0.08673, over 646286.33 frames. ], batch size: 23, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:07:03,806 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:07:22,310 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:07:30,272 INFO [train.py:901] (2/4) Epoch 10, batch 150, loss[loss=0.24, simple_loss=0.3278, pruned_loss=0.07611, over 8440.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3192, pruned_loss=0.08631, over 864263.40 frames. ], batch size: 29, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:07:33,243 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9184, 1.5441, 2.2072, 1.7927, 2.0480, 1.8136, 1.4660, 0.7081], + device='cuda:2'), covar=tensor([0.3360, 0.3209, 0.0987, 0.2068, 0.1489, 0.1848, 0.1580, 0.3247], + device='cuda:2'), in_proj_covar=tensor([0.0865, 0.0828, 0.0700, 0.0817, 0.0905, 0.0763, 0.0689, 0.0744], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:07:36,683 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5952, 1.4672, 2.8103, 1.2275, 2.0963, 3.0598, 3.0482, 2.6185], + device='cuda:2'), covar=tensor([0.1100, 0.1409, 0.0403, 0.2096, 0.0824, 0.0303, 0.0600, 0.0695], + device='cuda:2'), in_proj_covar=tensor([0.0252, 0.0289, 0.0251, 0.0279, 0.0264, 0.0230, 0.0312, 0.0285], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:07:40,076 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:08:01,152 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.670e+02 3.307e+02 4.288e+02 9.841e+02, threshold=6.614e+02, percent-clipped=3.0 +2023-02-06 08:08:04,548 INFO [train.py:901] (2/4) Epoch 10, batch 200, loss[loss=0.2416, simple_loss=0.3225, pruned_loss=0.08039, over 8352.00 frames. ], tot_loss[loss=0.2485, simple_loss=0.3216, pruned_loss=0.08766, over 1033410.63 frames. ], batch size: 26, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:08:29,435 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72982.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:08:41,006 INFO [train.py:901] (2/4) Epoch 10, batch 250, loss[loss=0.2085, simple_loss=0.294, pruned_loss=0.06154, over 8027.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3207, pruned_loss=0.08779, over 1160888.87 frames. ], batch size: 22, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:08:47,838 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 08:08:56,861 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-06 08:09:02,432 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1101, 1.4714, 1.5731, 1.3892, 1.1341, 1.3195, 1.7447, 1.8019], + device='cuda:2'), covar=tensor([0.0572, 0.1285, 0.1736, 0.1411, 0.0585, 0.1627, 0.0700, 0.0548], + device='cuda:2'), in_proj_covar=tensor([0.0106, 0.0155, 0.0196, 0.0160, 0.0107, 0.0167, 0.0120, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 08:09:07,477 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 08:09:12,566 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.688e+02 3.158e+02 3.760e+02 5.735e+02, threshold=6.316e+02, percent-clipped=0.0 +2023-02-06 08:09:16,047 INFO [train.py:901] (2/4) Epoch 10, batch 300, loss[loss=0.2716, simple_loss=0.3402, pruned_loss=0.1015, over 7815.00 frames. ], tot_loss[loss=0.2488, simple_loss=0.3211, pruned_loss=0.08829, over 1258643.54 frames. ], batch size: 20, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:09:23,774 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:09:40,900 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:09:51,613 INFO [train.py:901] (2/4) Epoch 10, batch 350, loss[loss=0.2461, simple_loss=0.3161, pruned_loss=0.08805, over 8366.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3208, pruned_loss=0.08857, over 1345194.03 frames. ], batch size: 24, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:09:55,107 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5732, 1.9699, 3.4121, 1.2701, 2.5744, 1.8812, 1.5181, 2.3875], + device='cuda:2'), covar=tensor([0.1619, 0.2143, 0.0719, 0.3879, 0.1499, 0.2779, 0.1799, 0.2230], + device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0495, 0.0534, 0.0567, 0.0607, 0.0540, 0.0466, 0.0605], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:10:23,500 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 2.632e+02 3.058e+02 3.924e+02 7.931e+02, threshold=6.116e+02, percent-clipped=5.0 +2023-02-06 08:10:26,918 INFO [train.py:901] (2/4) Epoch 10, batch 400, loss[loss=0.1886, simple_loss=0.2645, pruned_loss=0.05636, over 7716.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3183, pruned_loss=0.08679, over 1404108.04 frames. ], batch size: 18, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:10:49,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1741, 3.8896, 2.7977, 2.7411, 2.8089, 2.2474, 2.8115, 2.9877], + device='cuda:2'), covar=tensor([0.1599, 0.0239, 0.0848, 0.0719, 0.0629, 0.1081, 0.1016, 0.1095], + device='cuda:2'), in_proj_covar=tensor([0.0343, 0.0231, 0.0310, 0.0297, 0.0300, 0.0318, 0.0335, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 08:11:01,343 INFO [train.py:901] (2/4) Epoch 10, batch 450, loss[loss=0.2391, simple_loss=0.306, pruned_loss=0.08611, over 7518.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3194, pruned_loss=0.08677, over 1452813.49 frames. 
], batch size: 18, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:11:33,877 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.629e+02 3.140e+02 3.877e+02 8.143e+02, threshold=6.279e+02, percent-clipped=4.0 +2023-02-06 08:11:35,981 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1040, 4.1477, 3.6956, 1.9507, 3.6533, 3.7004, 3.8233, 3.2886], + device='cuda:2'), covar=tensor([0.0949, 0.0617, 0.1092, 0.4801, 0.0916, 0.0960, 0.1287, 0.1142], + device='cuda:2'), in_proj_covar=tensor([0.0461, 0.0358, 0.0375, 0.0477, 0.0373, 0.0357, 0.0367, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:11:37,157 INFO [train.py:901] (2/4) Epoch 10, batch 500, loss[loss=0.2203, simple_loss=0.2949, pruned_loss=0.07286, over 8144.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3173, pruned_loss=0.08618, over 1485476.37 frames. ], batch size: 22, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:11:42,548 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:12:10,508 INFO [train.py:901] (2/4) Epoch 10, batch 550, loss[loss=0.2507, simple_loss=0.3138, pruned_loss=0.0938, over 7963.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.3172, pruned_loss=0.08598, over 1513114.20 frames. ], batch size: 21, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:12:18,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4408, 2.0165, 3.9281, 1.8773, 2.5284, 4.4670, 4.3211, 3.8421], + device='cuda:2'), covar=tensor([0.0872, 0.1246, 0.0353, 0.1672, 0.1037, 0.0178, 0.0431, 0.0520], + device='cuda:2'), in_proj_covar=tensor([0.0255, 0.0290, 0.0252, 0.0282, 0.0265, 0.0231, 0.0314, 0.0290], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:12:19,359 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:12:19,472 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5108, 2.0572, 3.1326, 2.4484, 2.8143, 2.2358, 1.7461, 1.4552], + device='cuda:2'), covar=tensor([0.3265, 0.3779, 0.0971, 0.2353, 0.1808, 0.1930, 0.1644, 0.3836], + device='cuda:2'), in_proj_covar=tensor([0.0860, 0.0831, 0.0701, 0.0817, 0.0907, 0.0763, 0.0690, 0.0744], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:12:23,928 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7763, 1.3461, 1.5022, 1.2107, 1.0580, 1.3220, 1.5668, 1.4506], + device='cuda:2'), covar=tensor([0.0551, 0.1369, 0.1849, 0.1486, 0.0620, 0.1626, 0.0728, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0105, 0.0157, 0.0197, 0.0161, 0.0108, 0.0168, 0.0120, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 08:12:29,158 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73326.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:12:41,609 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.475e+02 3.100e+02 3.629e+02 1.040e+03, threshold=6.201e+02, percent-clipped=3.0 +2023-02-06 08:12:44,822 INFO [train.py:901] (2/4) Epoch 10, batch 600, loss[loss=0.3062, simple_loss=0.3504, pruned_loss=0.131, over 7394.00 frames. 
], tot_loss[loss=0.245, simple_loss=0.3178, pruned_loss=0.08614, over 1537815.28 frames. ], batch size: 71, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:12:56,196 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 08:13:01,723 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73371.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:13:20,006 INFO [train.py:901] (2/4) Epoch 10, batch 650, loss[loss=0.2852, simple_loss=0.3393, pruned_loss=0.1155, over 7804.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3185, pruned_loss=0.08637, over 1560436.77 frames. ], batch size: 20, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:13:50,088 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73441.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:13:51,167 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.848e+02 2.469e+02 3.040e+02 3.840e+02 6.530e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 08:13:54,587 INFO [train.py:901] (2/4) Epoch 10, batch 700, loss[loss=0.2307, simple_loss=0.2951, pruned_loss=0.08312, over 7566.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.317, pruned_loss=0.08505, over 1574285.69 frames. ], batch size: 18, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:14:31,477 INFO [train.py:901] (2/4) Epoch 10, batch 750, loss[loss=0.2728, simple_loss=0.341, pruned_loss=0.1023, over 8557.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3177, pruned_loss=0.08515, over 1586210.27 frames. ], batch size: 31, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:14:45,794 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 08:14:54,750 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 08:15:02,254 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.756e+02 3.307e+02 3.958e+02 8.111e+02, threshold=6.615e+02, percent-clipped=6.0 +2023-02-06 08:15:02,466 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3098, 1.4216, 1.2764, 1.9087, 0.8137, 1.1299, 1.2663, 1.5571], + device='cuda:2'), covar=tensor([0.1026, 0.0974, 0.1256, 0.0567, 0.1259, 0.1754, 0.0944, 0.0786], + device='cuda:2'), in_proj_covar=tensor([0.0246, 0.0221, 0.0264, 0.0220, 0.0223, 0.0260, 0.0267, 0.0228], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 08:15:05,717 INFO [train.py:901] (2/4) Epoch 10, batch 800, loss[loss=0.2546, simple_loss=0.3255, pruned_loss=0.09184, over 8671.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3176, pruned_loss=0.08528, over 1596089.53 frames. ], batch size: 49, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:15:17,221 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7227, 1.3467, 3.9232, 1.4404, 3.4316, 3.2875, 3.5480, 3.4302], + device='cuda:2'), covar=tensor([0.0700, 0.4134, 0.0628, 0.3241, 0.1366, 0.1024, 0.0611, 0.0787], + device='cuda:2'), in_proj_covar=tensor([0.0444, 0.0556, 0.0557, 0.0510, 0.0585, 0.0496, 0.0488, 0.0552], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:15:41,988 INFO [train.py:901] (2/4) Epoch 10, batch 850, loss[loss=0.2457, simple_loss=0.3196, pruned_loss=0.0859, over 7654.00 frames. 
], tot_loss[loss=0.2447, simple_loss=0.3178, pruned_loss=0.08581, over 1600983.07 frames. ], batch size: 19, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:15:49,885 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:03,006 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73627.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:13,764 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.847e+02 3.470e+02 4.482e+02 1.720e+03, threshold=6.940e+02, percent-clipped=10.0 +2023-02-06 08:16:17,263 INFO [train.py:901] (2/4) Epoch 10, batch 900, loss[loss=0.2251, simple_loss=0.3007, pruned_loss=0.07476, over 8348.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3172, pruned_loss=0.0851, over 1605144.54 frames. ], batch size: 24, lr: 7.83e-03, grad_scale: 16.0 +2023-02-06 08:16:20,226 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73652.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:22,223 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73655.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:52,131 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73697.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:16:52,440 INFO [train.py:901] (2/4) Epoch 10, batch 950, loss[loss=0.3706, simple_loss=0.4093, pruned_loss=0.166, over 8745.00 frames. ], tot_loss[loss=0.245, simple_loss=0.3182, pruned_loss=0.0859, over 1608823.66 frames. ], batch size: 30, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:17:10,351 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73722.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:17:12,016 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 08:17:18,312 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 08:17:24,917 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.751e+02 3.323e+02 4.211e+02 1.163e+03, threshold=6.645e+02, percent-clipped=9.0 +2023-02-06 08:17:25,308 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-06 08:17:27,464 INFO [train.py:901] (2/4) Epoch 10, batch 1000, loss[loss=0.213, simple_loss=0.3023, pruned_loss=0.06179, over 8464.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3172, pruned_loss=0.08516, over 1609074.96 frames. 
], batch size: 25, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:17:28,903 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:17:35,503 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3331, 1.6420, 1.5774, 0.8752, 1.6581, 1.2527, 0.3392, 1.5454], + device='cuda:2'), covar=tensor([0.0263, 0.0169, 0.0184, 0.0294, 0.0214, 0.0560, 0.0450, 0.0147], + device='cuda:2'), in_proj_covar=tensor([0.0370, 0.0304, 0.0256, 0.0360, 0.0287, 0.0452, 0.0340, 0.0332], + device='cuda:2'), out_proj_covar=tensor([1.0860e-04, 8.6769e-05, 7.3630e-05, 1.0359e-04, 8.3571e-05, 1.4230e-04, + 9.9960e-05, 9.7048e-05], device='cuda:2') +2023-02-06 08:17:42,250 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:17:50,797 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 08:18:00,667 INFO [train.py:901] (2/4) Epoch 10, batch 1050, loss[loss=0.2635, simple_loss=0.3264, pruned_loss=0.1003, over 8348.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3185, pruned_loss=0.08604, over 1617821.26 frames. ], batch size: 24, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:18:01,390 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 08:18:16,647 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2320, 1.7838, 2.7217, 2.1598, 2.3318, 2.0481, 1.6336, 1.0659], + device='cuda:2'), covar=tensor([0.3123, 0.3301, 0.0875, 0.2029, 0.1559, 0.1805, 0.1458, 0.3553], + device='cuda:2'), in_proj_covar=tensor([0.0865, 0.0829, 0.0710, 0.0821, 0.0910, 0.0771, 0.0691, 0.0748], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:18:34,182 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.634e+02 3.058e+02 3.903e+02 1.179e+03, threshold=6.116e+02, percent-clipped=2.0 +2023-02-06 08:18:36,808 INFO [train.py:901] (2/4) Epoch 10, batch 1100, loss[loss=0.2687, simple_loss=0.3377, pruned_loss=0.09983, over 8133.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.318, pruned_loss=0.08578, over 1614919.18 frames. ], batch size: 22, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:09,834 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 08:19:10,510 INFO [train.py:901] (2/4) Epoch 10, batch 1150, loss[loss=0.2377, simple_loss=0.3082, pruned_loss=0.08361, over 7785.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3178, pruned_loss=0.08559, over 1614944.48 frames. 
], batch size: 19, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:23,867 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9176, 1.6095, 1.7243, 1.2656, 1.1237, 1.3686, 1.6914, 1.4566], + device='cuda:2'), covar=tensor([0.0484, 0.1128, 0.1564, 0.1362, 0.0532, 0.1403, 0.0631, 0.0569], + device='cuda:2'), in_proj_covar=tensor([0.0104, 0.0157, 0.0195, 0.0161, 0.0107, 0.0166, 0.0119, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 08:19:42,449 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.356e+02 2.791e+02 3.726e+02 1.227e+03, threshold=5.583e+02, percent-clipped=4.0 +2023-02-06 08:19:45,136 INFO [train.py:901] (2/4) Epoch 10, batch 1200, loss[loss=0.2484, simple_loss=0.3223, pruned_loss=0.08724, over 8665.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.3179, pruned_loss=0.08569, over 1614261.67 frames. ], batch size: 34, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:48,562 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73952.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:20,055 INFO [train.py:901] (2/4) Epoch 10, batch 1250, loss[loss=0.2139, simple_loss=0.2886, pruned_loss=0.06957, over 7698.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.3176, pruned_loss=0.08545, over 1617113.91 frames. ], batch size: 18, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:20:35,800 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7972, 2.1541, 1.6351, 2.5962, 1.2159, 1.2635, 1.8150, 2.1982], + device='cuda:2'), covar=tensor([0.0984, 0.0839, 0.1315, 0.0521, 0.1272, 0.1890, 0.1122, 0.0797], + device='cuda:2'), in_proj_covar=tensor([0.0242, 0.0219, 0.0262, 0.0219, 0.0222, 0.0257, 0.0264, 0.0224], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 08:20:39,877 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:51,572 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.519e+02 3.075e+02 3.983e+02 7.817e+02, threshold=6.150e+02, percent-clipped=4.0 +2023-02-06 08:20:54,941 INFO [train.py:901] (2/4) Epoch 10, batch 1300, loss[loss=0.2577, simple_loss=0.3335, pruned_loss=0.09096, over 8499.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.317, pruned_loss=0.08501, over 1619780.22 frames. ], batch size: 28, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:20:57,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:58,729 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 08:21:07,843 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:19,112 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:21,529 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.00 vs. 
limit=5.0 +2023-02-06 08:21:26,838 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:29,347 INFO [train.py:901] (2/4) Epoch 10, batch 1350, loss[loss=0.2044, simple_loss=0.2747, pruned_loss=0.06704, over 7249.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3178, pruned_loss=0.08537, over 1620230.24 frames. ], batch size: 16, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:21:59,534 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.773e+02 3.448e+02 4.052e+02 8.675e+02, threshold=6.895e+02, percent-clipped=5.0 +2023-02-06 08:22:02,250 INFO [train.py:901] (2/4) Epoch 10, batch 1400, loss[loss=0.2504, simple_loss=0.3024, pruned_loss=0.09917, over 8027.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3186, pruned_loss=0.08629, over 1619573.63 frames. ], batch size: 22, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:22:16,769 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 08:22:38,033 INFO [train.py:901] (2/4) Epoch 10, batch 1450, loss[loss=0.2008, simple_loss=0.2796, pruned_loss=0.06098, over 7558.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3184, pruned_loss=0.08594, over 1617982.85 frames. ], batch size: 18, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:22:41,666 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 08:22:45,869 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:23:01,296 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5355, 5.6400, 4.9311, 2.5451, 4.9479, 5.3732, 5.1697, 4.8504], + device='cuda:2'), covar=tensor([0.0712, 0.0480, 0.0889, 0.4293, 0.0747, 0.0648, 0.1121, 0.0696], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0356, 0.0377, 0.0470, 0.0372, 0.0353, 0.0361, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:23:01,603 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 08:23:07,379 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2172, 1.4547, 3.4763, 1.2855, 2.4098, 3.9377, 3.8933, 3.2979], + device='cuda:2'), covar=tensor([0.0907, 0.1551, 0.0302, 0.1983, 0.0916, 0.0213, 0.0387, 0.0663], + device='cuda:2'), in_proj_covar=tensor([0.0256, 0.0293, 0.0254, 0.0284, 0.0269, 0.0232, 0.0318, 0.0289], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:23:09,187 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.838e+02 2.525e+02 3.045e+02 3.954e+02 1.310e+03, threshold=6.089e+02, percent-clipped=4.0 +2023-02-06 08:23:11,852 INFO [train.py:901] (2/4) Epoch 10, batch 1500, loss[loss=0.2507, simple_loss=0.3271, pruned_loss=0.08713, over 7978.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3175, pruned_loss=0.08555, over 1615583.91 frames. ], batch size: 21, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:23:18,347 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:23:46,596 INFO [train.py:901] (2/4) Epoch 10, batch 1550, loss[loss=0.2521, simple_loss=0.333, pruned_loss=0.08565, over 8507.00 frames. 
], tot_loss[loss=0.2454, simple_loss=0.3185, pruned_loss=0.08609, over 1616764.24 frames. ], batch size: 26, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:23:47,773 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 08:24:05,684 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:24:19,877 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.540e+02 3.095e+02 3.981e+02 6.537e+02, threshold=6.190e+02, percent-clipped=3.0 +2023-02-06 08:24:22,707 INFO [train.py:901] (2/4) Epoch 10, batch 1600, loss[loss=0.2093, simple_loss=0.278, pruned_loss=0.07026, over 7818.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3185, pruned_loss=0.08596, over 1618091.03 frames. ], batch size: 20, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:24:22,910 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:24:56,948 INFO [train.py:901] (2/4) Epoch 10, batch 1650, loss[loss=0.299, simple_loss=0.3612, pruned_loss=0.1184, over 8468.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.3178, pruned_loss=0.08575, over 1616407.96 frames. ], batch size: 27, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:25:18,074 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74426.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:25:30,268 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.508e+02 3.008e+02 3.971e+02 8.483e+02, threshold=6.016e+02, percent-clipped=6.0 +2023-02-06 08:25:31,722 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3484, 1.5611, 1.3303, 1.8571, 0.8236, 1.1302, 1.4039, 1.5270], + device='cuda:2'), covar=tensor([0.0997, 0.0776, 0.1320, 0.0574, 0.1185, 0.1721, 0.0823, 0.0739], + device='cuda:2'), in_proj_covar=tensor([0.0241, 0.0216, 0.0261, 0.0217, 0.0221, 0.0255, 0.0261, 0.0222], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 08:25:32,863 INFO [train.py:901] (2/4) Epoch 10, batch 1700, loss[loss=0.239, simple_loss=0.3188, pruned_loss=0.07957, over 8140.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3164, pruned_loss=0.08491, over 1612401.33 frames. ], batch size: 22, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:25:44,221 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:00,778 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:05,906 INFO [train.py:901] (2/4) Epoch 10, batch 1750, loss[loss=0.2395, simple_loss=0.3098, pruned_loss=0.08458, over 8241.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.317, pruned_loss=0.08539, over 1615801.27 frames. ], batch size: 22, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:26:36,857 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74541.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:38,773 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.804e+02 3.517e+02 4.698e+02 1.546e+03, threshold=7.034e+02, percent-clipped=7.0 +2023-02-06 08:26:41,532 INFO [train.py:901] (2/4) Epoch 10, batch 1800, loss[loss=0.2125, simple_loss=0.2915, pruned_loss=0.06674, over 7928.00 frames. 
], tot_loss[loss=0.2437, simple_loss=0.3165, pruned_loss=0.08549, over 1608356.58 frames. ], batch size: 20, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:27:14,907 INFO [train.py:901] (2/4) Epoch 10, batch 1850, loss[loss=0.2352, simple_loss=0.2976, pruned_loss=0.08644, over 7701.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.316, pruned_loss=0.08536, over 1607192.57 frames. ], batch size: 18, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:27:17,789 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:27:25,265 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74613.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:27:46,970 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.251e+02 2.722e+02 3.219e+02 4.226e+02 1.097e+03, threshold=6.437e+02, percent-clipped=2.0 +2023-02-06 08:27:50,389 INFO [train.py:901] (2/4) Epoch 10, batch 1900, loss[loss=0.2621, simple_loss=0.3386, pruned_loss=0.09279, over 8488.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3167, pruned_loss=0.08564, over 1605834.17 frames. ], batch size: 28, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:28:13,876 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 08:28:25,274 INFO [train.py:901] (2/4) Epoch 10, batch 1950, loss[loss=0.2608, simple_loss=0.3175, pruned_loss=0.102, over 7701.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3149, pruned_loss=0.08467, over 1607189.51 frames. ], batch size: 18, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:28:25,938 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 08:28:38,216 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:28:43,969 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 08:28:56,744 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.465e+02 3.030e+02 3.717e+02 6.494e+02, threshold=6.060e+02, percent-clipped=3.0 +2023-02-06 08:28:59,513 INFO [train.py:901] (2/4) Epoch 10, batch 2000, loss[loss=0.3344, simple_loss=0.3772, pruned_loss=0.1459, over 6842.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3151, pruned_loss=0.08467, over 1611328.92 frames. ], batch size: 71, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:29:34,157 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:29:34,600 INFO [train.py:901] (2/4) Epoch 10, batch 2050, loss[loss=0.241, simple_loss=0.3, pruned_loss=0.09094, over 7693.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3147, pruned_loss=0.0843, over 1611943.55 frames. ], batch size: 18, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:29:36,988 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. 
limit=5.0 +2023-02-06 08:29:50,349 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:29:58,215 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2359, 1.5652, 1.6474, 1.3887, 0.9980, 1.4870, 1.7824, 1.3911], + device='cuda:2'), covar=tensor([0.0488, 0.1228, 0.1592, 0.1417, 0.0603, 0.1503, 0.0655, 0.0651], + device='cuda:2'), in_proj_covar=tensor([0.0103, 0.0156, 0.0197, 0.0162, 0.0106, 0.0167, 0.0119, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 08:30:04,619 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.471e+02 3.084e+02 4.282e+02 1.276e+03, threshold=6.169e+02, percent-clipped=5.0 +2023-02-06 08:30:06,171 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1109, 1.6483, 1.3522, 1.6493, 1.4297, 1.1815, 1.1914, 1.3637], + device='cuda:2'), covar=tensor([0.0833, 0.0359, 0.1018, 0.0411, 0.0569, 0.1239, 0.0769, 0.0640], + device='cuda:2'), in_proj_covar=tensor([0.0342, 0.0232, 0.0312, 0.0296, 0.0304, 0.0317, 0.0337, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 08:30:07,353 INFO [train.py:901] (2/4) Epoch 10, batch 2100, loss[loss=0.1817, simple_loss=0.2553, pruned_loss=0.05406, over 7818.00 frames. ], tot_loss[loss=0.243, simple_loss=0.3161, pruned_loss=0.08494, over 1612292.35 frames. ], batch size: 19, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:30:18,144 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6708, 1.6558, 2.0119, 1.6739, 1.1684, 2.1155, 0.1866, 1.2176], + device='cuda:2'), covar=tensor([0.3124, 0.2405, 0.0584, 0.1832, 0.4669, 0.0516, 0.3799, 0.1936], + device='cuda:2'), in_proj_covar=tensor([0.0162, 0.0163, 0.0092, 0.0213, 0.0254, 0.0097, 0.0161, 0.0158], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:30:21,955 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9258, 6.0983, 5.2247, 2.5187, 5.3657, 5.8179, 5.6840, 5.2667], + device='cuda:2'), covar=tensor([0.0495, 0.0419, 0.0962, 0.4303, 0.0774, 0.0450, 0.1126, 0.0576], + device='cuda:2'), in_proj_covar=tensor([0.0444, 0.0358, 0.0379, 0.0465, 0.0367, 0.0352, 0.0361, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:30:24,017 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5181, 1.9847, 1.9955, 1.1820, 2.2054, 1.4399, 0.6123, 1.6926], + device='cuda:2'), covar=tensor([0.0453, 0.0207, 0.0200, 0.0402, 0.0234, 0.0595, 0.0586, 0.0207], + device='cuda:2'), in_proj_covar=tensor([0.0374, 0.0306, 0.0265, 0.0364, 0.0296, 0.0452, 0.0348, 0.0332], + device='cuda:2'), out_proj_covar=tensor([1.0950e-04, 8.7103e-05, 7.5990e-05, 1.0479e-04, 8.5882e-05, 1.4168e-04, + 1.0204e-04, 9.6580e-05], device='cuda:2') +2023-02-06 08:30:43,223 INFO [train.py:901] (2/4) Epoch 10, batch 2150, loss[loss=0.2318, simple_loss=0.2997, pruned_loss=0.08199, over 7657.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3161, pruned_loss=0.0847, over 1614117.50 frames. 
], batch size: 19, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:31:02,632 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:13,914 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.626e+02 3.226e+02 3.775e+02 6.882e+02, threshold=6.451e+02, percent-clipped=1.0 +2023-02-06 08:31:16,706 INFO [train.py:901] (2/4) Epoch 10, batch 2200, loss[loss=0.2456, simple_loss=0.3103, pruned_loss=0.09044, over 7984.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3165, pruned_loss=0.08452, over 1619982.10 frames. ], batch size: 21, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:31:22,705 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:33,574 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:39,396 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:50,364 INFO [train.py:901] (2/4) Epoch 10, batch 2250, loss[loss=0.2547, simple_loss=0.327, pruned_loss=0.0912, over 8460.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3158, pruned_loss=0.08436, over 1617986.03 frames. ], batch size: 27, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:31:50,575 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:32:21,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8022, 2.1969, 1.7406, 2.7170, 1.8936, 1.6406, 2.3270, 2.3643], + device='cuda:2'), covar=tensor([0.1457, 0.1021, 0.1897, 0.0571, 0.1222, 0.1797, 0.0777, 0.0730], + device='cuda:2'), in_proj_covar=tensor([0.0244, 0.0216, 0.0262, 0.0218, 0.0223, 0.0255, 0.0263, 0.0223], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 08:32:23,117 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.748e+02 3.468e+02 4.709e+02 1.048e+03, threshold=6.936e+02, percent-clipped=3.0 +2023-02-06 08:32:25,869 INFO [train.py:901] (2/4) Epoch 10, batch 2300, loss[loss=0.2409, simple_loss=0.3276, pruned_loss=0.07712, over 8520.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3157, pruned_loss=0.08493, over 1616302.02 frames. ], batch size: 28, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:32:42,300 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:32:59,557 INFO [train.py:901] (2/4) Epoch 10, batch 2350, loss[loss=0.2514, simple_loss=0.3243, pruned_loss=0.08924, over 8342.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3151, pruned_loss=0.08466, over 1611128.42 frames. 
], batch size: 26, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:33:24,635 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1762, 1.1178, 1.2280, 1.1260, 0.8711, 1.2760, 0.0348, 0.9273], + device='cuda:2'), covar=tensor([0.2945, 0.2111, 0.0720, 0.1597, 0.4273, 0.0714, 0.3592, 0.2051], + device='cuda:2'), in_proj_covar=tensor([0.0162, 0.0162, 0.0091, 0.0212, 0.0253, 0.0097, 0.0161, 0.0157], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:33:33,018 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.526e+02 3.215e+02 4.182e+02 1.054e+03, threshold=6.430e+02, percent-clipped=5.0 +2023-02-06 08:33:35,793 INFO [train.py:901] (2/4) Epoch 10, batch 2400, loss[loss=0.2611, simple_loss=0.3305, pruned_loss=0.09584, over 8453.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.3155, pruned_loss=0.08473, over 1612700.16 frames. ], batch size: 27, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:34:08,759 INFO [train.py:901] (2/4) Epoch 10, batch 2450, loss[loss=0.2584, simple_loss=0.3382, pruned_loss=0.08929, over 8486.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.316, pruned_loss=0.08509, over 1618335.48 frames. ], batch size: 28, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:34:40,830 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.613e+02 3.092e+02 4.227e+02 1.037e+03, threshold=6.184e+02, percent-clipped=5.0 +2023-02-06 08:34:43,537 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:34:44,708 INFO [train.py:901] (2/4) Epoch 10, batch 2500, loss[loss=0.2033, simple_loss=0.2835, pruned_loss=0.06155, over 8030.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3151, pruned_loss=0.08449, over 1615901.34 frames. ], batch size: 22, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:00,232 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:09,291 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 08:35:11,671 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:18,086 INFO [train.py:901] (2/4) Epoch 10, batch 2550, loss[loss=0.2394, simple_loss=0.3182, pruned_loss=0.08033, over 8197.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3147, pruned_loss=0.08449, over 1611610.82 frames. ], batch size: 23, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:18,200 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5928, 4.6039, 4.1256, 1.7815, 4.1582, 4.3013, 4.2070, 3.9509], + device='cuda:2'), covar=tensor([0.0711, 0.0497, 0.0989, 0.4991, 0.0737, 0.0666, 0.1069, 0.0872], + device='cuda:2'), in_proj_covar=tensor([0.0451, 0.0360, 0.0377, 0.0473, 0.0368, 0.0356, 0.0365, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:35:36,579 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:37,543 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-02-06 08:35:38,119 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:40,137 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5477, 2.0272, 3.5262, 1.2860, 2.4734, 2.1591, 1.6110, 2.4039], + device='cuda:2'), covar=tensor([0.1607, 0.2094, 0.0691, 0.3680, 0.1419, 0.2450, 0.1757, 0.1967], + device='cuda:2'), in_proj_covar=tensor([0.0477, 0.0496, 0.0532, 0.0566, 0.0603, 0.0543, 0.0463, 0.0603], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:35:49,134 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.664e+02 3.245e+02 3.791e+02 6.757e+02, threshold=6.490e+02, percent-clipped=2.0 +2023-02-06 08:35:51,832 INFO [train.py:901] (2/4) Epoch 10, batch 2600, loss[loss=0.2278, simple_loss=0.3078, pruned_loss=0.0739, over 7966.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3138, pruned_loss=0.08431, over 1609288.01 frames. ], batch size: 21, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:55,418 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:19,400 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:27,351 INFO [train.py:901] (2/4) Epoch 10, batch 2650, loss[loss=0.186, simple_loss=0.259, pruned_loss=0.05653, over 7799.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3142, pruned_loss=0.08439, over 1611191.69 frames. ], batch size: 19, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:36:30,000 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 08:36:40,126 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 08:36:52,043 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9411, 1.5527, 5.9457, 2.3495, 5.4026, 5.0068, 5.5911, 5.4559], + device='cuda:2'), covar=tensor([0.0314, 0.4513, 0.0318, 0.2850, 0.0909, 0.0694, 0.0363, 0.0415], + device='cuda:2'), in_proj_covar=tensor([0.0450, 0.0559, 0.0564, 0.0514, 0.0591, 0.0504, 0.0492, 0.0554], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:36:56,747 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:58,551 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.723e+02 3.413e+02 4.384e+02 8.455e+02, threshold=6.827e+02, percent-clipped=3.0 +2023-02-06 08:37:01,351 INFO [train.py:901] (2/4) Epoch 10, batch 2700, loss[loss=0.1954, simple_loss=0.2698, pruned_loss=0.06054, over 7788.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3126, pruned_loss=0.0831, over 1608204.52 frames. 
], batch size: 19, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:37:01,581 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6157, 1.7046, 2.1761, 1.5972, 1.0206, 2.1416, 0.3076, 1.1828], + device='cuda:2'), covar=tensor([0.2800, 0.2018, 0.0510, 0.2115, 0.5282, 0.0576, 0.3949, 0.2367], + device='cuda:2'), in_proj_covar=tensor([0.0162, 0.0163, 0.0093, 0.0215, 0.0255, 0.0098, 0.0163, 0.0159], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:37:02,240 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9105, 2.2169, 1.6734, 2.7545, 1.4443, 1.3838, 1.9748, 2.3699], + device='cuda:2'), covar=tensor([0.1032, 0.0881, 0.1435, 0.0499, 0.1287, 0.1853, 0.0951, 0.0804], + device='cuda:2'), in_proj_covar=tensor([0.0241, 0.0216, 0.0260, 0.0218, 0.0221, 0.0254, 0.0261, 0.0222], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 08:37:27,127 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7684, 1.8702, 2.2141, 1.7316, 1.1868, 2.2334, 0.2817, 1.3475], + device='cuda:2'), covar=tensor([0.2492, 0.1484, 0.0495, 0.1955, 0.4494, 0.0525, 0.3768, 0.1959], + device='cuda:2'), in_proj_covar=tensor([0.0162, 0.0164, 0.0093, 0.0215, 0.0256, 0.0098, 0.0164, 0.0159], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:37:37,698 INFO [train.py:901] (2/4) Epoch 10, batch 2750, loss[loss=0.2284, simple_loss=0.3013, pruned_loss=0.07779, over 7967.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3134, pruned_loss=0.08324, over 1612978.55 frames. ], batch size: 21, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:37:40,744 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 08:37:43,367 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 08:38:08,183 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.609e+02 3.111e+02 3.957e+02 1.084e+03, threshold=6.223e+02, percent-clipped=3.0 +2023-02-06 08:38:10,735 INFO [train.py:901] (2/4) Epoch 10, batch 2800, loss[loss=0.3058, simple_loss=0.3746, pruned_loss=0.1185, over 8189.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3141, pruned_loss=0.08377, over 1612089.07 frames. ], batch size: 23, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:38:14,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3787, 1.8050, 3.2947, 1.1645, 2.3492, 1.8739, 1.4243, 2.1353], + device='cuda:2'), covar=tensor([0.1754, 0.2225, 0.0647, 0.3867, 0.1478, 0.2772, 0.1812, 0.2218], + device='cuda:2'), in_proj_covar=tensor([0.0476, 0.0498, 0.0530, 0.0563, 0.0602, 0.0544, 0.0460, 0.0604], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:38:32,112 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 08:38:39,099 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:38:45,072 INFO [train.py:901] (2/4) Epoch 10, batch 2850, loss[loss=0.1948, simple_loss=0.2669, pruned_loss=0.06132, over 7450.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.314, pruned_loss=0.08323, over 1612498.32 frames. 
], batch size: 17, lr: 7.73e-03, grad_scale: 8.0 +2023-02-06 08:39:04,152 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.50 vs. limit=5.0 +2023-02-06 08:39:09,363 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:16,272 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:17,406 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.670e+02 3.172e+02 3.749e+02 6.038e+02, threshold=6.343e+02, percent-clipped=0.0 +2023-02-06 08:39:20,058 INFO [train.py:901] (2/4) Epoch 10, batch 2900, loss[loss=0.1874, simple_loss=0.2623, pruned_loss=0.05627, over 7792.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3129, pruned_loss=0.08249, over 1607922.25 frames. ], batch size: 19, lr: 7.73e-03, grad_scale: 8.0 +2023-02-06 08:39:32,776 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:49,779 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 08:39:53,280 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:53,736 INFO [train.py:901] (2/4) Epoch 10, batch 2950, loss[loss=0.2286, simple_loss=0.3143, pruned_loss=0.07147, over 8500.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.313, pruned_loss=0.08198, over 1604657.38 frames. ], batch size: 26, lr: 7.73e-03, grad_scale: 16.0 +2023-02-06 08:39:58,781 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:59,831 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 08:40:11,379 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:26,686 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.480e+02 3.030e+02 3.596e+02 1.304e+03, threshold=6.060e+02, percent-clipped=4.0 +2023-02-06 08:40:28,958 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:29,499 INFO [train.py:901] (2/4) Epoch 10, batch 3000, loss[loss=0.2487, simple_loss=0.3107, pruned_loss=0.09341, over 7653.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.313, pruned_loss=0.08197, over 1606886.12 frames. ], batch size: 19, lr: 7.73e-03, grad_scale: 16.0 +2023-02-06 08:40:29,499 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 08:40:41,881 INFO [train.py:935] (2/4) Epoch 10, validation: loss=0.1918, simple_loss=0.2916, pruned_loss=0.04599, over 944034.00 frames. +2023-02-06 08:40:41,882 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 08:41:02,448 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 08:41:08,740 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 08:41:15,547 INFO [train.py:901] (2/4) Epoch 10, batch 3050, loss[loss=0.2253, simple_loss=0.2944, pruned_loss=0.07809, over 7550.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3128, pruned_loss=0.08218, over 1607206.19 frames. 
], batch size: 18, lr: 7.72e-03, grad_scale: 16.0 +2023-02-06 08:41:35,987 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5655, 4.6156, 4.1211, 1.9141, 4.1287, 4.0573, 4.2300, 3.7919], + device='cuda:2'), covar=tensor([0.0825, 0.0615, 0.1138, 0.4930, 0.0843, 0.1034, 0.1201, 0.0920], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0361, 0.0376, 0.0468, 0.0368, 0.0354, 0.0368, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:41:44,322 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 08:41:47,924 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.518e+02 3.138e+02 4.468e+02 1.006e+03, threshold=6.276e+02, percent-clipped=13.0 +2023-02-06 08:41:50,031 INFO [train.py:901] (2/4) Epoch 10, batch 3100, loss[loss=0.1959, simple_loss=0.271, pruned_loss=0.06038, over 7287.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3132, pruned_loss=0.08189, over 1612377.22 frames. ], batch size: 16, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:42:25,562 INFO [train.py:901] (2/4) Epoch 10, batch 3150, loss[loss=0.2268, simple_loss=0.3003, pruned_loss=0.07662, over 8562.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.314, pruned_loss=0.08248, over 1617645.86 frames. ], batch size: 31, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:42:57,478 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.584e+02 3.323e+02 3.941e+02 8.938e+02, threshold=6.646e+02, percent-clipped=3.0 +2023-02-06 08:42:59,536 INFO [train.py:901] (2/4) Epoch 10, batch 3200, loss[loss=0.2198, simple_loss=0.2971, pruned_loss=0.07122, over 7805.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.315, pruned_loss=0.08343, over 1615059.93 frames. ], batch size: 20, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:43:09,324 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75961.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:26,966 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0804, 1.2633, 1.4373, 1.1967, 1.0242, 1.3080, 1.7019, 1.4493], + device='cuda:2'), covar=tensor([0.0564, 0.1459, 0.2034, 0.1683, 0.0665, 0.1737, 0.0768, 0.0712], + device='cuda:2'), in_proj_covar=tensor([0.0103, 0.0158, 0.0198, 0.0163, 0.0107, 0.0167, 0.0118, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 08:43:28,326 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:36,368 INFO [train.py:901] (2/4) Epoch 10, batch 3250, loss[loss=0.2393, simple_loss=0.3168, pruned_loss=0.08087, over 8726.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3143, pruned_loss=0.08342, over 1608165.09 frames. 
], batch size: 30, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:43:41,262 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:57,863 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:44:05,883 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3711, 1.9538, 3.0909, 2.3771, 2.6090, 2.2486, 1.7821, 1.3747], + device='cuda:2'), covar=tensor([0.3375, 0.3726, 0.0968, 0.2250, 0.1760, 0.1835, 0.1421, 0.3888], + device='cuda:2'), in_proj_covar=tensor([0.0868, 0.0842, 0.0709, 0.0827, 0.0916, 0.0780, 0.0698, 0.0758], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:44:09,020 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.688e+02 3.300e+02 3.989e+02 9.835e+02, threshold=6.601e+02, percent-clipped=4.0 +2023-02-06 08:44:11,016 INFO [train.py:901] (2/4) Epoch 10, batch 3300, loss[loss=0.2354, simple_loss=0.3144, pruned_loss=0.07819, over 8139.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3139, pruned_loss=0.08276, over 1610899.01 frames. ], batch size: 22, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:44:47,567 INFO [train.py:901] (2/4) Epoch 10, batch 3350, loss[loss=0.2314, simple_loss=0.305, pruned_loss=0.07885, over 8325.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3141, pruned_loss=0.08307, over 1609739.53 frames. ], batch size: 25, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:45:01,648 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7763, 3.7741, 2.2474, 2.7809, 2.9090, 1.6834, 2.6955, 2.9741], + device='cuda:2'), covar=tensor([0.1543, 0.0347, 0.1004, 0.0649, 0.0671, 0.1509, 0.1026, 0.0937], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0237, 0.0314, 0.0299, 0.0310, 0.0324, 0.0341, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 08:45:18,633 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.652e+02 3.239e+02 4.192e+02 7.352e+02, threshold=6.477e+02, percent-clipped=1.0 +2023-02-06 08:45:20,669 INFO [train.py:901] (2/4) Epoch 10, batch 3400, loss[loss=0.2454, simple_loss=0.3312, pruned_loss=0.07978, over 8467.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3142, pruned_loss=0.08318, over 1607232.22 frames. ], batch size: 25, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:45:37,971 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0947, 1.4768, 1.5264, 1.4255, 1.1072, 1.3889, 1.7646, 1.7620], + device='cuda:2'), covar=tensor([0.0470, 0.1129, 0.1682, 0.1297, 0.0549, 0.1417, 0.0630, 0.0508], + device='cuda:2'), in_proj_covar=tensor([0.0102, 0.0157, 0.0197, 0.0162, 0.0107, 0.0166, 0.0117, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 08:45:55,812 INFO [train.py:901] (2/4) Epoch 10, batch 3450, loss[loss=0.2537, simple_loss=0.3371, pruned_loss=0.08513, over 8770.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3141, pruned_loss=0.08292, over 1611791.31 frames. 
], batch size: 30, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:46:30,137 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.394e+02 3.045e+02 3.881e+02 9.338e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-06 08:46:32,218 INFO [train.py:901] (2/4) Epoch 10, batch 3500, loss[loss=0.1727, simple_loss=0.25, pruned_loss=0.04771, over 7222.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3131, pruned_loss=0.08246, over 1609597.83 frames. ], batch size: 16, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:46:48,069 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 08:46:59,307 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76287.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:47:06,727 INFO [train.py:901] (2/4) Epoch 10, batch 3550, loss[loss=0.198, simple_loss=0.2778, pruned_loss=0.05911, over 7662.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3136, pruned_loss=0.08291, over 1607694.96 frames. ], batch size: 19, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:47:14,511 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9173, 1.5352, 5.9418, 2.0604, 5.3173, 4.9546, 5.5132, 5.3656], + device='cuda:2'), covar=tensor([0.0388, 0.4438, 0.0334, 0.3223, 0.0906, 0.0813, 0.0404, 0.0460], + device='cuda:2'), in_proj_covar=tensor([0.0455, 0.0566, 0.0574, 0.0525, 0.0603, 0.0512, 0.0498, 0.0563], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:47:17,269 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:47:39,378 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76341.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:47:42,025 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 2.725e+02 3.470e+02 4.316e+02 7.747e+02, threshold=6.941e+02, percent-clipped=6.0 +2023-02-06 08:47:44,159 INFO [train.py:901] (2/4) Epoch 10, batch 3600, loss[loss=0.2633, simple_loss=0.3347, pruned_loss=0.09595, over 8673.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.315, pruned_loss=0.08387, over 1613567.68 frames. ], batch size: 49, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:48:01,496 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.6943, 1.3327, 3.9397, 1.5173, 3.3481, 3.2278, 3.4722, 3.3568], + device='cuda:2'), covar=tensor([0.0635, 0.4390, 0.0571, 0.3245, 0.1300, 0.0935, 0.0653, 0.0792], + device='cuda:2'), in_proj_covar=tensor([0.0451, 0.0566, 0.0569, 0.0521, 0.0599, 0.0509, 0.0497, 0.0562], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:48:18,380 INFO [train.py:901] (2/4) Epoch 10, batch 3650, loss[loss=0.1609, simple_loss=0.2405, pruned_loss=0.04067, over 7703.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3132, pruned_loss=0.08305, over 1610503.06 frames. ], batch size: 18, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:50,989 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.530e+02 3.057e+02 3.624e+02 8.995e+02, threshold=6.114e+02, percent-clipped=3.0 +2023-02-06 08:48:52,994 INFO [train.py:901] (2/4) Epoch 10, batch 3700, loss[loss=0.1926, simple_loss=0.269, pruned_loss=0.05808, over 7788.00 frames. 
], tot_loss[loss=0.2414, simple_loss=0.3145, pruned_loss=0.08413, over 1609843.74 frames. ], batch size: 19, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:54,939 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 08:49:28,897 INFO [train.py:901] (2/4) Epoch 10, batch 3750, loss[loss=0.2408, simple_loss=0.3217, pruned_loss=0.07992, over 8496.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3151, pruned_loss=0.08483, over 1610761.86 frames. ], batch size: 49, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:00,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 2.765e+02 3.416e+02 4.278e+02 1.031e+03, threshold=6.832e+02, percent-clipped=4.0 +2023-02-06 08:50:02,995 INFO [train.py:901] (2/4) Epoch 10, batch 3800, loss[loss=0.2811, simple_loss=0.3455, pruned_loss=0.1083, over 8471.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3169, pruned_loss=0.08569, over 1611622.26 frames. ], batch size: 49, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:38,441 INFO [train.py:901] (2/4) Epoch 10, batch 3850, loss[loss=0.2323, simple_loss=0.3115, pruned_loss=0.07652, over 8298.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3157, pruned_loss=0.08505, over 1609619.52 frames. ], batch size: 23, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:50:59,043 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 08:51:00,491 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76631.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:51:09,921 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.563e+02 3.093e+02 4.191e+02 1.151e+03, threshold=6.187e+02, percent-clipped=5.0 +2023-02-06 08:51:11,826 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 08:51:11,981 INFO [train.py:901] (2/4) Epoch 10, batch 3900, loss[loss=0.1878, simple_loss=0.2697, pruned_loss=0.0529, over 7444.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3151, pruned_loss=0.08444, over 1608523.80 frames. ], batch size: 17, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:51:17,438 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:51:38,044 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76685.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:51:48,068 INFO [train.py:901] (2/4) Epoch 10, batch 3950, loss[loss=0.2622, simple_loss=0.3338, pruned_loss=0.09531, over 8186.00 frames. ], tot_loss[loss=0.24, simple_loss=0.3134, pruned_loss=0.08326, over 1609785.29 frames. 
], batch size: 23, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:51:55,075 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5262, 1.6915, 1.9371, 1.4633, 1.0955, 2.0013, 0.1457, 1.1506], + device='cuda:2'), covar=tensor([0.2715, 0.2056, 0.0611, 0.2241, 0.4770, 0.0501, 0.3592, 0.2053], + device='cuda:2'), in_proj_covar=tensor([0.0159, 0.0161, 0.0090, 0.0208, 0.0249, 0.0098, 0.0157, 0.0156], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:52:19,558 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.430e+02 3.097e+02 3.693e+02 7.444e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-06 08:52:20,438 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76746.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:52:20,462 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6556, 1.7539, 2.2376, 1.6542, 1.0849, 2.1839, 0.4328, 1.2782], + device='cuda:2'), covar=tensor([0.2702, 0.1769, 0.0492, 0.2426, 0.4903, 0.0553, 0.3718, 0.2429], + device='cuda:2'), in_proj_covar=tensor([0.0161, 0.0163, 0.0092, 0.0211, 0.0253, 0.0099, 0.0160, 0.0158], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:52:21,615 INFO [train.py:901] (2/4) Epoch 10, batch 4000, loss[loss=0.2059, simple_loss=0.2837, pruned_loss=0.06409, over 7818.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.3126, pruned_loss=0.08261, over 1610359.01 frames. ], batch size: 20, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:52:37,416 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:52:56,079 INFO [train.py:901] (2/4) Epoch 10, batch 4050, loss[loss=0.2294, simple_loss=0.3048, pruned_loss=0.07695, over 8735.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3137, pruned_loss=0.08309, over 1612775.66 frames. ], batch size: 34, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:52:57,682 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76800.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:53:05,596 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8333, 1.9422, 1.6752, 2.3121, 1.1596, 1.3563, 1.6540, 2.0172], + device='cuda:2'), covar=tensor([0.0777, 0.0917, 0.1119, 0.0531, 0.1226, 0.1575, 0.1000, 0.0939], + device='cuda:2'), in_proj_covar=tensor([0.0236, 0.0214, 0.0256, 0.0217, 0.0220, 0.0250, 0.0260, 0.0221], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 08:53:29,319 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.637e+02 3.294e+02 4.061e+02 9.505e+02, threshold=6.587e+02, percent-clipped=7.0 +2023-02-06 08:53:31,234 INFO [train.py:901] (2/4) Epoch 10, batch 4100, loss[loss=0.2606, simple_loss=0.3352, pruned_loss=0.09301, over 8191.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.315, pruned_loss=0.08369, over 1614551.61 frames. ], batch size: 23, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:53:37,254 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:54:04,757 INFO [train.py:901] (2/4) Epoch 10, batch 4150, loss[loss=0.2221, simple_loss=0.3048, pruned_loss=0.06963, over 8075.00 frames. 
], tot_loss[loss=0.2416, simple_loss=0.3156, pruned_loss=0.08381, over 1616839.73 frames. ], batch size: 21, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:54:38,758 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.505e+02 2.967e+02 3.617e+02 8.554e+02, threshold=5.933e+02, percent-clipped=2.0 +2023-02-06 08:54:39,010 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2888, 1.5463, 1.6492, 1.0137, 1.7069, 1.2411, 0.3304, 1.4617], + device='cuda:2'), covar=tensor([0.0328, 0.0222, 0.0167, 0.0286, 0.0208, 0.0585, 0.0535, 0.0146], + device='cuda:2'), in_proj_covar=tensor([0.0369, 0.0302, 0.0262, 0.0367, 0.0296, 0.0454, 0.0346, 0.0331], + device='cuda:2'), out_proj_covar=tensor([1.0711e-04, 8.5533e-05, 7.4512e-05, 1.0493e-04, 8.5728e-05, 1.4126e-04, + 1.0067e-04, 9.5930e-05], device='cuda:2') +2023-02-06 08:54:40,847 INFO [train.py:901] (2/4) Epoch 10, batch 4200, loss[loss=0.238, simple_loss=0.3211, pruned_loss=0.07743, over 8103.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3147, pruned_loss=0.08321, over 1619831.89 frames. ], batch size: 23, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:55:00,917 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 08:55:14,227 INFO [train.py:901] (2/4) Epoch 10, batch 4250, loss[loss=0.2441, simple_loss=0.3261, pruned_loss=0.0811, over 8344.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3147, pruned_loss=0.08344, over 1616000.18 frames. ], batch size: 26, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:17,220 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77002.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:23,798 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 08:55:31,990 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3689, 1.2834, 2.3695, 1.0897, 2.0034, 2.5420, 2.6089, 2.1331], + device='cuda:2'), covar=tensor([0.0918, 0.1096, 0.0452, 0.1907, 0.0651, 0.0372, 0.0571, 0.0744], + device='cuda:2'), in_proj_covar=tensor([0.0255, 0.0292, 0.0250, 0.0280, 0.0267, 0.0232, 0.0322, 0.0284], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 08:55:34,051 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77027.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:34,062 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:46,495 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.530e+02 3.131e+02 3.743e+02 6.568e+02, threshold=6.262e+02, percent-clipped=1.0 +2023-02-06 08:55:48,445 INFO [train.py:901] (2/4) Epoch 10, batch 4300, loss[loss=0.2839, simple_loss=0.3477, pruned_loss=0.1101, over 8576.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.315, pruned_loss=0.08382, over 1618556.05 frames. ], batch size: 39, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:52,731 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:53,059 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.03 vs. 
limit=5.0 +2023-02-06 08:55:55,459 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77056.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:56:08,875 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6525, 2.2366, 3.5330, 2.6132, 3.0758, 2.3655, 1.7853, 1.6794], + device='cuda:2'), covar=tensor([0.3533, 0.3980, 0.1165, 0.2535, 0.1945, 0.2053, 0.1698, 0.4477], + device='cuda:2'), in_proj_covar=tensor([0.0867, 0.0844, 0.0710, 0.0826, 0.0912, 0.0773, 0.0695, 0.0750], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:56:12,966 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77081.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:56:18,180 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7755, 3.7606, 3.3605, 1.7942, 3.3039, 3.3497, 3.4173, 3.0866], + device='cuda:2'), covar=tensor([0.0944, 0.0706, 0.1105, 0.4728, 0.0947, 0.0918, 0.1353, 0.0955], + device='cuda:2'), in_proj_covar=tensor([0.0453, 0.0360, 0.0372, 0.0472, 0.0366, 0.0357, 0.0365, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 08:56:23,970 INFO [train.py:901] (2/4) Epoch 10, batch 4350, loss[loss=0.228, simple_loss=0.2964, pruned_loss=0.07976, over 7783.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3156, pruned_loss=0.08422, over 1618010.98 frames. ], batch size: 19, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:56:33,984 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:56:53,837 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 08:56:54,997 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.751e+02 3.331e+02 4.491e+02 1.022e+03, threshold=6.663e+02, percent-clipped=8.0 +2023-02-06 08:56:57,037 INFO [train.py:901] (2/4) Epoch 10, batch 4400, loss[loss=0.2136, simple_loss=0.282, pruned_loss=0.07255, over 7445.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.315, pruned_loss=0.08408, over 1611045.03 frames. ], batch size: 17, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:57:02,162 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 08:57:10,783 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0 +2023-02-06 08:57:33,158 INFO [train.py:901] (2/4) Epoch 10, batch 4450, loss[loss=0.2248, simple_loss=0.3003, pruned_loss=0.07461, over 8104.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3156, pruned_loss=0.08435, over 1608891.26 frames. ], batch size: 23, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:57:35,380 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:57:36,002 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-06 08:57:45,673 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4378, 1.9682, 3.0756, 2.3907, 2.5994, 2.1797, 1.7974, 1.4252], + device='cuda:2'), covar=tensor([0.3407, 0.3653, 0.1024, 0.2369, 0.1922, 0.1971, 0.1601, 0.3845], + device='cuda:2'), in_proj_covar=tensor([0.0856, 0.0833, 0.0702, 0.0814, 0.0903, 0.0765, 0.0687, 0.0741], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 08:57:54,214 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:57:55,873 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.30 vs. limit=5.0 +2023-02-06 08:58:04,751 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.784e+02 3.289e+02 4.035e+02 8.452e+02, threshold=6.579e+02, percent-clipped=2.0 +2023-02-06 08:58:06,784 INFO [train.py:901] (2/4) Epoch 10, batch 4500, loss[loss=0.2569, simple_loss=0.3226, pruned_loss=0.09558, over 8276.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.315, pruned_loss=0.08392, over 1611547.43 frames. ], batch size: 23, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:27,560 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 08:58:38,001 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:41,446 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:43,378 INFO [train.py:901] (2/4) Epoch 10, batch 4550, loss[loss=0.196, simple_loss=0.2694, pruned_loss=0.06136, over 8051.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3134, pruned_loss=0.08317, over 1606530.79 frames. ], batch size: 20, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:55,662 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:59:14,841 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.639e+02 3.213e+02 4.072e+02 8.769e+02, threshold=6.426e+02, percent-clipped=3.0 +2023-02-06 08:59:16,946 INFO [train.py:901] (2/4) Epoch 10, batch 4600, loss[loss=0.2306, simple_loss=0.3079, pruned_loss=0.07667, over 8251.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3136, pruned_loss=0.08248, over 1607626.06 frames. ], batch size: 24, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:59:50,935 INFO [train.py:901] (2/4) Epoch 10, batch 4650, loss[loss=0.2052, simple_loss=0.2688, pruned_loss=0.07076, over 7791.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3141, pruned_loss=0.08361, over 1608738.46 frames. ], batch size: 19, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:25,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 3.048e+02 3.591e+02 4.434e+02 8.168e+02, threshold=7.182e+02, percent-clipped=8.0 +2023-02-06 09:00:27,546 INFO [train.py:901] (2/4) Epoch 10, batch 4700, loss[loss=0.2514, simple_loss=0.3299, pruned_loss=0.08639, over 8470.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3136, pruned_loss=0.08323, over 1608833.65 frames. 
], batch size: 27, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:33,878 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:00:40,479 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9659, 1.5051, 1.6600, 1.2871, 1.0917, 1.4002, 1.6933, 1.4077], + device='cuda:2'), covar=tensor([0.0491, 0.1185, 0.1633, 0.1410, 0.0589, 0.1517, 0.0630, 0.0646], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0194, 0.0160, 0.0106, 0.0165, 0.0118, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 09:01:02,740 INFO [train.py:901] (2/4) Epoch 10, batch 4750, loss[loss=0.2223, simple_loss=0.3067, pruned_loss=0.0689, over 8473.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3144, pruned_loss=0.08331, over 1612308.32 frames. ], batch size: 25, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:04,954 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.8859, 1.0518, 0.9781, 0.5851, 1.0511, 0.8153, 0.0798, 1.0172], + device='cuda:2'), covar=tensor([0.0291, 0.0225, 0.0208, 0.0337, 0.0238, 0.0642, 0.0525, 0.0187], + device='cuda:2'), in_proj_covar=tensor([0.0376, 0.0309, 0.0263, 0.0374, 0.0302, 0.0460, 0.0351, 0.0338], + device='cuda:2'), out_proj_covar=tensor([1.0908e-04, 8.7775e-05, 7.5022e-05, 1.0704e-04, 8.7344e-05, 1.4314e-04, + 1.0219e-04, 9.7788e-05], device='cuda:2') +2023-02-06 09:01:28,504 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 09:01:30,522 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 09:01:31,372 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3364, 1.2034, 1.4584, 1.1143, 0.8423, 1.2534, 1.2082, 0.9849], + device='cuda:2'), covar=tensor([0.0546, 0.1320, 0.1678, 0.1462, 0.0582, 0.1570, 0.0693, 0.0687], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0194, 0.0160, 0.0105, 0.0165, 0.0118, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 09:01:35,880 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.648e+02 3.312e+02 4.103e+02 1.054e+03, threshold=6.623e+02, percent-clipped=5.0 +2023-02-06 09:01:37,936 INFO [train.py:901] (2/4) Epoch 10, batch 4800, loss[loss=0.1921, simple_loss=0.2678, pruned_loss=0.05821, over 7809.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3143, pruned_loss=0.08339, over 1612424.44 frames. 
], batch size: 20, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:54,063 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,106 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,626 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:04,821 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6265, 1.7679, 2.0076, 1.5896, 1.1557, 2.0501, 0.1732, 1.2167], + device='cuda:2'), covar=tensor([0.2556, 0.1627, 0.0461, 0.1717, 0.4451, 0.0490, 0.3621, 0.1953], + device='cuda:2'), in_proj_covar=tensor([0.0160, 0.0163, 0.0092, 0.0212, 0.0251, 0.0098, 0.0162, 0.0160], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:02:10,897 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:11,384 INFO [train.py:901] (2/4) Epoch 10, batch 4850, loss[loss=0.2783, simple_loss=0.3432, pruned_loss=0.1067, over 6834.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3145, pruned_loss=0.08399, over 1609685.66 frames. ], batch size: 72, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:16,316 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 09:02:38,005 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:38,639 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:42,021 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:46,052 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.616e+02 3.128e+02 3.870e+02 7.279e+02, threshold=6.256e+02, percent-clipped=1.0 +2023-02-06 09:02:48,054 INFO [train.py:901] (2/4) Epoch 10, batch 4900, loss[loss=0.27, simple_loss=0.3326, pruned_loss=0.1037, over 8341.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3145, pruned_loss=0.08388, over 1610623.35 frames. ], batch size: 26, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:15,424 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:20,013 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:21,734 INFO [train.py:901] (2/4) Epoch 10, batch 4950, loss[loss=0.2448, simple_loss=0.326, pruned_loss=0.08179, over 8208.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3148, pruned_loss=0.08382, over 1612194.90 frames. 
], batch size: 23, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:33,221 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5152, 2.8116, 1.9680, 2.2562, 2.3093, 1.4861, 1.9795, 2.1184], + device='cuda:2'), covar=tensor([0.1322, 0.0300, 0.0900, 0.0539, 0.0579, 0.1396, 0.0954, 0.0932], + device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0233, 0.0310, 0.0296, 0.0303, 0.0324, 0.0337, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:03:54,515 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 2.775e+02 3.348e+02 4.012e+02 9.680e+02, threshold=6.695e+02, percent-clipped=4.0 +2023-02-06 09:03:57,178 INFO [train.py:901] (2/4) Epoch 10, batch 5000, loss[loss=0.2524, simple_loss=0.3276, pruned_loss=0.08864, over 7931.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3141, pruned_loss=0.08341, over 1607554.08 frames. ], batch size: 20, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:58,709 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:01,939 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:25,735 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9834, 1.6159, 2.2155, 1.7825, 1.9578, 1.8557, 1.5254, 0.7071], + device='cuda:2'), covar=tensor([0.3441, 0.3113, 0.1091, 0.2100, 0.1610, 0.1844, 0.1488, 0.3275], + device='cuda:2'), in_proj_covar=tensor([0.0882, 0.0854, 0.0721, 0.0834, 0.0930, 0.0785, 0.0701, 0.0761], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:04:30,707 INFO [train.py:901] (2/4) Epoch 10, batch 5050, loss[loss=0.2558, simple_loss=0.3234, pruned_loss=0.09411, over 7808.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3146, pruned_loss=0.08343, over 1612964.14 frames. ], batch size: 19, lr: 7.62e-03, grad_scale: 8.0 +2023-02-06 09:04:51,130 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:52,869 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 09:05:02,962 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.592e+02 3.051e+02 4.098e+02 9.089e+02, threshold=6.102e+02, percent-clipped=4.0 +2023-02-06 09:05:05,645 INFO [train.py:901] (2/4) Epoch 10, batch 5100, loss[loss=0.2617, simple_loss=0.3309, pruned_loss=0.09621, over 8334.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3133, pruned_loss=0.08254, over 1609225.26 frames. 
], batch size: 26, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:05:09,144 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:05:25,006 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1837, 1.3215, 1.4931, 1.1800, 0.7030, 1.3216, 1.1805, 1.1478], + device='cuda:2'), covar=tensor([0.0526, 0.1225, 0.1609, 0.1344, 0.0526, 0.1415, 0.0648, 0.0605], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0195, 0.0160, 0.0105, 0.0165, 0.0118, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:2') +2023-02-06 09:05:31,888 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 09:05:34,963 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4737, 1.5243, 1.7442, 1.3811, 1.0522, 1.7181, 0.0795, 1.1075], + device='cuda:2'), covar=tensor([0.2354, 0.1816, 0.0540, 0.1724, 0.4553, 0.0711, 0.3768, 0.2077], + device='cuda:2'), in_proj_covar=tensor([0.0160, 0.0161, 0.0091, 0.0208, 0.0247, 0.0098, 0.0159, 0.0159], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:05:40,061 INFO [train.py:901] (2/4) Epoch 10, batch 5150, loss[loss=0.2111, simple_loss=0.2696, pruned_loss=0.07627, over 7549.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3129, pruned_loss=0.08235, over 1610142.96 frames. ], batch size: 18, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:06:11,282 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:11,738 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.805e+02 3.349e+02 3.898e+02 8.134e+02, threshold=6.697e+02, percent-clipped=4.0 +2023-02-06 09:06:13,803 INFO [train.py:901] (2/4) Epoch 10, batch 5200, loss[loss=0.2714, simple_loss=0.3465, pruned_loss=0.09814, over 8339.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3136, pruned_loss=0.08269, over 1612605.04 frames. ], batch size: 25, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:06:29,755 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:35,812 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:50,964 INFO [train.py:901] (2/4) Epoch 10, batch 5250, loss[loss=0.2376, simple_loss=0.3194, pruned_loss=0.07788, over 8548.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3141, pruned_loss=0.08316, over 1613791.48 frames. ], batch size: 39, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:06:56,856 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 09:06:57,793 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:00,625 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:14,858 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:17,419 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:19,992 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:23,911 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.672e+02 3.378e+02 4.041e+02 9.848e+02, threshold=6.756e+02, percent-clipped=3.0 +2023-02-06 09:07:25,979 INFO [train.py:901] (2/4) Epoch 10, batch 5300, loss[loss=0.2074, simple_loss=0.2946, pruned_loss=0.06009, over 8242.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3142, pruned_loss=0.08355, over 1607721.62 frames. ], batch size: 22, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:07:31,454 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0074, 1.9890, 3.9340, 1.6790, 2.5110, 4.5300, 4.5626, 3.7037], + device='cuda:2'), covar=tensor([0.1216, 0.1416, 0.0434, 0.2099, 0.1064, 0.0263, 0.0387, 0.0757], + device='cuda:2'), in_proj_covar=tensor([0.0255, 0.0292, 0.0252, 0.0282, 0.0262, 0.0229, 0.0323, 0.0286], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:07:37,478 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1587, 1.9008, 3.5496, 2.7104, 2.9326, 1.7878, 1.5567, 1.8535], + device='cuda:2'), covar=tensor([0.5471, 0.5540, 0.1100, 0.2621, 0.2578, 0.3683, 0.3026, 0.4561], + device='cuda:2'), in_proj_covar=tensor([0.0871, 0.0847, 0.0709, 0.0824, 0.0918, 0.0778, 0.0694, 0.0750], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:07:43,498 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8393, 2.1676, 1.6426, 2.5672, 1.1208, 1.2994, 1.6881, 2.0865], + device='cuda:2'), covar=tensor([0.0780, 0.0841, 0.1157, 0.0435, 0.1286, 0.1575, 0.1084, 0.0786], + device='cuda:2'), in_proj_covar=tensor([0.0242, 0.0214, 0.0258, 0.0217, 0.0222, 0.0252, 0.0262, 0.0221], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 09:07:46,779 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0274, 1.4578, 4.2909, 1.7025, 2.2610, 4.9550, 4.9668, 4.1891], + device='cuda:2'), covar=tensor([0.1137, 0.1726, 0.0253, 0.2076, 0.1081, 0.0178, 0.0363, 0.0587], + device='cuda:2'), in_proj_covar=tensor([0.0256, 0.0294, 0.0253, 0.0283, 0.0263, 0.0230, 0.0324, 0.0287], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:07:57,648 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:08:00,797 INFO [train.py:901] (2/4) Epoch 10, batch 5350, loss[loss=0.247, simple_loss=0.3266, pruned_loss=0.08373, over 8503.00 frames. 
], tot_loss[loss=0.241, simple_loss=0.3144, pruned_loss=0.08377, over 1609772.78 frames. ], batch size: 26, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:08:12,906 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4834, 2.7844, 1.7854, 2.2232, 2.2283, 1.5610, 2.0622, 2.2304], + device='cuda:2'), covar=tensor([0.1527, 0.0268, 0.1052, 0.0619, 0.0723, 0.1367, 0.0989, 0.0883], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0234, 0.0310, 0.0300, 0.0307, 0.0325, 0.0340, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:08:34,365 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.713e+02 3.238e+02 4.266e+02 6.892e+02, threshold=6.476e+02, percent-clipped=1.0 +2023-02-06 09:08:35,743 INFO [train.py:901] (2/4) Epoch 10, batch 5400, loss[loss=0.2196, simple_loss=0.2958, pruned_loss=0.07173, over 8255.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3153, pruned_loss=0.08406, over 1616278.53 frames. ], batch size: 22, lr: 7.61e-03, grad_scale: 8.0 +2023-02-06 09:08:40,010 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:08:49,864 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3769, 4.4227, 3.9565, 1.9907, 3.7143, 3.8935, 3.9477, 3.5457], + device='cuda:2'), covar=tensor([0.0854, 0.0617, 0.1159, 0.4419, 0.1125, 0.0798, 0.1314, 0.0900], + device='cuda:2'), in_proj_covar=tensor([0.0459, 0.0366, 0.0374, 0.0476, 0.0371, 0.0363, 0.0369, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:09:08,708 INFO [train.py:901] (2/4) Epoch 10, batch 5450, loss[loss=0.2795, simple_loss=0.331, pruned_loss=0.114, over 6553.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3155, pruned_loss=0.08466, over 1612605.73 frames. ], batch size: 71, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:43,437 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.697e+02 3.396e+02 4.413e+02 8.943e+02, threshold=6.791e+02, percent-clipped=7.0 +2023-02-06 09:09:44,805 INFO [train.py:901] (2/4) Epoch 10, batch 5500, loss[loss=0.2643, simple_loss=0.3383, pruned_loss=0.09519, over 8523.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3145, pruned_loss=0.08405, over 1613621.56 frames. ], batch size: 28, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:46,325 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:09:46,834 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 09:10:18,552 INFO [train.py:901] (2/4) Epoch 10, batch 5550, loss[loss=0.2475, simple_loss=0.3064, pruned_loss=0.09423, over 7248.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3148, pruned_loss=0.08442, over 1610405.46 frames. ], batch size: 16, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:52,572 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.563e+02 3.102e+02 4.076e+02 7.679e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 09:10:54,679 INFO [train.py:901] (2/4) Epoch 10, batch 5600, loss[loss=0.2776, simple_loss=0.3426, pruned_loss=0.1063, over 7203.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3146, pruned_loss=0.08461, over 1610276.85 frames. 
], batch size: 71, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:55,586 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:07,556 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8435, 5.9915, 5.1487, 2.5921, 5.1889, 5.6183, 5.4990, 5.2823], + device='cuda:2'), covar=tensor([0.0565, 0.0400, 0.0931, 0.4402, 0.0763, 0.0720, 0.1048, 0.0637], + device='cuda:2'), in_proj_covar=tensor([0.0454, 0.0357, 0.0371, 0.0472, 0.0366, 0.0361, 0.0368, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:11:12,345 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:28,345 INFO [train.py:901] (2/4) Epoch 10, batch 5650, loss[loss=0.2227, simple_loss=0.2998, pruned_loss=0.07274, over 8250.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3135, pruned_loss=0.08344, over 1610704.82 frames. ], batch size: 22, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:11:32,081 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6156, 4.6068, 4.1189, 1.9838, 4.0405, 4.1180, 4.2157, 3.8446], + device='cuda:2'), covar=tensor([0.0723, 0.0555, 0.1071, 0.4683, 0.0823, 0.0918, 0.1200, 0.0830], + device='cuda:2'), in_proj_covar=tensor([0.0454, 0.0357, 0.0371, 0.0473, 0.0365, 0.0361, 0.0368, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:11:36,902 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:48,314 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 09:11:54,463 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:12:02,397 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.443e+02 2.913e+02 3.480e+02 5.594e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-06 09:12:03,748 INFO [train.py:901] (2/4) Epoch 10, batch 5700, loss[loss=0.2307, simple_loss=0.3088, pruned_loss=0.07627, over 8032.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3136, pruned_loss=0.08253, over 1617369.98 frames. 
], batch size: 22, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:12:16,129 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4733, 1.3872, 2.1740, 1.1846, 1.7197, 2.3984, 2.3578, 2.1404], + device='cuda:2'), covar=tensor([0.0867, 0.1195, 0.0633, 0.1688, 0.1178, 0.0301, 0.0688, 0.0565], + device='cuda:2'), in_proj_covar=tensor([0.0257, 0.0294, 0.0255, 0.0285, 0.0269, 0.0233, 0.0326, 0.0285], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:12:25,608 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:12:36,615 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5739, 2.1473, 3.5061, 1.3427, 2.4852, 2.0408, 1.7349, 2.2819], + device='cuda:2'), covar=tensor([0.1680, 0.1894, 0.0669, 0.3962, 0.1511, 0.2699, 0.1712, 0.2247], + device='cuda:2'), in_proj_covar=tensor([0.0484, 0.0504, 0.0528, 0.0572, 0.0611, 0.0548, 0.0463, 0.0602], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:12:39,148 INFO [train.py:901] (2/4) Epoch 10, batch 5750, loss[loss=0.1992, simple_loss=0.273, pruned_loss=0.06268, over 7926.00 frames. ], tot_loss[loss=0.24, simple_loss=0.314, pruned_loss=0.08301, over 1618080.52 frames. ], batch size: 20, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:12:53,267 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 09:13:11,252 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.793e+02 3.478e+02 4.404e+02 1.244e+03, threshold=6.955e+02, percent-clipped=11.0 +2023-02-06 09:13:12,608 INFO [train.py:901] (2/4) Epoch 10, batch 5800, loss[loss=0.2358, simple_loss=0.3029, pruned_loss=0.08434, over 6833.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3132, pruned_loss=0.08242, over 1617939.91 frames. ], batch size: 15, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:13:29,731 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3647, 2.3854, 1.9323, 2.9286, 1.4615, 1.6395, 2.0601, 2.3689], + device='cuda:2'), covar=tensor([0.0616, 0.0825, 0.1040, 0.0381, 0.1148, 0.1421, 0.0978, 0.0811], + device='cuda:2'), in_proj_covar=tensor([0.0239, 0.0211, 0.0257, 0.0216, 0.0217, 0.0253, 0.0256, 0.0221], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 09:13:31,639 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:13:45,423 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:13:48,020 INFO [train.py:901] (2/4) Epoch 10, batch 5850, loss[loss=0.2514, simple_loss=0.3138, pruned_loss=0.09449, over 7654.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.314, pruned_loss=0.08266, over 1618405.15 frames. 
], batch size: 19, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:14:12,863 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7247, 2.1585, 4.3699, 1.3851, 2.8874, 2.1911, 1.7769, 2.6048], + device='cuda:2'), covar=tensor([0.1637, 0.2192, 0.0748, 0.3711, 0.1614, 0.2752, 0.1726, 0.2497], + device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0507, 0.0530, 0.0574, 0.0614, 0.0549, 0.0465, 0.0608], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:14:16,179 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2230, 1.4541, 2.1708, 1.1319, 1.4619, 1.5160, 1.3702, 1.3943], + device='cuda:2'), covar=tensor([0.1819, 0.2194, 0.0810, 0.3854, 0.1698, 0.2952, 0.1865, 0.1996], + device='cuda:2'), in_proj_covar=tensor([0.0484, 0.0507, 0.0530, 0.0573, 0.0613, 0.0548, 0.0464, 0.0607], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:14:19,903 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.589e+02 3.164e+02 4.281e+02 9.296e+02, threshold=6.329e+02, percent-clipped=5.0 +2023-02-06 09:14:21,268 INFO [train.py:901] (2/4) Epoch 10, batch 5900, loss[loss=0.2563, simple_loss=0.3225, pruned_loss=0.09506, over 8567.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3128, pruned_loss=0.08208, over 1619376.43 frames. ], batch size: 39, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:14:43,766 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 09:14:44,475 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. limit=5.0 +2023-02-06 09:14:57,572 INFO [train.py:901] (2/4) Epoch 10, batch 5950, loss[loss=0.2814, simple_loss=0.3488, pruned_loss=0.107, over 8183.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3124, pruned_loss=0.08157, over 1617186.67 frames. ], batch size: 23, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:15:05,427 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:15:30,052 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.430e+02 2.939e+02 3.954e+02 7.661e+02, threshold=5.878e+02, percent-clipped=3.0 +2023-02-06 09:15:31,441 INFO [train.py:901] (2/4) Epoch 10, batch 6000, loss[loss=0.2025, simple_loss=0.2736, pruned_loss=0.06571, over 6388.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3128, pruned_loss=0.08198, over 1616663.70 frames. ], batch size: 14, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:15:31,441 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 09:15:43,952 INFO [train.py:935] (2/4) Epoch 10, validation: loss=0.1914, simple_loss=0.2907, pruned_loss=0.04604, over 944034.00 frames. +2023-02-06 09:15:43,952 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 09:15:46,395 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-06 09:15:58,896 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. 
limit=2.0 +2023-02-06 09:16:04,202 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7385, 3.6947, 3.4218, 1.7491, 3.3273, 3.3963, 3.4786, 3.2311], + device='cuda:2'), covar=tensor([0.1087, 0.0706, 0.1199, 0.5267, 0.0938, 0.1066, 0.1426, 0.1012], + device='cuda:2'), in_proj_covar=tensor([0.0447, 0.0356, 0.0365, 0.0470, 0.0360, 0.0358, 0.0364, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:16:18,420 INFO [train.py:901] (2/4) Epoch 10, batch 6050, loss[loss=0.1924, simple_loss=0.2785, pruned_loss=0.05313, over 8234.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3121, pruned_loss=0.08127, over 1619224.39 frames. ], batch size: 22, lr: 7.58e-03, grad_scale: 8.0 +2023-02-06 09:16:35,967 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:16:42,721 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3224, 2.0679, 3.1147, 2.4510, 2.7064, 2.1727, 1.7289, 1.4269], + device='cuda:2'), covar=tensor([0.3761, 0.3833, 0.1023, 0.2375, 0.1936, 0.1983, 0.1663, 0.4079], + device='cuda:2'), in_proj_covar=tensor([0.0873, 0.0846, 0.0708, 0.0819, 0.0916, 0.0775, 0.0692, 0.0745], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:16:52,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.842e+02 3.348e+02 4.641e+02 9.072e+02, threshold=6.696e+02, percent-clipped=15.0 +2023-02-06 09:16:54,184 INFO [train.py:901] (2/4) Epoch 10, batch 6100, loss[loss=0.2901, simple_loss=0.3543, pruned_loss=0.1129, over 8542.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3131, pruned_loss=0.08268, over 1617598.81 frames. ], batch size: 31, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:17:24,370 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 09:17:27,728 INFO [train.py:901] (2/4) Epoch 10, batch 6150, loss[loss=0.2254, simple_loss=0.2934, pruned_loss=0.07867, over 7658.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.312, pruned_loss=0.08225, over 1612822.65 frames. ], batch size: 19, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:17:33,873 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0548, 2.2416, 1.7020, 2.7244, 1.3125, 1.5078, 1.9044, 2.3168], + device='cuda:2'), covar=tensor([0.0766, 0.0806, 0.1161, 0.0416, 0.1292, 0.1484, 0.1073, 0.0745], + device='cuda:2'), in_proj_covar=tensor([0.0236, 0.0208, 0.0254, 0.0216, 0.0217, 0.0251, 0.0254, 0.0220], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 09:17:41,307 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:17:54,637 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78937.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:01,035 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.486e+02 3.076e+02 3.632e+02 7.166e+02, threshold=6.152e+02, percent-clipped=1.0 +2023-02-06 09:18:02,462 INFO [train.py:901] (2/4) Epoch 10, batch 6200, loss[loss=0.2014, simple_loss=0.279, pruned_loss=0.06189, over 7915.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.312, pruned_loss=0.08188, over 1613116.66 frames. 
], batch size: 20, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:18:15,656 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:28,057 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:32,907 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:18:38,063 INFO [train.py:901] (2/4) Epoch 10, batch 6250, loss[loss=0.3084, simple_loss=0.3649, pruned_loss=0.1259, over 8192.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.3127, pruned_loss=0.08255, over 1613948.99 frames. ], batch size: 23, lr: 7.57e-03, grad_scale: 8.0 +2023-02-06 09:19:01,795 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79033.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:19:10,137 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.509e+02 3.177e+02 4.128e+02 1.006e+03, threshold=6.355e+02, percent-clipped=7.0 +2023-02-06 09:19:11,556 INFO [train.py:901] (2/4) Epoch 10, batch 6300, loss[loss=0.2841, simple_loss=0.3541, pruned_loss=0.1071, over 8512.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3133, pruned_loss=0.08298, over 1615312.82 frames. ], batch size: 26, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:19:43,985 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0009, 1.7067, 3.3746, 1.4757, 2.2733, 3.6910, 3.7401, 3.2086], + device='cuda:2'), covar=tensor([0.1015, 0.1409, 0.0327, 0.1969, 0.0995, 0.0219, 0.0394, 0.0600], + device='cuda:2'), in_proj_covar=tensor([0.0255, 0.0293, 0.0253, 0.0285, 0.0267, 0.0234, 0.0325, 0.0289], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:19:47,654 INFO [train.py:901] (2/4) Epoch 10, batch 6350, loss[loss=0.2513, simple_loss=0.3041, pruned_loss=0.09924, over 7546.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3142, pruned_loss=0.08377, over 1617517.92 frames. ], batch size: 18, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:19:56,618 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7284, 2.2597, 3.5685, 2.6490, 2.9692, 2.4339, 1.8793, 1.7192], + device='cuda:2'), covar=tensor([0.3425, 0.3833, 0.1073, 0.2521, 0.2001, 0.1898, 0.1586, 0.4263], + device='cuda:2'), in_proj_covar=tensor([0.0867, 0.0844, 0.0701, 0.0817, 0.0912, 0.0773, 0.0687, 0.0743], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:20:00,245 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-02-06 09:20:01,432 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2120, 2.0012, 2.8438, 2.3199, 2.4498, 2.1149, 1.6897, 1.3719], + device='cuda:2'), covar=tensor([0.3707, 0.3588, 0.1060, 0.2315, 0.1937, 0.1955, 0.1751, 0.3767], + device='cuda:2'), in_proj_covar=tensor([0.0865, 0.0842, 0.0701, 0.0816, 0.0911, 0.0772, 0.0685, 0.0742], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:20:20,619 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.998e+02 3.636e+02 4.667e+02 1.201e+03, threshold=7.271e+02, percent-clipped=11.0 +2023-02-06 09:20:21,298 INFO [train.py:901] (2/4) Epoch 10, batch 6400, loss[loss=0.2674, simple_loss=0.3214, pruned_loss=0.1067, over 7550.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3123, pruned_loss=0.08271, over 1612711.33 frames. ], batch size: 18, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:20:54,148 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:20:57,426 INFO [train.py:901] (2/4) Epoch 10, batch 6450, loss[loss=0.23, simple_loss=0.3007, pruned_loss=0.07967, over 8246.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3113, pruned_loss=0.08203, over 1613977.85 frames. ], batch size: 22, lr: 7.56e-03, grad_scale: 8.0 +2023-02-06 09:21:12,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79218.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:21:27,159 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5173, 2.0474, 2.8837, 2.2936, 2.5820, 2.2502, 1.8166, 1.2497], + device='cuda:2'), covar=tensor([0.3026, 0.3439, 0.1023, 0.2303, 0.1630, 0.1804, 0.1512, 0.3667], + device='cuda:2'), in_proj_covar=tensor([0.0871, 0.0846, 0.0707, 0.0821, 0.0918, 0.0775, 0.0690, 0.0748], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:21:31,630 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.577e+02 3.130e+02 4.050e+02 7.383e+02, threshold=6.260e+02, percent-clipped=1.0 +2023-02-06 09:21:32,337 INFO [train.py:901] (2/4) Epoch 10, batch 6500, loss[loss=0.1796, simple_loss=0.2538, pruned_loss=0.05275, over 7706.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3118, pruned_loss=0.08233, over 1616611.41 frames. ], batch size: 18, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:21:49,243 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8683, 2.2250, 3.6498, 2.6093, 3.1735, 2.4095, 1.9665, 1.6027], + device='cuda:2'), covar=tensor([0.3762, 0.4468, 0.1251, 0.2829, 0.1954, 0.2170, 0.1710, 0.4697], + device='cuda:2'), in_proj_covar=tensor([0.0880, 0.0853, 0.0714, 0.0830, 0.0925, 0.0782, 0.0699, 0.0756], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:21:59,866 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79289.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:06,999 INFO [train.py:901] (2/4) Epoch 10, batch 6550, loss[loss=0.2276, simple_loss=0.295, pruned_loss=0.08006, over 8299.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3127, pruned_loss=0.08295, over 1612476.93 frames. 
], batch size: 23, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:22:11,151 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79303.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:22:18,368 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79314.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:27,870 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:36,563 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 09:22:41,241 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.767e+02 3.312e+02 4.239e+02 1.073e+03, threshold=6.623e+02, percent-clipped=3.0 +2023-02-06 09:22:41,948 INFO [train.py:901] (2/4) Epoch 10, batch 6600, loss[loss=0.2222, simple_loss=0.2915, pruned_loss=0.07643, over 7809.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3124, pruned_loss=0.08282, over 1611544.06 frames. ], batch size: 20, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:22:44,159 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:22:53,888 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 09:23:04,709 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:23:11,915 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79393.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:23:15,195 INFO [train.py:901] (2/4) Epoch 10, batch 6650, loss[loss=0.2186, simple_loss=0.2963, pruned_loss=0.07042, over 8082.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3131, pruned_loss=0.08333, over 1611494.74 frames. ], batch size: 21, lr: 7.55e-03, grad_scale: 8.0 +2023-02-06 09:23:33,608 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2246, 2.2526, 1.5783, 2.0536, 1.7745, 1.3862, 1.7171, 1.7405], + device='cuda:2'), covar=tensor([0.0968, 0.0298, 0.0860, 0.0373, 0.0503, 0.1109, 0.0624, 0.0677], + device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0229, 0.0308, 0.0296, 0.0303, 0.0319, 0.0336, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:23:47,670 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:23:50,865 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.666e+02 3.220e+02 4.193e+02 8.839e+02, threshold=6.440e+02, percent-clipped=3.0 +2023-02-06 09:23:51,584 INFO [train.py:901] (2/4) Epoch 10, batch 6700, loss[loss=0.272, simple_loss=0.3398, pruned_loss=0.1021, over 8613.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3133, pruned_loss=0.08359, over 1612235.01 frames. ], batch size: 39, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:24:24,664 INFO [train.py:901] (2/4) Epoch 10, batch 6750, loss[loss=0.2234, simple_loss=0.3146, pruned_loss=0.0661, over 8572.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3144, pruned_loss=0.08429, over 1608100.45 frames. 
], batch size: 39, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:24:28,984 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1767, 3.1764, 2.3990, 2.5405, 2.5519, 2.1535, 2.3460, 2.7569], + device='cuda:2'), covar=tensor([0.1046, 0.0246, 0.0733, 0.0576, 0.0483, 0.0904, 0.0858, 0.0803], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0229, 0.0308, 0.0296, 0.0305, 0.0320, 0.0338, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:25:00,368 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.662e+02 3.188e+02 4.113e+02 8.575e+02, threshold=6.376e+02, percent-clipped=4.0 +2023-02-06 09:25:01,059 INFO [train.py:901] (2/4) Epoch 10, batch 6800, loss[loss=0.237, simple_loss=0.3145, pruned_loss=0.07975, over 8332.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3145, pruned_loss=0.08375, over 1611328.69 frames. ], batch size: 26, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:11,665 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 09:25:14,137 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 09:25:19,547 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1631, 2.3420, 1.8137, 2.7970, 1.2255, 1.4496, 1.8329, 2.3643], + device='cuda:2'), covar=tensor([0.0686, 0.0765, 0.1094, 0.0426, 0.1274, 0.1552, 0.1135, 0.0805], + device='cuda:2'), in_proj_covar=tensor([0.0239, 0.0211, 0.0254, 0.0215, 0.0218, 0.0251, 0.0258, 0.0224], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 09:25:35,818 INFO [train.py:901] (2/4) Epoch 10, batch 6850, loss[loss=0.225, simple_loss=0.3135, pruned_loss=0.0682, over 8491.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.314, pruned_loss=0.08291, over 1612519.04 frames. ], batch size: 26, lr: 7.54e-03, grad_scale: 8.0 +2023-02-06 09:25:39,343 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:25:59,647 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 09:26:10,489 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.480e+02 2.958e+02 3.519e+02 6.592e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-06 09:26:10,581 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79647.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:26:11,119 INFO [train.py:901] (2/4) Epoch 10, batch 6900, loss[loss=0.2577, simple_loss=0.32, pruned_loss=0.09777, over 8077.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3153, pruned_loss=0.08426, over 1610488.26 frames. ], batch size: 21, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:26:30,985 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:26:44,402 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:26:46,399 INFO [train.py:901] (2/4) Epoch 10, batch 6950, loss[loss=0.2382, simple_loss=0.3214, pruned_loss=0.07743, over 8459.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.315, pruned_loss=0.08368, over 1609524.76 frames. 
], batch size: 27, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:26:46,620 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:03,561 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:05,488 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:10,652 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 09:27:12,697 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79737.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:27:19,256 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.770e+02 3.379e+02 4.019e+02 1.115e+03, threshold=6.759e+02, percent-clipped=8.0 +2023-02-06 09:27:19,980 INFO [train.py:901] (2/4) Epoch 10, batch 7000, loss[loss=0.1901, simple_loss=0.2794, pruned_loss=0.05046, over 8200.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3155, pruned_loss=0.08385, over 1613525.04 frames. ], batch size: 23, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:27:30,364 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79762.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:27:43,460 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79780.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:27:55,328 INFO [train.py:901] (2/4) Epoch 10, batch 7050, loss[loss=0.2115, simple_loss=0.2849, pruned_loss=0.06901, over 7789.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3153, pruned_loss=0.08379, over 1612889.03 frames. ], batch size: 19, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:28:04,461 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:28:25,532 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:28:26,881 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2184, 1.4040, 1.7170, 1.3246, 1.1153, 1.4647, 1.8778, 1.8324], + device='cuda:2'), covar=tensor([0.0525, 0.1271, 0.1674, 0.1429, 0.0634, 0.1529, 0.0661, 0.0581], + device='cuda:2'), in_proj_covar=tensor([0.0102, 0.0152, 0.0194, 0.0159, 0.0106, 0.0164, 0.0118, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 09:28:29,361 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.704e+02 3.361e+02 4.306e+02 1.362e+03, threshold=6.722e+02, percent-clipped=5.0 +2023-02-06 09:28:30,075 INFO [train.py:901] (2/4) Epoch 10, batch 7100, loss[loss=0.2197, simple_loss=0.2932, pruned_loss=0.07311, over 7969.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3157, pruned_loss=0.08361, over 1617582.88 frames. ], batch size: 21, lr: 7.53e-03, grad_scale: 8.0 +2023-02-06 09:28:32,888 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79852.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:29:06,026 INFO [train.py:901] (2/4) Epoch 10, batch 7150, loss[loss=0.2923, simple_loss=0.3553, pruned_loss=0.1146, over 6988.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3142, pruned_loss=0.08299, over 1610954.83 frames. 
], batch size: 72, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:29:39,472 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.702e+02 3.262e+02 4.332e+02 1.613e+03, threshold=6.525e+02, percent-clipped=3.0 +2023-02-06 09:29:39,575 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:29:40,174 INFO [train.py:901] (2/4) Epoch 10, batch 7200, loss[loss=0.259, simple_loss=0.3327, pruned_loss=0.09262, over 8105.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3141, pruned_loss=0.08282, over 1612640.31 frames. ], batch size: 23, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:30:13,866 INFO [train.py:901] (2/4) Epoch 10, batch 7250, loss[loss=0.2348, simple_loss=0.3284, pruned_loss=0.07062, over 8501.00 frames. ], tot_loss[loss=0.24, simple_loss=0.3138, pruned_loss=0.08309, over 1612674.29 frames. ], batch size: 28, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:30:30,535 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80018.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 09:30:31,018 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80019.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:30:47,885 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80043.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:30:48,199 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 09:30:50,296 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.755e+02 3.243e+02 3.993e+02 1.489e+03, threshold=6.485e+02, percent-clipped=9.0 +2023-02-06 09:30:50,943 INFO [train.py:901] (2/4) Epoch 10, batch 7300, loss[loss=0.1894, simple_loss=0.2681, pruned_loss=0.05538, over 7541.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.313, pruned_loss=0.08234, over 1615661.31 frames. ], batch size: 18, lr: 7.52e-03, grad_scale: 8.0 +2023-02-06 09:31:00,593 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:03,353 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:04,056 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3503, 1.5610, 2.3130, 1.1487, 1.4795, 1.6057, 1.4116, 1.4414], + device='cuda:2'), covar=tensor([0.1742, 0.2039, 0.0678, 0.3682, 0.1622, 0.2999, 0.1783, 0.1901], + device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0507, 0.0529, 0.0568, 0.0611, 0.0550, 0.0465, 0.0603], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:31:16,362 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 09:31:20,009 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80091.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:24,180 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:24,604 INFO [train.py:901] (2/4) Epoch 10, batch 7350, loss[loss=0.2563, simple_loss=0.3241, pruned_loss=0.09426, over 8434.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3121, pruned_loss=0.08247, over 1614177.04 frames. 
], batch size: 49, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:31:31,683 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80108.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:31:42,410 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:44,301 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:50,390 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80133.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 09:31:50,997 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:31:56,728 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 09:31:58,935 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7977, 1.5707, 5.8827, 1.9742, 5.2470, 4.9642, 5.4748, 5.2732], + device='cuda:2'), covar=tensor([0.0397, 0.4112, 0.0256, 0.3295, 0.0806, 0.0622, 0.0365, 0.0447], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0553, 0.0554, 0.0505, 0.0578, 0.0489, 0.0485, 0.0550], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:31:59,433 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.519e+02 3.343e+02 4.224e+02 9.659e+02, threshold=6.686e+02, percent-clipped=6.0 +2023-02-06 09:32:00,145 INFO [train.py:901] (2/4) Epoch 10, batch 7400, loss[loss=0.21, simple_loss=0.2903, pruned_loss=0.06482, over 8239.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3116, pruned_loss=0.08207, over 1606156.34 frames. ], batch size: 22, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:32:16,188 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 09:32:34,307 INFO [train.py:901] (2/4) Epoch 10, batch 7450, loss[loss=0.3052, simple_loss=0.3759, pruned_loss=0.1173, over 8647.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3114, pruned_loss=0.08168, over 1607401.66 frames. ], batch size: 39, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:32:35,892 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1746, 1.3297, 3.3090, 0.9572, 2.8500, 2.7322, 3.0014, 2.9041], + device='cuda:2'), covar=tensor([0.0687, 0.3452, 0.0744, 0.3243, 0.1353, 0.0969, 0.0677, 0.0785], + device='cuda:2'), in_proj_covar=tensor([0.0462, 0.0558, 0.0561, 0.0511, 0.0584, 0.0492, 0.0489, 0.0555], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:32:39,985 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8502, 2.1689, 4.0508, 1.5493, 2.9784, 2.2259, 1.8434, 2.6693], + device='cuda:2'), covar=tensor([0.1550, 0.2209, 0.0691, 0.3618, 0.1500, 0.2682, 0.1691, 0.2301], + device='cuda:2'), in_proj_covar=tensor([0.0488, 0.0511, 0.0532, 0.0574, 0.0614, 0.0554, 0.0467, 0.0606], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:32:54,358 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 09:33:02,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:33:09,145 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.637e+02 3.217e+02 3.901e+02 6.824e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 09:33:09,858 INFO [train.py:901] (2/4) Epoch 10, batch 7500, loss[loss=0.2242, simple_loss=0.2971, pruned_loss=0.07563, over 7672.00 frames. ], tot_loss[loss=0.238, simple_loss=0.3112, pruned_loss=0.08239, over 1601562.29 frames. ], batch size: 19, lr: 7.51e-03, grad_scale: 8.0 +2023-02-06 09:33:19,687 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 09:33:43,947 INFO [train.py:901] (2/4) Epoch 10, batch 7550, loss[loss=0.1995, simple_loss=0.2851, pruned_loss=0.05698, over 7659.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3128, pruned_loss=0.08255, over 1607691.06 frames. ], batch size: 19, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:33:46,243 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:33:57,962 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:15,033 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:17,459 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.399e+02 2.908e+02 3.933e+02 1.078e+03, threshold=5.816e+02, percent-clipped=3.0 +2023-02-06 09:34:18,143 INFO [train.py:901] (2/4) Epoch 10, batch 7600, loss[loss=0.3066, simple_loss=0.3864, pruned_loss=0.1134, over 8518.00 frames. ], tot_loss[loss=0.2389, simple_loss=0.313, pruned_loss=0.08241, over 1609787.07 frames. ], batch size: 28, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:34:43,239 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3141, 1.4658, 1.3270, 1.8675, 0.7277, 1.2227, 1.2926, 1.4920], + device='cuda:2'), covar=tensor([0.0981, 0.0976, 0.1280, 0.0607, 0.1221, 0.1618, 0.0869, 0.0767], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0211, 0.0253, 0.0215, 0.0217, 0.0249, 0.0255, 0.0225], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 09:34:47,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2165, 1.2819, 1.5046, 1.1539, 0.8360, 1.3257, 1.3353, 0.9762], + device='cuda:2'), covar=tensor([0.0578, 0.1259, 0.1760, 0.1438, 0.0553, 0.1539, 0.0677, 0.0690], + device='cuda:2'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0160, 0.0105, 0.0164, 0.0119, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 09:34:49,208 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80390.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:34:54,343 INFO [train.py:901] (2/4) Epoch 10, batch 7650, loss[loss=0.2873, simple_loss=0.3495, pruned_loss=0.1126, over 8344.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3153, pruned_loss=0.08414, over 1613481.14 frames. 
], batch size: 26, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:35:03,331 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:06,028 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4795, 4.4409, 4.0231, 1.9918, 3.9638, 4.1015, 4.1219, 3.7362], + device='cuda:2'), covar=tensor([0.0886, 0.0579, 0.0980, 0.5203, 0.0898, 0.0837, 0.1211, 0.0853], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0359, 0.0377, 0.0475, 0.0374, 0.0362, 0.0365, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:35:06,133 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:35:12,130 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6097, 1.2301, 2.7982, 1.3115, 1.9807, 2.9822, 3.0284, 2.6048], + device='cuda:2'), covar=tensor([0.1047, 0.1554, 0.0413, 0.1962, 0.0925, 0.0318, 0.0585, 0.0627], + device='cuda:2'), in_proj_covar=tensor([0.0259, 0.0294, 0.0255, 0.0285, 0.0268, 0.0232, 0.0330, 0.0288], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:35:27,280 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.621e+02 3.149e+02 3.913e+02 9.838e+02, threshold=6.298e+02, percent-clipped=6.0 +2023-02-06 09:35:27,982 INFO [train.py:901] (2/4) Epoch 10, batch 7700, loss[loss=0.2381, simple_loss=0.3152, pruned_loss=0.08043, over 8481.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3145, pruned_loss=0.08366, over 1614710.63 frames. ], batch size: 49, lr: 7.50e-03, grad_scale: 8.0 +2023-02-06 09:35:51,539 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:01,535 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:03,341 INFO [train.py:901] (2/4) Epoch 10, batch 7750, loss[loss=0.2019, simple_loss=0.2931, pruned_loss=0.05531, over 8246.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3144, pruned_loss=0.08342, over 1611989.69 frames. ], batch size: 24, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:36:06,751 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 09:36:12,267 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.56 vs. limit=5.0 +2023-02-06 09:36:13,276 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:18,621 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:36:36,346 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.713e+02 3.406e+02 4.090e+02 8.759e+02, threshold=6.812e+02, percent-clipped=3.0 +2023-02-06 09:36:37,057 INFO [train.py:901] (2/4) Epoch 10, batch 7800, loss[loss=0.2101, simple_loss=0.2873, pruned_loss=0.06649, over 7644.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3146, pruned_loss=0.08347, over 1613320.43 frames. 
], batch size: 19, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:37:04,766 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1906, 3.0466, 3.5241, 2.1118, 1.7908, 3.6845, 0.5750, 2.3407], + device='cuda:2'), covar=tensor([0.2080, 0.1189, 0.0496, 0.3071, 0.4860, 0.0424, 0.4191, 0.2227], + device='cuda:2'), in_proj_covar=tensor([0.0164, 0.0163, 0.0096, 0.0211, 0.0255, 0.0101, 0.0162, 0.0161], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 09:37:09,858 INFO [train.py:901] (2/4) Epoch 10, batch 7850, loss[loss=0.2335, simple_loss=0.3135, pruned_loss=0.07671, over 8195.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3152, pruned_loss=0.08392, over 1611447.63 frames. ], batch size: 23, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:37:40,875 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:37:42,726 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.556e+02 3.372e+02 4.255e+02 7.191e+02, threshold=6.744e+02, percent-clipped=1.0 +2023-02-06 09:37:43,427 INFO [train.py:901] (2/4) Epoch 10, batch 7900, loss[loss=0.2279, simple_loss=0.3119, pruned_loss=0.07191, over 8321.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3151, pruned_loss=0.08357, over 1615649.19 frames. ], batch size: 25, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:38:02,367 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4344, 1.6661, 4.2462, 1.9917, 2.4121, 4.9082, 4.9020, 4.2505], + device='cuda:2'), covar=tensor([0.0931, 0.1497, 0.0290, 0.1856, 0.1044, 0.0181, 0.0357, 0.0492], + device='cuda:2'), in_proj_covar=tensor([0.0255, 0.0289, 0.0253, 0.0282, 0.0266, 0.0230, 0.0326, 0.0283], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 09:38:16,932 INFO [train.py:901] (2/4) Epoch 10, batch 7950, loss[loss=0.2071, simple_loss=0.2843, pruned_loss=0.06493, over 7944.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3139, pruned_loss=0.08271, over 1612088.04 frames. ], batch size: 20, lr: 7.49e-03, grad_scale: 8.0 +2023-02-06 09:38:50,733 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 2.660e+02 3.023e+02 3.700e+02 9.606e+02, threshold=6.046e+02, percent-clipped=2.0 +2023-02-06 09:38:51,443 INFO [train.py:901] (2/4) Epoch 10, batch 8000, loss[loss=0.1811, simple_loss=0.2661, pruned_loss=0.04802, over 7808.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3136, pruned_loss=0.08268, over 1613399.43 frames. ], batch size: 20, lr: 7.48e-03, grad_scale: 8.0 +2023-02-06 09:38:55,693 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:38:56,305 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:38:59,808 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:20,380 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:24,988 INFO [train.py:901] (2/4) Epoch 10, batch 8050, loss[loss=0.2132, simple_loss=0.2882, pruned_loss=0.0691, over 7786.00 frames. 
], tot_loss[loss=0.2396, simple_loss=0.3136, pruned_loss=0.08276, over 1610932.20 frames. ], batch size: 19, lr: 7.48e-03, grad_scale: 8.0 +2023-02-06 09:39:43,568 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:39:57,914 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 09:40:01,792 INFO [train.py:901] (2/4) Epoch 11, batch 0, loss[loss=0.3053, simple_loss=0.3629, pruned_loss=0.1239, over 8363.00 frames. ], tot_loss[loss=0.3053, simple_loss=0.3629, pruned_loss=0.1239, over 8363.00 frames. ], batch size: 24, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:40:01,793 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 09:40:13,090 INFO [train.py:935] (2/4) Epoch 11, validation: loss=0.1907, simple_loss=0.2907, pruned_loss=0.04534, over 944034.00 frames. +2023-02-06 09:40:13,091 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 09:40:23,922 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.899e+02 3.439e+02 4.416e+02 1.589e+03, threshold=6.879e+02, percent-clipped=9.0 +2023-02-06 09:40:27,458 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 09:40:30,152 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:40:39,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80870.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:40:47,948 INFO [train.py:901] (2/4) Epoch 11, batch 50, loss[loss=0.2755, simple_loss=0.3429, pruned_loss=0.104, over 8589.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3273, pruned_loss=0.08782, over 373619.91 frames. ], batch size: 31, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:40:52,491 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-06 09:41:03,889 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 09:41:04,383 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 09:41:24,352 INFO [train.py:901] (2/4) Epoch 11, batch 100, loss[loss=0.2029, simple_loss=0.2881, pruned_loss=0.05887, over 8233.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.3216, pruned_loss=0.08551, over 654749.51 frames. ], batch size: 22, lr: 7.14e-03, grad_scale: 8.0 +2023-02-06 09:41:29,240 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 09:41:30,711 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:41:35,314 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.679e+02 3.187e+02 3.933e+02 1.063e+03, threshold=6.374e+02, percent-clipped=2.0 +2023-02-06 09:41:51,857 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:41:58,396 INFO [train.py:901] (2/4) Epoch 11, batch 150, loss[loss=0.278, simple_loss=0.3531, pruned_loss=0.1015, over 8248.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.319, pruned_loss=0.08468, over 865759.80 frames. 
], batch size: 24, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:42:23,721 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:42:34,640 INFO [train.py:901] (2/4) Epoch 11, batch 200, loss[loss=0.2232, simple_loss=0.3039, pruned_loss=0.07128, over 7644.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.3171, pruned_loss=0.08373, over 1031331.71 frames. ], batch size: 19, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:42:37,863 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 09:42:42,989 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:42:47,013 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.935e+02 2.662e+02 3.186e+02 4.005e+02 8.686e+02, threshold=6.371e+02, percent-clipped=5.0 +2023-02-06 09:43:10,552 INFO [train.py:901] (2/4) Epoch 11, batch 250, loss[loss=0.2641, simple_loss=0.3427, pruned_loss=0.09274, over 8486.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3162, pruned_loss=0.08312, over 1164622.25 frames. ], batch size: 49, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:43:18,698 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 09:43:21,542 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 09:43:21,965 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 09:43:22,306 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:31,353 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 09:43:42,712 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:46,010 INFO [train.py:901] (2/4) Epoch 11, batch 300, loss[loss=0.2576, simple_loss=0.3351, pruned_loss=0.09009, over 8596.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3142, pruned_loss=0.08237, over 1260635.14 frames. ], batch size: 39, lr: 7.13e-03, grad_scale: 16.0 +2023-02-06 09:43:48,260 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:48,874 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:43:57,132 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.697e+02 3.136e+02 4.054e+02 9.565e+02, threshold=6.271e+02, percent-clipped=1.0 +2023-02-06 09:44:00,772 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:22,507 INFO [train.py:901] (2/4) Epoch 11, batch 350, loss[loss=0.2601, simple_loss=0.3267, pruned_loss=0.09671, over 7802.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3137, pruned_loss=0.08195, over 1340629.96 frames. ], batch size: 20, lr: 7.13e-03, grad_scale: 8.0 +2023-02-06 09:44:32,494 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. 
limit=2.0 +2023-02-06 09:44:33,013 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:39,225 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1177, 1.7625, 2.4916, 2.0467, 2.2265, 2.0435, 1.7371, 1.0064], + device='cuda:2'), covar=tensor([0.4033, 0.3948, 0.1216, 0.2310, 0.1975, 0.2312, 0.1690, 0.4098], + device='cuda:2'), in_proj_covar=tensor([0.0892, 0.0863, 0.0720, 0.0833, 0.0935, 0.0794, 0.0704, 0.0762], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:44:44,354 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:46,356 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0436, 1.4058, 1.6764, 1.3420, 1.0982, 1.4210, 1.7531, 1.4116], + device='cuda:2'), covar=tensor([0.0466, 0.1212, 0.1629, 0.1327, 0.0565, 0.1413, 0.0667, 0.0595], + device='cuda:2'), in_proj_covar=tensor([0.0102, 0.0155, 0.0195, 0.0159, 0.0106, 0.0164, 0.0118, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 09:44:49,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:53,751 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:44:56,286 INFO [train.py:901] (2/4) Epoch 11, batch 400, loss[loss=0.2344, simple_loss=0.3254, pruned_loss=0.07166, over 8323.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3148, pruned_loss=0.08298, over 1401715.24 frames. ], batch size: 25, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:45:00,799 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0 +2023-02-06 09:45:08,650 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.601e+02 3.216e+02 4.274e+02 6.931e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 09:45:10,280 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:11,755 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:23,640 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9575, 2.5220, 2.9718, 1.2569, 3.0042, 1.7505, 1.3435, 1.7709], + device='cuda:2'), covar=tensor([0.0565, 0.0225, 0.0165, 0.0501, 0.0284, 0.0602, 0.0644, 0.0361], + device='cuda:2'), in_proj_covar=tensor([0.0373, 0.0304, 0.0259, 0.0372, 0.0293, 0.0457, 0.0345, 0.0339], + device='cuda:2'), out_proj_covar=tensor([1.0753e-04, 8.5703e-05, 7.3022e-05, 1.0561e-04, 8.3944e-05, 1.4109e-04, + 9.9503e-05, 9.6983e-05], device='cuda:2') +2023-02-06 09:45:29,082 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4186, 2.6316, 1.9250, 2.3165, 2.2175, 1.5239, 2.1689, 2.2088], + device='cuda:2'), covar=tensor([0.1433, 0.0409, 0.0968, 0.0545, 0.0611, 0.1460, 0.0967, 0.0873], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0233, 0.0308, 0.0296, 0.0303, 0.0325, 0.0338, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:45:32,911 INFO [train.py:901] (2/4) Epoch 11, batch 450, loss[loss=0.2331, simple_loss=0.3177, pruned_loss=0.07421, over 8518.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.315, pruned_loss=0.08289, over 1448181.48 frames. ], batch size: 49, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:45:57,086 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:45:58,457 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:46:06,250 INFO [train.py:901] (2/4) Epoch 11, batch 500, loss[loss=0.1861, simple_loss=0.2675, pruned_loss=0.0523, over 7451.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3134, pruned_loss=0.08231, over 1482592.57 frames. ], batch size: 17, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:46:07,904 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.73 vs. limit=5.0 +2023-02-06 09:46:17,539 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.501e+02 3.364e+02 4.069e+02 6.845e+02, threshold=6.728e+02, percent-clipped=2.0 +2023-02-06 09:46:40,092 INFO [train.py:901] (2/4) Epoch 11, batch 550, loss[loss=0.2323, simple_loss=0.312, pruned_loss=0.07633, over 8321.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.313, pruned_loss=0.08235, over 1513077.88 frames. 
], batch size: 26, lr: 7.12e-03, grad_scale: 8.0 +2023-02-06 09:47:04,570 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4320, 1.9294, 2.7727, 2.1918, 2.5729, 2.2347, 1.8784, 1.1533], + device='cuda:2'), covar=tensor([0.3677, 0.3746, 0.1136, 0.2380, 0.1705, 0.2075, 0.1733, 0.4009], + device='cuda:2'), in_proj_covar=tensor([0.0886, 0.0853, 0.0717, 0.0828, 0.0927, 0.0789, 0.0701, 0.0759], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:47:15,692 INFO [train.py:901] (2/4) Epoch 11, batch 600, loss[loss=0.2388, simple_loss=0.3348, pruned_loss=0.0714, over 8479.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3126, pruned_loss=0.08217, over 1534756.00 frames. ], batch size: 29, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:47:27,354 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.633e+02 3.080e+02 3.885e+02 6.931e+02, threshold=6.160e+02, percent-clipped=1.0 +2023-02-06 09:47:27,533 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5472, 1.7497, 4.6501, 2.3915, 2.5287, 5.2996, 5.1936, 4.6987], + device='cuda:2'), covar=tensor([0.0876, 0.1434, 0.0234, 0.1533, 0.1000, 0.0160, 0.0406, 0.0478], + device='cuda:2'), in_proj_covar=tensor([0.0257, 0.0286, 0.0247, 0.0278, 0.0264, 0.0226, 0.0322, 0.0280], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 09:47:27,559 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.3637, 1.9569, 2.2729, 2.1120, 1.2502, 2.2338, 2.5316, 2.9131], + device='cuda:2'), covar=tensor([0.0372, 0.1112, 0.1544, 0.1162, 0.0588, 0.1218, 0.0588, 0.0382], + device='cuda:2'), in_proj_covar=tensor([0.0102, 0.0155, 0.0196, 0.0159, 0.0106, 0.0165, 0.0118, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 09:47:27,982 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 09:47:35,475 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 09:47:42,469 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:47:48,471 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:47:50,444 INFO [train.py:901] (2/4) Epoch 11, batch 650, loss[loss=0.264, simple_loss=0.329, pruned_loss=0.0995, over 8647.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3132, pruned_loss=0.08246, over 1557042.37 frames. 
], batch size: 31, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:47:59,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:04,889 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5927, 2.7699, 1.9000, 2.2740, 2.1849, 1.5737, 2.1328, 2.1658], + device='cuda:2'), covar=tensor([0.1327, 0.0290, 0.0958, 0.0592, 0.0644, 0.1258, 0.0900, 0.0868], + device='cuda:2'), in_proj_covar=tensor([0.0340, 0.0228, 0.0304, 0.0295, 0.0297, 0.0318, 0.0332, 0.0300], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 09:48:08,328 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:26,931 INFO [train.py:901] (2/4) Epoch 11, batch 700, loss[loss=0.2516, simple_loss=0.322, pruned_loss=0.09061, over 8501.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.312, pruned_loss=0.08108, over 1569898.87 frames. ], batch size: 26, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:48:27,089 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:48:28,483 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5560, 2.3127, 4.0493, 1.1736, 2.9053, 1.9563, 1.8128, 2.3765], + device='cuda:2'), covar=tensor([0.2010, 0.2319, 0.0967, 0.4614, 0.1912, 0.3289, 0.1938, 0.3171], + device='cuda:2'), in_proj_covar=tensor([0.0480, 0.0504, 0.0526, 0.0567, 0.0609, 0.0541, 0.0466, 0.0608], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 09:48:38,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.541e+02 3.049e+02 3.626e+02 6.264e+02, threshold=6.097e+02, percent-clipped=1.0 +2023-02-06 09:49:01,394 INFO [train.py:901] (2/4) Epoch 11, batch 750, loss[loss=0.2631, simple_loss=0.3362, pruned_loss=0.09499, over 8245.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3128, pruned_loss=0.08123, over 1585233.95 frames. ], batch size: 24, lr: 7.11e-03, grad_scale: 8.0 +2023-02-06 09:49:07,838 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 09:49:10,210 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81593.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:49:10,586 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 09:49:21,640 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3684, 1.9007, 3.0371, 2.3517, 2.6413, 2.1959, 1.6982, 1.4321], + device='cuda:2'), covar=tensor([0.3790, 0.4413, 0.1127, 0.2463, 0.2015, 0.2226, 0.1742, 0.4136], + device='cuda:2'), in_proj_covar=tensor([0.0885, 0.0855, 0.0718, 0.0833, 0.0931, 0.0790, 0.0701, 0.0761], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 09:49:24,833 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
+2023-02-06 09:49:25,689 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3154, 3.0773, 2.3798, 3.8525, 1.6972, 1.9159, 2.2739, 3.0074],
+ device='cuda:2'), covar=tensor([0.0827, 0.0756, 0.0954, 0.0269, 0.1235, 0.1501, 0.1184, 0.0784],
+ device='cuda:2'), in_proj_covar=tensor([0.0239, 0.0212, 0.0254, 0.0216, 0.0216, 0.0254, 0.0257, 0.0226],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 09:49:29,020 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3713, 4.4655, 3.9336, 1.8878, 3.9308, 3.9261, 4.1081, 3.7041],
+ device='cuda:2'), covar=tensor([0.0986, 0.0617, 0.1137, 0.5192, 0.0841, 0.0845, 0.1392, 0.0876],
+ device='cuda:2'), in_proj_covar=tensor([0.0465, 0.0368, 0.0379, 0.0479, 0.0373, 0.0365, 0.0367, 0.0322],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 09:49:33,742 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 09:49:36,445 INFO [train.py:901] (2/4) Epoch 11, batch 800, loss[loss=0.2185, simple_loss=0.3036, pruned_loss=0.06669, over 8334.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3126, pruned_loss=0.08097, over 1593435.58 frames. ], batch size: 26, lr: 7.11e-03, grad_scale: 8.0
+2023-02-06 09:49:49,260 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.494e+02 2.971e+02 3.970e+02 9.403e+02, threshold=5.941e+02, percent-clipped=2.0
+2023-02-06 09:49:58,273 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81661.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:49:59,645 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81663.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:50:11,556 INFO [train.py:901] (2/4) Epoch 11, batch 850, loss[loss=0.2616, simple_loss=0.3266, pruned_loss=0.09829, over 6700.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3134, pruned_loss=0.08171, over 1595743.27 frames. ], batch size: 71, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:50:13,988 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0
+2023-02-06 09:50:16,509 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81688.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:50:39,504 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6276, 1.6911, 1.9454, 1.5512, 1.0745, 2.0730, 0.2120, 1.2386],
+ device='cuda:2'), covar=tensor([0.3231, 0.1821, 0.0604, 0.1938, 0.4516, 0.0544, 0.3788, 0.1914],
+ device='cuda:2'), in_proj_covar=tensor([0.0162, 0.0163, 0.0094, 0.0210, 0.0252, 0.0102, 0.0161, 0.0159],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 09:50:46,078 INFO [train.py:901] (2/4) Epoch 11, batch 900, loss[loss=0.2789, simple_loss=0.3483, pruned_loss=0.1048, over 8344.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3123, pruned_loss=0.08117, over 1601449.01 frames. ], batch size: 26, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:50:58,849 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.543e+02 3.289e+02 4.286e+02 9.063e+02, threshold=6.577e+02, percent-clipped=7.0
+2023-02-06 09:51:14,957 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 09:51:18,695 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81776.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:51:20,087 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:51:21,852 INFO [train.py:901] (2/4) Epoch 11, batch 950, loss[loss=0.2591, simple_loss=0.3308, pruned_loss=0.09369, over 8451.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3125, pruned_loss=0.08148, over 1605990.46 frames. ], batch size: 27, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:51:28,235 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4986, 2.0196, 3.0960, 2.3929, 2.8916, 2.2960, 1.9004, 1.4788],
+ device='cuda:2'), covar=tensor([0.3790, 0.4188, 0.1079, 0.2586, 0.1711, 0.2291, 0.1755, 0.4239],
+ device='cuda:2'), in_proj_covar=tensor([0.0887, 0.0856, 0.0721, 0.0833, 0.0925, 0.0793, 0.0705, 0.0760],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 09:51:51,860 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 09:51:56,016 INFO [train.py:901] (2/4) Epoch 11, batch 1000, loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.06362, over 8326.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.312, pruned_loss=0.08162, over 1609673.00 frames. ], batch size: 25, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:52:07,438 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.713e+02 3.211e+02 4.023e+02 7.481e+02, threshold=6.422e+02, percent-clipped=3.0
+2023-02-06 09:52:08,402 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81849.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:52:12,966 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1463, 2.4648, 1.9220, 2.8622, 1.3433, 1.6697, 1.9214, 2.3242],
+ device='cuda:2'), covar=tensor([0.0734, 0.0783, 0.0958, 0.0437, 0.1257, 0.1314, 0.1143, 0.0873],
+ device='cuda:2'), in_proj_covar=tensor([0.0240, 0.0213, 0.0256, 0.0218, 0.0219, 0.0254, 0.0258, 0.0225],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 09:52:27,193 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 09:52:27,397 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:52:31,956 INFO [train.py:901] (2/4) Epoch 11, batch 1050, loss[loss=0.2965, simple_loss=0.3521, pruned_loss=0.1204, over 8526.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3117, pruned_loss=0.08137, over 1612885.41 frames. ], batch size: 28, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:52:39,047 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 09:53:06,144 INFO [train.py:901] (2/4) Epoch 11, batch 1100, loss[loss=0.2597, simple_loss=0.3347, pruned_loss=0.09241, over 8510.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3103, pruned_loss=0.08026, over 1616757.40 frames. ], batch size: 34, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:53:18,517 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.436e+02 2.887e+02 3.709e+02 9.106e+02, threshold=5.774e+02, percent-clipped=2.0
+2023-02-06 09:53:41,608 INFO [train.py:901] (2/4) Epoch 11, batch 1150, loss[loss=0.2608, simple_loss=0.3349, pruned_loss=0.09335, over 8320.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3118, pruned_loss=0.08078, over 1625391.29 frames. ], batch size: 25, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:53:51,787 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 09:54:00,452 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82005.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:17,961 INFO [train.py:901] (2/4) Epoch 11, batch 1200, loss[loss=0.204, simple_loss=0.2957, pruned_loss=0.05618, over 8361.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3112, pruned_loss=0.0806, over 1619391.46 frames. ], batch size: 24, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:54:18,730 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:18,854 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:20,248 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82034.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:29,502 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.664e+02 3.172e+02 3.772e+02 1.117e+03, threshold=6.345e+02, percent-clipped=5.0
+2023-02-06 09:54:36,787 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82057.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:38,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82059.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:47,992 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82073.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:53,469 INFO [train.py:901] (2/4) Epoch 11, batch 1250, loss[loss=0.2696, simple_loss=0.3443, pruned_loss=0.09742, over 8187.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3109, pruned_loss=0.08027, over 1619349.85 frames. ], batch size: 23, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:55:29,356 INFO [train.py:901] (2/4) Epoch 11, batch 1300, loss[loss=0.1983, simple_loss=0.2836, pruned_loss=0.05647, over 7805.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3107, pruned_loss=0.0797, over 1619992.14 frames. ], batch size: 20, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:55:40,413 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82147.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:55:40,854 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.583e+02 3.223e+02 4.179e+02 7.623e+02, threshold=6.447e+02, percent-clipped=2.0
+2023-02-06 09:55:42,377 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2390, 1.5108, 1.2560, 2.2169, 1.0581, 1.1044, 1.6503, 1.6824],
+ device='cuda:2'), covar=tensor([0.1701, 0.1407, 0.2064, 0.0604, 0.1491, 0.2102, 0.0954, 0.0928],
+ device='cuda:2'), in_proj_covar=tensor([0.0239, 0.0214, 0.0255, 0.0217, 0.0218, 0.0254, 0.0254, 0.0222],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 09:56:03,694 INFO [train.py:901] (2/4) Epoch 11, batch 1350, loss[loss=0.2525, simple_loss=0.3377, pruned_loss=0.08365, over 8475.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3119, pruned_loss=0.08073, over 1621471.33 frames. ], batch size: 25, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:56:29,754 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0715, 1.4404, 1.5737, 1.3668, 1.0326, 1.3697, 1.7878, 1.5210],
+ device='cuda:2'), covar=tensor([0.0466, 0.1318, 0.1753, 0.1389, 0.0588, 0.1585, 0.0678, 0.0637],
+ device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0153, 0.0194, 0.0157, 0.0104, 0.0164, 0.0116, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+ device='cuda:2')
+2023-02-06 09:56:38,845 INFO [train.py:901] (2/4) Epoch 11, batch 1400, loss[loss=0.2484, simple_loss=0.3233, pruned_loss=0.08674, over 8317.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3109, pruned_loss=0.08037, over 1621159.33 frames. ], batch size: 25, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:56:51,055 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.627e+02 3.119e+02 3.954e+02 1.224e+03, threshold=6.238e+02, percent-clipped=1.0
+2023-02-06 09:57:13,607 INFO [train.py:901] (2/4) Epoch 11, batch 1450, loss[loss=0.2864, simple_loss=0.3548, pruned_loss=0.109, over 8662.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3098, pruned_loss=0.0798, over 1617190.80 frames. ], batch size: 34, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:57:17,231 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9647, 2.3150, 1.8769, 2.8147, 1.4687, 1.4467, 2.0633, 2.3541],
+ device='cuda:2'), covar=tensor([0.0764, 0.0879, 0.1004, 0.0450, 0.1170, 0.1568, 0.1017, 0.0874],
+ device='cuda:2'), in_proj_covar=tensor([0.0241, 0.0214, 0.0255, 0.0219, 0.0218, 0.0256, 0.0255, 0.0225],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 09:57:27,767 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 09:57:48,806 INFO [train.py:901] (2/4) Epoch 11, batch 1500, loss[loss=0.2418, simple_loss=0.3221, pruned_loss=0.0808, over 8248.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3125, pruned_loss=0.08149, over 1618748.87 frames. ], batch size: 24, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:58:01,384 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.943e+02 2.743e+02 3.193e+02 4.270e+02 9.879e+02, threshold=6.387e+02, percent-clipped=7.0
+2023-02-06 09:58:02,226 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:12,521 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2677, 1.3515, 2.3339, 1.1331, 2.1320, 2.4892, 2.5599, 2.1209],
+ device='cuda:2'), covar=tensor([0.0969, 0.1140, 0.0457, 0.1966, 0.0636, 0.0372, 0.0646, 0.0744],
+ device='cuda:2'), in_proj_covar=tensor([0.0260, 0.0293, 0.0254, 0.0286, 0.0272, 0.0232, 0.0330, 0.0289],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 09:58:13,198 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82364.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:24,634 INFO [train.py:901] (2/4) Epoch 11, batch 1550, loss[loss=0.2416, simple_loss=0.3296, pruned_loss=0.07681, over 8313.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3118, pruned_loss=0.08121, over 1616427.98 frames. ], batch size: 25, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 09:58:39,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:42,339 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.17 vs. limit=5.0
+2023-02-06 09:58:50,132 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:57,848 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82428.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:58:59,746 INFO [train.py:901] (2/4) Epoch 11, batch 1600, loss[loss=0.1824, simple_loss=0.2623, pruned_loss=0.05127, over 7442.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3102, pruned_loss=0.07981, over 1612378.75 frames. ], batch size: 17, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 09:59:06,491 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0
+2023-02-06 09:59:11,745 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9612, 2.8552, 2.6792, 1.5686, 2.6429, 2.6140, 2.7172, 2.5293],
+ device='cuda:2'), covar=tensor([0.1251, 0.0964, 0.1192, 0.4248, 0.1089, 0.1312, 0.1363, 0.1091],
+ device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0375, 0.0384, 0.0482, 0.0377, 0.0369, 0.0369, 0.0325],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 09:59:12,992 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.328e+02 2.878e+02 3.468e+02 7.869e+02, threshold=5.757e+02, percent-clipped=2.0
+2023-02-06 09:59:18,835 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 09:59:24,125 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:59:36,397 INFO [train.py:901] (2/4) Epoch 11, batch 1650, loss[loss=0.2476, simple_loss=0.33, pruned_loss=0.08259, over 8505.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3108, pruned_loss=0.0801, over 1616454.32 frames. ], batch size: 26, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 09:59:57,314 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82511.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 10:00:11,602 INFO [train.py:901] (2/4) Epoch 11, batch 1700, loss[loss=0.2207, simple_loss=0.2884, pruned_loss=0.07648, over 7440.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3105, pruned_loss=0.0803, over 1612123.28 frames. ], batch size: 17, lr: 7.07e-03, grad_scale: 8.0
+2023-02-06 10:00:12,444 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:00:23,195 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.517e+02 3.185e+02 4.066e+02 8.085e+02, threshold=6.370e+02, percent-clipped=5.0
+2023-02-06 10:00:47,519 INFO [train.py:901] (2/4) Epoch 11, batch 1750, loss[loss=0.2448, simple_loss=0.3166, pruned_loss=0.0865, over 7807.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3113, pruned_loss=0.0811, over 1611233.72 frames. ], batch size: 20, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:00:52,551 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:01:23,307 INFO [train.py:901] (2/4) Epoch 11, batch 1800, loss[loss=0.2295, simple_loss=0.3109, pruned_loss=0.074, over 8373.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3115, pruned_loss=0.08126, over 1611231.51 frames. ], batch size: 24, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:01:26,965 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2989, 2.1821, 1.5643, 2.0438, 1.7708, 1.3311, 1.6649, 1.8277],
+ device='cuda:2'), covar=tensor([0.1196, 0.0348, 0.1071, 0.0503, 0.0606, 0.1350, 0.0851, 0.0706],
+ device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0234, 0.0314, 0.0300, 0.0300, 0.0324, 0.0341, 0.0309],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 10:01:35,825 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.598e+02 3.107e+02 4.193e+02 1.199e+03, threshold=6.213e+02, percent-clipped=8.0
+2023-02-06 10:01:42,099 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8261, 1.5644, 3.0949, 1.2945, 2.0413, 3.3807, 3.4014, 2.8921],
+ device='cuda:2'), covar=tensor([0.0996, 0.1497, 0.0356, 0.2049, 0.1028, 0.0263, 0.0436, 0.0619],
+ device='cuda:2'), in_proj_covar=tensor([0.0261, 0.0297, 0.0258, 0.0289, 0.0273, 0.0234, 0.0333, 0.0292],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 10:01:58,583 INFO [train.py:901] (2/4) Epoch 11, batch 1850, loss[loss=0.2385, simple_loss=0.3288, pruned_loss=0.07414, over 8519.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3125, pruned_loss=0.08149, over 1614410.46 frames. ], batch size: 26, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:02:10,530 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8557, 1.8718, 2.1334, 1.7418, 1.2729, 2.3014, 0.3694, 1.3334],
+ device='cuda:2'), covar=tensor([0.2318, 0.1943, 0.0547, 0.1922, 0.4178, 0.0557, 0.3442, 0.2107],
+ device='cuda:2'), in_proj_covar=tensor([0.0165, 0.0167, 0.0096, 0.0214, 0.0255, 0.0105, 0.0164, 0.0164],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 10:02:18,372 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:02:26,582 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82720.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:02:34,001 INFO [train.py:901] (2/4) Epoch 11, batch 1900, loss[loss=0.2115, simple_loss=0.2903, pruned_loss=0.06634, over 7981.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3121, pruned_loss=0.08169, over 1613454.14 frames. ], batch size: 21, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:02:43,684 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82745.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:02:45,519 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.442e+02 3.142e+02 3.936e+02 6.780e+02, threshold=6.284e+02, percent-clipped=1.0
+2023-02-06 10:03:08,879 INFO [train.py:901] (2/4) Epoch 11, batch 1950, loss[loss=0.2225, simple_loss=0.3026, pruned_loss=0.07118, over 7812.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3117, pruned_loss=0.08116, over 1611710.99 frames. ], batch size: 20, lr: 7.06e-03, grad_scale: 8.0
+2023-02-06 10:03:12,343 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 10:03:13,853 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82788.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:03:26,478 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 10:03:32,227 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82813.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:03:39,052 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:03:45,035 INFO [train.py:901] (2/4) Epoch 11, batch 2000, loss[loss=0.2204, simple_loss=0.2807, pruned_loss=0.08005, over 7798.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3122, pruned_loss=0.08154, over 1615272.63 frames. ], batch size: 19, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:03:47,028 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 10:03:56,671 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.675e+02 3.279e+02 3.987e+02 1.082e+03, threshold=6.559e+02, percent-clipped=7.0
+2023-02-06 10:03:57,937 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0
+2023-02-06 10:04:01,560 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82855.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 10:04:16,778 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1453, 1.0530, 1.1947, 1.0766, 0.8991, 1.2354, 0.0393, 0.8860],
+ device='cuda:2'), covar=tensor([0.2226, 0.1932, 0.0649, 0.1310, 0.3967, 0.0765, 0.3312, 0.2059],
+ device='cuda:2'), in_proj_covar=tensor([0.0166, 0.0166, 0.0096, 0.0214, 0.0255, 0.0105, 0.0163, 0.0165],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 10:04:19,319 INFO [train.py:901] (2/4) Epoch 11, batch 2050, loss[loss=0.2242, simple_loss=0.308, pruned_loss=0.07022, over 7812.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3127, pruned_loss=0.08141, over 1619352.35 frames. ], batch size: 20, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:04:55,308 INFO [train.py:901] (2/4) Epoch 11, batch 2100, loss[loss=0.2271, simple_loss=0.3158, pruned_loss=0.0692, over 8466.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3127, pruned_loss=0.08093, over 1619487.32 frames. ], batch size: 25, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:04:55,375 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82931.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:05:07,191 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.489e+02 3.174e+02 3.706e+02 9.083e+02, threshold=6.348e+02, percent-clipped=2.0
+2023-02-06 10:05:09,477 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 10:05:22,110 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82970.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 10:05:29,198 INFO [train.py:901] (2/4) Epoch 11, batch 2150, loss[loss=0.2624, simple_loss=0.3317, pruned_loss=0.09653, over 8082.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.315, pruned_loss=0.08242, over 1617390.68 frames. ], batch size: 21, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:05:29,352 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82981.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:05:53,364 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8020, 2.4281, 4.6899, 1.4909, 3.3649, 2.3542, 1.8753, 2.9359],
+ device='cuda:2'), covar=tensor([0.1548, 0.1979, 0.0621, 0.3666, 0.1312, 0.2633, 0.1724, 0.2223],
+ device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0514, 0.0537, 0.0574, 0.0616, 0.0548, 0.0469, 0.0611],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 10:06:04,068 INFO [train.py:901] (2/4) Epoch 11, batch 2200, loss[loss=0.2815, simple_loss=0.3495, pruned_loss=0.1067, over 8491.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3132, pruned_loss=0.0811, over 1619190.67 frames. ], batch size: 28, lr: 7.05e-03, grad_scale: 8.0
+2023-02-06 10:06:15,771 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:06:16,953 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.510e+02 3.092e+02 4.104e+02 1.639e+03, threshold=6.185e+02, percent-clipped=4.0
+2023-02-06 10:06:38,939 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83079.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:06:40,016 INFO [train.py:901] (2/4) Epoch 11, batch 2250, loss[loss=0.2735, simple_loss=0.342, pruned_loss=0.1025, over 8362.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3119, pruned_loss=0.0811, over 1613889.89 frames. ], batch size: 24, lr: 7.04e-03, grad_scale: 8.0
+2023-02-06 10:06:55,783 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83104.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:07:14,108 INFO [train.py:901] (2/4) Epoch 11, batch 2300, loss[loss=0.2132, simple_loss=0.2845, pruned_loss=0.07093, over 7534.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.313, pruned_loss=0.08228, over 1614129.34 frames. ], batch size: 18, lr: 7.04e-03, grad_scale: 8.0
+2023-02-06 10:07:25,647 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.480e+02 3.199e+02 4.275e+02 9.806e+02, threshold=6.398e+02, percent-clipped=6.0
+2023-02-06 10:07:48,926 INFO [train.py:901] (2/4) Epoch 11, batch 2350, loss[loss=0.2514, simple_loss=0.3233, pruned_loss=0.08974, over 8456.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3131, pruned_loss=0.08265, over 1611392.89 frames. ], batch size: 27, lr: 7.04e-03, grad_scale: 16.0
+2023-02-06 10:08:12,059 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83214.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:08:20,034 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83226.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 10:08:23,028 INFO [train.py:901] (2/4) Epoch 11, batch 2400, loss[loss=0.1952, simple_loss=0.2656, pruned_loss=0.06238, over 7260.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3126, pruned_loss=0.08222, over 1609499.80 frames. ], batch size: 16, lr: 7.04e-03, grad_scale: 16.0
+2023-02-06 10:08:35,098 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.359e+02 2.853e+02 3.666e+02 7.740e+02, threshold=5.706e+02, percent-clipped=1.0
+2023-02-06 10:08:37,259 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83251.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 10:08:58,651 INFO [train.py:901] (2/4) Epoch 11, batch 2450, loss[loss=0.2781, simple_loss=0.3336, pruned_loss=0.1113, over 7928.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3117, pruned_loss=0.08128, over 1610529.66 frames. ], batch size: 20, lr: 7.04e-03, grad_scale: 16.0
+2023-02-06 10:09:13,728 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:13,795 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:29,006 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83325.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:30,518 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:09:33,054 INFO [train.py:901] (2/4) Epoch 11, batch 2500, loss[loss=0.2587, simple_loss=0.3268, pruned_loss=0.09529, over 8463.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3112, pruned_loss=0.08096, over 1612640.51 frames. ], batch size: 27, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:09:44,598 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.634e+02 3.143e+02 3.904e+02 7.323e+02, threshold=6.285e+02, percent-clipped=4.0
+2023-02-06 10:10:07,372 INFO [train.py:901] (2/4) Epoch 11, batch 2550, loss[loss=0.2497, simple_loss=0.3284, pruned_loss=0.08554, over 8530.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3118, pruned_loss=0.08155, over 1613607.43 frames. ], batch size: 28, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:10:43,102 INFO [train.py:901] (2/4) Epoch 11, batch 2600, loss[loss=0.2604, simple_loss=0.3276, pruned_loss=0.09663, over 7191.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3115, pruned_loss=0.08132, over 1609506.16 frames. ], batch size: 71, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:10:49,491 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83440.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:10:54,710 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.607e+02 3.192e+02 4.372e+02 8.439e+02, threshold=6.384e+02, percent-clipped=10.0
+2023-02-06 10:10:55,569 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5490, 1.5787, 4.7138, 1.9085, 4.2085, 3.8867, 4.3014, 4.1133],
+ device='cuda:2'), covar=tensor([0.0469, 0.4090, 0.0436, 0.3189, 0.0951, 0.0817, 0.0419, 0.0515],
+ device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0569, 0.0586, 0.0528, 0.0599, 0.0503, 0.0502, 0.0576],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 10:11:17,509 INFO [train.py:901] (2/4) Epoch 11, batch 2650, loss[loss=0.2301, simple_loss=0.301, pruned_loss=0.07963, over 8209.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3113, pruned_loss=0.0811, over 1610372.84 frames. ], batch size: 23, lr: 7.03e-03, grad_scale: 16.0
+2023-02-06 10:11:52,411 INFO [train.py:901] (2/4) Epoch 11, batch 2700, loss[loss=0.198, simple_loss=0.2833, pruned_loss=0.05637, over 8567.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.31, pruned_loss=0.08023, over 1607217.90 frames. ], batch size: 31, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:11:58,916 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.47 vs. limit=5.0
+2023-02-06 10:12:04,666 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.578e+02 3.131e+02 4.095e+02 6.916e+02, threshold=6.263e+02, percent-clipped=2.0
+2023-02-06 10:12:11,631 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83558.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:12:27,318 INFO [train.py:901] (2/4) Epoch 11, batch 2750, loss[loss=0.2539, simple_loss=0.325, pruned_loss=0.09143, over 6693.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.3084, pruned_loss=0.07923, over 1603856.48 frames. ], batch size: 71, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:12:49,813 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1832, 1.1454, 3.3313, 0.9788, 2.9036, 2.8335, 3.0388, 2.9119],
+ device='cuda:2'), covar=tensor([0.0742, 0.3766, 0.0629, 0.3418, 0.1302, 0.0940, 0.0707, 0.0828],
+ device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0567, 0.0577, 0.0524, 0.0596, 0.0498, 0.0501, 0.0571],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 10:12:51,398 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 10:13:03,333 INFO [train.py:901] (2/4) Epoch 11, batch 2800, loss[loss=0.209, simple_loss=0.2858, pruned_loss=0.0661, over 8129.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3088, pruned_loss=0.07916, over 1606647.79 frames. ], batch size: 22, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:13:03,709 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 10:13:13,811 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83646.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:13:15,049 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.535e+02 3.136e+02 3.769e+02 1.201e+03, threshold=6.273e+02, percent-clipped=3.0
+2023-02-06 10:13:32,688 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83673.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:13:37,616 INFO [train.py:901] (2/4) Epoch 11, batch 2850, loss[loss=0.2981, simple_loss=0.3402, pruned_loss=0.128, over 7454.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.3086, pruned_loss=0.07906, over 1607728.74 frames. ], batch size: 17, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:13:37,812 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5212, 2.7322, 1.7457, 2.1007, 2.2178, 1.5683, 2.1276, 2.1299],
+ device='cuda:2'), covar=tensor([0.1453, 0.0396, 0.1189, 0.0709, 0.0667, 0.1354, 0.0965, 0.1000],
+ device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0235, 0.0314, 0.0294, 0.0301, 0.0323, 0.0338, 0.0309],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 10:13:47,912 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83696.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:02,150 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9429, 1.5092, 1.7033, 1.3218, 1.0751, 1.4458, 1.7207, 1.6502],
+ device='cuda:2'), covar=tensor([0.0560, 0.1218, 0.1710, 0.1377, 0.0598, 0.1478, 0.0693, 0.0596],
+ device='cuda:2'), in_proj_covar=tensor([0.0102, 0.0153, 0.0194, 0.0159, 0.0105, 0.0163, 0.0117, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:2')
+2023-02-06 10:14:05,631 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83721.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:12,966 INFO [train.py:901] (2/4) Epoch 11, batch 2900, loss[loss=0.2217, simple_loss=0.3063, pruned_loss=0.06856, over 8554.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3098, pruned_loss=0.07978, over 1609848.28 frames. ], batch size: 31, lr: 7.02e-03, grad_scale: 16.0
+2023-02-06 10:14:25,269 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.545e+02 3.159e+02 4.165e+02 9.643e+02, threshold=6.318e+02, percent-clipped=5.0
+2023-02-06 10:14:34,292 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:46,312 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:14:48,173 INFO [train.py:901] (2/4) Epoch 11, batch 2950, loss[loss=0.2457, simple_loss=0.3152, pruned_loss=0.08805, over 8190.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3104, pruned_loss=0.08027, over 1611243.22 frames. ], batch size: 23, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:14:53,616 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 10:15:19,845 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3155, 1.5300, 2.1259, 1.1689, 1.4670, 1.5771, 1.4021, 1.3945],
+ device='cuda:2'), covar=tensor([0.1807, 0.2106, 0.0850, 0.3743, 0.1616, 0.2874, 0.1851, 0.1992],
+ device='cuda:2'), in_proj_covar=tensor([0.0487, 0.0515, 0.0533, 0.0576, 0.0614, 0.0544, 0.0469, 0.0609],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 10:15:22,302 INFO [train.py:901] (2/4) Epoch 11, batch 3000, loss[loss=0.2977, simple_loss=0.3593, pruned_loss=0.118, over 7015.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3105, pruned_loss=0.08057, over 1609130.69 frames. ], batch size: 73, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:15:22,302 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 10:15:34,552 INFO [train.py:935] (2/4) Epoch 11, validation: loss=0.1889, simple_loss=0.2886, pruned_loss=0.04461, over 944034.00 frames.
+2023-02-06 10:15:34,553 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 10:15:46,617 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.511e+02 2.977e+02 3.600e+02 5.313e+02, threshold=5.953e+02, percent-clipped=0.0
+2023-02-06 10:16:10,356 INFO [train.py:901] (2/4) Epoch 11, batch 3050, loss[loss=0.2384, simple_loss=0.3244, pruned_loss=0.07622, over 8561.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3119, pruned_loss=0.08117, over 1611738.82 frames. ], batch size: 31, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:16:43,148 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83929.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:16:44,282 INFO [train.py:901] (2/4) Epoch 11, batch 3100, loss[loss=0.2595, simple_loss=0.338, pruned_loss=0.09051, over 8618.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3116, pruned_loss=0.08119, over 1616825.53 frames. ], batch size: 39, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:16:55,416 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 2.748e+02 3.262e+02 3.755e+02 7.942e+02, threshold=6.525e+02, percent-clipped=1.0
+2023-02-06 10:17:00,080 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:08,759 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:18,437 INFO [train.py:901] (2/4) Epoch 11, batch 3150, loss[loss=0.2112, simple_loss=0.2966, pruned_loss=0.06288, over 8103.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3111, pruned_loss=0.08099, over 1614665.70 frames. ], batch size: 23, lr: 7.01e-03, grad_scale: 16.0
+2023-02-06 10:17:34,197 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84003.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:42,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7751, 2.3521, 3.3739, 2.7950, 3.1037, 2.4054, 2.0110, 1.9682],
+ device='cuda:2'), covar=tensor([0.3102, 0.3514, 0.1235, 0.2143, 0.1775, 0.2168, 0.1538, 0.3827],
+ device='cuda:2'), in_proj_covar=tensor([0.0884, 0.0865, 0.0727, 0.0837, 0.0934, 0.0793, 0.0700, 0.0763],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 10:17:44,276 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84017.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:17:53,259 INFO [train.py:901] (2/4) Epoch 11, batch 3200, loss[loss=0.1925, simple_loss=0.2713, pruned_loss=0.05681, over 7438.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3112, pruned_loss=0.08124, over 1612410.23 frames. ], batch size: 17, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:18:01,384 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:18:05,764 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.726e+02 3.369e+02 4.220e+02 9.302e+02, threshold=6.739e+02, percent-clipped=4.0
+2023-02-06 10:18:27,182 INFO [train.py:901] (2/4) Epoch 11, batch 3250, loss[loss=0.2206, simple_loss=0.2915, pruned_loss=0.07483, over 7971.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3113, pruned_loss=0.08118, over 1614632.38 frames. ], batch size: 21, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:18:50,446 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:18:55,081 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:19:01,831 INFO [train.py:901] (2/4) Epoch 11, batch 3300, loss[loss=0.2251, simple_loss=0.2983, pruned_loss=0.07597, over 8067.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3119, pruned_loss=0.08137, over 1613118.58 frames. ], batch size: 21, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:19:08,603 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8683, 1.9476, 2.4012, 1.7024, 1.3614, 2.5826, 0.5304, 1.6169],
+ device='cuda:2'), covar=tensor([0.2969, 0.1699, 0.0532, 0.2268, 0.3932, 0.0395, 0.3333, 0.1754],
+ device='cuda:2'), in_proj_covar=tensor([0.0164, 0.0167, 0.0097, 0.0215, 0.0253, 0.0104, 0.0166, 0.0165],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 10:19:13,378 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.729e+02 3.101e+02 4.103e+02 8.191e+02, threshold=6.202e+02, percent-clipped=3.0
+2023-02-06 10:19:35,409 INFO [train.py:901] (2/4) Epoch 11, batch 3350, loss[loss=0.2576, simple_loss=0.3528, pruned_loss=0.08114, over 8576.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3133, pruned_loss=0.08207, over 1616299.18 frames. ], batch size: 31, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:19:37,881 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-02-06 10:20:01,020 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:20:10,208 INFO [train.py:901] (2/4) Epoch 11, batch 3400, loss[loss=0.2471, simple_loss=0.3211, pruned_loss=0.08659, over 8529.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3126, pruned_loss=0.08151, over 1613566.09 frames. ], batch size: 49, lr: 7.00e-03, grad_scale: 8.0
+2023-02-06 10:20:15,226 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84237.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:20:23,162 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 2.553e+02 3.068e+02 3.977e+02 7.727e+02, threshold=6.137e+02, percent-clipped=2.0
+2023-02-06 10:20:26,644 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:20:45,372 INFO [train.py:901] (2/4) Epoch 11, batch 3450, loss[loss=0.2185, simple_loss=0.3038, pruned_loss=0.06657, over 8558.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3137, pruned_loss=0.08238, over 1615402.10 frames. ], batch size: 31, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:21:06,407 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84311.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:21:20,276 INFO [train.py:901] (2/4) Epoch 11, batch 3500, loss[loss=0.2066, simple_loss=0.2743, pruned_loss=0.06948, over 8245.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3128, pruned_loss=0.08196, over 1614056.35 frames. ], batch size: 22, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:21:31,075 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:21:32,273 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.912e+02 2.703e+02 3.166e+02 4.187e+02 8.001e+02, threshold=6.332e+02, percent-clipped=6.0
+2023-02-06 10:21:36,475 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84354.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:21:48,769 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 10:21:54,138 INFO [train.py:901] (2/4) Epoch 11, batch 3550, loss[loss=0.2503, simple_loss=0.3196, pruned_loss=0.09056, over 7810.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3126, pruned_loss=0.08188, over 1604485.19 frames. ], batch size: 20, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:22:04,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3151, 2.6139, 1.7186, 2.1970, 1.9788, 1.3037, 1.7399, 2.1207],
+ device='cuda:2'), covar=tensor([0.1610, 0.0386, 0.1211, 0.0609, 0.0792, 0.1775, 0.1234, 0.0841],
+ device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0232, 0.0314, 0.0291, 0.0301, 0.0319, 0.0338, 0.0306],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 10:22:25,881 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:22:28,969 INFO [train.py:901] (2/4) Epoch 11, batch 3600, loss[loss=0.3038, simple_loss=0.3587, pruned_loss=0.1244, over 7089.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3135, pruned_loss=0.08274, over 1603401.75 frames. ], batch size: 71, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:22:41,776 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.788e+02 3.447e+02 4.179e+02 1.001e+03, threshold=6.895e+02, percent-clipped=4.0
+2023-02-06 10:22:48,586 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84459.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:22:50,733 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:02,588 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0
+2023-02-06 10:23:03,534 INFO [train.py:901] (2/4) Epoch 11, batch 3650, loss[loss=0.2185, simple_loss=0.2861, pruned_loss=0.07546, over 7645.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3121, pruned_loss=0.08167, over 1606116.86 frames. ], batch size: 19, lr: 6.99e-03, grad_scale: 8.0
+2023-02-06 10:23:07,613 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2690, 1.6046, 1.7113, 1.3521, 1.2621, 1.5002, 1.9889, 1.8947],
+ device='cuda:2'), covar=tensor([0.0433, 0.1152, 0.1652, 0.1381, 0.0524, 0.1407, 0.0586, 0.0521],
+ device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0158, 0.0105, 0.0163, 0.0117, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:2')
+2023-02-06 10:23:11,601 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84493.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:20,289 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7228, 1.7262, 2.1404, 1.6441, 1.1787, 2.1846, 0.2747, 1.2317],
+ device='cuda:2'), covar=tensor([0.2770, 0.1702, 0.0513, 0.1708, 0.4443, 0.0547, 0.3616, 0.2157],
+ device='cuda:2'), in_proj_covar=tensor([0.0163, 0.0166, 0.0097, 0.0212, 0.0253, 0.0104, 0.0165, 0.0165],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 10:23:28,901 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84518.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:37,222 INFO [train.py:901] (2/4) Epoch 11, batch 3700, loss[loss=0.2143, simple_loss=0.289, pruned_loss=0.06979, over 7950.00 frames. ], tot_loss[loss=0.2376, simple_loss=0.3119, pruned_loss=0.08162, over 1608912.03 frames. ], batch size: 20, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:23:46,139 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84543.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:23:48,588 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 10:23:49,869 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.648e+02 3.219e+02 3.938e+02 7.332e+02, threshold=6.437e+02, percent-clipped=1.0
+2023-02-06 10:23:58,483 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84561.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:24:07,286 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84574.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:24:08,932 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0
+2023-02-06 10:24:11,839 INFO [train.py:901] (2/4) Epoch 11, batch 3750, loss[loss=0.2313, simple_loss=0.3162, pruned_loss=0.07319, over 8373.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3106, pruned_loss=0.08032, over 1610796.16 frames. ], batch size: 24, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:24:23,994 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84598.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:24:46,870 INFO [train.py:901] (2/4) Epoch 11, batch 3800, loss[loss=0.2778, simple_loss=0.3447, pruned_loss=0.1055, over 8457.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3117, pruned_loss=0.08098, over 1613570.42 frames. ], batch size: 25, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:24:57,703 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7657, 1.7609, 2.1635, 1.7019, 1.1602, 2.1905, 0.4122, 1.4023],
+ device='cuda:2'), covar=tensor([0.2155, 0.1479, 0.0491, 0.1864, 0.4254, 0.0547, 0.3046, 0.1794],
+ device='cuda:2'), in_proj_covar=tensor([0.0160, 0.0164, 0.0095, 0.0210, 0.0251, 0.0103, 0.0164, 0.0163],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 10:24:58,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.607e+02 3.118e+02 4.251e+02 1.041e+03, threshold=6.237e+02, percent-clipped=4.0
+2023-02-06 10:25:00,934 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:18,149 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84676.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:18,379 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-02-06 10:25:21,361 INFO [train.py:901] (2/4) Epoch 11, batch 3850, loss[loss=0.2447, simple_loss=0.3214, pruned_loss=0.08397, over 7979.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3115, pruned_loss=0.08088, over 1611893.10 frames. ], batch size: 21, lr: 6.98e-03, grad_scale: 8.0
+2023-02-06 10:25:22,273 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84682.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:32,823 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:39,154 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3430, 1.9650, 2.9819, 2.3676, 2.7007, 2.1810, 1.7444, 1.3169],
+ device='cuda:2'), covar=tensor([0.3662, 0.3770, 0.1093, 0.2472, 0.1858, 0.2007, 0.1559, 0.3988],
+ device='cuda:2'), in_proj_covar=tensor([0.0881, 0.0862, 0.0724, 0.0834, 0.0930, 0.0794, 0.0701, 0.0762],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 10:25:39,748 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84707.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:43,786 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84713.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:47,049 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:25:51,558 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 10:25:53,839 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 10:25:55,492 INFO [train.py:901] (2/4) Epoch 11, batch 3900, loss[loss=0.2188, simple_loss=0.312, pruned_loss=0.06283, over 8185.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3111, pruned_loss=0.08083, over 1616363.04 frames. ], batch size: 23, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:26:03,707 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84743.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:26:08,291 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.619e+02 3.238e+02 3.926e+02 9.069e+02, threshold=6.476e+02, percent-clipped=5.0
+2023-02-06 10:26:30,325 INFO [train.py:901] (2/4) Epoch 11, batch 3950, loss[loss=0.2355, simple_loss=0.3131, pruned_loss=0.07892, over 7817.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.311, pruned_loss=0.08086, over 1616126.64 frames. ], batch size: 20, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:26:34,624 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2918, 1.4702, 1.6399, 1.4002, 1.0304, 1.4583, 1.8426, 1.6337],
+ device='cuda:2'), covar=tensor([0.0464, 0.1252, 0.1678, 0.1418, 0.0618, 0.1488, 0.0694, 0.0618],
+ device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0154, 0.0192, 0.0159, 0.0105, 0.0164, 0.0117, 0.0140],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:2')
+2023-02-06 10:26:52,779 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84813.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:26:56,211 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84818.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:27:04,812 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:27:05,273 INFO [train.py:901] (2/4) Epoch 11, batch 4000, loss[loss=0.2688, simple_loss=0.3499, pruned_loss=0.09383, over 8461.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3108, pruned_loss=0.08071, over 1612080.83 frames. ], batch size: 27, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:27:17,172 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.608e+02 2.990e+02 3.694e+02 8.393e+02, threshold=5.981e+02, percent-clipped=2.0
+2023-02-06 10:27:21,548 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84855.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:27:39,658 INFO [train.py:901] (2/4) Epoch 11, batch 4050, loss[loss=0.1938, simple_loss=0.2903, pruned_loss=0.04866, over 8025.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3116, pruned_loss=0.08062, over 1614451.41 frames. ], batch size: 22, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:27:44,485 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84887.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:15,559 INFO [train.py:901] (2/4) Epoch 11, batch 4100, loss[loss=0.2114, simple_loss=0.29, pruned_loss=0.06639, over 7817.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3117, pruned_loss=0.08099, over 1610239.98 frames. ], batch size: 20, lr: 6.97e-03, grad_scale: 8.0
+2023-02-06 10:28:16,482 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:27,742 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.518e+02 2.978e+02 3.788e+02 7.594e+02, threshold=5.956e+02, percent-clipped=4.0
+2023-02-06 10:28:33,376 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84957.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:34,752 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6181, 1.5052, 4.8416, 1.8472, 4.2693, 4.0135, 4.3727, 4.2662],
+ device='cuda:2'), covar=tensor([0.0491, 0.4123, 0.0346, 0.3234, 0.0939, 0.0755, 0.0441, 0.0527],
+ device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0573, 0.0581, 0.0525, 0.0602, 0.0513, 0.0505, 0.0575],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 10:28:41,369 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:41,447 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:49,616 INFO [train.py:901] (2/4) Epoch 11, batch 4150, loss[loss=0.2748, simple_loss=0.3383, pruned_loss=0.1057, over 7964.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3119, pruned_loss=0.08058, over 1611245.07 frames. ], batch size: 21, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:28:58,534 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84994.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:28:59,078 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84995.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:29:04,353 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85002.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:29:23,824 INFO [train.py:901] (2/4) Epoch 11, batch 4200, loss[loss=0.2156, simple_loss=0.2976, pruned_loss=0.06682, over 7798.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3114, pruned_loss=0.08048, over 1612989.08 frames. ], batch size: 19, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:29:36,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.581e+02 3.261e+02 3.967e+02 9.417e+02, threshold=6.523e+02, percent-clipped=7.0
+2023-02-06 10:29:47,924 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 10:29:50,138 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85069.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:29:58,035 INFO [train.py:901] (2/4) Epoch 11, batch 4250, loss[loss=0.2363, simple_loss=0.3139, pruned_loss=0.07933, over 8683.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3107, pruned_loss=0.08022, over 1610366.94 frames. ], batch size: 34, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:30:06,850 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:10,065 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 10:30:18,077 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85110.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:25,991 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1174, 1.4205, 4.3121, 1.5613, 3.7614, 3.6024, 3.8615, 3.6992],
+ device='cuda:2'), covar=tensor([0.0469, 0.3798, 0.0413, 0.3254, 0.1020, 0.0788, 0.0512, 0.0607],
+ device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0570, 0.0574, 0.0522, 0.0601, 0.0513, 0.0502, 0.0572],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 10:30:28,615 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:32,505 INFO [train.py:901] (2/4) Epoch 11, batch 4300, loss[loss=0.2423, simple_loss=0.3233, pruned_loss=0.08067, over 8252.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3112, pruned_loss=0.08087, over 1608528.59 frames. ], batch size: 24, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:30:33,970 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85133.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:30:45,182 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.616e+02 3.014e+02 4.154e+02 7.931e+02, threshold=6.027e+02, percent-clipped=5.0
+2023-02-06 10:30:54,027 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85162.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:31:06,968 INFO [train.py:901] (2/4) Epoch 11, batch 4350, loss[loss=0.2396, simple_loss=0.2999, pruned_loss=0.08968, over 7218.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3117, pruned_loss=0.08074, over 1611937.39 frames. ], batch size: 16, lr: 6.96e-03, grad_scale: 8.0
+2023-02-06 10:31:40,295 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 10:31:41,574 INFO [train.py:901] (2/4) Epoch 11, batch 4400, loss[loss=0.2777, simple_loss=0.3384, pruned_loss=0.1086, over 7806.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3116, pruned_loss=0.08102, over 1613839.59 frames. ], batch size: 20, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:31:54,340 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.553e+02 3.172e+02 3.669e+02 6.483e+02, threshold=6.345e+02, percent-clipped=4.0
+2023-02-06 10:32:01,478 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85258.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:05,695 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 10:32:14,081 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85277.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:16,486 INFO [train.py:901] (2/4) Epoch 11, batch 4450, loss[loss=0.2279, simple_loss=0.3023, pruned_loss=0.07672, over 8092.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3107, pruned_loss=0.08075, over 1608108.23 frames. ], batch size: 21, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:32:18,773 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85283.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:22,678 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 10:32:38,776 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85313.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:32:41,052 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 10:32:50,691 INFO [train.py:901] (2/4) Epoch 11, batch 4500, loss[loss=0.208, simple_loss=0.2866, pruned_loss=0.06467, over 7648.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3107, pruned_loss=0.08051, over 1611865.74 frames. ], batch size: 19, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:33:03,396 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.629e+02 3.227e+02 4.085e+02 1.162e+03, threshold=6.455e+02, percent-clipped=2.0
+2023-02-06 10:33:15,850 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 10:33:16,068 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85366.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:33:26,538 INFO [train.py:901] (2/4) Epoch 11, batch 4550, loss[loss=0.2265, simple_loss=0.3056, pruned_loss=0.07369, over 8656.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3129, pruned_loss=0.08217, over 1613726.30 frames. ], batch size: 34, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:33:33,530 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85391.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:33:59,392 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85428.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:01,128 INFO [train.py:901] (2/4) Epoch 11, batch 4600, loss[loss=0.2677, simple_loss=0.3496, pruned_loss=0.09292, over 8359.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3136, pruned_loss=0.08236, over 1615802.87 frames. ], batch size: 24, lr: 6.95e-03, grad_scale: 8.0
+2023-02-06 10:34:11,931 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:13,795 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.573e+02 3.214e+02 4.149e+02 1.527e+03, threshold=6.427e+02, percent-clipped=2.0
+2023-02-06 10:34:14,058 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4655, 1.8110, 2.8487, 1.2168, 1.8988, 1.7983, 1.4800, 1.8578],
+ device='cuda:2'), covar=tensor([0.1738, 0.2043, 0.0674, 0.3919, 0.1625, 0.2835, 0.1899, 0.2111],
+ device='cuda:2'), in_proj_covar=tensor([0.0489, 0.0517, 0.0533, 0.0578, 0.0617, 0.0554, 0.0473, 0.0612],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 10:34:28,038 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:33,579 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 10:34:36,251 INFO [train.py:901] (2/4) Epoch 11, batch 4650, loss[loss=0.2692, simple_loss=0.3563, pruned_loss=0.09111, over 8415.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.314, pruned_loss=0.08286, over 1613540.75 frames. ], batch size: 49, lr: 6.94e-03, grad_scale: 8.0
+2023-02-06 10:34:42,455 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs.
limit=2.0 +2023-02-06 10:34:50,393 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:54,609 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-06 10:35:11,112 INFO [train.py:901] (2/4) Epoch 11, batch 4700, loss[loss=0.2727, simple_loss=0.3455, pruned_loss=0.0999, over 8569.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3136, pruned_loss=0.0823, over 1612867.32 frames. ], batch size: 31, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:35:12,715 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:13,285 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0231, 6.0379, 5.2555, 2.6168, 5.3648, 5.6666, 5.4844, 5.3325], + device='cuda:2'), covar=tensor([0.0443, 0.0351, 0.0801, 0.4255, 0.0776, 0.0553, 0.0967, 0.0584], + device='cuda:2'), in_proj_covar=tensor([0.0472, 0.0377, 0.0379, 0.0487, 0.0377, 0.0380, 0.0376, 0.0334], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 10:35:22,559 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:23,054 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 2.022e+02 2.812e+02 3.491e+02 4.674e+02 1.006e+03, threshold=6.983e+02, percent-clipped=9.0 +2023-02-06 10:35:30,014 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:41,262 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1653, 1.4139, 2.3884, 1.2014, 2.0864, 2.5224, 2.6265, 2.1508], + device='cuda:2'), covar=tensor([0.1013, 0.1152, 0.0433, 0.1860, 0.0632, 0.0350, 0.0606, 0.0719], + device='cuda:2'), in_proj_covar=tensor([0.0258, 0.0292, 0.0254, 0.0285, 0.0266, 0.0232, 0.0333, 0.0286], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 10:35:45,764 INFO [train.py:901] (2/4) Epoch 11, batch 4750, loss[loss=0.197, simple_loss=0.2782, pruned_loss=0.05788, over 7807.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3143, pruned_loss=0.08222, over 1612221.45 frames. ], batch size: 19, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:35:47,965 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:53,286 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:36:08,950 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-06 10:36:09,863 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 10:36:11,813 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 10:36:20,649 INFO [train.py:901] (2/4) Epoch 11, batch 4800, loss[loss=0.227, simple_loss=0.3092, pruned_loss=0.07238, over 8572.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3144, pruned_loss=0.0827, over 1612409.26 frames. 
], batch size: 31, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:36:28,392 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 10:36:28,789 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85643.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:36:32,775 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.628e+02 3.255e+02 4.281e+02 8.051e+02, threshold=6.510e+02, percent-clipped=3.0 +2023-02-06 10:36:35,661 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5308, 4.4948, 4.0105, 1.8960, 3.9831, 4.1197, 4.0908, 3.8220], + device='cuda:2'), covar=tensor([0.0855, 0.0684, 0.1213, 0.5561, 0.1016, 0.1172, 0.1440, 0.1030], + device='cuda:2'), in_proj_covar=tensor([0.0473, 0.0377, 0.0379, 0.0485, 0.0376, 0.0380, 0.0375, 0.0334], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 10:36:55,313 INFO [train.py:901] (2/4) Epoch 11, batch 4850, loss[loss=0.2278, simple_loss=0.3131, pruned_loss=0.07125, over 8554.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3138, pruned_loss=0.0821, over 1613448.30 frames. ], batch size: 31, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:36:57,570 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:37:01,316 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 10:37:14,640 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:37:30,188 INFO [train.py:901] (2/4) Epoch 11, batch 4900, loss[loss=0.2, simple_loss=0.2768, pruned_loss=0.06157, over 7784.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.313, pruned_loss=0.08163, over 1612675.46 frames. ], batch size: 19, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:37:42,882 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.544e+02 3.151e+02 4.004e+02 8.063e+02, threshold=6.301e+02, percent-clipped=5.0 +2023-02-06 10:38:04,648 INFO [train.py:901] (2/4) Epoch 11, batch 4950, loss[loss=0.2502, simple_loss=0.3281, pruned_loss=0.08618, over 8365.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3115, pruned_loss=0.08093, over 1613919.44 frames. 
], batch size: 24, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:38:10,866 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2285, 1.3525, 1.3034, 1.8986, 0.6964, 1.1353, 1.3783, 1.4692], + device='cuda:2'), covar=tensor([0.1244, 0.0937, 0.1360, 0.0674, 0.1234, 0.1608, 0.0856, 0.0805], + device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0216, 0.0256, 0.0217, 0.0218, 0.0253, 0.0257, 0.0222], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 10:38:11,407 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85790.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:26,563 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5589, 2.2465, 3.3842, 2.6522, 2.9177, 2.4399, 1.8489, 1.6487], + device='cuda:2'), covar=tensor([0.3846, 0.4031, 0.1290, 0.2540, 0.2167, 0.2083, 0.1656, 0.4288], + device='cuda:2'), in_proj_covar=tensor([0.0886, 0.0868, 0.0735, 0.0834, 0.0940, 0.0799, 0.0704, 0.0773], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 10:38:39,606 INFO [train.py:901] (2/4) Epoch 11, batch 5000, loss[loss=0.2866, simple_loss=0.3494, pruned_loss=0.1119, over 8492.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3118, pruned_loss=0.08081, over 1617293.46 frames. ], batch size: 29, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:38:46,525 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85840.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:49,800 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85845.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:51,924 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:52,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.585e+02 3.219e+02 4.097e+02 8.363e+02, threshold=6.438e+02, percent-clipped=6.0 +2023-02-06 10:39:03,551 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:08,908 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:13,944 INFO [train.py:901] (2/4) Epoch 11, batch 5050, loss[loss=0.2408, simple_loss=0.3116, pruned_loss=0.08502, over 8623.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3123, pruned_loss=0.08108, over 1615743.76 frames. ], batch size: 34, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:39:21,268 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:22,013 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85893.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:30,135 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:39,824 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 10:39:48,590 INFO [train.py:901] (2/4) Epoch 11, batch 5100, loss[loss=0.2129, simple_loss=0.2959, pruned_loss=0.06491, over 7655.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3119, pruned_loss=0.0803, over 1624087.62 frames. 
], batch size: 19, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:39:53,825 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 10:40:00,818 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85948.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:40:01,252 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.570e+02 3.113e+02 3.980e+02 6.838e+02, threshold=6.226e+02, percent-clipped=2.0 +2023-02-06 10:40:02,904 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8586, 1.8875, 2.2268, 1.6429, 1.2563, 2.4349, 0.2929, 1.3127], + device='cuda:2'), covar=tensor([0.2258, 0.1704, 0.0739, 0.1880, 0.4099, 0.0451, 0.3426, 0.1914], + device='cuda:2'), in_proj_covar=tensor([0.0163, 0.0167, 0.0100, 0.0213, 0.0255, 0.0103, 0.0166, 0.0165], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 10:40:06,266 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0536, 1.5493, 1.5343, 1.3720, 1.0308, 1.3850, 1.8156, 1.7571], + device='cuda:2'), covar=tensor([0.0516, 0.1205, 0.1828, 0.1408, 0.0620, 0.1550, 0.0694, 0.0577], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0152, 0.0192, 0.0158, 0.0104, 0.0164, 0.0117, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 10:40:08,934 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85960.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:19,551 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:23,496 INFO [train.py:901] (2/4) Epoch 11, batch 5150, loss[loss=0.2237, simple_loss=0.2866, pruned_loss=0.08043, over 7636.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3129, pruned_loss=0.08077, over 1627022.31 frames. ], batch size: 19, lr: 6.92e-03, grad_scale: 8.0 +2023-02-06 10:40:27,617 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:38,308 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-06 10:40:42,270 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:51,395 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:59,434 INFO [train.py:901] (2/4) Epoch 11, batch 5200, loss[loss=0.2823, simple_loss=0.3362, pruned_loss=0.1142, over 8191.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3114, pruned_loss=0.07981, over 1621739.90 frames. ], batch size: 23, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:12,340 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.648e+02 3.082e+02 3.913e+02 1.007e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-06 10:41:27,748 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:35,328 INFO [train.py:901] (2/4) Epoch 11, batch 5250, loss[loss=0.2299, simple_loss=0.3081, pruned_loss=0.07584, over 8660.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3119, pruned_loss=0.08008, over 1624309.05 frames. 
], batch size: 34, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:39,724 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:40,967 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 10:41:50,759 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:10,643 INFO [train.py:901] (2/4) Epoch 11, batch 5300, loss[loss=0.2639, simple_loss=0.3355, pruned_loss=0.0961, over 8475.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3108, pruned_loss=0.07925, over 1620387.71 frames. ], batch size: 25, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:23,758 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.570e+02 3.118e+02 4.195e+02 8.045e+02, threshold=6.237e+02, percent-clipped=4.0 +2023-02-06 10:42:32,850 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:46,494 INFO [train.py:901] (2/4) Epoch 11, batch 5350, loss[loss=0.2147, simple_loss=0.2992, pruned_loss=0.06505, over 8634.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3104, pruned_loss=0.0795, over 1618211.03 frames. ], batch size: 34, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:50,726 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:12,428 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:14,734 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.73 vs. limit=5.0 +2023-02-06 10:43:22,276 INFO [train.py:901] (2/4) Epoch 11, batch 5400, loss[loss=0.2061, simple_loss=0.2919, pruned_loss=0.06021, over 8357.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3102, pruned_loss=0.07978, over 1616486.90 frames. ], batch size: 24, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:43:26,574 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86237.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:29,463 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:34,673 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.471e+02 3.223e+02 4.268e+02 9.619e+02, threshold=6.446e+02, percent-clipped=7.0 +2023-02-06 10:43:44,507 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:45,130 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:57,388 INFO [train.py:901] (2/4) Epoch 11, batch 5450, loss[loss=0.2312, simple_loss=0.2927, pruned_loss=0.08485, over 5152.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3103, pruned_loss=0.08014, over 1611525.11 frames. 
], batch size: 11, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:03,061 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:05,816 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86292.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:44:25,138 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:34,080 INFO [train.py:901] (2/4) Epoch 11, batch 5500, loss[loss=0.2563, simple_loss=0.3228, pruned_loss=0.09491, over 7970.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3103, pruned_loss=0.08042, over 1611551.09 frames. ], batch size: 21, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:34,719 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 10:44:46,143 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.494e+02 3.013e+02 3.770e+02 8.759e+02, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 10:44:48,466 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:52,694 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:56,787 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:09,177 INFO [train.py:901] (2/4) Epoch 11, batch 5550, loss[loss=0.2295, simple_loss=0.2998, pruned_loss=0.07955, over 8293.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3107, pruned_loss=0.08074, over 1609003.29 frames. ], batch size: 23, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:10,785 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:27,852 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86407.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:45:33,154 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:44,334 INFO [train.py:901] (2/4) Epoch 11, batch 5600, loss[loss=0.2448, simple_loss=0.32, pruned_loss=0.0848, over 8192.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3103, pruned_loss=0.08017, over 1611592.14 frames. 
], batch size: 23, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:44,407 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:46,522 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:57,250 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.527e+02 3.003e+02 3.802e+02 9.548e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-06 10:46:03,255 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:06,609 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:17,347 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:18,542 INFO [train.py:901] (2/4) Epoch 11, batch 5650, loss[loss=0.2083, simple_loss=0.2912, pruned_loss=0.06269, over 8298.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3103, pruned_loss=0.08019, over 1608724.68 frames. ], batch size: 23, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:46:39,900 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 10:46:44,119 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6898, 1.6511, 2.0598, 1.5079, 1.1790, 2.1051, 0.2232, 1.2343], + device='cuda:2'), covar=tensor([0.2375, 0.1826, 0.0452, 0.1438, 0.3909, 0.0431, 0.3387, 0.1847], + device='cuda:2'), in_proj_covar=tensor([0.0159, 0.0163, 0.0096, 0.0204, 0.0245, 0.0100, 0.0158, 0.0160], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 10:46:52,194 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:53,988 INFO [train.py:901] (2/4) Epoch 11, batch 5700, loss[loss=0.1858, simple_loss=0.2648, pruned_loss=0.05339, over 7680.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3095, pruned_loss=0.07959, over 1605339.63 frames. ], batch size: 18, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:04,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:06,038 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.473e+02 3.032e+02 3.837e+02 8.433e+02, threshold=6.065e+02, percent-clipped=5.0 +2023-02-06 10:47:26,925 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 10:47:28,594 INFO [train.py:901] (2/4) Epoch 11, batch 5750, loss[loss=0.2491, simple_loss=0.3208, pruned_loss=0.08868, over 8028.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3089, pruned_loss=0.07934, over 1604846.75 frames. 
], batch size: 22, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:35,590 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5163, 2.6618, 1.7859, 2.1595, 2.1361, 1.5329, 2.0762, 2.1908], + device='cuda:2'), covar=tensor([0.1322, 0.0355, 0.1092, 0.0638, 0.0670, 0.1390, 0.0954, 0.0828], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0234, 0.0315, 0.0297, 0.0304, 0.0323, 0.0341, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 10:47:40,227 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:42,137 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 10:47:47,610 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:47,750 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:03,809 INFO [train.py:901] (2/4) Epoch 11, batch 5800, loss[loss=0.2404, simple_loss=0.3191, pruned_loss=0.08078, over 8502.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3091, pruned_loss=0.07912, over 1606864.35 frames. ], batch size: 49, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:05,400 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:17,063 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.625e+02 3.434e+02 4.363e+02 1.044e+03, threshold=6.867e+02, percent-clipped=16.0 +2023-02-06 10:48:26,842 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86663.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:48:39,359 INFO [train.py:901] (2/4) Epoch 11, batch 5850, loss[loss=0.216, simple_loss=0.2934, pruned_loss=0.06932, over 7217.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.31, pruned_loss=0.07885, over 1610311.91 frames. ], batch size: 16, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:44,300 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86688.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:48:45,650 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:02,124 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:07,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:13,225 INFO [train.py:901] (2/4) Epoch 11, batch 5900, loss[loss=0.2291, simple_loss=0.31, pruned_loss=0.07409, over 8315.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3106, pruned_loss=0.07991, over 1606775.43 frames. 
], batch size: 25, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:16,680 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:25,724 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.878e+02 2.650e+02 3.002e+02 3.837e+02 8.505e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-06 10:49:33,352 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:48,258 INFO [train.py:901] (2/4) Epoch 11, batch 5950, loss[loss=0.2115, simple_loss=0.2831, pruned_loss=0.06996, over 7651.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3103, pruned_loss=0.07999, over 1606148.35 frames. ], batch size: 19, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:51,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:03,638 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:03,791 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:06,871 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:09,062 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:20,485 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:22,934 INFO [train.py:901] (2/4) Epoch 11, batch 6000, loss[loss=0.2462, simple_loss=0.3089, pruned_loss=0.09171, over 8253.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3086, pruned_loss=0.07894, over 1605126.83 frames. ], batch size: 22, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:50:22,934 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 10:50:35,333 INFO [train.py:935] (2/4) Epoch 11, validation: loss=0.1887, simple_loss=0.2887, pruned_loss=0.04439, over 944034.00 frames. +2023-02-06 10:50:35,334 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 10:50:36,203 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86832.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:47,363 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.431e+02 2.934e+02 3.566e+02 7.044e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-06 10:51:00,724 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 10:51:10,322 INFO [train.py:901] (2/4) Epoch 11, batch 6050, loss[loss=0.2086, simple_loss=0.2861, pruned_loss=0.0656, over 7814.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3108, pruned_loss=0.0805, over 1607098.56 frames. 
], batch size: 20, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:20,528 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:35,573 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:39,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:45,226 INFO [train.py:901] (2/4) Epoch 11, batch 6100, loss[loss=0.2682, simple_loss=0.3406, pruned_loss=0.0979, over 8496.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3104, pruned_loss=0.07972, over 1614094.24 frames. ], batch size: 29, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:53,495 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:58,246 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.604e+02 3.114e+02 3.901e+02 9.212e+02, threshold=6.229e+02, percent-clipped=4.0 +2023-02-06 10:52:06,857 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 10:52:19,158 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:20,334 INFO [train.py:901] (2/4) Epoch 11, batch 6150, loss[loss=0.2666, simple_loss=0.3293, pruned_loss=0.102, over 8573.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.31, pruned_loss=0.0788, over 1620995.98 frames. ], batch size: 31, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:52:36,926 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:47,749 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87020.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:52:55,887 INFO [train.py:901] (2/4) Epoch 11, batch 6200, loss[loss=0.2308, simple_loss=0.3139, pruned_loss=0.0738, over 8359.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.309, pruned_loss=0.07818, over 1616156.15 frames. ], batch size: 24, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:52:56,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0891, 1.6831, 4.3541, 1.9096, 2.4249, 4.9102, 4.9363, 4.2484], + device='cuda:2'), covar=tensor([0.1180, 0.1642, 0.0271, 0.1974, 0.1150, 0.0179, 0.0347, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0260, 0.0293, 0.0254, 0.0287, 0.0267, 0.0231, 0.0333, 0.0288], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:2') +2023-02-06 10:53:07,899 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.592e+02 3.192e+02 4.476e+02 1.804e+03, threshold=6.384e+02, percent-clipped=5.0 +2023-02-06 10:53:14,449 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:53:30,909 INFO [train.py:901] (2/4) Epoch 11, batch 6250, loss[loss=0.2638, simple_loss=0.3417, pruned_loss=0.09297, over 8286.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3091, pruned_loss=0.07867, over 1611132.56 frames. 
], batch size: 23, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:06,553 INFO [train.py:901] (2/4) Epoch 11, batch 6300, loss[loss=0.2306, simple_loss=0.3089, pruned_loss=0.07618, over 8342.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3098, pruned_loss=0.07918, over 1614379.41 frames. ], batch size: 24, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:19,292 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.563e+02 3.017e+02 3.734e+02 8.364e+02, threshold=6.034e+02, percent-clipped=3.0 +2023-02-06 10:54:36,421 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:38,304 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87176.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:39,797 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:41,627 INFO [train.py:901] (2/4) Epoch 11, batch 6350, loss[loss=0.2194, simple_loss=0.3018, pruned_loss=0.06847, over 7977.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3089, pruned_loss=0.07842, over 1614592.03 frames. ], batch size: 21, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:53,184 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:57,269 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:58,591 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9393, 1.4997, 1.6678, 1.4388, 0.8707, 1.3896, 1.5549, 1.5307], + device='cuda:2'), covar=tensor([0.0500, 0.1187, 0.1693, 0.1301, 0.0609, 0.1486, 0.0694, 0.0611], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0158, 0.0104, 0.0164, 0.0116, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 10:55:03,603 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.61 vs. limit=5.0 +2023-02-06 10:55:16,800 INFO [train.py:901] (2/4) Epoch 11, batch 6400, loss[loss=0.2165, simple_loss=0.3034, pruned_loss=0.06485, over 8364.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3087, pruned_loss=0.07784, over 1619721.86 frames. 
], batch size: 24, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:19,151 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4952, 1.9285, 2.9684, 2.3580, 2.6528, 2.2781, 1.8643, 1.4362], + device='cuda:2'), covar=tensor([0.3678, 0.4044, 0.1122, 0.2406, 0.1741, 0.1993, 0.1522, 0.3846], + device='cuda:2'), in_proj_covar=tensor([0.0891, 0.0871, 0.0731, 0.0843, 0.0933, 0.0798, 0.0702, 0.0766], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 10:55:23,181 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:28,876 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:29,363 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.577e+02 3.020e+02 3.786e+02 7.428e+02, threshold=6.041e+02, percent-clipped=2.0 +2023-02-06 10:55:51,534 INFO [train.py:901] (2/4) Epoch 11, batch 6450, loss[loss=0.2738, simple_loss=0.352, pruned_loss=0.09779, over 8466.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3095, pruned_loss=0.07847, over 1617260.54 frames. ], batch size: 25, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:59,191 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:14,180 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:27,324 INFO [train.py:901] (2/4) Epoch 11, batch 6500, loss[loss=0.2483, simple_loss=0.3183, pruned_loss=0.08912, over 8513.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3099, pruned_loss=0.0788, over 1617284.03 frames. ], batch size: 29, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:56:32,334 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:36,462 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6829, 5.6884, 5.0250, 2.2693, 5.0754, 5.4081, 5.2295, 5.1648], + device='cuda:2'), covar=tensor([0.0543, 0.0405, 0.0843, 0.4918, 0.0708, 0.0842, 0.1133, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0366, 0.0380, 0.0476, 0.0376, 0.0372, 0.0372, 0.0329], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 10:56:39,860 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.605e+02 3.245e+02 4.169e+02 7.875e+02, threshold=6.489e+02, percent-clipped=5.0 +2023-02-06 10:56:44,220 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:50,424 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87364.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:57:01,898 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:57:02,432 INFO [train.py:901] (2/4) Epoch 11, batch 6550, loss[loss=0.2411, simple_loss=0.3193, pruned_loss=0.08147, over 8286.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3099, pruned_loss=0.07917, over 1616310.39 frames. 
], batch size: 23, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:17,755 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 10:57:37,081 INFO [train.py:901] (2/4) Epoch 11, batch 6600, loss[loss=0.1941, simple_loss=0.2714, pruned_loss=0.05838, over 7806.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3103, pruned_loss=0.07916, over 1621846.06 frames. ], batch size: 19, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:37,783 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 10:57:50,090 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.293e+02 2.790e+02 3.732e+02 8.562e+02, threshold=5.581e+02, percent-clipped=1.0 +2023-02-06 10:57:50,526 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.77 vs. limit=2.0 +2023-02-06 10:58:11,509 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:11,545 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87479.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:58:12,690 INFO [train.py:901] (2/4) Epoch 11, batch 6650, loss[loss=0.2044, simple_loss=0.286, pruned_loss=0.06146, over 8067.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3091, pruned_loss=0.07814, over 1622677.22 frames. ], batch size: 21, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:12,874 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1620, 2.1610, 1.9387, 2.0830, 1.1135, 1.8101, 2.2068, 2.3107], + device='cuda:2'), covar=tensor([0.0375, 0.1085, 0.1632, 0.1137, 0.0550, 0.1323, 0.0607, 0.0519], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0159, 0.0104, 0.0164, 0.0116, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:2') +2023-02-06 10:58:35,863 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 10:58:41,518 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:47,482 INFO [train.py:901] (2/4) Epoch 11, batch 6700, loss[loss=0.2605, simple_loss=0.3268, pruned_loss=0.09714, over 8471.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3091, pruned_loss=0.07866, over 1622799.40 frames. ], batch size: 29, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:58,351 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:59,467 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.493e+02 3.158e+02 4.170e+02 8.693e+02, threshold=6.316e+02, percent-clipped=8.0 +2023-02-06 10:59:16,933 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:22,975 INFO [train.py:901] (2/4) Epoch 11, batch 6750, loss[loss=0.2622, simple_loss=0.3257, pruned_loss=0.09937, over 8136.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.309, pruned_loss=0.0789, over 1617188.43 frames. 
], batch size: 22, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:59:30,579 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:37,578 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:40,785 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:43,424 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:49,572 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8569, 3.7561, 2.3331, 2.3299, 2.6411, 1.8817, 2.2940, 2.6861], + device='cuda:2'), covar=tensor([0.1708, 0.0339, 0.1039, 0.0918, 0.0644, 0.1350, 0.1148, 0.1125], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0231, 0.0312, 0.0293, 0.0298, 0.0316, 0.0335, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 10:59:52,301 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:56,901 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 10:59:57,591 INFO [train.py:901] (2/4) Epoch 11, batch 6800, loss[loss=0.2055, simple_loss=0.2784, pruned_loss=0.06634, over 7514.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3089, pruned_loss=0.07952, over 1615114.76 frames. ], batch size: 18, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:01,243 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:00:10,523 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.375e+02 2.980e+02 3.798e+02 7.616e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 11:00:32,377 INFO [train.py:901] (2/4) Epoch 11, batch 6850, loss[loss=0.2215, simple_loss=0.2967, pruned_loss=0.07311, over 7784.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3101, pruned_loss=0.08013, over 1612444.85 frames. ], batch size: 19, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:33,307 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5321, 2.1041, 3.4932, 1.3368, 2.4846, 1.9868, 1.6813, 2.3836], + device='cuda:2'), covar=tensor([0.1739, 0.2276, 0.0677, 0.3842, 0.1597, 0.2840, 0.1827, 0.2306], + device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0518, 0.0526, 0.0572, 0.0611, 0.0548, 0.0466, 0.0608], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 11:00:45,135 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 11:00:50,749 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:01:01,875 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:01:06,414 INFO [train.py:901] (2/4) Epoch 11, batch 6900, loss[loss=0.1918, simple_loss=0.2728, pruned_loss=0.05539, over 7647.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.31, pruned_loss=0.07977, over 1615199.87 frames. 
], batch size: 19, lr: 6.86e-03, grad_scale: 16.0
+2023-02-06 11:01:10,021 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87735.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:01:19,190 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.628e+02 3.043e+02 4.130e+02 7.700e+02, threshold=6.086e+02, percent-clipped=2.0
+2023-02-06 11:01:20,945 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 11:01:26,849 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87760.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:01:41,613 INFO [train.py:901] (2/4) Epoch 11, batch 6950, loss[loss=0.2231, simple_loss=0.3084, pruned_loss=0.06893, over 8240.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3098, pruned_loss=0.0793, over 1615110.22 frames. ], batch size: 22, lr: 6.85e-03, grad_scale: 16.0
+2023-02-06 11:01:49,691 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0
+2023-02-06 11:01:50,314 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 11:01:52,562 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 11:02:11,620 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87823.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:02:16,826 INFO [train.py:901] (2/4) Epoch 11, batch 7000, loss[loss=0.2193, simple_loss=0.2873, pruned_loss=0.0756, over 7709.00 frames. ], tot_loss[loss=0.2334, simple_loss=0.3087, pruned_loss=0.07899, over 1610960.86 frames. ], batch size: 18, lr: 6.85e-03, grad_scale: 16.0
+2023-02-06 11:02:22,314 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87839.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:02:29,502 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.548e+02 3.185e+02 4.052e+02 9.283e+02, threshold=6.369e+02, percent-clipped=6.0
+2023-02-06 11:02:41,530 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87867.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:02:51,623 INFO [train.py:901] (2/4) Epoch 11, batch 7050, loss[loss=0.2725, simple_loss=0.3473, pruned_loss=0.09881, over 8336.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3102, pruned_loss=0.07978, over 1614276.43 frames. ], batch size: 26, lr: 6.85e-03, grad_scale: 16.0
+2023-02-06 11:03:26,705 INFO [train.py:901] (2/4) Epoch 11, batch 7100, loss[loss=0.1944, simple_loss=0.2646, pruned_loss=0.06207, over 7403.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.31, pruned_loss=0.0797, over 1615338.83 frames. ], batch size: 17, lr: 6.85e-03, grad_scale: 16.0
+2023-02-06 11:03:31,613 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87938.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:03:36,826 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87946.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:03:38,767 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.732e+02 3.356e+02 4.654e+02 1.650e+03, threshold=6.712e+02, percent-clipped=12.0
+2023-02-06 11:03:40,144 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87951.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:03:48,305 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87963.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:03:51,560 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:04:00,730 INFO [train.py:901] (2/4) Epoch 11, batch 7150, loss[loss=0.2719, simple_loss=0.3343, pruned_loss=0.1047, over 8458.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3114, pruned_loss=0.08094, over 1611293.33 frames. ], batch size: 39, lr: 6.85e-03, grad_scale: 16.0
+2023-02-06 11:04:01,618 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87982.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:04:05,830 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87988.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:04:36,671 INFO [train.py:901] (2/4) Epoch 11, batch 7200, loss[loss=0.2495, simple_loss=0.3314, pruned_loss=0.08379, over 8581.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3114, pruned_loss=0.08093, over 1612202.01 frames. ], batch size: 31, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:04:49,439 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.591e+02 3.086e+02 3.706e+02 9.715e+02, threshold=6.172e+02, percent-clipped=2.0
+2023-02-06 11:04:57,783 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:01,282 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:11,822 INFO [train.py:901] (2/4) Epoch 11, batch 7250, loss[loss=0.216, simple_loss=0.2951, pruned_loss=0.06843, over 8464.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3106, pruned_loss=0.0802, over 1609564.21 frames. ], batch size: 49, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:05:12,645 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:21,492 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:37,917 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88118.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:39,337 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88120.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:05:40,258 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0
+2023-02-06 11:05:46,909 INFO [train.py:901] (2/4) Epoch 11, batch 7300, loss[loss=0.2384, simple_loss=0.318, pruned_loss=0.07939, over 8500.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3101, pruned_loss=0.07962, over 1609023.09 frames. ], batch size: 28, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:06:00,690 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.414e+02 2.958e+02 3.757e+02 7.369e+02, threshold=5.915e+02, percent-clipped=2.0
+2023-02-06 11:06:10,197 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 11:06:22,820 INFO [train.py:901] (2/4) Epoch 11, batch 7350, loss[loss=0.1819, simple_loss=0.2578, pruned_loss=0.05297, over 7705.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3102, pruned_loss=0.0797, over 1606891.59 frames. ], batch size: 18, lr: 6.84e-03, grad_scale: 32.0
+2023-02-06 11:06:32,250 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 11:06:32,470 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88194.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:06:50,351 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:06:51,485 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 11:06:58,211 INFO [train.py:901] (2/4) Epoch 11, batch 7400, loss[loss=0.2622, simple_loss=0.3365, pruned_loss=0.09398, over 8341.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3105, pruned_loss=0.07947, over 1607029.45 frames. ], batch size: 26, lr: 6.84e-03, grad_scale: 16.0
+2023-02-06 11:07:03,169 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:07:11,892 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.577e+02 3.074e+02 3.691e+02 9.024e+02, threshold=6.148e+02, percent-clipped=4.0
+2023-02-06 11:07:20,894 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:07:26,470 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-06 11:07:31,795 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7262, 2.3229, 4.4721, 1.4621, 3.0869, 2.2646, 1.9345, 2.8530],
+       device='cuda:2'), covar=tensor([0.1567, 0.2224, 0.0588, 0.3840, 0.1494, 0.2742, 0.1669, 0.2260],
+       device='cuda:2'), in_proj_covar=tensor([0.0487, 0.0522, 0.0529, 0.0576, 0.0618, 0.0558, 0.0472, 0.0618],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 11:07:32,884 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 11:07:33,599 INFO [train.py:901] (2/4) Epoch 11, batch 7450, loss[loss=0.1996, simple_loss=0.2764, pruned_loss=0.0614, over 7759.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3093, pruned_loss=0.07902, over 1607976.19 frames. ], batch size: 19, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:07:52,585 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88309.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:07:58,569 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88317.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:01,847 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:07,738 INFO [train.py:901] (2/4) Epoch 11, batch 7500, loss[loss=0.2262, simple_loss=0.299, pruned_loss=0.07667, over 7906.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.309, pruned_loss=0.07882, over 1607992.46 frames. ], batch size: 20, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:08:13,326 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88338.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:15,898 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88342.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:19,203 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:20,935 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.827e+02 3.509e+02 4.304e+02 1.282e+03, threshold=7.018e+02, percent-clipped=8.0
+2023-02-06 11:08:30,002 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88363.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:08:42,589 INFO [train.py:901] (2/4) Epoch 11, batch 7550, loss[loss=0.2403, simple_loss=0.3229, pruned_loss=0.0789, over 8362.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3105, pruned_loss=0.07964, over 1609899.34 frames. ], batch size: 26, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:09:17,376 INFO [train.py:901] (2/4) Epoch 11, batch 7600, loss[loss=0.255, simple_loss=0.321, pruned_loss=0.09453, over 7307.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3106, pruned_loss=0.08023, over 1611199.59 frames. ], batch size: 72, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:09:31,032 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 2.441e+02 2.975e+02 3.888e+02 6.138e+02, threshold=5.951e+02, percent-clipped=0.0
+2023-02-06 11:09:39,150 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:09:49,837 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0772, 2.9960, 2.1758, 4.2563, 1.5119, 2.0030, 2.1950, 2.9919],
+       device='cuda:2'), covar=tensor([0.1029, 0.0697, 0.1145, 0.0233, 0.1156, 0.1366, 0.1071, 0.0716],
+       device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0213, 0.0256, 0.0216, 0.0220, 0.0255, 0.0256, 0.0222],
+       device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+       device='cuda:2')
+2023-02-06 11:09:51,498 INFO [train.py:901] (2/4) Epoch 11, batch 7650, loss[loss=0.1819, simple_loss=0.2636, pruned_loss=0.05008, over 7225.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3111, pruned_loss=0.07991, over 1614313.51 frames. ], batch size: 16, lr: 6.83e-03, grad_scale: 16.0
+2023-02-06 11:10:24,205 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-06 11:10:24,769 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.97 vs. limit=5.0
+2023-02-06 11:10:26,441 INFO [train.py:901] (2/4) Epoch 11, batch 7700, loss[loss=0.2447, simple_loss=0.3223, pruned_loss=0.08353, over 8024.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3107, pruned_loss=0.07975, over 1614737.86 frames. ], batch size: 22, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:10:39,149 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 11:10:39,701 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.472e+02 3.053e+02 3.571e+02 8.603e+02, threshold=6.105e+02, percent-clipped=3.0
+2023-02-06 11:10:58,418 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88577.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:11:00,812 INFO [train.py:901] (2/4) Epoch 11, batch 7750, loss[loss=0.1729, simple_loss=0.2494, pruned_loss=0.0482, over 7427.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3102, pruned_loss=0.07984, over 1616422.33 frames. ], batch size: 17, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:11:36,340 INFO [train.py:901] (2/4) Epoch 11, batch 7800, loss[loss=0.2222, simple_loss=0.2983, pruned_loss=0.07309, over 7975.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3111, pruned_loss=0.08027, over 1610961.15 frames. ], batch size: 21, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:11:37,767 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6505, 2.8516, 1.9156, 2.2847, 2.3206, 1.5892, 2.2124, 2.2488],
+       device='cuda:2'), covar=tensor([0.1458, 0.0351, 0.1112, 0.0671, 0.0621, 0.1430, 0.1002, 0.0986],
+       device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0236, 0.0318, 0.0299, 0.0302, 0.0323, 0.0339, 0.0308],
+       device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 11:11:48,833 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.685e+02 3.345e+02 4.152e+02 1.012e+03, threshold=6.690e+02, percent-clipped=6.0
+2023-02-06 11:11:50,872 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88653.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:11:51,150 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0
+2023-02-06 11:12:09,486 INFO [train.py:901] (2/4) Epoch 11, batch 7850, loss[loss=0.2377, simple_loss=0.3047, pruned_loss=0.08535, over 7923.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3109, pruned_loss=0.0807, over 1610416.92 frames. ], batch size: 20, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:12:33,306 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0775, 1.7302, 2.5131, 1.9709, 2.3000, 1.9186, 1.5659, 1.0225],
+       device='cuda:2'), covar=tensor([0.4077, 0.3842, 0.1141, 0.2532, 0.1787, 0.2406, 0.1840, 0.3706],
+       device='cuda:2'), in_proj_covar=tensor([0.0886, 0.0865, 0.0727, 0.0842, 0.0925, 0.0798, 0.0701, 0.0763],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:12:42,909 INFO [train.py:901] (2/4) Epoch 11, batch 7900, loss[loss=0.1805, simple_loss=0.265, pruned_loss=0.048, over 7807.00 frames. ], tot_loss[loss=0.2352, simple_loss=0.3103, pruned_loss=0.0801, over 1608520.95 frames. ], batch size: 19, lr: 6.82e-03, grad_scale: 16.0
+2023-02-06 11:12:55,418 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.490e+02 3.060e+02 3.735e+02 6.734e+02, threshold=6.120e+02, percent-clipped=1.0
+2023-02-06 11:13:07,255 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88768.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:13:15,813 INFO [train.py:901] (2/4) Epoch 11, batch 7950, loss[loss=0.2342, simple_loss=0.3128, pruned_loss=0.07781, over 8520.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3114, pruned_loss=0.0803, over 1613840.56 frames. ], batch size: 28, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:13:46,952 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3886, 2.7556, 3.1202, 1.4699, 3.4161, 1.9076, 1.5480, 2.0065],
+       device='cuda:2'), covar=tensor([0.0533, 0.0234, 0.0206, 0.0485, 0.0282, 0.0582, 0.0580, 0.0336],
+       device='cuda:2'), in_proj_covar=tensor([0.0386, 0.0322, 0.0270, 0.0376, 0.0308, 0.0468, 0.0353, 0.0347],
+       device='cuda:2'), out_proj_covar=tensor([1.0990e-04, 8.9445e-05, 7.5304e-05, 1.0534e-04, 8.7000e-05, 1.4238e-04,
+       1.0043e-04, 9.8118e-05], device='cuda:2')
+2023-02-06 11:13:49,364 INFO [train.py:901] (2/4) Epoch 11, batch 8000, loss[loss=0.2166, simple_loss=0.28, pruned_loss=0.07659, over 7699.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3098, pruned_loss=0.07947, over 1605800.79 frames. ], batch size: 18, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:13:50,909 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:14:02,010 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.603e+02 3.071e+02 3.730e+02 8.421e+02, threshold=6.141e+02, percent-clipped=3.0
+2023-02-06 11:14:07,241 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88858.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:14:22,236 INFO [train.py:901] (2/4) Epoch 11, batch 8050, loss[loss=0.1923, simple_loss=0.2607, pruned_loss=0.06197, over 7226.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3068, pruned_loss=0.07819, over 1592394.89 frames. ], batch size: 16, lr: 6.81e-03, grad_scale: 16.0
+2023-02-06 11:14:41,733 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9960, 2.4017, 1.9312, 3.0288, 1.3319, 1.5821, 1.8301, 2.4927],
+       device='cuda:2'), covar=tensor([0.0775, 0.0795, 0.0970, 0.0332, 0.1162, 0.1435, 0.1043, 0.0674],
+       device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0216, 0.0256, 0.0215, 0.0219, 0.0253, 0.0258, 0.0222],
+       device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:2')
+2023-02-06 11:14:54,231 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 11:14:58,686 INFO [train.py:901] (2/4) Epoch 12, batch 0, loss[loss=0.2166, simple_loss=0.2942, pruned_loss=0.06946, over 8038.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2942, pruned_loss=0.06946, over 8038.00 frames. ], batch size: 22, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:14:58,687 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 11:15:09,781 INFO [train.py:935] (2/4) Epoch 12, validation: loss=0.1897, simple_loss=0.2896, pruned_loss=0.04486, over 944034.00 frames.
+2023-02-06 11:15:09,782 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 11:15:23,303 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 11:15:35,201 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.690e+02 3.540e+02 4.339e+02 7.249e+02, threshold=7.080e+02, percent-clipped=5.0
+2023-02-06 11:15:44,686 INFO [train.py:901] (2/4) Epoch 12, batch 50, loss[loss=0.2658, simple_loss=0.3426, pruned_loss=0.09449, over 8444.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3155, pruned_loss=0.08072, over 366782.36 frames. ], batch size: 27, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:15:57,434 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 11:16:19,045 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 11:16:19,755 INFO [train.py:901] (2/4) Epoch 12, batch 100, loss[loss=0.2075, simple_loss=0.2887, pruned_loss=0.06314, over 7971.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3125, pruned_loss=0.08022, over 646310.94 frames. ], batch size: 21, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:16:26,516 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89024.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:16:33,361 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.50 vs. limit=5.0
+2023-02-06 11:16:40,637 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:16:43,468 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89049.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:16:43,913 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.771e+02 3.256e+02 4.152e+02 1.357e+03, threshold=6.512e+02, percent-clipped=1.0
+2023-02-06 11:16:54,731 INFO [train.py:901] (2/4) Epoch 12, batch 150, loss[loss=0.234, simple_loss=0.3124, pruned_loss=0.0778, over 7648.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3101, pruned_loss=0.07862, over 859261.58 frames. ], batch size: 19, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:17:24,723 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 11:17:29,009 INFO [train.py:901] (2/4) Epoch 12, batch 200, loss[loss=0.2317, simple_loss=0.3064, pruned_loss=0.07851, over 7548.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3106, pruned_loss=0.07968, over 1026211.66 frames. ], batch size: 18, lr: 6.52e-03, grad_scale: 16.0
+2023-02-06 11:17:53,943 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.712e+02 3.423e+02 4.383e+02 1.008e+03, threshold=6.845e+02, percent-clipped=3.0
+2023-02-06 11:18:03,547 INFO [train.py:901] (2/4) Epoch 12, batch 250, loss[loss=0.221, simple_loss=0.2991, pruned_loss=0.07145, over 8510.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3113, pruned_loss=0.07958, over 1162040.51 frames. ], batch size: 26, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:18:13,231 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 11:18:17,197 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0
+2023-02-06 11:18:20,886 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:18:22,844 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 11:18:40,040 INFO [train.py:901] (2/4) Epoch 12, batch 300, loss[loss=0.1816, simple_loss=0.2666, pruned_loss=0.04824, over 7806.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3118, pruned_loss=0.07972, over 1264953.02 frames. ], batch size: 20, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:19:05,001 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.536e+02 3.052e+02 3.921e+02 6.584e+02, threshold=6.103e+02, percent-clipped=0.0
+2023-02-06 11:19:14,500 INFO [train.py:901] (2/4) Epoch 12, batch 350, loss[loss=0.2412, simple_loss=0.3213, pruned_loss=0.08062, over 8317.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.3108, pruned_loss=0.07992, over 1343850.90 frames. ], batch size: 25, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:19:49,367 INFO [train.py:901] (2/4) Epoch 12, batch 400, loss[loss=0.2616, simple_loss=0.3295, pruned_loss=0.09679, over 8617.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3103, pruned_loss=0.07991, over 1403050.63 frames. ], batch size: 39, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:20:14,265 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.417e+02 2.965e+02 3.513e+02 5.511e+02, threshold=5.929e+02, percent-clipped=0.0
+2023-02-06 11:20:24,218 INFO [train.py:901] (2/4) Epoch 12, batch 450, loss[loss=0.2268, simple_loss=0.3118, pruned_loss=0.07087, over 8360.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3096, pruned_loss=0.0793, over 1452148.55 frames. ], batch size: 24, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:20:25,042 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1173, 1.0779, 4.2985, 1.5538, 3.7510, 3.5478, 3.8357, 3.6895],
+       device='cuda:2'), covar=tensor([0.0529, 0.4630, 0.0418, 0.3389, 0.1016, 0.0842, 0.0554, 0.0667],
+       device='cuda:2'), in_proj_covar=tensor([0.0490, 0.0571, 0.0579, 0.0529, 0.0606, 0.0511, 0.0507, 0.0575],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:20:40,988 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:20:43,195 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:20:58,861 INFO [train.py:901] (2/4) Epoch 12, batch 500, loss[loss=0.2395, simple_loss=0.314, pruned_loss=0.08249, over 8485.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3103, pruned_loss=0.07917, over 1489783.61 frames. ], batch size: 28, lr: 6.51e-03, grad_scale: 16.0
+2023-02-06 11:21:19,376 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89443.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:21:22,222 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5374, 1.8663, 3.4222, 1.3297, 2.5759, 2.0078, 1.5955, 2.4302],
+       device='cuda:2'), covar=tensor([0.1642, 0.2440, 0.0687, 0.3913, 0.1508, 0.2739, 0.1827, 0.2156],
+       device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0521, 0.0534, 0.0580, 0.0619, 0.0555, 0.0473, 0.0612],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 11:21:24,101 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.539e+02 3.031e+02 3.696e+02 8.346e+02, threshold=6.063e+02, percent-clipped=3.0
+2023-02-06 11:21:29,672 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89457.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:21:34,346 INFO [train.py:901] (2/4) Epoch 12, batch 550, loss[loss=0.2174, simple_loss=0.3016, pruned_loss=0.06658, over 8245.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3101, pruned_loss=0.07971, over 1516795.21 frames. ], batch size: 22, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:21:53,123 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7725, 2.1966, 1.7775, 2.7268, 1.1524, 1.5517, 1.7324, 2.2617],
+       device='cuda:2'), covar=tensor([0.0895, 0.0826, 0.1041, 0.0417, 0.1309, 0.1494, 0.1131, 0.0766],
+       device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0213, 0.0255, 0.0214, 0.0218, 0.0251, 0.0257, 0.0220],
+       device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:2')
+2023-02-06 11:22:02,587 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:22:09,198 INFO [train.py:901] (2/4) Epoch 12, batch 600, loss[loss=0.2565, simple_loss=0.313, pruned_loss=0.09996, over 7798.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3109, pruned_loss=0.08027, over 1543311.56 frames. ], batch size: 20, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:22:17,835 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89527.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:22:19,216 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0189, 1.7512, 3.0837, 1.2931, 2.1344, 3.2861, 3.4623, 2.8498],
+       device='cuda:2'), covar=tensor([0.0972, 0.1379, 0.0387, 0.2155, 0.0972, 0.0297, 0.0505, 0.0621],
+       device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0298, 0.0262, 0.0294, 0.0273, 0.0237, 0.0344, 0.0292],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 11:22:20,461 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89531.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:22:26,583 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 11:22:34,513 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.630e+02 3.047e+02 3.733e+02 1.036e+03, threshold=6.094e+02, percent-clipped=2.0
+2023-02-06 11:22:44,043 INFO [train.py:901] (2/4) Epoch 12, batch 650, loss[loss=0.2416, simple_loss=0.3215, pruned_loss=0.08082, over 8286.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3113, pruned_loss=0.08, over 1562947.09 frames. ], batch size: 23, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:23:18,860 INFO [train.py:901] (2/4) Epoch 12, batch 700, loss[loss=0.287, simple_loss=0.3565, pruned_loss=0.1087, over 8446.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3116, pruned_loss=0.08032, over 1575915.38 frames. ], batch size: 27, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:23:40,443 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89646.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:23:43,633 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.821e+02 3.296e+02 4.031e+02 9.579e+02, threshold=6.593e+02, percent-clipped=5.0
+2023-02-06 11:23:53,838 INFO [train.py:901] (2/4) Epoch 12, batch 750, loss[loss=0.217, simple_loss=0.2869, pruned_loss=0.07357, over 7659.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3115, pruned_loss=0.0804, over 1583965.00 frames. ], batch size: 19, lr: 6.50e-03, grad_scale: 16.0
+2023-02-06 11:24:02,096 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3363, 1.4694, 4.4533, 2.0411, 2.4567, 5.1291, 5.1549, 4.4783],
+       device='cuda:2'), covar=tensor([0.1057, 0.1858, 0.0261, 0.1851, 0.1129, 0.0203, 0.0372, 0.0585],
+       device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0300, 0.0264, 0.0296, 0.0276, 0.0239, 0.0347, 0.0293],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 11:24:04,276 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0
+2023-02-06 11:24:11,508 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 11:24:17,639 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:24:20,256 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 11:24:28,186 INFO [train.py:901] (2/4) Epoch 12, batch 800, loss[loss=0.2293, simple_loss=0.3204, pruned_loss=0.06914, over 8492.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.311, pruned_loss=0.08024, over 1589993.38 frames. ], batch size: 29, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:24:32,437 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0000, 2.7754, 3.4411, 2.0348, 1.7656, 3.4287, 0.6746, 2.1560],
+       device='cuda:2'), covar=tensor([0.1847, 0.1308, 0.0339, 0.2427, 0.3718, 0.0318, 0.3867, 0.2017],
+       device='cuda:2'), in_proj_covar=tensor([0.0170, 0.0171, 0.0103, 0.0218, 0.0259, 0.0107, 0.0167, 0.0169],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 11:24:43,682 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89736.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:24:53,390 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.628e+02 3.285e+02 4.121e+02 9.349e+02, threshold=6.571e+02, percent-clipped=6.0
+2023-02-06 11:24:59,659 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89760.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:25:02,837 INFO [train.py:901] (2/4) Epoch 12, batch 850, loss[loss=0.232, simple_loss=0.3072, pruned_loss=0.07838, over 7980.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3097, pruned_loss=0.07923, over 1594926.61 frames. ], batch size: 21, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:25:18,006 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89785.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:25:19,284 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89787.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:25:28,670 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89801.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:25:34,027 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3615, 1.6529, 1.6558, 0.9248, 1.6958, 1.2495, 0.2785, 1.5678],
+       device='cuda:2'), covar=tensor([0.0333, 0.0224, 0.0168, 0.0347, 0.0285, 0.0673, 0.0587, 0.0189],
+       device='cuda:2'), in_proj_covar=tensor([0.0391, 0.0330, 0.0275, 0.0384, 0.0317, 0.0474, 0.0356, 0.0353],
+       device='cuda:2'), out_proj_covar=tensor([1.1120e-04, 9.1706e-05, 7.6565e-05, 1.0750e-04, 8.9654e-05, 1.4412e-04,
+       1.0104e-04, 9.9689e-05], device='cuda:2')
+2023-02-06 11:25:37,801 INFO [train.py:901] (2/4) Epoch 12, batch 900, loss[loss=0.2165, simple_loss=0.2939, pruned_loss=0.06958, over 7978.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3101, pruned_loss=0.07884, over 1604277.72 frames. ], batch size: 21, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:03,292 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.446e+02 3.021e+02 3.729e+02 6.397e+02, threshold=6.041e+02, percent-clipped=0.0
+2023-02-06 11:26:03,490 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89851.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:11,307 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6048, 1.8878, 1.5920, 2.2895, 1.0003, 1.3928, 1.4741, 1.8623],
+       device='cuda:2'), covar=tensor([0.0856, 0.0826, 0.0979, 0.0418, 0.1199, 0.1486, 0.1031, 0.0761],
+       device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0213, 0.0255, 0.0215, 0.0218, 0.0252, 0.0258, 0.0218],
+       device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:2')
+2023-02-06 11:26:11,795 INFO [train.py:901] (2/4) Epoch 12, batch 950, loss[loss=0.2267, simple_loss=0.2999, pruned_loss=0.07678, over 8030.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3094, pruned_loss=0.07859, over 1604868.98 frames. ], batch size: 22, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:16,404 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:24,521 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89883.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:33,230 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7528, 2.2426, 3.6921, 2.4765, 3.1468, 2.4367, 2.0400, 1.7255],
+       device='cuda:2'), covar=tensor([0.3813, 0.4272, 0.1178, 0.2922, 0.1968, 0.2186, 0.1588, 0.4468],
+       device='cuda:2'), in_proj_covar=tensor([0.0895, 0.0876, 0.0743, 0.0853, 0.0936, 0.0806, 0.0708, 0.0772],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:26:38,570 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:38,612 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89902.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:26:39,087 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 11:26:46,279 INFO [train.py:901] (2/4) Epoch 12, batch 1000, loss[loss=0.2129, simple_loss=0.2801, pruned_loss=0.07288, over 7420.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3102, pruned_loss=0.07879, over 1609511.14 frames. ], batch size: 17, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:26:47,816 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89916.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:26:48,419 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9439, 2.8620, 3.5215, 2.0191, 1.6545, 3.6618, 0.6037, 2.1063],
+       device='cuda:2'), covar=tensor([0.2507, 0.1254, 0.0387, 0.2788, 0.4494, 0.0285, 0.3965, 0.2236],
+       device='cuda:2'), in_proj_covar=tensor([0.0170, 0.0172, 0.0103, 0.0218, 0.0259, 0.0107, 0.0166, 0.0169],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 11:26:55,680 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:27:11,383 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.648e+02 3.254e+02 4.081e+02 9.414e+02, threshold=6.507e+02, percent-clipped=7.0
+2023-02-06 11:27:11,408 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 11:27:13,883 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 11:27:20,836 INFO [train.py:901] (2/4) Epoch 12, batch 1050, loss[loss=0.2964, simple_loss=0.362, pruned_loss=0.1155, over 8490.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3102, pruned_loss=0.07876, over 1610328.59 frames. ], batch size: 28, lr: 6.49e-03, grad_scale: 8.0
+2023-02-06 11:27:22,384 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0677, 1.3748, 6.1377, 2.2005, 5.5044, 5.2512, 5.8017, 5.5569],
+       device='cuda:2'), covar=tensor([0.0361, 0.4507, 0.0333, 0.3100, 0.0850, 0.0736, 0.0305, 0.0408],
+       device='cuda:2'), in_proj_covar=tensor([0.0501, 0.0586, 0.0593, 0.0543, 0.0620, 0.0528, 0.0520, 0.0592],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:27:24,339 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 11:27:35,860 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89986.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:27:56,235 INFO [train.py:901] (2/4) Epoch 12, batch 1100, loss[loss=0.2943, simple_loss=0.3594, pruned_loss=0.1146, over 8328.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3102, pruned_loss=0.07903, over 1612662.42 frames. ], batch size: 49, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:28:04,632 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1908, 2.1776, 1.5813, 1.9480, 1.8046, 1.2853, 1.7221, 1.7198],
+       device='cuda:2'), covar=tensor([0.1207, 0.0366, 0.1129, 0.0505, 0.0636, 0.1457, 0.0755, 0.0730],
+       device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0240, 0.0316, 0.0297, 0.0301, 0.0323, 0.0336, 0.0306],
+       device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 11:28:16,467 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:28:22,907 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.536e+02 3.046e+02 3.976e+02 6.882e+02, threshold=6.092e+02, percent-clipped=1.0
+2023-02-06 11:28:31,018 INFO [train.py:901] (2/4) Epoch 12, batch 1150, loss[loss=0.226, simple_loss=0.2919, pruned_loss=0.0801, over 7697.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3105, pruned_loss=0.07904, over 1615925.56 frames. ], batch size: 18, lr: 6.48e-03, grad_scale: 4.0
+2023-02-06 11:28:34,457 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 11:29:01,100 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90107.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:05,604 INFO [train.py:901] (2/4) Epoch 12, batch 1200, loss[loss=0.2226, simple_loss=0.2937, pruned_loss=0.07578, over 7811.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3107, pruned_loss=0.07957, over 1614369.46 frames. ], batch size: 20, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:29:19,570 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90132.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:24,287 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1723, 1.0342, 1.2694, 1.1273, 0.9227, 1.2957, 0.0421, 0.9136],
+       device='cuda:2'), covar=tensor([0.2056, 0.1799, 0.0599, 0.1310, 0.3839, 0.0606, 0.3015, 0.1717],
+       device='cuda:2'), in_proj_covar=tensor([0.0168, 0.0171, 0.0103, 0.0218, 0.0258, 0.0107, 0.0165, 0.0168],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 11:29:30,358 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90148.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:32,963 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.449e+02 3.099e+02 4.282e+02 6.791e+02, threshold=6.197e+02, percent-clipped=4.0
+2023-02-06 11:29:36,556 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90157.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:37,305 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90158.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:29:41,830 INFO [train.py:901] (2/4) Epoch 12, batch 1250, loss[loss=0.2288, simple_loss=0.3004, pruned_loss=0.07866, over 7631.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3103, pruned_loss=0.07919, over 1613744.95 frames. ], batch size: 19, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:29:47,513 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90172.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:29:53,674 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0672, 1.6405, 1.3788, 1.5902, 1.3649, 1.2190, 1.2789, 1.4443],
+       device='cuda:2'), covar=tensor([0.1033, 0.0436, 0.1173, 0.0535, 0.0636, 0.1292, 0.0840, 0.0665],
+       device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0240, 0.0318, 0.0300, 0.0302, 0.0325, 0.0339, 0.0307],
+       device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 11:29:55,720 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90183.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:04,776 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1217, 2.8828, 3.4701, 2.3903, 1.9116, 3.4733, 0.7979, 2.1428],
+       device='cuda:2'), covar=tensor([0.1864, 0.1429, 0.0402, 0.2128, 0.3537, 0.0465, 0.3360, 0.1948],
+       device='cuda:2'), in_proj_covar=tensor([0.0169, 0.0172, 0.0104, 0.0218, 0.0258, 0.0107, 0.0165, 0.0169],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 11:30:05,421 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90197.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:30:17,139 INFO [train.py:901] (2/4) Epoch 12, batch 1300, loss[loss=0.2548, simple_loss=0.3327, pruned_loss=0.0885, over 8523.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.31, pruned_loss=0.07872, over 1614304.31 frames. ], batch size: 26, lr: 6.48e-03, grad_scale: 8.0
+2023-02-06 11:30:26,145 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90227.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:37,502 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90242.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:43,700 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90250.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:30:44,956 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.408e+02 3.209e+02 4.069e+02 1.568e+03, threshold=6.418e+02, percent-clipped=9.0
+2023-02-06 11:30:53,233 INFO [train.py:901] (2/4) Epoch 12, batch 1350, loss[loss=0.3337, simple_loss=0.3755, pruned_loss=0.1459, over 6879.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.31, pruned_loss=0.07858, over 1616226.09 frames. ], batch size: 71, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:30:55,572 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90267.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:31:19,418 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 11:31:28,605 INFO [train.py:901] (2/4) Epoch 12, batch 1400, loss[loss=0.1898, simple_loss=0.2751, pruned_loss=0.05225, over 8080.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3095, pruned_loss=0.0783, over 1618472.62 frames. ], batch size: 21, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:31:47,914 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90342.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:31:54,627 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.398e+02 2.808e+02 3.540e+02 8.131e+02, threshold=5.617e+02, percent-clipped=1.0
+2023-02-06 11:32:03,611 INFO [train.py:901] (2/4) Epoch 12, batch 1450, loss[loss=0.1829, simple_loss=0.2779, pruned_loss=0.04396, over 8246.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3077, pruned_loss=0.07702, over 1618237.58 frames. ], batch size: 24, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:32:07,896 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90369.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:32:08,443 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 11:32:38,149 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90413.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:32:38,609 INFO [train.py:901] (2/4) Epoch 12, batch 1500, loss[loss=0.2349, simple_loss=0.3195, pruned_loss=0.07512, over 8335.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3078, pruned_loss=0.07719, over 1618916.62 frames. ], batch size: 26, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:32:50,009 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6913, 1.9804, 2.2103, 1.2450, 2.3517, 1.4210, 0.6466, 1.8811],
+       device='cuda:2'), covar=tensor([0.0432, 0.0222, 0.0187, 0.0441, 0.0238, 0.0653, 0.0603, 0.0221],
+       device='cuda:2'), in_proj_covar=tensor([0.0395, 0.0332, 0.0278, 0.0388, 0.0317, 0.0474, 0.0359, 0.0356],
+       device='cuda:2'), out_proj_covar=tensor([1.1232e-04, 9.2314e-05, 7.7557e-05, 1.0856e-04, 8.9250e-05, 1.4397e-04,
+       1.0217e-04, 1.0033e-04], device='cuda:2')
+2023-02-06 11:32:55,142 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90438.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:33:04,310 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.462e+02 2.993e+02 3.898e+02 9.256e+02, threshold=5.985e+02, percent-clipped=2.0
+2023-02-06 11:33:12,499 INFO [train.py:901] (2/4) Epoch 12, batch 1550, loss[loss=0.2391, simple_loss=0.305, pruned_loss=0.08661, over 7973.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3081, pruned_loss=0.0773, over 1617283.35 frames. ], batch size: 21, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:33:15,003 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-02-06 11:33:21,457 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:33:33,067 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:33:48,745 INFO [train.py:901] (2/4) Epoch 12, batch 1600, loss[loss=0.2857, simple_loss=0.3534, pruned_loss=0.109, over 8360.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3065, pruned_loss=0.07667, over 1616111.92 frames. ], batch size: 26, lr: 6.47e-03, grad_scale: 8.0
+2023-02-06 11:33:48,905 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90514.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:15,464 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.712e+02 3.378e+02 4.197e+02 8.231e+02, threshold=6.755e+02, percent-clipped=6.0
+2023-02-06 11:34:23,535 INFO [train.py:901] (2/4) Epoch 12, batch 1650, loss[loss=0.2154, simple_loss=0.3008, pruned_loss=0.06503, over 8018.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3071, pruned_loss=0.07681, over 1617692.94 frames. ], batch size: 22, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:34:43,459 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90594.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:47,049 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90598.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:53,818 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:34:58,216 INFO [train.py:901] (2/4) Epoch 12, batch 1700, loss[loss=0.2379, simple_loss=0.3024, pruned_loss=0.0867, over 7915.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3075, pruned_loss=0.07691, over 1621207.16 frames. ], batch size: 20, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:35:04,324 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90623.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:35:19,239 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3520, 1.4687, 1.3377, 1.9003, 0.7176, 1.1822, 1.3037, 1.5626],
+       device='cuda:2'), covar=tensor([0.0850, 0.0850, 0.1119, 0.0512, 0.1259, 0.1523, 0.0816, 0.0726],
+       device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0213, 0.0255, 0.0216, 0.0218, 0.0251, 0.0256, 0.0219],
+       device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+       device='cuda:2')
+2023-02-06 11:35:24,539 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.486e+02 2.952e+02 3.646e+02 6.764e+02, threshold=5.904e+02, percent-clipped=1.0
+2023-02-06 11:35:33,331 INFO [train.py:901] (2/4) Epoch 12, batch 1750, loss[loss=0.203, simple_loss=0.2775, pruned_loss=0.06427, over 7802.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3074, pruned_loss=0.0766, over 1623468.86 frames. ], batch size: 20, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:36:04,031 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90709.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:36:07,401 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90713.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:36:08,038 INFO [train.py:901] (2/4) Epoch 12, batch 1800, loss[loss=0.2563, simple_loss=0.3342, pruned_loss=0.0892, over 8522.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3073, pruned_loss=0.07688, over 1617585.11 frames. ], batch size: 26, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:36:35,309 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.625e+02 3.119e+02 3.569e+02 7.012e+02, threshold=6.239e+02, percent-clipped=2.0
+2023-02-06 11:36:43,315 INFO [train.py:901] (2/4) Epoch 12, batch 1850, loss[loss=0.2595, simple_loss=0.337, pruned_loss=0.09099, over 8479.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3064, pruned_loss=0.07658, over 1612422.38 frames. ], batch size: 28, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:36:50,561 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9202, 1.5123, 6.0247, 2.2396, 5.3817, 5.0860, 5.6315, 5.3962],
+       device='cuda:2'), covar=tensor([0.0410, 0.4619, 0.0332, 0.3207, 0.0948, 0.0683, 0.0378, 0.0465],
+       device='cuda:2'), in_proj_covar=tensor([0.0503, 0.0590, 0.0598, 0.0541, 0.0618, 0.0526, 0.0524, 0.0592],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:37:17,707 INFO [train.py:901] (2/4) Epoch 12, batch 1900, loss[loss=0.1957, simple_loss=0.2683, pruned_loss=0.06156, over 7537.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3057, pruned_loss=0.07667, over 1611332.68 frames. ], batch size: 18, lr: 6.46e-03, grad_scale: 8.0
+2023-02-06 11:37:22,472 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90821.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:27,316 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90828.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:31,347 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2826, 1.8955, 2.6866, 2.0889, 2.4609, 2.1821, 1.8201, 1.1308],
+       device='cuda:2'), covar=tensor([0.3723, 0.3768, 0.1214, 0.2933, 0.1999, 0.2126, 0.1586, 0.4410],
+       device='cuda:2'), in_proj_covar=tensor([0.0889, 0.0874, 0.0740, 0.0852, 0.0937, 0.0809, 0.0708, 0.0773],
+       device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:37:36,340 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 11:37:44,436 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.569e+02 3.031e+02 3.632e+02 7.649e+02, threshold=6.063e+02, percent-clipped=2.0
+2023-02-06 11:37:47,237 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 11:37:48,678 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90858.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:52,168 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90863.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:52,633 INFO [train.py:901] (2/4) Epoch 12, batch 1950, loss[loss=0.2234, simple_loss=0.302, pruned_loss=0.0724, over 7660.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3037, pruned_loss=0.07574, over 1605171.34 frames. ], batch size: 19, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:37:55,457 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90867.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:37:59,339 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 11:38:10,330 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90888.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:38:19,026 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 11:38:27,977 INFO [train.py:901] (2/4) Epoch 12, batch 2000, loss[loss=0.2429, simple_loss=0.3196, pruned_loss=0.08315, over 8608.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3049, pruned_loss=0.07605, over 1609057.69 frames. ], batch size: 31, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:38:43,401 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90936.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:38:54,924 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.641e+02 3.163e+02 4.034e+02 9.087e+02, threshold=6.326e+02, percent-clipped=9.0
+2023-02-06 11:39:02,895 INFO [train.py:901] (2/4) Epoch 12, batch 2050, loss[loss=0.2174, simple_loss=0.2991, pruned_loss=0.06783, over 8535.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3065, pruned_loss=0.0774, over 1609114.21 frames. ], batch size: 26, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:39:03,761 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90965.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:39:09,923 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:39:10,140 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0
+2023-02-06 11:39:21,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90990.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:39:38,681 INFO [train.py:901] (2/4) Epoch 12, batch 2100, loss[loss=0.2497, simple_loss=0.3183, pruned_loss=0.09055, over 8313.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3087, pruned_loss=0.07828, over 1613411.46 frames. ], batch size: 25, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:40:04,173 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 2.659e+02 3.265e+02 4.247e+02 8.349e+02, threshold=6.531e+02, percent-clipped=2.0
+2023-02-06 11:40:12,108 INFO [train.py:901] (2/4) Epoch 12, batch 2150, loss[loss=0.2016, simple_loss=0.2763, pruned_loss=0.06341, over 7454.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3083, pruned_loss=0.07827, over 1613347.96 frames. ], batch size: 17, lr: 6.45e-03, grad_scale: 8.0
+2023-02-06 11:40:26,821 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91084.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:40:30,728 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0587, 1.4926, 1.7635, 1.3672, 0.9376, 1.5623, 1.6981, 1.5090],
+       device='cuda:2'), covar=tensor([0.0462, 0.1179, 0.1568, 0.1330, 0.0556, 0.1402, 0.0644, 0.0592],
+       device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0152, 0.0190, 0.0159, 0.0103, 0.0162, 0.0116, 0.0137],
+       device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+       device='cuda:2')
+2023-02-06 11:40:44,051 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:40:47,222 INFO [train.py:901] (2/4) Epoch 12, batch 2200, loss[loss=0.2003, simple_loss=0.2829, pruned_loss=0.05883, over 7249.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.308, pruned_loss=0.07843, over 1616233.86 frames. ], batch size: 16, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:40:51,509 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1999, 1.5417, 3.3605, 1.5249, 2.9839, 2.8564, 3.0693, 2.9628],
+       device='cuda:2'), covar=tensor([0.0685, 0.3230, 0.0759, 0.3033, 0.1013, 0.0787, 0.0587, 0.0654],
+       device='cuda:2'), in_proj_covar=tensor([0.0496, 0.0577, 0.0587, 0.0534, 0.0612, 0.0521, 0.0517, 0.0580],
+       device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:2')
+2023-02-06 11:41:13,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.751e+02 3.546e+02 4.173e+02 9.054e+02, threshold=7.092e+02, percent-clipped=3.0
+2023-02-06 11:41:21,753 INFO [train.py:901] (2/4) Epoch 12, batch 2250, loss[loss=0.1903, simple_loss=0.2666, pruned_loss=0.05698, over 7537.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3082, pruned_loss=0.07886, over 1613178.80 frames. ], batch size: 18, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:41:41,137 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91192.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:41:54,522 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91211.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:41:56,402 INFO [train.py:901] (2/4) Epoch 12, batch 2300, loss[loss=0.1648, simple_loss=0.249, pruned_loss=0.04032, over 7453.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3074, pruned_loss=0.07825, over 1610197.50 frames. ], batch size: 17, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:41:58,498 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:07,356 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:11,983 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7072, 1.7059, 2.2743, 1.6525, 1.1005, 2.3063, 0.4004, 1.3284],
+       device='cuda:2'), covar=tensor([0.2479, 0.1651, 0.0404, 0.1792, 0.4131, 0.0400, 0.3282, 0.2048],
+       device='cuda:2'), in_proj_covar=tensor([0.0169, 0.0174, 0.0104, 0.0219, 0.0255, 0.0107, 0.0167, 0.0170],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+       device='cuda:2')
+2023-02-06 11:42:23,426 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.635e+02 3.142e+02 4.194e+02 9.102e+02, threshold=6.284e+02, percent-clipped=2.0
+2023-02-06 11:42:24,990 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:31,681 INFO [train.py:901] (2/4) Epoch 12, batch 2350, loss[loss=0.2319, simple_loss=0.315, pruned_loss=0.07435, over 8234.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3069, pruned_loss=0.07757, over 1612747.56 frames. ], batch size: 22, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:42:57,738 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91303.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:43:05,700 INFO [train.py:901] (2/4) Epoch 12, batch 2400, loss[loss=0.2385, simple_loss=0.3129, pruned_loss=0.08203, over 8492.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3069, pruned_loss=0.07809, over 1612145.16 frames. ], batch size: 26, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:43:14,304 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:43:31,008 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1662, 3.0512, 2.8422, 1.4698, 2.7576, 2.8350, 2.7889, 2.6434],
+       device='cuda:2'), covar=tensor([0.1333, 0.0973, 0.1314, 0.5255, 0.1257, 0.1372, 0.1708, 0.1336],
+       device='cuda:2'), in_proj_covar=tensor([0.0459, 0.0371, 0.0383, 0.0478, 0.0376, 0.0380, 0.0373, 0.0328],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:2')
+2023-02-06 11:43:32,245 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.547e+02 3.046e+02 3.774e+02 7.420e+02, threshold=6.092e+02, percent-clipped=3.0
+2023-02-06 11:43:41,054 INFO [train.py:901] (2/4) Epoch 12, batch 2450, loss[loss=0.2557, simple_loss=0.3252, pruned_loss=0.09314, over 7805.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3077, pruned_loss=0.0788, over 1614845.72 frames. ], batch size: 20, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:44:15,046 INFO [train.py:901] (2/4) Epoch 12, batch 2500, loss[loss=0.1969, simple_loss=0.2846, pruned_loss=0.05463, over 7962.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3061, pruned_loss=0.07817, over 1610632.90 frames. ], batch size: 21, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:44:41,740 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.576e+02 3.186e+02 4.386e+02 8.083e+02, threshold=6.372e+02, percent-clipped=11.0
+2023-02-06 11:44:50,280 INFO [train.py:901] (2/4) Epoch 12, batch 2550, loss[loss=0.2098, simple_loss=0.2816, pruned_loss=0.06905, over 7916.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3078, pruned_loss=0.07905, over 1612919.40 frames. ], batch size: 20, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:45:03,017 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6637, 1.5410, 2.8649, 1.3691, 2.0778, 3.1069, 3.1618, 2.6030],
+       device='cuda:2'), covar=tensor([0.1108, 0.1447, 0.0361, 0.2018, 0.0865, 0.0271, 0.0459, 0.0745],
+       device='cuda:2'), in_proj_covar=tensor([0.0267, 0.0298, 0.0262, 0.0295, 0.0279, 0.0236, 0.0350, 0.0294],
+       device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+       device='cuda:2')
+2023-02-06 11:45:24,403 INFO [train.py:901] (2/4) Epoch 12, batch 2600, loss[loss=0.2053, simple_loss=0.2892, pruned_loss=0.06068, over 7922.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3085, pruned_loss=0.07942, over 1615172.20 frames. ], batch size: 20, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:45:33,951 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.25 vs. limit=5.0
+2023-02-06 11:45:50,013 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.880e+02 3.430e+02 4.544e+02 8.443e+02, threshold=6.860e+02, percent-clipped=9.0
+2023-02-06 11:45:56,362 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91560.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:45:58,890 INFO [train.py:901] (2/4) Epoch 12, batch 2650, loss[loss=0.2142, simple_loss=0.3038, pruned_loss=0.06229, over 7813.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.3104, pruned_loss=0.08065, over 1615852.69 frames. ], batch size: 20, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:46:11,932 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91582.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:18,126 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.75 vs. limit=2.0
+2023-02-06 11:46:29,369 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:33,854 INFO [train.py:901] (2/4) Epoch 12, batch 2700, loss[loss=0.2118, simple_loss=0.2913, pruned_loss=0.06613, over 8523.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3091, pruned_loss=0.07948, over 1619292.86 frames. ], batch size: 28, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:46:55,396 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6553, 1.9295, 2.0350, 1.0413, 2.2106, 1.5050, 0.4991, 1.7157],
+       device='cuda:2'), covar=tensor([0.0403, 0.0226, 0.0167, 0.0399, 0.0251, 0.0593, 0.0591, 0.0211],
+       device='cuda:2'), in_proj_covar=tensor([0.0401, 0.0334, 0.0283, 0.0388, 0.0321, 0.0477, 0.0363, 0.0360],
+       device='cuda:2'), out_proj_covar=tensor([1.1393e-04, 9.2547e-05, 7.8851e-05, 1.0843e-04, 9.0469e-05, 1.4417e-04,
+       1.0303e-04, 1.0155e-04], device='cuda:2')
+2023-02-06 11:46:55,932 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91647.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:59,291 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.691e+02 3.205e+02 3.908e+02 7.628e+02, threshold=6.410e+02, percent-clipped=2.0
+2023-02-06 11:47:08,034 INFO [train.py:901] (2/4) Epoch 12, batch 2750, loss[loss=0.2415, simple_loss=0.3193, pruned_loss=0.0818, over 8135.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3083, pruned_loss=0.07834, over 1618658.85 frames. ], batch size: 22, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:47:43,505 INFO [train.py:901] (2/4) Epoch 12, batch 2800, loss[loss=0.2024, simple_loss=0.2877, pruned_loss=0.05861, over 7927.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3083, pruned_loss=0.07793, over 1615843.62 frames. ], batch size: 20, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:48:08,848 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.634e+02 3.181e+02 3.784e+02 9.192e+02, threshold=6.362e+02, percent-clipped=3.0
+2023-02-06 11:48:15,801 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91762.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:48:16,915 INFO [train.py:901] (2/4) Epoch 12, batch 2850, loss[loss=0.242, simple_loss=0.3244, pruned_loss=0.07981, over 8466.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3097, pruned_loss=0.07804, over 1618276.21 frames. ], batch size: 25, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:48:52,946 INFO [train.py:901] (2/4) Epoch 12, batch 2900, loss[loss=0.2009, simple_loss=0.2782, pruned_loss=0.06185, over 7810.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3071, pruned_loss=0.07709, over 1611617.14 frames. ], batch size: 20, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:49:14,741 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-02-06 11:49:18,830 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.538e+02 3.175e+02 3.875e+02 8.885e+02, threshold=6.349e+02, percent-clipped=4.0
+2023-02-06 11:49:22,151 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 11:49:26,753 INFO [train.py:901] (2/4) Epoch 12, batch 2950, loss[loss=0.2505, simple_loss=0.3301, pruned_loss=0.08547, over 8541.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3088, pruned_loss=0.07817, over 1611262.73 frames.
], batch size: 39, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:49:30,885 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4298, 4.3856, 3.9407, 1.8581, 3.9318, 4.0216, 3.9850, 3.7504], + device='cuda:2'), covar=tensor([0.0621, 0.0523, 0.0940, 0.4646, 0.0731, 0.0934, 0.1102, 0.0833], + device='cuda:2'), in_proj_covar=tensor([0.0461, 0.0374, 0.0385, 0.0483, 0.0376, 0.0381, 0.0375, 0.0328], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 11:49:40,299 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 11:49:54,045 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91904.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:49:57,393 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:50:00,613 INFO [train.py:901] (2/4) Epoch 12, batch 3000, loss[loss=0.1792, simple_loss=0.2536, pruned_loss=0.05244, over 7229.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3088, pruned_loss=0.07886, over 1611175.57 frames. ], batch size: 16, lr: 6.42e-03, grad_scale: 8.0 +2023-02-06 11:50:00,613 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 11:50:12,187 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4363, 1.7318, 2.6991, 1.2878, 2.0173, 1.8243, 1.5596, 1.9032], + device='cuda:2'), covar=tensor([0.1635, 0.2508, 0.0768, 0.4173, 0.1734, 0.2879, 0.1968, 0.1943], + device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0524, 0.0532, 0.0581, 0.0616, 0.0556, 0.0471, 0.0610], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 11:50:13,634 INFO [train.py:935] (2/4) Epoch 12, validation: loss=0.1868, simple_loss=0.2871, pruned_loss=0.04323, over 944034.00 frames. +2023-02-06 11:50:13,635 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 11:50:39,691 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-06 11:50:40,667 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.361e+02 2.883e+02 3.802e+02 7.578e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-06 11:50:49,090 INFO [train.py:901] (2/4) Epoch 12, batch 3050, loss[loss=0.2445, simple_loss=0.3406, pruned_loss=0.07425, over 8281.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3095, pruned_loss=0.07913, over 1613052.17 frames. ], batch size: 23, lr: 6.41e-03, grad_scale: 8.0 +2023-02-06 11:50:56,130 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:14,061 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91999.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:51:25,121 INFO [train.py:901] (2/4) Epoch 12, batch 3100, loss[loss=0.231, simple_loss=0.3099, pruned_loss=0.07602, over 8378.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3103, pruned_loss=0.07975, over 1613214.33 frames. 
], batch size: 49, lr: 6.41e-03, grad_scale: 8.0 +2023-02-06 11:51:28,117 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:28,799 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92019.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:51:45,787 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:51:51,733 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.663e+02 3.347e+02 4.142e+02 7.838e+02, threshold=6.695e+02, percent-clipped=5.0 +2023-02-06 11:52:01,126 INFO [train.py:901] (2/4) Epoch 12, batch 3150, loss[loss=0.2443, simple_loss=0.3129, pruned_loss=0.08787, over 8075.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3096, pruned_loss=0.0792, over 1610462.39 frames. ], batch size: 21, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:52:10,914 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4873, 1.5028, 4.7070, 1.7430, 4.1046, 3.8548, 4.2508, 4.0752], + device='cuda:2'), covar=tensor([0.0570, 0.4412, 0.0458, 0.3575, 0.1202, 0.0952, 0.0578, 0.0676], + device='cuda:2'), in_proj_covar=tensor([0.0498, 0.0576, 0.0590, 0.0537, 0.0617, 0.0525, 0.0520, 0.0584], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 11:52:35,760 INFO [train.py:901] (2/4) Epoch 12, batch 3200, loss[loss=0.2645, simple_loss=0.3364, pruned_loss=0.0963, over 8488.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3096, pruned_loss=0.07905, over 1613652.57 frames. ], batch size: 39, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:02,007 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.674e+02 3.226e+02 3.971e+02 7.397e+02, threshold=6.453e+02, percent-clipped=3.0 +2023-02-06 11:53:10,357 INFO [train.py:901] (2/4) Epoch 12, batch 3250, loss[loss=0.2256, simple_loss=0.3118, pruned_loss=0.06975, over 8510.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3094, pruned_loss=0.07907, over 1613000.75 frames. ], batch size: 28, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:46,149 INFO [train.py:901] (2/4) Epoch 12, batch 3300, loss[loss=0.1896, simple_loss=0.2738, pruned_loss=0.05266, over 7908.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3084, pruned_loss=0.07856, over 1612146.48 frames. ], batch size: 20, lr: 6.41e-03, grad_scale: 16.0 +2023-02-06 11:53:55,659 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4362, 1.8166, 1.8447, 1.0480, 1.8800, 1.3600, 0.4129, 1.5197], + device='cuda:2'), covar=tensor([0.0430, 0.0249, 0.0211, 0.0431, 0.0314, 0.0743, 0.0697, 0.0234], + device='cuda:2'), in_proj_covar=tensor([0.0395, 0.0328, 0.0282, 0.0389, 0.0317, 0.0476, 0.0358, 0.0356], + device='cuda:2'), out_proj_covar=tensor([1.1205e-04, 9.0791e-05, 7.8236e-05, 1.0855e-04, 8.9083e-05, 1.4390e-04, + 1.0156e-04, 1.0017e-04], device='cuda:2') +2023-02-06 11:54:11,039 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.357e+02 2.935e+02 3.680e+02 6.719e+02, threshold=5.870e+02, percent-clipped=1.0 +2023-02-06 11:54:11,766 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:54:19,107 INFO [train.py:901] (2/4) Epoch 12, batch 3350, loss[loss=0.2253, simple_loss=0.3046, pruned_loss=0.073, over 8337.00 frames. 
], tot_loss[loss=0.232, simple_loss=0.3078, pruned_loss=0.07808, over 1609408.88 frames. ], batch size: 26, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:54:27,348 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92275.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:54:36,860 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4267, 4.4514, 3.9558, 1.9575, 4.0174, 3.9911, 4.0446, 3.6895], + device='cuda:2'), covar=tensor([0.0868, 0.0597, 0.1167, 0.4885, 0.0772, 0.0872, 0.1391, 0.0866], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0379, 0.0388, 0.0489, 0.0382, 0.0382, 0.0378, 0.0333], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 11:54:45,176 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92300.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:54:55,335 INFO [train.py:901] (2/4) Epoch 12, batch 3400, loss[loss=0.2156, simple_loss=0.3065, pruned_loss=0.06231, over 8351.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3081, pruned_loss=0.07809, over 1612597.64 frames. ], batch size: 24, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:54:57,464 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:55:15,992 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92343.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:55:21,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.364e+02 2.893e+02 3.659e+02 6.777e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 11:55:29,928 INFO [train.py:901] (2/4) Epoch 12, batch 3450, loss[loss=0.2258, simple_loss=0.3054, pruned_loss=0.07317, over 8440.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3084, pruned_loss=0.07838, over 1608769.25 frames. ], batch size: 27, lr: 6.40e-03, grad_scale: 16.0 +2023-02-06 11:55:32,861 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92368.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:55:46,609 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.47 vs. limit=2.0 +2023-02-06 11:56:04,898 INFO [train.py:901] (2/4) Epoch 12, batch 3500, loss[loss=0.2286, simple_loss=0.3046, pruned_loss=0.07629, over 7528.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.307, pruned_loss=0.07764, over 1608981.28 frames. ], batch size: 18, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:56:18,395 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92432.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:56:29,130 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 11:56:33,046 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.438e+02 2.928e+02 3.742e+02 8.211e+02, threshold=5.856e+02, percent-clipped=5.0 +2023-02-06 11:56:36,517 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92458.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:56:40,250 INFO [train.py:901] (2/4) Epoch 12, batch 3550, loss[loss=0.1958, simple_loss=0.2739, pruned_loss=0.05886, over 7718.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.307, pruned_loss=0.07775, over 1605250.53 frames. 
], batch size: 18, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:56:48,396 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6250, 1.5783, 2.2207, 1.6804, 1.1199, 2.2238, 0.2609, 1.2846], + device='cuda:2'), covar=tensor([0.2834, 0.2322, 0.0549, 0.1917, 0.4623, 0.0542, 0.3549, 0.1920], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0175, 0.0104, 0.0219, 0.0256, 0.0108, 0.0165, 0.0168], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 11:57:14,696 INFO [train.py:901] (2/4) Epoch 12, batch 3600, loss[loss=0.2597, simple_loss=0.3309, pruned_loss=0.09423, over 7808.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3084, pruned_loss=0.07798, over 1615567.30 frames. ], batch size: 20, lr: 6.40e-03, grad_scale: 8.0 +2023-02-06 11:57:42,455 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.628e+02 3.055e+02 4.234e+02 9.851e+02, threshold=6.109e+02, percent-clipped=7.0 +2023-02-06 11:57:50,882 INFO [train.py:901] (2/4) Epoch 12, batch 3650, loss[loss=0.1972, simple_loss=0.277, pruned_loss=0.05873, over 7546.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3082, pruned_loss=0.0774, over 1613393.32 frames. ], batch size: 18, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:58:01,981 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0 +2023-02-06 11:58:05,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9827, 1.4722, 3.2277, 1.4812, 2.2913, 3.5858, 3.6067, 3.0324], + device='cuda:2'), covar=tensor([0.1041, 0.1585, 0.0348, 0.2039, 0.0997, 0.0242, 0.0468, 0.0597], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0302, 0.0265, 0.0298, 0.0281, 0.0241, 0.0355, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 11:58:19,599 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.37 vs. limit=5.0 +2023-02-06 11:58:23,912 INFO [train.py:901] (2/4) Epoch 12, batch 3700, loss[loss=0.2453, simple_loss=0.333, pruned_loss=0.07878, over 8240.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3096, pruned_loss=0.07881, over 1614679.52 frames. ], batch size: 24, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:58:28,536 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 11:58:30,693 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92624.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:44,255 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92643.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:48,449 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:58:50,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.476e+02 3.116e+02 4.152e+02 8.400e+02, threshold=6.233e+02, percent-clipped=9.0 +2023-02-06 11:58:59,710 INFO [train.py:901] (2/4) Epoch 12, batch 3750, loss[loss=0.2725, simple_loss=0.3227, pruned_loss=0.1112, over 6723.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3094, pruned_loss=0.07905, over 1610772.98 frames. 
], batch size: 71, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:59:17,078 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:20,025 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 11:59:28,421 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 11:59:32,388 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4187, 1.8186, 1.9216, 0.9541, 1.9983, 1.2907, 0.4569, 1.6123], + device='cuda:2'), covar=tensor([0.0470, 0.0232, 0.0194, 0.0455, 0.0266, 0.0750, 0.0637, 0.0212], + device='cuda:2'), in_proj_covar=tensor([0.0407, 0.0336, 0.0285, 0.0399, 0.0327, 0.0488, 0.0364, 0.0366], + device='cuda:2'), out_proj_covar=tensor([1.1541e-04, 9.3007e-05, 7.8854e-05, 1.1123e-04, 9.1677e-05, 1.4725e-04, + 1.0333e-04, 1.0285e-04], device='cuda:2') +2023-02-06 11:59:34,463 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:34,936 INFO [train.py:901] (2/4) Epoch 12, batch 3800, loss[loss=0.209, simple_loss=0.274, pruned_loss=0.07194, over 7700.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3084, pruned_loss=0.07813, over 1611163.97 frames. ], batch size: 18, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 11:59:35,190 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92714.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:59:43,939 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0624, 2.2238, 1.9052, 2.5113, 1.8363, 1.7958, 2.0002, 2.3805], + device='cuda:2'), covar=tensor([0.0662, 0.0752, 0.0906, 0.0436, 0.0913, 0.1123, 0.0796, 0.0565], + device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0212, 0.0258, 0.0217, 0.0217, 0.0252, 0.0258, 0.0218], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 11:59:52,026 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:59:52,779 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92739.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:00:02,135 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.558e+02 2.972e+02 3.756e+02 9.318e+02, threshold=5.944e+02, percent-clipped=5.0 +2023-02-06 12:00:03,335 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 12:00:09,491 INFO [train.py:901] (2/4) Epoch 12, batch 3850, loss[loss=0.2542, simple_loss=0.3239, pruned_loss=0.09225, over 8659.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3095, pruned_loss=0.0785, over 1616801.48 frames. ], batch size: 34, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 12:00:33,553 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 12:00:45,108 INFO [train.py:901] (2/4) Epoch 12, batch 3900, loss[loss=0.19, simple_loss=0.2625, pruned_loss=0.05877, over 7793.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3082, pruned_loss=0.07779, over 1617620.13 frames. 
], batch size: 19, lr: 6.39e-03, grad_scale: 8.0 +2023-02-06 12:01:08,875 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:01:11,295 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.538e+02 2.989e+02 3.922e+02 7.912e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 12:01:18,298 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3390, 1.5599, 2.2540, 1.1571, 1.5423, 1.6440, 1.3746, 1.4389], + device='cuda:2'), covar=tensor([0.1772, 0.2283, 0.0726, 0.3984, 0.1621, 0.2976, 0.1924, 0.1954], + device='cuda:2'), in_proj_covar=tensor([0.0492, 0.0529, 0.0539, 0.0583, 0.0622, 0.0564, 0.0477, 0.0615], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:01:19,448 INFO [train.py:901] (2/4) Epoch 12, batch 3950, loss[loss=0.234, simple_loss=0.3214, pruned_loss=0.07333, over 8286.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3079, pruned_loss=0.0774, over 1617176.12 frames. ], batch size: 23, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:01:54,545 INFO [train.py:901] (2/4) Epoch 12, batch 4000, loss[loss=0.1888, simple_loss=0.2721, pruned_loss=0.05272, over 7810.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3082, pruned_loss=0.07766, over 1616131.60 frames. ], batch size: 20, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:01:56,841 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:18,348 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:20,892 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.373e+02 3.059e+02 3.649e+02 8.513e+02, threshold=6.118e+02, percent-clipped=6.0 +2023-02-06 12:02:28,371 INFO [train.py:901] (2/4) Epoch 12, batch 4050, loss[loss=0.2758, simple_loss=0.35, pruned_loss=0.1008, over 8518.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3092, pruned_loss=0.07834, over 1619705.72 frames. ], batch size: 26, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:02:44,203 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:48,266 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:02:57,121 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1879, 1.0988, 1.2505, 1.1297, 0.9631, 1.3024, 0.0239, 0.8615], + device='cuda:2'), covar=tensor([0.2473, 0.1843, 0.0573, 0.1148, 0.3588, 0.0631, 0.2918, 0.1581], + device='cuda:2'), in_proj_covar=tensor([0.0170, 0.0174, 0.0103, 0.0216, 0.0253, 0.0107, 0.0162, 0.0168], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:03:03,722 INFO [train.py:901] (2/4) Epoch 12, batch 4100, loss[loss=0.2391, simple_loss=0.3204, pruned_loss=0.07888, over 8473.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3086, pruned_loss=0.07851, over 1616771.12 frames. 
], batch size: 25, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:03:04,569 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8789, 1.5574, 1.8159, 1.4319, 1.0029, 1.5462, 2.3224, 2.1142], + device='cuda:2'), covar=tensor([0.0446, 0.1310, 0.1732, 0.1413, 0.0646, 0.1484, 0.0634, 0.0548], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0153, 0.0194, 0.0160, 0.0104, 0.0164, 0.0117, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 12:03:13,908 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:03:21,021 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 12:03:30,614 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.418e+02 3.048e+02 3.757e+02 7.047e+02, threshold=6.097e+02, percent-clipped=3.0 +2023-02-06 12:03:31,427 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:03:37,937 INFO [train.py:901] (2/4) Epoch 12, batch 4150, loss[loss=0.2681, simple_loss=0.3322, pruned_loss=0.102, over 8711.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3081, pruned_loss=0.07792, over 1620220.95 frames. ], batch size: 34, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:03:50,876 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:04,576 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:05,526 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 12:04:12,237 INFO [train.py:901] (2/4) Epoch 12, batch 4200, loss[loss=0.2126, simple_loss=0.291, pruned_loss=0.06712, over 7920.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.308, pruned_loss=0.07719, over 1621003.13 frames. ], batch size: 20, lr: 6.38e-03, grad_scale: 8.0 +2023-02-06 12:04:13,197 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6981, 2.1648, 3.4701, 2.4948, 3.0134, 2.3868, 2.0095, 1.6288], + device='cuda:2'), covar=tensor([0.4120, 0.4413, 0.1283, 0.3065, 0.2125, 0.2470, 0.1842, 0.4753], + device='cuda:2'), in_proj_covar=tensor([0.0885, 0.0864, 0.0720, 0.0845, 0.0927, 0.0796, 0.0699, 0.0758], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:04:24,931 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 12:04:26,409 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:04:29,265 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0160, 2.7758, 3.4804, 2.0101, 1.8112, 3.4035, 0.8244, 1.9317], + device='cuda:2'), covar=tensor([0.2284, 0.1592, 0.0469, 0.2452, 0.3812, 0.0401, 0.3399, 0.2189], + device='cuda:2'), in_proj_covar=tensor([0.0169, 0.0172, 0.0101, 0.0214, 0.0252, 0.0107, 0.0161, 0.0167], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:04:40,122 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.563e+02 2.943e+02 3.717e+02 8.503e+02, threshold=5.885e+02, percent-clipped=3.0 +2023-02-06 12:04:47,442 INFO [train.py:901] (2/4) Epoch 12, batch 4250, loss[loss=0.2197, simple_loss=0.288, pruned_loss=0.07566, over 7919.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3094, pruned_loss=0.07788, over 1623942.37 frames. ], batch size: 20, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:04:48,815 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 12:04:53,107 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.76 vs. limit=2.0 +2023-02-06 12:05:06,771 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:09,368 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93197.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:21,310 INFO [train.py:901] (2/4) Epoch 12, batch 4300, loss[loss=0.2569, simple_loss=0.3339, pruned_loss=0.08991, over 8759.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3096, pruned_loss=0.07883, over 1611086.18 frames. ], batch size: 30, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:05:22,138 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0792, 1.4016, 4.3320, 1.5285, 3.7984, 3.6247, 3.8777, 3.7208], + device='cuda:2'), covar=tensor([0.0572, 0.4114, 0.0478, 0.3290, 0.1122, 0.0809, 0.0577, 0.0633], + device='cuda:2'), in_proj_covar=tensor([0.0501, 0.0580, 0.0594, 0.0537, 0.0615, 0.0527, 0.0519, 0.0581], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:05:48,576 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.718e+02 3.236e+02 4.116e+02 1.260e+03, threshold=6.473e+02, percent-clipped=7.0 +2023-02-06 12:05:54,515 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93261.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:05:56,509 INFO [train.py:901] (2/4) Epoch 12, batch 4350, loss[loss=0.2421, simple_loss=0.3135, pruned_loss=0.08536, over 8079.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3091, pruned_loss=0.07849, over 1609775.57 frames. ], batch size: 21, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:06:15,790 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:16,408 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 12:06:25,941 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:29,909 INFO [train.py:901] (2/4) Epoch 12, batch 4400, loss[loss=0.2248, simple_loss=0.2892, pruned_loss=0.08019, over 7810.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3083, pruned_loss=0.07796, over 1611218.75 frames. ], batch size: 20, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:06:42,311 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7050, 2.1240, 3.3100, 1.5035, 2.4295, 2.1004, 1.7661, 2.2493], + device='cuda:2'), covar=tensor([0.1553, 0.1977, 0.0699, 0.3579, 0.1418, 0.2538, 0.1638, 0.1951], + device='cuda:2'), in_proj_covar=tensor([0.0488, 0.0525, 0.0535, 0.0579, 0.0618, 0.0555, 0.0473, 0.0611], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:06:46,155 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:06:58,347 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.551e+02 2.995e+02 3.715e+02 7.484e+02, threshold=5.990e+02, percent-clipped=1.0 +2023-02-06 12:06:58,372 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 12:07:01,790 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:05,646 INFO [train.py:901] (2/4) Epoch 12, batch 4450, loss[loss=0.24, simple_loss=0.3175, pruned_loss=0.08131, over 8494.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3074, pruned_loss=0.07748, over 1612977.88 frames. ], batch size: 26, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:07:11,752 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:12,587 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4897, 2.0102, 3.0260, 2.3914, 2.8126, 2.2579, 1.8944, 1.4207], + device='cuda:2'), covar=tensor([0.3683, 0.3943, 0.1155, 0.2424, 0.1633, 0.2068, 0.1532, 0.4078], + device='cuda:2'), in_proj_covar=tensor([0.0886, 0.0868, 0.0725, 0.0847, 0.0927, 0.0798, 0.0700, 0.0762], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:07:14,525 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:19,337 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:29,362 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:36,071 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93408.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:07:39,922 INFO [train.py:901] (2/4) Epoch 12, batch 4500, loss[loss=0.1941, simple_loss=0.2744, pruned_loss=0.0569, over 7644.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3071, pruned_loss=0.07764, over 1612418.07 frames. ], batch size: 19, lr: 6.37e-03, grad_scale: 8.0 +2023-02-06 12:07:50,713 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 12:08:06,002 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:06,470 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.576e+02 3.193e+02 4.187e+02 6.619e+02, threshold=6.386e+02, percent-clipped=4.0 +2023-02-06 12:08:06,708 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:13,859 INFO [train.py:901] (2/4) Epoch 12, batch 4550, loss[loss=0.2393, simple_loss=0.3161, pruned_loss=0.08127, over 8703.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3067, pruned_loss=0.07683, over 1619483.32 frames. ], batch size: 39, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:08:24,173 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:24,999 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:31,901 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93487.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:49,674 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:08:50,191 INFO [train.py:901] (2/4) Epoch 12, batch 4600, loss[loss=0.1823, simple_loss=0.2665, pruned_loss=0.04904, over 8133.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3063, pruned_loss=0.07649, over 1620399.22 frames. ], batch size: 22, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:09:16,605 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.455e+02 3.020e+02 4.052e+02 9.299e+02, threshold=6.041e+02, percent-clipped=5.0 +2023-02-06 12:09:24,858 INFO [train.py:901] (2/4) Epoch 12, batch 4650, loss[loss=0.2285, simple_loss=0.3132, pruned_loss=0.07196, over 8465.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3063, pruned_loss=0.0767, over 1614675.97 frames. ], batch size: 25, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:09:25,072 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:41,900 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:45,160 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:09:45,361 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 12:09:47,208 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5305, 1.9036, 1.9882, 1.1826, 2.0929, 1.3696, 0.5188, 1.7134], + device='cuda:2'), covar=tensor([0.0436, 0.0254, 0.0182, 0.0418, 0.0305, 0.0722, 0.0660, 0.0213], + device='cuda:2'), in_proj_covar=tensor([0.0398, 0.0332, 0.0277, 0.0394, 0.0322, 0.0481, 0.0361, 0.0363], + device='cuda:2'), out_proj_covar=tensor([1.1275e-04, 9.1637e-05, 7.6628e-05, 1.0989e-04, 9.0315e-05, 1.4519e-04, + 1.0213e-04, 1.0190e-04], device='cuda:2') +2023-02-06 12:09:59,871 INFO [train.py:901] (2/4) Epoch 12, batch 4700, loss[loss=0.1899, simple_loss=0.2723, pruned_loss=0.05373, over 7770.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.305, pruned_loss=0.07586, over 1609993.55 frames. 
], batch size: 19, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:10:12,706 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:20,658 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9996, 6.0896, 5.2252, 2.5248, 5.4501, 5.6126, 5.5237, 5.2887], + device='cuda:2'), covar=tensor([0.0630, 0.0494, 0.1072, 0.4393, 0.0889, 0.0767, 0.1236, 0.0635], + device='cuda:2'), in_proj_covar=tensor([0.0472, 0.0389, 0.0395, 0.0493, 0.0387, 0.0390, 0.0384, 0.0336], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:10:25,038 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 12:10:26,588 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.370e+02 2.939e+02 3.568e+02 8.447e+02, threshold=5.879e+02, percent-clipped=4.0 +2023-02-06 12:10:29,424 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93657.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:33,899 INFO [train.py:901] (2/4) Epoch 12, batch 4750, loss[loss=0.2354, simple_loss=0.3139, pruned_loss=0.07842, over 8252.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3061, pruned_loss=0.0772, over 1607721.80 frames. ], batch size: 24, lr: 6.36e-03, grad_scale: 8.0 +2023-02-06 12:10:34,127 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:36,644 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4338, 1.6794, 4.5791, 2.0543, 2.4728, 5.1793, 5.1450, 4.5240], + device='cuda:2'), covar=tensor([0.0970, 0.1673, 0.0233, 0.1767, 0.1151, 0.0153, 0.0405, 0.0497], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0300, 0.0264, 0.0295, 0.0278, 0.0240, 0.0354, 0.0294], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:10:47,983 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 12:10:51,744 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93689.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:10:56,585 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6303, 2.0796, 2.1610, 1.2227, 2.3084, 1.3733, 0.7347, 1.8280], + device='cuda:2'), covar=tensor([0.0442, 0.0215, 0.0146, 0.0455, 0.0274, 0.0690, 0.0623, 0.0226], + device='cuda:2'), in_proj_covar=tensor([0.0398, 0.0330, 0.0277, 0.0394, 0.0324, 0.0480, 0.0359, 0.0360], + device='cuda:2'), out_proj_covar=tensor([1.1261e-04, 9.0855e-05, 7.6483e-05, 1.0976e-04, 9.0692e-05, 1.4513e-04, + 1.0152e-04, 1.0112e-04], device='cuda:2') +2023-02-06 12:11:00,460 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 12:11:02,495 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 12:11:04,761 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:09,472 INFO [train.py:901] (2/4) Epoch 12, batch 4800, loss[loss=0.226, simple_loss=0.3059, pruned_loss=0.073, over 8238.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3072, pruned_loss=0.07734, over 1613102.80 frames. 
], batch size: 22, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:11:23,311 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:30,151 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:36,832 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.519e+02 2.967e+02 3.635e+02 7.460e+02, threshold=5.934e+02, percent-clipped=2.0 +2023-02-06 12:11:44,129 INFO [train.py:901] (2/4) Epoch 12, batch 4850, loss[loss=0.2572, simple_loss=0.3365, pruned_loss=0.08896, over 8638.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.308, pruned_loss=0.07807, over 1618037.05 frames. ], batch size: 34, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:11:47,087 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93768.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:47,773 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:11:53,086 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 12:11:53,201 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1926, 4.1094, 3.7404, 1.9440, 3.6530, 3.7695, 3.7408, 3.4549], + device='cuda:2'), covar=tensor([0.0764, 0.0604, 0.0975, 0.4808, 0.0941, 0.1019, 0.1349, 0.0867], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0383, 0.0391, 0.0491, 0.0385, 0.0388, 0.0385, 0.0335], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:12:00,548 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:04,713 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:18,628 INFO [train.py:901] (2/4) Epoch 12, batch 4900, loss[loss=0.2167, simple_loss=0.2907, pruned_loss=0.07129, over 7789.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3079, pruned_loss=0.07809, over 1616314.10 frames. ], batch size: 19, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:12:42,679 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:12:45,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.389e+02 2.920e+02 3.679e+02 7.315e+02, threshold=5.841e+02, percent-clipped=3.0 +2023-02-06 12:12:53,898 INFO [train.py:901] (2/4) Epoch 12, batch 4950, loss[loss=0.225, simple_loss=0.3059, pruned_loss=0.072, over 8101.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3067, pruned_loss=0.07761, over 1613462.44 frames. 
], batch size: 23, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:13:00,013 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:13:20,756 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5453, 1.4125, 4.7349, 1.7871, 4.1955, 4.0290, 4.3327, 4.2647], + device='cuda:2'), covar=tensor([0.0510, 0.4108, 0.0418, 0.3199, 0.0981, 0.0821, 0.0484, 0.0484], + device='cuda:2'), in_proj_covar=tensor([0.0497, 0.0569, 0.0584, 0.0528, 0.0603, 0.0516, 0.0506, 0.0569], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:13:27,054 INFO [train.py:901] (2/4) Epoch 12, batch 5000, loss[loss=0.2461, simple_loss=0.3228, pruned_loss=0.08472, over 8465.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.307, pruned_loss=0.07795, over 1617586.90 frames. ], batch size: 27, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:13:55,391 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.574e+02 3.082e+02 3.748e+02 7.333e+02, threshold=6.165e+02, percent-clipped=4.0 +2023-02-06 12:13:56,535 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 12:14:02,955 INFO [train.py:901] (2/4) Epoch 12, batch 5050, loss[loss=0.2593, simple_loss=0.3231, pruned_loss=0.09776, over 8482.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3056, pruned_loss=0.07694, over 1615060.03 frames. ], batch size: 28, lr: 6.35e-03, grad_scale: 8.0 +2023-02-06 12:14:03,320 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.24 vs. limit=5.0 +2023-02-06 12:14:27,661 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:14:31,132 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 12:14:38,576 INFO [train.py:901] (2/4) Epoch 12, batch 5100, loss[loss=0.2214, simple_loss=0.29, pruned_loss=0.07646, over 7555.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3063, pruned_loss=0.07713, over 1613431.66 frames. ], batch size: 18, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:02,451 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 12:15:04,423 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 12:15:05,336 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.576e+02 2.962e+02 4.029e+02 5.912e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-06 12:15:10,262 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0682, 3.9493, 3.6555, 2.0668, 3.6280, 3.6135, 3.7364, 3.2995], + device='cuda:2'), covar=tensor([0.0813, 0.0660, 0.0900, 0.4234, 0.0881, 0.1003, 0.1104, 0.1012], + device='cuda:2'), in_proj_covar=tensor([0.0463, 0.0379, 0.0389, 0.0486, 0.0382, 0.0386, 0.0380, 0.0334], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:15:13,501 INFO [train.py:901] (2/4) Epoch 12, batch 5150, loss[loss=0.2167, simple_loss=0.292, pruned_loss=0.07074, over 8236.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3054, pruned_loss=0.07687, over 1609043.19 frames. 
], batch size: 22, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:47,476 INFO [train.py:901] (2/4) Epoch 12, batch 5200, loss[loss=0.2066, simple_loss=0.2825, pruned_loss=0.06534, over 7527.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.305, pruned_loss=0.07692, over 1606582.24 frames. ], batch size: 18, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:15:59,921 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:16:04,815 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9526, 1.1012, 3.0910, 1.0212, 2.6771, 2.6062, 2.7955, 2.7162], + device='cuda:2'), covar=tensor([0.0770, 0.3859, 0.0839, 0.3517, 0.1401, 0.0993, 0.0725, 0.0830], + device='cuda:2'), in_proj_covar=tensor([0.0496, 0.0573, 0.0584, 0.0529, 0.0608, 0.0519, 0.0512, 0.0570], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:16:14,659 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.571e+02 3.074e+02 4.467e+02 8.286e+02, threshold=6.149e+02, percent-clipped=7.0 +2023-02-06 12:16:21,913 INFO [train.py:901] (2/4) Epoch 12, batch 5250, loss[loss=0.2187, simple_loss=0.293, pruned_loss=0.07216, over 7931.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3059, pruned_loss=0.07711, over 1605964.77 frames. ], batch size: 20, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:16:25,905 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 12:16:57,676 INFO [train.py:901] (2/4) Epoch 12, batch 5300, loss[loss=0.2282, simple_loss=0.3039, pruned_loss=0.07621, over 8509.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3061, pruned_loss=0.07653, over 1610223.94 frames. ], batch size: 39, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:17:08,946 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7626, 4.6313, 4.1224, 1.9410, 4.1939, 4.2919, 4.2648, 3.8955], + device='cuda:2'), covar=tensor([0.0618, 0.0614, 0.1159, 0.4796, 0.0848, 0.0789, 0.1500, 0.0841], + device='cuda:2'), in_proj_covar=tensor([0.0465, 0.0380, 0.0390, 0.0483, 0.0379, 0.0385, 0.0379, 0.0333], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:17:15,483 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94241.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:17:19,508 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:17:23,401 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.628e+02 3.237e+02 4.138e+02 9.258e+02, threshold=6.473e+02, percent-clipped=5.0 +2023-02-06 12:17:31,599 INFO [train.py:901] (2/4) Epoch 12, batch 5350, loss[loss=0.2504, simple_loss=0.323, pruned_loss=0.08887, over 8637.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3063, pruned_loss=0.07707, over 1607852.74 frames. ], batch size: 34, lr: 6.34e-03, grad_scale: 8.0 +2023-02-06 12:18:05,213 INFO [train.py:901] (2/4) Epoch 12, batch 5400, loss[loss=0.2091, simple_loss=0.2877, pruned_loss=0.06521, over 7796.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3067, pruned_loss=0.07745, over 1609830.45 frames. 
], batch size: 19, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:18:12,846 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9160, 2.1387, 3.3355, 1.7240, 2.7744, 2.2271, 2.0570, 2.6433], + device='cuda:2'), covar=tensor([0.1322, 0.1865, 0.0521, 0.3052, 0.1087, 0.2100, 0.1454, 0.1642], + device='cuda:2'), in_proj_covar=tensor([0.0491, 0.0524, 0.0535, 0.0586, 0.0615, 0.0558, 0.0478, 0.0616], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:18:25,500 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:18:32,249 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.485e+02 2.978e+02 4.110e+02 9.009e+02, threshold=5.957e+02, percent-clipped=6.0 +2023-02-06 12:18:39,975 INFO [train.py:901] (2/4) Epoch 12, batch 5450, loss[loss=0.1965, simple_loss=0.2828, pruned_loss=0.05512, over 8203.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.308, pruned_loss=0.0779, over 1608679.30 frames. ], batch size: 23, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:18:41,431 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:18:47,004 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 12:19:12,398 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 12:19:15,766 INFO [train.py:901] (2/4) Epoch 12, batch 5500, loss[loss=0.2626, simple_loss=0.3268, pruned_loss=0.09916, over 8339.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3079, pruned_loss=0.07794, over 1611289.68 frames. ], batch size: 26, lr: 6.33e-03, grad_scale: 16.0 +2023-02-06 12:19:23,448 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1111, 1.5554, 1.6666, 1.4552, 1.0376, 1.4472, 1.8729, 1.5099], + device='cuda:2'), covar=tensor([0.0431, 0.1087, 0.1455, 0.1184, 0.0539, 0.1288, 0.0520, 0.0562], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0152, 0.0192, 0.0157, 0.0103, 0.0162, 0.0115, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 12:19:43,163 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.406e+02 2.798e+02 3.361e+02 6.650e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-06 12:19:45,384 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:19:48,697 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9440, 1.8213, 2.3103, 1.7296, 1.3186, 2.4045, 0.4905, 1.4743], + device='cuda:2'), covar=tensor([0.2161, 0.1753, 0.0520, 0.2014, 0.4484, 0.0545, 0.3620, 0.2044], + device='cuda:2'), in_proj_covar=tensor([0.0167, 0.0170, 0.0101, 0.0216, 0.0255, 0.0105, 0.0164, 0.0165], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:19:49,114 INFO [train.py:901] (2/4) Epoch 12, batch 5550, loss[loss=0.2006, simple_loss=0.2822, pruned_loss=0.05951, over 7528.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3086, pruned_loss=0.07825, over 1609367.41 frames. 
], batch size: 18, lr: 6.33e-03, grad_scale: 4.0 +2023-02-06 12:20:16,681 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:20:24,041 INFO [train.py:901] (2/4) Epoch 12, batch 5600, loss[loss=0.2537, simple_loss=0.3297, pruned_loss=0.0889, over 8579.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3082, pruned_loss=0.07803, over 1609963.56 frames. ], batch size: 31, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:20:35,258 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:20:54,482 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.675e+02 3.313e+02 4.214e+02 1.006e+03, threshold=6.626e+02, percent-clipped=7.0 +2023-02-06 12:21:00,660 INFO [train.py:901] (2/4) Epoch 12, batch 5650, loss[loss=0.2253, simple_loss=0.318, pruned_loss=0.06631, over 8461.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3061, pruned_loss=0.07658, over 1605517.94 frames. ], batch size: 25, lr: 6.33e-03, grad_scale: 8.0 +2023-02-06 12:21:15,256 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94585.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:21:21,314 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 12:21:35,757 INFO [train.py:901] (2/4) Epoch 12, batch 5700, loss[loss=0.2294, simple_loss=0.302, pruned_loss=0.0784, over 7915.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.306, pruned_loss=0.07631, over 1609627.49 frames. ], batch size: 20, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:21:46,150 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4004, 1.7797, 1.8511, 0.9590, 1.9880, 1.3048, 0.3688, 1.5767], + device='cuda:2'), covar=tensor([0.0395, 0.0240, 0.0192, 0.0409, 0.0250, 0.0708, 0.0603, 0.0204], + device='cuda:2'), in_proj_covar=tensor([0.0394, 0.0327, 0.0276, 0.0386, 0.0318, 0.0474, 0.0356, 0.0356], + device='cuda:2'), out_proj_covar=tensor([1.1137e-04, 8.9839e-05, 7.6204e-05, 1.0733e-04, 8.8825e-05, 1.4288e-04, + 1.0048e-04, 9.9711e-05], device='cuda:2') +2023-02-06 12:22:04,729 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.327e+02 3.036e+02 3.801e+02 7.493e+02, threshold=6.072e+02, percent-clipped=2.0 +2023-02-06 12:22:06,921 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7719, 5.9872, 5.1413, 2.2300, 5.1729, 5.5895, 5.4106, 5.1945], + device='cuda:2'), covar=tensor([0.0591, 0.0417, 0.0935, 0.5043, 0.0836, 0.0685, 0.1095, 0.0567], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0378, 0.0391, 0.0484, 0.0381, 0.0384, 0.0381, 0.0333], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:22:10,799 INFO [train.py:901] (2/4) Epoch 12, batch 5750, loss[loss=0.2162, simple_loss=0.3045, pruned_loss=0.06395, over 8337.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3065, pruned_loss=0.0766, over 1613053.09 frames. ], batch size: 26, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:26,193 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-06 12:22:35,728 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94700.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:22:42,476 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:22:45,146 INFO [train.py:901] (2/4) Epoch 12, batch 5800, loss[loss=0.2498, simple_loss=0.3265, pruned_loss=0.08659, over 8459.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3076, pruned_loss=0.07743, over 1612733.55 frames. ], batch size: 25, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:22:45,333 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94714.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:22:46,398 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4834, 4.4205, 4.0105, 1.7555, 3.9771, 4.0556, 4.0413, 3.6365], + device='cuda:2'), covar=tensor([0.0657, 0.0515, 0.0989, 0.4921, 0.0810, 0.0835, 0.1119, 0.0744], + device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0378, 0.0389, 0.0484, 0.0381, 0.0385, 0.0381, 0.0334], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:23:02,552 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94739.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:23:13,644 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.575e+02 3.288e+02 4.021e+02 7.847e+02, threshold=6.576e+02, percent-clipped=2.0 +2023-02-06 12:23:19,973 INFO [train.py:901] (2/4) Epoch 12, batch 5850, loss[loss=0.2522, simple_loss=0.3344, pruned_loss=0.08496, over 8354.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3077, pruned_loss=0.078, over 1615886.61 frames. ], batch size: 24, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:23:22,899 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9706, 1.9454, 2.3995, 1.7737, 1.2736, 2.4209, 0.5347, 1.4435], + device='cuda:2'), covar=tensor([0.2316, 0.1521, 0.0505, 0.2006, 0.4104, 0.0501, 0.2965, 0.1870], + device='cuda:2'), in_proj_covar=tensor([0.0168, 0.0171, 0.0101, 0.0215, 0.0254, 0.0106, 0.0164, 0.0165], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:23:54,270 INFO [train.py:901] (2/4) Epoch 12, batch 5900, loss[loss=0.2079, simple_loss=0.2898, pruned_loss=0.06307, over 8086.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3063, pruned_loss=0.07716, over 1616536.05 frames. ], batch size: 21, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:24:01,764 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:24:07,292 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. 
limit=2.0 +2023-02-06 12:24:17,126 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4919, 2.1261, 3.2976, 1.2528, 2.2756, 1.7876, 1.8285, 2.0350], + device='cuda:2'), covar=tensor([0.2026, 0.2344, 0.0897, 0.4394, 0.1978, 0.3387, 0.1960, 0.2900], + device='cuda:2'), in_proj_covar=tensor([0.0490, 0.0524, 0.0534, 0.0582, 0.0615, 0.0555, 0.0477, 0.0616], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:24:22,275 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.604e+02 3.248e+02 4.213e+02 6.479e+02, threshold=6.496e+02, percent-clipped=0.0 +2023-02-06 12:24:26,530 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0077, 1.5246, 1.7006, 1.3946, 0.8739, 1.4944, 1.6337, 1.4691], + device='cuda:2'), covar=tensor([0.0463, 0.1136, 0.1637, 0.1344, 0.0596, 0.1414, 0.0643, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0151, 0.0191, 0.0157, 0.0103, 0.0161, 0.0114, 0.0137], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 12:24:28,377 INFO [train.py:901] (2/4) Epoch 12, batch 5950, loss[loss=0.2371, simple_loss=0.3153, pruned_loss=0.07951, over 8499.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3058, pruned_loss=0.07648, over 1617382.62 frames. ], batch size: 26, lr: 6.32e-03, grad_scale: 8.0 +2023-02-06 12:25:03,795 INFO [train.py:901] (2/4) Epoch 12, batch 6000, loss[loss=0.2276, simple_loss=0.2992, pruned_loss=0.07798, over 7694.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3061, pruned_loss=0.07712, over 1616280.19 frames. ], batch size: 18, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:25:03,796 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 12:25:16,950 INFO [train.py:935] (2/4) Epoch 12, validation: loss=0.1862, simple_loss=0.286, pruned_loss=0.04318, over 944034.00 frames. +2023-02-06 12:25:16,950 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 12:25:44,731 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.442e+02 2.970e+02 3.787e+02 9.017e+02, threshold=5.940e+02, percent-clipped=3.0 +2023-02-06 12:25:45,513 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94956.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:25:50,750 INFO [train.py:901] (2/4) Epoch 12, batch 6050, loss[loss=0.2113, simple_loss=0.298, pruned_loss=0.06227, over 8131.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3065, pruned_loss=0.07735, over 1614570.30 frames. ], batch size: 22, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:26:02,541 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94981.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:26:07,069 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8620, 3.7705, 3.5061, 1.7283, 3.4215, 3.4591, 3.4572, 3.0607], + device='cuda:2'), covar=tensor([0.0899, 0.0680, 0.1024, 0.5027, 0.0983, 0.1237, 0.1381, 0.1120], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0384, 0.0392, 0.0490, 0.0384, 0.0392, 0.0382, 0.0337], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:26:25,544 INFO [train.py:901] (2/4) Epoch 12, batch 6100, loss[loss=0.2265, simple_loss=0.3077, pruned_loss=0.07266, over 8477.00 frames. 
], tot_loss[loss=0.2302, simple_loss=0.3065, pruned_loss=0.07693, over 1617485.18 frames. ], batch size: 27, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:26:54,014 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 12:26:54,670 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.590e+02 3.216e+02 4.301e+02 8.648e+02, threshold=6.433e+02, percent-clipped=2.0 +2023-02-06 12:27:00,769 INFO [train.py:901] (2/4) Epoch 12, batch 6150, loss[loss=0.2135, simple_loss=0.2952, pruned_loss=0.06591, over 8094.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3065, pruned_loss=0.07701, over 1613693.60 frames. ], batch size: 21, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:27:12,233 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95081.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:27:29,634 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95106.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:27:34,664 INFO [train.py:901] (2/4) Epoch 12, batch 6200, loss[loss=0.1998, simple_loss=0.2676, pruned_loss=0.066, over 7799.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3075, pruned_loss=0.07775, over 1614864.19 frames. ], batch size: 19, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:27:41,088 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95123.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:27:45,840 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6279, 4.6544, 4.1890, 2.0635, 4.1635, 4.3509, 4.2072, 3.8081], + device='cuda:2'), covar=tensor([0.0724, 0.0579, 0.1079, 0.4659, 0.0853, 0.0870, 0.1450, 0.0826], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0382, 0.0388, 0.0487, 0.0383, 0.0388, 0.0382, 0.0334], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:27:57,990 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6387, 2.3905, 4.5409, 2.0059, 2.6429, 5.2388, 5.1305, 4.5834], + device='cuda:2'), covar=tensor([0.0913, 0.1203, 0.0259, 0.1817, 0.0910, 0.0155, 0.0418, 0.0484], + device='cuda:2'), in_proj_covar=tensor([0.0266, 0.0297, 0.0260, 0.0289, 0.0274, 0.0236, 0.0347, 0.0288], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 12:28:04,337 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.519e+02 2.980e+02 3.798e+02 7.393e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 12:28:10,299 INFO [train.py:901] (2/4) Epoch 12, batch 6250, loss[loss=0.2364, simple_loss=0.3026, pruned_loss=0.08508, over 7655.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3085, pruned_loss=0.07826, over 1620125.78 frames. ], batch size: 19, lr: 6.31e-03, grad_scale: 8.0 +2023-02-06 12:28:21,987 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 12:28:43,818 INFO [train.py:901] (2/4) Epoch 12, batch 6300, loss[loss=0.2284, simple_loss=0.3061, pruned_loss=0.07536, over 8641.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3086, pruned_loss=0.07852, over 1621359.34 frames. 
], batch size: 49, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:29:13,408 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.657e+02 3.224e+02 4.358e+02 1.571e+03, threshold=6.448e+02, percent-clipped=5.0 +2023-02-06 12:29:20,987 INFO [train.py:901] (2/4) Epoch 12, batch 6350, loss[loss=0.2187, simple_loss=0.2962, pruned_loss=0.07056, over 8321.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.308, pruned_loss=0.07813, over 1618174.83 frames. ], batch size: 26, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:29:30,726 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:36,281 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:55,395 INFO [train.py:901] (2/4) Epoch 12, batch 6400, loss[loss=0.217, simple_loss=0.2821, pruned_loss=0.07595, over 7789.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3071, pruned_loss=0.07763, over 1619563.99 frames. ], batch size: 19, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:30:23,572 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.403e+02 2.937e+02 3.904e+02 6.682e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 12:30:25,096 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:30:29,662 INFO [train.py:901] (2/4) Epoch 12, batch 6450, loss[loss=0.1939, simple_loss=0.2679, pruned_loss=0.05997, over 7540.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3066, pruned_loss=0.07711, over 1619710.46 frames. ], batch size: 18, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:05,016 INFO [train.py:901] (2/4) Epoch 12, batch 6500, loss[loss=0.234, simple_loss=0.325, pruned_loss=0.07148, over 8374.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3072, pruned_loss=0.07713, over 1621407.53 frames. ], batch size: 24, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:14,610 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5037, 1.8198, 3.0447, 1.3042, 2.0189, 1.9292, 1.6159, 1.9289], + device='cuda:2'), covar=tensor([0.1763, 0.2432, 0.0733, 0.4152, 0.1896, 0.2835, 0.1852, 0.2336], + device='cuda:2'), in_proj_covar=tensor([0.0489, 0.0527, 0.0533, 0.0580, 0.0618, 0.0555, 0.0473, 0.0611], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:31:31,888 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.431e+02 2.857e+02 3.846e+02 1.801e+03, threshold=5.713e+02, percent-clipped=8.0 +2023-02-06 12:31:35,003 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-06 12:31:37,942 INFO [train.py:901] (2/4) Epoch 12, batch 6550, loss[loss=0.1951, simple_loss=0.2844, pruned_loss=0.05283, over 8028.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.307, pruned_loss=0.07692, over 1620387.03 frames. ], batch size: 22, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:40,442 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.40 vs. 
limit=5.0 +2023-02-06 12:31:40,701 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95467.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:31:59,688 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0171, 1.7097, 6.1075, 2.3015, 5.5384, 5.0987, 5.7165, 5.5526], + device='cuda:2'), covar=tensor([0.0360, 0.4188, 0.0263, 0.2954, 0.0697, 0.0679, 0.0354, 0.0347], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0581, 0.0594, 0.0544, 0.0622, 0.0532, 0.0520, 0.0582], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:32:06,852 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 12:32:13,517 INFO [train.py:901] (2/4) Epoch 12, batch 6600, loss[loss=0.263, simple_loss=0.3352, pruned_loss=0.09542, over 8447.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3077, pruned_loss=0.07725, over 1620168.02 frames. ], batch size: 27, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:25,469 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 12:32:25,781 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 12:32:40,270 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.522e+02 3.078e+02 3.913e+02 8.021e+02, threshold=6.157e+02, percent-clipped=7.0 +2023-02-06 12:32:43,801 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5661, 1.6708, 2.1569, 1.3067, 1.0669, 2.2329, 0.2036, 1.2528], + device='cuda:2'), covar=tensor([0.2982, 0.1544, 0.0496, 0.2630, 0.4355, 0.0408, 0.3225, 0.1990], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0173, 0.0103, 0.0216, 0.0256, 0.0108, 0.0163, 0.0166], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:32:46,205 INFO [train.py:901] (2/4) Epoch 12, batch 6650, loss[loss=0.2144, simple_loss=0.3013, pruned_loss=0.06369, over 8100.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3092, pruned_loss=0.0784, over 1615807.52 frames. ], batch size: 23, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:59,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95582.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:33:15,570 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-06 12:33:21,228 INFO [train.py:901] (2/4) Epoch 12, batch 6700, loss[loss=0.2217, simple_loss=0.2952, pruned_loss=0.07406, over 7548.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3085, pruned_loss=0.07871, over 1609121.07 frames. 
], batch size: 18, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:33:27,481 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:33,686 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:35,025 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3410, 1.4752, 4.3285, 1.6667, 2.2245, 5.0094, 5.0134, 4.3093], + device='cuda:2'), covar=tensor([0.1056, 0.1777, 0.0290, 0.2072, 0.1197, 0.0164, 0.0420, 0.0549], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0297, 0.0261, 0.0291, 0.0272, 0.0236, 0.0347, 0.0288], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 12:33:37,107 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:50,488 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 2.656e+02 3.142e+02 4.011e+02 7.522e+02, threshold=6.284e+02, percent-clipped=4.0 +2023-02-06 12:33:56,566 INFO [train.py:901] (2/4) Epoch 12, batch 6750, loss[loss=0.2511, simple_loss=0.3201, pruned_loss=0.09103, over 7804.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3083, pruned_loss=0.07854, over 1606303.62 frames. ], batch size: 20, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:22,170 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95701.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:30,620 INFO [train.py:901] (2/4) Epoch 12, batch 6800, loss[loss=0.2063, simple_loss=0.2755, pruned_loss=0.06856, over 7526.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3072, pruned_loss=0.078, over 1608578.54 frames. ], batch size: 18, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:40,704 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 12:34:47,120 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:53,258 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:00,263 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.510e+02 2.822e+02 3.564e+02 9.162e+02, threshold=5.644e+02, percent-clipped=3.0 +2023-02-06 12:35:06,292 INFO [train.py:901] (2/4) Epoch 12, batch 6850, loss[loss=0.2671, simple_loss=0.3298, pruned_loss=0.1022, over 6881.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3075, pruned_loss=0.07833, over 1607029.86 frames. ], batch size: 71, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:19,154 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1013, 2.7394, 3.2258, 1.2326, 3.1953, 1.7559, 1.4635, 2.0527], + device='cuda:2'), covar=tensor([0.0612, 0.0208, 0.0174, 0.0575, 0.0345, 0.0698, 0.0634, 0.0403], + device='cuda:2'), in_proj_covar=tensor([0.0394, 0.0329, 0.0282, 0.0393, 0.0325, 0.0483, 0.0360, 0.0360], + device='cuda:2'), out_proj_covar=tensor([1.1132e-04, 8.9943e-05, 7.7810e-05, 1.0918e-04, 9.0896e-05, 1.4543e-04, + 1.0159e-04, 1.0083e-04], device='cuda:2') +2023-02-06 12:35:25,953 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. 
limit=5.0 +2023-02-06 12:35:26,864 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 12:35:40,354 INFO [train.py:901] (2/4) Epoch 12, batch 6900, loss[loss=0.221, simple_loss=0.3004, pruned_loss=0.07075, over 8142.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3095, pruned_loss=0.0792, over 1611346.38 frames. ], batch size: 22, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:41,922 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:43,309 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7751, 1.6473, 2.8207, 2.2284, 2.4326, 1.6104, 1.3365, 1.2535], + device='cuda:2'), covar=tensor([0.6114, 0.5365, 0.1301, 0.2755, 0.2316, 0.3620, 0.2925, 0.4514], + device='cuda:2'), in_proj_covar=tensor([0.0888, 0.0878, 0.0733, 0.0858, 0.0934, 0.0806, 0.0705, 0.0769], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:35:48,561 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5575, 2.4053, 4.3442, 1.4223, 3.1906, 2.2285, 1.8347, 2.9250], + device='cuda:2'), covar=tensor([0.1710, 0.2103, 0.0620, 0.3955, 0.1423, 0.2717, 0.1736, 0.2230], + device='cuda:2'), in_proj_covar=tensor([0.0487, 0.0525, 0.0530, 0.0579, 0.0615, 0.0554, 0.0475, 0.0609], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:35:51,216 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95830.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:35:57,286 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95838.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:36:08,021 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.693e+02 3.422e+02 4.342e+02 1.062e+03, threshold=6.843e+02, percent-clipped=12.0 +2023-02-06 12:36:14,243 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95863.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:36:14,701 INFO [train.py:901] (2/4) Epoch 12, batch 6950, loss[loss=0.2459, simple_loss=0.3145, pruned_loss=0.08862, over 8017.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.309, pruned_loss=0.0788, over 1613408.10 frames. ], batch size: 22, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:36:15,459 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:36:34,588 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 12:36:48,736 INFO [train.py:901] (2/4) Epoch 12, batch 7000, loss[loss=0.2099, simple_loss=0.2948, pruned_loss=0.06256, over 8127.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3088, pruned_loss=0.07799, over 1617973.45 frames. ], batch size: 22, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:17,431 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.501e+02 3.116e+02 3.850e+02 8.001e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-06 12:37:19,590 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.40 vs. limit=5.0 +2023-02-06 12:37:23,289 INFO [train.py:901] (2/4) Epoch 12, batch 7050, loss[loss=0.2128, simple_loss=0.2861, pruned_loss=0.06976, over 7653.00 frames. 
], tot_loss[loss=0.2322, simple_loss=0.3083, pruned_loss=0.07803, over 1611377.52 frames. ], batch size: 19, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:24,101 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1163, 1.3870, 4.2711, 1.5978, 3.7154, 3.5445, 3.8495, 3.7120], + device='cuda:2'), covar=tensor([0.0562, 0.4412, 0.0568, 0.3704, 0.1221, 0.0885, 0.0605, 0.0669], + device='cuda:2'), in_proj_covar=tensor([0.0504, 0.0580, 0.0592, 0.0543, 0.0618, 0.0530, 0.0521, 0.0582], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:37:34,013 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:44,077 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:45,562 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.82 vs. limit=5.0 +2023-02-06 12:37:47,417 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:50,531 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:58,502 INFO [train.py:901] (2/4) Epoch 12, batch 7100, loss[loss=0.1902, simple_loss=0.2839, pruned_loss=0.04831, over 8199.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.308, pruned_loss=0.07828, over 1606762.99 frames. ], batch size: 23, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:01,407 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:07,006 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:26,973 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.539e+02 3.029e+02 4.080e+02 8.783e+02, threshold=6.058e+02, percent-clipped=4.0 +2023-02-06 12:38:33,146 INFO [train.py:901] (2/4) Epoch 12, batch 7150, loss[loss=0.2649, simple_loss=0.3273, pruned_loss=0.1012, over 8321.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3065, pruned_loss=0.07725, over 1610460.34 frames. ], batch size: 49, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:38,700 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:54,775 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:56,840 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:58,153 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:39:08,660 INFO [train.py:901] (2/4) Epoch 12, batch 7200, loss[loss=0.2078, simple_loss=0.281, pruned_loss=0.06726, over 8091.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3072, pruned_loss=0.07739, over 1612185.76 frames. 
], batch size: 21, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:39:36,231 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.446e+02 3.002e+02 3.633e+02 6.248e+02, threshold=6.005e+02, percent-clipped=1.0 +2023-02-06 12:39:42,858 INFO [train.py:901] (2/4) Epoch 12, batch 7250, loss[loss=0.2307, simple_loss=0.3067, pruned_loss=0.07734, over 7973.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.308, pruned_loss=0.07759, over 1613433.54 frames. ], batch size: 21, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:39:49,511 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96174.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:39:55,437 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:39:56,079 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8560, 3.8523, 3.5136, 1.7639, 3.3842, 3.3334, 3.5981, 3.0875], + device='cuda:2'), covar=tensor([0.0960, 0.0631, 0.1057, 0.4158, 0.1007, 0.1107, 0.1155, 0.1086], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0381, 0.0393, 0.0484, 0.0385, 0.0385, 0.0382, 0.0334], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:40:14,150 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:40:17,295 INFO [train.py:901] (2/4) Epoch 12, batch 7300, loss[loss=0.2732, simple_loss=0.3445, pruned_loss=0.1009, over 8334.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3083, pruned_loss=0.07786, over 1614429.09 frames. ], batch size: 26, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:40:44,971 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6510, 2.0834, 3.5420, 1.3459, 2.8382, 2.1694, 1.7237, 2.5893], + device='cuda:2'), covar=tensor([0.1613, 0.2105, 0.0608, 0.3907, 0.1301, 0.2621, 0.1762, 0.1856], + device='cuda:2'), in_proj_covar=tensor([0.0488, 0.0526, 0.0535, 0.0578, 0.0620, 0.0556, 0.0475, 0.0610], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:40:45,403 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.567e+02 3.297e+02 4.044e+02 1.170e+03, threshold=6.593e+02, percent-clipped=7.0 +2023-02-06 12:40:51,434 INFO [train.py:901] (2/4) Epoch 12, batch 7350, loss[loss=0.214, simple_loss=0.2865, pruned_loss=0.07072, over 7426.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3089, pruned_loss=0.07813, over 1613298.64 frames. ], batch size: 17, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:41:09,298 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96289.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:41:15,844 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 12:41:26,502 INFO [train.py:901] (2/4) Epoch 12, batch 7400, loss[loss=0.2766, simple_loss=0.34, pruned_loss=0.1066, over 7653.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3073, pruned_loss=0.07713, over 1612278.39 frames. 
], batch size: 19, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:41:33,419 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:36,479 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 12:41:40,810 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7392, 4.6382, 4.1788, 2.7588, 4.1010, 4.1858, 4.4382, 3.6851], + device='cuda:2'), covar=tensor([0.0558, 0.0427, 0.0834, 0.3601, 0.0713, 0.0895, 0.0835, 0.0926], + device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0381, 0.0390, 0.0483, 0.0381, 0.0384, 0.0380, 0.0333], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:41:42,988 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7510, 1.1342, 3.9012, 1.3879, 3.3824, 3.2595, 3.4993, 3.4233], + device='cuda:2'), covar=tensor([0.0654, 0.5082, 0.0679, 0.3906, 0.1361, 0.1005, 0.0690, 0.0775], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0589, 0.0604, 0.0549, 0.0627, 0.0535, 0.0530, 0.0592], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 12:41:47,014 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:47,084 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:52,327 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:55,371 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.475e+02 3.182e+02 4.307e+02 9.281e+02, threshold=6.365e+02, percent-clipped=3.0 +2023-02-06 12:42:01,559 INFO [train.py:901] (2/4) Epoch 12, batch 7450, loss[loss=0.2253, simple_loss=0.308, pruned_loss=0.07131, over 8609.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3082, pruned_loss=0.07765, over 1614898.69 frames. ], batch size: 34, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:42:09,243 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:42:15,390 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 12:42:31,443 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96408.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:42:35,248 INFO [train.py:901] (2/4) Epoch 12, batch 7500, loss[loss=0.2547, simple_loss=0.3249, pruned_loss=0.09223, over 7132.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3086, pruned_loss=0.07799, over 1611408.43 frames. 
], batch size: 71, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:42:54,778 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:43:02,270 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3904, 2.7265, 1.9179, 2.2424, 2.1241, 1.5139, 1.8999, 1.9640], + device='cuda:2'), covar=tensor([0.1341, 0.0328, 0.1028, 0.0557, 0.0662, 0.1373, 0.0896, 0.0930], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0240, 0.0323, 0.0301, 0.0306, 0.0325, 0.0343, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:43:03,968 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.684e+02 3.354e+02 4.069e+02 8.964e+02, threshold=6.707e+02, percent-clipped=7.0 +2023-02-06 12:43:05,483 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:43:09,785 INFO [train.py:901] (2/4) Epoch 12, batch 7550, loss[loss=0.2855, simple_loss=0.354, pruned_loss=0.1085, over 8286.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.308, pruned_loss=0.07778, over 1609316.75 frames. ], batch size: 23, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:43:15,330 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0569, 2.5299, 3.3832, 1.9855, 1.7813, 3.3113, 0.6790, 1.9837], + device='cuda:2'), covar=tensor([0.1739, 0.1543, 0.0382, 0.2331, 0.4153, 0.0653, 0.3505, 0.2052], + device='cuda:2'), in_proj_covar=tensor([0.0169, 0.0173, 0.0103, 0.0216, 0.0254, 0.0109, 0.0164, 0.0166], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:43:42,988 INFO [train.py:901] (2/4) Epoch 12, batch 7600, loss[loss=0.2516, simple_loss=0.3152, pruned_loss=0.09403, over 8399.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3091, pruned_loss=0.0782, over 1610852.70 frames. ], batch size: 49, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:43:52,501 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:05,571 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96545.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:44:11,798 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 2.706e+02 3.173e+02 4.121e+02 9.971e+02, threshold=6.345e+02, percent-clipped=8.0 +2023-02-06 12:44:13,334 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:18,541 INFO [train.py:901] (2/4) Epoch 12, batch 7650, loss[loss=0.2041, simple_loss=0.2806, pruned_loss=0.06382, over 8190.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3082, pruned_loss=0.07775, over 1613061.19 frames. 
], batch size: 23, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:44:23,310 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96570.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:44:29,847 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:47,076 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:53,025 INFO [train.py:901] (2/4) Epoch 12, batch 7700, loss[loss=0.2379, simple_loss=0.3265, pruned_loss=0.07469, over 8584.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3075, pruned_loss=0.07743, over 1607431.97 frames. ], batch size: 34, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:45:02,659 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1752, 1.1113, 1.2358, 1.1060, 0.9319, 1.2823, 0.0770, 0.8486], + device='cuda:2'), covar=tensor([0.2116, 0.1524, 0.0649, 0.1230, 0.3667, 0.0608, 0.3007, 0.1694], + device='cuda:2'), in_proj_covar=tensor([0.0169, 0.0172, 0.0103, 0.0215, 0.0255, 0.0109, 0.0163, 0.0166], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 12:45:12,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:45:21,241 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.521e+02 3.004e+02 3.630e+02 7.905e+02, threshold=6.007e+02, percent-clipped=3.0 +2023-02-06 12:45:23,905 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 12:45:27,882 INFO [train.py:901] (2/4) Epoch 12, batch 7750, loss[loss=0.2534, simple_loss=0.3273, pruned_loss=0.08977, over 8294.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3055, pruned_loss=0.07596, over 1608399.58 frames. ], batch size: 23, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:45:42,857 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:02,099 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:02,580 INFO [train.py:901] (2/4) Epoch 12, batch 7800, loss[loss=0.2094, simple_loss=0.2797, pruned_loss=0.06955, over 8096.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3047, pruned_loss=0.07555, over 1611961.70 frames. ], batch size: 21, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:46:15,975 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. 
limit=5.0 +2023-02-06 12:46:19,264 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:23,937 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6967, 1.7593, 4.2229, 1.8317, 2.3773, 4.9663, 4.9351, 4.0407], + device='cuda:2'), covar=tensor([0.1035, 0.1727, 0.0373, 0.2192, 0.1268, 0.0236, 0.0411, 0.0699], + device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0299, 0.0263, 0.0291, 0.0274, 0.0238, 0.0354, 0.0289], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 12:46:28,440 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:30,315 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.519e+02 3.193e+02 4.174e+02 8.059e+02, threshold=6.386e+02, percent-clipped=4.0 +2023-02-06 12:46:31,382 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 12:46:36,557 INFO [train.py:901] (2/4) Epoch 12, batch 7850, loss[loss=0.2292, simple_loss=0.2995, pruned_loss=0.0794, over 8224.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3052, pruned_loss=0.07612, over 1612766.37 frames. ], batch size: 22, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:46:56,860 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:01,901 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:10,009 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:10,492 INFO [train.py:901] (2/4) Epoch 12, batch 7900, loss[loss=0.2099, simple_loss=0.2856, pruned_loss=0.06714, over 8142.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3036, pruned_loss=0.07568, over 1602721.17 frames. ], batch size: 22, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:47:27,488 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:38,783 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.595e+02 3.080e+02 3.878e+02 8.124e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 12:47:44,784 INFO [train.py:901] (2/4) Epoch 12, batch 7950, loss[loss=0.1988, simple_loss=0.2892, pruned_loss=0.05417, over 7662.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3027, pruned_loss=0.07556, over 1599012.98 frames. ], batch size: 19, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:47:47,002 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:07,428 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96898.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:17,695 INFO [train.py:901] (2/4) Epoch 12, batch 8000, loss[loss=0.2549, simple_loss=0.3408, pruned_loss=0.08451, over 8246.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3024, pruned_loss=0.07502, over 1602445.70 frames. 
], batch size: 24, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:48:23,689 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96923.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:45,040 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.377e+02 3.280e+02 4.266e+02 7.100e+02, threshold=6.559e+02, percent-clipped=4.0 +2023-02-06 12:48:50,058 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8930, 1.7588, 2.8661, 1.2308, 2.2465, 3.0661, 3.1153, 2.6320], + device='cuda:2'), covar=tensor([0.0927, 0.1195, 0.0337, 0.2078, 0.0713, 0.0292, 0.0531, 0.0590], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0302, 0.0263, 0.0292, 0.0274, 0.0239, 0.0357, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:48:51,258 INFO [train.py:901] (2/4) Epoch 12, batch 8050, loss[loss=0.2217, simple_loss=0.2916, pruned_loss=0.07586, over 7532.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3028, pruned_loss=0.07548, over 1598822.76 frames. ], batch size: 18, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:49:24,720 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 12:49:29,795 INFO [train.py:901] (2/4) Epoch 13, batch 0, loss[loss=0.2219, simple_loss=0.3053, pruned_loss=0.0692, over 8078.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3053, pruned_loss=0.0692, over 8078.00 frames. ], batch size: 21, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:49:29,796 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 12:49:40,738 INFO [train.py:935] (2/4) Epoch 13, validation: loss=0.1867, simple_loss=0.2865, pruned_loss=0.04345, over 944034.00 frames. +2023-02-06 12:49:40,739 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 12:49:41,812 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 12:49:55,386 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 12:49:55,520 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:14,914 INFO [train.py:901] (2/4) Epoch 13, batch 50, loss[loss=0.1955, simple_loss=0.2833, pruned_loss=0.05388, over 7954.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3094, pruned_loss=0.07578, over 369955.89 frames. ], batch size: 21, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:50:20,337 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.833e+02 3.357e+02 4.758e+02 6.927e+02, threshold=6.715e+02, percent-clipped=2.0 +2023-02-06 12:50:21,933 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:29,185 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 12:50:41,098 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:50,958 INFO [train.py:901] (2/4) Epoch 13, batch 100, loss[loss=0.2149, simple_loss=0.3015, pruned_loss=0.06411, over 8475.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3088, pruned_loss=0.07501, over 650175.92 frames. 
], batch size: 25, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:50:52,983 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 12:51:09,080 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:18,983 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:24,694 INFO [train.py:901] (2/4) Epoch 13, batch 150, loss[loss=0.2562, simple_loss=0.336, pruned_loss=0.08819, over 8186.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3097, pruned_loss=0.07684, over 866872.64 frames. ], batch size: 23, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:51:25,592 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97148.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:30,102 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.477e+02 2.848e+02 3.342e+02 7.997e+02, threshold=5.696e+02, percent-clipped=2.0 +2023-02-06 12:51:39,289 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 12:51:42,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9432, 1.5890, 3.5014, 1.5163, 2.3711, 3.8665, 3.8908, 3.3747], + device='cuda:2'), covar=tensor([0.1153, 0.1556, 0.0308, 0.1993, 0.0976, 0.0237, 0.0468, 0.0563], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0302, 0.0265, 0.0293, 0.0275, 0.0239, 0.0359, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:51:58,442 INFO [train.py:901] (2/4) Epoch 13, batch 200, loss[loss=0.2046, simple_loss=0.281, pruned_loss=0.06408, over 8079.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3086, pruned_loss=0.07619, over 1039274.49 frames. ], batch size: 21, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:52:09,220 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0126, 2.1885, 1.8589, 2.7297, 1.3658, 1.7329, 1.8757, 2.2848], + device='cuda:2'), covar=tensor([0.0649, 0.0891, 0.0916, 0.0363, 0.1148, 0.1295, 0.0966, 0.0745], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0211, 0.0251, 0.0214, 0.0213, 0.0251, 0.0254, 0.0218], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 12:52:18,287 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3167, 1.3481, 2.2666, 1.1065, 2.0557, 2.3693, 2.5555, 1.8960], + device='cuda:2'), covar=tensor([0.1176, 0.1370, 0.0571, 0.2235, 0.0846, 0.0534, 0.0706, 0.1079], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0301, 0.0263, 0.0292, 0.0274, 0.0238, 0.0356, 0.0291], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 12:52:23,162 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 12:52:33,409 INFO [train.py:901] (2/4) Epoch 13, batch 250, loss[loss=0.2288, simple_loss=0.3094, pruned_loss=0.0741, over 8507.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3082, pruned_loss=0.0768, over 1167011.71 frames. 
], batch size: 26, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:52:37,624 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:52:38,766 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.455e+02 3.117e+02 3.819e+02 7.824e+02, threshold=6.233e+02, percent-clipped=7.0 +2023-02-06 12:52:46,027 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 12:52:53,685 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 12:52:54,018 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:52:54,530 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 12:53:06,364 INFO [train.py:901] (2/4) Epoch 13, batch 300, loss[loss=0.1947, simple_loss=0.2571, pruned_loss=0.06618, over 7529.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3087, pruned_loss=0.07792, over 1268010.69 frames. ], batch size: 18, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:53:06,751 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 12:53:34,632 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.51 vs. limit=5.0 +2023-02-06 12:53:41,548 INFO [train.py:901] (2/4) Epoch 13, batch 350, loss[loss=0.2509, simple_loss=0.324, pruned_loss=0.08894, over 8655.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3086, pruned_loss=0.07788, over 1347548.39 frames. ], batch size: 34, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:53:46,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.508e+02 3.076e+02 3.709e+02 6.548e+02, threshold=6.153e+02, percent-clipped=1.0 +2023-02-06 12:53:50,474 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:51,766 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:53,813 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:55,087 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2479, 4.2281, 3.8247, 1.9622, 3.7816, 3.6788, 3.8310, 3.3730], + device='cuda:2'), covar=tensor([0.0895, 0.0654, 0.1116, 0.4843, 0.1033, 0.1082, 0.1398, 0.1174], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0385, 0.0392, 0.0488, 0.0387, 0.0387, 0.0381, 0.0338], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:54:11,898 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:54:15,163 INFO [train.py:901] (2/4) Epoch 13, batch 400, loss[loss=0.2506, simple_loss=0.3216, pruned_loss=0.08983, over 8547.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3067, pruned_loss=0.07706, over 1402026.82 frames. 
], batch size: 31, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:54:39,465 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7364, 5.6911, 5.0878, 2.5054, 5.1271, 5.4801, 5.2699, 5.1943], + device='cuda:2'), covar=tensor([0.0488, 0.0376, 0.0819, 0.4514, 0.0639, 0.0848, 0.0876, 0.0696], + device='cuda:2'), in_proj_covar=tensor([0.0468, 0.0380, 0.0390, 0.0485, 0.0383, 0.0386, 0.0377, 0.0335], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:54:51,246 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2043, 1.6528, 1.4343, 1.7115, 1.3886, 1.2760, 1.3284, 1.4479], + device='cuda:2'), covar=tensor([0.0836, 0.0385, 0.1061, 0.0428, 0.0613, 0.1164, 0.0774, 0.0585], + device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0235, 0.0318, 0.0300, 0.0301, 0.0321, 0.0339, 0.0300], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:54:51,730 INFO [train.py:901] (2/4) Epoch 13, batch 450, loss[loss=0.2625, simple_loss=0.3204, pruned_loss=0.1023, over 7777.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3056, pruned_loss=0.07646, over 1445987.16 frames. ], batch size: 19, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:54:57,107 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.318e+02 2.836e+02 3.756e+02 7.381e+02, threshold=5.672e+02, percent-clipped=3.0 +2023-02-06 12:54:58,931 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 12:55:13,265 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:26,766 INFO [train.py:901] (2/4) Epoch 13, batch 500, loss[loss=0.2302, simple_loss=0.3002, pruned_loss=0.08011, over 8125.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3049, pruned_loss=0.07588, over 1482259.73 frames. ], batch size: 22, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:55:35,500 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97509.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:52,968 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97534.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:01,767 INFO [train.py:901] (2/4) Epoch 13, batch 550, loss[loss=0.2581, simple_loss=0.3296, pruned_loss=0.09331, over 8108.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3056, pruned_loss=0.07627, over 1514706.77 frames. ], batch size: 23, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:56:07,720 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.533e+02 3.037e+02 3.770e+02 9.997e+02, threshold=6.074e+02, percent-clipped=4.0 +2023-02-06 12:56:20,095 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:36,757 INFO [train.py:901] (2/4) Epoch 13, batch 600, loss[loss=0.3007, simple_loss=0.3638, pruned_loss=0.1188, over 6940.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3065, pruned_loss=0.07675, over 1539792.73 frames. 
], batch size: 71, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:56:53,703 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:54,431 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2175, 3.1753, 2.9030, 1.4487, 2.8987, 2.9580, 2.9001, 2.7429], + device='cuda:2'), covar=tensor([0.1245, 0.0921, 0.1405, 0.4847, 0.1199, 0.1236, 0.1602, 0.1144], + device='cuda:2'), in_proj_covar=tensor([0.0470, 0.0385, 0.0391, 0.0488, 0.0385, 0.0389, 0.0382, 0.0339], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:56:55,693 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 12:56:55,856 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8668, 1.5649, 1.6552, 1.4545, 1.1467, 1.5415, 1.7373, 1.7317], + device='cuda:2'), covar=tensor([0.0498, 0.0904, 0.1312, 0.1084, 0.0596, 0.1115, 0.0644, 0.0421], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0151, 0.0191, 0.0157, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 12:56:59,736 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6389, 2.6006, 1.9479, 2.2554, 2.0384, 1.4088, 1.9212, 2.1304], + device='cuda:2'), covar=tensor([0.1407, 0.0364, 0.1117, 0.0618, 0.0764, 0.1520, 0.1008, 0.1005], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0238, 0.0324, 0.0304, 0.0306, 0.0327, 0.0345, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:57:09,686 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7992, 3.7749, 3.4358, 1.9248, 3.2746, 3.4841, 3.4905, 3.1413], + device='cuda:2'), covar=tensor([0.0988, 0.0716, 0.1224, 0.4671, 0.1126, 0.1131, 0.1230, 0.1084], + device='cuda:2'), in_proj_covar=tensor([0.0467, 0.0385, 0.0391, 0.0487, 0.0384, 0.0388, 0.0380, 0.0338], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 12:57:10,250 INFO [train.py:901] (2/4) Epoch 13, batch 650, loss[loss=0.1914, simple_loss=0.2858, pruned_loss=0.04847, over 8354.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3062, pruned_loss=0.07655, over 1557620.38 frames. ], batch size: 24, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:57:16,274 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.537e+02 2.925e+02 3.842e+02 7.324e+02, threshold=5.850e+02, percent-clipped=4.0 +2023-02-06 12:57:37,850 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3351, 2.1317, 1.7632, 1.9615, 1.7160, 1.3324, 1.5934, 1.7509], + device='cuda:2'), covar=tensor([0.1127, 0.0344, 0.0995, 0.0476, 0.0677, 0.1292, 0.0859, 0.0659], + device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0238, 0.0323, 0.0303, 0.0306, 0.0327, 0.0345, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:57:42,540 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97692.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:57:45,737 INFO [train.py:901] (2/4) Epoch 13, batch 700, loss[loss=0.2311, simple_loss=0.3143, pruned_loss=0.07395, over 8252.00 frames. 
], tot_loss[loss=0.2296, simple_loss=0.3061, pruned_loss=0.07654, over 1571068.10 frames. ], batch size: 24, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:57:50,154 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.68 vs. limit=5.0 +2023-02-06 12:57:51,260 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:57:54,513 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:57:59,103 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 12:58:10,715 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:12,618 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:13,391 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:19,711 INFO [train.py:901] (2/4) Epoch 13, batch 750, loss[loss=0.2015, simple_loss=0.2661, pruned_loss=0.06845, over 7542.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3048, pruned_loss=0.0761, over 1579235.75 frames. ], batch size: 18, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:58:25,053 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.478e+02 2.997e+02 3.995e+02 8.399e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-06 12:58:27,374 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:39,715 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 12:58:49,002 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 12:58:51,769 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4053, 2.2477, 3.2890, 2.0657, 2.7745, 3.6880, 3.7544, 2.9897], + device='cuda:2'), covar=tensor([0.1058, 0.1426, 0.0762, 0.1967, 0.1526, 0.0371, 0.0654, 0.0861], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0302, 0.0267, 0.0294, 0.0278, 0.0242, 0.0359, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 12:58:54,236 INFO [train.py:901] (2/4) Epoch 13, batch 800, loss[loss=0.2103, simple_loss=0.2779, pruned_loss=0.07134, over 7718.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3048, pruned_loss=0.07605, over 1585230.34 frames. ], batch size: 18, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:59:10,091 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:14,043 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:29,475 INFO [train.py:901] (2/4) Epoch 13, batch 850, loss[loss=0.2031, simple_loss=0.2893, pruned_loss=0.05847, over 8111.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3055, pruned_loss=0.07615, over 1591669.23 frames. 
], batch size: 23, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:59:32,320 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:35,501 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.627e+02 3.254e+02 4.246e+02 9.834e+02, threshold=6.507e+02, percent-clipped=8.0 +2023-02-06 13:00:03,797 INFO [train.py:901] (2/4) Epoch 13, batch 900, loss[loss=0.2728, simple_loss=0.3541, pruned_loss=0.09574, over 8667.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3049, pruned_loss=0.07556, over 1593755.34 frames. ], batch size: 34, lr: 5.98e-03, grad_scale: 8.0 +2023-02-06 13:00:05,040 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 13:00:09,703 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:00:18,212 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:00:39,215 INFO [train.py:901] (2/4) Epoch 13, batch 950, loss[loss=0.2427, simple_loss=0.303, pruned_loss=0.09123, over 7791.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3046, pruned_loss=0.07531, over 1597839.40 frames. ], batch size: 19, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:00:45,306 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.593e+02 3.202e+02 4.020e+02 7.231e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 13:00:55,372 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9915, 1.7935, 3.4231, 1.4564, 2.2976, 3.8166, 3.8901, 3.2734], + device='cuda:2'), covar=tensor([0.1022, 0.1341, 0.0333, 0.1985, 0.1019, 0.0221, 0.0447, 0.0568], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0300, 0.0266, 0.0292, 0.0276, 0.0241, 0.0358, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 13:01:08,765 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 13:01:11,125 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:13,712 INFO [train.py:901] (2/4) Epoch 13, batch 1000, loss[loss=0.2047, simple_loss=0.2875, pruned_loss=0.06091, over 8338.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3053, pruned_loss=0.0757, over 1596558.28 frames. ], batch size: 25, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:01:29,781 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:39,964 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:43,178 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98036.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:01:44,386 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 13:01:50,454 INFO [train.py:901] (2/4) Epoch 13, batch 1050, loss[loss=0.1939, simple_loss=0.2664, pruned_loss=0.06068, over 7687.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3052, pruned_loss=0.07568, over 1602182.15 frames. 
], batch size: 18, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:01:56,533 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 2.365e+02 2.893e+02 3.782e+02 5.594e+02, threshold=5.785e+02, percent-clipped=0.0 +2023-02-06 13:01:57,233 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 13:02:10,160 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98075.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:13,458 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:24,727 INFO [train.py:901] (2/4) Epoch 13, batch 1100, loss[loss=0.186, simple_loss=0.2635, pruned_loss=0.0543, over 7772.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3044, pruned_loss=0.0753, over 1605430.88 frames. ], batch size: 19, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:02:26,972 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:30,281 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:31,648 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:43,619 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:49,148 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:59,486 INFO [train.py:901] (2/4) Epoch 13, batch 1150, loss[loss=0.2001, simple_loss=0.2917, pruned_loss=0.0543, over 8590.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3036, pruned_loss=0.07428, over 1609482.47 frames. ], batch size: 31, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:03:02,368 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98151.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:03:05,420 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 13:03:06,082 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.603e+02 3.101e+02 3.825e+02 7.832e+02, threshold=6.203e+02, percent-clipped=6.0 +2023-02-06 13:03:14,956 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1204, 1.2755, 1.1852, 0.6791, 1.2497, 1.0130, 0.1084, 1.2533], + device='cuda:2'), covar=tensor([0.0276, 0.0241, 0.0223, 0.0357, 0.0257, 0.0620, 0.0494, 0.0200], + device='cuda:2'), in_proj_covar=tensor([0.0402, 0.0337, 0.0289, 0.0397, 0.0325, 0.0484, 0.0360, 0.0362], + device='cuda:2'), out_proj_covar=tensor([1.1293e-04, 9.2114e-05, 7.9625e-05, 1.0987e-04, 9.0458e-05, 1.4467e-04, + 1.0148e-04, 1.0106e-04], device='cuda:2') +2023-02-06 13:03:34,172 INFO [train.py:901] (2/4) Epoch 13, batch 1200, loss[loss=0.2248, simple_loss=0.3099, pruned_loss=0.06989, over 8686.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3047, pruned_loss=0.07488, over 1614449.76 frames. ], batch size: 34, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:03:46,029 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. 
limit=2.0 +2023-02-06 13:04:06,223 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3935, 1.6306, 4.5479, 2.1326, 4.0443, 3.8926, 4.1529, 4.0138], + device='cuda:2'), covar=tensor([0.0505, 0.4036, 0.0476, 0.3235, 0.0945, 0.0781, 0.0479, 0.0551], + device='cuda:2'), in_proj_covar=tensor([0.0510, 0.0587, 0.0598, 0.0544, 0.0618, 0.0531, 0.0522, 0.0579], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:04:08,147 INFO [train.py:901] (2/4) Epoch 13, batch 1250, loss[loss=0.2179, simple_loss=0.2919, pruned_loss=0.07191, over 8311.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3046, pruned_loss=0.07489, over 1617121.56 frames. ], batch size: 25, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:04:10,208 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:04:14,076 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.568e+02 3.066e+02 4.053e+02 1.440e+03, threshold=6.132e+02, percent-clipped=8.0 +2023-02-06 13:04:36,958 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:04:43,429 INFO [train.py:901] (2/4) Epoch 13, batch 1300, loss[loss=0.2517, simple_loss=0.3102, pruned_loss=0.09663, over 7654.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3048, pruned_loss=0.07485, over 1619714.21 frames. ], batch size: 19, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:04:44,913 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4783, 1.9311, 2.9227, 2.3570, 2.5877, 2.2338, 1.8794, 1.2985], + device='cuda:2'), covar=tensor([0.3948, 0.4415, 0.1222, 0.2408, 0.1842, 0.2304, 0.1848, 0.4270], + device='cuda:2'), in_proj_covar=tensor([0.0893, 0.0883, 0.0741, 0.0861, 0.0939, 0.0810, 0.0706, 0.0771], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:04:54,299 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8161, 1.6936, 1.8415, 1.5613, 1.1584, 1.5925, 2.1894, 1.9729], + device='cuda:2'), covar=tensor([0.0424, 0.1181, 0.1629, 0.1371, 0.0593, 0.1503, 0.0645, 0.0614], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0153, 0.0193, 0.0158, 0.0102, 0.0164, 0.0116, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 13:04:54,329 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:05:12,874 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8737, 3.8630, 2.4459, 2.5415, 2.7701, 2.0317, 2.3868, 2.9276], + device='cuda:2'), covar=tensor([0.1537, 0.0301, 0.0945, 0.0832, 0.0629, 0.1239, 0.1175, 0.1045], + device='cuda:2'), in_proj_covar=tensor([0.0338, 0.0231, 0.0313, 0.0295, 0.0296, 0.0317, 0.0334, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 13:05:16,645 INFO [train.py:901] (2/4) Epoch 13, batch 1350, loss[loss=0.1825, simple_loss=0.2526, pruned_loss=0.05617, over 7693.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3057, pruned_loss=0.07566, over 1621870.92 frames. 
], batch size: 18, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:05:23,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.492e+02 3.102e+02 3.697e+02 5.327e+02, threshold=6.205e+02, percent-clipped=0.0 +2023-02-06 13:05:29,572 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:05:52,228 INFO [train.py:901] (2/4) Epoch 13, batch 1400, loss[loss=0.2355, simple_loss=0.3235, pruned_loss=0.07376, over 8466.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3047, pruned_loss=0.07462, over 1622270.73 frames. ], batch size: 29, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:05:59,398 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98407.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:06:14,070 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:06:16,852 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98432.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:06:26,901 INFO [train.py:901] (2/4) Epoch 13, batch 1450, loss[loss=0.2333, simple_loss=0.3239, pruned_loss=0.07131, over 8241.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3041, pruned_loss=0.07469, over 1619080.83 frames. ], batch size: 24, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:06:32,939 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.293e+02 2.812e+02 3.491e+02 8.118e+02, threshold=5.625e+02, percent-clipped=1.0 +2023-02-06 13:06:34,304 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 13:06:41,232 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98468.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:06:59,729 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-02-06 13:07:02,157 INFO [train.py:901] (2/4) Epoch 13, batch 1500, loss[loss=0.2463, simple_loss=0.3348, pruned_loss=0.0789, over 8194.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3056, pruned_loss=0.07538, over 1621112.13 frames. ], batch size: 23, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:07:37,383 INFO [train.py:901] (2/4) Epoch 13, batch 1550, loss[loss=0.1808, simple_loss=0.2579, pruned_loss=0.05182, over 7689.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3056, pruned_loss=0.07548, over 1616702.34 frames. 
], batch size: 18, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:07:43,378 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.583e+02 3.208e+02 4.119e+02 6.608e+02, threshold=6.417e+02, percent-clipped=3.0 +2023-02-06 13:07:46,952 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:02,057 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:05,867 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1413, 2.8286, 3.5014, 2.2075, 1.9658, 3.6364, 0.7757, 2.2225], + device='cuda:2'), covar=tensor([0.1963, 0.1394, 0.0420, 0.2252, 0.3681, 0.0399, 0.3240, 0.1738], + device='cuda:2'), in_proj_covar=tensor([0.0172, 0.0175, 0.0104, 0.0222, 0.0262, 0.0112, 0.0164, 0.0168], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 13:08:06,458 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1452, 4.1194, 3.7419, 1.7478, 3.5581, 3.6327, 3.7979, 3.3487], + device='cuda:2'), covar=tensor([0.0819, 0.0649, 0.1124, 0.4962, 0.1015, 0.1105, 0.1311, 0.0996], + device='cuda:2'), in_proj_covar=tensor([0.0470, 0.0387, 0.0396, 0.0489, 0.0387, 0.0392, 0.0382, 0.0341], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 13:08:11,761 INFO [train.py:901] (2/4) Epoch 13, batch 1600, loss[loss=0.2314, simple_loss=0.3123, pruned_loss=0.07525, over 8455.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.305, pruned_loss=0.07526, over 1617954.25 frames. ], batch size: 29, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:08:29,745 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:46,809 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:47,298 INFO [train.py:901] (2/4) Epoch 13, batch 1650, loss[loss=0.2509, simple_loss=0.3142, pruned_loss=0.09377, over 7796.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3036, pruned_loss=0.07478, over 1612220.02 frames. ], batch size: 20, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:08:53,412 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.462e+02 2.942e+02 3.707e+02 8.113e+02, threshold=5.885e+02, percent-clipped=6.0 +2023-02-06 13:09:20,791 INFO [train.py:901] (2/4) Epoch 13, batch 1700, loss[loss=0.2053, simple_loss=0.2989, pruned_loss=0.05583, over 8241.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.304, pruned_loss=0.07461, over 1608248.65 frames. ], batch size: 24, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:09:45,855 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9617, 2.1104, 1.7762, 2.7601, 1.2365, 1.6246, 1.8937, 2.1880], + device='cuda:2'), covar=tensor([0.0737, 0.0845, 0.1021, 0.0409, 0.1208, 0.1325, 0.0962, 0.0833], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0213, 0.0254, 0.0215, 0.0215, 0.0253, 0.0257, 0.0219], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 13:09:57,388 INFO [train.py:901] (2/4) Epoch 13, batch 1750, loss[loss=0.2162, simple_loss=0.2975, pruned_loss=0.06744, over 8026.00 frames. 
], tot_loss[loss=0.2277, simple_loss=0.3052, pruned_loss=0.07511, over 1608386.09 frames. ], batch size: 22, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:10:03,432 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.663e+02 3.311e+02 3.905e+02 7.561e+02, threshold=6.622e+02, percent-clipped=6.0 +2023-02-06 13:10:15,124 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98772.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:10:23,960 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9851, 1.4648, 6.1291, 2.1536, 5.5436, 5.2403, 5.7533, 5.5799], + device='cuda:2'), covar=tensor([0.0388, 0.4287, 0.0337, 0.3219, 0.0831, 0.0776, 0.0350, 0.0416], + device='cuda:2'), in_proj_covar=tensor([0.0509, 0.0583, 0.0596, 0.0549, 0.0619, 0.0531, 0.0521, 0.0584], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:10:31,882 INFO [train.py:901] (2/4) Epoch 13, batch 1800, loss[loss=0.2444, simple_loss=0.3352, pruned_loss=0.07685, over 8460.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3038, pruned_loss=0.07386, over 1608435.54 frames. ], batch size: 25, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:01,378 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:06,993 INFO [train.py:901] (2/4) Epoch 13, batch 1850, loss[loss=0.2621, simple_loss=0.3395, pruned_loss=0.09237, over 8201.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.303, pruned_loss=0.07362, over 1608980.97 frames. ], batch size: 23, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:13,499 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.419e+02 2.914e+02 4.067e+02 1.078e+03, threshold=5.828e+02, percent-clipped=2.0 +2023-02-06 13:11:18,920 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:29,415 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8422, 2.7815, 2.0857, 2.2376, 2.2013, 1.7204, 2.2063, 2.3290], + device='cuda:2'), covar=tensor([0.1243, 0.0330, 0.0870, 0.0624, 0.0612, 0.1250, 0.0820, 0.0853], + device='cuda:2'), in_proj_covar=tensor([0.0343, 0.0235, 0.0316, 0.0299, 0.0300, 0.0323, 0.0339, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 13:11:34,563 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:41,190 INFO [train.py:901] (2/4) Epoch 13, batch 1900, loss[loss=0.1871, simple_loss=0.2745, pruned_loss=0.04982, over 7973.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3033, pruned_loss=0.07392, over 1612330.30 frames. ], batch size: 21, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:46,607 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:48,678 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98908.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:51,826 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:12:11,193 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-06 13:12:15,141 INFO [train.py:901] (2/4) Epoch 13, batch 1950, loss[loss=0.1968, simple_loss=0.2729, pruned_loss=0.06034, over 7799.00 frames. ], tot_loss[loss=0.225, simple_loss=0.303, pruned_loss=0.07348, over 1613256.93 frames. ], batch size: 20, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:12:21,304 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.445e+02 3.079e+02 3.874e+02 6.986e+02, threshold=6.158e+02, percent-clipped=4.0 +2023-02-06 13:12:23,960 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 13:12:35,737 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7287, 5.8084, 5.0572, 2.5773, 4.9522, 5.5218, 5.3064, 5.3060], + device='cuda:2'), covar=tensor([0.0523, 0.0464, 0.0982, 0.4142, 0.0783, 0.0735, 0.1198, 0.0578], + device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0390, 0.0396, 0.0492, 0.0388, 0.0392, 0.0384, 0.0341], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 13:12:44,582 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 13:12:45,369 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98989.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:12:50,408 INFO [train.py:901] (2/4) Epoch 13, batch 2000, loss[loss=0.2267, simple_loss=0.2971, pruned_loss=0.07814, over 7548.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3032, pruned_loss=0.07375, over 1615603.07 frames. ], batch size: 18, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:13:06,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:13:24,249 INFO [train.py:901] (2/4) Epoch 13, batch 2050, loss[loss=0.2728, simple_loss=0.3378, pruned_loss=0.1039, over 8508.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3039, pruned_loss=0.07428, over 1615421.45 frames. ], batch size: 49, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:13:30,110 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.527e+02 3.267e+02 4.166e+02 9.227e+02, threshold=6.535e+02, percent-clipped=8.0 +2023-02-06 13:13:39,428 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:13:43,692 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 13:13:51,524 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1535, 1.3063, 3.3015, 1.1098, 2.8895, 2.7788, 3.0028, 2.9124], + device='cuda:2'), covar=tensor([0.0842, 0.3881, 0.0885, 0.3625, 0.1584, 0.1198, 0.0756, 0.0922], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0581, 0.0594, 0.0550, 0.0624, 0.0534, 0.0524, 0.0587], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:13:52,406 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 13:13:58,777 INFO [train.py:901] (2/4) Epoch 13, batch 2100, loss[loss=0.246, simple_loss=0.3289, pruned_loss=0.0816, over 8488.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3038, pruned_loss=0.07481, over 1615867.29 frames. 
], batch size: 29, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:14:27,699 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9946, 1.6493, 6.0889, 2.2763, 5.5008, 5.0911, 5.6696, 5.5545], + device='cuda:2'), covar=tensor([0.0496, 0.4429, 0.0314, 0.3169, 0.0968, 0.0848, 0.0422, 0.0517], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0583, 0.0596, 0.0550, 0.0624, 0.0534, 0.0523, 0.0587], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:14:31,277 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:14:33,890 INFO [train.py:901] (2/4) Epoch 13, batch 2150, loss[loss=0.2402, simple_loss=0.3232, pruned_loss=0.07865, over 8249.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3037, pruned_loss=0.07463, over 1615398.40 frames. ], batch size: 24, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:14:39,911 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.393e+02 2.767e+02 3.323e+02 5.467e+02, threshold=5.533e+02, percent-clipped=0.0 +2023-02-06 13:14:48,048 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:07,838 INFO [train.py:901] (2/4) Epoch 13, batch 2200, loss[loss=0.2307, simple_loss=0.315, pruned_loss=0.07319, over 8544.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3042, pruned_loss=0.07529, over 1614718.94 frames. ], batch size: 31, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:15:27,755 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0915, 1.8116, 3.3405, 1.5093, 2.3467, 3.7156, 3.7118, 3.1726], + device='cuda:2'), covar=tensor([0.0987, 0.1277, 0.0367, 0.2056, 0.1002, 0.0230, 0.0531, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0273, 0.0303, 0.0270, 0.0295, 0.0280, 0.0241, 0.0361, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 13:15:43,035 INFO [train.py:901] (2/4) Epoch 13, batch 2250, loss[loss=0.2559, simple_loss=0.3162, pruned_loss=0.09778, over 7185.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3053, pruned_loss=0.07589, over 1614566.69 frames. ], batch size: 16, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:15:43,152 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:46,454 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:48,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.623e+02 3.377e+02 4.135e+02 6.545e+02, threshold=6.753e+02, percent-clipped=6.0 +2023-02-06 13:15:49,683 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:02,576 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:09,209 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:16,492 INFO [train.py:901] (2/4) Epoch 13, batch 2300, loss[loss=0.194, simple_loss=0.288, pruned_loss=0.05, over 8128.00 frames. 
], tot_loss[loss=0.2275, simple_loss=0.3049, pruned_loss=0.07512, over 1618130.95 frames. ], batch size: 22, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:16:19,366 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:43,130 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99333.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:49,184 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:52,409 INFO [train.py:901] (2/4) Epoch 13, batch 2350, loss[loss=0.1916, simple_loss=0.2696, pruned_loss=0.05678, over 7441.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3045, pruned_loss=0.0752, over 1618776.52 frames. ], batch size: 17, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:16:58,394 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.470e+02 3.074e+02 3.865e+02 1.080e+03, threshold=6.149e+02, percent-clipped=3.0 +2023-02-06 13:17:06,764 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99367.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:10,178 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:12,142 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5482, 1.5979, 4.7519, 1.7543, 4.2045, 3.9957, 4.3062, 4.1782], + device='cuda:2'), covar=tensor([0.0467, 0.4095, 0.0408, 0.3635, 0.1022, 0.0838, 0.0459, 0.0533], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0596, 0.0613, 0.0564, 0.0637, 0.0546, 0.0538, 0.0601], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:17:22,906 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4843, 1.8012, 1.8707, 1.1257, 2.0319, 1.4319, 0.3781, 1.7938], + device='cuda:2'), covar=tensor([0.0362, 0.0236, 0.0195, 0.0369, 0.0240, 0.0657, 0.0559, 0.0169], + device='cuda:2'), in_proj_covar=tensor([0.0401, 0.0336, 0.0292, 0.0402, 0.0325, 0.0487, 0.0361, 0.0367], + device='cuda:2'), out_proj_covar=tensor([1.1254e-04, 9.1758e-05, 8.0184e-05, 1.1101e-04, 9.0365e-05, 1.4522e-04, + 1.0168e-04, 1.0207e-04], device='cuda:2') +2023-02-06 13:17:26,591 INFO [train.py:901] (2/4) Epoch 13, batch 2400, loss[loss=0.2212, simple_loss=0.3101, pruned_loss=0.06613, over 8031.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3037, pruned_loss=0.07463, over 1619631.30 frames. 
], batch size: 22, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:17:37,519 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:54,054 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4340, 1.9876, 2.8745, 2.2859, 2.7148, 2.3021, 1.9627, 1.3865], + device='cuda:2'), covar=tensor([0.4350, 0.4358, 0.1447, 0.2982, 0.2161, 0.2354, 0.1698, 0.4720], + device='cuda:2'), in_proj_covar=tensor([0.0896, 0.0887, 0.0740, 0.0862, 0.0942, 0.0813, 0.0705, 0.0775], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:17:55,999 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:18:01,834 INFO [train.py:901] (2/4) Epoch 13, batch 2450, loss[loss=0.2115, simple_loss=0.2878, pruned_loss=0.06765, over 7540.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3039, pruned_loss=0.07459, over 1619634.18 frames. ], batch size: 18, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:02,725 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99448.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:18:08,608 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.541e+02 3.157e+02 3.793e+02 6.756e+02, threshold=6.314e+02, percent-clipped=3.0 +2023-02-06 13:18:29,566 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99486.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:18:36,513 INFO [train.py:901] (2/4) Epoch 13, batch 2500, loss[loss=0.219, simple_loss=0.2959, pruned_loss=0.07105, over 8362.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.304, pruned_loss=0.07455, over 1616600.50 frames. ], batch size: 49, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:45,253 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7656, 1.7552, 2.5019, 1.7675, 1.2564, 2.4169, 0.4021, 1.4164], + device='cuda:2'), covar=tensor([0.2708, 0.1877, 0.0411, 0.2193, 0.4284, 0.0572, 0.3372, 0.2109], + device='cuda:2'), in_proj_covar=tensor([0.0172, 0.0176, 0.0104, 0.0220, 0.0257, 0.0111, 0.0165, 0.0167], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 13:18:57,698 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:10,746 INFO [train.py:901] (2/4) Epoch 13, batch 2550, loss[loss=0.231, simple_loss=0.315, pruned_loss=0.07348, over 8258.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3052, pruned_loss=0.07547, over 1615268.96 frames. 
], batch size: 24, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:19:17,198 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.420e+02 2.977e+02 3.875e+02 7.325e+02, threshold=5.954e+02, percent-clipped=4.0 +2023-02-06 13:19:19,408 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2739, 1.9505, 2.9236, 2.2485, 2.6890, 2.0848, 1.7439, 1.4583], + device='cuda:2'), covar=tensor([0.4550, 0.4568, 0.1373, 0.3410, 0.2228, 0.2557, 0.1848, 0.4748], + device='cuda:2'), in_proj_covar=tensor([0.0893, 0.0887, 0.0738, 0.0862, 0.0943, 0.0812, 0.0703, 0.0774], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:19:20,015 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6148, 1.7120, 2.3142, 1.4758, 1.0917, 2.2388, 0.2779, 1.2545], + device='cuda:2'), covar=tensor([0.2554, 0.1804, 0.0421, 0.2366, 0.4379, 0.0462, 0.3391, 0.2124], + device='cuda:2'), in_proj_covar=tensor([0.0172, 0.0176, 0.0103, 0.0219, 0.0257, 0.0110, 0.0165, 0.0167], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 13:19:21,125 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3746, 2.1852, 1.6454, 1.9309, 1.9019, 1.3526, 1.6034, 1.7337], + device='cuda:2'), covar=tensor([0.1116, 0.0342, 0.0991, 0.0487, 0.0581, 0.1280, 0.0887, 0.0764], + device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0240, 0.0320, 0.0300, 0.0301, 0.0326, 0.0344, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 13:19:37,747 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99586.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:41,075 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:45,636 INFO [train.py:901] (2/4) Epoch 13, batch 2600, loss[loss=0.2102, simple_loss=0.3004, pruned_loss=0.05999, over 8030.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3054, pruned_loss=0.07569, over 1615545.15 frames. ], batch size: 22, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:19:52,601 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:03,510 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:06,925 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:08,161 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:19,553 INFO [train.py:901] (2/4) Epoch 13, batch 2650, loss[loss=0.2577, simple_loss=0.3302, pruned_loss=0.09257, over 8448.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3063, pruned_loss=0.07604, over 1620844.57 frames. 
], batch size: 27, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:20,456 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:23,637 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:25,436 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.403e+02 3.099e+02 4.031e+02 8.160e+02, threshold=6.198e+02, percent-clipped=1.0 +2023-02-06 13:20:47,384 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:54,457 INFO [train.py:901] (2/4) Epoch 13, batch 2700, loss[loss=0.191, simple_loss=0.2753, pruned_loss=0.05333, over 7819.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3058, pruned_loss=0.07552, over 1624585.69 frames. ], batch size: 20, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:55,213 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:59,158 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0549, 1.3906, 1.6435, 1.3623, 0.9553, 1.4303, 1.7039, 1.6517], + device='cuda:2'), covar=tensor([0.0465, 0.1343, 0.1748, 0.1378, 0.0610, 0.1571, 0.0667, 0.0661], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 13:20:59,205 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:00,476 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:12,260 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 13:21:16,801 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:27,687 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:28,872 INFO [train.py:901] (2/4) Epoch 13, batch 2750, loss[loss=0.1914, simple_loss=0.2699, pruned_loss=0.05647, over 7544.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3052, pruned_loss=0.07528, over 1617529.13 frames. ], batch size: 18, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:21:34,775 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.508e+02 3.194e+02 3.866e+02 8.318e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 13:21:52,456 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:53,985 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:03,192 INFO [train.py:901] (2/4) Epoch 13, batch 2800, loss[loss=0.228, simple_loss=0.3067, pruned_loss=0.07467, over 8352.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3064, pruned_loss=0.07658, over 1617220.99 frames. 
], batch size: 26, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:06,163 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:11,445 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:22,820 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0474, 2.7076, 3.6929, 1.9869, 1.8327, 3.5274, 0.5915, 2.1280], + device='cuda:2'), covar=tensor([0.1427, 0.1455, 0.0219, 0.2675, 0.3630, 0.0429, 0.3510, 0.1515], + device='cuda:2'), in_proj_covar=tensor([0.0174, 0.0179, 0.0105, 0.0223, 0.0260, 0.0113, 0.0166, 0.0169], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 13:22:26,105 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99830.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:22:37,608 INFO [train.py:901] (2/4) Epoch 13, batch 2850, loss[loss=0.2465, simple_loss=0.3187, pruned_loss=0.08715, over 8357.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3055, pruned_loss=0.07603, over 1615158.73 frames. ], batch size: 24, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:43,878 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.486e+02 2.909e+02 3.673e+02 9.445e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-06 13:22:56,571 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4538, 2.0033, 3.2850, 1.2994, 2.4960, 1.9107, 1.6042, 2.3098], + device='cuda:2'), covar=tensor([0.1799, 0.2132, 0.0717, 0.4140, 0.1617, 0.2966, 0.1978, 0.2201], + device='cuda:2'), in_proj_covar=tensor([0.0490, 0.0530, 0.0538, 0.0589, 0.0624, 0.0559, 0.0482, 0.0613], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 13:23:02,725 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.74 vs. limit=2.0 +2023-02-06 13:23:11,489 INFO [train.py:901] (2/4) Epoch 13, batch 2900, loss[loss=0.1692, simple_loss=0.2555, pruned_loss=0.04145, over 7428.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3072, pruned_loss=0.07723, over 1615931.73 frames. ], batch size: 17, lr: 5.92e-03, grad_scale: 16.0 +2023-02-06 13:23:11,689 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:34,892 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:43,670 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5033, 1.7210, 2.8315, 1.3225, 2.1063, 1.9212, 1.5774, 1.9529], + device='cuda:2'), covar=tensor([0.1723, 0.2171, 0.0597, 0.3968, 0.1441, 0.2835, 0.1896, 0.1902], + device='cuda:2'), in_proj_covar=tensor([0.0493, 0.0535, 0.0542, 0.0594, 0.0629, 0.0565, 0.0487, 0.0617], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 13:23:44,992 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99945.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:23:46,141 INFO [train.py:901] (2/4) Epoch 13, batch 2950, loss[loss=0.2007, simple_loss=0.278, pruned_loss=0.06168, over 7932.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3068, pruned_loss=0.07666, over 1621412.82 frames. 
], batch size: 20, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:23:48,969 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:52,247 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 13:23:52,906 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.762e+02 3.280e+02 4.150e+02 8.176e+02, threshold=6.560e+02, percent-clipped=12.0 +2023-02-06 13:23:57,303 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99962.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:14,478 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:21,166 INFO [train.py:901] (2/4) Epoch 13, batch 3000, loss[loss=0.2039, simple_loss=0.2901, pruned_loss=0.05888, over 7968.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3064, pruned_loss=0.07591, over 1622045.81 frames. ], batch size: 21, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:24:21,167 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 13:24:33,565 INFO [train.py:935] (2/4) Epoch 13, validation: loss=0.1841, simple_loss=0.2841, pruned_loss=0.04204, over 944034.00 frames. +2023-02-06 13:24:33,565 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 13:24:37,724 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:46,103 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.51 vs. limit=5.0 +2023-02-06 13:24:54,613 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100025.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:24:55,300 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:05,695 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:07,832 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:09,023 INFO [train.py:901] (2/4) Epoch 13, batch 3050, loss[loss=0.2026, simple_loss=0.2732, pruned_loss=0.06604, over 6807.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3055, pruned_loss=0.07517, over 1621495.22 frames. 
], batch size: 15, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:25:15,849 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.522e+02 3.008e+02 4.207e+02 1.157e+03, threshold=6.017e+02, percent-clipped=6.0 +2023-02-06 13:25:16,795 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:22,892 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:34,604 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:40,054 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4721, 1.4013, 2.3758, 1.1840, 2.0998, 2.5460, 2.6478, 2.1428], + device='cuda:2'), covar=tensor([0.0937, 0.1169, 0.0416, 0.1972, 0.0722, 0.0359, 0.0659, 0.0766], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0299, 0.0266, 0.0292, 0.0278, 0.0241, 0.0357, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 13:25:44,732 INFO [train.py:901] (2/4) Epoch 13, batch 3100, loss[loss=0.2118, simple_loss=0.2923, pruned_loss=0.0657, over 7965.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.07497, over 1615897.12 frames. ], batch size: 21, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:02,703 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9416, 2.1441, 1.8553, 2.8140, 1.2764, 1.6047, 1.8733, 2.3563], + device='cuda:2'), covar=tensor([0.0766, 0.0819, 0.0954, 0.0401, 0.1158, 0.1384, 0.1004, 0.0670], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0211, 0.0255, 0.0214, 0.0214, 0.0251, 0.0260, 0.0220], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 13:26:11,764 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 13:26:19,854 INFO [train.py:901] (2/4) Epoch 13, batch 3150, loss[loss=0.1982, simple_loss=0.268, pruned_loss=0.06413, over 7708.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3049, pruned_loss=0.07573, over 1613616.94 frames. ], batch size: 18, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:20,929 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.17 vs. limit=5.0 +2023-02-06 13:26:22,136 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6626, 2.3529, 3.5086, 2.6452, 3.1925, 2.5747, 2.1387, 1.9853], + device='cuda:2'), covar=tensor([0.4330, 0.4804, 0.1370, 0.3218, 0.2214, 0.2402, 0.1832, 0.4619], + device='cuda:2'), in_proj_covar=tensor([0.0892, 0.0887, 0.0740, 0.0859, 0.0943, 0.0814, 0.0706, 0.0774], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 13:26:23,028 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0
+2023-02-06 13:26:24,135 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100153.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:26:25,933 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.491e+02 3.036e+02 4.077e+02 6.258e+02, threshold=6.072e+02, percent-clipped=1.0
+2023-02-06 13:26:26,824 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100157.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:26:29,083 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0
+2023-02-06 13:26:41,741 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100178.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:26:55,156 INFO [train.py:901] (2/4) Epoch 13, batch 3200, loss[loss=0.2043, simple_loss=0.2757, pruned_loss=0.06642, over 7430.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3046, pruned_loss=0.07545, over 1611175.17 frames. ], batch size: 17, lr: 5.91e-03, grad_scale: 16.0
+2023-02-06 13:26:58,256 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100201.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 13:27:13,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1540, 1.4140, 1.6057, 1.3127, 0.9914, 1.3192, 1.7952, 1.8823],
+ device='cuda:2'), covar=tensor([0.0489, 0.1231, 0.1743, 0.1378, 0.0587, 0.1492, 0.0647, 0.0534],
+ device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0191, 0.0158, 0.0102, 0.0163, 0.0114, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 13:27:15,356 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100226.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:27:29,407 INFO [train.py:901] (2/4) Epoch 13, batch 3250, loss[loss=0.2329, simple_loss=0.2917, pruned_loss=0.08708, over 7804.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3049, pruned_loss=0.0761, over 1608792.23 frames. ], batch size: 19, lr: 5.91e-03, grad_scale: 16.0
+2023-02-06 13:27:35,465 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.430e+02 2.991e+02 3.670e+02 7.489e+02, threshold=5.982e+02, percent-clipped=4.0
+2023-02-06 13:27:35,623 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100256.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:28:04,523 INFO [train.py:901] (2/4) Epoch 13, batch 3300, loss[loss=0.2267, simple_loss=0.2996, pruned_loss=0.0769, over 7916.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3062, pruned_loss=0.07646, over 1613009.49 frames. ], batch size: 20, lr: 5.90e-03, grad_scale: 16.0
+2023-02-06 13:28:07,521 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:28:22,244 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:28:24,890 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:28:27,059 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.77 vs. limit=5.0
+2023-02-06 13:28:39,552 INFO [train.py:901] (2/4) Epoch 13, batch 3350, loss[loss=0.2233, simple_loss=0.3116, pruned_loss=0.06748, over 8287.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3055, pruned_loss=0.07565, over 1617392.65 frames. ], batch size: 23, lr: 5.90e-03, grad_scale: 16.0
+2023-02-06 13:28:39,767 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:28:45,562 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.599e+02 3.166e+02 3.997e+02 7.990e+02, threshold=6.333e+02, percent-clipped=6.0
+2023-02-06 13:28:54,364 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100369.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:29:09,754 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5511, 1.9679, 2.2530, 1.1934, 2.1834, 1.3542, 0.6892, 1.8664],
+ device='cuda:2'), covar=tensor([0.0475, 0.0232, 0.0160, 0.0467, 0.0315, 0.0678, 0.0621, 0.0245],
+ device='cuda:2'), in_proj_covar=tensor([0.0398, 0.0341, 0.0292, 0.0399, 0.0327, 0.0490, 0.0362, 0.0366],
+ device='cuda:2'), out_proj_covar=tensor([1.1149e-04, 9.3206e-05, 8.0129e-05, 1.1026e-04, 9.0678e-05, 1.4605e-04,
+ 1.0167e-04, 1.0158e-04], device='cuda:2')
+2023-02-06 13:29:13,503 INFO [train.py:901] (2/4) Epoch 13, batch 3400, loss[loss=0.2183, simple_loss=0.2836, pruned_loss=0.07653, over 7538.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.305, pruned_loss=0.07567, over 1611279.17 frames. ], batch size: 18, lr: 5.90e-03, grad_scale: 16.0
+2023-02-06 13:29:25,260 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100413.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:29:29,919 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100420.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:29:42,784 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100438.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:29:43,513 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7671, 1.8803, 2.4428, 1.7384, 1.3423, 2.4556, 0.4517, 1.4467],
+ device='cuda:2'), covar=tensor([0.2444, 0.1773, 0.0520, 0.1957, 0.3929, 0.0519, 0.3091, 0.2035],
+ device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0176, 0.0106, 0.0218, 0.0257, 0.0112, 0.0165, 0.0169],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 13:29:48,799 INFO [train.py:901] (2/4) Epoch 13, batch 3450, loss[loss=0.2169, simple_loss=0.3026, pruned_loss=0.0656, over 8158.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3049, pruned_loss=0.07528, over 1613643.09 frames. ], batch size: 48, lr: 5.90e-03, grad_scale: 16.0
+2023-02-06 13:29:52,569 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 13:29:54,814 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.523e+02 3.011e+02 4.006e+02 7.808e+02, threshold=6.023e+02, percent-clipped=2.0
+2023-02-06 13:30:13,839 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 13:30:14,319 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100484.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:30:23,421 INFO [train.py:901] (2/4) Epoch 13, batch 3500, loss[loss=0.256, simple_loss=0.3321, pruned_loss=0.08997, over 8325.00 frames. ], tot_loss[loss=0.228, simple_loss=0.305, pruned_loss=0.07552, over 1616203.71 frames. ], batch size: 26, lr: 5.90e-03, grad_scale: 16.0
+2023-02-06 13:30:26,289 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5114, 1.4897, 1.7823, 1.4178, 1.1750, 1.8184, 0.1950, 1.1290],
+ device='cuda:2'), covar=tensor([0.2676, 0.1627, 0.0474, 0.1315, 0.3571, 0.0529, 0.2684, 0.1604],
+ device='cuda:2'), in_proj_covar=tensor([0.0172, 0.0175, 0.0106, 0.0217, 0.0254, 0.0111, 0.0164, 0.0169],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 13:30:45,388 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-06 13:30:53,033 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 13:30:58,420 INFO [train.py:901] (2/4) Epoch 13, batch 3550, loss[loss=0.2415, simple_loss=0.3242, pruned_loss=0.07941, over 8621.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3041, pruned_loss=0.07489, over 1616270.07 frames. ], batch size: 34, lr: 5.90e-03, grad_scale: 16.0
+2023-02-06 13:31:03,295 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6607, 1.7538, 2.2280, 1.7524, 1.3488, 2.2063, 0.6982, 1.5456],
+ device='cuda:2'), covar=tensor([0.2748, 0.1227, 0.0591, 0.1525, 0.3113, 0.0648, 0.2521, 0.1582],
+ device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0175, 0.0106, 0.0217, 0.0253, 0.0111, 0.0164, 0.0169],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 13:31:04,446 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.564e+02 3.091e+02 3.906e+02 9.185e+02, threshold=6.182e+02, percent-clipped=3.0
+2023-02-06 13:31:12,643 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2900, 2.7003, 2.3056, 3.6884, 1.6757, 1.9026, 2.2037, 2.7755],
+ device='cuda:2'), covar=tensor([0.0720, 0.0754, 0.0893, 0.0282, 0.1159, 0.1339, 0.1104, 0.0699],
+ device='cuda:2'), in_proj_covar=tensor([0.0236, 0.0211, 0.0256, 0.0213, 0.0215, 0.0253, 0.0261, 0.0220],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 13:31:33,167 INFO [train.py:901] (2/4) Epoch 13, batch 3600, loss[loss=0.2521, simple_loss=0.318, pruned_loss=0.09313, over 8611.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3035, pruned_loss=0.07475, over 1612663.44 frames. ], batch size: 34, lr: 5.89e-03, grad_scale: 16.0
+2023-02-06 13:31:35,334 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100600.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:32:08,079 INFO [train.py:901] (2/4) Epoch 13, batch 3650, loss[loss=0.2836, simple_loss=0.3554, pruned_loss=0.1059, over 8504.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.303, pruned_loss=0.07431, over 1614863.61 frames. ], batch size: 26, lr: 5.89e-03, grad_scale: 16.0
+2023-02-06 13:32:14,109 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.362e+02 3.080e+02 3.827e+02 7.938e+02, threshold=6.161e+02, percent-clipped=3.0
+2023-02-06 13:32:40,076 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4709, 1.8155, 3.1220, 1.3009, 2.1656, 1.8887, 1.5453, 2.0361],
+ device='cuda:2'), covar=tensor([0.1831, 0.2410, 0.0684, 0.4122, 0.1673, 0.2995, 0.2038, 0.2137],
+ device='cuda:2'), in_proj_covar=tensor([0.0490, 0.0531, 0.0534, 0.0587, 0.0623, 0.0556, 0.0481, 0.0611],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 13:32:43,148 INFO [train.py:901] (2/4) Epoch 13, batch 3700, loss[loss=0.1756, simple_loss=0.2483, pruned_loss=0.05146, over 7708.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3031, pruned_loss=0.07415, over 1616228.53 frames. ], batch size: 18, lr: 5.89e-03, grad_scale: 8.0
+2023-02-06 13:32:55,275 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100715.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:32:57,155 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 13:33:13,283 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100740.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:33:17,842 INFO [train.py:901] (2/4) Epoch 13, batch 3750, loss[loss=0.2399, simple_loss=0.3256, pruned_loss=0.07704, over 8643.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3022, pruned_loss=0.07357, over 1615394.42 frames. ], batch size: 49, lr: 5.89e-03, grad_scale: 8.0
+2023-02-06 13:33:18,685 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100748.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 13:33:24,691 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.543e+02 3.029e+02 3.909e+02 6.778e+02, threshold=6.059e+02, percent-clipped=2.0
+2023-02-06 13:33:30,142 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100764.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:33:30,989 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100765.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:33:52,960 INFO [train.py:901] (2/4) Epoch 13, batch 3800, loss[loss=0.1736, simple_loss=0.2547, pruned_loss=0.04622, over 7925.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3027, pruned_loss=0.07435, over 1615356.83 frames. ], batch size: 20, lr: 5.89e-03, grad_scale: 8.0
+2023-02-06 13:34:27,589 INFO [train.py:901] (2/4) Epoch 13, batch 3850, loss[loss=0.1982, simple_loss=0.2666, pruned_loss=0.0649, over 7786.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3032, pruned_loss=0.07471, over 1612115.44 frames. ], batch size: 19, lr: 5.89e-03, grad_scale: 8.0
+2023-02-06 13:34:34,506 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.985e+02 2.830e+02 3.312e+02 3.730e+02 7.453e+02, threshold=6.624e+02, percent-clipped=3.0
+2023-02-06 13:34:50,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100879.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:35:01,752 INFO [train.py:901] (2/4) Epoch 13, batch 3900, loss[loss=0.1987, simple_loss=0.2797, pruned_loss=0.05884, over 8130.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3034, pruned_loss=0.07496, over 1612936.46 frames. ], batch size: 22, lr: 5.89e-03, grad_scale: 8.0
+2023-02-06 13:35:01,759 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 13:35:33,289 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6729, 2.3476, 3.4247, 2.6149, 3.0836, 2.4523, 2.0791, 1.7658],
+ device='cuda:2'), covar=tensor([0.3917, 0.4305, 0.1305, 0.2939, 0.2008, 0.2255, 0.1595, 0.4644],
+ device='cuda:2'), in_proj_covar=tensor([0.0891, 0.0887, 0.0734, 0.0862, 0.0941, 0.0813, 0.0705, 0.0776],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 13:35:37,124 INFO [train.py:901] (2/4) Epoch 13, batch 3950, loss[loss=0.2155, simple_loss=0.2827, pruned_loss=0.07417, over 7518.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3037, pruned_loss=0.07511, over 1611980.58 frames. ], batch size: 18, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:35:44,007 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.489e+02 3.011e+02 3.855e+02 9.802e+02, threshold=6.021e+02, percent-clipped=2.0
+2023-02-06 13:35:54,139 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100971.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:36:10,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2612, 1.2578, 3.3467, 1.0426, 2.9407, 2.7989, 3.0405, 2.9938],
+ device='cuda:2'), covar=tensor([0.0662, 0.3709, 0.0760, 0.3588, 0.1355, 0.1053, 0.0684, 0.0772],
+ device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0588, 0.0612, 0.0551, 0.0630, 0.0543, 0.0533, 0.0593],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 13:36:11,900 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100996.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:36:12,364 INFO [train.py:901] (2/4) Epoch 13, batch 4000, loss[loss=0.2348, simple_loss=0.3109, pruned_loss=0.07939, over 8506.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3032, pruned_loss=0.07526, over 1608714.16 frames. ], batch size: 26, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:36:20,495 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.95 vs. limit=5.0
+2023-02-06 13:36:47,690 INFO [train.py:901] (2/4) Epoch 13, batch 4050, loss[loss=0.1972, simple_loss=0.2852, pruned_loss=0.05461, over 8133.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3026, pruned_loss=0.07492, over 1604664.83 frames. ], batch size: 22, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:36:54,253 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.645e+02 3.184e+02 3.816e+02 9.518e+02, threshold=6.368e+02, percent-clipped=3.0
+2023-02-06 13:37:18,352 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101092.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 13:37:21,623 INFO [train.py:901] (2/4) Epoch 13, batch 4100, loss[loss=0.2381, simple_loss=0.3221, pruned_loss=0.07705, over 8571.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3037, pruned_loss=0.07528, over 1608660.21 frames. ], batch size: 34, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:37:41,407 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:37:47,945 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101135.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:37:55,650 INFO [train.py:901] (2/4) Epoch 13, batch 4150, loss[loss=0.1706, simple_loss=0.2682, pruned_loss=0.03646, over 7799.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3035, pruned_loss=0.07504, over 1610120.46 frames. ], batch size: 19, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:38:02,997 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.783e+02 3.401e+02 4.642e+02 1.010e+03, threshold=6.803e+02, percent-clipped=7.0
+2023-02-06 13:38:05,275 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101160.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:38:05,884 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5684, 5.6496, 4.9119, 2.2034, 5.0007, 5.2723, 5.2231, 4.9877],
+ device='cuda:2'), covar=tensor([0.0547, 0.0362, 0.0817, 0.4630, 0.0618, 0.0684, 0.1003, 0.0660],
+ device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0391, 0.0401, 0.0500, 0.0394, 0.0395, 0.0389, 0.0344],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 13:38:06,616 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0951, 1.4135, 1.5459, 1.2806, 0.8912, 1.3517, 1.7392, 1.5998],
+ device='cuda:2'), covar=tensor([0.0560, 0.1678, 0.2339, 0.1712, 0.0706, 0.1964, 0.0753, 0.0682],
+ device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0157, 0.0102, 0.0164, 0.0116, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 13:38:30,242 INFO [train.py:901] (2/4) Epoch 13, batch 4200, loss[loss=0.2207, simple_loss=0.3106, pruned_loss=0.06538, over 8458.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3033, pruned_loss=0.07432, over 1615742.18 frames. ], batch size: 25, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:38:37,998 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101207.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 13:38:39,766 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0
+2023-02-06 13:38:54,601 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 13:39:05,859 INFO [train.py:901] (2/4) Epoch 13, batch 4250, loss[loss=0.2031, simple_loss=0.2813, pruned_loss=0.06249, over 7967.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3032, pruned_loss=0.0746, over 1612577.19 frames. ], batch size: 21, lr: 5.88e-03, grad_scale: 8.0
+2023-02-06 13:39:12,480 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 2.514e+02 3.154e+02 3.992e+02 7.648e+02, threshold=6.307e+02, percent-clipped=3.0
+2023-02-06 13:39:16,498 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 13:39:40,012 INFO [train.py:901] (2/4) Epoch 13, batch 4300, loss[loss=0.1977, simple_loss=0.2819, pruned_loss=0.0568, over 8243.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3037, pruned_loss=0.07457, over 1615183.65 frames. ], batch size: 22, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:39:58,266 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-06 13:40:01,018 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0
+2023-02-06 13:40:14,581 INFO [train.py:901] (2/4) Epoch 13, batch 4350, loss[loss=0.2255, simple_loss=0.3031, pruned_loss=0.07396, over 7971.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3046, pruned_loss=0.07523, over 1614805.91 frames. ], batch size: 21, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:40:14,815 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6117, 1.6364, 2.0048, 1.5413, 1.2408, 2.0731, 0.3680, 1.3489],
+ device='cuda:2'), covar=tensor([0.2029, 0.1535, 0.0456, 0.1366, 0.3361, 0.0427, 0.2763, 0.1596],
+ device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0176, 0.0107, 0.0219, 0.0256, 0.0111, 0.0164, 0.0170],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 13:40:21,341 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.682e+02 3.184e+02 4.441e+02 9.358e+02, threshold=6.368e+02, percent-clipped=11.0
+2023-02-06 13:40:49,428 INFO [train.py:901] (2/4) Epoch 13, batch 4400, loss[loss=0.2088, simple_loss=0.2772, pruned_loss=0.07019, over 7410.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3051, pruned_loss=0.07565, over 1615925.75 frames. ], batch size: 17, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:40:49,433 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 13:40:57,009 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6699, 1.4037, 1.6021, 1.2618, 0.8531, 1.3406, 1.3754, 1.3011],
+ device='cuda:2'), covar=tensor([0.0519, 0.1299, 0.1700, 0.1403, 0.0613, 0.1561, 0.0723, 0.0643],
+ device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0151, 0.0189, 0.0157, 0.0102, 0.0162, 0.0115, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 13:41:23,440 INFO [train.py:901] (2/4) Epoch 13, batch 4450, loss[loss=0.2154, simple_loss=0.2874, pruned_loss=0.07171, over 8070.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.305, pruned_loss=0.07597, over 1611191.11 frames. ], batch size: 21, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:41:28,823 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 13:41:30,660 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 2.700e+02 3.319e+02 4.103e+02 1.285e+03, threshold=6.638e+02, percent-clipped=3.0
+2023-02-06 13:41:34,863 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101463.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:41:38,576 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:41:52,070 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5146, 2.8521, 1.9869, 2.2212, 2.3576, 1.6338, 2.1081, 2.2273],
+ device='cuda:2'), covar=tensor([0.1378, 0.0300, 0.0956, 0.0648, 0.0583, 0.1281, 0.0879, 0.0819],
+ device='cuda:2'), in_proj_covar=tensor([0.0342, 0.0233, 0.0313, 0.0294, 0.0298, 0.0319, 0.0338, 0.0301],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 13:41:52,075 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101488.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:41:57,886 INFO [train.py:901] (2/4) Epoch 13, batch 4500, loss[loss=0.2657, simple_loss=0.332, pruned_loss=0.09974, over 8317.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3056, pruned_loss=0.07625, over 1612634.90 frames. ], batch size: 25, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:42:14,781 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2208, 1.2318, 1.6362, 1.1734, 0.6628, 1.3120, 1.1468, 1.1188],
+ device='cuda:2'), covar=tensor([0.0522, 0.1303, 0.1555, 0.1364, 0.0548, 0.1488, 0.0643, 0.0629],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0156, 0.0101, 0.0161, 0.0114, 0.0137],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 13:42:22,208 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 13:42:33,098 INFO [train.py:901] (2/4) Epoch 13, batch 4550, loss[loss=0.2626, simple_loss=0.3108, pruned_loss=0.1071, over 7803.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3044, pruned_loss=0.0758, over 1609606.59 frames. ], batch size: 20, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:42:39,881 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.403e+02 2.986e+02 3.546e+02 6.918e+02, threshold=5.973e+02, percent-clipped=1.0
+2023-02-06 13:42:58,212 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1680, 4.1469, 3.7147, 1.9949, 3.6249, 3.7699, 3.8666, 3.5059],
+ device='cuda:2'), covar=tensor([0.0885, 0.0566, 0.1064, 0.4469, 0.0955, 0.1046, 0.1200, 0.1000],
+ device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0384, 0.0389, 0.0488, 0.0383, 0.0387, 0.0378, 0.0336],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 13:42:58,931 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101584.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:43:08,196 INFO [train.py:901] (2/4) Epoch 13, batch 4600, loss[loss=0.244, simple_loss=0.3292, pruned_loss=0.07943, over 8481.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3048, pruned_loss=0.07575, over 1611047.65 frames. ], batch size: 29, lr: 5.87e-03, grad_scale: 8.0
+2023-02-06 13:43:17,792 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1207, 1.1734, 1.1017, 1.4892, 0.5772, 0.9951, 1.0398, 1.2483],
+ device='cuda:2'), covar=tensor([0.0753, 0.0714, 0.0951, 0.0507, 0.1015, 0.1158, 0.0681, 0.0543],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0210, 0.0254, 0.0212, 0.0214, 0.0250, 0.0256, 0.0216],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 13:43:42,600 INFO [train.py:901] (2/4) Epoch 13, batch 4650, loss[loss=0.2346, simple_loss=0.3101, pruned_loss=0.07951, over 8301.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3057, pruned_loss=0.07687, over 1609855.19 frames. ], batch size: 23, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:43:49,456 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.500e+02 2.989e+02 3.844e+02 7.619e+02, threshold=5.978e+02, percent-clipped=4.0
+2023-02-06 13:44:17,683 INFO [train.py:901] (2/4) Epoch 13, batch 4700, loss[loss=0.2483, simple_loss=0.3278, pruned_loss=0.08439, over 8459.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3066, pruned_loss=0.07704, over 1613876.86 frames. ], batch size: 27, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:44:21,756 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101702.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:44:52,084 INFO [train.py:901] (2/4) Epoch 13, batch 4750, loss[loss=0.1995, simple_loss=0.2734, pruned_loss=0.06281, over 7540.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3047, pruned_loss=0.07588, over 1613356.74 frames. ], batch size: 18, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:44:59,493 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.558e+02 3.081e+02 3.778e+02 8.564e+02, threshold=6.162e+02, percent-clipped=2.0
+2023-02-06 13:45:21,968 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 13:45:24,382 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 13:45:27,098 INFO [train.py:901] (2/4) Epoch 13, batch 4800, loss[loss=0.2467, simple_loss=0.3231, pruned_loss=0.08511, over 8603.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3047, pruned_loss=0.07543, over 1614918.34 frames. ], batch size: 31, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:45:32,218 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0766, 2.5259, 3.1304, 1.2121, 3.2014, 1.6943, 1.5131, 1.9556],
+ device='cuda:2'), covar=tensor([0.0623, 0.0259, 0.0180, 0.0652, 0.0304, 0.0733, 0.0733, 0.0450],
+ device='cuda:2'), in_proj_covar=tensor([0.0396, 0.0334, 0.0287, 0.0393, 0.0325, 0.0482, 0.0360, 0.0364],
+ device='cuda:2'), out_proj_covar=tensor([1.1075e-04, 9.1230e-05, 7.8297e-05, 1.0815e-04, 9.0008e-05, 1.4335e-04,
+ 1.0114e-04, 1.0107e-04], device='cuda:2')
+2023-02-06 13:45:38,839 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5050, 1.8441, 1.9792, 1.0875, 2.0056, 1.3564, 0.3858, 1.6506],
+ device='cuda:2'), covar=tensor([0.0356, 0.0224, 0.0165, 0.0352, 0.0250, 0.0594, 0.0551, 0.0192],
+ device='cuda:2'), in_proj_covar=tensor([0.0397, 0.0335, 0.0287, 0.0394, 0.0327, 0.0483, 0.0361, 0.0365],
+ device='cuda:2'), out_proj_covar=tensor([1.1113e-04, 9.1418e-05, 7.8489e-05, 1.0848e-04, 9.0429e-05, 1.4374e-04,
+ 1.0132e-04, 1.0128e-04], device='cuda:2')
+2023-02-06 13:45:57,602 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101840.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:46:01,953 INFO [train.py:901] (2/4) Epoch 13, batch 4850, loss[loss=0.2658, simple_loss=0.3388, pruned_loss=0.09639, over 8239.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3068, pruned_loss=0.07659, over 1612734.28 frames. ], batch size: 24, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:46:08,644 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.589e+02 3.137e+02 3.918e+02 7.572e+02, threshold=6.274e+02, percent-clipped=4.0
+2023-02-06 13:46:14,081 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 13:46:14,940 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101865.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:46:36,896 INFO [train.py:901] (2/4) Epoch 13, batch 4900, loss[loss=0.2328, simple_loss=0.311, pruned_loss=0.07732, over 8245.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3059, pruned_loss=0.07635, over 1613067.79 frames. ], batch size: 22, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:47:06,387 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101938.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:47:12,292 INFO [train.py:901] (2/4) Epoch 13, batch 4950, loss[loss=0.2275, simple_loss=0.3116, pruned_loss=0.07172, over 8345.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3061, pruned_loss=0.07645, over 1612778.51 frames. ], batch size: 26, lr: 5.86e-03, grad_scale: 8.0
+2023-02-06 13:47:15,205 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3085, 1.4701, 1.3885, 1.8957, 0.8032, 1.1501, 1.2892, 1.4572],
+ device='cuda:2'), covar=tensor([0.1008, 0.0874, 0.1151, 0.0529, 0.1228, 0.1576, 0.0851, 0.0799],
+ device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0211, 0.0253, 0.0212, 0.0215, 0.0252, 0.0254, 0.0217],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 13:47:19,104 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.432e+02 3.023e+02 3.670e+02 7.494e+02, threshold=6.046e+02, percent-clipped=3.0
+2023-02-06 13:47:20,638 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1455, 1.5516, 1.6877, 1.4978, 0.9145, 1.5678, 1.6689, 1.7123],
+ device='cuda:2'), covar=tensor([0.0492, 0.1198, 0.1654, 0.1363, 0.0605, 0.1425, 0.0703, 0.0577],
+ device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0163, 0.0115, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 13:47:46,757 INFO [train.py:901] (2/4) Epoch 13, batch 5000, loss[loss=0.1707, simple_loss=0.2612, pruned_loss=0.0401, over 8289.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3051, pruned_loss=0.07497, over 1610976.04 frames. ], batch size: 23, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:47:52,333 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 13:47:53,374 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102005.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:48:16,623 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1179, 1.5359, 1.7380, 1.4092, 1.0044, 1.5069, 1.7595, 1.6311],
+ device='cuda:2'), covar=tensor([0.0466, 0.1226, 0.1646, 0.1363, 0.0591, 0.1470, 0.0648, 0.0630],
+ device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0158, 0.0102, 0.0163, 0.0114, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 13:48:22,479 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:48:23,083 INFO [train.py:901] (2/4) Epoch 13, batch 5050, loss[loss=0.1939, simple_loss=0.2766, pruned_loss=0.05561, over 7549.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3053, pruned_loss=0.07556, over 1607838.64 frames. ], batch size: 18, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:48:29,968 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.626e+02 3.300e+02 4.185e+02 9.088e+02, threshold=6.599e+02, percent-clipped=3.0
+2023-02-06 13:48:54,017 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 13:48:56,650 INFO [train.py:901] (2/4) Epoch 13, batch 5100, loss[loss=0.2251, simple_loss=0.3065, pruned_loss=0.07181, over 8237.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.304, pruned_loss=0.07466, over 1609319.37 frames. ], batch size: 22, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:49:31,560 INFO [train.py:901] (2/4) Epoch 13, batch 5150, loss[loss=0.2257, simple_loss=0.3016, pruned_loss=0.07497, over 8455.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3042, pruned_loss=0.07478, over 1613731.76 frames. ], batch size: 25, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:49:38,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.413e+02 2.853e+02 3.425e+02 7.647e+02, threshold=5.706e+02, percent-clipped=3.0
+2023-02-06 13:49:41,790 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102161.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:50:06,644 INFO [train.py:901] (2/4) Epoch 13, batch 5200, loss[loss=0.2498, simple_loss=0.3383, pruned_loss=0.08069, over 8359.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3052, pruned_loss=0.07547, over 1614039.10 frames. ], batch size: 24, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:50:41,855 INFO [train.py:901] (2/4) Epoch 13, batch 5250, loss[loss=0.2471, simple_loss=0.3206, pruned_loss=0.08676, over 8486.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3044, pruned_loss=0.07486, over 1614422.22 frames. ], batch size: 49, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:50:48,577 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.565e+02 3.047e+02 3.925e+02 1.157e+03, threshold=6.094e+02, percent-clipped=6.0
+2023-02-06 13:50:51,019 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0
+2023-02-06 13:50:53,901 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 13:51:06,839 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102282.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:51:16,475 INFO [train.py:901] (2/4) Epoch 13, batch 5300, loss[loss=0.1913, simple_loss=0.2674, pruned_loss=0.05758, over 7925.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.303, pruned_loss=0.07412, over 1614187.40 frames. ], batch size: 20, lr: 5.85e-03, grad_scale: 8.0
+2023-02-06 13:51:51,015 INFO [train.py:901] (2/4) Epoch 13, batch 5350, loss[loss=0.2141, simple_loss=0.2972, pruned_loss=0.06552, over 8087.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3018, pruned_loss=0.07331, over 1614217.62 frames. ], batch size: 21, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:51:52,483 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:51:52,611 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5094, 2.7625, 1.8562, 2.1713, 2.2184, 1.5444, 2.0018, 2.1561],
+ device='cuda:2'), covar=tensor([0.1465, 0.0389, 0.1149, 0.0619, 0.0659, 0.1366, 0.1022, 0.0929],
+ device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0236, 0.0318, 0.0297, 0.0298, 0.0324, 0.0345, 0.0304],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 13:51:57,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.535e+02 3.049e+02 3.805e+02 7.372e+02, threshold=6.098e+02, percent-clipped=2.0
+2023-02-06 13:52:08,203 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0
+2023-02-06 13:52:26,065 INFO [train.py:901] (2/4) Epoch 13, batch 5400, loss[loss=0.2615, simple_loss=0.3186, pruned_loss=0.1022, over 6881.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3012, pruned_loss=0.07357, over 1609175.80 frames. ], batch size: 71, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:52:26,257 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102397.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 13:52:40,299 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:52:56,878 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102442.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:53:00,140 INFO [train.py:901] (2/4) Epoch 13, batch 5450, loss[loss=0.2047, simple_loss=0.2848, pruned_loss=0.06234, over 8084.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3035, pruned_loss=0.07508, over 1613241.94 frames. ], batch size: 21, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:53:07,658 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.724e+02 3.222e+02 3.900e+02 7.023e+02, threshold=6.444e+02, percent-clipped=3.0
+2023-02-06 13:53:12,593 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:53:34,976 INFO [train.py:901] (2/4) Epoch 13, batch 5500, loss[loss=0.207, simple_loss=0.2955, pruned_loss=0.05924, over 8255.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3034, pruned_loss=0.07483, over 1616796.59 frames. ], batch size: 24, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:53:41,583 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 13:54:03,097 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0903, 1.6516, 3.3318, 1.5372, 2.2561, 3.8270, 3.9064, 3.2138],
+ device='cuda:2'), covar=tensor([0.1056, 0.1568, 0.0409, 0.2142, 0.1133, 0.0228, 0.0511, 0.0622],
+ device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0300, 0.0265, 0.0296, 0.0277, 0.0237, 0.0360, 0.0295],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 13:54:09,466 INFO [train.py:901] (2/4) Epoch 13, batch 5550, loss[loss=0.2162, simple_loss=0.2914, pruned_loss=0.07049, over 7664.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3036, pruned_loss=0.07469, over 1615696.85 frames. ], batch size: 19, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:54:15,938 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.279e+02 3.010e+02 3.933e+02 6.976e+02, threshold=6.019e+02, percent-clipped=1.0
+2023-02-06 13:54:43,200 INFO [train.py:901] (2/4) Epoch 13, batch 5600, loss[loss=0.2239, simple_loss=0.3101, pruned_loss=0.06884, over 7978.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3025, pruned_loss=0.07434, over 1612719.58 frames. ], batch size: 21, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:54:44,703 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2119, 1.8741, 3.2753, 1.7348, 2.3946, 3.6815, 3.6363, 3.2201],
+ device='cuda:2'), covar=tensor([0.1027, 0.1470, 0.0453, 0.1933, 0.1221, 0.0218, 0.0634, 0.0564],
+ device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0302, 0.0266, 0.0296, 0.0278, 0.0239, 0.0360, 0.0296],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 13:55:04,664 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5180, 1.8095, 1.9610, 1.0785, 2.0017, 1.4345, 0.4049, 1.7537],
+ device='cuda:2'), covar=tensor([0.0338, 0.0242, 0.0164, 0.0387, 0.0268, 0.0632, 0.0599, 0.0169],
+ device='cuda:2'), in_proj_covar=tensor([0.0406, 0.0341, 0.0293, 0.0401, 0.0332, 0.0490, 0.0368, 0.0373],
+ device='cuda:2'), out_proj_covar=tensor([1.1347e-04, 9.3025e-05, 7.9922e-05, 1.1031e-04, 9.1562e-05, 1.4528e-04,
+ 1.0327e-04, 1.0324e-04], device='cuda:2')
+2023-02-06 13:55:18,286 INFO [train.py:901] (2/4) Epoch 13, batch 5650, loss[loss=0.2339, simple_loss=0.3194, pruned_loss=0.07414, over 8131.00 frames. ], tot_loss[loss=0.225, simple_loss=0.302, pruned_loss=0.07399, over 1610809.42 frames. ], batch size: 22, lr: 5.84e-03, grad_scale: 8.0
+2023-02-06 13:55:22,472 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102653.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 13:55:24,861 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.730e+02 3.267e+02 4.266e+02 8.129e+02, threshold=6.534e+02, percent-clipped=5.0
+2023-02-06 13:55:39,201 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102678.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 13:55:43,635 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 13:55:52,506 INFO [train.py:901] (2/4) Epoch 13, batch 5700, loss[loss=0.2168, simple_loss=0.2955, pruned_loss=0.06907, over 7826.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3031, pruned_loss=0.07437, over 1609567.95 frames. ], batch size: 20, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:56:08,738 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102720.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:56:25,982 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102745.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:56:27,756 INFO [train.py:901] (2/4) Epoch 13, batch 5750, loss[loss=0.2227, simple_loss=0.3074, pruned_loss=0.06901, over 8374.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3042, pruned_loss=0.07494, over 1612773.75 frames. ], batch size: 24, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:56:34,421 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 2.514e+02 3.075e+02 4.012e+02 7.214e+02, threshold=6.150e+02, percent-clipped=2.0
+2023-02-06 13:56:47,297 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 13:56:57,966 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 13:57:01,390 INFO [train.py:901] (2/4) Epoch 13, batch 5800, loss[loss=0.2623, simple_loss=0.3288, pruned_loss=0.09792, over 8484.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3034, pruned_loss=0.07459, over 1609873.49 frames. ], batch size: 25, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:57:28,953 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3878, 1.7287, 1.8138, 0.9839, 1.8714, 1.2408, 0.3742, 1.5387],
+ device='cuda:2'), covar=tensor([0.0503, 0.0305, 0.0234, 0.0513, 0.0341, 0.0846, 0.0766, 0.0281],
+ device='cuda:2'), in_proj_covar=tensor([0.0407, 0.0342, 0.0295, 0.0403, 0.0333, 0.0492, 0.0369, 0.0374],
+ device='cuda:2'), out_proj_covar=tensor([1.1376e-04, 9.3217e-05, 8.0403e-05, 1.1081e-04, 9.1968e-05, 1.4587e-04,
+ 1.0361e-04, 1.0345e-04], device='cuda:2')
+2023-02-06 13:57:36,623 INFO [train.py:901] (2/4) Epoch 13, batch 5850, loss[loss=0.2272, simple_loss=0.3119, pruned_loss=0.07125, over 8463.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3038, pruned_loss=0.07455, over 1611777.94 frames. ], batch size: 29, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:57:36,816 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9560, 2.0848, 1.9603, 2.8273, 1.4364, 1.5489, 2.0606, 2.2893],
+ device='cuda:2'), covar=tensor([0.0743, 0.0951, 0.0994, 0.0402, 0.1144, 0.1480, 0.0846, 0.0773],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0210, 0.0254, 0.0212, 0.0216, 0.0254, 0.0256, 0.0218],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 13:57:43,173 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.218e+02 2.874e+02 3.517e+02 7.476e+02, threshold=5.748e+02, percent-clipped=3.0
+2023-02-06 13:57:52,588 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102869.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:58:11,324 INFO [train.py:901] (2/4) Epoch 13, batch 5900, loss[loss=0.2954, simple_loss=0.347, pruned_loss=0.1218, over 6843.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3038, pruned_loss=0.07486, over 1605677.61 frames. ], batch size: 72, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:58:16,843 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102905.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 13:58:46,272 INFO [train.py:901] (2/4) Epoch 13, batch 5950, loss[loss=0.2068, simple_loss=0.2905, pruned_loss=0.06156, over 8239.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3055, pruned_loss=0.07586, over 1609937.39 frames. ], batch size: 22, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:58:52,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.537e+02 3.124e+02 4.010e+02 1.248e+03, threshold=6.247e+02, percent-clipped=9.0
+2023-02-06 13:59:21,495 INFO [train.py:901] (2/4) Epoch 13, batch 6000, loss[loss=0.2811, simple_loss=0.3396, pruned_loss=0.1113, over 7134.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3057, pruned_loss=0.07595, over 1608600.08 frames. ], batch size: 71, lr: 5.83e-03, grad_scale: 16.0
+2023-02-06 13:59:21,495 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 13:59:36,606 INFO [train.py:935] (2/4) Epoch 13, validation: loss=0.1836, simple_loss=0.2836, pruned_loss=0.04176, over 944034.00 frames.
+2023-02-06 13:59:36,607 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 14:00:11,030 INFO [train.py:901] (2/4) Epoch 13, batch 6050, loss[loss=0.1972, simple_loss=0.2702, pruned_loss=0.06206, over 7658.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3055, pruned_loss=0.07574, over 1610425.12 frames. ], batch size: 19, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:00:14,465 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4436, 1.5418, 1.8171, 1.1784, 0.9657, 1.7977, 0.0816, 1.1459],
+ device='cuda:2'), covar=tensor([0.2178, 0.1438, 0.0472, 0.1660, 0.3754, 0.0420, 0.2559, 0.1505],
+ device='cuda:2'), in_proj_covar=tensor([0.0172, 0.0173, 0.0106, 0.0218, 0.0254, 0.0110, 0.0164, 0.0170],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 14:00:18,307 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.480e+02 3.014e+02 3.999e+02 8.436e+02, threshold=6.027e+02, percent-clipped=4.0
+2023-02-06 14:00:21,130 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2082, 4.1070, 3.7820, 1.8337, 3.6432, 3.7625, 3.7861, 3.4137],
+ device='cuda:2'), covar=tensor([0.0830, 0.0688, 0.1046, 0.5022, 0.0980, 0.0897, 0.1450, 0.1050],
+ device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0391, 0.0397, 0.0495, 0.0391, 0.0392, 0.0389, 0.0343],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 14:00:45,637 INFO [train.py:901] (2/4) Epoch 13, batch 6100, loss[loss=0.2304, simple_loss=0.3083, pruned_loss=0.07623, over 8330.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3061, pruned_loss=0.07614, over 1611535.14 frames. ], batch size: 26, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:00:58,590 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103116.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:01:14,163 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 14:01:19,578 INFO [train.py:901] (2/4) Epoch 13, batch 6150, loss[loss=0.1817, simple_loss=0.2603, pruned_loss=0.05158, over 7221.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3053, pruned_loss=0.07556, over 1612622.72 frames. ], batch size: 16, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:01:21,558 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103150.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:01:26,199 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.434e+02 3.117e+02 4.172e+02 7.466e+02, threshold=6.235e+02, percent-clipped=2.0
+2023-02-06 14:01:46,193 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5078, 1.4210, 2.8112, 1.2294, 1.9101, 2.9860, 3.1020, 2.5107],
+ device='cuda:2'), covar=tensor([0.1279, 0.1627, 0.0420, 0.2272, 0.1067, 0.0342, 0.0694, 0.0760],
+ device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0303, 0.0265, 0.0297, 0.0278, 0.0239, 0.0361, 0.0295],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 14:01:51,716 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0418, 1.6584, 3.3604, 1.4072, 2.2324, 3.6828, 3.6924, 3.1061],
+ device='cuda:2'), covar=tensor([0.1098, 0.1532, 0.0401, 0.2087, 0.1114, 0.0220, 0.0517, 0.0601],
+ device='cuda:2'), in_proj_covar=tensor([0.0267, 0.0301, 0.0264, 0.0296, 0.0277, 0.0239, 0.0359, 0.0294],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 14:01:55,078 INFO [train.py:901] (2/4) Epoch 13, batch 6200, loss[loss=0.1892, simple_loss=0.2699, pruned_loss=0.05425, over 8079.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3044, pruned_loss=0.075, over 1616789.92 frames. ], batch size: 21, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:02:06,143 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103213.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:02:30,996 INFO [train.py:901] (2/4) Epoch 13, batch 6250, loss[loss=0.1726, simple_loss=0.252, pruned_loss=0.04658, over 7812.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3039, pruned_loss=0.07493, over 1612004.81 frames. ], batch size: 20, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:02:32,362 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103249.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:02:37,850 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.482e+02 2.950e+02 3.630e+02 6.819e+02, threshold=5.900e+02, percent-clipped=4.0
+2023-02-06 14:03:06,056 INFO [train.py:901] (2/4) Epoch 13, batch 6300, loss[loss=0.1979, simple_loss=0.2625, pruned_loss=0.0667, over 7280.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3034, pruned_loss=0.07495, over 1614237.69 frames. ], batch size: 16, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:03:27,935 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103328.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:03:40,545 INFO [train.py:901] (2/4) Epoch 13, batch 6350, loss[loss=0.219, simple_loss=0.3004, pruned_loss=0.06875, over 8553.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3041, pruned_loss=0.07508, over 1614348.86 frames. ], batch size: 31, lr: 5.82e-03, grad_scale: 16.0
+2023-02-06 14:03:48,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.547e+02 3.093e+02 3.716e+02 8.603e+02, threshold=6.185e+02, percent-clipped=3.0
+2023-02-06 14:03:52,985 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103364.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:04:06,352 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103384.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 14:04:14,877 INFO [train.py:901] (2/4) Epoch 13, batch 6400, loss[loss=0.2456, simple_loss=0.3067, pruned_loss=0.09227, over 8243.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3035, pruned_loss=0.07459, over 1618989.30 frames. ], batch size: 22, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:04:49,451 INFO [train.py:901] (2/4) Epoch 13, batch 6450, loss[loss=0.21, simple_loss=0.2816, pruned_loss=0.06923, over 7648.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3034, pruned_loss=0.0742, over 1617054.05 frames. ], batch size: 19, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:04:56,168 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.528e+02 3.186e+02 3.863e+02 6.544e+02, threshold=6.372e+02, percent-clipped=1.0
+2023-02-06 14:04:58,241 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103460.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:05:13,645 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2416, 1.2197, 1.5279, 1.1549, 0.6901, 1.3109, 1.1977, 1.2003],
+ device='cuda:2'), covar=tensor([0.0528, 0.1258, 0.1677, 0.1390, 0.0573, 0.1484, 0.0638, 0.0626],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0155, 0.0100, 0.0161, 0.0113, 0.0137],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 14:05:22,247 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103494.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:05:24,240 INFO [train.py:901] (2/4) Epoch 13, batch 6500, loss[loss=0.2211, simple_loss=0.2985, pruned_loss=0.07185, over 8159.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3025, pruned_loss=0.07423, over 1615140.00 frames. ], batch size: 48, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:05:58,666 INFO [train.py:901] (2/4) Epoch 13, batch 6550, loss[loss=0.1978, simple_loss=0.2818, pruned_loss=0.05693, over 7934.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3032, pruned_loss=0.07446, over 1615895.73 frames. ], batch size: 20, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:06:05,487 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.442e+02 3.089e+02 4.027e+02 9.292e+02, threshold=6.177e+02, percent-clipped=8.0
+2023-02-06 14:06:12,684 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5199, 5.5948, 4.9976, 2.4799, 4.8696, 5.3161, 5.2035, 5.0680],
+ device='cuda:2'), covar=tensor([0.0555, 0.0400, 0.0848, 0.4383, 0.0680, 0.0659, 0.0938, 0.0637],
+ device='cuda:2'), in_proj_covar=tensor([0.0476, 0.0389, 0.0399, 0.0498, 0.0394, 0.0392, 0.0385, 0.0343],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 14:06:18,302 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103575.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:06:24,184 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 14:06:24,395 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103584.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:06:34,118 INFO [train.py:901] (2/4) Epoch 13, batch 6600, loss[loss=0.1991, simple_loss=0.2785, pruned_loss=0.05984, over 7925.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3023, pruned_loss=0.07375, over 1618188.86 frames. ], batch size: 20, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:06:36,427 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6377, 1.1954, 1.4500, 1.1199, 0.7893, 1.2223, 1.4744, 1.3720],
+ device='cuda:2'), covar=tensor([0.0546, 0.1314, 0.1895, 0.1519, 0.0621, 0.1626, 0.0715, 0.0697],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0154, 0.0100, 0.0161, 0.0114, 0.0137],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 14:06:37,031 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0121, 1.3598, 1.5589, 1.2566, 0.8300, 1.3585, 1.6425, 1.6594],
+ device='cuda:2'), covar=tensor([0.0483, 0.1197, 0.1657, 0.1404, 0.0622, 0.1500, 0.0687, 0.0609],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0154, 0.0100, 0.0161, 0.0114, 0.0137],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 14:06:42,485 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103609.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:06:42,507 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103609.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:06:43,683 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 14:06:49,829 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103620.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:07:02,402 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6339, 2.2575, 3.6400, 2.6525, 3.1274, 2.4964, 2.0747, 1.8613],
+ device='cuda:2'), covar=tensor([0.4115, 0.4517, 0.1331, 0.3241, 0.2200, 0.2364, 0.1733, 0.4915],
+ device='cuda:2'), in_proj_covar=tensor([0.0896, 0.0887, 0.0742, 0.0866, 0.0945, 0.0822, 0.0706, 0.0774],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 14:07:07,869 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:07:09,047 INFO [train.py:901] (2/4) Epoch 13, batch 6650, loss[loss=0.2201, simple_loss=0.2952, pruned_loss=0.07255, over 7814.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3033, pruned_loss=0.0743, over 1615442.15 frames. ], batch size: 19, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:07:16,587 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.459e+02 2.800e+02 3.637e+02 6.016e+02, threshold=5.600e+02, percent-clipped=0.0
+2023-02-06 14:07:43,986 INFO [train.py:901] (2/4) Epoch 13, batch 6700, loss[loss=0.2316, simple_loss=0.2906, pruned_loss=0.08631, over 7805.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3038, pruned_loss=0.07468, over 1612000.30 frames. ], batch size: 19, lr: 5.81e-03, grad_scale: 16.0
+2023-02-06 14:08:06,486 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103728.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 14:08:18,549 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 14:08:19,449 INFO [train.py:901] (2/4) Epoch 13, batch 6750, loss[loss=0.2214, simple_loss=0.2865, pruned_loss=0.07811, over 7241.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3038, pruned_loss=0.07437, over 1616514.16 frames. ], batch size: 16, lr: 5.80e-03, grad_scale: 16.0
+2023-02-06 14:08:24,987 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3831, 2.9781, 2.4095, 3.9285, 1.7465, 2.2584, 2.4112, 2.9175],
+ device='cuda:2'), covar=tensor([0.0789, 0.0821, 0.0933, 0.0273, 0.1219, 0.1264, 0.1104, 0.0919],
+ device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0205, 0.0250, 0.0208, 0.0213, 0.0249, 0.0252, 0.0213],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 14:08:26,126 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.551e+02 3.234e+02 3.983e+02 1.044e+03, threshold=6.469e+02, percent-clipped=6.0
+2023-02-06 14:08:26,989 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103758.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:08:44,328 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103783.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:08:53,551 INFO [train.py:901] (2/4) Epoch 13, batch 6800, loss[loss=0.2401, simple_loss=0.3034, pruned_loss=0.08838, over 7239.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3025, pruned_loss=0.07345, over 1610201.13 frames. ], batch size: 16, lr: 5.80e-03, grad_scale: 16.0
+2023-02-06 14:08:58,344 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 14:09:17,167 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103831.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:09:19,856 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7937, 1.8793, 1.6917, 2.3511, 1.0490, 1.5116, 1.6444, 1.9374],
+ device='cuda:2'), covar=tensor([0.0692, 0.0753, 0.0961, 0.0423, 0.1093, 0.1402, 0.0814, 0.0691],
+ device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0205, 0.0251, 0.0208, 0.0213, 0.0249, 0.0253, 0.0213],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 14:09:25,231 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103843.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 14:09:27,543 INFO [train.py:901] (2/4) Epoch 13, batch 6850, loss[loss=0.2477, simple_loss=0.3122, pruned_loss=0.09161, over 7704.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3016, pruned_loss=0.07334, over 1609690.38 frames. ], batch size: 18, lr: 5.80e-03, grad_scale: 16.0
+2023-02-06 14:09:33,732 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103856.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:09:34,154 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.955e+02 2.670e+02 3.153e+02 3.957e+02 9.275e+02, threshold=6.306e+02, percent-clipped=2.0
+2023-02-06 14:09:40,249 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103865.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:09:44,804 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 14:09:46,353 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0183, 1.8392, 2.4765, 2.0362, 2.3540, 2.0454, 1.7221, 1.1536],
+ device='cuda:2'), covar=tensor([0.4531, 0.3908, 0.1390, 0.2580, 0.1864, 0.2481, 0.1805, 0.3919],
+ device='cuda:2'), in_proj_covar=tensor([0.0899, 0.0887, 0.0742, 0.0866, 0.0948, 0.0822, 0.0705, 0.0777],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 14:09:57,532 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103890.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:10:00,231 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103894.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:10:02,110 INFO [train.py:901] (2/4) Epoch 13, batch 6900, loss[loss=0.1722, simple_loss=0.2524, pruned_loss=0.04601, over 7686.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3021, pruned_loss=0.07391, over 1607562.86 frames. ], batch size: 18, lr: 5.80e-03, grad_scale: 8.0
+2023-02-06 14:10:35,839 INFO [train.py:901] (2/4) Epoch 13, batch 6950, loss[loss=0.2544, simple_loss=0.3179, pruned_loss=0.09539, over 8442.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3022, pruned_loss=0.07429, over 1608306.50 frames. ], batch size: 27, lr: 5.80e-03, grad_scale: 8.0
+2023-02-06 14:10:43,033 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.539e+02 3.074e+02 3.917e+02 9.810e+02, threshold=6.147e+02, percent-clipped=9.0
+2023-02-06 14:10:53,184 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 14:11:00,841 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4555, 1.9419, 3.2058, 1.2906, 2.3105, 1.8525, 1.6029, 2.1039],
+ device='cuda:2'), covar=tensor([0.1812, 0.2242, 0.0695, 0.4045, 0.1697, 0.3104, 0.2006, 0.2413],
+ device='cuda:2'), in_proj_covar=tensor([0.0496, 0.0537, 0.0531, 0.0588, 0.0620, 0.0559, 0.0483, 0.0613],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 14:11:09,727 INFO [train.py:901] (2/4) Epoch 13, batch 7000, loss[loss=0.2305, simple_loss=0.3115, pruned_loss=0.07477, over 8648.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3019, pruned_loss=0.07421, over 1607167.29 frames. ], batch size: 34, lr: 5.80e-03, grad_scale: 8.0
+2023-02-06 14:11:18,201 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104008.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:11:19,231 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 14:11:23,553 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104015.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:11:35,613 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9301, 2.1461, 1.6380, 2.5638, 1.3958, 1.4277, 1.8636, 2.1194],
+ device='cuda:2'), covar=tensor([0.0715, 0.0751, 0.1039, 0.0420, 0.1101, 0.1441, 0.0894, 0.0702],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0208, 0.0252, 0.0210, 0.0215, 0.0252, 0.0256, 0.0215],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 14:11:43,224 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0
+2023-02-06 14:11:44,818 INFO [train.py:901] (2/4) Epoch 13, batch 7050, loss[loss=0.2957, simple_loss=0.3705, pruned_loss=0.1104, over 8617.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3021, pruned_loss=0.07394, over 1613320.93 frames. ], batch size: 31, lr: 5.80e-03, grad_scale: 8.0
+2023-02-06 14:11:52,798 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.385e+02 2.879e+02 3.637e+02 6.044e+02, threshold=5.759e+02, percent-clipped=0.0
+2023-02-06 14:11:58,856 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104067.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:12:01,820 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 14:12:18,830 INFO [train.py:901] (2/4) Epoch 13, batch 7100, loss[loss=0.207, simple_loss=0.2913, pruned_loss=0.06133, over 8255.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3017, pruned_loss=0.07394, over 1611607.77 frames. ], batch size: 24, lr: 5.79e-03, grad_scale: 8.0
+2023-02-06 14:12:21,101 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104099.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 14:12:22,946 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104102.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:12:37,509 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104124.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 14:12:39,432 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104127.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:12:46,609 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 14:12:47,798 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-02-06 14:12:52,701 INFO [train.py:901] (2/4) Epoch 13, batch 7150, loss[loss=0.2211, simple_loss=0.3073, pruned_loss=0.0675, over 8499.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3036, pruned_loss=0.07522, over 1611018.67 frames. ], batch size: 28, lr: 5.79e-03, grad_scale: 8.0
+2023-02-06 14:13:00,084 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.541e+02 2.991e+02 4.071e+02 7.912e+02, threshold=5.982e+02, percent-clipped=4.0
+2023-02-06 14:13:27,754 INFO [train.py:901] (2/4) Epoch 13, batch 7200, loss[loss=0.1989, simple_loss=0.2731, pruned_loss=0.06236, over 7203.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3041, pruned_loss=0.07537, over 1614409.03 frames. ], batch size: 16, lr: 5.79e-03, grad_scale: 8.0
+2023-02-06 14:13:42,139 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:13:55,692 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:13:58,507 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104242.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:14:01,682 INFO [train.py:901] (2/4) Epoch 13, batch 7250, loss[loss=0.296, simple_loss=0.3501, pruned_loss=0.121, over 6740.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3037, pruned_loss=0.07532, over 1605997.30 frames. ], batch size: 72, lr: 5.79e-03, grad_scale: 8.0
+2023-02-06 14:14:09,619 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.469e+02 3.063e+02 3.939e+02 8.277e+02, threshold=6.126e+02, percent-clipped=7.0
+2023-02-06 14:14:37,319 INFO [train.py:901] (2/4) Epoch 13, batch 7300, loss[loss=0.2023, simple_loss=0.2824, pruned_loss=0.06111, over 7194.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3035, pruned_loss=0.07512, over 1606902.85 frames. ], batch size: 16, lr: 5.79e-03, grad_scale: 8.0
+2023-02-06 14:14:41,423 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1621, 1.4904, 4.3835, 1.9241, 2.4374, 5.1048, 5.0474, 4.3595],
+ device='cuda:2'), covar=tensor([0.1197, 0.1772, 0.0282, 0.1990, 0.1176, 0.0170, 0.0494, 0.0572],
+ device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0302, 0.0264, 0.0295, 0.0280, 0.0240, 0.0361, 0.0296],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 14:15:11,548 INFO [train.py:901] (2/4) Epoch 13, batch 7350, loss[loss=0.2069, simple_loss=0.2926, pruned_loss=0.06061, over 7633.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3033, pruned_loss=0.07487, over 1607602.39 frames. ], batch size: 19, lr: 5.79e-03, grad_scale: 8.0
+2023-02-06 14:15:14,977 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104352.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:15:15,787 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104353.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:15:19,027 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 14:15:19,099 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.439e+02 3.043e+02 3.823e+02 6.373e+02, threshold=6.086e+02, percent-clipped=2.0
+2023-02-06 14:15:19,897 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104359.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 14:15:30,134 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4850, 1.9116, 3.4795, 1.3171, 2.4430, 1.8831, 1.5562, 2.4448],
+ device='cuda:2'), covar=tensor([0.2007, 0.2455, 0.0753, 0.4240, 0.1713, 0.3141, 0.2152, 0.2180],
+ device='cuda:2'), in_proj_covar=tensor([0.0500, 0.0543, 0.0535, 0.0595, 0.0623, 0.0563, 0.0490, 0.0619],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 14:15:32,076 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7584, 2.2923, 3.4963, 2.6169, 3.0185, 2.5306, 2.1056, 1.8337],
+ device='cuda:2'), covar=tensor([0.3971, 0.4711, 0.1382, 0.2984, 0.2262, 0.2380, 0.1700, 0.4844],
+ device='cuda:2'), in_proj_covar=tensor([0.0886, 0.0885, 0.0744, 0.0862, 0.0941, 0.0819, 0.0704, 0.0776],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 14:15:33,232 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 14:15:46,158 INFO [train.py:901] (2/4) Epoch 13, batch 7400, loss[loss=0.2079, simple_loss=0.2908, pruned_loss=0.06244, over 8195.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3022, pruned_loss=0.074, over 1607418.62 frames. 
], batch size: 23, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:53,248 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 14:15:56,727 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:12,371 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0321, 1.5422, 3.3241, 1.4273, 2.2141, 3.7440, 3.7791, 3.1262], + device='cuda:2'), covar=tensor([0.1134, 0.1650, 0.0387, 0.2069, 0.1157, 0.0205, 0.0456, 0.0632], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0302, 0.0264, 0.0294, 0.0279, 0.0239, 0.0360, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:16:21,096 INFO [train.py:901] (2/4) Epoch 13, batch 7450, loss[loss=0.2383, simple_loss=0.3192, pruned_loss=0.07871, over 8460.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3028, pruned_loss=0.07462, over 1606742.06 frames. ], batch size: 27, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:16:29,258 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 2.494e+02 3.000e+02 3.814e+02 1.100e+03, threshold=5.999e+02, percent-clipped=4.0 +2023-02-06 14:16:33,245 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 14:16:33,404 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8550, 1.8564, 4.4123, 1.9551, 2.4229, 5.1396, 5.0561, 4.3685], + device='cuda:2'), covar=tensor([0.0910, 0.1569, 0.0258, 0.1990, 0.1187, 0.0143, 0.0387, 0.0562], + device='cuda:2'), in_proj_covar=tensor([0.0268, 0.0299, 0.0263, 0.0292, 0.0277, 0.0237, 0.0357, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:16:35,445 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104467.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:39,562 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:40,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:55,893 INFO [train.py:901] (2/4) Epoch 13, batch 7500, loss[loss=0.2334, simple_loss=0.3229, pruned_loss=0.07194, over 8258.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3034, pruned_loss=0.07528, over 1607282.98 frames. ], batch size: 24, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:16:56,753 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:56,772 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:14,578 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:16,624 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:30,650 INFO [train.py:901] (2/4) Epoch 13, batch 7550, loss[loss=0.2136, simple_loss=0.2781, pruned_loss=0.0746, over 7777.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3049, pruned_loss=0.07619, over 1606004.99 frames. 
], batch size: 19, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:17:37,881 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.482e+02 3.042e+02 4.105e+02 9.709e+02, threshold=6.085e+02, percent-clipped=7.0 +2023-02-06 14:18:05,105 INFO [train.py:901] (2/4) Epoch 13, batch 7600, loss[loss=0.2087, simple_loss=0.3001, pruned_loss=0.05869, over 8332.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3044, pruned_loss=0.0755, over 1609254.16 frames. ], batch size: 25, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:13,225 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:30,785 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:40,157 INFO [train.py:901] (2/4) Epoch 13, batch 7650, loss[loss=0.2243, simple_loss=0.2958, pruned_loss=0.07638, over 7906.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3033, pruned_loss=0.07489, over 1606225.12 frames. ], batch size: 20, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:47,609 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 2.638e+02 3.280e+02 4.340e+02 1.130e+03, threshold=6.560e+02, percent-clipped=9.0 +2023-02-06 14:19:14,835 INFO [train.py:901] (2/4) Epoch 13, batch 7700, loss[loss=0.2271, simple_loss=0.3121, pruned_loss=0.07107, over 8338.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3026, pruned_loss=0.0746, over 1605752.52 frames. ], batch size: 25, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:33,198 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:37,761 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 14:19:37,980 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:49,824 INFO [train.py:901] (2/4) Epoch 13, batch 7750, loss[loss=0.269, simple_loss=0.3371, pruned_loss=0.1005, over 8668.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3031, pruned_loss=0.07479, over 1604255.26 frames. 
], batch size: 49, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:50,602 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104748.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:55,987 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:57,815 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.530e+02 2.944e+02 3.392e+02 9.198e+02, threshold=5.888e+02, percent-clipped=3.0 +2023-02-06 14:20:10,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1822, 1.2050, 2.3033, 1.1024, 1.9494, 2.4987, 2.6207, 2.1052], + device='cuda:2'), covar=tensor([0.1191, 0.1421, 0.0508, 0.2208, 0.0841, 0.0404, 0.0796, 0.0823], + device='cuda:2'), in_proj_covar=tensor([0.0269, 0.0304, 0.0263, 0.0293, 0.0278, 0.0238, 0.0360, 0.0294], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:20:14,010 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:20,306 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 14:20:24,829 INFO [train.py:901] (2/4) Epoch 13, batch 7800, loss[loss=0.2091, simple_loss=0.2975, pruned_loss=0.06031, over 8497.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3007, pruned_loss=0.07278, over 1609019.08 frames. ], batch size: 26, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:20:31,891 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:52,229 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9757, 1.5098, 1.6431, 1.4219, 0.9842, 1.5052, 1.7184, 1.5740], + device='cuda:2'), covar=tensor([0.0500, 0.1145, 0.1538, 0.1299, 0.0595, 0.1377, 0.0635, 0.0563], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0149, 0.0188, 0.0155, 0.0100, 0.0160, 0.0112, 0.0136], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 14:20:57,821 INFO [train.py:901] (2/4) Epoch 13, batch 7850, loss[loss=0.1935, simple_loss=0.274, pruned_loss=0.05651, over 7815.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3015, pruned_loss=0.07325, over 1607474.90 frames. ], batch size: 20, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:05,227 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.619e+02 3.074e+02 4.074e+02 1.012e+03, threshold=6.148e+02, percent-clipped=5.0 +2023-02-06 14:21:30,904 INFO [train.py:901] (2/4) Epoch 13, batch 7900, loss[loss=0.1881, simple_loss=0.2677, pruned_loss=0.05423, over 7534.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3024, pruned_loss=0.07372, over 1607790.91 frames. ], batch size: 18, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:04,219 INFO [train.py:901] (2/4) Epoch 13, batch 7950, loss[loss=0.2468, simple_loss=0.323, pruned_loss=0.08533, over 8339.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3027, pruned_loss=0.07355, over 1608254.36 frames. 
], batch size: 25, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:11,301 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.521e+02 3.027e+02 3.866e+02 6.555e+02, threshold=6.053e+02, percent-clipped=2.0 +2023-02-06 14:22:37,760 INFO [train.py:901] (2/4) Epoch 13, batch 8000, loss[loss=0.2199, simple_loss=0.2861, pruned_loss=0.07685, over 8081.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3038, pruned_loss=0.07436, over 1603966.89 frames. ], batch size: 21, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:23:10,562 INFO [train.py:901] (2/4) Epoch 13, batch 8050, loss[loss=0.2232, simple_loss=0.2862, pruned_loss=0.08008, over 7925.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3033, pruned_loss=0.07494, over 1596860.94 frames. ], batch size: 20, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:23:18,070 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.417e+02 2.946e+02 3.621e+02 6.025e+02, threshold=5.892e+02, percent-clipped=0.0 +2023-02-06 14:23:23,619 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9567, 3.9175, 2.4403, 2.7493, 3.0817, 2.4109, 2.8256, 3.1208], + device='cuda:2'), covar=tensor([0.1701, 0.0305, 0.1119, 0.0824, 0.0627, 0.1316, 0.1055, 0.0989], + device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0236, 0.0318, 0.0296, 0.0298, 0.0321, 0.0338, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:23:50,198 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 14:23:54,152 INFO [train.py:901] (2/4) Epoch 14, batch 0, loss[loss=0.2055, simple_loss=0.2811, pruned_loss=0.06502, over 8085.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2811, pruned_loss=0.06502, over 8085.00 frames. ], batch size: 21, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:23:54,152 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 14:24:01,292 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5383, 1.7647, 2.6126, 1.3037, 1.9883, 1.8453, 1.5494, 1.9024], + device='cuda:2'), covar=tensor([0.1744, 0.2448, 0.0878, 0.4174, 0.1742, 0.3060, 0.2134, 0.2035], + device='cuda:2'), in_proj_covar=tensor([0.0499, 0.0545, 0.0539, 0.0600, 0.0623, 0.0566, 0.0493, 0.0619], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 14:24:05,197 INFO [train.py:935] (2/4) Epoch 14, validation: loss=0.184, simple_loss=0.2839, pruned_loss=0.04201, over 944034.00 frames. +2023-02-06 14:24:05,197 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 14:24:21,244 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 14:24:38,588 INFO [train.py:901] (2/4) Epoch 14, batch 50, loss[loss=0.2323, simple_loss=0.3111, pruned_loss=0.07677, over 8347.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.309, pruned_loss=0.07643, over 370019.40 frames. ], batch size: 26, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:24:54,770 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 14:24:58,160 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.684e+02 3.092e+02 3.835e+02 7.852e+02, threshold=6.183e+02, percent-clipped=3.0 +2023-02-06 14:25:14,421 INFO [train.py:901] (2/4) Epoch 14, batch 100, loss[loss=0.2358, simple_loss=0.3168, pruned_loss=0.07736, over 8491.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3055, pruned_loss=0.07428, over 647109.44 frames. ], batch size: 26, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:25:17,792 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 14:25:48,648 INFO [train.py:901] (2/4) Epoch 14, batch 150, loss[loss=0.1824, simple_loss=0.2706, pruned_loss=0.04708, over 7931.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3065, pruned_loss=0.07534, over 858909.02 frames. ], batch size: 20, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:08,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.384e+02 2.990e+02 3.742e+02 5.781e+02, threshold=5.980e+02, percent-clipped=0.0 +2023-02-06 14:26:23,058 INFO [train.py:901] (2/4) Epoch 14, batch 200, loss[loss=0.2304, simple_loss=0.3085, pruned_loss=0.07613, over 8627.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3062, pruned_loss=0.07535, over 1030769.11 frames. ], batch size: 49, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:58,947 INFO [train.py:901] (2/4) Epoch 14, batch 250, loss[loss=0.2162, simple_loss=0.3057, pruned_loss=0.06333, over 8113.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3056, pruned_loss=0.07552, over 1159766.86 frames. ], batch size: 23, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:07,613 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 14:27:15,937 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 14:27:18,043 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.546e+02 3.157e+02 4.204e+02 9.163e+02, threshold=6.313e+02, percent-clipped=6.0 +2023-02-06 14:27:33,656 INFO [train.py:901] (2/4) Epoch 14, batch 300, loss[loss=0.2404, simple_loss=0.3249, pruned_loss=0.07792, over 8715.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.305, pruned_loss=0.07542, over 1259129.51 frames. 
], batch size: 34, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:52,149 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5455, 4.4620, 3.9973, 1.8553, 3.9434, 4.1569, 4.0478, 3.8187], + device='cuda:2'), covar=tensor([0.0757, 0.0629, 0.1137, 0.5291, 0.0872, 0.0871, 0.1341, 0.0923], + device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0391, 0.0402, 0.0502, 0.0394, 0.0393, 0.0382, 0.0344], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 14:27:52,820 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:27:52,839 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7203, 1.4031, 2.7988, 1.3671, 2.0749, 3.0286, 3.1209, 2.5606], + device='cuda:2'), covar=tensor([0.1081, 0.1546, 0.0404, 0.1991, 0.0902, 0.0312, 0.0666, 0.0680], + device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0304, 0.0266, 0.0294, 0.0280, 0.0242, 0.0363, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:28:09,626 INFO [train.py:901] (2/4) Epoch 14, batch 350, loss[loss=0.2129, simple_loss=0.273, pruned_loss=0.0764, over 7252.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3041, pruned_loss=0.07426, over 1338583.95 frames. ], batch size: 16, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:28:28,598 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 2.437e+02 2.818e+02 3.446e+02 5.751e+02, threshold=5.636e+02, percent-clipped=0.0 +2023-02-06 14:28:43,599 INFO [train.py:901] (2/4) Epoch 14, batch 400, loss[loss=0.2557, simple_loss=0.3329, pruned_loss=0.08922, over 8291.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.305, pruned_loss=0.07463, over 1403742.19 frames. ], batch size: 23, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:00,989 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:04,408 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2457, 2.6517, 2.1077, 3.6319, 1.7025, 1.8961, 2.4121, 2.9734], + device='cuda:2'), covar=tensor([0.0772, 0.0831, 0.0959, 0.0324, 0.1131, 0.1334, 0.0995, 0.0749], + device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0207, 0.0252, 0.0212, 0.0213, 0.0254, 0.0259, 0.0216], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 14:29:13,227 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:13,562 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 14:29:20,748 INFO [train.py:901] (2/4) Epoch 14, batch 450, loss[loss=0.2509, simple_loss=0.3339, pruned_loss=0.08395, over 8332.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3041, pruned_loss=0.07363, over 1456075.99 frames. ], batch size: 26, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:40,047 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.497e+02 2.804e+02 3.770e+02 6.336e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 14:29:55,211 INFO [train.py:901] (2/4) Epoch 14, batch 500, loss[loss=0.2248, simple_loss=0.307, pruned_loss=0.0713, over 8323.00 frames. 
], tot_loss[loss=0.2263, simple_loss=0.3043, pruned_loss=0.07418, over 1491129.82 frames. ], batch size: 25, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:22,228 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9278, 1.5557, 2.1102, 1.8470, 1.9947, 1.8821, 1.6324, 0.7751], + device='cuda:2'), covar=tensor([0.4681, 0.4233, 0.1509, 0.2769, 0.2099, 0.2446, 0.1795, 0.4246], + device='cuda:2'), in_proj_covar=tensor([0.0894, 0.0887, 0.0743, 0.0868, 0.0947, 0.0819, 0.0704, 0.0778], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 14:30:29,395 INFO [train.py:901] (2/4) Epoch 14, batch 550, loss[loss=0.2565, simple_loss=0.3349, pruned_loss=0.0891, over 8629.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3042, pruned_loss=0.07408, over 1518922.39 frames. ], batch size: 49, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:50,293 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.442e+02 2.933e+02 3.700e+02 8.163e+02, threshold=5.867e+02, percent-clipped=3.0 +2023-02-06 14:30:56,676 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3355, 2.1626, 1.5258, 1.8915, 1.7209, 1.3687, 1.5503, 1.6797], + device='cuda:2'), covar=tensor([0.1302, 0.0416, 0.1275, 0.0578, 0.0751, 0.1462, 0.1063, 0.0938], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0236, 0.0318, 0.0295, 0.0299, 0.0320, 0.0339, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:31:05,191 INFO [train.py:901] (2/4) Epoch 14, batch 600, loss[loss=0.249, simple_loss=0.3274, pruned_loss=0.08528, over 8448.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.3037, pruned_loss=0.07337, over 1544779.58 frames. ], batch size: 25, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:18,468 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 14:31:39,813 INFO [train.py:901] (2/4) Epoch 14, batch 650, loss[loss=0.2032, simple_loss=0.2874, pruned_loss=0.05946, over 8261.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3037, pruned_loss=0.07358, over 1559565.02 frames. ], batch size: 24, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:44,178 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7278, 3.0274, 2.6982, 4.1241, 1.6513, 2.2557, 2.5682, 3.2670], + device='cuda:2'), covar=tensor([0.0665, 0.0839, 0.0869, 0.0271, 0.1237, 0.1333, 0.1011, 0.0755], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0205, 0.0252, 0.0211, 0.0213, 0.0253, 0.0258, 0.0217], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 14:31:54,393 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:01,338 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.402e+02 3.000e+02 3.711e+02 7.109e+02, threshold=6.000e+02, percent-clipped=4.0 +2023-02-06 14:32:17,353 INFO [train.py:901] (2/4) Epoch 14, batch 700, loss[loss=0.2242, simple_loss=0.3144, pruned_loss=0.06705, over 8473.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3013, pruned_loss=0.07247, over 1569768.59 frames. 
], batch size: 25, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:32:37,870 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:42,742 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105817.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:51,388 INFO [train.py:901] (2/4) Epoch 14, batch 750, loss[loss=0.2319, simple_loss=0.3254, pruned_loss=0.06915, over 8287.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3021, pruned_loss=0.07288, over 1581915.17 frames. ], batch size: 23, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:33:03,873 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:06,461 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 14:33:06,989 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 14:33:11,294 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.459e+02 2.898e+02 3.725e+02 7.154e+02, threshold=5.796e+02, percent-clipped=4.0 +2023-02-06 14:33:12,182 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9656, 1.6426, 1.7330, 1.5100, 1.0765, 1.5789, 1.7099, 1.7214], + device='cuda:2'), covar=tensor([0.0502, 0.1104, 0.1560, 0.1344, 0.0613, 0.1371, 0.0669, 0.0577], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0157, 0.0101, 0.0162, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 14:33:15,491 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:16,040 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 14:33:16,240 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:18,463 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.68 vs. limit=5.0 +2023-02-06 14:33:26,359 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 14:33:27,173 INFO [train.py:901] (2/4) Epoch 14, batch 800, loss[loss=0.2451, simple_loss=0.3248, pruned_loss=0.08271, over 8131.00 frames. ], tot_loss[loss=0.224, simple_loss=0.302, pruned_loss=0.07303, over 1589067.98 frames. ], batch size: 22, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:33:27,419 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5505, 1.7633, 2.7585, 1.3133, 1.9156, 1.8579, 1.6114, 1.8681], + device='cuda:2'), covar=tensor([0.1763, 0.2249, 0.0813, 0.4042, 0.1801, 0.2966, 0.1823, 0.2121], + device='cuda:2'), in_proj_covar=tensor([0.0498, 0.0543, 0.0535, 0.0598, 0.0625, 0.0562, 0.0492, 0.0619], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 14:34:02,179 INFO [train.py:901] (2/4) Epoch 14, batch 850, loss[loss=0.2696, simple_loss=0.3425, pruned_loss=0.09834, over 8466.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3023, pruned_loss=0.07325, over 1598067.68 frames. 
], batch size: 25, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:34:20,960 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.477e+02 2.961e+02 4.061e+02 6.411e+02, threshold=5.921e+02, percent-clipped=4.0 +2023-02-06 14:34:24,619 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:36,587 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:37,082 INFO [train.py:901] (2/4) Epoch 14, batch 900, loss[loss=0.2469, simple_loss=0.3288, pruned_loss=0.08247, over 8452.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.304, pruned_loss=0.07437, over 1604300.32 frames. ], batch size: 29, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:02,679 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1357, 2.4917, 3.0631, 1.7334, 3.1801, 1.7112, 1.3972, 2.0230], + device='cuda:2'), covar=tensor([0.0668, 0.0286, 0.0168, 0.0573, 0.0333, 0.0750, 0.0700, 0.0472], + device='cuda:2'), in_proj_covar=tensor([0.0409, 0.0344, 0.0299, 0.0405, 0.0333, 0.0493, 0.0367, 0.0376], + device='cuda:2'), out_proj_covar=tensor([1.1399e-04, 9.3554e-05, 8.1063e-05, 1.1089e-04, 9.1507e-05, 1.4540e-04, + 1.0260e-04, 1.0377e-04], device='cuda:2') +2023-02-06 14:35:14,900 INFO [train.py:901] (2/4) Epoch 14, batch 950, loss[loss=0.2115, simple_loss=0.2861, pruned_loss=0.06842, over 8538.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3036, pruned_loss=0.07418, over 1602530.72 frames. ], batch size: 28, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:33,973 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.617e+02 3.202e+02 4.119e+02 6.844e+02, threshold=6.403e+02, percent-clipped=3.0 +2023-02-06 14:35:38,945 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 14:35:49,296 INFO [train.py:901] (2/4) Epoch 14, batch 1000, loss[loss=0.1869, simple_loss=0.2608, pruned_loss=0.05653, over 7548.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3023, pruned_loss=0.07318, over 1601169.47 frames. ], batch size: 18, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:00,600 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:14,287 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 14:36:20,059 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:26,960 INFO [train.py:901] (2/4) Epoch 14, batch 1050, loss[loss=0.1661, simple_loss=0.2469, pruned_loss=0.04268, over 7439.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3018, pruned_loss=0.07272, over 1604591.56 frames. ], batch size: 17, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:26,968 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-06 14:36:37,984 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:43,496 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:46,252 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.417e+02 2.951e+02 3.593e+02 9.096e+02, threshold=5.903e+02, percent-clipped=2.0 +2023-02-06 14:36:48,430 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:48,710 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 14:37:01,621 INFO [train.py:901] (2/4) Epoch 14, batch 1100, loss[loss=0.2064, simple_loss=0.2918, pruned_loss=0.06055, over 8245.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3026, pruned_loss=0.07322, over 1611321.63 frames. ], batch size: 24, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:27,046 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9151, 1.6108, 2.0609, 1.8525, 1.9699, 1.8752, 1.6201, 0.8031], + device='cuda:2'), covar=tensor([0.4736, 0.4090, 0.1504, 0.2715, 0.2079, 0.2528, 0.1787, 0.4086], + device='cuda:2'), in_proj_covar=tensor([0.0899, 0.0893, 0.0747, 0.0871, 0.0955, 0.0826, 0.0707, 0.0782], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 14:37:29,713 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:35,898 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 14:37:38,717 INFO [train.py:901] (2/4) Epoch 14, batch 1150, loss[loss=0.2163, simple_loss=0.3025, pruned_loss=0.06504, over 8187.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3028, pruned_loss=0.07323, over 1613318.87 frames. ], batch size: 23, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:42,398 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:46,996 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6479, 1.4047, 4.7786, 1.7840, 4.2401, 3.9400, 4.3053, 4.1724], + device='cuda:2'), covar=tensor([0.0442, 0.4627, 0.0460, 0.3695, 0.0980, 0.0879, 0.0509, 0.0558], + device='cuda:2'), in_proj_covar=tensor([0.0525, 0.0588, 0.0621, 0.0557, 0.0636, 0.0541, 0.0531, 0.0600], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 14:37:49,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:53,384 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. 
limit=2.0 +2023-02-06 14:37:58,398 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.475e+02 3.133e+02 3.919e+02 6.906e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 14:37:59,996 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:06,201 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106269.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:07,700 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2999, 1.8990, 2.7342, 2.2075, 2.5822, 2.1793, 1.8689, 1.2014], + device='cuda:2'), covar=tensor([0.4579, 0.4388, 0.1363, 0.3006, 0.2039, 0.2637, 0.1729, 0.4808], + device='cuda:2'), in_proj_covar=tensor([0.0897, 0.0890, 0.0742, 0.0867, 0.0950, 0.0821, 0.0704, 0.0778], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 14:38:10,869 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:13,354 INFO [train.py:901] (2/4) Epoch 14, batch 1200, loss[loss=0.2202, simple_loss=0.2933, pruned_loss=0.07358, over 8255.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.303, pruned_loss=0.0737, over 1614924.53 frames. ], batch size: 24, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:38:17,553 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:47,945 INFO [train.py:901] (2/4) Epoch 14, batch 1250, loss[loss=0.203, simple_loss=0.2888, pruned_loss=0.05866, over 8565.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3044, pruned_loss=0.0746, over 1617133.82 frames. ], batch size: 31, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:38:51,204 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 14:38:54,191 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5800, 4.5385, 4.0764, 2.0092, 4.0195, 4.1893, 4.1816, 3.8268], + device='cuda:2'), covar=tensor([0.0666, 0.0583, 0.1051, 0.5049, 0.0803, 0.0842, 0.1159, 0.0847], + device='cuda:2'), in_proj_covar=tensor([0.0474, 0.0393, 0.0399, 0.0498, 0.0392, 0.0394, 0.0381, 0.0341], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 14:39:05,943 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:39:08,463 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.539e+02 3.303e+02 4.386e+02 1.450e+03, threshold=6.607e+02, percent-clipped=4.0 +2023-02-06 14:39:24,632 INFO [train.py:901] (2/4) Epoch 14, batch 1300, loss[loss=0.2113, simple_loss=0.2828, pruned_loss=0.06993, over 7558.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3027, pruned_loss=0.0737, over 1613526.22 frames. ], batch size: 18, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:39:58,995 INFO [train.py:901] (2/4) Epoch 14, batch 1350, loss[loss=0.2139, simple_loss=0.3075, pruned_loss=0.06012, over 8357.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3019, pruned_loss=0.07322, over 1614510.47 frames. 
], batch size: 24, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:40:05,435 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:17,483 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 14:40:19,202 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.550e+02 3.060e+02 3.665e+02 8.767e+02, threshold=6.121e+02, percent-clipped=1.0 +2023-02-06 14:40:29,788 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:35,138 INFO [train.py:901] (2/4) Epoch 14, batch 1400, loss[loss=0.2308, simple_loss=0.3058, pruned_loss=0.0779, over 8342.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3017, pruned_loss=0.07311, over 1617659.91 frames. ], batch size: 49, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:07,398 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:10,541 INFO [train.py:901] (2/4) Epoch 14, batch 1450, loss[loss=0.2598, simple_loss=0.3348, pruned_loss=0.09237, over 8592.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3021, pruned_loss=0.07344, over 1619661.31 frames. ], batch size: 31, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:11,263 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 14:41:12,184 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:24,664 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106550.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:27,432 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,529 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,959 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.546e+02 3.123e+02 4.151e+02 8.254e+02, threshold=6.246e+02, percent-clipped=6.0 +2023-02-06 14:41:47,572 INFO [train.py:901] (2/4) Epoch 14, batch 1500, loss[loss=0.2464, simple_loss=0.3196, pruned_loss=0.08655, over 8030.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3042, pruned_loss=0.07475, over 1619013.94 frames. ], batch size: 22, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,568 INFO [train.py:901] (2/4) Epoch 14, batch 1550, loss[loss=0.2268, simple_loss=0.3087, pruned_loss=0.07243, over 8323.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3049, pruned_loss=0.07505, over 1618815.54 frames. 
], batch size: 25, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,651 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:42:41,334 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.593e+02 3.196e+02 4.114e+02 8.054e+02, threshold=6.391e+02, percent-clipped=4.0 +2023-02-06 14:42:52,899 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0999, 1.6568, 3.4969, 1.4849, 2.3805, 3.7350, 3.8793, 3.2416], + device='cuda:2'), covar=tensor([0.1057, 0.1564, 0.0291, 0.2015, 0.0968, 0.0243, 0.0396, 0.0591], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0303, 0.0263, 0.0293, 0.0281, 0.0242, 0.0362, 0.0293], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 14:42:56,718 INFO [train.py:901] (2/4) Epoch 14, batch 1600, loss[loss=0.2815, simple_loss=0.3435, pruned_loss=0.1098, over 8354.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3053, pruned_loss=0.07533, over 1618715.86 frames. ], batch size: 26, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:43:10,339 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:32,335 INFO [train.py:901] (2/4) Epoch 14, batch 1650, loss[loss=0.2074, simple_loss=0.2845, pruned_loss=0.06518, over 8080.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.07503, over 1614847.85 frames. ], batch size: 21, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:43:35,230 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6613, 1.5704, 1.9662, 1.6943, 0.9730, 1.7221, 2.2619, 1.8692], + device='cuda:2'), covar=tensor([0.0456, 0.1216, 0.1592, 0.1302, 0.0608, 0.1407, 0.0594, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0101, 0.0162, 0.0113, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 14:43:42,581 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:51,907 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.493e+02 3.038e+02 4.078e+02 1.080e+03, threshold=6.076e+02, percent-clipped=3.0 +2023-02-06 14:43:52,348 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.16 vs. limit=5.0 +2023-02-06 14:43:53,531 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5954, 1.9837, 2.1191, 1.1827, 2.2290, 1.4785, 0.4675, 1.8236], + device='cuda:2'), covar=tensor([0.0395, 0.0244, 0.0167, 0.0415, 0.0254, 0.0625, 0.0607, 0.0213], + device='cuda:2'), in_proj_covar=tensor([0.0412, 0.0349, 0.0304, 0.0406, 0.0338, 0.0492, 0.0370, 0.0375], + device='cuda:2'), out_proj_covar=tensor([1.1478e-04, 9.4922e-05, 8.2619e-05, 1.1079e-04, 9.2894e-05, 1.4505e-04, + 1.0328e-04, 1.0334e-04], device='cuda:2') +2023-02-06 14:44:06,421 INFO [train.py:901] (2/4) Epoch 14, batch 1700, loss[loss=0.2152, simple_loss=0.2978, pruned_loss=0.06625, over 7807.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3038, pruned_loss=0.07448, over 1609962.39 frames. 
], batch size: 20, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:28,260 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:31,566 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:33,569 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:43,437 INFO [train.py:901] (2/4) Epoch 14, batch 1750, loss[loss=0.2206, simple_loss=0.2993, pruned_loss=0.07093, over 8026.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3037, pruned_loss=0.07448, over 1610041.57 frames. ], batch size: 22, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:47,857 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:45:04,129 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.358e+02 2.865e+02 3.554e+02 7.426e+02, threshold=5.730e+02, percent-clipped=3.0 +2023-02-06 14:45:18,429 INFO [train.py:901] (2/4) Epoch 14, batch 1800, loss[loss=0.2075, simple_loss=0.2871, pruned_loss=0.0639, over 8250.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3043, pruned_loss=0.0748, over 1609732.79 frames. ], batch size: 24, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:45:54,581 INFO [train.py:901] (2/4) Epoch 14, batch 1850, loss[loss=0.2086, simple_loss=0.2777, pruned_loss=0.06971, over 7668.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3044, pruned_loss=0.07518, over 1610353.41 frames. ], batch size: 19, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:45:55,508 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:16,023 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.570e+02 3.068e+02 3.847e+02 1.325e+03, threshold=6.136e+02, percent-clipped=4.0 +2023-02-06 14:46:29,524 INFO [train.py:901] (2/4) Epoch 14, batch 1900, loss[loss=0.1903, simple_loss=0.2666, pruned_loss=0.05705, over 7832.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3028, pruned_loss=0.07461, over 1605749.04 frames. ], batch size: 20, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:46:37,375 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 14:46:43,881 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:47,076 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 14:46:59,782 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 14:47:01,263 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:03,727 INFO [train.py:901] (2/4) Epoch 14, batch 1950, loss[loss=0.1785, simple_loss=0.2503, pruned_loss=0.05329, over 7431.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3022, pruned_loss=0.07444, over 1604496.04 frames. ], batch size: 17, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:47:19,867 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 14:47:26,055 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.386e+02 2.840e+02 3.483e+02 6.138e+02, threshold=5.681e+02, percent-clipped=1.0 +2023-02-06 14:47:31,988 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:39,093 INFO [train.py:901] (2/4) Epoch 14, batch 2000, loss[loss=0.2394, simple_loss=0.3052, pruned_loss=0.0868, over 6389.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.3023, pruned_loss=0.07407, over 1603396.45 frames. ], batch size: 14, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:47:48,656 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:12,604 INFO [train.py:901] (2/4) Epoch 14, batch 2050, loss[loss=0.2095, simple_loss=0.2954, pruned_loss=0.06175, over 8340.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3027, pruned_loss=0.07418, over 1610141.93 frames. ], batch size: 26, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:33,216 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107158.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:48:34,368 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.411e+02 3.055e+02 3.713e+02 7.642e+02, threshold=6.109e+02, percent-clipped=4.0 +2023-02-06 14:48:42,098 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107170.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:49,407 INFO [train.py:901] (2/4) Epoch 14, batch 2100, loss[loss=0.2581, simple_loss=0.3451, pruned_loss=0.08562, over 8185.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3026, pruned_loss=0.07365, over 1609901.00 frames. ], batch size: 23, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:54,367 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:49:11,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:49:21,509 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4850, 1.8874, 3.4267, 1.3599, 2.4440, 2.0617, 1.6210, 2.3887], + device='cuda:2'), covar=tensor([0.2125, 0.2661, 0.0588, 0.4406, 0.1597, 0.2945, 0.2175, 0.2135], + device='cuda:2'), in_proj_covar=tensor([0.0498, 0.0544, 0.0537, 0.0594, 0.0621, 0.0562, 0.0488, 0.0616], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 14:49:23,201 INFO [train.py:901] (2/4) Epoch 14, batch 2150, loss[loss=0.233, simple_loss=0.3229, pruned_loss=0.07149, over 8341.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3023, pruned_loss=0.07301, over 1610425.27 frames. ], batch size: 25, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:49:44,431 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.615e+02 3.041e+02 3.823e+02 8.460e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 14:49:58,901 INFO [train.py:901] (2/4) Epoch 14, batch 2200, loss[loss=0.2256, simple_loss=0.3034, pruned_loss=0.07387, over 8354.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3014, pruned_loss=0.0724, over 1611244.57 frames. 
], batch size: 26, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:34,527 INFO [train.py:901] (2/4) Epoch 14, batch 2250, loss[loss=0.1735, simple_loss=0.2608, pruned_loss=0.04307, over 7906.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3019, pruned_loss=0.07246, over 1615117.35 frames. ], batch size: 20, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:54,554 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.599e+02 3.319e+02 4.071e+02 1.027e+03, threshold=6.637e+02, percent-clipped=7.0 +2023-02-06 14:51:08,893 INFO [train.py:901] (2/4) Epoch 14, batch 2300, loss[loss=0.2331, simple_loss=0.3198, pruned_loss=0.07319, over 8543.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3026, pruned_loss=0.07298, over 1616583.74 frames. ], batch size: 31, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:51:44,730 INFO [train.py:901] (2/4) Epoch 14, batch 2350, loss[loss=0.2065, simple_loss=0.2736, pruned_loss=0.06968, over 7698.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3006, pruned_loss=0.07229, over 1613611.30 frames. ], batch size: 18, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:00,477 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8909, 1.6109, 2.0499, 1.8074, 1.8711, 1.8603, 1.5882, 0.7732], + device='cuda:2'), covar=tensor([0.4090, 0.3803, 0.1385, 0.2350, 0.1936, 0.2198, 0.1535, 0.3747], + device='cuda:2'), in_proj_covar=tensor([0.0892, 0.0893, 0.0745, 0.0866, 0.0953, 0.0822, 0.0709, 0.0776], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 14:52:04,939 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.358e+02 2.889e+02 3.449e+02 7.134e+02, threshold=5.779e+02, percent-clipped=1.0 +2023-02-06 14:52:18,373 INFO [train.py:901] (2/4) Epoch 14, batch 2400, loss[loss=0.198, simple_loss=0.272, pruned_loss=0.06199, over 7805.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3004, pruned_loss=0.07221, over 1613064.84 frames. ], batch size: 20, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:28,437 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 14:52:34,432 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107502.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:52:43,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:53,748 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:54,996 INFO [train.py:901] (2/4) Epoch 14, batch 2450, loss[loss=0.2295, simple_loss=0.3159, pruned_loss=0.07152, over 8362.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3012, pruned_loss=0.07273, over 1614549.00 frames. ], batch size: 24, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:16,528 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.477e+02 3.089e+02 4.011e+02 1.178e+03, threshold=6.179e+02, percent-clipped=8.0 +2023-02-06 14:53:29,783 INFO [train.py:901] (2/4) Epoch 14, batch 2500, loss[loss=0.2028, simple_loss=0.2811, pruned_loss=0.06222, over 7548.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3004, pruned_loss=0.07238, over 1615511.53 frames. 
], batch size: 18, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:54,962 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5090, 1.4674, 1.7818, 1.4033, 1.1313, 1.7905, 0.1953, 1.2380], + device='cuda:2'), covar=tensor([0.2197, 0.1486, 0.0532, 0.1048, 0.3339, 0.0501, 0.2465, 0.1433], + device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0177, 0.0108, 0.0218, 0.0261, 0.0112, 0.0162, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 14:53:55,571 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107617.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:54:03,470 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:03,973 INFO [train.py:901] (2/4) Epoch 14, batch 2550, loss[loss=0.2188, simple_loss=0.3128, pruned_loss=0.06236, over 8035.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3001, pruned_loss=0.07219, over 1614822.42 frames. ], batch size: 22, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:12,187 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7390, 1.3149, 1.7403, 1.2148, 0.9782, 1.4566, 2.2281, 2.2602], + device='cuda:2'), covar=tensor([0.0471, 0.1767, 0.2334, 0.1821, 0.0691, 0.2076, 0.0725, 0.0623], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0156, 0.0100, 0.0162, 0.0113, 0.0137], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 14:54:17,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7860, 2.0032, 2.1985, 1.2712, 2.2381, 1.5190, 0.9071, 1.9328], + device='cuda:2'), covar=tensor([0.0451, 0.0253, 0.0184, 0.0453, 0.0336, 0.0597, 0.0622, 0.0238], + device='cuda:2'), in_proj_covar=tensor([0.0413, 0.0353, 0.0309, 0.0409, 0.0341, 0.0499, 0.0375, 0.0381], + device='cuda:2'), out_proj_covar=tensor([1.1511e-04, 9.5815e-05, 8.3996e-05, 1.1152e-04, 9.3479e-05, 1.4706e-04, + 1.0461e-04, 1.0489e-04], device='cuda:2') +2023-02-06 14:54:26,274 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.642e+02 3.253e+02 4.518e+02 1.030e+03, threshold=6.506e+02, percent-clipped=5.0 +2023-02-06 14:54:37,781 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:39,572 INFO [train.py:901] (2/4) Epoch 14, batch 2600, loss[loss=0.2389, simple_loss=0.3105, pruned_loss=0.0836, over 8467.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.301, pruned_loss=0.07295, over 1618745.03 frames. ], batch size: 27, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:45,902 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9141, 2.4442, 3.3941, 1.9957, 1.7375, 3.3594, 0.5886, 2.0000], + device='cuda:2'), covar=tensor([0.2048, 0.1701, 0.0371, 0.2358, 0.3600, 0.0283, 0.3213, 0.1884], + device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0178, 0.0107, 0.0219, 0.0261, 0.0112, 0.0163, 0.0175], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 14:55:12,835 INFO [train.py:901] (2/4) Epoch 14, batch 2650, loss[loss=0.2309, simple_loss=0.3168, pruned_loss=0.07245, over 8721.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3017, pruned_loss=0.07335, over 1620715.57 frames. 
], batch size: 30, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:55:30,855 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:55:34,865 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.443e+02 2.980e+02 3.881e+02 9.981e+02, threshold=5.960e+02, percent-clipped=6.0 +2023-02-06 14:55:38,610 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8893, 2.1927, 2.3313, 1.4396, 2.4275, 1.7111, 0.8025, 2.1190], + device='cuda:2'), covar=tensor([0.0423, 0.0231, 0.0178, 0.0405, 0.0279, 0.0644, 0.0582, 0.0222], + device='cuda:2'), in_proj_covar=tensor([0.0415, 0.0353, 0.0308, 0.0411, 0.0341, 0.0499, 0.0376, 0.0381], + device='cuda:2'), out_proj_covar=tensor([1.1548e-04, 9.5921e-05, 8.3857e-05, 1.1200e-04, 9.3445e-05, 1.4695e-04, + 1.0487e-04, 1.0510e-04], device='cuda:2') +2023-02-06 14:55:49,934 INFO [train.py:901] (2/4) Epoch 14, batch 2700, loss[loss=0.2161, simple_loss=0.3074, pruned_loss=0.06235, over 8566.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3009, pruned_loss=0.07284, over 1619317.39 frames. ], batch size: 31, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:04,454 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5782, 2.8675, 1.8722, 2.3147, 2.3889, 1.5435, 2.0960, 2.2193], + device='cuda:2'), covar=tensor([0.1527, 0.0310, 0.1054, 0.0670, 0.0648, 0.1336, 0.1073, 0.0948], + device='cuda:2'), in_proj_covar=tensor([0.0343, 0.0230, 0.0316, 0.0295, 0.0296, 0.0319, 0.0336, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:56:23,700 INFO [train.py:901] (2/4) Epoch 14, batch 2750, loss[loss=0.2139, simple_loss=0.2835, pruned_loss=0.07217, over 7705.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3011, pruned_loss=0.0728, over 1618431.04 frames. ], batch size: 18, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:44,689 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.404e+02 2.918e+02 3.592e+02 1.217e+03, threshold=5.837e+02, percent-clipped=4.0 +2023-02-06 14:56:53,331 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107872.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:56:54,144 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107873.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:56:59,450 INFO [train.py:901] (2/4) Epoch 14, batch 2800, loss[loss=0.224, simple_loss=0.2998, pruned_loss=0.07414, over 7968.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.302, pruned_loss=0.07284, over 1619109.62 frames. ], batch size: 21, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:57:03,899 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:12,675 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107898.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:57:20,776 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:33,814 INFO [train.py:901] (2/4) Epoch 14, batch 2850, loss[loss=0.2645, simple_loss=0.3479, pruned_loss=0.09053, over 8356.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.302, pruned_loss=0.07279, over 1617487.17 frames. 
], batch size: 24, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:57:40,958 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2878, 1.9273, 2.8180, 2.2265, 2.7610, 2.1673, 1.7997, 1.4603], + device='cuda:2'), covar=tensor([0.4265, 0.4226, 0.1357, 0.2911, 0.1788, 0.2358, 0.1701, 0.4257], + device='cuda:2'), in_proj_covar=tensor([0.0898, 0.0897, 0.0741, 0.0867, 0.0952, 0.0827, 0.0711, 0.0780], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 14:57:54,103 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.478e+02 3.087e+02 3.919e+02 8.173e+02, threshold=6.173e+02, percent-clipped=5.0 +2023-02-06 14:58:08,144 INFO [train.py:901] (2/4) Epoch 14, batch 2900, loss[loss=0.2374, simple_loss=0.3018, pruned_loss=0.08643, over 7804.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3018, pruned_loss=0.07282, over 1615953.67 frames. ], batch size: 20, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:58:12,786 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:27,879 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 14:58:38,640 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:44,575 INFO [train.py:901] (2/4) Epoch 14, batch 2950, loss[loss=0.1949, simple_loss=0.2626, pruned_loss=0.06356, over 7787.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3009, pruned_loss=0.07259, over 1612515.31 frames. ], batch size: 19, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:58:55,741 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 14:59:04,838 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.696e+02 3.199e+02 4.019e+02 8.231e+02, threshold=6.398e+02, percent-clipped=3.0 +2023-02-06 14:59:05,061 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9432, 3.6625, 2.2135, 2.8521, 2.9009, 2.0221, 2.5885, 2.7574], + device='cuda:2'), covar=tensor([0.1647, 0.0313, 0.1094, 0.0692, 0.0633, 0.1389, 0.1101, 0.1102], + device='cuda:2'), in_proj_covar=tensor([0.0342, 0.0228, 0.0318, 0.0296, 0.0295, 0.0320, 0.0339, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 14:59:18,172 INFO [train.py:901] (2/4) Epoch 14, batch 3000, loss[loss=0.2366, simple_loss=0.3141, pruned_loss=0.07951, over 8494.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3022, pruned_loss=0.07318, over 1617278.59 frames. ], batch size: 26, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:59:18,173 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 14:59:30,508 INFO [train.py:935] (2/4) Epoch 14, validation: loss=0.1827, simple_loss=0.283, pruned_loss=0.04121, over 944034.00 frames. 
+2023-02-06 14:59:30,510 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 14:59:37,800 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0814, 1.2668, 1.2583, 0.5996, 1.2313, 0.9657, 0.0767, 1.1973], + device='cuda:2'), covar=tensor([0.0304, 0.0286, 0.0243, 0.0442, 0.0340, 0.0791, 0.0636, 0.0265], + device='cuda:2'), in_proj_covar=tensor([0.0418, 0.0355, 0.0309, 0.0411, 0.0342, 0.0500, 0.0378, 0.0382], + device='cuda:2'), out_proj_covar=tensor([1.1639e-04, 9.6309e-05, 8.3979e-05, 1.1187e-04, 9.3452e-05, 1.4703e-04, + 1.0569e-04, 1.0526e-04], device='cuda:2') +2023-02-06 14:59:43,701 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:59:50,130 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 15:00:05,760 INFO [train.py:901] (2/4) Epoch 14, batch 3050, loss[loss=0.2159, simple_loss=0.2915, pruned_loss=0.07013, over 7928.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3028, pruned_loss=0.0735, over 1623117.20 frames. ], batch size: 20, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:00:10,682 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:28,082 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.670e+02 3.118e+02 3.835e+02 7.160e+02, threshold=6.236e+02, percent-clipped=1.0 +2023-02-06 15:00:41,676 INFO [train.py:901] (2/4) Epoch 14, batch 3100, loss[loss=0.2176, simple_loss=0.2996, pruned_loss=0.06782, over 8085.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3024, pruned_loss=0.07297, over 1623809.99 frames. ], batch size: 21, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:04,692 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108214.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:15,954 INFO [train.py:901] (2/4) Epoch 14, batch 3150, loss[loss=0.1715, simple_loss=0.2427, pruned_loss=0.05009, over 7787.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3027, pruned_loss=0.07338, over 1619705.20 frames. ], batch size: 19, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:24,690 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:37,159 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.570e+02 3.163e+02 4.155e+02 7.848e+02, threshold=6.326e+02, percent-clipped=5.0 +2023-02-06 15:01:43,487 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:46,163 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7226, 1.3294, 4.8094, 1.8322, 4.2669, 3.9615, 4.3015, 4.2157], + device='cuda:2'), covar=tensor([0.0467, 0.4828, 0.0426, 0.3669, 0.0979, 0.0794, 0.0557, 0.0534], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0600, 0.0615, 0.0565, 0.0643, 0.0550, 0.0535, 0.0604], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 15:01:51,504 INFO [train.py:901] (2/4) Epoch 14, batch 3200, loss[loss=0.1677, simple_loss=0.2482, pruned_loss=0.04363, over 6842.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3018, pruned_loss=0.07248, over 1619784.88 frames. 
], batch size: 15, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:02:25,764 INFO [train.py:901] (2/4) Epoch 14, batch 3250, loss[loss=0.1914, simple_loss=0.2771, pruned_loss=0.05283, over 8254.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3017, pruned_loss=0.0723, over 1619777.73 frames. ], batch size: 22, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:02:35,710 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:02:47,016 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.638e+02 3.239e+02 4.086e+02 1.012e+03, threshold=6.478e+02, percent-clipped=4.0 +2023-02-06 15:03:02,194 INFO [train.py:901] (2/4) Epoch 14, batch 3300, loss[loss=0.2583, simple_loss=0.3403, pruned_loss=0.08814, over 8355.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3021, pruned_loss=0.07234, over 1620406.53 frames. ], batch size: 24, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:11,256 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:17,375 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4296, 2.0780, 3.3796, 1.2873, 2.3638, 1.9253, 1.6671, 2.3700], + device='cuda:2'), covar=tensor([0.1823, 0.2190, 0.0775, 0.3952, 0.1759, 0.2794, 0.1919, 0.2352], + device='cuda:2'), in_proj_covar=tensor([0.0499, 0.0548, 0.0543, 0.0595, 0.0621, 0.0561, 0.0492, 0.0618], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 15:03:27,911 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:36,472 INFO [train.py:901] (2/4) Epoch 14, batch 3350, loss[loss=0.2286, simple_loss=0.2907, pruned_loss=0.08329, over 7821.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3034, pruned_loss=0.0733, over 1620561.72 frames. ], batch size: 20, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:49,806 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:57,194 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.656e+02 3.299e+02 4.467e+02 8.781e+02, threshold=6.597e+02, percent-clipped=5.0 +2023-02-06 15:04:04,262 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:10,805 INFO [train.py:901] (2/4) Epoch 14, batch 3400, loss[loss=0.1819, simple_loss=0.2665, pruned_loss=0.0486, over 7674.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3034, pruned_loss=0.07336, over 1618134.31 frames. ], batch size: 19, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:04:22,161 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:46,705 INFO [train.py:901] (2/4) Epoch 14, batch 3450, loss[loss=0.2158, simple_loss=0.292, pruned_loss=0.06978, over 8568.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3038, pruned_loss=0.07398, over 1619530.43 frames. 
], batch size: 39, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:07,907 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.481e+02 3.055e+02 3.627e+02 7.933e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-06 15:05:21,982 INFO [train.py:901] (2/4) Epoch 14, batch 3500, loss[loss=0.2295, simple_loss=0.3129, pruned_loss=0.07304, over 8335.00 frames. ], tot_loss[loss=0.227, simple_loss=0.305, pruned_loss=0.07452, over 1624583.17 frames. ], batch size: 25, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:29,138 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 15:05:31,991 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:05:57,712 INFO [train.py:901] (2/4) Epoch 14, batch 3550, loss[loss=0.2346, simple_loss=0.3109, pruned_loss=0.07913, over 8531.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3037, pruned_loss=0.07433, over 1618429.39 frames. ], batch size: 28, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:17,919 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.417e+02 3.151e+02 4.175e+02 8.210e+02, threshold=6.301e+02, percent-clipped=3.0 +2023-02-06 15:06:31,408 INFO [train.py:901] (2/4) Epoch 14, batch 3600, loss[loss=0.1877, simple_loss=0.2694, pruned_loss=0.05293, over 7696.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3025, pruned_loss=0.07425, over 1613737.33 frames. ], batch size: 18, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:36,123 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108687.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:07,032 INFO [train.py:901] (2/4) Epoch 14, batch 3650, loss[loss=0.1941, simple_loss=0.277, pruned_loss=0.0556, over 8079.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.3037, pruned_loss=0.07439, over 1612985.45 frames. ], batch size: 21, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:27,798 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 2.655e+02 3.191e+02 3.880e+02 8.243e+02, threshold=6.382e+02, percent-clipped=2.0 +2023-02-06 15:07:30,605 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 15:07:41,526 INFO [train.py:901] (2/4) Epoch 14, batch 3700, loss[loss=0.2715, simple_loss=0.3406, pruned_loss=0.1012, over 8337.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.303, pruned_loss=0.07373, over 1616097.99 frames. 
], batch size: 26, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:47,698 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0129, 1.2272, 1.2213, 0.5848, 1.2201, 1.0159, 0.0555, 1.1426], + device='cuda:2'), covar=tensor([0.0296, 0.0278, 0.0240, 0.0391, 0.0294, 0.0707, 0.0576, 0.0233], + device='cuda:2'), in_proj_covar=tensor([0.0414, 0.0349, 0.0306, 0.0406, 0.0338, 0.0494, 0.0372, 0.0377], + device='cuda:2'), out_proj_covar=tensor([1.1507e-04, 9.4579e-05, 8.2963e-05, 1.1041e-04, 9.2260e-05, 1.4516e-04, + 1.0364e-04, 1.0377e-04], device='cuda:2') +2023-02-06 15:07:50,764 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:56,397 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:01,073 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:15,867 INFO [train.py:901] (2/4) Epoch 14, batch 3750, loss[loss=0.2236, simple_loss=0.2975, pruned_loss=0.07482, over 7918.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3025, pruned_loss=0.07284, over 1619200.71 frames. ], batch size: 20, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:08:37,485 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.414e+02 2.846e+02 3.664e+02 8.039e+02, threshold=5.692e+02, percent-clipped=5.0 +2023-02-06 15:08:37,680 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7441, 1.5706, 3.1313, 1.4707, 2.2495, 3.3709, 3.4685, 2.9078], + device='cuda:2'), covar=tensor([0.1096, 0.1521, 0.0335, 0.1887, 0.0891, 0.0277, 0.0480, 0.0630], + device='cuda:2'), in_proj_covar=tensor([0.0272, 0.0303, 0.0267, 0.0297, 0.0283, 0.0245, 0.0366, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 15:08:51,938 INFO [train.py:901] (2/4) Epoch 14, batch 3800, loss[loss=0.2324, simple_loss=0.3151, pruned_loss=0.0749, over 8293.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.302, pruned_loss=0.07312, over 1614866.38 frames. ], batch size: 23, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:09:12,245 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:21,318 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1602, 1.7094, 4.3537, 2.0099, 2.3488, 4.9276, 4.9073, 4.3091], + device='cuda:2'), covar=tensor([0.1152, 0.1659, 0.0269, 0.1924, 0.1326, 0.0188, 0.0368, 0.0523], + device='cuda:2'), in_proj_covar=tensor([0.0272, 0.0303, 0.0266, 0.0296, 0.0283, 0.0246, 0.0367, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 15:09:26,876 INFO [train.py:901] (2/4) Epoch 14, batch 3850, loss[loss=0.2124, simple_loss=0.2953, pruned_loss=0.06477, over 8484.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3023, pruned_loss=0.07341, over 1619612.57 frames. 
], batch size: 29, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:09:33,935 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:35,483 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6506, 2.2087, 3.5344, 2.6409, 3.1952, 2.4932, 2.1720, 1.8519], + device='cuda:2'), covar=tensor([0.4087, 0.4478, 0.1275, 0.3188, 0.2247, 0.2467, 0.1736, 0.4846], + device='cuda:2'), in_proj_covar=tensor([0.0892, 0.0896, 0.0741, 0.0869, 0.0948, 0.0825, 0.0708, 0.0777], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 15:09:35,959 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 15:09:49,057 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.574e+02 3.020e+02 4.517e+02 9.725e+02, threshold=6.039e+02, percent-clipped=15.0 +2023-02-06 15:09:51,340 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9929, 2.3945, 1.8431, 2.8801, 1.3428, 1.4984, 2.0411, 2.5714], + device='cuda:2'), covar=tensor([0.0786, 0.0824, 0.1021, 0.0408, 0.1186, 0.1659, 0.1019, 0.0699], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0207, 0.0252, 0.0214, 0.0216, 0.0252, 0.0257, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 15:10:04,279 INFO [train.py:901] (2/4) Epoch 14, batch 3900, loss[loss=0.1867, simple_loss=0.2696, pruned_loss=0.05187, over 8082.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3016, pruned_loss=0.07287, over 1616235.40 frames. ], batch size: 21, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:07,763 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1861, 1.1526, 4.5762, 1.7545, 3.6350, 3.5730, 4.0727, 4.0419], + device='cuda:2'), covar=tensor([0.1088, 0.6498, 0.0803, 0.4578, 0.1860, 0.1439, 0.0974, 0.0894], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0608, 0.0623, 0.0570, 0.0645, 0.0552, 0.0543, 0.0606], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 15:10:26,821 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1647, 4.1321, 3.7821, 2.4742, 3.8028, 3.7862, 3.8781, 3.4481], + device='cuda:2'), covar=tensor([0.0861, 0.0611, 0.0959, 0.3944, 0.0881, 0.1147, 0.1125, 0.0981], + device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0396, 0.0404, 0.0497, 0.0395, 0.0401, 0.0385, 0.0345], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 15:10:39,023 INFO [train.py:901] (2/4) Epoch 14, batch 3950, loss[loss=0.2171, simple_loss=0.3079, pruned_loss=0.06313, over 8503.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3013, pruned_loss=0.07281, over 1613281.24 frames. ], batch size: 28, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:40,197 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. 
limit=5.0 +2023-02-06 15:10:56,307 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:10:56,956 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109055.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:10:58,501 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.55 vs. limit=5.0 +2023-02-06 15:10:59,097 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:00,248 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.458e+02 2.966e+02 3.777e+02 8.079e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-06 15:11:14,744 INFO [train.py:901] (2/4) Epoch 14, batch 4000, loss[loss=0.2482, simple_loss=0.3288, pruned_loss=0.08386, over 8532.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3019, pruned_loss=0.07349, over 1613022.38 frames. ], batch size: 49, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:11:17,674 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:23,849 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1619, 1.3914, 1.5666, 1.3346, 0.8271, 1.4184, 1.7879, 1.6537], + device='cuda:2'), covar=tensor([0.0462, 0.1322, 0.1728, 0.1443, 0.0628, 0.1521, 0.0698, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0157, 0.0101, 0.0162, 0.0113, 0.0137], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 15:11:43,039 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7395, 5.8351, 5.0479, 2.3899, 5.0745, 5.6102, 5.3231, 5.3220], + device='cuda:2'), covar=tensor([0.0508, 0.0371, 0.0901, 0.4594, 0.0715, 0.0738, 0.0960, 0.0533], + device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0399, 0.0407, 0.0500, 0.0399, 0.0402, 0.0388, 0.0348], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 15:11:50,547 INFO [train.py:901] (2/4) Epoch 14, batch 4050, loss[loss=0.201, simple_loss=0.2719, pruned_loss=0.06504, over 7514.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3009, pruned_loss=0.07266, over 1610144.28 frames. ], batch size: 18, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:06,803 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:11,650 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.362e+02 2.684e+02 3.543e+02 7.215e+02, threshold=5.369e+02, percent-clipped=4.0 +2023-02-06 15:12:16,034 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:26,284 INFO [train.py:901] (2/4) Epoch 14, batch 4100, loss[loss=0.2753, simple_loss=0.3419, pruned_loss=0.1043, over 8526.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3024, pruned_loss=0.07388, over 1608149.70 frames. 
], batch size: 29, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:33,976 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:50,502 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:13:02,498 INFO [train.py:901] (2/4) Epoch 14, batch 4150, loss[loss=0.2422, simple_loss=0.3153, pruned_loss=0.08451, over 8644.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3022, pruned_loss=0.0737, over 1609939.10 frames. ], batch size: 34, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:13:10,242 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1547, 3.8651, 2.2018, 2.6088, 2.9403, 2.1523, 2.6538, 2.9731], + device='cuda:2'), covar=tensor([0.1491, 0.0271, 0.0988, 0.0721, 0.0599, 0.1186, 0.1041, 0.1020], + device='cuda:2'), in_proj_covar=tensor([0.0344, 0.0231, 0.0320, 0.0294, 0.0297, 0.0324, 0.0342, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 15:13:23,981 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.677e+02 3.078e+02 3.893e+02 8.547e+02, threshold=6.157e+02, percent-clipped=10.0 +2023-02-06 15:13:28,944 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:13:35,717 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 15:13:37,136 INFO [train.py:901] (2/4) Epoch 14, batch 4200, loss[loss=0.1901, simple_loss=0.2653, pruned_loss=0.05746, over 7795.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3011, pruned_loss=0.07268, over 1611535.65 frames. ], batch size: 19, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:13:59,673 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109310.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:00,996 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:01,568 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 15:14:14,549 INFO [train.py:901] (2/4) Epoch 14, batch 4250, loss[loss=0.2198, simple_loss=0.3048, pruned_loss=0.0674, over 8447.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3008, pruned_loss=0.07234, over 1615369.13 frames. 
], batch size: 49, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:14:18,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:18,815 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:28,518 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8916, 1.5761, 2.0576, 1.8073, 1.9634, 1.8674, 1.6228, 0.7657], + device='cuda:2'), covar=tensor([0.4847, 0.4175, 0.1533, 0.2820, 0.2014, 0.2344, 0.1765, 0.4295], + device='cuda:2'), in_proj_covar=tensor([0.0896, 0.0899, 0.0743, 0.0873, 0.0950, 0.0827, 0.0712, 0.0781], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 15:14:32,592 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2199, 1.5748, 3.5283, 1.4442, 2.2085, 3.7880, 3.8632, 3.1674], + device='cuda:2'), covar=tensor([0.0997, 0.1619, 0.0284, 0.2207, 0.1127, 0.0235, 0.0523, 0.0601], + device='cuda:2'), in_proj_covar=tensor([0.0272, 0.0302, 0.0267, 0.0296, 0.0283, 0.0246, 0.0366, 0.0293], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 15:14:35,684 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.488e+02 3.016e+02 3.845e+02 8.299e+02, threshold=6.033e+02, percent-clipped=4.0 +2023-02-06 15:14:48,162 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6964, 1.1163, 1.4334, 1.0689, 0.9165, 1.2286, 1.4985, 1.3535], + device='cuda:2'), covar=tensor([0.0594, 0.1978, 0.2484, 0.1878, 0.0694, 0.2183, 0.0801, 0.0780], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0156, 0.0101, 0.0162, 0.0113, 0.0137], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 15:14:48,667 INFO [train.py:901] (2/4) Epoch 14, batch 4300, loss[loss=0.2452, simple_loss=0.3245, pruned_loss=0.08297, over 8238.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3027, pruned_loss=0.07373, over 1613770.74 frames. ], batch size: 24, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:15:01,894 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109399.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:15:24,493 INFO [train.py:901] (2/4) Epoch 14, batch 4350, loss[loss=0.1774, simple_loss=0.2531, pruned_loss=0.05083, over 7427.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3021, pruned_loss=0.07327, over 1613942.87 frames. ], batch size: 17, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:15:27,404 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8956, 3.8300, 3.4731, 1.7112, 3.4402, 3.4244, 3.4022, 3.1475], + device='cuda:2'), covar=tensor([0.0868, 0.0706, 0.1310, 0.4718, 0.0948, 0.1020, 0.1706, 0.1031], + device='cuda:2'), in_proj_covar=tensor([0.0476, 0.0392, 0.0400, 0.0491, 0.0392, 0.0396, 0.0383, 0.0343], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 15:15:34,081 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 15:15:47,273 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.678e+02 3.270e+02 4.253e+02 1.326e+03, threshold=6.540e+02, percent-clipped=8.0 +2023-02-06 15:16:00,544 INFO [train.py:901] (2/4) Epoch 14, batch 4400, loss[loss=0.2588, simple_loss=0.329, pruned_loss=0.09426, over 8181.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3029, pruned_loss=0.07356, over 1618026.58 frames. ], batch size: 23, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:16:15,765 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 15:16:24,166 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109514.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:16:31,893 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:35,898 INFO [train.py:901] (2/4) Epoch 14, batch 4450, loss[loss=0.2015, simple_loss=0.2745, pruned_loss=0.06427, over 7707.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3021, pruned_loss=0.073, over 1616330.95 frames. ], batch size: 18, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:16:49,975 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:55,367 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:58,634 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.456e+02 2.864e+02 3.608e+02 1.087e+03, threshold=5.728e+02, percent-clipped=4.0 +2023-02-06 15:17:11,127 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 15:17:12,486 INFO [train.py:901] (2/4) Epoch 14, batch 4500, loss[loss=0.1883, simple_loss=0.2627, pruned_loss=0.05697, over 7422.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3015, pruned_loss=0.07274, over 1612479.71 frames. ], batch size: 17, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:17:24,283 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:17:47,002 INFO [train.py:901] (2/4) Epoch 14, batch 4550, loss[loss=0.1712, simple_loss=0.2446, pruned_loss=0.04894, over 7658.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3022, pruned_loss=0.07324, over 1612976.47 frames. ], batch size: 19, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:18:05,732 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:09,031 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.640e+02 3.232e+02 4.162e+02 9.021e+02, threshold=6.464e+02, percent-clipped=8.0 +2023-02-06 15:18:16,734 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109671.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:21,366 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:23,259 INFO [train.py:901] (2/4) Epoch 14, batch 4600, loss[loss=0.2046, simple_loss=0.2902, pruned_loss=0.05951, over 8100.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3008, pruned_loss=0.07214, over 1616035.69 frames. 
], batch size: 23, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:18:23,331 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:42,475 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 15:18:56,947 INFO [train.py:901] (2/4) Epoch 14, batch 4650, loss[loss=0.2325, simple_loss=0.3109, pruned_loss=0.0771, over 7978.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3003, pruned_loss=0.07213, over 1612555.91 frames. ], batch size: 21, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:19:08,007 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0606, 2.3688, 2.7199, 1.4731, 2.7690, 1.6948, 1.6485, 2.0260], + device='cuda:2'), covar=tensor([0.0509, 0.0284, 0.0205, 0.0508, 0.0299, 0.0636, 0.0546, 0.0356], + device='cuda:2'), in_proj_covar=tensor([0.0415, 0.0353, 0.0304, 0.0409, 0.0341, 0.0498, 0.0373, 0.0379], + device='cuda:2'), out_proj_covar=tensor([1.1533e-04, 9.5424e-05, 8.2226e-05, 1.1137e-04, 9.2971e-05, 1.4660e-04, + 1.0396e-04, 1.0414e-04], device='cuda:2') +2023-02-06 15:19:18,374 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 15:19:18,715 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.556e+02 3.032e+02 3.907e+02 9.020e+02, threshold=6.065e+02, percent-clipped=4.0 +2023-02-06 15:19:24,820 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109770.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:19:25,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:19:31,312 INFO [train.py:901] (2/4) Epoch 14, batch 4700, loss[loss=0.2478, simple_loss=0.3228, pruned_loss=0.08635, over 8361.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.301, pruned_loss=0.07284, over 1611319.28 frames. ], batch size: 24, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:19:39,520 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8867, 2.4438, 2.9422, 1.3546, 3.1320, 1.5857, 1.4499, 1.7906], + device='cuda:2'), covar=tensor([0.0731, 0.0359, 0.0263, 0.0636, 0.0408, 0.0829, 0.0780, 0.0502], + device='cuda:2'), in_proj_covar=tensor([0.0414, 0.0352, 0.0303, 0.0408, 0.0340, 0.0497, 0.0372, 0.0378], + device='cuda:2'), out_proj_covar=tensor([1.1512e-04, 9.5139e-05, 8.1990e-05, 1.1119e-04, 9.2708e-05, 1.4612e-04, + 1.0354e-04, 1.0378e-04], device='cuda:2') +2023-02-06 15:19:42,824 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:19:42,848 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109795.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:20:06,516 INFO [train.py:901] (2/4) Epoch 14, batch 4750, loss[loss=0.2152, simple_loss=0.2902, pruned_loss=0.07005, over 8105.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3019, pruned_loss=0.07308, over 1614292.49 frames. ], batch size: 23, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:20:10,482 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 15:20:12,424 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-06 15:20:26,961 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.643e+02 3.166e+02 4.371e+02 1.104e+03, threshold=6.332e+02, percent-clipped=5.0 +2023-02-06 15:20:40,300 INFO [train.py:901] (2/4) Epoch 14, batch 4800, loss[loss=0.2219, simple_loss=0.2917, pruned_loss=0.07605, over 8151.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3007, pruned_loss=0.07296, over 1608700.57 frames. ], batch size: 22, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:20:50,358 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0353, 2.2713, 1.8614, 2.9469, 1.2543, 1.6257, 2.2371, 2.3976], + device='cuda:2'), covar=tensor([0.0740, 0.0840, 0.0895, 0.0344, 0.1177, 0.1347, 0.0881, 0.0768], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0207, 0.0252, 0.0211, 0.0213, 0.0250, 0.0256, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 15:21:03,196 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 15:21:14,265 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:16,039 INFO [train.py:901] (2/4) Epoch 14, batch 4850, loss[loss=0.2311, simple_loss=0.3135, pruned_loss=0.07433, over 8510.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.2999, pruned_loss=0.07256, over 1604504.80 frames. ], batch size: 26, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:21:23,509 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109941.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:31,141 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109952.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:37,038 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.854e+02 3.344e+02 7.947e+02, threshold=5.708e+02, percent-clipped=2.0 +2023-02-06 15:21:49,880 INFO [train.py:901] (2/4) Epoch 14, batch 4900, loss[loss=0.2518, simple_loss=0.33, pruned_loss=0.08685, over 8252.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3023, pruned_loss=0.07359, over 1606680.04 frames. ], batch size: 24, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:22:19,457 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:24,322 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:26,032 INFO [train.py:901] (2/4) Epoch 14, batch 4950, loss[loss=0.2608, simple_loss=0.3403, pruned_loss=0.09065, over 8515.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3007, pruned_loss=0.07269, over 1609177.58 frames. 
], batch size: 28, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:22:30,963 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:39,018 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5744, 1.9811, 3.1114, 1.3837, 2.3115, 2.0631, 1.6957, 2.2013], + device='cuda:2'), covar=tensor([0.1876, 0.2445, 0.0943, 0.4069, 0.1786, 0.2950, 0.2010, 0.2362], + device='cuda:2'), in_proj_covar=tensor([0.0499, 0.0547, 0.0539, 0.0593, 0.0619, 0.0563, 0.0486, 0.0616], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 15:22:41,738 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:42,425 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:45,152 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:48,332 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.585e+02 3.180e+02 4.032e+02 7.448e+02, threshold=6.360e+02, percent-clipped=3.0 +2023-02-06 15:22:49,157 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:58,260 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110076.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:00,777 INFO [train.py:901] (2/4) Epoch 14, batch 5000, loss[loss=0.1973, simple_loss=0.2792, pruned_loss=0.05769, over 7975.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3007, pruned_loss=0.07263, over 1610435.70 frames. ], batch size: 21, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:23:31,467 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5251, 1.3659, 2.3327, 1.1901, 2.1011, 2.5130, 2.6586, 2.0952], + device='cuda:2'), covar=tensor([0.0949, 0.1220, 0.0439, 0.2020, 0.0733, 0.0346, 0.0605, 0.0735], + device='cuda:2'), in_proj_covar=tensor([0.0270, 0.0302, 0.0266, 0.0294, 0.0281, 0.0243, 0.0366, 0.0292], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 15:23:33,487 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:34,635 INFO [train.py:901] (2/4) Epoch 14, batch 5050, loss[loss=0.2801, simple_loss=0.3518, pruned_loss=0.1042, over 8823.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3019, pruned_loss=0.07357, over 1606224.76 frames. ], batch size: 40, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:23:36,750 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:38,741 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:41,650 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 15:23:43,174 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-06 15:23:57,271 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.889e+02 3.601e+02 4.263e+02 9.587e+02, threshold=7.203e+02, percent-clipped=6.0 +2023-02-06 15:24:09,949 INFO [train.py:901] (2/4) Epoch 14, batch 5100, loss[loss=0.2096, simple_loss=0.3019, pruned_loss=0.05868, over 8471.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3021, pruned_loss=0.07368, over 1608595.08 frames. ], batch size: 25, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:24:21,133 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5066, 2.2265, 2.8231, 1.7780, 1.7068, 2.8284, 1.0662, 2.0667], + device='cuda:2'), covar=tensor([0.2020, 0.1335, 0.0494, 0.2316, 0.3427, 0.0488, 0.2581, 0.1847], + device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0175, 0.0107, 0.0217, 0.0262, 0.0113, 0.0162, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 15:24:42,795 INFO [train.py:901] (2/4) Epoch 14, batch 5150, loss[loss=0.2787, simple_loss=0.3486, pruned_loss=0.1044, over 8482.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3018, pruned_loss=0.07401, over 1606455.60 frames. ], batch size: 29, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:24:44,028 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.21 vs. limit=5.0 +2023-02-06 15:25:05,057 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.455e+02 3.012e+02 3.817e+02 9.599e+02, threshold=6.024e+02, percent-clipped=2.0 +2023-02-06 15:25:19,324 INFO [train.py:901] (2/4) Epoch 14, batch 5200, loss[loss=0.2363, simple_loss=0.3101, pruned_loss=0.08123, over 8358.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3032, pruned_loss=0.0745, over 1609611.47 frames. ], batch size: 24, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:25:33,778 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:39,480 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 15:25:41,059 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:47,126 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:53,166 INFO [train.py:901] (2/4) Epoch 14, batch 5250, loss[loss=0.1848, simple_loss=0.2587, pruned_loss=0.05543, over 7640.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3022, pruned_loss=0.07421, over 1607142.10 frames. ], batch size: 19, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:25:57,544 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:58,346 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:15,514 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.532e+02 3.204e+02 3.879e+02 8.466e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 15:26:28,847 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110379.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:29,481 INFO [train.py:901] (2/4) Epoch 14, batch 5300, loss[loss=0.2026, simple_loss=0.2916, pruned_loss=0.05679, over 7920.00 frames. 
], tot_loss[loss=0.2259, simple_loss=0.3031, pruned_loss=0.07441, over 1608368.76 frames. ], batch size: 20, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:26:37,624 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 15:26:39,495 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110392.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:26:48,920 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110406.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:26:56,331 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:27:03,328 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0
+2023-02-06 15:27:05,003 INFO [train.py:901] (2/4) Epoch 14, batch 5350, loss[loss=0.234, simple_loss=0.3141, pruned_loss=0.07695, over 8505.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3034, pruned_loss=0.07404, over 1614058.04 frames. ], batch size: 26, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:27:25,505 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.452e+02 3.047e+02 3.791e+02 6.566e+02, threshold=6.094e+02, percent-clipped=2.0
+2023-02-06 15:27:32,803 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110472.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:27:36,872 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:27:38,768 INFO [train.py:901] (2/4) Epoch 14, batch 5400, loss[loss=0.3003, simple_loss=0.3582, pruned_loss=0.1213, over 8606.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3048, pruned_loss=0.07481, over 1612452.78 frames. ], batch size: 31, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:27:48,332 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110494.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:28:08,457 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110521.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:28:14,356 INFO [train.py:901] (2/4) Epoch 14, batch 5450, loss[loss=0.2507, simple_loss=0.3217, pruned_loss=0.08978, over 6966.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3045, pruned_loss=0.07473, over 1612980.91 frames. ], batch size: 71, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:28:30,484 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 15:28:34,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.429e+02 2.846e+02 3.589e+02 7.640e+02, threshold=5.692e+02, percent-clipped=1.0
+2023-02-06 15:28:39,769 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3037, 2.6333, 1.7557, 2.1303, 2.2123, 1.5002, 1.9069, 2.0079],
+ device='cuda:2'), covar=tensor([0.1539, 0.0360, 0.1106, 0.0603, 0.0670, 0.1495, 0.1062, 0.0891],
+ device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0235, 0.0322, 0.0297, 0.0298, 0.0328, 0.0344, 0.0311],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:28:47,542 INFO [train.py:901] (2/4) Epoch 14, batch 5500, loss[loss=0.2474, simple_loss=0.3268, pruned_loss=0.08397, over 8522.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3045, pruned_loss=0.07488, over 1614129.24 frames. ], batch size: 49, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:28:52,322 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:28:55,815 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110592.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:01,840 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7530, 1.8291, 2.3490, 1.6182, 1.2454, 2.3906, 0.3232, 1.3779],
+ device='cuda:2'), covar=tensor([0.2579, 0.1672, 0.0475, 0.2430, 0.4118, 0.0440, 0.3129, 0.1911],
+ device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0176, 0.0108, 0.0219, 0.0263, 0.0113, 0.0162, 0.0174],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 15:29:07,790 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0336, 1.5197, 3.6209, 1.5144, 2.2721, 3.9644, 3.9699, 3.4112],
+ device='cuda:2'), covar=tensor([0.1045, 0.1681, 0.0277, 0.2055, 0.1070, 0.0195, 0.0405, 0.0535],
+ device='cuda:2'), in_proj_covar=tensor([0.0271, 0.0304, 0.0268, 0.0295, 0.0283, 0.0243, 0.0368, 0.0290],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:2')
+2023-02-06 15:29:23,307 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110629.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:23,833 INFO [train.py:901] (2/4) Epoch 14, batch 5550, loss[loss=0.2575, simple_loss=0.3276, pruned_loss=0.0937, over 8133.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3029, pruned_loss=0.07411, over 1612866.20 frames. ], batch size: 22, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:29:25,763 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0
+2023-02-06 15:29:28,288 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0
+2023-02-06 15:29:34,006 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:35,490 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4385, 2.7319, 1.9086, 2.1894, 2.3137, 1.5785, 2.1089, 2.0066],
+ device='cuda:2'), covar=tensor([0.1290, 0.0327, 0.1042, 0.0572, 0.0599, 0.1375, 0.0849, 0.0824],
+ device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0235, 0.0323, 0.0298, 0.0298, 0.0329, 0.0345, 0.0312],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:29:44,459 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.421e+02 3.120e+02 3.692e+02 1.093e+03, threshold=6.240e+02, percent-clipped=9.0
+2023-02-06 15:29:47,079 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110665.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:29:56,916 INFO [train.py:901] (2/4) Epoch 14, batch 5600, loss[loss=0.2355, simple_loss=0.3228, pruned_loss=0.07407, over 8505.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3024, pruned_loss=0.07379, over 1612545.20 frames. ], batch size: 26, lr: 5.42e-03, grad_scale: 8.0
+2023-02-06 15:29:56,987 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110680.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:30,944 INFO [train.py:901] (2/4) Epoch 14, batch 5650, loss[loss=0.1878, simple_loss=0.2568, pruned_loss=0.05947, over 7677.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3014, pruned_loss=0.07336, over 1612775.57 frames. ], batch size: 18, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:30:33,059 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 15:30:34,099 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. limit=5.0
+2023-02-06 15:30:47,217 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:49,879 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110754.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:54,030 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110760.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:30:54,498 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.502e+02 3.092e+02 3.638e+02 5.778e+02, threshold=6.185e+02, percent-clipped=0.0
+2023-02-06 15:31:02,499 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0
+2023-02-06 15:31:04,322 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110775.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:05,717 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110777.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:07,494 INFO [train.py:901] (2/4) Epoch 14, batch 5700, loss[loss=0.202, simple_loss=0.2899, pruned_loss=0.05703, over 8341.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3005, pruned_loss=0.07267, over 1610202.17 frames. ], batch size: 26, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:31:07,673 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110780.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:15,732 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1133, 4.0852, 3.6710, 1.8493, 3.6353, 3.8048, 3.7127, 3.5639],
+ device='cuda:2'), covar=tensor([0.0823, 0.0646, 0.0965, 0.4517, 0.0868, 0.0968, 0.1234, 0.0856],
+ device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0397, 0.0396, 0.0496, 0.0392, 0.0399, 0.0385, 0.0343],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 15:31:17,912 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:22,872 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110802.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:40,945 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 15:31:41,630 INFO [train.py:901] (2/4) Epoch 14, batch 5750, loss[loss=0.2095, simple_loss=0.2948, pruned_loss=0.06209, over 8367.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3006, pruned_loss=0.07246, over 1610883.77 frames. ], batch size: 24, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:31:51,510 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110843.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:31:54,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:04,400 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.447e+02 3.019e+02 3.853e+02 7.521e+02, threshold=6.038e+02, percent-clipped=3.0
+2023-02-06 15:32:10,141 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110868.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:14,047 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110873.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:18,667 INFO [train.py:901] (2/4) Epoch 14, batch 5800, loss[loss=0.192, simple_loss=0.274, pruned_loss=0.05496, over 8247.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3016, pruned_loss=0.07325, over 1616803.69 frames. ], batch size: 22, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:32:22,951 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:32:53,260 INFO [train.py:901] (2/4) Epoch 14, batch 5850, loss[loss=0.2393, simple_loss=0.3062, pruned_loss=0.08618, over 7973.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3018, pruned_loss=0.07304, over 1618278.59 frames. ], batch size: 21, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:33:02,463 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.71 vs. limit=5.0
+2023-02-06 15:33:14,131 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.498e+02 3.098e+02 4.112e+02 1.106e+03, threshold=6.195e+02, percent-clipped=10.0
+2023-02-06 15:33:22,882 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:33:26,431 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110976.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:33:28,980 INFO [train.py:901] (2/4) Epoch 14, batch 5900, loss[loss=0.3363, simple_loss=0.378, pruned_loss=0.1473, over 6898.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.303, pruned_loss=0.07346, over 1613665.63 frames. ], batch size: 72, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:33:38,588 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4506, 1.8055, 1.8885, 1.1879, 1.9364, 1.3112, 0.4604, 1.7484],
+ device='cuda:2'), covar=tensor([0.0410, 0.0245, 0.0208, 0.0376, 0.0297, 0.0680, 0.0636, 0.0177],
+ device='cuda:2'), in_proj_covar=tensor([0.0415, 0.0358, 0.0305, 0.0406, 0.0345, 0.0499, 0.0372, 0.0377],
+ device='cuda:2'), out_proj_covar=tensor([1.1526e-04, 9.6830e-05, 8.2572e-05, 1.1029e-04, 9.4174e-05, 1.4647e-04,
+ 1.0367e-04, 1.0325e-04], device='cuda:2')
+2023-02-06 15:33:54,412 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111016.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:03,738 INFO [train.py:901] (2/4) Epoch 14, batch 5950, loss[loss=0.2409, simple_loss=0.3174, pruned_loss=0.08221, over 8631.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3025, pruned_loss=0.07321, over 1618576.14 frames. ], batch size: 31, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:34:07,780 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111036.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:11,075 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111041.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:17,723 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111051.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:24,231 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.648e+02 3.047e+02 4.016e+02 7.772e+02, threshold=6.093e+02, percent-clipped=5.0
+2023-02-06 15:34:24,459 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:34,723 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111076.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:34:38,000 INFO [train.py:901] (2/4) Epoch 14, batch 6000, loss[loss=0.2377, simple_loss=0.3196, pruned_loss=0.07793, over 8470.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3033, pruned_loss=0.0737, over 1618511.69 frames. ], batch size: 25, lr: 5.41e-03, grad_scale: 8.0
+2023-02-06 15:34:38,000 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 15:34:50,552 INFO [train.py:935] (2/4) Epoch 14, validation: loss=0.1818, simple_loss=0.2816, pruned_loss=0.04094, over 944034.00 frames.
+2023-02-06 15:34:50,553 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 15:34:51,683 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0
+2023-02-06 15:34:56,292 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111088.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:35:03,696 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111098.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:35:11,766 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 15:35:27,227 INFO [train.py:901] (2/4) Epoch 14, batch 6050, loss[loss=0.2018, simple_loss=0.2868, pruned_loss=0.05841, over 8288.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3025, pruned_loss=0.07359, over 1615501.19 frames. ], batch size: 23, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:35:49,432 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.432e+02 2.876e+02 3.526e+02 5.542e+02, threshold=5.752e+02, percent-clipped=0.0
+2023-02-06 15:35:59,326 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 15:36:01,589 INFO [train.py:901] (2/4) Epoch 14, batch 6100, loss[loss=0.2043, simple_loss=0.2751, pruned_loss=0.0667, over 7771.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3031, pruned_loss=0.07442, over 1616970.20 frames. ], batch size: 19, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:36:15,951 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 15:36:24,937 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111213.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:36:37,116 INFO [train.py:901] (2/4) Epoch 14, batch 6150, loss[loss=0.1845, simple_loss=0.2705, pruned_loss=0.04923, over 7654.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3021, pruned_loss=0.07379, over 1618171.24 frames. ], batch size: 19, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:36:37,874 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111230.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:36:39,287 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8239, 3.7709, 3.4121, 1.7415, 3.3540, 3.4510, 3.4019, 3.1553],
+ device='cuda:2'), covar=tensor([0.0965, 0.0695, 0.1157, 0.4650, 0.1000, 0.1068, 0.1454, 0.1035],
+ device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0402, 0.0405, 0.0503, 0.0396, 0.0403, 0.0389, 0.0350],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 15:36:46,727 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111243.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:36:59,331 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.474e+02 3.213e+02 4.029e+02 8.079e+02, threshold=6.426e+02, percent-clipped=5.0
+2023-02-06 15:37:11,858 INFO [train.py:901] (2/4) Epoch 14, batch 6200, loss[loss=0.2089, simple_loss=0.2954, pruned_loss=0.06123, over 8073.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3014, pruned_loss=0.07381, over 1613288.82 frames. ], batch size: 21, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:37:20,717 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2658, 2.1841, 1.7499, 1.9769, 1.8856, 1.3467, 1.6407, 1.7220],
+ device='cuda:2'), covar=tensor([0.1194, 0.0376, 0.1016, 0.0507, 0.0614, 0.1415, 0.0903, 0.0737],
+ device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0232, 0.0322, 0.0293, 0.0296, 0.0326, 0.0340, 0.0308],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:37:38,514 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111320.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:37:45,102 INFO [train.py:901] (2/4) Epoch 14, batch 6250, loss[loss=0.1793, simple_loss=0.2655, pruned_loss=0.04652, over 5186.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3001, pruned_loss=0.07309, over 1608351.29 frames. ], batch size: 11, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:37:55,176 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111344.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:37:55,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111345.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:38:08,443 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.286e+02 2.818e+02 3.691e+02 1.208e+03, threshold=5.637e+02, percent-clipped=2.0
+2023-02-06 15:38:13,369 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111369.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:38:20,509 INFO [train.py:901] (2/4) Epoch 14, batch 6300, loss[loss=0.2385, simple_loss=0.3117, pruned_loss=0.0826, over 8143.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3007, pruned_loss=0.07322, over 1607726.22 frames. ], batch size: 22, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:38:28,035 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3484, 2.4024, 1.6654, 2.0985, 2.0928, 1.3991, 1.7724, 1.9210],
+ device='cuda:2'), covar=tensor([0.1393, 0.0367, 0.1141, 0.0503, 0.0675, 0.1392, 0.0967, 0.0835],
+ device='cuda:2'), in_proj_covar=tensor([0.0346, 0.0232, 0.0322, 0.0293, 0.0297, 0.0327, 0.0342, 0.0307],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:38:39,596 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7021, 1.5315, 4.8924, 1.9330, 4.3716, 4.1392, 4.4645, 4.3219],
+ device='cuda:2'), covar=tensor([0.0462, 0.4157, 0.0433, 0.3349, 0.0943, 0.0786, 0.0424, 0.0552],
+ device='cuda:2'), in_proj_covar=tensor([0.0540, 0.0608, 0.0630, 0.0573, 0.0645, 0.0555, 0.0544, 0.0609],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 15:38:55,226 INFO [train.py:901] (2/4) Epoch 14, batch 6350, loss[loss=0.1979, simple_loss=0.2831, pruned_loss=0.05632, over 7967.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.301, pruned_loss=0.07301, over 1608138.53 frames. ], batch size: 21, lr: 5.40e-03, grad_scale: 4.0
+2023-02-06 15:38:58,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:39:05,851 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7389, 1.5583, 1.8298, 1.5805, 0.9912, 1.7251, 2.1481, 1.8944],
+ device='cuda:2'), covar=tensor([0.0408, 0.1192, 0.1569, 0.1302, 0.0605, 0.1367, 0.0608, 0.0587],
+ device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0191, 0.0157, 0.0101, 0.0162, 0.0115, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 15:39:17,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.340e+02 2.891e+02 3.552e+02 9.934e+02, threshold=5.783e+02, percent-clipped=8.0
+2023-02-06 15:39:23,084 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:39:30,948 INFO [train.py:901] (2/4) Epoch 14, batch 6400, loss[loss=0.2107, simple_loss=0.2931, pruned_loss=0.06415, over 8790.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3002, pruned_loss=0.07214, over 1614862.51 frames. ], batch size: 30, lr: 5.40e-03, grad_scale: 8.0
+2023-02-06 15:39:32,188 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-02-06 15:39:35,205 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111486.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:39:40,669 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111494.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:40:05,246 INFO [train.py:901] (2/4) Epoch 14, batch 6450, loss[loss=0.1965, simple_loss=0.2856, pruned_loss=0.05375, over 8342.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2988, pruned_loss=0.07094, over 1617220.19 frames. ], batch size: 26, lr: 5.40e-03, grad_scale: 8.0
+2023-02-06 15:40:26,309 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.418e+02 3.184e+02 3.807e+02 1.482e+03, threshold=6.367e+02, percent-clipped=8.0
+2023-02-06 15:40:39,067 INFO [train.py:901] (2/4) Epoch 14, batch 6500, loss[loss=0.1983, simple_loss=0.2831, pruned_loss=0.05676, over 8533.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3003, pruned_loss=0.07233, over 1613397.39 frames. ], batch size: 28, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:40:44,372 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:40:55,099 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111601.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:41:12,084 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111626.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:41:14,591 INFO [train.py:901] (2/4) Epoch 14, batch 6550, loss[loss=0.1718, simple_loss=0.2547, pruned_loss=0.04445, over 7421.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3016, pruned_loss=0.07311, over 1612097.05 frames. ], batch size: 17, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:41:24,427 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 15:41:35,844 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 2.515e+02 3.055e+02 3.900e+02 7.605e+02, threshold=6.110e+02, percent-clipped=3.0
+2023-02-06 15:41:43,362 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 15:41:47,951 INFO [train.py:901] (2/4) Epoch 14, batch 6600, loss[loss=0.2117, simple_loss=0.2805, pruned_loss=0.07145, over 7690.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3008, pruned_loss=0.0728, over 1608594.81 frames. ], batch size: 18, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:41:55,650 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:42:03,625 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111702.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:42:14,400 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111716.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:42:23,467 INFO [train.py:901] (2/4) Epoch 14, batch 6650, loss[loss=0.2221, simple_loss=0.3051, pruned_loss=0.06951, over 8024.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3001, pruned_loss=0.0723, over 1608744.81 frames. ], batch size: 22, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:42:45,230 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.391e+02 3.105e+02 3.860e+02 7.189e+02, threshold=6.209e+02, percent-clipped=3.0
+2023-02-06 15:42:48,090 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9464, 2.1737, 1.9518, 2.8507, 1.3558, 1.5464, 1.8693, 2.3657],
+ device='cuda:2'), covar=tensor([0.0844, 0.0991, 0.0985, 0.0387, 0.1295, 0.1621, 0.1072, 0.0837],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0207, 0.0256, 0.0215, 0.0216, 0.0253, 0.0261, 0.0217],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 15:42:49,656 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-02-06 15:42:51,480 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6988, 1.9312, 2.1781, 1.3317, 2.3222, 1.4259, 0.7507, 1.8243],
+ device='cuda:2'), covar=tensor([0.0530, 0.0293, 0.0223, 0.0497, 0.0306, 0.0711, 0.0717, 0.0276],
+ device='cuda:2'), in_proj_covar=tensor([0.0409, 0.0354, 0.0302, 0.0405, 0.0339, 0.0493, 0.0369, 0.0376],
+ device='cuda:2'), out_proj_covar=tensor([1.1356e-04, 9.5802e-05, 8.1746e-05, 1.1006e-04, 9.2653e-05, 1.4440e-04,
+ 1.0266e-04, 1.0282e-04], device='cuda:2')
+2023-02-06 15:42:57,305 INFO [train.py:901] (2/4) Epoch 14, batch 6700, loss[loss=0.1881, simple_loss=0.2649, pruned_loss=0.0557, over 7548.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3001, pruned_loss=0.07285, over 1604091.79 frames. ], batch size: 18, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:43:16,619 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111809.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:43:32,517 INFO [train.py:901] (2/4) Epoch 14, batch 6750, loss[loss=0.2311, simple_loss=0.3033, pruned_loss=0.07948, over 8105.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3, pruned_loss=0.07242, over 1610011.22 frames. ], batch size: 23, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:43:32,588 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:43:54,028 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.559e+02 3.020e+02 4.182e+02 1.269e+03, threshold=6.039e+02, percent-clipped=6.0
+2023-02-06 15:44:02,381 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 15:44:02,883 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0
+2023-02-06 15:44:07,192 INFO [train.py:901] (2/4) Epoch 14, batch 6800, loss[loss=0.2253, simple_loss=0.2968, pruned_loss=0.07687, over 7914.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3001, pruned_loss=0.07205, over 1616495.93 frames. ], batch size: 20, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:44:40,417 INFO [train.py:901] (2/4) Epoch 14, batch 6850, loss[loss=0.2305, simple_loss=0.315, pruned_loss=0.07299, over 8031.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3014, pruned_loss=0.07258, over 1616152.70 frames. ], batch size: 22, lr: 5.39e-03, grad_scale: 8.0
+2023-02-06 15:44:51,217 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 15:44:52,739 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111945.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:45:01,425 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111958.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:45:03,874 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.542e+02 3.126e+02 4.226e+02 8.027e+02, threshold=6.251e+02, percent-clipped=7.0
+2023-02-06 15:45:16,594 INFO [train.py:901] (2/4) Epoch 14, batch 6900, loss[loss=0.2249, simple_loss=0.3023, pruned_loss=0.07373, over 8251.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2994, pruned_loss=0.07196, over 1608652.58 frames. ], batch size: 24, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:45:18,705 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:45:50,580 INFO [train.py:901] (2/4) Epoch 14, batch 6950, loss[loss=0.2038, simple_loss=0.2661, pruned_loss=0.07077, over 7441.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.2997, pruned_loss=0.07224, over 1607697.68 frames. ], batch size: 17, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:45:58,677 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 15:46:13,915 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.419e+02 2.987e+02 3.531e+02 6.552e+02, threshold=5.974e+02, percent-clipped=1.0
+2023-02-06 15:46:25,965 INFO [train.py:901] (2/4) Epoch 14, batch 7000, loss[loss=0.1639, simple_loss=0.251, pruned_loss=0.03834, over 7223.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2998, pruned_loss=0.07175, over 1609274.01 frames. ], batch size: 16, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:46:28,777 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112084.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:46:28,796 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0966, 1.5641, 4.5188, 2.0376, 2.3851, 5.1807, 5.2182, 4.5196],
+ device='cuda:2'), covar=tensor([0.1113, 0.1714, 0.0225, 0.1810, 0.1052, 0.0156, 0.0323, 0.0534],
+ device='cuda:2'), in_proj_covar=tensor([0.0277, 0.0306, 0.0268, 0.0298, 0.0284, 0.0244, 0.0369, 0.0295],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:46:41,424 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7593, 3.7746, 3.4494, 1.9582, 3.3278, 3.3549, 3.4547, 3.1977],
+ device='cuda:2'), covar=tensor([0.1060, 0.0727, 0.1091, 0.4503, 0.1060, 0.1197, 0.1368, 0.0932],
+ device='cuda:2'), in_proj_covar=tensor([0.0485, 0.0397, 0.0403, 0.0500, 0.0396, 0.0400, 0.0390, 0.0346],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 15:46:59,912 INFO [train.py:901] (2/4) Epoch 14, batch 7050, loss[loss=0.2539, simple_loss=0.328, pruned_loss=0.08994, over 8673.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3003, pruned_loss=0.07203, over 1613049.36 frames. ], batch size: 39, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:47:15,925 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112153.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 15:47:21,792 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.608e+02 3.153e+02 4.211e+02 1.237e+03, threshold=6.307e+02, percent-clipped=12.0
+2023-02-06 15:47:35,265 INFO [train.py:901] (2/4) Epoch 14, batch 7100, loss[loss=0.1714, simple_loss=0.2523, pruned_loss=0.04522, over 8091.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2992, pruned_loss=0.07139, over 1609149.57 frames. ], batch size: 21, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:47:50,289 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112201.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:47:59,152 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 15:48:07,015 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1050, 3.9361, 2.5203, 2.8951, 2.8132, 1.9670, 2.7918, 3.1552],
+ device='cuda:2'), covar=tensor([0.1559, 0.0285, 0.0854, 0.0692, 0.0668, 0.1331, 0.1036, 0.0942],
+ device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0234, 0.0323, 0.0298, 0.0301, 0.0329, 0.0345, 0.0309],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:48:07,692 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112226.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:48:10,129 INFO [train.py:901] (2/4) Epoch 14, batch 7150, loss[loss=0.2392, simple_loss=0.3157, pruned_loss=0.08134, over 8606.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2995, pruned_loss=0.07136, over 1612406.01 frames. ], batch size: 39, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:48:16,274 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7036, 1.2922, 4.8841, 1.7893, 4.3531, 4.0897, 4.3625, 4.2921],
+ device='cuda:2'), covar=tensor([0.0476, 0.4456, 0.0411, 0.3531, 0.0968, 0.0837, 0.0521, 0.0529],
+ device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0599, 0.0625, 0.0566, 0.0638, 0.0550, 0.0542, 0.0603],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 15:48:31,549 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.371e+02 2.859e+02 3.664e+02 7.587e+02, threshold=5.717e+02, percent-clipped=3.0
+2023-02-06 15:48:35,312 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0
+2023-02-06 15:48:35,576 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112268.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:48:43,202 INFO [train.py:901] (2/4) Epoch 14, batch 7200, loss[loss=0.2326, simple_loss=0.3142, pruned_loss=0.07552, over 8461.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2996, pruned_loss=0.07142, over 1610862.05 frames. ], batch size: 25, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:49:18,426 INFO [train.py:901] (2/4) Epoch 14, batch 7250, loss[loss=0.2097, simple_loss=0.2857, pruned_loss=0.06683, over 8328.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3014, pruned_loss=0.07211, over 1611572.46 frames. ], batch size: 25, lr: 5.38e-03, grad_scale: 8.0
+2023-02-06 15:49:39,910 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.708e+02 3.212e+02 3.989e+02 8.387e+02, threshold=6.424e+02, percent-clipped=5.0
+2023-02-06 15:49:52,049 INFO [train.py:901] (2/4) Epoch 14, batch 7300, loss[loss=0.2496, simple_loss=0.3361, pruned_loss=0.08161, over 8624.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3006, pruned_loss=0.07151, over 1609803.69 frames. ], batch size: 34, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:49:54,808 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8481, 1.7833, 2.5314, 1.6363, 1.1534, 2.5984, 0.4512, 1.2808],
+ device='cuda:2'), covar=tensor([0.2490, 0.1520, 0.0361, 0.1813, 0.3992, 0.0272, 0.3014, 0.2027],
+ device='cuda:2'), in_proj_covar=tensor([0.0172, 0.0175, 0.0107, 0.0215, 0.0256, 0.0111, 0.0161, 0.0173],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 15:50:11,933 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112407.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:50:26,718 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112428.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:50:28,007 INFO [train.py:901] (2/4) Epoch 14, batch 7350, loss[loss=0.1954, simple_loss=0.2749, pruned_loss=0.05791, over 7975.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3019, pruned_loss=0.07236, over 1612087.15 frames. ], batch size: 21, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:50:40,030 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 15:50:49,906 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.462e+02 2.972e+02 3.682e+02 1.093e+03, threshold=5.943e+02, percent-clipped=5.0
+2023-02-06 15:50:59,967 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 15:51:02,056 INFO [train.py:901] (2/4) Epoch 14, batch 7400, loss[loss=0.2585, simple_loss=0.337, pruned_loss=0.09003, over 8109.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3024, pruned_loss=0.07219, over 1611157.00 frames. ], batch size: 23, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:51:32,634 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112524.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:51:37,190 INFO [train.py:901] (2/4) Epoch 14, batch 7450, loss[loss=0.2451, simple_loss=0.3368, pruned_loss=0.07668, over 8845.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3033, pruned_loss=0.07271, over 1611645.02 frames. ], batch size: 32, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:51:41,775 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 15:51:46,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112543.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:51:50,987 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112549.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 15:51:53,939 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 15:51:59,931 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.473e+02 3.112e+02 3.710e+02 6.215e+02, threshold=6.224e+02, percent-clipped=1.0
+2023-02-06 15:52:13,215 INFO [train.py:901] (2/4) Epoch 14, batch 7500, loss[loss=0.2169, simple_loss=0.3028, pruned_loss=0.06552, over 8466.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3025, pruned_loss=0.07251, over 1609421.54 frames. ], batch size: 25, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:52:13,390 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112580.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:52:23,790 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:52:47,695 INFO [train.py:901] (2/4) Epoch 14, batch 7550, loss[loss=0.2416, simple_loss=0.3178, pruned_loss=0.08272, over 8565.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3027, pruned_loss=0.07285, over 1610236.16 frames. ], batch size: 39, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:52:51,298 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112635.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:53:08,103 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5376, 1.3266, 2.2698, 1.0955, 1.9996, 2.3941, 2.5485, 2.0721],
+ device='cuda:2'), covar=tensor([0.0955, 0.1278, 0.0492, 0.2237, 0.0809, 0.0415, 0.0680, 0.0744],
+ device='cuda:2'), in_proj_covar=tensor([0.0278, 0.0308, 0.0270, 0.0301, 0.0288, 0.0248, 0.0372, 0.0296],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 15:53:11,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.402e+02 2.890e+02 3.643e+02 7.164e+02, threshold=5.781e+02, percent-clipped=3.0
+2023-02-06 15:53:23,622 INFO [train.py:901] (2/4) Epoch 14, batch 7600, loss[loss=0.2198, simple_loss=0.3013, pruned_loss=0.06917, over 8538.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3037, pruned_loss=0.07343, over 1613415.07 frames. ], batch size: 28, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:53:57,102 INFO [train.py:901] (2/4) Epoch 14, batch 7650, loss[loss=0.2684, simple_loss=0.3326, pruned_loss=0.1021, over 8568.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3027, pruned_loss=0.07309, over 1612348.33 frames. ], batch size: 39, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:54:11,112 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112751.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:54:18,473 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.546e+02 2.941e+02 3.649e+02 7.123e+02, threshold=5.882e+02, percent-clipped=5.0
+2023-02-06 15:54:32,364 INFO [train.py:901] (2/4) Epoch 14, batch 7700, loss[loss=0.2082, simple_loss=0.2998, pruned_loss=0.05837, over 8469.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3012, pruned_loss=0.07264, over 1609723.67 frames. ], batch size: 25, lr: 5.37e-03, grad_scale: 8.0
+2023-02-06 15:54:44,371 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 15:54:45,583 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112799.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:54:52,888 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 15:55:03,126 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112824.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:55:06,911 INFO [train.py:901] (2/4) Epoch 14, batch 7750, loss[loss=0.1942, simple_loss=0.2854, pruned_loss=0.05152, over 8288.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3009, pruned_loss=0.07279, over 1605249.07 frames. ], batch size: 23, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:55:28,269 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.453e+02 3.172e+02 4.245e+02 8.131e+02, threshold=6.343e+02, percent-clipped=10.0
+2023-02-06 15:55:30,967 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112866.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:55:41,028 INFO [train.py:901] (2/4) Epoch 14, batch 7800, loss[loss=0.2391, simple_loss=0.3203, pruned_loss=0.07893, over 8509.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3, pruned_loss=0.07243, over 1606562.68 frames. ], batch size: 28, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:56:02,573 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 15:56:12,172 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112924.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:16,236 INFO [train.py:901] (2/4) Epoch 14, batch 7850, loss[loss=0.2429, simple_loss=0.309, pruned_loss=0.08842, over 8096.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3017, pruned_loss=0.07319, over 1609043.40 frames. ], batch size: 21, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:56:22,358 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112939.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:22,423 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112939.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:37,784 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.504e+02 3.067e+02 3.726e+02 7.698e+02, threshold=6.135e+02, percent-clipped=2.0
+2023-02-06 15:56:49,064 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:56:49,646 INFO [train.py:901] (2/4) Epoch 14, batch 7900, loss[loss=0.1935, simple_loss=0.2818, pruned_loss=0.05265, over 8029.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3026, pruned_loss=0.07342, over 1613743.61 frames. ], batch size: 22, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:57:22,233 INFO [train.py:901] (2/4) Epoch 14, batch 7950, loss[loss=0.2275, simple_loss=0.3075, pruned_loss=0.07378, over 8021.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3021, pruned_loss=0.07323, over 1613144.88 frames. ], batch size: 22, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:57:28,306 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113039.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:57:38,047 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113054.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:57:43,248 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.654e+02 3.191e+02 4.041e+02 1.304e+03, threshold=6.382e+02, percent-clipped=5.0
+2023-02-06 15:57:55,257 INFO [train.py:901] (2/4) Epoch 14, batch 8000, loss[loss=0.2317, simple_loss=0.3035, pruned_loss=0.07996, over 7653.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3009, pruned_loss=0.07279, over 1612647.81 frames. ], batch size: 19, lr: 5.36e-03, grad_scale: 8.0
+2023-02-06 15:58:04,703 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:58:23,457 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:58:28,557 INFO [train.py:901] (2/4) Epoch 14, batch 8050, loss[loss=0.1948, simple_loss=0.2717, pruned_loss=0.05901, over 7246.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3003, pruned_loss=0.07364, over 1587135.20 frames. ], batch size: 16, lr: 5.36e-03, grad_scale: 16.0
+2023-02-06 15:58:40,129 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113147.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 15:58:49,769 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.323e+02 2.856e+02 3.288e+02 8.076e+02, threshold=5.712e+02, percent-clipped=1.0
+2023-02-06 15:59:01,309 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 15:59:06,257 INFO [train.py:901] (2/4) Epoch 15, batch 0, loss[loss=0.2079, simple_loss=0.2896, pruned_loss=0.06314, over 7814.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2896, pruned_loss=0.06314, over 7814.00 frames. ], batch size: 20, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 15:59:06,257 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 15:59:17,270 INFO [train.py:935] (2/4) Epoch 15, validation: loss=0.1825, simple_loss=0.283, pruned_loss=0.04098, over 944034.00 frames.
+2023-02-06 15:59:17,270 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 15:59:32,303 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 15:59:48,307 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0
+2023-02-06 15:59:49,089 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 15:59:51,453 INFO [train.py:901] (2/4) Epoch 15, batch 50, loss[loss=0.197, simple_loss=0.2768, pruned_loss=0.05863, over 7930.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3026, pruned_loss=0.07113, over 365608.40 frames. ], batch size: 20, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:00:08,691 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 16:00:21,174 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113252.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:00:27,786 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.549e+02 3.077e+02 3.582e+02 9.445e+02, threshold=6.153e+02, percent-clipped=5.0
+2023-02-06 16:00:28,491 INFO [train.py:901] (2/4) Epoch 15, batch 100, loss[loss=0.2298, simple_loss=0.312, pruned_loss=0.07384, over 8282.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3025, pruned_loss=0.07272, over 639345.74 frames. ], batch size: 23, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:00:29,902 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 16:00:39,305 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7139, 5.8051, 5.0449, 2.2919, 5.0237, 5.4679, 5.4138, 5.3265],
+ device='cuda:2'), covar=tensor([0.0660, 0.0522, 0.1046, 0.4778, 0.0847, 0.1014, 0.1129, 0.0661],
+ device='cuda:2'), in_proj_covar=tensor([0.0484, 0.0404, 0.0403, 0.0503, 0.0402, 0.0400, 0.0393, 0.0352],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 16:00:41,981 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113283.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:00:50,245 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113295.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:01:00,329 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113310.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:01:02,086 INFO [train.py:901] (2/4) Epoch 15, batch 150, loss[loss=0.1908, simple_loss=0.2668, pruned_loss=0.05742, over 7433.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3016, pruned_loss=0.0723, over 854900.90 frames. ], batch size: 17, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:01:06,864 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113320.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:01:17,347 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113335.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:01:28,773 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113350.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:01:32,820 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6480, 1.9703, 2.2013, 1.1957, 2.2776, 1.3747, 0.6950, 1.8896],
+ device='cuda:2'), covar=tensor([0.0619, 0.0287, 0.0228, 0.0555, 0.0325, 0.0818, 0.0722, 0.0289],
+ device='cuda:2'), in_proj_covar=tensor([0.0416, 0.0358, 0.0304, 0.0408, 0.0343, 0.0497, 0.0370, 0.0380],
+ device='cuda:2'), out_proj_covar=tensor([1.1544e-04, 9.6722e-05, 8.2157e-05, 1.1080e-04, 9.3424e-05, 1.4533e-04,
+ 1.0253e-04, 1.0406e-04], device='cuda:2')
+2023-02-06 16:01:37,319 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.511e+02 3.032e+02 4.146e+02 1.005e+03, threshold=6.064e+02, percent-clipped=3.0
+2023-02-06 16:01:38,016 INFO [train.py:901] (2/4) Epoch 15, batch 200, loss[loss=0.2026, simple_loss=0.2891, pruned_loss=0.05806, over 8360.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3004, pruned_loss=0.07163, over 1027307.89 frames. ], batch size: 24, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:01:46,247 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113375.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:02:01,346 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113398.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:02:11,078 INFO [train.py:901] (2/4) Epoch 15, batch 250, loss[loss=0.2239, simple_loss=0.3084, pruned_loss=0.06966, over 8301.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.304, pruned_loss=0.0741, over 1161453.26 frames. ], batch size: 23, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:02:19,373 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 16:02:28,581 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 16:02:43,772 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.666e+02 3.062e+02 4.026e+02 8.735e+02, threshold=6.124e+02, percent-clipped=4.0
+2023-02-06 16:02:44,412 INFO [train.py:901] (2/4) Epoch 15, batch 300, loss[loss=0.2678, simple_loss=0.3464, pruned_loss=0.09459, over 8585.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.3037, pruned_loss=0.07383, over 1262894.82 frames. ], batch size: 31, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:03:15,226 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 16:03:19,321 INFO [train.py:901] (2/4) Epoch 15, batch 350, loss[loss=0.2829, simple_loss=0.3447, pruned_loss=0.1105, over 6608.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3036, pruned_loss=0.07404, over 1341678.86 frames. ], batch size: 71, lr: 5.17e-03, grad_scale: 16.0
+2023-02-06 16:03:52,044 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.415e+02 3.115e+02 3.728e+02 6.919e+02, threshold=6.229e+02, percent-clipped=2.0
+2023-02-06 16:03:52,739 INFO [train.py:901] (2/4) Epoch 15, batch 400, loss[loss=0.2279, simple_loss=0.3078, pruned_loss=0.07395, over 8203.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3026, pruned_loss=0.07348, over 1403570.50 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:04:17,489 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113596.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:04:28,817 INFO [train.py:901] (2/4) Epoch 15, batch 450, loss[loss=0.2204, simple_loss=0.2922, pruned_loss=0.07426, over 7652.00 frames. ], tot_loss[loss=0.225, simple_loss=0.303, pruned_loss=0.07354, over 1451751.51 frames. ], batch size: 19, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:04:30,186 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1341, 1.5555, 1.7845, 1.3996, 0.9524, 1.6175, 1.9086, 1.6459],
+ device='cuda:2'), covar=tensor([0.0478, 0.1143, 0.1531, 0.1313, 0.0590, 0.1341, 0.0631, 0.0622],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0155, 0.0102, 0.0162, 0.0115, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 16:04:43,286 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113635.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:04:56,091 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113654.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:05:01,031 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.616e+02 3.268e+02 4.141e+02 9.119e+02, threshold=6.536e+02, percent-clipped=2.0
+2023-02-06 16:05:01,749 INFO [train.py:901] (2/4) Epoch 15, batch 500, loss[loss=0.239, simple_loss=0.3124, pruned_loss=0.08284, over 6805.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3018, pruned_loss=0.07321, over 1486059.21 frames. ], batch size: 71, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:05:02,562 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113664.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:05:12,317 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113679.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:05:35,420 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113711.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:05:36,634 INFO [train.py:901] (2/4) Epoch 15, batch 550, loss[loss=0.2506, simple_loss=0.3186, pruned_loss=0.09129, over 8036.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3017, pruned_loss=0.07329, over 1514411.76 frames. ], batch size: 22, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:06:09,824 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.516e+02 3.119e+02 4.209e+02 9.524e+02, threshold=6.239e+02, percent-clipped=4.0
+2023-02-06 16:06:10,533 INFO [train.py:901] (2/4) Epoch 15, batch 600, loss[loss=0.1858, simple_loss=0.2648, pruned_loss=0.05336, over 7436.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3017, pruned_loss=0.07292, over 1535817.22 frames. ], batch size: 17, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:06:17,857 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9834, 1.4728, 3.3205, 1.4908, 2.2782, 3.5952, 3.6573, 3.1079],
+ device='cuda:2'), covar=tensor([0.0968, 0.1663, 0.0327, 0.1985, 0.1041, 0.0261, 0.0595, 0.0574],
+ device='cuda:2'), in_proj_covar=tensor([0.0278, 0.0308, 0.0273, 0.0301, 0.0288, 0.0247, 0.0377, 0.0299],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 16:06:24,220 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 16:06:44,312 INFO [train.py:901] (2/4) Epoch 15, batch 650, loss[loss=0.2164, simple_loss=0.2968, pruned_loss=0.06802, over 8497.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3028, pruned_loss=0.07299, over 1558817.20 frames. ], batch size: 26, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:06:53,836 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6905, 3.0969, 2.4006, 4.1227, 1.7858, 2.1494, 2.6148, 3.1485],
+ device='cuda:2'), covar=tensor([0.0671, 0.0789, 0.0923, 0.0192, 0.1112, 0.1270, 0.0935, 0.0742],
+ device='cuda:2'), in_proj_covar=tensor([0.0226, 0.0201, 0.0247, 0.0207, 0.0208, 0.0244, 0.0249, 0.0208],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 16:07:19,220 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.270e+02 2.767e+02 3.649e+02 9.673e+02, threshold=5.535e+02, percent-clipped=4.0
+2023-02-06 16:07:19,888 INFO [train.py:901] (2/4) Epoch 15, batch 700, loss[loss=0.3217, simple_loss=0.3656, pruned_loss=0.1389, over 6960.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3026, pruned_loss=0.07277, over 1572423.19 frames. ], batch size: 71, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:07:53,444 INFO [train.py:901] (2/4) Epoch 15, batch 750, loss[loss=0.2406, simple_loss=0.3125, pruned_loss=0.08434, over 8196.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3028, pruned_loss=0.07289, over 1584211.49 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:08:11,189 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 16:08:20,441 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 16:08:29,183 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 2.237e+02 2.791e+02 3.511e+02 6.350e+02, threshold=5.582e+02, percent-clipped=4.0
+2023-02-06 16:08:29,879 INFO [train.py:901] (2/4) Epoch 15, batch 800, loss[loss=0.2036, simple_loss=0.2982, pruned_loss=0.05446, over 8292.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3004, pruned_loss=0.07175, over 1585391.12 frames. ], batch size: 23, lr: 5.16e-03, grad_scale: 16.0
+2023-02-06 16:08:32,849 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113967.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:08:40,773 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:08:50,093 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113992.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:09:01,983 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114008.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:09:05,302 INFO [train.py:901] (2/4) Epoch 15, batch 850, loss[loss=0.1866, simple_loss=0.2799, pruned_loss=0.04665, over 8334.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3004, pruned_loss=0.07146, over 1594222.37 frames. ], batch size: 26, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:09:12,128 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0595, 1.2480, 1.2106, 0.5235, 1.2184, 1.0099, 0.0467, 1.1874],
+ device='cuda:2'), covar=tensor([0.0334, 0.0277, 0.0238, 0.0450, 0.0335, 0.0736, 0.0630, 0.0240],
+ device='cuda:2'), in_proj_covar=tensor([0.0419, 0.0359, 0.0305, 0.0411, 0.0342, 0.0499, 0.0370, 0.0381],
+ device='cuda:2'), out_proj_covar=tensor([1.1633e-04, 9.6711e-05, 8.2020e-05, 1.1161e-04, 9.3094e-05, 1.4561e-04,
+ 1.0267e-04, 1.0432e-04], device='cuda:2')
+2023-02-06 16:09:32,139 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2691, 1.2558, 1.4266, 1.2028, 0.7061, 1.2849, 1.1507, 1.0085],
+ device='cuda:2'), covar=tensor([0.0550, 0.1170, 0.1570, 0.1293, 0.0595, 0.1360, 0.0683, 0.0632],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0156, 0.0102, 0.0162, 0.0115, 0.0138],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 16:09:39,413 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.394e+02 2.826e+02 3.443e+02 6.296e+02, threshold=5.653e+02, percent-clipped=1.0
+2023-02-06 16:09:40,788 INFO [train.py:901] (2/4) Epoch 15, batch 900, loss[loss=0.2129, simple_loss=0.3061, pruned_loss=0.05986, over 8352.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3012, pruned_loss=0.0721, over 1598913.29 frames. ], batch size: 24, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:09:45,805 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 16:09:54,016 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4226, 2.0018, 2.8781, 2.2804, 2.7716, 2.2801, 1.9779, 1.3718],
+ device='cuda:2'), covar=tensor([0.4208, 0.4430, 0.1427, 0.2832, 0.1964, 0.2438, 0.1680, 0.4806],
+ device='cuda:2'), in_proj_covar=tensor([0.0890, 0.0903, 0.0742, 0.0872, 0.0938, 0.0828, 0.0711, 0.0781],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 16:09:58,192 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0
+2023-02-06 16:10:02,636 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:10:15,159 INFO [train.py:901] (2/4) Epoch 15, batch 950, loss[loss=0.2492, simple_loss=0.3267, pruned_loss=0.0859, over 8362.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3005, pruned_loss=0.07201, over 1600080.13 frames. ], batch size: 24, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:10:21,911 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114123.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:10:36,821 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114145.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:10:38,931 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4415, 1.3746, 1.6751, 1.3112, 1.0699, 1.6959, 0.1640, 1.1361],
+ device='cuda:2'), covar=tensor([0.1995, 0.1689, 0.0554, 0.1165, 0.3417, 0.0488, 0.2793, 0.1659],
+ device='cuda:2'), in_proj_covar=tensor([0.0174, 0.0179, 0.0111, 0.0217, 0.0259, 0.0114, 0.0165, 0.0175],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 16:10:39,440 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 16:10:45,954 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4435, 2.7443, 1.9397, 2.1884, 2.2590, 1.4576, 2.0590, 2.0636],
+ device='cuda:2'), covar=tensor([0.1633, 0.0349, 0.0995, 0.0612, 0.0628, 0.1428, 0.0936, 0.0985],
+ device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0233, 0.0327, 0.0300, 0.0303, 0.0327, 0.0344, 0.0309],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 16:10:49,208 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.449e+02 2.913e+02 3.851e+02 8.356e+02, threshold=5.826e+02, percent-clipped=3.0
+2023-02-06 16:10:49,925 INFO [train.py:901] (2/4) Epoch 15, batch 1000, loss[loss=0.1945, simple_loss=0.2872, pruned_loss=0.05086, over 8292.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3009, pruned_loss=0.07205, over 1602414.33 frames. ], batch size: 23, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:11:00,772 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9705, 4.0179, 2.5561, 2.7252, 2.8625, 2.1425, 2.8410, 2.9742],
+ device='cuda:2'), covar=tensor([0.1601, 0.0256, 0.0898, 0.0760, 0.0705, 0.1171, 0.0889, 0.1020],
+ device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0233, 0.0326, 0.0299, 0.0303, 0.0327, 0.0343, 0.0308],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 16:11:14,220 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 16:11:25,588 INFO [train.py:901] (2/4) Epoch 15, batch 1050, loss[loss=0.2054, simple_loss=0.2737, pruned_loss=0.06852, over 7660.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3014, pruned_loss=0.07272, over 1609882.32 frames. ], batch size: 19, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:11:25,602 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 16:11:57,606 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.504e+02 3.058e+02 3.938e+02 1.189e+03, threshold=6.116e+02, percent-clipped=4.0
+2023-02-06 16:11:58,328 INFO [train.py:901] (2/4) Epoch 15, batch 1100, loss[loss=0.2319, simple_loss=0.3244, pruned_loss=0.06971, over 8362.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3005, pruned_loss=0.07271, over 1610518.29 frames. ], batch size: 24, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:12:11,299 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-06 16:12:26,183 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-06 16:12:32,998 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0
+2023-02-06 16:12:33,900 INFO [train.py:901] (2/4) Epoch 15, batch 1150, loss[loss=0.1889, simple_loss=0.2824, pruned_loss=0.04766, over 7800.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2998, pruned_loss=0.07211, over 1613428.14 frames. ], batch size: 20, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:12:38,620 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 16:12:59,538 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114350.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:07,354 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.463e+02 3.139e+02 3.955e+02 6.139e+02, threshold=6.277e+02, percent-clipped=1.0
+2023-02-06 16:13:07,982 INFO [train.py:901] (2/4) Epoch 15, batch 1200, loss[loss=0.2032, simple_loss=0.2673, pruned_loss=0.0696, over 7682.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2997, pruned_loss=0.07208, over 1611205.77 frames. ], batch size: 18, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:13:16,141 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114375.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:18,728 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114379.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:36,375 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114404.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:13:42,788 INFO [train.py:901] (2/4) Epoch 15, batch 1250, loss[loss=0.2297, simple_loss=0.3119, pruned_loss=0.07373, over 8456.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2992, pruned_loss=0.07151, over 1615119.29 frames. ], batch size: 27, lr: 5.15e-03, grad_scale: 16.0
+2023-02-06 16:14:16,861 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.591e+02 3.148e+02 4.129e+02 1.085e+03, threshold=6.295e+02, percent-clipped=6.0
+2023-02-06 16:14:17,473 INFO [train.py:901] (2/4) Epoch 15, batch 1300, loss[loss=0.243, simple_loss=0.3263, pruned_loss=0.07986, over 8560.00 frames. ], tot_loss[loss=0.222, simple_loss=0.2998, pruned_loss=0.07206, over 1616011.64 frames. ], batch size: 31, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:14:35,221 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114489.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:14:51,240 INFO [train.py:901] (2/4) Epoch 15, batch 1350, loss[loss=0.2311, simple_loss=0.3101, pruned_loss=0.07604, over 8285.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3, pruned_loss=0.07156, over 1617962.03 frames. ], batch size: 23, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:15:08,701 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1053, 4.1509, 3.7312, 1.5738, 3.5648, 3.7359, 3.7772, 3.4701],
+ device='cuda:2'), covar=tensor([0.0908, 0.0627, 0.1051, 0.5569, 0.0955, 0.0981, 0.1289, 0.1001],
+ device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0398, 0.0399, 0.0498, 0.0395, 0.0398, 0.0381, 0.0348],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 16:15:12,373 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-02-06 16:15:26,456 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.434e+02 2.903e+02 3.628e+02 5.826e+02, threshold=5.807e+02, percent-clipped=0.0
+2023-02-06 16:15:27,128 INFO [train.py:901] (2/4) Epoch 15, batch 1400, loss[loss=0.2546, simple_loss=0.3331, pruned_loss=0.08803, over 8646.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2994, pruned_loss=0.07091, over 1618164.81 frames. ], batch size: 34, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:15:54,551 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114604.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:16:00,677 INFO [train.py:901] (2/4) Epoch 15, batch 1450, loss[loss=0.2007, simple_loss=0.2706, pruned_loss=0.06545, over 7450.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2998, pruned_loss=0.07104, over 1614879.39 frames. ], batch size: 17, lr: 5.14e-03, grad_scale: 16.0
+2023-02-06 16:16:08,831 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 16:16:36,177 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.414e+02 3.068e+02 3.744e+02 6.619e+02, threshold=6.136e+02, percent-clipped=3.0
+2023-02-06 16:16:36,887 INFO [train.py:901] (2/4) Epoch 15, batch 1500, loss[loss=0.1811, simple_loss=0.2577, pruned_loss=0.05225, over 7702.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.3004, pruned_loss=0.07091, over 1616345.52 frames.
], batch size: 18, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:16:48,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3541, 1.3278, 4.5299, 1.6676, 3.9587, 3.7987, 4.0643, 3.9393], + device='cuda:2'), covar=tensor([0.0630, 0.4973, 0.0548, 0.4061, 0.1261, 0.0885, 0.0618, 0.0677], + device='cuda:2'), in_proj_covar=tensor([0.0549, 0.0609, 0.0634, 0.0576, 0.0651, 0.0554, 0.0548, 0.0614], + device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 16:16:58,918 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4401, 1.8095, 2.7922, 1.2796, 2.0224, 1.9072, 1.5061, 1.9806], + device='cuda:2'), covar=tensor([0.1747, 0.2197, 0.0710, 0.4020, 0.1529, 0.2778, 0.2008, 0.1932], + device='cuda:2'), in_proj_covar=tensor([0.0498, 0.0551, 0.0537, 0.0601, 0.0622, 0.0565, 0.0493, 0.0621], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 16:17:00,203 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5074, 1.7948, 1.9102, 1.1910, 1.9762, 1.4185, 0.4084, 1.6535], + device='cuda:2'), covar=tensor([0.0393, 0.0280, 0.0200, 0.0333, 0.0296, 0.0662, 0.0612, 0.0210], + device='cuda:2'), in_proj_covar=tensor([0.0415, 0.0354, 0.0302, 0.0406, 0.0338, 0.0493, 0.0366, 0.0378], + device='cuda:2'), out_proj_covar=tensor([1.1498e-04, 9.5558e-05, 8.1249e-05, 1.1026e-04, 9.1970e-05, 1.4356e-04, + 1.0150e-04, 1.0324e-04], device='cuda:2') +2023-02-06 16:17:09,892 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 16:17:11,507 INFO [train.py:901] (2/4) Epoch 15, batch 1550, loss[loss=0.2224, simple_loss=0.3089, pruned_loss=0.06798, over 8361.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.3009, pruned_loss=0.07102, over 1621741.90 frames. ], batch size: 24, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:17:26,183 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:17:45,709 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.278e+02 2.828e+02 3.736e+02 6.971e+02, threshold=5.655e+02, percent-clipped=1.0 +2023-02-06 16:17:46,443 INFO [train.py:901] (2/4) Epoch 15, batch 1600, loss[loss=0.2812, simple_loss=0.3373, pruned_loss=0.1125, over 7513.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.3001, pruned_loss=0.07045, over 1619220.95 frames. ], batch size: 72, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:18:22,446 INFO [train.py:901] (2/4) Epoch 15, batch 1650, loss[loss=0.1745, simple_loss=0.255, pruned_loss=0.047, over 7650.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2982, pruned_loss=0.0698, over 1617162.47 frames. ], batch size: 19, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:18:55,126 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:18:56,274 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 2.429e+02 2.845e+02 3.384e+02 6.803e+02, threshold=5.691e+02, percent-clipped=1.0 +2023-02-06 16:18:56,970 INFO [train.py:901] (2/4) Epoch 15, batch 1700, loss[loss=0.2125, simple_loss=0.2775, pruned_loss=0.07374, over 7437.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2979, pruned_loss=0.07, over 1616043.54 frames. 
], batch size: 17, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:19:12,811 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:19:16,114 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:19:32,921 INFO [train.py:901] (2/4) Epoch 15, batch 1750, loss[loss=0.2141, simple_loss=0.2837, pruned_loss=0.0722, over 7444.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2985, pruned_loss=0.07018, over 1619452.72 frames. ], batch size: 17, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:19:45,254 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7419, 1.7529, 4.8926, 1.9673, 4.3796, 4.0626, 4.4484, 4.3579], + device='cuda:2'), covar=tensor([0.0480, 0.4221, 0.0379, 0.3555, 0.0905, 0.0852, 0.0487, 0.0506], + device='cuda:2'), in_proj_covar=tensor([0.0553, 0.0611, 0.0639, 0.0581, 0.0653, 0.0560, 0.0553, 0.0618], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 16:20:06,955 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.435e+02 3.025e+02 3.758e+02 7.531e+02, threshold=6.050e+02, percent-clipped=3.0 +2023-02-06 16:20:07,575 INFO [train.py:901] (2/4) Epoch 15, batch 1800, loss[loss=0.2319, simple_loss=0.3076, pruned_loss=0.07811, over 8466.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2968, pruned_loss=0.06925, over 1613108.19 frames. ], batch size: 28, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:20:43,782 INFO [train.py:901] (2/4) Epoch 15, batch 1850, loss[loss=0.205, simple_loss=0.2945, pruned_loss=0.05773, over 8508.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2984, pruned_loss=0.06998, over 1615640.53 frames. ], batch size: 26, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:20:48,473 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. 
limit=2.0 +2023-02-06 16:20:49,523 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0510, 2.4023, 2.6850, 1.3245, 2.8103, 1.5963, 1.5274, 1.9132], + device='cuda:2'), covar=tensor([0.0668, 0.0313, 0.0233, 0.0589, 0.0312, 0.0668, 0.0728, 0.0445], + device='cuda:2'), in_proj_covar=tensor([0.0420, 0.0358, 0.0306, 0.0410, 0.0341, 0.0495, 0.0369, 0.0381], + device='cuda:2'), out_proj_covar=tensor([1.1640e-04, 9.6495e-05, 8.2128e-05, 1.1120e-04, 9.2733e-05, 1.4421e-04, + 1.0221e-04, 1.0411e-04], device='cuda:2') +2023-02-06 16:20:59,009 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7595, 1.6949, 2.2577, 1.6453, 1.2298, 2.2921, 0.4198, 1.3895], + device='cuda:2'), covar=tensor([0.2509, 0.1798, 0.0448, 0.1758, 0.3841, 0.0462, 0.2903, 0.1867], + device='cuda:2'), in_proj_covar=tensor([0.0174, 0.0180, 0.0112, 0.0219, 0.0262, 0.0117, 0.0166, 0.0177], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:21:05,807 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1353, 1.6684, 3.4596, 1.4309, 2.5314, 3.7839, 3.8905, 3.2940], + device='cuda:2'), covar=tensor([0.1017, 0.1650, 0.0346, 0.2147, 0.0988, 0.0212, 0.0472, 0.0531], + device='cuda:2'), in_proj_covar=tensor([0.0276, 0.0303, 0.0267, 0.0296, 0.0282, 0.0244, 0.0371, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 16:21:17,801 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.659e+02 3.189e+02 4.139e+02 1.250e+03, threshold=6.379e+02, percent-clipped=4.0 +2023-02-06 16:21:18,502 INFO [train.py:901] (2/4) Epoch 15, batch 1900, loss[loss=0.2107, simple_loss=0.2919, pruned_loss=0.06475, over 8335.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2982, pruned_loss=0.06957, over 1619264.28 frames. ], batch size: 26, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:21:28,842 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:21:46,756 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:21:50,087 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 16:21:53,561 INFO [train.py:901] (2/4) Epoch 15, batch 1950, loss[loss=0.2637, simple_loss=0.3336, pruned_loss=0.09695, over 8339.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2999, pruned_loss=0.07001, over 1622798.71 frames. ], batch size: 48, lr: 5.13e-03, grad_scale: 32.0 +2023-02-06 16:22:04,496 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 16:22:23,220 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 16:22:28,420 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.421e+02 3.112e+02 3.916e+02 6.433e+02, threshold=6.224e+02, percent-clipped=1.0 +2023-02-06 16:22:29,125 INFO [train.py:901] (2/4) Epoch 15, batch 2000, loss[loss=0.2097, simple_loss=0.2963, pruned_loss=0.06158, over 8348.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3009, pruned_loss=0.07085, over 1622133.75 frames. 
], batch size: 24, lr: 5.13e-03, grad_scale: 32.0 +2023-02-06 16:22:49,747 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:23:03,503 INFO [train.py:901] (2/4) Epoch 15, batch 2050, loss[loss=0.2247, simple_loss=0.2918, pruned_loss=0.07876, over 7262.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2995, pruned_loss=0.07031, over 1618680.10 frames. ], batch size: 16, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:23:18,037 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115233.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:23:39,484 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.382e+02 2.963e+02 3.753e+02 6.860e+02, threshold=5.925e+02, percent-clipped=2.0 +2023-02-06 16:23:39,504 INFO [train.py:901] (2/4) Epoch 15, batch 2100, loss[loss=0.1684, simple_loss=0.2512, pruned_loss=0.04277, over 7720.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2985, pruned_loss=0.0699, over 1614923.72 frames. ], batch size: 18, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:23:51,102 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9091, 2.6104, 3.4420, 1.9846, 1.7517, 3.5134, 0.5568, 2.0736], + device='cuda:2'), covar=tensor([0.1812, 0.1447, 0.0347, 0.2464, 0.3886, 0.0356, 0.3328, 0.1789], + device='cuda:2'), in_proj_covar=tensor([0.0175, 0.0181, 0.0112, 0.0220, 0.0264, 0.0117, 0.0166, 0.0178], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:24:13,866 INFO [train.py:901] (2/4) Epoch 15, batch 2150, loss[loss=0.2255, simple_loss=0.3159, pruned_loss=0.06757, over 8449.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2997, pruned_loss=0.07098, over 1614006.80 frames. ], batch size: 27, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:24:37,892 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:24:49,119 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.532e+02 3.093e+02 4.065e+02 1.254e+03, threshold=6.185e+02, percent-clipped=7.0 +2023-02-06 16:24:49,140 INFO [train.py:901] (2/4) Epoch 15, batch 2200, loss[loss=0.1924, simple_loss=0.2786, pruned_loss=0.05311, over 8248.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2988, pruned_loss=0.07012, over 1612349.23 frames. ], batch size: 22, lr: 5.12e-03, grad_scale: 16.0 +2023-02-06 16:25:07,044 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:07,718 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:24,252 INFO [train.py:901] (2/4) Epoch 15, batch 2250, loss[loss=0.2105, simple_loss=0.298, pruned_loss=0.06156, over 8649.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2995, pruned_loss=0.0711, over 1611822.57 frames. 
], batch size: 27, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:25:31,073 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2664, 2.7611, 3.1820, 1.7788, 3.3612, 1.9243, 1.4903, 2.1653], + device='cuda:2'), covar=tensor([0.0735, 0.0298, 0.0203, 0.0597, 0.0334, 0.0792, 0.0790, 0.0437], + device='cuda:2'), in_proj_covar=tensor([0.0424, 0.0362, 0.0309, 0.0417, 0.0346, 0.0504, 0.0377, 0.0388], + device='cuda:2'), out_proj_covar=tensor([1.1748e-04, 9.7619e-05, 8.3126e-05, 1.1313e-04, 9.4066e-05, 1.4695e-04, + 1.0433e-04, 1.0588e-04], device='cuda:2') +2023-02-06 16:25:48,106 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115448.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:48,941 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:58,282 INFO [train.py:901] (2/4) Epoch 15, batch 2300, loss[loss=0.2421, simple_loss=0.3249, pruned_loss=0.07965, over 8191.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3008, pruned_loss=0.07186, over 1618362.69 frames. ], batch size: 23, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:25:58,953 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 2.502e+02 3.175e+02 3.927e+02 9.067e+02, threshold=6.350e+02, percent-clipped=5.0 +2023-02-06 16:26:07,458 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5462, 4.4913, 4.1325, 2.1432, 4.0376, 4.1274, 4.1280, 3.9272], + device='cuda:2'), covar=tensor([0.0659, 0.0489, 0.0921, 0.4292, 0.0813, 0.0965, 0.1156, 0.0834], + device='cuda:2'), in_proj_covar=tensor([0.0491, 0.0405, 0.0407, 0.0506, 0.0405, 0.0405, 0.0392, 0.0353], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 16:26:07,559 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:26:16,645 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9675, 2.3755, 1.9980, 2.9607, 1.5291, 1.5729, 2.1785, 2.4999], + device='cuda:2'), covar=tensor([0.0853, 0.0781, 0.0928, 0.0428, 0.1167, 0.1524, 0.0900, 0.0816], + device='cuda:2'), in_proj_covar=tensor([0.0227, 0.0202, 0.0245, 0.0209, 0.0209, 0.0246, 0.0250, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 16:26:17,334 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9644, 1.5813, 3.2428, 1.3265, 2.2147, 3.4238, 3.6408, 2.9890], + device='cuda:2'), covar=tensor([0.1126, 0.1761, 0.0342, 0.2275, 0.1085, 0.0295, 0.0476, 0.0621], + device='cuda:2'), in_proj_covar=tensor([0.0276, 0.0306, 0.0271, 0.0297, 0.0286, 0.0247, 0.0376, 0.0299], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 16:26:34,675 INFO [train.py:901] (2/4) Epoch 15, batch 2350, loss[loss=0.2034, simple_loss=0.2845, pruned_loss=0.06113, over 8319.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3005, pruned_loss=0.07149, over 1620567.73 frames. 
], batch size: 25, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:02,129 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0594, 1.2221, 1.1586, 0.6679, 1.1894, 0.9929, 0.1219, 1.1862], + device='cuda:2'), covar=tensor([0.0318, 0.0270, 0.0225, 0.0399, 0.0322, 0.0761, 0.0630, 0.0222], + device='cuda:2'), in_proj_covar=tensor([0.0420, 0.0358, 0.0308, 0.0413, 0.0344, 0.0500, 0.0373, 0.0384], + device='cuda:2'), out_proj_covar=tensor([1.1646e-04, 9.6316e-05, 8.2875e-05, 1.1209e-04, 9.3527e-05, 1.4545e-04, + 1.0325e-04, 1.0486e-04], device='cuda:2') +2023-02-06 16:27:03,472 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6999, 1.7787, 1.6025, 1.9040, 1.2977, 1.5104, 1.6944, 1.8889], + device='cuda:2'), covar=tensor([0.0588, 0.0686, 0.0785, 0.0604, 0.0898, 0.0953, 0.0655, 0.0539], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0203, 0.0247, 0.0210, 0.0209, 0.0247, 0.0251, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 16:27:09,319 INFO [train.py:901] (2/4) Epoch 15, batch 2400, loss[loss=0.2616, simple_loss=0.3342, pruned_loss=0.09452, over 8593.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3009, pruned_loss=0.07213, over 1620202.42 frames. ], batch size: 39, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:09,505 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:10,002 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.542e+02 3.047e+02 3.524e+02 9.073e+02, threshold=6.095e+02, percent-clipped=1.0 +2023-02-06 16:27:32,718 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7326, 2.8023, 2.0442, 2.3399, 2.3293, 1.7563, 2.1882, 2.3614], + device='cuda:2'), covar=tensor([0.1406, 0.0280, 0.0940, 0.0618, 0.0619, 0.1276, 0.0871, 0.0823], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0230, 0.0327, 0.0303, 0.0302, 0.0332, 0.0345, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 16:27:39,713 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:45,572 INFO [train.py:901] (2/4) Epoch 15, batch 2450, loss[loss=0.1798, simple_loss=0.255, pruned_loss=0.05231, over 7224.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3, pruned_loss=0.0718, over 1616667.18 frames. 
], batch size: 16, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:56,565 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:59,314 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4161, 1.9693, 3.1295, 1.2629, 2.2326, 1.8444, 1.5660, 2.2039], + device='cuda:2'), covar=tensor([0.1926, 0.2329, 0.0848, 0.4319, 0.1861, 0.3139, 0.2057, 0.2358], + device='cuda:2'), in_proj_covar=tensor([0.0494, 0.0547, 0.0537, 0.0600, 0.0619, 0.0563, 0.0492, 0.0615], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 16:28:11,489 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1862, 1.2089, 1.4999, 1.1350, 0.6472, 1.2659, 1.1100, 0.9326], + device='cuda:2'), covar=tensor([0.0552, 0.1337, 0.1670, 0.1465, 0.0598, 0.1560, 0.0732, 0.0717], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0157, 0.0102, 0.0161, 0.0115, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 16:28:19,898 INFO [train.py:901] (2/4) Epoch 15, batch 2500, loss[loss=0.2299, simple_loss=0.2954, pruned_loss=0.08223, over 7926.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3002, pruned_loss=0.07171, over 1617651.89 frames. ], batch size: 20, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:28:20,557 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.367e+02 2.686e+02 3.697e+02 9.165e+02, threshold=5.372e+02, percent-clipped=5.0 +2023-02-06 16:28:25,817 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.28 vs. limit=5.0 +2023-02-06 16:28:48,720 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9556, 1.8947, 2.5016, 1.6491, 1.2847, 2.4690, 0.3211, 1.4651], + device='cuda:2'), covar=tensor([0.2345, 0.1584, 0.0349, 0.1983, 0.3853, 0.0451, 0.3038, 0.1827], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0177, 0.0110, 0.0214, 0.0258, 0.0115, 0.0163, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:28:49,386 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4561, 1.4093, 1.7370, 1.2570, 0.9249, 1.7381, 0.1145, 1.1778], + device='cuda:2'), covar=tensor([0.2324, 0.1677, 0.0567, 0.1572, 0.3961, 0.0505, 0.2967, 0.1578], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0177, 0.0110, 0.0214, 0.0258, 0.0115, 0.0163, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:28:55,234 INFO [train.py:901] (2/4) Epoch 15, batch 2550, loss[loss=0.2138, simple_loss=0.2952, pruned_loss=0.06622, over 8096.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3, pruned_loss=0.07202, over 1617096.88 frames. ], batch size: 23, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:29:08,940 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115732.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:29:09,205 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.31 vs. 
limit=5.0 +2023-02-06 16:29:09,605 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:29:13,049 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 16:29:30,405 INFO [train.py:901] (2/4) Epoch 15, batch 2600, loss[loss=0.235, simple_loss=0.3104, pruned_loss=0.07979, over 8583.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2983, pruned_loss=0.0712, over 1611312.57 frames. ], batch size: 31, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:29:31,072 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.427e+02 3.148e+02 3.839e+02 8.607e+02, threshold=6.296e+02, percent-clipped=3.0 +2023-02-06 16:29:49,239 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0653, 2.6935, 3.4797, 1.9790, 1.7813, 3.5944, 0.6180, 2.0394], + device='cuda:2'), covar=tensor([0.1481, 0.1108, 0.0313, 0.2284, 0.3498, 0.0346, 0.2933, 0.1554], + device='cuda:2'), in_proj_covar=tensor([0.0170, 0.0176, 0.0110, 0.0213, 0.0257, 0.0115, 0.0162, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:29:59,661 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 16:30:04,163 INFO [train.py:901] (2/4) Epoch 15, batch 2650, loss[loss=0.2274, simple_loss=0.286, pruned_loss=0.08441, over 7220.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2979, pruned_loss=0.07087, over 1612627.06 frames. ], batch size: 16, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:30:08,492 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:27,405 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115844.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:29,373 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115847.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:30,055 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:39,731 INFO [train.py:901] (2/4) Epoch 15, batch 2700, loss[loss=0.2115, simple_loss=0.2888, pruned_loss=0.06717, over 7509.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2969, pruned_loss=0.07022, over 1611842.10 frames. 
], batch size: 18, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:30:40,393 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.337e+02 2.718e+02 3.606e+02 6.832e+02, threshold=5.436e+02, percent-clipped=3.0 +2023-02-06 16:30:45,208 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1054, 1.2478, 4.3061, 1.6783, 3.7909, 3.5928, 3.8475, 3.7007], + device='cuda:2'), covar=tensor([0.0586, 0.4667, 0.0557, 0.3619, 0.1153, 0.0978, 0.0585, 0.0714], + device='cuda:2'), in_proj_covar=tensor([0.0557, 0.0613, 0.0639, 0.0583, 0.0656, 0.0564, 0.0557, 0.0620], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 16:30:50,043 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0952, 2.5657, 2.9075, 1.3813, 2.9812, 1.7099, 1.4732, 2.0661], + device='cuda:2'), covar=tensor([0.0775, 0.0354, 0.0210, 0.0676, 0.0362, 0.0853, 0.0868, 0.0505], + device='cuda:2'), in_proj_covar=tensor([0.0429, 0.0366, 0.0314, 0.0422, 0.0353, 0.0510, 0.0379, 0.0392], + device='cuda:2'), out_proj_covar=tensor([1.1883e-04, 9.8599e-05, 8.4276e-05, 1.1444e-04, 9.6013e-05, 1.4856e-04, + 1.0471e-04, 1.0695e-04], device='cuda:2') +2023-02-06 16:31:12,009 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115910.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 16:31:13,863 INFO [train.py:901] (2/4) Epoch 15, batch 2750, loss[loss=0.2327, simple_loss=0.3185, pruned_loss=0.07339, over 8494.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2974, pruned_loss=0.07043, over 1608296.50 frames. ], batch size: 26, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:31:19,885 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-06 16:31:29,674 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 16:31:49,502 INFO [train.py:901] (2/4) Epoch 15, batch 2800, loss[loss=0.244, simple_loss=0.323, pruned_loss=0.08255, over 8356.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2974, pruned_loss=0.07048, over 1608385.47 frames. ], batch size: 24, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:31:50,150 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.517e+02 2.986e+02 3.677e+02 9.071e+02, threshold=5.972e+02, percent-clipped=5.0 +2023-02-06 16:32:24,937 INFO [train.py:901] (2/4) Epoch 15, batch 2850, loss[loss=0.2243, simple_loss=0.3014, pruned_loss=0.07356, over 8462.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2976, pruned_loss=0.07065, over 1607901.48 frames. ], batch size: 27, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:32:38,094 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:00,834 INFO [train.py:901] (2/4) Epoch 15, batch 2900, loss[loss=0.1786, simple_loss=0.2587, pruned_loss=0.0493, over 8232.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2982, pruned_loss=0.0706, over 1610845.03 frames. 
], batch size: 22, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:33:01,416 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.452e+02 2.959e+02 3.782e+02 6.842e+02, threshold=5.917e+02, percent-clipped=3.0 +2023-02-06 16:33:04,305 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6243, 2.7019, 1.9506, 2.2864, 2.2439, 1.4666, 2.1214, 2.1751], + device='cuda:2'), covar=tensor([0.1630, 0.0386, 0.1122, 0.0660, 0.0744, 0.1550, 0.1018, 0.1011], + device='cuda:2'), in_proj_covar=tensor([0.0345, 0.0228, 0.0321, 0.0299, 0.0297, 0.0326, 0.0340, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 16:33:29,119 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:29,807 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:35,327 INFO [train.py:901] (2/4) Epoch 15, batch 2950, loss[loss=0.2243, simple_loss=0.3077, pruned_loss=0.0705, over 8494.00 frames. ], tot_loss[loss=0.221, simple_loss=0.3, pruned_loss=0.071, over 1619342.89 frames. ], batch size: 26, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:33:36,701 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 16:33:42,119 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:45,602 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:46,299 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:49,703 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:58,563 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9343, 2.6313, 3.7399, 1.6126, 1.6207, 3.7123, 0.5371, 1.8980], + device='cuda:2'), covar=tensor([0.2254, 0.1264, 0.0233, 0.2971, 0.3847, 0.0293, 0.3282, 0.2076], + device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0178, 0.0111, 0.0216, 0.0260, 0.0116, 0.0164, 0.0176], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:34:08,976 INFO [train.py:901] (2/4) Epoch 15, batch 3000, loss[loss=0.2164, simple_loss=0.2855, pruned_loss=0.07359, over 7229.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2989, pruned_loss=0.07031, over 1614405.13 frames. ], batch size: 16, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:34:08,977 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 16:34:21,683 INFO [train.py:935] (2/4) Epoch 15, validation: loss=0.1808, simple_loss=0.2809, pruned_loss=0.04034, over 944034.00 frames. +2023-02-06 16:34:21,684 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 16:34:22,357 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.534e+02 3.127e+02 3.845e+02 7.463e+02, threshold=6.253e+02, percent-clipped=8.0 +2023-02-06 16:34:55,002 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. 
limit=2.0 +2023-02-06 16:34:57,897 INFO [train.py:901] (2/4) Epoch 15, batch 3050, loss[loss=0.1865, simple_loss=0.2766, pruned_loss=0.04818, over 8346.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2977, pruned_loss=0.0696, over 1615874.79 frames. ], batch size: 26, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:35:07,481 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7584, 2.1170, 3.4650, 1.4680, 2.4786, 2.1815, 1.7693, 2.5118], + device='cuda:2'), covar=tensor([0.1587, 0.2145, 0.0572, 0.3907, 0.1597, 0.2672, 0.1792, 0.2050], + device='cuda:2'), in_proj_covar=tensor([0.0497, 0.0553, 0.0541, 0.0600, 0.0624, 0.0567, 0.0495, 0.0619], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 16:35:26,163 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116254.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 16:35:31,947 INFO [train.py:901] (2/4) Epoch 15, batch 3100, loss[loss=0.2199, simple_loss=0.2898, pruned_loss=0.07498, over 7804.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2984, pruned_loss=0.07001, over 1615695.73 frames. ], batch size: 19, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:35:32,570 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.573e+02 3.095e+02 3.865e+02 1.142e+03, threshold=6.190e+02, percent-clipped=3.0 +2023-02-06 16:35:40,327 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 16:35:55,812 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 16:36:06,923 INFO [train.py:901] (2/4) Epoch 15, batch 3150, loss[loss=0.2471, simple_loss=0.3189, pruned_loss=0.0876, over 8240.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2989, pruned_loss=0.07069, over 1611805.96 frames. ], batch size: 22, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:36:20,579 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8055, 1.6172, 1.9956, 1.7077, 1.7737, 1.8710, 1.6107, 0.8030], + device='cuda:2'), covar=tensor([0.4552, 0.3694, 0.1389, 0.2705, 0.2084, 0.2395, 0.1770, 0.3909], + device='cuda:2'), in_proj_covar=tensor([0.0904, 0.0913, 0.0747, 0.0884, 0.0945, 0.0840, 0.0719, 0.0791], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 16:36:27,181 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:36:41,970 INFO [train.py:901] (2/4) Epoch 15, batch 3200, loss[loss=0.26, simple_loss=0.3386, pruned_loss=0.09073, over 8311.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3002, pruned_loss=0.0716, over 1615095.54 frames. 
], batch size: 25, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:36:42,889 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7477, 1.6192, 2.1287, 1.4559, 1.2206, 2.0798, 0.2022, 1.2463], + device='cuda:2'), covar=tensor([0.1857, 0.1640, 0.0361, 0.1497, 0.3399, 0.0444, 0.2729, 0.1553], + device='cuda:2'), in_proj_covar=tensor([0.0171, 0.0176, 0.0109, 0.0213, 0.0256, 0.0114, 0.0161, 0.0174], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 16:36:43,343 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.524e+02 3.304e+02 3.942e+02 1.206e+03, threshold=6.608e+02, percent-clipped=2.0 +2023-02-06 16:36:46,745 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116369.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 16:36:51,223 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:37:16,502 INFO [train.py:901] (2/4) Epoch 15, batch 3250, loss[loss=0.1855, simple_loss=0.2589, pruned_loss=0.05611, over 7791.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3, pruned_loss=0.07175, over 1612645.89 frames. ], batch size: 19, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:37:17,465 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4001, 1.6748, 1.6886, 0.9288, 1.7198, 1.2746, 0.2797, 1.6313], + device='cuda:2'), covar=tensor([0.0360, 0.0257, 0.0230, 0.0386, 0.0281, 0.0748, 0.0634, 0.0203], + device='cuda:2'), in_proj_covar=tensor([0.0422, 0.0361, 0.0311, 0.0416, 0.0347, 0.0504, 0.0373, 0.0384], + device='cuda:2'), out_proj_covar=tensor([1.1662e-04, 9.7238e-05, 8.3609e-05, 1.1273e-04, 9.4500e-05, 1.4688e-04, + 1.0307e-04, 1.0473e-04], device='cuda:2') +2023-02-06 16:37:52,495 INFO [train.py:901] (2/4) Epoch 15, batch 3300, loss[loss=0.2168, simple_loss=0.2979, pruned_loss=0.06785, over 8290.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2997, pruned_loss=0.07144, over 1612408.76 frames. 
], batch size: 48, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:37:53,156 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.388e+02 2.875e+02 3.716e+02 9.209e+02, threshold=5.750e+02, percent-clipped=3.0 +2023-02-06 16:37:53,298 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:37:55,235 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116467.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:38:02,541 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:38:03,353 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7047, 1.9014, 1.5714, 2.3127, 0.9651, 1.4450, 1.6489, 1.8708], + device='cuda:2'), covar=tensor([0.0815, 0.0822, 0.1088, 0.0462, 0.1188, 0.1475, 0.0894, 0.0763], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0203, 0.0250, 0.0212, 0.0211, 0.0249, 0.0257, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 16:38:08,602 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1373, 2.1798, 4.3188, 2.5416, 3.8983, 3.6805, 3.9815, 3.8858], + device='cuda:2'), covar=tensor([0.0642, 0.3550, 0.0691, 0.3231, 0.0877, 0.0863, 0.0566, 0.0553], + device='cuda:2'), in_proj_covar=tensor([0.0554, 0.0608, 0.0633, 0.0577, 0.0654, 0.0560, 0.0551, 0.0609], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 16:38:12,020 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:38:26,446 INFO [train.py:901] (2/4) Epoch 15, batch 3350, loss[loss=0.2285, simple_loss=0.3028, pruned_loss=0.07709, over 7689.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3007, pruned_loss=0.07189, over 1613292.06 frames. ], batch size: 18, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:38:33,230 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:39:00,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7631, 2.1053, 1.5503, 2.5808, 1.1125, 1.4215, 1.7153, 1.9841], + device='cuda:2'), covar=tensor([0.0777, 0.0760, 0.1091, 0.0428, 0.1172, 0.1422, 0.0987, 0.0812], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0200, 0.0248, 0.0210, 0.0209, 0.0246, 0.0253, 0.0212], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 16:39:02,059 INFO [train.py:901] (2/4) Epoch 15, batch 3400, loss[loss=0.2823, simple_loss=0.3312, pruned_loss=0.1167, over 6855.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2999, pruned_loss=0.0716, over 1610486.10 frames. 
], batch size: 71, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:39:02,726 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.566e+02 3.149e+02 4.104e+02 8.501e+02, threshold=6.298e+02, percent-clipped=7.0 +2023-02-06 16:39:14,871 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116582.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:39:22,204 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116593.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:39:36,209 INFO [train.py:901] (2/4) Epoch 15, batch 3450, loss[loss=0.2626, simple_loss=0.315, pruned_loss=0.1051, over 8082.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2996, pruned_loss=0.07163, over 1611742.53 frames. ], batch size: 21, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:39:44,405 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116625.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 16:39:51,716 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:40:01,116 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116650.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 16:40:10,185 INFO [train.py:901] (2/4) Epoch 15, batch 3500, loss[loss=0.1881, simple_loss=0.2721, pruned_loss=0.052, over 7963.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3004, pruned_loss=0.07139, over 1618215.99 frames. ], batch size: 21, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:40:10,857 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.398e+02 2.936e+02 3.935e+02 9.560e+02, threshold=5.871e+02, percent-clipped=3.0 +2023-02-06 16:40:26,417 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116685.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:40:35,611 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 16:40:44,884 INFO [train.py:901] (2/4) Epoch 15, batch 3550, loss[loss=0.1869, simple_loss=0.257, pruned_loss=0.05843, over 7232.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.3, pruned_loss=0.07077, over 1620585.83 frames. ], batch size: 16, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:40:50,943 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:08,579 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:19,405 INFO [train.py:901] (2/4) Epoch 15, batch 3600, loss[loss=0.2208, simple_loss=0.3021, pruned_loss=0.06975, over 8586.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2999, pruned_loss=0.07083, over 1622823.98 frames. 
], batch size: 31, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:41:20,115 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.627e+02 3.005e+02 3.918e+02 8.490e+02, threshold=6.010e+02, percent-clipped=4.0 +2023-02-06 16:41:25,802 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116772.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:47,427 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116800.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:52,897 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116808.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:56,175 INFO [train.py:901] (2/4) Epoch 15, batch 3650, loss[loss=0.252, simple_loss=0.3329, pruned_loss=0.08557, over 8139.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2991, pruned_loss=0.07048, over 1617756.01 frames. ], batch size: 22, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:42:00,908 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116820.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:13,597 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:21,037 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:30,275 INFO [train.py:901] (2/4) Epoch 15, batch 3700, loss[loss=0.246, simple_loss=0.2997, pruned_loss=0.09613, over 7525.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2988, pruned_loss=0.07081, over 1614282.19 frames. ], batch size: 18, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:42:30,504 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:30,956 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.301e+02 2.797e+02 3.414e+02 8.630e+02, threshold=5.595e+02, percent-clipped=3.0 +2023-02-06 16:42:33,143 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:36,563 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 16:42:38,088 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:47,930 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-06 16:43:06,644 INFO [train.py:901] (2/4) Epoch 15, batch 3750, loss[loss=0.2477, simple_loss=0.3288, pruned_loss=0.08331, over 8469.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2981, pruned_loss=0.07032, over 1609509.02 frames. 
], batch size: 29, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:43:13,670 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116923.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:43:23,720 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8412, 2.4804, 4.4279, 1.6292, 3.1103, 2.2792, 2.0364, 2.5191], + device='cuda:2'), covar=tensor([0.1638, 0.2013, 0.0636, 0.3762, 0.1469, 0.2698, 0.1648, 0.2499], + device='cuda:2'), in_proj_covar=tensor([0.0499, 0.0553, 0.0542, 0.0602, 0.0624, 0.0565, 0.0497, 0.0622], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 16:43:25,968 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 16:43:40,810 INFO [train.py:901] (2/4) Epoch 15, batch 3800, loss[loss=0.2435, simple_loss=0.3188, pruned_loss=0.08407, over 7229.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2993, pruned_loss=0.07128, over 1606366.98 frames. ], batch size: 16, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:43:41,462 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.512e+02 2.989e+02 3.697e+02 7.171e+02, threshold=5.977e+02, percent-clipped=7.0 +2023-02-06 16:43:45,791 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-06 16:43:52,420 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116980.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:43:53,917 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:44:15,607 INFO [train.py:901] (2/4) Epoch 15, batch 3850, loss[loss=0.2375, simple_loss=0.3061, pruned_loss=0.08448, over 8671.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2991, pruned_loss=0.07108, over 1611306.49 frames. ], batch size: 34, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:44:23,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4689, 1.7824, 3.0316, 1.2486, 2.0932, 1.9098, 1.5166, 2.1193], + device='cuda:2'), covar=tensor([0.1891, 0.2526, 0.0783, 0.4270, 0.1802, 0.3007, 0.2146, 0.2225], + device='cuda:2'), in_proj_covar=tensor([0.0500, 0.0554, 0.0540, 0.0602, 0.0625, 0.0565, 0.0496, 0.0622], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 16:44:42,555 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 16:44:46,205 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:44:50,964 INFO [train.py:901] (2/4) Epoch 15, batch 3900, loss[loss=0.244, simple_loss=0.3108, pruned_loss=0.08864, over 6958.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2995, pruned_loss=0.07088, over 1613538.16 frames. 
], batch size: 71, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:44:51,618 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 2.428e+02 3.027e+02 3.797e+02 6.654e+02, threshold=6.053e+02, percent-clipped=2.0
+2023-02-06 16:44:53,042 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:03,695 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117081.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:12,951 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117095.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:45:24,921 INFO [train.py:901] (2/4) Epoch 15, batch 3950, loss[loss=0.2182, simple_loss=0.3053, pruned_loss=0.06551, over 8507.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3004, pruned_loss=0.0715, over 1612795.54 frames. ], batch size: 28, lr: 5.09e-03, grad_scale: 8.0
+2023-02-06 16:46:01,086 INFO [train.py:901] (2/4) Epoch 15, batch 4000, loss[loss=0.203, simple_loss=0.2899, pruned_loss=0.05807, over 8475.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2998, pruned_loss=0.0713, over 1610355.29 frames. ], batch size: 27, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:46:01,785 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.473e+02 2.992e+02 3.534e+02 5.115e+02, threshold=5.984e+02, percent-clipped=0.0
+2023-02-06 16:46:01,905 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117164.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:12,553 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117179.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:13,874 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117181.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:29,850 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117204.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:35,577 INFO [train.py:901] (2/4) Epoch 15, batch 4050, loss[loss=0.2316, simple_loss=0.3061, pruned_loss=0.0785, over 8327.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2994, pruned_loss=0.07185, over 1609218.05 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:46:53,306 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:46:53,910 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117239.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:11,627 INFO [train.py:901] (2/4) Epoch 15, batch 4100, loss[loss=0.2512, simple_loss=0.3255, pruned_loss=0.08842, over 8321.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2997, pruned_loss=0.07185, over 1610113.23 frames. ], batch size: 25, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:47:11,816 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:12,278 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.506e+02 3.096e+02 3.742e+02 9.544e+02, threshold=6.191e+02, percent-clipped=4.0
+2023-02-06 16:47:20,186 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4230, 1.6493, 1.6582, 1.1822, 1.7437, 1.3097, 0.2351, 1.6213],
+ device='cuda:2'), covar=tensor([0.0330, 0.0275, 0.0220, 0.0297, 0.0282, 0.0681, 0.0644, 0.0181],
+ device='cuda:2'), in_proj_covar=tensor([0.0424, 0.0366, 0.0314, 0.0422, 0.0349, 0.0510, 0.0377, 0.0387],
+ device='cuda:2'), out_proj_covar=tensor([1.1707e-04, 9.8413e-05, 8.4152e-05, 1.1460e-04, 9.4907e-05, 1.4848e-04,
+ 1.0405e-04, 1.0521e-04], device='cuda:2')
+2023-02-06 16:47:22,938 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117279.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:47:46,643 INFO [train.py:901] (2/4) Epoch 15, batch 4150, loss[loss=0.1883, simple_loss=0.2766, pruned_loss=0.04998, over 8287.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2994, pruned_loss=0.07135, over 1613154.67 frames. ], batch size: 23, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:47:51,112 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0
+2023-02-06 16:48:10,097 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:12,980 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117351.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:21,502 INFO [train.py:901] (2/4) Epoch 15, batch 4200, loss[loss=0.2289, simple_loss=0.3118, pruned_loss=0.07298, over 8185.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2995, pruned_loss=0.07108, over 1612060.58 frames. ], batch size: 23, lr: 5.08e-03, grad_scale: 8.0
+2023-02-06 16:48:22,820 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.404e+02 2.907e+02 3.383e+02 1.073e+03, threshold=5.814e+02, percent-clipped=1.0
+2023-02-06 16:48:31,968 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117376.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:48:40,572 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 16:48:41,600 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.47 vs. limit=2.0
+2023-02-06 16:48:45,700 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.38 vs. limit=5.0
+2023-02-06 16:48:57,040 INFO [train.py:901] (2/4) Epoch 15, batch 4250, loss[loss=0.233, simple_loss=0.3039, pruned_loss=0.08111, over 8456.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2992, pruned_loss=0.07104, over 1610264.14 frames. ], batch size: 27, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:49:03,723 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 16:49:14,101 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:49:30,903 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:49:31,426 INFO [train.py:901] (2/4) Epoch 15, batch 4300, loss[loss=0.1806, simple_loss=0.2589, pruned_loss=0.05117, over 7819.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2994, pruned_loss=0.07119, over 1611711.17 frames. ], batch size: 20, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:49:32,090 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.479e+02 3.115e+02 3.892e+02 7.815e+02, threshold=6.229e+02, percent-clipped=5.0
+2023-02-06 16:49:37,253 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 16:50:07,598 INFO [train.py:901] (2/4) Epoch 15, batch 4350, loss[loss=0.2546, simple_loss=0.3374, pruned_loss=0.08596, over 8433.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2978, pruned_loss=0.07047, over 1608217.98 frames. ], batch size: 27, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:50:23,039 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117535.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:25,369 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.72 vs. limit=2.0
+2023-02-06 16:50:36,325 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 16:50:40,555 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117560.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:42,367 INFO [train.py:901] (2/4) Epoch 15, batch 4400, loss[loss=0.1705, simple_loss=0.2441, pruned_loss=0.04839, over 7433.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2973, pruned_loss=0.07022, over 1604965.42 frames. ], batch size: 17, lr: 5.08e-03, grad_scale: 16.0
+2023-02-06 16:50:43,039 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.383e+02 3.124e+02 3.901e+02 9.506e+02, threshold=6.248e+02, percent-clipped=7.0
+2023-02-06 16:50:55,790 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:50:55,919 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1873, 1.7042, 1.4700, 1.5628, 1.4896, 1.3333, 1.3585, 1.3757],
+ device='cuda:2'), covar=tensor([0.0997, 0.0450, 0.1096, 0.0543, 0.0674, 0.1211, 0.0818, 0.0692],
+ device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0232, 0.0327, 0.0303, 0.0302, 0.0332, 0.0347, 0.0311],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 16:51:17,967 INFO [train.py:901] (2/4) Epoch 15, batch 4450, loss[loss=0.2145, simple_loss=0.2866, pruned_loss=0.07125, over 7705.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2984, pruned_loss=0.07063, over 1610574.17 frames. ], batch size: 18, lr: 5.07e-03, grad_scale: 16.0
+2023-02-06 16:51:17,985 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 16:51:38,328 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.49 vs. limit=5.0
+2023-02-06 16:51:52,101 INFO [train.py:901] (2/4) Epoch 15, batch 4500, loss[loss=0.2393, simple_loss=0.3069, pruned_loss=0.08578, over 8079.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2981, pruned_loss=0.07052, over 1611786.58 frames. ], batch size: 21, lr: 5.07e-03, grad_scale: 16.0
+2023-02-06 16:51:52,740 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.480e+02 2.963e+02 4.043e+02 1.091e+03, threshold=5.927e+02, percent-clipped=5.0
+2023-02-06 16:52:11,215 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:52:11,859 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 16:52:16,205 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:52:27,010 INFO [train.py:901] (2/4) Epoch 15, batch 4550, loss[loss=0.1927, simple_loss=0.2694, pruned_loss=0.05804, over 7801.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2975, pruned_loss=0.07032, over 1612768.49 frames. ], batch size: 20, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:53:02,114 INFO [train.py:901] (2/4) Epoch 15, batch 4600, loss[loss=0.2369, simple_loss=0.3118, pruned_loss=0.08101, over 8460.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2975, pruned_loss=0.07009, over 1610241.85 frames. ], batch size: 27, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:53:03,483 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.311e+02 2.848e+02 3.671e+02 5.923e+02, threshold=5.697e+02, percent-clipped=0.0
+2023-02-06 16:53:27,493 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7684, 4.6815, 4.3113, 3.0128, 4.2149, 4.3004, 4.4117, 4.0565],
+ device='cuda:2'), covar=tensor([0.0633, 0.0516, 0.1021, 0.3371, 0.0763, 0.0941, 0.1265, 0.0726],
+ device='cuda:2'), in_proj_covar=tensor([0.0486, 0.0403, 0.0407, 0.0505, 0.0398, 0.0407, 0.0389, 0.0350],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 16:53:31,670 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117806.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:53:35,833 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.63 vs. limit=5.0
+2023-02-06 16:53:36,051 INFO [train.py:901] (2/4) Epoch 15, batch 4650, loss[loss=0.2209, simple_loss=0.3014, pruned_loss=0.07015, over 8281.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2979, pruned_loss=0.07087, over 1611500.50 frames. ], batch size: 23, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:53:44,243 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7048, 1.3314, 3.4019, 1.5457, 2.3526, 3.7921, 3.8953, 3.2091],
+ device='cuda:2'), covar=tensor([0.1304, 0.1888, 0.0359, 0.2179, 0.1079, 0.0240, 0.0510, 0.0632],
+ device='cuda:2'), in_proj_covar=tensor([0.0277, 0.0306, 0.0270, 0.0300, 0.0288, 0.0246, 0.0376, 0.0298],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 16:53:57,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2284, 1.2387, 1.5604, 1.1950, 0.7326, 1.2958, 1.2235, 1.1282],
+ device='cuda:2'), covar=tensor([0.0551, 0.1310, 0.1625, 0.1421, 0.0578, 0.1487, 0.0660, 0.0646],
+ device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0156, 0.0101, 0.0162, 0.0114, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 16:54:11,638 INFO [train.py:901] (2/4) Epoch 15, batch 4700, loss[loss=0.1971, simple_loss=0.2733, pruned_loss=0.06049, over 7814.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2986, pruned_loss=0.0712, over 1612819.09 frames. ], batch size: 20, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:54:12,892 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.509e+02 3.109e+02 4.231e+02 8.316e+02, threshold=6.217e+02, percent-clipped=12.0
+2023-02-06 16:54:46,541 INFO [train.py:901] (2/4) Epoch 15, batch 4750, loss[loss=0.1954, simple_loss=0.2701, pruned_loss=0.06029, over 7655.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2995, pruned_loss=0.07217, over 1613568.48 frames. ], batch size: 19, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:55:11,979 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 16:55:15,276 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 16:55:16,043 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117954.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:55:22,504 INFO [train.py:901] (2/4) Epoch 15, batch 4800, loss[loss=0.2428, simple_loss=0.3298, pruned_loss=0.07787, over 8617.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2985, pruned_loss=0.07166, over 1610109.35 frames. ], batch size: 31, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:55:23,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.482e+02 3.121e+02 4.555e+02 1.692e+03, threshold=6.242e+02, percent-clipped=8.0
+2023-02-06 16:55:33,838 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117979.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:55:38,254 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7038, 4.6804, 4.2105, 2.0450, 4.2153, 4.2122, 4.3159, 4.0541],
+ device='cuda:2'), covar=tensor([0.0767, 0.0551, 0.1080, 0.4399, 0.0654, 0.0947, 0.1185, 0.0732],
+ device='cuda:2'), in_proj_covar=tensor([0.0482, 0.0400, 0.0405, 0.0499, 0.0396, 0.0403, 0.0384, 0.0348],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 16:55:38,472 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 16:55:57,733 INFO [train.py:901] (2/4) Epoch 15, batch 4850, loss[loss=0.2092, simple_loss=0.2763, pruned_loss=0.07106, over 8079.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2986, pruned_loss=0.07196, over 1607249.34 frames. ], batch size: 21, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:56:07,039 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 16:56:29,211 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118058.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:56:31,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:56:32,449 INFO [train.py:901] (2/4) Epoch 15, batch 4900, loss[loss=0.2021, simple_loss=0.2718, pruned_loss=0.06622, over 7655.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2972, pruned_loss=0.07078, over 1609531.92 frames. ], batch size: 19, lr: 5.07e-03, grad_scale: 8.0
+2023-02-06 16:56:33,728 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.453e+02 2.951e+02 3.688e+02 9.605e+02, threshold=5.903e+02, percent-clipped=5.0
+2023-02-06 16:56:50,257 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:57:07,602 INFO [train.py:901] (2/4) Epoch 15, batch 4950, loss[loss=0.2099, simple_loss=0.289, pruned_loss=0.06547, over 8452.00 frames. ], tot_loss[loss=0.219, simple_loss=0.297, pruned_loss=0.07051, over 1609984.61 frames. ], batch size: 27, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:57:29,397 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8894, 1.6793, 3.3849, 1.5373, 2.2517, 3.7341, 3.7505, 3.2017],
+ device='cuda:2'), covar=tensor([0.1154, 0.1545, 0.0356, 0.1986, 0.1094, 0.0229, 0.0518, 0.0552],
+ device='cuda:2'), in_proj_covar=tensor([0.0274, 0.0303, 0.0268, 0.0299, 0.0286, 0.0245, 0.0373, 0.0297],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 16:57:42,120 INFO [train.py:901] (2/4) Epoch 15, batch 5000, loss[loss=0.2047, simple_loss=0.2726, pruned_loss=0.06845, over 7653.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2983, pruned_loss=0.07116, over 1615691.53 frames. ], batch size: 19, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:57:43,379 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.421e+02 2.910e+02 3.813e+02 6.624e+02, threshold=5.820e+02, percent-clipped=4.0
+2023-02-06 16:57:58,599 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118186.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:58:17,604 INFO [train.py:901] (2/4) Epoch 15, batch 5050, loss[loss=0.2189, simple_loss=0.3013, pruned_loss=0.06826, over 8667.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2977, pruned_loss=0.07078, over 1613922.35 frames. ], batch size: 39, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:58:43,428 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 16:58:46,024 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0
+2023-02-06 16:58:46,883 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.86 vs. limit=5.0
+2023-02-06 16:58:52,546 INFO [train.py:901] (2/4) Epoch 15, batch 5100, loss[loss=0.2273, simple_loss=0.2867, pruned_loss=0.08397, over 7793.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2981, pruned_loss=0.07113, over 1614292.88 frames. ], batch size: 19, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:58:53,819 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.591e+02 3.125e+02 3.877e+02 7.785e+02, threshold=6.249e+02, percent-clipped=4.0
+2023-02-06 16:59:09,115 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118287.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:59:23,816 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118307.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 16:59:27,781 INFO [train.py:901] (2/4) Epoch 15, batch 5150, loss[loss=0.2291, simple_loss=0.2979, pruned_loss=0.08013, over 8480.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2975, pruned_loss=0.07078, over 1614974.87 frames. ], batch size: 29, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 16:59:51,101 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:00:02,425 INFO [train.py:901] (2/4) Epoch 15, batch 5200, loss[loss=0.1969, simple_loss=0.2842, pruned_loss=0.05487, over 8472.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2993, pruned_loss=0.07185, over 1617200.87 frames. ], batch size: 27, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:00:03,700 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.269e+02 2.811e+02 3.673e+02 9.088e+02, threshold=5.623e+02, percent-clipped=2.0
+2023-02-06 17:00:10,163 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4385, 1.5556, 2.1734, 1.3297, 1.5372, 1.7023, 1.4962, 1.4759],
+ device='cuda:2'), covar=tensor([0.1795, 0.2339, 0.0811, 0.3853, 0.1746, 0.3066, 0.2041, 0.1984],
+ device='cuda:2'), in_proj_covar=tensor([0.0497, 0.0552, 0.0535, 0.0604, 0.0625, 0.0564, 0.0494, 0.0616],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 17:00:29,678 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:00:37,913 INFO [train.py:901] (2/4) Epoch 15, batch 5250, loss[loss=0.2118, simple_loss=0.2913, pruned_loss=0.06612, over 8469.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2999, pruned_loss=0.07184, over 1612837.62 frames. ], batch size: 29, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:00:46,145 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 17:01:12,973 INFO [train.py:901] (2/4) Epoch 15, batch 5300, loss[loss=0.1981, simple_loss=0.2897, pruned_loss=0.05324, over 8295.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2998, pruned_loss=0.07134, over 1606779.21 frames. ], batch size: 23, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:01:14,342 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.534e+02 2.995e+02 3.765e+02 8.916e+02, threshold=5.991e+02, percent-clipped=4.0
+2023-02-06 17:01:47,930 INFO [train.py:901] (2/4) Epoch 15, batch 5350, loss[loss=0.2099, simple_loss=0.2898, pruned_loss=0.06499, over 8607.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3001, pruned_loss=0.07183, over 1606158.57 frames. ], batch size: 39, lr: 5.06e-03, grad_scale: 8.0
+2023-02-06 17:01:50,885 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118517.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:02:01,053 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118530.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:02:11,013 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0
+2023-02-06 17:02:24,467 INFO [train.py:901] (2/4) Epoch 15, batch 5400, loss[loss=0.2067, simple_loss=0.2836, pruned_loss=0.06491, over 7454.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3004, pruned_loss=0.07174, over 1608526.77 frames. ], batch size: 17, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:02:25,794 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.478e+02 2.903e+02 3.717e+02 8.291e+02, threshold=5.806e+02, percent-clipped=5.0
+2023-02-06 17:02:27,340 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1893, 1.3725, 1.2647, 1.8401, 0.7582, 1.0805, 1.3528, 1.4576],
+ device='cuda:2'), covar=tensor([0.1096, 0.0891, 0.1270, 0.0547, 0.1178, 0.1699, 0.0785, 0.0770],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0204, 0.0249, 0.0212, 0.0211, 0.0246, 0.0253, 0.0213],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 17:02:58,969 INFO [train.py:901] (2/4) Epoch 15, batch 5450, loss[loss=0.2045, simple_loss=0.2906, pruned_loss=0.05922, over 8469.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3002, pruned_loss=0.07181, over 1607960.10 frames. ], batch size: 25, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:03:11,221 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118631.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:21,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:24,941 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118649.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:26,157 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:03:31,677 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3057, 1.3992, 1.3313, 1.7680, 0.7443, 1.1465, 1.2014, 1.3948],
+ device='cuda:2'), covar=tensor([0.0940, 0.0895, 0.1102, 0.0570, 0.1220, 0.1590, 0.0956, 0.0864],
+ device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0204, 0.0250, 0.0213, 0.0213, 0.0248, 0.0255, 0.0214],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 17:03:34,862 INFO [train.py:901] (2/4) Epoch 15, batch 5500, loss[loss=0.2852, simple_loss=0.3416, pruned_loss=0.1144, over 8604.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2992, pruned_loss=0.07117, over 1611341.71 frames. ], batch size: 31, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:03:36,244 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.592e+02 3.113e+02 3.610e+02 8.755e+02, threshold=6.227e+02, percent-clipped=2.0
+2023-02-06 17:03:38,406 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 17:03:54,400 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:09,096 INFO [train.py:901] (2/4) Epoch 15, batch 5550, loss[loss=0.217, simple_loss=0.3068, pruned_loss=0.0636, over 8468.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2992, pruned_loss=0.07133, over 1612328.01 frames. ], batch size: 25, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:04:32,307 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118746.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:37,106 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3176, 1.3081, 4.5071, 1.6719, 3.9366, 3.7373, 4.0669, 3.9300],
+ device='cuda:2'), covar=tensor([0.0674, 0.4981, 0.0525, 0.3841, 0.1193, 0.1028, 0.0567, 0.0751],
+ device='cuda:2'), in_proj_covar=tensor([0.0551, 0.0599, 0.0632, 0.0572, 0.0649, 0.0552, 0.0547, 0.0610],
+ device='cuda:2'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 17:04:44,988 INFO [train.py:901] (2/4) Epoch 15, batch 5600, loss[loss=0.2283, simple_loss=0.3071, pruned_loss=0.07477, over 8289.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2999, pruned_loss=0.07145, over 1612811.24 frames. ], batch size: 23, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:04:46,300 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.537e+02 3.218e+02 3.925e+02 9.216e+02, threshold=6.435e+02, percent-clipped=4.0
+2023-02-06 17:04:47,204 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118766.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:04:52,579 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118773.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:05:09,076 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118798.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:05:14,505 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118806.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:05:19,155 INFO [train.py:901] (2/4) Epoch 15, batch 5650, loss[loss=0.1938, simple_loss=0.2869, pruned_loss=0.05033, over 8292.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2991, pruned_loss=0.07095, over 1611454.96 frames. ], batch size: 23, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:05:43,436 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 17:05:53,489 INFO [train.py:901] (2/4) Epoch 15, batch 5700, loss[loss=0.1851, simple_loss=0.2723, pruned_loss=0.04896, over 7530.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2981, pruned_loss=0.07042, over 1610590.10 frames. ], batch size: 18, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:05:54,817 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.491e+02 2.972e+02 3.726e+02 7.690e+02, threshold=5.944e+02, percent-clipped=5.0
+2023-02-06 17:05:56,526 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-06 17:06:21,155 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118901.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:06:28,617 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5069, 2.8938, 2.3523, 4.0248, 1.5136, 2.0855, 2.1837, 3.1358],
+ device='cuda:2'), covar=tensor([0.0667, 0.0805, 0.0886, 0.0248, 0.1246, 0.1283, 0.1144, 0.0738],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0202, 0.0249, 0.0212, 0.0212, 0.0246, 0.0254, 0.0212],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 17:06:29,109 INFO [train.py:901] (2/4) Epoch 15, batch 5750, loss[loss=0.1846, simple_loss=0.2673, pruned_loss=0.05093, over 8254.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2984, pruned_loss=0.07097, over 1606867.48 frames. ], batch size: 22, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:06:31,384 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0692, 2.2549, 1.8169, 2.8166, 1.1684, 1.5565, 1.8653, 2.2951],
+ device='cuda:2'), covar=tensor([0.0696, 0.0802, 0.0973, 0.0366, 0.1217, 0.1350, 0.1010, 0.0690],
+ device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0203, 0.0249, 0.0212, 0.0212, 0.0246, 0.0254, 0.0212],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 17:06:38,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118926.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:06:46,382 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 17:06:54,645 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8226, 5.9244, 5.1983, 2.2512, 5.2908, 5.6956, 5.4865, 5.3252],
+ device='cuda:2'), covar=tensor([0.0696, 0.0497, 0.1175, 0.4682, 0.0737, 0.0824, 0.1110, 0.0636],
+ device='cuda:2'), in_proj_covar=tensor([0.0486, 0.0400, 0.0407, 0.0502, 0.0395, 0.0408, 0.0387, 0.0353],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 17:07:03,693 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2910, 1.2830, 1.6356, 1.2249, 0.7108, 1.3332, 1.2530, 1.2425],
+ device='cuda:2'), covar=tensor([0.0534, 0.1278, 0.1560, 0.1399, 0.0563, 0.1493, 0.0676, 0.0644],
+ device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0156, 0.0102, 0.0162, 0.0114, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 17:07:04,189 INFO [train.py:901] (2/4) Epoch 15, batch 5800, loss[loss=0.205, simple_loss=0.2771, pruned_loss=0.06643, over 7445.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2993, pruned_loss=0.07132, over 1609465.31 frames. ], batch size: 17, lr: 5.05e-03, grad_scale: 8.0
+2023-02-06 17:07:05,529 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.317e+02 2.944e+02 4.100e+02 6.996e+02, threshold=5.887e+02, percent-clipped=4.0
+2023-02-06 17:07:26,178 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118993.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:07:32,110 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119002.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:07:39,875 INFO [train.py:901] (2/4) Epoch 15, batch 5850, loss[loss=0.1919, simple_loss=0.2828, pruned_loss=0.05048, over 8029.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2996, pruned_loss=0.07102, over 1612946.28 frames. ], batch size: 22, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:07:46,175 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119022.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:07:49,456 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119027.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:02,803 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:13,676 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:14,145 INFO [train.py:901] (2/4) Epoch 15, batch 5900, loss[loss=0.1853, simple_loss=0.2608, pruned_loss=0.05487, over 7807.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2986, pruned_loss=0.07049, over 1609490.31 frames. ], batch size: 20, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:08:15,366 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.486e+02 2.938e+02 3.942e+02 7.909e+02, threshold=5.877e+02, percent-clipped=6.0
+2023-02-06 17:08:27,002 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-06 17:08:30,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:34,141 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119093.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:44,790 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:46,719 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119111.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:08:47,873 INFO [train.py:901] (2/4) Epoch 15, batch 5950, loss[loss=0.2391, simple_loss=0.3207, pruned_loss=0.07876, over 8358.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2992, pruned_loss=0.07069, over 1615175.08 frames. ], batch size: 24, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:09:22,903 INFO [train.py:901] (2/4) Epoch 15, batch 6000, loss[loss=0.2375, simple_loss=0.3108, pruned_loss=0.08213, over 8366.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2987, pruned_loss=0.07027, over 1611782.42 frames. ], batch size: 24, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:09:22,903 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 17:09:30,930 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6924, 1.6260, 2.7898, 1.3990, 2.0171, 2.9215, 3.0533, 2.5217],
+ device='cuda:2'), covar=tensor([0.1079, 0.1442, 0.0334, 0.2066, 0.0919, 0.0331, 0.0615, 0.0657],
+ device='cuda:2'), in_proj_covar=tensor([0.0276, 0.0306, 0.0268, 0.0298, 0.0286, 0.0247, 0.0374, 0.0298],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:09:35,678 INFO [train.py:935] (2/4) Epoch 15, validation: loss=0.181, simple_loss=0.2808, pruned_loss=0.04056, over 944034.00 frames.
+2023-02-06 17:09:35,679 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 17:09:37,100 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.578e+02 3.120e+02 3.956e+02 1.218e+03, threshold=6.240e+02, percent-clipped=5.0
+2023-02-06 17:09:38,918 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 17:10:10,487 INFO [train.py:901] (2/4) Epoch 15, batch 6050, loss[loss=0.2253, simple_loss=0.2927, pruned_loss=0.07893, over 7525.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2977, pruned_loss=0.0699, over 1613129.37 frames. ], batch size: 18, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:10:32,944 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.27 vs. limit=5.0
+2023-02-06 17:10:44,323 INFO [train.py:901] (2/4) Epoch 15, batch 6100, loss[loss=0.1757, simple_loss=0.2534, pruned_loss=0.04898, over 8078.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2967, pruned_loss=0.06944, over 1609957.50 frames. ], batch size: 21, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:10:45,656 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.463e+02 3.114e+02 4.132e+02 8.492e+02, threshold=6.229e+02, percent-clipped=7.0
+2023-02-06 17:11:00,074 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4845, 2.7722, 1.7648, 2.1883, 2.3028, 1.5426, 2.0115, 2.1283],
+ device='cuda:2'), covar=tensor([0.1561, 0.0388, 0.1322, 0.0729, 0.0706, 0.1602, 0.1108, 0.0931],
+ device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0234, 0.0329, 0.0304, 0.0305, 0.0335, 0.0351, 0.0318],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:11:18,256 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 17:11:20,322 INFO [train.py:901] (2/4) Epoch 15, batch 6150, loss[loss=0.2425, simple_loss=0.3148, pruned_loss=0.08511, over 7647.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2949, pruned_loss=0.06901, over 1603017.95 frames. ], batch size: 19, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:11:54,707 INFO [train.py:901] (2/4) Epoch 15, batch 6200, loss[loss=0.2508, simple_loss=0.3254, pruned_loss=0.08805, over 8597.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2958, pruned_loss=0.0692, over 1605004.50 frames. ], batch size: 31, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:11:55,641 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119364.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:11:56,081 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.347e+02 3.204e+02 3.871e+02 7.576e+02, threshold=6.408e+02, percent-clipped=2.0
+2023-02-06 17:12:14,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:12:30,378 INFO [train.py:901] (2/4) Epoch 15, batch 6250, loss[loss=0.2862, simple_loss=0.3402, pruned_loss=0.1161, over 8678.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.297, pruned_loss=0.06994, over 1608211.66 frames. ], batch size: 49, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:12:47,167 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:12:59,471 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119455.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:13:04,842 INFO [train.py:901] (2/4) Epoch 15, batch 6300, loss[loss=0.1806, simple_loss=0.2512, pruned_loss=0.05502, over 7545.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2968, pruned_loss=0.07009, over 1606895.92 frames. ], batch size: 18, lr: 5.04e-03, grad_scale: 8.0
+2023-02-06 17:13:06,138 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.517e+02 3.087e+02 3.932e+02 1.134e+03, threshold=6.173e+02, percent-clipped=3.0
+2023-02-06 17:13:41,043 INFO [train.py:901] (2/4) Epoch 15, batch 6350, loss[loss=0.2082, simple_loss=0.2912, pruned_loss=0.06259, over 7927.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2973, pruned_loss=0.07018, over 1610182.02 frames. ], batch size: 20, lr: 5.03e-03, grad_scale: 8.0
+2023-02-06 17:13:53,347 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0
+2023-02-06 17:13:53,782 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:14:03,580 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-06 17:14:07,837 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119552.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:14:15,082 INFO [train.py:901] (2/4) Epoch 15, batch 6400, loss[loss=0.1642, simple_loss=0.2537, pruned_loss=0.03733, over 7974.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2968, pruned_loss=0.06989, over 1612495.64 frames. ], batch size: 21, lr: 5.03e-03, grad_scale: 8.0
+2023-02-06 17:14:16,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 3.023e+02 3.752e+02 7.818e+02, threshold=6.047e+02, percent-clipped=4.0
+2023-02-06 17:14:20,027 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119570.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:14:49,975 INFO [train.py:901] (2/4) Epoch 15, batch 6450, loss[loss=0.2119, simple_loss=0.3007, pruned_loss=0.06152, over 8368.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2964, pruned_loss=0.06992, over 1608516.12 frames. ], batch size: 24, lr: 5.03e-03, grad_scale: 8.0
+2023-02-06 17:15:24,235 INFO [train.py:901] (2/4) Epoch 15, batch 6500, loss[loss=0.2177, simple_loss=0.3054, pruned_loss=0.06504, over 8711.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2969, pruned_loss=0.06982, over 1614493.26 frames. ], batch size: 34, lr: 5.03e-03, grad_scale: 8.0
+2023-02-06 17:15:25,561 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.561e+02 2.888e+02 3.578e+02 6.995e+02, threshold=5.776e+02, percent-clipped=4.0
+2023-02-06 17:15:38,170 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 17:15:38,582 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119683.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:15:53,105 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0
+2023-02-06 17:15:58,707 INFO [train.py:901] (2/4) Epoch 15, batch 6550, loss[loss=0.2486, simple_loss=0.319, pruned_loss=0.08906, over 8463.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2964, pruned_loss=0.06935, over 1614172.12 frames. ], batch size: 27, lr: 5.03e-03, grad_scale: 16.0
+2023-02-06 17:16:23,985 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 17:16:27,862 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9264, 3.9328, 2.2456, 2.6493, 2.7857, 1.9691, 2.6152, 2.9356],
+ device='cuda:2'), covar=tensor([0.1608, 0.0275, 0.1171, 0.0823, 0.0745, 0.1393, 0.1053, 0.0965],
+ device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0232, 0.0328, 0.0304, 0.0302, 0.0334, 0.0346, 0.0315],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:16:29,728 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 17:16:34,455 INFO [train.py:901] (2/4) Epoch 15, batch 6600, loss[loss=0.1865, simple_loss=0.2642, pruned_loss=0.05444, over 7529.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2966, pruned_loss=0.06919, over 1616560.06 frames. ], batch size: 18, lr: 5.03e-03, grad_scale: 16.0
+2023-02-06 17:16:35,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.456e+02 2.938e+02 3.854e+02 9.901e+02, threshold=5.877e+02, percent-clipped=5.0
+2023-02-06 17:16:47,907 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 17:16:51,495 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0
+2023-02-06 17:17:05,454 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119808.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:17:08,628 INFO [train.py:901] (2/4) Epoch 15, batch 6650, loss[loss=0.2047, simple_loss=0.287, pruned_loss=0.0612, over 8359.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2969, pruned_loss=0.06944, over 1617093.55 frames. ], batch size: 26, lr: 5.03e-03, grad_scale: 16.0
+2023-02-06 17:17:17,684 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119826.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:17:22,306 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:17:31,209 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-02-06 17:17:36,362 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119851.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:17:44,400 INFO [train.py:901] (2/4) Epoch 15, batch 6700, loss[loss=0.2237, simple_loss=0.3121, pruned_loss=0.06764, over 8322.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2971, pruned_loss=0.06925, over 1618265.06 frames. ], batch size: 25, lr: 5.03e-03, grad_scale: 16.0
+2023-02-06 17:17:45,753 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.601e+02 2.951e+02 3.516e+02 8.618e+02, threshold=5.902e+02, percent-clipped=2.0
+2023-02-06 17:17:53,452 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:18:19,554 INFO [train.py:901] (2/4) Epoch 15, batch 6750, loss[loss=0.2811, simple_loss=0.346, pruned_loss=0.1081, over 8631.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2987, pruned_loss=0.07053, over 1618592.51 frames. ], batch size: 34, lr: 5.03e-03, grad_scale: 16.0
+2023-02-06 17:18:33,558 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6275, 2.6369, 1.7106, 2.3488, 2.3200, 1.3857, 2.1000, 2.1400],
+ device='cuda:2'), covar=tensor([0.1420, 0.0375, 0.1209, 0.0588, 0.0663, 0.1560, 0.1008, 0.0945],
+ device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0232, 0.0326, 0.0304, 0.0302, 0.0332, 0.0345, 0.0314],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:18:55,244 INFO [train.py:901] (2/4) Epoch 15, batch 6800, loss[loss=0.2767, simple_loss=0.3332, pruned_loss=0.1101, over 7964.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2977, pruned_loss=0.06984, over 1616775.29 frames. ], batch size: 21, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:18:57,360 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.559e+02 3.032e+02 3.835e+02 7.300e+02, threshold=6.064e+02, percent-clipped=2.0
+2023-02-06 17:19:03,572 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 17:19:15,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119991.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:19:32,101 INFO [train.py:901] (2/4) Epoch 15, batch 6850, loss[loss=0.2512, simple_loss=0.3296, pruned_loss=0.08637, over 8249.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2992, pruned_loss=0.07043, over 1619359.54 frames. ], batch size: 22, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:19:41,653 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=120027.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:19:49,109 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-06 17:19:53,397 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 17:20:06,210 INFO [train.py:901] (2/4) Epoch 15, batch 6900, loss[loss=0.2187, simple_loss=0.3033, pruned_loss=0.0671, over 8723.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2981, pruned_loss=0.07, over 1614660.24 frames. ], batch size: 49, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:20:07,530 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.397e+02 2.973e+02 3.506e+02 9.980e+02, threshold=5.947e+02, percent-clipped=2.0
+2023-02-06 17:20:40,404 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1899, 2.1737, 1.5464, 1.8508, 1.7972, 1.3260, 1.5672, 1.5979],
+ device='cuda:2'), covar=tensor([0.1364, 0.0377, 0.1194, 0.0606, 0.0720, 0.1407, 0.1021, 0.0811],
+ device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0232, 0.0326, 0.0305, 0.0303, 0.0331, 0.0345, 0.0314],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:20:42,259 INFO [train.py:901] (2/4) Epoch 15, batch 6950, loss[loss=0.2426, simple_loss=0.3156, pruned_loss=0.08484, over 8621.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2972, pruned_loss=0.06949, over 1610134.01 frames. ], batch size: 34, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:20:59,737 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1517, 1.6324, 3.3522, 1.6676, 2.3667, 3.7538, 3.7945, 3.2029],
+ device='cuda:2'), covar=tensor([0.1100, 0.1718, 0.0373, 0.2005, 0.1153, 0.0232, 0.0526, 0.0571],
+ device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0310, 0.0275, 0.0304, 0.0290, 0.0252, 0.0383, 0.0304],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:21:02,420 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=120142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:21:03,577 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 17:21:16,268 INFO [train.py:901] (2/4) Epoch 15, batch 7000, loss[loss=0.2529, simple_loss=0.3241, pruned_loss=0.09081, over 8140.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2966, pruned_loss=0.069, over 1610734.47 frames. ], batch size: 22, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:21:17,612 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.303e+02 2.879e+02 3.620e+02 6.461e+02, threshold=5.757e+02, percent-clipped=3.0
+2023-02-06 17:21:51,887 INFO [train.py:901] (2/4) Epoch 15, batch 7050, loss[loss=0.2215, simple_loss=0.3118, pruned_loss=0.0656, over 8460.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2963, pruned_loss=0.06923, over 1605996.84 frames. ], batch size: 25, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:22:15,023 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:22:26,158 INFO [train.py:901] (2/4) Epoch 15, batch 7100, loss[loss=0.2056, simple_loss=0.2892, pruned_loss=0.06097, over 8606.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2976, pruned_loss=0.07009, over 1607280.97 frames. ], batch size: 49, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:22:27,479 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.428e+02 3.078e+02 4.147e+02 9.225e+02, threshold=6.156e+02, percent-clipped=10.0
+2023-02-06 17:22:32,371 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120272.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:22:37,703 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.1285, 1.8179, 6.1754, 2.3440, 5.5677, 5.2871, 5.7877, 5.6661],
+ device='cuda:2'), covar=tensor([0.0399, 0.4111, 0.0254, 0.3028, 0.0795, 0.0710, 0.0409, 0.0409],
+ device='cuda:2'), in_proj_covar=tensor([0.0559, 0.0611, 0.0636, 0.0582, 0.0657, 0.0563, 0.0555, 0.0616],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 17:23:00,844 INFO [train.py:901] (2/4) Epoch 15, batch 7150, loss[loss=0.2838, simple_loss=0.3359, pruned_loss=0.1158, over 6893.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2969, pruned_loss=0.07008, over 1604164.07 frames. ], batch size: 71, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:23:22,065 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1932, 1.2146, 3.3193, 1.0390, 2.9197, 2.7542, 3.0301, 2.9058],
+ device='cuda:2'), covar=tensor([0.0849, 0.4236, 0.0845, 0.4063, 0.1503, 0.1149, 0.0735, 0.0940],
+ device='cuda:2'), in_proj_covar=tensor([0.0567, 0.0618, 0.0642, 0.0590, 0.0666, 0.0567, 0.0560, 0.0620],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 17:23:22,132 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9982, 1.6640, 1.3740, 1.4961, 1.3599, 1.2049, 1.2125, 1.2672],
+ device='cuda:2'), covar=tensor([0.1121, 0.0434, 0.1220, 0.0559, 0.0708, 0.1384, 0.0913, 0.0769],
+ device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0232, 0.0324, 0.0305, 0.0302, 0.0330, 0.0344, 0.0312],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:23:35,464 INFO [train.py:901] (2/4) Epoch 15, batch 7200, loss[loss=0.2528, simple_loss=0.3207, pruned_loss=0.09247, over 8433.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2971, pruned_loss=0.07021, over 1604415.52 frames. ], batch size: 49, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:23:36,812 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.418e+02 2.853e+02 3.692e+02 6.645e+02, threshold=5.707e+02, percent-clipped=2.0
+2023-02-06 17:23:44,759 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 17:23:56,966 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-06 17:24:00,215 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120398.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:24:08,687 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-06 17:24:10,194 INFO [train.py:901] (2/4) Epoch 15, batch 7250, loss[loss=0.1964, simple_loss=0.2799, pruned_loss=0.05641, over 8029.00 frames. ], tot_loss[loss=0.218, simple_loss=0.297, pruned_loss=0.06953, over 1608627.83 frames. ], batch size: 22, lr: 5.02e-03, grad_scale: 16.0
+2023-02-06 17:24:17,175 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0704, 1.4980, 1.7290, 1.4440, 0.9003, 1.5794, 1.7335, 1.5343],
+ device='cuda:2'), covar=tensor([0.0481, 0.1220, 0.1677, 0.1356, 0.0584, 0.1383, 0.0652, 0.0631],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0157, 0.0100, 0.0162, 0.0114, 0.0139],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 17:24:17,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120423.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:24:45,987 INFO [train.py:901] (2/4) Epoch 15, batch 7300, loss[loss=0.206, simple_loss=0.2962, pruned_loss=0.05786, over 8587.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2972, pruned_loss=0.06978, over 1611479.23 frames. ], batch size: 34, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:24:47,338 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.423e+02 2.925e+02 3.483e+02 5.889e+02, threshold=5.849e+02, percent-clipped=3.0
+2023-02-06 17:24:55,830 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.73 vs. limit=5.0
+2023-02-06 17:25:20,536 INFO [train.py:901] (2/4) Epoch 15, batch 7350, loss[loss=0.175, simple_loss=0.256, pruned_loss=0.04704, over 7657.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2983, pruned_loss=0.07009, over 1613743.44 frames. ], batch size: 19, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:25:45,380 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 17:25:56,238 INFO [train.py:901] (2/4) Epoch 15, batch 7400, loss[loss=0.1748, simple_loss=0.2574, pruned_loss=0.04615, over 7782.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.299, pruned_loss=0.0704, over 1618854.00 frames. ], batch size: 19, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:25:57,545 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.487e+02 3.190e+02 4.160e+02 9.613e+02, threshold=6.380e+02, percent-clipped=9.0
+2023-02-06 17:26:04,620 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 17:26:30,896 INFO [train.py:901] (2/4) Epoch 15, batch 7450, loss[loss=0.2357, simple_loss=0.3069, pruned_loss=0.0822, over 8520.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2985, pruned_loss=0.07055, over 1616467.88 frames. ], batch size: 28, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:26:42,790 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 17:26:45,071 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7199, 1.9536, 1.6564, 2.3039, 1.1126, 1.4362, 1.6979, 1.9692],
+ device='cuda:2'), covar=tensor([0.0782, 0.0712, 0.1012, 0.0465, 0.1120, 0.1358, 0.0791, 0.0695],
+ device='cuda:2'), in_proj_covar=tensor([0.0237, 0.0206, 0.0251, 0.0217, 0.0215, 0.0254, 0.0258, 0.0216],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 17:27:06,387 INFO [train.py:901] (2/4) Epoch 15, batch 7500, loss[loss=0.2309, simple_loss=0.3137, pruned_loss=0.07403, over 8504.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2983, pruned_loss=0.07035, over 1616887.42 frames. ], batch size: 28, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:27:07,745 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.388e+02 2.853e+02 3.831e+02 7.536e+02, threshold=5.707e+02, percent-clipped=4.0
+2023-02-06 17:27:27,373 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=120694.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:27:40,251 INFO [train.py:901] (2/4) Epoch 15, batch 7550, loss[loss=0.2294, simple_loss=0.3192, pruned_loss=0.06982, over 8245.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2993, pruned_loss=0.07092, over 1615687.63 frames. ], batch size: 24, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:28:14,829 INFO [train.py:901] (2/4) Epoch 15, batch 7600, loss[loss=0.2073, simple_loss=0.3005, pruned_loss=0.05706, over 8357.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2988, pruned_loss=0.07072, over 1615962.44 frames. ], batch size: 24, lr: 5.01e-03, grad_scale: 16.0
+2023-02-06 17:28:16,200 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.426e+02 3.048e+02 3.965e+02 8.844e+02, threshold=6.096e+02, percent-clipped=6.0
+2023-02-06 17:28:42,985 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.42 vs. limit=5.0
+2023-02-06 17:28:50,147 INFO [train.py:901] (2/4) Epoch 15, batch 7650, loss[loss=0.2223, simple_loss=0.2959, pruned_loss=0.0743, over 7801.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2983, pruned_loss=0.07018, over 1618742.29 frames. ], batch size: 20, lr: 5.01e-03, grad_scale: 8.0
+2023-02-06 17:29:25,339 INFO [train.py:901] (2/4) Epoch 15, batch 7700, loss[loss=0.2329, simple_loss=0.3131, pruned_loss=0.07634, over 8486.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.299, pruned_loss=0.07036, over 1622166.97 frames. ], batch size: 26, lr: 5.01e-03, grad_scale: 8.0
+2023-02-06 17:29:27,393 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.503e+02 3.087e+02 4.175e+02 9.539e+02, threshold=6.174e+02, percent-clipped=7.0
+2023-02-06 17:29:32,696 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 17:29:46,994 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0
+2023-02-06 17:29:52,773 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 17:30:01,556 INFO [train.py:901] (2/4) Epoch 15, batch 7750, loss[loss=0.2287, simple_loss=0.3124, pruned_loss=0.07244, over 8472.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2993, pruned_loss=0.0706, over 1620028.87 frames. ], batch size: 29, lr: 5.01e-03, grad_scale: 8.0
+2023-02-06 17:30:36,085 INFO [train.py:901] (2/4) Epoch 15, batch 7800, loss[loss=0.2148, simple_loss=0.2795, pruned_loss=0.07505, over 7230.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2992, pruned_loss=0.07077, over 1619732.82 frames. ], batch size: 16, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:30:38,108 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 2.376e+02 2.783e+02 3.266e+02 5.993e+02, threshold=5.565e+02, percent-clipped=0.0
+2023-02-06 17:31:09,466 INFO [train.py:901] (2/4) Epoch 15, batch 7850, loss[loss=0.2044, simple_loss=0.2971, pruned_loss=0.05588, over 8314.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3001, pruned_loss=0.0713, over 1618622.03 frames. ], batch size: 25, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:31:14,873 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121021.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:31:26,042 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121038.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:31:42,563 INFO [train.py:901] (2/4) Epoch 15, batch 7900, loss[loss=0.2275, simple_loss=0.3151, pruned_loss=0.06996, over 8555.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3008, pruned_loss=0.07182, over 1621260.30 frames. ], batch size: 34, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:31:44,514 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 3.139e+02 4.114e+02 1.036e+03, threshold=6.279e+02, percent-clipped=8.0
+2023-02-06 17:32:15,975 INFO [train.py:901] (2/4) Epoch 15, batch 7950, loss[loss=0.2081, simple_loss=0.2966, pruned_loss=0.05985, over 8510.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3006, pruned_loss=0.0717, over 1615328.90 frames. ], batch size: 28, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:32:42,023 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121153.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:32:48,355 INFO [train.py:901] (2/4) Epoch 15, batch 8000, loss[loss=0.2286, simple_loss=0.3204, pruned_loss=0.06837, over 8492.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3006, pruned_loss=0.072, over 1617664.97 frames. ], batch size: 26, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:32:50,388 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.992e+02 3.696e+02 7.694e+02, threshold=5.984e+02, percent-clipped=2.0
+2023-02-06 17:33:01,524 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121182.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:33:22,838 INFO [train.py:901] (2/4) Epoch 15, batch 8050, loss[loss=0.2252, simple_loss=0.2981, pruned_loss=0.07616, over 8090.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2986, pruned_loss=0.07135, over 1603599.81 frames. ], batch size: 21, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:33:55,752 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 17:34:00,925 INFO [train.py:901] (2/4) Epoch 16, batch 0, loss[loss=0.2302, simple_loss=0.3024, pruned_loss=0.07899, over 8286.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3024, pruned_loss=0.07899, over 8286.00 frames. ], batch size: 23, lr: 4.84e-03, grad_scale: 8.0
+2023-02-06 17:34:00,925 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 17:34:10,746 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6787, 1.6783, 2.6650, 1.3632, 2.0772, 2.8583, 2.9148, 2.5272],
+ device='cuda:2'), covar=tensor([0.1193, 0.1447, 0.0415, 0.2216, 0.0931, 0.0344, 0.0743, 0.0616],
+ device='cuda:2'), in_proj_covar=tensor([0.0280, 0.0309, 0.0271, 0.0301, 0.0288, 0.0248, 0.0377, 0.0297],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 17:34:11,911 INFO [train.py:935] (2/4) Epoch 16, validation: loss=0.1795, simple_loss=0.2801, pruned_loss=0.03944, over 944034.00 frames.
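The recurring `WARNING [train.py:1067] Exclude cut with ID ... from training. Duration: ...` lines in this log come from a duration filter applied to the training cuts before batching: utterances that are too short or too long for stable training are dropped with a warning. The sketch below illustrates the idea; the exact bounds, the helper name, and the lhotse-style `filter` call are assumptions for illustration, not necessarily the code in this recipe's train.py.

```python
import logging

# Assumed bounds for illustration; the recipe may use different values.
MIN_SECONDS = 1.0
MAX_SECONDS = 20.0

def remove_short_and_long_utt(cut) -> bool:
    """Keep a cut only if its duration lies within the training bounds."""
    if MIN_SECONDS <= cut.duration <= MAX_SECONDS:
        return True
    # Emits warnings of the same shape as the ones seen in this log.
    logging.warning(
        f"Exclude cut with ID {cut.id} from training. Duration: {cut.duration}"
    )
    return False

# With lhotse-style CutSets the filter is applied lazily, e.g.:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```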
+2023-02-06 17:34:11,912 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 17:34:24,910 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.543e+02 3.194e+02 4.084e+02 8.334e+02, threshold=6.389e+02, percent-clipped=7.0 +2023-02-06 17:34:26,235 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 17:34:47,406 INFO [train.py:901] (2/4) Epoch 16, batch 50, loss[loss=0.2182, simple_loss=0.3083, pruned_loss=0.06401, over 8254.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3028, pruned_loss=0.07288, over 365253.25 frames. ], batch size: 24, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:34:55,517 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5707, 1.9058, 3.1936, 1.3531, 2.2885, 1.9741, 1.5539, 2.3199], + device='cuda:2'), covar=tensor([0.1855, 0.2435, 0.0725, 0.4311, 0.1706, 0.3034, 0.2293, 0.2147], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0559, 0.0541, 0.0612, 0.0634, 0.0577, 0.0505, 0.0627], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 17:35:02,271 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 17:35:09,789 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121329.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 17:35:20,725 INFO [train.py:901] (2/4) Epoch 16, batch 100, loss[loss=0.2326, simple_loss=0.295, pruned_loss=0.08513, over 7789.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3015, pruned_loss=0.07111, over 643269.61 frames. ], batch size: 19, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:35:24,731 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 17:35:33,287 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:35:33,868 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.470e+02 2.913e+02 3.674e+02 6.203e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-06 17:35:53,850 INFO [train.py:901] (2/4) Epoch 16, batch 150, loss[loss=0.208, simple_loss=0.294, pruned_loss=0.06097, over 8032.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3029, pruned_loss=0.07232, over 861258.18 frames. ], batch size: 22, lr: 4.84e-03, grad_scale: 8.0 +2023-02-06 17:36:04,297 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:11,281 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. 
limit=2.0 +2023-02-06 17:36:15,655 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:20,976 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7808, 1.7646, 1.9283, 1.6589, 0.9711, 1.5923, 2.1485, 1.9568], + device='cuda:2'), covar=tensor([0.0434, 0.1146, 0.1575, 0.1250, 0.0611, 0.1378, 0.0632, 0.0614], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0156, 0.0100, 0.0161, 0.0113, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 17:36:21,676 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:36:30,076 INFO [train.py:901] (2/4) Epoch 16, batch 200, loss[loss=0.1978, simple_loss=0.2624, pruned_loss=0.06656, over 7662.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3017, pruned_loss=0.07222, over 1027426.61 frames. ], batch size: 19, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:36:43,676 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.385e+02 2.940e+02 3.661e+02 7.455e+02, threshold=5.881e+02, percent-clipped=4.0 +2023-02-06 17:36:53,373 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:37:04,016 INFO [train.py:901] (2/4) Epoch 16, batch 250, loss[loss=0.2303, simple_loss=0.3028, pruned_loss=0.07892, over 8148.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3018, pruned_loss=0.07268, over 1157609.57 frames. ], batch size: 22, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:37:18,646 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 17:37:24,826 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:37:28,166 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 17:37:36,145 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 17:37:39,707 INFO [train.py:901] (2/4) Epoch 16, batch 300, loss[loss=0.2238, simple_loss=0.3001, pruned_loss=0.07376, over 7808.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3007, pruned_loss=0.07239, over 1258040.72 frames. 
], batch size: 20, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:37:54,064 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.529e+02 3.079e+02 3.820e+02 7.739e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-06 17:38:01,960 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2672, 1.9125, 2.6666, 2.1249, 2.4581, 2.1737, 1.8626, 1.3282], + device='cuda:2'), covar=tensor([0.4586, 0.4366, 0.1535, 0.3344, 0.2448, 0.2637, 0.1848, 0.4762], + device='cuda:2'), in_proj_covar=tensor([0.0911, 0.0921, 0.0756, 0.0890, 0.0955, 0.0840, 0.0722, 0.0799], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 17:38:09,460 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6677, 1.4527, 1.8024, 1.4296, 0.9443, 1.4958, 2.0337, 1.7332], + device='cuda:2'), covar=tensor([0.0447, 0.1355, 0.1712, 0.1519, 0.0653, 0.1533, 0.0695, 0.0685], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0157, 0.0100, 0.0162, 0.0113, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 17:38:14,587 INFO [train.py:901] (2/4) Epoch 16, batch 350, loss[loss=0.2664, simple_loss=0.3422, pruned_loss=0.09535, over 8578.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3004, pruned_loss=0.07173, over 1341557.57 frames. ], batch size: 31, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:38:15,674 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.71 vs. limit=2.0 +2023-02-06 17:38:45,977 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:38:49,789 INFO [train.py:901] (2/4) Epoch 16, batch 400, loss[loss=0.2164, simple_loss=0.301, pruned_loss=0.06592, over 8317.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3014, pruned_loss=0.07196, over 1405871.30 frames. ], batch size: 25, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:38:52,983 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 17:39:04,290 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.467e+02 3.087e+02 3.761e+02 6.357e+02, threshold=6.175e+02, percent-clipped=1.0 +2023-02-06 17:39:09,178 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121673.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:39:17,981 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1497, 1.4313, 4.3287, 1.5924, 3.8064, 3.6048, 3.9087, 3.7745], + device='cuda:2'), covar=tensor([0.0548, 0.4500, 0.0534, 0.3919, 0.1134, 0.0928, 0.0541, 0.0682], + device='cuda:2'), in_proj_covar=tensor([0.0557, 0.0611, 0.0637, 0.0582, 0.0656, 0.0564, 0.0556, 0.0615], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 17:39:24,231 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 17:39:25,132 INFO [train.py:901] (2/4) Epoch 16, batch 450, loss[loss=0.2477, simple_loss=0.3008, pruned_loss=0.09726, over 7539.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3008, pruned_loss=0.0718, over 1449688.25 frames. 
], batch size: 18, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:39:48,309 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4756, 1.7724, 2.7201, 1.3114, 2.0505, 1.8509, 1.5257, 2.0006], + device='cuda:2'), covar=tensor([0.1425, 0.1988, 0.0603, 0.3401, 0.1481, 0.2341, 0.1624, 0.1917], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0559, 0.0542, 0.0610, 0.0632, 0.0577, 0.0504, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 17:39:52,402 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:39:59,042 INFO [train.py:901] (2/4) Epoch 16, batch 500, loss[loss=0.2326, simple_loss=0.3013, pruned_loss=0.08195, over 7922.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3019, pruned_loss=0.07229, over 1488569.79 frames. ], batch size: 20, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:40:10,166 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4173, 1.4899, 4.5897, 1.6488, 4.1103, 3.8514, 4.1250, 4.0352], + device='cuda:2'), covar=tensor([0.0448, 0.4234, 0.0451, 0.3845, 0.0893, 0.0842, 0.0506, 0.0552], + device='cuda:2'), in_proj_covar=tensor([0.0554, 0.0609, 0.0635, 0.0582, 0.0653, 0.0563, 0.0557, 0.0613], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 17:40:10,954 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:40:14,769 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.435e+02 2.838e+02 3.555e+02 6.989e+02, threshold=5.677e+02, percent-clipped=1.0 +2023-02-06 17:40:17,014 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:40:29,898 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121788.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 17:40:35,812 INFO [train.py:901] (2/4) Epoch 16, batch 550, loss[loss=0.1879, simple_loss=0.2735, pruned_loss=0.05118, over 8020.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3016, pruned_loss=0.07162, over 1522172.03 frames. ], batch size: 22, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:40:52,141 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8041, 1.7438, 2.3791, 1.6157, 1.2238, 2.4259, 0.3872, 1.3874], + device='cuda:2'), covar=tensor([0.2250, 0.1520, 0.0442, 0.1650, 0.3478, 0.0455, 0.2759, 0.1670], + device='cuda:2'), in_proj_covar=tensor([0.0176, 0.0181, 0.0112, 0.0214, 0.0257, 0.0116, 0.0163, 0.0178], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 17:40:58,378 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-06 17:41:09,174 INFO [train.py:901] (2/4) Epoch 16, batch 600, loss[loss=0.2395, simple_loss=0.3195, pruned_loss=0.07976, over 8030.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3008, pruned_loss=0.07128, over 1543608.49 frames. 
], batch size: 22, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:22,430 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.425e+02 3.086e+02 4.175e+02 1.417e+03, threshold=6.173e+02, percent-clipped=9.0 +2023-02-06 17:41:26,595 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 17:41:36,805 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:41:44,735 INFO [train.py:901] (2/4) Epoch 16, batch 650, loss[loss=0.2349, simple_loss=0.3114, pruned_loss=0.07924, over 8511.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.3, pruned_loss=0.07033, over 1559644.27 frames. ], batch size: 28, lr: 4.83e-03, grad_scale: 8.0 +2023-02-06 17:41:45,637 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:02,833 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:05,542 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5693, 2.3337, 3.4831, 2.6202, 3.2338, 2.4740, 2.1107, 1.7736], + device='cuda:2'), covar=tensor([0.4759, 0.4858, 0.1616, 0.3325, 0.2295, 0.2733, 0.1825, 0.5324], + device='cuda:2'), in_proj_covar=tensor([0.0912, 0.0923, 0.0759, 0.0890, 0.0956, 0.0843, 0.0721, 0.0797], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 17:42:18,664 INFO [train.py:901] (2/4) Epoch 16, batch 700, loss[loss=0.2155, simple_loss=0.2814, pruned_loss=0.07479, over 8238.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.3004, pruned_loss=0.07017, over 1578042.63 frames. ], batch size: 22, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:42:32,096 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.496e+02 2.978e+02 3.542e+02 1.118e+03, threshold=5.957e+02, percent-clipped=1.0 +2023-02-06 17:42:33,592 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:42:53,716 INFO [train.py:901] (2/4) Epoch 16, batch 750, loss[loss=0.2468, simple_loss=0.3138, pruned_loss=0.08989, over 8549.00 frames. ], tot_loss[loss=0.22, simple_loss=0.3, pruned_loss=0.06995, over 1589828.73 frames. ], batch size: 49, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:43:07,454 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1701, 3.0679, 2.8138, 1.6159, 2.8120, 2.8595, 2.8160, 2.7486], + device='cuda:2'), covar=tensor([0.1299, 0.0920, 0.1567, 0.4748, 0.1232, 0.1248, 0.1709, 0.1163], + device='cuda:2'), in_proj_covar=tensor([0.0496, 0.0410, 0.0413, 0.0514, 0.0403, 0.0410, 0.0396, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 17:43:14,269 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 17:43:23,869 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. 
Duration: 26.32775 +2023-02-06 17:43:28,677 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122044.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 17:43:29,776 INFO [train.py:901] (2/4) Epoch 16, batch 800, loss[loss=0.2202, simple_loss=0.2783, pruned_loss=0.08108, over 7683.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2995, pruned_loss=0.06997, over 1598389.26 frames. ], batch size: 18, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:43:43,079 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.422e+02 2.925e+02 3.576e+02 6.712e+02, threshold=5.851e+02, percent-clipped=2.0 +2023-02-06 17:43:45,456 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122069.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 17:44:03,133 INFO [train.py:901] (2/4) Epoch 16, batch 850, loss[loss=0.2231, simple_loss=0.287, pruned_loss=0.07963, over 7809.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2996, pruned_loss=0.07021, over 1604084.13 frames. ], batch size: 20, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:44:07,377 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7691, 1.9643, 1.7393, 2.3108, 1.1228, 1.4742, 1.6312, 2.0776], + device='cuda:2'), covar=tensor([0.0713, 0.0724, 0.0868, 0.0418, 0.1066, 0.1345, 0.0759, 0.0623], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0203, 0.0248, 0.0214, 0.0213, 0.0251, 0.0255, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 17:44:31,621 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:34,939 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:38,759 INFO [train.py:901] (2/4) Epoch 16, batch 900, loss[loss=0.1983, simple_loss=0.2777, pruned_loss=0.05951, over 7802.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2987, pruned_loss=0.06983, over 1607804.45 frames. ], batch size: 19, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:44:52,336 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:44:52,800 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.482e+02 3.085e+02 4.013e+02 7.148e+02, threshold=6.170e+02, percent-clipped=4.0 +2023-02-06 17:45:12,883 INFO [train.py:901] (2/4) Epoch 16, batch 950, loss[loss=0.2685, simple_loss=0.3345, pruned_loss=0.1012, over 8472.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2984, pruned_loss=0.06948, over 1610872.95 frames. ], batch size: 25, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:45:24,862 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:45:40,113 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 17:45:49,036 INFO [train.py:901] (2/4) Epoch 16, batch 1000, loss[loss=0.2369, simple_loss=0.3167, pruned_loss=0.07853, over 8199.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2984, pruned_loss=0.06934, over 1614142.85 frames. 
], batch size: 23, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:46:03,407 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.462e+02 3.004e+02 3.600e+02 8.525e+02, threshold=6.009e+02, percent-clipped=4.0 +2023-02-06 17:46:14,178 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 17:46:23,685 INFO [train.py:901] (2/4) Epoch 16, batch 1050, loss[loss=0.185, simple_loss=0.2761, pruned_loss=0.04697, over 7954.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2993, pruned_loss=0.0698, over 1617503.28 frames. ], batch size: 21, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:46:26,447 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 17:46:34,620 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:46:51,300 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:46:57,870 INFO [train.py:901] (2/4) Epoch 16, batch 1100, loss[loss=0.2017, simple_loss=0.2734, pruned_loss=0.06497, over 7973.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2993, pruned_loss=0.0699, over 1617256.60 frames. ], batch size: 21, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:47:12,614 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.699e+02 3.204e+02 3.982e+02 8.590e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 17:47:33,533 INFO [train.py:901] (2/4) Epoch 16, batch 1150, loss[loss=0.2222, simple_loss=0.2962, pruned_loss=0.07407, over 8139.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2986, pruned_loss=0.06916, over 1618483.50 frames. ], batch size: 22, lr: 4.82e-03, grad_scale: 8.0 +2023-02-06 17:47:38,292 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 17:47:42,503 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3327, 1.4368, 4.5151, 1.6563, 3.9947, 3.7021, 4.0816, 3.9508], + device='cuda:2'), covar=tensor([0.0541, 0.4644, 0.0548, 0.3991, 0.1076, 0.1017, 0.0592, 0.0657], + device='cuda:2'), in_proj_covar=tensor([0.0557, 0.0610, 0.0643, 0.0588, 0.0658, 0.0565, 0.0562, 0.0622], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 17:47:43,906 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7076, 1.6721, 2.0861, 1.5902, 1.2324, 2.1303, 0.3286, 1.2485], + device='cuda:2'), covar=tensor([0.1810, 0.1349, 0.0437, 0.1214, 0.3315, 0.0454, 0.2460, 0.1845], + device='cuda:2'), in_proj_covar=tensor([0.0174, 0.0179, 0.0111, 0.0210, 0.0254, 0.0115, 0.0160, 0.0177], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 17:47:54,849 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:48:07,550 INFO [train.py:901] (2/4) Epoch 16, batch 1200, loss[loss=0.2025, simple_loss=0.2956, pruned_loss=0.0547, over 8465.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2984, pruned_loss=0.06899, over 1617512.51 frames. 
], batch size: 25, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:48:21,991 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.417e+02 3.007e+02 3.779e+02 1.089e+03, threshold=6.013e+02, percent-clipped=2.0 +2023-02-06 17:48:31,789 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:48:33,529 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 17:48:36,683 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122486.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:48:43,455 INFO [train.py:901] (2/4) Epoch 16, batch 1250, loss[loss=0.2302, simple_loss=0.3038, pruned_loss=0.07829, over 8280.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2979, pruned_loss=0.06893, over 1618646.42 frames. ], batch size: 23, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:49:19,088 INFO [train.py:901] (2/4) Epoch 16, batch 1300, loss[loss=0.2173, simple_loss=0.2975, pruned_loss=0.06856, over 7659.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2983, pruned_loss=0.0689, over 1617254.86 frames. ], batch size: 19, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:49:26,971 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:49:33,313 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 3.105e+02 3.703e+02 6.719e+02, threshold=6.210e+02, percent-clipped=4.0 +2023-02-06 17:49:44,500 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9920, 3.4800, 2.1117, 2.7016, 2.4299, 1.7032, 2.5947, 2.9117], + device='cuda:2'), covar=tensor([0.1614, 0.0389, 0.1261, 0.0703, 0.0919, 0.1719, 0.1193, 0.0972], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0237, 0.0330, 0.0306, 0.0304, 0.0330, 0.0346, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 17:49:52,142 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6727, 1.5401, 2.0441, 1.5579, 1.1996, 2.0656, 0.2517, 1.1897], + device='cuda:2'), covar=tensor([0.1923, 0.1789, 0.0424, 0.1180, 0.3334, 0.0456, 0.2614, 0.1648], + device='cuda:2'), in_proj_covar=tensor([0.0176, 0.0182, 0.0112, 0.0214, 0.0257, 0.0117, 0.0162, 0.0178], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 17:49:54,914 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:49:56,063 INFO [train.py:901] (2/4) Epoch 16, batch 1350, loss[loss=0.2263, simple_loss=0.3075, pruned_loss=0.0725, over 7821.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2982, pruned_loss=0.06886, over 1620193.92 frames. ], batch size: 20, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:50:31,463 INFO [train.py:901] (2/4) Epoch 16, batch 1400, loss[loss=0.2502, simple_loss=0.328, pruned_loss=0.08624, over 8359.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2967, pruned_loss=0.06818, over 1617950.03 frames. 
], batch size: 24, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:50:45,931 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.319e+02 2.799e+02 3.491e+02 7.123e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-06 17:50:49,437 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:50:50,210 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 17:50:55,476 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122681.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:50:56,973 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:51:05,778 INFO [train.py:901] (2/4) Epoch 16, batch 1450, loss[loss=0.2365, simple_loss=0.3057, pruned_loss=0.0836, over 8023.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2964, pruned_loss=0.06815, over 1615944.52 frames. ], batch size: 22, lr: 4.81e-03, grad_scale: 4.0 +2023-02-06 17:51:12,699 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 17:51:15,653 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:51:42,668 INFO [train.py:901] (2/4) Epoch 16, batch 1500, loss[loss=0.2485, simple_loss=0.3248, pruned_loss=0.08614, over 8198.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2973, pruned_loss=0.06899, over 1618635.72 frames. ], batch size: 23, lr: 4.81e-03, grad_scale: 4.0 +2023-02-06 17:51:56,867 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 2.515e+02 3.024e+02 4.111e+02 8.238e+02, threshold=6.047e+02, percent-clipped=9.0 +2023-02-06 17:52:16,413 INFO [train.py:901] (2/4) Epoch 16, batch 1550, loss[loss=0.1954, simple_loss=0.2665, pruned_loss=0.06219, over 7441.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.06904, over 1618300.23 frames. ], batch size: 17, lr: 4.81e-03, grad_scale: 4.0 +2023-02-06 17:52:16,630 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122796.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:52:41,665 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:52:47,997 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4052, 1.5825, 2.8568, 1.3690, 2.1289, 1.8860, 1.4403, 2.0340], + device='cuda:2'), covar=tensor([0.1987, 0.2497, 0.0689, 0.4265, 0.1469, 0.2997, 0.2235, 0.1848], + device='cuda:2'), in_proj_covar=tensor([0.0504, 0.0558, 0.0539, 0.0609, 0.0627, 0.0570, 0.0501, 0.0618], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 17:52:52,395 INFO [train.py:901] (2/4) Epoch 16, batch 1600, loss[loss=0.2258, simple_loss=0.3078, pruned_loss=0.07194, over 8532.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2966, pruned_loss=0.06853, over 1619898.25 frames. ], batch size: 28, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:52:55,435 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122850.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:53:07,575 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-06 17:53:07,650 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.555e+02 3.178e+02 4.067e+02 1.179e+03, threshold=6.355e+02, percent-clipped=12.0 +2023-02-06 17:53:13,369 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:53:23,678 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:53:27,666 INFO [train.py:901] (2/4) Epoch 16, batch 1650, loss[loss=0.2131, simple_loss=0.2911, pruned_loss=0.06752, over 8084.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2956, pruned_loss=0.06818, over 1611373.36 frames. ], batch size: 21, lr: 4.81e-03, grad_scale: 8.0 +2023-02-06 17:53:29,433 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.03 vs. limit=5.0 +2023-02-06 17:53:49,585 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122928.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:54:02,350 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122945.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:54:02,864 INFO [train.py:901] (2/4) Epoch 16, batch 1700, loss[loss=0.234, simple_loss=0.3164, pruned_loss=0.07584, over 8021.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2968, pruned_loss=0.0688, over 1616590.50 frames. ], batch size: 22, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:54:08,415 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:54:09,136 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4984, 1.8064, 2.6997, 1.3927, 1.8887, 1.8473, 1.5523, 1.8743], + device='cuda:2'), covar=tensor([0.1971, 0.2494, 0.0767, 0.4528, 0.1806, 0.3296, 0.2245, 0.2215], + device='cuda:2'), in_proj_covar=tensor([0.0503, 0.0557, 0.0539, 0.0608, 0.0627, 0.0570, 0.0500, 0.0617], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 17:54:17,614 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.342e+02 2.881e+02 3.479e+02 7.679e+02, threshold=5.763e+02, percent-clipped=3.0 +2023-02-06 17:54:21,196 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9948, 1.7627, 3.3604, 1.5351, 2.3309, 3.6486, 3.6784, 3.0818], + device='cuda:2'), covar=tensor([0.1135, 0.1595, 0.0380, 0.1989, 0.1140, 0.0254, 0.0547, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0279, 0.0308, 0.0270, 0.0297, 0.0290, 0.0248, 0.0379, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 17:54:38,071 INFO [train.py:901] (2/4) Epoch 16, batch 1750, loss[loss=0.1955, simple_loss=0.2651, pruned_loss=0.06294, over 7696.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2952, pruned_loss=0.06821, over 1615830.59 frames. ], batch size: 18, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:54:42,733 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-06 17:55:01,805 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4073, 2.4150, 1.5878, 2.2130, 2.0715, 1.2279, 1.9250, 2.1071], + device='cuda:2'), covar=tensor([0.1759, 0.0510, 0.1557, 0.0755, 0.0889, 0.2008, 0.1289, 0.1062], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0234, 0.0327, 0.0303, 0.0302, 0.0330, 0.0344, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 17:55:12,114 INFO [train.py:901] (2/4) Epoch 16, batch 1800, loss[loss=0.2008, simple_loss=0.2837, pruned_loss=0.05893, over 8220.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2947, pruned_loss=0.06825, over 1616863.61 frames. ], batch size: 22, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:55:16,362 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:55:18,921 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2520, 1.5807, 1.7806, 1.4784, 1.1569, 1.5629, 1.8000, 1.8783], + device='cuda:2'), covar=tensor([0.0461, 0.1156, 0.1597, 0.1340, 0.0563, 0.1439, 0.0657, 0.0578], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 17:55:25,835 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9810, 1.9226, 2.5796, 1.5052, 1.3936, 2.5471, 0.4237, 1.4702], + device='cuda:2'), covar=tensor([0.1862, 0.1415, 0.0313, 0.2025, 0.3239, 0.0342, 0.2504, 0.1817], + device='cuda:2'), in_proj_covar=tensor([0.0173, 0.0179, 0.0111, 0.0211, 0.0254, 0.0115, 0.0161, 0.0176], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 17:55:27,695 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.489e+02 2.922e+02 3.750e+02 7.056e+02, threshold=5.843e+02, percent-clipped=4.0 +2023-02-06 17:55:35,411 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123077.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:55:48,781 INFO [train.py:901] (2/4) Epoch 16, batch 1850, loss[loss=0.2667, simple_loss=0.3363, pruned_loss=0.09857, over 7182.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2945, pruned_loss=0.06815, over 1613344.11 frames. ], batch size: 71, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:56:22,235 INFO [train.py:901] (2/4) Epoch 16, batch 1900, loss[loss=0.2518, simple_loss=0.3399, pruned_loss=0.08181, over 8342.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.06854, over 1616530.46 frames. ], batch size: 26, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:56:36,278 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.569e+02 3.077e+02 4.069e+02 9.708e+02, threshold=6.154e+02, percent-clipped=7.0 +2023-02-06 17:56:57,741 INFO [train.py:901] (2/4) Epoch 16, batch 1950, loss[loss=0.2219, simple_loss=0.2806, pruned_loss=0.08163, over 7792.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.296, pruned_loss=0.06919, over 1615572.45 frames. ], batch size: 19, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:56:59,131 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-06 17:57:01,385 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:57:11,361 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 17:57:18,813 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123226.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:57:24,081 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:57:30,728 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 17:57:32,102 INFO [train.py:901] (2/4) Epoch 16, batch 2000, loss[loss=0.1605, simple_loss=0.2494, pruned_loss=0.03583, over 7783.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2957, pruned_loss=0.06881, over 1613090.46 frames. ], batch size: 19, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:57:46,356 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.202e+02 2.631e+02 3.355e+02 6.225e+02, threshold=5.263e+02, percent-clipped=1.0 +2023-02-06 17:58:05,870 INFO [train.py:901] (2/4) Epoch 16, batch 2050, loss[loss=0.219, simple_loss=0.3034, pruned_loss=0.06727, over 8474.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2944, pruned_loss=0.06835, over 1610066.92 frames. ], batch size: 25, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:58:31,864 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123332.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:58:41,130 INFO [train.py:901] (2/4) Epoch 16, batch 2100, loss[loss=0.182, simple_loss=0.2585, pruned_loss=0.05272, over 7688.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2949, pruned_loss=0.06863, over 1611329.32 frames. ], batch size: 18, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:58:43,352 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:58:54,988 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.517e+02 3.000e+02 3.631e+02 1.037e+03, threshold=6.000e+02, percent-clipped=6.0 +2023-02-06 17:59:14,279 INFO [train.py:901] (2/4) Epoch 16, batch 2150, loss[loss=0.1867, simple_loss=0.2619, pruned_loss=0.05574, over 7311.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.297, pruned_loss=0.07003, over 1609985.44 frames. 
], batch size: 16, lr: 4.80e-03, grad_scale: 8.0 +2023-02-06 17:59:19,816 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3808, 2.0513, 2.8538, 2.2971, 2.8771, 2.3560, 2.0455, 1.4435], + device='cuda:2'), covar=tensor([0.4892, 0.4604, 0.1564, 0.3089, 0.2144, 0.2482, 0.1855, 0.5103], + device='cuda:2'), in_proj_covar=tensor([0.0920, 0.0928, 0.0763, 0.0900, 0.0964, 0.0846, 0.0720, 0.0803], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 17:59:22,908 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2383, 3.1978, 2.9665, 1.5560, 2.9563, 2.8645, 2.9088, 2.7160], + device='cuda:2'), covar=tensor([0.1347, 0.0899, 0.1345, 0.4461, 0.1154, 0.1231, 0.1585, 0.1264], + device='cuda:2'), in_proj_covar=tensor([0.0493, 0.0410, 0.0410, 0.0511, 0.0404, 0.0410, 0.0397, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 17:59:50,132 INFO [train.py:901] (2/4) Epoch 16, batch 2200, loss[loss=0.2842, simple_loss=0.3444, pruned_loss=0.112, over 8459.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2987, pruned_loss=0.07095, over 1617102.11 frames. ], batch size: 25, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 17:59:50,271 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:00:04,135 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.694e+02 3.295e+02 4.036e+02 1.292e+03, threshold=6.590e+02, percent-clipped=6.0 +2023-02-06 18:00:23,391 INFO [train.py:901] (2/4) Epoch 16, batch 2250, loss[loss=0.2181, simple_loss=0.292, pruned_loss=0.07211, over 8563.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2974, pruned_loss=0.07045, over 1614150.96 frames. ], batch size: 39, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:00:46,379 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 18:00:58,618 INFO [train.py:901] (2/4) Epoch 16, batch 2300, loss[loss=0.2231, simple_loss=0.29, pruned_loss=0.07814, over 7797.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2977, pruned_loss=0.07013, over 1612979.23 frames. ], batch size: 19, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:01:13,226 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.374e+02 2.935e+02 3.719e+02 2.594e+03, threshold=5.871e+02, percent-clipped=2.0 +2023-02-06 18:01:22,765 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4901, 1.8010, 1.8062, 1.1810, 1.9534, 1.4981, 0.4826, 1.6961], + device='cuda:2'), covar=tensor([0.0416, 0.0280, 0.0217, 0.0411, 0.0266, 0.0607, 0.0636, 0.0216], + device='cuda:2'), in_proj_covar=tensor([0.0423, 0.0364, 0.0313, 0.0416, 0.0350, 0.0507, 0.0372, 0.0389], + device='cuda:2'), out_proj_covar=tensor([1.1640e-04, 9.7522e-05, 8.3455e-05, 1.1214e-04, 9.4408e-05, 1.4701e-04, + 1.0201e-04, 1.0529e-04], device='cuda:2') +2023-02-06 18:01:32,631 INFO [train.py:901] (2/4) Epoch 16, batch 2350, loss[loss=0.2476, simple_loss=0.317, pruned_loss=0.0891, over 8138.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2981, pruned_loss=0.07041, over 1614097.08 frames. 
], batch size: 22, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:01:38,851 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:01:55,683 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:02:06,131 INFO [train.py:901] (2/4) Epoch 16, batch 2400, loss[loss=0.2248, simple_loss=0.305, pruned_loss=0.07227, over 8335.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.298, pruned_loss=0.07028, over 1615266.69 frames. ], batch size: 25, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:02:22,333 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.321e+02 3.011e+02 3.485e+02 7.740e+02, threshold=6.021e+02, percent-clipped=5.0 +2023-02-06 18:02:28,429 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123676.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:02:42,479 INFO [train.py:901] (2/4) Epoch 16, batch 2450, loss[loss=0.2602, simple_loss=0.3369, pruned_loss=0.09177, over 8565.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2977, pruned_loss=0.06978, over 1617294.45 frames. ], batch size: 39, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:03:15,342 INFO [train.py:901] (2/4) Epoch 16, batch 2500, loss[loss=0.2038, simple_loss=0.2738, pruned_loss=0.06688, over 7162.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2987, pruned_loss=0.07102, over 1614643.88 frames. ], batch size: 16, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:03:25,454 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0300, 1.7129, 2.2809, 1.6795, 1.1653, 1.8910, 2.1332, 2.2429], + device='cuda:2'), covar=tensor([0.0427, 0.1173, 0.1505, 0.1297, 0.0570, 0.1347, 0.0599, 0.0532], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0157, 0.0100, 0.0163, 0.0114, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 18:03:29,367 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.388e+02 3.009e+02 3.987e+02 1.163e+03, threshold=6.019e+02, percent-clipped=7.0 +2023-02-06 18:03:46,948 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123790.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:03:47,745 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:03:50,911 INFO [train.py:901] (2/4) Epoch 16, batch 2550, loss[loss=0.2255, simple_loss=0.3033, pruned_loss=0.07383, over 8323.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2985, pruned_loss=0.07115, over 1614396.12 frames. ], batch size: 25, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:04:24,936 INFO [train.py:901] (2/4) Epoch 16, batch 2600, loss[loss=0.2512, simple_loss=0.3076, pruned_loss=0.09744, over 7786.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2982, pruned_loss=0.07128, over 1609391.95 frames. 
], batch size: 19, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:04:38,925 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.447e+02 2.814e+02 3.524e+02 5.517e+02, threshold=5.629e+02, percent-clipped=0.0 +2023-02-06 18:04:54,348 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:04:58,762 INFO [train.py:901] (2/4) Epoch 16, batch 2650, loss[loss=0.2406, simple_loss=0.3237, pruned_loss=0.07874, over 8503.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2978, pruned_loss=0.07092, over 1607558.67 frames. ], batch size: 28, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:05:06,361 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:05:34,191 INFO [train.py:901] (2/4) Epoch 16, batch 2700, loss[loss=0.2191, simple_loss=0.2978, pruned_loss=0.07025, over 8648.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2986, pruned_loss=0.07103, over 1613524.55 frames. ], batch size: 34, lr: 4.79e-03, grad_scale: 8.0 +2023-02-06 18:05:48,205 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.455e+02 3.188e+02 4.135e+02 8.908e+02, threshold=6.377e+02, percent-clipped=7.0 +2023-02-06 18:06:07,595 INFO [train.py:901] (2/4) Epoch 16, batch 2750, loss[loss=0.2273, simple_loss=0.3165, pruned_loss=0.06906, over 8654.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3001, pruned_loss=0.07151, over 1615255.47 frames. ], batch size: 39, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:06:45,090 INFO [train.py:901] (2/4) Epoch 16, batch 2800, loss[loss=0.2464, simple_loss=0.3285, pruned_loss=0.08211, over 8539.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2988, pruned_loss=0.07051, over 1615751.78 frames. ], batch size: 49, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:06:46,013 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:06:50,849 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:06:59,429 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 2.578e+02 3.039e+02 4.001e+02 1.196e+03, threshold=6.079e+02, percent-clipped=5.0 +2023-02-06 18:07:03,063 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:07:08,263 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:07:18,991 INFO [train.py:901] (2/4) Epoch 16, batch 2850, loss[loss=0.2255, simple_loss=0.3055, pruned_loss=0.07278, over 8655.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.299, pruned_loss=0.07041, over 1618130.30 frames. ], batch size: 34, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:07:22,868 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 18:07:55,341 INFO [train.py:901] (2/4) Epoch 16, batch 2900, loss[loss=0.1911, simple_loss=0.278, pruned_loss=0.05207, over 7806.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2979, pruned_loss=0.06972, over 1615042.01 frames. 
], batch size: 20, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:08:06,284 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:08:06,322 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9119, 1.6538, 2.0948, 1.8371, 1.9231, 1.9267, 1.6753, 0.7772], + device='cuda:2'), covar=tensor([0.5179, 0.4409, 0.1636, 0.2748, 0.2056, 0.2673, 0.1884, 0.4526], + device='cuda:2'), in_proj_covar=tensor([0.0914, 0.0924, 0.0758, 0.0894, 0.0961, 0.0848, 0.0723, 0.0797], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:08:10,038 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.456e+02 3.206e+02 4.387e+02 8.191e+02, threshold=6.412e+02, percent-clipped=4.0 +2023-02-06 18:08:22,886 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:08:29,509 INFO [train.py:901] (2/4) Epoch 16, batch 2950, loss[loss=0.2445, simple_loss=0.3241, pruned_loss=0.08245, over 8182.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2981, pruned_loss=0.06952, over 1618233.05 frames. ], batch size: 23, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:08:29,747 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4038, 1.6242, 1.6325, 1.0578, 1.7628, 1.3100, 0.2676, 1.6161], + device='cuda:2'), covar=tensor([0.0362, 0.0260, 0.0224, 0.0387, 0.0288, 0.0753, 0.0668, 0.0213], + device='cuda:2'), in_proj_covar=tensor([0.0425, 0.0363, 0.0311, 0.0417, 0.0350, 0.0508, 0.0371, 0.0387], + device='cuda:2'), out_proj_covar=tensor([1.1693e-04, 9.7248e-05, 8.2672e-05, 1.1219e-04, 9.4535e-05, 1.4726e-04, + 1.0189e-04, 1.0494e-04], device='cuda:2') +2023-02-06 18:08:35,643 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 18:08:55,344 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:09:01,457 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.79 vs. limit=5.0 +2023-02-06 18:09:03,841 INFO [train.py:901] (2/4) Epoch 16, batch 3000, loss[loss=0.1771, simple_loss=0.2596, pruned_loss=0.0473, over 7420.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.297, pruned_loss=0.0692, over 1612766.25 frames. ], batch size: 17, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:09:03,841 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 18:09:16,271 INFO [train.py:935] (2/4) Epoch 16, validation: loss=0.1794, simple_loss=0.2796, pruned_loss=0.03958, over 944034.00 frames. +2023-02-06 18:09:16,271 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 18:09:32,700 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.393e+02 2.939e+02 3.627e+02 1.404e+03, threshold=5.877e+02, percent-clipped=2.0 +2023-02-06 18:09:49,112 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:09:52,909 INFO [train.py:901] (2/4) Epoch 16, batch 3050, loss[loss=0.2247, simple_loss=0.3189, pruned_loss=0.06524, over 8524.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2977, pruned_loss=0.06986, over 1605905.14 frames. 
], batch size: 49, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:10:25,958 INFO [train.py:901] (2/4) Epoch 16, batch 3100, loss[loss=0.2019, simple_loss=0.2907, pruned_loss=0.0566, over 8490.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2975, pruned_loss=0.07001, over 1608265.74 frames. ], batch size: 28, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:10:28,055 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:10:39,329 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124366.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:10:39,809 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.464e+02 2.975e+02 4.095e+02 1.383e+03, threshold=5.950e+02, percent-clipped=6.0 +2023-02-06 18:11:01,463 INFO [train.py:901] (2/4) Epoch 16, batch 3150, loss[loss=0.2145, simple_loss=0.3088, pruned_loss=0.06015, over 8361.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2981, pruned_loss=0.07046, over 1608938.99 frames. ], batch size: 24, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:11:02,940 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:11:21,449 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:11:36,587 INFO [train.py:901] (2/4) Epoch 16, batch 3200, loss[loss=0.246, simple_loss=0.3304, pruned_loss=0.08085, over 8495.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2985, pruned_loss=0.07118, over 1609093.82 frames. ], batch size: 26, lr: 4.78e-03, grad_scale: 8.0 +2023-02-06 18:11:39,398 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:11:50,425 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.552e+02 3.102e+02 3.772e+02 6.284e+02, threshold=6.205e+02, percent-clipped=3.0 +2023-02-06 18:12:09,966 INFO [train.py:901] (2/4) Epoch 16, batch 3250, loss[loss=0.2406, simple_loss=0.3095, pruned_loss=0.08584, over 7922.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2987, pruned_loss=0.07116, over 1606507.59 frames. ], batch size: 20, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:12:23,020 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:12:40,936 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:12:45,404 INFO [train.py:901] (2/4) Epoch 16, batch 3300, loss[loss=0.2227, simple_loss=0.3092, pruned_loss=0.06812, over 8099.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2974, pruned_loss=0.07007, over 1606400.79 frames. ], batch size: 23, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:12:59,413 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.425e+02 2.919e+02 3.659e+02 6.879e+02, threshold=5.837e+02, percent-clipped=1.0 +2023-02-06 18:13:18,843 INFO [train.py:901] (2/4) Epoch 16, batch 3350, loss[loss=0.1864, simple_loss=0.275, pruned_loss=0.04887, over 8277.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2961, pruned_loss=0.06894, over 1611022.55 frames. 
], batch size: 23, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:13:25,354 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:13:40,969 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. limit=2.0 +2023-02-06 18:13:43,500 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:13:46,714 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:13:54,713 INFO [train.py:901] (2/4) Epoch 16, batch 3400, loss[loss=0.1966, simple_loss=0.2764, pruned_loss=0.05842, over 7940.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2951, pruned_loss=0.06827, over 1612200.87 frames. ], batch size: 20, lr: 4.77e-03, grad_scale: 8.0 +2023-02-06 18:14:01,615 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:14:08,830 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.420e+02 3.011e+02 3.525e+02 7.222e+02, threshold=6.022e+02, percent-clipped=3.0 +2023-02-06 18:14:18,573 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124681.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:14:28,869 INFO [train.py:901] (2/4) Epoch 16, batch 3450, loss[loss=0.2034, simple_loss=0.2842, pruned_loss=0.06133, over 8336.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.297, pruned_loss=0.06932, over 1613338.16 frames. ], batch size: 25, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:14:38,586 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124710.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:14:48,191 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2185, 3.1128, 2.9070, 1.6035, 2.8608, 2.9475, 2.8540, 2.7838], + device='cuda:2'), covar=tensor([0.1156, 0.0864, 0.1376, 0.4423, 0.1161, 0.1190, 0.1597, 0.1109], + device='cuda:2'), in_proj_covar=tensor([0.0489, 0.0408, 0.0410, 0.0511, 0.0401, 0.0408, 0.0396, 0.0355], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 18:15:04,129 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3559, 2.6414, 3.1768, 1.4943, 3.2180, 1.9320, 1.5376, 2.1387], + device='cuda:2'), covar=tensor([0.0650, 0.0308, 0.0171, 0.0640, 0.0370, 0.0668, 0.0804, 0.0472], + device='cuda:2'), in_proj_covar=tensor([0.0424, 0.0363, 0.0310, 0.0415, 0.0351, 0.0508, 0.0371, 0.0387], + device='cuda:2'), out_proj_covar=tensor([1.1649e-04, 9.7225e-05, 8.2362e-05, 1.1162e-04, 9.4818e-05, 1.4724e-04, + 1.0156e-04, 1.0472e-04], device='cuda:2') +2023-02-06 18:15:05,352 INFO [train.py:901] (2/4) Epoch 16, batch 3500, loss[loss=0.2622, simple_loss=0.3331, pruned_loss=0.09569, over 8335.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2969, pruned_loss=0.06912, over 1611693.58 frames. 
], batch size: 49, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:15:07,646 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:09,702 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6577, 4.6433, 4.1657, 1.8920, 4.1689, 4.1224, 4.2273, 3.8283], + device='cuda:2'), covar=tensor([0.0693, 0.0534, 0.1126, 0.4457, 0.0796, 0.0847, 0.1199, 0.0811], + device='cuda:2'), in_proj_covar=tensor([0.0487, 0.0406, 0.0409, 0.0509, 0.0400, 0.0407, 0.0395, 0.0354], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 18:15:20,549 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.534e+02 3.082e+02 3.894e+02 7.146e+02, threshold=6.164e+02, percent-clipped=3.0 +2023-02-06 18:15:22,095 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:38,262 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 18:15:38,975 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:39,092 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:39,719 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:15:40,174 INFO [train.py:901] (2/4) Epoch 16, batch 3550, loss[loss=0.1902, simple_loss=0.2787, pruned_loss=0.05088, over 8452.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2973, pruned_loss=0.06943, over 1614943.40 frames. ], batch size: 25, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:15:56,877 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124820.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:16:00,143 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124825.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:16:04,089 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6193, 1.2784, 4.8319, 1.8695, 4.3015, 4.0914, 4.3727, 4.2051], + device='cuda:2'), covar=tensor([0.0553, 0.4966, 0.0424, 0.3900, 0.0984, 0.0864, 0.0523, 0.0639], + device='cuda:2'), in_proj_covar=tensor([0.0561, 0.0613, 0.0637, 0.0589, 0.0662, 0.0569, 0.0561, 0.0625], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:16:08,892 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:16:14,228 INFO [train.py:901] (2/4) Epoch 16, batch 3600, loss[loss=0.1902, simple_loss=0.2829, pruned_loss=0.04877, over 8510.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2975, pruned_loss=0.06954, over 1612605.44 frames. ], batch size: 26, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:16:30,802 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.338e+02 2.977e+02 3.463e+02 8.977e+02, threshold=5.954e+02, percent-clipped=2.0 +2023-02-06 18:16:50,928 INFO [train.py:901] (2/4) Epoch 16, batch 3650, loss[loss=0.2579, simple_loss=0.3314, pruned_loss=0.09223, over 8497.00 frames. 
], tot_loss[loss=0.2187, simple_loss=0.2984, pruned_loss=0.0695, over 1613908.47 frames. ], batch size: 26, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:16:59,900 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:17:24,963 INFO [train.py:901] (2/4) Epoch 16, batch 3700, loss[loss=0.225, simple_loss=0.3024, pruned_loss=0.07381, over 8033.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2979, pruned_loss=0.06926, over 1614458.88 frames. ], batch size: 22, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:17:38,855 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 18:17:40,140 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.643e+02 3.299e+02 4.315e+02 1.525e+03, threshold=6.598e+02, percent-clipped=10.0 +2023-02-06 18:18:01,618 INFO [train.py:901] (2/4) Epoch 16, batch 3750, loss[loss=0.235, simple_loss=0.3139, pruned_loss=0.07807, over 8471.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2975, pruned_loss=0.06956, over 1615463.63 frames. ], batch size: 25, lr: 4.77e-03, grad_scale: 16.0 +2023-02-06 18:18:04,451 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125000.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:07,883 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:20,985 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125025.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:24,555 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125030.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:18:35,166 INFO [train.py:901] (2/4) Epoch 16, batch 3800, loss[loss=0.2103, simple_loss=0.296, pruned_loss=0.06233, over 8471.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2958, pruned_loss=0.06876, over 1612657.02 frames. ], batch size: 29, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:18:49,275 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.284e+02 2.854e+02 3.651e+02 7.015e+02, threshold=5.709e+02, percent-clipped=3.0 +2023-02-06 18:18:58,965 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125081.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:19:10,722 INFO [train.py:901] (2/4) Epoch 16, batch 3850, loss[loss=0.2254, simple_loss=0.3022, pruned_loss=0.07433, over 8098.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2966, pruned_loss=0.06934, over 1612798.41 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:19:18,424 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125106.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:19:24,393 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125115.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:41,038 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:45,073 INFO [train.py:901] (2/4) Epoch 16, batch 3900, loss[loss=0.1835, simple_loss=0.2604, pruned_loss=0.05331, over 6832.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2957, pruned_loss=0.06852, over 1610703.88 frames. 
], batch size: 15, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:19:45,092 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 18:19:45,230 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:58,195 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:19:59,306 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.507e+02 2.888e+02 3.601e+02 7.393e+02, threshold=5.777e+02, percent-clipped=3.0 +2023-02-06 18:20:09,607 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:20:15,224 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:20:19,099 INFO [train.py:901] (2/4) Epoch 16, batch 3950, loss[loss=0.2441, simple_loss=0.3316, pruned_loss=0.07825, over 8233.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2952, pruned_loss=0.0678, over 1609766.31 frames. ], batch size: 22, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:20:55,574 INFO [train.py:901] (2/4) Epoch 16, batch 4000, loss[loss=0.1888, simple_loss=0.2707, pruned_loss=0.05343, over 7157.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2956, pruned_loss=0.0682, over 1611447.64 frames. ], batch size: 16, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:21:08,118 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9739, 1.4904, 1.5850, 1.3733, 0.8952, 1.4512, 1.6357, 1.5585], + device='cuda:2'), covar=tensor([0.0517, 0.1278, 0.1725, 0.1410, 0.0613, 0.1519, 0.0724, 0.0646], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0156, 0.0100, 0.0162, 0.0113, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 18:21:09,904 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.424e+02 2.747e+02 3.530e+02 7.172e+02, threshold=5.495e+02, percent-clipped=3.0 +2023-02-06 18:21:22,770 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8331, 2.9475, 2.2923, 4.0068, 1.8417, 1.9607, 2.3122, 3.0560], + device='cuda:2'), covar=tensor([0.0569, 0.0789, 0.0841, 0.0210, 0.1105, 0.1354, 0.1091, 0.0735], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0203, 0.0251, 0.0212, 0.0210, 0.0248, 0.0256, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 18:21:29,134 INFO [train.py:901] (2/4) Epoch 16, batch 4050, loss[loss=0.2582, simple_loss=0.3269, pruned_loss=0.09475, over 8624.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2963, pruned_loss=0.06913, over 1612054.75 frames. 
], batch size: 39, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:21:29,951 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125297.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:21:41,117 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5962, 4.5230, 4.0826, 2.2321, 4.0271, 4.1490, 4.2062, 3.8815], + device='cuda:2'), covar=tensor([0.0626, 0.0552, 0.0887, 0.4425, 0.0756, 0.1062, 0.1108, 0.0859], + device='cuda:2'), in_proj_covar=tensor([0.0495, 0.0412, 0.0410, 0.0516, 0.0405, 0.0411, 0.0402, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 18:22:05,146 INFO [train.py:901] (2/4) Epoch 16, batch 4100, loss[loss=0.1824, simple_loss=0.2817, pruned_loss=0.04152, over 8364.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2964, pruned_loss=0.06889, over 1612404.25 frames. ], batch size: 24, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:22:19,363 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.458e+02 2.941e+02 3.398e+02 7.943e+02, threshold=5.881e+02, percent-clipped=6.0 +2023-02-06 18:22:22,353 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125371.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:24,216 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:26,972 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1061, 1.8016, 2.4309, 1.9758, 2.2002, 2.0995, 1.8294, 1.0142], + device='cuda:2'), covar=tensor([0.5035, 0.4411, 0.1561, 0.3259, 0.2223, 0.2745, 0.1965, 0.4654], + device='cuda:2'), in_proj_covar=tensor([0.0911, 0.0917, 0.0755, 0.0893, 0.0957, 0.0846, 0.0716, 0.0791], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:22:37,727 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:38,920 INFO [train.py:901] (2/4) Epoch 16, batch 4150, loss[loss=0.2339, simple_loss=0.3102, pruned_loss=0.07875, over 8135.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2975, pruned_loss=0.0694, over 1617214.55 frames. 
], batch size: 22, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:22:39,125 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:39,142 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:51,749 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5935, 1.3532, 2.3780, 1.3036, 2.1113, 2.5404, 2.6526, 2.1597], + device='cuda:2'), covar=tensor([0.0877, 0.1296, 0.0431, 0.1811, 0.0718, 0.0360, 0.0550, 0.0666], + device='cuda:2'), in_proj_covar=tensor([0.0277, 0.0307, 0.0273, 0.0297, 0.0289, 0.0248, 0.0381, 0.0295], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 18:22:55,761 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125421.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:09,081 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:14,198 INFO [train.py:901] (2/4) Epoch 16, batch 4200, loss[loss=0.2633, simple_loss=0.3432, pruned_loss=0.09165, over 8649.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2969, pruned_loss=0.06906, over 1616551.69 frames. ], batch size: 34, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:23:29,128 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.508e+02 2.881e+02 3.373e+02 7.881e+02, threshold=5.761e+02, percent-clipped=2.0 +2023-02-06 18:23:39,904 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 18:23:44,591 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:48,645 INFO [train.py:901] (2/4) Epoch 16, batch 4250, loss[loss=0.1647, simple_loss=0.2464, pruned_loss=0.04151, over 7645.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2965, pruned_loss=0.06854, over 1619265.74 frames. ], batch size: 19, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:24:01,577 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 18:24:09,735 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2188, 1.9903, 2.7845, 2.2629, 2.6907, 2.2832, 1.9691, 1.4961], + device='cuda:2'), covar=tensor([0.4986, 0.4549, 0.1689, 0.3194, 0.2252, 0.2547, 0.1758, 0.4945], + device='cuda:2'), in_proj_covar=tensor([0.0913, 0.0921, 0.0758, 0.0898, 0.0961, 0.0849, 0.0720, 0.0795], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:24:11,903 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 18:24:23,119 INFO [train.py:901] (2/4) Epoch 16, batch 4300, loss[loss=0.1978, simple_loss=0.2823, pruned_loss=0.05662, over 8477.00 frames. ], tot_loss[loss=0.217, simple_loss=0.297, pruned_loss=0.06853, over 1619052.17 frames. 
], batch size: 28, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:24:28,632 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125553.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:24:38,405 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.516e+02 3.115e+02 4.119e+02 8.810e+02, threshold=6.231e+02, percent-clipped=6.0 +2023-02-06 18:24:46,748 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:24:52,794 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5057, 2.5069, 1.7984, 2.2682, 2.1554, 1.4583, 1.9594, 2.0188], + device='cuda:2'), covar=tensor([0.1520, 0.0352, 0.1198, 0.0628, 0.0716, 0.1563, 0.1075, 0.1169], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0232, 0.0327, 0.0304, 0.0300, 0.0333, 0.0346, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 18:24:58,864 INFO [train.py:901] (2/4) Epoch 16, batch 4350, loss[loss=0.2263, simple_loss=0.3123, pruned_loss=0.0702, over 8708.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2972, pruned_loss=0.06873, over 1619224.80 frames. ], batch size: 39, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:25:02,855 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 18:25:05,371 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:25:16,538 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:25:33,360 INFO [train.py:901] (2/4) Epoch 16, batch 4400, loss[loss=0.1926, simple_loss=0.2701, pruned_loss=0.05755, over 7776.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2969, pruned_loss=0.06868, over 1617595.29 frames. ], batch size: 19, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:25:34,015 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 18:25:48,651 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.489e+02 3.156e+02 3.927e+02 6.760e+02, threshold=6.312e+02, percent-clipped=2.0 +2023-02-06 18:26:09,564 INFO [train.py:901] (2/4) Epoch 16, batch 4450, loss[loss=0.3044, simple_loss=0.3466, pruned_loss=0.1311, over 7060.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2973, pruned_loss=0.06879, over 1617468.29 frames. ], batch size: 72, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:26:14,216 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-06 18:26:23,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2908, 2.6324, 3.0840, 1.6439, 3.4061, 1.8513, 1.4332, 2.1722], + device='cuda:2'), covar=tensor([0.0663, 0.0308, 0.0222, 0.0638, 0.0325, 0.0792, 0.0838, 0.0425], + device='cuda:2'), in_proj_covar=tensor([0.0430, 0.0361, 0.0314, 0.0420, 0.0354, 0.0511, 0.0372, 0.0391], + device='cuda:2'), out_proj_covar=tensor([1.1825e-04, 9.6388e-05, 8.3468e-05, 1.1270e-04, 9.5577e-05, 1.4802e-04, + 1.0204e-04, 1.0591e-04], device='cuda:2') +2023-02-06 18:26:24,218 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:38,220 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:41,742 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:43,638 INFO [train.py:901] (2/4) Epoch 16, batch 4500, loss[loss=0.1946, simple_loss=0.2662, pruned_loss=0.06152, over 7930.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2979, pruned_loss=0.0691, over 1619462.74 frames. ], batch size: 20, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:26:57,815 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.361e+02 2.740e+02 3.373e+02 6.169e+02, threshold=5.479e+02, percent-clipped=0.0 +2023-02-06 18:27:04,067 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 18:27:10,712 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:27:19,272 INFO [train.py:901] (2/4) Epoch 16, batch 4550, loss[loss=0.1578, simple_loss=0.2446, pruned_loss=0.03546, over 7433.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2973, pruned_loss=0.06869, over 1617818.20 frames. ], batch size: 17, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:27:20,087 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1953, 1.4214, 3.3561, 0.9749, 2.9419, 2.8124, 3.0475, 2.9068], + device='cuda:2'), covar=tensor([0.0826, 0.3744, 0.0830, 0.3958, 0.1546, 0.1187, 0.0750, 0.0957], + device='cuda:2'), in_proj_covar=tensor([0.0560, 0.0619, 0.0641, 0.0590, 0.0660, 0.0573, 0.0563, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:27:45,706 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:27:54,464 INFO [train.py:901] (2/4) Epoch 16, batch 4600, loss[loss=0.2108, simple_loss=0.2874, pruned_loss=0.06706, over 7927.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2967, pruned_loss=0.0686, over 1614633.26 frames. ], batch size: 20, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:27:59,460 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:03,905 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. 
limit=5.0 +2023-02-06 18:28:05,122 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:08,980 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.490e+02 3.040e+02 3.897e+02 1.241e+03, threshold=6.080e+02, percent-clipped=8.0 +2023-02-06 18:28:22,164 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:30,018 INFO [train.py:901] (2/4) Epoch 16, batch 4650, loss[loss=0.2284, simple_loss=0.3169, pruned_loss=0.06994, over 8320.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2971, pruned_loss=0.06908, over 1617634.52 frames. ], batch size: 25, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:28:31,587 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125898.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:29:06,106 INFO [train.py:901] (2/4) Epoch 16, batch 4700, loss[loss=0.2127, simple_loss=0.2827, pruned_loss=0.07137, over 7529.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2962, pruned_loss=0.06897, over 1611418.45 frames. ], batch size: 18, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:29:18,700 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 18:29:18,975 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:29:20,236 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.580e+02 3.138e+02 4.127e+02 1.212e+03, threshold=6.277e+02, percent-clipped=5.0 +2023-02-06 18:29:39,829 INFO [train.py:901] (2/4) Epoch 16, batch 4750, loss[loss=0.1861, simple_loss=0.2635, pruned_loss=0.05434, over 7919.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2977, pruned_loss=0.06956, over 1615463.51 frames. ], batch size: 20, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:29:55,981 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:29:58,748 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7831, 1.7694, 2.4001, 1.6860, 1.3191, 2.3202, 0.3324, 1.5479], + device='cuda:2'), covar=tensor([0.2040, 0.1428, 0.0356, 0.1713, 0.3197, 0.0438, 0.2675, 0.1814], + device='cuda:2'), in_proj_covar=tensor([0.0175, 0.0181, 0.0114, 0.0214, 0.0260, 0.0118, 0.0164, 0.0177], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 18:30:11,196 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 18:30:13,729 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 18:30:16,332 INFO [train.py:901] (2/4) Epoch 16, batch 4800, loss[loss=0.2484, simple_loss=0.3235, pruned_loss=0.08661, over 8813.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2971, pruned_loss=0.06898, over 1618089.15 frames. 
], batch size: 32, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:30:31,319 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.301e+02 2.788e+02 3.330e+02 6.705e+02, threshold=5.575e+02, percent-clipped=2.0 +2023-02-06 18:30:37,043 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7186, 2.0509, 1.6068, 2.6651, 1.2899, 1.3094, 1.7116, 2.2439], + device='cuda:2'), covar=tensor([0.0968, 0.0940, 0.1192, 0.0465, 0.1107, 0.1667, 0.1042, 0.0746], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0204, 0.0250, 0.0214, 0.0211, 0.0249, 0.0257, 0.0213], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 18:30:40,398 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:45,001 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:46,459 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:51,047 INFO [train.py:901] (2/4) Epoch 16, batch 4850, loss[loss=0.2335, simple_loss=0.3066, pruned_loss=0.08019, over 8505.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2972, pruned_loss=0.06926, over 1619255.76 frames. ], batch size: 28, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:30:59,316 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6937, 2.0208, 2.1615, 1.3819, 2.2427, 1.4879, 0.6336, 1.9475], + device='cuda:2'), covar=tensor([0.0499, 0.0278, 0.0211, 0.0510, 0.0317, 0.0806, 0.0722, 0.0237], + device='cuda:2'), in_proj_covar=tensor([0.0427, 0.0362, 0.0314, 0.0421, 0.0353, 0.0511, 0.0371, 0.0390], + device='cuda:2'), out_proj_covar=tensor([1.1717e-04, 9.6436e-05, 8.3497e-05, 1.1313e-04, 9.5198e-05, 1.4780e-04, + 1.0152e-04, 1.0561e-04], device='cuda:2') +2023-02-06 18:30:59,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:01,797 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 18:31:03,308 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:19,049 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:26,988 INFO [train.py:901] (2/4) Epoch 16, batch 4900, loss[loss=0.2085, simple_loss=0.3072, pruned_loss=0.05493, over 8024.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2964, pruned_loss=0.06855, over 1620328.87 frames. ], batch size: 22, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:31:32,562 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:41,755 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.384e+02 3.140e+02 3.836e+02 7.587e+02, threshold=6.281e+02, percent-clipped=5.0 +2023-02-06 18:31:50,105 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:32:01,581 INFO [train.py:901] (2/4) Epoch 16, batch 4950, loss[loss=0.2297, simple_loss=0.3084, pruned_loss=0.0755, over 8561.00 frames. 
], tot_loss[loss=0.2172, simple_loss=0.2963, pruned_loss=0.06902, over 1615426.55 frames. ], batch size: 31, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:32:06,040 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126202.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:32:35,842 INFO [train.py:901] (2/4) Epoch 16, batch 5000, loss[loss=0.2129, simple_loss=0.2979, pruned_loss=0.06393, over 8357.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2962, pruned_loss=0.06908, over 1612590.59 frames. ], batch size: 24, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:32:37,271 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5960, 1.6912, 4.8206, 1.8334, 4.2495, 3.9778, 4.2982, 4.1904], + device='cuda:2'), covar=tensor([0.0586, 0.4059, 0.0475, 0.3624, 0.0999, 0.0947, 0.0578, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0564, 0.0622, 0.0647, 0.0593, 0.0669, 0.0576, 0.0565, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:32:50,295 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.421e+02 2.802e+02 3.540e+02 7.456e+02, threshold=5.603e+02, percent-clipped=2.0 +2023-02-06 18:33:10,450 INFO [train.py:901] (2/4) Epoch 16, batch 5050, loss[loss=0.2253, simple_loss=0.3161, pruned_loss=0.06722, over 8371.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2969, pruned_loss=0.0694, over 1611624.60 frames. ], batch size: 24, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:33:38,225 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:33:39,105 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 18:33:41,485 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 18:33:44,853 INFO [train.py:901] (2/4) Epoch 16, batch 5100, loss[loss=0.2494, simple_loss=0.3095, pruned_loss=0.09466, over 7785.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2962, pruned_loss=0.06944, over 1611426.12 frames. ], batch size: 19, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:33:55,180 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:33:55,977 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126361.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:34:01,121 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.574e+02 2.967e+02 3.773e+02 8.448e+02, threshold=5.934e+02, percent-clipped=7.0 +2023-02-06 18:34:20,689 INFO [train.py:901] (2/4) Epoch 16, batch 5150, loss[loss=0.2043, simple_loss=0.2796, pruned_loss=0.06448, over 7254.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2955, pruned_loss=0.06901, over 1607791.06 frames. ], batch size: 16, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:34:22,777 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:34:54,893 INFO [train.py:901] (2/4) Epoch 16, batch 5200, loss[loss=0.1666, simple_loss=0.2496, pruned_loss=0.04179, over 7702.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2958, pruned_loss=0.06906, over 1607957.12 frames. 
], batch size: 18, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:35:03,390 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:10,023 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.450e+02 2.961e+02 4.009e+02 9.502e+02, threshold=5.923e+02, percent-clipped=8.0 +2023-02-06 18:35:10,199 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126468.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:15,127 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126475.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:21,844 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:30,964 INFO [train.py:901] (2/4) Epoch 16, batch 5250, loss[loss=0.2638, simple_loss=0.3179, pruned_loss=0.1049, over 7713.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.296, pruned_loss=0.06912, over 1604506.47 frames. ], batch size: 18, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:35:39,842 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 18:36:05,590 INFO [train.py:901] (2/4) Epoch 16, batch 5300, loss[loss=0.238, simple_loss=0.3216, pruned_loss=0.07716, over 8332.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2957, pruned_loss=0.06893, over 1606946.01 frames. ], batch size: 25, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:36:20,886 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.415e+02 2.951e+02 3.953e+02 1.148e+03, threshold=5.902e+02, percent-clipped=4.0 +2023-02-06 18:36:41,565 INFO [train.py:901] (2/4) Epoch 16, batch 5350, loss[loss=0.2141, simple_loss=0.2997, pruned_loss=0.0643, over 8099.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2964, pruned_loss=0.06907, over 1608621.77 frames. ], batch size: 23, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:16,921 INFO [train.py:901] (2/4) Epoch 16, batch 5400, loss[loss=0.1988, simple_loss=0.2982, pruned_loss=0.04975, over 8373.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2969, pruned_loss=0.06928, over 1607948.96 frames. ], batch size: 24, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:32,196 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.413e+02 2.875e+02 3.758e+02 9.843e+02, threshold=5.751e+02, percent-clipped=6.0 +2023-02-06 18:37:36,633 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5625, 2.3115, 3.3808, 2.6250, 3.1644, 2.4808, 2.1904, 1.7585], + device='cuda:2'), covar=tensor([0.4739, 0.4472, 0.1606, 0.3441, 0.2260, 0.2844, 0.1799, 0.5238], + device='cuda:2'), in_proj_covar=tensor([0.0907, 0.0914, 0.0755, 0.0891, 0.0956, 0.0840, 0.0716, 0.0794], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:37:51,432 INFO [train.py:901] (2/4) Epoch 16, batch 5450, loss[loss=0.2086, simple_loss=0.3065, pruned_loss=0.05532, over 8747.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2956, pruned_loss=0.06856, over 1603081.44 frames. 
], batch size: 30, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:17,602 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126731.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:24,942 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:28,282 INFO [train.py:901] (2/4) Epoch 16, batch 5500, loss[loss=0.2679, simple_loss=0.3429, pruned_loss=0.09642, over 8505.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2968, pruned_loss=0.06929, over 1609196.14 frames. ], batch size: 28, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:28,990 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 18:38:35,356 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:44,227 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.451e+02 2.886e+02 3.496e+02 8.391e+02, threshold=5.772e+02, percent-clipped=4.0 +2023-02-06 18:39:02,248 INFO [train.py:901] (2/4) Epoch 16, batch 5550, loss[loss=0.2905, simple_loss=0.3639, pruned_loss=0.1085, over 8463.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2964, pruned_loss=0.06896, over 1608458.08 frames. ], batch size: 27, lr: 4.73e-03, grad_scale: 4.0 +2023-02-06 18:39:13,434 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:30,334 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:38,324 INFO [train.py:901] (2/4) Epoch 16, batch 5600, loss[loss=0.2243, simple_loss=0.3023, pruned_loss=0.07315, over 8333.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2964, pruned_loss=0.06882, over 1611841.61 frames. ], batch size: 25, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:39:45,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:54,360 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.374e+02 2.959e+02 4.088e+02 8.002e+02, threshold=5.917e+02, percent-clipped=4.0 +2023-02-06 18:40:09,605 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1877, 1.9371, 2.6355, 2.1796, 2.6208, 2.1906, 1.9214, 1.3718], + device='cuda:2'), covar=tensor([0.5005, 0.4416, 0.1568, 0.3404, 0.2261, 0.2581, 0.1764, 0.4779], + device='cuda:2'), in_proj_covar=tensor([0.0916, 0.0924, 0.0762, 0.0897, 0.0961, 0.0846, 0.0721, 0.0797], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:40:12,816 INFO [train.py:901] (2/4) Epoch 16, batch 5650, loss[loss=0.2091, simple_loss=0.2997, pruned_loss=0.05924, over 8470.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2962, pruned_loss=0.0687, over 1612822.88 frames. ], batch size: 29, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:40:33,382 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 18:40:33,506 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:40:48,558 INFO [train.py:901] (2/4) Epoch 16, batch 5700, loss[loss=0.2275, simple_loss=0.3145, pruned_loss=0.07026, over 8246.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2958, pruned_loss=0.06856, over 1614400.50 frames. ], batch size: 24, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:04,180 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.583e+02 3.205e+02 4.543e+02 7.570e+02, threshold=6.410e+02, percent-clipped=11.0 +2023-02-06 18:41:14,338 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3568, 1.9328, 2.7509, 2.2314, 2.6458, 2.2917, 2.0086, 1.4021], + device='cuda:2'), covar=tensor([0.4799, 0.4557, 0.1543, 0.3119, 0.2156, 0.2583, 0.1735, 0.4671], + device='cuda:2'), in_proj_covar=tensor([0.0914, 0.0926, 0.0765, 0.0897, 0.0963, 0.0847, 0.0723, 0.0800], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:41:17,568 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4906, 2.7186, 1.8336, 2.1427, 2.1539, 1.6487, 2.0346, 2.1165], + device='cuda:2'), covar=tensor([0.1632, 0.0397, 0.1135, 0.0733, 0.0713, 0.1404, 0.1020, 0.1106], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0234, 0.0327, 0.0304, 0.0301, 0.0336, 0.0346, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 18:41:22,817 INFO [train.py:901] (2/4) Epoch 16, batch 5750, loss[loss=0.2369, simple_loss=0.3213, pruned_loss=0.07624, over 8316.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2951, pruned_loss=0.06815, over 1613468.03 frames. ], batch size: 25, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:39,583 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 18:41:56,554 INFO [train.py:901] (2/4) Epoch 16, batch 5800, loss[loss=0.2709, simple_loss=0.339, pruned_loss=0.1015, over 8510.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2957, pruned_loss=0.06892, over 1613317.67 frames. ], batch size: 49, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:14,367 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.425e+02 2.951e+02 3.537e+02 6.549e+02, threshold=5.902e+02, percent-clipped=1.0 +2023-02-06 18:42:27,503 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 18:42:33,216 INFO [train.py:901] (2/4) Epoch 16, batch 5850, loss[loss=0.2822, simple_loss=0.3439, pruned_loss=0.1102, over 8651.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2969, pruned_loss=0.06955, over 1617642.58 frames. ], batch size: 49, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:45,190 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:02,065 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:07,130 INFO [train.py:901] (2/4) Epoch 16, batch 5900, loss[loss=0.2082, simple_loss=0.2988, pruned_loss=0.05878, over 8578.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2967, pruned_loss=0.06942, over 1614499.40 frames. 
], batch size: 34, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:09,451 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4284, 2.1165, 2.8232, 2.3318, 2.7605, 2.3748, 2.0354, 1.5166], + device='cuda:2'), covar=tensor([0.4477, 0.4385, 0.1487, 0.3172, 0.2068, 0.2698, 0.1664, 0.4691], + device='cuda:2'), in_proj_covar=tensor([0.0911, 0.0921, 0.0764, 0.0893, 0.0959, 0.0842, 0.0720, 0.0796], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:43:12,285 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8720, 1.6144, 2.1005, 1.8059, 2.0323, 1.8904, 1.6210, 0.7463], + device='cuda:2'), covar=tensor([0.5249, 0.4689, 0.1616, 0.2799, 0.1973, 0.2778, 0.2081, 0.4441], + device='cuda:2'), in_proj_covar=tensor([0.0911, 0.0922, 0.0764, 0.0894, 0.0960, 0.0843, 0.0721, 0.0796], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:43:23,002 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.337e+02 2.920e+02 3.581e+02 1.365e+03, threshold=5.840e+02, percent-clipped=5.0 +2023-02-06 18:43:30,612 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:34,123 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:42,721 INFO [train.py:901] (2/4) Epoch 16, batch 5950, loss[loss=0.2246, simple_loss=0.3025, pruned_loss=0.07332, over 8257.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.297, pruned_loss=0.06922, over 1618689.47 frames. ], batch size: 24, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:51,308 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127208.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:17,704 INFO [train.py:901] (2/4) Epoch 16, batch 6000, loss[loss=0.1978, simple_loss=0.2712, pruned_loss=0.06221, over 7523.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.297, pruned_loss=0.06928, over 1613812.30 frames. ], batch size: 18, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:44:17,704 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 18:44:29,968 INFO [train.py:935] (2/4) Epoch 16, validation: loss=0.1793, simple_loss=0.2799, pruned_loss=0.03935, over 944034.00 frames. 
+2023-02-06 18:44:29,969 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 18:44:44,469 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:45,663 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.282e+02 2.976e+02 3.659e+02 8.304e+02, threshold=5.951e+02, percent-clipped=2.0 +2023-02-06 18:44:47,121 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3214, 1.8768, 2.9192, 1.2293, 2.0194, 1.7565, 1.5725, 1.9603], + device='cuda:2'), covar=tensor([0.2163, 0.2593, 0.0866, 0.4798, 0.2042, 0.3332, 0.2419, 0.2549], + device='cuda:2'), in_proj_covar=tensor([0.0502, 0.0554, 0.0538, 0.0609, 0.0622, 0.0562, 0.0498, 0.0616], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 18:45:01,834 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:45:03,656 INFO [train.py:901] (2/4) Epoch 16, batch 6050, loss[loss=0.2904, simple_loss=0.3651, pruned_loss=0.1079, over 8487.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2988, pruned_loss=0.0704, over 1617356.17 frames. ], batch size: 49, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:39,301 INFO [train.py:901] (2/4) Epoch 16, batch 6100, loss[loss=0.2365, simple_loss=0.3289, pruned_loss=0.072, over 8026.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2972, pruned_loss=0.06931, over 1611569.46 frames. ], batch size: 22, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:55,485 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.555e+02 2.947e+02 3.627e+02 8.036e+02, threshold=5.895e+02, percent-clipped=1.0 +2023-02-06 18:45:59,284 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 18:46:09,095 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 18:46:13,726 INFO [train.py:901] (2/4) Epoch 16, batch 6150, loss[loss=0.2065, simple_loss=0.2896, pruned_loss=0.06165, over 8249.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2965, pruned_loss=0.06918, over 1609469.16 frames. ], batch size: 24, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:28,955 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8230, 5.9028, 5.1445, 2.4190, 5.2041, 5.6266, 5.4133, 5.2227], + device='cuda:2'), covar=tensor([0.0456, 0.0365, 0.0928, 0.4302, 0.0679, 0.0602, 0.1072, 0.0535], + device='cuda:2'), in_proj_covar=tensor([0.0500, 0.0413, 0.0415, 0.0518, 0.0407, 0.0415, 0.0407, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 18:46:48,822 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127445.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:46:49,324 INFO [train.py:901] (2/4) Epoch 16, batch 6200, loss[loss=0.2441, simple_loss=0.3333, pruned_loss=0.07744, over 8327.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2972, pruned_loss=0.069, over 1614018.99 frames. 
], batch size: 25, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:52,901 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7901, 1.4005, 1.5509, 1.1915, 0.8840, 1.3908, 1.5526, 1.4015], + device='cuda:2'), covar=tensor([0.0532, 0.1308, 0.1765, 0.1531, 0.0634, 0.1566, 0.0712, 0.0660], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0190, 0.0156, 0.0100, 0.0162, 0.0113, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 18:47:04,719 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.656e+02 3.320e+02 4.256e+02 8.643e+02, threshold=6.639e+02, percent-clipped=4.0 +2023-02-06 18:47:23,432 INFO [train.py:901] (2/4) Epoch 16, batch 6250, loss[loss=0.2629, simple_loss=0.343, pruned_loss=0.09143, over 8466.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.06861, over 1615121.30 frames. ], batch size: 29, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:57,839 INFO [train.py:901] (2/4) Epoch 16, batch 6300, loss[loss=0.214, simple_loss=0.29, pruned_loss=0.06896, over 7821.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2973, pruned_loss=0.06904, over 1616802.70 frames. ], batch size: 20, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:58,580 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:47:59,971 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:08,819 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4211, 2.2172, 3.1528, 2.4190, 3.0311, 2.3973, 2.1696, 1.7885], + device='cuda:2'), covar=tensor([0.4678, 0.4746, 0.1749, 0.3448, 0.2360, 0.2623, 0.1698, 0.4826], + device='cuda:2'), in_proj_covar=tensor([0.0913, 0.0921, 0.0766, 0.0896, 0.0964, 0.0846, 0.0722, 0.0796], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:48:13,475 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1462, 1.3183, 1.2877, 0.6737, 1.2972, 1.0854, 0.1247, 1.2985], + device='cuda:2'), covar=tensor([0.0254, 0.0214, 0.0195, 0.0326, 0.0247, 0.0509, 0.0468, 0.0193], + device='cuda:2'), in_proj_covar=tensor([0.0420, 0.0362, 0.0311, 0.0416, 0.0349, 0.0507, 0.0367, 0.0389], + device='cuda:2'), out_proj_covar=tensor([1.1525e-04, 9.6553e-05, 8.2719e-05, 1.1171e-04, 9.3875e-05, 1.4659e-04, + 1.0041e-04, 1.0499e-04], device='cuda:2') +2023-02-06 18:48:14,532 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.653e+02 3.258e+02 3.936e+02 6.732e+02, threshold=6.516e+02, percent-clipped=2.0 +2023-02-06 18:48:17,991 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:32,713 INFO [train.py:901] (2/4) Epoch 16, batch 6350, loss[loss=0.2097, simple_loss=0.3001, pruned_loss=0.05964, over 8340.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2965, pruned_loss=0.06853, over 1614162.62 frames. 
], batch size: 25, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:48:43,690 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:01,232 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0125, 1.6278, 1.2411, 1.5577, 1.3302, 1.0378, 1.2587, 1.3169], + device='cuda:2'), covar=tensor([0.1073, 0.0479, 0.1296, 0.0536, 0.0790, 0.1690, 0.1000, 0.0767], + device='cuda:2'), in_proj_covar=tensor([0.0347, 0.0233, 0.0323, 0.0300, 0.0297, 0.0330, 0.0340, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 18:49:03,417 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-06 18:49:07,014 INFO [train.py:901] (2/4) Epoch 16, batch 6400, loss[loss=0.198, simple_loss=0.279, pruned_loss=0.05851, over 7933.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2974, pruned_loss=0.06895, over 1614931.28 frames. ], batch size: 20, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:49:24,188 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.402e+02 3.034e+02 3.710e+02 8.847e+02, threshold=6.069e+02, percent-clipped=1.0 +2023-02-06 18:49:32,563 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:43,214 INFO [train.py:901] (2/4) Epoch 16, batch 6450, loss[loss=0.2743, simple_loss=0.348, pruned_loss=0.1003, over 8623.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2972, pruned_loss=0.06855, over 1618309.61 frames. ], batch size: 34, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:04,134 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:50:17,017 INFO [train.py:901] (2/4) Epoch 16, batch 6500, loss[loss=0.2733, simple_loss=0.3405, pruned_loss=0.103, over 8509.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2988, pruned_loss=0.06971, over 1619198.49 frames. ], batch size: 26, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:32,622 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.427e+02 3.150e+02 4.006e+02 1.604e+03, threshold=6.301e+02, percent-clipped=4.0 +2023-02-06 18:50:48,409 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127789.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:50:52,953 INFO [train.py:901] (2/4) Epoch 16, batch 6550, loss[loss=0.2284, simple_loss=0.3069, pruned_loss=0.07497, over 8599.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2994, pruned_loss=0.07014, over 1624708.51 frames. ], batch size: 31, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:53,788 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:17,239 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 18:51:27,450 INFO [train.py:901] (2/4) Epoch 16, batch 6600, loss[loss=0.2409, simple_loss=0.3065, pruned_loss=0.08768, over 7923.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2988, pruned_loss=0.06986, over 1622182.30 frames. ], batch size: 20, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:51:36,812 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 18:51:42,286 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:42,776 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.405e+02 2.899e+02 3.574e+02 1.034e+03, threshold=5.799e+02, percent-clipped=3.0 +2023-02-06 18:51:57,075 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:57,611 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127891.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:52:00,745 INFO [train.py:901] (2/4) Epoch 16, batch 6650, loss[loss=0.2047, simple_loss=0.2921, pruned_loss=0.05864, over 8467.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2978, pruned_loss=0.06945, over 1618172.35 frames. ], batch size: 25, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:07,581 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127904.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:52:36,161 INFO [train.py:901] (2/4) Epoch 16, batch 6700, loss[loss=0.222, simple_loss=0.2908, pruned_loss=0.07658, over 8088.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2977, pruned_loss=0.0692, over 1618877.29 frames. ], batch size: 21, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:41,772 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1865, 1.1007, 1.2847, 1.0556, 0.9073, 1.2972, 0.0556, 0.9104], + device='cuda:2'), covar=tensor([0.1984, 0.1513, 0.0529, 0.0988, 0.3235, 0.0621, 0.2650, 0.1448], + device='cuda:2'), in_proj_covar=tensor([0.0174, 0.0184, 0.0114, 0.0212, 0.0259, 0.0118, 0.0165, 0.0179], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 18:52:52,429 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.543e+02 2.898e+02 3.564e+02 8.195e+02, threshold=5.796e+02, percent-clipped=3.0 +2023-02-06 18:53:01,215 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:10,437 INFO [train.py:901] (2/4) Epoch 16, batch 6750, loss[loss=0.2677, simple_loss=0.3483, pruned_loss=0.09359, over 8369.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2971, pruned_loss=0.06901, over 1613305.59 frames. ], batch size: 24, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:15,087 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3724, 1.4360, 1.3696, 1.8080, 0.7605, 1.2011, 1.3079, 1.4933], + device='cuda:2'), covar=tensor([0.0859, 0.0753, 0.1017, 0.0499, 0.1096, 0.1351, 0.0783, 0.0691], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0201, 0.0247, 0.0211, 0.0208, 0.0247, 0.0252, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 18:53:15,265 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. 
limit=2.0 +2023-02-06 18:53:18,488 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:19,176 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:32,877 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128024.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:47,637 INFO [train.py:901] (2/4) Epoch 16, batch 6800, loss[loss=0.2104, simple_loss=0.2973, pruned_loss=0.06169, over 8265.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.069, over 1610856.87 frames. ], batch size: 24, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:51,039 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 18:54:04,017 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.604e+02 3.143e+02 4.008e+02 8.483e+02, threshold=6.287e+02, percent-clipped=3.0 +2023-02-06 18:54:22,242 INFO [train.py:901] (2/4) Epoch 16, batch 6850, loss[loss=0.2336, simple_loss=0.3183, pruned_loss=0.07444, over 8287.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.298, pruned_loss=0.06948, over 1615406.97 frames. ], batch size: 23, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:54:40,675 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 18:54:53,393 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128139.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:54,707 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128141.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:56,195 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8318, 2.6682, 3.7221, 1.8279, 1.6488, 3.6385, 0.6702, 2.1522], + device='cuda:2'), covar=tensor([0.1592, 0.1176, 0.0334, 0.2558, 0.3517, 0.0370, 0.2884, 0.1737], + device='cuda:2'), in_proj_covar=tensor([0.0175, 0.0183, 0.0114, 0.0212, 0.0258, 0.0118, 0.0164, 0.0178], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 18:54:58,083 INFO [train.py:901] (2/4) Epoch 16, batch 6900, loss[loss=0.1776, simple_loss=0.2577, pruned_loss=0.04879, over 7705.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2975, pruned_loss=0.06904, over 1615264.25 frames. ], batch size: 18, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:08,128 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128160.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:55:14,249 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.605e+02 3.172e+02 3.868e+02 9.306e+02, threshold=6.344e+02, percent-clipped=5.0 +2023-02-06 18:55:25,857 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128185.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:55:32,783 INFO [train.py:901] (2/4) Epoch 16, batch 6950, loss[loss=0.191, simple_loss=0.2753, pruned_loss=0.05339, over 7922.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2981, pruned_loss=0.06937, over 1611707.47 frames. ], batch size: 20, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:34,511 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-06 18:55:43,517 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:55:48,019 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 18:55:58,348 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:07,200 INFO [train.py:901] (2/4) Epoch 16, batch 7000, loss[loss=0.2338, simple_loss=0.3131, pruned_loss=0.07725, over 8729.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2969, pruned_loss=0.06903, over 1608558.15 frames. ], batch size: 34, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:56:15,592 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:19,638 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:23,999 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.690e+02 3.457e+02 5.056e+02 8.270e+02, threshold=6.915e+02, percent-clipped=6.0 +2023-02-06 18:56:36,186 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:42,574 INFO [train.py:901] (2/4) Epoch 16, batch 7050, loss[loss=0.2502, simple_loss=0.3219, pruned_loss=0.0893, over 8356.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2971, pruned_loss=0.06876, over 1608333.00 frames. ], batch size: 24, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:03,869 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:11,323 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:16,463 INFO [train.py:901] (2/4) Epoch 16, batch 7100, loss[loss=0.2461, simple_loss=0.3184, pruned_loss=0.08689, over 8513.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2964, pruned_loss=0.06851, over 1606006.71 frames. ], batch size: 26, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:18,647 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:33,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.456e+02 3.083e+02 3.766e+02 8.441e+02, threshold=6.166e+02, percent-clipped=2.0 +2023-02-06 18:57:52,038 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128395.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:52,510 INFO [train.py:901] (2/4) Epoch 16, batch 7150, loss[loss=0.185, simple_loss=0.2728, pruned_loss=0.0486, over 8078.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.296, pruned_loss=0.06777, over 1608336.94 frames. 
], batch size: 21, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:08,475 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4858, 2.5875, 1.8652, 2.1163, 2.1382, 1.4433, 1.8990, 2.0902], + device='cuda:2'), covar=tensor([0.1475, 0.0338, 0.1098, 0.0677, 0.0700, 0.1639, 0.1038, 0.1036], + device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0233, 0.0325, 0.0301, 0.0302, 0.0330, 0.0342, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 18:58:09,864 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:17,227 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:27,263 INFO [train.py:901] (2/4) Epoch 16, batch 7200, loss[loss=0.2698, simple_loss=0.3461, pruned_loss=0.09676, over 7091.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.297, pruned_loss=0.06829, over 1608031.59 frames. ], batch size: 71, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:42,535 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.378e+02 2.905e+02 3.370e+02 6.119e+02, threshold=5.810e+02, percent-clipped=0.0 +2023-02-06 18:59:02,791 INFO [train.py:901] (2/4) Epoch 16, batch 7250, loss[loss=0.1795, simple_loss=0.2524, pruned_loss=0.05332, over 7538.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2984, pruned_loss=0.06928, over 1613442.42 frames. ], batch size: 18, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:13,767 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:26,470 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6691, 2.3269, 4.7831, 2.9351, 4.3618, 4.1657, 4.5029, 4.4273], + device='cuda:2'), covar=tensor([0.0460, 0.3732, 0.0555, 0.2916, 0.0875, 0.0792, 0.0459, 0.0490], + device='cuda:2'), in_proj_covar=tensor([0.0561, 0.0623, 0.0643, 0.0591, 0.0670, 0.0573, 0.0564, 0.0632], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 18:59:31,351 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:32,908 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 18:59:37,227 INFO [train.py:901] (2/4) Epoch 16, batch 7300, loss[loss=0.1895, simple_loss=0.2842, pruned_loss=0.04739, over 8258.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2973, pruned_loss=0.06877, over 1614411.87 frames. ], batch size: 24, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:52,609 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.470e+02 2.980e+02 3.722e+02 1.252e+03, threshold=5.960e+02, percent-clipped=4.0 +2023-02-06 19:00:02,293 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:12,483 INFO [train.py:901] (2/4) Epoch 16, batch 7350, loss[loss=0.1879, simple_loss=0.262, pruned_loss=0.05684, over 7705.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2981, pruned_loss=0.06936, over 1614729.75 frames. 
], batch size: 18, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:19,519 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:21,501 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:31,403 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 19:00:36,116 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:47,434 INFO [train.py:901] (2/4) Epoch 16, batch 7400, loss[loss=0.1666, simple_loss=0.2456, pruned_loss=0.04378, over 7702.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2978, pruned_loss=0.06962, over 1615189.43 frames. ], batch size: 18, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:49,532 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 19:01:00,966 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8788, 1.7080, 2.0768, 1.6743, 1.0099, 1.9641, 2.3179, 2.3976], + device='cuda:2'), covar=tensor([0.0451, 0.1198, 0.1611, 0.1360, 0.0578, 0.1321, 0.0601, 0.0530], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 19:01:02,831 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.610e+02 3.305e+02 3.788e+02 1.058e+03, threshold=6.610e+02, percent-clipped=7.0 +2023-02-06 19:01:11,762 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:01:21,078 INFO [train.py:901] (2/4) Epoch 16, batch 7450, loss[loss=0.2224, simple_loss=0.3052, pruned_loss=0.06981, over 8352.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2984, pruned_loss=0.06977, over 1617289.38 frames. ], batch size: 24, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:01:30,645 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 19:01:52,238 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6880, 5.7371, 5.0309, 2.2740, 5.0141, 5.4384, 5.3561, 5.1385], + device='cuda:2'), covar=tensor([0.0549, 0.0401, 0.0860, 0.4835, 0.0784, 0.0725, 0.0954, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0503, 0.0411, 0.0416, 0.0516, 0.0403, 0.0412, 0.0405, 0.0357], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:01:56,807 INFO [train.py:901] (2/4) Epoch 16, batch 7500, loss[loss=0.2115, simple_loss=0.2798, pruned_loss=0.07156, over 7224.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2974, pruned_loss=0.06919, over 1618493.50 frames. ], batch size: 16, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:02:12,299 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. 
limit=2.0 +2023-02-06 19:02:13,138 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.417e+02 2.923e+02 3.614e+02 6.549e+02, threshold=5.847e+02, percent-clipped=0.0 +2023-02-06 19:02:17,186 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:31,138 INFO [train.py:901] (2/4) Epoch 16, batch 7550, loss[loss=0.1847, simple_loss=0.281, pruned_loss=0.04425, over 8478.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2967, pruned_loss=0.06862, over 1617991.73 frames. ], batch size: 25, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:02:32,027 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:33,327 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:49,735 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9301, 1.7276, 2.8895, 2.2502, 2.5887, 1.8621, 1.5609, 1.3161], + device='cuda:2'), covar=tensor([0.6491, 0.6144, 0.1709, 0.3315, 0.2535, 0.3975, 0.2864, 0.5228], + device='cuda:2'), in_proj_covar=tensor([0.0910, 0.0921, 0.0758, 0.0897, 0.0958, 0.0848, 0.0721, 0.0794], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:03:04,845 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5901, 1.5735, 2.2353, 1.5230, 1.1772, 2.1122, 0.4965, 1.3135], + device='cuda:2'), covar=tensor([0.2057, 0.1401, 0.0360, 0.1369, 0.3197, 0.0496, 0.2437, 0.1588], + device='cuda:2'), in_proj_covar=tensor([0.0177, 0.0185, 0.0115, 0.0215, 0.0261, 0.0120, 0.0165, 0.0179], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:03:07,387 INFO [train.py:901] (2/4) Epoch 16, batch 7600, loss[loss=0.2379, simple_loss=0.3148, pruned_loss=0.0805, over 8354.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2969, pruned_loss=0.06887, over 1616450.26 frames. ], batch size: 49, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:23,267 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.439e+02 3.123e+02 4.017e+02 8.994e+02, threshold=6.245e+02, percent-clipped=5.0 +2023-02-06 19:03:38,583 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:03:42,463 INFO [train.py:901] (2/4) Epoch 16, batch 7650, loss[loss=0.2296, simple_loss=0.3067, pruned_loss=0.07629, over 8191.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2963, pruned_loss=0.06816, over 1617860.00 frames. 
], batch size: 23, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:50,523 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128908.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:04:08,892 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8976, 3.8563, 3.5190, 1.6058, 3.4784, 3.4419, 3.4578, 3.1332], + device='cuda:2'), covar=tensor([0.0909, 0.0601, 0.1239, 0.4549, 0.0978, 0.1113, 0.1381, 0.0909], + device='cuda:2'), in_proj_covar=tensor([0.0497, 0.0406, 0.0411, 0.0508, 0.0400, 0.0408, 0.0400, 0.0353], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:04:17,494 INFO [train.py:901] (2/4) Epoch 16, batch 7700, loss[loss=0.2108, simple_loss=0.3068, pruned_loss=0.05744, over 8501.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2943, pruned_loss=0.06755, over 1613433.07 frames. ], batch size: 28, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:04:34,543 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.361e+02 3.016e+02 3.880e+02 7.767e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 19:04:42,141 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 19:04:49,724 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6209, 2.0215, 3.2687, 1.3681, 2.6281, 2.1259, 1.7264, 2.3439], + device='cuda:2'), covar=tensor([0.1847, 0.2393, 0.0883, 0.4476, 0.1568, 0.2998, 0.2064, 0.2277], + device='cuda:2'), in_proj_covar=tensor([0.0504, 0.0561, 0.0544, 0.0612, 0.0628, 0.0570, 0.0502, 0.0617], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:04:52,875 INFO [train.py:901] (2/4) Epoch 16, batch 7750, loss[loss=0.1922, simple_loss=0.2741, pruned_loss=0.05516, over 7805.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2941, pruned_loss=0.06712, over 1605487.02 frames. ], batch size: 20, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:04:56,371 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3515, 1.5275, 2.1197, 1.2177, 1.4707, 1.6410, 1.4197, 1.3650], + device='cuda:2'), covar=tensor([0.1910, 0.2505, 0.0913, 0.4348, 0.1932, 0.3273, 0.2246, 0.2170], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0562, 0.0545, 0.0613, 0.0629, 0.0571, 0.0503, 0.0618], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:05:21,211 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6043, 1.8473, 1.9584, 1.1939, 2.0739, 1.4724, 0.5800, 1.7505], + device='cuda:2'), covar=tensor([0.0485, 0.0285, 0.0233, 0.0501, 0.0354, 0.0774, 0.0741, 0.0266], + device='cuda:2'), in_proj_covar=tensor([0.0426, 0.0364, 0.0314, 0.0421, 0.0350, 0.0510, 0.0373, 0.0393], + device='cuda:2'), out_proj_covar=tensor([1.1664e-04, 9.6988e-05, 8.3333e-05, 1.1296e-04, 9.4072e-05, 1.4731e-04, + 1.0217e-04, 1.0608e-04], device='cuda:2') +2023-02-06 19:05:24,790 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 19:05:26,242 INFO [train.py:901] (2/4) Epoch 16, batch 7800, loss[loss=0.25, simple_loss=0.3369, pruned_loss=0.08153, over 8324.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2957, pruned_loss=0.06795, over 1610394.30 frames. 
], batch size: 25, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:05:28,998 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5205, 2.7059, 1.8679, 2.3102, 2.2952, 1.5317, 1.9883, 2.2636], + device='cuda:2'), covar=tensor([0.1507, 0.0337, 0.1116, 0.0651, 0.0670, 0.1542, 0.1014, 0.0965], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0234, 0.0327, 0.0300, 0.0299, 0.0332, 0.0341, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:05:31,086 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:05:41,941 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.423e+02 2.949e+02 3.975e+02 9.373e+02, threshold=5.898e+02, percent-clipped=5.0 +2023-02-06 19:05:45,472 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3470, 1.3561, 2.2985, 1.1146, 2.0708, 2.4501, 2.5765, 1.9516], + device='cuda:2'), covar=tensor([0.1158, 0.1390, 0.0537, 0.2303, 0.0883, 0.0470, 0.0758, 0.0920], + device='cuda:2'), in_proj_covar=tensor([0.0281, 0.0309, 0.0273, 0.0301, 0.0293, 0.0251, 0.0385, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 19:05:48,216 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:00,670 INFO [train.py:901] (2/4) Epoch 16, batch 7850, loss[loss=0.2747, simple_loss=0.3398, pruned_loss=0.1048, over 7091.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.296, pruned_loss=0.06836, over 1606854.59 frames. ], batch size: 71, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:09,640 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1866, 4.1498, 3.7659, 1.9500, 3.6609, 3.7447, 3.8322, 3.4702], + device='cuda:2'), covar=tensor([0.0799, 0.0571, 0.1080, 0.4404, 0.0857, 0.0956, 0.1172, 0.0829], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0413, 0.0420, 0.0516, 0.0407, 0.0415, 0.0407, 0.0360], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:06:32,322 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:34,266 INFO [train.py:901] (2/4) Epoch 16, batch 7900, loss[loss=0.2199, simple_loss=0.2969, pruned_loss=0.07141, over 8572.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2949, pruned_loss=0.06754, over 1603015.41 frames. ], batch size: 34, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:34,503 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:51,032 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.289e+02 2.786e+02 3.620e+02 6.776e+02, threshold=5.572e+02, percent-clipped=2.0 +2023-02-06 19:06:51,877 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:08,434 INFO [train.py:901] (2/4) Epoch 16, batch 7950, loss[loss=0.2358, simple_loss=0.3149, pruned_loss=0.07831, over 8800.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2951, pruned_loss=0.06754, over 1604341.66 frames. 
], batch size: 40, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:42,916 INFO [train.py:901] (2/4) Epoch 16, batch 8000, loss[loss=0.2289, simple_loss=0.3117, pruned_loss=0.07307, over 8766.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2957, pruned_loss=0.06742, over 1611966.79 frames. ], batch size: 30, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:47,178 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129252.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:07:48,169 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.39 vs. limit=5.0 +2023-02-06 19:07:51,183 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:59,068 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.511e+02 2.964e+02 3.601e+02 8.820e+02, threshold=5.927e+02, percent-clipped=6.0 +2023-02-06 19:08:16,586 INFO [train.py:901] (2/4) Epoch 16, batch 8050, loss[loss=0.1991, simple_loss=0.2743, pruned_loss=0.06192, over 7554.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2946, pruned_loss=0.06837, over 1595521.39 frames. ], batch size: 18, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:08:52,753 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 19:08:56,567 INFO [train.py:901] (2/4) Epoch 17, batch 0, loss[loss=0.2165, simple_loss=0.3116, pruned_loss=0.06071, over 8180.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.3116, pruned_loss=0.06071, over 8180.00 frames. ], batch size: 23, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:08:56,567 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 19:09:04,438 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5238, 1.7924, 2.6773, 1.3606, 1.9870, 1.7836, 1.6368, 1.9044], + device='cuda:2'), covar=tensor([0.1694, 0.2352, 0.0741, 0.4194, 0.1743, 0.3058, 0.2083, 0.2293], + device='cuda:2'), in_proj_covar=tensor([0.0504, 0.0561, 0.0543, 0.0612, 0.0631, 0.0570, 0.0501, 0.0620], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:09:07,561 INFO [train.py:935] (2/4) Epoch 17, validation: loss=0.1792, simple_loss=0.2794, pruned_loss=0.03944, over 944034.00 frames. +2023-02-06 19:09:07,562 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 19:09:19,459 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 19:09:21,129 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-06 19:09:29,750 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2338, 1.9814, 2.6159, 2.1498, 2.3910, 2.1583, 1.9561, 1.3298], + device='cuda:2'), covar=tensor([0.4450, 0.4228, 0.1530, 0.2919, 0.2149, 0.2705, 0.1728, 0.4719], + device='cuda:2'), in_proj_covar=tensor([0.0906, 0.0918, 0.0758, 0.0892, 0.0954, 0.0845, 0.0719, 0.0792], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:09:33,856 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129367.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:09:35,633 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.551e+02 3.127e+02 3.678e+02 8.568e+02, threshold=6.254e+02, percent-clipped=4.0 +2023-02-06 19:09:41,816 INFO [train.py:901] (2/4) Epoch 17, batch 50, loss[loss=0.2059, simple_loss=0.2848, pruned_loss=0.0635, over 8454.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.3006, pruned_loss=0.07051, over 367479.17 frames. ], batch size: 27, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:09:54,006 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 19:10:18,436 INFO [train.py:901] (2/4) Epoch 17, batch 100, loss[loss=0.2165, simple_loss=0.2958, pruned_loss=0.0686, over 8492.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2988, pruned_loss=0.06967, over 646979.90 frames. ], batch size: 28, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:10:18,444 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 19:10:19,953 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129431.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:10:23,384 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5297, 1.7543, 2.6111, 1.4234, 1.9028, 1.9357, 1.5470, 1.9446], + device='cuda:2'), covar=tensor([0.1837, 0.2518, 0.0888, 0.4182, 0.1832, 0.2963, 0.2270, 0.2138], + device='cuda:2'), in_proj_covar=tensor([0.0507, 0.0563, 0.0546, 0.0613, 0.0633, 0.0573, 0.0505, 0.0621], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:10:46,024 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.489e+02 3.062e+02 3.657e+02 7.822e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 19:10:52,181 INFO [train.py:901] (2/4) Epoch 17, batch 150, loss[loss=0.2182, simple_loss=0.296, pruned_loss=0.07022, over 8347.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2995, pruned_loss=0.06975, over 863003.91 frames. ], batch size: 24, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:18,263 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:29,025 INFO [train.py:901] (2/4) Epoch 17, batch 200, loss[loss=0.228, simple_loss=0.2986, pruned_loss=0.07876, over 8498.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2987, pruned_loss=0.06876, over 1034679.06 frames. 
], batch size: 28, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:36,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:57,077 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.455e+02 2.902e+02 3.926e+02 7.649e+02, threshold=5.804e+02, percent-clipped=5.0 +2023-02-06 19:12:03,433 INFO [train.py:901] (2/4) Epoch 17, batch 250, loss[loss=0.2239, simple_loss=0.3017, pruned_loss=0.07308, over 8311.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.298, pruned_loss=0.06851, over 1167431.71 frames. ], batch size: 49, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:09,653 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 19:12:11,839 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4871, 2.7357, 1.8101, 2.2300, 2.2070, 1.6109, 2.1074, 2.2681], + device='cuda:2'), covar=tensor([0.1521, 0.0340, 0.1139, 0.0634, 0.0745, 0.1410, 0.0959, 0.0866], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0236, 0.0330, 0.0303, 0.0301, 0.0337, 0.0344, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:12:18,387 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 19:12:33,734 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129623.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:12:38,236 INFO [train.py:901] (2/4) Epoch 17, batch 300, loss[loss=0.2228, simple_loss=0.3046, pruned_loss=0.07045, over 8518.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.298, pruned_loss=0.06827, over 1269824.61 frames. ], batch size: 31, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:39,078 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:12:53,545 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129648.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:13:08,148 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.453e+02 3.064e+02 3.747e+02 1.027e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-06 19:13:14,344 INFO [train.py:901] (2/4) Epoch 17, batch 350, loss[loss=0.2268, simple_loss=0.2994, pruned_loss=0.07705, over 8203.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06826, over 1345144.53 frames. ], batch size: 23, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:13:47,840 INFO [train.py:901] (2/4) Epoch 17, batch 400, loss[loss=0.2439, simple_loss=0.3152, pruned_loss=0.0863, over 8285.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06822, over 1405011.40 frames. ], batch size: 23, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:18,001 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.355e+02 2.898e+02 3.830e+02 8.224e+02, threshold=5.797e+02, percent-clipped=7.0 +2023-02-06 19:14:21,454 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129775.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:14:24,071 INFO [train.py:901] (2/4) Epoch 17, batch 450, loss[loss=0.1814, simple_loss=0.2616, pruned_loss=0.05055, over 8086.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2974, pruned_loss=0.0689, over 1453156.63 frames. 
], batch size: 21, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:34,957 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3020, 2.0130, 2.8393, 2.3303, 2.6695, 2.2548, 2.0244, 1.4054], + device='cuda:2'), covar=tensor([0.4505, 0.4366, 0.1450, 0.3196, 0.2250, 0.2774, 0.1884, 0.4849], + device='cuda:2'), in_proj_covar=tensor([0.0911, 0.0922, 0.0763, 0.0897, 0.0956, 0.0848, 0.0721, 0.0796], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:14:44,505 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:14:58,027 INFO [train.py:901] (2/4) Epoch 17, batch 500, loss[loss=0.212, simple_loss=0.2971, pruned_loss=0.06349, over 8244.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2958, pruned_loss=0.06834, over 1488556.74 frames. ], batch size: 24, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:03,870 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 19:15:05,062 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8607, 1.8806, 2.3881, 1.5499, 1.2914, 2.3768, 0.3375, 1.4206], + device='cuda:2'), covar=tensor([0.2135, 0.1430, 0.0417, 0.1579, 0.3568, 0.0434, 0.2730, 0.1668], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0186, 0.0115, 0.0218, 0.0266, 0.0121, 0.0166, 0.0178], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:15:28,005 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.377e+02 2.910e+02 3.862e+02 1.132e+03, threshold=5.820e+02, percent-clipped=8.0 +2023-02-06 19:15:35,661 INFO [train.py:901] (2/4) Epoch 17, batch 550, loss[loss=0.2626, simple_loss=0.3218, pruned_loss=0.1016, over 7805.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2971, pruned_loss=0.06908, over 1518420.20 frames. ], batch size: 20, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:43,354 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129890.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:15:47,845 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 19:16:10,112 INFO [train.py:901] (2/4) Epoch 17, batch 600, loss[loss=0.1844, simple_loss=0.2577, pruned_loss=0.05553, over 7788.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2968, pruned_loss=0.069, over 1538713.00 frames. ], batch size: 19, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:16:19,712 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 19:16:38,512 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 2.576e+02 2.936e+02 3.639e+02 7.352e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-06 19:16:41,365 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:16:44,753 INFO [train.py:901] (2/4) Epoch 17, batch 650, loss[loss=0.2431, simple_loss=0.3152, pruned_loss=0.08552, over 8024.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2955, pruned_loss=0.06816, over 1551801.47 frames. 
], batch size: 22, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:05,620 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1094, 2.1593, 1.6092, 1.8750, 1.7330, 1.3720, 1.5782, 1.7126], + device='cuda:2'), covar=tensor([0.1353, 0.0343, 0.1171, 0.0496, 0.0715, 0.1449, 0.0981, 0.0842], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0233, 0.0328, 0.0301, 0.0300, 0.0332, 0.0342, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:17:16,415 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:17,844 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:23,793 INFO [train.py:901] (2/4) Epoch 17, batch 700, loss[loss=0.2408, simple_loss=0.3041, pruned_loss=0.08873, over 7978.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2953, pruned_loss=0.06801, over 1564690.61 frames. ], batch size: 21, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:24,267 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 19:17:51,871 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.350e+02 2.811e+02 3.683e+02 1.098e+03, threshold=5.622e+02, percent-clipped=6.0 +2023-02-06 19:17:58,281 INFO [train.py:901] (2/4) Epoch 17, batch 750, loss[loss=0.1764, simple_loss=0.2497, pruned_loss=0.05152, over 7428.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2949, pruned_loss=0.06777, over 1572520.69 frames. ], batch size: 17, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:05,544 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:18:08,226 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 19:18:10,422 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8639, 1.4196, 1.6993, 1.2463, 0.9335, 1.3996, 1.7655, 1.3798], + device='cuda:2'), covar=tensor([0.0515, 0.1279, 0.1591, 0.1481, 0.0603, 0.1538, 0.0680, 0.0662], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 19:18:19,420 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 19:18:30,498 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7927, 4.7156, 4.2114, 2.0995, 4.2247, 4.3979, 4.3595, 4.1271], + device='cuda:2'), covar=tensor([0.0577, 0.0502, 0.1003, 0.4657, 0.0827, 0.0909, 0.1248, 0.0651], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0416, 0.0419, 0.0513, 0.0408, 0.0417, 0.0405, 0.0359], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:18:36,027 INFO [train.py:901] (2/4) Epoch 17, batch 800, loss[loss=0.2071, simple_loss=0.2901, pruned_loss=0.06199, over 8294.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2947, pruned_loss=0.06778, over 1582719.67 frames. 
], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:41,743 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1797, 1.0835, 1.2653, 1.1209, 0.9250, 1.2933, 0.0798, 1.0481], + device='cuda:2'), covar=tensor([0.1868, 0.1311, 0.0511, 0.0987, 0.3075, 0.0591, 0.2277, 0.1489], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0186, 0.0116, 0.0218, 0.0265, 0.0122, 0.0167, 0.0181], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:18:45,902 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1511, 1.5796, 4.3496, 1.6139, 3.8522, 3.6661, 4.0106, 3.8587], + device='cuda:2'), covar=tensor([0.0562, 0.3965, 0.0505, 0.3634, 0.1110, 0.0869, 0.0543, 0.0605], + device='cuda:2'), in_proj_covar=tensor([0.0562, 0.0621, 0.0643, 0.0589, 0.0671, 0.0575, 0.0566, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:18:48,062 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130146.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:18:52,763 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:19:04,223 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.363e+02 2.676e+02 3.408e+02 8.560e+02, threshold=5.353e+02, percent-clipped=3.0 +2023-02-06 19:19:05,144 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130171.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:19:10,475 INFO [train.py:901] (2/4) Epoch 17, batch 850, loss[loss=0.2071, simple_loss=0.2916, pruned_loss=0.06127, over 8438.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2961, pruned_loss=0.0681, over 1590834.25 frames. ], batch size: 29, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:19:47,569 INFO [train.py:901] (2/4) Epoch 17, batch 900, loss[loss=0.2322, simple_loss=0.3146, pruned_loss=0.07492, over 8250.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2962, pruned_loss=0.06786, over 1600212.91 frames. ], batch size: 24, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:12,542 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3979, 1.5000, 4.4813, 2.0039, 2.5520, 5.0946, 5.1065, 4.3492], + device='cuda:2'), covar=tensor([0.1033, 0.1804, 0.0258, 0.1931, 0.1086, 0.0142, 0.0293, 0.0551], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0309, 0.0275, 0.0303, 0.0295, 0.0254, 0.0389, 0.0299], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 19:20:15,373 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:20:16,505 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.489e+02 3.023e+02 3.878e+02 8.176e+02, threshold=6.045e+02, percent-clipped=7.0 +2023-02-06 19:20:22,802 INFO [train.py:901] (2/4) Epoch 17, batch 950, loss[loss=0.1859, simple_loss=0.2597, pruned_loss=0.056, over 7257.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2975, pruned_loss=0.06876, over 1604749.44 frames. ], batch size: 16, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:43,392 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-06 19:20:57,181 INFO [train.py:901] (2/4) Epoch 17, batch 1000, loss[loss=0.2244, simple_loss=0.307, pruned_loss=0.07096, over 8332.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2973, pruned_loss=0.0686, over 1605554.09 frames. ], batch size: 25, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:21:04,779 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3528, 4.3178, 3.8506, 2.1350, 3.8153, 3.9580, 3.9532, 3.6679], + device='cuda:2'), covar=tensor([0.0763, 0.0552, 0.1073, 0.4546, 0.0871, 0.1049, 0.1119, 0.0883], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0412, 0.0415, 0.0510, 0.0405, 0.0413, 0.0401, 0.0356], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:21:09,239 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:15,351 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8106, 5.9404, 5.0850, 2.3867, 5.2303, 5.6322, 5.5489, 5.3494], + device='cuda:2'), covar=tensor([0.0647, 0.0407, 0.0914, 0.4513, 0.0732, 0.0721, 0.1139, 0.0587], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0413, 0.0417, 0.0511, 0.0407, 0.0415, 0.0402, 0.0357], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:21:20,026 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 19:21:21,980 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:23,330 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:26,305 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6038, 1.7218, 2.0892, 1.4726, 1.2597, 2.1006, 0.2428, 1.3720], + device='cuda:2'), covar=tensor([0.2263, 0.1317, 0.0434, 0.1401, 0.3204, 0.0530, 0.2560, 0.1288], + device='cuda:2'), in_proj_covar=tensor([0.0177, 0.0184, 0.0115, 0.0216, 0.0263, 0.0120, 0.0167, 0.0178], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:21:27,490 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.680e+02 3.059e+02 3.924e+02 8.380e+02, threshold=6.118e+02, percent-clipped=2.0 +2023-02-06 19:21:27,757 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:33,156 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 19:21:33,840 INFO [train.py:901] (2/4) Epoch 17, batch 1050, loss[loss=0.2015, simple_loss=0.3022, pruned_loss=0.05042, over 8104.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2968, pruned_loss=0.06784, over 1609125.28 frames. ], batch size: 23, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:21:49,934 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:08,449 INFO [train.py:901] (2/4) Epoch 17, batch 1100, loss[loss=0.2027, simple_loss=0.2756, pruned_loss=0.06494, over 7246.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2957, pruned_loss=0.06757, over 1609953.44 frames. 
], batch size: 16, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:22:23,041 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:27,218 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:35,459 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0026, 3.6174, 2.3013, 2.7764, 2.6859, 2.1588, 2.7130, 2.9451], + device='cuda:2'), covar=tensor([0.1650, 0.0314, 0.0993, 0.0732, 0.0766, 0.1239, 0.0998, 0.0944], + device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0231, 0.0325, 0.0298, 0.0297, 0.0328, 0.0339, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:22:38,667 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.545e+02 2.978e+02 3.676e+02 6.168e+02, threshold=5.956e+02, percent-clipped=1.0 +2023-02-06 19:22:44,137 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:45,337 INFO [train.py:901] (2/4) Epoch 17, batch 1150, loss[loss=0.2094, simple_loss=0.3004, pruned_loss=0.05922, over 8488.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2946, pruned_loss=0.06698, over 1613458.55 frames. ], batch size: 28, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:22:45,516 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:45,962 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 19:23:16,145 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:19,425 INFO [train.py:901] (2/4) Epoch 17, batch 1200, loss[loss=0.2263, simple_loss=0.3099, pruned_loss=0.07137, over 8460.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2949, pruned_loss=0.06726, over 1613898.25 frames. ], batch size: 27, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:23:26,513 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0761, 1.4413, 1.7269, 1.4035, 1.0027, 1.4727, 1.8880, 1.6823], + device='cuda:2'), covar=tensor([0.0527, 0.1192, 0.1657, 0.1360, 0.0609, 0.1477, 0.0652, 0.0589], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0163, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 19:23:33,401 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:45,147 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130566.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:47,782 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.314e+02 2.862e+02 3.617e+02 1.013e+03, threshold=5.724e+02, percent-clipped=2.0 +2023-02-06 19:23:53,885 INFO [train.py:901] (2/4) Epoch 17, batch 1250, loss[loss=0.2171, simple_loss=0.2819, pruned_loss=0.07613, over 7535.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2943, pruned_loss=0.06727, over 1607607.08 frames. 
], batch size: 18, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:23:57,478 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:24:16,790 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.75 vs. limit=2.0 +2023-02-06 19:24:30,849 INFO [train.py:901] (2/4) Epoch 17, batch 1300, loss[loss=0.2848, simple_loss=0.349, pruned_loss=0.1103, over 8425.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2948, pruned_loss=0.06768, over 1612541.39 frames. ], batch size: 49, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:24:59,337 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.380e+02 3.126e+02 3.675e+02 7.509e+02, threshold=6.253e+02, percent-clipped=2.0 +2023-02-06 19:25:05,686 INFO [train.py:901] (2/4) Epoch 17, batch 1350, loss[loss=0.2133, simple_loss=0.2957, pruned_loss=0.06541, over 8488.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2944, pruned_loss=0.06726, over 1615319.89 frames. ], batch size: 29, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:25:16,157 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6667, 1.9699, 3.4774, 1.4069, 2.5530, 2.2340, 1.7072, 2.5651], + device='cuda:2'), covar=tensor([0.1816, 0.2345, 0.0771, 0.4231, 0.1701, 0.2752, 0.2062, 0.2112], + device='cuda:2'), in_proj_covar=tensor([0.0509, 0.0564, 0.0545, 0.0613, 0.0635, 0.0571, 0.0506, 0.0619], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:25:29,182 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2744, 1.8774, 2.4513, 2.0321, 2.2822, 2.2666, 2.0167, 1.1389], + device='cuda:2'), covar=tensor([0.4738, 0.4287, 0.1664, 0.3255, 0.2290, 0.2571, 0.1804, 0.4693], + device='cuda:2'), in_proj_covar=tensor([0.0921, 0.0930, 0.0780, 0.0905, 0.0967, 0.0856, 0.0729, 0.0804], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:25:43,052 INFO [train.py:901] (2/4) Epoch 17, batch 1400, loss[loss=0.2359, simple_loss=0.3166, pruned_loss=0.07764, over 7926.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06723, over 1617961.89 frames. 
], batch size: 20, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:25:46,010 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:25:47,361 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:25:54,825 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130746.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:03,100 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:04,418 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:05,097 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6107, 2.0709, 4.2282, 1.4324, 2.9580, 2.2974, 1.6932, 2.8012], + device='cuda:2'), covar=tensor([0.1960, 0.2683, 0.0905, 0.4525, 0.1781, 0.3093, 0.2244, 0.2494], + device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0569, 0.0550, 0.0618, 0.0639, 0.0576, 0.0510, 0.0624], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:26:11,026 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.607e+02 3.260e+02 4.191e+02 1.113e+03, threshold=6.520e+02, percent-clipped=3.0 +2023-02-06 19:26:17,373 INFO [train.py:901] (2/4) Epoch 17, batch 1450, loss[loss=0.2362, simple_loss=0.3028, pruned_loss=0.08485, over 8125.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2945, pruned_loss=0.06685, over 1622143.63 frames. ], batch size: 22, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:26:20,756 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 19:26:27,818 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:32,213 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130800.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:49,003 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0535, 1.5825, 1.3333, 1.5695, 1.4134, 1.1661, 1.2274, 1.3417], + device='cuda:2'), covar=tensor([0.1126, 0.0443, 0.1257, 0.0518, 0.0681, 0.1505, 0.0923, 0.0727], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0232, 0.0324, 0.0299, 0.0297, 0.0328, 0.0339, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:26:54,258 INFO [train.py:901] (2/4) Epoch 17, batch 1500, loss[loss=0.2521, simple_loss=0.3326, pruned_loss=0.08583, over 8448.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2956, pruned_loss=0.06745, over 1619566.67 frames. ], batch size: 27, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:27:17,137 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:27:22,935 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.370e+02 2.974e+02 3.638e+02 1.375e+03, threshold=5.949e+02, percent-clipped=1.0 +2023-02-06 19:27:29,127 INFO [train.py:901] (2/4) Epoch 17, batch 1550, loss[loss=0.2414, simple_loss=0.3293, pruned_loss=0.07671, over 8241.00 frames. 
], tot_loss[loss=0.2143, simple_loss=0.2948, pruned_loss=0.06691, over 1620228.62 frames. ], batch size: 24, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:27:50,135 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:27:50,703 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:27:54,329 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130915.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:28:02,604 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:28:03,818 INFO [train.py:901] (2/4) Epoch 17, batch 1600, loss[loss=0.245, simple_loss=0.3224, pruned_loss=0.08375, over 8467.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2949, pruned_loss=0.06692, over 1620839.90 frames. ], batch size: 27, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:28:09,165 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 19:28:34,755 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.345e+02 2.992e+02 3.546e+02 8.486e+02, threshold=5.983e+02, percent-clipped=5.0 +2023-02-06 19:28:40,942 INFO [train.py:901] (2/4) Epoch 17, batch 1650, loss[loss=0.2169, simple_loss=0.2911, pruned_loss=0.07135, over 7807.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2944, pruned_loss=0.06681, over 1618689.91 frames. ], batch size: 20, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:29:10,874 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 19:29:13,496 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131025.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:29:16,141 INFO [train.py:901] (2/4) Epoch 17, batch 1700, loss[loss=0.2294, simple_loss=0.3159, pruned_loss=0.07143, over 8496.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2944, pruned_loss=0.06725, over 1616411.90 frames. ], batch size: 28, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:29:25,391 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:29:46,947 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.451e+02 3.155e+02 3.823e+02 7.811e+02, threshold=6.311e+02, percent-clipped=3.0 +2023-02-06 19:29:53,064 INFO [train.py:901] (2/4) Epoch 17, batch 1750, loss[loss=0.2346, simple_loss=0.3003, pruned_loss=0.08448, over 7433.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2934, pruned_loss=0.06712, over 1613159.90 frames. ], batch size: 17, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:30:19,622 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131117.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:30:23,362 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.97 vs. limit=5.0 +2023-02-06 19:30:27,911 INFO [train.py:901] (2/4) Epoch 17, batch 1800, loss[loss=0.2196, simple_loss=0.2966, pruned_loss=0.07135, over 7655.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2932, pruned_loss=0.06707, over 1612034.32 frames. 
], batch size: 19, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:30:30,799 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5038, 2.0584, 3.3673, 1.3368, 2.4974, 2.0421, 1.6654, 2.5696], + device='cuda:2'), covar=tensor([0.2025, 0.2808, 0.0852, 0.4595, 0.1776, 0.3225, 0.2332, 0.2243], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0564, 0.0545, 0.0611, 0.0632, 0.0570, 0.0506, 0.0617], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:30:37,107 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:30:44,376 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5936, 1.8430, 2.8119, 1.4784, 1.8854, 2.0522, 1.6736, 1.9685], + device='cuda:2'), covar=tensor([0.1543, 0.2102, 0.0724, 0.3630, 0.1635, 0.2487, 0.1788, 0.1864], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0564, 0.0544, 0.0612, 0.0633, 0.0570, 0.0507, 0.0618], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:30:52,690 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:30:55,953 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.745e+02 3.356e+02 4.683e+02 1.105e+03, threshold=6.712e+02, percent-clipped=11.0 +2023-02-06 19:30:56,917 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:02,079 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.38 vs. limit=5.0 +2023-02-06 19:31:03,694 INFO [train.py:901] (2/4) Epoch 17, batch 1850, loss[loss=0.2336, simple_loss=0.3089, pruned_loss=0.07917, over 8286.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2929, pruned_loss=0.0676, over 1606444.19 frames. ], batch size: 23, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:31:12,488 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:13,835 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:16,712 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:31:39,965 INFO [train.py:901] (2/4) Epoch 17, batch 1900, loss[loss=0.2267, simple_loss=0.3005, pruned_loss=0.07642, over 8069.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.293, pruned_loss=0.0676, over 1603327.34 frames. ], batch size: 21, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:32:08,103 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.313e+02 2.955e+02 3.582e+02 5.685e+02, threshold=5.910e+02, percent-clipped=0.0 +2023-02-06 19:32:08,132 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 19:32:14,118 INFO [train.py:901] (2/4) Epoch 17, batch 1950, loss[loss=0.2282, simple_loss=0.3041, pruned_loss=0.07612, over 8589.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2946, pruned_loss=0.068, over 1610992.08 frames. 
], batch size: 31, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:32:15,759 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:19,635 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 19:32:26,415 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 19:32:28,924 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:35,146 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131306.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:39,920 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 19:32:46,202 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6693, 2.1927, 4.0623, 1.4427, 2.8461, 2.2057, 1.6265, 2.7087], + device='cuda:2'), covar=tensor([0.1836, 0.2462, 0.0725, 0.4230, 0.1799, 0.3049, 0.2257, 0.2456], + device='cuda:2'), in_proj_covar=tensor([0.0510, 0.0568, 0.0547, 0.0617, 0.0638, 0.0574, 0.0511, 0.0623], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:32:47,522 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:32:51,198 INFO [train.py:901] (2/4) Epoch 17, batch 2000, loss[loss=0.2133, simple_loss=0.3105, pruned_loss=0.05803, over 8250.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2961, pruned_loss=0.06918, over 1614047.84 frames. ], batch size: 24, lr: 4.51e-03, grad_scale: 16.0 +2023-02-06 19:32:58,450 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.09 vs. limit=5.0 +2023-02-06 19:33:19,856 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.510e+02 3.128e+02 3.622e+02 6.098e+02, threshold=6.257e+02, percent-clipped=1.0 +2023-02-06 19:33:25,354 INFO [train.py:901] (2/4) Epoch 17, batch 2050, loss[loss=0.1959, simple_loss=0.2829, pruned_loss=0.05443, over 7931.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2964, pruned_loss=0.06868, over 1619109.65 frames. ], batch size: 20, lr: 4.51e-03, grad_scale: 8.0 +2023-02-06 19:33:41,634 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.19 vs. limit=5.0 +2023-02-06 19:34:00,670 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:34:01,969 INFO [train.py:901] (2/4) Epoch 17, batch 2100, loss[loss=0.2007, simple_loss=0.2782, pruned_loss=0.06159, over 8034.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.06852, over 1615957.69 frames. 
], batch size: 22, lr: 4.51e-03, grad_scale: 8.0 +2023-02-06 19:34:06,123 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:34:28,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5644, 1.5277, 1.8769, 1.4218, 1.1902, 1.8592, 0.7787, 1.4866], + device='cuda:2'), covar=tensor([0.1824, 0.1019, 0.0390, 0.1223, 0.2709, 0.0519, 0.2066, 0.1427], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0183, 0.0115, 0.0216, 0.0261, 0.0122, 0.0166, 0.0179], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:34:31,417 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.457e+02 2.884e+02 3.530e+02 8.686e+02, threshold=5.767e+02, percent-clipped=1.0 +2023-02-06 19:34:36,976 INFO [train.py:901] (2/4) Epoch 17, batch 2150, loss[loss=0.1994, simple_loss=0.2817, pruned_loss=0.0586, over 8145.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2965, pruned_loss=0.06909, over 1614583.63 frames. ], batch size: 22, lr: 4.51e-03, grad_scale: 8.0 +2023-02-06 19:34:58,678 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:35:12,358 INFO [train.py:901] (2/4) Epoch 17, batch 2200, loss[loss=0.2294, simple_loss=0.312, pruned_loss=0.07338, over 8759.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2968, pruned_loss=0.06852, over 1619781.64 frames. ], batch size: 40, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:35:17,212 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131536.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:35:43,520 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.550e+02 3.248e+02 4.465e+02 1.208e+03, threshold=6.496e+02, percent-clipped=6.0 +2023-02-06 19:35:45,151 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0254, 2.5085, 3.5838, 2.0761, 1.9516, 3.4237, 0.8253, 2.0793], + device='cuda:2'), covar=tensor([0.1564, 0.1476, 0.0336, 0.1958, 0.3309, 0.0703, 0.2670, 0.1792], + device='cuda:2'), in_proj_covar=tensor([0.0180, 0.0184, 0.0115, 0.0218, 0.0263, 0.0122, 0.0168, 0.0180], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:35:49,218 INFO [train.py:901] (2/4) Epoch 17, batch 2250, loss[loss=0.2002, simple_loss=0.2865, pruned_loss=0.05694, over 8681.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2963, pruned_loss=0.06799, over 1620768.58 frames. ], batch size: 39, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:36:23,879 INFO [train.py:901] (2/4) Epoch 17, batch 2300, loss[loss=0.2119, simple_loss=0.3037, pruned_loss=0.0601, over 8494.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2963, pruned_loss=0.06784, over 1620139.34 frames. 
], batch size: 29, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:36:40,758 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:36:47,781 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7785, 4.7825, 4.2495, 2.2709, 4.1972, 4.4240, 4.2517, 4.1008], + device='cuda:2'), covar=tensor([0.0643, 0.0511, 0.1045, 0.4133, 0.0897, 0.0758, 0.1378, 0.0717], + device='cuda:2'), in_proj_covar=tensor([0.0500, 0.0411, 0.0415, 0.0511, 0.0403, 0.0411, 0.0398, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:36:55,870 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 2.553e+02 3.001e+02 3.824e+02 6.268e+02, threshold=6.003e+02, percent-clipped=0.0 +2023-02-06 19:37:01,525 INFO [train.py:901] (2/4) Epoch 17, batch 2350, loss[loss=0.2228, simple_loss=0.3016, pruned_loss=0.07205, over 8441.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2984, pruned_loss=0.06931, over 1621838.65 frames. ], batch size: 27, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:37:35,924 INFO [train.py:901] (2/4) Epoch 17, batch 2400, loss[loss=0.1932, simple_loss=0.2757, pruned_loss=0.05538, over 8137.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2981, pruned_loss=0.06929, over 1622072.84 frames. ], batch size: 22, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:38:06,374 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.467e+02 3.155e+02 3.892e+02 8.269e+02, threshold=6.310e+02, percent-clipped=4.0 +2023-02-06 19:38:06,494 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:38:12,216 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:38:12,839 INFO [train.py:901] (2/4) Epoch 17, batch 2450, loss[loss=0.2099, simple_loss=0.2838, pruned_loss=0.068, over 8196.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2971, pruned_loss=0.06896, over 1619483.76 frames. ], batch size: 23, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:38:47,885 INFO [train.py:901] (2/4) Epoch 17, batch 2500, loss[loss=0.2207, simple_loss=0.309, pruned_loss=0.0662, over 8292.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2971, pruned_loss=0.06867, over 1619994.59 frames. ], batch size: 23, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:39:05,481 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131854.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:17,124 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.481e+02 2.929e+02 3.320e+02 7.417e+02, threshold=5.858e+02, percent-clipped=2.0 +2023-02-06 19:39:20,114 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:22,845 INFO [train.py:901] (2/4) Epoch 17, batch 2550, loss[loss=0.1913, simple_loss=0.2751, pruned_loss=0.05375, over 8252.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2967, pruned_loss=0.06858, over 1619068.94 frames. 
], batch size: 22, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:39:29,619 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:34,616 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131893.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:39:35,580 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0 +2023-02-06 19:39:45,578 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:40:00,838 INFO [train.py:901] (2/4) Epoch 17, batch 2600, loss[loss=0.2253, simple_loss=0.2986, pruned_loss=0.07603, over 8084.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2962, pruned_loss=0.06834, over 1618344.59 frames. ], batch size: 21, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:40:03,026 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:40:05,735 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8223, 2.1424, 2.3221, 1.3352, 2.3957, 1.6695, 0.7403, 2.0326], + device='cuda:2'), covar=tensor([0.0510, 0.0268, 0.0180, 0.0481, 0.0277, 0.0683, 0.0721, 0.0259], + device='cuda:2'), in_proj_covar=tensor([0.0427, 0.0368, 0.0315, 0.0424, 0.0351, 0.0510, 0.0375, 0.0391], + device='cuda:2'), out_proj_covar=tensor([1.1662e-04, 9.8122e-05, 8.3504e-05, 1.1353e-04, 9.4333e-05, 1.4711e-04, + 1.0244e-04, 1.0512e-04], device='cuda:2') +2023-02-06 19:40:25,954 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1348, 1.5307, 1.2085, 2.3631, 1.1078, 1.1065, 1.6723, 1.6883], + device='cuda:2'), covar=tensor([0.1687, 0.1316, 0.2096, 0.0463, 0.1350, 0.2232, 0.0949, 0.1004], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0198, 0.0246, 0.0211, 0.0207, 0.0244, 0.0251, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 19:40:28,790 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:40:29,953 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.887e+02 3.716e+02 6.826e+02, threshold=5.774e+02, percent-clipped=1.0 +2023-02-06 19:40:32,198 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8046, 1.6367, 1.8437, 1.6321, 0.8057, 1.5311, 2.0259, 1.9265], + device='cuda:2'), covar=tensor([0.0442, 0.1228, 0.1547, 0.1344, 0.0625, 0.1525, 0.0640, 0.0579], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 19:40:35,446 INFO [train.py:901] (2/4) Epoch 17, batch 2650, loss[loss=0.221, simple_loss=0.3047, pruned_loss=0.06871, over 8335.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2942, pruned_loss=0.06743, over 1614045.52 frames. ], batch size: 25, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:41:13,492 INFO [train.py:901] (2/4) Epoch 17, batch 2700, loss[loss=0.2253, simple_loss=0.2974, pruned_loss=0.07663, over 8321.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2945, pruned_loss=0.06769, over 1616215.93 frames. 
], batch size: 26, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:41:17,451 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0 +2023-02-06 19:41:27,379 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132049.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:41:42,494 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.450e+02 3.248e+02 4.102e+02 1.137e+03, threshold=6.496e+02, percent-clipped=12.0 +2023-02-06 19:41:43,387 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:41:48,254 INFO [train.py:901] (2/4) Epoch 17, batch 2750, loss[loss=0.2544, simple_loss=0.3146, pruned_loss=0.09715, over 7205.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2949, pruned_loss=0.06762, over 1616575.39 frames. ], batch size: 72, lr: 4.50e-03, grad_scale: 8.0 +2023-02-06 19:42:25,031 INFO [train.py:901] (2/4) Epoch 17, batch 2800, loss[loss=0.1877, simple_loss=0.2549, pruned_loss=0.0602, over 7542.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2952, pruned_loss=0.06853, over 1614065.83 frames. ], batch size: 18, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:42:35,386 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:42:40,211 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:42:50,606 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9741, 1.7122, 3.0973, 1.5167, 2.3228, 3.3746, 3.4604, 2.8838], + device='cuda:2'), covar=tensor([0.1082, 0.1532, 0.0363, 0.1935, 0.0924, 0.0255, 0.0574, 0.0571], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0309, 0.0275, 0.0303, 0.0294, 0.0253, 0.0388, 0.0299], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 19:42:52,683 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:42:55,265 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.217e+02 2.865e+02 3.623e+02 1.020e+03, threshold=5.730e+02, percent-clipped=3.0 +2023-02-06 19:42:57,334 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:43:00,626 INFO [train.py:901] (2/4) Epoch 17, batch 2850, loss[loss=0.2366, simple_loss=0.3168, pruned_loss=0.07821, over 8451.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2966, pruned_loss=0.06885, over 1618708.78 frames. 
], batch size: 27, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:43:12,453 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9735, 1.6179, 1.3983, 1.5371, 1.3517, 1.2516, 1.2544, 1.3009], + device='cuda:2'), covar=tensor([0.1114, 0.0449, 0.1251, 0.0562, 0.0734, 0.1379, 0.0858, 0.0755], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0232, 0.0323, 0.0298, 0.0297, 0.0326, 0.0337, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:43:29,199 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:43:33,409 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:43:35,753 INFO [train.py:901] (2/4) Epoch 17, batch 2900, loss[loss=0.2317, simple_loss=0.2985, pruned_loss=0.08241, over 7781.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2981, pruned_loss=0.06973, over 1619520.64 frames. ], batch size: 19, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:43:37,428 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9093, 2.1167, 1.8287, 2.6257, 1.2398, 1.5898, 1.8506, 1.9605], + device='cuda:2'), covar=tensor([0.0755, 0.0787, 0.1025, 0.0408, 0.1150, 0.1374, 0.0860, 0.0892], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0198, 0.0246, 0.0210, 0.0209, 0.0244, 0.0252, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 19:43:52,933 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:44:08,372 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.407e+02 2.887e+02 3.454e+02 7.005e+02, threshold=5.774e+02, percent-clipped=2.0 +2023-02-06 19:44:09,829 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 19:44:13,738 INFO [train.py:901] (2/4) Epoch 17, batch 2950, loss[loss=0.1961, simple_loss=0.2669, pruned_loss=0.06265, over 7544.00 frames. ], tot_loss[loss=0.218, simple_loss=0.297, pruned_loss=0.06953, over 1615647.24 frames. ], batch size: 18, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:44:20,352 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.75 vs. limit=5.0 +2023-02-06 19:44:48,310 INFO [train.py:901] (2/4) Epoch 17, batch 3000, loss[loss=0.2445, simple_loss=0.3188, pruned_loss=0.08507, over 8189.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.298, pruned_loss=0.06941, over 1621541.23 frames. ], batch size: 23, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:44:48,310 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 19:45:00,594 INFO [train.py:935] (2/4) Epoch 17, validation: loss=0.1786, simple_loss=0.2786, pruned_loss=0.03928, over 944034.00 frames. 
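The `train.py:926`/`train.py:935` entries just above show the trainer pausing (here at epoch 17, batch 3000) to compute a loss over the held-out set before resuming, followed by a peak-GPU-memory report. As a rough illustration of that pattern — a minimal sketch under assumed interfaces, not the actual icefall `train.py`; the batch keys and the model's `(loss, num_frames)` return signature are hypothetical stand-ins — the periodic validation step looks like this:

```python
import torch


def compute_validation_loss(model, valid_loader, device):
    """Average held-out loss, mirroring the "validation: loss=... over N frames"
    log line above. Sketch only: the batch keys and the model's return values
    are assumptions, not the real icefall interfaces.
    """
    model.eval()  # disable stochastic behaviour (dropout etc.) for the held-out pass
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():  # no gradients needed, which keeps the pass cheap
        for batch in valid_loader:
            feats = batch["features"].to(device)          # (N, T, C) fbank features
            feat_lens = batch["feature_lens"].to(device)  # valid frames per utterance
            loss, num_frames = model(feats, feat_lens, batch["targets"])
            tot_loss += loss.item()
            tot_frames += float(num_frames)
    model.train()  # restore training-time behaviour before the next batch
    return tot_loss / max(tot_frames, 1.0)


# The "Maximum memory allocated so far is 6789MB" line that follows can be
# produced with PyTorch's built-in counter:
if torch.cuda.is_available():
    peak_mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
    print(f"Maximum memory allocated so far is {peak_mb}MB")
```

Running the pass under `torch.no_grad()` and in `eval()` mode keeps it inexpensive and deterministic; switching back to `train()` afterwards matters here because the surrounding log lines show stochastic mechanisms (zipformer layer dropping, whitening checks) still active during training.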
+2023-02-06 19:45:00,595 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 19:45:04,454 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132334.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:45:31,446 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.492e+02 3.005e+02 3.786e+02 8.313e+02, threshold=6.010e+02, percent-clipped=11.0 +2023-02-06 19:45:37,096 INFO [train.py:901] (2/4) Epoch 17, batch 3050, loss[loss=0.2059, simple_loss=0.2884, pruned_loss=0.06167, over 8289.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2974, pruned_loss=0.06897, over 1619418.20 frames. ], batch size: 23, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:45:48,261 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:46:04,204 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:46:12,935 INFO [train.py:901] (2/4) Epoch 17, batch 3100, loss[loss=0.2172, simple_loss=0.2975, pruned_loss=0.06844, over 8239.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2984, pruned_loss=0.06912, over 1622608.29 frames. ], batch size: 22, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:46:13,064 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1849, 3.1029, 2.8447, 1.5846, 2.7547, 2.9655, 2.8661, 2.7694], + device='cuda:2'), covar=tensor([0.1364, 0.0930, 0.1496, 0.5592, 0.1288, 0.1278, 0.1824, 0.1223], + device='cuda:2'), in_proj_covar=tensor([0.0498, 0.0411, 0.0414, 0.0510, 0.0401, 0.0413, 0.0399, 0.0357], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:46:41,883 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.340e+02 2.843e+02 3.195e+02 7.960e+02, threshold=5.685e+02, percent-clipped=6.0 +2023-02-06 19:46:46,902 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1830, 2.1018, 1.6635, 1.9478, 1.7856, 1.4495, 1.6702, 1.7190], + device='cuda:2'), covar=tensor([0.1155, 0.0346, 0.1075, 0.0473, 0.0621, 0.1290, 0.0764, 0.0763], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0233, 0.0324, 0.0300, 0.0297, 0.0328, 0.0339, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:46:47,324 INFO [train.py:901] (2/4) Epoch 17, batch 3150, loss[loss=0.2265, simple_loss=0.2943, pruned_loss=0.07938, over 7974.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2975, pruned_loss=0.06904, over 1621022.04 frames. ], batch size: 21, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:46:59,436 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 19:47:09,744 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132508.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:47:24,999 INFO [train.py:901] (2/4) Epoch 17, batch 3200, loss[loss=0.26, simple_loss=0.3255, pruned_loss=0.09724, over 7210.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2976, pruned_loss=0.0689, over 1621298.99 frames. 
], batch size: 73, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:47:26,593 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:47:28,227 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 19:47:54,176 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.495e+02 3.112e+02 3.824e+02 1.248e+03, threshold=6.223e+02, percent-clipped=6.0 +2023-02-06 19:47:59,505 INFO [train.py:901] (2/4) Epoch 17, batch 3250, loss[loss=0.1895, simple_loss=0.2708, pruned_loss=0.05414, over 7252.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2966, pruned_loss=0.06842, over 1620738.14 frames. ], batch size: 16, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:48:03,307 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-06 19:48:07,386 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:48:26,270 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132615.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:48:37,501 INFO [train.py:901] (2/4) Epoch 17, batch 3300, loss[loss=0.196, simple_loss=0.2939, pruned_loss=0.04907, over 8246.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06786, over 1618385.95 frames. ], batch size: 24, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:49:06,785 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 2.532e+02 2.971e+02 3.744e+02 7.972e+02, threshold=5.942e+02, percent-clipped=3.0 +2023-02-06 19:49:11,862 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132678.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:49:12,441 INFO [train.py:901] (2/4) Epoch 17, batch 3350, loss[loss=0.2142, simple_loss=0.2998, pruned_loss=0.06428, over 8342.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2956, pruned_loss=0.06829, over 1615365.07 frames. ], batch size: 49, lr: 4.49e-03, grad_scale: 8.0 +2023-02-06 19:49:49,261 INFO [train.py:901] (2/4) Epoch 17, batch 3400, loss[loss=0.2068, simple_loss=0.2786, pruned_loss=0.06753, over 7538.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2954, pruned_loss=0.06843, over 1605423.83 frames. ], batch size: 18, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:50:14,689 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:14,782 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:19,434 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.442e+02 2.969e+02 4.012e+02 9.663e+02, threshold=5.937e+02, percent-clipped=5.0 +2023-02-06 19:50:24,933 INFO [train.py:901] (2/4) Epoch 17, batch 3450, loss[loss=0.1622, simple_loss=0.2423, pruned_loss=0.04105, over 7549.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2952, pruned_loss=0.06882, over 1607035.86 frames. 
], batch size: 18, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:50:30,886 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:32,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:47,675 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:50:59,800 INFO [train.py:901] (2/4) Epoch 17, batch 3500, loss[loss=0.1897, simple_loss=0.2704, pruned_loss=0.05449, over 7223.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2956, pruned_loss=0.06838, over 1608186.04 frames. ], batch size: 16, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:51:13,850 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 19:51:31,533 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.401e+02 3.009e+02 3.970e+02 8.620e+02, threshold=6.019e+02, percent-clipped=6.0 +2023-02-06 19:51:33,854 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.5049, 1.6404, 3.7619, 1.5950, 3.0687, 2.9050, 3.3979, 3.3150], + device='cuda:2'), covar=tensor([0.1382, 0.5926, 0.1333, 0.5085, 0.2380, 0.1898, 0.1114, 0.1297], + device='cuda:2'), in_proj_covar=tensor([0.0568, 0.0617, 0.0654, 0.0588, 0.0664, 0.0576, 0.0565, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:51:37,027 INFO [train.py:901] (2/4) Epoch 17, batch 3550, loss[loss=0.2687, simple_loss=0.3382, pruned_loss=0.09963, over 8645.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2959, pruned_loss=0.06844, over 1608920.18 frames. ], batch size: 34, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:52:11,138 INFO [train.py:901] (2/4) Epoch 17, batch 3600, loss[loss=0.2083, simple_loss=0.2825, pruned_loss=0.06706, over 7713.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2945, pruned_loss=0.06785, over 1604389.35 frames. ], batch size: 18, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:52:41,875 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.442e+02 2.775e+02 3.418e+02 6.006e+02, threshold=5.549e+02, percent-clipped=0.0 +2023-02-06 19:52:48,330 INFO [train.py:901] (2/4) Epoch 17, batch 3650, loss[loss=0.2038, simple_loss=0.2823, pruned_loss=0.06269, over 7658.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2954, pruned_loss=0.06815, over 1607533.45 frames. ], batch size: 19, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:52:55,313 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8325, 5.8950, 5.1253, 2.5865, 5.1243, 5.5481, 5.4653, 5.3726], + device='cuda:2'), covar=tensor([0.0470, 0.0410, 0.0936, 0.4161, 0.0749, 0.0740, 0.0982, 0.0535], + device='cuda:2'), in_proj_covar=tensor([0.0503, 0.0412, 0.0418, 0.0513, 0.0407, 0.0415, 0.0400, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 19:53:18,535 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:53:21,773 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 19:53:23,086 INFO [train.py:901] (2/4) Epoch 17, batch 3700, loss[loss=0.2213, simple_loss=0.3028, pruned_loss=0.06986, over 8109.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.295, pruned_loss=0.06811, over 1608211.33 frames. ], batch size: 23, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:53:53,576 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.419e+02 3.081e+02 4.194e+02 7.364e+02, threshold=6.162e+02, percent-clipped=6.0 +2023-02-06 19:53:59,127 INFO [train.py:901] (2/4) Epoch 17, batch 3750, loss[loss=0.2041, simple_loss=0.2885, pruned_loss=0.05982, over 8335.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2947, pruned_loss=0.06779, over 1612281.43 frames. ], batch size: 25, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:54:21,508 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133108.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:54:30,213 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1776, 2.1026, 1.5837, 1.8999, 1.7555, 1.3880, 1.6928, 1.6097], + device='cuda:2'), covar=tensor([0.1229, 0.0346, 0.1138, 0.0499, 0.0660, 0.1350, 0.0818, 0.0870], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0232, 0.0325, 0.0301, 0.0299, 0.0330, 0.0341, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 19:54:35,507 INFO [train.py:901] (2/4) Epoch 17, batch 3800, loss[loss=0.2041, simple_loss=0.2725, pruned_loss=0.06785, over 7802.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2956, pruned_loss=0.06822, over 1615319.94 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:54:41,302 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:55:04,560 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.594e+02 3.054e+02 3.718e+02 6.772e+02, threshold=6.108e+02, percent-clipped=5.0 +2023-02-06 19:55:09,947 INFO [train.py:901] (2/4) Epoch 17, batch 3850, loss[loss=0.2503, simple_loss=0.3137, pruned_loss=0.09346, over 8656.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.295, pruned_loss=0.06779, over 1612325.68 frames. ], batch size: 49, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:55:15,494 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7351, 1.5500, 3.9347, 1.4279, 3.4821, 3.2836, 3.5930, 3.4545], + device='cuda:2'), covar=tensor([0.0756, 0.4357, 0.0715, 0.4077, 0.1314, 0.1107, 0.0730, 0.0815], + device='cuda:2'), in_proj_covar=tensor([0.0577, 0.0626, 0.0666, 0.0596, 0.0674, 0.0586, 0.0572, 0.0640], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:55:31,132 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 19:55:42,999 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133223.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:55:46,893 INFO [train.py:901] (2/4) Epoch 17, batch 3900, loss[loss=0.2125, simple_loss=0.2867, pruned_loss=0.06918, over 8139.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2952, pruned_loss=0.06811, over 1611690.90 frames. 
], batch size: 22, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:56:00,183 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5527, 1.4875, 4.7800, 1.8935, 4.2996, 4.0798, 4.4229, 4.2079], + device='cuda:2'), covar=tensor([0.0494, 0.4042, 0.0445, 0.3499, 0.0875, 0.0855, 0.0420, 0.0590], + device='cuda:2'), in_proj_covar=tensor([0.0570, 0.0617, 0.0659, 0.0589, 0.0665, 0.0579, 0.0566, 0.0634], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:56:14,633 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1652, 1.8885, 2.7123, 2.2384, 2.5872, 2.1090, 1.8589, 1.3366], + device='cuda:2'), covar=tensor([0.5144, 0.4790, 0.1609, 0.3375, 0.2368, 0.2965, 0.1873, 0.4987], + device='cuda:2'), in_proj_covar=tensor([0.0922, 0.0937, 0.0772, 0.0906, 0.0971, 0.0853, 0.0724, 0.0803], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 19:56:15,759 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.486e+02 2.968e+02 4.028e+02 1.073e+03, threshold=5.936e+02, percent-clipped=5.0 +2023-02-06 19:56:21,117 INFO [train.py:901] (2/4) Epoch 17, batch 3950, loss[loss=0.1896, simple_loss=0.2734, pruned_loss=0.05295, over 7965.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2955, pruned_loss=0.06847, over 1612065.06 frames. ], batch size: 21, lr: 4.48e-03, grad_scale: 8.0 +2023-02-06 19:56:56,943 INFO [train.py:901] (2/4) Epoch 17, batch 4000, loss[loss=0.2219, simple_loss=0.301, pruned_loss=0.07144, over 8497.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2945, pruned_loss=0.06796, over 1609291.56 frames. ], batch size: 26, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:57:27,415 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.526e+02 3.333e+02 3.995e+02 7.649e+02, threshold=6.666e+02, percent-clipped=5.0 +2023-02-06 19:57:32,341 INFO [train.py:901] (2/4) Epoch 17, batch 4050, loss[loss=0.195, simple_loss=0.2844, pruned_loss=0.05281, over 8462.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2941, pruned_loss=0.06736, over 1612089.26 frames. ], batch size: 29, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:57:41,374 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133392.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:57:42,122 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:57:42,762 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:57:59,643 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:58:07,781 INFO [train.py:901] (2/4) Epoch 17, batch 4100, loss[loss=0.2218, simple_loss=0.3126, pruned_loss=0.06545, over 8634.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2942, pruned_loss=0.06726, over 1613333.69 frames. ], batch size: 39, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:58:40,085 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.513e+02 2.919e+02 3.658e+02 1.440e+03, threshold=5.839e+02, percent-clipped=2.0 +2023-02-06 19:58:45,039 INFO [train.py:901] (2/4) Epoch 17, batch 4150, loss[loss=0.2192, simple_loss=0.3054, pruned_loss=0.06653, over 8467.00 frames. 
], tot_loss[loss=0.2149, simple_loss=0.2942, pruned_loss=0.06773, over 1611400.76 frames. ], batch size: 25, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:58:45,264 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:58:54,028 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133492.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:59:02,279 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:59:19,507 INFO [train.py:901] (2/4) Epoch 17, batch 4200, loss[loss=0.1968, simple_loss=0.2734, pruned_loss=0.06008, over 7929.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2943, pruned_loss=0.0678, over 1607110.07 frames. ], batch size: 20, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:59:32,479 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 19:59:37,348 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5982, 1.5616, 1.9216, 1.3751, 1.2270, 1.9432, 0.4343, 1.3414], + device='cuda:2'), covar=tensor([0.1924, 0.1246, 0.0449, 0.1183, 0.2897, 0.0440, 0.2276, 0.1323], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0184, 0.0116, 0.0218, 0.0264, 0.0123, 0.0168, 0.0179], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 19:59:51,075 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.565e+02 3.135e+02 3.827e+02 1.180e+03, threshold=6.269e+02, percent-clipped=6.0 +2023-02-06 19:59:56,768 INFO [train.py:901] (2/4) Epoch 17, batch 4250, loss[loss=0.2098, simple_loss=0.2943, pruned_loss=0.06268, over 7971.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2942, pruned_loss=0.06766, over 1604748.25 frames. ], batch size: 21, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 19:59:57,448 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 20:00:07,950 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 20:00:30,994 INFO [train.py:901] (2/4) Epoch 17, batch 4300, loss[loss=0.1723, simple_loss=0.2583, pruned_loss=0.04318, over 8361.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2938, pruned_loss=0.0676, over 1604499.73 frames. 
], batch size: 24, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:00:53,053 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4080, 1.5285, 1.4501, 1.8450, 0.7156, 1.2906, 1.3139, 1.5313], + device='cuda:2'), covar=tensor([0.0854, 0.0802, 0.1004, 0.0494, 0.1133, 0.1484, 0.0784, 0.0739], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0199, 0.0248, 0.0211, 0.0208, 0.0246, 0.0254, 0.0212], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:00:55,152 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2561, 2.4652, 2.2465, 2.9178, 2.0686, 2.1352, 2.3123, 2.6610], + device='cuda:2'), covar=tensor([0.0675, 0.0735, 0.0703, 0.0509, 0.0869, 0.1030, 0.0716, 0.0598], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0199, 0.0248, 0.0211, 0.0208, 0.0246, 0.0254, 0.0212], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:01:00,693 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133670.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:01:01,924 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.551e+02 3.118e+02 3.976e+02 6.360e+02, threshold=6.236e+02, percent-clipped=1.0 +2023-02-06 20:01:06,898 INFO [train.py:901] (2/4) Epoch 17, batch 4350, loss[loss=0.1633, simple_loss=0.2459, pruned_loss=0.04033, over 7531.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2946, pruned_loss=0.06787, over 1605413.06 frames. ], batch size: 18, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:01:08,326 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5561, 4.4617, 4.0754, 2.2993, 4.0087, 4.0860, 4.2163, 3.9000], + device='cuda:2'), covar=tensor([0.0707, 0.0507, 0.0997, 0.4269, 0.0827, 0.1093, 0.1020, 0.0858], + device='cuda:2'), in_proj_covar=tensor([0.0502, 0.0411, 0.0421, 0.0515, 0.0407, 0.0413, 0.0400, 0.0359], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:01:17,795 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5443, 1.9198, 3.0164, 1.3715, 2.2425, 1.9746, 1.5859, 2.2814], + device='cuda:2'), covar=tensor([0.1855, 0.2543, 0.0733, 0.4495, 0.1794, 0.3015, 0.2266, 0.2133], + device='cuda:2'), in_proj_covar=tensor([0.0510, 0.0571, 0.0550, 0.0621, 0.0637, 0.0576, 0.0514, 0.0627], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:01:31,320 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 20:01:43,141 INFO [train.py:901] (2/4) Epoch 17, batch 4400, loss[loss=0.202, simple_loss=0.2627, pruned_loss=0.07069, over 7443.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2942, pruned_loss=0.06729, over 1611504.65 frames. 
], batch size: 17, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:01:48,105 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133736.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 20:01:49,427 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:02:05,418 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5242, 1.9338, 3.1989, 1.3925, 2.4374, 1.9187, 1.5860, 2.3293], + device='cuda:2'), covar=tensor([0.1896, 0.2498, 0.0756, 0.4397, 0.1678, 0.3035, 0.2223, 0.2191], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0573, 0.0549, 0.0622, 0.0640, 0.0576, 0.0516, 0.0628], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:02:12,880 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.579e+02 3.148e+02 3.884e+02 8.584e+02, threshold=6.297e+02, percent-clipped=6.0 +2023-02-06 20:02:12,934 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 20:02:18,531 INFO [train.py:901] (2/4) Epoch 17, batch 4450, loss[loss=0.2323, simple_loss=0.3117, pruned_loss=0.07641, over 8290.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2949, pruned_loss=0.06708, over 1614700.21 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:02:55,021 INFO [train.py:901] (2/4) Epoch 17, batch 4500, loss[loss=0.2439, simple_loss=0.315, pruned_loss=0.0864, over 8618.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2958, pruned_loss=0.06794, over 1614735.77 frames. ], batch size: 34, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:03:00,079 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:03:10,422 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133851.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 20:03:10,900 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 20:03:11,735 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:03:24,313 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.330e+02 2.856e+02 3.592e+02 8.327e+02, threshold=5.711e+02, percent-clipped=1.0 +2023-02-06 20:03:29,175 INFO [train.py:901] (2/4) Epoch 17, batch 4550, loss[loss=0.2024, simple_loss=0.2868, pruned_loss=0.05901, over 8337.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2959, pruned_loss=0.06825, over 1613891.86 frames. ], batch size: 25, lr: 4.47e-03, grad_scale: 8.0 +2023-02-06 20:04:04,547 INFO [train.py:901] (2/4) Epoch 17, batch 4600, loss[loss=0.2514, simple_loss=0.3159, pruned_loss=0.09349, over 7811.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2955, pruned_loss=0.06815, over 1611268.08 frames. 
], batch size: 20, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:04:07,611 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0637, 1.6266, 1.4199, 1.5922, 1.3664, 1.3023, 1.2807, 1.3398], + device='cuda:2'), covar=tensor([0.1146, 0.0478, 0.1208, 0.0523, 0.0737, 0.1427, 0.0907, 0.0827], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0233, 0.0325, 0.0301, 0.0297, 0.0332, 0.0341, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 20:04:18,981 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 20:04:21,359 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:04:35,429 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.386e+02 2.834e+02 3.772e+02 7.696e+02, threshold=5.668e+02, percent-clipped=3.0 +2023-02-06 20:04:40,243 INFO [train.py:901] (2/4) Epoch 17, batch 4650, loss[loss=0.2545, simple_loss=0.3326, pruned_loss=0.08819, over 8352.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2946, pruned_loss=0.06765, over 1613031.93 frames. ], batch size: 24, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:05:01,910 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 20:05:02,393 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0326, 1.7502, 3.3685, 1.4562, 2.3250, 3.6991, 3.7655, 3.1427], + device='cuda:2'), covar=tensor([0.1083, 0.1564, 0.0319, 0.2062, 0.0988, 0.0205, 0.0397, 0.0545], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0311, 0.0275, 0.0304, 0.0295, 0.0253, 0.0392, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 20:05:06,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:05:07,207 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:05:16,663 INFO [train.py:901] (2/4) Epoch 17, batch 4700, loss[loss=0.2727, simple_loss=0.3375, pruned_loss=0.104, over 8581.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2949, pruned_loss=0.06756, over 1612512.13 frames. ], batch size: 49, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:05:45,984 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 20:05:48,988 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.331e+02 2.674e+02 3.349e+02 6.559e+02, threshold=5.348e+02, percent-clipped=3.0 +2023-02-06 20:05:53,967 INFO [train.py:901] (2/4) Epoch 17, batch 4750, loss[loss=0.2242, simple_loss=0.3039, pruned_loss=0.07221, over 8660.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2958, pruned_loss=0.06776, over 1615126.01 frames. ], batch size: 34, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:06:13,291 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134107.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 20:06:14,638 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:06:17,894 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. 
Duration: 26.8349375 +2023-02-06 20:06:20,569 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 20:06:28,174 INFO [train.py:901] (2/4) Epoch 17, batch 4800, loss[loss=0.1649, simple_loss=0.2466, pruned_loss=0.04164, over 7410.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2948, pruned_loss=0.06739, over 1613352.20 frames. ], batch size: 17, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:06:28,387 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:06:31,287 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134132.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 20:06:32,699 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:07:00,735 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.352e+02 2.869e+02 3.488e+02 8.440e+02, threshold=5.739e+02, percent-clipped=9.0 +2023-02-06 20:07:06,357 INFO [train.py:901] (2/4) Epoch 17, batch 4850, loss[loss=0.216, simple_loss=0.2938, pruned_loss=0.06916, over 7817.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2945, pruned_loss=0.06665, over 1619275.13 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:07:14,652 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 20:07:26,230 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134207.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:07:29,619 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5725, 1.5160, 1.8540, 1.4196, 1.2063, 1.8374, 0.2257, 1.2609], + device='cuda:2'), covar=tensor([0.1899, 0.1210, 0.0399, 0.0996, 0.2673, 0.0450, 0.2168, 0.1166], + device='cuda:2'), in_proj_covar=tensor([0.0179, 0.0185, 0.0117, 0.0219, 0.0264, 0.0123, 0.0167, 0.0182], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 20:07:41,126 INFO [train.py:901] (2/4) Epoch 17, batch 4900, loss[loss=0.252, simple_loss=0.3365, pruned_loss=0.0837, over 8469.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.295, pruned_loss=0.0676, over 1614739.54 frames. ], batch size: 25, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:07:43,500 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134232.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:08:13,115 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.657e+02 3.351e+02 4.707e+02 1.168e+03, threshold=6.701e+02, percent-clipped=12.0 +2023-02-06 20:08:17,820 INFO [train.py:901] (2/4) Epoch 17, batch 4950, loss[loss=0.1726, simple_loss=0.2593, pruned_loss=0.04292, over 7652.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2962, pruned_loss=0.0682, over 1607589.72 frames. ], batch size: 19, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:08:18,297 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. 
limit=2.0 +2023-02-06 20:08:49,554 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:08:52,964 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:08:54,212 INFO [train.py:901] (2/4) Epoch 17, batch 5000, loss[loss=0.2181, simple_loss=0.28, pruned_loss=0.07813, over 7434.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2957, pruned_loss=0.06758, over 1609337.69 frames. ], batch size: 17, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:08:55,956 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 20:09:15,213 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:09:24,852 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.361e+02 2.656e+02 3.405e+02 6.362e+02, threshold=5.311e+02, percent-clipped=0.0 +2023-02-06 20:09:30,477 INFO [train.py:901] (2/4) Epoch 17, batch 5050, loss[loss=0.2461, simple_loss=0.3219, pruned_loss=0.08515, over 8509.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2966, pruned_loss=0.06848, over 1610136.74 frames. ], batch size: 48, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:09:35,097 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:09:37,963 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5437, 1.9862, 3.2543, 1.3983, 2.3566, 2.0295, 1.6903, 2.3470], + device='cuda:2'), covar=tensor([0.2024, 0.2602, 0.0884, 0.4384, 0.1866, 0.3012, 0.2133, 0.2336], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0566, 0.0543, 0.0614, 0.0634, 0.0572, 0.0507, 0.0620], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:09:54,070 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:09:58,687 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 20:10:04,495 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5835, 2.8019, 2.4685, 4.0907, 1.4862, 2.2174, 2.4381, 3.1748], + device='cuda:2'), covar=tensor([0.0700, 0.0797, 0.0781, 0.0208, 0.1230, 0.1232, 0.1037, 0.0639], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0200, 0.0249, 0.0211, 0.0211, 0.0247, 0.0255, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:10:07,172 INFO [train.py:901] (2/4) Epoch 17, batch 5100, loss[loss=0.2283, simple_loss=0.3059, pruned_loss=0.07534, over 8493.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2958, pruned_loss=0.06805, over 1607548.14 frames. ], batch size: 39, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:10:36,991 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.384e+02 2.769e+02 3.675e+02 1.185e+03, threshold=5.538e+02, percent-clipped=9.0 +2023-02-06 20:10:38,570 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:10:42,692 INFO [train.py:901] (2/4) Epoch 17, batch 5150, loss[loss=0.2002, simple_loss=0.2839, pruned_loss=0.05822, over 7959.00 frames. 
], tot_loss[loss=0.2161, simple_loss=0.2961, pruned_loss=0.06811, over 1607465.91 frames. ], batch size: 21, lr: 4.46e-03, grad_scale: 8.0 +2023-02-06 20:10:44,804 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4682, 4.4104, 3.9725, 2.1210, 3.9463, 3.9334, 4.0334, 3.7930], + device='cuda:2'), covar=tensor([0.0682, 0.0509, 0.0995, 0.4295, 0.0775, 0.0789, 0.1159, 0.0739], + device='cuda:2'), in_proj_covar=tensor([0.0500, 0.0412, 0.0418, 0.0514, 0.0404, 0.0412, 0.0399, 0.0359], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:11:17,577 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4598, 4.3830, 3.9467, 2.3567, 3.8937, 4.0486, 4.0135, 3.7371], + device='cuda:2'), covar=tensor([0.0623, 0.0554, 0.1014, 0.4109, 0.0753, 0.0985, 0.1199, 0.0756], + device='cuda:2'), in_proj_covar=tensor([0.0498, 0.0410, 0.0416, 0.0513, 0.0402, 0.0409, 0.0398, 0.0358], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:11:20,277 INFO [train.py:901] (2/4) Epoch 17, batch 5200, loss[loss=0.2077, simple_loss=0.2713, pruned_loss=0.07202, over 7433.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2953, pruned_loss=0.06766, over 1606772.19 frames. ], batch size: 17, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:11:25,570 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.68 vs. limit=2.0 +2023-02-06 20:11:49,978 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.225e+02 2.783e+02 3.706e+02 1.482e+03, threshold=5.567e+02, percent-clipped=8.0 +2023-02-06 20:11:54,889 INFO [train.py:901] (2/4) Epoch 17, batch 5250, loss[loss=0.2107, simple_loss=0.2782, pruned_loss=0.07161, over 7663.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2953, pruned_loss=0.06777, over 1604785.23 frames. ], batch size: 19, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:11:57,602 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 20:12:17,327 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 20:12:21,443 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 20:12:31,032 INFO [train.py:901] (2/4) Epoch 17, batch 5300, loss[loss=0.2106, simple_loss=0.3051, pruned_loss=0.05803, over 8344.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2945, pruned_loss=0.0671, over 1602241.09 frames. ], batch size: 24, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:12:56,715 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 20:12:58,389 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:13:01,795 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134671.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:13:02,352 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.333e+02 2.884e+02 3.429e+02 1.143e+03, threshold=5.769e+02, percent-clipped=6.0 +2023-02-06 20:13:07,125 INFO [train.py:901] (2/4) Epoch 17, batch 5350, loss[loss=0.172, simple_loss=0.2589, pruned_loss=0.04253, over 7980.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2947, pruned_loss=0.06734, over 1606448.92 frames. 
], batch size: 21, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:13:16,921 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8152, 2.0729, 1.8098, 2.6055, 1.1668, 1.4605, 1.8385, 2.0262], + device='cuda:2'), covar=tensor([0.0771, 0.0708, 0.0943, 0.0363, 0.1185, 0.1395, 0.0834, 0.0704], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0199, 0.0249, 0.0212, 0.0211, 0.0247, 0.0255, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:13:36,021 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-06 20:13:42,898 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6596, 2.2693, 4.1210, 1.4026, 2.7946, 2.2451, 1.6883, 2.7601], + device='cuda:2'), covar=tensor([0.1869, 0.2392, 0.0678, 0.4305, 0.1809, 0.2978, 0.2146, 0.2324], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0563, 0.0542, 0.0611, 0.0631, 0.0570, 0.0506, 0.0615], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:13:43,340 INFO [train.py:901] (2/4) Epoch 17, batch 5400, loss[loss=0.2059, simple_loss=0.2929, pruned_loss=0.05946, over 8530.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2944, pruned_loss=0.06756, over 1605370.60 frames. ], batch size: 28, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:13:44,323 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:01,973 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:10,291 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0514, 2.5517, 3.6022, 1.9621, 1.9326, 3.5853, 0.7371, 2.1503], + device='cuda:2'), covar=tensor([0.1513, 0.1451, 0.0240, 0.2087, 0.3109, 0.0503, 0.2671, 0.1670], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0185, 0.0116, 0.0219, 0.0263, 0.0123, 0.0167, 0.0181], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 20:14:14,300 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.337e+02 2.988e+02 3.635e+02 1.067e+03, threshold=5.976e+02, percent-clipped=7.0 +2023-02-06 20:14:17,748 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8932, 3.7775, 3.4384, 1.9282, 3.3906, 3.4360, 3.4885, 3.3468], + device='cuda:2'), covar=tensor([0.0910, 0.0686, 0.1248, 0.4841, 0.1029, 0.1140, 0.1440, 0.0981], + device='cuda:2'), in_proj_covar=tensor([0.0508, 0.0418, 0.0424, 0.0527, 0.0413, 0.0418, 0.0407, 0.0364], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:14:18,990 INFO [train.py:901] (2/4) Epoch 17, batch 5450, loss[loss=0.2154, simple_loss=0.2891, pruned_loss=0.07087, over 7509.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2935, pruned_loss=0.06693, over 1600528.60 frames. 
], batch size: 18, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:14:20,487 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:23,985 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134786.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:14:54,763 INFO [train.py:901] (2/4) Epoch 17, batch 5500, loss[loss=0.1951, simple_loss=0.2777, pruned_loss=0.05622, over 8361.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.292, pruned_loss=0.06619, over 1601585.06 frames. ], batch size: 24, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:14:55,399 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 20:15:25,532 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.381e+02 2.895e+02 3.783e+02 8.489e+02, threshold=5.790e+02, percent-clipped=3.0 +2023-02-06 20:15:31,381 INFO [train.py:901] (2/4) Epoch 17, batch 5550, loss[loss=0.235, simple_loss=0.3099, pruned_loss=0.0801, over 8026.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2925, pruned_loss=0.06663, over 1603208.25 frames. ], batch size: 22, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:15:32,208 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:16:06,820 INFO [train.py:901] (2/4) Epoch 17, batch 5600, loss[loss=0.2108, simple_loss=0.2937, pruned_loss=0.06394, over 8520.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2926, pruned_loss=0.06648, over 1605183.39 frames. ], batch size: 26, lr: 4.45e-03, grad_scale: 8.0 +2023-02-06 20:16:34,848 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1969, 1.9345, 2.5724, 2.1437, 2.5664, 2.1876, 1.8485, 1.2275], + device='cuda:2'), covar=tensor([0.5072, 0.4325, 0.1761, 0.3193, 0.2010, 0.2637, 0.1839, 0.5043], + device='cuda:2'), in_proj_covar=tensor([0.0914, 0.0930, 0.0774, 0.0902, 0.0972, 0.0849, 0.0721, 0.0800], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 20:16:38,758 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.724e+02 3.292e+02 4.135e+02 9.276e+02, threshold=6.584e+02, percent-clipped=7.0 +2023-02-06 20:16:42,898 INFO [train.py:901] (2/4) Epoch 17, batch 5650, loss[loss=0.1607, simple_loss=0.2377, pruned_loss=0.04187, over 7202.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2947, pruned_loss=0.06712, over 1610770.74 frames. ], batch size: 16, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:17:04,353 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 20:17:18,725 INFO [train.py:901] (2/4) Epoch 17, batch 5700, loss[loss=0.226, simple_loss=0.3096, pruned_loss=0.07118, over 8644.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06726, over 1613810.66 frames. ], batch size: 34, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:17:24,533 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135037.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:28,048 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:40,716 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.90 vs. 
limit=5.0 +2023-02-06 20:17:42,583 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:45,954 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:49,729 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.516e+02 3.214e+02 3.973e+02 1.283e+03, threshold=6.427e+02, percent-clipped=6.0 +2023-02-06 20:17:53,700 INFO [train.py:901] (2/4) Epoch 17, batch 5750, loss[loss=0.2357, simple_loss=0.3047, pruned_loss=0.08337, over 8488.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2949, pruned_loss=0.06783, over 1609016.39 frames. ], batch size: 26, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:18:02,757 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2169, 1.2384, 1.5172, 1.1705, 0.7122, 1.2750, 1.1661, 0.9852], + device='cuda:2'), covar=tensor([0.0592, 0.1342, 0.1692, 0.1463, 0.0598, 0.1573, 0.0720, 0.0712], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0099, 0.0162, 0.0113, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:18:11,551 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 20:18:21,899 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8354, 3.8784, 2.3427, 2.8764, 2.7472, 2.2024, 2.8593, 3.1195], + device='cuda:2'), covar=tensor([0.1778, 0.0266, 0.1065, 0.0741, 0.0804, 0.1305, 0.1124, 0.1041], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0231, 0.0326, 0.0301, 0.0296, 0.0330, 0.0341, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 20:18:30,181 INFO [train.py:901] (2/4) Epoch 17, batch 5800, loss[loss=0.2058, simple_loss=0.2921, pruned_loss=0.05974, over 5115.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2939, pruned_loss=0.06726, over 1604470.82 frames. ], batch size: 11, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:18:36,169 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 20:19:00,545 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.313e+02 2.882e+02 3.681e+02 6.576e+02, threshold=5.764e+02, percent-clipped=1.0 +2023-02-06 20:19:04,573 INFO [train.py:901] (2/4) Epoch 17, batch 5850, loss[loss=0.2665, simple_loss=0.3323, pruned_loss=0.1004, over 8108.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.294, pruned_loss=0.06716, over 1607300.12 frames. ], batch size: 23, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:19:12,152 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135189.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:37,628 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:41,090 INFO [train.py:901] (2/4) Epoch 17, batch 5900, loss[loss=0.211, simple_loss=0.2947, pruned_loss=0.0637, over 8198.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2936, pruned_loss=0.06689, over 1607143.79 frames. ], batch size: 23, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:03,977 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.15 vs. 
limit=5.0 +2023-02-06 20:20:12,436 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.357e+02 3.084e+02 3.660e+02 6.807e+02, threshold=6.167e+02, percent-clipped=2.0 +2023-02-06 20:20:16,622 INFO [train.py:901] (2/4) Epoch 17, batch 5950, loss[loss=0.3018, simple_loss=0.3567, pruned_loss=0.1234, over 6963.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2938, pruned_loss=0.06724, over 1606832.55 frames. ], batch size: 71, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:25,207 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7070, 2.1178, 2.3932, 1.2569, 2.4279, 1.6006, 0.7073, 1.9292], + device='cuda:2'), covar=tensor([0.0488, 0.0267, 0.0192, 0.0519, 0.0271, 0.0700, 0.0673, 0.0278], + device='cuda:2'), in_proj_covar=tensor([0.0435, 0.0376, 0.0320, 0.0431, 0.0358, 0.0516, 0.0380, 0.0397], + device='cuda:2'), out_proj_covar=tensor([1.1862e-04, 9.9983e-05, 8.4629e-05, 1.1535e-04, 9.5846e-05, 1.4872e-04, + 1.0343e-04, 1.0629e-04], device='cuda:2') +2023-02-06 20:20:43,831 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.81 vs. limit=5.0 +2023-02-06 20:20:52,281 INFO [train.py:901] (2/4) Epoch 17, batch 6000, loss[loss=0.2272, simple_loss=0.3171, pruned_loss=0.06866, over 8025.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.293, pruned_loss=0.06675, over 1603882.12 frames. ], batch size: 22, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:20:52,281 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 20:21:05,422 INFO [train.py:935] (2/4) Epoch 17, validation: loss=0.1774, simple_loss=0.2777, pruned_loss=0.03857, over 944034.00 frames. +2023-02-06 20:21:05,423 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 20:21:12,597 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:21:30,590 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5808, 1.2722, 2.7852, 1.2373, 2.1559, 2.9683, 3.1010, 2.3508], + device='cuda:2'), covar=tensor([0.1313, 0.1885, 0.0538, 0.2381, 0.1003, 0.0431, 0.0745, 0.0963], + device='cuda:2'), in_proj_covar=tensor([0.0282, 0.0311, 0.0276, 0.0302, 0.0294, 0.0253, 0.0389, 0.0300], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 20:21:36,687 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.411e+02 3.026e+02 3.580e+02 8.983e+02, threshold=6.051e+02, percent-clipped=2.0 +2023-02-06 20:21:40,885 INFO [train.py:901] (2/4) Epoch 17, batch 6050, loss[loss=0.2579, simple_loss=0.3087, pruned_loss=0.1035, over 7698.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2949, pruned_loss=0.06785, over 1608435.37 frames. ], batch size: 18, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:22:16,316 INFO [train.py:901] (2/4) Epoch 17, batch 6100, loss[loss=0.2438, simple_loss=0.3167, pruned_loss=0.08546, over 8571.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2944, pruned_loss=0.0672, over 1614722.65 frames. ], batch size: 49, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:22:47,554 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.459e+02 2.890e+02 3.783e+02 6.848e+02, threshold=5.780e+02, percent-clipped=3.0 +2023-02-06 20:22:49,609 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
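The recurring `WARNING [train.py:1067] Exclude cut with ID ... from training` entries come from icefall's utterance-duration filter, which drops cuts that are too short or too long before they reach the sampler. The bounds in the sketch below are assumptions (this log only shows that every excluded cut is shorter than 1 s or longer than roughly 25 s); the real limits are set in the recipe's `train.py`:

```python
import logging
from dataclasses import dataclass

# Assumed duration bounds in seconds; the actual values are defined in the
# recipe's train.py and are not visible in this log.
MIN_SECONDS, MAX_SECONDS = 1.0, 25.0

@dataclass
class Cut:
    """Stand-in for a lhotse Cut, which exposes .id and .duration."""
    id: str
    duration: float

def remove_short_and_long_utt(c: Cut) -> bool:
    """Return False (exclude from training) for out-of-range cuts."""
    if c.duration < MIN_SECONDS or c.duration > MAX_SECONDS:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# Two exclusions taken from this log: one too short, one too long.
assert not remove_short_and_long_utt(Cut("3557-8342-0013-71585", 0.92))
assert not remove_short_and_long_utt(Cut("5622-44585-0006-50425", 25.775))
```

In the recipe this predicate is applied lazily to the training cut set (e.g. `train_cuts.filter(remove_short_and_long_utt)`), so exclusions are logged as cuts are drawn during iteration rather than up front.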
Duration: 0.92225 +2023-02-06 20:22:52,250 INFO [train.py:901] (2/4) Epoch 17, batch 6150, loss[loss=0.2107, simple_loss=0.2839, pruned_loss=0.06875, over 8496.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2933, pruned_loss=0.06643, over 1613416.05 frames. ], batch size: 26, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:26,579 INFO [train.py:901] (2/4) Epoch 17, batch 6200, loss[loss=0.2333, simple_loss=0.3112, pruned_loss=0.07769, over 8345.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2936, pruned_loss=0.06663, over 1610662.35 frames. ], batch size: 26, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:29,344 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:23:57,601 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.470e+02 3.035e+02 3.550e+02 6.137e+02, threshold=6.070e+02, percent-clipped=1.0 +2023-02-06 20:24:01,728 INFO [train.py:901] (2/4) Epoch 17, batch 6250, loss[loss=0.1897, simple_loss=0.276, pruned_loss=0.05163, over 7923.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2928, pruned_loss=0.06634, over 1607698.47 frames. ], batch size: 20, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:13,155 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:24,743 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9797, 2.0230, 1.8087, 2.3377, 1.3993, 1.6218, 1.8377, 2.0873], + device='cuda:2'), covar=tensor([0.0626, 0.0704, 0.0871, 0.0535, 0.1035, 0.1119, 0.0803, 0.0668], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0197, 0.0246, 0.0209, 0.0207, 0.0245, 0.0254, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:24:29,109 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 20:24:30,231 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:36,950 INFO [train.py:901] (2/4) Epoch 17, batch 6300, loss[loss=0.1833, simple_loss=0.2709, pruned_loss=0.04789, over 8109.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2939, pruned_loss=0.06646, over 1613276.86 frames. ], batch size: 23, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:49,744 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:49,781 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:25:07,391 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.700e+02 3.426e+02 4.477e+02 8.691e+02, threshold=6.853e+02, percent-clipped=8.0 +2023-02-06 20:25:11,449 INFO [train.py:901] (2/4) Epoch 17, batch 6350, loss[loss=0.1803, simple_loss=0.2692, pruned_loss=0.04576, over 7964.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2932, pruned_loss=0.06608, over 1612008.53 frames. ], batch size: 21, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:25:46,610 INFO [train.py:901] (2/4) Epoch 17, batch 6400, loss[loss=0.2148, simple_loss=0.2986, pruned_loss=0.06548, over 8288.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2944, pruned_loss=0.06673, over 1611709.83 frames. 
], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:16,683 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.216e+02 2.648e+02 3.143e+02 6.334e+02, threshold=5.295e+02, percent-clipped=0.0 +2023-02-06 20:26:20,488 INFO [train.py:901] (2/4) Epoch 17, batch 6450, loss[loss=0.1711, simple_loss=0.2456, pruned_loss=0.04836, over 7260.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2955, pruned_loss=0.06759, over 1609777.62 frames. ], batch size: 16, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:56,417 INFO [train.py:901] (2/4) Epoch 17, batch 6500, loss[loss=0.2217, simple_loss=0.2965, pruned_loss=0.07349, over 8240.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2953, pruned_loss=0.06766, over 1612447.76 frames. ], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:27,342 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.443e+02 3.095e+02 4.367e+02 8.897e+02, threshold=6.190e+02, percent-clipped=12.0 +2023-02-06 20:27:31,527 INFO [train.py:901] (2/4) Epoch 17, batch 6550, loss[loss=0.1923, simple_loss=0.2809, pruned_loss=0.05183, over 7939.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.295, pruned_loss=0.0672, over 1610441.66 frames. ], batch size: 20, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:45,159 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0 +2023-02-06 20:27:48,291 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135904.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:27:56,443 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 20:28:06,531 INFO [train.py:901] (2/4) Epoch 17, batch 6600, loss[loss=0.298, simple_loss=0.3655, pruned_loss=0.1152, over 7330.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2943, pruned_loss=0.06728, over 1609512.54 frames. ], batch size: 71, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:06,731 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:16,335 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 20:28:27,298 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8690, 1.6403, 1.9836, 1.6682, 0.8645, 1.6504, 2.1568, 2.1791], + device='cuda:2'), covar=tensor([0.0428, 0.1232, 0.1594, 0.1382, 0.0598, 0.1387, 0.0626, 0.0535], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0153, 0.0191, 0.0158, 0.0101, 0.0163, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:28:36,536 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.554e+02 2.943e+02 3.634e+02 1.271e+03, threshold=5.887e+02, percent-clipped=2.0 +2023-02-06 20:28:40,553 INFO [train.py:901] (2/4) Epoch 17, batch 6650, loss[loss=0.2274, simple_loss=0.3208, pruned_loss=0.06703, over 8450.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2942, pruned_loss=0.06728, over 1613358.96 frames. 
], batch size: 27, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:49,898 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:56,429 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136000.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:04,531 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136012.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:07,755 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7965, 5.9278, 5.1293, 2.4547, 5.2135, 5.5959, 5.5111, 5.2746], + device='cuda:2'), covar=tensor([0.0519, 0.0350, 0.0951, 0.4333, 0.0717, 0.0705, 0.0916, 0.0589], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0418, 0.0421, 0.0522, 0.0411, 0.0416, 0.0404, 0.0364], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:29:16,459 INFO [train.py:901] (2/4) Epoch 17, batch 6700, loss[loss=0.1883, simple_loss=0.2649, pruned_loss=0.05583, over 7527.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2942, pruned_loss=0.06762, over 1612326.49 frames. ], batch size: 18, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:29:47,790 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.437e+02 3.090e+02 3.837e+02 8.578e+02, threshold=6.181e+02, percent-clipped=4.0 +2023-02-06 20:29:51,765 INFO [train.py:901] (2/4) Epoch 17, batch 6750, loss[loss=0.1769, simple_loss=0.2557, pruned_loss=0.04902, over 7432.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2948, pruned_loss=0.06733, over 1618393.10 frames. ], batch size: 17, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:11,741 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:13,064 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9873, 2.6226, 3.1933, 1.3535, 3.2631, 1.7348, 1.5786, 1.9707], + device='cuda:2'), covar=tensor([0.0789, 0.0325, 0.0190, 0.0721, 0.0429, 0.0738, 0.0793, 0.0475], + device='cuda:2'), in_proj_covar=tensor([0.0429, 0.0371, 0.0320, 0.0423, 0.0353, 0.0514, 0.0375, 0.0394], + device='cuda:2'), out_proj_covar=tensor([1.1705e-04, 9.8736e-05, 8.4776e-05, 1.1300e-04, 9.4536e-05, 1.4809e-04, + 1.0197e-04, 1.0554e-04], device='cuda:2') +2023-02-06 20:30:26,347 INFO [train.py:901] (2/4) Epoch 17, batch 6800, loss[loss=0.2028, simple_loss=0.274, pruned_loss=0.06576, over 7535.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2959, pruned_loss=0.06769, over 1619453.33 frames. ], batch size: 18, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:35,092 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
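Cut IDs carrying an `_sp0.9` or `_sp1.1` suffix are speed-perturbed copies of the original utterances. Perturbing by factor f scales the duration by 1/f, which is why a cut can pass the duration filter at its original speed yet be excluded in its slowed-down copy; for example, `6951-79737-0043-83149` is excluded both raw (25.285 s) and as `_sp0.9` (25.285 / 0.9 ≈ 28.094 s), with both durations appearing elsewhere in this log. A small check of that arithmetic:

```python
# Speed perturbation by factor f makes the audio 1/f times as long.
def perturbed_duration(duration: float, factor: float) -> float:
    return duration / factor

# Durations quoted from this log for sp0.9 / sp1.1 copies of two cuts:
assert abs(perturbed_duration(25.285, 0.9) - 28.0944375) < 1e-3
assert abs(perturbed_duration(29.735, 1.1) - 27.0318125) < 1e-3
```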
Duration: 26.438875 +2023-02-06 20:30:45,877 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4385, 1.5955, 2.1892, 1.3063, 1.5068, 1.6528, 1.4925, 1.4478], + device='cuda:2'), covar=tensor([0.1812, 0.2391, 0.0819, 0.4310, 0.1772, 0.3242, 0.2224, 0.2076], + device='cuda:2'), in_proj_covar=tensor([0.0510, 0.0571, 0.0546, 0.0617, 0.0636, 0.0577, 0.0511, 0.0623], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:30:48,378 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:57,852 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.691e+02 3.204e+02 3.737e+02 8.793e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 20:31:01,826 INFO [train.py:901] (2/4) Epoch 17, batch 6850, loss[loss=0.214, simple_loss=0.2942, pruned_loss=0.06691, over 8142.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2974, pruned_loss=0.06848, over 1621210.86 frames. ], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:31:22,725 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 20:31:24,399 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0878, 2.3084, 3.5909, 1.9298, 1.7772, 3.5815, 0.8747, 2.1748], + device='cuda:2'), covar=tensor([0.1489, 0.1391, 0.0237, 0.1809, 0.2957, 0.0314, 0.2453, 0.1537], + device='cuda:2'), in_proj_covar=tensor([0.0176, 0.0184, 0.0115, 0.0216, 0.0259, 0.0123, 0.0165, 0.0181], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 20:31:37,319 INFO [train.py:901] (2/4) Epoch 17, batch 6900, loss[loss=0.2423, simple_loss=0.3182, pruned_loss=0.08321, over 8247.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2977, pruned_loss=0.06873, over 1616457.30 frames. ], batch size: 22, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:31:40,196 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7482, 1.2961, 3.9880, 1.4532, 3.4755, 3.3451, 3.5808, 3.4778], + device='cuda:2'), covar=tensor([0.0747, 0.4507, 0.0630, 0.3773, 0.1394, 0.0947, 0.0719, 0.0809], + device='cuda:2'), in_proj_covar=tensor([0.0574, 0.0617, 0.0654, 0.0590, 0.0670, 0.0569, 0.0569, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 20:32:08,491 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.868e+02 2.541e+02 3.415e+02 4.318e+02 7.722e+02, threshold=6.831e+02, percent-clipped=4.0 +2023-02-06 20:32:12,488 INFO [train.py:901] (2/4) Epoch 17, batch 6950, loss[loss=0.2039, simple_loss=0.2779, pruned_loss=0.06488, over 7933.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2972, pruned_loss=0.06898, over 1607631.18 frames. 
], batch size: 20, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:22,236 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2272, 1.3468, 1.5963, 1.3738, 0.6945, 1.4173, 1.2549, 1.2316], + device='cuda:2'), covar=tensor([0.0534, 0.1363, 0.1697, 0.1421, 0.0595, 0.1515, 0.0667, 0.0656], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0101, 0.0162, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:32:32,958 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 20:32:47,695 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9893, 1.2558, 1.1922, 0.5193, 1.2101, 0.9983, 0.0987, 1.1906], + device='cuda:2'), covar=tensor([0.0407, 0.0343, 0.0348, 0.0556, 0.0396, 0.0885, 0.0733, 0.0294], + device='cuda:2'), in_proj_covar=tensor([0.0425, 0.0366, 0.0317, 0.0419, 0.0348, 0.0509, 0.0371, 0.0390], + device='cuda:2'), out_proj_covar=tensor([1.1580e-04, 9.7235e-05, 8.4124e-05, 1.1157e-04, 9.2951e-05, 1.4670e-04, + 1.0103e-04, 1.0438e-04], device='cuda:2') +2023-02-06 20:32:48,136 INFO [train.py:901] (2/4) Epoch 17, batch 7000, loss[loss=0.2255, simple_loss=0.3063, pruned_loss=0.0723, over 8498.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.06897, over 1611360.98 frames. ], batch size: 26, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:58,254 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:04,323 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:06,278 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:11,085 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:18,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.499e+02 2.956e+02 3.583e+02 7.307e+02, threshold=5.911e+02, percent-clipped=2.0 +2023-02-06 20:33:22,378 INFO [train.py:901] (2/4) Epoch 17, batch 7050, loss[loss=0.1802, simple_loss=0.2636, pruned_loss=0.04837, over 7680.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2967, pruned_loss=0.06893, over 1607360.69 frames. ], batch size: 18, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:33:29,402 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:46,653 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6490, 1.8336, 1.6380, 2.2621, 1.0470, 1.4191, 1.6626, 1.8383], + device='cuda:2'), covar=tensor([0.0798, 0.0763, 0.0999, 0.0474, 0.1158, 0.1359, 0.0833, 0.0768], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0198, 0.0249, 0.0212, 0.0209, 0.0247, 0.0254, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:33:57,954 INFO [train.py:901] (2/4) Epoch 17, batch 7100, loss[loss=0.2354, simple_loss=0.3226, pruned_loss=0.07406, over 8189.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2977, pruned_loss=0.06935, over 1609647.13 frames. 
], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:04,102 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2583, 3.1284, 2.9020, 1.6701, 2.8489, 2.8500, 2.8221, 2.7333], + device='cuda:2'), covar=tensor([0.1349, 0.0964, 0.1596, 0.4886, 0.1310, 0.1250, 0.1953, 0.1243], + device='cuda:2'), in_proj_covar=tensor([0.0507, 0.0417, 0.0422, 0.0520, 0.0411, 0.0417, 0.0404, 0.0364], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:34:18,670 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:26,520 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:27,626 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.503e+02 2.917e+02 3.905e+02 1.004e+03, threshold=5.834e+02, percent-clipped=4.0 +2023-02-06 20:34:31,743 INFO [train.py:901] (2/4) Epoch 17, batch 7150, loss[loss=0.2213, simple_loss=0.3061, pruned_loss=0.06819, over 8341.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2972, pruned_loss=0.06943, over 1608233.62 frames. ], batch size: 25, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:50,068 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:35:07,636 INFO [train.py:901] (2/4) Epoch 17, batch 7200, loss[loss=0.2179, simple_loss=0.2874, pruned_loss=0.07417, over 7198.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2963, pruned_loss=0.0692, over 1606144.57 frames. ], batch size: 16, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:35:37,959 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.498e+02 3.072e+02 3.698e+02 8.742e+02, threshold=6.145e+02, percent-clipped=2.0 +2023-02-06 20:35:42,151 INFO [train.py:901] (2/4) Epoch 17, batch 7250, loss[loss=0.2109, simple_loss=0.2936, pruned_loss=0.06409, over 8350.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2965, pruned_loss=0.06901, over 1612219.01 frames. ], batch size: 24, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:11,362 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136619.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:17,806 INFO [train.py:901] (2/4) Epoch 17, batch 7300, loss[loss=0.2477, simple_loss=0.3299, pruned_loss=0.08275, over 8480.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2955, pruned_loss=0.0681, over 1613063.17 frames. ], batch size: 29, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:40,630 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:48,514 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.506e+02 2.969e+02 3.762e+02 7.100e+02, threshold=5.939e+02, percent-clipped=2.0 +2023-02-06 20:36:52,584 INFO [train.py:901] (2/4) Epoch 17, batch 7350, loss[loss=0.1986, simple_loss=0.286, pruned_loss=0.05559, over 8357.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2954, pruned_loss=0.06824, over 1612447.23 frames. ], batch size: 24, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:59,052 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-06 20:37:05,620 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:14,745 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4233, 1.8239, 2.0238, 1.2252, 2.0789, 1.2129, 0.6522, 1.7799], + device='cuda:2'), covar=tensor([0.0643, 0.0364, 0.0245, 0.0604, 0.0414, 0.0981, 0.0807, 0.0311], + device='cuda:2'), in_proj_covar=tensor([0.0428, 0.0367, 0.0316, 0.0420, 0.0348, 0.0512, 0.0371, 0.0392], + device='cuda:2'), out_proj_covar=tensor([1.1670e-04, 9.7663e-05, 8.3805e-05, 1.1183e-04, 9.3130e-05, 1.4729e-04, + 1.0086e-04, 1.0478e-04], device='cuda:2') +2023-02-06 20:37:16,510 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 20:37:17,971 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:27,018 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136727.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:28,122 INFO [train.py:901] (2/4) Epoch 17, batch 7400, loss[loss=0.2208, simple_loss=0.2858, pruned_loss=0.07788, over 7797.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2957, pruned_loss=0.06822, over 1613966.84 frames. ], batch size: 19, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:35,034 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 20:37:36,604 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136740.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:45,398 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:55,327 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6530, 4.6579, 4.2433, 2.1770, 4.1226, 4.2342, 4.2749, 3.9716], + device='cuda:2'), covar=tensor([0.0721, 0.0522, 0.0922, 0.4662, 0.0875, 0.1010, 0.1236, 0.0882], + device='cuda:2'), in_proj_covar=tensor([0.0505, 0.0419, 0.0420, 0.0522, 0.0412, 0.0417, 0.0408, 0.0365], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:37:59,284 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.354e+02 2.898e+02 3.777e+02 7.037e+02, threshold=5.795e+02, percent-clipped=3.0 +2023-02-06 20:38:03,290 INFO [train.py:901] (2/4) Epoch 17, batch 7450, loss[loss=0.2185, simple_loss=0.2894, pruned_loss=0.07382, over 7658.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06796, over 1611876.37 frames. ], batch size: 19, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:38:16,571 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 20:38:26,054 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:33,499 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8957, 1.8954, 6.0248, 2.2068, 5.3777, 5.0131, 5.4857, 5.4107], + device='cuda:2'), covar=tensor([0.0432, 0.4159, 0.0385, 0.3737, 0.1083, 0.0829, 0.0504, 0.0488], + device='cuda:2'), in_proj_covar=tensor([0.0578, 0.0622, 0.0656, 0.0597, 0.0673, 0.0574, 0.0574, 0.0635], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 20:38:34,156 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:37,330 INFO [train.py:901] (2/4) Epoch 17, batch 7500, loss[loss=0.1824, simple_loss=0.2677, pruned_loss=0.04858, over 7773.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2955, pruned_loss=0.06804, over 1616105.97 frames. ], batch size: 19, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:38:38,168 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9826, 1.8464, 2.0350, 1.7374, 0.9872, 1.8005, 2.3726, 2.3179], + device='cuda:2'), covar=tensor([0.0422, 0.1245, 0.1632, 0.1370, 0.0553, 0.1421, 0.0606, 0.0550], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:39:09,457 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.462e+02 2.866e+02 3.948e+02 7.787e+02, threshold=5.732e+02, percent-clipped=6.0 +2023-02-06 20:39:11,049 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:13,444 INFO [train.py:901] (2/4) Epoch 17, batch 7550, loss[loss=0.2531, simple_loss=0.3407, pruned_loss=0.08276, over 8341.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2964, pruned_loss=0.06816, over 1619431.18 frames. ], batch size: 26, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:39:28,624 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:49,021 INFO [train.py:901] (2/4) Epoch 17, batch 7600, loss[loss=0.2499, simple_loss=0.3242, pruned_loss=0.08784, over 8516.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2959, pruned_loss=0.0687, over 1613183.26 frames. ], batch size: 28, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:40:21,096 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.516e+02 2.945e+02 3.717e+02 7.457e+02, threshold=5.891e+02, percent-clipped=6.0 +2023-02-06 20:40:25,198 INFO [train.py:901] (2/4) Epoch 17, batch 7650, loss[loss=0.2408, simple_loss=0.3257, pruned_loss=0.07793, over 8457.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2961, pruned_loss=0.06869, over 1614185.92 frames. ], batch size: 27, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:40:43,283 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:41:00,049 INFO [train.py:901] (2/4) Epoch 17, batch 7700, loss[loss=0.2205, simple_loss=0.3087, pruned_loss=0.06617, over 8247.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2971, pruned_loss=0.06914, over 1618249.28 frames. 
], batch size: 24, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:26,488 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 20:41:26,651 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1092, 1.3859, 1.6593, 1.3590, 1.0399, 1.4323, 1.7731, 1.4380], + device='cuda:2'), covar=tensor([0.0520, 0.1389, 0.1716, 0.1486, 0.0597, 0.1561, 0.0684, 0.0673], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0157, 0.0100, 0.0161, 0.0113, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:41:26,691 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:41:30,556 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.598e+02 3.111e+02 3.900e+02 8.834e+02, threshold=6.222e+02, percent-clipped=1.0 +2023-02-06 20:41:34,727 INFO [train.py:901] (2/4) Epoch 17, batch 7750, loss[loss=0.176, simple_loss=0.2538, pruned_loss=0.04905, over 7548.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2956, pruned_loss=0.06825, over 1616404.16 frames. ], batch size: 18, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:44,962 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:03,587 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:09,490 INFO [train.py:901] (2/4) Epoch 17, batch 7800, loss[loss=0.2225, simple_loss=0.3014, pruned_loss=0.07185, over 8499.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2964, pruned_loss=0.06844, over 1618591.92 frames. ], batch size: 26, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:42:36,424 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:39,579 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.454e+02 2.768e+02 3.488e+02 7.043e+02, threshold=5.537e+02, percent-clipped=4.0 +2023-02-06 20:42:43,618 INFO [train.py:901] (2/4) Epoch 17, batch 7850, loss[loss=0.1961, simple_loss=0.2686, pruned_loss=0.06185, over 7652.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2958, pruned_loss=0.06788, over 1622283.95 frames. 
], batch size: 19, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:42:59,279 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0676, 1.5215, 1.6879, 1.4180, 0.9325, 1.5356, 1.7700, 1.6371], + device='cuda:2'), covar=tensor([0.0519, 0.1261, 0.1736, 0.1428, 0.0609, 0.1491, 0.0666, 0.0623], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0157, 0.0100, 0.0160, 0.0113, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:43:11,946 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5212, 1.5025, 1.8049, 1.2397, 1.0848, 1.8361, 0.1557, 1.1538], + device='cuda:2'), covar=tensor([0.1853, 0.1323, 0.0409, 0.1230, 0.3338, 0.0507, 0.2405, 0.1450], + device='cuda:2'), in_proj_covar=tensor([0.0176, 0.0182, 0.0116, 0.0216, 0.0261, 0.0122, 0.0165, 0.0181], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 20:43:16,598 INFO [train.py:901] (2/4) Epoch 17, batch 7900, loss[loss=0.2469, simple_loss=0.3237, pruned_loss=0.0851, over 8372.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2957, pruned_loss=0.06821, over 1618934.29 frames. ], batch size: 24, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:32,169 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 20:43:45,781 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.478e+02 3.005e+02 3.961e+02 6.905e+02, threshold=6.010e+02, percent-clipped=7.0 +2023-02-06 20:43:49,863 INFO [train.py:901] (2/4) Epoch 17, batch 7950, loss[loss=0.1717, simple_loss=0.2462, pruned_loss=0.04858, over 7650.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2942, pruned_loss=0.06684, over 1616669.53 frames. ], batch size: 19, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:52,843 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:23,205 INFO [train.py:901] (2/4) Epoch 17, batch 8000, loss[loss=0.187, simple_loss=0.2672, pruned_loss=0.05345, over 8137.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2946, pruned_loss=0.06748, over 1615158.81 frames. ], batch size: 22, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:44:36,541 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1968, 1.9055, 2.6132, 2.1121, 2.4262, 2.2351, 1.9462, 1.3220], + device='cuda:2'), covar=tensor([0.5033, 0.4654, 0.1618, 0.3573, 0.2482, 0.2772, 0.1804, 0.4930], + device='cuda:2'), in_proj_covar=tensor([0.0916, 0.0936, 0.0768, 0.0903, 0.0969, 0.0849, 0.0721, 0.0798], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 20:44:52,977 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.583e+02 3.026e+02 3.684e+02 1.341e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-06 20:44:55,373 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:57,245 INFO [train.py:901] (2/4) Epoch 17, batch 8050, loss[loss=0.1769, simple_loss=0.2521, pruned_loss=0.05082, over 7437.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2945, pruned_loss=0.0678, over 1604585.70 frames. 
], batch size: 17, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:45:12,523 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:45:29,500 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 20:45:34,942 INFO [train.py:901] (2/4) Epoch 18, batch 0, loss[loss=0.2195, simple_loss=0.2904, pruned_loss=0.07435, over 7794.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2904, pruned_loss=0.07435, over 7794.00 frames. ], batch size: 19, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:45:34,943 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 20:45:46,129 INFO [train.py:935] (2/4) Epoch 18, validation: loss=0.1783, simple_loss=0.2784, pruned_loss=0.03907, over 944034.00 frames. +2023-02-06 20:45:46,129 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 20:46:00,873 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 20:46:20,803 INFO [train.py:901] (2/4) Epoch 18, batch 50, loss[loss=0.2131, simple_loss=0.2896, pruned_loss=0.06826, over 8190.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2996, pruned_loss=0.06864, over 369368.78 frames. ], batch size: 23, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:29,002 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 2.698e+02 3.585e+02 4.414e+02 8.769e+02, threshold=7.169e+02, percent-clipped=9.0 +2023-02-06 20:46:35,903 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 20:46:42,988 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5781, 1.9751, 3.3505, 1.3393, 2.6154, 1.9800, 1.6336, 2.5619], + device='cuda:2'), covar=tensor([0.1865, 0.2436, 0.0911, 0.4306, 0.1659, 0.3134, 0.2172, 0.2116], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0577, 0.0548, 0.0620, 0.0642, 0.0584, 0.0514, 0.0629], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:46:56,055 INFO [train.py:901] (2/4) Epoch 18, batch 100, loss[loss=0.1916, simple_loss=0.2745, pruned_loss=0.05434, over 8187.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2975, pruned_loss=0.06761, over 646260.52 frames. ], batch size: 23, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:58,818 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 20:47:16,442 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:31,279 INFO [train.py:901] (2/4) Epoch 18, batch 150, loss[loss=0.1803, simple_loss=0.2527, pruned_loss=0.05397, over 7427.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2939, pruned_loss=0.0651, over 862370.95 frames. ], batch size: 17, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:47:33,520 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:39,693 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.369e+02 2.797e+02 3.885e+02 6.122e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-06 20:48:07,693 INFO [train.py:901] (2/4) Epoch 18, batch 200, loss[loss=0.2458, simple_loss=0.3235, pruned_loss=0.08409, over 8492.00 frames. 
], tot_loss[loss=0.214, simple_loss=0.2953, pruned_loss=0.06636, over 1031781.55 frames. ], batch size: 28, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:33,245 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 20:48:44,088 INFO [train.py:901] (2/4) Epoch 18, batch 250, loss[loss=0.2226, simple_loss=0.3013, pruned_loss=0.07193, over 8223.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2961, pruned_loss=0.06696, over 1160913.37 frames. ], batch size: 22, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:52,404 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.467e+02 3.008e+02 3.586e+02 6.135e+02, threshold=6.015e+02, percent-clipped=1.0 +2023-02-06 20:48:55,930 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 20:49:03,705 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 20:49:19,871 INFO [train.py:901] (2/4) Epoch 18, batch 300, loss[loss=0.1973, simple_loss=0.2798, pruned_loss=0.05741, over 8239.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.296, pruned_loss=0.06684, over 1259435.91 frames. ], batch size: 22, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:49:55,785 INFO [train.py:901] (2/4) Epoch 18, batch 350, loss[loss=0.2265, simple_loss=0.3064, pruned_loss=0.0733, over 8548.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2958, pruned_loss=0.06697, over 1335398.12 frames. ], batch size: 39, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:05,700 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.555e+02 3.034e+02 3.752e+02 7.695e+02, threshold=6.069e+02, percent-clipped=3.0 +2023-02-06 20:50:21,152 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2107, 2.2742, 1.9267, 2.8593, 1.4041, 1.6364, 1.9310, 2.3289], + device='cuda:2'), covar=tensor([0.0638, 0.0713, 0.0902, 0.0360, 0.1093, 0.1274, 0.0974, 0.0692], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0198, 0.0251, 0.0211, 0.0208, 0.0248, 0.0255, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:50:32,304 INFO [train.py:901] (2/4) Epoch 18, batch 400, loss[loss=0.2752, simple_loss=0.3467, pruned_loss=0.1018, over 8529.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.296, pruned_loss=0.06696, over 1403156.48 frames. ], batch size: 49, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:33,932 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8832, 2.3362, 3.4614, 1.9494, 1.6530, 3.5022, 0.4626, 2.1511], + device='cuda:2'), covar=tensor([0.1655, 0.1347, 0.0271, 0.1974, 0.3185, 0.0322, 0.2788, 0.1640], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0184, 0.0118, 0.0219, 0.0263, 0.0124, 0.0166, 0.0182], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 20:51:08,176 INFO [train.py:901] (2/4) Epoch 18, batch 450, loss[loss=0.2359, simple_loss=0.3124, pruned_loss=0.07972, over 8451.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2955, pruned_loss=0.06675, over 1450223.43 frames. 
], batch size: 27, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:16,916 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.504e+02 3.016e+02 3.557e+02 6.367e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 20:51:35,829 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8671, 1.7443, 5.9911, 2.4207, 5.4755, 5.0580, 5.5093, 5.4601], + device='cuda:2'), covar=tensor([0.0463, 0.4510, 0.0269, 0.3277, 0.0828, 0.0807, 0.0460, 0.0418], + device='cuda:2'), in_proj_covar=tensor([0.0576, 0.0617, 0.0656, 0.0591, 0.0670, 0.0574, 0.0570, 0.0634], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 20:51:43,047 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:43,593 INFO [train.py:901] (2/4) Epoch 18, batch 500, loss[loss=0.2126, simple_loss=0.3038, pruned_loss=0.06068, over 8187.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2961, pruned_loss=0.06758, over 1483054.79 frames. ], batch size: 23, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:45,192 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:50,424 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 20:52:20,557 INFO [train.py:901] (2/4) Epoch 18, batch 550, loss[loss=0.2831, simple_loss=0.3228, pruned_loss=0.1217, over 7411.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2953, pruned_loss=0.06781, over 1508482.60 frames. ], batch size: 17, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:52:29,487 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.606e+02 3.197e+02 3.974e+02 7.545e+02, threshold=6.394e+02, percent-clipped=3.0 +2023-02-06 20:52:35,965 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 20:52:56,991 INFO [train.py:901] (2/4) Epoch 18, batch 600, loss[loss=0.2237, simple_loss=0.3045, pruned_loss=0.07139, over 8537.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2956, pruned_loss=0.06789, over 1532133.77 frames. ], batch size: 28, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:11,899 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 20:53:32,793 INFO [train.py:901] (2/4) Epoch 18, batch 650, loss[loss=0.1881, simple_loss=0.2587, pruned_loss=0.05875, over 7645.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2951, pruned_loss=0.06763, over 1546379.46 frames. ], batch size: 19, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:43,406 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.461e+02 2.865e+02 3.365e+02 7.739e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-06 20:54:00,543 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 20:54:09,457 INFO [train.py:901] (2/4) Epoch 18, batch 700, loss[loss=0.2299, simple_loss=0.3103, pruned_loss=0.07471, over 8761.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2944, pruned_loss=0.06745, over 1557310.55 frames. ], batch size: 49, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:29,540 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 20:54:30,790 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1008, 2.6567, 3.5468, 2.2481, 2.0891, 3.7118, 0.7116, 2.2171], + device='cuda:2'), covar=tensor([0.1657, 0.1417, 0.0269, 0.1887, 0.2930, 0.0318, 0.2686, 0.1640], + device='cuda:2'), in_proj_covar=tensor([0.0178, 0.0184, 0.0117, 0.0217, 0.0262, 0.0123, 0.0165, 0.0182], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 20:54:44,137 INFO [train.py:901] (2/4) Epoch 18, batch 750, loss[loss=0.1836, simple_loss=0.2574, pruned_loss=0.05492, over 7530.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.293, pruned_loss=0.06638, over 1570752.05 frames. ], batch size: 18, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:53,226 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.332e+02 3.041e+02 3.730e+02 6.216e+02, threshold=6.081e+02, percent-clipped=3.0 +2023-02-06 20:54:58,181 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 20:55:08,082 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 20:55:19,814 INFO [train.py:901] (2/4) Epoch 18, batch 800, loss[loss=0.1779, simple_loss=0.2695, pruned_loss=0.04317, over 8088.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2934, pruned_loss=0.06617, over 1586389.46 frames. ], batch size: 21, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:55:49,565 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:51,609 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:54,217 INFO [train.py:901] (2/4) Epoch 18, batch 850, loss[loss=0.2455, simple_loss=0.3233, pruned_loss=0.08389, over 8501.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.294, pruned_loss=0.06637, over 1597549.35 frames. ], batch size: 29, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:03,035 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.308e+02 2.906e+02 3.562e+02 8.427e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 20:56:13,562 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:56:30,843 INFO [train.py:901] (2/4) Epoch 18, batch 900, loss[loss=0.2066, simple_loss=0.3007, pruned_loss=0.05624, over 8196.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06722, over 1603965.26 frames. ], batch size: 23, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:35,137 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2904, 1.7155, 1.9344, 1.6810, 1.1372, 1.7158, 1.9501, 1.9872], + device='cuda:2'), covar=tensor([0.0494, 0.1140, 0.1485, 0.1302, 0.0600, 0.1386, 0.0696, 0.0541], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 20:56:50,638 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:05,377 INFO [train.py:901] (2/4) Epoch 18, batch 950, loss[loss=0.1814, simple_loss=0.266, pruned_loss=0.04842, over 7807.00 frames. 
], tot_loss[loss=0.2146, simple_loss=0.2947, pruned_loss=0.06722, over 1607544.72 frames. ], batch size: 20, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:10,956 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:13,033 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:14,179 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.844e+02 2.531e+02 3.020e+02 3.937e+02 8.991e+02, threshold=6.039e+02, percent-clipped=7.0 +2023-02-06 20:57:14,411 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2494, 2.3551, 2.2072, 2.9693, 1.9779, 2.1383, 2.2962, 2.6604], + device='cuda:2'), covar=tensor([0.0693, 0.0734, 0.0720, 0.0456, 0.0846, 0.0912, 0.0715, 0.0559], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0200, 0.0251, 0.0212, 0.0207, 0.0249, 0.0253, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 20:57:29,251 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 20:57:40,369 INFO [train.py:901] (2/4) Epoch 18, batch 1000, loss[loss=0.2369, simple_loss=0.3159, pruned_loss=0.07894, over 8104.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2951, pruned_loss=0.06742, over 1609632.83 frames. ], batch size: 23, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:56,203 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3826, 1.7276, 2.6718, 1.2329, 1.8592, 1.7574, 1.4701, 1.8009], + device='cuda:2'), covar=tensor([0.1960, 0.2513, 0.0844, 0.4549, 0.1950, 0.3177, 0.2298, 0.2339], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0574, 0.0544, 0.0618, 0.0636, 0.0577, 0.0510, 0.0624], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:58:05,512 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 20:58:16,696 INFO [train.py:901] (2/4) Epoch 18, batch 1050, loss[loss=0.1801, simple_loss=0.2571, pruned_loss=0.05153, over 7260.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2944, pruned_loss=0.067, over 1609504.31 frames. ], batch size: 16, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:58:18,825 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 20:58:25,546 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.454e+02 3.228e+02 4.133e+02 8.765e+02, threshold=6.456e+02, percent-clipped=4.0 +2023-02-06 20:58:51,053 INFO [train.py:901] (2/4) Epoch 18, batch 1100, loss[loss=0.1734, simple_loss=0.2504, pruned_loss=0.04822, over 7705.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2945, pruned_loss=0.06714, over 1610063.76 frames. 
], batch size: 18, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:58:56,213 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3129, 4.2554, 3.8820, 2.2147, 3.7883, 3.8314, 3.9121, 3.6571], + device='cuda:2'), covar=tensor([0.0656, 0.0548, 0.1030, 0.3918, 0.0783, 0.0996, 0.1199, 0.0834], + device='cuda:2'), in_proj_covar=tensor([0.0507, 0.0419, 0.0419, 0.0517, 0.0409, 0.0422, 0.0407, 0.0365], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 20:59:26,908 INFO [train.py:901] (2/4) Epoch 18, batch 1150, loss[loss=0.2112, simple_loss=0.3007, pruned_loss=0.06088, over 8187.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.294, pruned_loss=0.06682, over 1607824.67 frames. ], batch size: 23, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:59:29,613 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 20:59:35,879 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 2.366e+02 2.909e+02 3.553e+02 5.350e+02, threshold=5.817e+02, percent-clipped=0.0 +2023-02-06 20:59:44,957 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6499, 2.1302, 3.2036, 1.3304, 2.4370, 1.9738, 1.7472, 2.3156], + device='cuda:2'), covar=tensor([0.2094, 0.2597, 0.1023, 0.4908, 0.1933, 0.3495, 0.2292, 0.2545], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0577, 0.0550, 0.0625, 0.0642, 0.0583, 0.0515, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:00:02,026 INFO [train.py:901] (2/4) Epoch 18, batch 1200, loss[loss=0.2177, simple_loss=0.3048, pruned_loss=0.06534, over 8598.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2931, pruned_loss=0.06608, over 1610617.67 frames. ], batch size: 34, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:04,276 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2392, 1.1852, 3.3869, 1.0111, 2.9852, 2.8335, 3.0834, 3.0047], + device='cuda:2'), covar=tensor([0.0810, 0.3971, 0.0741, 0.3987, 0.1302, 0.1069, 0.0769, 0.0832], + device='cuda:2'), in_proj_covar=tensor([0.0582, 0.0618, 0.0662, 0.0591, 0.0674, 0.0577, 0.0569, 0.0642], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:00:11,879 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:13,945 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:16,607 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:29,710 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:31,808 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:37,163 INFO [train.py:901] (2/4) Epoch 18, batch 1250, loss[loss=0.2026, simple_loss=0.288, pruned_loss=0.05863, over 8105.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2934, pruned_loss=0.06589, over 1614510.45 frames. 
], batch size: 23, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:46,117 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5739, 1.9320, 5.8880, 2.3813, 4.7879, 4.8043, 5.5386, 5.4211], + device='cuda:2'), covar=tensor([0.1012, 0.5625, 0.0758, 0.4443, 0.1953, 0.1380, 0.0771, 0.0730], + device='cuda:2'), in_proj_covar=tensor([0.0584, 0.0621, 0.0664, 0.0594, 0.0677, 0.0580, 0.0572, 0.0645], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:00:47,264 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.310e+02 2.834e+02 3.613e+02 5.274e+02, threshold=5.668e+02, percent-clipped=0.0 +2023-02-06 21:00:54,246 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:04,840 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:08,266 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:12,862 INFO [train.py:901] (2/4) Epoch 18, batch 1300, loss[loss=0.2701, simple_loss=0.3448, pruned_loss=0.09773, over 8328.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2937, pruned_loss=0.06598, over 1615476.45 frames. ], batch size: 25, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:15,833 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:34,957 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3974, 1.7489, 3.3869, 1.2485, 2.4229, 2.0063, 1.4589, 2.4668], + device='cuda:2'), covar=tensor([0.2109, 0.2698, 0.0762, 0.4645, 0.1826, 0.3085, 0.2363, 0.2026], + device='cuda:2'), in_proj_covar=tensor([0.0514, 0.0577, 0.0549, 0.0624, 0.0643, 0.0583, 0.0513, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:01:37,541 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:38,945 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1702, 2.3341, 2.0668, 2.9256, 1.4564, 1.8221, 2.1374, 2.5414], + device='cuda:2'), covar=tensor([0.0685, 0.0765, 0.0790, 0.0348, 0.0995, 0.1181, 0.0784, 0.0620], + device='cuda:2'), in_proj_covar=tensor([0.0236, 0.0202, 0.0254, 0.0214, 0.0210, 0.0251, 0.0256, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:01:47,854 INFO [train.py:901] (2/4) Epoch 18, batch 1350, loss[loss=0.2419, simple_loss=0.3264, pruned_loss=0.07873, over 8523.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.293, pruned_loss=0.06571, over 1612643.41 frames. ], batch size: 48, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:56,586 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.404e+02 2.906e+02 3.545e+02 6.613e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 21:02:15,436 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:02:23,336 INFO [train.py:901] (2/4) Epoch 18, batch 1400, loss[loss=0.2521, simple_loss=0.3084, pruned_loss=0.09785, over 7254.00 frames. 
], tot_loss[loss=0.2132, simple_loss=0.2942, pruned_loss=0.06612, over 1616311.63 frames. ], batch size: 16, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:02:43,933 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.03 vs. limit=5.0 +2023-02-06 21:02:57,612 INFO [train.py:901] (2/4) Epoch 18, batch 1450, loss[loss=0.1529, simple_loss=0.2331, pruned_loss=0.03634, over 7202.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2934, pruned_loss=0.066, over 1612685.54 frames. ], batch size: 16, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:06,393 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.491e+02 3.050e+02 4.246e+02 7.467e+02, threshold=6.100e+02, percent-clipped=3.0 +2023-02-06 21:03:07,097 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 21:03:33,394 INFO [train.py:901] (2/4) Epoch 18, batch 1500, loss[loss=0.1929, simple_loss=0.274, pruned_loss=0.05587, over 7789.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2933, pruned_loss=0.06591, over 1614689.31 frames. ], batch size: 19, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:44,791 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:08,397 INFO [train.py:901] (2/4) Epoch 18, batch 1550, loss[loss=0.2079, simple_loss=0.2847, pruned_loss=0.06557, over 8230.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2943, pruned_loss=0.06668, over 1618869.51 frames. ], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:17,340 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.366e+02 2.933e+02 3.736e+02 6.367e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-06 21:04:38,201 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:43,428 INFO [train.py:901] (2/4) Epoch 18, batch 1600, loss[loss=0.1783, simple_loss=0.2515, pruned_loss=0.05257, over 7548.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2939, pruned_loss=0.06654, over 1618295.31 frames. 
], batch size: 18, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:56,943 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:07,017 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:10,363 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:15,396 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:18,029 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:18,147 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8012, 2.8385, 2.6664, 4.2171, 1.6362, 2.2155, 2.3742, 3.3011], + device='cuda:2'), covar=tensor([0.0643, 0.0840, 0.0756, 0.0193, 0.1170, 0.1309, 0.1090, 0.0682], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0202, 0.0251, 0.0212, 0.0208, 0.0249, 0.0256, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:05:19,994 INFO [train.py:901] (2/4) Epoch 18, batch 1650, loss[loss=0.1673, simple_loss=0.2435, pruned_loss=0.04558, over 8040.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2934, pruned_loss=0.06612, over 1618961.85 frames. ], batch size: 20, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:05:28,835 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.375e+02 2.907e+02 3.508e+02 7.626e+02, threshold=5.813e+02, percent-clipped=3.0 +2023-02-06 21:05:33,157 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2101, 3.0751, 2.9164, 1.5447, 2.8553, 2.9281, 2.8539, 2.7220], + device='cuda:2'), covar=tensor([0.1283, 0.0890, 0.1377, 0.5227, 0.1277, 0.1360, 0.1695, 0.1204], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0417, 0.0418, 0.0523, 0.0412, 0.0424, 0.0410, 0.0366], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:05:33,259 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:54,417 INFO [train.py:901] (2/4) Epoch 18, batch 1700, loss[loss=0.1807, simple_loss=0.2774, pruned_loss=0.04198, over 8187.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2928, pruned_loss=0.06584, over 1616301.98 frames. ], batch size: 23, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:06:28,711 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:30,624 INFO [train.py:901] (2/4) Epoch 18, batch 1750, loss[loss=0.2188, simple_loss=0.3022, pruned_loss=0.06769, over 8509.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06714, over 1615992.49 frames. 
], batch size: 26, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:06:32,200 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:39,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.310e+02 2.913e+02 3.912e+02 7.750e+02, threshold=5.826e+02, percent-clipped=6.0 +2023-02-06 21:06:39,806 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:49,072 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.86 vs. limit=5.0 +2023-02-06 21:07:05,719 INFO [train.py:901] (2/4) Epoch 18, batch 1800, loss[loss=0.2261, simple_loss=0.3077, pruned_loss=0.07227, over 8481.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2933, pruned_loss=0.0664, over 1614718.10 frames. ], batch size: 28, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:37,627 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:40,841 INFO [train.py:901] (2/4) Epoch 18, batch 1850, loss[loss=0.2232, simple_loss=0.3006, pruned_loss=0.07292, over 8597.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2931, pruned_loss=0.06627, over 1615191.86 frames. ], batch size: 31, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:49,483 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:51,403 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.274e+02 2.776e+02 3.369e+02 8.658e+02, threshold=5.552e+02, percent-clipped=2.0 +2023-02-06 21:07:55,859 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 21:08:01,848 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0868, 2.4244, 2.0047, 2.8780, 1.2155, 1.7058, 1.9422, 2.4797], + device='cuda:2'), covar=tensor([0.0686, 0.0726, 0.0856, 0.0380, 0.1203, 0.1299, 0.1007, 0.0663], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0199, 0.0249, 0.0211, 0.0206, 0.0247, 0.0253, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:08:05,261 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:14,572 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139307.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:14,591 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0224, 1.7525, 3.4564, 1.4899, 2.2557, 3.8392, 3.9267, 3.2866], + device='cuda:2'), covar=tensor([0.1113, 0.1740, 0.0418, 0.2174, 0.1264, 0.0230, 0.0480, 0.0586], + device='cuda:2'), in_proj_covar=tensor([0.0281, 0.0311, 0.0274, 0.0305, 0.0293, 0.0253, 0.0391, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 21:08:17,141 INFO [train.py:901] (2/4) Epoch 18, batch 1900, loss[loss=0.1947, simple_loss=0.2707, pruned_loss=0.05937, over 8247.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2934, pruned_loss=0.06654, over 1619310.05 frames. ], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:32,784 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0 +2023-02-06 21:08:52,412 INFO [train.py:901] (2/4) Epoch 18, batch 1950, loss[loss=0.2024, simple_loss=0.284, pruned_loss=0.06039, over 8340.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2928, pruned_loss=0.06588, over 1620059.79 frames. ], batch size: 25, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:55,262 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 21:09:01,254 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.421e+02 2.964e+02 3.877e+02 7.962e+02, threshold=5.927e+02, percent-clipped=5.0 +2023-02-06 21:09:08,112 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 21:09:11,155 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:28,235 INFO [train.py:901] (2/4) Epoch 18, batch 2000, loss[loss=0.2225, simple_loss=0.3046, pruned_loss=0.07021, over 7974.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.294, pruned_loss=0.06613, over 1624011.27 frames. ], batch size: 21, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:09:28,240 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 21:09:30,568 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:33,876 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139419.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:42,129 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139430.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:48,232 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:49,604 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1044, 2.3603, 1.9522, 2.9649, 1.3263, 1.6559, 2.0259, 2.3218], + device='cuda:2'), covar=tensor([0.0697, 0.0809, 0.0929, 0.0321, 0.1128, 0.1338, 0.0943, 0.0769], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0200, 0.0250, 0.0213, 0.0207, 0.0249, 0.0255, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:09:51,681 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139444.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:59,137 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139455.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:10:02,980 INFO [train.py:901] (2/4) Epoch 18, batch 2050, loss[loss=0.2026, simple_loss=0.2674, pruned_loss=0.0689, over 7287.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2933, pruned_loss=0.06606, over 1616969.55 frames. ], batch size: 16, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:10:12,675 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.515e+02 3.080e+02 3.592e+02 7.733e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 21:10:39,816 INFO [train.py:901] (2/4) Epoch 18, batch 2100, loss[loss=0.2259, simple_loss=0.2972, pruned_loss=0.07734, over 7816.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2928, pruned_loss=0.06566, over 1613110.30 frames. 
], batch size: 20, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:15,315 INFO [train.py:901] (2/4) Epoch 18, batch 2150, loss[loss=0.2272, simple_loss=0.3087, pruned_loss=0.07284, over 8339.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2941, pruned_loss=0.06653, over 1610411.87 frames. ], batch size: 26, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:24,960 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.487e+02 3.024e+02 3.808e+02 9.008e+02, threshold=6.048e+02, percent-clipped=4.0 +2023-02-06 21:11:34,711 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,086 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,157 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:50,324 INFO [train.py:901] (2/4) Epoch 18, batch 2200, loss[loss=0.2326, simple_loss=0.3195, pruned_loss=0.07286, over 8142.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2937, pruned_loss=0.067, over 1611301.11 frames. ], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:10,498 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:13,210 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:19,363 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:19,531 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5615, 1.9554, 3.2536, 1.3018, 2.5302, 2.0087, 1.6058, 2.3300], + device='cuda:2'), covar=tensor([0.1854, 0.2597, 0.0854, 0.4643, 0.1715, 0.3050, 0.2244, 0.2256], + device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0577, 0.0554, 0.0622, 0.0640, 0.0577, 0.0513, 0.0627], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:12:26,632 INFO [train.py:901] (2/4) Epoch 18, batch 2250, loss[loss=0.204, simple_loss=0.2927, pruned_loss=0.0576, over 8041.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2943, pruned_loss=0.06759, over 1609803.87 frames. ], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:31,130 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:36,176 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.519e+02 3.270e+02 4.475e+02 8.912e+02, threshold=6.540e+02, percent-clipped=11.0 +2023-02-06 21:13:01,635 INFO [train.py:901] (2/4) Epoch 18, batch 2300, loss[loss=0.196, simple_loss=0.2852, pruned_loss=0.05344, over 8569.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2933, pruned_loss=0.06718, over 1607779.47 frames. 
], batch size: 31, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:04,655 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:32,011 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:37,206 INFO [train.py:901] (2/4) Epoch 18, batch 2350, loss[loss=0.2211, simple_loss=0.3041, pruned_loss=0.06905, over 8489.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2947, pruned_loss=0.06754, over 1613271.67 frames. ], batch size: 29, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:40,614 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:44,709 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7929, 1.5903, 4.0189, 1.4304, 3.5443, 3.2911, 3.6170, 3.5153], + device='cuda:2'), covar=tensor([0.0687, 0.4055, 0.0565, 0.4094, 0.1247, 0.1042, 0.0634, 0.0786], + device='cuda:2'), in_proj_covar=tensor([0.0585, 0.0618, 0.0668, 0.0594, 0.0673, 0.0578, 0.0574, 0.0646], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:13:47,226 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.439e+02 2.945e+02 3.859e+02 6.515e+02, threshold=5.891e+02, percent-clipped=0.0 +2023-02-06 21:13:49,997 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2714, 1.6678, 4.5562, 2.1081, 2.4946, 5.2215, 5.1808, 4.5204], + device='cuda:2'), covar=tensor([0.1056, 0.1752, 0.0260, 0.1701, 0.1114, 0.0134, 0.0411, 0.0505], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0314, 0.0274, 0.0306, 0.0295, 0.0254, 0.0394, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 21:13:51,087 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 21:13:55,504 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:09,010 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:11,605 INFO [train.py:901] (2/4) Epoch 18, batch 2400, loss[loss=0.1755, simple_loss=0.2747, pruned_loss=0.03809, over 8248.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2948, pruned_loss=0.06766, over 1616060.14 frames. ], batch size: 22, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:20,250 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:21,630 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:48,487 INFO [train.py:901] (2/4) Epoch 18, batch 2450, loss[loss=0.2067, simple_loss=0.2758, pruned_loss=0.06882, over 7702.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2936, pruned_loss=0.06735, over 1612011.33 frames. 
], batch size: 18, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:53,489 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:58,180 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.359e+02 2.854e+02 3.442e+02 8.627e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-06 21:15:23,627 INFO [train.py:901] (2/4) Epoch 18, batch 2500, loss[loss=0.2392, simple_loss=0.3133, pruned_loss=0.08256, over 7583.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2939, pruned_loss=0.06706, over 1615205.67 frames. ], batch size: 71, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:15:39,670 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:15:46,144 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.24 vs. limit=5.0 +2023-02-06 21:15:47,235 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:00,240 INFO [train.py:901] (2/4) Epoch 18, batch 2550, loss[loss=0.1901, simple_loss=0.2568, pruned_loss=0.06168, over 7209.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2925, pruned_loss=0.06652, over 1611663.36 frames. ], batch size: 16, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:07,337 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:09,810 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.329e+02 2.906e+02 3.594e+02 7.294e+02, threshold=5.811e+02, percent-clipped=3.0 +2023-02-06 21:16:25,053 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:32,758 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6977, 1.9270, 1.9724, 1.3277, 2.1065, 1.4136, 0.6158, 1.9707], + device='cuda:2'), covar=tensor([0.0402, 0.0279, 0.0234, 0.0449, 0.0341, 0.0677, 0.0744, 0.0188], + device='cuda:2'), in_proj_covar=tensor([0.0430, 0.0371, 0.0321, 0.0426, 0.0357, 0.0517, 0.0378, 0.0398], + device='cuda:2'), out_proj_covar=tensor([1.1712e-04, 9.8174e-05, 8.5056e-05, 1.1353e-04, 9.5178e-05, 1.4837e-04, + 1.0278e-04, 1.0658e-04], device='cuda:2') +2023-02-06 21:16:35,503 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:36,618 INFO [train.py:901] (2/4) Epoch 18, batch 2600, loss[loss=0.2398, simple_loss=0.3293, pruned_loss=0.07513, over 8242.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2928, pruned_loss=0.06667, over 1611017.68 frames. 
], batch size: 24, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:40,978 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6606, 1.6099, 4.9242, 2.0669, 4.3634, 4.0551, 4.4212, 4.2352], + device='cuda:2'), covar=tensor([0.0556, 0.4332, 0.0404, 0.3425, 0.1046, 0.0815, 0.0484, 0.0648], + device='cuda:2'), in_proj_covar=tensor([0.0585, 0.0619, 0.0668, 0.0595, 0.0674, 0.0578, 0.0574, 0.0647], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:16:44,583 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:44,601 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4966, 1.8120, 2.6163, 1.3027, 1.8892, 1.8881, 1.5935, 1.7950], + device='cuda:2'), covar=tensor([0.1806, 0.2392, 0.1013, 0.4301, 0.1851, 0.3043, 0.2143, 0.2300], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0575, 0.0552, 0.0620, 0.0637, 0.0578, 0.0513, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:16:52,718 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:01,563 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:03,001 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:05,697 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7416, 1.9412, 1.6592, 2.2507, 1.3003, 1.4464, 1.7267, 1.8751], + device='cuda:2'), covar=tensor([0.0771, 0.0721, 0.0965, 0.0429, 0.0958, 0.1300, 0.0744, 0.0785], + device='cuda:2'), in_proj_covar=tensor([0.0238, 0.0202, 0.0254, 0.0215, 0.0209, 0.0253, 0.0258, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:17:10,538 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:11,784 INFO [train.py:901] (2/4) Epoch 18, batch 2650, loss[loss=0.2111, simple_loss=0.2998, pruned_loss=0.06119, over 8257.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2931, pruned_loss=0.06648, over 1614703.40 frames. ], batch size: 24, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:22,348 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 2.973e+02 3.666e+02 6.732e+02, threshold=5.945e+02, percent-clipped=3.0 +2023-02-06 21:17:43,051 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 21:17:44,109 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.12 vs. limit=5.0 +2023-02-06 21:17:47,904 INFO [train.py:901] (2/4) Epoch 18, batch 2700, loss[loss=0.289, simple_loss=0.3412, pruned_loss=0.1184, over 6711.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2932, pruned_loss=0.06612, over 1615805.73 frames. 
], batch size: 71, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:55,819 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:00,545 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:02,537 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:09,874 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 21:18:16,407 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:23,213 INFO [train.py:901] (2/4) Epoch 18, batch 2750, loss[loss=0.178, simple_loss=0.2585, pruned_loss=0.04872, over 7931.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2942, pruned_loss=0.06675, over 1616105.71 frames. ], batch size: 20, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:18:27,488 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:28,895 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:33,790 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.336e+02 2.919e+02 3.807e+02 8.313e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-06 21:19:00,768 INFO [train.py:901] (2/4) Epoch 18, batch 2800, loss[loss=0.1924, simple_loss=0.2537, pruned_loss=0.06552, over 7243.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2943, pruned_loss=0.06657, over 1620372.64 frames. ], batch size: 16, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:01,489 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:02,875 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7447, 1.5740, 1.8192, 1.4716, 1.2179, 1.5509, 2.2256, 1.9581], + device='cuda:2'), covar=tensor([0.0474, 0.1297, 0.1733, 0.1511, 0.0572, 0.1562, 0.0636, 0.0682], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0158, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 21:19:25,737 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:35,885 INFO [train.py:901] (2/4) Epoch 18, batch 2850, loss[loss=0.2082, simple_loss=0.2934, pruned_loss=0.0615, over 7810.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2944, pruned_loss=0.06646, over 1622226.50 frames. 
], batch size: 20, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:39,552 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140266.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:45,685 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.447e+02 2.919e+02 3.574e+02 5.806e+02, threshold=5.838e+02, percent-clipped=0.0 +2023-02-06 21:19:47,308 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:50,909 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:51,970 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 21:19:52,309 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:07,401 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140304.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:10,144 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7836, 2.3549, 3.2363, 1.7065, 1.5954, 3.3178, 0.7361, 2.0667], + device='cuda:2'), covar=tensor([0.1701, 0.1392, 0.0350, 0.2039, 0.3255, 0.0381, 0.2673, 0.1731], + device='cuda:2'), in_proj_covar=tensor([0.0180, 0.0189, 0.0119, 0.0219, 0.0263, 0.0126, 0.0166, 0.0184], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 21:20:11,951 INFO [train.py:901] (2/4) Epoch 18, batch 2900, loss[loss=0.2059, simple_loss=0.2889, pruned_loss=0.06146, over 8020.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2953, pruned_loss=0.06742, over 1619930.38 frames. ], batch size: 22, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:14,965 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:23,767 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:25,188 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140329.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:31,571 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6722, 1.6750, 2.1843, 1.4443, 1.2333, 2.2626, 0.4180, 1.3783], + device='cuda:2'), covar=tensor([0.1939, 0.1675, 0.0445, 0.1330, 0.3335, 0.0404, 0.2657, 0.1421], + device='cuda:2'), in_proj_covar=tensor([0.0181, 0.0190, 0.0120, 0.0220, 0.0265, 0.0127, 0.0166, 0.0185], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 21:20:33,501 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:44,337 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 21:20:47,991 INFO [train.py:901] (2/4) Epoch 18, batch 2950, loss[loss=0.2464, simple_loss=0.332, pruned_loss=0.08036, over 8474.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2952, pruned_loss=0.06732, over 1618049.82 frames. 
], batch size: 29, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:57,357 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.671e+02 3.280e+02 4.327e+02 7.160e+02, threshold=6.561e+02, percent-clipped=5.0 +2023-02-06 21:21:02,935 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3402, 1.5333, 4.5299, 1.7625, 3.9935, 3.7625, 4.0463, 3.9261], + device='cuda:2'), covar=tensor([0.0570, 0.4388, 0.0460, 0.3833, 0.1123, 0.0939, 0.0616, 0.0664], + device='cuda:2'), in_proj_covar=tensor([0.0586, 0.0618, 0.0665, 0.0596, 0.0675, 0.0578, 0.0575, 0.0646], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:21:18,956 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:21:23,788 INFO [train.py:901] (2/4) Epoch 18, batch 3000, loss[loss=0.1846, simple_loss=0.2607, pruned_loss=0.05423, over 7703.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2959, pruned_loss=0.06785, over 1618790.19 frames. ], batch size: 18, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:21:23,788 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 21:21:37,684 INFO [train.py:935] (2/4) Epoch 18, validation: loss=0.1773, simple_loss=0.2774, pruned_loss=0.03861, over 944034.00 frames. +2023-02-06 21:21:37,685 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 21:22:14,087 INFO [train.py:901] (2/4) Epoch 18, batch 3050, loss[loss=0.2229, simple_loss=0.3157, pruned_loss=0.06504, over 8331.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2952, pruned_loss=0.06751, over 1617099.10 frames. ], batch size: 25, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:16,898 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:21,665 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:24,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.663e+02 3.172e+02 4.119e+02 9.916e+02, threshold=6.345e+02, percent-clipped=7.0 +2023-02-06 21:22:40,213 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1764, 2.0041, 4.0472, 1.7440, 2.5361, 4.5493, 4.5854, 3.9141], + device='cuda:2'), covar=tensor([0.1179, 0.1595, 0.0344, 0.2053, 0.1187, 0.0188, 0.0405, 0.0586], + device='cuda:2'), in_proj_covar=tensor([0.0286, 0.0316, 0.0280, 0.0310, 0.0300, 0.0258, 0.0402, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 21:22:42,883 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140502.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:46,268 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:48,874 INFO [train.py:901] (2/4) Epoch 18, batch 3100, loss[loss=0.207, simple_loss=0.2815, pruned_loss=0.0662, over 7790.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2943, pruned_loss=0.06713, over 1615190.27 frames. 
], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:56,950 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:00,962 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:01,021 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:01,872 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 21:23:07,875 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:09,291 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:15,572 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:25,516 INFO [train.py:901] (2/4) Epoch 18, batch 3150, loss[loss=0.1903, simple_loss=0.2651, pruned_loss=0.05769, over 7649.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2936, pruned_loss=0.06693, over 1611017.83 frames. ], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:23:26,373 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140562.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:27,685 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:34,940 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.438e+02 2.948e+02 4.263e+02 1.019e+03, threshold=5.895e+02, percent-clipped=4.0 +2023-02-06 21:23:38,569 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:40,647 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:43,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:59,140 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:01,005 INFO [train.py:901] (2/4) Epoch 18, batch 3200, loss[loss=0.2719, simple_loss=0.3341, pruned_loss=0.1049, over 6770.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2939, pruned_loss=0.0672, over 1609599.38 frames. 
], batch size: 71, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:06,658 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3461, 2.8650, 3.2448, 1.6509, 3.3367, 2.2749, 1.5972, 2.4336], + device='cuda:2'), covar=tensor([0.0704, 0.0265, 0.0196, 0.0700, 0.0397, 0.0593, 0.0781, 0.0414], + device='cuda:2'), in_proj_covar=tensor([0.0434, 0.0375, 0.0321, 0.0430, 0.0360, 0.0520, 0.0379, 0.0399], + device='cuda:2'), out_proj_covar=tensor([1.1818e-04, 9.9388e-05, 8.5057e-05, 1.1472e-04, 9.6150e-05, 1.4908e-04, + 1.0321e-04, 1.0654e-04], device='cuda:2') +2023-02-06 21:24:07,872 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:36,820 INFO [train.py:901] (2/4) Epoch 18, batch 3250, loss[loss=0.1816, simple_loss=0.2697, pruned_loss=0.04673, over 8300.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2945, pruned_loss=0.06728, over 1612310.86 frames. ], batch size: 23, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:46,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.256e+02 2.889e+02 3.448e+02 6.536e+02, threshold=5.777e+02, percent-clipped=1.0 +2023-02-06 21:25:13,075 INFO [train.py:901] (2/4) Epoch 18, batch 3300, loss[loss=0.2594, simple_loss=0.3243, pruned_loss=0.09719, over 7783.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06722, over 1613447.14 frames. ], batch size: 19, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:30,567 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:33,926 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:39,313 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:44,917 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8592, 1.3812, 4.2275, 1.7556, 3.3234, 3.4269, 3.7906, 3.7671], + device='cuda:2'), covar=tensor([0.1419, 0.6559, 0.1043, 0.4850, 0.2406, 0.1740, 0.1123, 0.1069], + device='cuda:2'), in_proj_covar=tensor([0.0586, 0.0617, 0.0664, 0.0593, 0.0672, 0.0575, 0.0572, 0.0640], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:25:47,443 INFO [train.py:901] (2/4) Epoch 18, batch 3350, loss[loss=0.1793, simple_loss=0.2685, pruned_loss=0.04505, over 8091.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2955, pruned_loss=0.06751, over 1614465.55 frames. ], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:57,584 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.504e+02 2.969e+02 3.727e+02 7.020e+02, threshold=5.938e+02, percent-clipped=2.0 +2023-02-06 21:26:17,431 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4219, 2.1170, 2.9045, 2.4282, 2.8327, 2.3590, 2.0553, 1.5875], + device='cuda:2'), covar=tensor([0.4572, 0.4442, 0.1627, 0.3121, 0.2117, 0.2717, 0.1799, 0.4877], + device='cuda:2'), in_proj_covar=tensor([0.0925, 0.0943, 0.0781, 0.0909, 0.0979, 0.0864, 0.0729, 0.0808], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:26:23,947 INFO [train.py:901] (2/4) Epoch 18, batch 3400, loss[loss=0.2279, simple_loss=0.3052, pruned_loss=0.07528, over 8555.00 frames. 
], tot_loss[loss=0.2154, simple_loss=0.295, pruned_loss=0.06792, over 1612945.62 frames. ], batch size: 39, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:35,825 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:42,137 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:46,744 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:52,094 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:55,938 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 21:26:58,803 INFO [train.py:901] (2/4) Epoch 18, batch 3450, loss[loss=0.2291, simple_loss=0.3163, pruned_loss=0.07099, over 8502.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2958, pruned_loss=0.06825, over 1616000.70 frames. ], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:59,023 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:01,086 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:03,751 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:05,723 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:08,275 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.419e+02 3.065e+02 3.703e+02 6.567e+02, threshold=6.131e+02, percent-clipped=3.0 +2023-02-06 21:27:34,152 INFO [train.py:901] (2/4) Epoch 18, batch 3500, loss[loss=0.2126, simple_loss=0.2986, pruned_loss=0.06329, over 8463.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2956, pruned_loss=0.06789, over 1616215.51 frames. ], batch size: 49, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:27:37,701 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8174, 3.8064, 3.4893, 2.0585, 3.3615, 3.5246, 3.3547, 3.2003], + device='cuda:2'), covar=tensor([0.1112, 0.0700, 0.1355, 0.4483, 0.1089, 0.1073, 0.1585, 0.0919], + device='cuda:2'), in_proj_covar=tensor([0.0508, 0.0419, 0.0418, 0.0520, 0.0412, 0.0420, 0.0405, 0.0367], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:27:51,057 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 21:27:51,204 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:09,210 INFO [train.py:901] (2/4) Epoch 18, batch 3550, loss[loss=0.3175, simple_loss=0.37, pruned_loss=0.1325, over 8504.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2972, pruned_loss=0.06899, over 1619699.26 frames. 
], batch size: 49, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:12,006 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:12,767 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140966.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:18,756 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.456e+02 3.083e+02 3.681e+02 6.081e+02, threshold=6.167e+02, percent-clipped=0.0 +2023-02-06 21:28:26,488 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:30,650 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:44,277 INFO [train.py:901] (2/4) Epoch 18, batch 3600, loss[loss=0.212, simple_loss=0.2994, pruned_loss=0.06227, over 7808.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2961, pruned_loss=0.06812, over 1617783.67 frames. ], batch size: 20, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:49,270 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:20,362 INFO [train.py:901] (2/4) Epoch 18, batch 3650, loss[loss=0.2212, simple_loss=0.3061, pruned_loss=0.06811, over 8584.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2959, pruned_loss=0.06815, over 1619372.11 frames. ], batch size: 31, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:30,818 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.345e+02 2.956e+02 3.633e+02 6.454e+02, threshold=5.912e+02, percent-clipped=1.0 +2023-02-06 21:29:37,715 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:55,736 INFO [train.py:901] (2/4) Epoch 18, batch 3700, loss[loss=0.2033, simple_loss=0.2874, pruned_loss=0.0596, over 8131.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.295, pruned_loss=0.06759, over 1621385.87 frames. ], batch size: 22, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:57,135 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 21:30:02,953 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:20,680 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141145.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:32,121 INFO [train.py:901] (2/4) Epoch 18, batch 3750, loss[loss=0.2404, simple_loss=0.3115, pruned_loss=0.08464, over 7373.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2951, pruned_loss=0.0673, over 1620161.19 frames. 
], batch size: 71, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:30:32,305 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:39,106 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:41,865 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.679e+02 3.309e+02 4.099e+02 7.455e+02, threshold=6.618e+02, percent-clipped=7.0 +2023-02-06 21:31:00,284 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:07,418 INFO [train.py:901] (2/4) Epoch 18, batch 3800, loss[loss=0.2361, simple_loss=0.3061, pruned_loss=0.08308, over 7798.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2949, pruned_loss=0.06785, over 1615579.59 frames. ], batch size: 19, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:15,006 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:15,221 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-06 21:31:29,267 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:32,638 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:42,607 INFO [train.py:901] (2/4) Epoch 18, batch 3850, loss[loss=0.2276, simple_loss=0.3074, pruned_loss=0.0739, over 8501.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2963, pruned_loss=0.06833, over 1620722.81 frames. ], batch size: 28, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:46,881 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:52,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.500e+02 3.018e+02 3.684e+02 7.912e+02, threshold=6.036e+02, percent-clipped=1.0 +2023-02-06 21:31:53,563 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8782, 1.5367, 3.2377, 1.3815, 2.2721, 3.5661, 3.6544, 3.0858], + device='cuda:2'), covar=tensor([0.1209, 0.1774, 0.0353, 0.2132, 0.1146, 0.0267, 0.0635, 0.0586], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0311, 0.0275, 0.0306, 0.0297, 0.0254, 0.0396, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 21:31:55,475 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:00,518 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:03,626 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 21:32:03,922 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 21:32:17,072 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141309.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:18,342 INFO [train.py:901] (2/4) Epoch 18, batch 3900, loss[loss=0.2244, simple_loss=0.306, pruned_loss=0.07142, over 8431.00 frames. 
], tot_loss[loss=0.2149, simple_loss=0.2948, pruned_loss=0.06748, over 1613052.27 frames. ], batch size: 27, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:32:42,395 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:52,481 INFO [train.py:901] (2/4) Epoch 18, batch 3950, loss[loss=0.2173, simple_loss=0.2935, pruned_loss=0.07061, over 8091.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2964, pruned_loss=0.06836, over 1611656.91 frames. ], batch size: 21, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:33:02,713 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.421e+02 2.990e+02 3.795e+02 7.053e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 21:33:15,875 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:27,610 INFO [train.py:901] (2/4) Epoch 18, batch 4000, loss[loss=0.1747, simple_loss=0.2585, pruned_loss=0.04541, over 7652.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2954, pruned_loss=0.06821, over 1613480.22 frames. ], batch size: 19, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:33:37,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:40,606 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2261, 2.0307, 2.6818, 2.1875, 2.6565, 2.2484, 1.9728, 1.4554], + device='cuda:2'), covar=tensor([0.4845, 0.4416, 0.1755, 0.3313, 0.2237, 0.2672, 0.1791, 0.4857], + device='cuda:2'), in_proj_covar=tensor([0.0921, 0.0942, 0.0779, 0.0906, 0.0979, 0.0858, 0.0725, 0.0804], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:33:44,509 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141435.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:33:45,254 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4592, 1.6299, 2.2218, 1.3281, 1.5904, 1.7565, 1.5048, 1.4616], + device='cuda:2'), covar=tensor([0.1795, 0.2533, 0.0918, 0.4250, 0.1729, 0.3098, 0.2106, 0.2167], + device='cuda:2'), in_proj_covar=tensor([0.0510, 0.0575, 0.0549, 0.0620, 0.0637, 0.0578, 0.0512, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:33:58,725 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:00,764 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:02,050 INFO [train.py:901] (2/4) Epoch 18, batch 4050, loss[loss=0.2202, simple_loss=0.2976, pruned_loss=0.07145, over 8243.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2954, pruned_loss=0.06767, over 1612940.17 frames. 
], batch size: 24, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:34:12,707 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.514e+02 3.146e+02 4.229e+02 8.641e+02, threshold=6.293e+02, percent-clipped=9.0 +2023-02-06 21:34:16,937 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:34,631 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141505.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:38,486 INFO [train.py:901] (2/4) Epoch 18, batch 4100, loss[loss=0.1868, simple_loss=0.2767, pruned_loss=0.04838, over 8074.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06825, over 1614091.85 frames. ], batch size: 21, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:34:47,610 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3111, 1.6584, 1.6793, 0.9860, 1.7000, 1.3225, 0.2663, 1.6020], + device='cuda:2'), covar=tensor([0.0400, 0.0286, 0.0216, 0.0405, 0.0309, 0.0754, 0.0715, 0.0204], + device='cuda:2'), in_proj_covar=tensor([0.0431, 0.0370, 0.0317, 0.0428, 0.0359, 0.0516, 0.0374, 0.0395], + device='cuda:2'), out_proj_covar=tensor([1.1716e-04, 9.7988e-05, 8.3866e-05, 1.1409e-04, 9.5594e-05, 1.4816e-04, + 1.0182e-04, 1.0552e-04], device='cuda:2') +2023-02-06 21:34:50,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3122, 1.9806, 2.8062, 2.2647, 2.8316, 2.2715, 1.9112, 1.5040], + device='cuda:2'), covar=tensor([0.5012, 0.4974, 0.1624, 0.3247, 0.2089, 0.2825, 0.1940, 0.4932], + device='cuda:2'), in_proj_covar=tensor([0.0923, 0.0944, 0.0779, 0.0907, 0.0979, 0.0860, 0.0727, 0.0807], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:35:00,240 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:09,996 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6515, 2.0081, 2.0936, 1.3451, 2.2224, 1.6135, 0.5737, 1.9185], + device='cuda:2'), covar=tensor([0.0466, 0.0297, 0.0257, 0.0493, 0.0368, 0.0756, 0.0708, 0.0242], + device='cuda:2'), in_proj_covar=tensor([0.0432, 0.0372, 0.0318, 0.0430, 0.0359, 0.0518, 0.0376, 0.0397], + device='cuda:2'), out_proj_covar=tensor([1.1750e-04, 9.8399e-05, 8.4316e-05, 1.1459e-04, 9.5719e-05, 1.4871e-04, + 1.0227e-04, 1.0603e-04], device='cuda:2') +2023-02-06 21:35:13,072 INFO [train.py:901] (2/4) Epoch 18, batch 4150, loss[loss=0.2188, simple_loss=0.2903, pruned_loss=0.07363, over 7921.00 frames. ], tot_loss[loss=0.215, simple_loss=0.295, pruned_loss=0.06744, over 1615280.39 frames. 
], batch size: 20, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:17,337 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141567.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:20,207 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8442, 1.9151, 2.2057, 2.0578, 1.0654, 1.8928, 2.4862, 2.4604], + device='cuda:2'), covar=tensor([0.0471, 0.1166, 0.1516, 0.1219, 0.0578, 0.1384, 0.0573, 0.0497], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0099, 0.0161, 0.0114, 0.0138], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 21:35:22,670 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.507e+02 2.964e+02 3.952e+02 7.900e+02, threshold=5.928e+02, percent-clipped=3.0 +2023-02-06 21:35:37,892 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. limit=2.0 +2023-02-06 21:35:48,917 INFO [train.py:901] (2/4) Epoch 18, batch 4200, loss[loss=0.2195, simple_loss=0.3128, pruned_loss=0.06314, over 8108.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2952, pruned_loss=0.06731, over 1614637.91 frames. ], batch size: 23, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:55,112 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:02,347 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 21:36:15,990 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:23,889 INFO [train.py:901] (2/4) Epoch 18, batch 4250, loss[loss=0.1857, simple_loss=0.2719, pruned_loss=0.04977, over 8327.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2946, pruned_loss=0.0669, over 1614087.83 frames. ], batch size: 25, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:36:24,587 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 21:36:33,261 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.491e+02 2.994e+02 3.932e+02 8.485e+02, threshold=5.988e+02, percent-clipped=6.0 +2023-02-06 21:36:33,491 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:36,882 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:44,012 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:54,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:58,037 INFO [train.py:901] (2/4) Epoch 18, batch 4300, loss[loss=0.2053, simple_loss=0.2943, pruned_loss=0.05816, over 8477.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2959, pruned_loss=0.0676, over 1614862.31 frames. ], batch size: 29, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:32,932 INFO [train.py:901] (2/4) Epoch 18, batch 4350, loss[loss=0.2182, simple_loss=0.306, pruned_loss=0.06518, over 8493.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2959, pruned_loss=0.06718, over 1620231.05 frames. 
], batch size: 26, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:38,875 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-06 21:37:43,208 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.620e+02 3.197e+02 4.150e+02 9.266e+02, threshold=6.393e+02, percent-clipped=5.0 +2023-02-06 21:37:46,100 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141779.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:37:54,221 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 21:38:02,426 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:04,521 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:07,895 INFO [train.py:901] (2/4) Epoch 18, batch 4400, loss[loss=0.1967, simple_loss=0.2939, pruned_loss=0.0497, over 8539.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2948, pruned_loss=0.06623, over 1619526.50 frames. ], batch size: 28, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:36,599 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 21:38:43,835 INFO [train.py:901] (2/4) Epoch 18, batch 4450, loss[loss=0.1959, simple_loss=0.285, pruned_loss=0.05338, over 8331.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2938, pruned_loss=0.06559, over 1620195.62 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:53,320 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.507e+02 2.868e+02 3.524e+02 7.777e+02, threshold=5.735e+02, percent-clipped=2.0 +2023-02-06 21:38:54,252 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:07,098 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141894.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:39:11,835 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:18,270 INFO [train.py:901] (2/4) Epoch 18, batch 4500, loss[loss=0.2304, simple_loss=0.3233, pruned_loss=0.06872, over 8614.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2946, pruned_loss=0.06668, over 1612332.32 frames. ], batch size: 31, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:39:23,247 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:27,783 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 21:39:34,728 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:42,674 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:53,383 INFO [train.py:901] (2/4) Epoch 18, batch 4550, loss[loss=0.1815, simple_loss=0.254, pruned_loss=0.05446, over 7414.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2946, pruned_loss=0.06671, over 1608678.76 frames. 
], batch size: 17, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:40:03,496 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.488e+02 2.920e+02 3.454e+02 6.371e+02, threshold=5.840e+02, percent-clipped=2.0 +2023-02-06 21:40:29,728 INFO [train.py:901] (2/4) Epoch 18, batch 4600, loss[loss=0.2076, simple_loss=0.2691, pruned_loss=0.07309, over 7416.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2942, pruned_loss=0.06654, over 1610183.69 frames. ], batch size: 17, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:04,180 INFO [train.py:901] (2/4) Epoch 18, batch 4650, loss[loss=0.2353, simple_loss=0.3081, pruned_loss=0.08128, over 8501.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2947, pruned_loss=0.067, over 1612489.57 frames. ], batch size: 29, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:05,108 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:13,896 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.389e+02 2.901e+02 3.503e+02 7.256e+02, threshold=5.801e+02, percent-clipped=3.0 +2023-02-06 21:41:23,649 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:39,476 INFO [train.py:901] (2/4) Epoch 18, batch 4700, loss[loss=0.2069, simple_loss=0.2885, pruned_loss=0.06266, over 8469.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2941, pruned_loss=0.06651, over 1612590.39 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:42,996 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.8210, 4.7016, 4.3204, 2.9727, 4.2798, 4.3861, 4.4701, 4.0995], + device='cuda:2'), covar=tensor([0.0530, 0.0462, 0.0861, 0.3262, 0.0682, 0.0879, 0.1045, 0.0610], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0421, 0.0417, 0.0517, 0.0410, 0.0415, 0.0399, 0.0364], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:42:06,026 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142150.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:42:13,354 INFO [train.py:901] (2/4) Epoch 18, batch 4750, loss[loss=0.2241, simple_loss=0.3053, pruned_loss=0.07142, over 8143.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2944, pruned_loss=0.06687, over 1614298.60 frames. 
], batch size: 22, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:15,763 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4864, 2.0269, 3.2225, 1.3762, 2.3519, 1.9287, 1.7037, 2.4747], + device='cuda:2'), covar=tensor([0.1874, 0.2372, 0.0941, 0.4181, 0.1798, 0.2976, 0.2073, 0.2050], + device='cuda:2'), in_proj_covar=tensor([0.0510, 0.0574, 0.0546, 0.0620, 0.0633, 0.0578, 0.0510, 0.0621], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:42:23,428 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:23,882 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.398e+02 2.792e+02 3.541e+02 9.190e+02, threshold=5.585e+02, percent-clipped=4.0 +2023-02-06 21:42:24,107 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142175.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:42:30,577 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 21:42:32,632 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 21:42:41,665 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:49,343 INFO [train.py:901] (2/4) Epoch 18, batch 4800, loss[loss=0.2299, simple_loss=0.314, pruned_loss=0.0729, over 8497.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2945, pruned_loss=0.06722, over 1613404.30 frames. ], batch size: 28, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:59,095 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8619, 1.4975, 3.9683, 1.4145, 3.5490, 3.2963, 3.5996, 3.5044], + device='cuda:2'), covar=tensor([0.0599, 0.4090, 0.0640, 0.3944, 0.1132, 0.0951, 0.0585, 0.0661], + device='cuda:2'), in_proj_covar=tensor([0.0595, 0.0626, 0.0670, 0.0602, 0.0682, 0.0582, 0.0575, 0.0643], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:43:23,840 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 21:43:24,503 INFO [train.py:901] (2/4) Epoch 18, batch 4850, loss[loss=0.2451, simple_loss=0.3138, pruned_loss=0.0882, over 7973.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2938, pruned_loss=0.06693, over 1613577.32 frames. ], batch size: 21, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:33,963 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.522e+02 3.053e+02 3.876e+02 6.315e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-06 21:43:36,095 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:45,116 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:59,066 INFO [train.py:901] (2/4) Epoch 18, batch 4900, loss[loss=0.24, simple_loss=0.3283, pruned_loss=0.07589, over 8356.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2928, pruned_loss=0.06651, over 1612412.55 frames. 
], batch size: 24, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:11,380 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1015, 1.4335, 4.3137, 1.6852, 3.7887, 3.5647, 3.9109, 3.7538], + device='cuda:2'), covar=tensor([0.0724, 0.4445, 0.0568, 0.3953, 0.1218, 0.1026, 0.0620, 0.0752], + device='cuda:2'), in_proj_covar=tensor([0.0589, 0.0620, 0.0664, 0.0596, 0.0675, 0.0578, 0.0571, 0.0638], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:44:34,497 INFO [train.py:901] (2/4) Epoch 18, batch 4950, loss[loss=0.2128, simple_loss=0.2888, pruned_loss=0.06843, over 7657.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2909, pruned_loss=0.06508, over 1611405.59 frames. ], batch size: 19, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:44,951 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.451e+02 2.943e+02 3.789e+02 7.945e+02, threshold=5.886e+02, percent-clipped=1.0 +2023-02-06 21:44:51,991 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2096, 2.2677, 2.0720, 2.5390, 1.9558, 2.0624, 2.1349, 2.4801], + device='cuda:2'), covar=tensor([0.0616, 0.0697, 0.0770, 0.0507, 0.0825, 0.0922, 0.0647, 0.0535], + device='cuda:2'), in_proj_covar=tensor([0.0235, 0.0201, 0.0253, 0.0214, 0.0207, 0.0250, 0.0256, 0.0213], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:44:57,236 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:05,907 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:09,747 INFO [train.py:901] (2/4) Epoch 18, batch 5000, loss[loss=0.2662, simple_loss=0.3432, pruned_loss=0.09462, over 8499.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2935, pruned_loss=0.0666, over 1617828.06 frames. ], batch size: 26, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:44,314 INFO [train.py:901] (2/4) Epoch 18, batch 5050, loss[loss=0.2039, simple_loss=0.2908, pruned_loss=0.05852, over 8329.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2935, pruned_loss=0.06645, over 1619481.64 frames. ], batch size: 26, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:48,971 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-06 21:45:54,478 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.384e+02 2.804e+02 3.417e+02 5.925e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 21:46:04,022 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 21:46:19,170 INFO [train.py:901] (2/4) Epoch 18, batch 5100, loss[loss=0.2407, simple_loss=0.3194, pruned_loss=0.08102, over 8244.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2942, pruned_loss=0.06672, over 1618989.39 frames. 
], batch size: 24, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:46:46,513 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1839, 2.0255, 2.4691, 1.6697, 1.6351, 2.3737, 1.2432, 2.0629], + device='cuda:2'), covar=tensor([0.1921, 0.1164, 0.0427, 0.1423, 0.2524, 0.0585, 0.2087, 0.1372], + device='cuda:2'), in_proj_covar=tensor([0.0182, 0.0190, 0.0122, 0.0217, 0.0265, 0.0129, 0.0166, 0.0184], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 21:46:54,257 INFO [train.py:901] (2/4) Epoch 18, batch 5150, loss[loss=0.209, simple_loss=0.2903, pruned_loss=0.06385, over 8319.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2941, pruned_loss=0.0671, over 1619084.49 frames. ], batch size: 25, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:04,404 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.481e+02 3.004e+02 4.323e+02 1.197e+03, threshold=6.009e+02, percent-clipped=7.0 +2023-02-06 21:47:14,692 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:47:29,009 INFO [train.py:901] (2/4) Epoch 18, batch 5200, loss[loss=0.2026, simple_loss=0.2745, pruned_loss=0.06537, over 7217.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2929, pruned_loss=0.06646, over 1612914.09 frames. ], batch size: 16, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:34,977 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-06 21:47:55,310 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:02,991 INFO [train.py:901] (2/4) Epoch 18, batch 5250, loss[loss=0.1981, simple_loss=0.2736, pruned_loss=0.06128, over 7552.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2923, pruned_loss=0.06634, over 1606737.17 frames. ], batch size: 18, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:03,209 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:03,672 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 21:48:13,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:14,359 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.491e+02 3.102e+02 3.692e+02 6.533e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 21:48:21,247 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:37,895 INFO [train.py:901] (2/4) Epoch 18, batch 5300, loss[loss=0.2592, simple_loss=0.3292, pruned_loss=0.09457, over 8757.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2949, pruned_loss=0.06767, over 1606866.42 frames. 
], batch size: 30, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:52,020 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9890, 1.7103, 2.0096, 1.7126, 1.2766, 1.6732, 2.3593, 1.9964], + device='cuda:2'), covar=tensor([0.0411, 0.1149, 0.1493, 0.1290, 0.0522, 0.1327, 0.0540, 0.0586], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0157, 0.0099, 0.0161, 0.0113, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 21:48:59,519 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:49:12,900 INFO [train.py:901] (2/4) Epoch 18, batch 5350, loss[loss=0.199, simple_loss=0.2908, pruned_loss=0.05362, over 8259.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2928, pruned_loss=0.06662, over 1604081.09 frames. ], batch size: 24, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:49:22,762 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.581e+02 3.011e+02 3.651e+02 7.168e+02, threshold=6.023e+02, percent-clipped=3.0 +2023-02-06 21:49:48,114 INFO [train.py:901] (2/4) Epoch 18, batch 5400, loss[loss=0.2259, simple_loss=0.3091, pruned_loss=0.07136, over 8509.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.292, pruned_loss=0.06569, over 1610156.14 frames. ], batch size: 49, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:22,758 INFO [train.py:901] (2/4) Epoch 18, batch 5450, loss[loss=0.1982, simple_loss=0.2958, pruned_loss=0.05026, over 8114.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2906, pruned_loss=0.06517, over 1605662.95 frames. ], batch size: 23, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:33,557 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.381e+02 3.003e+02 4.378e+02 7.690e+02, threshold=6.006e+02, percent-clipped=4.0 +2023-02-06 21:50:50,022 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 21:50:58,893 INFO [train.py:901] (2/4) Epoch 18, batch 5500, loss[loss=0.2229, simple_loss=0.3034, pruned_loss=0.07117, over 8083.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2918, pruned_loss=0.06554, over 1607619.33 frames. ], batch size: 21, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:14,988 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:51:18,599 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3634, 2.2765, 3.1627, 2.4591, 2.8816, 2.4647, 2.1632, 1.7574], + device='cuda:2'), covar=tensor([0.5055, 0.4852, 0.1766, 0.3550, 0.2591, 0.2805, 0.1960, 0.5498], + device='cuda:2'), in_proj_covar=tensor([0.0926, 0.0946, 0.0778, 0.0912, 0.0984, 0.0865, 0.0732, 0.0811], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:51:33,201 INFO [train.py:901] (2/4) Epoch 18, batch 5550, loss[loss=0.1953, simple_loss=0.2778, pruned_loss=0.05637, over 8237.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2918, pruned_loss=0.06573, over 1604075.71 frames. 
], batch size: 22, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:43,334 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.398e+02 2.938e+02 3.826e+02 1.126e+03, threshold=5.876e+02, percent-clipped=10.0 +2023-02-06 21:51:48,889 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5368, 1.9952, 3.1275, 1.3664, 2.3339, 1.9565, 1.6766, 2.3058], + device='cuda:2'), covar=tensor([0.1888, 0.2357, 0.0831, 0.4409, 0.1805, 0.3085, 0.2164, 0.2212], + device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0574, 0.0549, 0.0620, 0.0634, 0.0580, 0.0512, 0.0625], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:52:08,269 INFO [train.py:901] (2/4) Epoch 18, batch 5600, loss[loss=0.1819, simple_loss=0.262, pruned_loss=0.05085, over 7813.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2914, pruned_loss=0.06515, over 1604683.32 frames. ], batch size: 20, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:11,238 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0872, 1.7447, 6.1815, 2.2666, 5.5985, 5.2158, 5.7818, 5.7034], + device='cuda:2'), covar=tensor([0.0458, 0.4501, 0.0372, 0.3695, 0.1058, 0.0989, 0.0418, 0.0444], + device='cuda:2'), in_proj_covar=tensor([0.0595, 0.0623, 0.0670, 0.0602, 0.0680, 0.0584, 0.0576, 0.0647], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:52:18,470 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 21:52:36,271 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:52:43,531 INFO [train.py:901] (2/4) Epoch 18, batch 5650, loss[loss=0.1967, simple_loss=0.2671, pruned_loss=0.06312, over 7781.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2905, pruned_loss=0.06472, over 1601223.93 frames. ], batch size: 19, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:46,062 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.20 vs. limit=5.0 +2023-02-06 21:52:54,552 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.315e+02 3.071e+02 3.627e+02 7.364e+02, threshold=6.141e+02, percent-clipped=4.0 +2023-02-06 21:53:00,420 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 21:53:01,151 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143086.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:53:18,752 INFO [train.py:901] (2/4) Epoch 18, batch 5700, loss[loss=0.1985, simple_loss=0.2913, pruned_loss=0.05287, over 8027.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.291, pruned_loss=0.06494, over 1606806.32 frames. 
], batch size: 22, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:53:19,575 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0290, 1.7733, 3.2541, 1.2053, 2.3196, 3.6057, 3.8277, 2.6251], + device='cuda:2'), covar=tensor([0.1336, 0.1908, 0.0508, 0.2908, 0.1325, 0.0383, 0.0638, 0.1089], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0312, 0.0276, 0.0307, 0.0297, 0.0254, 0.0397, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 21:53:53,726 INFO [train.py:901] (2/4) Epoch 18, batch 5750, loss[loss=0.2049, simple_loss=0.28, pruned_loss=0.06493, over 7534.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.291, pruned_loss=0.06489, over 1607538.82 frames. ], batch size: 18, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:54:04,013 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.423e+02 2.839e+02 3.621e+02 5.889e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 21:54:04,723 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 21:54:21,856 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:54:28,668 INFO [train.py:901] (2/4) Epoch 18, batch 5800, loss[loss=0.1637, simple_loss=0.2395, pruned_loss=0.04394, over 7431.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2903, pruned_loss=0.06413, over 1610710.36 frames. ], batch size: 17, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:54:43,931 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5751, 5.5685, 4.9082, 2.6600, 4.9447, 5.1911, 5.2193, 4.9829], + device='cuda:2'), covar=tensor([0.0488, 0.0364, 0.0895, 0.3888, 0.0720, 0.0816, 0.0950, 0.0615], + device='cuda:2'), in_proj_covar=tensor([0.0506, 0.0421, 0.0420, 0.0517, 0.0412, 0.0419, 0.0402, 0.0368], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:55:04,395 INFO [train.py:901] (2/4) Epoch 18, batch 5850, loss[loss=0.1977, simple_loss=0.2883, pruned_loss=0.0536, over 8037.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2906, pruned_loss=0.06459, over 1610862.48 frames. ], batch size: 22, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:15,560 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.467e+02 2.892e+02 3.630e+02 6.628e+02, threshold=5.783e+02, percent-clipped=2.0 +2023-02-06 21:55:36,571 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143306.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:55:39,811 INFO [train.py:901] (2/4) Epoch 18, batch 5900, loss[loss=0.1859, simple_loss=0.2588, pruned_loss=0.05646, over 7693.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2912, pruned_loss=0.0648, over 1609165.51 frames. 
], batch size: 18, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:53,316 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143331.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:56:03,174 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1903, 2.0252, 2.6838, 2.2099, 2.5993, 2.2423, 1.9546, 1.3801], + device='cuda:2'), covar=tensor([0.5285, 0.4799, 0.1831, 0.3393, 0.2260, 0.2811, 0.1835, 0.5066], + device='cuda:2'), in_proj_covar=tensor([0.0929, 0.0946, 0.0777, 0.0915, 0.0981, 0.0866, 0.0729, 0.0812], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 21:56:14,463 INFO [train.py:901] (2/4) Epoch 18, batch 5950, loss[loss=0.2188, simple_loss=0.2982, pruned_loss=0.06969, over 8532.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2926, pruned_loss=0.06552, over 1611317.05 frames. ], batch size: 31, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:25,158 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.388e+02 2.875e+02 3.741e+02 7.794e+02, threshold=5.749e+02, percent-clipped=3.0 +2023-02-06 21:56:49,476 INFO [train.py:901] (2/4) Epoch 18, batch 6000, loss[loss=0.242, simple_loss=0.3158, pruned_loss=0.08407, over 8452.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2905, pruned_loss=0.0643, over 1610727.54 frames. ], batch size: 27, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:49,476 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 21:57:03,430 INFO [train.py:935] (2/4) Epoch 18, validation: loss=0.1765, simple_loss=0.2767, pruned_loss=0.03814, over 944034.00 frames. +2023-02-06 21:57:03,431 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 21:57:07,208 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9626, 2.2037, 1.9479, 2.8238, 1.2612, 1.7254, 2.0898, 2.2499], + device='cuda:2'), covar=tensor([0.0734, 0.0830, 0.0856, 0.0352, 0.1211, 0.1254, 0.0851, 0.0843], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0199, 0.0251, 0.0213, 0.0208, 0.0247, 0.0254, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:57:08,523 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143418.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:57:33,582 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8328, 5.9689, 5.2323, 2.6779, 5.3235, 5.5432, 5.5477, 5.3397], + device='cuda:2'), covar=tensor([0.0602, 0.0403, 0.0888, 0.4084, 0.0792, 0.0760, 0.0976, 0.0655], + device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0428, 0.0427, 0.0527, 0.0419, 0.0425, 0.0410, 0.0375], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:57:35,795 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:38,433 INFO [train.py:901] (2/4) Epoch 18, batch 6050, loss[loss=0.2301, simple_loss=0.3048, pruned_loss=0.07768, over 8480.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.292, pruned_loss=0.06529, over 1612278.79 frames. ], batch size: 27, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:57:47,850 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. 
limit=2.0 +2023-02-06 21:57:48,584 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.412e+02 3.060e+02 4.409e+02 1.030e+03, threshold=6.120e+02, percent-clipped=9.0 +2023-02-06 21:57:53,569 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143482.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:59,612 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143491.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:58:02,441 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5309, 1.3266, 1.6041, 1.2474, 0.8827, 1.3586, 1.5467, 1.3840], + device='cuda:2'), covar=tensor([0.0569, 0.1331, 0.1729, 0.1487, 0.0602, 0.1557, 0.0707, 0.0681], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0113, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 21:58:06,561 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7574, 1.8754, 1.6457, 2.2966, 0.9794, 1.4821, 1.6744, 1.8844], + device='cuda:2'), covar=tensor([0.0725, 0.0806, 0.0926, 0.0399, 0.1109, 0.1245, 0.0783, 0.0740], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0199, 0.0250, 0.0213, 0.0208, 0.0247, 0.0254, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 21:58:13,404 INFO [train.py:901] (2/4) Epoch 18, batch 6100, loss[loss=0.1747, simple_loss=0.2556, pruned_loss=0.04692, over 7425.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2926, pruned_loss=0.06565, over 1614201.00 frames. ], batch size: 17, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:58:22,898 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7848, 4.6963, 4.3373, 2.8914, 4.2951, 4.3329, 4.4286, 4.0654], + device='cuda:2'), covar=tensor([0.0649, 0.0468, 0.0841, 0.3436, 0.0762, 0.0966, 0.1074, 0.0746], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0426, 0.0425, 0.0524, 0.0418, 0.0423, 0.0408, 0.0374], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 21:58:39,427 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 21:58:40,456 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.25 vs. limit=5.0 +2023-02-06 21:58:49,838 INFO [train.py:901] (2/4) Epoch 18, batch 6150, loss[loss=0.1988, simple_loss=0.2848, pruned_loss=0.05635, over 7652.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2926, pruned_loss=0.06541, over 1616714.10 frames. ], batch size: 19, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:00,210 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.359e+02 3.030e+02 3.820e+02 7.737e+02, threshold=6.061e+02, percent-clipped=3.0 +2023-02-06 21:59:25,629 INFO [train.py:901] (2/4) Epoch 18, batch 6200, loss[loss=0.2497, simple_loss=0.3345, pruned_loss=0.08246, over 8583.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2923, pruned_loss=0.06516, over 1612960.29 frames. 
], batch size: 39, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:56,260 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2206, 1.5805, 1.2018, 2.4054, 1.0676, 1.1109, 1.8290, 1.7221], + device='cuda:2'), covar=tensor([0.1800, 0.1403, 0.2283, 0.0478, 0.1500, 0.2398, 0.0914, 0.1124], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0200, 0.0252, 0.0213, 0.0209, 0.0249, 0.0255, 0.0214], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 22:00:01,323 INFO [train.py:901] (2/4) Epoch 18, batch 6250, loss[loss=0.2165, simple_loss=0.2905, pruned_loss=0.0713, over 8581.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.293, pruned_loss=0.06533, over 1614501.02 frames. ], batch size: 31, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:00:07,092 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2100, 2.0478, 1.4789, 1.8899, 1.7109, 1.2924, 1.6037, 1.6347], + device='cuda:2'), covar=tensor([0.1261, 0.0451, 0.1256, 0.0579, 0.0754, 0.1542, 0.0950, 0.0808], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0234, 0.0325, 0.0304, 0.0295, 0.0330, 0.0341, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:00:12,419 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.460e+02 3.089e+02 4.040e+02 1.017e+03, threshold=6.178e+02, percent-clipped=5.0 +2023-02-06 22:00:20,715 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6616, 1.9579, 2.1545, 1.4160, 2.2489, 1.5319, 0.6382, 1.8778], + device='cuda:2'), covar=tensor([0.0557, 0.0290, 0.0221, 0.0511, 0.0325, 0.0782, 0.0801, 0.0257], + device='cuda:2'), in_proj_covar=tensor([0.0436, 0.0372, 0.0322, 0.0433, 0.0362, 0.0523, 0.0380, 0.0398], + device='cuda:2'), out_proj_covar=tensor([1.1826e-04, 9.8424e-05, 8.5340e-05, 1.1531e-04, 9.6311e-05, 1.4985e-04, + 1.0310e-04, 1.0596e-04], device='cuda:2') +2023-02-06 22:00:37,041 INFO [train.py:901] (2/4) Epoch 18, batch 6300, loss[loss=0.1698, simple_loss=0.2477, pruned_loss=0.04593, over 7420.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2925, pruned_loss=0.06534, over 1612370.55 frames. ], batch size: 17, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:11,903 INFO [train.py:901] (2/4) Epoch 18, batch 6350, loss[loss=0.2364, simple_loss=0.2945, pruned_loss=0.08918, over 7196.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2923, pruned_loss=0.0651, over 1616718.15 frames. ], batch size: 16, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:13,345 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143762.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:01:22,533 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.204e+02 2.882e+02 3.589e+02 6.333e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 22:01:47,363 INFO [train.py:901] (2/4) Epoch 18, batch 6400, loss[loss=0.2046, simple_loss=0.2801, pruned_loss=0.06459, over 8476.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2915, pruned_loss=0.06495, over 1615159.36 frames. ], batch size: 27, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:49,682 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. 
limit=2.0 +2023-02-06 22:01:55,547 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7027, 1.9358, 2.1258, 1.3885, 2.2803, 1.5047, 0.7240, 1.9312], + device='cuda:2'), covar=tensor([0.0702, 0.0383, 0.0307, 0.0594, 0.0456, 0.0886, 0.0834, 0.0293], + device='cuda:2'), in_proj_covar=tensor([0.0437, 0.0373, 0.0322, 0.0435, 0.0362, 0.0524, 0.0380, 0.0399], + device='cuda:2'), out_proj_covar=tensor([1.1853e-04, 9.8686e-05, 8.5205e-05, 1.1575e-04, 9.6397e-05, 1.5007e-04, + 1.0315e-04, 1.0630e-04], device='cuda:2') +2023-02-06 22:02:04,438 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143835.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:02:20,746 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9643, 1.7956, 2.5971, 1.6453, 2.2411, 2.8824, 2.8447, 2.5764], + device='cuda:2'), covar=tensor([0.0893, 0.1337, 0.0712, 0.1689, 0.1480, 0.0265, 0.0698, 0.0460], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0313, 0.0277, 0.0306, 0.0295, 0.0254, 0.0399, 0.0297], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 22:02:21,898 INFO [train.py:901] (2/4) Epoch 18, batch 6450, loss[loss=0.1803, simple_loss=0.2595, pruned_loss=0.0506, over 7661.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2925, pruned_loss=0.06542, over 1610748.55 frames. ], batch size: 19, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:02:33,457 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.452e+02 2.973e+02 3.704e+02 1.405e+03, threshold=5.946e+02, percent-clipped=1.0 +2023-02-06 22:02:34,290 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143877.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:02:39,646 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 22:02:57,285 INFO [train.py:901] (2/4) Epoch 18, batch 6500, loss[loss=0.2034, simple_loss=0.2975, pruned_loss=0.05461, over 8361.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2939, pruned_loss=0.06598, over 1615168.24 frames. ], batch size: 24, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:24,066 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143950.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:03:31,366 INFO [train.py:901] (2/4) Epoch 18, batch 6550, loss[loss=0.1651, simple_loss=0.2471, pruned_loss=0.04157, over 7542.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2932, pruned_loss=0.06565, over 1613289.21 frames. ], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:38,658 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2549, 2.0061, 2.7096, 2.2699, 2.6318, 2.2051, 2.0030, 1.4978], + device='cuda:2'), covar=tensor([0.4559, 0.4310, 0.1686, 0.3101, 0.2128, 0.2825, 0.1802, 0.4591], + device='cuda:2'), in_proj_covar=tensor([0.0928, 0.0945, 0.0779, 0.0913, 0.0982, 0.0866, 0.0731, 0.0809], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:03:41,856 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.926e+02 2.526e+02 3.154e+02 3.765e+02 8.734e+02, threshold=6.308e+02, percent-clipped=5.0 +2023-02-06 22:03:48,802 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 22:04:08,861 INFO [train.py:901] (2/4) Epoch 18, batch 6600, loss[loss=0.1892, simple_loss=0.2675, pruned_loss=0.0554, over 7970.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2926, pruned_loss=0.06515, over 1614212.45 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:10,888 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 22:04:25,261 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 22:04:34,008 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. limit=5.0 +2023-02-06 22:04:43,665 INFO [train.py:901] (2/4) Epoch 18, batch 6650, loss[loss=0.2016, simple_loss=0.2908, pruned_loss=0.05621, over 8504.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2925, pruned_loss=0.06493, over 1615004.02 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:54,721 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.298e+02 3.022e+02 3.555e+02 7.360e+02, threshold=6.043e+02, percent-clipped=4.0 +2023-02-06 22:05:17,161 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5565, 1.8007, 1.9362, 1.1325, 1.9891, 1.4313, 0.4516, 1.8195], + device='cuda:2'), covar=tensor([0.0427, 0.0280, 0.0204, 0.0475, 0.0313, 0.0705, 0.0669, 0.0203], + device='cuda:2'), in_proj_covar=tensor([0.0438, 0.0375, 0.0323, 0.0436, 0.0364, 0.0524, 0.0381, 0.0401], + device='cuda:2'), out_proj_covar=tensor([1.1901e-04, 9.9063e-05, 8.5492e-05, 1.1602e-04, 9.7010e-05, 1.5011e-04, + 1.0345e-04, 1.0684e-04], device='cuda:2') +2023-02-06 22:05:19,670 INFO [train.py:901] (2/4) Epoch 18, batch 6700, loss[loss=0.1789, simple_loss=0.2497, pruned_loss=0.05401, over 7707.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2904, pruned_loss=0.0637, over 1615851.33 frames. ], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:05:34,398 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144133.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:05:51,948 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144158.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:05:53,841 INFO [train.py:901] (2/4) Epoch 18, batch 6750, loss[loss=0.2682, simple_loss=0.3335, pruned_loss=0.1014, over 6847.00 frames. ], tot_loss[loss=0.211, simple_loss=0.292, pruned_loss=0.06495, over 1615969.77 frames. ], batch size: 71, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:03,939 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.293e+02 3.003e+02 3.717e+02 7.578e+02, threshold=6.007e+02, percent-clipped=1.0 +2023-02-06 22:06:04,345 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-06 22:06:19,927 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1718, 4.1255, 3.7897, 1.9855, 3.6736, 3.8589, 3.6626, 3.5711], + device='cuda:2'), covar=tensor([0.0835, 0.0622, 0.1053, 0.4847, 0.0935, 0.1028, 0.1313, 0.0794], + device='cuda:2'), in_proj_covar=tensor([0.0517, 0.0428, 0.0429, 0.0529, 0.0420, 0.0426, 0.0413, 0.0376], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:06:25,432 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144206.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:06:27,290 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 22:06:28,560 INFO [train.py:901] (2/4) Epoch 18, batch 6800, loss[loss=0.3044, simple_loss=0.3552, pruned_loss=0.1268, over 7393.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2927, pruned_loss=0.06555, over 1615650.22 frames. ], batch size: 73, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:42,850 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144231.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:06:51,121 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 22:07:03,973 INFO [train.py:901] (2/4) Epoch 18, batch 6850, loss[loss=0.2012, simple_loss=0.2802, pruned_loss=0.06106, over 8090.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2929, pruned_loss=0.06604, over 1612455.26 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:07:13,993 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.379e+02 2.937e+02 3.634e+02 6.722e+02, threshold=5.873e+02, percent-clipped=2.0 +2023-02-06 22:07:17,341 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 22:07:22,930 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9289, 3.7843, 2.3687, 2.9499, 2.8142, 2.1815, 2.6969, 2.9231], + device='cuda:2'), covar=tensor([0.1958, 0.0401, 0.1226, 0.0765, 0.0839, 0.1476, 0.1223, 0.1131], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0237, 0.0328, 0.0306, 0.0300, 0.0332, 0.0345, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:07:38,060 INFO [train.py:901] (2/4) Epoch 18, batch 6900, loss[loss=0.238, simple_loss=0.308, pruned_loss=0.08402, over 7427.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2921, pruned_loss=0.06594, over 1607756.99 frames. ], batch size: 17, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:09,502 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0475, 1.6947, 6.0918, 2.1076, 5.5266, 5.1637, 5.7347, 5.6025], + device='cuda:2'), covar=tensor([0.0438, 0.4377, 0.0335, 0.3607, 0.0947, 0.0827, 0.0437, 0.0453], + device='cuda:2'), in_proj_covar=tensor([0.0593, 0.0628, 0.0670, 0.0600, 0.0679, 0.0581, 0.0578, 0.0647], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:08:13,481 INFO [train.py:901] (2/4) Epoch 18, batch 6950, loss[loss=0.1838, simple_loss=0.2614, pruned_loss=0.05307, over 7927.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2915, pruned_loss=0.06526, over 1611653.69 frames. 
], batch size: 20, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:20,470 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 22:08:24,077 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.398e+02 2.919e+02 3.864e+02 7.610e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-06 22:08:25,463 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 22:08:47,780 INFO [train.py:901] (2/4) Epoch 18, batch 7000, loss[loss=0.2158, simple_loss=0.2973, pruned_loss=0.06713, over 8086.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2918, pruned_loss=0.06522, over 1616321.36 frames. ], batch size: 21, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:08:49,927 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8080, 1.4702, 3.9718, 1.3947, 3.5234, 3.2395, 3.6039, 3.4815], + device='cuda:2'), covar=tensor([0.0656, 0.3879, 0.0585, 0.3755, 0.1102, 0.1004, 0.0633, 0.0703], + device='cuda:2'), in_proj_covar=tensor([0.0596, 0.0632, 0.0676, 0.0603, 0.0685, 0.0584, 0.0582, 0.0652], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:09:01,097 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=144429.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:09:05,729 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0953, 1.7321, 3.4140, 1.5840, 2.3406, 3.7470, 3.8883, 3.2205], + device='cuda:2'), covar=tensor([0.1120, 0.1680, 0.0334, 0.2110, 0.1104, 0.0218, 0.0491, 0.0546], + device='cuda:2'), in_proj_covar=tensor([0.0283, 0.0314, 0.0278, 0.0307, 0.0295, 0.0255, 0.0400, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 22:09:22,578 INFO [train.py:901] (2/4) Epoch 18, batch 7050, loss[loss=0.1988, simple_loss=0.2757, pruned_loss=0.06092, over 7660.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2913, pruned_loss=0.06514, over 1613292.73 frames. ], batch size: 19, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:09:34,230 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 2.534e+02 2.937e+02 3.689e+02 8.247e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 22:09:58,438 INFO [train.py:901] (2/4) Epoch 18, batch 7100, loss[loss=0.2491, simple_loss=0.3349, pruned_loss=0.0817, over 8512.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2917, pruned_loss=0.06546, over 1607641.57 frames. ], batch size: 26, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:10:33,591 INFO [train.py:901] (2/4) Epoch 18, batch 7150, loss[loss=0.1618, simple_loss=0.2394, pruned_loss=0.04216, over 6352.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2917, pruned_loss=0.06493, over 1608215.68 frames. ], batch size: 14, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:10:43,980 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.263e+02 2.906e+02 3.662e+02 1.305e+03, threshold=5.813e+02, percent-clipped=7.0 +2023-02-06 22:11:00,102 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.25 vs. limit=5.0 +2023-02-06 22:11:10,031 INFO [train.py:901] (2/4) Epoch 18, batch 7200, loss[loss=0.1962, simple_loss=0.2811, pruned_loss=0.05563, over 8044.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2916, pruned_loss=0.06506, over 1607247.18 frames. 
], batch size: 22, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:12,572 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 22:11:20,762 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7859, 1.3959, 1.6371, 1.1427, 0.9610, 1.3605, 1.6775, 1.3608], + device='cuda:2'), covar=tensor([0.0568, 0.1325, 0.1712, 0.1580, 0.0603, 0.1586, 0.0707, 0.0714], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0157, 0.0099, 0.0161, 0.0113, 0.0139], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 22:11:44,457 INFO [train.py:901] (2/4) Epoch 18, batch 7250, loss[loss=0.1912, simple_loss=0.2694, pruned_loss=0.05647, over 7714.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2927, pruned_loss=0.0658, over 1607820.22 frames. ], batch size: 18, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:54,469 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.379e+02 2.816e+02 3.627e+02 9.857e+02, threshold=5.632e+02, percent-clipped=4.0 +2023-02-06 22:12:19,769 INFO [train.py:901] (2/4) Epoch 18, batch 7300, loss[loss=0.2178, simple_loss=0.2957, pruned_loss=0.06995, over 8707.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2927, pruned_loss=0.06563, over 1607150.05 frames. ], batch size: 34, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:12:50,206 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0358, 3.8722, 2.3082, 3.0713, 2.8679, 1.9540, 2.9448, 2.9963], + device='cuda:2'), covar=tensor([0.1614, 0.0256, 0.1181, 0.0687, 0.0763, 0.1502, 0.1053, 0.1012], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0233, 0.0324, 0.0301, 0.0294, 0.0328, 0.0340, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:12:53,982 INFO [train.py:901] (2/4) Epoch 18, batch 7350, loss[loss=0.1759, simple_loss=0.2554, pruned_loss=0.04818, over 8085.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2923, pruned_loss=0.06569, over 1607190.13 frames. ], batch size: 21, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:13:02,928 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=144773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:13:04,748 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.484e+02 2.992e+02 3.514e+02 8.978e+02, threshold=5.985e+02, percent-clipped=6.0 +2023-02-06 22:13:08,073 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 22:13:26,911 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 22:13:28,896 INFO [train.py:901] (2/4) Epoch 18, batch 7400, loss[loss=0.2055, simple_loss=0.2864, pruned_loss=0.06234, over 7815.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2932, pruned_loss=0.06652, over 1608718.73 frames. ], batch size: 20, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:04,302 INFO [train.py:901] (2/4) Epoch 18, batch 7450, loss[loss=0.2547, simple_loss=0.3401, pruned_loss=0.08462, over 8106.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2948, pruned_loss=0.06718, over 1610299.83 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:07,791 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 22:14:07,987 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9929, 3.8597, 2.2421, 2.9190, 2.9673, 2.0455, 3.0104, 2.9524], + device='cuda:2'), covar=tensor([0.1592, 0.0253, 0.0937, 0.0607, 0.0583, 0.1216, 0.0869, 0.0964], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0233, 0.0324, 0.0302, 0.0295, 0.0329, 0.0342, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:14:14,577 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 2.433e+02 3.083e+02 4.140e+02 9.921e+02, threshold=6.167e+02, percent-clipped=3.0 +2023-02-06 22:14:23,398 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=144888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:14:38,529 INFO [train.py:901] (2/4) Epoch 18, batch 7500, loss[loss=0.2062, simple_loss=0.2905, pruned_loss=0.06096, over 8454.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2934, pruned_loss=0.06649, over 1603376.62 frames. ], batch size: 25, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:14,116 INFO [train.py:901] (2/4) Epoch 18, batch 7550, loss[loss=0.2021, simple_loss=0.2863, pruned_loss=0.05895, over 8109.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2923, pruned_loss=0.06576, over 1603882.52 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:24,766 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.361e+02 2.893e+02 3.293e+02 8.578e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 22:15:48,827 INFO [train.py:901] (2/4) Epoch 18, batch 7600, loss[loss=0.2016, simple_loss=0.2803, pruned_loss=0.06149, over 7967.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2926, pruned_loss=0.06599, over 1607133.99 frames. ], batch size: 21, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:51,058 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:12,837 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:24,429 INFO [train.py:901] (2/4) Epoch 18, batch 7650, loss[loss=0.1943, simple_loss=0.2825, pruned_loss=0.05308, over 8466.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2922, pruned_loss=0.06541, over 1608558.15 frames. ], batch size: 25, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:16:35,693 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.409e+02 3.204e+02 3.806e+02 7.453e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 22:16:40,599 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:57,521 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9305, 2.5816, 3.5415, 2.0637, 2.0824, 3.5542, 0.7398, 2.0961], + device='cuda:2'), covar=tensor([0.1428, 0.1591, 0.0404, 0.1894, 0.2717, 0.0310, 0.2543, 0.1579], + device='cuda:2'), in_proj_covar=tensor([0.0182, 0.0191, 0.0121, 0.0215, 0.0265, 0.0129, 0.0166, 0.0183], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 22:16:58,566 INFO [train.py:901] (2/4) Epoch 18, batch 7700, loss[loss=0.2315, simple_loss=0.3165, pruned_loss=0.07324, over 8102.00 frames. 
], tot_loss[loss=0.2122, simple_loss=0.2927, pruned_loss=0.06584, over 1610484.63 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:16,300 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 22:17:17,759 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2392, 2.5372, 2.9193, 1.5480, 3.1826, 1.8196, 1.4815, 2.0313], + device='cuda:2'), covar=tensor([0.0678, 0.0347, 0.0269, 0.0736, 0.0398, 0.0773, 0.0846, 0.0501], + device='cuda:2'), in_proj_covar=tensor([0.0438, 0.0377, 0.0323, 0.0431, 0.0362, 0.0523, 0.0379, 0.0403], + device='cuda:2'), out_proj_covar=tensor([1.1892e-04, 9.9597e-05, 8.5574e-05, 1.1453e-04, 9.6450e-05, 1.4958e-04, + 1.0290e-04, 1.0766e-04], device='cuda:2') +2023-02-06 22:17:21,899 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145144.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:33,670 INFO [train.py:901] (2/4) Epoch 18, batch 7750, loss[loss=0.2518, simple_loss=0.3172, pruned_loss=0.09317, over 8704.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2925, pruned_loss=0.06609, over 1610093.90 frames. ], batch size: 40, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:40,047 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:45,202 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.617e+02 3.101e+02 3.765e+02 9.296e+02, threshold=6.202e+02, percent-clipped=3.0 +2023-02-06 22:18:08,798 INFO [train.py:901] (2/4) Epoch 18, batch 7800, loss[loss=0.1829, simple_loss=0.2742, pruned_loss=0.04577, over 7969.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2928, pruned_loss=0.06619, over 1609113.77 frames. ], batch size: 21, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:18:42,838 INFO [train.py:901] (2/4) Epoch 18, batch 7850, loss[loss=0.1798, simple_loss=0.2556, pruned_loss=0.05199, over 7447.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2927, pruned_loss=0.0661, over 1611292.48 frames. ], batch size: 17, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:18:53,262 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.477e+02 2.948e+02 3.643e+02 1.044e+03, threshold=5.895e+02, percent-clipped=9.0 +2023-02-06 22:19:16,123 INFO [train.py:901] (2/4) Epoch 18, batch 7900, loss[loss=0.1824, simple_loss=0.2712, pruned_loss=0.04676, over 8473.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2923, pruned_loss=0.06543, over 1613485.11 frames. ], batch size: 25, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:47,151 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:19:49,081 INFO [train.py:901] (2/4) Epoch 18, batch 7950, loss[loss=0.1921, simple_loss=0.2813, pruned_loss=0.05147, over 8593.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2911, pruned_loss=0.06514, over 1611013.93 frames. ], batch size: 39, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:59,837 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.389e+02 3.012e+02 3.869e+02 1.111e+03, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 22:20:04,931 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. 
limit=5.0 +2023-02-06 22:20:07,939 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:08,007 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:23,113 INFO [train.py:901] (2/4) Epoch 18, batch 8000, loss[loss=0.1646, simple_loss=0.2506, pruned_loss=0.03925, over 7922.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2903, pruned_loss=0.06448, over 1608857.16 frames. ], batch size: 20, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:20:25,591 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 22:20:31,455 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8700, 2.3414, 4.3476, 1.6325, 3.2322, 2.4145, 1.8774, 3.0634], + device='cuda:2'), covar=tensor([0.1797, 0.2551, 0.0787, 0.4213, 0.1618, 0.2890, 0.2158, 0.2190], + device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0578, 0.0548, 0.0625, 0.0638, 0.0583, 0.0515, 0.0628], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:20:34,670 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:57,108 INFO [train.py:901] (2/4) Epoch 18, batch 8050, loss[loss=0.2216, simple_loss=0.2957, pruned_loss=0.07371, over 7541.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2895, pruned_loss=0.06513, over 1587677.61 frames. ], batch size: 18, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:21:05,672 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:21:08,160 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.350e+02 2.866e+02 3.408e+02 5.747e+02, threshold=5.732e+02, percent-clipped=0.0 +2023-02-06 22:21:29,480 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 22:21:34,901 INFO [train.py:901] (2/4) Epoch 19, batch 0, loss[loss=0.1762, simple_loss=0.2647, pruned_loss=0.04381, over 8089.00 frames. ], tot_loss[loss=0.1762, simple_loss=0.2647, pruned_loss=0.04381, over 8089.00 frames. ], batch size: 21, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:21:34,901 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 22:21:46,549 INFO [train.py:935] (2/4) Epoch 19, validation: loss=0.1782, simple_loss=0.2779, pruned_loss=0.03928, over 944034.00 frames. +2023-02-06 22:21:46,550 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 22:21:54,197 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:03,067 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 22:22:22,457 INFO [train.py:901] (2/4) Epoch 19, batch 50, loss[loss=0.1894, simple_loss=0.2651, pruned_loss=0.05688, over 7554.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2861, pruned_loss=0.0623, over 364238.99 frames. 
], batch size: 18, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:22,669 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:23,349 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9496, 1.5742, 3.3026, 1.3906, 2.4622, 3.6057, 3.7523, 3.0501], + device='cuda:2'), covar=tensor([0.1166, 0.1741, 0.0353, 0.2218, 0.0980, 0.0244, 0.0456, 0.0567], + device='cuda:2'), in_proj_covar=tensor([0.0281, 0.0312, 0.0275, 0.0303, 0.0293, 0.0253, 0.0396, 0.0296], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 22:22:25,796 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 22:22:40,524 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 22:22:42,317 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 22:22:45,196 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.340e+02 2.977e+02 3.641e+02 7.952e+02, threshold=5.953e+02, percent-clipped=6.0 +2023-02-06 22:22:56,254 INFO [train.py:901] (2/4) Epoch 19, batch 100, loss[loss=0.1994, simple_loss=0.2818, pruned_loss=0.05852, over 8246.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2907, pruned_loss=0.06546, over 642250.73 frames. ], batch size: 22, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:59,376 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6809, 2.0841, 3.3634, 1.5783, 2.5349, 2.0626, 1.7920, 2.4940], + device='cuda:2'), covar=tensor([0.1791, 0.2605, 0.0743, 0.4393, 0.1726, 0.3153, 0.2188, 0.2150], + device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0578, 0.0548, 0.0628, 0.0635, 0.0585, 0.0517, 0.0629], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:23:01,889 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 22:23:10,099 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:20,686 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7599, 4.6848, 4.1966, 2.0033, 4.1711, 4.3046, 4.3147, 4.0384], + device='cuda:2'), covar=tensor([0.0608, 0.0467, 0.0957, 0.4629, 0.0782, 0.0881, 0.1042, 0.0737], + device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0426, 0.0429, 0.0527, 0.0414, 0.0426, 0.0408, 0.0373], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:23:32,319 INFO [train.py:901] (2/4) Epoch 19, batch 150, loss[loss=0.1722, simple_loss=0.25, pruned_loss=0.04714, over 7429.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2912, pruned_loss=0.06427, over 860716.80 frames. 
], batch size: 17, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:23:40,877 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0631, 1.7612, 2.4042, 1.9539, 2.2270, 2.0678, 1.8299, 1.1677], + device='cuda:2'), covar=tensor([0.5237, 0.4760, 0.1727, 0.3359, 0.2392, 0.2973, 0.1990, 0.4819], + device='cuda:2'), in_proj_covar=tensor([0.0925, 0.0945, 0.0773, 0.0911, 0.0976, 0.0864, 0.0728, 0.0808], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:23:46,186 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:57,040 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.454e+02 2.969e+02 3.777e+02 1.176e+03, threshold=5.938e+02, percent-clipped=4.0 +2023-02-06 22:24:07,965 INFO [train.py:901] (2/4) Epoch 19, batch 200, loss[loss=0.1813, simple_loss=0.2692, pruned_loss=0.04663, over 8087.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2921, pruned_loss=0.06378, over 1032203.45 frames. ], batch size: 21, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:24:33,094 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4402, 2.5029, 1.7938, 2.1509, 2.1339, 1.4192, 2.0260, 1.9669], + device='cuda:2'), covar=tensor([0.1598, 0.0397, 0.1158, 0.0624, 0.0681, 0.1601, 0.1021, 0.1010], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0234, 0.0323, 0.0301, 0.0296, 0.0328, 0.0339, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:24:33,110 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:35,723 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:43,201 INFO [train.py:901] (2/4) Epoch 19, batch 250, loss[loss=0.2447, simple_loss=0.322, pruned_loss=0.08372, over 8291.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2944, pruned_loss=0.06527, over 1163951.14 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:24:51,128 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:55,242 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:58,390 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 22:25:06,977 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 22:25:07,541 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.432e+02 3.022e+02 3.893e+02 7.688e+02, threshold=6.043e+02, percent-clipped=6.0 +2023-02-06 22:25:13,284 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:18,656 INFO [train.py:901] (2/4) Epoch 19, batch 300, loss[loss=0.1918, simple_loss=0.2716, pruned_loss=0.05604, over 8226.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.294, pruned_loss=0.06559, over 1266303.55 frames. 
], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:22,942 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:39,976 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:50,254 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145839.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:25:53,699 INFO [train.py:901] (2/4) Epoch 19, batch 350, loss[loss=0.2163, simple_loss=0.3035, pruned_loss=0.06453, over 8237.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2943, pruned_loss=0.06608, over 1343727.95 frames. ], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:57,444 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:26:17,667 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.383e+02 2.952e+02 3.795e+02 9.100e+02, threshold=5.904e+02, percent-clipped=6.0 +2023-02-06 22:26:30,026 INFO [train.py:901] (2/4) Epoch 19, batch 400, loss[loss=0.2193, simple_loss=0.3043, pruned_loss=0.06712, over 8102.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2948, pruned_loss=0.06653, over 1405288.78 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:26:32,897 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.4245, 5.5156, 4.9408, 2.5376, 4.9571, 5.1564, 5.1168, 4.8917], + device='cuda:2'), covar=tensor([0.0688, 0.0450, 0.0910, 0.4491, 0.0704, 0.0868, 0.1075, 0.0680], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0422, 0.0425, 0.0524, 0.0411, 0.0424, 0.0402, 0.0370], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:26:44,768 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0829, 1.7574, 2.0331, 1.7669, 1.3531, 1.8067, 2.5077, 2.0546], + device='cuda:2'), covar=tensor([0.0416, 0.1132, 0.1551, 0.1278, 0.0524, 0.1339, 0.0547, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0161, 0.0112, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 22:27:04,031 INFO [train.py:901] (2/4) Epoch 19, batch 450, loss[loss=0.1984, simple_loss=0.2957, pruned_loss=0.05058, over 8475.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2962, pruned_loss=0.06683, over 1455804.32 frames. ], batch size: 25, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:12,889 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:27:23,353 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.09 vs. limit=5.0 +2023-02-06 22:27:28,514 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.474e+02 2.839e+02 3.457e+02 5.406e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 22:27:40,173 INFO [train.py:901] (2/4) Epoch 19, batch 500, loss[loss=0.1959, simple_loss=0.2752, pruned_loss=0.05829, over 8240.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2948, pruned_loss=0.06572, over 1494681.99 frames. 
], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:50,093 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:03,430 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4279, 1.5097, 4.5809, 1.6743, 4.1090, 3.7570, 4.1663, 3.9965], + device='cuda:2'), covar=tensor([0.0549, 0.4464, 0.0457, 0.4053, 0.1002, 0.0946, 0.0553, 0.0634], + device='cuda:2'), in_proj_covar=tensor([0.0597, 0.0627, 0.0673, 0.0605, 0.0687, 0.0591, 0.0585, 0.0649], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:28:15,843 INFO [train.py:901] (2/4) Epoch 19, batch 550, loss[loss=0.1921, simple_loss=0.2844, pruned_loss=0.04989, over 8282.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2926, pruned_loss=0.06463, over 1517654.32 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:19,299 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4495, 4.4076, 3.9937, 1.9643, 3.9660, 4.0754, 4.0242, 3.7291], + device='cuda:2'), covar=tensor([0.0659, 0.0547, 0.1118, 0.4599, 0.0784, 0.0815, 0.1216, 0.0752], + device='cuda:2'), in_proj_covar=tensor([0.0519, 0.0428, 0.0430, 0.0530, 0.0416, 0.0430, 0.0411, 0.0375], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:28:35,110 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146071.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:38,925 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.557e+02 3.049e+02 4.000e+02 8.642e+02, threshold=6.099e+02, percent-clipped=4.0 +2023-02-06 22:28:50,769 INFO [train.py:901] (2/4) Epoch 19, batch 600, loss[loss=0.236, simple_loss=0.3124, pruned_loss=0.07983, over 8735.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2933, pruned_loss=0.06558, over 1540410.47 frames. ], batch size: 30, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:54,562 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7888, 1.5575, 3.9856, 1.4170, 3.5386, 3.3258, 3.6068, 3.4798], + device='cuda:2'), covar=tensor([0.0639, 0.4122, 0.0630, 0.3879, 0.1250, 0.1065, 0.0656, 0.0766], + device='cuda:2'), in_proj_covar=tensor([0.0594, 0.0624, 0.0670, 0.0600, 0.0684, 0.0588, 0.0582, 0.0646], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:28:59,454 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:03,469 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6236, 4.5942, 4.2057, 2.4450, 4.1176, 4.1790, 4.2963, 3.9827], + device='cuda:2'), covar=tensor([0.0714, 0.0484, 0.0867, 0.3865, 0.0788, 0.0975, 0.1065, 0.0753], + device='cuda:2'), in_proj_covar=tensor([0.0517, 0.0425, 0.0427, 0.0526, 0.0413, 0.0427, 0.0407, 0.0373], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:29:11,437 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 22:29:11,612 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:17,650 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:26,685 INFO [train.py:901] (2/4) Epoch 19, batch 650, loss[loss=0.1701, simple_loss=0.2606, pruned_loss=0.03976, over 7814.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2937, pruned_loss=0.06565, over 1559603.48 frames. ], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:29:42,929 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:49,759 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.628e+02 2.995e+02 3.912e+02 8.872e+02, threshold=5.991e+02, percent-clipped=7.0 +2023-02-06 22:29:53,934 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146183.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:30:00,627 INFO [train.py:901] (2/4) Epoch 19, batch 700, loss[loss=0.2176, simple_loss=0.2983, pruned_loss=0.06843, over 8576.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.294, pruned_loss=0.06614, over 1568904.45 frames. ], batch size: 31, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:37,726 INFO [train.py:901] (2/4) Epoch 19, batch 750, loss[loss=0.248, simple_loss=0.3165, pruned_loss=0.08976, over 8607.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2927, pruned_loss=0.0655, over 1582737.29 frames. ], batch size: 39, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:58,055 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 22:31:00,739 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.187e+02 2.733e+02 3.387e+02 1.037e+03, threshold=5.466e+02, percent-clipped=4.0 +2023-02-06 22:31:06,860 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 22:31:11,497 INFO [train.py:901] (2/4) Epoch 19, batch 800, loss[loss=0.1978, simple_loss=0.2939, pruned_loss=0.05086, over 8035.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2938, pruned_loss=0.06658, over 1590161.72 frames. ], batch size: 22, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:14,849 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146298.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:31:15,523 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0693, 1.7646, 2.0273, 1.7231, 1.0895, 1.8121, 2.3063, 2.2282], + device='cuda:2'), covar=tensor([0.0401, 0.1200, 0.1529, 0.1327, 0.0535, 0.1384, 0.0574, 0.0584], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0099, 0.0162, 0.0113, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 22:31:17,171 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. limit=5.0 +2023-02-06 22:31:35,749 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:31:47,235 INFO [train.py:901] (2/4) Epoch 19, batch 850, loss[loss=0.1824, simple_loss=0.2683, pruned_loss=0.04828, over 7517.00 frames. 
], tot_loss[loss=0.2113, simple_loss=0.2919, pruned_loss=0.06534, over 1595100.24 frames. ], batch size: 18, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:54,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:10,865 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:11,294 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.470e+02 3.071e+02 3.941e+02 1.675e+03, threshold=6.141e+02, percent-clipped=6.0 +2023-02-06 22:32:22,255 INFO [train.py:901] (2/4) Epoch 19, batch 900, loss[loss=0.2364, simple_loss=0.3134, pruned_loss=0.07968, over 8110.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2933, pruned_loss=0.06579, over 1606684.21 frames. ], batch size: 23, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:32:27,822 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:41,230 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8052, 1.9935, 3.3727, 1.5127, 2.5288, 2.1117, 1.8703, 2.3178], + device='cuda:2'), covar=tensor([0.1630, 0.2287, 0.0733, 0.4085, 0.1638, 0.2905, 0.1906, 0.2182], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0580, 0.0550, 0.0630, 0.0639, 0.0584, 0.0518, 0.0629], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:32:56,375 INFO [train.py:901] (2/4) Epoch 19, batch 950, loss[loss=0.2381, simple_loss=0.2949, pruned_loss=0.0907, over 7981.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2926, pruned_loss=0.06586, over 1607039.45 frames. ], batch size: 21, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:33:09,723 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0 +2023-02-06 22:33:20,833 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8952, 1.6551, 1.9726, 1.5869, 1.0220, 1.6977, 1.9889, 2.0191], + device='cuda:2'), covar=tensor([0.0430, 0.1228, 0.1555, 0.1369, 0.0617, 0.1449, 0.0672, 0.0603], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0112, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 22:33:21,310 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.324e+02 2.987e+02 4.077e+02 9.877e+02, threshold=5.974e+02, percent-clipped=4.0 +2023-02-06 22:33:22,690 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 22:33:32,153 INFO [train.py:901] (2/4) Epoch 19, batch 1000, loss[loss=0.2121, simple_loss=0.2879, pruned_loss=0.0682, over 7059.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2922, pruned_loss=0.06523, over 1613853.96 frames. ], batch size: 71, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:33:44,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146511.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:33:54,571 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 22:34:06,369 INFO [train.py:901] (2/4) Epoch 19, batch 1050, loss[loss=0.243, simple_loss=0.3219, pruned_loss=0.08202, over 8510.00 frames. 
], tot_loss[loss=0.2118, simple_loss=0.2931, pruned_loss=0.0653, over 1620929.57 frames. ], batch size: 28, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:06,388 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 22:34:14,962 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146554.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:34:31,583 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.403e+02 2.837e+02 3.508e+02 6.242e+02, threshold=5.674e+02, percent-clipped=1.0 +2023-02-06 22:34:34,619 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146579.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:34:44,087 INFO [train.py:901] (2/4) Epoch 19, batch 1100, loss[loss=0.2235, simple_loss=0.3111, pruned_loss=0.068, over 8481.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2928, pruned_loss=0.06508, over 1621059.39 frames. ], batch size: 25, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:55,189 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:34:56,651 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4709, 2.2399, 3.2797, 2.4759, 2.9828, 2.4520, 2.2367, 1.7434], + device='cuda:2'), covar=tensor([0.5249, 0.4906, 0.1773, 0.3751, 0.2603, 0.2821, 0.1700, 0.5695], + device='cuda:2'), in_proj_covar=tensor([0.0931, 0.0948, 0.0782, 0.0913, 0.0978, 0.0866, 0.0726, 0.0809], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:35:06,981 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:18,380 INFO [train.py:901] (2/4) Epoch 19, batch 1150, loss[loss=0.1843, simple_loss=0.2563, pruned_loss=0.05619, over 7219.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2926, pruned_loss=0.0654, over 1618947.43 frames. ], batch size: 16, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:35:19,116 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 22:35:19,275 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:42,420 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.484e+02 2.879e+02 3.755e+02 5.922e+02, threshold=5.758e+02, percent-clipped=3.0 +2023-02-06 22:35:53,864 INFO [train.py:901] (2/4) Epoch 19, batch 1200, loss[loss=0.2295, simple_loss=0.3149, pruned_loss=0.07211, over 8496.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2926, pruned_loss=0.06581, over 1616702.95 frames. ], batch size: 26, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:03,787 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=5.29 vs. limit=5.0 +2023-02-06 22:36:28,998 INFO [train.py:901] (2/4) Epoch 19, batch 1250, loss[loss=0.1891, simple_loss=0.2618, pruned_loss=0.05823, over 7430.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2909, pruned_loss=0.06529, over 1612419.33 frames. 
], batch size: 17, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:52,646 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 2.471e+02 2.976e+02 4.092e+02 7.603e+02, threshold=5.951e+02, percent-clipped=4.0 +2023-02-06 22:37:04,304 INFO [train.py:901] (2/4) Epoch 19, batch 1300, loss[loss=0.2124, simple_loss=0.3, pruned_loss=0.0624, over 8630.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2898, pruned_loss=0.06478, over 1608150.94 frames. ], batch size: 34, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:40,717 INFO [train.py:901] (2/4) Epoch 19, batch 1350, loss[loss=0.1792, simple_loss=0.2538, pruned_loss=0.05229, over 7242.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2901, pruned_loss=0.06456, over 1607545.03 frames. ], batch size: 16, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:53,751 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:03,884 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.302e+02 2.844e+02 3.659e+02 6.626e+02, threshold=5.688e+02, percent-clipped=1.0 +2023-02-06 22:38:07,775 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146882.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:15,247 INFO [train.py:901] (2/4) Epoch 19, batch 1400, loss[loss=0.2648, simple_loss=0.3269, pruned_loss=0.1013, over 8710.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2904, pruned_loss=0.06495, over 1608586.48 frames. ], batch size: 39, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:38:25,960 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:52,632 INFO [train.py:901] (2/4) Epoch 19, batch 1450, loss[loss=0.1921, simple_loss=0.2515, pruned_loss=0.06634, over 7708.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.291, pruned_loss=0.0654, over 1614923.68 frames. ], batch size: 18, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:38:56,664 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 22:38:59,402 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:16,185 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.362e+02 2.962e+02 3.993e+02 1.525e+03, threshold=5.923e+02, percent-clipped=6.0 +2023-02-06 22:39:17,114 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7300, 2.4726, 3.5439, 2.7400, 3.4107, 2.6903, 2.3675, 2.2123], + device='cuda:2'), covar=tensor([0.4596, 0.4569, 0.1443, 0.3585, 0.2211, 0.2666, 0.1732, 0.4970], + device='cuda:2'), in_proj_covar=tensor([0.0927, 0.0943, 0.0777, 0.0905, 0.0972, 0.0860, 0.0722, 0.0802], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:39:20,639 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4680, 1.5341, 2.0694, 1.2390, 1.3912, 1.6302, 1.4489, 1.2915], + device='cuda:2'), covar=tensor([0.1971, 0.2506, 0.1055, 0.4659, 0.2072, 0.3448, 0.2342, 0.2439], + device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0581, 0.0551, 0.0628, 0.0637, 0.0586, 0.0518, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:39:23,967 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:27,300 INFO [train.py:901] (2/4) Epoch 19, batch 1500, loss[loss=0.2027, simple_loss=0.2645, pruned_loss=0.07045, over 7251.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2904, pruned_loss=0.06503, over 1612911.13 frames. ], batch size: 16, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:39:50,166 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8808, 3.8606, 3.4789, 1.6354, 3.4177, 3.4238, 3.4639, 3.2747], + device='cuda:2'), covar=tensor([0.0907, 0.0642, 0.1233, 0.5027, 0.0997, 0.1093, 0.1369, 0.0897], + device='cuda:2'), in_proj_covar=tensor([0.0514, 0.0422, 0.0427, 0.0524, 0.0415, 0.0426, 0.0407, 0.0374], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:39:53,154 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 22:40:03,265 INFO [train.py:901] (2/4) Epoch 19, batch 1550, loss[loss=0.2395, simple_loss=0.3203, pruned_loss=0.07931, over 8526.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.292, pruned_loss=0.06584, over 1615913.80 frames. ], batch size: 31, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:22,638 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:40:28,398 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.434e+02 2.984e+02 3.600e+02 8.495e+02, threshold=5.968e+02, percent-clipped=1.0 +2023-02-06 22:40:39,456 INFO [train.py:901] (2/4) Epoch 19, batch 1600, loss[loss=0.2004, simple_loss=0.2823, pruned_loss=0.05926, over 8037.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2926, pruned_loss=0.06552, over 1617226.84 frames. 
], batch size: 22, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:46,380 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:41:14,548 INFO [train.py:901] (2/4) Epoch 19, batch 1650, loss[loss=0.2483, simple_loss=0.3281, pruned_loss=0.08423, over 8482.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2928, pruned_loss=0.06583, over 1616250.79 frames. ], batch size: 28, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:41:40,975 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.354e+02 2.709e+02 3.474e+02 7.081e+02, threshold=5.418e+02, percent-clipped=1.0 +2023-02-06 22:41:51,229 INFO [train.py:901] (2/4) Epoch 19, batch 1700, loss[loss=0.1946, simple_loss=0.2637, pruned_loss=0.06276, over 7423.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2919, pruned_loss=0.06522, over 1613281.97 frames. ], batch size: 17, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:42:00,577 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147206.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:25,936 INFO [train.py:901] (2/4) Epoch 19, batch 1750, loss[loss=0.2214, simple_loss=0.297, pruned_loss=0.07296, over 8506.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2921, pruned_loss=0.06533, over 1614899.63 frames. ], batch size: 26, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:42:37,955 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:40,819 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9378, 3.6435, 2.0838, 2.7817, 2.7605, 2.0194, 2.7202, 2.9199], + device='cuda:2'), covar=tensor([0.1621, 0.0366, 0.1156, 0.0719, 0.0798, 0.1369, 0.0921, 0.0989], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0236, 0.0326, 0.0306, 0.0301, 0.0332, 0.0343, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:42:51,054 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.529e+02 3.043e+02 3.569e+02 7.736e+02, threshold=6.085e+02, percent-clipped=5.0 +2023-02-06 22:43:03,023 INFO [train.py:901] (2/4) Epoch 19, batch 1800, loss[loss=0.2194, simple_loss=0.2869, pruned_loss=0.07594, over 7798.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2914, pruned_loss=0.06488, over 1613555.84 frames. ], batch size: 19, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:22,551 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:24,598 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,345 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:37,467 INFO [train.py:901] (2/4) Epoch 19, batch 1850, loss[loss=0.1949, simple_loss=0.2669, pruned_loss=0.0614, over 7648.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2914, pruned_loss=0.0652, over 1612520.70 frames. 
], batch size: 19, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:41,692 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:48,571 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:00,461 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1328, 1.6841, 4.3431, 1.5860, 3.8214, 3.6624, 3.9016, 3.7667], + device='cuda:2'), covar=tensor([0.0603, 0.4055, 0.0525, 0.3967, 0.1084, 0.0953, 0.0637, 0.0700], + device='cuda:2'), in_proj_covar=tensor([0.0600, 0.0634, 0.0674, 0.0612, 0.0691, 0.0592, 0.0589, 0.0651], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:44:02,407 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.300e+02 2.823e+02 3.606e+02 1.006e+03, threshold=5.645e+02, percent-clipped=2.0 +2023-02-06 22:44:06,242 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 22:44:06,694 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:12,515 INFO [train.py:901] (2/4) Epoch 19, batch 1900, loss[loss=0.2275, simple_loss=0.3163, pruned_loss=0.06931, over 8359.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2917, pruned_loss=0.06516, over 1617280.85 frames. ], batch size: 24, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:37,306 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:44,949 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 22:44:49,673 INFO [train.py:901] (2/4) Epoch 19, batch 1950, loss[loss=0.2281, simple_loss=0.303, pruned_loss=0.07656, over 7928.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2912, pruned_loss=0.06453, over 1619792.12 frames. ], batch size: 20, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:51,462 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 22:44:55,960 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:56,489 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 22:45:13,741 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.289e+02 2.862e+02 3.830e+02 8.439e+02, threshold=5.724e+02, percent-clipped=6.0 +2023-02-06 22:45:15,253 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 22:45:23,813 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-06 22:45:24,867 INFO [train.py:901] (2/4) Epoch 19, batch 2000, loss[loss=0.2174, simple_loss=0.3038, pruned_loss=0.06552, over 8477.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2919, pruned_loss=0.06464, over 1621970.69 frames. ], batch size: 27, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:01,793 INFO [train.py:901] (2/4) Epoch 19, batch 2050, loss[loss=0.184, simple_loss=0.2546, pruned_loss=0.05674, over 7676.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2914, pruned_loss=0.06436, over 1618551.66 frames. 
], batch size: 18, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:25,336 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147577.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:25,778 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.500e+02 2.918e+02 3.445e+02 6.516e+02, threshold=5.836e+02, percent-clipped=2.0 +2023-02-06 22:46:35,051 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4243, 2.4373, 1.6362, 2.1735, 1.9733, 1.4786, 1.9722, 1.9180], + device='cuda:2'), covar=tensor([0.1705, 0.0370, 0.1340, 0.0642, 0.0796, 0.1559, 0.1042, 0.1050], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0233, 0.0325, 0.0302, 0.0300, 0.0330, 0.0340, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 22:46:36,262 INFO [train.py:901] (2/4) Epoch 19, batch 2100, loss[loss=0.2688, simple_loss=0.3324, pruned_loss=0.1027, over 8244.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2923, pruned_loss=0.06517, over 1619115.60 frames. ], batch size: 24, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:42,937 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:43,437 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:12,100 INFO [train.py:901] (2/4) Epoch 19, batch 2150, loss[loss=0.2246, simple_loss=0.3215, pruned_loss=0.06384, over 8327.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2917, pruned_loss=0.06481, over 1616879.67 frames. ], batch size: 25, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:47:33,572 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:36,909 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:37,414 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.398e+02 3.174e+02 3.852e+02 9.466e+02, threshold=6.348e+02, percent-clipped=6.0 +2023-02-06 22:47:46,173 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-06 22:47:47,715 INFO [train.py:901] (2/4) Epoch 19, batch 2200, loss[loss=0.1752, simple_loss=0.2566, pruned_loss=0.04689, over 8230.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2904, pruned_loss=0.06443, over 1611680.05 frames. ], batch size: 22, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:47:47,898 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0855, 1.6516, 3.1084, 1.4900, 2.1784, 3.3378, 3.4537, 2.8135], + device='cuda:2'), covar=tensor([0.0975, 0.1510, 0.0369, 0.1996, 0.1018, 0.0253, 0.0640, 0.0571], + device='cuda:2'), in_proj_covar=tensor([0.0284, 0.0314, 0.0279, 0.0306, 0.0296, 0.0258, 0.0398, 0.0298], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 22:48:04,732 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:21,941 INFO [train.py:901] (2/4) Epoch 19, batch 2250, loss[loss=0.2123, simple_loss=0.3011, pruned_loss=0.06175, over 8194.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2917, pruned_loss=0.06499, over 1611743.87 frames. 
], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:41,110 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:46,060 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 22:48:47,097 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.392e+02 3.089e+02 3.849e+02 9.613e+02, threshold=6.179e+02, percent-clipped=2.0 +2023-02-06 22:48:53,337 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:56,991 INFO [train.py:901] (2/4) Epoch 19, batch 2300, loss[loss=0.2065, simple_loss=0.2935, pruned_loss=0.05976, over 8501.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2914, pruned_loss=0.06502, over 1611062.73 frames. ], batch size: 26, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:58,973 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147796.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:24,349 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:30,994 INFO [train.py:901] (2/4) Epoch 19, batch 2350, loss[loss=0.2206, simple_loss=0.2987, pruned_loss=0.07125, over 8463.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2911, pruned_loss=0.06439, over 1615741.02 frames. ], batch size: 29, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:49:53,255 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:55,895 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.451e+02 2.984e+02 3.607e+02 1.132e+03, threshold=5.968e+02, percent-clipped=4.0 +2023-02-06 22:50:01,542 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:07,522 INFO [train.py:901] (2/4) Epoch 19, batch 2400, loss[loss=0.2223, simple_loss=0.3125, pruned_loss=0.0661, over 8498.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2921, pruned_loss=0.06503, over 1618970.25 frames. ], batch size: 28, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:50:20,044 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147911.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:24,948 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6475, 1.5686, 2.2744, 1.6065, 1.2301, 2.2613, 0.3276, 1.3618], + device='cuda:2'), covar=tensor([0.1735, 0.1442, 0.0367, 0.1344, 0.3006, 0.0429, 0.2467, 0.1544], + device='cuda:2'), in_proj_covar=tensor([0.0185, 0.0194, 0.0123, 0.0220, 0.0269, 0.0131, 0.0169, 0.0186], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 22:50:32,261 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:41,465 INFO [train.py:901] (2/4) Epoch 19, batch 2450, loss[loss=0.1973, simple_loss=0.269, pruned_loss=0.06278, over 7411.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2922, pruned_loss=0.06556, over 1616006.53 frames. 
], batch size: 17, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:50:56,377 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7051, 2.7700, 2.4300, 4.0089, 1.8877, 2.1625, 2.5455, 3.2001], + device='cuda:2'), covar=tensor([0.0578, 0.0782, 0.0778, 0.0203, 0.0954, 0.1073, 0.0914, 0.0687], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0197, 0.0249, 0.0212, 0.0207, 0.0246, 0.0253, 0.0212], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 22:51:03,067 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:04,979 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8379, 1.4593, 4.2371, 1.7450, 3.3911, 3.2712, 3.8056, 3.7428], + device='cuda:2'), covar=tensor([0.1281, 0.6040, 0.1134, 0.4811, 0.2111, 0.1972, 0.1025, 0.1009], + device='cuda:2'), in_proj_covar=tensor([0.0597, 0.0627, 0.0676, 0.0607, 0.0687, 0.0590, 0.0586, 0.0646], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:51:05,437 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.599e+02 2.990e+02 3.557e+02 6.406e+02, threshold=5.981e+02, percent-clipped=1.0 +2023-02-06 22:51:15,598 INFO [train.py:901] (2/4) Epoch 19, batch 2500, loss[loss=0.201, simple_loss=0.2887, pruned_loss=0.05663, over 8081.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2918, pruned_loss=0.06542, over 1612085.02 frames. ], batch size: 21, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:20,626 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:37,736 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:52,313 INFO [train.py:901] (2/4) Epoch 19, batch 2550, loss[loss=0.2142, simple_loss=0.2902, pruned_loss=0.06915, over 8575.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.291, pruned_loss=0.06473, over 1611953.14 frames. ], batch size: 31, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:52,574 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:09,290 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:15,629 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.379e+02 2.867e+02 3.516e+02 7.047e+02, threshold=5.734e+02, percent-clipped=3.0 +2023-02-06 22:52:23,195 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8821, 1.6261, 2.0514, 1.8131, 2.0577, 1.9392, 1.7542, 0.7631], + device='cuda:2'), covar=tensor([0.5627, 0.4682, 0.1903, 0.3235, 0.2238, 0.2937, 0.1860, 0.5024], + device='cuda:2'), in_proj_covar=tensor([0.0932, 0.0952, 0.0787, 0.0917, 0.0980, 0.0868, 0.0730, 0.0808], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:52:26,279 INFO [train.py:901] (2/4) Epoch 19, batch 2600, loss[loss=0.2262, simple_loss=0.2904, pruned_loss=0.08095, over 7521.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2905, pruned_loss=0.06521, over 1608211.38 frames. 
], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:52:28,762 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-06 22:52:57,246 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:00,040 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:01,854 INFO [train.py:901] (2/4) Epoch 19, batch 2650, loss[loss=0.2161, simple_loss=0.3039, pruned_loss=0.06413, over 8472.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2914, pruned_loss=0.0656, over 1606210.14 frames. ], batch size: 25, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:16,795 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:18,198 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:24,869 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:25,454 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 2.384e+02 2.853e+02 3.529e+02 7.126e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 22:53:27,036 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7961, 2.3394, 3.4773, 2.0243, 1.7378, 3.4505, 0.7190, 2.1242], + device='cuda:2'), covar=tensor([0.1815, 0.1451, 0.0288, 0.1868, 0.3240, 0.0331, 0.2599, 0.1524], + device='cuda:2'), in_proj_covar=tensor([0.0185, 0.0193, 0.0123, 0.0219, 0.0270, 0.0131, 0.0169, 0.0186], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 22:53:35,180 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:35,669 INFO [train.py:901] (2/4) Epoch 19, batch 2700, loss[loss=0.1857, simple_loss=0.2774, pruned_loss=0.04702, over 8140.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.291, pruned_loss=0.06529, over 1610085.06 frames. ], batch size: 22, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:44,879 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3130, 2.0044, 2.7336, 2.2301, 2.6393, 2.1879, 2.0472, 1.8550], + device='cuda:2'), covar=tensor([0.3774, 0.4060, 0.1443, 0.2775, 0.1723, 0.2610, 0.1556, 0.3704], + device='cuda:2'), in_proj_covar=tensor([0.0936, 0.0958, 0.0790, 0.0922, 0.0986, 0.0874, 0.0734, 0.0814], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:53:54,174 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:11,924 INFO [train.py:901] (2/4) Epoch 19, batch 2750, loss[loss=0.2402, simple_loss=0.3169, pruned_loss=0.08172, over 8581.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2912, pruned_loss=0.06558, over 1610428.76 frames. 
], batch size: 31, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:32,994 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:36,056 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.484e+02 2.895e+02 4.098e+02 9.310e+02, threshold=5.790e+02, percent-clipped=8.0 +2023-02-06 22:54:45,407 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:45,919 INFO [train.py:901] (2/4) Epoch 19, batch 2800, loss[loss=0.1979, simple_loss=0.2708, pruned_loss=0.06254, over 7432.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2921, pruned_loss=0.06602, over 1611597.24 frames. ], batch size: 17, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:54,041 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148305.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:55,513 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2170, 1.8656, 2.5305, 2.0856, 2.4259, 2.1803, 1.9377, 1.2891], + device='cuda:2'), covar=tensor([0.5213, 0.4743, 0.1658, 0.3486, 0.2320, 0.2957, 0.1928, 0.4924], + device='cuda:2'), in_proj_covar=tensor([0.0932, 0.0953, 0.0784, 0.0916, 0.0981, 0.0869, 0.0730, 0.0809], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:54:58,709 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8201, 1.7722, 2.5483, 1.5486, 1.2857, 2.5250, 0.4227, 1.5542], + device='cuda:2'), covar=tensor([0.2052, 0.1413, 0.0323, 0.1536, 0.3129, 0.0379, 0.2467, 0.1526], + device='cuda:2'), in_proj_covar=tensor([0.0184, 0.0193, 0.0123, 0.0219, 0.0270, 0.0131, 0.0169, 0.0186], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 22:55:13,965 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148334.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:19,425 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9640, 1.3355, 1.7273, 1.1791, 0.9282, 1.4820, 1.7234, 1.5242], + device='cuda:2'), covar=tensor([0.0501, 0.1272, 0.1637, 0.1538, 0.0613, 0.1524, 0.0692, 0.0693], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0188, 0.0157, 0.0100, 0.0161, 0.0113, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 22:55:19,842 INFO [train.py:901] (2/4) Epoch 19, batch 2850, loss[loss=0.2452, simple_loss=0.3137, pruned_loss=0.08833, over 8338.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.292, pruned_loss=0.06548, over 1614834.00 frames. 
], batch size: 26, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:55:46,065 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.512e+02 2.931e+02 3.824e+02 7.566e+02, threshold=5.862e+02, percent-clipped=4.0 +2023-02-06 22:55:50,339 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7251, 1.6817, 4.8732, 1.8604, 4.3711, 3.9841, 4.4322, 4.2885], + device='cuda:2'), covar=tensor([0.0448, 0.4075, 0.0442, 0.3764, 0.0879, 0.1093, 0.0501, 0.0531], + device='cuda:2'), in_proj_covar=tensor([0.0591, 0.0624, 0.0669, 0.0597, 0.0676, 0.0585, 0.0580, 0.0642], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 22:55:51,897 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-06 22:55:52,987 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:55,660 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:56,117 INFO [train.py:901] (2/4) Epoch 19, batch 2900, loss[loss=0.1869, simple_loss=0.2735, pruned_loss=0.05013, over 7660.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.292, pruned_loss=0.06562, over 1609557.36 frames. ], batch size: 19, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:12,658 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:29,356 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 22:56:29,949 INFO [train.py:901] (2/4) Epoch 19, batch 2950, loss[loss=0.2199, simple_loss=0.2901, pruned_loss=0.07485, over 7807.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2927, pruned_loss=0.06613, over 1608870.17 frames. ], batch size: 19, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:32,822 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148447.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:41,585 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5768, 1.9324, 3.0919, 1.4134, 2.3549, 2.0118, 1.5953, 2.4623], + device='cuda:2'), covar=tensor([0.1800, 0.2522, 0.0893, 0.4274, 0.1747, 0.2944, 0.2141, 0.2133], + device='cuda:2'), in_proj_covar=tensor([0.0519, 0.0584, 0.0554, 0.0632, 0.0640, 0.0589, 0.0523, 0.0632], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:56:42,930 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9362, 2.4209, 4.0858, 1.7834, 3.0303, 2.3923, 2.0071, 2.9544], + device='cuda:2'), covar=tensor([0.1751, 0.2462, 0.1004, 0.4011, 0.1765, 0.2944, 0.2023, 0.2526], + device='cuda:2'), in_proj_covar=tensor([0.0518, 0.0583, 0.0554, 0.0632, 0.0639, 0.0589, 0.0522, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 22:56:54,933 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.514e+02 3.009e+02 3.973e+02 7.443e+02, threshold=6.017e+02, percent-clipped=3.0 +2023-02-06 22:57:06,344 INFO [train.py:901] (2/4) Epoch 19, batch 3000, loss[loss=0.2068, simple_loss=0.2913, pruned_loss=0.06114, over 8360.00 frames. 
], tot_loss[loss=0.2117, simple_loss=0.2919, pruned_loss=0.06571, over 1610255.81 frames. ], batch size: 24, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:57:06,344 INFO [train.py:926] (2/4) Computing validation loss +2023-02-06 22:57:22,670 INFO [train.py:935] (2/4) Epoch 19, validation: loss=0.1752, simple_loss=0.2756, pruned_loss=0.03738, over 944034.00 frames. +2023-02-06 22:57:22,671 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-06 22:57:38,584 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148516.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:57:56,926 INFO [train.py:901] (2/4) Epoch 19, batch 3050, loss[loss=0.2112, simple_loss=0.2717, pruned_loss=0.07535, over 7529.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2922, pruned_loss=0.06601, over 1611467.03 frames. ], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:00,731 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:17,727 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:21,053 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.398e+02 2.811e+02 3.727e+02 6.995e+02, threshold=5.622e+02, percent-clipped=3.0 +2023-02-06 22:58:30,233 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:32,157 INFO [train.py:901] (2/4) Epoch 19, batch 3100, loss[loss=0.1767, simple_loss=0.2644, pruned_loss=0.04453, over 8131.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2913, pruned_loss=0.06535, over 1606574.31 frames. ], batch size: 22, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:49,329 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148615.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:09,435 INFO [train.py:901] (2/4) Epoch 19, batch 3150, loss[loss=0.2008, simple_loss=0.2815, pruned_loss=0.06002, over 7919.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2915, pruned_loss=0.06503, over 1610154.16 frames. ], batch size: 20, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:59:10,323 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:13,424 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,360 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,421 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:32,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.358e+02 3.073e+02 3.824e+02 9.523e+02, threshold=6.146e+02, percent-clipped=8.0 +2023-02-06 22:59:42,404 INFO [train.py:901] (2/4) Epoch 19, batch 3200, loss[loss=0.2081, simple_loss=0.3, pruned_loss=0.05814, over 8591.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2929, pruned_loss=0.06572, over 1608510.32 frames. 
], batch size: 31, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 22:59:50,082 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1937, 1.3609, 1.5993, 1.2709, 0.7329, 1.4319, 1.2848, 1.2129], + device='cuda:2'), covar=tensor([0.0559, 0.1250, 0.1633, 0.1415, 0.0543, 0.1415, 0.0646, 0.0618], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 22:59:53,525 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1045, 1.4844, 1.6425, 1.3845, 1.0894, 1.4638, 1.9111, 1.7158], + device='cuda:2'), covar=tensor([0.0491, 0.1216, 0.1622, 0.1391, 0.0597, 0.1444, 0.0627, 0.0585], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 23:00:12,965 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:19,659 INFO [train.py:901] (2/4) Epoch 19, batch 3250, loss[loss=0.2448, simple_loss=0.3149, pruned_loss=0.08737, over 8562.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2936, pruned_loss=0.06576, over 1615792.46 frames. ], batch size: 39, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:34,071 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:35,478 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2129, 1.8852, 2.5406, 2.0684, 2.4677, 2.1815, 1.9367, 1.1844], + device='cuda:2'), covar=tensor([0.5147, 0.4674, 0.1836, 0.3680, 0.2528, 0.3004, 0.1903, 0.5225], + device='cuda:2'), in_proj_covar=tensor([0.0932, 0.0953, 0.0783, 0.0916, 0.0982, 0.0871, 0.0729, 0.0809], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 23:00:40,075 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3639, 2.2240, 3.1456, 2.4720, 3.0647, 2.3560, 2.1637, 1.8348], + device='cuda:2'), covar=tensor([0.5709, 0.5494, 0.2046, 0.3932, 0.2608, 0.3097, 0.1979, 0.5919], + device='cuda:2'), in_proj_covar=tensor([0.0930, 0.0951, 0.0782, 0.0915, 0.0980, 0.0870, 0.0728, 0.0809], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 23:00:43,259 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.443e+02 3.073e+02 4.112e+02 8.183e+02, threshold=6.146e+02, percent-clipped=4.0 +2023-02-06 23:00:52,319 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:53,591 INFO [train.py:901] (2/4) Epoch 19, batch 3300, loss[loss=0.2619, simple_loss=0.3373, pruned_loss=0.09322, over 8506.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2923, pruned_loss=0.06481, over 1618969.54 frames. ], batch size: 28, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:28,246 INFO [train.py:901] (2/4) Epoch 19, batch 3350, loss[loss=0.2022, simple_loss=0.276, pruned_loss=0.06424, over 8086.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2931, pruned_loss=0.06466, over 1618160.28 frames. 
], batch size: 21, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:41,953 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:01:53,952 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.291e+02 2.864e+02 3.449e+02 6.722e+02, threshold=5.728e+02, percent-clipped=1.0 +2023-02-06 23:02:02,990 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7730, 2.2428, 3.3610, 1.6285, 2.7046, 2.2198, 1.9372, 2.6226], + device='cuda:2'), covar=tensor([0.1619, 0.2176, 0.0710, 0.3891, 0.1507, 0.2683, 0.1894, 0.2009], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0581, 0.0552, 0.0630, 0.0638, 0.0588, 0.0521, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 23:02:04,168 INFO [train.py:901] (2/4) Epoch 19, batch 3400, loss[loss=0.2256, simple_loss=0.3077, pruned_loss=0.07181, over 8192.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2929, pruned_loss=0.06535, over 1619350.33 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:02:13,156 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:02:32,358 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1858, 1.8113, 2.3635, 2.0051, 2.3331, 2.1502, 1.9208, 1.1864], + device='cuda:2'), covar=tensor([0.4781, 0.4238, 0.1745, 0.3427, 0.2146, 0.2775, 0.1830, 0.4707], + device='cuda:2'), in_proj_covar=tensor([0.0930, 0.0951, 0.0785, 0.0917, 0.0979, 0.0870, 0.0729, 0.0810], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-06 23:02:38,071 INFO [train.py:901] (2/4) Epoch 19, batch 3450, loss[loss=0.2358, simple_loss=0.3229, pruned_loss=0.07441, over 8463.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2927, pruned_loss=0.0652, over 1618963.96 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:01,930 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:04,398 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.340e+02 2.956e+02 3.727e+02 1.104e+03, threshold=5.912e+02, percent-clipped=3.0 +2023-02-06 23:03:14,140 INFO [train.py:901] (2/4) Epoch 19, batch 3500, loss[loss=0.1949, simple_loss=0.264, pruned_loss=0.06291, over 7207.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2945, pruned_loss=0.06633, over 1617963.36 frames. ], batch size: 16, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:28,308 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:33,359 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:35,920 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 23:03:48,890 INFO [train.py:901] (2/4) Epoch 19, batch 3550, loss[loss=0.2448, simple_loss=0.327, pruned_loss=0.08129, over 8506.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.295, pruned_loss=0.06591, over 1624692.81 frames. 
], batch size: 28, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:50,372 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,070 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,636 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.461e+02 3.087e+02 3.824e+02 7.251e+02, threshold=6.175e+02, percent-clipped=6.0 +2023-02-06 23:04:25,637 INFO [train.py:901] (2/4) Epoch 19, batch 3600, loss[loss=0.1997, simple_loss=0.2753, pruned_loss=0.06204, over 8023.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2941, pruned_loss=0.06575, over 1622502.55 frames. ], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:04:49,822 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:59,721 INFO [train.py:901] (2/4) Epoch 19, batch 3650, loss[loss=0.2345, simple_loss=0.2953, pruned_loss=0.08679, over 8079.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2947, pruned_loss=0.06629, over 1621608.77 frames. ], batch size: 21, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:13,232 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:24,380 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.296e+02 2.731e+02 3.488e+02 6.725e+02, threshold=5.462e+02, percent-clipped=1.0 +2023-02-06 23:05:30,733 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:35,220 INFO [train.py:901] (2/4) Epoch 19, batch 3700, loss[loss=0.2015, simple_loss=0.2876, pruned_loss=0.05774, over 8246.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2938, pruned_loss=0.06584, over 1617606.43 frames. ], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:35,409 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:38,051 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:06:02,788 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149231.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:10,460 INFO [train.py:901] (2/4) Epoch 19, batch 3750, loss[loss=0.1898, simple_loss=0.2636, pruned_loss=0.05804, over 7439.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.293, pruned_loss=0.06574, over 1609191.11 frames. 
], batch size: 17, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:06:19,378 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:34,566 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 2.530e+02 3.028e+02 3.831e+02 7.632e+02, threshold=6.056e+02, percent-clipped=6.0 +2023-02-06 23:06:38,967 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9972, 3.6616, 2.2844, 2.8766, 2.8803, 1.9410, 2.7587, 3.0213], + device='cuda:2'), covar=tensor([0.1749, 0.0347, 0.1130, 0.0769, 0.0713, 0.1481, 0.1197, 0.1061], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0241, 0.0330, 0.0307, 0.0303, 0.0335, 0.0345, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 23:06:44,222 INFO [train.py:901] (2/4) Epoch 19, batch 3800, loss[loss=0.2027, simple_loss=0.295, pruned_loss=0.05514, over 8353.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.293, pruned_loss=0.06504, over 1615689.58 frames. ], batch size: 24, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:06:44,676 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 23:07:20,712 INFO [train.py:901] (2/4) Epoch 19, batch 3850, loss[loss=0.2276, simple_loss=0.3036, pruned_loss=0.07581, over 8580.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2912, pruned_loss=0.06418, over 1614248.93 frames. ], batch size: 34, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:42,416 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 23:07:45,095 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.409e+02 2.948e+02 3.728e+02 6.848e+02, threshold=5.896e+02, percent-clipped=3.0 +2023-02-06 23:07:48,713 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:07:54,635 INFO [train.py:901] (2/4) Epoch 19, batch 3900, loss[loss=0.2098, simple_loss=0.2878, pruned_loss=0.06586, over 8078.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2915, pruned_loss=0.06434, over 1614205.71 frames. ], batch size: 21, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:08:06,570 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:10,670 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1316, 1.8469, 1.9092, 1.7851, 1.2809, 1.8552, 2.0662, 1.8885], + device='cuda:2'), covar=tensor([0.0555, 0.0971, 0.1333, 0.1107, 0.0643, 0.1129, 0.0671, 0.0495], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0188, 0.0157, 0.0099, 0.0160, 0.0112, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 23:08:31,939 INFO [train.py:901] (2/4) Epoch 19, batch 3950, loss[loss=0.2054, simple_loss=0.289, pruned_loss=0.06093, over 7810.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2919, pruned_loss=0.06449, over 1616131.98 frames. 
], batch size: 20, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:08:36,330 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:53,036 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:56,238 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.497e+02 2.881e+02 4.050e+02 6.266e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 23:09:05,730 INFO [train.py:901] (2/4) Epoch 19, batch 4000, loss[loss=0.2241, simple_loss=0.3011, pruned_loss=0.07349, over 8698.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2916, pruned_loss=0.06475, over 1616280.80 frames. ], batch size: 34, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:09:11,133 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 23:09:13,966 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9124, 1.7574, 6.0799, 2.3578, 5.3972, 5.0521, 5.4881, 5.4073], + device='cuda:2'), covar=tensor([0.0454, 0.4913, 0.0346, 0.3779, 0.1018, 0.0839, 0.0548, 0.0505], + device='cuda:2'), in_proj_covar=tensor([0.0599, 0.0635, 0.0674, 0.0608, 0.0687, 0.0592, 0.0586, 0.0650], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 23:09:32,420 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:09:35,725 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1726, 1.0676, 1.2843, 1.1062, 0.9649, 1.3027, 0.0766, 0.8972], + device='cuda:2'), covar=tensor([0.1756, 0.1487, 0.0465, 0.0828, 0.3177, 0.0536, 0.2435, 0.1360], + device='cuda:2'), in_proj_covar=tensor([0.0184, 0.0194, 0.0124, 0.0222, 0.0271, 0.0132, 0.0171, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 23:09:40,142 INFO [train.py:901] (2/4) Epoch 19, batch 4050, loss[loss=0.2046, simple_loss=0.2956, pruned_loss=0.05682, over 8446.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2908, pruned_loss=0.06457, over 1618129.41 frames. ], batch size: 29, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:03,202 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 23:10:05,803 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.470e+02 3.003e+02 4.246e+02 8.728e+02, threshold=6.007e+02, percent-clipped=8.0 +2023-02-06 23:10:08,589 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:10:12,316 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 23:10:15,178 INFO [train.py:901] (2/4) Epoch 19, batch 4100, loss[loss=0.2223, simple_loss=0.3037, pruned_loss=0.07046, over 8334.00 frames. ], tot_loss[loss=0.209, simple_loss=0.29, pruned_loss=0.06406, over 1615990.84 frames. 
], batch size: 25, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:39,673 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6935, 1.3361, 1.6339, 1.1545, 0.8522, 1.3932, 1.4332, 1.3644], + device='cuda:2'), covar=tensor([0.0526, 0.1324, 0.1717, 0.1535, 0.0601, 0.1540, 0.0721, 0.0659], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0161, 0.0113, 0.0140], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-06 23:10:49,871 INFO [train.py:901] (2/4) Epoch 19, batch 4150, loss[loss=0.2564, simple_loss=0.3242, pruned_loss=0.09425, over 7040.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2906, pruned_loss=0.06431, over 1615243.69 frames. ], batch size: 71, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:11:16,655 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.273e+02 2.791e+02 3.594e+02 5.057e+02, threshold=5.582e+02, percent-clipped=0.0 +2023-02-06 23:11:26,106 INFO [train.py:901] (2/4) Epoch 19, batch 4200, loss[loss=0.1841, simple_loss=0.2691, pruned_loss=0.04955, over 8240.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2898, pruned_loss=0.06371, over 1611726.02 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:11:36,335 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 23:11:36,601 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 23:11:59,497 INFO [train.py:901] (2/4) Epoch 19, batch 4250, loss[loss=0.1903, simple_loss=0.2657, pruned_loss=0.05743, over 7546.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2897, pruned_loss=0.06364, over 1614973.38 frames. ], batch size: 18, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:12:00,922 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 23:12:14,489 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149764.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:12:25,320 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 2.444e+02 3.025e+02 3.928e+02 1.033e+03, threshold=6.050e+02, percent-clipped=5.0 +2023-02-06 23:12:35,581 INFO [train.py:901] (2/4) Epoch 19, batch 4300, loss[loss=0.2339, simple_loss=0.3215, pruned_loss=0.07314, over 8104.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2907, pruned_loss=0.06403, over 1618510.90 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:12:42,699 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0957, 3.9912, 3.7276, 2.1090, 3.5944, 3.6319, 3.7182, 3.4384], + device='cuda:2'), covar=tensor([0.0797, 0.0635, 0.1065, 0.4335, 0.0872, 0.1084, 0.1286, 0.1008], + device='cuda:2'), in_proj_covar=tensor([0.0518, 0.0430, 0.0430, 0.0533, 0.0420, 0.0437, 0.0415, 0.0378], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 23:13:10,090 INFO [train.py:901] (2/4) Epoch 19, batch 4350, loss[loss=0.199, simple_loss=0.2812, pruned_loss=0.05846, over 7975.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2906, pruned_loss=0.06386, over 1620199.98 frames. ], batch size: 21, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:33,154 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 23:13:33,238 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:13:35,197 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.416e+02 2.972e+02 3.761e+02 1.184e+03, threshold=5.944e+02, percent-clipped=4.0 +2023-02-06 23:13:39,040 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 23:13:44,579 INFO [train.py:901] (2/4) Epoch 19, batch 4400, loss[loss=0.263, simple_loss=0.3299, pruned_loss=0.09803, over 8239.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2913, pruned_loss=0.0644, over 1616477.77 frames. ], batch size: 24, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:50,363 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1820, 2.0936, 1.6530, 1.9767, 1.7646, 1.3964, 1.5969, 1.5848], + device='cuda:2'), covar=tensor([0.1358, 0.0459, 0.1306, 0.0510, 0.0707, 0.1617, 0.0950, 0.0903], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0238, 0.0329, 0.0306, 0.0301, 0.0333, 0.0343, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 23:13:53,807 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6772, 2.2512, 4.0895, 1.4511, 3.0946, 2.2459, 1.8282, 2.9774], + device='cuda:2'), covar=tensor([0.1977, 0.2627, 0.0872, 0.4659, 0.1753, 0.3216, 0.2298, 0.2323], + device='cuda:2'), in_proj_covar=tensor([0.0522, 0.0590, 0.0560, 0.0638, 0.0645, 0.0595, 0.0531, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 23:14:09,977 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:14,611 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 23:14:20,891 INFO [train.py:901] (2/4) Epoch 19, batch 4450, loss[loss=0.2231, simple_loss=0.3191, pruned_loss=0.0636, over 8325.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.292, pruned_loss=0.06483, over 1615763.25 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:44,888 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.522e+02 2.925e+02 4.193e+02 1.036e+03, threshold=5.849e+02, percent-clipped=7.0 +2023-02-06 23:14:53,315 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:54,476 INFO [train.py:901] (2/4) Epoch 19, batch 4500, loss[loss=0.211, simple_loss=0.3044, pruned_loss=0.05881, over 8492.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2914, pruned_loss=0.06406, over 1617831.13 frames. ], batch size: 28, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:08,401 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 23:15:08,633 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6418, 1.9938, 3.2707, 1.4572, 2.4700, 2.0659, 1.6545, 2.3888], + device='cuda:2'), covar=tensor([0.1891, 0.2489, 0.0820, 0.4440, 0.1809, 0.3068, 0.2250, 0.2233], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0589, 0.0558, 0.0637, 0.0644, 0.0593, 0.0530, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 23:15:19,530 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9540, 1.6417, 3.2563, 1.4534, 2.1575, 3.5476, 3.6864, 2.9875], + device='cuda:2'), covar=tensor([0.1138, 0.1679, 0.0345, 0.2129, 0.1105, 0.0249, 0.0492, 0.0604], + device='cuda:2'), in_proj_covar=tensor([0.0290, 0.0316, 0.0286, 0.0312, 0.0302, 0.0265, 0.0406, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 23:15:31,625 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:15:32,128 INFO [train.py:901] (2/4) Epoch 19, batch 4550, loss[loss=0.1933, simple_loss=0.2856, pruned_loss=0.0505, over 8295.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2919, pruned_loss=0.06427, over 1617459.88 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:51,857 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4581, 2.7003, 1.6511, 2.3654, 2.0579, 1.3512, 1.9413, 2.1458], + device='cuda:2'), covar=tensor([0.1748, 0.0413, 0.1459, 0.0671, 0.0936, 0.1888, 0.1330, 0.1073], + device='cuda:2'), in_proj_covar=tensor([0.0348, 0.0236, 0.0326, 0.0305, 0.0300, 0.0330, 0.0341, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-06 23:15:56,361 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.399e+02 2.811e+02 3.428e+02 5.502e+02, threshold=5.622e+02, percent-clipped=0.0 +2023-02-06 23:16:05,764 INFO [train.py:901] (2/4) Epoch 19, batch 4600, loss[loss=0.1894, simple_loss=0.2724, pruned_loss=0.0532, over 8617.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2912, pruned_loss=0.06395, over 1618771.62 frames. ], batch size: 34, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:16:08,470 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:16:15,963 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150108.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:16:29,379 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3168, 2.8809, 2.2251, 3.9470, 1.7825, 2.0308, 2.4385, 2.9941], + device='cuda:2'), covar=tensor([0.0774, 0.0728, 0.0870, 0.0263, 0.1087, 0.1285, 0.1040, 0.0755], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0197, 0.0246, 0.0212, 0.0205, 0.0247, 0.0252, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-06 23:16:41,494 INFO [train.py:901] (2/4) Epoch 19, batch 4650, loss[loss=0.2215, simple_loss=0.3129, pruned_loss=0.065, over 8494.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2911, pruned_loss=0.06411, over 1617485.79 frames. 
], batch size: 26, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:17:06,556 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.474e+02 2.856e+02 3.464e+02 8.049e+02, threshold=5.712e+02, percent-clipped=3.0 +2023-02-06 23:17:16,089 INFO [train.py:901] (2/4) Epoch 19, batch 4700, loss[loss=0.1797, simple_loss=0.2488, pruned_loss=0.05532, over 7429.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.06453, over 1617156.30 frames. ], batch size: 17, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:30,094 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 23:17:36,661 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150223.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:17:50,830 INFO [train.py:901] (2/4) Epoch 19, batch 4750, loss[loss=0.2178, simple_loss=0.3024, pruned_loss=0.06661, over 8331.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2919, pruned_loss=0.06507, over 1614691.20 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:53,806 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:05,658 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8801, 1.9618, 2.5472, 1.6947, 1.4021, 2.5522, 0.5129, 1.5687], + device='cuda:2'), covar=tensor([0.1582, 0.1138, 0.0296, 0.1364, 0.2713, 0.0342, 0.2239, 0.1386], + device='cuda:2'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0223, 0.0272, 0.0133, 0.0170, 0.0189], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 23:18:12,327 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:13,462 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 23:18:15,517 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 23:18:16,856 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.315e+02 2.829e+02 3.523e+02 6.730e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-06 23:18:25,824 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:26,369 INFO [train.py:901] (2/4) Epoch 19, batch 4800, loss[loss=0.2636, simple_loss=0.3255, pruned_loss=0.1009, over 6519.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2907, pruned_loss=0.06455, over 1611040.65 frames. ], batch size: 71, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:18:29,962 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:46,555 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:19:00,012 INFO [train.py:901] (2/4) Epoch 19, batch 4850, loss[loss=0.2478, simple_loss=0.3212, pruned_loss=0.08719, over 8324.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2907, pruned_loss=0.06472, over 1608152.08 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:19:05,339 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 23:19:10,226 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3975, 4.3336, 3.9182, 1.8649, 3.8426, 3.9606, 3.9739, 3.6498], + device='cuda:2'), covar=tensor([0.0810, 0.0564, 0.1153, 0.5085, 0.0885, 0.1036, 0.1277, 0.0924], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0425, 0.0427, 0.0528, 0.0414, 0.0432, 0.0411, 0.0374], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-06 23:19:27,011 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.476e+02 2.899e+02 3.621e+02 6.951e+02, threshold=5.799e+02, percent-clipped=6.0 +2023-02-06 23:19:32,496 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1616, 1.0669, 1.2914, 1.0516, 0.9918, 1.3258, 0.0659, 0.9057], + device='cuda:2'), covar=tensor([0.1728, 0.1310, 0.0476, 0.0844, 0.2479, 0.0557, 0.2249, 0.1295], + device='cuda:2'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0221, 0.0269, 0.0132, 0.0169, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 23:19:36,180 INFO [train.py:901] (2/4) Epoch 19, batch 4900, loss[loss=0.2398, simple_loss=0.3234, pruned_loss=0.0781, over 8588.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2913, pruned_loss=0.06497, over 1612258.95 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:07,720 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:20:09,003 INFO [train.py:901] (2/4) Epoch 19, batch 4950, loss[loss=0.2112, simple_loss=0.2901, pruned_loss=0.06615, over 8552.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2917, pruned_loss=0.06556, over 1614117.29 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:33,696 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.356e+02 2.775e+02 3.573e+02 1.033e+03, threshold=5.550e+02, percent-clipped=4.0 +2023-02-06 23:20:33,934 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150479.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:20:36,639 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9651, 2.4782, 3.7797, 1.9894, 1.9248, 3.6501, 0.8244, 2.1660], + device='cuda:2'), covar=tensor([0.1393, 0.1188, 0.0193, 0.1840, 0.2647, 0.0284, 0.2332, 0.1439], + device='cuda:2'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0222, 0.0269, 0.0132, 0.0169, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-06 23:20:43,979 INFO [train.py:901] (2/4) Epoch 19, batch 5000, loss[loss=0.1949, simple_loss=0.2829, pruned_loss=0.05342, over 8316.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2929, pruned_loss=0.06643, over 1614767.68 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:51,583 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0 +2023-02-06 23:20:52,082 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150504.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:21:17,800 INFO [train.py:901] (2/4) Epoch 19, batch 5050, loss[loss=0.1886, simple_loss=0.2771, pruned_loss=0.04999, over 8139.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2934, pruned_loss=0.06612, over 1617961.64 frames. 
], batch size: 22, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:21:25,940 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:26,620 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:40,934 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 23:21:41,605 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.501e+02 3.000e+02 3.972e+02 7.212e+02, threshold=5.999e+02, percent-clipped=3.0 +2023-02-06 23:21:51,778 INFO [train.py:901] (2/4) Epoch 19, batch 5100, loss[loss=0.2487, simple_loss=0.3248, pruned_loss=0.08635, over 8598.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2928, pruned_loss=0.06593, over 1616221.93 frames. ], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:23,317 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:22:27,867 INFO [train.py:901] (2/4) Epoch 19, batch 5150, loss[loss=0.2009, simple_loss=0.2877, pruned_loss=0.05712, over 7969.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2937, pruned_loss=0.06612, over 1615732.03 frames. ], batch size: 21, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:49,043 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 23:22:49,427 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0330, 1.2618, 1.2170, 0.6891, 1.2317, 1.0264, 0.0638, 1.2086], + device='cuda:2'), covar=tensor([0.0409, 0.0361, 0.0321, 0.0528, 0.0390, 0.0936, 0.0766, 0.0335], + device='cuda:2'), in_proj_covar=tensor([0.0439, 0.0379, 0.0330, 0.0435, 0.0363, 0.0525, 0.0383, 0.0405], + device='cuda:2'), out_proj_covar=tensor([1.1883e-04, 1.0025e-04, 8.7162e-05, 1.1536e-04, 9.6351e-05, 1.4964e-04, + 1.0370e-04, 1.0816e-04], device='cuda:2') +2023-02-06 23:22:51,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.510e+02 3.215e+02 4.688e+02 9.098e+02, threshold=6.429e+02, percent-clipped=11.0 +2023-02-06 23:23:01,329 INFO [train.py:901] (2/4) Epoch 19, batch 5200, loss[loss=0.2215, simple_loss=0.2966, pruned_loss=0.07325, over 8421.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2933, pruned_loss=0.06616, over 1611355.97 frames. ], batch size: 49, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:29,363 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1042, 1.4783, 3.2785, 1.5075, 2.3658, 3.5933, 3.6909, 3.1131], + device='cuda:2'), covar=tensor([0.0982, 0.1649, 0.0354, 0.1939, 0.0965, 0.0225, 0.0496, 0.0511], + device='cuda:2'), in_proj_covar=tensor([0.0289, 0.0316, 0.0286, 0.0312, 0.0301, 0.0264, 0.0405, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-06 23:23:38,104 INFO [train.py:901] (2/4) Epoch 19, batch 5250, loss[loss=0.1708, simple_loss=0.2589, pruned_loss=0.04131, over 8321.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2933, pruned_loss=0.06621, over 1610738.81 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:40,636 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25
+2023-02-06 23:23:42,649 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:23:43,385 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150751.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:24:01,539 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.565e+02 3.080e+02 4.191e+02 1.354e+03, threshold=6.160e+02, percent-clipped=9.0
+2023-02-06 23:24:03,321 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.78 vs. limit=5.0
+2023-02-06 23:24:10,893 INFO [train.py:901] (2/4) Epoch 19, batch 5300, loss[loss=0.2321, simple_loss=0.3146, pruned_loss=0.07478, over 8517.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2942, pruned_loss=0.06687, over 1613681.55 frames. ], batch size: 49, lr: 3.98e-03, grad_scale: 8.0
+2023-02-06 23:24:23,654 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150812.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:24:29,036 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0965, 1.6560, 1.7415, 1.4885, 0.9860, 1.5599, 1.7892, 1.6294],
+ device='cuda:2'), covar=tensor([0.0468, 0.1093, 0.1536, 0.1337, 0.0549, 0.1314, 0.0596, 0.0610],
+ device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0158, 0.0099, 0.0160, 0.0112, 0.0140],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 23:24:35,547 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 23:24:41,663 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150837.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:24:46,252 INFO [train.py:901] (2/4) Epoch 19, batch 5350, loss[loss=0.2243, simple_loss=0.3077, pruned_loss=0.07044, over 8463.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2945, pruned_loss=0.06737, over 1609148.09 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0
+2023-02-06 23:25:10,968 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.499e+02 2.979e+02 3.723e+02 8.863e+02, threshold=5.959e+02, percent-clipped=1.0
+2023-02-06 23:25:20,521 INFO [train.py:901] (2/4) Epoch 19, batch 5400, loss[loss=0.1744, simple_loss=0.265, pruned_loss=0.04186, over 8081.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2941, pruned_loss=0.06697, over 1611071.16 frames. ], batch size: 21, lr: 3.98e-03, grad_scale: 16.0
+2023-02-06 23:25:24,749 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150899.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:25:37,931 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150918.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:25:55,451 INFO [train.py:901] (2/4) Epoch 19, batch 5450, loss[loss=0.1998, simple_loss=0.282, pruned_loss=0.05882, over 8140.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2936, pruned_loss=0.06625, over 1612908.06 frames. ], batch size: 22, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:26:22,622 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.347e+02 2.658e+02 3.430e+02 7.604e+02, threshold=5.316e+02, percent-clipped=2.0
+2023-02-06 23:26:28,450 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 23:26:31,933 INFO [train.py:901] (2/4) Epoch 19, batch 5500, loss[loss=0.2429, simple_loss=0.3112, pruned_loss=0.08732, over 8666.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2934, pruned_loss=0.06596, over 1617126.23 frames. ], batch size: 39, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:26:33,443 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3893, 2.5293, 1.7188, 2.1825, 1.9351, 1.4459, 1.9598, 2.1517],
+ device='cuda:2'), covar=tensor([0.1899, 0.0437, 0.1404, 0.0811, 0.0935, 0.1821, 0.1156, 0.1087],
+ device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0237, 0.0327, 0.0305, 0.0301, 0.0331, 0.0341, 0.0314],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:26:41,794 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:26:46,651 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151014.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:26:58,972 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:27:06,213 INFO [train.py:901] (2/4) Epoch 19, batch 5550, loss[loss=0.2174, simple_loss=0.2977, pruned_loss=0.06849, over 8352.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2938, pruned_loss=0.06595, over 1617974.32 frames. ], batch size: 24, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:27:32,506 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.454e+02 3.027e+02 4.195e+02 6.901e+02, threshold=6.054e+02, percent-clipped=7.0
+2023-02-06 23:27:42,411 INFO [train.py:901] (2/4) Epoch 19, batch 5600, loss[loss=0.2173, simple_loss=0.3085, pruned_loss=0.06306, over 8537.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2934, pruned_loss=0.06599, over 1620192.59 frames. ], batch size: 31, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:27:43,136 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:27:47,922 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4352, 2.1354, 3.1324, 2.3470, 2.9858, 2.3998, 2.1004, 1.6951],
+ device='cuda:2'), covar=tensor([0.5373, 0.5611, 0.1852, 0.3788, 0.2471, 0.3011, 0.1995, 0.5707],
+ device='cuda:2'), in_proj_covar=tensor([0.0931, 0.0957, 0.0788, 0.0920, 0.0981, 0.0872, 0.0737, 0.0814],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 23:27:57,252 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7858, 1.9451, 4.6616, 2.4434, 2.8266, 5.2518, 5.3249, 4.5174],
+ device='cuda:2'), covar=tensor([0.1005, 0.1683, 0.0225, 0.1625, 0.0941, 0.0180, 0.0402, 0.0586],
+ device='cuda:2'), in_proj_covar=tensor([0.0292, 0.0320, 0.0288, 0.0313, 0.0302, 0.0265, 0.0407, 0.0305],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:27:58,841 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 23:28:15,302 INFO [train.py:901] (2/4) Epoch 19, batch 5650, loss[loss=0.2048, simple_loss=0.2842, pruned_loss=0.06274, over 7801.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2936, pruned_loss=0.0654, over 1618064.09 frames. ], batch size: 20, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:28:31,560 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 23:28:39,658 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.685e+02 3.149e+02 3.866e+02 8.044e+02, threshold=6.298e+02, percent-clipped=3.0
+2023-02-06 23:28:50,405 INFO [train.py:901] (2/4) Epoch 19, batch 5700, loss[loss=0.2218, simple_loss=0.3055, pruned_loss=0.06905, over 8345.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2934, pruned_loss=0.06565, over 1614368.98 frames. ], batch size: 26, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:28:57,636 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151202.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:29:02,317 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151209.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:29:20,368 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2848, 2.0515, 2.7794, 2.2785, 2.6891, 2.3157, 2.0383, 1.5166],
+ device='cuda:2'), covar=tensor([0.5068, 0.4953, 0.1801, 0.3411, 0.2477, 0.2719, 0.1862, 0.5238],
+ device='cuda:2'), in_proj_covar=tensor([0.0930, 0.0956, 0.0787, 0.0919, 0.0980, 0.0871, 0.0736, 0.0811],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 23:29:24,756 INFO [train.py:901] (2/4) Epoch 19, batch 5750, loss[loss=0.2054, simple_loss=0.2993, pruned_loss=0.0557, over 8473.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2928, pruned_loss=0.06532, over 1616509.74 frames. ], batch size: 28, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:29:36,100 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 23:29:37,493 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151262.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:29:43,022 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151270.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:29:48,610 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.389e+02 2.918e+02 3.727e+02 7.769e+02, threshold=5.836e+02, percent-clipped=3.0
+2023-02-06 23:29:58,855 INFO [train.py:901] (2/4) Epoch 19, batch 5800, loss[loss=0.2015, simple_loss=0.2981, pruned_loss=0.05248, over 8294.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.06502, over 1615087.57 frames. ], batch size: 23, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:30:00,347 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151295.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:30:01,708 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8015, 2.0271, 2.2273, 1.4659, 2.3525, 1.5708, 0.7383, 2.0931],
+ device='cuda:2'), covar=tensor([0.0547, 0.0342, 0.0255, 0.0523, 0.0348, 0.0794, 0.0761, 0.0254],
+ device='cuda:2'), in_proj_covar=tensor([0.0443, 0.0381, 0.0331, 0.0438, 0.0367, 0.0526, 0.0384, 0.0407],
+ device='cuda:2'), out_proj_covar=tensor([1.1993e-04, 1.0078e-04, 8.7415e-05, 1.1621e-04, 9.7324e-05, 1.5027e-04,
+ 1.0398e-04, 1.0879e-04], device='cuda:2')
+2023-02-06 23:30:04,360 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151300.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:30:16,598 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151316.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:30:34,587 INFO [train.py:901] (2/4) Epoch 19, batch 5850, loss[loss=0.1709, simple_loss=0.256, pruned_loss=0.04286, over 7664.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2919, pruned_loss=0.06488, over 1613014.17 frames. ], batch size: 19, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:30:44,232 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1951, 1.0854, 1.3194, 1.0693, 0.9320, 1.3293, 0.0512, 0.8616],
+ device='cuda:2'), covar=tensor([0.1894, 0.1407, 0.0542, 0.0911, 0.2884, 0.0609, 0.2255, 0.1246],
+ device='cuda:2'), in_proj_covar=tensor([0.0188, 0.0196, 0.0124, 0.0222, 0.0270, 0.0134, 0.0169, 0.0189],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 23:30:57,556 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151377.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:30:58,661 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.176e+02 2.714e+02 3.221e+02 1.387e+03, threshold=5.429e+02, percent-clipped=3.0
+2023-02-06 23:31:08,074 INFO [train.py:901] (2/4) Epoch 19, batch 5900, loss[loss=0.1869, simple_loss=0.2809, pruned_loss=0.04649, over 8560.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.291, pruned_loss=0.06469, over 1608039.14 frames. ], batch size: 34, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:31:44,895 INFO [train.py:901] (2/4) Epoch 19, batch 5950, loss[loss=0.2232, simple_loss=0.2917, pruned_loss=0.07732, over 7801.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2908, pruned_loss=0.06492, over 1608852.21 frames. ], batch size: 20, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:31:47,006 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5036, 2.2082, 3.2613, 2.6017, 3.0011, 2.4721, 2.2158, 1.7339],
+ device='cuda:2'), covar=tensor([0.5068, 0.4860, 0.1732, 0.3445, 0.2443, 0.2930, 0.1880, 0.5571],
+ device='cuda:2'), in_proj_covar=tensor([0.0932, 0.0961, 0.0789, 0.0922, 0.0982, 0.0876, 0.0737, 0.0817],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 23:31:59,861 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151465.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:32:09,188 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.424e+02 3.104e+02 3.851e+02 8.156e+02, threshold=6.208e+02, percent-clipped=3.0
+2023-02-06 23:32:16,816 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:32:18,557 INFO [train.py:901] (2/4) Epoch 19, batch 6000, loss[loss=0.217, simple_loss=0.2873, pruned_loss=0.07335, over 7529.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2915, pruned_loss=0.06576, over 1606616.03 frames. ], batch size: 18, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:32:18,558 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 23:32:32,010 INFO [train.py:935] (2/4) Epoch 19, validation: loss=0.1763, simple_loss=0.2764, pruned_loss=0.03805, over 944034.00 frames.
+2023-02-06 23:32:32,011 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 23:33:06,934 INFO [train.py:901] (2/4) Epoch 19, batch 6050, loss[loss=0.1713, simple_loss=0.264, pruned_loss=0.03924, over 8462.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2924, pruned_loss=0.06581, over 1612948.17 frames. ], batch size: 25, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:33:09,100 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151546.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:33:20,065 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 23:33:32,578 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.535e+02 3.172e+02 3.888e+02 8.825e+02, threshold=6.343e+02, percent-clipped=4.0
+2023-02-06 23:33:42,766 INFO [train.py:901] (2/4) Epoch 19, batch 6100, loss[loss=0.2156, simple_loss=0.2958, pruned_loss=0.0677, over 7909.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.294, pruned_loss=0.06623, over 1621357.59 frames. ], batch size: 20, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:34:07,684 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 23:34:10,834 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151633.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:34:12,958 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2458, 2.5315, 2.9391, 1.5907, 3.1671, 1.8818, 1.5707, 2.2183],
+ device='cuda:2'), covar=tensor([0.0765, 0.0370, 0.0284, 0.0712, 0.0404, 0.0817, 0.0920, 0.0488],
+ device='cuda:2'), in_proj_covar=tensor([0.0442, 0.0380, 0.0332, 0.0438, 0.0367, 0.0526, 0.0384, 0.0405],
+ device='cuda:2'), out_proj_covar=tensor([1.1975e-04, 1.0018e-04, 8.7593e-05, 1.1622e-04, 9.7466e-05, 1.4998e-04,
+ 1.0391e-04, 1.0815e-04], device='cuda:2')
+2023-02-06 23:34:17,586 INFO [train.py:901] (2/4) Epoch 19, batch 6150, loss[loss=0.2136, simple_loss=0.2997, pruned_loss=0.06373, over 8508.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2937, pruned_loss=0.06598, over 1622503.28 frames. ], batch size: 28, lr: 3.97e-03, grad_scale: 16.0
+2023-02-06 23:34:18,368 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151644.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:34:28,875 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151658.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:34:30,158 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151660.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:34:30,966 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151661.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:34:43,579 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.320e+02 2.846e+02 3.654e+02 5.745e+02, threshold=5.693e+02, percent-clipped=0.0
+2023-02-06 23:34:53,937 INFO [train.py:901] (2/4) Epoch 19, batch 6200, loss[loss=0.1926, simple_loss=0.2792, pruned_loss=0.053, over 7922.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2939, pruned_loss=0.06605, over 1622475.29 frames. ], batch size: 20, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:35:02,694 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151706.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:35:28,542 INFO [train.py:901] (2/4) Epoch 19, batch 6250, loss[loss=0.2145, simple_loss=0.2925, pruned_loss=0.06823, over 8754.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2932, pruned_loss=0.06568, over 1620519.32 frames. ], batch size: 30, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:35:37,322 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5908, 1.3590, 1.4996, 1.2720, 0.8644, 1.3074, 1.4585, 1.1127],
+ device='cuda:2'), covar=tensor([0.0571, 0.1285, 0.1777, 0.1537, 0.0615, 0.1541, 0.0730, 0.0750],
+ device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0100, 0.0161, 0.0112, 0.0142],
+ device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:2')
+2023-02-06 23:35:39,361 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151759.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:35:50,938 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151775.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:35:53,500 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.555e+02 3.246e+02 4.070e+02 8.549e+02, threshold=6.492e+02, percent-clipped=6.0
+2023-02-06 23:36:03,716 INFO [train.py:901] (2/4) Epoch 19, batch 6300, loss[loss=0.227, simple_loss=0.3133, pruned_loss=0.07031, over 8284.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.294, pruned_loss=0.0661, over 1623026.24 frames. ], batch size: 23, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:36:22,214 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151819.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:36:39,103 INFO [train.py:901] (2/4) Epoch 19, batch 6350, loss[loss=0.2179, simple_loss=0.2897, pruned_loss=0.07301, over 8367.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.294, pruned_loss=0.06586, over 1625876.89 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:36:40,790 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-02-06 23:36:57,097 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8461, 1.9770, 1.7805, 2.4623, 1.3844, 1.5536, 1.9049, 2.1130],
+ device='cuda:2'), covar=tensor([0.0768, 0.0771, 0.0872, 0.0481, 0.0996, 0.1282, 0.0740, 0.0706],
+ device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0198, 0.0248, 0.0213, 0.0205, 0.0250, 0.0254, 0.0210],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-06 23:37:03,135 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.376e+02 2.921e+02 3.593e+02 6.855e+02, threshold=5.841e+02, percent-clipped=1.0
+2023-02-06 23:37:13,200 INFO [train.py:901] (2/4) Epoch 19, batch 6400, loss[loss=0.2415, simple_loss=0.322, pruned_loss=0.08046, over 8632.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.292, pruned_loss=0.06521, over 1619752.82 frames. ], batch size: 34, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:37:30,637 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151917.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:37:48,045 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151942.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:37:48,525 INFO [train.py:901] (2/4) Epoch 19, batch 6450, loss[loss=0.2134, simple_loss=0.2964, pruned_loss=0.06522, over 8245.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2926, pruned_loss=0.0654, over 1621863.69 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:38:13,512 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.409e+02 2.943e+02 3.710e+02 6.232e+02, threshold=5.887e+02, percent-clipped=1.0
+2023-02-06 23:38:19,954 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5835, 1.9771, 3.1116, 1.4891, 2.2804, 2.0433, 1.7352, 2.2628],
+ device='cuda:2'), covar=tensor([0.1869, 0.2501, 0.0844, 0.4348, 0.1856, 0.3143, 0.2212, 0.2366],
+ device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0581, 0.0551, 0.0629, 0.0640, 0.0587, 0.0523, 0.0628],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:38:23,087 INFO [train.py:901] (2/4) Epoch 19, batch 6500, loss[loss=0.1855, simple_loss=0.273, pruned_loss=0.04895, over 7825.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2913, pruned_loss=0.06486, over 1616313.63 frames. ], batch size: 20, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:38:40,009 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152015.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:38:51,615 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152031.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:38:58,599 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152040.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:39:00,457 INFO [train.py:901] (2/4) Epoch 19, batch 6550, loss[loss=0.1709, simple_loss=0.2494, pruned_loss=0.04622, over 7697.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2907, pruned_loss=0.06462, over 1615164.41 frames. ], batch size: 18, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:39:04,936 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152050.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:39:09,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152056.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:39:21,605 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 23:39:24,910 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.379e+02 2.761e+02 3.695e+02 7.678e+02, threshold=5.522e+02, percent-clipped=3.0
+2023-02-06 23:39:34,322 INFO [train.py:901] (2/4) Epoch 19, batch 6600, loss[loss=0.246, simple_loss=0.3241, pruned_loss=0.0839, over 6838.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2922, pruned_loss=0.06567, over 1613360.64 frames. ], batch size: 72, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:39:36,561 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-06 23:39:39,626 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 23:39:59,802 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.80 vs. limit=5.0
+2023-02-06 23:40:09,007 INFO [train.py:901] (2/4) Epoch 19, batch 6650, loss[loss=0.1917, simple_loss=0.2805, pruned_loss=0.05148, over 8189.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.293, pruned_loss=0.0661, over 1614883.88 frames. ], batch size: 23, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:40:19,001 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 23:40:23,464 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152163.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:40:24,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152165.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:40:32,971 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2207, 1.6683, 4.3357, 2.0454, 2.4450, 4.9385, 4.9486, 4.2435],
+ device='cuda:2'), covar=tensor([0.1277, 0.1693, 0.0335, 0.1957, 0.1210, 0.0189, 0.0451, 0.0584],
+ device='cuda:2'), in_proj_covar=tensor([0.0290, 0.0317, 0.0283, 0.0309, 0.0301, 0.0260, 0.0404, 0.0304],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:40:34,183 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.686e+02 3.265e+02 3.895e+02 8.931e+02, threshold=6.531e+02, percent-clipped=7.0
+2023-02-06 23:40:44,518 INFO [train.py:901] (2/4) Epoch 19, batch 6700, loss[loss=0.1982, simple_loss=0.2881, pruned_loss=0.05411, over 8329.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2923, pruned_loss=0.0652, over 1616674.27 frames. ], batch size: 25, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:40:53,760 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0
+2023-02-06 23:41:19,449 INFO [train.py:901] (2/4) Epoch 19, batch 6750, loss[loss=0.1656, simple_loss=0.2484, pruned_loss=0.0414, over 7442.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2916, pruned_loss=0.06473, over 1619251.63 frames. ], batch size: 17, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:41:19,592 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152243.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:41:41,477 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0
+2023-02-06 23:41:44,675 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152278.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:41:45,118 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.378e+02 2.909e+02 3.491e+02 6.752e+02, threshold=5.817e+02, percent-clipped=2.0
+2023-02-06 23:41:53,507 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152291.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:41:54,062 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 23:41:54,741 INFO [train.py:901] (2/4) Epoch 19, batch 6800, loss[loss=0.1797, simple_loss=0.265, pruned_loss=0.04718, over 8236.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2914, pruned_loss=0.06422, over 1621229.37 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:42:29,085 INFO [train.py:901] (2/4) Epoch 19, batch 6850, loss[loss=0.1932, simple_loss=0.2706, pruned_loss=0.05791, over 7822.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2911, pruned_loss=0.06402, over 1616128.69 frames. ], batch size: 20, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:42:43,978 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 23:42:54,734 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.344e+02 3.012e+02 3.839e+02 8.073e+02, threshold=6.025e+02, percent-clipped=5.0
+2023-02-06 23:43:05,119 INFO [train.py:901] (2/4) Epoch 19, batch 6900, loss[loss=0.2062, simple_loss=0.2763, pruned_loss=0.06801, over 7653.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2905, pruned_loss=0.06366, over 1615559.76 frames. ], batch size: 19, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:43:17,188 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8134, 1.5839, 3.4516, 1.4869, 2.4218, 3.9168, 3.9649, 3.2756],
+ device='cuda:2'), covar=tensor([0.1403, 0.1747, 0.0353, 0.2127, 0.1060, 0.0197, 0.0501, 0.0605],
+ device='cuda:2'), in_proj_covar=tensor([0.0291, 0.0319, 0.0285, 0.0311, 0.0301, 0.0261, 0.0407, 0.0306],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:43:18,645 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3910, 1.3713, 2.3418, 1.1883, 2.1393, 2.5185, 2.6574, 2.1088],
+ device='cuda:2'), covar=tensor([0.1200, 0.1379, 0.0478, 0.2222, 0.0787, 0.0400, 0.0740, 0.0791],
+ device='cuda:2'), in_proj_covar=tensor([0.0291, 0.0319, 0.0285, 0.0311, 0.0301, 0.0261, 0.0406, 0.0306],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:43:25,516 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152421.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:43:27,852 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0
+2023-02-06 23:43:40,394 INFO [train.py:901] (2/4) Epoch 19, batch 6950, loss[loss=0.1927, simple_loss=0.279, pruned_loss=0.05326, over 8355.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2902, pruned_loss=0.06379, over 1613484.41 frames. ], batch size: 24, lr: 3.96e-03, grad_scale: 16.0
+2023-02-06 23:43:42,607 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:43:53,798 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 23:43:53,936 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152463.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:43:58,611 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5237, 1.2566, 4.7246, 1.7499, 4.1523, 3.9040, 4.1983, 4.1145],
+ device='cuda:2'), covar=tensor([0.0618, 0.5003, 0.0461, 0.4317, 0.1172, 0.0975, 0.0656, 0.0671],
+ device='cuda:2'), in_proj_covar=tensor([0.0594, 0.0626, 0.0668, 0.0604, 0.0684, 0.0585, 0.0584, 0.0646],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 23:44:05,256 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.443e+02 3.132e+02 3.706e+02 6.613e+02, threshold=6.264e+02, percent-clipped=2.0
+2023-02-06 23:44:08,852 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8861, 2.3752, 3.6966, 1.8910, 1.9079, 3.7044, 0.7420, 2.1043],
+ device='cuda:2'), covar=tensor([0.1424, 0.1189, 0.0177, 0.1905, 0.2766, 0.0210, 0.2489, 0.1488],
+ device='cuda:2'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0220, 0.0266, 0.0132, 0.0168, 0.0185],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 23:44:14,626 INFO [train.py:901] (2/4) Epoch 19, batch 7000, loss[loss=0.2006, simple_loss=0.2866, pruned_loss=0.05731, over 8343.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2897, pruned_loss=0.06361, over 1608934.55 frames. ], batch size: 26, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:44:44,332 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152534.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:44:51,099 INFO [train.py:901] (2/4) Epoch 19, batch 7050, loss[loss=0.201, simple_loss=0.2885, pruned_loss=0.05676, over 8665.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2908, pruned_loss=0.06425, over 1612582.01 frames. ], batch size: 39, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:45:02,279 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152559.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:45:15,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.420e+02 2.800e+02 3.429e+02 5.549e+02, threshold=5.599e+02, percent-clipped=0.0
+2023-02-06 23:45:21,280 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152587.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:45:25,390 INFO [train.py:901] (2/4) Epoch 19, batch 7100, loss[loss=0.2199, simple_loss=0.2904, pruned_loss=0.07471, over 8047.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2908, pruned_loss=0.06413, over 1608904.56 frames. ], batch size: 22, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:45:30,673 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152600.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:45:35,440 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:45:56,367 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152635.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:46:01,876 INFO [train.py:901] (2/4) Epoch 19, batch 7150, loss[loss=0.1985, simple_loss=0.2736, pruned_loss=0.06172, over 7804.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2909, pruned_loss=0.06456, over 1608574.55 frames. ], batch size: 20, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:46:02,142 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7005, 2.3577, 4.2724, 1.5410, 3.1867, 2.1848, 1.8379, 2.8865],
+ device='cuda:2'), covar=tensor([0.1922, 0.2561, 0.0816, 0.4624, 0.1685, 0.3305, 0.2196, 0.2549],
+ device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0585, 0.0553, 0.0628, 0.0640, 0.0589, 0.0522, 0.0631],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:46:27,015 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-02-06 23:46:27,178 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.485e+02 2.441e+02 2.885e+02 3.630e+02 1.043e+03, threshold=5.770e+02, percent-clipped=5.0
+2023-02-06 23:46:36,619 INFO [train.py:901] (2/4) Epoch 19, batch 7200, loss[loss=0.227, simple_loss=0.3123, pruned_loss=0.07085, over 8534.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2917, pruned_loss=0.06471, over 1606786.75 frames. ], batch size: 28, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:46:42,742 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152702.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:46:56,239 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3993, 1.6339, 1.7201, 1.1059, 1.7403, 1.3804, 0.2900, 1.5911],
+ device='cuda:2'), covar=tensor([0.0428, 0.0347, 0.0295, 0.0443, 0.0381, 0.0848, 0.0788, 0.0233],
+ device='cuda:2'), in_proj_covar=tensor([0.0445, 0.0380, 0.0335, 0.0443, 0.0369, 0.0530, 0.0387, 0.0410],
+ device='cuda:2'), out_proj_covar=tensor([1.2024e-04, 1.0022e-04, 8.8461e-05, 1.1754e-04, 9.7831e-05, 1.5109e-04,
+ 1.0467e-04, 1.0941e-04], device='cuda:2')
+2023-02-06 23:47:12,600 INFO [train.py:901] (2/4) Epoch 19, batch 7250, loss[loss=0.1913, simple_loss=0.263, pruned_loss=0.05975, over 7534.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2903, pruned_loss=0.06436, over 1605572.28 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:47:13,474 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:47:17,697 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152750.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:47:37,386 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.392e+02 2.877e+02 3.488e+02 7.359e+02, threshold=5.753e+02, percent-clipped=2.0
+2023-02-06 23:47:47,609 INFO [train.py:901] (2/4) Epoch 19, batch 7300, loss[loss=0.1631, simple_loss=0.2434, pruned_loss=0.04143, over 7434.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.06364, over 1608939.69 frames. ], batch size: 17, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:47:48,531 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4651, 1.6368, 2.1594, 1.3403, 1.4639, 1.6084, 1.5693, 1.4184],
+ device='cuda:2'), covar=tensor([0.2049, 0.2368, 0.1001, 0.4319, 0.1961, 0.3551, 0.2283, 0.2270],
+ device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0581, 0.0551, 0.0624, 0.0637, 0.0586, 0.0519, 0.0627],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:47:57,327 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152807.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:48:21,874 INFO [train.py:901] (2/4) Epoch 19, batch 7350, loss[loss=0.2632, simple_loss=0.3226, pruned_loss=0.1019, over 7663.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2903, pruned_loss=0.0643, over 1606721.73 frames. ], batch size: 71, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:48:22,993 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 23:48:45,481 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1946, 4.1995, 3.7951, 1.9118, 3.6772, 3.8139, 3.7765, 3.5244],
+ device='cuda:2'), covar=tensor([0.0856, 0.0601, 0.1114, 0.4816, 0.0922, 0.1038, 0.1318, 0.0902],
+ device='cuda:2'), in_proj_covar=tensor([0.0517, 0.0427, 0.0427, 0.0531, 0.0416, 0.0428, 0.0411, 0.0372],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:48:46,698 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 23:48:48,153 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.571e+02 3.070e+02 4.184e+02 8.940e+02, threshold=6.140e+02, percent-clipped=8.0
+2023-02-06 23:48:58,039 INFO [train.py:901] (2/4) Epoch 19, batch 7400, loss[loss=0.1849, simple_loss=0.2687, pruned_loss=0.05055, over 7655.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2896, pruned_loss=0.06413, over 1605458.10 frames. ], batch size: 19, lr: 3.95e-03, grad_scale: 32.0
+2023-02-06 23:49:07,685 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 23:49:17,393 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4392, 2.5982, 1.8142, 2.1399, 2.1784, 1.4719, 1.9524, 2.0580],
+ device='cuda:2'), covar=tensor([0.1568, 0.0402, 0.1259, 0.0731, 0.0778, 0.1579, 0.1166, 0.1199],
+ device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0233, 0.0328, 0.0304, 0.0299, 0.0330, 0.0342, 0.0313],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:49:18,786 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152922.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:49:25,744 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0
+2023-02-06 23:49:32,918 INFO [train.py:901] (2/4) Epoch 19, batch 7450, loss[loss=0.2025, simple_loss=0.2919, pruned_loss=0.05658, over 8351.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2906, pruned_loss=0.06414, over 1608764.40 frames. ], batch size: 24, lr: 3.95e-03, grad_scale: 32.0
+2023-02-06 23:49:33,722 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152944.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:49:38,519 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152951.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:49:44,022 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152958.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:49:46,567 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 23:49:58,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.506e+02 3.079e+02 4.075e+02 8.166e+02, threshold=6.159e+02, percent-clipped=5.0
+2023-02-06 23:50:01,976 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:50:08,330 INFO [train.py:901] (2/4) Epoch 19, batch 7500, loss[loss=0.226, simple_loss=0.303, pruned_loss=0.07454, over 8192.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2916, pruned_loss=0.06471, over 1609685.36 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:50:17,498 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153006.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:50:34,844 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153031.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:50:42,932 INFO [train.py:901] (2/4) Epoch 19, batch 7550, loss[loss=0.2264, simple_loss=0.3021, pruned_loss=0.07531, over 6941.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2918, pruned_loss=0.06509, over 1610612.81 frames. ], batch size: 72, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:50:53,932 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153059.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:50:58,702 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:51:08,488 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.431e+02 2.980e+02 3.688e+02 7.634e+02, threshold=5.960e+02, percent-clipped=2.0
+2023-02-06 23:51:14,777 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153088.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:51:18,071 INFO [train.py:901] (2/4) Epoch 19, batch 7600, loss[loss=0.1976, simple_loss=0.2787, pruned_loss=0.05831, over 7439.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2915, pruned_loss=0.06495, over 1606814.61 frames. ], batch size: 17, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:51:37,054 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7022, 2.3814, 3.7107, 1.4969, 2.8585, 2.0608, 1.9600, 2.4001],
+ device='cuda:2'), covar=tensor([0.1962, 0.2331, 0.1006, 0.4578, 0.1836, 0.3598, 0.2113, 0.2871],
+ device='cuda:2'), in_proj_covar=tensor([0.0519, 0.0588, 0.0557, 0.0632, 0.0643, 0.0592, 0.0523, 0.0634],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:51:42,024 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 23:51:47,771 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153135.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:51:53,092 INFO [train.py:901] (2/4) Epoch 19, batch 7650, loss[loss=0.2186, simple_loss=0.2958, pruned_loss=0.07073, over 7972.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2922, pruned_loss=0.06568, over 1608321.48 frames. ], batch size: 21, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:51:59,413 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0
+2023-02-06 23:52:00,688 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0
+2023-02-06 23:52:17,643 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153178.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:52:18,733 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.290e+02 2.780e+02 3.362e+02 7.829e+02, threshold=5.561e+02, percent-clipped=2.0
+2023-02-06 23:52:28,393 INFO [train.py:901] (2/4) Epoch 19, batch 7700, loss[loss=0.2203, simple_loss=0.2985, pruned_loss=0.07102, over 8289.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2913, pruned_loss=0.06498, over 1609301.69 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 16.0
+2023-02-06 23:52:35,482 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153203.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:52:35,506 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153203.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:52:46,188 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-06 23:52:57,461 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 23:53:03,345 INFO [train.py:901] (2/4) Epoch 19, batch 7750, loss[loss=0.1907, simple_loss=0.2645, pruned_loss=0.05849, over 7228.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2919, pruned_loss=0.06547, over 1609271.58 frames. ], batch size: 16, lr: 3.94e-03, grad_scale: 16.0
+2023-02-06 23:53:18,703 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-06 23:53:28,921 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.456e+02 3.001e+02 3.725e+02 8.940e+02, threshold=6.003e+02, percent-clipped=11.0
+2023-02-06 23:53:37,740 INFO [train.py:901] (2/4) Epoch 19, batch 7800, loss[loss=0.2148, simple_loss=0.302, pruned_loss=0.06382, over 8188.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2917, pruned_loss=0.06522, over 1610504.01 frames. ], batch size: 23, lr: 3.94e-03, grad_scale: 16.0
+2023-02-06 23:53:39,909 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153296.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:53:53,359 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153315.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:53:58,000 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:54:09,674 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153340.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:54:11,468 INFO [train.py:901] (2/4) Epoch 19, batch 7850, loss[loss=0.2021, simple_loss=0.294, pruned_loss=0.05514, over 8359.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2902, pruned_loss=0.06446, over 1607866.49 frames. ], batch size: 24, lr: 3.94e-03, grad_scale: 8.0
+2023-02-06 23:54:14,365 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153347.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:54:36,618 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.457e+02 2.874e+02 3.581e+02 1.670e+03, threshold=5.749e+02, percent-clipped=9.0
+2023-02-06 23:54:44,306 INFO [train.py:901] (2/4) Epoch 19, batch 7900, loss[loss=0.1778, simple_loss=0.2566, pruned_loss=0.04944, over 7229.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2884, pruned_loss=0.06349, over 1602553.48 frames. ], batch size: 16, lr: 3.94e-03, grad_scale: 8.0
+2023-02-06 23:55:08,556 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6150, 1.7496, 2.3217, 1.3468, 1.1801, 2.3268, 0.3621, 1.3317],
+ device='cuda:2'), covar=tensor([0.2148, 0.1189, 0.0317, 0.1625, 0.3081, 0.0344, 0.2208, 0.1413],
+ device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0195, 0.0125, 0.0224, 0.0272, 0.0134, 0.0171, 0.0187],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 23:55:13,049 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6946, 1.5173, 4.9147, 1.7966, 4.3243, 4.0971, 4.4098, 4.2727],
+ device='cuda:2'), covar=tensor([0.0622, 0.4871, 0.0446, 0.4096, 0.1060, 0.0923, 0.0541, 0.0657],
+ device='cuda:2'), in_proj_covar=tensor([0.0605, 0.0637, 0.0678, 0.0611, 0.0693, 0.0594, 0.0593, 0.0654],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:55:15,517 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153439.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:55:17,979 INFO [train.py:901] (2/4) Epoch 19, batch 7950, loss[loss=0.206, simple_loss=0.2966, pruned_loss=0.05771, over 8251.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2891, pruned_loss=0.06356, over 1607952.24 frames. ], batch size: 24, lr: 3.94e-03, grad_scale: 8.0
+2023-02-06 23:55:28,827 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153459.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:55:38,851 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2221, 1.9655, 2.7294, 2.2117, 2.6308, 2.2267, 2.0058, 1.4985],
+ device='cuda:2'), covar=tensor([0.5739, 0.5232, 0.1837, 0.3643, 0.2519, 0.3177, 0.2073, 0.5345],
+ device='cuda:2'), in_proj_covar=tensor([0.0926, 0.0955, 0.0783, 0.0922, 0.0981, 0.0867, 0.0736, 0.0809],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-06 23:55:41,873 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:55:43,176 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.434e+02 3.034e+02 3.983e+02 8.510e+02, threshold=6.068e+02, percent-clipped=6.0
+2023-02-06 23:55:43,590 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.09 vs. limit=5.0
+2023-02-06 23:55:45,342 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153484.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:55:49,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3200, 1.3015, 2.1426, 1.1673, 1.8730, 2.2677, 2.3603, 1.9529],
+ device='cuda:2'), covar=tensor([0.1031, 0.1259, 0.0464, 0.1898, 0.0898, 0.0384, 0.0720, 0.0640],
+ device='cuda:2'), in_proj_covar=tensor([0.0290, 0.0318, 0.0286, 0.0312, 0.0303, 0.0262, 0.0406, 0.0305],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:55:51,002 INFO [train.py:901] (2/4) Epoch 19, batch 8000, loss[loss=0.2225, simple_loss=0.3029, pruned_loss=0.07107, over 8251.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2899, pruned_loss=0.0639, over 1611409.32 frames. ], batch size: 24, lr: 3.94e-03, grad_scale: 8.0
+2023-02-06 23:56:25,144 INFO [train.py:901] (2/4) Epoch 19, batch 8050, loss[loss=0.1983, simple_loss=0.2678, pruned_loss=0.06445, over 7422.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2889, pruned_loss=0.064, over 1602472.44 frames. ], batch size: 17, lr: 3.94e-03, grad_scale: 8.0
+2023-02-06 23:56:27,180 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7539, 4.6643, 4.2671, 3.1522, 4.2026, 4.2417, 4.4392, 3.9136],
+ device='cuda:2'), covar=tensor([0.0566, 0.0453, 0.0898, 0.3192, 0.0718, 0.1023, 0.1057, 0.0895],
+ device='cuda:2'), in_proj_covar=tensor([0.0518, 0.0428, 0.0427, 0.0534, 0.0420, 0.0432, 0.0416, 0.0373],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:56:34,265 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-06 23:56:59,236 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 23:57:04,931 INFO [train.py:901] (2/4) Epoch 20, batch 0, loss[loss=0.1976, simple_loss=0.2768, pruned_loss=0.05918, over 7801.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2768, pruned_loss=0.05918, over 7801.00 frames. ], batch size: 19, lr: 3.84e-03, grad_scale: 8.0
+2023-02-06 23:57:04,931 INFO [train.py:926] (2/4) Computing validation loss
+2023-02-06 23:57:12,510 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7191, 1.4997, 3.8866, 1.5686, 3.4355, 3.1864, 3.5413, 3.3766],
+ device='cuda:2'), covar=tensor([0.0744, 0.4588, 0.0523, 0.4158, 0.1139, 0.1044, 0.0703, 0.0788],
+ device='cuda:2'), in_proj_covar=tensor([0.0606, 0.0638, 0.0678, 0.0612, 0.0692, 0.0593, 0.0591, 0.0655],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:2')
+2023-02-06 23:57:16,941 INFO [train.py:935] (2/4) Epoch 20, validation: loss=0.1757, simple_loss=0.276, pruned_loss=0.03766, over 944034.00 frames.
+2023-02-06 23:57:16,942 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB
+2023-02-06 23:57:20,454 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.577e+02 3.496e+02 4.495e+02 1.164e+03, threshold=6.992e+02, percent-clipped=12.0
+2023-02-06 23:57:29,442 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153594.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:57:31,330 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 23:57:51,322 INFO [train.py:901] (2/4) Epoch 20, batch 50, loss[loss=0.2204, simple_loss=0.304, pruned_loss=0.06845, over 8294.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2941, pruned_loss=0.06348, over 367979.95 frames. ], batch size: 23, lr: 3.84e-03, grad_scale: 8.0
+2023-02-06 23:58:01,109 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153640.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:58:06,571 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 23:58:26,060 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1196, 2.0976, 1.5196, 1.8537, 1.5779, 1.2142, 1.4303, 1.6452],
+ device='cuda:2'), covar=tensor([0.1616, 0.0529, 0.1510, 0.0648, 0.0938, 0.1978, 0.1243, 0.0987],
+ device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0236, 0.0332, 0.0305, 0.0300, 0.0334, 0.0344, 0.0316],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:58:27,837 INFO [train.py:901] (2/4) Epoch 20, batch 100, loss[loss=0.2534, simple_loss=0.3335, pruned_loss=0.08663, over 8290.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2941, pruned_loss=0.06485, over 648351.16 frames. ], batch size: 23, lr: 3.84e-03, grad_scale: 8.0
+2023-02-06 23:58:29,260 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 23:58:31,349 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.446e+02 2.844e+02 3.351e+02 7.473e+02, threshold=5.688e+02, percent-clipped=2.0
+2023-02-06 23:59:03,151 INFO [train.py:901] (2/4) Epoch 20, batch 150, loss[loss=0.2124, simple_loss=0.3016, pruned_loss=0.06164, over 8200.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2939, pruned_loss=0.06456, over 866937.96 frames. ], batch size: 23, lr: 3.84e-03, grad_scale: 8.0
+2023-02-06 23:59:06,099 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5031, 2.6234, 1.9276, 2.3146, 2.2093, 1.6485, 2.1461, 2.1641],
+ device='cuda:2'), covar=tensor([0.1318, 0.0373, 0.1140, 0.0545, 0.0620, 0.1452, 0.0849, 0.0881],
+ device='cuda:2'), in_proj_covar=tensor([0.0349, 0.0234, 0.0329, 0.0303, 0.0298, 0.0331, 0.0341, 0.0313],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-06 23:59:23,326 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153755.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 23:59:26,119 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5419, 1.5177, 1.8564, 1.3338, 1.1501, 1.8470, 0.2538, 1.1746],
+ device='cuda:2'), covar=tensor([0.1823, 0.1307, 0.0371, 0.0986, 0.3005, 0.0418, 0.2265, 0.1141],
+ device='cuda:2'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0221, 0.0270, 0.0133, 0.0169, 0.0186],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:2')
+2023-02-06 23:59:39,296 INFO [train.py:901] (2/4) Epoch 20, batch 200, loss[loss=0.1752, simple_loss=0.2701, pruned_loss=0.0402, over 8108.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2923, pruned_loss=0.06357, over 1032415.76 frames. ], batch size: 23, lr: 3.84e-03, grad_scale: 8.0
+2023-02-06 23:59:42,414 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.84 vs. limit=5.0
+2023-02-06 23:59:42,528 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.177e+02 2.784e+02 3.416e+02 8.818e+02, threshold=5.569e+02, percent-clipped=1.0
+2023-02-06 23:59:43,921 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153783.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:00:15,026 INFO [train.py:901] (2/4) Epoch 20, batch 250, loss[loss=0.2065, simple_loss=0.2871, pruned_loss=0.06295, over 8464.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2912, pruned_loss=0.06374, over 1160675.15 frames. ], batch size: 25, lr: 3.84e-03, grad_scale: 8.0
+2023-02-07 00:00:26,528 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-07 00:00:31,635 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:00:34,732 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-07 00:00:48,282 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:00:48,774 INFO [train.py:901] (2/4) Epoch 20, batch 300, loss[loss=0.1855, simple_loss=0.2614, pruned_loss=0.05485, over 7663.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2913, pruned_loss=0.0639, over 1260729.13 frames. ], batch size: 19, lr: 3.84e-03, grad_scale: 8.0
+2023-02-07 00:00:52,001 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.425e+02 2.846e+02 3.739e+02 1.062e+03, threshold=5.691e+02, percent-clipped=2.0
+2023-02-07 00:01:05,174 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153898.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:01:24,550 INFO [train.py:901] (2/4) Epoch 20, batch 350, loss[loss=0.202, simple_loss=0.2956, pruned_loss=0.0542, over 8259.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2907, pruned_loss=0.06371, over 1340967.73 frames. ], batch size: 24, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:01:35,765 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153941.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:01:36,640 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 00:01:59,289 INFO [train.py:901] (2/4) Epoch 20, batch 400, loss[loss=0.222, simple_loss=0.3063, pruned_loss=0.06889, over 8596.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2929, pruned_loss=0.06501, over 1399792.87 frames. ], batch size: 31, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:02:02,805 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 2.483e+02 2.937e+02 3.652e+02 9.410e+02, threshold=5.874e+02, percent-clipped=4.0
+2023-02-07 00:02:22,963 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 00:02:25,626 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154011.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:02:36,308 INFO [train.py:901] (2/4) Epoch 20, batch 450, loss[loss=0.1901, simple_loss=0.2671, pruned_loss=0.05657, over 7808.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2915, pruned_loss=0.06424, over 1449397.54 frames. ], batch size: 20, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:02:44,056 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154036.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:03:05,787 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154067.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:03:11,784 INFO [train.py:901] (2/4) Epoch 20, batch 500, loss[loss=0.204, simple_loss=0.2871, pruned_loss=0.06041, over 8328.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2913, pruned_loss=0.06435, over 1489543.93 frames. ], batch size: 26, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:03:15,235 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 2.274e+02 2.685e+02 3.204e+02 7.760e+02, threshold=5.371e+02, percent-clipped=3.0
+2023-02-07 00:03:24,419 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:03:29,960 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154102.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:03:46,396 INFO [train.py:901] (2/4) Epoch 20, batch 550, loss[loss=0.22, simple_loss=0.2989, pruned_loss=0.07057, over 8488.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2901, pruned_loss=0.06354, over 1513931.08 frames. ], batch size: 29, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:03:49,712 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0
+2023-02-07 00:03:59,236 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0
+2023-02-07 00:04:07,821 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154154.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:04:23,276 INFO [train.py:901] (2/4) Epoch 20, batch 600, loss[loss=0.2218, simple_loss=0.2874, pruned_loss=0.07816, over 7701.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2909, pruned_loss=0.06462, over 1535015.87 frames. ], batch size: 18, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:04:25,542 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154179.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:04:26,652 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.477e+02 2.962e+02 3.836e+02 8.919e+02, threshold=5.925e+02, percent-clipped=6.0
+2023-02-07 00:04:45,283 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-07 00:04:57,544 INFO [train.py:901] (2/4) Epoch 20, batch 650, loss[loss=0.1915, simple_loss=0.2895, pruned_loss=0.04674, over 8456.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2915, pruned_loss=0.06505, over 1554966.41 frames. ], batch size: 25, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:04:58,453 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6336, 2.6557, 1.8109, 2.3762, 2.3003, 1.5438, 2.2293, 2.3238],
+ device='cuda:2'), covar=tensor([0.1563, 0.0404, 0.1277, 0.0678, 0.0760, 0.1605, 0.0960, 0.1006],
+ device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0237, 0.0332, 0.0307, 0.0302, 0.0335, 0.0346, 0.0319],
+ device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-07 00:05:06,564 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154239.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:05:14,624 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-02-07 00:05:34,064 INFO [train.py:901] (2/4) Epoch 20, batch 700, loss[loss=0.2193, simple_loss=0.3137, pruned_loss=0.06241, over 8129.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2913, pruned_loss=0.06461, over 1569102.91 frames. ], batch size: 22, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:05:37,467 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.357e+02 2.958e+02 3.586e+02 6.466e+02, threshold=5.915e+02, percent-clipped=2.0
+2023-02-07 00:05:40,241 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154285.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:05:54,042 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154304.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:05:57,544 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154309.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:06:08,843 INFO [train.py:901] (2/4) Epoch 20, batch 750, loss[loss=0.22, simple_loss=0.3104, pruned_loss=0.06482, over 8295.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2922, pruned_loss=0.0648, over 1583288.53 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:06:11,930 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-07 00:06:28,500 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154355.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:06:33,643 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-07 00:06:43,001 INFO [train.py:901] (2/4) Epoch 20, batch 800, loss[loss=0.1934, simple_loss=0.2818, pruned_loss=0.05253, over 8479.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2919, pruned_loss=0.06475, over 1591719.18 frames. ], batch size: 29, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:06:43,740 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-07 00:06:47,165 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.337e+02 2.441e+02 3.052e+02 3.711e+02 8.675e+02, threshold=6.104e+02, percent-clipped=3.0
+2023-02-07 00:07:01,172 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154400.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:07:08,303 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154411.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:07:19,173 INFO [train.py:901] (2/4) Epoch 20, batch 850, loss[loss=0.2279, simple_loss=0.2994, pruned_loss=0.07819, over 8712.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2919, pruned_loss=0.06537, over 1597183.98 frames. ], batch size: 30, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:07:27,235 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154438.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:07:32,660 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:07:52,894 INFO [train.py:901] (2/4) Epoch 20, batch 900, loss[loss=0.2121, simple_loss=0.2944, pruned_loss=0.06489, over 8640.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2919, pruned_loss=0.06485, over 1605852.80 frames. ], batch size: 34, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:07:56,215 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.439e+02 2.923e+02 3.686e+02 1.072e+03, threshold=5.846e+02, percent-clipped=2.0
+2023-02-07 00:07:57,784 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1989, 2.5444, 2.0263, 2.9774, 1.5644, 1.8387, 2.0912, 2.4693],
+ device='cuda:2'), covar=tensor([0.0661, 0.0719, 0.0817, 0.0331, 0.1015, 0.1180, 0.0886, 0.0693],
+ device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0195, 0.0246, 0.0209, 0.0205, 0.0248, 0.0249, 0.0208],
+ device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:2')
+2023-02-07 00:07:59,841 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4383, 2.1758, 3.2595, 2.6160, 2.9901, 2.2206, 2.1335, 2.2935],
+ device='cuda:2'), covar=tensor([0.4459, 0.4953, 0.1683, 0.2912, 0.2384, 0.3427, 0.2299, 0.3972],
+ device='cuda:2'), in_proj_covar=tensor([0.0930, 0.0960, 0.0786, 0.0924, 0.0978, 0.0871, 0.0734, 0.0813],
+ device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:2')
+2023-02-07 00:08:04,403 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:08:29,162 INFO [train.py:901] (2/4) Epoch 20, batch 950, loss[loss=0.2148, simple_loss=0.2913, pruned_loss=0.06916, over 8286.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2929, pruned_loss=0.06523, over 1611469.28 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:08:29,372 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154526.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:08:48,739 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154553.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:08:54,164 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154561.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:09:00,292 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0726, 1.6629, 4.0500, 1.9264, 2.4027, 4.6178, 4.7444, 3.9714],
+ device='cuda:2'), covar=tensor([0.1281, 0.1918, 0.0351, 0.2060, 0.1366, 0.0225, 0.0454, 0.0621],
+ device='cuda:2'), in_proj_covar=tensor([0.0288, 0.0318, 0.0286, 0.0310, 0.0302, 0.0259, 0.0403, 0.0303],
+ device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:2')
+2023-02-07 00:09:01,511 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-07 00:09:04,233 INFO [train.py:901] (2/4) Epoch 20, batch 1000, loss[loss=0.1994, simple_loss=0.2727, pruned_loss=0.06308, over 7648.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2911, pruned_loss=0.06431, over 1611033.69 frames. ], batch size: 19, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:09:07,495 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.511e+02 3.044e+02 3.807e+02 8.767e+02, threshold=6.087e+02, percent-clipped=2.0
+2023-02-07 00:09:08,976 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154583.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:09:35,150 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-07 00:09:38,960 INFO [train.py:901] (2/4) Epoch 20, batch 1050, loss[loss=0.2221, simple_loss=0.3011, pruned_loss=0.07158, over 8450.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2908, pruned_loss=0.06458, over 1608152.82 frames. ], batch size: 27, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:09:49,447 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-07 00:09:52,626 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. limit=2.0
+2023-02-07 00:09:53,674 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154646.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:09:54,884 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154648.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:09:59,107 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154653.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:10:01,455 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154656.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:10:14,711 INFO [train.py:901] (2/4) Epoch 20, batch 1100, loss[loss=0.1894, simple_loss=0.2545, pruned_loss=0.06215, over 7433.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2909, pruned_loss=0.06476, over 1610691.90 frames. ], batch size: 17, lr: 3.83e-03, grad_scale: 8.0
+2023-02-07 00:10:18,092 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.486e+02 3.103e+02 3.988e+02 8.246e+02, threshold=6.206e+02, percent-clipped=6.0
+2023-02-07 00:10:18,331 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154681.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:10:29,758 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:10:30,330 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154699.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:10:45,302 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 00:10:48,873 INFO [train.py:901] (2/4) Epoch 20, batch 1150, loss[loss=0.2103, simple_loss=0.3027, pruned_loss=0.05892, over 8366.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2916, pruned_loss=0.06443, over 1616073.85 frames. ], batch size: 24, lr: 3.82e-03, grad_scale: 8.0
+2023-02-07 00:10:57,852 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154738.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:10:59,076 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training.
Duration: 27.8166875 +2023-02-07 00:11:10,831 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4990, 1.8703, 3.1520, 1.4513, 2.4122, 1.8722, 1.6422, 2.4229], + device='cuda:2'), covar=tensor([0.1899, 0.2587, 0.0806, 0.4326, 0.1663, 0.3302, 0.2159, 0.2087], + device='cuda:2'), in_proj_covar=tensor([0.0522, 0.0588, 0.0554, 0.0632, 0.0643, 0.0590, 0.0525, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:11:16,270 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:19,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:25,036 INFO [train.py:901] (2/4) Epoch 20, batch 1200, loss[loss=0.2036, simple_loss=0.2885, pruned_loss=0.05936, over 8718.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2913, pruned_loss=0.06437, over 1614434.08 frames. ], batch size: 34, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:11:28,382 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.412e+02 2.746e+02 3.577e+02 9.067e+02, threshold=5.492e+02, percent-clipped=2.0 +2023-02-07 00:11:29,309 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154782.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:46,343 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154807.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:47,771 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154809.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:51,160 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154814.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:53,284 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154817.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:59,116 INFO [train.py:901] (2/4) Epoch 20, batch 1250, loss[loss=0.2083, simple_loss=0.3027, pruned_loss=0.05689, over 8190.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2918, pruned_loss=0.06451, over 1616263.81 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:05,338 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154834.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:06,466 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:11,372 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:34,985 INFO [train.py:901] (2/4) Epoch 20, batch 1300, loss[loss=0.2327, simple_loss=0.311, pruned_loss=0.07723, over 8135.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2909, pruned_loss=0.06431, over 1614192.37 frames. 
], batch size: 22, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:38,325 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.433e+02 3.191e+02 3.995e+02 7.235e+02, threshold=6.381e+02, percent-clipped=6.0 +2023-02-07 00:12:56,079 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5876, 2.2198, 4.1559, 1.5157, 2.9814, 2.0670, 1.7289, 2.8887], + device='cuda:2'), covar=tensor([0.1851, 0.2487, 0.0674, 0.4440, 0.1731, 0.3236, 0.2294, 0.2297], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0588, 0.0553, 0.0633, 0.0644, 0.0590, 0.0527, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:13:01,362 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2123, 3.6785, 2.4407, 3.0756, 3.0682, 2.2537, 2.7450, 3.0873], + device='cuda:2'), covar=tensor([0.1518, 0.0365, 0.1109, 0.0644, 0.0608, 0.1361, 0.0972, 0.1024], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0238, 0.0332, 0.0306, 0.0300, 0.0336, 0.0346, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 00:13:09,378 INFO [train.py:901] (2/4) Epoch 20, batch 1350, loss[loss=0.1895, simple_loss=0.2622, pruned_loss=0.05839, over 7538.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2912, pruned_loss=0.06433, over 1615609.92 frames. ], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:27,091 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:29,761 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:44,747 INFO [train.py:901] (2/4) Epoch 20, batch 1400, loss[loss=0.1908, simple_loss=0.2728, pruned_loss=0.05447, over 8033.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2902, pruned_loss=0.06374, over 1616390.89 frames. ], batch size: 22, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:47,808 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:48,964 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.419e+02 2.969e+02 3.620e+02 8.609e+02, threshold=5.938e+02, percent-clipped=3.0 +2023-02-07 00:13:55,292 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:58,165 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:16,376 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:19,578 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:20,735 INFO [train.py:901] (2/4) Epoch 20, batch 1450, loss[loss=0.169, simple_loss=0.2453, pruned_loss=0.04637, over 7706.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.291, pruned_loss=0.06423, over 1617933.03 frames. ], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:29,172 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-07 00:14:33,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:36,494 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:48,932 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0208, 1.2461, 1.1907, 0.7940, 1.2641, 1.0526, 0.1157, 1.2257], + device='cuda:2'), covar=tensor([0.0379, 0.0322, 0.0303, 0.0458, 0.0376, 0.0861, 0.0758, 0.0279], + device='cuda:2'), in_proj_covar=tensor([0.0438, 0.0382, 0.0336, 0.0440, 0.0368, 0.0528, 0.0386, 0.0407], + device='cuda:2'), out_proj_covar=tensor([1.1838e-04, 1.0062e-04, 8.8634e-05, 1.1644e-04, 9.7577e-05, 1.5049e-04, + 1.0466e-04, 1.0849e-04], device='cuda:2') +2023-02-07 00:14:50,879 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155070.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:14:55,249 INFO [train.py:901] (2/4) Epoch 20, batch 1500, loss[loss=0.1973, simple_loss=0.2857, pruned_loss=0.05439, over 8029.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2912, pruned_loss=0.06454, over 1616925.64 frames. ], batch size: 22, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:58,577 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.482e+02 3.072e+02 3.822e+02 6.990e+02, threshold=6.143e+02, percent-clipped=2.0 +2023-02-07 00:14:59,298 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:02,792 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155087.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:08,997 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155095.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:15:15,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:30,481 INFO [train.py:901] (2/4) Epoch 20, batch 1550, loss[loss=0.1793, simple_loss=0.2603, pruned_loss=0.04911, over 8134.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2892, pruned_loss=0.06327, over 1614705.58 frames. ], batch size: 22, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:04,700 INFO [train.py:901] (2/4) Epoch 20, batch 1600, loss[loss=0.1667, simple_loss=0.2516, pruned_loss=0.04095, over 7932.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.289, pruned_loss=0.06294, over 1616953.58 frames. ], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:08,766 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.295e+02 2.863e+02 3.431e+02 6.352e+02, threshold=5.726e+02, percent-clipped=1.0 +2023-02-07 00:16:20,590 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:27,195 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:40,703 INFO [train.py:901] (2/4) Epoch 20, batch 1650, loss[loss=0.1957, simple_loss=0.2721, pruned_loss=0.05959, over 7656.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2891, pruned_loss=0.06317, over 1612507.78 frames. 
], batch size: 19, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:45,142 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:17:15,938 INFO [train.py:901] (2/4) Epoch 20, batch 1700, loss[loss=0.2207, simple_loss=0.3055, pruned_loss=0.06799, over 8204.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2887, pruned_loss=0.06277, over 1613338.42 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:17:17,133 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0 +2023-02-07 00:17:19,366 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.383e+02 2.759e+02 3.259e+02 7.427e+02, threshold=5.517e+02, percent-clipped=3.0 +2023-02-07 00:17:51,278 INFO [train.py:901] (2/4) Epoch 20, batch 1750, loss[loss=0.173, simple_loss=0.2577, pruned_loss=0.04416, over 7702.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2896, pruned_loss=0.06316, over 1616510.05 frames. ], batch size: 18, lr: 3.82e-03, grad_scale: 16.0 +2023-02-07 00:18:00,382 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:17,135 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:27,006 INFO [train.py:901] (2/4) Epoch 20, batch 1800, loss[loss=0.1711, simple_loss=0.2623, pruned_loss=0.03998, over 7535.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.0636, over 1616477.04 frames. ], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:18:31,095 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.586e+02 2.965e+02 3.772e+02 7.314e+02, threshold=5.929e+02, percent-clipped=8.0 +2023-02-07 00:18:31,377 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5780, 2.3670, 3.3007, 2.6015, 3.1480, 2.5475, 2.3018, 1.8920], + device='cuda:2'), covar=tensor([0.4971, 0.4850, 0.1650, 0.3540, 0.2475, 0.2879, 0.1804, 0.5312], + device='cuda:2'), in_proj_covar=tensor([0.0941, 0.0971, 0.0795, 0.0935, 0.0994, 0.0884, 0.0743, 0.0823], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 00:18:34,019 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:36,205 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 00:18:51,553 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8704, 1.7701, 5.9703, 2.1807, 5.3388, 5.0219, 5.5175, 5.3702], + device='cuda:2'), covar=tensor([0.0570, 0.4799, 0.0486, 0.3991, 0.1188, 0.0971, 0.0562, 0.0626], + device='cuda:2'), in_proj_covar=tensor([0.0604, 0.0627, 0.0677, 0.0606, 0.0685, 0.0590, 0.0589, 0.0658], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:19:01,130 INFO [train.py:901] (2/4) Epoch 20, batch 1850, loss[loss=0.2293, simple_loss=0.2919, pruned_loss=0.08333, over 7531.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2894, pruned_loss=0.06396, over 1612332.35 frames. 
], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:04,531 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,142 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,185 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:36,708 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155475.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:37,247 INFO [train.py:901] (2/4) Epoch 20, batch 1900, loss[loss=0.2077, simple_loss=0.2798, pruned_loss=0.06785, over 8082.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2896, pruned_loss=0.06366, over 1614526.24 frames. ], batch size: 21, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:38,772 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:41,327 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.441e+02 2.899e+02 3.473e+02 6.405e+02, threshold=5.799e+02, percent-clipped=1.0 +2023-02-07 00:19:48,155 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 00:19:52,334 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 00:20:11,842 INFO [train.py:901] (2/4) Epoch 20, batch 1950, loss[loss=0.2127, simple_loss=0.2915, pruned_loss=0.06691, over 7937.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2895, pruned_loss=0.06359, over 1614792.84 frames. ], batch size: 20, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:13,301 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 00:20:20,905 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0889, 1.5927, 3.4416, 1.5240, 2.3933, 3.8708, 3.9313, 3.2629], + device='cuda:2'), covar=tensor([0.1055, 0.1725, 0.0325, 0.2050, 0.1040, 0.0209, 0.0435, 0.0552], + device='cuda:2'), in_proj_covar=tensor([0.0290, 0.0320, 0.0287, 0.0313, 0.0303, 0.0262, 0.0407, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 00:20:26,414 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:20:26,920 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 00:20:32,378 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9939, 2.1395, 1.7776, 2.3695, 1.4595, 1.7263, 1.8879, 2.0940], + device='cuda:2'), covar=tensor([0.0640, 0.0606, 0.0805, 0.0482, 0.0986, 0.1082, 0.0713, 0.0624], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0197, 0.0248, 0.0213, 0.0205, 0.0250, 0.0252, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 00:20:46,962 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 00:20:47,674 INFO [train.py:901] (2/4) Epoch 20, batch 2000, loss[loss=0.2014, simple_loss=0.283, pruned_loss=0.05984, over 8084.00 frames. 
], tot_loss[loss=0.2091, simple_loss=0.2902, pruned_loss=0.06397, over 1614492.58 frames. ], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:51,754 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.363e+02 2.911e+02 3.881e+02 1.027e+03, threshold=5.822e+02, percent-clipped=2.0 +2023-02-07 00:21:02,540 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.22 vs. limit=5.0 +2023-02-07 00:21:20,956 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:22,881 INFO [train.py:901] (2/4) Epoch 20, batch 2050, loss[loss=0.2235, simple_loss=0.3125, pruned_loss=0.06728, over 8447.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2902, pruned_loss=0.06395, over 1616852.56 frames. ], batch size: 27, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:21:57,546 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:58,082 INFO [train.py:901] (2/4) Epoch 20, batch 2100, loss[loss=0.1764, simple_loss=0.2533, pruned_loss=0.04976, over 7239.00 frames. ], tot_loss[loss=0.209, simple_loss=0.29, pruned_loss=0.06399, over 1618097.29 frames. ], batch size: 16, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:21:58,407 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 00:22:02,102 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.564e+02 2.968e+02 3.686e+02 8.256e+02, threshold=5.935e+02, percent-clipped=7.0 +2023-02-07 00:22:22,173 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:22:33,489 INFO [train.py:901] (2/4) Epoch 20, batch 2150, loss[loss=0.2089, simple_loss=0.2898, pruned_loss=0.06404, over 7808.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2907, pruned_loss=0.06454, over 1616412.43 frames. ], batch size: 20, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:39,018 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:07,323 INFO [train.py:901] (2/4) Epoch 20, batch 2200, loss[loss=0.2532, simple_loss=0.3419, pruned_loss=0.08224, over 8514.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2924, pruned_loss=0.06545, over 1618858.75 frames. ], batch size: 28, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:23:12,092 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.519e+02 2.939e+02 3.787e+02 7.175e+02, threshold=5.878e+02, percent-clipped=4.0 +2023-02-07 00:23:26,031 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:26,115 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:38,413 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155819.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:43,085 INFO [train.py:901] (2/4) Epoch 20, batch 2250, loss[loss=0.2028, simple_loss=0.2887, pruned_loss=0.05844, over 8827.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2931, pruned_loss=0.06536, over 1625340.84 frames. 
], batch size: 49, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:23:44,831 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155827.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:24:17,860 INFO [train.py:901] (2/4) Epoch 20, batch 2300, loss[loss=0.2237, simple_loss=0.3089, pruned_loss=0.06927, over 8491.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2928, pruned_loss=0.0652, over 1624926.16 frames. ], batch size: 29, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:24:21,976 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.500e+02 2.966e+02 3.753e+02 6.656e+02, threshold=5.933e+02, percent-clipped=3.0 +2023-02-07 00:24:54,616 INFO [train.py:901] (2/4) Epoch 20, batch 2350, loss[loss=0.1727, simple_loss=0.2433, pruned_loss=0.05104, over 7710.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2926, pruned_loss=0.06521, over 1622721.77 frames. ], batch size: 18, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:25:00,027 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:25:00,857 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 00:25:04,969 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9940, 1.6538, 1.7902, 1.4778, 0.8716, 1.6176, 1.7142, 1.6271], + device='cuda:2'), covar=tensor([0.0553, 0.1200, 0.1584, 0.1388, 0.0634, 0.1417, 0.0710, 0.0614], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:25:23,233 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155967.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:25:28,796 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8234, 1.5224, 1.6309, 1.3357, 1.0457, 1.4607, 1.7074, 1.4241], + device='cuda:2'), covar=tensor([0.0568, 0.1221, 0.1704, 0.1496, 0.0610, 0.1464, 0.0691, 0.0681], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:25:29,309 INFO [train.py:901] (2/4) Epoch 20, batch 2400, loss[loss=0.2399, simple_loss=0.3257, pruned_loss=0.07701, over 8661.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2917, pruned_loss=0.06476, over 1617406.37 frames. ], batch size: 34, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:25:33,221 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.377e+02 2.729e+02 3.502e+02 6.388e+02, threshold=5.458e+02, percent-clipped=1.0 +2023-02-07 00:25:36,387 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. 
limit=2.0 +2023-02-07 00:25:58,367 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9364, 1.6753, 2.0154, 1.7488, 1.9485, 2.0029, 1.7874, 0.7128], + device='cuda:2'), covar=tensor([0.5439, 0.4431, 0.1909, 0.3546, 0.2355, 0.3041, 0.1805, 0.4875], + device='cuda:2'), in_proj_covar=tensor([0.0939, 0.0968, 0.0793, 0.0932, 0.0991, 0.0881, 0.0743, 0.0821], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 00:26:00,994 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:05,601 INFO [train.py:901] (2/4) Epoch 20, batch 2450, loss[loss=0.2009, simple_loss=0.2906, pruned_loss=0.05557, over 8338.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2911, pruned_loss=0.06446, over 1616918.49 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:26:29,109 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. limit=2.0 +2023-02-07 00:26:33,872 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 00:26:37,719 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8904, 1.6251, 1.7001, 1.4059, 0.9327, 1.5182, 1.5892, 1.4022], + device='cuda:2'), covar=tensor([0.0539, 0.1184, 0.1660, 0.1399, 0.0585, 0.1474, 0.0714, 0.0669], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:26:40,974 INFO [train.py:901] (2/4) Epoch 20, batch 2500, loss[loss=0.1814, simple_loss=0.2652, pruned_loss=0.04875, over 7792.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2905, pruned_loss=0.06413, over 1617869.27 frames. ], batch size: 19, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:26:45,021 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.463e+02 3.105e+02 3.826e+02 1.382e+03, threshold=6.210e+02, percent-clipped=11.0 +2023-02-07 00:26:45,218 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:49,176 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156088.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:15,809 INFO [train.py:901] (2/4) Epoch 20, batch 2550, loss[loss=0.233, simple_loss=0.3092, pruned_loss=0.0784, over 8516.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.06369, over 1616502.93 frames. 
], batch size: 31, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:27:18,741 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1097, 1.1817, 1.2209, 0.7270, 1.2358, 1.0561, 0.1003, 1.1957], + device='cuda:2'), covar=tensor([0.0428, 0.0412, 0.0342, 0.0549, 0.0415, 0.0914, 0.0812, 0.0313], + device='cuda:2'), in_proj_covar=tensor([0.0439, 0.0380, 0.0332, 0.0434, 0.0364, 0.0524, 0.0385, 0.0406], + device='cuda:2'), out_proj_covar=tensor([1.1853e-04, 9.9870e-05, 8.7688e-05, 1.1495e-04, 9.6314e-05, 1.4922e-04, + 1.0419e-04, 1.0835e-04], device='cuda:2') +2023-02-07 00:27:21,365 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:29,846 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:50,757 INFO [train.py:901] (2/4) Epoch 20, batch 2600, loss[loss=0.2407, simple_loss=0.3099, pruned_loss=0.08579, over 8197.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2908, pruned_loss=0.06453, over 1616762.34 frames. ], batch size: 23, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:27:54,662 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.376e+02 3.118e+02 3.808e+02 9.704e+02, threshold=6.236e+02, percent-clipped=5.0 +2023-02-07 00:28:00,367 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:17,466 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:18,165 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8335, 2.2634, 3.9885, 1.5838, 2.9332, 2.3702, 1.7989, 2.8045], + device='cuda:2'), covar=tensor([0.1811, 0.2472, 0.0787, 0.4405, 0.1877, 0.3035, 0.2233, 0.2576], + device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0586, 0.0549, 0.0628, 0.0637, 0.0588, 0.0522, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:28:20,844 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6908, 2.9364, 2.4436, 4.0353, 1.8063, 2.2727, 2.4561, 3.1908], + device='cuda:2'), covar=tensor([0.0604, 0.0791, 0.0772, 0.0212, 0.1090, 0.1120, 0.1077, 0.0689], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0210, 0.0204, 0.0247, 0.0250, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 00:28:24,592 INFO [train.py:901] (2/4) Epoch 20, batch 2650, loss[loss=0.1849, simple_loss=0.2645, pruned_loss=0.05262, over 7831.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2888, pruned_loss=0.06332, over 1614804.32 frames. ], batch size: 20, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:28:30,654 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:49,181 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:29:00,117 INFO [train.py:901] (2/4) Epoch 20, batch 2700, loss[loss=0.2341, simple_loss=0.3143, pruned_loss=0.077, over 8497.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2903, pruned_loss=0.06473, over 1614620.03 frames. 
], batch size: 26, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:29:04,086 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.401e+02 3.078e+02 3.829e+02 8.557e+02, threshold=6.156e+02, percent-clipped=4.0 +2023-02-07 00:29:23,258 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156308.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:29:35,120 INFO [train.py:901] (2/4) Epoch 20, batch 2750, loss[loss=0.2435, simple_loss=0.3199, pruned_loss=0.08355, over 8541.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2913, pruned_loss=0.06512, over 1613181.36 frames. ], batch size: 49, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:29:43,528 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:01,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156363.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:10,369 INFO [train.py:901] (2/4) Epoch 20, batch 2800, loss[loss=0.2389, simple_loss=0.3066, pruned_loss=0.0856, over 7302.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2912, pruned_loss=0.06523, over 1611475.01 frames. ], batch size: 72, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:30:15,856 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.534e+02 2.983e+02 3.648e+02 6.974e+02, threshold=5.966e+02, percent-clipped=1.0 +2023-02-07 00:30:20,852 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:38,823 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156415.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:46,249 INFO [train.py:901] (2/4) Epoch 20, batch 2850, loss[loss=0.215, simple_loss=0.2937, pruned_loss=0.06812, over 8334.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2902, pruned_loss=0.06401, over 1614822.79 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:30:50,405 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156432.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:52,534 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4183, 4.4129, 3.9796, 1.9640, 3.8876, 4.0457, 4.0029, 3.8400], + device='cuda:2'), covar=tensor([0.0731, 0.0490, 0.1032, 0.4928, 0.0822, 0.0895, 0.1251, 0.0786], + device='cuda:2'), in_proj_covar=tensor([0.0513, 0.0425, 0.0429, 0.0529, 0.0419, 0.0430, 0.0416, 0.0373], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:31:20,790 INFO [train.py:901] (2/4) Epoch 20, batch 2900, loss[loss=0.1646, simple_loss=0.2456, pruned_loss=0.04178, over 7542.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2913, pruned_loss=0.06449, over 1615866.71 frames. ], batch size: 18, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:31:26,316 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.409e+02 2.783e+02 3.401e+02 8.568e+02, threshold=5.566e+02, percent-clipped=1.0 +2023-02-07 00:31:50,391 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:31:53,716 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-07 00:31:57,134 INFO [train.py:901] (2/4) Epoch 20, batch 2950, loss[loss=0.1862, simple_loss=0.2559, pruned_loss=0.05819, over 7529.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2915, pruned_loss=0.06519, over 1611921.94 frames. ], batch size: 18, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:32:08,279 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156542.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:11,760 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:31,023 INFO [train.py:901] (2/4) Epoch 20, batch 3000, loss[loss=0.2446, simple_loss=0.3136, pruned_loss=0.08777, over 8497.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2908, pruned_loss=0.06495, over 1610297.38 frames. ], batch size: 28, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:32:31,024 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 00:32:46,839 INFO [train.py:935] (2/4) Epoch 20, validation: loss=0.1756, simple_loss=0.2756, pruned_loss=0.03779, over 944034.00 frames. +2023-02-07 00:32:46,840 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 00:32:48,401 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:51,796 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 2.420e+02 3.007e+02 3.801e+02 6.408e+02, threshold=6.014e+02, percent-clipped=4.0 +2023-02-07 00:33:22,163 INFO [train.py:901] (2/4) Epoch 20, batch 3050, loss[loss=0.206, simple_loss=0.2892, pruned_loss=0.06145, over 8071.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2897, pruned_loss=0.06404, over 1610296.55 frames. ], batch size: 21, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:33:33,203 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1736, 1.0826, 1.2919, 1.0785, 0.9227, 1.2893, 0.0961, 1.0187], + device='cuda:2'), covar=tensor([0.1751, 0.1439, 0.0471, 0.0791, 0.2837, 0.0581, 0.2295, 0.1161], + device='cuda:2'), in_proj_covar=tensor([0.0186, 0.0193, 0.0123, 0.0218, 0.0266, 0.0133, 0.0166, 0.0186], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 00:33:40,595 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:33:56,274 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1778, 1.0663, 1.3310, 1.1195, 0.9356, 1.3219, 0.0712, 0.9921], + device='cuda:2'), covar=tensor([0.1553, 0.1359, 0.0446, 0.0715, 0.2599, 0.0524, 0.2011, 0.1118], + device='cuda:2'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0220, 0.0268, 0.0134, 0.0167, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 00:33:57,463 INFO [train.py:901] (2/4) Epoch 20, batch 3100, loss[loss=0.1891, simple_loss=0.2619, pruned_loss=0.05818, over 7670.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2883, pruned_loss=0.06343, over 1605625.25 frames. 
], batch size: 19, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:34:02,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.428e+02 2.992e+02 3.732e+02 8.006e+02, threshold=5.985e+02, percent-clipped=5.0 +2023-02-07 00:34:09,239 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:34:31,974 INFO [train.py:901] (2/4) Epoch 20, batch 3150, loss[loss=0.22, simple_loss=0.3016, pruned_loss=0.06914, over 8602.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.29, pruned_loss=0.06434, over 1606407.78 frames. ], batch size: 31, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:01,275 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:01,311 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:07,253 INFO [train.py:901] (2/4) Epoch 20, batch 3200, loss[loss=0.1779, simple_loss=0.2542, pruned_loss=0.05079, over 7969.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2889, pruned_loss=0.06365, over 1609907.55 frames. ], batch size: 21, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:11,888 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.338e+02 2.875e+02 3.612e+02 1.133e+03, threshold=5.749e+02, percent-clipped=4.0 +2023-02-07 00:35:26,503 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156803.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:41,818 INFO [train.py:901] (2/4) Epoch 20, batch 3250, loss[loss=0.1981, simple_loss=0.2892, pruned_loss=0.05353, over 8031.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2888, pruned_loss=0.06337, over 1608543.17 frames. ], batch size: 22, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:43,291 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:36:17,055 INFO [train.py:901] (2/4) Epoch 20, batch 3300, loss[loss=0.193, simple_loss=0.2761, pruned_loss=0.05499, over 8392.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2904, pruned_loss=0.06427, over 1611829.78 frames. ], batch size: 49, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:36:21,772 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.341e+02 2.967e+02 3.887e+02 7.432e+02, threshold=5.934e+02, percent-clipped=7.0 +2023-02-07 00:36:32,803 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9233, 1.4756, 1.7560, 1.3236, 1.0609, 1.4504, 1.8166, 1.4831], + device='cuda:2'), covar=tensor([0.0519, 0.1309, 0.1663, 0.1448, 0.0599, 0.1565, 0.0644, 0.0658], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:36:35,551 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156903.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:36:51,528 INFO [train.py:901] (2/4) Epoch 20, batch 3350, loss[loss=0.2079, simple_loss=0.2949, pruned_loss=0.06041, over 8463.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2887, pruned_loss=0.06298, over 1612898.45 frames. 
], batch size: 29, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:37:05,904 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 00:37:07,167 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:37:25,819 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156974.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:37:27,098 INFO [train.py:901] (2/4) Epoch 20, batch 3400, loss[loss=0.2093, simple_loss=0.2822, pruned_loss=0.06818, over 8240.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2888, pruned_loss=0.06331, over 1612547.58 frames. ], batch size: 24, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:37:31,899 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.508e+02 3.011e+02 3.882e+02 8.239e+02, threshold=6.022e+02, percent-clipped=6.0 +2023-02-07 00:37:49,722 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6962, 1.6283, 1.9644, 1.5697, 1.0328, 1.6441, 2.2391, 1.9298], + device='cuda:2'), covar=tensor([0.0449, 0.1231, 0.1607, 0.1414, 0.0621, 0.1467, 0.0635, 0.0582], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:37:59,116 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1208, 1.5445, 1.7116, 1.3960, 1.1283, 1.5029, 1.9389, 1.4611], + device='cuda:2'), covar=tensor([0.0501, 0.1200, 0.1606, 0.1441, 0.0601, 0.1430, 0.0647, 0.0676], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:38:01,292 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:03,917 INFO [train.py:901] (2/4) Epoch 20, batch 3450, loss[loss=0.2284, simple_loss=0.3155, pruned_loss=0.07068, over 8322.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2892, pruned_loss=0.06316, over 1611300.38 frames. ], batch size: 25, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:38:05,349 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:11,405 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7045, 1.7349, 2.3149, 1.5958, 1.3587, 2.2358, 0.4073, 1.3687], + device='cuda:2'), covar=tensor([0.1759, 0.1327, 0.0371, 0.1147, 0.2844, 0.0615, 0.2362, 0.1323], + device='cuda:2'), in_proj_covar=tensor([0.0184, 0.0191, 0.0123, 0.0216, 0.0265, 0.0132, 0.0165, 0.0186], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 00:38:18,893 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:38,200 INFO [train.py:901] (2/4) Epoch 20, batch 3500, loss[loss=0.1927, simple_loss=0.2674, pruned_loss=0.05903, over 8098.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2897, pruned_loss=0.06396, over 1610837.78 frames. 
], batch size: 21, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:38:43,584 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.548e+02 3.004e+02 3.939e+02 7.448e+02, threshold=6.007e+02, percent-clipped=9.0 +2023-02-07 00:39:02,212 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 00:39:03,069 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:39:03,905 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4443, 1.7683, 2.6832, 1.3556, 2.0274, 1.7966, 1.5245, 1.9121], + device='cuda:2'), covar=tensor([0.1897, 0.2563, 0.0802, 0.4350, 0.1723, 0.3096, 0.2205, 0.2234], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0593, 0.0556, 0.0633, 0.0643, 0.0592, 0.0529, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:39:13,059 INFO [train.py:901] (2/4) Epoch 20, batch 3550, loss[loss=0.2057, simple_loss=0.2918, pruned_loss=0.05973, over 8233.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2892, pruned_loss=0.06387, over 1610916.34 frames. ], batch size: 22, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:39:37,691 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157160.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:39:48,306 INFO [train.py:901] (2/4) Epoch 20, batch 3600, loss[loss=0.2322, simple_loss=0.3037, pruned_loss=0.08033, over 8187.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2894, pruned_loss=0.06368, over 1615410.81 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:39:53,028 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.446e+02 2.923e+02 3.668e+02 9.434e+02, threshold=5.847e+02, percent-clipped=4.0 +2023-02-07 00:40:19,603 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1900, 3.6250, 2.2690, 2.7380, 2.9251, 1.8251, 2.9893, 3.0466], + device='cuda:2'), covar=tensor([0.1493, 0.0328, 0.0996, 0.0727, 0.0632, 0.1432, 0.0965, 0.1023], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0236, 0.0330, 0.0309, 0.0300, 0.0336, 0.0344, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 00:40:24,177 INFO [train.py:901] (2/4) Epoch 20, batch 3650, loss[loss=0.1975, simple_loss=0.2719, pruned_loss=0.06158, over 7435.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2895, pruned_loss=0.06396, over 1615074.95 frames. ], batch size: 17, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:40:24,364 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157226.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:40:38,561 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:40:58,619 INFO [train.py:901] (2/4) Epoch 20, batch 3700, loss[loss=0.1862, simple_loss=0.2624, pruned_loss=0.05499, over 8246.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2908, pruned_loss=0.06465, over 1618525.13 frames. 
], batch size: 22, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:41:03,188 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.545e+02 3.038e+02 3.849e+02 9.039e+02, threshold=6.076e+02, percent-clipped=6.0 +2023-02-07 00:41:05,241 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 00:41:33,567 INFO [train.py:901] (2/4) Epoch 20, batch 3750, loss[loss=0.2126, simple_loss=0.2875, pruned_loss=0.0689, over 6622.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2913, pruned_loss=0.06479, over 1618184.96 frames. ], batch size: 72, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:41:58,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157362.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:05,450 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:06,815 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157374.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:07,956 INFO [train.py:901] (2/4) Epoch 20, batch 3800, loss[loss=0.2998, simple_loss=0.362, pruned_loss=0.1188, over 8677.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2926, pruned_loss=0.06525, over 1619413.54 frames. ], batch size: 34, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:42:12,516 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.302e+02 2.981e+02 3.884e+02 7.104e+02, threshold=5.962e+02, percent-clipped=4.0 +2023-02-07 00:42:25,025 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:42:39,607 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157422.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:42:42,776 INFO [train.py:901] (2/4) Epoch 20, batch 3850, loss[loss=0.1872, simple_loss=0.2729, pruned_loss=0.0507, over 7968.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2931, pruned_loss=0.06583, over 1616352.76 frames. ], batch size: 21, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:43:05,839 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6172, 4.5644, 4.1613, 1.9833, 4.1348, 4.2686, 4.0810, 4.0276], + device='cuda:2'), covar=tensor([0.0624, 0.0480, 0.1039, 0.4292, 0.0770, 0.0798, 0.1218, 0.0604], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0421, 0.0426, 0.0526, 0.0415, 0.0428, 0.0413, 0.0371], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:43:09,739 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 00:43:17,658 INFO [train.py:901] (2/4) Epoch 20, batch 3900, loss[loss=0.2307, simple_loss=0.3142, pruned_loss=0.07356, over 8196.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2933, pruned_loss=0.06562, over 1617131.60 frames. 
], batch size: 23, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:43:21,793 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157482.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:22,201 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.513e+02 3.153e+02 3.900e+02 7.255e+02, threshold=6.305e+02, percent-clipped=5.0 +2023-02-07 00:43:24,971 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157487.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:37,143 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:39,411 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:43:43,554 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1640, 1.3719, 4.3263, 1.3845, 3.7829, 3.6165, 3.9500, 3.7905], + device='cuda:2'), covar=tensor([0.0559, 0.4706, 0.0577, 0.4388, 0.1189, 0.1000, 0.0590, 0.0691], + device='cuda:2'), in_proj_covar=tensor([0.0612, 0.0635, 0.0687, 0.0616, 0.0697, 0.0603, 0.0599, 0.0665], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:43:51,269 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157524.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:43:52,528 INFO [train.py:901] (2/4) Epoch 20, batch 3950, loss[loss=0.2068, simple_loss=0.2889, pruned_loss=0.0624, over 8238.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2923, pruned_loss=0.0649, over 1614171.10 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:44:25,096 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5570, 4.5499, 4.1452, 2.0349, 4.0748, 4.2119, 4.1987, 4.0849], + device='cuda:2'), covar=tensor([0.0675, 0.0477, 0.0974, 0.4593, 0.0763, 0.0956, 0.1098, 0.0722], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0425, 0.0431, 0.0531, 0.0418, 0.0432, 0.0418, 0.0374], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:44:28,110 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.52 vs. limit=5.0 +2023-02-07 00:44:28,440 INFO [train.py:901] (2/4) Epoch 20, batch 4000, loss[loss=0.1842, simple_loss=0.2655, pruned_loss=0.05141, over 7661.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.291, pruned_loss=0.0641, over 1613624.25 frames. 
], batch size: 19, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:44:33,882 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.441e+02 3.259e+02 3.960e+02 7.383e+02, threshold=6.518e+02, percent-clipped=3.0 +2023-02-07 00:44:34,064 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9162, 1.4337, 6.0152, 2.0411, 5.4275, 5.0987, 5.6199, 5.4974], + device='cuda:2'), covar=tensor([0.0419, 0.5187, 0.0385, 0.3957, 0.0979, 0.0896, 0.0453, 0.0445], + device='cuda:2'), in_proj_covar=tensor([0.0614, 0.0637, 0.0688, 0.0619, 0.0701, 0.0606, 0.0601, 0.0667], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:44:36,142 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:44:41,340 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0656, 3.8166, 2.3048, 2.8625, 2.8277, 2.1162, 3.0931, 3.1458], + device='cuda:2'), covar=tensor([0.1562, 0.0310, 0.1117, 0.0721, 0.0684, 0.1368, 0.0851, 0.1009], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0235, 0.0332, 0.0307, 0.0300, 0.0335, 0.0344, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 00:44:57,791 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157618.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:44:58,430 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157619.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:45:03,486 INFO [train.py:901] (2/4) Epoch 20, batch 4050, loss[loss=0.2009, simple_loss=0.3045, pruned_loss=0.0486, over 8143.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06396, over 1612697.70 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:45:15,101 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:45:29,209 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 00:45:33,138 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0 +2023-02-07 00:45:38,025 INFO [train.py:901] (2/4) Epoch 20, batch 4100, loss[loss=0.2719, simple_loss=0.3514, pruned_loss=0.09619, over 8358.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.292, pruned_loss=0.06476, over 1613740.35 frames. ], batch size: 24, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:45:42,584 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.468e+02 3.178e+02 4.268e+02 8.149e+02, threshold=6.355e+02, percent-clipped=4.0 +2023-02-07 00:46:07,476 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157718.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:12,784 INFO [train.py:901] (2/4) Epoch 20, batch 4150, loss[loss=0.1857, simple_loss=0.2777, pruned_loss=0.04684, over 8108.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.291, pruned_loss=0.06446, over 1611440.66 frames. 
], batch size: 23, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:46:25,111 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157743.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:25,658 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157744.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:40,586 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157766.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:46:41,942 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:46:45,978 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2795, 1.9848, 2.7114, 2.2062, 2.6783, 2.2802, 2.0456, 1.5494], + device='cuda:2'), covar=tensor([0.5247, 0.4926, 0.1744, 0.3544, 0.2410, 0.2852, 0.1839, 0.5106], + device='cuda:2'), in_proj_covar=tensor([0.0934, 0.0966, 0.0788, 0.0931, 0.0984, 0.0882, 0.0737, 0.0815], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 00:46:46,933 INFO [train.py:901] (2/4) Epoch 20, batch 4200, loss[loss=0.2508, simple_loss=0.3212, pruned_loss=0.09016, over 8095.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2904, pruned_loss=0.06419, over 1609385.82 frames. ], batch size: 21, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:46:52,349 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.385e+02 2.811e+02 3.577e+02 7.269e+02, threshold=5.621e+02, percent-clipped=2.0 +2023-02-07 00:47:08,775 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 00:47:10,321 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8344, 1.5981, 5.9817, 2.3583, 5.3447, 5.0618, 5.5422, 5.3847], + device='cuda:2'), covar=tensor([0.0502, 0.5005, 0.0371, 0.3538, 0.1004, 0.0844, 0.0510, 0.0516], + device='cuda:2'), in_proj_covar=tensor([0.0619, 0.0640, 0.0694, 0.0622, 0.0703, 0.0610, 0.0605, 0.0670], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 00:47:23,327 INFO [train.py:901] (2/4) Epoch 20, batch 4250, loss[loss=0.2117, simple_loss=0.302, pruned_loss=0.06066, over 8314.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2887, pruned_loss=0.06387, over 1606508.46 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:47:26,206 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8039, 1.7538, 1.9634, 1.6173, 1.2739, 1.7315, 2.3806, 2.0386], + device='cuda:2'), covar=tensor([0.0450, 0.1182, 0.1587, 0.1349, 0.0577, 0.1363, 0.0570, 0.0602], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0113, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 00:47:28,290 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:32,309 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 00:47:46,379 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157859.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:53,372 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157868.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:47:55,484 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:58,177 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157875.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:47:58,652 INFO [train.py:901] (2/4) Epoch 20, batch 4300, loss[loss=0.2008, simple_loss=0.2792, pruned_loss=0.06117, over 8132.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2893, pruned_loss=0.06385, over 1608488.11 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:48:02,059 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157881.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:48:03,195 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.270e+02 2.745e+02 3.400e+02 8.203e+02, threshold=5.491e+02, percent-clipped=7.0 +2023-02-07 00:48:15,327 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157900.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:48:17,999 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6537, 1.3450, 2.8958, 1.4184, 2.0654, 3.0476, 3.2123, 2.6280], + device='cuda:2'), covar=tensor([0.1147, 0.1656, 0.0402, 0.2030, 0.0939, 0.0296, 0.0635, 0.0589], + device='cuda:2'), in_proj_covar=tensor([0.0292, 0.0318, 0.0285, 0.0311, 0.0300, 0.0262, 0.0407, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 00:48:24,968 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 00:48:33,499 INFO [train.py:901] (2/4) Epoch 20, batch 4350, loss[loss=0.2208, simple_loss=0.3096, pruned_loss=0.06598, over 8185.00 frames. ], tot_loss[loss=0.208, simple_loss=0.289, pruned_loss=0.06356, over 1612469.77 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:48:36,288 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:48:48,937 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.01 vs. limit=5.0 +2023-02-07 00:49:04,106 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 00:49:08,264 INFO [train.py:901] (2/4) Epoch 20, batch 4400, loss[loss=0.2078, simple_loss=0.2889, pruned_loss=0.06335, over 8254.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2897, pruned_loss=0.06388, over 1611988.22 frames. ], batch size: 24, lr: 3.79e-03, grad_scale: 8.0 +2023-02-07 00:49:13,805 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.443e+02 2.894e+02 3.714e+02 1.238e+03, threshold=5.788e+02, percent-clipped=6.0 +2023-02-07 00:49:14,001 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157983.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:49:44,318 INFO [train.py:901] (2/4) Epoch 20, batch 4450, loss[loss=0.2223, simple_loss=0.3058, pruned_loss=0.06939, over 8473.00 frames. 
], tot_loss[loss=0.2094, simple_loss=0.2908, pruned_loss=0.06397, over 1617761.06 frames. ], batch size: 25, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:49:45,687 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 00:49:57,297 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158045.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:18,894 INFO [train.py:901] (2/4) Epoch 20, batch 4500, loss[loss=0.1998, simple_loss=0.2886, pruned_loss=0.05549, over 8134.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2905, pruned_loss=0.06352, over 1619509.20 frames. ], batch size: 22, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:50:23,581 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.382e+02 2.908e+02 3.384e+02 7.082e+02, threshold=5.816e+02, percent-clipped=5.0 +2023-02-07 00:50:27,966 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:39,140 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 00:50:45,180 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158114.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:45,855 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158115.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:50:53,563 INFO [train.py:901] (2/4) Epoch 20, batch 4550, loss[loss=0.1889, simple_loss=0.273, pruned_loss=0.05241, over 8244.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.29, pruned_loss=0.06329, over 1618101.07 frames. ], batch size: 22, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:51:01,051 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158137.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:51:02,892 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158140.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:51:02,940 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158140.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:51:18,411 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158162.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:51:28,241 INFO [train.py:901] (2/4) Epoch 20, batch 4600, loss[loss=0.1978, simple_loss=0.2892, pruned_loss=0.05323, over 8037.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2898, pruned_loss=0.06291, over 1619237.72 frames. 
], batch size: 22, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:51:32,831 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.506e+02 3.217e+02 3.763e+02 8.986e+02, threshold=6.435e+02, percent-clipped=3.0 +2023-02-07 00:51:54,923 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:52:01,859 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4524, 1.3911, 1.7429, 1.2584, 1.1176, 1.7186, 0.2564, 1.2109], + device='cuda:2'), covar=tensor([0.1757, 0.1314, 0.0441, 0.1097, 0.2813, 0.0536, 0.2253, 0.1369], + device='cuda:2'), in_proj_covar=tensor([0.0187, 0.0195, 0.0124, 0.0219, 0.0268, 0.0134, 0.0167, 0.0189], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 00:52:03,092 INFO [train.py:901] (2/4) Epoch 20, batch 4650, loss[loss=0.2072, simple_loss=0.2813, pruned_loss=0.06652, over 7522.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2907, pruned_loss=0.06395, over 1618592.53 frames. ], batch size: 18, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:52:12,064 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158239.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:52:30,113 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158264.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:52:37,867 INFO [train.py:901] (2/4) Epoch 20, batch 4700, loss[loss=0.1968, simple_loss=0.2811, pruned_loss=0.05627, over 8297.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2913, pruned_loss=0.06451, over 1616943.57 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:52:42,603 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.408e+02 3.012e+02 4.119e+02 1.091e+03, threshold=6.025e+02, percent-clipped=3.0 +2023-02-07 00:52:55,802 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:53:12,687 INFO [train.py:901] (2/4) Epoch 20, batch 4750, loss[loss=0.2124, simple_loss=0.3008, pruned_loss=0.06197, over 8503.00 frames. ], tot_loss[loss=0.209, simple_loss=0.29, pruned_loss=0.06399, over 1610311.09 frames. ], batch size: 28, lr: 3.78e-03, grad_scale: 8.0 +2023-02-07 00:53:12,915 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158326.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:53:15,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158330.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:53:40,929 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 00:53:43,689 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 00:53:48,277 INFO [train.py:901] (2/4) Epoch 20, batch 4800, loss[loss=0.209, simple_loss=0.2946, pruned_loss=0.06176, over 8674.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2903, pruned_loss=0.06429, over 1607387.80 frames. 
], batch size: 39, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:53:52,907 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.386e+02 2.729e+02 3.445e+02 7.258e+02, threshold=5.458e+02, percent-clipped=2.0 +2023-02-07 00:54:06,936 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158402.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:54:22,901 INFO [train.py:901] (2/4) Epoch 20, batch 4850, loss[loss=0.1888, simple_loss=0.2836, pruned_loss=0.04704, over 8142.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2903, pruned_loss=0.06397, over 1608420.17 frames. ], batch size: 22, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:54:23,842 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1195, 1.8373, 2.3870, 1.9722, 2.2867, 2.1726, 1.8914, 1.0932], + device='cuda:2'), covar=tensor([0.5311, 0.4685, 0.1844, 0.3444, 0.2466, 0.2742, 0.1869, 0.5268], + device='cuda:2'), in_proj_covar=tensor([0.0934, 0.0966, 0.0788, 0.0933, 0.0990, 0.0882, 0.0740, 0.0820], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 00:54:33,530 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 00:54:35,079 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1846, 1.5334, 4.4214, 2.0350, 2.4686, 4.9638, 5.0344, 4.2714], + device='cuda:2'), covar=tensor([0.1292, 0.1848, 0.0278, 0.1934, 0.1202, 0.0208, 0.0502, 0.0610], + device='cuda:2'), in_proj_covar=tensor([0.0293, 0.0320, 0.0287, 0.0313, 0.0303, 0.0263, 0.0410, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 00:54:51,915 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0356, 1.8250, 3.1931, 1.7766, 2.4658, 3.4625, 3.5021, 2.9874], + device='cuda:2'), covar=tensor([0.1155, 0.1589, 0.0428, 0.1815, 0.1039, 0.0254, 0.0679, 0.0526], + device='cuda:2'), in_proj_covar=tensor([0.0293, 0.0319, 0.0286, 0.0312, 0.0303, 0.0262, 0.0409, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 00:54:57,239 INFO [train.py:901] (2/4) Epoch 20, batch 4900, loss[loss=0.2489, simple_loss=0.3209, pruned_loss=0.08847, over 8241.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2925, pruned_loss=0.06518, over 1608196.56 frames. ], batch size: 24, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:55:02,467 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.481e+02 3.123e+02 4.208e+02 8.958e+02, threshold=6.246e+02, percent-clipped=7.0 +2023-02-07 00:55:03,228 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:55:32,882 INFO [train.py:901] (2/4) Epoch 20, batch 4950, loss[loss=0.1889, simple_loss=0.2661, pruned_loss=0.05589, over 7709.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2911, pruned_loss=0.06424, over 1609984.34 frames. ], batch size: 18, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:56:07,759 INFO [train.py:901] (2/4) Epoch 20, batch 5000, loss[loss=0.2347, simple_loss=0.3122, pruned_loss=0.07858, over 8325.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2908, pruned_loss=0.06437, over 1609935.36 frames. 
], batch size: 26, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:56:12,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.361e+02 2.881e+02 3.667e+02 7.563e+02, threshold=5.761e+02, percent-clipped=2.0 +2023-02-07 00:56:12,598 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.77 vs. limit=5.0 +2023-02-07 00:56:14,508 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:56:23,821 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158599.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:56:32,798 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158611.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:56:42,905 INFO [train.py:901] (2/4) Epoch 20, batch 5050, loss[loss=0.1984, simple_loss=0.2902, pruned_loss=0.05329, over 8349.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2925, pruned_loss=0.06563, over 1610631.23 frames. ], batch size: 24, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:56:54,127 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4687, 1.4400, 1.7976, 1.1603, 1.1007, 1.7888, 0.1642, 1.1528], + device='cuda:2'), covar=tensor([0.1977, 0.1477, 0.0452, 0.1132, 0.3152, 0.0510, 0.2283, 0.1370], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0197, 0.0127, 0.0222, 0.0273, 0.0135, 0.0171, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 00:57:10,205 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 00:57:17,761 INFO [train.py:901] (2/4) Epoch 20, batch 5100, loss[loss=0.1669, simple_loss=0.2612, pruned_loss=0.03625, over 8110.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2917, pruned_loss=0.06489, over 1611833.57 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:57:23,331 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.670e+02 3.233e+02 3.910e+02 8.185e+02, threshold=6.466e+02, percent-clipped=7.0 +2023-02-07 00:57:53,856 INFO [train.py:901] (2/4) Epoch 20, batch 5150, loss[loss=0.2076, simple_loss=0.2953, pruned_loss=0.0599, over 8292.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2921, pruned_loss=0.06486, over 1615567.89 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:58:08,320 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158746.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:58:28,393 INFO [train.py:901] (2/4) Epoch 20, batch 5200, loss[loss=0.2505, simple_loss=0.3273, pruned_loss=0.08692, over 8461.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2916, pruned_loss=0.06466, over 1612715.19 frames. 
], batch size: 25, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:58:30,716 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3867, 1.8154, 1.3472, 2.9688, 1.4027, 1.2524, 2.0883, 1.9597], + device='cuda:2'), covar=tensor([0.1684, 0.1381, 0.2026, 0.0336, 0.1406, 0.2163, 0.0984, 0.1113], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0212, 0.0203, 0.0246, 0.0249, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 00:58:33,213 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.433e+02 2.837e+02 3.461e+02 7.505e+02, threshold=5.673e+02, percent-clipped=2.0 +2023-02-07 00:58:41,617 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:58:46,634 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:58:52,344 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 00:59:03,978 INFO [train.py:901] (2/4) Epoch 20, batch 5250, loss[loss=0.2647, simple_loss=0.3378, pruned_loss=0.09578, over 8540.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2914, pruned_loss=0.06496, over 1612488.30 frames. ], batch size: 49, lr: 3.78e-03, grad_scale: 16.0 +2023-02-07 00:59:11,285 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 00:59:22,853 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158853.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:59:24,328 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158855.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:59:28,231 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158861.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:59:38,588 INFO [train.py:901] (2/4) Epoch 20, batch 5300, loss[loss=0.1812, simple_loss=0.2527, pruned_loss=0.05485, over 7659.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2919, pruned_loss=0.06553, over 1614074.04 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 00:59:41,443 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:59:43,361 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.336e+02 2.792e+02 3.296e+02 7.091e+02, threshold=5.585e+02, percent-clipped=2.0 +2023-02-07 00:59:52,521 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-07 01:00:13,210 INFO [train.py:901] (2/4) Epoch 20, batch 5350, loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.06161, over 8665.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.291, pruned_loss=0.06501, over 1611816.05 frames. ], batch size: 49, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:00:15,070 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 01:00:27,706 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 01:00:48,000 INFO [train.py:901] (2/4) Epoch 20, batch 5400, loss[loss=0.2389, simple_loss=0.3128, pruned_loss=0.08245, over 8469.00 frames. 
], tot_loss[loss=0.2112, simple_loss=0.2914, pruned_loss=0.06554, over 1609622.84 frames. ], batch size: 29, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:00:52,644 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.394e+02 2.966e+02 3.887e+02 6.953e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-07 01:01:16,690 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 01:01:22,917 INFO [train.py:901] (2/4) Epoch 20, batch 5450, loss[loss=0.1898, simple_loss=0.2772, pruned_loss=0.05121, over 8028.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2914, pruned_loss=0.06538, over 1611226.74 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:01:42,893 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2616, 1.9942, 2.7589, 2.2079, 2.6565, 2.2896, 1.9844, 1.5760], + device='cuda:2'), covar=tensor([0.4920, 0.4790, 0.1703, 0.3381, 0.2254, 0.2830, 0.1916, 0.4881], + device='cuda:2'), in_proj_covar=tensor([0.0925, 0.0958, 0.0784, 0.0924, 0.0977, 0.0874, 0.0732, 0.0809], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 01:01:57,423 INFO [train.py:901] (2/4) Epoch 20, batch 5500, loss[loss=0.227, simple_loss=0.3056, pruned_loss=0.07415, over 8466.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2917, pruned_loss=0.06537, over 1614320.19 frames. ], batch size: 27, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:02:00,095 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 01:02:02,825 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.298e+02 2.656e+02 3.222e+02 6.486e+02, threshold=5.312e+02, percent-clipped=1.0 +2023-02-07 01:02:03,906 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. limit=2.0 +2023-02-07 01:02:05,787 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159087.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:21,369 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 01:02:27,315 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:29,240 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5199, 1.7949, 2.7415, 1.4320, 2.0340, 1.8774, 1.6302, 1.9791], + device='cuda:2'), covar=tensor([0.1884, 0.2682, 0.0775, 0.4541, 0.1754, 0.3213, 0.2229, 0.2139], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0590, 0.0554, 0.0634, 0.0643, 0.0589, 0.0528, 0.0629], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:02:32,441 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2662, 1.6576, 4.3134, 2.0478, 2.5243, 4.9640, 4.9257, 4.2368], + device='cuda:2'), covar=tensor([0.1096, 0.1741, 0.0333, 0.1854, 0.1123, 0.0160, 0.0439, 0.0536], + device='cuda:2'), in_proj_covar=tensor([0.0294, 0.0319, 0.0286, 0.0314, 0.0304, 0.0262, 0.0410, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 01:02:32,989 INFO [train.py:901] (2/4) Epoch 20, batch 5550, loss[loss=0.2206, simple_loss=0.3015, pruned_loss=0.06987, over 8348.00 frames. 
], tot_loss[loss=0.2105, simple_loss=0.291, pruned_loss=0.06502, over 1604669.34 frames. ], batch size: 26, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:02:41,927 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159139.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:43,056 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 01:02:44,166 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:46,019 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159145.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:02:50,309 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 01:03:08,164 INFO [train.py:901] (2/4) Epoch 20, batch 5600, loss[loss=0.271, simple_loss=0.3359, pruned_loss=0.103, over 8543.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2902, pruned_loss=0.06489, over 1604701.26 frames. ], batch size: 31, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:03:09,019 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:03:12,911 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.419e+02 2.780e+02 3.445e+02 7.739e+02, threshold=5.561e+02, percent-clipped=2.0 +2023-02-07 01:03:20,717 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5735, 2.0276, 3.0748, 1.4216, 2.3352, 1.9524, 1.7730, 2.2808], + device='cuda:2'), covar=tensor([0.1824, 0.2379, 0.0803, 0.4199, 0.1739, 0.3043, 0.2023, 0.2180], + device='cuda:2'), in_proj_covar=tensor([0.0519, 0.0589, 0.0555, 0.0634, 0.0643, 0.0589, 0.0528, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:03:23,295 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159197.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:03:43,998 INFO [train.py:901] (2/4) Epoch 20, batch 5650, loss[loss=0.1565, simple_loss=0.2456, pruned_loss=0.03376, over 7684.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2911, pruned_loss=0.06514, over 1606730.46 frames. ], batch size: 18, lr: 3.77e-03, grad_scale: 16.0 +2023-02-07 01:04:03,423 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159254.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:04:04,621 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 01:04:07,499 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159260.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:04:19,056 INFO [train.py:901] (2/4) Epoch 20, batch 5700, loss[loss=0.2026, simple_loss=0.2931, pruned_loss=0.05608, over 8464.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2925, pruned_loss=0.06581, over 1610446.25 frames. 
], batch size: 25, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:04:25,332 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.576e+02 3.260e+02 4.013e+02 6.441e+02, threshold=6.520e+02, percent-clipped=4.0 +2023-02-07 01:04:42,299 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159308.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:04:45,040 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159312.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:04:54,516 INFO [train.py:901] (2/4) Epoch 20, batch 5750, loss[loss=0.1664, simple_loss=0.2514, pruned_loss=0.04073, over 7970.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2933, pruned_loss=0.06613, over 1614979.76 frames. ], batch size: 21, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:04:59,222 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 01:05:09,316 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 01:05:23,228 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8697, 1.6295, 3.2578, 1.5749, 2.3154, 3.5420, 3.6450, 3.0586], + device='cuda:2'), covar=tensor([0.1211, 0.1605, 0.0343, 0.1987, 0.1060, 0.0228, 0.0540, 0.0501], + device='cuda:2'), in_proj_covar=tensor([0.0291, 0.0316, 0.0284, 0.0311, 0.0302, 0.0260, 0.0407, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 01:05:29,355 INFO [train.py:901] (2/4) Epoch 20, batch 5800, loss[loss=0.2501, simple_loss=0.3397, pruned_loss=0.08023, over 8455.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2949, pruned_loss=0.06683, over 1617723.73 frames. ], batch size: 25, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:05:35,566 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.438e+02 2.992e+02 3.849e+02 1.447e+03, threshold=5.984e+02, percent-clipped=4.0 +2023-02-07 01:06:04,880 INFO [train.py:901] (2/4) Epoch 20, batch 5850, loss[loss=0.1779, simple_loss=0.2618, pruned_loss=0.04701, over 7805.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2934, pruned_loss=0.06631, over 1612950.78 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:06:08,469 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:06:30,805 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 01:06:39,994 INFO [train.py:901] (2/4) Epoch 20, batch 5900, loss[loss=0.1887, simple_loss=0.2798, pruned_loss=0.04879, over 8291.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2926, pruned_loss=0.06527, over 1617536.55 frames. 
], batch size: 23, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:06:45,624 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.454e+02 2.951e+02 3.822e+02 7.063e+02, threshold=5.901e+02, percent-clipped=2.0 +2023-02-07 01:07:00,826 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6722, 2.2432, 4.1939, 1.4645, 2.9928, 2.2942, 1.8172, 2.8465], + device='cuda:2'), covar=tensor([0.1855, 0.2692, 0.0693, 0.4549, 0.1697, 0.3102, 0.2248, 0.2450], + device='cuda:2'), in_proj_covar=tensor([0.0522, 0.0593, 0.0556, 0.0637, 0.0645, 0.0592, 0.0530, 0.0632], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:07:04,117 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:08,825 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159516.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:12,107 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:15,459 INFO [train.py:901] (2/4) Epoch 20, batch 5950, loss[loss=0.1592, simple_loss=0.2419, pruned_loss=0.03822, over 7659.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2917, pruned_loss=0.06465, over 1615902.46 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:07:15,838 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 01:07:22,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:26,382 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:29,670 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:07:42,011 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5508, 1.8475, 1.9288, 1.2190, 1.9586, 1.4603, 0.4589, 1.8082], + device='cuda:2'), covar=tensor([0.0496, 0.0324, 0.0254, 0.0488, 0.0344, 0.0838, 0.0815, 0.0225], + device='cuda:2'), in_proj_covar=tensor([0.0455, 0.0386, 0.0337, 0.0444, 0.0369, 0.0533, 0.0392, 0.0415], + device='cuda:2'), out_proj_covar=tensor([1.2254e-04, 1.0147e-04, 8.8862e-05, 1.1734e-04, 9.7634e-05, 1.5151e-04, + 1.0602e-04, 1.1062e-04], device='cuda:2') +2023-02-07 01:07:45,567 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159568.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:07:50,925 INFO [train.py:901] (2/4) Epoch 20, batch 6000, loss[loss=0.2086, simple_loss=0.2805, pruned_loss=0.06836, over 7647.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2906, pruned_loss=0.06427, over 1610592.80 frames. ], batch size: 19, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:07:50,926 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 01:08:04,189 INFO [train.py:935] (2/4) Epoch 20, validation: loss=0.175, simple_loss=0.275, pruned_loss=0.03755, over 944034.00 frames. 
+2023-02-07 01:08:04,190 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 01:08:09,554 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.504e+02 2.869e+02 3.482e+02 8.370e+02, threshold=5.739e+02, percent-clipped=5.0 +2023-02-07 01:08:15,888 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159593.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:08:38,924 INFO [train.py:901] (2/4) Epoch 20, batch 6050, loss[loss=0.2062, simple_loss=0.2977, pruned_loss=0.05739, over 8335.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.06359, over 1610182.01 frames. ], batch size: 25, lr: 3.77e-03, grad_scale: 8.0 +2023-02-07 01:08:45,953 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:08:51,473 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3759, 2.8378, 2.3802, 3.8795, 1.5722, 2.1252, 2.2806, 2.6966], + device='cuda:2'), covar=tensor([0.0663, 0.0755, 0.0743, 0.0230, 0.1092, 0.1186, 0.0944, 0.0773], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0204, 0.0245, 0.0248, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 01:08:55,604 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159649.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:08:57,636 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:09:14,759 INFO [train.py:901] (2/4) Epoch 20, batch 6100, loss[loss=0.2149, simple_loss=0.3055, pruned_loss=0.06216, over 8103.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2913, pruned_loss=0.06461, over 1610601.10 frames. ], batch size: 23, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:09:21,004 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.453e+02 2.842e+02 3.745e+02 1.322e+03, threshold=5.684e+02, percent-clipped=4.0 +2023-02-07 01:09:30,876 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1397, 2.3174, 2.0008, 2.7860, 1.4553, 1.7524, 2.1176, 2.2324], + device='cuda:2'), covar=tensor([0.0613, 0.0711, 0.0792, 0.0406, 0.1009, 0.1161, 0.0783, 0.0716], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0197, 0.0245, 0.0213, 0.0204, 0.0245, 0.0248, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 01:09:41,580 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 01:09:49,993 INFO [train.py:901] (2/4) Epoch 20, batch 6150, loss[loss=0.1953, simple_loss=0.2957, pruned_loss=0.04746, over 8463.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2906, pruned_loss=0.06454, over 1611381.40 frames. ], batch size: 25, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:10:18,361 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:10:24,871 INFO [train.py:901] (2/4) Epoch 20, batch 6200, loss[loss=0.2032, simple_loss=0.2829, pruned_loss=0.06171, over 8262.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2903, pruned_loss=0.06443, over 1610297.58 frames. 
], batch size: 24, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:10:30,207 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.429e+02 3.094e+02 3.753e+02 7.329e+02, threshold=6.188e+02, percent-clipped=3.0 +2023-02-07 01:10:33,806 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0644, 1.5889, 1.4320, 1.5018, 1.2951, 1.2450, 1.3205, 1.3282], + device='cuda:2'), covar=tensor([0.1105, 0.0505, 0.1248, 0.0539, 0.0769, 0.1536, 0.0809, 0.0780], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0237, 0.0331, 0.0309, 0.0301, 0.0337, 0.0344, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 01:10:43,480 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:00,336 INFO [train.py:901] (2/4) Epoch 20, batch 6250, loss[loss=0.2449, simple_loss=0.3242, pruned_loss=0.0828, over 8355.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2895, pruned_loss=0.06337, over 1611753.50 frames. ], batch size: 24, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:11:01,219 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159827.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:05,579 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 01:11:15,896 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6910, 2.4419, 4.0516, 1.6077, 3.0601, 2.2213, 1.8402, 2.8282], + device='cuda:2'), covar=tensor([0.2016, 0.2473, 0.0804, 0.4531, 0.1891, 0.3294, 0.2356, 0.2575], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0589, 0.0552, 0.0633, 0.0642, 0.0590, 0.0528, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:11:32,492 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159873.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:11:34,365 INFO [train.py:901] (2/4) Epoch 20, batch 6300, loss[loss=0.2256, simple_loss=0.3097, pruned_loss=0.07079, over 8831.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2894, pruned_loss=0.06355, over 1607891.69 frames. ], batch size: 32, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:11:40,337 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.354e+02 2.951e+02 3.644e+02 9.166e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 01:11:45,861 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:03,493 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159917.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:09,078 INFO [train.py:901] (2/4) Epoch 20, batch 6350, loss[loss=0.2395, simple_loss=0.3227, pruned_loss=0.07814, over 8554.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2903, pruned_loss=0.06431, over 1611761.53 frames. ], batch size: 49, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:12:10,589 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159928.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:12:43,479 INFO [train.py:901] (2/4) Epoch 20, batch 6400, loss[loss=0.2315, simple_loss=0.3114, pruned_loss=0.07581, over 8731.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2909, pruned_loss=0.06458, over 1610884.45 frames. 
], batch size: 34, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:12:48,765 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.436e+02 2.995e+02 3.881e+02 8.346e+02, threshold=5.989e+02, percent-clipped=6.0 +2023-02-07 01:12:52,384 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5750, 1.4725, 4.7852, 2.0011, 4.2753, 4.0408, 4.3846, 4.2304], + device='cuda:2'), covar=tensor([0.0563, 0.4884, 0.0508, 0.3848, 0.1046, 0.0944, 0.0542, 0.0621], + device='cuda:2'), in_proj_covar=tensor([0.0609, 0.0629, 0.0678, 0.0614, 0.0692, 0.0593, 0.0593, 0.0665], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:12:55,757 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:16,774 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:18,629 INFO [train.py:901] (2/4) Epoch 20, batch 6450, loss[loss=0.1858, simple_loss=0.2778, pruned_loss=0.04686, over 8254.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2911, pruned_loss=0.06475, over 1612244.16 frames. ], batch size: 22, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:13:31,724 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1318, 1.5183, 1.7454, 1.3226, 0.9470, 1.4952, 1.6942, 1.6805], + device='cuda:2'), covar=tensor([0.0503, 0.1269, 0.1622, 0.1457, 0.0624, 0.1465, 0.0698, 0.0620], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0099, 0.0161, 0.0112, 0.0141], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 01:13:34,503 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:13:53,994 INFO [train.py:901] (2/4) Epoch 20, batch 6500, loss[loss=0.2173, simple_loss=0.3018, pruned_loss=0.06644, over 8369.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2903, pruned_loss=0.06417, over 1612666.41 frames. ], batch size: 24, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:13:59,464 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.613e+02 3.061e+02 4.120e+02 1.100e+03, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 01:14:04,195 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-07 01:14:16,510 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:14:29,724 INFO [train.py:901] (2/4) Epoch 20, batch 6550, loss[loss=0.1819, simple_loss=0.2572, pruned_loss=0.0533, over 7173.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2888, pruned_loss=0.06329, over 1616048.78 frames. ], batch size: 16, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:14:52,109 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 01:14:53,115 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 01:15:05,564 INFO [train.py:901] (2/4) Epoch 20, batch 6600, loss[loss=0.1772, simple_loss=0.2667, pruned_loss=0.04391, over 8279.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2884, pruned_loss=0.063, over 1612413.15 frames. 
], batch size: 23, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:15:10,796 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.488e+02 3.067e+02 3.982e+02 8.719e+02, threshold=6.134e+02, percent-clipped=3.0 +2023-02-07 01:15:12,142 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 01:15:33,421 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160217.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:15:39,330 INFO [train.py:901] (2/4) Epoch 20, batch 6650, loss[loss=0.2265, simple_loss=0.3048, pruned_loss=0.07413, over 8140.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2886, pruned_loss=0.06327, over 1613869.83 frames. ], batch size: 22, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:10,082 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 01:16:12,500 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:16:15,123 INFO [train.py:901] (2/4) Epoch 20, batch 6700, loss[loss=0.1988, simple_loss=0.2935, pruned_loss=0.05201, over 8195.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2888, pruned_loss=0.06368, over 1613752.36 frames. ], batch size: 23, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:20,502 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.299e+02 2.819e+02 3.357e+02 8.975e+02, threshold=5.638e+02, percent-clipped=4.0 +2023-02-07 01:16:44,747 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-07 01:16:48,910 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160325.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:16:49,344 INFO [train.py:901] (2/4) Epoch 20, batch 6750, loss[loss=0.3196, simple_loss=0.3658, pruned_loss=0.1367, over 8621.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.29, pruned_loss=0.0646, over 1612302.76 frames. 
], batch size: 39, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:16:53,599 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160332.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:03,236 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8874, 1.5383, 6.0458, 2.0660, 5.4006, 5.0518, 5.5686, 5.4728], + device='cuda:2'), covar=tensor([0.0529, 0.5110, 0.0422, 0.4043, 0.1078, 0.0941, 0.0558, 0.0526], + device='cuda:2'), in_proj_covar=tensor([0.0612, 0.0629, 0.0681, 0.0614, 0.0694, 0.0594, 0.0596, 0.0665], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:17:16,338 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:18,899 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6749, 1.6239, 2.1440, 1.4984, 1.3091, 2.0687, 0.6929, 1.5097], + device='cuda:2'), covar=tensor([0.1649, 0.1242, 0.0363, 0.1062, 0.2607, 0.0439, 0.2247, 0.1185], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0196, 0.0126, 0.0223, 0.0272, 0.0134, 0.0170, 0.0190], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 01:17:23,988 INFO [train.py:901] (2/4) Epoch 20, batch 6800, loss[loss=0.2532, simple_loss=0.3253, pruned_loss=0.09058, over 6821.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2906, pruned_loss=0.0645, over 1616172.84 frames. ], batch size: 71, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:17:28,102 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 01:17:29,321 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.510e+02 3.096e+02 3.947e+02 9.727e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 01:17:32,265 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:33,649 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160389.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:17:59,210 INFO [train.py:901] (2/4) Epoch 20, batch 6850, loss[loss=0.2309, simple_loss=0.3173, pruned_loss=0.07225, over 8351.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06405, over 1614997.08 frames. ], batch size: 26, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:18:19,449 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 01:18:34,196 INFO [train.py:901] (2/4) Epoch 20, batch 6900, loss[loss=0.1893, simple_loss=0.2688, pruned_loss=0.05494, over 7649.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2897, pruned_loss=0.06399, over 1614111.62 frames. ], batch size: 19, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:18:39,573 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.333e+02 2.912e+02 3.495e+02 9.213e+02, threshold=5.824e+02, percent-clipped=3.0 +2023-02-07 01:19:08,580 INFO [train.py:901] (2/4) Epoch 20, batch 6950, loss[loss=0.2454, simple_loss=0.3222, pruned_loss=0.08427, over 8320.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2903, pruned_loss=0.06387, over 1620984.46 frames. 
], batch size: 25, lr: 3.76e-03, grad_scale: 8.0 +2023-02-07 01:19:15,598 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1651, 4.1607, 3.7809, 1.9294, 3.6826, 3.8747, 3.6899, 3.6710], + device='cuda:2'), covar=tensor([0.0786, 0.0597, 0.1101, 0.4374, 0.0890, 0.0902, 0.1382, 0.0814], + device='cuda:2'), in_proj_covar=tensor([0.0524, 0.0434, 0.0438, 0.0540, 0.0424, 0.0439, 0.0423, 0.0382], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:19:30,217 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 01:19:42,958 INFO [train.py:901] (2/4) Epoch 20, batch 7000, loss[loss=0.1701, simple_loss=0.2626, pruned_loss=0.03882, over 8099.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2908, pruned_loss=0.06351, over 1622460.04 frames. ], batch size: 23, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:19:43,788 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5997, 1.5141, 4.8100, 1.8139, 4.2044, 4.0297, 4.3815, 4.1927], + device='cuda:2'), covar=tensor([0.0595, 0.4741, 0.0494, 0.4227, 0.1138, 0.0969, 0.0562, 0.0689], + device='cuda:2'), in_proj_covar=tensor([0.0614, 0.0631, 0.0681, 0.0617, 0.0697, 0.0595, 0.0596, 0.0668], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:19:48,348 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.497e+02 2.987e+02 3.377e+02 5.985e+02, threshold=5.974e+02, percent-clipped=1.0 +2023-02-07 01:19:52,063 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:07,125 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0 +2023-02-07 01:20:09,555 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160613.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:18,197 INFO [train.py:901] (2/4) Epoch 20, batch 7050, loss[loss=0.2173, simple_loss=0.2966, pruned_loss=0.06902, over 8125.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2908, pruned_loss=0.06344, over 1617595.36 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:20:30,576 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:48,791 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160668.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:20:49,340 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160669.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:20:53,994 INFO [train.py:901] (2/4) Epoch 20, batch 7100, loss[loss=0.2047, simple_loss=0.2857, pruned_loss=0.06187, over 8239.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2899, pruned_loss=0.06332, over 1618033.84 frames. 
], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:20:59,618 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.520e+02 2.814e+02 3.523e+02 7.232e+02, threshold=5.628e+02, percent-clipped=2.0 +2023-02-07 01:21:06,712 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7220, 4.7670, 4.2816, 1.9297, 4.2436, 4.3671, 4.3364, 4.2357], + device='cuda:2'), covar=tensor([0.0604, 0.0431, 0.0854, 0.4421, 0.0746, 0.0763, 0.1087, 0.0619], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0433, 0.0437, 0.0538, 0.0425, 0.0441, 0.0424, 0.0382], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:21:29,432 INFO [train.py:901] (2/4) Epoch 20, batch 7150, loss[loss=0.2418, simple_loss=0.322, pruned_loss=0.08078, over 8634.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2902, pruned_loss=0.0641, over 1614638.72 frames. ], batch size: 34, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:21:39,918 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 01:22:04,615 INFO [train.py:901] (2/4) Epoch 20, batch 7200, loss[loss=0.2769, simple_loss=0.3447, pruned_loss=0.1046, over 6948.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2906, pruned_loss=0.06426, over 1615434.88 frames. ], batch size: 73, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:22:09,775 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.432e+02 3.066e+02 3.972e+02 8.502e+02, threshold=6.132e+02, percent-clipped=3.0 +2023-02-07 01:22:09,991 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160784.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:22:39,217 INFO [train.py:901] (2/4) Epoch 20, batch 7250, loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.06496, over 8338.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2925, pruned_loss=0.06521, over 1617480.15 frames. ], batch size: 49, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:13,934 INFO [train.py:901] (2/4) Epoch 20, batch 7300, loss[loss=0.2415, simple_loss=0.3081, pruned_loss=0.08749, over 7965.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2927, pruned_loss=0.06536, over 1619752.56 frames. ], batch size: 21, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:19,309 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.519e+02 2.885e+02 3.982e+02 8.183e+02, threshold=5.771e+02, percent-clipped=5.0 +2023-02-07 01:23:44,251 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160919.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:23:48,785 INFO [train.py:901] (2/4) Epoch 20, batch 7350, loss[loss=0.1797, simple_loss=0.2618, pruned_loss=0.04877, over 8230.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.291, pruned_loss=0.06434, over 1617452.36 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:23:55,277 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-07 01:24:16,171 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 01:24:24,315 INFO [train.py:901] (2/4) Epoch 20, batch 7400, loss[loss=0.1824, simple_loss=0.2706, pruned_loss=0.04708, over 8147.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2917, pruned_loss=0.06405, over 1622067.69 frames. 
], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:24:29,920 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:24:30,422 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.344e+02 3.002e+02 3.673e+02 6.079e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-07 01:24:37,301 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 01:24:59,969 INFO [train.py:901] (2/4) Epoch 20, batch 7450, loss[loss=0.1468, simple_loss=0.2298, pruned_loss=0.03187, over 7646.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.29, pruned_loss=0.06326, over 1620152.50 frames. ], batch size: 19, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:01,558 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3284, 2.1129, 3.5329, 2.1521, 2.8325, 3.9837, 3.9105, 3.5139], + device='cuda:2'), covar=tensor([0.1053, 0.1520, 0.0541, 0.1690, 0.1434, 0.0202, 0.0602, 0.0481], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0321, 0.0285, 0.0315, 0.0304, 0.0262, 0.0412, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 01:25:10,076 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161040.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 01:25:16,123 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 01:25:28,631 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161065.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 01:25:35,929 INFO [train.py:901] (2/4) Epoch 20, batch 7500, loss[loss=0.2253, simple_loss=0.303, pruned_loss=0.07382, over 8664.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2912, pruned_loss=0.06387, over 1619718.84 frames. ], batch size: 49, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:41,428 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.441e+02 3.010e+02 3.756e+02 8.900e+02, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:26:11,135 INFO [train.py:901] (2/4) Epoch 20, batch 7550, loss[loss=0.2082, simple_loss=0.2969, pruned_loss=0.05974, over 8464.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2918, pruned_loss=0.06472, over 1620779.56 frames. ], batch size: 25, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:46,334 INFO [train.py:901] (2/4) Epoch 20, batch 7600, loss[loss=0.2005, simple_loss=0.2759, pruned_loss=0.06252, over 8033.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2917, pruned_loss=0.06457, over 1621519.70 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:51,651 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.460e+02 3.037e+02 4.113e+02 9.859e+02, threshold=6.074e+02, percent-clipped=9.0 +2023-02-07 01:27:20,295 INFO [train.py:901] (2/4) Epoch 20, batch 7650, loss[loss=0.1866, simple_loss=0.2665, pruned_loss=0.05334, over 7928.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2921, pruned_loss=0.06495, over 1622985.47 frames. 
], batch size: 20, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:27:25,750 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:36,289 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:45,607 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:54,278 INFO [train.py:901] (2/4) Epoch 20, batch 7700, loss[loss=0.2202, simple_loss=0.3023, pruned_loss=0.06906, over 8362.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2927, pruned_loss=0.065, over 1625949.94 frames. ], batch size: 24, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:27:59,457 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.411e+02 2.987e+02 3.572e+02 6.786e+02, threshold=5.975e+02, percent-clipped=3.0 +2023-02-07 01:28:22,560 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2882, 2.0102, 2.7066, 2.2042, 2.5153, 2.2724, 2.0499, 1.4251], + device='cuda:2'), covar=tensor([0.5188, 0.4734, 0.1714, 0.3674, 0.2505, 0.2923, 0.1878, 0.5144], + device='cuda:2'), in_proj_covar=tensor([0.0935, 0.0970, 0.0799, 0.0932, 0.0989, 0.0882, 0.0742, 0.0819], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 01:28:25,737 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 01:28:26,650 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5906, 2.7115, 1.9223, 2.2854, 2.3049, 1.5787, 2.1694, 2.4033], + device='cuda:2'), covar=tensor([0.1602, 0.0419, 0.1205, 0.0689, 0.0782, 0.1558, 0.1055, 0.1018], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0237, 0.0333, 0.0306, 0.0301, 0.0335, 0.0345, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 01:28:29,787 INFO [train.py:901] (2/4) Epoch 20, batch 7750, loss[loss=0.2039, simple_loss=0.2812, pruned_loss=0.06331, over 8242.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2909, pruned_loss=0.06434, over 1616595.34 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:28:30,570 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:05,270 INFO [train.py:901] (2/4) Epoch 20, batch 7800, loss[loss=0.2078, simple_loss=0.2882, pruned_loss=0.06366, over 7814.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.06446, over 1616276.62 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:29:06,860 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:10,616 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.429e+02 2.909e+02 3.732e+02 6.331e+02, threshold=5.818e+02, percent-clipped=2.0 +2023-02-07 01:29:34,440 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:37,519 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-02-07 01:29:39,043 INFO [train.py:901] (2/4) Epoch 20, batch 7850, loss[loss=0.2405, simple_loss=0.318, pruned_loss=0.08151, over 8646.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2908, pruned_loss=0.0645, over 1615612.01 frames. ], batch size: 50, lr: 3.74e-03, grad_scale: 16.0 +2023-02-07 01:29:49,661 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:56,804 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5687, 2.7147, 1.9156, 2.2760, 2.3041, 1.6289, 2.0660, 2.3506], + device='cuda:2'), covar=tensor([0.1771, 0.0410, 0.1153, 0.0663, 0.0740, 0.1508, 0.1082, 0.0997], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0234, 0.0330, 0.0303, 0.0298, 0.0332, 0.0342, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 01:30:12,443 INFO [train.py:901] (2/4) Epoch 20, batch 7900, loss[loss=0.2055, simple_loss=0.2887, pruned_loss=0.06118, over 8352.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2903, pruned_loss=0.06401, over 1615099.06 frames. ], batch size: 24, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:13,741 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:30:15,380 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 01:30:18,875 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 2.352e+02 2.923e+02 4.060e+02 8.940e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 01:30:45,510 INFO [train.py:901] (2/4) Epoch 20, batch 7950, loss[loss=0.1696, simple_loss=0.26, pruned_loss=0.03962, over 7959.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2887, pruned_loss=0.0636, over 1610048.81 frames. ], batch size: 21, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:50,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8338, 3.7764, 3.4972, 1.8953, 3.4036, 3.4069, 3.4633, 3.2976], + device='cuda:2'), covar=tensor([0.0932, 0.0663, 0.1131, 0.4254, 0.0969, 0.1171, 0.1471, 0.0984], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0432, 0.0431, 0.0531, 0.0424, 0.0435, 0.0419, 0.0379], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:31:18,035 INFO [train.py:901] (2/4) Epoch 20, batch 8000, loss[loss=0.1734, simple_loss=0.2484, pruned_loss=0.04921, over 7533.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2884, pruned_loss=0.06363, over 1607748.41 frames. 
], batch size: 18, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:19,436 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:23,852 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.449e+02 3.108e+02 3.740e+02 8.675e+02, threshold=6.215e+02, percent-clipped=6.0 +2023-02-07 01:31:29,406 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:36,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6607, 2.6881, 1.9200, 2.4009, 2.3543, 1.5969, 2.2255, 2.3836], + device='cuda:2'), covar=tensor([0.1605, 0.0427, 0.1234, 0.0640, 0.0740, 0.1622, 0.0947, 0.0926], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0234, 0.0330, 0.0303, 0.0298, 0.0332, 0.0341, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 01:31:51,325 INFO [train.py:901] (2/4) Epoch 20, batch 8050, loss[loss=0.1876, simple_loss=0.2748, pruned_loss=0.05015, over 6391.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2872, pruned_loss=0.06302, over 1599417.66 frames. ], batch size: 14, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:57,080 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:25,047 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 01:32:32,189 INFO [train.py:901] (2/4) Epoch 21, batch 0, loss[loss=0.1867, simple_loss=0.2611, pruned_loss=0.05613, over 8093.00 frames. ], tot_loss[loss=0.1867, simple_loss=0.2611, pruned_loss=0.05613, over 8093.00 frames. ], batch size: 21, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:32:32,190 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 01:32:44,209 INFO [train.py:935] (2/4) Epoch 21, validation: loss=0.1763, simple_loss=0.2762, pruned_loss=0.03818, over 944034.00 frames. +2023-02-07 01:32:44,210 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 01:32:44,430 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161659.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:59,349 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 01:33:02,224 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.415e+02 2.918e+02 3.924e+02 7.413e+02, threshold=5.835e+02, percent-clipped=4.0 +2023-02-07 01:33:07,970 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:11,616 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:18,551 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5540, 1.4710, 4.7469, 1.7547, 4.1237, 3.9858, 4.3382, 4.2036], + device='cuda:2'), covar=tensor([0.0585, 0.4756, 0.0466, 0.4029, 0.1220, 0.0936, 0.0548, 0.0652], + device='cuda:2'), in_proj_covar=tensor([0.0610, 0.0623, 0.0673, 0.0608, 0.0690, 0.0590, 0.0591, 0.0656], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:33:18,597 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161708.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:19,107 INFO [train.py:901] (2/4) Epoch 21, batch 50, loss[loss=0.2372, simple_loss=0.319, pruned_loss=0.07771, over 8190.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2934, pruned_loss=0.06612, over 368383.40 frames. ], batch size: 23, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:33:29,238 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:32,475 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 01:33:56,011 INFO [train.py:901] (2/4) Epoch 21, batch 100, loss[loss=0.1877, simple_loss=0.2689, pruned_loss=0.05326, over 7532.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.293, pruned_loss=0.06531, over 646436.88 frames. ], batch size: 18, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:33:57,250 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 01:33:58,658 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:34:14,164 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.511e+02 2.964e+02 4.065e+02 7.207e+02, threshold=5.927e+02, percent-clipped=4.0 +2023-02-07 01:34:18,656 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:34:30,766 INFO [train.py:901] (2/4) Epoch 21, batch 150, loss[loss=0.1546, simple_loss=0.245, pruned_loss=0.03213, over 7813.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2939, pruned_loss=0.06565, over 864878.42 frames. ], batch size: 20, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:34:33,279 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:34:39,725 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:34:47,260 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:35:06,331 INFO [train.py:901] (2/4) Epoch 21, batch 200, loss[loss=0.197, simple_loss=0.2654, pruned_loss=0.06425, over 7427.00 frames. 
], tot_loss[loss=0.2105, simple_loss=0.2914, pruned_loss=0.06476, over 1029982.38 frames. ], batch size: 17, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:35:19,119 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:35:23,717 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.494e+02 2.791e+02 3.613e+02 7.338e+02, threshold=5.582e+02, percent-clipped=1.0 +2023-02-07 01:35:41,064 INFO [train.py:901] (2/4) Epoch 21, batch 250, loss[loss=0.2253, simple_loss=0.3095, pruned_loss=0.07053, over 8112.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2913, pruned_loss=0.06482, over 1157313.26 frames. ], batch size: 23, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:35:47,945 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 01:35:57,067 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 01:36:00,717 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161937.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:08,760 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:15,267 INFO [train.py:901] (2/4) Epoch 21, batch 300, loss[loss=0.2355, simple_loss=0.312, pruned_loss=0.07956, over 8488.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2922, pruned_loss=0.06493, over 1265340.80 frames. ], batch size: 48, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:36:19,021 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161964.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:20,639 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 01:36:26,617 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161974.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:33,754 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.839e+02 3.558e+02 8.067e+02, threshold=5.678e+02, percent-clipped=5.0 +2023-02-07 01:36:36,660 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:51,874 INFO [train.py:901] (2/4) Epoch 21, batch 350, loss[loss=0.2948, simple_loss=0.3489, pruned_loss=0.1203, over 7093.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2899, pruned_loss=0.06355, over 1342194.00 frames. ], batch size: 71, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:36:57,889 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-07 01:37:25,806 INFO [train.py:901] (2/4) Epoch 21, batch 400, loss[loss=0.219, simple_loss=0.291, pruned_loss=0.07344, over 7813.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2904, pruned_loss=0.06383, over 1402486.18 frames. 
], batch size: 20, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:37:44,476 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.323e+02 2.796e+02 3.394e+02 5.024e+02, threshold=5.592e+02, percent-clipped=0.0 +2023-02-07 01:37:52,929 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:37:58,454 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7314, 1.4881, 4.9302, 1.9156, 4.4189, 4.1634, 4.5072, 4.3877], + device='cuda:2'), covar=tensor([0.0480, 0.4603, 0.0400, 0.3578, 0.0891, 0.0732, 0.0512, 0.0539], + device='cuda:2'), in_proj_covar=tensor([0.0612, 0.0629, 0.0679, 0.0612, 0.0692, 0.0594, 0.0596, 0.0661], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:38:02,163 INFO [train.py:901] (2/4) Epoch 21, batch 450, loss[loss=0.2013, simple_loss=0.2893, pruned_loss=0.05668, over 8241.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2903, pruned_loss=0.06385, over 1452023.27 frames. ], batch size: 24, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:38:20,172 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:37,308 INFO [train.py:901] (2/4) Epoch 21, batch 500, loss[loss=0.2032, simple_loss=0.2979, pruned_loss=0.05419, over 8238.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2911, pruned_loss=0.064, over 1489739.11 frames. ], batch size: 24, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:38:37,582 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:50,065 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:55,586 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.501e+02 2.975e+02 3.750e+02 9.376e+02, threshold=5.950e+02, percent-clipped=8.0 +2023-02-07 01:39:01,456 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162193.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:09,311 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 01:39:13,736 INFO [train.py:901] (2/4) Epoch 21, batch 550, loss[loss=0.1775, simple_loss=0.2611, pruned_loss=0.0469, over 7796.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2905, pruned_loss=0.06332, over 1519346.68 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:39:20,156 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162218.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:42,304 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:45,165 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.84 vs. limit=5.0 +2023-02-07 01:39:48,780 INFO [train.py:901] (2/4) Epoch 21, batch 600, loss[loss=0.2273, simple_loss=0.2996, pruned_loss=0.07745, over 8028.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2895, pruned_loss=0.0635, over 1537853.73 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:40:02,428 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-07 01:40:06,587 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.365e+02 2.932e+02 3.412e+02 7.385e+02, threshold=5.863e+02, percent-clipped=2.0 +2023-02-07 01:40:11,434 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162292.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:40:16,973 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7180, 2.0735, 3.3524, 1.5349, 2.6268, 2.1966, 1.7836, 2.5794], + device='cuda:2'), covar=tensor([0.1848, 0.2467, 0.0761, 0.4431, 0.1619, 0.2968, 0.2210, 0.2151], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0592, 0.0549, 0.0631, 0.0640, 0.0591, 0.0529, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:40:22,966 INFO [train.py:901] (2/4) Epoch 21, batch 650, loss[loss=0.198, simple_loss=0.2827, pruned_loss=0.05671, over 8326.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06292, over 1557753.45 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:40:26,759 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-07 01:40:32,758 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2353, 2.5563, 2.9893, 1.7281, 3.1559, 1.9448, 1.5378, 2.0869], + device='cuda:2'), covar=tensor([0.0747, 0.0357, 0.0257, 0.0680, 0.0382, 0.0776, 0.0906, 0.0534], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0384, 0.0336, 0.0438, 0.0368, 0.0531, 0.0388, 0.0412], + device='cuda:2'), out_proj_covar=tensor([1.2043e-04, 1.0076e-04, 8.8479e-05, 1.1571e-04, 9.6954e-05, 1.5088e-04, + 1.0492e-04, 1.0957e-04], device='cuda:2') +2023-02-07 01:40:35,429 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2531, 3.1424, 2.9538, 1.6500, 2.8535, 2.9326, 2.8509, 2.8528], + device='cuda:2'), covar=tensor([0.1146, 0.0988, 0.1449, 0.4445, 0.1192, 0.1252, 0.1716, 0.0950], + device='cuda:2'), in_proj_covar=tensor([0.0515, 0.0432, 0.0430, 0.0531, 0.0419, 0.0435, 0.0413, 0.0378], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:40:59,236 INFO [train.py:901] (2/4) Epoch 21, batch 700, loss[loss=0.194, simple_loss=0.2878, pruned_loss=0.05012, over 8329.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2898, pruned_loss=0.06306, over 1568152.64 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:41:17,771 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.443e+02 3.111e+02 4.032e+02 8.821e+02, threshold=6.222e+02, percent-clipped=5.0 +2023-02-07 01:41:34,562 INFO [train.py:901] (2/4) Epoch 21, batch 750, loss[loss=0.2691, simple_loss=0.334, pruned_loss=0.1021, over 7338.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2902, pruned_loss=0.06348, over 1575547.23 frames. ], batch size: 72, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:41:45,486 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-07 01:41:49,073 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1908, 3.8538, 2.5360, 2.8223, 3.0757, 2.3371, 2.9746, 3.1294], + device='cuda:2'), covar=tensor([0.1613, 0.0400, 0.1160, 0.0773, 0.0702, 0.1303, 0.1094, 0.1031], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0236, 0.0334, 0.0308, 0.0300, 0.0336, 0.0347, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 01:41:54,320 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 01:41:56,380 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:42:11,055 INFO [train.py:901] (2/4) Epoch 21, batch 800, loss[loss=0.1989, simple_loss=0.2863, pruned_loss=0.05568, over 8237.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2902, pruned_loss=0.06316, over 1587921.30 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:42:29,948 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.455e+02 2.861e+02 3.570e+02 7.084e+02, threshold=5.721e+02, percent-clipped=3.0 +2023-02-07 01:42:47,169 INFO [train.py:901] (2/4) Epoch 21, batch 850, loss[loss=0.1959, simple_loss=0.2757, pruned_loss=0.05806, over 8297.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2898, pruned_loss=0.06327, over 1593784.91 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:43:01,484 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162529.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:16,266 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:18,607 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-02-07 01:43:21,103 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:24,449 INFO [train.py:901] (2/4) Epoch 21, batch 900, loss[loss=0.2159, simple_loss=0.2976, pruned_loss=0.06709, over 8335.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2896, pruned_loss=0.06343, over 1594483.34 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:43:34,390 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:38,964 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.53 vs. limit=5.0 +2023-02-07 01:43:42,630 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.319e+02 2.838e+02 3.637e+02 1.203e+03, threshold=5.677e+02, percent-clipped=5.0 +2023-02-07 01:43:49,237 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:44:00,580 INFO [train.py:901] (2/4) Epoch 21, batch 950, loss[loss=0.1974, simple_loss=0.2897, pruned_loss=0.05252, over 8862.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2897, pruned_loss=0.06311, over 1600157.17 frames. ], batch size: 40, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:44:14,210 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-07 01:44:35,848 INFO [train.py:901] (2/4) Epoch 21, batch 1000, loss[loss=0.1994, simple_loss=0.2816, pruned_loss=0.05861, over 7932.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2898, pruned_loss=0.06296, over 1599568.32 frames. ], batch size: 20, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:44:48,971 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 01:44:55,206 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.438e+02 2.954e+02 4.014e+02 9.557e+02, threshold=5.908e+02, percent-clipped=3.0 +2023-02-07 01:45:01,390 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 01:45:11,703 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162708.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:45:12,207 INFO [train.py:901] (2/4) Epoch 21, batch 1050, loss[loss=0.2096, simple_loss=0.2985, pruned_loss=0.06034, over 8436.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2894, pruned_loss=0.06251, over 1601781.49 frames. ], batch size: 27, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:45:17,835 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7046, 1.7426, 2.3332, 1.5118, 1.4244, 2.2920, 0.4466, 1.4433], + device='cuda:2'), covar=tensor([0.1779, 0.1347, 0.0315, 0.1317, 0.2419, 0.0418, 0.2173, 0.1388], + device='cuda:2'), in_proj_covar=tensor([0.0187, 0.0193, 0.0125, 0.0219, 0.0269, 0.0132, 0.0168, 0.0188], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 01:45:46,483 INFO [train.py:901] (2/4) Epoch 21, batch 1100, loss[loss=0.1832, simple_loss=0.261, pruned_loss=0.05268, over 7811.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2895, pruned_loss=0.06251, over 1606562.82 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:46:06,018 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.501e+02 3.059e+02 3.494e+02 1.150e+03, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 01:46:14,526 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 01:46:23,789 INFO [train.py:901] (2/4) Epoch 21, batch 1150, loss[loss=0.2074, simple_loss=0.2925, pruned_loss=0.06119, over 8466.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2893, pruned_loss=0.06233, over 1606998.51 frames. ], batch size: 25, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:46:24,671 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:42,980 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:50,072 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162845.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:54,517 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 01:46:59,716 INFO [train.py:901] (2/4) Epoch 21, batch 1200, loss[loss=0.1844, simple_loss=0.2661, pruned_loss=0.0514, over 8244.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2894, pruned_loss=0.0622, over 1607893.94 frames. 
], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:47:09,540 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162873.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:47:17,443 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.368e+02 3.051e+02 3.779e+02 6.869e+02, threshold=6.103e+02, percent-clipped=3.0 +2023-02-07 01:47:36,397 INFO [train.py:901] (2/4) Epoch 21, batch 1250, loss[loss=0.2517, simple_loss=0.3382, pruned_loss=0.08264, over 8478.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.289, pruned_loss=0.06212, over 1613687.80 frames. ], batch size: 29, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:47:58,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9528, 2.0700, 1.8134, 2.4529, 1.2674, 1.6582, 1.9706, 2.0891], + device='cuda:2'), covar=tensor([0.0660, 0.0721, 0.0905, 0.0499, 0.1059, 0.1156, 0.0738, 0.0706], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0195, 0.0243, 0.0211, 0.0203, 0.0244, 0.0247, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 01:48:11,274 INFO [train.py:901] (2/4) Epoch 21, batch 1300, loss[loss=0.1656, simple_loss=0.2358, pruned_loss=0.04766, over 7712.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2887, pruned_loss=0.06226, over 1612770.04 frames. ], batch size: 18, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:48:14,766 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162964.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:28,476 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.260e+02 2.727e+02 3.317e+02 5.773e+02, threshold=5.453e+02, percent-clipped=0.0 +2023-02-07 01:48:30,745 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:31,449 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:44,793 INFO [train.py:901] (2/4) Epoch 21, batch 1350, loss[loss=0.1732, simple_loss=0.2553, pruned_loss=0.04556, over 7932.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2896, pruned_loss=0.06315, over 1606014.84 frames. ], batch size: 20, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:49:21,731 INFO [train.py:901] (2/4) Epoch 21, batch 1400, loss[loss=0.2095, simple_loss=0.2916, pruned_loss=0.06375, over 8637.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2895, pruned_loss=0.06325, over 1606395.47 frames. ], batch size: 39, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:49:39,401 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.469e+02 3.010e+02 4.050e+02 1.060e+03, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:49:46,305 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 01:49:55,441 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:49:55,924 INFO [train.py:901] (2/4) Epoch 21, batch 1450, loss[loss=0.2893, simple_loss=0.3439, pruned_loss=0.1173, over 7664.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2908, pruned_loss=0.06409, over 1608546.34 frames. 
], batch size: 71, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:50:01,717 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7613, 1.5269, 3.1779, 1.3690, 2.3155, 3.4112, 3.5257, 2.9432], + device='cuda:2'), covar=tensor([0.1201, 0.1584, 0.0341, 0.2032, 0.0900, 0.0251, 0.0531, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0294, 0.0319, 0.0289, 0.0313, 0.0307, 0.0263, 0.0412, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 01:50:32,151 INFO [train.py:901] (2/4) Epoch 21, batch 1500, loss[loss=0.1758, simple_loss=0.2588, pruned_loss=0.04642, over 7227.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.06388, over 1608296.86 frames. ], batch size: 16, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:50:50,502 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.250e+02 2.722e+02 3.392e+02 6.898e+02, threshold=5.444e+02, percent-clipped=4.0 +2023-02-07 01:50:51,635 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.96 vs. limit=5.0 +2023-02-07 01:50:53,290 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163189.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:02,320 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8649, 2.3832, 4.4404, 1.6251, 3.2514, 2.5496, 1.9679, 3.2287], + device='cuda:2'), covar=tensor([0.1916, 0.2833, 0.0822, 0.4801, 0.1688, 0.3106, 0.2457, 0.2185], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0592, 0.0554, 0.0636, 0.0641, 0.0589, 0.0531, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:51:06,797 INFO [train.py:901] (2/4) Epoch 21, batch 1550, loss[loss=0.2076, simple_loss=0.3035, pruned_loss=0.05584, over 8321.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2896, pruned_loss=0.06337, over 1608046.60 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:51:31,315 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:42,280 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163258.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:42,813 INFO [train.py:901] (2/4) Epoch 21, batch 1600, loss[loss=0.198, simple_loss=0.2746, pruned_loss=0.06076, over 7549.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2895, pruned_loss=0.0632, over 1610709.95 frames. ], batch size: 18, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:51:50,628 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163269.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:56,978 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 01:52:00,875 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.380e+02 3.009e+02 4.081e+02 9.131e+02, threshold=6.018e+02, percent-clipped=6.0 +2023-02-07 01:52:14,555 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163304.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:52:17,715 INFO [train.py:901] (2/4) Epoch 21, batch 1650, loss[loss=0.2215, simple_loss=0.2949, pruned_loss=0.07401, over 7711.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2897, pruned_loss=0.06304, over 1614972.61 frames. 
], batch size: 18, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:52:50,771 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1880, 4.1840, 3.7949, 1.9027, 3.6849, 3.8011, 3.7789, 3.5890], + device='cuda:2'), covar=tensor([0.0753, 0.0585, 0.1066, 0.4579, 0.0891, 0.0978, 0.1294, 0.0839], + device='cuda:2'), in_proj_covar=tensor([0.0511, 0.0428, 0.0426, 0.0532, 0.0421, 0.0432, 0.0415, 0.0377], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 01:52:51,355 INFO [train.py:901] (2/4) Epoch 21, batch 1700, loss[loss=0.2004, simple_loss=0.2791, pruned_loss=0.06092, over 8290.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2906, pruned_loss=0.06393, over 1617130.18 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:53:09,954 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.344e+02 2.897e+02 3.678e+02 1.033e+03, threshold=5.793e+02, percent-clipped=5.0 +2023-02-07 01:53:27,413 INFO [train.py:901] (2/4) Epoch 21, batch 1750, loss[loss=0.1812, simple_loss=0.2677, pruned_loss=0.04733, over 8142.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06396, over 1615545.85 frames. ], batch size: 22, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:53:39,432 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3811, 2.6397, 3.0935, 1.8179, 3.3605, 2.0492, 1.6592, 2.2410], + device='cuda:2'), covar=tensor([0.0696, 0.0321, 0.0226, 0.0658, 0.0403, 0.0786, 0.0743, 0.0482], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0386, 0.0336, 0.0439, 0.0369, 0.0531, 0.0387, 0.0415], + device='cuda:2'), out_proj_covar=tensor([1.2041e-04, 1.0128e-04, 8.8523e-05, 1.1595e-04, 9.7193e-05, 1.5056e-04, + 1.0471e-04, 1.1038e-04], device='cuda:2') +2023-02-07 01:53:49,137 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.46 vs. limit=5.0 +2023-02-07 01:53:56,338 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:54:01,060 INFO [train.py:901] (2/4) Epoch 21, batch 1800, loss[loss=0.2156, simple_loss=0.2974, pruned_loss=0.06683, over 8105.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2896, pruned_loss=0.06317, over 1616067.54 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:54:18,713 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.661e+02 3.025e+02 4.067e+02 7.408e+02, threshold=6.049e+02, percent-clipped=6.0 +2023-02-07 01:54:37,342 INFO [train.py:901] (2/4) Epoch 21, batch 1850, loss[loss=0.2069, simple_loss=0.281, pruned_loss=0.06641, over 8112.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2889, pruned_loss=0.06308, over 1614287.01 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:54:53,428 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.72 vs. 
limit=5.0 +2023-02-07 01:55:07,798 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9369, 1.7083, 1.9219, 1.7944, 1.1654, 1.7448, 2.2904, 2.1927], + device='cuda:2'), covar=tensor([0.0416, 0.1237, 0.1670, 0.1371, 0.0625, 0.1440, 0.0613, 0.0581], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0162, 0.0113, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 01:55:11,340 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 01:55:11,696 INFO [train.py:901] (2/4) Epoch 21, batch 1900, loss[loss=0.1791, simple_loss=0.263, pruned_loss=0.04759, over 7972.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.288, pruned_loss=0.06236, over 1619236.91 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:12,601 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:17,242 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:26,433 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 01:55:28,236 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 01:55:29,011 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.410e+02 2.798e+02 3.588e+02 7.290e+02, threshold=5.595e+02, percent-clipped=1.0 +2023-02-07 01:55:29,212 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:37,705 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 01:55:40,618 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163602.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:45,159 INFO [train.py:901] (2/4) Epoch 21, batch 1950, loss[loss=0.2238, simple_loss=0.3138, pruned_loss=0.06687, over 8458.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2891, pruned_loss=0.06226, over 1627438.02 frames. ], batch size: 27, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:58,503 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 01:56:21,640 INFO [train.py:901] (2/4) Epoch 21, batch 2000, loss[loss=0.2048, simple_loss=0.2823, pruned_loss=0.06364, over 8086.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2889, pruned_loss=0.06192, over 1627262.25 frames. 
], batch size: 21, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:56:39,058 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.546e+02 3.013e+02 3.975e+02 6.874e+02, threshold=6.025e+02, percent-clipped=4.0 +2023-02-07 01:56:44,558 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7143, 1.6323, 2.8228, 1.3182, 2.1454, 3.0349, 3.1614, 2.5963], + device='cuda:2'), covar=tensor([0.1200, 0.1493, 0.0400, 0.2102, 0.1008, 0.0291, 0.0598, 0.0593], + device='cuda:2'), in_proj_covar=tensor([0.0293, 0.0319, 0.0287, 0.0312, 0.0306, 0.0260, 0.0411, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 01:56:55,198 INFO [train.py:901] (2/4) Epoch 21, batch 2050, loss[loss=0.2151, simple_loss=0.2976, pruned_loss=0.0663, over 8111.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2892, pruned_loss=0.06246, over 1624910.81 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:57:00,609 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163717.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:57:30,300 INFO [train.py:901] (2/4) Epoch 21, batch 2100, loss[loss=0.2106, simple_loss=0.2879, pruned_loss=0.06664, over 8466.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2887, pruned_loss=0.06251, over 1619361.11 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:57:48,244 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1875, 2.0218, 2.7398, 2.2270, 2.6775, 2.2779, 2.0671, 1.5617], + device='cuda:2'), covar=tensor([0.5653, 0.5041, 0.1917, 0.3989, 0.2665, 0.3091, 0.1985, 0.5601], + device='cuda:2'), in_proj_covar=tensor([0.0942, 0.0974, 0.0798, 0.0935, 0.0994, 0.0887, 0.0743, 0.0824], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 01:57:48,642 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.575e+02 2.946e+02 3.630e+02 8.805e+02, threshold=5.893e+02, percent-clipped=3.0 +2023-02-07 01:58:04,870 INFO [train.py:901] (2/4) Epoch 21, batch 2150, loss[loss=0.2111, simple_loss=0.2898, pruned_loss=0.06625, over 7922.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2889, pruned_loss=0.06286, over 1619441.84 frames. ], batch size: 20, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:58:14,694 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:58:31,302 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:58:39,334 INFO [train.py:901] (2/4) Epoch 21, batch 2200, loss[loss=0.1819, simple_loss=0.2738, pruned_loss=0.04501, over 8324.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.289, pruned_loss=0.06297, over 1618532.70 frames. 
], batch size: 25, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:58:49,741 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8828, 1.7432, 2.4862, 1.6555, 1.4227, 2.4142, 0.3614, 1.4883], + device='cuda:2'), covar=tensor([0.1432, 0.1426, 0.0301, 0.1122, 0.2883, 0.0389, 0.2315, 0.1355], + device='cuda:2'), in_proj_covar=tensor([0.0186, 0.0195, 0.0126, 0.0221, 0.0271, 0.0132, 0.0168, 0.0189], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 01:58:53,008 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0405, 2.4884, 3.7160, 2.1366, 2.0916, 3.6261, 0.8912, 2.2910], + device='cuda:2'), covar=tensor([0.1269, 0.1334, 0.0212, 0.1623, 0.2451, 0.0343, 0.2221, 0.1461], + device='cuda:2'), in_proj_covar=tensor([0.0186, 0.0195, 0.0126, 0.0221, 0.0270, 0.0133, 0.0168, 0.0189], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 01:58:58,248 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.475e+02 2.987e+02 3.670e+02 7.762e+02, threshold=5.973e+02, percent-clipped=3.0 +2023-02-07 01:59:15,124 INFO [train.py:901] (2/4) Epoch 21, batch 2250, loss[loss=0.2152, simple_loss=0.3031, pruned_loss=0.06358, over 8465.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2893, pruned_loss=0.06292, over 1622272.48 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 01:59:49,147 INFO [train.py:901] (2/4) Epoch 21, batch 2300, loss[loss=0.2082, simple_loss=0.2805, pruned_loss=0.06794, over 7820.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2894, pruned_loss=0.06319, over 1619603.03 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 01:59:58,910 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163973.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:00:08,073 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.361e+02 2.889e+02 3.736e+02 8.411e+02, threshold=5.778e+02, percent-clipped=4.0 +2023-02-07 02:00:17,857 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:00:26,190 INFO [train.py:901] (2/4) Epoch 21, batch 2350, loss[loss=0.2085, simple_loss=0.2861, pruned_loss=0.06544, over 8721.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2885, pruned_loss=0.06266, over 1619646.10 frames. ], batch size: 49, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:00:35,300 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6823, 4.7052, 4.1646, 2.3925, 4.1746, 4.3668, 4.1890, 4.2489], + device='cuda:2'), covar=tensor([0.0721, 0.0504, 0.1125, 0.4669, 0.0861, 0.1113, 0.1402, 0.0768], + device='cuda:2'), in_proj_covar=tensor([0.0516, 0.0433, 0.0434, 0.0538, 0.0426, 0.0440, 0.0421, 0.0382], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:01:01,226 INFO [train.py:901] (2/4) Epoch 21, batch 2400, loss[loss=0.2231, simple_loss=0.2971, pruned_loss=0.07456, over 8134.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2886, pruned_loss=0.06288, over 1618457.10 frames. 
], batch size: 22, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:01:19,286 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 2.419e+02 2.926e+02 3.800e+02 6.132e+02, threshold=5.852e+02, percent-clipped=4.0 +2023-02-07 02:01:37,286 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:01:37,736 INFO [train.py:901] (2/4) Epoch 21, batch 2450, loss[loss=0.2298, simple_loss=0.3073, pruned_loss=0.07614, over 8558.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2888, pruned_loss=0.06292, over 1620744.59 frames. ], batch size: 31, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:02:12,735 INFO [train.py:901] (2/4) Epoch 21, batch 2500, loss[loss=0.1963, simple_loss=0.2825, pruned_loss=0.05507, over 7803.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.288, pruned_loss=0.06229, over 1621909.76 frames. ], batch size: 19, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:02:22,153 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164173.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:02:30,792 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.421e+02 3.174e+02 4.025e+02 1.090e+03, threshold=6.349e+02, percent-clipped=9.0 +2023-02-07 02:02:46,232 INFO [train.py:901] (2/4) Epoch 21, batch 2550, loss[loss=0.196, simple_loss=0.2813, pruned_loss=0.05538, over 7656.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.289, pruned_loss=0.06243, over 1626742.05 frames. ], batch size: 19, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:02:55,032 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1800, 2.3180, 1.8231, 2.8148, 1.2952, 1.6524, 1.9658, 2.2734], + device='cuda:2'), covar=tensor([0.0683, 0.0716, 0.0943, 0.0377, 0.1173, 0.1313, 0.0913, 0.0801], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0195, 0.0244, 0.0212, 0.0204, 0.0244, 0.0250, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 02:03:22,649 INFO [train.py:901] (2/4) Epoch 21, batch 2600, loss[loss=0.2238, simple_loss=0.304, pruned_loss=0.07185, over 8341.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.289, pruned_loss=0.06235, over 1625616.75 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:03:40,890 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.272e+02 2.670e+02 3.622e+02 6.852e+02, threshold=5.341e+02, percent-clipped=1.0 +2023-02-07 02:03:56,816 INFO [train.py:901] (2/4) Epoch 21, batch 2650, loss[loss=0.209, simple_loss=0.3014, pruned_loss=0.05825, over 8299.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2892, pruned_loss=0.0625, over 1625431.91 frames. ], batch size: 23, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:04:33,157 INFO [train.py:901] (2/4) Epoch 21, batch 2700, loss[loss=0.2147, simple_loss=0.2817, pruned_loss=0.07381, over 7794.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2881, pruned_loss=0.0621, over 1621441.63 frames. 
], batch size: 19, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:04:46,950 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:04:52,078 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.228e+02 2.697e+02 3.361e+02 7.045e+02, threshold=5.394e+02, percent-clipped=4.0 +2023-02-07 02:05:07,796 INFO [train.py:901] (2/4) Epoch 21, batch 2750, loss[loss=0.2041, simple_loss=0.2954, pruned_loss=0.05637, over 8774.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.06162, over 1619709.01 frames. ], batch size: 30, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:05:36,818 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:05:42,224 INFO [train.py:901] (2/4) Epoch 21, batch 2800, loss[loss=0.1841, simple_loss=0.272, pruned_loss=0.04815, over 7812.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2884, pruned_loss=0.06202, over 1621370.40 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:02,580 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.305e+02 2.813e+02 3.760e+02 7.507e+02, threshold=5.625e+02, percent-clipped=3.0 +2023-02-07 02:06:18,052 INFO [train.py:901] (2/4) Epoch 21, batch 2850, loss[loss=0.206, simple_loss=0.2918, pruned_loss=0.06008, over 8496.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2878, pruned_loss=0.06205, over 1619411.72 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:23,426 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:06:51,364 INFO [train.py:901] (2/4) Epoch 21, batch 2900, loss[loss=0.2209, simple_loss=0.3046, pruned_loss=0.06853, over 8475.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06186, over 1617655.30 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:56,997 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:09,755 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 02:07:11,685 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.599e+02 3.265e+02 4.069e+02 1.074e+03, threshold=6.531e+02, percent-clipped=8.0 +2023-02-07 02:07:11,862 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:19,712 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:28,727 INFO [train.py:901] (2/4) Epoch 21, batch 2950, loss[loss=0.2256, simple_loss=0.3162, pruned_loss=0.06749, over 8489.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2881, pruned_loss=0.06147, over 1617892.81 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:07:44,449 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:08:02,296 INFO [train.py:901] (2/4) Epoch 21, batch 3000, loss[loss=0.1933, simple_loss=0.2852, pruned_loss=0.05071, over 8474.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2883, pruned_loss=0.06196, over 1616139.48 frames. 
], batch size: 25, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:08:02,296 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 02:08:15,066 INFO [train.py:935] (2/4) Epoch 21, validation: loss=0.1742, simple_loss=0.2744, pruned_loss=0.03706, over 944034.00 frames. +2023-02-07 02:08:15,067 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 02:08:26,763 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164676.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:08:33,568 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.380e+02 2.886e+02 3.399e+02 6.002e+02, threshold=5.772e+02, percent-clipped=0.0 +2023-02-07 02:08:46,930 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 02:08:49,843 INFO [train.py:901] (2/4) Epoch 21, batch 3050, loss[loss=0.211, simple_loss=0.2927, pruned_loss=0.06466, over 8257.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2886, pruned_loss=0.06249, over 1612808.52 frames. ], batch size: 24, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:08:59,357 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:09:08,316 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.4455, 5.5674, 4.8211, 2.3653, 4.8654, 5.2364, 5.2479, 5.0528], + device='cuda:2'), covar=tensor([0.0634, 0.0396, 0.0924, 0.4436, 0.0727, 0.0785, 0.0913, 0.0670], + device='cuda:2'), in_proj_covar=tensor([0.0512, 0.0428, 0.0428, 0.0529, 0.0420, 0.0432, 0.0411, 0.0377], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:09:25,484 INFO [train.py:901] (2/4) Epoch 21, batch 3100, loss[loss=0.1891, simple_loss=0.2688, pruned_loss=0.05469, over 7937.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2898, pruned_loss=0.0629, over 1615862.65 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:09:29,031 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164764.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:09:38,555 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4953, 2.5246, 1.7804, 2.2295, 2.1741, 1.4701, 2.0386, 2.0698], + device='cuda:2'), covar=tensor([0.1746, 0.0476, 0.1315, 0.0636, 0.0796, 0.1685, 0.1123, 0.1040], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0234, 0.0333, 0.0306, 0.0296, 0.0331, 0.0341, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:09:39,119 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9178, 6.1047, 5.3509, 2.5021, 5.4496, 5.7859, 5.5524, 5.4800], + device='cuda:2'), covar=tensor([0.0498, 0.0345, 0.0802, 0.4497, 0.0637, 0.0623, 0.1030, 0.0504], + device='cuda:2'), in_proj_covar=tensor([0.0509, 0.0426, 0.0426, 0.0526, 0.0417, 0.0430, 0.0409, 0.0376], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:09:43,637 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.375e+02 2.980e+02 3.572e+02 8.800e+02, threshold=5.960e+02, percent-clipped=5.0 +2023-02-07 02:09:59,117 INFO [train.py:901] (2/4) Epoch 21, batch 3150, loss[loss=0.2059, simple_loss=0.2754, pruned_loss=0.06816, over 7539.00 frames. 
], tot_loss[loss=0.2061, simple_loss=0.2878, pruned_loss=0.06223, over 1610069.52 frames. ], batch size: 18, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:10:08,646 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:12,638 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8793, 1.6949, 3.0202, 1.4897, 2.3701, 3.2932, 3.3605, 2.8519], + device='cuda:2'), covar=tensor([0.1129, 0.1560, 0.0397, 0.2126, 0.0958, 0.0265, 0.0631, 0.0569], + device='cuda:2'), in_proj_covar=tensor([0.0290, 0.0320, 0.0288, 0.0314, 0.0304, 0.0261, 0.0410, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:10:19,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:26,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:34,948 INFO [train.py:901] (2/4) Epoch 21, batch 3200, loss[loss=0.2853, simple_loss=0.3359, pruned_loss=0.1173, over 6867.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2882, pruned_loss=0.06242, over 1608866.98 frames. ], batch size: 71, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:10:54,110 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.324e+02 2.650e+02 3.384e+02 7.808e+02, threshold=5.299e+02, percent-clipped=1.0 +2023-02-07 02:10:55,687 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164888.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:09,477 INFO [train.py:901] (2/4) Epoch 21, batch 3250, loss[loss=0.184, simple_loss=0.2791, pruned_loss=0.04443, over 8466.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2886, pruned_loss=0.06231, over 1608903.78 frames. ], batch size: 25, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:11:12,441 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:23,959 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:30,775 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:44,788 INFO [train.py:901] (2/4) Epoch 21, batch 3300, loss[loss=0.17, simple_loss=0.2648, pruned_loss=0.03759, over 8030.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2884, pruned_loss=0.06228, over 1611947.84 frames. ], batch size: 22, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:05,208 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.295e+02 2.742e+02 3.217e+02 7.829e+02, threshold=5.483e+02, percent-clipped=4.0 +2023-02-07 02:12:20,627 INFO [train.py:901] (2/4) Epoch 21, batch 3350, loss[loss=0.2237, simple_loss=0.3124, pruned_loss=0.06756, over 8245.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2888, pruned_loss=0.0624, over 1611849.41 frames. 
], batch size: 24, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:28,063 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165020.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:45,361 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165045.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:52,348 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:54,954 INFO [train.py:901] (2/4) Epoch 21, batch 3400, loss[loss=0.2164, simple_loss=0.3017, pruned_loss=0.06549, over 8393.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.06187, over 1613136.47 frames. ], batch size: 49, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:59,414 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.37 vs. limit=5.0 +2023-02-07 02:13:15,612 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 2.300e+02 2.821e+02 3.884e+02 1.046e+03, threshold=5.643e+02, percent-clipped=8.0 +2023-02-07 02:13:20,665 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:31,317 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:31,892 INFO [train.py:901] (2/4) Epoch 21, batch 3450, loss[loss=0.2462, simple_loss=0.3333, pruned_loss=0.07953, over 8565.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2884, pruned_loss=0.06224, over 1617918.97 frames. ], batch size: 39, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:13:38,108 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:49,305 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165135.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:53,301 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:14:05,181 INFO [train.py:901] (2/4) Epoch 21, batch 3500, loss[loss=0.2149, simple_loss=0.2931, pruned_loss=0.06836, over 7805.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2901, pruned_loss=0.06354, over 1617669.23 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:14:07,492 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.22 vs. limit=5.0 +2023-02-07 02:14:10,626 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 02:14:19,675 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6142, 1.9552, 2.1163, 1.3217, 2.1066, 1.4208, 0.6992, 1.8431], + device='cuda:2'), covar=tensor([0.0868, 0.0410, 0.0326, 0.0725, 0.0580, 0.1051, 0.1073, 0.0395], + device='cuda:2'), in_proj_covar=tensor([0.0449, 0.0384, 0.0339, 0.0440, 0.0372, 0.0528, 0.0386, 0.0414], + device='cuda:2'), out_proj_covar=tensor([1.2068e-04, 1.0065e-04, 8.9298e-05, 1.1636e-04, 9.8080e-05, 1.4961e-04, + 1.0436e-04, 1.1000e-04], device='cuda:2') +2023-02-07 02:14:24,648 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.436e+02 2.745e+02 3.695e+02 8.606e+02, threshold=5.490e+02, percent-clipped=3.0 +2023-02-07 02:14:31,313 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. 
limit=2.0 +2023-02-07 02:14:41,284 INFO [train.py:901] (2/4) Epoch 21, batch 3550, loss[loss=0.2275, simple_loss=0.2866, pruned_loss=0.08414, over 7927.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2897, pruned_loss=0.06269, over 1620539.61 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:14:51,650 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165223.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:15,601 INFO [train.py:901] (2/4) Epoch 21, batch 3600, loss[loss=0.1928, simple_loss=0.2743, pruned_loss=0.05569, over 7968.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2896, pruned_loss=0.06293, over 1620866.52 frames. ], batch size: 21, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:15:34,163 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.347e+02 2.942e+02 3.699e+02 7.087e+02, threshold=5.884e+02, percent-clipped=2.0 +2023-02-07 02:15:36,933 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1451, 1.1972, 1.4462, 1.1088, 0.7146, 1.2257, 1.0599, 0.8531], + device='cuda:2'), covar=tensor([0.0628, 0.1191, 0.1494, 0.1373, 0.0583, 0.1386, 0.0717, 0.0714], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0158, 0.0099, 0.0161, 0.0112, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 02:15:44,469 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:51,140 INFO [train.py:901] (2/4) Epoch 21, batch 3650, loss[loss=0.2314, simple_loss=0.3116, pruned_loss=0.07557, over 7935.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2872, pruned_loss=0.06186, over 1617721.86 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:15:52,698 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:53,563 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 02:15:56,696 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7998, 2.2126, 1.6757, 2.7602, 1.3046, 1.5499, 1.9047, 2.1448], + device='cuda:2'), covar=tensor([0.0791, 0.0727, 0.1007, 0.0372, 0.1194, 0.1349, 0.1005, 0.0742], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0205, 0.0246, 0.0250, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 02:16:03,316 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165326.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:08,100 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8156, 2.4387, 4.3779, 1.6796, 3.3553, 2.3999, 2.0467, 3.1950], + device='cuda:2'), covar=tensor([0.1945, 0.2528, 0.0823, 0.4390, 0.1554, 0.3118, 0.2191, 0.2229], + device='cuda:2'), in_proj_covar=tensor([0.0523, 0.0596, 0.0557, 0.0639, 0.0645, 0.0592, 0.0535, 0.0634], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:16:10,826 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:16,761 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-07 02:16:25,986 INFO [train.py:901] (2/4) Epoch 21, batch 3700, loss[loss=0.2545, simple_loss=0.3327, pruned_loss=0.0882, over 8327.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2879, pruned_loss=0.06227, over 1614976.29 frames. ], batch size: 26, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:16:44,014 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.402e+02 2.885e+02 3.854e+02 8.848e+02, threshold=5.771e+02, percent-clipped=5.0 +2023-02-07 02:16:47,619 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165391.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:59,579 INFO [train.py:901] (2/4) Epoch 21, batch 3750, loss[loss=0.2002, simple_loss=0.278, pruned_loss=0.06117, over 8091.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2886, pruned_loss=0.06277, over 1609716.74 frames. ], batch size: 21, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:17:04,500 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:16,250 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3612, 2.8475, 2.3420, 3.9498, 1.7047, 2.2747, 2.3077, 2.8679], + device='cuda:2'), covar=tensor([0.0659, 0.0709, 0.0834, 0.0217, 0.1092, 0.1092, 0.1041, 0.0797], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0205, 0.0246, 0.0251, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 02:17:36,650 INFO [train.py:901] (2/4) Epoch 21, batch 3800, loss[loss=0.1875, simple_loss=0.2664, pruned_loss=0.0543, over 7552.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2886, pruned_loss=0.06313, over 1607042.61 frames. 
], batch size: 18, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:17:49,673 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9530, 1.5577, 1.7311, 1.3794, 0.9129, 1.4752, 1.7942, 1.6294], + device='cuda:2'), covar=tensor([0.0544, 0.1242, 0.1612, 0.1477, 0.0612, 0.1525, 0.0688, 0.0637], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0160, 0.0099, 0.0162, 0.0113, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 02:17:50,402 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165479.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:54,319 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:54,906 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.401e+02 2.925e+02 3.673e+02 6.793e+02, threshold=5.851e+02, percent-clipped=2.0 +2023-02-07 02:17:59,010 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9704, 1.5263, 4.4564, 2.0112, 2.5793, 5.1125, 5.1629, 4.3895], + device='cuda:2'), covar=tensor([0.1285, 0.1911, 0.0271, 0.1922, 0.1094, 0.0158, 0.0429, 0.0578], + device='cuda:2'), in_proj_covar=tensor([0.0292, 0.0321, 0.0287, 0.0315, 0.0308, 0.0263, 0.0412, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:18:07,159 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:18:10,359 INFO [train.py:901] (2/4) Epoch 21, batch 3850, loss[loss=0.2028, simple_loss=0.2892, pruned_loss=0.0582, over 8458.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2884, pruned_loss=0.06268, over 1607881.26 frames. ], batch size: 25, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:18:18,565 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 02:18:31,966 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1524, 3.8856, 2.7277, 3.0208, 3.1919, 2.4157, 3.0900, 3.1118], + device='cuda:2'), covar=tensor([0.1695, 0.0332, 0.1051, 0.0814, 0.0678, 0.1351, 0.0985, 0.1085], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0235, 0.0334, 0.0310, 0.0298, 0.0336, 0.0345, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:18:46,107 INFO [train.py:901] (2/4) Epoch 21, batch 3900, loss[loss=0.1945, simple_loss=0.2791, pruned_loss=0.05492, over 8547.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2886, pruned_loss=0.06293, over 1608932.07 frames. ], batch size: 31, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:18:53,053 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:19:05,131 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.182e+02 2.809e+02 3.459e+02 6.713e+02, threshold=5.619e+02, percent-clipped=4.0 +2023-02-07 02:19:14,939 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165600.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:19:20,726 INFO [train.py:901] (2/4) Epoch 21, batch 3950, loss[loss=0.2504, simple_loss=0.3244, pruned_loss=0.08822, over 8133.00 frames. 
], tot_loss[loss=0.2076, simple_loss=0.2891, pruned_loss=0.06303, over 1611957.61 frames. ], batch size: 22, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:19:55,186 INFO [train.py:901] (2/4) Epoch 21, batch 4000, loss[loss=0.2243, simple_loss=0.3206, pruned_loss=0.06397, over 8248.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2901, pruned_loss=0.06344, over 1611431.37 frames. ], batch size: 24, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:20:15,739 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.370e+02 2.936e+02 3.785e+02 6.204e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-07 02:20:31,248 INFO [train.py:901] (2/4) Epoch 21, batch 4050, loss[loss=0.1608, simple_loss=0.2515, pruned_loss=0.03505, over 7646.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2897, pruned_loss=0.06297, over 1613653.73 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:20:47,659 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 02:21:03,807 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-07 02:21:04,761 INFO [train.py:901] (2/4) Epoch 21, batch 4100, loss[loss=0.171, simple_loss=0.2638, pruned_loss=0.03916, over 8291.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06221, over 1607704.90 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:21:11,130 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:21:24,843 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.510e+02 3.105e+02 3.860e+02 6.931e+02, threshold=6.209e+02, percent-clipped=6.0 +2023-02-07 02:21:41,898 INFO [train.py:901] (2/4) Epoch 21, batch 4150, loss[loss=0.2402, simple_loss=0.3145, pruned_loss=0.08299, over 8765.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2891, pruned_loss=0.06287, over 1612290.97 frames. ], batch size: 30, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:21:48,855 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6716, 1.5887, 2.2857, 1.5742, 1.2362, 2.1852, 0.4557, 1.3436], + device='cuda:2'), covar=tensor([0.1880, 0.1498, 0.0339, 0.1138, 0.2903, 0.0392, 0.2283, 0.1395], + device='cuda:2'), in_proj_covar=tensor([0.0188, 0.0195, 0.0128, 0.0221, 0.0272, 0.0135, 0.0171, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 02:21:50,124 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165821.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:13,881 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165856.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:15,094 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 02:22:15,767 INFO [train.py:901] (2/4) Epoch 21, batch 4200, loss[loss=0.1688, simple_loss=0.2483, pruned_loss=0.04466, over 7788.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2883, pruned_loss=0.06197, over 1615829.42 frames. 
], batch size: 19, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:22:30,503 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:33,700 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.320e+02 2.907e+02 3.705e+02 7.802e+02, threshold=5.814e+02, percent-clipped=2.0 +2023-02-07 02:22:37,068 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 02:22:50,803 INFO [train.py:901] (2/4) Epoch 21, batch 4250, loss[loss=0.2027, simple_loss=0.2919, pruned_loss=0.05671, over 8466.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.06187, over 1617822.44 frames. ], batch size: 25, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:22:53,703 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:23:14,527 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5363, 2.0140, 3.1892, 1.4163, 2.3595, 1.9050, 1.7349, 2.3655], + device='cuda:2'), covar=tensor([0.1927, 0.2453, 0.0864, 0.4409, 0.1906, 0.3295, 0.2255, 0.2343], + device='cuda:2'), in_proj_covar=tensor([0.0523, 0.0597, 0.0555, 0.0639, 0.0645, 0.0594, 0.0536, 0.0635], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:23:26,544 INFO [train.py:901] (2/4) Epoch 21, batch 4300, loss[loss=0.2391, simple_loss=0.329, pruned_loss=0.07463, over 8520.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2897, pruned_loss=0.06331, over 1617572.03 frames. ], batch size: 28, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:23:44,458 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.284e+02 2.728e+02 3.396e+02 7.954e+02, threshold=5.457e+02, percent-clipped=4.0 +2023-02-07 02:23:50,849 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5074, 1.9955, 3.1896, 1.3762, 2.3277, 1.9584, 1.7372, 2.3726], + device='cuda:2'), covar=tensor([0.1870, 0.2570, 0.0824, 0.4524, 0.1935, 0.3348, 0.2279, 0.2386], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0595, 0.0551, 0.0636, 0.0642, 0.0591, 0.0534, 0.0632], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:24:01,652 INFO [train.py:901] (2/4) Epoch 21, batch 4350, loss[loss=0.2126, simple_loss=0.2967, pruned_loss=0.06425, over 8328.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2897, pruned_loss=0.06364, over 1614560.40 frames. ], batch size: 25, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:11,727 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-07 02:24:15,244 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:24:16,542 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6916, 2.0714, 3.2315, 1.5139, 2.4615, 2.1491, 1.7986, 2.5012], + device='cuda:2'), covar=tensor([0.1810, 0.2465, 0.0779, 0.4313, 0.1777, 0.3015, 0.2245, 0.2197], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0593, 0.0551, 0.0635, 0.0641, 0.0590, 0.0533, 0.0631], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:24:36,828 INFO [train.py:901] (2/4) Epoch 21, batch 4400, loss[loss=0.2008, simple_loss=0.2856, pruned_loss=0.05796, over 7965.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2908, pruned_loss=0.06421, over 1614373.34 frames. ], batch size: 21, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:45,737 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166072.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:24:54,405 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 02:24:55,048 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.482e+02 3.095e+02 3.863e+02 7.424e+02, threshold=6.191e+02, percent-clipped=10.0 +2023-02-07 02:25:10,664 INFO [train.py:901] (2/4) Epoch 21, batch 4450, loss[loss=0.2024, simple_loss=0.2816, pruned_loss=0.06158, over 8036.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.0637, over 1615960.85 frames. ], batch size: 22, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:25:12,803 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166112.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:22,694 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5137, 2.7434, 3.2815, 1.8714, 3.4430, 2.2208, 1.6321, 2.2788], + device='cuda:2'), covar=tensor([0.0727, 0.0363, 0.0277, 0.0728, 0.0394, 0.0733, 0.0964, 0.0521], + device='cuda:2'), in_proj_covar=tensor([0.0454, 0.0387, 0.0341, 0.0441, 0.0374, 0.0533, 0.0391, 0.0415], + device='cuda:2'), out_proj_covar=tensor([1.2196e-04, 1.0144e-04, 8.9743e-05, 1.1657e-04, 9.8624e-05, 1.5080e-04, + 1.0559e-04, 1.1017e-04], device='cuda:2') +2023-02-07 02:25:27,717 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 02:25:45,423 INFO [train.py:901] (2/4) Epoch 21, batch 4500, loss[loss=0.2078, simple_loss=0.2969, pruned_loss=0.05935, over 8351.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2894, pruned_loss=0.06357, over 1611906.58 frames. ], batch size: 24, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:25:50,219 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 02:25:50,295 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:50,362 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:05,008 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.688e+02 3.191e+02 4.415e+02 1.086e+03, threshold=6.382e+02, percent-clipped=9.0 +2023-02-07 02:26:20,603 INFO [train.py:901] (2/4) Epoch 21, batch 4550, loss[loss=0.2042, simple_loss=0.297, pruned_loss=0.05572, over 8527.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2896, pruned_loss=0.06341, over 1615124.74 frames. ], batch size: 28, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:26:27,579 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9417, 1.6750, 2.0951, 1.8517, 2.0383, 1.9481, 1.7349, 0.8641], + device='cuda:2'), covar=tensor([0.5185, 0.4585, 0.1776, 0.3234, 0.2272, 0.2826, 0.1906, 0.4744], + device='cuda:2'), in_proj_covar=tensor([0.0935, 0.0972, 0.0794, 0.0932, 0.0989, 0.0885, 0.0741, 0.0820], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 02:26:33,046 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166227.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:54,696 INFO [train.py:901] (2/4) Epoch 21, batch 4600, loss[loss=0.2301, simple_loss=0.298, pruned_loss=0.08108, over 8250.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2898, pruned_loss=0.06365, over 1612895.11 frames. ], batch size: 22, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:10,560 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166280.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:14,133 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166284.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:15,243 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.551e+02 3.310e+02 4.080e+02 7.820e+02, threshold=6.621e+02, percent-clipped=4.0 +2023-02-07 02:27:30,339 INFO [train.py:901] (2/4) Epoch 21, batch 4650, loss[loss=0.1883, simple_loss=0.2629, pruned_loss=0.05687, over 7796.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2895, pruned_loss=0.06349, over 1614083.46 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:30,537 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:41,203 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:28:03,638 INFO [train.py:901] (2/4) Epoch 21, batch 4700, loss[loss=0.2176, simple_loss=0.3073, pruned_loss=0.06395, over 8574.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2897, pruned_loss=0.0633, over 1614024.10 frames. ], batch size: 39, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:28:23,890 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.420e+02 2.373e+02 2.801e+02 3.877e+02 1.145e+03, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 02:28:40,127 INFO [train.py:901] (2/4) Epoch 21, batch 4750, loss[loss=0.1686, simple_loss=0.252, pruned_loss=0.04262, over 7534.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.291, pruned_loss=0.06385, over 1613820.00 frames. 
], batch size: 18, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:28:45,011 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166416.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:28:52,945 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 02:28:55,070 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 02:29:14,172 INFO [train.py:901] (2/4) Epoch 21, batch 4800, loss[loss=0.2381, simple_loss=0.2965, pruned_loss=0.08987, over 7803.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.0638, over 1610755.77 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:30,702 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166483.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:33,189 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.407e+02 2.819e+02 3.849e+02 8.316e+02, threshold=5.639e+02, percent-clipped=5.0 +2023-02-07 02:29:39,662 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.82 vs. limit=5.0 +2023-02-07 02:29:42,736 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:43,963 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 02:29:48,838 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166508.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:49,313 INFO [train.py:901] (2/4) Epoch 21, batch 4850, loss[loss=0.1768, simple_loss=0.2499, pruned_loss=0.05186, over 7697.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.29, pruned_loss=0.0634, over 1605861.27 frames. ], batch size: 18, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:49,391 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:03,258 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:05,913 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166531.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:30:09,300 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166536.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:17,939 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:21,057 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.32 vs. limit=5.0 +2023-02-07 02:30:24,634 INFO [train.py:901] (2/4) Epoch 21, batch 4900, loss[loss=0.19, simple_loss=0.2784, pruned_loss=0.0508, over 8026.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2894, pruned_loss=0.06327, over 1604856.30 frames. 
], batch size: 22, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:30:26,224 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:43,074 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.429e+02 3.059e+02 4.014e+02 7.599e+02, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 02:30:54,286 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1826, 1.0668, 1.2971, 1.0798, 1.0019, 1.3470, 0.0801, 0.8796], + device='cuda:2'), covar=tensor([0.1640, 0.1444, 0.0514, 0.0810, 0.2780, 0.0543, 0.2235, 0.1337], + device='cuda:2'), in_proj_covar=tensor([0.0190, 0.0196, 0.0128, 0.0223, 0.0272, 0.0136, 0.0172, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 02:30:58,617 INFO [train.py:901] (2/4) Epoch 21, batch 4950, loss[loss=0.1832, simple_loss=0.2645, pruned_loss=0.05094, over 7231.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2891, pruned_loss=0.06308, over 1607443.72 frames. ], batch size: 16, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:31:09,544 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:34,515 INFO [train.py:901] (2/4) Epoch 21, batch 5000, loss[loss=0.2276, simple_loss=0.2957, pruned_loss=0.07975, over 7807.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2896, pruned_loss=0.0632, over 1609091.76 frames. ], batch size: 20, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:31:41,052 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:52,855 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.262e+02 2.770e+02 3.475e+02 7.586e+02, threshold=5.540e+02, percent-clipped=2.0 +2023-02-07 02:32:07,630 INFO [train.py:901] (2/4) Epoch 21, batch 5050, loss[loss=0.2099, simple_loss=0.2947, pruned_loss=0.06255, over 8245.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2896, pruned_loss=0.0636, over 1606770.23 frames. ], batch size: 24, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:23,042 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 02:32:38,359 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:32:42,937 INFO [train.py:901] (2/4) Epoch 21, batch 5100, loss[loss=0.1937, simple_loss=0.2788, pruned_loss=0.05431, over 8480.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2895, pruned_loss=0.06364, over 1607041.83 frames. 
], batch size: 29, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:59,530 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:00,920 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:02,720 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.499e+02 3.045e+02 3.729e+02 1.083e+03, threshold=6.090e+02, percent-clipped=5.0 +2023-02-07 02:33:02,983 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166787.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:33:17,643 INFO [train.py:901] (2/4) Epoch 21, batch 5150, loss[loss=0.2006, simple_loss=0.2802, pruned_loss=0.06047, over 8148.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2899, pruned_loss=0.06371, over 1614714.20 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:19,983 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166812.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:33:35,604 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0794, 2.3065, 1.8774, 2.7978, 1.3497, 1.6818, 1.8829, 2.3235], + device='cuda:2'), covar=tensor([0.0705, 0.0694, 0.0853, 0.0339, 0.1078, 0.1216, 0.0904, 0.0714], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0194, 0.0241, 0.0210, 0.0203, 0.0241, 0.0249, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 02:33:40,946 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:52,912 INFO [train.py:901] (2/4) Epoch 21, batch 5200, loss[loss=0.2063, simple_loss=0.2842, pruned_loss=0.0642, over 7926.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2887, pruned_loss=0.06322, over 1615710.29 frames. ], batch size: 20, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:54,692 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.33 vs. limit=5.0 +2023-02-07 02:34:01,085 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:08,928 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:13,412 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.417e+02 2.893e+02 3.464e+02 9.071e+02, threshold=5.787e+02, percent-clipped=3.0 +2023-02-07 02:34:17,473 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166893.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:22,880 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 02:34:25,859 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166905.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:28,354 INFO [train.py:901] (2/4) Epoch 21, batch 5250, loss[loss=0.2085, simple_loss=0.2936, pruned_loss=0.06165, over 7929.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2875, pruned_loss=0.06262, over 1616390.79 frames. 
], batch size: 20, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:34:53,833 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:00,990 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:01,468 INFO [train.py:901] (2/4) Epoch 21, batch 5300, loss[loss=0.1934, simple_loss=0.2735, pruned_loss=0.05663, over 8138.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2889, pruned_loss=0.06371, over 1614933.23 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:21,124 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:21,579 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.349e+02 2.996e+02 3.802e+02 6.845e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-07 02:35:22,370 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2656, 3.6358, 2.4378, 2.9715, 2.8141, 2.2571, 2.9631, 3.1893], + device='cuda:2'), covar=tensor([0.1637, 0.0647, 0.1141, 0.0736, 0.0855, 0.1297, 0.1095, 0.1054], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0235, 0.0335, 0.0308, 0.0298, 0.0334, 0.0344, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:35:37,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:37,979 INFO [train.py:901] (2/4) Epoch 21, batch 5350, loss[loss=0.2085, simple_loss=0.3015, pruned_loss=0.05772, over 8484.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2891, pruned_loss=0.06392, over 1612103.71 frames. ], batch size: 28, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:58,871 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167040.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:12,081 INFO [train.py:901] (2/4) Epoch 21, batch 5400, loss[loss=0.184, simple_loss=0.2625, pruned_loss=0.0527, over 7641.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2886, pruned_loss=0.06366, over 1609037.17 frames. ], batch size: 19, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:16,473 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167065.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:32,172 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.459e+02 3.013e+02 3.547e+02 6.118e+02, threshold=6.026e+02, percent-clipped=1.0 +2023-02-07 02:36:39,934 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:47,942 INFO [train.py:901] (2/4) Epoch 21, batch 5450, loss[loss=0.2651, simple_loss=0.343, pruned_loss=0.09357, over 8244.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2907, pruned_loss=0.0644, over 1615547.70 frames. 
], batch size: 24, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:55,797 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:58,552 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:01,213 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:12,854 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 02:37:19,304 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8747, 2.0167, 5.9898, 2.3388, 5.3949, 5.0756, 5.5589, 5.4530], + device='cuda:2'), covar=tensor([0.0465, 0.4365, 0.0330, 0.3876, 0.0981, 0.0845, 0.0511, 0.0498], + device='cuda:2'), in_proj_covar=tensor([0.0630, 0.0643, 0.0695, 0.0629, 0.0707, 0.0605, 0.0605, 0.0677], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:37:24,066 INFO [train.py:901] (2/4) Epoch 21, batch 5500, loss[loss=0.1917, simple_loss=0.2817, pruned_loss=0.05085, over 8196.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.29, pruned_loss=0.06385, over 1616310.93 frames. ], batch size: 23, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:37:43,662 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.538e+02 3.099e+02 3.967e+02 8.838e+02, threshold=6.197e+02, percent-clipped=3.0 +2023-02-07 02:37:58,454 INFO [train.py:901] (2/4) Epoch 21, batch 5550, loss[loss=0.2112, simple_loss=0.2847, pruned_loss=0.06887, over 8034.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2903, pruned_loss=0.06391, over 1616721.86 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:38:01,331 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:02,595 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:21,135 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:22,454 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:23,134 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167242.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:34,482 INFO [train.py:901] (2/4) Epoch 21, batch 5600, loss[loss=0.1836, simple_loss=0.2521, pruned_loss=0.05759, over 7661.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2898, pruned_loss=0.06382, over 1612998.74 frames. ], batch size: 19, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:38:34,957 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.61 vs. 
limit=5.0 +2023-02-07 02:38:36,019 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7033, 1.3005, 4.8691, 1.8519, 4.3328, 4.0786, 4.4105, 4.2757], + device='cuda:2'), covar=tensor([0.0500, 0.4833, 0.0442, 0.3928, 0.0948, 0.0821, 0.0542, 0.0573], + device='cuda:2'), in_proj_covar=tensor([0.0629, 0.0637, 0.0691, 0.0626, 0.0701, 0.0602, 0.0601, 0.0672], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:38:38,122 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167264.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:40,084 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167267.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:54,594 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.492e+02 3.097e+02 3.838e+02 7.086e+02, threshold=6.194e+02, percent-clipped=1.0 +2023-02-07 02:38:54,810 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:56,076 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:39:08,035 INFO [train.py:901] (2/4) Epoch 21, batch 5650, loss[loss=0.2166, simple_loss=0.2986, pruned_loss=0.06735, over 8504.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2902, pruned_loss=0.06382, over 1614365.08 frames. ], batch size: 28, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:39:18,773 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 02:39:21,791 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. limit=2.0 +2023-02-07 02:39:44,497 INFO [train.py:901] (2/4) Epoch 21, batch 5700, loss[loss=0.2006, simple_loss=0.2827, pruned_loss=0.05921, over 7963.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2895, pruned_loss=0.06322, over 1615620.55 frames. ], batch size: 21, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:39:48,140 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5336, 1.9231, 2.1403, 1.2938, 2.1864, 1.4334, 0.5832, 1.7232], + device='cuda:2'), covar=tensor([0.0623, 0.0352, 0.0242, 0.0511, 0.0335, 0.0857, 0.0786, 0.0298], + device='cuda:2'), in_proj_covar=tensor([0.0448, 0.0385, 0.0337, 0.0439, 0.0372, 0.0528, 0.0385, 0.0411], + device='cuda:2'), out_proj_covar=tensor([1.2043e-04, 1.0098e-04, 8.8761e-05, 1.1606e-04, 9.8009e-05, 1.4936e-04, + 1.0401e-04, 1.0882e-04], device='cuda:2') +2023-02-07 02:40:04,669 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.585e+02 3.206e+02 3.925e+02 8.506e+02, threshold=6.412e+02, percent-clipped=6.0 +2023-02-07 02:40:16,551 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:18,411 INFO [train.py:901] (2/4) Epoch 21, batch 5750, loss[loss=0.2904, simple_loss=0.3477, pruned_loss=0.1166, over 7130.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2898, pruned_loss=0.06348, over 1614148.77 frames. ], batch size: 72, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:24,269 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 02:40:47,696 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167450.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:53,628 INFO [train.py:901] (2/4) Epoch 21, batch 5800, loss[loss=0.152, simple_loss=0.2396, pruned_loss=0.03223, over 7958.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2888, pruned_loss=0.06263, over 1616336.20 frames. ], batch size: 21, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:55,796 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:59,179 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167466.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:00,730 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:15,048 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.466e+02 2.953e+02 3.603e+02 7.254e+02, threshold=5.907e+02, percent-clipped=1.0 +2023-02-07 02:41:17,999 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:20,774 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:28,633 INFO [train.py:901] (2/4) Epoch 21, batch 5850, loss[loss=0.1935, simple_loss=0.2695, pruned_loss=0.05873, over 7662.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2891, pruned_loss=0.06248, over 1615945.97 frames. ], batch size: 19, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:41:36,514 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3671, 1.5538, 2.1760, 1.3283, 1.3801, 1.6366, 1.4504, 1.4422], + device='cuda:2'), covar=tensor([0.1941, 0.2474, 0.0851, 0.4216, 0.1989, 0.3336, 0.2311, 0.2260], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0593, 0.0551, 0.0634, 0.0637, 0.0587, 0.0528, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:41:37,829 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4972, 2.6528, 1.8999, 2.2901, 2.0902, 1.7211, 2.0220, 2.2244], + device='cuda:2'), covar=tensor([0.1612, 0.0388, 0.1262, 0.0711, 0.0789, 0.1482, 0.1086, 0.1001], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0234, 0.0333, 0.0306, 0.0298, 0.0333, 0.0343, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:41:37,835 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:03,758 INFO [train.py:901] (2/4) Epoch 21, batch 5900, loss[loss=0.2114, simple_loss=0.3014, pruned_loss=0.06064, over 8275.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2887, pruned_loss=0.06229, over 1618829.13 frames. 
], batch size: 23, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:42:16,868 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:19,704 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:22,427 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7464, 2.0025, 2.1452, 1.5071, 2.1631, 1.4802, 0.7336, 1.8720], + device='cuda:2'), covar=tensor([0.0754, 0.0385, 0.0335, 0.0675, 0.0495, 0.1052, 0.0938, 0.0370], + device='cuda:2'), in_proj_covar=tensor([0.0451, 0.0387, 0.0339, 0.0441, 0.0374, 0.0531, 0.0390, 0.0415], + device='cuda:2'), out_proj_covar=tensor([1.2118e-04, 1.0146e-04, 8.9180e-05, 1.1651e-04, 9.8491e-05, 1.5011e-04, + 1.0522e-04, 1.0989e-04], device='cuda:2') +2023-02-07 02:42:25,698 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.257e+02 2.859e+02 3.440e+02 7.059e+02, threshold=5.718e+02, percent-clipped=2.0 +2023-02-07 02:42:40,372 INFO [train.py:901] (2/4) Epoch 21, batch 5950, loss[loss=0.2234, simple_loss=0.3065, pruned_loss=0.07018, over 8107.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2884, pruned_loss=0.0625, over 1617908.21 frames. ], batch size: 23, lr: 3.58e-03, grad_scale: 4.0 +2023-02-07 02:43:14,051 INFO [train.py:901] (2/4) Epoch 21, batch 6000, loss[loss=0.2063, simple_loss=0.2802, pruned_loss=0.06614, over 7526.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2882, pruned_loss=0.0621, over 1617297.91 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:43:14,052 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 02:43:26,396 INFO [train.py:935] (2/4) Epoch 21, validation: loss=0.174, simple_loss=0.2741, pruned_loss=0.03692, over 944034.00 frames. +2023-02-07 02:43:26,397 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 02:43:28,712 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:45,658 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:47,406 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.382e+02 2.918e+02 3.609e+02 5.587e+02, threshold=5.837e+02, percent-clipped=0.0 +2023-02-07 02:44:01,967 INFO [train.py:901] (2/4) Epoch 21, batch 6050, loss[loss=0.183, simple_loss=0.2745, pruned_loss=0.04578, over 8088.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2878, pruned_loss=0.06204, over 1615280.69 frames. ], batch size: 21, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:06,718 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-07 02:44:22,063 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167737.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:38,027 INFO [train.py:901] (2/4) Epoch 21, batch 6100, loss[loss=0.2363, simple_loss=0.3297, pruned_loss=0.0715, over 8345.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2882, pruned_loss=0.06273, over 1608358.68 frames. 
], batch size: 26, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:56,065 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167785.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:57,193 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 02:44:58,541 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.390e+02 3.045e+02 3.849e+02 6.701e+02, threshold=6.089e+02, percent-clipped=2.0 +2023-02-07 02:45:02,109 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167794.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:13,115 INFO [train.py:901] (2/4) Epoch 21, batch 6150, loss[loss=0.2022, simple_loss=0.28, pruned_loss=0.06219, over 8090.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2884, pruned_loss=0.06309, over 1608188.17 frames. ], batch size: 21, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:30,426 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:33,105 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:44,766 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-07 02:45:48,361 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,817 INFO [train.py:901] (2/4) Epoch 21, batch 6200, loss[loss=0.242, simple_loss=0.3204, pruned_loss=0.08183, over 8338.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2888, pruned_loss=0.06324, over 1608513.77 frames. ], batch size: 26, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:51,065 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:09,469 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.266e+02 2.776e+02 3.727e+02 8.167e+02, threshold=5.552e+02, percent-clipped=4.0 +2023-02-07 02:46:23,394 INFO [train.py:901] (2/4) Epoch 21, batch 6250, loss[loss=0.1818, simple_loss=0.2797, pruned_loss=0.04199, over 8351.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2881, pruned_loss=0.06295, over 1609414.58 frames. ], batch size: 24, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:46:23,595 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:58,905 INFO [train.py:901] (2/4) Epoch 21, batch 6300, loss[loss=0.1664, simple_loss=0.2398, pruned_loss=0.04651, over 7541.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2875, pruned_loss=0.06253, over 1610009.08 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:47:06,584 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. 
limit=2.0 +2023-02-07 02:47:20,718 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.475e+02 2.869e+02 3.545e+02 9.430e+02, threshold=5.737e+02, percent-clipped=7.0 +2023-02-07 02:47:22,361 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8669, 2.4124, 4.1086, 1.6089, 3.1210, 2.3233, 1.9038, 2.9446], + device='cuda:2'), covar=tensor([0.1877, 0.2552, 0.0789, 0.4542, 0.1808, 0.3125, 0.2288, 0.2407], + device='cuda:2'), in_proj_covar=tensor([0.0522, 0.0597, 0.0553, 0.0639, 0.0642, 0.0588, 0.0530, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:47:35,234 INFO [train.py:901] (2/4) Epoch 21, batch 6350, loss[loss=0.1848, simple_loss=0.2756, pruned_loss=0.047, over 8026.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2879, pruned_loss=0.06255, over 1608910.48 frames. ], batch size: 22, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:02,053 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9380, 2.0108, 6.0684, 2.3401, 5.4947, 5.1191, 5.5952, 5.4847], + device='cuda:2'), covar=tensor([0.0396, 0.4277, 0.0328, 0.3762, 0.0862, 0.0776, 0.0448, 0.0463], + device='cuda:2'), in_proj_covar=tensor([0.0627, 0.0639, 0.0693, 0.0628, 0.0705, 0.0603, 0.0606, 0.0677], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:48:06,024 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:09,816 INFO [train.py:901] (2/4) Epoch 21, batch 6400, loss[loss=0.184, simple_loss=0.2638, pruned_loss=0.05211, over 7926.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2875, pruned_loss=0.06217, over 1609923.02 frames. ], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:25,123 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168081.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:30,484 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.236e+02 2.639e+02 3.603e+02 6.999e+02, threshold=5.279e+02, percent-clipped=2.0 +2023-02-07 02:48:45,445 INFO [train.py:901] (2/4) Epoch 21, batch 6450, loss[loss=0.1894, simple_loss=0.2819, pruned_loss=0.04838, over 8198.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2872, pruned_loss=0.06213, over 1610209.03 frames. ], batch size: 23, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:50,429 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1654, 3.7231, 2.3909, 2.8834, 3.0308, 2.0753, 2.9091, 3.0352], + device='cuda:2'), covar=tensor([0.1751, 0.0349, 0.1156, 0.0837, 0.0715, 0.1457, 0.1094, 0.1047], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0235, 0.0335, 0.0308, 0.0300, 0.0336, 0.0345, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:48:59,388 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:19,828 INFO [train.py:901] (2/4) Epoch 21, batch 6500, loss[loss=0.1798, simple_loss=0.2678, pruned_loss=0.04589, over 7806.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.287, pruned_loss=0.0621, over 1609212.75 frames. 
], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:49:24,797 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:26,330 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 02:49:26,879 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4415, 2.2860, 3.2010, 2.6081, 3.0161, 2.4303, 2.1900, 1.9306], + device='cuda:2'), covar=tensor([0.5136, 0.5025, 0.1896, 0.3434, 0.2504, 0.3101, 0.1980, 0.5049], + device='cuda:2'), in_proj_covar=tensor([0.0940, 0.0974, 0.0802, 0.0941, 0.0998, 0.0893, 0.0745, 0.0821], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 02:49:41,358 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.483e+02 3.129e+02 4.081e+02 1.148e+03, threshold=6.258e+02, percent-clipped=13.0 +2023-02-07 02:49:42,168 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:46,109 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:52,318 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0679, 1.8557, 2.3925, 2.0506, 2.2474, 2.1351, 1.8251, 1.1405], + device='cuda:2'), covar=tensor([0.5519, 0.4306, 0.1704, 0.2988, 0.2339, 0.2768, 0.1982, 0.4441], + device='cuda:2'), in_proj_covar=tensor([0.0939, 0.0974, 0.0801, 0.0941, 0.0998, 0.0892, 0.0745, 0.0821], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 02:49:54,742 INFO [train.py:901] (2/4) Epoch 21, batch 6550, loss[loss=0.1633, simple_loss=0.2427, pruned_loss=0.04194, over 7816.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2873, pruned_loss=0.06251, over 1611822.11 frames. ], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:19,983 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:20,482 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 02:50:24,647 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:29,932 INFO [train.py:901] (2/4) Epoch 21, batch 6600, loss[loss=0.2056, simple_loss=0.2858, pruned_loss=0.06274, over 8079.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.287, pruned_loss=0.06207, over 1614167.24 frames. ], batch size: 21, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:38,734 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 02:50:50,801 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.414e+02 2.830e+02 3.481e+02 7.637e+02, threshold=5.659e+02, percent-clipped=3.0 +2023-02-07 02:51:05,095 INFO [train.py:901] (2/4) Epoch 21, batch 6650, loss[loss=0.2429, simple_loss=0.3282, pruned_loss=0.07883, over 8487.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2865, pruned_loss=0.06202, over 1610386.42 frames. 
], batch size: 28, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:51:16,784 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9780, 1.9198, 3.2272, 2.4164, 2.7374, 1.9566, 1.7234, 1.8441], + device='cuda:2'), covar=tensor([0.6662, 0.5801, 0.1770, 0.3907, 0.3298, 0.4212, 0.2862, 0.5219], + device='cuda:2'), in_proj_covar=tensor([0.0932, 0.0967, 0.0797, 0.0935, 0.0992, 0.0887, 0.0741, 0.0817], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 02:51:29,486 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5071, 2.4711, 1.7989, 2.1874, 2.0417, 1.4908, 1.9819, 2.0219], + device='cuda:2'), covar=tensor([0.1554, 0.0418, 0.1298, 0.0654, 0.0791, 0.1607, 0.1031, 0.0978], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0235, 0.0334, 0.0307, 0.0300, 0.0335, 0.0344, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:51:40,100 INFO [train.py:901] (2/4) Epoch 21, batch 6700, loss[loss=0.1806, simple_loss=0.2625, pruned_loss=0.04929, over 8242.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2873, pruned_loss=0.06189, over 1613395.03 frames. ], batch size: 22, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:51:58,802 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-07 02:52:00,445 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.306e+02 2.933e+02 3.476e+02 6.537e+02, threshold=5.866e+02, percent-clipped=2.0 +2023-02-07 02:52:05,989 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:14,913 INFO [train.py:901] (2/4) Epoch 21, batch 6750, loss[loss=0.2004, simple_loss=0.2846, pruned_loss=0.0581, over 8193.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.06138, over 1611756.62 frames. ], batch size: 23, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:16,706 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 02:52:32,330 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6827, 2.5938, 1.8343, 2.3246, 2.1722, 1.6037, 2.0323, 2.2170], + device='cuda:2'), covar=tensor([0.1383, 0.0349, 0.1231, 0.0583, 0.0746, 0.1470, 0.0956, 0.0867], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0236, 0.0336, 0.0307, 0.0300, 0.0336, 0.0344, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 02:52:39,060 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7896, 1.5820, 4.9311, 1.8835, 4.4117, 4.1597, 4.4743, 4.3784], + device='cuda:2'), covar=tensor([0.0452, 0.4426, 0.0405, 0.4088, 0.1008, 0.0925, 0.0513, 0.0635], + device='cuda:2'), in_proj_covar=tensor([0.0634, 0.0646, 0.0699, 0.0635, 0.0718, 0.0612, 0.0614, 0.0685], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:52:45,400 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:50,751 INFO [train.py:901] (2/4) Epoch 21, batch 6800, loss[loss=0.2079, simple_loss=0.2956, pruned_loss=0.06013, over 8484.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.06146, over 1612681.69 frames. 
], batch size: 27, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:58,515 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 02:53:04,338 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:09,208 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 02:53:12,378 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.333e+02 2.834e+02 3.373e+02 7.883e+02, threshold=5.669e+02, percent-clipped=5.0 +2023-02-07 02:53:15,413 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4530, 1.8875, 1.9779, 1.2202, 2.0062, 1.3733, 0.4100, 1.6761], + device='cuda:2'), covar=tensor([0.0593, 0.0348, 0.0253, 0.0547, 0.0389, 0.0924, 0.0915, 0.0282], + device='cuda:2'), in_proj_covar=tensor([0.0453, 0.0389, 0.0341, 0.0441, 0.0375, 0.0533, 0.0390, 0.0416], + device='cuda:2'), out_proj_covar=tensor([1.2177e-04, 1.0202e-04, 8.9875e-05, 1.1635e-04, 9.8628e-05, 1.5061e-04, + 1.0535e-04, 1.1017e-04], device='cuda:2') +2023-02-07 02:53:16,002 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5905, 4.6380, 4.1050, 2.2351, 4.0559, 4.1504, 4.1247, 3.9117], + device='cuda:2'), covar=tensor([0.0691, 0.0475, 0.1076, 0.4044, 0.0927, 0.1043, 0.1211, 0.0874], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0429, 0.0429, 0.0533, 0.0423, 0.0439, 0.0419, 0.0383], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:53:20,350 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168500.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:26,309 INFO [train.py:901] (2/4) Epoch 21, batch 6850, loss[loss=0.1611, simple_loss=0.2383, pruned_loss=0.04196, over 7802.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2878, pruned_loss=0.06149, over 1615173.58 frames. ], batch size: 19, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:53:28,535 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:31,835 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5097, 1.7546, 2.8888, 1.3988, 2.1742, 1.8942, 1.5495, 2.0879], + device='cuda:2'), covar=tensor([0.1918, 0.2623, 0.0932, 0.4430, 0.1808, 0.3049, 0.2345, 0.2223], + device='cuda:2'), in_proj_covar=tensor([0.0519, 0.0594, 0.0550, 0.0635, 0.0636, 0.0585, 0.0528, 0.0625], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:53:37,585 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:45,921 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 02:54:00,738 INFO [train.py:901] (2/4) Epoch 21, batch 6900, loss[loss=0.1664, simple_loss=0.2625, pruned_loss=0.03521, over 7977.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2898, pruned_loss=0.06253, over 1618388.41 frames. 
], batch size: 21, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:17,245 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0226, 1.7778, 2.8154, 2.1231, 2.4283, 1.8775, 1.6971, 1.2631], + device='cuda:2'), covar=tensor([0.6723, 0.6378, 0.2088, 0.4268, 0.3299, 0.4600, 0.2916, 0.5695], + device='cuda:2'), in_proj_covar=tensor([0.0940, 0.0974, 0.0802, 0.0940, 0.0999, 0.0893, 0.0745, 0.0824], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 02:54:22,253 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.867e+02 3.613e+02 6.820e+02, threshold=5.733e+02, percent-clipped=1.0 +2023-02-07 02:54:26,393 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:54:35,737 INFO [train.py:901] (2/4) Epoch 21, batch 6950, loss[loss=0.2464, simple_loss=0.3254, pruned_loss=0.08372, over 8617.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2908, pruned_loss=0.06327, over 1621073.45 frames. ], batch size: 39, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:53,399 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 02:55:10,659 INFO [train.py:901] (2/4) Epoch 21, batch 7000, loss[loss=0.1859, simple_loss=0.2879, pruned_loss=0.04193, over 8327.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2905, pruned_loss=0.06321, over 1617957.03 frames. ], batch size: 26, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:55:26,413 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-07 02:55:31,362 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.444e+02 3.041e+02 3.968e+02 8.528e+02, threshold=6.083e+02, percent-clipped=8.0 +2023-02-07 02:55:39,890 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2656, 1.9763, 2.6966, 2.2473, 2.6573, 2.2114, 2.0610, 1.6062], + device='cuda:2'), covar=tensor([0.4732, 0.4569, 0.1766, 0.3201, 0.2136, 0.2851, 0.1882, 0.4565], + device='cuda:2'), in_proj_covar=tensor([0.0938, 0.0972, 0.0801, 0.0940, 0.0995, 0.0891, 0.0744, 0.0822], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 02:55:45,700 INFO [train.py:901] (2/4) Epoch 21, batch 7050, loss[loss=0.2389, simple_loss=0.3131, pruned_loss=0.08235, over 8464.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2902, pruned_loss=0.06325, over 1614325.39 frames. ], batch size: 29, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:55:46,589 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168710.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:19,948 INFO [train.py:901] (2/4) Epoch 21, batch 7100, loss[loss=0.1953, simple_loss=0.287, pruned_loss=0.05175, over 8245.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2902, pruned_loss=0.06283, over 1616442.83 frames. 
], batch size: 24, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:56:26,885 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:40,683 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.605e+02 3.011e+02 3.811e+02 1.077e+03, threshold=6.022e+02, percent-clipped=4.0 +2023-02-07 02:56:43,689 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:44,554 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-07 02:56:55,259 INFO [train.py:901] (2/4) Epoch 21, batch 7150, loss[loss=0.2121, simple_loss=0.2847, pruned_loss=0.06979, over 7653.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2905, pruned_loss=0.0629, over 1618895.98 frames. ], batch size: 19, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:57:20,050 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.02 vs. limit=5.0 +2023-02-07 02:57:29,818 INFO [train.py:901] (2/4) Epoch 21, batch 7200, loss[loss=0.2547, simple_loss=0.3219, pruned_loss=0.09378, over 7312.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2896, pruned_loss=0.06258, over 1617041.74 frames. ], batch size: 72, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:57:36,066 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1424, 2.5405, 2.9500, 1.5168, 3.1818, 1.9148, 1.4849, 2.0685], + device='cuda:2'), covar=tensor([0.0955, 0.0438, 0.0350, 0.0937, 0.0453, 0.0927, 0.0958, 0.0644], + device='cuda:2'), in_proj_covar=tensor([0.0451, 0.0389, 0.0341, 0.0442, 0.0372, 0.0532, 0.0388, 0.0415], + device='cuda:2'), out_proj_covar=tensor([1.2098e-04, 1.0219e-04, 8.9754e-05, 1.1650e-04, 9.7969e-05, 1.5034e-04, + 1.0475e-04, 1.1013e-04], device='cuda:2') +2023-02-07 02:57:38,238 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 02:57:51,146 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 2.343e+02 3.196e+02 4.097e+02 7.456e+02, threshold=6.392e+02, percent-clipped=6.0 +2023-02-07 02:58:02,866 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6066, 1.9652, 2.9496, 1.5270, 2.1528, 2.0637, 1.7442, 2.0341], + device='cuda:2'), covar=tensor([0.1840, 0.2560, 0.0905, 0.4454, 0.1806, 0.3040, 0.2223, 0.2162], + device='cuda:2'), in_proj_covar=tensor([0.0528, 0.0606, 0.0560, 0.0646, 0.0648, 0.0599, 0.0536, 0.0635], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:58:04,692 INFO [train.py:901] (2/4) Epoch 21, batch 7250, loss[loss=0.1812, simple_loss=0.2616, pruned_loss=0.05035, over 7970.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2894, pruned_loss=0.06276, over 1614306.12 frames. ], batch size: 21, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:58:40,072 INFO [train.py:901] (2/4) Epoch 21, batch 7300, loss[loss=0.1846, simple_loss=0.2815, pruned_loss=0.04381, over 8368.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2884, pruned_loss=0.06223, over 1613392.93 frames. 
], batch size: 24, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:58:44,981 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168966.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:58:58,208 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168985.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:58:59,404 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8494, 3.7886, 3.4699, 1.8483, 3.4177, 3.4764, 3.3983, 3.2266], + device='cuda:2'), covar=tensor([0.0816, 0.0652, 0.1208, 0.4470, 0.0966, 0.0944, 0.1471, 0.0982], + device='cuda:2'), in_proj_covar=tensor([0.0523, 0.0431, 0.0432, 0.0533, 0.0423, 0.0442, 0.0421, 0.0384], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 02:58:59,448 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168987.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:59:00,602 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.375e+02 2.880e+02 4.111e+02 9.346e+02, threshold=5.760e+02, percent-clipped=6.0 +2023-02-07 02:59:02,086 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168991.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:59:14,682 INFO [train.py:901] (2/4) Epoch 21, batch 7350, loss[loss=0.1832, simple_loss=0.2682, pruned_loss=0.04914, over 7787.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2879, pruned_loss=0.06206, over 1614792.21 frames. ], batch size: 19, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:59:35,046 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 02:59:49,830 INFO [train.py:901] (2/4) Epoch 21, batch 7400, loss[loss=0.2241, simple_loss=0.3021, pruned_loss=0.07307, over 8434.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.288, pruned_loss=0.06221, over 1612171.94 frames. ], batch size: 27, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:59:53,389 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 03:00:01,565 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-07 03:00:10,727 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 2.322e+02 3.020e+02 4.298e+02 1.187e+03, threshold=6.039e+02, percent-clipped=6.0 +2023-02-07 03:00:19,220 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169100.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:00:25,151 INFO [train.py:901] (2/4) Epoch 21, batch 7450, loss[loss=0.2383, simple_loss=0.317, pruned_loss=0.07977, over 8447.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2883, pruned_loss=0.06201, over 1613836.46 frames. ], batch size: 48, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:00:33,887 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 03:00:42,576 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:01:01,174 INFO [train.py:901] (2/4) Epoch 21, batch 7500, loss[loss=0.2047, simple_loss=0.295, pruned_loss=0.05718, over 8251.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2881, pruned_loss=0.062, over 1612958.12 frames. 
], batch size: 24, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:01:13,527 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:01:21,459 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.287e+02 2.739e+02 3.438e+02 5.948e+02, threshold=5.478e+02, percent-clipped=0.0 +2023-02-07 03:01:35,755 INFO [train.py:901] (2/4) Epoch 21, batch 7550, loss[loss=0.2101, simple_loss=0.2893, pruned_loss=0.06542, over 7924.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2882, pruned_loss=0.0622, over 1611312.64 frames. ], batch size: 20, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:02:09,750 INFO [train.py:901] (2/4) Epoch 21, batch 7600, loss[loss=0.1773, simple_loss=0.257, pruned_loss=0.04874, over 7807.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06199, over 1608389.87 frames. ], batch size: 19, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:02:32,180 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.243e+02 2.742e+02 3.349e+02 1.012e+03, threshold=5.485e+02, percent-clipped=5.0 +2023-02-07 03:02:33,803 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3515, 2.9881, 2.1737, 3.8493, 1.9401, 2.0192, 2.4432, 2.8767], + device='cuda:2'), covar=tensor([0.0731, 0.0746, 0.0957, 0.0255, 0.1050, 0.1271, 0.0940, 0.0800], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0197, 0.0246, 0.0215, 0.0208, 0.0248, 0.0252, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:02:44,277 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 03:02:45,876 INFO [train.py:901] (2/4) Epoch 21, batch 7650, loss[loss=0.2452, simple_loss=0.3198, pruned_loss=0.08534, over 8502.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2876, pruned_loss=0.06202, over 1610243.62 frames. ], batch size: 26, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:03:00,457 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169329.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:03:01,834 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169331.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:03:21,300 INFO [train.py:901] (2/4) Epoch 21, batch 7700, loss[loss=0.242, simple_loss=0.3155, pruned_loss=0.08423, over 8328.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.06168, over 1607340.94 frames. ], batch size: 26, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:03:24,165 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8843, 2.2908, 4.2429, 1.6145, 3.0407, 2.4585, 1.8023, 3.1910], + device='cuda:2'), covar=tensor([0.1896, 0.2745, 0.0736, 0.4434, 0.1721, 0.3051, 0.2437, 0.2014], + device='cuda:2'), in_proj_covar=tensor([0.0527, 0.0602, 0.0559, 0.0642, 0.0646, 0.0596, 0.0534, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:03:42,195 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.349e+02 2.901e+02 3.736e+02 6.675e+02, threshold=5.802e+02, percent-clipped=6.0 +2023-02-07 03:03:44,250 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-07 03:03:57,026 INFO [train.py:901] (2/4) Epoch 21, batch 7750, loss[loss=0.1965, simple_loss=0.2825, pruned_loss=0.05526, over 8347.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06196, over 1612450.90 frames. ], batch size: 26, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:04:21,910 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:22,019 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:23,340 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169446.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:04:32,641 INFO [train.py:901] (2/4) Epoch 21, batch 7800, loss[loss=0.1911, simple_loss=0.2727, pruned_loss=0.05476, over 7705.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2867, pruned_loss=0.06173, over 1604954.96 frames. ], batch size: 18, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:04:34,847 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5298, 1.8036, 1.9607, 1.4367, 2.0536, 1.3902, 0.5829, 1.7925], + device='cuda:2'), covar=tensor([0.0628, 0.0399, 0.0286, 0.0555, 0.0432, 0.0991, 0.0850, 0.0303], + device='cuda:2'), in_proj_covar=tensor([0.0453, 0.0391, 0.0342, 0.0442, 0.0374, 0.0533, 0.0389, 0.0419], + device='cuda:2'), out_proj_covar=tensor([1.2170e-04, 1.0256e-04, 9.0148e-05, 1.1650e-04, 9.8285e-05, 1.5050e-04, + 1.0519e-04, 1.1112e-04], device='cuda:2') +2023-02-07 03:04:45,393 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:52,675 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.145e+02 2.738e+02 3.428e+02 8.790e+02, threshold=5.476e+02, percent-clipped=3.0 +2023-02-07 03:04:52,926 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4001, 2.3407, 1.7148, 2.1289, 2.0045, 1.5215, 1.8423, 1.8619], + device='cuda:2'), covar=tensor([0.1520, 0.0469, 0.1281, 0.0613, 0.0742, 0.1520, 0.0993, 0.1062], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0236, 0.0335, 0.0307, 0.0301, 0.0336, 0.0343, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:04:55,617 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4135, 1.4426, 1.7967, 1.3635, 1.1448, 1.8161, 0.2364, 1.1909], + device='cuda:2'), covar=tensor([0.1639, 0.1187, 0.0437, 0.0845, 0.2417, 0.0420, 0.1964, 0.1103], + device='cuda:2'), in_proj_covar=tensor([0.0188, 0.0195, 0.0127, 0.0221, 0.0270, 0.0135, 0.0171, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:05:06,025 INFO [train.py:901] (2/4) Epoch 21, batch 7850, loss[loss=0.1827, simple_loss=0.2681, pruned_loss=0.0486, over 7811.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2861, pruned_loss=0.06078, over 1607233.00 frames. 
], batch size: 20, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:05:06,872 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3598, 2.0368, 1.4935, 2.0318, 1.7742, 1.2818, 1.7002, 1.7982], + device='cuda:2'), covar=tensor([0.1160, 0.0424, 0.1368, 0.0441, 0.0716, 0.1677, 0.0860, 0.0724], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0236, 0.0335, 0.0308, 0.0300, 0.0336, 0.0344, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:05:10,168 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2013, 1.4707, 3.3163, 1.1583, 2.9333, 2.7499, 3.0289, 2.9190], + device='cuda:2'), covar=tensor([0.0801, 0.3801, 0.0818, 0.3958, 0.1398, 0.1179, 0.0819, 0.0891], + device='cuda:2'), in_proj_covar=tensor([0.0630, 0.0640, 0.0693, 0.0626, 0.0711, 0.0611, 0.0611, 0.0675], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:05:14,138 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:05:39,262 INFO [train.py:901] (2/4) Epoch 21, batch 7900, loss[loss=0.2746, simple_loss=0.3445, pruned_loss=0.1024, over 8681.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.06192, over 1612540.57 frames. ], batch size: 34, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:05:39,446 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:05:41,601 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.77 vs. limit=5.0 +2023-02-07 03:05:59,284 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.405e+02 2.884e+02 3.520e+02 8.387e+02, threshold=5.767e+02, percent-clipped=5.0 +2023-02-07 03:06:02,050 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:08,079 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.08 vs. limit=5.0 +2023-02-07 03:06:12,852 INFO [train.py:901] (2/4) Epoch 21, batch 7950, loss[loss=0.217, simple_loss=0.2995, pruned_loss=0.06725, over 8256.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2883, pruned_loss=0.06209, over 1610717.56 frames. ], batch size: 24, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:06:31,342 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:33,343 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:46,581 INFO [train.py:901] (2/4) Epoch 21, batch 8000, loss[loss=0.1616, simple_loss=0.2393, pruned_loss=0.04196, over 7259.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06136, over 1611345.21 frames. 
], batch size: 16, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:07:06,436 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.194e+02 2.844e+02 3.383e+02 6.688e+02, threshold=5.687e+02, percent-clipped=2.0 +2023-02-07 03:07:07,193 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9847, 6.2235, 5.3384, 2.6708, 5.4706, 5.8563, 5.6813, 5.6032], + device='cuda:2'), covar=tensor([0.0547, 0.0362, 0.0853, 0.3943, 0.0698, 0.0703, 0.1149, 0.0544], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0428, 0.0431, 0.0531, 0.0422, 0.0440, 0.0421, 0.0380], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:07:12,039 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9842, 1.6845, 2.0240, 1.8205, 1.9282, 2.0047, 1.8400, 0.8259], + device='cuda:2'), covar=tensor([0.5719, 0.5068, 0.2023, 0.3555, 0.2511, 0.3105, 0.1970, 0.5033], + device='cuda:2'), in_proj_covar=tensor([0.0945, 0.0977, 0.0803, 0.0946, 0.0998, 0.0892, 0.0746, 0.0823], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 03:07:12,145 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 03:07:14,027 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:07:15,418 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169702.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 03:07:19,700 INFO [train.py:901] (2/4) Epoch 21, batch 8050, loss[loss=0.2326, simple_loss=0.3121, pruned_loss=0.07651, over 7193.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2852, pruned_loss=0.06139, over 1586477.39 frames. ], batch size: 71, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:07:30,630 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:07:32,001 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169727.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:07:52,653 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 03:07:58,222 INFO [train.py:901] (2/4) Epoch 22, batch 0, loss[loss=0.1745, simple_loss=0.2549, pruned_loss=0.04706, over 7821.00 frames. ], tot_loss[loss=0.1745, simple_loss=0.2549, pruned_loss=0.04706, over 7821.00 frames. ], batch size: 20, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:07:58,222 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 03:08:05,038 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6955, 1.8440, 1.5897, 2.0750, 1.2268, 1.4524, 1.6733, 1.7797], + device='cuda:2'), covar=tensor([0.0736, 0.0631, 0.0922, 0.0537, 0.1046, 0.1256, 0.0741, 0.0751], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0207, 0.0246, 0.0250, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:08:09,347 INFO [train.py:935] (2/4) Epoch 22, validation: loss=0.1743, simple_loss=0.2746, pruned_loss=0.03702, over 944034.00 frames. 
+2023-02-07 03:08:09,348 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 03:08:12,912 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169747.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:08:17,071 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6966, 1.6528, 1.9986, 1.8593, 1.0779, 1.7082, 2.2375, 2.2141], + device='cuda:2'), covar=tensor([0.0432, 0.1220, 0.1597, 0.1294, 0.0559, 0.1393, 0.0583, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0160, 0.0100, 0.0164, 0.0113, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 03:08:24,252 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 03:08:25,073 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:08:36,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5692, 2.5386, 1.8092, 2.2599, 2.1150, 1.6367, 2.0404, 2.1955], + device='cuda:2'), covar=tensor([0.1501, 0.0473, 0.1225, 0.0628, 0.0749, 0.1467, 0.0972, 0.0997], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0234, 0.0331, 0.0305, 0.0298, 0.0332, 0.0340, 0.0315], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:08:42,193 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.482e+02 2.980e+02 3.558e+02 1.069e+03, threshold=5.959e+02, percent-clipped=8.0 +2023-02-07 03:08:44,166 INFO [train.py:901] (2/4) Epoch 22, batch 50, loss[loss=0.24, simple_loss=0.2982, pruned_loss=0.09092, over 7428.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2849, pruned_loss=0.06159, over 361730.18 frames. ], batch size: 17, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:08:54,109 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:01,057 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 03:09:02,044 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:19,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169840.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:20,336 INFO [train.py:901] (2/4) Epoch 22, batch 100, loss[loss=0.2066, simple_loss=0.2971, pruned_loss=0.05801, over 8359.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2869, pruned_loss=0.06269, over 639048.65 frames. ], batch size: 24, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:09:23,119 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 03:09:25,374 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:28,968 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. 
limit=2.0 +2023-02-07 03:09:42,078 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:52,904 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.356e+02 3.069e+02 3.800e+02 7.981e+02, threshold=6.138e+02, percent-clipped=3.0 +2023-02-07 03:09:55,642 INFO [train.py:901] (2/4) Epoch 22, batch 150, loss[loss=0.2235, simple_loss=0.3038, pruned_loss=0.07157, over 8468.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2873, pruned_loss=0.06145, over 859170.34 frames. ], batch size: 25, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:09:55,877 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:10:10,890 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.08 vs. limit=5.0 +2023-02-07 03:10:12,774 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169917.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:10:23,673 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9657, 1.3088, 1.5736, 1.2552, 0.9158, 1.3810, 1.7578, 1.6070], + device='cuda:2'), covar=tensor([0.0515, 0.1349, 0.1876, 0.1584, 0.0662, 0.1586, 0.0694, 0.0669], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0159, 0.0099, 0.0163, 0.0112, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 03:10:30,755 INFO [train.py:901] (2/4) Epoch 22, batch 200, loss[loss=0.2061, simple_loss=0.3011, pruned_loss=0.05561, over 8495.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2884, pruned_loss=0.06127, over 1031482.82 frames. ], batch size: 49, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:10:58,689 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:11:02,622 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.362e+02 2.871e+02 3.395e+02 8.094e+02, threshold=5.742e+02, percent-clipped=2.0 +2023-02-07 03:11:04,627 INFO [train.py:901] (2/4) Epoch 22, batch 250, loss[loss=0.2707, simple_loss=0.3363, pruned_loss=0.1026, over 8513.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06134, over 1159818.56 frames. ], batch size: 28, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:11:17,880 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 03:11:26,113 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 03:11:41,665 INFO [train.py:901] (2/4) Epoch 22, batch 300, loss[loss=0.2038, simple_loss=0.2914, pruned_loss=0.05806, over 7940.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2861, pruned_loss=0.0602, over 1263827.46 frames. ], batch size: 20, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:11:50,133 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. 
limit=2.0 +2023-02-07 03:11:56,574 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:13,696 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.486e+02 2.821e+02 3.492e+02 6.452e+02, threshold=5.641e+02, percent-clipped=3.0 +2023-02-07 03:12:15,179 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:15,769 INFO [train.py:901] (2/4) Epoch 22, batch 350, loss[loss=0.2177, simple_loss=0.3017, pruned_loss=0.06682, over 8507.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2868, pruned_loss=0.06064, over 1338116.23 frames. ], batch size: 26, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:12:19,958 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170098.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:27,046 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:49,714 INFO [train.py:901] (2/4) Epoch 22, batch 400, loss[loss=0.1995, simple_loss=0.2926, pruned_loss=0.05324, over 8257.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2859, pruned_loss=0.06021, over 1398861.83 frames. ], batch size: 24, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:12:53,699 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170148.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:13:22,569 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.277e+02 2.821e+02 3.460e+02 6.418e+02, threshold=5.643e+02, percent-clipped=3.0 +2023-02-07 03:13:24,663 INFO [train.py:901] (2/4) Epoch 22, batch 450, loss[loss=0.1989, simple_loss=0.2713, pruned_loss=0.0633, over 8086.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.286, pruned_loss=0.05986, over 1449873.14 frames. ], batch size: 21, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:13:34,380 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170206.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:13:46,333 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:13:58,268 INFO [train.py:901] (2/4) Epoch 22, batch 500, loss[loss=0.1816, simple_loss=0.2761, pruned_loss=0.04349, over 8034.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2859, pruned_loss=0.06042, over 1487203.41 frames. ], batch size: 22, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:14:13,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:14:31,687 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.263e+02 2.770e+02 3.716e+02 6.957e+02, threshold=5.540e+02, percent-clipped=5.0 +2023-02-07 03:14:34,516 INFO [train.py:901] (2/4) Epoch 22, batch 550, loss[loss=0.1947, simple_loss=0.2955, pruned_loss=0.04698, over 8536.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.287, pruned_loss=0.06114, over 1516891.84 frames. 
], batch size: 31, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:15:00,844 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9104, 2.2664, 1.7213, 2.8947, 1.2527, 1.5441, 1.8616, 2.2332], + device='cuda:2'), covar=tensor([0.0823, 0.0789, 0.1053, 0.0405, 0.1256, 0.1428, 0.1065, 0.0835], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0196, 0.0243, 0.0213, 0.0206, 0.0245, 0.0248, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:15:08,213 INFO [train.py:901] (2/4) Epoch 22, batch 600, loss[loss=0.196, simple_loss=0.2804, pruned_loss=0.05581, over 7810.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2879, pruned_loss=0.06158, over 1536058.89 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:15:16,557 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170354.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:15:27,502 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 03:15:34,303 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:15:40,792 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.463e+02 3.010e+02 3.561e+02 9.437e+02, threshold=6.021e+02, percent-clipped=1.0 +2023-02-07 03:15:42,748 INFO [train.py:901] (2/4) Epoch 22, batch 650, loss[loss=0.2166, simple_loss=0.2944, pruned_loss=0.06944, over 8130.00 frames. ], tot_loss[loss=0.208, simple_loss=0.29, pruned_loss=0.06298, over 1558593.02 frames. ], batch size: 22, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:15:52,779 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5164, 2.4072, 2.2851, 1.3271, 2.2101, 2.2769, 2.2389, 2.1857], + device='cuda:2'), covar=tensor([0.1090, 0.0864, 0.1190, 0.3818, 0.1003, 0.1205, 0.1523, 0.1042], + device='cuda:2'), in_proj_covar=tensor([0.0524, 0.0430, 0.0428, 0.0530, 0.0422, 0.0441, 0.0423, 0.0381], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:15:53,431 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170407.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:17,636 INFO [train.py:901] (2/4) Epoch 22, batch 700, loss[loss=0.1883, simple_loss=0.2847, pruned_loss=0.04593, over 8469.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2887, pruned_loss=0.0626, over 1570550.02 frames. 
], batch size: 25, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:16:31,460 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:42,963 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170479.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:43,708 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:49,120 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170487.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:50,916 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.347e+02 2.936e+02 3.672e+02 5.936e+02, threshold=5.871e+02, percent-clipped=0.0 +2023-02-07 03:16:52,904 INFO [train.py:901] (2/4) Epoch 22, batch 750, loss[loss=0.2622, simple_loss=0.3188, pruned_loss=0.1027, over 7806.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2889, pruned_loss=0.06272, over 1579810.90 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:17:01,713 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:11,626 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170519.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:13,543 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:14,716 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 03:17:23,976 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 03:17:27,394 INFO [train.py:901] (2/4) Epoch 22, batch 800, loss[loss=0.222, simple_loss=0.2987, pruned_loss=0.07263, over 8500.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2882, pruned_loss=0.06262, over 1587087.36 frames. ], batch size: 26, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:17:28,964 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170544.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:30,548 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 03:17:57,574 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:58,761 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.234e+02 2.598e+02 3.180e+02 6.753e+02, threshold=5.195e+02, percent-clipped=1.0 +2023-02-07 03:18:00,806 INFO [train.py:901] (2/4) Epoch 22, batch 850, loss[loss=0.1972, simple_loss=0.288, pruned_loss=0.05315, over 8518.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06174, over 1594771.55 frames. 
], batch size: 28, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:18:14,727 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6144, 1.6707, 2.2506, 1.4339, 1.1844, 2.2663, 0.4571, 1.3637], + device='cuda:2'), covar=tensor([0.2023, 0.1305, 0.0388, 0.1347, 0.3113, 0.0376, 0.2174, 0.1387], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0222, 0.0270, 0.0135, 0.0171, 0.0193], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:18:31,338 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 03:18:36,994 INFO [train.py:901] (2/4) Epoch 22, batch 900, loss[loss=0.2293, simple_loss=0.3103, pruned_loss=0.07414, over 8457.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06194, over 1598287.66 frames. ], batch size: 49, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:18:53,611 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5662, 1.8495, 1.9311, 1.1942, 2.0450, 1.4049, 0.5990, 1.8218], + device='cuda:2'), covar=tensor([0.0622, 0.0354, 0.0279, 0.0570, 0.0389, 0.0880, 0.0859, 0.0287], + device='cuda:2'), in_proj_covar=tensor([0.0454, 0.0394, 0.0345, 0.0444, 0.0375, 0.0534, 0.0390, 0.0419], + device='cuda:2'), out_proj_covar=tensor([1.2187e-04, 1.0338e-04, 9.0653e-05, 1.1683e-04, 9.8477e-05, 1.5078e-04, + 1.0542e-04, 1.1114e-04], device='cuda:2') +2023-02-07 03:19:09,383 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.381e+02 2.827e+02 3.296e+02 7.509e+02, threshold=5.655e+02, percent-clipped=4.0 +2023-02-07 03:19:11,446 INFO [train.py:901] (2/4) Epoch 22, batch 950, loss[loss=0.1776, simple_loss=0.2548, pruned_loss=0.05019, over 7658.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06106, over 1599045.46 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:19:13,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2850, 1.4652, 4.2683, 2.0180, 2.5092, 4.9129, 4.9136, 4.2291], + device='cuda:2'), covar=tensor([0.1247, 0.2008, 0.0306, 0.1984, 0.1181, 0.0157, 0.0418, 0.0539], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0323, 0.0286, 0.0317, 0.0310, 0.0265, 0.0420, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:19:43,613 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 03:19:46,353 INFO [train.py:901] (2/4) Epoch 22, batch 1000, loss[loss=0.1683, simple_loss=0.2578, pruned_loss=0.03939, over 8254.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2871, pruned_loss=0.06159, over 1601561.42 frames. 
], batch size: 22, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:19:49,225 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2496, 2.0838, 1.6516, 1.8865, 1.7540, 1.4463, 1.6586, 1.6608], + device='cuda:2'), covar=tensor([0.1364, 0.0435, 0.1181, 0.0563, 0.0722, 0.1465, 0.0939, 0.0899], + device='cuda:2'), in_proj_covar=tensor([0.0351, 0.0235, 0.0331, 0.0308, 0.0299, 0.0334, 0.0341, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:20:12,108 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170778.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:20:17,087 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 03:20:19,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 2.326e+02 2.890e+02 3.504e+02 6.405e+02, threshold=5.779e+02, percent-clipped=4.0 +2023-02-07 03:20:21,012 INFO [train.py:901] (2/4) Epoch 22, batch 1050, loss[loss=0.2265, simple_loss=0.2975, pruned_loss=0.07774, over 7638.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2875, pruned_loss=0.06186, over 1603366.74 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:20:28,486 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 03:20:28,709 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170803.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:20:41,785 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:20:55,954 INFO [train.py:901] (2/4) Epoch 22, batch 1100, loss[loss=0.1998, simple_loss=0.2771, pruned_loss=0.06124, over 8589.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2884, pruned_loss=0.06224, over 1608092.98 frames. ], batch size: 34, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:21:27,511 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2938, 2.1197, 1.7538, 1.9541, 1.8290, 1.4851, 1.7275, 1.6844], + device='cuda:2'), covar=tensor([0.1231, 0.0455, 0.1120, 0.0511, 0.0728, 0.1468, 0.0943, 0.0831], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0237, 0.0334, 0.0310, 0.0300, 0.0336, 0.0344, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:21:29,315 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.576e+02 3.127e+02 3.706e+02 1.049e+03, threshold=6.255e+02, percent-clipped=5.0 +2023-02-07 03:21:30,680 INFO [train.py:901] (2/4) Epoch 22, batch 1150, loss[loss=0.1678, simple_loss=0.2594, pruned_loss=0.0381, over 7811.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2894, pruned_loss=0.06315, over 1609326.71 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:21:37,427 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-07 03:21:45,385 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8493, 1.4399, 3.2534, 1.2410, 2.1947, 3.5850, 3.8663, 2.6422], + device='cuda:2'), covar=tensor([0.1459, 0.2146, 0.0481, 0.2784, 0.1265, 0.0352, 0.0592, 0.1082], + device='cuda:2'), in_proj_covar=tensor([0.0293, 0.0320, 0.0283, 0.0314, 0.0306, 0.0262, 0.0415, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 03:21:52,887 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4214, 2.7099, 2.2009, 3.2705, 1.9467, 2.0606, 2.2906, 2.7397], + device='cuda:2'), covar=tensor([0.0638, 0.0715, 0.0800, 0.0460, 0.0959, 0.1138, 0.0841, 0.0657], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0198, 0.0247, 0.0215, 0.0209, 0.0249, 0.0251, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:21:56,817 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170931.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:22:01,706 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:22:04,213 INFO [train.py:901] (2/4) Epoch 22, batch 1200, loss[loss=0.2063, simple_loss=0.2864, pruned_loss=0.06308, over 8195.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2889, pruned_loss=0.0627, over 1608574.85 frames. ], batch size: 23, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:22:07,060 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170946.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:22:38,802 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.384e+02 2.807e+02 3.549e+02 5.873e+02, threshold=5.615e+02, percent-clipped=0.0 +2023-02-07 03:22:40,086 INFO [train.py:901] (2/4) Epoch 22, batch 1250, loss[loss=0.1794, simple_loss=0.2781, pruned_loss=0.04037, over 8366.00 frames. ], tot_loss[loss=0.207, simple_loss=0.289, pruned_loss=0.06246, over 1612735.29 frames. ], batch size: 24, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:22:57,672 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2086, 4.1700, 3.7681, 1.9819, 3.6646, 3.8081, 3.7020, 3.5828], + device='cuda:2'), covar=tensor([0.0835, 0.0580, 0.1264, 0.4681, 0.0984, 0.0810, 0.1422, 0.0731], + device='cuda:2'), in_proj_covar=tensor([0.0522, 0.0430, 0.0430, 0.0530, 0.0422, 0.0441, 0.0420, 0.0382], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:22:58,790 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 03:23:14,550 INFO [train.py:901] (2/4) Epoch 22, batch 1300, loss[loss=0.207, simple_loss=0.2881, pruned_loss=0.06296, over 8583.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.0617, over 1612899.46 frames. 
], batch size: 31, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:23:17,508 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171046.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:23:47,495 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.382e+02 2.988e+02 3.753e+02 7.309e+02, threshold=5.975e+02, percent-clipped=5.0 +2023-02-07 03:23:48,839 INFO [train.py:901] (2/4) Epoch 22, batch 1350, loss[loss=0.2062, simple_loss=0.2957, pruned_loss=0.05833, over 8638.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2878, pruned_loss=0.06162, over 1612674.71 frames. ], batch size: 31, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:24:01,669 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171110.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:24:02,359 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6442, 1.4850, 4.8870, 1.7520, 4.2901, 4.0117, 4.3466, 4.2100], + device='cuda:2'), covar=tensor([0.0639, 0.5129, 0.0439, 0.4368, 0.1193, 0.0993, 0.0632, 0.0678], + device='cuda:2'), in_proj_covar=tensor([0.0625, 0.0637, 0.0688, 0.0620, 0.0704, 0.0604, 0.0606, 0.0672], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:24:23,449 INFO [train.py:901] (2/4) Epoch 22, batch 1400, loss[loss=0.2092, simple_loss=0.2944, pruned_loss=0.06199, over 8027.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2882, pruned_loss=0.062, over 1613191.40 frames. ], batch size: 22, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:24:23,822 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.78 vs. limit=5.0 +2023-02-07 03:24:55,486 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 3.047e+02 3.835e+02 9.203e+02, threshold=6.094e+02, percent-clipped=3.0 +2023-02-07 03:24:57,482 INFO [train.py:901] (2/4) Epoch 22, batch 1450, loss[loss=0.2702, simple_loss=0.3379, pruned_loss=0.1013, over 8536.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2874, pruned_loss=0.06169, over 1613904.35 frames. ], batch size: 49, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:24:58,894 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:25:06,244 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 03:25:12,522 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:25:16,690 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171219.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:25:32,453 INFO [train.py:901] (2/4) Epoch 22, batch 1500, loss[loss=0.2091, simple_loss=0.2804, pruned_loss=0.06893, over 7261.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2878, pruned_loss=0.06183, over 1616198.22 frames. 
], batch size: 16, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:26:04,592 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.477e+02 2.962e+02 3.885e+02 1.079e+03, threshold=5.924e+02, percent-clipped=2.0 +2023-02-07 03:26:04,686 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171290.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:26:05,961 INFO [train.py:901] (2/4) Epoch 22, batch 1550, loss[loss=0.2025, simple_loss=0.2957, pruned_loss=0.05462, over 7978.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.289, pruned_loss=0.06273, over 1614916.27 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:26:12,939 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:26:25,492 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3800, 1.5271, 1.3834, 1.7823, 0.7532, 1.2605, 1.2549, 1.4901], + device='cuda:2'), covar=tensor([0.0862, 0.0766, 0.1073, 0.0540, 0.1210, 0.1435, 0.0848, 0.0774], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0214, 0.0207, 0.0247, 0.0250, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:26:30,106 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:26:37,452 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8512, 1.6389, 6.0313, 2.1266, 5.4167, 5.1198, 5.5382, 5.4356], + device='cuda:2'), covar=tensor([0.0451, 0.4809, 0.0311, 0.3933, 0.0921, 0.0836, 0.0487, 0.0500], + device='cuda:2'), in_proj_covar=tensor([0.0629, 0.0642, 0.0693, 0.0623, 0.0708, 0.0608, 0.0611, 0.0676], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:26:40,691 INFO [train.py:901] (2/4) Epoch 22, batch 1600, loss[loss=0.191, simple_loss=0.2746, pruned_loss=0.05368, over 8124.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.288, pruned_loss=0.06235, over 1611366.31 frames. ], batch size: 22, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:26:55,771 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171363.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:27:13,638 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 2.510e+02 3.045e+02 3.987e+02 6.104e+02, threshold=6.090e+02, percent-clipped=2.0 +2023-02-07 03:27:15,007 INFO [train.py:901] (2/4) Epoch 22, batch 1650, loss[loss=0.2066, simple_loss=0.2962, pruned_loss=0.05854, over 7800.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2879, pruned_loss=0.06169, over 1610928.80 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:27:24,117 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171405.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:27:51,036 INFO [train.py:901] (2/4) Epoch 22, batch 1700, loss[loss=0.1604, simple_loss=0.2424, pruned_loss=0.03924, over 7221.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.287, pruned_loss=0.06114, over 1611496.26 frames. 
], batch size: 16, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:27:59,281 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171454.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:28:18,869 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-07 03:28:24,567 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.429e+02 3.050e+02 3.629e+02 7.357e+02, threshold=6.100e+02, percent-clipped=3.0 +2023-02-07 03:28:25,936 INFO [train.py:901] (2/4) Epoch 22, batch 1750, loss[loss=0.2093, simple_loss=0.2841, pruned_loss=0.06722, over 7804.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2867, pruned_loss=0.0611, over 1612242.03 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:28:42,135 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171516.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:29:00,472 INFO [train.py:901] (2/4) Epoch 22, batch 1800, loss[loss=0.1838, simple_loss=0.278, pruned_loss=0.04484, over 8485.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2871, pruned_loss=0.06129, over 1612737.56 frames. ], batch size: 27, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:29:11,509 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:29:19,681 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:29:34,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.408e+02 2.801e+02 3.784e+02 7.831e+02, threshold=5.602e+02, percent-clipped=2.0 +2023-02-07 03:29:35,951 INFO [train.py:901] (2/4) Epoch 22, batch 1850, loss[loss=0.2111, simple_loss=0.2935, pruned_loss=0.06437, over 8286.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2874, pruned_loss=0.06168, over 1610709.02 frames. ], batch size: 23, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:29:39,056 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 03:30:10,047 INFO [train.py:901] (2/4) Epoch 22, batch 1900, loss[loss=0.224, simple_loss=0.3088, pruned_loss=0.06956, over 8515.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2867, pruned_loss=0.06092, over 1613391.56 frames. ], batch size: 39, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:30:24,236 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171661.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:30:32,189 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171673.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:30:36,795 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 03:30:41,592 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171686.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:30:44,064 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.518e+02 3.035e+02 3.649e+02 9.576e+02, threshold=6.070e+02, percent-clipped=4.0 +2023-02-07 03:30:45,464 INFO [train.py:901] (2/4) Epoch 22, batch 1950, loss[loss=0.2202, simple_loss=0.3011, pruned_loss=0.06967, over 8461.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.0608, over 1612006.30 frames. 
], batch size: 25, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:30:48,017 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 03:30:56,293 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171707.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:31:07,790 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 03:31:20,038 INFO [train.py:901] (2/4) Epoch 22, batch 2000, loss[loss=0.2102, simple_loss=0.2979, pruned_loss=0.06127, over 8464.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2871, pruned_loss=0.06088, over 1617227.69 frames. ], batch size: 25, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:31:53,981 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.301e+02 2.928e+02 3.706e+02 6.798e+02, threshold=5.855e+02, percent-clipped=1.0 +2023-02-07 03:31:55,392 INFO [train.py:901] (2/4) Epoch 22, batch 2050, loss[loss=0.2293, simple_loss=0.3129, pruned_loss=0.07279, over 8190.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2872, pruned_loss=0.0607, over 1618581.05 frames. ], batch size: 23, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:32:17,603 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:19,740 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171825.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:31,138 INFO [train.py:901] (2/4) Epoch 22, batch 2100, loss[loss=0.2081, simple_loss=0.2962, pruned_loss=0.05998, over 8476.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.288, pruned_loss=0.06093, over 1619309.74 frames. ], batch size: 49, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:32:36,879 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171850.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:43,494 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:45,788 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4896, 1.5083, 1.9330, 1.3316, 1.0849, 1.9057, 0.4622, 1.2745], + device='cuda:2'), covar=tensor([0.1730, 0.1138, 0.0400, 0.1066, 0.2911, 0.0444, 0.2025, 0.1352], + device='cuda:2'), in_proj_covar=tensor([0.0190, 0.0198, 0.0127, 0.0223, 0.0272, 0.0137, 0.0171, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:33:05,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.505e+02 2.999e+02 3.749e+02 9.868e+02, threshold=5.998e+02, percent-clipped=7.0 +2023-02-07 03:33:06,894 INFO [train.py:901] (2/4) Epoch 22, batch 2150, loss[loss=0.1958, simple_loss=0.2829, pruned_loss=0.05436, over 8238.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.06099, over 1617144.42 frames. ], batch size: 24, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:33:33,135 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:33:42,597 INFO [train.py:901] (2/4) Epoch 22, batch 2200, loss[loss=0.2213, simple_loss=0.2995, pruned_loss=0.0715, over 8426.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2884, pruned_loss=0.06161, over 1619113.60 frames. 
], batch size: 29, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:33:51,043 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:33:51,692 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171955.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:34:04,373 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0 +2023-02-07 03:34:05,584 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171975.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:34:15,551 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.362e+02 2.812e+02 3.623e+02 6.076e+02, threshold=5.624e+02, percent-clipped=1.0 +2023-02-07 03:34:16,933 INFO [train.py:901] (2/4) Epoch 22, batch 2250, loss[loss=0.2436, simple_loss=0.3095, pruned_loss=0.08884, over 8326.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2879, pruned_loss=0.06136, over 1621165.75 frames. ], batch size: 26, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:34:17,294 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 03:34:54,269 INFO [train.py:901] (2/4) Epoch 22, batch 2300, loss[loss=0.2066, simple_loss=0.2828, pruned_loss=0.06514, over 7980.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2893, pruned_loss=0.06229, over 1620870.47 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:35:19,366 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2000, 2.1162, 1.7638, 2.0169, 1.7792, 1.5346, 1.6750, 1.7040], + device='cuda:2'), covar=tensor([0.1343, 0.0413, 0.1170, 0.0481, 0.0721, 0.1445, 0.0952, 0.0877], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0233, 0.0331, 0.0307, 0.0299, 0.0336, 0.0343, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:35:20,117 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172078.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:35:28,306 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.401e+02 3.005e+02 3.667e+02 7.010e+02, threshold=6.010e+02, percent-clipped=1.0 +2023-02-07 03:35:29,624 INFO [train.py:901] (2/4) Epoch 22, batch 2350, loss[loss=0.2196, simple_loss=0.2957, pruned_loss=0.07179, over 8191.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2882, pruned_loss=0.0615, over 1619922.93 frames. ], batch size: 23, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:35:37,304 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:36:01,295 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172136.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:36:05,202 INFO [train.py:901] (2/4) Epoch 22, batch 2400, loss[loss=0.1888, simple_loss=0.267, pruned_loss=0.05531, over 7696.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2884, pruned_loss=0.06204, over 1618919.67 frames. 
], batch size: 18, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:36:39,692 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.658e+02 3.455e+02 4.348e+02 7.809e+02, threshold=6.910e+02, percent-clipped=6.0 +2023-02-07 03:36:41,123 INFO [train.py:901] (2/4) Epoch 22, batch 2450, loss[loss=0.1882, simple_loss=0.2715, pruned_loss=0.05244, over 8472.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2882, pruned_loss=0.06206, over 1614098.34 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:36:45,727 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2835, 2.0309, 2.7069, 2.3271, 2.7035, 2.2833, 2.0822, 1.4778], + device='cuda:2'), covar=tensor([0.4970, 0.4819, 0.1888, 0.3402, 0.2333, 0.3039, 0.1868, 0.5184], + device='cuda:2'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0997, 0.0895, 0.0748, 0.0828], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 03:37:08,134 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172231.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:37:16,956 INFO [train.py:901] (2/4) Epoch 22, batch 2500, loss[loss=0.2203, simple_loss=0.2988, pruned_loss=0.07092, over 8499.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2868, pruned_loss=0.0614, over 1611793.11 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:37:26,699 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:37:36,998 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8529, 1.9946, 1.7809, 2.5600, 1.2001, 1.5751, 1.7590, 1.9471], + device='cuda:2'), covar=tensor([0.0741, 0.0777, 0.0856, 0.0389, 0.1097, 0.1232, 0.0858, 0.0856], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0197, 0.0243, 0.0215, 0.0206, 0.0248, 0.0251, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:37:50,849 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.288e+02 2.722e+02 3.540e+02 9.975e+02, threshold=5.443e+02, percent-clipped=1.0 +2023-02-07 03:37:52,251 INFO [train.py:901] (2/4) Epoch 22, batch 2550, loss[loss=0.204, simple_loss=0.2821, pruned_loss=0.06298, over 7935.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2868, pruned_loss=0.06127, over 1613426.88 frames. ], batch size: 20, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:37:56,720 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:38:25,418 INFO [train.py:901] (2/4) Epoch 22, batch 2600, loss[loss=0.2084, simple_loss=0.2741, pruned_loss=0.0713, over 7801.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2859, pruned_loss=0.06072, over 1613769.01 frames. ], batch size: 19, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:38:58,399 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.484e+02 3.096e+02 3.957e+02 1.134e+03, threshold=6.191e+02, percent-clipped=6.0 +2023-02-07 03:39:00,473 INFO [train.py:901] (2/4) Epoch 22, batch 2650, loss[loss=0.2734, simple_loss=0.3386, pruned_loss=0.1041, over 6752.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2862, pruned_loss=0.06122, over 1610698.86 frames. 
], batch size: 72, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:39:10,806 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4452, 2.8656, 2.1340, 3.9654, 1.7484, 2.1647, 2.2655, 3.0009], + device='cuda:2'), covar=tensor([0.0697, 0.0736, 0.0853, 0.0278, 0.1036, 0.1189, 0.0973, 0.0750], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0197, 0.0244, 0.0216, 0.0206, 0.0248, 0.0251, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:39:16,288 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172414.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:39:35,342 INFO [train.py:901] (2/4) Epoch 22, batch 2700, loss[loss=0.1806, simple_loss=0.2653, pruned_loss=0.04793, over 8140.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2859, pruned_loss=0.06051, over 1613636.16 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:40:02,842 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:40:09,203 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 03:40:09,437 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.345e+02 2.798e+02 3.767e+02 1.133e+03, threshold=5.596e+02, percent-clipped=4.0 +2023-02-07 03:40:10,850 INFO [train.py:901] (2/4) Epoch 22, batch 2750, loss[loss=0.1958, simple_loss=0.2863, pruned_loss=0.05271, over 8610.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2866, pruned_loss=0.06095, over 1616936.60 frames. ], batch size: 31, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:40:45,666 INFO [train.py:901] (2/4) Epoch 22, batch 2800, loss[loss=0.1799, simple_loss=0.2549, pruned_loss=0.05248, over 7193.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2864, pruned_loss=0.06083, over 1615383.85 frames. ], batch size: 16, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:41:18,227 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 2.395e+02 2.840e+02 3.614e+02 7.820e+02, threshold=5.680e+02, percent-clipped=6.0 +2023-02-07 03:41:20,389 INFO [train.py:901] (2/4) Epoch 22, batch 2850, loss[loss=0.2315, simple_loss=0.3132, pruned_loss=0.07491, over 8754.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.287, pruned_loss=0.06084, over 1616310.49 frames. ], batch size: 30, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:41:23,208 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:41:25,863 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2501, 3.1636, 2.9217, 1.5378, 2.8256, 2.9490, 2.8651, 2.6968], + device='cuda:2'), covar=tensor([0.1223, 0.0864, 0.1442, 0.4954, 0.1286, 0.1366, 0.1795, 0.1135], + device='cuda:2'), in_proj_covar=tensor([0.0520, 0.0431, 0.0427, 0.0532, 0.0423, 0.0443, 0.0424, 0.0383], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:41:37,777 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:41:44,843 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. 
limit=2.0 +2023-02-07 03:41:56,010 INFO [train.py:901] (2/4) Epoch 22, batch 2900, loss[loss=0.218, simple_loss=0.2979, pruned_loss=0.06906, over 7918.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.06101, over 1617321.67 frames. ], batch size: 20, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:41:57,554 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:42:15,798 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172670.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:42:24,160 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 03:42:28,900 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.482e+02 2.975e+02 3.907e+02 6.756e+02, threshold=5.949e+02, percent-clipped=4.0 +2023-02-07 03:42:30,295 INFO [train.py:901] (2/4) Epoch 22, batch 2950, loss[loss=0.1659, simple_loss=0.2555, pruned_loss=0.03818, over 7930.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2885, pruned_loss=0.06143, over 1616604.47 frames. ], batch size: 20, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:42:32,546 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:42:56,537 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4341, 2.3536, 3.1626, 2.5707, 2.9509, 2.4143, 2.2899, 1.8199], + device='cuda:2'), covar=tensor([0.5341, 0.4962, 0.1946, 0.3461, 0.2608, 0.3140, 0.1819, 0.5349], + device='cuda:2'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0995, 0.0896, 0.0745, 0.0827], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 03:43:05,667 INFO [train.py:901] (2/4) Epoch 22, batch 3000, loss[loss=0.1741, simple_loss=0.262, pruned_loss=0.04317, over 7965.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2897, pruned_loss=0.06203, over 1619838.91 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:43:05,667 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 03:43:17,970 INFO [train.py:935] (2/4) Epoch 22, validation: loss=0.1735, simple_loss=0.2739, pruned_loss=0.03659, over 944034.00 frames. +2023-02-07 03:43:17,971 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 03:43:25,641 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172752.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:43:51,438 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.191e+02 2.765e+02 3.574e+02 6.067e+02, threshold=5.530e+02, percent-clipped=1.0 +2023-02-07 03:43:52,749 INFO [train.py:901] (2/4) Epoch 22, batch 3050, loss[loss=0.219, simple_loss=0.294, pruned_loss=0.07202, over 7805.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2895, pruned_loss=0.0626, over 1618588.86 frames. 
], batch size: 20, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:44:23,922 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9607, 2.2404, 1.7948, 2.7387, 1.2216, 1.5869, 2.0288, 2.2228], + device='cuda:2'), covar=tensor([0.0737, 0.0800, 0.0884, 0.0369, 0.1153, 0.1312, 0.0811, 0.0729], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0198, 0.0246, 0.0217, 0.0208, 0.0249, 0.0252, 0.0211], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:44:26,386 INFO [train.py:901] (2/4) Epoch 22, batch 3100, loss[loss=0.1868, simple_loss=0.2688, pruned_loss=0.05243, over 8088.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2886, pruned_loss=0.06208, over 1619864.16 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:44:32,707 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172851.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:40,608 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:50,781 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:59,798 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.451e+02 3.163e+02 4.463e+02 7.617e+02, threshold=6.327e+02, percent-clipped=7.0 +2023-02-07 03:45:01,192 INFO [train.py:901] (2/4) Epoch 22, batch 3150, loss[loss=0.1988, simple_loss=0.2879, pruned_loss=0.05489, over 8318.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2887, pruned_loss=0.06272, over 1615370.39 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:45:33,032 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5883, 1.9463, 2.9155, 1.4852, 2.1558, 1.9509, 1.6291, 2.2153], + device='cuda:2'), covar=tensor([0.1917, 0.2525, 0.0920, 0.4414, 0.1840, 0.3188, 0.2322, 0.2110], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0605, 0.0558, 0.0642, 0.0645, 0.0591, 0.0534, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:45:35,470 INFO [train.py:901] (2/4) Epoch 22, batch 3200, loss[loss=0.1729, simple_loss=0.2673, pruned_loss=0.0393, over 8107.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.06167, over 1617285.83 frames. 
], batch size: 23, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:45:40,965 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3600, 1.4793, 1.4035, 1.7655, 0.6516, 1.2684, 1.2589, 1.4678], + device='cuda:2'), covar=tensor([0.0929, 0.0849, 0.1101, 0.0548, 0.1272, 0.1449, 0.0844, 0.0812], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0217, 0.0207, 0.0249, 0.0252, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:45:47,741 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:45:53,322 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1570, 1.4880, 1.6913, 1.4659, 1.0469, 1.4500, 1.9061, 1.6823], + device='cuda:2'), covar=tensor([0.0512, 0.1286, 0.1687, 0.1427, 0.0588, 0.1493, 0.0669, 0.0639], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0163, 0.0111, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 03:46:06,752 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:46:09,252 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.554e+02 2.964e+02 3.773e+02 6.891e+02, threshold=5.928e+02, percent-clipped=2.0 +2023-02-07 03:46:10,572 INFO [train.py:901] (2/4) Epoch 22, batch 3250, loss[loss=0.2349, simple_loss=0.3129, pruned_loss=0.0785, over 8248.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2876, pruned_loss=0.06188, over 1613009.45 frames. ], batch size: 24, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:46:28,195 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2779, 2.0673, 1.6320, 1.9357, 1.7041, 1.3959, 1.6687, 1.7431], + device='cuda:2'), covar=tensor([0.1346, 0.0425, 0.1226, 0.0530, 0.0728, 0.1572, 0.0990, 0.0861], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0235, 0.0336, 0.0312, 0.0302, 0.0341, 0.0348, 0.0321], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 03:46:45,372 INFO [train.py:901] (2/4) Epoch 22, batch 3300, loss[loss=0.2107, simple_loss=0.2914, pruned_loss=0.06502, over 8340.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2873, pruned_loss=0.06203, over 1611952.74 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:47:07,553 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:17,922 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.506e+02 2.831e+02 3.669e+02 6.075e+02, threshold=5.662e+02, percent-clipped=1.0 +2023-02-07 03:47:18,581 INFO [train.py:901] (2/4) Epoch 22, batch 3350, loss[loss=0.2251, simple_loss=0.3018, pruned_loss=0.07422, over 8043.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2871, pruned_loss=0.06155, over 1612359.96 frames. 
], batch size: 22, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:47:22,039 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:26,075 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:54,974 INFO [train.py:901] (2/4) Epoch 22, batch 3400, loss[loss=0.1981, simple_loss=0.2692, pruned_loss=0.06346, over 7933.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.288, pruned_loss=0.06264, over 1612517.45 frames. ], batch size: 20, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:48:12,778 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173168.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:48:21,517 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3750, 1.6316, 4.5663, 1.6790, 4.0362, 3.7663, 4.1487, 3.9926], + device='cuda:2'), covar=tensor([0.0618, 0.4131, 0.0423, 0.3972, 0.1052, 0.1005, 0.0586, 0.0594], + device='cuda:2'), in_proj_covar=tensor([0.0625, 0.0636, 0.0685, 0.0618, 0.0700, 0.0604, 0.0603, 0.0669], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:48:28,293 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 2.494e+02 3.128e+02 3.771e+02 6.972e+02, threshold=6.255e+02, percent-clipped=4.0 +2023-02-07 03:48:28,960 INFO [train.py:901] (2/4) Epoch 22, batch 3450, loss[loss=0.213, simple_loss=0.2945, pruned_loss=0.06581, over 8205.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2871, pruned_loss=0.06185, over 1615449.39 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:48:39,550 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:48:42,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173211.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:49:05,729 INFO [train.py:901] (2/4) Epoch 22, batch 3500, loss[loss=0.1926, simple_loss=0.272, pruned_loss=0.05665, over 7812.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2879, pruned_loss=0.06213, over 1611748.60 frames. ], batch size: 20, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:49:24,790 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 03:49:38,937 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.642e+02 3.082e+02 3.788e+02 9.506e+02, threshold=6.164e+02, percent-clipped=4.0 +2023-02-07 03:49:39,646 INFO [train.py:901] (2/4) Epoch 22, batch 3550, loss[loss=0.2091, simple_loss=0.287, pruned_loss=0.06554, over 8132.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2882, pruned_loss=0.06242, over 1609543.26 frames. 
], batch size: 22, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:49:50,461 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6322, 4.6304, 4.1319, 2.0269, 4.0572, 4.1991, 4.2293, 4.0289], + device='cuda:2'), covar=tensor([0.0702, 0.0473, 0.1044, 0.4958, 0.0925, 0.0914, 0.1262, 0.0701], + device='cuda:2'), in_proj_covar=tensor([0.0523, 0.0434, 0.0432, 0.0536, 0.0427, 0.0447, 0.0426, 0.0386], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:50:00,040 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173322.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:03,373 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:06,896 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:10,303 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9630, 1.6451, 1.9663, 1.5526, 1.0077, 1.5940, 2.2711, 2.2757], + device='cuda:2'), covar=tensor([0.0430, 0.1248, 0.1629, 0.1460, 0.0588, 0.1480, 0.0620, 0.0569], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 03:50:14,764 INFO [train.py:901] (2/4) Epoch 22, batch 3600, loss[loss=0.1675, simple_loss=0.2399, pruned_loss=0.04755, over 7694.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2891, pruned_loss=0.06281, over 1614069.84 frames. ], batch size: 18, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:50:24,996 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173356.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:26,359 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:43,489 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:48,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.398e+02 3.034e+02 4.459e+02 8.281e+02, threshold=6.068e+02, percent-clipped=7.0 +2023-02-07 03:50:49,307 INFO [train.py:901] (2/4) Epoch 22, batch 3650, loss[loss=0.193, simple_loss=0.2711, pruned_loss=0.05742, over 7651.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2894, pruned_loss=0.06301, over 1612417.11 frames. ], batch size: 19, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:23,355 INFO [train.py:901] (2/4) Epoch 22, batch 3700, loss[loss=0.1791, simple_loss=0.259, pruned_loss=0.04954, over 7960.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2886, pruned_loss=0.06218, over 1614517.05 frames. ], batch size: 21, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:24,745 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 03:51:42,307 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:51:51,707 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. 
limit=2.0 +2023-02-07 03:51:57,911 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.519e+02 2.931e+02 3.909e+02 7.363e+02, threshold=5.861e+02, percent-clipped=2.0 +2023-02-07 03:51:58,512 INFO [train.py:901] (2/4) Epoch 22, batch 3750, loss[loss=0.1783, simple_loss=0.2701, pruned_loss=0.04324, over 8268.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2893, pruned_loss=0.06266, over 1615231.68 frames. ], batch size: 24, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:58,725 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:52:11,077 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173509.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:52:12,777 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:52:18,141 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8067, 1.3291, 4.0058, 1.4603, 3.5143, 3.3472, 3.6507, 3.5161], + device='cuda:2'), covar=tensor([0.0667, 0.4642, 0.0596, 0.4094, 0.1259, 0.1012, 0.0653, 0.0724], + device='cuda:2'), in_proj_covar=tensor([0.0628, 0.0640, 0.0689, 0.0621, 0.0702, 0.0607, 0.0605, 0.0674], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:52:22,064 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9343, 1.7720, 2.0560, 1.7337, 0.8894, 1.7455, 2.1277, 2.2663], + device='cuda:2'), covar=tensor([0.0413, 0.1209, 0.1531, 0.1301, 0.0563, 0.1428, 0.0617, 0.0515], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 03:52:23,510 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 03:52:32,200 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6706, 1.5248, 1.8997, 1.5508, 0.9161, 1.6001, 2.0498, 1.8461], + device='cuda:2'), covar=tensor([0.0488, 0.1290, 0.1660, 0.1416, 0.0602, 0.1519, 0.0654, 0.0650], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 03:52:32,724 INFO [train.py:901] (2/4) Epoch 22, batch 3800, loss[loss=0.2018, simple_loss=0.2896, pruned_loss=0.057, over 8505.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2894, pruned_loss=0.0629, over 1614975.88 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:52:35,962 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.28 vs. 
limit=5.0 +2023-02-07 03:52:41,039 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8521, 1.6891, 2.4997, 1.6609, 1.3069, 2.5179, 0.5607, 1.5479], + device='cuda:2'), covar=tensor([0.1736, 0.1286, 0.0322, 0.1293, 0.2634, 0.0315, 0.2290, 0.1229], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0198, 0.0127, 0.0220, 0.0267, 0.0135, 0.0170, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:52:58,746 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:07,968 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.467e+02 3.140e+02 3.842e+02 8.904e+02, threshold=6.281e+02, percent-clipped=2.0 +2023-02-07 03:53:08,695 INFO [train.py:901] (2/4) Epoch 22, batch 3850, loss[loss=0.2099, simple_loss=0.2903, pruned_loss=0.06471, over 6972.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2882, pruned_loss=0.06226, over 1613344.88 frames. ], batch size: 71, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:53:16,529 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173603.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:30,745 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 03:53:33,549 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:36,894 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:43,567 INFO [train.py:901] (2/4) Epoch 22, batch 3900, loss[loss=0.2308, simple_loss=0.3165, pruned_loss=0.07252, over 8522.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.0619, over 1615262.47 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:54:02,780 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8451, 2.2277, 3.5862, 1.9202, 2.0167, 3.5228, 0.9968, 2.0894], + device='cuda:2'), covar=tensor([0.1561, 0.1242, 0.0262, 0.1784, 0.2298, 0.0354, 0.2078, 0.1475], + device='cuda:2'), in_proj_covar=tensor([0.0190, 0.0198, 0.0128, 0.0221, 0.0269, 0.0136, 0.0171, 0.0193], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:54:03,323 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173671.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:54:17,229 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.938e+02 2.507e+02 2.945e+02 3.654e+02 8.206e+02, threshold=5.890e+02, percent-clipped=3.0 +2023-02-07 03:54:17,888 INFO [train.py:901] (2/4) Epoch 22, batch 3950, loss[loss=0.1919, simple_loss=0.2822, pruned_loss=0.05074, over 8472.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2889, pruned_loss=0.06226, over 1619387.66 frames. 
], batch size: 25, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:54:37,442 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8507, 2.2321, 3.5249, 1.8599, 1.7318, 3.4975, 0.6340, 2.0306], + device='cuda:2'), covar=tensor([0.1181, 0.1163, 0.0221, 0.1683, 0.2653, 0.0292, 0.2377, 0.1358], + device='cuda:2'), in_proj_covar=tensor([0.0188, 0.0196, 0.0127, 0.0219, 0.0266, 0.0135, 0.0170, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:54:52,908 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.12 vs. limit=5.0 +2023-02-07 03:54:53,285 INFO [train.py:901] (2/4) Epoch 22, batch 4000, loss[loss=0.1981, simple_loss=0.2829, pruned_loss=0.05661, over 8092.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2881, pruned_loss=0.06196, over 1618112.21 frames. ], batch size: 21, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:55:23,150 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:55:26,124 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.329e+02 2.821e+02 3.599e+02 1.045e+03, threshold=5.642e+02, percent-clipped=6.0 +2023-02-07 03:55:26,793 INFO [train.py:901] (2/4) Epoch 22, batch 4050, loss[loss=0.2931, simple_loss=0.3463, pruned_loss=0.1199, over 7144.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2893, pruned_loss=0.06241, over 1620675.45 frames. ], batch size: 71, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:02,760 INFO [train.py:901] (2/4) Epoch 22, batch 4100, loss[loss=0.2061, simple_loss=0.287, pruned_loss=0.06264, over 8564.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2892, pruned_loss=0.06244, over 1618438.54 frames. ], batch size: 34, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:10,365 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173853.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 03:56:31,255 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173883.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:56:36,362 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.329e+02 2.764e+02 3.605e+02 7.317e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 03:56:37,019 INFO [train.py:901] (2/4) Epoch 22, batch 4150, loss[loss=0.2161, simple_loss=0.2842, pruned_loss=0.07406, over 7552.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2878, pruned_loss=0.06165, over 1616775.73 frames. ], batch size: 18, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:45,820 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5067, 1.5832, 2.0756, 1.2899, 1.2108, 2.0576, 0.3631, 1.2183], + device='cuda:2'), covar=tensor([0.1901, 0.1288, 0.0376, 0.1317, 0.2633, 0.0413, 0.2048, 0.1361], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0220, 0.0267, 0.0135, 0.0171, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 03:56:47,603 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:57:12,183 INFO [train.py:901] (2/4) Epoch 22, batch 4200, loss[loss=0.2069, simple_loss=0.2877, pruned_loss=0.06311, over 8525.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2896, pruned_loss=0.06261, over 1619802.01 frames. 
], batch size: 28, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:57:28,908 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 03:57:29,769 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173968.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 03:57:35,170 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:57:46,762 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.391e+02 3.056e+02 3.931e+02 9.713e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 03:57:46,782 INFO [train.py:901] (2/4) Epoch 22, batch 4250, loss[loss=0.1624, simple_loss=0.2335, pruned_loss=0.04568, over 7432.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2884, pruned_loss=0.06192, over 1613429.67 frames. ], batch size: 17, lr: 3.44e-03, grad_scale: 4.0 +2023-02-07 03:57:55,804 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 03:58:04,800 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1636, 4.1155, 3.7537, 1.8663, 3.7140, 3.8016, 3.7368, 3.6034], + device='cuda:2'), covar=tensor([0.0896, 0.0691, 0.1230, 0.5474, 0.0938, 0.1078, 0.1457, 0.0885], + device='cuda:2'), in_proj_covar=tensor([0.0527, 0.0436, 0.0433, 0.0538, 0.0425, 0.0448, 0.0428, 0.0388], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 03:58:15,727 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174033.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:17,065 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174035.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:21,636 INFO [train.py:901] (2/4) Epoch 22, batch 4300, loss[loss=0.1843, simple_loss=0.2781, pruned_loss=0.04523, over 8145.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2879, pruned_loss=0.06163, over 1607351.12 frames. ], batch size: 22, lr: 3.44e-03, grad_scale: 4.0 +2023-02-07 03:58:21,844 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:25,981 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 03:58:40,405 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:56,912 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:57,375 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.427e+02 2.775e+02 3.458e+02 5.995e+02, threshold=5.550e+02, percent-clipped=0.0 +2023-02-07 03:58:57,395 INFO [train.py:901] (2/4) Epoch 22, batch 4350, loss[loss=0.1977, simple_loss=0.2886, pruned_loss=0.0534, over 8483.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.288, pruned_loss=0.06182, over 1606579.11 frames. ], batch size: 29, lr: 3.43e-03, grad_scale: 4.0 +2023-02-07 03:59:25,061 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 03:59:32,547 INFO [train.py:901] (2/4) Epoch 22, batch 4400, loss[loss=0.2068, simple_loss=0.2875, pruned_loss=0.06305, over 8463.00 frames. 
], tot_loss[loss=0.2057, simple_loss=0.2883, pruned_loss=0.06162, over 1607868.94 frames. ], batch size: 39, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 03:59:36,055 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1800, 2.3705, 1.9188, 2.9649, 1.5032, 1.7737, 2.3263, 2.4454], + device='cuda:2'), covar=tensor([0.0711, 0.0759, 0.0946, 0.0347, 0.0989, 0.1236, 0.0664, 0.0719], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0199, 0.0246, 0.0216, 0.0208, 0.0247, 0.0250, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 03:59:42,823 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:00:06,455 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 04:00:07,758 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.623e+02 3.065e+02 3.902e+02 1.119e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-07 04:00:07,777 INFO [train.py:901] (2/4) Epoch 22, batch 4450, loss[loss=0.1876, simple_loss=0.2739, pruned_loss=0.05068, over 8290.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2885, pruned_loss=0.06229, over 1602088.44 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:00:24,478 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-07 04:00:26,069 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4538, 1.8563, 3.0207, 1.3517, 2.2916, 1.8611, 1.6242, 2.2655], + device='cuda:2'), covar=tensor([0.2014, 0.2635, 0.0769, 0.4741, 0.1764, 0.3350, 0.2337, 0.2158], + device='cuda:2'), in_proj_covar=tensor([0.0528, 0.0605, 0.0559, 0.0645, 0.0646, 0.0591, 0.0536, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:00:30,159 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174224.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:00:41,752 INFO [train.py:901] (2/4) Epoch 22, batch 4500, loss[loss=0.1901, simple_loss=0.2602, pruned_loss=0.05999, over 7673.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2882, pruned_loss=0.06239, over 1604030.64 frames. ], batch size: 19, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:00:46,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174249.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:00:56,983 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 04:01:17,041 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.524e+02 3.306e+02 4.354e+02 7.569e+02, threshold=6.612e+02, percent-clipped=6.0 +2023-02-07 04:01:17,062 INFO [train.py:901] (2/4) Epoch 22, batch 4550, loss[loss=0.233, simple_loss=0.3072, pruned_loss=0.07939, over 8085.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2884, pruned_loss=0.06237, over 1607368.28 frames. 
], batch size: 21, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:23,306 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:01:28,571 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:01:44,047 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 04:01:51,053 INFO [train.py:901] (2/4) Epoch 22, batch 4600, loss[loss=0.2718, simple_loss=0.336, pruned_loss=0.1038, over 7258.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.289, pruned_loss=0.06279, over 1609647.87 frames. ], batch size: 72, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:54,751 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:12,035 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:15,444 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:16,892 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:25,978 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.375e+02 2.973e+02 3.873e+02 1.031e+03, threshold=5.946e+02, percent-clipped=3.0 +2023-02-07 04:02:25,999 INFO [train.py:901] (2/4) Epoch 22, batch 4650, loss[loss=0.2122, simple_loss=0.2797, pruned_loss=0.07233, over 7715.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2879, pruned_loss=0.06194, over 1612098.15 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:02:31,945 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 04:02:37,939 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:02,511 INFO [train.py:901] (2/4) Epoch 22, batch 4700, loss[loss=0.2274, simple_loss=0.3104, pruned_loss=0.07221, over 8509.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2876, pruned_loss=0.06173, over 1614865.39 frames. ], batch size: 49, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:21,327 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4608, 2.6551, 3.1384, 1.7709, 3.3068, 2.1522, 1.6700, 2.4090], + device='cuda:2'), covar=tensor([0.0763, 0.0367, 0.0335, 0.0753, 0.0380, 0.0768, 0.0924, 0.0528], + device='cuda:2'), in_proj_covar=tensor([0.0449, 0.0387, 0.0343, 0.0442, 0.0372, 0.0528, 0.0385, 0.0415], + device='cuda:2'), out_proj_covar=tensor([1.2022e-04, 1.0130e-04, 9.0373e-05, 1.1635e-04, 9.7755e-05, 1.4885e-04, + 1.0387e-04, 1.0982e-04], device='cuda:2') +2023-02-07 04:03:37,052 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.374e+02 2.923e+02 3.899e+02 9.329e+02, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 04:03:37,072 INFO [train.py:901] (2/4) Epoch 22, batch 4750, loss[loss=0.2375, simple_loss=0.3213, pruned_loss=0.07687, over 8457.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2865, pruned_loss=0.06117, over 1613960.11 frames. 
], batch size: 25, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:37,251 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:38,612 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:43,329 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:04:04,458 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 04:04:06,510 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 04:04:12,369 INFO [train.py:901] (2/4) Epoch 22, batch 4800, loss[loss=0.2017, simple_loss=0.2911, pruned_loss=0.05616, over 8460.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2866, pruned_loss=0.06093, over 1612592.33 frames. ], batch size: 25, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:46,102 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.398e+02 2.995e+02 3.860e+02 8.125e+02, threshold=5.990e+02, percent-clipped=3.0 +2023-02-07 04:04:46,122 INFO [train.py:901] (2/4) Epoch 22, batch 4850, loss[loss=0.227, simple_loss=0.3111, pruned_loss=0.07148, over 8335.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.0613, over 1614633.75 frames. ], batch size: 26, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:55,435 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 04:05:02,324 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:20,310 INFO [train.py:901] (2/4) Epoch 22, batch 4900, loss[loss=0.2146, simple_loss=0.3027, pruned_loss=0.0633, over 8133.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.288, pruned_loss=0.06187, over 1615630.62 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:05:23,131 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:29,310 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:56,489 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.582e+02 3.121e+02 3.821e+02 7.682e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-07 04:05:56,510 INFO [train.py:901] (2/4) Epoch 22, batch 4950, loss[loss=0.1967, simple_loss=0.2815, pruned_loss=0.05596, over 8511.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2873, pruned_loss=0.0614, over 1613342.03 frames. 
], batch size: 28, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:17,800 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:21,278 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3650, 2.0185, 1.6710, 2.0627, 1.7406, 1.2574, 1.7365, 1.8431], + device='cuda:2'), covar=tensor([0.1144, 0.0441, 0.1279, 0.0424, 0.0768, 0.1683, 0.0900, 0.0611], + device='cuda:2'), in_proj_covar=tensor([0.0350, 0.0231, 0.0331, 0.0305, 0.0297, 0.0336, 0.0339, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:06:27,422 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0723, 2.1023, 1.8469, 2.5290, 1.4454, 1.7466, 2.0214, 2.1497], + device='cuda:2'), covar=tensor([0.0625, 0.0732, 0.0807, 0.0444, 0.0979, 0.1047, 0.0715, 0.0668], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0198, 0.0245, 0.0216, 0.0208, 0.0248, 0.0250, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:06:30,654 INFO [train.py:901] (2/4) Epoch 22, batch 5000, loss[loss=0.2502, simple_loss=0.3252, pruned_loss=0.08756, over 7130.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2886, pruned_loss=0.06195, over 1614575.28 frames. ], batch size: 71, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:35,017 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,250 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,400 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:44,694 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:51,025 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:54,585 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174773.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:55,981 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:07:07,682 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.477e+02 2.928e+02 3.612e+02 7.754e+02, threshold=5.856e+02, percent-clipped=3.0 +2023-02-07 04:07:07,702 INFO [train.py:901] (2/4) Epoch 22, batch 5050, loss[loss=0.2168, simple_loss=0.307, pruned_loss=0.06334, over 8514.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2871, pruned_loss=0.06121, over 1616354.44 frames. 
], batch size: 28, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:16,958 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1869, 1.3053, 1.5708, 1.2247, 0.7719, 1.3379, 1.2467, 1.0475], + device='cuda:2'), covar=tensor([0.0619, 0.1293, 0.1633, 0.1451, 0.0582, 0.1509, 0.0697, 0.0711], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0100, 0.0164, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 04:07:27,836 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4252, 2.3696, 3.2085, 2.5781, 3.0171, 2.5281, 2.2470, 1.7638], + device='cuda:2'), covar=tensor([0.5706, 0.4896, 0.1995, 0.3668, 0.2551, 0.2982, 0.1824, 0.5562], + device='cuda:2'), in_proj_covar=tensor([0.0935, 0.0975, 0.0800, 0.0941, 0.0989, 0.0888, 0.0745, 0.0821], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 04:07:34,469 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 04:07:42,673 INFO [train.py:901] (2/4) Epoch 22, batch 5100, loss[loss=0.1943, simple_loss=0.2775, pruned_loss=0.05556, over 8293.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06151, over 1616203.53 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:58,167 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:03,634 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:17,810 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.498e+02 3.130e+02 3.757e+02 7.363e+02, threshold=6.259e+02, percent-clipped=3.0 +2023-02-07 04:08:17,830 INFO [train.py:901] (2/4) Epoch 22, batch 5150, loss[loss=0.19, simple_loss=0.2638, pruned_loss=0.05811, over 7660.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2875, pruned_loss=0.06248, over 1610316.82 frames. ], batch size: 19, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:21,138 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:52,065 INFO [train.py:901] (2/4) Epoch 22, batch 5200, loss[loss=0.2162, simple_loss=0.2929, pruned_loss=0.06972, over 8507.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2884, pruned_loss=0.06257, over 1610843.38 frames. 
], batch size: 26, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:52,925 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1250, 1.3867, 4.2866, 1.5795, 3.7814, 3.5479, 3.9165, 3.7509], + device='cuda:2'), covar=tensor([0.0683, 0.4953, 0.0550, 0.4426, 0.1284, 0.1044, 0.0650, 0.0785], + device='cuda:2'), in_proj_covar=tensor([0.0628, 0.0646, 0.0695, 0.0627, 0.0708, 0.0603, 0.0606, 0.0677], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:08:58,259 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2067, 4.1683, 3.7670, 1.8584, 3.6530, 3.7678, 3.8598, 3.5793], + device='cuda:2'), covar=tensor([0.0783, 0.0552, 0.1104, 0.4847, 0.0957, 0.1036, 0.1158, 0.0879], + device='cuda:2'), in_proj_covar=tensor([0.0522, 0.0433, 0.0432, 0.0536, 0.0424, 0.0444, 0.0424, 0.0387], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:09:26,992 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.432e+02 2.854e+02 3.739e+02 7.258e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-07 04:09:27,012 INFO [train.py:901] (2/4) Epoch 22, batch 5250, loss[loss=0.2411, simple_loss=0.3145, pruned_loss=0.08384, over 8503.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2889, pruned_loss=0.06288, over 1605734.27 frames. ], batch size: 26, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:09:31,835 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 04:09:44,061 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:09:49,559 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:01,590 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:01,992 INFO [train.py:901] (2/4) Epoch 22, batch 5300, loss[loss=0.24, simple_loss=0.3086, pruned_loss=0.08566, over 8314.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2896, pruned_loss=0.06355, over 1611778.97 frames. ], batch size: 25, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:07,100 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:19,114 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:35,754 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.519e+02 3.149e+02 3.909e+02 1.075e+03, threshold=6.297e+02, percent-clipped=6.0 +2023-02-07 04:10:35,774 INFO [train.py:901] (2/4) Epoch 22, batch 5350, loss[loss=0.1874, simple_loss=0.2679, pruned_loss=0.05343, over 8025.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2888, pruned_loss=0.06286, over 1615406.97 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:57,896 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:11,826 INFO [train.py:901] (2/4) Epoch 22, batch 5400, loss[loss=0.1754, simple_loss=0.2532, pruned_loss=0.0488, over 7201.00 frames. 
], tot_loss[loss=0.2075, simple_loss=0.2893, pruned_loss=0.06291, over 1617519.25 frames. ], batch size: 16, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:11:13,265 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2653, 3.1636, 2.9301, 1.5968, 2.8698, 2.8823, 2.8748, 2.8385], + device='cuda:2'), covar=tensor([0.1168, 0.0848, 0.1437, 0.4503, 0.1119, 0.1260, 0.1663, 0.1024], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0432, 0.0430, 0.0533, 0.0422, 0.0442, 0.0422, 0.0384], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:11:14,649 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:22,726 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:39,705 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:46,268 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.426e+02 2.833e+02 4.034e+02 1.686e+03, threshold=5.665e+02, percent-clipped=5.0 +2023-02-07 04:11:46,288 INFO [train.py:901] (2/4) Epoch 22, batch 5450, loss[loss=0.149, simple_loss=0.2283, pruned_loss=0.03484, over 7694.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2876, pruned_loss=0.06199, over 1613042.74 frames. ], batch size: 18, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:09,775 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:12:20,248 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 04:12:22,329 INFO [train.py:901] (2/4) Epoch 22, batch 5500, loss[loss=0.2011, simple_loss=0.2867, pruned_loss=0.05774, over 7239.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2854, pruned_loss=0.06077, over 1613037.27 frames. ], batch size: 16, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:40,057 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 04:12:56,619 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.376e+02 2.848e+02 3.508e+02 8.289e+02, threshold=5.697e+02, percent-clipped=6.0 +2023-02-07 04:12:56,639 INFO [train.py:901] (2/4) Epoch 22, batch 5550, loss[loss=0.2108, simple_loss=0.3004, pruned_loss=0.06062, over 8505.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2862, pruned_loss=0.06092, over 1615181.43 frames. ], batch size: 26, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:31,809 INFO [train.py:901] (2/4) Epoch 22, batch 5600, loss[loss=0.2097, simple_loss=0.2985, pruned_loss=0.06047, over 8559.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2856, pruned_loss=0.0605, over 1612541.88 frames. 
], batch size: 34, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:46,081 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8241, 2.3045, 3.9167, 1.6599, 2.7860, 2.3698, 1.8079, 2.7687], + device='cuda:2'), covar=tensor([0.1798, 0.2480, 0.1018, 0.4376, 0.1919, 0.3049, 0.2387, 0.2601], + device='cuda:2'), in_proj_covar=tensor([0.0525, 0.0604, 0.0554, 0.0641, 0.0645, 0.0589, 0.0535, 0.0629], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:13:49,365 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9755, 1.8769, 2.5511, 1.5059, 1.5212, 2.5318, 0.5354, 1.5890], + device='cuda:2'), covar=tensor([0.1662, 0.1260, 0.0333, 0.1356, 0.2409, 0.0356, 0.2004, 0.1261], + device='cuda:2'), in_proj_covar=tensor([0.0191, 0.0197, 0.0129, 0.0220, 0.0267, 0.0137, 0.0168, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 04:13:50,743 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.30 vs. limit=5.0 +2023-02-07 04:14:03,188 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5612, 2.9099, 2.3279, 3.9589, 1.7090, 2.1611, 2.4850, 2.9671], + device='cuda:2'), covar=tensor([0.0656, 0.0775, 0.0826, 0.0217, 0.1108, 0.1134, 0.0902, 0.0783], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0198, 0.0247, 0.0216, 0.0208, 0.0247, 0.0252, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:14:03,829 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:06,408 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.532e+02 3.141e+02 4.135e+02 1.836e+03, threshold=6.283e+02, percent-clipped=10.0 +2023-02-07 04:14:06,428 INFO [train.py:901] (2/4) Epoch 22, batch 5650, loss[loss=0.1708, simple_loss=0.2523, pruned_loss=0.04467, over 8092.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2852, pruned_loss=0.06, over 1612197.36 frames. ], batch size: 21, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:09,955 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7511, 2.1491, 3.2733, 1.5229, 2.4357, 2.0008, 1.8586, 2.3541], + device='cuda:2'), covar=tensor([0.1880, 0.2272, 0.0800, 0.4442, 0.1871, 0.3263, 0.2220, 0.2323], + device='cuda:2'), in_proj_covar=tensor([0.0525, 0.0604, 0.0553, 0.0640, 0.0645, 0.0589, 0.0535, 0.0628], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:14:20,095 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:22,660 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 04:14:37,557 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:40,743 INFO [train.py:901] (2/4) Epoch 22, batch 5700, loss[loss=0.1839, simple_loss=0.2587, pruned_loss=0.05456, over 7815.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2869, pruned_loss=0.06091, over 1611391.79 frames. 
], batch size: 19, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:56,174 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:15,527 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.452e+02 2.878e+02 3.661e+02 5.836e+02, threshold=5.755e+02, percent-clipped=0.0 +2023-02-07 04:15:15,548 INFO [train.py:901] (2/4) Epoch 22, batch 5750, loss[loss=0.2087, simple_loss=0.2966, pruned_loss=0.06038, over 8083.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06099, over 1613815.02 frames. ], batch size: 21, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:15:20,430 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:22,437 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:27,206 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 04:15:50,255 INFO [train.py:901] (2/4) Epoch 22, batch 5800, loss[loss=0.1957, simple_loss=0.2787, pruned_loss=0.0564, over 8465.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2867, pruned_loss=0.06113, over 1609898.91 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:09,022 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:16:25,902 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 2.280e+02 2.739e+02 3.457e+02 6.413e+02, threshold=5.479e+02, percent-clipped=3.0 +2023-02-07 04:16:25,922 INFO [train.py:901] (2/4) Epoch 22, batch 5850, loss[loss=0.2184, simple_loss=0.2946, pruned_loss=0.0711, over 7632.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.06072, over 1610913.13 frames. ], batch size: 19, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:42,242 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:00,355 INFO [train.py:901] (2/4) Epoch 22, batch 5900, loss[loss=0.2153, simple_loss=0.2922, pruned_loss=0.06923, over 7973.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06075, over 1613926.18 frames. ], batch size: 21, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:17:29,299 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:35,264 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.520e+02 3.043e+02 3.699e+02 9.671e+02, threshold=6.086e+02, percent-clipped=7.0 +2023-02-07 04:17:35,284 INFO [train.py:901] (2/4) Epoch 22, batch 5950, loss[loss=0.1639, simple_loss=0.2572, pruned_loss=0.03526, over 7916.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.287, pruned_loss=0.06121, over 1609969.80 frames. 
], batch size: 20, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:00,065 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175728.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:02,746 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:09,579 INFO [train.py:901] (2/4) Epoch 22, batch 6000, loss[loss=0.2631, simple_loss=0.3273, pruned_loss=0.09943, over 6491.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2868, pruned_loss=0.06139, over 1603829.89 frames. ], batch size: 71, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:09,579 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 04:18:21,635 INFO [train.py:935] (2/4) Epoch 22, validation: loss=0.1729, simple_loss=0.2732, pruned_loss=0.03632, over 944034.00 frames. +2023-02-07 04:18:21,636 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 04:18:24,899 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 04:18:31,486 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:34,523 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.94 vs. limit=5.0 +2023-02-07 04:18:41,928 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-07 04:18:56,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.478e+02 2.934e+02 3.623e+02 7.032e+02, threshold=5.869e+02, percent-clipped=2.0 +2023-02-07 04:18:56,237 INFO [train.py:901] (2/4) Epoch 22, batch 6050, loss[loss=0.2285, simple_loss=0.3115, pruned_loss=0.07273, over 8496.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2869, pruned_loss=0.06111, over 1607076.52 frames. ], batch size: 26, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:06,555 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1857, 2.3657, 1.8567, 2.8388, 1.4857, 1.7611, 2.1106, 2.4146], + device='cuda:2'), covar=tensor([0.0671, 0.0698, 0.0922, 0.0387, 0.1042, 0.1212, 0.0780, 0.0657], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0197, 0.0246, 0.0216, 0.0206, 0.0246, 0.0251, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:19:31,868 INFO [train.py:901] (2/4) Epoch 22, batch 6100, loss[loss=0.2111, simple_loss=0.2901, pruned_loss=0.06602, over 8195.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06128, over 1612281.92 frames. ], batch size: 23, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:32,679 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:35,610 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:51,976 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:52,698 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:56,544 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-07 04:20:07,208 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.365e+02 2.974e+02 3.880e+02 6.577e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 04:20:07,228 INFO [train.py:901] (2/4) Epoch 22, batch 6150, loss[loss=0.2246, simple_loss=0.3139, pruned_loss=0.06763, over 8727.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2862, pruned_loss=0.06058, over 1612861.82 frames. ], batch size: 34, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:10,663 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:11,991 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:20,143 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9889, 1.6984, 2.0319, 1.8260, 1.9801, 2.0271, 1.8834, 0.7973], + device='cuda:2'), covar=tensor([0.5632, 0.4784, 0.1943, 0.3467, 0.2457, 0.3082, 0.1853, 0.5252], + device='cuda:2'), in_proj_covar=tensor([0.0943, 0.0981, 0.0806, 0.0946, 0.0997, 0.0896, 0.0748, 0.0826], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 04:20:40,360 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:41,476 INFO [train.py:901] (2/4) Epoch 22, batch 6200, loss[loss=0.2388, simple_loss=0.3247, pruned_loss=0.07641, over 8214.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2858, pruned_loss=0.06038, over 1613420.37 frames. ], batch size: 23, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:53,613 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:56,911 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:58,214 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175965.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:21:06,754 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0040, 1.5709, 1.4419, 1.5511, 1.3083, 1.2639, 1.2976, 1.2527], + device='cuda:2'), covar=tensor([0.1157, 0.0482, 0.1241, 0.0588, 0.0789, 0.1579, 0.0939, 0.0843], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0233, 0.0335, 0.0310, 0.0299, 0.0340, 0.0345, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:21:15,640 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.329e+02 2.882e+02 3.634e+02 1.217e+03, threshold=5.765e+02, percent-clipped=6.0 +2023-02-07 04:21:15,660 INFO [train.py:901] (2/4) Epoch 22, batch 6250, loss[loss=0.183, simple_loss=0.2504, pruned_loss=0.0578, over 6842.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2868, pruned_loss=0.06135, over 1613020.01 frames. ], batch size: 15, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:21:51,339 INFO [train.py:901] (2/4) Epoch 22, batch 6300, loss[loss=0.1896, simple_loss=0.2663, pruned_loss=0.05641, over 8133.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2868, pruned_loss=0.06208, over 1609781.13 frames. 
], batch size: 22, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:12,729 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176072.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:26,686 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.297e+02 2.795e+02 3.577e+02 6.374e+02, threshold=5.590e+02, percent-clipped=1.0 +2023-02-07 04:22:26,706 INFO [train.py:901] (2/4) Epoch 22, batch 6350, loss[loss=0.1741, simple_loss=0.2654, pruned_loss=0.0414, over 7938.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2872, pruned_loss=0.06231, over 1610769.91 frames. ], batch size: 20, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:34,428 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,216 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,825 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:01,397 INFO [train.py:901] (2/4) Epoch 22, batch 6400, loss[loss=0.2073, simple_loss=0.2826, pruned_loss=0.06601, over 7933.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2869, pruned_loss=0.06235, over 1610845.96 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:08,437 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:33,466 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176187.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:36,645 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.375e+02 2.869e+02 3.334e+02 7.002e+02, threshold=5.738e+02, percent-clipped=1.0 +2023-02-07 04:23:36,665 INFO [train.py:901] (2/4) Epoch 22, batch 6450, loss[loss=0.157, simple_loss=0.2337, pruned_loss=0.04012, over 7408.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2858, pruned_loss=0.06163, over 1612201.37 frames. ], batch size: 17, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:52,614 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:54,751 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-07 04:24:05,443 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-07 04:24:09,914 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:11,679 INFO [train.py:901] (2/4) Epoch 22, batch 6500, loss[loss=0.2302, simple_loss=0.3094, pruned_loss=0.0755, over 8332.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2866, pruned_loss=0.06126, over 1615306.47 frames. 
], batch size: 26, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:12,448 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:23,897 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176260.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:27,368 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2391, 3.6149, 2.3204, 2.8745, 2.6680, 2.0007, 2.7886, 3.0380], + device='cuda:2'), covar=tensor([0.1637, 0.0342, 0.1213, 0.0752, 0.0827, 0.1539, 0.1050, 0.1157], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0234, 0.0335, 0.0311, 0.0300, 0.0342, 0.0347, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:24:32,079 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6300, 2.5389, 1.7838, 2.2364, 2.1110, 1.5358, 1.9901, 2.1172], + device='cuda:2'), covar=tensor([0.1453, 0.0389, 0.1282, 0.0680, 0.0732, 0.1588, 0.1051, 0.1012], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0233, 0.0334, 0.0311, 0.0300, 0.0341, 0.0347, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:24:34,063 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:45,633 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.325e+02 2.725e+02 3.404e+02 5.159e+02, threshold=5.450e+02, percent-clipped=0.0 +2023-02-07 04:24:45,653 INFO [train.py:901] (2/4) Epoch 22, batch 6550, loss[loss=0.1841, simple_loss=0.2564, pruned_loss=0.05595, over 7543.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2855, pruned_loss=0.06085, over 1611348.00 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:55,361 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5792, 1.9661, 3.2891, 1.4628, 2.5122, 2.0744, 1.6334, 2.5062], + device='cuda:2'), covar=tensor([0.1816, 0.2503, 0.0789, 0.4345, 0.1706, 0.2886, 0.2321, 0.2033], + device='cuda:2'), in_proj_covar=tensor([0.0528, 0.0608, 0.0558, 0.0646, 0.0650, 0.0596, 0.0540, 0.0635], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:24:57,258 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176307.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:04,186 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3025, 2.0446, 2.6358, 2.2648, 2.5983, 2.2313, 2.1528, 1.8972], + device='cuda:2'), covar=tensor([0.3778, 0.4139, 0.1614, 0.2894, 0.1813, 0.2669, 0.1576, 0.3748], + device='cuda:2'), in_proj_covar=tensor([0.0941, 0.0979, 0.0806, 0.0945, 0.0997, 0.0896, 0.0749, 0.0828], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 04:25:09,377 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 04:25:21,086 INFO [train.py:901] (2/4) Epoch 22, batch 6600, loss[loss=0.1985, simple_loss=0.2828, pruned_loss=0.05711, over 8244.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2869, pruned_loss=0.06168, over 1612997.30 frames. 
], batch size: 24, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:25:29,277 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 04:25:32,753 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:55,379 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.510e+02 3.110e+02 4.060e+02 7.968e+02, threshold=6.221e+02, percent-clipped=4.0 +2023-02-07 04:25:55,400 INFO [train.py:901] (2/4) Epoch 22, batch 6650, loss[loss=0.2389, simple_loss=0.3164, pruned_loss=0.08069, over 8566.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2885, pruned_loss=0.06321, over 1608602.04 frames. ], batch size: 31, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:25:56,917 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3155, 2.8264, 2.4491, 4.0561, 1.8815, 1.9168, 2.6520, 3.0101], + device='cuda:2'), covar=tensor([0.0944, 0.0893, 0.1024, 0.0264, 0.1090, 0.1370, 0.0924, 0.0794], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0243, 0.0214, 0.0205, 0.0245, 0.0249, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:26:17,168 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:31,229 INFO [train.py:901] (2/4) Epoch 22, batch 6700, loss[loss=0.2237, simple_loss=0.3024, pruned_loss=0.07253, over 8646.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2875, pruned_loss=0.06254, over 1607202.47 frames. ], batch size: 34, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:26:32,108 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:49,608 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:00,366 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:05,583 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.672e+02 3.290e+02 4.002e+02 8.131e+02, threshold=6.579e+02, percent-clipped=6.0 +2023-02-07 04:27:05,603 INFO [train.py:901] (2/4) Epoch 22, batch 6750, loss[loss=0.2215, simple_loss=0.3027, pruned_loss=0.07021, over 8549.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2859, pruned_loss=0.06214, over 1607106.11 frames. ], batch size: 49, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:41,476 INFO [train.py:901] (2/4) Epoch 22, batch 6800, loss[loss=0.1766, simple_loss=0.2508, pruned_loss=0.0512, over 7720.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2864, pruned_loss=0.06225, over 1609980.82 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:45,668 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 04:28:10,347 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. 
limit=2.0 +2023-02-07 04:28:16,786 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.317e+02 3.026e+02 3.783e+02 8.757e+02, threshold=6.052e+02, percent-clipped=1.0 +2023-02-07 04:28:16,806 INFO [train.py:901] (2/4) Epoch 22, batch 6850, loss[loss=0.2103, simple_loss=0.2955, pruned_loss=0.06256, over 8478.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2869, pruned_loss=0.06199, over 1610278.88 frames. ], batch size: 49, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:24,731 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:31,630 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:34,719 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 04:28:34,769 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176619.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:38,965 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8943, 2.2940, 4.2568, 1.6141, 3.1299, 2.4623, 1.8987, 3.0397], + device='cuda:2'), covar=tensor([0.1806, 0.2611, 0.0753, 0.4427, 0.1725, 0.2930, 0.2226, 0.2255], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0643, 0.0648, 0.0594, 0.0537, 0.0632], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:28:48,427 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:50,268 INFO [train.py:901] (2/4) Epoch 22, batch 6900, loss[loss=0.2151, simple_loss=0.2981, pruned_loss=0.06605, over 8252.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2879, pruned_loss=0.06261, over 1609754.25 frames. ], batch size: 24, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:51,036 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:17,272 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:20,580 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:26,746 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 2.439e+02 3.078e+02 3.806e+02 5.995e+02, threshold=6.157e+02, percent-clipped=0.0 +2023-02-07 04:29:26,766 INFO [train.py:901] (2/4) Epoch 22, batch 6950, loss[loss=0.2114, simple_loss=0.2925, pruned_loss=0.06513, over 8454.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2877, pruned_loss=0.06202, over 1612434.42 frames. ], batch size: 27, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:29:35,481 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176703.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:44,284 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 04:29:46,554 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176719.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:48,146 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. 
limit=2.0 +2023-02-07 04:29:56,844 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:30:02,046 INFO [train.py:901] (2/4) Epoch 22, batch 7000, loss[loss=0.1977, simple_loss=0.2866, pruned_loss=0.0544, over 7930.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.289, pruned_loss=0.06298, over 1611590.18 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:30:03,961 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-07 04:30:37,809 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.923e+02 3.703e+02 8.900e+02, threshold=5.847e+02, percent-clipped=5.0 +2023-02-07 04:30:37,829 INFO [train.py:901] (2/4) Epoch 22, batch 7050, loss[loss=0.194, simple_loss=0.2812, pruned_loss=0.05339, over 8486.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2892, pruned_loss=0.06274, over 1613763.23 frames. ], batch size: 28, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:03,290 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:11,270 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0807, 3.5499, 2.3039, 2.8101, 2.7646, 2.0070, 2.6772, 2.9951], + device='cuda:2'), covar=tensor([0.1624, 0.0338, 0.1232, 0.0757, 0.0771, 0.1526, 0.1092, 0.1074], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0233, 0.0337, 0.0312, 0.0300, 0.0344, 0.0347, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:31:12,370 INFO [train.py:901] (2/4) Epoch 22, batch 7100, loss[loss=0.2189, simple_loss=0.3079, pruned_loss=0.06492, over 8608.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2896, pruned_loss=0.06301, over 1615322.33 frames. ], batch size: 34, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:31,794 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:46,089 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.316e+02 2.836e+02 3.633e+02 7.093e+02, threshold=5.673e+02, percent-clipped=3.0 +2023-02-07 04:31:46,109 INFO [train.py:901] (2/4) Epoch 22, batch 7150, loss[loss=0.2673, simple_loss=0.337, pruned_loss=0.09883, over 7654.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.29, pruned_loss=0.06325, over 1614867.05 frames. ], batch size: 19, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:51,640 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 04:32:22,266 INFO [train.py:901] (2/4) Epoch 22, batch 7200, loss[loss=0.1943, simple_loss=0.2785, pruned_loss=0.05501, over 8285.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2886, pruned_loss=0.06276, over 1617339.06 frames. 
], batch size: 23, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:23,127 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:44,916 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176975.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:52,859 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:54,980 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:56,137 INFO [train.py:901] (2/4) Epoch 22, batch 7250, loss[loss=0.1744, simple_loss=0.2448, pruned_loss=0.05201, over 7445.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2886, pruned_loss=0.06247, over 1620654.07 frames. ], batch size: 17, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:56,793 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.385e+02 2.852e+02 3.441e+02 7.839e+02, threshold=5.703e+02, percent-clipped=2.0 +2023-02-07 04:33:02,406 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:10,525 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6722, 4.6192, 4.1939, 2.0658, 4.1466, 4.2658, 4.1026, 4.0799], + device='cuda:2'), covar=tensor([0.0638, 0.0498, 0.0932, 0.4748, 0.0850, 0.0850, 0.1263, 0.0785], + device='cuda:2'), in_proj_covar=tensor([0.0528, 0.0438, 0.0432, 0.0542, 0.0427, 0.0448, 0.0428, 0.0388], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:33:14,105 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177015.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:15,421 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3004, 1.5175, 4.5126, 2.3136, 2.4151, 5.0563, 5.1245, 4.4045], + device='cuda:2'), covar=tensor([0.1131, 0.1850, 0.0273, 0.1643, 0.1225, 0.0182, 0.0507, 0.0538], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0319, 0.0284, 0.0313, 0.0309, 0.0265, 0.0418, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:33:21,898 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177027.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:31,972 INFO [train.py:901] (2/4) Epoch 22, batch 7300, loss[loss=0.1635, simple_loss=0.2352, pruned_loss=0.04591, over 7434.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06201, over 1614722.10 frames. ], batch size: 17, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:06,482 INFO [train.py:901] (2/4) Epoch 22, batch 7350, loss[loss=0.202, simple_loss=0.2907, pruned_loss=0.0567, over 8350.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.0617, over 1611633.53 frames. 
], batch size: 24, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:07,154 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.532e+02 3.310e+02 4.342e+02 9.656e+02, threshold=6.621e+02, percent-clipped=7.0 +2023-02-07 04:34:13,471 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:26,102 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 04:34:31,738 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:33,086 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:42,494 INFO [train.py:901] (2/4) Epoch 22, batch 7400, loss[loss=0.1817, simple_loss=0.2716, pruned_loss=0.04593, over 7814.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2853, pruned_loss=0.06081, over 1609399.89 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:42,675 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:42,687 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4090, 2.8185, 2.3209, 4.0480, 1.7237, 2.0914, 2.4673, 2.8758], + device='cuda:2'), covar=tensor([0.0763, 0.0801, 0.0867, 0.0212, 0.1168, 0.1299, 0.1046, 0.0776], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0243, 0.0212, 0.0206, 0.0245, 0.0248, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:34:47,987 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 04:35:09,031 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6990, 5.7123, 5.1220, 2.4716, 5.0860, 5.4656, 5.3356, 5.4150], + device='cuda:2'), covar=tensor([0.0612, 0.0481, 0.0924, 0.4701, 0.0728, 0.0777, 0.1067, 0.0530], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0440, 0.0433, 0.0542, 0.0428, 0.0449, 0.0429, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:35:16,507 INFO [train.py:901] (2/4) Epoch 22, batch 7450, loss[loss=0.1799, simple_loss=0.257, pruned_loss=0.05138, over 7930.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06099, over 1614993.78 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:35:17,190 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.327e+02 2.972e+02 3.761e+02 7.589e+02, threshold=5.944e+02, percent-clipped=3.0 +2023-02-07 04:35:21,630 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:27,656 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-07 04:35:32,257 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:38,466 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:51,507 INFO [train.py:901] (2/4) Epoch 22, batch 7500, loss[loss=0.1817, simple_loss=0.2538, pruned_loss=0.05484, over 7422.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06106, over 1612999.90 frames. ], batch size: 17, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:35:58,953 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8529, 1.6609, 1.8404, 1.7347, 0.9567, 1.5908, 2.1364, 2.2960], + device='cuda:2'), covar=tensor([0.0426, 0.1210, 0.1643, 0.1352, 0.0629, 0.1497, 0.0628, 0.0557], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0162, 0.0111, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 04:36:23,739 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-07 04:36:25,348 INFO [train.py:901] (2/4) Epoch 22, batch 7550, loss[loss=0.1594, simple_loss=0.2443, pruned_loss=0.03723, over 7458.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2861, pruned_loss=0.06082, over 1610542.34 frames. ], batch size: 17, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:36:26,041 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.508e+02 3.019e+02 3.781e+02 7.904e+02, threshold=6.039e+02, percent-clipped=4.0 +2023-02-07 04:36:43,691 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6845, 1.4264, 3.2010, 1.4355, 2.3893, 3.4617, 3.5530, 2.9711], + device='cuda:2'), covar=tensor([0.1271, 0.1760, 0.0286, 0.2010, 0.0854, 0.0226, 0.0475, 0.0518], + device='cuda:2'), in_proj_covar=tensor([0.0297, 0.0321, 0.0285, 0.0314, 0.0309, 0.0266, 0.0420, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:36:51,643 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177330.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:36:59,619 INFO [train.py:901] (2/4) Epoch 22, batch 7600, loss[loss=0.1808, simple_loss=0.2733, pruned_loss=0.04417, over 7926.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2853, pruned_loss=0.0602, over 1607300.32 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:11,460 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:29,851 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:35,783 INFO [train.py:901] (2/4) Epoch 22, batch 7650, loss[loss=0.2203, simple_loss=0.3042, pruned_loss=0.06819, over 8467.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2854, pruned_loss=0.06035, over 1606045.14 frames. 
], batch size: 49, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:36,440 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.559e+02 3.074e+02 4.315e+02 1.263e+03, threshold=6.148e+02, percent-clipped=10.0 +2023-02-07 04:37:39,983 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:57,393 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177423.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:00,846 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0105, 2.6569, 2.1622, 2.3227, 2.3643, 2.0481, 2.2219, 2.4232], + device='cuda:2'), covar=tensor([0.1123, 0.0302, 0.0865, 0.0537, 0.0527, 0.1155, 0.0730, 0.0740], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0232, 0.0335, 0.0310, 0.0299, 0.0341, 0.0345, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:38:09,936 INFO [train.py:901] (2/4) Epoch 22, batch 7700, loss[loss=0.2241, simple_loss=0.3016, pruned_loss=0.07334, over 7153.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2848, pruned_loss=0.06038, over 1603215.37 frames. ], batch size: 72, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:30,444 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:31,726 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177473.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:38,598 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 04:38:45,982 INFO [train.py:901] (2/4) Epoch 22, batch 7750, loss[loss=0.2335, simple_loss=0.306, pruned_loss=0.08049, over 8101.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2859, pruned_loss=0.06107, over 1606144.83 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:46,657 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.486e+02 3.125e+02 4.090e+02 1.041e+03, threshold=6.251e+02, percent-clipped=8.0 +2023-02-07 04:39:20,412 INFO [train.py:901] (2/4) Epoch 22, batch 7800, loss[loss=0.1686, simple_loss=0.2468, pruned_loss=0.04524, over 7536.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2861, pruned_loss=0.06133, over 1605193.16 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:39,794 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177571.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,858 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,890 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:50,723 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0 +2023-02-07 04:39:51,146 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:53,646 INFO [train.py:901] (2/4) Epoch 22, batch 7850, loss[loss=0.1963, simple_loss=0.2879, pruned_loss=0.05231, over 8355.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2871, pruned_loss=0.06164, over 1605791.50 frames. 
], batch size: 24, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:54,300 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.387e+02 2.753e+02 3.373e+02 6.542e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 04:40:06,551 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177611.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:40:26,686 INFO [train.py:901] (2/4) Epoch 22, batch 7900, loss[loss=0.2134, simple_loss=0.3005, pruned_loss=0.06316, over 8197.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2873, pruned_loss=0.06167, over 1605032.84 frames. ], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:40:53,547 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:41:00,025 INFO [train.py:901] (2/4) Epoch 22, batch 7950, loss[loss=0.1731, simple_loss=0.2522, pruned_loss=0.04704, over 7765.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.0611, over 1608906.88 frames. ], batch size: 19, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:00,683 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.424e+02 2.966e+02 3.766e+02 9.319e+02, threshold=5.931e+02, percent-clipped=7.0 +2023-02-07 04:41:00,853 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2378, 1.4968, 4.3660, 1.9953, 2.4422, 4.9613, 4.9993, 4.3531], + device='cuda:2'), covar=tensor([0.1186, 0.1918, 0.0264, 0.1953, 0.1251, 0.0158, 0.0384, 0.0541], + device='cuda:2'), in_proj_covar=tensor([0.0297, 0.0323, 0.0285, 0.0315, 0.0311, 0.0267, 0.0422, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:41:18,843 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.52 vs. limit=5.0 +2023-02-07 04:41:33,729 INFO [train.py:901] (2/4) Epoch 22, batch 8000, loss[loss=0.2034, simple_loss=0.2748, pruned_loss=0.066, over 7306.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06119, over 1606458.14 frames. ], batch size: 16, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:51,625 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 04:42:06,692 INFO [train.py:901] (2/4) Epoch 22, batch 8050, loss[loss=0.1941, simple_loss=0.2745, pruned_loss=0.05685, over 7528.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2853, pruned_loss=0.06142, over 1586136.95 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:42:07,272 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.308e+02 2.923e+02 3.618e+02 1.070e+03, threshold=5.846e+02, percent-clipped=4.0 +2023-02-07 04:42:10,809 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:22,386 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:39,547 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 04:42:44,817 INFO [train.py:901] (2/4) Epoch 23, batch 0, loss[loss=0.2402, simple_loss=0.3177, pruned_loss=0.08136, over 8455.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3177, pruned_loss=0.08136, over 8455.00 frames. 
], batch size: 27, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:42:44,817 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 04:42:56,157 INFO [train.py:935] (2/4) Epoch 23, validation: loss=0.1743, simple_loss=0.274, pruned_loss=0.0373, over 944034.00 frames. +2023-02-07 04:42:56,158 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 04:43:08,349 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:10,538 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177844.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:12,384 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 04:43:26,738 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:28,089 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:32,010 INFO [train.py:901] (2/4) Epoch 23, batch 50, loss[loss=0.1893, simple_loss=0.2789, pruned_loss=0.04987, over 8511.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2928, pruned_loss=0.06369, over 367436.98 frames. ], batch size: 28, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:43:42,582 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9498, 2.1846, 3.1364, 1.8035, 2.7672, 2.2058, 2.0867, 2.5818], + device='cuda:2'), covar=tensor([0.1655, 0.2320, 0.0706, 0.3984, 0.1470, 0.2681, 0.2016, 0.1990], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0648, 0.0648, 0.0594, 0.0538, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:43:45,276 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.650e+02 3.149e+02 3.939e+02 1.519e+03, threshold=6.298e+02, percent-clipped=14.0 +2023-02-07 04:43:46,678 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 04:44:01,100 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177915.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:44:02,081 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 04:44:07,951 INFO [train.py:901] (2/4) Epoch 23, batch 100, loss[loss=0.2116, simple_loss=0.3022, pruned_loss=0.06054, over 8293.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2896, pruned_loss=0.0622, over 643482.62 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:09,372 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 04:44:15,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2270, 2.3698, 1.9144, 2.8975, 1.4186, 1.7578, 2.0592, 2.2561], + device='cuda:2'), covar=tensor([0.0608, 0.0634, 0.0845, 0.0355, 0.1072, 0.1147, 0.0881, 0.0771], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0214, 0.0207, 0.0246, 0.0250, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:44:42,228 INFO [train.py:901] (2/4) Epoch 23, batch 150, loss[loss=0.1896, simple_loss=0.2665, pruned_loss=0.05639, over 7969.00 frames. 
], tot_loss[loss=0.2097, simple_loss=0.2918, pruned_loss=0.0638, over 861768.31 frames. ], batch size: 21, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:49,533 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1908, 1.5744, 4.0936, 1.8344, 2.4348, 4.5903, 4.6460, 3.9712], + device='cuda:2'), covar=tensor([0.1203, 0.1999, 0.0329, 0.2182, 0.1456, 0.0185, 0.0426, 0.0553], + device='cuda:2'), in_proj_covar=tensor([0.0300, 0.0323, 0.0287, 0.0318, 0.0313, 0.0269, 0.0424, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:44:54,927 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.352e+02 3.015e+02 3.767e+02 5.945e+02, threshold=6.031e+02, percent-clipped=0.0 +2023-02-07 04:44:55,344 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 04:45:18,302 INFO [train.py:901] (2/4) Epoch 23, batch 200, loss[loss=0.2349, simple_loss=0.325, pruned_loss=0.07244, over 8195.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2941, pruned_loss=0.06474, over 1028916.69 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:45:19,115 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:21,865 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178030.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:53,034 INFO [train.py:901] (2/4) Epoch 23, batch 250, loss[loss=0.1645, simple_loss=0.2529, pruned_loss=0.03804, over 8131.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2912, pruned_loss=0.06284, over 1160977.11 frames. ], batch size: 22, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:04,761 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 04:46:06,105 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.380e+02 2.804e+02 3.484e+02 6.736e+02, threshold=5.609e+02, percent-clipped=2.0 +2023-02-07 04:46:12,814 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 04:46:28,460 INFO [train.py:901] (2/4) Epoch 23, batch 300, loss[loss=0.2085, simple_loss=0.3019, pruned_loss=0.05755, over 8316.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2907, pruned_loss=0.06228, over 1267758.17 frames. 
], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:40,067 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:40,621 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:45,437 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0686, 1.5839, 1.4081, 1.4923, 1.3167, 1.2700, 1.2774, 1.3023], + device='cuda:2'), covar=tensor([0.1217, 0.0485, 0.1345, 0.0584, 0.0773, 0.1558, 0.0927, 0.0857], + device='cuda:2'), in_proj_covar=tensor([0.0352, 0.0231, 0.0332, 0.0308, 0.0297, 0.0337, 0.0342, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 04:46:52,877 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:53,827 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 04:46:54,585 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 04:46:59,077 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3107, 1.2206, 2.3559, 1.3109, 2.1568, 2.5425, 2.6908, 2.1596], + device='cuda:2'), covar=tensor([0.1284, 0.1480, 0.0430, 0.2010, 0.0716, 0.0380, 0.0671, 0.0677], + device='cuda:2'), in_proj_covar=tensor([0.0299, 0.0322, 0.0288, 0.0316, 0.0312, 0.0267, 0.0423, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:47:03,746 INFO [train.py:901] (2/4) Epoch 23, batch 350, loss[loss=0.2085, simple_loss=0.2977, pruned_loss=0.05965, over 8500.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2896, pruned_loss=0.06196, over 1346784.96 frames. ], batch size: 29, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:16,037 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.905e+02 3.451e+02 8.072e+02, threshold=5.809e+02, percent-clipped=5.0 +2023-02-07 04:47:23,326 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6783, 1.5208, 2.8067, 1.3575, 2.2235, 3.0416, 3.1703, 2.5723], + device='cuda:2'), covar=tensor([0.1229, 0.1603, 0.0370, 0.2122, 0.0895, 0.0292, 0.0557, 0.0600], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0320, 0.0286, 0.0315, 0.0311, 0.0266, 0.0421, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:47:38,678 INFO [train.py:901] (2/4) Epoch 23, batch 400, loss[loss=0.2, simple_loss=0.2919, pruned_loss=0.05406, over 8650.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2889, pruned_loss=0.06183, over 1401773.58 frames. ], batch size: 34, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:43,362 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0 +2023-02-07 04:47:51,826 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3584, 1.5234, 2.2532, 1.2618, 1.8046, 1.5725, 1.4208, 1.7548], + device='cuda:2'), covar=tensor([0.1533, 0.2135, 0.0639, 0.3584, 0.1418, 0.2588, 0.1825, 0.2020], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0645, 0.0647, 0.0592, 0.0537, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:48:02,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,038 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,486 INFO [train.py:901] (2/4) Epoch 23, batch 450, loss[loss=0.2528, simple_loss=0.3118, pruned_loss=0.09688, over 7633.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.288, pruned_loss=0.06157, over 1448537.45 frames. ], batch size: 19, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:48:17,136 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3382, 2.7948, 2.3314, 3.9579, 1.7979, 2.0779, 2.5907, 2.8598], + device='cuda:2'), covar=tensor([0.0711, 0.0744, 0.0779, 0.0241, 0.1060, 0.1202, 0.0991, 0.0729], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0197, 0.0245, 0.0215, 0.0207, 0.0246, 0.0250, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:48:23,158 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:27,632 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.308e+02 2.812e+02 3.532e+02 1.107e+03, threshold=5.624e+02, percent-clipped=2.0 +2023-02-07 04:48:40,215 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:50,185 INFO [train.py:901] (2/4) Epoch 23, batch 500, loss[loss=0.2611, simple_loss=0.3187, pruned_loss=0.1018, over 7928.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2911, pruned_loss=0.06302, over 1492028.84 frames. ], batch size: 20, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:25,960 INFO [train.py:901] (2/4) Epoch 23, batch 550, loss[loss=0.1961, simple_loss=0.2852, pruned_loss=0.05353, over 8237.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2903, pruned_loss=0.06235, over 1515252.45 frames. ], batch size: 22, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:39,363 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.448e+02 3.105e+02 3.761e+02 9.562e+02, threshold=6.211e+02, percent-clipped=5.0 +2023-02-07 04:49:42,442 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:49:59,309 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:01,200 INFO [train.py:901] (2/4) Epoch 23, batch 600, loss[loss=0.2102, simple_loss=0.2819, pruned_loss=0.0693, over 8076.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2894, pruned_loss=0.0616, over 1540136.02 frames. ], batch size: 21, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:14,803 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-07 04:50:33,525 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178470.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:36,799 INFO [train.py:901] (2/4) Epoch 23, batch 650, loss[loss=0.1821, simple_loss=0.2668, pruned_loss=0.04869, over 8515.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2885, pruned_loss=0.06146, over 1559846.14 frames. ], batch size: 31, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:49,804 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.230e+02 2.701e+02 3.368e+02 8.641e+02, threshold=5.402e+02, percent-clipped=2.0 +2023-02-07 04:51:04,373 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:12,430 INFO [train.py:901] (2/4) Epoch 23, batch 700, loss[loss=0.1919, simple_loss=0.2692, pruned_loss=0.05728, over 7779.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2879, pruned_loss=0.0616, over 1569389.16 frames. ], batch size: 19, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:16,058 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:18,198 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1704, 2.0513, 2.6889, 2.2525, 2.6014, 2.2959, 2.0505, 1.5367], + device='cuda:2'), covar=tensor([0.6030, 0.5068, 0.2133, 0.3862, 0.2682, 0.2991, 0.1943, 0.5536], + device='cuda:2'), in_proj_covar=tensor([0.0941, 0.0986, 0.0813, 0.0952, 0.0997, 0.0899, 0.0754, 0.0831], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 04:51:21,532 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:33,985 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178555.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:47,511 INFO [train.py:901] (2/4) Epoch 23, batch 750, loss[loss=0.1894, simple_loss=0.2679, pruned_loss=0.05539, over 7428.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06132, over 1582382.28 frames. 
], batch size: 17, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:49,831 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8753, 2.2371, 3.5668, 1.8429, 1.8588, 3.5226, 0.7215, 2.1568], + device='cuda:2'), covar=tensor([0.1322, 0.1270, 0.0230, 0.1668, 0.2448, 0.0324, 0.2067, 0.1376], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0221, 0.0270, 0.0137, 0.0171, 0.0196], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 04:51:59,472 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9860, 2.3744, 4.1835, 1.6793, 3.1539, 2.4419, 2.0293, 2.9363], + device='cuda:2'), covar=tensor([0.1798, 0.2694, 0.0910, 0.4521, 0.1783, 0.3138, 0.2172, 0.2584], + device='cuda:2'), in_proj_covar=tensor([0.0527, 0.0606, 0.0555, 0.0645, 0.0649, 0.0594, 0.0536, 0.0634], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:52:00,639 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.529e+02 2.988e+02 3.531e+02 9.866e+02, threshold=5.976e+02, percent-clipped=5.0 +2023-02-07 04:52:03,331 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 04:52:12,883 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 04:52:24,020 INFO [train.py:901] (2/4) Epoch 23, batch 800, loss[loss=0.1709, simple_loss=0.2479, pruned_loss=0.04699, over 7548.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.287, pruned_loss=0.06136, over 1586212.20 frames. ], batch size: 18, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:52:32,111 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178637.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:52:38,299 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0287, 2.2060, 1.8405, 2.8102, 1.3666, 1.6589, 2.0640, 2.1417], + device='cuda:2'), covar=tensor([0.0690, 0.0741, 0.0856, 0.0340, 0.1093, 0.1246, 0.0763, 0.0800], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0197, 0.0245, 0.0214, 0.0206, 0.0245, 0.0249, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 04:52:57,747 INFO [train.py:901] (2/4) Epoch 23, batch 850, loss[loss=0.2302, simple_loss=0.3084, pruned_loss=0.07603, over 8452.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2869, pruned_loss=0.06182, over 1591768.86 frames. 
], batch size: 27, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:10,566 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.561e+02 2.992e+02 3.918e+02 1.040e+03, threshold=5.984e+02, percent-clipped=6.0 +2023-02-07 04:53:24,482 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178712.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:53:26,495 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178715.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:53:31,385 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1767, 4.1579, 3.7910, 1.9346, 3.6751, 3.8171, 3.6335, 3.6550], + device='cuda:2'), covar=tensor([0.0701, 0.0524, 0.0927, 0.4719, 0.0811, 0.0845, 0.1362, 0.0782], + device='cuda:2'), in_proj_covar=tensor([0.0535, 0.0440, 0.0435, 0.0544, 0.0426, 0.0448, 0.0431, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:53:34,048 INFO [train.py:901] (2/4) Epoch 23, batch 900, loss[loss=0.2104, simple_loss=0.2902, pruned_loss=0.06531, over 8498.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06155, over 1600026.84 frames. ], batch size: 28, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:55,223 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0 +2023-02-07 04:54:03,482 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6402, 1.9305, 4.6702, 2.2721, 2.9836, 5.3287, 5.2976, 4.6739], + device='cuda:2'), covar=tensor([0.1066, 0.1773, 0.0239, 0.1804, 0.1053, 0.0138, 0.0307, 0.0452], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0321, 0.0286, 0.0315, 0.0310, 0.0267, 0.0422, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 04:54:09,414 INFO [train.py:901] (2/4) Epoch 23, batch 950, loss[loss=0.211, simple_loss=0.2971, pruned_loss=0.06241, over 8470.00 frames. ], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.06148, over 1602151.50 frames. ], batch size: 27, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:54:18,546 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:21,857 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.330e+02 2.907e+02 3.544e+02 9.473e+02, threshold=5.814e+02, percent-clipped=4.0 +2023-02-07 04:54:22,772 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6473, 1.9394, 2.0394, 1.2745, 2.3038, 1.3955, 0.8056, 1.7901], + device='cuda:2'), covar=tensor([0.0732, 0.0432, 0.0328, 0.0671, 0.0435, 0.1010, 0.0882, 0.0437], + device='cuda:2'), in_proj_covar=tensor([0.0453, 0.0395, 0.0347, 0.0448, 0.0380, 0.0536, 0.0393, 0.0425], + device='cuda:2'), out_proj_covar=tensor([1.2096e-04, 1.0363e-04, 9.1189e-05, 1.1773e-04, 9.9773e-05, 1.5107e-04, + 1.0597e-04, 1.1251e-04], device='cuda:2') +2023-02-07 04:54:35,810 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 04:54:37,115 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:45,364 INFO [train.py:901] (2/4) Epoch 23, batch 1000, loss[loss=0.1959, simple_loss=0.273, pruned_loss=0.05937, over 8032.00 frames. 
], tot_loss[loss=0.2032, simple_loss=0.2854, pruned_loss=0.06053, over 1604649.92 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:12,384 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 04:55:21,361 INFO [train.py:901] (2/4) Epoch 23, batch 1050, loss[loss=0.1796, simple_loss=0.276, pruned_loss=0.04161, over 8454.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2848, pruned_loss=0.06008, over 1609453.11 frames. ], batch size: 27, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:25,409 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 04:55:33,401 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.332e+02 2.695e+02 3.454e+02 6.847e+02, threshold=5.390e+02, percent-clipped=5.0 +2023-02-07 04:55:46,651 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178912.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:55:56,207 INFO [train.py:901] (2/4) Epoch 23, batch 1100, loss[loss=0.2271, simple_loss=0.3071, pruned_loss=0.07356, over 8227.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06073, over 1610043.09 frames. ], batch size: 24, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:55:59,139 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:29,584 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0405, 2.4524, 3.8461, 2.2189, 2.0713, 3.8077, 0.8090, 2.2981], + device='cuda:2'), covar=tensor([0.1346, 0.1265, 0.0300, 0.1518, 0.2293, 0.0364, 0.2028, 0.1562], + device='cuda:2'), in_proj_covar=tensor([0.0192, 0.0200, 0.0129, 0.0220, 0.0268, 0.0136, 0.0170, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 04:56:32,124 INFO [train.py:901] (2/4) Epoch 23, batch 1150, loss[loss=0.1793, simple_loss=0.2656, pruned_loss=0.04648, over 7452.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2868, pruned_loss=0.06109, over 1608412.08 frames. ], batch size: 17, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:56:36,274 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 04:56:36,348 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:41,187 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2657, 3.1862, 2.9853, 1.5395, 2.9337, 2.9249, 2.9613, 2.8642], + device='cuda:2'), covar=tensor([0.1155, 0.0793, 0.1238, 0.4758, 0.1057, 0.1355, 0.1616, 0.1042], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0439, 0.0432, 0.0542, 0.0426, 0.0447, 0.0430, 0.0387], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 04:56:45,237 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.628e+02 3.162e+02 4.177e+02 1.087e+03, threshold=6.324e+02, percent-clipped=6.0 +2023-02-07 04:57:07,131 INFO [train.py:901] (2/4) Epoch 23, batch 1200, loss[loss=0.1933, simple_loss=0.288, pruned_loss=0.04934, over 8357.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2864, pruned_loss=0.06065, over 1612561.03 frames. 
], batch size: 24, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:29,093 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:57:31,025 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179059.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:57:42,798 INFO [train.py:901] (2/4) Epoch 23, batch 1250, loss[loss=0.1953, simple_loss=0.2913, pruned_loss=0.04971, over 8331.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2869, pruned_loss=0.06108, over 1610041.13 frames. ], batch size: 25, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:55,982 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.289e+02 2.896e+02 3.686e+02 5.954e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 04:57:58,265 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:19,011 INFO [train.py:901] (2/4) Epoch 23, batch 1300, loss[loss=0.1806, simple_loss=0.2683, pruned_loss=0.04648, over 8126.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2864, pruned_loss=0.06026, over 1613044.45 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:58:24,104 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:51,907 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179171.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:53,991 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179174.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:58:54,481 INFO [train.py:901] (2/4) Epoch 23, batch 1350, loss[loss=0.1914, simple_loss=0.2815, pruned_loss=0.05065, over 7661.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2866, pruned_loss=0.06064, over 1609980.47 frames. ], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:01,718 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179185.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:07,800 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.184e+02 2.635e+02 3.098e+02 5.270e+02, threshold=5.271e+02, percent-clipped=0.0 +2023-02-07 04:59:20,404 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179210.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:30,637 INFO [train.py:901] (2/4) Epoch 23, batch 1400, loss[loss=0.1626, simple_loss=0.2453, pruned_loss=0.03992, over 7429.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06087, over 1607924.76 frames. ], batch size: 17, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:47,155 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:53,436 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179256.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:00:06,546 INFO [train.py:901] (2/4) Epoch 23, batch 1450, loss[loss=0.2127, simple_loss=0.2954, pruned_loss=0.06496, over 8461.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2866, pruned_loss=0.06072, over 1611622.71 frames. ], batch size: 29, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:00:16,912 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-07 05:00:19,768 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.293e+02 2.971e+02 3.774e+02 8.745e+02, threshold=5.941e+02, percent-clipped=9.0 +2023-02-07 05:00:37,216 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 05:00:43,614 INFO [train.py:901] (2/4) Epoch 23, batch 1500, loss[loss=0.2259, simple_loss=0.3078, pruned_loss=0.07196, over 7924.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2863, pruned_loss=0.06079, over 1607482.86 frames. ], batch size: 20, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:03,400 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:16,373 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179371.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:01:18,849 INFO [train.py:901] (2/4) Epoch 23, batch 1550, loss[loss=0.1844, simple_loss=0.2723, pruned_loss=0.04827, over 8460.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2884, pruned_loss=0.06207, over 1610601.29 frames. ], batch size: 27, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:20,470 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:21,728 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:31,102 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.349e+02 2.958e+02 3.969e+02 7.808e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-07 05:01:37,883 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0 +2023-02-07 05:01:40,056 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-07 05:01:54,021 INFO [train.py:901] (2/4) Epoch 23, batch 1600, loss[loss=0.2206, simple_loss=0.3027, pruned_loss=0.06919, over 8344.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2883, pruned_loss=0.0623, over 1613642.47 frames. 
], batch size: 26, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:56,466 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179427.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:58,516 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179430.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:01:59,828 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4311, 1.4857, 1.3970, 1.7885, 0.7751, 1.2675, 1.3974, 1.5277], + device='cuda:2'), covar=tensor([0.0947, 0.0830, 0.1026, 0.0518, 0.1142, 0.1529, 0.0724, 0.0749], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0199, 0.0245, 0.0215, 0.0207, 0.0248, 0.0250, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 05:02:14,539 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:02:16,567 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179455.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:21,384 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179462.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:26,495 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6588, 1.6086, 2.4538, 1.3572, 1.0745, 2.3621, 0.5787, 1.4102], + device='cuda:2'), covar=tensor([0.1849, 0.1457, 0.0317, 0.1451, 0.2948, 0.0379, 0.1987, 0.1456], + device='cuda:2'), in_proj_covar=tensor([0.0190, 0.0198, 0.0129, 0.0219, 0.0267, 0.0136, 0.0169, 0.0193], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 05:02:31,190 INFO [train.py:901] (2/4) Epoch 23, batch 1650, loss[loss=0.2593, simple_loss=0.3335, pruned_loss=0.09256, over 8666.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2881, pruned_loss=0.06211, over 1612342.46 frames. ], batch size: 34, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:02:41,802 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3193, 1.8614, 1.3369, 2.7801, 1.2135, 1.1723, 2.1950, 2.0242], + device='cuda:2'), covar=tensor([0.1639, 0.1247, 0.2017, 0.0405, 0.1460, 0.2235, 0.0872, 0.0944], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0199, 0.0245, 0.0215, 0.0207, 0.0248, 0.0251, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 05:02:43,578 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.367e+02 2.783e+02 3.381e+02 8.055e+02, threshold=5.566e+02, percent-clipped=4.0 +2023-02-07 05:02:50,707 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:06,271 INFO [train.py:901] (2/4) Epoch 23, batch 1700, loss[loss=0.2371, simple_loss=0.3152, pruned_loss=0.07953, over 8523.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2888, pruned_loss=0.06238, over 1613494.14 frames. ], batch size: 39, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:03:08,727 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:42,331 INFO [train.py:901] (2/4) Epoch 23, batch 1750, loss[loss=0.2365, simple_loss=0.3182, pruned_loss=0.07736, over 8616.00 frames. 
], tot_loss[loss=0.2066, simple_loss=0.2883, pruned_loss=0.06244, over 1612551.13 frames. ], batch size: 49, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:03:56,241 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.481e+02 2.857e+02 3.517e+02 8.396e+02, threshold=5.713e+02, percent-clipped=3.0 +2023-02-07 05:04:17,968 INFO [train.py:901] (2/4) Epoch 23, batch 1800, loss[loss=0.1869, simple_loss=0.2769, pruned_loss=0.04843, over 8477.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2881, pruned_loss=0.06201, over 1612150.19 frames. ], batch size: 25, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:04:19,578 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179627.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:04:33,217 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 05:04:37,264 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179652.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:04:38,536 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179654.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:04:54,476 INFO [train.py:901] (2/4) Epoch 23, batch 1850, loss[loss=0.2071, simple_loss=0.2951, pruned_loss=0.05956, over 8341.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.0615, over 1612646.91 frames. ], batch size: 26, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:07,480 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.311e+02 2.831e+02 3.615e+02 8.108e+02, threshold=5.663e+02, percent-clipped=6.0 +2023-02-07 05:05:28,519 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:05:29,819 INFO [train.py:901] (2/4) Epoch 23, batch 1900, loss[loss=0.2431, simple_loss=0.3199, pruned_loss=0.08311, over 8235.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2868, pruned_loss=0.06162, over 1611840.32 frames. ], batch size: 22, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:59,876 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 05:06:05,566 INFO [train.py:901] (2/4) Epoch 23, batch 1950, loss[loss=0.2124, simple_loss=0.2812, pruned_loss=0.07186, over 7225.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06175, over 1610840.49 frames. ], batch size: 16, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:06:12,614 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-07 05:06:19,490 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.457e+02 2.986e+02 3.643e+02 8.972e+02, threshold=5.972e+02, percent-clipped=4.0 +2023-02-07 05:06:21,801 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3806, 1.4661, 1.5251, 1.1591, 1.6163, 1.2169, 0.6908, 1.4695], + device='cuda:2'), covar=tensor([0.0478, 0.0356, 0.0255, 0.0435, 0.0338, 0.0714, 0.0728, 0.0251], + device='cuda:2'), in_proj_covar=tensor([0.0455, 0.0393, 0.0346, 0.0449, 0.0380, 0.0534, 0.0393, 0.0422], + device='cuda:2'), out_proj_covar=tensor([1.2144e-04, 1.0307e-04, 9.0866e-05, 1.1813e-04, 1.0013e-04, 1.5032e-04, + 1.0593e-04, 1.1160e-04], device='cuda:2') +2023-02-07 05:06:28,060 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179806.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:06:31,241 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 05:06:34,176 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:06:37,042 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4255, 1.6150, 2.1663, 1.3164, 1.6116, 1.7048, 1.4705, 1.5393], + device='cuda:2'), covar=tensor([0.1852, 0.2379, 0.0951, 0.4324, 0.1823, 0.3217, 0.2269, 0.2041], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0608, 0.0556, 0.0647, 0.0650, 0.0594, 0.0538, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:06:41,620 INFO [train.py:901] (2/4) Epoch 23, batch 2000, loss[loss=0.1924, simple_loss=0.292, pruned_loss=0.04639, over 8578.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2871, pruned_loss=0.0611, over 1612837.11 frames. ], batch size: 39, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:06:50,597 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179838.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:07:16,428 INFO [train.py:901] (2/4) Epoch 23, batch 2050, loss[loss=0.1764, simple_loss=0.2489, pruned_loss=0.0519, over 7213.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06187, over 1611388.06 frames. ], batch size: 16, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:07:30,040 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.444e+02 2.856e+02 3.794e+02 1.051e+03, threshold=5.713e+02, percent-clipped=7.0 +2023-02-07 05:07:49,559 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179921.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:07:50,918 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1944, 4.1531, 3.7911, 1.9303, 3.6904, 3.7925, 3.7043, 3.6185], + device='cuda:2'), covar=tensor([0.0733, 0.0643, 0.1157, 0.4656, 0.0881, 0.1027, 0.1429, 0.0957], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0440, 0.0435, 0.0542, 0.0429, 0.0448, 0.0432, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:07:51,623 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179924.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:07:52,160 INFO [train.py:901] (2/4) Epoch 23, batch 2100, loss[loss=0.1811, simple_loss=0.2572, pruned_loss=0.05253, over 7788.00 frames. 
], tot_loss[loss=0.2063, simple_loss=0.288, pruned_loss=0.06234, over 1607018.44 frames. ], batch size: 19, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:08:04,845 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179942.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:08:24,974 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3636, 1.5017, 1.3507, 1.8191, 0.6643, 1.2408, 1.2731, 1.5078], + device='cuda:2'), covar=tensor([0.0882, 0.0795, 0.1011, 0.0515, 0.1249, 0.1436, 0.0800, 0.0750], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0198, 0.0243, 0.0214, 0.0206, 0.0246, 0.0249, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 05:08:27,554 INFO [train.py:901] (2/4) Epoch 23, batch 2150, loss[loss=0.2088, simple_loss=0.2985, pruned_loss=0.0596, over 8568.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2887, pruned_loss=0.0624, over 1610334.09 frames. ], batch size: 31, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:08:41,587 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.311e+02 2.940e+02 3.642e+02 8.826e+02, threshold=5.880e+02, percent-clipped=6.0 +2023-02-07 05:08:44,586 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:09:05,727 INFO [train.py:901] (2/4) Epoch 23, batch 2200, loss[loss=0.2226, simple_loss=0.3018, pruned_loss=0.07167, over 8239.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2875, pruned_loss=0.06169, over 1611055.06 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:09:18,804 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:09:23,719 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6291, 1.8083, 2.7693, 1.4637, 2.0312, 2.0748, 1.6283, 2.0062], + device='cuda:2'), covar=tensor([0.1779, 0.2667, 0.0826, 0.4553, 0.1787, 0.2898, 0.2324, 0.2025], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0610, 0.0557, 0.0648, 0.0650, 0.0595, 0.0539, 0.0633], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:09:40,654 INFO [train.py:901] (2/4) Epoch 23, batch 2250, loss[loss=0.1782, simple_loss=0.2727, pruned_loss=0.0419, over 8355.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.288, pruned_loss=0.06222, over 1612162.49 frames. ], batch size: 24, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:09:53,767 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.377e+02 2.815e+02 3.570e+02 6.536e+02, threshold=5.630e+02, percent-clipped=1.0 +2023-02-07 05:09:54,034 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:07,874 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180113.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:12,121 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180119.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:16,768 INFO [train.py:901] (2/4) Epoch 23, batch 2300, loss[loss=0.2109, simple_loss=0.2983, pruned_loss=0.06177, over 8498.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2886, pruned_loss=0.06213, over 1617730.81 frames. 
], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:10:40,104 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180158.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:52,677 INFO [train.py:901] (2/4) Epoch 23, batch 2350, loss[loss=0.3004, simple_loss=0.3539, pruned_loss=0.1235, over 7145.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2885, pruned_loss=0.06212, over 1613089.10 frames. ], batch size: 72, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:10:54,330 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180177.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:11:05,883 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.451e+02 2.928e+02 3.544e+02 9.883e+02, threshold=5.856e+02, percent-clipped=4.0 +2023-02-07 05:11:11,595 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180202.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:11:25,818 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180223.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:11:27,087 INFO [train.py:901] (2/4) Epoch 23, batch 2400, loss[loss=0.189, simple_loss=0.278, pruned_loss=0.05004, over 8286.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2877, pruned_loss=0.06155, over 1613758.97 frames. ], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:11:59,473 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180268.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:12:02,922 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:12:04,127 INFO [train.py:901] (2/4) Epoch 23, batch 2450, loss[loss=0.2373, simple_loss=0.3072, pruned_loss=0.08366, over 8342.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.06187, over 1615250.51 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:12:12,624 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:12:18,002 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.501e+02 2.918e+02 3.866e+02 1.157e+03, threshold=5.835e+02, percent-clipped=6.0 +2023-02-07 05:12:39,631 INFO [train.py:901] (2/4) Epoch 23, batch 2500, loss[loss=0.1733, simple_loss=0.2513, pruned_loss=0.04761, over 7791.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2879, pruned_loss=0.06223, over 1615538.87 frames. 
], batch size: 19, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:13:00,484 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180354.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:12,934 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:15,763 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0745, 1.8545, 2.3502, 1.9848, 2.3345, 2.1344, 1.9484, 1.1794], + device='cuda:2'), covar=tensor([0.5616, 0.4874, 0.2008, 0.3578, 0.2333, 0.3056, 0.1856, 0.5065], + device='cuda:2'), in_proj_covar=tensor([0.0949, 0.0993, 0.0818, 0.0955, 0.1004, 0.0906, 0.0755, 0.0837], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:13:16,784 INFO [train.py:901] (2/4) Epoch 23, batch 2550, loss[loss=0.2321, simple_loss=0.2839, pruned_loss=0.09019, over 7807.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2884, pruned_loss=0.06261, over 1615676.67 frames. ], batch size: 19, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:13:22,394 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180383.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:13:25,751 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:27,629 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 05:13:29,883 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.435e+02 3.031e+02 3.942e+02 1.076e+03, threshold=6.063e+02, percent-clipped=1.0 +2023-02-07 05:13:30,128 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:32,962 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2389, 1.6139, 1.6652, 1.0004, 1.6798, 1.2759, 0.2511, 1.4965], + device='cuda:2'), covar=tensor([0.0556, 0.0374, 0.0290, 0.0544, 0.0457, 0.0960, 0.0895, 0.0297], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0396, 0.0349, 0.0449, 0.0382, 0.0537, 0.0395, 0.0424], + device='cuda:2'), out_proj_covar=tensor([1.2232e-04, 1.0370e-04, 9.1689e-05, 1.1798e-04, 1.0060e-04, 1.5134e-04, + 1.0646e-04, 1.1215e-04], device='cuda:2') +2023-02-07 05:13:35,750 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180401.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:51,957 INFO [train.py:901] (2/4) Epoch 23, batch 2600, loss[loss=0.2291, simple_loss=0.3087, pruned_loss=0.07475, over 8438.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2875, pruned_loss=0.06186, over 1612572.60 frames. ], batch size: 29, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:14:28,402 INFO [train.py:901] (2/4) Epoch 23, batch 2650, loss[loss=0.1817, simple_loss=0.265, pruned_loss=0.04921, over 7192.00 frames. ], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.0615, over 1614750.38 frames. 
], batch size: 16, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:14:42,181 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 2.331e+02 2.876e+02 3.734e+02 9.435e+02, threshold=5.753e+02, percent-clipped=4.0 +2023-02-07 05:14:48,432 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:04,098 INFO [train.py:901] (2/4) Epoch 23, batch 2700, loss[loss=0.2124, simple_loss=0.3044, pruned_loss=0.06019, over 8440.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2867, pruned_loss=0.06123, over 1618412.68 frames. ], batch size: 27, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:15:07,062 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180529.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:24,088 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8070, 1.3532, 3.9877, 1.3842, 3.5247, 3.3234, 3.6021, 3.4963], + device='cuda:2'), covar=tensor([0.0698, 0.4889, 0.0657, 0.4415, 0.1364, 0.1038, 0.0685, 0.0857], + device='cuda:2'), in_proj_covar=tensor([0.0631, 0.0636, 0.0692, 0.0627, 0.0702, 0.0600, 0.0601, 0.0679], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:15:24,163 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:33,273 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:39,688 INFO [train.py:901] (2/4) Epoch 23, batch 2750, loss[loss=0.2146, simple_loss=0.3099, pruned_loss=0.05965, over 8449.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2862, pruned_loss=0.06132, over 1617941.15 frames. ], batch size: 27, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:15:48,799 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:53,491 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.355e+02 2.814e+02 3.432e+02 9.125e+02, threshold=5.629e+02, percent-clipped=4.0 +2023-02-07 05:16:15,674 INFO [train.py:901] (2/4) Epoch 23, batch 2800, loss[loss=0.1636, simple_loss=0.2525, pruned_loss=0.03735, over 8356.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2872, pruned_loss=0.06166, over 1619528.47 frames. ], batch size: 24, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:16:26,283 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180639.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:16:38,611 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180657.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:16:40,120 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 05:16:43,318 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180664.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:16:50,749 INFO [train.py:901] (2/4) Epoch 23, batch 2850, loss[loss=0.1922, simple_loss=0.2794, pruned_loss=0.05249, over 8478.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06123, over 1617188.34 frames. 
], batch size: 25, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:16:55,715 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:16:55,739 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:04,507 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.469e+02 3.037e+02 3.866e+02 9.714e+02, threshold=6.075e+02, percent-clipped=7.0 +2023-02-07 05:17:07,453 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:27,370 INFO [train.py:901] (2/4) Epoch 23, batch 2900, loss[loss=0.1992, simple_loss=0.2795, pruned_loss=0.05946, over 8028.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06198, over 1619839.97 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:17:45,117 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:52,152 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180759.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:59,465 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 05:18:02,965 INFO [train.py:901] (2/4) Epoch 23, batch 2950, loss[loss=0.2701, simple_loss=0.3563, pruned_loss=0.09199, over 8453.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.287, pruned_loss=0.06178, over 1615160.50 frames. ], batch size: 29, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:18:05,925 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7904, 2.2813, 4.1420, 1.5482, 2.8862, 2.2629, 1.7746, 2.7305], + device='cuda:2'), covar=tensor([0.2012, 0.2727, 0.0867, 0.4708, 0.2061, 0.3142, 0.2415, 0.2603], + device='cuda:2'), in_proj_covar=tensor([0.0524, 0.0606, 0.0555, 0.0646, 0.0645, 0.0591, 0.0537, 0.0628], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:18:07,974 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1643, 2.5350, 2.8287, 1.6247, 3.0935, 1.8149, 1.5419, 2.0938], + device='cuda:2'), covar=tensor([0.0832, 0.0394, 0.0310, 0.0813, 0.0476, 0.0981, 0.1013, 0.0600], + device='cuda:2'), in_proj_covar=tensor([0.0456, 0.0394, 0.0348, 0.0447, 0.0380, 0.0535, 0.0393, 0.0422], + device='cuda:2'), out_proj_covar=tensor([1.2179e-04, 1.0310e-04, 9.1223e-05, 1.1749e-04, 1.0009e-04, 1.5077e-04, + 1.0586e-04, 1.1172e-04], device='cuda:2') +2023-02-07 05:18:09,324 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:18:16,017 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.356e+02 2.925e+02 3.942e+02 6.480e+02, threshold=5.850e+02, percent-clipped=1.0 +2023-02-07 05:18:30,322 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180813.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:18:38,125 INFO [train.py:901] (2/4) Epoch 23, batch 3000, loss[loss=0.1612, simple_loss=0.2452, pruned_loss=0.03863, over 8081.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2868, pruned_loss=0.06167, over 1617311.52 frames. 
], batch size: 21, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:18:38,125 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 05:18:50,538 INFO [train.py:935] (2/4) Epoch 23, validation: loss=0.1735, simple_loss=0.2731, pruned_loss=0.03696, over 944034.00 frames. +2023-02-07 05:18:50,539 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 05:19:03,711 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:19:26,991 INFO [train.py:901] (2/4) Epoch 23, batch 3050, loss[loss=0.2175, simple_loss=0.2886, pruned_loss=0.07321, over 8193.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2869, pruned_loss=0.06169, over 1610575.78 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:19:40,681 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.515e+02 3.107e+02 3.968e+02 1.139e+03, threshold=6.214e+02, percent-clipped=7.0 +2023-02-07 05:20:02,335 INFO [train.py:901] (2/4) Epoch 23, batch 3100, loss[loss=0.1781, simple_loss=0.2644, pruned_loss=0.04595, over 7968.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2867, pruned_loss=0.06116, over 1615197.52 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:20:07,186 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180932.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:20:11,450 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:20:29,338 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:20:31,171 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 05:20:38,169 INFO [train.py:901] (2/4) Epoch 23, batch 3150, loss[loss=0.2157, simple_loss=0.2977, pruned_loss=0.06679, over 8460.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2862, pruned_loss=0.06095, over 1617574.49 frames. ], batch size: 29, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:20:51,966 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.467e+02 3.042e+02 3.660e+02 1.036e+03, threshold=6.084e+02, percent-clipped=2.0 +2023-02-07 05:21:14,468 INFO [train.py:901] (2/4) Epoch 23, batch 3200, loss[loss=0.1947, simple_loss=0.2752, pruned_loss=0.05709, over 8084.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2864, pruned_loss=0.06124, over 1620759.94 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:21:22,624 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-02-07 05:21:29,929 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181047.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:21:45,849 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181069.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:21:49,879 INFO [train.py:901] (2/4) Epoch 23, batch 3250, loss[loss=0.2206, simple_loss=0.311, pruned_loss=0.06505, over 8538.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2863, pruned_loss=0.06134, over 1616571.63 frames. ], batch size: 39, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:21:54,665 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-02-07 05:22:03,762 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.376e+02 2.917e+02 3.369e+02 6.745e+02, threshold=5.834e+02, percent-clipped=1.0 +2023-02-07 05:22:04,709 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:22:04,835 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:22:20,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7776, 1.6803, 2.4363, 1.5864, 1.2490, 2.3485, 0.4091, 1.4668], + device='cuda:2'), covar=tensor([0.1834, 0.1299, 0.0436, 0.1457, 0.3148, 0.0442, 0.2405, 0.1567], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0199, 0.0130, 0.0222, 0.0272, 0.0138, 0.0171, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 05:22:26,220 INFO [train.py:901] (2/4) Epoch 23, batch 3300, loss[loss=0.1563, simple_loss=0.2347, pruned_loss=0.03896, over 7433.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2864, pruned_loss=0.06112, over 1616046.64 frames. ], batch size: 17, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:23:01,365 INFO [train.py:901] (2/4) Epoch 23, batch 3350, loss[loss=0.1985, simple_loss=0.2802, pruned_loss=0.05836, over 8234.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06173, over 1614019.45 frames. ], batch size: 22, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:23:10,452 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181187.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:23:14,984 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.358e+02 3.053e+02 3.666e+02 9.674e+02, threshold=6.107e+02, percent-clipped=1.0 +2023-02-07 05:23:26,415 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:23:32,884 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8289, 3.8201, 3.4421, 1.8268, 3.3435, 3.4827, 3.4224, 3.3206], + device='cuda:2'), covar=tensor([0.0931, 0.0637, 0.1212, 0.4713, 0.1029, 0.1192, 0.1346, 0.0821], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0439, 0.0430, 0.0540, 0.0429, 0.0445, 0.0427, 0.0388], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:23:38,224 INFO [train.py:901] (2/4) Epoch 23, batch 3400, loss[loss=0.2042, simple_loss=0.2957, pruned_loss=0.05631, over 8093.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2876, pruned_loss=0.06148, over 1615550.81 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:23:50,593 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 05:24:13,228 INFO [train.py:901] (2/4) Epoch 23, batch 3450, loss[loss=0.2547, simple_loss=0.331, pruned_loss=0.08921, over 8693.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2887, pruned_loss=0.06225, over 1618159.22 frames. 
], batch size: 49, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:24:22,686 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4707, 1.9953, 1.5496, 1.7691, 1.6974, 1.4731, 1.6853, 1.7418], + device='cuda:2'), covar=tensor([0.0982, 0.0341, 0.0993, 0.0502, 0.0642, 0.1189, 0.0733, 0.0688], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0233, 0.0336, 0.0309, 0.0300, 0.0338, 0.0344, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 05:24:27,416 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.466e+02 2.960e+02 3.783e+02 8.296e+02, threshold=5.920e+02, percent-clipped=4.0 +2023-02-07 05:24:32,991 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:24:33,725 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181303.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:24:42,789 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181315.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:24:49,555 INFO [train.py:901] (2/4) Epoch 23, batch 3500, loss[loss=0.1759, simple_loss=0.2656, pruned_loss=0.04314, over 8452.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2887, pruned_loss=0.06155, over 1622339.36 frames. ], batch size: 27, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:24:52,754 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181328.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:25:07,649 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 05:25:25,791 INFO [train.py:901] (2/4) Epoch 23, batch 3550, loss[loss=0.1781, simple_loss=0.2553, pruned_loss=0.05044, over 7799.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2878, pruned_loss=0.06153, over 1617207.49 frames. ], batch size: 19, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:25:39,006 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.351e+02 2.882e+02 3.469e+02 9.271e+02, threshold=5.765e+02, percent-clipped=2.0 +2023-02-07 05:25:42,664 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9009, 1.6218, 3.3426, 1.4151, 2.4500, 3.6652, 3.7481, 3.0924], + device='cuda:2'), covar=tensor([0.1229, 0.1818, 0.0313, 0.2171, 0.0963, 0.0215, 0.0463, 0.0541], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0319, 0.0285, 0.0314, 0.0310, 0.0267, 0.0422, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 05:26:01,200 INFO [train.py:901] (2/4) Epoch 23, batch 3600, loss[loss=0.2377, simple_loss=0.3136, pruned_loss=0.08092, over 8623.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.06177, over 1617636.37 frames. ], batch size: 49, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:26:30,530 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181465.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:26:37,785 INFO [train.py:901] (2/4) Epoch 23, batch 3650, loss[loss=0.201, simple_loss=0.2731, pruned_loss=0.06445, over 7790.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2884, pruned_loss=0.06181, over 1617086.32 frames. 
], batch size: 19, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:26:48,314 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181490.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:26:50,925 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.424e+02 2.919e+02 3.720e+02 6.119e+02, threshold=5.839e+02, percent-clipped=1.0 +2023-02-07 05:27:11,127 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 05:27:12,459 INFO [train.py:901] (2/4) Epoch 23, batch 3700, loss[loss=0.2549, simple_loss=0.3343, pruned_loss=0.08779, over 8602.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2878, pruned_loss=0.06161, over 1615387.62 frames. ], batch size: 34, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:27:30,293 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:27:37,397 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:27:49,549 INFO [train.py:901] (2/4) Epoch 23, batch 3750, loss[loss=0.2018, simple_loss=0.2877, pruned_loss=0.05796, over 7976.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2876, pruned_loss=0.06159, over 1616415.45 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:27:55,396 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:28:01,752 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3854, 2.0938, 2.8152, 2.3176, 2.7579, 2.3658, 2.1795, 1.6902], + device='cuda:2'), covar=tensor([0.5590, 0.4959, 0.1920, 0.3697, 0.2383, 0.2799, 0.1870, 0.5104], + device='cuda:2'), in_proj_covar=tensor([0.0949, 0.0992, 0.0815, 0.0957, 0.1003, 0.0905, 0.0757, 0.0834], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:28:02,823 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.354e+02 2.844e+02 3.677e+02 7.170e+02, threshold=5.688e+02, percent-clipped=4.0 +2023-02-07 05:28:19,922 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-07 05:28:24,873 INFO [train.py:901] (2/4) Epoch 23, batch 3800, loss[loss=0.1762, simple_loss=0.2599, pruned_loss=0.0462, over 7794.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2866, pruned_loss=0.0609, over 1614769.51 frames. ], batch size: 19, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:28:35,135 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-07 05:28:49,222 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181659.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:29:00,793 INFO [train.py:901] (2/4) Epoch 23, batch 3850, loss[loss=0.1637, simple_loss=0.2554, pruned_loss=0.03604, over 7970.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.0614, over 1613599.31 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:29:14,859 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 2.361e+02 2.900e+02 3.650e+02 9.007e+02, threshold=5.800e+02, percent-clipped=7.0 +2023-02-07 05:29:22,388 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-07 05:29:36,633 INFO [train.py:901] (2/4) Epoch 23, batch 3900, loss[loss=0.1752, simple_loss=0.2594, pruned_loss=0.04549, over 7919.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2889, pruned_loss=0.06244, over 1612296.66 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:29:53,929 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9464, 1.4975, 1.6446, 1.4226, 0.9174, 1.4824, 1.7141, 1.6421], + device='cuda:2'), covar=tensor([0.0531, 0.1264, 0.1700, 0.1458, 0.0649, 0.1488, 0.0697, 0.0617], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0188, 0.0159, 0.0100, 0.0162, 0.0111, 0.0142], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 05:30:10,483 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:30:10,999 INFO [train.py:901] (2/4) Epoch 23, batch 3950, loss[loss=0.1744, simple_loss=0.2515, pruned_loss=0.04865, over 7531.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2874, pruned_loss=0.06179, over 1610333.97 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:30:26,249 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.359e+02 2.788e+02 3.393e+02 6.824e+02, threshold=5.575e+02, percent-clipped=4.0 +2023-02-07 05:30:29,315 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7547, 1.5919, 1.8260, 1.7027, 0.9267, 1.6551, 2.0860, 1.9209], + device='cuda:2'), covar=tensor([0.0454, 0.1266, 0.1685, 0.1377, 0.0599, 0.1429, 0.0643, 0.0626], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0163, 0.0112, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 05:30:47,710 INFO [train.py:901] (2/4) Epoch 23, batch 4000, loss[loss=0.2479, simple_loss=0.3131, pruned_loss=0.09133, over 6762.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.287, pruned_loss=0.06071, over 1611642.06 frames. ], batch size: 71, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:30:56,754 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3331, 1.2561, 4.5687, 1.6503, 4.0018, 3.7257, 4.0945, 4.0104], + device='cuda:2'), covar=tensor([0.0813, 0.5288, 0.0518, 0.4408, 0.1211, 0.1037, 0.0695, 0.0743], + device='cuda:2'), in_proj_covar=tensor([0.0640, 0.0649, 0.0705, 0.0640, 0.0717, 0.0615, 0.0613, 0.0686], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:31:22,578 INFO [train.py:901] (2/4) Epoch 23, batch 4050, loss[loss=0.1793, simple_loss=0.2589, pruned_loss=0.04982, over 7418.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2862, pruned_loss=0.06048, over 1609768.83 frames. ], batch size: 17, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:31:34,365 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:31:35,722 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.508e+02 2.885e+02 3.954e+02 8.020e+02, threshold=5.770e+02, percent-clipped=6.0 +2023-02-07 05:31:59,837 INFO [train.py:901] (2/4) Epoch 23, batch 4100, loss[loss=0.2458, simple_loss=0.3234, pruned_loss=0.08408, over 8590.00 frames. 
], tot_loss[loss=0.2043, simple_loss=0.2868, pruned_loss=0.06088, over 1613851.26 frames. ], batch size: 34, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:32:34,913 INFO [train.py:901] (2/4) Epoch 23, batch 4150, loss[loss=0.166, simple_loss=0.2517, pruned_loss=0.04014, over 8241.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2862, pruned_loss=0.06053, over 1614304.69 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:32:48,433 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.356e+02 2.929e+02 3.956e+02 6.697e+02, threshold=5.858e+02, percent-clipped=3.0 +2023-02-07 05:32:49,334 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:32:58,751 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:33:11,759 INFO [train.py:901] (2/4) Epoch 23, batch 4200, loss[loss=0.2081, simple_loss=0.2881, pruned_loss=0.06402, over 8297.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2874, pruned_loss=0.06113, over 1618405.46 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:33:14,747 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6673, 1.6293, 2.3046, 1.6149, 1.3274, 2.2218, 0.4213, 1.4370], + device='cuda:2'), covar=tensor([0.1670, 0.1252, 0.0270, 0.1024, 0.2444, 0.0387, 0.1908, 0.1339], + device='cuda:2'), in_proj_covar=tensor([0.0191, 0.0199, 0.0128, 0.0219, 0.0267, 0.0136, 0.0168, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 05:33:16,200 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182030.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:33:25,750 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 05:33:33,429 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:33:47,600 INFO [train.py:901] (2/4) Epoch 23, batch 4250, loss[loss=0.1898, simple_loss=0.2865, pruned_loss=0.0466, over 8294.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2862, pruned_loss=0.06087, over 1616674.50 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:33:49,041 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 05:34:01,344 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.416e+02 2.989e+02 3.588e+02 6.339e+02, threshold=5.979e+02, percent-clipped=2.0 +2023-02-07 05:34:22,740 INFO [train.py:901] (2/4) Epoch 23, batch 4300, loss[loss=0.2056, simple_loss=0.2931, pruned_loss=0.05904, over 8355.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2851, pruned_loss=0.06029, over 1613739.64 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:34:58,708 INFO [train.py:901] (2/4) Epoch 23, batch 4350, loss[loss=0.2199, simple_loss=0.3014, pruned_loss=0.06916, over 8461.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06066, over 1616832.73 frames. 
], batch size: 29, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:35:13,006 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0042, 2.2025, 1.8619, 2.7335, 1.2540, 1.6452, 1.9476, 2.2215], + device='cuda:2'), covar=tensor([0.0674, 0.0736, 0.0822, 0.0341, 0.1132, 0.1234, 0.0819, 0.0750], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0199, 0.0246, 0.0215, 0.0208, 0.0249, 0.0252, 0.0210], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 05:35:13,488 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.346e+02 2.960e+02 3.931e+02 9.702e+02, threshold=5.919e+02, percent-clipped=9.0 +2023-02-07 05:35:21,958 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 05:35:34,678 INFO [train.py:901] (2/4) Epoch 23, batch 4400, loss[loss=0.1867, simple_loss=0.2706, pruned_loss=0.05138, over 8135.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2861, pruned_loss=0.06027, over 1616494.81 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:35:46,164 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5005, 1.8960, 2.9049, 1.3522, 2.1360, 1.8639, 1.5819, 2.2019], + device='cuda:2'), covar=tensor([0.1992, 0.2664, 0.0867, 0.4694, 0.1981, 0.3368, 0.2428, 0.2257], + device='cuda:2'), in_proj_covar=tensor([0.0525, 0.0607, 0.0554, 0.0647, 0.0645, 0.0596, 0.0539, 0.0630], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:36:03,255 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:36:05,073 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 05:36:11,441 INFO [train.py:901] (2/4) Epoch 23, batch 4450, loss[loss=0.1944, simple_loss=0.279, pruned_loss=0.05488, over 8244.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06021, over 1615502.18 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:36:20,472 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182288.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:36:26,033 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.606e+02 3.225e+02 4.349e+02 9.132e+02, threshold=6.449e+02, percent-clipped=7.0 +2023-02-07 05:36:28,977 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:36:47,060 INFO [train.py:901] (2/4) Epoch 23, batch 4500, loss[loss=0.2457, simple_loss=0.3327, pruned_loss=0.07937, over 8470.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2853, pruned_loss=0.06016, over 1612022.61 frames. ], batch size: 25, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:36:56,852 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 05:36:57,584 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182340.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:37:14,933 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1067, 1.7893, 2.3487, 1.9955, 2.2919, 2.1318, 1.9845, 1.1542], + device='cuda:2'), covar=tensor([0.5898, 0.5036, 0.2032, 0.3707, 0.2379, 0.3033, 0.1887, 0.5070], + device='cuda:2'), in_proj_covar=tensor([0.0949, 0.0996, 0.0816, 0.0957, 0.1002, 0.0905, 0.0757, 0.0834], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:37:22,880 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 05:37:23,620 INFO [train.py:901] (2/4) Epoch 23, batch 4550, loss[loss=0.2495, simple_loss=0.3258, pruned_loss=0.0866, over 8610.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2862, pruned_loss=0.06118, over 1613806.48 frames. ], batch size: 39, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:37:34,953 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7578, 2.0286, 1.7886, 2.6380, 1.1896, 1.5296, 1.8843, 2.1049], + device='cuda:2'), covar=tensor([0.0822, 0.0766, 0.0860, 0.0364, 0.1107, 0.1313, 0.0813, 0.0750], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0198, 0.0246, 0.0215, 0.0208, 0.0249, 0.0252, 0.0209], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 05:37:37,489 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.347e+02 2.810e+02 3.651e+02 9.685e+02, threshold=5.619e+02, percent-clipped=2.0 +2023-02-07 05:37:46,727 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.44 vs. limit=5.0 +2023-02-07 05:37:59,258 INFO [train.py:901] (2/4) Epoch 23, batch 4600, loss[loss=0.1943, simple_loss=0.2811, pruned_loss=0.05377, over 8375.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2858, pruned_loss=0.061, over 1611464.97 frames. ], batch size: 49, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:38:07,959 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5909, 2.1129, 3.1600, 1.4727, 2.4723, 2.0433, 1.6701, 2.3645], + device='cuda:2'), covar=tensor([0.1847, 0.2400, 0.0897, 0.4411, 0.1696, 0.3047, 0.2340, 0.2210], + device='cuda:2'), in_proj_covar=tensor([0.0523, 0.0605, 0.0550, 0.0643, 0.0641, 0.0590, 0.0536, 0.0626], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:38:20,353 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182455.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:38:26,828 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-07 05:38:34,856 INFO [train.py:901] (2/4) Epoch 23, batch 4650, loss[loss=0.1996, simple_loss=0.2811, pruned_loss=0.05904, over 8195.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2853, pruned_loss=0.06079, over 1609911.49 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:38:50,626 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.204e+02 2.647e+02 3.638e+02 6.712e+02, threshold=5.294e+02, percent-clipped=7.0 +2023-02-07 05:39:12,439 INFO [train.py:901] (2/4) Epoch 23, batch 4700, loss[loss=0.2008, simple_loss=0.2919, pruned_loss=0.05487, over 8700.00 frames. 
], tot_loss[loss=0.2028, simple_loss=0.2846, pruned_loss=0.06049, over 1604273.37 frames. ], batch size: 39, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:39:17,387 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182532.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:39:47,030 INFO [train.py:901] (2/4) Epoch 23, batch 4750, loss[loss=0.1702, simple_loss=0.2498, pruned_loss=0.04528, over 7190.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2849, pruned_loss=0.06054, over 1604168.41 frames. ], batch size: 16, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:40:01,608 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.297e+02 2.902e+02 3.418e+02 7.225e+02, threshold=5.805e+02, percent-clipped=3.0 +2023-02-07 05:40:05,960 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 05:40:08,826 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 05:40:11,282 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.41 vs. limit=5.0 +2023-02-07 05:40:12,598 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3987, 1.6760, 1.7137, 1.1487, 1.7413, 1.4169, 0.3131, 1.6214], + device='cuda:2'), covar=tensor([0.0471, 0.0347, 0.0294, 0.0484, 0.0452, 0.0882, 0.0923, 0.0280], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0394, 0.0348, 0.0449, 0.0383, 0.0536, 0.0394, 0.0423], + device='cuda:2'), out_proj_covar=tensor([1.2220e-04, 1.0303e-04, 9.1370e-05, 1.1794e-04, 1.0090e-04, 1.5095e-04, + 1.0622e-04, 1.1179e-04], device='cuda:2') +2023-02-07 05:40:24,102 INFO [train.py:901] (2/4) Epoch 23, batch 4800, loss[loss=0.196, simple_loss=0.2849, pruned_loss=0.05353, over 8031.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2842, pruned_loss=0.06019, over 1606400.19 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:40:36,438 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:40:59,186 INFO [train.py:901] (2/4) Epoch 23, batch 4850, loss[loss=0.1826, simple_loss=0.2749, pruned_loss=0.04508, over 8304.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2838, pruned_loss=0.05966, over 1605118.89 frames. ], batch size: 23, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:41:00,602 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-07 05:41:00,763 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0389, 1.6146, 3.2428, 1.4376, 2.3082, 3.5836, 3.6965, 2.9611], + device='cuda:2'), covar=tensor([0.1203, 0.1801, 0.0422, 0.2287, 0.1123, 0.0279, 0.0695, 0.0685], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0320, 0.0286, 0.0315, 0.0311, 0.0268, 0.0423, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 05:41:13,247 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.288e+02 2.781e+02 3.814e+02 7.165e+02, threshold=5.562e+02, percent-clipped=4.0 +2023-02-07 05:41:17,124 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3944, 1.2709, 1.6561, 1.1557, 1.1066, 1.6362, 0.3009, 1.1131], + device='cuda:2'), covar=tensor([0.1502, 0.1264, 0.0455, 0.0969, 0.2413, 0.0522, 0.2233, 0.1371], + device='cuda:2'), in_proj_covar=tensor([0.0191, 0.0197, 0.0129, 0.0219, 0.0268, 0.0136, 0.0168, 0.0192], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 05:41:25,677 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182711.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:41:36,258 INFO [train.py:901] (2/4) Epoch 23, batch 4900, loss[loss=0.2204, simple_loss=0.302, pruned_loss=0.06941, over 8421.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2842, pruned_loss=0.05994, over 1608811.21 frames. ], batch size: 27, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:41:44,975 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182736.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:41:50,003 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3283, 2.0023, 2.6140, 2.1552, 2.5279, 2.3198, 2.1344, 1.3444], + device='cuda:2'), covar=tensor([0.5065, 0.4581, 0.1816, 0.3779, 0.2511, 0.3040, 0.1980, 0.5328], + device='cuda:2'), in_proj_covar=tensor([0.0938, 0.0985, 0.0806, 0.0946, 0.0993, 0.0896, 0.0750, 0.0827], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:42:00,366 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182758.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:42:09,168 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-07 05:42:12,967 INFO [train.py:901] (2/4) Epoch 23, batch 4950, loss[loss=0.2144, simple_loss=0.3, pruned_loss=0.06436, over 8345.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2831, pruned_loss=0.05937, over 1608738.00 frames. 
], batch size: 26, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:42:27,043 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.423e+02 2.989e+02 3.745e+02 1.524e+03, threshold=5.977e+02, percent-clipped=7.0 +2023-02-07 05:42:27,228 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3810, 1.3488, 2.3195, 1.2333, 2.1868, 2.5144, 2.7029, 2.0469], + device='cuda:2'), covar=tensor([0.1302, 0.1517, 0.0465, 0.2234, 0.0803, 0.0394, 0.0579, 0.0784], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0318, 0.0285, 0.0314, 0.0310, 0.0267, 0.0421, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 05:42:33,127 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6897, 2.4828, 3.2309, 2.6490, 3.0901, 2.6207, 2.5693, 2.3491], + device='cuda:2'), covar=tensor([0.3905, 0.4222, 0.1604, 0.3059, 0.1928, 0.2423, 0.1415, 0.3950], + device='cuda:2'), in_proj_covar=tensor([0.0939, 0.0985, 0.0806, 0.0947, 0.0993, 0.0895, 0.0750, 0.0827], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:42:37,324 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2863, 2.6082, 2.9455, 1.7303, 3.2032, 1.9254, 1.5677, 2.1072], + device='cuda:2'), covar=tensor([0.0783, 0.0419, 0.0327, 0.0794, 0.0427, 0.0869, 0.0868, 0.0599], + device='cuda:2'), in_proj_covar=tensor([0.0455, 0.0395, 0.0348, 0.0448, 0.0383, 0.0537, 0.0393, 0.0422], + device='cuda:2'), out_proj_covar=tensor([1.2140e-04, 1.0336e-04, 9.1409e-05, 1.1769e-04, 1.0070e-04, 1.5131e-04, + 1.0595e-04, 1.1145e-04], device='cuda:2') +2023-02-07 05:42:48,229 INFO [train.py:901] (2/4) Epoch 23, batch 5000, loss[loss=0.2006, simple_loss=0.2951, pruned_loss=0.05301, over 8245.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.283, pruned_loss=0.05907, over 1603250.85 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:43:25,219 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:43:25,722 INFO [train.py:901] (2/4) Epoch 23, batch 5050, loss[loss=0.2712, simple_loss=0.3367, pruned_loss=0.1029, over 8345.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2842, pruned_loss=0.06007, over 1610786.08 frames. ], batch size: 49, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:43:26,556 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:43:29,541 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:43:40,729 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.376e+02 2.932e+02 3.646e+02 6.966e+02, threshold=5.864e+02, percent-clipped=3.0 +2023-02-07 05:43:46,349 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-07 05:43:57,094 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7615, 1.8743, 1.6847, 2.3403, 1.1184, 1.4506, 1.7573, 1.8899], + device='cuda:2'), covar=tensor([0.0832, 0.0789, 0.0938, 0.0413, 0.1086, 0.1451, 0.0792, 0.0802], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0197, 0.0244, 0.0215, 0.0206, 0.0246, 0.0250, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 05:44:01,718 INFO [train.py:901] (2/4) Epoch 23, batch 5100, loss[loss=0.183, simple_loss=0.2723, pruned_loss=0.04689, over 8243.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2835, pruned_loss=0.0595, over 1612378.48 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:44:38,868 INFO [train.py:901] (2/4) Epoch 23, batch 5150, loss[loss=0.2169, simple_loss=0.2942, pruned_loss=0.06977, over 8032.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2845, pruned_loss=0.06013, over 1612875.10 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:44:50,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182991.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:44:53,603 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 2.409e+02 2.843e+02 3.449e+02 6.604e+02, threshold=5.686e+02, percent-clipped=1.0 +2023-02-07 05:44:55,928 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5534, 1.4309, 1.6327, 1.3453, 0.8778, 1.4212, 1.4699, 1.2151], + device='cuda:2'), covar=tensor([0.0591, 0.1242, 0.1615, 0.1423, 0.0645, 0.1442, 0.0736, 0.0690], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 05:45:07,264 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183014.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:45:14,590 INFO [train.py:901] (2/4) Epoch 23, batch 5200, loss[loss=0.2255, simple_loss=0.3035, pruned_loss=0.07381, over 8246.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2855, pruned_loss=0.06033, over 1615587.14 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:45:20,497 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 05:45:24,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183039.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:45:46,615 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 05:45:50,592 INFO [train.py:901] (2/4) Epoch 23, batch 5250, loss[loss=0.1875, simple_loss=0.2826, pruned_loss=0.04615, over 8242.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2856, pruned_loss=0.06027, over 1615584.88 frames. ], batch size: 24, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:45:55,889 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 05:46:05,165 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.492e+02 2.942e+02 3.798e+02 7.403e+02, threshold=5.885e+02, percent-clipped=3.0 +2023-02-07 05:46:27,065 INFO [train.py:901] (2/4) Epoch 23, batch 5300, loss[loss=0.2151, simple_loss=0.292, pruned_loss=0.06911, over 8696.00 frames. 
], tot_loss[loss=0.2044, simple_loss=0.2868, pruned_loss=0.06099, over 1620350.43 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:46:38,061 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 05:46:48,959 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1050, 1.7761, 2.3051, 1.9479, 2.2633, 2.1089, 1.9155, 1.0723], + device='cuda:2'), covar=tensor([0.5409, 0.4755, 0.1992, 0.3782, 0.2397, 0.3079, 0.1993, 0.5116], + device='cuda:2'), in_proj_covar=tensor([0.0942, 0.0988, 0.0806, 0.0949, 0.0997, 0.0898, 0.0752, 0.0829], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:46:56,901 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6901, 2.1272, 3.4949, 1.8140, 1.5663, 3.4462, 0.5624, 2.0545], + device='cuda:2'), covar=tensor([0.1381, 0.1282, 0.0213, 0.1550, 0.2863, 0.0249, 0.2223, 0.1368], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0195, 0.0127, 0.0216, 0.0266, 0.0134, 0.0167, 0.0190], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 05:47:02,889 INFO [train.py:901] (2/4) Epoch 23, batch 5350, loss[loss=0.211, simple_loss=0.2907, pruned_loss=0.06566, over 8233.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2873, pruned_loss=0.06183, over 1620601.57 frames. ], batch size: 22, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:47:17,713 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.502e+02 3.193e+02 3.793e+02 7.809e+02, threshold=6.385e+02, percent-clipped=1.0 +2023-02-07 05:47:34,656 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183218.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:47:38,864 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:47:39,481 INFO [train.py:901] (2/4) Epoch 23, batch 5400, loss[loss=0.2217, simple_loss=0.3076, pruned_loss=0.06791, over 8031.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.287, pruned_loss=0.06092, over 1621505.07 frames. ], batch size: 22, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:47:55,899 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:48:10,988 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5217, 1.8938, 1.9308, 1.0617, 1.9382, 1.4667, 0.4418, 1.7638], + device='cuda:2'), covar=tensor([0.0615, 0.0351, 0.0319, 0.0618, 0.0444, 0.0924, 0.0938, 0.0307], + device='cuda:2'), in_proj_covar=tensor([0.0457, 0.0395, 0.0348, 0.0449, 0.0384, 0.0538, 0.0394, 0.0424], + device='cuda:2'), out_proj_covar=tensor([1.2197e-04, 1.0329e-04, 9.1275e-05, 1.1787e-04, 1.0107e-04, 1.5149e-04, + 1.0619e-04, 1.1176e-04], device='cuda:2') +2023-02-07 05:48:13,005 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:48:14,971 INFO [train.py:901] (2/4) Epoch 23, batch 5450, loss[loss=0.1783, simple_loss=0.2532, pruned_loss=0.05167, over 7440.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2861, pruned_loss=0.06055, over 1616143.44 frames. ], batch size: 17, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:48:15,404 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-02-07 05:48:30,404 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.334e+02 2.819e+02 3.622e+02 6.725e+02, threshold=5.637e+02, percent-clipped=1.0 +2023-02-07 05:48:41,152 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 05:48:52,623 INFO [train.py:901] (2/4) Epoch 23, batch 5500, loss[loss=0.1812, simple_loss=0.2628, pruned_loss=0.04984, over 8199.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2863, pruned_loss=0.06093, over 1615132.09 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:48:54,993 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6540, 2.4659, 3.2600, 2.6571, 3.3117, 2.6414, 2.4631, 2.0712], + device='cuda:2'), covar=tensor([0.5354, 0.4966, 0.1925, 0.3813, 0.2375, 0.2927, 0.1752, 0.5324], + device='cuda:2'), in_proj_covar=tensor([0.0941, 0.0985, 0.0805, 0.0946, 0.0994, 0.0896, 0.0750, 0.0828], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:48:55,604 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8971, 1.5579, 1.7149, 1.4207, 1.0297, 1.5258, 1.7056, 1.4764], + device='cuda:2'), covar=tensor([0.0558, 0.1171, 0.1629, 0.1438, 0.0613, 0.1368, 0.0684, 0.0669], + device='cuda:2'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 05:48:55,691 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2807, 1.9841, 2.6199, 2.1638, 2.6141, 2.3084, 2.1044, 1.4343], + device='cuda:2'), covar=tensor([0.5155, 0.4742, 0.1934, 0.4003, 0.2368, 0.2974, 0.1886, 0.5340], + device='cuda:2'), in_proj_covar=tensor([0.0941, 0.0985, 0.0805, 0.0946, 0.0994, 0.0896, 0.0750, 0.0827], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 05:48:58,326 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183333.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:49:02,352 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:49:27,073 INFO [train.py:901] (2/4) Epoch 23, batch 5550, loss[loss=0.1926, simple_loss=0.2909, pruned_loss=0.04721, over 8197.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2853, pruned_loss=0.0604, over 1613367.81 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:49:41,517 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.428e+02 3.119e+02 4.010e+02 1.058e+03, threshold=6.238e+02, percent-clipped=9.0 +2023-02-07 05:49:46,633 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5987, 1.8827, 1.9561, 1.2079, 2.0790, 1.4804, 0.6591, 1.8646], + device='cuda:2'), covar=tensor([0.0635, 0.0371, 0.0270, 0.0564, 0.0417, 0.0835, 0.0877, 0.0281], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0397, 0.0350, 0.0450, 0.0385, 0.0539, 0.0395, 0.0425], + device='cuda:2'), out_proj_covar=tensor([1.2231e-04, 1.0380e-04, 9.1876e-05, 1.1820e-04, 1.0126e-04, 1.5191e-04, + 1.0653e-04, 1.1211e-04], device='cuda:2') +2023-02-07 05:50:03,264 INFO [train.py:901] (2/4) Epoch 23, batch 5600, loss[loss=0.1676, simple_loss=0.2471, pruned_loss=0.04405, over 7527.00 frames. 
], tot_loss[loss=0.2033, simple_loss=0.2853, pruned_loss=0.06062, over 1615564.68 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:50:04,084 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183426.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:50:39,035 INFO [train.py:901] (2/4) Epoch 23, batch 5650, loss[loss=0.1842, simple_loss=0.2631, pruned_loss=0.05269, over 7216.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2849, pruned_loss=0.0609, over 1611164.72 frames. ], batch size: 16, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:50:51,427 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 05:50:53,308 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.300e+02 3.084e+02 3.921e+02 7.530e+02, threshold=6.168e+02, percent-clipped=4.0 +2023-02-07 05:51:14,120 INFO [train.py:901] (2/4) Epoch 23, batch 5700, loss[loss=0.1743, simple_loss=0.2485, pruned_loss=0.05009, over 7690.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2865, pruned_loss=0.06093, over 1614978.65 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:51:50,329 INFO [train.py:901] (2/4) Epoch 23, batch 5750, loss[loss=0.1929, simple_loss=0.2751, pruned_loss=0.05531, over 8472.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2861, pruned_loss=0.06083, over 1614017.68 frames. ], batch size: 27, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:51:57,126 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 05:52:00,847 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183589.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:05,020 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:05,469 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.334e+02 3.030e+02 3.740e+02 1.347e+03, threshold=6.060e+02, percent-clipped=7.0 +2023-02-07 05:52:18,041 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:22,090 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183620.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:25,344 INFO [train.py:901] (2/4) Epoch 23, batch 5800, loss[loss=0.2103, simple_loss=0.295, pruned_loss=0.06287, over 8324.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2865, pruned_loss=0.06115, over 1619618.60 frames. ], batch size: 25, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:52:30,172 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:53:01,085 INFO [train.py:901] (2/4) Epoch 23, batch 5850, loss[loss=0.2477, simple_loss=0.3185, pruned_loss=0.08843, over 8665.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06101, over 1619957.62 frames. ], batch size: 50, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:53:16,206 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 2.367e+02 2.798e+02 3.640e+02 5.951e+02, threshold=5.597e+02, percent-clipped=0.0 +2023-02-07 05:53:36,770 INFO [train.py:901] (2/4) Epoch 23, batch 5900, loss[loss=0.1735, simple_loss=0.2507, pruned_loss=0.04814, over 7696.00 frames. 
], tot_loss[loss=0.2035, simple_loss=0.286, pruned_loss=0.06053, over 1620568.86 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:53:58,119 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9769, 1.4950, 3.1171, 1.5515, 2.8192, 2.6354, 2.8440, 2.7755], + device='cuda:2'), covar=tensor([0.0747, 0.3291, 0.0794, 0.3531, 0.0899, 0.0891, 0.0599, 0.0660], + device='cuda:2'), in_proj_covar=tensor([0.0643, 0.0650, 0.0705, 0.0637, 0.0716, 0.0614, 0.0612, 0.0690], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:54:08,422 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183770.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:54:11,753 INFO [train.py:901] (2/4) Epoch 23, batch 5950, loss[loss=0.1995, simple_loss=0.281, pruned_loss=0.05903, over 7652.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2864, pruned_loss=0.06064, over 1616830.98 frames. ], batch size: 19, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:54:24,493 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5852, 1.4839, 1.7633, 1.3545, 0.8418, 1.5070, 1.4709, 1.4989], + device='cuda:2'), covar=tensor([0.0562, 0.1188, 0.1604, 0.1449, 0.0585, 0.1405, 0.0680, 0.0630], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:2') +2023-02-07 05:54:27,017 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 2.784e+02 3.423e+02 5.836e+02, threshold=5.567e+02, percent-clipped=2.0 +2023-02-07 05:54:39,709 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 05:54:47,607 INFO [train.py:901] (2/4) Epoch 23, batch 6000, loss[loss=0.1723, simple_loss=0.2501, pruned_loss=0.04729, over 7930.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06139, over 1619570.59 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:54:47,607 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 05:55:00,697 INFO [train.py:935] (2/4) Epoch 23, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03597, over 944034.00 frames. +2023-02-07 05:55:00,698 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 05:55:25,818 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:55:29,284 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8667, 3.8200, 3.4687, 2.0289, 3.4191, 3.3898, 3.3623, 3.3228], + device='cuda:2'), covar=tensor([0.0957, 0.0695, 0.1241, 0.4487, 0.1099, 0.1268, 0.1587, 0.1060], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0444, 0.0431, 0.0540, 0.0431, 0.0447, 0.0430, 0.0387], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:55:36,130 INFO [train.py:901] (2/4) Epoch 23, batch 6050, loss[loss=0.1793, simple_loss=0.2517, pruned_loss=0.05345, over 7664.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06154, over 1616022.66 frames. 
], batch size: 19, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:55:43,224 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183885.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:55:50,617 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.465e+02 3.097e+02 3.782e+02 8.398e+02, threshold=6.194e+02, percent-clipped=6.0 +2023-02-07 05:56:11,859 INFO [train.py:901] (2/4) Epoch 23, batch 6100, loss[loss=0.2262, simple_loss=0.3021, pruned_loss=0.0752, over 8607.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06199, over 1617158.85 frames. ], batch size: 31, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:56:32,460 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 05:56:47,365 INFO [train.py:901] (2/4) Epoch 23, batch 6150, loss[loss=0.2213, simple_loss=0.3062, pruned_loss=0.06821, over 8661.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.287, pruned_loss=0.0622, over 1614107.50 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:56:48,166 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:57:01,785 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.512e+02 2.876e+02 3.577e+02 6.799e+02, threshold=5.752e+02, percent-clipped=2.0 +2023-02-07 05:57:12,086 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0296, 2.2777, 3.7487, 1.9503, 1.8434, 3.6978, 0.8731, 2.2737], + device='cuda:2'), covar=tensor([0.1200, 0.1459, 0.0154, 0.1741, 0.2593, 0.0178, 0.1938, 0.1197], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0196, 0.0128, 0.0219, 0.0268, 0.0135, 0.0168, 0.0190], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 05:57:17,560 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8308, 3.8009, 3.4499, 1.8693, 3.4114, 3.4285, 3.4010, 3.2925], + device='cuda:2'), covar=tensor([0.0911, 0.0677, 0.1163, 0.4676, 0.0939, 0.1503, 0.1400, 0.1010], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0447, 0.0433, 0.0543, 0.0432, 0.0450, 0.0432, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:57:22,985 INFO [train.py:901] (2/4) Epoch 23, batch 6200, loss[loss=0.2291, simple_loss=0.3064, pruned_loss=0.07585, over 8643.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.287, pruned_loss=0.06167, over 1618239.23 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:57:54,993 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:57:59,614 INFO [train.py:901] (2/4) Epoch 23, batch 6250, loss[loss=0.1848, simple_loss=0.2721, pruned_loss=0.04876, over 8296.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2852, pruned_loss=0.06056, over 1612546.25 frames. 
], batch size: 23, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:58:07,975 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0524, 1.7413, 6.2857, 2.2400, 5.6489, 5.2744, 5.7711, 5.6873], + device='cuda:2'), covar=tensor([0.0423, 0.4770, 0.0254, 0.3788, 0.0794, 0.0819, 0.0472, 0.0482], + device='cuda:2'), in_proj_covar=tensor([0.0644, 0.0651, 0.0707, 0.0641, 0.0719, 0.0616, 0.0617, 0.0693], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 05:58:11,448 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:58:14,638 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.340e+02 2.866e+02 3.425e+02 5.984e+02, threshold=5.731e+02, percent-clipped=3.0 +2023-02-07 05:58:34,471 INFO [train.py:901] (2/4) Epoch 23, batch 6300, loss[loss=0.1679, simple_loss=0.2468, pruned_loss=0.04452, over 7432.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2852, pruned_loss=0.06026, over 1614005.97 frames. ], batch size: 17, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:58:45,745 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:04,550 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184166.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:10,598 INFO [train.py:901] (2/4) Epoch 23, batch 6350, loss[loss=0.1966, simple_loss=0.2742, pruned_loss=0.0595, over 7529.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2845, pruned_loss=0.05968, over 1616991.19 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 05:59:25,790 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.298e+02 2.703e+02 3.593e+02 9.198e+02, threshold=5.406e+02, percent-clipped=6.0 +2023-02-07 05:59:28,534 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 05:59:32,342 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184204.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:46,844 INFO [train.py:901] (2/4) Epoch 23, batch 6400, loss[loss=0.1614, simple_loss=0.2476, pruned_loss=0.03757, over 8235.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.285, pruned_loss=0.06029, over 1615628.58 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:00:22,071 INFO [train.py:901] (2/4) Epoch 23, batch 6450, loss[loss=0.2487, simple_loss=0.3279, pruned_loss=0.08473, over 8689.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2862, pruned_loss=0.06098, over 1615934.34 frames. ], batch size: 34, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:00:37,219 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.429e+02 3.055e+02 3.904e+02 7.071e+02, threshold=6.109e+02, percent-clipped=5.0 +2023-02-07 06:00:54,549 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:00:58,474 INFO [train.py:901] (2/4) Epoch 23, batch 6500, loss[loss=0.1782, simple_loss=0.2657, pruned_loss=0.04538, over 7801.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06106, over 1614467.17 frames. 
], batch size: 20, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:01:13,631 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:30,746 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:32,660 INFO [train.py:901] (2/4) Epoch 23, batch 6550, loss[loss=0.1895, simple_loss=0.2674, pruned_loss=0.0558, over 7977.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2863, pruned_loss=0.06048, over 1615820.08 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:01:34,222 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:48,099 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.251e+02 2.720e+02 3.518e+02 7.175e+02, threshold=5.440e+02, percent-clipped=6.0 +2023-02-07 06:01:51,603 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 06:02:00,104 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:02:07,283 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2395, 2.5927, 2.8996, 1.6258, 3.2686, 2.0044, 1.5724, 2.1696], + device='cuda:2'), covar=tensor([0.0927, 0.0405, 0.0306, 0.0849, 0.0453, 0.0899, 0.1017, 0.0617], + device='cuda:2'), in_proj_covar=tensor([0.0455, 0.0396, 0.0349, 0.0448, 0.0381, 0.0536, 0.0392, 0.0425], + device='cuda:2'), out_proj_covar=tensor([1.2144e-04, 1.0353e-04, 9.1555e-05, 1.1783e-04, 1.0023e-04, 1.5082e-04, + 1.0563e-04, 1.1218e-04], device='cuda:2') +2023-02-07 06:02:09,827 INFO [train.py:901] (2/4) Epoch 23, batch 6600, loss[loss=0.1778, simple_loss=0.2596, pruned_loss=0.04804, over 7302.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2857, pruned_loss=0.06034, over 1614463.40 frames. ], batch size: 16, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:02:09,865 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 06:02:20,790 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 06:02:31,490 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7136, 1.6823, 2.1224, 1.7040, 1.0821, 1.8388, 2.1958, 2.1263], + device='cuda:2'), covar=tensor([0.0489, 0.1177, 0.1556, 0.1385, 0.0546, 0.1289, 0.0618, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0101, 0.0163, 0.0111, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 06:02:45,210 INFO [train.py:901] (2/4) Epoch 23, batch 6650, loss[loss=0.2248, simple_loss=0.3134, pruned_loss=0.06814, over 8459.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2865, pruned_loss=0.06049, over 1615842.78 frames. 
], batch size: 27, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:00,392 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.187e+02 2.636e+02 3.150e+02 7.164e+02, threshold=5.273e+02, percent-clipped=1.0 +2023-02-07 06:03:06,026 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:03:21,187 INFO [train.py:901] (2/4) Epoch 23, batch 6700, loss[loss=0.2161, simple_loss=0.3029, pruned_loss=0.0646, over 8037.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2866, pruned_loss=0.06055, over 1616372.10 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:22,782 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:03:56,972 INFO [train.py:901] (2/4) Epoch 23, batch 6750, loss[loss=0.2358, simple_loss=0.3109, pruned_loss=0.08036, over 6791.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2857, pruned_loss=0.06019, over 1613056.60 frames. ], batch size: 72, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:57,212 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184575.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:04:11,515 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.299e+02 2.705e+02 3.689e+02 1.087e+03, threshold=5.410e+02, percent-clipped=6.0 +2023-02-07 06:04:14,467 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184600.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:04:30,946 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 06:04:32,277 INFO [train.py:901] (2/4) Epoch 23, batch 6800, loss[loss=0.2256, simple_loss=0.2996, pruned_loss=0.07579, over 7640.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2855, pruned_loss=0.05988, over 1615565.76 frames. ], batch size: 19, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:04:55,642 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 06:05:08,933 INFO [train.py:901] (2/4) Epoch 23, batch 6850, loss[loss=0.1682, simple_loss=0.2398, pruned_loss=0.04832, over 6819.00 frames. ], tot_loss[loss=0.203, simple_loss=0.286, pruned_loss=0.05998, over 1614776.62 frames. ], batch size: 15, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:05:19,332 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 06:05:23,524 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.645e+02 3.100e+02 4.179e+02 7.238e+02, threshold=6.201e+02, percent-clipped=8.0 +2023-02-07 06:05:40,732 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184721.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:05:43,468 INFO [train.py:901] (2/4) Epoch 23, batch 6900, loss[loss=0.1886, simple_loss=0.27, pruned_loss=0.05357, over 7917.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06016, over 1613818.52 frames. ], batch size: 20, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:06:21,332 INFO [train.py:901] (2/4) Epoch 23, batch 6950, loss[loss=0.1692, simple_loss=0.2423, pruned_loss=0.04804, over 7713.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2852, pruned_loss=0.05982, over 1615612.31 frames. 
], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:06:27,133 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:06:29,044 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 06:06:35,999 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.334e+02 2.863e+02 3.573e+02 6.345e+02, threshold=5.727e+02, percent-clipped=1.0 +2023-02-07 06:06:38,089 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4801, 4.4775, 4.0595, 2.0947, 4.0187, 4.0162, 3.9965, 3.9035], + device='cuda:2'), covar=tensor([0.0683, 0.0502, 0.0926, 0.3955, 0.0842, 0.0939, 0.1300, 0.0792], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0447, 0.0431, 0.0544, 0.0434, 0.0448, 0.0430, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:06:44,107 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.36 vs. limit=5.0 +2023-02-07 06:06:44,536 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184808.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:06:56,217 INFO [train.py:901] (2/4) Epoch 23, batch 7000, loss[loss=0.1937, simple_loss=0.287, pruned_loss=0.05024, over 8541.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2846, pruned_loss=0.05965, over 1608823.07 frames. ], batch size: 28, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:07:03,957 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:12,025 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:32,115 INFO [train.py:901] (2/4) Epoch 23, batch 7050, loss[loss=0.2098, simple_loss=0.2932, pruned_loss=0.06319, over 8456.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2847, pruned_loss=0.05951, over 1609332.60 frames. ], batch size: 27, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:07:38,586 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:48,061 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.247e+02 2.854e+02 3.580e+02 1.056e+03, threshold=5.709e+02, percent-clipped=4.0 +2023-02-07 06:07:53,398 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.47 vs. limit=5.0 +2023-02-07 06:08:08,157 INFO [train.py:901] (2/4) Epoch 23, batch 7100, loss[loss=0.1995, simple_loss=0.2867, pruned_loss=0.05615, over 8035.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2851, pruned_loss=0.06007, over 1608170.97 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:08:30,174 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184957.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:08:34,373 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:08:42,473 INFO [train.py:901] (2/4) Epoch 23, batch 7150, loss[loss=0.1926, simple_loss=0.2888, pruned_loss=0.04819, over 8333.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2843, pruned_loss=0.05954, over 1605696.14 frames. 
], batch size: 25, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:08:53,467 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0924, 1.8615, 2.3540, 2.0355, 2.3080, 2.1573, 1.9436, 1.1697], + device='cuda:2'), covar=tensor([0.5486, 0.4703, 0.1891, 0.3330, 0.2369, 0.2896, 0.1840, 0.4910], + device='cuda:2'), in_proj_covar=tensor([0.0942, 0.0991, 0.0806, 0.0950, 0.0997, 0.0896, 0.0753, 0.0829], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 06:08:58,831 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.323e+02 2.664e+02 3.243e+02 7.163e+02, threshold=5.329e+02, percent-clipped=2.0 +2023-02-07 06:09:20,376 INFO [train.py:901] (2/4) Epoch 23, batch 7200, loss[loss=0.2521, simple_loss=0.3213, pruned_loss=0.09144, over 8625.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2837, pruned_loss=0.05909, over 1607931.81 frames. ], batch size: 49, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:29,466 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7513, 1.4901, 1.6486, 1.3912, 0.8567, 1.4341, 1.5164, 1.3472], + device='cuda:2'), covar=tensor([0.0564, 0.1304, 0.1689, 0.1474, 0.0627, 0.1511, 0.0763, 0.0700], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0102, 0.0164, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 06:09:35,037 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9701, 1.3880, 1.6884, 1.3201, 0.9280, 1.4803, 1.7966, 1.4931], + device='cuda:2'), covar=tensor([0.0577, 0.1613, 0.2235, 0.1792, 0.0689, 0.1866, 0.0732, 0.0762], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0102, 0.0164, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 06:09:35,780 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7634, 1.6612, 2.3517, 1.4459, 1.2484, 2.3578, 0.4156, 1.4187], + device='cuda:2'), covar=tensor([0.1661, 0.1215, 0.0298, 0.1324, 0.2652, 0.0302, 0.2275, 0.1448], + device='cuda:2'), in_proj_covar=tensor([0.0190, 0.0195, 0.0129, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 06:09:44,874 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5357, 1.3851, 1.6045, 1.2651, 0.9883, 1.4070, 1.5743, 1.2087], + device='cuda:2'), covar=tensor([0.0614, 0.1282, 0.1742, 0.1544, 0.0588, 0.1487, 0.0704, 0.0729], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 06:09:44,913 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4796, 1.4334, 1.8591, 1.2311, 1.0974, 1.7826, 0.2104, 1.1575], + device='cuda:2'), covar=tensor([0.1755, 0.1208, 0.0363, 0.1108, 0.2718, 0.0468, 0.2068, 0.1374], + device='cuda:2'), in_proj_covar=tensor([0.0190, 0.0196, 0.0129, 0.0220, 0.0268, 0.0136, 0.0169, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 06:09:54,644 INFO [train.py:901] (2/4) Epoch 23, batch 7250, loss[loss=0.2277, simple_loss=0.3021, 
pruned_loss=0.07669, over 7920.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2845, pruned_loss=0.0601, over 1605216.65 frames. ], batch size: 20, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:56,898 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185078.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:06,386 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:09,712 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.341e+02 2.701e+02 3.625e+02 6.528e+02, threshold=5.401e+02, percent-clipped=8.0 +2023-02-07 06:10:25,004 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:30,397 INFO [train.py:901] (2/4) Epoch 23, batch 7300, loss[loss=0.191, simple_loss=0.2707, pruned_loss=0.05563, over 7976.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2853, pruned_loss=0.06048, over 1605403.46 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:10:45,740 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1198, 1.9046, 2.4645, 2.0855, 2.3844, 2.2046, 1.9835, 1.2334], + device='cuda:2'), covar=tensor([0.5315, 0.4867, 0.1933, 0.3452, 0.2456, 0.2920, 0.1937, 0.5367], + device='cuda:2'), in_proj_covar=tensor([0.0939, 0.0986, 0.0805, 0.0948, 0.0995, 0.0896, 0.0751, 0.0826], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 06:10:46,969 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:04,728 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.51 vs. limit=5.0 +2023-02-07 06:11:06,505 INFO [train.py:901] (2/4) Epoch 23, batch 7350, loss[loss=0.2023, simple_loss=0.2718, pruned_loss=0.06644, over 8255.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2858, pruned_loss=0.06057, over 1609561.03 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:11:19,715 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 06:11:21,069 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.380e+02 2.863e+02 3.556e+02 7.708e+02, threshold=5.726e+02, percent-clipped=6.0 +2023-02-07 06:11:38,649 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5337, 1.5096, 1.8799, 1.3429, 1.2771, 1.8240, 0.1964, 1.2833], + device='cuda:2'), covar=tensor([0.1340, 0.1073, 0.0344, 0.0876, 0.2229, 0.0406, 0.1718, 0.1052], + device='cuda:2'), in_proj_covar=tensor([0.0189, 0.0195, 0.0128, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 06:11:38,653 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185219.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:41,244 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 06:11:42,632 INFO [train.py:901] (2/4) Epoch 23, batch 7400, loss[loss=0.1903, simple_loss=0.2867, pruned_loss=0.04695, over 8329.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2869, pruned_loss=0.06056, over 1616312.26 frames. 
], batch size: 25, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:11:44,819 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185228.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:48,325 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7765, 1.6086, 2.8583, 1.3821, 2.0483, 3.0875, 3.2663, 2.5665], + device='cuda:2'), covar=tensor([0.1124, 0.1554, 0.0386, 0.2129, 0.0998, 0.0290, 0.0546, 0.0599], + device='cuda:2'), in_proj_covar=tensor([0.0297, 0.0322, 0.0287, 0.0315, 0.0314, 0.0269, 0.0424, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:11:56,745 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:04,426 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 06:12:18,684 INFO [train.py:901] (2/4) Epoch 23, batch 7450, loss[loss=0.203, simple_loss=0.2866, pruned_loss=0.05968, over 8490.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2868, pruned_loss=0.06091, over 1614526.05 frames. ], batch size: 39, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:12:18,947 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4040, 1.6277, 2.1279, 1.2814, 1.5338, 1.6714, 1.4783, 1.5446], + device='cuda:2'), covar=tensor([0.2010, 0.2634, 0.1007, 0.4843, 0.2063, 0.3491, 0.2475, 0.2294], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0611, 0.0554, 0.0646, 0.0647, 0.0595, 0.0541, 0.0632], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:12:21,577 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 06:12:33,471 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.310e+02 2.954e+02 3.827e+02 6.869e+02, threshold=5.908e+02, percent-clipped=4.0 +2023-02-07 06:12:37,081 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:53,822 INFO [train.py:901] (2/4) Epoch 23, batch 7500, loss[loss=0.1764, simple_loss=0.2458, pruned_loss=0.05345, over 7197.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2868, pruned_loss=0.06104, over 1613700.17 frames. ], batch size: 16, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:05,840 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 06:13:08,273 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185343.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:13:22,330 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3952, 1.5143, 1.2689, 1.7268, 1.0219, 1.1931, 1.4427, 1.5248], + device='cuda:2'), covar=tensor([0.0670, 0.0620, 0.0831, 0.0570, 0.0959, 0.1104, 0.0598, 0.0580], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0199, 0.0244, 0.0214, 0.0206, 0.0247, 0.0250, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 06:13:31,746 INFO [train.py:901] (2/4) Epoch 23, batch 7550, loss[loss=0.1692, simple_loss=0.2604, pruned_loss=0.03902, over 8104.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2857, pruned_loss=0.06056, over 1606816.08 frames. 
], batch size: 23, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:39,223 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 06:13:46,072 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.461e+02 3.059e+02 3.860e+02 7.244e+02, threshold=6.118e+02, percent-clipped=3.0 +2023-02-07 06:14:00,491 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:04,573 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:06,593 INFO [train.py:901] (2/4) Epoch 23, batch 7600, loss[loss=0.1978, simple_loss=0.2842, pruned_loss=0.05564, over 8499.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.284, pruned_loss=0.0599, over 1604701.71 frames. ], batch size: 26, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:09,580 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.01 vs. limit=5.0 +2023-02-07 06:14:41,949 INFO [train.py:901] (2/4) Epoch 23, batch 7650, loss[loss=0.2329, simple_loss=0.3031, pruned_loss=0.08135, over 8036.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06084, over 1614338.39 frames. ], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:50,192 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:54,420 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:57,809 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.478e+02 3.100e+02 3.999e+02 8.387e+02, threshold=6.200e+02, percent-clipped=6.0 +2023-02-07 06:15:17,431 INFO [train.py:901] (2/4) Epoch 23, batch 7700, loss[loss=0.2541, simple_loss=0.3279, pruned_loss=0.09018, over 8288.00 frames. ], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.06146, over 1614106.07 frames. ], batch size: 49, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:15:25,721 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:15:37,375 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 06:15:53,096 INFO [train.py:901] (2/4) Epoch 23, batch 7750, loss[loss=0.1985, simple_loss=0.2856, pruned_loss=0.05565, over 7782.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.287, pruned_loss=0.0614, over 1616566.45 frames. ], batch size: 19, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:16:08,172 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.336e+02 2.905e+02 3.607e+02 6.527e+02, threshold=5.810e+02, percent-clipped=2.0 +2023-02-07 06:16:10,502 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185599.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:15,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,336 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,832 INFO [train.py:901] (2/4) Epoch 23, batch 7800, loss[loss=0.1598, simple_loss=0.2443, pruned_loss=0.03762, over 7444.00 frames. 
], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.06151, over 1613280.84 frames. ], batch size: 17, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:01,064 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185672.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:02,901 INFO [train.py:901] (2/4) Epoch 23, batch 7850, loss[loss=0.2218, simple_loss=0.3063, pruned_loss=0.0686, over 8628.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2864, pruned_loss=0.06111, over 1611019.39 frames. ], batch size: 34, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:17,286 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 2.983e+02 3.607e+02 9.941e+02, threshold=5.966e+02, percent-clipped=5.0 +2023-02-07 06:17:18,198 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185697.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:37,217 INFO [train.py:901] (2/4) Epoch 23, batch 7900, loss[loss=0.1964, simple_loss=0.2729, pruned_loss=0.05995, over 8192.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2855, pruned_loss=0.06057, over 1612067.09 frames. ], batch size: 23, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:00,628 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 06:18:11,083 INFO [train.py:901] (2/4) Epoch 23, batch 7950, loss[loss=0.1761, simple_loss=0.2582, pruned_loss=0.04702, over 7808.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2841, pruned_loss=0.05995, over 1610198.81 frames. ], batch size: 20, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:12,605 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185777.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:12,818 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 06:18:23,328 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:25,078 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.254e+02 2.775e+02 3.427e+02 8.244e+02, threshold=5.550e+02, percent-clipped=2.0 +2023-02-07 06:18:39,405 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185817.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 06:18:40,117 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:44,661 INFO [train.py:901] (2/4) Epoch 23, batch 8000, loss[loss=0.2245, simple_loss=0.3181, pruned_loss=0.06544, over 8235.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2862, pruned_loss=0.06114, over 1612442.77 frames. ], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:48,037 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:09,878 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:18,259 INFO [train.py:901] (2/4) Epoch 23, batch 8050, loss[loss=0.2507, simple_loss=0.3226, pruned_loss=0.08935, over 7038.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2856, pruned_loss=0.06183, over 1595740.69 frames. 
], batch size: 71, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:19:26,749 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:32,783 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.662e+02 3.318e+02 4.159e+02 9.358e+02, threshold=6.635e+02, percent-clipped=7.0 +2023-02-07 06:19:51,747 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 06:19:57,763 INFO [train.py:901] (2/4) Epoch 24, batch 0, loss[loss=0.2107, simple_loss=0.2965, pruned_loss=0.06246, over 8493.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2965, pruned_loss=0.06246, over 8493.00 frames. ], batch size: 26, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:19:57,763 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 06:20:01,675 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6433, 1.4850, 1.7678, 1.4545, 0.9422, 1.5211, 1.6903, 1.3955], + device='cuda:2'), covar=tensor([0.0666, 0.1352, 0.1749, 0.1499, 0.0646, 0.1577, 0.0690, 0.0692], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 06:20:09,066 INFO [train.py:935] (2/4) Epoch 24, validation: loss=0.1731, simple_loss=0.2733, pruned_loss=0.03644, over 944034.00 frames. +2023-02-07 06:20:09,067 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 06:20:23,915 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 06:20:35,543 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:20:44,033 INFO [train.py:901] (2/4) Epoch 24, batch 50, loss[loss=0.1671, simple_loss=0.2434, pruned_loss=0.04544, over 7715.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2855, pruned_loss=0.06133, over 361757.31 frames. ], batch size: 18, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:20:57,559 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 06:21:11,398 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.437e+02 2.851e+02 3.663e+02 1.155e+03, threshold=5.702e+02, percent-clipped=3.0 +2023-02-07 06:21:20,557 INFO [train.py:901] (2/4) Epoch 24, batch 100, loss[loss=0.2487, simple_loss=0.3292, pruned_loss=0.0841, over 8327.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2902, pruned_loss=0.06372, over 641672.30 frames. ], batch size: 25, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:21:22,593 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 06:21:41,718 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5731, 1.8119, 5.7772, 2.2746, 5.2472, 4.8570, 5.3425, 5.2483], + device='cuda:2'), covar=tensor([0.0579, 0.4640, 0.0366, 0.3822, 0.0923, 0.0895, 0.0504, 0.0567], + device='cuda:2'), in_proj_covar=tensor([0.0645, 0.0655, 0.0708, 0.0641, 0.0720, 0.0620, 0.0615, 0.0690], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:21:56,096 INFO [train.py:901] (2/4) Epoch 24, batch 150, loss[loss=0.1656, simple_loss=0.2426, pruned_loss=0.04425, over 8088.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2882, pruned_loss=0.06199, over 855920.89 frames. ], batch size: 21, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:00,573 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.72 vs. limit=5.0 +2023-02-07 06:22:03,342 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 06:22:07,731 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0536, 1.2177, 1.2235, 0.7578, 1.2045, 1.0907, 0.1289, 1.2163], + device='cuda:2'), covar=tensor([0.0434, 0.0389, 0.0365, 0.0549, 0.0426, 0.0943, 0.0851, 0.0322], + device='cuda:2'), in_proj_covar=tensor([0.0462, 0.0402, 0.0354, 0.0455, 0.0385, 0.0545, 0.0398, 0.0428], + device='cuda:2'), out_proj_covar=tensor([1.2331e-04, 1.0515e-04, 9.2994e-05, 1.1942e-04, 1.0126e-04, 1.5330e-04, + 1.0710e-04, 1.1294e-04], device='cuda:2') +2023-02-07 06:22:21,896 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.384e+02 2.880e+02 3.401e+02 7.597e+02, threshold=5.761e+02, percent-clipped=1.0 +2023-02-07 06:22:30,261 INFO [train.py:901] (2/4) Epoch 24, batch 200, loss[loss=0.1782, simple_loss=0.2676, pruned_loss=0.0444, over 8357.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2866, pruned_loss=0.06106, over 1024272.48 frames. ], batch size: 24, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:35,413 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8509, 1.4354, 3.5045, 1.5315, 2.4010, 3.8034, 3.9024, 3.3002], + device='cuda:2'), covar=tensor([0.1268, 0.1914, 0.0280, 0.2050, 0.1060, 0.0220, 0.0430, 0.0526], + device='cuda:2'), in_proj_covar=tensor([0.0297, 0.0321, 0.0286, 0.0317, 0.0314, 0.0269, 0.0426, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:22:38,159 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:22:40,033 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:01,369 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-07 06:23:05,571 INFO [train.py:901] (2/4) Epoch 24, batch 250, loss[loss=0.2487, simple_loss=0.3252, pruned_loss=0.08609, over 6672.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2856, pruned_loss=0.061, over 1149499.44 frames. ], batch size: 72, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:07,697 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186161.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:23:16,544 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-07 06:23:18,802 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186176.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:25,610 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 06:23:32,345 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.402e+02 3.098e+02 3.972e+02 8.418e+02, threshold=6.197e+02, percent-clipped=5.0 +2023-02-07 06:23:36,037 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186201.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:40,506 INFO [train.py:901] (2/4) Epoch 24, batch 300, loss[loss=0.1975, simple_loss=0.2753, pruned_loss=0.05985, over 7811.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2871, pruned_loss=0.0615, over 1254186.77 frames. ], batch size: 19, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:53,002 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186226.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,553 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,861 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 06:24:13,913 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9836, 3.8039, 2.2423, 2.7787, 2.6553, 2.1931, 2.7851, 3.0198], + device='cuda:2'), covar=tensor([0.1528, 0.0296, 0.1178, 0.0754, 0.0814, 0.1351, 0.0977, 0.0933], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0235, 0.0337, 0.0311, 0.0303, 0.0342, 0.0350, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 06:24:15,084 INFO [train.py:901] (2/4) Epoch 24, batch 350, loss[loss=0.2768, simple_loss=0.3453, pruned_loss=0.1042, over 7382.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2873, pruned_loss=0.06129, over 1335754.39 frames. 
], batch size: 71, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:24:24,625 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2036, 4.1639, 3.7892, 2.2002, 3.7543, 3.8464, 3.7040, 3.7269], + device='cuda:2'), covar=tensor([0.0763, 0.0577, 0.1098, 0.3984, 0.0892, 0.0777, 0.1253, 0.0764], + device='cuda:2'), in_proj_covar=tensor([0.0526, 0.0445, 0.0431, 0.0541, 0.0431, 0.0446, 0.0426, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:24:28,140 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186276.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 06:24:28,716 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:42,184 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.428e+02 2.971e+02 3.348e+02 5.777e+02, threshold=5.941e+02, percent-clipped=0.0 +2023-02-07 06:24:47,709 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5924, 4.6469, 4.1602, 2.1649, 4.0607, 4.1984, 4.1402, 4.0573], + device='cuda:2'), covar=tensor([0.0701, 0.0501, 0.1062, 0.4241, 0.0881, 0.0774, 0.1203, 0.0688], + device='cuda:2'), in_proj_covar=tensor([0.0527, 0.0445, 0.0431, 0.0541, 0.0431, 0.0446, 0.0426, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:24:50,342 INFO [train.py:901] (2/4) Epoch 24, batch 400, loss[loss=0.2355, simple_loss=0.3229, pruned_loss=0.07408, over 8328.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2868, pruned_loss=0.06082, over 1398145.30 frames. ], batch size: 25, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:26,090 INFO [train.py:901] (2/4) Epoch 24, batch 450, loss[loss=0.1988, simple_loss=0.2984, pruned_loss=0.04955, over 8186.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2881, pruned_loss=0.0607, over 1452321.61 frames. ], batch size: 23, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:52,928 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.487e+02 2.919e+02 3.580e+02 7.824e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-07 06:26:02,021 INFO [train.py:901] (2/4) Epoch 24, batch 500, loss[loss=0.2054, simple_loss=0.2916, pruned_loss=0.05966, over 8300.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2868, pruned_loss=0.05997, over 1490712.31 frames. ], batch size: 23, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:23,360 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:37,824 INFO [train.py:901] (2/4) Epoch 24, batch 550, loss[loss=0.1868, simple_loss=0.2677, pruned_loss=0.05299, over 8187.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2873, pruned_loss=0.06089, over 1513674.72 frames. 
], batch size: 23, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:40,776 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:55,558 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7725, 1.9312, 2.0812, 1.3773, 2.1781, 1.5459, 0.6655, 1.9788], + device='cuda:2'), covar=tensor([0.0681, 0.0407, 0.0366, 0.0716, 0.0499, 0.1014, 0.0991, 0.0345], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0399, 0.0351, 0.0449, 0.0382, 0.0539, 0.0393, 0.0426], + device='cuda:2'), out_proj_covar=tensor([1.2231e-04, 1.0434e-04, 9.2115e-05, 1.1794e-04, 1.0032e-04, 1.5167e-04, + 1.0568e-04, 1.1243e-04], device='cuda:2') +2023-02-07 06:27:01,113 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:01,713 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:03,582 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.350e+02 3.005e+02 3.846e+02 7.955e+02, threshold=6.011e+02, percent-clipped=1.0 +2023-02-07 06:27:12,601 INFO [train.py:901] (2/4) Epoch 24, batch 600, loss[loss=0.1921, simple_loss=0.2725, pruned_loss=0.05584, over 8086.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2868, pruned_loss=0.0605, over 1536958.39 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 16.0 +2023-02-07 06:27:19,641 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:21,528 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186520.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:23,346 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.51 vs. limit=5.0 +2023-02-07 06:27:26,201 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 06:27:29,792 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186532.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:27:31,735 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:40,300 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7250, 1.4761, 3.1159, 1.4138, 2.2279, 3.3597, 3.4458, 2.8447], + device='cuda:2'), covar=tensor([0.1270, 0.1727, 0.0320, 0.2114, 0.0981, 0.0263, 0.0588, 0.0559], + device='cuda:2'), in_proj_covar=tensor([0.0297, 0.0321, 0.0286, 0.0314, 0.0312, 0.0268, 0.0424, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:27:46,469 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186557.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:46,539 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186557.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:27:47,019 INFO [train.py:901] (2/4) Epoch 24, batch 650, loss[loss=0.2481, simple_loss=0.3329, pruned_loss=0.08165, over 8518.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2876, pruned_loss=0.06119, over 1551600.91 frames. 
], batch size: 49, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:27:52,098 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1436, 1.8969, 3.3329, 1.6777, 2.5484, 3.7436, 3.6957, 3.2601], + device='cuda:2'), covar=tensor([0.1115, 0.1649, 0.0390, 0.1985, 0.1197, 0.0212, 0.0618, 0.0517], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0321, 0.0285, 0.0314, 0.0311, 0.0268, 0.0423, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:28:01,233 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:07,510 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:13,143 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7673, 1.4464, 2.8190, 1.3443, 2.2068, 2.9815, 3.1868, 2.5805], + device='cuda:2'), covar=tensor([0.1133, 0.1704, 0.0368, 0.2206, 0.0930, 0.0316, 0.0560, 0.0582], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0320, 0.0284, 0.0312, 0.0310, 0.0267, 0.0422, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:28:15,659 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.377e+02 2.753e+02 3.513e+02 8.271e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 06:28:20,763 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:23,403 INFO [train.py:901] (2/4) Epoch 24, batch 700, loss[loss=0.1691, simple_loss=0.2466, pruned_loss=0.04586, over 7562.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06013, over 1567711.49 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:28:24,187 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7993, 5.8855, 5.1640, 2.6435, 5.2338, 5.5312, 5.4108, 5.4255], + device='cuda:2'), covar=tensor([0.0483, 0.0385, 0.0874, 0.4005, 0.0669, 0.0791, 0.1017, 0.0568], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0446, 0.0432, 0.0543, 0.0430, 0.0448, 0.0427, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:28:33,216 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186621.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:43,748 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186635.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:59,668 INFO [train.py:901] (2/4) Epoch 24, batch 750, loss[loss=0.1603, simple_loss=0.2427, pruned_loss=0.03896, over 7542.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2864, pruned_loss=0.06007, over 1581433.09 frames. 
], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:00,494 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7736, 2.0970, 2.3065, 1.2564, 2.3053, 1.6248, 0.7719, 1.9077], + device='cuda:2'), covar=tensor([0.0802, 0.0416, 0.0326, 0.0751, 0.0514, 0.0936, 0.1069, 0.0417], + device='cuda:2'), in_proj_covar=tensor([0.0457, 0.0397, 0.0350, 0.0447, 0.0379, 0.0536, 0.0391, 0.0425], + device='cuda:2'), out_proj_covar=tensor([1.2187e-04, 1.0376e-04, 9.1745e-05, 1.1728e-04, 9.9550e-05, 1.5077e-04, + 1.0520e-04, 1.1211e-04], device='cuda:2') +2023-02-07 06:29:11,911 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 06:29:16,209 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2040, 4.1475, 3.7700, 1.9052, 3.7004, 3.7723, 3.7022, 3.5982], + device='cuda:2'), covar=tensor([0.0799, 0.0655, 0.1081, 0.4681, 0.0888, 0.1181, 0.1431, 0.0914], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0446, 0.0433, 0.0542, 0.0430, 0.0448, 0.0427, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:29:21,531 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 06:29:27,092 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.532e+02 3.077e+02 4.008e+02 9.294e+02, threshold=6.153e+02, percent-clipped=8.0 +2023-02-07 06:29:31,907 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 06:29:35,762 INFO [train.py:901] (2/4) Epoch 24, batch 800, loss[loss=0.239, simple_loss=0.308, pruned_loss=0.08498, over 8346.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.286, pruned_loss=0.06039, over 1589254.40 frames. ], batch size: 26, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:48,934 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:29:56,174 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186736.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:11,914 INFO [train.py:901] (2/4) Epoch 24, batch 850, loss[loss=0.1948, simple_loss=0.2706, pruned_loss=0.05955, over 6746.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2862, pruned_loss=0.05988, over 1595268.13 frames. ], batch size: 15, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:30:29,503 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:39,065 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.330e+02 2.764e+02 3.350e+02 7.186e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 06:30:47,647 INFO [train.py:901] (2/4) Epoch 24, batch 900, loss[loss=0.1828, simple_loss=0.2774, pruned_loss=0.04413, over 8716.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2867, pruned_loss=0.0601, over 1603940.15 frames. 
], batch size: 34, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:05,965 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:08,582 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:15,808 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3324, 2.1092, 2.6342, 2.2456, 2.6404, 2.3354, 2.1877, 1.6383], + device='cuda:2'), covar=tensor([0.4938, 0.4731, 0.1879, 0.3346, 0.2240, 0.2845, 0.1753, 0.4881], + device='cuda:2'), in_proj_covar=tensor([0.0948, 0.0992, 0.0812, 0.0958, 0.0998, 0.0901, 0.0756, 0.0830], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 06:31:24,267 INFO [train.py:901] (2/4) Epoch 24, batch 950, loss[loss=0.1924, simple_loss=0.2753, pruned_loss=0.05475, over 7969.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2864, pruned_loss=0.06001, over 1607327.68 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:24,450 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:24,512 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:30,134 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1127, 1.5024, 1.6899, 1.3708, 0.9700, 1.4929, 1.8077, 1.8116], + device='cuda:2'), covar=tensor([0.0548, 0.1261, 0.1741, 0.1511, 0.0592, 0.1503, 0.0670, 0.0608], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 06:31:39,988 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186879.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:43,466 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 06:31:48,340 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186891.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:50,408 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6229, 2.6277, 1.8994, 2.3297, 2.1727, 1.5234, 2.1465, 2.2650], + device='cuda:2'), covar=tensor([0.1551, 0.0393, 0.1202, 0.0705, 0.0773, 0.1659, 0.1071, 0.0968], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0310, 0.0301, 0.0341, 0.0346, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 06:31:52,263 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.324e+02 2.850e+02 3.567e+02 7.043e+02, threshold=5.700e+02, percent-clipped=2.0 +2023-02-07 06:31:53,149 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:55,093 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:59,853 INFO [train.py:901] (2/4) Epoch 24, batch 1000, loss[loss=0.2711, simple_loss=0.3556, pruned_loss=0.09328, over 8559.00 frames. 
], tot_loss[loss=0.2034, simple_loss=0.2867, pruned_loss=0.06, over 1614436.93 frames. ], batch size: 31, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:32:05,644 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,421 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,491 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:19,256 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2843, 2.1044, 1.5912, 1.9255, 1.7001, 1.4032, 1.6920, 1.6797], + device='cuda:2'), covar=tensor([0.1320, 0.0476, 0.1283, 0.0583, 0.0753, 0.1554, 0.0984, 0.0966], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0235, 0.0336, 0.0310, 0.0301, 0.0341, 0.0347, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 06:32:20,497 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 06:32:29,434 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186948.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:32,167 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:33,359 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 06:32:36,112 INFO [train.py:901] (2/4) Epoch 24, batch 1050, loss[loss=0.1929, simple_loss=0.2673, pruned_loss=0.05921, over 7541.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2861, pruned_loss=0.05985, over 1610473.81 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:00,058 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2528, 1.2503, 3.3874, 1.0303, 3.0103, 2.8833, 3.0957, 3.0197], + device='cuda:2'), covar=tensor([0.0816, 0.4171, 0.0820, 0.4123, 0.1321, 0.1124, 0.0791, 0.0840], + device='cuda:2'), in_proj_covar=tensor([0.0642, 0.0649, 0.0703, 0.0634, 0.0718, 0.0618, 0.0610, 0.0686], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:33:01,571 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186992.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:02,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:04,738 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.475e+02 2.949e+02 3.829e+02 9.793e+02, threshold=5.897e+02, percent-clipped=8.0 +2023-02-07 06:33:12,427 INFO [train.py:901] (2/4) Epoch 24, batch 1100, loss[loss=0.1817, simple_loss=0.2595, pruned_loss=0.05191, over 7989.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.286, pruned_loss=0.05989, over 1610592.43 frames. 
], batch size: 21, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:12,688 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6891, 2.1878, 4.1004, 1.4766, 2.8919, 2.1373, 1.8368, 2.9917], + device='cuda:2'), covar=tensor([0.2041, 0.3010, 0.0855, 0.4984, 0.2021, 0.3504, 0.2490, 0.2406], + device='cuda:2'), in_proj_covar=tensor([0.0527, 0.0615, 0.0556, 0.0650, 0.0653, 0.0600, 0.0544, 0.0634], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:33:18,320 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:19,042 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:37,517 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:38,301 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:45,706 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 06:33:47,140 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:48,355 INFO [train.py:901] (2/4) Epoch 24, batch 1150, loss[loss=0.2006, simple_loss=0.2834, pruned_loss=0.0589, over 8562.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2867, pruned_loss=0.06026, over 1609794.66 frames. ], batch size: 49, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:49,930 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0196, 1.5167, 3.5720, 1.5587, 2.5057, 3.9088, 4.0445, 3.3823], + device='cuda:2'), covar=tensor([0.1142, 0.1892, 0.0283, 0.1941, 0.1001, 0.0209, 0.0583, 0.0509], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0324, 0.0288, 0.0317, 0.0315, 0.0270, 0.0427, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:33:52,048 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:57,506 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187071.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:34:02,532 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0504, 1.6035, 1.3552, 1.5442, 1.3156, 1.2347, 1.3249, 1.2973], + device='cuda:2'), covar=tensor([0.1020, 0.0477, 0.1243, 0.0539, 0.0729, 0.1444, 0.0878, 0.0770], + device='cuda:2'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0309, 0.0301, 0.0341, 0.0346, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 06:34:16,153 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.205e+02 2.742e+02 3.279e+02 6.267e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 06:34:24,623 INFO [train.py:901] (2/4) Epoch 24, batch 1200, loss[loss=0.2068, simple_loss=0.2985, pruned_loss=0.05757, over 8291.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.286, pruned_loss=0.06005, over 1607479.17 frames. 
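Most of the `zipformer.py:1185` records track a per-module warmup window (`warmup_begin`/`warmup_end`, in batches) alongside a stochastic layer-skipping decision; nearly every batch drops nothing (`num_to_drop=0, layers_to_drop=set()`), and a rare drop event (`num_to_drop=1, layers_to_drop={0}`) appears near the end of this section. A sketch of the drop decision only; the probability value and its interaction with the warmup window are assumptions, not read from `zipformer.py`.

```python
import random

def choose_layers_to_drop(num_layers: int, layer_drop_prob: float = 0.01) -> set:
    """Illustrative stand-in for the decision behind the
    num_to_drop/layers_to_drop fields: each encoder layer is skipped
    independently with a small assumed probability, so most batches
    return set() and an occasional batch returns e.g. {0}."""
    return {i for i in range(num_layers) if random.random() < layer_drop_prob}

# num_to_drop = len(layers_to_drop) is what the log prints.
```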
], batch size: 23, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:34:57,405 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:00,007 INFO [train.py:901] (2/4) Epoch 24, batch 1250, loss[loss=0.194, simple_loss=0.2797, pruned_loss=0.05413, over 8502.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2862, pruned_loss=0.06054, over 1610900.42 frames. ], batch size: 26, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:15,160 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:19,762 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187186.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:27,951 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.417e+02 2.916e+02 3.659e+02 9.833e+02, threshold=5.832e+02, percent-clipped=6.0 +2023-02-07 06:35:30,893 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187202.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:35,743 INFO [train.py:901] (2/4) Epoch 24, batch 1300, loss[loss=0.2152, simple_loss=0.2922, pruned_loss=0.06909, over 8830.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2864, pruned_loss=0.06013, over 1616386.05 frames. ], batch size: 40, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:35,952 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187208.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:51,427 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-07 06:35:53,890 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187233.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:05,804 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187250.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:10,944 INFO [train.py:901] (2/4) Epoch 24, batch 1350, loss[loss=0.1768, simple_loss=0.2506, pruned_loss=0.05152, over 7421.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2863, pruned_loss=0.05987, over 1619410.69 frames. ], batch size: 17, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:20,761 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:21,330 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:22,835 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,324 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,749 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.391e+02 3.088e+02 3.702e+02 1.176e+03, threshold=6.175e+02, percent-clipped=8.0 +2023-02-07 06:36:41,381 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187300.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:46,710 INFO [train.py:901] (2/4) Epoch 24, batch 1400, loss[loss=0.1647, simple_loss=0.2509, pruned_loss=0.03924, over 7922.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2869, pruned_loss=0.06049, over 1618584.45 frames. 
], batch size: 20, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:52,967 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187317.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:54,421 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:59,275 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:03,363 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9407, 2.3617, 4.2871, 1.6721, 3.1490, 2.4234, 2.0737, 3.0454], + device='cuda:2'), covar=tensor([0.1799, 0.2550, 0.0701, 0.4456, 0.1633, 0.3000, 0.2224, 0.2303], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0617, 0.0558, 0.0652, 0.0653, 0.0600, 0.0545, 0.0636], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:37:05,352 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2608, 3.6471, 2.3822, 2.9519, 2.7933, 2.1092, 2.9791, 3.0780], + device='cuda:2'), covar=tensor([0.1669, 0.0391, 0.1197, 0.0760, 0.0767, 0.1459, 0.1024, 0.1151], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0233, 0.0335, 0.0308, 0.0300, 0.0338, 0.0346, 0.0316], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 06:37:12,877 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187344.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:16,689 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0 +2023-02-07 06:37:21,749 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 06:37:22,421 INFO [train.py:901] (2/4) Epoch 24, batch 1450, loss[loss=0.1996, simple_loss=0.2941, pruned_loss=0.05258, over 8752.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.286, pruned_loss=0.05987, over 1616044.91 frames. ], batch size: 30, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:37:42,337 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:43,074 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:44,409 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:49,533 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.482e+02 2.870e+02 4.012e+02 8.494e+02, threshold=5.740e+02, percent-clipped=8.0 +2023-02-07 06:37:51,822 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:56,992 INFO [train.py:901] (2/4) Epoch 24, batch 1500, loss[loss=0.2154, simple_loss=0.3009, pruned_loss=0.06493, over 8496.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05925, over 1616008.31 frames. 
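In the `train.py:901` progress records, `loss[...]` describes the current batch (with its own frame count) while `tot_loss[...]` is a frame-weighted running average: its "over N frames" count hovers around 1.6M throughout this section instead of growing without bound, which indicates a decayed accumulation. A sketch of that bookkeeping; the decay constant is an assumption, and the real `train.py` keeps these statistics in its own metrics objects.

```python
class RunningLoss:
    """Frame-weighted running average with exponential decay
    (illustrative; mirrors how the tot_loss 'over N frames' count
    saturates in the records above)."""

    def __init__(self, decay: float = 0.995):  # assumed decay value
        self.decay = decay
        self.loss_sum = 0.0
        self.frames = 0.0

    def update(self, batch_loss: float, num_frames: float) -> None:
        self.loss_sum = self.decay * self.loss_sum + batch_loss * num_frames
        self.frames = self.decay * self.frames + num_frames

    @property
    def loss(self) -> float:
        return self.loss_sum / max(self.frames, 1.0)
```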
], batch size: 27, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:22,162 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:33,247 INFO [train.py:901] (2/4) Epoch 24, batch 1550, loss[loss=0.2127, simple_loss=0.2949, pruned_loss=0.06527, over 8285.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.0599, over 1618900.38 frames. ], batch size: 23, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:39,575 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:59,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.355e+02 2.764e+02 3.622e+02 7.454e+02, threshold=5.529e+02, percent-clipped=4.0 +2023-02-07 06:39:02,835 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:06,537 INFO [train.py:901] (2/4) Epoch 24, batch 1600, loss[loss=0.1984, simple_loss=0.2696, pruned_loss=0.06364, over 7430.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2856, pruned_loss=0.05963, over 1618559.84 frames. ], batch size: 17, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:11,516 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:22,052 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:27,626 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:30,380 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:41,433 INFO [train.py:901] (2/4) Epoch 24, batch 1650, loss[loss=0.206, simple_loss=0.2933, pruned_loss=0.05937, over 8091.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2854, pruned_loss=0.05941, over 1619668.79 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:52,523 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:09,686 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.451e+02 2.921e+02 3.516e+02 7.853e+02, threshold=5.842e+02, percent-clipped=7.0 +2023-02-07 06:40:09,873 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187598.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:10,451 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3385, 1.2338, 2.4122, 1.3753, 2.2145, 2.5421, 2.7104, 2.2116], + device='cuda:2'), covar=tensor([0.1176, 0.1391, 0.0398, 0.1986, 0.0734, 0.0381, 0.0668, 0.0638], + device='cuda:2'), in_proj_covar=tensor([0.0295, 0.0319, 0.0283, 0.0312, 0.0311, 0.0267, 0.0422, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:40:16,432 INFO [train.py:901] (2/4) Epoch 24, batch 1700, loss[loss=0.2287, simple_loss=0.3087, pruned_loss=0.07434, over 7168.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2863, pruned_loss=0.06022, over 1620087.23 frames. 
], batch size: 16, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:40,839 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187644.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:50,891 INFO [train.py:901] (2/4) Epoch 24, batch 1750, loss[loss=0.196, simple_loss=0.2747, pruned_loss=0.05861, over 8081.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2865, pruned_loss=0.06057, over 1618995.91 frames. ], batch size: 21, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:58,580 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:18,565 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.502e+02 3.000e+02 3.757e+02 9.885e+02, threshold=5.999e+02, percent-clipped=2.0 +2023-02-07 06:41:25,258 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 06:41:26,170 INFO [train.py:901] (2/4) Epoch 24, batch 1800, loss[loss=0.195, simple_loss=0.2887, pruned_loss=0.05067, over 8483.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2854, pruned_loss=0.06038, over 1613668.61 frames. ], batch size: 25, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:43,298 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:59,215 INFO [train.py:901] (2/4) Epoch 24, batch 1850, loss[loss=0.2402, simple_loss=0.315, pruned_loss=0.08272, over 8617.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2859, pruned_loss=0.06058, over 1615687.36 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:59,464 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187758.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:09,322 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:18,458 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:27,406 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:28,601 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 2.492e+02 2.912e+02 4.002e+02 8.326e+02, threshold=5.824e+02, percent-clipped=6.0 +2023-02-07 06:42:36,380 INFO [train.py:901] (2/4) Epoch 24, batch 1900, loss[loss=0.2149, simple_loss=0.2965, pruned_loss=0.06665, over 8456.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2851, pruned_loss=0.06018, over 1612064.01 frames. ], batch size: 49, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:42:42,657 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.2617, 5.2090, 4.7096, 2.7859, 4.7169, 4.9382, 4.8307, 4.7744], + device='cuda:2'), covar=tensor([0.0574, 0.0415, 0.0851, 0.3799, 0.0833, 0.1042, 0.1029, 0.0712], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0543, 0.0435, 0.0448, 0.0428, 0.0391], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:42:49,918 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.47 vs. limit=2.0 +2023-02-07 06:43:02,032 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-07 06:43:05,614 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 06:43:05,816 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:11,795 INFO [train.py:901] (2/4) Epoch 24, batch 1950, loss[loss=0.1777, simple_loss=0.2599, pruned_loss=0.04773, over 7419.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.285, pruned_loss=0.05996, over 1614363.39 frames. ], batch size: 17, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:43:18,404 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 06:43:22,599 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:27,989 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:30,733 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187885.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:39,205 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 2.375e+02 2.745e+02 3.412e+02 6.105e+02, threshold=5.491e+02, percent-clipped=1.0 +2023-02-07 06:43:39,241 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 06:43:42,659 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4408, 1.3483, 1.7863, 1.2127, 1.1050, 1.7437, 0.2391, 1.2147], + device='cuda:2'), covar=tensor([0.1615, 0.1219, 0.0395, 0.0958, 0.2541, 0.0439, 0.1886, 0.1134], + device='cuda:2'), in_proj_covar=tensor([0.0194, 0.0200, 0.0132, 0.0222, 0.0273, 0.0137, 0.0171, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 06:43:46,284 INFO [train.py:901] (2/4) Epoch 24, batch 2000, loss[loss=0.2026, simple_loss=0.2947, pruned_loss=0.05532, over 8454.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2866, pruned_loss=0.0606, over 1618952.45 frames. ], batch size: 27, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:21,541 INFO [train.py:901] (2/4) Epoch 24, batch 2050, loss[loss=0.1997, simple_loss=0.2893, pruned_loss=0.05506, over 8492.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2863, pruned_loss=0.06013, over 1619152.30 frames. 
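The `WARNING [train.py:1067] Exclude cut with ID ... Duration: ...` lines come from a duration filter on the training cuts: every excluded cut in this log is either shorter than about 1 s (e.g. 0.7545625) or longer than about 25 s (e.g. 27.92). A lhotse-style sketch of such a filter; the exact bounds are inferred from the excluded durations, not read from the recipe's source.

```python
import logging

def remove_short_and_long_utt(c) -> bool:
    """Keep only cuts whose duration falls inside the training window
    (bounds assumed from this log)."""
    if c.duration < 1.0 or c.duration > 25.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# train_cuts = train_cuts.filter(remove_short_and_long_utt)  # lhotse CutSet
```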
], batch size: 29, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:23,778 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0128, 1.2778, 1.2458, 0.6585, 1.2436, 1.0701, 0.1250, 1.1965], + device='cuda:2'), covar=tensor([0.0443, 0.0396, 0.0365, 0.0597, 0.0503, 0.1040, 0.0901, 0.0362], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0400, 0.0354, 0.0450, 0.0384, 0.0539, 0.0394, 0.0425], + device='cuda:2'), out_proj_covar=tensor([1.2219e-04, 1.0450e-04, 9.3072e-05, 1.1817e-04, 1.0087e-04, 1.5174e-04, + 1.0582e-04, 1.1198e-04], device='cuda:2') +2023-02-07 06:44:42,292 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:46,912 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:48,824 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.387e+02 2.958e+02 3.531e+02 6.524e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 06:44:51,435 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:56,704 INFO [train.py:901] (2/4) Epoch 24, batch 2100, loss[loss=0.1847, simple_loss=0.281, pruned_loss=0.04425, over 8189.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2866, pruned_loss=0.06053, over 1617658.94 frames. ], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:58,672 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 06:45:31,694 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6719, 1.5887, 2.2995, 1.5693, 1.2906, 2.2001, 0.3360, 1.4467], + device='cuda:2'), covar=tensor([0.1640, 0.1257, 0.0332, 0.1182, 0.2642, 0.0460, 0.2053, 0.1203], + device='cuda:2'), in_proj_covar=tensor([0.0194, 0.0200, 0.0132, 0.0222, 0.0273, 0.0137, 0.0172, 0.0195], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 06:45:32,170 INFO [train.py:901] (2/4) Epoch 24, batch 2150, loss[loss=0.2087, simple_loss=0.2943, pruned_loss=0.06152, over 8343.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2866, pruned_loss=0.06056, over 1617161.42 frames. ], batch size: 24, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:45:47,941 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6401, 2.3952, 3.2437, 2.6440, 3.0403, 2.7092, 2.5269, 1.8049], + device='cuda:2'), covar=tensor([0.5317, 0.5153, 0.1842, 0.3855, 0.2586, 0.2894, 0.1755, 0.5304], + device='cuda:2'), in_proj_covar=tensor([0.0951, 0.1000, 0.0818, 0.0965, 0.1002, 0.0908, 0.0761, 0.0835], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 06:45:58,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.464e+02 3.048e+02 3.692e+02 7.821e+02, threshold=6.095e+02, percent-clipped=5.0 +2023-02-07 06:46:03,900 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:05,701 INFO [train.py:901] (2/4) Epoch 24, batch 2200, loss[loss=0.2217, simple_loss=0.3007, pruned_loss=0.07131, over 8301.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2871, pruned_loss=0.06099, over 1613888.17 frames. 
], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:46:21,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188130.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:41,360 INFO [train.py:901] (2/4) Epoch 24, batch 2250, loss[loss=0.1791, simple_loss=0.2615, pruned_loss=0.04833, over 7928.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2858, pruned_loss=0.0603, over 1610313.73 frames. ], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:09,411 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.349e+02 3.047e+02 3.898e+02 9.680e+02, threshold=6.095e+02, percent-clipped=4.0 +2023-02-07 06:47:16,302 INFO [train.py:901] (2/4) Epoch 24, batch 2300, loss[loss=0.2168, simple_loss=0.3017, pruned_loss=0.06597, over 8514.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2858, pruned_loss=0.05993, over 1612748.20 frames. ], batch size: 49, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:42,368 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:47,130 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:50,575 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:51,707 INFO [train.py:901] (2/4) Epoch 24, batch 2350, loss[loss=0.2292, simple_loss=0.3088, pruned_loss=0.07478, over 8470.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2847, pruned_loss=0.05967, over 1609316.62 frames. ], batch size: 25, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:00,162 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188270.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:05,509 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:08,186 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188281.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:20,112 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.514e+02 3.085e+02 3.939e+02 8.316e+02, threshold=6.171e+02, percent-clipped=4.0 +2023-02-07 06:48:22,399 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:27,099 INFO [train.py:901] (2/4) Epoch 24, batch 2400, loss[loss=0.1831, simple_loss=0.2546, pruned_loss=0.05574, over 7203.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2848, pruned_loss=0.06012, over 1606684.69 frames. ], batch size: 16, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:30,048 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4778, 1.2800, 2.3473, 1.2903, 2.1989, 2.5124, 2.7028, 2.1620], + device='cuda:2'), covar=tensor([0.1100, 0.1438, 0.0470, 0.2113, 0.0763, 0.0392, 0.0695, 0.0658], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0323, 0.0285, 0.0315, 0.0314, 0.0270, 0.0425, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 06:49:00,630 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.70 vs. 
limit=5.0 +2023-02-07 06:49:02,251 INFO [train.py:901] (2/4) Epoch 24, batch 2450, loss[loss=0.208, simple_loss=0.2999, pruned_loss=0.05806, over 8696.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2855, pruned_loss=0.06037, over 1607216.53 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:49:30,924 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.263e+02 2.982e+02 3.612e+02 7.179e+02, threshold=5.965e+02, percent-clipped=1.0 +2023-02-07 06:49:38,096 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-07 06:49:38,406 INFO [train.py:901] (2/4) Epoch 24, batch 2500, loss[loss=0.184, simple_loss=0.2632, pruned_loss=0.05239, over 8030.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2844, pruned_loss=0.05949, over 1612123.48 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:11,881 INFO [train.py:901] (2/4) Epoch 24, batch 2550, loss[loss=0.2257, simple_loss=0.3146, pruned_loss=0.0684, over 8260.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2843, pruned_loss=0.05954, over 1611872.57 frames. ], batch size: 49, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:40,459 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 2.538e+02 2.905e+02 3.766e+02 9.788e+02, threshold=5.809e+02, percent-clipped=4.0 +2023-02-07 06:50:41,319 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:50:47,914 INFO [train.py:901] (2/4) Epoch 24, batch 2600, loss[loss=0.1722, simple_loss=0.2448, pruned_loss=0.04983, over 7931.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2851, pruned_loss=0.06002, over 1611153.27 frames. ], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:21,923 INFO [train.py:901] (2/4) Epoch 24, batch 2650, loss[loss=0.1815, simple_loss=0.2628, pruned_loss=0.0501, over 7794.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2836, pruned_loss=0.05972, over 1610182.34 frames. ], batch size: 19, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:31,890 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-07 06:51:41,773 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. limit=5.0 +2023-02-07 06:51:48,597 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.338e+02 2.924e+02 3.924e+02 7.774e+02, threshold=5.847e+02, percent-clipped=4.0 +2023-02-07 06:51:55,409 INFO [train.py:901] (2/4) Epoch 24, batch 2700, loss[loss=0.1896, simple_loss=0.2794, pruned_loss=0.04988, over 8566.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2845, pruned_loss=0.06013, over 1611252.01 frames. 
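The `scaling.py:679` records (`Whitening: num_groups=..., num_channels=..., metric=... vs. limit=...`) compare an anisotropy measure of the activations' covariance against a limit: a metric of 1.0 corresponds to equal covariance eigenvalues (fully "white" features), and a whitening penalty engages only when the metric exceeds the limit (2.0 or 5.0 in these records). One way to compute such a metric, loosely after icefall's `scaling.py`; the real implementation may differ in detail.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Anisotropy of per-group feature covariance.

    x: (num_frames, num_channels). Returns 1.0 when each group's
    covariance is a multiple of the identity, and larger values as
    the eigenvalue spectrum becomes more uneven.
    """
    num_frames, num_channels = x.shape
    x = x.reshape(num_frames, num_groups, num_channels // num_groups)
    x = x - x.mean(dim=0, keepdim=True)
    cov = torch.einsum("ngi,ngj->gij", x, x) / num_frames  # (groups, c, c)
    c = cov.shape[-1]
    num = (cov * cov).sum(dim=(1, 2)) * c                  # c * trace(cov @ cov)
    den = cov.diagonal(dim1=1, dim2=2).sum(dim=-1) ** 2    # trace(cov) ** 2
    return (num / den).mean()
```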
], batch size: 39, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:02,963 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2350, 3.1371, 2.9544, 1.6801, 2.8913, 2.9299, 2.9068, 2.7823], + device='cuda:2'), covar=tensor([0.1132, 0.0795, 0.1217, 0.4477, 0.1138, 0.1277, 0.1545, 0.1023], + device='cuda:2'), in_proj_covar=tensor([0.0521, 0.0439, 0.0426, 0.0535, 0.0426, 0.0441, 0.0421, 0.0386], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:52:21,731 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:52:31,276 INFO [train.py:901] (2/4) Epoch 24, batch 2750, loss[loss=0.2037, simple_loss=0.2886, pruned_loss=0.05942, over 8356.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2848, pruned_loss=0.06037, over 1611366.94 frames. ], batch size: 24, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:57,777 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.375e+02 3.087e+02 4.139e+02 1.460e+03, threshold=6.174e+02, percent-clipped=4.0 +2023-02-07 06:53:05,388 INFO [train.py:901] (2/4) Epoch 24, batch 2800, loss[loss=0.1879, simple_loss=0.2734, pruned_loss=0.05126, over 8136.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2849, pruned_loss=0.06067, over 1604621.37 frames. ], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:23,062 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0291, 1.8710, 2.4322, 2.0317, 2.4257, 2.1113, 1.9572, 1.2742], + device='cuda:2'), covar=tensor([0.6001, 0.5154, 0.2124, 0.4038, 0.2571, 0.3275, 0.2044, 0.5617], + device='cuda:2'), in_proj_covar=tensor([0.0947, 0.0992, 0.0814, 0.0956, 0.0995, 0.0905, 0.0756, 0.0832], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 06:53:40,615 INFO [train.py:901] (2/4) Epoch 24, batch 2850, loss[loss=0.2556, simple_loss=0.3323, pruned_loss=0.08941, over 8706.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2847, pruned_loss=0.06015, over 1607311.12 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:42,192 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:07,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.346e+02 3.064e+02 3.754e+02 6.997e+02, threshold=6.129e+02, percent-clipped=3.0 +2023-02-07 06:54:14,861 INFO [train.py:901] (2/4) Epoch 24, batch 2900, loss[loss=0.1843, simple_loss=0.2719, pruned_loss=0.04835, over 8137.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2861, pruned_loss=0.06053, over 1612846.06 frames. 
], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:54:39,477 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:48,791 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8915, 2.1305, 1.6898, 2.5392, 1.1278, 1.5135, 1.8291, 2.0067], + device='cuda:2'), covar=tensor([0.0727, 0.0680, 0.0940, 0.0379, 0.1105, 0.1304, 0.0781, 0.0855], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0197, 0.0242, 0.0214, 0.0205, 0.0246, 0.0249, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 06:54:51,391 INFO [train.py:901] (2/4) Epoch 24, batch 2950, loss[loss=0.2118, simple_loss=0.2916, pruned_loss=0.06597, over 8238.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2856, pruned_loss=0.06041, over 1610853.37 frames. ], batch size: 22, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:54:51,400 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 06:55:19,106 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.217e+02 2.685e+02 3.700e+02 9.567e+02, threshold=5.370e+02, percent-clipped=4.0 +2023-02-07 06:55:25,889 INFO [train.py:901] (2/4) Epoch 24, batch 3000, loss[loss=0.2231, simple_loss=0.2944, pruned_loss=0.07588, over 7136.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06109, over 1606091.71 frames. ], batch size: 73, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:55:25,889 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 06:55:39,550 INFO [train.py:935] (2/4) Epoch 24, validation: loss=0.1724, simple_loss=0.2726, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 06:55:39,551 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 06:56:05,991 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:07,362 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:13,965 INFO [train.py:901] (2/4) Epoch 24, batch 3050, loss[loss=0.2089, simple_loss=0.2882, pruned_loss=0.06482, over 7982.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2871, pruned_loss=0.0614, over 1609225.05 frames. ], batch size: 21, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:14,161 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:41,554 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.422e+02 3.010e+02 3.817e+02 9.746e+02, threshold=6.020e+02, percent-clipped=4.0 +2023-02-07 06:56:49,112 INFO [train.py:901] (2/4) Epoch 24, batch 3100, loss[loss=0.2225, simple_loss=0.3207, pruned_loss=0.06219, over 8327.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06132, over 1606860.41 frames. 
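The "Computing validation loss" / "validation: loss=0.1724 ... over 944034.00 frames" / "Maximum memory allocated so far is 6789MB" triple marks the periodic validation pass. A simplified sketch of the pattern; `model(batch) -> (loss, num_frames)` is an assumed interface rather than the recipe's actual signature, while `torch.cuda.max_memory_allocated()` is the standard counter behind the memory line.

```python
import torch

def compute_validation_loss(model, valid_loader) -> float:
    """Frame-weighted average loss over the validation set
    (simplified sketch, not the recipe's exact code)."""
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = model(batch)  # assumed interface
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()
    mb = torch.cuda.max_memory_allocated() // 2**20
    print(f"Maximum memory allocated so far is {mb}MB")
    return tot_loss / tot_frames
```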
], batch size: 26, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:54,798 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:58,620 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6688, 4.7168, 4.1907, 1.9835, 4.1140, 4.3501, 4.2853, 4.1302], + device='cuda:2'), covar=tensor([0.0730, 0.0536, 0.1038, 0.5207, 0.0887, 0.0924, 0.1309, 0.0710], + device='cuda:2'), in_proj_covar=tensor([0.0527, 0.0442, 0.0430, 0.0542, 0.0429, 0.0444, 0.0424, 0.0389], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 06:57:12,120 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:16,985 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:19,144 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5590, 1.5058, 2.0811, 1.4463, 1.1765, 2.0204, 0.4018, 1.2628], + device='cuda:2'), covar=tensor([0.1466, 0.1156, 0.0327, 0.0879, 0.2321, 0.0355, 0.1844, 0.1070], + device='cuda:2'), in_proj_covar=tensor([0.0194, 0.0202, 0.0132, 0.0223, 0.0274, 0.0139, 0.0173, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 06:57:23,526 INFO [train.py:901] (2/4) Epoch 24, batch 3150, loss[loss=0.2308, simple_loss=0.3152, pruned_loss=0.07314, over 8560.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06139, over 1604378.77 frames. ], batch size: 31, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:57:26,637 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-07 06:57:40,666 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:50,855 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:51,360 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.394e+02 2.917e+02 3.565e+02 6.979e+02, threshold=5.834e+02, percent-clipped=3.0 +2023-02-07 06:57:59,738 INFO [train.py:901] (2/4) Epoch 24, batch 3200, loss[loss=0.1898, simple_loss=0.2699, pruned_loss=0.05487, over 7540.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2863, pruned_loss=0.06071, over 1604175.55 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:58:33,956 INFO [train.py:901] (2/4) Epoch 24, batch 3250, loss[loss=0.1955, simple_loss=0.284, pruned_loss=0.05351, over 8031.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2867, pruned_loss=0.06072, over 1609062.49 frames. ], batch size: 22, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:01,534 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.438e+02 3.003e+02 3.759e+02 6.490e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-07 06:59:08,532 INFO [train.py:901] (2/4) Epoch 24, batch 3300, loss[loss=0.1713, simple_loss=0.2576, pruned_loss=0.04246, over 7923.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2863, pruned_loss=0.05998, over 1614420.16 frames. 
], batch size: 20, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:12,858 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:30,912 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:39,008 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6220, 2.6806, 1.9724, 2.2945, 2.2679, 1.7286, 2.1352, 2.2141], + device='cuda:2'), covar=tensor([0.1649, 0.0398, 0.1181, 0.0709, 0.0691, 0.1557, 0.1077, 0.1126], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0235, 0.0334, 0.0310, 0.0299, 0.0341, 0.0345, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 06:59:44,170 INFO [train.py:901] (2/4) Epoch 24, batch 3350, loss[loss=0.2092, simple_loss=0.2789, pruned_loss=0.06975, over 7150.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.286, pruned_loss=0.06005, over 1609362.02 frames. ], batch size: 16, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:49,413 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189266.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:54,641 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6903, 1.5891, 2.5866, 1.9717, 2.2860, 1.7208, 1.5072, 1.2075], + device='cuda:2'), covar=tensor([0.8682, 0.7337, 0.2311, 0.4571, 0.3675, 0.5177, 0.3583, 0.6321], + device='cuda:2'), in_proj_covar=tensor([0.0945, 0.0991, 0.0811, 0.0955, 0.0994, 0.0902, 0.0754, 0.0828], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 07:00:05,776 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:07,043 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189293.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:10,342 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.367e+02 2.967e+02 3.575e+02 9.298e+02, threshold=5.934e+02, percent-clipped=5.0 +2023-02-07 07:00:17,750 INFO [train.py:901] (2/4) Epoch 24, batch 3400, loss[loss=0.2104, simple_loss=0.2945, pruned_loss=0.0631, over 8047.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2876, pruned_loss=0.06102, over 1615399.66 frames. 
], batch size: 22, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:33,370 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6782, 1.3908, 2.9250, 1.1838, 2.2801, 3.1692, 3.4729, 2.3853], + device='cuda:2'), covar=tensor([0.1602, 0.2178, 0.0560, 0.2920, 0.1248, 0.0439, 0.0632, 0.1048], + device='cuda:2'), in_proj_covar=tensor([0.0300, 0.0324, 0.0286, 0.0317, 0.0316, 0.0271, 0.0429, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 07:00:34,013 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8494, 1.3626, 1.6083, 1.2278, 0.8644, 1.3909, 1.6033, 1.3751], + device='cuda:2'), covar=tensor([0.0533, 0.1340, 0.1698, 0.1570, 0.0648, 0.1554, 0.0754, 0.0677], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:00:52,446 INFO [train.py:901] (2/4) Epoch 24, batch 3450, loss[loss=0.2173, simple_loss=0.3095, pruned_loss=0.06254, over 8245.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06075, over 1615453.08 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:57,500 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:16,615 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:20,600 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.399e+02 2.884e+02 3.624e+02 7.571e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 07:01:26,259 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:27,399 INFO [train.py:901] (2/4) Epoch 24, batch 3500, loss[loss=0.2523, simple_loss=0.3255, pruned_loss=0.08955, over 7178.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2863, pruned_loss=0.06082, over 1615127.60 frames. ], batch size: 71, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:01:27,623 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:27,663 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6989, 1.9824, 2.1136, 1.4704, 2.2275, 1.4623, 0.7167, 1.9093], + device='cuda:2'), covar=tensor([0.0741, 0.0411, 0.0320, 0.0666, 0.0461, 0.1069, 0.0957, 0.0383], + device='cuda:2'), in_proj_covar=tensor([0.0461, 0.0401, 0.0356, 0.0454, 0.0387, 0.0542, 0.0398, 0.0431], + device='cuda:2'), out_proj_covar=tensor([1.2292e-04, 1.0469e-04, 9.3477e-05, 1.1928e-04, 1.0174e-04, 1.5223e-04, + 1.0689e-04, 1.1361e-04], device='cuda:2') +2023-02-07 07:01:40,495 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 07:01:40,595 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189426.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:50,804 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189441.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:56,335 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. 
limit=2.0 +2023-02-07 07:02:03,505 INFO [train.py:901] (2/4) Epoch 24, batch 3550, loss[loss=0.2083, simple_loss=0.2906, pruned_loss=0.06299, over 8329.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06117, over 1612088.41 frames. ], batch size: 25, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:26,873 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5603, 2.2826, 3.1231, 2.5192, 3.0714, 2.5146, 2.3265, 1.9292], + device='cuda:2'), covar=tensor([0.5475, 0.5040, 0.1972, 0.3835, 0.2426, 0.2961, 0.1840, 0.5456], + device='cuda:2'), in_proj_covar=tensor([0.0943, 0.0992, 0.0811, 0.0958, 0.0997, 0.0903, 0.0755, 0.0828], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 07:02:31,300 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.509e+02 2.981e+02 3.708e+02 7.370e+02, threshold=5.962e+02, percent-clipped=4.0 +2023-02-07 07:02:37,633 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:02:38,143 INFO [train.py:901] (2/4) Epoch 24, batch 3600, loss[loss=0.1984, simple_loss=0.2925, pruned_loss=0.05218, over 8355.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2861, pruned_loss=0.06091, over 1614672.71 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:42,682 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 07:02:45,848 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189519.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:01,847 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:12,157 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189556.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:13,407 INFO [train.py:901] (2/4) Epoch 24, batch 3650, loss[loss=0.229, simple_loss=0.2946, pruned_loss=0.08175, over 6755.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2862, pruned_loss=0.06096, over 1611573.09 frames. ], batch size: 71, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:41,105 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 07:03:41,749 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.496e+02 2.930e+02 3.600e+02 6.319e+02, threshold=5.860e+02, percent-clipped=2.0 +2023-02-07 07:03:48,377 INFO [train.py:901] (2/4) Epoch 24, batch 3700, loss[loss=0.181, simple_loss=0.27, pruned_loss=0.04595, over 7958.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2859, pruned_loss=0.06084, over 1608676.63 frames. 
], batch size: 21, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:49,802 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:55,988 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7661, 1.9488, 2.0369, 1.4694, 2.2031, 1.4297, 0.7325, 2.0010], + device='cuda:2'), covar=tensor([0.0663, 0.0391, 0.0320, 0.0575, 0.0425, 0.0943, 0.0908, 0.0278], + device='cuda:2'), in_proj_covar=tensor([0.0459, 0.0399, 0.0354, 0.0451, 0.0385, 0.0540, 0.0394, 0.0427], + device='cuda:2'), out_proj_covar=tensor([1.2235e-04, 1.0415e-04, 9.2972e-05, 1.1844e-04, 1.0110e-04, 1.5183e-04, + 1.0590e-04, 1.1266e-04], device='cuda:2') +2023-02-07 07:04:00,074 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3911, 1.6015, 1.6510, 1.2360, 1.6191, 1.2931, 0.3412, 1.6052], + device='cuda:2'), covar=tensor([0.0531, 0.0398, 0.0339, 0.0502, 0.0482, 0.1054, 0.0922, 0.0282], + device='cuda:2'), in_proj_covar=tensor([0.0458, 0.0399, 0.0354, 0.0451, 0.0384, 0.0540, 0.0394, 0.0427], + device='cuda:2'), out_proj_covar=tensor([1.2229e-04, 1.0409e-04, 9.2928e-05, 1.1839e-04, 1.0099e-04, 1.5174e-04, + 1.0583e-04, 1.1259e-04], device='cuda:2') +2023-02-07 07:04:23,127 INFO [train.py:901] (2/4) Epoch 24, batch 3750, loss[loss=0.195, simple_loss=0.2908, pruned_loss=0.04962, over 8481.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2858, pruned_loss=0.06058, over 1609444.39 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:23,298 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189658.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:26,018 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1135, 1.2953, 1.2411, 0.8679, 1.2232, 1.0617, 0.1722, 1.2320], + device='cuda:2'), covar=tensor([0.0418, 0.0365, 0.0373, 0.0563, 0.0521, 0.1022, 0.0819, 0.0344], + device='cuda:2'), in_proj_covar=tensor([0.0456, 0.0397, 0.0352, 0.0449, 0.0382, 0.0537, 0.0393, 0.0425], + device='cuda:2'), out_proj_covar=tensor([1.2159e-04, 1.0350e-04, 9.2355e-05, 1.1792e-04, 1.0030e-04, 1.5080e-04, + 1.0553e-04, 1.1201e-04], device='cuda:2') +2023-02-07 07:04:26,023 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:27,227 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189664.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:42,766 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:43,975 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189689.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:51,127 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.609e+02 3.129e+02 4.249e+02 7.016e+02, threshold=6.258e+02, percent-clipped=8.0 +2023-02-07 07:04:57,800 INFO [train.py:901] (2/4) Epoch 24, batch 3800, loss[loss=0.1774, simple_loss=0.2687, pruned_loss=0.04299, over 8448.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2859, pruned_loss=0.06043, over 1610455.56 frames. 
], batch size: 24, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:58,622 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:07,489 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:09,543 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:24,358 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189746.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:32,067 INFO [train.py:901] (2/4) Epoch 24, batch 3850, loss[loss=0.1776, simple_loss=0.2614, pruned_loss=0.04694, over 7701.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06093, over 1608762.84 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:05:35,653 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:47,661 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 07:05:53,007 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:53,958 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-07 07:05:59,103 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:59,539 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.407e+02 2.910e+02 3.432e+02 8.251e+02, threshold=5.819e+02, percent-clipped=1.0 +2023-02-07 07:06:06,335 INFO [train.py:901] (2/4) Epoch 24, batch 3900, loss[loss=0.1919, simple_loss=0.2842, pruned_loss=0.04983, over 8498.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2867, pruned_loss=0.06081, over 1608348.80 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:10,780 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189812.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:17,542 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:18,899 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189824.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:27,525 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:42,145 INFO [train.py:901] (2/4) Epoch 24, batch 3950, loss[loss=0.1922, simple_loss=0.2733, pruned_loss=0.05557, over 8020.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.287, pruned_loss=0.06102, over 1611867.92 frames. ], batch size: 22, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:45,564 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:07:09,594 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.387e+02 3.217e+02 3.997e+02 8.874e+02, threshold=6.434e+02, percent-clipped=5.0 +2023-02-07 07:07:16,321 INFO [train.py:901] (2/4) Epoch 24, batch 4000, loss[loss=0.1467, simple_loss=0.228, pruned_loss=0.03271, over 7436.00 frames. 
], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06046, over 1608713.86 frames. ], batch size: 17, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:07:51,143 INFO [train.py:901] (2/4) Epoch 24, batch 4050, loss[loss=0.2582, simple_loss=0.3339, pruned_loss=0.09122, over 8501.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.0602, over 1609230.49 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:08:05,480 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:07,500 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:18,681 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.334e+02 2.770e+02 3.399e+02 1.124e+03, threshold=5.539e+02, percent-clipped=1.0 +2023-02-07 07:08:22,556 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:26,230 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:27,388 INFO [train.py:901] (2/4) Epoch 24, batch 4100, loss[loss=0.1898, simple_loss=0.273, pruned_loss=0.05326, over 7688.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.0593, over 1611399.86 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:02,436 INFO [train.py:901] (2/4) Epoch 24, batch 4150, loss[loss=0.2254, simple_loss=0.3067, pruned_loss=0.07206, over 8321.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05927, over 1613239.65 frames. ], batch size: 26, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:08,094 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:17,893 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190080.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:25,160 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:30,579 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 2.377e+02 2.724e+02 3.400e+02 7.023e+02, threshold=5.448e+02, percent-clipped=3.0 +2023-02-07 07:09:35,508 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:37,392 INFO [train.py:901] (2/4) Epoch 24, batch 4200, loss[loss=0.2153, simple_loss=0.3045, pruned_loss=0.06304, over 8100.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2847, pruned_loss=0.05896, over 1615704.84 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:09:43,678 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:45,031 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 07:09:48,185 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 07:09:50,482 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.09 vs. limit=5.0 +2023-02-07 07:10:10,654 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 07:10:11,425 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190157.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:10:11,873 INFO [train.py:901] (2/4) Epoch 24, batch 4250, loss[loss=0.237, simple_loss=0.3136, pruned_loss=0.0802, over 8446.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2849, pruned_loss=0.05938, over 1611589.46 frames. ], batch size: 24, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:19,343 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190169.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:28,695 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:40,134 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.309e+02 2.865e+02 3.517e+02 8.092e+02, threshold=5.730e+02, percent-clipped=6.0 +2023-02-07 07:10:45,780 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190205.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:47,629 INFO [train.py:901] (2/4) Epoch 24, batch 4300, loss[loss=0.207, simple_loss=0.2817, pruned_loss=0.06615, over 7161.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05952, over 1612856.88 frames. ], batch size: 72, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:56,464 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8555, 1.4685, 4.2075, 1.7095, 3.3969, 3.3308, 3.8011, 3.7241], + device='cuda:2'), covar=tensor([0.1404, 0.6531, 0.1124, 0.5419, 0.2360, 0.1636, 0.1069, 0.1177], + device='cuda:2'), in_proj_covar=tensor([0.0651, 0.0657, 0.0718, 0.0645, 0.0723, 0.0622, 0.0622, 0.0697], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:11:05,299 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:21,901 INFO [train.py:901] (2/4) Epoch 24, batch 4350, loss[loss=0.2185, simple_loss=0.2972, pruned_loss=0.06986, over 8585.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2854, pruned_loss=0.05958, over 1612014.63 frames. ], batch size: 31, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:11:22,814 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190259.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:40,172 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 07:11:50,405 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.430e+02 2.823e+02 3.493e+02 1.012e+03, threshold=5.646e+02, percent-clipped=3.0 +2023-02-07 07:11:57,334 INFO [train.py:901] (2/4) Epoch 24, batch 4400, loss[loss=0.2174, simple_loss=0.2931, pruned_loss=0.07085, over 8036.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2853, pruned_loss=0.05927, over 1612850.28 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:15,560 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:12:23,140 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 07:12:32,981 INFO [train.py:901] (2/4) Epoch 24, batch 4450, loss[loss=0.2296, simple_loss=0.3058, pruned_loss=0.0767, over 8358.00 frames. 
], tot_loss[loss=0.2014, simple_loss=0.2848, pruned_loss=0.059, over 1615021.58 frames. ], batch size: 24, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:43,423 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:00,241 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 2.201e+02 2.691e+02 3.403e+02 6.534e+02, threshold=5.381e+02, percent-clipped=2.0 +2023-02-07 07:13:00,480 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:07,778 INFO [train.py:901] (2/4) Epoch 24, batch 4500, loss[loss=0.1625, simple_loss=0.25, pruned_loss=0.03752, over 7922.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.285, pruned_loss=0.05902, over 1616486.04 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:14,930 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:17,414 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 07:13:29,015 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190437.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:42,696 INFO [train.py:901] (2/4) Epoch 24, batch 4550, loss[loss=0.2135, simple_loss=0.296, pruned_loss=0.06544, over 8470.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.285, pruned_loss=0.05919, over 1615023.57 frames. ], batch size: 27, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:44,874 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:45,487 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:47,442 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.1688, 2.4789, 3.9178, 2.0430, 2.2191, 3.9416, 1.0962, 2.3975], + device='cuda:2'), covar=tensor([0.1134, 0.1292, 0.0228, 0.1571, 0.2125, 0.0189, 0.1856, 0.1221], + device='cuda:2'), in_proj_covar=tensor([0.0194, 0.0201, 0.0130, 0.0222, 0.0273, 0.0138, 0.0171, 0.0196], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 07:14:02,626 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:10,719 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.430e+02 2.973e+02 3.981e+02 9.647e+02, threshold=5.946e+02, percent-clipped=9.0 +2023-02-07 07:14:12,831 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190501.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:14:17,566 INFO [train.py:901] (2/4) Epoch 24, batch 4600, loss[loss=0.1837, simple_loss=0.2587, pruned_loss=0.05439, over 7445.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05911, over 1613202.83 frames. ], batch size: 17, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:14:21,246 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:42,210 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. 
limit=2.0 +2023-02-07 07:14:54,258 INFO [train.py:901] (2/4) Epoch 24, batch 4650, loss[loss=0.1826, simple_loss=0.2703, pruned_loss=0.0475, over 8250.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2843, pruned_loss=0.05888, over 1611283.47 frames. ], batch size: 24, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:22,378 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.469e+02 2.987e+02 3.787e+02 1.231e+03, threshold=5.974e+02, percent-clipped=5.0 +2023-02-07 07:15:29,195 INFO [train.py:901] (2/4) Epoch 24, batch 4700, loss[loss=0.2225, simple_loss=0.2979, pruned_loss=0.07355, over 7939.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2835, pruned_loss=0.0588, over 1610581.01 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:29,957 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190609.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:15:34,417 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190616.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:15:41,743 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:15:42,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190628.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:04,007 INFO [train.py:901] (2/4) Epoch 24, batch 4750, loss[loss=0.2298, simple_loss=0.3084, pruned_loss=0.07559, over 8076.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.283, pruned_loss=0.05844, over 1610760.77 frames. ], batch size: 21, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:16:18,717 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:20,750 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 07:16:22,886 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 07:16:29,418 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:32,691 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.279e+02 2.789e+02 3.393e+02 7.815e+02, threshold=5.578e+02, percent-clipped=3.0 +2023-02-07 07:16:40,387 INFO [train.py:901] (2/4) Epoch 24, batch 4800, loss[loss=0.1865, simple_loss=0.2798, pruned_loss=0.04665, over 8508.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2838, pruned_loss=0.05839, over 1613279.61 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:13,014 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 07:17:15,035 INFO [train.py:901] (2/4) Epoch 24, batch 4850, loss[loss=0.1723, simple_loss=0.2487, pruned_loss=0.048, over 7180.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2846, pruned_loss=0.05916, over 1612057.19 frames. 
], batch size: 16, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:17,934 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:25,450 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0423, 1.7884, 2.3086, 1.9665, 2.2533, 2.1211, 1.9407, 1.1555], + device='cuda:2'), covar=tensor([0.5900, 0.5022, 0.2069, 0.3780, 0.2704, 0.3237, 0.2099, 0.5372], + device='cuda:2'), in_proj_covar=tensor([0.0947, 0.0996, 0.0813, 0.0966, 0.1002, 0.0907, 0.0756, 0.0831], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 07:17:40,140 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:43,500 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.278e+02 2.775e+02 3.178e+02 7.824e+02, threshold=5.550e+02, percent-clipped=3.0 +2023-02-07 07:17:50,723 INFO [train.py:901] (2/4) Epoch 24, batch 4900, loss[loss=0.186, simple_loss=0.2829, pruned_loss=0.04451, over 8595.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2842, pruned_loss=0.05911, over 1610956.85 frames. ], batch size: 31, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:06,938 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5897, 1.4341, 2.8955, 1.4094, 2.1879, 3.0534, 3.2197, 2.6206], + device='cuda:2'), covar=tensor([0.1316, 0.1770, 0.0340, 0.2142, 0.0859, 0.0313, 0.0563, 0.0577], + device='cuda:2'), in_proj_covar=tensor([0.0297, 0.0324, 0.0285, 0.0316, 0.0315, 0.0272, 0.0429, 0.0302], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 07:18:16,604 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.38 vs. limit=5.0 +2023-02-07 07:18:25,852 INFO [train.py:901] (2/4) Epoch 24, batch 4950, loss[loss=0.1858, simple_loss=0.2663, pruned_loss=0.05265, over 7924.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.05996, over 1609320.39 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:34,367 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 07:18:36,138 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190872.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:18:39,587 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:45,197 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:53,852 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190897.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:18:54,299 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.393e+02 2.890e+02 3.701e+02 7.772e+02, threshold=5.780e+02, percent-clipped=4.0 +2023-02-07 07:19:01,823 INFO [train.py:901] (2/4) Epoch 24, batch 5000, loss[loss=0.2236, simple_loss=0.3084, pruned_loss=0.06941, over 8186.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06093, over 1614575.57 frames. 
], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:02,713 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:23,201 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.3046, 3.2037, 3.0326, 1.6072, 2.9402, 2.9086, 2.9268, 2.7932], + device='cuda:2'), covar=tensor([0.0999, 0.0779, 0.1108, 0.4372, 0.1090, 0.1335, 0.1513, 0.0993], + device='cuda:2'), in_proj_covar=tensor([0.0532, 0.0446, 0.0432, 0.0543, 0.0427, 0.0448, 0.0425, 0.0391], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:19:33,489 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190953.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:19:36,599 INFO [train.py:901] (2/4) Epoch 24, batch 5050, loss[loss=0.1776, simple_loss=0.254, pruned_loss=0.05058, over 7655.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2861, pruned_loss=0.06066, over 1613962.30 frames. ], batch size: 19, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:43,537 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-07 07:19:45,289 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190971.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:51,185 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 07:20:04,894 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.521e+02 3.221e+02 4.359e+02 8.705e+02, threshold=6.442e+02, percent-clipped=11.0 +2023-02-07 07:20:11,606 INFO [train.py:901] (2/4) Epoch 24, batch 5100, loss[loss=0.1895, simple_loss=0.2613, pruned_loss=0.05886, over 7701.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2857, pruned_loss=0.06056, over 1611868.93 frames. ], batch size: 18, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:18,613 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0976, 1.7861, 6.2556, 2.3390, 5.6759, 5.3192, 5.8052, 5.7350], + device='cuda:2'), covar=tensor([0.0463, 0.4301, 0.0347, 0.3499, 0.0832, 0.0746, 0.0397, 0.0446], + device='cuda:2'), in_proj_covar=tensor([0.0647, 0.0653, 0.0711, 0.0641, 0.0722, 0.0619, 0.0618, 0.0690], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:20:31,917 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:36,679 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8480, 1.6182, 4.0048, 1.4757, 3.5658, 3.3325, 3.6522, 3.5276], + device='cuda:2'), covar=tensor([0.0665, 0.4069, 0.0592, 0.4201, 0.1089, 0.0949, 0.0598, 0.0722], + device='cuda:2'), in_proj_covar=tensor([0.0645, 0.0651, 0.0710, 0.0639, 0.0719, 0.0617, 0.0616, 0.0688], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:20:40,076 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:46,716 INFO [train.py:901] (2/4) Epoch 24, batch 5150, loss[loss=0.1868, simple_loss=0.2696, pruned_loss=0.05202, over 7912.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.286, pruned_loss=0.06089, over 1609395.66 frames. 
], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:53,686 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191068.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:20:57,803 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:05,746 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:05,781 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:13,676 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.286e+02 2.691e+02 3.617e+02 7.196e+02, threshold=5.383e+02, percent-clipped=2.0 +2023-02-07 07:21:20,836 INFO [train.py:901] (2/4) Epoch 24, batch 5200, loss[loss=0.1921, simple_loss=0.2731, pruned_loss=0.05555, over 8472.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2849, pruned_loss=0.06002, over 1608887.31 frames. ], batch size: 25, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:21:38,122 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:41,676 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-07 07:21:44,218 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7142, 2.3404, 4.0174, 1.6494, 2.9349, 2.2851, 1.8292, 2.7221], + device='cuda:2'), covar=tensor([0.2026, 0.2760, 0.0803, 0.4669, 0.1855, 0.3162, 0.2550, 0.2609], + device='cuda:2'), in_proj_covar=tensor([0.0531, 0.0617, 0.0556, 0.0655, 0.0651, 0.0598, 0.0549, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:21:49,618 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8115, 1.9430, 2.0883, 1.3914, 2.2478, 1.5574, 0.6891, 1.9012], + device='cuda:2'), covar=tensor([0.0609, 0.0398, 0.0346, 0.0618, 0.0469, 0.0954, 0.0992, 0.0360], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0406, 0.0360, 0.0457, 0.0388, 0.0544, 0.0401, 0.0434], + device='cuda:2'), out_proj_covar=tensor([1.2363e-04, 1.0604e-04, 9.4292e-05, 1.1997e-04, 1.0206e-04, 1.5281e-04, + 1.0776e-04, 1.1453e-04], device='cuda:2') +2023-02-07 07:21:50,033 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 07:21:50,822 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:52,267 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:55,601 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:56,101 INFO [train.py:901] (2/4) Epoch 24, batch 5250, loss[loss=0.176, simple_loss=0.2518, pruned_loss=0.05013, over 7451.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2848, pruned_loss=0.06032, over 1609243.94 frames. 
], batch size: 17, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:22:11,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3883, 1.4554, 1.3957, 1.7671, 0.7099, 1.2656, 1.3030, 1.4992], + device='cuda:2'), covar=tensor([0.0861, 0.0800, 0.0982, 0.0508, 0.1099, 0.1341, 0.0744, 0.0702], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0199, 0.0244, 0.0215, 0.0205, 0.0247, 0.0252, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 07:22:14,956 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7557, 1.6956, 2.4592, 1.5970, 1.3315, 2.4381, 0.4175, 1.5378], + device='cuda:2'), covar=tensor([0.1660, 0.1292, 0.0327, 0.1295, 0.2531, 0.0397, 0.2074, 0.1301], + device='cuda:2'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0223, 0.0275, 0.0140, 0.0172, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 07:22:25,865 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.847e+02 3.981e+02 6.971e+02, threshold=5.694e+02, percent-clipped=11.0 +2023-02-07 07:22:28,317 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191203.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:22:31,598 INFO [train.py:901] (2/4) Epoch 24, batch 5300, loss[loss=0.1692, simple_loss=0.26, pruned_loss=0.03917, over 8288.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2845, pruned_loss=0.06016, over 1608956.34 frames. ], batch size: 23, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:07,067 INFO [train.py:901] (2/4) Epoch 24, batch 5350, loss[loss=0.1894, simple_loss=0.2713, pruned_loss=0.05379, over 7799.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2851, pruned_loss=0.06029, over 1611421.49 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:36,917 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.294e+02 2.754e+02 3.342e+02 1.056e+03, threshold=5.508e+02, percent-clipped=2.0 +2023-02-07 07:23:42,385 INFO [train.py:901] (2/4) Epoch 24, batch 5400, loss[loss=0.1866, simple_loss=0.2735, pruned_loss=0.04983, over 8088.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2853, pruned_loss=0.0601, over 1611545.06 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:23:53,142 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191324.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:23:56,006 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 07:24:05,744 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191342.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:11,125 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191349.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:24:17,037 INFO [train.py:901] (2/4) Epoch 24, batch 5450, loss[loss=0.2009, simple_loss=0.2784, pruned_loss=0.06173, over 7806.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2848, pruned_loss=0.0597, over 1611923.77 frames. 
], batch size: 20, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:21,964 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4277, 2.1149, 2.2540, 2.1284, 1.4805, 2.1550, 2.4030, 2.3032], + device='cuda:2'), covar=tensor([0.0531, 0.0914, 0.1205, 0.1040, 0.0572, 0.1067, 0.0591, 0.0442], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:24:23,296 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:34,967 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:39,460 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 07:24:46,090 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.294e+02 2.951e+02 3.676e+02 7.135e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 07:24:52,420 INFO [train.py:901] (2/4) Epoch 24, batch 5500, loss[loss=0.2273, simple_loss=0.3098, pruned_loss=0.0724, over 7818.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2861, pruned_loss=0.06049, over 1612088.22 frames. ], batch size: 20, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:52,647 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:07,818 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:10,089 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:12,913 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3010, 2.1180, 1.6625, 1.9589, 1.8272, 1.4633, 1.7390, 1.6367], + device='cuda:2'), covar=tensor([0.1256, 0.0423, 0.1218, 0.0516, 0.0710, 0.1613, 0.0918, 0.0904], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0234, 0.0336, 0.0309, 0.0298, 0.0342, 0.0346, 0.0317], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 07:25:28,138 INFO [train.py:901] (2/4) Epoch 24, batch 5550, loss[loss=0.1721, simple_loss=0.2561, pruned_loss=0.04404, over 7976.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2853, pruned_loss=0.05971, over 1613601.31 frames. 
], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:25:39,666 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6545, 2.3961, 3.1442, 2.5367, 3.1033, 2.6623, 2.4196, 1.9167], + device='cuda:2'), covar=tensor([0.5359, 0.5501, 0.2202, 0.4022, 0.2676, 0.2999, 0.1852, 0.5700], + device='cuda:2'), in_proj_covar=tensor([0.0943, 0.0992, 0.0812, 0.0962, 0.0999, 0.0904, 0.0754, 0.0830], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 07:25:52,797 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1090, 1.2971, 1.6832, 1.3078, 0.7330, 1.3908, 1.1665, 1.0507], + device='cuda:2'), covar=tensor([0.0633, 0.1198, 0.1604, 0.1423, 0.0558, 0.1384, 0.0696, 0.0720], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:25:53,334 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:57,931 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.420e+02 3.039e+02 3.989e+02 7.925e+02, threshold=6.078e+02, percent-clipped=5.0 +2023-02-07 07:26:03,389 INFO [train.py:901] (2/4) Epoch 24, batch 5600, loss[loss=0.2072, simple_loss=0.2781, pruned_loss=0.06809, over 7235.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2853, pruned_loss=0.06013, over 1613467.42 frames. ], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:29,660 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191545.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:30,969 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:38,448 INFO [train.py:901] (2/4) Epoch 24, batch 5650, loss[loss=0.1842, simple_loss=0.2655, pruned_loss=0.05151, over 7712.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2854, pruned_loss=0.06024, over 1615381.33 frames. ], batch size: 18, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:45,397 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 07:27:08,570 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.481e+02 2.901e+02 3.753e+02 9.237e+02, threshold=5.802e+02, percent-clipped=5.0 +2023-02-07 07:27:13,001 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7696, 1.4919, 3.0885, 1.4490, 2.2241, 3.2756, 3.4541, 2.8187], + device='cuda:2'), covar=tensor([0.1215, 0.1813, 0.0360, 0.2164, 0.1084, 0.0322, 0.0572, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0296, 0.0324, 0.0286, 0.0315, 0.0315, 0.0273, 0.0430, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 07:27:14,232 INFO [train.py:901] (2/4) Epoch 24, batch 5700, loss[loss=0.1682, simple_loss=0.2504, pruned_loss=0.04298, over 7924.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2842, pruned_loss=0.05969, over 1610505.93 frames. 
], batch size: 20, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:15,043 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:49,745 INFO [train.py:901] (2/4) Epoch 24, batch 5750, loss[loss=0.2176, simple_loss=0.2931, pruned_loss=0.07109, over 8474.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2835, pruned_loss=0.05915, over 1612232.12 frames. ], batch size: 28, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:52,729 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:53,259 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 07:28:19,025 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.394e+02 2.726e+02 3.524e+02 6.240e+02, threshold=5.452e+02, percent-clipped=4.0 +2023-02-07 07:28:25,087 INFO [train.py:901] (2/4) Epoch 24, batch 5800, loss[loss=0.2483, simple_loss=0.3174, pruned_loss=0.08953, over 8158.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.285, pruned_loss=0.0598, over 1615809.31 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:28:38,093 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:28:39,806 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 07:28:40,459 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-07 07:28:59,639 INFO [train.py:901] (2/4) Epoch 24, batch 5850, loss[loss=0.1914, simple_loss=0.2854, pruned_loss=0.04865, over 8361.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.05927, over 1612106.60 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:29,158 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.223e+02 2.821e+02 3.422e+02 9.012e+02, threshold=5.641e+02, percent-clipped=8.0 +2023-02-07 07:29:30,134 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:34,717 INFO [train.py:901] (2/4) Epoch 24, batch 5900, loss[loss=0.215, simple_loss=0.3086, pruned_loss=0.06068, over 8333.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2832, pruned_loss=0.05878, over 1606509.74 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:48,202 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191826.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:59,325 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:10,950 INFO [train.py:901] (2/4) Epoch 24, batch 5950, loss[loss=0.2061, simple_loss=0.2942, pruned_loss=0.05905, over 8617.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2841, pruned_loss=0.0592, over 1610982.92 frames. 
], batch size: 34, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:16,034 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:33,604 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191890.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:40,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 2.338e+02 2.991e+02 3.628e+02 7.270e+02, threshold=5.982e+02, percent-clipped=3.0 +2023-02-07 07:30:45,681 INFO [train.py:901] (2/4) Epoch 24, batch 6000, loss[loss=0.2201, simple_loss=0.3172, pruned_loss=0.06153, over 8465.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2852, pruned_loss=0.05979, over 1615370.74 frames. ], batch size: 29, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:45,681 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 07:30:59,033 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7141, 1.7122, 3.8428, 1.6687, 3.4615, 3.1975, 3.5350, 3.3572], + device='cuda:2'), covar=tensor([0.0618, 0.4198, 0.0536, 0.4245, 0.1005, 0.1034, 0.0609, 0.0687], + device='cuda:2'), in_proj_covar=tensor([0.0648, 0.0653, 0.0710, 0.0643, 0.0720, 0.0617, 0.0618, 0.0690], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:31:01,028 INFO [train.py:935] (2/4) Epoch 24, validation: loss=0.1718, simple_loss=0.2718, pruned_loss=0.0359, over 944034.00 frames. +2023-02-07 07:31:01,030 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 07:31:08,194 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191918.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:12,832 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1763, 1.4057, 1.6709, 1.3380, 0.7284, 1.4272, 1.1964, 1.0995], + device='cuda:2'), covar=tensor([0.0601, 0.1202, 0.1598, 0.1383, 0.0553, 0.1440, 0.0700, 0.0712], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0101, 0.0163, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:31:24,848 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:26,283 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8734, 2.3201, 4.1486, 1.5830, 3.1707, 2.3483, 1.9279, 2.9737], + device='cuda:2'), covar=tensor([0.2059, 0.2984, 0.1017, 0.5109, 0.1895, 0.3498, 0.2626, 0.2685], + device='cuda:2'), in_proj_covar=tensor([0.0531, 0.0615, 0.0555, 0.0652, 0.0652, 0.0600, 0.0547, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:31:35,315 INFO [train.py:901] (2/4) Epoch 24, batch 6050, loss[loss=0.1952, simple_loss=0.289, pruned_loss=0.0507, over 8327.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2852, pruned_loss=0.05997, over 1615249.16 frames. 
], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:31:41,183 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5047, 1.9400, 2.8837, 1.4137, 2.0910, 1.9206, 1.6314, 2.1110], + device='cuda:2'), covar=tensor([0.2000, 0.2599, 0.0936, 0.4690, 0.2100, 0.3297, 0.2409, 0.2525], + device='cuda:2'), in_proj_covar=tensor([0.0531, 0.0615, 0.0555, 0.0652, 0.0652, 0.0600, 0.0548, 0.0636], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:31:57,329 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0402, 1.5685, 1.7955, 1.3691, 0.9564, 1.5767, 1.7456, 1.6230], + device='cuda:2'), covar=tensor([0.0525, 0.1212, 0.1601, 0.1457, 0.0612, 0.1441, 0.0694, 0.0662], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0159, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:31:58,021 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7707, 2.0254, 1.7319, 2.5520, 1.1057, 1.5114, 1.8247, 2.0065], + device='cuda:2'), covar=tensor([0.0825, 0.0759, 0.0855, 0.0384, 0.1137, 0.1334, 0.0836, 0.0799], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0199, 0.0245, 0.0216, 0.0205, 0.0247, 0.0253, 0.0208], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 07:32:04,605 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.415e+02 2.742e+02 3.441e+02 8.508e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 07:32:10,731 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9575, 2.4675, 4.1307, 1.6655, 3.1303, 2.3223, 2.0631, 2.6156], + device='cuda:2'), covar=tensor([0.1673, 0.2349, 0.0789, 0.4373, 0.1672, 0.3096, 0.2133, 0.2646], + device='cuda:2'), in_proj_covar=tensor([0.0528, 0.0613, 0.0552, 0.0649, 0.0649, 0.0598, 0.0545, 0.0634], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:32:11,925 INFO [train.py:901] (2/4) Epoch 24, batch 6100, loss[loss=0.2134, simple_loss=0.2965, pruned_loss=0.06518, over 8452.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2845, pruned_loss=0.0592, over 1616629.85 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:32:32,939 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1023, 1.5927, 1.4369, 1.5849, 1.4030, 1.3133, 1.2861, 1.2990], + device='cuda:2'), covar=tensor([0.1178, 0.0480, 0.1367, 0.0569, 0.0746, 0.1589, 0.1031, 0.0842], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0234, 0.0338, 0.0311, 0.0302, 0.0343, 0.0348, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 07:32:34,056 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 07:32:37,030 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 07:32:46,752 INFO [train.py:901] (2/4) Epoch 24, batch 6150, loss[loss=0.1883, simple_loss=0.2756, pruned_loss=0.0505, over 8112.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2839, pruned_loss=0.0592, over 1615295.39 frames. 
], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:33:15,574 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192098.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:16,713 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.367e+02 2.762e+02 3.348e+02 6.106e+02, threshold=5.524e+02, percent-clipped=2.0 +2023-02-07 07:33:22,017 INFO [train.py:901] (2/4) Epoch 24, batch 6200, loss[loss=0.1758, simple_loss=0.26, pruned_loss=0.04582, over 7211.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05924, over 1610897.06 frames. ], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:33:30,652 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192120.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:32,773 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192123.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:56,608 INFO [train.py:901] (2/4) Epoch 24, batch 6250, loss[loss=0.1918, simple_loss=0.2678, pruned_loss=0.05791, over 7262.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05805, over 1611239.75 frames. ], batch size: 16, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:34:07,838 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192173.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:26,659 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.364e+02 2.949e+02 3.646e+02 8.976e+02, threshold=5.898e+02, percent-clipped=7.0 +2023-02-07 07:34:33,021 INFO [train.py:901] (2/4) Epoch 24, batch 6300, loss[loss=0.1985, simple_loss=0.2865, pruned_loss=0.05529, over 8252.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2832, pruned_loss=0.05848, over 1609406.05 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:34:42,831 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 07:34:52,694 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192237.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:54,074 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:35:02,889 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9971, 1.5462, 1.7381, 1.4549, 1.0910, 1.5469, 1.9308, 1.5404], + device='cuda:2'), covar=tensor([0.0512, 0.1176, 0.1587, 0.1391, 0.0572, 0.1420, 0.0631, 0.0646], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0153, 0.0188, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:35:07,359 INFO [train.py:901] (2/4) Epoch 24, batch 6350, loss[loss=0.1981, simple_loss=0.2827, pruned_loss=0.05673, over 8336.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05932, over 1612977.05 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:35:36,835 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.329e+02 2.896e+02 3.640e+02 5.459e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 07:35:43,003 INFO [train.py:901] (2/4) Epoch 24, batch 6400, loss[loss=0.2073, simple_loss=0.3027, pruned_loss=0.05596, over 8336.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2843, pruned_loss=0.05876, over 1614160.71 frames. 
], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:19,118 INFO [train.py:901] (2/4) Epoch 24, batch 6450, loss[loss=0.2004, simple_loss=0.2795, pruned_loss=0.06065, over 8341.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05924, over 1611054.86 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:49,115 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.491e+02 2.965e+02 3.858e+02 7.678e+02, threshold=5.930e+02, percent-clipped=7.0 +2023-02-07 07:36:54,691 INFO [train.py:901] (2/4) Epoch 24, batch 6500, loss[loss=0.1943, simple_loss=0.2943, pruned_loss=0.04713, over 8331.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2846, pruned_loss=0.05895, over 1609545.10 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:56,155 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192410.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:00,677 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:25,023 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192451.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:29,466 INFO [train.py:901] (2/4) Epoch 24, batch 6550, loss[loss=0.1998, simple_loss=0.2896, pruned_loss=0.05494, over 8619.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2845, pruned_loss=0.05885, over 1614317.76 frames. ], batch size: 31, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:37:33,654 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192464.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:48,320 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 07:37:58,367 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.233e+02 2.734e+02 3.455e+02 6.558e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 07:38:03,872 INFO [train.py:901] (2/4) Epoch 24, batch 6600, loss[loss=0.2062, simple_loss=0.2904, pruned_loss=0.06103, over 8139.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2837, pruned_loss=0.05902, over 1611611.23 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:38:08,110 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 07:38:10,834 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:39,632 INFO [train.py:901] (2/4) Epoch 24, batch 6650, loss[loss=0.2009, simple_loss=0.2916, pruned_loss=0.05512, over 8663.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.285, pruned_loss=0.0596, over 1617668.22 frames. 
], batch size: 39, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:38:53,937 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:55,134 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:56,512 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:08,651 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.365e+02 2.876e+02 3.746e+02 9.522e+02, threshold=5.752e+02, percent-clipped=3.0 +2023-02-07 07:39:11,895 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-07 07:39:14,187 INFO [train.py:901] (2/4) Epoch 24, batch 6700, loss[loss=0.2224, simple_loss=0.3073, pruned_loss=0.06879, over 8322.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2843, pruned_loss=0.05879, over 1615528.60 frames. ], batch size: 26, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:39:31,147 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1637, 4.1005, 3.7338, 2.8838, 3.6377, 3.7132, 3.8073, 3.5866], + device='cuda:2'), covar=tensor([0.0722, 0.0565, 0.0870, 0.3147, 0.0851, 0.1335, 0.1073, 0.0898], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0446, 0.0434, 0.0546, 0.0434, 0.0450, 0.0429, 0.0392], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:39:31,226 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:32,518 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:36,667 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 07:39:49,033 INFO [train.py:901] (2/4) Epoch 24, batch 6750, loss[loss=0.1877, simple_loss=0.2775, pruned_loss=0.04895, over 8232.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2849, pruned_loss=0.05925, over 1614105.22 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:40:12,033 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192691.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:15,248 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192696.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:16,608 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:17,823 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.601e+02 3.163e+02 3.841e+02 9.507e+02, threshold=6.325e+02, percent-clipped=3.0 +2023-02-07 07:40:23,402 INFO [train.py:901] (2/4) Epoch 24, batch 6800, loss[loss=0.164, simple_loss=0.2426, pruned_loss=0.04275, over 7809.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2854, pruned_loss=0.05978, over 1616289.45 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:40:24,829 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 07:40:30,217 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-07 07:40:44,401 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 07:40:54,020 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4492, 4.4636, 4.0228, 2.0786, 3.9311, 3.9369, 3.9565, 3.8506], + device='cuda:2'), covar=tensor([0.0663, 0.0489, 0.1001, 0.3959, 0.0888, 0.0916, 0.1252, 0.0705], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0446, 0.0434, 0.0547, 0.0434, 0.0449, 0.0429, 0.0392], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:40:56,188 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:56,811 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:58,551 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.71 vs. limit=5.0 +2023-02-07 07:40:59,525 INFO [train.py:901] (2/4) Epoch 24, batch 6850, loss[loss=0.21, simple_loss=0.294, pruned_loss=0.06302, over 8254.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.0599, over 1619236.79 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:41:01,649 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:41:15,299 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 07:41:26,175 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:41:29,430 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.230e+02 2.864e+02 3.611e+02 9.090e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-07 07:41:35,200 INFO [train.py:901] (2/4) Epoch 24, batch 6900, loss[loss=0.222, simple_loss=0.3028, pruned_loss=0.07062, over 8028.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2849, pruned_loss=0.05916, over 1621154.45 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:41:51,988 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4879, 1.2762, 2.2123, 1.1694, 2.1549, 2.3771, 2.5633, 2.0514], + device='cuda:2'), covar=tensor([0.1039, 0.1489, 0.0505, 0.2168, 0.0819, 0.0430, 0.0747, 0.0631], + device='cuda:2'), in_proj_covar=tensor([0.0299, 0.0326, 0.0288, 0.0317, 0.0316, 0.0274, 0.0433, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 07:41:54,061 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:03,100 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192849.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:42:08,875 INFO [train.py:901] (2/4) Epoch 24, batch 6950, loss[loss=0.2565, simple_loss=0.3457, pruned_loss=0.08366, over 8494.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2861, pruned_loss=0.05988, over 1625057.99 frames. 
], batch size: 26, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:42:10,320 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:17,129 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:21,212 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2386, 3.1461, 2.9432, 1.6409, 2.8658, 2.9049, 2.8365, 2.8320], + device='cuda:2'), covar=tensor([0.1193, 0.0861, 0.1269, 0.4626, 0.1190, 0.1419, 0.1601, 0.1084], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0444, 0.0432, 0.0544, 0.0432, 0.0446, 0.0427, 0.0390], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:42:21,972 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:23,173 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 07:42:24,001 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8225, 1.3967, 3.9857, 1.4263, 3.5139, 3.2905, 3.5613, 3.4696], + device='cuda:2'), covar=tensor([0.0727, 0.4323, 0.0611, 0.4224, 0.1220, 0.0996, 0.0773, 0.0803], + device='cuda:2'), in_proj_covar=tensor([0.0644, 0.0654, 0.0710, 0.0639, 0.0715, 0.0614, 0.0617, 0.0686], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:42:30,054 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192888.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:38,585 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.384e+02 2.955e+02 3.597e+02 9.319e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 07:42:44,706 INFO [train.py:901] (2/4) Epoch 24, batch 7000, loss[loss=0.1683, simple_loss=0.247, pruned_loss=0.04482, over 7241.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06045, over 1623390.33 frames. ], batch size: 16, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:42:46,238 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:48,256 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:55,821 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 07:43:15,536 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:16,862 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:19,408 INFO [train.py:901] (2/4) Epoch 24, batch 7050, loss[loss=0.1667, simple_loss=0.2594, pruned_loss=0.03698, over 7928.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.06072, over 1622713.28 frames. 
], batch size: 20, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:43:32,655 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:33,234 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:34,079 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:48,986 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.407e+02 3.090e+02 3.925e+02 9.689e+02, threshold=6.179e+02, percent-clipped=7.0 +2023-02-07 07:43:54,449 INFO [train.py:901] (2/4) Epoch 24, batch 7100, loss[loss=0.1906, simple_loss=0.2547, pruned_loss=0.0633, over 7540.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2854, pruned_loss=0.0603, over 1618457.17 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:43:57,556 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.38 vs. limit=5.0 +2023-02-07 07:44:14,217 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193035.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:29,517 INFO [train.py:901] (2/4) Epoch 24, batch 7150, loss[loss=0.1914, simple_loss=0.2702, pruned_loss=0.05626, over 7650.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2845, pruned_loss=0.05955, over 1615330.25 frames. ], batch size: 19, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:44:30,374 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193059.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:54,294 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:56,929 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:58,917 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.272e+02 2.945e+02 3.915e+02 7.728e+02, threshold=5.890e+02, percent-clipped=4.0 +2023-02-07 07:45:05,030 INFO [train.py:901] (2/4) Epoch 24, batch 7200, loss[loss=0.2151, simple_loss=0.2989, pruned_loss=0.06563, over 8506.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2847, pruned_loss=0.05958, over 1617688.98 frames. ], batch size: 26, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:45:16,876 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:21,521 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:34,266 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:34,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:39,616 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:40,079 INFO [train.py:901] (2/4) Epoch 24, batch 7250, loss[loss=0.152, simple_loss=0.2341, pruned_loss=0.03493, over 7536.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2837, pruned_loss=0.05935, over 1611869.33 frames. 
], batch size: 18, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:45:45,719 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193166.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:03,182 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:04,339 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193193.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:46:08,901 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.389e+02 2.780e+02 3.377e+02 1.311e+03, threshold=5.561e+02, percent-clipped=2.0 +2023-02-07 07:46:14,403 INFO [train.py:901] (2/4) Epoch 24, batch 7300, loss[loss=0.2077, simple_loss=0.2902, pruned_loss=0.06258, over 8106.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2828, pruned_loss=0.0588, over 1611310.13 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:46:17,193 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:29,133 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193229.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:44,639 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 07:46:48,081 INFO [train.py:901] (2/4) Epoch 24, batch 7350, loss[loss=0.1877, simple_loss=0.2786, pruned_loss=0.0484, over 8250.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2838, pruned_loss=0.05924, over 1613037.48 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:46:54,914 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5933, 1.8340, 1.5349, 2.2799, 1.0550, 1.3662, 1.6953, 1.8075], + device='cuda:2'), covar=tensor([0.0859, 0.0694, 0.0966, 0.0407, 0.1098, 0.1404, 0.0796, 0.0776], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0197, 0.0245, 0.0214, 0.0205, 0.0247, 0.0251, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 07:47:00,372 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2776, 1.9681, 2.5269, 1.5827, 1.7277, 2.4822, 1.2395, 2.0995], + device='cuda:2'), covar=tensor([0.1323, 0.1061, 0.0291, 0.1110, 0.1904, 0.0394, 0.1648, 0.0987], + device='cuda:2'), in_proj_covar=tensor([0.0195, 0.0201, 0.0130, 0.0221, 0.0273, 0.0140, 0.0171, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 07:47:05,436 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 07:47:11,624 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 07:47:17,798 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.477e+02 2.971e+02 3.853e+02 6.522e+02, threshold=5.942e+02, percent-clipped=4.0 +2023-02-07 07:47:19,306 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:47:23,212 INFO [train.py:901] (2/4) Epoch 24, batch 7400, loss[loss=0.2018, simple_loss=0.2922, pruned_loss=0.05566, over 8434.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2841, pruned_loss=0.05944, over 1613576.60 frames. 
], batch size: 49, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:47:23,401 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193308.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:47:26,298 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 07:47:31,906 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 07:47:52,394 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:47:58,366 INFO [train.py:901] (2/4) Epoch 24, batch 7450, loss[loss=0.1908, simple_loss=0.2793, pruned_loss=0.05116, over 8476.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2856, pruned_loss=0.06035, over 1616427.67 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:48:09,526 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 07:48:10,330 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193374.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:28,444 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.340e+02 2.930e+02 4.048e+02 8.147e+02, threshold=5.861e+02, percent-clipped=5.0 +2023-02-07 07:48:30,642 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193403.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:32,906 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:34,037 INFO [train.py:901] (2/4) Epoch 24, batch 7500, loss[loss=0.1768, simple_loss=0.2497, pruned_loss=0.05196, over 6821.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.0591, over 1613740.76 frames. ], batch size: 15, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:48:50,719 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:56,214 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:07,186 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5287, 2.9948, 2.3840, 4.1040, 1.8512, 1.8984, 2.5752, 2.8761], + device='cuda:2'), covar=tensor([0.0709, 0.0734, 0.0778, 0.0208, 0.1010, 0.1310, 0.0864, 0.0747], + device='cuda:2'), in_proj_covar=tensor([0.0234, 0.0198, 0.0245, 0.0214, 0.0205, 0.0248, 0.0252, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 07:49:08,070 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.69 vs. limit=5.0 +2023-02-07 07:49:09,712 INFO [train.py:901] (2/4) Epoch 24, batch 7550, loss[loss=0.2016, simple_loss=0.3039, pruned_loss=0.04965, over 8642.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2851, pruned_loss=0.0598, over 1610763.42 frames. 
], batch size: 49, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:49:16,844 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:19,413 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193472.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:49:34,387 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:39,066 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.474e+02 3.046e+02 3.751e+02 6.843e+02, threshold=6.092e+02, percent-clipped=3.0 +2023-02-07 07:49:40,770 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2350, 2.0766, 2.7205, 2.2163, 2.7434, 2.3367, 2.0883, 1.5549], + device='cuda:2'), covar=tensor([0.5619, 0.4995, 0.2053, 0.4068, 0.2662, 0.3168, 0.2004, 0.5506], + device='cuda:2'), in_proj_covar=tensor([0.0949, 0.0998, 0.0820, 0.0969, 0.1010, 0.0911, 0.0760, 0.0834], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 07:49:45,273 INFO [train.py:901] (2/4) Epoch 24, batch 7600, loss[loss=0.1908, simple_loss=0.2797, pruned_loss=0.051, over 8106.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2853, pruned_loss=0.05997, over 1611582.96 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:49:51,992 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193518.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:50:12,034 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9643, 1.4462, 1.6874, 1.3905, 1.0268, 1.4576, 1.9277, 1.6513], + device='cuda:2'), covar=tensor([0.0550, 0.1316, 0.1704, 0.1490, 0.0622, 0.1551, 0.0684, 0.0640], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0188, 0.0159, 0.0100, 0.0162, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 07:50:19,356 INFO [train.py:901] (2/4) Epoch 24, batch 7650, loss[loss=0.1671, simple_loss=0.2524, pruned_loss=0.04094, over 7912.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2849, pruned_loss=0.05956, over 1611043.31 frames. ], batch size: 20, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:50:24,425 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193564.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:50:30,317 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:50:41,160 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193589.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:50:48,585 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.626e+02 3.196e+02 4.372e+02 7.437e+02, threshold=6.392e+02, percent-clipped=4.0 +2023-02-07 07:50:53,961 INFO [train.py:901] (2/4) Epoch 24, batch 7700, loss[loss=0.2444, simple_loss=0.3305, pruned_loss=0.07912, over 8294.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06056, over 1613108.48 frames. 
], batch size: 23, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:51:11,800 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193633.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:13,831 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4886, 1.6440, 2.1151, 1.3885, 1.5104, 1.7456, 1.5127, 1.5804], + device='cuda:2'), covar=tensor([0.1932, 0.2467, 0.0921, 0.4518, 0.2005, 0.3251, 0.2406, 0.2079], + device='cuda:2'), in_proj_covar=tensor([0.0531, 0.0618, 0.0557, 0.0653, 0.0652, 0.0600, 0.0548, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:51:14,903 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 07:51:17,909 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 07:51:20,928 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193646.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:29,069 INFO [train.py:901] (2/4) Epoch 24, batch 7750, loss[loss=0.1773, simple_loss=0.2512, pruned_loss=0.05166, over 7818.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2855, pruned_loss=0.06028, over 1614733.40 frames. ], batch size: 20, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:51:50,190 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193688.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:58,096 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.679e+02 3.147e+02 3.999e+02 8.742e+02, threshold=6.294e+02, percent-clipped=3.0 +2023-02-07 07:52:03,342 INFO [train.py:901] (2/4) Epoch 24, batch 7800, loss[loss=0.2163, simple_loss=0.3046, pruned_loss=0.064, over 8451.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2852, pruned_loss=0.05961, over 1610030.52 frames. ], batch size: 27, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:52:26,012 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6076, 1.9961, 3.0780, 1.4787, 2.1894, 2.0716, 1.6411, 2.3222], + device='cuda:2'), covar=tensor([0.1934, 0.2764, 0.0796, 0.4585, 0.2182, 0.3225, 0.2521, 0.2414], + device='cuda:2'), in_proj_covar=tensor([0.0530, 0.0617, 0.0557, 0.0651, 0.0651, 0.0599, 0.0548, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 07:52:37,261 INFO [train.py:901] (2/4) Epoch 24, batch 7850, loss[loss=0.1786, simple_loss=0.2649, pruned_loss=0.04617, over 8195.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2854, pruned_loss=0.05976, over 1608121.54 frames. 
], batch size: 23, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:52:39,508 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:52:48,224 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:52:54,182 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:53:05,054 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193799.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:53:05,510 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.471e+02 2.801e+02 3.652e+02 8.352e+02, threshold=5.603e+02, percent-clipped=2.0 +2023-02-07 07:53:08,051 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-07 07:53:10,834 INFO [train.py:901] (2/4) Epoch 24, batch 7900, loss[loss=0.165, simple_loss=0.2398, pruned_loss=0.04513, over 7429.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2845, pruned_loss=0.05917, over 1609853.78 frames. ], batch size: 17, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:53:11,731 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1845, 1.0593, 1.2791, 1.0344, 0.9078, 1.2898, 0.0867, 0.9626], + device='cuda:2'), covar=tensor([0.1501, 0.1188, 0.0464, 0.0728, 0.2504, 0.0587, 0.1976, 0.1165], + device='cuda:2'), in_proj_covar=tensor([0.0196, 0.0202, 0.0130, 0.0221, 0.0274, 0.0139, 0.0172, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 07:53:16,269 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193816.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:53:28,542 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-07 07:53:43,829 INFO [train.py:901] (2/4) Epoch 24, batch 7950, loss[loss=0.2078, simple_loss=0.2973, pruned_loss=0.05921, over 8758.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2851, pruned_loss=0.05965, over 1611230.38 frames. ], batch size: 30, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:02,330 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8118, 1.4981, 2.9063, 1.3788, 2.1979, 3.0917, 3.2500, 2.6658], + device='cuda:2'), covar=tensor([0.1104, 0.1545, 0.0335, 0.2130, 0.0847, 0.0285, 0.0633, 0.0571], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0323, 0.0288, 0.0316, 0.0316, 0.0272, 0.0430, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 07:54:11,266 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:12,502 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.494e+02 3.061e+02 3.521e+02 6.741e+02, threshold=6.122e+02, percent-clipped=2.0 +2023-02-07 07:54:13,606 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. 
limit=5.0 +2023-02-07 07:54:13,965 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193902.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:17,817 INFO [train.py:901] (2/4) Epoch 24, batch 8000, loss[loss=0.222, simple_loss=0.3071, pruned_loss=0.06846, over 8578.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2859, pruned_loss=0.06, over 1613430.55 frames. ], batch size: 31, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:25,815 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 07:54:33,388 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193931.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:54:42,133 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:51,049 INFO [train.py:901] (2/4) Epoch 24, batch 8050, loss[loss=0.218, simple_loss=0.302, pruned_loss=0.06703, over 7445.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2843, pruned_loss=0.0602, over 1593128.89 frames. ], batch size: 17, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:58,154 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:55:03,439 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:55:23,351 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 07:55:28,455 INFO [train.py:901] (2/4) Epoch 25, batch 0, loss[loss=0.2383, simple_loss=0.2949, pruned_loss=0.09081, over 7541.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.2949, pruned_loss=0.09081, over 7541.00 frames. ], batch size: 18, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:55:28,455 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 07:55:39,670 INFO [train.py:935] (2/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 07:55:39,671 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 07:55:46,475 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.577e+02 3.086e+02 3.975e+02 9.885e+02, threshold=6.172e+02, percent-clipped=3.0 +2023-02-07 07:55:57,074 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 07:56:00,133 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:15,966 INFO [train.py:901] (2/4) Epoch 25, batch 50, loss[loss=0.2081, simple_loss=0.2947, pruned_loss=0.06076, over 8182.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2857, pruned_loss=0.05744, over 366118.39 frames. ], batch size: 23, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:56:17,563 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:32,519 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 07:56:51,133 INFO [train.py:901] (2/4) Epoch 25, batch 100, loss[loss=0.2381, simple_loss=0.3325, pruned_loss=0.07188, over 8486.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2869, pruned_loss=0.05929, over 644985.71 frames. 
], batch size: 28, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:56:51,282 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:52,672 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:55,684 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 07:56:57,734 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.655e+02 3.251e+02 4.247e+02 7.218e+02, threshold=6.502e+02, percent-clipped=2.0 +2023-02-07 07:57:19,066 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 07:57:22,502 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 07:57:25,371 INFO [train.py:901] (2/4) Epoch 25, batch 150, loss[loss=0.2087, simple_loss=0.2797, pruned_loss=0.06881, over 7531.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2888, pruned_loss=0.06135, over 858675.81 frames. ], batch size: 18, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:57:35,092 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:57:52,138 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:57:58,199 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194187.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:58:00,648 INFO [train.py:901] (2/4) Epoch 25, batch 200, loss[loss=0.2034, simple_loss=0.2901, pruned_loss=0.05836, over 8145.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2866, pruned_loss=0.05989, over 1027842.56 frames. ], batch size: 22, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:58:07,390 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.343e+02 2.842e+02 3.543e+02 5.999e+02, threshold=5.685e+02, percent-clipped=0.0 +2023-02-07 07:58:16,690 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194212.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:58:35,360 INFO [train.py:901] (2/4) Epoch 25, batch 250, loss[loss=0.179, simple_loss=0.2582, pruned_loss=0.04984, over 8069.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2878, pruned_loss=0.06074, over 1157472.48 frames. ], batch size: 21, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:58:39,446 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194246.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:58:49,462 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 07:58:58,129 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 07:59:08,685 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-07 07:59:09,575 INFO [train.py:901] (2/4) Epoch 25, batch 300, loss[loss=0.1908, simple_loss=0.27, pruned_loss=0.0558, over 7970.00 frames. ], tot_loss[loss=0.205, simple_loss=0.288, pruned_loss=0.06105, over 1260693.54 frames. 
], batch size: 21, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:59:17,105 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.353e+02 2.857e+02 3.504e+02 7.851e+02, threshold=5.715e+02, percent-clipped=2.0 +2023-02-07 07:59:19,403 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0172, 1.7326, 3.2990, 1.4545, 2.3084, 3.5333, 3.6584, 3.0206], + device='cuda:2'), covar=tensor([0.1204, 0.1652, 0.0343, 0.2233, 0.1126, 0.0287, 0.0721, 0.0532], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0322, 0.0287, 0.0316, 0.0316, 0.0273, 0.0429, 0.0303], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 07:59:43,441 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:59:45,911 INFO [train.py:901] (2/4) Epoch 25, batch 350, loss[loss=0.1991, simple_loss=0.2851, pruned_loss=0.05656, over 7973.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.287, pruned_loss=0.06069, over 1337865.93 frames. ], batch size: 21, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:59:51,532 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194348.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:00,425 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:08,052 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194371.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:09,431 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:14,181 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194380.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:00:20,650 INFO [train.py:901] (2/4) Epoch 25, batch 400, loss[loss=0.1862, simple_loss=0.2747, pruned_loss=0.04889, over 8457.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2872, pruned_loss=0.06079, over 1403366.54 frames. ], batch size: 25, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 08:00:27,610 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.445e+02 3.013e+02 3.982e+02 8.525e+02, threshold=6.027e+02, percent-clipped=7.0 +2023-02-07 08:00:29,487 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 08:00:52,193 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194434.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:56,958 INFO [train.py:901] (2/4) Epoch 25, batch 450, loss[loss=0.2053, simple_loss=0.274, pruned_loss=0.06834, over 7647.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2864, pruned_loss=0.05986, over 1450576.66 frames. ], batch size: 19, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 08:01:30,923 INFO [train.py:901] (2/4) Epoch 25, batch 500, loss[loss=0.2434, simple_loss=0.3298, pruned_loss=0.07849, over 8412.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2879, pruned_loss=0.0616, over 1487125.42 frames. 
], batch size: 49, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:01:37,840 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.459e+02 3.156e+02 4.025e+02 7.800e+02, threshold=6.312e+02, percent-clipped=3.0 +2023-02-07 08:02:06,155 INFO [train.py:901] (2/4) Epoch 25, batch 550, loss[loss=0.1965, simple_loss=0.284, pruned_loss=0.05447, over 8105.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2874, pruned_loss=0.06116, over 1515142.75 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:02:08,553 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8426, 1.4805, 1.7443, 1.4762, 1.0303, 1.5344, 1.7442, 1.6082], + device='cuda:2'), covar=tensor([0.0572, 0.1239, 0.1593, 0.1421, 0.0621, 0.1463, 0.0737, 0.0627], + device='cuda:2'), in_proj_covar=tensor([0.0097, 0.0152, 0.0188, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 08:02:13,479 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:02:24,462 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2205, 3.1418, 2.9035, 1.6178, 2.8273, 2.8847, 2.8924, 2.7349], + device='cuda:2'), covar=tensor([0.1250, 0.0931, 0.1433, 0.5053, 0.1174, 0.1346, 0.1638, 0.1177], + device='cuda:2'), in_proj_covar=tensor([0.0535, 0.0454, 0.0435, 0.0552, 0.0436, 0.0457, 0.0433, 0.0398], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:02:38,877 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4818, 2.3623, 3.0432, 2.5307, 3.1051, 2.5979, 2.3983, 1.8739], + device='cuda:2'), covar=tensor([0.5742, 0.5033, 0.2204, 0.3941, 0.2574, 0.2943, 0.1789, 0.5606], + device='cuda:2'), in_proj_covar=tensor([0.0946, 0.0999, 0.0818, 0.0964, 0.1005, 0.0908, 0.0758, 0.0834], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 08:02:40,191 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9118, 1.4797, 1.7364, 1.3986, 1.0721, 1.4479, 1.7966, 1.4521], + device='cuda:2'), covar=tensor([0.0577, 0.1268, 0.1660, 0.1466, 0.0612, 0.1518, 0.0693, 0.0699], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 08:02:42,130 INFO [train.py:901] (2/4) Epoch 25, batch 600, loss[loss=0.202, simple_loss=0.2947, pruned_loss=0.05463, over 8500.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2869, pruned_loss=0.06079, over 1539519.35 frames. ], batch size: 26, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:02:48,737 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.361e+02 2.970e+02 3.663e+02 1.001e+03, threshold=5.941e+02, percent-clipped=3.0 +2023-02-07 08:03:01,141 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 08:03:01,339 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:16,776 INFO [train.py:901] (2/4) Epoch 25, batch 650, loss[loss=0.2038, simple_loss=0.2875, pruned_loss=0.06003, over 8592.00 frames. 
], tot_loss[loss=0.2036, simple_loss=0.2864, pruned_loss=0.06034, over 1559668.14 frames. ], batch size: 39, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:03:18,074 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194642.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:25,643 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:35,792 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7134, 1.6886, 2.2886, 1.5071, 1.3146, 2.2694, 0.3796, 1.4664], + device='cuda:2'), covar=tensor([0.1444, 0.1118, 0.0336, 0.0970, 0.2324, 0.0346, 0.1800, 0.1030], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0198, 0.0129, 0.0218, 0.0270, 0.0138, 0.0169, 0.0195], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:03:45,808 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194680.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:52,562 INFO [train.py:901] (2/4) Epoch 25, batch 700, loss[loss=0.2382, simple_loss=0.316, pruned_loss=0.0802, over 8580.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2867, pruned_loss=0.06033, over 1570609.37 frames. ], batch size: 31, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:04:00,039 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.448e+02 2.849e+02 3.638e+02 5.412e+02, threshold=5.698e+02, percent-clipped=0.0 +2023-02-07 08:04:09,641 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194715.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:04:09,782 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8338, 2.1679, 3.6178, 1.7993, 1.7937, 3.5028, 0.5569, 2.1852], + device='cuda:2'), covar=tensor([0.1343, 0.1154, 0.0220, 0.1590, 0.2311, 0.0254, 0.2024, 0.1118], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0198, 0.0129, 0.0218, 0.0270, 0.0138, 0.0169, 0.0195], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:04:16,433 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194724.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:04:27,401 INFO [train.py:901] (2/4) Epoch 25, batch 750, loss[loss=0.1578, simple_loss=0.2501, pruned_loss=0.03276, over 7414.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.06025, over 1579739.19 frames. ], batch size: 17, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:04:42,121 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6417, 2.5973, 1.7920, 2.2621, 2.1894, 1.5279, 2.1009, 2.1565], + device='cuda:2'), covar=tensor([0.1734, 0.0462, 0.1443, 0.0778, 0.0829, 0.1755, 0.1183, 0.1122], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0235, 0.0337, 0.0310, 0.0300, 0.0342, 0.0346, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 08:04:49,489 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 08:04:58,548 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 08:05:03,379 INFO [train.py:901] (2/4) Epoch 25, batch 800, loss[loss=0.2168, simple_loss=0.2963, pruned_loss=0.0686, over 8358.00 frames. 
], tot_loss[loss=0.2023, simple_loss=0.2844, pruned_loss=0.0601, over 1580661.03 frames. ], batch size: 26, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:05:07,600 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:11,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 2.417e+02 2.991e+02 3.771e+02 6.788e+02, threshold=5.982e+02, percent-clipped=2.0 +2023-02-07 08:05:14,552 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194805.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:19,316 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9134, 6.0075, 5.2139, 2.4933, 5.3297, 5.6606, 5.4419, 5.5015], + device='cuda:2'), covar=tensor([0.0493, 0.0355, 0.0892, 0.4348, 0.0751, 0.0617, 0.1049, 0.0480], + device='cuda:2'), in_proj_covar=tensor([0.0533, 0.0452, 0.0436, 0.0551, 0.0436, 0.0456, 0.0432, 0.0398], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:05:31,802 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:31,827 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:34,489 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1049, 1.6458, 1.4161, 1.5827, 1.4078, 1.2706, 1.3594, 1.3017], + device='cuda:2'), covar=tensor([0.1081, 0.0496, 0.1295, 0.0568, 0.0770, 0.1588, 0.0903, 0.0799], + device='cuda:2'), in_proj_covar=tensor([0.0354, 0.0234, 0.0335, 0.0308, 0.0298, 0.0339, 0.0344, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 08:05:37,856 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194839.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:05:38,297 INFO [train.py:901] (2/4) Epoch 25, batch 850, loss[loss=0.2407, simple_loss=0.3247, pruned_loss=0.07829, over 8605.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2859, pruned_loss=0.06077, over 1594146.87 frames. ], batch size: 31, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:05:56,607 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:06:14,011 INFO [train.py:901] (2/4) Epoch 25, batch 900, loss[loss=0.1885, simple_loss=0.2681, pruned_loss=0.05448, over 7423.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2856, pruned_loss=0.06057, over 1595484.94 frames. 
], batch size: 17, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:06:17,739 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7800, 1.3861, 1.6194, 1.3487, 0.9919, 1.4479, 1.6313, 1.3195], + device='cuda:2'), covar=tensor([0.0567, 0.1318, 0.1651, 0.1474, 0.0616, 0.1496, 0.0726, 0.0751], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 08:06:22,179 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.491e+02 2.923e+02 3.701e+02 8.623e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 08:06:34,964 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6716, 1.7170, 2.5159, 1.5307, 1.2939, 2.4718, 0.4189, 1.5288], + device='cuda:2'), covar=tensor([0.1948, 0.1243, 0.0309, 0.1258, 0.2560, 0.0315, 0.2021, 0.1246], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0198, 0.0128, 0.0218, 0.0269, 0.0137, 0.0169, 0.0194], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:06:49,722 INFO [train.py:901] (2/4) Epoch 25, batch 950, loss[loss=0.1958, simple_loss=0.278, pruned_loss=0.05682, over 8089.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2861, pruned_loss=0.06023, over 1600665.94 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:07:19,513 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 08:07:24,290 INFO [train.py:901] (2/4) Epoch 25, batch 1000, loss[loss=0.1687, simple_loss=0.2494, pruned_loss=0.04399, over 7214.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2858, pruned_loss=0.06024, over 1602606.59 frames. ], batch size: 16, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:07:25,388 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 08:07:29,064 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:07:32,158 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.651e+02 3.101e+02 3.894e+02 6.477e+02, threshold=6.202e+02, percent-clipped=4.0 +2023-02-07 08:07:51,778 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195029.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:07:54,450 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 08:07:59,734 INFO [train.py:901] (2/4) Epoch 25, batch 1050, loss[loss=0.2245, simple_loss=0.2983, pruned_loss=0.07529, over 8024.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06072, over 1602598.27 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:08:06,387 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-07 08:08:07,157 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195051.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:14,489 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195062.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:08:23,831 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195076.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:31,299 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:31,922 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3928, 1.3942, 4.5645, 1.7468, 4.0501, 3.7662, 4.1254, 4.0146], + device='cuda:2'), covar=tensor([0.0559, 0.5006, 0.0485, 0.4210, 0.1014, 0.0943, 0.0559, 0.0657], + device='cuda:2'), in_proj_covar=tensor([0.0640, 0.0646, 0.0702, 0.0637, 0.0712, 0.0605, 0.0610, 0.0684], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:08:33,316 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:33,849 INFO [train.py:901] (2/4) Epoch 25, batch 1100, loss[loss=0.2004, simple_loss=0.292, pruned_loss=0.05444, over 8241.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.288, pruned_loss=0.0615, over 1609433.73 frames. ], batch size: 24, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:08:37,351 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195095.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:08:41,228 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.550e+02 3.152e+02 4.111e+02 6.650e+02, threshold=6.304e+02, percent-clipped=3.0 +2023-02-07 08:08:48,159 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:48,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:53,475 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5746, 4.5886, 4.0850, 2.1959, 4.0560, 4.2352, 4.1339, 4.1027], + device='cuda:2'), covar=tensor([0.0648, 0.0455, 0.0907, 0.4518, 0.0796, 0.0814, 0.1078, 0.0682], + device='cuda:2'), in_proj_covar=tensor([0.0531, 0.0450, 0.0432, 0.0545, 0.0434, 0.0452, 0.0426, 0.0396], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:08:54,917 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195120.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:09:09,221 INFO [train.py:901] (2/4) Epoch 25, batch 1150, loss[loss=0.2548, simple_loss=0.3367, pruned_loss=0.08647, over 8337.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2868, pruned_loss=0.06094, over 1611183.26 frames. ], batch size: 26, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:09:16,871 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 08:09:43,696 INFO [train.py:901] (2/4) Epoch 25, batch 1200, loss[loss=0.1659, simple_loss=0.2516, pruned_loss=0.04009, over 7524.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.06, over 1611086.15 frames. 
], batch size: 18, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:09:44,707 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 08:09:51,821 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.229e+02 2.843e+02 3.492e+02 1.399e+03, threshold=5.685e+02, percent-clipped=2.0 +2023-02-07 08:09:57,122 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:10:18,379 INFO [train.py:901] (2/4) Epoch 25, batch 1250, loss[loss=0.1659, simple_loss=0.2496, pruned_loss=0.04104, over 7240.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2857, pruned_loss=0.06045, over 1610247.68 frames. ], batch size: 16, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:10:34,134 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-07 08:10:53,103 INFO [train.py:901] (2/4) Epoch 25, batch 1300, loss[loss=0.2042, simple_loss=0.3028, pruned_loss=0.05281, over 8024.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.06024, over 1608985.10 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:11:00,278 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.452e+02 2.850e+02 4.025e+02 1.071e+03, threshold=5.700e+02, percent-clipped=7.0 +2023-02-07 08:11:16,291 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:26,831 INFO [train.py:901] (2/4) Epoch 25, batch 1350, loss[loss=0.2251, simple_loss=0.3084, pruned_loss=0.07087, over 7793.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.286, pruned_loss=0.06051, over 1609436.79 frames. ], batch size: 20, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:11:45,671 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:50,114 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:57,385 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-07 08:12:02,392 INFO [train.py:901] (2/4) Epoch 25, batch 1400, loss[loss=0.1998, simple_loss=0.2919, pruned_loss=0.05379, over 8317.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05951, over 1611044.10 frames. ], batch size: 25, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:12:04,001 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:10,480 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.570e+02 2.915e+02 3.833e+02 8.465e+02, threshold=5.831e+02, percent-clipped=6.0 +2023-02-07 08:12:13,227 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195406.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:12:15,773 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195410.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:31,396 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:36,125 INFO [train.py:901] (2/4) Epoch 25, batch 1450, loss[loss=0.2327, simple_loss=0.3229, pruned_loss=0.07124, over 8327.00 frames. 
], tot_loss[loss=0.202, simple_loss=0.2843, pruned_loss=0.05987, over 1610802.98 frames. ], batch size: 25, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:12:36,339 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6389, 2.1912, 3.4062, 1.6993, 1.7314, 3.4008, 0.7326, 2.1086], + device='cuda:2'), covar=tensor([0.1661, 0.1288, 0.0262, 0.1698, 0.2318, 0.0290, 0.2046, 0.1517], + device='cuda:2'), in_proj_covar=tensor([0.0195, 0.0201, 0.0130, 0.0221, 0.0273, 0.0139, 0.0171, 0.0196], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:12:44,148 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 08:13:10,177 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195488.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:13:11,402 INFO [train.py:901] (2/4) Epoch 25, batch 1500, loss[loss=0.1913, simple_loss=0.2669, pruned_loss=0.05783, over 8081.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2849, pruned_loss=0.05983, over 1614309.59 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:13:19,806 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.401e+02 3.375e+02 4.255e+02 1.024e+03, threshold=6.749e+02, percent-clipped=12.0 +2023-02-07 08:13:33,926 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195521.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:13:46,247 INFO [train.py:901] (2/4) Epoch 25, batch 1550, loss[loss=0.2121, simple_loss=0.2878, pruned_loss=0.06818, over 8102.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.285, pruned_loss=0.05974, over 1616977.15 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:13:51,917 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:14,118 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:21,359 INFO [train.py:901] (2/4) Epoch 25, batch 1600, loss[loss=0.2228, simple_loss=0.3049, pruned_loss=0.07036, over 8435.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2845, pruned_loss=0.05939, over 1611222.65 frames. ], batch size: 29, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:14:29,485 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.317e+02 3.105e+02 3.813e+02 7.132e+02, threshold=6.211e+02, percent-clipped=3.0 +2023-02-07 08:14:32,325 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:37,017 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2058, 2.0178, 2.6869, 2.1951, 2.6957, 2.2857, 2.1177, 1.5906], + device='cuda:2'), covar=tensor([0.5670, 0.5427, 0.2169, 0.4102, 0.2639, 0.3364, 0.1998, 0.5745], + device='cuda:2'), in_proj_covar=tensor([0.0954, 0.1009, 0.0826, 0.0977, 0.1019, 0.0919, 0.0765, 0.0842], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 08:14:55,981 INFO [train.py:901] (2/4) Epoch 25, batch 1650, loss[loss=0.1692, simple_loss=0.2585, pruned_loss=0.03994, over 7989.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05944, over 1609559.18 frames. 
], batch size: 21, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:15:29,744 INFO [train.py:901] (2/4) Epoch 25, batch 1700, loss[loss=0.1791, simple_loss=0.256, pruned_loss=0.0511, over 7698.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05915, over 1609914.69 frames. ], batch size: 18, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:15:38,027 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.590e+02 3.116e+02 3.996e+02 7.880e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-07 08:16:05,347 INFO [train.py:901] (2/4) Epoch 25, batch 1750, loss[loss=0.1737, simple_loss=0.2587, pruned_loss=0.04438, over 7650.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2848, pruned_loss=0.05971, over 1608660.26 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:16:06,321 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5956, 2.9907, 2.5196, 4.0726, 1.7527, 2.1696, 2.5246, 2.9733], + device='cuda:2'), covar=tensor([0.0643, 0.0710, 0.0720, 0.0198, 0.1046, 0.1127, 0.0859, 0.0720], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0213, 0.0205, 0.0245, 0.0249, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 08:16:09,112 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195744.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:15,705 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:17,273 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4839, 2.4142, 3.1828, 2.4816, 3.1501, 2.5357, 2.3016, 1.9786], + device='cuda:2'), covar=tensor([0.5430, 0.4946, 0.1868, 0.3766, 0.2447, 0.2955, 0.1846, 0.5415], + device='cuda:2'), in_proj_covar=tensor([0.0953, 0.1008, 0.0825, 0.0976, 0.1016, 0.0917, 0.0764, 0.0841], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 08:16:26,010 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195769.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:31,518 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195777.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:16:40,098 INFO [train.py:901] (2/4) Epoch 25, batch 1800, loss[loss=0.1643, simple_loss=0.2457, pruned_loss=0.04149, over 7810.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2851, pruned_loss=0.06015, over 1609112.61 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:16:48,976 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.456e+02 2.857e+02 3.484e+02 7.816e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-07 08:16:49,215 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:16:50,549 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:07,905 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:15,239 INFO [train.py:901] (2/4) Epoch 25, batch 1850, loss[loss=0.2038, simple_loss=0.2945, pruned_loss=0.05653, over 8444.00 frames. 
], tot_loss[loss=0.2033, simple_loss=0.2862, pruned_loss=0.0602, over 1610647.85 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:36,394 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:50,144 INFO [train.py:901] (2/4) Epoch 25, batch 1900, loss[loss=0.1869, simple_loss=0.2704, pruned_loss=0.05173, over 8248.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2858, pruned_loss=0.06016, over 1607852.63 frames. ], batch size: 24, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:58,360 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.686e+02 3.045e+02 3.689e+02 8.196e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-07 08:18:24,461 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 08:18:25,126 INFO [train.py:901] (2/4) Epoch 25, batch 1950, loss[loss=0.1583, simple_loss=0.2452, pruned_loss=0.03565, over 7801.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2857, pruned_loss=0.05979, over 1613403.19 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:18:37,850 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 08:18:56,451 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0 +2023-02-07 08:18:57,285 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 08:18:58,124 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5209, 2.4578, 1.7908, 2.2694, 2.0978, 1.5065, 1.9967, 2.1343], + device='cuda:2'), covar=tensor([0.1464, 0.0383, 0.1199, 0.0623, 0.0754, 0.1551, 0.1040, 0.0941], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0237, 0.0341, 0.0312, 0.0302, 0.0345, 0.0349, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 08:19:00,594 INFO [train.py:901] (2/4) Epoch 25, batch 2000, loss[loss=0.2102, simple_loss=0.2994, pruned_loss=0.0605, over 8619.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05941, over 1615660.74 frames. ], batch size: 50, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:19:09,749 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.344e+02 2.823e+02 3.287e+02 7.423e+02, threshold=5.646e+02, percent-clipped=4.0 +2023-02-07 08:19:36,116 INFO [train.py:901] (2/4) Epoch 25, batch 2050, loss[loss=0.2024, simple_loss=0.2895, pruned_loss=0.05769, over 8344.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2846, pruned_loss=0.05899, over 1617073.43 frames. ], batch size: 26, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:11,105 INFO [train.py:901] (2/4) Epoch 25, batch 2100, loss[loss=0.2109, simple_loss=0.2909, pruned_loss=0.06543, over 8079.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2848, pruned_loss=0.05913, over 1617799.82 frames. 
], batch size: 21, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:20,386 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.403e+02 2.946e+02 3.659e+02 8.101e+02, threshold=5.892e+02, percent-clipped=3.0 +2023-02-07 08:20:35,889 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=196125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:20:46,046 INFO [train.py:901] (2/4) Epoch 25, batch 2150, loss[loss=0.1866, simple_loss=0.2597, pruned_loss=0.0567, over 7794.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2858, pruned_loss=0.0596, over 1619397.46 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:54,019 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=196150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:21:22,072 INFO [train.py:901] (2/4) Epoch 25, batch 2200, loss[loss=0.2032, simple_loss=0.2898, pruned_loss=0.05832, over 8460.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2867, pruned_loss=0.06038, over 1621811.33 frames. ], batch size: 27, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:21:30,652 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.555e+02 3.213e+02 4.289e+02 6.887e+02, threshold=6.426e+02, percent-clipped=5.0 +2023-02-07 08:21:37,544 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2424, 3.1655, 2.9092, 1.9065, 2.8679, 2.8703, 2.8404, 2.7760], + device='cuda:2'), covar=tensor([0.0898, 0.0803, 0.1188, 0.3718, 0.0974, 0.1294, 0.1492, 0.0942], + device='cuda:2'), in_proj_covar=tensor([0.0532, 0.0449, 0.0435, 0.0546, 0.0433, 0.0452, 0.0427, 0.0396], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:21:56,963 INFO [train.py:901] (2/4) Epoch 25, batch 2250, loss[loss=0.1889, simple_loss=0.2647, pruned_loss=0.05656, over 8289.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2867, pruned_loss=0.06047, over 1624637.10 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:07,576 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0478, 1.5750, 3.4326, 1.5708, 2.4024, 3.8070, 3.9019, 3.2755], + device='cuda:2'), covar=tensor([0.1167, 0.1852, 0.0377, 0.2112, 0.1202, 0.0232, 0.0495, 0.0542], + device='cuda:2'), in_proj_covar=tensor([0.0300, 0.0325, 0.0289, 0.0317, 0.0317, 0.0275, 0.0432, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 08:22:12,519 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5970, 2.4572, 1.8252, 2.3190, 2.1413, 1.5948, 2.0582, 2.1202], + device='cuda:2'), covar=tensor([0.1459, 0.0441, 0.1278, 0.0632, 0.0774, 0.1599, 0.1063, 0.0990], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0238, 0.0341, 0.0313, 0.0303, 0.0345, 0.0350, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 08:22:32,057 INFO [train.py:901] (2/4) Epoch 25, batch 2300, loss[loss=0.185, simple_loss=0.2813, pruned_loss=0.0443, over 8324.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2871, pruned_loss=0.06026, over 1624737.42 frames. 
], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:40,954 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.794e+02 3.530e+02 9.865e+02, threshold=5.587e+02, percent-clipped=2.0 +2023-02-07 08:23:04,009 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8567, 1.9659, 1.7165, 2.6611, 1.1382, 1.5345, 1.8525, 1.9714], + device='cuda:2'), covar=tensor([0.0715, 0.0784, 0.0928, 0.0384, 0.1152, 0.1364, 0.0821, 0.0809], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0194, 0.0242, 0.0210, 0.0203, 0.0243, 0.0247, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 08:23:07,217 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:07,751 INFO [train.py:901] (2/4) Epoch 25, batch 2350, loss[loss=0.188, simple_loss=0.2631, pruned_loss=0.05645, over 7698.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2862, pruned_loss=0.05971, over 1624641.04 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:34,294 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:42,090 INFO [train.py:901] (2/4) Epoch 25, batch 2400, loss[loss=0.194, simple_loss=0.2731, pruned_loss=0.05743, over 7911.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.285, pruned_loss=0.05918, over 1620446.32 frames. ], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:50,271 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.902e+02 3.432e+02 7.434e+02, threshold=5.805e+02, percent-clipped=2.0 +2023-02-07 08:24:17,357 INFO [train.py:901] (2/4) Epoch 25, batch 2450, loss[loss=0.1561, simple_loss=0.2328, pruned_loss=0.03974, over 7436.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2844, pruned_loss=0.05862, over 1623574.70 frames. ], batch size: 17, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:24:51,933 INFO [train.py:901] (2/4) Epoch 25, batch 2500, loss[loss=0.2539, simple_loss=0.3303, pruned_loss=0.08874, over 6941.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2842, pruned_loss=0.05854, over 1623080.50 frames. ], batch size: 71, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:00,795 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.398e+02 2.858e+02 3.242e+02 5.404e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 08:25:26,994 INFO [train.py:901] (2/4) Epoch 25, batch 2550, loss[loss=0.1888, simple_loss=0.2742, pruned_loss=0.05167, over 8130.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2849, pruned_loss=0.05898, over 1625063.17 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:26:02,156 INFO [train.py:901] (2/4) Epoch 25, batch 2600, loss[loss=0.2085, simple_loss=0.2878, pruned_loss=0.06463, over 8488.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05932, over 1619581.10 frames. 
], batch size: 29, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:26:06,422 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:26:10,247 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.366e+02 2.911e+02 3.287e+02 8.101e+02, threshold=5.822e+02, percent-clipped=1.0 +2023-02-07 08:26:22,284 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7305, 5.8314, 4.9746, 2.5946, 5.0738, 5.5830, 5.2859, 5.3639], + device='cuda:2'), covar=tensor([0.0502, 0.0393, 0.0962, 0.4294, 0.0817, 0.0761, 0.1069, 0.0487], + device='cuda:2'), in_proj_covar=tensor([0.0531, 0.0447, 0.0434, 0.0543, 0.0435, 0.0451, 0.0425, 0.0397], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:26:36,595 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5472, 1.5304, 2.1236, 1.2463, 1.2134, 2.0840, 0.2591, 1.2156], + device='cuda:2'), covar=tensor([0.1629, 0.1147, 0.0345, 0.1094, 0.2457, 0.0437, 0.2017, 0.1270], + device='cuda:2'), in_proj_covar=tensor([0.0195, 0.0201, 0.0131, 0.0220, 0.0272, 0.0140, 0.0171, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:26:37,068 INFO [train.py:901] (2/4) Epoch 25, batch 2650, loss[loss=0.2264, simple_loss=0.318, pruned_loss=0.06735, over 8115.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.0593, over 1616478.39 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:08,211 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:12,709 INFO [train.py:901] (2/4) Epoch 25, batch 2700, loss[loss=0.2156, simple_loss=0.3029, pruned_loss=0.06415, over 8188.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2837, pruned_loss=0.05861, over 1613358.25 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:20,585 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.452e+02 2.909e+02 3.648e+02 8.771e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-07 08:27:33,982 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:46,997 INFO [train.py:901] (2/4) Epoch 25, batch 2750, loss[loss=0.1964, simple_loss=0.2908, pruned_loss=0.051, over 8495.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2826, pruned_loss=0.05831, over 1610921.70 frames. ], batch size: 26, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:55,334 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3042, 2.4548, 2.9010, 1.6486, 3.1609, 1.9109, 1.5521, 2.2418], + device='cuda:2'), covar=tensor([0.1047, 0.0559, 0.0379, 0.0993, 0.0485, 0.0959, 0.1119, 0.0636], + device='cuda:2'), in_proj_covar=tensor([0.0465, 0.0404, 0.0360, 0.0456, 0.0388, 0.0543, 0.0402, 0.0432], + device='cuda:2'), out_proj_covar=tensor([1.2356e-04, 1.0535e-04, 9.4351e-05, 1.1950e-04, 1.0162e-04, 1.5227e-04, + 1.0762e-04, 1.1362e-04], device='cuda:2') +2023-02-07 08:28:11,176 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-07 08:28:22,165 INFO [train.py:901] (2/4) Epoch 25, batch 2800, loss[loss=0.1802, simple_loss=0.2694, pruned_loss=0.04545, over 8507.00 frames. 
], tot_loss[loss=0.1988, simple_loss=0.2818, pruned_loss=0.05795, over 1608452.25 frames. ], batch size: 28, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:28:27,859 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:28,544 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:31,148 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.445e+02 2.946e+02 3.604e+02 6.151e+02, threshold=5.892e+02, percent-clipped=2.0 +2023-02-07 08:28:52,420 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 08:28:54,961 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:56,675 INFO [train.py:901] (2/4) Epoch 25, batch 2850, loss[loss=0.2362, simple_loss=0.3263, pruned_loss=0.07302, over 8467.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2822, pruned_loss=0.05747, over 1613590.18 frames. ], batch size: 25, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:19,400 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:32,392 INFO [train.py:901] (2/4) Epoch 25, batch 2900, loss[loss=0.1924, simple_loss=0.2886, pruned_loss=0.04814, over 8028.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2812, pruned_loss=0.05676, over 1613387.51 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:39,439 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:41,309 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.504e+02 3.053e+02 3.742e+02 6.617e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-07 08:30:08,123 INFO [train.py:901] (2/4) Epoch 25, batch 2950, loss[loss=0.2274, simple_loss=0.3125, pruned_loss=0.07109, over 8595.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2831, pruned_loss=0.05779, over 1617399.86 frames. ], batch size: 49, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:08,203 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:30:08,823 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 08:30:19,842 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-07 08:30:41,610 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 08:30:42,478 INFO [train.py:901] (2/4) Epoch 25, batch 3000, loss[loss=0.1939, simple_loss=0.2804, pruned_loss=0.05366, over 8114.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2842, pruned_loss=0.05887, over 1617004.76 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:42,478 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 08:30:55,640 INFO [train.py:935] (2/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2721, pruned_loss=0.03618, over 944034.00 frames. 
+2023-02-07 08:30:55,641 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 08:31:03,958 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 2.477e+02 2.955e+02 3.925e+02 7.788e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 08:31:30,699 INFO [train.py:901] (2/4) Epoch 25, batch 3050, loss[loss=0.1961, simple_loss=0.286, pruned_loss=0.05305, over 8337.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2859, pruned_loss=0.05965, over 1620318.88 frames. ], batch size: 26, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:31:40,531 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197054.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:41,168 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:57,855 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197079.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:05,206 INFO [train.py:901] (2/4) Epoch 25, batch 3100, loss[loss=0.234, simple_loss=0.3143, pruned_loss=0.07687, over 8287.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2863, pruned_loss=0.06001, over 1620121.60 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:07,488 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:13,235 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.425e+02 3.089e+02 3.818e+02 7.102e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-07 08:32:24,977 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:40,196 INFO [train.py:901] (2/4) Epoch 25, batch 3150, loss[loss=0.1685, simple_loss=0.2636, pruned_loss=0.03674, over 8203.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2865, pruned_loss=0.06007, over 1619544.22 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:40,984 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:33:15,302 INFO [train.py:901] (2/4) Epoch 25, batch 3200, loss[loss=0.177, simple_loss=0.2579, pruned_loss=0.04806, over 7937.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2859, pruned_loss=0.06032, over 1617053.19 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:23,539 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.400e+02 2.739e+02 3.315e+02 1.024e+03, threshold=5.479e+02, percent-clipped=5.0 +2023-02-07 08:33:33,159 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:33:50,294 INFO [train.py:901] (2/4) Epoch 25, batch 3250, loss[loss=0.2129, simple_loss=0.2978, pruned_loss=0.064, over 8037.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2855, pruned_loss=0.05979, over 1617471.13 frames. 
], batch size: 22, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:52,455 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:02,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:25,382 INFO [train.py:901] (2/4) Epoch 25, batch 3300, loss[loss=0.1594, simple_loss=0.2403, pruned_loss=0.03932, over 7701.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2845, pruned_loss=0.05913, over 1615700.52 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:34:34,250 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.560e+02 3.230e+02 4.212e+02 8.703e+02, threshold=6.460e+02, percent-clipped=10.0 +2023-02-07 08:34:40,503 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:46,284 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 08:34:54,230 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:57,734 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:00,243 INFO [train.py:901] (2/4) Epoch 25, batch 3350, loss[loss=0.1695, simple_loss=0.2552, pruned_loss=0.04193, over 7815.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.0593, over 1617767.22 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:35:13,338 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:17,450 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2496, 1.1262, 1.2996, 1.0313, 0.9911, 1.3270, 0.1317, 0.9882], + device='cuda:2'), covar=tensor([0.1388, 0.1212, 0.0494, 0.0660, 0.2549, 0.0503, 0.1906, 0.1116], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0220, 0.0271, 0.0139, 0.0170, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:35:36,171 INFO [train.py:901] (2/4) Epoch 25, batch 3400, loss[loss=0.2039, simple_loss=0.2794, pruned_loss=0.0642, over 7917.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2848, pruned_loss=0.05905, over 1619655.71 frames. 
], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:35:39,769 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4724, 1.4483, 1.8457, 1.1421, 1.1024, 1.8616, 0.1636, 1.1093], + device='cuda:2'), covar=tensor([0.1567, 0.1296, 0.0388, 0.1090, 0.2640, 0.0380, 0.1981, 0.1317], + device='cuda:2'), in_proj_covar=tensor([0.0193, 0.0199, 0.0130, 0.0220, 0.0271, 0.0138, 0.0170, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:35:44,271 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.489e+02 3.044e+02 3.734e+02 7.163e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 08:36:06,304 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197433.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:36:11,058 INFO [train.py:901] (2/4) Epoch 25, batch 3450, loss[loss=0.2259, simple_loss=0.3011, pruned_loss=0.07537, over 7805.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.285, pruned_loss=0.05942, over 1620789.94 frames. ], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:16,731 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3131, 2.1381, 1.6264, 1.9451, 1.7927, 1.3937, 1.7247, 1.7397], + device='cuda:2'), covar=tensor([0.1274, 0.0406, 0.1225, 0.0516, 0.0656, 0.1547, 0.0929, 0.0877], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0236, 0.0340, 0.0311, 0.0300, 0.0345, 0.0350, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 08:36:46,262 INFO [train.py:901] (2/4) Epoch 25, batch 3500, loss[loss=0.1979, simple_loss=0.2697, pruned_loss=0.06309, over 8204.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.05923, over 1621178.63 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:54,912 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.387e+02 2.953e+02 3.537e+02 5.869e+02, threshold=5.907e+02, percent-clipped=0.0 +2023-02-07 08:37:02,036 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:07,262 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 08:37:17,736 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:19,770 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:21,600 INFO [train.py:901] (2/4) Epoch 25, batch 3550, loss[loss=0.2027, simple_loss=0.286, pruned_loss=0.05965, over 7236.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05914, over 1613545.72 frames. 
], batch size: 71, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:37:22,461 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8911, 1.8423, 2.4390, 1.5751, 1.4736, 2.4773, 0.4807, 1.4890], + device='cuda:2'), covar=tensor([0.1456, 0.1136, 0.0314, 0.1055, 0.2121, 0.0302, 0.1708, 0.1149], + device='cuda:2'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0221, 0.0273, 0.0139, 0.0172, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:37:24,413 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9501, 1.6936, 2.0293, 1.8071, 2.0013, 2.0206, 1.8635, 0.8609], + device='cuda:2'), covar=tensor([0.5872, 0.4684, 0.2201, 0.3694, 0.2527, 0.3189, 0.2003, 0.4994], + device='cuda:2'), in_proj_covar=tensor([0.0951, 0.1006, 0.0822, 0.0974, 0.1014, 0.0914, 0.0763, 0.0840], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 08:37:37,368 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197563.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:52,449 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8013, 1.9026, 1.6977, 2.3214, 1.0348, 1.5818, 1.6941, 1.8523], + device='cuda:2'), covar=tensor([0.0722, 0.0781, 0.0894, 0.0381, 0.1133, 0.1271, 0.0790, 0.0739], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0194, 0.0245, 0.0212, 0.0204, 0.0247, 0.0248, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 08:37:54,506 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:56,269 INFO [train.py:901] (2/4) Epoch 25, batch 3600, loss[loss=0.2028, simple_loss=0.2849, pruned_loss=0.06041, over 8346.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2838, pruned_loss=0.05892, over 1611404.17 frames. ], batch size: 26, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:05,202 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.311e+02 2.881e+02 3.803e+02 6.346e+02, threshold=5.762e+02, percent-clipped=1.0 +2023-02-07 08:38:12,109 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197612.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:13,509 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,671 INFO [train.py:901] (2/4) Epoch 25, batch 3650, loss[loss=0.2551, simple_loss=0.3255, pruned_loss=0.0924, over 8583.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2828, pruned_loss=0.05871, over 1612006.55 frames. 
], batch size: 31, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:40,965 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7426, 1.6564, 2.4220, 1.4951, 1.3139, 2.3869, 0.3916, 1.4523], + device='cuda:2'), covar=tensor([0.1599, 0.1245, 0.0325, 0.1238, 0.2580, 0.0348, 0.2037, 0.1365], + device='cuda:2'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0221, 0.0273, 0.0139, 0.0171, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:38:54,159 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7931, 2.3330, 4.0892, 1.6408, 3.0077, 2.3929, 1.7867, 3.1550], + device='cuda:2'), covar=tensor([0.1853, 0.2599, 0.0761, 0.4447, 0.1850, 0.2997, 0.2377, 0.2109], + device='cuda:2'), in_proj_covar=tensor([0.0532, 0.0621, 0.0557, 0.0659, 0.0654, 0.0603, 0.0549, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:39:06,718 INFO [train.py:901] (2/4) Epoch 25, batch 3700, loss[loss=0.1674, simple_loss=0.259, pruned_loss=0.03791, over 8091.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2831, pruned_loss=0.05894, over 1615354.06 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:39:09,551 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 08:39:15,746 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.484e+02 2.942e+02 3.783e+02 7.174e+02, threshold=5.884e+02, percent-clipped=5.0 +2023-02-07 08:39:35,834 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.5442, 1.7206, 3.6878, 1.9910, 3.3470, 3.1511, 3.4235, 3.3422], + device='cuda:2'), covar=tensor([0.0707, 0.3657, 0.0848, 0.3716, 0.1059, 0.0939, 0.0601, 0.0659], + device='cuda:2'), in_proj_covar=tensor([0.0655, 0.0660, 0.0721, 0.0650, 0.0730, 0.0624, 0.0626, 0.0701], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:39:40,720 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 08:39:43,102 INFO [train.py:901] (2/4) Epoch 25, batch 3750, loss[loss=0.1765, simple_loss=0.2562, pruned_loss=0.04844, over 7972.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2837, pruned_loss=0.05933, over 1611305.56 frames. ], batch size: 21, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:09,409 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197777.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:40:18,191 INFO [train.py:901] (2/4) Epoch 25, batch 3800, loss[loss=0.2014, simple_loss=0.2858, pruned_loss=0.05853, over 8109.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2854, pruned_loss=0.05991, over 1617169.24 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:26,484 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.549e+02 3.044e+02 3.681e+02 9.424e+02, threshold=6.087e+02, percent-clipped=5.0 +2023-02-07 08:40:34,310 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 08:40:43,133 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-07 08:40:53,458 INFO [train.py:901] (2/4) Epoch 25, batch 3850, loss[loss=0.2099, simple_loss=0.2962, pruned_loss=0.06185, over 8632.00 frames. 
], tot_loss[loss=0.2041, simple_loss=0.2866, pruned_loss=0.06081, over 1616150.81 frames. ], batch size: 49, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:57,609 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2707, 2.8397, 2.2799, 3.9113, 1.6145, 1.9330, 2.4206, 2.8496], + device='cuda:2'), covar=tensor([0.0773, 0.0811, 0.0810, 0.0267, 0.1200, 0.1346, 0.0976, 0.0766], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0211, 0.0205, 0.0247, 0.0247, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 08:41:12,902 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 08:41:19,585 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:28,508 INFO [train.py:901] (2/4) Epoch 25, batch 3900, loss[loss=0.1695, simple_loss=0.2546, pruned_loss=0.0422, over 7545.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2869, pruned_loss=0.06055, over 1617252.10 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:41:29,963 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197892.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:41:36,374 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.445e+02 2.982e+02 3.609e+02 8.629e+02, threshold=5.963e+02, percent-clipped=3.0 +2023-02-07 08:41:39,828 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197907.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:41,928 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:02,716 INFO [train.py:901] (2/4) Epoch 25, batch 3950, loss[loss=0.2171, simple_loss=0.2855, pruned_loss=0.07438, over 7272.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2871, pruned_loss=0.0607, over 1616127.86 frames. ], batch size: 16, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:24,235 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1301, 1.2824, 4.3136, 1.6508, 3.8031, 3.5927, 3.9011, 3.8058], + device='cuda:2'), covar=tensor([0.0635, 0.5040, 0.0602, 0.4104, 0.1127, 0.0936, 0.0631, 0.0707], + device='cuda:2'), in_proj_covar=tensor([0.0648, 0.0653, 0.0714, 0.0642, 0.0723, 0.0618, 0.0621, 0.0692], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:42:37,828 INFO [train.py:901] (2/4) Epoch 25, batch 4000, loss[loss=0.2498, simple_loss=0.3167, pruned_loss=0.09145, over 8442.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2871, pruned_loss=0.06071, over 1621655.54 frames. 
], batch size: 27, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:40,144 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:47,780 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.312e+02 2.768e+02 3.562e+02 7.475e+02, threshold=5.536e+02, percent-clipped=2.0 +2023-02-07 08:43:01,546 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198022.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:14,006 INFO [train.py:901] (2/4) Epoch 25, batch 4050, loss[loss=0.1909, simple_loss=0.2875, pruned_loss=0.04718, over 8402.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2865, pruned_loss=0.06023, over 1618261.77 frames. ], batch size: 49, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:48,799 INFO [train.py:901] (2/4) Epoch 25, batch 4100, loss[loss=0.2125, simple_loss=0.2941, pruned_loss=0.06546, over 8343.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2869, pruned_loss=0.0605, over 1615817.14 frames. ], batch size: 24, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:55,129 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198099.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:57,000 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.413e+02 2.876e+02 3.434e+02 5.292e+02, threshold=5.752e+02, percent-clipped=1.0 +2023-02-07 08:44:24,272 INFO [train.py:901] (2/4) Epoch 25, batch 4150, loss[loss=0.2759, simple_loss=0.3423, pruned_loss=0.1047, over 8496.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2871, pruned_loss=0.0605, over 1620051.78 frames. ], batch size: 26, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:44:29,943 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198148.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:44:47,411 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198173.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:44:58,964 INFO [train.py:901] (2/4) Epoch 25, batch 4200, loss[loss=0.2043, simple_loss=0.2915, pruned_loss=0.05853, over 8623.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2857, pruned_loss=0.05985, over 1618028.76 frames. ], batch size: 39, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:08,033 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.351e+02 3.091e+02 3.845e+02 7.201e+02, threshold=6.182e+02, percent-clipped=4.0 +2023-02-07 08:45:09,395 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 08:45:33,127 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 08:45:35,178 INFO [train.py:901] (2/4) Epoch 25, batch 4250, loss[loss=0.1837, simple_loss=0.2763, pruned_loss=0.04549, over 8196.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2858, pruned_loss=0.05995, over 1624189.22 frames. 
], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:35,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8429, 1.7204, 2.9755, 1.4147, 2.4432, 3.3371, 3.4900, 2.5219], + device='cuda:2'), covar=tensor([0.1489, 0.1900, 0.0542, 0.2541, 0.1316, 0.0373, 0.0667, 0.0922], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0317, 0.0318, 0.0275, 0.0434, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 08:45:41,587 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:44,710 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198254.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:59,339 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:02,109 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198278.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:09,935 INFO [train.py:901] (2/4) Epoch 25, batch 4300, loss[loss=0.2028, simple_loss=0.2803, pruned_loss=0.06262, over 7006.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2854, pruned_loss=0.05999, over 1620092.39 frames. ], batch size: 71, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:46:18,872 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.314e+02 2.735e+02 3.533e+02 6.805e+02, threshold=5.471e+02, percent-clipped=1.0 +2023-02-07 08:46:19,824 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198303.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:45,627 INFO [train.py:901] (2/4) Epoch 25, batch 4350, loss[loss=0.1901, simple_loss=0.2741, pruned_loss=0.05305, over 8360.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05935, over 1619119.66 frames. ], batch size: 24, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:47:04,271 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 08:47:06,469 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:47:21,566 INFO [train.py:901] (2/4) Epoch 25, batch 4400, loss[loss=0.2275, simple_loss=0.3079, pruned_loss=0.0736, over 8243.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05931, over 1620877.74 frames. ], batch size: 24, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:29,511 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.496e+02 2.935e+02 3.768e+02 7.665e+02, threshold=5.870e+02, percent-clipped=6.0 +2023-02-07 08:47:45,263 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 08:47:50,270 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.43 vs. limit=2.0 +2023-02-07 08:47:56,500 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 08:47:56,744 INFO [train.py:901] (2/4) Epoch 25, batch 4450, loss[loss=0.2401, simple_loss=0.3222, pruned_loss=0.07898, over 8353.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2846, pruned_loss=0.05876, over 1624052.12 frames. 
], batch size: 26, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:58,932 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:48:31,962 INFO [train.py:901] (2/4) Epoch 25, batch 4500, loss[loss=0.1934, simple_loss=0.281, pruned_loss=0.05296, over 8722.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2854, pruned_loss=0.05904, over 1624278.67 frames. ], batch size: 34, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:48:36,294 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6886, 4.7470, 4.1378, 2.0528, 4.1154, 4.3001, 4.2203, 4.0655], + device='cuda:2'), covar=tensor([0.0656, 0.0481, 0.1061, 0.4703, 0.0802, 0.0913, 0.1238, 0.0812], + device='cuda:2'), in_proj_covar=tensor([0.0535, 0.0451, 0.0439, 0.0550, 0.0437, 0.0456, 0.0428, 0.0400], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:48:40,440 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.274e+02 2.771e+02 3.541e+02 5.802e+02, threshold=5.543e+02, percent-clipped=0.0 +2023-02-07 08:48:40,476 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 08:48:51,908 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:08,831 INFO [train.py:901] (2/4) Epoch 25, batch 4550, loss[loss=0.2095, simple_loss=0.2814, pruned_loss=0.06882, over 7544.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05889, over 1621163.80 frames. ], batch size: 18, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:22,069 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:39,132 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 08:49:44,786 INFO [train.py:901] (2/4) Epoch 25, batch 4600, loss[loss=0.1444, simple_loss=0.2228, pruned_loss=0.033, over 7933.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2841, pruned_loss=0.05895, over 1614662.19 frames. ], batch size: 20, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:52,976 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.449e+02 2.940e+02 3.432e+02 8.422e+02, threshold=5.881e+02, percent-clipped=6.0 +2023-02-07 08:50:09,328 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198625.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:14,653 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198633.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:19,157 INFO [train.py:901] (2/4) Epoch 25, batch 4650, loss[loss=0.1767, simple_loss=0.2578, pruned_loss=0.04783, over 7240.00 frames. ], tot_loss[loss=0.201, simple_loss=0.284, pruned_loss=0.05901, over 1612104.51 frames. ], batch size: 16, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:50:26,823 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:54,512 INFO [train.py:901] (2/4) Epoch 25, batch 4700, loss[loss=0.2011, simple_loss=0.2884, pruned_loss=0.05686, over 8444.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05924, over 1607032.17 frames. 
], batch size: 27, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:51:03,370 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.164e+02 2.735e+02 3.323e+02 7.623e+02, threshold=5.470e+02, percent-clipped=2.0 +2023-02-07 08:51:29,738 INFO [train.py:901] (2/4) Epoch 25, batch 4750, loss[loss=0.2667, simple_loss=0.3409, pruned_loss=0.09627, over 8498.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.284, pruned_loss=0.05921, over 1609089.82 frames. ], batch size: 28, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:51:38,030 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3883, 1.4796, 4.6009, 1.7748, 4.0786, 3.7873, 4.1550, 4.0195], + device='cuda:2'), covar=tensor([0.0564, 0.4709, 0.0489, 0.3992, 0.1110, 0.0915, 0.0543, 0.0675], + device='cuda:2'), in_proj_covar=tensor([0.0646, 0.0650, 0.0709, 0.0640, 0.0723, 0.0616, 0.0617, 0.0690], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:51:39,045 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 08:51:42,013 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 08:51:45,373 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 08:52:05,184 INFO [train.py:901] (2/4) Epoch 25, batch 4800, loss[loss=0.2541, simple_loss=0.3304, pruned_loss=0.08893, over 8297.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05889, over 1610371.90 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:52:13,388 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 2.392e+02 2.917e+02 3.409e+02 6.169e+02, threshold=5.835e+02, percent-clipped=3.0 +2023-02-07 08:52:22,063 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:36,112 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 08:52:39,635 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:40,102 INFO [train.py:901] (2/4) Epoch 25, batch 4850, loss[loss=0.1626, simple_loss=0.2497, pruned_loss=0.03774, over 7539.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2848, pruned_loss=0.05946, over 1612521.58 frames. ], batch size: 18, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:52:55,414 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198861.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:01,725 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198870.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:15,869 INFO [train.py:901] (2/4) Epoch 25, batch 4900, loss[loss=0.2158, simple_loss=0.3008, pruned_loss=0.06541, over 8129.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.285, pruned_loss=0.05967, over 1616326.91 frames. 
], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:53:24,155 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:24,666 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.376e+02 2.954e+02 3.660e+02 6.336e+02, threshold=5.908e+02, percent-clipped=3.0 +2023-02-07 08:53:41,107 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 08:53:43,155 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 08:53:50,028 INFO [train.py:901] (2/4) Epoch 25, batch 4950, loss[loss=0.248, simple_loss=0.3295, pruned_loss=0.08328, over 8353.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2845, pruned_loss=0.0594, over 1616228.45 frames. ], batch size: 24, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:53:54,444 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:15,961 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:16,532 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:24,177 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1884, 2.0107, 2.7058, 2.2330, 2.6890, 2.3089, 2.0734, 1.6260], + device='cuda:2'), covar=tensor([0.5920, 0.5249, 0.2081, 0.3997, 0.2694, 0.3311, 0.2147, 0.5387], + device='cuda:2'), in_proj_covar=tensor([0.0949, 0.1002, 0.0817, 0.0969, 0.1011, 0.0914, 0.0758, 0.0835], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 08:54:25,247 INFO [train.py:901] (2/4) Epoch 25, batch 5000, loss[loss=0.2019, simple_loss=0.2929, pruned_loss=0.05544, over 8352.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2845, pruned_loss=0.05917, over 1615122.59 frames. ], batch size: 24, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:54:33,925 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 2.360e+02 2.883e+02 3.509e+02 6.136e+02, threshold=5.766e+02, percent-clipped=1.0 +2023-02-07 08:54:38,389 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.71 vs. limit=5.0 +2023-02-07 08:54:59,851 INFO [train.py:901] (2/4) Epoch 25, batch 5050, loss[loss=0.2738, simple_loss=0.3361, pruned_loss=0.1057, over 8530.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2852, pruned_loss=0.0595, over 1617051.42 frames. ], batch size: 28, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:55:14,361 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 08:55:35,763 INFO [train.py:901] (2/4) Epoch 25, batch 5100, loss[loss=0.2236, simple_loss=0.3151, pruned_loss=0.06607, over 8026.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2849, pruned_loss=0.05927, over 1619352.06 frames. 
], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:55:37,406 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:55:44,130 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.470e+02 3.005e+02 3.768e+02 7.063e+02, threshold=6.010e+02, percent-clipped=5.0 +2023-02-07 08:56:11,856 INFO [train.py:901] (2/4) Epoch 25, batch 5150, loss[loss=0.1791, simple_loss=0.2689, pruned_loss=0.04459, over 8189.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2852, pruned_loss=0.05935, over 1627150.03 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:56:12,464 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 08:56:47,046 INFO [train.py:901] (2/4) Epoch 25, batch 5200, loss[loss=0.1867, simple_loss=0.2562, pruned_loss=0.05863, over 7223.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.285, pruned_loss=0.05939, over 1621563.47 frames. ], batch size: 16, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:56:49,913 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:56:55,026 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 2.381e+02 2.894e+02 3.514e+02 1.206e+03, threshold=5.788e+02, percent-clipped=6.0 +2023-02-07 08:57:03,628 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 08:57:04,083 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:12,756 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 08:57:17,162 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:19,825 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6633, 1.4879, 1.6669, 1.3349, 0.8850, 1.4317, 1.4703, 1.4059], + device='cuda:2'), covar=tensor([0.0597, 0.1236, 0.1675, 0.1501, 0.0593, 0.1491, 0.0719, 0.0687], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0164, 0.0113, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 08:57:22,313 INFO [train.py:901] (2/4) Epoch 25, batch 5250, loss[loss=0.1905, simple_loss=0.276, pruned_loss=0.05254, over 6403.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2856, pruned_loss=0.05907, over 1623615.12 frames. 
], batch size: 14, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:57:25,798 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:34,838 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:41,279 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1817, 4.0963, 3.7054, 2.0544, 3.7216, 3.8132, 3.6825, 3.6965], + device='cuda:2'), covar=tensor([0.0779, 0.0587, 0.1058, 0.4289, 0.0908, 0.0962, 0.1327, 0.0752], + device='cuda:2'), in_proj_covar=tensor([0.0542, 0.0456, 0.0442, 0.0555, 0.0439, 0.0460, 0.0433, 0.0404], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 08:57:44,762 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7325, 1.6564, 2.2581, 1.4877, 1.3126, 2.2170, 0.4373, 1.3824], + device='cuda:2'), covar=tensor([0.1563, 0.1088, 0.0350, 0.1037, 0.2310, 0.0405, 0.1823, 0.1341], + device='cuda:2'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0222, 0.0277, 0.0142, 0.0173, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 08:57:56,836 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:57,413 INFO [train.py:901] (2/4) Epoch 25, batch 5300, loss[loss=0.1842, simple_loss=0.2779, pruned_loss=0.04521, over 8014.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2855, pruned_loss=0.05889, over 1621253.52 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:58:05,707 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.313e+02 2.718e+02 3.488e+02 6.386e+02, threshold=5.437e+02, percent-clipped=3.0 +2023-02-07 08:58:25,230 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199329.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:32,735 INFO [train.py:901] (2/4) Epoch 25, batch 5350, loss[loss=0.1926, simple_loss=0.291, pruned_loss=0.04707, over 8290.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2854, pruned_loss=0.05911, over 1614560.89 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:58:38,543 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199348.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:47,625 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199360.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:57,357 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:08,730 INFO [train.py:901] (2/4) Epoch 25, batch 5400, loss[loss=0.1958, simple_loss=0.2734, pruned_loss=0.05906, over 8091.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05872, over 1608154.46 frames. 
], batch size: 21, lr: 3.01e-03, grad_scale: 32.0 +2023-02-07 08:59:18,149 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 2.292e+02 2.858e+02 3.757e+02 5.815e+02, threshold=5.716e+02, percent-clipped=3.0 +2023-02-07 08:59:18,353 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199404.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:22,498 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8596, 1.8722, 2.9271, 2.1920, 2.6935, 1.9669, 1.6935, 1.4124], + device='cuda:2'), covar=tensor([0.7374, 0.6152, 0.2163, 0.4363, 0.3219, 0.4454, 0.3041, 0.5954], + device='cuda:2'), in_proj_covar=tensor([0.0951, 0.1002, 0.0818, 0.0972, 0.1014, 0.0916, 0.0760, 0.0838], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 08:59:28,367 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:43,202 INFO [train.py:901] (2/4) Epoch 25, batch 5450, loss[loss=0.1701, simple_loss=0.2471, pruned_loss=0.04658, over 8078.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2834, pruned_loss=0.05862, over 1602524.95 frames. ], batch size: 21, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:00:08,085 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 09:00:08,226 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199476.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:00:17,983 INFO [train.py:901] (2/4) Epoch 25, batch 5500, loss[loss=0.1985, simple_loss=0.2763, pruned_loss=0.06035, over 7645.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2828, pruned_loss=0.05852, over 1602625.01 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:00:28,263 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.278e+02 2.767e+02 3.622e+02 8.817e+02, threshold=5.534e+02, percent-clipped=3.0 +2023-02-07 09:00:33,827 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199512.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:00:52,118 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:00:53,325 INFO [train.py:901] (2/4) Epoch 25, batch 5550, loss[loss=0.1991, simple_loss=0.2766, pruned_loss=0.0608, over 7770.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2829, pruned_loss=0.05861, over 1605295.76 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:01:02,093 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199553.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:01:13,072 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.83 vs. limit=5.0 +2023-02-07 09:01:24,423 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:01:27,624 INFO [train.py:901] (2/4) Epoch 25, batch 5600, loss[loss=0.1999, simple_loss=0.2931, pruned_loss=0.05334, over 8504.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2819, pruned_loss=0.0583, over 1602826.22 frames. 
], batch size: 28, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:01:38,057 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.538e+02 3.116e+02 4.016e+02 1.228e+03, threshold=6.232e+02, percent-clipped=11.0 +2023-02-07 09:01:43,186 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:01:47,370 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:03,338 INFO [train.py:901] (2/4) Epoch 25, batch 5650, loss[loss=0.209, simple_loss=0.2902, pruned_loss=0.06393, over 8253.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2826, pruned_loss=0.05845, over 1606331.70 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:02:04,213 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199641.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:13,533 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:13,999 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 09:02:18,327 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:36,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199685.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:39,918 INFO [train.py:901] (2/4) Epoch 25, batch 5700, loss[loss=0.211, simple_loss=0.2968, pruned_loss=0.06262, over 8496.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2837, pruned_loss=0.05915, over 1605152.82 frames. ], batch size: 31, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:02:49,766 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.200e+02 2.647e+02 3.419e+02 7.306e+02, threshold=5.294e+02, percent-clipped=3.0 +2023-02-07 09:03:12,405 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 09:03:16,059 INFO [train.py:901] (2/4) Epoch 25, batch 5750, loss[loss=0.2159, simple_loss=0.301, pruned_loss=0.06536, over 8327.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2824, pruned_loss=0.05879, over 1597616.80 frames. ], batch size: 25, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:03:16,267 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1739, 1.9960, 1.4843, 1.9104, 1.5918, 1.2677, 1.5158, 1.6860], + device='cuda:2'), covar=tensor([0.1495, 0.0526, 0.1472, 0.0628, 0.0980, 0.1945, 0.1246, 0.0945], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0235, 0.0339, 0.0310, 0.0301, 0.0341, 0.0346, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 09:03:21,550 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 09:03:30,824 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:03:39,208 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6974, 1.3979, 4.9167, 1.8176, 4.3277, 3.9880, 4.4051, 4.2470], + device='cuda:2'), covar=tensor([0.0581, 0.4969, 0.0431, 0.4218, 0.1075, 0.0891, 0.0567, 0.0687], + device='cuda:2'), in_proj_covar=tensor([0.0656, 0.0657, 0.0724, 0.0647, 0.0731, 0.0621, 0.0622, 0.0699], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:03:50,501 INFO [train.py:901] (2/4) Epoch 25, batch 5800, loss[loss=0.2356, simple_loss=0.3176, pruned_loss=0.07674, over 8458.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2832, pruned_loss=0.05908, over 1601684.39 frames. ], batch size: 29, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:04:00,801 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.348e+02 2.869e+02 3.742e+02 6.332e+02, threshold=5.738e+02, percent-clipped=6.0 +2023-02-07 09:04:11,740 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199820.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:04:12,724 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 09:04:26,575 INFO [train.py:901] (2/4) Epoch 25, batch 5850, loss[loss=0.2547, simple_loss=0.3182, pruned_loss=0.09557, over 7092.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05891, over 1611329.90 frames. ], batch size: 71, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:04:37,359 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199856.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:04:51,702 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199877.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:01,006 INFO [train.py:901] (2/4) Epoch 25, batch 5900, loss[loss=0.2336, simple_loss=0.3094, pruned_loss=0.07893, over 8252.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2838, pruned_loss=0.05903, over 1610069.77 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:05:05,824 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199897.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:05:10,363 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.352e+02 2.828e+02 3.481e+02 7.421e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-07 09:05:13,998 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:26,462 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:31,352 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:32,025 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:35,262 INFO [train.py:901] (2/4) Epoch 25, batch 5950, loss[loss=0.2137, simple_loss=0.2955, pruned_loss=0.06593, over 8630.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2843, pruned_loss=0.05892, over 1613909.11 frames. 
], batch size: 34, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:05:54,665 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5278, 1.4438, 4.7856, 1.8949, 4.2309, 3.9580, 4.3271, 4.1768], + device='cuda:2'), covar=tensor([0.0648, 0.4885, 0.0460, 0.4157, 0.1146, 0.0921, 0.0577, 0.0696], + device='cuda:2'), in_proj_covar=tensor([0.0656, 0.0656, 0.0723, 0.0647, 0.0731, 0.0620, 0.0622, 0.0698], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:05:58,231 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199971.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:06:08,486 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4682, 2.2216, 2.7616, 2.4152, 2.7980, 2.4302, 2.3650, 2.0691], + device='cuda:2'), covar=tensor([0.4218, 0.4162, 0.1924, 0.3221, 0.1990, 0.2854, 0.1646, 0.4134], + device='cuda:2'), in_proj_covar=tensor([0.0955, 0.1004, 0.0822, 0.0976, 0.1017, 0.0917, 0.0763, 0.0839], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 09:06:11,076 INFO [train.py:901] (2/4) Epoch 25, batch 6000, loss[loss=0.2225, simple_loss=0.3035, pruned_loss=0.07073, over 8331.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.284, pruned_loss=0.05923, over 1608394.72 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:06:11,077 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 09:06:23,701 INFO [train.py:935] (2/4) Epoch 25, validation: loss=0.1725, simple_loss=0.2721, pruned_loss=0.03643, over 944034.00 frames. +2023-02-07 09:06:23,702 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 09:06:34,577 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.373e+02 2.952e+02 3.581e+02 7.260e+02, threshold=5.903e+02, percent-clipped=4.0 +2023-02-07 09:06:40,184 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200012.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:06:59,672 INFO [train.py:901] (2/4) Epoch 25, batch 6050, loss[loss=0.1681, simple_loss=0.2439, pruned_loss=0.04614, over 7551.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2843, pruned_loss=0.0591, over 1613985.16 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:07:35,027 INFO [train.py:901] (2/4) Epoch 25, batch 6100, loss[loss=0.1923, simple_loss=0.2844, pruned_loss=0.05006, over 8329.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2841, pruned_loss=0.05901, over 1612890.84 frames. 
], batch size: 25, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:07:36,616 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7942, 1.3291, 3.9907, 1.5503, 3.4713, 3.3332, 3.5917, 3.4662], + device='cuda:2'), covar=tensor([0.0730, 0.4773, 0.0642, 0.4108, 0.1341, 0.0970, 0.0661, 0.0826], + device='cuda:2'), in_proj_covar=tensor([0.0657, 0.0656, 0.0724, 0.0647, 0.0732, 0.0621, 0.0624, 0.0699], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:07:38,692 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4034, 1.4483, 1.4037, 1.8294, 0.8236, 1.2840, 1.3790, 1.4994], + device='cuda:2'), covar=tensor([0.0860, 0.0766, 0.1006, 0.0483, 0.1076, 0.1444, 0.0727, 0.0812], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0211, 0.0205, 0.0247, 0.0249, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 09:07:45,358 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.345e+02 2.959e+02 3.596e+02 7.197e+02, threshold=5.919e+02, percent-clipped=3.0 +2023-02-07 09:07:54,345 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 09:08:05,859 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200133.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:11,249 INFO [train.py:901] (2/4) Epoch 25, batch 6150, loss[loss=0.195, simple_loss=0.2768, pruned_loss=0.05664, over 8490.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2826, pruned_loss=0.05818, over 1610328.98 frames. ], batch size: 28, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:08:22,234 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 09:08:23,404 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200158.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:27,288 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-07 09:08:46,113 INFO [train.py:901] (2/4) Epoch 25, batch 6200, loss[loss=0.1588, simple_loss=0.2528, pruned_loss=0.03243, over 8235.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.283, pruned_loss=0.05862, over 1610175.69 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:08:46,772 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. 
limit=2.0 +2023-02-07 09:08:47,061 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:49,547 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200195.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:08:55,699 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.314e+02 2.821e+02 3.535e+02 6.331e+02, threshold=5.643e+02, percent-clipped=2.0 +2023-02-07 09:09:05,007 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:09:08,358 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5883, 1.5689, 1.9440, 1.2863, 1.2676, 1.9380, 0.4643, 1.3580], + device='cuda:2'), covar=tensor([0.1577, 0.1008, 0.0402, 0.0937, 0.2219, 0.0487, 0.1899, 0.1160], + device='cuda:2'), in_proj_covar=tensor([0.0195, 0.0200, 0.0131, 0.0219, 0.0274, 0.0141, 0.0172, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 09:09:13,103 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200227.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:09:21,663 INFO [train.py:901] (2/4) Epoch 25, batch 6250, loss[loss=0.1741, simple_loss=0.2706, pruned_loss=0.03874, over 8314.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2836, pruned_loss=0.05919, over 1610162.26 frames. ], batch size: 25, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:09:29,786 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200252.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:09:41,365 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200268.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:09:43,283 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:09:56,028 INFO [train.py:901] (2/4) Epoch 25, batch 6300, loss[loss=0.1558, simple_loss=0.2356, pruned_loss=0.03799, over 7656.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2824, pruned_loss=0.05848, over 1610813.00 frames. ], batch size: 19, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:09:58,153 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200293.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:10:00,758 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:10:06,125 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.537e+02 3.046e+02 4.211e+02 7.306e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 09:10:31,233 INFO [train.py:901] (2/4) Epoch 25, batch 6350, loss[loss=0.1879, simple_loss=0.2735, pruned_loss=0.05113, over 8609.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2831, pruned_loss=0.059, over 1613724.82 frames. ], batch size: 34, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:11:03,766 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:11:06,376 INFO [train.py:901] (2/4) Epoch 25, batch 6400, loss[loss=0.2177, simple_loss=0.2964, pruned_loss=0.06949, over 7814.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2839, pruned_loss=0.05972, over 1614132.57 frames. 
], batch size: 20, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:11:15,859 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.247e+02 2.600e+02 3.696e+02 8.014e+02, threshold=5.200e+02, percent-clipped=2.0 +2023-02-07 09:11:40,859 INFO [train.py:901] (2/4) Epoch 25, batch 6450, loss[loss=0.2083, simple_loss=0.2957, pruned_loss=0.06044, over 8194.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05911, over 1617131.37 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:12:16,064 INFO [train.py:901] (2/4) Epoch 25, batch 6500, loss[loss=0.2057, simple_loss=0.2931, pruned_loss=0.05915, over 8514.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2837, pruned_loss=0.05899, over 1617346.30 frames. ], batch size: 28, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:12:26,035 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.181e+02 2.613e+02 3.190e+02 4.719e+02, threshold=5.226e+02, percent-clipped=0.0 +2023-02-07 09:12:49,388 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200539.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:12:49,933 INFO [train.py:901] (2/4) Epoch 25, batch 6550, loss[loss=0.2289, simple_loss=0.3018, pruned_loss=0.07796, over 8236.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2839, pruned_loss=0.05912, over 1619889.37 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:13:09,849 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 09:13:16,316 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 09:13:26,026 INFO [train.py:901] (2/4) Epoch 25, batch 6600, loss[loss=0.198, simple_loss=0.2711, pruned_loss=0.0625, over 7805.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2831, pruned_loss=0.05901, over 1616979.04 frames. ], batch size: 20, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:13:30,833 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 09:13:35,559 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.321e+02 2.722e+02 3.541e+02 8.507e+02, threshold=5.445e+02, percent-clipped=6.0 +2023-02-07 09:13:43,069 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5336, 1.8360, 4.7197, 2.2284, 4.2546, 3.9442, 4.3416, 4.2280], + device='cuda:2'), covar=tensor([0.0592, 0.4131, 0.0517, 0.3830, 0.1036, 0.0868, 0.0537, 0.0575], + device='cuda:2'), in_proj_covar=tensor([0.0655, 0.0650, 0.0718, 0.0644, 0.0730, 0.0619, 0.0621, 0.0694], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:14:00,777 INFO [train.py:901] (2/4) Epoch 25, batch 6650, loss[loss=0.1674, simple_loss=0.2494, pruned_loss=0.04268, over 7798.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2826, pruned_loss=0.0586, over 1616528.37 frames. 
], batch size: 19, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:14:01,607 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200641.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:02,401 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200642.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:10,432 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200654.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:14:16,369 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200663.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:19,883 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200667.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:35,339 INFO [train.py:901] (2/4) Epoch 25, batch 6700, loss[loss=0.2295, simple_loss=0.3071, pruned_loss=0.0759, over 8336.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2829, pruned_loss=0.05856, over 1616004.31 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:14:45,630 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.443e+02 2.859e+02 3.397e+02 5.440e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 09:14:48,885 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 09:15:10,909 INFO [train.py:901] (2/4) Epoch 25, batch 6750, loss[loss=0.1919, simple_loss=0.2581, pruned_loss=0.06288, over 7453.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2818, pruned_loss=0.0579, over 1608682.82 frames. ], batch size: 17, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:15:22,782 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:15:47,008 INFO [train.py:901] (2/4) Epoch 25, batch 6800, loss[loss=0.2257, simple_loss=0.3066, pruned_loss=0.07234, over 8621.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2815, pruned_loss=0.05784, over 1607519.00 frames. ], batch size: 34, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:15:51,859 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 09:15:56,788 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.322e+02 2.853e+02 3.502e+02 6.162e+02, threshold=5.706e+02, percent-clipped=1.0 +2023-02-07 09:16:21,871 INFO [train.py:901] (2/4) Epoch 25, batch 6850, loss[loss=0.2026, simple_loss=0.2957, pruned_loss=0.05479, over 8455.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2814, pruned_loss=0.05777, over 1602483.73 frames. 
], batch size: 49, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:16:29,578 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7847, 1.5806, 3.9737, 1.4083, 3.4744, 3.2592, 3.5900, 3.4951], + device='cuda:2'), covar=tensor([0.0756, 0.4193, 0.0700, 0.4581, 0.1372, 0.1114, 0.0694, 0.0802], + device='cuda:2'), in_proj_covar=tensor([0.0663, 0.0659, 0.0729, 0.0655, 0.0739, 0.0629, 0.0629, 0.0703], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:16:39,682 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9138, 3.8157, 3.5316, 1.7804, 3.5098, 3.5575, 3.3775, 3.4326], + device='cuda:2'), covar=tensor([0.0859, 0.0723, 0.1217, 0.4284, 0.0936, 0.0993, 0.1604, 0.0693], + device='cuda:2'), in_proj_covar=tensor([0.0535, 0.0453, 0.0440, 0.0552, 0.0438, 0.0457, 0.0433, 0.0401], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:16:40,950 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 09:16:56,596 INFO [train.py:901] (2/4) Epoch 25, batch 6900, loss[loss=0.1907, simple_loss=0.2541, pruned_loss=0.06362, over 7430.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05809, over 1605300.51 frames. ], batch size: 17, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:17:06,815 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.244e+02 2.770e+02 3.533e+02 6.127e+02, threshold=5.541e+02, percent-clipped=2.0 +2023-02-07 09:17:11,230 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200910.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:17:28,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200935.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:17:31,363 INFO [train.py:901] (2/4) Epoch 25, batch 6950, loss[loss=0.208, simple_loss=0.3004, pruned_loss=0.05777, over 7816.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05836, over 1608974.23 frames. ], batch size: 20, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:17:50,964 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 09:18:01,551 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8437, 1.4410, 4.2008, 1.8039, 3.3255, 3.2916, 3.7928, 3.7496], + device='cuda:2'), covar=tensor([0.1356, 0.6567, 0.1165, 0.5137, 0.2521, 0.1774, 0.1144, 0.1081], + device='cuda:2'), in_proj_covar=tensor([0.0659, 0.0655, 0.0726, 0.0650, 0.0736, 0.0624, 0.0626, 0.0699], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:18:07,787 INFO [train.py:901] (2/4) Epoch 25, batch 7000, loss[loss=0.1987, simple_loss=0.29, pruned_loss=0.05367, over 8549.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2841, pruned_loss=0.05866, over 1610177.68 frames. 
], batch size: 49, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:18:17,572 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.593e+02 3.026e+02 3.851e+02 8.547e+02, threshold=6.052e+02, percent-clipped=7.0 +2023-02-07 09:18:19,773 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:23,197 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201012.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:37,955 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 09:18:40,487 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:42,298 INFO [train.py:901] (2/4) Epoch 25, batch 7050, loss[loss=0.167, simple_loss=0.2473, pruned_loss=0.04333, over 8232.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2833, pruned_loss=0.05766, over 1612009.72 frames. ], batch size: 22, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:16,807 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8448, 3.7106, 3.4855, 1.8541, 3.3726, 3.4708, 3.3304, 3.3154], + device='cuda:2'), covar=tensor([0.0940, 0.0735, 0.1146, 0.4236, 0.1041, 0.1178, 0.1586, 0.0918], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0455, 0.0440, 0.0554, 0.0441, 0.0461, 0.0434, 0.0401], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:19:16,852 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:19:17,388 INFO [train.py:901] (2/4) Epoch 25, batch 7100, loss[loss=0.2202, simple_loss=0.2939, pruned_loss=0.07326, over 8332.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.05743, over 1608150.21 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:26,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.246e+02 2.728e+02 3.277e+02 5.322e+02, threshold=5.456e+02, percent-clipped=0.0 +2023-02-07 09:19:39,432 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2705, 1.9995, 2.6209, 2.1996, 2.6170, 2.3468, 2.1470, 1.4833], + device='cuda:2'), covar=tensor([0.5425, 0.5109, 0.2108, 0.3956, 0.2611, 0.3128, 0.1991, 0.5577], + device='cuda:2'), in_proj_covar=tensor([0.0953, 0.1005, 0.0824, 0.0978, 0.1017, 0.0915, 0.0764, 0.0841], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 09:19:40,023 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:19:52,241 INFO [train.py:901] (2/4) Epoch 25, batch 7150, loss[loss=0.1976, simple_loss=0.2891, pruned_loss=0.05304, over 8355.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2815, pruned_loss=0.05717, over 1610225.94 frames. 
], batch size: 24, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:20:14,505 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1663, 3.7759, 2.4360, 2.9883, 2.8966, 2.1124, 2.7393, 3.1314], + device='cuda:2'), covar=tensor([0.1683, 0.0361, 0.1144, 0.0715, 0.0724, 0.1458, 0.1194, 0.1181], + device='cuda:2'), in_proj_covar=tensor([0.0360, 0.0239, 0.0346, 0.0316, 0.0304, 0.0349, 0.0352, 0.0324], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 09:20:28,435 INFO [train.py:901] (2/4) Epoch 25, batch 7200, loss[loss=0.1806, simple_loss=0.2563, pruned_loss=0.05239, over 7444.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2815, pruned_loss=0.05762, over 1608279.24 frames. ], batch size: 17, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:20:34,964 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 09:20:38,233 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.475e+02 3.123e+02 4.294e+02 9.608e+02, threshold=6.246e+02, percent-clipped=8.0 +2023-02-07 09:21:03,474 INFO [train.py:901] (2/4) Epoch 25, batch 7250, loss[loss=0.2211, simple_loss=0.3049, pruned_loss=0.06866, over 8196.00 frames. ], tot_loss[loss=0.199, simple_loss=0.282, pruned_loss=0.05794, over 1606543.03 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:21:20,244 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 09:21:25,204 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:21:31,945 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8733, 3.8008, 3.4926, 1.9192, 3.4182, 3.5904, 3.4583, 3.3096], + device='cuda:2'), covar=tensor([0.0863, 0.0630, 0.1194, 0.4508, 0.1036, 0.1044, 0.1417, 0.0899], + device='cuda:2'), in_proj_covar=tensor([0.0534, 0.0451, 0.0439, 0.0549, 0.0437, 0.0456, 0.0432, 0.0398], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:21:37,952 INFO [train.py:901] (2/4) Epoch 25, batch 7300, loss[loss=0.1579, simple_loss=0.2403, pruned_loss=0.03779, over 7718.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2822, pruned_loss=0.05833, over 1608965.59 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:21:39,387 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201292.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:21:48,706 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.341e+02 2.809e+02 3.464e+02 9.506e+02, threshold=5.617e+02, percent-clipped=4.0 +2023-02-07 09:22:13,162 INFO [train.py:901] (2/4) Epoch 25, batch 7350, loss[loss=0.2005, simple_loss=0.2924, pruned_loss=0.05432, over 8497.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2822, pruned_loss=0.05826, over 1614662.69 frames. ], batch size: 28, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:22:39,015 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 09:22:40,537 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:22:48,513 INFO [train.py:901] (2/4) Epoch 25, batch 7400, loss[loss=0.1987, simple_loss=0.2872, pruned_loss=0.05504, over 8328.00 frames. 
], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.05871, over 1618239.44 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:22:57,503 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201403.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:22:57,969 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.231e+02 2.880e+02 3.857e+02 7.685e+02, threshold=5.759e+02, percent-clipped=5.0 +2023-02-07 09:22:58,716 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 09:23:19,305 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:23:23,998 INFO [train.py:901] (2/4) Epoch 25, batch 7450, loss[loss=0.1666, simple_loss=0.2487, pruned_loss=0.04221, over 7542.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2834, pruned_loss=0.05855, over 1615899.03 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 16.0 +2023-02-07 09:23:37,753 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 09:23:59,788 INFO [train.py:901] (2/4) Epoch 25, batch 7500, loss[loss=0.2085, simple_loss=0.2865, pruned_loss=0.06528, over 8130.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2835, pruned_loss=0.05839, over 1615504.01 frames. ], batch size: 22, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:24:09,797 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.281e+02 2.758e+02 3.564e+02 6.593e+02, threshold=5.515e+02, percent-clipped=6.0 +2023-02-07 09:24:34,699 INFO [train.py:901] (2/4) Epoch 25, batch 7550, loss[loss=0.2147, simple_loss=0.2979, pruned_loss=0.06577, over 8493.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.05932, over 1613121.59 frames. ], batch size: 49, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:24:40,271 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:09,407 INFO [train.py:901] (2/4) Epoch 25, batch 7600, loss[loss=0.2139, simple_loss=0.2937, pruned_loss=0.06711, over 8191.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2857, pruned_loss=0.06004, over 1616295.38 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:25:20,530 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.467e+02 2.939e+02 3.909e+02 7.265e+02, threshold=5.878e+02, percent-clipped=5.0 +2023-02-07 09:25:27,306 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201615.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:41,304 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:43,931 INFO [train.py:901] (2/4) Epoch 25, batch 7650, loss[loss=0.2204, simple_loss=0.3027, pruned_loss=0.06904, over 8102.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.05935, over 1613560.41 frames. 
], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:00,389 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:12,136 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:19,459 INFO [train.py:901] (2/4) Epoch 25, batch 7700, loss[loss=0.1643, simple_loss=0.2428, pruned_loss=0.04297, over 7541.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.284, pruned_loss=0.05914, over 1613575.97 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:30,382 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.306e+02 2.805e+02 3.732e+02 7.115e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-07 09:26:43,956 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201724.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:48,035 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201730.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:49,269 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 09:26:54,623 INFO [train.py:901] (2/4) Epoch 25, batch 7750, loss[loss=0.1948, simple_loss=0.2634, pruned_loss=0.06305, over 7438.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2847, pruned_loss=0.05971, over 1613712.81 frames. ], batch size: 17, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:27:02,275 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201751.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:27:21,319 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.8030, 5.8700, 5.1521, 2.8460, 5.2294, 5.6473, 5.4755, 5.4964], + device='cuda:2'), covar=tensor([0.0568, 0.0377, 0.0876, 0.3760, 0.0704, 0.0684, 0.1027, 0.0528], + device='cuda:2'), in_proj_covar=tensor([0.0533, 0.0452, 0.0439, 0.0550, 0.0436, 0.0457, 0.0431, 0.0398], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:27:29,979 INFO [train.py:901] (2/4) Epoch 25, batch 7800, loss[loss=0.1719, simple_loss=0.2462, pruned_loss=0.04884, over 7668.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2849, pruned_loss=0.05927, over 1617098.19 frames. ], batch size: 19, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:27:40,028 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:27:40,470 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.301e+02 2.955e+02 3.831e+02 1.047e+03, threshold=5.910e+02, percent-clipped=5.0 +2023-02-07 09:27:56,326 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:28:03,383 INFO [train.py:901] (2/4) Epoch 25, batch 7850, loss[loss=0.1819, simple_loss=0.2697, pruned_loss=0.04701, over 7927.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05892, over 1614843.79 frames. 
], batch size: 20, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:28:27,652 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3633, 1.5445, 2.0532, 1.2912, 1.4716, 1.6268, 1.3783, 1.5981], + device='cuda:2'), covar=tensor([0.1831, 0.2508, 0.0964, 0.4221, 0.1948, 0.3089, 0.2284, 0.2132], + device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0628, 0.0559, 0.0663, 0.0662, 0.0607, 0.0556, 0.0644], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:28:36,527 INFO [train.py:901] (2/4) Epoch 25, batch 7900, loss[loss=0.1813, simple_loss=0.2686, pruned_loss=0.04703, over 8470.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05889, over 1617302.52 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:28:47,146 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.505e+02 3.187e+02 3.787e+02 7.491e+02, threshold=6.375e+02, percent-clipped=2.0 +2023-02-07 09:29:09,582 INFO [train.py:901] (2/4) Epoch 25, batch 7950, loss[loss=0.2148, simple_loss=0.2876, pruned_loss=0.07104, over 7979.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2835, pruned_loss=0.05856, over 1615446.85 frames. ], batch size: 21, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:29:40,350 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:42,799 INFO [train.py:901] (2/4) Epoch 25, batch 8000, loss[loss=0.1698, simple_loss=0.2694, pruned_loss=0.03513, over 8182.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2827, pruned_loss=0.05809, over 1612154.22 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:29:46,048 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9380, 1.4732, 3.2853, 1.4403, 2.4905, 3.6191, 3.7172, 3.0906], + device='cuda:2'), covar=tensor([0.1207, 0.1876, 0.0342, 0.2187, 0.0975, 0.0234, 0.0631, 0.0575], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0323, 0.0290, 0.0318, 0.0318, 0.0274, 0.0435, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 09:29:46,070 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9479, 1.4235, 1.6346, 1.3281, 0.8840, 1.4490, 1.7345, 1.5216], + device='cuda:2'), covar=tensor([0.0578, 0.1252, 0.1779, 0.1554, 0.0644, 0.1470, 0.0701, 0.0681], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 09:29:54,391 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 2.298e+02 3.131e+02 3.789e+02 6.155e+02, threshold=6.263e+02, percent-clipped=0.0 +2023-02-07 09:29:54,489 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:55,330 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:55,933 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:58,056 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:06,542 INFO 
[zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:12,417 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202032.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:17,709 INFO [train.py:901] (2/4) Epoch 25, batch 8050, loss[loss=0.201, simple_loss=0.2826, pruned_loss=0.05965, over 7930.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2822, pruned_loss=0.05856, over 1602840.51 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:30:36,924 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:50,320 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 09:30:55,054 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 09:30:55,321 INFO [train.py:901] (2/4) Epoch 26, batch 0, loss[loss=0.1926, simple_loss=0.2648, pruned_loss=0.06018, over 7281.00 frames. ], tot_loss[loss=0.1926, simple_loss=0.2648, pruned_loss=0.06018, over 7281.00 frames. ], batch size: 16, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:30:55,321 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 09:31:04,873 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3323, 2.1111, 1.6051, 1.8532, 1.7381, 1.5305, 1.6685, 1.7044], + device='cuda:2'), covar=tensor([0.1358, 0.0404, 0.1257, 0.0602, 0.0736, 0.1497, 0.0944, 0.0924], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0239, 0.0341, 0.0314, 0.0303, 0.0346, 0.0350, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 09:31:06,905 INFO [train.py:935] (2/4) Epoch 26, validation: loss=0.1717, simple_loss=0.2716, pruned_loss=0.03591, over 944034.00 frames. +2023-02-07 09:31:06,905 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 09:31:21,613 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 09:31:29,810 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.411e+02 2.993e+02 3.956e+02 9.314e+02, threshold=5.987e+02, percent-clipped=4.0 +2023-02-07 09:31:40,837 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:31:41,325 INFO [train.py:901] (2/4) Epoch 26, batch 50, loss[loss=0.1817, simple_loss=0.2673, pruned_loss=0.04809, over 8136.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2868, pruned_loss=0.05916, over 368321.88 frames. ], batch size: 22, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:31:52,558 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202138.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:31:55,759 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 09:32:15,981 INFO [train.py:901] (2/4) Epoch 26, batch 100, loss[loss=0.1907, simple_loss=0.2829, pruned_loss=0.0493, over 8453.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2893, pruned_loss=0.06139, over 650163.24 frames. ], batch size: 27, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:32:18,600 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 09:32:23,606 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:32:40,585 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.435e+02 2.962e+02 3.649e+02 8.375e+02, threshold=5.925e+02, percent-clipped=4.0 +2023-02-07 09:32:51,116 INFO [train.py:901] (2/4) Epoch 26, batch 150, loss[loss=0.2034, simple_loss=0.2838, pruned_loss=0.06148, over 8081.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.29, pruned_loss=0.06244, over 865320.49 frames. ], batch size: 21, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:33:22,414 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2368, 1.4304, 3.3841, 1.0802, 2.9843, 2.8646, 3.1049, 3.0514], + device='cuda:2'), covar=tensor([0.0984, 0.4073, 0.0844, 0.4619, 0.1456, 0.1172, 0.0807, 0.0911], + device='cuda:2'), in_proj_covar=tensor([0.0660, 0.0653, 0.0721, 0.0646, 0.0730, 0.0626, 0.0625, 0.0699], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:33:26,391 INFO [train.py:901] (2/4) Epoch 26, batch 200, loss[loss=0.1931, simple_loss=0.2775, pruned_loss=0.05436, over 8286.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.29, pruned_loss=0.06212, over 1035163.20 frames. ], batch size: 23, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:33:49,948 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.408e+02 2.928e+02 3.669e+02 9.390e+02, threshold=5.857e+02, percent-clipped=3.0 +2023-02-07 09:34:01,571 INFO [train.py:901] (2/4) Epoch 26, batch 250, loss[loss=0.2433, simple_loss=0.326, pruned_loss=0.0803, over 8250.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2899, pruned_loss=0.0618, over 1167401.14 frames. ], batch size: 24, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:34:09,725 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 09:34:19,979 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 09:34:22,771 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:35,408 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-07 09:34:36,400 INFO [train.py:901] (2/4) Epoch 26, batch 300, loss[loss=0.1907, simple_loss=0.2841, pruned_loss=0.04861, over 8456.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2892, pruned_loss=0.06194, over 1265856.13 frames. 
], batch size: 25, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:34:40,015 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:52,235 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:57,267 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202402.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:59,616 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.510e+02 3.033e+02 3.572e+02 1.183e+03, threshold=6.066e+02, percent-clipped=2.0 +2023-02-07 09:35:08,710 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:10,555 INFO [train.py:901] (2/4) Epoch 26, batch 350, loss[loss=0.1617, simple_loss=0.2364, pruned_loss=0.04356, over 7447.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2876, pruned_loss=0.06094, over 1344594.88 frames. ], batch size: 17, lr: 2.93e-03, grad_scale: 4.0 +2023-02-07 09:35:22,053 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3354, 2.1086, 2.6079, 2.2594, 2.6272, 2.2831, 2.2324, 1.8458], + device='cuda:2'), covar=tensor([0.4017, 0.4234, 0.1659, 0.2944, 0.1935, 0.2730, 0.1556, 0.4085], + device='cuda:2'), in_proj_covar=tensor([0.0954, 0.1006, 0.0820, 0.0977, 0.1015, 0.0916, 0.0764, 0.0840], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 09:35:23,407 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:40,988 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202464.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:43,101 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:46,423 INFO [train.py:901] (2/4) Epoch 26, batch 400, loss[loss=0.2118, simple_loss=0.2876, pruned_loss=0.06798, over 8513.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2868, pruned_loss=0.05993, over 1408376.80 frames. 
], batch size: 26, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:35:46,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5942, 2.4259, 3.2259, 2.5758, 3.2511, 2.5917, 2.4458, 1.9018], + device='cuda:2'), covar=tensor([0.5654, 0.5131, 0.2129, 0.4438, 0.2949, 0.3131, 0.1896, 0.6090], + device='cuda:2'), in_proj_covar=tensor([0.0953, 0.1005, 0.0820, 0.0977, 0.1015, 0.0915, 0.0763, 0.0840], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 09:36:09,159 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8645, 2.0139, 1.6993, 2.4617, 1.2746, 1.5331, 1.9203, 1.9943], + device='cuda:2'), covar=tensor([0.0724, 0.0745, 0.0909, 0.0474, 0.1050, 0.1288, 0.0736, 0.0739], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0212, 0.0206, 0.0246, 0.0249, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 09:36:11,055 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 2.504e+02 3.071e+02 3.633e+02 8.131e+02, threshold=6.142e+02, percent-clipped=3.0 +2023-02-07 09:36:21,079 INFO [train.py:901] (2/4) Epoch 26, batch 450, loss[loss=0.2231, simple_loss=0.3194, pruned_loss=0.06342, over 8312.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.05923, over 1453218.59 frames. ], batch size: 49, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:36:40,725 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-07 09:36:55,498 INFO [train.py:901] (2/4) Epoch 26, batch 500, loss[loss=0.1756, simple_loss=0.2571, pruned_loss=0.04706, over 7546.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2847, pruned_loss=0.05928, over 1490003.49 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:19,245 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.396e+02 2.962e+02 4.085e+02 8.069e+02, threshold=5.924e+02, percent-clipped=6.0 +2023-02-07 09:37:29,372 INFO [train.py:901] (2/4) Epoch 26, batch 550, loss[loss=0.2719, simple_loss=0.3418, pruned_loss=0.101, over 6794.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.05995, over 1520752.55 frames. ], batch size: 71, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:41,179 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 09:37:52,232 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 09:38:01,512 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1343, 1.6407, 4.4379, 2.0684, 2.5502, 5.1002, 5.1966, 4.3982], + device='cuda:2'), covar=tensor([0.1316, 0.1929, 0.0292, 0.2009, 0.1216, 0.0181, 0.0580, 0.0568], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0326, 0.0289, 0.0319, 0.0318, 0.0274, 0.0433, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 09:38:05,142 INFO [train.py:901] (2/4) Epoch 26, batch 600, loss[loss=0.1743, simple_loss=0.2491, pruned_loss=0.04976, over 7537.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2847, pruned_loss=0.05931, over 1541009.00 frames. ], batch size: 18, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:21,711 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-07 09:38:29,023 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.445e+02 2.916e+02 3.512e+02 6.749e+02, threshold=5.833e+02, percent-clipped=3.0 +2023-02-07 09:38:38,962 INFO [train.py:901] (2/4) Epoch 26, batch 650, loss[loss=0.1759, simple_loss=0.2731, pruned_loss=0.03941, over 7974.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.285, pruned_loss=0.05961, over 1553901.20 frames. ], batch size: 21, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:39,871 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:38:57,381 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:39:14,890 INFO [train.py:901] (2/4) Epoch 26, batch 700, loss[loss=0.2079, simple_loss=0.294, pruned_loss=0.06094, over 8284.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2846, pruned_loss=0.05945, over 1561596.07 frames. ], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:39:38,616 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.498e+02 3.029e+02 3.750e+02 8.351e+02, threshold=6.058e+02, percent-clipped=3.0 +2023-02-07 09:39:45,991 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 09:39:49,880 INFO [train.py:901] (2/4) Epoch 26, batch 750, loss[loss=0.1743, simple_loss=0.2633, pruned_loss=0.0426, over 7804.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2851, pruned_loss=0.05945, over 1575323.35 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:05,054 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 09:40:07,994 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202848.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:40:13,834 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 09:40:24,779 INFO [train.py:901] (2/4) Epoch 26, batch 800, loss[loss=0.1996, simple_loss=0.2926, pruned_loss=0.05333, over 8667.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2865, pruned_loss=0.06014, over 1589666.18 frames. ], batch size: 34, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:49,974 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.489e+02 2.818e+02 3.827e+02 7.280e+02, threshold=5.635e+02, percent-clipped=3.0 +2023-02-07 09:40:59,899 INFO [train.py:901] (2/4) Epoch 26, batch 850, loss[loss=0.1548, simple_loss=0.2357, pruned_loss=0.03697, over 7442.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2848, pruned_loss=0.05895, over 1594090.95 frames. ], batch size: 17, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:33,349 INFO [train.py:901] (2/4) Epoch 26, batch 900, loss[loss=0.1894, simple_loss=0.2616, pruned_loss=0.05857, over 7655.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2856, pruned_loss=0.05923, over 1602892.15 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:58,978 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.742e+02 3.265e+02 4.005e+02 6.934e+02, threshold=6.531e+02, percent-clipped=5.0 +2023-02-07 09:42:08,858 INFO [train.py:901] (2/4) Epoch 26, batch 950, loss[loss=0.2053, simple_loss=0.2932, pruned_loss=0.05866, over 8466.00 frames. 
], tot_loss[loss=0.2023, simple_loss=0.2859, pruned_loss=0.05941, over 1610065.27 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:12,078 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 09:42:31,729 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 09:42:32,525 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:42:43,151 INFO [train.py:901] (2/4) Epoch 26, batch 1000, loss[loss=0.1879, simple_loss=0.2689, pruned_loss=0.05345, over 7983.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2856, pruned_loss=0.05931, over 1612585.94 frames. ], batch size: 21, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:53,364 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0393, 1.7193, 3.5390, 1.4982, 2.3630, 3.9273, 3.9703, 3.3416], + device='cuda:2'), covar=tensor([0.1133, 0.1643, 0.0319, 0.2110, 0.1061, 0.0220, 0.0505, 0.0574], + device='cuda:2'), in_proj_covar=tensor([0.0301, 0.0323, 0.0288, 0.0316, 0.0316, 0.0273, 0.0432, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 09:43:04,711 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:43:05,256 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 09:43:07,153 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.348e+02 2.857e+02 3.355e+02 6.976e+02, threshold=5.714e+02, percent-clipped=1.0 +2023-02-07 09:43:17,239 INFO [train.py:901] (2/4) Epoch 26, batch 1050, loss[loss=0.172, simple_loss=0.2492, pruned_loss=0.04742, over 7804.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05873, over 1610111.21 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:43:18,670 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 09:43:53,220 INFO [train.py:901] (2/4) Epoch 26, batch 1100, loss[loss=0.1785, simple_loss=0.2552, pruned_loss=0.05089, over 7425.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.0587, over 1612451.10 frames. ], batch size: 17, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:06,848 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203192.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:44:16,729 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.511e+02 2.912e+02 3.711e+02 8.666e+02, threshold=5.824e+02, percent-clipped=4.0 +2023-02-07 09:44:27,591 INFO [train.py:901] (2/4) Epoch 26, batch 1150, loss[loss=0.2393, simple_loss=0.3209, pruned_loss=0.07884, over 8455.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.0588, over 1608938.33 frames. ], batch size: 27, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:27,597 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 09:45:02,742 INFO [train.py:901] (2/4) Epoch 26, batch 1200, loss[loss=0.2255, simple_loss=0.3177, pruned_loss=0.06663, over 8187.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05871, over 1606524.57 frames. 
], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:45:27,283 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203307.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:45:27,743 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.402e+02 2.806e+02 3.306e+02 6.331e+02, threshold=5.612e+02, percent-clipped=2.0 +2023-02-07 09:45:37,087 INFO [train.py:901] (2/4) Epoch 26, batch 1250, loss[loss=0.1776, simple_loss=0.2585, pruned_loss=0.04837, over 7425.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2828, pruned_loss=0.05836, over 1608331.26 frames. ], batch size: 17, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:45:49,554 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7602, 1.9207, 2.0612, 1.3198, 2.1257, 1.5353, 0.5538, 1.9492], + device='cuda:2'), covar=tensor([0.0579, 0.0400, 0.0314, 0.0619, 0.0421, 0.0978, 0.0928, 0.0302], + device='cuda:2'), in_proj_covar=tensor([0.0461, 0.0400, 0.0356, 0.0453, 0.0387, 0.0539, 0.0395, 0.0428], + device='cuda:2'), out_proj_covar=tensor([1.2258e-04, 1.0409e-04, 9.2922e-05, 1.1871e-04, 1.0138e-04, 1.5074e-04, + 1.0581e-04, 1.1255e-04], device='cuda:2') +2023-02-07 09:46:02,956 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9979, 2.2019, 1.7640, 2.8514, 1.3360, 1.5955, 2.0701, 2.2636], + device='cuda:2'), covar=tensor([0.0674, 0.0754, 0.0910, 0.0301, 0.1048, 0.1221, 0.0771, 0.0688], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0194, 0.0246, 0.0212, 0.0205, 0.0247, 0.0249, 0.0207], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 09:46:11,111 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. limit=5.0 +2023-02-07 09:46:12,721 INFO [train.py:901] (2/4) Epoch 26, batch 1300, loss[loss=0.1647, simple_loss=0.2426, pruned_loss=0.04346, over 7928.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2838, pruned_loss=0.05885, over 1612722.58 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:46:29,987 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5998, 1.5406, 2.0702, 1.4353, 1.1535, 2.0676, 0.3860, 1.2970], + device='cuda:2'), covar=tensor([0.1364, 0.1306, 0.0361, 0.0902, 0.2438, 0.0357, 0.1763, 0.1195], + device='cuda:2'), in_proj_covar=tensor([0.0198, 0.0204, 0.0132, 0.0224, 0.0278, 0.0143, 0.0173, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 09:46:31,892 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:46:37,070 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.437e+02 2.919e+02 3.429e+02 9.499e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-07 09:46:46,433 INFO [train.py:901] (2/4) Epoch 26, batch 1350, loss[loss=0.1713, simple_loss=0.2453, pruned_loss=0.04863, over 7239.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05868, over 1614328.12 frames. ], batch size: 16, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:04,177 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203447.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:22,494 INFO [train.py:901] (2/4) Epoch 26, batch 1400, loss[loss=0.1841, simple_loss=0.2735, pruned_loss=0.0474, over 8364.00 frames. 
], tot_loss[loss=0.2, simple_loss=0.2831, pruned_loss=0.05848, over 1614652.06 frames. ], batch size: 24, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:24,885 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 09:47:31,505 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:47,758 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.419e+02 2.906e+02 3.589e+02 5.599e+02, threshold=5.812e+02, percent-clipped=0.0 +2023-02-07 09:47:52,596 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:54,386 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 09:47:57,074 INFO [train.py:901] (2/4) Epoch 26, batch 1450, loss[loss=0.211, simple_loss=0.2921, pruned_loss=0.06493, over 8464.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2827, pruned_loss=0.05802, over 1613483.52 frames. ], batch size: 29, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:01,588 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 09:48:10,650 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5887, 2.0943, 3.3466, 1.4623, 2.5243, 2.0556, 1.7009, 2.7517], + device='cuda:2'), covar=tensor([0.1967, 0.2720, 0.0744, 0.4783, 0.1804, 0.3288, 0.2447, 0.1909], + device='cuda:2'), in_proj_covar=tensor([0.0533, 0.0623, 0.0554, 0.0659, 0.0654, 0.0602, 0.0552, 0.0637], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:48:18,154 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4596, 2.1540, 2.8297, 2.3493, 2.8157, 2.4752, 2.2913, 1.7260], + device='cuda:2'), covar=tensor([0.5576, 0.5134, 0.2043, 0.3938, 0.2657, 0.3180, 0.1809, 0.5613], + device='cuda:2'), in_proj_covar=tensor([0.0962, 0.1018, 0.0830, 0.0988, 0.1025, 0.0926, 0.0772, 0.0849], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 09:48:24,037 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:48:24,758 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203563.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:48:31,362 INFO [train.py:901] (2/4) Epoch 26, batch 1500, loss[loss=0.2012, simple_loss=0.2857, pruned_loss=0.05831, over 8082.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2829, pruned_loss=0.05869, over 1612877.55 frames. ], batch size: 21, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:33,571 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 09:48:42,963 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203588.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:48:56,866 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.262e+02 2.668e+02 3.517e+02 8.500e+02, threshold=5.335e+02, percent-clipped=2.0 +2023-02-07 09:49:06,819 INFO [train.py:901] (2/4) Epoch 26, batch 1550, loss[loss=0.2054, simple_loss=0.2739, pruned_loss=0.06851, over 7639.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2828, pruned_loss=0.05842, over 1616282.60 frames. 
], batch size: 19, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:49:15,918 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6846, 1.9108, 1.9859, 1.2574, 2.0749, 1.5175, 0.5144, 1.9273], + device='cuda:2'), covar=tensor([0.0601, 0.0449, 0.0342, 0.0728, 0.0488, 0.0934, 0.0998, 0.0393], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0405, 0.0359, 0.0457, 0.0392, 0.0545, 0.0399, 0.0432], + device='cuda:2'), out_proj_covar=tensor([1.2384e-04, 1.0548e-04, 9.3703e-05, 1.1969e-04, 1.0256e-04, 1.5250e-04, + 1.0683e-04, 1.1347e-04], device='cuda:2') +2023-02-07 09:49:40,409 INFO [train.py:901] (2/4) Epoch 26, batch 1600, loss[loss=0.1868, simple_loss=0.2662, pruned_loss=0.05369, over 7649.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2817, pruned_loss=0.05787, over 1614225.37 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:05,089 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.410e+02 3.021e+02 3.901e+02 1.362e+03, threshold=6.042e+02, percent-clipped=8.0 +2023-02-07 09:50:15,006 INFO [train.py:901] (2/4) Epoch 26, batch 1650, loss[loss=0.2286, simple_loss=0.3116, pruned_loss=0.07283, over 8515.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2837, pruned_loss=0.05914, over 1616875.21 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:35,769 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 09:50:44,790 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:48,717 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:49,154 INFO [train.py:901] (2/4) Epoch 26, batch 1700, loss[loss=0.2223, simple_loss=0.3081, pruned_loss=0.06829, over 8477.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05943, over 1620347.46 frames. ], batch size: 28, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:54,930 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 09:50:57,401 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:05,469 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:14,292 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.478e+02 3.017e+02 3.791e+02 8.735e+02, threshold=6.035e+02, percent-clipped=4.0 +2023-02-07 09:51:20,898 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:23,317 INFO [train.py:901] (2/4) Epoch 26, batch 1750, loss[loss=0.1748, simple_loss=0.2621, pruned_loss=0.04378, over 7785.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2845, pruned_loss=0.05958, over 1621246.42 frames. 
], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:51:28,107 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:38,336 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:54,079 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:58,819 INFO [train.py:901] (2/4) Epoch 26, batch 1800, loss[loss=0.1887, simple_loss=0.2757, pruned_loss=0.05078, over 7821.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2845, pruned_loss=0.05965, over 1619966.25 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:52:23,002 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.441e+02 2.799e+02 3.336e+02 4.977e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 09:52:23,386 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 09:52:29,710 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203918.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:52:32,265 INFO [train.py:901] (2/4) Epoch 26, batch 1850, loss[loss=0.2027, simple_loss=0.2748, pruned_loss=0.06527, over 7713.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05945, over 1618345.25 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:52:47,701 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:53:07,703 INFO [train.py:901] (2/4) Epoch 26, batch 1900, loss[loss=0.218, simple_loss=0.3054, pruned_loss=0.06529, over 8331.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2851, pruned_loss=0.05937, over 1624018.65 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:09,159 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9921, 1.7133, 3.5166, 1.6595, 2.4517, 3.8997, 3.9808, 3.3642], + device='cuda:2'), covar=tensor([0.1150, 0.1649, 0.0334, 0.2005, 0.1015, 0.0215, 0.0523, 0.0496], + device='cuda:2'), in_proj_covar=tensor([0.0299, 0.0319, 0.0286, 0.0314, 0.0314, 0.0271, 0.0428, 0.0300], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 09:53:33,457 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.507e+02 3.073e+02 4.108e+02 9.647e+02, threshold=6.146e+02, percent-clipped=9.0 +2023-02-07 09:53:35,030 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5302, 1.4735, 1.8697, 1.2982, 1.1885, 1.8618, 0.2523, 1.2316], + device='cuda:2'), covar=tensor([0.1465, 0.1156, 0.0350, 0.0839, 0.2360, 0.0377, 0.1811, 0.1143], + device='cuda:2'), in_proj_covar=tensor([0.0198, 0.0204, 0.0133, 0.0225, 0.0278, 0.0143, 0.0172, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 09:53:36,901 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 09:53:42,909 INFO [train.py:901] (2/4) Epoch 26, batch 1950, loss[loss=0.1988, simple_loss=0.2794, pruned_loss=0.05913, over 7780.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2842, pruned_loss=0.05871, over 1623453.79 frames. 
], batch size: 19, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:49,444 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 09:54:07,148 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 09:54:17,198 INFO [train.py:901] (2/4) Epoch 26, batch 2000, loss[loss=0.1734, simple_loss=0.2621, pruned_loss=0.0424, over 8080.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.05917, over 1623820.19 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:34,348 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:43,741 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.388e+02 3.050e+02 3.690e+02 7.171e+02, threshold=6.101e+02, percent-clipped=4.0 +2023-02-07 09:54:44,462 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:48,534 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8538, 1.6632, 2.0451, 1.5837, 1.0408, 1.8259, 2.3674, 2.3078], + device='cuda:2'), covar=tensor([0.0459, 0.1241, 0.1560, 0.1460, 0.0609, 0.1452, 0.0590, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 09:54:53,081 INFO [train.py:901] (2/4) Epoch 26, batch 2050, loss[loss=0.1841, simple_loss=0.2753, pruned_loss=0.04648, over 8087.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2841, pruned_loss=0.05936, over 1618634.66 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:57,140 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:26,603 INFO [train.py:901] (2/4) Epoch 26, batch 2100, loss[loss=0.2061, simple_loss=0.2858, pruned_loss=0.06317, over 8249.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2844, pruned_loss=0.05965, over 1616009.10 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:55:33,648 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:47,785 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:52,931 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.312e+02 2.797e+02 3.552e+02 6.063e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-07 09:55:53,721 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:02,439 INFO [train.py:901] (2/4) Epoch 26, batch 2150, loss[loss=0.1701, simple_loss=0.2397, pruned_loss=0.05027, over 7694.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05908, over 1617317.57 frames. 
], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:56:03,969 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:04,624 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:17,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:29,891 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204262.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:56:36,274 INFO [train.py:901] (2/4) Epoch 26, batch 2200, loss[loss=0.2235, simple_loss=0.3056, pruned_loss=0.07071, over 8448.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2843, pruned_loss=0.05965, over 1618005.34 frames. ], batch size: 27, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:56:48,681 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4191, 1.6681, 1.6980, 1.1606, 1.7092, 1.3569, 0.2995, 1.6729], + device='cuda:2'), covar=tensor([0.0499, 0.0397, 0.0318, 0.0560, 0.0403, 0.1007, 0.0979, 0.0300], + device='cuda:2'), in_proj_covar=tensor([0.0464, 0.0402, 0.0357, 0.0453, 0.0389, 0.0544, 0.0396, 0.0430], + device='cuda:2'), out_proj_covar=tensor([1.2326e-04, 1.0480e-04, 9.3285e-05, 1.1880e-04, 1.0173e-04, 1.5213e-04, + 1.0605e-04, 1.1306e-04], device='cuda:2') +2023-02-07 09:57:01,384 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.362e+02 3.099e+02 4.074e+02 1.599e+03, threshold=6.197e+02, percent-clipped=8.0 +2023-02-07 09:57:11,815 INFO [train.py:901] (2/4) Epoch 26, batch 2250, loss[loss=0.1973, simple_loss=0.2863, pruned_loss=0.05414, over 8363.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2843, pruned_loss=0.05938, over 1614136.75 frames. ], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:13,367 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:57:30,422 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5495, 4.5741, 4.1593, 2.0242, 4.0097, 4.1970, 4.1177, 4.0436], + device='cuda:2'), covar=tensor([0.0669, 0.0496, 0.0942, 0.4878, 0.0829, 0.1019, 0.1247, 0.0678], + device='cuda:2'), in_proj_covar=tensor([0.0533, 0.0451, 0.0433, 0.0548, 0.0434, 0.0454, 0.0431, 0.0394], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 09:57:46,482 INFO [train.py:901] (2/4) Epoch 26, batch 2300, loss[loss=0.223, simple_loss=0.3053, pruned_loss=0.07037, over 8491.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2849, pruned_loss=0.05951, over 1617369.68 frames. ], batch size: 39, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:50,092 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204377.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:58:10,713 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.304e+02 2.813e+02 3.713e+02 7.684e+02, threshold=5.626e+02, percent-clipped=3.0 +2023-02-07 09:58:21,043 INFO [train.py:901] (2/4) Epoch 26, batch 2350, loss[loss=0.1899, simple_loss=0.2735, pruned_loss=0.05311, over 8025.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05961, over 1619554.49 frames. 
], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:58:33,860 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:58:34,647 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0286, 1.8608, 2.0937, 1.8451, 0.9548, 1.8334, 2.4596, 3.0270], + device='cuda:2'), covar=tensor([0.0438, 0.1173, 0.1572, 0.1349, 0.0578, 0.1418, 0.0542, 0.0417], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0191, 0.0161, 0.0100, 0.0164, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 09:58:57,197 INFO [train.py:901] (2/4) Epoch 26, batch 2400, loss[loss=0.28, simple_loss=0.3459, pruned_loss=0.1071, over 6867.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2851, pruned_loss=0.05949, over 1618647.73 frames. ], batch size: 72, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:58:58,110 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7922, 2.1568, 3.6440, 2.0580, 1.7676, 3.5504, 0.7599, 2.1646], + device='cuda:2'), covar=tensor([0.1266, 0.1183, 0.0189, 0.1380, 0.2314, 0.0329, 0.1778, 0.1110], + device='cuda:2'), in_proj_covar=tensor([0.0198, 0.0204, 0.0132, 0.0224, 0.0278, 0.0144, 0.0173, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 09:59:02,876 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:16,094 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:20,398 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:22,312 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.551e+02 2.904e+02 3.805e+02 7.023e+02, threshold=5.807e+02, percent-clipped=3.0 +2023-02-07 09:59:32,133 INFO [train.py:901] (2/4) Epoch 26, batch 2450, loss[loss=0.2534, simple_loss=0.326, pruned_loss=0.09036, over 8528.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2841, pruned_loss=0.05927, over 1619321.75 frames. ], batch size: 28, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:33,748 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204524.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:34,344 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:43,679 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2464, 2.0368, 2.6279, 2.2249, 2.7228, 2.3340, 2.1224, 1.5742], + device='cuda:2'), covar=tensor([0.5738, 0.5240, 0.2355, 0.4193, 0.2586, 0.3394, 0.1981, 0.5911], + device='cuda:2'), in_proj_covar=tensor([0.0956, 0.1011, 0.0824, 0.0983, 0.1020, 0.0919, 0.0766, 0.0845], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 09:59:56,603 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:08,715 INFO [train.py:901] (2/4) Epoch 26, batch 2500, loss[loss=0.2185, simple_loss=0.3005, pruned_loss=0.06825, over 8241.00 frames. 
], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.05926, over 1617917.93 frames. ], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:15,138 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:32,007 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:33,807 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.483e+02 3.074e+02 3.585e+02 8.993e+02, threshold=6.148e+02, percent-clipped=7.0 +2023-02-07 10:00:42,666 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9179, 1.5317, 1.8152, 1.3866, 1.0272, 1.6245, 1.7151, 1.5310], + device='cuda:2'), covar=tensor([0.0517, 0.1187, 0.1607, 0.1434, 0.0588, 0.1359, 0.0676, 0.0670], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 10:00:43,170 INFO [train.py:901] (2/4) Epoch 26, batch 2550, loss[loss=0.1593, simple_loss=0.2369, pruned_loss=0.04079, over 7536.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2843, pruned_loss=0.05931, over 1618972.73 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:50,556 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204633.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 10:00:55,148 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204640.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:01:02,546 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0190, 1.6225, 1.8943, 1.5427, 1.0073, 1.6602, 1.7714, 1.7093], + device='cuda:2'), covar=tensor([0.0541, 0.1185, 0.1561, 0.1339, 0.0607, 0.1361, 0.0689, 0.0598], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 10:01:07,864 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204658.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 10:01:18,433 INFO [train.py:901] (2/4) Epoch 26, batch 2600, loss[loss=0.2138, simple_loss=0.3026, pruned_loss=0.06254, over 8141.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2839, pruned_loss=0.0593, over 1617317.81 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:01:43,342 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.465e+02 3.094e+02 3.874e+02 9.576e+02, threshold=6.187e+02, percent-clipped=4.0 +2023-02-07 10:01:52,895 INFO [train.py:901] (2/4) Epoch 26, batch 2650, loss[loss=0.199, simple_loss=0.2889, pruned_loss=0.05458, over 8487.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2833, pruned_loss=0.05892, over 1614425.99 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:28,028 INFO [train.py:901] (2/4) Epoch 26, batch 2700, loss[loss=0.1778, simple_loss=0.254, pruned_loss=0.05076, over 7416.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2827, pruned_loss=0.05868, over 1608903.95 frames. 
], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:53,803 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.359e+02 2.865e+02 3.674e+02 6.992e+02, threshold=5.730e+02, percent-clipped=1.0 +2023-02-07 10:02:55,424 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:04,000 INFO [train.py:901] (2/4) Epoch 26, batch 2750, loss[loss=0.2066, simple_loss=0.2832, pruned_loss=0.06496, over 7687.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2825, pruned_loss=0.05885, over 1602759.96 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:13,268 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:16,026 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:38,127 INFO [train.py:901] (2/4) Epoch 26, batch 2800, loss[loss=0.1838, simple_loss=0.2654, pruned_loss=0.05109, over 7428.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2824, pruned_loss=0.05883, over 1599094.61 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:55,277 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204896.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:04,627 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.285e+02 3.108e+02 3.828e+02 9.944e+02, threshold=6.216e+02, percent-clipped=6.0 +2023-02-07 10:04:13,926 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204921.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:14,419 INFO [train.py:901] (2/4) Epoch 26, batch 2850, loss[loss=0.1986, simple_loss=0.2647, pruned_loss=0.06626, over 7652.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2826, pruned_loss=0.05885, over 1599221.43 frames. ], batch size: 19, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:04:23,885 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3878, 1.1622, 2.2782, 1.3721, 2.0819, 2.4583, 2.6171, 2.0667], + device='cuda:2'), covar=tensor([0.1235, 0.1669, 0.0457, 0.2030, 0.0877, 0.0426, 0.0802, 0.0691], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0327, 0.0290, 0.0319, 0.0321, 0.0276, 0.0435, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 10:04:48,524 INFO [train.py:901] (2/4) Epoch 26, batch 2900, loss[loss=0.1939, simple_loss=0.2845, pruned_loss=0.05164, over 8247.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2821, pruned_loss=0.05829, over 1600810.56 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:04:59,571 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.16 vs. limit=5.0 +2023-02-07 10:05:00,983 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 10:05:13,383 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.393e+02 3.052e+02 3.991e+02 9.487e+02, threshold=6.105e+02, percent-clipped=5.0 +2023-02-07 10:05:20,486 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 10:05:23,875 INFO [train.py:901] (2/4) Epoch 26, batch 2950, loss[loss=0.1982, simple_loss=0.2818, pruned_loss=0.05723, over 8040.00 frames. 
], tot_loss[loss=0.1992, simple_loss=0.2817, pruned_loss=0.05836, over 1599074.66 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:58,487 INFO [train.py:901] (2/4) Epoch 26, batch 3000, loss[loss=0.2039, simple_loss=0.2883, pruned_loss=0.05977, over 8470.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2821, pruned_loss=0.05836, over 1602878.53 frames. ], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:58,488 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 10:06:07,785 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.7745, 1.4910, 3.9225, 1.6138, 3.5183, 3.2459, 3.6190, 3.5082], + device='cuda:2'), covar=tensor([0.0702, 0.4520, 0.0513, 0.4127, 0.1008, 0.1045, 0.0630, 0.0697], + device='cuda:2'), in_proj_covar=tensor([0.0665, 0.0662, 0.0728, 0.0650, 0.0742, 0.0629, 0.0629, 0.0706], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:06:11,419 INFO [train.py:935] (2/4) Epoch 26, validation: loss=0.1716, simple_loss=0.2713, pruned_loss=0.03593, over 944034.00 frames. +2023-02-07 10:06:11,419 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 10:06:31,073 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-07 10:06:36,703 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.246e+02 2.785e+02 3.735e+02 7.523e+02, threshold=5.571e+02, percent-clipped=3.0 +2023-02-07 10:06:46,003 INFO [train.py:901] (2/4) Epoch 26, batch 3050, loss[loss=0.1993, simple_loss=0.2837, pruned_loss=0.05749, over 8233.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05855, over 1607871.38 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:06:49,436 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:22,789 INFO [train.py:901] (2/4) Epoch 26, batch 3100, loss[loss=0.198, simple_loss=0.285, pruned_loss=0.05552, over 8760.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2843, pruned_loss=0.05889, over 1611111.64 frames. ], batch size: 30, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:07:30,227 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:48,156 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.327e+02 2.997e+02 4.038e+02 1.256e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 10:07:53,693 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:57,548 INFO [train.py:901] (2/4) Epoch 26, batch 3150, loss[loss=0.2151, simple_loss=0.2939, pruned_loss=0.06811, over 8498.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05944, over 1610541.22 frames. ], batch size: 26, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:33,476 INFO [train.py:901] (2/4) Epoch 26, batch 3200, loss[loss=0.1457, simple_loss=0.2303, pruned_loss=0.03049, over 7459.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2836, pruned_loss=0.05918, over 1603992.11 frames. 
], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:52,247 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:08:57,569 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4959, 4.5055, 4.0924, 2.2058, 4.0150, 4.0851, 4.0131, 3.9135], + device='cuda:2'), covar=tensor([0.0688, 0.0477, 0.1010, 0.4352, 0.0826, 0.0964, 0.1265, 0.0776], + device='cuda:2'), in_proj_covar=tensor([0.0534, 0.0449, 0.0437, 0.0549, 0.0432, 0.0455, 0.0431, 0.0394], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:08:58,816 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.527e+02 3.010e+02 3.735e+02 6.895e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-07 10:09:09,091 INFO [train.py:901] (2/4) Epoch 26, batch 3250, loss[loss=0.2258, simple_loss=0.3123, pruned_loss=0.06964, over 8567.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05914, over 1603604.99 frames. ], batch size: 31, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:09:43,189 INFO [train.py:901] (2/4) Epoch 26, batch 3300, loss[loss=0.1961, simple_loss=0.2874, pruned_loss=0.05239, over 8751.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.284, pruned_loss=0.05939, over 1604943.00 frames. ], batch size: 40, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:09:53,668 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9405, 1.9732, 1.8489, 2.5903, 1.0053, 1.5969, 1.8696, 2.0142], + device='cuda:2'), covar=tensor([0.0754, 0.0819, 0.0874, 0.0372, 0.1192, 0.1370, 0.0874, 0.0803], + device='cuda:2'), in_proj_covar=tensor([0.0228, 0.0192, 0.0243, 0.0210, 0.0203, 0.0245, 0.0248, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 10:10:10,309 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.313e+02 2.653e+02 3.358e+02 9.214e+02, threshold=5.305e+02, percent-clipped=4.0 +2023-02-07 10:10:16,830 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5811, 1.9008, 2.6439, 1.4765, 1.9982, 2.0301, 1.5459, 2.1126], + device='cuda:2'), covar=tensor([0.1893, 0.2515, 0.0995, 0.4527, 0.1869, 0.3023, 0.2553, 0.2006], + device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0626, 0.0559, 0.0663, 0.0658, 0.0607, 0.0557, 0.0640], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:10:20,066 INFO [train.py:901] (2/4) Epoch 26, batch 3350, loss[loss=0.2476, simple_loss=0.3163, pruned_loss=0.08944, over 6919.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2832, pruned_loss=0.0588, over 1606023.58 frames. ], batch size: 72, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:10:54,101 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:10:54,721 INFO [train.py:901] (2/4) Epoch 26, batch 3400, loss[loss=0.2141, simple_loss=0.2964, pruned_loss=0.06588, over 8507.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.0581, over 1609404.69 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:02,812 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. 
limit=2.0 +2023-02-07 10:11:03,624 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2604, 1.7978, 4.4248, 2.0625, 2.4586, 5.0920, 5.1722, 4.3691], + device='cuda:2'), covar=tensor([0.1293, 0.1790, 0.0253, 0.1950, 0.1106, 0.0176, 0.0381, 0.0612], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0327, 0.0290, 0.0319, 0.0321, 0.0276, 0.0435, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 10:11:20,313 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.409e+02 2.883e+02 3.635e+02 7.106e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 10:11:30,463 INFO [train.py:901] (2/4) Epoch 26, batch 3450, loss[loss=0.1821, simple_loss=0.2674, pruned_loss=0.04842, over 8087.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05838, over 1610625.64 frames. ], batch size: 21, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:53,166 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:11:57,170 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:05,234 INFO [train.py:901] (2/4) Epoch 26, batch 3500, loss[loss=0.1932, simple_loss=0.2902, pruned_loss=0.04814, over 8623.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2829, pruned_loss=0.05779, over 1610240.74 frames. ], batch size: 39, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:12:10,358 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:15,152 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:21,539 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6268, 2.0151, 3.0617, 1.4704, 2.2313, 2.0715, 1.6080, 2.3049], + device='cuda:2'), covar=tensor([0.1902, 0.2791, 0.0872, 0.4794, 0.2083, 0.3239, 0.2669, 0.2447], + device='cuda:2'), in_proj_covar=tensor([0.0536, 0.0626, 0.0558, 0.0662, 0.0658, 0.0605, 0.0557, 0.0640], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:12:24,736 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 10:12:30,113 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.208e+02 2.714e+02 3.358e+02 5.744e+02, threshold=5.428e+02, percent-clipped=0.0 +2023-02-07 10:12:35,785 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 10:12:39,511 INFO [train.py:901] (2/4) Epoch 26, batch 3550, loss[loss=0.2034, simple_loss=0.2896, pruned_loss=0.05863, over 8503.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2824, pruned_loss=0.05746, over 1613774.18 frames. ], batch size: 28, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:15,378 INFO [train.py:901] (2/4) Epoch 26, batch 3600, loss[loss=0.1702, simple_loss=0.2581, pruned_loss=0.04114, over 8136.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2814, pruned_loss=0.05678, over 1611407.49 frames. 
], batch size: 22, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:16,200 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205673.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:17,475 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:39,661 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.295e+02 2.882e+02 3.730e+02 8.207e+02, threshold=5.763e+02, percent-clipped=6.0 +2023-02-07 10:13:49,108 INFO [train.py:901] (2/4) Epoch 26, batch 3650, loss[loss=0.2152, simple_loss=0.3048, pruned_loss=0.06277, over 8627.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2816, pruned_loss=0.0566, over 1613964.09 frames. ], batch size: 39, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:18,432 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:14:24,803 INFO [train.py:901] (2/4) Epoch 26, batch 3700, loss[loss=0.2104, simple_loss=0.2967, pruned_loss=0.06201, over 8643.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2821, pruned_loss=0.05688, over 1610537.71 frames. ], batch size: 34, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:27,592 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:14:49,669 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.377e+02 2.968e+02 3.727e+02 1.221e+03, threshold=5.937e+02, percent-clipped=5.0 +2023-02-07 10:14:59,197 INFO [train.py:901] (2/4) Epoch 26, batch 3750, loss[loss=0.1826, simple_loss=0.2507, pruned_loss=0.05721, over 7922.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05693, over 1611530.79 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:12,916 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:31,310 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:34,014 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2021, 1.0764, 1.2933, 1.0409, 0.9181, 1.2943, 0.0977, 0.8944], + device='cuda:2'), covar=tensor([0.1385, 0.1304, 0.0537, 0.0707, 0.2534, 0.0544, 0.1976, 0.1172], + device='cuda:2'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0221, 0.0274, 0.0144, 0.0171, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 10:15:34,438 INFO [train.py:901] (2/4) Epoch 26, batch 3800, loss[loss=0.2558, simple_loss=0.3477, pruned_loss=0.08193, over 8489.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2821, pruned_loss=0.0572, over 1614482.47 frames. ], batch size: 28, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:59,218 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.381e+02 2.847e+02 3.364e+02 6.986e+02, threshold=5.694e+02, percent-clipped=1.0 +2023-02-07 10:15:59,376 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:08,812 INFO [train.py:901] (2/4) Epoch 26, batch 3850, loss[loss=0.1891, simple_loss=0.2775, pruned_loss=0.05033, over 8453.00 frames. 
], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.05761, over 1617807.25 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:16:15,126 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205931.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:20,580 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1609, 1.7855, 3.4826, 1.6325, 2.5208, 3.9042, 3.9962, 3.3309], + device='cuda:2'), covar=tensor([0.1149, 0.1682, 0.0340, 0.2069, 0.1034, 0.0205, 0.0537, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0328, 0.0291, 0.0321, 0.0322, 0.0279, 0.0438, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 10:16:23,272 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7303, 2.1236, 3.0945, 1.5365, 2.5144, 2.1295, 1.8167, 2.4466], + device='cuda:2'), covar=tensor([0.1798, 0.2504, 0.0833, 0.4457, 0.1695, 0.2974, 0.2259, 0.2146], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0626, 0.0558, 0.0662, 0.0657, 0.0605, 0.0557, 0.0641], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:16:29,178 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 10:16:32,102 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205956.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:42,796 INFO [train.py:901] (2/4) Epoch 26, batch 3900, loss[loss=0.1831, simple_loss=0.2738, pruned_loss=0.04614, over 8106.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05753, over 1618633.72 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:09,995 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.441e+02 2.892e+02 3.706e+02 7.796e+02, threshold=5.785e+02, percent-clipped=3.0 +2023-02-07 10:17:16,699 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:17:18,464 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 10:17:20,037 INFO [train.py:901] (2/4) Epoch 26, batch 3950, loss[loss=0.1778, simple_loss=0.2702, pruned_loss=0.04265, over 8199.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05767, over 1616866.24 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:53,922 INFO [train.py:901] (2/4) Epoch 26, batch 4000, loss[loss=0.1931, simple_loss=0.2718, pruned_loss=0.05721, over 7975.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2835, pruned_loss=0.05824, over 1617784.58 frames. 
], batch size: 21, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:55,451 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:18,674 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206106.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:19,947 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.407e+02 2.986e+02 3.556e+02 8.558e+02, threshold=5.971e+02, percent-clipped=6.0 +2023-02-07 10:18:23,569 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1376, 1.8770, 3.5457, 1.5460, 2.5654, 3.8979, 4.0415, 3.2945], + device='cuda:2'), covar=tensor([0.1187, 0.1727, 0.0299, 0.2189, 0.0969, 0.0225, 0.0548, 0.0571], + device='cuda:2'), in_proj_covar=tensor([0.0303, 0.0326, 0.0291, 0.0320, 0.0321, 0.0278, 0.0437, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 10:18:26,939 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.1113, 1.5054, 6.2129, 2.2127, 5.6939, 5.2859, 5.7740, 5.6112], + device='cuda:2'), covar=tensor([0.0423, 0.5062, 0.0263, 0.3864, 0.0777, 0.0763, 0.0390, 0.0459], + device='cuda:2'), in_proj_covar=tensor([0.0659, 0.0655, 0.0721, 0.0644, 0.0734, 0.0620, 0.0621, 0.0696], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:18:29,517 INFO [train.py:901] (2/4) Epoch 26, batch 4050, loss[loss=0.2367, simple_loss=0.3198, pruned_loss=0.07676, over 8327.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05883, over 1615415.99 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:18:37,176 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:03,800 INFO [train.py:901] (2/4) Epoch 26, batch 4100, loss[loss=0.1679, simple_loss=0.2532, pruned_loss=0.04129, over 7823.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2837, pruned_loss=0.05868, over 1612509.80 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:27,817 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4859, 1.8860, 2.9452, 1.3390, 2.1677, 1.8570, 1.5418, 2.2482], + device='cuda:2'), covar=tensor([0.2094, 0.2771, 0.1096, 0.4904, 0.2235, 0.3465, 0.2601, 0.2512], + device='cuda:2'), in_proj_covar=tensor([0.0536, 0.0626, 0.0558, 0.0662, 0.0658, 0.0605, 0.0558, 0.0641], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:19:28,869 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.376e+02 2.755e+02 3.418e+02 9.873e+02, threshold=5.510e+02, percent-clipped=4.0 +2023-02-07 10:19:34,501 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,255 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206221.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,709 INFO [train.py:901] (2/4) Epoch 26, batch 4150, loss[loss=0.2229, simple_loss=0.3101, pruned_loss=0.06782, over 8592.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2843, pruned_loss=0.05834, over 1614515.02 frames. 
], batch size: 34, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:58,907 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1090, 1.1485, 1.4886, 1.1605, 0.7698, 1.2585, 1.1708, 0.8794], + device='cuda:2'), covar=tensor([0.0680, 0.1415, 0.1785, 0.1574, 0.0609, 0.1636, 0.0768, 0.0835], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0162, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 10:20:00,780 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:20:14,200 INFO [train.py:901] (2/4) Epoch 26, batch 4200, loss[loss=0.2384, simple_loss=0.3328, pruned_loss=0.07199, over 8251.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2848, pruned_loss=0.0585, over 1616166.91 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:20:22,976 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 10:20:35,120 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0009, 1.0197, 0.9915, 1.2379, 0.5889, 0.8808, 0.9519, 1.0422], + device='cuda:2'), covar=tensor([0.0656, 0.0623, 0.0736, 0.0549, 0.0894, 0.1045, 0.0596, 0.0561], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0194, 0.0244, 0.0212, 0.0203, 0.0246, 0.0248, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 10:20:38,381 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.335e+02 2.968e+02 3.755e+02 9.805e+02, threshold=5.936e+02, percent-clipped=3.0 +2023-02-07 10:20:44,912 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 10:20:49,135 INFO [train.py:901] (2/4) Epoch 26, batch 4250, loss[loss=0.2146, simple_loss=0.3012, pruned_loss=0.06397, over 8030.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2846, pruned_loss=0.05874, over 1616329.18 frames. ], batch size: 22, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:21:21,328 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:24,643 INFO [train.py:901] (2/4) Epoch 26, batch 4300, loss[loss=0.2149, simple_loss=0.2925, pruned_loss=0.06865, over 8658.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05817, over 1615330.29 frames. ], batch size: 39, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:21:35,883 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:50,295 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.331e+02 2.890e+02 3.800e+02 6.492e+02, threshold=5.781e+02, percent-clipped=2.0 +2023-02-07 10:21:53,277 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206413.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:56,607 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:59,344 INFO [train.py:901] (2/4) Epoch 26, batch 4350, loss[loss=0.1934, simple_loss=0.2901, pruned_loss=0.04839, over 8317.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2818, pruned_loss=0.05764, over 1616198.11 frames. 
], batch size: 25, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:18,984 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 10:22:20,070 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. limit=5.0 +2023-02-07 10:22:20,820 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-07 10:22:32,191 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4004, 4.4304, 3.9741, 2.3107, 3.8589, 4.0161, 3.9296, 3.8116], + device='cuda:2'), covar=tensor([0.0714, 0.0502, 0.1103, 0.3983, 0.0912, 0.0866, 0.1320, 0.0771], + device='cuda:2'), in_proj_covar=tensor([0.0536, 0.0454, 0.0439, 0.0552, 0.0435, 0.0457, 0.0434, 0.0399], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:22:34,782 INFO [train.py:901] (2/4) Epoch 26, batch 4400, loss[loss=0.1677, simple_loss=0.2536, pruned_loss=0.04091, over 7660.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2815, pruned_loss=0.05759, over 1613282.63 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:36,353 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3637, 1.5598, 1.5829, 1.1175, 1.6532, 1.2440, 0.3311, 1.5683], + device='cuda:2'), covar=tensor([0.0530, 0.0413, 0.0355, 0.0587, 0.0446, 0.1049, 0.0964, 0.0321], + device='cuda:2'), in_proj_covar=tensor([0.0468, 0.0405, 0.0361, 0.0457, 0.0392, 0.0550, 0.0401, 0.0436], + device='cuda:2'), out_proj_covar=tensor([1.2424e-04, 1.0562e-04, 9.4419e-05, 1.1984e-04, 1.0259e-04, 1.5377e-04, + 1.0723e-04, 1.1472e-04], device='cuda:2') +2023-02-07 10:22:38,332 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:22:55,674 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:00,161 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.619e+02 3.000e+02 3.925e+02 8.429e+02, threshold=6.000e+02, percent-clipped=7.0 +2023-02-07 10:23:00,193 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 10:23:04,480 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-07 10:23:08,811 INFO [train.py:901] (2/4) Epoch 26, batch 4450, loss[loss=0.1756, simple_loss=0.2664, pruned_loss=0.04245, over 8244.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2829, pruned_loss=0.05818, over 1612070.60 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:16,257 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206533.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:34,156 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:44,254 INFO [train.py:901] (2/4) Epoch 26, batch 4500, loss[loss=0.2005, simple_loss=0.2911, pruned_loss=0.05493, over 8473.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2821, pruned_loss=0.05793, over 1612173.20 frames. ], batch size: 25, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:55,214 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 10:24:07,620 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7301, 2.3927, 4.8665, 2.9025, 4.4902, 4.2091, 4.5540, 4.4326], + device='cuda:2'), covar=tensor([0.0586, 0.3644, 0.0564, 0.3355, 0.0889, 0.0826, 0.0508, 0.0575], + device='cuda:2'), in_proj_covar=tensor([0.0662, 0.0655, 0.0723, 0.0648, 0.0735, 0.0623, 0.0623, 0.0697], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:24:10,074 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.363e+02 2.961e+02 3.499e+02 6.135e+02, threshold=5.921e+02, percent-clipped=1.0 +2023-02-07 10:24:18,689 INFO [train.py:901] (2/4) Epoch 26, batch 4550, loss[loss=0.206, simple_loss=0.2736, pruned_loss=0.06926, over 7975.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2823, pruned_loss=0.05853, over 1609791.15 frames. ], batch size: 21, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:19,497 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:35,787 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:51,524 INFO [train.py:901] (2/4) Epoch 26, batch 4600, loss[loss=0.1515, simple_loss=0.2224, pruned_loss=0.04029, over 7188.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2829, pruned_loss=0.05871, over 1608833.38 frames. ], batch size: 16, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:53,053 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206674.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:25:01,034 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5678, 4.5670, 4.1308, 2.1483, 4.0416, 4.1370, 4.0580, 3.9226], + device='cuda:2'), covar=tensor([0.0687, 0.0474, 0.0965, 0.4556, 0.0902, 0.0780, 0.1253, 0.0702], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0452, 0.0438, 0.0550, 0.0434, 0.0456, 0.0434, 0.0399], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:25:18,499 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.342e+02 2.811e+02 3.625e+02 9.770e+02, threshold=5.622e+02, percent-clipped=5.0 +2023-02-07 10:25:21,726 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-02-07 10:25:23,271 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 10:25:28,309 INFO [train.py:901] (2/4) Epoch 26, batch 4650, loss[loss=0.2042, simple_loss=0.2937, pruned_loss=0.05734, over 8255.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2829, pruned_loss=0.05829, over 1614274.83 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:25:40,978 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 10:26:02,059 INFO [train.py:901] (2/4) Epoch 26, batch 4700, loss[loss=0.2104, simple_loss=0.2894, pruned_loss=0.06571, over 7926.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2818, pruned_loss=0.05762, over 1616181.83 frames. 
], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:08,410 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6100, 1.6890, 1.6832, 1.3806, 1.8008, 1.3956, 0.9312, 1.6478], + device='cuda:2'), covar=tensor([0.0569, 0.0409, 0.0322, 0.0500, 0.0403, 0.0694, 0.0817, 0.0299], + device='cuda:2'), in_proj_covar=tensor([0.0466, 0.0402, 0.0360, 0.0455, 0.0389, 0.0547, 0.0399, 0.0434], + device='cuda:2'), out_proj_covar=tensor([1.2370e-04, 1.0477e-04, 9.4011e-05, 1.1921e-04, 1.0177e-04, 1.5321e-04, + 1.0686e-04, 1.1408e-04], device='cuda:2') +2023-02-07 10:26:13,621 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206789.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:28,921 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.506e+02 2.890e+02 3.298e+02 6.611e+02, threshold=5.779e+02, percent-clipped=3.0 +2023-02-07 10:26:32,506 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:34,593 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5014, 2.3318, 2.9182, 2.5077, 2.8257, 2.5269, 2.3774, 1.8311], + device='cuda:2'), covar=tensor([0.4698, 0.4505, 0.1769, 0.3553, 0.2462, 0.2973, 0.1750, 0.4910], + device='cuda:2'), in_proj_covar=tensor([0.0952, 0.1003, 0.0824, 0.0979, 0.1014, 0.0917, 0.0764, 0.0838], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:26:37,698 INFO [train.py:901] (2/4) Epoch 26, batch 4750, loss[loss=0.1961, simple_loss=0.2801, pruned_loss=0.05606, over 7967.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2828, pruned_loss=0.05831, over 1605640.91 frames. ], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:53,337 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 10:26:55,380 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 10:26:58,853 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206852.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:27:00,992 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-07 10:27:12,096 INFO [train.py:901] (2/4) Epoch 26, batch 4800, loss[loss=0.1729, simple_loss=0.2554, pruned_loss=0.04519, over 7689.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05774, over 1607336.47 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:21,155 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 10:27:37,298 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.410e+02 2.886e+02 3.541e+02 7.542e+02, threshold=5.772e+02, percent-clipped=6.0 +2023-02-07 10:27:46,700 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 10:27:47,377 INFO [train.py:901] (2/4) Epoch 26, batch 4850, loss[loss=0.2028, simple_loss=0.2924, pruned_loss=0.05658, over 8195.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2836, pruned_loss=0.05865, over 1608429.06 frames. 
], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:52,971 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:10,403 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206955.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:21,661 INFO [train.py:901] (2/4) Epoch 26, batch 4900, loss[loss=0.2372, simple_loss=0.3195, pruned_loss=0.07749, over 8237.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.05902, over 1612257.86 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:28:25,971 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-07 10:28:46,038 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.535e+02 3.142e+02 3.836e+02 8.051e+02, threshold=6.285e+02, percent-clipped=2.0 +2023-02-07 10:28:55,274 INFO [train.py:901] (2/4) Epoch 26, batch 4950, loss[loss=0.2055, simple_loss=0.2938, pruned_loss=0.05857, over 8188.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05893, over 1612746.85 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:04,275 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0907, 1.3405, 1.5166, 1.2417, 0.7668, 1.3232, 1.1703, 0.9117], + device='cuda:2'), covar=tensor([0.0644, 0.1197, 0.1602, 0.1440, 0.0582, 0.1430, 0.0718, 0.0720], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 10:29:32,338 INFO [train.py:901] (2/4) Epoch 26, batch 5000, loss[loss=0.2017, simple_loss=0.2885, pruned_loss=0.0574, over 8766.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05912, over 1613967.62 frames. ], batch size: 30, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:35,480 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 10:29:57,376 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.413e+02 2.985e+02 3.933e+02 1.062e+03, threshold=5.970e+02, percent-clipped=3.0 +2023-02-07 10:30:06,448 INFO [train.py:901] (2/4) Epoch 26, batch 5050, loss[loss=0.2151, simple_loss=0.3006, pruned_loss=0.06485, over 8351.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2834, pruned_loss=0.05813, over 1617770.43 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:13,946 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1689, 3.5811, 2.2994, 2.8857, 2.8533, 2.0425, 2.8801, 3.0939], + device='cuda:2'), covar=tensor([0.1627, 0.0357, 0.1130, 0.0761, 0.0758, 0.1495, 0.1007, 0.0975], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0240, 0.0341, 0.0313, 0.0302, 0.0348, 0.0349, 0.0325], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 10:30:24,760 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 10:30:42,638 INFO [train.py:901] (2/4) Epoch 26, batch 5100, loss[loss=0.1798, simple_loss=0.2869, pruned_loss=0.03637, over 8261.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2834, pruned_loss=0.058, over 1618518.21 frames. 
], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:58,157 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=207194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:30:59,444 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:31:08,233 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.087e+02 2.633e+02 3.622e+02 6.552e+02, threshold=5.265e+02, percent-clipped=1.0 +2023-02-07 10:31:16,943 INFO [train.py:901] (2/4) Epoch 26, batch 5150, loss[loss=0.2002, simple_loss=0.2922, pruned_loss=0.05412, over 8463.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05796, over 1615741.69 frames. ], batch size: 27, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:31:48,270 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6106, 2.0603, 3.2679, 1.4975, 2.3830, 2.0747, 1.6378, 2.4903], + device='cuda:2'), covar=tensor([0.2007, 0.2670, 0.0831, 0.4826, 0.2109, 0.3326, 0.2634, 0.2296], + device='cuda:2'), in_proj_covar=tensor([0.0535, 0.0626, 0.0556, 0.0661, 0.0660, 0.0604, 0.0557, 0.0643], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:31:52,802 INFO [train.py:901] (2/4) Epoch 26, batch 5200, loss[loss=0.2118, simple_loss=0.2978, pruned_loss=0.06294, over 8333.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2836, pruned_loss=0.0581, over 1618237.74 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:17,988 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.585e+02 3.464e+02 4.468e+02 1.375e+03, threshold=6.928e+02, percent-clipped=16.0 +2023-02-07 10:32:19,443 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:32:19,956 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 10:32:26,696 INFO [train.py:901] (2/4) Epoch 26, batch 5250, loss[loss=0.2221, simple_loss=0.303, pruned_loss=0.0706, over 8519.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05844, over 1614608.86 frames. ], batch size: 28, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:41,207 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0526, 1.6932, 3.4713, 1.5659, 2.6894, 3.8893, 3.9499, 3.2137], + device='cuda:2'), covar=tensor([0.1186, 0.1895, 0.0364, 0.2147, 0.0963, 0.0252, 0.0527, 0.0633], + device='cuda:2'), in_proj_covar=tensor([0.0298, 0.0322, 0.0286, 0.0314, 0.0314, 0.0273, 0.0429, 0.0301], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 10:32:52,557 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7333, 1.9039, 1.6396, 2.3291, 0.9699, 1.4992, 1.6934, 1.8492], + device='cuda:2'), covar=tensor([0.0745, 0.0672, 0.0859, 0.0413, 0.1108, 0.1302, 0.0774, 0.0680], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0193, 0.0244, 0.0212, 0.0202, 0.0245, 0.0248, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 10:33:00,346 INFO [train.py:901] (2/4) Epoch 26, batch 5300, loss[loss=0.2134, simple_loss=0.2964, pruned_loss=0.06522, over 8317.00 frames. 
], tot_loss[loss=0.2009, simple_loss=0.2844, pruned_loss=0.05873, over 1616267.89 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:33:27,782 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.392e+02 2.913e+02 3.782e+02 6.658e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-07 10:33:36,850 INFO [train.py:901] (2/4) Epoch 26, batch 5350, loss[loss=0.1682, simple_loss=0.2464, pruned_loss=0.04498, over 7937.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2828, pruned_loss=0.05796, over 1612214.98 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:10,287 INFO [train.py:901] (2/4) Epoch 26, batch 5400, loss[loss=0.2257, simple_loss=0.3103, pruned_loss=0.07054, over 8497.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2838, pruned_loss=0.05859, over 1613662.79 frames. ], batch size: 39, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:37,323 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.344e+02 3.061e+02 4.157e+02 9.885e+02, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 10:34:46,156 INFO [train.py:901] (2/4) Epoch 26, batch 5450, loss[loss=0.2114, simple_loss=0.2996, pruned_loss=0.06165, over 8521.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05843, over 1614667.34 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:57,844 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:06,589 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 10:35:17,351 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:20,579 INFO [train.py:901] (2/4) Epoch 26, batch 5500, loss[loss=0.2402, simple_loss=0.3192, pruned_loss=0.08063, over 8443.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2847, pruned_loss=0.0594, over 1614527.25 frames. ], batch size: 27, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:35:34,396 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207592.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:47,181 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.395e+02 2.975e+02 3.460e+02 7.775e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 10:35:56,712 INFO [train.py:901] (2/4) Epoch 26, batch 5550, loss[loss=0.21, simple_loss=0.2967, pruned_loss=0.06162, over 7974.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.284, pruned_loss=0.05932, over 1612919.96 frames. ], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:17,865 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:36:30,421 INFO [train.py:901] (2/4) Epoch 26, batch 5600, loss[loss=0.1852, simple_loss=0.2533, pruned_loss=0.05857, over 7785.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2842, pruned_loss=0.05939, over 1619325.77 frames. ], batch size: 19, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:55,073 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.412e+02 3.079e+02 3.750e+02 8.490e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-07 10:37:04,607 INFO [train.py:901] (2/4) Epoch 26, batch 5650, loss[loss=0.2096, simple_loss=0.2947, pruned_loss=0.06228, over 8123.00 frames. 
], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.0591, over 1613145.09 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:37:12,996 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 10:37:40,860 INFO [train.py:901] (2/4) Epoch 26, batch 5700, loss[loss=0.214, simple_loss=0.2957, pruned_loss=0.06613, over 8506.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.283, pruned_loss=0.05878, over 1607650.91 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:05,994 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.232e+02 2.912e+02 3.330e+02 6.698e+02, threshold=5.824e+02, percent-clipped=1.0 +2023-02-07 10:38:14,805 INFO [train.py:901] (2/4) Epoch 26, batch 5750, loss[loss=0.2119, simple_loss=0.295, pruned_loss=0.06439, over 8037.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2839, pruned_loss=0.05907, over 1611909.81 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:16,882 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 10:38:50,530 INFO [train.py:901] (2/4) Epoch 26, batch 5800, loss[loss=0.2178, simple_loss=0.3005, pruned_loss=0.06758, over 8332.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.285, pruned_loss=0.05976, over 1611910.87 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:15,995 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.614e+02 3.148e+02 4.020e+02 8.026e+02, threshold=6.297e+02, percent-clipped=4.0 +2023-02-07 10:39:16,245 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:24,804 INFO [train.py:901] (2/4) Epoch 26, batch 5850, loss[loss=0.1623, simple_loss=0.2591, pruned_loss=0.03277, over 8130.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05885, over 1616907.42 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:32,990 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:59,799 INFO [train.py:901] (2/4) Epoch 26, batch 5900, loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06147, over 8097.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2827, pruned_loss=0.0581, over 1617054.09 frames. ], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:15,809 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6965, 1.3185, 2.8248, 1.4997, 2.1750, 3.0183, 3.2061, 2.5528], + device='cuda:2'), covar=tensor([0.1201, 0.1812, 0.0382, 0.1983, 0.0902, 0.0309, 0.0636, 0.0639], + device='cuda:2'), in_proj_covar=tensor([0.0300, 0.0324, 0.0290, 0.0316, 0.0317, 0.0275, 0.0435, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 10:40:26,842 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.451e+02 2.961e+02 3.656e+02 5.483e+02, threshold=5.923e+02, percent-clipped=0.0 +2023-02-07 10:40:35,640 INFO [train.py:901] (2/4) Epoch 26, batch 5950, loss[loss=0.1756, simple_loss=0.2654, pruned_loss=0.04292, over 8236.00 frames. ], tot_loss[loss=0.2, simple_loss=0.283, pruned_loss=0.05849, over 1618989.42 frames. 
], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:54,935 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:41:09,815 INFO [train.py:901] (2/4) Epoch 26, batch 6000, loss[loss=0.1495, simple_loss=0.2291, pruned_loss=0.03496, over 5549.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05835, over 1609089.27 frames. ], batch size: 12, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:41:09,815 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 10:41:24,450 INFO [train.py:935] (2/4) Epoch 26, validation: loss=0.1721, simple_loss=0.2717, pruned_loss=0.03627, over 944034.00 frames. +2023-02-07 10:41:24,451 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 10:41:32,222 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7469, 1.5718, 2.2838, 1.4797, 1.2155, 2.2324, 0.3921, 1.3838], + device='cuda:2'), covar=tensor([0.1546, 0.1390, 0.0354, 0.1035, 0.2376, 0.0371, 0.1776, 0.1201], + device='cuda:2'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0222, 0.0276, 0.0144, 0.0170, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 10:41:51,019 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.308e+02 2.837e+02 3.630e+02 6.769e+02, threshold=5.675e+02, percent-clipped=2.0 +2023-02-07 10:42:00,859 INFO [train.py:901] (2/4) Epoch 26, batch 6050, loss[loss=0.1583, simple_loss=0.2621, pruned_loss=0.02722, over 8367.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05764, over 1618427.69 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:42:25,847 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2213, 4.2027, 3.7709, 2.0873, 3.6849, 3.7798, 3.7095, 3.5909], + device='cuda:2'), covar=tensor([0.0762, 0.0607, 0.1064, 0.4294, 0.0931, 0.1149, 0.1322, 0.0942], + device='cuda:2'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0541, 0.0427, 0.0451, 0.0427, 0.0395], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:42:36,027 INFO [train.py:901] (2/4) Epoch 26, batch 6100, loss[loss=0.2414, simple_loss=0.3173, pruned_loss=0.08271, over 7005.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05712, over 1611616.30 frames. ], batch size: 71, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:42:48,563 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 10:43:01,329 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.422e+02 2.947e+02 3.994e+02 1.088e+03, threshold=5.894e+02, percent-clipped=8.0 +2023-02-07 10:43:06,114 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5691, 4.5165, 4.1055, 1.8967, 3.9896, 4.2190, 4.1190, 3.9988], + device='cuda:2'), covar=tensor([0.0682, 0.0574, 0.1066, 0.5188, 0.0902, 0.0911, 0.1237, 0.0721], + device='cuda:2'), in_proj_covar=tensor([0.0532, 0.0450, 0.0438, 0.0545, 0.0429, 0.0454, 0.0429, 0.0397], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:43:10,773 INFO [train.py:901] (2/4) Epoch 26, batch 6150, loss[loss=0.2211, simple_loss=0.2861, pruned_loss=0.07806, over 7658.00 frames. 
], tot_loss[loss=0.198, simple_loss=0.2809, pruned_loss=0.0575, over 1609914.23 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:45,586 INFO [train.py:901] (2/4) Epoch 26, batch 6200, loss[loss=0.2491, simple_loss=0.3463, pruned_loss=0.07597, over 8495.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2819, pruned_loss=0.05744, over 1611094.91 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:53,058 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208283.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:44:10,220 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 2.196e+02 2.837e+02 3.308e+02 7.178e+02, threshold=5.674e+02, percent-clipped=2.0 +2023-02-07 10:44:19,005 INFO [train.py:901] (2/4) Epoch 26, batch 6250, loss[loss=0.2222, simple_loss=0.3148, pruned_loss=0.06482, over 8319.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05743, over 1613519.10 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:44:55,635 INFO [train.py:901] (2/4) Epoch 26, batch 6300, loss[loss=0.1982, simple_loss=0.2853, pruned_loss=0.05554, over 8241.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2812, pruned_loss=0.05718, over 1610854.41 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:45:10,475 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:45:20,542 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.443e+02 2.919e+02 3.618e+02 1.192e+03, threshold=5.838e+02, percent-clipped=3.0 +2023-02-07 10:45:29,138 INFO [train.py:901] (2/4) Epoch 26, batch 6350, loss[loss=0.1603, simple_loss=0.2364, pruned_loss=0.04207, over 7552.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2823, pruned_loss=0.05812, over 1609903.64 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:04,930 INFO [train.py:901] (2/4) Epoch 26, batch 6400, loss[loss=0.1939, simple_loss=0.2621, pruned_loss=0.06282, over 7535.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05799, over 1608565.61 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:30,392 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4884, 2.2788, 3.1138, 2.5195, 3.0600, 2.5190, 2.4354, 1.9649], + device='cuda:2'), covar=tensor([0.5616, 0.5598, 0.2140, 0.4126, 0.2873, 0.3433, 0.1958, 0.6297], + device='cuda:2'), in_proj_covar=tensor([0.0962, 0.1013, 0.0831, 0.0985, 0.1022, 0.0923, 0.0770, 0.0847], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:46:30,798 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.580e+02 3.188e+02 3.813e+02 6.849e+02, threshold=6.376e+02, percent-clipped=3.0 +2023-02-07 10:46:30,996 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:46:39,661 INFO [train.py:901] (2/4) Epoch 26, batch 6450, loss[loss=0.1646, simple_loss=0.2487, pruned_loss=0.04029, over 8241.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.05831, over 1609309.76 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:13,749 INFO [train.py:901] (2/4) Epoch 26, batch 6500, loss[loss=0.2004, simple_loss=0.2994, pruned_loss=0.05071, over 8466.00 frames. 
], tot_loss[loss=0.2001, simple_loss=0.2835, pruned_loss=0.05837, over 1609905.85 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:29,481 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208594.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:39,234 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.371e+02 2.869e+02 3.528e+02 8.936e+02, threshold=5.738e+02, percent-clipped=3.0 +2023-02-07 10:47:44,298 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0524, 1.8696, 2.3170, 2.0310, 2.3486, 2.1438, 1.9856, 1.1821], + device='cuda:2'), covar=tensor([0.5849, 0.4933, 0.2061, 0.3754, 0.2434, 0.3109, 0.1980, 0.5477], + device='cuda:2'), in_proj_covar=tensor([0.0968, 0.1019, 0.0835, 0.0991, 0.1025, 0.0928, 0.0773, 0.0852], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:47:48,770 INFO [train.py:901] (2/4) Epoch 26, batch 6550, loss[loss=0.206, simple_loss=0.2852, pruned_loss=0.06344, over 8363.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05837, over 1610806.93 frames. ], batch size: 24, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:52,197 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:56,150 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 10:48:12,882 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:48:22,496 INFO [train.py:901] (2/4) Epoch 26, batch 6600, loss[loss=0.2177, simple_loss=0.2966, pruned_loss=0.06937, over 8283.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.05786, over 1615989.91 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:48:49,205 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.581e+02 2.930e+02 3.571e+02 6.165e+02, threshold=5.859e+02, percent-clipped=2.0 +2023-02-07 10:48:58,727 INFO [train.py:901] (2/4) Epoch 26, batch 6650, loss[loss=0.1995, simple_loss=0.2763, pruned_loss=0.06134, over 7928.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.284, pruned_loss=0.05752, over 1622003.97 frames. ], batch size: 20, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:12,762 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:28,861 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:29,598 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2109, 1.9402, 2.4882, 2.1090, 2.5488, 2.2420, 2.1126, 1.4728], + device='cuda:2'), covar=tensor([0.5751, 0.4671, 0.1982, 0.3667, 0.2391, 0.3297, 0.1962, 0.4937], + device='cuda:2'), in_proj_covar=tensor([0.0967, 0.1017, 0.0834, 0.0989, 0.1024, 0.0927, 0.0771, 0.0849], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:49:33,534 INFO [train.py:901] (2/4) Epoch 26, batch 6700, loss[loss=0.1686, simple_loss=0.2584, pruned_loss=0.03937, over 7963.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2837, pruned_loss=0.05793, over 1614471.06 frames. 
], batch size: 21, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:46,222 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=208790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:59,642 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.530e+02 3.053e+02 4.076e+02 9.744e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-07 10:50:03,312 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7518, 1.7933, 1.6606, 2.2586, 1.0634, 1.4756, 1.7699, 1.8124], + device='cuda:2'), covar=tensor([0.0776, 0.0727, 0.0908, 0.0459, 0.1047, 0.1245, 0.0668, 0.0753], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0212, 0.0203, 0.0246, 0.0250, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 10:50:09,370 INFO [train.py:901] (2/4) Epoch 26, batch 6750, loss[loss=0.2214, simple_loss=0.3047, pruned_loss=0.06907, over 7308.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05794, over 1612501.18 frames. ], batch size: 72, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:30,915 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 10:50:43,657 INFO [train.py:901] (2/4) Epoch 26, batch 6800, loss[loss=0.1532, simple_loss=0.2347, pruned_loss=0.03581, over 7700.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2828, pruned_loss=0.05795, over 1609092.69 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:57,050 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 10:51:08,807 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.376e+02 2.847e+02 3.449e+02 1.016e+03, threshold=5.694e+02, percent-clipped=2.0 +2023-02-07 10:51:18,814 INFO [train.py:901] (2/4) Epoch 26, batch 6850, loss[loss=0.2104, simple_loss=0.3019, pruned_loss=0.05947, over 8283.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2837, pruned_loss=0.05803, over 1614328.54 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:51:19,503 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 10:51:30,530 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:51:33,401 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9909, 1.9940, 1.7828, 2.2391, 1.6281, 1.7338, 2.0005, 2.1021], + device='cuda:2'), covar=tensor([0.0613, 0.0716, 0.0778, 0.0613, 0.0860, 0.1038, 0.0642, 0.0631], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0212, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 10:51:37,687 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1848, 2.0295, 2.5892, 2.1293, 2.6395, 2.2552, 2.1122, 1.5553], + device='cuda:2'), covar=tensor([0.5763, 0.5284, 0.2168, 0.3873, 0.2548, 0.3197, 0.2048, 0.5522], + device='cuda:2'), in_proj_covar=tensor([0.0961, 0.1014, 0.0830, 0.0986, 0.1020, 0.0924, 0.0769, 0.0847], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:51:42,427 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3099, 2.1025, 2.6675, 2.1960, 2.6761, 2.4174, 2.2062, 1.6336], + device='cuda:2'), covar=tensor([0.5666, 0.5106, 0.2131, 0.4344, 0.2861, 0.3152, 0.2131, 0.5672], + device='cuda:2'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1019, 0.0924, 0.0769, 0.0846], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:51:45,311 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 10:51:54,549 INFO [train.py:901] (2/4) Epoch 26, batch 6900, loss[loss=0.2252, simple_loss=0.3058, pruned_loss=0.0723, over 8343.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.285, pruned_loss=0.05877, over 1615999.27 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:10,631 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4187, 2.4327, 1.8115, 2.1881, 1.9473, 1.6450, 1.9304, 1.9527], + device='cuda:2'), covar=tensor([0.1470, 0.0410, 0.1372, 0.0594, 0.0784, 0.1606, 0.1010, 0.0900], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0239, 0.0339, 0.0310, 0.0302, 0.0345, 0.0347, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 10:52:12,015 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:12,733 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4888, 2.2827, 2.9938, 2.3972, 2.9756, 2.5026, 2.3736, 1.7625], + device='cuda:2'), covar=tensor([0.5727, 0.5585, 0.2172, 0.4370, 0.2931, 0.3427, 0.1944, 0.6338], + device='cuda:2'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1018, 0.0923, 0.0769, 0.0846], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:52:20,224 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.430e+02 2.933e+02 3.890e+02 9.541e+02, threshold=5.866e+02, percent-clipped=7.0 +2023-02-07 10:52:23,022 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-07 10:52:23,726 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2470, 3.1524, 2.9757, 1.5822, 2.8598, 2.8842, 2.8956, 2.8070], + device='cuda:2'), covar=tensor([0.1140, 0.0822, 0.1235, 0.4523, 0.1175, 0.1402, 0.1538, 0.1123], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0456, 0.0442, 0.0552, 0.0436, 0.0460, 0.0436, 0.0402], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:52:28,989 INFO [train.py:901] (2/4) Epoch 26, batch 6950, loss[loss=0.2382, simple_loss=0.3235, pruned_loss=0.07648, over 8105.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.285, pruned_loss=0.05902, over 1617887.30 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:29,847 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:51,124 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:51,885 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7487, 1.6313, 2.3730, 1.9456, 2.2259, 1.7649, 1.5547, 1.0543], + device='cuda:2'), covar=tensor([0.7499, 0.6374, 0.2241, 0.4555, 0.3203, 0.4820, 0.3220, 0.5855], + device='cuda:2'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1017, 0.0922, 0.0770, 0.0847], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 10:53:04,210 INFO [train.py:901] (2/4) Epoch 26, batch 7000, loss[loss=0.153, simple_loss=0.237, pruned_loss=0.03452, over 7429.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05876, over 1615448.06 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:30,446 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.543e+02 3.075e+02 4.223e+02 1.225e+03, threshold=6.150e+02, percent-clipped=4.0 +2023-02-07 10:53:38,406 INFO [train.py:901] (2/4) Epoch 26, batch 7050, loss[loss=0.2395, simple_loss=0.3229, pruned_loss=0.07808, over 8624.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2851, pruned_loss=0.05922, over 1617087.70 frames. ], batch size: 39, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:44,275 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3603, 1.5922, 2.1385, 1.2645, 1.7315, 1.5652, 1.4005, 1.6726], + device='cuda:2'), covar=tensor([0.1430, 0.2013, 0.0720, 0.3513, 0.1452, 0.2553, 0.1887, 0.2241], + device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0633, 0.0563, 0.0668, 0.0660, 0.0610, 0.0561, 0.0648], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:54:15,155 INFO [train.py:901] (2/4) Epoch 26, batch 7100, loss[loss=0.1775, simple_loss=0.2684, pruned_loss=0.04329, over 7940.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2834, pruned_loss=0.05896, over 1611312.92 frames. 
], batch size: 20, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:54:42,203 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.538e+02 3.057e+02 3.964e+02 1.199e+03, threshold=6.114e+02, percent-clipped=9.0 +2023-02-07 10:54:49,204 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209220.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:54:50,428 INFO [train.py:901] (2/4) Epoch 26, batch 7150, loss[loss=0.2141, simple_loss=0.2977, pruned_loss=0.06522, over 8091.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05802, over 1617030.91 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:25,268 INFO [train.py:901] (2/4) Epoch 26, batch 7200, loss[loss=0.1447, simple_loss=0.2265, pruned_loss=0.03145, over 7791.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05874, over 1621679.84 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:26,104 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:51,825 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:52,275 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.329e+02 2.733e+02 3.562e+02 6.414e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 10:55:56,507 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209316.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:00,325 INFO [train.py:901] (2/4) Epoch 26, batch 7250, loss[loss=0.1839, simple_loss=0.2797, pruned_loss=0.04401, over 8188.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2831, pruned_loss=0.05809, over 1620958.28 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:08,449 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:33,552 INFO [train.py:901] (2/4) Epoch 26, batch 7300, loss[loss=0.1844, simple_loss=0.2635, pruned_loss=0.05263, over 7540.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.284, pruned_loss=0.05821, over 1622812.12 frames. ], batch size: 18, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:55,996 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1894, 4.1719, 3.8439, 2.1424, 3.7077, 3.8078, 3.7692, 3.6566], + device='cuda:2'), covar=tensor([0.0801, 0.0597, 0.1049, 0.4315, 0.0964, 0.1115, 0.1408, 0.0829], + device='cuda:2'), in_proj_covar=tensor([0.0541, 0.0458, 0.0445, 0.0556, 0.0439, 0.0464, 0.0439, 0.0404], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 10:56:59,843 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.463e+02 3.055e+02 3.934e+02 7.151e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 10:57:06,739 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 10:57:09,434 INFO [train.py:901] (2/4) Epoch 26, batch 7350, loss[loss=0.1847, simple_loss=0.2805, pruned_loss=0.04444, over 8192.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2847, pruned_loss=0.05923, over 1621034.10 frames. 
], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:26,301 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 10:57:42,852 INFO [train.py:901] (2/4) Epoch 26, batch 7400, loss[loss=0.1418, simple_loss=0.2254, pruned_loss=0.02915, over 6313.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2845, pruned_loss=0.05884, over 1623083.56 frames. ], batch size: 14, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:51,018 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:04,811 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 10:58:09,479 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.534e+02 3.042e+02 3.812e+02 9.347e+02, threshold=6.084e+02, percent-clipped=5.0 +2023-02-07 10:58:17,134 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8073, 1.7340, 2.5371, 1.4771, 1.3223, 2.5054, 0.4638, 1.5120], + device='cuda:2'), covar=tensor([0.1675, 0.1411, 0.0349, 0.1756, 0.2556, 0.0338, 0.1926, 0.1624], + device='cuda:2'), in_proj_covar=tensor([0.0197, 0.0205, 0.0133, 0.0224, 0.0276, 0.0144, 0.0172, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 10:58:17,541 INFO [train.py:901] (2/4) Epoch 26, batch 7450, loss[loss=0.1877, simple_loss=0.2744, pruned_loss=0.05046, over 8072.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2848, pruned_loss=0.05907, over 1619867.51 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:58:22,477 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:47,409 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209564.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:52,744 INFO [train.py:901] (2/4) Epoch 26, batch 7500, loss[loss=0.2026, simple_loss=0.2904, pruned_loss=0.05743, over 8496.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.05888, over 1615337.53 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:59:18,692 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.436e+02 2.983e+02 3.503e+02 8.056e+02, threshold=5.967e+02, percent-clipped=5.0 +2023-02-07 10:59:24,167 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:27,341 INFO [train.py:901] (2/4) Epoch 26, batch 7550, loss[loss=0.1986, simple_loss=0.2865, pruned_loss=0.05534, over 8244.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2839, pruned_loss=0.05827, over 1617888.24 frames. 
], batch size: 24, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 10:59:28,768 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:54,107 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:55,496 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2746, 3.1708, 2.9436, 1.6649, 2.8606, 2.9535, 2.8165, 2.8063], + device='cuda:2'), covar=tensor([0.1108, 0.0777, 0.1310, 0.4466, 0.1112, 0.1264, 0.1502, 0.1121], + device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0456, 0.0443, 0.0554, 0.0436, 0.0462, 0.0436, 0.0403], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:00:01,941 INFO [train.py:901] (2/4) Epoch 26, batch 7600, loss[loss=0.2186, simple_loss=0.2905, pruned_loss=0.07336, over 7784.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.05889, over 1614989.87 frames. ], batch size: 19, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:07,010 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:25,111 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209706.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:27,671 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.404e+02 2.880e+02 3.478e+02 6.437e+02, threshold=5.761e+02, percent-clipped=3.0 +2023-02-07 11:00:35,725 INFO [train.py:901] (2/4) Epoch 26, batch 7650, loss[loss=0.2233, simple_loss=0.2997, pruned_loss=0.07344, over 8523.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2831, pruned_loss=0.05829, over 1611622.82 frames. ], batch size: 28, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:43,090 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:00,269 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.24 vs. limit=5.0 +2023-02-07 11:01:10,730 INFO [train.py:901] (2/4) Epoch 26, batch 7700, loss[loss=0.2135, simple_loss=0.3014, pruned_loss=0.06282, over 8583.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2851, pruned_loss=0.05941, over 1614657.24 frames. ], batch size: 31, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:12,912 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:14,129 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 11:01:23,013 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.43 vs. 
limit=2.0 +2023-02-07 11:01:36,854 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.688e+02 3.083e+02 3.850e+02 9.382e+02, threshold=6.167e+02, percent-clipped=8.0 +2023-02-07 11:01:39,195 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5155, 1.8296, 2.9072, 1.4203, 2.0489, 1.9056, 1.5662, 2.1466], + device='cuda:2'), covar=tensor([0.2068, 0.2580, 0.0886, 0.4777, 0.2116, 0.3370, 0.2530, 0.2347], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0631, 0.0560, 0.0666, 0.0659, 0.0610, 0.0559, 0.0644], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:01:44,890 INFO [train.py:901] (2/4) Epoch 26, batch 7750, loss[loss=0.2276, simple_loss=0.3104, pruned_loss=0.07246, over 8453.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2867, pruned_loss=0.06019, over 1621133.71 frames. ], batch size: 49, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:48,919 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:19,758 INFO [train.py:901] (2/4) Epoch 26, batch 7800, loss[loss=0.1408, simple_loss=0.2255, pruned_loss=0.02811, over 7799.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2854, pruned_loss=0.05959, over 1614385.78 frames. ], batch size: 19, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:02:19,818 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:44,106 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([6.0770, 1.8418, 6.1625, 2.3051, 5.6404, 5.3197, 5.7438, 5.6807], + device='cuda:2'), covar=tensor([0.0488, 0.5075, 0.0393, 0.3870, 0.0992, 0.0888, 0.0493, 0.0492], + device='cuda:2'), in_proj_covar=tensor([0.0672, 0.0666, 0.0734, 0.0658, 0.0742, 0.0633, 0.0633, 0.0709], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:02:45,320 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.465e+02 2.962e+02 3.430e+02 5.705e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-07 11:02:53,248 INFO [train.py:901] (2/4) Epoch 26, batch 7850, loss[loss=0.2022, simple_loss=0.2881, pruned_loss=0.05815, over 8338.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2867, pruned_loss=0.06003, over 1616179.40 frames. ], batch size: 26, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:01,886 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:07,203 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:18,180 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:23,279 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209968.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:25,891 INFO [train.py:901] (2/4) Epoch 26, batch 7900, loss[loss=0.2169, simple_loss=0.3066, pruned_loss=0.06358, over 8482.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2857, pruned_loss=0.05962, over 1617006.93 frames. 
], batch size: 27, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:35,880 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:36,569 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:51,903 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.305e+02 2.795e+02 3.387e+02 5.942e+02, threshold=5.591e+02, percent-clipped=1.0 +2023-02-07 11:03:54,070 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210013.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:59,739 INFO [train.py:901] (2/4) Epoch 26, batch 7950, loss[loss=0.1777, simple_loss=0.2606, pruned_loss=0.04737, over 7814.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05913, over 1615086.38 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:06,008 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210031.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:18,574 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:22,764 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:33,086 INFO [train.py:901] (2/4) Epoch 26, batch 8000, loss[loss=0.2085, simple_loss=0.2909, pruned_loss=0.06304, over 8456.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2839, pruned_loss=0.05907, over 1614326.14 frames. ], batch size: 27, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:35,219 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:40,668 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210083.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:57,993 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.316e+02 2.819e+02 3.710e+02 9.270e+02, threshold=5.638e+02, percent-clipped=7.0 +2023-02-07 11:05:05,823 INFO [train.py:901] (2/4) Epoch 26, batch 8050, loss[loss=0.196, simple_loss=0.2709, pruned_loss=0.06055, over 7551.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2812, pruned_loss=0.05862, over 1596298.97 frames. ], batch size: 18, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:05:13,568 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5897, 1.7791, 4.7672, 1.8428, 4.3041, 3.9656, 4.2861, 4.2367], + device='cuda:2'), covar=tensor([0.0535, 0.4667, 0.0494, 0.4231, 0.0943, 0.0898, 0.0550, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0672, 0.0666, 0.0734, 0.0657, 0.0742, 0.0632, 0.0634, 0.0709], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:05:15,074 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. limit=5.0 +2023-02-07 11:05:37,888 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 11:05:43,120 INFO [train.py:901] (2/4) Epoch 27, batch 0, loss[loss=0.2029, simple_loss=0.2969, pruned_loss=0.05445, over 8499.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2969, pruned_loss=0.05445, over 8499.00 frames. 
], batch size: 28, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:05:43,121 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 11:05:54,192 INFO [train.py:935] (2/4) Epoch 27, validation: loss=0.172, simple_loss=0.2713, pruned_loss=0.03628, over 944034.00 frames. +2023-02-07 11:05:54,193 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 11:06:01,185 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:08,369 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 11:06:24,781 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:28,658 INFO [train.py:901] (2/4) Epoch 27, batch 50, loss[loss=0.2023, simple_loss=0.2758, pruned_loss=0.06441, over 8236.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2882, pruned_loss=0.059, over 368502.70 frames. ], batch size: 22, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:06:33,573 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.417e+02 2.930e+02 3.516e+02 7.088e+02, threshold=5.860e+02, percent-clipped=5.0 +2023-02-07 11:06:41,911 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 11:06:43,307 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:56,876 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:04,541 INFO [train.py:901] (2/4) Epoch 27, batch 100, loss[loss=0.1955, simple_loss=0.2731, pruned_loss=0.05892, over 8251.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2875, pruned_loss=0.05945, over 650061.38 frames. ], batch size: 22, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:05,004 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 11:07:05,182 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 11:07:13,376 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210268.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:38,001 INFO [train.py:901] (2/4) Epoch 27, batch 150, loss[loss=0.1907, simple_loss=0.2816, pruned_loss=0.04983, over 8252.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2848, pruned_loss=0.05838, over 865500.51 frames. ], batch size: 24, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:41,156 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.352e+02 2.905e+02 3.661e+02 1.089e+03, threshold=5.811e+02, percent-clipped=3.0 +2023-02-07 11:07:41,641 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 11:08:02,854 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:14,089 INFO [train.py:901] (2/4) Epoch 27, batch 200, loss[loss=0.2333, simple_loss=0.3165, pruned_loss=0.07503, over 8351.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.286, pruned_loss=0.05911, over 1035954.31 frames. 
], batch size: 24, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:08:20,481 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:48,346 INFO [train.py:901] (2/4) Epoch 27, batch 250, loss[loss=0.2198, simple_loss=0.3046, pruned_loss=0.06754, over 8107.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05906, over 1161028.33 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:08:49,371 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 11:08:51,589 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.304e+02 2.819e+02 3.559e+02 6.263e+02, threshold=5.638e+02, percent-clipped=1.0 +2023-02-07 11:08:57,578 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 11:08:57,629 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:59,063 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210421.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:06,468 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 11:09:10,764 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1624, 1.9918, 2.5417, 2.1441, 2.5281, 2.2743, 2.1093, 1.4099], + device='cuda:2'), covar=tensor([0.6128, 0.5176, 0.2186, 0.3906, 0.2747, 0.2988, 0.1982, 0.5703], + device='cuda:2'), in_proj_covar=tensor([0.0965, 0.1017, 0.0828, 0.0985, 0.1021, 0.0926, 0.0772, 0.0847], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 11:09:16,567 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:23,015 INFO [train.py:901] (2/4) Epoch 27, batch 300, loss[loss=0.2118, simple_loss=0.2983, pruned_loss=0.06262, over 8285.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2845, pruned_loss=0.05862, over 1263195.89 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:09:57,338 INFO [train.py:901] (2/4) Epoch 27, batch 350, loss[loss=0.2063, simple_loss=0.2914, pruned_loss=0.06059, over 8520.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2847, pruned_loss=0.05917, over 1342998.80 frames. ], batch size: 28, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:10:00,682 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.321e+02 2.740e+02 3.479e+02 7.751e+02, threshold=5.481e+02, percent-clipped=4.0 +2023-02-07 11:10:06,930 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3549, 1.6905, 1.2909, 2.7675, 1.1900, 1.2518, 1.9702, 1.8591], + device='cuda:2'), covar=tensor([0.1661, 0.1458, 0.2024, 0.0413, 0.1456, 0.2120, 0.1038, 0.1105], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0194, 0.0246, 0.0211, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 11:10:16,905 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:10:29,882 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-07 11:10:30,766 INFO [train.py:901] (2/4) Epoch 27, batch 400, loss[loss=0.2196, simple_loss=0.3099, pruned_loss=0.06465, over 8327.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2848, pruned_loss=0.05875, over 1404963.08 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:06,891 INFO [train.py:901] (2/4) Epoch 27, batch 450, loss[loss=0.2056, simple_loss=0.2826, pruned_loss=0.06427, over 8752.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2843, pruned_loss=0.05798, over 1455614.14 frames. ], batch size: 34, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:10,232 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.445e+02 3.096e+02 3.744e+02 6.670e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 11:11:37,199 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:11:40,168 INFO [train.py:901] (2/4) Epoch 27, batch 500, loss[loss=0.2149, simple_loss=0.3085, pruned_loss=0.06066, over 8515.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2837, pruned_loss=0.05798, over 1488927.82 frames. ], batch size: 28, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:12:01,200 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:12,813 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:15,964 INFO [train.py:901] (2/4) Epoch 27, batch 550, loss[loss=0.2061, simple_loss=0.2803, pruned_loss=0.06591, over 7649.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2842, pruned_loss=0.05792, over 1521863.51 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:12:19,367 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.336e+02 2.792e+02 3.793e+02 8.487e+02, threshold=5.584e+02, percent-clipped=3.0 +2023-02-07 11:12:50,305 INFO [train.py:901] (2/4) Epoch 27, batch 600, loss[loss=0.2052, simple_loss=0.2884, pruned_loss=0.06099, over 8504.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2842, pruned_loss=0.05806, over 1547919.36 frames. ], batch size: 28, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:08,465 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:11,592 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 11:13:13,655 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:23,582 INFO [train.py:901] (2/4) Epoch 27, batch 650, loss[loss=0.1636, simple_loss=0.2463, pruned_loss=0.04051, over 7971.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2831, pruned_loss=0.05719, over 1562270.44 frames. 
], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:28,267 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.351e+02 2.894e+02 3.474e+02 6.032e+02, threshold=5.788e+02, percent-clipped=3.0 +2023-02-07 11:13:32,523 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:54,801 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2462, 2.0690, 2.6135, 2.1600, 2.5711, 2.2914, 2.1744, 1.4694], + device='cuda:2'), covar=tensor([0.5796, 0.5102, 0.2198, 0.3767, 0.2733, 0.2898, 0.1837, 0.5536], + device='cuda:2'), in_proj_covar=tensor([0.0960, 0.1014, 0.0826, 0.0983, 0.1017, 0.0925, 0.0768, 0.0845], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 11:13:54,955 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-07 11:13:59,783 INFO [train.py:901] (2/4) Epoch 27, batch 700, loss[loss=0.1954, simple_loss=0.2848, pruned_loss=0.05296, over 8445.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2838, pruned_loss=0.05771, over 1575496.58 frames. ], batch size: 27, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:32,723 INFO [train.py:901] (2/4) Epoch 27, batch 750, loss[loss=0.1937, simple_loss=0.2729, pruned_loss=0.05725, over 8083.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2832, pruned_loss=0.05778, over 1583045.51 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:34,140 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8278, 1.5207, 1.7218, 1.4199, 1.0882, 1.5239, 1.7237, 1.3324], + device='cuda:2'), covar=tensor([0.0536, 0.1208, 0.1575, 0.1402, 0.0575, 0.1441, 0.0675, 0.0673], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0161, 0.0101, 0.0164, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 11:14:35,973 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.536e+02 2.996e+02 3.960e+02 1.304e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 11:14:39,933 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 11:14:54,964 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 11:15:04,237 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 11:15:08,843 INFO [train.py:901] (2/4) Epoch 27, batch 800, loss[loss=0.1586, simple_loss=0.2407, pruned_loss=0.03821, over 7789.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.0579, over 1592106.59 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:15:35,001 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:40,378 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:42,219 INFO [train.py:901] (2/4) Epoch 27, batch 850, loss[loss=0.2132, simple_loss=0.3065, pruned_loss=0.05994, over 8291.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2829, pruned_loss=0.0578, over 1597868.95 frames. 
], batch size: 23, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:15:45,626 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.265e+02 2.725e+02 3.482e+02 8.151e+02, threshold=5.450e+02, percent-clipped=2.0 +2023-02-07 11:15:57,721 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:05,943 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.18 vs. limit=5.0 +2023-02-07 11:16:10,467 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:13,259 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0965, 1.4136, 3.5246, 1.7304, 2.4846, 3.9186, 3.9852, 3.3690], + device='cuda:2'), covar=tensor([0.1126, 0.2015, 0.0343, 0.1897, 0.1029, 0.0249, 0.0562, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0304, 0.0327, 0.0291, 0.0319, 0.0318, 0.0277, 0.0434, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 11:16:17,666 INFO [train.py:901] (2/4) Epoch 27, batch 900, loss[loss=0.1537, simple_loss=0.2381, pruned_loss=0.03468, over 7791.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05842, over 1602436.05 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:51,925 INFO [train.py:901] (2/4) Epoch 27, batch 950, loss[loss=0.2134, simple_loss=0.2979, pruned_loss=0.06441, over 8473.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2835, pruned_loss=0.05852, over 1603422.77 frames. ], batch size: 48, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:54,832 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:55,242 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.483e+02 2.981e+02 4.008e+02 9.530e+02, threshold=5.961e+02, percent-clipped=10.0 +2023-02-07 11:17:06,152 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:17,582 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211143.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:18,072 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 11:17:26,813 INFO [train.py:901] (2/4) Epoch 27, batch 1000, loss[loss=0.1964, simple_loss=0.2735, pruned_loss=0.05968, over 7652.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05875, over 1606182.17 frames. 
], batch size: 19, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:17:27,718 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8703, 1.6689, 3.1717, 1.4383, 2.3714, 3.4264, 3.5602, 2.9372], + device='cuda:2'), covar=tensor([0.1202, 0.1645, 0.0373, 0.2172, 0.0882, 0.0296, 0.0652, 0.0590], + device='cuda:2'), in_proj_covar=tensor([0.0306, 0.0327, 0.0293, 0.0321, 0.0320, 0.0278, 0.0436, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 11:17:30,477 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:53,311 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 11:18:03,302 INFO [train.py:901] (2/4) Epoch 27, batch 1050, loss[loss=0.2064, simple_loss=0.2874, pruned_loss=0.06275, over 8407.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2851, pruned_loss=0.05922, over 1610295.10 frames. ], batch size: 49, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:18:05,253 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 11:18:07,260 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.494e+02 3.070e+02 3.818e+02 8.233e+02, threshold=6.140e+02, percent-clipped=4.0 +2023-02-07 11:18:27,179 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:18:27,212 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6075, 1.5404, 2.1246, 1.3253, 1.1892, 2.0711, 0.3364, 1.2833], + device='cuda:2'), covar=tensor([0.1447, 0.1124, 0.0341, 0.1013, 0.2316, 0.0445, 0.1801, 0.1142], + device='cuda:2'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0221, 0.0275, 0.0145, 0.0171, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 11:18:36,463 INFO [train.py:901] (2/4) Epoch 27, batch 1100, loss[loss=0.1741, simple_loss=0.2574, pruned_loss=0.04536, over 8240.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2861, pruned_loss=0.05982, over 1606145.45 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:18:48,268 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9521, 3.5979, 2.1896, 2.8157, 2.7239, 2.0049, 2.8807, 3.0185], + device='cuda:2'), covar=tensor([0.1780, 0.0416, 0.1292, 0.0784, 0.0888, 0.1566, 0.1140, 0.1257], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0240, 0.0341, 0.0312, 0.0303, 0.0345, 0.0346, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 11:19:01,144 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0790, 2.2529, 1.7857, 2.8027, 1.4690, 1.5572, 2.1327, 2.2738], + device='cuda:2'), covar=tensor([0.0731, 0.0737, 0.0920, 0.0360, 0.1081, 0.1367, 0.0793, 0.0728], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0195, 0.0246, 0.0212, 0.0203, 0.0246, 0.0251, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 11:19:13,342 INFO [train.py:901] (2/4) Epoch 27, batch 1150, loss[loss=0.2431, simple_loss=0.3096, pruned_loss=0.08825, over 6980.00 frames. 
], tot_loss[loss=0.2008, simple_loss=0.2842, pruned_loss=0.05867, over 1610497.83 frames. ], batch size: 71, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:15,964 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 11:19:17,279 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.371e+02 2.782e+02 3.549e+02 6.262e+02, threshold=5.564e+02, percent-clipped=1.0 +2023-02-07 11:19:30,075 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1962, 1.4618, 1.7286, 1.3515, 0.9506, 1.5031, 1.7393, 1.6440], + device='cuda:2'), covar=tensor([0.0483, 0.1290, 0.1715, 0.1529, 0.0613, 0.1478, 0.0720, 0.0649], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 11:19:40,861 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211346.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:19:46,763 INFO [train.py:901] (2/4) Epoch 27, batch 1200, loss[loss=0.2153, simple_loss=0.3041, pruned_loss=0.06326, over 7975.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2848, pruned_loss=0.05856, over 1616117.20 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:53,751 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:11,163 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:18,555 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211399.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:21,778 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211404.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:22,315 INFO [train.py:901] (2/4) Epoch 27, batch 1250, loss[loss=0.1782, simple_loss=0.271, pruned_loss=0.04272, over 8277.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2838, pruned_loss=0.0583, over 1612791.35 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:20:26,160 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.344e+02 2.922e+02 3.484e+02 6.390e+02, threshold=5.843e+02, percent-clipped=2.0 +2023-02-07 11:20:29,700 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211415.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:35,529 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211424.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:39,478 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:46,233 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211440.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:56,234 INFO [train.py:901] (2/4) Epoch 27, batch 1300, loss[loss=0.2206, simple_loss=0.3029, pruned_loss=0.0691, over 8105.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.05764, over 1612148.41 frames. 
], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:21:00,472 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:21:24,569 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:21:30,515 INFO [train.py:901] (2/4) Epoch 27, batch 1350, loss[loss=0.1755, simple_loss=0.2667, pruned_loss=0.0421, over 8513.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2821, pruned_loss=0.05733, over 1609608.55 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:21:34,485 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.433e+02 2.859e+02 3.519e+02 6.900e+02, threshold=5.717e+02, percent-clipped=5.0 +2023-02-07 11:21:43,605 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:22:05,944 INFO [train.py:901] (2/4) Epoch 27, batch 1400, loss[loss=0.2075, simple_loss=0.2867, pruned_loss=0.06409, over 7930.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05772, over 1614520.03 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:22:39,557 INFO [train.py:901] (2/4) Epoch 27, batch 1450, loss[loss=0.1877, simple_loss=0.2712, pruned_loss=0.05205, over 8137.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.0578, over 1612243.93 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:22:43,647 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.739e+02 3.417e+02 5.363e+02 1.739e+03, threshold=6.835e+02, percent-clipped=22.0 +2023-02-07 11:22:45,696 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 11:23:15,887 INFO [train.py:901] (2/4) Epoch 27, batch 1500, loss[loss=0.2142, simple_loss=0.3025, pruned_loss=0.06291, over 8328.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2818, pruned_loss=0.0575, over 1611591.41 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:23:49,651 INFO [train.py:901] (2/4) Epoch 27, batch 1550, loss[loss=0.1795, simple_loss=0.2786, pruned_loss=0.04014, over 8192.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.282, pruned_loss=0.05781, over 1610015.61 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:23:53,681 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.372e+02 3.027e+02 3.476e+02 5.786e+02, threshold=6.054e+02, percent-clipped=0.0 +2023-02-07 11:23:57,833 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211717.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:15,280 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:19,426 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:24,773 INFO [train.py:901] (2/4) Epoch 27, batch 1600, loss[loss=0.1974, simple_loss=0.2822, pruned_loss=0.05627, over 8135.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2822, pruned_loss=0.05824, over 1613632.08 frames. 
], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:24:28,338 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0278, 2.3224, 3.7952, 2.1602, 2.0104, 3.8109, 0.7402, 2.2621], + device='cuda:2'), covar=tensor([0.1011, 0.1084, 0.0173, 0.1360, 0.2059, 0.0172, 0.1922, 0.1269], + device='cuda:2'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0221, 0.0275, 0.0144, 0.0172, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 11:24:38,873 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:59,721 INFO [train.py:901] (2/4) Epoch 27, batch 1650, loss[loss=0.1763, simple_loss=0.2676, pruned_loss=0.04243, over 8481.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2811, pruned_loss=0.05808, over 1610530.54 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:25:03,546 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 11:25:03,802 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.547e+02 3.089e+02 3.889e+02 1.356e+03, threshold=6.177e+02, percent-clipped=3.0 +2023-02-07 11:25:34,306 INFO [train.py:901] (2/4) Epoch 27, batch 1700, loss[loss=0.1825, simple_loss=0.2656, pruned_loss=0.0497, over 7531.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2809, pruned_loss=0.0578, over 1609288.13 frames. ], batch size: 18, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:25:39,902 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:25:57,119 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8755, 1.4342, 3.5724, 1.5493, 2.4756, 3.9447, 4.0374, 3.3704], + device='cuda:2'), covar=tensor([0.1251, 0.1956, 0.0289, 0.2070, 0.0980, 0.0222, 0.0526, 0.0536], + device='cuda:2'), in_proj_covar=tensor([0.0307, 0.0329, 0.0295, 0.0322, 0.0323, 0.0279, 0.0440, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 11:25:59,112 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211889.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:26:09,561 INFO [train.py:901] (2/4) Epoch 27, batch 1750, loss[loss=0.1802, simple_loss=0.259, pruned_loss=0.05072, over 7957.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.28, pruned_loss=0.05737, over 1603824.02 frames. ], batch size: 21, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:26:13,477 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 2.460e+02 2.972e+02 3.773e+02 5.726e+02, threshold=5.944e+02, percent-clipped=0.0 +2023-02-07 11:26:24,666 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.77 vs. limit=5.0 +2023-02-07 11:26:43,426 INFO [train.py:901] (2/4) Epoch 27, batch 1800, loss[loss=0.1734, simple_loss=0.2558, pruned_loss=0.04551, over 7931.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2806, pruned_loss=0.05736, over 1606051.21 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:27:03,804 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 11:27:20,496 INFO [train.py:901] (2/4) Epoch 27, batch 1850, loss[loss=0.1971, simple_loss=0.2698, pruned_loss=0.06216, over 7182.00 frames. 
], tot_loss[loss=0.1973, simple_loss=0.2803, pruned_loss=0.05715, over 1606832.65 frames. ], batch size: 16, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:27:24,575 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.252e+02 2.767e+02 3.484e+02 5.487e+02, threshold=5.534e+02, percent-clipped=0.0 +2023-02-07 11:27:54,169 INFO [train.py:901] (2/4) Epoch 27, batch 1900, loss[loss=0.2143, simple_loss=0.296, pruned_loss=0.06633, over 8592.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2807, pruned_loss=0.05715, over 1609794.59 frames. ], batch size: 31, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:28:23,913 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8978, 3.8304, 3.5082, 1.8938, 3.4307, 3.4815, 3.3766, 3.3869], + device='cuda:2'), covar=tensor([0.0911, 0.0681, 0.1145, 0.4251, 0.0990, 0.1178, 0.1433, 0.0859], + device='cuda:2'), in_proj_covar=tensor([0.0540, 0.0456, 0.0442, 0.0552, 0.0438, 0.0460, 0.0437, 0.0402], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:28:25,181 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 11:28:28,382 INFO [train.py:901] (2/4) Epoch 27, batch 1950, loss[loss=0.2339, simple_loss=0.3248, pruned_loss=0.07155, over 8430.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2818, pruned_loss=0.05743, over 1610826.63 frames. ], batch size: 49, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:28:33,125 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.484e+02 3.059e+02 3.727e+02 7.478e+02, threshold=6.119e+02, percent-clipped=3.0 +2023-02-07 11:28:38,414 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 11:28:39,363 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212119.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:28:41,242 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212122.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 11:28:56,776 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212144.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:28:57,275 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 11:28:57,487 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212145.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:29:03,979 INFO [train.py:901] (2/4) Epoch 27, batch 2000, loss[loss=0.2168, simple_loss=0.2995, pruned_loss=0.06703, over 8572.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.05736, over 1615712.20 frames. ], batch size: 31, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:29:14,160 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212170.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:29:37,644 INFO [train.py:901] (2/4) Epoch 27, batch 2050, loss[loss=0.2179, simple_loss=0.3045, pruned_loss=0.06567, over 8116.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2816, pruned_loss=0.05749, over 1607476.97 frames. 
], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:29:41,746 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.364e+02 2.966e+02 3.655e+02 9.314e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-07 11:29:53,466 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6724, 1.3776, 1.6966, 1.3474, 0.9906, 1.4609, 1.5091, 1.5036], + device='cuda:2'), covar=tensor([0.0589, 0.1284, 0.1647, 0.1522, 0.0569, 0.1436, 0.0683, 0.0650], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0163, 0.0102, 0.0164, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 11:30:13,811 INFO [train.py:901] (2/4) Epoch 27, batch 2100, loss[loss=0.195, simple_loss=0.2769, pruned_loss=0.05651, over 8448.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2819, pruned_loss=0.0577, over 1607434.71 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:30:24,389 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1650, 4.1174, 3.7745, 2.1365, 3.6280, 3.8693, 3.7479, 3.6539], + device='cuda:2'), covar=tensor([0.0871, 0.0607, 0.1118, 0.4289, 0.0974, 0.0915, 0.1286, 0.0761], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0456, 0.0442, 0.0554, 0.0438, 0.0459, 0.0437, 0.0404], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:30:25,127 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:30:42,129 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-02-07 11:30:47,851 INFO [train.py:901] (2/4) Epoch 27, batch 2150, loss[loss=0.2232, simple_loss=0.3125, pruned_loss=0.06692, over 8584.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2824, pruned_loss=0.05792, over 1608780.77 frames. ], batch size: 34, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:30:48,029 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3256, 2.2670, 1.6507, 2.0497, 1.8752, 1.3885, 1.7281, 1.9148], + device='cuda:2'), covar=tensor([0.1569, 0.0495, 0.1350, 0.0675, 0.0899, 0.1776, 0.1169, 0.1014], + device='cuda:2'), in_proj_covar=tensor([0.0365, 0.0240, 0.0341, 0.0316, 0.0304, 0.0349, 0.0350, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 11:30:51,762 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.395e+02 2.764e+02 3.582e+02 6.444e+02, threshold=5.527e+02, percent-clipped=1.0 +2023-02-07 11:31:22,776 INFO [train.py:901] (2/4) Epoch 27, batch 2200, loss[loss=0.1875, simple_loss=0.2766, pruned_loss=0.04919, over 8506.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.05831, over 1611821.48 frames. ], batch size: 28, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:31:57,346 INFO [train.py:901] (2/4) Epoch 27, batch 2250, loss[loss=0.1834, simple_loss=0.2652, pruned_loss=0.0508, over 7226.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2821, pruned_loss=0.05788, over 1612610.48 frames. 
], batch size: 16, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:32:01,575 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.301e+02 2.815e+02 3.457e+02 5.141e+02, threshold=5.631e+02, percent-clipped=0.0 +2023-02-07 11:32:31,457 INFO [train.py:901] (2/4) Epoch 27, batch 2300, loss[loss=0.2107, simple_loss=0.2893, pruned_loss=0.06607, over 8552.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2816, pruned_loss=0.05784, over 1614878.09 frames. ], batch size: 34, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:32:33,511 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212457.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:32:39,373 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212466.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:33:07,931 INFO [train.py:901] (2/4) Epoch 27, batch 2350, loss[loss=0.2051, simple_loss=0.3035, pruned_loss=0.05331, over 8456.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2818, pruned_loss=0.05795, over 1616825.20 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:33:12,143 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.366e+02 2.801e+02 3.492e+02 6.818e+02, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 11:33:42,851 INFO [train.py:901] (2/4) Epoch 27, batch 2400, loss[loss=0.2212, simple_loss=0.2984, pruned_loss=0.07203, over 8296.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2819, pruned_loss=0.0578, over 1616437.76 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:01,664 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212581.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 11:34:04,906 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7284, 4.7189, 4.2137, 2.0998, 4.1709, 4.3331, 4.2121, 4.1409], + device='cuda:2'), covar=tensor([0.0676, 0.0516, 0.1074, 0.4410, 0.0892, 0.0848, 0.1181, 0.0754], + device='cuda:2'), in_proj_covar=tensor([0.0542, 0.0460, 0.0444, 0.0558, 0.0440, 0.0464, 0.0439, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:34:19,817 INFO [train.py:901] (2/4) Epoch 27, batch 2450, loss[loss=0.1605, simple_loss=0.2397, pruned_loss=0.04067, over 8084.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2809, pruned_loss=0.05748, over 1614097.91 frames. ], batch size: 21, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:23,906 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.406e+02 2.879e+02 3.948e+02 9.646e+02, threshold=5.757e+02, percent-clipped=9.0 +2023-02-07 11:34:26,763 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212615.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:34:46,276 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-07 11:34:54,050 INFO [train.py:901] (2/4) Epoch 27, batch 2500, loss[loss=0.2213, simple_loss=0.3087, pruned_loss=0.06695, over 8565.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2803, pruned_loss=0.05691, over 1613188.39 frames. 
], batch size: 31, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:59,685 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6558, 1.4962, 3.1701, 1.4199, 2.3780, 3.3827, 3.5626, 2.9078], + device='cuda:2'), covar=tensor([0.1371, 0.1803, 0.0340, 0.2238, 0.0902, 0.0251, 0.0508, 0.0551], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0327, 0.0292, 0.0321, 0.0320, 0.0277, 0.0437, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 11:35:28,280 INFO [train.py:901] (2/4) Epoch 27, batch 2550, loss[loss=0.1836, simple_loss=0.2735, pruned_loss=0.04685, over 8253.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2802, pruned_loss=0.05662, over 1613776.04 frames. ], batch size: 24, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:35:33,072 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.333e+02 2.985e+02 3.926e+02 7.498e+02, threshold=5.971e+02, percent-clipped=4.0 +2023-02-07 11:35:37,331 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212716.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:35:46,877 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212729.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:35:47,588 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212730.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:04,555 INFO [train.py:901] (2/4) Epoch 27, batch 2600, loss[loss=0.1679, simple_loss=0.2462, pruned_loss=0.04484, over 7444.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2798, pruned_loss=0.05654, over 1614075.95 frames. ], batch size: 17, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:36:09,599 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. 
limit=2.0 +2023-02-07 11:36:18,126 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8686, 1.6982, 3.1288, 1.4403, 2.3582, 3.3664, 3.5325, 2.8503], + device='cuda:2'), covar=tensor([0.1294, 0.1719, 0.0364, 0.2314, 0.0913, 0.0268, 0.0529, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0327, 0.0292, 0.0320, 0.0320, 0.0277, 0.0438, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 11:36:21,581 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2688, 2.0838, 1.7455, 1.9008, 1.7792, 1.4890, 1.6719, 1.6751], + device='cuda:2'), covar=tensor([0.1486, 0.0503, 0.1250, 0.0593, 0.0773, 0.1587, 0.1071, 0.1051], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0238, 0.0337, 0.0312, 0.0301, 0.0345, 0.0346, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 11:36:29,684 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212792.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:33,834 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5578, 1.4502, 1.8453, 1.2307, 1.1893, 1.8188, 0.1874, 1.1530], + device='cuda:2'), covar=tensor([0.1410, 0.1134, 0.0384, 0.0854, 0.2255, 0.0438, 0.1868, 0.1242], + device='cuda:2'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0276, 0.0144, 0.0171, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 11:36:35,855 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:38,487 INFO [train.py:901] (2/4) Epoch 27, batch 2650, loss[loss=0.2578, simple_loss=0.3311, pruned_loss=0.09223, over 7227.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2801, pruned_loss=0.05668, over 1612207.34 frames. ], batch size: 71, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:36:43,315 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.517e+02 2.957e+02 3.589e+02 7.428e+02, threshold=5.913e+02, percent-clipped=3.0 +2023-02-07 11:37:02,116 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212837.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:37:04,095 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6465, 1.5192, 1.8975, 1.2671, 1.3290, 1.8621, 0.8476, 1.5324], + device='cuda:2'), covar=tensor([0.1360, 0.0913, 0.0373, 0.0837, 0.1674, 0.0418, 0.1580, 0.1214], + device='cuda:2'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0276, 0.0145, 0.0172, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 11:37:14,947 INFO [train.py:901] (2/4) Epoch 27, batch 2700, loss[loss=0.2023, simple_loss=0.2838, pruned_loss=0.06041, over 7238.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2809, pruned_loss=0.05733, over 1612549.86 frames. ], batch size: 16, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:37:19,850 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212862.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 11:37:49,369 INFO [train.py:901] (2/4) Epoch 27, batch 2750, loss[loss=0.2161, simple_loss=0.3014, pruned_loss=0.06543, over 8328.00 frames. 
], tot_loss[loss=0.199, simple_loss=0.2821, pruned_loss=0.05798, over 1612273.62 frames. ], batch size: 26, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:37:53,341 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.387e+02 2.942e+02 3.576e+02 8.277e+02, threshold=5.883e+02, percent-clipped=4.0 +2023-02-07 11:37:56,795 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:38:25,583 INFO [train.py:901] (2/4) Epoch 27, batch 2800, loss[loss=0.1796, simple_loss=0.2549, pruned_loss=0.05214, over 7788.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2821, pruned_loss=0.0582, over 1609150.67 frames. ], batch size: 19, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:38:43,388 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7740, 2.1193, 3.6106, 1.7686, 1.8149, 3.5586, 0.6961, 2.1995], + device='cuda:2'), covar=tensor([0.1349, 0.1178, 0.0202, 0.1603, 0.2259, 0.0254, 0.1845, 0.1252], + device='cuda:2'), in_proj_covar=tensor([0.0199, 0.0205, 0.0134, 0.0224, 0.0277, 0.0145, 0.0172, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 11:38:46,066 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:38:58,529 INFO [train.py:901] (2/4) Epoch 27, batch 2850, loss[loss=0.2965, simple_loss=0.3611, pruned_loss=0.1159, over 8072.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05847, over 1615715.92 frames. ], batch size: 21, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:39:02,632 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 2.420e+02 3.040e+02 3.738e+02 9.771e+02, threshold=6.080e+02, percent-clipped=4.0 +2023-02-07 11:39:02,865 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:39:03,499 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7798, 1.4854, 1.7277, 1.3868, 0.9755, 1.5128, 1.5982, 1.5031], + device='cuda:2'), covar=tensor([0.0551, 0.1252, 0.1579, 0.1491, 0.0581, 0.1443, 0.0681, 0.0635], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0162, 0.0101, 0.0164, 0.0112, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 11:39:33,419 INFO [train.py:901] (2/4) Epoch 27, batch 2900, loss[loss=0.1855, simple_loss=0.2564, pruned_loss=0.05736, over 7540.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2825, pruned_loss=0.05791, over 1613306.95 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:39:36,853 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213060.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:39:46,823 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213073.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:40:08,833 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.00 vs. limit=5.0 +2023-02-07 11:40:09,153 INFO [train.py:901] (2/4) Epoch 27, batch 2950, loss[loss=0.2075, simple_loss=0.2925, pruned_loss=0.06124, over 7975.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05767, over 1617100.29 frames. 
], batch size: 21, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:40:12,524 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 11:40:13,188 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.292e+02 2.734e+02 3.601e+02 6.803e+02, threshold=5.467e+02, percent-clipped=1.0 +2023-02-07 11:40:30,260 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213136.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:40:42,826 INFO [train.py:901] (2/4) Epoch 27, batch 3000, loss[loss=0.1675, simple_loss=0.2481, pruned_loss=0.04348, over 7701.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2831, pruned_loss=0.05805, over 1613629.45 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:40:42,826 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 11:40:56,478 INFO [train.py:935] (2/4) Epoch 27, validation: loss=0.171, simple_loss=0.2706, pruned_loss=0.03572, over 944034.00 frames. +2023-02-07 11:40:56,479 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 11:41:08,330 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213172.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:10,343 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213175.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:19,868 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213188.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:25,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:30,817 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 11:41:31,705 INFO [train.py:901] (2/4) Epoch 27, batch 3050, loss[loss=0.1915, simple_loss=0.2898, pruned_loss=0.04656, over 8355.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.05815, over 1614695.47 frames. ], batch size: 24, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:41:36,535 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.283e+02 2.877e+02 3.649e+02 6.604e+02, threshold=5.754e+02, percent-clipped=7.0 +2023-02-07 11:41:40,805 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5647, 1.2597, 2.3852, 1.3073, 2.2044, 2.5232, 2.7130, 2.1612], + device='cuda:2'), covar=tensor([0.1019, 0.1438, 0.0433, 0.2077, 0.0724, 0.0391, 0.0750, 0.0616], + device='cuda:2'), in_proj_covar=tensor([0.0306, 0.0328, 0.0293, 0.0321, 0.0321, 0.0279, 0.0438, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 11:42:04,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:42:06,427 INFO [train.py:901] (2/4) Epoch 27, batch 3100, loss[loss=0.2232, simple_loss=0.3017, pruned_loss=0.07229, over 8676.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2844, pruned_loss=0.0588, over 1615510.16 frames. ], batch size: 34, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:42:29,241 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 11:42:40,164 INFO [train.py:901] (2/4) Epoch 27, batch 3150, loss[loss=0.1754, simple_loss=0.2532, pruned_loss=0.04874, over 7414.00 frames. 
], tot_loss[loss=0.2004, simple_loss=0.2835, pruned_loss=0.05867, over 1613033.82 frames. ], batch size: 17, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:42:44,217 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.557e+02 3.186e+02 3.836e+02 1.080e+03, threshold=6.372e+02, percent-clipped=6.0 +2023-02-07 11:42:57,226 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1037, 1.9552, 2.5078, 2.0802, 2.4879, 2.2245, 2.0583, 1.3660], + device='cuda:2'), covar=tensor([0.5751, 0.4978, 0.2048, 0.4094, 0.2724, 0.3329, 0.1988, 0.5579], + device='cuda:2'), in_proj_covar=tensor([0.0965, 0.1016, 0.0828, 0.0989, 0.1022, 0.0927, 0.0770, 0.0850], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 11:43:15,298 INFO [train.py:901] (2/4) Epoch 27, batch 3200, loss[loss=0.2093, simple_loss=0.2961, pruned_loss=0.06122, over 8467.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.05883, over 1619352.74 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:43:38,614 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5937, 4.6243, 4.1304, 2.0761, 4.0512, 4.3195, 4.0896, 4.1210], + device='cuda:2'), covar=tensor([0.0683, 0.0504, 0.1027, 0.4532, 0.0801, 0.0790, 0.1202, 0.0705], + device='cuda:2'), in_proj_covar=tensor([0.0542, 0.0460, 0.0447, 0.0557, 0.0441, 0.0464, 0.0437, 0.0405], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:43:46,356 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2355, 2.1015, 2.6374, 2.2461, 2.6893, 2.3353, 2.1573, 1.5137], + device='cuda:2'), covar=tensor([0.5864, 0.5162, 0.2277, 0.4213, 0.2914, 0.3420, 0.2009, 0.6007], + device='cuda:2'), in_proj_covar=tensor([0.0968, 0.1021, 0.0831, 0.0992, 0.1026, 0.0930, 0.0772, 0.0854], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 11:43:48,885 INFO [train.py:901] (2/4) Epoch 27, batch 3250, loss[loss=0.1786, simple_loss=0.2525, pruned_loss=0.05231, over 7204.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05801, over 1617858.22 frames. ], batch size: 16, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:43:52,804 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.478e+02 2.885e+02 3.413e+02 5.983e+02, threshold=5.770e+02, percent-clipped=0.0 +2023-02-07 11:44:07,298 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:17,675 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:25,453 INFO [train.py:901] (2/4) Epoch 27, batch 3300, loss[loss=0.1899, simple_loss=0.2744, pruned_loss=0.05272, over 7818.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2821, pruned_loss=0.05772, over 1618504.58 frames. 
], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:44:26,265 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213456.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:35,238 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213469.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:59,357 INFO [train.py:901] (2/4) Epoch 27, batch 3350, loss[loss=0.1908, simple_loss=0.2679, pruned_loss=0.05683, over 6841.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2812, pruned_loss=0.05712, over 1616169.89 frames. ], batch size: 15, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:45:00,994 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:45:03,443 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.606e+02 3.102e+02 3.998e+02 8.787e+02, threshold=6.203e+02, percent-clipped=8.0 +2023-02-07 11:45:18,446 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213532.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:45:34,519 INFO [train.py:901] (2/4) Epoch 27, batch 3400, loss[loss=0.1901, simple_loss=0.2816, pruned_loss=0.04926, over 8642.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2812, pruned_loss=0.05715, over 1617164.72 frames. ], batch size: 34, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:45:54,958 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 11:45:55,324 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=213584.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:46:09,627 INFO [train.py:901] (2/4) Epoch 27, batch 3450, loss[loss=0.2351, simple_loss=0.3176, pruned_loss=0.07624, over 7117.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05769, over 1616187.99 frames. ], batch size: 71, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:46:13,709 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.297e+02 2.616e+02 3.439e+02 9.820e+02, threshold=5.232e+02, percent-clipped=1.0 +2023-02-07 11:46:13,977 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1997, 2.1037, 2.7426, 2.3199, 2.8123, 2.2634, 2.1446, 1.7142], + device='cuda:2'), covar=tensor([0.6043, 0.5051, 0.2118, 0.3945, 0.2512, 0.3174, 0.1969, 0.5328], + device='cuda:2'), in_proj_covar=tensor([0.0964, 0.1019, 0.0829, 0.0988, 0.1019, 0.0929, 0.0770, 0.0851], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 11:46:44,529 INFO [train.py:901] (2/4) Epoch 27, batch 3500, loss[loss=0.2032, simple_loss=0.2911, pruned_loss=0.05762, over 8248.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2821, pruned_loss=0.05754, over 1615340.34 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:11,028 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 11:47:20,453 INFO [train.py:901] (2/4) Epoch 27, batch 3550, loss[loss=0.2302, simple_loss=0.3141, pruned_loss=0.07318, over 8263.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.0578, over 1616380.68 frames. 
], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:24,356 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.484e+02 3.157e+02 3.893e+02 8.912e+02, threshold=6.313e+02, percent-clipped=7.0 +2023-02-07 11:47:55,118 INFO [train.py:901] (2/4) Epoch 27, batch 3600, loss[loss=0.2225, simple_loss=0.3075, pruned_loss=0.0688, over 8471.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2836, pruned_loss=0.05778, over 1617776.97 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:48:31,565 INFO [train.py:901] (2/4) Epoch 27, batch 3650, loss[loss=0.2243, simple_loss=0.3072, pruned_loss=0.0707, over 8636.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.284, pruned_loss=0.05808, over 1621545.61 frames. ], batch size: 34, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:48:35,649 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.435e+02 3.005e+02 4.000e+02 1.001e+03, threshold=6.009e+02, percent-clipped=1.0 +2023-02-07 11:49:05,228 INFO [train.py:901] (2/4) Epoch 27, batch 3700, loss[loss=0.1753, simple_loss=0.2758, pruned_loss=0.03742, over 8629.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2839, pruned_loss=0.05782, over 1618331.01 frames. ], batch size: 39, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:49:11,355 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 11:49:27,322 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 11:49:40,755 INFO [train.py:901] (2/4) Epoch 27, batch 3750, loss[loss=0.183, simple_loss=0.2497, pruned_loss=0.05813, over 7703.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2839, pruned_loss=0.05793, over 1621981.80 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:49:44,676 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.240e+02 2.670e+02 3.453e+02 6.024e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 11:49:57,384 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213928.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:49:58,184 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1473, 2.2716, 2.2624, 1.6912, 2.3860, 1.8535, 1.7053, 2.1330], + device='cuda:2'), covar=tensor([0.0624, 0.0379, 0.0307, 0.0596, 0.0437, 0.0657, 0.0791, 0.0417], + device='cuda:2'), in_proj_covar=tensor([0.0470, 0.0406, 0.0361, 0.0457, 0.0392, 0.0546, 0.0402, 0.0437], + device='cuda:2'), out_proj_covar=tensor([1.2462e-04, 1.0556e-04, 9.4331e-05, 1.1968e-04, 1.0257e-04, 1.5229e-04, + 1.0730e-04, 1.1460e-04], device='cuda:2') +2023-02-07 11:50:15,110 INFO [train.py:901] (2/4) Epoch 27, batch 3800, loss[loss=0.2044, simple_loss=0.2909, pruned_loss=0.05898, over 8294.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2819, pruned_loss=0.05733, over 1615547.34 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:50:29,642 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 11:50:51,696 INFO [train.py:901] (2/4) Epoch 27, batch 3850, loss[loss=0.2373, simple_loss=0.3195, pruned_loss=0.07759, over 8348.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2815, pruned_loss=0.05739, over 1613065.09 frames. 
], batch size: 26, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:50:55,678 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.325e+02 2.987e+02 3.815e+02 9.366e+02, threshold=5.974e+02, percent-clipped=6.0 +2023-02-07 11:51:19,434 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:51:21,264 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 11:51:27,207 INFO [train.py:901] (2/4) Epoch 27, batch 3900, loss[loss=0.2051, simple_loss=0.2951, pruned_loss=0.05754, over 8464.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2817, pruned_loss=0.05766, over 1612418.05 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:00,516 INFO [train.py:901] (2/4) Epoch 27, batch 3950, loss[loss=0.1887, simple_loss=0.2716, pruned_loss=0.0529, over 7813.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2825, pruned_loss=0.05753, over 1612456.24 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:04,374 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.377e+02 2.717e+02 3.364e+02 5.097e+02, threshold=5.435e+02, percent-clipped=0.0 +2023-02-07 11:52:30,737 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:52:36,016 INFO [train.py:901] (2/4) Epoch 27, batch 4000, loss[loss=0.1739, simple_loss=0.2601, pruned_loss=0.04391, over 8228.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2825, pruned_loss=0.05746, over 1614910.80 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:55,322 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6670, 2.0747, 3.2261, 1.5434, 2.3825, 2.1721, 1.7277, 2.5080], + device='cuda:2'), covar=tensor([0.1850, 0.2805, 0.0881, 0.4691, 0.1935, 0.3098, 0.2512, 0.2264], + device='cuda:2'), in_proj_covar=tensor([0.0535, 0.0630, 0.0562, 0.0665, 0.0656, 0.0604, 0.0559, 0.0642], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:53:10,789 INFO [train.py:901] (2/4) Epoch 27, batch 4050, loss[loss=0.1818, simple_loss=0.2541, pruned_loss=0.05473, over 7695.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2825, pruned_loss=0.05781, over 1615276.81 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:53:14,930 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.379e+02 2.958e+02 3.648e+02 7.596e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 11:53:31,866 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:53:45,411 INFO [train.py:901] (2/4) Epoch 27, batch 4100, loss[loss=0.2177, simple_loss=0.3005, pruned_loss=0.06742, over 8625.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.05745, over 1613919.68 frames. ], batch size: 34, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:54:00,512 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 11:54:17,261 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:54:21,105 INFO [train.py:901] (2/4) Epoch 27, batch 4150, loss[loss=0.2135, simple_loss=0.306, pruned_loss=0.06046, over 8363.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.282, pruned_loss=0.05717, over 1612348.70 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:54:23,224 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3890, 4.3736, 3.9709, 1.9887, 3.8764, 4.1308, 3.9201, 3.8920], + device='cuda:2'), covar=tensor([0.0702, 0.0512, 0.0950, 0.4418, 0.0831, 0.0798, 0.1228, 0.0727], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0459, 0.0446, 0.0555, 0.0441, 0.0465, 0.0439, 0.0405], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:54:25,184 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.522e+02 2.957e+02 3.518e+02 6.524e+02, threshold=5.913e+02, percent-clipped=2.0 +2023-02-07 11:54:34,234 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:54:55,413 INFO [train.py:901] (2/4) Epoch 27, batch 4200, loss[loss=0.1859, simple_loss=0.2719, pruned_loss=0.04996, over 8084.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2829, pruned_loss=0.05717, over 1614243.18 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:54:56,403 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-07 11:55:10,584 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0 +2023-02-07 11:55:15,716 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 11:55:30,769 INFO [train.py:901] (2/4) Epoch 27, batch 4250, loss[loss=0.2027, simple_loss=0.2966, pruned_loss=0.05442, over 8446.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2833, pruned_loss=0.0576, over 1615812.75 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:55:34,789 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.351e+02 2.930e+02 3.605e+02 8.966e+02, threshold=5.860e+02, percent-clipped=4.0 +2023-02-07 11:55:40,122 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 11:56:04,677 INFO [train.py:901] (2/4) Epoch 27, batch 4300, loss[loss=0.2101, simple_loss=0.2975, pruned_loss=0.06132, over 8464.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.283, pruned_loss=0.05771, over 1615254.13 frames. ], batch size: 27, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:56:09,564 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-07 11:56:15,639 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.08 vs. 
limit=5.0 +2023-02-07 11:56:18,079 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1388, 1.3791, 1.6689, 1.3324, 0.7448, 1.4613, 1.2191, 1.0769], + device='cuda:2'), covar=tensor([0.0655, 0.1211, 0.1645, 0.1402, 0.0547, 0.1391, 0.0691, 0.0710], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0161, 0.0101, 0.0164, 0.0112, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 11:56:29,278 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:56:40,081 INFO [train.py:901] (2/4) Epoch 27, batch 4350, loss[loss=0.2541, simple_loss=0.3248, pruned_loss=0.09168, over 6652.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05878, over 1609302.35 frames. ], batch size: 71, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:56:44,881 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.457e+02 2.989e+02 4.041e+02 8.697e+02, threshold=5.978e+02, percent-clipped=4.0 +2023-02-07 11:57:11,489 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 11:57:14,883 INFO [train.py:901] (2/4) Epoch 27, batch 4400, loss[loss=0.1954, simple_loss=0.2742, pruned_loss=0.05829, over 8232.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05778, over 1608648.17 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:17,080 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2366, 2.4797, 2.5828, 1.5642, 3.0376, 1.8350, 1.5139, 2.3796], + device='cuda:2'), covar=tensor([0.0968, 0.0521, 0.0457, 0.0887, 0.0521, 0.1020, 0.1038, 0.0559], + device='cuda:2'), in_proj_covar=tensor([0.0473, 0.0411, 0.0364, 0.0461, 0.0395, 0.0550, 0.0403, 0.0439], + device='cuda:2'), out_proj_covar=tensor([1.2533e-04, 1.0673e-04, 9.4917e-05, 1.2070e-04, 1.0355e-04, 1.5371e-04, + 1.0771e-04, 1.1520e-04], device='cuda:2') +2023-02-07 11:57:32,006 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:46,529 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.17 vs. limit=5.0 +2023-02-07 11:57:49,419 INFO [train.py:901] (2/4) Epoch 27, batch 4450, loss[loss=0.3096, simple_loss=0.3648, pruned_loss=0.1272, over 6955.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2821, pruned_loss=0.05728, over 1608460.47 frames. ], batch size: 72, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:50,290 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:51,441 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-07 11:57:52,862 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0423, 2.0580, 1.8638, 2.6079, 1.3199, 1.6344, 2.0220, 2.2306], + device='cuda:2'), covar=tensor([0.0676, 0.0742, 0.0825, 0.0429, 0.1074, 0.1258, 0.0772, 0.0701], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0211, 0.0202, 0.0245, 0.0247, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 11:57:53,336 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.435e+02 2.910e+02 3.675e+02 1.096e+03, threshold=5.821e+02, percent-clipped=3.0 +2023-02-07 11:58:25,056 INFO [train.py:901] (2/4) Epoch 27, batch 4500, loss[loss=0.1708, simple_loss=0.2488, pruned_loss=0.04643, over 7817.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05693, over 1612223.63 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:58:44,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8112, 2.5451, 4.1421, 1.6871, 3.1572, 2.4126, 2.0125, 3.1676], + device='cuda:2'), covar=tensor([0.1971, 0.2642, 0.0934, 0.4716, 0.1862, 0.3271, 0.2520, 0.2353], + device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0632, 0.0563, 0.0667, 0.0658, 0.0607, 0.0562, 0.0643], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 11:58:49,087 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 11:58:51,959 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:58:58,564 INFO [train.py:901] (2/4) Epoch 27, batch 4550, loss[loss=0.1769, simple_loss=0.272, pruned_loss=0.04094, over 8698.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05654, over 1617903.50 frames. ], batch size: 39, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:59:03,198 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.259e+02 2.795e+02 3.667e+02 7.490e+02, threshold=5.591e+02, percent-clipped=6.0 +2023-02-07 11:59:34,572 INFO [train.py:901] (2/4) Epoch 27, batch 4600, loss[loss=0.2267, simple_loss=0.3051, pruned_loss=0.07418, over 6811.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.0577, over 1614639.12 frames. ], batch size: 72, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:07,854 INFO [train.py:901] (2/4) Epoch 27, batch 4650, loss[loss=0.1688, simple_loss=0.2553, pruned_loss=0.04111, over 8076.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.05764, over 1616169.34 frames. 
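The `WARNING [train.py:1067] Exclude cut with ID ... from training. Duration: ...` entries interleaved above come from a duration filter applied before batching: utterances shorter than about a second or longer than the recipe's ceiling are dropped so they cannot destabilize the transducer loss. A minimal sketch of that check, with the lhotse dependency replaced by a stand-in and the exact thresholds assumed (the log only shows which durations were rejected):

```python
import logging
from dataclasses import dataclass

logging.basicConfig(level=logging.WARNING)

@dataclass
class Cut:
    """Stand-in for a lhotse Cut; only the fields the filter needs."""
    id: str
    duration: float  # seconds

MIN_SECS, MAX_SECS = 1.0, 25.0  # assumed limits; not printed in the log

def keep_cut(cut: Cut) -> bool:
    """Return False (and warn, as train.py does) for unusable cuts."""
    if cut.duration < MIN_SECS or cut.duration > MAX_SECS:
        logging.warning(
            "Exclude cut with ID %s from training. Duration: %s",
            cut.id, cut.duration,
        )
        return False
    return True

# Reproduces two of the warnings interleaved above:
keep_cut(Cut("2411-132532-0017-25057_sp1.1", 0.9681875))
keep_cut(Cut("7255-291500-0005-9467", 25.035))
```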
], batch size: 21, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:11,913 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.452e+02 3.083e+02 3.974e+02 1.018e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-07 12:00:20,129 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0856, 1.8444, 2.3300, 2.0214, 2.3324, 2.1772, 2.0024, 1.2040], + device='cuda:2'), covar=tensor([0.5902, 0.5095, 0.2003, 0.4092, 0.2514, 0.3377, 0.1932, 0.5310], + device='cuda:2'), in_proj_covar=tensor([0.0960, 0.1013, 0.0824, 0.0985, 0.1017, 0.0923, 0.0766, 0.0845], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 12:00:43,652 INFO [train.py:901] (2/4) Epoch 27, batch 4700, loss[loss=0.2098, simple_loss=0.2797, pruned_loss=0.06996, over 7661.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05804, over 1615804.10 frames. ], batch size: 19, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:48,565 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:05,467 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:17,192 INFO [train.py:901] (2/4) Epoch 27, batch 4750, loss[loss=0.2318, simple_loss=0.2926, pruned_loss=0.08552, over 7682.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2826, pruned_loss=0.05797, over 1612227.19 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:01:21,117 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.444e+02 3.016e+02 3.790e+02 1.117e+03, threshold=6.032e+02, percent-clipped=6.0 +2023-02-07 12:01:42,264 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 12:01:45,072 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 12:01:49,024 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:51,303 INFO [train.py:901] (2/4) Epoch 27, batch 4800, loss[loss=0.2722, simple_loss=0.3288, pruned_loss=0.1078, over 7159.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2828, pruned_loss=0.05809, over 1608773.35 frames. ], batch size: 71, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:07,902 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:02:27,023 INFO [train.py:901] (2/4) Epoch 27, batch 4850, loss[loss=0.268, simple_loss=0.3453, pruned_loss=0.09533, over 8827.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05847, over 1613132.10 frames. ], batch size: 40, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:31,200 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.294e+02 2.705e+02 3.274e+02 6.085e+02, threshold=5.409e+02, percent-clipped=1.0 +2023-02-07 12:02:36,648 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 12:02:47,194 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 12:02:54,782 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. 
limit=2.0 +2023-02-07 12:03:01,819 INFO [train.py:901] (2/4) Epoch 27, batch 4900, loss[loss=0.18, simple_loss=0.2622, pruned_loss=0.04892, over 7701.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05877, over 1613888.42 frames. ], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:37,410 INFO [train.py:901] (2/4) Epoch 27, batch 4950, loss[loss=0.2053, simple_loss=0.2847, pruned_loss=0.06299, over 8344.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05917, over 1608341.53 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:41,314 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.323e+02 2.858e+02 3.502e+02 9.819e+02, threshold=5.716e+02, percent-clipped=5.0 +2023-02-07 12:04:10,551 INFO [train.py:901] (2/4) Epoch 27, batch 5000, loss[loss=0.1932, simple_loss=0.2627, pruned_loss=0.06184, over 7704.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2848, pruned_loss=0.05959, over 1600656.53 frames. ], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:04:47,112 INFO [train.py:901] (2/4) Epoch 27, batch 5050, loss[loss=0.2681, simple_loss=0.3412, pruned_loss=0.09746, over 8776.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2844, pruned_loss=0.05921, over 1601842.42 frames. ], batch size: 30, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:04:50,989 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.415e+02 2.920e+02 3.667e+02 5.760e+02, threshold=5.840e+02, percent-clipped=1.0 +2023-02-07 12:04:53,741 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2886, 1.7647, 1.2730, 2.8102, 1.2619, 1.1881, 2.0103, 1.8334], + device='cuda:2'), covar=tensor([0.1592, 0.1244, 0.1997, 0.0392, 0.1305, 0.2179, 0.0900, 0.1021], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0243, 0.0212, 0.0203, 0.0246, 0.0249, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 12:05:10,151 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 12:05:15,550 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215248.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:05:20,039 INFO [train.py:901] (2/4) Epoch 27, batch 5100, loss[loss=0.2245, simple_loss=0.3126, pruned_loss=0.06822, over 8358.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2861, pruned_loss=0.06026, over 1602816.90 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:23,562 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0336, 2.1897, 1.9290, 2.8026, 1.4283, 1.6962, 2.1133, 2.2347], + device='cuda:2'), covar=tensor([0.0708, 0.0704, 0.0774, 0.0357, 0.0923, 0.1183, 0.0634, 0.0682], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0243, 0.0212, 0.0203, 0.0246, 0.0249, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 12:05:54,329 INFO [train.py:901] (2/4) Epoch 27, batch 5150, loss[loss=0.1816, simple_loss=0.2612, pruned_loss=0.05094, over 7660.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2842, pruned_loss=0.0593, over 1599328.96 frames. 
], batch size: 19, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:59,266 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.449e+02 2.868e+02 3.492e+02 6.640e+02, threshold=5.736e+02, percent-clipped=1.0 +2023-02-07 12:06:12,726 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 12:06:22,709 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215343.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:06:28,150 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215351.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:06:30,706 INFO [train.py:901] (2/4) Epoch 27, batch 5200, loss[loss=0.2047, simple_loss=0.2947, pruned_loss=0.0573, over 8354.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.284, pruned_loss=0.05916, over 1604820.75 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:06:59,160 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-07 12:07:05,197 INFO [train.py:901] (2/4) Epoch 27, batch 5250, loss[loss=0.1714, simple_loss=0.2452, pruned_loss=0.04877, over 7443.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2832, pruned_loss=0.05876, over 1606248.93 frames. ], batch size: 17, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:07:09,821 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.358e+02 2.790e+02 3.638e+02 8.125e+02, threshold=5.579e+02, percent-clipped=3.0 +2023-02-07 12:07:12,598 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 12:07:40,340 INFO [train.py:901] (2/4) Epoch 27, batch 5300, loss[loss=0.2035, simple_loss=0.2811, pruned_loss=0.06295, over 7917.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05835, over 1612805.84 frames. ], batch size: 20, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:13,769 INFO [train.py:901] (2/4) Epoch 27, batch 5350, loss[loss=0.2129, simple_loss=0.3076, pruned_loss=0.05908, over 8365.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2836, pruned_loss=0.0585, over 1615319.32 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:18,669 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.346e+02 2.455e+02 2.847e+02 3.988e+02 1.267e+03, threshold=5.693e+02, percent-clipped=12.0 +2023-02-07 12:08:24,722 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 12:08:25,711 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:08:48,885 INFO [train.py:901] (2/4) Epoch 27, batch 5400, loss[loss=0.1888, simple_loss=0.2814, pruned_loss=0.04813, over 8751.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05798, over 1615706.49 frames. 
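Each `INFO [optim.py:369] Clipping_scale=2.0, grad-norm quartiles ... threshold=... percent-clipped=...` entry reports adaptive gradient clipping: the five numbers are the min/25%/median/75%/max of recent gradient norms, and in every entry the threshold equals clipping_scale times the logged median (e.g. 2.0 × 2.847e+02 ≈ 5.693e+02 just above), with percent-clipped giving the share of recent batches that exceeded it. A sketch of that rule under those assumptions, not optim.py verbatim:

```python
import torch

class AdaptiveGradClipper:
    """Clip to clipping_scale x the median grad norm over a recent window;
    log quantiles and the clipped fraction as diagnostics."""

    def __init__(self, clipping_scale: float = 2.0, window: int = 128):
        self.clipping_scale = clipping_scale
        self.window = window
        self.norms: list[float] = []   # recent global grad norms
        self.flags: list[bool] = []    # whether each recent batch was clipped

    def clip_(self, parameters) -> float:
        params = [p for p in parameters if p.grad is not None]
        norm = torch.linalg.vector_norm(
            torch.stack([torch.linalg.vector_norm(p.grad) for p in params])
        ).item()
        self.norms = (self.norms + [norm])[-self.window:]
        q = torch.quantile(torch.tensor(self.norms),
                           torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        threshold = self.clipping_scale * q[2].item()  # 2.0 x median
        clipped = norm > threshold
        self.flags = (self.flags + [clipped])[-self.window:]
        if clipped:
            for p in params:
                p.grad.mul_(threshold / norm)
        print(f"grad-norm quartiles {[f'{v:.3e}' for v in q.tolist()]}, "
              f"threshold={threshold:.3e}, "
              f"percent-clipped={100.0 * sum(self.flags) / len(self.flags):.1f}")
        return threshold
```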
], batch size: 34, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:57,739 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215566.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:09:08,913 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:09:14,843 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215592.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:09:23,343 INFO [train.py:901] (2/4) Epoch 27, batch 5450, loss[loss=0.1957, simple_loss=0.2908, pruned_loss=0.05028, over 8510.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05766, over 1613357.14 frames. ], batch size: 28, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:27,901 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.540e+02 3.136e+02 3.819e+02 8.555e+02, threshold=6.272e+02, percent-clipped=5.0 +2023-02-07 12:09:56,882 INFO [train.py:901] (2/4) Epoch 27, batch 5500, loss[loss=0.1761, simple_loss=0.2497, pruned_loss=0.05126, over 7274.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2834, pruned_loss=0.0587, over 1615611.51 frames. ], batch size: 16, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:56,921 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 12:10:05,866 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6648, 1.7937, 5.8576, 2.1259, 5.2526, 4.9386, 5.3340, 5.2521], + device='cuda:2'), covar=tensor([0.0570, 0.4553, 0.0384, 0.4016, 0.0949, 0.0818, 0.0539, 0.0534], + device='cuda:2'), in_proj_covar=tensor([0.0674, 0.0664, 0.0733, 0.0654, 0.0741, 0.0632, 0.0633, 0.0711], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 12:10:20,606 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215687.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:10:25,967 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:10:32,543 INFO [train.py:901] (2/4) Epoch 27, batch 5550, loss[loss=0.1861, simple_loss=0.2785, pruned_loss=0.04689, over 8740.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2825, pruned_loss=0.05844, over 1612877.51 frames. ], batch size: 30, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:10:34,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215707.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:10:37,243 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.445e+02 2.973e+02 3.969e+02 8.778e+02, threshold=5.947e+02, percent-clipped=4.0 +2023-02-07 12:10:37,654 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. 
limit=5.0 +2023-02-07 12:10:52,200 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7646, 1.5389, 3.1770, 1.4752, 2.3274, 3.3952, 3.5014, 2.9367], + device='cuda:2'), covar=tensor([0.1245, 0.1714, 0.0327, 0.2103, 0.0952, 0.0254, 0.0669, 0.0527], + device='cuda:2'), in_proj_covar=tensor([0.0306, 0.0325, 0.0291, 0.0320, 0.0321, 0.0277, 0.0437, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:11:06,807 INFO [train.py:901] (2/4) Epoch 27, batch 5600, loss[loss=0.1755, simple_loss=0.2509, pruned_loss=0.05007, over 7226.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2828, pruned_loss=0.05844, over 1611184.08 frames. ], batch size: 16, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:40,023 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:11:41,824 INFO [train.py:901] (2/4) Epoch 27, batch 5650, loss[loss=0.1952, simple_loss=0.2668, pruned_loss=0.06175, over 7808.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2834, pruned_loss=0.05868, over 1613886.74 frames. ], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:42,241 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 12:11:46,137 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:11:47,203 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.361e+02 2.799e+02 3.308e+02 5.877e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 12:12:05,002 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 12:12:11,226 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:16,647 INFO [train.py:901] (2/4) Epoch 27, batch 5700, loss[loss=0.203, simple_loss=0.283, pruned_loss=0.06151, over 8522.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05878, over 1612256.52 frames. ], batch size: 28, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:23,685 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:51,905 INFO [train.py:901] (2/4) Epoch 27, batch 5750, loss[loss=0.1966, simple_loss=0.2845, pruned_loss=0.05437, over 8565.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2825, pruned_loss=0.05817, over 1606207.09 frames. ], batch size: 31, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:56,019 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215910.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:12:57,194 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.395e+02 2.899e+02 3.864e+02 7.116e+02, threshold=5.798e+02, percent-clipped=7.0 +2023-02-07 12:13:07,809 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:10,342 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 12:13:26,396 INFO [train.py:901] (2/4) Epoch 27, batch 5800, loss[loss=0.2123, simple_loss=0.2831, pruned_loss=0.07072, over 7205.00 frames. 
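The `INFO [scaling.py:679] Whitening: num_groups=..., num_channels=..., metric=M vs. limit=L` entries trace the Zipformer whitening regularizer: channels are split into groups, the eigenvalue spread of each group's activation covariance is summarized by one statistic, and a corrective gradient is applied only when that statistic exceeds the limit (2.0 and 5.0 for the two module variants logged here). A sketch of the statistic under that reading; the eigendecomposition is illustrative, not the production implementation:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    """x: (num_frames, num_channels). Per group, E[lambda^2] / E[lambda]^2
    over covariance eigenvalues lambda; equals 1.0 for perfectly white
    (isotropic) activations and grows as the covariance spectrum spreads."""
    num_frames, num_channels = x.shape
    cpg = num_channels // num_groups                  # channels per group
    g = x.reshape(num_frames, num_groups, cpg).permute(1, 0, 2)
    cov = g.transpose(1, 2) @ g / num_frames          # (num_groups, cpg, cpg)
    eigs = torch.linalg.eigvalsh(cov)                 # (num_groups, cpg)
    metric = (eigs ** 2).mean(dim=1) / eigs.mean(dim=1) ** 2
    return metric.mean().item()

limit = 2.0
x = torch.randn(1000, 96)  # roughly white activations
print(f"metric={whitening_metric(x, num_groups=8):.2f} vs. limit={limit}")
```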
], tot_loss[loss=0.2, simple_loss=0.2828, pruned_loss=0.05863, over 1606351.21 frames. ], batch size: 16, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:13:31,935 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=215963.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:13:32,842 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 12:13:40,545 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1721, 1.6426, 1.6776, 1.4672, 1.0529, 1.5035, 1.7880, 1.7204], + device='cuda:2'), covar=tensor([0.0563, 0.1180, 0.1629, 0.1446, 0.0619, 0.1495, 0.0744, 0.0631], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0152, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 12:13:43,178 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215980.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:47,613 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6940, 1.6367, 2.3268, 1.2768, 1.1974, 2.3738, 0.3921, 1.3701], + device='cuda:2'), covar=tensor([0.1723, 0.1161, 0.0391, 0.1376, 0.2471, 0.0409, 0.1888, 0.1347], + device='cuda:2'), in_proj_covar=tensor([0.0199, 0.0203, 0.0135, 0.0222, 0.0275, 0.0145, 0.0170, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 12:13:48,929 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=215988.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:01,068 INFO [train.py:901] (2/4) Epoch 27, batch 5850, loss[loss=0.1571, simple_loss=0.2332, pruned_loss=0.04053, over 7524.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2833, pruned_loss=0.05824, over 1607280.72 frames. ], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:05,651 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.428e+02 2.871e+02 3.760e+02 7.078e+02, threshold=5.742e+02, percent-clipped=9.0 +2023-02-07 12:14:15,358 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216025.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:27,446 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:30,139 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216046.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:36,738 INFO [train.py:901] (2/4) Epoch 27, batch 5900, loss[loss=0.2024, simple_loss=0.2777, pruned_loss=0.06359, over 7963.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2831, pruned_loss=0.05845, over 1606322.00 frames. ], batch size: 21, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:38,967 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216058.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:44,154 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:53,613 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-02-07 12:14:55,481 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216083.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:15:00,978 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:15:10,147 INFO [train.py:901] (2/4) Epoch 27, batch 5950, loss[loss=0.1771, simple_loss=0.2604, pruned_loss=0.04693, over 7780.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05803, over 1605663.08 frames. ], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:15,769 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.411e+02 2.864e+02 3.625e+02 8.908e+02, threshold=5.728e+02, percent-clipped=5.0 +2023-02-07 12:15:46,872 INFO [train.py:901] (2/4) Epoch 27, batch 6000, loss[loss=0.1937, simple_loss=0.294, pruned_loss=0.04676, over 8534.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.0587, over 1600053.66 frames. ], batch size: 28, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:46,873 INFO [train.py:926] (2/4) Computing validation loss +2023-02-07 12:15:59,961 INFO [train.py:935] (2/4) Epoch 27, validation: loss=0.1711, simple_loss=0.2711, pruned_loss=0.03554, over 944034.00 frames. +2023-02-07 12:15:59,962 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6789MB +2023-02-07 12:16:25,736 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:16:35,311 INFO [train.py:901] (2/4) Epoch 27, batch 6050, loss[loss=0.1946, simple_loss=0.2924, pruned_loss=0.04841, over 8527.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05836, over 1603854.92 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:16:40,126 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.565e+02 3.207e+02 4.227e+02 9.285e+02, threshold=6.415e+02, percent-clipped=9.0 +2023-02-07 12:16:56,671 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:09,862 INFO [train.py:901] (2/4) Epoch 27, batch 6100, loss[loss=0.1949, simple_loss=0.2844, pruned_loss=0.05268, over 8461.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2834, pruned_loss=0.05833, over 1600886.96 frames. ], batch size: 39, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:14,015 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:28,455 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216281.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:17:39,621 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:44,965 INFO [train.py:901] (2/4) Epoch 27, batch 6150, loss[loss=0.201, simple_loss=0.2933, pruned_loss=0.05436, over 8579.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05783, over 1604102.04 frames. ], batch size: 31, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:44,980 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-07 12:17:45,827 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216306.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:45,850 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216306.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:17:49,762 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.311e+02 2.985e+02 4.036e+02 8.594e+02, threshold=5.970e+02, percent-clipped=2.0 +2023-02-07 12:17:57,192 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216323.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:18,513 INFO [train.py:901] (2/4) Epoch 27, batch 6200, loss[loss=0.1991, simple_loss=0.2575, pruned_loss=0.07032, over 7707.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2828, pruned_loss=0.05848, over 1604092.00 frames. ], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:28,740 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 12:18:42,476 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:45,887 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5504, 1.4924, 1.8220, 1.2627, 1.2519, 1.8385, 0.2556, 1.2399], + device='cuda:2'), covar=tensor([0.1560, 0.1229, 0.0442, 0.0849, 0.2253, 0.0450, 0.1830, 0.1198], + device='cuda:2'), in_proj_covar=tensor([0.0200, 0.0205, 0.0136, 0.0222, 0.0275, 0.0145, 0.0171, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 12:18:53,897 INFO [train.py:901] (2/4) Epoch 27, batch 6250, loss[loss=0.1978, simple_loss=0.2777, pruned_loss=0.05893, over 8035.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2824, pruned_loss=0.05836, over 1604751.72 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:58,452 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.470e+02 2.901e+02 3.405e+02 7.374e+02, threshold=5.803e+02, percent-clipped=1.0 +2023-02-07 12:19:27,775 INFO [train.py:901] (2/4) Epoch 27, batch 6300, loss[loss=0.1892, simple_loss=0.2853, pruned_loss=0.04652, over 8372.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2827, pruned_loss=0.05818, over 1609013.10 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:19:54,184 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6427, 1.7611, 2.0092, 1.6598, 1.1999, 1.7759, 2.3399, 1.9412], + device='cuda:2'), covar=tensor([0.0489, 0.1196, 0.1623, 0.1397, 0.0578, 0.1454, 0.0623, 0.0636], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0152, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 12:19:56,913 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.18 vs. limit=5.0 +2023-02-07 12:20:01,964 INFO [train.py:901] (2/4) Epoch 27, batch 6350, loss[loss=0.2432, simple_loss=0.3116, pruned_loss=0.08743, over 8444.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2828, pruned_loss=0.05832, over 1610481.62 frames. 
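The `INFO [zipformer.py:1185] warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop=...` entries record stochastic layer skipping: each encoder layer has its own warmup window, and the skip probability is high inside it but decays to a small floor that persists afterwards, which is why occasional `num_to_drop=1` lines still appear at batch_count around 216k. A sketch under that assumed schedule (the exact probabilities are not in the log):

```python
import random

def layers_to_drop(num_layers: int, batch_count: float,
                   warmup_begin: float, warmup_end: float,
                   p_warm: float = 0.5, p_floor: float = 0.05) -> set[int]:
    """Pick encoder layers to skip for this batch; assumed linear decay
    from p_warm inside the warmup window to a persistent p_floor."""
    if batch_count >= warmup_end:
        p = p_floor
    elif batch_count <= warmup_begin:
        p = p_warm
    else:
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = p_warm + frac * (p_floor - p_warm)
    return {i for i in range(num_layers) if random.random() < p}

dropped = layers_to_drop(num_layers=2, batch_count=216306.0,
                         warmup_begin=2000.0, warmup_end=2666.7)
print(f"num_to_drop={len(dropped)}, layers_to_drop={dropped}")
```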
], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:02,156 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:07,863 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.511e+02 2.994e+02 4.018e+02 7.521e+02, threshold=5.987e+02, percent-clipped=5.0 +2023-02-07 12:20:23,600 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0 +2023-02-07 12:20:30,861 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2790, 1.6979, 4.5983, 1.9956, 2.6238, 5.1436, 5.2682, 4.4625], + device='cuda:2'), covar=tensor([0.1236, 0.1823, 0.0225, 0.1969, 0.1013, 0.0157, 0.0566, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0325, 0.0291, 0.0318, 0.0321, 0.0276, 0.0437, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:20:36,660 INFO [train.py:901] (2/4) Epoch 27, batch 6400, loss[loss=0.1993, simple_loss=0.2872, pruned_loss=0.05571, over 8540.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.05871, over 1609262.23 frames. ], batch size: 39, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:41,612 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:53,520 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0527, 2.1655, 1.9895, 2.9651, 1.3978, 1.6519, 2.1851, 2.2349], + device='cuda:2'), covar=tensor([0.0764, 0.0791, 0.0794, 0.0314, 0.1039, 0.1278, 0.0789, 0.0782], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0194, 0.0243, 0.0211, 0.0203, 0.0245, 0.0248, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-07 12:20:58,196 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:10,669 INFO [train.py:901] (2/4) Epoch 27, batch 6450, loss[loss=0.1828, simple_loss=0.2707, pruned_loss=0.0475, over 7645.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2822, pruned_loss=0.05793, over 1610926.13 frames. ], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:13,554 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:16,162 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.469e+02 2.882e+02 3.609e+02 7.919e+02, threshold=5.765e+02, percent-clipped=2.0 +2023-02-07 12:21:46,015 INFO [train.py:901] (2/4) Epoch 27, batch 6500, loss[loss=0.1845, simple_loss=0.2701, pruned_loss=0.04938, over 8078.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2828, pruned_loss=0.05836, over 1613014.27 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:58,517 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216673.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:22:19,973 INFO [train.py:901] (2/4) Epoch 27, batch 6550, loss[loss=0.2397, simple_loss=0.3148, pruned_loss=0.08228, over 8192.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2819, pruned_loss=0.05821, over 1611587.14 frames. 
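The `grad_scale` value that closes every tot_loss entry follows the usual mixed-precision loss-scaler dynamics: it doubles after a long run of finite gradients (16.0 briefly becomes 32.0 above) and halves whenever a non-finite gradient is hit (32.0 back to 16.0, then 16.0 down to 8.0 from batch 6350 on). A minimal PyTorch GradScaler usage showing where that number comes from; the growth_interval is an assumption, as it is not recorded in the log:

```python
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(8, 1).to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(
    init_scale=16.0,       # matches the first grad_scale in this log
    growth_factor=2.0,     # 16.0 -> 32.0 after enough clean steps
    backoff_factor=0.5,    # 32.0 -> 16.0 -> 8.0 on inf/nan gradients
    growth_interval=2000,  # assumed; not recorded in the log
    enabled=device.type == "cuda",  # on CPU the scale stays at 1.0
)
for _ in range(3):
    opt.zero_grad()
    with torch.cuda.amp.autocast(enabled=device.type == "cuda"):
        loss = model(torch.randn(4, 8, device=device)).pow(2).mean()
    scaler.scale(loss).backward()
    scaler.step(opt)
    scaler.update()  # grows or backs off the scale
    print("grad_scale:", scaler.get_scale())  # the value appended to each entry
```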
], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:25,118 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.544e+02 2.876e+02 3.743e+02 6.730e+02, threshold=5.752e+02, percent-clipped=5.0 +2023-02-07 12:22:32,762 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:22:48,281 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.97 vs. limit=5.0 +2023-02-07 12:22:52,673 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9658, 1.5147, 1.7150, 1.4521, 1.0518, 1.4654, 1.7742, 1.5415], + device='cuda:2'), covar=tensor([0.0583, 0.1314, 0.1695, 0.1483, 0.0621, 0.1547, 0.0728, 0.0674], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0113, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 12:22:54,583 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 12:22:55,719 INFO [train.py:901] (2/4) Epoch 27, batch 6600, loss[loss=0.2858, simple_loss=0.3548, pruned_loss=0.1084, over 8464.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.05825, over 1615227.13 frames. ], batch size: 29, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:59,346 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2623, 2.0605, 2.6827, 2.2340, 2.6531, 2.3124, 2.1308, 1.5074], + device='cuda:2'), covar=tensor([0.5792, 0.5447, 0.2142, 0.3817, 0.2611, 0.3295, 0.1932, 0.5702], + device='cuda:2'), in_proj_covar=tensor([0.0966, 0.1021, 0.0832, 0.0992, 0.1029, 0.0930, 0.0773, 0.0852], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 12:23:00,003 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:12,957 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 12:23:17,025 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:29,548 INFO [train.py:901] (2/4) Epoch 27, batch 6650, loss[loss=0.1817, simple_loss=0.2697, pruned_loss=0.04681, over 8187.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2822, pruned_loss=0.05804, over 1614579.45 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:23:34,796 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.571e+02 3.099e+02 3.859e+02 9.745e+02, threshold=6.199e+02, percent-clipped=7.0 +2023-02-07 12:24:03,776 INFO [train.py:901] (2/4) Epoch 27, batch 6700, loss[loss=0.2423, simple_loss=0.3101, pruned_loss=0.08726, over 8504.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05831, over 1615693.34 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:38,627 INFO [train.py:901] (2/4) Epoch 27, batch 6750, loss[loss=0.2026, simple_loss=0.2749, pruned_loss=0.06519, over 7781.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2842, pruned_loss=0.05891, over 1620436.02 frames. 
], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:42,858 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1276, 2.4173, 2.5823, 1.5645, 2.8743, 1.7417, 1.5572, 2.2161], + device='cuda:2'), covar=tensor([0.0909, 0.0468, 0.0399, 0.0912, 0.0487, 0.0998, 0.1049, 0.0602], + device='cuda:2'), in_proj_covar=tensor([0.0470, 0.0409, 0.0362, 0.0457, 0.0393, 0.0552, 0.0401, 0.0438], + device='cuda:2'), out_proj_covar=tensor([1.2446e-04, 1.0608e-04, 9.4260e-05, 1.1943e-04, 1.0299e-04, 1.5394e-04, + 1.0712e-04, 1.1497e-04], device='cuda:2') +2023-02-07 12:24:43,899 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.479e+02 3.006e+02 3.687e+02 6.813e+02, threshold=6.012e+02, percent-clipped=1.0 +2023-02-07 12:25:05,485 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-07 12:25:11,169 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216953.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:12,353 INFO [train.py:901] (2/4) Epoch 27, batch 6800, loss[loss=0.1904, simple_loss=0.2838, pruned_loss=0.04853, over 7913.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2852, pruned_loss=0.05908, over 1624280.75 frames. ], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:24,096 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 12:25:32,325 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:35,736 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 12:25:47,373 INFO [train.py:901] (2/4) Epoch 27, batch 6850, loss[loss=0.2465, simple_loss=0.3138, pruned_loss=0.08959, over 7977.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2852, pruned_loss=0.05928, over 1621012.21 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:52,582 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.408e+02 3.097e+02 3.751e+02 9.876e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-07 12:25:55,361 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217017.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:26:11,011 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 12:26:21,085 INFO [train.py:901] (2/4) Epoch 27, batch 6900, loss[loss=0.2029, simple_loss=0.2737, pruned_loss=0.06599, over 7788.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2849, pruned_loss=0.0593, over 1614389.03 frames. ], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:26:29,751 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:30,513 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:56,816 INFO [train.py:901] (2/4) Epoch 27, batch 6950, loss[loss=0.156, simple_loss=0.2306, pruned_loss=0.04066, over 7717.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2846, pruned_loss=0.05899, over 1616603.18 frames. 
], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:02,040 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.303e+02 2.670e+02 3.410e+02 6.861e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 12:27:15,659 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217132.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:27:19,475 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 12:27:30,833 INFO [train.py:901] (2/4) Epoch 27, batch 7000, loss[loss=0.1769, simple_loss=0.2755, pruned_loss=0.03919, over 8336.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2829, pruned_loss=0.05838, over 1610804.23 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:49,474 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:28:05,137 INFO [train.py:901] (2/4) Epoch 27, batch 7050, loss[loss=0.2149, simple_loss=0.2868, pruned_loss=0.07147, over 8188.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05852, over 1607767.08 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:28:11,287 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.356e+02 3.046e+02 3.591e+02 8.726e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 12:28:19,539 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8666, 2.0885, 2.1890, 1.3235, 2.3442, 1.6562, 0.7879, 1.9979], + device='cuda:2'), covar=tensor([0.0731, 0.0469, 0.0334, 0.0744, 0.0442, 0.1044, 0.1074, 0.0388], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0408, 0.0363, 0.0458, 0.0393, 0.0550, 0.0401, 0.0439], + device='cuda:2'), out_proj_covar=tensor([1.2496e-04, 1.0575e-04, 9.4613e-05, 1.1965e-04, 1.0285e-04, 1.5345e-04, + 1.0704e-04, 1.1515e-04], device='cuda:2') +2023-02-07 12:28:40,066 INFO [train.py:901] (2/4) Epoch 27, batch 7100, loss[loss=0.2253, simple_loss=0.3006, pruned_loss=0.07503, over 8462.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.05804, over 1607580.68 frames. ], batch size: 27, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:14,491 INFO [train.py:901] (2/4) Epoch 27, batch 7150, loss[loss=0.1901, simple_loss=0.2754, pruned_loss=0.05237, over 7972.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.0577, over 1610873.58 frames. 
], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:18,601 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5246, 2.0023, 3.4181, 1.5097, 1.4826, 3.3994, 0.7243, 1.9824], + device='cuda:2'), covar=tensor([0.1588, 0.1385, 0.0317, 0.1930, 0.2618, 0.0415, 0.1948, 0.1461], + device='cuda:2'), in_proj_covar=tensor([0.0199, 0.0204, 0.0136, 0.0222, 0.0274, 0.0146, 0.0171, 0.0197], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 12:29:19,666 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.444e+02 3.123e+02 4.113e+02 1.134e+03, threshold=6.246e+02, percent-clipped=7.0 +2023-02-07 12:29:27,891 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:29,713 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:45,931 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:49,818 INFO [train.py:901] (2/4) Epoch 27, batch 7200, loss[loss=0.2097, simple_loss=0.3106, pruned_loss=0.05441, over 8325.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2834, pruned_loss=0.05772, over 1618340.88 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:50,710 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2177, 3.4381, 2.1467, 2.9346, 2.9441, 1.8756, 2.9450, 2.9430], + device='cuda:2'), covar=tensor([0.1686, 0.0410, 0.1304, 0.0770, 0.0724, 0.1606, 0.0988, 0.1170], + device='cuda:2'), in_proj_covar=tensor([0.0363, 0.0244, 0.0345, 0.0316, 0.0306, 0.0350, 0.0353, 0.0326], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-07 12:30:11,979 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217388.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:30:23,036 INFO [train.py:901] (2/4) Epoch 27, batch 7250, loss[loss=0.2142, simple_loss=0.2735, pruned_loss=0.07745, over 7254.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2837, pruned_loss=0.05747, over 1617038.80 frames. 
], batch size: 16, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:23,855 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:28,402 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.296e+02 2.784e+02 3.610e+02 7.832e+02, threshold=5.568e+02, percent-clipped=2.0 +2023-02-07 12:30:28,617 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217413.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:30:33,321 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1955, 1.0299, 1.2979, 1.0271, 0.9839, 1.3109, 0.1357, 0.9221], + device='cuda:2'), covar=tensor([0.1543, 0.1321, 0.0471, 0.0672, 0.2286, 0.0511, 0.1875, 0.1122], + device='cuda:2'), in_proj_covar=tensor([0.0199, 0.0205, 0.0136, 0.0223, 0.0275, 0.0146, 0.0171, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-07 12:30:36,047 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1757, 1.2624, 1.5405, 1.2184, 0.7392, 1.3678, 1.0937, 0.9476], + device='cuda:2'), covar=tensor([0.0652, 0.1216, 0.1668, 0.1550, 0.0601, 0.1417, 0.0757, 0.0742], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 12:30:45,934 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:49,266 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:58,439 INFO [train.py:901] (2/4) Epoch 27, batch 7300, loss[loss=0.1816, simple_loss=0.2672, pruned_loss=0.048, over 7979.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2841, pruned_loss=0.05795, over 1617872.08 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:04,044 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:20,395 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3411, 1.5580, 4.6308, 2.1497, 2.5144, 5.1828, 5.2731, 4.5456], + device='cuda:2'), covar=tensor([0.1183, 0.1954, 0.0215, 0.1818, 0.1176, 0.0167, 0.0394, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0307, 0.0324, 0.0292, 0.0318, 0.0322, 0.0277, 0.0439, 0.0307], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:31:33,102 INFO [train.py:901] (2/4) Epoch 27, batch 7350, loss[loss=0.2026, simple_loss=0.299, pruned_loss=0.05314, over 8331.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2841, pruned_loss=0.05778, over 1617837.72 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:36,629 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:38,492 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.322e+02 2.888e+02 3.768e+02 6.651e+02, threshold=5.777e+02, percent-clipped=4.0 +2023-02-07 12:31:59,836 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-07 12:32:07,059 INFO [train.py:901] (2/4) Epoch 27, batch 7400, loss[loss=0.1913, simple_loss=0.2686, pruned_loss=0.05697, over 7415.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2832, pruned_loss=0.05764, over 1614059.95 frames. ], batch size: 17, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:19,141 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 12:32:23,953 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1082, 1.5250, 1.7825, 1.4213, 1.1722, 1.5766, 1.8212, 1.4917], + device='cuda:2'), covar=tensor([0.0481, 0.1242, 0.1593, 0.1450, 0.0553, 0.1425, 0.0641, 0.0673], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0152, 0.0189, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-07 12:32:28,669 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4893, 1.9530, 3.1006, 1.4129, 2.3008, 1.9751, 1.6583, 2.3380], + device='cuda:2'), covar=tensor([0.2226, 0.2934, 0.0842, 0.5239, 0.2074, 0.3448, 0.2821, 0.2386], + device='cuda:2'), in_proj_covar=tensor([0.0544, 0.0637, 0.0567, 0.0672, 0.0661, 0.0614, 0.0567, 0.0647], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 12:32:40,306 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.81 vs. limit=5.0 +2023-02-07 12:32:42,453 INFO [train.py:901] (2/4) Epoch 27, batch 7450, loss[loss=0.2607, simple_loss=0.3243, pruned_loss=0.09854, over 8516.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2841, pruned_loss=0.05811, over 1616303.88 frames. ], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:47,768 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.478e+02 3.262e+02 4.062e+02 8.102e+02, threshold=6.523e+02, percent-clipped=5.0 +2023-02-07 12:32:58,357 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 12:33:16,123 INFO [train.py:901] (2/4) Epoch 27, batch 7500, loss[loss=0.2049, simple_loss=0.2898, pruned_loss=0.05997, over 8342.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2838, pruned_loss=0.05764, over 1617635.67 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:28,170 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-07 12:33:34,960 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:46,545 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:51,440 INFO [train.py:901] (2/4) Epoch 27, batch 7550, loss[loss=0.2822, simple_loss=0.3556, pruned_loss=0.1044, over 8244.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2843, pruned_loss=0.05783, over 1618234.21 frames. 
], batch size: 24, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:56,745 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.428e+02 3.024e+02 3.911e+02 8.560e+02, threshold=6.047e+02, percent-clipped=1.0 +2023-02-07 12:34:01,684 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6108, 1.4154, 2.8989, 1.3787, 2.2908, 3.0673, 3.2402, 2.6713], + device='cuda:2'), covar=tensor([0.1246, 0.1635, 0.0362, 0.2102, 0.0835, 0.0319, 0.0591, 0.0579], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0322, 0.0291, 0.0317, 0.0319, 0.0276, 0.0437, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:34:03,668 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:09,234 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 12:34:21,845 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:25,186 INFO [train.py:901] (2/4) Epoch 27, batch 7600, loss[loss=0.2241, simple_loss=0.2995, pruned_loss=0.0743, over 8083.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2841, pruned_loss=0.05794, over 1619407.51 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:34:53,129 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.28 vs. limit=5.0 +2023-02-07 12:35:01,502 INFO [train.py:901] (2/4) Epoch 27, batch 7650, loss[loss=0.2133, simple_loss=0.3077, pruned_loss=0.05946, over 8240.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05776, over 1615211.52 frames. ], batch size: 24, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:06,799 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.541e+02 2.896e+02 3.920e+02 6.720e+02, threshold=5.793e+02, percent-clipped=4.0 +2023-02-07 12:35:18,926 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9032, 3.7865, 3.4593, 1.8069, 3.4073, 3.5093, 3.4224, 3.3385], + device='cuda:2'), covar=tensor([0.0871, 0.0655, 0.1066, 0.4507, 0.1098, 0.1167, 0.1309, 0.0887], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0462, 0.0447, 0.0556, 0.0442, 0.0464, 0.0440, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 12:35:35,074 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217854.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:35:35,677 INFO [train.py:901] (2/4) Epoch 27, batch 7700, loss[loss=0.1963, simple_loss=0.2937, pruned_loss=0.04951, over 8205.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2819, pruned_loss=0.05743, over 1614030.70 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:42,374 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:36:05,139 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 12:36:10,562 INFO [train.py:901] (2/4) Epoch 27, batch 7750, loss[loss=0.1889, simple_loss=0.2812, pruned_loss=0.04829, over 8626.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.05741, over 1615380.86 frames. 
], batch size: 31, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:14,308 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 12:36:15,959 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.515e+02 3.033e+02 3.634e+02 8.452e+02, threshold=6.066e+02, percent-clipped=4.0 +2023-02-07 12:36:18,817 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217916.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:36:27,440 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2211, 1.7303, 4.0984, 1.7575, 2.6662, 4.5621, 4.9212, 3.4835], + device='cuda:2'), covar=tensor([0.1627, 0.2246, 0.0453, 0.2672, 0.1279, 0.0353, 0.0642, 0.1179], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0322, 0.0290, 0.0317, 0.0318, 0.0275, 0.0435, 0.0304], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:36:45,569 INFO [train.py:901] (2/4) Epoch 27, batch 7800, loss[loss=0.2132, simple_loss=0.284, pruned_loss=0.07116, over 8297.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2809, pruned_loss=0.05699, over 1614597.22 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:55,088 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:04,147 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2434, 3.1309, 2.9063, 1.5801, 2.8333, 2.9318, 2.8044, 2.8579], + device='cuda:2'), covar=tensor([0.1076, 0.0890, 0.1333, 0.4814, 0.1160, 0.1277, 0.1698, 0.1014], + device='cuda:2'), in_proj_covar=tensor([0.0538, 0.0461, 0.0446, 0.0554, 0.0440, 0.0464, 0.0438, 0.0405], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 12:37:19,648 INFO [train.py:901] (2/4) Epoch 27, batch 7850, loss[loss=0.2135, simple_loss=0.3015, pruned_loss=0.06278, over 8355.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2819, pruned_loss=0.0579, over 1614985.65 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:37:24,959 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.277e+02 2.828e+02 3.912e+02 8.712e+02, threshold=5.655e+02, percent-clipped=7.0 +2023-02-07 12:37:25,329 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 12:37:33,507 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:44,209 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6099, 1.9645, 2.9299, 1.5020, 2.2457, 2.0214, 1.6826, 2.2148], + device='cuda:2'), covar=tensor([0.2038, 0.2748, 0.0906, 0.4812, 0.1988, 0.3369, 0.2555, 0.2305], + device='cuda:2'), in_proj_covar=tensor([0.0541, 0.0635, 0.0565, 0.0671, 0.0660, 0.0614, 0.0565, 0.0644], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-07 12:37:52,840 INFO [train.py:901] (2/4) Epoch 27, batch 7900, loss[loss=0.1929, simple_loss=0.2728, pruned_loss=0.05646, over 8238.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2811, pruned_loss=0.05751, over 1610510.24 frames. 
], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:38:26,202 INFO [train.py:901] (2/4) Epoch 27, batch 7950, loss[loss=0.1797, simple_loss=0.2623, pruned_loss=0.04858, over 7978.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2815, pruned_loss=0.05782, over 1608851.95 frames. ], batch size: 21, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:38:29,870 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2026, 2.0762, 2.5951, 2.2059, 2.5938, 2.3145, 2.1128, 1.4913], + device='cuda:2'), covar=tensor([0.5812, 0.4946, 0.2269, 0.4001, 0.2686, 0.3343, 0.1930, 0.5862], + device='cuda:2'), in_proj_covar=tensor([0.0960, 0.1019, 0.0827, 0.0988, 0.1022, 0.0926, 0.0766, 0.0849], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-07 12:38:31,699 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.553e+02 3.230e+02 4.059e+02 8.354e+02, threshold=6.459e+02, percent-clipped=5.0 +2023-02-07 12:38:35,246 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:37,411 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:50,500 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:51,663 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8205, 1.3199, 2.8369, 1.4113, 2.2643, 3.0709, 3.2324, 2.6155], + device='cuda:2'), covar=tensor([0.1092, 0.1802, 0.0363, 0.2132, 0.0877, 0.0309, 0.0668, 0.0595], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0323, 0.0291, 0.0318, 0.0320, 0.0276, 0.0437, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:38:51,965 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 12:38:53,604 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:59,466 INFO [train.py:901] (2/4) Epoch 27, batch 8000, loss[loss=0.2332, simple_loss=0.3057, pruned_loss=0.08038, over 8326.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2829, pruned_loss=0.05843, over 1612745.65 frames. ], batch size: 25, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:05,387 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1997, 1.4944, 3.4951, 1.4756, 2.4982, 3.8249, 3.9689, 3.2823], + device='cuda:2'), covar=tensor([0.1093, 0.2001, 0.0331, 0.2233, 0.1122, 0.0258, 0.0486, 0.0597], + device='cuda:2'), in_proj_covar=tensor([0.0305, 0.0323, 0.0291, 0.0317, 0.0320, 0.0276, 0.0437, 0.0305], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-07 12:39:29,218 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:32,345 INFO [train.py:901] (2/4) Epoch 27, batch 8050, loss[loss=0.1852, simple_loss=0.2827, pruned_loss=0.04385, over 8464.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05808, over 1609024.61 frames. 
], batch size: 25, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:38,070 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.285e+02 2.948e+02 3.498e+02 7.136e+02, threshold=5.897e+02, percent-clipped=2.0 +2023-02-07 12:39:46,228 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:48,246 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218228.0, num_to_drop=0, layers_to_drop=set() diff --git a/log/log-train-2023-02-05-17-58-35-3 b/log/log-train-2023-02-05-17-58-35-3 new file mode 100644 index 0000000000000000000000000000000000000000..de124ea9c537ff0e637e4fe9ec1360338c5da856 --- /dev/null +++ b/log/log-train-2023-02-05-17-58-35-3 @@ -0,0 +1,24969 @@ +2023-02-05 17:58:35,366 INFO [train.py:973] (3/4) Training started +2023-02-05 17:58:35,367 INFO [train.py:983] (3/4) Device: cuda:3 +2023-02-05 17:58:35,412 INFO [train.py:992] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n07', 'IP address': '10.1.7.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-05 17:58:35,412 INFO [train.py:994] (3/4) About to create model +2023-02-05 17:58:36,053 INFO [zipformer.py:402] (3/4) At encoder stack 4, which has downsampling_factor=2, we will combine the 
outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-05 17:58:36,065 INFO [train.py:998] (3/4) Number of model parameters: 20697573 +2023-02-05 17:58:51,146 INFO [train.py:1013] (3/4) Using DDP +2023-02-05 17:58:51,428 INFO [asr_datamodule.py:420] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-05 17:58:52,644 INFO [asr_datamodule.py:224] (3/4) Enable MUSAN +2023-02-05 17:58:52,645 INFO [asr_datamodule.py:225] (3/4) About to get Musan cuts +2023-02-05 17:58:54,366 INFO [asr_datamodule.py:249] (3/4) Enable SpecAugment +2023-02-05 17:58:54,366 INFO [asr_datamodule.py:250] (3/4) Time warp factor: 80 +2023-02-05 17:58:54,366 INFO [asr_datamodule.py:260] (3/4) Num frame mask: 10 +2023-02-05 17:58:54,366 INFO [asr_datamodule.py:273] (3/4) About to create train dataset +2023-02-05 17:58:54,366 INFO [asr_datamodule.py:300] (3/4) Using DynamicBucketingSampler. +2023-02-05 17:58:54,386 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:58:56,573 INFO [asr_datamodule.py:316] (3/4) About to create train dataloader +2023-02-05 17:58:56,573 INFO [asr_datamodule.py:430] (3/4) About to get dev-clean cuts +2023-02-05 17:58:56,574 INFO [asr_datamodule.py:437] (3/4) About to get dev-other cuts +2023-02-05 17:58:56,575 INFO [asr_datamodule.py:347] (3/4) About to create dev dataset +2023-02-05 17:58:56,929 INFO [asr_datamodule.py:364] (3/4) About to create dev dataloader +2023-02-05 17:59:06,777 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 17:59:11,983 INFO [train.py:901] (3/4) Epoch 1, batch 0, loss[loss=7.184, simple_loss=6.498, pruned_loss=6.838, over 7431.00 frames. ], tot_loss[loss=7.184, simple_loss=6.498, pruned_loss=6.838, over 7431.00 frames. ], batch size: 17, lr: 2.50e-02, grad_scale: 2.0 +2023-02-05 17:59:11,983 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 17:59:24,177 INFO [train.py:935] (3/4) Epoch 1, validation: loss=6.888, simple_loss=6.229, pruned_loss=6.575, over 944034.00 frames. +2023-02-05 17:59:24,178 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6171MB +2023-02-05 17:59:31,384 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=3.87 vs. limit=2.0 +2023-02-05 17:59:37,909 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 17:59:48,688 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=5.39 vs. limit=2.0 +2023-02-05 17:59:55,491 INFO [train.py:901] (3/4) Epoch 1, batch 50, loss[loss=1.185, simple_loss=1.048, pruned_loss=1.223, over 7546.00 frames. ], tot_loss[loss=2.18, simple_loss=1.971, pruned_loss=2.007, over 368825.90 frames. 
], batch size: 18, lr: 2.75e-02, grad_scale: 0.25 +2023-02-05 17:59:56,195 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:00:07,031 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3845, 4.3773, 4.3454, 4.3800, 4.3797, 4.3874, 4.3868, 4.3708], + device='cuda:3'), covar=tensor([0.0022, 0.0027, 0.0016, 0.0022, 0.0032, 0.0022, 0.0017, 0.0014], + device='cuda:3'), in_proj_covar=tensor([0.0013, 0.0013, 0.0013, 0.0014, 0.0014, 0.0013, 0.0014, 0.0013], + device='cuda:3'), out_proj_covar=tensor([8.6881e-06, 9.0252e-06, 8.9078e-06, 8.9464e-06, 8.9258e-06, 8.9722e-06, + 8.8412e-06, 8.7866e-06], device='cuda:3') +2023-02-05 18:00:11,300 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 18:00:13,765 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:28,700 INFO [train.py:901] (3/4) Epoch 1, batch 100, loss[loss=1.077, simple_loss=0.9161, pruned_loss=1.267, over 7532.00 frames. ], tot_loss[loss=1.643, simple_loss=1.464, pruned_loss=1.614, over 644635.68 frames. ], batch size: 18, lr: 3.00e-02, grad_scale: 0.0625 +2023-02-05 18:00:28,817 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:00:31,818 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=10.81 vs. limit=2.0 +2023-02-05 18:00:32,055 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 18:00:32,813 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.087e+01 6.689e+01 1.862e+02 6.030e+02 6.185e+04, threshold=3.723e+02, percent-clipped=0.0 +2023-02-05 18:01:00,489 INFO [train.py:901] (3/4) Epoch 1, batch 150, loss[loss=0.9989, simple_loss=0.8501, pruned_loss=1.077, over 8143.00 frames. ], tot_loss[loss=1.407, simple_loss=1.238, pruned_loss=1.436, over 859443.32 frames. ], batch size: 22, lr: 3.25e-02, grad_scale: 0.0625 +2023-02-05 18:01:34,597 INFO [train.py:901] (3/4) Epoch 1, batch 200, loss[loss=0.9575, simple_loss=0.8116, pruned_loss=0.9806, over 8245.00 frames. ], tot_loss[loss=1.265, simple_loss=1.102, pruned_loss=1.301, over 1026898.35 frames. ], batch size: 22, lr: 3.50e-02, grad_scale: 0.125 +2023-02-05 18:01:37,990 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.848e+01 5.119e+01 6.630e+01 8.708e+01 3.236e+02, threshold=1.326e+02, percent-clipped=1.0 +2023-02-05 18:01:59,644 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=13.39 vs. limit=5.0 +2023-02-05 18:02:05,438 INFO [train.py:901] (3/4) Epoch 1, batch 250, loss[loss=0.8919, simple_loss=0.7493, pruned_loss=0.8904, over 7788.00 frames. ], tot_loss[loss=1.178, simple_loss=1.018, pruned_loss=1.203, over 1158228.87 frames. ], batch size: 19, lr: 3.75e-02, grad_scale: 0.125 +2023-02-05 18:02:14,834 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
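The recurring `Exclude cut with ID ... from training. Duration: ...` warnings in this log come from a duration filter applied to the training cuts before batching, so that extremely short and extremely long utterances are skipped. Below is a minimal sketch of such a filter, assuming lhotse's `CutSet.filter` API; the exact bounds are an assumption (this excerpt only shows that cuts under about 1 s and over about 25 s were dropped), and `remove_short_and_long_utt` is an illustrative name.

```python
import logging

from lhotse import CutSet


def remove_short_and_long_utt(cuts: CutSet) -> CutSet:
    """Drop cuts whose duration falls outside the accepted training range.

    The 1 s / 20 s bounds are assumptions for illustration; this log only
    shows that cuts under ~1 s and over ~25 s were excluded.
    """

    def keep(c) -> bool:
        if c.duration < 1.0 or c.duration > 20.0:
            # Mirrors the WARNING lines seen throughout this log.
            logging.warning(
                f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
            )
            return False
        return True

    return cuts.filter(keep)
```

`CutSet.filter` is lazy, so the predicate runs as the dynamic bucketing sampler draws cuts, which would explain why these warnings are interleaved with the per-batch training logs rather than emitted once up front.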
Duration: 28.0944375 +2023-02-05 18:02:21,590 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9300, 2.9300, 2.9300, 2.9300, 2.9300, 2.9300, 2.9300, 2.9300], + device='cuda:3'), covar=tensor([0.0002, 0.0003, 0.0001, 0.0001, 0.0002, 0.0004, 0.0002, 0.0001], + device='cuda:3'), in_proj_covar=tensor([0.0015, 0.0014, 0.0014, 0.0015, 0.0014, 0.0015, 0.0014, 0.0014], + device='cuda:3'), out_proj_covar=tensor([9.6301e-06, 9.5557e-06, 9.5193e-06, 9.2880e-06, 9.6126e-06, 9.4880e-06, + 9.5885e-06, 9.2433e-06], device='cuda:3') +2023-02-05 18:02:22,944 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 18:02:37,912 INFO [train.py:901] (3/4) Epoch 1, batch 300, loss[loss=0.9556, simple_loss=0.7941, pruned_loss=0.9397, over 7643.00 frames. ], tot_loss[loss=1.119, simple_loss=0.9593, pruned_loss=1.132, over 1259830.68 frames. ], batch size: 19, lr: 4.00e-02, grad_scale: 0.25 +2023-02-05 18:02:42,338 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=306.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:02:42,566 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=21.59 vs. limit=5.0 +2023-02-05 18:02:42,690 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.041e+01 5.570e+01 7.201e+01 9.677e+01 1.807e+02, threshold=1.440e+02, percent-clipped=6.0 +2023-02-05 18:02:47,399 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=314.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:02:57,561 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=9.81 vs. limit=5.0 +2023-02-05 18:03:10,250 INFO [train.py:901] (3/4) Epoch 1, batch 350, loss[loss=0.9643, simple_loss=0.794, pruned_loss=0.9303, over 8029.00 frames. ], tot_loss[loss=1.077, simple_loss=0.9168, pruned_loss=1.078, over 1341268.89 frames. ], batch size: 22, lr: 4.25e-02, grad_scale: 0.25 +2023-02-05 18:03:36,858 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.51 vs. limit=5.0 +2023-02-05 18:03:42,305 INFO [train.py:901] (3/4) Epoch 1, batch 400, loss[loss=1.002, simple_loss=0.8226, pruned_loss=0.9335, over 8357.00 frames. ], tot_loss[loss=1.055, simple_loss=0.89, pruned_loss=1.04, over 1410011.35 frames. ], batch size: 24, lr: 4.50e-02, grad_scale: 0.5 +2023-02-05 18:03:44,608 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=405.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:03:45,457 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 4.847e+01 5.714e+01 6.661e+01 8.261e+01 1.252e+02, threshold=1.332e+02, percent-clipped=0.0 +2023-02-05 18:03:55,287 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=421.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:04:11,522 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=445.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:04:15,520 INFO [train.py:901] (3/4) Epoch 1, batch 450, loss[loss=0.9513, simple_loss=0.7764, pruned_loss=0.8665, over 8048.00 frames. ], tot_loss[loss=1.035, simple_loss=0.8677, pruned_loss=1.006, over 1454719.88 frames. ], batch size: 20, lr: 4.75e-02, grad_scale: 0.5 +2023-02-05 18:04:45,727 INFO [train.py:901] (3/4) Epoch 1, batch 500, loss[loss=0.8925, simple_loss=0.7237, pruned_loss=0.7972, over 7657.00 frames. ], tot_loss[loss=1.016, simple_loss=0.8461, pruned_loss=0.9694, over 1485566.27 frames. 
], batch size: 19, lr: 4.99e-02, grad_scale: 1.0 +2023-02-05 18:04:47,786 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.79 vs. limit=2.0 +2023-02-05 18:04:49,477 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.283e+01 6.268e+01 7.626e+01 9.977e+01 2.238e+02, threshold=1.525e+02, percent-clipped=10.0 +2023-02-05 18:05:16,922 INFO [train.py:901] (3/4) Epoch 1, batch 550, loss[loss=0.9715, simple_loss=0.7991, pruned_loss=0.8117, over 8791.00 frames. ], tot_loss[loss=0.9993, simple_loss=0.8289, pruned_loss=0.9331, over 1513812.58 frames. ], batch size: 40, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:22,228 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=560.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:22,436 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=7.56 vs. limit=5.0 +2023-02-05 18:05:34,666 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=580.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:05:39,265 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=586.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:47,855 INFO [train.py:901] (3/4) Epoch 1, batch 600, loss[loss=0.8942, simple_loss=0.7433, pruned_loss=0.7089, over 8467.00 frames. ], tot_loss[loss=0.9847, simple_loss=0.8156, pruned_loss=0.8941, over 1538658.31 frames. ], batch size: 25, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:05:51,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 5.986e+01 8.101e+01 1.064e+02 1.512e+02 3.340e+02, threshold=2.128e+02, percent-clipped=22.0 +2023-02-05 18:05:51,948 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=608.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:05:57,495 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 18:06:15,538 INFO [train.py:901] (3/4) Epoch 1, batch 650, loss[loss=0.786, simple_loss=0.6605, pruned_loss=0.5932, over 7716.00 frames. ], tot_loss[loss=0.9604, simple_loss=0.7963, pruned_loss=0.8466, over 1552882.00 frames. ], batch size: 18, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:20,640 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=658.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:06:31,063 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=677.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:06:44,410 INFO [train.py:901] (3/4) Epoch 1, batch 700, loss[loss=0.761, simple_loss=0.64, pruned_loss=0.5613, over 5925.00 frames. ], tot_loss[loss=0.9358, simple_loss=0.7778, pruned_loss=0.799, over 1568995.66 frames. ], batch size: 13, lr: 4.98e-02, grad_scale: 1.0 +2023-02-05 18:06:45,076 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=702.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:06:46,413 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.18 vs. 
limit=2.0 +2023-02-05 18:06:48,196 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.046e+02 2.109e+02 3.132e+02 4.412e+02 1.990e+03, threshold=6.264e+02, percent-clipped=73.0 +2023-02-05 18:07:14,476 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=749.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:07:15,374 INFO [train.py:901] (3/4) Epoch 1, batch 750, loss[loss=0.7555, simple_loss=0.6401, pruned_loss=0.5379, over 7784.00 frames. ], tot_loss[loss=0.9125, simple_loss=0.7608, pruned_loss=0.7546, over 1581164.66 frames. ], batch size: 19, lr: 4.97e-02, grad_scale: 1.0 +2023-02-05 18:07:25,623 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 18:07:26,836 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=773.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:07:32,338 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 18:07:43,637 INFO [train.py:901] (3/4) Epoch 1, batch 800, loss[loss=0.7988, simple_loss=0.6766, pruned_loss=0.5597, over 8138.00 frames. ], tot_loss[loss=0.8848, simple_loss=0.7405, pruned_loss=0.7095, over 1586213.52 frames. ], batch size: 22, lr: 4.97e-02, grad_scale: 2.0 +2023-02-05 18:07:43,939 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=6.18 vs. limit=5.0 +2023-02-05 18:07:46,610 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.528e+02 3.354e+02 4.455e+02 1.086e+03, threshold=6.708e+02, percent-clipped=4.0 +2023-02-05 18:07:51,296 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=816.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:05,568 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=841.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:11,170 INFO [train.py:901] (3/4) Epoch 1, batch 850, loss[loss=0.7196, simple_loss=0.6155, pruned_loss=0.4867, over 7524.00 frames. ], tot_loss[loss=0.8583, simple_loss=0.7215, pruned_loss=0.6675, over 1595253.51 frames. ], batch size: 18, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:22,425 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=864.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:22,899 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=865.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:08:33,836 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.64 vs. limit=5.0 +2023-02-05 18:08:42,863 INFO [train.py:901] (3/4) Epoch 1, batch 900, loss[loss=0.7063, simple_loss=0.6008, pruned_loss=0.4764, over 7241.00 frames. ], tot_loss[loss=0.8349, simple_loss=0.7047, pruned_loss=0.6307, over 1598131.81 frames. 
], batch size: 16, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:08:46,415 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 3.070e+02 3.818e+02 4.702e+02 7.623e+02, threshold=7.636e+02, percent-clipped=5.0 +2023-02-05 18:08:55,930 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=924.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:08:58,991 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=930.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:10,094 INFO [train.py:901] (3/4) Epoch 1, batch 950, loss[loss=0.6854, simple_loss=0.5888, pruned_loss=0.4482, over 7969.00 frames. ], tot_loss[loss=0.8125, simple_loss=0.6889, pruned_loss=0.5968, over 1606047.71 frames. ], batch size: 21, lr: 4.96e-02, grad_scale: 2.0 +2023-02-05 18:09:10,759 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=952.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:26,439 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 18:09:29,016 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.36 vs. limit=5.0 +2023-02-05 18:09:37,668 INFO [train.py:901] (3/4) Epoch 1, batch 1000, loss[loss=0.751, simple_loss=0.6417, pruned_loss=0.4903, over 8259.00 frames. ], tot_loss[loss=0.7902, simple_loss=0.6728, pruned_loss=0.5655, over 1608023.85 frames. ], batch size: 24, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:09:40,945 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 3.215e+02 4.159e+02 4.799e+02 1.770e+03, threshold=8.319e+02, percent-clipped=6.0 +2023-02-05 18:09:52,920 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1029.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:09:53,905 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 18:09:59,204 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1039.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:02,591 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1045.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:10:05,088 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 18:10:05,577 INFO [train.py:901] (3/4) Epoch 1, batch 1050, loss[loss=0.8104, simple_loss=0.692, pruned_loss=0.524, over 8239.00 frames. ], tot_loss[loss=0.7725, simple_loss=0.6601, pruned_loss=0.5396, over 1613711.50 frames. ], batch size: 24, lr: 4.95e-02, grad_scale: 2.0 +2023-02-05 18:10:07,201 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1054.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:14,079 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1067.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:33,060 INFO [train.py:901] (3/4) Epoch 1, batch 1100, loss[loss=0.6373, simple_loss=0.5579, pruned_loss=0.3912, over 7404.00 frames. ], tot_loss[loss=0.7517, simple_loss=0.6454, pruned_loss=0.5127, over 1617354.67 frames. 
], batch size: 17, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:10:36,094 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.463e+02 4.480e+02 5.452e+02 1.232e+03, threshold=8.959e+02, percent-clipped=3.0 +2023-02-05 18:10:43,739 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1120.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:56,863 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1145.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:10:59,903 INFO [train.py:901] (3/4) Epoch 1, batch 1150, loss[loss=0.656, simple_loss=0.5699, pruned_loss=0.4047, over 6418.00 frames. ], tot_loss[loss=0.7344, simple_loss=0.633, pruned_loss=0.4905, over 1615603.45 frames. ], batch size: 14, lr: 4.94e-02, grad_scale: 2.0 +2023-02-05 18:11:01,598 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 18:11:02,660 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7594, 3.8524, 3.8558, 1.9348, 3.8243, 4.0600, 3.7095, 3.9535], + device='cuda:3'), covar=tensor([0.0441, 0.0563, 0.0483, 0.1643, 0.0540, 0.0465, 0.0508, 0.0497], + device='cuda:3'), in_proj_covar=tensor([0.0038, 0.0032, 0.0035, 0.0048, 0.0037, 0.0035, 0.0038, 0.0039], + device='cuda:3'), out_proj_covar=tensor([2.5354e-05, 2.2330e-05, 2.2442e-05, 3.5247e-05, 2.4419e-05, 2.3636e-05, + 2.5796e-05, 2.5344e-05], device='cuda:3') +2023-02-05 18:11:05,790 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6254, 1.2649, 2.0076, 1.8245, 1.3255, 1.1424, 1.3392, 1.6682], + device='cuda:3'), covar=tensor([0.5609, 1.4936, 0.4080, 0.4683, 1.0158, 1.1708, 1.1662, 0.6630], + device='cuda:3'), in_proj_covar=tensor([0.0054, 0.0074, 0.0046, 0.0051, 0.0074, 0.0078, 0.0081, 0.0063], + device='cuda:3'), out_proj_covar=tensor([3.3258e-05, 5.2402e-05, 2.6211e-05, 2.8161e-05, 4.8208e-05, 5.1034e-05, + 4.9756e-05, 3.8707e-05], device='cuda:3') +2023-02-05 18:11:11,766 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1171.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:11:27,730 INFO [train.py:901] (3/4) Epoch 1, batch 1200, loss[loss=0.5851, simple_loss=0.5283, pruned_loss=0.3364, over 8124.00 frames. ], tot_loss[loss=0.7159, simple_loss=0.6197, pruned_loss=0.4687, over 1609697.87 frames. ], batch size: 22, lr: 4.93e-02, grad_scale: 4.0 +2023-02-05 18:11:30,965 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.424e+02 4.173e+02 5.178e+02 8.029e+02, threshold=8.346e+02, percent-clipped=0.0 +2023-02-05 18:11:32,143 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1209.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:11:53,794 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.98 vs. limit=2.0 +2023-02-05 18:11:56,782 INFO [train.py:901] (3/4) Epoch 1, batch 1250, loss[loss=0.7161, simple_loss=0.6337, pruned_loss=0.4233, over 8503.00 frames. ], tot_loss[loss=0.7029, simple_loss=0.6108, pruned_loss=0.4516, over 1609417.85 frames. 
], batch size: 28, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:05,165 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7196, 3.8904, 3.8571, 2.3739, 3.6634, 4.0576, 3.6041, 3.5916], + device='cuda:3'), covar=tensor([0.0377, 0.0442, 0.0362, 0.1046, 0.0447, 0.0313, 0.0438, 0.0534], + device='cuda:3'), in_proj_covar=tensor([0.0047, 0.0040, 0.0042, 0.0062, 0.0046, 0.0041, 0.0045, 0.0046], + device='cuda:3'), out_proj_covar=tensor([3.1671e-05, 2.7687e-05, 2.6536e-05, 4.6501e-05, 3.0072e-05, 2.6935e-05, + 3.0494e-05, 2.9634e-05], device='cuda:3') +2023-02-05 18:12:21,181 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1295.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:24,257 INFO [train.py:901] (3/4) Epoch 1, batch 1300, loss[loss=0.5912, simple_loss=0.5375, pruned_loss=0.3334, over 7917.00 frames. ], tot_loss[loss=0.6889, simple_loss=0.6012, pruned_loss=0.4348, over 1612269.35 frames. ], batch size: 20, lr: 4.92e-02, grad_scale: 4.0 +2023-02-05 18:12:24,450 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1301.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:27,420 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 3.917e+02 4.747e+02 6.152e+02 9.080e+02, threshold=9.493e+02, percent-clipped=1.0 +2023-02-05 18:12:34,741 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1320.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:36,266 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1323.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:36,766 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1324.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:12:37,911 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1326.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:51,947 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1348.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:12:53,366 INFO [train.py:901] (3/4) Epoch 1, batch 1350, loss[loss=0.7066, simple_loss=0.6087, pruned_loss=0.4288, over 8724.00 frames. ], tot_loss[loss=0.6764, simple_loss=0.5927, pruned_loss=0.4198, over 1612336.91 frames. ], batch size: 34, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:22,443 INFO [train.py:901] (3/4) Epoch 1, batch 1400, loss[loss=0.6738, simple_loss=0.5878, pruned_loss=0.3996, over 7810.00 frames. ], tot_loss[loss=0.668, simple_loss=0.5872, pruned_loss=0.4088, over 1613389.43 frames. ], batch size: 20, lr: 4.91e-02, grad_scale: 4.0 +2023-02-05 18:13:25,824 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.466e+02 4.520e+02 5.912e+02 1.396e+03, threshold=9.040e+02, percent-clipped=6.0 +2023-02-05 18:13:38,639 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0012, 1.2284, 2.5925, 1.4526, 1.6353, 1.6792, 1.1084, 1.4847], + device='cuda:3'), covar=tensor([0.8715, 1.0867, 0.1508, 0.5266, 0.6881, 0.5120, 0.7594, 0.8337], + device='cuda:3'), in_proj_covar=tensor([0.0071, 0.0076, 0.0041, 0.0062, 0.0080, 0.0058, 0.0075, 0.0086], + device='cuda:3'), out_proj_covar=tensor([4.8778e-05, 5.1714e-05, 2.3044e-05, 4.0066e-05, 5.4528e-05, 3.7801e-05, + 4.7841e-05, 5.8963e-05], device='cuda:3') +2023-02-05 18:13:49,601 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. 
limit=2.0 +2023-02-05 18:13:50,939 INFO [train.py:901] (3/4) Epoch 1, batch 1450, loss[loss=0.5099, simple_loss=0.4714, pruned_loss=0.2783, over 7258.00 frames. ], tot_loss[loss=0.6556, simple_loss=0.5786, pruned_loss=0.3956, over 1611615.53 frames. ], batch size: 16, lr: 4.90e-02, grad_scale: 4.0 +2023-02-05 18:13:51,608 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1452.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:13:54,969 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 18:14:21,307 INFO [train.py:901] (3/4) Epoch 1, batch 1500, loss[loss=0.648, simple_loss=0.5863, pruned_loss=0.3634, over 8107.00 frames. ], tot_loss[loss=0.6475, simple_loss=0.5731, pruned_loss=0.3862, over 1607894.44 frames. ], batch size: 23, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:14:24,737 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.084e+02 4.059e+02 4.884e+02 5.820e+02 1.191e+03, threshold=9.769e+02, percent-clipped=4.0 +2023-02-05 18:14:29,250 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1515.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:14:50,474 INFO [train.py:901] (3/4) Epoch 1, batch 1550, loss[loss=0.5724, simple_loss=0.5287, pruned_loss=0.3116, over 7962.00 frames. ], tot_loss[loss=0.6389, simple_loss=0.5672, pruned_loss=0.3768, over 1604494.35 frames. ], batch size: 21, lr: 4.89e-02, grad_scale: 4.0 +2023-02-05 18:15:08,671 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1580.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:10,913 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1584.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:20,766 INFO [train.py:901] (3/4) Epoch 1, batch 1600, loss[loss=0.5907, simple_loss=0.5379, pruned_loss=0.3268, over 8294.00 frames. ], tot_loss[loss=0.6352, simple_loss=0.5651, pruned_loss=0.3711, over 1613402.63 frames. ], batch size: 23, lr: 4.88e-02, grad_scale: 8.0 +2023-02-05 18:15:24,013 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1605.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:24,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.844e+02 4.893e+02 6.465e+02 8.597e+02 2.177e+03, threshold=1.293e+03, percent-clipped=12.0 +2023-02-05 18:15:33,782 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0205, 5.2160, 4.9740, 2.1578, 4.8972, 5.3830, 4.6630, 4.9411], + device='cuda:3'), covar=tensor([0.0420, 0.0367, 0.0301, 0.1949, 0.0269, 0.0478, 0.0319, 0.0370], + device='cuda:3'), in_proj_covar=tensor([0.0065, 0.0054, 0.0060, 0.0097, 0.0060, 0.0057, 0.0061, 0.0058], + device='cuda:3'), out_proj_covar=tensor([4.5513e-05, 3.7738e-05, 4.0761e-05, 7.0688e-05, 4.0835e-05, 3.9221e-05, + 4.1937e-05, 3.8797e-05], device='cuda:3') +2023-02-05 18:15:37,781 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1629.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:15:38,284 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1630.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:15:50,690 INFO [train.py:901] (3/4) Epoch 1, batch 1650, loss[loss=0.592, simple_loss=0.5418, pruned_loss=0.3249, over 8503.00 frames. ], tot_loss[loss=0.6289, simple_loss=0.5618, pruned_loss=0.3636, over 1611015.38 frames. 
], batch size: 26, lr: 4.87e-02, grad_scale: 8.0 +2023-02-05 18:16:21,955 INFO [train.py:901] (3/4) Epoch 1, batch 1700, loss[loss=0.6116, simple_loss=0.5544, pruned_loss=0.3385, over 8253.00 frames. ], tot_loss[loss=0.6192, simple_loss=0.5564, pruned_loss=0.3538, over 1614723.67 frames. ], batch size: 24, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:16:25,341 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.633e+02 4.287e+02 5.230e+02 6.455e+02 2.107e+03, threshold=1.046e+03, percent-clipped=2.0 +2023-02-05 18:16:51,240 INFO [train.py:901] (3/4) Epoch 1, batch 1750, loss[loss=0.5433, simple_loss=0.5023, pruned_loss=0.2938, over 8137.00 frames. ], tot_loss[loss=0.6133, simple_loss=0.5525, pruned_loss=0.3477, over 1613089.86 frames. ], batch size: 22, lr: 4.86e-02, grad_scale: 8.0 +2023-02-05 18:17:18,074 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1796.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:17:21,116 INFO [train.py:901] (3/4) Epoch 1, batch 1800, loss[loss=0.5784, simple_loss=0.5364, pruned_loss=0.3114, over 8472.00 frames. ], tot_loss[loss=0.6067, simple_loss=0.5488, pruned_loss=0.3411, over 1612614.09 frames. ], batch size: 25, lr: 4.85e-02, grad_scale: 8.0 +2023-02-05 18:17:24,721 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.688e+02 4.554e+02 5.596e+02 6.733e+02 1.418e+03, threshold=1.119e+03, percent-clipped=4.0 +2023-02-05 18:17:26,909 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.01 vs. limit=2.0 +2023-02-05 18:17:44,339 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.83 vs. limit=2.0 +2023-02-05 18:17:52,109 INFO [train.py:901] (3/4) Epoch 1, batch 1850, loss[loss=0.607, simple_loss=0.5556, pruned_loss=0.3307, over 8510.00 frames. ], tot_loss[loss=0.6013, simple_loss=0.5456, pruned_loss=0.3357, over 1616464.31 frames. ], batch size: 28, lr: 4.84e-02, grad_scale: 8.0 +2023-02-05 18:17:55,086 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1856.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:17:55,654 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9151, 1.2690, 0.8573, 2.2682, 1.2871, 1.4380, 1.3075, 1.9423], + device='cuda:3'), covar=tensor([0.2760, 0.6968, 1.4274, 0.0945, 0.6216, 0.4200, 0.7004, 0.1784], + device='cuda:3'), in_proj_covar=tensor([0.0179, 0.0210, 0.0311, 0.0135, 0.0214, 0.0201, 0.0248, 0.0177], + device='cuda:3'), out_proj_covar=tensor([1.1794e-04, 1.4496e-04, 2.0223e-04, 9.1296e-05, 1.5002e-04, 1.3122e-04, + 1.6409e-04, 1.1045e-04], device='cuda:3') +2023-02-05 18:18:06,765 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1875.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:13,302 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=1886.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:14,331 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=1888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:21,902 INFO [train.py:901] (3/4) Epoch 1, batch 1900, loss[loss=0.5793, simple_loss=0.5301, pruned_loss=0.3152, over 7969.00 frames. ], tot_loss[loss=0.5939, simple_loss=0.5413, pruned_loss=0.3289, over 1617514.19 frames. 
], batch size: 21, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:25,466 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.326e+02 4.483e+02 5.242e+02 7.443e+02 2.270e+03, threshold=1.048e+03, percent-clipped=7.0 +2023-02-05 18:18:27,930 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=1911.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:18:27,943 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=1911.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:18:30,293 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3864, 1.6197, 4.2089, 1.4383, 2.3730, 2.6044, 1.1403, 1.9916], + device='cuda:3'), covar=tensor([0.4306, 0.5133, 0.0442, 0.3024, 0.3365, 0.2670, 0.4589, 0.4210], + device='cuda:3'), in_proj_covar=tensor([0.0090, 0.0097, 0.0051, 0.0077, 0.0108, 0.0086, 0.0097, 0.0112], + device='cuda:3'), out_proj_covar=tensor([6.2742e-05, 6.6057e-05, 2.9482e-05, 4.9955e-05, 7.4022e-05, 6.0976e-05, + 6.2921e-05, 7.5834e-05], device='cuda:3') +2023-02-05 18:18:37,743 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:18:45,037 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 18:18:52,614 INFO [train.py:901] (3/4) Epoch 1, batch 1950, loss[loss=0.5505, simple_loss=0.5188, pruned_loss=0.2912, over 8358.00 frames. ], tot_loss[loss=0.5879, simple_loss=0.5377, pruned_loss=0.3236, over 1617586.26 frames. ], batch size: 24, lr: 4.83e-02, grad_scale: 8.0 +2023-02-05 18:18:55,553 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 18:19:00,392 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8594, 1.3773, 2.1877, 1.3456, 2.0076, 2.9135, 2.8510, 2.5565], + device='cuda:3'), covar=tensor([0.4364, 0.5856, 0.0986, 0.5193, 0.2389, 0.0733, 0.0508, 0.0723], + device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0184, 0.0101, 0.0181, 0.0144, 0.0089, 0.0082, 0.0102], + device='cuda:3'), out_proj_covar=tensor([1.1815e-04, 1.3210e-04, 6.1524e-05, 1.1884e-04, 1.0173e-04, 5.6628e-05, + 4.7520e-05, 6.2277e-05], device='cuda:3') +2023-02-05 18:19:05,743 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=1973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:19:11,336 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 18:19:23,720 INFO [train.py:901] (3/4) Epoch 1, batch 2000, loss[loss=0.6118, simple_loss=0.5473, pruned_loss=0.3382, over 6986.00 frames. ], tot_loss[loss=0.5834, simple_loss=0.5354, pruned_loss=0.3193, over 1615443.51 frames. ], batch size: 72, lr: 4.82e-02, grad_scale: 8.0 +2023-02-05 18:19:27,541 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.734e+02 4.600e+02 5.655e+02 7.771e+02 1.691e+03, threshold=1.131e+03, percent-clipped=5.0 +2023-02-05 18:19:50,362 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2043.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:19:56,662 INFO [train.py:901] (3/4) Epoch 1, batch 2050, loss[loss=0.4603, simple_loss=0.46, pruned_loss=0.2303, over 8234.00 frames. ], tot_loss[loss=0.5748, simple_loss=0.5304, pruned_loss=0.3124, over 1614412.53 frames. 
], batch size: 22, lr: 4.81e-02, grad_scale: 8.0 +2023-02-05 18:20:05,737 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3255, 1.3984, 2.1804, 1.3407, 2.3645, 2.4101, 2.4312, 2.0753], + device='cuda:3'), covar=tensor([0.2587, 0.2606, 0.0508, 0.2719, 0.0799, 0.0393, 0.0344, 0.0461], + device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0181, 0.0095, 0.0168, 0.0143, 0.0084, 0.0080, 0.0098], + device='cuda:3'), out_proj_covar=tensor([1.1779e-04, 1.2906e-04, 5.8209e-05, 1.1144e-04, 1.0200e-04, 5.3951e-05, + 4.7212e-05, 6.1994e-05], device='cuda:3') +2023-02-05 18:20:21,052 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2088.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:20:29,073 INFO [train.py:901] (3/4) Epoch 1, batch 2100, loss[loss=0.628, simple_loss=0.558, pruned_loss=0.349, over 7087.00 frames. ], tot_loss[loss=0.5683, simple_loss=0.5269, pruned_loss=0.307, over 1614361.90 frames. ], batch size: 71, lr: 4.80e-02, grad_scale: 16.0 +2023-02-05 18:20:32,716 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.532e+02 4.654e+02 5.875e+02 8.240e+02 2.515e+03, threshold=1.175e+03, percent-clipped=11.0 +2023-02-05 18:20:55,902 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3680, 5.6484, 4.7931, 1.6381, 4.6091, 5.1359, 4.6366, 4.7301], + device='cuda:3'), covar=tensor([0.0367, 0.0297, 0.0320, 0.2793, 0.0376, 0.0420, 0.0649, 0.0398], + device='cuda:3'), in_proj_covar=tensor([0.0086, 0.0077, 0.0088, 0.0140, 0.0080, 0.0070, 0.0099, 0.0079], + device='cuda:3'), out_proj_covar=tensor([6.0274e-05, 6.3256e-05, 5.9619e-05, 9.9716e-05, 5.5193e-05, 5.1718e-05, + 7.1618e-05, 5.3931e-05], device='cuda:3') +2023-02-05 18:21:01,657 INFO [train.py:901] (3/4) Epoch 1, batch 2150, loss[loss=0.4852, simple_loss=0.4863, pruned_loss=0.2421, over 7927.00 frames. ], tot_loss[loss=0.5585, simple_loss=0.521, pruned_loss=0.2997, over 1610118.68 frames. ], batch size: 20, lr: 4.79e-02, grad_scale: 16.0 +2023-02-05 18:21:11,753 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2167.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:21:29,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2192.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:21:35,018 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:35,572 INFO [train.py:901] (3/4) Epoch 1, batch 2200, loss[loss=0.6085, simple_loss=0.5605, pruned_loss=0.3282, over 8554.00 frames. ], tot_loss[loss=0.5507, simple_loss=0.5164, pruned_loss=0.2938, over 1611845.39 frames. ], batch size: 34, lr: 4.78e-02, grad_scale: 16.0 +2023-02-05 18:21:39,335 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.979e+02 3.885e+02 5.100e+02 6.280e+02 1.293e+03, threshold=1.020e+03, percent-clipped=3.0 +2023-02-05 18:21:46,995 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:21:55,784 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2232.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:22:07,883 INFO [train.py:901] (3/4) Epoch 1, batch 2250, loss[loss=0.496, simple_loss=0.4792, pruned_loss=0.2564, over 7970.00 frames. ], tot_loss[loss=0.5437, simple_loss=0.5122, pruned_loss=0.2887, over 1613475.54 frames. 
], batch size: 21, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:41,035 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2299.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:22:42,094 INFO [train.py:901] (3/4) Epoch 1, batch 2300, loss[loss=0.4354, simple_loss=0.4306, pruned_loss=0.2201, over 7397.00 frames. ], tot_loss[loss=0.5441, simple_loss=0.514, pruned_loss=0.2879, over 1621204.64 frames. ], batch size: 17, lr: 4.77e-02, grad_scale: 16.0 +2023-02-05 18:22:45,954 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.442e+02 5.272e+02 6.513e+02 7.975e+02 1.884e+03, threshold=1.303e+03, percent-clipped=9.0 +2023-02-05 18:22:51,195 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2315.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:22:56,962 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2324.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 18:23:03,216 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2334.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:09,703 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2344.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:12,276 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2347.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 18:23:14,708 INFO [train.py:901] (3/4) Epoch 1, batch 2350, loss[loss=0.5159, simple_loss=0.5122, pruned_loss=0.2598, over 8318.00 frames. ], tot_loss[loss=0.5389, simple_loss=0.5107, pruned_loss=0.2842, over 1614903.40 frames. ], batch size: 26, lr: 4.76e-02, grad_scale: 16.0 +2023-02-05 18:23:19,256 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2358.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:23,687 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0 +2023-02-05 18:23:26,044 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2369.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 18:23:46,437 INFO [train.py:901] (3/4) Epoch 1, batch 2400, loss[loss=0.4616, simple_loss=0.4667, pruned_loss=0.2283, over 8190.00 frames. ], tot_loss[loss=0.5346, simple_loss=0.5083, pruned_loss=0.2809, over 1619645.01 frames. ], batch size: 23, lr: 4.75e-02, grad_scale: 16.0 +2023-02-05 18:23:50,348 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.591e+02 4.467e+02 5.905e+02 7.151e+02 1.301e+03, threshold=1.181e+03, percent-clipped=0.0 +2023-02-05 18:24:20,801 INFO [train.py:901] (3/4) Epoch 1, batch 2450, loss[loss=0.5529, simple_loss=0.5258, pruned_loss=0.29, over 8479.00 frames. ], tot_loss[loss=0.5318, simple_loss=0.5065, pruned_loss=0.2789, over 1615220.29 frames. 
], batch size: 25, lr: 4.74e-02, grad_scale: 16.0
+2023-02-05 18:24:21,025 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5283, 1.2835, 1.7627, 1.6575, 1.4324, 1.7893, 0.9447, 1.6747],
+ device='cuda:3'), covar=tensor([0.1109, 0.0741, 0.0655, 0.0810, 0.1067, 0.0555, 0.2128, 0.0936],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0083, 0.0080, 0.0085, 0.0096, 0.0074, 0.0125, 0.0109],
+ device='cuda:3'), out_proj_covar=tensor([6.8589e-05, 5.7500e-05, 5.3330e-05, 6.0851e-05, 6.9431e-05, 4.9919e-05,
+ 9.2098e-05, 8.0544e-05], device='cuda:3')
+2023-02-05 18:24:52,768 INFO [train.py:901] (3/4) Epoch 1, batch 2500, loss[loss=0.537, simple_loss=0.5068, pruned_loss=0.2835, over 7821.00 frames. ], tot_loss[loss=0.527, simple_loss=0.5036, pruned_loss=0.2755, over 1615758.41 frames. ], batch size: 20, lr: 4.73e-02, grad_scale: 16.0
+2023-02-05 18:24:56,549 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.099e+02 5.238e+02 6.448e+02 8.237e+02 1.660e+03, threshold=1.290e+03, percent-clipped=6.0
+2023-02-05 18:25:25,593 INFO [train.py:901] (3/4) Epoch 1, batch 2550, loss[loss=0.5541, simple_loss=0.5228, pruned_loss=0.2927, over 8471.00 frames. ], tot_loss[loss=0.5245, simple_loss=0.5023, pruned_loss=0.2736, over 1617792.86 frames. ], batch size: 25, lr: 4.72e-02, grad_scale: 16.0
+2023-02-05 18:25:27,057 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7906, 1.3193, 5.1770, 2.4909, 4.9442, 4.3059, 4.6395, 4.4317],
+ device='cuda:3'), covar=tensor([0.0127, 0.3588, 0.0226, 0.1213, 0.0212, 0.0238, 0.0317, 0.0372],
+ device='cuda:3'), in_proj_covar=tensor([0.0070, 0.0201, 0.0088, 0.0115, 0.0098, 0.0094, 0.0104, 0.0114],
+ device='cuda:3'), out_proj_covar=tensor([4.3529e-05, 1.2420e-04, 5.7263e-05, 7.7078e-05, 5.5557e-05, 5.2458e-05,
+ 6.3758e-05, 6.8137e-05], device='cuda:3')
+2023-02-05 18:25:38,471 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2571.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:25:51,086 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2590.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:25:54,854 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2596.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:25:57,886 INFO [train.py:901] (3/4) Epoch 1, batch 2600, loss[loss=0.4265, simple_loss=0.4226, pruned_loss=0.2152, over 7254.00 frames. ], tot_loss[loss=0.5182, simple_loss=0.4993, pruned_loss=0.2687, over 1619749.52 frames. ], batch size: 16, lr: 4.71e-02, grad_scale: 16.0
+2023-02-05 18:25:59,371 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=2603.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:26:01,610 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.388e+02 4.352e+02 5.534e+02 7.344e+02 1.370e+03, threshold=1.107e+03, percent-clipped=3.0
+2023-02-05 18:26:05,385 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.01 vs. limit=5.0
+2023-02-05 18:26:06,868 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2615.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:26:15,229 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=2628.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:26:21,137 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.03 vs. limit=2.0
+2023-02-05 18:26:31,161 INFO [train.py:901] (3/4) Epoch 1, batch 2650, loss[loss=0.4979, simple_loss=0.503, pruned_loss=0.2465, over 8476.00 frames. ], tot_loss[loss=0.5142, simple_loss=0.4977, pruned_loss=0.2655, over 1617672.68 frames. ], batch size: 29, lr: 4.70e-02, grad_scale: 16.0
+2023-02-05 18:26:31,333 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3625, 1.1462, 4.2103, 2.2496, 3.9024, 3.5498, 3.5841, 3.6872],
+ device='cuda:3'), covar=tensor([0.0140, 0.3543, 0.0190, 0.1033, 0.0229, 0.0247, 0.0335, 0.0308],
+ device='cuda:3'), in_proj_covar=tensor([0.0072, 0.0210, 0.0089, 0.0117, 0.0102, 0.0095, 0.0107, 0.0115],
+ device='cuda:3'), out_proj_covar=tensor([4.3958e-05, 1.2911e-04, 5.7165e-05, 7.9091e-05, 5.8958e-05, 5.2637e-05,
+ 6.5615e-05, 6.8482e-05], device='cuda:3')
+2023-02-05 18:27:03,817 INFO [train.py:901] (3/4) Epoch 1, batch 2700, loss[loss=0.5234, simple_loss=0.5027, pruned_loss=0.2721, over 8343.00 frames. ], tot_loss[loss=0.5096, simple_loss=0.4942, pruned_loss=0.2626, over 1618722.89 frames. ], batch size: 24, lr: 4.69e-02, grad_scale: 16.0
+2023-02-05 18:27:04,571 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=2702.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:27:05,219 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2703.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:27:08,314 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.214e+02 4.351e+02 5.311e+02 6.408e+02 1.471e+03, threshold=1.062e+03, percent-clipped=4.0
+2023-02-05 18:27:31,461 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5180, 1.6228, 1.1530, 1.8551, 1.5725, 1.5459, 1.3527, 1.4834],
+ device='cuda:3'), covar=tensor([0.1525, 0.1525, 0.2726, 0.0825, 0.2290, 0.1706, 0.3239, 0.1650],
+ device='cuda:3'), in_proj_covar=tensor([0.0111, 0.0105, 0.0145, 0.0087, 0.0126, 0.0106, 0.0153, 0.0111],
+ device='cuda:3'), out_proj_covar=tensor([7.9057e-05, 7.3684e-05, 9.7280e-05, 6.0446e-05, 9.0541e-05, 7.2948e-05,
+ 1.0652e-04, 7.4015e-05], device='cuda:3')
+2023-02-05 18:27:32,088 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7568, 1.4946, 2.9518, 1.6669, 2.1490, 3.6152, 3.4082, 3.2476],
+ device='cuda:3'), covar=tensor([0.2688, 0.2998, 0.0377, 0.2592, 0.1440, 0.0219, 0.0257, 0.0375],
+ device='cuda:3'), in_proj_covar=tensor([0.0197, 0.0216, 0.0115, 0.0195, 0.0168, 0.0094, 0.0092, 0.0120],
+ device='cuda:3'), out_proj_covar=tensor([1.4425e-04, 1.5460e-04, 8.7834e-05, 1.3309e-04, 1.3085e-04, 6.7281e-05,
+ 6.7995e-05, 8.2497e-05], device='cuda:3')
+2023-02-05 18:27:37,287 INFO [train.py:901] (3/4) Epoch 1, batch 2750, loss[loss=0.4934, simple_loss=0.4769, pruned_loss=0.255, over 8086.00 frames. ], tot_loss[loss=0.5068, simple_loss=0.4927, pruned_loss=0.2605, over 1617502.09 frames. ], batch size: 21, lr: 4.68e-02, grad_scale: 16.0
+2023-02-05 18:28:11,556 INFO [train.py:901] (3/4) Epoch 1, batch 2800, loss[loss=0.5253, simple_loss=0.514, pruned_loss=0.2683, over 8679.00 frames. ], tot_loss[loss=0.5064, simple_loss=0.4929, pruned_loss=0.26, over 1613714.26 frames. ], batch size: 39, lr: 4.67e-02, grad_scale: 16.0
+2023-02-05 18:28:15,256 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.916e+02 4.898e+02 6.530e+02 2.276e+03, threshold=9.797e+02, percent-clipped=2.0
+2023-02-05 18:28:21,893 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=2817.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:28:28,487 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.73 vs. limit=5.0
+2023-02-05 18:28:38,536 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2842.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:28:44,024 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.97 vs. limit=2.0
+2023-02-05 18:28:44,180 INFO [train.py:901] (3/4) Epoch 1, batch 2850, loss[loss=0.515, simple_loss=0.5178, pruned_loss=0.2561, over 8352.00 frames. ], tot_loss[loss=0.5062, simple_loss=0.4932, pruned_loss=0.2597, over 1619738.62 frames. ], batch size: 24, lr: 4.66e-02, grad_scale: 16.0
+2023-02-05 18:29:18,789 INFO [train.py:901] (3/4) Epoch 1, batch 2900, loss[loss=0.5044, simple_loss=0.4964, pruned_loss=0.2562, over 8712.00 frames. ], tot_loss[loss=0.5067, simple_loss=0.4935, pruned_loss=0.26, over 1615680.85 frames. ], batch size: 39, lr: 4.65e-02, grad_scale: 16.0
+2023-02-05 18:29:22,676 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.417e+02 4.413e+02 5.664e+02 7.338e+02 1.737e+03, threshold=1.133e+03, percent-clipped=8.0
+2023-02-05 18:29:48,922 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-05 18:29:52,158 INFO [train.py:901] (3/4) Epoch 1, batch 2950, loss[loss=0.5386, simple_loss=0.5289, pruned_loss=0.2741, over 8244.00 frames. ], tot_loss[loss=0.5021, simple_loss=0.4904, pruned_loss=0.257, over 1615322.51 frames. ], batch size: 24, lr: 4.64e-02, grad_scale: 16.0
+2023-02-05 18:29:54,924 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=2955.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:30:25,905 INFO [train.py:901] (3/4) Epoch 1, batch 3000, loss[loss=0.508, simple_loss=0.498, pruned_loss=0.259, over 8338.00 frames. ], tot_loss[loss=0.5012, simple_loss=0.4902, pruned_loss=0.2561, over 1616735.87 frames. ], batch size: 25, lr: 4.63e-02, grad_scale: 16.0
+2023-02-05 18:30:25,905 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-05 18:30:40,786 INFO [train.py:935] (3/4) Epoch 1, validation: loss=0.4518, simple_loss=0.5106, pruned_loss=0.1966, over 944034.00 frames.
+2023-02-05 18:30:40,787 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6178MB
+2023-02-05 18:30:44,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.692e+02 4.264e+02 5.642e+02 7.781e+02 1.743e+03, threshold=1.128e+03, percent-clipped=6.0
+2023-02-05 18:31:07,322 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3037.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:31:13,911 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:31:16,515 INFO [train.py:901] (3/4) Epoch 1, batch 3050, loss[loss=0.4737, simple_loss=0.4838, pruned_loss=0.2318, over 8427.00 frames. ], tot_loss[loss=0.4963, simple_loss=0.4873, pruned_loss=0.2527, over 1613437.01 frames. ], batch size: 27, lr: 4.62e-02, grad_scale: 16.0
+2023-02-05 18:31:30,880 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3073.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:31:47,515 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3098.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:31:48,194 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4441, 0.8959, 2.6349, 1.4056, 1.6479, 1.4928, 0.8810, 2.7416],
+ device='cuda:3'), covar=tensor([0.0938, 0.0699, 0.0266, 0.0683, 0.0958, 0.0558, 0.0952, 0.0322],
+ device='cuda:3'), in_proj_covar=tensor([0.0095, 0.0080, 0.0068, 0.0090, 0.0079, 0.0079, 0.0100, 0.0073],
+ device='cuda:3'), out_proj_covar=tensor([6.8089e-05, 5.2254e-05, 4.5432e-05, 6.6716e-05, 5.4866e-05, 5.1492e-05,
+ 6.8975e-05, 4.7165e-05], device='cuda:3')
+2023-02-05 18:31:49,294 INFO [train.py:901] (3/4) Epoch 1, batch 3100, loss[loss=0.4929, simple_loss=0.4809, pruned_loss=0.2524, over 7661.00 frames. ], tot_loss[loss=0.4963, simple_loss=0.4866, pruned_loss=0.253, over 1612463.03 frames. ], batch size: 19, lr: 4.61e-02, grad_scale: 16.0
+2023-02-05 18:31:53,107 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.570e+02 4.257e+02 6.045e+02 8.311e+02 2.838e+03, threshold=1.209e+03, percent-clipped=13.0
+2023-02-05 18:32:23,218 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0
+2023-02-05 18:32:24,771 INFO [train.py:901] (3/4) Epoch 1, batch 3150, loss[loss=0.3752, simple_loss=0.3925, pruned_loss=0.179, over 7432.00 frames. ], tot_loss[loss=0.4929, simple_loss=0.4844, pruned_loss=0.2507, over 1609144.24 frames. ], batch size: 17, lr: 4.60e-02, grad_scale: 16.0
+2023-02-05 18:32:32,236 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3162.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:32:47,673 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3186.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:32:57,082 INFO [train.py:901] (3/4) Epoch 1, batch 3200, loss[loss=0.4077, simple_loss=0.4101, pruned_loss=0.2026, over 7420.00 frames. ], tot_loss[loss=0.4904, simple_loss=0.4829, pruned_loss=0.249, over 1610326.04 frames. ], batch size: 17, lr: 4.59e-02, grad_scale: 16.0
+2023-02-05 18:33:00,911 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 4.232e+02 5.266e+02 6.948e+02 2.778e+03, threshold=1.053e+03, percent-clipped=2.0
+2023-02-05 18:33:32,107 INFO [train.py:901] (3/4) Epoch 1, batch 3250, loss[loss=0.4871, simple_loss=0.4681, pruned_loss=0.253, over 7696.00 frames. ], tot_loss[loss=0.4905, simple_loss=0.4825, pruned_loss=0.2493, over 1607104.94 frames. ], batch size: 18, lr: 4.58e-02, grad_scale: 16.0
+2023-02-05 18:34:04,438 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3299.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:34:05,630 INFO [train.py:901] (3/4) Epoch 1, batch 3300, loss[loss=0.5583, simple_loss=0.5398, pruned_loss=0.2883, over 8328.00 frames. ], tot_loss[loss=0.4855, simple_loss=0.4799, pruned_loss=0.2455, over 1610314.07 frames. ], batch size: 25, lr: 4.57e-02, grad_scale: 16.0
+2023-02-05 18:34:05,848 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:34:08,973 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3306.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:34:09,426 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 4.334e+02 5.638e+02 7.160e+02 2.697e+03, threshold=1.128e+03, percent-clipped=10.0
+2023-02-05 18:34:39,416 INFO [train.py:901] (3/4) Epoch 1, batch 3350, loss[loss=0.4671, simple_loss=0.4582, pruned_loss=0.238, over 7934.00 frames. ], tot_loss[loss=0.4839, simple_loss=0.4795, pruned_loss=0.2442, over 1611936.14 frames. ], batch size: 20, lr: 4.56e-02, grad_scale: 16.0
+2023-02-05 18:35:01,953 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3381.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:35:14,980 INFO [train.py:901] (3/4) Epoch 1, batch 3400, loss[loss=0.5149, simple_loss=0.5083, pruned_loss=0.2608, over 8355.00 frames. ], tot_loss[loss=0.4807, simple_loss=0.4778, pruned_loss=0.2418, over 1611166.00 frames. ], batch size: 24, lr: 4.55e-02, grad_scale: 16.0
+2023-02-05 18:35:19,028 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.486e+02 3.960e+02 5.068e+02 6.311e+02 1.481e+03, threshold=1.014e+03, percent-clipped=3.0
+2023-02-05 18:35:23,821 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3414.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:35:26,545 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3418.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:35:36,490 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.18 vs. limit=5.0
+2023-02-05 18:35:43,706 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3443.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:35:48,746 INFO [train.py:901] (3/4) Epoch 1, batch 3450, loss[loss=0.4703, simple_loss=0.4873, pruned_loss=0.2267, over 8252.00 frames. ], tot_loss[loss=0.4775, simple_loss=0.4753, pruned_loss=0.2398, over 1613960.23 frames. ], batch size: 24, lr: 4.54e-02, grad_scale: 16.0
+2023-02-05 18:35:57,466 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0587, 0.9868, 3.0017, 1.4951, 2.6993, 2.4075, 2.6305, 2.5472],
+ device='cuda:3'), covar=tensor([0.0289, 0.3590, 0.0286, 0.1334, 0.0459, 0.0505, 0.0452, 0.0547],
+ device='cuda:3'), in_proj_covar=tensor([0.0084, 0.0254, 0.0110, 0.0146, 0.0127, 0.0127, 0.0124, 0.0141],
+ device='cuda:3'), out_proj_covar=tensor([5.2483e-05, 1.4952e-04, 7.1997e-05, 9.8125e-05, 7.6931e-05, 7.7708e-05,
+ 7.8837e-05, 9.0307e-05], device='cuda:3')
+2023-02-05 18:35:59,195 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0
+2023-02-05 18:36:05,372 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.14 vs. limit=2.0
+2023-02-05 18:36:21,034 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3496.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:36:24,212 INFO [train.py:901] (3/4) Epoch 1, batch 3500, loss[loss=0.4919, simple_loss=0.4975, pruned_loss=0.2432, over 8189.00 frames. ], tot_loss[loss=0.4783, simple_loss=0.4761, pruned_loss=0.2402, over 1610577.23 frames. ], batch size: 23, lr: 4.53e-02, grad_scale: 16.0
+2023-02-05 18:36:28,198 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.405e+02 5.773e+02 7.537e+02 2.537e+03, threshold=1.155e+03, percent-clipped=7.0
+2023-02-05 18:36:28,428 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8372, 2.0743, 1.7721, 2.7862, 1.4673, 1.4576, 1.6959, 2.0704],
+ device='cuda:3'), covar=tensor([0.1596, 0.1713, 0.1666, 0.0305, 0.2483, 0.2105, 0.2352, 0.1554],
+ device='cuda:3'), in_proj_covar=tensor([0.0240, 0.0234, 0.0224, 0.0150, 0.0292, 0.0269, 0.0313, 0.0228],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0003, 0.0002],
+ device='cuda:3')
+2023-02-05 18:36:36,234 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-05 18:36:57,810 INFO [train.py:901] (3/4) Epoch 1, batch 3550, loss[loss=0.433, simple_loss=0.4401, pruned_loss=0.2129, over 8281.00 frames. ], tot_loss[loss=0.4757, simple_loss=0.4746, pruned_loss=0.2384, over 1607346.62 frames. ], batch size: 23, lr: 4.51e-02, grad_scale: 16.0
+2023-02-05 18:37:02,112 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3557.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:37:07,182 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3564.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:37:19,184 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3582.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:37:33,301 INFO [train.py:901] (3/4) Epoch 1, batch 3600, loss[loss=0.3964, simple_loss=0.4228, pruned_loss=0.185, over 7543.00 frames. ], tot_loss[loss=0.4834, simple_loss=0.4792, pruned_loss=0.2439, over 1603604.70 frames. ], batch size: 18, lr: 4.50e-02, grad_scale: 16.0
+2023-02-05 18:37:37,961 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.853e+02 4.660e+02 6.337e+02 8.772e+02 4.832e+03, threshold=1.267e+03, percent-clipped=11.0
+2023-02-05 18:37:54,512 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.12 vs. limit=5.0
+2023-02-05 18:38:06,596 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3650.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:38:07,050 INFO [train.py:901] (3/4) Epoch 1, batch 3650, loss[loss=0.5058, simple_loss=0.4666, pruned_loss=0.2725, over 7259.00 frames. ], tot_loss[loss=0.4788, simple_loss=0.4765, pruned_loss=0.2405, over 1606863.65 frames. ], batch size: 16, lr: 4.49e-02, grad_scale: 16.0
+2023-02-05 18:38:19,634 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:38:23,765 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6597, 1.5858, 2.1951, 1.6964, 1.6458, 2.2428, 0.7692, 1.2260],
+ device='cuda:3'), covar=tensor([0.0718, 0.0691, 0.0477, 0.0554, 0.0752, 0.0268, 0.1755, 0.0973],
+ device='cuda:3'), in_proj_covar=tensor([0.0128, 0.0125, 0.0108, 0.0112, 0.0126, 0.0095, 0.0161, 0.0126],
+ device='cuda:3'), out_proj_covar=tensor([9.2370e-05, 9.6245e-05, 7.7311e-05, 8.1404e-05, 9.4306e-05, 6.5028e-05,
+ 1.2332e-04, 1.0062e-04], device='cuda:3')
+2023-02-05 18:38:31,953 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4851, 1.3170, 2.7870, 1.2494, 2.0346, 3.2257, 2.9703, 2.7078],
+ device='cuda:3'), covar=tensor([0.2028, 0.2463, 0.0386, 0.2674, 0.1042, 0.0181, 0.0244, 0.0501],
+ device='cuda:3'), in_proj_covar=tensor([0.0213, 0.0240, 0.0134, 0.0231, 0.0176, 0.0097, 0.0099, 0.0137],
+ device='cuda:3'), out_proj_covar=tensor([1.6267e-04, 1.7835e-04, 1.1150e-04, 1.6325e-04, 1.4670e-04, 7.7567e-05,
+ 8.5671e-05, 1.0787e-04], device='cuda:3')
+2023-02-05 18:38:36,829 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3694.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:38:37,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3695.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:38:40,429 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-05 18:38:41,127 INFO [train.py:901] (3/4) Epoch 1, batch 3700, loss[loss=0.483, simple_loss=0.4798, pruned_loss=0.2431, over 8088.00 frames. ], tot_loss[loss=0.4806, simple_loss=0.4779, pruned_loss=0.2416, over 1602276.50 frames. ], batch size: 21, lr: 4.48e-02, grad_scale: 16.0
+2023-02-05 18:38:45,134 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.178e+02 4.586e+02 6.278e+02 1.050e+03 3.437e+03, threshold=1.256e+03, percent-clipped=14.0
+2023-02-05 18:38:46,738 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.3200, 0.8058, 0.9220, 0.1005, 0.5847, 0.7859, 0.0838, 0.9193],
+ device='cuda:3'), covar=tensor([0.0716, 0.0502, 0.0372, 0.0966, 0.0526, 0.0612, 0.0952, 0.0369],
+ device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0077, 0.0069, 0.0096, 0.0074, 0.0081, 0.0101, 0.0074],
+ device='cuda:3'), out_proj_covar=tensor([6.7548e-05, 5.0918e-05, 4.7601e-05, 7.3518e-05, 5.4900e-05, 5.6439e-05,
+ 7.2397e-05, 4.8028e-05], device='cuda:3')
+2023-02-05 18:39:17,454 INFO [train.py:901] (3/4) Epoch 1, batch 3750, loss[loss=0.4552, simple_loss=0.4448, pruned_loss=0.2328, over 7432.00 frames. ], tot_loss[loss=0.4767, simple_loss=0.4752, pruned_loss=0.2391, over 1600809.10 frames. ], batch size: 17, lr: 4.47e-02, grad_scale: 16.0
+2023-02-05 18:39:18,342 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=3752.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:39:27,126 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=3765.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:39:35,214 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=3777.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:39:51,681 INFO [train.py:901] (3/4) Epoch 1, batch 3800, loss[loss=0.4524, simple_loss=0.4562, pruned_loss=0.2243, over 7809.00 frames. ], tot_loss[loss=0.4752, simple_loss=0.4744, pruned_loss=0.238, over 1604076.45 frames. ], batch size: 20, lr: 4.46e-02, grad_scale: 16.0
+2023-02-05 18:39:55,874 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.457e+02 5.389e+02 6.979e+02 9.091e+02 1.609e+03, threshold=1.396e+03, percent-clipped=5.0
+2023-02-05 18:40:12,368 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-05 18:40:16,371 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.06 vs. limit=2.0
+2023-02-05 18:40:27,875 INFO [train.py:901] (3/4) Epoch 1, batch 3850, loss[loss=0.4851, simple_loss=0.4695, pruned_loss=0.2504, over 7648.00 frames. ], tot_loss[loss=0.4745, simple_loss=0.4739, pruned_loss=0.2375, over 1608124.55 frames. ], batch size: 19, lr: 4.45e-02, grad_scale: 16.0
+2023-02-05 18:40:46,564 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-05 18:41:01,009 INFO [train.py:901] (3/4) Epoch 1, batch 3900, loss[loss=0.4231, simple_loss=0.4424, pruned_loss=0.2019, over 7926.00 frames. ], tot_loss[loss=0.4717, simple_loss=0.4716, pruned_loss=0.2359, over 1606511.92 frames. ], batch size: 20, lr: 4.44e-02, grad_scale: 16.0
+2023-02-05 18:41:05,008 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.102e+02 5.552e+02 7.100e+02 9.321e+02 1.906e+03, threshold=1.420e+03, percent-clipped=2.0
+2023-02-05 18:41:05,745 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=3908.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:41:17,811 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0
+2023-02-05 18:41:23,676 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0
+2023-02-05 18:41:25,613 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.82 vs. limit=5.0
+2023-02-05 18:41:29,965 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=3944.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:41:35,349 INFO [train.py:901] (3/4) Epoch 1, batch 3950, loss[loss=0.4516, simple_loss=0.4625, pruned_loss=0.2203, over 8664.00 frames. ], tot_loss[loss=0.4691, simple_loss=0.4699, pruned_loss=0.2341, over 1604118.43 frames. ], batch size: 34, lr: 4.43e-02, grad_scale: 16.0
+2023-02-05 18:42:10,921 INFO [train.py:901] (3/4) Epoch 1, batch 4000, loss[loss=0.5744, simple_loss=0.5315, pruned_loss=0.3086, over 7270.00 frames. ], tot_loss[loss=0.4693, simple_loss=0.4704, pruned_loss=0.2341, over 1606912.34 frames. ], batch size: 73, lr: 4.42e-02, grad_scale: 8.0
+2023-02-05 18:42:15,524 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.262e+02 4.572e+02 5.687e+02 7.371e+02 1.820e+03, threshold=1.137e+03, percent-clipped=4.0
+2023-02-05 18:42:24,572 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4021.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:42:25,219 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2858, 2.7686, 2.0520, 2.1679, 2.0988, 2.2991, 1.8875, 2.6389],
+ device='cuda:3'), covar=tensor([0.1656, 0.1247, 0.2139, 0.1052, 0.2230, 0.1409, 0.3212, 0.1415],
+ device='cuda:3'), in_proj_covar=tensor([0.0218, 0.0157, 0.0254, 0.0155, 0.0224, 0.0187, 0.0257, 0.0197],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0001, 0.0002, 0.0001],
+ device='cuda:3')
+2023-02-05 18:42:25,843 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4023.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:42:36,380 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4038.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:42:42,710 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4046.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:42:46,095 INFO [train.py:901] (3/4) Epoch 1, batch 4050, loss[loss=0.4018, simple_loss=0.4305, pruned_loss=0.1865, over 8353.00 frames. ], tot_loss[loss=0.4712, simple_loss=0.4722, pruned_loss=0.235, over 1613746.64 frames. ], batch size: 24, lr: 4.41e-02, grad_scale: 8.0
+2023-02-05 18:43:17,413 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.94 vs. limit=2.0
+2023-02-05 18:43:22,347 INFO [train.py:901] (3/4) Epoch 1, batch 4100, loss[loss=0.405, simple_loss=0.4341, pruned_loss=0.188, over 8339.00 frames. ], tot_loss[loss=0.4699, simple_loss=0.4719, pruned_loss=0.234, over 1616676.81 frames. ], batch size: 25, lr: 4.40e-02, grad_scale: 8.0
+2023-02-05 18:43:26,892 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.479e+02 4.889e+02 6.474e+02 8.616e+02 2.054e+03, threshold=1.295e+03, percent-clipped=5.0
+2023-02-05 18:43:56,550 INFO [train.py:901] (3/4) Epoch 1, batch 4150, loss[loss=0.4579, simple_loss=0.4725, pruned_loss=0.2216, over 8103.00 frames. ], tot_loss[loss=0.4679, simple_loss=0.4709, pruned_loss=0.2324, over 1613922.48 frames. ], batch size: 23, lr: 4.39e-02, grad_scale: 8.0
+2023-02-05 18:43:58,161 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4153.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 18:44:02,268 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4159.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:44:33,523 INFO [train.py:901] (3/4) Epoch 1, batch 4200, loss[loss=0.5005, simple_loss=0.4998, pruned_loss=0.2506, over 8493.00 frames. ], tot_loss[loss=0.4642, simple_loss=0.4688, pruned_loss=0.2299, over 1616705.34 frames. ], batch size: 28, lr: 4.38e-02, grad_scale: 8.0
+2023-02-05 18:44:38,301 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 4.057e+02 5.109e+02 6.409e+02 1.525e+03, threshold=1.022e+03, percent-clipped=2.0
+2023-02-05 18:44:44,312 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-05 18:45:04,964 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-05 18:45:07,067 INFO [train.py:901] (3/4) Epoch 1, batch 4250, loss[loss=0.5085, simple_loss=0.4898, pruned_loss=0.2636, over 8325.00 frames. ], tot_loss[loss=0.4673, simple_loss=0.4703, pruned_loss=0.2321, over 1613724.42 frames. ], batch size: 25, lr: 4.36e-02, grad_scale: 8.0
+2023-02-05 18:45:26,610 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4279.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:45:33,325 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4288.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:45:42,864 INFO [train.py:901] (3/4) Epoch 1, batch 4300, loss[loss=0.4008, simple_loss=0.4249, pruned_loss=0.1883, over 7804.00 frames. ], tot_loss[loss=0.4631, simple_loss=0.4674, pruned_loss=0.2294, over 1615967.44 frames. ], batch size: 20, lr: 4.35e-02, grad_scale: 8.0
+2023-02-05 18:45:43,733 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1187, 1.1677, 1.8024, 0.4781, 1.2362, 1.0674, 0.2185, 1.4673],
+ device='cuda:3'), covar=tensor([0.0649, 0.0447, 0.0232, 0.1269, 0.0755, 0.0627, 0.0958, 0.0438],
+ device='cuda:3'), in_proj_covar=tensor([0.0111, 0.0085, 0.0072, 0.0111, 0.0081, 0.0098, 0.0106, 0.0081],
+ device='cuda:3'), out_proj_covar=tensor([7.8775e-05, 5.7395e-05, 5.0760e-05, 8.8134e-05, 6.3493e-05, 6.9266e-05,
+ 7.8165e-05, 5.6099e-05], device='cuda:3')
+2023-02-05 18:45:45,736 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4304.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:45:47,031 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=4306.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:45:48,885 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.647e+02 4.666e+02 6.207e+02 8.078e+02 1.600e+03, threshold=1.241e+03, percent-clipped=6.0
+2023-02-05 18:46:03,145 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0
+2023-02-05 18:46:18,293 INFO [train.py:901] (3/4) Epoch 1, batch 4350, loss[loss=0.4551, simple_loss=0.4397, pruned_loss=0.2352, over 7724.00 frames. ], tot_loss[loss=0.4617, simple_loss=0.466, pruned_loss=0.2287, over 1609611.51 frames. ], batch size: 18, lr: 4.34e-02, grad_scale: 8.0
+2023-02-05 18:46:37,364 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-05 18:46:38,879 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6405, 1.6203, 1.8618, 1.5654, 1.1890, 1.9319, 0.3957, 1.0336],
+ device='cuda:3'), covar=tensor([0.0837, 0.0542, 0.0464, 0.0484, 0.0690, 0.0400, 0.1796, 0.0948],
+ device='cuda:3'), in_proj_covar=tensor([0.0149, 0.0116, 0.0109, 0.0121, 0.0125, 0.0092, 0.0164, 0.0139],
+ device='cuda:3'), out_proj_covar=tensor([1.1179e-04, 9.4067e-05, 8.2953e-05, 8.9767e-05, 9.7724e-05, 6.7258e-05,
+ 1.2693e-04, 1.1050e-04], device='cuda:3')
+2023-02-05 18:46:46,376 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0
+2023-02-05 18:46:52,945 INFO [train.py:901] (3/4) Epoch 1, batch 4400, loss[loss=0.4757, simple_loss=0.482, pruned_loss=0.2347, over 8667.00 frames. ], tot_loss[loss=0.4592, simple_loss=0.4645, pruned_loss=0.227, over 1610456.93 frames. ], batch size: 34, lr: 4.33e-02, grad_scale: 8.0
+2023-02-05 18:46:54,525 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:46:57,924 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 4.338e+02 5.789e+02 7.262e+02 1.136e+03, threshold=1.158e+03, percent-clipped=0.0
+2023-02-05 18:46:58,935 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4409.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:47:18,598 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4434.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 18:47:21,208 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-05 18:47:22,739 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1680, 1.4458, 1.6320, 1.6274, 1.3910, 1.1210, 1.4276, 1.4584],
+ device='cuda:3'), covar=tensor([0.2512, 0.1040, 0.0670, 0.0691, 0.0877, 0.1508, 0.1121, 0.0924],
+ device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0107, 0.0078, 0.0088, 0.0112, 0.0125, 0.0131, 0.0127],
+ device='cuda:3'), out_proj_covar=tensor([1.0147e-04, 6.1722e-05, 4.2985e-05, 5.0462e-05, 6.2690e-05, 6.9145e-05,
+ 7.3832e-05, 6.8300e-05], device='cuda:3')
+2023-02-05 18:47:29,979 INFO [train.py:901] (3/4) Epoch 1, batch 4450, loss[loss=0.3802, simple_loss=0.3963, pruned_loss=0.182, over 7792.00 frames. ], tot_loss[loss=0.4567, simple_loss=0.4626, pruned_loss=0.2254, over 1609605.10 frames. ], batch size: 19, lr: 4.32e-02, grad_scale: 8.0
+2023-02-05 18:48:04,127 INFO [train.py:901] (3/4) Epoch 1, batch 4500, loss[loss=0.4292, simple_loss=0.4578, pruned_loss=0.2002, over 8487.00 frames. ], tot_loss[loss=0.4565, simple_loss=0.4624, pruned_loss=0.2253, over 1608119.81 frames. ], batch size: 28, lr: 4.31e-02, grad_scale: 8.0
+2023-02-05 18:48:05,594 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4503.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:48:09,058 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.251e+02 4.383e+02 5.863e+02 8.313e+02 2.632e+03, threshold=1.173e+03, percent-clipped=9.0
+2023-02-05 18:48:15,316 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-05 18:48:41,806 INFO [train.py:901] (3/4) Epoch 1, batch 4550, loss[loss=0.431, simple_loss=0.4616, pruned_loss=0.2003, over 8132.00 frames. ], tot_loss[loss=0.4517, simple_loss=0.4597, pruned_loss=0.2218, over 1612268.19 frames. ], batch size: 22, lr: 4.30e-02, grad_scale: 8.0
+2023-02-05 18:49:16,702 INFO [train.py:901] (3/4) Epoch 1, batch 4600, loss[loss=0.5922, simple_loss=0.5499, pruned_loss=0.3172, over 7013.00 frames. ], tot_loss[loss=0.4541, simple_loss=0.4615, pruned_loss=0.2233, over 1613326.42 frames. ], batch size: 71, lr: 4.29e-02, grad_scale: 8.0
+2023-02-05 18:49:17,597 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2421, 1.2883, 3.7493, 1.8567, 1.9269, 4.4822, 3.6133, 4.2359],
+ device='cuda:3'), covar=tensor([0.1548, 0.2179, 0.0240, 0.2095, 0.1116, 0.0177, 0.0494, 0.0269],
+ device='cuda:3'), in_proj_covar=tensor([0.0222, 0.0246, 0.0134, 0.0237, 0.0177, 0.0103, 0.0102, 0.0142],
+ device='cuda:3'), out_proj_covar=tensor([1.7746e-04, 1.9206e-04, 1.1864e-04, 1.7895e-04, 1.5860e-04, 8.5862e-05,
+ 9.3240e-05, 1.2081e-04], device='cuda:3')
+2023-02-05 18:49:21,483 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.983e+02 5.037e+02 6.922e+02 1.236e+03, threshold=1.007e+03, percent-clipped=2.0
+2023-02-05 18:49:28,486 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4618.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:49:37,336 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6534, 1.0653, 3.0744, 1.3886, 2.2791, 3.2938, 3.0573, 2.9676],
+ device='cuda:3'), covar=tensor([0.1693, 0.2402, 0.0268, 0.2412, 0.0833, 0.0260, 0.0342, 0.0411],
+ device='cuda:3'), in_proj_covar=tensor([0.0226, 0.0257, 0.0139, 0.0245, 0.0182, 0.0107, 0.0105, 0.0147],
+ device='cuda:3'), out_proj_covar=tensor([1.8115e-04, 1.9991e-04, 1.2458e-04, 1.8525e-04, 1.6297e-04, 8.9528e-05,
+ 9.6566e-05, 1.2484e-04], device='cuda:3')
+2023-02-05 18:49:51,614 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=4650.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:49:52,218 INFO [train.py:901] (3/4) Epoch 1, batch 4650, loss[loss=0.4842, simple_loss=0.4869, pruned_loss=0.2407, over 8338.00 frames. ], tot_loss[loss=0.4532, simple_loss=0.4611, pruned_loss=0.2226, over 1617142.11 frames. ], batch size: 26, lr: 4.28e-02, grad_scale: 8.0
+2023-02-05 18:49:59,116 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4659.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:50:16,189 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4684.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:50:20,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0
+2023-02-05 18:50:24,978 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4384, 1.1985, 4.2914, 2.1621, 3.9350, 3.6203, 3.6401, 3.7549],
+ device='cuda:3'), covar=tensor([0.0136, 0.3393, 0.0207, 0.0908, 0.0252, 0.0233, 0.0287, 0.0282],
+ device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0300, 0.0139, 0.0171, 0.0153, 0.0153, 0.0146, 0.0164],
+ device='cuda:3'), out_proj_covar=tensor([6.8514e-05, 1.7204e-04, 8.7556e-05, 1.1607e-04, 9.0235e-05, 9.4652e-05,
+ 9.1922e-05, 1.0867e-04], device='cuda:3')
+2023-02-05 18:50:27,580 INFO [train.py:901] (3/4) Epoch 1, batch 4700, loss[loss=0.5101, simple_loss=0.5145, pruned_loss=0.2528, over 8583.00 frames. ], tot_loss[loss=0.4513, simple_loss=0.4597, pruned_loss=0.2214, over 1613778.32 frames. ], batch size: 34, lr: 4.27e-02, grad_scale: 8.0
+2023-02-05 18:50:27,811 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3746, 1.7367, 1.2179, 1.8996, 1.1913, 1.3140, 1.3730, 1.9100],
+ device='cuda:3'), covar=tensor([0.1238, 0.0818, 0.1726, 0.0612, 0.1375, 0.1283, 0.1586, 0.0673],
+ device='cuda:3'), in_proj_covar=tensor([0.0260, 0.0184, 0.0290, 0.0191, 0.0263, 0.0222, 0.0292, 0.0231],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-05 18:50:32,372 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.254e+02 4.576e+02 5.443e+02 6.674e+02 1.320e+03, threshold=1.089e+03, percent-clipped=4.0
+2023-02-05 18:51:00,649 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2962, 1.4070, 1.2978, 1.1343, 1.7126, 1.1987, 0.9821, 1.7004],
+ device='cuda:3'), covar=tensor([0.1792, 0.2293, 0.2401, 0.2372, 0.1135, 0.2503, 0.1753, 0.1174],
+ device='cuda:3'), in_proj_covar=tensor([0.0268, 0.0283, 0.0268, 0.0272, 0.0281, 0.0256, 0.0262, 0.0258],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-05 18:51:01,882 INFO [train.py:901] (3/4) Epoch 1, batch 4750, loss[loss=0.506, simple_loss=0.4794, pruned_loss=0.2663, over 6983.00 frames. ], tot_loss[loss=0.4488, simple_loss=0.4573, pruned_loss=0.2201, over 1611730.71 frames. ], batch size: 72, lr: 4.26e-02, grad_scale: 8.0
+2023-02-05 18:51:12,296 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=4765.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:51:21,697 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-05 18:51:23,849 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-05 18:51:37,814 INFO [train.py:901] (3/4) Epoch 1, batch 4800, loss[loss=0.4584, simple_loss=0.4727, pruned_loss=0.222, over 8311.00 frames. ], tot_loss[loss=0.4472, simple_loss=0.4561, pruned_loss=0.2191, over 1610027.99 frames. ], batch size: 25, lr: 4.25e-02, grad_scale: 8.0
+2023-02-05 18:51:42,622 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.690e+02 4.367e+02 5.327e+02 7.244e+02 1.939e+03, threshold=1.065e+03, percent-clipped=6.0
+2023-02-05 18:52:11,418 INFO [train.py:901] (3/4) Epoch 1, batch 4850, loss[loss=0.3715, simple_loss=0.3933, pruned_loss=0.1748, over 7527.00 frames. ], tot_loss[loss=0.446, simple_loss=0.4548, pruned_loss=0.2186, over 1613187.64 frames. ], batch size: 18, lr: 4.24e-02, grad_scale: 8.0
+2023-02-05 18:52:13,500 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-05 18:52:21,868 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7745, 2.0975, 1.2495, 2.1323, 1.8783, 1.4905, 1.5341, 2.2072],
+ device='cuda:3'), covar=tensor([0.1322, 0.0750, 0.1833, 0.0679, 0.1303, 0.1419, 0.2166, 0.0846],
+ device='cuda:3'), in_proj_covar=tensor([0.0265, 0.0186, 0.0299, 0.0202, 0.0276, 0.0227, 0.0298, 0.0229],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-05 18:52:27,480 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=4874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:52:47,404 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=4899.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:52:48,546 INFO [train.py:901] (3/4) Epoch 1, batch 4900, loss[loss=0.4258, simple_loss=0.4261, pruned_loss=0.2128, over 6811.00 frames. ], tot_loss[loss=0.4436, simple_loss=0.4532, pruned_loss=0.217, over 1611133.20 frames. ], batch size: 15, lr: 4.23e-02, grad_scale: 8.0
+2023-02-05 18:52:53,371 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.332e+02 4.394e+02 5.447e+02 6.722e+02 1.310e+03, threshold=1.089e+03, percent-clipped=5.0
+2023-02-05 18:53:05,360 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1999, 1.7535, 1.7964, 1.5331, 1.1822, 1.8844, 0.3765, 0.7652],
+ device='cuda:3'), covar=tensor([0.1014, 0.0573, 0.0387, 0.0390, 0.0780, 0.0411, 0.1418, 0.0843],
+ device='cuda:3'), in_proj_covar=tensor([0.0143, 0.0111, 0.0103, 0.0125, 0.0123, 0.0093, 0.0166, 0.0138],
+ device='cuda:3'), out_proj_covar=tensor([1.1032e-04, 9.3566e-05, 8.1136e-05, 9.3620e-05, 1.0139e-04, 7.2355e-05,
+ 1.2957e-04, 1.1106e-04], device='cuda:3')
+2023-02-05 18:53:08,705 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5192, 2.1428, 2.2529, 2.5243, 1.9505, 1.2716, 1.7603, 1.9830],
+ device='cuda:3'), covar=tensor([0.2030, 0.0808, 0.0531, 0.0338, 0.0706, 0.1096, 0.0887, 0.0933],
+ device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0118, 0.0085, 0.0097, 0.0127, 0.0138, 0.0143, 0.0150],
+ device='cuda:3'), out_proj_covar=tensor([1.1315e-04, 6.8332e-05, 4.8496e-05, 5.4958e-05, 7.0348e-05, 7.9129e-05,
+ 8.0833e-05, 8.2202e-05], device='cuda:3')
+2023-02-05 18:53:22,696 INFO [train.py:901] (3/4) Epoch 1, batch 4950, loss[loss=0.6146, simple_loss=0.5586, pruned_loss=0.3353, over 8658.00 frames. ], tot_loss[loss=0.4437, simple_loss=0.4533, pruned_loss=0.2171, over 1613159.43 frames. ], batch size: 34, lr: 4.21e-02, grad_scale: 8.0
+2023-02-05 18:53:59,103 INFO [train.py:901] (3/4) Epoch 1, batch 5000, loss[loss=0.518, simple_loss=0.5004, pruned_loss=0.2678, over 8559.00 frames. ], tot_loss[loss=0.4451, simple_loss=0.4542, pruned_loss=0.218, over 1608068.41 frames. ], batch size: 49, lr: 4.20e-02, grad_scale: 8.0
+2023-02-05 18:54:04,640 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.658e+02 4.358e+02 5.438e+02 7.182e+02 1.797e+03, threshold=1.088e+03, percent-clipped=3.0
+2023-02-05 18:54:13,641 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=5021.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:54:30,644 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=5046.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 18:54:33,891 INFO [train.py:901] (3/4) Epoch 1, batch 5050, loss[loss=0.3744, simple_loss=0.4032, pruned_loss=0.1728, over 7659.00 frames. ], tot_loss[loss=0.4422, simple_loss=0.4525, pruned_loss=0.216, over 1609364.37 frames. ], batch size: 19, lr: 4.19e-02, grad_scale: 8.0
+2023-02-05 18:54:50,671 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-05 18:55:08,927 INFO [train.py:901] (3/4) Epoch 1, batch 5100, loss[loss=0.4948, simple_loss=0.4774, pruned_loss=0.2561, over 8032.00 frames. ], tot_loss[loss=0.4438, simple_loss=0.4531, pruned_loss=0.2172, over 1608441.31 frames. ], batch size: 22, lr: 4.18e-02, grad_scale: 8.0
+2023-02-05 18:55:13,606 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.507e+02 4.431e+02 5.257e+02 6.582e+02 1.311e+03, threshold=1.051e+03, percent-clipped=2.0
+2023-02-05 18:55:45,846 INFO [train.py:901] (3/4) Epoch 1, batch 5150, loss[loss=0.4459, simple_loss=0.4596, pruned_loss=0.2161, over 8645.00 frames. ], tot_loss[loss=0.4422, simple_loss=0.4521, pruned_loss=0.2162, over 1608889.44 frames. ], batch size: 39, lr: 4.17e-02, grad_scale: 8.0
+2023-02-05 18:56:06,777 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.87 vs. limit=2.0
+2023-02-05 18:56:19,015 INFO [train.py:901] (3/4) Epoch 1, batch 5200, loss[loss=0.4951, simple_loss=0.4811, pruned_loss=0.2546, over 8646.00 frames. ], tot_loss[loss=0.4442, simple_loss=0.4534, pruned_loss=0.2175, over 1615358.50 frames. ], batch size: 34, lr: 4.16e-02, grad_scale: 8.0
+2023-02-05 18:56:23,577 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.039e+02 3.937e+02 5.264e+02 6.479e+02 1.558e+03, threshold=1.053e+03, percent-clipped=7.0
+2023-02-05 18:56:25,138 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7091, 1.1557, 4.3840, 2.1882, 4.0944, 3.7211, 3.8879, 3.7898],
+ device='cuda:3'), covar=tensor([0.0108, 0.3279, 0.0219, 0.1063, 0.0268, 0.0254, 0.0263, 0.0329],
+ device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0300, 0.0141, 0.0180, 0.0158, 0.0158, 0.0144, 0.0165],
+ device='cuda:3'), out_proj_covar=tensor([6.6723e-05, 1.7103e-04, 8.9388e-05, 1.1870e-04, 9.2658e-05, 9.6908e-05,
+ 8.9898e-05, 1.0832e-04], device='cuda:3')
+2023-02-05 18:56:48,375 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.55 vs. limit=5.0
+2023-02-05 18:56:51,651 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-05 18:56:55,106 INFO [train.py:901] (3/4) Epoch 1, batch 5250, loss[loss=0.4401, simple_loss=0.46, pruned_loss=0.2101, over 8104.00 frames. ], tot_loss[loss=0.4432, simple_loss=0.4539, pruned_loss=0.2162, over 1622100.54 frames. ], batch size: 23, lr: 4.15e-02, grad_scale: 8.0
+2023-02-05 18:57:11,488 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4946, 1.1599, 2.9247, 1.3697, 1.9509, 3.3406, 2.9002, 2.7075],
+ device='cuda:3'), covar=tensor([0.1959, 0.2257, 0.0401, 0.2340, 0.1076, 0.0186, 0.0331, 0.0488],
+ device='cuda:3'), in_proj_covar=tensor([0.0242, 0.0256, 0.0145, 0.0250, 0.0189, 0.0112, 0.0115, 0.0164],
+ device='cuda:3'), out_proj_covar=tensor([1.9788e-04, 2.0499e-04, 1.3707e-04, 1.9676e-04, 1.7231e-04, 9.8135e-05,
+ 1.0682e-04, 1.4046e-04], device='cuda:3')
+2023-02-05 18:57:28,846 INFO [train.py:901] (3/4) Epoch 1, batch 5300, loss[loss=0.4909, simple_loss=0.4889, pruned_loss=0.2464, over 8351.00 frames. ], tot_loss[loss=0.4416, simple_loss=0.4527, pruned_loss=0.2152, over 1621733.81 frames. ], batch size: 24, lr: 4.14e-02, grad_scale: 8.0
+2023-02-05 18:57:33,641 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.076e+02 4.278e+02 4.955e+02 6.641e+02 1.586e+03, threshold=9.909e+02, percent-clipped=4.0
+2023-02-05 18:57:33,918 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9475, 2.4979, 4.7003, 1.3549, 2.9938, 2.6974, 1.7594, 2.4101],
+ device='cuda:3'), covar=tensor([0.1071, 0.1336, 0.0199, 0.1342, 0.1118, 0.1573, 0.1158, 0.1252],
+ device='cuda:3'), in_proj_covar=tensor([0.0242, 0.0241, 0.0200, 0.0264, 0.0300, 0.0308, 0.0260, 0.0286],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-05 18:57:54,946 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6994, 2.0793, 1.6437, 1.5153, 2.5700, 2.0002, 2.2855, 3.1361],
+ device='cuda:3'), covar=tensor([0.1815, 0.2418, 0.2443, 0.2680, 0.1635, 0.2195, 0.1829, 0.1162],
+ device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0282, 0.0277, 0.0279, 0.0277, 0.0260, 0.0264, 0.0257],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-05 18:58:04,343 INFO [train.py:901] (3/4) Epoch 1, batch 5350, loss[loss=0.4199, simple_loss=0.4328, pruned_loss=0.2035, over 8285.00 frames. ], tot_loss[loss=0.4397, simple_loss=0.4515, pruned_loss=0.214, over 1622130.21 frames. ], batch size: 23, lr: 4.13e-02, grad_scale: 8.0
+2023-02-05 18:58:15,393 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.07 vs. limit=2.0
+2023-02-05 18:58:39,824 INFO [train.py:901] (3/4) Epoch 1, batch 5400, loss[loss=0.4044, simple_loss=0.4285, pruned_loss=0.1901, over 8235.00 frames. ], tot_loss[loss=0.4375, simple_loss=0.4499, pruned_loss=0.2126, over 1623070.45 frames. ], batch size: 22, lr: 4.12e-02, grad_scale: 8.0
+2023-02-05 18:58:44,298 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.977e+02 4.515e+02 5.788e+02 7.308e+02 1.362e+03, threshold=1.158e+03, percent-clipped=5.0
+2023-02-05 18:59:13,400 INFO [train.py:901] (3/4) Epoch 1, batch 5450, loss[loss=0.4583, simple_loss=0.4698, pruned_loss=0.2234, over 8351.00 frames. ], tot_loss[loss=0.4349, simple_loss=0.4483, pruned_loss=0.2108, over 1623818.58 frames. ], batch size: 25, lr: 4.11e-02, grad_scale: 8.0
+2023-02-05 18:59:41,788 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-05 18:59:49,950 INFO [train.py:901] (3/4) Epoch 1, batch 5500, loss[loss=0.4353, simple_loss=0.4463, pruned_loss=0.2121, over 8131.00 frames. ], tot_loss[loss=0.4375, simple_loss=0.4501, pruned_loss=0.2124, over 1625463.56 frames. ], batch size: 22, lr: 4.10e-02, grad_scale: 8.0
+2023-02-05 18:59:54,516 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.397e+02 4.451e+02 5.295e+02 6.340e+02 1.239e+03, threshold=1.059e+03, percent-clipped=2.0
+2023-02-05 19:00:21,030 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6983, 2.2827, 4.6471, 2.9061, 4.3996, 4.0700, 4.1877, 4.3241],
+ device='cuda:3'), covar=tensor([0.0124, 0.2226, 0.0140, 0.0776, 0.0201, 0.0173, 0.0167, 0.0159],
+ device='cuda:3'), in_proj_covar=tensor([0.0113, 0.0306, 0.0149, 0.0184, 0.0169, 0.0165, 0.0149, 0.0168],
+ device='cuda:3'), out_proj_covar=tensor([7.0860e-05, 1.7314e-04, 9.3464e-05, 1.2142e-04, 9.8619e-05, 1.0081e-04,
+ 9.2900e-05, 1.0932e-04], device='cuda:3')
+2023-02-05 19:00:23,629 INFO [train.py:901] (3/4) Epoch 1, batch 5550, loss[loss=0.3931, simple_loss=0.4077, pruned_loss=0.1893, over 7778.00 frames. ], tot_loss[loss=0.4375, simple_loss=0.45, pruned_loss=0.2126, over 1621533.18 frames. ], batch size: 19, lr: 4.09e-02, grad_scale: 8.0
+2023-02-05 19:00:28,155 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.05 vs. limit=2.0
+2023-02-05 19:01:00,924 INFO [train.py:901] (3/4) Epoch 1, batch 5600, loss[loss=0.4484, simple_loss=0.4586, pruned_loss=0.2191, over 8322.00 frames. ], tot_loss[loss=0.4367, simple_loss=0.4489, pruned_loss=0.2122, over 1617817.30 frames. ], batch size: 25, lr: 4.08e-02, grad_scale: 8.0
+2023-02-05 19:01:05,771 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 3.916e+02 5.301e+02 6.582e+02 1.340e+03, threshold=1.060e+03, percent-clipped=3.0
+2023-02-05 19:01:26,253 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-05 19:01:34,543 INFO [train.py:901] (3/4) Epoch 1, batch 5650, loss[loss=0.4044, simple_loss=0.4024, pruned_loss=0.2032, over 7531.00 frames. ], tot_loss[loss=0.4371, simple_loss=0.4491, pruned_loss=0.2125, over 1617585.97 frames. ], batch size: 18, lr: 4.07e-02, grad_scale: 8.0
+2023-02-05 19:01:45,700 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-05 19:01:45,832 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5668.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 19:02:06,765 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7827, 2.3290, 1.4236, 2.2206, 1.9285, 1.6829, 1.6582, 2.4785],
+ device='cuda:3'), covar=tensor([0.1616, 0.0736, 0.1712, 0.0889, 0.1460, 0.1319, 0.2307, 0.0763],
+ device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0197, 0.0316, 0.0229, 0.0286, 0.0240, 0.0305, 0.0246],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-05 19:02:09,329 INFO [train.py:901] (3/4) Epoch 1, batch 5700, loss[loss=0.4543, simple_loss=0.4666, pruned_loss=0.221, over 8741.00 frames. ], tot_loss[loss=0.439, simple_loss=0.4497, pruned_loss=0.2142, over 1612390.36 frames. ], batch size: 40, lr: 4.06e-02, grad_scale: 8.0
+2023-02-05 19:02:15,264 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.140e+02 4.740e+02 5.744e+02 8.008e+02 1.790e+03, threshold=1.149e+03, percent-clipped=10.0
+2023-02-05 19:02:28,016 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0
+2023-02-05 19:02:44,479 INFO [train.py:901] (3/4) Epoch 1, batch 5750, loss[loss=0.4178, simple_loss=0.4464, pruned_loss=0.1946, over 8139.00 frames. ], tot_loss[loss=0.435, simple_loss=0.447, pruned_loss=0.2115, over 1610890.00 frames. ], batch size: 22, lr: 4.05e-02, grad_scale: 8.0
+2023-02-05 19:02:51,405 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-05 19:02:59,846 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5773.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:03:16,849 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2948, 1.6606, 1.3480, 2.0139, 1.4283, 1.2006, 1.2279, 2.0112],
+ device='cuda:3'), covar=tensor([0.1255, 0.0680, 0.1484, 0.0586, 0.1394, 0.1291, 0.1643, 0.0738],
+ device='cuda:3'), in_proj_covar=tensor([0.0286, 0.0207, 0.0322, 0.0236, 0.0290, 0.0244, 0.0307, 0.0253],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-05 19:03:19,619 INFO [train.py:901] (3/4) Epoch 1, batch 5800, loss[loss=0.4427, simple_loss=0.4582, pruned_loss=0.2136, over 8324.00 frames. ], tot_loss[loss=0.4331, simple_loss=0.4462, pruned_loss=0.21, over 1609196.37 frames. ], batch size: 25, lr: 4.04e-02, grad_scale: 8.0
+2023-02-05 19:03:24,530 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.671e+02 4.595e+02 5.667e+02 1.405e+03, threshold=9.190e+02, percent-clipped=2.0
+2023-02-05 19:03:48,471 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0
+2023-02-05 19:03:57,248 INFO [train.py:901] (3/4) Epoch 1, batch 5850, loss[loss=0.4272, simple_loss=0.4418, pruned_loss=0.2063, over 8080.00 frames. ], tot_loss[loss=0.4326, simple_loss=0.4461, pruned_loss=0.2095, over 1609670.55 frames. ], batch size: 21, lr: 4.03e-02, grad_scale: 8.0
+2023-02-05 19:04:15,207 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=5876.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:04:21,567 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0921, 1.2567, 2.0160, 0.3793, 1.6034, 1.1244, 0.4392, 1.4242],
+ device='cuda:3'), covar=tensor([0.0541, 0.0272, 0.0189, 0.0998, 0.0621, 0.0659, 0.0922, 0.0315],
+ device='cuda:3'), in_proj_covar=tensor([0.0138, 0.0105, 0.0084, 0.0140, 0.0103, 0.0148, 0.0144, 0.0110],
+ device='cuda:3'), out_proj_covar=tensor([1.0000e-04, 7.5911e-05, 6.4379e-05, 1.1092e-04, 8.4542e-05, 1.1129e-04,
+ 1.1165e-04, 7.9108e-05], device='cuda:3')
+2023-02-05 19:04:32,488 INFO [train.py:901] (3/4) Epoch 1, batch 5900, loss[loss=0.4201, simple_loss=0.4376, pruned_loss=0.2014, over 8292.00 frames. ], tot_loss[loss=0.4302, simple_loss=0.4443, pruned_loss=0.2081, over 1609159.36 frames. ], batch size: 23, lr: 4.02e-02, grad_scale: 8.0
+2023-02-05 19:04:37,230 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 3.095e+02 4.155e+02 5.559e+02 6.668e+02 2.372e+03, threshold=1.112e+03, percent-clipped=6.0
+2023-02-05 19:05:09,348 INFO [train.py:901] (3/4) Epoch 1, batch 5950, loss[loss=0.4437, simple_loss=0.4347, pruned_loss=0.2263, over 7524.00 frames. ], tot_loss[loss=0.4284, simple_loss=0.4432, pruned_loss=0.2068, over 1612280.16 frames. ], batch size: 18, lr: 4.01e-02, grad_scale: 8.0
+2023-02-05 19:05:13,688 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6391, 1.8462, 2.7939, 2.8603, 2.2292, 1.5125, 2.0386, 1.8560],
+ device='cuda:3'), covar=tensor([0.1693, 0.0983, 0.0265, 0.0286, 0.0550, 0.0751, 0.0576, 0.1030],
+ device='cuda:3'), in_proj_covar=tensor([0.0270, 0.0172, 0.0119, 0.0139, 0.0178, 0.0184, 0.0190, 0.0211],
+ device='cuda:3'), out_proj_covar=tensor([1.6108e-04, 1.0591e-04, 7.2145e-05, 8.2285e-05, 1.0167e-04, 1.0933e-04,
+ 1.0994e-04, 1.2088e-04], device='cuda:3')
+2023-02-05 19:05:44,548 INFO [train.py:901] (3/4) Epoch 1, batch 6000, loss[loss=0.3785, simple_loss=0.3995, pruned_loss=0.1787, over 7428.00 frames. ], tot_loss[loss=0.4287, simple_loss=0.4432, pruned_loss=0.2071, over 1614178.52 frames. ], batch size: 17, lr: 4.00e-02, grad_scale: 16.0
+2023-02-05 19:05:44,548 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-05 19:06:02,003 INFO [train.py:935] (3/4) Epoch 1, validation: loss=0.3351, simple_loss=0.4011, pruned_loss=0.1346, over 944034.00 frames.
+2023-02-05 19:06:02,004 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6514MB
+2023-02-05 19:06:06,793 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.323e+02 3.694e+02 4.999e+02 6.330e+02 1.596e+03, threshold=9.998e+02, percent-clipped=5.0
+2023-02-05 19:06:06,994 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6008.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:06:09,469 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6012.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 19:06:28,692 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0
+2023-02-05 19:06:35,737 INFO [train.py:901] (3/4) Epoch 1, batch 6050, loss[loss=0.5255, simple_loss=0.5106, pruned_loss=0.2702, over 8339.00 frames. ], tot_loss[loss=0.4351, simple_loss=0.4475, pruned_loss=0.2113, over 1612275.06 frames. ], batch size: 26, lr: 3.99e-02, grad_scale: 8.0
+2023-02-05 19:06:42,864 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:07:12,047 INFO [train.py:901] (3/4) Epoch 1, batch 6100, loss[loss=0.4014, simple_loss=0.4319, pruned_loss=0.1855, over 8245.00 frames. ], tot_loss[loss=0.4324, simple_loss=0.4461, pruned_loss=0.2093, over 1613133.52 frames. ], batch size: 24, lr: 3.98e-02, grad_scale: 8.0
+2023-02-05 19:07:17,503 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.508e+02 4.942e+02 6.048e+02 7.564e+02 1.774e+03, threshold=1.210e+03, percent-clipped=15.0
+2023-02-05 19:07:23,143 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6117.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:07:29,001 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-05 19:07:29,786 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6127.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 19:07:45,976 INFO [train.py:901] (3/4) Epoch 1, batch 6150, loss[loss=0.3728, simple_loss=0.4196, pruned_loss=0.163, over 8456.00 frames. ], tot_loss[loss=0.4309, simple_loss=0.4452, pruned_loss=0.2083, over 1618112.80 frames. ], batch size: 27, lr: 3.97e-02, grad_scale: 8.0
+2023-02-05 19:07:47,408 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6153.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:08:12,846 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6188.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:08:14,904 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2117, 1.2041, 1.1965, 1.8544, 0.8377, 0.9913, 1.0658, 1.1170],
+ device='cuda:3'), covar=tensor([0.1338, 0.1565, 0.1675, 0.0452, 0.2050, 0.2429, 0.1925, 0.1284],
+ device='cuda:3'), in_proj_covar=tensor([0.0299, 0.0314, 0.0295, 0.0192, 0.0339, 0.0336, 0.0390, 0.0280],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-05 19:08:22,934 INFO [train.py:901] (3/4) Epoch 1, batch 6200, loss[loss=0.4474, simple_loss=0.4584, pruned_loss=0.2182, over 8741.00 frames. ], tot_loss[loss=0.4298, simple_loss=0.4439, pruned_loss=0.2079, over 1611148.22 frames. ], batch size: 49, lr: 3.96e-02, grad_scale: 8.0
+2023-02-05 19:08:28,568 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.743e+02 4.155e+02 5.130e+02 7.106e+02 1.864e+03, threshold=1.026e+03, percent-clipped=2.0
+2023-02-05 19:08:36,408 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6220.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:08:37,190 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6221.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:08:42,653 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:08:44,652 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6232.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:08:57,426 INFO [train.py:901] (3/4) Epoch 1, batch 6250, loss[loss=0.4511, simple_loss=0.46, pruned_loss=0.2211, over 8476.00 frames. ], tot_loss[loss=0.4277, simple_loss=0.4434, pruned_loss=0.206, over 1612732.00 frames. ], batch size: 29, lr: 3.95e-02, grad_scale: 8.0
+2023-02-05 19:09:10,719 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0
+2023-02-05 19:09:15,330 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0
+2023-02-05 19:09:20,019 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6284.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:09:32,703 INFO [train.py:901] (3/4) Epoch 1, batch 6300, loss[loss=0.4242, simple_loss=0.4198, pruned_loss=0.2143, over 7545.00 frames. ], tot_loss[loss=0.4258, simple_loss=0.4426, pruned_loss=0.2045, over 1614579.96 frames. ], batch size: 18, lr: 3.94e-02, grad_scale: 8.0
+2023-02-05 19:09:38,778 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.783e+02 4.352e+02 5.159e+02 6.362e+02 1.735e+03, threshold=1.032e+03, percent-clipped=4.0
+2023-02-05 19:09:56,806 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6335.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:10:07,346 INFO [train.py:901] (3/4) Epoch 1, batch 6350, loss[loss=0.4245, simple_loss=0.4358, pruned_loss=0.2066, over 8133.00 frames. ], tot_loss[loss=0.4254, simple_loss=0.442, pruned_loss=0.2044, over 1616424.71 frames. ], batch size: 22, lr: 3.93e-02, grad_scale: 8.0
+2023-02-05 19:10:07,538 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.7687, 1.2785, 1.3851, 1.0808, 0.9212, 1.3073, 0.1205, 0.8535],
+ device='cuda:3'), covar=tensor([0.0693, 0.0564, 0.0294, 0.0424, 0.0586, 0.0403, 0.1352, 0.0669],
+ device='cuda:3'), in_proj_covar=tensor([0.0142, 0.0114, 0.0101, 0.0132, 0.0122, 0.0085, 0.0176, 0.0147],
+ device='cuda:3'), out_proj_covar=tensor([1.1777e-04, 1.0304e-04, 8.2847e-05, 1.0529e-04, 1.0752e-04, 7.0384e-05,
+ 1.4574e-04, 1.2583e-04], device='cuda:3')
+2023-02-05 19:10:08,107 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6352.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:10:28,447 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-02-05 19:10:28,929 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6383.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 19:10:38,776 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9051, 4.1676, 3.3737, 1.5667, 3.4363, 3.3616, 3.5788, 2.8071],
+ device='cuda:3'), covar=tensor([0.0914, 0.0427, 0.0769, 0.3304, 0.0453, 0.0512, 0.0851, 0.0559],
+ device='cuda:3'), in_proj_covar=tensor([0.0247, 0.0179, 0.0207, 0.0268, 0.0164, 0.0128, 0.0192, 0.0120],
+ device='cuda:3'), out_proj_covar=tensor([1.8633e-04, 1.2836e-04, 1.3568e-04, 1.7520e-04, 1.0601e-04, 9.2311e-05,
+ 1.3439e-04, 8.7719e-05], device='cuda:3')
+2023-02-05 19:10:40,805 INFO [train.py:901] (3/4) Epoch 1, batch 6400, loss[loss=0.4602, simple_loss=0.4748, pruned_loss=0.2228, over 8473.00 frames. ], tot_loss[loss=0.4251, simple_loss=0.4411, pruned_loss=0.2045, over 1612745.88 frames. ], batch size: 25, lr: 3.92e-02, grad_scale: 8.0
+2023-02-05 19:10:43,627 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6405.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:10:45,785 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6408.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 19:10:46,252 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.785e+02 4.017e+02 4.991e+02 6.603e+02 1.156e+03, threshold=9.981e+02, percent-clipped=3.0
+2023-02-05 19:10:59,209 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.24 vs. limit=2.0
+2023-02-05 19:11:16,786 INFO [train.py:901] (3/4) Epoch 1, batch 6450, loss[loss=0.4759, simple_loss=0.4697, pruned_loss=0.241, over 8363.00 frames. ], tot_loss[loss=0.4247, simple_loss=0.4412, pruned_loss=0.2041, over 1614013.89 frames. ], batch size: 24, lr: 3.91e-02, grad_scale: 8.0
+2023-02-05 19:11:27,836 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6467.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:11:33,199 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.5174, 1.3040, 3.5331, 1.6668, 3.0421, 2.8393, 3.0949, 3.0381],
+ device='cuda:3'), covar=tensor([0.0264, 0.3048, 0.0246, 0.1293, 0.0531, 0.0379, 0.0281, 0.0417],
+ device='cuda:3'), in_proj_covar=tensor([0.0136, 0.0334, 0.0165, 0.0205, 0.0201, 0.0184, 0.0158, 0.0191],
+ device='cuda:3'), out_proj_covar=tensor([8.4483e-05, 1.8558e-04, 1.0315e-04, 1.3213e-04, 1.1496e-04, 1.1048e-04,
+ 9.6604e-05, 1.2205e-04], device='cuda:3')
+2023-02-05 19:11:35,915 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7609, 2.0980, 1.0357, 2.0604, 2.0203, 1.2921, 1.4487, 2.4654],
+ device='cuda:3'), covar=tensor([0.1375, 0.0660, 0.1857, 0.0739, 0.0924, 0.1302, 0.1620, 0.0685],
+ device='cuda:3'), in_proj_covar=tensor([0.0321, 0.0217, 0.0331, 0.0259, 0.0303, 0.0270, 0.0325, 0.0274],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:3')
+2023-02-05 19:11:36,486 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:11:39,741 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6485.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:11:41,926 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6488.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:11:47,728 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6497.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:11:50,293 INFO [train.py:901] (3/4) Epoch 1, batch 6500, loss[loss=0.5012, simple_loss=0.4898, pruned_loss=0.2563, over 8097.00 frames. ], tot_loss[loss=0.4244, simple_loss=0.4414, pruned_loss=0.2037, over 1618832.37 frames. ], batch size: 23, lr: 3.90e-02, grad_scale: 8.0
+2023-02-05 19:11:55,445 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 4.204e+02 5.270e+02 6.161e+02 1.286e+03, threshold=1.054e+03, percent-clipped=6.0
+2023-02-05 19:11:58,510 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6513.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:12:03,316 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6520.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:12:11,141 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:12:24,034 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.89 vs. limit=2.0
+2023-02-05 19:12:25,068 INFO [train.py:901] (3/4) Epoch 1, batch 6550, loss[loss=0.5103, simple_loss=0.4974, pruned_loss=0.2615, over 7106.00 frames. ], tot_loss[loss=0.4248, simple_loss=0.442, pruned_loss=0.2039, over 1617032.27 frames. ], batch size: 72, lr: 3.89e-02, grad_scale: 8.0
+2023-02-05 19:12:29,525 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.27 vs. limit=5.0
+2023-02-05 19:12:35,940 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6565.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:12:37,933 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training.
Duration: 0.92 +2023-02-05 19:12:41,475 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6573.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:53,798 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:12:57,649 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 19:13:00,396 INFO [train.py:901] (3/4) Epoch 1, batch 6600, loss[loss=0.454, simple_loss=0.4811, pruned_loss=0.2135, over 8357.00 frames. ], tot_loss[loss=0.4221, simple_loss=0.4408, pruned_loss=0.2018, over 1622166.34 frames. ], batch size: 24, lr: 3.89e-02, grad_scale: 8.0 +2023-02-05 19:13:05,685 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.999e+02 4.035e+02 4.985e+02 6.404e+02 1.328e+03, threshold=9.970e+02, percent-clipped=3.0 +2023-02-05 19:13:07,911 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:10,625 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:18,648 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6628.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,572 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:31,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6647.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:13:34,158 INFO [train.py:901] (3/4) Epoch 1, batch 6650, loss[loss=0.4425, simple_loss=0.4652, pruned_loss=0.2099, over 8700.00 frames. ], tot_loss[loss=0.4192, simple_loss=0.4388, pruned_loss=0.1998, over 1622276.70 frames. ], batch size: 34, lr: 3.88e-02, grad_scale: 8.0 +2023-02-05 19:13:42,280 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7539, 2.6397, 1.2768, 2.7970, 2.2505, 1.8688, 1.7966, 2.8189], + device='cuda:3'), covar=tensor([0.1916, 0.0889, 0.1899, 0.0983, 0.1351, 0.1486, 0.2274, 0.1139], + device='cuda:3'), in_proj_covar=tensor([0.0321, 0.0220, 0.0340, 0.0261, 0.0308, 0.0280, 0.0325, 0.0275], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-05 19:13:56,261 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:01,307 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:09,932 INFO [train.py:901] (3/4) Epoch 1, batch 6700, loss[loss=0.4447, simple_loss=0.4722, pruned_loss=0.2087, over 8193.00 frames. ], tot_loss[loss=0.4187, simple_loss=0.4379, pruned_loss=0.1997, over 1619187.82 frames. ], batch size: 23, lr: 3.87e-02, grad_scale: 8.0 +2023-02-05 19:14:15,398 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.351e+02 4.140e+02 4.960e+02 6.260e+02 1.494e+03, threshold=9.921e+02, percent-clipped=3.0 +2023-02-05 19:14:25,037 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6723.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:26,598 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. 
limit=2.0 +2023-02-05 19:14:38,639 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6743.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:42,155 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:14:44,014 INFO [train.py:901] (3/4) Epoch 1, batch 6750, loss[loss=0.4639, simple_loss=0.4818, pruned_loss=0.223, over 8571.00 frames. ], tot_loss[loss=0.4181, simple_loss=0.4371, pruned_loss=0.1995, over 1616303.21 frames. ], batch size: 31, lr: 3.86e-02, grad_scale: 8.0 +2023-02-05 19:15:00,945 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:14,393 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 19:15:19,964 INFO [train.py:901] (3/4) Epoch 1, batch 6800, loss[loss=0.3769, simple_loss=0.4172, pruned_loss=0.1684, over 8101.00 frames. ], tot_loss[loss=0.4158, simple_loss=0.4354, pruned_loss=0.1981, over 1609622.21 frames. ], batch size: 23, lr: 3.85e-02, grad_scale: 8.0 +2023-02-05 19:15:20,167 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6801.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:25,324 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 4.226e+02 5.434e+02 7.341e+02 1.725e+03, threshold=1.087e+03, percent-clipped=4.0 +2023-02-05 19:15:35,603 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6824.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:39,017 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6829.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:15:54,377 INFO [train.py:901] (3/4) Epoch 1, batch 6850, loss[loss=0.4085, simple_loss=0.4348, pruned_loss=0.1911, over 7800.00 frames. ], tot_loss[loss=0.416, simple_loss=0.4358, pruned_loss=0.1981, over 1611633.79 frames. ], batch size: 20, lr: 3.84e-02, grad_scale: 8.0 +2023-02-05 19:15:54,609 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6451, 1.7402, 2.4964, 1.5222, 1.5196, 2.1416, 0.6712, 1.6609], + device='cuda:3'), covar=tensor([0.0938, 0.0607, 0.0514, 0.0525, 0.0863, 0.0823, 0.1633, 0.0877], + device='cuda:3'), in_proj_covar=tensor([0.0132, 0.0106, 0.0098, 0.0129, 0.0117, 0.0083, 0.0162, 0.0132], + device='cuda:3'), out_proj_covar=tensor([1.1180e-04, 9.6890e-05, 8.1399e-05, 1.0407e-04, 1.0467e-04, 6.9712e-05, + 1.3780e-04, 1.1536e-04], device='cuda:3') +2023-02-05 19:16:04,831 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 19:16:06,406 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:23,441 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6893.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:25,429 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6896.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:16:29,306 INFO [train.py:901] (3/4) Epoch 1, batch 6900, loss[loss=0.4047, simple_loss=0.4108, pruned_loss=0.1993, over 7228.00 frames. ], tot_loss[loss=0.4179, simple_loss=0.4371, pruned_loss=0.1993, over 1611732.40 frames. 
], batch size: 16, lr: 3.83e-02, grad_scale: 8.0 +2023-02-05 19:16:31,395 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6903.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:35,806 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.796e+02 4.754e+02 6.076e+02 1.448e+03, threshold=9.507e+02, percent-clipped=2.0 +2023-02-05 19:16:48,742 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=6927.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:49,426 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:54,879 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6936.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:16:56,904 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6939.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,413 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:00,456 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6944.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:05,067 INFO [train.py:901] (3/4) Epoch 1, batch 6950, loss[loss=0.3718, simple_loss=0.4236, pruned_loss=0.16, over 8324.00 frames. ], tot_loss[loss=0.4195, simple_loss=0.4384, pruned_loss=0.2003, over 1606561.03 frames. ], batch size: 25, lr: 3.82e-02, grad_scale: 8.0 +2023-02-05 19:17:11,197 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 19:17:12,138 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6961.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:17,885 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=6969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:32,945 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=6991.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:38,576 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=6999.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:17:39,834 INFO [train.py:901] (3/4) Epoch 1, batch 7000, loss[loss=0.36, simple_loss=0.3982, pruned_loss=0.1609, over 7915.00 frames. ], tot_loss[loss=0.4185, simple_loss=0.4383, pruned_loss=0.1993, over 1609769.37 frames. ], batch size: 20, lr: 3.81e-02, grad_scale: 8.0 +2023-02-05 19:17:45,246 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.380e+02 4.090e+02 4.918e+02 6.048e+02 1.151e+03, threshold=9.836e+02, percent-clipped=6.0 +2023-02-05 19:17:57,728 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:08,481 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-05 19:18:16,024 INFO [train.py:901] (3/4) Epoch 1, batch 7050, loss[loss=0.4197, simple_loss=0.4446, pruned_loss=0.1974, over 8131.00 frames. ], tot_loss[loss=0.4185, simple_loss=0.4381, pruned_loss=0.1994, over 1608104.36 frames. 
], batch size: 22, lr: 3.80e-02, grad_scale: 8.0 +2023-02-05 19:18:26,095 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7066.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:18:50,234 INFO [train.py:901] (3/4) Epoch 1, batch 7100, loss[loss=0.4291, simple_loss=0.4418, pruned_loss=0.2082, over 8505.00 frames. ], tot_loss[loss=0.4188, simple_loss=0.438, pruned_loss=0.1998, over 1607837.45 frames. ], batch size: 28, lr: 3.79e-02, grad_scale: 8.0 +2023-02-05 19:18:53,868 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7106.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:18:55,758 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.791e+02 4.613e+02 6.150e+02 1.722e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 19:19:08,993 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3776, 1.6372, 1.4113, 1.2898, 2.0419, 1.5193, 1.9018, 2.1571], + device='cuda:3'), covar=tensor([0.1413, 0.2172, 0.2658, 0.2352, 0.1256, 0.2175, 0.1383, 0.1041], + device='cuda:3'), in_proj_covar=tensor([0.0273, 0.0292, 0.0297, 0.0280, 0.0273, 0.0261, 0.0258, 0.0252], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:3') +2023-02-05 19:19:25,907 INFO [train.py:901] (3/4) Epoch 1, batch 7150, loss[loss=0.4696, simple_loss=0.4532, pruned_loss=0.243, over 7290.00 frames. ], tot_loss[loss=0.4169, simple_loss=0.437, pruned_loss=0.1984, over 1608745.24 frames. ], batch size: 16, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:19:26,118 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.8230, 1.2119, 1.8226, 0.9861, 0.9525, 1.5706, 0.1464, 0.8247], + device='cuda:3'), covar=tensor([0.0683, 0.0463, 0.0271, 0.0438, 0.0531, 0.0277, 0.1336, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0132, 0.0107, 0.0099, 0.0129, 0.0116, 0.0077, 0.0160, 0.0131], + device='cuda:3'), out_proj_covar=tensor([1.1116e-04, 9.7726e-05, 8.3365e-05, 1.0552e-04, 1.0572e-04, 6.8292e-05, + 1.3667e-04, 1.1411e-04], device='cuda:3') +2023-02-05 19:19:46,604 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7181.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:56,059 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7195.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,481 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7200.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:19:59,951 INFO [train.py:901] (3/4) Epoch 1, batch 7200, loss[loss=0.4302, simple_loss=0.4386, pruned_loss=0.2109, over 7983.00 frames. ], tot_loss[loss=0.4178, simple_loss=0.4381, pruned_loss=0.1988, over 1614154.73 frames. 
], batch size: 21, lr: 3.78e-02, grad_scale: 8.0 +2023-02-05 19:20:05,332 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.325e+02 4.231e+02 5.262e+02 7.053e+02 1.685e+03, threshold=1.052e+03, percent-clipped=7.0 +2023-02-05 19:20:13,050 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:16,293 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:22,240 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5776, 2.0608, 3.5017, 0.9407, 2.4054, 1.9043, 1.6564, 2.0271], + device='cuda:3'), covar=tensor([0.1065, 0.1218, 0.0286, 0.1676, 0.1117, 0.1501, 0.0900, 0.1346], + device='cuda:3'), in_proj_covar=tensor([0.0296, 0.0298, 0.0280, 0.0325, 0.0374, 0.0358, 0.0304, 0.0358], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 19:20:25,926 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7240.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:20:33,008 INFO [train.py:901] (3/4) Epoch 1, batch 7250, loss[loss=0.4088, simple_loss=0.4413, pruned_loss=0.1881, over 8344.00 frames. ], tot_loss[loss=0.4161, simple_loss=0.4367, pruned_loss=0.1977, over 1617356.65 frames. ], batch size: 26, lr: 3.77e-02, grad_scale: 8.0 +2023-02-05 19:20:49,156 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:20:58,000 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9377, 2.7550, 1.3173, 2.5357, 2.4511, 1.8569, 2.0377, 2.7744], + device='cuda:3'), covar=tensor([0.1842, 0.0824, 0.1644, 0.1004, 0.1184, 0.1241, 0.1682, 0.1121], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0236, 0.0351, 0.0282, 0.0325, 0.0279, 0.0340, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-05 19:21:04,006 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7293.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:09,003 INFO [train.py:901] (3/4) Epoch 1, batch 7300, loss[loss=0.4676, simple_loss=0.4748, pruned_loss=0.2302, over 8589.00 frames. ], tot_loss[loss=0.416, simple_loss=0.4366, pruned_loss=0.1977, over 1616224.71 frames. ], batch size: 34, lr: 3.76e-02, grad_scale: 8.0 +2023-02-05 19:21:14,312 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.413e+02 4.263e+02 5.448e+02 6.514e+02 1.215e+03, threshold=1.090e+03, percent-clipped=2.0 +2023-02-05 19:21:29,429 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9787, 2.0392, 1.7780, 2.8474, 1.3104, 1.3151, 2.1633, 2.0606], + device='cuda:3'), covar=tensor([0.1171, 0.1623, 0.1454, 0.0299, 0.2062, 0.2005, 0.1652, 0.1160], + device='cuda:3'), in_proj_covar=tensor([0.0306, 0.0325, 0.0308, 0.0199, 0.0347, 0.0340, 0.0387, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0004, 0.0003, 0.0002, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 19:21:42,634 INFO [train.py:901] (3/4) Epoch 1, batch 7350, loss[loss=0.414, simple_loss=0.4396, pruned_loss=0.1942, over 8603.00 frames. ], tot_loss[loss=0.4141, simple_loss=0.4353, pruned_loss=0.1965, over 1615605.54 frames. 
], batch size: 49, lr: 3.75e-02, grad_scale: 8.0 +2023-02-05 19:21:45,530 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7355.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:21:50,108 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7362.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:21:56,032 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 19:22:08,451 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7386.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:09,131 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7387.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:18,191 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 19:22:18,967 INFO [train.py:901] (3/4) Epoch 1, batch 7400, loss[loss=0.4148, simple_loss=0.417, pruned_loss=0.2063, over 7793.00 frames. ], tot_loss[loss=0.4128, simple_loss=0.4343, pruned_loss=0.1956, over 1614668.84 frames. ], batch size: 19, lr: 3.74e-02, grad_scale: 8.0 +2023-02-05 19:22:24,409 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.824e+02 4.270e+02 5.603e+02 6.704e+02 2.452e+03, threshold=1.121e+03, percent-clipped=4.0 +2023-02-05 19:22:25,140 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7410.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:22:35,066 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7425.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:22:52,555 INFO [train.py:901] (3/4) Epoch 1, batch 7450, loss[loss=0.3993, simple_loss=0.4294, pruned_loss=0.1845, over 8605.00 frames. ], tot_loss[loss=0.4115, simple_loss=0.4335, pruned_loss=0.1948, over 1613957.27 frames. ], batch size: 34, lr: 3.73e-02, grad_scale: 8.0 +2023-02-05 19:22:56,050 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 19:23:27,520 INFO [train.py:901] (3/4) Epoch 1, batch 7500, loss[loss=0.3861, simple_loss=0.4179, pruned_loss=0.1772, over 8284.00 frames. ], tot_loss[loss=0.4111, simple_loss=0.4328, pruned_loss=0.1946, over 1613640.33 frames. ], batch size: 23, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:23:34,187 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.398e+02 4.060e+02 5.044e+02 6.934e+02 1.457e+03, threshold=1.009e+03, percent-clipped=3.0 +2023-02-05 19:23:45,047 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7525.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:23:45,156 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7525.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:23:51,344 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-05 19:24:02,231 INFO [train.py:901] (3/4) Epoch 1, batch 7550, loss[loss=0.4338, simple_loss=0.46, pruned_loss=0.2038, over 8472.00 frames. ], tot_loss[loss=0.4102, simple_loss=0.4316, pruned_loss=0.1944, over 1610775.79 frames. ], batch size: 25, lr: 3.72e-02, grad_scale: 8.0 +2023-02-05 19:24:36,286 INFO [train.py:901] (3/4) Epoch 1, batch 7600, loss[loss=0.3767, simple_loss=0.4165, pruned_loss=0.1684, over 8509.00 frames. ], tot_loss[loss=0.412, simple_loss=0.4329, pruned_loss=0.1956, over 1613037.76 frames. 
], batch size: 26, lr: 3.71e-02, grad_scale: 8.0 +2023-02-05 19:24:41,731 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.765e+02 4.361e+02 5.460e+02 6.853e+02 1.164e+03, threshold=1.092e+03, percent-clipped=2.0 +2023-02-05 19:24:43,944 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7611.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:25:03,345 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7636.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:25:03,853 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7637.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:05,950 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7640.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,275 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:07,350 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7642.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:13,188 INFO [train.py:901] (3/4) Epoch 1, batch 7650, loss[loss=0.3858, simple_loss=0.4045, pruned_loss=0.1836, over 7417.00 frames. ], tot_loss[loss=0.4113, simple_loss=0.4329, pruned_loss=0.1948, over 1613120.06 frames. ], batch size: 17, lr: 3.70e-02, grad_scale: 8.0 +2023-02-05 19:25:23,919 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7667.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:25:30,053 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.30 vs. limit=2.0 +2023-02-05 19:25:46,145 INFO [train.py:901] (3/4) Epoch 1, batch 7700, loss[loss=0.3944, simple_loss=0.4247, pruned_loss=0.182, over 8487.00 frames. ], tot_loss[loss=0.412, simple_loss=0.4327, pruned_loss=0.1957, over 1610710.63 frames. ], batch size: 28, lr: 3.69e-02, grad_scale: 8.0 +2023-02-05 19:25:51,297 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.517e+02 4.083e+02 4.742e+02 6.161e+02 2.101e+03, threshold=9.483e+02, percent-clipped=6.0 +2023-02-05 19:26:07,598 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 19:26:21,649 INFO [train.py:901] (3/4) Epoch 1, batch 7750, loss[loss=0.463, simple_loss=0.466, pruned_loss=0.23, over 8728.00 frames. ], tot_loss[loss=0.4112, simple_loss=0.4325, pruned_loss=0.1949, over 1613990.37 frames. ], batch size: 34, lr: 3.68e-02, grad_scale: 8.0 +2023-02-05 19:26:23,165 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7752.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:29,122 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7761.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:34,419 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7769.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:26:42,672 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7781.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 19:26:56,375 INFO [train.py:901] (3/4) Epoch 1, batch 7800, loss[loss=0.3993, simple_loss=0.4165, pruned_loss=0.1911, over 7974.00 frames. ], tot_loss[loss=0.4094, simple_loss=0.4318, pruned_loss=0.1935, over 1615915.19 frames. 
], batch size: 21, lr: 3.67e-02, grad_scale: 8.0 +2023-02-05 19:26:59,822 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7806.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 19:27:01,643 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.720e+02 4.585e+02 5.523e+02 1.290e+03, threshold=9.170e+02, percent-clipped=3.0 +2023-02-05 19:27:09,239 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=7820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:29,703 INFO [train.py:901] (3/4) Epoch 1, batch 7850, loss[loss=0.3691, simple_loss=0.4164, pruned_loss=0.1609, over 8087.00 frames. ], tot_loss[loss=0.409, simple_loss=0.431, pruned_loss=0.1935, over 1611756.27 frames. ], batch size: 21, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:27:44,076 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3173, 2.1037, 3.2772, 3.1386, 2.6664, 1.8484, 1.9183, 2.3612], + device='cuda:3'), covar=tensor([0.1252, 0.0958, 0.0189, 0.0249, 0.0481, 0.0541, 0.0617, 0.0726], + device='cuda:3'), in_proj_covar=tensor([0.0376, 0.0271, 0.0171, 0.0201, 0.0267, 0.0260, 0.0272, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 19:27:52,030 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=7884.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:27:59,878 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=7896.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:03,017 INFO [train.py:901] (3/4) Epoch 1, batch 7900, loss[loss=0.3724, simple_loss=0.4017, pruned_loss=0.1715, over 8472.00 frames. ], tot_loss[loss=0.4063, simple_loss=0.429, pruned_loss=0.1919, over 1614169.27 frames. ], batch size: 25, lr: 3.66e-02, grad_scale: 8.0 +2023-02-05 19:28:08,431 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.732e+02 4.923e+02 6.190e+02 1.863e+03, threshold=9.845e+02, percent-clipped=5.0 +2023-02-05 19:28:16,376 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=7921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:28:35,804 INFO [train.py:901] (3/4) Epoch 1, batch 7950, loss[loss=0.3425, simple_loss=0.3772, pruned_loss=0.1539, over 7551.00 frames. ], tot_loss[loss=0.4054, simple_loss=0.4286, pruned_loss=0.1911, over 1613412.83 frames. ], batch size: 18, lr: 3.65e-02, grad_scale: 8.0 +2023-02-05 19:28:48,237 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-05 19:28:49,472 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.02 vs. limit=2.0 +2023-02-05 19:28:59,118 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=7986.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:10,051 INFO [train.py:901] (3/4) Epoch 1, batch 8000, loss[loss=0.5878, simple_loss=0.5425, pruned_loss=0.3165, over 7273.00 frames. ], tot_loss[loss=0.4065, simple_loss=0.4293, pruned_loss=0.1919, over 1613254.04 frames. 
], batch size: 71, lr: 3.64e-02, grad_scale: 8.0 +2023-02-05 19:29:15,105 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:15,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.650e+02 3.959e+02 4.934e+02 6.403e+02 1.426e+03, threshold=9.868e+02, percent-clipped=4.0 +2023-02-05 19:29:20,382 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4657, 1.3525, 1.9801, 1.6266, 1.3771, 1.7444, 0.7920, 1.4797], + device='cuda:3'), covar=tensor([0.0526, 0.0532, 0.0173, 0.0270, 0.0447, 0.0209, 0.1227, 0.0488], + device='cuda:3'), in_proj_covar=tensor([0.0136, 0.0111, 0.0093, 0.0134, 0.0116, 0.0081, 0.0163, 0.0132], + device='cuda:3'), out_proj_covar=tensor([1.1495e-04, 1.0514e-04, 8.1513e-05, 1.1284e-04, 1.0738e-04, 7.1609e-05, + 1.4203e-04, 1.1730e-04], device='cuda:3') +2023-02-05 19:29:31,438 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8033.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:29:43,013 INFO [train.py:901] (3/4) Epoch 1, batch 8050, loss[loss=0.3641, simple_loss=0.3929, pruned_loss=0.1676, over 7436.00 frames. ], tot_loss[loss=0.4063, simple_loss=0.4287, pruned_loss=0.1919, over 1597148.99 frames. ], batch size: 17, lr: 3.63e-02, grad_scale: 16.0 +2023-02-05 19:30:16,737 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 19:30:20,858 INFO [train.py:901] (3/4) Epoch 2, batch 0, loss[loss=0.4977, simple_loss=0.4681, pruned_loss=0.2637, over 7792.00 frames. ], tot_loss[loss=0.4977, simple_loss=0.4681, pruned_loss=0.2637, over 7792.00 frames. ], batch size: 19, lr: 3.56e-02, grad_scale: 8.0 +2023-02-05 19:30:20,858 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 19:30:32,395 INFO [train.py:935] (3/4) Epoch 2, validation: loss=0.3107, simple_loss=0.3861, pruned_loss=0.1176, over 944034.00 frames. +2023-02-05 19:30:32,396 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6536MB +2023-02-05 19:30:44,061 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8101.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:46,614 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 19:30:46,680 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8105.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:30:49,933 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 3.846e+02 4.676e+02 6.027e+02 1.450e+03, threshold=9.352e+02, percent-clipped=5.0 +2023-02-05 19:31:06,754 INFO [train.py:901] (3/4) Epoch 2, batch 50, loss[loss=0.4538, simple_loss=0.4714, pruned_loss=0.2181, over 8329.00 frames. ], tot_loss[loss=0.4055, simple_loss=0.4291, pruned_loss=0.1909, over 363162.79 frames. ], batch size: 25, lr: 3.55e-02, grad_scale: 8.0 +2023-02-05 19:31:11,120 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8140.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:20,781 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-05 19:31:20,999 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0888, 1.3183, 2.2919, 0.7550, 1.6318, 1.4113, 1.1551, 1.5634], + device='cuda:3'), covar=tensor([0.1691, 0.1697, 0.0375, 0.2117, 0.1121, 0.1948, 0.1609, 0.1150], + device='cuda:3'), in_proj_covar=tensor([0.0314, 0.0313, 0.0303, 0.0342, 0.0395, 0.0363, 0.0319, 0.0376], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 19:31:28,340 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:29,178 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8165.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:31:41,594 INFO [train.py:901] (3/4) Epoch 2, batch 100, loss[loss=0.4355, simple_loss=0.4548, pruned_loss=0.208, over 8460.00 frames. ], tot_loss[loss=0.4092, simple_loss=0.4311, pruned_loss=0.1937, over 640367.11 frames. ], batch size: 27, lr: 3.54e-02, grad_scale: 8.0 +2023-02-05 19:31:44,275 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 19:31:59,424 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.305e+02 4.246e+02 4.943e+02 6.491e+02 9.375e+02, threshold=9.885e+02, percent-clipped=1.0 +2023-02-05 19:32:06,339 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:07,762 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6156, 1.9599, 3.1216, 0.9820, 2.3549, 1.7417, 1.6091, 1.9689], + device='cuda:3'), covar=tensor([0.1070, 0.1166, 0.0338, 0.1744, 0.0963, 0.1591, 0.0942, 0.1248], + device='cuda:3'), in_proj_covar=tensor([0.0324, 0.0316, 0.0312, 0.0356, 0.0403, 0.0372, 0.0324, 0.0388], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 19:32:15,455 INFO [train.py:901] (3/4) Epoch 2, batch 150, loss[loss=0.4019, simple_loss=0.4296, pruned_loss=0.1871, over 8540.00 frames. ], tot_loss[loss=0.4065, simple_loss=0.4296, pruned_loss=0.1916, over 856394.75 frames. ], batch size: 39, lr: 3.53e-02, grad_scale: 8.0 +2023-02-05 19:32:47,787 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,396 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8283.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:32:50,927 INFO [train.py:901] (3/4) Epoch 2, batch 200, loss[loss=0.4125, simple_loss=0.4432, pruned_loss=0.1909, over 8243.00 frames. ], tot_loss[loss=0.404, simple_loss=0.4286, pruned_loss=0.1897, over 1024940.13 frames. ], batch size: 24, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:32:53,836 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-02-05 19:33:08,598 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.581e+02 3.727e+02 4.975e+02 6.903e+02 1.681e+03, threshold=9.950e+02, percent-clipped=7.0 +2023-02-05 19:33:24,846 INFO [train.py:901] (3/4) Epoch 2, batch 250, loss[loss=0.3923, simple_loss=0.4029, pruned_loss=0.1908, over 7954.00 frames. ], tot_loss[loss=0.4033, simple_loss=0.4282, pruned_loss=0.1892, over 1155873.13 frames. 
], batch size: 21, lr: 3.52e-02, grad_scale: 8.0 +2023-02-05 19:33:27,779 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2478, 1.7325, 1.4181, 1.3637, 1.9858, 1.6512, 1.8149, 2.0878], + device='cuda:3'), covar=tensor([0.1212, 0.1728, 0.2441, 0.2086, 0.1046, 0.1778, 0.1343, 0.1017], + device='cuda:3'), in_proj_covar=tensor([0.0259, 0.0281, 0.0299, 0.0272, 0.0260, 0.0252, 0.0257, 0.0246], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:3') +2023-02-05 19:33:36,303 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 19:33:40,653 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:46,002 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 19:33:58,462 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:33:59,598 INFO [train.py:901] (3/4) Epoch 2, batch 300, loss[loss=0.3645, simple_loss=0.4109, pruned_loss=0.159, over 8450.00 frames. ], tot_loss[loss=0.4017, simple_loss=0.4268, pruned_loss=0.1883, over 1258328.79 frames. ], batch size: 27, lr: 3.51e-02, grad_scale: 8.0 +2023-02-05 19:34:18,661 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 4.043e+02 4.737e+02 5.583e+02 9.957e+02, threshold=9.474e+02, percent-clipped=1.0 +2023-02-05 19:34:32,213 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5358, 4.7488, 3.9881, 1.6534, 3.9009, 3.8643, 4.2236, 3.5426], + device='cuda:3'), covar=tensor([0.0793, 0.0314, 0.0732, 0.4047, 0.0437, 0.0661, 0.0793, 0.0394], + device='cuda:3'), in_proj_covar=tensor([0.0291, 0.0186, 0.0236, 0.0308, 0.0195, 0.0147, 0.0213, 0.0136], + device='cuda:3'), out_proj_covar=tensor([2.0865e-04, 1.2919e-04, 1.5256e-04, 1.9551e-04, 1.2491e-04, 1.0519e-04, + 1.4181e-04, 9.6670e-05], device='cuda:3') +2023-02-05 19:34:35,502 INFO [train.py:901] (3/4) Epoch 2, batch 350, loss[loss=0.4392, simple_loss=0.4593, pruned_loss=0.2096, over 8444.00 frames. ], tot_loss[loss=0.4015, simple_loss=0.4264, pruned_loss=0.1883, over 1336092.40 frames. ], batch size: 27, lr: 3.50e-02, grad_scale: 8.0 +2023-02-05 19:34:43,055 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.77 vs. limit=2.0 +2023-02-05 19:35:03,559 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8476.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:05,180 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-05 19:35:09,448 INFO [train.py:901] (3/4) Epoch 2, batch 400, loss[loss=0.3832, simple_loss=0.4306, pruned_loss=0.1679, over 8359.00 frames. ], tot_loss[loss=0.4021, simple_loss=0.4269, pruned_loss=0.1886, over 1397605.11 frames. 
], batch size: 24, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:16,989 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2865, 2.3423, 1.7621, 2.4176, 2.1725, 1.7605, 2.2687, 2.5424], + device='cuda:3'), covar=tensor([0.0997, 0.0526, 0.1000, 0.0660, 0.0958, 0.1112, 0.0986, 0.0719], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0226, 0.0347, 0.0297, 0.0340, 0.0308, 0.0351, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:3') +2023-02-05 19:35:20,903 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8501.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:35:27,438 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.339e+02 4.887e+02 6.099e+02 1.134e+03, threshold=9.773e+02, percent-clipped=6.0 +2023-02-05 19:35:43,486 INFO [train.py:901] (3/4) Epoch 2, batch 450, loss[loss=0.3449, simple_loss=0.3743, pruned_loss=0.1577, over 7799.00 frames. ], tot_loss[loss=0.4024, simple_loss=0.4271, pruned_loss=0.1888, over 1449706.06 frames. ], batch size: 19, lr: 3.49e-02, grad_scale: 8.0 +2023-02-05 19:35:44,343 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:01,172 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1448, 1.8506, 1.2662, 1.9935, 1.6524, 1.1870, 1.2341, 2.1988], + device='cuda:3'), covar=tensor([0.1456, 0.0656, 0.1536, 0.0751, 0.1211, 0.1721, 0.1533, 0.0702], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0237, 0.0357, 0.0302, 0.0339, 0.0313, 0.0350, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003], + device='cuda:3') +2023-02-05 19:36:01,873 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=8560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:17,992 INFO [train.py:901] (3/4) Epoch 2, batch 500, loss[loss=0.3989, simple_loss=0.4241, pruned_loss=0.1869, over 8026.00 frames. ], tot_loss[loss=0.4023, simple_loss=0.4276, pruned_loss=0.1885, over 1489056.32 frames. ], batch size: 22, lr: 3.48e-02, grad_scale: 8.0 +2023-02-05 19:36:36,148 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.361e+02 3.910e+02 4.803e+02 5.619e+02 9.699e+02, threshold=9.605e+02, percent-clipped=0.0 +2023-02-05 19:36:39,797 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. 
limit=2.0 +2023-02-05 19:36:46,910 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0203, 4.2905, 3.5930, 1.5540, 3.4878, 3.5397, 3.7858, 3.1516], + device='cuda:3'), covar=tensor([0.1074, 0.0426, 0.0915, 0.4226, 0.0553, 0.0564, 0.0896, 0.0572], + device='cuda:3'), in_proj_covar=tensor([0.0276, 0.0182, 0.0227, 0.0300, 0.0190, 0.0141, 0.0201, 0.0134], + device='cuda:3'), out_proj_covar=tensor([1.9803e-04, 1.2699e-04, 1.4554e-04, 1.8939e-04, 1.2231e-04, 1.0151e-04, + 1.3450e-04, 9.4873e-05], device='cuda:3') +2023-02-05 19:36:47,558 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=8627.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:36:50,876 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.3086, 2.8620, 2.1285, 1.8104, 2.8398, 2.2067, 2.7350, 3.0723], + device='cuda:3'), covar=tensor([0.1226, 0.1696, 0.2199, 0.2035, 0.0937, 0.1856, 0.1196, 0.0896], + device='cuda:3'), in_proj_covar=tensor([0.0263, 0.0284, 0.0299, 0.0278, 0.0261, 0.0261, 0.0259, 0.0246], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:3') +2023-02-05 19:36:52,666 INFO [train.py:901] (3/4) Epoch 2, batch 550, loss[loss=0.4291, simple_loss=0.4193, pruned_loss=0.2195, over 7534.00 frames. ], tot_loss[loss=0.4009, simple_loss=0.4266, pruned_loss=0.1876, over 1515111.95 frames. ], batch size: 18, lr: 3.47e-02, grad_scale: 8.0 +2023-02-05 19:37:05,534 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3756, 1.1505, 1.2309, 1.0836, 1.6138, 1.1217, 1.0630, 1.5424], + device='cuda:3'), covar=tensor([0.1460, 0.2447, 0.2913, 0.2514, 0.0986, 0.2575, 0.1567, 0.1164], + device='cuda:3'), in_proj_covar=tensor([0.0260, 0.0279, 0.0291, 0.0273, 0.0257, 0.0256, 0.0256, 0.0247], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004], + device='cuda:3') +2023-02-05 19:37:26,522 INFO [train.py:901] (3/4) Epoch 2, batch 600, loss[loss=0.4362, simple_loss=0.431, pruned_loss=0.2207, over 7961.00 frames. ], tot_loss[loss=0.4016, simple_loss=0.4273, pruned_loss=0.188, over 1538722.47 frames. ], batch size: 21, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:37:43,312 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.752e+02 3.934e+02 5.073e+02 6.758e+02 1.500e+03, threshold=1.015e+03, percent-clipped=5.0 +2023-02-05 19:37:44,744 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 19:37:59,726 INFO [train.py:901] (3/4) Epoch 2, batch 650, loss[loss=0.4769, simple_loss=0.4708, pruned_loss=0.2415, over 8128.00 frames. ], tot_loss[loss=0.4001, simple_loss=0.4264, pruned_loss=0.187, over 1560233.24 frames. ], batch size: 22, lr: 3.46e-02, grad_scale: 8.0 +2023-02-05 19:38:05,384 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=8742.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:31,190 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:38:35,541 INFO [train.py:901] (3/4) Epoch 2, batch 700, loss[loss=0.4168, simple_loss=0.4439, pruned_loss=0.1949, over 8464.00 frames. ], tot_loss[loss=0.3972, simple_loss=0.4243, pruned_loss=0.1851, over 1571545.87 frames. 
], batch size: 27, lr: 3.45e-02, grad_scale: 8.0 +2023-02-05 19:38:53,112 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.421e+02 3.759e+02 4.676e+02 6.060e+02 1.461e+03, threshold=9.352e+02, percent-clipped=1.0 +2023-02-05 19:38:53,988 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6603, 1.1369, 3.7074, 1.4946, 3.0409, 3.0916, 3.0942, 3.1525], + device='cuda:3'), covar=tensor([0.0328, 0.2907, 0.0249, 0.1329, 0.0769, 0.0336, 0.0350, 0.0426], + device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0350, 0.0195, 0.0231, 0.0265, 0.0215, 0.0192, 0.0228], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 19:38:59,387 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2866, 1.4766, 1.5218, 0.3558, 1.5720, 1.1549, 0.3106, 1.6223], + device='cuda:3'), covar=tensor([0.0177, 0.0143, 0.0254, 0.0389, 0.0150, 0.0466, 0.0517, 0.0137], + device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0129, 0.0108, 0.0173, 0.0120, 0.0200, 0.0180, 0.0145], + device='cuda:3'), out_proj_covar=tensor([1.1934e-04, 9.1277e-05, 8.2637e-05, 1.2607e-04, 9.2538e-05, 1.5435e-04, + 1.3345e-04, 1.0701e-04], device='cuda:3') +2023-02-05 19:39:09,171 INFO [train.py:901] (3/4) Epoch 2, batch 750, loss[loss=0.4067, simple_loss=0.425, pruned_loss=0.1942, over 7808.00 frames. ], tot_loss[loss=0.3986, simple_loss=0.4251, pruned_loss=0.186, over 1581392.38 frames. ], batch size: 20, lr: 3.44e-02, grad_scale: 8.0 +2023-02-05 19:39:26,402 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 19:39:35,541 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 19:39:44,331 INFO [train.py:901] (3/4) Epoch 2, batch 800, loss[loss=0.3347, simple_loss=0.3706, pruned_loss=0.1494, over 7444.00 frames. ], tot_loss[loss=0.4021, simple_loss=0.4272, pruned_loss=0.1885, over 1590175.84 frames. ], batch size: 17, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:02,277 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 4.043e+02 5.225e+02 6.708e+02 1.302e+03, threshold=1.045e+03, percent-clipped=9.0 +2023-02-05 19:40:18,487 INFO [train.py:901] (3/4) Epoch 2, batch 850, loss[loss=0.3889, simple_loss=0.4035, pruned_loss=0.1871, over 7930.00 frames. ], tot_loss[loss=0.4014, simple_loss=0.4274, pruned_loss=0.1877, over 1602198.66 frames. ], batch size: 20, lr: 3.43e-02, grad_scale: 8.0 +2023-02-05 19:40:26,055 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=8945.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 19:40:32,045 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7731, 2.3297, 2.4955, 0.3369, 2.3177, 1.5958, 0.8043, 1.3454], + device='cuda:3'), covar=tensor([0.0300, 0.0142, 0.0177, 0.0593, 0.0308, 0.0466, 0.0649, 0.0307], + device='cuda:3'), in_proj_covar=tensor([0.0161, 0.0123, 0.0105, 0.0164, 0.0119, 0.0203, 0.0171, 0.0143], + device='cuda:3'), out_proj_covar=tensor([1.1400e-04, 8.7058e-05, 8.0642e-05, 1.1866e-04, 9.2732e-05, 1.5596e-04, + 1.2641e-04, 1.0579e-04], device='cuda:3') +2023-02-05 19:40:52,644 INFO [train.py:901] (3/4) Epoch 2, batch 900, loss[loss=0.4305, simple_loss=0.4495, pruned_loss=0.2057, over 7806.00 frames. ], tot_loss[loss=0.3996, simple_loss=0.4262, pruned_loss=0.1865, over 1605951.09 frames. 
], batch size: 20, lr: 3.42e-02, grad_scale: 8.0
+2023-02-05 19:41:03,942 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=8998.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:41:08,133 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9004.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:41:12,012 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.317e+02 3.660e+02 4.402e+02 6.333e+02 1.420e+03, threshold=8.805e+02, percent-clipped=4.0
+2023-02-05 19:41:12,194 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3367, 1.3214, 2.9130, 1.1551, 1.9600, 3.3145, 3.0474, 2.8134],
+       device='cuda:3'), covar=tensor([0.1703, 0.1984, 0.0402, 0.2214, 0.0838, 0.0233, 0.0247, 0.0454],
+       device='cuda:3'), in_proj_covar=tensor([0.0237, 0.0263, 0.0174, 0.0254, 0.0188, 0.0137, 0.0134, 0.0196],
+       device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+       device='cuda:3')
+2023-02-05 19:41:21,858 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9023.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:41:27,244 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9031.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:41:29,113 INFO [train.py:901] (3/4) Epoch 2, batch 950, loss[loss=0.3685, simple_loss=0.3959, pruned_loss=0.1705, over 7817.00 frames. ], tot_loss[loss=0.3992, simple_loss=0.4262, pruned_loss=0.1862, over 1612392.88 frames. ], batch size: 20, lr: 3.41e-02, grad_scale: 8.0
+2023-02-05 19:41:30,788 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0302, 2.4844, 4.2676, 3.9835, 3.2277, 2.6397, 1.9668, 2.2674],
+       device='cuda:3'), covar=tensor([0.0871, 0.0994, 0.0144, 0.0241, 0.0460, 0.0386, 0.0580, 0.0965],
+       device='cuda:3'), in_proj_covar=tensor([0.0412, 0.0319, 0.0221, 0.0253, 0.0327, 0.0294, 0.0319, 0.0365],
+       device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:3')
+2023-02-05 19:41:48,094 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.79 vs. limit=5.0
+2023-02-05 19:41:57,088 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-05 19:42:04,017 INFO [train.py:901] (3/4) Epoch 2, batch 1000, loss[loss=0.3898, simple_loss=0.4111, pruned_loss=0.1843, over 8136.00 frames. ], tot_loss[loss=0.3985, simple_loss=0.426, pruned_loss=0.1855, over 1617163.42 frames. ], batch size: 22, lr: 3.40e-02, grad_scale: 8.0
+2023-02-05 19:42:08,235 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4015, 0.8786, 4.5054, 1.8767, 3.7236, 3.6916, 3.9129, 3.9453],
+       device='cuda:3'), covar=tensor([0.0367, 0.3976, 0.0221, 0.1576, 0.1039, 0.0338, 0.0296, 0.0364],
+       device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0353, 0.0196, 0.0237, 0.0278, 0.0214, 0.0195, 0.0227],
+       device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001],
+       device='cuda:3')
+2023-02-05 19:42:22,621 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.505e+02 3.676e+02 4.681e+02 5.718e+02 9.745e+02, threshold=9.362e+02, percent-clipped=2.0
+2023-02-05 19:42:30,647 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:42:31,281 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-05 19:42:39,158 INFO [train.py:901] (3/4) Epoch 2, batch 1050, loss[loss=0.3851, simple_loss=0.4075, pruned_loss=0.1814, over 7203.00 frames. ], tot_loss[loss=0.3981, simple_loss=0.4261, pruned_loss=0.185, over 1619973.11 frames. ], batch size: 16, lr: 3.40e-02, grad_scale: 8.0
+2023-02-05 19:42:43,244 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-05 19:43:12,161 INFO [train.py:901] (3/4) Epoch 2, batch 1100, loss[loss=0.3771, simple_loss=0.4038, pruned_loss=0.1752, over 8289.00 frames. ], tot_loss[loss=0.3951, simple_loss=0.4238, pruned_loss=0.1832, over 1619295.91 frames. ], batch size: 23, lr: 3.39e-02, grad_scale: 8.0
+2023-02-05 19:43:30,063 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.895e+02 4.986e+02 6.293e+02 1.172e+03, threshold=9.973e+02, percent-clipped=2.0
+2023-02-05 19:43:47,485 INFO [train.py:901] (3/4) Epoch 2, batch 1150, loss[loss=0.3695, simple_loss=0.3793, pruned_loss=0.1799, over 7539.00 frames. ], tot_loss[loss=0.3972, simple_loss=0.4248, pruned_loss=0.1848, over 1619076.10 frames. ], batch size: 18, lr: 3.38e-02, grad_scale: 8.0
+2023-02-05 19:43:49,754 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9237.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:43:50,988 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-05 19:44:18,414 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.00 vs. limit=2.0
+2023-02-05 19:44:22,129 INFO [train.py:901] (3/4) Epoch 2, batch 1200, loss[loss=0.3601, simple_loss=0.3975, pruned_loss=0.1614, over 7804.00 frames. ], tot_loss[loss=0.3978, simple_loss=0.4251, pruned_loss=0.1853, over 1620699.46 frames. ], batch size: 20, lr: 3.38e-02, grad_scale: 8.0
+2023-02-05 19:44:25,542 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9289.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:44:41,024 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.810e+02 4.160e+02 4.885e+02 6.720e+02 4.965e+03, threshold=9.769e+02, percent-clipped=5.0
+2023-02-05 19:44:56,246 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2066, 1.1803, 1.8363, 1.4619, 1.1629, 1.8273, 0.3851, 1.1434],
+       device='cuda:3'), covar=tensor([0.0881, 0.0652, 0.0313, 0.0434, 0.0602, 0.0311, 0.1618, 0.0771],
+       device='cuda:3'), in_proj_covar=tensor([0.0132, 0.0103, 0.0094, 0.0130, 0.0114, 0.0085, 0.0158, 0.0126],
+       device='cuda:3'), out_proj_covar=tensor([1.1616e-04, 1.0030e-04, 8.4678e-05, 1.1451e-04, 1.1153e-04, 7.8152e-05,
+       1.3983e-04, 1.1723e-04], device='cuda:3')
+2023-02-05 19:44:56,719 INFO [train.py:901] (3/4) Epoch 2, batch 1250, loss[loss=0.3739, simple_loss=0.4112, pruned_loss=0.1683, over 8106.00 frames. ], tot_loss[loss=0.3987, simple_loss=0.4255, pruned_loss=0.1859, over 1618941.58 frames. ], batch size: 23, lr: 3.37e-02, grad_scale: 4.0
+2023-02-05 19:44:59,949 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.81 vs. limit=2.0
+2023-02-05 19:45:07,485 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9348.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:45:25,843 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9375.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:45:31,825 INFO [train.py:901] (3/4) Epoch 2, batch 1300, loss[loss=0.4203, simple_loss=0.4535, pruned_loss=0.1935, over 8321.00 frames. ], tot_loss[loss=0.3979, simple_loss=0.4248, pruned_loss=0.1856, over 1616909.46 frames. ], batch size: 25, lr: 3.36e-02, grad_scale: 4.0
+2023-02-05 19:45:45,741 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9404.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:45:50,304 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.161e+02 4.162e+02 5.656e+02 7.688e+02 2.529e+03, threshold=1.131e+03, percent-clipped=11.0
+2023-02-05 19:46:05,044 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9432.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:46:06,286 INFO [train.py:901] (3/4) Epoch 2, batch 1350, loss[loss=0.4197, simple_loss=0.4391, pruned_loss=0.2002, over 8355.00 frames. ], tot_loss[loss=0.3956, simple_loss=0.4232, pruned_loss=0.184, over 1612943.26 frames. ], batch size: 26, lr: 3.36e-02, grad_scale: 4.0
+2023-02-05 19:46:27,275 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9463.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:46:41,366 INFO [train.py:901] (3/4) Epoch 2, batch 1400, loss[loss=0.4802, simple_loss=0.4769, pruned_loss=0.2418, over 7367.00 frames. ], tot_loss[loss=0.3952, simple_loss=0.4228, pruned_loss=0.1838, over 1610931.37 frames. ], batch size: 71, lr: 3.35e-02, grad_scale: 4.0
+2023-02-05 19:46:45,508 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9490.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:46:47,591 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9493.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:46:59,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.192e+02 3.889e+02 4.981e+02 6.326e+02 1.555e+03, threshold=9.962e+02, percent-clipped=1.0
+2023-02-05 19:47:04,257 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9518.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:47:15,070 INFO [train.py:901] (3/4) Epoch 2, batch 1450, loss[loss=0.3825, simple_loss=0.4193, pruned_loss=0.1729, over 8335.00 frames. ], tot_loss[loss=0.3943, simple_loss=0.4221, pruned_loss=0.1832, over 1613719.64 frames. ], batch size: 25, lr: 3.34e-02, grad_scale: 4.0
+2023-02-05 19:47:19,043 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-05 19:47:49,262 INFO [train.py:901] (3/4) Epoch 2, batch 1500, loss[loss=0.4187, simple_loss=0.4373, pruned_loss=0.2, over 7974.00 frames. ], tot_loss[loss=0.3944, simple_loss=0.4224, pruned_loss=0.1832, over 1614673.96 frames. ], batch size: 21, lr: 3.33e-02, grad_scale: 4.0
+2023-02-05 19:48:01,349 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9602.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:48:07,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.496e+02 4.006e+02 4.905e+02 6.157e+02 1.300e+03, threshold=9.811e+02, percent-clipped=3.0
+2023-02-05 19:48:23,381 INFO [train.py:901] (3/4) Epoch 2, batch 1550, loss[loss=0.3469, simple_loss=0.3931, pruned_loss=0.1503, over 8358.00 frames. ], tot_loss[loss=0.3936, simple_loss=0.4213, pruned_loss=0.1829, over 1612167.59 frames. ], batch size: 24, lr: 3.33e-02, grad_scale: 4.0
+2023-02-05 19:48:41,694 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9660.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:48:52,343 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9676.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:48:57,573 INFO [train.py:901] (3/4) Epoch 2, batch 1600, loss[loss=0.4571, simple_loss=0.4752, pruned_loss=0.2195, over 8187.00 frames. ], tot_loss[loss=0.3934, simple_loss=0.4211, pruned_loss=0.1828, over 1613421.23 frames. ], batch size: 23, lr: 3.32e-02, grad_scale: 8.0
+2023-02-05 19:48:58,389 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9685.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:49:17,085 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.659e+02 4.192e+02 5.177e+02 6.492e+02 1.266e+03, threshold=1.035e+03, percent-clipped=2.0
+2023-02-05 19:49:22,885 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9719.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:49:33,626 INFO [train.py:901] (3/4) Epoch 2, batch 1650, loss[loss=0.3198, simple_loss=0.3736, pruned_loss=0.133, over 7977.00 frames. ], tot_loss[loss=0.3926, simple_loss=0.4215, pruned_loss=0.1818, over 1615566.32 frames. ], batch size: 21, lr: 3.31e-02, grad_scale: 8.0
+2023-02-05 19:49:40,256 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:49:41,569 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=9746.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:49:51,532 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=9761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:49:58,951 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=9771.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:50:02,239 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9776.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:50:07,344 INFO [train.py:901] (3/4) Epoch 2, batch 1700, loss[loss=0.4117, simple_loss=0.4309, pruned_loss=0.1962, over 8466.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.4218, pruned_loss=0.1819, over 1615012.29 frames. ], batch size: 25, lr: 3.31e-02, grad_scale: 8.0
+2023-02-05 19:50:21,970 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-05 19:50:26,241 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.640e+02 4.068e+02 5.098e+02 6.535e+02 1.207e+03, threshold=1.020e+03, percent-clipped=5.0
+2023-02-05 19:50:42,244 INFO [train.py:901] (3/4) Epoch 2, batch 1750, loss[loss=0.4136, simple_loss=0.4466, pruned_loss=0.1903, over 8641.00 frames. ], tot_loss[loss=0.3925, simple_loss=0.4215, pruned_loss=0.1817, over 1616646.41 frames. ], batch size: 49, lr: 3.30e-02, grad_scale: 8.0
+2023-02-05 19:51:02,102 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6060, 2.0796, 3.7512, 1.1835, 2.3718, 1.9912, 1.7159, 2.0789],
+       device='cuda:3'), covar=tensor([0.0899, 0.1155, 0.0261, 0.1474, 0.0988, 0.1449, 0.0871, 0.1336],
+       device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0335, 0.0348, 0.0379, 0.0431, 0.0400, 0.0346, 0.0423],
+       device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003],
+       device='cuda:3')
+2023-02-05 19:51:16,319 INFO [train.py:901] (3/4) Epoch 2, batch 1800, loss[loss=0.5017, simple_loss=0.4789, pruned_loss=0.2622, over 6645.00 frames. ], tot_loss[loss=0.3928, simple_loss=0.4214, pruned_loss=0.1821, over 1613178.43 frames. ], batch size: 71, lr: 3.29e-02, grad_scale: 8.0
+2023-02-05 19:51:21,268 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=9891.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:51:34,083 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.365e+02 4.111e+02 5.198e+02 6.626e+02 1.120e+03, threshold=1.040e+03, percent-clipped=3.0
+2023-02-05 19:51:49,954 INFO [train.py:901] (3/4) Epoch 2, batch 1850, loss[loss=0.3695, simple_loss=0.4198, pruned_loss=0.1596, over 8686.00 frames. ], tot_loss[loss=0.3929, simple_loss=0.4214, pruned_loss=0.1823, over 1616917.30 frames. ], batch size: 34, lr: 3.29e-02, grad_scale: 8.0
+2023-02-05 19:51:58,652 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=9946.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:52:00,331 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. limit=2.0
+2023-02-05 19:52:24,350 INFO [train.py:901] (3/4) Epoch 2, batch 1900, loss[loss=0.3827, simple_loss=0.4159, pruned_loss=0.1747, over 8493.00 frames. ], tot_loss[loss=0.3903, simple_loss=0.4197, pruned_loss=0.1804, over 1616940.34 frames. ], batch size: 26, lr: 3.28e-02, grad_scale: 8.0
+2023-02-05 19:52:43,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.445e+02 3.513e+02 4.327e+02 5.785e+02 1.080e+03, threshold=8.653e+02, percent-clipped=1.0
+2023-02-05 19:52:49,916 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10020.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:52:54,612 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-05 19:52:59,200 INFO [train.py:901] (3/4) Epoch 2, batch 1950, loss[loss=0.3733, simple_loss=0.4183, pruned_loss=0.1641, over 8579.00 frames. ], tot_loss[loss=0.3897, simple_loss=0.4187, pruned_loss=0.1804, over 1613664.74 frames. ], batch size: 49, lr: 3.27e-02, grad_scale: 8.0
+2023-02-05 19:53:06,849 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-05 19:53:15,906 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10057.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:53:19,396 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:53:25,610 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-05 19:53:33,046 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10080.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 19:53:33,648 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3109, 2.3711, 1.8895, 3.0941, 1.7425, 1.4844, 1.6103, 2.3719],
+       device='cuda:3'), covar=tensor([0.0965, 0.1382, 0.1542, 0.0354, 0.1711, 0.2281, 0.2312, 0.1127],
+       device='cuda:3'), in_proj_covar=tensor([0.0297, 0.0322, 0.0311, 0.0214, 0.0324, 0.0343, 0.0371, 0.0299],
+       device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0004, 0.0004, 0.0003, 0.0004, 0.0004, 0.0004, 0.0004],
+       device='cuda:3')
+2023-02-05 19:53:35,464 INFO [train.py:901] (3/4) Epoch 2, batch 2000, loss[loss=0.4012, simple_loss=0.4072, pruned_loss=0.1976, over 7691.00 frames. ], tot_loss[loss=0.391, simple_loss=0.4196, pruned_loss=0.1812, over 1613187.33 frames. ], batch size: 18, lr: 3.27e-02, grad_scale: 8.0
+2023-02-05 19:53:42,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3059, 1.6110, 1.2316, 1.1550, 1.9531, 1.4918, 1.7090, 1.8804],
+       device='cuda:3'), covar=tensor([0.1181, 0.1887, 0.2637, 0.2192, 0.0970, 0.1951, 0.1294, 0.1033],
+       device='cuda:3'), in_proj_covar=tensor([0.0238, 0.0265, 0.0284, 0.0260, 0.0239, 0.0247, 0.0230, 0.0228],
+       device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0004, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004],
+       device='cuda:3')
+2023-02-05 19:53:46,355 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0821, 2.6040, 4.2418, 4.1260, 3.1626, 2.3503, 1.7456, 2.1887],
+       device='cuda:3'), covar=tensor([0.0850, 0.1041, 0.0140, 0.0229, 0.0449, 0.0483, 0.0647, 0.0901],
+       device='cuda:3'), in_proj_covar=tensor([0.0446, 0.0355, 0.0255, 0.0293, 0.0375, 0.0331, 0.0351, 0.0389],
+       device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:3')
+2023-02-05 19:53:50,417 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10105.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:53:55,727 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.648e+02 4.167e+02 5.413e+02 6.926e+02 6.671e+03, threshold=1.083e+03, percent-clipped=14.0
+2023-02-05 19:54:10,561 INFO [train.py:901] (3/4) Epoch 2, batch 2050, loss[loss=0.3667, simple_loss=0.4154, pruned_loss=0.159, over 8252.00 frames. ], tot_loss[loss=0.3907, simple_loss=0.4198, pruned_loss=0.1808, over 1617414.34 frames. ], batch size: 24, lr: 3.26e-02, grad_scale: 4.0
+2023-02-05 19:54:11,456 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10135.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:54:19,458 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10147.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:54:36,885 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10172.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:54:45,487 INFO [train.py:901] (3/4) Epoch 2, batch 2100, loss[loss=0.3357, simple_loss=0.3746, pruned_loss=0.1484, over 7933.00 frames. ], tot_loss[loss=0.3889, simple_loss=0.4188, pruned_loss=0.1794, over 1620796.65 frames. ], batch size: 20, lr: 3.25e-02, grad_scale: 4.0
+2023-02-05 19:54:55,285 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.30 vs. limit=5.0
+2023-02-05 19:54:59,735 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0
+2023-02-05 19:55:06,153 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.788e+02 4.646e+02 5.840e+02 1.328e+03, threshold=9.292e+02, percent-clipped=3.0
+2023-02-05 19:55:11,276 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10220.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:55:20,261 INFO [train.py:901] (3/4) Epoch 2, batch 2150, loss[loss=0.4237, simple_loss=0.4441, pruned_loss=0.2016, over 8457.00 frames. ], tot_loss[loss=0.3859, simple_loss=0.4167, pruned_loss=0.1776, over 1621620.16 frames. ], batch size: 29, lr: 3.25e-02, grad_scale: 4.0
+2023-02-05 19:55:53,989 INFO [train.py:901] (3/4) Epoch 2, batch 2200, loss[loss=0.3354, simple_loss=0.381, pruned_loss=0.1448, over 8076.00 frames. ], tot_loss[loss=0.385, simple_loss=0.4159, pruned_loss=0.1771, over 1614871.51 frames. ], batch size: 21, lr: 3.24e-02, grad_scale: 4.0
+2023-02-05 19:56:10,342 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0
+2023-02-05 19:56:14,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.803e+02 4.971e+02 6.310e+02 1.458e+03, threshold=9.942e+02, percent-clipped=6.0
+2023-02-05 19:56:18,159 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10317.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:56:29,273 INFO [train.py:901] (3/4) Epoch 2, batch 2250, loss[loss=0.4493, simple_loss=0.4646, pruned_loss=0.217, over 8043.00 frames. ], tot_loss[loss=0.3854, simple_loss=0.4164, pruned_loss=0.1772, over 1616101.18 frames. ], batch size: 22, lr: 3.24e-02, grad_scale: 4.0
+2023-02-05 19:56:34,612 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10342.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:57:03,213 INFO [train.py:901] (3/4) Epoch 2, batch 2300, loss[loss=0.3771, simple_loss=0.4042, pruned_loss=0.175, over 7779.00 frames. ], tot_loss[loss=0.3861, simple_loss=0.4168, pruned_loss=0.1777, over 1618346.77 frames. ], batch size: 19, lr: 3.23e-02, grad_scale: 4.0
+2023-02-05 19:57:08,299 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10391.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:57:15,022 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10401.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:57:23,818 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.389e+02 3.989e+02 5.161e+02 7.086e+02 1.471e+03, threshold=1.032e+03, percent-clipped=7.0
+2023-02-05 19:57:25,927 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10416.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:57:31,804 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10424.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 19:57:33,869 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10427.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:57:39,139 INFO [train.py:901] (3/4) Epoch 2, batch 2350, loss[loss=0.3818, simple_loss=0.4131, pruned_loss=0.1752, over 7973.00 frames. ], tot_loss[loss=0.3849, simple_loss=0.4161, pruned_loss=0.1769, over 1618113.98 frames. ], batch size: 21, lr: 3.22e-02, grad_scale: 4.0
+2023-02-05 19:58:05,154 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10472.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:58:07,804 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10476.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:58:12,898 INFO [train.py:901] (3/4) Epoch 2, batch 2400, loss[loss=0.4666, simple_loss=0.475, pruned_loss=0.2291, over 8199.00 frames. ], tot_loss[loss=0.3847, simple_loss=0.4162, pruned_loss=0.1766, over 1620491.40 frames. ], batch size: 23, lr: 3.22e-02, grad_scale: 8.0
+2023-02-05 19:58:24,668 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10501.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:58:32,502 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.364e+02 3.956e+02 5.047e+02 6.263e+02 1.564e+03, threshold=1.009e+03, percent-clipped=2.0
+2023-02-05 19:58:34,745 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10516.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 19:58:47,328 INFO [train.py:901] (3/4) Epoch 2, batch 2450, loss[loss=0.4361, simple_loss=0.4534, pruned_loss=0.2094, over 8249.00 frames. ], tot_loss[loss=0.3843, simple_loss=0.4157, pruned_loss=0.1764, over 1617731.49 frames. ], batch size: 24, lr: 3.21e-02, grad_scale: 8.0
+2023-02-05 19:58:50,875 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10539.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 19:59:07,389 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-02-05 19:59:21,101 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6711, 2.1304, 2.8326, 1.8435, 1.6755, 2.3837, 0.4785, 1.4969],
+       device='cuda:3'), covar=tensor([0.0875, 0.0515, 0.0262, 0.0591, 0.0951, 0.0378, 0.1735, 0.0773],
+       device='cuda:3'), in_proj_covar=tensor([0.0119, 0.0099, 0.0088, 0.0130, 0.0118, 0.0085, 0.0158, 0.0125],
+       device='cuda:3'), out_proj_covar=tensor([1.0911e-04, 1.0067e-04, 8.2060e-05, 1.1914e-04, 1.1672e-04, 7.9901e-05,
+       1.4519e-04, 1.1964e-04], device='cuda:3')
+2023-02-05 19:59:22,254 INFO [train.py:901] (3/4) Epoch 2, batch 2500, loss[loss=0.3883, simple_loss=0.4236, pruned_loss=0.1765, over 8473.00 frames. ], tot_loss[loss=0.384, simple_loss=0.4158, pruned_loss=0.1761, over 1619817.75 frames. ], batch size: 29, lr: 3.20e-02, grad_scale: 8.0
+2023-02-05 19:59:23,776 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2422, 2.0058, 1.5349, 1.3117, 1.9781, 1.6594, 1.6736, 1.8671],
+       device='cuda:3'), covar=tensor([0.1157, 0.1592, 0.2372, 0.1903, 0.0917, 0.1765, 0.1297, 0.0952],
+       device='cuda:3'), in_proj_covar=tensor([0.0241, 0.0262, 0.0288, 0.0253, 0.0234, 0.0249, 0.0232, 0.0224],
+       device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0005, 0.0005, 0.0004, 0.0004, 0.0004, 0.0004, 0.0004],
+       device='cuda:3')
+2023-02-05 19:59:42,163 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.520e+02 3.522e+02 4.438e+02 6.473e+02 1.354e+03, threshold=8.876e+02, percent-clipped=4.0
+2023-02-05 19:59:55,946 INFO [train.py:901] (3/4) Epoch 2, batch 2550, loss[loss=0.3366, simple_loss=0.3973, pruned_loss=0.138, over 8107.00 frames. ], tot_loss[loss=0.3841, simple_loss=0.416, pruned_loss=0.1761, over 1618549.25 frames. ], batch size: 23, lr: 3.20e-02, grad_scale: 8.0
+2023-02-05 20:00:31,346 INFO [train.py:901] (3/4) Epoch 2, batch 2600, loss[loss=0.4578, simple_loss=0.4612, pruned_loss=0.2271, over 7971.00 frames. ], tot_loss[loss=0.3816, simple_loss=0.4137, pruned_loss=0.1748, over 1609586.66 frames. ], batch size: 21, lr: 3.19e-02, grad_scale: 8.0
+2023-02-05 20:00:50,499 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 4.188e+02 4.914e+02 6.333e+02 1.432e+03, threshold=9.828e+02, percent-clipped=6.0
+2023-02-05 20:01:05,101 INFO [train.py:901] (3/4) Epoch 2, batch 2650, loss[loss=0.3961, simple_loss=0.4287, pruned_loss=0.1818, over 8097.00 frames. ], tot_loss[loss=0.3832, simple_loss=0.4147, pruned_loss=0.1759, over 1612883.93 frames. ], batch size: 23, lr: 3.19e-02, grad_scale: 8.0
+2023-02-05 20:01:05,522 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0
+2023-02-05 20:01:24,206 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10762.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:01:30,880 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10771.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:01:31,706 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10772.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:01:40,305 INFO [train.py:901] (3/4) Epoch 2, batch 2700, loss[loss=0.3817, simple_loss=0.4189, pruned_loss=0.1722, over 8357.00 frames. ], tot_loss[loss=0.3838, simple_loss=0.4151, pruned_loss=0.1763, over 1616298.83 frames. ], batch size: 24, lr: 3.18e-02, grad_scale: 8.0
+2023-02-05 20:01:46,658 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10792.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:01:48,789 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=10795.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 20:01:50,097 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10797.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:02:01,035 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.290e+02 4.005e+02 5.458e+02 7.000e+02 2.619e+03, threshold=1.092e+03, percent-clipped=7.0
+2023-02-05 20:02:03,280 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=10816.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:02:06,067 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=10820.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 20:02:15,181 INFO [train.py:901] (3/4) Epoch 2, batch 2750, loss[loss=0.4953, simple_loss=0.5001, pruned_loss=0.2452, over 8561.00 frames. ], tot_loss[loss=0.3798, simple_loss=0.4121, pruned_loss=0.1737, over 1613885.51 frames. ], batch size: 31, lr: 3.17e-02, grad_scale: 8.0
+2023-02-05 20:02:49,766 INFO [train.py:901] (3/4) Epoch 2, batch 2800, loss[loss=0.3113, simple_loss=0.3576, pruned_loss=0.1325, over 8461.00 frames. ], tot_loss[loss=0.3791, simple_loss=0.412, pruned_loss=0.1731, over 1616421.11 frames. ], batch size: 25, lr: 3.17e-02, grad_scale: 8.0
+2023-02-05 20:02:51,257 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:03:03,320 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=10903.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 20:03:10,621 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.367e+02 3.535e+02 4.531e+02 6.001e+02 1.335e+03, threshold=9.062e+02, percent-clipped=2.0
+2023-02-05 20:03:23,148 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=10931.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:03:25,028 INFO [train.py:901] (3/4) Epoch 2, batch 2850, loss[loss=0.4321, simple_loss=0.4556, pruned_loss=0.2043, over 8518.00 frames. ], tot_loss[loss=0.3764, simple_loss=0.4102, pruned_loss=0.1713, over 1620787.53 frames. ], batch size: 26, lr: 3.16e-02, grad_scale: 8.0
+2023-02-05 20:03:59,105 INFO [train.py:901] (3/4) Epoch 2, batch 2900, loss[loss=0.4197, simple_loss=0.4381, pruned_loss=0.2006, over 8129.00 frames. ], tot_loss[loss=0.3797, simple_loss=0.4128, pruned_loss=0.1733, over 1619318.28 frames. ], batch size: 22, lr: 3.16e-02, grad_scale: 8.0
+2023-02-05 20:04:19,446 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.551e+02 4.216e+02 5.196e+02 6.845e+02 2.226e+03, threshold=1.039e+03, percent-clipped=10.0
+2023-02-05 20:04:34,448 INFO [train.py:901] (3/4) Epoch 2, batch 2950, loss[loss=0.4963, simple_loss=0.4841, pruned_loss=0.2542, over 7959.00 frames. ], tot_loss[loss=0.3819, simple_loss=0.4142, pruned_loss=0.1748, over 1616590.36 frames. ], batch size: 21, lr: 3.15e-02, grad_scale: 8.0
+2023-02-05 20:04:39,269 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-05 20:05:08,646 INFO [train.py:901] (3/4) Epoch 2, batch 3000, loss[loss=0.3439, simple_loss=0.3711, pruned_loss=0.1583, over 7694.00 frames. ], tot_loss[loss=0.3798, simple_loss=0.4129, pruned_loss=0.1733, over 1614618.21 frames. ], batch size: 18, lr: 3.14e-02, grad_scale: 8.0
+2023-02-05 20:05:08,647 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-05 20:05:24,852 INFO [train.py:935] (3/4) Epoch 2, validation: loss=0.2878, simple_loss=0.369, pruned_loss=0.1033, over 944034.00 frames.
+2023-02-05 20:05:24,853 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-05 20:05:40,501 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11106.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:05:45,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.542e+02 3.795e+02 4.955e+02 6.193e+02 1.384e+03, threshold=9.910e+02, percent-clipped=4.0
+2023-02-05 20:06:00,078 INFO [train.py:901] (3/4) Epoch 2, batch 3050, loss[loss=0.3526, simple_loss=0.4012, pruned_loss=0.152, over 8195.00 frames. ], tot_loss[loss=0.3814, simple_loss=0.4142, pruned_loss=0.1744, over 1615709.21 frames. ], batch size: 23, lr: 3.14e-02, grad_scale: 8.0
+2023-02-05 20:06:01,579 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11136.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:06:05,905 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:06:08,869 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0
+2023-02-05 20:06:24,332 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11167.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:06:35,398 INFO [train.py:901] (3/4) Epoch 2, batch 3100, loss[loss=0.3583, simple_loss=0.4118, pruned_loss=0.1524, over 8253.00 frames. ], tot_loss[loss=0.3815, simple_loss=0.4143, pruned_loss=0.1743, over 1613124.85 frames. ], batch size: 24, lr: 3.13e-02, grad_scale: 8.0
+2023-02-05 20:06:37,615 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:06:40,870 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11192.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:06:55,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11212.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:06:55,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.637e+02 3.930e+02 4.987e+02 6.652e+02 1.229e+03, threshold=9.974e+02, percent-clipped=5.0
+2023-02-05 20:07:01,729 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11221.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:07:10,341 INFO [train.py:901] (3/4) Epoch 2, batch 3150, loss[loss=0.4388, simple_loss=0.4607, pruned_loss=0.2084, over 8470.00 frames. ], tot_loss[loss=0.38, simple_loss=0.4128, pruned_loss=0.1736, over 1612676.79 frames. ], batch size: 25, lr: 3.13e-02, grad_scale: 8.0
+2023-02-05 20:07:20,137 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11247.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 20:07:22,962 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11251.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:07:35,609 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-05 20:07:44,249 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9556, 4.1919, 3.5387, 1.6815, 3.4598, 3.2433, 3.7016, 2.7286],
+       device='cuda:3'), covar=tensor([0.1006, 0.0770, 0.1094, 0.4062, 0.0575, 0.0740, 0.1406, 0.0784],
+       device='cuda:3'), in_proj_covar=tensor([0.0301, 0.0206, 0.0236, 0.0327, 0.0217, 0.0168, 0.0227, 0.0160],
+       device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0001, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001],
+       device='cuda:3')
+2023-02-05 20:07:46,080 INFO [train.py:901] (3/4) Epoch 2, batch 3200, loss[loss=0.3393, simple_loss=0.3663, pruned_loss=0.1562, over 7535.00 frames. ], tot_loss[loss=0.3805, simple_loss=0.4135, pruned_loss=0.1737, over 1614012.26 frames. ], batch size: 18, lr: 3.12e-02, grad_scale: 8.0
+2023-02-05 20:08:06,205 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 3.889e+02 4.508e+02 6.050e+02 1.565e+03, threshold=9.016e+02, percent-clipped=4.0
+2023-02-05 20:08:13,956 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3301, 1.9885, 3.3216, 2.8973, 2.5691, 2.0067, 1.5899, 1.7653],
+       device='cuda:3'), covar=tensor([0.0795, 0.0784, 0.0140, 0.0215, 0.0338, 0.0345, 0.0475, 0.0684],
+       device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0385, 0.0271, 0.0307, 0.0403, 0.0346, 0.0361, 0.0407],
+       device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:3')
+2023-02-05 20:08:20,048 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3454, 1.5363, 2.0689, 1.2469, 1.1597, 1.9427, 0.3821, 1.0407],
+       device='cuda:3'), covar=tensor([0.0940, 0.0737, 0.0468, 0.0660, 0.0861, 0.0324, 0.2173, 0.1198],
+       device='cuda:3'), in_proj_covar=tensor([0.0118, 0.0098, 0.0087, 0.0136, 0.0123, 0.0082, 0.0163, 0.0131],
+       device='cuda:3'), out_proj_covar=tensor([1.1292e-04, 1.0073e-04, 8.3813e-05, 1.2701e-04, 1.2407e-04, 7.8640e-05,
+       1.5335e-04, 1.2907e-04], device='cuda:3')
+2023-02-05 20:08:21,234 INFO [train.py:901] (3/4) Epoch 2, batch 3250, loss[loss=0.2958, simple_loss=0.3406, pruned_loss=0.1254, over 7813.00 frames. ], tot_loss[loss=0.3776, simple_loss=0.411, pruned_loss=0.1721, over 1615047.49 frames. ], batch size: 19, lr: 3.11e-02, grad_scale: 8.0
+2023-02-05 20:08:39,999 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11362.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 20:08:45,874 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0270, 1.1552, 1.1775, 0.0915, 1.0645, 0.8291, 0.1756, 1.0491],
+       device='cuda:3'), covar=tensor([0.0092, 0.0078, 0.0095, 0.0197, 0.0127, 0.0262, 0.0245, 0.0094],
+       device='cuda:3'), in_proj_covar=tensor([0.0185, 0.0128, 0.0125, 0.0182, 0.0132, 0.0226, 0.0194, 0.0164],
+       device='cuda:3'), out_proj_covar=tensor([1.1901e-04, 8.3338e-05, 8.6944e-05, 1.1965e-04, 9.2783e-05, 1.5836e-04,
+       1.3218e-04, 1.0966e-04], device='cuda:3')
+2023-02-05 20:08:55,031 INFO [train.py:901] (3/4) Epoch 2, batch 3300, loss[loss=0.3703, simple_loss=0.4084, pruned_loss=0.1661, over 7922.00 frames. ], tot_loss[loss=0.3773, simple_loss=0.4109, pruned_loss=0.1719, over 1614631.85 frames. ], batch size: 20, lr: 3.11e-02, grad_scale: 8.0
+2023-02-05 20:09:16,012 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.686e+02 3.650e+02 4.417e+02 5.589e+02 1.513e+03, threshold=8.834e+02, percent-clipped=8.0
+2023-02-05 20:09:20,907 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.5922, 1.2202, 3.7383, 1.3881, 3.1394, 3.0866, 3.2831, 3.3495],
+       device='cuda:3'), covar=tensor([0.0322, 0.2872, 0.0292, 0.1568, 0.0913, 0.0398, 0.0325, 0.0386],
+       device='cuda:3'), in_proj_covar=tensor([0.0184, 0.0364, 0.0229, 0.0259, 0.0309, 0.0243, 0.0224, 0.0253],
+       device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001],
+       device='cuda:3')
+2023-02-05 20:09:30,177 INFO [train.py:901] (3/4) Epoch 2, batch 3350, loss[loss=0.3063, simple_loss=0.3485, pruned_loss=0.1321, over 7549.00 frames. ], tot_loss[loss=0.3751, simple_loss=0.4093, pruned_loss=0.1705, over 1612808.06 frames. ], batch size: 18, lr: 3.10e-02, grad_scale: 8.0
+2023-02-05 20:10:00,602 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:10:05,252 INFO [train.py:901] (3/4) Epoch 2, batch 3400, loss[loss=0.2796, simple_loss=0.3301, pruned_loss=0.1146, over 7796.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.4092, pruned_loss=0.1699, over 1616037.93 frames. ], batch size: 19, lr: 3.10e-02, grad_scale: 8.0
+2023-02-05 20:10:17,731 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11502.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:10:21,207 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:10:25,777 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.120e+02 3.730e+02 4.591e+02 5.662e+02 1.223e+03, threshold=9.181e+02, percent-clipped=5.0
+2023-02-05 20:10:39,499 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11532.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:10:40,655 INFO [train.py:901] (3/4) Epoch 2, batch 3450, loss[loss=0.3682, simple_loss=0.4151, pruned_loss=0.1606, over 8542.00 frames. ], tot_loss[loss=0.375, simple_loss=0.4092, pruned_loss=0.1704, over 1613468.00 frames. ], batch size: 39, lr: 3.09e-02, grad_scale: 8.0
+2023-02-05 20:10:42,065 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11536.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:10:52,326 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11550.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:11:15,646 INFO [train.py:901] (3/4) Epoch 2, batch 3500, loss[loss=0.3654, simple_loss=0.4127, pruned_loss=0.1591, over 8521.00 frames. ], tot_loss[loss=0.3744, simple_loss=0.4087, pruned_loss=0.1701, over 1614982.93 frames. ], batch size: 28, lr: 3.09e-02, grad_scale: 8.0
+2023-02-05 20:11:19,217 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2695, 2.3179, 4.0681, 4.0342, 3.0124, 2.4335, 1.7402, 2.3892],
+       device='cuda:3'), covar=tensor([0.0631, 0.0949, 0.0133, 0.0194, 0.0394, 0.0369, 0.0528, 0.0725],
+       device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0387, 0.0282, 0.0311, 0.0413, 0.0355, 0.0373, 0.0408],
+       device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:3')
+2023-02-05 20:11:22,039 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.58 vs. limit=5.0
+2023-02-05 20:11:26,051 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2627, 1.4577, 1.9607, 1.3143, 1.0279, 1.7978, 0.4063, 1.0995],
+       device='cuda:3'), covar=tensor([0.1066, 0.0908, 0.0451, 0.0720, 0.0914, 0.0411, 0.2369, 0.0976],
+       device='cuda:3'), in_proj_covar=tensor([0.0117, 0.0101, 0.0086, 0.0135, 0.0120, 0.0078, 0.0156, 0.0123],
+       device='cuda:3'), out_proj_covar=tensor([1.1200e-04, 1.0293e-04, 8.2897e-05, 1.2719e-04, 1.2137e-04, 7.4997e-05,
+       1.4880e-04, 1.2349e-04], device='cuda:3')
+2023-02-05 20:11:35,937 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.906e+02 4.071e+02 4.877e+02 6.297e+02 1.257e+03, threshold=9.753e+02, percent-clipped=3.0
+2023-02-05 20:11:39,545 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11618.0, num_to_drop=1, layers_to_drop={0}
+2023-02-05 20:11:40,718 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-05 20:11:50,751 INFO [train.py:901] (3/4) Epoch 2, batch 3550, loss[loss=0.2789, simple_loss=0.3275, pruned_loss=0.1152, over 7804.00 frames. ], tot_loss[loss=0.3748, simple_loss=0.4094, pruned_loss=0.1701, over 1620525.07 frames. ], batch size: 19, lr: 3.08e-02, grad_scale: 8.0
+2023-02-05 20:11:57,569 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11643.0, num_to_drop=1, layers_to_drop={1}
+2023-02-05 20:12:02,965 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=11651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:12:04,330 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11653.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:12:12,572 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-02-05 20:12:25,621 INFO [train.py:901] (3/4) Epoch 2, batch 3600, loss[loss=0.385, simple_loss=0.4207, pruned_loss=0.1746, over 8312.00 frames. ], tot_loss[loss=0.3762, simple_loss=0.4103, pruned_loss=0.1711, over 1619843.52 frames. ], batch size: 25, lr: 3.08e-02, grad_scale: 8.0
+2023-02-05 20:12:41,184 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0
+2023-02-05 20:12:45,382 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.676e+02 3.688e+02 4.691e+02 6.662e+02 1.491e+03, threshold=9.383e+02, percent-clipped=3.0
+2023-02-05 20:12:48,292 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11717.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:12:59,429 INFO [train.py:901] (3/4) Epoch 2, batch 3650, loss[loss=0.3976, simple_loss=0.4365, pruned_loss=0.1793, over 8509.00 frames. ], tot_loss[loss=0.3779, simple_loss=0.4114, pruned_loss=0.1722, over 1619715.70 frames. ], batch size: 26, lr: 3.07e-02, grad_scale: 8.0
+2023-02-05 20:13:33,833 INFO [train.py:901] (3/4) Epoch 2, batch 3700, loss[loss=0.3926, simple_loss=0.4117, pruned_loss=0.1868, over 8129.00 frames. ], tot_loss[loss=0.3797, simple_loss=0.4121, pruned_loss=0.1736, over 1614256.66 frames. ], batch size: 22, lr: 3.06e-02, grad_scale: 8.0
+2023-02-05 20:13:44,421 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-05 20:13:53,762 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.910e+02 4.224e+02 5.211e+02 6.213e+02 2.304e+03, threshold=1.042e+03, percent-clipped=10.0
+2023-02-05 20:14:08,520 INFO [train.py:901] (3/4) Epoch 2, batch 3750, loss[loss=0.4393, simple_loss=0.4531, pruned_loss=0.2127, over 8186.00 frames. ], tot_loss[loss=0.3783, simple_loss=0.4112, pruned_loss=0.1727, over 1615865.05 frames. ], batch size: 23, lr: 3.06e-02, grad_scale: 8.0
+2023-02-05 20:14:08,633 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11834.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:14:28,588 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11864.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:14:43,031 INFO [train.py:901] (3/4) Epoch 2, batch 3800, loss[loss=0.3687, simple_loss=0.4035, pruned_loss=0.167, over 7973.00 frames. ], tot_loss[loss=0.3756, simple_loss=0.4092, pruned_loss=0.171, over 1611263.09 frames. ], batch size: 21, lr: 3.05e-02, grad_scale: 8.0
+2023-02-05 20:14:49,682 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11894.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:14:58,774 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=11907.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:15:02,629 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.750e+02 4.056e+02 4.773e+02 6.198e+02 1.391e+03, threshold=9.546e+02, percent-clipped=3.0
+2023-02-05 20:15:16,344 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=11932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:15:17,493 INFO [train.py:901] (3/4) Epoch 2, batch 3850, loss[loss=0.3509, simple_loss=0.395, pruned_loss=0.1534, over 8485.00 frames. ], tot_loss[loss=0.3753, simple_loss=0.4091, pruned_loss=0.1707, over 1605503.81 frames. ], batch size: 49, lr: 3.05e-02, grad_scale: 8.0
+2023-02-05 20:15:20,309 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11938.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:15:39,314 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0
+2023-02-05 20:15:39,760 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=11966.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:15:47,058 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-05 20:15:51,653 INFO [train.py:901] (3/4) Epoch 2, batch 3900, loss[loss=0.3053, simple_loss=0.3578, pruned_loss=0.1264, over 7681.00 frames. ], tot_loss[loss=0.3752, simple_loss=0.4097, pruned_loss=0.1704, over 1606911.93 frames. ], batch size: 18, lr: 3.04e-02, grad_scale: 8.0
+2023-02-05 20:16:01,111 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=11997.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:16:06,094 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12002.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:16:10,868 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12009.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:16:13,174 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.113e+02 3.926e+02 4.686e+02 5.678e+02 1.222e+03, threshold=9.373e+02, percent-clipped=4.0
+2023-02-05 20:16:25,605 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5517, 1.6810, 1.4645, 1.9400, 1.6447, 1.1994, 1.2777, 1.8276],
+       device='cuda:3'), covar=tensor([0.1034, 0.0617, 0.1051, 0.0652, 0.0898, 0.1349, 0.1067, 0.0652],
+       device='cuda:3'), in_proj_covar=tensor([0.0369, 0.0245, 0.0354, 0.0314, 0.0369, 0.0318, 0.0365, 0.0331],
+       device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0003, 0.0003],
+       device='cuda:3')
+2023-02-05 20:16:26,340 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2764, 1.8600, 3.1400, 2.8219, 2.5075, 1.8215, 1.5588, 1.7555],
+       device='cuda:3'), covar=tensor([0.0727, 0.0813, 0.0156, 0.0233, 0.0329, 0.0375, 0.0445, 0.0692],
+       device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0393, 0.0292, 0.0327, 0.0415, 0.0363, 0.0372, 0.0410],
+       device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:3')
+2023-02-05 20:16:27,665 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2466, 1.4541, 1.9224, 1.5565, 0.9738, 1.6423, 0.4757, 0.8717],
+       device='cuda:3'), covar=tensor([0.0942, 0.0528, 0.0459, 0.0608, 0.0962, 0.0480, 0.1585, 0.0789],
+       device='cuda:3'), in_proj_covar=tensor([0.0112, 0.0098, 0.0084, 0.0136, 0.0116, 0.0079, 0.0149, 0.0114],
+       device='cuda:3'), out_proj_covar=tensor([1.1059e-04, 1.0228e-04, 8.2773e-05, 1.2953e-04, 1.1902e-04, 7.7516e-05,
+       1.4388e-04, 1.1708e-04], device='cuda:3')
+2023-02-05 20:16:28,135 INFO [train.py:901] (3/4) Epoch 2, batch 3950, loss[loss=0.3575, simple_loss=0.4074, pruned_loss=0.1537, over 8482.00 frames. ], tot_loss[loss=0.3747, simple_loss=0.4092, pruned_loss=0.1701, over 1609848.82 frames. ], batch size: 27, lr: 3.04e-02, grad_scale: 8.0
+2023-02-05 20:16:46,989 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12061.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:16:54,547 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5967, 1.9424, 1.6343, 1.3117, 2.3802, 1.8458, 1.9540, 2.1427],
+       device='cuda:3'), covar=tensor([0.0837, 0.1474, 0.1917, 0.1753, 0.0708, 0.1549, 0.1075, 0.0690],
+       device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0261, 0.0285, 0.0256, 0.0229, 0.0249, 0.0224, 0.0214],
+       device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004],
+       device='cuda:3')
+2023-02-05 20:17:02,480 INFO [train.py:901] (3/4) Epoch 2, batch 4000, loss[loss=0.3706, simple_loss=0.3945, pruned_loss=0.1733, over 7661.00 frames. ], tot_loss[loss=0.3747, simple_loss=0.4092, pruned_loss=0.1701, over 1609718.19 frames. ], batch size: 19, lr: 3.03e-02, grad_scale: 8.0
+2023-02-05 20:17:09,218 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12094.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:17:22,648 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12112.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:17:23,112 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.955e+02 4.453e+02 5.904e+02 7.845e+02 2.502e+03, threshold=1.181e+03, percent-clipped=13.0
+2023-02-05 20:17:36,865 INFO [train.py:901] (3/4) Epoch 2, batch 4050, loss[loss=0.4664, simple_loss=0.4748, pruned_loss=0.229, over 6965.00 frames. ], tot_loss[loss=0.3753, simple_loss=0.4098, pruned_loss=0.1705, over 1611952.28 frames. ], batch size: 71, lr: 3.03e-02, grad_scale: 16.0
+2023-02-05 20:18:06,036 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12176.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:18:07,290 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12178.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:18:11,164 INFO [train.py:901] (3/4) Epoch 2, batch 4100, loss[loss=0.3794, simple_loss=0.4149, pruned_loss=0.1719, over 8301.00 frames. ], tot_loss[loss=0.3757, simple_loss=0.4099, pruned_loss=0.1707, over 1613391.16 frames. ], batch size: 23, lr: 3.02e-02, grad_scale: 16.0
+2023-02-05 20:18:23,677 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4201, 1.7545, 1.4596, 1.2305, 2.3275, 1.5498, 1.8345, 1.8550],
+       device='cuda:3'), covar=tensor([0.0943, 0.1638, 0.2313, 0.1923, 0.0727, 0.1841, 0.1059, 0.0802],
+       device='cuda:3'), in_proj_covar=tensor([0.0226, 0.0256, 0.0278, 0.0250, 0.0225, 0.0247, 0.0218, 0.0209],
+       device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004],
+       device='cuda:3')
+2023-02-05 20:18:27,597 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12208.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:18:30,904 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.458e+02 3.728e+02 4.672e+02 5.863e+02 2.072e+03, threshold=9.344e+02, percent-clipped=1.0
+2023-02-05 20:18:47,040 INFO [train.py:901] (3/4) Epoch 2, batch 4150, loss[loss=0.3645, simple_loss=0.4105, pruned_loss=0.1593, over 8449.00 frames. ], tot_loss[loss=0.3746, simple_loss=0.4092, pruned_loss=0.17, over 1616681.61 frames. ], batch size: 29, lr: 3.02e-02, grad_scale: 16.0
+2023-02-05 20:19:08,912 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12265.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:19:20,419 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12282.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:19:21,722 INFO [train.py:901] (3/4) Epoch 2, batch 4200, loss[loss=0.3506, simple_loss=0.3803, pruned_loss=0.1604, over 7927.00 frames. ], tot_loss[loss=0.3735, simple_loss=0.4082, pruned_loss=0.1693, over 1610782.26 frames. ], batch size: 20, lr: 3.01e-02, grad_scale: 16.0
+2023-02-05 20:19:24,792 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-02-05 20:19:25,938 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12290.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:19:28,644 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12293.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:19:40,062 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12310.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:19:42,058 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.324e+02 3.573e+02 4.694e+02 5.833e+02 1.413e+03, threshold=9.388e+02, percent-clipped=6.0
+2023-02-05 20:19:43,521 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-05 20:19:49,250 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12323.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:19:57,056 INFO [train.py:901] (3/4) Epoch 2, batch 4250, loss[loss=0.3567, simple_loss=0.4055, pruned_loss=0.1539, over 8684.00 frames. ], tot_loss[loss=0.3731, simple_loss=0.4082, pruned_loss=0.169, over 1609769.94 frames. ], batch size: 34, lr: 3.01e-02, grad_scale: 16.0
+2023-02-05 20:20:06,020 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12346.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:20:06,630 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-05 20:20:20,963 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12368.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:20:21,630 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0917, 1.8866, 1.8848, 1.3747, 0.9785, 1.5163, 0.3725, 0.8244],
+       device='cuda:3'), covar=tensor([0.1318, 0.0761, 0.0475, 0.1016, 0.1262, 0.0605, 0.2349, 0.1125],
+       device='cuda:3'), in_proj_covar=tensor([0.0113, 0.0096, 0.0087, 0.0144, 0.0129, 0.0080, 0.0152, 0.0120],
+       device='cuda:3'), out_proj_covar=tensor([1.1333e-04, 1.0225e-04, 8.7732e-05, 1.3861e-04, 1.3136e-04, 8.0074e-05,
+       1.4814e-04, 1.2437e-04], device='cuda:3')
+2023-02-05 20:20:32,219 INFO [train.py:901] (3/4) Epoch 2, batch 4300, loss[loss=0.3601, simple_loss=0.4018, pruned_loss=0.1592, over 7826.00 frames. ], tot_loss[loss=0.3745, simple_loss=0.4093, pruned_loss=0.1698, over 1610601.37 frames. ], batch size: 20, lr: 3.00e-02, grad_scale: 16.0
+2023-02-05 20:20:38,554 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12393.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:20:41,210 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12397.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:20:53,214 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.244e+02 3.864e+02 4.648e+02 5.983e+02 1.525e+03, threshold=9.296e+02, percent-clipped=6.0
+2023-02-05 20:21:00,847 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12425.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:21:05,801 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12432.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:21:06,895 INFO [train.py:901] (3/4) Epoch 2, batch 4350, loss[loss=0.4815, simple_loss=0.4822, pruned_loss=0.2404, over 8623.00 frames. ], tot_loss[loss=0.3733, simple_loss=0.4082, pruned_loss=0.1693, over 1609947.42 frames. ], batch size: 39, lr: 2.99e-02, grad_scale: 8.0
+2023-02-05 20:21:09,737 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12438.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:21:23,517 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12457.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:21:26,047 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12461.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:21:28,058 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12464.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:21:38,136 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-05 20:21:42,135 INFO [train.py:901] (3/4) Epoch 2, batch 4400, loss[loss=0.3246, simple_loss=0.3837, pruned_loss=0.1327, over 8522.00 frames. ], tot_loss[loss=0.3731, simple_loss=0.4082, pruned_loss=0.1691, over 1612539.57 frames. ], batch size: 28, lr: 2.99e-02, grad_scale: 8.0
+2023-02-05 20:21:45,214 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0
+2023-02-05 20:22:02,398 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.494e+02 4.041e+02 4.964e+02 6.742e+02 1.213e+03, threshold=9.928e+02, percent-clipped=4.0
+2023-02-05 20:22:16,720 INFO [train.py:901] (3/4) Epoch 2, batch 4450, loss[loss=0.3429, simple_loss=0.375, pruned_loss=0.1554, over 7642.00 frames. ], tot_loss[loss=0.372, simple_loss=0.407, pruned_loss=0.1685, over 1610757.49 frames. ], batch size: 19, lr: 2.98e-02, grad_scale: 8.0
+2023-02-05 20:22:17,428 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-05 20:22:22,526 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5117, 1.9877, 3.0914, 1.0176, 2.3018, 1.8260, 1.4974, 1.9052],
+       device='cuda:3'), covar=tensor([0.1024, 0.1073, 0.0332, 0.1679, 0.0895, 0.1359, 0.0893, 0.1194],
+       device='cuda:3'), in_proj_covar=tensor([0.0380, 0.0369, 0.0388, 0.0423, 0.0473, 0.0419, 0.0371, 0.0464],
+       device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003],
+       device='cuda:3')
+2023-02-05 20:22:27,287 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12549.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:22:29,914 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12553.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:22:45,076 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12574.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:22:49,154 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12579.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:22:52,404 INFO [train.py:901] (3/4) Epoch 2, batch 4500, loss[loss=0.4511, simple_loss=0.4629, pruned_loss=0.2197, over 8670.00 frames. ], tot_loss[loss=0.3719, simple_loss=0.4066, pruned_loss=0.1685, over 1608616.03 frames. ], batch size: 34, lr: 2.98e-02, grad_scale: 8.0
+2023-02-05 20:22:56,171 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.89 vs. limit=2.0
+2023-02-05 20:23:06,036 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12604.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:23:12,553 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-05 20:23:13,221 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.312e+02 4.309e+02 5.092e+02 6.256e+02 1.421e+03, threshold=1.018e+03, percent-clipped=5.0
+2023-02-05 20:23:27,084 INFO [train.py:901] (3/4) Epoch 2, batch 4550, loss[loss=0.3732, simple_loss=0.4359, pruned_loss=0.1552, over 8476.00 frames. ], tot_loss[loss=0.3727, simple_loss=0.4074, pruned_loss=0.169, over 1611385.03 frames. ], batch size: 25, lr: 2.97e-02, grad_scale: 8.0
+2023-02-05 20:23:40,672 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12653.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:23:57,690 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12678.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:23:59,783 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12681.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:24:01,609 INFO [train.py:901] (3/4) Epoch 2, batch 4600, loss[loss=0.4182, simple_loss=0.4411, pruned_loss=0.1976, over 8288.00 frames. ], tot_loss[loss=0.3728, simple_loss=0.4077, pruned_loss=0.169, over 1610656.88 frames. ], batch size: 23, lr: 2.97e-02, grad_scale: 8.0
+2023-02-05 20:24:01,772 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9264, 1.1889, 4.0681, 1.6754, 3.4989, 3.4308, 3.6023, 3.5699],
+       device='cuda:3'), covar=tensor([0.0354, 0.3205, 0.0267, 0.1590, 0.0888, 0.0377, 0.0358, 0.0434],
+       device='cuda:3'), in_proj_covar=tensor([0.0185, 0.0374, 0.0234, 0.0269, 0.0321, 0.0257, 0.0241, 0.0262],
+       device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001],
+       device='cuda:3')
+2023-02-05 20:24:08,005 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2422, 2.3022, 2.8261, 0.6590, 2.8998, 2.0322, 1.3703, 2.1248],
+       device='cuda:3'), covar=tensor([0.0119, 0.0086, 0.0125, 0.0246, 0.0103, 0.0256, 0.0232, 0.0116],
+       device='cuda:3'), in_proj_covar=tensor([0.0184, 0.0134, 0.0117, 0.0188, 0.0134, 0.0243, 0.0195, 0.0167],
+       device='cuda:3'), out_proj_covar=tensor([1.1235e-04, 8.2744e-05, 7.5610e-05, 1.1595e-04, 8.7352e-05, 1.6206e-04,
+       1.2512e-04, 1.0585e-04], device='cuda:3')
+2023-02-05 20:24:17,929 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12706.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:24:23,145 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.401e+02 3.817e+02 4.647e+02 5.826e+02 1.354e+03, threshold=9.293e+02, percent-clipped=3.0
+2023-02-05 20:24:25,437 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12717.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:24:37,083 INFO [train.py:901] (3/4) Epoch 2, batch 4650, loss[loss=0.3869, simple_loss=0.4204, pruned_loss=0.1767, over 8476.00 frames. ], tot_loss[loss=0.3723, simple_loss=0.407, pruned_loss=0.1688, over 1612865.80 frames. ], batch size: 25, lr: 2.96e-02, grad_scale: 8.0
+2023-02-05 20:24:40,102 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0
+2023-02-05 20:24:42,620 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12742.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:24:53,335 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0978, 2.3006, 2.0403, 2.6328, 1.8470, 1.7897, 2.0960, 2.5205],
+       device='cuda:3'), covar=tensor([0.0826, 0.1015, 0.1076, 0.0469, 0.1330, 0.1461, 0.1487, 0.0717],
+       device='cuda:3'), in_proj_covar=tensor([0.0315, 0.0340, 0.0332, 0.0222, 0.0317, 0.0334, 0.0379, 0.0311],
+       device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0005, 0.0004, 0.0003, 0.0004, 0.0004, 0.0005, 0.0004],
+       device='cuda:3')
+2023-02-05 20:25:09,738 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=12781.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:25:11,642 INFO [train.py:901] (3/4) Epoch 2, batch 4700, loss[loss=0.3524, simple_loss=0.4045, pruned_loss=0.1502, over 8512.00 frames. ], tot_loss[loss=0.3704, simple_loss=0.4055, pruned_loss=0.1676, over 1609609.72 frames. ], batch size: 28, lr: 2.96e-02, grad_scale: 8.0
+2023-02-05 20:25:13,847 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1894, 1.5087, 1.1183, 1.6982, 1.2663, 1.0407, 1.1566, 1.7561],
+       device='cuda:3'), covar=tensor([0.1018, 0.0702, 0.1438, 0.0688, 0.1260, 0.1449, 0.1195, 0.0551],
+       device='cuda:3'), in_proj_covar=tensor([0.0367, 0.0249, 0.0357, 0.0309, 0.0362, 0.0322, 0.0359, 0.0321],
+       device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003],
+       device='cuda:3')
+2023-02-05 20:25:14,122 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0
+2023-02-05 20:25:28,731 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=12808.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:25:29,557 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=12809.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:25:32,792 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.540e+02 4.122e+02 5.358e+02 6.927e+02 1.344e+03, threshold=1.072e+03, percent-clipped=8.0
+2023-02-05 20:25:46,323 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.71 vs. limit=2.0
+2023-02-05 20:25:47,170 INFO [train.py:901] (3/4) Epoch 2, batch 4750, loss[loss=0.2762, simple_loss=0.3381, pruned_loss=0.1072, over 8239.00 frames. ], tot_loss[loss=0.3706, simple_loss=0.4055, pruned_loss=0.1678, over 1613581.02 frames. ], batch size: 22, lr: 2.95e-02, grad_scale: 8.0
+2023-02-05 20:25:47,389 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=12834.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:26:18,679 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-05 20:26:20,730 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-05 20:26:22,727 INFO [train.py:901] (3/4) Epoch 2, batch 4800, loss[loss=0.4022, simple_loss=0.4324, pruned_loss=0.186, over 8538.00 frames. ], tot_loss[loss=0.3683, simple_loss=0.4041, pruned_loss=0.1663, over 1610770.13 frames. ], batch size: 49, lr: 2.95e-02, grad_scale: 8.0
+2023-02-05 20:26:37,696 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.72 vs. limit=5.0
+2023-02-05 20:26:41,300 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-05 20:26:42,214 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6850, 1.2330, 3.3522, 1.3401, 2.3701, 3.8869, 3.5498, 3.2890],
+       device='cuda:3'), covar=tensor([0.1349, 0.1884, 0.0307, 0.2109, 0.0738, 0.0184, 0.0302, 0.0517],
+       device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0260, 0.0179, 0.0251, 0.0192, 0.0150, 0.0145, 0.0212],
+       device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+       device='cuda:3')
+2023-02-05 20:26:43,405 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.314e+02 3.678e+02 4.471e+02 5.888e+02 1.234e+03, threshold=8.941e+02, percent-clipped=3.0
+2023-02-05 20:26:49,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=12923.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:26:57,680 INFO [train.py:901] (3/4) Epoch 2, batch 4850, loss[loss=0.3965, simple_loss=0.4381, pruned_loss=0.1775, over 8346.00 frames. ], tot_loss[loss=0.3691, simple_loss=0.4053, pruned_loss=0.1664, over 1618480.21 frames. ], batch size: 26, lr: 2.94e-02, grad_scale: 8.0
+2023-02-05 20:27:12,745 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-05 20:27:32,097 INFO [train.py:901] (3/4) Epoch 2, batch 4900, loss[loss=0.3246, simple_loss=0.3656, pruned_loss=0.1418, over 7921.00 frames. ], tot_loss[loss=0.3672, simple_loss=0.403, pruned_loss=0.1657, over 1614720.11 frames. ], batch size: 20, lr: 2.94e-02, grad_scale: 8.0
+2023-02-05 20:27:53,287 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 4.170e+02 5.532e+02 7.452e+02 1.588e+03, threshold=1.106e+03, percent-clipped=9.0
+2023-02-05 20:28:06,714 INFO [train.py:901] (3/4) Epoch 2, batch 4950, loss[loss=0.3877, simple_loss=0.4256, pruned_loss=0.1748, over 8594.00 frames. ], tot_loss[loss=0.3684, simple_loss=0.404, pruned_loss=0.1663, over 1617624.86 frames. ], batch size: 34, lr: 2.93e-02, grad_scale: 8.0
+2023-02-05 20:28:30,712 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9350, 1.7903, 2.4720, 0.9075, 2.4470, 1.8755, 1.2304, 2.1337],
+       device='cuda:3'), covar=tensor([0.0194, 0.0107, 0.0179, 0.0234, 0.0247, 0.0291, 0.0317, 0.0124],
+       device='cuda:3'), in_proj_covar=tensor([0.0185, 0.0136, 0.0123, 0.0184, 0.0131, 0.0238, 0.0190, 0.0162],
+       device='cuda:3'), out_proj_covar=tensor([1.1184e-04, 8.2776e-05, 7.7799e-05, 1.1166e-04, 8.4859e-05, 1.5682e-04,
+       1.1915e-04, 1.0001e-04], device='cuda:3')
+2023-02-05 20:28:41,848 INFO [train.py:901] (3/4) Epoch 2, batch 5000, loss[loss=0.3924, simple_loss=0.4263, pruned_loss=0.1792, over 8584.00 frames. ], tot_loss[loss=0.3694, simple_loss=0.4047, pruned_loss=0.1671, over 1616370.63 frames. ], batch size: 34, lr: 2.93e-02, grad_scale: 8.0
+2023-02-05 20:29:02,464 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.193e+02 4.113e+02 5.050e+02 6.511e+02 1.788e+03, threshold=1.010e+03, percent-clipped=5.0
+2023-02-05 20:29:09,704 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=13125.0, num_to_drop=0, layers_to_drop=set()
+2023-02-05 20:29:15,847 INFO [train.py:901] (3/4) Epoch 2, batch 5050, loss[loss=0.3523, simple_loss=0.4005, pruned_loss=0.152, over 8464.00 frames. ], tot_loss[loss=0.3692, simple_loss=0.4046, pruned_loss=0.167, over 1615769.25 frames. 
], batch size: 49, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:29:47,474 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13179.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:29:47,972 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 20:29:50,616 INFO [train.py:901] (3/4) Epoch 2, batch 5100, loss[loss=0.4316, simple_loss=0.4432, pruned_loss=0.21, over 8534.00 frames. ], tot_loss[loss=0.3688, simple_loss=0.4044, pruned_loss=0.1666, over 1619134.77 frames. ], batch size: 49, lr: 2.92e-02, grad_scale: 4.0 +2023-02-05 20:30:04,642 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13204.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:09,665 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3538, 4.4717, 3.8076, 2.0848, 3.8237, 3.7868, 4.0869, 3.1941], + device='cuda:3'), covar=tensor([0.0600, 0.0348, 0.0720, 0.3310, 0.0472, 0.0604, 0.0757, 0.0533], + device='cuda:3'), in_proj_covar=tensor([0.0326, 0.0220, 0.0261, 0.0346, 0.0240, 0.0188, 0.0249, 0.0165], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 20:30:11,534 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.243e+02 3.930e+02 4.883e+02 5.892e+02 1.355e+03, threshold=9.766e+02, percent-clipped=3.0 +2023-02-05 20:30:24,598 INFO [train.py:901] (3/4) Epoch 2, batch 5150, loss[loss=0.3312, simple_loss=0.3802, pruned_loss=0.1411, over 8185.00 frames. ], tot_loss[loss=0.3696, simple_loss=0.4052, pruned_loss=0.1671, over 1616897.86 frames. ], batch size: 23, lr: 2.91e-02, grad_scale: 4.0 +2023-02-05 20:30:28,706 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=13240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:30:38,414 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.35 vs. limit=5.0 +2023-02-05 20:30:59,010 INFO [train.py:901] (3/4) Epoch 2, batch 5200, loss[loss=0.3862, simple_loss=0.4139, pruned_loss=0.1792, over 8543.00 frames. ], tot_loss[loss=0.3724, simple_loss=0.4069, pruned_loss=0.169, over 1613027.20 frames. ], batch size: 39, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:20,910 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 4.339e+02 5.206e+02 6.705e+02 1.063e+03, threshold=1.041e+03, percent-clipped=3.0 +2023-02-05 20:31:33,618 INFO [train.py:901] (3/4) Epoch 2, batch 5250, loss[loss=0.4088, simple_loss=0.4471, pruned_loss=0.1853, over 8507.00 frames. ], tot_loss[loss=0.3708, simple_loss=0.4057, pruned_loss=0.1679, over 1609453.03 frames. ], batch size: 28, lr: 2.91e-02, grad_scale: 8.0 +2023-02-05 20:31:42,977 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 20:32:03,333 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-05 20:32:07,567 INFO [train.py:901] (3/4) Epoch 2, batch 5300, loss[loss=0.3745, simple_loss=0.4174, pruned_loss=0.1657, over 8601.00 frames. ], tot_loss[loss=0.3705, simple_loss=0.406, pruned_loss=0.1675, over 1613001.93 frames. 
], batch size: 34, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:32:12,473 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3928, 2.0691, 3.2243, 2.9753, 2.6646, 1.9095, 1.5192, 1.9530], + device='cuda:3'), covar=tensor([0.0578, 0.0653, 0.0131, 0.0185, 0.0299, 0.0307, 0.0413, 0.0514], + device='cuda:3'), in_proj_covar=tensor([0.0494, 0.0415, 0.0315, 0.0346, 0.0436, 0.0382, 0.0403, 0.0431], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 20:32:29,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.093e+02 3.821e+02 4.884e+02 6.417e+02 1.823e+03, threshold=9.767e+02, percent-clipped=6.0 +2023-02-05 20:32:42,524 INFO [train.py:901] (3/4) Epoch 2, batch 5350, loss[loss=0.3318, simple_loss=0.393, pruned_loss=0.1353, over 8472.00 frames. ], tot_loss[loss=0.3693, simple_loss=0.4049, pruned_loss=0.1668, over 1610544.90 frames. ], batch size: 25, lr: 2.90e-02, grad_scale: 8.0 +2023-02-05 20:33:16,588 INFO [train.py:901] (3/4) Epoch 2, batch 5400, loss[loss=0.3277, simple_loss=0.3692, pruned_loss=0.1431, over 7791.00 frames. ], tot_loss[loss=0.3699, simple_loss=0.4058, pruned_loss=0.167, over 1612424.81 frames. ], batch size: 19, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:33:24,898 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=13496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:38,013 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.355e+02 3.820e+02 4.559e+02 5.766e+02 1.205e+03, threshold=9.119e+02, percent-clipped=6.0 +2023-02-05 20:33:43,049 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=13521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:33:51,331 INFO [train.py:901] (3/4) Epoch 2, batch 5450, loss[loss=0.3571, simple_loss=0.3954, pruned_loss=0.1594, over 8246.00 frames. ], tot_loss[loss=0.3671, simple_loss=0.4045, pruned_loss=0.1649, over 1614790.52 frames. ], batch size: 22, lr: 2.89e-02, grad_scale: 8.0 +2023-02-05 20:34:25,971 INFO [train.py:901] (3/4) Epoch 2, batch 5500, loss[loss=0.3154, simple_loss=0.3551, pruned_loss=0.1379, over 7809.00 frames. ], tot_loss[loss=0.3687, simple_loss=0.4056, pruned_loss=0.1659, over 1614323.91 frames. ], batch size: 20, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:34:28,053 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 20:34:28,412 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.45 vs. limit=5.0 +2023-02-05 20:34:46,537 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.726e+02 4.817e+02 6.308e+02 1.682e+03, threshold=9.635e+02, percent-clipped=6.0 +2023-02-05 20:34:59,986 INFO [train.py:901] (3/4) Epoch 2, batch 5550, loss[loss=0.31, simple_loss=0.3501, pruned_loss=0.1349, over 7819.00 frames. ], tot_loss[loss=0.3684, simple_loss=0.4049, pruned_loss=0.166, over 1612018.77 frames. ], batch size: 20, lr: 2.88e-02, grad_scale: 8.0 +2023-02-05 20:35:35,317 INFO [train.py:901] (3/4) Epoch 2, batch 5600, loss[loss=0.358, simple_loss=0.3812, pruned_loss=0.1673, over 7788.00 frames. ], tot_loss[loss=0.3659, simple_loss=0.4032, pruned_loss=0.1643, over 1613689.42 frames. 
], batch size: 19, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:35:55,774 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 3.833e+02 4.619e+02 6.071e+02 1.383e+03, threshold=9.238e+02, percent-clipped=5.0 +2023-02-05 20:36:08,578 INFO [train.py:901] (3/4) Epoch 2, batch 5650, loss[loss=0.4043, simple_loss=0.4238, pruned_loss=0.1924, over 8292.00 frames. ], tot_loss[loss=0.3671, simple_loss=0.4037, pruned_loss=0.1653, over 1607681.86 frames. ], batch size: 48, lr: 2.87e-02, grad_scale: 8.0 +2023-02-05 20:36:23,374 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=13755.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:36:34,189 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 20:36:35,122 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.13 vs. limit=5.0 +2023-02-05 20:36:43,556 INFO [train.py:901] (3/4) Epoch 2, batch 5700, loss[loss=0.3375, simple_loss=0.3882, pruned_loss=0.1435, over 8029.00 frames. ], tot_loss[loss=0.3659, simple_loss=0.4026, pruned_loss=0.1646, over 1607957.38 frames. ], batch size: 22, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:05,589 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.337e+02 4.261e+02 5.123e+02 6.631e+02 2.352e+03, threshold=1.025e+03, percent-clipped=5.0 +2023-02-05 20:37:18,906 INFO [train.py:901] (3/4) Epoch 2, batch 5750, loss[loss=0.3491, simple_loss=0.4019, pruned_loss=0.1482, over 8251.00 frames. ], tot_loss[loss=0.3651, simple_loss=0.402, pruned_loss=0.1641, over 1607389.49 frames. ], batch size: 24, lr: 2.86e-02, grad_scale: 8.0 +2023-02-05 20:37:38,953 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 20:37:54,461 INFO [train.py:901] (3/4) Epoch 2, batch 5800, loss[loss=0.4165, simple_loss=0.4425, pruned_loss=0.1953, over 8492.00 frames. ], tot_loss[loss=0.3638, simple_loss=0.4017, pruned_loss=0.163, over 1609899.16 frames. ], batch size: 29, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:11,985 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4476, 1.7129, 3.8837, 1.8388, 2.0316, 4.4247, 3.9079, 3.9517], + device='cuda:3'), covar=tensor([0.1175, 0.1659, 0.0315, 0.1894, 0.0972, 0.0320, 0.0354, 0.0552], + device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0262, 0.0186, 0.0254, 0.0194, 0.0157, 0.0148, 0.0229], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 20:38:15,256 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2470, 2.2072, 1.5034, 1.2904, 2.1316, 1.7354, 2.4107, 2.3799], + device='cuda:3'), covar=tensor([0.0922, 0.1502, 0.2152, 0.1943, 0.0881, 0.1798, 0.1036, 0.0696], + device='cuda:3'), in_proj_covar=tensor([0.0221, 0.0250, 0.0279, 0.0248, 0.0220, 0.0243, 0.0210, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0005, 0.0004], + device='cuda:3') +2023-02-05 20:38:15,742 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.601e+02 3.784e+02 4.729e+02 6.225e+02 2.390e+03, threshold=9.458e+02, percent-clipped=5.0 +2023-02-05 20:38:29,067 INFO [train.py:901] (3/4) Epoch 2, batch 5850, loss[loss=0.4095, simple_loss=0.4406, pruned_loss=0.1892, over 8539.00 frames. ], tot_loss[loss=0.364, simple_loss=0.4016, pruned_loss=0.1632, over 1613739.62 frames. 
], batch size: 31, lr: 2.85e-02, grad_scale: 8.0 +2023-02-05 20:38:58,029 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.3125, 2.4512, 4.1705, 3.9550, 3.0136, 2.5019, 1.8470, 2.2108], + device='cuda:3'), covar=tensor([0.0588, 0.0875, 0.0153, 0.0213, 0.0414, 0.0308, 0.0432, 0.0703], + device='cuda:3'), in_proj_covar=tensor([0.0489, 0.0416, 0.0311, 0.0351, 0.0450, 0.0388, 0.0404, 0.0435], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 20:39:03,904 INFO [train.py:901] (3/4) Epoch 2, batch 5900, loss[loss=0.3359, simple_loss=0.3928, pruned_loss=0.1395, over 8258.00 frames. ], tot_loss[loss=0.3651, simple_loss=0.4023, pruned_loss=0.1639, over 1614372.24 frames. ], batch size: 24, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:39:05,670 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.04 vs. limit=5.0 +2023-02-05 20:39:27,081 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.452e+02 3.946e+02 4.724e+02 6.297e+02 1.551e+03, threshold=9.448e+02, percent-clipped=7.0 +2023-02-05 20:39:30,120 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.60 vs. limit=5.0 +2023-02-05 20:39:40,155 INFO [train.py:901] (3/4) Epoch 2, batch 5950, loss[loss=0.3833, simple_loss=0.4143, pruned_loss=0.1762, over 8296.00 frames. ], tot_loss[loss=0.3642, simple_loss=0.402, pruned_loss=0.1632, over 1614944.72 frames. ], batch size: 23, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,643 INFO [train.py:901] (3/4) Epoch 2, batch 6000, loss[loss=0.3797, simple_loss=0.4127, pruned_loss=0.1734, over 8496.00 frames. ], tot_loss[loss=0.3631, simple_loss=0.4015, pruned_loss=0.1624, over 1615698.07 frames. ], batch size: 26, lr: 2.84e-02, grad_scale: 8.0 +2023-02-05 20:40:14,643 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 20:40:24,524 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9187, 1.1742, 1.2710, 0.8340, 0.7641, 1.1568, 0.1463, 0.5454], + device='cuda:3'), covar=tensor([0.1849, 0.1714, 0.0716, 0.1934, 0.2670, 0.0917, 0.3981, 0.2130], + device='cuda:3'), in_proj_covar=tensor([0.0105, 0.0095, 0.0083, 0.0140, 0.0140, 0.0082, 0.0157, 0.0118], + device='cuda:3'), out_proj_covar=tensor([1.1407e-04, 1.0875e-04, 8.9206e-05, 1.4468e-04, 1.4930e-04, 9.0084e-05, + 1.6301e-04, 1.2994e-04], device='cuda:3') +2023-02-05 20:40:26,576 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0038, 1.5560, 1.4381, 0.3280, 1.4115, 0.9478, 0.2091, 1.4932], + device='cuda:3'), covar=tensor([0.0137, 0.0066, 0.0085, 0.0160, 0.0075, 0.0257, 0.0207, 0.0070], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0140, 0.0130, 0.0183, 0.0136, 0.0249, 0.0196, 0.0178], + device='cuda:3'), out_proj_covar=tensor([1.1280e-04, 8.1912e-05, 7.9440e-05, 1.0615e-04, 8.3237e-05, 1.5626e-04, + 1.1841e-04, 1.0724e-04], device='cuda:3') +2023-02-05 20:40:27,828 INFO [train.py:935] (3/4) Epoch 2, validation: loss=0.2758, simple_loss=0.3606, pruned_loss=0.0955, over 944034.00 frames. 
+2023-02-05 20:40:27,829 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 20:40:32,717 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:38,744 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14099.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:40:49,504 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.396e+02 3.733e+02 4.780e+02 6.772e+02 2.203e+03, threshold=9.561e+02, percent-clipped=10.0 +2023-02-05 20:40:53,735 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:02,698 INFO [train.py:901] (3/4) Epoch 2, batch 6050, loss[loss=0.3225, simple_loss=0.373, pruned_loss=0.136, over 8287.00 frames. ], tot_loss[loss=0.3635, simple_loss=0.4013, pruned_loss=0.1629, over 1613946.66 frames. ], batch size: 23, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:05,428 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14138.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:41:37,178 INFO [train.py:901] (3/4) Epoch 2, batch 6100, loss[loss=0.3072, simple_loss=0.3429, pruned_loss=0.1358, over 7668.00 frames. ], tot_loss[loss=0.3652, simple_loss=0.402, pruned_loss=0.1641, over 1608649.77 frames. ], batch size: 19, lr: 2.83e-02, grad_scale: 8.0 +2023-02-05 20:41:58,433 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14214.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:41:58,921 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 3.920e+02 4.920e+02 6.492e+02 2.677e+03, threshold=9.840e+02, percent-clipped=6.0 +2023-02-05 20:42:03,176 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.67 vs. limit=2.0 +2023-02-05 20:42:05,167 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.04 vs. limit=2.0 +2023-02-05 20:42:08,080 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 20:42:11,461 INFO [train.py:901] (3/4) Epoch 2, batch 6150, loss[loss=0.3308, simple_loss=0.3934, pruned_loss=0.1341, over 8467.00 frames. ], tot_loss[loss=0.3624, simple_loss=0.4004, pruned_loss=0.1622, over 1611958.19 frames. ], batch size: 25, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:42:46,433 INFO [train.py:901] (3/4) Epoch 2, batch 6200, loss[loss=0.3505, simple_loss=0.3853, pruned_loss=0.1579, over 7816.00 frames. ], tot_loss[loss=0.3644, simple_loss=0.4018, pruned_loss=0.1635, over 1612829.77 frames. ], batch size: 20, lr: 2.82e-02, grad_scale: 8.0 +2023-02-05 20:43:08,135 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.406e+02 3.453e+02 4.846e+02 6.394e+02 2.249e+03, threshold=9.691e+02, percent-clipped=6.0 +2023-02-05 20:43:21,530 INFO [train.py:901] (3/4) Epoch 2, batch 6250, loss[loss=0.3476, simple_loss=0.3869, pruned_loss=0.1541, over 7555.00 frames. ], tot_loss[loss=0.3631, simple_loss=0.4005, pruned_loss=0.1629, over 1609773.32 frames. ], batch size: 18, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:43:55,857 INFO [train.py:901] (3/4) Epoch 2, batch 6300, loss[loss=0.3759, simple_loss=0.4303, pruned_loss=0.1608, over 8256.00 frames. ], tot_loss[loss=0.364, simple_loss=0.4011, pruned_loss=0.1635, over 1609404.18 frames. 
], batch size: 24, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:17,505 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.643e+02 3.823e+02 4.655e+02 5.877e+02 1.568e+03, threshold=9.309e+02, percent-clipped=4.0 +2023-02-05 20:44:28,394 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14431.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:30,257 INFO [train.py:901] (3/4) Epoch 2, batch 6350, loss[loss=0.391, simple_loss=0.4236, pruned_loss=0.1792, over 8649.00 frames. ], tot_loss[loss=0.3615, simple_loss=0.3999, pruned_loss=0.1616, over 1610724.85 frames. ], batch size: 31, lr: 2.81e-02, grad_scale: 8.0 +2023-02-05 20:44:30,321 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14434.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:43,820 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 20:44:51,467 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14465.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:44:54,888 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:03,123 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14482.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:45:04,274 INFO [train.py:901] (3/4) Epoch 2, batch 6400, loss[loss=0.3954, simple_loss=0.4201, pruned_loss=0.1853, over 8519.00 frames. ], tot_loss[loss=0.3634, simple_loss=0.4014, pruned_loss=0.1627, over 1614130.84 frames. ], batch size: 31, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:12,404 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14495.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:19,142 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14505.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:25,558 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.974e+02 5.065e+02 7.362e+02 1.328e+03, threshold=1.013e+03, percent-clipped=8.0 +2023-02-05 20:45:38,730 INFO [train.py:901] (3/4) Epoch 2, batch 6450, loss[loss=0.3469, simple_loss=0.3969, pruned_loss=0.1484, over 8198.00 frames. ], tot_loss[loss=0.3639, simple_loss=0.4014, pruned_loss=0.1632, over 1615566.59 frames. ], batch size: 23, lr: 2.80e-02, grad_scale: 8.0 +2023-02-05 20:45:48,962 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14549.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:45:59,284 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.65 vs. limit=2.0 +2023-02-05 20:46:10,576 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14580.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:46:13,166 INFO [train.py:901] (3/4) Epoch 2, batch 6500, loss[loss=0.3102, simple_loss=0.3524, pruned_loss=0.134, over 7692.00 frames. ], tot_loss[loss=0.3605, simple_loss=0.3994, pruned_loss=0.1609, over 1614714.20 frames. 
], batch size: 18, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:46:22,646 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14597.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:46:35,355 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.440e+02 3.999e+02 5.009e+02 6.288e+02 1.522e+03, threshold=1.002e+03, percent-clipped=8.0 +2023-02-05 20:46:48,426 INFO [train.py:901] (3/4) Epoch 2, batch 6550, loss[loss=0.3455, simple_loss=0.4015, pruned_loss=0.1448, over 8111.00 frames. ], tot_loss[loss=0.3621, simple_loss=0.4003, pruned_loss=0.162, over 1612654.76 frames. ], batch size: 23, lr: 2.79e-02, grad_scale: 8.0 +2023-02-05 20:47:16,653 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 20:47:23,549 INFO [train.py:901] (3/4) Epoch 2, batch 6600, loss[loss=0.3661, simple_loss=0.4086, pruned_loss=0.1618, over 8253.00 frames. ], tot_loss[loss=0.3593, simple_loss=0.3982, pruned_loss=0.1602, over 1611346.01 frames. ], batch size: 24, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:47:36,555 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 20:47:45,888 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.289e+02 3.681e+02 4.457e+02 5.556e+02 1.208e+03, threshold=8.913e+02, percent-clipped=4.0 +2023-02-05 20:47:58,942 INFO [train.py:901] (3/4) Epoch 2, batch 6650, loss[loss=0.4196, simple_loss=0.4474, pruned_loss=0.1959, over 8445.00 frames. ], tot_loss[loss=0.3601, simple_loss=0.399, pruned_loss=0.1606, over 1615967.39 frames. ], batch size: 29, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:14,410 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2066, 1.9257, 3.1520, 2.7113, 2.3454, 1.9594, 1.2968, 1.2772], + device='cuda:3'), covar=tensor([0.0698, 0.0768, 0.0139, 0.0261, 0.0353, 0.0361, 0.0512, 0.0728], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0456, 0.0339, 0.0388, 0.0490, 0.0417, 0.0443, 0.0462], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 20:48:16,415 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=14758.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:28,651 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14775.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:34,791 INFO [train.py:901] (3/4) Epoch 2, batch 6700, loss[loss=0.3393, simple_loss=0.3841, pruned_loss=0.1473, over 8034.00 frames. ], tot_loss[loss=0.359, simple_loss=0.3982, pruned_loss=0.1599, over 1613337.03 frames. ], batch size: 22, lr: 2.78e-02, grad_scale: 8.0 +2023-02-05 20:48:50,193 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:48:56,685 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.056e+02 3.873e+02 4.634e+02 6.203e+02 1.536e+03, threshold=9.268e+02, percent-clipped=6.0 +2023-02-05 20:49:07,131 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14830.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:10,358 INFO [train.py:901] (3/4) Epoch 2, batch 6750, loss[loss=0.3279, simple_loss=0.3829, pruned_loss=0.1364, over 8511.00 frames. ], tot_loss[loss=0.3603, simple_loss=0.3995, pruned_loss=0.1606, over 1615895.21 frames. 
], batch size: 26, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:11,906 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14836.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:21,259 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=14849.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:24,081 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=14853.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 20:49:29,654 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14861.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:29,663 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.8794, 1.0448, 1.0740, 0.9553, 0.7195, 1.0797, 0.0043, 0.6409], + device='cuda:3'), covar=tensor([0.1749, 0.1326, 0.0945, 0.1320, 0.2658, 0.0721, 0.4086, 0.1798], + device='cuda:3'), in_proj_covar=tensor([0.0109, 0.0100, 0.0084, 0.0142, 0.0145, 0.0082, 0.0163, 0.0116], + device='cuda:3'), out_proj_covar=tensor([1.1983e-04, 1.1672e-04, 9.2567e-05, 1.4989e-04, 1.5721e-04, 9.2951e-05, + 1.7304e-04, 1.3071e-04], device='cuda:3') +2023-02-05 20:49:41,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=14878.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 20:49:45,988 INFO [train.py:901] (3/4) Epoch 2, batch 6800, loss[loss=0.3874, simple_loss=0.4194, pruned_loss=0.1777, over 8608.00 frames. ], tot_loss[loss=0.3597, simple_loss=0.3985, pruned_loss=0.1604, over 1612090.27 frames. ], batch size: 31, lr: 2.77e-02, grad_scale: 8.0 +2023-02-05 20:49:50,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14890.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:49:54,306 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 20:50:04,552 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.82 vs. limit=5.0 +2023-02-05 20:50:07,690 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.663e+02 4.715e+02 6.092e+02 1.805e+03, threshold=9.431e+02, percent-clipped=7.0 +2023-02-05 20:50:20,401 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.97 vs. limit=5.0 +2023-02-05 20:50:21,321 INFO [train.py:901] (3/4) Epoch 2, batch 6850, loss[loss=0.2969, simple_loss=0.3601, pruned_loss=0.1169, over 8191.00 frames. ], tot_loss[loss=0.3584, simple_loss=0.3981, pruned_loss=0.1594, over 1616658.73 frames. ], batch size: 23, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:50:42,735 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=14964.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:50:45,366 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 20:50:57,103 INFO [train.py:901] (3/4) Epoch 2, batch 6900, loss[loss=0.2919, simple_loss=0.3449, pruned_loss=0.1194, over 8033.00 frames. ], tot_loss[loss=0.3577, simple_loss=0.3972, pruned_loss=0.1591, over 1611692.41 frames. 
], batch size: 22, lr: 2.76e-02, grad_scale: 8.0 +2023-02-05 20:51:19,285 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.011e+02 4.191e+02 5.097e+02 7.005e+02 1.700e+03, threshold=1.019e+03, percent-clipped=5.0 +2023-02-05 20:51:30,703 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8531, 2.2786, 2.8661, 0.2623, 2.9086, 1.8322, 1.2592, 2.0467], + device='cuda:3'), covar=tensor([0.0145, 0.0066, 0.0087, 0.0225, 0.0118, 0.0205, 0.0225, 0.0102], + device='cuda:3'), in_proj_covar=tensor([0.0196, 0.0133, 0.0123, 0.0186, 0.0132, 0.0252, 0.0200, 0.0171], + device='cuda:3'), out_proj_covar=tensor([1.1061e-04, 7.4798e-05, 7.1916e-05, 1.0345e-04, 7.7725e-05, 1.5465e-04, + 1.1479e-04, 9.8079e-05], device='cuda:3') +2023-02-05 20:51:32,586 INFO [train.py:901] (3/4) Epoch 2, batch 6950, loss[loss=0.3714, simple_loss=0.3869, pruned_loss=0.178, over 7252.00 frames. ], tot_loss[loss=0.3577, simple_loss=0.3966, pruned_loss=0.1594, over 1600959.03 frames. ], batch size: 16, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:51:56,484 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 20:52:08,410 INFO [train.py:901] (3/4) Epoch 2, batch 7000, loss[loss=0.3213, simple_loss=0.3728, pruned_loss=0.1349, over 7982.00 frames. ], tot_loss[loss=0.3567, simple_loss=0.3962, pruned_loss=0.1586, over 1609016.27 frames. ], batch size: 21, lr: 2.75e-02, grad_scale: 8.0 +2023-02-05 20:52:21,505 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15102.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:52:30,568 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.233e+02 3.928e+02 4.810e+02 5.818e+02 1.410e+03, threshold=9.621e+02, percent-clipped=1.0 +2023-02-05 20:52:44,338 INFO [train.py:901] (3/4) Epoch 2, batch 7050, loss[loss=0.3643, simple_loss=0.4106, pruned_loss=0.159, over 8500.00 frames. ], tot_loss[loss=0.3571, simple_loss=0.3966, pruned_loss=0.1588, over 1611823.90 frames. ], batch size: 26, lr: 2.75e-02, grad_scale: 16.0 +2023-02-05 20:52:52,907 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:52:57,901 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 20:53:10,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15171.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:18,778 INFO [train.py:901] (3/4) Epoch 2, batch 7100, loss[loss=0.3975, simple_loss=0.4326, pruned_loss=0.1812, over 8111.00 frames. ], tot_loss[loss=0.3574, simple_loss=0.3967, pruned_loss=0.159, over 1612470.87 frames. 
], batch size: 23, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:53:39,783 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15213.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:41,005 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.718e+02 4.413e+02 5.855e+02 1.165e+03, threshold=8.826e+02, percent-clipped=3.0 +2023-02-05 20:53:42,518 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15217.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:44,504 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15220.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:53:53,629 INFO [train.py:901] (3/4) Epoch 2, batch 7150, loss[loss=0.3672, simple_loss=0.3966, pruned_loss=0.1689, over 7210.00 frames. ], tot_loss[loss=0.3553, simple_loss=0.3949, pruned_loss=0.1579, over 1608951.16 frames. ], batch size: 16, lr: 2.74e-02, grad_scale: 16.0 +2023-02-05 20:53:55,204 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6575, 2.3648, 3.6865, 1.0852, 2.2068, 1.9036, 1.5932, 1.8825], + device='cuda:3'), covar=tensor([0.1003, 0.0960, 0.0352, 0.1823, 0.1029, 0.1678, 0.1018, 0.1571], + device='cuda:3'), in_proj_covar=tensor([0.0409, 0.0387, 0.0439, 0.0457, 0.0511, 0.0458, 0.0404, 0.0515], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 20:54:02,061 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:54:17,251 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-05 20:54:29,184 INFO [train.py:901] (3/4) Epoch 2, batch 7200, loss[loss=0.3647, simple_loss=0.4067, pruned_loss=0.1614, over 8324.00 frames. ], tot_loss[loss=0.3543, simple_loss=0.3944, pruned_loss=0.1571, over 1607093.97 frames. ], batch size: 25, lr: 2.73e-02, grad_scale: 16.0 +2023-02-05 20:54:51,169 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.528e+02 3.704e+02 4.905e+02 6.625e+02 1.855e+03, threshold=9.809e+02, percent-clipped=12.0 +2023-02-05 20:54:55,123 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.97 vs. limit=2.0 +2023-02-05 20:55:04,889 INFO [train.py:901] (3/4) Epoch 2, batch 7250, loss[loss=0.3471, simple_loss=0.3939, pruned_loss=0.1502, over 8192.00 frames. ], tot_loss[loss=0.3567, simple_loss=0.396, pruned_loss=0.1586, over 1604175.84 frames. ], batch size: 23, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:55:39,922 INFO [train.py:901] (3/4) Epoch 2, batch 7300, loss[loss=0.2944, simple_loss=0.3591, pruned_loss=0.1148, over 7544.00 frames. ], tot_loss[loss=0.3574, simple_loss=0.397, pruned_loss=0.1589, over 1604917.39 frames. ], batch size: 18, lr: 2.73e-02, grad_scale: 8.0 +2023-02-05 20:56:02,317 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.194e+02 3.434e+02 4.292e+02 5.923e+02 1.449e+03, threshold=8.584e+02, percent-clipped=5.0 +2023-02-05 20:56:14,879 INFO [train.py:901] (3/4) Epoch 2, batch 7350, loss[loss=0.3121, simple_loss=0.3738, pruned_loss=0.1252, over 8041.00 frames. ], tot_loss[loss=0.3552, simple_loss=0.3953, pruned_loss=0.1575, over 1606004.79 frames. 
], batch size: 22, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:42,793 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15473.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:56:43,923 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-05 20:56:49,843 INFO [train.py:901] (3/4) Epoch 2, batch 7400, loss[loss=0.329, simple_loss=0.394, pruned_loss=0.132, over 8328.00 frames. ], tot_loss[loss=0.356, simple_loss=0.3962, pruned_loss=0.1579, over 1607808.80 frames. ], batch size: 25, lr: 2.72e-02, grad_scale: 8.0 +2023-02-05 20:56:59,513 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:01,976 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 20:57:11,815 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.940e+02 4.956e+02 6.362e+02 1.377e+03, threshold=9.912e+02, percent-clipped=7.0 +2023-02-05 20:57:24,681 INFO [train.py:901] (3/4) Epoch 2, batch 7450, loss[loss=0.3489, simple_loss=0.4047, pruned_loss=0.1466, over 8344.00 frames. ], tot_loss[loss=0.3574, simple_loss=0.3976, pruned_loss=0.1586, over 1609005.28 frames. ], batch size: 26, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:57:40,477 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=15557.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:57:41,773 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 20:57:59,025 INFO [train.py:901] (3/4) Epoch 2, batch 7500, loss[loss=0.3294, simple_loss=0.3799, pruned_loss=0.1394, over 8030.00 frames. ], tot_loss[loss=0.3557, simple_loss=0.3959, pruned_loss=0.1578, over 1606962.85 frames. ], batch size: 22, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:58:21,359 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.209e+02 3.662e+02 4.519e+02 5.678e+02 1.466e+03, threshold=9.038e+02, percent-clipped=6.0 +2023-02-05 20:58:34,047 INFO [train.py:901] (3/4) Epoch 2, batch 7550, loss[loss=0.3468, simple_loss=0.3966, pruned_loss=0.1485, over 8707.00 frames. ], tot_loss[loss=0.354, simple_loss=0.3943, pruned_loss=0.1568, over 1605818.34 frames. ], batch size: 34, lr: 2.71e-02, grad_scale: 8.0 +2023-02-05 20:58:46,868 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3044, 1.4829, 1.3909, 1.5337, 0.9544, 1.8865, 0.2752, 0.9188], + device='cuda:3'), covar=tensor([0.1736, 0.1087, 0.0698, 0.1306, 0.1834, 0.0546, 0.3648, 0.1422], + device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0091, 0.0077, 0.0134, 0.0135, 0.0076, 0.0144, 0.0108], + device='cuda:3'), out_proj_covar=tensor([1.1914e-04, 1.0971e-04, 8.8119e-05, 1.4635e-04, 1.4930e-04, 9.0133e-05, + 1.5860e-04, 1.2735e-04], device='cuda:3') +2023-02-05 20:59:00,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=15672.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 20:59:08,582 INFO [train.py:901] (3/4) Epoch 2, batch 7600, loss[loss=0.2765, simple_loss=0.3277, pruned_loss=0.1127, over 7788.00 frames. ], tot_loss[loss=0.3558, simple_loss=0.396, pruned_loss=0.1578, over 1612075.31 frames. 
], batch size: 19, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 20:59:31,058 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.634e+02 4.473e+02 6.191e+02 1.516e+03, threshold=8.946e+02, percent-clipped=5.0 +2023-02-05 20:59:35,833 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3405, 3.6965, 2.1712, 2.6077, 2.8704, 1.9294, 2.2480, 2.6338], + device='cuda:3'), covar=tensor([0.1474, 0.0436, 0.1043, 0.1000, 0.0863, 0.1320, 0.1521, 0.0932], + device='cuda:3'), in_proj_covar=tensor([0.0388, 0.0244, 0.0350, 0.0324, 0.0359, 0.0332, 0.0363, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 20:59:40,160 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-05 20:59:43,079 INFO [train.py:901] (3/4) Epoch 2, batch 7650, loss[loss=0.3897, simple_loss=0.4298, pruned_loss=0.1748, over 8344.00 frames. ], tot_loss[loss=0.3555, simple_loss=0.3957, pruned_loss=0.1576, over 1608661.53 frames. ], batch size: 26, lr: 2.70e-02, grad_scale: 8.0 +2023-02-05 21:00:02,563 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.3405, 2.4696, 4.3645, 3.5512, 3.3120, 2.7750, 1.6375, 1.9670], + device='cuda:3'), covar=tensor([0.0616, 0.0954, 0.0159, 0.0315, 0.0373, 0.0314, 0.0485, 0.0870], + device='cuda:3'), in_proj_covar=tensor([0.0527, 0.0453, 0.0348, 0.0386, 0.0484, 0.0419, 0.0440, 0.0457], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:00:19,423 INFO [train.py:901] (3/4) Epoch 2, batch 7700, loss[loss=0.3325, simple_loss=0.3721, pruned_loss=0.1465, over 7824.00 frames. ], tot_loss[loss=0.3543, simple_loss=0.3949, pruned_loss=0.1568, over 1610618.22 frames. ], batch size: 20, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:00:25,146 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.4425, 2.3760, 4.4142, 3.3889, 2.9448, 2.4905, 1.7185, 1.9807], + device='cuda:3'), covar=tensor([0.0607, 0.0948, 0.0176, 0.0314, 0.0471, 0.0366, 0.0512, 0.0842], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0466, 0.0358, 0.0396, 0.0496, 0.0429, 0.0451, 0.0468], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:00:41,050 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 3.880e+02 4.902e+02 6.175e+02 1.322e+03, threshold=9.805e+02, percent-clipped=4.0 +2023-02-05 21:00:43,410 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7733, 1.9069, 3.7748, 1.0649, 2.3229, 1.9947, 1.7170, 2.0684], + device='cuda:3'), covar=tensor([0.0961, 0.1275, 0.0313, 0.2128, 0.1056, 0.1507, 0.0918, 0.1562], + device='cuda:3'), in_proj_covar=tensor([0.0405, 0.0380, 0.0426, 0.0460, 0.0506, 0.0444, 0.0397, 0.0506], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:00:51,196 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 21:00:53,925 INFO [train.py:901] (3/4) Epoch 2, batch 7750, loss[loss=0.3869, simple_loss=0.4242, pruned_loss=0.1748, over 8507.00 frames. ], tot_loss[loss=0.3541, simple_loss=0.3945, pruned_loss=0.1569, over 1609548.15 frames. 
], batch size: 28, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:28,170 INFO [train.py:901] (3/4) Epoch 2, batch 7800, loss[loss=0.3127, simple_loss=0.347, pruned_loss=0.1392, over 7689.00 frames. ], tot_loss[loss=0.3531, simple_loss=0.3938, pruned_loss=0.1562, over 1612551.30 frames. ], batch size: 18, lr: 2.69e-02, grad_scale: 8.0 +2023-02-05 21:01:41,032 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=15901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:01:50,927 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.569e+02 4.742e+02 5.990e+02 9.896e+02, threshold=9.484e+02, percent-clipped=1.0 +2023-02-05 21:01:59,055 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=15928.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:02,911 INFO [train.py:901] (3/4) Epoch 2, batch 7850, loss[loss=0.343, simple_loss=0.3856, pruned_loss=0.1502, over 7541.00 frames. ], tot_loss[loss=0.3548, simple_loss=0.3954, pruned_loss=0.1571, over 1611802.68 frames. ], batch size: 18, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:15,682 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=15953.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:02:36,233 INFO [train.py:901] (3/4) Epoch 2, batch 7900, loss[loss=0.3026, simple_loss=0.3485, pruned_loss=0.1283, over 7970.00 frames. ], tot_loss[loss=0.3529, simple_loss=0.3943, pruned_loss=0.1558, over 1609065.57 frames. ], batch size: 21, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:02:58,244 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.267e+02 3.808e+02 4.602e+02 5.936e+02 1.299e+03, threshold=9.205e+02, percent-clipped=9.0 +2023-02-05 21:03:00,388 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16019.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:03:10,220 INFO [train.py:901] (3/4) Epoch 2, batch 7950, loss[loss=0.4178, simple_loss=0.4429, pruned_loss=0.1963, over 8479.00 frames. ], tot_loss[loss=0.3553, simple_loss=0.3961, pruned_loss=0.1572, over 1610354.53 frames. ], batch size: 49, lr: 2.68e-02, grad_scale: 8.0 +2023-02-05 21:03:16,614 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.04 vs. limit=2.0 +2023-02-05 21:03:43,332 INFO [train.py:901] (3/4) Epoch 2, batch 8000, loss[loss=0.3309, simple_loss=0.3904, pruned_loss=0.1357, over 8693.00 frames. ], tot_loss[loss=0.3562, simple_loss=0.3969, pruned_loss=0.1577, over 1612715.58 frames. 
], batch size: 34, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:03:55,455 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5003, 1.8401, 3.3949, 1.0492, 2.1326, 1.6158, 1.6440, 1.8365], + device='cuda:3'), covar=tensor([0.1147, 0.1414, 0.0424, 0.2091, 0.1226, 0.1859, 0.0906, 0.1718], + device='cuda:3'), in_proj_covar=tensor([0.0417, 0.0391, 0.0443, 0.0470, 0.0515, 0.0458, 0.0407, 0.0516], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:03:56,060 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16103.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:04:04,544 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.336e+02 4.123e+02 4.991e+02 6.647e+02 1.461e+03, threshold=9.983e+02, percent-clipped=10.0 +2023-02-05 21:04:16,511 INFO [train.py:901] (3/4) Epoch 2, batch 8050, loss[loss=0.3267, simple_loss=0.3606, pruned_loss=0.1464, over 7536.00 frames. ], tot_loss[loss=0.3563, simple_loss=0.3959, pruned_loss=0.1583, over 1601070.27 frames. ], batch size: 18, lr: 2.67e-02, grad_scale: 8.0 +2023-02-05 21:04:36,024 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5575, 2.1983, 3.2023, 2.2343, 2.7102, 3.7991, 3.3347, 3.3761], + device='cuda:3'), covar=tensor([0.0878, 0.1156, 0.0646, 0.1446, 0.0650, 0.0270, 0.0373, 0.0500], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0255, 0.0189, 0.0255, 0.0194, 0.0159, 0.0152, 0.0231], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:04:51,518 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 21:04:55,115 INFO [train.py:901] (3/4) Epoch 3, batch 0, loss[loss=0.4623, simple_loss=0.4701, pruned_loss=0.2272, over 8676.00 frames. ], tot_loss[loss=0.4623, simple_loss=0.4701, pruned_loss=0.2272, over 8676.00 frames. ], batch size: 39, lr: 2.53e-02, grad_scale: 8.0 +2023-02-05 21:04:55,115 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 21:05:06,956 INFO [train.py:935] (3/4) Epoch 3, validation: loss=0.2731, simple_loss=0.3579, pruned_loss=0.09417, over 944034.00 frames. +2023-02-05 21:05:06,957 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 21:05:07,097 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16167.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:05:21,209 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.73 vs. limit=5.0 +2023-02-05 21:05:23,569 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 21:05:42,762 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.402e+02 4.065e+02 5.070e+02 6.931e+02 1.670e+03, threshold=1.014e+03, percent-clipped=5.0 +2023-02-05 21:05:42,782 INFO [train.py:901] (3/4) Epoch 3, batch 50, loss[loss=0.3827, simple_loss=0.4245, pruned_loss=0.1705, over 8347.00 frames. ], tot_loss[loss=0.3602, simple_loss=0.3995, pruned_loss=0.1605, over 366981.32 frames. ], batch size: 26, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:05:58,800 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-05 21:06:02,997 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16245.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:06:18,208 INFO [train.py:901] (3/4) Epoch 3, batch 100, loss[loss=0.3649, simple_loss=0.4061, pruned_loss=0.1618, over 8495.00 frames. ], tot_loss[loss=0.3584, simple_loss=0.3996, pruned_loss=0.1586, over 649642.13 frames. ], batch size: 26, lr: 2.53e-02, grad_scale: 4.0 +2023-02-05 21:06:18,921 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 21:06:20,585 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4310, 1.8402, 3.1540, 0.9621, 2.2257, 1.6481, 1.4264, 1.8010], + device='cuda:3'), covar=tensor([0.1172, 0.1274, 0.0427, 0.2174, 0.1075, 0.1710, 0.1001, 0.1603], + device='cuda:3'), in_proj_covar=tensor([0.0419, 0.0388, 0.0443, 0.0472, 0.0514, 0.0453, 0.0409, 0.0511], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:06:53,426 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.291e+02 3.520e+02 4.471e+02 5.811e+02 1.196e+03, threshold=8.942e+02, percent-clipped=3.0 +2023-02-05 21:06:53,448 INFO [train.py:901] (3/4) Epoch 3, batch 150, loss[loss=0.3621, simple_loss=0.3972, pruned_loss=0.1635, over 7914.00 frames. ], tot_loss[loss=0.3523, simple_loss=0.3953, pruned_loss=0.1546, over 867161.29 frames. ], batch size: 20, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:22,028 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.80 vs. limit=5.0 +2023-02-05 21:07:23,173 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16360.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:24,940 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16363.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:07:27,429 INFO [train.py:901] (3/4) Epoch 3, batch 200, loss[loss=0.3136, simple_loss=0.3832, pruned_loss=0.1219, over 8328.00 frames. ], tot_loss[loss=0.3508, simple_loss=0.394, pruned_loss=0.1538, over 1034717.92 frames. ], batch size: 25, lr: 2.52e-02, grad_scale: 4.0 +2023-02-05 21:07:42,167 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16389.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:01,474 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 3.609e+02 4.419e+02 5.456e+02 1.161e+03, threshold=8.837e+02, percent-clipped=3.0 +2023-02-05 21:08:01,494 INFO [train.py:901] (3/4) Epoch 3, batch 250, loss[loss=0.2685, simple_loss=0.3282, pruned_loss=0.1044, over 6418.00 frames. ], tot_loss[loss=0.351, simple_loss=0.394, pruned_loss=0.1541, over 1163478.97 frames. ], batch size: 14, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:06,810 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6923, 2.2514, 3.0058, 0.8120, 2.9013, 1.8438, 1.1938, 1.6918], + device='cuda:3'), covar=tensor([0.0165, 0.0072, 0.0091, 0.0195, 0.0146, 0.0240, 0.0270, 0.0121], + device='cuda:3'), in_proj_covar=tensor([0.0199, 0.0140, 0.0119, 0.0185, 0.0132, 0.0249, 0.0204, 0.0173], + device='cuda:3'), out_proj_covar=tensor([1.0786e-04, 7.4981e-05, 6.4362e-05, 9.7529e-05, 7.3759e-05, 1.4512e-04, + 1.1234e-04, 9.4053e-05], device='cuda:3') +2023-02-05 21:08:13,913 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-05 21:08:22,558 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-05 21:08:22,620 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:08:35,585 INFO [train.py:901] (3/4) Epoch 3, batch 300, loss[loss=0.3655, simple_loss=0.3886, pruned_loss=0.1713, over 8091.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3928, pruned_loss=0.153, over 1264457.38 frames. ], batch size: 21, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:08:43,562 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16478.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:05,162 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16511.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:09,096 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.982e+02 3.752e+02 4.774e+02 5.919e+02 1.248e+03, threshold=9.549e+02, percent-clipped=6.0 +2023-02-05 21:09:09,117 INFO [train.py:901] (3/4) Epoch 3, batch 350, loss[loss=0.3436, simple_loss=0.3945, pruned_loss=0.1463, over 8101.00 frames. ], tot_loss[loss=0.3507, simple_loss=0.3938, pruned_loss=0.1539, over 1344074.23 frames. ], batch size: 23, lr: 2.51e-02, grad_scale: 4.0 +2023-02-05 21:09:40,933 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16562.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:09:44,011 INFO [train.py:901] (3/4) Epoch 3, batch 400, loss[loss=0.3425, simple_loss=0.3943, pruned_loss=0.1453, over 8097.00 frames. ], tot_loss[loss=0.3494, simple_loss=0.3923, pruned_loss=0.1533, over 1402318.36 frames. ], batch size: 23, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:09:47,772 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-05 21:10:18,125 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16616.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:18,537 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.210e+02 3.588e+02 4.493e+02 6.059e+02 1.047e+03, threshold=8.987e+02, percent-clipped=2.0 +2023-02-05 21:10:18,558 INFO [train.py:901] (3/4) Epoch 3, batch 450, loss[loss=0.4003, simple_loss=0.4177, pruned_loss=0.1915, over 8242.00 frames. ], tot_loss[loss=0.3503, simple_loss=0.3927, pruned_loss=0.154, over 1449026.85 frames. ], batch size: 22, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:10:20,710 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3194, 2.1255, 1.3951, 1.9327, 1.6899, 1.1743, 1.4352, 1.9226], + device='cuda:3'), covar=tensor([0.0998, 0.0379, 0.0960, 0.0508, 0.0697, 0.1206, 0.0880, 0.0705], + device='cuda:3'), in_proj_covar=tensor([0.0367, 0.0244, 0.0339, 0.0307, 0.0346, 0.0312, 0.0350, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 21:10:24,822 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16626.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:35,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16641.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:10:53,050 INFO [train.py:901] (3/4) Epoch 3, batch 500, loss[loss=0.391, simple_loss=0.4296, pruned_loss=0.1762, over 8205.00 frames. 
], tot_loss[loss=0.3505, simple_loss=0.3938, pruned_loss=0.1536, over 1491489.18 frames. ], batch size: 23, lr: 2.50e-02, grad_scale: 8.0 +2023-02-05 21:11:27,942 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 3.547e+02 4.664e+02 6.145e+02 2.246e+03, threshold=9.327e+02, percent-clipped=7.0 +2023-02-05 21:11:27,962 INFO [train.py:901] (3/4) Epoch 3, batch 550, loss[loss=0.3594, simple_loss=0.3966, pruned_loss=0.1611, over 7962.00 frames. ], tot_loss[loss=0.3506, simple_loss=0.3938, pruned_loss=0.1538, over 1518685.59 frames. ], batch size: 21, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:11:38,639 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=16733.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:39,423 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16734.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:11:56,557 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16759.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:01,683 INFO [train.py:901] (3/4) Epoch 3, batch 600, loss[loss=0.3805, simple_loss=0.4018, pruned_loss=0.1796, over 8608.00 frames. ], tot_loss[loss=0.3513, simple_loss=0.3939, pruned_loss=0.1544, over 1541933.41 frames. ], batch size: 34, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:16,339 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 21:12:20,848 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-05 21:12:36,656 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.715e+02 4.834e+02 5.984e+02 1.404e+03, threshold=9.668e+02, percent-clipped=7.0 +2023-02-05 21:12:36,678 INFO [train.py:901] (3/4) Epoch 3, batch 650, loss[loss=0.3387, simple_loss=0.3657, pruned_loss=0.1559, over 7701.00 frames. ], tot_loss[loss=0.3517, simple_loss=0.3938, pruned_loss=0.1548, over 1555063.75 frames. ], batch size: 18, lr: 2.49e-02, grad_scale: 8.0 +2023-02-05 21:12:37,551 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16818.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:54,032 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:12:57,237 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=16848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:10,252 INFO [train.py:901] (3/4) Epoch 3, batch 700, loss[loss=0.3877, simple_loss=0.414, pruned_loss=0.1807, over 8107.00 frames. ], tot_loss[loss=0.3516, simple_loss=0.3936, pruned_loss=0.1548, over 1569845.88 frames. ], batch size: 23, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:17,870 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.25 vs. 
limit=5.0 +2023-02-05 21:13:20,420 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=16882.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:23,049 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16886.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:13:38,452 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=16907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:44,822 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.086e+02 3.932e+02 4.613e+02 6.231e+02 2.383e+03, threshold=9.225e+02, percent-clipped=5.0 +2023-02-05 21:13:44,843 INFO [train.py:901] (3/4) Epoch 3, batch 750, loss[loss=0.4409, simple_loss=0.4608, pruned_loss=0.2105, over 8425.00 frames. ], tot_loss[loss=0.3527, simple_loss=0.3943, pruned_loss=0.1555, over 1581545.43 frames. ], batch size: 29, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:13:49,811 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=16924.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:13:59,051 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 21:14:07,686 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 21:14:19,204 INFO [train.py:901] (3/4) Epoch 3, batch 800, loss[loss=0.3375, simple_loss=0.3901, pruned_loss=0.1424, over 8198.00 frames. ], tot_loss[loss=0.3504, simple_loss=0.3928, pruned_loss=0.154, over 1591679.59 frames. ], batch size: 23, lr: 2.48e-02, grad_scale: 8.0 +2023-02-05 21:14:26,118 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4934, 2.0660, 3.4701, 1.0306, 2.3578, 1.6471, 1.4664, 2.0224], + device='cuda:3'), covar=tensor([0.1043, 0.1225, 0.0310, 0.1984, 0.0982, 0.1568, 0.1050, 0.1471], + device='cuda:3'), in_proj_covar=tensor([0.0418, 0.0388, 0.0447, 0.0471, 0.0523, 0.0458, 0.0414, 0.0523], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:14:34,633 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4308, 2.2731, 1.3469, 1.7788, 1.8463, 1.1358, 1.7821, 2.0069], + device='cuda:3'), covar=tensor([0.1461, 0.0464, 0.1207, 0.0854, 0.0799, 0.1290, 0.1077, 0.0803], + device='cuda:3'), in_proj_covar=tensor([0.0366, 0.0246, 0.0339, 0.0307, 0.0346, 0.0309, 0.0353, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 21:14:53,625 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.211e+02 3.452e+02 4.368e+02 5.287e+02 1.393e+03, threshold=8.735e+02, percent-clipped=4.0 +2023-02-05 21:14:53,646 INFO [train.py:901] (3/4) Epoch 3, batch 850, loss[loss=0.4117, simple_loss=0.4484, pruned_loss=0.1875, over 8517.00 frames. ], tot_loss[loss=0.3485, simple_loss=0.3913, pruned_loss=0.1528, over 1597821.63 frames. ], batch size: 26, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:28,356 INFO [train.py:901] (3/4) Epoch 3, batch 900, loss[loss=0.3459, simple_loss=0.3814, pruned_loss=0.1552, over 7919.00 frames. ], tot_loss[loss=0.3461, simple_loss=0.3897, pruned_loss=0.1512, over 1602496.95 frames. 
], batch size: 20, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:15:53,799 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17104.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:02,283 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.375e+02 3.695e+02 4.540e+02 5.760e+02 9.795e+02, threshold=9.080e+02, percent-clipped=3.0 +2023-02-05 21:16:02,304 INFO [train.py:901] (3/4) Epoch 3, batch 950, loss[loss=0.3276, simple_loss=0.375, pruned_loss=0.1401, over 8578.00 frames. ], tot_loss[loss=0.3432, simple_loss=0.388, pruned_loss=0.1492, over 1604596.47 frames. ], batch size: 34, lr: 2.47e-02, grad_scale: 8.0 +2023-02-05 21:16:10,477 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17129.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:16:25,711 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 21:16:36,866 INFO [train.py:901] (3/4) Epoch 3, batch 1000, loss[loss=0.3645, simple_loss=0.4193, pruned_loss=0.1549, over 8489.00 frames. ], tot_loss[loss=0.3477, simple_loss=0.3912, pruned_loss=0.1521, over 1609840.28 frames. ], batch size: 29, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:16:57,953 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 21:17:03,574 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:17:10,141 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 4.093e+02 4.952e+02 6.088e+02 1.030e+03, threshold=9.904e+02, percent-clipped=7.0 +2023-02-05 21:17:10,162 INFO [train.py:901] (3/4) Epoch 3, batch 1050, loss[loss=0.3746, simple_loss=0.4036, pruned_loss=0.1728, over 7773.00 frames. ], tot_loss[loss=0.3467, simple_loss=0.3905, pruned_loss=0.1514, over 1610951.49 frames. ], batch size: 19, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:10,176 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 21:17:19,694 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17230.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:17:45,190 INFO [train.py:901] (3/4) Epoch 3, batch 1100, loss[loss=0.3268, simple_loss=0.369, pruned_loss=0.1423, over 8495.00 frames. ], tot_loss[loss=0.3472, simple_loss=0.39, pruned_loss=0.1522, over 1605283.03 frames. ], batch size: 50, lr: 2.46e-02, grad_scale: 8.0 +2023-02-05 21:17:45,923 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:18:05,709 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-05 21:18:19,098 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.272e+02 3.840e+02 4.434e+02 5.714e+02 1.415e+03, threshold=8.869e+02, percent-clipped=3.0 +2023-02-05 21:18:19,120 INFO [train.py:901] (3/4) Epoch 3, batch 1150, loss[loss=0.4054, simple_loss=0.4399, pruned_loss=0.1854, over 8462.00 frames. ], tot_loss[loss=0.3464, simple_loss=0.39, pruned_loss=0.1514, over 1612789.42 frames. ], batch size: 25, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:18:22,466 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-05 21:18:38,641 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17345.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:18:52,840 INFO [train.py:901] (3/4) Epoch 3, batch 1200, loss[loss=0.3198, simple_loss=0.365, pruned_loss=0.1373, over 7537.00 frames. ], tot_loss[loss=0.3465, simple_loss=0.3905, pruned_loss=0.1512, over 1614718.67 frames. ], batch size: 18, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:19:02,176 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17380.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:04,946 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17383.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:19:17,680 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17401.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:19:28,364 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 3.772e+02 4.989e+02 5.905e+02 9.785e+02, threshold=9.978e+02, percent-clipped=4.0 +2023-02-05 21:19:28,385 INFO [train.py:901] (3/4) Epoch 3, batch 1250, loss[loss=0.2313, simple_loss=0.2962, pruned_loss=0.0832, over 7551.00 frames. ], tot_loss[loss=0.3478, simple_loss=0.3916, pruned_loss=0.152, over 1618189.30 frames. ], batch size: 18, lr: 2.45e-02, grad_scale: 8.0 +2023-02-05 21:19:57,608 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.29 vs. limit=5.0 +2023-02-05 21:20:02,604 INFO [train.py:901] (3/4) Epoch 3, batch 1300, loss[loss=0.2586, simple_loss=0.3406, pruned_loss=0.0883, over 8360.00 frames. ], tot_loss[loss=0.3465, simple_loss=0.3905, pruned_loss=0.1512, over 1616398.17 frames. ], batch size: 24, lr: 2.44e-02, grad_scale: 8.0 +2023-02-05 21:20:37,547 INFO [train.py:901] (3/4) Epoch 3, batch 1350, loss[loss=0.3485, simple_loss=0.4087, pruned_loss=0.1441, over 8437.00 frames. ], tot_loss[loss=0.3479, simple_loss=0.3908, pruned_loss=0.1525, over 1615639.97 frames. ], batch size: 29, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:20:38,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 4.258e+02 5.812e+02 8.345e+02 8.746e+03, threshold=1.162e+03, percent-clipped=16.0 +2023-02-05 21:20:58,310 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1000, 1.2544, 4.3378, 1.8375, 3.7806, 3.5517, 3.7485, 3.8033], + device='cuda:3'), covar=tensor([0.0384, 0.3078, 0.0240, 0.1703, 0.0758, 0.0409, 0.0389, 0.0434], + device='cuda:3'), in_proj_covar=tensor([0.0228, 0.0400, 0.0275, 0.0317, 0.0366, 0.0293, 0.0288, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 21:21:00,246 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17551.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:21:11,045 INFO [train.py:901] (3/4) Epoch 3, batch 1400, loss[loss=0.3144, simple_loss=0.3471, pruned_loss=0.1409, over 7932.00 frames. ], tot_loss[loss=0.3466, simple_loss=0.3899, pruned_loss=0.1517, over 1614067.92 frames. ], batch size: 19, lr: 2.44e-02, grad_scale: 4.0 +2023-02-05 21:21:25,493 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. 
limit=2.0 +2023-02-05 21:21:26,014 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7025, 2.2714, 3.5044, 2.9630, 2.7386, 2.1969, 1.5742, 1.6516], + device='cuda:3'), covar=tensor([0.0698, 0.0888, 0.0167, 0.0318, 0.0403, 0.0437, 0.0584, 0.0852], + device='cuda:3'), in_proj_covar=tensor([0.0556, 0.0479, 0.0377, 0.0422, 0.0528, 0.0450, 0.0469, 0.0479], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:21:34,759 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17601.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:21:46,906 INFO [train.py:901] (3/4) Epoch 3, batch 1450, loss[loss=0.3423, simple_loss=0.396, pruned_loss=0.1443, over 8465.00 frames. ], tot_loss[loss=0.3454, simple_loss=0.3895, pruned_loss=0.1507, over 1616374.06 frames. ], batch size: 25, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:21:47,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.138e+02 3.309e+02 4.161e+02 5.035e+02 1.114e+03, threshold=8.322e+02, percent-clipped=0.0 +2023-02-05 21:21:48,910 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-05 21:21:53,237 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17626.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:22:02,546 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17639.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:19,039 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17664.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,430 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17666.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:22:20,937 INFO [train.py:901] (3/4) Epoch 3, batch 1500, loss[loss=0.3356, simple_loss=0.3889, pruned_loss=0.1411, over 8103.00 frames. ], tot_loss[loss=0.3446, simple_loss=0.3884, pruned_loss=0.1504, over 1613098.47 frames. ], batch size: 23, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,186 INFO [train.py:901] (3/4) Epoch 3, batch 1550, loss[loss=0.3036, simple_loss=0.3768, pruned_loss=0.1152, over 8251.00 frames. ], tot_loss[loss=0.3477, simple_loss=0.3907, pruned_loss=0.1524, over 1616787.58 frames. ], batch size: 24, lr: 2.43e-02, grad_scale: 4.0 +2023-02-05 21:22:56,831 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.415e+02 3.678e+02 4.620e+02 5.892e+02 1.697e+03, threshold=9.239e+02, percent-clipped=9.0 +2023-02-05 21:22:57,030 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:01,086 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17724.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:04,116 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. 
limit=2.0 +2023-02-05 21:23:06,486 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5213, 4.5722, 4.1489, 2.0410, 4.1144, 4.0086, 4.2824, 3.8463], + device='cuda:3'), covar=tensor([0.0835, 0.0447, 0.0781, 0.3967, 0.0473, 0.0475, 0.1003, 0.0518], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0233, 0.0282, 0.0370, 0.0254, 0.0207, 0.0266, 0.0195], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:23:16,061 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=17745.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:23:17,958 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17748.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:23:30,933 INFO [train.py:901] (3/4) Epoch 3, batch 1600, loss[loss=0.3447, simple_loss=0.4029, pruned_loss=0.1432, over 8502.00 frames. ], tot_loss[loss=0.3467, simple_loss=0.3899, pruned_loss=0.1517, over 1617436.36 frames. ], batch size: 26, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:05,145 INFO [train.py:901] (3/4) Epoch 3, batch 1650, loss[loss=0.2999, simple_loss=0.3453, pruned_loss=0.1272, over 7672.00 frames. ], tot_loss[loss=0.3439, simple_loss=0.3878, pruned_loss=0.15, over 1615013.89 frames. ], batch size: 18, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:24:05,807 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.309e+02 4.132e+02 5.477e+02 8.650e+02, threshold=8.264e+02, percent-clipped=0.0 +2023-02-05 21:24:10,070 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-05 21:24:20,684 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17839.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:24:35,256 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=17860.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:24:39,698 INFO [train.py:901] (3/4) Epoch 3, batch 1700, loss[loss=0.3297, simple_loss=0.3689, pruned_loss=0.1452, over 7983.00 frames. ], tot_loss[loss=0.3446, simple_loss=0.3884, pruned_loss=0.1504, over 1618320.68 frames. ], batch size: 21, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:05,671 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.72 vs. limit=2.0 +2023-02-05 21:25:06,194 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0535, 1.1034, 1.0685, 1.0503, 0.8188, 1.1423, 0.0481, 0.8444], + device='cuda:3'), covar=tensor([0.2045, 0.1483, 0.1136, 0.1332, 0.3077, 0.0977, 0.3862, 0.1661], + device='cuda:3'), in_proj_covar=tensor([0.0110, 0.0102, 0.0084, 0.0144, 0.0149, 0.0081, 0.0148, 0.0111], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:25:13,890 INFO [train.py:901] (3/4) Epoch 3, batch 1750, loss[loss=0.3461, simple_loss=0.3911, pruned_loss=0.1505, over 8148.00 frames. ], tot_loss[loss=0.344, simple_loss=0.388, pruned_loss=0.15, over 1618818.58 frames. 
], batch size: 22, lr: 2.42e-02, grad_scale: 8.0 +2023-02-05 21:25:14,593 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.998e+02 5.161e+02 6.686e+02 1.470e+03, threshold=1.032e+03, percent-clipped=12.0 +2023-02-05 21:25:17,622 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=17922.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:35,812 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=17947.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:25:48,896 INFO [train.py:901] (3/4) Epoch 3, batch 1800, loss[loss=0.3212, simple_loss=0.3739, pruned_loss=0.1342, over 8327.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3874, pruned_loss=0.1493, over 1620415.90 frames. ], batch size: 26, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:25:51,116 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7910, 1.6130, 2.2216, 1.6778, 1.2937, 2.1098, 0.3564, 1.4151], + device='cuda:3'), covar=tensor([0.1745, 0.1936, 0.0861, 0.1804, 0.3240, 0.0721, 0.4211, 0.1610], + device='cuda:3'), in_proj_covar=tensor([0.0107, 0.0099, 0.0082, 0.0142, 0.0148, 0.0078, 0.0142, 0.0109], + device='cuda:3'), out_proj_covar=tensor([1.2939e-04, 1.2438e-04, 1.0166e-04, 1.6370e-04, 1.7154e-04, 9.9048e-05, + 1.6789e-04, 1.3721e-04], device='cuda:3') +2023-02-05 21:25:57,023 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=17978.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:25,060 INFO [train.py:901] (3/4) Epoch 3, batch 1850, loss[loss=0.2828, simple_loss=0.3382, pruned_loss=0.1137, over 7416.00 frames. ], tot_loss[loss=0.3422, simple_loss=0.3866, pruned_loss=0.1489, over 1617790.57 frames. ], batch size: 17, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:26:25,636 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.340e+02 3.564e+02 4.327e+02 5.819e+02 2.228e+03, threshold=8.654e+02, percent-clipped=8.0 +2023-02-05 21:26:55,949 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18062.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:26:59,907 INFO [train.py:901] (3/4) Epoch 3, batch 1900, loss[loss=0.3361, simple_loss=0.3955, pruned_loss=0.1383, over 8317.00 frames. ], tot_loss[loss=0.3401, simple_loss=0.3852, pruned_loss=0.1475, over 1614463.10 frames. ], batch size: 26, lr: 2.41e-02, grad_scale: 8.0 +2023-02-05 21:27:11,515 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0487, 1.6240, 3.4211, 1.3929, 2.0067, 3.8919, 3.2446, 3.3168], + device='cuda:3'), covar=tensor([0.0986, 0.1266, 0.0316, 0.1818, 0.0790, 0.0187, 0.0402, 0.0553], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0263, 0.0200, 0.0260, 0.0198, 0.0165, 0.0166, 0.0241], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:27:15,651 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.14 vs. limit=5.0 +2023-02-05 21:27:17,427 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18092.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:19,615 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:24,176 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-05 21:27:34,372 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18116.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:27:34,801 INFO [train.py:901] (3/4) Epoch 3, batch 1950, loss[loss=0.3344, simple_loss=0.3731, pruned_loss=0.1479, over 7535.00 frames. ], tot_loss[loss=0.3399, simple_loss=0.385, pruned_loss=0.1474, over 1612986.84 frames. ], batch size: 18, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:27:35,481 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.131e+02 3.385e+02 4.094e+02 5.586e+02 1.173e+03, threshold=8.188e+02, percent-clipped=3.0 +2023-02-05 21:27:36,202 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 21:27:37,029 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:27:51,206 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18141.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:27:55,034 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-05 21:28:09,114 INFO [train.py:901] (3/4) Epoch 3, batch 2000, loss[loss=0.3793, simple_loss=0.4077, pruned_loss=0.1755, over 7644.00 frames. ], tot_loss[loss=0.3395, simple_loss=0.385, pruned_loss=0.147, over 1614391.47 frames. ], batch size: 19, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:17,043 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18177.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:27,338 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18192.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:28,013 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18193.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:33,445 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1998, 1.5622, 1.2181, 1.6683, 1.3932, 1.0200, 1.0715, 1.3611], + device='cuda:3'), covar=tensor([0.0844, 0.0493, 0.0981, 0.0514, 0.0687, 0.1161, 0.0949, 0.0748], + device='cuda:3'), in_proj_covar=tensor([0.0365, 0.0255, 0.0344, 0.0312, 0.0352, 0.0314, 0.0356, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 21:28:38,141 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18207.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:28:44,798 INFO [train.py:901] (3/4) Epoch 3, batch 2050, loss[loss=0.3069, simple_loss=0.3587, pruned_loss=0.1276, over 8126.00 frames. ], tot_loss[loss=0.3403, simple_loss=0.3856, pruned_loss=0.1475, over 1617937.86 frames. 
], batch size: 22, lr: 2.40e-02, grad_scale: 8.0 +2023-02-05 21:28:46,138 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.474e+02 3.817e+02 4.995e+02 6.129e+02 1.664e+03, threshold=9.991e+02, percent-clipped=7.0 +2023-02-05 21:28:51,908 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5284, 1.2229, 3.3130, 1.4883, 2.2430, 3.7804, 3.5099, 3.2934], + device='cuda:3'), covar=tensor([0.1228, 0.1621, 0.0327, 0.1900, 0.0703, 0.0189, 0.0299, 0.0489], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0259, 0.0199, 0.0261, 0.0197, 0.0165, 0.0165, 0.0242], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:29:19,882 INFO [train.py:901] (3/4) Epoch 3, batch 2100, loss[loss=0.3814, simple_loss=0.4298, pruned_loss=0.1665, over 8750.00 frames. ], tot_loss[loss=0.3408, simple_loss=0.3863, pruned_loss=0.1477, over 1621360.23 frames. ], batch size: 34, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,182 INFO [train.py:901] (3/4) Epoch 3, batch 2150, loss[loss=0.3659, simple_loss=0.4068, pruned_loss=0.1625, over 8524.00 frames. ], tot_loss[loss=0.3403, simple_loss=0.3855, pruned_loss=0.1476, over 1614361.56 frames. ], batch size: 28, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:29:55,874 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.297e+02 3.744e+02 4.718e+02 5.936e+02 1.452e+03, threshold=9.436e+02, percent-clipped=4.0 +2023-02-05 21:29:59,203 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18322.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:30:31,065 INFO [train.py:901] (3/4) Epoch 3, batch 2200, loss[loss=0.3513, simple_loss=0.3783, pruned_loss=0.1622, over 8239.00 frames. ], tot_loss[loss=0.3395, simple_loss=0.3847, pruned_loss=0.1471, over 1612591.54 frames. ], batch size: 22, lr: 2.39e-02, grad_scale: 8.0 +2023-02-05 21:30:35,446 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 21:31:05,526 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5117, 4.7156, 4.0783, 1.7268, 4.0376, 4.0291, 4.3262, 3.4611], + device='cuda:3'), covar=tensor([0.0922, 0.0436, 0.0724, 0.4373, 0.0547, 0.0646, 0.0790, 0.0679], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0238, 0.0276, 0.0362, 0.0251, 0.0206, 0.0257, 0.0191], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:31:06,830 INFO [train.py:901] (3/4) Epoch 3, batch 2250, loss[loss=0.3125, simple_loss=0.3612, pruned_loss=0.1319, over 7816.00 frames. ], tot_loss[loss=0.3386, simple_loss=0.3842, pruned_loss=0.1465, over 1613563.26 frames. 
], batch size: 20, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:07,499 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.379e+02 3.424e+02 4.222e+02 5.561e+02 1.530e+03, threshold=8.445e+02, percent-clipped=2.0 +2023-02-05 21:31:18,203 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18433.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:21,475 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18437.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:36,363 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18458.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:39,815 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18463.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:31:42,285 INFO [train.py:901] (3/4) Epoch 3, batch 2300, loss[loss=0.331, simple_loss=0.3845, pruned_loss=0.1388, over 8230.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.3837, pruned_loss=0.1461, over 1615214.38 frames. ], batch size: 22, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:31:56,793 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18488.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:09,415 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-05 21:32:11,148 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18508.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:17,115 INFO [train.py:901] (3/4) Epoch 3, batch 2350, loss[loss=0.281, simple_loss=0.3377, pruned_loss=0.1122, over 7783.00 frames. ], tot_loss[loss=0.3373, simple_loss=0.3827, pruned_loss=0.1459, over 1613202.58 frames. ], batch size: 19, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:32:17,770 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.449e+02 3.759e+02 4.661e+02 5.652e+02 9.227e+02, threshold=9.323e+02, percent-clipped=1.0 +2023-02-05 21:32:23,353 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18526.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:32:30,480 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18536.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:31,169 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18537.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:32:32,660 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8471, 1.5081, 2.3905, 2.0661, 2.1404, 1.4863, 1.2056, 0.8669], + device='cuda:3'), covar=tensor([0.0785, 0.0841, 0.0200, 0.0301, 0.0301, 0.0420, 0.0555, 0.0725], + device='cuda:3'), in_proj_covar=tensor([0.0563, 0.0491, 0.0393, 0.0443, 0.0544, 0.0458, 0.0476, 0.0486], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:32:34,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.10 vs. limit=2.0 +2023-02-05 21:32:51,324 INFO [train.py:901] (3/4) Epoch 3, batch 2400, loss[loss=0.3062, simple_loss=0.3384, pruned_loss=0.137, over 7425.00 frames. ], tot_loss[loss=0.3395, simple_loss=0.3842, pruned_loss=0.1474, over 1614078.99 frames. ], batch size: 17, lr: 2.38e-02, grad_scale: 8.0 +2023-02-05 21:32:57,491 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-02-05 21:33:24,831 INFO [train.py:901] (3/4) Epoch 3, batch 2450, loss[loss=0.3211, simple_loss=0.3806, pruned_loss=0.1308, over 8558.00 frames. ], tot_loss[loss=0.3441, simple_loss=0.3878, pruned_loss=0.1502, over 1615960.89 frames. ], batch size: 31, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:33:25,548 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 3.618e+02 4.763e+02 6.456e+02 1.024e+03, threshold=9.527e+02, percent-clipped=2.0 +2023-02-05 21:33:49,161 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18651.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:49,834 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18652.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:33:56,583 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4001, 2.1934, 3.1766, 2.8040, 2.4281, 1.8479, 1.2597, 1.3582], + device='cuda:3'), covar=tensor([0.0774, 0.0895, 0.0210, 0.0391, 0.0469, 0.0478, 0.0627, 0.0967], + device='cuda:3'), in_proj_covar=tensor([0.0565, 0.0489, 0.0393, 0.0449, 0.0544, 0.0464, 0.0483, 0.0487], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:33:59,429 INFO [train.py:901] (3/4) Epoch 3, batch 2500, loss[loss=0.336, simple_loss=0.4026, pruned_loss=0.1347, over 8361.00 frames. ], tot_loss[loss=0.343, simple_loss=0.3873, pruned_loss=0.1494, over 1615670.57 frames. ], batch size: 24, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:17,088 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7126, 2.4299, 1.8229, 2.0889, 1.9439, 1.3637, 1.7485, 2.2055], + device='cuda:3'), covar=tensor([0.1057, 0.0568, 0.0844, 0.0595, 0.0702, 0.1247, 0.0933, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0360, 0.0246, 0.0336, 0.0308, 0.0331, 0.0311, 0.0350, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 21:34:17,684 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:18,344 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18693.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:34:22,212 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3443, 1.6274, 1.6176, 0.1975, 1.5606, 1.2178, 0.2486, 1.5902], + device='cuda:3'), covar=tensor([0.0110, 0.0067, 0.0080, 0.0156, 0.0090, 0.0249, 0.0205, 0.0057], + device='cuda:3'), in_proj_covar=tensor([0.0213, 0.0151, 0.0130, 0.0196, 0.0146, 0.0266, 0.0214, 0.0175], + device='cuda:3'), out_proj_covar=tensor([1.0783e-04, 7.5567e-05, 6.4230e-05, 9.5794e-05, 7.5737e-05, 1.4382e-04, + 1.1047e-04, 8.9179e-05], device='cuda:3') +2023-02-05 21:34:24,656 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.4677, 5.5791, 4.7266, 1.9710, 4.7662, 4.9826, 5.2199, 4.4546], + device='cuda:3'), covar=tensor([0.0751, 0.0434, 0.0870, 0.4714, 0.0551, 0.0636, 0.1040, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0239, 0.0277, 0.0363, 0.0255, 0.0211, 0.0267, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:34:33,854 INFO [train.py:901] (3/4) Epoch 3, batch 2550, loss[loss=0.3316, simple_loss=0.3842, pruned_loss=0.1395, over 8566.00 frames. 
], tot_loss[loss=0.3427, simple_loss=0.3869, pruned_loss=0.1493, over 1612793.82 frames. ], batch size: 31, lr: 2.37e-02, grad_scale: 8.0 +2023-02-05 21:34:34,506 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.889e+02 4.529e+02 5.619e+02 1.309e+03, threshold=9.058e+02, percent-clipped=5.0 +2023-02-05 21:34:34,733 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18718.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:35:02,133 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4313, 1.8371, 2.8819, 1.0928, 2.1906, 1.7986, 1.4765, 1.8310], + device='cuda:3'), covar=tensor([0.1240, 0.1245, 0.0445, 0.2266, 0.0983, 0.1753, 0.1153, 0.1546], + device='cuda:3'), in_proj_covar=tensor([0.0440, 0.0410, 0.0486, 0.0494, 0.0552, 0.0495, 0.0441, 0.0546], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:35:08,026 INFO [train.py:901] (3/4) Epoch 3, batch 2600, loss[loss=0.3404, simple_loss=0.3957, pruned_loss=0.1426, over 8361.00 frames. ], tot_loss[loss=0.3421, simple_loss=0.3872, pruned_loss=0.1485, over 1613110.39 frames. ], batch size: 24, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:25,812 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1431, 1.2297, 3.2031, 0.9143, 2.7604, 2.6781, 2.8627, 2.8294], + device='cuda:3'), covar=tensor([0.0396, 0.2716, 0.0495, 0.1985, 0.1219, 0.0589, 0.0484, 0.0616], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0419, 0.0279, 0.0322, 0.0388, 0.0303, 0.0295, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 21:35:30,914 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 21:35:44,454 INFO [train.py:901] (3/4) Epoch 3, batch 2650, loss[loss=0.2991, simple_loss=0.373, pruned_loss=0.1127, over 8139.00 frames. ], tot_loss[loss=0.3405, simple_loss=0.3868, pruned_loss=0.1471, over 1619291.35 frames. ], batch size: 22, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:35:45,136 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.448e+02 3.426e+02 4.272e+02 5.708e+02 1.020e+03, threshold=8.544e+02, percent-clipped=5.0 +2023-02-05 21:35:51,389 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3074, 4.5361, 3.8722, 1.6295, 3.8216, 3.8013, 4.1158, 3.3238], + device='cuda:3'), covar=tensor([0.0982, 0.0523, 0.0927, 0.4668, 0.0617, 0.0694, 0.1156, 0.0632], + device='cuda:3'), in_proj_covar=tensor([0.0326, 0.0231, 0.0267, 0.0351, 0.0249, 0.0204, 0.0256, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:36:08,387 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18852.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:19,357 INFO [train.py:901] (3/4) Epoch 3, batch 2700, loss[loss=0.301, simple_loss=0.3365, pruned_loss=0.1328, over 7281.00 frames. ], tot_loss[loss=0.3378, simple_loss=0.3845, pruned_loss=0.1455, over 1616754.98 frames. 
], batch size: 16, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:21,520 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=18870.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 21:36:47,165 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18907.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:47,855 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=18908.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:36:53,563 INFO [train.py:901] (3/4) Epoch 3, batch 2750, loss[loss=0.3582, simple_loss=0.4151, pruned_loss=0.1506, over 8506.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3843, pruned_loss=0.145, over 1617830.81 frames. ], batch size: 28, lr: 2.36e-02, grad_scale: 8.0 +2023-02-05 21:36:54,225 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.973e+02 3.360e+02 4.052e+02 5.079e+02 9.265e+02, threshold=8.105e+02, percent-clipped=2.0 +2023-02-05 21:37:05,016 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18932.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:05,654 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=18933.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:15,751 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=18948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:24,000 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.82 vs. limit=5.0 +2023-02-05 21:37:28,078 INFO [train.py:901] (3/4) Epoch 3, batch 2800, loss[loss=0.302, simple_loss=0.3409, pruned_loss=0.1315, over 7710.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3841, pruned_loss=0.1451, over 1614938.99 frames. ], batch size: 18, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:37:28,273 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:37:41,139 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=18985.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:38:03,343 INFO [train.py:901] (3/4) Epoch 3, batch 2850, loss[loss=0.3411, simple_loss=0.3842, pruned_loss=0.149, over 8246.00 frames. ], tot_loss[loss=0.3351, simple_loss=0.3827, pruned_loss=0.1437, over 1612721.80 frames. ], batch size: 22, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:38:03,471 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0042, 1.6056, 3.2387, 1.2498, 2.1634, 3.6930, 3.5175, 2.9726], + device='cuda:3'), covar=tensor([0.1285, 0.1549, 0.0456, 0.2422, 0.0818, 0.0325, 0.0365, 0.0851], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0257, 0.0198, 0.0256, 0.0199, 0.0167, 0.0173, 0.0244], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:38:03,922 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 3.511e+02 4.402e+02 5.555e+02 1.104e+03, threshold=8.804e+02, percent-clipped=5.0 +2023-02-05 21:38:15,999 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19036.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:38:37,625 INFO [train.py:901] (3/4) Epoch 3, batch 2900, loss[loss=0.3005, simple_loss=0.357, pruned_loss=0.122, over 8092.00 frames. ], tot_loss[loss=0.3374, simple_loss=0.3843, pruned_loss=0.1453, over 1617949.32 frames. 
], batch size: 21, lr: 2.35e-02, grad_scale: 8.0 +2023-02-05 21:38:42,289 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2048, 1.3688, 2.3714, 0.9896, 1.7002, 1.4046, 1.2522, 1.4966], + device='cuda:3'), covar=tensor([0.1320, 0.1441, 0.0495, 0.2230, 0.1038, 0.1965, 0.1241, 0.1332], + device='cuda:3'), in_proj_covar=tensor([0.0436, 0.0406, 0.0477, 0.0486, 0.0534, 0.0481, 0.0427, 0.0536], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:39:02,483 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-05 21:39:11,756 INFO [train.py:901] (3/4) Epoch 3, batch 2950, loss[loss=0.3826, simple_loss=0.4126, pruned_loss=0.1763, over 8631.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3826, pruned_loss=0.1436, over 1612074.65 frames. ], batch size: 34, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:12,407 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.613e+02 4.498e+02 5.900e+02 1.326e+03, threshold=8.996e+02, percent-clipped=8.0 +2023-02-05 21:39:13,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1690, 1.4905, 1.4554, 1.2611, 0.8967, 1.4860, 0.1423, 0.8329], + device='cuda:3'), covar=tensor([0.2395, 0.1454, 0.1055, 0.1966, 0.3823, 0.0870, 0.4486, 0.2177], + device='cuda:3'), in_proj_covar=tensor([0.0110, 0.0098, 0.0079, 0.0145, 0.0161, 0.0077, 0.0148, 0.0107], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:39:32,496 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19147.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:39:33,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2807, 1.8745, 2.1034, 1.4912, 0.8945, 2.0661, 0.3669, 1.0529], + device='cuda:3'), covar=tensor([0.3705, 0.1570, 0.1198, 0.2839, 0.4776, 0.0818, 0.5863, 0.2288], + device='cuda:3'), in_proj_covar=tensor([0.0112, 0.0098, 0.0078, 0.0146, 0.0162, 0.0077, 0.0148, 0.0106], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:39:35,219 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:39:37,925 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1249, 1.1680, 4.2715, 1.6656, 3.6804, 3.6622, 3.9187, 3.8816], + device='cuda:3'), covar=tensor([0.0445, 0.3319, 0.0310, 0.1723, 0.0941, 0.0398, 0.0387, 0.0440], + device='cuda:3'), in_proj_covar=tensor([0.0225, 0.0409, 0.0276, 0.0316, 0.0377, 0.0301, 0.0294, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 21:39:45,139 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5768, 1.0595, 1.2353, 0.8990, 1.2758, 1.0148, 0.9873, 1.3341], + device='cuda:3'), covar=tensor([0.0867, 0.2075, 0.2786, 0.2131, 0.0820, 0.2421, 0.1099, 0.0820], + device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0220, 0.0259, 0.0222, 0.0187, 0.0221, 0.0181, 0.0186], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:39:45,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1304, 1.4825, 1.1851, 1.9375, 0.7594, 0.9406, 1.3027, 1.5039], + 
device='cuda:3'), covar=tensor([0.1753, 0.1423, 0.2461, 0.0726, 0.2162, 0.3111, 0.1566, 0.1229], + device='cuda:3'), in_proj_covar=tensor([0.0292, 0.0306, 0.0306, 0.0226, 0.0292, 0.0310, 0.0324, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:39:46,216 INFO [train.py:901] (3/4) Epoch 3, batch 3000, loss[loss=0.3362, simple_loss=0.3843, pruned_loss=0.144, over 7955.00 frames. ], tot_loss[loss=0.3381, simple_loss=0.3848, pruned_loss=0.1457, over 1611619.44 frames. ], batch size: 21, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:39:46,216 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 21:39:58,665 INFO [train.py:935] (3/4) Epoch 3, validation: loss=0.2584, simple_loss=0.3473, pruned_loss=0.08481, over 944034.00 frames. +2023-02-05 21:39:58,666 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 21:40:08,538 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4615, 1.8668, 3.5383, 1.0333, 2.4268, 1.8623, 1.4923, 1.9144], + device='cuda:3'), covar=tensor([0.1316, 0.1401, 0.0444, 0.2279, 0.1180, 0.1777, 0.1092, 0.1899], + device='cuda:3'), in_proj_covar=tensor([0.0433, 0.0406, 0.0466, 0.0482, 0.0526, 0.0475, 0.0424, 0.0530], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:40:33,776 INFO [train.py:901] (3/4) Epoch 3, batch 3050, loss[loss=0.3186, simple_loss=0.3695, pruned_loss=0.1338, over 8250.00 frames. ], tot_loss[loss=0.3371, simple_loss=0.3837, pruned_loss=0.1453, over 1613801.15 frames. ], batch size: 24, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:40:34,454 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.016e+02 3.526e+02 4.458e+02 6.217e+02 1.354e+03, threshold=8.917e+02, percent-clipped=3.0 +2023-02-05 21:40:38,080 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:40:50,807 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19241.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:40:55,464 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19248.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:03,463 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3027, 1.9036, 3.2761, 2.8129, 2.5362, 1.8598, 1.3500, 1.3937], + device='cuda:3'), covar=tensor([0.0922, 0.1171, 0.0230, 0.0395, 0.0557, 0.0499, 0.0628, 0.1067], + device='cuda:3'), in_proj_covar=tensor([0.0573, 0.0494, 0.0399, 0.0448, 0.0558, 0.0466, 0.0488, 0.0488], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:41:07,579 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19266.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:41:08,006 INFO [train.py:901] (3/4) Epoch 3, batch 3100, loss[loss=0.3285, simple_loss=0.3598, pruned_loss=0.1487, over 7923.00 frames. ], tot_loss[loss=0.3364, simple_loss=0.3836, pruned_loss=0.1445, over 1614487.06 frames. 
], batch size: 20, lr: 2.34e-02, grad_scale: 8.0 +2023-02-05 21:41:26,069 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19292.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:41:43,825 INFO [train.py:901] (3/4) Epoch 3, batch 3150, loss[loss=0.3363, simple_loss=0.3801, pruned_loss=0.1462, over 8291.00 frames. ], tot_loss[loss=0.3354, simple_loss=0.3829, pruned_loss=0.144, over 1615755.77 frames. ], batch size: 23, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:41:44,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.556e+02 3.507e+02 4.387e+02 6.193e+02 1.521e+03, threshold=8.773e+02, percent-clipped=4.0 +2023-02-05 21:42:02,650 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6445, 1.5242, 3.2846, 1.1449, 2.2465, 3.4498, 3.3082, 2.9807], + device='cuda:3'), covar=tensor([0.1225, 0.1449, 0.0329, 0.1951, 0.0710, 0.0261, 0.0286, 0.0531], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0264, 0.0204, 0.0267, 0.0206, 0.0172, 0.0175, 0.0250], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:42:17,836 INFO [train.py:901] (3/4) Epoch 3, batch 3200, loss[loss=0.4073, simple_loss=0.4403, pruned_loss=0.1871, over 8538.00 frames. ], tot_loss[loss=0.335, simple_loss=0.3823, pruned_loss=0.1439, over 1611758.01 frames. ], batch size: 28, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:33,629 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2647, 1.5719, 1.4962, 1.2864, 1.7217, 1.3876, 1.5551, 1.8012], + device='cuda:3'), covar=tensor([0.0732, 0.1450, 0.1885, 0.1679, 0.0756, 0.1673, 0.0937, 0.0656], + device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0221, 0.0259, 0.0223, 0.0189, 0.0224, 0.0185, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0006, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:42:45,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:45,914 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19407.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:49,227 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19412.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:42:53,531 INFO [train.py:901] (3/4) Epoch 3, batch 3250, loss[loss=0.3736, simple_loss=0.3979, pruned_loss=0.1746, over 7680.00 frames. ], tot_loss[loss=0.3352, simple_loss=0.3826, pruned_loss=0.1439, over 1609996.60 frames. ], batch size: 18, lr: 2.33e-02, grad_scale: 8.0 +2023-02-05 21:42:54,130 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 3.440e+02 4.583e+02 5.736e+02 1.373e+03, threshold=9.167e+02, percent-clipped=8.0 +2023-02-05 21:43:03,731 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:43:13,590 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 21:43:26,831 INFO [train.py:901] (3/4) Epoch 3, batch 3300, loss[loss=0.4007, simple_loss=0.4259, pruned_loss=0.1878, over 7973.00 frames. ], tot_loss[loss=0.3362, simple_loss=0.3833, pruned_loss=0.1445, over 1610137.34 frames. ], batch size: 21, lr: 2.32e-02, grad_scale: 8.0 +2023-02-05 21:43:40,000 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. 
limit=2.0 +2023-02-05 21:43:43,381 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19491.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:44:01,025 INFO [train.py:901] (3/4) Epoch 3, batch 3350, loss[loss=0.3184, simple_loss=0.3751, pruned_loss=0.1308, over 8038.00 frames. ], tot_loss[loss=0.3371, simple_loss=0.3839, pruned_loss=0.1451, over 1608549.69 frames. ], batch size: 22, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:44:01,692 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 3.690e+02 4.650e+02 5.581e+02 1.223e+03, threshold=9.300e+02, percent-clipped=5.0 +2023-02-05 21:44:26,566 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7518, 3.0926, 2.4874, 3.8040, 1.6025, 1.6520, 2.2503, 3.1852], + device='cuda:3'), covar=tensor([0.0968, 0.1511, 0.1644, 0.0472, 0.2555, 0.2696, 0.2413, 0.1116], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0313, 0.0308, 0.0235, 0.0300, 0.0314, 0.0335, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:44:35,816 INFO [train.py:901] (3/4) Epoch 3, batch 3400, loss[loss=0.3343, simple_loss=0.3798, pruned_loss=0.1443, over 8142.00 frames. ], tot_loss[loss=0.3372, simple_loss=0.3842, pruned_loss=0.1451, over 1609988.01 frames. ], batch size: 22, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:44:55,847 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1527, 2.2601, 4.0010, 3.5904, 3.0941, 2.1892, 1.4839, 1.8622], + device='cuda:3'), covar=tensor([0.0767, 0.1227, 0.0208, 0.0357, 0.0480, 0.0493, 0.0642, 0.1055], + device='cuda:3'), in_proj_covar=tensor([0.0589, 0.0509, 0.0408, 0.0464, 0.0563, 0.0479, 0.0496, 0.0490], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:45:02,612 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19606.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:45:09,804 INFO [train.py:901] (3/4) Epoch 3, batch 3450, loss[loss=0.35, simple_loss=0.391, pruned_loss=0.1545, over 8328.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.3844, pruned_loss=0.1457, over 1611291.90 frames. ], batch size: 25, lr: 2.32e-02, grad_scale: 16.0 +2023-02-05 21:45:10,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.543e+02 3.801e+02 4.733e+02 6.108e+02 1.526e+03, threshold=9.466e+02, percent-clipped=4.0 +2023-02-05 21:45:23,377 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=19636.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:42,893 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19663.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:45:45,374 INFO [train.py:901] (3/4) Epoch 3, batch 3500, loss[loss=0.3426, simple_loss=0.3991, pruned_loss=0.143, over 7981.00 frames. ], tot_loss[loss=0.3367, simple_loss=0.3835, pruned_loss=0.145, over 1609257.38 frames. ], batch size: 21, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:45:57,878 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 21:45:58,025 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-05 21:45:59,533 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19688.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:19,291 INFO [train.py:901] (3/4) Epoch 3, batch 3550, loss[loss=0.2737, simple_loss=0.3247, pruned_loss=0.1113, over 7533.00 frames. ], tot_loss[loss=0.3361, simple_loss=0.3826, pruned_loss=0.1448, over 1605645.15 frames. ], batch size: 18, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:46:19,958 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.944e+02 3.514e+02 4.193e+02 5.166e+02 1.109e+03, threshold=8.387e+02, percent-clipped=2.0 +2023-02-05 21:46:34,071 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-05 21:46:40,048 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-05 21:46:46,367 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19756.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:46:54,373 INFO [train.py:901] (3/4) Epoch 3, batch 3600, loss[loss=0.339, simple_loss=0.3707, pruned_loss=0.1536, over 7656.00 frames. ], tot_loss[loss=0.3362, simple_loss=0.3832, pruned_loss=0.1446, over 1610286.31 frames. ], batch size: 19, lr: 2.31e-02, grad_scale: 16.0 +2023-02-05 21:47:11,110 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6087, 1.7662, 1.4246, 2.4268, 1.2206, 1.0979, 1.5332, 2.1183], + device='cuda:3'), covar=tensor([0.1392, 0.1416, 0.2142, 0.0623, 0.1811, 0.2949, 0.1881, 0.0956], + device='cuda:3'), in_proj_covar=tensor([0.0303, 0.0308, 0.0310, 0.0235, 0.0300, 0.0318, 0.0338, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:47:28,233 INFO [train.py:901] (3/4) Epoch 3, batch 3650, loss[loss=0.3587, simple_loss=0.402, pruned_loss=0.1577, over 8648.00 frames. ], tot_loss[loss=0.339, simple_loss=0.3854, pruned_loss=0.1464, over 1619147.84 frames. ], batch size: 39, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:47:28,888 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 3.610e+02 4.497e+02 5.952e+02 1.837e+03, threshold=8.994e+02, percent-clipped=7.0 +2023-02-05 21:47:58,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1552, 1.5897, 1.3868, 1.0544, 1.6305, 1.3507, 1.2853, 1.7546], + device='cuda:3'), covar=tensor([0.0717, 0.1385, 0.2034, 0.1756, 0.0722, 0.1749, 0.1009, 0.0634], + device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0225, 0.0260, 0.0227, 0.0189, 0.0226, 0.0187, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:47:58,735 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 21:47:59,613 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=19862.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:48:02,765 INFO [train.py:901] (3/4) Epoch 3, batch 3700, loss[loss=0.3392, simple_loss=0.3782, pruned_loss=0.1501, over 8092.00 frames. ], tot_loss[loss=0.3385, simple_loss=0.3855, pruned_loss=0.1457, over 1621368.68 frames. 
], batch size: 21, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:05,649 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=19871.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:48:17,450 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=19887.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 21:48:37,848 INFO [train.py:901] (3/4) Epoch 3, batch 3750, loss[loss=0.3529, simple_loss=0.383, pruned_loss=0.1614, over 8236.00 frames. ], tot_loss[loss=0.3375, simple_loss=0.3849, pruned_loss=0.145, over 1621600.18 frames. ], batch size: 22, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:48:38,369 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 3.342e+02 4.116e+02 5.480e+02 1.463e+03, threshold=8.233e+02, percent-clipped=1.0 +2023-02-05 21:49:08,130 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0015, 1.5396, 3.3139, 1.2175, 2.3725, 3.6059, 3.3480, 3.1478], + device='cuda:3'), covar=tensor([0.1033, 0.1311, 0.0326, 0.1815, 0.0617, 0.0207, 0.0296, 0.0511], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0264, 0.0204, 0.0262, 0.0209, 0.0174, 0.0176, 0.0248], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:49:10,146 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9724, 1.0614, 4.1720, 1.5497, 3.5759, 3.5270, 3.7366, 3.6817], + device='cuda:3'), covar=tensor([0.0317, 0.3374, 0.0269, 0.1697, 0.0914, 0.0396, 0.0381, 0.0414], + device='cuda:3'), in_proj_covar=tensor([0.0244, 0.0432, 0.0295, 0.0332, 0.0394, 0.0314, 0.0311, 0.0334], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 21:49:12,035 INFO [train.py:901] (3/4) Epoch 3, batch 3800, loss[loss=0.3344, simple_loss=0.3741, pruned_loss=0.1474, over 7815.00 frames. ], tot_loss[loss=0.3379, simple_loss=0.3851, pruned_loss=0.1454, over 1614310.90 frames. ], batch size: 20, lr: 2.30e-02, grad_scale: 16.0 +2023-02-05 21:49:16,347 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3626, 1.7845, 2.0224, 1.7388, 0.8942, 2.0043, 0.3546, 1.1878], + device='cuda:3'), covar=tensor([0.3563, 0.1451, 0.1146, 0.1762, 0.5075, 0.0608, 0.5422, 0.2003], + device='cuda:3'), in_proj_covar=tensor([0.0113, 0.0094, 0.0082, 0.0151, 0.0170, 0.0077, 0.0155, 0.0110], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 21:49:20,778 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=19980.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:49:48,448 INFO [train.py:901] (3/4) Epoch 3, batch 3850, loss[loss=0.3466, simple_loss=0.39, pruned_loss=0.1516, over 8434.00 frames. ], tot_loss[loss=0.3367, simple_loss=0.384, pruned_loss=0.1447, over 1616934.52 frames. ], batch size: 27, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:49:49,086 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 3.536e+02 4.444e+02 5.257e+02 1.055e+03, threshold=8.889e+02, percent-clipped=4.0 +2023-02-05 21:50:01,908 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-05 21:50:02,728 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20038.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:09,036 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-05 21:50:22,592 INFO [train.py:901] (3/4) Epoch 3, batch 3900, loss[loss=0.2593, simple_loss=0.3341, pruned_loss=0.09221, over 8320.00 frames. ], tot_loss[loss=0.3364, simple_loss=0.3839, pruned_loss=0.1444, over 1617170.59 frames. ], batch size: 25, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:41,541 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20095.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:49,334 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-05 21:50:50,426 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20107.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:50:56,837 INFO [train.py:901] (3/4) Epoch 3, batch 3950, loss[loss=0.3293, simple_loss=0.3648, pruned_loss=0.1469, over 7201.00 frames. ], tot_loss[loss=0.3348, simple_loss=0.3825, pruned_loss=0.1436, over 1618040.76 frames. ], batch size: 16, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:50:57,400 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.523e+02 3.492e+02 4.461e+02 6.032e+02 1.371e+03, threshold=8.922e+02, percent-clipped=4.0 +2023-02-05 21:51:05,051 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:06,979 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20130.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:21,477 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20152.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:51:31,203 INFO [train.py:901] (3/4) Epoch 3, batch 4000, loss[loss=0.3702, simple_loss=0.4172, pruned_loss=0.1616, over 8364.00 frames. ], tot_loss[loss=0.3335, simple_loss=0.3818, pruned_loss=0.1426, over 1619212.31 frames. ], batch size: 24, lr: 2.29e-02, grad_scale: 16.0 +2023-02-05 21:51:45,720 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.21 vs. limit=5.0 +2023-02-05 21:52:05,184 INFO [train.py:901] (3/4) Epoch 3, batch 4050, loss[loss=0.3402, simple_loss=0.384, pruned_loss=0.1482, over 8185.00 frames. ], tot_loss[loss=0.333, simple_loss=0.3813, pruned_loss=0.1423, over 1614110.03 frames. ], batch size: 23, lr: 2.28e-02, grad_scale: 16.0 +2023-02-05 21:52:05,854 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.226e+02 3.505e+02 4.242e+02 5.307e+02 1.364e+03, threshold=8.485e+02, percent-clipped=4.0 +2023-02-05 21:52:09,351 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20222.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:52:40,358 INFO [train.py:901] (3/4) Epoch 3, batch 4100, loss[loss=0.3216, simple_loss=0.3838, pruned_loss=0.1297, over 8449.00 frames. ], tot_loss[loss=0.3347, simple_loss=0.3829, pruned_loss=0.1433, over 1614923.62 frames. 
], batch size: 27, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:52:46,100 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0728, 1.7382, 2.8660, 2.3308, 2.4045, 1.8251, 1.2483, 1.0668], + device='cuda:3'), covar=tensor([0.1020, 0.1022, 0.0244, 0.0456, 0.0468, 0.0552, 0.0701, 0.1035], + device='cuda:3'), in_proj_covar=tensor([0.0586, 0.0504, 0.0420, 0.0459, 0.0560, 0.0470, 0.0491, 0.0490], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:53:14,414 INFO [train.py:901] (3/4) Epoch 3, batch 4150, loss[loss=0.377, simple_loss=0.4201, pruned_loss=0.1669, over 8496.00 frames. ], tot_loss[loss=0.3366, simple_loss=0.3843, pruned_loss=0.1444, over 1616354.08 frames. ], batch size: 26, lr: 2.28e-02, grad_scale: 8.0 +2023-02-05 21:53:15,797 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.167e+02 3.849e+02 4.660e+02 5.932e+02 1.097e+03, threshold=9.320e+02, percent-clipped=6.0 +2023-02-05 21:53:38,371 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20351.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:53:50,004 INFO [train.py:901] (3/4) Epoch 3, batch 4200, loss[loss=0.3311, simple_loss=0.3727, pruned_loss=0.1447, over 7964.00 frames. ], tot_loss[loss=0.3363, simple_loss=0.3839, pruned_loss=0.1444, over 1615240.94 frames. ], batch size: 21, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:53:55,450 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 21:53:56,308 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20376.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:00,121 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20382.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:01,092 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.04 vs. limit=2.0 +2023-02-05 21:54:07,227 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 21:54:16,288 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20406.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:16,809 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 21:54:24,058 INFO [train.py:901] (3/4) Epoch 3, batch 4250, loss[loss=0.3359, simple_loss=0.3932, pruned_loss=0.1394, over 8631.00 frames. ], tot_loss[loss=0.3389, simple_loss=0.3855, pruned_loss=0.1462, over 1615355.25 frames. 
], batch size: 34, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:54:25,368 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.360e+02 3.627e+02 5.036e+02 6.332e+02 1.636e+03, threshold=1.007e+03, percent-clipped=4.0 +2023-02-05 21:54:29,672 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1293, 1.2199, 1.1958, 0.2273, 1.1347, 0.9819, 0.0904, 1.2227], + device='cuda:3'), covar=tensor([0.0088, 0.0068, 0.0050, 0.0138, 0.0074, 0.0212, 0.0164, 0.0064], + device='cuda:3'), in_proj_covar=tensor([0.0227, 0.0164, 0.0133, 0.0212, 0.0162, 0.0279, 0.0216, 0.0189], + device='cuda:3'), out_proj_covar=tensor([1.0911e-04, 7.8531e-05, 6.2246e-05, 9.8894e-05, 7.8683e-05, 1.4315e-04, + 1.0563e-04, 9.0656e-05], device='cuda:3') +2023-02-05 21:54:37,033 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20436.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:47,727 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20451.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:54:50,595 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1977, 1.5614, 1.3738, 1.0924, 1.5972, 1.4884, 1.6396, 1.5802], + device='cuda:3'), covar=tensor([0.0704, 0.1372, 0.1985, 0.1774, 0.0751, 0.1587, 0.0920, 0.0661], + device='cuda:3'), in_proj_covar=tensor([0.0182, 0.0221, 0.0257, 0.0223, 0.0187, 0.0222, 0.0182, 0.0184], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:54:54,861 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-05 21:54:59,266 INFO [train.py:901] (3/4) Epoch 3, batch 4300, loss[loss=0.3065, simple_loss=0.3524, pruned_loss=0.1303, over 7812.00 frames. ], tot_loss[loss=0.3385, simple_loss=0.3854, pruned_loss=0.1459, over 1612509.80 frames. ], batch size: 20, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:04,855 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20474.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:20,175 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20497.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:55:33,534 INFO [train.py:901] (3/4) Epoch 3, batch 4350, loss[loss=0.328, simple_loss=0.3826, pruned_loss=0.1367, over 8500.00 frames. ], tot_loss[loss=0.3352, simple_loss=0.3825, pruned_loss=0.144, over 1608454.80 frames. ], batch size: 29, lr: 2.27e-02, grad_scale: 8.0 +2023-02-05 21:55:34,898 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.452e+02 4.356e+02 5.638e+02 1.577e+03, threshold=8.711e+02, percent-clipped=2.0 +2023-02-05 21:55:46,525 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 21:55:50,697 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.91 vs. limit=2.0 +2023-02-05 21:56:06,883 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:06,987 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20566.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:07,507 INFO [train.py:901] (3/4) Epoch 3, batch 4400, loss[loss=0.3995, simple_loss=0.4271, pruned_loss=0.1859, over 8582.00 frames. ], tot_loss[loss=0.3363, simple_loss=0.3834, pruned_loss=0.1446, over 1611547.37 frames. 
], batch size: 39, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:13,113 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3494, 1.7675, 1.9496, 1.4296, 1.0375, 1.8409, 0.2512, 1.2657], + device='cuda:3'), covar=tensor([0.3834, 0.1358, 0.1160, 0.2168, 0.4882, 0.0927, 0.5527, 0.2134], + device='cuda:3'), in_proj_covar=tensor([0.0113, 0.0097, 0.0084, 0.0147, 0.0169, 0.0078, 0.0152, 0.0117], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:56:22,875 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-05 21:56:24,128 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20589.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:27,483 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-05 21:56:36,783 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=20606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:56:44,140 INFO [train.py:901] (3/4) Epoch 3, batch 4450, loss[loss=0.2858, simple_loss=0.3497, pruned_loss=0.1109, over 8130.00 frames. ], tot_loss[loss=0.3336, simple_loss=0.3809, pruned_loss=0.1431, over 1607386.81 frames. ], batch size: 22, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:56:45,430 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.404e+02 4.420e+02 6.069e+02 1.310e+03, threshold=8.839e+02, percent-clipped=8.0 +2023-02-05 21:57:12,227 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-05 21:57:18,492 INFO [train.py:901] (3/4) Epoch 3, batch 4500, loss[loss=0.2909, simple_loss=0.3547, pruned_loss=0.1135, over 8083.00 frames. ], tot_loss[loss=0.3326, simple_loss=0.3809, pruned_loss=0.1421, over 1615496.52 frames. ], batch size: 21, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:20,853 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 21:57:27,445 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20681.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:57:38,117 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1112, 1.6426, 2.9312, 2.3598, 2.3290, 1.7711, 1.3362, 1.1184], + device='cuda:3'), covar=tensor([0.1087, 0.1254, 0.0229, 0.0487, 0.0475, 0.0604, 0.0707, 0.1075], + device='cuda:3'), in_proj_covar=tensor([0.0601, 0.0514, 0.0433, 0.0470, 0.0585, 0.0476, 0.0497, 0.0503], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 21:57:53,171 INFO [train.py:901] (3/4) Epoch 3, batch 4550, loss[loss=0.32, simple_loss=0.3845, pruned_loss=0.1277, over 8328.00 frames. ], tot_loss[loss=0.333, simple_loss=0.3804, pruned_loss=0.1428, over 1614834.92 frames. 
], batch size: 25, lr: 2.26e-02, grad_scale: 8.0 +2023-02-05 21:57:54,484 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.207e+02 3.483e+02 4.570e+02 6.300e+02 1.347e+03, threshold=9.139e+02, percent-clipped=2.0 +2023-02-05 21:58:14,829 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20750.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:17,074 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:23,773 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5766, 1.9742, 3.2012, 1.0360, 2.0110, 1.8176, 1.4859, 1.8611], + device='cuda:3'), covar=tensor([0.1217, 0.1233, 0.0426, 0.2435, 0.1184, 0.1828, 0.1147, 0.1639], + device='cuda:3'), in_proj_covar=tensor([0.0439, 0.0410, 0.0481, 0.0501, 0.0551, 0.0482, 0.0427, 0.0544], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 21:58:26,243 INFO [train.py:901] (3/4) Epoch 3, batch 4600, loss[loss=0.3588, simple_loss=0.4041, pruned_loss=0.1568, over 8506.00 frames. ], tot_loss[loss=0.3324, simple_loss=0.3796, pruned_loss=0.1426, over 1613379.74 frames. ], batch size: 26, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:58:27,117 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2487, 2.3881, 2.0896, 3.0105, 1.2342, 1.4414, 1.9930, 2.5429], + device='cuda:3'), covar=tensor([0.1019, 0.1387, 0.1439, 0.0551, 0.2105, 0.2507, 0.1900, 0.1066], + device='cuda:3'), in_proj_covar=tensor([0.0303, 0.0305, 0.0305, 0.0225, 0.0296, 0.0316, 0.0329, 0.0299], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 21:58:34,044 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-05 21:58:34,570 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20778.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:58:35,797 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20780.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:00,498 INFO [train.py:901] (3/4) Epoch 3, batch 4650, loss[loss=0.3013, simple_loss=0.348, pruned_loss=0.1273, over 7447.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3788, pruned_loss=0.1422, over 1613131.87 frames. ], batch size: 17, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:02,524 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 3.299e+02 4.239e+02 5.426e+02 9.400e+02, threshold=8.478e+02, percent-clipped=1.0 +2023-02-05 21:59:04,822 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20822.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:21,386 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20845.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:22,744 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20847.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:34,649 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20865.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:35,793 INFO [train.py:901] (3/4) Epoch 3, batch 4700, loss[loss=0.3783, simple_loss=0.4179, pruned_loss=0.1694, over 8284.00 frames. ], tot_loss[loss=0.332, simple_loss=0.3792, pruned_loss=0.1424, over 1615003.05 frames. 
], batch size: 23, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 21:59:37,922 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20870.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 21:59:54,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=20895.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:08,984 INFO [train.py:901] (3/4) Epoch 3, batch 4750, loss[loss=0.2904, simple_loss=0.3529, pruned_loss=0.1139, over 8613.00 frames. ], tot_loss[loss=0.3337, simple_loss=0.3806, pruned_loss=0.1434, over 1614203.94 frames. ], batch size: 34, lr: 2.25e-02, grad_scale: 8.0 +2023-02-05 22:00:10,302 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.634e+02 4.432e+02 5.821e+02 1.296e+03, threshold=8.863e+02, percent-clipped=5.0 +2023-02-05 22:00:23,260 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=20937.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:24,392 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 22:00:26,472 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-05 22:00:32,660 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=20950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:41,462 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=20962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:00:44,560 INFO [train.py:901] (3/4) Epoch 3, batch 4800, loss[loss=0.3329, simple_loss=0.3929, pruned_loss=0.1364, over 8183.00 frames. ], tot_loss[loss=0.3318, simple_loss=0.3797, pruned_loss=0.1419, over 1609418.37 frames. ], batch size: 23, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:18,124 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 22:01:18,803 INFO [train.py:901] (3/4) Epoch 3, batch 4850, loss[loss=0.3442, simple_loss=0.3877, pruned_loss=0.1503, over 8630.00 frames. ], tot_loss[loss=0.3317, simple_loss=0.3796, pruned_loss=0.1419, over 1612706.97 frames. ], batch size: 39, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:20,192 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.459e+02 3.687e+02 4.412e+02 5.668e+02 1.155e+03, threshold=8.825e+02, percent-clipped=6.0 +2023-02-05 22:01:52,009 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21065.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:01:53,057 INFO [train.py:901] (3/4) Epoch 3, batch 4900, loss[loss=0.3341, simple_loss=0.3813, pruned_loss=0.1434, over 8085.00 frames. ], tot_loss[loss=0.3321, simple_loss=0.3799, pruned_loss=0.1422, over 1611873.81 frames. 
], batch size: 21, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:01:55,912 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:21,872 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.4226, 1.6576, 5.3877, 2.3918, 4.8508, 4.7282, 5.0181, 5.1177], + device='cuda:3'), covar=tensor([0.0212, 0.2969, 0.0170, 0.1427, 0.0654, 0.0266, 0.0266, 0.0220], + device='cuda:3'), in_proj_covar=tensor([0.0245, 0.0426, 0.0294, 0.0331, 0.0396, 0.0314, 0.0312, 0.0339], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 22:02:27,714 INFO [train.py:901] (3/4) Epoch 3, batch 4950, loss[loss=0.356, simple_loss=0.4062, pruned_loss=0.1529, over 8339.00 frames. ], tot_loss[loss=0.3304, simple_loss=0.3788, pruned_loss=0.1411, over 1613317.97 frames. ], batch size: 25, lr: 2.24e-02, grad_scale: 8.0 +2023-02-05 22:02:29,094 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.083e+02 3.569e+02 4.502e+02 6.229e+02 1.133e+03, threshold=9.004e+02, percent-clipped=2.0 +2023-02-05 22:02:30,675 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21121.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:41,334 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21136.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:48,071 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21146.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:02:51,481 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21151.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:01,832 INFO [train.py:901] (3/4) Epoch 3, batch 5000, loss[loss=0.3112, simple_loss=0.3653, pruned_loss=0.1285, over 8458.00 frames. ], tot_loss[loss=0.33, simple_loss=0.3786, pruned_loss=0.1407, over 1616405.90 frames. ], batch size: 27, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:08,596 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0793, 1.1665, 3.1872, 0.9591, 2.5629, 2.6709, 2.8366, 2.7933], + device='cuda:3'), covar=tensor([0.0451, 0.2905, 0.0465, 0.1934, 0.1366, 0.0593, 0.0517, 0.0633], + device='cuda:3'), in_proj_covar=tensor([0.0248, 0.0424, 0.0296, 0.0331, 0.0400, 0.0318, 0.0311, 0.0343], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 22:03:08,669 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21176.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:03:37,221 INFO [train.py:901] (3/4) Epoch 3, batch 5050, loss[loss=0.3833, simple_loss=0.428, pruned_loss=0.1693, over 8104.00 frames. ], tot_loss[loss=0.3301, simple_loss=0.3795, pruned_loss=0.1403, over 1619849.27 frames. ], batch size: 23, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:03:38,541 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.043e+02 3.325e+02 4.224e+02 5.254e+02 1.187e+03, threshold=8.447e+02, percent-clipped=3.0 +2023-02-05 22:03:57,071 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 22:04:11,239 INFO [train.py:901] (3/4) Epoch 3, batch 5100, loss[loss=0.3899, simple_loss=0.4108, pruned_loss=0.1845, over 6918.00 frames. ], tot_loss[loss=0.3315, simple_loss=0.3802, pruned_loss=0.1413, over 1618011.82 frames. 
], batch size: 71, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:34,346 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0866, 1.6439, 1.9365, 1.3663, 1.0441, 1.7681, 0.1640, 0.9704], + device='cuda:3'), covar=tensor([0.2715, 0.2165, 0.0842, 0.1880, 0.4594, 0.0989, 0.5264, 0.2482], + device='cuda:3'), in_proj_covar=tensor([0.0115, 0.0107, 0.0083, 0.0154, 0.0175, 0.0085, 0.0154, 0.0117], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:04:43,220 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7812, 1.5828, 1.5993, 1.1835, 1.6169, 1.5801, 2.0385, 1.8556], + device='cuda:3'), covar=tensor([0.0660, 0.1404, 0.2080, 0.1681, 0.0810, 0.1646, 0.0808, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0182, 0.0218, 0.0254, 0.0217, 0.0181, 0.0218, 0.0179, 0.0180], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 22:04:46,363 INFO [train.py:901] (3/4) Epoch 3, batch 5150, loss[loss=0.3338, simple_loss=0.3875, pruned_loss=0.14, over 8131.00 frames. ], tot_loss[loss=0.3298, simple_loss=0.3791, pruned_loss=0.1402, over 1618590.84 frames. ], batch size: 22, lr: 2.23e-02, grad_scale: 8.0 +2023-02-05 22:04:47,675 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.142e+02 3.453e+02 4.061e+02 5.332e+02 1.278e+03, threshold=8.122e+02, percent-clipped=4.0 +2023-02-05 22:04:50,017 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21321.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:04:56,024 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21330.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,501 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:06,558 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:19,997 INFO [train.py:901] (3/4) Epoch 3, batch 5200, loss[loss=0.3587, simple_loss=0.3691, pruned_loss=0.1742, over 7561.00 frames. ], tot_loss[loss=0.3331, simple_loss=0.3816, pruned_loss=0.1423, over 1619068.38 frames. ], batch size: 18, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:52,850 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21414.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:05:54,862 INFO [train.py:901] (3/4) Epoch 3, batch 5250, loss[loss=0.2852, simple_loss=0.3412, pruned_loss=0.1146, over 7542.00 frames. ], tot_loss[loss=0.3329, simple_loss=0.3807, pruned_loss=0.1426, over 1615204.59 frames. ], batch size: 18, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:05:54,879 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 22:05:56,260 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 3.353e+02 4.281e+02 5.765e+02 2.364e+03, threshold=8.563e+02, percent-clipped=11.0 +2023-02-05 22:06:30,401 INFO [train.py:901] (3/4) Epoch 3, batch 5300, loss[loss=0.2929, simple_loss=0.3495, pruned_loss=0.1182, over 8621.00 frames. ], tot_loss[loss=0.3322, simple_loss=0.3808, pruned_loss=0.1418, over 1617001.25 frames. 
], batch size: 31, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:06:39,463 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21480.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:04,805 INFO [train.py:901] (3/4) Epoch 3, batch 5350, loss[loss=0.2994, simple_loss=0.3614, pruned_loss=0.1187, over 8738.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3805, pruned_loss=0.1413, over 1620322.44 frames. ], batch size: 34, lr: 2.22e-02, grad_scale: 8.0 +2023-02-05 22:07:06,084 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 3.338e+02 4.128e+02 5.460e+02 1.129e+03, threshold=8.255e+02, percent-clipped=3.0 +2023-02-05 22:07:13,624 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21529.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:07:14,363 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5629, 2.0484, 3.3649, 0.9935, 2.5503, 1.8804, 1.7074, 1.9737], + device='cuda:3'), covar=tensor([0.1269, 0.1377, 0.0476, 0.2577, 0.1036, 0.2016, 0.1005, 0.1885], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0419, 0.0493, 0.0512, 0.0566, 0.0499, 0.0435, 0.0569], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 22:07:40,167 INFO [train.py:901] (3/4) Epoch 3, batch 5400, loss[loss=0.2977, simple_loss=0.3533, pruned_loss=0.1211, over 7544.00 frames. ], tot_loss[loss=0.3312, simple_loss=0.3797, pruned_loss=0.1413, over 1616133.75 frames. ], batch size: 18, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:07:48,369 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8249, 2.3855, 1.7217, 2.7914, 1.4229, 1.2967, 1.7274, 2.2366], + device='cuda:3'), covar=tensor([0.1310, 0.1322, 0.1857, 0.0558, 0.1951, 0.2630, 0.2310, 0.1430], + device='cuda:3'), in_proj_covar=tensor([0.0305, 0.0307, 0.0303, 0.0229, 0.0292, 0.0313, 0.0331, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 22:07:59,355 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21595.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:12,102 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21613.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:14,736 INFO [train.py:901] (3/4) Epoch 3, batch 5450, loss[loss=0.3377, simple_loss=0.3779, pruned_loss=0.1487, over 7807.00 frames. ], tot_loss[loss=0.3316, simple_loss=0.3797, pruned_loss=0.1417, over 1615259.70 frames. ], batch size: 20, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:16,059 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.864e+02 3.746e+02 4.366e+02 5.874e+02 2.172e+03, threshold=8.732e+02, percent-clipped=6.0 +2023-02-05 22:08:24,289 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21631.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:08:31,561 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-05 22:08:36,096 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-05 22:08:41,803 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 22:08:49,803 INFO [train.py:901] (3/4) Epoch 3, batch 5500, loss[loss=0.3351, simple_loss=0.3833, pruned_loss=0.1434, over 8467.00 frames. 
], tot_loss[loss=0.3294, simple_loss=0.3784, pruned_loss=0.1403, over 1620236.28 frames. ], batch size: 25, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:08:55,224 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21674.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:05,908 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21690.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:09:18,076 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 22:09:23,538 INFO [train.py:901] (3/4) Epoch 3, batch 5550, loss[loss=0.2978, simple_loss=0.3505, pruned_loss=0.1225, over 7535.00 frames. ], tot_loss[loss=0.3296, simple_loss=0.3783, pruned_loss=0.1404, over 1619966.69 frames. ], batch size: 18, lr: 2.21e-02, grad_scale: 8.0 +2023-02-05 22:09:24,903 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 3.296e+02 4.063e+02 5.206e+02 8.291e+02, threshold=8.125e+02, percent-clipped=0.0 +2023-02-05 22:09:57,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3821, 1.1863, 4.4692, 1.7751, 3.8010, 3.6230, 3.9342, 3.9883], + device='cuda:3'), covar=tensor([0.0356, 0.3468, 0.0342, 0.1997, 0.1115, 0.0500, 0.0436, 0.0389], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0427, 0.0304, 0.0338, 0.0402, 0.0317, 0.0309, 0.0341], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 22:09:58,422 INFO [train.py:901] (3/4) Epoch 3, batch 5600, loss[loss=0.2849, simple_loss=0.355, pruned_loss=0.1074, over 8458.00 frames. ], tot_loss[loss=0.3285, simple_loss=0.3775, pruned_loss=0.1397, over 1619825.22 frames. ], batch size: 27, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:08,496 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21781.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:11,838 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21785.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:14,461 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21789.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:25,386 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=21805.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:26,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7899, 1.6468, 2.5487, 1.4512, 2.0786, 2.7351, 2.4965, 2.4753], + device='cuda:3'), covar=tensor([0.0915, 0.1189, 0.0844, 0.1533, 0.0971, 0.0390, 0.0409, 0.0599], + device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0260, 0.0198, 0.0252, 0.0207, 0.0180, 0.0176, 0.0252], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:10:28,849 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21810.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:10:33,430 INFO [train.py:901] (3/4) Epoch 3, batch 5650, loss[loss=0.3659, simple_loss=0.4046, pruned_loss=0.1636, over 8189.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.378, pruned_loss=0.1401, over 1617597.22 frames. 
], batch size: 23, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:10:34,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.984e+02 3.614e+02 4.526e+02 5.980e+02 8.654e+02, threshold=9.051e+02, percent-clipped=4.0 +2023-02-05 22:10:45,266 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 22:10:56,889 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=21851.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:07,115 INFO [train.py:901] (3/4) Epoch 3, batch 5700, loss[loss=0.3968, simple_loss=0.421, pruned_loss=0.1863, over 8097.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3792, pruned_loss=0.1406, over 1620210.72 frames. ], batch size: 21, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:07,931 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21868.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:13,372 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=21876.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:11:42,902 INFO [train.py:901] (3/4) Epoch 3, batch 5750, loss[loss=0.35, simple_loss=0.4042, pruned_loss=0.1479, over 8194.00 frames. ], tot_loss[loss=0.3301, simple_loss=0.3792, pruned_loss=0.1405, over 1615049.18 frames. ], batch size: 23, lr: 2.20e-02, grad_scale: 8.0 +2023-02-05 22:11:44,221 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.339e+02 3.657e+02 4.422e+02 5.345e+02 1.248e+03, threshold=8.845e+02, percent-clipped=3.0 +2023-02-05 22:11:49,710 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 22:12:10,246 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21957.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:16,966 INFO [train.py:901] (3/4) Epoch 3, batch 5800, loss[loss=0.3733, simple_loss=0.4105, pruned_loss=0.168, over 8554.00 frames. ], tot_loss[loss=0.3292, simple_loss=0.3784, pruned_loss=0.14, over 1618816.52 frames. ], batch size: 39, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:22,433 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=21975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:30,996 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=21988.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:12:49,718 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6965, 2.1256, 2.6414, 1.0761, 2.6198, 1.6783, 1.2501, 1.6886], + device='cuda:3'), covar=tensor([0.0195, 0.0076, 0.0134, 0.0205, 0.0156, 0.0252, 0.0258, 0.0135], + device='cuda:3'), in_proj_covar=tensor([0.0227, 0.0156, 0.0135, 0.0210, 0.0157, 0.0283, 0.0224, 0.0192], + device='cuda:3'), out_proj_covar=tensor([1.0566e-04, 7.0679e-05, 6.0602e-05, 9.4311e-05, 7.3969e-05, 1.4039e-04, + 1.0522e-04, 8.7381e-05], device='cuda:3') +2023-02-05 22:12:52,162 INFO [train.py:901] (3/4) Epoch 3, batch 5850, loss[loss=0.348, simple_loss=0.3982, pruned_loss=0.149, over 8190.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3793, pruned_loss=0.1406, over 1623462.18 frames. 
], batch size: 23, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:12:53,398 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.662e+02 4.461e+02 5.594e+02 1.608e+03, threshold=8.923e+02, percent-clipped=8.0 +2023-02-05 22:13:11,817 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:22,199 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22061.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:25,941 INFO [train.py:901] (3/4) Epoch 3, batch 5900, loss[loss=0.3449, simple_loss=0.3964, pruned_loss=0.1467, over 8515.00 frames. ], tot_loss[loss=0.3277, simple_loss=0.3775, pruned_loss=0.139, over 1620355.49 frames. ], batch size: 28, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:13:28,836 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22070.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:30,205 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22072.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:39,573 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22086.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:13:42,232 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22090.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:00,338 INFO [train.py:901] (3/4) Epoch 3, batch 5950, loss[loss=0.2957, simple_loss=0.3404, pruned_loss=0.1255, over 7716.00 frames. ], tot_loss[loss=0.3268, simple_loss=0.3768, pruned_loss=0.1383, over 1614438.04 frames. ], batch size: 18, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:02,399 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.045e+02 3.353e+02 4.485e+02 5.691e+02 1.558e+03, threshold=8.970e+02, percent-clipped=6.0 +2023-02-05 22:14:07,181 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22125.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:23,958 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22148.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:14:32,281 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7793, 1.3915, 3.0154, 1.2955, 2.2460, 3.3618, 3.1595, 2.9582], + device='cuda:3'), covar=tensor([0.1117, 0.1512, 0.0390, 0.2019, 0.0719, 0.0272, 0.0449, 0.0612], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0260, 0.0196, 0.0254, 0.0204, 0.0176, 0.0174, 0.0252], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:14:36,951 INFO [train.py:901] (3/4) Epoch 3, batch 6000, loss[loss=0.2568, simple_loss=0.3174, pruned_loss=0.09808, over 7219.00 frames. ], tot_loss[loss=0.327, simple_loss=0.3767, pruned_loss=0.1386, over 1613496.06 frames. ], batch size: 16, lr: 2.19e-02, grad_scale: 8.0 +2023-02-05 22:14:36,951 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 22:14:49,934 INFO [train.py:935] (3/4) Epoch 3, validation: loss=0.2472, simple_loss=0.3383, pruned_loss=0.07805, over 944034.00 frames. +2023-02-05 22:14:49,935 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 22:15:04,703 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. 
limit=2.0 +2023-02-05 22:15:08,338 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22194.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:21,666 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22212.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:25,116 INFO [train.py:901] (3/4) Epoch 3, batch 6050, loss[loss=0.2967, simple_loss=0.3508, pruned_loss=0.1212, over 7793.00 frames. ], tot_loss[loss=0.3286, simple_loss=0.378, pruned_loss=0.1396, over 1613748.43 frames. ], batch size: 19, lr: 2.18e-02, grad_scale: 8.0 +2023-02-05 22:15:26,476 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.554e+02 3.417e+02 4.364e+02 5.364e+02 3.571e+03, threshold=8.727e+02, percent-clipped=6.0 +2023-02-05 22:15:33,747 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0 +2023-02-05 22:15:36,164 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22233.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:40,929 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22240.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:51,061 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22255.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:15:52,562 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3826, 1.9239, 2.9561, 2.6930, 2.6557, 1.9472, 1.5792, 1.9016], + device='cuda:3'), covar=tensor([0.0706, 0.1059, 0.0199, 0.0348, 0.0367, 0.0490, 0.0580, 0.0733], + device='cuda:3'), in_proj_covar=tensor([0.0617, 0.0532, 0.0456, 0.0489, 0.0597, 0.0499, 0.0513, 0.0513], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:15:59,773 INFO [train.py:901] (3/4) Epoch 3, batch 6100, loss[loss=0.4139, simple_loss=0.4341, pruned_loss=0.1969, over 8612.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3792, pruned_loss=0.1407, over 1612601.15 frames. ], batch size: 39, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:16:18,445 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 22:16:35,129 INFO [train.py:901] (3/4) Epoch 3, batch 6150, loss[loss=0.3485, simple_loss=0.3991, pruned_loss=0.149, over 8577.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3792, pruned_loss=0.1406, over 1610050.85 frames. 
], batch size: 39, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:16:36,459 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.469e+02 3.615e+02 4.380e+02 5.688e+02 1.525e+03, threshold=8.759e+02, percent-clipped=2.0 +2023-02-05 22:16:41,835 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22327.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:42,446 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4726, 1.1307, 1.4229, 0.9992, 1.2808, 1.3134, 1.1562, 1.3772], + device='cuda:3'), covar=tensor([0.0900, 0.1549, 0.2180, 0.1788, 0.0712, 0.1891, 0.0919, 0.0677], + device='cuda:3'), in_proj_covar=tensor([0.0178, 0.0213, 0.0252, 0.0214, 0.0179, 0.0216, 0.0178, 0.0179], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 22:16:42,493 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22328.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:45,084 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22332.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:54,537 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22346.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:16:59,257 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22353.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:01,498 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.40 vs. limit=5.0 +2023-02-05 22:17:08,710 INFO [train.py:901] (3/4) Epoch 3, batch 6200, loss[loss=0.4141, simple_loss=0.4375, pruned_loss=0.1953, over 6945.00 frames. ], tot_loss[loss=0.3298, simple_loss=0.3789, pruned_loss=0.1404, over 1606131.21 frames. ], batch size: 71, lr: 2.18e-02, grad_scale: 16.0 +2023-02-05 22:17:11,704 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22371.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:34,523 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22403.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:17:44,418 INFO [train.py:901] (3/4) Epoch 3, batch 6250, loss[loss=0.3553, simple_loss=0.4062, pruned_loss=0.1522, over 8512.00 frames. ], tot_loss[loss=0.3297, simple_loss=0.3789, pruned_loss=0.1402, over 1609183.42 frames. ], batch size: 39, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:17:45,753 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.208e+02 3.506e+02 4.308e+02 5.585e+02 1.214e+03, threshold=8.617e+02, percent-clipped=6.0 +2023-02-05 22:18:03,163 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-05 22:18:05,783 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22447.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:15,774 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-05 22:18:19,135 INFO [train.py:901] (3/4) Epoch 3, batch 6300, loss[loss=0.3402, simple_loss=0.385, pruned_loss=0.1477, over 8032.00 frames. ], tot_loss[loss=0.3302, simple_loss=0.3793, pruned_loss=0.1406, over 1610844.01 frames. 
], batch size: 22, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:18:36,439 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22492.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:39,310 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22496.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:18:46,910 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6252, 2.5453, 3.0787, 0.8136, 2.8378, 1.7662, 1.0794, 1.3603], + device='cuda:3'), covar=tensor([0.0218, 0.0080, 0.0089, 0.0255, 0.0180, 0.0288, 0.0328, 0.0157], + device='cuda:3'), in_proj_covar=tensor([0.0240, 0.0167, 0.0145, 0.0220, 0.0165, 0.0299, 0.0231, 0.0195], + device='cuda:3'), out_proj_covar=tensor([1.0988e-04, 7.5261e-05, 6.5272e-05, 9.8293e-05, 7.7038e-05, 1.4669e-04, + 1.0724e-04, 8.6946e-05], device='cuda:3') +2023-02-05 22:18:54,100 INFO [train.py:901] (3/4) Epoch 3, batch 6350, loss[loss=0.3724, simple_loss=0.4029, pruned_loss=0.171, over 8495.00 frames. ], tot_loss[loss=0.3291, simple_loss=0.3785, pruned_loss=0.1398, over 1615300.20 frames. ], batch size: 26, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:18:55,439 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.104e+02 3.537e+02 4.368e+02 5.315e+02 1.494e+03, threshold=8.736e+02, percent-clipped=5.0 +2023-02-05 22:18:57,022 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22521.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:03,400 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-05 22:19:08,840 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22538.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:28,747 INFO [train.py:901] (3/4) Epoch 3, batch 6400, loss[loss=0.2794, simple_loss=0.3437, pruned_loss=0.1076, over 8380.00 frames. ], tot_loss[loss=0.3281, simple_loss=0.3779, pruned_loss=0.1391, over 1613659.85 frames. ], batch size: 24, lr: 2.17e-02, grad_scale: 16.0 +2023-02-05 22:19:35,384 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22577.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:39,446 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22583.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:46,085 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4261, 1.5561, 4.2727, 1.9223, 2.2809, 4.9345, 4.4831, 4.3293], + device='cuda:3'), covar=tensor([0.1084, 0.1543, 0.0309, 0.1735, 0.0839, 0.0210, 0.0285, 0.0451], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0263, 0.0200, 0.0260, 0.0209, 0.0179, 0.0176, 0.0249], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:19:50,049 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22599.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:55,823 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22607.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:19:56,543 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22608.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:03,202 INFO [train.py:901] (3/4) Epoch 3, batch 6450, loss[loss=0.3301, simple_loss=0.3911, pruned_loss=0.1346, over 8369.00 frames. ], tot_loss[loss=0.3286, simple_loss=0.3779, pruned_loss=0.1397, over 1609692.51 frames. 
], batch size: 24, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:20:04,480 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 3.557e+02 4.436e+02 5.729e+02 1.082e+03, threshold=8.871e+02, percent-clipped=7.0 +2023-02-05 22:20:28,492 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22653.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:31,153 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22657.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:20:37,605 INFO [train.py:901] (3/4) Epoch 3, batch 6500, loss[loss=0.3181, simple_loss=0.3762, pruned_loss=0.13, over 8198.00 frames. ], tot_loss[loss=0.3284, simple_loss=0.3773, pruned_loss=0.1398, over 1610362.89 frames. ], batch size: 23, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:20:53,261 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3882, 3.4531, 2.8226, 4.1783, 1.7066, 2.0360, 2.7765, 3.6442], + device='cuda:3'), covar=tensor([0.1223, 0.1364, 0.1344, 0.0287, 0.2141, 0.2079, 0.1768, 0.0786], + device='cuda:3'), in_proj_covar=tensor([0.0305, 0.0305, 0.0304, 0.0225, 0.0287, 0.0310, 0.0319, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0005, 0.0005], + device='cuda:3') +2023-02-05 22:20:55,241 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22692.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:01,862 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6103, 5.6154, 4.6998, 2.1989, 4.7694, 5.3736, 5.1201, 4.6040], + device='cuda:3'), covar=tensor([0.0713, 0.0448, 0.0886, 0.4244, 0.0653, 0.0527, 0.0981, 0.0586], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0249, 0.0290, 0.0371, 0.0273, 0.0219, 0.0266, 0.0212], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 22:21:02,667 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22703.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:09,922 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22714.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:11,759 INFO [train.py:901] (3/4) Epoch 3, batch 6550, loss[loss=0.3553, simple_loss=0.3991, pruned_loss=0.1558, over 8508.00 frames. ], tot_loss[loss=0.3272, simple_loss=0.3766, pruned_loss=0.1389, over 1605316.14 frames. ], batch size: 28, lr: 2.16e-02, grad_scale: 16.0 +2023-02-05 22:21:13,165 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.155e+02 3.258e+02 3.883e+02 5.357e+02 1.264e+03, threshold=7.766e+02, percent-clipped=3.0 +2023-02-05 22:21:19,293 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:28,616 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 22:21:32,838 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=22747.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:21:47,207 INFO [train.py:901] (3/4) Epoch 3, batch 6600, loss[loss=0.3344, simple_loss=0.3718, pruned_loss=0.1485, over 7541.00 frames. ], tot_loss[loss=0.3258, simple_loss=0.3751, pruned_loss=0.1382, over 1607993.65 frames. 
], batch size: 18, lr: 2.16e-02, grad_scale: 8.0 +2023-02-05 22:21:47,903 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 22:22:19,036 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22812.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:22,348 INFO [train.py:901] (3/4) Epoch 3, batch 6650, loss[loss=0.2953, simple_loss=0.3534, pruned_loss=0.1186, over 8235.00 frames. ], tot_loss[loss=0.3253, simple_loss=0.3747, pruned_loss=0.1379, over 1605018.31 frames. ], batch size: 22, lr: 2.16e-02, grad_scale: 8.0 +2023-02-05 22:22:24,341 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.373e+02 3.456e+02 4.169e+02 5.335e+02 9.931e+02, threshold=8.339e+02, percent-clipped=8.0 +2023-02-05 22:22:40,063 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=22843.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:53,737 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=22862.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:54,488 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22863.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:22:56,605 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9015, 2.3683, 4.7020, 1.1962, 2.7848, 2.2725, 1.5661, 2.5061], + device='cuda:3'), covar=tensor([0.1225, 0.1718, 0.0432, 0.2533, 0.1279, 0.1850, 0.1369, 0.1916], + device='cuda:3'), in_proj_covar=tensor([0.0445, 0.0413, 0.0489, 0.0508, 0.0546, 0.0478, 0.0432, 0.0551], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 22:22:57,068 INFO [train.py:901] (3/4) Epoch 3, batch 6700, loss[loss=0.2747, simple_loss=0.3445, pruned_loss=0.1025, over 7974.00 frames. ], tot_loss[loss=0.3243, simple_loss=0.374, pruned_loss=0.1373, over 1602728.43 frames. ], batch size: 21, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:23:12,641 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22888.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:26,913 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22909.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:32,715 INFO [train.py:901] (3/4) Epoch 3, batch 6750, loss[loss=0.3272, simple_loss=0.3739, pruned_loss=0.1402, over 8619.00 frames. ], tot_loss[loss=0.3265, simple_loss=0.376, pruned_loss=0.1385, over 1606552.00 frames. 
], batch size: 39, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:23:34,747 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.597e+02 4.402e+02 5.483e+02 1.400e+03, threshold=8.804e+02, percent-clipped=7.0 +2023-02-05 22:23:41,011 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8235, 5.8090, 4.7877, 1.6141, 5.0860, 5.3552, 5.1563, 4.7599], + device='cuda:3'), covar=tensor([0.0538, 0.0513, 0.0980, 0.5298, 0.0589, 0.0511, 0.1389, 0.0500], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0244, 0.0283, 0.0369, 0.0267, 0.0223, 0.0263, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 22:23:44,527 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22934.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:23:53,621 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22948.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:05,886 INFO [train.py:901] (3/4) Epoch 3, batch 6800, loss[loss=0.2653, simple_loss=0.3312, pruned_loss=0.09975, over 8031.00 frames. ], tot_loss[loss=0.3268, simple_loss=0.3764, pruned_loss=0.1386, over 1610385.94 frames. ], batch size: 22, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:24:05,921 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 22:24:08,778 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=22970.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:10,831 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22973.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:26,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=22995.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:30,241 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23001.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:24:33,156 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.20 vs. limit=5.0 +2023-02-05 22:24:41,423 INFO [train.py:901] (3/4) Epoch 3, batch 6850, loss[loss=0.3418, simple_loss=0.3694, pruned_loss=0.1571, over 7413.00 frames. ], tot_loss[loss=0.3256, simple_loss=0.3758, pruned_loss=0.1377, over 1612655.17 frames. ], batch size: 17, lr: 2.15e-02, grad_scale: 8.0 +2023-02-05 22:24:43,431 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.139e+02 3.425e+02 4.505e+02 5.413e+02 1.323e+03, threshold=9.011e+02, percent-clipped=6.0 +2023-02-05 22:24:54,851 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-05 22:25:15,250 INFO [train.py:901] (3/4) Epoch 3, batch 6900, loss[loss=0.342, simple_loss=0.3973, pruned_loss=0.1433, over 8501.00 frames. ], tot_loss[loss=0.3272, simple_loss=0.3767, pruned_loss=0.1388, over 1613106.42 frames. 
], batch size: 26, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:25:23,733 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5512, 2.3037, 3.5696, 3.0970, 2.9244, 2.2078, 1.6524, 2.1187], + device='cuda:3'), covar=tensor([0.0895, 0.1243, 0.0248, 0.0421, 0.0547, 0.0573, 0.0658, 0.1094], + device='cuda:3'), in_proj_covar=tensor([0.0621, 0.0541, 0.0454, 0.0503, 0.0617, 0.0511, 0.0515, 0.0527], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:25:50,201 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23116.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:25:50,685 INFO [train.py:901] (3/4) Epoch 3, batch 6950, loss[loss=0.327, simple_loss=0.3761, pruned_loss=0.139, over 8346.00 frames. ], tot_loss[loss=0.3276, simple_loss=0.3774, pruned_loss=0.1389, over 1613123.16 frames. ], batch size: 24, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:25:51,625 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:25:52,723 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.525e+02 4.440e+02 6.025e+02 1.140e+03, threshold=8.880e+02, percent-clipped=3.0 +2023-02-05 22:25:57,668 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23126.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:02,129 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-05 22:26:09,874 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:18,630 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23156.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:25,707 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5409, 2.1097, 2.1297, 1.0675, 2.0761, 1.5968, 0.9587, 1.8596], + device='cuda:3'), covar=tensor([0.0150, 0.0077, 0.0070, 0.0134, 0.0102, 0.0186, 0.0178, 0.0075], + device='cuda:3'), in_proj_covar=tensor([0.0246, 0.0172, 0.0146, 0.0223, 0.0172, 0.0289, 0.0235, 0.0197], + device='cuda:3'), out_proj_covar=tensor([1.1141e-04, 7.6856e-05, 6.4808e-05, 9.7729e-05, 7.9628e-05, 1.3963e-04, + 1.0637e-04, 8.6732e-05], device='cuda:3') +2023-02-05 22:26:26,189 INFO [train.py:901] (3/4) Epoch 3, batch 7000, loss[loss=0.3439, simple_loss=0.3878, pruned_loss=0.15, over 8286.00 frames. ], tot_loss[loss=0.3263, simple_loss=0.3768, pruned_loss=0.1379, over 1618048.23 frames. 
], batch size: 49, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:26:30,358 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4059, 1.8626, 3.3869, 2.6512, 2.5961, 1.8413, 1.3622, 1.1545], + device='cuda:3'), covar=tensor([0.1136, 0.1456, 0.0251, 0.0539, 0.0576, 0.0652, 0.0749, 0.1408], + device='cuda:3'), in_proj_covar=tensor([0.0627, 0.0540, 0.0460, 0.0509, 0.0618, 0.0508, 0.0518, 0.0528], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:26:39,926 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23187.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:26:49,017 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3901, 4.3162, 3.7907, 1.8798, 3.8204, 3.5648, 3.8822, 3.2926], + device='cuda:3'), covar=tensor([0.0724, 0.0596, 0.0833, 0.4473, 0.0666, 0.0863, 0.1370, 0.0722], + device='cuda:3'), in_proj_covar=tensor([0.0357, 0.0245, 0.0281, 0.0365, 0.0268, 0.0223, 0.0271, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 22:27:01,200 INFO [train.py:901] (3/4) Epoch 3, batch 7050, loss[loss=0.4127, simple_loss=0.4329, pruned_loss=0.1962, over 8412.00 frames. ], tot_loss[loss=0.3277, simple_loss=0.378, pruned_loss=0.1387, over 1618011.57 frames. ], batch size: 48, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:27:03,862 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 3.682e+02 4.488e+02 5.424e+02 1.788e+03, threshold=8.977e+02, percent-clipped=6.0 +2023-02-05 22:27:14,111 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23235.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:22,746 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:36,330 INFO [train.py:901] (3/4) Epoch 3, batch 7100, loss[loss=0.3715, simple_loss=0.417, pruned_loss=0.163, over 8466.00 frames. ], tot_loss[loss=0.3267, simple_loss=0.377, pruned_loss=0.1382, over 1616566.53 frames. ], batch size: 25, lr: 2.14e-02, grad_scale: 8.0 +2023-02-05 22:27:39,097 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23271.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:27:59,147 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23302.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:28:08,841 INFO [train.py:901] (3/4) Epoch 3, batch 7150, loss[loss=0.3016, simple_loss=0.3537, pruned_loss=0.1247, over 7687.00 frames. ], tot_loss[loss=0.326, simple_loss=0.3762, pruned_loss=0.1379, over 1612940.15 frames. 
], batch size: 18, lr: 2.13e-02, grad_scale: 8.0 +2023-02-05 22:28:10,863 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.845e+02 4.572e+02 5.960e+02 1.048e+03, threshold=9.143e+02, percent-clipped=2.0 +2023-02-05 22:28:42,780 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7886, 1.7294, 3.3051, 1.3816, 2.3259, 3.6709, 3.4558, 3.2261], + device='cuda:3'), covar=tensor([0.1181, 0.1356, 0.0417, 0.1926, 0.0709, 0.0241, 0.0428, 0.0602], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0262, 0.0207, 0.0263, 0.0205, 0.0182, 0.0184, 0.0254], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:28:43,301 INFO [train.py:901] (3/4) Epoch 3, batch 7200, loss[loss=0.3768, simple_loss=0.4199, pruned_loss=0.1668, over 8621.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3771, pruned_loss=0.1385, over 1615691.95 frames. ], batch size: 39, lr: 2.13e-02, grad_scale: 8.0 +2023-02-05 22:28:47,582 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23372.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:29:04,750 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23397.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:29:17,778 INFO [train.py:901] (3/4) Epoch 3, batch 7250, loss[loss=0.3567, simple_loss=0.3937, pruned_loss=0.1599, over 8182.00 frames. ], tot_loss[loss=0.3271, simple_loss=0.3765, pruned_loss=0.1389, over 1611090.33 frames. ], batch size: 23, lr: 2.13e-02, grad_scale: 4.0 +2023-02-05 22:29:20,307 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.518e+02 3.505e+02 4.323e+02 5.847e+02 9.851e+02, threshold=8.646e+02, percent-clipped=2.0 +2023-02-05 22:29:52,881 INFO [train.py:901] (3/4) Epoch 3, batch 7300, loss[loss=0.2713, simple_loss=0.3304, pruned_loss=0.1061, over 7711.00 frames. ], tot_loss[loss=0.3269, simple_loss=0.3766, pruned_loss=0.1385, over 1612736.36 frames. ], batch size: 18, lr: 2.13e-02, grad_scale: 4.0 +2023-02-05 22:29:55,076 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23470.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:21,341 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=23506.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:28,665 INFO [train.py:901] (3/4) Epoch 3, batch 7350, loss[loss=0.2854, simple_loss=0.3407, pruned_loss=0.115, over 7937.00 frames. ], tot_loss[loss=0.3243, simple_loss=0.3739, pruned_loss=0.1374, over 1601610.26 frames. ], batch size: 20, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:30:31,437 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.204e+02 3.295e+02 4.174e+02 5.897e+02 1.266e+03, threshold=8.348e+02, percent-clipped=6.0 +2023-02-05 22:30:35,724 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23527.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:45,653 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-05 22:30:52,328 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23552.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:30:56,357 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23558.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:03,074 INFO [train.py:901] (3/4) Epoch 3, batch 7400, loss[loss=0.306, simple_loss=0.3614, pruned_loss=0.1253, over 7819.00 frames. ], tot_loss[loss=0.3253, simple_loss=0.3753, pruned_loss=0.1376, over 1607094.73 frames. ], batch size: 20, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:31:05,761 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-05 22:31:11,873 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:14,787 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23583.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:16,181 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23585.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:20,317 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:31:38,774 INFO [train.py:901] (3/4) Epoch 3, batch 7450, loss[loss=0.3666, simple_loss=0.4082, pruned_loss=0.1625, over 8461.00 frames. ], tot_loss[loss=0.3279, simple_loss=0.3773, pruned_loss=0.1392, over 1606658.81 frames. ], batch size: 27, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:31:41,484 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.560e+02 4.542e+02 5.434e+02 8.209e+02, threshold=9.083e+02, percent-clipped=0.0 +2023-02-05 22:31:44,194 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-05 22:32:11,864 INFO [train.py:901] (3/4) Epoch 3, batch 7500, loss[loss=0.2788, simple_loss=0.3368, pruned_loss=0.1104, over 8032.00 frames. ], tot_loss[loss=0.3265, simple_loss=0.3762, pruned_loss=0.1385, over 1608500.87 frames. ], batch size: 22, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:32:23,285 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0186, 1.0813, 1.0904, 1.0381, 0.7519, 1.1987, 0.0364, 1.0288], + device='cuda:3'), covar=tensor([0.3028, 0.2022, 0.1143, 0.1758, 0.5069, 0.0961, 0.4986, 0.1729], + device='cuda:3'), in_proj_covar=tensor([0.0122, 0.0115, 0.0084, 0.0159, 0.0187, 0.0082, 0.0147, 0.0119], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:32:31,249 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23694.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:32:39,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:32:47,010 INFO [train.py:901] (3/4) Epoch 3, batch 7550, loss[loss=0.3611, simple_loss=0.4091, pruned_loss=0.1565, over 8197.00 frames. ], tot_loss[loss=0.3263, simple_loss=0.376, pruned_loss=0.1383, over 1610537.82 frames. 
], batch size: 23, lr: 2.12e-02, grad_scale: 4.0 +2023-02-05 22:32:49,788 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.055e+02 3.573e+02 4.120e+02 5.568e+02 9.909e+02, threshold=8.240e+02, percent-clipped=1.0 +2023-02-05 22:33:21,017 INFO [train.py:901] (3/4) Epoch 3, batch 7600, loss[loss=0.3534, simple_loss=0.4114, pruned_loss=0.1477, over 8470.00 frames. ], tot_loss[loss=0.3262, simple_loss=0.3764, pruned_loss=0.138, over 1615431.13 frames. ], batch size: 29, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:33:55,888 INFO [train.py:901] (3/4) Epoch 3, batch 7650, loss[loss=0.349, simple_loss=0.378, pruned_loss=0.16, over 7442.00 frames. ], tot_loss[loss=0.3257, simple_loss=0.3756, pruned_loss=0.1379, over 1610140.66 frames. ], batch size: 17, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:33:58,392 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.349e+02 3.333e+02 4.379e+02 5.791e+02 1.321e+03, threshold=8.759e+02, percent-clipped=7.0 +2023-02-05 22:34:12,405 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-05 22:34:13,551 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23841.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:19,394 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=23850.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:29,028 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-05 22:34:30,200 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23866.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:34:30,653 INFO [train.py:901] (3/4) Epoch 3, batch 7700, loss[loss=0.3209, simple_loss=0.3763, pruned_loss=0.1327, over 8501.00 frames. ], tot_loss[loss=0.3256, simple_loss=0.3755, pruned_loss=0.1379, over 1613380.31 frames. ], batch size: 28, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:34:50,583 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-05 22:34:50,862 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-05 22:35:04,780 INFO [train.py:901] (3/4) Epoch 3, batch 7750, loss[loss=0.285, simple_loss=0.3591, pruned_loss=0.1054, over 8352.00 frames. ], tot_loss[loss=0.3248, simple_loss=0.3755, pruned_loss=0.1371, over 1614009.70 frames. ], batch size: 24, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:35:08,100 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 3.458e+02 4.167e+02 5.729e+02 1.393e+03, threshold=8.335e+02, percent-clipped=8.0 +2023-02-05 22:35:27,673 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23950.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:37,130 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=23962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:39,048 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=23965.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:40,188 INFO [train.py:901] (3/4) Epoch 3, batch 7800, loss[loss=0.369, simple_loss=0.4142, pruned_loss=0.1619, over 8342.00 frames. ], tot_loss[loss=0.3237, simple_loss=0.3746, pruned_loss=0.1364, over 1613506.93 frames. 
], batch size: 26, lr: 2.11e-02, grad_scale: 8.0 +2023-02-05 22:35:45,565 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23975.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:35:53,489 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=23987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:36:14,023 INFO [train.py:901] (3/4) Epoch 3, batch 7850, loss[loss=0.2928, simple_loss=0.3573, pruned_loss=0.1141, over 8535.00 frames. ], tot_loss[loss=0.3253, simple_loss=0.3754, pruned_loss=0.1376, over 1610950.92 frames. ], batch size: 28, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:36:16,553 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.230e+02 3.608e+02 4.565e+02 5.801e+02 1.089e+03, threshold=9.129e+02, percent-clipped=5.0 +2023-02-05 22:36:19,400 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9603, 1.2532, 5.9020, 2.3110, 5.2909, 5.0669, 5.6533, 5.4995], + device='cuda:3'), covar=tensor([0.0241, 0.3166, 0.0159, 0.1496, 0.0668, 0.0300, 0.0216, 0.0264], + device='cuda:3'), in_proj_covar=tensor([0.0261, 0.0432, 0.0320, 0.0343, 0.0410, 0.0338, 0.0317, 0.0358], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-05 22:36:19,486 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2719, 2.4627, 3.1882, 0.5863, 2.8661, 1.8746, 1.4867, 1.8662], + device='cuda:3'), covar=tensor([0.0185, 0.0082, 0.0071, 0.0254, 0.0157, 0.0258, 0.0251, 0.0125], + device='cuda:3'), in_proj_covar=tensor([0.0252, 0.0176, 0.0141, 0.0227, 0.0170, 0.0308, 0.0241, 0.0202], + device='cuda:3'), out_proj_covar=tensor([1.1150e-04, 7.6177e-05, 6.0818e-05, 9.7618e-05, 7.6524e-05, 1.4657e-04, + 1.0786e-04, 8.6886e-05], device='cuda:3') +2023-02-05 22:36:39,254 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24055.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:36:47,367 INFO [train.py:901] (3/4) Epoch 3, batch 7900, loss[loss=0.3092, simple_loss=0.358, pruned_loss=0.1302, over 8109.00 frames. ], tot_loss[loss=0.3227, simple_loss=0.3733, pruned_loss=0.136, over 1608664.80 frames. ], batch size: 23, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:20,412 INFO [train.py:901] (3/4) Epoch 3, batch 7950, loss[loss=0.2811, simple_loss=0.3376, pruned_loss=0.1123, over 7557.00 frames. ], tot_loss[loss=0.3237, simple_loss=0.3742, pruned_loss=0.1366, over 1612323.13 frames. 
], batch size: 18, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:23,172 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.080e+02 3.295e+02 4.369e+02 5.897e+02 2.335e+03, threshold=8.738e+02, percent-clipped=5.0 +2023-02-05 22:37:30,776 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8270, 1.1859, 5.8346, 2.2445, 5.1558, 4.9656, 5.4621, 5.3325], + device='cuda:3'), covar=tensor([0.0245, 0.3479, 0.0199, 0.1573, 0.0772, 0.0396, 0.0275, 0.0302], + device='cuda:3'), in_proj_covar=tensor([0.0273, 0.0443, 0.0332, 0.0357, 0.0429, 0.0351, 0.0337, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 22:37:49,164 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9228, 1.5843, 2.4193, 2.0774, 2.1054, 1.6830, 1.3808, 0.6766], + device='cuda:3'), covar=tensor([0.1004, 0.1108, 0.0283, 0.0458, 0.0460, 0.0595, 0.0668, 0.1055], + device='cuda:3'), in_proj_covar=tensor([0.0639, 0.0556, 0.0473, 0.0522, 0.0633, 0.0514, 0.0528, 0.0538], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:37:54,056 INFO [train.py:901] (3/4) Epoch 3, batch 8000, loss[loss=0.3002, simple_loss=0.3478, pruned_loss=0.1263, over 7287.00 frames. ], tot_loss[loss=0.3235, simple_loss=0.3736, pruned_loss=0.1367, over 1611327.77 frames. ], batch size: 16, lr: 2.10e-02, grad_scale: 8.0 +2023-02-05 22:37:58,422 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2290, 1.6788, 1.3169, 1.6620, 1.4733, 1.1782, 1.2633, 1.5288], + device='cuda:3'), covar=tensor([0.0801, 0.0396, 0.0849, 0.0480, 0.0565, 0.0961, 0.0690, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0239, 0.0324, 0.0307, 0.0332, 0.0316, 0.0339, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 22:38:27,963 INFO [train.py:901] (3/4) Epoch 3, batch 8050, loss[loss=0.3847, simple_loss=0.4069, pruned_loss=0.1813, over 7230.00 frames. ], tot_loss[loss=0.3231, simple_loss=0.3723, pruned_loss=0.137, over 1586903.60 frames. ], batch size: 71, lr: 2.09e-02, grad_scale: 8.0 +2023-02-05 22:38:30,755 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.328e+02 4.149e+02 5.404e+02 3.135e+03, threshold=8.298e+02, percent-clipped=6.0 +2023-02-05 22:38:31,005 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24221.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:38:48,122 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24246.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:39:03,930 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-05 22:39:07,722 INFO [train.py:901] (3/4) Epoch 4, batch 0, loss[loss=0.3448, simple_loss=0.3909, pruned_loss=0.1493, over 8583.00 frames. ], tot_loss[loss=0.3448, simple_loss=0.3909, pruned_loss=0.1493, over 8583.00 frames. ], batch size: 31, lr: 1.96e-02, grad_scale: 8.0 +2023-02-05 22:39:07,722 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 22:39:18,718 INFO [train.py:935] (3/4) Epoch 4, validation: loss=0.2476, simple_loss=0.3384, pruned_loss=0.07836, over 944034.00 frames. 
+2023-02-05 22:39:18,719 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 22:39:34,156 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-05 22:39:52,980 INFO [train.py:901] (3/4) Epoch 4, batch 50, loss[loss=0.2553, simple_loss=0.3309, pruned_loss=0.08989, over 8353.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.3674, pruned_loss=0.1322, over 362852.21 frames. ], batch size: 24, lr: 1.96e-02, grad_scale: 8.0 +2023-02-05 22:40:07,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.017e+02 3.527e+02 4.250e+02 5.116e+02 9.987e+02, threshold=8.500e+02, percent-clipped=2.0 +2023-02-05 22:40:09,008 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-05 22:40:27,955 INFO [train.py:901] (3/4) Epoch 4, batch 100, loss[loss=0.4026, simple_loss=0.4372, pruned_loss=0.1839, over 8593.00 frames. ], tot_loss[loss=0.3284, simple_loss=0.3791, pruned_loss=0.1389, over 645794.14 frames. ], batch size: 49, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:40:31,333 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-05 22:41:01,467 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24399.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:41:02,085 INFO [train.py:901] (3/4) Epoch 4, batch 150, loss[loss=0.3021, simple_loss=0.3657, pruned_loss=0.1192, over 8281.00 frames. ], tot_loss[loss=0.3258, simple_loss=0.3771, pruned_loss=0.1373, over 864773.56 frames. ], batch size: 23, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:41:17,154 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 3.490e+02 4.203e+02 5.614e+02 1.653e+03, threshold=8.406e+02, percent-clipped=4.0 +2023-02-05 22:41:37,211 INFO [train.py:901] (3/4) Epoch 4, batch 200, loss[loss=0.3334, simple_loss=0.3873, pruned_loss=0.1398, over 8327.00 frames. ], tot_loss[loss=0.3225, simple_loss=0.3751, pruned_loss=0.135, over 1031861.36 frames. ], batch size: 25, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:11,040 INFO [train.py:901] (3/4) Epoch 4, batch 250, loss[loss=0.2799, simple_loss=0.3251, pruned_loss=0.1173, over 7690.00 frames. ], tot_loss[loss=0.3192, simple_loss=0.373, pruned_loss=0.1326, over 1164788.93 frames. ], batch size: 18, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:20,373 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=24514.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:42:23,570 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-05 22:42:24,844 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.156e+02 3.531e+02 4.434e+02 5.277e+02 1.190e+03, threshold=8.868e+02, percent-clipped=4.0 +2023-02-05 22:42:31,605 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-05 22:42:41,352 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7480, 1.6902, 1.4453, 1.2991, 1.6782, 1.4827, 1.5775, 2.1256], + device='cuda:3'), covar=tensor([0.0545, 0.1258, 0.1951, 0.1485, 0.0684, 0.1605, 0.0927, 0.0542], + device='cuda:3'), in_proj_covar=tensor([0.0173, 0.0209, 0.0252, 0.0207, 0.0168, 0.0211, 0.0176, 0.0174], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0006, 0.0007, 0.0006, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 22:42:46,004 INFO [train.py:901] (3/4) Epoch 4, batch 300, loss[loss=0.2569, simple_loss=0.3097, pruned_loss=0.1021, over 7418.00 frames. ], tot_loss[loss=0.3245, simple_loss=0.3761, pruned_loss=0.1364, over 1264181.19 frames. ], batch size: 17, lr: 1.95e-02, grad_scale: 8.0 +2023-02-05 22:42:57,005 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24565.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:43:12,332 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24587.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:43:16,435 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3750, 1.8232, 1.7532, 0.4365, 1.6558, 1.2676, 0.3058, 1.5694], + device='cuda:3'), covar=tensor([0.0114, 0.0060, 0.0049, 0.0142, 0.0082, 0.0221, 0.0201, 0.0062], + device='cuda:3'), in_proj_covar=tensor([0.0256, 0.0175, 0.0143, 0.0225, 0.0170, 0.0302, 0.0239, 0.0206], + device='cuda:3'), out_proj_covar=tensor([1.1207e-04, 7.5289e-05, 6.1270e-05, 9.5059e-05, 7.5759e-05, 1.4152e-04, + 1.0535e-04, 8.8042e-05], device='cuda:3') +2023-02-05 22:43:21,554 INFO [train.py:901] (3/4) Epoch 4, batch 350, loss[loss=0.281, simple_loss=0.3394, pruned_loss=0.1113, over 7647.00 frames. ], tot_loss[loss=0.3231, simple_loss=0.3746, pruned_loss=0.1358, over 1341676.99 frames. ], batch size: 19, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:43:31,764 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9006, 2.1164, 2.8044, 1.0951, 2.3486, 1.8665, 1.5686, 1.9396], + device='cuda:3'), covar=tensor([0.0203, 0.0083, 0.0058, 0.0180, 0.0149, 0.0178, 0.0200, 0.0111], + device='cuda:3'), in_proj_covar=tensor([0.0259, 0.0178, 0.0146, 0.0229, 0.0172, 0.0306, 0.0242, 0.0210], + device='cuda:3'), out_proj_covar=tensor([1.1364e-04, 7.6424e-05, 6.2785e-05, 9.6814e-05, 7.6290e-05, 1.4319e-04, + 1.0660e-04, 8.9545e-05], device='cuda:3') +2023-02-05 22:43:34,099 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-05 22:43:35,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 3.300e+02 4.421e+02 5.071e+02 1.044e+03, threshold=8.841e+02, percent-clipped=4.0 +2023-02-05 22:43:52,417 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-05 22:43:56,478 INFO [train.py:901] (3/4) Epoch 4, batch 400, loss[loss=0.2999, simple_loss=0.3443, pruned_loss=0.1278, over 7176.00 frames. ], tot_loss[loss=0.3212, simple_loss=0.373, pruned_loss=0.1347, over 1403384.03 frames. ], batch size: 16, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:44:04,954 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-05 22:44:30,019 INFO [train.py:901] (3/4) Epoch 4, batch 450, loss[loss=0.311, simple_loss=0.3449, pruned_loss=0.1385, over 7427.00 frames. ], tot_loss[loss=0.3225, simple_loss=0.3742, pruned_loss=0.1354, over 1453311.33 frames. 
], batch size: 17, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:44:44,808 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 3.414e+02 4.548e+02 5.600e+02 1.007e+03, threshold=9.096e+02, percent-clipped=5.0 +2023-02-05 22:45:04,965 INFO [train.py:901] (3/4) Epoch 4, batch 500, loss[loss=0.3341, simple_loss=0.3846, pruned_loss=0.1418, over 8766.00 frames. ], tot_loss[loss=0.3209, simple_loss=0.373, pruned_loss=0.1344, over 1487533.05 frames. ], batch size: 34, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:45:19,921 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=24770.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:28,220 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24783.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:36,835 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=24795.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:45:40,054 INFO [train.py:901] (3/4) Epoch 4, batch 550, loss[loss=0.2865, simple_loss=0.3264, pruned_loss=0.1233, over 7414.00 frames. ], tot_loss[loss=0.319, simple_loss=0.3714, pruned_loss=0.1333, over 1516758.02 frames. ], batch size: 17, lr: 1.94e-02, grad_scale: 8.0 +2023-02-05 22:45:53,858 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 3.369e+02 4.426e+02 5.591e+02 8.767e+02, threshold=8.852e+02, percent-clipped=0.0 +2023-02-05 22:46:13,959 INFO [train.py:901] (3/4) Epoch 4, batch 600, loss[loss=0.2986, simple_loss=0.3552, pruned_loss=0.121, over 8317.00 frames. ], tot_loss[loss=0.3197, simple_loss=0.3726, pruned_loss=0.1334, over 1543921.23 frames. ], batch size: 49, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:46:24,949 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=24866.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:46:28,942 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-05 22:46:49,154 INFO [train.py:901] (3/4) Epoch 4, batch 650, loss[loss=0.3868, simple_loss=0.4147, pruned_loss=0.1795, over 7812.00 frames. ], tot_loss[loss=0.3199, simple_loss=0.3727, pruned_loss=0.1335, over 1560104.89 frames. ], batch size: 20, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:46:49,645 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. 
limit=2.0 +2023-02-05 22:46:55,189 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24909.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:47:03,760 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.198e+02 3.310e+02 4.230e+02 5.108e+02 1.167e+03, threshold=8.459e+02, percent-clipped=4.0 +2023-02-05 22:47:10,606 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=24931.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:47:19,517 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6361, 1.3307, 3.3441, 1.4577, 2.3968, 3.8816, 3.6780, 3.3424], + device='cuda:3'), covar=tensor([0.1358, 0.1713, 0.0303, 0.1835, 0.0711, 0.0204, 0.0283, 0.0558], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0262, 0.0207, 0.0264, 0.0212, 0.0187, 0.0187, 0.0265], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 22:47:24,028 INFO [train.py:901] (3/4) Epoch 4, batch 700, loss[loss=0.3196, simple_loss=0.3728, pruned_loss=0.1332, over 8458.00 frames. ], tot_loss[loss=0.3193, simple_loss=0.3719, pruned_loss=0.1334, over 1570370.01 frames. ], batch size: 25, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:47:59,150 INFO [train.py:901] (3/4) Epoch 4, batch 750, loss[loss=0.3071, simple_loss=0.3667, pruned_loss=0.1237, over 8025.00 frames. ], tot_loss[loss=0.3187, simple_loss=0.3714, pruned_loss=0.133, over 1575538.14 frames. ], batch size: 22, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:48:08,260 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25013.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:13,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.175e+02 4.108e+02 5.247e+02 1.235e+03, threshold=8.217e+02, percent-clipped=4.0 +2023-02-05 22:48:14,035 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-05 22:48:15,511 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:22,610 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-05 22:48:30,680 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25046.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:48:33,230 INFO [train.py:901] (3/4) Epoch 4, batch 800, loss[loss=0.2763, simple_loss=0.333, pruned_loss=0.1098, over 7779.00 frames. ], tot_loss[loss=0.3186, simple_loss=0.371, pruned_loss=0.1331, over 1583415.68 frames. ], batch size: 19, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:48:53,299 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3064, 1.9539, 2.1204, 1.3771, 2.1153, 1.4397, 0.5762, 1.7995], + device='cuda:3'), covar=tensor([0.0170, 0.0076, 0.0081, 0.0122, 0.0117, 0.0258, 0.0227, 0.0088], + device='cuda:3'), in_proj_covar=tensor([0.0255, 0.0174, 0.0142, 0.0219, 0.0165, 0.0297, 0.0239, 0.0209], + device='cuda:3'), out_proj_covar=tensor([1.1054e-04, 7.4326e-05, 5.9841e-05, 9.1637e-05, 7.2263e-05, 1.3834e-04, + 1.0356e-04, 8.7703e-05], device='cuda:3') +2023-02-05 22:49:06,963 INFO [train.py:901] (3/4) Epoch 4, batch 850, loss[loss=0.3186, simple_loss=0.364, pruned_loss=0.1366, over 7700.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3697, pruned_loss=0.1329, over 1589333.45 frames. 
], batch size: 18, lr: 1.93e-02, grad_scale: 8.0 +2023-02-05 22:49:22,446 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 3.301e+02 4.277e+02 5.478e+02 1.022e+03, threshold=8.554e+02, percent-clipped=4.0 +2023-02-05 22:49:26,602 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25127.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:49:42,456 INFO [train.py:901] (3/4) Epoch 4, batch 900, loss[loss=0.3286, simple_loss=0.3855, pruned_loss=0.1359, over 8553.00 frames. ], tot_loss[loss=0.3178, simple_loss=0.3696, pruned_loss=0.133, over 1597436.29 frames. ], batch size: 39, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:50:02,302 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5466, 1.1187, 4.5784, 1.8121, 3.8960, 3.7319, 4.0987, 3.9895], + device='cuda:3'), covar=tensor([0.0268, 0.3978, 0.0322, 0.2036, 0.1015, 0.0607, 0.0349, 0.0483], + device='cuda:3'), in_proj_covar=tensor([0.0273, 0.0445, 0.0342, 0.0358, 0.0418, 0.0359, 0.0328, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 22:50:16,545 INFO [train.py:901] (3/4) Epoch 4, batch 950, loss[loss=0.3447, simple_loss=0.3824, pruned_loss=0.1536, over 8142.00 frames. ], tot_loss[loss=0.3177, simple_loss=0.3694, pruned_loss=0.133, over 1597551.22 frames. ], batch size: 22, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:50:23,514 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25210.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:50:25,724 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3651, 2.3466, 3.0324, 0.5445, 2.8235, 1.7337, 1.2990, 1.4533], + device='cuda:3'), covar=tensor([0.0287, 0.0093, 0.0074, 0.0249, 0.0152, 0.0276, 0.0313, 0.0173], + device='cuda:3'), in_proj_covar=tensor([0.0258, 0.0179, 0.0144, 0.0223, 0.0169, 0.0299, 0.0243, 0.0211], + device='cuda:3'), out_proj_covar=tensor([1.1169e-04, 7.6338e-05, 6.0479e-05, 9.3008e-05, 7.3783e-05, 1.3864e-04, + 1.0534e-04, 8.8758e-05], device='cuda:3') +2023-02-05 22:50:30,882 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.215e+02 3.501e+02 4.488e+02 5.717e+02 1.063e+03, threshold=8.976e+02, percent-clipped=5.0 +2023-02-05 22:50:40,883 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-05 22:50:46,577 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25242.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:50:51,551 INFO [train.py:901] (3/4) Epoch 4, batch 1000, loss[loss=0.34, simple_loss=0.3905, pruned_loss=0.1448, over 8110.00 frames. ], tot_loss[loss=0.318, simple_loss=0.3701, pruned_loss=0.133, over 1602696.60 frames. ], batch size: 23, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:51:12,233 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25280.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:13,330 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-05 22:51:21,728 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-05 22:51:25,838 INFO [train.py:901] (3/4) Epoch 4, batch 1050, loss[loss=0.33, simple_loss=0.3907, pruned_loss=0.1347, over 8251.00 frames. ], tot_loss[loss=0.3167, simple_loss=0.3693, pruned_loss=0.132, over 1607056.24 frames. 
], batch size: 24, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:51:26,412 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-05 22:51:27,146 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25302.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:28,973 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:39,506 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 3.519e+02 4.399e+02 5.664e+02 1.146e+03, threshold=8.797e+02, percent-clipped=2.0 +2023-02-05 22:51:42,395 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25325.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:43,779 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25327.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:51:58,865 INFO [train.py:901] (3/4) Epoch 4, batch 1100, loss[loss=0.3549, simple_loss=0.3762, pruned_loss=0.1668, over 7710.00 frames. ], tot_loss[loss=0.3159, simple_loss=0.3684, pruned_loss=0.1317, over 1605136.73 frames. ], batch size: 18, lr: 1.92e-02, grad_scale: 8.0 +2023-02-05 22:52:03,946 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7897, 3.1328, 3.0308, 4.0621, 1.9240, 1.7543, 2.6248, 3.3313], + device='cuda:3'), covar=tensor([0.0840, 0.1423, 0.1268, 0.0233, 0.1772, 0.2458, 0.1605, 0.1132], + device='cuda:3'), in_proj_covar=tensor([0.0288, 0.0300, 0.0307, 0.0220, 0.0278, 0.0311, 0.0315, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 22:52:05,139 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25357.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:52:34,567 INFO [train.py:901] (3/4) Epoch 4, batch 1150, loss[loss=0.3353, simple_loss=0.3864, pruned_loss=0.1421, over 8495.00 frames. ], tot_loss[loss=0.3145, simple_loss=0.3675, pruned_loss=0.1307, over 1604546.09 frames. ], batch size: 28, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:52:37,395 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-05 22:52:40,117 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5861, 1.7218, 2.0987, 1.7816, 1.2243, 1.9960, 0.3788, 1.0379], + device='cuda:3'), covar=tensor([0.3607, 0.2366, 0.1041, 0.2514, 0.5734, 0.1481, 0.6260, 0.2444], + device='cuda:3'), in_proj_covar=tensor([0.0116, 0.0112, 0.0082, 0.0158, 0.0190, 0.0081, 0.0154, 0.0115], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:52:49,211 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.195e+02 3.278e+02 3.972e+02 4.649e+02 8.065e+02, threshold=7.944e+02, percent-clipped=0.0 +2023-02-05 22:52:55,656 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-05 22:53:06,093 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25446.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:53:08,506 INFO [train.py:901] (3/4) Epoch 4, batch 1200, loss[loss=0.2614, simple_loss=0.3186, pruned_loss=0.1021, over 7519.00 frames. ], tot_loss[loss=0.3144, simple_loss=0.3677, pruned_loss=0.1306, over 1609749.46 frames. 
], batch size: 18, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:23,234 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25472.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:53:26,924 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-05 22:53:42,015 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25498.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:53:43,173 INFO [train.py:901] (3/4) Epoch 4, batch 1250, loss[loss=0.3168, simple_loss=0.3747, pruned_loss=0.1294, over 8467.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3671, pruned_loss=0.1301, over 1614121.46 frames. ], batch size: 25, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:53:57,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 3.538e+02 4.328e+02 6.105e+02 1.271e+03, threshold=8.657e+02, percent-clipped=4.0 +2023-02-05 22:53:59,265 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25523.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:18,017 INFO [train.py:901] (3/4) Epoch 4, batch 1300, loss[loss=0.32, simple_loss=0.3924, pruned_loss=0.1238, over 8104.00 frames. ], tot_loss[loss=0.3138, simple_loss=0.3678, pruned_loss=0.1299, over 1617575.62 frames. ], batch size: 23, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:39,396 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25581.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:54:40,062 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5131, 2.6148, 2.8182, 2.0842, 1.6372, 2.5752, 0.8706, 2.0791], + device='cuda:3'), covar=tensor([0.3150, 0.1368, 0.0835, 0.2238, 0.4711, 0.0806, 0.5690, 0.1971], + device='cuda:3'), in_proj_covar=tensor([0.0119, 0.0110, 0.0086, 0.0159, 0.0190, 0.0082, 0.0152, 0.0118], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:54:53,161 INFO [train.py:901] (3/4) Epoch 4, batch 1350, loss[loss=0.2828, simple_loss=0.352, pruned_loss=0.1068, over 8244.00 frames. ], tot_loss[loss=0.3147, simple_loss=0.3688, pruned_loss=0.1303, over 1618613.05 frames. ], batch size: 24, lr: 1.91e-02, grad_scale: 16.0 +2023-02-05 22:54:57,489 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25606.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:55:08,861 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.283e+02 4.098e+02 5.393e+02 1.175e+03, threshold=8.196e+02, percent-clipped=3.0 +2023-02-05 22:55:28,874 INFO [train.py:901] (3/4) Epoch 4, batch 1400, loss[loss=0.3293, simple_loss=0.3828, pruned_loss=0.1379, over 8028.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3701, pruned_loss=0.1318, over 1617913.14 frames. ], batch size: 22, lr: 1.91e-02, grad_scale: 8.0 +2023-02-05 22:56:03,158 INFO [train.py:901] (3/4) Epoch 4, batch 1450, loss[loss=0.327, simple_loss=0.385, pruned_loss=0.1345, over 8528.00 frames. ], tot_loss[loss=0.3169, simple_loss=0.3706, pruned_loss=0.1316, over 1620781.71 frames. ], batch size: 28, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:05,848 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-05 22:56:14,426 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1827, 1.1493, 2.3185, 1.1609, 1.9388, 2.4694, 2.4259, 2.1364], + device='cuda:3'), covar=tensor([0.1007, 0.1149, 0.0443, 0.1670, 0.0527, 0.0349, 0.0417, 0.0708], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0259, 0.0204, 0.0265, 0.0211, 0.0187, 0.0186, 0.0257], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 22:56:18,904 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.243e+02 3.964e+02 4.847e+02 1.034e+03, threshold=7.929e+02, percent-clipped=2.0 +2023-02-05 22:56:23,194 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=25728.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:56:38,603 INFO [train.py:901] (3/4) Epoch 4, batch 1500, loss[loss=0.3592, simple_loss=0.418, pruned_loss=0.1503, over 8281.00 frames. ], tot_loss[loss=0.3181, simple_loss=0.3705, pruned_loss=0.1329, over 1616484.27 frames. ], batch size: 23, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:56:40,769 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=25753.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 22:57:00,766 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8120, 4.0117, 2.3758, 2.0873, 2.7663, 1.9264, 2.1909, 3.0350], + device='cuda:3'), covar=tensor([0.1400, 0.0237, 0.0712, 0.0738, 0.0610, 0.0949, 0.1069, 0.0668], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0239, 0.0325, 0.0301, 0.0337, 0.0318, 0.0346, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 22:57:06,013 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=25790.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 22:57:12,582 INFO [train.py:901] (3/4) Epoch 4, batch 1550, loss[loss=0.2929, simple_loss=0.3563, pruned_loss=0.1148, over 8685.00 frames. ], tot_loss[loss=0.3166, simple_loss=0.37, pruned_loss=0.1316, over 1616502.39 frames. ], batch size: 39, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:57:20,259 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.62 vs. limit=5.0 +2023-02-05 22:57:23,289 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3272, 2.4258, 1.4374, 1.9210, 1.9396, 1.3871, 1.6687, 2.0340], + device='cuda:3'), covar=tensor([0.1059, 0.0349, 0.0919, 0.0474, 0.0550, 0.1063, 0.0814, 0.0591], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0240, 0.0324, 0.0304, 0.0336, 0.0317, 0.0345, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 22:57:27,012 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 3.100e+02 3.836e+02 5.066e+02 1.009e+03, threshold=7.672e+02, percent-clipped=5.0 +2023-02-05 22:57:46,740 INFO [train.py:901] (3/4) Epoch 4, batch 1600, loss[loss=0.3916, simple_loss=0.4395, pruned_loss=0.1718, over 8467.00 frames. ], tot_loss[loss=0.3142, simple_loss=0.3682, pruned_loss=0.1301, over 1614266.24 frames. 
], batch size: 48, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:03,561 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4074, 1.7014, 1.8550, 1.2631, 1.1402, 1.7672, 0.1675, 1.0354], + device='cuda:3'), covar=tensor([0.2684, 0.1861, 0.1415, 0.2473, 0.6098, 0.1132, 0.5803, 0.2468], + device='cuda:3'), in_proj_covar=tensor([0.0120, 0.0109, 0.0087, 0.0161, 0.0192, 0.0086, 0.0155, 0.0118], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 22:58:04,758 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=25876.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:58:15,939 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6548, 2.4816, 3.1835, 0.9434, 2.9337, 1.8395, 1.3406, 1.4529], + device='cuda:3'), covar=tensor([0.0256, 0.0111, 0.0075, 0.0206, 0.0115, 0.0265, 0.0269, 0.0157], + device='cuda:3'), in_proj_covar=tensor([0.0258, 0.0184, 0.0146, 0.0226, 0.0169, 0.0305, 0.0248, 0.0208], + device='cuda:3'), out_proj_covar=tensor([1.0990e-04, 7.7610e-05, 6.0591e-05, 9.2636e-05, 7.2225e-05, 1.3973e-04, + 1.0688e-04, 8.6092e-05], device='cuda:3') +2023-02-05 22:58:21,025 INFO [train.py:901] (3/4) Epoch 4, batch 1650, loss[loss=0.3035, simple_loss=0.3731, pruned_loss=0.117, over 8296.00 frames. ], tot_loss[loss=0.3157, simple_loss=0.3695, pruned_loss=0.131, over 1613750.25 frames. ], batch size: 23, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:58:24,591 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=25905.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 22:58:35,953 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.227e+02 3.823e+02 4.768e+02 5.766e+02 1.707e+03, threshold=9.535e+02, percent-clipped=9.0 +2023-02-05 22:58:36,374 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.28 vs. limit=5.0 +2023-02-05 22:58:56,117 INFO [train.py:901] (3/4) Epoch 4, batch 1700, loss[loss=0.2806, simple_loss=0.3404, pruned_loss=0.1104, over 8253.00 frames. ], tot_loss[loss=0.3147, simple_loss=0.369, pruned_loss=0.1302, over 1615917.76 frames. ], batch size: 22, lr: 1.90e-02, grad_scale: 8.0 +2023-02-05 22:59:28,692 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7636, 2.5178, 3.0825, 0.9869, 2.8377, 1.9021, 1.3425, 1.3695], + device='cuda:3'), covar=tensor([0.0278, 0.0106, 0.0072, 0.0201, 0.0103, 0.0215, 0.0305, 0.0162], + device='cuda:3'), in_proj_covar=tensor([0.0265, 0.0187, 0.0149, 0.0231, 0.0175, 0.0308, 0.0253, 0.0215], + device='cuda:3'), out_proj_covar=tensor([1.1280e-04, 7.8443e-05, 6.1954e-05, 9.5115e-05, 7.5216e-05, 1.4033e-04, + 1.0863e-04, 8.9326e-05], device='cuda:3') +2023-02-05 22:59:31,183 INFO [train.py:901] (3/4) Epoch 4, batch 1750, loss[loss=0.2778, simple_loss=0.3528, pruned_loss=0.1014, over 8527.00 frames. ], tot_loss[loss=0.3171, simple_loss=0.3707, pruned_loss=0.1318, over 1614649.63 frames. ], batch size: 31, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 22:59:46,995 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.187e+02 3.816e+02 4.801e+02 8.317e+02, threshold=7.632e+02, percent-clipped=0.0 +2023-02-05 23:00:06,090 INFO [train.py:901] (3/4) Epoch 4, batch 1800, loss[loss=0.2984, simple_loss=0.358, pruned_loss=0.1194, over 8030.00 frames. ], tot_loss[loss=0.3158, simple_loss=0.3694, pruned_loss=0.1311, over 1612664.70 frames. 
], batch size: 22, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:40,846 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3978, 1.9215, 3.2873, 2.5949, 2.3664, 1.9528, 1.3626, 1.2037], + device='cuda:3'), covar=tensor([0.1190, 0.1474, 0.0300, 0.0655, 0.0736, 0.0705, 0.0812, 0.1504], + device='cuda:3'), in_proj_covar=tensor([0.0662, 0.0582, 0.0488, 0.0546, 0.0658, 0.0539, 0.0543, 0.0550], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 23:00:41,280 INFO [train.py:901] (3/4) Epoch 4, batch 1850, loss[loss=0.2828, simple_loss=0.343, pruned_loss=0.1113, over 7815.00 frames. ], tot_loss[loss=0.3142, simple_loss=0.3679, pruned_loss=0.1303, over 1610111.31 frames. ], batch size: 20, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:00:55,437 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26120.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:00:56,608 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.260e+02 3.379e+02 4.261e+02 5.084e+02 1.608e+03, threshold=8.521e+02, percent-clipped=6.0 +2023-02-05 23:01:05,442 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.4967, 1.3261, 3.6039, 1.4403, 3.0784, 2.9768, 3.1267, 3.0816], + device='cuda:3'), covar=tensor([0.0406, 0.3155, 0.0432, 0.2120, 0.1067, 0.0601, 0.0481, 0.0538], + device='cuda:3'), in_proj_covar=tensor([0.0276, 0.0452, 0.0335, 0.0362, 0.0435, 0.0359, 0.0342, 0.0385], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 23:01:15,406 INFO [train.py:901] (3/4) Epoch 4, batch 1900, loss[loss=0.2687, simple_loss=0.3292, pruned_loss=0.104, over 7825.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3685, pruned_loss=0.1307, over 1610715.62 frames. ], batch size: 20, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:22,881 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26161.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:01:40,599 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26186.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:01:40,659 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26186.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:01:41,083 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-05 23:01:49,621 INFO [train.py:901] (3/4) Epoch 4, batch 1950, loss[loss=0.3092, simple_loss=0.36, pruned_loss=0.1292, over 7948.00 frames. ], tot_loss[loss=0.314, simple_loss=0.3677, pruned_loss=0.1301, over 1609820.60 frames. ], batch size: 20, lr: 1.89e-02, grad_scale: 8.0 +2023-02-05 23:01:52,432 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-05 23:02:04,052 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26220.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:02:05,149 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.303e+02 3.684e+02 4.572e+02 6.046e+02 1.247e+03, threshold=9.144e+02, percent-clipped=2.0 +2023-02-05 23:02:10,399 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-05 23:02:24,335 INFO [train.py:901] (3/4) Epoch 4, batch 2000, loss[loss=0.2797, simple_loss=0.3425, pruned_loss=0.1084, over 8081.00 frames. ], tot_loss[loss=0.3128, simple_loss=0.3673, pruned_loss=0.1291, over 1612482.98 frames. ], batch size: 21, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:02:26,386 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3581, 2.3994, 1.5857, 1.9128, 1.9756, 1.3338, 1.8129, 1.8490], + device='cuda:3'), covar=tensor([0.0989, 0.0311, 0.0826, 0.0461, 0.0545, 0.0994, 0.0720, 0.0589], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0233, 0.0324, 0.0308, 0.0329, 0.0314, 0.0339, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 23:02:33,520 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-05 23:02:36,635 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26268.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:02:38,771 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8670, 2.1033, 1.8622, 2.8170, 1.2857, 1.4125, 1.6830, 2.2338], + device='cuda:3'), covar=tensor([0.1234, 0.1210, 0.1456, 0.0417, 0.1755, 0.2459, 0.1708, 0.1072], + device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0290, 0.0305, 0.0227, 0.0272, 0.0308, 0.0317, 0.0289], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:02:46,064 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1277, 1.4193, 2.9521, 0.8845, 2.0776, 1.3354, 1.1606, 1.8090], + device='cuda:3'), covar=tensor([0.2170, 0.2080, 0.0751, 0.3526, 0.1436, 0.2817, 0.1958, 0.2266], + device='cuda:3'), in_proj_covar=tensor([0.0458, 0.0416, 0.0507, 0.0509, 0.0545, 0.0494, 0.0441, 0.0565], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:02:59,479 INFO [train.py:901] (3/4) Epoch 4, batch 2050, loss[loss=0.3522, simple_loss=0.4124, pruned_loss=0.146, over 8332.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3681, pruned_loss=0.1295, over 1614708.53 frames. ], batch size: 25, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:03:14,377 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.223e+02 3.433e+02 4.198e+02 5.260e+02 1.263e+03, threshold=8.396e+02, percent-clipped=5.0 +2023-02-05 23:03:24,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26335.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:03:34,743 INFO [train.py:901] (3/4) Epoch 4, batch 2100, loss[loss=0.2851, simple_loss=0.3527, pruned_loss=0.1088, over 7971.00 frames. ], tot_loss[loss=0.3139, simple_loss=0.3681, pruned_loss=0.1298, over 1617342.62 frames. ], batch size: 21, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:08,107 INFO [train.py:901] (3/4) Epoch 4, batch 2150, loss[loss=0.2938, simple_loss=0.3551, pruned_loss=0.1163, over 8234.00 frames. ], tot_loss[loss=0.3128, simple_loss=0.3676, pruned_loss=0.129, over 1616270.71 frames. 
], batch size: 22, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:24,393 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.431e+02 3.407e+02 4.210e+02 5.616e+02 1.521e+03, threshold=8.419e+02, percent-clipped=4.0 +2023-02-05 23:04:31,157 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26432.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:04:43,606 INFO [train.py:901] (3/4) Epoch 4, batch 2200, loss[loss=0.3548, simple_loss=0.3775, pruned_loss=0.1661, over 7971.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3665, pruned_loss=0.1292, over 1610838.77 frames. ], batch size: 21, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:04:53,186 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26464.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:18,123 INFO [train.py:901] (3/4) Epoch 4, batch 2250, loss[loss=0.2894, simple_loss=0.3532, pruned_loss=0.1128, over 7218.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.3659, pruned_loss=0.1286, over 1614338.61 frames. ], batch size: 16, lr: 1.88e-02, grad_scale: 8.0 +2023-02-05 23:05:33,090 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.188e+02 3.857e+02 4.748e+02 9.287e+02, threshold=7.714e+02, percent-clipped=1.0 +2023-02-05 23:05:38,938 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26530.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:05:52,817 INFO [train.py:901] (3/4) Epoch 4, batch 2300, loss[loss=0.2829, simple_loss=0.3573, pruned_loss=0.1042, over 8186.00 frames. ], tot_loss[loss=0.3113, simple_loss=0.3662, pruned_loss=0.1282, over 1615444.12 frames. ], batch size: 23, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:12,949 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26579.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:21,880 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26591.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:06:27,554 INFO [train.py:901] (3/4) Epoch 4, batch 2350, loss[loss=0.3627, simple_loss=0.4121, pruned_loss=0.1566, over 8701.00 frames. ], tot_loss[loss=0.3113, simple_loss=0.3657, pruned_loss=0.1285, over 1616057.90 frames. ], batch size: 49, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:06:35,960 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26612.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:06:38,764 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26616.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:06:42,405 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 3.505e+02 4.841e+02 5.770e+02 1.247e+03, threshold=9.683e+02, percent-clipped=6.0 +2023-02-05 23:06:58,654 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26645.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:07:01,508 INFO [train.py:901] (3/4) Epoch 4, batch 2400, loss[loss=0.3525, simple_loss=0.3928, pruned_loss=0.1561, over 7972.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3673, pruned_loss=0.1299, over 1608781.82 frames. 
], batch size: 21, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:17,305 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7252, 1.2497, 3.9312, 1.4563, 3.2634, 3.2546, 3.4683, 3.3882], + device='cuda:3'), covar=tensor([0.0344, 0.2899, 0.0301, 0.1968, 0.0923, 0.0564, 0.0382, 0.0460], + device='cuda:3'), in_proj_covar=tensor([0.0272, 0.0448, 0.0348, 0.0364, 0.0428, 0.0365, 0.0345, 0.0383], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 23:07:35,315 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9489, 1.0239, 1.6639, 0.8573, 1.6106, 1.8780, 1.7660, 1.5933], + device='cuda:3'), covar=tensor([0.0822, 0.0906, 0.0529, 0.1511, 0.0504, 0.0313, 0.0436, 0.0595], + device='cuda:3'), in_proj_covar=tensor([0.0225, 0.0259, 0.0205, 0.0262, 0.0209, 0.0187, 0.0195, 0.0260], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:07:37,135 INFO [train.py:901] (3/4) Epoch 4, batch 2450, loss[loss=0.2994, simple_loss=0.3443, pruned_loss=0.1273, over 7803.00 frames. ], tot_loss[loss=0.3152, simple_loss=0.3684, pruned_loss=0.131, over 1610572.04 frames. ], batch size: 20, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:07:51,856 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.211e+02 4.300e+02 5.616e+02 1.854e+03, threshold=8.599e+02, percent-clipped=7.0 +2023-02-05 23:07:55,326 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26727.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:10,588 INFO [train.py:901] (3/4) Epoch 4, batch 2500, loss[loss=0.3405, simple_loss=0.3808, pruned_loss=0.1501, over 8515.00 frames. ], tot_loss[loss=0.3157, simple_loss=0.3689, pruned_loss=0.1312, over 1615582.16 frames. ], batch size: 26, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:08:19,388 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7302, 2.5332, 2.8795, 0.8496, 2.7532, 1.8108, 1.3176, 1.9646], + device='cuda:3'), covar=tensor([0.0234, 0.0081, 0.0093, 0.0207, 0.0121, 0.0230, 0.0297, 0.0115], + device='cuda:3'), in_proj_covar=tensor([0.0258, 0.0185, 0.0150, 0.0222, 0.0170, 0.0303, 0.0248, 0.0207], + device='cuda:3'), out_proj_covar=tensor([1.0812e-04, 7.6519e-05, 6.0870e-05, 8.9652e-05, 7.1888e-05, 1.3598e-04, + 1.0501e-04, 8.4309e-05], device='cuda:3') +2023-02-05 23:08:29,253 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=26776.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:08:45,148 INFO [train.py:901] (3/4) Epoch 4, batch 2550, loss[loss=0.2999, simple_loss=0.3747, pruned_loss=0.1126, over 8354.00 frames. ], tot_loss[loss=0.3143, simple_loss=0.3681, pruned_loss=0.1303, over 1621133.75 frames. 
], batch size: 24, lr: 1.87e-02, grad_scale: 8.0 +2023-02-05 23:09:01,300 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.235e+02 3.301e+02 4.146e+02 5.074e+02 1.055e+03, threshold=8.293e+02, percent-clipped=2.0 +2023-02-05 23:09:10,581 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26835.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:11,944 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3248, 1.8967, 1.7569, 0.5661, 1.8551, 1.2774, 0.3774, 1.6454], + device='cuda:3'), covar=tensor([0.0181, 0.0074, 0.0077, 0.0171, 0.0104, 0.0290, 0.0244, 0.0070], + device='cuda:3'), in_proj_covar=tensor([0.0257, 0.0187, 0.0152, 0.0223, 0.0170, 0.0306, 0.0246, 0.0208], + device='cuda:3'), out_proj_covar=tensor([1.0805e-04, 7.7575e-05, 6.1764e-05, 8.9955e-05, 7.1652e-05, 1.3747e-04, + 1.0457e-04, 8.4644e-05], device='cuda:3') +2023-02-05 23:09:20,514 INFO [train.py:901] (3/4) Epoch 4, batch 2600, loss[loss=0.3327, simple_loss=0.3892, pruned_loss=0.1381, over 8501.00 frames. ], tot_loss[loss=0.3131, simple_loss=0.3676, pruned_loss=0.1293, over 1624317.99 frames. ], batch size: 31, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:27,765 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26860.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:50,370 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=26891.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:09:56,432 INFO [train.py:901] (3/4) Epoch 4, batch 2650, loss[loss=0.3178, simple_loss=0.369, pruned_loss=0.1332, over 8447.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3664, pruned_loss=0.1281, over 1622562.41 frames. ], batch size: 29, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:09:57,275 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26901.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:12,336 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 3.245e+02 3.916e+02 5.024e+02 1.006e+03, threshold=7.831e+02, percent-clipped=3.0 +2023-02-05 23:10:12,503 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26922.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:10:15,324 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=26926.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:21,530 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.03 vs. limit=2.0 +2023-02-05 23:10:32,094 INFO [train.py:901] (3/4) Epoch 4, batch 2700, loss[loss=0.2667, simple_loss=0.336, pruned_loss=0.09867, over 8190.00 frames. ], tot_loss[loss=0.3098, simple_loss=0.3648, pruned_loss=0.1274, over 1617077.16 frames. ], batch size: 23, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:10:44,346 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=26968.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:10:54,316 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=26983.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:05,902 INFO [train.py:901] (3/4) Epoch 4, batch 2750, loss[loss=0.2661, simple_loss=0.3269, pruned_loss=0.1027, over 7552.00 frames. ], tot_loss[loss=0.3097, simple_loss=0.3645, pruned_loss=0.1274, over 1615706.71 frames. 
], batch size: 18, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:11:12,359 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27008.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:11:21,392 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 3.589e+02 4.354e+02 5.460e+02 1.197e+03, threshold=8.707e+02, percent-clipped=9.0 +2023-02-05 23:11:40,852 INFO [train.py:901] (3/4) Epoch 4, batch 2800, loss[loss=0.2638, simple_loss=0.3287, pruned_loss=0.09949, over 7522.00 frames. ], tot_loss[loss=0.3097, simple_loss=0.3646, pruned_loss=0.1274, over 1613489.43 frames. ], batch size: 18, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:12:11,819 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-05 23:12:14,844 INFO [train.py:901] (3/4) Epoch 4, batch 2850, loss[loss=0.2874, simple_loss=0.3319, pruned_loss=0.1214, over 7935.00 frames. ], tot_loss[loss=0.3098, simple_loss=0.3645, pruned_loss=0.1276, over 1615310.23 frames. ], batch size: 20, lr: 1.86e-02, grad_scale: 8.0 +2023-02-05 23:12:16,670 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-05 23:12:25,957 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.88 vs. limit=2.0 +2023-02-05 23:12:30,250 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 3.374e+02 4.464e+02 5.831e+02 1.992e+03, threshold=8.927e+02, percent-clipped=6.0 +2023-02-05 23:12:31,895 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-05 23:12:39,737 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5846, 2.7930, 1.7639, 1.9974, 2.1841, 1.4595, 1.8818, 2.1174], + device='cuda:3'), covar=tensor([0.1125, 0.0276, 0.0749, 0.0570, 0.0503, 0.0952, 0.0832, 0.0742], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0245, 0.0308, 0.0306, 0.0330, 0.0309, 0.0338, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 23:12:47,570 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27147.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:12:49,277 INFO [train.py:901] (3/4) Epoch 4, batch 2900, loss[loss=0.2822, simple_loss=0.3283, pruned_loss=0.1181, over 6819.00 frames. ], tot_loss[loss=0.3101, simple_loss=0.3647, pruned_loss=0.1277, over 1613278.71 frames. ], batch size: 15, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:00,620 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27166.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:05,272 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27172.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:13:11,773 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-05 23:13:15,428 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2632, 1.7930, 2.7746, 1.0243, 1.9977, 1.5256, 1.5889, 1.5434], + device='cuda:3'), covar=tensor([0.1617, 0.1543, 0.0716, 0.3081, 0.1215, 0.2375, 0.1277, 0.1966], + device='cuda:3'), in_proj_covar=tensor([0.0450, 0.0420, 0.0505, 0.0511, 0.0553, 0.0488, 0.0431, 0.0560], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:13:24,075 INFO [train.py:901] (3/4) Epoch 4, batch 2950, loss[loss=0.2588, simple_loss=0.316, pruned_loss=0.1009, over 7545.00 frames. ], tot_loss[loss=0.3106, simple_loss=0.3655, pruned_loss=0.1279, over 1617810.78 frames. ], batch size: 18, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:34,431 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4705, 1.7127, 2.9621, 1.1530, 2.1295, 1.8657, 1.4983, 1.6693], + device='cuda:3'), covar=tensor([0.1262, 0.1482, 0.0451, 0.2594, 0.1055, 0.1858, 0.1247, 0.1680], + device='cuda:3'), in_proj_covar=tensor([0.0447, 0.0419, 0.0502, 0.0510, 0.0553, 0.0489, 0.0433, 0.0560], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:13:38,737 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 3.092e+02 3.649e+02 5.055e+02 1.216e+03, threshold=7.299e+02, percent-clipped=3.0 +2023-02-05 23:13:58,833 INFO [train.py:901] (3/4) Epoch 4, batch 3000, loss[loss=0.3507, simple_loss=0.4027, pruned_loss=0.1493, over 8325.00 frames. ], tot_loss[loss=0.3084, simple_loss=0.3639, pruned_loss=0.1264, over 1618124.07 frames. ], batch size: 25, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:13:58,833 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 23:14:04,835 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.5123, 1.2262, 3.6821, 1.4935, 3.1495, 3.0924, 3.3008, 3.2446], + device='cuda:3'), covar=tensor([0.0457, 0.3669, 0.0372, 0.2337, 0.1322, 0.0706, 0.0480, 0.0593], + device='cuda:3'), in_proj_covar=tensor([0.0278, 0.0452, 0.0352, 0.0365, 0.0438, 0.0359, 0.0356, 0.0394], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 23:14:11,267 INFO [train.py:935] (3/4) Epoch 4, validation: loss=0.2374, simple_loss=0.3304, pruned_loss=0.07225, over 944034.00 frames. +2023-02-05 23:14:11,268 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 23:14:23,018 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27266.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:14:24,760 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-05 23:14:45,722 INFO [train.py:901] (3/4) Epoch 4, batch 3050, loss[loss=0.3891, simple_loss=0.4171, pruned_loss=0.1805, over 7316.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.3643, pruned_loss=0.1275, over 1609491.73 frames. 
], batch size: 71, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:14:54,658 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27312.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:14:54,763 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4046, 1.8809, 1.5723, 1.4205, 1.5841, 1.5170, 1.9059, 1.9482], + device='cuda:3'), covar=tensor([0.0618, 0.1102, 0.1721, 0.1431, 0.0709, 0.1459, 0.0811, 0.0547], + device='cuda:3'), in_proj_covar=tensor([0.0159, 0.0200, 0.0239, 0.0199, 0.0157, 0.0204, 0.0165, 0.0167], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:15:01,945 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.415e+02 4.317e+02 5.768e+02 1.933e+03, threshold=8.634e+02, percent-clipped=10.0 +2023-02-05 23:15:08,182 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1851, 1.9772, 3.4340, 1.5882, 2.7699, 3.8725, 3.5374, 3.3730], + device='cuda:3'), covar=tensor([0.0879, 0.1195, 0.0325, 0.1774, 0.0504, 0.0239, 0.0346, 0.0485], + device='cuda:3'), in_proj_covar=tensor([0.0228, 0.0265, 0.0216, 0.0267, 0.0211, 0.0191, 0.0198, 0.0265], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:15:20,637 INFO [train.py:901] (3/4) Epoch 4, batch 3100, loss[loss=0.3819, simple_loss=0.4069, pruned_loss=0.1784, over 8445.00 frames. ], tot_loss[loss=0.3105, simple_loss=0.3648, pruned_loss=0.1281, over 1609788.33 frames. ], batch size: 27, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:15:41,813 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27381.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:15:54,825 INFO [train.py:901] (3/4) Epoch 4, batch 3150, loss[loss=0.2467, simple_loss=0.3169, pruned_loss=0.08828, over 7658.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3662, pruned_loss=0.1289, over 1612064.08 frames. ], batch size: 19, lr: 1.85e-02, grad_scale: 8.0 +2023-02-05 23:16:09,488 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 3.237e+02 4.041e+02 5.193e+02 1.210e+03, threshold=8.082e+02, percent-clipped=3.0 +2023-02-05 23:16:13,668 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27427.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:16:27,931 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.07 vs. limit=2.0 +2023-02-05 23:16:29,613 INFO [train.py:901] (3/4) Epoch 4, batch 3200, loss[loss=0.2722, simple_loss=0.3437, pruned_loss=0.1004, over 7973.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.3665, pruned_loss=0.1283, over 1613758.47 frames. ], batch size: 21, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:16:31,281 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.02 vs. limit=5.0 +2023-02-05 23:17:03,113 INFO [train.py:901] (3/4) Epoch 4, batch 3250, loss[loss=0.2913, simple_loss=0.3439, pruned_loss=0.1194, over 7454.00 frames. ], tot_loss[loss=0.3104, simple_loss=0.3654, pruned_loss=0.1277, over 1611029.66 frames. 
], batch size: 17, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:17:08,660 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3839, 1.6157, 1.5326, 1.2820, 1.5268, 1.3455, 1.7307, 1.5238], + device='cuda:3'), covar=tensor([0.0610, 0.1192, 0.1703, 0.1487, 0.0640, 0.1593, 0.0782, 0.0600], + device='cuda:3'), in_proj_covar=tensor([0.0159, 0.0198, 0.0236, 0.0197, 0.0155, 0.0202, 0.0162, 0.0166], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0006, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:17:10,576 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=27510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:17:17,970 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3614, 2.4748, 1.5623, 2.2358, 1.9250, 1.4298, 1.6567, 2.0343], + device='cuda:3'), covar=tensor([0.0960, 0.0303, 0.0816, 0.0429, 0.0546, 0.0954, 0.0891, 0.0609], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0238, 0.0308, 0.0299, 0.0322, 0.0307, 0.0332, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 23:17:18,423 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 3.449e+02 4.059e+02 4.930e+02 7.939e+02, threshold=8.117e+02, percent-clipped=0.0 +2023-02-05 23:17:37,484 INFO [train.py:901] (3/4) Epoch 4, batch 3300, loss[loss=0.3255, simple_loss=0.3659, pruned_loss=0.1426, over 7647.00 frames. ], tot_loss[loss=0.3092, simple_loss=0.3644, pruned_loss=0.127, over 1613993.24 frames. ], batch size: 19, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:12,294 INFO [train.py:901] (3/4) Epoch 4, batch 3350, loss[loss=0.2473, simple_loss=0.3048, pruned_loss=0.09493, over 7427.00 frames. ], tot_loss[loss=0.3099, simple_loss=0.3651, pruned_loss=0.1274, over 1617110.79 frames. ], batch size: 17, lr: 1.84e-02, grad_scale: 8.0 +2023-02-05 23:18:28,393 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.390e+02 3.326e+02 4.176e+02 5.439e+02 1.733e+03, threshold=8.353e+02, percent-clipped=9.0 +2023-02-05 23:18:30,513 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=27625.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:18:38,359 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27637.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:18:46,891 INFO [train.py:901] (3/4) Epoch 4, batch 3400, loss[loss=0.3365, simple_loss=0.393, pruned_loss=0.14, over 8026.00 frames. ], tot_loss[loss=0.3109, simple_loss=0.3661, pruned_loss=0.1279, over 1618136.41 frames. 
], batch size: 22, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:18:50,486 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3119, 1.7585, 2.8317, 1.0820, 2.0391, 1.7222, 1.5183, 1.5771], + device='cuda:3'), covar=tensor([0.1457, 0.1507, 0.0536, 0.2717, 0.1098, 0.2029, 0.1245, 0.1612], + device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0430, 0.0517, 0.0521, 0.0569, 0.0501, 0.0443, 0.0573], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:18:55,795 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27662.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:19:10,315 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27683.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:21,476 INFO [train.py:901] (3/4) Epoch 4, batch 3450, loss[loss=0.336, simple_loss=0.3923, pruned_loss=0.1398, over 8290.00 frames. ], tot_loss[loss=0.3116, simple_loss=0.3667, pruned_loss=0.1283, over 1620761.06 frames. ], batch size: 23, lr: 1.84e-02, grad_scale: 16.0 +2023-02-05 23:19:26,949 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27708.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:19:36,065 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.499e+02 3.357e+02 4.072e+02 5.275e+02 9.264e+02, threshold=8.144e+02, percent-clipped=1.0 +2023-02-05 23:19:43,549 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=27732.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:19:49,036 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 23:19:55,679 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. limit=2.0 +2023-02-05 23:19:55,903 INFO [train.py:901] (3/4) Epoch 4, batch 3500, loss[loss=0.3061, simple_loss=0.3644, pruned_loss=0.1238, over 8024.00 frames. ], tot_loss[loss=0.3125, simple_loss=0.3676, pruned_loss=0.1287, over 1621711.03 frames. ], batch size: 22, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:19:59,473 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4323, 1.4405, 2.7757, 1.2819, 1.9056, 3.0322, 2.8290, 2.5974], + device='cuda:3'), covar=tensor([0.1046, 0.1281, 0.0418, 0.1816, 0.0749, 0.0259, 0.0455, 0.0596], + device='cuda:3'), in_proj_covar=tensor([0.0226, 0.0267, 0.0215, 0.0262, 0.0216, 0.0189, 0.0198, 0.0265], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:20:10,686 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-05 23:20:31,122 INFO [train.py:901] (3/4) Epoch 4, batch 3550, loss[loss=0.3593, simple_loss=0.4049, pruned_loss=0.1568, over 8677.00 frames. ], tot_loss[loss=0.3101, simple_loss=0.3656, pruned_loss=0.1273, over 1622594.56 frames. ], batch size: 34, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:20:46,086 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.197e+02 3.262e+02 3.955e+02 5.254e+02 1.114e+03, threshold=7.909e+02, percent-clipped=8.0 +2023-02-05 23:21:05,473 INFO [train.py:901] (3/4) Epoch 4, batch 3600, loss[loss=0.2615, simple_loss=0.3302, pruned_loss=0.09641, over 7968.00 frames. ], tot_loss[loss=0.311, simple_loss=0.3663, pruned_loss=0.1279, over 1624116.93 frames. 
], batch size: 21, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:27,351 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=27881.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:39,940 INFO [train.py:901] (3/4) Epoch 4, batch 3650, loss[loss=0.3601, simple_loss=0.3919, pruned_loss=0.1642, over 6784.00 frames. ], tot_loss[loss=0.307, simple_loss=0.3636, pruned_loss=0.1252, over 1614900.18 frames. ], batch size: 71, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:21:44,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=27906.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:21:56,104 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.998e+02 3.334e+02 3.945e+02 4.811e+02 1.062e+03, threshold=7.891e+02, percent-clipped=4.0 +2023-02-05 23:22:13,487 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:22:14,787 INFO [train.py:901] (3/4) Epoch 4, batch 3700, loss[loss=0.3761, simple_loss=0.3927, pruned_loss=0.1798, over 8325.00 frames. ], tot_loss[loss=0.3082, simple_loss=0.3642, pruned_loss=0.1261, over 1616729.16 frames. ], batch size: 26, lr: 1.83e-02, grad_scale: 16.0 +2023-02-05 23:22:18,647 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-05 23:22:29,744 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0 +2023-02-05 23:22:49,606 INFO [train.py:901] (3/4) Epoch 4, batch 3750, loss[loss=0.3147, simple_loss=0.3838, pruned_loss=0.1228, over 8301.00 frames. ], tot_loss[loss=0.3094, simple_loss=0.3651, pruned_loss=0.1269, over 1616656.35 frames. ], batch size: 23, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:22:53,491 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9088, 1.7384, 5.8356, 1.8536, 5.3132, 4.8193, 5.4571, 5.2754], + device='cuda:3'), covar=tensor([0.0320, 0.3328, 0.0221, 0.2069, 0.0769, 0.0474, 0.0309, 0.0357], + device='cuda:3'), in_proj_covar=tensor([0.0281, 0.0457, 0.0352, 0.0368, 0.0440, 0.0365, 0.0357, 0.0395], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 23:23:05,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.245e+02 3.553e+02 4.442e+02 6.055e+02 1.985e+03, threshold=8.883e+02, percent-clipped=11.0 +2023-02-05 23:23:13,957 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6031, 1.6062, 4.6664, 1.8023, 4.0607, 3.9498, 4.2126, 4.1433], + device='cuda:3'), covar=tensor([0.0357, 0.3006, 0.0274, 0.2087, 0.0955, 0.0552, 0.0404, 0.0433], + device='cuda:3'), in_proj_covar=tensor([0.0283, 0.0457, 0.0354, 0.0370, 0.0443, 0.0367, 0.0357, 0.0394], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 23:23:25,246 INFO [train.py:901] (3/4) Epoch 4, batch 3800, loss[loss=0.279, simple_loss=0.3345, pruned_loss=0.1117, over 7780.00 frames. ], tot_loss[loss=0.3096, simple_loss=0.365, pruned_loss=0.1271, over 1616114.42 frames. 
], batch size: 19, lr: 1.83e-02, grad_scale: 8.0 +2023-02-05 23:23:42,804 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28076.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:23:47,615 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4940, 2.2826, 4.5064, 1.2254, 2.8924, 2.1307, 1.7221, 2.2263], + device='cuda:3'), covar=tensor([0.1374, 0.1671, 0.0607, 0.2799, 0.1359, 0.2033, 0.1275, 0.2247], + device='cuda:3'), in_proj_covar=tensor([0.0449, 0.0426, 0.0505, 0.0512, 0.0560, 0.0487, 0.0433, 0.0564], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:24:00,299 INFO [train.py:901] (3/4) Epoch 4, batch 3850, loss[loss=0.2465, simple_loss=0.3116, pruned_loss=0.09066, over 7943.00 frames. ], tot_loss[loss=0.3091, simple_loss=0.3644, pruned_loss=0.1269, over 1614543.36 frames. ], batch size: 20, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:24:15,220 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 3.238e+02 4.124e+02 5.182e+02 9.210e+02, threshold=8.247e+02, percent-clipped=1.0 +2023-02-05 23:24:17,309 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-05 23:24:34,697 INFO [train.py:901] (3/4) Epoch 4, batch 3900, loss[loss=0.2995, simple_loss=0.3623, pruned_loss=0.1183, over 8448.00 frames. ], tot_loss[loss=0.3113, simple_loss=0.3661, pruned_loss=0.1282, over 1614231.27 frames. ], batch size: 27, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:02,735 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28191.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:25:08,590 INFO [train.py:901] (3/4) Epoch 4, batch 3950, loss[loss=0.3702, simple_loss=0.4063, pruned_loss=0.167, over 6962.00 frames. ], tot_loss[loss=0.3119, simple_loss=0.3668, pruned_loss=0.1285, over 1614918.25 frames. ], batch size: 72, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:19,008 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4621, 1.9601, 3.3713, 1.1843, 2.4904, 1.7252, 1.5619, 1.8092], + device='cuda:3'), covar=tensor([0.1437, 0.1615, 0.0583, 0.2737, 0.1231, 0.2205, 0.1356, 0.2093], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0427, 0.0504, 0.0509, 0.0565, 0.0493, 0.0434, 0.0565], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:25:24,838 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.357e+02 4.080e+02 5.453e+02 1.389e+03, threshold=8.161e+02, percent-clipped=8.0 +2023-02-05 23:25:32,133 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-05 23:25:41,198 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28247.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:25:43,099 INFO [train.py:901] (3/4) Epoch 4, batch 4000, loss[loss=0.2556, simple_loss=0.3183, pruned_loss=0.0965, over 7546.00 frames. ], tot_loss[loss=0.3118, simple_loss=0.3667, pruned_loss=0.1285, over 1613780.18 frames. ], batch size: 18, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:25:56,829 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. 
limit=5.0 +2023-02-05 23:25:59,940 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28273.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:26:17,616 INFO [train.py:901] (3/4) Epoch 4, batch 4050, loss[loss=0.2874, simple_loss=0.3547, pruned_loss=0.1101, over 8457.00 frames. ], tot_loss[loss=0.3112, simple_loss=0.3662, pruned_loss=0.1281, over 1614079.47 frames. ], batch size: 27, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:26:21,162 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28305.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:26:34,449 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.986e+02 3.482e+02 4.201e+02 5.400e+02 1.078e+03, threshold=8.403e+02, percent-clipped=4.0 +2023-02-05 23:26:52,365 INFO [train.py:901] (3/4) Epoch 4, batch 4100, loss[loss=0.3163, simple_loss=0.3464, pruned_loss=0.1431, over 7807.00 frames. ], tot_loss[loss=0.312, simple_loss=0.3669, pruned_loss=0.1286, over 1614984.26 frames. ], batch size: 19, lr: 1.82e-02, grad_scale: 8.0 +2023-02-05 23:27:27,346 INFO [train.py:901] (3/4) Epoch 4, batch 4150, loss[loss=0.3619, simple_loss=0.4091, pruned_loss=0.1573, over 8506.00 frames. ], tot_loss[loss=0.3099, simple_loss=0.3652, pruned_loss=0.1273, over 1613242.12 frames. ], batch size: 26, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:27:43,613 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.222e+02 3.372e+02 4.170e+02 5.520e+02 1.384e+03, threshold=8.341e+02, percent-clipped=6.0 +2023-02-05 23:28:00,679 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28447.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:28:02,480 INFO [train.py:901] (3/4) Epoch 4, batch 4200, loss[loss=0.2522, simple_loss=0.3203, pruned_loss=0.09208, over 8090.00 frames. ], tot_loss[loss=0.3093, simple_loss=0.3649, pruned_loss=0.1268, over 1613346.80 frames. ], batch size: 21, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:07,667 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-05 23:28:17,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28472.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:28:18,117 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1389, 1.4037, 1.5151, 1.1972, 1.5005, 1.3942, 1.5695, 1.5431], + device='cuda:3'), covar=tensor([0.0751, 0.1351, 0.1928, 0.1699, 0.0719, 0.1625, 0.0949, 0.0649], + device='cuda:3'), in_proj_covar=tensor([0.0158, 0.0199, 0.0236, 0.0201, 0.0156, 0.0201, 0.0163, 0.0168], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:28:29,077 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-05 23:28:36,444 INFO [train.py:901] (3/4) Epoch 4, batch 4250, loss[loss=0.277, simple_loss=0.3356, pruned_loss=0.1092, over 7657.00 frames. ], tot_loss[loss=0.3115, simple_loss=0.3663, pruned_loss=0.1284, over 1618203.68 frames. 
], batch size: 19, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:28:39,208 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28504.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:43,325 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28510.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:28:51,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 3.170e+02 4.105e+02 5.662e+02 1.430e+03, threshold=8.210e+02, percent-clipped=9.0 +2023-02-05 23:28:57,105 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-05 23:29:10,383 INFO [train.py:901] (3/4) Epoch 4, batch 4300, loss[loss=0.3491, simple_loss=0.3903, pruned_loss=0.1539, over 7161.00 frames. ], tot_loss[loss=0.3137, simple_loss=0.3679, pruned_loss=0.1298, over 1621229.20 frames. ], batch size: 71, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:34,048 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-05 23:29:38,464 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28591.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:29:45,232 INFO [train.py:901] (3/4) Epoch 4, batch 4350, loss[loss=0.4299, simple_loss=0.446, pruned_loss=0.2069, over 6992.00 frames. ], tot_loss[loss=0.3136, simple_loss=0.3676, pruned_loss=0.1298, over 1617406.85 frames. ], batch size: 71, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:29:57,581 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28617.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:29:58,773 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-05 23:30:01,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 3.285e+02 3.917e+02 4.771e+02 1.131e+03, threshold=7.833e+02, percent-clipped=1.0 +2023-02-05 23:30:19,076 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28649.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:30:19,682 INFO [train.py:901] (3/4) Epoch 4, batch 4400, loss[loss=0.3425, simple_loss=0.388, pruned_loss=0.1485, over 8481.00 frames. ], tot_loss[loss=0.3121, simple_loss=0.3668, pruned_loss=0.1287, over 1615119.75 frames. ], batch size: 27, lr: 1.81e-02, grad_scale: 8.0 +2023-02-05 23:30:37,240 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1810, 2.1122, 1.4548, 2.0548, 1.7418, 1.0987, 1.5348, 1.9627], + device='cuda:3'), covar=tensor([0.1014, 0.0517, 0.1019, 0.0448, 0.0662, 0.1351, 0.0891, 0.0563], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0235, 0.0303, 0.0308, 0.0324, 0.0307, 0.0336, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 23:30:41,088 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-05 23:30:46,448 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.5786, 5.6508, 5.0249, 2.0606, 4.9308, 5.3062, 5.3127, 4.7235], + device='cuda:3'), covar=tensor([0.0559, 0.0341, 0.0670, 0.4240, 0.0534, 0.0411, 0.0857, 0.0434], + device='cuda:3'), in_proj_covar=tensor([0.0366, 0.0268, 0.0296, 0.0388, 0.0293, 0.0239, 0.0282, 0.0222], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 23:30:54,262 INFO [train.py:901] (3/4) Epoch 4, batch 4450, loss[loss=0.3361, simple_loss=0.379, pruned_loss=0.1466, over 7809.00 frames. ], tot_loss[loss=0.3107, simple_loss=0.3653, pruned_loss=0.128, over 1615726.45 frames. ], batch size: 20, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:30:58,482 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28706.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:31:02,176 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-05 23:31:04,243 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.64 vs. limit=5.0 +2023-02-05 23:31:09,129 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. limit=2.0 +2023-02-05 23:31:10,734 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.181e+02 3.229e+02 4.056e+02 4.786e+02 8.259e+02, threshold=8.113e+02, percent-clipped=1.0 +2023-02-05 23:31:17,728 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28732.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:31:30,206 INFO [train.py:901] (3/4) Epoch 4, batch 4500, loss[loss=0.2798, simple_loss=0.3431, pruned_loss=0.1083, over 7803.00 frames. ], tot_loss[loss=0.308, simple_loss=0.3635, pruned_loss=0.1263, over 1615138.66 frames. ], batch size: 20, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:31:36,221 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-05 23:31:39,884 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28764.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:05,459 INFO [train.py:901] (3/4) Epoch 4, batch 4550, loss[loss=0.2667, simple_loss=0.3342, pruned_loss=0.09962, over 8140.00 frames. ], tot_loss[loss=0.3084, simple_loss=0.3638, pruned_loss=0.1265, over 1615221.32 frames. ], batch size: 22, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:21,344 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.133e+02 4.046e+02 5.517e+02 1.256e+03, threshold=8.093e+02, percent-clipped=3.0 +2023-02-05 23:32:37,064 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4895, 1.7289, 2.7297, 1.1749, 1.9256, 1.6877, 1.5086, 1.7199], + device='cuda:3'), covar=tensor([0.1201, 0.1469, 0.0491, 0.2516, 0.1119, 0.1840, 0.1157, 0.1615], + device='cuda:3'), in_proj_covar=tensor([0.0452, 0.0426, 0.0503, 0.0515, 0.0557, 0.0497, 0.0438, 0.0566], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:32:39,629 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28848.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:32:40,907 INFO [train.py:901] (3/4) Epoch 4, batch 4600, loss[loss=0.3479, simple_loss=0.3958, pruned_loss=0.15, over 8251.00 frames. 
], tot_loss[loss=0.3061, simple_loss=0.3618, pruned_loss=0.1252, over 1616626.26 frames. ], batch size: 24, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:32:43,605 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=28854.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:00,447 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=28879.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:05,969 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-05 23:33:14,778 INFO [train.py:901] (3/4) Epoch 4, batch 4650, loss[loss=0.3326, simple_loss=0.3792, pruned_loss=0.143, over 8561.00 frames. ], tot_loss[loss=0.3065, simple_loss=0.3616, pruned_loss=0.1257, over 1609052.11 frames. ], batch size: 49, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:30,676 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 3.425e+02 4.570e+02 5.631e+02 1.457e+03, threshold=9.141e+02, percent-clipped=7.0 +2023-02-05 23:33:49,339 INFO [train.py:901] (3/4) Epoch 4, batch 4700, loss[loss=0.3212, simple_loss=0.372, pruned_loss=0.1352, over 8248.00 frames. ], tot_loss[loss=0.3074, simple_loss=0.3628, pruned_loss=0.126, over 1614466.17 frames. ], batch size: 24, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:33:58,483 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28962.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:33:59,157 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28963.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:03,891 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=28969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:15,893 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=28987.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:16,547 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=28988.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:34:24,384 INFO [train.py:901] (3/4) Epoch 4, batch 4750, loss[loss=0.2987, simple_loss=0.3405, pruned_loss=0.1285, over 7704.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3618, pruned_loss=0.1259, over 1606674.88 frames. ], batch size: 18, lr: 1.80e-02, grad_scale: 8.0 +2023-02-05 23:34:33,284 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29013.0, num_to_drop=1, layers_to_drop={1} +2023-02-05 23:34:38,668 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:38,749 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29020.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:40,432 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.078e+02 3.145e+02 3.754e+02 5.040e+02 8.107e+02, threshold=7.508e+02, percent-clipped=0.0 +2023-02-05 23:34:40,462 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-05 23:34:42,472 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-05 23:34:56,219 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29045.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:34:59,308 INFO [train.py:901] (3/4) Epoch 4, batch 4800, loss[loss=0.3806, simple_loss=0.4144, pruned_loss=0.1734, over 8512.00 frames. ], tot_loss[loss=0.307, simple_loss=0.3621, pruned_loss=0.1259, over 1608793.56 frames. ], batch size: 39, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:34,008 INFO [train.py:901] (3/4) Epoch 4, batch 4850, loss[loss=0.2929, simple_loss=0.3372, pruned_loss=0.1243, over 7705.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3602, pruned_loss=0.1249, over 1606752.75 frames. ], batch size: 18, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:35:34,022 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-05 23:35:49,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.099e+02 3.374e+02 4.405e+02 6.016e+02 1.134e+03, threshold=8.810e+02, percent-clipped=7.0 +2023-02-05 23:35:51,361 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-05 23:36:07,826 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9817, 1.2306, 4.1137, 1.5108, 3.5079, 3.3324, 3.6963, 3.5860], + device='cuda:3'), covar=tensor([0.0440, 0.3304, 0.0406, 0.2196, 0.1006, 0.0633, 0.0411, 0.0509], + device='cuda:3'), in_proj_covar=tensor([0.0284, 0.0457, 0.0349, 0.0368, 0.0433, 0.0367, 0.0354, 0.0395], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-05 23:36:08,348 INFO [train.py:901] (3/4) Epoch 4, batch 4900, loss[loss=0.3625, simple_loss=0.4015, pruned_loss=0.1618, over 8023.00 frames. ], tot_loss[loss=0.3043, simple_loss=0.3598, pruned_loss=0.1244, over 1605444.24 frames. ], batch size: 22, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:41,956 INFO [train.py:901] (3/4) Epoch 4, batch 4950, loss[loss=0.3545, simple_loss=0.3999, pruned_loss=0.1546, over 7646.00 frames. ], tot_loss[loss=0.3074, simple_loss=0.3622, pruned_loss=0.1263, over 1607548.99 frames. ], batch size: 19, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:36:56,262 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29219.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:36:58,772 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.095e+02 3.208e+02 3.912e+02 5.596e+02 9.849e+02, threshold=7.824e+02, percent-clipped=2.0 +2023-02-05 23:36:58,867 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29223.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:00,346 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29225.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:12,971 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29244.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:16,652 INFO [train.py:901] (3/4) Epoch 4, batch 5000, loss[loss=0.2993, simple_loss=0.3666, pruned_loss=0.116, over 8539.00 frames. ], tot_loss[loss=0.3059, simple_loss=0.3612, pruned_loss=0.1253, over 1609444.07 frames. 
], batch size: 31, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:37:16,877 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29250.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:19,516 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29254.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:37:51,636 INFO [train.py:901] (3/4) Epoch 4, batch 5050, loss[loss=0.3298, simple_loss=0.3862, pruned_loss=0.1367, over 8742.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3619, pruned_loss=0.1258, over 1610130.59 frames. ], batch size: 30, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:07,700 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 3.436e+02 4.072e+02 5.001e+02 1.022e+03, threshold=8.144e+02, percent-clipped=3.0 +2023-02-05 23:38:14,945 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-05 23:38:18,451 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29338.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:20,957 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.56 vs. limit=5.0 +2023-02-05 23:38:26,650 INFO [train.py:901] (3/4) Epoch 4, batch 5100, loss[loss=0.3033, simple_loss=0.3684, pruned_loss=0.1192, over 8454.00 frames. ], tot_loss[loss=0.3072, simple_loss=0.362, pruned_loss=0.1262, over 1608301.42 frames. ], batch size: 27, lr: 1.79e-02, grad_scale: 8.0 +2023-02-05 23:38:36,226 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29364.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:38:51,190 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6310, 2.3799, 4.7886, 1.1391, 2.9906, 2.2842, 1.6674, 2.4108], + device='cuda:3'), covar=tensor([0.1372, 0.1539, 0.0458, 0.2871, 0.1324, 0.1908, 0.1272, 0.2361], + device='cuda:3'), in_proj_covar=tensor([0.0454, 0.0434, 0.0516, 0.0520, 0.0561, 0.0497, 0.0440, 0.0575], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:39:00,479 INFO [train.py:901] (3/4) Epoch 4, batch 5150, loss[loss=0.3389, simple_loss=0.3917, pruned_loss=0.1431, over 8659.00 frames. ], tot_loss[loss=0.3067, simple_loss=0.3619, pruned_loss=0.1257, over 1611154.77 frames. ], batch size: 49, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:16,241 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.145e+02 3.888e+02 4.871e+02 1.199e+03, threshold=7.777e+02, percent-clipped=1.0 +2023-02-05 23:39:35,367 INFO [train.py:901] (3/4) Epoch 4, batch 5200, loss[loss=0.3042, simple_loss=0.3654, pruned_loss=0.1215, over 8508.00 frames. ], tot_loss[loss=0.3079, simple_loss=0.3629, pruned_loss=0.1264, over 1609592.47 frames. ], batch size: 26, lr: 1.78e-02, grad_scale: 8.0 +2023-02-05 23:39:37,637 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0423, 2.4737, 2.8724, 1.0271, 2.7296, 1.8254, 1.4894, 1.6565], + device='cuda:3'), covar=tensor([0.0266, 0.0161, 0.0096, 0.0228, 0.0178, 0.0321, 0.0290, 0.0153], + device='cuda:3'), in_proj_covar=tensor([0.0278, 0.0201, 0.0160, 0.0237, 0.0191, 0.0325, 0.0262, 0.0221], + device='cuda:3'), out_proj_covar=tensor([1.1113e-04, 8.0421e-05, 6.0837e-05, 9.1653e-05, 7.6934e-05, 1.3990e-04, + 1.0604e-04, 8.6328e-05], device='cuda:3') +2023-02-05 23:39:45,745 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.97 vs. 
limit=5.0 +2023-02-05 23:39:54,959 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29479.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:40:09,488 INFO [train.py:901] (3/4) Epoch 4, batch 5250, loss[loss=0.2867, simple_loss=0.3391, pruned_loss=0.1172, over 7797.00 frames. ], tot_loss[loss=0.3074, simple_loss=0.362, pruned_loss=0.1264, over 1606190.57 frames. ], batch size: 19, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:12,206 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-05 23:40:25,988 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.346e+02 3.507e+02 4.371e+02 5.555e+02 1.318e+03, threshold=8.742e+02, percent-clipped=11.0 +2023-02-05 23:40:43,419 INFO [train.py:901] (3/4) Epoch 4, batch 5300, loss[loss=0.3814, simple_loss=0.4242, pruned_loss=0.1693, over 8361.00 frames. ], tot_loss[loss=0.3062, simple_loss=0.3614, pruned_loss=0.1255, over 1610314.98 frames. ], batch size: 24, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:40:45,984 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-05 23:40:57,663 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29569.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:14,745 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29594.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:17,292 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29598.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:18,613 INFO [train.py:901] (3/4) Epoch 4, batch 5350, loss[loss=0.3874, simple_loss=0.4009, pruned_loss=0.1869, over 6523.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3601, pruned_loss=0.124, over 1609437.86 frames. ], batch size: 71, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:41:32,366 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29619.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:41:35,509 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.071e+02 3.127e+02 4.006e+02 4.952e+02 2.682e+03, threshold=8.012e+02, percent-clipped=7.0 +2023-02-05 23:41:53,618 INFO [train.py:901] (3/4) Epoch 4, batch 5400, loss[loss=0.3584, simple_loss=0.3887, pruned_loss=0.164, over 6859.00 frames. ], tot_loss[loss=0.3061, simple_loss=0.3619, pruned_loss=0.1251, over 1611946.68 frames. ], batch size: 72, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:14,536 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29680.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:28,598 INFO [train.py:901] (3/4) Epoch 4, batch 5450, loss[loss=0.287, simple_loss=0.3589, pruned_loss=0.1075, over 8286.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3614, pruned_loss=0.1246, over 1610687.87 frames. ], batch size: 23, lr: 1.78e-02, grad_scale: 4.0 +2023-02-05 23:42:37,306 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=29713.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:41,415 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.08 vs. 
limit=2.0 +2023-02-05 23:42:44,939 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.180e+02 3.089e+02 4.007e+02 5.016e+02 9.074e+02, threshold=8.014e+02, percent-clipped=4.0 +2023-02-05 23:42:52,700 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29735.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:42:57,989 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-05 23:43:02,818 INFO [train.py:901] (3/4) Epoch 4, batch 5500, loss[loss=0.2746, simple_loss=0.3461, pruned_loss=0.1015, over 8023.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3621, pruned_loss=0.1241, over 1612840.77 frames. ], batch size: 22, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:10,326 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29760.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:38,374 INFO [train.py:901] (3/4) Epoch 4, batch 5550, loss[loss=0.4029, simple_loss=0.4383, pruned_loss=0.1838, over 8661.00 frames. ], tot_loss[loss=0.3058, simple_loss=0.3628, pruned_loss=0.1244, over 1618262.58 frames. ], batch size: 39, lr: 1.77e-02, grad_scale: 4.0 +2023-02-05 23:43:51,782 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=29820.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:43:53,967 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-05 23:43:54,237 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 3.211e+02 3.931e+02 4.808e+02 9.688e+02, threshold=7.861e+02, percent-clipped=2.0 +2023-02-05 23:44:12,161 INFO [train.py:901] (3/4) Epoch 4, batch 5600, loss[loss=0.392, simple_loss=0.4207, pruned_loss=0.1817, over 8646.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3607, pruned_loss=0.1235, over 1615031.22 frames. ], batch size: 34, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:17,401 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-05 23:44:46,061 INFO [train.py:901] (3/4) Epoch 4, batch 5650, loss[loss=0.3773, simple_loss=0.3817, pruned_loss=0.1864, over 7801.00 frames. ], tot_loss[loss=0.3068, simple_loss=0.3625, pruned_loss=0.1255, over 1613962.77 frames. ], batch size: 19, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:44:55,376 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=29913.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:03,295 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.126e+02 3.236e+02 4.025e+02 5.119e+02 8.732e+02, threshold=8.050e+02, percent-clipped=2.0 +2023-02-05 23:45:03,329 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-05 23:45:20,778 INFO [train.py:901] (3/4) Epoch 4, batch 5700, loss[loss=0.3125, simple_loss=0.3742, pruned_loss=0.1254, over 8463.00 frames. ], tot_loss[loss=0.3076, simple_loss=0.3635, pruned_loss=0.1259, over 1617225.07 frames. 
], batch size: 49, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:45:34,487 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=29969.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:50,023 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.5189, 2.3163, 1.9057, 1.8723, 1.7989, 2.0303, 2.5240, 2.0702], + device='cuda:3'), covar=tensor([0.0562, 0.1146, 0.1761, 0.1351, 0.0666, 0.1418, 0.0722, 0.0600], + device='cuda:3'), in_proj_covar=tensor([0.0153, 0.0193, 0.0229, 0.0194, 0.0151, 0.0198, 0.0159, 0.0161], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:45:52,088 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=29994.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:45:55,955 INFO [train.py:901] (3/4) Epoch 4, batch 5750, loss[loss=0.3177, simple_loss=0.374, pruned_loss=0.1307, over 8357.00 frames. ], tot_loss[loss=0.3052, simple_loss=0.3614, pruned_loss=0.1245, over 1612424.29 frames. ], batch size: 24, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:46:00,415 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0286, 1.5823, 3.2102, 1.3654, 2.1980, 3.7198, 3.4747, 3.1112], + device='cuda:3'), covar=tensor([0.1082, 0.1532, 0.0420, 0.2125, 0.0847, 0.0245, 0.0376, 0.0646], + device='cuda:3'), in_proj_covar=tensor([0.0227, 0.0266, 0.0215, 0.0266, 0.0220, 0.0193, 0.0200, 0.0264], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:46:07,151 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-05 23:46:13,262 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.352e+02 3.278e+02 4.024e+02 4.787e+02 1.009e+03, threshold=8.047e+02, percent-clipped=4.0 +2023-02-05 23:46:13,360 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30024.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:17,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30028.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:46:32,295 INFO [train.py:901] (3/4) Epoch 4, batch 5800, loss[loss=0.2858, simple_loss=0.3489, pruned_loss=0.1114, over 8109.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3603, pruned_loss=0.1233, over 1613005.15 frames. ], batch size: 23, lr: 1.77e-02, grad_scale: 8.0 +2023-02-05 23:47:06,538 INFO [train.py:901] (3/4) Epoch 4, batch 5850, loss[loss=0.2589, simple_loss=0.3344, pruned_loss=0.09166, over 8025.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3606, pruned_loss=0.1235, over 1610144.79 frames. 
], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:23,096 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 3.427e+02 4.657e+02 5.932e+02 9.223e+02, threshold=9.314e+02, percent-clipped=4.0 +2023-02-05 23:47:33,289 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30139.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:35,878 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30143.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:47:36,576 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2364, 1.7151, 1.6444, 0.5433, 1.6907, 1.2643, 0.1986, 1.5235], + device='cuda:3'), covar=tensor([0.0152, 0.0089, 0.0091, 0.0140, 0.0095, 0.0260, 0.0231, 0.0070], + device='cuda:3'), in_proj_covar=tensor([0.0286, 0.0199, 0.0168, 0.0243, 0.0194, 0.0326, 0.0266, 0.0229], + device='cuda:3'), out_proj_covar=tensor([1.1371e-04, 7.8258e-05, 6.3216e-05, 9.2664e-05, 7.6986e-05, 1.3782e-04, + 1.0688e-04, 8.9229e-05], device='cuda:3') +2023-02-05 23:47:41,684 INFO [train.py:901] (3/4) Epoch 4, batch 5900, loss[loss=0.291, simple_loss=0.3573, pruned_loss=0.1123, over 8367.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3603, pruned_loss=0.1237, over 1610570.56 frames. ], batch size: 24, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:47:43,475 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.83 vs. limit=5.0 +2023-02-05 23:47:51,359 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30164.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:15,970 INFO [train.py:901] (3/4) Epoch 4, batch 5950, loss[loss=0.3297, simple_loss=0.3941, pruned_loss=0.1327, over 8107.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3598, pruned_loss=0.1234, over 1612391.34 frames. ], batch size: 23, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:32,438 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.869e+02 3.143e+02 3.968e+02 4.977e+02 1.070e+03, threshold=7.937e+02, percent-clipped=1.0 +2023-02-05 23:48:35,349 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30227.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:48:50,185 INFO [train.py:901] (3/4) Epoch 4, batch 6000, loss[loss=0.2978, simple_loss=0.3593, pruned_loss=0.1181, over 8243.00 frames. ], tot_loss[loss=0.3032, simple_loss=0.3598, pruned_loss=0.1233, over 1617176.23 frames. ], batch size: 22, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:48:50,185 INFO [train.py:926] (3/4) Computing validation loss +2023-02-05 23:49:02,857 INFO [train.py:935] (3/4) Epoch 4, validation: loss=0.2338, simple_loss=0.3275, pruned_loss=0.07005, over 944034.00 frames. +2023-02-05 23:49:02,858 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-05 23:49:22,554 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30279.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:26,599 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30284.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:37,708 INFO [train.py:901] (3/4) Epoch 4, batch 6050, loss[loss=0.3202, simple_loss=0.3689, pruned_loss=0.1357, over 8461.00 frames. ], tot_loss[loss=0.3026, simple_loss=0.3592, pruned_loss=0.123, over 1613205.13 frames. 
], batch size: 29, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:49:44,063 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30309.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:49:53,935 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 3.338e+02 3.992e+02 4.649e+02 1.183e+03, threshold=7.984e+02, percent-clipped=3.0 +2023-02-05 23:50:12,466 INFO [train.py:901] (3/4) Epoch 4, batch 6100, loss[loss=0.3141, simple_loss=0.3801, pruned_loss=0.124, over 8252.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3581, pruned_loss=0.1218, over 1615781.08 frames. ], batch size: 24, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:50:32,439 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30378.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:37,425 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3282, 1.6184, 2.2982, 1.0406, 1.6365, 1.5790, 1.3505, 1.3384], + device='cuda:3'), covar=tensor([0.1448, 0.1503, 0.0684, 0.2865, 0.1247, 0.2148, 0.1381, 0.1772], + device='cuda:3'), in_proj_covar=tensor([0.0459, 0.0431, 0.0509, 0.0525, 0.0560, 0.0499, 0.0441, 0.0571], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:50:39,272 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-05 23:50:44,140 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30395.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:50:47,302 INFO [train.py:901] (3/4) Epoch 4, batch 6150, loss[loss=0.3598, simple_loss=0.4059, pruned_loss=0.1568, over 7967.00 frames. ], tot_loss[loss=0.3015, simple_loss=0.3584, pruned_loss=0.1222, over 1615897.17 frames. ], batch size: 21, lr: 1.76e-02, grad_scale: 8.0 +2023-02-05 23:50:47,648 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1988, 1.7190, 1.2616, 1.6874, 1.2944, 1.0627, 1.3212, 1.4274], + device='cuda:3'), covar=tensor([0.0819, 0.0370, 0.0952, 0.0465, 0.0622, 0.1126, 0.0717, 0.0639], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0234, 0.0305, 0.0302, 0.0329, 0.0312, 0.0335, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 23:51:02,480 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30420.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:05,074 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.511e+02 4.267e+02 5.249e+02 1.089e+03, threshold=8.535e+02, percent-clipped=6.0 +2023-02-05 23:51:23,139 INFO [train.py:901] (3/4) Epoch 4, batch 6200, loss[loss=0.2523, simple_loss=0.2979, pruned_loss=0.1033, over 7442.00 frames. ], tot_loss[loss=0.3021, simple_loss=0.3587, pruned_loss=0.1227, over 1613474.19 frames. 
], batch size: 17, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:51:36,654 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2985, 1.9790, 1.9296, 2.0745, 2.1256, 2.0907, 2.6598, 2.1036], + device='cuda:3'), covar=tensor([0.0521, 0.1155, 0.1641, 0.1192, 0.0535, 0.1353, 0.0601, 0.0542], + device='cuda:3'), in_proj_covar=tensor([0.0150, 0.0194, 0.0230, 0.0196, 0.0149, 0.0197, 0.0159, 0.0161], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0007, 0.0007, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:51:48,271 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30487.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:51:57,470 INFO [train.py:901] (3/4) Epoch 4, batch 6250, loss[loss=0.3271, simple_loss=0.393, pruned_loss=0.1306, over 8320.00 frames. ], tot_loss[loss=0.302, simple_loss=0.3583, pruned_loss=0.1229, over 1613151.98 frames. ], batch size: 25, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:04,992 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6466, 5.6346, 4.8819, 2.2053, 4.8850, 5.2435, 5.2368, 4.7391], + device='cuda:3'), covar=tensor([0.0555, 0.0359, 0.0687, 0.4160, 0.0590, 0.0499, 0.0877, 0.0435], + device='cuda:3'), in_proj_covar=tensor([0.0367, 0.0267, 0.0299, 0.0375, 0.0295, 0.0246, 0.0279, 0.0222], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 23:52:14,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 3.291e+02 3.933e+02 5.014e+02 1.132e+03, threshold=7.866e+02, percent-clipped=4.0 +2023-02-05 23:52:22,914 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30535.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:32,704 INFO [train.py:901] (3/4) Epoch 4, batch 6300, loss[loss=0.2284, simple_loss=0.2853, pruned_loss=0.08572, over 7710.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.3584, pruned_loss=0.1227, over 1612374.92 frames. ], batch size: 18, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:52:39,498 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30560.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:52:44,917 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.76 vs. limit=2.0 +2023-02-05 23:52:47,317 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30571.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:06,768 INFO [train.py:901] (3/4) Epoch 4, batch 6350, loss[loss=0.3048, simple_loss=0.3796, pruned_loss=0.115, over 8466.00 frames. ], tot_loss[loss=0.3036, simple_loss=0.3597, pruned_loss=0.1237, over 1613132.14 frames. ], batch size: 29, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:08,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30602.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:53:23,782 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 3.165e+02 3.849e+02 5.077e+02 1.430e+03, threshold=7.697e+02, percent-clipped=4.0 +2023-02-05 23:53:42,470 INFO [train.py:901] (3/4) Epoch 4, batch 6400, loss[loss=0.2799, simple_loss=0.3468, pruned_loss=0.1065, over 8034.00 frames. ], tot_loss[loss=0.3024, simple_loss=0.3589, pruned_loss=0.1229, over 1610032.71 frames. 
], batch size: 22, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:53:58,109 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30673.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:07,701 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30686.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:13,776 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2506, 1.8218, 2.9970, 2.3846, 2.4780, 1.8387, 1.3665, 1.2351], + device='cuda:3'), covar=tensor([0.1444, 0.1709, 0.0337, 0.0771, 0.0716, 0.0853, 0.0863, 0.1506], + device='cuda:3'), in_proj_covar=tensor([0.0703, 0.0634, 0.0539, 0.0612, 0.0718, 0.0592, 0.0576, 0.0583], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 23:54:16,870 INFO [train.py:901] (3/4) Epoch 4, batch 6450, loss[loss=0.2583, simple_loss=0.3175, pruned_loss=0.09953, over 7793.00 frames. ], tot_loss[loss=0.3009, simple_loss=0.3577, pruned_loss=0.122, over 1609193.41 frames. ], batch size: 19, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:54:31,544 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=30722.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:54:32,820 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.022e+02 3.987e+02 5.645e+02 1.412e+03, threshold=7.975e+02, percent-clipped=10.0 +2023-02-05 23:54:42,853 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-05 23:54:50,947 INFO [train.py:901] (3/4) Epoch 4, batch 6500, loss[loss=0.315, simple_loss=0.3661, pruned_loss=0.132, over 7649.00 frames. ], tot_loss[loss=0.3013, simple_loss=0.358, pruned_loss=0.1223, over 1609657.31 frames. ], batch size: 19, lr: 1.75e-02, grad_scale: 8.0 +2023-02-05 23:55:26,183 INFO [train.py:901] (3/4) Epoch 4, batch 6550, loss[loss=0.2509, simple_loss=0.3157, pruned_loss=0.09303, over 7710.00 frames. ], tot_loss[loss=0.3028, simple_loss=0.3595, pruned_loss=0.1231, over 1611568.58 frames. ], batch size: 18, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:55:42,629 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.464e+02 3.539e+02 4.251e+02 5.114e+02 1.135e+03, threshold=8.501e+02, percent-clipped=1.0 +2023-02-05 23:55:50,027 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-05 23:55:51,484 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=30837.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:00,628 INFO [train.py:901] (3/4) Epoch 4, batch 6600, loss[loss=0.2792, simple_loss=0.3277, pruned_loss=0.1154, over 7653.00 frames. ], tot_loss[loss=0.302, simple_loss=0.3591, pruned_loss=0.1225, over 1615358.13 frames. ], batch size: 19, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:06,240 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30858.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:08,692 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-05 23:56:24,230 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30883.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:35,334 INFO [train.py:901] (3/4) Epoch 4, batch 6650, loss[loss=0.2913, simple_loss=0.3579, pruned_loss=0.1123, over 8106.00 frames. 
], tot_loss[loss=0.3035, simple_loss=0.3604, pruned_loss=0.1233, over 1611540.10 frames. ], batch size: 23, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:56:50,088 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30921.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:56:51,879 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 3.362e+02 4.352e+02 5.461e+02 1.446e+03, threshold=8.703e+02, percent-clipped=3.0 +2023-02-05 23:57:04,055 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=30942.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:04,679 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30943.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:08,262 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.27 vs. limit=5.0 +2023-02-05 23:57:09,173 INFO [train.py:901] (3/4) Epoch 4, batch 6700, loss[loss=0.3203, simple_loss=0.3752, pruned_loss=0.1327, over 8475.00 frames. ], tot_loss[loss=0.3037, simple_loss=0.3605, pruned_loss=0.1235, over 1614579.22 frames. ], batch size: 25, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:10,717 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=30952.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:12,267 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-05 23:57:21,648 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=30967.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:57:41,914 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6848, 2.8838, 1.9430, 2.1478, 2.2348, 1.4546, 2.0308, 2.2620], + device='cuda:3'), covar=tensor([0.1091, 0.0274, 0.0772, 0.0580, 0.0555, 0.1118, 0.0835, 0.0664], + device='cuda:3'), in_proj_covar=tensor([0.0338, 0.0237, 0.0302, 0.0300, 0.0316, 0.0306, 0.0330, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-05 23:57:45,060 INFO [train.py:901] (3/4) Epoch 4, batch 6750, loss[loss=0.3239, simple_loss=0.3672, pruned_loss=0.1403, over 7807.00 frames. ], tot_loss[loss=0.304, simple_loss=0.3604, pruned_loss=0.1238, over 1612371.68 frames. ], batch size: 20, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:57:56,390 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31017.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:00,825 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.225e+02 3.317e+02 4.136e+02 5.252e+02 1.678e+03, threshold=8.272e+02, percent-clipped=4.0 +2023-02-05 23:58:04,868 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31029.0, num_to_drop=1, layers_to_drop={0} +2023-02-05 23:58:14,342 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3177, 1.5717, 1.3541, 1.9154, 0.8258, 1.1864, 1.2941, 1.5260], + device='cuda:3'), covar=tensor([0.1321, 0.1318, 0.1580, 0.0699, 0.1846, 0.2301, 0.1457, 0.1230], + device='cuda:3'), in_proj_covar=tensor([0.0275, 0.0286, 0.0298, 0.0222, 0.0272, 0.0295, 0.0308, 0.0275], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:58:18,933 INFO [train.py:901] (3/4) Epoch 4, batch 6800, loss[loss=0.3296, simple_loss=0.3947, pruned_loss=0.1323, over 8256.00 frames. 
], tot_loss[loss=0.3054, simple_loss=0.3619, pruned_loss=0.1245, over 1614717.47 frames. ], batch size: 24, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:58:19,594 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-05 23:58:48,703 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31093.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:58:53,202 INFO [train.py:901] (3/4) Epoch 4, batch 6850, loss[loss=0.3075, simple_loss=0.3638, pruned_loss=0.1256, over 8204.00 frames. ], tot_loss[loss=0.3048, simple_loss=0.3611, pruned_loss=0.1243, over 1610012.64 frames. ], batch size: 23, lr: 1.74e-02, grad_scale: 8.0 +2023-02-05 23:59:00,788 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1635, 1.5622, 4.3157, 1.8064, 2.3004, 4.7807, 4.4790, 4.1562], + device='cuda:3'), covar=tensor([0.1195, 0.1597, 0.0316, 0.1970, 0.0858, 0.0232, 0.0376, 0.0529], + device='cuda:3'), in_proj_covar=tensor([0.0227, 0.0255, 0.0211, 0.0257, 0.0209, 0.0192, 0.0203, 0.0259], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-05 23:59:02,901 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1958, 1.3632, 2.3494, 1.0081, 1.9802, 1.4099, 1.2015, 1.7470], + device='cuda:3'), covar=tensor([0.1401, 0.1625, 0.0501, 0.2592, 0.0873, 0.1895, 0.1394, 0.1319], + device='cuda:3'), in_proj_covar=tensor([0.0453, 0.0428, 0.0508, 0.0516, 0.0558, 0.0501, 0.0443, 0.0567], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:59:06,907 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31118.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:09,997 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-05 23:59:10,560 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.178e+02 3.797e+02 5.313e+02 1.260e+03, threshold=7.594e+02, percent-clipped=4.0 +2023-02-05 23:59:13,367 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7219, 3.6616, 3.3306, 1.6829, 3.2968, 3.1761, 3.4009, 2.9295], + device='cuda:3'), covar=tensor([0.1066, 0.0747, 0.0964, 0.4175, 0.0857, 0.0977, 0.1443, 0.0882], + device='cuda:3'), in_proj_covar=tensor([0.0381, 0.0273, 0.0293, 0.0387, 0.0296, 0.0253, 0.0289, 0.0229], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-05 23:59:16,202 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31132.0, num_to_drop=0, layers_to_drop=set() +2023-02-05 23:59:16,890 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5968, 3.0853, 2.4874, 4.0918, 1.8034, 1.7453, 2.2539, 3.2726], + device='cuda:3'), covar=tensor([0.1208, 0.1335, 0.1663, 0.0337, 0.2129, 0.2620, 0.2180, 0.1167], + device='cuda:3'), in_proj_covar=tensor([0.0275, 0.0285, 0.0300, 0.0224, 0.0272, 0.0296, 0.0309, 0.0277], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-05 23:59:23,600 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7232, 2.3329, 4.3295, 1.2747, 2.9965, 2.2951, 1.7508, 2.4267], + device='cuda:3'), covar=tensor([0.1203, 0.1589, 0.0506, 0.2611, 0.1195, 0.1835, 0.1194, 0.1923], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0432, 0.0510, 0.0519, 0.0559, 0.0497, 0.0445, 0.0567], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-05 23:59:28,757 INFO [train.py:901] (3/4) Epoch 4, batch 6900, loss[loss=0.31, simple_loss=0.3707, pruned_loss=0.1247, over 8573.00 frames. ], tot_loss[loss=0.3047, simple_loss=0.3613, pruned_loss=0.124, over 1614501.64 frames. ], batch size: 34, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:03,327 INFO [train.py:901] (3/4) Epoch 4, batch 6950, loss[loss=0.3318, simple_loss=0.3903, pruned_loss=0.1366, over 8297.00 frames. ], tot_loss[loss=0.3037, simple_loss=0.3603, pruned_loss=0.1236, over 1612394.78 frames. ], batch size: 23, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:18,144 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 00:00:20,076 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 3.425e+02 4.122e+02 5.302e+02 9.579e+02, threshold=8.244e+02, percent-clipped=6.0 +2023-02-06 00:00:25,014 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2839, 1.5656, 1.2291, 1.8890, 0.9368, 1.1082, 1.3925, 1.5584], + device='cuda:3'), covar=tensor([0.1489, 0.1481, 0.1824, 0.0821, 0.1873, 0.2629, 0.1365, 0.1179], + device='cuda:3'), in_proj_covar=tensor([0.0270, 0.0281, 0.0295, 0.0218, 0.0264, 0.0295, 0.0299, 0.0268], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 00:00:38,134 INFO [train.py:901] (3/4) Epoch 4, batch 7000, loss[loss=0.2452, simple_loss=0.308, pruned_loss=0.09124, over 7425.00 frames. ], tot_loss[loss=0.3027, simple_loss=0.3593, pruned_loss=0.1231, over 1609918.69 frames. 
], batch size: 17, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:00:48,784 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:01,632 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 00:01:03,285 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:05,299 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3639, 4.4414, 3.9420, 1.9480, 3.9226, 4.0834, 4.0061, 3.5009], + device='cuda:3'), covar=tensor([0.0886, 0.0502, 0.0924, 0.4285, 0.0590, 0.0607, 0.1247, 0.0608], + device='cuda:3'), in_proj_covar=tensor([0.0378, 0.0269, 0.0296, 0.0391, 0.0295, 0.0250, 0.0288, 0.0228], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-06 00:01:09,191 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:01:11,657 INFO [train.py:901] (3/4) Epoch 4, batch 7050, loss[loss=0.3403, simple_loss=0.4006, pruned_loss=0.14, over 8623.00 frames. ], tot_loss[loss=0.3031, simple_loss=0.3594, pruned_loss=0.1235, over 1606961.62 frames. ], batch size: 31, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:01:21,134 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8167, 1.1529, 3.9182, 1.6038, 2.9106, 3.0555, 3.5272, 3.5438], + device='cuda:3'), covar=tensor([0.0662, 0.5148, 0.0927, 0.2926, 0.2498, 0.1412, 0.0816, 0.0838], + device='cuda:3'), in_proj_covar=tensor([0.0306, 0.0471, 0.0377, 0.0397, 0.0463, 0.0386, 0.0382, 0.0424], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 00:01:28,375 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 3.221e+02 3.873e+02 5.326e+02 1.178e+03, threshold=7.746e+02, percent-clipped=8.0 +2023-02-06 00:01:47,448 INFO [train.py:901] (3/4) Epoch 4, batch 7100, loss[loss=0.2898, simple_loss=0.3439, pruned_loss=0.1178, over 7657.00 frames. ], tot_loss[loss=0.3026, simple_loss=0.3593, pruned_loss=0.1229, over 1606203.40 frames. ], batch size: 19, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:02:00,269 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. 
limit=2.0 +2023-02-06 00:02:02,610 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31373.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:02:08,043 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:13,379 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:14,690 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8383, 2.5010, 3.1567, 0.8464, 3.0945, 2.2058, 1.2432, 1.7843], + device='cuda:3'), covar=tensor([0.0241, 0.0086, 0.0084, 0.0224, 0.0113, 0.0221, 0.0334, 0.0142], + device='cuda:3'), in_proj_covar=tensor([0.0282, 0.0203, 0.0164, 0.0244, 0.0196, 0.0332, 0.0271, 0.0232], + device='cuda:3'), out_proj_covar=tensor([1.0952e-04, 7.8012e-05, 6.1065e-05, 9.1123e-05, 7.6527e-05, 1.3799e-04, + 1.0631e-04, 8.8663e-05], device='cuda:3') +2023-02-06 00:02:21,054 INFO [train.py:901] (3/4) Epoch 4, batch 7150, loss[loss=0.2609, simple_loss=0.328, pruned_loss=0.09694, over 7976.00 frames. ], tot_loss[loss=0.3035, simple_loss=0.3605, pruned_loss=0.1233, over 1608852.37 frames. ], batch size: 21, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:02:22,730 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:28,577 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:29,926 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:37,019 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 3.187e+02 3.955e+02 5.000e+02 8.847e+02, threshold=7.910e+02, percent-clipped=2.0 +2023-02-06 00:02:39,208 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:02:56,011 INFO [train.py:901] (3/4) Epoch 4, batch 7200, loss[loss=0.3339, simple_loss=0.3693, pruned_loss=0.1492, over 8406.00 frames. ], tot_loss[loss=0.3018, simple_loss=0.3588, pruned_loss=0.1224, over 1604867.02 frames. ], batch size: 49, lr: 1.73e-02, grad_scale: 8.0 +2023-02-06 00:03:22,243 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31488.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:03:30,969 INFO [train.py:901] (3/4) Epoch 4, batch 7250, loss[loss=0.2942, simple_loss=0.3642, pruned_loss=0.1121, over 8466.00 frames. ], tot_loss[loss=0.3008, simple_loss=0.358, pruned_loss=0.1218, over 1607261.32 frames. ], batch size: 25, lr: 1.73e-02, grad_scale: 16.0 +2023-02-06 00:03:37,363 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31509.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:03:47,339 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.150e+02 3.858e+02 4.938e+02 9.845e+02, threshold=7.715e+02, percent-clipped=4.0 +2023-02-06 00:04:05,058 INFO [train.py:901] (3/4) Epoch 4, batch 7300, loss[loss=0.3163, simple_loss=0.3768, pruned_loss=0.1279, over 8335.00 frames. ], tot_loss[loss=0.3005, simple_loss=0.3578, pruned_loss=0.1216, over 1606929.34 frames. 
], batch size: 25, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:40,426 INFO [train.py:901] (3/4) Epoch 4, batch 7350, loss[loss=0.2458, simple_loss=0.3119, pruned_loss=0.08989, over 8243.00 frames. ], tot_loss[loss=0.3001, simple_loss=0.3575, pruned_loss=0.1213, over 1610833.16 frames. ], batch size: 22, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:04:57,285 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.182e+02 2.774e+02 3.613e+02 4.483e+02 1.102e+03, threshold=7.227e+02, percent-clipped=2.0 +2023-02-06 00:04:59,980 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 00:05:00,991 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 00:05:05,421 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:14,569 INFO [train.py:901] (3/4) Epoch 4, batch 7400, loss[loss=0.275, simple_loss=0.3369, pruned_loss=0.1066, over 5983.00 frames. ], tot_loss[loss=0.3011, simple_loss=0.359, pruned_loss=0.1216, over 1614778.03 frames. ], batch size: 13, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:19,274 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 00:05:20,113 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:21,990 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:26,693 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:37,009 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:43,513 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:05:48,578 INFO [train.py:901] (3/4) Epoch 4, batch 7450, loss[loss=0.2709, simple_loss=0.341, pruned_loss=0.1004, over 8360.00 frames. ], tot_loss[loss=0.3039, simple_loss=0.3613, pruned_loss=0.1232, over 1619363.46 frames. ], batch size: 24, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:05:50,973 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-06 00:05:58,006 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 00:06:04,224 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:05,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.933e+02 3.216e+02 3.933e+02 5.503e+02 1.387e+03, threshold=7.866e+02, percent-clipped=9.0 +2023-02-06 00:06:19,848 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=31744.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:06:23,714 INFO [train.py:901] (3/4) Epoch 4, batch 7500, loss[loss=0.3812, simple_loss=0.411, pruned_loss=0.1757, over 8491.00 frames. ], tot_loss[loss=0.3038, simple_loss=0.3612, pruned_loss=0.1232, over 1620205.92 frames. 
], batch size: 28, lr: 1.72e-02, grad_scale: 16.0 +2023-02-06 00:06:36,763 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=31769.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:06:37,958 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:06:56,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6374, 2.9063, 2.0032, 2.1708, 2.2981, 1.6045, 2.1469, 2.3114], + device='cuda:3'), covar=tensor([0.1165, 0.0304, 0.0806, 0.0547, 0.0545, 0.1037, 0.0770, 0.0704], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0235, 0.0309, 0.0300, 0.0321, 0.0310, 0.0333, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 00:06:58,136 INFO [train.py:901] (3/4) Epoch 4, batch 7550, loss[loss=0.3991, simple_loss=0.423, pruned_loss=0.1876, over 8510.00 frames. ], tot_loss[loss=0.3042, simple_loss=0.361, pruned_loss=0.1237, over 1618453.88 frames. ], batch size: 49, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:02,979 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:16,191 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.990e+02 2.842e+02 3.963e+02 5.244e+02 1.193e+03, threshold=7.926e+02, percent-clipped=8.0 +2023-02-06 00:07:21,433 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.25 vs. limit=5.0 +2023-02-06 00:07:27,230 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:07:34,061 INFO [train.py:901] (3/4) Epoch 4, batch 7600, loss[loss=0.3238, simple_loss=0.3748, pruned_loss=0.1365, over 8614.00 frames. ], tot_loss[loss=0.3033, simple_loss=0.3603, pruned_loss=0.1232, over 1621450.85 frames. ], batch size: 49, lr: 1.72e-02, grad_scale: 8.0 +2023-02-06 00:07:36,150 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=31853.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:07:58,613 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:05,432 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:06,813 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9636, 3.2593, 2.5725, 4.2564, 1.7723, 2.0528, 2.0867, 3.5067], + device='cuda:3'), covar=tensor([0.0939, 0.1328, 0.1655, 0.0313, 0.1982, 0.2254, 0.2112, 0.1154], + device='cuda:3'), in_proj_covar=tensor([0.0274, 0.0276, 0.0299, 0.0225, 0.0263, 0.0287, 0.0298, 0.0270], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 00:08:07,996 INFO [train.py:901] (3/4) Epoch 4, batch 7650, loss[loss=0.3421, simple_loss=0.3889, pruned_loss=0.1476, over 8767.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3587, pruned_loss=0.1223, over 1617572.21 frames. 
], batch size: 30, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:25,718 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6038, 2.5480, 2.9592, 2.2239, 1.4070, 2.7092, 0.5307, 1.8377], + device='cuda:3'), covar=tensor([0.2435, 0.2731, 0.1216, 0.2146, 0.5933, 0.0606, 0.7771, 0.1966], + device='cuda:3'), in_proj_covar=tensor([0.0124, 0.0114, 0.0082, 0.0164, 0.0204, 0.0083, 0.0151, 0.0122], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:08:26,156 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.122e+02 3.181e+02 3.860e+02 4.828e+02 9.649e+02, threshold=7.720e+02, percent-clipped=2.0 +2023-02-06 00:08:31,519 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:31,633 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0421, 1.7266, 2.7436, 2.2151, 2.2720, 1.8609, 1.3943, 0.9220], + device='cuda:3'), covar=tensor([0.1705, 0.1690, 0.0390, 0.0820, 0.0813, 0.0873, 0.0974, 0.1823], + device='cuda:3'), in_proj_covar=tensor([0.0718, 0.0638, 0.0534, 0.0612, 0.0732, 0.0593, 0.0578, 0.0594], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:08:43,631 INFO [train.py:901] (3/4) Epoch 4, batch 7700, loss[loss=0.2843, simple_loss=0.3584, pruned_loss=0.1051, over 8470.00 frames. ], tot_loss[loss=0.3006, simple_loss=0.3584, pruned_loss=0.1214, over 1616340.90 frames. ], batch size: 27, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:08:55,989 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=31968.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:08:56,026 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=31968.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:09:06,796 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 00:09:17,470 INFO [train.py:901] (3/4) Epoch 4, batch 7750, loss[loss=0.317, simple_loss=0.3627, pruned_loss=0.1357, over 7689.00 frames. ], tot_loss[loss=0.301, simple_loss=0.3587, pruned_loss=0.1217, over 1617235.04 frames. ], batch size: 18, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:09:35,965 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.165e+02 3.163e+02 3.927e+02 5.355e+02 1.239e+03, threshold=7.853e+02, percent-clipped=4.0 +2023-02-06 00:09:53,605 INFO [train.py:901] (3/4) Epoch 4, batch 7800, loss[loss=0.3631, simple_loss=0.4059, pruned_loss=0.1601, over 8565.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.3576, pruned_loss=0.1214, over 1613679.61 frames. 
], batch size: 39, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:05,169 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:10:11,338 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8664, 1.5174, 2.3028, 2.0028, 2.0439, 1.7375, 1.3416, 0.7002], + device='cuda:3'), covar=tensor([0.1619, 0.1689, 0.0441, 0.0753, 0.0656, 0.0862, 0.0877, 0.1592], + device='cuda:3'), in_proj_covar=tensor([0.0710, 0.0637, 0.0537, 0.0604, 0.0717, 0.0589, 0.0572, 0.0587], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:10:27,388 INFO [train.py:901] (3/4) Epoch 4, batch 7850, loss[loss=0.2653, simple_loss=0.3366, pruned_loss=0.09698, over 8362.00 frames. ], tot_loss[loss=0.3017, simple_loss=0.359, pruned_loss=0.1222, over 1614473.32 frames. ], batch size: 24, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:10:43,938 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.460e+02 3.521e+02 4.480e+02 6.179e+02 1.308e+03, threshold=8.960e+02, percent-clipped=13.0 +2023-02-06 00:10:55,619 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:00,787 INFO [train.py:901] (3/4) Epoch 4, batch 7900, loss[loss=0.2848, simple_loss=0.3365, pruned_loss=0.1165, over 7528.00 frames. ], tot_loss[loss=0.3019, simple_loss=0.3591, pruned_loss=0.1224, over 1613291.03 frames. ], batch size: 18, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:00,855 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:12,545 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:21,573 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:24,163 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:11:34,311 INFO [train.py:901] (3/4) Epoch 4, batch 7950, loss[loss=0.3281, simple_loss=0.3856, pruned_loss=0.1353, over 8553.00 frames. ], tot_loss[loss=0.3004, simple_loss=0.3583, pruned_loss=0.1213, over 1615743.19 frames. ], batch size: 49, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:11:51,232 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32224.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:11:51,622 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.248e+02 3.536e+02 4.226e+02 5.315e+02 1.259e+03, threshold=8.452e+02, percent-clipped=4.0 +2023-02-06 00:12:01,775 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:08,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32249.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:12:08,740 INFO [train.py:901] (3/4) Epoch 4, batch 8000, loss[loss=0.2984, simple_loss=0.3507, pruned_loss=0.123, over 7937.00 frames. ], tot_loss[loss=0.3016, simple_loss=0.3594, pruned_loss=0.1219, over 1620155.18 frames. 
], batch size: 20, lr: 1.71e-02, grad_scale: 8.0 +2023-02-06 00:12:19,090 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:26,801 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:42,460 INFO [train.py:901] (3/4) Epoch 4, batch 8050, loss[loss=0.2561, simple_loss=0.3109, pruned_loss=0.1007, over 7238.00 frames. ], tot_loss[loss=0.3034, simple_loss=0.3596, pruned_loss=0.1236, over 1602038.87 frames. ], batch size: 16, lr: 1.70e-02, grad_scale: 8.0 +2023-02-06 00:12:42,654 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:50,708 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=32312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:12:51,458 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6578, 1.7672, 1.8898, 1.5222, 0.9740, 1.7117, 0.2435, 1.1944], + device='cuda:3'), covar=tensor([0.4053, 0.3222, 0.1563, 0.2428, 0.7927, 0.1511, 0.6101, 0.2880], + device='cuda:3'), in_proj_covar=tensor([0.0123, 0.0109, 0.0079, 0.0162, 0.0201, 0.0081, 0.0145, 0.0119], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:12:58,911 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.697e+02 3.496e+02 4.220e+02 5.135e+02 1.064e+03, threshold=8.441e+02, percent-clipped=2.0 +2023-02-06 00:13:15,885 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 00:13:19,665 INFO [train.py:901] (3/4) Epoch 5, batch 0, loss[loss=0.3183, simple_loss=0.3579, pruned_loss=0.1393, over 7934.00 frames. ], tot_loss[loss=0.3183, simple_loss=0.3579, pruned_loss=0.1393, over 7934.00 frames. ], batch size: 20, lr: 1.59e-02, grad_scale: 8.0 +2023-02-06 00:13:19,666 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 00:13:31,615 INFO [train.py:935] (3/4) Epoch 5, validation: loss=0.2309, simple_loss=0.3254, pruned_loss=0.06822, over 944034.00 frames. +2023-02-06 00:13:31,616 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 00:13:46,435 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 00:13:46,612 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:06,989 INFO [train.py:901] (3/4) Epoch 5, batch 50, loss[loss=0.3401, simple_loss=0.3817, pruned_loss=0.1492, over 8235.00 frames. ], tot_loss[loss=0.305, simple_loss=0.3628, pruned_loss=0.1236, over 369010.72 frames. ], batch size: 22, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:14,084 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:22,029 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 00:14:22,894 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 00:14:36,524 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.029e+02 3.148e+02 3.721e+02 4.839e+02 1.477e+03, threshold=7.442e+02, percent-clipped=1.0 +2023-02-06 00:14:38,056 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=32427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:41,779 INFO [train.py:901] (3/4) Epoch 5, batch 100, loss[loss=0.2827, simple_loss=0.3227, pruned_loss=0.1214, over 7242.00 frames. ], tot_loss[loss=0.3017, simple_loss=0.3596, pruned_loss=0.1219, over 647849.36 frames. ], batch size: 16, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:14:44,572 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:14:45,046 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 00:15:02,115 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:15,799 INFO [train.py:901] (3/4) Epoch 5, batch 150, loss[loss=0.3056, simple_loss=0.363, pruned_loss=0.1241, over 7791.00 frames. ], tot_loss[loss=0.2996, simple_loss=0.3575, pruned_loss=0.1209, over 862620.50 frames. ], batch size: 20, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:43,039 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:15:45,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.082e+02 3.007e+02 3.818e+02 4.644e+02 8.323e+02, threshold=7.636e+02, percent-clipped=1.0 +2023-02-06 00:15:50,801 INFO [train.py:901] (3/4) Epoch 5, batch 200, loss[loss=0.2857, simple_loss=0.3529, pruned_loss=0.1093, over 8318.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.3564, pruned_loss=0.1193, over 1024260.86 frames. ], batch size: 25, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:15:59,814 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:06,406 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:23,700 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:24,867 INFO [train.py:901] (3/4) Epoch 5, batch 250, loss[loss=0.258, simple_loss=0.3162, pruned_loss=0.0999, over 7661.00 frames. ], tot_loss[loss=0.2992, simple_loss=0.3581, pruned_loss=0.1201, over 1159264.08 frames. ], batch size: 19, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:16:36,174 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 00:16:45,158 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:16:46,301 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 00:16:54,493 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 3.241e+02 4.131e+02 4.869e+02 1.219e+03, threshold=8.263e+02, percent-clipped=9.0 +2023-02-06 00:17:00,684 INFO [train.py:901] (3/4) Epoch 5, batch 300, loss[loss=0.3294, simple_loss=0.3994, pruned_loss=0.1297, over 8510.00 frames. 
], tot_loss[loss=0.2993, simple_loss=0.3589, pruned_loss=0.1199, over 1263755.28 frames. ], batch size: 26, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:03,012 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:10,889 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:27,574 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32673.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:34,190 INFO [train.py:901] (3/4) Epoch 5, batch 350, loss[loss=0.3739, simple_loss=0.4009, pruned_loss=0.1735, over 6965.00 frames. ], tot_loss[loss=0.3014, simple_loss=0.3602, pruned_loss=0.1213, over 1346348.77 frames. ], batch size: 73, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:17:34,410 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=32683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:17:48,606 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 00:17:51,716 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=32708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:18:04,025 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 3.189e+02 4.031e+02 4.810e+02 8.158e+02, threshold=8.062e+02, percent-clipped=0.0 +2023-02-06 00:18:09,323 INFO [train.py:901] (3/4) Epoch 5, batch 400, loss[loss=0.3389, simple_loss=0.3965, pruned_loss=0.1407, over 8504.00 frames. ], tot_loss[loss=0.3017, simple_loss=0.3601, pruned_loss=0.1216, over 1408663.59 frames. ], batch size: 49, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:18:29,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8423, 1.4838, 2.3307, 1.9730, 2.1543, 1.6504, 1.2599, 0.6391], + device='cuda:3'), covar=tensor([0.1646, 0.1774, 0.0417, 0.0703, 0.0582, 0.0832, 0.0931, 0.1590], + device='cuda:3'), in_proj_covar=tensor([0.0715, 0.0646, 0.0553, 0.0615, 0.0725, 0.0593, 0.0586, 0.0596], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:18:43,765 INFO [train.py:901] (3/4) Epoch 5, batch 450, loss[loss=0.2943, simple_loss=0.3551, pruned_loss=0.1167, over 8098.00 frames. ], tot_loss[loss=0.2994, simple_loss=0.358, pruned_loss=0.1204, over 1454544.50 frames. ], batch size: 23, lr: 1.58e-02, grad_scale: 8.0 +2023-02-06 00:19:12,446 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.263e+02 3.122e+02 4.068e+02 4.898e+02 9.897e+02, threshold=8.137e+02, percent-clipped=5.0 +2023-02-06 00:19:17,687 INFO [train.py:901] (3/4) Epoch 5, batch 500, loss[loss=0.3005, simple_loss=0.3587, pruned_loss=0.1211, over 7919.00 frames. ], tot_loss[loss=0.2998, simple_loss=0.3582, pruned_loss=0.1207, over 1488718.72 frames. 
], batch size: 20, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:19:39,191 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8814, 2.4658, 4.8093, 1.4121, 2.9237, 2.5247, 1.7966, 2.6416], + device='cuda:3'), covar=tensor([0.1289, 0.1606, 0.0551, 0.2660, 0.1267, 0.1827, 0.1234, 0.1975], + device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0440, 0.0522, 0.0526, 0.0577, 0.0502, 0.0448, 0.0587], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 00:19:52,905 INFO [train.py:901] (3/4) Epoch 5, batch 550, loss[loss=0.3362, simple_loss=0.3979, pruned_loss=0.1373, over 8533.00 frames. ], tot_loss[loss=0.3002, simple_loss=0.3586, pruned_loss=0.121, over 1520031.19 frames. ], batch size: 31, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:20:02,444 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. limit=5.0 +2023-02-06 00:20:21,235 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.184e+02 3.133e+02 3.697e+02 5.126e+02 1.321e+03, threshold=7.393e+02, percent-clipped=4.0 +2023-02-06 00:20:26,724 INFO [train.py:901] (3/4) Epoch 5, batch 600, loss[loss=0.3022, simple_loss=0.3352, pruned_loss=0.1346, over 7217.00 frames. ], tot_loss[loss=0.2983, simple_loss=0.3571, pruned_loss=0.1198, over 1543312.80 frames. ], batch size: 16, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:20:50,758 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 00:21:02,150 INFO [train.py:901] (3/4) Epoch 5, batch 650, loss[loss=0.3495, simple_loss=0.3879, pruned_loss=0.1555, over 8026.00 frames. ], tot_loss[loss=0.2971, simple_loss=0.3561, pruned_loss=0.1191, over 1557452.52 frames. ], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:21:02,979 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=32984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:21:04,609 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 00:21:30,782 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 3.090e+02 3.854e+02 5.024e+02 8.355e+02, threshold=7.708e+02, percent-clipped=4.0 +2023-02-06 00:21:36,138 INFO [train.py:901] (3/4) Epoch 5, batch 700, loss[loss=0.2569, simple_loss=0.3363, pruned_loss=0.08877, over 8459.00 frames. ], tot_loss[loss=0.297, simple_loss=0.3559, pruned_loss=0.119, over 1574072.52 frames. ], batch size: 25, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:11,097 INFO [train.py:901] (3/4) Epoch 5, batch 750, loss[loss=0.3373, simple_loss=0.3801, pruned_loss=0.1472, over 8497.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3557, pruned_loss=0.1189, over 1581999.45 frames. 
], batch size: 26, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:22:12,595 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5001, 1.5151, 1.4094, 1.2714, 1.5219, 1.3201, 1.9440, 1.9594], + device='cuda:3'), covar=tensor([0.0501, 0.1380, 0.2043, 0.1498, 0.0667, 0.1858, 0.0730, 0.0521], + device='cuda:3'), in_proj_covar=tensor([0.0142, 0.0187, 0.0226, 0.0187, 0.0141, 0.0195, 0.0152, 0.0156], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 00:22:14,422 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:22:16,106 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 00:22:36,865 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 00:22:40,967 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 3.306e+02 4.079e+02 5.042e+02 1.499e+03, threshold=8.159e+02, percent-clipped=7.0 +2023-02-06 00:22:45,524 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 00:22:46,150 INFO [train.py:901] (3/4) Epoch 5, batch 800, loss[loss=0.3014, simple_loss=0.3682, pruned_loss=0.1173, over 8442.00 frames. ], tot_loss[loss=0.2977, simple_loss=0.3568, pruned_loss=0.1193, over 1593105.23 frames. ], batch size: 49, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:13,726 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:23:19,950 INFO [train.py:901] (3/4) Epoch 5, batch 850, loss[loss=0.2532, simple_loss=0.3278, pruned_loss=0.08927, over 8471.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3544, pruned_loss=0.1177, over 1597267.66 frames. ], batch size: 25, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:23:49,970 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.888e+02 3.855e+02 5.468e+02 1.103e+03, threshold=7.709e+02, percent-clipped=2.0 +2023-02-06 00:23:56,030 INFO [train.py:901] (3/4) Epoch 5, batch 900, loss[loss=0.2949, simple_loss=0.3519, pruned_loss=0.1189, over 8138.00 frames. ], tot_loss[loss=0.2963, simple_loss=0.3557, pruned_loss=0.1185, over 1603195.76 frames. ], batch size: 22, lr: 1.57e-02, grad_scale: 8.0 +2023-02-06 00:24:29,752 INFO [train.py:901] (3/4) Epoch 5, batch 950, loss[loss=0.2742, simple_loss=0.3357, pruned_loss=0.1063, over 7649.00 frames. ], tot_loss[loss=0.2951, simple_loss=0.3547, pruned_loss=0.1177, over 1605272.78 frames. ], batch size: 19, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:01,024 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.004e+02 3.759e+02 4.642e+02 8.675e+02, threshold=7.519e+02, percent-clipped=2.0 +2023-02-06 00:25:03,069 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:25:05,120 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 00:25:06,447 INFO [train.py:901] (3/4) Epoch 5, batch 1000, loss[loss=0.3677, simple_loss=0.4047, pruned_loss=0.1653, over 7157.00 frames. ], tot_loss[loss=0.293, simple_loss=0.3529, pruned_loss=0.1165, over 1601554.84 frames. 
], batch size: 71, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:06,675 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5591, 1.7770, 2.0718, 1.7293, 1.0138, 2.1708, 0.3163, 1.1265], + device='cuda:3'), covar=tensor([0.3832, 0.2422, 0.0988, 0.2918, 0.6964, 0.0880, 0.7528, 0.3879], + device='cuda:3'), in_proj_covar=tensor([0.0126, 0.0117, 0.0080, 0.0165, 0.0210, 0.0081, 0.0146, 0.0123], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:25:16,753 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1973, 1.5488, 1.5105, 1.2461, 1.4761, 1.4201, 1.6522, 1.7293], + device='cuda:3'), covar=tensor([0.0597, 0.1207, 0.1949, 0.1452, 0.0631, 0.1594, 0.0763, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0142, 0.0184, 0.0224, 0.0183, 0.0140, 0.0193, 0.0153, 0.0156], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 00:25:40,440 INFO [train.py:901] (3/4) Epoch 5, batch 1050, loss[loss=0.2447, simple_loss=0.3253, pruned_loss=0.08208, over 8255.00 frames. ], tot_loss[loss=0.2933, simple_loss=0.3529, pruned_loss=0.1168, over 1600672.64 frames. ], batch size: 24, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:25:40,444 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 00:25:47,767 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 00:25:52,111 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 00:26:08,784 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.885e+02 3.252e+02 3.786e+02 4.850e+02 9.380e+02, threshold=7.572e+02, percent-clipped=3.0 +2023-02-06 00:26:13,599 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:14,849 INFO [train.py:901] (3/4) Epoch 5, batch 1100, loss[loss=0.3121, simple_loss=0.3614, pruned_loss=0.1314, over 7536.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3539, pruned_loss=0.117, over 1610131.76 frames. ], batch size: 71, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:23,006 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:26:50,041 INFO [train.py:901] (3/4) Epoch 5, batch 1150, loss[loss=0.2298, simple_loss=0.2989, pruned_loss=0.08035, over 7444.00 frames. ], tot_loss[loss=0.2949, simple_loss=0.3545, pruned_loss=0.1176, over 1611431.12 frames. ], batch size: 17, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:26:54,935 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:02,050 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 00:27:13,083 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:27:18,265 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.110e+02 3.101e+02 4.052e+02 5.357e+02 1.331e+03, threshold=8.105e+02, percent-clipped=11.0 +2023-02-06 00:27:23,603 INFO [train.py:901] (3/4) Epoch 5, batch 1200, loss[loss=0.376, simple_loss=0.4166, pruned_loss=0.1677, over 8567.00 frames. 
], tot_loss[loss=0.2941, simple_loss=0.3535, pruned_loss=0.1173, over 1608516.80 frames. ], batch size: 31, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:27:32,555 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:28:00,131 INFO [train.py:901] (3/4) Epoch 5, batch 1250, loss[loss=0.2893, simple_loss=0.3433, pruned_loss=0.1177, over 7973.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3538, pruned_loss=0.1172, over 1613931.18 frames. ], batch size: 21, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:29,029 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.057e+02 3.737e+02 5.343e+02 1.068e+03, threshold=7.474e+02, percent-clipped=1.0 +2023-02-06 00:28:34,054 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:28:34,561 INFO [train.py:901] (3/4) Epoch 5, batch 1300, loss[loss=0.2465, simple_loss=0.3213, pruned_loss=0.08583, over 7532.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3508, pruned_loss=0.1151, over 1611837.21 frames. ], batch size: 18, lr: 1.56e-02, grad_scale: 8.0 +2023-02-06 00:28:38,322 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-06 00:29:10,162 INFO [train.py:901] (3/4) Epoch 5, batch 1350, loss[loss=0.2794, simple_loss=0.3463, pruned_loss=0.1063, over 8136.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3505, pruned_loss=0.1145, over 1613072.86 frames. ], batch size: 22, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:29:18,826 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:21,585 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:23,806 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 00:29:38,302 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:29:39,358 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 3.141e+02 3.942e+02 4.566e+02 9.800e+02, threshold=7.885e+02, percent-clipped=1.0 +2023-02-06 00:29:44,199 INFO [train.py:901] (3/4) Epoch 5, batch 1400, loss[loss=0.2699, simple_loss=0.3364, pruned_loss=0.1017, over 8107.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3516, pruned_loss=0.1149, over 1615203.35 frames. ], batch size: 23, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:16,757 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.25 vs. limit=5.0 +2023-02-06 00:30:18,172 INFO [train.py:901] (3/4) Epoch 5, batch 1450, loss[loss=0.2491, simple_loss=0.3089, pruned_loss=0.09463, over 7660.00 frames. ], tot_loss[loss=0.2908, simple_loss=0.3512, pruned_loss=0.1152, over 1614804.48 frames. ], batch size: 19, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:32,277 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 00:30:32,504 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:49,113 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.073e+02 3.068e+02 3.705e+02 5.190e+02 1.303e+03, threshold=7.410e+02, percent-clipped=4.0 +2023-02-06 00:30:50,020 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:30:54,017 INFO [train.py:901] (3/4) Epoch 5, batch 1500, loss[loss=0.3321, simple_loss=0.3832, pruned_loss=0.1405, over 8595.00 frames. ], tot_loss[loss=0.291, simple_loss=0.3517, pruned_loss=0.1152, over 1615609.83 frames. ], batch size: 31, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:30:54,781 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=33834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:16,100 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 00:31:27,604 INFO [train.py:901] (3/4) Epoch 5, batch 1550, loss[loss=0.288, simple_loss=0.3479, pruned_loss=0.114, over 8247.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.351, pruned_loss=0.1158, over 1608066.68 frames. ], batch size: 24, lr: 1.55e-02, grad_scale: 4.0 +2023-02-06 00:31:31,095 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=33888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:48,390 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2586, 1.4148, 2.1584, 0.9978, 1.9796, 2.1810, 2.3791, 1.7047], + device='cuda:3'), covar=tensor([0.1224, 0.1208, 0.0657, 0.2400, 0.0763, 0.0646, 0.0649, 0.1280], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0261, 0.0215, 0.0255, 0.0219, 0.0193, 0.0218, 0.0267], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 00:31:48,419 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=33913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:31:58,126 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.067e+02 3.419e+02 4.027e+02 4.998e+02 8.696e+02, threshold=8.054e+02, percent-clipped=2.0 +2023-02-06 00:32:03,155 INFO [train.py:901] (3/4) Epoch 5, batch 1600, loss[loss=0.3517, simple_loss=0.4054, pruned_loss=0.149, over 8235.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3527, pruned_loss=0.1165, over 1610455.63 frames. ], batch size: 24, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:32:14,803 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=33949.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:17,009 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.00 vs. limit=2.0 +2023-02-06 00:32:20,121 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=33957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:32:31,556 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0 +2023-02-06 00:32:37,050 INFO [train.py:901] (3/4) Epoch 5, batch 1650, loss[loss=0.2753, simple_loss=0.344, pruned_loss=0.1033, over 8105.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.3518, pruned_loss=0.1154, over 1612801.88 frames. 
], batch size: 23, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:07,304 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.926e+02 3.722e+02 4.611e+02 9.053e+02, threshold=7.444e+02, percent-clipped=4.0 +2023-02-06 00:33:11,834 INFO [train.py:901] (3/4) Epoch 5, batch 1700, loss[loss=0.3617, simple_loss=0.3954, pruned_loss=0.1639, over 8335.00 frames. ], tot_loss[loss=0.2941, simple_loss=0.3536, pruned_loss=0.1173, over 1613172.75 frames. ], batch size: 25, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:33:17,289 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:33,627 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:33:47,837 INFO [train.py:901] (3/4) Epoch 5, batch 1750, loss[loss=0.2887, simple_loss=0.3596, pruned_loss=0.1089, over 8505.00 frames. ], tot_loss[loss=0.294, simple_loss=0.3537, pruned_loss=0.1171, over 1615706.77 frames. ], batch size: 28, lr: 1.55e-02, grad_scale: 8.0 +2023-02-06 00:34:16,515 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.177e+02 3.118e+02 3.687e+02 4.787e+02 9.448e+02, threshold=7.373e+02, percent-clipped=7.0 +2023-02-06 00:34:21,854 INFO [train.py:901] (3/4) Epoch 5, batch 1800, loss[loss=0.3319, simple_loss=0.3855, pruned_loss=0.1391, over 8201.00 frames. ], tot_loss[loss=0.2947, simple_loss=0.3543, pruned_loss=0.1176, over 1615387.92 frames. ], batch size: 23, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:34:36,714 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:38,709 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4579, 1.7014, 1.8367, 1.6493, 0.9896, 1.9092, 0.3481, 1.1225], + device='cuda:3'), covar=tensor([0.3592, 0.2131, 0.1255, 0.1774, 0.7175, 0.0948, 0.5914, 0.2882], + device='cuda:3'), in_proj_covar=tensor([0.0129, 0.0120, 0.0081, 0.0167, 0.0216, 0.0084, 0.0148, 0.0127], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:34:43,247 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:34:57,300 INFO [train.py:901] (3/4) Epoch 5, batch 1850, loss[loss=0.3522, simple_loss=0.403, pruned_loss=0.1507, over 8254.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3539, pruned_loss=0.117, over 1617754.99 frames. ], batch size: 24, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:35:12,369 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34205.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:26,215 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 3.489e+02 4.150e+02 5.670e+02 1.027e+03, threshold=8.299e+02, percent-clipped=7.0 +2023-02-06 00:35:29,066 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34230.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:35:30,897 INFO [train.py:901] (3/4) Epoch 5, batch 1900, loss[loss=0.3266, simple_loss=0.3874, pruned_loss=0.1329, over 8105.00 frames. ], tot_loss[loss=0.2919, simple_loss=0.3519, pruned_loss=0.1159, over 1610516.22 frames. 
], batch size: 23, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:05,945 INFO [train.py:901] (3/4) Epoch 5, batch 1950, loss[loss=0.2881, simple_loss=0.3428, pruned_loss=0.1167, over 8336.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3514, pruned_loss=0.116, over 1608438.49 frames. ], batch size: 26, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:09,873 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 00:36:18,719 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:36:23,388 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 00:36:35,388 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.967e+02 3.945e+02 4.927e+02 1.257e+03, threshold=7.890e+02, percent-clipped=2.0 +2023-02-06 00:36:40,163 INFO [train.py:901] (3/4) Epoch 5, batch 2000, loss[loss=0.3383, simple_loss=0.3842, pruned_loss=0.1462, over 8024.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3504, pruned_loss=0.1153, over 1608245.62 frames. ], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:36:42,282 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 00:37:14,373 INFO [train.py:901] (3/4) Epoch 5, batch 2050, loss[loss=0.2248, simple_loss=0.2933, pruned_loss=0.07817, over 7713.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3511, pruned_loss=0.1158, over 1607017.51 frames. ], batch size: 18, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:37:29,113 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9554, 3.3200, 2.3326, 4.1214, 1.6912, 2.2496, 2.1186, 3.2436], + device='cuda:3'), covar=tensor([0.0752, 0.1095, 0.1297, 0.0363, 0.1661, 0.1846, 0.1959, 0.0966], + device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0268, 0.0286, 0.0221, 0.0261, 0.0287, 0.0289, 0.0263], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 00:37:31,143 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:33,921 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:38,677 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:37:45,186 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 3.245e+02 4.066e+02 4.898e+02 1.293e+03, threshold=8.132e+02, percent-clipped=4.0 +2023-02-06 00:37:49,840 INFO [train.py:901] (3/4) Epoch 5, batch 2100, loss[loss=0.2554, simple_loss=0.3237, pruned_loss=0.09352, over 8034.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3522, pruned_loss=0.1167, over 1609788.02 frames. ], batch size: 22, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:37:51,415 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:23,166 INFO [train.py:901] (3/4) Epoch 5, batch 2150, loss[loss=0.3279, simple_loss=0.3866, pruned_loss=0.1346, over 8099.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3523, pruned_loss=0.1165, over 1616040.72 frames. 
], batch size: 23, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:38:39,927 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:50,619 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:38:53,806 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 2.929e+02 3.753e+02 4.663e+02 1.529e+03, threshold=7.506e+02, percent-clipped=2.0 +2023-02-06 00:38:59,141 INFO [train.py:901] (3/4) Epoch 5, batch 2200, loss[loss=0.3375, simple_loss=0.3899, pruned_loss=0.1425, over 8511.00 frames. ], tot_loss[loss=0.2929, simple_loss=0.3524, pruned_loss=0.1167, over 1611597.27 frames. ], batch size: 26, lr: 1.54e-02, grad_scale: 8.0 +2023-02-06 00:39:32,427 INFO [train.py:901] (3/4) Epoch 5, batch 2250, loss[loss=0.3164, simple_loss=0.3773, pruned_loss=0.1277, over 8470.00 frames. ], tot_loss[loss=0.2923, simple_loss=0.3518, pruned_loss=0.1164, over 1611205.45 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:39:52,222 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:39:59,502 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=34622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:02,640 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.996e+02 3.688e+02 4.883e+02 6.349e+02 4.437e+03, threshold=9.766e+02, percent-clipped=16.0 +2023-02-06 00:40:07,919 INFO [train.py:901] (3/4) Epoch 5, batch 2300, loss[loss=0.3134, simple_loss=0.3429, pruned_loss=0.142, over 7248.00 frames. ], tot_loss[loss=0.2916, simple_loss=0.3513, pruned_loss=0.116, over 1613512.96 frames. ], batch size: 16, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:40:12,726 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34640.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:16,085 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:34,982 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:40:42,256 INFO [train.py:901] (3/4) Epoch 5, batch 2350, loss[loss=0.2637, simple_loss=0.328, pruned_loss=0.09967, over 8147.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.3508, pruned_loss=0.1158, over 1610665.93 frames. 
], batch size: 22, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:40:51,695 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:41:11,439 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 3.189e+02 4.018e+02 4.942e+02 1.178e+03, threshold=8.036e+02, percent-clipped=1.0 +2023-02-06 00:41:12,937 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=34728.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:41:14,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3127, 1.9513, 1.5227, 1.4690, 1.4869, 1.3700, 1.7783, 1.7855], + device='cuda:3'), covar=tensor([0.0617, 0.1208, 0.1884, 0.1426, 0.0628, 0.1661, 0.0757, 0.0558], + device='cuda:3'), in_proj_covar=tensor([0.0144, 0.0185, 0.0226, 0.0190, 0.0139, 0.0195, 0.0150, 0.0159], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 00:41:16,112 INFO [train.py:901] (3/4) Epoch 5, batch 2400, loss[loss=0.2476, simple_loss=0.3107, pruned_loss=0.09223, over 7927.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.351, pruned_loss=0.1158, over 1614657.69 frames. ], batch size: 20, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:41:47,413 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34777.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:41:49,403 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5047, 1.9420, 2.0886, 0.8355, 2.1660, 1.5103, 0.5345, 1.7573], + device='cuda:3'), covar=tensor([0.0194, 0.0099, 0.0088, 0.0175, 0.0102, 0.0299, 0.0264, 0.0084], + device='cuda:3'), in_proj_covar=tensor([0.0303, 0.0217, 0.0179, 0.0260, 0.0208, 0.0351, 0.0273, 0.0250], + device='cuda:3'), out_proj_covar=tensor([1.1232e-04, 7.9085e-05, 6.3760e-05, 9.3411e-05, 7.7770e-05, 1.3878e-04, + 1.0153e-04, 9.1232e-05], device='cuda:3') +2023-02-06 00:41:51,224 INFO [train.py:901] (3/4) Epoch 5, batch 2450, loss[loss=0.3574, simple_loss=0.4054, pruned_loss=0.1547, over 8551.00 frames. ], tot_loss[loss=0.291, simple_loss=0.3508, pruned_loss=0.1156, over 1613003.04 frames. 
], batch size: 28, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:42:04,203 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:42:09,448 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7543, 2.8086, 3.0652, 1.7337, 1.2644, 2.8573, 0.4969, 1.8108], + device='cuda:3'), covar=tensor([0.2867, 0.1774, 0.1625, 0.4132, 0.7857, 0.1088, 0.7006, 0.2304], + device='cuda:3'), in_proj_covar=tensor([0.0125, 0.0121, 0.0080, 0.0167, 0.0210, 0.0082, 0.0147, 0.0120], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:42:18,442 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3655, 2.8324, 1.6851, 2.0988, 2.4130, 1.2591, 1.8706, 2.1175], + device='cuda:3'), covar=tensor([0.1338, 0.0340, 0.0946, 0.0648, 0.0540, 0.1194, 0.1034, 0.0835], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0243, 0.0315, 0.0308, 0.0322, 0.0310, 0.0340, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 00:42:19,463 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.991e+02 3.791e+02 4.954e+02 1.109e+03, threshold=7.583e+02, percent-clipped=3.0 +2023-02-06 00:42:24,147 INFO [train.py:901] (3/4) Epoch 5, batch 2500, loss[loss=0.3123, simple_loss=0.3793, pruned_loss=0.1226, over 8326.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.3525, pruned_loss=0.1165, over 1619421.16 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:42:55,971 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=34878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:42:59,763 INFO [train.py:901] (3/4) Epoch 5, batch 2550, loss[loss=0.2998, simple_loss=0.3536, pruned_loss=0.123, over 8669.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.3519, pruned_loss=0.1154, over 1616670.22 frames. ], batch size: 39, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:43:13,491 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=34903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:43:29,133 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.077e+02 2.947e+02 3.618e+02 4.736e+02 1.253e+03, threshold=7.237e+02, percent-clipped=4.0 +2023-02-06 00:43:33,905 INFO [train.py:901] (3/4) Epoch 5, batch 2600, loss[loss=0.3364, simple_loss=0.4068, pruned_loss=0.1331, over 8318.00 frames. ], tot_loss[loss=0.2918, simple_loss=0.352, pruned_loss=0.1158, over 1614475.69 frames. ], batch size: 25, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:43:49,009 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:09,524 INFO [train.py:901] (3/4) Epoch 5, batch 2650, loss[loss=0.3782, simple_loss=0.4268, pruned_loss=0.1647, over 8243.00 frames. ], tot_loss[loss=0.292, simple_loss=0.3517, pruned_loss=0.1162, over 1610220.03 frames. 
], batch size: 22, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:44:10,292 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34984.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:13,023 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=34988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:44:39,308 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.922e+02 3.827e+02 4.980e+02 8.274e+02, threshold=7.654e+02, percent-clipped=5.0 +2023-02-06 00:44:43,764 INFO [train.py:901] (3/4) Epoch 5, batch 2700, loss[loss=0.2612, simple_loss=0.3302, pruned_loss=0.09611, over 7648.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.3519, pruned_loss=0.1163, over 1610200.07 frames. ], batch size: 19, lr: 1.53e-02, grad_scale: 8.0 +2023-02-06 00:45:09,483 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:10,724 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35072.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:45:12,465 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 00:45:18,323 INFO [train.py:901] (3/4) Epoch 5, batch 2750, loss[loss=0.333, simple_loss=0.385, pruned_loss=0.1405, over 8351.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3513, pruned_loss=0.1157, over 1613780.79 frames. ], batch size: 26, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:45:29,757 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:32,522 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:34,566 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3574, 1.8116, 1.5703, 1.4474, 1.5322, 1.4870, 1.9220, 1.8479], + device='cuda:3'), covar=tensor([0.0616, 0.1135, 0.1755, 0.1457, 0.0643, 0.1577, 0.0758, 0.0570], + device='cuda:3'), in_proj_covar=tensor([0.0144, 0.0185, 0.0226, 0.0189, 0.0139, 0.0195, 0.0149, 0.0158], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 00:45:37,947 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:45:48,687 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.004e+02 3.039e+02 3.659e+02 5.251e+02 1.248e+03, threshold=7.317e+02, percent-clipped=8.0 +2023-02-06 00:45:53,668 INFO [train.py:901] (3/4) Epoch 5, batch 2800, loss[loss=0.2878, simple_loss=0.355, pruned_loss=0.1103, over 8348.00 frames. ], tot_loss[loss=0.2896, simple_loss=0.3499, pruned_loss=0.1146, over 1617163.71 frames. ], batch size: 24, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:46:27,514 INFO [train.py:901] (3/4) Epoch 5, batch 2850, loss[loss=0.3224, simple_loss=0.382, pruned_loss=0.1314, over 8621.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3497, pruned_loss=0.1149, over 1614526.24 frames. 
], batch size: 31, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:46:30,525 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35187.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:46:58,369 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.147e+02 2.990e+02 3.598e+02 4.675e+02 1.498e+03, threshold=7.197e+02, percent-clipped=4.0 +2023-02-06 00:47:03,665 INFO [train.py:901] (3/4) Epoch 5, batch 2900, loss[loss=0.3242, simple_loss=0.3814, pruned_loss=0.1335, over 8105.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.35, pruned_loss=0.1157, over 1610876.55 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:47:36,539 INFO [train.py:901] (3/4) Epoch 5, batch 2950, loss[loss=0.3144, simple_loss=0.387, pruned_loss=0.1209, over 8290.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3512, pruned_loss=0.1158, over 1606501.76 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:47:41,847 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 00:47:58,889 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7150, 2.2722, 4.7899, 1.2583, 3.3221, 2.2782, 1.7358, 2.6701], + device='cuda:3'), covar=tensor([0.1821, 0.2070, 0.0477, 0.3679, 0.1252, 0.2419, 0.1824, 0.2250], + device='cuda:3'), in_proj_covar=tensor([0.0463, 0.0436, 0.0520, 0.0526, 0.0573, 0.0511, 0.0447, 0.0582], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 00:48:06,832 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 3.185e+02 3.825e+02 4.988e+02 1.295e+03, threshold=7.649e+02, percent-clipped=4.0 +2023-02-06 00:48:07,072 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:12,109 INFO [train.py:901] (3/4) Epoch 5, batch 3000, loss[loss=0.3041, simple_loss=0.3645, pruned_loss=0.1218, over 8294.00 frames. ], tot_loss[loss=0.2924, simple_loss=0.3523, pruned_loss=0.1163, over 1608681.40 frames. ], batch size: 23, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:48:12,109 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 00:48:22,590 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2702, 2.1946, 1.4480, 1.9687, 1.8109, 1.2048, 1.5636, 1.7552], + device='cuda:3'), covar=tensor([0.1125, 0.0327, 0.0985, 0.0472, 0.0584, 0.1200, 0.0891, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0236, 0.0310, 0.0304, 0.0320, 0.0302, 0.0337, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 00:48:25,508 INFO [train.py:935] (3/4) Epoch 5, validation: loss=0.2228, simple_loss=0.319, pruned_loss=0.0633, over 944034.00 frames. 
+2023-02-06 00:48:25,509 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 00:48:39,272 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:41,954 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3379, 2.1646, 3.4884, 2.1462, 2.8392, 3.9242, 3.7428, 3.5038], + device='cuda:3'), covar=tensor([0.0837, 0.1051, 0.0599, 0.1457, 0.0835, 0.0218, 0.0287, 0.0454], + device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0267, 0.0226, 0.0269, 0.0221, 0.0199, 0.0230, 0.0278], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 00:48:42,026 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:44,709 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:48:47,351 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35363.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:48:59,256 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:01,099 INFO [train.py:901] (3/4) Epoch 5, batch 3050, loss[loss=0.3202, simple_loss=0.3826, pruned_loss=0.1289, over 8412.00 frames. ], tot_loss[loss=0.293, simple_loss=0.353, pruned_loss=0.1165, over 1609433.20 frames. ], batch size: 49, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:49:01,973 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:07,211 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:29,672 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.238e+02 3.010e+02 3.735e+02 4.816e+02 9.592e+02, threshold=7.471e+02, percent-clipped=3.0 +2023-02-06 00:49:33,914 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 00:49:34,200 INFO [train.py:901] (3/4) Epoch 5, batch 3100, loss[loss=0.3144, simple_loss=0.3777, pruned_loss=0.1256, over 8316.00 frames. ], tot_loss[loss=0.2952, simple_loss=0.355, pruned_loss=0.1177, over 1614386.45 frames. 
], batch size: 25, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:49:41,040 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35443.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:49:48,741 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35454.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:49:53,600 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2657, 1.7129, 1.3496, 1.6764, 1.3870, 1.1459, 1.3201, 1.5743], + device='cuda:3'), covar=tensor([0.0832, 0.0386, 0.0878, 0.0441, 0.0588, 0.1071, 0.0707, 0.0584], + device='cuda:3'), in_proj_covar=tensor([0.0360, 0.0242, 0.0312, 0.0309, 0.0323, 0.0310, 0.0346, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 00:49:59,648 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35468.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:50:09,986 INFO [train.py:901] (3/4) Epoch 5, batch 3150, loss[loss=0.3077, simple_loss=0.3643, pruned_loss=0.1255, over 8140.00 frames. ], tot_loss[loss=0.2936, simple_loss=0.3541, pruned_loss=0.1166, over 1612955.24 frames. ], batch size: 22, lr: 1.52e-02, grad_scale: 8.0 +2023-02-06 00:50:29,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4469, 1.9361, 3.3937, 1.0511, 2.2067, 1.8402, 1.5146, 1.7078], + device='cuda:3'), covar=tensor([0.1498, 0.1580, 0.0602, 0.3075, 0.1384, 0.2239, 0.1414, 0.2310], + device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0445, 0.0530, 0.0538, 0.0579, 0.0518, 0.0457, 0.0591], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 00:50:39,622 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.108e+02 3.249e+02 4.087e+02 5.030e+02 9.472e+02, threshold=8.174e+02, percent-clipped=3.0 +2023-02-06 00:50:44,420 INFO [train.py:901] (3/4) Epoch 5, batch 3200, loss[loss=0.2471, simple_loss=0.3314, pruned_loss=0.08141, over 8511.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3546, pruned_loss=0.1171, over 1610101.89 frames. ], batch size: 28, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:50:54,835 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:51:09,467 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35569.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:51:20,160 INFO [train.py:901] (3/4) Epoch 5, batch 3250, loss[loss=0.2839, simple_loss=0.3576, pruned_loss=0.1051, over 8252.00 frames. ], tot_loss[loss=0.2951, simple_loss=0.3552, pruned_loss=0.1175, over 1611942.33 frames. 
], batch size: 24, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:51:44,439 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6331, 3.9058, 2.2638, 2.1184, 2.7253, 1.7797, 2.4928, 2.8063], + device='cuda:3'), covar=tensor([0.1630, 0.0217, 0.0905, 0.0901, 0.0699, 0.1174, 0.1111, 0.1086], + device='cuda:3'), in_proj_covar=tensor([0.0364, 0.0240, 0.0317, 0.0306, 0.0320, 0.0313, 0.0349, 0.0324], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 00:51:50,492 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.378e+02 4.149e+02 5.121e+02 1.146e+03, threshold=8.298e+02, percent-clipped=3.0 +2023-02-06 00:51:51,044 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 00:51:55,288 INFO [train.py:901] (3/4) Epoch 5, batch 3300, loss[loss=0.2995, simple_loss=0.3571, pruned_loss=0.1209, over 8634.00 frames. ], tot_loss[loss=0.2943, simple_loss=0.3543, pruned_loss=0.1171, over 1610055.71 frames. ], batch size: 34, lr: 1.51e-02, grad_scale: 8.0 +2023-02-06 00:52:30,143 INFO [train.py:901] (3/4) Epoch 5, batch 3350, loss[loss=0.2827, simple_loss=0.3452, pruned_loss=0.1101, over 8109.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.3527, pruned_loss=0.1159, over 1609880.56 frames. ], batch size: 23, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:52:47,837 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35707.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 00:53:01,469 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.966e+02 3.555e+02 4.125e+02 4.946e+02 1.065e+03, threshold=8.250e+02, percent-clipped=5.0 +2023-02-06 00:53:06,231 INFO [train.py:901] (3/4) Epoch 5, batch 3400, loss[loss=0.3018, simple_loss=0.3606, pruned_loss=0.1215, over 8453.00 frames. ], tot_loss[loss=0.2938, simple_loss=0.354, pruned_loss=0.1168, over 1608614.73 frames. ], batch size: 48, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:53:08,429 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:53:25,785 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 00:53:26,934 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 00:53:34,781 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 00:53:39,726 INFO [train.py:901] (3/4) Epoch 5, batch 3450, loss[loss=0.2759, simple_loss=0.3442, pruned_loss=0.1038, over 8039.00 frames. ], tot_loss[loss=0.2928, simple_loss=0.3529, pruned_loss=0.1163, over 1607795.67 frames. 
], batch size: 22, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:53:43,882 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:06,639 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6493, 4.3769, 2.4979, 2.1047, 2.7121, 2.1547, 2.3732, 3.0976], + device='cuda:3'), covar=tensor([0.1414, 0.0120, 0.0679, 0.0786, 0.0555, 0.0853, 0.0945, 0.0738], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0237, 0.0312, 0.0302, 0.0321, 0.0311, 0.0346, 0.0321], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 00:54:07,873 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35822.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:54:09,940 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=35825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:10,372 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.123e+02 3.051e+02 3.738e+02 4.571e+02 6.690e+02, threshold=7.475e+02, percent-clipped=0.0 +2023-02-06 00:54:12,583 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=35829.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:15,094 INFO [train.py:901] (3/4) Epoch 5, batch 3500, loss[loss=0.299, simple_loss=0.3626, pruned_loss=0.1177, over 8368.00 frames. ], tot_loss[loss=0.2925, simple_loss=0.353, pruned_loss=0.116, over 1609066.13 frames. ], batch size: 24, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:54:15,201 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5282, 4.4885, 4.0655, 2.0085, 3.9030, 3.8691, 4.0499, 3.5800], + device='cuda:3'), covar=tensor([0.0716, 0.0570, 0.0888, 0.4116, 0.0794, 0.0858, 0.1365, 0.0796], + device='cuda:3'), in_proj_covar=tensor([0.0383, 0.0277, 0.0309, 0.0393, 0.0306, 0.0266, 0.0293, 0.0238], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-06 00:54:27,026 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=35850.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:27,677 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=35851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:54:40,697 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 00:54:48,820 INFO [train.py:901] (3/4) Epoch 5, batch 3550, loss[loss=0.393, simple_loss=0.4372, pruned_loss=0.1744, over 8330.00 frames. ], tot_loss[loss=0.293, simple_loss=0.3534, pruned_loss=0.1163, over 1612294.11 frames. ], batch size: 26, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:54:54,913 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=35892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:55:19,538 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.232e+02 3.318e+02 3.882e+02 4.908e+02 1.221e+03, threshold=7.763e+02, percent-clipped=6.0 +2023-02-06 00:55:23,998 INFO [train.py:901] (3/4) Epoch 5, batch 3600, loss[loss=0.3247, simple_loss=0.3817, pruned_loss=0.1338, over 8475.00 frames. ], tot_loss[loss=0.2924, simple_loss=0.3535, pruned_loss=0.1156, over 1619145.27 frames. 
], batch size: 25, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:55:37,602 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0433, 1.2981, 1.1783, 0.2158, 1.1263, 0.9182, 0.1323, 1.1173], + device='cuda:3'), covar=tensor([0.0133, 0.0111, 0.0089, 0.0196, 0.0111, 0.0356, 0.0256, 0.0095], + device='cuda:3'), in_proj_covar=tensor([0.0302, 0.0223, 0.0178, 0.0268, 0.0206, 0.0352, 0.0276, 0.0248], + device='cuda:3'), out_proj_covar=tensor([1.1013e-04, 7.9926e-05, 6.2670e-05, 9.5372e-05, 7.5519e-05, 1.3767e-04, + 1.0143e-04, 8.8963e-05], device='cuda:3') +2023-02-06 00:55:57,697 INFO [train.py:901] (3/4) Epoch 5, batch 3650, loss[loss=0.2595, simple_loss=0.3172, pruned_loss=0.1009, over 7219.00 frames. ], tot_loss[loss=0.2927, simple_loss=0.354, pruned_loss=0.1157, over 1621348.43 frames. ], batch size: 16, lr: 1.51e-02, grad_scale: 16.0 +2023-02-06 00:56:01,318 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4518, 2.1422, 3.1633, 2.7680, 2.7847, 1.9984, 1.4184, 1.5330], + device='cuda:3'), covar=tensor([0.1464, 0.1807, 0.0414, 0.0901, 0.0753, 0.0943, 0.0986, 0.1786], + device='cuda:3'), in_proj_covar=tensor([0.0732, 0.0670, 0.0568, 0.0653, 0.0762, 0.0627, 0.0603, 0.0624], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:56:15,066 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:56:27,680 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.237e+02 3.345e+02 4.197e+02 5.280e+02 9.599e+02, threshold=8.394e+02, percent-clipped=10.0 +2023-02-06 00:56:32,339 INFO [train.py:901] (3/4) Epoch 5, batch 3700, loss[loss=0.2901, simple_loss=0.3521, pruned_loss=0.114, over 8461.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3546, pruned_loss=0.1166, over 1618094.17 frames. ], batch size: 25, lr: 1.50e-02, grad_scale: 16.0 +2023-02-06 00:56:40,796 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 00:56:44,923 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7265, 1.4498, 2.8798, 1.1868, 2.2243, 3.1384, 3.0026, 2.6711], + device='cuda:3'), covar=tensor([0.0963, 0.1262, 0.0366, 0.1944, 0.0584, 0.0257, 0.0414, 0.0671], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0262, 0.0218, 0.0258, 0.0217, 0.0194, 0.0224, 0.0267], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 00:57:04,801 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36078.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:57:07,929 INFO [train.py:901] (3/4) Epoch 5, batch 3750, loss[loss=0.3157, simple_loss=0.373, pruned_loss=0.1292, over 8239.00 frames. ], tot_loss[loss=0.2931, simple_loss=0.3536, pruned_loss=0.1163, over 1613673.69 frames. ], batch size: 22, lr: 1.50e-02, grad_scale: 16.0 +2023-02-06 00:57:09,711 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0 +2023-02-06 00:57:21,376 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36103.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 00:57:24,139 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:37,192 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 3.033e+02 3.704e+02 4.599e+02 1.470e+03, threshold=7.408e+02, percent-clipped=9.0 +2023-02-06 00:57:40,796 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:41,247 INFO [train.py:901] (3/4) Epoch 5, batch 3800, loss[loss=0.3073, simple_loss=0.3679, pruned_loss=0.1233, over 8109.00 frames. ], tot_loss[loss=0.2917, simple_loss=0.3527, pruned_loss=0.1153, over 1616536.60 frames. ], batch size: 23, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:57:41,320 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:57:48,141 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5813, 1.9475, 3.5466, 1.2065, 2.3912, 1.7675, 1.5892, 2.1776], + device='cuda:3'), covar=tensor([0.1326, 0.1719, 0.0484, 0.2836, 0.1291, 0.2275, 0.1400, 0.1861], + device='cuda:3'), in_proj_covar=tensor([0.0457, 0.0438, 0.0513, 0.0527, 0.0565, 0.0512, 0.0447, 0.0575], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 00:58:09,345 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:15,714 INFO [train.py:901] (3/4) Epoch 5, batch 3850, loss[loss=0.2898, simple_loss=0.3294, pruned_loss=0.1251, over 8142.00 frames. ], tot_loss[loss=0.2911, simple_loss=0.3516, pruned_loss=0.1152, over 1614316.52 frames. ], batch size: 22, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:58:27,240 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:58:41,077 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 00:58:45,749 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 3.284e+02 4.097e+02 5.243e+02 1.380e+03, threshold=8.194e+02, percent-clipped=10.0 +2023-02-06 00:58:49,716 INFO [train.py:901] (3/4) Epoch 5, batch 3900, loss[loss=0.3211, simple_loss=0.3743, pruned_loss=0.134, over 8656.00 frames. ], tot_loss[loss=0.2909, simple_loss=0.3514, pruned_loss=0.1152, over 1616301.04 frames. 
], batch size: 39, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:58:59,594 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:09,642 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:22,980 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5065, 1.4805, 4.4878, 1.8157, 2.1614, 5.1313, 4.8008, 4.4682], + device='cuda:3'), covar=tensor([0.1068, 0.1527, 0.0220, 0.1845, 0.1084, 0.0198, 0.0434, 0.0536], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0267, 0.0224, 0.0261, 0.0225, 0.0199, 0.0231, 0.0273], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 00:59:24,828 INFO [train.py:901] (3/4) Epoch 5, batch 3950, loss[loss=0.29, simple_loss=0.3585, pruned_loss=0.1108, over 8498.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.35, pruned_loss=0.1136, over 1616726.98 frames. ], batch size: 28, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 00:59:27,795 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4019, 2.0465, 3.1854, 2.6336, 2.6162, 2.1026, 1.5020, 1.2338], + device='cuda:3'), covar=tensor([0.1614, 0.1777, 0.0388, 0.0816, 0.0831, 0.0913, 0.0993, 0.2047], + device='cuda:3'), in_proj_covar=tensor([0.0739, 0.0667, 0.0567, 0.0659, 0.0759, 0.0623, 0.0597, 0.0613], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 00:59:28,405 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:28,429 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 00:59:46,710 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5371, 1.8173, 3.3432, 0.9891, 2.3634, 1.8332, 1.3398, 1.9743], + device='cuda:3'), covar=tensor([0.1429, 0.1800, 0.0618, 0.3306, 0.1340, 0.2317, 0.1599, 0.2174], + device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0445, 0.0528, 0.0537, 0.0573, 0.0521, 0.0453, 0.0589], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 00:59:48,302 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.83 vs. limit=5.0 +2023-02-06 00:59:53,672 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 00:59:54,113 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5129, 1.4349, 2.8475, 1.0402, 1.9596, 3.0667, 2.9242, 2.5816], + device='cuda:3'), covar=tensor([0.1037, 0.1254, 0.0395, 0.1911, 0.0703, 0.0255, 0.0402, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0265, 0.0223, 0.0260, 0.0223, 0.0197, 0.0228, 0.0272], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 00:59:54,577 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.894e+02 2.959e+02 3.540e+02 4.519e+02 1.633e+03, threshold=7.079e+02, percent-clipped=6.0 +2023-02-06 00:59:58,379 INFO [train.py:901] (3/4) Epoch 5, batch 4000, loss[loss=0.2439, simple_loss=0.3135, pruned_loss=0.08711, over 8081.00 frames. ], tot_loss[loss=0.2897, simple_loss=0.3508, pruned_loss=0.1143, over 1613394.37 frames. 
], batch size: 21, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:00:31,747 INFO [train.py:901] (3/4) Epoch 5, batch 4050, loss[loss=0.2819, simple_loss=0.3571, pruned_loss=0.1033, over 8013.00 frames. ], tot_loss[loss=0.2899, simple_loss=0.3505, pruned_loss=0.1146, over 1605728.60 frames. ], batch size: 22, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:03,218 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 3.036e+02 3.577e+02 4.439e+02 7.437e+02, threshold=7.154e+02, percent-clipped=1.0 +2023-02-06 01:01:07,959 INFO [train.py:901] (3/4) Epoch 5, batch 4100, loss[loss=0.2671, simple_loss=0.3367, pruned_loss=0.09872, over 8083.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3504, pruned_loss=0.1142, over 1610608.60 frames. ], batch size: 21, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:41,918 INFO [train.py:901] (3/4) Epoch 5, batch 4150, loss[loss=0.2589, simple_loss=0.321, pruned_loss=0.09847, over 8650.00 frames. ], tot_loss[loss=0.2876, simple_loss=0.3486, pruned_loss=0.1133, over 1608929.99 frames. ], batch size: 34, lr: 1.50e-02, grad_scale: 8.0 +2023-02-06 01:01:44,924 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9147, 1.3855, 1.4698, 1.1647, 1.0969, 1.3461, 1.5384, 1.4272], + device='cuda:3'), covar=tensor([0.0608, 0.1255, 0.1948, 0.1598, 0.0666, 0.1615, 0.0774, 0.0602], + device='cuda:3'), in_proj_covar=tensor([0.0139, 0.0181, 0.0221, 0.0184, 0.0133, 0.0192, 0.0146, 0.0155], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:01:56,717 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:13,608 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.751e+02 3.740e+02 4.679e+02 9.033e+02, threshold=7.480e+02, percent-clipped=3.0 +2023-02-06 01:02:15,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:17,734 INFO [train.py:901] (3/4) Epoch 5, batch 4200, loss[loss=0.2973, simple_loss=0.3642, pruned_loss=0.1152, over 8482.00 frames. ], tot_loss[loss=0.2871, simple_loss=0.3483, pruned_loss=0.1129, over 1609107.08 frames. ], batch size: 27, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:02:24,878 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:25,684 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36544.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:37,294 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7441, 2.2274, 1.6161, 2.8289, 1.1427, 1.4520, 1.6555, 2.3581], + device='cuda:3'), covar=tensor([0.1281, 0.1131, 0.1662, 0.0518, 0.1829, 0.2200, 0.1701, 0.1040], + device='cuda:3'), in_proj_covar=tensor([0.0274, 0.0265, 0.0291, 0.0231, 0.0258, 0.0289, 0.0293, 0.0266], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 01:02:43,334 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 01:02:43,528 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36569.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:02:52,965 INFO [train.py:901] (3/4) Epoch 5, batch 4250, loss[loss=0.284, simple_loss=0.3435, pruned_loss=0.1123, over 7307.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.35, pruned_loss=0.1138, over 1610606.47 frames. ], batch size: 16, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:03:05,690 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 01:03:13,555 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4184, 1.4004, 4.5639, 1.7024, 3.9116, 3.7573, 4.0971, 4.0116], + device='cuda:3'), covar=tensor([0.0442, 0.3415, 0.0339, 0.2532, 0.1102, 0.0691, 0.0453, 0.0509], + device='cuda:3'), in_proj_covar=tensor([0.0324, 0.0481, 0.0404, 0.0416, 0.0476, 0.0400, 0.0387, 0.0442], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:03:20,905 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:24,279 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:24,817 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.959e+02 3.120e+02 3.802e+02 4.654e+02 9.583e+02, threshold=7.605e+02, percent-clipped=3.0 +2023-02-06 01:03:27,087 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1018, 4.1519, 3.6706, 1.7306, 3.6941, 3.6712, 3.8377, 3.3217], + device='cuda:3'), covar=tensor([0.0988, 0.0615, 0.1130, 0.5046, 0.0852, 0.1004, 0.1351, 0.0941], + device='cuda:3'), in_proj_covar=tensor([0.0387, 0.0279, 0.0318, 0.0399, 0.0312, 0.0270, 0.0298, 0.0241], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-06 01:03:27,131 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:28,982 INFO [train.py:901] (3/4) Epoch 5, batch 4300, loss[loss=0.3344, simple_loss=0.3732, pruned_loss=0.1478, over 8032.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3511, pruned_loss=0.1142, over 1614649.61 frames. ], batch size: 22, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:03:47,187 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=36658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:03:57,097 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 01:04:05,001 INFO [train.py:901] (3/4) Epoch 5, batch 4350, loss[loss=0.2247, simple_loss=0.2904, pruned_loss=0.07952, over 7653.00 frames. ], tot_loss[loss=0.2892, simple_loss=0.351, pruned_loss=0.1137, over 1611445.24 frames. ], batch size: 19, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:04:28,883 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36718.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:04:34,836 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.000e+02 3.265e+02 4.032e+02 4.973e+02 1.053e+03, threshold=8.064e+02, percent-clipped=5.0 +2023-02-06 01:04:36,262 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 01:04:38,955 INFO [train.py:901] (3/4) Epoch 5, batch 4400, loss[loss=0.2149, simple_loss=0.2853, pruned_loss=0.07225, over 7786.00 frames. ], tot_loss[loss=0.2899, simple_loss=0.3509, pruned_loss=0.1145, over 1611858.91 frames. ], batch size: 19, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:05:15,297 INFO [train.py:901] (3/4) Epoch 5, batch 4450, loss[loss=0.281, simple_loss=0.3535, pruned_loss=0.1042, over 8084.00 frames. ], tot_loss[loss=0.2901, simple_loss=0.3512, pruned_loss=0.1145, over 1613249.05 frames. ], batch size: 21, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:05:18,070 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 01:05:43,198 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36823.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:05:44,599 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7026, 2.1665, 2.1843, 1.2141, 2.1326, 1.4424, 0.6403, 1.7260], + device='cuda:3'), covar=tensor([0.0179, 0.0084, 0.0068, 0.0166, 0.0149, 0.0336, 0.0284, 0.0104], + device='cuda:3'), in_proj_covar=tensor([0.0310, 0.0228, 0.0180, 0.0266, 0.0213, 0.0361, 0.0286, 0.0259], + device='cuda:3'), out_proj_covar=tensor([1.1198e-04, 8.0690e-05, 6.2385e-05, 9.3859e-05, 7.7463e-05, 1.3974e-04, + 1.0432e-04, 9.2529e-05], device='cuda:3') +2023-02-06 01:05:45,713 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 3.029e+02 3.648e+02 4.687e+02 9.435e+02, threshold=7.296e+02, percent-clipped=4.0 +2023-02-06 01:05:49,701 INFO [train.py:901] (3/4) Epoch 5, batch 4500, loss[loss=0.2978, simple_loss=0.3575, pruned_loss=0.119, over 8187.00 frames. ], tot_loss[loss=0.2903, simple_loss=0.3512, pruned_loss=0.1147, over 1618150.12 frames. ], batch size: 23, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:06:14,965 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 01:06:26,112 INFO [train.py:901] (3/4) Epoch 5, batch 4550, loss[loss=0.3301, simple_loss=0.3702, pruned_loss=0.145, over 7971.00 frames. ], tot_loss[loss=0.2905, simple_loss=0.3515, pruned_loss=0.1148, over 1618349.35 frames. ], batch size: 21, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:06:47,961 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=36914.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:06:56,551 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 3.174e+02 3.779e+02 4.790e+02 8.988e+02, threshold=7.559e+02, percent-clipped=4.0 +2023-02-06 01:06:58,095 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:00,566 INFO [train.py:901] (3/4) Epoch 5, batch 4600, loss[loss=0.2655, simple_loss=0.3386, pruned_loss=0.09626, over 8323.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3514, pruned_loss=0.115, over 1616596.13 frames. ], batch size: 25, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:07:04,198 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.61 vs. 
limit=5.0 +2023-02-06 01:07:04,718 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=36939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:14,074 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3908, 1.9600, 1.7957, 0.8022, 1.9469, 1.5000, 0.3394, 1.6986], + device='cuda:3'), covar=tensor([0.0215, 0.0099, 0.0094, 0.0188, 0.0115, 0.0331, 0.0312, 0.0113], + device='cuda:3'), in_proj_covar=tensor([0.0309, 0.0225, 0.0179, 0.0265, 0.0212, 0.0359, 0.0282, 0.0258], + device='cuda:3'), out_proj_covar=tensor([1.1165e-04, 7.9618e-05, 6.1792e-05, 9.3386e-05, 7.6775e-05, 1.3866e-04, + 1.0261e-04, 9.2074e-05], device='cuda:3') +2023-02-06 01:07:14,112 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3327, 1.8987, 3.0776, 2.3980, 2.3852, 1.8372, 1.4388, 0.9369], + device='cuda:3'), covar=tensor([0.1699, 0.1938, 0.0418, 0.1026, 0.0921, 0.1027, 0.0961, 0.2148], + device='cuda:3'), in_proj_covar=tensor([0.0751, 0.0680, 0.0570, 0.0655, 0.0757, 0.0628, 0.0601, 0.0621], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:07:18,360 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 01:07:23,549 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:25,627 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36970.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:26,415 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9352, 1.2136, 5.8581, 2.1816, 5.3779, 5.0141, 5.6045, 5.4614], + device='cuda:3'), covar=tensor([0.0358, 0.3462, 0.0187, 0.2074, 0.0658, 0.0414, 0.0256, 0.0321], + device='cuda:3'), in_proj_covar=tensor([0.0323, 0.0477, 0.0404, 0.0404, 0.0468, 0.0394, 0.0389, 0.0441], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:07:29,031 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=36974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:30,330 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=36976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:07:35,634 INFO [train.py:901] (3/4) Epoch 5, batch 4650, loss[loss=0.2731, simple_loss=0.3349, pruned_loss=0.1056, over 7659.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3497, pruned_loss=0.1137, over 1616819.03 frames. ], batch size: 19, lr: 1.49e-02, grad_scale: 8.0 +2023-02-06 01:08:02,860 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:06,067 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 3.207e+02 3.974e+02 5.163e+02 9.904e+02, threshold=7.949e+02, percent-clipped=4.0 +2023-02-06 01:08:10,736 INFO [train.py:901] (3/4) Epoch 5, batch 4700, loss[loss=0.3006, simple_loss=0.3679, pruned_loss=0.1166, over 8326.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3498, pruned_loss=0.1133, over 1618203.26 frames. 
], batch size: 25, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:08:29,886 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37062.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:08:43,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:44,113 INFO [train.py:901] (3/4) Epoch 5, batch 4750, loss[loss=0.2829, simple_loss=0.3463, pruned_loss=0.1098, over 8140.00 frames. ], tot_loss[loss=0.2894, simple_loss=0.3502, pruned_loss=0.1143, over 1616509.59 frames. ], batch size: 22, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:08:45,721 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:08:49,146 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:09:15,976 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 3.010e+02 3.846e+02 4.879e+02 1.523e+03, threshold=7.692e+02, percent-clipped=5.0 +2023-02-06 01:09:17,401 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 01:09:20,175 INFO [train.py:901] (3/4) Epoch 5, batch 4800, loss[loss=0.3805, simple_loss=0.3987, pruned_loss=0.1811, over 7067.00 frames. ], tot_loss[loss=0.291, simple_loss=0.3515, pruned_loss=0.1152, over 1615758.57 frames. ], batch size: 71, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:09:20,181 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 01:09:44,352 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37167.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:09:51,284 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37177.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:09:55,226 INFO [train.py:901] (3/4) Epoch 5, batch 4850, loss[loss=0.333, simple_loss=0.3773, pruned_loss=0.1444, over 6887.00 frames. ], tot_loss[loss=0.2937, simple_loss=0.353, pruned_loss=0.1172, over 1614988.43 frames. ], batch size: 71, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:09:59,031 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.60 vs. limit=5.0 +2023-02-06 01:10:10,652 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 01:10:27,460 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.956e+02 3.581e+02 4.871e+02 1.087e+03, threshold=7.163e+02, percent-clipped=6.0 +2023-02-06 01:10:31,482 INFO [train.py:901] (3/4) Epoch 5, batch 4900, loss[loss=0.2689, simple_loss=0.3436, pruned_loss=0.09711, over 8252.00 frames. ], tot_loss[loss=0.2939, simple_loss=0.3533, pruned_loss=0.1173, over 1616277.04 frames. ], batch size: 24, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:10:36,645 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-06 01:10:59,367 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:05,619 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37282.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:11:06,080 INFO [train.py:901] (3/4) Epoch 5, batch 4950, loss[loss=0.2818, simple_loss=0.36, pruned_loss=0.1018, over 8199.00 frames. ], tot_loss[loss=0.2914, simple_loss=0.3517, pruned_loss=0.1155, over 1614171.68 frames. ], batch size: 23, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:11:08,201 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:31,042 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:35,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 3.052e+02 3.616e+02 4.696e+02 1.143e+03, threshold=7.231e+02, percent-clipped=5.0 +2023-02-06 01:11:40,306 INFO [train.py:901] (3/4) Epoch 5, batch 5000, loss[loss=0.2756, simple_loss=0.3323, pruned_loss=0.1094, over 8096.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3509, pruned_loss=0.1151, over 1615802.00 frames. ], batch size: 21, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:11:43,902 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:45,952 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:11:49,267 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:01,337 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:03,194 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:03,302 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:05,948 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:14,482 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:15,020 INFO [train.py:901] (3/4) Epoch 5, batch 5050, loss[loss=0.2664, simple_loss=0.318, pruned_loss=0.1074, over 7790.00 frames. ], tot_loss[loss=0.2907, simple_loss=0.3512, pruned_loss=0.1151, over 1613871.68 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:12:18,417 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:12:18,584 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 01:12:39,796 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. 
limit=2.0 +2023-02-06 01:12:44,492 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 3.496e+02 4.122e+02 5.072e+02 9.522e+02, threshold=8.245e+02, percent-clipped=6.0 +2023-02-06 01:12:48,504 INFO [train.py:901] (3/4) Epoch 5, batch 5100, loss[loss=0.2318, simple_loss=0.3004, pruned_loss=0.08158, over 7664.00 frames. ], tot_loss[loss=0.2913, simple_loss=0.3516, pruned_loss=0.1155, over 1615926.81 frames. ], batch size: 19, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:12:48,513 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 01:12:48,713 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37433.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:12:49,923 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:13:06,537 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37458.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:13:22,248 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:13:23,387 INFO [train.py:901] (3/4) Epoch 5, batch 5150, loss[loss=0.3387, simple_loss=0.3873, pruned_loss=0.145, over 8317.00 frames. ], tot_loss[loss=0.2908, simple_loss=0.3515, pruned_loss=0.1151, over 1613266.39 frames. ], batch size: 25, lr: 1.48e-02, grad_scale: 8.0 +2023-02-06 01:13:46,994 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0353, 1.0653, 3.2323, 0.9406, 2.6972, 2.7461, 2.9077, 2.8804], + device='cuda:3'), covar=tensor([0.0703, 0.3473, 0.0648, 0.2548, 0.1557, 0.0759, 0.0709, 0.0747], + device='cuda:3'), in_proj_covar=tensor([0.0331, 0.0494, 0.0410, 0.0413, 0.0485, 0.0403, 0.0400, 0.0445], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:13:53,504 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.119e+02 3.138e+02 3.879e+02 5.454e+02 1.167e+03, threshold=7.757e+02, percent-clipped=4.0 +2023-02-06 01:13:57,569 INFO [train.py:901] (3/4) Epoch 5, batch 5200, loss[loss=0.2878, simple_loss=0.3543, pruned_loss=0.1107, over 8108.00 frames. ], tot_loss[loss=0.2895, simple_loss=0.3503, pruned_loss=0.1143, over 1608838.94 frames. ], batch size: 23, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:14:01,148 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37538.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:14:19,392 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37563.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:14:21,941 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2001, 1.2523, 2.0114, 0.9625, 1.8820, 2.2227, 2.2053, 1.8959], + device='cuda:3'), covar=tensor([0.1060, 0.1236, 0.0639, 0.1989, 0.0650, 0.0427, 0.0548, 0.0864], + device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0274, 0.0224, 0.0263, 0.0225, 0.0199, 0.0230, 0.0278], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 01:14:33,074 INFO [train.py:901] (3/4) Epoch 5, batch 5250, loss[loss=0.274, simple_loss=0.3498, pruned_loss=0.0991, over 8517.00 frames. ], tot_loss[loss=0.2906, simple_loss=0.3518, pruned_loss=0.1147, over 1616114.76 frames. 
], batch size: 28, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:14:44,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6111, 1.7881, 1.8442, 1.5233, 1.0739, 1.8873, 0.2790, 1.0122], + device='cuda:3'), covar=tensor([0.3106, 0.1774, 0.1337, 0.2538, 0.5873, 0.0950, 0.5637, 0.2993], + device='cuda:3'), in_proj_covar=tensor([0.0129, 0.0123, 0.0080, 0.0172, 0.0209, 0.0083, 0.0149, 0.0127], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:14:45,226 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 01:15:03,494 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 3.205e+02 3.781e+02 5.298e+02 9.083e+02, threshold=7.562e+02, percent-clipped=4.0 +2023-02-06 01:15:05,555 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:07,536 INFO [train.py:901] (3/4) Epoch 5, batch 5300, loss[loss=0.2743, simple_loss=0.3523, pruned_loss=0.09814, over 8467.00 frames. ], tot_loss[loss=0.2888, simple_loss=0.3502, pruned_loss=0.1137, over 1612715.40 frames. ], batch size: 27, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:15:15,188 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:20,672 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4852, 1.4260, 2.7681, 1.1833, 1.8984, 2.9690, 2.9028, 2.5024], + device='cuda:3'), covar=tensor([0.1068, 0.1334, 0.0455, 0.1866, 0.0766, 0.0293, 0.0478, 0.0695], + device='cuda:3'), in_proj_covar=tensor([0.0241, 0.0276, 0.0227, 0.0264, 0.0226, 0.0201, 0.0232, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 01:15:32,590 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:15:36,180 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.95 vs. limit=2.0 +2023-02-06 01:15:42,454 INFO [train.py:901] (3/4) Epoch 5, batch 5350, loss[loss=0.2617, simple_loss=0.3315, pruned_loss=0.09594, over 7981.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3493, pruned_loss=0.1127, over 1613747.84 frames. ], batch size: 21, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:15:47,822 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:05,033 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37716.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:11,802 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=37726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:12,341 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 3.472e+02 4.206e+02 5.536e+02 1.524e+03, threshold=8.412e+02, percent-clipped=7.0 +2023-02-06 01:16:17,022 INFO [train.py:901] (3/4) Epoch 5, batch 5400, loss[loss=0.2357, simple_loss=0.2931, pruned_loss=0.08913, over 7452.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3493, pruned_loss=0.1131, over 1613649.07 frames. 
], batch size: 17, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:16:19,918 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=37737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:25,229 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:36,760 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=37762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:16:40,898 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 01:16:50,838 INFO [train.py:901] (3/4) Epoch 5, batch 5450, loss[loss=0.3041, simple_loss=0.3513, pruned_loss=0.1284, over 7658.00 frames. ], tot_loss[loss=0.2878, simple_loss=0.3486, pruned_loss=0.1135, over 1609206.09 frames. ], batch size: 19, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:17:21,245 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4631, 1.4236, 1.5613, 1.3481, 1.2966, 1.3763, 1.7631, 1.6923], + device='cuda:3'), covar=tensor([0.0495, 0.1253, 0.1624, 0.1312, 0.0624, 0.1534, 0.0737, 0.0543], + device='cuda:3'), in_proj_covar=tensor([0.0135, 0.0179, 0.0218, 0.0181, 0.0130, 0.0188, 0.0146, 0.0153], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:17:22,464 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 2.936e+02 3.882e+02 5.021e+02 1.156e+03, threshold=7.764e+02, percent-clipped=3.0 +2023-02-06 01:17:23,312 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7552, 2.2143, 3.7059, 3.0836, 2.8719, 2.1009, 1.5762, 1.7260], + device='cuda:3'), covar=tensor([0.1751, 0.2344, 0.0501, 0.1057, 0.1170, 0.1012, 0.1032, 0.2285], + device='cuda:3'), in_proj_covar=tensor([0.0755, 0.0678, 0.0571, 0.0665, 0.0778, 0.0628, 0.0607, 0.0631], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:17:26,495 INFO [train.py:901] (3/4) Epoch 5, batch 5500, loss[loss=0.3377, simple_loss=0.3819, pruned_loss=0.1468, over 6904.00 frames. ], tot_loss[loss=0.2886, simple_loss=0.3492, pruned_loss=0.1139, over 1610578.90 frames. ], batch size: 71, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:17:30,474 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 01:17:32,020 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=37841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:17:40,095 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3818, 2.5132, 1.6105, 2.0479, 1.9548, 1.3564, 1.7624, 1.8064], + device='cuda:3'), covar=tensor([0.1150, 0.0258, 0.0867, 0.0479, 0.0541, 0.1074, 0.0848, 0.0724], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0242, 0.0312, 0.0302, 0.0318, 0.0308, 0.0343, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 01:18:00,739 INFO [train.py:901] (3/4) Epoch 5, batch 5550, loss[loss=0.334, simple_loss=0.3816, pruned_loss=0.1432, over 8242.00 frames. ], tot_loss[loss=0.2884, simple_loss=0.3489, pruned_loss=0.1139, over 1610305.67 frames. 
], batch size: 24, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:32,131 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 3.124e+02 3.941e+02 5.093e+02 9.977e+02, threshold=7.882e+02, percent-clipped=4.0 +2023-02-06 01:18:36,202 INFO [train.py:901] (3/4) Epoch 5, batch 5600, loss[loss=0.2767, simple_loss=0.3356, pruned_loss=0.1089, over 8036.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.3481, pruned_loss=0.1132, over 1610355.56 frames. ], batch size: 22, lr: 1.47e-02, grad_scale: 8.0 +2023-02-06 01:18:40,423 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6999, 1.1886, 3.8857, 1.4093, 3.3262, 3.2173, 3.4937, 3.3517], + device='cuda:3'), covar=tensor([0.0436, 0.3689, 0.0478, 0.2628, 0.1155, 0.0768, 0.0474, 0.0570], + device='cuda:3'), in_proj_covar=tensor([0.0329, 0.0491, 0.0412, 0.0421, 0.0480, 0.0407, 0.0397, 0.0447], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:18:46,649 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 01:19:10,749 INFO [train.py:901] (3/4) Epoch 5, batch 5650, loss[loss=0.285, simple_loss=0.3542, pruned_loss=0.1079, over 8245.00 frames. ], tot_loss[loss=0.2882, simple_loss=0.3489, pruned_loss=0.1138, over 1609890.06 frames. ], batch size: 24, lr: 1.47e-02, grad_scale: 4.0 +2023-02-06 01:19:11,865 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.60 vs. limit=5.0 +2023-02-06 01:19:20,217 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=37997.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:23,903 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:33,263 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7828, 2.1720, 4.6317, 1.1560, 3.1263, 2.4956, 1.6413, 2.4429], + device='cuda:3'), covar=tensor([0.1402, 0.1972, 0.0655, 0.3339, 0.1281, 0.2140, 0.1519, 0.2363], + device='cuda:3'), in_proj_covar=tensor([0.0463, 0.0446, 0.0530, 0.0532, 0.0577, 0.0516, 0.0446, 0.0588], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 01:19:34,403 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 01:19:41,416 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:19:42,548 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 3.005e+02 3.717e+02 4.758e+02 1.120e+03, threshold=7.434e+02, percent-clipped=3.0 +2023-02-06 01:19:45,907 INFO [train.py:901] (3/4) Epoch 5, batch 5700, loss[loss=0.2865, simple_loss=0.3566, pruned_loss=0.1082, over 8105.00 frames. ], tot_loss[loss=0.2922, simple_loss=0.3519, pruned_loss=0.1163, over 1607095.02 frames. 
], batch size: 23, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:19:58,761 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5292, 4.5192, 4.0787, 1.9780, 4.1100, 4.0978, 4.1922, 3.8457], + device='cuda:3'), covar=tensor([0.0598, 0.0428, 0.0768, 0.3931, 0.0601, 0.0565, 0.0865, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0399, 0.0287, 0.0319, 0.0405, 0.0321, 0.0269, 0.0298, 0.0250], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:20:20,737 INFO [train.py:901] (3/4) Epoch 5, batch 5750, loss[loss=0.2719, simple_loss=0.3422, pruned_loss=0.1008, over 8351.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3504, pruned_loss=0.1149, over 1607608.02 frames. ], batch size: 26, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:30,236 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:37,196 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 01:20:46,792 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:20:50,580 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.930e+02 3.053e+02 3.876e+02 4.925e+02 1.023e+03, threshold=7.752e+02, percent-clipped=4.0 +2023-02-06 01:20:54,528 INFO [train.py:901] (3/4) Epoch 5, batch 5800, loss[loss=0.2835, simple_loss=0.358, pruned_loss=0.1045, over 8456.00 frames. ], tot_loss[loss=0.2901, simple_loss=0.351, pruned_loss=0.1146, over 1613683.07 frames. ], batch size: 29, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:20:58,012 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9486, 2.3104, 2.9667, 1.1043, 2.9592, 1.6703, 1.4085, 1.5788], + device='cuda:3'), covar=tensor([0.0289, 0.0129, 0.0086, 0.0278, 0.0176, 0.0377, 0.0373, 0.0207], + device='cuda:3'), in_proj_covar=tensor([0.0315, 0.0224, 0.0183, 0.0269, 0.0214, 0.0359, 0.0282, 0.0261], + device='cuda:3'), out_proj_covar=tensor([1.1298e-04, 7.7778e-05, 6.2967e-05, 9.3688e-05, 7.6369e-05, 1.3633e-04, + 1.0106e-04, 9.1467e-05], device='cuda:3') +2023-02-06 01:21:14,944 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4257, 1.6280, 1.5521, 1.3900, 1.4000, 1.5112, 1.8202, 1.8796], + device='cuda:3'), covar=tensor([0.0551, 0.1291, 0.1806, 0.1435, 0.0647, 0.1653, 0.0810, 0.0531], + device='cuda:3'), in_proj_covar=tensor([0.0135, 0.0180, 0.0219, 0.0183, 0.0131, 0.0191, 0.0145, 0.0153], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:21:29,415 INFO [train.py:901] (3/4) Epoch 5, batch 5850, loss[loss=0.2818, simple_loss=0.3555, pruned_loss=0.104, over 8510.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3506, pruned_loss=0.114, over 1615078.82 frames. ], batch size: 28, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:21:31,398 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. 
limit=2.0 +2023-02-06 01:21:57,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.5737, 3.5160, 3.0653, 2.0035, 3.0645, 3.1877, 3.3079, 2.8165], + device='cuda:3'), covar=tensor([0.0954, 0.0749, 0.1061, 0.3855, 0.0833, 0.0895, 0.1090, 0.1019], + device='cuda:3'), in_proj_covar=tensor([0.0404, 0.0289, 0.0323, 0.0409, 0.0318, 0.0271, 0.0304, 0.0255], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:22:01,294 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.005e+02 3.016e+02 3.759e+02 4.889e+02 1.185e+03, threshold=7.518e+02, percent-clipped=2.0 +2023-02-06 01:22:04,818 INFO [train.py:901] (3/4) Epoch 5, batch 5900, loss[loss=0.2573, simple_loss=0.3109, pruned_loss=0.1018, over 7790.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.3478, pruned_loss=0.1125, over 1610963.55 frames. ], batch size: 19, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:22:40,895 INFO [train.py:901] (3/4) Epoch 5, batch 5950, loss[loss=0.3692, simple_loss=0.3935, pruned_loss=0.1724, over 6938.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3483, pruned_loss=0.1127, over 1613891.67 frames. ], batch size: 71, lr: 1.46e-02, grad_scale: 4.0 +2023-02-06 01:23:12,028 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.179e+02 3.191e+02 3.790e+02 5.332e+02 1.075e+03, threshold=7.580e+02, percent-clipped=7.0 +2023-02-06 01:23:15,479 INFO [train.py:901] (3/4) Epoch 5, batch 6000, loss[loss=0.2447, simple_loss=0.3277, pruned_loss=0.08082, over 8249.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.347, pruned_loss=0.1122, over 1610555.36 frames. ], batch size: 24, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:23:15,479 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 01:23:23,744 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4417, 1.7206, 2.7938, 1.0934, 2.0820, 1.6816, 1.5074, 1.7903], + device='cuda:3'), covar=tensor([0.1518, 0.2101, 0.0717, 0.3574, 0.1479, 0.2532, 0.1562, 0.1993], + device='cuda:3'), in_proj_covar=tensor([0.0464, 0.0446, 0.0522, 0.0537, 0.0577, 0.0520, 0.0443, 0.0593], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 01:23:28,272 INFO [train.py:935] (3/4) Epoch 5, validation: loss=0.2196, simple_loss=0.3162, pruned_loss=0.06146, over 944034.00 frames. +2023-02-06 01:23:28,273 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 01:23:33,799 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:23:56,506 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3283, 1.5947, 3.0940, 1.0346, 2.1760, 1.7881, 1.4375, 1.7477], + device='cuda:3'), covar=tensor([0.1966, 0.2261, 0.0777, 0.3935, 0.1575, 0.2634, 0.1845, 0.2429], + device='cuda:3'), in_proj_covar=tensor([0.0465, 0.0446, 0.0524, 0.0537, 0.0579, 0.0519, 0.0444, 0.0595], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 01:23:58,484 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38378.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:24:01,710 INFO [train.py:901] (3/4) Epoch 5, batch 6050, loss[loss=0.2926, simple_loss=0.3397, pruned_loss=0.1228, over 7653.00 frames. 
], tot_loss[loss=0.2862, simple_loss=0.3472, pruned_loss=0.1126, over 1613142.81 frames. ], batch size: 19, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:33,770 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.931e+02 3.108e+02 3.868e+02 4.827e+02 8.119e+02, threshold=7.737e+02, percent-clipped=1.0 +2023-02-06 01:24:37,062 INFO [train.py:901] (3/4) Epoch 5, batch 6100, loss[loss=0.2257, simple_loss=0.2875, pruned_loss=0.08197, over 7544.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3486, pruned_loss=0.1137, over 1613120.73 frames. ], batch size: 18, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:24:53,399 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:25:09,655 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 01:25:10,930 INFO [train.py:901] (3/4) Epoch 5, batch 6150, loss[loss=0.3141, simple_loss=0.3796, pruned_loss=0.1243, over 8443.00 frames. ], tot_loss[loss=0.288, simple_loss=0.3488, pruned_loss=0.1137, over 1613830.54 frames. ], batch size: 27, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:25:29,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5191, 1.8980, 2.0044, 1.4637, 0.8855, 2.0611, 0.3239, 1.0301], + device='cuda:3'), covar=tensor([0.3517, 0.2308, 0.1255, 0.3057, 0.6500, 0.0627, 0.5857, 0.3184], + device='cuda:3'), in_proj_covar=tensor([0.0132, 0.0127, 0.0083, 0.0173, 0.0216, 0.0083, 0.0145, 0.0131], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:25:42,289 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 3.089e+02 4.008e+02 5.119e+02 1.011e+03, threshold=8.016e+02, percent-clipped=7.0 +2023-02-06 01:25:44,511 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1723, 1.3136, 2.3221, 1.0299, 2.0900, 2.4779, 2.4610, 2.1430], + device='cuda:3'), covar=tensor([0.1029, 0.1091, 0.0487, 0.1870, 0.0554, 0.0362, 0.0502, 0.0786], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0267, 0.0220, 0.0260, 0.0222, 0.0198, 0.0228, 0.0271], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 01:25:45,625 INFO [train.py:901] (3/4) Epoch 5, batch 6200, loss[loss=0.3147, simple_loss=0.3631, pruned_loss=0.1332, over 7973.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3482, pruned_loss=0.1132, over 1609855.47 frames. ], batch size: 21, lr: 1.46e-02, grad_scale: 8.0 +2023-02-06 01:26:20,118 INFO [train.py:901] (3/4) Epoch 5, batch 6250, loss[loss=0.271, simple_loss=0.3216, pruned_loss=0.1102, over 7269.00 frames. ], tot_loss[loss=0.2868, simple_loss=0.3478, pruned_loss=0.1129, over 1609466.27 frames. ], batch size: 16, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:26:29,033 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2764, 1.9495, 3.1794, 2.3558, 2.6026, 1.9443, 1.5115, 1.2266], + device='cuda:3'), covar=tensor([0.1893, 0.2035, 0.0445, 0.1112, 0.0971, 0.1056, 0.1020, 0.2146], + device='cuda:3'), in_proj_covar=tensor([0.0758, 0.0685, 0.0580, 0.0666, 0.0783, 0.0641, 0.0612, 0.0634], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:26:36,084 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.93 vs. 
limit=2.0 +2023-02-06 01:26:51,177 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.140e+02 3.239e+02 3.994e+02 4.997e+02 1.061e+03, threshold=7.988e+02, percent-clipped=3.0 +2023-02-06 01:26:54,600 INFO [train.py:901] (3/4) Epoch 5, batch 6300, loss[loss=0.2862, simple_loss=0.3676, pruned_loss=0.1024, over 8459.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.348, pruned_loss=0.1132, over 1608628.98 frames. ], batch size: 25, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:29,613 INFO [train.py:901] (3/4) Epoch 5, batch 6350, loss[loss=0.3036, simple_loss=0.3487, pruned_loss=0.1292, over 7934.00 frames. ], tot_loss[loss=0.2875, simple_loss=0.3486, pruned_loss=0.1132, over 1608957.11 frames. ], batch size: 20, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:27:49,936 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=38712.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:27:56,425 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=38722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:00,326 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.862e+02 3.826e+02 4.732e+02 1.596e+03, threshold=7.652e+02, percent-clipped=5.0 +2023-02-06 01:28:03,612 INFO [train.py:901] (3/4) Epoch 5, batch 6400, loss[loss=0.3239, simple_loss=0.3763, pruned_loss=0.1357, over 7813.00 frames. ], tot_loss[loss=0.2867, simple_loss=0.3483, pruned_loss=0.1126, over 1613052.56 frames. ], batch size: 20, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:06,534 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=38737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:28:38,892 INFO [train.py:901] (3/4) Epoch 5, batch 6450, loss[loss=0.2446, simple_loss=0.3054, pruned_loss=0.09191, over 7935.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3493, pruned_loss=0.1133, over 1614084.19 frames. ], batch size: 20, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:28:59,421 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4852, 2.4794, 2.7475, 2.0229, 1.6245, 2.5916, 0.9042, 1.8929], + device='cuda:3'), covar=tensor([0.2434, 0.1775, 0.0608, 0.2491, 0.4383, 0.0525, 0.5502, 0.2064], + device='cuda:3'), in_proj_covar=tensor([0.0126, 0.0123, 0.0078, 0.0166, 0.0207, 0.0077, 0.0140, 0.0124], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:29:09,817 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.470e+02 3.536e+02 4.141e+02 5.010e+02 9.096e+02, threshold=8.281e+02, percent-clipped=4.0 +2023-02-06 01:29:13,135 INFO [train.py:901] (3/4) Epoch 5, batch 6500, loss[loss=0.3023, simple_loss=0.3699, pruned_loss=0.1173, over 8569.00 frames. ], tot_loss[loss=0.29, simple_loss=0.3509, pruned_loss=0.1145, over 1617220.84 frames. ], batch size: 31, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:29:16,034 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=38837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:18,659 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=38841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:29:18,852 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. 
limit=2.0 +2023-02-06 01:29:38,053 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4857, 1.5164, 1.4316, 1.1133, 1.2119, 1.3947, 1.7086, 1.7266], + device='cuda:3'), covar=tensor([0.0546, 0.1411, 0.1922, 0.1618, 0.0691, 0.1758, 0.0825, 0.0597], + device='cuda:3'), in_proj_covar=tensor([0.0136, 0.0179, 0.0221, 0.0186, 0.0131, 0.0189, 0.0144, 0.0154], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:29:48,037 INFO [train.py:901] (3/4) Epoch 5, batch 6550, loss[loss=0.2774, simple_loss=0.3458, pruned_loss=0.1045, over 8357.00 frames. ], tot_loss[loss=0.2872, simple_loss=0.3486, pruned_loss=0.1129, over 1616066.91 frames. ], batch size: 24, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:09,430 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.70 vs. limit=2.0 +2023-02-06 01:30:19,268 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.239e+02 3.737e+02 4.952e+02 1.438e+03, threshold=7.474e+02, percent-clipped=4.0 +2023-02-06 01:30:22,018 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 01:30:22,217 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4550, 1.8045, 1.9463, 1.4495, 1.0673, 2.1216, 0.2911, 1.0274], + device='cuda:3'), covar=tensor([0.3221, 0.2443, 0.1049, 0.2996, 0.6551, 0.0676, 0.5764, 0.2919], + device='cuda:3'), in_proj_covar=tensor([0.0132, 0.0127, 0.0083, 0.0172, 0.0215, 0.0079, 0.0142, 0.0129], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:30:22,676 INFO [train.py:901] (3/4) Epoch 5, batch 6600, loss[loss=0.2524, simple_loss=0.3098, pruned_loss=0.09749, over 7657.00 frames. ], tot_loss[loss=0.2875, simple_loss=0.3485, pruned_loss=0.1133, over 1613708.93 frames. ], batch size: 19, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:30:39,837 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 01:30:58,095 INFO [train.py:901] (3/4) Epoch 5, batch 6650, loss[loss=0.3017, simple_loss=0.3443, pruned_loss=0.1295, over 7223.00 frames. ], tot_loss[loss=0.2868, simple_loss=0.3476, pruned_loss=0.113, over 1603961.34 frames. ], batch size: 16, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:31:29,853 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.971e+02 3.068e+02 3.660e+02 4.252e+02 1.265e+03, threshold=7.321e+02, percent-clipped=3.0 +2023-02-06 01:31:33,319 INFO [train.py:901] (3/4) Epoch 5, batch 6700, loss[loss=0.2453, simple_loss=0.3024, pruned_loss=0.09411, over 7694.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3473, pruned_loss=0.1119, over 1608023.51 frames. ], batch size: 18, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:32:02,064 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1064, 1.0730, 3.1944, 1.0054, 2.7102, 2.7034, 2.8790, 2.7746], + device='cuda:3'), covar=tensor([0.0618, 0.3877, 0.0623, 0.2841, 0.1527, 0.0817, 0.0694, 0.0795], + device='cuda:3'), in_proj_covar=tensor([0.0339, 0.0497, 0.0425, 0.0426, 0.0494, 0.0408, 0.0406, 0.0451], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:32:08,097 INFO [train.py:901] (3/4) Epoch 5, batch 6750, loss[loss=0.2317, simple_loss=0.3114, pruned_loss=0.07598, over 7923.00 frames. 
], tot_loss[loss=0.2849, simple_loss=0.3466, pruned_loss=0.1116, over 1608722.08 frames. ], batch size: 20, lr: 1.45e-02, grad_scale: 8.0 +2023-02-06 01:32:15,830 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:33,105 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39118.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:32:40,328 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.802e+02 3.437e+02 4.300e+02 8.945e+02, threshold=6.874e+02, percent-clipped=2.0 +2023-02-06 01:32:43,702 INFO [train.py:901] (3/4) Epoch 5, batch 6800, loss[loss=0.296, simple_loss=0.3681, pruned_loss=0.1119, over 8292.00 frames. ], tot_loss[loss=0.2847, simple_loss=0.3464, pruned_loss=0.1115, over 1608619.60 frames. ], batch size: 23, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:32:54,741 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 01:32:59,187 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 01:33:17,611 INFO [train.py:901] (3/4) Epoch 5, batch 6850, loss[loss=0.2461, simple_loss=0.2981, pruned_loss=0.09707, over 7428.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3481, pruned_loss=0.1129, over 1609073.43 frames. ], batch size: 17, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:33:19,091 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39185.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:31,848 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:33:43,504 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 01:33:48,721 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 3.284e+02 3.960e+02 5.468e+02 1.321e+03, threshold=7.919e+02, percent-clipped=11.0 +2023-02-06 01:33:52,156 INFO [train.py:901] (3/4) Epoch 5, batch 6900, loss[loss=0.3041, simple_loss=0.363, pruned_loss=0.1226, over 7935.00 frames. ], tot_loss[loss=0.2863, simple_loss=0.3476, pruned_loss=0.1125, over 1610001.66 frames. ], batch size: 20, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:18,979 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 01:34:26,657 INFO [train.py:901] (3/4) Epoch 5, batch 6950, loss[loss=0.3655, simple_loss=0.4003, pruned_loss=0.1654, over 7255.00 frames. ], tot_loss[loss=0.2873, simple_loss=0.3486, pruned_loss=0.113, over 1611331.01 frames. ], batch size: 71, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:34:38,156 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:39,404 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:34:49,520 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-06 01:34:58,304 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.949e+02 3.231e+02 3.801e+02 5.196e+02 1.038e+03, threshold=7.603e+02, percent-clipped=4.0 +2023-02-06 01:35:01,666 INFO [train.py:901] (3/4) Epoch 5, batch 7000, loss[loss=0.2827, simple_loss=0.359, pruned_loss=0.1032, over 8463.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3468, pruned_loss=0.1121, over 1604681.36 frames. ], batch size: 29, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:35:34,098 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6833, 2.9478, 1.9520, 2.2241, 2.5011, 1.6198, 2.3144, 2.2636], + device='cuda:3'), covar=tensor([0.1118, 0.0225, 0.0809, 0.0558, 0.0460, 0.1110, 0.0677, 0.0751], + device='cuda:3'), in_proj_covar=tensor([0.0342, 0.0241, 0.0307, 0.0300, 0.0312, 0.0313, 0.0333, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 01:35:35,822 INFO [train.py:901] (3/4) Epoch 5, batch 7050, loss[loss=0.2411, simple_loss=0.3097, pruned_loss=0.08627, over 6802.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3455, pruned_loss=0.1114, over 1600199.61 frames. ], batch size: 15, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:36:06,897 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.068e+02 2.867e+02 3.538e+02 4.706e+02 1.662e+03, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 01:36:09,166 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8122, 2.2798, 3.8188, 3.1079, 3.0668, 2.2964, 1.5007, 1.8185], + device='cuda:3'), covar=tensor([0.1829, 0.2459, 0.0508, 0.1065, 0.1097, 0.1056, 0.1119, 0.2319], + device='cuda:3'), in_proj_covar=tensor([0.0756, 0.0686, 0.0581, 0.0673, 0.0777, 0.0645, 0.0607, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:36:10,286 INFO [train.py:901] (3/4) Epoch 5, batch 7100, loss[loss=0.3854, simple_loss=0.4105, pruned_loss=0.1801, over 7125.00 frames. ], tot_loss[loss=0.2842, simple_loss=0.3456, pruned_loss=0.1114, over 1597881.84 frames. ], batch size: 71, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:36:38,200 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2640, 2.5069, 1.7368, 2.1774, 2.0611, 1.4860, 1.8029, 1.9480], + device='cuda:3'), covar=tensor([0.1215, 0.0255, 0.0789, 0.0456, 0.0613, 0.1025, 0.0895, 0.0757], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0240, 0.0310, 0.0303, 0.0321, 0.0317, 0.0341, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 01:36:44,804 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:36:46,000 INFO [train.py:901] (3/4) Epoch 5, batch 7150, loss[loss=0.2512, simple_loss=0.3342, pruned_loss=0.08407, over 8192.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.3457, pruned_loss=0.1118, over 1598110.90 frames. ], batch size: 23, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:17,185 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.977e+02 2.912e+02 3.907e+02 4.774e+02 1.202e+03, threshold=7.813e+02, percent-clipped=7.0 +2023-02-06 01:37:20,761 INFO [train.py:901] (3/4) Epoch 5, batch 7200, loss[loss=0.2728, simple_loss=0.3458, pruned_loss=0.09995, over 8469.00 frames. 
], tot_loss[loss=0.2856, simple_loss=0.3465, pruned_loss=0.1124, over 1600275.53 frames. ], batch size: 25, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:37:22,385 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5287, 1.9792, 3.6013, 1.1286, 2.5100, 1.8896, 1.5459, 2.0404], + device='cuda:3'), covar=tensor([0.1435, 0.1829, 0.0476, 0.3045, 0.1233, 0.2235, 0.1442, 0.2069], + device='cuda:3'), in_proj_covar=tensor([0.0470, 0.0453, 0.0520, 0.0529, 0.0570, 0.0514, 0.0448, 0.0592], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 01:37:30,595 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:37,491 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:55,145 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:37:56,307 INFO [train.py:901] (3/4) Epoch 5, batch 7250, loss[loss=0.3072, simple_loss=0.3647, pruned_loss=0.1249, over 8234.00 frames. ], tot_loss[loss=0.2865, simple_loss=0.3473, pruned_loss=0.1129, over 1603876.23 frames. ], batch size: 24, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:09,671 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4366, 2.1875, 1.3646, 1.9695, 1.8799, 1.0797, 1.4096, 1.8110], + device='cuda:3'), covar=tensor([0.1083, 0.0348, 0.1163, 0.0509, 0.0655, 0.1479, 0.1041, 0.0736], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0242, 0.0309, 0.0303, 0.0320, 0.0315, 0.0340, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 01:38:27,209 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.040e+02 2.862e+02 3.679e+02 5.056e+02 1.142e+03, threshold=7.358e+02, percent-clipped=8.0 +2023-02-06 01:38:30,502 INFO [train.py:901] (3/4) Epoch 5, batch 7300, loss[loss=0.3378, simple_loss=0.3971, pruned_loss=0.1393, over 8480.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3473, pruned_loss=0.1122, over 1610156.13 frames. ], batch size: 28, lr: 1.44e-02, grad_scale: 8.0 +2023-02-06 01:38:33,401 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5567, 1.3184, 1.3948, 1.2057, 1.0935, 1.2489, 1.2531, 1.4494], + device='cuda:3'), covar=tensor([0.0610, 0.1283, 0.1794, 0.1407, 0.0589, 0.1622, 0.0732, 0.0541], + device='cuda:3'), in_proj_covar=tensor([0.0135, 0.0178, 0.0219, 0.0183, 0.0131, 0.0189, 0.0144, 0.0151], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:38:39,558 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:38:47,121 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39657.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:38:50,516 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39662.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:39:05,559 INFO [train.py:901] (3/4) Epoch 5, batch 7350, loss[loss=0.3419, simple_loss=0.3964, pruned_loss=0.1437, over 8292.00 frames. ], tot_loss[loss=0.2879, simple_loss=0.3489, pruned_loss=0.1135, over 1611228.72 frames. 
], batch size: 23, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:10,023 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 01:39:12,259 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.5730, 3.3719, 3.1463, 1.9870, 3.0535, 3.1655, 3.2269, 2.8538], + device='cuda:3'), covar=tensor([0.0871, 0.0777, 0.1021, 0.3750, 0.0810, 0.0940, 0.1221, 0.0882], + device='cuda:3'), in_proj_covar=tensor([0.0392, 0.0282, 0.0310, 0.0399, 0.0302, 0.0270, 0.0292, 0.0243], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-06 01:39:33,447 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 01:39:36,162 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.846e+02 3.982e+02 4.999e+02 1.878e+03, threshold=7.964e+02, percent-clipped=11.0 +2023-02-06 01:39:39,633 INFO [train.py:901] (3/4) Epoch 5, batch 7400, loss[loss=0.3359, simple_loss=0.3877, pruned_loss=0.1421, over 8471.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3479, pruned_loss=0.113, over 1607873.39 frames. ], batch size: 25, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:39:45,832 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7301, 3.0561, 3.2394, 1.9168, 1.3217, 3.1775, 0.5364, 2.0484], + device='cuda:3'), covar=tensor([0.4860, 0.1621, 0.0883, 0.3746, 0.6774, 0.1011, 0.6890, 0.2335], + device='cuda:3'), in_proj_covar=tensor([0.0134, 0.0131, 0.0083, 0.0179, 0.0221, 0.0084, 0.0146, 0.0133], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:39:52,995 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 01:39:59,175 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:13,746 INFO [train.py:901] (3/4) Epoch 5, batch 7450, loss[loss=0.2421, simple_loss=0.3196, pruned_loss=0.08226, over 8468.00 frames. ], tot_loss[loss=0.2858, simple_loss=0.3473, pruned_loss=0.1121, over 1610483.18 frames. ], batch size: 27, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:15,954 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:32,190 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 01:40:43,642 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=39825.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:40:45,624 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 3.116e+02 3.779e+02 4.440e+02 1.107e+03, threshold=7.558e+02, percent-clipped=3.0 +2023-02-06 01:40:49,054 INFO [train.py:901] (3/4) Epoch 5, batch 7500, loss[loss=0.2471, simple_loss=0.3254, pruned_loss=0.08438, over 8106.00 frames. ], tot_loss[loss=0.2852, simple_loss=0.3469, pruned_loss=0.1117, over 1611241.41 frames. 
], batch size: 23, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:40:49,201 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=39833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:07,198 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1655, 1.4580, 1.4358, 1.2061, 1.2842, 1.2901, 1.6234, 1.5576], + device='cuda:3'), covar=tensor([0.0590, 0.1212, 0.1768, 0.1441, 0.0585, 0.1598, 0.0745, 0.0585], + device='cuda:3'), in_proj_covar=tensor([0.0133, 0.0178, 0.0218, 0.0181, 0.0128, 0.0188, 0.0143, 0.0150], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:41:22,994 INFO [train.py:901] (3/4) Epoch 5, batch 7550, loss[loss=0.2858, simple_loss=0.3331, pruned_loss=0.1193, over 7567.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.347, pruned_loss=0.1126, over 1609389.22 frames. ], batch size: 18, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:41:41,173 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3068, 1.0386, 4.4795, 1.8089, 3.7663, 3.7617, 3.9999, 3.8644], + device='cuda:3'), covar=tensor([0.0402, 0.3709, 0.0365, 0.2258, 0.1086, 0.0653, 0.0402, 0.0528], + device='cuda:3'), in_proj_covar=tensor([0.0337, 0.0486, 0.0429, 0.0425, 0.0493, 0.0411, 0.0399, 0.0451], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:41:48,007 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=39918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:41:54,538 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.961e+02 3.144e+02 3.978e+02 5.379e+02 1.554e+03, threshold=7.957e+02, percent-clipped=6.0 +2023-02-06 01:41:57,840 INFO [train.py:901] (3/4) Epoch 5, batch 7600, loss[loss=0.2583, simple_loss=0.3357, pruned_loss=0.09048, over 7811.00 frames. ], tot_loss[loss=0.2855, simple_loss=0.3465, pruned_loss=0.1123, over 1606718.00 frames. ], batch size: 20, lr: 1.43e-02, grad_scale: 8.0 +2023-02-06 01:42:02,473 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=39940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:04,534 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=39943.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:42:32,747 INFO [train.py:901] (3/4) Epoch 5, batch 7650, loss[loss=0.2532, simple_loss=0.3309, pruned_loss=0.08772, over 8293.00 frames. ], tot_loss[loss=0.2856, simple_loss=0.3469, pruned_loss=0.1121, over 1609683.15 frames. ], batch size: 23, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:42:45,885 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40001.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:42:56,999 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:05,594 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.281e+02 3.028e+02 3.689e+02 4.703e+02 1.290e+03, threshold=7.379e+02, percent-clipped=1.0 +2023-02-06 01:43:08,878 INFO [train.py:901] (3/4) Epoch 5, batch 7700, loss[loss=0.2731, simple_loss=0.3404, pruned_loss=0.1028, over 8517.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3481, pruned_loss=0.1128, over 1613266.47 frames. 
], batch size: 28, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:15,433 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:43:15,443 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3105, 1.5562, 1.7139, 1.3153, 0.7626, 1.6853, 0.0724, 0.9333], + device='cuda:3'), covar=tensor([0.3783, 0.2102, 0.0875, 0.2133, 0.6117, 0.0712, 0.4735, 0.2601], + device='cuda:3'), in_proj_covar=tensor([0.0132, 0.0126, 0.0078, 0.0173, 0.0212, 0.0083, 0.0141, 0.0130], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:43:21,219 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 01:43:44,274 INFO [train.py:901] (3/4) Epoch 5, batch 7750, loss[loss=0.2642, simple_loss=0.3176, pruned_loss=0.1054, over 7542.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3472, pruned_loss=0.1125, over 1610945.67 frames. ], batch size: 18, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:43:44,292 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 01:44:07,435 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40116.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:44:12,724 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5271, 4.5322, 4.0434, 1.7042, 4.0962, 3.9157, 4.1860, 3.8121], + device='cuda:3'), covar=tensor([0.0696, 0.0553, 0.0868, 0.5123, 0.0636, 0.0739, 0.1249, 0.0559], + device='cuda:3'), in_proj_covar=tensor([0.0390, 0.0277, 0.0313, 0.0402, 0.0303, 0.0271, 0.0292, 0.0247], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001], + device='cuda:3') +2023-02-06 01:44:15,426 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.050e+02 3.016e+02 3.638e+02 4.428e+02 8.911e+02, threshold=7.276e+02, percent-clipped=8.0 +2023-02-06 01:44:16,238 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:18,934 INFO [train.py:901] (3/4) Epoch 5, batch 7800, loss[loss=0.3182, simple_loss=0.3852, pruned_loss=0.1256, over 8502.00 frames. ], tot_loss[loss=0.2861, simple_loss=0.3474, pruned_loss=0.1124, over 1614259.00 frames. ], batch size: 26, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:44:50,152 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:44:54,256 INFO [train.py:901] (3/4) Epoch 5, batch 7850, loss[loss=0.2829, simple_loss=0.3545, pruned_loss=0.1057, over 8460.00 frames. ], tot_loss[loss=0.2869, simple_loss=0.3485, pruned_loss=0.1127, over 1619158.16 frames. 
], batch size: 29, lr: 1.43e-02, grad_scale: 16.0 +2023-02-06 01:45:03,200 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40196.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:14,682 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:20,124 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:24,748 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.315e+02 3.285e+02 3.978e+02 4.753e+02 1.108e+03, threshold=7.955e+02, percent-clipped=4.0 +2023-02-06 01:45:28,277 INFO [train.py:901] (3/4) Epoch 5, batch 7900, loss[loss=0.2928, simple_loss=0.3601, pruned_loss=0.1127, over 8251.00 frames. ], tot_loss[loss=0.2857, simple_loss=0.3478, pruned_loss=0.1118, over 1618338.19 frames. ], batch size: 24, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:45:30,497 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40236.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:45:35,930 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:02,027 INFO [train.py:901] (3/4) Epoch 5, batch 7950, loss[loss=0.2541, simple_loss=0.3083, pruned_loss=0.09988, over 7701.00 frames. ], tot_loss[loss=0.2853, simple_loss=0.3473, pruned_loss=0.1116, over 1611991.08 frames. ], batch size: 18, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:07,555 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:09,020 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:46:24,696 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 01:46:33,072 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.942e+02 3.003e+02 3.931e+02 4.743e+02 9.937e+02, threshold=7.862e+02, percent-clipped=4.0 +2023-02-06 01:46:36,398 INFO [train.py:901] (3/4) Epoch 5, batch 8000, loss[loss=0.2985, simple_loss=0.3594, pruned_loss=0.1188, over 8496.00 frames. ], tot_loss[loss=0.286, simple_loss=0.348, pruned_loss=0.112, over 1616572.33 frames. ], batch size: 26, lr: 1.42e-02, grad_scale: 16.0 +2023-02-06 01:46:46,582 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:47:03,142 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40372.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 01:47:10,475 INFO [train.py:901] (3/4) Epoch 5, batch 8050, loss[loss=0.2848, simple_loss=0.3509, pruned_loss=0.1093, over 8496.00 frames. ], tot_loss[loss=0.2859, simple_loss=0.3475, pruned_loss=0.1121, over 1608253.52 frames. ], batch size: 49, lr: 1.42e-02, grad_scale: 8.0 +2023-02-06 01:47:20,250 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40397.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 01:47:43,720 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 01:47:48,100 INFO [train.py:901] (3/4) Epoch 6, batch 0, loss[loss=0.3374, simple_loss=0.3912, pruned_loss=0.1418, over 8463.00 frames. 
], tot_loss[loss=0.3374, simple_loss=0.3912, pruned_loss=0.1418, over 8463.00 frames. ], batch size: 25, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:47:48,100 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 01:47:59,053 INFO [train.py:935] (3/4) Epoch 6, validation: loss=0.2203, simple_loss=0.3165, pruned_loss=0.06206, over 944034.00 frames. +2023-02-06 01:47:59,054 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 01:48:07,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 3.052e+02 3.992e+02 5.098e+02 1.227e+03, threshold=7.983e+02, percent-clipped=7.0 +2023-02-06 01:48:13,427 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 01:48:24,586 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. limit=5.0 +2023-02-06 01:48:34,110 INFO [train.py:901] (3/4) Epoch 6, batch 50, loss[loss=0.2479, simple_loss=0.3229, pruned_loss=0.08647, over 7524.00 frames. ], tot_loss[loss=0.292, simple_loss=0.3532, pruned_loss=0.1154, over 366970.96 frames. ], batch size: 18, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:48:48,505 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 01:48:57,360 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:04,765 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:08,810 INFO [train.py:901] (3/4) Epoch 6, batch 100, loss[loss=0.2488, simple_loss=0.3091, pruned_loss=0.09421, over 7706.00 frames. ], tot_loss[loss=0.2829, simple_loss=0.3461, pruned_loss=0.1098, over 644149.60 frames. ], batch size: 18, lr: 1.33e-02, grad_scale: 8.0 +2023-02-06 01:49:13,092 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 01:49:15,331 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:17,934 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.877e+02 3.627e+02 4.294e+02 7.601e+02, threshold=7.253e+02, percent-clipped=0.0 +2023-02-06 01:49:31,506 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:37,660 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:44,253 INFO [train.py:901] (3/4) Epoch 6, batch 150, loss[loss=0.2639, simple_loss=0.3414, pruned_loss=0.09323, over 8290.00 frames. ], tot_loss[loss=0.2785, simple_loss=0.3421, pruned_loss=0.1074, over 854353.21 frames. ], batch size: 23, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:49:49,722 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:49:54,327 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:19,240 INFO [train.py:901] (3/4) Epoch 6, batch 200, loss[loss=0.2844, simple_loss=0.3532, pruned_loss=0.1078, over 8539.00 frames. ], tot_loss[loss=0.2808, simple_loss=0.3441, pruned_loss=0.1087, over 1024941.49 frames. 
], batch size: 49, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:25,200 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-06 01:50:28,752 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 3.079e+02 3.898e+02 5.213e+02 9.157e+02, threshold=7.795e+02, percent-clipped=3.0 +2023-02-06 01:50:32,269 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:54,051 INFO [train.py:901] (3/4) Epoch 6, batch 250, loss[loss=0.2671, simple_loss=0.3459, pruned_loss=0.09414, over 8462.00 frames. ], tot_loss[loss=0.282, simple_loss=0.346, pruned_loss=0.109, over 1159819.49 frames. ], batch size: 25, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:50:56,283 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:50:58,988 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:04,073 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 01:51:12,261 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 01:51:13,018 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:15,105 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:51:20,705 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.92 vs. limit=2.0 +2023-02-06 01:51:29,244 INFO [train.py:901] (3/4) Epoch 6, batch 300, loss[loss=0.2671, simple_loss=0.3395, pruned_loss=0.09731, over 8334.00 frames. ], tot_loss[loss=0.2846, simple_loss=0.348, pruned_loss=0.1107, over 1262796.66 frames. ], batch size: 25, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:51:38,581 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 3.025e+02 3.729e+02 4.724e+02 9.863e+02, threshold=7.458e+02, percent-clipped=3.0 +2023-02-06 01:51:52,721 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:03,705 INFO [train.py:901] (3/4) Epoch 6, batch 350, loss[loss=0.2871, simple_loss=0.357, pruned_loss=0.1087, over 8253.00 frames. ], tot_loss[loss=0.2843, simple_loss=0.3471, pruned_loss=0.1107, over 1341579.31 frames. ], batch size: 24, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:32,427 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:52:35,329 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 01:52:38,231 INFO [train.py:901] (3/4) Epoch 6, batch 400, loss[loss=0.3403, simple_loss=0.391, pruned_loss=0.1448, over 8564.00 frames. ], tot_loss[loss=0.2825, simple_loss=0.3453, pruned_loss=0.1098, over 1396007.10 frames. 
], batch size: 31, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:52:39,046 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3364, 4.3212, 3.8801, 1.7887, 3.7254, 3.8870, 4.1173, 3.4826], + device='cuda:3'), covar=tensor([0.0850, 0.0603, 0.1030, 0.4976, 0.0759, 0.0766, 0.1156, 0.0763], + device='cuda:3'), in_proj_covar=tensor([0.0400, 0.0285, 0.0321, 0.0410, 0.0311, 0.0275, 0.0298, 0.0251], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:52:46,391 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2902, 1.4434, 4.3819, 1.6563, 3.8219, 3.6854, 3.8713, 3.8150], + device='cuda:3'), covar=tensor([0.0411, 0.3593, 0.0399, 0.2613, 0.0984, 0.0649, 0.0510, 0.0522], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0498, 0.0439, 0.0432, 0.0499, 0.0420, 0.0405, 0.0461], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 01:52:46,905 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.939e+02 3.080e+02 3.801e+02 5.022e+02 1.220e+03, threshold=7.601e+02, percent-clipped=4.0 +2023-02-06 01:53:04,906 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=40854.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:04,945 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7218, 5.7771, 4.9990, 2.1547, 5.1067, 5.4710, 5.5655, 5.0770], + device='cuda:3'), covar=tensor([0.0741, 0.0366, 0.0848, 0.4736, 0.0690, 0.0550, 0.0900, 0.0501], + device='cuda:3'), in_proj_covar=tensor([0.0411, 0.0289, 0.0325, 0.0414, 0.0315, 0.0280, 0.0306, 0.0255], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:53:12,737 INFO [train.py:901] (3/4) Epoch 6, batch 450, loss[loss=0.2418, simple_loss=0.3104, pruned_loss=0.08667, over 7801.00 frames. ], tot_loss[loss=0.2845, simple_loss=0.3474, pruned_loss=0.1108, over 1448983.93 frames. ], batch size: 20, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:24,454 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40883.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:47,834 INFO [train.py:901] (3/4) Epoch 6, batch 500, loss[loss=0.3719, simple_loss=0.408, pruned_loss=0.1678, over 7224.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3471, pruned_loss=0.1104, over 1489524.59 frames. ], batch size: 73, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:53:49,263 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=40918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:56,788 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40928.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:53:57,231 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.956e+02 3.058e+02 3.738e+02 5.288e+02 8.550e+02, threshold=7.476e+02, percent-clipped=3.0 +2023-02-06 01:54:12,302 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=40951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:13,628 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:22,939 INFO [train.py:901] (3/4) Epoch 6, batch 550, loss[loss=0.282, simple_loss=0.3505, pruned_loss=0.1067, over 8485.00 frames. 
], tot_loss[loss=0.282, simple_loss=0.3456, pruned_loss=0.1092, over 1519847.18 frames. ], batch size: 34, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:54:25,225 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=40969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:30,599 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=40976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:49,829 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:55,069 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:56,493 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:54:57,033 INFO [train.py:901] (3/4) Epoch 6, batch 600, loss[loss=0.3101, simple_loss=0.3716, pruned_loss=0.1242, over 8538.00 frames. ], tot_loss[loss=0.2828, simple_loss=0.3464, pruned_loss=0.1096, over 1543040.66 frames. ], batch size: 49, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:06,096 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.839e+02 3.515e+02 4.292e+02 8.268e+02, threshold=7.031e+02, percent-clipped=4.0 +2023-02-06 01:55:06,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41030.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:09,476 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 01:55:16,705 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6852, 5.8268, 4.9880, 2.3394, 5.0101, 5.2752, 5.3896, 4.8589], + device='cuda:3'), covar=tensor([0.0561, 0.0328, 0.0730, 0.4124, 0.0553, 0.0637, 0.0877, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0398, 0.0284, 0.0318, 0.0410, 0.0310, 0.0278, 0.0300, 0.0253], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 01:55:29,301 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:29,380 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:55:31,088 INFO [train.py:901] (3/4) Epoch 6, batch 650, loss[loss=0.2021, simple_loss=0.2703, pruned_loss=0.06698, over 7301.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3461, pruned_loss=0.1094, over 1555462.88 frames. ], batch size: 16, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:55:32,749 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.84 vs. limit=5.0 +2023-02-06 01:55:46,229 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:05,817 INFO [train.py:901] (3/4) Epoch 6, batch 700, loss[loss=0.2118, simple_loss=0.2962, pruned_loss=0.06368, over 7989.00 frames. ], tot_loss[loss=0.2803, simple_loss=0.3445, pruned_loss=0.108, over 1569716.25 frames. 
], batch size: 21, lr: 1.32e-02, grad_scale: 8.0 +2023-02-06 01:56:08,625 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:13,565 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9813, 2.6300, 3.1803, 1.0500, 3.2303, 1.9539, 1.4868, 1.8786], + device='cuda:3'), covar=tensor([0.0278, 0.0116, 0.0124, 0.0273, 0.0119, 0.0306, 0.0323, 0.0183], + device='cuda:3'), in_proj_covar=tensor([0.0324, 0.0229, 0.0198, 0.0285, 0.0227, 0.0372, 0.0297, 0.0275], + device='cuda:3'), out_proj_covar=tensor([1.1200e-04, 7.6334e-05, 6.6504e-05, 9.7055e-05, 7.8084e-05, 1.3704e-04, + 1.0279e-04, 9.3551e-05], device='cuda:3') +2023-02-06 01:56:14,229 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:56:14,737 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.998e+02 3.776e+02 4.654e+02 1.221e+03, threshold=7.553e+02, percent-clipped=4.0 +2023-02-06 01:56:40,063 INFO [train.py:901] (3/4) Epoch 6, batch 750, loss[loss=0.3099, simple_loss=0.3542, pruned_loss=0.1328, over 6887.00 frames. ], tot_loss[loss=0.2794, simple_loss=0.3433, pruned_loss=0.1078, over 1578985.05 frames. ], batch size: 71, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:56:52,315 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1375, 1.4209, 1.6752, 1.2675, 1.2097, 1.4297, 1.7092, 1.4222], + device='cuda:3'), covar=tensor([0.0575, 0.1272, 0.1696, 0.1411, 0.0603, 0.1529, 0.0681, 0.0599], + device='cuda:3'), in_proj_covar=tensor([0.0130, 0.0176, 0.0216, 0.0180, 0.0129, 0.0186, 0.0140, 0.0150], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 01:56:52,816 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 01:57:00,951 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 01:57:15,163 INFO [train.py:901] (3/4) Epoch 6, batch 800, loss[loss=0.3141, simple_loss=0.3632, pruned_loss=0.1325, over 8478.00 frames. ], tot_loss[loss=0.2804, simple_loss=0.3441, pruned_loss=0.1083, over 1585659.57 frames. ], batch size: 29, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:57:21,520 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:22,747 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:24,005 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.937e+02 3.578e+02 4.897e+02 8.076e+02, threshold=7.157e+02, percent-clipped=3.0 +2023-02-06 01:57:38,352 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:46,852 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:57:49,532 INFO [train.py:901] (3/4) Epoch 6, batch 850, loss[loss=0.296, simple_loss=0.3599, pruned_loss=0.1161, over 8347.00 frames. ], tot_loss[loss=0.2808, simple_loss=0.3439, pruned_loss=0.1089, over 1588790.30 frames. 
], batch size: 24, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:57:54,388 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8580, 2.3379, 4.8175, 1.3443, 3.3496, 2.4265, 1.9128, 2.9725], + device='cuda:3'), covar=tensor([0.1305, 0.1770, 0.0507, 0.3048, 0.1155, 0.1991, 0.1332, 0.1935], + device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0450, 0.0520, 0.0533, 0.0585, 0.0511, 0.0443, 0.0588], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 01:58:23,894 INFO [train.py:901] (3/4) Epoch 6, batch 900, loss[loss=0.2711, simple_loss=0.3479, pruned_loss=0.0971, over 8488.00 frames. ], tot_loss[loss=0.28, simple_loss=0.3433, pruned_loss=0.1084, over 1593757.27 frames. ], batch size: 29, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:58:33,480 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.871e+02 3.405e+02 4.321e+02 1.147e+03, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 01:58:34,642 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 01:58:42,537 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:52,794 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.68 vs. limit=5.0 +2023-02-06 01:58:53,861 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:58:58,556 INFO [train.py:901] (3/4) Epoch 6, batch 950, loss[loss=0.2547, simple_loss=0.3204, pruned_loss=0.09452, over 8082.00 frames. ], tot_loss[loss=0.2795, simple_loss=0.3429, pruned_loss=0.1081, over 1598469.55 frames. ], batch size: 21, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:06,361 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:11,063 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:21,435 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 01:59:26,995 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:28,450 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 01:59:32,977 INFO [train.py:901] (3/4) Epoch 6, batch 1000, loss[loss=0.2403, simple_loss=0.3041, pruned_loss=0.08822, over 7640.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3415, pruned_loss=0.1067, over 1597873.76 frames. ], batch size: 19, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 01:59:41,359 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.042e+02 3.293e+02 3.921e+02 5.074e+02 1.211e+03, threshold=7.843e+02, percent-clipped=6.0 +2023-02-06 01:59:55,291 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 02:00:06,251 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=41464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:07,520 INFO [train.py:901] (3/4) Epoch 6, batch 1050, loss[loss=0.3016, simple_loss=0.3518, pruned_loss=0.1257, over 6821.00 frames. 
], tot_loss[loss=0.2813, simple_loss=0.3443, pruned_loss=0.1091, over 1602906.41 frames. ], batch size: 15, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:08,234 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 02:00:13,141 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:19,147 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7366, 1.7698, 3.1983, 1.3118, 2.2316, 3.6654, 3.6142, 3.0478], + device='cuda:3'), covar=tensor([0.1195, 0.1320, 0.0443, 0.2159, 0.0864, 0.0272, 0.0429, 0.0670], + device='cuda:3'), in_proj_covar=tensor([0.0238, 0.0266, 0.0229, 0.0261, 0.0229, 0.0207, 0.0243, 0.0279], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 02:00:42,276 INFO [train.py:901] (3/4) Epoch 6, batch 1100, loss[loss=0.2252, simple_loss=0.3127, pruned_loss=0.06886, over 8084.00 frames. ], tot_loss[loss=0.2824, simple_loss=0.3453, pruned_loss=0.1098, over 1604333.46 frames. ], batch size: 21, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:00:46,700 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:00:51,108 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.937e+02 3.488e+02 4.376e+02 9.981e+02, threshold=6.976e+02, percent-clipped=3.0 +2023-02-06 02:01:16,051 INFO [train.py:901] (3/4) Epoch 6, batch 1150, loss[loss=0.2639, simple_loss=0.3306, pruned_loss=0.09865, over 8358.00 frames. ], tot_loss[loss=0.282, simple_loss=0.3449, pruned_loss=0.1096, over 1606192.58 frames. ], batch size: 24, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:18,801 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 02:01:25,454 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=41579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:27,462 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4860, 1.6518, 1.8335, 1.5230, 0.9564, 1.9086, 0.2600, 1.0418], + device='cuda:3'), covar=tensor([0.2375, 0.1902, 0.0680, 0.1979, 0.5824, 0.0847, 0.5086, 0.2652], + device='cuda:3'), in_proj_covar=tensor([0.0130, 0.0131, 0.0082, 0.0175, 0.0213, 0.0083, 0.0147, 0.0136], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:01:38,067 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:01:50,380 INFO [train.py:901] (3/4) Epoch 6, batch 1200, loss[loss=0.2414, simple_loss=0.3128, pruned_loss=0.08499, over 7788.00 frames. ], tot_loss[loss=0.2811, simple_loss=0.3445, pruned_loss=0.1089, over 1611368.99 frames. 
], batch size: 19, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:01:53,225 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2405, 2.0025, 2.9638, 2.3264, 2.4175, 1.9126, 1.5208, 1.1939], + device='cuda:3'), covar=tensor([0.1927, 0.1957, 0.0464, 0.1135, 0.0947, 0.1035, 0.1056, 0.1982], + device='cuda:3'), in_proj_covar=tensor([0.0770, 0.0711, 0.0604, 0.0703, 0.0794, 0.0656, 0.0626, 0.0654], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:01:55,788 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:00,257 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.965e+02 3.060e+02 3.864e+02 4.910e+02 1.275e+03, threshold=7.729e+02, percent-clipped=9.0 +2023-02-06 02:02:03,225 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:19,548 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:24,698 INFO [train.py:901] (3/4) Epoch 6, batch 1250, loss[loss=0.348, simple_loss=0.3723, pruned_loss=0.1618, over 7538.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3427, pruned_loss=0.1076, over 1608256.28 frames. ], batch size: 18, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:02:29,689 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:02:34,701 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 02:02:59,807 INFO [train.py:901] (3/4) Epoch 6, batch 1300, loss[loss=0.2703, simple_loss=0.3157, pruned_loss=0.1124, over 7791.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3417, pruned_loss=0.1074, over 1606246.80 frames. ], batch size: 19, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:08,602 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.781e+02 3.137e+02 4.028e+02 4.813e+02 9.668e+02, threshold=8.056e+02, percent-clipped=5.0 +2023-02-06 02:03:09,498 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:27,440 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:03:34,647 INFO [train.py:901] (3/4) Epoch 6, batch 1350, loss[loss=0.252, simple_loss=0.31, pruned_loss=0.09697, over 7428.00 frames. ], tot_loss[loss=0.2775, simple_loss=0.3413, pruned_loss=0.1068, over 1610632.39 frames. 
], batch size: 17, lr: 1.31e-02, grad_scale: 8.0 +2023-02-06 02:03:42,972 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:00,426 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:06,499 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3161, 1.5540, 2.2972, 1.1347, 1.6662, 1.5582, 1.3539, 1.5309], + device='cuda:3'), covar=tensor([0.1445, 0.1612, 0.0629, 0.2863, 0.1235, 0.2305, 0.1454, 0.1510], + device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0459, 0.0528, 0.0542, 0.0592, 0.0520, 0.0445, 0.0587], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 02:04:09,399 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 02:04:09,619 INFO [train.py:901] (3/4) Epoch 6, batch 1400, loss[loss=0.2045, simple_loss=0.2879, pruned_loss=0.06055, over 8111.00 frames. ], tot_loss[loss=0.2758, simple_loss=0.3397, pruned_loss=0.1059, over 1609798.37 frames. ], batch size: 23, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:18,118 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.090e+02 3.079e+02 3.704e+02 4.589e+02 8.838e+02, threshold=7.407e+02, percent-clipped=2.0 +2023-02-06 02:04:22,381 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=41835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:39,963 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=41860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:04:44,132 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2433, 2.1320, 1.5300, 2.0200, 1.8312, 1.2064, 1.5134, 1.7432], + device='cuda:3'), covar=tensor([0.0972, 0.0334, 0.0937, 0.0458, 0.0578, 0.1268, 0.0879, 0.0685], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0237, 0.0308, 0.0297, 0.0312, 0.0310, 0.0336, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:04:44,592 INFO [train.py:901] (3/4) Epoch 6, batch 1450, loss[loss=0.2703, simple_loss=0.3436, pruned_loss=0.09851, over 8248.00 frames. ], tot_loss[loss=0.2772, simple_loss=0.3409, pruned_loss=0.1067, over 1605956.49 frames. ], batch size: 24, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:04:47,872 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 02:05:18,595 INFO [train.py:901] (3/4) Epoch 6, batch 1500, loss[loss=0.3083, simple_loss=0.3737, pruned_loss=0.1214, over 8202.00 frames. ], tot_loss[loss=0.2793, simple_loss=0.3429, pruned_loss=0.1078, over 1609640.53 frames. ], batch size: 23, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:24,647 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:05:27,835 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.902e+02 2.922e+02 3.542e+02 4.432e+02 1.007e+03, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 02:05:53,225 INFO [train.py:901] (3/4) Epoch 6, batch 1550, loss[loss=0.244, simple_loss=0.3214, pruned_loss=0.08329, over 8728.00 frames. ], tot_loss[loss=0.2797, simple_loss=0.343, pruned_loss=0.1082, over 1606594.23 frames. 
], batch size: 34, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:05:53,367 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=41966.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:06:28,439 INFO [train.py:901] (3/4) Epoch 6, batch 1600, loss[loss=0.3281, simple_loss=0.392, pruned_loss=0.1321, over 8607.00 frames. ], tot_loss[loss=0.2792, simple_loss=0.343, pruned_loss=0.1077, over 1610121.83 frames. ], batch size: 39, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:06:28,517 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:06:33,628 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 02:06:37,873 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.135e+02 3.132e+02 3.836e+02 5.392e+02 3.005e+03, threshold=7.672e+02, percent-clipped=11.0 +2023-02-06 02:07:03,787 INFO [train.py:901] (3/4) Epoch 6, batch 1650, loss[loss=0.2749, simple_loss=0.3567, pruned_loss=0.09652, over 8364.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3405, pruned_loss=0.1057, over 1612212.79 frames. ], batch size: 24, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:39,091 INFO [train.py:901] (3/4) Epoch 6, batch 1700, loss[loss=0.2691, simple_loss=0.3358, pruned_loss=0.1012, over 7246.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3424, pruned_loss=0.107, over 1613883.37 frames. ], batch size: 72, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:07:47,894 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.826e+02 3.670e+02 4.452e+02 1.049e+03, threshold=7.339e+02, percent-clipped=2.0 +2023-02-06 02:07:49,334 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:08:00,568 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 02:08:14,465 INFO [train.py:901] (3/4) Epoch 6, batch 1750, loss[loss=0.3281, simple_loss=0.3776, pruned_loss=0.1393, over 8463.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.3408, pruned_loss=0.107, over 1607229.71 frames. ], batch size: 27, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:49,384 INFO [train.py:901] (3/4) Epoch 6, batch 1800, loss[loss=0.2615, simple_loss=0.3179, pruned_loss=0.1025, over 7276.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.34, pruned_loss=0.1066, over 1603239.88 frames. ], batch size: 16, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:08:59,176 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 3.032e+02 3.540e+02 4.353e+02 2.015e+03, threshold=7.080e+02, percent-clipped=5.0 +2023-02-06 02:09:24,922 INFO [train.py:901] (3/4) Epoch 6, batch 1850, loss[loss=0.2779, simple_loss=0.3413, pruned_loss=0.1072, over 8138.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3416, pruned_loss=0.1075, over 1612118.47 frames. ], batch size: 22, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:09:26,417 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:09:55,345 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42310.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:09:59,279 INFO [train.py:901] (3/4) Epoch 6, batch 1900, loss[loss=0.2657, simple_loss=0.3113, pruned_loss=0.1101, over 7536.00 frames. 
], tot_loss[loss=0.2787, simple_loss=0.3423, pruned_loss=0.1076, over 1615564.20 frames. ], batch size: 18, lr: 1.30e-02, grad_scale: 8.0 +2023-02-06 02:10:08,776 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.992e+02 2.715e+02 3.297e+02 4.142e+02 7.213e+02, threshold=6.594e+02, percent-clipped=2.0 +2023-02-06 02:10:23,952 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 02:10:34,058 INFO [train.py:901] (3/4) Epoch 6, batch 1950, loss[loss=0.2276, simple_loss=0.3051, pruned_loss=0.07502, over 8223.00 frames. ], tot_loss[loss=0.2783, simple_loss=0.3426, pruned_loss=0.107, over 1617926.25 frames. ], batch size: 22, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:10:36,640 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 02:10:46,221 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:48,981 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42387.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:10:56,190 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 02:10:56,319 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:06,409 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:11:08,983 INFO [train.py:901] (3/4) Epoch 6, batch 2000, loss[loss=0.2598, simple_loss=0.3351, pruned_loss=0.09224, over 8326.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3426, pruned_loss=0.1071, over 1617873.52 frames. ], batch size: 25, lr: 1.30e-02, grad_scale: 16.0 +2023-02-06 02:11:15,172 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42425.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:11:18,299 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.766e+02 3.581e+02 4.303e+02 8.011e+02, threshold=7.162e+02, percent-clipped=3.0 +2023-02-06 02:11:43,879 INFO [train.py:901] (3/4) Epoch 6, batch 2050, loss[loss=0.2785, simple_loss=0.3496, pruned_loss=0.1037, over 8342.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3416, pruned_loss=0.1058, over 1617275.04 frames. ], batch size: 26, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:17,698 INFO [train.py:901] (3/4) Epoch 6, batch 2100, loss[loss=0.3897, simple_loss=0.4246, pruned_loss=0.1774, over 8569.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3428, pruned_loss=0.1067, over 1622100.09 frames. 
], batch size: 49, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:12:23,886 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:12:27,687 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.916e+02 3.481e+02 4.572e+02 1.310e+03, threshold=6.962e+02, percent-clipped=2.0 +2023-02-06 02:12:31,136 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6039, 5.5796, 4.9617, 1.9874, 5.0171, 5.3622, 5.3176, 5.0100], + device='cuda:3'), covar=tensor([0.0661, 0.0444, 0.0814, 0.4557, 0.0565, 0.0723, 0.0969, 0.0618], + device='cuda:3'), in_proj_covar=tensor([0.0393, 0.0293, 0.0323, 0.0401, 0.0314, 0.0283, 0.0306, 0.0258], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:12:52,387 INFO [train.py:901] (3/4) Epoch 6, batch 2150, loss[loss=0.2818, simple_loss=0.3493, pruned_loss=0.1071, over 8485.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3427, pruned_loss=0.1071, over 1616862.55 frames. ], batch size: 28, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:13:25,966 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8830, 1.5489, 2.3097, 1.9224, 1.9373, 1.6882, 1.2952, 0.6276], + device='cuda:3'), covar=tensor([0.2194, 0.2190, 0.0596, 0.1152, 0.0949, 0.1299, 0.1375, 0.2005], + device='cuda:3'), in_proj_covar=tensor([0.0775, 0.0710, 0.0618, 0.0712, 0.0797, 0.0657, 0.0622, 0.0651], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:13:27,037 INFO [train.py:901] (3/4) Epoch 6, batch 2200, loss[loss=0.2941, simple_loss=0.3542, pruned_loss=0.117, over 8489.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.342, pruned_loss=0.1071, over 1608615.35 frames. ], batch size: 25, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:13:36,153 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 3.020e+02 3.729e+02 5.072e+02 1.122e+03, threshold=7.459e+02, percent-clipped=5.0 +2023-02-06 02:13:39,964 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 02:13:41,693 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6135, 5.7012, 4.9592, 1.8116, 5.0295, 5.1806, 5.4093, 4.7886], + device='cuda:3'), covar=tensor([0.0744, 0.0331, 0.0785, 0.5004, 0.0635, 0.0647, 0.0818, 0.0654], + device='cuda:3'), in_proj_covar=tensor([0.0407, 0.0298, 0.0332, 0.0415, 0.0321, 0.0290, 0.0309, 0.0263], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:13:43,115 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:13:51,252 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.43 vs. limit=5.0 +2023-02-06 02:13:59,706 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:00,835 INFO [train.py:901] (3/4) Epoch 6, batch 2250, loss[loss=0.2992, simple_loss=0.3636, pruned_loss=0.1174, over 8498.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3421, pruned_loss=0.107, over 1614249.66 frames. 
], batch size: 28, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:01,636 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:14:11,906 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=42681.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:14:14,915 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0 +2023-02-06 02:14:29,260 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=42706.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:14:35,823 INFO [train.py:901] (3/4) Epoch 6, batch 2300, loss[loss=0.2988, simple_loss=0.357, pruned_loss=0.1203, over 8543.00 frames. ], tot_loss[loss=0.2781, simple_loss=0.3418, pruned_loss=0.1072, over 1610389.10 frames. ], batch size: 39, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:14:37,439 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6013, 1.9404, 2.2259, 1.2968, 2.3735, 1.4909, 0.7703, 1.8157], + device='cuda:3'), covar=tensor([0.0241, 0.0105, 0.0086, 0.0203, 0.0088, 0.0349, 0.0317, 0.0112], + device='cuda:3'), in_proj_covar=tensor([0.0331, 0.0238, 0.0211, 0.0296, 0.0233, 0.0389, 0.0307, 0.0276], + device='cuda:3'), out_proj_covar=tensor([1.1251e-04, 7.8386e-05, 6.9930e-05, 9.9007e-05, 7.9130e-05, 1.4115e-04, + 1.0483e-04, 9.2462e-05], device='cuda:3') +2023-02-06 02:14:45,237 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.977e+02 3.532e+02 4.435e+02 7.362e+02, threshold=7.063e+02, percent-clipped=0.0 +2023-02-06 02:14:53,236 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:15:11,242 INFO [train.py:901] (3/4) Epoch 6, batch 2350, loss[loss=0.3137, simple_loss=0.3755, pruned_loss=0.126, over 8345.00 frames. ], tot_loss[loss=0.278, simple_loss=0.3422, pruned_loss=0.1069, over 1614591.28 frames. ], batch size: 26, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:18,563 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3155, 1.9887, 3.1727, 2.6992, 2.7280, 2.0030, 1.5909, 1.4335], + device='cuda:3'), covar=tensor([0.2057, 0.2414, 0.0513, 0.1147, 0.1028, 0.1207, 0.1062, 0.2326], + device='cuda:3'), in_proj_covar=tensor([0.0772, 0.0705, 0.0608, 0.0705, 0.0794, 0.0649, 0.0617, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:15:43,015 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9898, 1.4592, 6.0652, 2.3068, 5.4130, 5.0754, 5.6618, 5.5218], + device='cuda:3'), covar=tensor([0.0333, 0.3853, 0.0229, 0.2297, 0.0795, 0.0454, 0.0279, 0.0326], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0495, 0.0432, 0.0427, 0.0499, 0.0411, 0.0405, 0.0461], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 02:15:46,902 INFO [train.py:901] (3/4) Epoch 6, batch 2400, loss[loss=0.2653, simple_loss=0.3397, pruned_loss=0.09543, over 8185.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3426, pruned_loss=0.1074, over 1615097.49 frames. 
], batch size: 23, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:15:56,301 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 3.099e+02 3.712e+02 4.452e+02 1.076e+03, threshold=7.425e+02, percent-clipped=4.0 +2023-02-06 02:16:14,319 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:20,856 INFO [train.py:901] (3/4) Epoch 6, batch 2450, loss[loss=0.1919, simple_loss=0.276, pruned_loss=0.05389, over 7235.00 frames. ], tot_loss[loss=0.2776, simple_loss=0.3414, pruned_loss=0.1069, over 1613405.36 frames. ], batch size: 16, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:16:22,281 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=42868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:29,155 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:31,746 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3255, 1.5982, 1.3198, 1.9572, 0.8512, 1.1881, 1.2235, 1.5695], + device='cuda:3'), covar=tensor([0.1045, 0.0892, 0.1466, 0.0538, 0.1383, 0.1756, 0.1021, 0.0804], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0250, 0.0284, 0.0225, 0.0246, 0.0278, 0.0282, 0.0252], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 02:16:32,685 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0 +2023-02-06 02:16:42,221 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=42897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:16:54,610 INFO [train.py:901] (3/4) Epoch 6, batch 2500, loss[loss=0.2675, simple_loss=0.3395, pruned_loss=0.09775, over 8468.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3424, pruned_loss=0.1075, over 1613361.54 frames. ], batch size: 27, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:05,200 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 3.094e+02 4.004e+02 4.995e+02 1.056e+03, threshold=8.009e+02, percent-clipped=4.0 +2023-02-06 02:17:17,684 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 02:17:29,432 INFO [train.py:901] (3/4) Epoch 6, batch 2550, loss[loss=0.2246, simple_loss=0.2856, pruned_loss=0.08179, over 7437.00 frames. ], tot_loss[loss=0.2772, simple_loss=0.3407, pruned_loss=0.1068, over 1610753.93 frames. 
], batch size: 17, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:17:39,755 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.3935, 2.2037, 1.9761, 1.8482, 1.5478, 1.9114, 2.5767, 1.8873], + device='cuda:3'), covar=tensor([0.0470, 0.1145, 0.1675, 0.1319, 0.0608, 0.1451, 0.0591, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0128, 0.0173, 0.0216, 0.0180, 0.0125, 0.0183, 0.0139, 0.0150], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 02:17:41,648 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=42983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:17:42,283 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0356, 2.3556, 1.9640, 3.0838, 1.4272, 1.5130, 1.7943, 2.3127], + device='cuda:3'), covar=tensor([0.0895, 0.0955, 0.1260, 0.0421, 0.1403, 0.1895, 0.1399, 0.0933], + device='cuda:3'), in_proj_covar=tensor([0.0266, 0.0250, 0.0281, 0.0223, 0.0243, 0.0278, 0.0280, 0.0252], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 02:18:01,220 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:18:04,436 INFO [train.py:901] (3/4) Epoch 6, batch 2600, loss[loss=0.2061, simple_loss=0.2709, pruned_loss=0.07066, over 7711.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.34, pruned_loss=0.1063, over 1606174.63 frames. ], batch size: 18, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:18:10,796 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3238, 1.7636, 2.6906, 1.0944, 1.7988, 1.6289, 1.4059, 1.6292], + device='cuda:3'), covar=tensor([0.1856, 0.1946, 0.0759, 0.3695, 0.1778, 0.2807, 0.1748, 0.2298], + device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0453, 0.0521, 0.0536, 0.0583, 0.0520, 0.0445, 0.0587], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 02:18:13,979 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.052e+02 3.779e+02 5.019e+02 1.784e+03, threshold=7.558e+02, percent-clipped=4.0 +2023-02-06 02:18:33,097 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7861, 3.7506, 3.3712, 1.6025, 3.3500, 3.2637, 3.4449, 2.8803], + device='cuda:3'), covar=tensor([0.0922, 0.0616, 0.0945, 0.4719, 0.0813, 0.1101, 0.1089, 0.1067], + device='cuda:3'), in_proj_covar=tensor([0.0394, 0.0285, 0.0322, 0.0399, 0.0313, 0.0280, 0.0306, 0.0253], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:18:39,577 INFO [train.py:901] (3/4) Epoch 6, batch 2650, loss[loss=0.2827, simple_loss=0.3535, pruned_loss=0.106, over 8468.00 frames. ], tot_loss[loss=0.2763, simple_loss=0.3405, pruned_loss=0.106, over 1606874.73 frames. ], batch size: 27, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:19:11,157 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:14,337 INFO [train.py:901] (3/4) Epoch 6, batch 2700, loss[loss=0.3167, simple_loss=0.3802, pruned_loss=0.1266, over 8334.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3404, pruned_loss=0.1057, over 1605067.45 frames. 
], batch size: 26, lr: 1.29e-02, grad_scale: 8.0 +2023-02-06 02:19:20,973 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:23,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.935e+02 3.532e+02 4.548e+02 1.003e+03, threshold=7.064e+02, percent-clipped=2.0 +2023-02-06 02:19:28,343 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:19:47,960 INFO [train.py:901] (3/4) Epoch 6, batch 2750, loss[loss=0.3381, simple_loss=0.392, pruned_loss=0.1421, over 8617.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3394, pruned_loss=0.1048, over 1609960.85 frames. ], batch size: 34, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:19:51,155 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 02:20:22,662 INFO [train.py:901] (3/4) Epoch 6, batch 2800, loss[loss=0.2671, simple_loss=0.3236, pruned_loss=0.1053, over 7647.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3394, pruned_loss=0.1047, over 1611212.97 frames. ], batch size: 19, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:20:26,193 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:32,052 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.696e+02 3.315e+02 4.271e+02 8.534e+02, threshold=6.630e+02, percent-clipped=4.0 +2023-02-06 02:20:39,133 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:40,382 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:55,849 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:20:56,917 INFO [train.py:901] (3/4) Epoch 6, batch 2850, loss[loss=0.2358, simple_loss=0.3137, pruned_loss=0.07894, over 8297.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3405, pruned_loss=0.1054, over 1614073.64 frames. ], batch size: 23, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:06,153 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0 +2023-02-06 02:21:31,687 INFO [train.py:901] (3/4) Epoch 6, batch 2900, loss[loss=0.2639, simple_loss=0.3413, pruned_loss=0.09322, over 8245.00 frames. ], tot_loss[loss=0.2749, simple_loss=0.34, pruned_loss=0.1049, over 1614473.86 frames. 
], batch size: 24, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:21:41,577 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.996e+02 3.885e+02 4.976e+02 9.964e+02, threshold=7.771e+02, percent-clipped=9.0 +2023-02-06 02:21:42,516 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9306, 3.8617, 2.4094, 2.5528, 3.0011, 1.8617, 2.8186, 3.1600], + device='cuda:3'), covar=tensor([0.1409, 0.0263, 0.0794, 0.0824, 0.0636, 0.1121, 0.0907, 0.0794], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0240, 0.0314, 0.0308, 0.0322, 0.0316, 0.0344, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:21:46,023 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:00,459 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:01,689 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 02:22:07,136 INFO [train.py:901] (3/4) Epoch 6, batch 2950, loss[loss=0.3368, simple_loss=0.3816, pruned_loss=0.1459, over 8613.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3401, pruned_loss=0.1051, over 1610041.66 frames. ], batch size: 34, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:17,905 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43382.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:33,423 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6309, 2.7018, 1.8091, 2.1780, 2.1171, 1.5466, 2.0659, 2.2822], + device='cuda:3'), covar=tensor([0.1237, 0.0381, 0.0867, 0.0631, 0.0614, 0.1232, 0.0883, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0236, 0.0309, 0.0303, 0.0317, 0.0317, 0.0342, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:22:36,001 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:22:41,772 INFO [train.py:901] (3/4) Epoch 6, batch 3000, loss[loss=0.2918, simple_loss=0.3636, pruned_loss=0.11, over 8534.00 frames. ], tot_loss[loss=0.2766, simple_loss=0.3414, pruned_loss=0.1059, over 1614425.30 frames. ], batch size: 28, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:22:41,772 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 02:22:53,877 INFO [train.py:935] (3/4) Epoch 6, validation: loss=0.2158, simple_loss=0.3124, pruned_loss=0.05962, over 944034.00 frames. +2023-02-06 02:22:53,878 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 02:23:03,880 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.288e+02 4.080e+02 5.339e+02 1.082e+03, threshold=8.161e+02, percent-clipped=5.0 +2023-02-06 02:23:28,757 INFO [train.py:901] (3/4) Epoch 6, batch 3050, loss[loss=0.2954, simple_loss=0.3633, pruned_loss=0.1137, over 8746.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.3412, pruned_loss=0.1061, over 1607324.48 frames. 
], batch size: 30, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:23:45,125 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0986, 4.0667, 3.6800, 1.7698, 3.6136, 3.6202, 3.7741, 3.1519], + device='cuda:3'), covar=tensor([0.0894, 0.0777, 0.1013, 0.4544, 0.0783, 0.0744, 0.1609, 0.0910], + device='cuda:3'), in_proj_covar=tensor([0.0406, 0.0302, 0.0333, 0.0417, 0.0331, 0.0292, 0.0320, 0.0264], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:24:03,331 INFO [train.py:901] (3/4) Epoch 6, batch 3100, loss[loss=0.3103, simple_loss=0.3695, pruned_loss=0.1256, over 8183.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3423, pruned_loss=0.107, over 1609135.32 frames. ], batch size: 23, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:12,759 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.871e+02 3.509e+02 4.582e+02 1.148e+03, threshold=7.017e+02, percent-clipped=4.0 +2023-02-06 02:24:14,880 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:24:25,050 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0785, 1.3959, 1.5642, 1.2592, 1.0823, 1.3839, 1.6645, 1.5634], + device='cuda:3'), covar=tensor([0.0581, 0.1247, 0.1775, 0.1473, 0.0651, 0.1586, 0.0727, 0.0588], + device='cuda:3'), in_proj_covar=tensor([0.0128, 0.0174, 0.0219, 0.0180, 0.0124, 0.0186, 0.0141, 0.0149], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 02:24:38,267 INFO [train.py:901] (3/4) Epoch 6, batch 3150, loss[loss=0.2518, simple_loss=0.3294, pruned_loss=0.08709, over 8335.00 frames. ], tot_loss[loss=0.2784, simple_loss=0.3427, pruned_loss=0.107, over 1614930.16 frames. ], batch size: 26, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:24:38,611 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.93 vs. limit=5.0 +2023-02-06 02:24:50,306 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 02:24:57,059 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:07,804 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0 +2023-02-06 02:25:10,978 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=43612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:13,479 INFO [train.py:901] (3/4) Epoch 6, batch 3200, loss[loss=0.255, simple_loss=0.307, pruned_loss=0.1015, over 7293.00 frames. ], tot_loss[loss=0.279, simple_loss=0.3429, pruned_loss=0.1076, over 1617889.31 frames. 
], batch size: 16, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:25:14,386 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:23,589 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.828e+02 3.409e+02 4.222e+02 1.719e+03, threshold=6.818e+02, percent-clipped=4.0 +2023-02-06 02:25:28,589 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=43637.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:25:34,656 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3274, 1.1559, 4.4556, 1.6553, 3.7349, 3.7066, 3.9918, 3.8182], + device='cuda:3'), covar=tensor([0.0412, 0.3974, 0.0370, 0.2703, 0.1224, 0.0695, 0.0508, 0.0593], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0504, 0.0449, 0.0439, 0.0508, 0.0423, 0.0421, 0.0476], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 02:25:49,151 INFO [train.py:901] (3/4) Epoch 6, batch 3250, loss[loss=0.2721, simple_loss=0.3522, pruned_loss=0.09598, over 8480.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3416, pruned_loss=0.1065, over 1614600.05 frames. ], batch size: 25, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:26:14,704 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6739, 2.3746, 4.3930, 1.1865, 3.0282, 2.3068, 1.7014, 2.6088], + device='cuda:3'), covar=tensor([0.1339, 0.1715, 0.0552, 0.3080, 0.1174, 0.2065, 0.1460, 0.2100], + device='cuda:3'), in_proj_covar=tensor([0.0465, 0.0456, 0.0517, 0.0532, 0.0586, 0.0515, 0.0445, 0.0583], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 02:26:23,265 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 02:26:23,408 INFO [train.py:901] (3/4) Epoch 6, batch 3300, loss[loss=0.2674, simple_loss=0.3297, pruned_loss=0.1025, over 8287.00 frames. ], tot_loss[loss=0.2767, simple_loss=0.3411, pruned_loss=0.1061, over 1615714.02 frames. ], batch size: 23, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:26:33,006 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.968e+02 3.670e+02 5.054e+02 9.057e+02, threshold=7.341e+02, percent-clipped=6.0 +2023-02-06 02:26:40,676 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.3844, 1.4770, 1.5744, 1.4407, 1.3940, 1.5022, 2.6842, 2.3707], + device='cuda:3'), covar=tensor([0.0502, 0.1854, 0.2574, 0.1798, 0.0745, 0.2159, 0.0701, 0.0592], + device='cuda:3'), in_proj_covar=tensor([0.0126, 0.0172, 0.0214, 0.0178, 0.0123, 0.0183, 0.0139, 0.0148], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 02:26:58,036 INFO [train.py:901] (3/4) Epoch 6, batch 3350, loss[loss=0.2671, simple_loss=0.3412, pruned_loss=0.0965, over 8295.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3412, pruned_loss=0.1056, over 1619458.36 frames. ], batch size: 23, lr: 1.28e-02, grad_scale: 8.0 +2023-02-06 02:27:25,496 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:27:33,331 INFO [train.py:901] (3/4) Epoch 6, batch 3400, loss[loss=0.3541, simple_loss=0.4054, pruned_loss=0.1514, over 8289.00 frames. 
], tot_loss[loss=0.2765, simple_loss=0.3416, pruned_loss=0.1057, over 1620495.14 frames. ], batch size: 23, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:27:42,441 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.693e+02 3.397e+02 4.441e+02 9.371e+02, threshold=6.793e+02, percent-clipped=2.0 +2023-02-06 02:28:07,549 INFO [train.py:901] (3/4) Epoch 6, batch 3450, loss[loss=0.2341, simple_loss=0.3064, pruned_loss=0.08091, over 8084.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3408, pruned_loss=0.1055, over 1616985.89 frames. ], batch size: 21, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:09,784 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0385, 1.7064, 1.4336, 1.6546, 1.3864, 1.1287, 1.2617, 1.4779], + device='cuda:3'), covar=tensor([0.0793, 0.0335, 0.0827, 0.0411, 0.0555, 0.1081, 0.0663, 0.0596], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0233, 0.0307, 0.0301, 0.0311, 0.0309, 0.0332, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:28:14,344 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=43876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:42,236 INFO [train.py:901] (3/4) Epoch 6, batch 3500, loss[loss=0.2853, simple_loss=0.3652, pruned_loss=0.1027, over 8506.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3408, pruned_loss=0.1052, over 1616050.67 frames. ], batch size: 31, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:28:50,533 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:28:52,404 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 3.111e+02 3.775e+02 4.956e+02 7.195e+02, threshold=7.550e+02, percent-clipped=1.0 +2023-02-06 02:28:54,757 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.53 vs. limit=5.0 +2023-02-06 02:28:59,194 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 02:29:16,404 INFO [train.py:901] (3/4) Epoch 6, batch 3550, loss[loss=0.3717, simple_loss=0.4073, pruned_loss=0.168, over 6862.00 frames. ], tot_loss[loss=0.2774, simple_loss=0.342, pruned_loss=0.1063, over 1612960.34 frames. ], batch size: 73, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:29:24,737 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=43977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:24,979 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.02 vs. limit=2.0 +2023-02-06 02:29:34,252 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=43991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:29:52,626 INFO [train.py:901] (3/4) Epoch 6, batch 3600, loss[loss=0.3038, simple_loss=0.364, pruned_loss=0.1219, over 8356.00 frames. ], tot_loss[loss=0.2762, simple_loss=0.3409, pruned_loss=0.1058, over 1610139.11 frames. ], batch size: 24, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:30:02,264 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.983e+02 3.632e+02 4.470e+02 1.452e+03, threshold=7.265e+02, percent-clipped=1.0 +2023-02-06 02:30:27,001 INFO [train.py:901] (3/4) Epoch 6, batch 3650, loss[loss=0.2379, simple_loss=0.2985, pruned_loss=0.08865, over 7301.00 frames. 
], tot_loss[loss=0.2763, simple_loss=0.3409, pruned_loss=0.1059, over 1606869.95 frames. ], batch size: 16, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:00,588 INFO [train.py:901] (3/4) Epoch 6, batch 3700, loss[loss=0.2376, simple_loss=0.3184, pruned_loss=0.07843, over 8469.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3404, pruned_loss=0.1057, over 1604808.43 frames. ], batch size: 25, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:31:01,285 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 02:31:11,218 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 3.019e+02 3.651e+02 4.413e+02 8.839e+02, threshold=7.303e+02, percent-clipped=3.0 +2023-02-06 02:31:23,984 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:31:27,458 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-06 02:31:35,821 INFO [train.py:901] (3/4) Epoch 6, batch 3750, loss[loss=0.2739, simple_loss=0.337, pruned_loss=0.1054, over 7976.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3413, pruned_loss=0.1064, over 1604476.03 frames. ], batch size: 21, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:06,703 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7660, 3.7532, 3.3925, 1.7283, 3.3727, 3.4092, 3.4413, 3.0287], + device='cuda:3'), covar=tensor([0.1091, 0.0776, 0.1214, 0.4684, 0.0931, 0.1080, 0.1432, 0.1072], + device='cuda:3'), in_proj_covar=tensor([0.0405, 0.0302, 0.0335, 0.0417, 0.0323, 0.0293, 0.0319, 0.0265], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:32:09,322 INFO [train.py:901] (3/4) Epoch 6, batch 3800, loss[loss=0.2933, simple_loss=0.3459, pruned_loss=0.1204, over 7804.00 frames. ], tot_loss[loss=0.2769, simple_loss=0.3408, pruned_loss=0.1065, over 1597072.45 frames. ], batch size: 19, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:19,578 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.008e+02 3.761e+02 4.930e+02 1.044e+03, threshold=7.521e+02, percent-clipped=7.0 +2023-02-06 02:32:32,439 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:34,456 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5032, 1.4263, 4.6864, 1.8005, 4.1716, 3.9148, 4.2582, 4.1683], + device='cuda:3'), covar=tensor([0.0419, 0.3419, 0.0401, 0.2343, 0.0999, 0.0721, 0.0375, 0.0461], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0499, 0.0445, 0.0436, 0.0503, 0.0413, 0.0419, 0.0474], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 02:32:44,252 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:45,446 INFO [train.py:901] (3/4) Epoch 6, batch 3850, loss[loss=0.3331, simple_loss=0.3953, pruned_loss=0.1355, over 8296.00 frames. ], tot_loss[loss=0.2789, simple_loss=0.3426, pruned_loss=0.1077, over 1601854.30 frames. 
], batch size: 23, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:32:48,966 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:32:49,779 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:02,839 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 02:33:20,475 INFO [train.py:901] (3/4) Epoch 6, batch 3900, loss[loss=0.2374, simple_loss=0.3168, pruned_loss=0.07901, over 8018.00 frames. ], tot_loss[loss=0.2787, simple_loss=0.3427, pruned_loss=0.1073, over 1610224.43 frames. ], batch size: 22, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:33:23,541 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 02:33:23,957 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:33:30,569 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.224e+02 2.909e+02 3.535e+02 4.398e+02 8.405e+02, threshold=7.069e+02, percent-clipped=2.0 +2023-02-06 02:33:56,226 INFO [train.py:901] (3/4) Epoch 6, batch 3950, loss[loss=0.281, simple_loss=0.3481, pruned_loss=0.107, over 8607.00 frames. ], tot_loss[loss=0.2786, simple_loss=0.3427, pruned_loss=0.1073, over 1610138.14 frames. ], batch size: 34, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:09,754 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:34:30,792 INFO [train.py:901] (3/4) Epoch 6, batch 4000, loss[loss=0.3142, simple_loss=0.3846, pruned_loss=0.1219, over 8026.00 frames. ], tot_loss[loss=0.2782, simple_loss=0.3429, pruned_loss=0.1068, over 1615538.40 frames. ], batch size: 22, lr: 1.27e-02, grad_scale: 8.0 +2023-02-06 02:34:39,800 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4736, 1.5025, 2.7828, 1.2218, 1.9913, 3.0171, 3.0105, 2.5541], + device='cuda:3'), covar=tensor([0.1047, 0.1244, 0.0399, 0.1959, 0.0711, 0.0308, 0.0446, 0.0660], + device='cuda:3'), in_proj_covar=tensor([0.0241, 0.0273, 0.0227, 0.0267, 0.0235, 0.0212, 0.0247, 0.0284], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 02:34:40,314 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.014e+02 2.805e+02 3.702e+02 4.857e+02 8.487e+02, threshold=7.405e+02, percent-clipped=7.0 +2023-02-06 02:34:44,606 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=44436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:05,734 INFO [train.py:901] (3/4) Epoch 6, batch 4050, loss[loss=0.2681, simple_loss=0.3177, pruned_loss=0.1092, over 6815.00 frames. ], tot_loss[loss=0.2779, simple_loss=0.3426, pruned_loss=0.1066, over 1614562.96 frames. 
], batch size: 15, lr: 1.27e-02, grad_scale: 16.0 +2023-02-06 02:35:15,521 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5420, 2.8698, 1.8753, 2.2644, 2.4115, 1.4866, 1.9673, 2.2058], + device='cuda:3'), covar=tensor([0.1145, 0.0249, 0.0790, 0.0517, 0.0476, 0.1069, 0.0703, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0232, 0.0307, 0.0301, 0.0316, 0.0311, 0.0333, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:35:41,182 INFO [train.py:901] (3/4) Epoch 6, batch 4100, loss[loss=0.2658, simple_loss=0.3313, pruned_loss=0.1002, over 6389.00 frames. ], tot_loss[loss=0.2771, simple_loss=0.3418, pruned_loss=0.1061, over 1610724.59 frames. ], batch size: 14, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:35:44,020 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:35:50,484 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 3.131e+02 3.987e+02 5.314e+02 1.327e+03, threshold=7.973e+02, percent-clipped=4.0 +2023-02-06 02:35:53,973 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0014, 4.0814, 2.7437, 2.7210, 3.1167, 2.0484, 2.5495, 3.0083], + device='cuda:3'), covar=tensor([0.1397, 0.0231, 0.0730, 0.0690, 0.0583, 0.1130, 0.0952, 0.0826], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0235, 0.0310, 0.0306, 0.0323, 0.0316, 0.0337, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:36:00,537 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:00,594 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44545.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:14,407 INFO [train.py:901] (3/4) Epoch 6, batch 4150, loss[loss=0.2543, simple_loss=0.3162, pruned_loss=0.09616, over 7973.00 frames. ], tot_loss[loss=0.2753, simple_loss=0.34, pruned_loss=0.1053, over 1610328.55 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:15,790 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=44568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:36:49,802 INFO [train.py:901] (3/4) Epoch 6, batch 4200, loss[loss=0.2539, simple_loss=0.3071, pruned_loss=0.1003, over 7642.00 frames. ], tot_loss[loss=0.275, simple_loss=0.3393, pruned_loss=0.1054, over 1612118.73 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:36:58,995 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.001e+02 2.806e+02 3.559e+02 4.787e+02 1.284e+03, threshold=7.119e+02, percent-clipped=4.0 +2023-02-06 02:37:05,643 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 02:37:07,966 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:23,774 INFO [train.py:901] (3/4) Epoch 6, batch 4250, loss[loss=0.3034, simple_loss=0.3603, pruned_loss=0.1233, over 7693.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3385, pruned_loss=0.1051, over 1610297.36 frames. 
], batch size: 18, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:37:24,680 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:29,243 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 02:37:41,576 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=44692.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:37:47,606 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4844, 2.0033, 3.2431, 2.6161, 2.7037, 2.1163, 1.5207, 1.3694], + device='cuda:3'), covar=tensor([0.1949, 0.2318, 0.0551, 0.1173, 0.1023, 0.1182, 0.1224, 0.2580], + device='cuda:3'), in_proj_covar=tensor([0.0783, 0.0721, 0.0623, 0.0719, 0.0804, 0.0661, 0.0629, 0.0661], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:37:51,714 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.68 vs. limit=5.0 +2023-02-06 02:37:58,490 INFO [train.py:901] (3/4) Epoch 6, batch 4300, loss[loss=0.2662, simple_loss=0.3203, pruned_loss=0.106, over 7800.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3381, pruned_loss=0.1045, over 1611799.72 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:38:00,027 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=44717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:38:08,669 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.771e+02 3.321e+02 4.102e+02 9.930e+02, threshold=6.641e+02, percent-clipped=2.0 +2023-02-06 02:38:33,191 INFO [train.py:901] (3/4) Epoch 6, batch 4350, loss[loss=0.254, simple_loss=0.3202, pruned_loss=0.09388, over 7789.00 frames. ], tot_loss[loss=0.2749, simple_loss=0.3394, pruned_loss=0.1052, over 1614057.90 frames. ], batch size: 19, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:38:38,300 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6743, 2.4786, 4.4220, 1.2230, 2.8418, 2.1352, 1.9713, 2.3630], + device='cuda:3'), covar=tensor([0.1679, 0.1969, 0.0863, 0.3968, 0.1633, 0.2639, 0.1582, 0.2828], + device='cuda:3'), in_proj_covar=tensor([0.0473, 0.0462, 0.0538, 0.0545, 0.0589, 0.0531, 0.0445, 0.0599], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 02:39:00,033 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 02:39:06,571 INFO [train.py:901] (3/4) Epoch 6, batch 4400, loss[loss=0.2707, simple_loss=0.3371, pruned_loss=0.1022, over 8233.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.3386, pruned_loss=0.1044, over 1614375.55 frames. 
], batch size: 22, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:12,866 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3497, 2.1508, 1.1967, 2.9935, 1.2830, 1.1094, 1.8661, 2.3296], + device='cuda:3'), covar=tensor([0.2300, 0.1629, 0.3305, 0.0476, 0.2061, 0.3121, 0.1825, 0.1245], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0245, 0.0277, 0.0220, 0.0241, 0.0273, 0.0284, 0.0252], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 02:39:17,275 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.185e+02 3.434e+02 4.206e+02 5.183e+02 1.151e+03, threshold=8.413e+02, percent-clipped=11.0 +2023-02-06 02:39:31,753 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0908, 2.1776, 1.4742, 1.9169, 1.9228, 1.2255, 1.5720, 1.7166], + device='cuda:3'), covar=tensor([0.1094, 0.0296, 0.1007, 0.0436, 0.0541, 0.1204, 0.0744, 0.0737], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0234, 0.0307, 0.0302, 0.0315, 0.0312, 0.0335, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:39:40,250 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 02:39:42,256 INFO [train.py:901] (3/4) Epoch 6, batch 4450, loss[loss=0.2621, simple_loss=0.3448, pruned_loss=0.08975, over 8333.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.3388, pruned_loss=0.1043, over 1619383.86 frames. ], batch size: 25, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:39:58,546 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:13,884 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=44912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:40:16,360 INFO [train.py:901] (3/4) Epoch 6, batch 4500, loss[loss=0.3065, simple_loss=0.3564, pruned_loss=0.1283, over 7934.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3393, pruned_loss=0.1047, over 1616612.16 frames. ], batch size: 20, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:40:26,435 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 3.100e+02 3.740e+02 5.266e+02 1.703e+03, threshold=7.479e+02, percent-clipped=4.0 +2023-02-06 02:40:31,315 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2879, 2.3861, 1.5787, 2.2144, 2.0114, 1.2261, 1.7589, 2.0423], + device='cuda:3'), covar=tensor([0.1198, 0.0304, 0.1048, 0.0434, 0.0610, 0.1398, 0.0899, 0.0673], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0234, 0.0306, 0.0302, 0.0313, 0.0311, 0.0334, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:40:31,751 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 02:40:36,019 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7157, 2.9788, 2.0285, 2.4425, 2.4170, 1.7705, 2.0784, 2.4888], + device='cuda:3'), covar=tensor([0.1202, 0.0275, 0.0748, 0.0519, 0.0519, 0.1057, 0.0805, 0.0643], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0234, 0.0307, 0.0302, 0.0313, 0.0312, 0.0333, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 02:40:44,171 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5382, 4.5135, 4.0717, 1.5558, 4.1361, 4.0704, 4.2632, 3.7086], + device='cuda:3'), covar=tensor([0.0719, 0.0536, 0.0995, 0.4436, 0.0654, 0.0824, 0.1099, 0.0758], + device='cuda:3'), in_proj_covar=tensor([0.0411, 0.0305, 0.0335, 0.0412, 0.0329, 0.0293, 0.0319, 0.0265], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:40:52,041 INFO [train.py:901] (3/4) Epoch 6, batch 4550, loss[loss=0.3191, simple_loss=0.3769, pruned_loss=0.1307, over 8622.00 frames. ], tot_loss[loss=0.2731, simple_loss=0.3387, pruned_loss=0.1038, over 1614190.69 frames. ], batch size: 39, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:10,193 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0256, 3.9638, 3.6629, 1.8046, 3.5504, 3.6177, 3.7409, 3.3060], + device='cuda:3'), covar=tensor([0.0959, 0.0644, 0.0909, 0.4764, 0.0852, 0.0922, 0.1121, 0.0947], + device='cuda:3'), in_proj_covar=tensor([0.0408, 0.0304, 0.0333, 0.0413, 0.0328, 0.0293, 0.0316, 0.0265], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:41:18,803 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:27,054 INFO [train.py:901] (3/4) Epoch 6, batch 4600, loss[loss=0.2504, simple_loss=0.3133, pruned_loss=0.0937, over 7964.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3392, pruned_loss=0.1042, over 1615865.66 frames. ], batch size: 21, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:41:28,727 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6947, 2.1392, 3.8124, 2.9252, 3.1943, 2.1812, 1.5443, 1.8099], + device='cuda:3'), covar=tensor([0.2293, 0.2962, 0.0630, 0.1368, 0.1256, 0.1207, 0.1207, 0.2681], + device='cuda:3'), in_proj_covar=tensor([0.0785, 0.0731, 0.0621, 0.0726, 0.0817, 0.0665, 0.0632, 0.0663], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:41:34,828 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:41:36,658 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.977e+02 3.732e+02 4.434e+02 1.135e+03, threshold=7.465e+02, percent-clipped=1.0 +2023-02-06 02:42:02,755 INFO [train.py:901] (3/4) Epoch 6, batch 4650, loss[loss=0.2927, simple_loss=0.3427, pruned_loss=0.1214, over 7916.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3384, pruned_loss=0.1038, over 1615978.20 frames. 
], batch size: 20, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:08,413 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45074.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:25,262 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:42:36,980 INFO [train.py:901] (3/4) Epoch 6, batch 4700, loss[loss=0.2588, simple_loss=0.326, pruned_loss=0.09577, over 7192.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3397, pruned_loss=0.1052, over 1610147.36 frames. ], batch size: 16, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:42:46,396 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.041e+02 3.187e+02 3.833e+02 4.569e+02 1.251e+03, threshold=7.667e+02, percent-clipped=2.0 +2023-02-06 02:43:11,110 INFO [train.py:901] (3/4) Epoch 6, batch 4750, loss[loss=0.2897, simple_loss=0.3569, pruned_loss=0.1113, over 8318.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3396, pruned_loss=0.1046, over 1613138.44 frames. ], batch size: 25, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:30,408 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 02:43:31,805 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 02:43:46,231 INFO [train.py:901] (3/4) Epoch 6, batch 4800, loss[loss=0.3384, simple_loss=0.3793, pruned_loss=0.1487, over 8734.00 frames. ], tot_loss[loss=0.276, simple_loss=0.341, pruned_loss=0.1055, over 1612148.88 frames. ], batch size: 34, lr: 1.26e-02, grad_scale: 16.0 +2023-02-06 02:43:55,770 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 3.021e+02 3.501e+02 4.623e+02 8.497e+02, threshold=7.001e+02, percent-clipped=1.0 +2023-02-06 02:44:16,259 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:20,164 INFO [train.py:901] (3/4) Epoch 6, batch 4850, loss[loss=0.3338, simple_loss=0.3883, pruned_loss=0.1397, over 8339.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3394, pruned_loss=0.1047, over 1610357.86 frames. ], batch size: 26, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:44:20,856 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 02:44:33,164 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:35,180 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45285.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:50,595 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:44:51,247 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0420, 1.3261, 1.1773, 0.2997, 1.2664, 0.9418, 0.0883, 1.1409], + device='cuda:3'), covar=tensor([0.0166, 0.0130, 0.0117, 0.0212, 0.0124, 0.0433, 0.0294, 0.0120], + device='cuda:3'), in_proj_covar=tensor([0.0324, 0.0236, 0.0204, 0.0292, 0.0231, 0.0377, 0.0298, 0.0277], + device='cuda:3'), out_proj_covar=tensor([1.0791e-04, 7.6319e-05, 6.6182e-05, 9.6015e-05, 7.6680e-05, 1.3412e-04, + 9.9780e-05, 9.1389e-05], device='cuda:3') +2023-02-06 02:44:56,003 INFO [train.py:901] (3/4) Epoch 6, batch 4900, loss[loss=0.2487, simple_loss=0.3239, pruned_loss=0.08669, over 8533.00 frames. 
], tot_loss[loss=0.2741, simple_loss=0.339, pruned_loss=0.1046, over 1608655.49 frames. ], batch size: 39, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:02,904 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6605, 1.6524, 3.1864, 1.2569, 2.2269, 3.6101, 3.5361, 3.1779], + device='cuda:3'), covar=tensor([0.1025, 0.1242, 0.0371, 0.1876, 0.0735, 0.0233, 0.0336, 0.0532], + device='cuda:3'), in_proj_covar=tensor([0.0237, 0.0263, 0.0224, 0.0265, 0.0231, 0.0211, 0.0246, 0.0275], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 02:45:05,437 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.896e+02 3.521e+02 4.501e+02 9.960e+02, threshold=7.042e+02, percent-clipped=7.0 +2023-02-06 02:45:30,236 INFO [train.py:901] (3/4) Epoch 6, batch 4950, loss[loss=0.2561, simple_loss=0.3399, pruned_loss=0.08611, over 8296.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3399, pruned_loss=0.1048, over 1615302.65 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:45:30,384 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45366.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:46:05,707 INFO [train.py:901] (3/4) Epoch 6, batch 5000, loss[loss=0.252, simple_loss=0.3192, pruned_loss=0.09238, over 8245.00 frames. ], tot_loss[loss=0.2747, simple_loss=0.3393, pruned_loss=0.1051, over 1614517.68 frames. ], batch size: 22, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:07,201 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:07,295 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1170, 1.4282, 2.3083, 1.1545, 2.1221, 2.5238, 2.5104, 2.1510], + device='cuda:3'), covar=tensor([0.1067, 0.1039, 0.0460, 0.1845, 0.0540, 0.0366, 0.0524, 0.0747], + device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0261, 0.0222, 0.0264, 0.0230, 0.0210, 0.0246, 0.0275], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 02:46:15,105 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.151e+02 3.255e+02 4.005e+02 4.887e+02 1.315e+03, threshold=8.009e+02, percent-clipped=7.0 +2023-02-06 02:46:24,039 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:46:40,014 INFO [train.py:901] (3/4) Epoch 6, batch 5050, loss[loss=0.2581, simple_loss=0.3244, pruned_loss=0.09594, over 7942.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3381, pruned_loss=0.1039, over 1615226.04 frames. ], batch size: 20, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:46:51,616 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0102, 4.0886, 3.6295, 1.9027, 3.5535, 3.6299, 3.7868, 3.3547], + device='cuda:3'), covar=tensor([0.1094, 0.0697, 0.1058, 0.4290, 0.0902, 0.0823, 0.1376, 0.0739], + device='cuda:3'), in_proj_covar=tensor([0.0400, 0.0302, 0.0330, 0.0411, 0.0322, 0.0292, 0.0310, 0.0261], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:46:58,892 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-06 02:47:09,014 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4486, 1.7370, 1.8788, 1.3639, 0.9463, 2.0148, 0.1628, 1.2288], + device='cuda:3'), covar=tensor([0.4345, 0.2483, 0.1133, 0.3288, 0.6862, 0.0660, 0.5417, 0.2265], + device='cuda:3'), in_proj_covar=tensor([0.0138, 0.0135, 0.0087, 0.0183, 0.0228, 0.0083, 0.0146, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:47:13,239 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.94 vs. limit=5.0 +2023-02-06 02:47:14,057 INFO [train.py:901] (3/4) Epoch 6, batch 5100, loss[loss=0.2538, simple_loss=0.3034, pruned_loss=0.1021, over 7532.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3375, pruned_loss=0.1033, over 1610746.13 frames. ], batch size: 18, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:47:24,713 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.842e+02 3.419e+02 4.219e+02 7.828e+02, threshold=6.837e+02, percent-clipped=0.0 +2023-02-06 02:47:26,970 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:40,375 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1195, 1.7411, 3.0210, 2.4664, 2.4061, 1.7281, 1.4461, 1.1331], + device='cuda:3'), covar=tensor([0.2867, 0.2979, 0.0546, 0.1332, 0.1305, 0.1992, 0.1875, 0.2497], + device='cuda:3'), in_proj_covar=tensor([0.0800, 0.0736, 0.0628, 0.0727, 0.0837, 0.0678, 0.0643, 0.0678], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:47:43,443 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:47:47,199 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 02:47:49,316 INFO [train.py:901] (3/4) Epoch 6, batch 5150, loss[loss=0.3533, simple_loss=0.3948, pruned_loss=0.1559, over 8561.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3406, pruned_loss=0.1051, over 1617458.98 frames. ], batch size: 31, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:47:50,927 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8890, 1.5473, 2.2947, 1.9743, 2.0177, 1.6315, 1.3182, 1.1536], + device='cuda:3'), covar=tensor([0.1526, 0.1864, 0.0455, 0.0853, 0.0818, 0.0973, 0.0971, 0.1671], + device='cuda:3'), in_proj_covar=tensor([0.0791, 0.0729, 0.0621, 0.0719, 0.0828, 0.0672, 0.0637, 0.0672], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:48:23,749 INFO [train.py:901] (3/4) Epoch 6, batch 5200, loss[loss=0.2722, simple_loss=0.3444, pruned_loss=0.09998, over 8689.00 frames. ], tot_loss[loss=0.2759, simple_loss=0.3411, pruned_loss=0.1053, over 1612973.00 frames. ], batch size: 34, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:48:34,007 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.012e+02 3.204e+02 4.015e+02 4.654e+02 8.708e+02, threshold=8.029e+02, percent-clipped=4.0 +2023-02-06 02:48:57,684 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 02:48:59,760 INFO [train.py:901] (3/4) Epoch 6, batch 5250, loss[loss=0.2826, simple_loss=0.3588, pruned_loss=0.1032, over 8322.00 frames. 
], tot_loss[loss=0.2766, simple_loss=0.3415, pruned_loss=0.1058, over 1613745.22 frames. ], batch size: 25, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:49:30,225 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=45710.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 02:49:34,108 INFO [train.py:901] (3/4) Epoch 6, batch 5300, loss[loss=0.2761, simple_loss=0.3412, pruned_loss=0.1055, over 8245.00 frames. ], tot_loss[loss=0.2757, simple_loss=0.3405, pruned_loss=0.1055, over 1612482.25 frames. ], batch size: 24, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:49:36,957 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:49:43,554 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.935e+02 3.437e+02 4.667e+02 1.283e+03, threshold=6.874e+02, percent-clipped=3.0 +2023-02-06 02:50:09,986 INFO [train.py:901] (3/4) Epoch 6, batch 5350, loss[loss=0.2733, simple_loss=0.3374, pruned_loss=0.1046, over 8108.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3402, pruned_loss=0.1055, over 1612979.96 frames. ], batch size: 23, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:20,660 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4014, 1.8539, 3.4386, 1.0567, 2.4118, 1.8732, 1.3599, 2.0971], + device='cuda:3'), covar=tensor([0.1584, 0.1935, 0.0664, 0.3316, 0.1394, 0.2369, 0.1541, 0.2269], + device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0458, 0.0525, 0.0540, 0.0579, 0.0526, 0.0439, 0.0581], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 02:50:25,441 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:27,348 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45792.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:38,025 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5901, 1.2828, 4.8333, 1.7709, 4.0701, 3.9810, 4.3687, 4.2316], + device='cuda:3'), covar=tensor([0.0498, 0.3685, 0.0301, 0.2649, 0.1044, 0.0601, 0.0417, 0.0497], + device='cuda:3'), in_proj_covar=tensor([0.0366, 0.0518, 0.0463, 0.0455, 0.0517, 0.0425, 0.0439, 0.0482], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 02:50:42,760 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:42,778 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=45814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:50:43,428 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3947, 1.2631, 1.2402, 1.1240, 0.8952, 1.1119, 1.2939, 1.1398], + device='cuda:3'), covar=tensor([0.0657, 0.1314, 0.1826, 0.1439, 0.0603, 0.1530, 0.0704, 0.0632], + device='cuda:3'), in_proj_covar=tensor([0.0121, 0.0170, 0.0209, 0.0173, 0.0120, 0.0178, 0.0133, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 02:50:43,905 INFO [train.py:901] (3/4) Epoch 6, batch 5400, loss[loss=0.3158, simple_loss=0.3704, pruned_loss=0.1306, over 8589.00 frames. ], tot_loss[loss=0.277, simple_loss=0.3415, pruned_loss=0.1063, over 1614299.58 frames. 
], batch size: 31, lr: 1.25e-02, grad_scale: 16.0 +2023-02-06 02:50:49,968 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=45825.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:50:53,703 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.842e+02 3.609e+02 4.644e+02 1.367e+03, threshold=7.218e+02, percent-clipped=2.0 +2023-02-06 02:50:59,048 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=45839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:17,301 INFO [train.py:901] (3/4) Epoch 6, batch 5450, loss[loss=0.2603, simple_loss=0.3386, pruned_loss=0.09101, over 7921.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.3401, pruned_loss=0.1051, over 1610459.93 frames. ], batch size: 20, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:51:22,257 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9701, 3.4796, 2.5621, 3.9590, 1.9355, 2.1627, 2.4196, 3.3103], + device='cuda:3'), covar=tensor([0.0743, 0.0818, 0.1130, 0.0271, 0.1322, 0.1749, 0.1514, 0.0757], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0249, 0.0280, 0.0227, 0.0244, 0.0278, 0.0287, 0.0248], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 02:51:26,215 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=45877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:51:47,555 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 02:51:51,930 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9683, 1.5899, 2.2581, 1.9378, 1.9797, 1.7282, 1.4595, 0.6276], + device='cuda:3'), covar=tensor([0.2339, 0.2240, 0.0647, 0.1174, 0.1046, 0.1340, 0.1160, 0.2270], + device='cuda:3'), in_proj_covar=tensor([0.0798, 0.0734, 0.0629, 0.0722, 0.0836, 0.0679, 0.0639, 0.0675], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:51:52,372 INFO [train.py:901] (3/4) Epoch 6, batch 5500, loss[loss=0.256, simple_loss=0.3154, pruned_loss=0.09827, over 7537.00 frames. ], tot_loss[loss=0.2741, simple_loss=0.3393, pruned_loss=0.1044, over 1610529.50 frames. ], batch size: 18, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:52:03,086 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.821e+02 3.418e+02 4.385e+02 9.516e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 02:52:27,018 INFO [train.py:901] (3/4) Epoch 6, batch 5550, loss[loss=0.2836, simple_loss=0.3322, pruned_loss=0.1176, over 7706.00 frames. ], tot_loss[loss=0.2738, simple_loss=0.3391, pruned_loss=0.1042, over 1611767.52 frames. ], batch size: 18, lr: 1.25e-02, grad_scale: 8.0 +2023-02-06 02:52:39,460 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. 
limit=2.0 +2023-02-06 02:52:56,205 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4362, 1.5384, 1.5700, 1.3718, 1.1801, 1.3635, 1.7510, 1.8000], + device='cuda:3'), covar=tensor([0.0544, 0.1278, 0.1802, 0.1365, 0.0658, 0.1581, 0.0749, 0.0505], + device='cuda:3'), in_proj_covar=tensor([0.0123, 0.0170, 0.0211, 0.0175, 0.0121, 0.0180, 0.0135, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 02:53:03,502 INFO [train.py:901] (3/4) Epoch 6, batch 5600, loss[loss=0.2463, simple_loss=0.3038, pruned_loss=0.09441, over 7973.00 frames. ], tot_loss[loss=0.2752, simple_loss=0.34, pruned_loss=0.1052, over 1610232.58 frames. ], batch size: 21, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:13,363 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.150e+02 2.809e+02 3.495e+02 4.670e+02 1.291e+03, threshold=6.989e+02, percent-clipped=6.0 +2023-02-06 02:53:33,464 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0781, 1.7250, 2.7724, 2.0732, 2.4032, 1.8959, 1.4221, 0.9501], + device='cuda:3'), covar=tensor([0.2341, 0.2388, 0.0522, 0.1372, 0.1036, 0.1275, 0.1150, 0.2516], + device='cuda:3'), in_proj_covar=tensor([0.0813, 0.0744, 0.0637, 0.0729, 0.0842, 0.0687, 0.0647, 0.0683], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:53:36,014 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46064.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:53:37,308 INFO [train.py:901] (3/4) Epoch 6, batch 5650, loss[loss=0.2802, simple_loss=0.343, pruned_loss=0.1087, over 8608.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.3402, pruned_loss=0.1055, over 1611356.72 frames. ], batch size: 34, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:53:47,430 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46081.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:53:51,172 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 02:53:59,577 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 02:54:04,756 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46106.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 02:54:12,525 INFO [train.py:901] (3/4) Epoch 6, batch 5700, loss[loss=0.2644, simple_loss=0.335, pruned_loss=0.09687, over 8536.00 frames. ], tot_loss[loss=0.2745, simple_loss=0.3397, pruned_loss=0.1046, over 1610988.81 frames. ], batch size: 28, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:22,543 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.829e+02 3.489e+02 4.392e+02 1.030e+03, threshold=6.978e+02, percent-clipped=3.0 +2023-02-06 02:54:25,975 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:54:46,332 INFO [train.py:901] (3/4) Epoch 6, batch 5750, loss[loss=0.2661, simple_loss=0.3381, pruned_loss=0.09701, over 8513.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3401, pruned_loss=0.1048, over 1613122.79 frames. ], batch size: 28, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:54:53,641 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-06 02:54:55,248 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:01,704 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 02:55:21,182 INFO [train.py:901] (3/4) Epoch 6, batch 5800, loss[loss=0.288, simple_loss=0.3608, pruned_loss=0.1076, over 8500.00 frames. ], tot_loss[loss=0.2748, simple_loss=0.3408, pruned_loss=0.1044, over 1619352.73 frames. ], batch size: 28, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:55:24,803 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46221.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:32,627 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.949e+02 3.358e+02 4.338e+02 9.471e+02, threshold=6.717e+02, percent-clipped=1.0 +2023-02-06 02:55:45,837 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:55:55,787 INFO [train.py:901] (3/4) Epoch 6, batch 5850, loss[loss=0.2243, simple_loss=0.2968, pruned_loss=0.07586, over 7792.00 frames. ], tot_loss[loss=0.2743, simple_loss=0.3403, pruned_loss=0.1042, over 1620781.16 frames. ], batch size: 19, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:14,992 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:56:29,476 INFO [train.py:901] (3/4) Epoch 6, batch 5900, loss[loss=0.2917, simple_loss=0.3436, pruned_loss=0.1198, over 8445.00 frames. ], tot_loss[loss=0.2764, simple_loss=0.3416, pruned_loss=0.1056, over 1622486.98 frames. ], batch size: 27, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:56:39,482 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 3.022e+02 3.849e+02 5.141e+02 8.536e+02, threshold=7.697e+02, percent-clipped=7.0 +2023-02-06 02:56:43,702 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:57:04,658 INFO [train.py:901] (3/4) Epoch 6, batch 5950, loss[loss=0.2424, simple_loss=0.3122, pruned_loss=0.08627, over 7973.00 frames. ], tot_loss[loss=0.2754, simple_loss=0.3404, pruned_loss=0.1052, over 1617017.14 frames. ], batch size: 21, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:38,431 INFO [train.py:901] (3/4) Epoch 6, batch 6000, loss[loss=0.3541, simple_loss=0.3964, pruned_loss=0.1559, over 7065.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.3404, pruned_loss=0.1049, over 1617804.51 frames. ], batch size: 71, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:57:38,432 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 02:57:50,760 INFO [train.py:935] (3/4) Epoch 6, validation: loss=0.2127, simple_loss=0.3094, pruned_loss=0.05799, over 944034.00 frames. 
+2023-02-06 02:57:50,761 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 02:58:01,259 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.683e+02 3.226e+02 4.100e+02 1.140e+03, threshold=6.453e+02, percent-clipped=1.0 +2023-02-06 02:58:04,347 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:58:06,388 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7160, 3.6963, 3.3656, 1.7014, 3.2668, 3.3486, 3.3761, 2.9576], + device='cuda:3'), covar=tensor([0.1079, 0.0782, 0.1134, 0.5397, 0.1000, 0.1172, 0.1719, 0.1105], + device='cuda:3'), in_proj_covar=tensor([0.0413, 0.0312, 0.0340, 0.0429, 0.0330, 0.0304, 0.0326, 0.0273], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 02:58:11,325 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2007, 1.6193, 3.8774, 1.7973, 2.5530, 4.4070, 4.1931, 3.8580], + device='cuda:3'), covar=tensor([0.1025, 0.1476, 0.0367, 0.1905, 0.0883, 0.0177, 0.0320, 0.0496], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0258, 0.0224, 0.0263, 0.0230, 0.0209, 0.0248, 0.0270], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 02:58:21,760 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46460.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:58:25,654 INFO [train.py:901] (3/4) Epoch 6, batch 6050, loss[loss=0.2752, simple_loss=0.3427, pruned_loss=0.1038, over 8487.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.3411, pruned_loss=0.1049, over 1617480.71 frames. ], batch size: 49, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:58:56,115 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:59:02,121 INFO [train.py:901] (3/4) Epoch 6, batch 6100, loss[loss=0.2009, simple_loss=0.2756, pruned_loss=0.06315, over 7938.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3418, pruned_loss=0.1051, over 1619636.83 frames. ], batch size: 20, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:59:12,642 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 3.046e+02 3.657e+02 4.398e+02 9.620e+02, threshold=7.315e+02, percent-clipped=4.0 +2023-02-06 02:59:13,539 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 02:59:20,475 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2159, 1.5721, 3.3991, 1.4389, 2.2413, 3.7877, 3.6996, 3.2450], + device='cuda:3'), covar=tensor([0.0957, 0.1418, 0.0412, 0.2049, 0.0860, 0.0246, 0.0398, 0.0609], + device='cuda:3'), in_proj_covar=tensor([0.0237, 0.0266, 0.0230, 0.0268, 0.0237, 0.0213, 0.0256, 0.0278], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 02:59:24,583 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 02:59:37,764 INFO [train.py:901] (3/4) Epoch 6, batch 6150, loss[loss=0.3023, simple_loss=0.36, pruned_loss=0.1223, over 7933.00 frames. ], tot_loss[loss=0.276, simple_loss=0.3415, pruned_loss=0.1052, over 1617819.22 frames. 
], batch size: 20, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 02:59:56,266 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=46592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:11,873 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46613.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:13,815 INFO [train.py:901] (3/4) Epoch 6, batch 6200, loss[loss=0.2804, simple_loss=0.3568, pruned_loss=0.102, over 8734.00 frames. ], tot_loss[loss=0.2776, simple_loss=0.3425, pruned_loss=0.1064, over 1617095.86 frames. ], batch size: 49, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:00:14,154 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-06 03:00:14,732 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=46617.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:16,763 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:24,151 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.053e+02 3.051e+02 3.861e+02 4.926e+02 1.016e+03, threshold=7.722e+02, percent-clipped=3.0 +2023-02-06 03:00:28,968 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:00:48,696 INFO [train.py:901] (3/4) Epoch 6, batch 6250, loss[loss=0.2945, simple_loss=0.3606, pruned_loss=0.1142, over 8200.00 frames. ], tot_loss[loss=0.2756, simple_loss=0.3407, pruned_loss=0.1053, over 1616080.05 frames. ], batch size: 23, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:01:22,866 INFO [train.py:901] (3/4) Epoch 6, batch 6300, loss[loss=0.3172, simple_loss=0.3731, pruned_loss=0.1306, over 8245.00 frames. ], tot_loss[loss=0.274, simple_loss=0.339, pruned_loss=0.1045, over 1613158.16 frames. ], batch size: 24, lr: 1.24e-02, grad_scale: 8.0 +2023-02-06 03:01:24,468 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2859, 1.3231, 2.3280, 1.1568, 2.1760, 2.4972, 2.5171, 2.1399], + device='cuda:3'), covar=tensor([0.0973, 0.1094, 0.0430, 0.1917, 0.0511, 0.0370, 0.0491, 0.0707], + device='cuda:3'), in_proj_covar=tensor([0.0238, 0.0267, 0.0228, 0.0270, 0.0234, 0.0213, 0.0253, 0.0276], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 03:01:34,435 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.734e+02 3.399e+02 4.377e+02 1.449e+03, threshold=6.797e+02, percent-clipped=4.0 +2023-02-06 03:01:49,381 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=46753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:01:57,778 INFO [train.py:901] (3/4) Epoch 6, batch 6350, loss[loss=0.2546, simple_loss=0.3236, pruned_loss=0.09276, over 7650.00 frames. ], tot_loss[loss=0.2736, simple_loss=0.3386, pruned_loss=0.1043, over 1614559.87 frames. ], batch size: 19, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:02:32,250 INFO [train.py:901] (3/4) Epoch 6, batch 6400, loss[loss=0.2491, simple_loss=0.3003, pruned_loss=0.09892, over 7543.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3381, pruned_loss=0.1042, over 1611710.12 frames. 
], batch size: 18, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:02:41,198 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=46828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:02:43,104 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.744e+02 3.578e+02 4.396e+02 9.504e+02, threshold=7.157e+02, percent-clipped=5.0 +2023-02-06 03:03:07,338 INFO [train.py:901] (3/4) Epoch 6, batch 6450, loss[loss=0.3358, simple_loss=0.3914, pruned_loss=0.1402, over 8462.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.3374, pruned_loss=0.1037, over 1610496.07 frames. ], batch size: 29, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:15,539 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8642, 1.3223, 1.4064, 1.2091, 1.0665, 1.3194, 1.5280, 1.2917], + device='cuda:3'), covar=tensor([0.0609, 0.1338, 0.1909, 0.1489, 0.0630, 0.1626, 0.0763, 0.0668], + device='cuda:3'), in_proj_covar=tensor([0.0122, 0.0169, 0.0209, 0.0172, 0.0119, 0.0178, 0.0132, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 03:03:41,560 INFO [train.py:901] (3/4) Epoch 6, batch 6500, loss[loss=0.2621, simple_loss=0.3398, pruned_loss=0.09221, over 8536.00 frames. ], tot_loss[loss=0.273, simple_loss=0.3379, pruned_loss=0.1041, over 1610923.27 frames. ], batch size: 34, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:03:51,595 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.994e+02 3.001e+02 3.759e+02 4.377e+02 1.086e+03, threshold=7.517e+02, percent-clipped=1.0 +2023-02-06 03:04:09,448 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:14,549 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=46964.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:15,758 INFO [train.py:901] (3/4) Epoch 6, batch 6550, loss[loss=0.3234, simple_loss=0.3926, pruned_loss=0.1271, over 8107.00 frames. ], tot_loss[loss=0.2739, simple_loss=0.3387, pruned_loss=0.1046, over 1613653.54 frames. ], batch size: 23, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:37,517 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 03:04:45,803 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:04:50,982 INFO [train.py:901] (3/4) Epoch 6, batch 6600, loss[loss=0.2459, simple_loss=0.3309, pruned_loss=0.08043, over 8475.00 frames. ], tot_loss[loss=0.2751, simple_loss=0.34, pruned_loss=0.1051, over 1615223.11 frames. ], batch size: 25, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:04:56,435 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 03:05:01,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.072e+02 2.927e+02 3.687e+02 4.772e+02 1.123e+03, threshold=7.374e+02, percent-clipped=4.0 +2023-02-06 03:05:03,302 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:04,658 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3509, 2.8763, 1.9748, 2.0925, 2.2395, 1.5613, 1.8004, 2.2788], + device='cuda:3'), covar=tensor([0.1285, 0.0300, 0.0755, 0.0561, 0.0580, 0.1242, 0.0973, 0.0780], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0233, 0.0309, 0.0301, 0.0312, 0.0315, 0.0340, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 03:05:25,535 INFO [train.py:901] (3/4) Epoch 6, batch 6650, loss[loss=0.3755, simple_loss=0.4097, pruned_loss=0.1707, over 6680.00 frames. ], tot_loss[loss=0.2755, simple_loss=0.34, pruned_loss=0.1055, over 1611313.55 frames. ], batch size: 72, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:05:30,595 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:05:35,423 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:06:00,860 INFO [train.py:901] (3/4) Epoch 6, batch 6700, loss[loss=0.2716, simple_loss=0.3531, pruned_loss=0.09509, over 8466.00 frames. ], tot_loss[loss=0.2744, simple_loss=0.3396, pruned_loss=0.1046, over 1614142.08 frames. ], batch size: 25, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:07,896 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4998, 2.8412, 1.9031, 2.2487, 2.3155, 1.5590, 2.1773, 2.2303], + device='cuda:3'), covar=tensor([0.1174, 0.0257, 0.0773, 0.0511, 0.0574, 0.1186, 0.0749, 0.0714], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0231, 0.0309, 0.0299, 0.0309, 0.0314, 0.0337, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 03:06:12,485 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.777e+02 3.640e+02 4.922e+02 1.093e+03, threshold=7.281e+02, percent-clipped=6.0 +2023-02-06 03:06:33,731 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2456, 1.8905, 1.9061, 1.7861, 1.5654, 1.7740, 2.4555, 2.1151], + device='cuda:3'), covar=tensor([0.0474, 0.1140, 0.1791, 0.1232, 0.0529, 0.1541, 0.0590, 0.0597], + device='cuda:3'), in_proj_covar=tensor([0.0120, 0.0168, 0.0208, 0.0171, 0.0118, 0.0177, 0.0132, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 03:06:34,837 INFO [train.py:901] (3/4) Epoch 6, batch 6750, loss[loss=0.2691, simple_loss=0.342, pruned_loss=0.09805, over 8250.00 frames. ], tot_loss[loss=0.2734, simple_loss=0.3382, pruned_loss=0.1043, over 1611372.55 frames. ], batch size: 22, lr: 1.23e-02, grad_scale: 4.0 +2023-02-06 03:06:38,969 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:10,710 INFO [train.py:901] (3/4) Epoch 6, batch 6800, loss[loss=0.3078, simple_loss=0.3711, pruned_loss=0.1222, over 8497.00 frames. 
], tot_loss[loss=0.272, simple_loss=0.3374, pruned_loss=0.1033, over 1612382.17 frames. ], batch size: 26, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:12,675 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 03:07:21,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.942e+02 3.591e+02 4.804e+02 1.528e+03, threshold=7.182e+02, percent-clipped=7.0 +2023-02-06 03:07:35,889 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9453, 1.3154, 1.3736, 1.1479, 1.0569, 1.2741, 1.5308, 1.5881], + device='cuda:3'), covar=tensor([0.0539, 0.1385, 0.1798, 0.1453, 0.0674, 0.1732, 0.0739, 0.0559], + device='cuda:3'), in_proj_covar=tensor([0.0120, 0.0167, 0.0207, 0.0170, 0.0119, 0.0176, 0.0131, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0007, 0.0008, 0.0007, 0.0005, 0.0007, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 03:07:40,015 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:07:41,479 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8439, 2.9926, 2.5264, 4.2525, 1.7531, 1.7546, 2.4332, 3.2016], + device='cuda:3'), covar=tensor([0.0874, 0.1178, 0.1322, 0.0217, 0.1685, 0.2087, 0.1644, 0.0998], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0252, 0.0279, 0.0225, 0.0249, 0.0274, 0.0280, 0.0249], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 03:07:44,575 INFO [train.py:901] (3/4) Epoch 6, batch 6850, loss[loss=0.3601, simple_loss=0.4043, pruned_loss=0.158, over 8631.00 frames. ], tot_loss[loss=0.2714, simple_loss=0.3371, pruned_loss=0.1029, over 1609869.33 frames. ], batch size: 34, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:07:50,959 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7437, 1.5732, 3.4189, 1.3819, 2.2753, 3.7814, 3.6851, 3.3287], + device='cuda:3'), covar=tensor([0.0993, 0.1335, 0.0288, 0.1792, 0.0727, 0.0218, 0.0394, 0.0523], + device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0263, 0.0221, 0.0263, 0.0230, 0.0207, 0.0252, 0.0269], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 03:07:58,973 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:00,897 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 03:08:19,248 INFO [train.py:901] (3/4) Epoch 6, batch 6900, loss[loss=0.2924, simple_loss=0.3676, pruned_loss=0.1086, over 8562.00 frames. ], tot_loss[loss=0.2721, simple_loss=0.3378, pruned_loss=0.1032, over 1613946.47 frames. 
], batch size: 34, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:08:28,208 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:30,616 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 2.873e+02 3.537e+02 4.379e+02 9.664e+02, threshold=7.075e+02, percent-clipped=2.0 +2023-02-06 03:08:32,865 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:44,861 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:49,798 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:08:54,247 INFO [train.py:901] (3/4) Epoch 6, batch 6950, loss[loss=0.2471, simple_loss=0.3321, pruned_loss=0.08103, over 8507.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3371, pruned_loss=0.1024, over 1616118.56 frames. ], batch size: 48, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:09:09,780 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 03:09:15,148 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47397.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:09:15,352 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 03:09:28,483 INFO [train.py:901] (3/4) Epoch 6, batch 7000, loss[loss=0.2259, simple_loss=0.2981, pruned_loss=0.07689, over 7703.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3371, pruned_loss=0.1023, over 1615185.91 frames. ], batch size: 18, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:09:39,921 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.860e+02 2.784e+02 3.553e+02 4.437e+02 1.281e+03, threshold=7.106e+02, percent-clipped=4.0 +2023-02-06 03:10:03,577 INFO [train.py:901] (3/4) Epoch 6, batch 7050, loss[loss=0.2419, simple_loss=0.3017, pruned_loss=0.09103, over 7795.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.338, pruned_loss=0.1036, over 1614650.39 frames. ], batch size: 19, lr: 1.23e-02, grad_scale: 8.0 +2023-02-06 03:10:18,702 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1045, 3.8740, 2.2842, 2.5931, 2.8769, 2.0066, 2.6883, 3.0033], + device='cuda:3'), covar=tensor([0.1295, 0.0273, 0.0847, 0.0659, 0.0623, 0.1077, 0.0864, 0.0805], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0232, 0.0307, 0.0299, 0.0314, 0.0312, 0.0342, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 03:10:19,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4893, 1.2330, 4.6477, 1.7736, 3.9009, 3.9334, 4.1240, 4.0310], + device='cuda:3'), covar=tensor([0.0488, 0.4128, 0.0344, 0.2765, 0.1157, 0.0684, 0.0491, 0.0598], + device='cuda:3'), in_proj_covar=tensor([0.0369, 0.0508, 0.0450, 0.0445, 0.0508, 0.0424, 0.0432, 0.0475], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-06 03:10:37,641 INFO [train.py:901] (3/4) Epoch 6, batch 7100, loss[loss=0.2565, simple_loss=0.3223, pruned_loss=0.0954, over 7974.00 frames. ], tot_loss[loss=0.2732, simple_loss=0.3387, pruned_loss=0.1039, over 1617187.33 frames. 
], batch size: 21, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:10:48,827 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.154e+02 3.207e+02 3.842e+02 5.073e+02 1.424e+03, threshold=7.684e+02, percent-clipped=2.0 +2023-02-06 03:10:56,280 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:00,495 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.29 vs. limit=5.0 +2023-02-06 03:11:12,602 INFO [train.py:901] (3/4) Epoch 6, batch 7150, loss[loss=0.2912, simple_loss=0.3555, pruned_loss=0.1134, over 8601.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.3379, pruned_loss=0.1038, over 1615312.58 frames. ], batch size: 39, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:14,091 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:37,946 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:11:46,759 INFO [train.py:901] (3/4) Epoch 6, batch 7200, loss[loss=0.3385, simple_loss=0.3896, pruned_loss=0.1437, over 7197.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.3368, pruned_loss=0.104, over 1606679.35 frames. ], batch size: 72, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:11:57,791 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.983e+02 3.737e+02 4.630e+02 8.445e+02, threshold=7.473e+02, percent-clipped=4.0 +2023-02-06 03:12:02,846 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5730, 1.3389, 3.0811, 1.4019, 2.1791, 3.3646, 3.3357, 2.9315], + device='cuda:3'), covar=tensor([0.1096, 0.1313, 0.0355, 0.1844, 0.0720, 0.0253, 0.0411, 0.0589], + device='cuda:3'), in_proj_covar=tensor([0.0240, 0.0266, 0.0225, 0.0268, 0.0232, 0.0209, 0.0256, 0.0275], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 03:12:22,028 INFO [train.py:901] (3/4) Epoch 6, batch 7250, loss[loss=0.2819, simple_loss=0.3457, pruned_loss=0.1091, over 8285.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3367, pruned_loss=0.1036, over 1608685.50 frames. ], batch size: 23, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:42,080 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 03:12:45,389 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2093, 1.7759, 2.7498, 2.1189, 2.2293, 1.9386, 1.5175, 0.9999], + device='cuda:3'), covar=tensor([0.2169, 0.2390, 0.0551, 0.1431, 0.1176, 0.1337, 0.1278, 0.2443], + device='cuda:3'), in_proj_covar=tensor([0.0826, 0.0755, 0.0655, 0.0745, 0.0850, 0.0694, 0.0654, 0.0693], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:12:56,481 INFO [train.py:901] (3/4) Epoch 6, batch 7300, loss[loss=0.2848, simple_loss=0.3558, pruned_loss=0.1068, over 8630.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.3373, pruned_loss=0.1038, over 1605631.29 frames. 
], batch size: 34, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:12:57,903 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:07,209 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.066e+02 3.071e+02 3.696e+02 4.839e+02 1.031e+03, threshold=7.393e+02, percent-clipped=2.0 +2023-02-06 03:13:13,285 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=47741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:23,407 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:13:30,060 INFO [train.py:901] (3/4) Epoch 6, batch 7350, loss[loss=0.3171, simple_loss=0.3758, pruned_loss=0.1292, over 8473.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3368, pruned_loss=0.1031, over 1604439.58 frames. ], batch size: 25, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:13:45,831 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 03:13:48,798 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 03:13:59,324 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:05,871 INFO [train.py:901] (3/4) Epoch 6, batch 7400, loss[loss=0.2034, simple_loss=0.2806, pruned_loss=0.0631, over 7447.00 frames. ], tot_loss[loss=0.2709, simple_loss=0.3355, pruned_loss=0.1031, over 1600293.26 frames. ], batch size: 17, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:08,036 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 03:14:13,527 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=47827.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:14:17,406 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 3.124e+02 3.904e+02 4.877e+02 9.892e+02, threshold=7.808e+02, percent-clipped=5.0 +2023-02-06 03:14:33,733 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=47856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:14:40,242 INFO [train.py:901] (3/4) Epoch 6, batch 7450, loss[loss=0.2723, simple_loss=0.329, pruned_loss=0.1078, over 7531.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.338, pruned_loss=0.1047, over 1605447.82 frames. ], batch size: 18, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:14:46,244 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 03:15:15,311 INFO [train.py:901] (3/4) Epoch 6, batch 7500, loss[loss=0.2166, simple_loss=0.2986, pruned_loss=0.06732, over 8365.00 frames. ], tot_loss[loss=0.2737, simple_loss=0.3383, pruned_loss=0.1046, over 1607524.26 frames. ], batch size: 24, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:25,969 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.098e+02 3.102e+02 3.706e+02 4.699e+02 1.511e+03, threshold=7.412e+02, percent-clipped=9.0 +2023-02-06 03:15:49,279 INFO [train.py:901] (3/4) Epoch 6, batch 7550, loss[loss=0.2508, simple_loss=0.3317, pruned_loss=0.08498, over 8333.00 frames. ], tot_loss[loss=0.2727, simple_loss=0.3376, pruned_loss=0.1039, over 1607707.82 frames. 
], batch size: 25, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:15:54,927 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=47974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:04,325 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5343, 2.9590, 3.0524, 1.8997, 1.4498, 3.0062, 0.5399, 2.0888], + device='cuda:3'), covar=tensor([0.3244, 0.1844, 0.1197, 0.3983, 0.6298, 0.0784, 0.5406, 0.2046], + device='cuda:3'), in_proj_covar=tensor([0.0138, 0.0132, 0.0081, 0.0182, 0.0226, 0.0082, 0.0142, 0.0136], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:16:11,854 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=47999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:16:24,750 INFO [train.py:901] (3/4) Epoch 6, batch 7600, loss[loss=0.2947, simple_loss=0.3457, pruned_loss=0.1218, over 8072.00 frames. ], tot_loss[loss=0.2722, simple_loss=0.3374, pruned_loss=0.1035, over 1612573.38 frames. ], batch size: 21, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:16:27,980 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 03:16:34,272 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 03:16:37,192 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.897e+02 3.536e+02 4.611e+02 2.294e+03, threshold=7.072e+02, percent-clipped=5.0 +2023-02-06 03:17:01,525 INFO [train.py:901] (3/4) Epoch 6, batch 7650, loss[loss=0.213, simple_loss=0.295, pruned_loss=0.06553, over 7668.00 frames. ], tot_loss[loss=0.2715, simple_loss=0.3369, pruned_loss=0.103, over 1611054.98 frames. ], batch size: 19, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:17,558 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48090.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:24,181 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:32,462 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48112.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:34,935 INFO [train.py:901] (3/4) Epoch 6, batch 7700, loss[loss=0.2675, simple_loss=0.3221, pruned_loss=0.1064, over 7268.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3371, pruned_loss=0.103, over 1613580.86 frames. ], batch size: 16, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:17:46,038 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.945e+02 2.821e+02 3.617e+02 4.667e+02 9.808e+02, threshold=7.234e+02, percent-clipped=3.0 +2023-02-06 03:17:50,861 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:17:57,309 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 03:17:59,320 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:03,433 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1845, 3.1641, 2.9098, 1.4904, 2.8168, 2.7131, 2.9209, 2.5215], + device='cuda:3'), covar=tensor([0.1476, 0.0959, 0.1472, 0.4738, 0.1115, 0.1531, 0.1886, 0.1224], + device='cuda:3'), in_proj_covar=tensor([0.0403, 0.0310, 0.0332, 0.0412, 0.0324, 0.0301, 0.0311, 0.0268], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:18:10,106 INFO [train.py:901] (3/4) Epoch 6, batch 7750, loss[loss=0.292, simple_loss=0.3577, pruned_loss=0.1131, over 8296.00 frames. ], tot_loss[loss=0.2726, simple_loss=0.3382, pruned_loss=0.1035, over 1616209.38 frames. ], batch size: 23, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:13,414 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48171.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:18:43,311 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:18:43,803 INFO [train.py:901] (3/4) Epoch 6, batch 7800, loss[loss=0.2842, simple_loss=0.3496, pruned_loss=0.1094, over 8500.00 frames. ], tot_loss[loss=0.2735, simple_loss=0.3392, pruned_loss=0.1039, over 1618146.53 frames. ], batch size: 26, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:18:47,460 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3021, 2.2533, 1.4668, 1.9861, 1.8503, 1.2660, 1.6289, 1.7393], + device='cuda:3'), covar=tensor([0.1009, 0.0290, 0.0949, 0.0426, 0.0573, 0.1127, 0.0725, 0.0648], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0230, 0.0316, 0.0302, 0.0313, 0.0316, 0.0341, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 03:18:53,437 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48230.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:18:54,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 3.053e+02 3.731e+02 4.789e+02 1.133e+03, threshold=7.462e+02, percent-clipped=3.0 +2023-02-06 03:19:16,584 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48265.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:19:17,072 INFO [train.py:901] (3/4) Epoch 6, batch 7850, loss[loss=0.2397, simple_loss=0.3084, pruned_loss=0.08547, over 7927.00 frames. ], tot_loss[loss=0.2734, simple_loss=0.3395, pruned_loss=0.1037, over 1622738.12 frames. ], batch size: 20, lr: 1.22e-02, grad_scale: 8.0 +2023-02-06 03:19:30,593 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48286.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:19:51,000 INFO [train.py:901] (3/4) Epoch 6, batch 7900, loss[loss=0.2562, simple_loss=0.3351, pruned_loss=0.08867, over 8314.00 frames. ], tot_loss[loss=0.2729, simple_loss=0.3391, pruned_loss=0.1034, over 1620618.08 frames. 
], batch size: 25, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:19:57,278 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2455, 4.2239, 3.7717, 1.9487, 3.7670, 3.7406, 3.9525, 3.3625], + device='cuda:3'), covar=tensor([0.0860, 0.0638, 0.0967, 0.4837, 0.0870, 0.0980, 0.1194, 0.1195], + device='cuda:3'), in_proj_covar=tensor([0.0402, 0.0311, 0.0333, 0.0413, 0.0322, 0.0302, 0.0310, 0.0267], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:20:01,872 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.927e+02 3.494e+02 4.326e+02 7.205e+02, threshold=6.988e+02, percent-clipped=0.0 +2023-02-06 03:20:09,338 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:25,105 INFO [train.py:901] (3/4) Epoch 6, batch 7950, loss[loss=0.2609, simple_loss=0.3209, pruned_loss=0.1005, over 7645.00 frames. ], tot_loss[loss=0.2718, simple_loss=0.3378, pruned_loss=0.1029, over 1622202.44 frames. ], batch size: 19, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:20:58,689 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48415.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:20:59,237 INFO [train.py:901] (3/4) Epoch 6, batch 8000, loss[loss=0.2668, simple_loss=0.3348, pruned_loss=0.09936, over 8474.00 frames. ], tot_loss[loss=0.2716, simple_loss=0.3378, pruned_loss=0.1027, over 1621555.24 frames. ], batch size: 28, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:10,358 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.174e+02 2.873e+02 3.488e+02 4.217e+02 8.104e+02, threshold=6.977e+02, percent-clipped=2.0 +2023-02-06 03:21:11,764 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:16,779 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:33,917 INFO [train.py:901] (3/4) Epoch 6, batch 8050, loss[loss=0.216, simple_loss=0.2816, pruned_loss=0.07518, over 7554.00 frames. ], tot_loss[loss=0.271, simple_loss=0.3361, pruned_loss=0.103, over 1596911.05 frames. ], batch size: 18, lr: 1.21e-02, grad_scale: 8.0 +2023-02-06 03:21:36,916 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3939, 2.2907, 3.1511, 2.0686, 2.6774, 3.4821, 3.2304, 3.1690], + device='cuda:3'), covar=tensor([0.0709, 0.0920, 0.0593, 0.1362, 0.0783, 0.0221, 0.0438, 0.0456], + device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0273, 0.0225, 0.0268, 0.0234, 0.0211, 0.0260, 0.0279], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 03:21:37,664 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:21:54,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48496.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:07,093 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 03:22:10,949 INFO [train.py:901] (3/4) Epoch 7, batch 0, loss[loss=0.2996, simple_loss=0.3591, pruned_loss=0.1201, over 8558.00 frames. ], tot_loss[loss=0.2996, simple_loss=0.3591, pruned_loss=0.1201, over 8558.00 frames. 
], batch size: 39, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:10,950 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 03:22:22,759 INFO [train.py:935] (3/4) Epoch 7, validation: loss=0.2113, simple_loss=0.3091, pruned_loss=0.05678, over 944034.00 frames. +2023-02-06 03:22:22,760 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 03:22:28,414 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8674, 2.1743, 1.7800, 2.6929, 1.1407, 1.5530, 1.7914, 2.1690], + device='cuda:3'), covar=tensor([0.0964, 0.0941, 0.1279, 0.0465, 0.1399, 0.1713, 0.1222, 0.0895], + device='cuda:3'), in_proj_covar=tensor([0.0263, 0.0239, 0.0276, 0.0221, 0.0240, 0.0267, 0.0273, 0.0244], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 03:22:37,613 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:38,081 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 03:22:39,699 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6350, 1.8537, 2.1963, 1.0464, 2.3379, 1.3406, 0.7125, 1.6980], + device='cuda:3'), covar=tensor([0.0287, 0.0186, 0.0110, 0.0281, 0.0140, 0.0412, 0.0418, 0.0153], + device='cuda:3'), in_proj_covar=tensor([0.0340, 0.0251, 0.0203, 0.0302, 0.0238, 0.0385, 0.0310, 0.0287], + device='cuda:3'), out_proj_covar=tensor([1.1150e-04, 8.0551e-05, 6.3816e-05, 9.6298e-05, 7.6935e-05, 1.3405e-04, + 1.0118e-04, 9.2305e-05], device='cuda:3') +2023-02-06 03:22:41,674 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6137, 2.8933, 2.3544, 4.1629, 1.5997, 1.8397, 2.2062, 2.7918], + device='cuda:3'), covar=tensor([0.0926, 0.1224, 0.1455, 0.0298, 0.1785, 0.2031, 0.1817, 0.1315], + device='cuda:3'), in_proj_covar=tensor([0.0266, 0.0242, 0.0278, 0.0224, 0.0244, 0.0271, 0.0277, 0.0247], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 03:22:45,429 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.727e+02 3.570e+02 4.321e+02 1.428e+03, threshold=7.140e+02, percent-clipped=5.0 +2023-02-06 03:22:53,284 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48542.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:22:55,826 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:22:56,687 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 03:22:57,631 INFO [train.py:901] (3/4) Epoch 7, batch 50, loss[loss=0.2501, simple_loss=0.3189, pruned_loss=0.09067, over 8093.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3328, pruned_loss=0.1008, over 362824.91 frames. ], batch size: 21, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:22:57,789 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:23:09,791 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48567.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:23:12,926 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 03:23:14,280 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48574.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:23:31,378 INFO [train.py:901] (3/4) Epoch 7, batch 100, loss[loss=0.3118, simple_loss=0.3703, pruned_loss=0.1267, over 8129.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3345, pruned_loss=0.1017, over 642318.41 frames. ], batch size: 22, lr: 1.14e-02, grad_scale: 8.0 +2023-02-06 03:23:34,987 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 03:23:44,206 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5141, 2.2897, 4.4781, 1.3061, 3.0203, 2.1785, 1.7053, 2.5153], + device='cuda:3'), covar=tensor([0.1565, 0.1809, 0.0589, 0.3158, 0.1315, 0.2290, 0.1509, 0.2225], + device='cuda:3'), in_proj_covar=tensor([0.0474, 0.0473, 0.0526, 0.0547, 0.0594, 0.0534, 0.0451, 0.0592], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 03:23:54,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.847e+02 2.936e+02 3.434e+02 4.642e+02 8.961e+02, threshold=6.868e+02, percent-clipped=3.0 +2023-02-06 03:24:06,735 INFO [train.py:901] (3/4) Epoch 7, batch 150, loss[loss=0.241, simple_loss=0.3215, pruned_loss=0.08025, over 8135.00 frames. ], tot_loss[loss=0.2702, simple_loss=0.336, pruned_loss=0.1022, over 860941.54 frames. ], batch size: 22, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:24:28,699 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2373, 1.4659, 2.2279, 1.0884, 1.6126, 1.4903, 1.3236, 1.5135], + device='cuda:3'), covar=tensor([0.1527, 0.1835, 0.0701, 0.3083, 0.1448, 0.2593, 0.1561, 0.1652], + device='cuda:3'), in_proj_covar=tensor([0.0470, 0.0470, 0.0523, 0.0541, 0.0591, 0.0527, 0.0448, 0.0587], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 03:24:31,975 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:24:34,069 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48689.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:24:40,631 INFO [train.py:901] (3/4) Epoch 7, batch 200, loss[loss=0.2974, simple_loss=0.3595, pruned_loss=0.1176, over 8633.00 frames. ], tot_loss[loss=0.2704, simple_loss=0.3358, pruned_loss=0.1024, over 1026603.26 frames. ], batch size: 39, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:03,499 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.629e+02 3.306e+02 4.274e+02 1.004e+03, threshold=6.612e+02, percent-clipped=3.0 +2023-02-06 03:25:15,498 INFO [train.py:901] (3/4) Epoch 7, batch 250, loss[loss=0.2793, simple_loss=0.3485, pruned_loss=0.1051, over 8684.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3356, pruned_loss=0.1012, over 1161054.12 frames. ], batch size: 39, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:22,711 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48759.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:26,770 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 03:25:35,605 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-06 03:25:41,109 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=48785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:44,865 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 03:25:50,539 INFO [train.py:901] (3/4) Epoch 7, batch 300, loss[loss=0.2632, simple_loss=0.3373, pruned_loss=0.09461, over 8556.00 frames. ], tot_loss[loss=0.2704, simple_loss=0.3372, pruned_loss=0.1018, over 1266871.54 frames. ], batch size: 31, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:25:52,230 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:25:54,978 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48805.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:12,387 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:13,528 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.973e+02 3.476e+02 4.340e+02 1.124e+03, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 03:26:18,391 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:18,505 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2751, 1.8020, 2.9008, 2.2864, 2.5057, 2.0384, 1.5099, 1.1888], + device='cuda:3'), covar=tensor([0.2445, 0.2681, 0.0640, 0.1437, 0.1183, 0.1323, 0.1293, 0.2577], + device='cuda:3'), in_proj_covar=tensor([0.0800, 0.0742, 0.0640, 0.0731, 0.0829, 0.0683, 0.0638, 0.0672], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:26:25,098 INFO [train.py:901] (3/4) Epoch 7, batch 350, loss[loss=0.2757, simple_loss=0.3463, pruned_loss=0.1026, over 8477.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3366, pruned_loss=0.1009, over 1349370.12 frames. ], batch size: 25, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:26:30,605 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=48856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:34,339 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 03:26:43,222 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:26:49,431 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.39 vs. limit=5.0 +2023-02-06 03:27:00,371 INFO [train.py:901] (3/4) Epoch 7, batch 400, loss[loss=0.2129, simple_loss=0.2799, pruned_loss=0.07296, over 7425.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3345, pruned_loss=0.1002, over 1406979.28 frames. ], batch size: 17, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:27:01,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=48900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:27:06,078 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. 
limit=2.0 +2023-02-06 03:27:09,953 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4688, 1.9569, 3.3733, 1.2238, 2.3679, 1.9533, 1.6369, 2.1742], + device='cuda:3'), covar=tensor([0.1488, 0.1627, 0.0536, 0.3009, 0.1206, 0.2134, 0.1380, 0.1743], + device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0467, 0.0524, 0.0542, 0.0585, 0.0524, 0.0451, 0.0584], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 03:27:22,460 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.855e+02 2.734e+02 3.619e+02 4.506e+02 1.679e+03, threshold=7.237e+02, percent-clipped=8.0 +2023-02-06 03:27:29,397 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4113, 4.3078, 3.9923, 1.9108, 3.8631, 3.9463, 3.9767, 3.5207], + device='cuda:3'), covar=tensor([0.0675, 0.0597, 0.0911, 0.4606, 0.0873, 0.0828, 0.1458, 0.0793], + device='cuda:3'), in_proj_covar=tensor([0.0413, 0.0321, 0.0344, 0.0427, 0.0333, 0.0315, 0.0320, 0.0271], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:27:32,148 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=48945.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:27:34,591 INFO [train.py:901] (3/4) Epoch 7, batch 450, loss[loss=0.2272, simple_loss=0.2955, pruned_loss=0.0794, over 7919.00 frames. ], tot_loss[loss=0.2686, simple_loss=0.336, pruned_loss=0.1006, over 1454575.77 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:27:48,253 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9534, 6.0602, 5.1113, 2.3833, 5.2257, 5.5897, 5.4876, 5.1515], + device='cuda:3'), covar=tensor([0.0668, 0.0469, 0.1040, 0.5015, 0.0711, 0.0622, 0.1432, 0.0598], + device='cuda:3'), in_proj_covar=tensor([0.0416, 0.0321, 0.0344, 0.0428, 0.0334, 0.0316, 0.0320, 0.0272], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:27:49,656 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=48970.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:28:10,030 INFO [train.py:901] (3/4) Epoch 7, batch 500, loss[loss=0.2997, simple_loss=0.3616, pruned_loss=0.1189, over 8452.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3341, pruned_loss=0.09991, over 1487477.70 frames. ], batch size: 27, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:28:32,337 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.572e+02 3.184e+02 4.227e+02 8.649e+02, threshold=6.369e+02, percent-clipped=1.0 +2023-02-06 03:28:43,905 INFO [train.py:901] (3/4) Epoch 7, batch 550, loss[loss=0.309, simple_loss=0.3723, pruned_loss=0.1229, over 8492.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3347, pruned_loss=0.1004, over 1515404.97 frames. ], batch size: 26, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:28:50,297 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:05,360 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. 
limit=2.0 +2023-02-06 03:29:06,570 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1256, 2.6689, 3.2338, 1.1123, 3.1161, 1.9946, 1.5581, 1.8359], + device='cuda:3'), covar=tensor([0.0376, 0.0152, 0.0111, 0.0373, 0.0258, 0.0438, 0.0403, 0.0236], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0256, 0.0212, 0.0315, 0.0245, 0.0403, 0.0322, 0.0298], + device='cuda:3'), out_proj_covar=tensor([1.1610e-04, 8.1755e-05, 6.6466e-05, 1.0013e-04, 7.8668e-05, 1.3977e-04, + 1.0509e-04, 9.5720e-05], device='cuda:3') +2023-02-06 03:29:07,260 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:19,500 INFO [train.py:901] (3/4) Epoch 7, batch 600, loss[loss=0.2754, simple_loss=0.3429, pruned_loss=0.104, over 8671.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3361, pruned_loss=0.1013, over 1543369.09 frames. ], batch size: 39, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:29:24,352 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3676, 1.3903, 4.4064, 2.0435, 2.3361, 5.0660, 4.9207, 4.4468], + device='cuda:3'), covar=tensor([0.0934, 0.1500, 0.0231, 0.1725, 0.0975, 0.0190, 0.0312, 0.0556], + device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0275, 0.0228, 0.0272, 0.0237, 0.0214, 0.0267, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 03:29:31,447 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 03:29:41,663 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49130.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:42,821 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.845e+02 3.510e+02 4.694e+02 1.227e+03, threshold=7.020e+02, percent-clipped=5.0 +2023-02-06 03:29:54,465 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.57 vs. limit=2.0 +2023-02-06 03:29:54,594 INFO [train.py:901] (3/4) Epoch 7, batch 650, loss[loss=0.2442, simple_loss=0.3225, pruned_loss=0.08296, over 7919.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3346, pruned_loss=0.1006, over 1554542.59 frames. ], batch size: 20, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:29:58,972 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:29:59,709 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:13,069 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.68 vs. limit=5.0 +2023-02-06 03:30:17,667 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:18,988 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:29,822 INFO [train.py:901] (3/4) Epoch 7, batch 700, loss[loss=0.294, simple_loss=0.355, pruned_loss=0.1165, over 8657.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.335, pruned_loss=0.1006, over 1569490.78 frames. 
], batch size: 39, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:30:31,258 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:30:43,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3414, 1.6549, 3.6677, 1.1202, 2.5342, 1.9617, 1.4702, 2.4777], + device='cuda:3'), covar=tensor([0.2069, 0.2807, 0.0664, 0.4271, 0.1380, 0.2778, 0.2025, 0.2069], + device='cuda:3'), in_proj_covar=tensor([0.0476, 0.0474, 0.0529, 0.0550, 0.0589, 0.0528, 0.0453, 0.0594], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 03:30:54,555 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.106e+02 2.876e+02 3.436e+02 4.276e+02 6.994e+02, threshold=6.873e+02, percent-clipped=0.0 +2023-02-06 03:31:06,146 INFO [train.py:901] (3/4) Epoch 7, batch 750, loss[loss=0.2294, simple_loss=0.289, pruned_loss=0.08486, over 7781.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3345, pruned_loss=0.1004, over 1583006.02 frames. ], batch size: 19, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:31:09,779 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:18,085 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 03:31:26,429 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 03:31:28,182 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 03:31:40,968 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:31:41,430 INFO [train.py:901] (3/4) Epoch 7, batch 800, loss[loss=0.2281, simple_loss=0.298, pruned_loss=0.07912, over 8078.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3347, pruned_loss=0.1006, over 1591806.50 frames. ], batch size: 21, lr: 1.13e-02, grad_scale: 16.0 +2023-02-06 03:31:53,367 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:32:05,543 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.832e+02 3.318e+02 4.162e+02 1.224e+03, threshold=6.636e+02, percent-clipped=6.0 +2023-02-06 03:32:17,934 INFO [train.py:901] (3/4) Epoch 7, batch 850, loss[loss=0.253, simple_loss=0.3099, pruned_loss=0.09806, over 7712.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3346, pruned_loss=0.1008, over 1597249.64 frames. 
], batch size: 18, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:32:22,243 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5338, 2.7230, 1.7834, 2.2654, 2.2788, 1.3102, 1.9205, 2.1005], + device='cuda:3'), covar=tensor([0.1120, 0.0266, 0.0831, 0.0452, 0.0579, 0.1268, 0.0881, 0.0737], + device='cuda:3'), in_proj_covar=tensor([0.0343, 0.0229, 0.0311, 0.0296, 0.0308, 0.0313, 0.0338, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 03:32:31,305 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7675, 2.0829, 2.3722, 1.7748, 1.0822, 2.4012, 0.4249, 1.3541], + device='cuda:3'), covar=tensor([0.3193, 0.2400, 0.0657, 0.2592, 0.6466, 0.0442, 0.5227, 0.2640], + device='cuda:3'), in_proj_covar=tensor([0.0141, 0.0131, 0.0082, 0.0183, 0.0227, 0.0082, 0.0144, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:32:52,545 INFO [train.py:901] (3/4) Epoch 7, batch 900, loss[loss=0.2661, simple_loss=0.3447, pruned_loss=0.09369, over 8483.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3354, pruned_loss=0.1011, over 1605561.51 frames. ], batch size: 25, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:33:17,133 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.812e+02 3.278e+02 4.578e+02 1.649e+03, threshold=6.556e+02, percent-clipped=8.0 +2023-02-06 03:33:22,001 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:33:28,079 INFO [train.py:901] (3/4) Epoch 7, batch 950, loss[loss=0.2787, simple_loss=0.3482, pruned_loss=0.1046, over 8462.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3348, pruned_loss=0.1004, over 1608240.38 frames. ], batch size: 29, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:33:28,880 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49450.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:33:35,919 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 03:33:50,518 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 03:34:03,658 INFO [train.py:901] (3/4) Epoch 7, batch 1000, loss[loss=0.2921, simple_loss=0.3574, pruned_loss=0.1134, over 8605.00 frames. ], tot_loss[loss=0.2669, simple_loss=0.3341, pruned_loss=0.09986, over 1608798.76 frames. ], batch size: 34, lr: 1.13e-02, grad_scale: 8.0 +2023-02-06 03:34:24,199 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 03:34:27,690 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.937e+02 3.091e+02 3.599e+02 4.515e+02 1.445e+03, threshold=7.198e+02, percent-clipped=7.0 +2023-02-06 03:34:35,923 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 03:34:38,730 INFO [train.py:901] (3/4) Epoch 7, batch 1050, loss[loss=0.2238, simple_loss=0.3, pruned_loss=0.07377, over 7931.00 frames. ], tot_loss[loss=0.2676, simple_loss=0.3345, pruned_loss=0.1004, over 1609268.77 frames. 
], batch size: 20, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:34:43,053 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:34:54,544 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49571.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:00,751 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49579.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:13,244 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:14,558 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:35:14,686 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6081, 1.9065, 2.0998, 1.5999, 0.9636, 2.1710, 0.3778, 1.0729], + device='cuda:3'), covar=tensor([0.4010, 0.2179, 0.1200, 0.3153, 0.7070, 0.0704, 0.5242, 0.3463], + device='cuda:3'), in_proj_covar=tensor([0.0141, 0.0132, 0.0083, 0.0183, 0.0227, 0.0084, 0.0143, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:35:15,138 INFO [train.py:901] (3/4) Epoch 7, batch 1100, loss[loss=0.286, simple_loss=0.35, pruned_loss=0.111, over 8460.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3338, pruned_loss=0.09942, over 1614287.47 frames. ], batch size: 25, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:35:38,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.978e+02 2.766e+02 3.386e+02 4.310e+02 6.415e+02, threshold=6.771e+02, percent-clipped=0.0 +2023-02-06 03:35:45,264 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 03:35:46,076 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 03:35:49,188 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.99 vs. limit=2.0 +2023-02-06 03:35:49,445 INFO [train.py:901] (3/4) Epoch 7, batch 1150, loss[loss=0.2264, simple_loss=0.2941, pruned_loss=0.07932, over 8234.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3347, pruned_loss=0.09971, over 1616386.90 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:36:23,365 INFO [train.py:901] (3/4) Epoch 7, batch 1200, loss[loss=0.2475, simple_loss=0.3256, pruned_loss=0.08473, over 8137.00 frames. ], tot_loss[loss=0.2675, simple_loss=0.3351, pruned_loss=0.09995, over 1612538.35 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:36:33,695 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:36:47,167 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.006e+02 2.915e+02 3.820e+02 5.048e+02 1.193e+03, threshold=7.640e+02, percent-clipped=11.0 +2023-02-06 03:36:57,980 INFO [train.py:901] (3/4) Epoch 7, batch 1250, loss[loss=0.2535, simple_loss=0.331, pruned_loss=0.08803, over 7976.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3337, pruned_loss=0.09862, over 1616947.00 frames. 
], batch size: 21, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:37:22,190 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:37:22,260 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=49784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:37:29,679 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=49794.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:37:32,811 INFO [train.py:901] (3/4) Epoch 7, batch 1300, loss[loss=0.4713, simple_loss=0.4731, pruned_loss=0.2348, over 7041.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3349, pruned_loss=0.09934, over 1615180.59 frames. ], batch size: 72, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:37:57,663 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.650e+02 3.390e+02 4.402e+02 9.600e+02, threshold=6.781e+02, percent-clipped=3.0 +2023-02-06 03:38:08,116 INFO [train.py:901] (3/4) Epoch 7, batch 1350, loss[loss=0.2521, simple_loss=0.3271, pruned_loss=0.08851, over 7965.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3359, pruned_loss=0.1006, over 1612444.35 frames. ], batch size: 21, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:38:38,485 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0881, 1.2966, 1.1400, 0.4128, 1.2205, 0.9146, 0.0819, 1.1563], + device='cuda:3'), covar=tensor([0.0181, 0.0141, 0.0130, 0.0231, 0.0153, 0.0443, 0.0340, 0.0135], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0263, 0.0216, 0.0309, 0.0251, 0.0404, 0.0319, 0.0297], + device='cuda:3'), out_proj_covar=tensor([1.1481e-04, 8.3460e-05, 6.7401e-05, 9.7767e-05, 8.0563e-05, 1.3962e-04, + 1.0383e-04, 9.4980e-05], device='cuda:3') +2023-02-06 03:38:42,146 INFO [train.py:901] (3/4) Epoch 7, batch 1400, loss[loss=0.2311, simple_loss=0.2916, pruned_loss=0.08533, over 7802.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3357, pruned_loss=0.1005, over 1616769.77 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:38:43,022 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:38:49,798 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=49909.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:39:07,158 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 2.921e+02 3.790e+02 4.996e+02 8.997e+02, threshold=7.579e+02, percent-clipped=6.0 +2023-02-06 03:39:11,351 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 03:39:18,097 INFO [train.py:901] (3/4) Epoch 7, batch 1450, loss[loss=0.2857, simple_loss=0.3575, pruned_loss=0.107, over 8460.00 frames. ], tot_loss[loss=0.2691, simple_loss=0.3364, pruned_loss=0.1009, over 1618318.35 frames. ], batch size: 25, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:39:31,697 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=49969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:39:49,094 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=49994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:39:52,397 INFO [train.py:901] (3/4) Epoch 7, batch 1500, loss[loss=0.2611, simple_loss=0.3285, pruned_loss=0.09679, over 7979.00 frames. ], tot_loss[loss=0.2674, simple_loss=0.3348, pruned_loss=0.1, over 1613851.70 frames. 
], batch size: 21, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:40:16,582 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.823e+02 3.555e+02 4.038e+02 9.229e+02, threshold=7.110e+02, percent-clipped=3.0 +2023-02-06 03:40:27,896 INFO [train.py:901] (3/4) Epoch 7, batch 1550, loss[loss=0.3849, simple_loss=0.4369, pruned_loss=0.1664, over 8507.00 frames. ], tot_loss[loss=0.27, simple_loss=0.3374, pruned_loss=0.1013, over 1620684.62 frames. ], batch size: 26, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:02,320 INFO [train.py:901] (3/4) Epoch 7, batch 1600, loss[loss=0.2978, simple_loss=0.3612, pruned_loss=0.1172, over 8028.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3379, pruned_loss=0.1014, over 1623134.60 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:12,237 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.07 vs. limit=5.0 +2023-02-06 03:41:22,667 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:25,920 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.849e+02 3.464e+02 4.418e+02 7.019e+02, threshold=6.928e+02, percent-clipped=0.0 +2023-02-06 03:41:35,383 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:37,167 INFO [train.py:901] (3/4) Epoch 7, batch 1650, loss[loss=0.2542, simple_loss=0.3203, pruned_loss=0.09399, over 7916.00 frames. ], tot_loss[loss=0.269, simple_loss=0.3364, pruned_loss=0.1008, over 1619565.44 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:41:41,456 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:41:48,659 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50165.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:41:59,250 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:42:05,843 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50190.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:42:11,959 INFO [train.py:901] (3/4) Epoch 7, batch 1700, loss[loss=0.3836, simple_loss=0.4104, pruned_loss=0.1784, over 6483.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3365, pruned_loss=0.1004, over 1620548.37 frames. ], batch size: 71, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:42:22,989 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50215.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:42:35,242 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.886e+02 3.481e+02 4.608e+02 1.233e+03, threshold=6.962e+02, percent-clipped=3.0 +2023-02-06 03:42:42,122 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:42:46,680 INFO [train.py:901] (3/4) Epoch 7, batch 1750, loss[loss=0.3067, simple_loss=0.3725, pruned_loss=0.1204, over 8440.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3359, pruned_loss=0.1, over 1625935.84 frames. ], batch size: 49, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:43:21,543 INFO [train.py:901] (3/4) Epoch 7, batch 1800, loss[loss=0.2887, simple_loss=0.3457, pruned_loss=0.1158, over 7816.00 frames. 
], tot_loss[loss=0.2682, simple_loss=0.3359, pruned_loss=0.1002, over 1624765.64 frames. ], batch size: 20, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:43:44,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.922e+02 3.562e+02 4.379e+02 1.030e+03, threshold=7.125e+02, percent-clipped=4.0 +2023-02-06 03:43:56,170 INFO [train.py:901] (3/4) Epoch 7, batch 1850, loss[loss=0.239, simple_loss=0.2976, pruned_loss=0.09015, over 6285.00 frames. ], tot_loss[loss=0.2668, simple_loss=0.3353, pruned_loss=0.09917, over 1629049.37 frames. ], batch size: 14, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:44:30,587 INFO [train.py:901] (3/4) Epoch 7, batch 1900, loss[loss=0.2807, simple_loss=0.3432, pruned_loss=0.1091, over 8240.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3357, pruned_loss=0.09993, over 1624823.03 frames. ], batch size: 22, lr: 1.12e-02, grad_scale: 8.0 +2023-02-06 03:44:42,162 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0714, 1.1565, 4.2180, 1.5536, 3.6019, 3.3865, 3.7043, 3.5472], + device='cuda:3'), covar=tensor([0.0465, 0.4225, 0.0483, 0.2895, 0.1210, 0.0807, 0.0526, 0.0684], + device='cuda:3'), in_proj_covar=tensor([0.0371, 0.0512, 0.0464, 0.0455, 0.0516, 0.0428, 0.0432, 0.0490], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 03:44:43,978 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 03:44:53,922 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.786e+02 3.637e+02 4.614e+02 8.948e+02, threshold=7.273e+02, percent-clipped=3.0 +2023-02-06 03:44:56,014 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 03:45:04,598 INFO [train.py:901] (3/4) Epoch 7, batch 1950, loss[loss=0.2402, simple_loss=0.3258, pruned_loss=0.07729, over 8470.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3352, pruned_loss=0.1001, over 1619348.68 frames. ], batch size: 29, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:45:05,201 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. limit=2.0 +2023-02-06 03:45:15,300 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 03:45:19,139 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-06 03:45:33,289 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:39,205 INFO [train.py:901] (3/4) Epoch 7, batch 2000, loss[loss=0.2472, simple_loss=0.3096, pruned_loss=0.09243, over 7691.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3353, pruned_loss=0.1003, over 1618870.49 frames. ], batch size: 18, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:45:39,435 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50499.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:40,222 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. 
limit=2.0 +2023-02-06 03:45:57,458 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:45:57,476 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1114, 2.6066, 3.1934, 1.2052, 3.3005, 2.0956, 1.4738, 1.8942], + device='cuda:3'), covar=tensor([0.0359, 0.0150, 0.0093, 0.0327, 0.0131, 0.0361, 0.0446, 0.0228], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0254, 0.0212, 0.0307, 0.0248, 0.0395, 0.0314, 0.0286], + device='cuda:3'), out_proj_covar=tensor([1.1192e-04, 7.9927e-05, 6.5928e-05, 9.6853e-05, 7.8975e-05, 1.3617e-04, + 1.0205e-04, 9.0992e-05], device='cuda:3') +2023-02-06 03:46:03,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.800e+02 3.583e+02 4.591e+02 1.075e+03, threshold=7.166e+02, percent-clipped=7.0 +2023-02-06 03:46:13,949 INFO [train.py:901] (3/4) Epoch 7, batch 2050, loss[loss=0.2574, simple_loss=0.3264, pruned_loss=0.0942, over 8291.00 frames. ], tot_loss[loss=0.2687, simple_loss=0.3358, pruned_loss=0.1008, over 1618417.60 frames. ], batch size: 23, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:46:20,540 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50559.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:46:23,784 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:46:47,297 INFO [train.py:901] (3/4) Epoch 7, batch 2100, loss[loss=0.276, simple_loss=0.3482, pruned_loss=0.102, over 8568.00 frames. ], tot_loss[loss=0.2677, simple_loss=0.3352, pruned_loss=0.1001, over 1617216.75 frames. ], batch size: 31, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:46:52,372 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:47:11,026 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 3.066e+02 3.697e+02 4.610e+02 1.063e+03, threshold=7.394e+02, percent-clipped=3.0 +2023-02-06 03:47:22,370 INFO [train.py:901] (3/4) Epoch 7, batch 2150, loss[loss=0.2313, simple_loss=0.2874, pruned_loss=0.0876, over 7802.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3351, pruned_loss=0.1006, over 1616359.28 frames. ], batch size: 19, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:47:40,258 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=50674.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:47:52,177 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2618, 1.2564, 1.3705, 1.1128, 1.2359, 1.1632, 1.7553, 1.7095], + device='cuda:3'), covar=tensor([0.0607, 0.1819, 0.2731, 0.1881, 0.0740, 0.2290, 0.0884, 0.0702], + device='cuda:3'), in_proj_covar=tensor([0.0117, 0.0167, 0.0208, 0.0170, 0.0117, 0.0176, 0.0129, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 03:47:56,609 INFO [train.py:901] (3/4) Epoch 7, batch 2200, loss[loss=0.2254, simple_loss=0.3013, pruned_loss=0.07475, over 7655.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3353, pruned_loss=0.1006, over 1616578.18 frames. 
], batch size: 19, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:48:20,832 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.939e+02 3.492e+02 4.230e+02 8.261e+02, threshold=6.983e+02, percent-clipped=2.0 +2023-02-06 03:48:31,240 INFO [train.py:901] (3/4) Epoch 7, batch 2250, loss[loss=0.2691, simple_loss=0.3454, pruned_loss=0.0964, over 8658.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3349, pruned_loss=0.09982, over 1622420.23 frames. ], batch size: 34, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:49:05,387 INFO [train.py:901] (3/4) Epoch 7, batch 2300, loss[loss=0.2377, simple_loss=0.3057, pruned_loss=0.08488, over 7809.00 frames. ], tot_loss[loss=0.2697, simple_loss=0.3363, pruned_loss=0.1015, over 1621418.98 frames. ], batch size: 20, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:49:06,290 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50800.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:49:23,415 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50826.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:49:28,610 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 3.140e+02 4.073e+02 5.620e+02 1.608e+03, threshold=8.146e+02, percent-clipped=16.0 +2023-02-06 03:49:39,820 INFO [train.py:901] (3/4) Epoch 7, batch 2350, loss[loss=0.2824, simple_loss=0.3582, pruned_loss=0.1033, over 8581.00 frames. ], tot_loss[loss=0.2693, simple_loss=0.3361, pruned_loss=0.1013, over 1617515.52 frames. ], batch size: 49, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:49:47,971 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:50:05,013 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:50:14,212 INFO [train.py:901] (3/4) Epoch 7, batch 2400, loss[loss=0.2658, simple_loss=0.3329, pruned_loss=0.0994, over 7807.00 frames. ], tot_loss[loss=0.2689, simple_loss=0.3358, pruned_loss=0.101, over 1616746.71 frames. ], batch size: 20, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:50:20,197 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=50908.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:50:29,050 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6709, 1.7533, 1.9537, 1.7434, 1.0833, 1.9627, 0.3400, 1.3956], + device='cuda:3'), covar=tensor([0.3757, 0.2063, 0.0806, 0.1812, 0.5525, 0.0692, 0.4257, 0.2090], + device='cuda:3'), in_proj_covar=tensor([0.0141, 0.0136, 0.0083, 0.0186, 0.0230, 0.0085, 0.0145, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:50:35,194 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=50930.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:50:36,985 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.900e+02 3.414e+02 4.196e+02 7.276e+02, threshold=6.828e+02, percent-clipped=0.0 +2023-02-06 03:50:47,544 INFO [train.py:901] (3/4) Epoch 7, batch 2450, loss[loss=0.2452, simple_loss=0.3128, pruned_loss=0.08879, over 8079.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3342, pruned_loss=0.1002, over 1617265.49 frames. 
], batch size: 21, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:50:48,394 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7328, 1.3783, 3.9092, 1.2849, 3.3845, 3.2792, 3.5562, 3.3886], + device='cuda:3'), covar=tensor([0.0524, 0.3706, 0.0496, 0.3005, 0.1196, 0.0779, 0.0554, 0.0711], + device='cuda:3'), in_proj_covar=tensor([0.0375, 0.0523, 0.0475, 0.0455, 0.0523, 0.0434, 0.0437, 0.0497], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 03:50:52,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=50955.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:50:54,522 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-06 03:50:59,079 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=50964.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:51:22,801 INFO [train.py:901] (3/4) Epoch 7, batch 2500, loss[loss=0.2694, simple_loss=0.3463, pruned_loss=0.09622, over 8465.00 frames. ], tot_loss[loss=0.2681, simple_loss=0.3351, pruned_loss=0.1005, over 1622062.83 frames. ], batch size: 27, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:51:39,950 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51023.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:51:46,386 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.634e+02 3.421e+02 4.023e+02 8.503e+02, threshold=6.842e+02, percent-clipped=1.0 +2023-02-06 03:51:56,890 INFO [train.py:901] (3/4) Epoch 7, batch 2550, loss[loss=0.2615, simple_loss=0.3316, pruned_loss=0.09568, over 8468.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3337, pruned_loss=0.09984, over 1619882.37 frames. ], batch size: 25, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:52:18,142 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:52:31,081 INFO [train.py:901] (3/4) Epoch 7, batch 2600, loss[loss=0.295, simple_loss=0.3701, pruned_loss=0.11, over 8445.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.3337, pruned_loss=0.1003, over 1616335.06 frames. ], batch size: 27, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:52:54,869 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.187e+02 3.140e+02 3.874e+02 4.757e+02 8.436e+02, threshold=7.747e+02, percent-clipped=5.0 +2023-02-06 03:53:02,028 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51144.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:53:05,173 INFO [train.py:901] (3/4) Epoch 7, batch 2650, loss[loss=0.3399, simple_loss=0.3837, pruned_loss=0.1481, over 8576.00 frames. ], tot_loss[loss=0.2672, simple_loss=0.3334, pruned_loss=0.1005, over 1614220.22 frames. ], batch size: 39, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:53:19,253 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51170.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:53:32,643 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51190.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 03:53:39,221 INFO [train.py:901] (3/4) Epoch 7, batch 2700, loss[loss=0.2476, simple_loss=0.3223, pruned_loss=0.08647, over 8500.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3322, pruned_loss=0.09952, over 1616749.14 frames. 
], batch size: 26, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:53:46,473 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0 +2023-02-06 03:54:02,460 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 3.046e+02 3.584e+02 4.560e+02 9.753e+02, threshold=7.169e+02, percent-clipped=4.0 +2023-02-06 03:54:14,236 INFO [train.py:901] (3/4) Epoch 7, batch 2750, loss[loss=0.2887, simple_loss=0.368, pruned_loss=0.1047, over 8321.00 frames. ], tot_loss[loss=0.266, simple_loss=0.3331, pruned_loss=0.09946, over 1619229.08 frames. ], batch size: 25, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:54:14,458 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7294, 2.9969, 1.9526, 2.4843, 2.4678, 1.6230, 2.1668, 2.4132], + device='cuda:3'), covar=tensor([0.1061, 0.0229, 0.0822, 0.0458, 0.0527, 0.1002, 0.0700, 0.0714], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0229, 0.0310, 0.0294, 0.0305, 0.0310, 0.0335, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 03:54:21,079 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:54:34,381 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:54:38,286 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51285.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:54:40,393 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7964, 2.2104, 3.9188, 2.8540, 3.1897, 2.2539, 1.7048, 1.8630], + device='cuda:3'), covar=tensor([0.2250, 0.2881, 0.0576, 0.1645, 0.1313, 0.1365, 0.1281, 0.2962], + device='cuda:3'), in_proj_covar=tensor([0.0826, 0.0769, 0.0667, 0.0765, 0.0857, 0.0708, 0.0666, 0.0703], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:54:47,540 INFO [train.py:901] (3/4) Epoch 7, batch 2800, loss[loss=0.2118, simple_loss=0.2891, pruned_loss=0.06724, over 7220.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.333, pruned_loss=0.09909, over 1623062.70 frames. ], batch size: 16, lr: 1.11e-02, grad_scale: 8.0 +2023-02-06 03:54:51,153 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51304.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:54:53,741 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51308.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:55:01,962 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9754, 1.5961, 1.5521, 1.4750, 1.1488, 1.4098, 1.6768, 1.4743], + device='cuda:3'), covar=tensor([0.0611, 0.1206, 0.1757, 0.1312, 0.0575, 0.1452, 0.0668, 0.0586], + device='cuda:3'), in_proj_covar=tensor([0.0119, 0.0167, 0.0208, 0.0171, 0.0117, 0.0176, 0.0129, 0.0148], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 03:55:11,783 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.811e+02 3.563e+02 4.674e+02 6.809e+02, threshold=7.126e+02, percent-clipped=0.0 +2023-02-06 03:55:22,692 INFO [train.py:901] (3/4) Epoch 7, batch 2850, loss[loss=0.2689, simple_loss=0.3399, pruned_loss=0.09895, over 8132.00 frames. 
], tot_loss[loss=0.2675, simple_loss=0.3351, pruned_loss=0.09998, over 1625330.45 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 16.0 +2023-02-06 03:55:57,259 INFO [train.py:901] (3/4) Epoch 7, batch 2900, loss[loss=0.25, simple_loss=0.3223, pruned_loss=0.08887, over 8335.00 frames. ], tot_loss[loss=0.2673, simple_loss=0.3348, pruned_loss=0.09995, over 1620775.11 frames. ], batch size: 26, lr: 1.10e-02, grad_scale: 16.0 +2023-02-06 03:55:57,412 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8666, 1.4199, 5.8675, 1.9331, 5.2576, 4.9742, 5.5580, 5.4512], + device='cuda:3'), covar=tensor([0.0341, 0.3773, 0.0233, 0.2713, 0.0892, 0.0597, 0.0314, 0.0358], + device='cuda:3'), in_proj_covar=tensor([0.0377, 0.0521, 0.0468, 0.0462, 0.0524, 0.0436, 0.0430, 0.0489], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 03:55:58,200 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51400.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:55:58,867 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.5625, 5.5910, 4.8678, 2.2238, 4.8699, 5.0992, 5.1965, 4.7966], + device='cuda:3'), covar=tensor([0.0672, 0.0371, 0.0765, 0.4383, 0.0646, 0.0588, 0.0933, 0.0579], + device='cuda:3'), in_proj_covar=tensor([0.0412, 0.0310, 0.0336, 0.0419, 0.0328, 0.0306, 0.0315, 0.0272], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:56:04,944 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:13,557 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:14,147 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:56:19,630 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 03:56:20,272 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.833e+02 3.577e+02 4.732e+02 1.075e+03, threshold=7.153e+02, percent-clipped=9.0 +2023-02-06 03:56:28,586 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 03:56:32,325 INFO [train.py:901] (3/4) Epoch 7, batch 2950, loss[loss=0.2448, simple_loss=0.3107, pruned_loss=0.08946, over 6449.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.334, pruned_loss=0.09873, over 1622551.41 frames. ], batch size: 14, lr: 1.10e-02, grad_scale: 16.0 +2023-02-06 03:57:06,412 INFO [train.py:901] (3/4) Epoch 7, batch 3000, loss[loss=0.2269, simple_loss=0.2863, pruned_loss=0.0837, over 7199.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3343, pruned_loss=0.09928, over 1620999.45 frames. ], batch size: 16, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:57:06,412 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 03:57:21,703 INFO [train.py:935] (3/4) Epoch 7, validation: loss=0.2071, simple_loss=0.305, pruned_loss=0.05459, over 944034.00 frames. 
+2023-02-06 03:57:21,703 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 03:57:31,201 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:32,578 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51515.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:45,150 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.842e+02 3.422e+02 4.197e+02 1.269e+03, threshold=6.844e+02, percent-clipped=2.0 +2023-02-06 03:57:45,245 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51534.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:57:48,659 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:49,365 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51540.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:57:50,090 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51541.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:57:55,339 INFO [train.py:901] (3/4) Epoch 7, batch 3050, loss[loss=0.3549, simple_loss=0.3944, pruned_loss=0.1578, over 8455.00 frames. ], tot_loss[loss=0.2684, simple_loss=0.3358, pruned_loss=0.1005, over 1621650.50 frames. ], batch size: 27, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:57:59,273 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 03:58:06,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51566.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:58:25,337 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0105, 3.8732, 3.5901, 1.9517, 3.4255, 3.4461, 3.6935, 3.1891], + device='cuda:3'), covar=tensor([0.0873, 0.0742, 0.0919, 0.4266, 0.0900, 0.1081, 0.1276, 0.0993], + device='cuda:3'), in_proj_covar=tensor([0.0411, 0.0315, 0.0342, 0.0424, 0.0332, 0.0307, 0.0322, 0.0275], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:58:29,888 INFO [train.py:901] (3/4) Epoch 7, batch 3100, loss[loss=0.2728, simple_loss=0.3591, pruned_loss=0.09326, over 8203.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3369, pruned_loss=0.1014, over 1619188.01 frames. ], batch size: 23, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:58:38,418 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 03:58:54,843 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 3.035e+02 3.902e+02 5.145e+02 1.067e+03, threshold=7.804e+02, percent-clipped=7.0 +2023-02-06 03:59:05,326 INFO [train.py:901] (3/4) Epoch 7, batch 3150, loss[loss=0.254, simple_loss=0.3167, pruned_loss=0.09565, over 7434.00 frames. ], tot_loss[loss=0.2696, simple_loss=0.3363, pruned_loss=0.1014, over 1616057.09 frames. 
], batch size: 17, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:59:05,504 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51649.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 03:59:10,954 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5940, 1.7633, 1.9355, 1.5648, 1.1860, 2.0222, 0.2530, 1.2232], + device='cuda:3'), covar=tensor([0.3444, 0.2018, 0.0836, 0.2322, 0.5255, 0.0765, 0.4309, 0.2389], + device='cuda:3'), in_proj_covar=tensor([0.0139, 0.0140, 0.0082, 0.0186, 0.0227, 0.0088, 0.0142, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:59:17,824 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5761, 4.5651, 4.0095, 1.7447, 3.9765, 4.1667, 4.2018, 3.5526], + device='cuda:3'), covar=tensor([0.0767, 0.0529, 0.0889, 0.5055, 0.0782, 0.0789, 0.1146, 0.0987], + device='cuda:3'), in_proj_covar=tensor([0.0419, 0.0320, 0.0346, 0.0428, 0.0333, 0.0312, 0.0324, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 03:59:26,403 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51679.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 03:59:40,111 INFO [train.py:901] (3/4) Epoch 7, batch 3200, loss[loss=0.2172, simple_loss=0.2766, pruned_loss=0.07888, over 7710.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3366, pruned_loss=0.1015, over 1618105.70 frames. ], batch size: 18, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 03:59:43,623 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:05,272 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.946e+02 3.588e+02 4.680e+02 7.788e+02, threshold=7.176e+02, percent-clipped=0.0 +2023-02-06 04:00:12,049 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51744.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:16,054 INFO [train.py:901] (3/4) Epoch 7, batch 3250, loss[loss=0.2291, simple_loss=0.306, pruned_loss=0.07606, over 7812.00 frames. ], tot_loss[loss=0.2688, simple_loss=0.3357, pruned_loss=0.101, over 1614336.68 frames. ], batch size: 20, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:00:19,466 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:23,712 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 04:00:26,786 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=51765.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:46,821 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:00:49,340 INFO [train.py:901] (3/4) Epoch 7, batch 3300, loss[loss=0.2206, simple_loss=0.2971, pruned_loss=0.07207, over 8099.00 frames. ], tot_loss[loss=0.2682, simple_loss=0.3351, pruned_loss=0.1007, over 1613665.39 frames. 
], batch size: 21, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:01:03,668 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51820.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:14,384 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.988e+02 3.662e+02 4.246e+02 9.313e+02, threshold=7.324e+02, percent-clipped=2.0 +2023-02-06 04:01:24,626 INFO [train.py:901] (3/4) Epoch 7, batch 3350, loss[loss=0.2297, simple_loss=0.3119, pruned_loss=0.07379, over 8029.00 frames. ], tot_loss[loss=0.268, simple_loss=0.3354, pruned_loss=0.1003, over 1616770.05 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:01:30,919 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=51857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:32,354 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51859.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:39,546 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51869.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:01:59,346 INFO [train.py:901] (3/4) Epoch 7, batch 3400, loss[loss=0.2923, simple_loss=0.3602, pruned_loss=0.1121, over 8479.00 frames. ], tot_loss[loss=0.2665, simple_loss=0.3342, pruned_loss=0.09946, over 1618834.26 frames. ], batch size: 29, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:02:03,619 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=51905.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:02:20,935 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=51930.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:02:23,261 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.723e+02 3.470e+02 4.144e+02 7.359e+02, threshold=6.940e+02, percent-clipped=1.0 +2023-02-06 04:02:24,191 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4857, 2.2351, 4.2690, 1.1705, 2.7690, 1.9734, 1.6491, 2.4628], + device='cuda:3'), covar=tensor([0.1876, 0.2208, 0.0686, 0.3931, 0.1686, 0.2781, 0.1810, 0.2421], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0470, 0.0534, 0.0547, 0.0588, 0.0524, 0.0452, 0.0590], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 04:02:34,623 INFO [train.py:901] (3/4) Epoch 7, batch 3450, loss[loss=0.2046, simple_loss=0.2913, pruned_loss=0.05895, over 8185.00 frames. ], tot_loss[loss=0.2657, simple_loss=0.3335, pruned_loss=0.09897, over 1620251.56 frames. ], batch size: 23, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:02:50,954 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=51972.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:03:08,878 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0981, 1.5255, 1.4833, 1.4065, 1.0826, 1.3198, 1.5845, 1.5309], + device='cuda:3'), covar=tensor([0.0499, 0.1172, 0.1809, 0.1338, 0.0585, 0.1497, 0.0720, 0.0584], + device='cuda:3'), in_proj_covar=tensor([0.0117, 0.0165, 0.0208, 0.0171, 0.0116, 0.0174, 0.0127, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0007, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 04:03:09,379 INFO [train.py:901] (3/4) Epoch 7, batch 3500, loss[loss=0.2392, simple_loss=0.3218, pruned_loss=0.07824, over 8120.00 frames. 
], tot_loss[loss=0.2653, simple_loss=0.3327, pruned_loss=0.09889, over 1616159.65 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:03:22,431 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 04:03:33,496 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.822e+02 3.302e+02 4.435e+02 1.594e+03, threshold=6.604e+02, percent-clipped=5.0 +2023-02-06 04:03:43,727 INFO [train.py:901] (3/4) Epoch 7, batch 3550, loss[loss=0.2891, simple_loss=0.3502, pruned_loss=0.114, over 8238.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3329, pruned_loss=0.09878, over 1615476.43 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:03:50,008 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:03:52,702 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1671, 1.8406, 2.5990, 2.1150, 2.1294, 1.9422, 1.4799, 0.9394], + device='cuda:3'), covar=tensor([0.2704, 0.2559, 0.0694, 0.1450, 0.1283, 0.1472, 0.1520, 0.2657], + device='cuda:3'), in_proj_covar=tensor([0.0827, 0.0763, 0.0660, 0.0758, 0.0857, 0.0710, 0.0656, 0.0694], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:04:10,082 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3973, 1.3135, 4.6567, 1.6473, 3.9147, 3.9032, 4.1844, 4.0254], + device='cuda:3'), covar=tensor([0.0540, 0.3891, 0.0456, 0.2912, 0.1323, 0.0708, 0.0454, 0.0567], + device='cuda:3'), in_proj_covar=tensor([0.0375, 0.0517, 0.0467, 0.0459, 0.0519, 0.0429, 0.0426, 0.0484], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0001], + device='cuda:3') +2023-02-06 04:04:19,844 INFO [train.py:901] (3/4) Epoch 7, batch 3600, loss[loss=0.25, simple_loss=0.3192, pruned_loss=0.09041, over 8130.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3329, pruned_loss=0.09872, over 1615728.97 frames. ], batch size: 22, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:04:26,703 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:30,881 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52115.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:37,710 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:43,463 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.818e+02 3.176e+02 4.094e+02 8.086e+02, threshold=6.353e+02, percent-clipped=5.0 +2023-02-06 04:04:47,728 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:04:53,394 INFO [train.py:901] (3/4) Epoch 7, batch 3650, loss[loss=0.2273, simple_loss=0.2964, pruned_loss=0.07907, over 7452.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3331, pruned_loss=0.09874, over 1615416.67 frames. ], batch size: 17, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:04:54,178 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:23,190 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 04:05:28,583 INFO [train.py:901] (3/4) Epoch 7, batch 3700, loss[loss=0.2405, simple_loss=0.3237, pruned_loss=0.07867, over 8362.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3319, pruned_loss=0.09839, over 1610948.81 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:05:36,877 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52211.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:47,074 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:49,855 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52228.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:05:53,719 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.602e+02 3.554e+02 4.404e+02 9.700e+02, threshold=7.108e+02, percent-clipped=5.0 +2023-02-06 04:06:04,132 INFO [train.py:901] (3/4) Epoch 7, batch 3750, loss[loss=0.2711, simple_loss=0.3516, pruned_loss=0.09534, over 8357.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.3304, pruned_loss=0.09713, over 1611304.74 frames. ], batch size: 24, lr: 1.10e-02, grad_scale: 8.0 +2023-02-06 04:06:07,177 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:06:38,744 INFO [train.py:901] (3/4) Epoch 7, batch 3800, loss[loss=0.2518, simple_loss=0.3276, pruned_loss=0.08802, over 8249.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.3316, pruned_loss=0.09765, over 1615276.40 frames. ], batch size: 22, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:01,891 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52330.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:07:04,421 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.780e+02 3.361e+02 4.228e+02 6.516e+02, threshold=6.722e+02, percent-clipped=0.0 +2023-02-06 04:07:15,844 INFO [train.py:901] (3/4) Epoch 7, batch 3850, loss[loss=0.2609, simple_loss=0.3458, pruned_loss=0.08796, over 8328.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3315, pruned_loss=0.09758, over 1610652.97 frames. ], batch size: 25, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:18,009 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.67 vs. limit=5.0 +2023-02-06 04:07:30,481 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 04:07:49,736 INFO [train.py:901] (3/4) Epoch 7, batch 3900, loss[loss=0.2498, simple_loss=0.3104, pruned_loss=0.09462, over 7266.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3324, pruned_loss=0.09786, over 1611338.37 frames. ], batch size: 16, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:07:51,995 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:08:15,069 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.697e+02 3.207e+02 4.225e+02 1.297e+03, threshold=6.414e+02, percent-clipped=5.0 +2023-02-06 04:08:25,215 INFO [train.py:901] (3/4) Epoch 7, batch 3950, loss[loss=0.2234, simple_loss=0.2972, pruned_loss=0.07479, over 7800.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3336, pruned_loss=0.09855, over 1617513.04 frames. 
], batch size: 20, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:08:48,002 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:00,797 INFO [train.py:901] (3/4) Epoch 7, batch 4000, loss[loss=0.3037, simple_loss=0.3441, pruned_loss=0.1316, over 7692.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3328, pruned_loss=0.09815, over 1614858.26 frames. ], batch size: 18, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:09:05,166 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52505.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:13,183 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:24,186 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.935e+02 3.629e+02 4.693e+02 1.248e+03, threshold=7.258e+02, percent-clipped=9.0 +2023-02-06 04:09:24,393 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2807, 2.1754, 1.5036, 1.9823, 1.7704, 1.1895, 1.5678, 1.8437], + device='cuda:3'), covar=tensor([0.1125, 0.0343, 0.0992, 0.0460, 0.0612, 0.1251, 0.0781, 0.0669], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0239, 0.0313, 0.0304, 0.0315, 0.0321, 0.0340, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:09:35,730 INFO [train.py:901] (3/4) Epoch 7, batch 4050, loss[loss=0.2751, simple_loss=0.353, pruned_loss=0.09861, over 8197.00 frames. ], tot_loss[loss=0.2661, simple_loss=0.3341, pruned_loss=0.09904, over 1620422.07 frames. ], batch size: 23, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:09:39,788 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:09:53,155 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52573.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:10:10,473 INFO [train.py:901] (3/4) Epoch 7, batch 4100, loss[loss=0.2361, simple_loss=0.3044, pruned_loss=0.08385, over 7932.00 frames. ], tot_loss[loss=0.266, simple_loss=0.3337, pruned_loss=0.09915, over 1616538.80 frames. ], batch size: 20, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:10:33,850 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.709e+02 3.346e+02 4.687e+02 1.096e+03, threshold=6.691e+02, percent-clipped=5.0 +2023-02-06 04:10:44,015 INFO [train.py:901] (3/4) Epoch 7, batch 4150, loss[loss=0.2532, simple_loss=0.3314, pruned_loss=0.08752, over 8450.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3323, pruned_loss=0.09864, over 1615827.44 frames. ], batch size: 27, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:10:59,828 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52670.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:11:02,381 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:11:20,449 INFO [train.py:901] (3/4) Epoch 7, batch 4200, loss[loss=0.4247, simple_loss=0.4566, pruned_loss=0.1964, over 8561.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3321, pruned_loss=0.09845, over 1614584.70 frames. ], batch size: 39, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:11:30,521 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 04:11:43,884 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.675e+02 3.334e+02 4.108e+02 1.082e+03, threshold=6.669e+02, percent-clipped=4.0 +2023-02-06 04:11:53,178 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 04:11:53,853 INFO [train.py:901] (3/4) Epoch 7, batch 4250, loss[loss=0.2985, simple_loss=0.3575, pruned_loss=0.1197, over 8571.00 frames. ], tot_loss[loss=0.2648, simple_loss=0.3319, pruned_loss=0.09881, over 1611350.86 frames. ], batch size: 34, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:12:08,141 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=52770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:10,297 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:22,429 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=52789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:28,468 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:12:28,944 INFO [train.py:901] (3/4) Epoch 7, batch 4300, loss[loss=0.2696, simple_loss=0.3397, pruned_loss=0.09975, over 8332.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3303, pruned_loss=0.09783, over 1609911.40 frames. ], batch size: 25, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:12:53,669 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.879e+02 3.462e+02 4.347e+02 1.112e+03, threshold=6.924e+02, percent-clipped=5.0 +2023-02-06 04:13:03,882 INFO [train.py:901] (3/4) Epoch 7, batch 4350, loss[loss=0.2407, simple_loss=0.3287, pruned_loss=0.07636, over 8640.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3294, pruned_loss=0.09737, over 1612855.92 frames. ], batch size: 49, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:13:24,437 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 04:13:38,504 INFO [train.py:901] (3/4) Epoch 7, batch 4400, loss[loss=0.264, simple_loss=0.3258, pruned_loss=0.101, over 7967.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3311, pruned_loss=0.09898, over 1608066.06 frames. ], batch size: 21, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:13:50,776 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=52917.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:13:56,765 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=52926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:14:02,503 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.715e+02 3.689e+02 4.508e+02 8.331e+02, threshold=7.379e+02, percent-clipped=6.0 +2023-02-06 04:14:06,689 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 04:14:13,200 INFO [train.py:901] (3/4) Epoch 7, batch 4450, loss[loss=0.3552, simple_loss=0.4053, pruned_loss=0.1525, over 8502.00 frames. ], tot_loss[loss=0.2645, simple_loss=0.3314, pruned_loss=0.09878, over 1610538.15 frames. 
], batch size: 26, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:14:14,731 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=52951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:14:46,593 INFO [train.py:901] (3/4) Epoch 7, batch 4500, loss[loss=0.2552, simple_loss=0.3072, pruned_loss=0.1016, over 7658.00 frames. ], tot_loss[loss=0.2659, simple_loss=0.3325, pruned_loss=0.09969, over 1612166.61 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:14:59,361 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 04:15:10,353 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53032.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:15:11,488 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.236e+02 2.890e+02 3.405e+02 4.030e+02 1.067e+03, threshold=6.809e+02, percent-clipped=4.0 +2023-02-06 04:15:18,996 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:15:22,052 INFO [train.py:901] (3/4) Epoch 7, batch 4550, loss[loss=0.2597, simple_loss=0.3286, pruned_loss=0.09537, over 7636.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.3312, pruned_loss=0.09904, over 1612434.23 frames. ], batch size: 19, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:15:27,688 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5520, 2.0254, 2.0398, 1.0920, 2.2947, 1.3753, 0.6862, 1.6940], + device='cuda:3'), covar=tensor([0.0301, 0.0152, 0.0130, 0.0273, 0.0162, 0.0445, 0.0409, 0.0151], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0263, 0.0221, 0.0322, 0.0261, 0.0408, 0.0318, 0.0298], + device='cuda:3'), out_proj_covar=tensor([1.1291e-04, 8.1943e-05, 6.8022e-05, 1.0050e-04, 8.2528e-05, 1.3840e-04, + 1.0194e-04, 9.3595e-05], device='cuda:3') +2023-02-06 04:15:29,785 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-06 04:15:36,992 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:15:39,630 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53074.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:15:56,227 INFO [train.py:901] (3/4) Epoch 7, batch 4600, loss[loss=0.2746, simple_loss=0.3462, pruned_loss=0.1015, over 8328.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3318, pruned_loss=0.09954, over 1612391.90 frames. ], batch size: 26, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:16:06,498 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:16:20,975 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.951e+02 3.579e+02 4.375e+02 1.013e+03, threshold=7.158e+02, percent-clipped=5.0 +2023-02-06 04:16:31,866 INFO [train.py:901] (3/4) Epoch 7, batch 4650, loss[loss=0.3678, simple_loss=0.4019, pruned_loss=0.1669, over 7087.00 frames. ], tot_loss[loss=0.265, simple_loss=0.332, pruned_loss=0.09899, over 1613794.95 frames. 
], batch size: 71, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:17:05,162 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1349, 1.5239, 1.6615, 1.3606, 1.0444, 1.4582, 1.6462, 1.6406], + device='cuda:3'), covar=tensor([0.0546, 0.1300, 0.1771, 0.1421, 0.0608, 0.1565, 0.0719, 0.0593], + device='cuda:3'), in_proj_covar=tensor([0.0117, 0.0167, 0.0207, 0.0169, 0.0115, 0.0174, 0.0127, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 04:17:06,942 INFO [train.py:901] (3/4) Epoch 7, batch 4700, loss[loss=0.28, simple_loss=0.3421, pruned_loss=0.109, over 8038.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.3315, pruned_loss=0.09848, over 1612435.16 frames. ], batch size: 22, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:17:27,464 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:17:30,531 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.793e+02 3.469e+02 4.275e+02 9.300e+02, threshold=6.939e+02, percent-clipped=3.0 +2023-02-06 04:17:41,203 INFO [train.py:901] (3/4) Epoch 7, batch 4750, loss[loss=0.3161, simple_loss=0.377, pruned_loss=0.1276, over 8361.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.3327, pruned_loss=0.09871, over 1616816.27 frames. ], batch size: 24, lr: 1.09e-02, grad_scale: 8.0 +2023-02-06 04:17:42,009 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4744, 2.1280, 3.4675, 2.1462, 2.7317, 3.9642, 3.7183, 3.5579], + device='cuda:3'), covar=tensor([0.0738, 0.1204, 0.0560, 0.1434, 0.0912, 0.0193, 0.0482, 0.0449], + device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0276, 0.0230, 0.0270, 0.0242, 0.0217, 0.0274, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 04:17:46,729 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2583, 2.6006, 1.7978, 2.1396, 1.9797, 1.4524, 1.8349, 1.9514], + device='cuda:3'), covar=tensor([0.1345, 0.0347, 0.0943, 0.0547, 0.0652, 0.1226, 0.0942, 0.0824], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0236, 0.0310, 0.0298, 0.0311, 0.0314, 0.0337, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:17:48,716 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:17:56,082 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8817, 1.7601, 5.8329, 1.9718, 5.2669, 4.8464, 5.5108, 5.3786], + device='cuda:3'), covar=tensor([0.0308, 0.3646, 0.0286, 0.2974, 0.0804, 0.0567, 0.0345, 0.0378], + device='cuda:3'), in_proj_covar=tensor([0.0390, 0.0531, 0.0479, 0.0469, 0.0529, 0.0448, 0.0435, 0.0497], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 04:17:56,639 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 04:17:59,361 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
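The periodic `attn_weights_entropy` dumps are a diagnostic on the self-attention heads: the entropy of each head's attention distribution, where values near zero flag heads that have collapsed onto a single key position. The statistic itself is simple; here is one way to compute it (the tensor layout is an assumption, not zipformer.py's actual shapes):

```python
import torch

def attn_weights_entropy(attn: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    """Per-head entropy of attention weights.

    attn: (num_heads, query_len, key_len), each row a distribution over keys.
    Returns the entropy averaged over query positions, one value per head.
    """
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (num_heads, query_len)
    return ent.mean(dim=-1)                          # (num_heads,)

# sanity check: uniform attention gives the maximum, log(key_len)
uniform = torch.full((8, 4, 16), 1.0 / 16)
print(attn_weights_entropy(uniform))  # ~2.77 == log(16) for every head
```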
Duration: 25.775 +2023-02-06 04:18:01,605 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5050, 1.7750, 4.6322, 2.1619, 4.1310, 3.9176, 4.2844, 4.1804], + device='cuda:3'), covar=tensor([0.0391, 0.3381, 0.0360, 0.2495, 0.0871, 0.0603, 0.0405, 0.0458], + device='cuda:3'), in_proj_covar=tensor([0.0390, 0.0531, 0.0479, 0.0469, 0.0530, 0.0448, 0.0435, 0.0497], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 04:18:04,735 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-06 04:18:09,725 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53288.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:18:16,601 INFO [train.py:901] (3/4) Epoch 7, batch 4800, loss[loss=0.2414, simple_loss=0.3177, pruned_loss=0.08255, over 7946.00 frames. ], tot_loss[loss=0.2638, simple_loss=0.3315, pruned_loss=0.09807, over 1617266.95 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:18:26,454 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53313.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:18:32,413 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:18:39,903 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53333.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:18:41,111 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.630e+02 3.191e+02 3.984e+02 9.617e+02, threshold=6.381e+02, percent-clipped=3.0 +2023-02-06 04:18:50,455 INFO [train.py:901] (3/4) Epoch 7, batch 4850, loss[loss=0.2536, simple_loss=0.3305, pruned_loss=0.0883, over 7649.00 frames. ], tot_loss[loss=0.2631, simple_loss=0.3307, pruned_loss=0.0978, over 1616357.20 frames. ], batch size: 19, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:18:51,164 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 04:19:16,485 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=53385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:19:20,663 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9133, 2.1982, 3.8259, 2.9821, 3.2090, 2.4728, 1.6558, 1.7854], + device='cuda:3'), covar=tensor([0.2312, 0.3261, 0.0652, 0.1332, 0.1230, 0.1192, 0.1231, 0.2942], + device='cuda:3'), in_proj_covar=tensor([0.0818, 0.0767, 0.0659, 0.0755, 0.0847, 0.0698, 0.0650, 0.0693], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:19:26,612 INFO [train.py:901] (3/4) Epoch 7, batch 4900, loss[loss=0.2489, simple_loss=0.3295, pruned_loss=0.08415, over 8340.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3306, pruned_loss=0.09723, over 1619208.19 frames. ], batch size: 26, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:19:40,217 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:19:51,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.021e+02 2.811e+02 3.269e+02 4.328e+02 9.769e+02, threshold=6.539e+02, percent-clipped=6.0 +2023-02-06 04:20:00,859 INFO [train.py:901] (3/4) Epoch 7, batch 4950, loss[loss=0.2976, simple_loss=0.3684, pruned_loss=0.1134, over 8349.00 frames. 
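The `Whitening: num_groups=…, metric=… vs. limit=…` lines compare a measured statistic against a limit, penalizing feature covariances that drift too far from isotropy. One plausible form of that metric, computed from traces so no eigendecomposition is needed; this is an assumed reconstruction, not a copy of icefall's scaling.py:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """mean(eigenvalue^2) / mean(eigenvalue)^2 of the per-group covariance.

    Equals 1.0 for perfectly white features and grows as variance
    concentrates in few directions; x has shape (num_frames, num_channels).
    """
    num_frames, num_channels = x.shape
    c = num_channels // num_groups
    x = x.reshape(num_frames, num_groups, c).transpose(0, 1)
    cov = x.transpose(1, 2) @ x / num_frames           # (num_groups, c, c)
    sum_eig_sq = (cov * cov).sum(dim=(1, 2))           # trace(cov @ cov)
    sum_eig = cov.diagonal(dim1=1, dim2=2).sum(dim=1)  # trace(cov)
    return ((sum_eig_sq / c) / (sum_eig / c) ** 2).mean()
```

With `metric=1.62 vs. limit=2.0` the check passes; when the metric exceeds the limit, the module presumably applies a corrective penalty to push the features back toward whiteness.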
], tot_loss[loss=0.2636, simple_loss=0.3315, pruned_loss=0.09786, over 1622024.15 frames. ], batch size: 24, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:20:27,191 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53485.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:20:36,822 INFO [train.py:901] (3/4) Epoch 7, batch 5000, loss[loss=0.2638, simple_loss=0.342, pruned_loss=0.09279, over 8506.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.331, pruned_loss=0.09734, over 1620636.19 frames. ], batch size: 28, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:20:37,041 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6517, 2.0553, 2.0524, 1.1644, 2.3603, 1.4524, 0.6475, 1.6820], + device='cuda:3'), covar=tensor([0.0325, 0.0154, 0.0132, 0.0258, 0.0137, 0.0451, 0.0405, 0.0153], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0259, 0.0217, 0.0316, 0.0256, 0.0402, 0.0311, 0.0288], + device='cuda:3'), out_proj_covar=tensor([1.1241e-04, 8.0720e-05, 6.6808e-05, 9.7905e-05, 8.0513e-05, 1.3622e-04, + 9.9228e-05, 9.0097e-05], device='cuda:3') +2023-02-06 04:20:45,261 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:21:01,965 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:21:03,199 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.612e+02 3.156e+02 4.000e+02 8.821e+02, threshold=6.312e+02, percent-clipped=7.0 +2023-02-06 04:21:12,888 INFO [train.py:901] (3/4) Epoch 7, batch 5050, loss[loss=0.2367, simple_loss=0.3127, pruned_loss=0.08033, over 7802.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3302, pruned_loss=0.09687, over 1615788.27 frames. ], batch size: 20, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:21:31,995 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 04:21:46,780 INFO [train.py:901] (3/4) Epoch 7, batch 5100, loss[loss=0.2525, simple_loss=0.3189, pruned_loss=0.09306, over 8295.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3305, pruned_loss=0.09715, over 1615681.99 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:21:51,219 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:22:13,232 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.889e+02 3.391e+02 4.238e+02 9.606e+02, threshold=6.783e+02, percent-clipped=10.0 +2023-02-06 04:22:23,450 INFO [train.py:901] (3/4) Epoch 7, batch 5150, loss[loss=0.2621, simple_loss=0.3289, pruned_loss=0.09769, over 8524.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.332, pruned_loss=0.09819, over 1617774.24 frames. 
], batch size: 39, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:22:23,630 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3626, 1.6090, 1.7146, 1.2918, 1.1076, 1.5862, 1.7518, 1.7378], + device='cuda:3'), covar=tensor([0.0515, 0.1185, 0.1663, 0.1432, 0.0605, 0.1531, 0.0660, 0.0552], + device='cuda:3'), in_proj_covar=tensor([0.0116, 0.0166, 0.0205, 0.0171, 0.0115, 0.0175, 0.0127, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 04:22:34,970 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:22:42,267 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:22:56,582 INFO [train.py:901] (3/4) Epoch 7, batch 5200, loss[loss=0.2462, simple_loss=0.3147, pruned_loss=0.08885, over 8612.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.332, pruned_loss=0.09856, over 1617554.60 frames. ], batch size: 34, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:23:10,744 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:23:17,973 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=53729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:23:22,034 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.983e+02 3.078e+02 4.028e+02 5.378e+02 1.177e+03, threshold=8.056e+02, percent-clipped=8.0 +2023-02-06 04:23:28,937 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 04:23:32,187 INFO [train.py:901] (3/4) Epoch 7, batch 5250, loss[loss=0.3331, simple_loss=0.3794, pruned_loss=0.1434, over 8579.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3316, pruned_loss=0.09821, over 1616692.80 frames. ], batch size: 34, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:23:54,699 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:23:58,805 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6903, 5.7099, 5.0719, 1.7851, 5.1279, 5.4607, 5.5080, 4.9399], + device='cuda:3'), covar=tensor([0.0587, 0.0455, 0.0924, 0.5416, 0.0636, 0.0658, 0.1080, 0.0644], + device='cuda:3'), in_proj_covar=tensor([0.0425, 0.0324, 0.0357, 0.0443, 0.0343, 0.0322, 0.0334, 0.0282], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:24:00,280 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53789.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:02,339 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53792.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:06,925 INFO [train.py:901] (3/4) Epoch 7, batch 5300, loss[loss=0.2051, simple_loss=0.2807, pruned_loss=0.06478, over 7249.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3332, pruned_loss=0.09884, over 1613641.82 frames. 
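The zipformer.py lines track a staggered warmup schedule per encoder stack (`warmup_begin`/`warmup_end` are batch counts, offset per layer group) together with stochastic layer dropping. Note that even at batch_count ≈ 55 000 an occasional `num_to_drop=1` appears, so the drop probability evidently settles to a small constant rather than vanishing. A hedged sketch of such a schedule; the probabilities are invented:

```python
import random

def layer_drop_prob(batch_count: float, warmup_begin: float, warmup_end: float,
                    warmup_prob: float = 0.5, final_prob: float = 0.05) -> float:
    """Assumed schedule: aggressive layer dropping before warmup_begin,
    linear decay across [warmup_begin, warmup_end], small constant after."""
    if batch_count <= warmup_begin:
        return warmup_prob
    if batch_count >= warmup_end:
        return final_prob
    frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
    return warmup_prob + frac * (final_prob - warmup_prob)

def pick_layers_to_drop(batch_count: float, schedules) -> set:
    """schedules: list of (warmup_begin, warmup_end) pairs, one per layer."""
    return {i for i, (b, e) in enumerate(schedules)
            if random.random() < layer_drop_prob(batch_count, b, e)}
```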
], batch size: 16, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:24:17,563 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53814.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:31,354 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.647e+02 3.169e+02 3.870e+02 1.211e+03, threshold=6.339e+02, percent-clipped=2.0 +2023-02-06 04:24:36,505 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 04:24:39,058 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=53844.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:24:42,222 INFO [train.py:901] (3/4) Epoch 7, batch 5350, loss[loss=0.2459, simple_loss=0.33, pruned_loss=0.0809, over 8489.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.333, pruned_loss=0.09837, over 1614509.83 frames. ], batch size: 28, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:25:17,625 INFO [train.py:901] (3/4) Epoch 7, batch 5400, loss[loss=0.2377, simple_loss=0.3123, pruned_loss=0.08158, over 8304.00 frames. ], tot_loss[loss=0.263, simple_loss=0.3318, pruned_loss=0.09709, over 1612753.91 frames. ], batch size: 23, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:25:41,932 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.728e+02 3.458e+02 4.119e+02 1.009e+03, threshold=6.915e+02, percent-clipped=3.0 +2023-02-06 04:25:43,934 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1808, 1.3578, 3.3196, 0.9409, 2.8833, 2.8423, 3.0187, 2.9207], + device='cuda:3'), covar=tensor([0.0603, 0.3027, 0.0604, 0.2905, 0.1226, 0.0733, 0.0632, 0.0695], + device='cuda:3'), in_proj_covar=tensor([0.0391, 0.0526, 0.0478, 0.0461, 0.0523, 0.0441, 0.0440, 0.0494], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 04:25:51,250 INFO [train.py:901] (3/4) Epoch 7, batch 5450, loss[loss=0.286, simple_loss=0.3564, pruned_loss=0.1078, over 8741.00 frames. ], tot_loss[loss=0.2628, simple_loss=0.3317, pruned_loss=0.097, over 1619731.04 frames. ], batch size: 39, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:26:09,515 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=53974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:26:11,714 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-06 04:26:18,665 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 04:26:26,958 INFO [train.py:901] (3/4) Epoch 7, batch 5500, loss[loss=0.2387, simple_loss=0.307, pruned_loss=0.08521, over 7914.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3299, pruned_loss=0.09667, over 1610587.25 frames. 
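Each train.py entry decomposes the objective into `simple_loss` (the cheap trivial-joiner term of the pruned RNN-T loss) and `pruned_loss` (the full-joiner term evaluated only inside the pruned lattice region). The logged totals are reproduced exactly by a weighted sum with weights 0.5 and 1.0; take those weights as inferred by fitting the log rather than quoted from the recipe:

```python
# weights inferred from the logged numbers, not quoted from train.py
SIMPLE_LOSS_SCALE = 0.5
PRUNED_LOSS_SCALE = 1.0

def total_loss(simple_loss: float, pruned_loss: float) -> float:
    """Pruned-transducer objective as logged: e.g. for batch 5350 above,
    0.5 * 0.33 + 1.0 * 0.0809 == 0.2459, matching loss=0.2459."""
    return SIMPLE_LOSS_SCALE * simple_loss + PRUNED_LOSS_SCALE * pruned_loss
```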
], batch size: 20, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:26:27,120 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=53999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:26:52,078 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.929e+02 2.794e+02 3.496e+02 4.646e+02 1.157e+03, threshold=6.993e+02, percent-clipped=7.0 +2023-02-06 04:26:53,573 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54037.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:01,157 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:01,616 INFO [train.py:901] (3/4) Epoch 7, batch 5550, loss[loss=0.2637, simple_loss=0.3397, pruned_loss=0.09385, over 8610.00 frames. ], tot_loss[loss=0.2619, simple_loss=0.3303, pruned_loss=0.09673, over 1615613.14 frames. ], batch size: 39, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:27:10,522 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:18,062 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:37,055 INFO [train.py:901] (3/4) Epoch 7, batch 5600, loss[loss=0.3206, simple_loss=0.3757, pruned_loss=0.1328, over 8337.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3311, pruned_loss=0.09685, over 1616516.37 frames. ], batch size: 26, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:27:37,987 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=54100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:27:55,818 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=54125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:28:02,458 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.954e+02 2.795e+02 3.455e+02 4.516e+02 9.788e+02, threshold=6.911e+02, percent-clipped=3.0 +2023-02-06 04:28:04,939 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 04:28:12,203 INFO [train.py:901] (3/4) Epoch 7, batch 5650, loss[loss=0.2442, simple_loss=0.3227, pruned_loss=0.0828, over 8366.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3319, pruned_loss=0.09762, over 1618663.91 frames. ], batch size: 24, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:28:25,370 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 04:28:42,171 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-06 04:28:47,273 INFO [train.py:901] (3/4) Epoch 7, batch 5700, loss[loss=0.2411, simple_loss=0.3135, pruned_loss=0.08429, over 5167.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3305, pruned_loss=0.09639, over 1610391.95 frames. ], batch size: 11, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:29:12,994 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.786e+02 3.155e+02 4.023e+02 8.991e+02, threshold=6.311e+02, percent-clipped=4.0 +2023-02-06 04:29:22,470 INFO [train.py:901] (3/4) Epoch 7, batch 5750, loss[loss=0.3023, simple_loss=0.3604, pruned_loss=0.1221, over 8574.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.329, pruned_loss=0.09572, over 1608444.18 frames. 
], batch size: 31, lr: 1.08e-02, grad_scale: 8.0 +2023-02-06 04:29:31,469 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 04:29:39,674 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5943, 1.3485, 2.8853, 1.2615, 1.9088, 3.0927, 3.0217, 2.6518], + device='cuda:3'), covar=tensor([0.0966, 0.1304, 0.0405, 0.1945, 0.0769, 0.0286, 0.0510, 0.0629], + device='cuda:3'), in_proj_covar=tensor([0.0240, 0.0272, 0.0231, 0.0270, 0.0237, 0.0217, 0.0275, 0.0279], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 04:29:48,772 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 04:29:56,274 INFO [train.py:901] (3/4) Epoch 7, batch 5800, loss[loss=0.1835, simple_loss=0.2616, pruned_loss=0.05274, over 6858.00 frames. ], tot_loss[loss=0.2607, simple_loss=0.329, pruned_loss=0.09619, over 1607311.72 frames. ], batch size: 15, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:30:03,822 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3959, 1.5583, 1.6617, 1.4467, 0.9158, 1.7543, 0.0672, 1.0895], + device='cuda:3'), covar=tensor([0.3097, 0.1964, 0.0744, 0.1906, 0.5615, 0.0650, 0.4407, 0.2208], + device='cuda:3'), in_proj_covar=tensor([0.0145, 0.0144, 0.0086, 0.0193, 0.0230, 0.0089, 0.0152, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:30:22,036 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.127e+02 2.882e+02 3.737e+02 4.385e+02 9.194e+02, threshold=7.474e+02, percent-clipped=5.0 +2023-02-06 04:30:25,045 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9773, 1.2268, 5.9069, 2.0522, 5.3455, 5.0678, 5.6019, 5.4203], + device='cuda:3'), covar=tensor([0.0281, 0.4463, 0.0218, 0.2750, 0.0754, 0.0566, 0.0295, 0.0371], + device='cuda:3'), in_proj_covar=tensor([0.0395, 0.0532, 0.0474, 0.0462, 0.0541, 0.0445, 0.0439, 0.0498], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 04:30:32,282 INFO [train.py:901] (3/4) Epoch 7, batch 5850, loss[loss=0.2711, simple_loss=0.3455, pruned_loss=0.09833, over 8366.00 frames. ], tot_loss[loss=0.2622, simple_loss=0.3302, pruned_loss=0.09709, over 1607254.27 frames. ], batch size: 24, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:31:06,116 INFO [train.py:901] (3/4) Epoch 7, batch 5900, loss[loss=0.222, simple_loss=0.2949, pruned_loss=0.07453, over 7439.00 frames. ], tot_loss[loss=0.2623, simple_loss=0.33, pruned_loss=0.09735, over 1606727.36 frames. 
], batch size: 17, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:31:30,650 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.634e+02 3.151e+02 3.851e+02 7.879e+02, threshold=6.301e+02, percent-clipped=2.0 +2023-02-06 04:31:40,254 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5327, 1.7839, 2.8793, 1.2526, 2.0679, 1.7332, 1.6737, 1.7079], + device='cuda:3'), covar=tensor([0.1422, 0.1808, 0.0553, 0.3151, 0.1244, 0.2416, 0.1489, 0.1933], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0477, 0.0529, 0.0552, 0.0597, 0.0530, 0.0456, 0.0585], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:31:40,698 INFO [train.py:901] (3/4) Epoch 7, batch 5950, loss[loss=0.2698, simple_loss=0.3376, pruned_loss=0.101, over 8621.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3307, pruned_loss=0.09712, over 1612398.25 frames. ], batch size: 39, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:31:55,859 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9927, 2.3128, 1.8839, 2.8204, 1.2809, 1.6182, 1.7473, 2.4276], + device='cuda:3'), covar=tensor([0.0859, 0.0969, 0.1245, 0.0470, 0.1536, 0.1802, 0.1432, 0.0862], + device='cuda:3'), in_proj_covar=tensor([0.0254, 0.0240, 0.0277, 0.0228, 0.0240, 0.0269, 0.0280, 0.0242], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 04:32:14,309 INFO [train.py:901] (3/4) Epoch 7, batch 6000, loss[loss=0.25, simple_loss=0.3222, pruned_loss=0.08894, over 8254.00 frames. ], tot_loss[loss=0.2618, simple_loss=0.3302, pruned_loss=0.09669, over 1612045.40 frames. ], batch size: 24, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:32:14,309 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 04:32:26,542 INFO [train.py:935] (3/4) Epoch 7, validation: loss=0.2048, simple_loss=0.3036, pruned_loss=0.05298, over 944034.00 frames. +2023-02-06 04:32:26,543 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 04:32:50,868 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.791e+02 2.687e+02 3.524e+02 4.445e+02 8.914e+02, threshold=7.048e+02, percent-clipped=8.0 +2023-02-06 04:33:00,119 INFO [train.py:901] (3/4) Epoch 7, batch 6050, loss[loss=0.2904, simple_loss=0.3595, pruned_loss=0.1106, over 8449.00 frames. ], tot_loss[loss=0.264, simple_loss=0.3318, pruned_loss=0.09805, over 1614112.61 frames. ], batch size: 27, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:33:22,591 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7323, 1.1917, 3.9320, 1.3483, 3.4725, 3.4117, 3.5646, 3.4279], + device='cuda:3'), covar=tensor([0.0574, 0.3835, 0.0577, 0.2958, 0.1303, 0.0852, 0.0588, 0.0685], + device='cuda:3'), in_proj_covar=tensor([0.0390, 0.0532, 0.0473, 0.0466, 0.0540, 0.0446, 0.0445, 0.0502], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0001, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 04:33:36,263 INFO [train.py:901] (3/4) Epoch 7, batch 6100, loss[loss=0.2359, simple_loss=0.324, pruned_loss=0.07389, over 8508.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.3319, pruned_loss=0.09795, over 1613940.69 frames. 
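Every so often (here at batch 6000) training pauses for a validation pass over a fixed 944 034-frame set, after which peak GPU memory is reported. A sketch of that bookkeeping; `compute_loss` is a hypothetical stand-in for the recipe's loss function:

```python
import torch

def validate(model, valid_loader, compute_loss) -> float:
    """Frame-weighted validation loss plus a peak-memory report."""
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_loader:
            loss, num_frames = compute_loss(model, batch)
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    model.train()
    print(f"validation: loss={tot_loss / tot_frames:.4f}, "
          f"over {tot_frames:.2f} frames.")
    mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
    print(f"Maximum memory allocated so far is {mb}MB")
    return tot_loss / tot_frames
```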
], batch size: 26, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:33:52,423 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6498, 2.0760, 3.3878, 1.3801, 2.4120, 2.1057, 1.7385, 2.1632], + device='cuda:3'), covar=tensor([0.1440, 0.1807, 0.0608, 0.3286, 0.1269, 0.2252, 0.1489, 0.1902], + device='cuda:3'), in_proj_covar=tensor([0.0478, 0.0472, 0.0525, 0.0557, 0.0599, 0.0530, 0.0453, 0.0586], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:33:57,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9283, 3.1184, 2.7178, 4.0679, 1.9521, 2.3058, 2.3716, 3.3493], + device='cuda:3'), covar=tensor([0.0689, 0.1012, 0.0992, 0.0290, 0.1354, 0.1456, 0.1439, 0.0855], + device='cuda:3'), in_proj_covar=tensor([0.0251, 0.0237, 0.0275, 0.0225, 0.0236, 0.0268, 0.0276, 0.0239], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 04:34:00,442 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 04:34:01,815 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.948e+02 2.824e+02 3.447e+02 4.351e+02 1.012e+03, threshold=6.894e+02, percent-clipped=2.0 +2023-02-06 04:34:11,162 INFO [train.py:901] (3/4) Epoch 7, batch 6150, loss[loss=0.2711, simple_loss=0.327, pruned_loss=0.1076, over 7421.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.3326, pruned_loss=0.09898, over 1612022.82 frames. ], batch size: 17, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:34:34,025 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54682.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:34:46,676 INFO [train.py:901] (3/4) Epoch 7, batch 6200, loss[loss=0.3221, simple_loss=0.3661, pruned_loss=0.1391, over 8588.00 frames. ], tot_loss[loss=0.2671, simple_loss=0.334, pruned_loss=0.1, over 1614801.83 frames. ], batch size: 50, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:35:12,146 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.928e+02 3.624e+02 4.953e+02 9.267e+02, threshold=7.248e+02, percent-clipped=4.0 +2023-02-06 04:35:21,775 INFO [train.py:901] (3/4) Epoch 7, batch 6250, loss[loss=0.3075, simple_loss=0.3621, pruned_loss=0.1264, over 7198.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3324, pruned_loss=0.09935, over 1610305.21 frames. ], batch size: 71, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:35:21,999 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2770, 1.5485, 1.3753, 1.8839, 0.8620, 1.1847, 1.2752, 1.4696], + device='cuda:3'), covar=tensor([0.1159, 0.0998, 0.1368, 0.0685, 0.1490, 0.1905, 0.1037, 0.0970], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0236, 0.0272, 0.0223, 0.0236, 0.0264, 0.0272, 0.0238], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 04:35:44,427 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.66 vs. limit=5.0 +2023-02-06 04:35:55,499 INFO [train.py:901] (3/4) Epoch 7, batch 6300, loss[loss=0.3027, simple_loss=0.3554, pruned_loss=0.125, over 8197.00 frames. ], tot_loss[loss=0.2656, simple_loss=0.3327, pruned_loss=0.09918, over 1612677.95 frames. 
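The `tot_loss[..., over 1 613 940.69 frames]` figures are not a sum over the whole epoch: the fractional frame counts hovering around 1.6 M suggest an exponentially decayed, frame-weighted running average of recent batches. A guess at that bookkeeping; the decay constant is invented:

```python
def update_tot_loss(tot: dict, batch_loss: float, batch_frames: float,
                    decay: float = 0.999) -> dict:
    """Assumed running average behind 'tot_loss[..., over N frames]':
    old statistics decay geometrically, so N stabilizes near
    batch_frames / (1 - decay) instead of growing without bound."""
    frames = tot["frames"] * decay + batch_frames
    weighted = tot["loss"] * tot["frames"] * decay + batch_loss * batch_frames
    return {"loss": weighted / frames, "frames": frames}
```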
], batch size: 23, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:36:22,281 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 3.001e+02 3.662e+02 4.451e+02 9.002e+02, threshold=7.325e+02, percent-clipped=3.0 +2023-02-06 04:36:23,839 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5661, 1.7411, 2.8542, 1.2812, 2.0336, 1.9027, 1.5158, 1.6756], + device='cuda:3'), covar=tensor([0.1519, 0.1986, 0.0640, 0.3477, 0.1347, 0.2329, 0.1720, 0.2016], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0475, 0.0534, 0.0564, 0.0602, 0.0529, 0.0453, 0.0590], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:36:24,160 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0 +2023-02-06 04:36:32,317 INFO [train.py:901] (3/4) Epoch 7, batch 6350, loss[loss=0.2498, simple_loss=0.3149, pruned_loss=0.09234, over 7705.00 frames. ], tot_loss[loss=0.2669, simple_loss=0.3339, pruned_loss=0.09991, over 1612547.49 frames. ], batch size: 18, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:36:53,639 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=54880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:37:06,007 INFO [train.py:901] (3/4) Epoch 7, batch 6400, loss[loss=0.2112, simple_loss=0.2904, pruned_loss=0.06602, over 8286.00 frames. ], tot_loss[loss=0.2667, simple_loss=0.3338, pruned_loss=0.09983, over 1616446.39 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:37:26,014 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9565, 4.0505, 2.6136, 2.7124, 2.7542, 2.2343, 2.4258, 2.9970], + device='cuda:3'), covar=tensor([0.1505, 0.0248, 0.0747, 0.0728, 0.0673, 0.1047, 0.0985, 0.0938], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0233, 0.0313, 0.0298, 0.0314, 0.0316, 0.0339, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:37:31,195 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.661e+02 3.281e+02 3.949e+02 1.010e+03, threshold=6.562e+02, percent-clipped=2.0 +2023-02-06 04:37:40,714 INFO [train.py:901] (3/4) Epoch 7, batch 6450, loss[loss=0.2549, simple_loss=0.322, pruned_loss=0.0939, over 8240.00 frames. ], tot_loss[loss=0.2654, simple_loss=0.3327, pruned_loss=0.09908, over 1614063.16 frames. ], batch size: 22, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:37:56,506 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5831, 1.9318, 3.3136, 1.2875, 2.1726, 1.9415, 1.6413, 1.9964], + device='cuda:3'), covar=tensor([0.1471, 0.1820, 0.0555, 0.3342, 0.1384, 0.2339, 0.1538, 0.1960], + device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0468, 0.0524, 0.0550, 0.0589, 0.0521, 0.0445, 0.0582], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:38:15,589 INFO [train.py:901] (3/4) Epoch 7, batch 6500, loss[loss=0.2835, simple_loss=0.3513, pruned_loss=0.1078, over 8261.00 frames. ], tot_loss[loss=0.265, simple_loss=0.3323, pruned_loss=0.09886, over 1612278.48 frames. 
], batch size: 24, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:38:33,834 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55026.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:38:39,660 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.546e+02 3.271e+02 4.197e+02 5.859e+02, threshold=6.542e+02, percent-clipped=0.0 +2023-02-06 04:38:49,579 INFO [train.py:901] (3/4) Epoch 7, batch 6550, loss[loss=0.1928, simple_loss=0.2838, pruned_loss=0.05091, over 7944.00 frames. ], tot_loss[loss=0.2663, simple_loss=0.3336, pruned_loss=0.09952, over 1613721.08 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:39:08,940 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0510, 4.1434, 2.6323, 2.7910, 3.1469, 2.3690, 2.8230, 3.0007], + device='cuda:3'), covar=tensor([0.1484, 0.0180, 0.0868, 0.0713, 0.0639, 0.1129, 0.0975, 0.1192], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0231, 0.0311, 0.0296, 0.0306, 0.0313, 0.0337, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:39:12,182 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 04:39:17,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7525, 2.1375, 1.6721, 2.5964, 1.1693, 1.2997, 1.7093, 2.1538], + device='cuda:3'), covar=tensor([0.0990, 0.0864, 0.1333, 0.0497, 0.1424, 0.1946, 0.1240, 0.0940], + device='cuda:3'), in_proj_covar=tensor([0.0255, 0.0239, 0.0279, 0.0225, 0.0235, 0.0270, 0.0278, 0.0242], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 04:39:25,813 INFO [train.py:901] (3/4) Epoch 7, batch 6600, loss[loss=0.207, simple_loss=0.2815, pruned_loss=0.06627, over 7436.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.3328, pruned_loss=0.09825, over 1616544.13 frames. ], batch size: 17, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:39:32,333 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 04:39:49,419 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.699e+02 3.503e+02 4.413e+02 7.218e+02, threshold=7.007e+02, percent-clipped=4.0 +2023-02-06 04:39:53,585 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55141.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:39:58,770 INFO [train.py:901] (3/4) Epoch 7, batch 6650, loss[loss=0.266, simple_loss=0.3354, pruned_loss=0.09834, over 8550.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3332, pruned_loss=0.09832, over 1617085.04 frames. 
], batch size: 39, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:40:10,776 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:29,609 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5299, 2.8069, 1.8006, 2.1666, 2.2163, 1.5404, 2.1257, 2.2516], + device='cuda:3'), covar=tensor([0.1483, 0.0314, 0.0962, 0.0711, 0.0669, 0.1267, 0.0936, 0.0947], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0232, 0.0309, 0.0297, 0.0311, 0.0311, 0.0337, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:40:34,123 INFO [train.py:901] (3/4) Epoch 7, batch 6700, loss[loss=0.2087, simple_loss=0.286, pruned_loss=0.06568, over 7817.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3333, pruned_loss=0.09849, over 1618889.79 frames. ], batch size: 20, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:40:42,563 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 04:40:51,595 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:55,055 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:40:58,789 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 3.031e+02 3.759e+02 4.673e+02 1.170e+03, threshold=7.519e+02, percent-clipped=9.0 +2023-02-06 04:41:07,993 INFO [train.py:901] (3/4) Epoch 7, batch 6750, loss[loss=0.2488, simple_loss=0.3229, pruned_loss=0.08734, over 8190.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.3317, pruned_loss=0.09752, over 1615269.32 frames. ], batch size: 23, lr: 1.07e-02, grad_scale: 8.0 +2023-02-06 04:41:17,500 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7281, 1.9926, 2.1029, 1.3397, 2.2049, 1.4758, 1.0145, 1.7612], + device='cuda:3'), covar=tensor([0.0237, 0.0146, 0.0102, 0.0215, 0.0176, 0.0384, 0.0315, 0.0145], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0263, 0.0218, 0.0318, 0.0258, 0.0407, 0.0317, 0.0297], + device='cuda:3'), out_proj_covar=tensor([1.0850e-04, 8.1515e-05, 6.6539e-05, 9.7277e-05, 8.0360e-05, 1.3681e-04, + 9.9991e-05, 9.2540e-05], device='cuda:3') +2023-02-06 04:41:28,742 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4097, 2.8297, 1.6554, 2.0642, 1.9410, 1.3750, 1.7238, 2.1448], + device='cuda:3'), covar=tensor([0.1459, 0.0284, 0.1036, 0.0707, 0.0778, 0.1435, 0.1143, 0.0937], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0231, 0.0310, 0.0299, 0.0311, 0.0315, 0.0339, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:41:38,038 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2658, 2.2105, 1.4131, 1.9402, 1.7558, 1.3154, 1.6063, 1.7253], + device='cuda:3'), covar=tensor([0.1254, 0.0342, 0.1056, 0.0494, 0.0590, 0.1289, 0.0802, 0.0807], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0232, 0.0313, 0.0301, 0.0313, 0.0317, 0.0340, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:41:42,537 INFO [train.py:901] (3/4) Epoch 7, batch 6800, loss[loss=0.1895, simple_loss=0.2637, pruned_loss=0.05769, over 7230.00 frames. 
], tot_loss[loss=0.2644, simple_loss=0.3325, pruned_loss=0.09815, over 1616328.25 frames. ], batch size: 16, lr: 1.07e-02, grad_scale: 16.0 +2023-02-06 04:41:47,230 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 04:42:08,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5016, 2.8012, 1.5628, 2.0815, 2.1207, 1.4222, 1.9196, 2.0864], + device='cuda:3'), covar=tensor([0.1343, 0.0310, 0.1036, 0.0687, 0.0632, 0.1232, 0.0913, 0.0894], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0231, 0.0310, 0.0299, 0.0311, 0.0314, 0.0337, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:42:08,554 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.859e+02 3.364e+02 4.161e+02 9.626e+02, threshold=6.728e+02, percent-clipped=3.0 +2023-02-06 04:42:11,524 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:42:18,203 INFO [train.py:901] (3/4) Epoch 7, batch 6850, loss[loss=0.281, simple_loss=0.343, pruned_loss=0.1095, over 8282.00 frames. ], tot_loss[loss=0.2653, simple_loss=0.333, pruned_loss=0.09875, over 1617854.67 frames. ], batch size: 23, lr: 1.06e-02, grad_scale: 16.0 +2023-02-06 04:42:34,166 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 04:42:50,632 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55397.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:42:52,432 INFO [train.py:901] (3/4) Epoch 7, batch 6900, loss[loss=0.2131, simple_loss=0.2879, pruned_loss=0.06916, over 7924.00 frames. ], tot_loss[loss=0.2649, simple_loss=0.3332, pruned_loss=0.09831, over 1620766.38 frames. ], batch size: 20, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:43:09,632 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55422.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 04:43:19,277 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.767e+02 3.318e+02 4.413e+02 7.718e+02, threshold=6.635e+02, percent-clipped=1.0 +2023-02-06 04:43:28,911 INFO [train.py:901] (3/4) Epoch 7, batch 6950, loss[loss=0.3456, simple_loss=0.3842, pruned_loss=0.1535, over 8645.00 frames. ], tot_loss[loss=0.2636, simple_loss=0.3318, pruned_loss=0.09772, over 1616163.74 frames. ], batch size: 49, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:43:42,924 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 04:43:46,612 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 04:43:48,201 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 04:44:02,247 INFO [train.py:901] (3/4) Epoch 7, batch 7000, loss[loss=0.2143, simple_loss=0.2747, pruned_loss=0.07693, over 6792.00 frames. ], tot_loss[loss=0.2661, simple_loss=0.3342, pruned_loss=0.09895, over 1619791.16 frames. 
], batch size: 15, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:44:04,388 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1648, 3.1517, 2.2362, 2.4313, 2.5189, 2.1208, 2.4195, 2.7790], + device='cuda:3'), covar=tensor([0.0956, 0.0237, 0.0650, 0.0534, 0.0469, 0.0796, 0.0656, 0.0658], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0232, 0.0308, 0.0297, 0.0308, 0.0317, 0.0335, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 04:44:09,458 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:44:28,243 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.942e+02 3.699e+02 4.542e+02 1.220e+03, threshold=7.399e+02, percent-clipped=11.0 +2023-02-06 04:44:37,059 INFO [train.py:901] (3/4) Epoch 7, batch 7050, loss[loss=0.2339, simple_loss=0.2963, pruned_loss=0.08572, over 8235.00 frames. ], tot_loss[loss=0.2646, simple_loss=0.333, pruned_loss=0.0981, over 1613962.19 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:44:43,035 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 04:44:54,228 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=55573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:09,299 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:11,742 INFO [train.py:901] (3/4) Epoch 7, batch 7100, loss[loss=0.2193, simple_loss=0.2923, pruned_loss=0.0732, over 7793.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3309, pruned_loss=0.09703, over 1613322.55 frames. ], batch size: 19, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:45:26,280 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:29,498 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:45:36,698 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.720e+02 3.297e+02 4.008e+02 7.250e+02, threshold=6.594e+02, percent-clipped=0.0 +2023-02-06 04:45:38,761 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3214, 4.2420, 3.8524, 1.8955, 3.8426, 3.6911, 3.8946, 3.3983], + device='cuda:3'), covar=tensor([0.0758, 0.0568, 0.1017, 0.4676, 0.0785, 0.0932, 0.1217, 0.0919], + device='cuda:3'), in_proj_covar=tensor([0.0413, 0.0324, 0.0358, 0.0439, 0.0339, 0.0317, 0.0328, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:45:45,953 INFO [train.py:901] (3/4) Epoch 7, batch 7150, loss[loss=0.2685, simple_loss=0.3443, pruned_loss=0.09633, over 8246.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3301, pruned_loss=0.09634, over 1610605.96 frames. ], batch size: 24, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:46:14,224 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=55688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:46:21,659 INFO [train.py:901] (3/4) Epoch 7, batch 7200, loss[loss=0.2699, simple_loss=0.337, pruned_loss=0.1014, over 8250.00 frames. ], tot_loss[loss=0.2625, simple_loss=0.3314, pruned_loss=0.09673, over 1617312.56 frames. 
], batch size: 24, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:46:30,269 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.96 vs. limit=5.0 +2023-02-06 04:46:38,149 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 04:46:47,138 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.857e+02 3.487e+02 4.455e+02 1.230e+03, threshold=6.974e+02, percent-clipped=5.0 +2023-02-06 04:46:55,811 INFO [train.py:901] (3/4) Epoch 7, batch 7250, loss[loss=0.2551, simple_loss=0.3136, pruned_loss=0.09825, over 7228.00 frames. ], tot_loss[loss=0.261, simple_loss=0.3297, pruned_loss=0.09615, over 1611565.07 frames. ], batch size: 16, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:47:07,788 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:47:15,884 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6302, 1.3584, 2.8285, 1.1625, 2.0065, 3.0632, 3.0694, 2.5909], + device='cuda:3'), covar=tensor([0.1005, 0.1288, 0.0352, 0.1928, 0.0740, 0.0283, 0.0429, 0.0640], + device='cuda:3'), in_proj_covar=tensor([0.0242, 0.0271, 0.0229, 0.0269, 0.0239, 0.0212, 0.0277, 0.0276], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 04:47:31,018 INFO [train.py:901] (3/4) Epoch 7, batch 7300, loss[loss=0.2421, simple_loss=0.3088, pruned_loss=0.08766, over 8691.00 frames. ], tot_loss[loss=0.261, simple_loss=0.33, pruned_loss=0.09599, over 1617361.25 frames. ], batch size: 34, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:47:55,727 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.887e+02 3.402e+02 4.424e+02 1.529e+03, threshold=6.804e+02, percent-clipped=7.0 +2023-02-06 04:48:04,227 INFO [train.py:901] (3/4) Epoch 7, batch 7350, loss[loss=0.2086, simple_loss=0.2785, pruned_loss=0.06931, over 7550.00 frames. ], tot_loss[loss=0.262, simple_loss=0.3302, pruned_loss=0.09693, over 1613848.46 frames. ], batch size: 18, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:48:09,146 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9014, 2.5721, 4.6961, 1.4280, 3.2558, 2.4411, 2.0390, 2.9725], + device='cuda:3'), covar=tensor([0.1441, 0.1805, 0.0546, 0.3275, 0.1357, 0.2249, 0.1405, 0.1958], + device='cuda:3'), in_proj_covar=tensor([0.0474, 0.0474, 0.0531, 0.0554, 0.0596, 0.0531, 0.0452, 0.0592], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:48:15,839 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=55866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:16,838 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.06 vs. limit=5.0 +2023-02-06 04:48:26,613 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55881.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:27,826 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 04:48:40,229 INFO [train.py:901] (3/4) Epoch 7, batch 7400, loss[loss=0.2825, simple_loss=0.3223, pruned_loss=0.1213, over 7438.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3298, pruned_loss=0.09667, over 1615845.61 frames. 
], batch size: 17, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:48:45,324 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:48:50,024 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 04:49:05,903 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.646e+02 3.471e+02 4.467e+02 1.348e+03, threshold=6.942e+02, percent-clipped=5.0 +2023-02-06 04:49:11,674 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=55944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:49:14,924 INFO [train.py:901] (3/4) Epoch 7, batch 7450, loss[loss=0.3126, simple_loss=0.3675, pruned_loss=0.1289, over 8500.00 frames. ], tot_loss[loss=0.2634, simple_loss=0.3312, pruned_loss=0.09783, over 1612093.69 frames. ], batch size: 39, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:49:25,913 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 04:49:28,833 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=55969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:49:38,425 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8988, 2.0171, 2.3370, 1.6483, 1.2104, 2.4651, 0.3960, 1.3989], + device='cuda:3'), covar=tensor([0.3531, 0.1562, 0.0633, 0.2501, 0.5515, 0.0763, 0.4522, 0.2341], + device='cuda:3'), in_proj_covar=tensor([0.0142, 0.0144, 0.0085, 0.0191, 0.0229, 0.0088, 0.0145, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:49:50,667 INFO [train.py:901] (3/4) Epoch 7, batch 7500, loss[loss=0.3191, simple_loss=0.3779, pruned_loss=0.1301, over 8621.00 frames. ], tot_loss[loss=0.2642, simple_loss=0.3317, pruned_loss=0.09832, over 1614617.98 frames. ], batch size: 39, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:50:17,129 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.816e+02 3.537e+02 4.737e+02 9.745e+02, threshold=7.074e+02, percent-clipped=6.0 +2023-02-06 04:50:23,244 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5436, 4.4501, 4.0722, 1.8495, 3.9849, 4.0387, 4.0376, 3.4973], + device='cuda:3'), covar=tensor([0.0758, 0.0619, 0.0974, 0.4989, 0.0739, 0.0743, 0.1306, 0.1026], + device='cuda:3'), in_proj_covar=tensor([0.0425, 0.0330, 0.0361, 0.0447, 0.0345, 0.0320, 0.0335, 0.0287], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:50:25,625 INFO [train.py:901] (3/4) Epoch 7, batch 7550, loss[loss=0.2469, simple_loss=0.306, pruned_loss=0.09391, over 8240.00 frames. ], tot_loss[loss=0.2624, simple_loss=0.3302, pruned_loss=0.0973, over 1613203.70 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:50:58,643 INFO [train.py:901] (3/4) Epoch 7, batch 7600, loss[loss=0.3025, simple_loss=0.362, pruned_loss=0.1215, over 8433.00 frames. ], tot_loss[loss=0.2633, simple_loss=0.3305, pruned_loss=0.098, over 1611752.87 frames. 
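The `_sp0.9` / `_sp1.1` suffixes on excluded cut IDs identify speed-perturbed copies of the original utterances. Perturbation rescales durations, so both ends of the filter get tripped: slowing down (`sp0.9`) stretches a ~24 s original past the long-cut ceiling, while speeding up (`sp1.1`) shrinks a ~1.05 s original to the 0.95 s seen above, below the short-cut floor. In Lhotse this augmentation is a one-liner (a sketch):

```python
from lhotse import CutSet

def with_speed_perturb(cuts: CutSet) -> CutSet:
    """Standard 3-way speed perturbation; Lhotse appends '_sp0.9' / '_sp1.1'
    to the IDs of the resampled copies, matching the excluded IDs above."""
    return cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1)
```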
], batch size: 27, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:51:06,785 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:51:25,294 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.750e+02 3.495e+02 4.537e+02 9.121e+02, threshold=6.990e+02, percent-clipped=3.0 +2023-02-06 04:51:34,927 INFO [train.py:901] (3/4) Epoch 7, batch 7650, loss[loss=0.2753, simple_loss=0.3398, pruned_loss=0.1054, over 7014.00 frames. ], tot_loss[loss=0.2637, simple_loss=0.3312, pruned_loss=0.09809, over 1610843.30 frames. ], batch size: 71, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:51:44,429 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:51:47,899 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5813, 2.5773, 4.6124, 1.3053, 3.0081, 1.9982, 1.8645, 2.3313], + device='cuda:3'), covar=tensor([0.2007, 0.1938, 0.0678, 0.4183, 0.1723, 0.2901, 0.1837, 0.2800], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0478, 0.0533, 0.0562, 0.0599, 0.0529, 0.0457, 0.0594], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:51:50,263 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 04:52:04,033 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.8117, 4.7221, 4.3069, 2.2408, 4.2453, 4.2235, 4.3173, 3.8334], + device='cuda:3'), covar=tensor([0.0544, 0.0444, 0.0738, 0.3665, 0.0683, 0.0667, 0.0920, 0.0796], + device='cuda:3'), in_proj_covar=tensor([0.0417, 0.0327, 0.0358, 0.0442, 0.0342, 0.0316, 0.0329, 0.0283], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:52:08,629 INFO [train.py:901] (3/4) Epoch 7, batch 7700, loss[loss=0.256, simple_loss=0.3278, pruned_loss=0.09209, over 8029.00 frames. ], tot_loss[loss=0.2639, simple_loss=0.3314, pruned_loss=0.09819, over 1613114.92 frames. ], batch size: 22, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:52:16,096 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:22,688 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1844, 1.4619, 3.4707, 1.3610, 2.3594, 3.9442, 3.9169, 3.3687], + device='cuda:3'), covar=tensor([0.0931, 0.1392, 0.0323, 0.1994, 0.0781, 0.0231, 0.0377, 0.0625], + device='cuda:3'), in_proj_covar=tensor([0.0242, 0.0269, 0.0231, 0.0268, 0.0238, 0.0210, 0.0274, 0.0272], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 04:52:22,936 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 04:52:26,763 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56225.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:52:34,688 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.791e+02 3.394e+02 3.978e+02 9.035e+02, threshold=6.788e+02, percent-clipped=3.0 +2023-02-06 04:52:34,717 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 04:52:44,020 INFO [train.py:901] (3/4) Epoch 7, batch 7750, loss[loss=0.2772, simple_loss=0.3405, pruned_loss=0.1069, over 7792.00 frames. ], tot_loss[loss=0.2627, simple_loss=0.3302, pruned_loss=0.09754, over 1609058.53 frames. ], batch size: 19, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:52:49,380 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 04:53:18,265 INFO [train.py:901] (3/4) Epoch 7, batch 7800, loss[loss=0.2132, simple_loss=0.2825, pruned_loss=0.07194, over 7558.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3276, pruned_loss=0.0957, over 1608499.67 frames. ], batch size: 18, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:53:35,684 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:53:42,620 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.702e+02 3.307e+02 4.383e+02 8.490e+02, threshold=6.613e+02, percent-clipped=4.0 +2023-02-06 04:53:51,366 INFO [train.py:901] (3/4) Epoch 7, batch 7850, loss[loss=0.2519, simple_loss=0.332, pruned_loss=0.0859, over 8251.00 frames. ], tot_loss[loss=0.2591, simple_loss=0.3275, pruned_loss=0.09537, over 1610195.82 frames. ], batch size: 24, lr: 1.06e-02, grad_scale: 8.0 +2023-02-06 04:54:24,864 INFO [train.py:901] (3/4) Epoch 7, batch 7900, loss[loss=0.2882, simple_loss=0.3589, pruned_loss=0.1087, over 8366.00 frames. ], tot_loss[loss=0.26, simple_loss=0.3287, pruned_loss=0.09571, over 1611820.68 frames. ], batch size: 24, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:54:49,429 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.758e+02 3.520e+02 4.424e+02 1.197e+03, threshold=7.039e+02, percent-clipped=9.0 +2023-02-06 04:54:58,058 INFO [train.py:901] (3/4) Epoch 7, batch 7950, loss[loss=0.2446, simple_loss=0.3264, pruned_loss=0.08134, over 8448.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3292, pruned_loss=0.09579, over 1613798.87 frames. ], batch size: 27, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:55:19,809 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:31,649 INFO [train.py:901] (3/4) Epoch 7, batch 8000, loss[loss=0.224, simple_loss=0.2952, pruned_loss=0.07634, over 8069.00 frames. ], tot_loss[loss=0.2596, simple_loss=0.3286, pruned_loss=0.0953, over 1612047.12 frames. ], batch size: 21, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:55:36,308 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:36,851 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:55:56,107 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.809e+02 3.378e+02 4.457e+02 7.052e+02, threshold=6.755e+02, percent-clipped=1.0 +2023-02-06 04:55:56,474 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 04:55:59,646 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56541.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 04:56:04,838 INFO [train.py:901] (3/4) Epoch 7, batch 8050, loss[loss=0.3255, simple_loss=0.3737, pruned_loss=0.1386, over 6622.00 frames. ], tot_loss[loss=0.259, simple_loss=0.3272, pruned_loss=0.0954, over 1599754.43 frames. 
], batch size: 72, lr: 1.05e-02, grad_scale: 8.0 +2023-02-06 04:56:37,957 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 04:56:42,617 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:56:43,089 INFO [train.py:901] (3/4) Epoch 8, batch 0, loss[loss=0.256, simple_loss=0.3244, pruned_loss=0.0938, over 7980.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3244, pruned_loss=0.0938, over 7980.00 frames. ], batch size: 21, lr: 9.92e-03, grad_scale: 8.0 +2023-02-06 04:56:43,090 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 04:56:54,073 INFO [train.py:935] (3/4) Epoch 8, validation: loss=0.205, simple_loss=0.3028, pruned_loss=0.05355, over 944034.00 frames. +2023-02-06 04:56:54,074 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 04:56:54,903 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9354, 2.8337, 2.6396, 1.4244, 2.5576, 2.6299, 2.6366, 2.4658], + device='cuda:3'), covar=tensor([0.1512, 0.1167, 0.1597, 0.5062, 0.1234, 0.1570, 0.1903, 0.1333], + device='cuda:3'), in_proj_covar=tensor([0.0419, 0.0327, 0.0354, 0.0451, 0.0343, 0.0322, 0.0334, 0.0287], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:57:08,612 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 04:57:08,792 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4152, 1.8152, 1.3152, 2.3719, 1.0417, 1.1504, 1.5754, 1.8441], + device='cuda:3'), covar=tensor([0.1164, 0.1059, 0.1551, 0.0598, 0.1508, 0.2085, 0.1338, 0.0947], + device='cuda:3'), in_proj_covar=tensor([0.0252, 0.0240, 0.0279, 0.0223, 0.0236, 0.0269, 0.0274, 0.0236], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 04:57:10,750 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:57:22,333 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=56622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:57:28,989 INFO [train.py:901] (3/4) Epoch 8, batch 50, loss[loss=0.2792, simple_loss=0.3465, pruned_loss=0.1059, over 8356.00 frames. ], tot_loss[loss=0.2643, simple_loss=0.3336, pruned_loss=0.09748, over 370774.27 frames. ], batch size: 24, lr: 9.92e-03, grad_scale: 8.0 +2023-02-06 04:57:31,765 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.831e+02 3.488e+02 4.265e+02 1.069e+03, threshold=6.975e+02, percent-clipped=2.0 +2023-02-06 04:57:43,125 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 04:58:03,657 INFO [train.py:901] (3/4) Epoch 8, batch 100, loss[loss=0.2086, simple_loss=0.2696, pruned_loss=0.07377, over 7447.00 frames. ], tot_loss[loss=0.2652, simple_loss=0.3331, pruned_loss=0.09862, over 649301.08 frames. ], batch size: 17, lr: 9.91e-03, grad_scale: 8.0 +2023-02-06 04:58:05,751 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 04:58:08,609 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.5915, 5.6142, 4.9442, 2.2569, 5.0410, 5.4643, 5.1610, 4.9841], + device='cuda:3'), covar=tensor([0.0578, 0.0396, 0.0681, 0.4189, 0.0545, 0.0462, 0.0896, 0.0556], + device='cuda:3'), in_proj_covar=tensor([0.0423, 0.0326, 0.0358, 0.0450, 0.0345, 0.0325, 0.0335, 0.0290], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:58:37,162 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3667, 1.8830, 2.9235, 2.3950, 2.5679, 2.0689, 1.7144, 1.7987], + device='cuda:3'), covar=tensor([0.2024, 0.2783, 0.0734, 0.1465, 0.1189, 0.1431, 0.1170, 0.2247], + device='cuda:3'), in_proj_covar=tensor([0.0827, 0.0780, 0.0671, 0.0780, 0.0868, 0.0720, 0.0671, 0.0708], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 04:58:38,288 INFO [train.py:901] (3/4) Epoch 8, batch 150, loss[loss=0.225, simple_loss=0.2985, pruned_loss=0.07571, over 8229.00 frames. ], tot_loss[loss=0.2612, simple_loss=0.3307, pruned_loss=0.09586, over 863582.53 frames. ], batch size: 22, lr: 9.91e-03, grad_scale: 8.0 +2023-02-06 04:58:39,112 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1896, 1.6800, 4.2711, 1.3678, 2.4808, 4.8742, 5.0284, 3.8404], + device='cuda:3'), covar=tensor([0.1355, 0.1666, 0.0323, 0.2509, 0.0990, 0.0358, 0.0401, 0.1017], + device='cuda:3'), in_proj_covar=tensor([0.0248, 0.0277, 0.0236, 0.0273, 0.0243, 0.0214, 0.0281, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 04:58:40,996 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.710e+02 3.372e+02 4.105e+02 8.611e+02, threshold=6.744e+02, percent-clipped=2.0 +2023-02-06 04:59:12,801 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 04:59:13,823 INFO [train.py:901] (3/4) Epoch 8, batch 200, loss[loss=0.3055, simple_loss=0.3663, pruned_loss=0.1224, over 8734.00 frames. ], tot_loss[loss=0.2595, simple_loss=0.3296, pruned_loss=0.09471, over 1032745.35 frames. ], batch size: 34, lr: 9.90e-03, grad_scale: 8.0 +2023-02-06 04:59:26,215 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-06 04:59:41,302 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56821.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 04:59:48,622 INFO [train.py:901] (3/4) Epoch 8, batch 250, loss[loss=0.2922, simple_loss=0.3654, pruned_loss=0.1095, over 8333.00 frames. ], tot_loss[loss=0.2616, simple_loss=0.3316, pruned_loss=0.09583, over 1161185.85 frames. ], batch size: 25, lr: 9.90e-03, grad_scale: 8.0 +2023-02-06 04:59:51,336 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.703e+02 3.318e+02 4.204e+02 1.022e+03, threshold=6.636e+02, percent-clipped=1.0 +2023-02-06 04:59:56,902 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 05:00:06,241 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-06 05:00:21,346 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0889, 2.3360, 1.9900, 2.7577, 1.4618, 1.5663, 2.0104, 2.3649], + device='cuda:3'), covar=tensor([0.0826, 0.0929, 0.1199, 0.0544, 0.1296, 0.1752, 0.1033, 0.0815], + device='cuda:3'), in_proj_covar=tensor([0.0251, 0.0235, 0.0275, 0.0223, 0.0234, 0.0266, 0.0271, 0.0237], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:00:21,381 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=56878.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:23,889 INFO [train.py:901] (3/4) Epoch 8, batch 300, loss[loss=0.2413, simple_loss=0.3122, pruned_loss=0.08524, over 7909.00 frames. ], tot_loss[loss=0.2619, simple_loss=0.3314, pruned_loss=0.09622, over 1262964.70 frames. ], batch size: 20, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:00:25,999 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=56885.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:00:38,049 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=56903.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:54,963 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=56926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:00:58,776 INFO [train.py:901] (3/4) Epoch 8, batch 350, loss[loss=0.3267, simple_loss=0.3802, pruned_loss=0.1366, over 8460.00 frames. ], tot_loss[loss=0.2604, simple_loss=0.3303, pruned_loss=0.09528, over 1343807.09 frames. ], batch size: 27, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:01,452 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.612e+02 3.168e+02 3.951e+02 1.059e+03, threshold=6.336e+02, percent-clipped=3.0 +2023-02-06 05:01:33,167 INFO [train.py:901] (3/4) Epoch 8, batch 400, loss[loss=0.2329, simple_loss=0.3216, pruned_loss=0.07213, over 8517.00 frames. ], tot_loss[loss=0.2611, simple_loss=0.331, pruned_loss=0.09564, over 1407290.10 frames. 
], batch size: 28, lr: 9.89e-03, grad_scale: 8.0 +2023-02-06 05:01:45,435 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57000.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 05:01:50,619 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0091, 3.0737, 3.3354, 2.0775, 1.8389, 3.3923, 0.6973, 1.8772], + device='cuda:3'), covar=tensor([0.2904, 0.1249, 0.0411, 0.3317, 0.5484, 0.0504, 0.4045, 0.2542], + device='cuda:3'), in_proj_covar=tensor([0.0149, 0.0145, 0.0088, 0.0197, 0.0238, 0.0092, 0.0146, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:01:52,588 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57011.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:01:53,825 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:02:04,583 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5986, 1.9933, 3.4173, 1.2825, 2.4464, 2.0813, 1.5257, 2.1951], + device='cuda:3'), covar=tensor([0.1593, 0.1957, 0.0701, 0.3504, 0.1476, 0.2510, 0.1732, 0.2195], + device='cuda:3'), in_proj_covar=tensor([0.0483, 0.0478, 0.0543, 0.0559, 0.0603, 0.0537, 0.0458, 0.0602], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 05:02:07,049 INFO [train.py:901] (3/4) Epoch 8, batch 450, loss[loss=0.24, simple_loss=0.305, pruned_loss=0.08745, over 7797.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3311, pruned_loss=0.09583, over 1454891.31 frames. ], batch size: 19, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:02:10,305 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.037e+02 2.769e+02 3.532e+02 4.551e+02 9.004e+02, threshold=7.064e+02, percent-clipped=7.0 +2023-02-06 05:02:41,859 INFO [train.py:901] (3/4) Epoch 8, batch 500, loss[loss=0.2485, simple_loss=0.331, pruned_loss=0.08302, over 8340.00 frames. ], tot_loss[loss=0.2592, simple_loss=0.3292, pruned_loss=0.09456, over 1487056.85 frames. ], batch size: 26, lr: 9.88e-03, grad_scale: 8.0 +2023-02-06 05:03:15,882 INFO [train.py:901] (3/4) Epoch 8, batch 550, loss[loss=0.2513, simple_loss=0.3289, pruned_loss=0.0869, over 8505.00 frames. ], tot_loss[loss=0.2599, simple_loss=0.3295, pruned_loss=0.09513, over 1513316.64 frames. ], batch size: 26, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:03:18,519 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.804e+02 2.761e+02 3.532e+02 4.192e+02 1.400e+03, threshold=7.064e+02, percent-clipped=6.0 +2023-02-06 05:03:39,524 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:03:50,883 INFO [train.py:901] (3/4) Epoch 8, batch 600, loss[loss=0.2768, simple_loss=0.3336, pruned_loss=0.11, over 8497.00 frames. ], tot_loss[loss=0.2593, simple_loss=0.3294, pruned_loss=0.09462, over 1540145.51 frames. ], batch size: 26, lr: 9.87e-03, grad_scale: 8.0 +2023-02-06 05:04:02,995 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 05:04:06,349 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57204.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:13,913 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57215.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:25,746 INFO [train.py:901] (3/4) Epoch 8, batch 650, loss[loss=0.278, simple_loss=0.3423, pruned_loss=0.1069, over 8372.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3285, pruned_loss=0.09462, over 1552149.95 frames. ], batch size: 24, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:04:28,376 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.474e+02 3.242e+02 4.284e+02 1.059e+03, threshold=6.484e+02, percent-clipped=6.0 +2023-02-06 05:04:29,950 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1888, 2.8103, 2.9020, 1.4111, 3.0759, 1.7997, 1.6761, 1.6525], + device='cuda:3'), covar=tensor([0.0386, 0.0181, 0.0170, 0.0331, 0.0301, 0.0450, 0.0467, 0.0290], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0271, 0.0224, 0.0330, 0.0266, 0.0418, 0.0325, 0.0302], + device='cuda:3'), out_proj_covar=tensor([1.1024e-04, 8.2685e-05, 6.7404e-05, 1.0004e-04, 8.2607e-05, 1.3904e-04, + 1.0134e-04, 9.3016e-05], device='cuda:3') +2023-02-06 05:04:41,274 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4650, 2.0513, 1.9830, 1.0523, 2.2235, 1.3786, 0.5700, 1.5823], + device='cuda:3'), covar=tensor([0.0281, 0.0147, 0.0121, 0.0255, 0.0150, 0.0433, 0.0392, 0.0149], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0272, 0.0224, 0.0333, 0.0268, 0.0420, 0.0328, 0.0303], + device='cuda:3'), out_proj_covar=tensor([1.1101e-04, 8.3030e-05, 6.7443e-05, 1.0093e-04, 8.3005e-05, 1.3962e-04, + 1.0202e-04, 9.3388e-05], device='cuda:3') +2023-02-06 05:04:41,994 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57256.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:04:46,656 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:51,987 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:04:59,512 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57280.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:00,219 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57281.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 05:05:00,683 INFO [train.py:901] (3/4) Epoch 8, batch 700, loss[loss=0.2364, simple_loss=0.3104, pruned_loss=0.08124, over 8238.00 frames. ], tot_loss[loss=0.2584, simple_loss=0.3277, pruned_loss=0.09459, over 1563253.53 frames. ], batch size: 22, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:34,915 INFO [train.py:901] (3/4) Epoch 8, batch 750, loss[loss=0.2985, simple_loss=0.3651, pruned_loss=0.1159, over 8331.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3267, pruned_loss=0.09417, over 1568443.46 frames. ], batch size: 26, lr: 9.86e-03, grad_scale: 8.0 +2023-02-06 05:05:35,407 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. 
limit=2.0 +2023-02-06 05:05:38,344 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.846e+02 3.371e+02 4.091e+02 7.333e+02, threshold=6.742e+02, percent-clipped=1.0 +2023-02-06 05:05:49,697 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 05:05:51,159 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:52,580 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:05:57,732 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 05:05:58,592 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9435, 2.1306, 1.7442, 2.6735, 1.1434, 1.5192, 1.8171, 2.1920], + device='cuda:3'), covar=tensor([0.0829, 0.0927, 0.1261, 0.0405, 0.1269, 0.1577, 0.1047, 0.0727], + device='cuda:3'), in_proj_covar=tensor([0.0253, 0.0235, 0.0275, 0.0217, 0.0232, 0.0267, 0.0271, 0.0237], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:06:09,705 INFO [train.py:901] (3/4) Epoch 8, batch 800, loss[loss=0.2867, simple_loss=0.3478, pruned_loss=0.1128, over 8357.00 frames. ], tot_loss[loss=0.2615, simple_loss=0.3302, pruned_loss=0.09636, over 1585182.81 frames. ], batch size: 24, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:10,112 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 05:06:11,947 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:33,606 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57416.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:06:38,323 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7774, 1.5168, 3.2313, 1.1613, 2.1069, 3.5750, 3.6298, 2.8127], + device='cuda:3'), covar=tensor([0.1263, 0.1638, 0.0482, 0.2324, 0.1065, 0.0362, 0.0441, 0.0849], + device='cuda:3'), in_proj_covar=tensor([0.0245, 0.0276, 0.0234, 0.0271, 0.0244, 0.0217, 0.0281, 0.0281], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 05:06:44,177 INFO [train.py:901] (3/4) Epoch 8, batch 850, loss[loss=0.2601, simple_loss=0.3257, pruned_loss=0.09725, over 8344.00 frames. ], tot_loss[loss=0.2621, simple_loss=0.3308, pruned_loss=0.09667, over 1593197.86 frames. ], batch size: 26, lr: 9.85e-03, grad_scale: 16.0 +2023-02-06 05:06:46,893 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.664e+02 3.287e+02 4.255e+02 8.769e+02, threshold=6.575e+02, percent-clipped=4.0 +2023-02-06 05:07:11,117 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:12,470 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:13,429 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-02-06 05:07:17,839 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57480.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:19,065 INFO [train.py:901] (3/4) Epoch 8, batch 900, loss[loss=0.2268, simple_loss=0.306, pruned_loss=0.07385, over 8029.00 frames. ], tot_loss[loss=0.2617, simple_loss=0.3304, pruned_loss=0.0965, over 1597290.98 frames. ], batch size: 22, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:40,917 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57513.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:53,661 INFO [train.py:901] (3/4) Epoch 8, batch 950, loss[loss=0.2417, simple_loss=0.3182, pruned_loss=0.08254, over 8459.00 frames. ], tot_loss[loss=0.2614, simple_loss=0.3299, pruned_loss=0.09647, over 1599244.96 frames. ], batch size: 25, lr: 9.84e-03, grad_scale: 16.0 +2023-02-06 05:07:56,417 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.713e+02 3.197e+02 4.416e+02 7.629e+02, threshold=6.394e+02, percent-clipped=6.0 +2023-02-06 05:07:56,683 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57536.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:07:57,304 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5354, 1.4376, 2.7471, 1.2093, 2.1028, 3.0142, 3.0018, 2.5543], + device='cuda:3'), covar=tensor([0.1097, 0.1396, 0.0484, 0.2017, 0.0780, 0.0320, 0.0577, 0.0678], + device='cuda:3'), in_proj_covar=tensor([0.0253, 0.0281, 0.0240, 0.0275, 0.0248, 0.0219, 0.0288, 0.0286], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 05:08:04,593 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:13,027 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:14,312 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 05:08:14,512 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:29,126 INFO [train.py:901] (3/4) Epoch 8, batch 1000, loss[loss=0.2822, simple_loss=0.3525, pruned_loss=0.106, over 8446.00 frames. ], tot_loss[loss=0.2602, simple_loss=0.3294, pruned_loss=0.09549, over 1608556.26 frames. ], batch size: 27, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:08:45,909 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:08:47,895 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 05:09:00,595 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 05:09:03,903 INFO [train.py:901] (3/4) Epoch 8, batch 1050, loss[loss=0.2587, simple_loss=0.3268, pruned_loss=0.09528, over 7923.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3271, pruned_loss=0.0943, over 1603595.25 frames. 
], batch size: 20, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:09:06,632 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.778e+02 2.733e+02 3.382e+02 4.210e+02 1.523e+03, threshold=6.765e+02, percent-clipped=11.0 +2023-02-06 05:09:10,055 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:24,435 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57663.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:26,530 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:31,692 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:09:37,589 INFO [train.py:901] (3/4) Epoch 8, batch 1100, loss[loss=0.2938, simple_loss=0.3539, pruned_loss=0.1168, over 6875.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3269, pruned_loss=0.09448, over 1607719.46 frames. ], batch size: 71, lr: 9.83e-03, grad_scale: 16.0 +2023-02-06 05:10:05,404 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:08,891 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,258 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57728.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:10,700 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 05:10:12,767 INFO [train.py:901] (3/4) Epoch 8, batch 1150, loss[loss=0.2948, simple_loss=0.3687, pruned_loss=0.1105, over 8202.00 frames. ], tot_loss[loss=0.2567, simple_loss=0.326, pruned_loss=0.09375, over 1611657.61 frames. ], batch size: 23, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:10:15,541 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.752e+02 3.349e+02 4.211e+02 1.172e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 05:10:26,720 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:26,782 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:28,142 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:32,806 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:10:48,142 INFO [train.py:901] (3/4) Epoch 8, batch 1200, loss[loss=0.2747, simple_loss=0.3494, pruned_loss=0.09998, over 8771.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3265, pruned_loss=0.09404, over 1616815.21 frames. ], batch size: 30, lr: 9.82e-03, grad_scale: 16.0 +2023-02-06 05:11:17,883 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:23,253 INFO [train.py:901] (3/4) Epoch 8, batch 1250, loss[loss=0.2783, simple_loss=0.3454, pruned_loss=0.1056, over 7125.00 frames. ], tot_loss[loss=0.2588, simple_loss=0.3274, pruned_loss=0.09512, over 1613865.81 frames. 
], batch size: 71, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:11:25,902 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.817e+02 3.577e+02 4.191e+02 8.690e+02, threshold=7.155e+02, percent-clipped=5.0 +2023-02-06 05:11:41,411 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=57857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:53,671 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:11:58,369 INFO [train.py:901] (3/4) Epoch 8, batch 1300, loss[loss=0.2773, simple_loss=0.3505, pruned_loss=0.102, over 8360.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.3274, pruned_loss=0.09518, over 1610194.23 frames. ], batch size: 24, lr: 9.81e-03, grad_scale: 16.0 +2023-02-06 05:12:24,237 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57919.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:31,156 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. limit=5.0 +2023-02-06 05:12:32,241 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:33,387 INFO [train.py:901] (3/4) Epoch 8, batch 1350, loss[loss=0.2234, simple_loss=0.2859, pruned_loss=0.08041, over 7270.00 frames. ], tot_loss[loss=0.2572, simple_loss=0.3261, pruned_loss=0.09414, over 1609947.92 frames. ], batch size: 16, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:12:36,124 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.963e+02 2.787e+02 3.281e+02 4.089e+02 1.129e+03, threshold=6.562e+02, percent-clipped=4.0 +2023-02-06 05:12:38,227 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:41,656 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:49,721 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=57955.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:12:51,658 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6436, 1.8434, 2.2436, 1.7020, 0.9716, 2.2987, 0.3267, 1.2902], + device='cuda:3'), covar=tensor([0.2737, 0.1668, 0.0603, 0.2177, 0.5280, 0.0504, 0.3611, 0.2154], + device='cuda:3'), in_proj_covar=tensor([0.0142, 0.0143, 0.0083, 0.0188, 0.0229, 0.0089, 0.0142, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:13:01,725 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=57972.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:05,186 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=57977.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:08,423 INFO [train.py:901] (3/4) Epoch 8, batch 1400, loss[loss=0.2761, simple_loss=0.3391, pruned_loss=0.1066, over 8337.00 frames. ], tot_loss[loss=0.2586, simple_loss=0.3275, pruned_loss=0.09488, over 1613880.53 frames. ], batch size: 48, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:23,091 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:13:41,319 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 05:13:42,234 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8657, 2.7611, 3.2255, 1.4563, 3.3525, 1.9064, 1.5039, 1.8338], + device='cuda:3'), covar=tensor([0.0478, 0.0197, 0.0113, 0.0388, 0.0181, 0.0505, 0.0519, 0.0317], + device='cuda:3'), in_proj_covar=tensor([0.0357, 0.0274, 0.0224, 0.0335, 0.0269, 0.0422, 0.0328, 0.0309], + device='cuda:3'), out_proj_covar=tensor([1.1115e-04, 8.3089e-05, 6.7083e-05, 1.0102e-04, 8.3117e-05, 1.3975e-04, + 1.0160e-04, 9.4776e-05], device='cuda:3') +2023-02-06 05:13:43,360 INFO [train.py:901] (3/4) Epoch 8, batch 1450, loss[loss=0.2557, simple_loss=0.3273, pruned_loss=0.09202, over 8605.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3269, pruned_loss=0.09383, over 1617045.21 frames. ], batch size: 39, lr: 9.80e-03, grad_scale: 16.0 +2023-02-06 05:13:46,045 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.668e+02 3.298e+02 4.223e+02 1.032e+03, threshold=6.596e+02, percent-clipped=5.0 +2023-02-06 05:14:18,670 INFO [train.py:901] (3/4) Epoch 8, batch 1500, loss[loss=0.2005, simple_loss=0.2791, pruned_loss=0.0609, over 7817.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3273, pruned_loss=0.09416, over 1617224.26 frames. ], batch size: 20, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:28,081 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:43,585 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 05:14:52,818 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:14:53,269 INFO [train.py:901] (3/4) Epoch 8, batch 1550, loss[loss=0.2631, simple_loss=0.3392, pruned_loss=0.0935, over 8698.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3256, pruned_loss=0.09284, over 1615023.37 frames. ], batch size: 34, lr: 9.79e-03, grad_scale: 16.0 +2023-02-06 05:14:56,007 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.601e+02 3.218e+02 3.979e+02 6.246e+02, threshold=6.435e+02, percent-clipped=0.0 +2023-02-06 05:15:10,021 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58156.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:12,046 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58159.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:14,755 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:27,769 INFO [train.py:901] (3/4) Epoch 8, batch 1600, loss[loss=0.2182, simple_loss=0.2896, pruned_loss=0.07336, over 8073.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3249, pruned_loss=0.09239, over 1615700.18 frames. ], batch size: 21, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:15:37,318 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:42,313 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 05:15:42,380 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. 
limit=2.0 +2023-02-06 05:15:47,385 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:15:54,734 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:00,085 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58228.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:02,613 INFO [train.py:901] (3/4) Epoch 8, batch 1650, loss[loss=0.1862, simple_loss=0.2667, pruned_loss=0.05282, over 7788.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.326, pruned_loss=0.09307, over 1612620.11 frames. ], batch size: 19, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:16:05,270 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.785e+02 3.241e+02 4.331e+02 1.468e+03, threshold=6.482e+02, percent-clipped=4.0 +2023-02-06 05:16:16,844 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:16:37,569 INFO [train.py:901] (3/4) Epoch 8, batch 1700, loss[loss=0.309, simple_loss=0.3703, pruned_loss=0.1238, over 8463.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.324, pruned_loss=0.09258, over 1608447.92 frames. ], batch size: 25, lr: 9.78e-03, grad_scale: 16.0 +2023-02-06 05:16:57,472 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 05:17:11,722 INFO [train.py:901] (3/4) Epoch 8, batch 1750, loss[loss=0.2246, simple_loss=0.2972, pruned_loss=0.07599, over 8087.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3247, pruned_loss=0.09301, over 1609898.00 frames. ], batch size: 21, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:17:15,044 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.752e+02 3.204e+02 3.949e+02 8.384e+02, threshold=6.409e+02, percent-clipped=4.0 +2023-02-06 05:17:45,739 INFO [train.py:901] (3/4) Epoch 8, batch 1800, loss[loss=0.2492, simple_loss=0.3278, pruned_loss=0.08531, over 8324.00 frames. ], tot_loss[loss=0.2571, simple_loss=0.3267, pruned_loss=0.09373, over 1616180.56 frames. ], batch size: 25, lr: 9.77e-03, grad_scale: 16.0 +2023-02-06 05:17:51,960 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4752, 1.8437, 3.3985, 1.3039, 2.3996, 2.0170, 1.6165, 2.1090], + device='cuda:3'), covar=tensor([0.1872, 0.2089, 0.0610, 0.3584, 0.1285, 0.2405, 0.1858, 0.2020], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0482, 0.0530, 0.0559, 0.0609, 0.0534, 0.0457, 0.0595], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:18:16,053 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4481, 4.3909, 3.8620, 1.8036, 3.8963, 3.9741, 4.1652, 3.5599], + device='cuda:3'), covar=tensor([0.0898, 0.0648, 0.1127, 0.5773, 0.0899, 0.1051, 0.1198, 0.0952], + device='cuda:3'), in_proj_covar=tensor([0.0423, 0.0329, 0.0359, 0.0451, 0.0352, 0.0329, 0.0338, 0.0290], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:18:21,313 INFO [train.py:901] (3/4) Epoch 8, batch 1850, loss[loss=0.2387, simple_loss=0.3085, pruned_loss=0.0844, over 7539.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3264, pruned_loss=0.09339, over 1612446.04 frames. 
], batch size: 18, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:18:24,003 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.956e+02 3.603e+02 4.636e+02 8.044e+02, threshold=7.207e+02, percent-clipped=5.0 +2023-02-06 05:18:45,037 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58466.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:18:55,853 INFO [train.py:901] (3/4) Epoch 8, batch 1900, loss[loss=0.2716, simple_loss=0.3284, pruned_loss=0.1073, over 7542.00 frames. ], tot_loss[loss=0.2563, simple_loss=0.3261, pruned_loss=0.09328, over 1610496.70 frames. ], batch size: 18, lr: 9.76e-03, grad_scale: 16.0 +2023-02-06 05:19:02,004 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:10,024 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:12,718 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=58506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:19:19,281 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 05:19:24,512 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4994, 3.2020, 2.3017, 4.0840, 1.8728, 2.2006, 2.1433, 3.2246], + device='cuda:3'), covar=tensor([0.0772, 0.0706, 0.1096, 0.0262, 0.1296, 0.1489, 0.1312, 0.0748], + device='cuda:3'), in_proj_covar=tensor([0.0247, 0.0229, 0.0265, 0.0214, 0.0234, 0.0265, 0.0265, 0.0235], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:19:30,166 INFO [train.py:901] (3/4) Epoch 8, batch 1950, loss[loss=0.233, simple_loss=0.3157, pruned_loss=0.07512, over 8350.00 frames. ], tot_loss[loss=0.2575, simple_loss=0.3272, pruned_loss=0.09389, over 1613154.62 frames. ], batch size: 24, lr: 9.75e-03, grad_scale: 16.0 +2023-02-06 05:19:30,815 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 05:19:32,812 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.699e+02 3.417e+02 4.103e+02 8.210e+02, threshold=6.834e+02, percent-clipped=5.0 +2023-02-06 05:19:41,935 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-06 05:19:50,869 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 05:19:59,410 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 05:20:04,893 INFO [train.py:901] (3/4) Epoch 8, batch 2000, loss[loss=0.2119, simple_loss=0.2852, pruned_loss=0.06925, over 7248.00 frames. ], tot_loss[loss=0.2565, simple_loss=0.3258, pruned_loss=0.09356, over 1610496.42 frames. ], batch size: 16, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:25,673 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. 
limit=5.0 +2023-02-06 05:20:29,402 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58618.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:31,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=58621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:20:39,655 INFO [train.py:901] (3/4) Epoch 8, batch 2050, loss[loss=0.3171, simple_loss=0.3757, pruned_loss=0.1293, over 8506.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3265, pruned_loss=0.09352, over 1616679.48 frames. ], batch size: 26, lr: 9.75e-03, grad_scale: 8.0 +2023-02-06 05:20:42,933 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.785e+02 3.396e+02 4.687e+02 1.585e+03, threshold=6.792e+02, percent-clipped=4.0 +2023-02-06 05:21:13,656 INFO [train.py:901] (3/4) Epoch 8, batch 2100, loss[loss=0.2402, simple_loss=0.3055, pruned_loss=0.08748, over 7696.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.3274, pruned_loss=0.09405, over 1616528.79 frames. ], batch size: 18, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:25,817 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:21:34,548 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6540, 1.2020, 3.8472, 1.4429, 3.2155, 3.1889, 3.4360, 3.3268], + device='cuda:3'), covar=tensor([0.0589, 0.4448, 0.0560, 0.3152, 0.1274, 0.0887, 0.0593, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0406, 0.0532, 0.0506, 0.0476, 0.0543, 0.0459, 0.0457, 0.0513], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 05:21:47,831 INFO [train.py:901] (3/4) Epoch 8, batch 2150, loss[loss=0.2604, simple_loss=0.3358, pruned_loss=0.09243, over 8501.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3279, pruned_loss=0.09393, over 1621613.51 frames. ], batch size: 28, lr: 9.74e-03, grad_scale: 8.0 +2023-02-06 05:21:51,089 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.818e+02 3.372e+02 4.104e+02 8.704e+02, threshold=6.743e+02, percent-clipped=2.0 +2023-02-06 05:21:53,305 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3402, 1.5031, 1.4365, 1.8461, 0.8410, 1.2159, 1.3553, 1.4543], + device='cuda:3'), covar=tensor([0.1074, 0.0941, 0.1203, 0.0608, 0.1322, 0.1787, 0.0990, 0.0866], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0227, 0.0265, 0.0214, 0.0233, 0.0264, 0.0263, 0.0234], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:22:23,671 INFO [train.py:901] (3/4) Epoch 8, batch 2200, loss[loss=0.2857, simple_loss=0.3558, pruned_loss=0.1078, over 8636.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3275, pruned_loss=0.09408, over 1618824.43 frames. ], batch size: 49, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:22:31,534 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58793.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:22:58,800 INFO [train.py:901] (3/4) Epoch 8, batch 2250, loss[loss=0.206, simple_loss=0.2834, pruned_loss=0.06426, over 7711.00 frames. ], tot_loss[loss=0.2564, simple_loss=0.3264, pruned_loss=0.09319, over 1620137.95 frames. 
], batch size: 18, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:02,318 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.600e+02 3.138e+02 4.259e+02 8.800e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-06 05:23:04,491 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0908, 1.3294, 3.2043, 0.9988, 2.7671, 2.6668, 2.8593, 2.7899], + device='cuda:3'), covar=tensor([0.0686, 0.3577, 0.0757, 0.3286, 0.1520, 0.1059, 0.0726, 0.0819], + device='cuda:3'), in_proj_covar=tensor([0.0400, 0.0524, 0.0500, 0.0470, 0.0540, 0.0454, 0.0455, 0.0506], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 05:23:25,723 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4600, 3.0731, 2.1620, 3.9302, 1.9505, 1.8695, 2.0991, 3.1780], + device='cuda:3'), covar=tensor([0.0955, 0.0952, 0.1236, 0.0342, 0.1402, 0.1847, 0.1541, 0.1046], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0228, 0.0267, 0.0215, 0.0233, 0.0266, 0.0267, 0.0235], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:23:29,264 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:31,267 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=58877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:34,473 INFO [train.py:901] (3/4) Epoch 8, batch 2300, loss[loss=0.255, simple_loss=0.3256, pruned_loss=0.09216, over 8358.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3272, pruned_loss=0.09316, over 1621484.42 frames. ], batch size: 26, lr: 9.73e-03, grad_scale: 8.0 +2023-02-06 05:23:36,059 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6211, 1.7838, 1.8964, 1.5619, 0.9780, 1.9972, 0.1859, 1.2016], + device='cuda:3'), covar=tensor([0.2725, 0.1688, 0.0718, 0.2193, 0.5277, 0.0721, 0.4028, 0.2171], + device='cuda:3'), in_proj_covar=tensor([0.0145, 0.0142, 0.0084, 0.0194, 0.0231, 0.0088, 0.0149, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:23:46,516 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:48,587 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=58902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:23:50,664 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9630, 1.8531, 1.7634, 1.9283, 1.1833, 1.6423, 2.2000, 2.1988], + device='cuda:3'), covar=tensor([0.0460, 0.1035, 0.1630, 0.1187, 0.0542, 0.1307, 0.0590, 0.0497], + device='cuda:3'), in_proj_covar=tensor([0.0111, 0.0161, 0.0199, 0.0166, 0.0110, 0.0169, 0.0123, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 05:24:00,183 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3633, 1.4314, 1.2953, 1.8567, 0.8179, 1.1433, 1.3314, 1.4779], + device='cuda:3'), covar=tensor([0.1031, 0.0935, 0.1434, 0.0578, 0.1251, 0.1757, 0.0908, 0.0860], + device='cuda:3'), in_proj_covar=tensor([0.0250, 0.0229, 0.0267, 0.0215, 0.0233, 0.0267, 0.0266, 0.0236], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0006, 0.0006, 0.0005], + 
device='cuda:3') +2023-02-06 05:24:03,549 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=58924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:24:09,485 INFO [train.py:901] (3/4) Epoch 8, batch 2350, loss[loss=0.2093, simple_loss=0.2955, pruned_loss=0.06152, over 8031.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.326, pruned_loss=0.09182, over 1618938.85 frames. ], batch size: 22, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:24:12,940 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.611e+02 3.221e+02 3.780e+02 8.999e+02, threshold=6.441e+02, percent-clipped=2.0 +2023-02-06 05:24:44,048 INFO [train.py:901] (3/4) Epoch 8, batch 2400, loss[loss=0.3105, simple_loss=0.3709, pruned_loss=0.125, over 8366.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3252, pruned_loss=0.09144, over 1618716.54 frames. ], batch size: 24, lr: 9.72e-03, grad_scale: 8.0 +2023-02-06 05:25:18,654 INFO [train.py:901] (3/4) Epoch 8, batch 2450, loss[loss=0.2505, simple_loss=0.332, pruned_loss=0.08447, over 8288.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3246, pruned_loss=0.0911, over 1617996.64 frames. ], batch size: 23, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:25:21,882 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 3.001e+02 3.706e+02 4.542e+02 9.599e+02, threshold=7.413e+02, percent-clipped=3.0 +2023-02-06 05:25:26,108 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:25:29,906 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 05:25:52,506 INFO [train.py:901] (3/4) Epoch 8, batch 2500, loss[loss=0.2497, simple_loss=0.3126, pruned_loss=0.09341, over 8083.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3256, pruned_loss=0.09191, over 1619819.30 frames. ], batch size: 21, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:27,547 INFO [train.py:901] (3/4) Epoch 8, batch 2550, loss[loss=0.2795, simple_loss=0.3587, pruned_loss=0.1001, over 8324.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3257, pruned_loss=0.09195, over 1622319.78 frames. ], batch size: 25, lr: 9.71e-03, grad_scale: 8.0 +2023-02-06 05:26:30,875 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.676e+02 3.180e+02 4.175e+02 9.807e+02, threshold=6.360e+02, percent-clipped=4.0 +2023-02-06 05:26:30,965 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59137.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:26:45,976 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:03,035 INFO [train.py:901] (3/4) Epoch 8, batch 2600, loss[loss=0.2211, simple_loss=0.2922, pruned_loss=0.07494, over 7931.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3244, pruned_loss=0.09144, over 1618122.63 frames. ], batch size: 20, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:03,171 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:27:19,766 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 05:27:38,230 INFO [train.py:901] (3/4) Epoch 8, batch 2650, loss[loss=0.3689, simple_loss=0.4047, pruned_loss=0.1665, over 8549.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3252, pruned_loss=0.09246, over 1612203.35 frames. 
], batch size: 39, lr: 9.70e-03, grad_scale: 8.0 +2023-02-06 05:27:41,652 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.757e+02 3.213e+02 4.207e+02 1.360e+03, threshold=6.426e+02, percent-clipped=6.0 +2023-02-06 05:27:51,997 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:03,088 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:28:05,173 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2759, 1.4049, 4.2824, 1.9319, 2.4633, 4.9700, 4.8406, 4.3013], + device='cuda:3'), covar=tensor([0.1093, 0.1755, 0.0298, 0.1956, 0.1017, 0.0201, 0.0330, 0.0582], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0284, 0.0242, 0.0277, 0.0249, 0.0223, 0.0293, 0.0286], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 05:28:12,220 INFO [train.py:901] (3/4) Epoch 8, batch 2700, loss[loss=0.2334, simple_loss=0.2997, pruned_loss=0.08358, over 7513.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3242, pruned_loss=0.09165, over 1613451.89 frames. ], batch size: 18, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:46,767 INFO [train.py:901] (3/4) Epoch 8, batch 2750, loss[loss=0.2468, simple_loss=0.3138, pruned_loss=0.08991, over 8133.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3241, pruned_loss=0.09193, over 1613097.32 frames. ], batch size: 22, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:28:50,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.941e+02 2.846e+02 3.367e+02 4.274e+02 9.837e+02, threshold=6.735e+02, percent-clipped=6.0 +2023-02-06 05:28:51,051 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4070, 1.9600, 3.1790, 2.5189, 2.7088, 2.1241, 1.5944, 1.3997], + device='cuda:3'), covar=tensor([0.2891, 0.3421, 0.0799, 0.1934, 0.1577, 0.1691, 0.1482, 0.3473], + device='cuda:3'), in_proj_covar=tensor([0.0843, 0.0796, 0.0686, 0.0790, 0.0887, 0.0736, 0.0672, 0.0721], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:29:14,220 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-06 05:29:22,654 INFO [train.py:901] (3/4) Epoch 8, batch 2800, loss[loss=0.2722, simple_loss=0.3235, pruned_loss=0.1105, over 7547.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.325, pruned_loss=0.09176, over 1617171.74 frames. 
], batch size: 18, lr: 9.69e-03, grad_scale: 8.0 +2023-02-06 05:29:23,474 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:36,338 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5467, 2.3872, 2.7482, 2.1087, 1.6953, 2.7259, 0.9734, 2.1664], + device='cuda:3'), covar=tensor([0.2217, 0.1555, 0.0570, 0.2520, 0.4500, 0.0544, 0.4033, 0.1829], + device='cuda:3'), in_proj_covar=tensor([0.0147, 0.0146, 0.0083, 0.0196, 0.0232, 0.0089, 0.0152, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:29:44,998 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:29:56,659 INFO [train.py:901] (3/4) Epoch 8, batch 2850, loss[loss=0.3103, simple_loss=0.3679, pruned_loss=0.1263, over 8277.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3251, pruned_loss=0.09191, over 1620936.22 frames. ], batch size: 23, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:00,145 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.577e+02 2.974e+02 3.773e+02 5.956e+02, threshold=5.948e+02, percent-clipped=0.0 +2023-02-06 05:30:01,810 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:32,530 INFO [train.py:901] (3/4) Epoch 8, batch 2900, loss[loss=0.2479, simple_loss=0.3284, pruned_loss=0.08375, over 8132.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3255, pruned_loss=0.09212, over 1616634.77 frames. ], batch size: 22, lr: 9.68e-03, grad_scale: 8.0 +2023-02-06 05:30:51,217 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59508.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:30:59,183 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:03,080 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:04,363 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 05:31:07,552 INFO [train.py:901] (3/4) Epoch 8, batch 2950, loss[loss=0.3744, simple_loss=0.4182, pruned_loss=0.1653, over 8362.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3253, pruned_loss=0.09223, over 1614687.04 frames. ], batch size: 24, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:08,346 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:31:10,780 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.847e+02 3.468e+02 5.057e+02 9.591e+02, threshold=6.936e+02, percent-clipped=13.0 +2023-02-06 05:31:42,181 INFO [train.py:901] (3/4) Epoch 8, batch 3000, loss[loss=0.2188, simple_loss=0.2978, pruned_loss=0.06986, over 7972.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3256, pruned_loss=0.09284, over 1611533.13 frames. ], batch size: 21, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:31:42,181 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 05:31:54,427 INFO [train.py:935] (3/4) Epoch 8, validation: loss=0.2021, simple_loss=0.3001, pruned_loss=0.05199, over 944034.00 frames. 
+2023-02-06 05:31:54,428 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 05:32:30,902 INFO [train.py:901] (3/4) Epoch 8, batch 3050, loss[loss=0.2855, simple_loss=0.3528, pruned_loss=0.1092, over 8356.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3247, pruned_loss=0.09284, over 1602419.57 frames. ], batch size: 24, lr: 9.67e-03, grad_scale: 8.0 +2023-02-06 05:32:34,247 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.623e+02 3.324e+02 4.059e+02 7.396e+02, threshold=6.648e+02, percent-clipped=1.0 +2023-02-06 05:32:35,805 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:37,138 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59641.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:32:52,396 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:33:05,113 INFO [train.py:901] (3/4) Epoch 8, batch 3100, loss[loss=0.2794, simple_loss=0.347, pruned_loss=0.1059, over 8632.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3268, pruned_loss=0.09401, over 1607313.79 frames. ], batch size: 34, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:40,029 INFO [train.py:901] (3/4) Epoch 8, batch 3150, loss[loss=0.1932, simple_loss=0.2664, pruned_loss=0.05993, over 5986.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.326, pruned_loss=0.0936, over 1608147.79 frames. ], batch size: 13, lr: 9.66e-03, grad_scale: 8.0 +2023-02-06 05:33:43,224 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.052e+02 2.894e+02 3.427e+02 4.526e+02 8.691e+02, threshold=6.853e+02, percent-clipped=4.0 +2023-02-06 05:34:14,629 INFO [train.py:901] (3/4) Epoch 8, batch 3200, loss[loss=0.2724, simple_loss=0.341, pruned_loss=0.102, over 8455.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3254, pruned_loss=0.09289, over 1611716.74 frames. ], batch size: 27, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:50,982 INFO [train.py:901] (3/4) Epoch 8, batch 3250, loss[loss=0.2188, simple_loss=0.3058, pruned_loss=0.06587, over 8136.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3252, pruned_loss=0.09261, over 1611669.75 frames. 
], batch size: 22, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:34:53,137 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0366, 2.9880, 3.0345, 2.3398, 1.9428, 3.3361, 0.6831, 2.2998], + device='cuda:3'), covar=tensor([0.2269, 0.1559, 0.1091, 0.2783, 0.4723, 0.0781, 0.4785, 0.2439], + device='cuda:3'), in_proj_covar=tensor([0.0146, 0.0144, 0.0084, 0.0191, 0.0231, 0.0090, 0.0151, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:34:54,318 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.545e+02 3.201e+02 4.295e+02 9.179e+02, threshold=6.402e+02, percent-clipped=6.0 +2023-02-06 05:35:13,114 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=59864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:14,548 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:23,456 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4569, 2.0054, 3.2366, 2.3981, 2.7436, 2.1005, 1.7111, 1.4060], + device='cuda:3'), covar=tensor([0.2983, 0.3379, 0.0737, 0.2004, 0.1615, 0.1927, 0.1643, 0.3367], + device='cuda:3'), in_proj_covar=tensor([0.0850, 0.0811, 0.0689, 0.0795, 0.0895, 0.0746, 0.0684, 0.0729], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:35:25,230 INFO [train.py:901] (3/4) Epoch 8, batch 3300, loss[loss=0.1652, simple_loss=0.2448, pruned_loss=0.04283, over 6841.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3253, pruned_loss=0.09262, over 1614328.73 frames. ], batch size: 15, lr: 9.65e-03, grad_scale: 8.0 +2023-02-06 05:35:31,615 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.61 vs. limit=2.0 +2023-02-06 05:35:35,293 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=59897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:41,747 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=59907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:48,082 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 05:35:52,455 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=59922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:35:59,528 INFO [train.py:901] (3/4) Epoch 8, batch 3350, loss[loss=0.2388, simple_loss=0.3023, pruned_loss=0.08762, over 7921.00 frames. ], tot_loss[loss=0.2559, simple_loss=0.3258, pruned_loss=0.093, over 1620084.47 frames. ], batch size: 20, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:02,898 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.795e+02 3.400e+02 4.166e+02 8.824e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 05:36:31,812 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=59979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:36:33,699 INFO [train.py:901] (3/4) Epoch 8, batch 3400, loss[loss=0.2526, simple_loss=0.3233, pruned_loss=0.09098, over 7522.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3248, pruned_loss=0.09244, over 1616734.63 frames. 
], batch size: 18, lr: 9.64e-03, grad_scale: 8.0 +2023-02-06 05:36:48,498 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7181, 2.2693, 3.4338, 2.8055, 3.0488, 2.3229, 2.0568, 2.1647], + device='cuda:3'), covar=tensor([0.2113, 0.3012, 0.0795, 0.1681, 0.1247, 0.1602, 0.1172, 0.2566], + device='cuda:3'), in_proj_covar=tensor([0.0848, 0.0815, 0.0691, 0.0793, 0.0897, 0.0744, 0.0682, 0.0727], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:37:09,558 INFO [train.py:901] (3/4) Epoch 8, batch 3450, loss[loss=0.2497, simple_loss=0.323, pruned_loss=0.08819, over 8471.00 frames. ], tot_loss[loss=0.255, simple_loss=0.325, pruned_loss=0.09252, over 1612588.78 frames. ], batch size: 25, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:12,878 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.668e+02 3.106e+02 3.891e+02 9.201e+02, threshold=6.211e+02, percent-clipped=2.0 +2023-02-06 05:37:27,906 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:35,081 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0338, 1.4727, 1.5324, 1.3011, 1.0593, 1.3121, 1.5498, 1.6214], + device='cuda:3'), covar=tensor([0.0582, 0.1287, 0.1781, 0.1408, 0.0600, 0.1527, 0.0703, 0.0596], + device='cuda:3'), in_proj_covar=tensor([0.0109, 0.0162, 0.0198, 0.0164, 0.0111, 0.0169, 0.0123, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 05:37:36,417 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:44,252 INFO [train.py:901] (3/4) Epoch 8, batch 3500, loss[loss=0.25, simple_loss=0.3257, pruned_loss=0.08712, over 8239.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3246, pruned_loss=0.09257, over 1611983.20 frames. ], batch size: 24, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:37:45,090 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:53,903 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60096.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:37:58,640 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-06 05:38:02,911 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 05:38:18,645 INFO [train.py:901] (3/4) Epoch 8, batch 3550, loss[loss=0.2861, simple_loss=0.3502, pruned_loss=0.111, over 8338.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3255, pruned_loss=0.09339, over 1612532.12 frames. 
], batch size: 26, lr: 9.63e-03, grad_scale: 8.0 +2023-02-06 05:38:22,095 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.844e+02 3.449e+02 4.512e+02 7.529e+02, threshold=6.898e+02, percent-clipped=5.0 +2023-02-06 05:38:41,391 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0811, 1.2057, 4.2515, 1.5297, 3.7316, 3.5509, 3.7980, 3.6673], + device='cuda:3'), covar=tensor([0.0477, 0.3798, 0.0448, 0.2985, 0.1051, 0.0747, 0.0517, 0.0628], + device='cuda:3'), in_proj_covar=tensor([0.0415, 0.0535, 0.0514, 0.0478, 0.0552, 0.0465, 0.0460, 0.0514], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 05:38:54,374 INFO [train.py:901] (3/4) Epoch 8, batch 3600, loss[loss=0.2444, simple_loss=0.323, pruned_loss=0.08295, over 8514.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3254, pruned_loss=0.09281, over 1613259.85 frames. ], batch size: 26, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:38:59,446 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8870, 1.8598, 1.8958, 1.7589, 1.1018, 1.8269, 2.4061, 2.7433], + device='cuda:3'), covar=tensor([0.0470, 0.1127, 0.1621, 0.1239, 0.0577, 0.1435, 0.0573, 0.0434], + device='cuda:3'), in_proj_covar=tensor([0.0110, 0.0162, 0.0199, 0.0164, 0.0110, 0.0169, 0.0123, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 05:39:03,692 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0 +2023-02-06 05:39:14,697 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60210.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:18,773 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:29,812 INFO [train.py:901] (3/4) Epoch 8, batch 3650, loss[loss=0.3384, simple_loss=0.393, pruned_loss=0.1419, over 8601.00 frames. ], tot_loss[loss=0.2577, simple_loss=0.3268, pruned_loss=0.09426, over 1610246.93 frames. ], batch size: 49, lr: 9.62e-03, grad_scale: 8.0 +2023-02-06 05:39:32,082 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:33,128 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.875e+02 2.702e+02 3.457e+02 4.155e+02 9.631e+02, threshold=6.915e+02, percent-clipped=4.0 +2023-02-06 05:39:42,753 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60251.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:39:48,812 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:02,764 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 05:40:04,774 INFO [train.py:901] (3/4) Epoch 8, batch 3700, loss[loss=0.2319, simple_loss=0.3108, pruned_loss=0.07654, over 8332.00 frames. ], tot_loss[loss=0.2566, simple_loss=0.3257, pruned_loss=0.09368, over 1610655.21 frames. 
], batch size: 26, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:34,564 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:40:39,103 INFO [train.py:901] (3/4) Epoch 8, batch 3750, loss[loss=0.2338, simple_loss=0.315, pruned_loss=0.07629, over 8467.00 frames. ], tot_loss[loss=0.2561, simple_loss=0.3258, pruned_loss=0.09319, over 1613213.52 frames. ], batch size: 25, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:40:43,053 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.685e+02 3.295e+02 3.882e+02 8.274e+02, threshold=6.589e+02, percent-clipped=2.0 +2023-02-06 05:41:02,727 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:13,769 INFO [train.py:901] (3/4) Epoch 8, batch 3800, loss[loss=0.2626, simple_loss=0.3245, pruned_loss=0.1004, over 7817.00 frames. ], tot_loss[loss=0.257, simple_loss=0.327, pruned_loss=0.09346, over 1611287.73 frames. ], batch size: 20, lr: 9.61e-03, grad_scale: 8.0 +2023-02-06 05:41:28,152 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:36,465 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:45,879 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60427.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:41:49,069 INFO [train.py:901] (3/4) Epoch 8, batch 3850, loss[loss=0.3296, simple_loss=0.3944, pruned_loss=0.1324, over 8568.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3257, pruned_loss=0.09243, over 1609661.97 frames. ], batch size: 34, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:41:52,287 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.691e+02 3.271e+02 4.212e+02 1.032e+03, threshold=6.541e+02, percent-clipped=5.0 +2023-02-06 05:41:54,201 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:08,499 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 05:42:12,035 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6785, 4.7127, 4.0644, 1.8184, 3.9997, 4.0807, 4.2531, 3.9225], + device='cuda:3'), covar=tensor([0.0650, 0.0469, 0.0918, 0.5439, 0.0819, 0.0993, 0.1208, 0.0773], + device='cuda:3'), in_proj_covar=tensor([0.0417, 0.0326, 0.0351, 0.0449, 0.0351, 0.0329, 0.0336, 0.0284], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:42:22,376 INFO [train.py:901] (3/4) Epoch 8, batch 3900, loss[loss=0.2658, simple_loss=0.335, pruned_loss=0.09836, over 8357.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3255, pruned_loss=0.09258, over 1611514.90 frames. ], batch size: 24, lr: 9.60e-03, grad_scale: 8.0 +2023-02-06 05:42:41,578 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. 
limit=2.0 +2023-02-06 05:42:46,591 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:55,303 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:42:57,131 INFO [train.py:901] (3/4) Epoch 8, batch 3950, loss[loss=0.2813, simple_loss=0.3532, pruned_loss=0.1047, over 8252.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3248, pruned_loss=0.09234, over 1610496.21 frames. ], batch size: 24, lr: 9.59e-03, grad_scale: 8.0 +2023-02-06 05:43:00,413 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.562e+02 3.362e+02 4.082e+02 8.516e+02, threshold=6.724e+02, percent-clipped=2.0 +2023-02-06 05:43:03,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:13,331 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:16,391 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=60560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:31,328 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60581.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:31,760 INFO [train.py:901] (3/4) Epoch 8, batch 4000, loss[loss=0.2315, simple_loss=0.3077, pruned_loss=0.07771, over 7937.00 frames. ], tot_loss[loss=0.256, simple_loss=0.3263, pruned_loss=0.09289, over 1613560.13 frames. ], batch size: 20, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:43:48,277 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:43:50,230 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8784, 2.4296, 4.6566, 1.4012, 3.2781, 2.2835, 1.9321, 2.8317], + device='cuda:3'), covar=tensor([0.1329, 0.1749, 0.0607, 0.3294, 0.1160, 0.2264, 0.1438, 0.1994], + device='cuda:3'), in_proj_covar=tensor([0.0469, 0.0479, 0.0525, 0.0552, 0.0595, 0.0533, 0.0452, 0.0587], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:43:58,368 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7874, 2.1289, 4.6609, 1.2505, 3.1942, 2.1804, 1.7559, 2.8431], + device='cuda:3'), covar=tensor([0.1569, 0.2311, 0.0601, 0.3676, 0.1470, 0.2731, 0.1807, 0.2247], + device='cuda:3'), in_proj_covar=tensor([0.0470, 0.0481, 0.0526, 0.0554, 0.0598, 0.0536, 0.0454, 0.0589], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:43:59,795 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:06,269 INFO [train.py:901] (3/4) Epoch 8, batch 4050, loss[loss=0.2678, simple_loss=0.3555, pruned_loss=0.08999, over 8358.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3271, pruned_loss=0.09326, over 1616301.96 frames. 
], batch size: 24, lr: 9.59e-03, grad_scale: 16.0 +2023-02-06 05:44:09,650 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.835e+02 3.722e+02 4.462e+02 8.493e+02, threshold=7.445e+02, percent-clipped=1.0 +2023-02-06 05:44:17,204 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:36,991 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=60675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:44:39,700 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2948, 1.5410, 1.5559, 0.7855, 1.7239, 1.2243, 0.2574, 1.5196], + device='cuda:3'), covar=tensor([0.0254, 0.0164, 0.0124, 0.0228, 0.0170, 0.0423, 0.0393, 0.0132], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0276, 0.0226, 0.0336, 0.0267, 0.0427, 0.0329, 0.0304], + device='cuda:3'), out_proj_covar=tensor([1.0820e-04, 8.2460e-05, 6.7271e-05, 1.0027e-04, 8.1688e-05, 1.4057e-04, + 1.0064e-04, 9.2114e-05], device='cuda:3') +2023-02-06 05:44:41,543 INFO [train.py:901] (3/4) Epoch 8, batch 4100, loss[loss=0.2393, simple_loss=0.3094, pruned_loss=0.08462, over 7691.00 frames. ], tot_loss[loss=0.2555, simple_loss=0.3261, pruned_loss=0.09246, over 1615172.42 frames. ], batch size: 18, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:44:50,520 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2178, 1.1503, 1.2268, 1.1712, 0.8052, 1.2973, 0.0329, 1.0175], + device='cuda:3'), covar=tensor([0.2559, 0.2341, 0.0858, 0.1651, 0.4985, 0.0759, 0.4031, 0.2113], + device='cuda:3'), in_proj_covar=tensor([0.0149, 0.0146, 0.0086, 0.0196, 0.0235, 0.0091, 0.0153, 0.0148], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:45:13,057 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4410, 1.8462, 1.4093, 2.3492, 1.1552, 1.1542, 1.6321, 1.9410], + device='cuda:3'), covar=tensor([0.1249, 0.1085, 0.1673, 0.0649, 0.1433, 0.2217, 0.1300, 0.0900], + device='cuda:3'), in_proj_covar=tensor([0.0248, 0.0232, 0.0267, 0.0219, 0.0228, 0.0264, 0.0267, 0.0233], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:45:16,315 INFO [train.py:901] (3/4) Epoch 8, batch 4150, loss[loss=0.3012, simple_loss=0.3593, pruned_loss=0.1216, over 8362.00 frames. ], tot_loss[loss=0.2551, simple_loss=0.3257, pruned_loss=0.09228, over 1614893.58 frames. ], batch size: 24, lr: 9.58e-03, grad_scale: 16.0 +2023-02-06 05:45:19,086 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=60736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:19,619 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.904e+02 3.574e+02 4.093e+02 8.234e+02, threshold=7.147e+02, percent-clipped=2.0 +2023-02-06 05:45:45,319 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:45:51,060 INFO [train.py:901] (3/4) Epoch 8, batch 4200, loss[loss=0.2426, simple_loss=0.3176, pruned_loss=0.08382, over 8342.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3263, pruned_loss=0.0927, over 1615433.61 frames. 
], batch size: 26, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:45:54,023 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,594 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:02,614 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:07,985 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 05:46:10,882 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:11,541 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60811.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:20,182 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:25,972 INFO [train.py:901] (3/4) Epoch 8, batch 4250, loss[loss=0.244, simple_loss=0.3252, pruned_loss=0.08141, over 8091.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3259, pruned_loss=0.09229, over 1616817.72 frames. ], batch size: 21, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:46:28,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:46:30,099 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.807e+02 3.546e+02 4.515e+02 1.213e+03, threshold=7.092e+02, percent-clipped=3.0 +2023-02-06 05:46:30,813 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 05:46:51,904 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8116, 3.7886, 3.3493, 1.7609, 3.3390, 3.3600, 3.4664, 2.9853], + device='cuda:3'), covar=tensor([0.0979, 0.0658, 0.1245, 0.4642, 0.0911, 0.0941, 0.1393, 0.1081], + device='cuda:3'), in_proj_covar=tensor([0.0422, 0.0330, 0.0353, 0.0454, 0.0349, 0.0328, 0.0338, 0.0290], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:47:01,075 INFO [train.py:901] (3/4) Epoch 8, batch 4300, loss[loss=0.2316, simple_loss=0.3088, pruned_loss=0.07716, over 8290.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3272, pruned_loss=0.09318, over 1614380.55 frames. 
], batch size: 23, lr: 9.57e-03, grad_scale: 8.0 +2023-02-06 05:47:03,344 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8329, 1.8725, 2.1516, 1.6101, 1.2926, 2.1670, 0.2675, 1.3464], + device='cuda:3'), covar=tensor([0.3078, 0.1868, 0.0562, 0.2493, 0.5133, 0.0612, 0.4151, 0.2335], + device='cuda:3'), in_proj_covar=tensor([0.0150, 0.0149, 0.0085, 0.0197, 0.0235, 0.0091, 0.0153, 0.0149], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:47:35,225 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=60931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:35,242 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0693, 2.3699, 3.8619, 1.7517, 2.9088, 2.5051, 2.0283, 2.7598], + device='cuda:3'), covar=tensor([0.1118, 0.1683, 0.0454, 0.2620, 0.1081, 0.1759, 0.1310, 0.1689], + device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0484, 0.0525, 0.0559, 0.0599, 0.0542, 0.0455, 0.0594], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:47:35,684 INFO [train.py:901] (3/4) Epoch 8, batch 4350, loss[loss=0.2831, simple_loss=0.3385, pruned_loss=0.1139, over 7136.00 frames. ], tot_loss[loss=0.259, simple_loss=0.329, pruned_loss=0.0945, over 1617482.02 frames. ], batch size: 71, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:47:39,642 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.751e+02 3.442e+02 4.335e+02 7.709e+02, threshold=6.884e+02, percent-clipped=1.0 +2023-02-06 05:47:51,980 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=60956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:47:59,291 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 05:48:10,331 INFO [train.py:901] (3/4) Epoch 8, batch 4400, loss[loss=0.252, simple_loss=0.328, pruned_loss=0.08799, over 8242.00 frames. ], tot_loss[loss=0.258, simple_loss=0.3278, pruned_loss=0.09408, over 1614290.06 frames. ], batch size: 24, lr: 9.56e-03, grad_scale: 8.0 +2023-02-06 05:48:33,377 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6428, 2.0255, 4.6328, 1.3220, 3.2011, 2.1608, 1.7228, 2.7506], + device='cuda:3'), covar=tensor([0.1815, 0.2278, 0.0567, 0.3873, 0.1483, 0.2814, 0.1874, 0.2202], + device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0487, 0.0522, 0.0559, 0.0598, 0.0542, 0.0455, 0.0593], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:48:42,540 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 05:48:44,685 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4605, 1.7660, 1.8559, 0.9078, 1.9740, 1.4151, 0.4044, 1.6899], + device='cuda:3'), covar=tensor([0.0264, 0.0166, 0.0120, 0.0293, 0.0162, 0.0444, 0.0450, 0.0120], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0280, 0.0227, 0.0339, 0.0268, 0.0430, 0.0333, 0.0306], + device='cuda:3'), out_proj_covar=tensor([1.0817e-04, 8.3407e-05, 6.7626e-05, 1.0127e-04, 8.2175e-05, 1.4119e-04, + 1.0195e-04, 9.2573e-05], device='cuda:3') +2023-02-06 05:48:45,132 INFO [train.py:901] (3/4) Epoch 8, batch 4450, loss[loss=0.2499, simple_loss=0.3167, pruned_loss=0.09151, over 7815.00 frames. 
], tot_loss[loss=0.2575, simple_loss=0.3271, pruned_loss=0.09394, over 1614408.79 frames. ], batch size: 20, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:48:49,127 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.403e+02 3.130e+02 3.948e+02 8.767e+02, threshold=6.260e+02, percent-clipped=3.0 +2023-02-06 05:49:13,356 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5094, 1.8201, 2.0384, 1.1622, 2.2323, 1.3778, 0.6821, 1.5787], + device='cuda:3'), covar=tensor([0.0296, 0.0177, 0.0128, 0.0284, 0.0166, 0.0480, 0.0416, 0.0172], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0280, 0.0228, 0.0339, 0.0268, 0.0430, 0.0332, 0.0308], + device='cuda:3'), out_proj_covar=tensor([1.0793e-04, 8.3361e-05, 6.7925e-05, 1.0123e-04, 8.2006e-05, 1.4153e-04, + 1.0163e-04, 9.2944e-05], device='cuda:3') +2023-02-06 05:49:17,368 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:17,967 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:49:19,304 INFO [train.py:901] (3/4) Epoch 8, batch 4500, loss[loss=0.2251, simple_loss=0.2936, pruned_loss=0.07831, over 7707.00 frames. ], tot_loss[loss=0.2579, simple_loss=0.3275, pruned_loss=0.09411, over 1616451.83 frames. ], batch size: 18, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:24,899 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 05:49:36,534 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 05:49:38,012 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6261, 1.9081, 1.4932, 2.3225, 1.0256, 1.1613, 1.4607, 1.9425], + device='cuda:3'), covar=tensor([0.0925, 0.0924, 0.1212, 0.0556, 0.1308, 0.1859, 0.1182, 0.0870], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0231, 0.0271, 0.0218, 0.0229, 0.0266, 0.0268, 0.0237], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:49:44,119 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 05:49:50,317 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8186, 2.1375, 4.8461, 1.4310, 3.3181, 2.2060, 1.7016, 3.0008], + device='cuda:3'), covar=tensor([0.1511, 0.2239, 0.0480, 0.3574, 0.1336, 0.2547, 0.1664, 0.2015], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0488, 0.0522, 0.0563, 0.0602, 0.0541, 0.0456, 0.0595], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:49:53,205 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-06 05:49:53,478 INFO [train.py:901] (3/4) Epoch 8, batch 4550, loss[loss=0.2585, simple_loss=0.3415, pruned_loss=0.08775, over 8678.00 frames. ], tot_loss[loss=0.2583, simple_loss=0.3274, pruned_loss=0.09462, over 1615350.73 frames. 
], batch size: 34, lr: 9.55e-03, grad_scale: 8.0 +2023-02-06 05:49:58,147 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.768e+02 3.493e+02 4.645e+02 1.007e+03, threshold=6.986e+02, percent-clipped=6.0 +2023-02-06 05:50:07,866 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4664, 1.9787, 3.1851, 2.4402, 2.6614, 2.2110, 1.6724, 1.2472], + device='cuda:3'), covar=tensor([0.3083, 0.3596, 0.0916, 0.2211, 0.1788, 0.1827, 0.1544, 0.3879], + device='cuda:3'), in_proj_covar=tensor([0.0835, 0.0798, 0.0680, 0.0782, 0.0881, 0.0730, 0.0668, 0.0717], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:50:29,340 INFO [train.py:901] (3/4) Epoch 8, batch 4600, loss[loss=0.2523, simple_loss=0.3047, pruned_loss=0.09992, over 7923.00 frames. ], tot_loss[loss=0.2578, simple_loss=0.3267, pruned_loss=0.09448, over 1614790.13 frames. ], batch size: 20, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:50:31,565 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6026, 2.0464, 3.2714, 2.4446, 2.7489, 2.3151, 1.7959, 1.4117], + device='cuda:3'), covar=tensor([0.3048, 0.3532, 0.0887, 0.2351, 0.1725, 0.1816, 0.1623, 0.3662], + device='cuda:3'), in_proj_covar=tensor([0.0833, 0.0796, 0.0681, 0.0782, 0.0880, 0.0728, 0.0668, 0.0715], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:50:38,249 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61195.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:50:38,324 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4312, 1.9370, 2.9304, 2.2896, 2.4850, 2.1641, 1.6725, 1.1854], + device='cuda:3'), covar=tensor([0.2843, 0.3256, 0.0946, 0.2024, 0.1616, 0.1715, 0.1432, 0.3207], + device='cuda:3'), in_proj_covar=tensor([0.0837, 0.0800, 0.0685, 0.0787, 0.0886, 0.0733, 0.0671, 0.0719], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:50:39,770 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 05:50:53,689 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3163, 1.7069, 1.6532, 1.6411, 1.2107, 1.5015, 1.8961, 1.6296], + device='cuda:3'), covar=tensor([0.0469, 0.1143, 0.1705, 0.1255, 0.0573, 0.1454, 0.0635, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0109, 0.0161, 0.0199, 0.0163, 0.0110, 0.0169, 0.0122, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 05:51:04,231 INFO [train.py:901] (3/4) Epoch 8, batch 4650, loss[loss=0.2796, simple_loss=0.3455, pruned_loss=0.1069, over 8350.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3267, pruned_loss=0.0941, over 1614314.58 frames. 
], batch size: 24, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:51:08,278 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.975e+02 2.693e+02 3.115e+02 3.876e+02 8.832e+02, threshold=6.229e+02, percent-clipped=3.0 +2023-02-06 05:51:11,645 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3182, 4.2881, 3.8871, 1.9843, 3.8485, 3.9633, 3.9105, 3.5734], + device='cuda:3'), covar=tensor([0.1071, 0.0693, 0.1191, 0.4521, 0.0924, 0.0942, 0.1319, 0.0983], + device='cuda:3'), in_proj_covar=tensor([0.0416, 0.0333, 0.0347, 0.0442, 0.0348, 0.0324, 0.0337, 0.0286], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:51:19,732 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1259, 1.7837, 1.8123, 1.6441, 1.2687, 1.6524, 2.3050, 2.2700], + device='cuda:3'), covar=tensor([0.0441, 0.1205, 0.1766, 0.1390, 0.0599, 0.1481, 0.0605, 0.0468], + device='cuda:3'), in_proj_covar=tensor([0.0109, 0.0161, 0.0198, 0.0163, 0.0111, 0.0168, 0.0122, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 05:51:38,690 INFO [train.py:901] (3/4) Epoch 8, batch 4700, loss[loss=0.2856, simple_loss=0.3587, pruned_loss=0.1063, over 8583.00 frames. ], tot_loss[loss=0.2585, simple_loss=0.3279, pruned_loss=0.09453, over 1614541.60 frames. ], batch size: 31, lr: 9.54e-03, grad_scale: 8.0 +2023-02-06 05:52:09,393 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5308, 1.8481, 1.9547, 1.1338, 2.0424, 1.3509, 0.6016, 1.5713], + device='cuda:3'), covar=tensor([0.0270, 0.0163, 0.0119, 0.0269, 0.0178, 0.0451, 0.0417, 0.0153], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0278, 0.0228, 0.0332, 0.0265, 0.0425, 0.0328, 0.0305], + device='cuda:3'), out_proj_covar=tensor([1.0611e-04, 8.2890e-05, 6.7800e-05, 9.8778e-05, 8.0798e-05, 1.3965e-04, + 1.0029e-04, 9.2068e-05], device='cuda:3') +2023-02-06 05:52:13,893 INFO [train.py:901] (3/4) Epoch 8, batch 4750, loss[loss=0.2657, simple_loss=0.3333, pruned_loss=0.09905, over 8596.00 frames. ], tot_loss[loss=0.2589, simple_loss=0.328, pruned_loss=0.09485, over 1612480.74 frames. ], batch size: 31, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:52:17,857 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.870e+02 3.425e+02 4.672e+02 9.837e+02, threshold=6.850e+02, percent-clipped=8.0 +2023-02-06 05:52:22,852 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 05:52:34,193 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2915, 2.4999, 1.8573, 2.9995, 1.4313, 1.6378, 1.8839, 2.4499], + device='cuda:3'), covar=tensor([0.0763, 0.0843, 0.1124, 0.0386, 0.1267, 0.1563, 0.1206, 0.0853], + device='cuda:3'), in_proj_covar=tensor([0.0251, 0.0231, 0.0272, 0.0218, 0.0230, 0.0264, 0.0270, 0.0236], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 05:52:36,774 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 05:52:39,421 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 05:52:48,205 INFO [train.py:901] (3/4) Epoch 8, batch 4800, loss[loss=0.2797, simple_loss=0.3578, pruned_loss=0.1008, over 8331.00 frames. 
], tot_loss[loss=0.2574, simple_loss=0.3266, pruned_loss=0.09408, over 1603733.36 frames. ], batch size: 26, lr: 9.53e-03, grad_scale: 8.0 +2023-02-06 05:53:12,939 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3384, 1.5842, 2.9560, 1.2157, 2.0369, 1.7899, 1.5145, 1.8126], + device='cuda:3'), covar=tensor([0.2017, 0.2461, 0.0641, 0.4027, 0.1534, 0.2870, 0.2065, 0.2145], + device='cuda:3'), in_proj_covar=tensor([0.0474, 0.0487, 0.0525, 0.0563, 0.0603, 0.0539, 0.0455, 0.0596], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:53:16,794 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=61423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:22,686 INFO [train.py:901] (3/4) Epoch 8, batch 4850, loss[loss=0.2437, simple_loss=0.3124, pruned_loss=0.08752, over 8134.00 frames. ], tot_loss[loss=0.2568, simple_loss=0.3268, pruned_loss=0.09345, over 1611567.01 frames. ], batch size: 22, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:53:26,653 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.780e+02 3.448e+02 4.323e+02 7.771e+02, threshold=6.895e+02, percent-clipped=1.0 +2023-02-06 05:53:28,671 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 05:53:36,192 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61451.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:53,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:53:54,744 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 05:53:56,860 INFO [train.py:901] (3/4) Epoch 8, batch 4900, loss[loss=0.2887, simple_loss=0.3534, pruned_loss=0.112, over 8489.00 frames. ], tot_loss[loss=0.2573, simple_loss=0.3268, pruned_loss=0.09387, over 1612502.83 frames. ], batch size: 28, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:53:59,790 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5670, 1.8888, 3.2293, 1.2241, 2.2634, 1.9951, 1.6042, 1.8915], + device='cuda:3'), covar=tensor([0.1485, 0.2014, 0.0639, 0.3518, 0.1408, 0.2399, 0.1550, 0.2020], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0492, 0.0535, 0.0568, 0.0610, 0.0545, 0.0460, 0.0606], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 05:54:07,101 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3074, 1.7617, 2.7409, 2.1227, 2.3321, 2.0530, 1.6669, 1.0070], + device='cuda:3'), covar=tensor([0.2768, 0.3182, 0.0791, 0.1749, 0.1339, 0.1762, 0.1447, 0.3140], + device='cuda:3'), in_proj_covar=tensor([0.0852, 0.0811, 0.0689, 0.0793, 0.0889, 0.0744, 0.0678, 0.0731], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:54:31,292 INFO [train.py:901] (3/4) Epoch 8, batch 4950, loss[loss=0.2359, simple_loss=0.3125, pruned_loss=0.07967, over 8193.00 frames. ], tot_loss[loss=0.2574, simple_loss=0.3271, pruned_loss=0.09385, over 1611918.96 frames. 
], batch size: 23, lr: 9.52e-03, grad_scale: 8.0 +2023-02-06 05:54:35,325 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.701e+02 3.325e+02 4.582e+02 7.633e+02, threshold=6.649e+02, percent-clipped=1.0 +2023-02-06 05:54:35,521 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=61538.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:54:47,371 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9071, 2.4012, 3.9974, 2.8293, 3.3055, 2.4137, 1.9516, 1.7485], + device='cuda:3'), covar=tensor([0.2875, 0.3618, 0.0763, 0.2314, 0.1730, 0.1711, 0.1483, 0.3939], + device='cuda:3'), in_proj_covar=tensor([0.0846, 0.0805, 0.0680, 0.0787, 0.0880, 0.0737, 0.0674, 0.0725], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:55:07,153 INFO [train.py:901] (3/4) Epoch 8, batch 5000, loss[loss=0.228, simple_loss=0.2979, pruned_loss=0.0791, over 7797.00 frames. ], tot_loss[loss=0.2554, simple_loss=0.3254, pruned_loss=0.09268, over 1609936.55 frames. ], batch size: 19, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:17,611 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7139, 1.5617, 5.7841, 2.2465, 5.1510, 4.8661, 5.3825, 5.2318], + device='cuda:3'), covar=tensor([0.0455, 0.4575, 0.0290, 0.3086, 0.0910, 0.0763, 0.0478, 0.0466], + device='cuda:3'), in_proj_covar=tensor([0.0432, 0.0549, 0.0522, 0.0497, 0.0564, 0.0474, 0.0474, 0.0522], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 05:55:42,165 INFO [train.py:901] (3/4) Epoch 8, batch 5050, loss[loss=0.266, simple_loss=0.335, pruned_loss=0.0985, over 7971.00 frames. ], tot_loss[loss=0.255, simple_loss=0.325, pruned_loss=0.0925, over 1612171.43 frames. ], batch size: 21, lr: 9.51e-03, grad_scale: 8.0 +2023-02-06 05:55:46,809 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.771e+02 3.459e+02 4.924e+02 1.310e+03, threshold=6.919e+02, percent-clipped=9.0 +2023-02-06 05:55:55,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2005, 1.5630, 1.5463, 1.4523, 1.0561, 1.3762, 1.6582, 1.7223], + device='cuda:3'), covar=tensor([0.0497, 0.1172, 0.1863, 0.1325, 0.0615, 0.1520, 0.0728, 0.0559], + device='cuda:3'), in_proj_covar=tensor([0.0107, 0.0160, 0.0200, 0.0163, 0.0111, 0.0168, 0.0121, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 05:56:07,702 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 05:56:17,393 INFO [train.py:901] (3/4) Epoch 8, batch 5100, loss[loss=0.2335, simple_loss=0.2972, pruned_loss=0.08485, over 7434.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3233, pruned_loss=0.09167, over 1610220.17 frames. ], batch size: 17, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:52,941 INFO [train.py:901] (3/4) Epoch 8, batch 5150, loss[loss=0.2203, simple_loss=0.2963, pruned_loss=0.07221, over 7795.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3235, pruned_loss=0.09149, over 1613773.64 frames. 
], batch size: 20, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:56:57,120 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.584e+02 3.190e+02 4.018e+02 8.337e+02, threshold=6.381e+02, percent-clipped=2.0 +2023-02-06 05:57:06,179 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:07,616 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1760, 1.0934, 1.1380, 1.0423, 0.8026, 1.2680, 0.0530, 0.9251], + device='cuda:3'), covar=tensor([0.3397, 0.2710, 0.0909, 0.1861, 0.5325, 0.0865, 0.4352, 0.2145], + device='cuda:3'), in_proj_covar=tensor([0.0153, 0.0151, 0.0087, 0.0200, 0.0238, 0.0092, 0.0160, 0.0153], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 05:57:08,917 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7194, 1.8592, 5.7919, 2.1277, 5.1541, 4.9236, 5.3962, 5.2445], + device='cuda:3'), covar=tensor([0.0392, 0.3677, 0.0300, 0.3073, 0.0936, 0.0721, 0.0434, 0.0408], + device='cuda:3'), in_proj_covar=tensor([0.0428, 0.0541, 0.0515, 0.0495, 0.0559, 0.0468, 0.0469, 0.0518], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 05:57:13,701 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:27,554 INFO [train.py:901] (3/4) Epoch 8, batch 5200, loss[loss=0.2518, simple_loss=0.3157, pruned_loss=0.09399, over 7936.00 frames. ], tot_loss[loss=0.2549, simple_loss=0.3248, pruned_loss=0.09251, over 1612659.17 frames. ], batch size: 20, lr: 9.50e-03, grad_scale: 8.0 +2023-02-06 05:57:35,720 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=61794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:57:53,740 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=61819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:58:02,409 INFO [train.py:901] (3/4) Epoch 8, batch 5250, loss[loss=0.2449, simple_loss=0.3161, pruned_loss=0.08687, over 8199.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3246, pruned_loss=0.092, over 1616405.27 frames. ], batch size: 23, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:58:06,468 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.711e+02 3.309e+02 4.013e+02 1.150e+03, threshold=6.618e+02, percent-clipped=3.0 +2023-02-06 05:58:07,305 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5062, 2.4866, 1.6263, 2.0662, 1.9224, 1.3288, 1.9460, 1.9023], + device='cuda:3'), covar=tensor([0.1143, 0.0327, 0.1128, 0.0493, 0.0631, 0.1399, 0.0790, 0.0811], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0231, 0.0308, 0.0293, 0.0302, 0.0308, 0.0334, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 05:58:07,802 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 05:58:10,096 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3303, 1.9427, 2.9267, 2.3355, 2.5485, 2.0686, 1.6414, 1.2360], + device='cuda:3'), covar=tensor([0.2754, 0.2987, 0.0817, 0.1762, 0.1366, 0.1618, 0.1400, 0.3119], + device='cuda:3'), in_proj_covar=tensor([0.0851, 0.0807, 0.0681, 0.0793, 0.0892, 0.0741, 0.0675, 0.0730], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 05:58:37,170 INFO [train.py:901] (3/4) Epoch 8, batch 5300, loss[loss=0.2705, simple_loss=0.3442, pruned_loss=0.09844, over 8474.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.324, pruned_loss=0.09221, over 1612544.47 frames. ], batch size: 25, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:12,098 INFO [train.py:901] (3/4) Epoch 8, batch 5350, loss[loss=0.2136, simple_loss=0.2943, pruned_loss=0.06649, over 8243.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3253, pruned_loss=0.09308, over 1610824.23 frames. ], batch size: 22, lr: 9.49e-03, grad_scale: 8.0 +2023-02-06 05:59:12,261 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=61932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 05:59:16,956 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.785e+02 2.596e+02 3.249e+02 3.983e+02 1.109e+03, threshold=6.498e+02, percent-clipped=6.0 +2023-02-06 05:59:47,805 INFO [train.py:901] (3/4) Epoch 8, batch 5400, loss[loss=0.2441, simple_loss=0.3296, pruned_loss=0.07929, over 8470.00 frames. ], tot_loss[loss=0.2546, simple_loss=0.3248, pruned_loss=0.0922, over 1612366.75 frames. ], batch size: 25, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:23,965 INFO [train.py:901] (3/4) Epoch 8, batch 5450, loss[loss=0.2657, simple_loss=0.3313, pruned_loss=0.1, over 8254.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3249, pruned_loss=0.09205, over 1614876.74 frames. ], batch size: 22, lr: 9.48e-03, grad_scale: 8.0 +2023-02-06 06:00:28,689 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.625e+02 3.240e+02 4.068e+02 8.471e+02, threshold=6.479e+02, percent-clipped=5.0 +2023-02-06 06:00:33,864 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3568, 1.8995, 2.9865, 2.3989, 2.5542, 2.0953, 1.6032, 1.3354], + device='cuda:3'), covar=tensor([0.2835, 0.3284, 0.0836, 0.1914, 0.1458, 0.1717, 0.1358, 0.3362], + device='cuda:3'), in_proj_covar=tensor([0.0847, 0.0805, 0.0679, 0.0789, 0.0887, 0.0739, 0.0669, 0.0728], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:00:49,039 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5469, 1.5049, 1.6887, 1.3919, 0.9975, 1.8146, 0.1006, 1.2248], + device='cuda:3'), covar=tensor([0.2569, 0.2125, 0.0629, 0.1676, 0.4964, 0.0565, 0.3640, 0.1830], + device='cuda:3'), in_proj_covar=tensor([0.0153, 0.0152, 0.0087, 0.0200, 0.0242, 0.0092, 0.0160, 0.0151], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 06:01:00,412 INFO [train.py:901] (3/4) Epoch 8, batch 5500, loss[loss=0.2072, simple_loss=0.2809, pruned_loss=0.06677, over 7527.00 frames. ], tot_loss[loss=0.2531, simple_loss=0.3235, pruned_loss=0.09139, over 1613861.71 frames. 
], batch size: 18, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:01,792 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 06:01:08,468 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:16,547 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:01:34,829 INFO [train.py:901] (3/4) Epoch 8, batch 5550, loss[loss=0.2264, simple_loss=0.3073, pruned_loss=0.07275, over 8193.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.325, pruned_loss=0.09138, over 1622760.53 frames. ], batch size: 23, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:01:38,603 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.724e+02 3.277e+02 4.222e+02 9.983e+02, threshold=6.553e+02, percent-clipped=5.0 +2023-02-06 06:01:40,112 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:09,239 INFO [train.py:901] (3/4) Epoch 8, batch 5600, loss[loss=0.2189, simple_loss=0.285, pruned_loss=0.07636, over 7442.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3258, pruned_loss=0.09236, over 1622120.10 frames. ], batch size: 17, lr: 9.47e-03, grad_scale: 8.0 +2023-02-06 06:02:19,446 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 06:02:27,947 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:35,378 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62220.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:02:44,114 INFO [train.py:901] (3/4) Epoch 8, batch 5650, loss[loss=0.2632, simple_loss=0.3429, pruned_loss=0.09171, over 8521.00 frames. ], tot_loss[loss=0.2545, simple_loss=0.3251, pruned_loss=0.09192, over 1621999.64 frames. ], batch size: 39, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:02:48,201 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 2.893e+02 3.442e+02 4.058e+02 7.819e+02, threshold=6.884e+02, percent-clipped=2.0 +2023-02-06 06:02:58,226 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 06:03:03,307 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 06:03:13,678 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3528, 2.7789, 1.8105, 2.1386, 2.2337, 1.4025, 1.9431, 1.9903], + device='cuda:3'), covar=tensor([0.1161, 0.0264, 0.0905, 0.0522, 0.0541, 0.1284, 0.0837, 0.0786], + device='cuda:3'), in_proj_covar=tensor([0.0342, 0.0231, 0.0309, 0.0295, 0.0305, 0.0316, 0.0339, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:03:14,928 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:19,746 INFO [train.py:901] (3/4) Epoch 8, batch 5700, loss[loss=0.2317, simple_loss=0.3019, pruned_loss=0.08069, over 7984.00 frames. ], tot_loss[loss=0.255, simple_loss=0.326, pruned_loss=0.09196, over 1625932.30 frames. 
], batch size: 21, lr: 9.46e-03, grad_scale: 8.0 +2023-02-06 06:03:26,021 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:03:32,321 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 06:03:53,772 INFO [train.py:901] (3/4) Epoch 8, batch 5750, loss[loss=0.2785, simple_loss=0.3435, pruned_loss=0.1067, over 8495.00 frames. ], tot_loss[loss=0.2544, simple_loss=0.3249, pruned_loss=0.0919, over 1620682.85 frames. ], batch size: 26, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:03:58,446 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.702e+02 3.342e+02 4.214e+02 1.406e+03, threshold=6.684e+02, percent-clipped=3.0 +2023-02-06 06:04:07,821 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 06:04:28,273 INFO [train.py:901] (3/4) Epoch 8, batch 5800, loss[loss=0.251, simple_loss=0.3033, pruned_loss=0.09933, over 7426.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3249, pruned_loss=0.09175, over 1621268.67 frames. ], batch size: 17, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:04:35,223 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62391.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:04:48,168 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:04,097 INFO [train.py:901] (3/4) Epoch 8, batch 5850, loss[loss=0.2212, simple_loss=0.3006, pruned_loss=0.07091, over 8138.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3239, pruned_loss=0.09093, over 1620208.75 frames. ], batch size: 22, lr: 9.45e-03, grad_scale: 8.0 +2023-02-06 06:05:06,527 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 06:05:08,203 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.970e+02 2.690e+02 3.286e+02 4.000e+02 6.740e+02, threshold=6.571e+02, percent-clipped=1.0 +2023-02-06 06:05:20,411 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0821, 1.2760, 1.1992, 0.3455, 1.2323, 1.0137, 0.1273, 1.1083], + device='cuda:3'), covar=tensor([0.0187, 0.0174, 0.0148, 0.0297, 0.0187, 0.0530, 0.0383, 0.0152], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0279, 0.0226, 0.0336, 0.0267, 0.0425, 0.0328, 0.0305], + device='cuda:3'), out_proj_covar=tensor([1.0555e-04, 8.2629e-05, 6.6702e-05, 1.0024e-04, 8.0871e-05, 1.3890e-04, + 1.0028e-04, 9.1450e-05], device='cuda:3') +2023-02-06 06:05:27,195 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:34,614 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62476.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:38,457 INFO [train.py:901] (3/4) Epoch 8, batch 5900, loss[loss=0.2686, simple_loss=0.3371, pruned_loss=0.1, over 8322.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3236, pruned_loss=0.09104, over 1616161.44 frames. 
], batch size: 26, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:05:39,871 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:44,020 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:05:52,029 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:06:13,414 INFO [train.py:901] (3/4) Epoch 8, batch 5950, loss[loss=0.1905, simple_loss=0.2701, pruned_loss=0.05547, over 7799.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3232, pruned_loss=0.09018, over 1619122.64 frames. ], batch size: 19, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:17,413 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.881e+02 2.652e+02 3.220e+02 3.904e+02 8.315e+02, threshold=6.439e+02, percent-clipped=2.0 +2023-02-06 06:06:34,802 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:06:47,821 INFO [train.py:901] (3/4) Epoch 8, batch 6000, loss[loss=0.2655, simple_loss=0.3305, pruned_loss=0.1002, over 8237.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3237, pruned_loss=0.09091, over 1618992.11 frames. ], batch size: 22, lr: 9.44e-03, grad_scale: 8.0 +2023-02-06 06:06:47,822 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 06:06:56,083 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2890, 2.1585, 1.5099, 1.8462, 1.8148, 1.2620, 1.5891, 1.6110], + device='cuda:3'), covar=tensor([0.1056, 0.0302, 0.1002, 0.0468, 0.0633, 0.1234, 0.0867, 0.0796], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0233, 0.0309, 0.0293, 0.0307, 0.0315, 0.0338, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:07:00,012 INFO [train.py:935] (3/4) Epoch 8, validation: loss=0.1996, simple_loss=0.2985, pruned_loss=0.05037, over 944034.00 frames. +2023-02-06 06:07:00,013 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 06:07:12,285 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62599.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:14,967 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:33,819 INFO [train.py:901] (3/4) Epoch 8, batch 6050, loss[loss=0.2044, simple_loss=0.2819, pruned_loss=0.06343, over 7814.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3234, pruned_loss=0.09073, over 1617194.28 frames. 
], batch size: 20, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:07:35,913 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:07:37,858 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.575e+02 3.268e+02 4.071e+02 9.720e+02, threshold=6.536e+02, percent-clipped=3.0 +2023-02-06 06:07:44,180 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:02,426 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:08,753 INFO [train.py:901] (3/4) Epoch 8, batch 6100, loss[loss=0.2032, simple_loss=0.2759, pruned_loss=0.06522, over 7550.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3239, pruned_loss=0.09083, over 1619395.82 frames. ], batch size: 18, lr: 9.43e-03, grad_scale: 8.0 +2023-02-06 06:08:10,161 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:37,242 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 06:08:43,081 INFO [train.py:901] (3/4) Epoch 8, batch 6150, loss[loss=0.2624, simple_loss=0.3386, pruned_loss=0.09311, over 8736.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3231, pruned_loss=0.09088, over 1617839.34 frames. ], batch size: 30, lr: 9.42e-03, grad_scale: 8.0 +2023-02-06 06:08:47,077 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.667e+02 3.544e+02 4.037e+02 8.376e+02, threshold=7.087e+02, percent-clipped=5.0 +2023-02-06 06:08:55,088 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:08:57,067 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:17,711 INFO [train.py:901] (3/4) Epoch 8, batch 6200, loss[loss=0.2289, simple_loss=0.3144, pruned_loss=0.07165, over 8342.00 frames. ], tot_loss[loss=0.2526, simple_loss=0.3229, pruned_loss=0.09115, over 1613531.81 frames. ], batch size: 25, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:23,752 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=62791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:09:52,742 INFO [train.py:901] (3/4) Epoch 8, batch 6250, loss[loss=0.217, simple_loss=0.288, pruned_loss=0.07302, over 7524.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3235, pruned_loss=0.09183, over 1610789.18 frames. 
], batch size: 18, lr: 9.42e-03, grad_scale: 16.0 +2023-02-06 06:09:56,741 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.905e+02 2.717e+02 3.222e+02 4.596e+02 9.217e+02, threshold=6.445e+02, percent-clipped=3.0 +2023-02-06 06:10:08,419 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=62855.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:16,912 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=62868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:23,670 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9903, 3.9937, 2.4128, 2.6466, 2.9990, 2.2838, 2.8289, 2.8293], + device='cuda:3'), covar=tensor([0.1341, 0.0226, 0.0794, 0.0700, 0.0584, 0.0970, 0.0857, 0.0907], + device='cuda:3'), in_proj_covar=tensor([0.0339, 0.0233, 0.0305, 0.0288, 0.0300, 0.0311, 0.0332, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:10:24,988 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=62880.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:10:26,044 INFO [train.py:901] (3/4) Epoch 8, batch 6300, loss[loss=0.2809, simple_loss=0.3477, pruned_loss=0.107, over 8022.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3238, pruned_loss=0.09187, over 1613486.45 frames. ], batch size: 22, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:10:31,573 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9440, 2.0363, 1.7510, 2.6474, 1.2090, 1.5268, 1.7662, 2.1788], + device='cuda:3'), covar=tensor([0.0874, 0.1004, 0.1255, 0.0419, 0.1235, 0.1574, 0.1028, 0.0782], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0226, 0.0267, 0.0215, 0.0227, 0.0263, 0.0264, 0.0232], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 06:10:44,100 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:01,294 INFO [train.py:901] (3/4) Epoch 8, batch 6350, loss[loss=0.2881, simple_loss=0.3595, pruned_loss=0.1084, over 8665.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3235, pruned_loss=0.09167, over 1613406.56 frames. ], batch size: 34, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:05,349 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.824e+02 3.504e+02 4.161e+02 7.437e+02, threshold=7.007e+02, percent-clipped=2.0 +2023-02-06 06:11:12,102 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=62947.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:11:14,900 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9173, 1.9576, 2.2917, 1.8128, 1.2520, 2.3115, 0.3716, 1.3972], + device='cuda:3'), covar=tensor([0.2439, 0.1656, 0.0471, 0.2083, 0.5054, 0.0497, 0.3903, 0.2146], + device='cuda:3'), in_proj_covar=tensor([0.0148, 0.0150, 0.0088, 0.0197, 0.0236, 0.0092, 0.0156, 0.0154], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:11:17,753 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 06:11:35,238 INFO [train.py:901] (3/4) Epoch 8, batch 6400, loss[loss=0.2274, simple_loss=0.3124, pruned_loss=0.07123, over 8298.00 frames. 
], tot_loss[loss=0.2528, simple_loss=0.3232, pruned_loss=0.09124, over 1615277.81 frames. ], batch size: 23, lr: 9.41e-03, grad_scale: 16.0 +2023-02-06 06:11:52,116 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:03,499 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:07,442 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:09,585 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:10,068 INFO [train.py:901] (3/4) Epoch 8, batch 6450, loss[loss=0.2733, simple_loss=0.3393, pruned_loss=0.1036, over 8143.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3226, pruned_loss=0.09107, over 1611362.69 frames. ], batch size: 22, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:12:13,710 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6325, 2.2470, 4.3771, 1.1687, 3.1012, 2.1718, 1.6320, 2.4787], + device='cuda:3'), covar=tensor([0.1571, 0.1892, 0.0568, 0.3537, 0.1220, 0.2369, 0.1660, 0.2342], + device='cuda:3'), in_proj_covar=tensor([0.0482, 0.0490, 0.0538, 0.0569, 0.0604, 0.0537, 0.0459, 0.0606], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0003], + device='cuda:3') +2023-02-06 06:12:14,125 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.060e+02 2.983e+02 3.820e+02 5.218e+02 9.633e+02, threshold=7.640e+02, percent-clipped=4.0 +2023-02-06 06:12:31,361 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:12:45,220 INFO [train.py:901] (3/4) Epoch 8, batch 6500, loss[loss=0.2825, simple_loss=0.3487, pruned_loss=0.1082, over 8696.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3228, pruned_loss=0.09102, over 1607875.36 frames. ], batch size: 34, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:03,586 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.16 vs. limit=5.0 +2023-02-06 06:13:14,197 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:20,107 INFO [train.py:901] (3/4) Epoch 8, batch 6550, loss[loss=0.2168, simple_loss=0.2954, pruned_loss=0.06913, over 8086.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3239, pruned_loss=0.09181, over 1607636.51 frames. 
], batch size: 21, lr: 9.40e-03, grad_scale: 16.0 +2023-02-06 06:13:22,295 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:24,225 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.645e+02 3.116e+02 3.905e+02 9.747e+02, threshold=6.232e+02, percent-clipped=3.0 +2023-02-06 06:13:27,751 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:27,810 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4177, 1.6857, 2.8296, 1.2011, 2.1661, 1.8492, 1.4181, 1.7722], + device='cuda:3'), covar=tensor([0.1458, 0.1889, 0.0622, 0.3326, 0.1149, 0.2309, 0.1543, 0.1831], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0487, 0.0533, 0.0566, 0.0600, 0.0535, 0.0458, 0.0603], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:13:29,203 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3077, 2.5260, 1.8124, 2.0118, 2.0665, 1.4483, 1.7070, 2.0064], + device='cuda:3'), covar=tensor([0.1234, 0.0310, 0.0874, 0.0488, 0.0631, 0.1235, 0.0941, 0.0721], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0235, 0.0308, 0.0294, 0.0304, 0.0315, 0.0337, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:13:31,960 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63149.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:13:46,259 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2241, 2.8718, 2.9126, 1.7698, 1.6546, 3.2133, 0.6347, 2.0266], + device='cuda:3'), covar=tensor([0.1586, 0.1462, 0.1191, 0.2979, 0.5694, 0.0472, 0.4994, 0.2324], + device='cuda:3'), in_proj_covar=tensor([0.0153, 0.0155, 0.0091, 0.0204, 0.0243, 0.0095, 0.0159, 0.0158], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 06:13:49,427 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 06:13:54,668 INFO [train.py:901] (3/4) Epoch 8, batch 6600, loss[loss=0.2413, simple_loss=0.3125, pruned_loss=0.08501, over 8194.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3241, pruned_loss=0.09177, over 1610895.11 frames. ], batch size: 23, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:13:56,259 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2879, 1.8750, 2.8918, 2.2990, 2.5113, 2.0758, 1.6481, 1.1637], + device='cuda:3'), covar=tensor([0.3139, 0.3362, 0.0859, 0.2025, 0.1732, 0.1684, 0.1448, 0.3544], + device='cuda:3'), in_proj_covar=tensor([0.0845, 0.0806, 0.0680, 0.0792, 0.0892, 0.0741, 0.0673, 0.0727], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:14:07,399 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 06:14:25,322 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.51 vs. limit=5.0 +2023-02-06 06:14:29,753 INFO [train.py:901] (3/4) Epoch 8, batch 6650, loss[loss=0.2332, simple_loss=0.3078, pruned_loss=0.07936, over 8106.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.325, pruned_loss=0.09216, over 1616507.80 frames. 
], batch size: 23, lr: 9.39e-03, grad_scale: 16.0 +2023-02-06 06:14:33,644 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.260e+02 2.943e+02 3.528e+02 4.449e+02 1.178e+03, threshold=7.055e+02, percent-clipped=8.0 +2023-02-06 06:14:34,518 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:14:42,442 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:01,179 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:03,732 INFO [train.py:901] (3/4) Epoch 8, batch 6700, loss[loss=0.2, simple_loss=0.2862, pruned_loss=0.0569, over 8278.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3258, pruned_loss=0.09268, over 1618229.89 frames. ], batch size: 23, lr: 9.38e-03, grad_scale: 16.0 +2023-02-06 06:15:19,256 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:29,352 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63318.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:15:34,397 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 06:15:38,596 INFO [train.py:901] (3/4) Epoch 8, batch 6750, loss[loss=0.2137, simple_loss=0.2831, pruned_loss=0.07217, over 8283.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3244, pruned_loss=0.09169, over 1619122.79 frames. ], batch size: 23, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:15:43,298 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.603e+02 3.068e+02 3.707e+02 1.416e+03, threshold=6.136e+02, percent-clipped=3.0 +2023-02-06 06:15:46,216 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:13,022 INFO [train.py:901] (3/4) Epoch 8, batch 6800, loss[loss=0.2444, simple_loss=0.3192, pruned_loss=0.08478, over 8129.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3238, pruned_loss=0.09134, over 1618178.57 frames. ], batch size: 22, lr: 9.38e-03, grad_scale: 8.0 +2023-02-06 06:16:19,067 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1471, 4.0858, 3.7041, 1.8488, 3.6517, 3.6429, 3.7028, 3.1850], + device='cuda:3'), covar=tensor([0.0815, 0.0600, 0.0987, 0.4686, 0.0943, 0.0988, 0.1276, 0.1059], + device='cuda:3'), in_proj_covar=tensor([0.0436, 0.0343, 0.0360, 0.0451, 0.0358, 0.0335, 0.0349, 0.0299], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:16:20,992 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 06:16:24,486 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63399.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:42,724 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:47,925 INFO [train.py:901] (3/4) Epoch 8, batch 6850, loss[loss=0.228, simple_loss=0.3082, pruned_loss=0.07391, over 8240.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.323, pruned_loss=0.09085, over 1616905.79 frames. 
], batch size: 22, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:16:52,299 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:52,777 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.669e+02 3.418e+02 4.059e+02 7.847e+02, threshold=6.836e+02, percent-clipped=4.0 +2023-02-06 06:16:53,667 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:16:57,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0255, 1.7217, 1.2747, 1.6328, 1.3901, 1.0959, 1.3073, 1.3577], + device='cuda:3'), covar=tensor([0.0923, 0.0403, 0.1026, 0.0420, 0.0568, 0.1198, 0.0721, 0.0668], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0237, 0.0309, 0.0295, 0.0306, 0.0315, 0.0336, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:17:11,111 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 06:17:22,530 INFO [train.py:901] (3/4) Epoch 8, batch 6900, loss[loss=0.2297, simple_loss=0.2988, pruned_loss=0.08028, over 7701.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3239, pruned_loss=0.09178, over 1615601.32 frames. ], batch size: 18, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:17:39,554 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:45,254 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.71 vs. limit=5.0 +2023-02-06 06:17:58,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:17:58,757 INFO [train.py:901] (3/4) Epoch 8, batch 6950, loss[loss=0.2182, simple_loss=0.3008, pruned_loss=0.06779, over 8022.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3221, pruned_loss=0.09063, over 1612422.01 frames. ], batch size: 22, lr: 9.37e-03, grad_scale: 8.0 +2023-02-06 06:18:03,563 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.671e+02 3.369e+02 4.495e+02 9.890e+02, threshold=6.738e+02, percent-clipped=4.0 +2023-02-06 06:18:18,590 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 06:18:28,136 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4572, 1.9269, 3.2494, 1.1397, 2.3135, 1.7297, 1.5227, 1.8470], + device='cuda:3'), covar=tensor([0.1537, 0.1782, 0.0664, 0.3429, 0.1292, 0.2617, 0.1598, 0.2334], + device='cuda:3'), in_proj_covar=tensor([0.0478, 0.0483, 0.0530, 0.0563, 0.0597, 0.0538, 0.0457, 0.0596], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:18:32,009 INFO [train.py:901] (3/4) Epoch 8, batch 7000, loss[loss=0.2372, simple_loss=0.3251, pruned_loss=0.07468, over 8038.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3229, pruned_loss=0.09084, over 1614295.00 frames. 
], batch size: 22, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:18:32,810 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:36,325 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:48,868 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:18:55,138 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4463, 1.4031, 3.0734, 1.3329, 2.0989, 3.2775, 3.3285, 2.7847], + device='cuda:3'), covar=tensor([0.1334, 0.1687, 0.0402, 0.2058, 0.1012, 0.0320, 0.0499, 0.0781], + device='cuda:3'), in_proj_covar=tensor([0.0250, 0.0279, 0.0241, 0.0268, 0.0253, 0.0224, 0.0292, 0.0280], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 06:19:08,076 INFO [train.py:901] (3/4) Epoch 8, batch 7050, loss[loss=0.2816, simple_loss=0.3407, pruned_loss=0.1112, over 7222.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3234, pruned_loss=0.0907, over 1615370.55 frames. ], batch size: 71, lr: 9.36e-03, grad_scale: 8.0 +2023-02-06 06:19:12,575 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.720e+02 3.242e+02 3.930e+02 7.648e+02, threshold=6.484e+02, percent-clipped=3.0 +2023-02-06 06:19:42,443 INFO [train.py:901] (3/4) Epoch 8, batch 7100, loss[loss=0.2635, simple_loss=0.3373, pruned_loss=0.09484, over 8503.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3231, pruned_loss=0.09018, over 1618419.30 frames. ], batch size: 28, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:19:52,879 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5223, 2.0363, 3.4057, 1.2442, 2.4173, 1.9357, 1.6213, 2.1732], + device='cuda:3'), covar=tensor([0.1590, 0.1927, 0.0685, 0.3676, 0.1405, 0.2626, 0.1640, 0.2191], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0485, 0.0529, 0.0565, 0.0601, 0.0541, 0.0461, 0.0600], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:19:53,533 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:17,191 INFO [train.py:901] (3/4) Epoch 8, batch 7150, loss[loss=0.2466, simple_loss=0.3125, pruned_loss=0.09032, over 8285.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3229, pruned_loss=0.09024, over 1617403.56 frames. ], batch size: 23, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:21,748 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.778e+02 3.549e+02 4.516e+02 1.097e+03, threshold=7.098e+02, percent-clipped=7.0 +2023-02-06 06:20:51,749 INFO [train.py:901] (3/4) Epoch 8, batch 7200, loss[loss=0.2841, simple_loss=0.3496, pruned_loss=0.1093, over 6847.00 frames. ], tot_loss[loss=0.2527, simple_loss=0.3236, pruned_loss=0.09087, over 1613804.75 frames. 
], batch size: 71, lr: 9.35e-03, grad_scale: 8.0 +2023-02-06 06:20:51,835 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:20:53,157 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:15,508 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3404, 1.1172, 1.3092, 1.0767, 0.7999, 1.1504, 1.1165, 1.1206], + device='cuda:3'), covar=tensor([0.0557, 0.1446, 0.1887, 0.1539, 0.0628, 0.1724, 0.0767, 0.0686], + device='cuda:3'), in_proj_covar=tensor([0.0107, 0.0161, 0.0198, 0.0164, 0.0111, 0.0168, 0.0122, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 06:21:23,973 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:21:26,385 INFO [train.py:901] (3/4) Epoch 8, batch 7250, loss[loss=0.3124, simple_loss=0.372, pruned_loss=0.1264, over 8558.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3238, pruned_loss=0.09173, over 1609842.02 frames. ], batch size: 31, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:21:30,976 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.694e+02 3.202e+02 4.148e+02 8.009e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 06:21:35,245 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 06:21:47,993 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:00,780 INFO [train.py:901] (3/4) Epoch 8, batch 7300, loss[loss=0.2862, simple_loss=0.3517, pruned_loss=0.1103, over 8526.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3233, pruned_loss=0.09114, over 1609094.21 frames. ], batch size: 28, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:12,942 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:14,342 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=63899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:36,711 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:37,304 INFO [train.py:901] (3/4) Epoch 8, batch 7350, loss[loss=0.2881, simple_loss=0.3651, pruned_loss=0.1056, over 8325.00 frames. ], tot_loss[loss=0.2523, simple_loss=0.3229, pruned_loss=0.09082, over 1607226.36 frames. ], batch size: 25, lr: 9.34e-03, grad_scale: 8.0 +2023-02-06 06:22:38,888 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=63934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:42,245 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.551e+02 3.183e+02 3.767e+02 5.416e+02, threshold=6.365e+02, percent-clipped=0.0 +2023-02-06 06:22:48,931 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=63948.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:22:50,009 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.43 vs. 
limit=5.0 +2023-02-06 06:22:53,096 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3765, 1.9466, 4.5318, 2.0177, 3.9247, 3.8488, 4.0604, 4.0024], + device='cuda:3'), covar=tensor([0.0450, 0.3236, 0.0439, 0.2833, 0.1011, 0.0688, 0.0469, 0.0545], + device='cuda:3'), in_proj_covar=tensor([0.0424, 0.0539, 0.0527, 0.0490, 0.0553, 0.0467, 0.0464, 0.0523], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 06:22:53,935 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=63954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:05,953 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 06:23:10,858 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=63979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:12,554 INFO [train.py:901] (3/4) Epoch 8, batch 7400, loss[loss=0.2912, simple_loss=0.3489, pruned_loss=0.1167, over 8031.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3231, pruned_loss=0.09082, over 1607200.51 frames. ], batch size: 22, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:24,827 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 06:23:48,430 INFO [train.py:901] (3/4) Epoch 8, batch 7450, loss[loss=0.2662, simple_loss=0.3469, pruned_loss=0.09277, over 8448.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3235, pruned_loss=0.09117, over 1608756.90 frames. ], batch size: 27, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:23:53,158 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.827e+02 3.358e+02 3.935e+02 9.777e+02, threshold=6.715e+02, percent-clipped=5.0 +2023-02-06 06:23:54,693 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:23:58,118 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:05,490 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 06:24:10,191 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64063.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:24:23,166 INFO [train.py:901] (3/4) Epoch 8, batch 7500, loss[loss=0.2254, simple_loss=0.3009, pruned_loss=0.07498, over 8079.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3229, pruned_loss=0.0905, over 1612404.33 frames. ], batch size: 21, lr: 9.33e-03, grad_scale: 8.0 +2023-02-06 06:24:33,030 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([6.0630, 1.7135, 6.0718, 2.1528, 5.4736, 5.1015, 5.5836, 5.5371], + device='cuda:3'), covar=tensor([0.0301, 0.3998, 0.0240, 0.2859, 0.0857, 0.0633, 0.0333, 0.0390], + device='cuda:3'), in_proj_covar=tensor([0.0420, 0.0538, 0.0528, 0.0489, 0.0552, 0.0467, 0.0466, 0.0524], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 06:24:57,458 INFO [train.py:901] (3/4) Epoch 8, batch 7550, loss[loss=0.2975, simple_loss=0.3626, pruned_loss=0.1162, over 8501.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3239, pruned_loss=0.09162, over 1610502.30 frames. 
], batch size: 49, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:02,136 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.846e+02 3.017e+02 3.905e+02 4.969e+02 7.546e+02, threshold=7.810e+02, percent-clipped=1.0 +2023-02-06 06:25:06,402 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4480, 1.9908, 3.0928, 2.4523, 2.5987, 2.0983, 1.6878, 1.1800], + device='cuda:3'), covar=tensor([0.2636, 0.3120, 0.0808, 0.1844, 0.1569, 0.1587, 0.1377, 0.3421], + device='cuda:3'), in_proj_covar=tensor([0.0846, 0.0806, 0.0688, 0.0801, 0.0892, 0.0746, 0.0677, 0.0724], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:25:11,679 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:13,083 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64155.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:24,861 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64172.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:28,964 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:30,997 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:32,166 INFO [train.py:901] (3/4) Epoch 8, batch 7600, loss[loss=0.2234, simple_loss=0.3091, pruned_loss=0.06886, over 8106.00 frames. ], tot_loss[loss=0.2536, simple_loss=0.3241, pruned_loss=0.09154, over 1612765.85 frames. ], batch size: 23, lr: 9.32e-03, grad_scale: 8.0 +2023-02-06 06:25:40,512 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8654, 1.9953, 1.7514, 2.5621, 1.2376, 1.4125, 1.7679, 2.0644], + device='cuda:3'), covar=tensor([0.0881, 0.1108, 0.1191, 0.0485, 0.1310, 0.1745, 0.1103, 0.0884], + device='cuda:3'), in_proj_covar=tensor([0.0250, 0.0228, 0.0270, 0.0215, 0.0229, 0.0264, 0.0268, 0.0232], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 06:25:49,066 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64207.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:25:58,615 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 06:26:06,999 INFO [train.py:901] (3/4) Epoch 8, batch 7650, loss[loss=0.2959, simple_loss=0.3515, pruned_loss=0.1202, over 8646.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.3242, pruned_loss=0.09217, over 1611294.18 frames. ], batch size: 39, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:11,829 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.778e+02 3.467e+02 5.154e+02 1.113e+03, threshold=6.933e+02, percent-clipped=3.0 +2023-02-06 06:26:19,377 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:38,168 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:40,887 INFO [train.py:901] (3/4) Epoch 8, batch 7700, loss[loss=0.233, simple_loss=0.3234, pruned_loss=0.07125, over 8193.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3242, pruned_loss=0.09177, over 1614681.38 frames. 
], batch size: 23, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:26:45,250 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:26:56,492 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:08,125 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,000 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:10,463 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 06:27:13,360 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:16,536 INFO [train.py:901] (3/4) Epoch 8, batch 7750, loss[loss=0.2055, simple_loss=0.2916, pruned_loss=0.05974, over 8511.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.324, pruned_loss=0.09136, over 1614594.49 frames. ], batch size: 26, lr: 9.31e-03, grad_scale: 8.0 +2023-02-06 06:27:21,028 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.516e+02 3.070e+02 3.996e+02 6.859e+02, threshold=6.139e+02, percent-clipped=0.0 +2023-02-06 06:27:25,294 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:38,606 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:43,970 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4772, 2.0065, 2.0161, 1.1197, 2.1012, 1.3464, 0.5046, 1.7139], + device='cuda:3'), covar=tensor([0.0299, 0.0132, 0.0137, 0.0300, 0.0179, 0.0527, 0.0453, 0.0133], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0284, 0.0236, 0.0346, 0.0273, 0.0441, 0.0336, 0.0317], + device='cuda:3'), out_proj_covar=tensor([1.0808e-04, 8.3007e-05, 6.9521e-05, 1.0235e-04, 8.1936e-05, 1.4384e-04, + 1.0169e-04, 9.4832e-05], device='cuda:3') +2023-02-06 06:27:51,131 INFO [train.py:901] (3/4) Epoch 8, batch 7800, loss[loss=0.2239, simple_loss=0.2926, pruned_loss=0.07761, over 7420.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3237, pruned_loss=0.0918, over 1609694.19 frames. 
], batch size: 17, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:27:51,953 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8696, 1.0690, 3.0126, 0.9910, 2.5700, 2.5016, 2.7036, 2.6378], + device='cuda:3'), covar=tensor([0.0726, 0.3590, 0.0788, 0.3355, 0.1427, 0.0913, 0.0701, 0.0828], + device='cuda:3'), in_proj_covar=tensor([0.0428, 0.0544, 0.0536, 0.0496, 0.0560, 0.0473, 0.0471, 0.0528], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 06:27:53,244 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64385.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:54,024 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3582, 2.8430, 1.8507, 2.2508, 2.1300, 1.4485, 1.9477, 2.1323], + device='cuda:3'), covar=tensor([0.1434, 0.0344, 0.0988, 0.0638, 0.0596, 0.1353, 0.0873, 0.0837], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0233, 0.0311, 0.0297, 0.0306, 0.0317, 0.0339, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:27:58,800 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:27:59,137 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.29 vs. limit=5.0 +2023-02-06 06:28:25,439 INFO [train.py:901] (3/4) Epoch 8, batch 7850, loss[loss=0.2869, simple_loss=0.3362, pruned_loss=0.1188, over 8260.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3238, pruned_loss=0.09165, over 1611907.19 frames. ], batch size: 24, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:30,089 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.075e+02 2.873e+02 3.519e+02 4.505e+02 1.254e+03, threshold=7.037e+02, percent-clipped=6.0 +2023-02-06 06:28:58,104 INFO [train.py:901] (3/4) Epoch 8, batch 7900, loss[loss=0.2678, simple_loss=0.342, pruned_loss=0.09677, over 8253.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3248, pruned_loss=0.09254, over 1611405.43 frames. ], batch size: 24, lr: 9.30e-03, grad_scale: 8.0 +2023-02-06 06:28:58,914 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:10,210 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64500.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:12,952 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4811, 2.7861, 1.9309, 2.2294, 2.0907, 1.5134, 1.9139, 2.1036], + device='cuda:3'), covar=tensor([0.1212, 0.0305, 0.0910, 0.0524, 0.0561, 0.1221, 0.0849, 0.0708], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0236, 0.0310, 0.0295, 0.0305, 0.0318, 0.0339, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:29:17,969 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 06:29:32,351 INFO [train.py:901] (3/4) Epoch 8, batch 7950, loss[loss=0.2657, simple_loss=0.3391, pruned_loss=0.09609, over 8361.00 frames. ], tot_loss[loss=0.2538, simple_loss=0.3241, pruned_loss=0.09176, over 1609296.55 frames. 
], batch size: 24, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:29:37,062 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.689e+02 3.383e+02 4.341e+02 8.251e+02, threshold=6.766e+02, percent-clipped=4.0 +2023-02-06 06:29:38,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0940, 1.7119, 1.3844, 1.6824, 1.4094, 1.1781, 1.3655, 1.4111], + device='cuda:3'), covar=tensor([0.0947, 0.0462, 0.1080, 0.0467, 0.0622, 0.1265, 0.0772, 0.0709], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0235, 0.0309, 0.0293, 0.0303, 0.0318, 0.0337, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:29:40,088 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:29:56,722 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64568.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:03,508 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:06,038 INFO [train.py:901] (3/4) Epoch 8, batch 8000, loss[loss=0.1976, simple_loss=0.2745, pruned_loss=0.06037, over 7655.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3231, pruned_loss=0.09177, over 1601001.47 frames. ], batch size: 19, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:14,258 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:20,522 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:23,072 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:30:39,924 INFO [train.py:901] (3/4) Epoch 8, batch 8050, loss[loss=0.2172, simple_loss=0.2807, pruned_loss=0.07689, over 7557.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3227, pruned_loss=0.09272, over 1585011.03 frames. ], batch size: 18, lr: 9.29e-03, grad_scale: 8.0 +2023-02-06 06:30:44,636 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.434e+02 2.955e+02 3.616e+02 6.730e+02, threshold=5.909e+02, percent-clipped=0.0 +2023-02-06 06:30:51,580 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:13,049 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 06:31:17,613 INFO [train.py:901] (3/4) Epoch 9, batch 0, loss[loss=0.2478, simple_loss=0.3181, pruned_loss=0.08873, over 8622.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3181, pruned_loss=0.08873, over 8622.00 frames. ], batch size: 31, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:31:17,613 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 06:31:28,850 INFO [train.py:935] (3/4) Epoch 9, validation: loss=0.1983, simple_loss=0.2974, pruned_loss=0.04961, over 944034.00 frames. 
+2023-02-06 06:31:28,851 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 06:31:29,662 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64666.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,200 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:35,991 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-06 06:31:43,422 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 06:31:56,883 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:31:58,425 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:02,443 INFO [train.py:901] (3/4) Epoch 9, batch 50, loss[loss=0.2464, simple_loss=0.3218, pruned_loss=0.08553, over 8452.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3225, pruned_loss=0.09218, over 365814.18 frames. ], batch size: 25, lr: 8.79e-03, grad_scale: 8.0 +2023-02-06 06:32:06,633 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:07,956 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3696, 1.9522, 3.2760, 1.1221, 2.4500, 1.7898, 1.6189, 2.0197], + device='cuda:3'), covar=tensor([0.2002, 0.2216, 0.0705, 0.4354, 0.1555, 0.3054, 0.1932, 0.2480], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0482, 0.0522, 0.0562, 0.0595, 0.0532, 0.0457, 0.0595], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:32:16,334 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 06:32:18,980 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.993e+02 2.818e+02 3.347e+02 4.122e+02 1.189e+03, threshold=6.695e+02, percent-clipped=9.0 +2023-02-06 06:32:30,212 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2657, 1.5650, 1.6071, 0.8990, 1.7128, 1.1993, 0.2336, 1.4324], + device='cuda:3'), covar=tensor([0.0266, 0.0179, 0.0169, 0.0264, 0.0200, 0.0580, 0.0448, 0.0144], + device='cuda:3'), in_proj_covar=tensor([0.0372, 0.0288, 0.0239, 0.0353, 0.0281, 0.0447, 0.0340, 0.0324], + device='cuda:3'), out_proj_covar=tensor([1.1223e-04, 8.4071e-05, 7.0473e-05, 1.0408e-04, 8.4099e-05, 1.4525e-04, + 1.0258e-04, 9.6760e-05], device='cuda:3') +2023-02-06 06:32:31,575 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:36,077 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:37,301 INFO [train.py:901] (3/4) Epoch 9, batch 100, loss[loss=0.2189, simple_loss=0.2966, pruned_loss=0.07055, over 8183.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3237, pruned_loss=0.09064, over 649705.35 frames. 
], batch size: 23, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:32:41,538 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:32:42,074 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 06:32:49,745 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64781.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:11,715 INFO [train.py:901] (3/4) Epoch 9, batch 150, loss[loss=0.3216, simple_loss=0.3763, pruned_loss=0.1334, over 8590.00 frames. ], tot_loss[loss=0.2541, simple_loss=0.3248, pruned_loss=0.09168, over 864523.48 frames. ], batch size: 31, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:33:16,748 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:20,059 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:33:27,774 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.896e+02 2.577e+02 3.213e+02 3.848e+02 9.281e+02, threshold=6.425e+02, percent-clipped=3.0 +2023-02-06 06:33:45,633 INFO [train.py:901] (3/4) Epoch 9, batch 200, loss[loss=0.2953, simple_loss=0.3402, pruned_loss=0.1252, over 8080.00 frames. ], tot_loss[loss=0.2569, simple_loss=0.3266, pruned_loss=0.09359, over 1035685.55 frames. ], batch size: 21, lr: 8.78e-03, grad_scale: 8.0 +2023-02-06 06:34:21,132 INFO [train.py:901] (3/4) Epoch 9, batch 250, loss[loss=0.2725, simple_loss=0.3388, pruned_loss=0.1031, over 8490.00 frames. ], tot_loss[loss=0.255, simple_loss=0.3254, pruned_loss=0.09227, over 1166272.92 frames. ], batch size: 29, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:21,950 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7627, 1.3561, 2.7413, 1.1887, 2.0645, 2.9474, 2.9751, 2.5060], + device='cuda:3'), covar=tensor([0.1087, 0.1481, 0.0445, 0.2188, 0.0857, 0.0350, 0.0546, 0.0769], + device='cuda:3'), in_proj_covar=tensor([0.0253, 0.0282, 0.0241, 0.0273, 0.0254, 0.0222, 0.0295, 0.0282], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 06:34:34,312 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 06:34:36,811 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.096e+02 2.841e+02 3.295e+02 4.179e+02 1.029e+03, threshold=6.590e+02, percent-clipped=5.0 +2023-02-06 06:34:39,027 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=64942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:42,836 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 06:34:44,920 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=64951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:34:54,272 INFO [train.py:901] (3/4) Epoch 9, batch 300, loss[loss=0.215, simple_loss=0.2879, pruned_loss=0.07102, over 8092.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3254, pruned_loss=0.09212, over 1272605.52 frames. 
], batch size: 21, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:34:54,483 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=64965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:09,995 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 06:35:11,915 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=64990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:15,045 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=64994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:26,442 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:35:30,329 INFO [train.py:901] (3/4) Epoch 9, batch 350, loss[loss=0.2324, simple_loss=0.2952, pruned_loss=0.08478, over 7551.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.3244, pruned_loss=0.09168, over 1345956.69 frames. ], batch size: 18, lr: 8.77e-03, grad_scale: 8.0 +2023-02-06 06:35:41,464 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 06:35:46,488 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.892e+02 2.570e+02 3.183e+02 3.796e+02 1.000e+03, threshold=6.367e+02, percent-clipped=4.0 +2023-02-06 06:36:03,866 INFO [train.py:901] (3/4) Epoch 9, batch 400, loss[loss=0.2679, simple_loss=0.3452, pruned_loss=0.09531, over 8343.00 frames. ], tot_loss[loss=0.2539, simple_loss=0.325, pruned_loss=0.09139, over 1407080.80 frames. ], batch size: 26, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:03,966 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65065.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:04,759 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:08,000 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6867, 1.3182, 4.7887, 1.8665, 4.1914, 3.9596, 4.3137, 4.1567], + device='cuda:3'), covar=tensor([0.0397, 0.4281, 0.0398, 0.3051, 0.1094, 0.0785, 0.0499, 0.0570], + device='cuda:3'), in_proj_covar=tensor([0.0429, 0.0547, 0.0535, 0.0500, 0.0565, 0.0474, 0.0476, 0.0532], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 06:36:09,409 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65073.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:12,868 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:22,093 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. 
limit=2.0 +2023-02-06 06:36:30,518 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:33,064 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:36,574 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6400, 2.3915, 4.7081, 1.2588, 3.2158, 2.2472, 1.8008, 2.7486], + device='cuda:3'), covar=tensor([0.1627, 0.2054, 0.0591, 0.3768, 0.1297, 0.2547, 0.1613, 0.2324], + device='cuda:3'), in_proj_covar=tensor([0.0474, 0.0486, 0.0523, 0.0561, 0.0599, 0.0533, 0.0456, 0.0594], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:36:37,776 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:38,389 INFO [train.py:901] (3/4) Epoch 9, batch 450, loss[loss=0.2459, simple_loss=0.3246, pruned_loss=0.08361, over 8130.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3245, pruned_loss=0.09058, over 1453494.59 frames. ], batch size: 22, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:36:46,192 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:36:56,315 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.705e+02 3.323e+02 3.920e+02 9.407e+02, threshold=6.647e+02, percent-clipped=6.0 +2023-02-06 06:37:13,476 INFO [train.py:901] (3/4) Epoch 9, batch 500, loss[loss=0.2407, simple_loss=0.3218, pruned_loss=0.07982, over 8252.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3229, pruned_loss=0.08932, over 1491357.65 frames. ], batch size: 24, lr: 8.76e-03, grad_scale: 8.0 +2023-02-06 06:37:23,270 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:35,049 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:45,913 INFO [train.py:901] (3/4) Epoch 9, batch 550, loss[loss=0.3019, simple_loss=0.3674, pruned_loss=0.1182, over 8344.00 frames. ], tot_loss[loss=0.2532, simple_loss=0.3248, pruned_loss=0.09083, over 1524150.17 frames. ], batch size: 25, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:37:51,951 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:52,675 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65223.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:37:56,719 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:03,161 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.829e+02 3.496e+02 4.355e+02 8.306e+02, threshold=6.991e+02, percent-clipped=2.0 +2023-02-06 06:38:21,250 INFO [train.py:901] (3/4) Epoch 9, batch 600, loss[loss=0.257, simple_loss=0.3404, pruned_loss=0.08677, over 8341.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3242, pruned_loss=0.09033, over 1544764.25 frames. ], batch size: 26, lr: 8.75e-03, grad_scale: 8.0 +2023-02-06 06:38:37,438 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. 
limit=2.0 +2023-02-06 06:38:38,355 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 06:38:39,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1805, 2.2152, 1.6857, 1.9827, 1.7429, 1.2807, 1.6532, 1.6307], + device='cuda:3'), covar=tensor([0.1097, 0.0283, 0.0814, 0.0447, 0.0540, 0.1198, 0.0756, 0.0766], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0234, 0.0307, 0.0295, 0.0300, 0.0316, 0.0336, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:38:43,796 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3793, 1.7046, 4.4210, 1.9204, 2.2497, 4.9459, 4.8692, 4.2097], + device='cuda:3'), covar=tensor([0.0964, 0.1536, 0.0249, 0.1869, 0.1135, 0.0172, 0.0346, 0.0554], + device='cuda:3'), in_proj_covar=tensor([0.0253, 0.0284, 0.0241, 0.0274, 0.0255, 0.0225, 0.0298, 0.0284], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 06:38:46,525 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65303.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:38:54,293 INFO [train.py:901] (3/4) Epoch 9, batch 650, loss[loss=0.2673, simple_loss=0.3317, pruned_loss=0.1014, over 8327.00 frames. ], tot_loss[loss=0.2542, simple_loss=0.3253, pruned_loss=0.09149, over 1563064.97 frames. ], batch size: 25, lr: 8.75e-03, grad_scale: 16.0 +2023-02-06 06:38:59,115 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:08,415 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7227, 1.8219, 2.1874, 1.7069, 1.1347, 2.2227, 0.3621, 1.2883], + device='cuda:3'), covar=tensor([0.2738, 0.1429, 0.0480, 0.1745, 0.4973, 0.0531, 0.3653, 0.1962], + device='cuda:3'), in_proj_covar=tensor([0.0155, 0.0153, 0.0092, 0.0202, 0.0242, 0.0095, 0.0156, 0.0156], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0001, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 06:39:10,302 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:10,872 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.656e+02 3.252e+02 4.080e+02 6.220e+02, threshold=6.503e+02, percent-clipped=0.0 +2023-02-06 06:39:17,065 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:29,074 INFO [train.py:901] (3/4) Epoch 9, batch 700, loss[loss=0.2158, simple_loss=0.3125, pruned_loss=0.05957, over 8192.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3238, pruned_loss=0.08977, over 1580772.13 frames. 
], batch size: 23, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:39:41,010 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65381.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:39:57,320 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2514, 1.7121, 1.6428, 0.7102, 1.7310, 1.2183, 0.2694, 1.4908], + device='cuda:3'), covar=tensor([0.0271, 0.0163, 0.0182, 0.0302, 0.0230, 0.0547, 0.0471, 0.0151], + device='cuda:3'), in_proj_covar=tensor([0.0364, 0.0288, 0.0239, 0.0350, 0.0281, 0.0446, 0.0334, 0.0317], + device='cuda:3'), out_proj_covar=tensor([1.0970e-04, 8.4167e-05, 7.0472e-05, 1.0279e-04, 8.4140e-05, 1.4444e-04, + 1.0045e-04, 9.4514e-05], device='cuda:3') +2023-02-06 06:39:57,985 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:03,865 INFO [train.py:901] (3/4) Epoch 9, batch 750, loss[loss=0.2224, simple_loss=0.2996, pruned_loss=0.07254, over 8037.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3227, pruned_loss=0.08896, over 1593322.33 frames. ], batch size: 22, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:05,346 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:18,167 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65436.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:19,959 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.803e+02 3.527e+02 4.474e+02 1.505e+03, threshold=7.053e+02, percent-clipped=7.0 +2023-02-06 06:40:21,305 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 06:40:23,501 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5613, 2.7724, 1.8921, 2.2660, 2.2666, 1.4667, 2.0891, 2.0802], + device='cuda:3'), covar=tensor([0.1355, 0.0352, 0.0967, 0.0625, 0.0631, 0.1390, 0.0992, 0.0974], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0239, 0.0313, 0.0301, 0.0306, 0.0322, 0.0345, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:40:30,030 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 06:40:30,195 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65453.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:36,163 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65461.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:38,584 INFO [train.py:901] (3/4) Epoch 9, batch 800, loss[loss=0.2469, simple_loss=0.32, pruned_loss=0.08695, over 8572.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3235, pruned_loss=0.08951, over 1599719.68 frames. 
], batch size: 34, lr: 8.74e-03, grad_scale: 16.0 +2023-02-06 06:40:47,463 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:40:53,641 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65485.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:05,678 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65503.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:10,500 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65510.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:13,593 INFO [train.py:901] (3/4) Epoch 9, batch 850, loss[loss=0.2818, simple_loss=0.3532, pruned_loss=0.1051, over 8577.00 frames. ], tot_loss[loss=0.2512, simple_loss=0.3231, pruned_loss=0.08961, over 1605394.64 frames. ], batch size: 49, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:41:13,771 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9010, 3.8967, 2.1664, 2.6930, 2.8707, 1.8328, 2.3309, 2.7223], + device='cuda:3'), covar=tensor([0.1364, 0.0277, 0.0888, 0.0610, 0.0523, 0.1175, 0.0895, 0.0875], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0234, 0.0308, 0.0296, 0.0300, 0.0316, 0.0339, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:41:25,166 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:41:29,557 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.736e+02 3.271e+02 4.209e+02 1.110e+03, threshold=6.542e+02, percent-clipped=5.0 +2023-02-06 06:41:37,091 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65550.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:41:47,587 INFO [train.py:901] (3/4) Epoch 9, batch 900, loss[loss=0.237, simple_loss=0.3064, pruned_loss=0.0838, over 8338.00 frames. ], tot_loss[loss=0.2533, simple_loss=0.3251, pruned_loss=0.09073, over 1610607.58 frames. ], batch size: 26, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:41:59,861 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4925, 2.9452, 2.3726, 3.7190, 1.6896, 1.8938, 2.2773, 3.0549], + device='cuda:3'), covar=tensor([0.0796, 0.0852, 0.1008, 0.0327, 0.1311, 0.1484, 0.1172, 0.0734], + device='cuda:3'), in_proj_covar=tensor([0.0246, 0.0225, 0.0269, 0.0219, 0.0227, 0.0262, 0.0265, 0.0229], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 06:42:23,181 INFO [train.py:901] (3/4) Epoch 9, batch 950, loss[loss=0.2149, simple_loss=0.3073, pruned_loss=0.06122, over 8284.00 frames. ], tot_loss[loss=0.2529, simple_loss=0.3248, pruned_loss=0.09056, over 1616671.68 frames. ], batch size: 23, lr: 8.73e-03, grad_scale: 16.0 +2023-02-06 06:42:39,187 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.498e+02 3.047e+02 4.041e+02 6.463e+02, threshold=6.094e+02, percent-clipped=0.0 +2023-02-06 06:42:44,697 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65647.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:42:50,357 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-06 06:42:56,309 INFO [train.py:901] (3/4) Epoch 9, batch 1000, loss[loss=0.2628, simple_loss=0.3448, pruned_loss=0.09033, over 8256.00 frames. ], tot_loss[loss=0.2517, simple_loss=0.3235, pruned_loss=0.08997, over 1613195.78 frames. ], batch size: 24, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:05,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0366, 4.1526, 2.2730, 2.7002, 2.8597, 1.9280, 2.7294, 2.7535], + device='cuda:3'), covar=tensor([0.1469, 0.0243, 0.0932, 0.0698, 0.0592, 0.1203, 0.0962, 0.1003], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0234, 0.0312, 0.0299, 0.0304, 0.0318, 0.0341, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:43:23,106 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 06:43:27,428 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:31,857 INFO [train.py:901] (3/4) Epoch 9, batch 1050, loss[loss=0.2271, simple_loss=0.3081, pruned_loss=0.07302, over 8501.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3237, pruned_loss=0.08978, over 1616516.99 frames. ], batch size: 28, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:43:32,060 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4985, 3.0664, 2.5648, 3.9670, 1.6770, 1.9412, 2.4619, 3.3211], + device='cuda:3'), covar=tensor([0.0830, 0.0817, 0.0918, 0.0240, 0.1219, 0.1548, 0.1042, 0.0731], + device='cuda:3'), in_proj_covar=tensor([0.0245, 0.0223, 0.0269, 0.0218, 0.0226, 0.0263, 0.0265, 0.0229], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 06:43:35,701 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 06:43:44,978 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:43:47,995 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.070e+02 2.877e+02 3.398e+02 4.338e+02 8.070e+02, threshold=6.796e+02, percent-clipped=6.0 +2023-02-06 06:43:48,507 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 06:44:03,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=65762.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:05,257 INFO [train.py:901] (3/4) Epoch 9, batch 1100, loss[loss=0.2936, simple_loss=0.3723, pruned_loss=0.1074, over 8556.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3231, pruned_loss=0.08972, over 1617168.99 frames. ], batch size: 31, lr: 8.72e-03, grad_scale: 16.0 +2023-02-06 06:44:20,645 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=65788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:38,688 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=65813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:44:39,894 INFO [train.py:901] (3/4) Epoch 9, batch 1150, loss[loss=0.231, simple_loss=0.3129, pruned_loss=0.07456, over 8521.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3228, pruned_loss=0.08999, over 1620874.90 frames. 
], batch size: 28, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:44:44,630 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 06:44:56,767 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.552e+02 3.121e+02 3.966e+02 8.304e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-06 06:45:09,008 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=65856.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:45:14,590 INFO [train.py:901] (3/4) Epoch 9, batch 1200, loss[loss=0.2381, simple_loss=0.304, pruned_loss=0.08614, over 7794.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3222, pruned_loss=0.09004, over 1609627.34 frames. ], batch size: 19, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:45:33,745 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=65894.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:45:47,641 INFO [train.py:901] (3/4) Epoch 9, batch 1250, loss[loss=0.2465, simple_loss=0.3248, pruned_loss=0.08409, over 8245.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.3221, pruned_loss=0.08975, over 1610280.83 frames. ], batch size: 24, lr: 8.71e-03, grad_scale: 16.0 +2023-02-06 06:46:05,032 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.844e+02 3.477e+02 4.312e+02 8.167e+02, threshold=6.953e+02, percent-clipped=5.0 +2023-02-06 06:46:23,825 INFO [train.py:901] (3/4) Epoch 9, batch 1300, loss[loss=0.2365, simple_loss=0.3084, pruned_loss=0.08227, over 8502.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3227, pruned_loss=0.09052, over 1612133.30 frames. ], batch size: 26, lr: 8.70e-03, grad_scale: 16.0 +2023-02-06 06:46:46,259 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 06:46:46,668 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8950, 3.7923, 3.5257, 1.6928, 3.4657, 3.3611, 3.6007, 3.1063], + device='cuda:3'), covar=tensor([0.0872, 0.0615, 0.0917, 0.4453, 0.0827, 0.0999, 0.1079, 0.0950], + device='cuda:3'), in_proj_covar=tensor([0.0438, 0.0348, 0.0362, 0.0457, 0.0353, 0.0332, 0.0355, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:46:55,297 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66009.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:46:59,214 INFO [train.py:901] (3/4) Epoch 9, batch 1350, loss[loss=0.2478, simple_loss=0.3244, pruned_loss=0.08556, over 8614.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3219, pruned_loss=0.09009, over 1610092.67 frames. 
], batch size: 34, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:47:00,007 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3021, 2.9569, 2.2227, 3.8428, 1.7603, 1.7070, 2.0520, 3.1736], + device='cuda:3'), covar=tensor([0.0876, 0.0871, 0.1104, 0.0344, 0.1266, 0.1670, 0.1297, 0.0737], + device='cuda:3'), in_proj_covar=tensor([0.0245, 0.0224, 0.0268, 0.0217, 0.0224, 0.0261, 0.0264, 0.0229], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 06:47:01,385 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:16,348 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6083, 2.5764, 2.0198, 2.2420, 2.2152, 1.5657, 2.1487, 2.1445], + device='cuda:3'), covar=tensor([0.1194, 0.0301, 0.0777, 0.0457, 0.0511, 0.1178, 0.0736, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0234, 0.0311, 0.0297, 0.0307, 0.0319, 0.0341, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 06:47:17,555 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 2.557e+02 3.336e+02 4.233e+02 1.201e+03, threshold=6.672e+02, percent-clipped=8.0 +2023-02-06 06:47:19,778 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:47:35,101 INFO [train.py:901] (3/4) Epoch 9, batch 1400, loss[loss=0.2484, simple_loss=0.3272, pruned_loss=0.08478, over 8507.00 frames. ], tot_loss[loss=0.2534, simple_loss=0.3241, pruned_loss=0.09138, over 1613515.51 frames. ], batch size: 28, lr: 8.70e-03, grad_scale: 8.0 +2023-02-06 06:48:09,443 INFO [train.py:901] (3/4) Epoch 9, batch 1450, loss[loss=0.3189, simple_loss=0.3717, pruned_loss=0.1331, over 6960.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3236, pruned_loss=0.09183, over 1608848.47 frames. ], batch size: 71, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:12,195 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 06:48:26,154 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.917e+02 2.633e+02 3.463e+02 4.686e+02 9.003e+02, threshold=6.925e+02, percent-clipped=5.0 +2023-02-06 06:48:42,248 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:48:44,178 INFO [train.py:901] (3/4) Epoch 9, batch 1500, loss[loss=0.2245, simple_loss=0.3119, pruned_loss=0.06854, over 8289.00 frames. ], tot_loss[loss=0.2535, simple_loss=0.3242, pruned_loss=0.09137, over 1613685.71 frames. 
], batch size: 23, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:48:52,878 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66178.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:49:05,569 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3072, 2.3348, 4.3895, 2.6966, 4.0290, 3.7868, 4.1309, 4.0499], + device='cuda:3'), covar=tensor([0.0424, 0.2885, 0.0541, 0.2449, 0.0797, 0.0661, 0.0401, 0.0455], + device='cuda:3'), in_proj_covar=tensor([0.0427, 0.0538, 0.0518, 0.0494, 0.0558, 0.0473, 0.0463, 0.0528], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 06:49:07,243 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 06:49:08,956 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66200.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:49:18,748 INFO [train.py:901] (3/4) Epoch 9, batch 1550, loss[loss=0.2275, simple_loss=0.3192, pruned_loss=0.06785, over 8498.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3233, pruned_loss=0.09058, over 1612372.40 frames. ], batch size: 28, lr: 8.69e-03, grad_scale: 8.0 +2023-02-06 06:49:28,444 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3187, 1.2385, 1.3656, 1.1771, 0.7833, 1.2264, 1.1945, 0.8971], + device='cuda:3'), covar=tensor([0.0619, 0.1249, 0.1817, 0.1445, 0.0596, 0.1601, 0.0682, 0.0716], + device='cuda:3'), in_proj_covar=tensor([0.0107, 0.0158, 0.0198, 0.0163, 0.0109, 0.0167, 0.0122, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 06:49:35,629 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.559e+02 2.942e+02 3.565e+02 7.942e+02, threshold=5.885e+02, percent-clipped=2.0 +2023-02-06 06:49:53,203 INFO [train.py:901] (3/4) Epoch 9, batch 1600, loss[loss=0.1983, simple_loss=0.2718, pruned_loss=0.06236, over 7925.00 frames. ], tot_loss[loss=0.2508, simple_loss=0.322, pruned_loss=0.08982, over 1611007.26 frames. ], batch size: 20, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:49:53,436 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66265.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:02,299 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:50:11,802 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66290.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:20,671 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 06:50:29,758 INFO [train.py:901] (3/4) Epoch 9, batch 1650, loss[loss=0.2127, simple_loss=0.282, pruned_loss=0.07166, over 7559.00 frames. ], tot_loss[loss=0.2513, simple_loss=0.3226, pruned_loss=0.08998, over 1614452.87 frames. 
], batch size: 18, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:50:29,933 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66315.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:50:46,552 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.536e+02 3.360e+02 4.258e+02 7.701e+02, threshold=6.719e+02, percent-clipped=5.0 +2023-02-06 06:51:03,467 INFO [train.py:901] (3/4) Epoch 9, batch 1700, loss[loss=0.2584, simple_loss=0.336, pruned_loss=0.09038, over 8296.00 frames. ], tot_loss[loss=0.2503, simple_loss=0.3219, pruned_loss=0.08936, over 1616369.62 frames. ], batch size: 23, lr: 8.68e-03, grad_scale: 8.0 +2023-02-06 06:51:39,908 INFO [train.py:901] (3/4) Epoch 9, batch 1750, loss[loss=0.262, simple_loss=0.3416, pruned_loss=0.09119, over 8770.00 frames. ], tot_loss[loss=0.2484, simple_loss=0.3204, pruned_loss=0.08818, over 1617884.81 frames. ], batch size: 30, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:51:57,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.942e+02 3.542e+02 4.261e+02 7.419e+02, threshold=7.084e+02, percent-clipped=2.0 +2023-02-06 06:52:04,253 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.2177, 5.2407, 4.6484, 2.3743, 4.7494, 4.8146, 4.9675, 4.2785], + device='cuda:3'), covar=tensor([0.0646, 0.0416, 0.0927, 0.4321, 0.0671, 0.0654, 0.0963, 0.0695], + device='cuda:3'), in_proj_covar=tensor([0.0444, 0.0345, 0.0358, 0.0451, 0.0356, 0.0336, 0.0358, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:52:13,907 INFO [train.py:901] (3/4) Epoch 9, batch 1800, loss[loss=0.2405, simple_loss=0.3135, pruned_loss=0.08378, over 8583.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3216, pruned_loss=0.08874, over 1621727.22 frames. ], batch size: 39, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:42,873 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66506.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:52:48,997 INFO [train.py:901] (3/4) Epoch 9, batch 1850, loss[loss=0.2359, simple_loss=0.3097, pruned_loss=0.08109, over 8108.00 frames. ], tot_loss[loss=0.2487, simple_loss=0.3208, pruned_loss=0.08824, over 1621980.26 frames. ], batch size: 23, lr: 8.67e-03, grad_scale: 8.0 +2023-02-06 06:52:53,698 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66522.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:05,497 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66539.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:53:05,997 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.848e+02 3.228e+02 4.154e+02 1.120e+03, threshold=6.457e+02, percent-clipped=1.0 +2023-02-06 06:53:13,960 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6898, 1.2813, 1.5297, 1.2344, 1.0598, 1.3027, 1.5406, 1.4289], + device='cuda:3'), covar=tensor([0.0573, 0.1273, 0.1675, 0.1352, 0.0566, 0.1495, 0.0677, 0.0595], + device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0158, 0.0199, 0.0162, 0.0110, 0.0167, 0.0122, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 06:53:22,983 INFO [train.py:901] (3/4) Epoch 9, batch 1900, loss[loss=0.2525, simple_loss=0.3278, pruned_loss=0.0886, over 8455.00 frames. 
], tot_loss[loss=0.2503, simple_loss=0.3216, pruned_loss=0.08949, over 1616646.04 frames. ], batch size: 27, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:27,132 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66571.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:43,851 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66596.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:53:47,139 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 06:53:48,636 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6863, 4.6033, 4.2062, 2.0260, 4.1375, 4.1202, 4.3522, 3.7197], + device='cuda:3'), covar=tensor([0.0727, 0.0494, 0.0939, 0.4418, 0.0746, 0.0804, 0.0972, 0.0771], + device='cuda:3'), in_proj_covar=tensor([0.0443, 0.0346, 0.0361, 0.0455, 0.0361, 0.0337, 0.0359, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:53:56,562 INFO [train.py:901] (3/4) Epoch 9, batch 1950, loss[loss=0.2216, simple_loss=0.2849, pruned_loss=0.07912, over 7198.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.322, pruned_loss=0.08944, over 1615652.52 frames. ], batch size: 16, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:53:58,607 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 06:54:00,037 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:00,840 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:54:12,900 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66637.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:54:14,621 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.940e+02 2.852e+02 3.410e+02 4.369e+02 9.021e+02, threshold=6.820e+02, percent-clipped=7.0 +2023-02-06 06:54:20,054 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 06:54:32,103 INFO [train.py:901] (3/4) Epoch 9, batch 2000, loss[loss=0.2334, simple_loss=0.3144, pruned_loss=0.07621, over 8360.00 frames. ], tot_loss[loss=0.2483, simple_loss=0.3206, pruned_loss=0.08803, over 1615020.32 frames. ], batch size: 26, lr: 8.66e-03, grad_scale: 8.0 +2023-02-06 06:55:06,982 INFO [train.py:901] (3/4) Epoch 9, batch 2050, loss[loss=0.2042, simple_loss=0.294, pruned_loss=0.05718, over 8034.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3218, pruned_loss=0.08847, over 1620354.05 frames. 
], batch size: 22, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:55:12,325 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1559, 1.3513, 4.3001, 1.7281, 3.8359, 3.5741, 3.8501, 3.7648], + device='cuda:3'), covar=tensor([0.0466, 0.3890, 0.0473, 0.2736, 0.1047, 0.0780, 0.0517, 0.0610], + device='cuda:3'), in_proj_covar=tensor([0.0435, 0.0551, 0.0538, 0.0503, 0.0572, 0.0486, 0.0478, 0.0543], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 06:55:20,407 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:55:23,653 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.024e+02 2.763e+02 3.349e+02 4.333e+02 1.017e+03, threshold=6.698e+02, percent-clipped=4.0 +2023-02-06 06:55:29,223 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1196, 1.3834, 3.1566, 1.3983, 2.1329, 3.5103, 3.5426, 2.9691], + device='cuda:3'), covar=tensor([0.0937, 0.1685, 0.0367, 0.2061, 0.1033, 0.0284, 0.0505, 0.0704], + device='cuda:3'), in_proj_covar=tensor([0.0256, 0.0288, 0.0246, 0.0279, 0.0263, 0.0230, 0.0305, 0.0291], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 06:55:31,185 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66749.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:55:42,432 INFO [train.py:901] (3/4) Epoch 9, batch 2100, loss[loss=0.2594, simple_loss=0.3354, pruned_loss=0.09168, over 7053.00 frames. ], tot_loss[loss=0.2502, simple_loss=0.3223, pruned_loss=0.08911, over 1619357.03 frames. ], batch size: 71, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:17,429 INFO [train.py:901] (3/4) Epoch 9, batch 2150, loss[loss=0.2384, simple_loss=0.3095, pruned_loss=0.08361, over 7660.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3236, pruned_loss=0.08975, over 1620953.95 frames. ], batch size: 19, lr: 8.65e-03, grad_scale: 8.0 +2023-02-06 06:56:34,546 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.810e+02 3.362e+02 4.511e+02 1.000e+03, threshold=6.724e+02, percent-clipped=7.0 +2023-02-06 06:56:53,076 INFO [train.py:901] (3/4) Epoch 9, batch 2200, loss[loss=0.2868, simple_loss=0.3512, pruned_loss=0.1112, over 8623.00 frames. ], tot_loss[loss=0.2507, simple_loss=0.3224, pruned_loss=0.08952, over 1617156.39 frames. 
], batch size: 34, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:01,059 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66877.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:05,629 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=66883.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:57:09,744 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2322, 1.8948, 3.0731, 2.3176, 2.5120, 2.0589, 1.6328, 1.2626], + device='cuda:3'), covar=tensor([0.3447, 0.3761, 0.0950, 0.2532, 0.1954, 0.1921, 0.1641, 0.3919], + device='cuda:3'), in_proj_covar=tensor([0.0842, 0.0809, 0.0694, 0.0805, 0.0896, 0.0756, 0.0686, 0.0731], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:57:12,276 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66893.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:18,130 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:57:26,998 INFO [train.py:901] (3/4) Epoch 9, batch 2250, loss[loss=0.2265, simple_loss=0.2967, pruned_loss=0.07814, over 7922.00 frames. ], tot_loss[loss=0.2501, simple_loss=0.3218, pruned_loss=0.08925, over 1613393.97 frames. ], batch size: 20, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:57:29,218 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=66918.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 06:57:43,689 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.716e+02 3.375e+02 4.203e+02 7.579e+02, threshold=6.750e+02, percent-clipped=1.0 +2023-02-06 06:58:00,318 INFO [train.py:901] (3/4) Epoch 9, batch 2300, loss[loss=0.2269, simple_loss=0.3189, pruned_loss=0.06746, over 8200.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.3206, pruned_loss=0.08878, over 1610820.60 frames. ], batch size: 23, lr: 8.64e-03, grad_scale: 8.0 +2023-02-06 06:58:07,513 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=66975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:19,742 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=66991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:21,141 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4783, 1.7745, 2.8479, 1.2732, 2.1570, 1.9049, 1.4476, 1.8160], + device='cuda:3'), covar=tensor([0.1535, 0.1875, 0.0642, 0.3463, 0.1250, 0.2465, 0.1679, 0.1849], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0488, 0.0534, 0.0570, 0.0603, 0.0535, 0.0463, 0.0601], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 06:58:24,432 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=66998.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:58:36,102 INFO [train.py:901] (3/4) Epoch 9, batch 2350, loss[loss=0.2336, simple_loss=0.3256, pruned_loss=0.07085, over 8234.00 frames. ], tot_loss[loss=0.249, simple_loss=0.3204, pruned_loss=0.08876, over 1612455.41 frames. 
], batch size: 22, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:58:37,002 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:58:53,554 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.755e+02 3.236e+02 4.430e+02 1.005e+03, threshold=6.472e+02, percent-clipped=3.0 +2023-02-06 06:59:10,050 INFO [train.py:901] (3/4) Epoch 9, batch 2400, loss[loss=0.243, simple_loss=0.3179, pruned_loss=0.08411, over 8466.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3208, pruned_loss=0.08924, over 1608778.20 frames. ], batch size: 25, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 06:59:10,297 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3333, 1.6793, 1.6592, 0.8849, 1.7219, 1.2633, 0.3000, 1.4803], + device='cuda:3'), covar=tensor([0.0293, 0.0198, 0.0153, 0.0275, 0.0178, 0.0554, 0.0477, 0.0148], + device='cuda:3'), in_proj_covar=tensor([0.0367, 0.0292, 0.0243, 0.0355, 0.0284, 0.0441, 0.0336, 0.0321], + device='cuda:3'), out_proj_covar=tensor([1.0949e-04, 8.5292e-05, 7.1180e-05, 1.0401e-04, 8.4612e-05, 1.4138e-04, + 1.0074e-04, 9.4993e-05], device='cuda:3') +2023-02-06 06:59:28,534 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67093.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 06:59:35,235 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 06:59:44,439 INFO [train.py:901] (3/4) Epoch 9, batch 2450, loss[loss=0.2594, simple_loss=0.3296, pruned_loss=0.09461, over 7977.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3212, pruned_loss=0.08936, over 1611804.76 frames. ], batch size: 21, lr: 8.63e-03, grad_scale: 8.0 +2023-02-06 07:00:02,010 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.801e+02 2.787e+02 3.467e+02 4.148e+02 8.119e+02, threshold=6.934e+02, percent-clipped=3.0 +2023-02-06 07:00:17,987 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8587, 2.0183, 1.7213, 2.5580, 1.3885, 1.3622, 1.7281, 2.0502], + device='cuda:3'), covar=tensor([0.0850, 0.0990, 0.1231, 0.0480, 0.1231, 0.1829, 0.1105, 0.0923], + device='cuda:3'), in_proj_covar=tensor([0.0247, 0.0226, 0.0265, 0.0218, 0.0224, 0.0264, 0.0267, 0.0231], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 07:00:18,493 INFO [train.py:901] (3/4) Epoch 9, batch 2500, loss[loss=0.2595, simple_loss=0.3371, pruned_loss=0.09094, over 8466.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3229, pruned_loss=0.0905, over 1614758.86 frames. ], batch size: 25, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:00:48,181 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67208.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:00:52,861 INFO [train.py:901] (3/4) Epoch 9, batch 2550, loss[loss=0.253, simple_loss=0.3297, pruned_loss=0.08814, over 8331.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3226, pruned_loss=0.09047, over 1611660.83 frames. 
], batch size: 25, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:00,613 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7598, 2.2114, 3.4077, 2.7451, 3.0258, 2.4161, 2.0650, 2.0979], + device='cuda:3'), covar=tensor([0.2425, 0.3513, 0.0949, 0.2292, 0.1588, 0.1702, 0.1296, 0.3287], + device='cuda:3'), in_proj_covar=tensor([0.0844, 0.0811, 0.0693, 0.0807, 0.0894, 0.0752, 0.0684, 0.0733], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:01:12,346 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.679e+02 3.405e+02 4.203e+02 8.726e+02, threshold=6.810e+02, percent-clipped=2.0 +2023-02-06 07:01:19,266 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4224, 1.7037, 2.8722, 1.2096, 2.1253, 1.8052, 1.5717, 2.0116], + device='cuda:3'), covar=tensor([0.1520, 0.1920, 0.0507, 0.3460, 0.1310, 0.2502, 0.1562, 0.1752], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0489, 0.0531, 0.0566, 0.0602, 0.0534, 0.0463, 0.0606], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:01:19,289 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1630, 1.7425, 2.6363, 2.1645, 2.3578, 1.9731, 1.6274, 1.1139], + device='cuda:3'), covar=tensor([0.3053, 0.3405, 0.0886, 0.1962, 0.1423, 0.1950, 0.1477, 0.3165], + device='cuda:3'), in_proj_covar=tensor([0.0850, 0.0814, 0.0696, 0.0809, 0.0896, 0.0756, 0.0687, 0.0734], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:01:21,215 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3264, 1.6603, 2.8248, 1.1150, 1.9811, 1.7285, 1.4660, 1.8632], + device='cuda:3'), covar=tensor([0.1749, 0.2005, 0.0759, 0.3661, 0.1494, 0.2649, 0.1655, 0.2021], + device='cuda:3'), in_proj_covar=tensor([0.0476, 0.0489, 0.0530, 0.0565, 0.0601, 0.0533, 0.0462, 0.0605], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:01:21,878 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67254.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:01:28,483 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5858, 1.8760, 3.3787, 1.3082, 2.2885, 1.9750, 1.6702, 2.0693], + device='cuda:3'), covar=tensor([0.1512, 0.2008, 0.0597, 0.3526, 0.1333, 0.2502, 0.1517, 0.2151], + device='cuda:3'), in_proj_covar=tensor([0.0476, 0.0489, 0.0528, 0.0565, 0.0600, 0.0533, 0.0461, 0.0604], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:01:29,607 INFO [train.py:901] (3/4) Epoch 9, batch 2600, loss[loss=0.2687, simple_loss=0.3318, pruned_loss=0.1028, over 8024.00 frames. ], tot_loss[loss=0.2543, simple_loss=0.325, pruned_loss=0.09185, over 1615802.26 frames. 
], batch size: 22, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:01:32,416 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4723, 1.5143, 2.3495, 1.1851, 2.1602, 2.5145, 2.6073, 2.1753], + device='cuda:3'), covar=tensor([0.0935, 0.1063, 0.0472, 0.1840, 0.0619, 0.0377, 0.0546, 0.0755], + device='cuda:3'), in_proj_covar=tensor([0.0252, 0.0282, 0.0244, 0.0274, 0.0256, 0.0227, 0.0302, 0.0286], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:01:39,250 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67279.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:01:46,306 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-06 07:02:03,433 INFO [train.py:901] (3/4) Epoch 9, batch 2650, loss[loss=0.2536, simple_loss=0.3288, pruned_loss=0.08921, over 8740.00 frames. ], tot_loss[loss=0.254, simple_loss=0.3244, pruned_loss=0.09179, over 1612207.07 frames. ], batch size: 30, lr: 8.62e-03, grad_scale: 8.0 +2023-02-06 07:02:06,282 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:02:18,506 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8428, 1.5453, 1.7592, 1.4095, 1.1596, 1.4189, 1.6983, 1.5018], + device='cuda:3'), covar=tensor([0.0527, 0.1143, 0.1575, 0.1267, 0.0533, 0.1406, 0.0621, 0.0580], + device='cuda:3'), in_proj_covar=tensor([0.0105, 0.0158, 0.0195, 0.0161, 0.0108, 0.0166, 0.0119, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0008, 0.0008, 0.0005, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 07:02:20,594 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6601, 1.5540, 4.4236, 1.8276, 2.4131, 5.1081, 5.0245, 4.4314], + device='cuda:3'), covar=tensor([0.1034, 0.1619, 0.0253, 0.2050, 0.1023, 0.0195, 0.0310, 0.0510], + device='cuda:3'), in_proj_covar=tensor([0.0251, 0.0280, 0.0242, 0.0274, 0.0255, 0.0225, 0.0302, 0.0285], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:02:21,820 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.739e+02 3.376e+02 4.238e+02 9.756e+02, threshold=6.752e+02, percent-clipped=4.0 +2023-02-06 07:02:39,314 INFO [train.py:901] (3/4) Epoch 9, batch 2700, loss[loss=0.32, simple_loss=0.3827, pruned_loss=0.1287, over 8262.00 frames. ], tot_loss[loss=0.2528, simple_loss=0.3234, pruned_loss=0.09112, over 1612545.98 frames. ], batch size: 24, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:02:55,842 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9399, 2.4005, 4.5780, 1.4313, 3.2922, 2.3789, 1.9610, 2.9404], + device='cuda:3'), covar=tensor([0.1514, 0.2019, 0.0677, 0.3586, 0.1302, 0.2411, 0.1509, 0.2176], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0493, 0.0532, 0.0571, 0.0605, 0.0540, 0.0465, 0.0607], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:03:13,103 INFO [train.py:901] (3/4) Epoch 9, batch 2750, loss[loss=0.3373, simple_loss=0.385, pruned_loss=0.1448, over 7419.00 frames. ], tot_loss[loss=0.253, simple_loss=0.324, pruned_loss=0.09106, over 1612166.35 frames. 
], batch size: 72, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:03:13,294 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7582, 3.3151, 2.6513, 4.0098, 2.1832, 2.5298, 2.4659, 3.5445], + device='cuda:3'), covar=tensor([0.0684, 0.0746, 0.0892, 0.0249, 0.1025, 0.1153, 0.1155, 0.0557], + device='cuda:3'), in_proj_covar=tensor([0.0247, 0.0224, 0.0265, 0.0217, 0.0224, 0.0262, 0.0266, 0.0228], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 07:03:26,078 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:29,963 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.867e+02 3.446e+02 4.196e+02 9.783e+02, threshold=6.892e+02, percent-clipped=3.0 +2023-02-06 07:03:30,377 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-06 07:03:34,268 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67445.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:03:48,012 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67464.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:03:48,381 INFO [train.py:901] (3/4) Epoch 9, batch 2800, loss[loss=0.2879, simple_loss=0.355, pruned_loss=0.1104, over 8027.00 frames. ], tot_loss[loss=0.2524, simple_loss=0.3231, pruned_loss=0.09082, over 1613090.43 frames. ], batch size: 22, lr: 8.61e-03, grad_scale: 8.0 +2023-02-06 07:04:05,334 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67489.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:04:23,680 INFO [train.py:901] (3/4) Epoch 9, batch 2850, loss[loss=0.2538, simple_loss=0.325, pruned_loss=0.09131, over 8660.00 frames. ], tot_loss[loss=0.2518, simple_loss=0.3228, pruned_loss=0.09044, over 1612429.36 frames. ], batch size: 39, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:04:34,549 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:40,475 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.765e+02 3.269e+02 4.105e+02 6.649e+02, threshold=6.538e+02, percent-clipped=0.0 +2023-02-06 07:04:55,092 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:04:58,190 INFO [train.py:901] (3/4) Epoch 9, batch 2900, loss[loss=0.2024, simple_loss=0.2749, pruned_loss=0.06495, over 7803.00 frames. ], tot_loss[loss=0.2525, simple_loss=0.3229, pruned_loss=0.09105, over 1608013.95 frames. ], batch size: 20, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:23,402 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 07:05:24,273 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 07:05:33,887 INFO [train.py:901] (3/4) Epoch 9, batch 2950, loss[loss=0.2324, simple_loss=0.2852, pruned_loss=0.08978, over 7423.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3217, pruned_loss=0.09003, over 1610732.32 frames. 
], batch size: 17, lr: 8.60e-03, grad_scale: 8.0 +2023-02-06 07:05:41,791 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9906, 1.3699, 3.2416, 1.4438, 2.2515, 3.5142, 3.5421, 3.0377], + device='cuda:3'), covar=tensor([0.1007, 0.1636, 0.0421, 0.2064, 0.0945, 0.0275, 0.0527, 0.0617], + device='cuda:3'), in_proj_covar=tensor([0.0254, 0.0285, 0.0248, 0.0279, 0.0261, 0.0228, 0.0309, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:05:51,260 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.115e+02 2.827e+02 3.390e+02 4.435e+02 7.404e+02, threshold=6.780e+02, percent-clipped=4.0 +2023-02-06 07:06:07,060 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0290, 3.1119, 2.3177, 2.3647, 2.4832, 2.0047, 2.3321, 2.7436], + device='cuda:3'), covar=tensor([0.1296, 0.0267, 0.0766, 0.0703, 0.0543, 0.1060, 0.0961, 0.0983], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0230, 0.0307, 0.0294, 0.0301, 0.0315, 0.0337, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 07:06:08,213 INFO [train.py:901] (3/4) Epoch 9, batch 3000, loss[loss=0.2016, simple_loss=0.2637, pruned_loss=0.06979, over 7708.00 frames. ], tot_loss[loss=0.2497, simple_loss=0.3208, pruned_loss=0.08931, over 1608863.94 frames. ], batch size: 18, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:08,213 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 07:06:18,918 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3498, 1.7696, 2.7001, 1.1344, 2.0433, 1.5689, 1.5463, 1.8880], + device='cuda:3'), covar=tensor([0.1643, 0.2093, 0.0732, 0.3788, 0.1429, 0.2770, 0.1716, 0.1995], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0494, 0.0527, 0.0568, 0.0601, 0.0537, 0.0458, 0.0602], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:06:20,340 INFO [train.py:935] (3/4) Epoch 9, validation: loss=0.1965, simple_loss=0.2957, pruned_loss=0.04864, over 944034.00 frames. +2023-02-06 07:06:20,341 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 07:06:37,452 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:37,688 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-06 07:06:43,356 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:52,181 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67710.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:06:55,496 INFO [train.py:901] (3/4) Epoch 9, batch 3050, loss[loss=0.2116, simple_loss=0.2729, pruned_loss=0.07518, over 7220.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3203, pruned_loss=0.0892, over 1607301.71 frames. 
], batch size: 16, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:06:55,710 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:07:13,229 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.763e+02 2.629e+02 3.194e+02 3.976e+02 7.575e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:07:29,768 INFO [train.py:901] (3/4) Epoch 9, batch 3100, loss[loss=0.2605, simple_loss=0.329, pruned_loss=0.09602, over 8304.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3199, pruned_loss=0.08894, over 1605443.91 frames. ], batch size: 23, lr: 8.59e-03, grad_scale: 8.0 +2023-02-06 07:07:42,788 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8062, 4.0244, 2.5397, 2.7268, 2.8673, 1.8413, 2.7479, 3.0910], + device='cuda:3'), covar=tensor([0.1482, 0.0222, 0.0843, 0.0723, 0.0592, 0.1292, 0.0926, 0.0828], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0231, 0.0309, 0.0296, 0.0304, 0.0318, 0.0339, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 07:08:04,168 INFO [train.py:901] (3/4) Epoch 9, batch 3150, loss[loss=0.2681, simple_loss=0.3407, pruned_loss=0.09773, over 8363.00 frames. ], tot_loss[loss=0.2495, simple_loss=0.3209, pruned_loss=0.08907, over 1608881.25 frames. ], batch size: 24, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:05,060 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=67816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:21,137 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.202e+02 2.768e+02 3.401e+02 4.235e+02 8.418e+02, threshold=6.801e+02, percent-clipped=5.0 +2023-02-06 07:08:21,987 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=67841.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:08:37,978 INFO [train.py:901] (3/4) Epoch 9, batch 3200, loss[loss=0.2833, simple_loss=0.3549, pruned_loss=0.1059, over 8450.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3225, pruned_loss=0.08973, over 1613188.17 frames. ], batch size: 49, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:08:43,401 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3867, 4.3524, 3.9052, 1.9934, 3.8667, 3.8918, 4.0329, 3.6554], + device='cuda:3'), covar=tensor([0.0742, 0.0590, 0.0987, 0.4600, 0.0754, 0.1012, 0.1305, 0.0904], + device='cuda:3'), in_proj_covar=tensor([0.0435, 0.0344, 0.0359, 0.0451, 0.0356, 0.0336, 0.0354, 0.0300], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:08:44,734 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=67875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:09:12,186 INFO [train.py:901] (3/4) Epoch 9, batch 3250, loss[loss=0.2209, simple_loss=0.2963, pruned_loss=0.07274, over 7789.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3221, pruned_loss=0.08993, over 1611347.16 frames. ], batch size: 19, lr: 8.58e-03, grad_scale: 8.0 +2023-02-06 07:09:29,464 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.709e+02 3.362e+02 4.203e+02 8.128e+02, threshold=6.724e+02, percent-clipped=5.0 +2023-02-06 07:09:46,666 INFO [train.py:901] (3/4) Epoch 9, batch 3300, loss[loss=0.2314, simple_loss=0.2948, pruned_loss=0.08398, over 7259.00 frames. 
], tot_loss[loss=0.2496, simple_loss=0.3209, pruned_loss=0.08917, over 1608617.31 frames. ], batch size: 16, lr: 8.57e-03, grad_scale: 8.0 +2023-02-06 07:10:04,408 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=67990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:10,480 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=67999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:22,543 INFO [train.py:901] (3/4) Epoch 9, batch 3350, loss[loss=0.2916, simple_loss=0.35, pruned_loss=0.1166, over 8567.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3213, pruned_loss=0.08916, over 1612179.76 frames. ], batch size: 31, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:10:22,685 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3195, 1.3020, 4.5159, 1.6688, 3.9093, 3.7626, 4.0380, 3.9188], + device='cuda:3'), covar=tensor([0.0535, 0.4279, 0.0469, 0.3465, 0.1234, 0.0844, 0.0513, 0.0621], + device='cuda:3'), in_proj_covar=tensor([0.0447, 0.0553, 0.0546, 0.0515, 0.0576, 0.0489, 0.0478, 0.0546], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:10:39,235 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.493e+02 3.108e+02 4.287e+02 1.101e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-06 07:10:40,594 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:49,106 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:10:56,400 INFO [train.py:901] (3/4) Epoch 9, batch 3400, loss[loss=0.3245, simple_loss=0.3664, pruned_loss=0.1413, over 6930.00 frames. ], tot_loss[loss=0.25, simple_loss=0.3212, pruned_loss=0.08935, over 1610769.84 frames. ], batch size: 72, lr: 8.57e-03, grad_scale: 16.0 +2023-02-06 07:11:24,316 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68105.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:11:26,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6102, 2.8931, 1.8608, 2.1662, 2.3199, 1.5913, 1.9987, 2.1068], + device='cuda:3'), covar=tensor([0.1189, 0.0272, 0.0924, 0.0565, 0.0553, 0.1223, 0.0895, 0.0861], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0235, 0.0310, 0.0295, 0.0305, 0.0321, 0.0343, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 07:11:30,826 INFO [train.py:901] (3/4) Epoch 9, batch 3450, loss[loss=0.2081, simple_loss=0.2803, pruned_loss=0.06793, over 7977.00 frames. ], tot_loss[loss=0.2509, simple_loss=0.3218, pruned_loss=0.09003, over 1607125.55 frames. ], batch size: 21, lr: 8.56e-03, grad_scale: 16.0 +2023-02-06 07:11:47,216 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.94 vs. 
limit=5.0 +2023-02-06 07:11:48,144 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.585e+02 3.242e+02 3.955e+02 1.617e+03, threshold=6.484e+02, percent-clipped=7.0 +2023-02-06 07:11:48,434 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8603, 2.3253, 3.6508, 2.8159, 3.1555, 2.4596, 2.0328, 1.7926], + device='cuda:3'), covar=tensor([0.2821, 0.3363, 0.0858, 0.2143, 0.1729, 0.1736, 0.1363, 0.3807], + device='cuda:3'), in_proj_covar=tensor([0.0856, 0.0816, 0.0691, 0.0809, 0.0900, 0.0760, 0.0687, 0.0736], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:11:57,309 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 07:11:59,840 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:05,799 INFO [train.py:901] (3/4) Epoch 9, batch 3500, loss[loss=0.2783, simple_loss=0.3384, pruned_loss=0.1091, over 7805.00 frames. ], tot_loss[loss=0.2514, simple_loss=0.3223, pruned_loss=0.09024, over 1604772.22 frames. ], batch size: 20, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:08,688 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:12:11,464 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5129, 2.0582, 3.5993, 1.2456, 2.5966, 1.9741, 1.6149, 2.4566], + device='cuda:3'), covar=tensor([0.1575, 0.2057, 0.0593, 0.3752, 0.1433, 0.2733, 0.1638, 0.2074], + device='cuda:3'), in_proj_covar=tensor([0.0482, 0.0496, 0.0530, 0.0574, 0.0603, 0.0545, 0.0463, 0.0604], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:12:17,932 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 07:12:40,959 INFO [train.py:901] (3/4) Epoch 9, batch 3550, loss[loss=0.3037, simple_loss=0.365, pruned_loss=0.1211, over 8338.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.3225, pruned_loss=0.0902, over 1604781.32 frames. ], batch size: 26, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:12:58,952 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.838e+02 3.387e+02 4.304e+02 7.616e+02, threshold=6.774e+02, percent-clipped=6.0 +2023-02-06 07:13:02,582 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:14,901 INFO [train.py:901] (3/4) Epoch 9, batch 3600, loss[loss=0.2375, simple_loss=0.3107, pruned_loss=0.08217, over 8080.00 frames. ], tot_loss[loss=0.251, simple_loss=0.3221, pruned_loss=0.08991, over 1608283.04 frames. ], batch size: 21, lr: 8.56e-03, grad_scale: 8.0 +2023-02-06 07:13:19,106 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:13:49,589 INFO [train.py:901] (3/4) Epoch 9, batch 3650, loss[loss=0.2427, simple_loss=0.316, pruned_loss=0.08469, over 8298.00 frames. ], tot_loss[loss=0.2511, simple_loss=0.3226, pruned_loss=0.08982, over 1610754.82 frames. 
], batch size: 23, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:14:08,230 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.637e+02 3.214e+02 4.100e+02 7.421e+02, threshold=6.428e+02, percent-clipped=2.0 +2023-02-06 07:14:09,703 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:14:18,268 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 07:14:25,009 INFO [train.py:901] (3/4) Epoch 9, batch 3700, loss[loss=0.2572, simple_loss=0.3229, pruned_loss=0.09579, over 8245.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3235, pruned_loss=0.09024, over 1613011.38 frames. ], batch size: 22, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:14:26,544 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0987, 1.4187, 1.5327, 1.3057, 1.1612, 1.4369, 1.8172, 1.4701], + device='cuda:3'), covar=tensor([0.0500, 0.1189, 0.1748, 0.1375, 0.0562, 0.1458, 0.0627, 0.0612], + device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0158, 0.0197, 0.0161, 0.0109, 0.0166, 0.0120, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 07:14:57,598 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:14:58,733 INFO [train.py:901] (3/4) Epoch 9, batch 3750, loss[loss=0.2637, simple_loss=0.3183, pruned_loss=0.1045, over 7423.00 frames. ], tot_loss[loss=0.2519, simple_loss=0.3234, pruned_loss=0.09017, over 1616159.12 frames. ], batch size: 17, lr: 8.55e-03, grad_scale: 8.0 +2023-02-06 07:15:00,195 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:06,070 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:08,022 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2915, 1.4387, 1.3090, 1.8627, 0.6776, 1.1496, 1.2606, 1.4684], + device='cuda:3'), covar=tensor([0.0997, 0.0936, 0.1247, 0.0563, 0.1330, 0.1581, 0.0962, 0.0852], + device='cuda:3'), in_proj_covar=tensor([0.0246, 0.0225, 0.0263, 0.0219, 0.0226, 0.0261, 0.0267, 0.0228], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 07:15:14,911 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:16,797 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.868e+02 3.639e+02 4.960e+02 1.282e+03, threshold=7.278e+02, percent-clipped=8.0 +2023-02-06 07:15:22,920 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68449.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:15:23,637 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:29,205 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:15:33,817 INFO [train.py:901] (3/4) Epoch 9, batch 3800, loss[loss=0.2413, simple_loss=0.3212, pruned_loss=0.08067, over 8293.00 frames. 
], tot_loss[loss=0.2508, simple_loss=0.3227, pruned_loss=0.08947, over 1616453.06 frames. ], batch size: 23, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:16:07,820 INFO [train.py:901] (3/4) Epoch 9, batch 3850, loss[loss=0.2104, simple_loss=0.2814, pruned_loss=0.06968, over 7723.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3204, pruned_loss=0.08801, over 1616368.57 frames. ], batch size: 18, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:16:24,991 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 07:16:25,653 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.907e+02 2.582e+02 3.048e+02 3.724e+02 6.674e+02, threshold=6.096e+02, percent-clipped=0.0 +2023-02-06 07:16:28,000 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0395, 1.2437, 3.1577, 1.0242, 2.7839, 2.6764, 2.8670, 2.7774], + device='cuda:3'), covar=tensor([0.0655, 0.3442, 0.0758, 0.3139, 0.1374, 0.0930, 0.0625, 0.0764], + device='cuda:3'), in_proj_covar=tensor([0.0443, 0.0546, 0.0542, 0.0509, 0.0574, 0.0479, 0.0476, 0.0539], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:16:42,274 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68564.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:16:42,685 INFO [train.py:901] (3/4) Epoch 9, batch 3900, loss[loss=0.3139, simple_loss=0.3921, pruned_loss=0.1178, over 8676.00 frames. ], tot_loss[loss=0.2499, simple_loss=0.3221, pruned_loss=0.08885, over 1618879.97 frames. ], batch size: 34, lr: 8.54e-03, grad_scale: 8.0 +2023-02-06 07:17:02,959 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6099, 1.4115, 2.7938, 1.2664, 2.0307, 3.0304, 3.0924, 2.5404], + device='cuda:3'), covar=tensor([0.1052, 0.1470, 0.0409, 0.2057, 0.0873, 0.0305, 0.0475, 0.0718], + device='cuda:3'), in_proj_covar=tensor([0.0255, 0.0285, 0.0246, 0.0276, 0.0262, 0.0228, 0.0304, 0.0290], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:17:04,307 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:17:17,701 INFO [train.py:901] (3/4) Epoch 9, batch 3950, loss[loss=0.256, simple_loss=0.3252, pruned_loss=0.0934, over 8252.00 frames. ], tot_loss[loss=0.2494, simple_loss=0.3217, pruned_loss=0.08862, over 1614562.16 frames. ], batch size: 22, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:17:35,328 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.589e+02 3.045e+02 4.133e+02 1.084e+03, threshold=6.090e+02, percent-clipped=3.0 +2023-02-06 07:17:51,768 INFO [train.py:901] (3/4) Epoch 9, batch 4000, loss[loss=0.229, simple_loss=0.3126, pruned_loss=0.07273, over 8250.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.322, pruned_loss=0.08877, over 1613702.44 frames. ], batch size: 22, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:18:00,060 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.64 vs. limit=5.0 +2023-02-06 07:18:13,674 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-06 07:18:16,919 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. 
limit=2.0 +2023-02-06 07:18:17,951 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=68703.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:25,239 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68714.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:25,677 INFO [train.py:901] (3/4) Epoch 9, batch 4050, loss[loss=0.2609, simple_loss=0.326, pruned_loss=0.09789, over 8488.00 frames. ], tot_loss[loss=0.2481, simple_loss=0.3207, pruned_loss=0.08771, over 1615946.31 frames. ], batch size: 29, lr: 8.53e-03, grad_scale: 8.0 +2023-02-06 07:18:28,565 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3813, 1.9440, 2.9470, 2.2683, 2.5609, 2.1481, 1.7112, 1.2824], + device='cuda:3'), covar=tensor([0.3032, 0.3295, 0.0827, 0.2109, 0.1633, 0.2023, 0.1546, 0.3377], + device='cuda:3'), in_proj_covar=tensor([0.0853, 0.0814, 0.0697, 0.0808, 0.0904, 0.0758, 0.0687, 0.0731], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:18:42,613 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68739.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:18:43,698 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.478e+02 3.133e+02 3.692e+02 8.585e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 07:18:56,669 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5872, 1.9391, 2.0200, 1.3991, 2.0948, 1.4174, 0.5833, 1.9148], + device='cuda:3'), covar=tensor([0.0318, 0.0178, 0.0123, 0.0259, 0.0191, 0.0521, 0.0503, 0.0124], + device='cuda:3'), in_proj_covar=tensor([0.0371, 0.0297, 0.0248, 0.0357, 0.0288, 0.0448, 0.0343, 0.0322], + device='cuda:3'), out_proj_covar=tensor([1.1048e-04, 8.5746e-05, 7.2775e-05, 1.0398e-04, 8.5422e-05, 1.4315e-04, + 1.0210e-04, 9.5259e-05], device='cuda:3') +2023-02-06 07:18:57,743 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:19:00,372 INFO [train.py:901] (3/4) Epoch 9, batch 4100, loss[loss=0.2386, simple_loss=0.3172, pruned_loss=0.07998, over 8517.00 frames. ], tot_loss[loss=0.2475, simple_loss=0.3205, pruned_loss=0.08722, over 1618875.52 frames. ], batch size: 28, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:19:08,015 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 07:19:34,811 INFO [train.py:901] (3/4) Epoch 9, batch 4150, loss[loss=0.2289, simple_loss=0.3077, pruned_loss=0.07505, over 8135.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3204, pruned_loss=0.08743, over 1614539.68 frames. ], batch size: 22, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:19:38,350 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=68820.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:19:52,100 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.558e+02 3.576e+02 4.352e+02 8.740e+02, threshold=7.151e+02, percent-clipped=5.0 +2023-02-06 07:19:55,817 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=68845.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:20:02,871 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 07:20:09,486 INFO [train.py:901] (3/4) Epoch 9, batch 4200, loss[loss=0.2337, simple_loss=0.2968, pruned_loss=0.08523, over 7809.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3204, pruned_loss=0.08736, over 1614435.60 frames. ], batch size: 20, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:20:12,588 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 07:20:17,183 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=68876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:20:23,072 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 07:20:44,847 INFO [train.py:901] (3/4) Epoch 9, batch 4250, loss[loss=0.2296, simple_loss=0.3132, pruned_loss=0.07301, over 8243.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3208, pruned_loss=0.08782, over 1616667.98 frames. ], batch size: 24, lr: 8.52e-03, grad_scale: 8.0 +2023-02-06 07:20:45,558 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 07:20:51,272 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9564, 1.6481, 2.3261, 1.9142, 2.0394, 1.8272, 1.5503, 0.6426], + device='cuda:3'), covar=tensor([0.3180, 0.2948, 0.0939, 0.1858, 0.1394, 0.1852, 0.1443, 0.3105], + device='cuda:3'), in_proj_covar=tensor([0.0854, 0.0816, 0.0698, 0.0808, 0.0902, 0.0761, 0.0686, 0.0733], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:21:02,845 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=68940.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:21:03,447 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.856e+02 3.701e+02 4.402e+02 9.379e+02, threshold=7.403e+02, percent-clipped=2.0 +2023-02-06 07:21:20,741 INFO [train.py:901] (3/4) Epoch 9, batch 4300, loss[loss=0.223, simple_loss=0.2931, pruned_loss=0.07648, over 7652.00 frames. ], tot_loss[loss=0.2496, simple_loss=0.3217, pruned_loss=0.08879, over 1617456.26 frames. ], batch size: 19, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:21:55,038 INFO [train.py:901] (3/4) Epoch 9, batch 4350, loss[loss=0.2348, simple_loss=0.3126, pruned_loss=0.0785, over 8463.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3199, pruned_loss=0.08795, over 1615237.90 frames. ], batch size: 27, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:22:12,979 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.761e+02 3.203e+02 3.985e+02 6.558e+02, threshold=6.405e+02, percent-clipped=0.0 +2023-02-06 07:22:16,276 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 07:22:17,724 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:23,149 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:27,151 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:22:29,684 INFO [train.py:901] (3/4) Epoch 9, batch 4400, loss[loss=0.2296, simple_loss=0.3083, pruned_loss=0.07545, over 8333.00 frames. 
], tot_loss[loss=0.2475, simple_loss=0.3198, pruned_loss=0.08761, over 1613019.98 frames. ], batch size: 26, lr: 8.51e-03, grad_scale: 8.0 +2023-02-06 07:22:30,556 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9719, 2.0811, 1.6417, 2.8876, 1.3085, 1.3879, 1.8626, 2.4392], + device='cuda:3'), covar=tensor([0.0898, 0.0999, 0.1213, 0.0388, 0.1329, 0.1627, 0.1147, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0249, 0.0226, 0.0267, 0.0222, 0.0228, 0.0263, 0.0268, 0.0228], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 07:22:40,710 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8280, 1.4771, 3.3440, 1.4259, 2.1632, 3.5705, 3.6545, 2.9080], + device='cuda:3'), covar=tensor([0.1170, 0.1660, 0.0417, 0.2120, 0.1072, 0.0347, 0.0449, 0.0761], + device='cuda:3'), in_proj_covar=tensor([0.0253, 0.0285, 0.0246, 0.0275, 0.0260, 0.0229, 0.0301, 0.0284], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:22:55,804 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 07:23:04,523 INFO [train.py:901] (3/4) Epoch 9, batch 4450, loss[loss=0.1926, simple_loss=0.268, pruned_loss=0.05857, over 7519.00 frames. ], tot_loss[loss=0.2489, simple_loss=0.3207, pruned_loss=0.08858, over 1613070.98 frames. ], batch size: 18, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:23:16,659 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:22,378 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.746e+02 3.298e+02 3.852e+02 8.052e+02, threshold=6.596e+02, percent-clipped=4.0 +2023-02-06 07:23:33,874 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:37,253 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:39,008 INFO [train.py:901] (3/4) Epoch 9, batch 4500, loss[loss=0.2332, simple_loss=0.3157, pruned_loss=0.07528, over 8579.00 frames. ], tot_loss[loss=0.2498, simple_loss=0.3212, pruned_loss=0.08921, over 1611474.47 frames. ], batch size: 31, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:23:49,144 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 07:23:49,293 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69180.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:23:53,180 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:24:12,700 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4163, 1.7943, 1.3486, 2.3100, 1.0270, 1.1621, 1.6248, 1.8177], + device='cuda:3'), covar=tensor([0.1295, 0.0994, 0.1484, 0.0587, 0.1347, 0.1795, 0.1061, 0.0904], + device='cuda:3'), in_proj_covar=tensor([0.0248, 0.0223, 0.0264, 0.0222, 0.0226, 0.0263, 0.0268, 0.0230], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 07:24:13,186 INFO [train.py:901] (3/4) Epoch 9, batch 4550, loss[loss=0.2817, simple_loss=0.3499, pruned_loss=0.1068, over 8553.00 frames. 
], tot_loss[loss=0.2489, simple_loss=0.3205, pruned_loss=0.08862, over 1606787.16 frames. ], batch size: 31, lr: 8.50e-03, grad_scale: 8.0 +2023-02-06 07:24:16,801 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2028, 1.6542, 4.2333, 1.7822, 2.3132, 4.7423, 4.7466, 4.0793], + device='cuda:3'), covar=tensor([0.1146, 0.1570, 0.0283, 0.2019, 0.1024, 0.0253, 0.0381, 0.0620], + device='cuda:3'), in_proj_covar=tensor([0.0258, 0.0290, 0.0251, 0.0280, 0.0266, 0.0235, 0.0309, 0.0291], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:24:31,944 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.522e+02 2.943e+02 3.743e+02 5.945e+02, threshold=5.886e+02, percent-clipped=0.0 +2023-02-06 07:24:33,333 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:24:47,702 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69263.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:24:48,921 INFO [train.py:901] (3/4) Epoch 9, batch 4600, loss[loss=0.2329, simple_loss=0.3141, pruned_loss=0.07588, over 8512.00 frames. ], tot_loss[loss=0.2491, simple_loss=0.3209, pruned_loss=0.08864, over 1610091.85 frames. ], batch size: 26, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:25:07,533 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:22,107 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:24,600 INFO [train.py:901] (3/4) Epoch 9, batch 4650, loss[loss=0.2242, simple_loss=0.3134, pruned_loss=0.06755, over 8105.00 frames. ], tot_loss[loss=0.2479, simple_loss=0.3199, pruned_loss=0.08798, over 1609548.49 frames. ], batch size: 23, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:25:38,791 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:40,113 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:25:42,601 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.666e+02 3.298e+02 3.900e+02 8.712e+02, threshold=6.595e+02, percent-clipped=8.0 +2023-02-06 07:25:58,527 INFO [train.py:901] (3/4) Epoch 9, batch 4700, loss[loss=0.2349, simple_loss=0.3121, pruned_loss=0.07887, over 8106.00 frames. ], tot_loss[loss=0.2478, simple_loss=0.3194, pruned_loss=0.0881, over 1604956.71 frames. ], batch size: 23, lr: 8.49e-03, grad_scale: 8.0 +2023-02-06 07:26:14,068 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:26,910 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:31,850 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69412.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:33,682 INFO [train.py:901] (3/4) Epoch 9, batch 4750, loss[loss=0.2788, simple_loss=0.3414, pruned_loss=0.1082, over 7125.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3177, pruned_loss=0.08717, over 1602535.80 frames. 
], batch size: 71, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:26:35,887 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69418.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:26:43,653 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 07:26:49,111 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 07:26:51,004 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 07:26:51,650 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.564e+02 3.173e+02 4.227e+02 9.736e+02, threshold=6.346e+02, percent-clipped=4.0 +2023-02-06 07:26:53,207 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:08,529 INFO [train.py:901] (3/4) Epoch 9, batch 4800, loss[loss=0.2073, simple_loss=0.2849, pruned_loss=0.06484, over 7528.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3174, pruned_loss=0.08686, over 1597114.82 frames. ], batch size: 18, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:27:41,300 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 07:27:43,201 INFO [train.py:901] (3/4) Epoch 9, batch 4850, loss[loss=0.225, simple_loss=0.2998, pruned_loss=0.07513, over 8238.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3171, pruned_loss=0.08684, over 1594354.00 frames. ], batch size: 22, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:27:46,760 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:49,417 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:27:53,339 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69530.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:28:00,637 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.717e+02 3.193e+02 3.973e+02 8.915e+02, threshold=6.387e+02, percent-clipped=1.0 +2023-02-06 07:28:17,599 INFO [train.py:901] (3/4) Epoch 9, batch 4900, loss[loss=0.1989, simple_loss=0.2625, pruned_loss=0.06762, over 7934.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3163, pruned_loss=0.08655, over 1595566.08 frames. ], batch size: 20, lr: 8.48e-03, grad_scale: 8.0 +2023-02-06 07:28:33,123 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:28:47,483 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69607.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:28:53,430 INFO [train.py:901] (3/4) Epoch 9, batch 4950, loss[loss=0.2334, simple_loss=0.3076, pruned_loss=0.07958, over 7817.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.3165, pruned_loss=0.08637, over 1604732.48 frames. 
], batch size: 20, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:29:06,610 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:09,344 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:10,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.734e+02 3.225e+02 4.131e+02 8.295e+02, threshold=6.450e+02, percent-clipped=5.0 +2023-02-06 07:29:10,647 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0232, 1.6411, 1.7813, 1.6693, 1.1944, 1.7765, 2.1638, 1.8156], + device='cuda:3'), covar=tensor([0.0436, 0.1231, 0.1703, 0.1314, 0.0619, 0.1433, 0.0632, 0.0617], + device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0160, 0.0198, 0.0163, 0.0110, 0.0168, 0.0122, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 07:29:13,215 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:27,250 INFO [train.py:901] (3/4) Epoch 9, batch 5000, loss[loss=0.2225, simple_loss=0.3087, pruned_loss=0.06816, over 8458.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3177, pruned_loss=0.08692, over 1609459.63 frames. ], batch size: 25, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:29:39,131 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:29:46,879 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=69692.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:29:53,593 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:02,817 INFO [train.py:901] (3/4) Epoch 9, batch 5050, loss[loss=0.2817, simple_loss=0.3505, pruned_loss=0.1064, over 8506.00 frames. ], tot_loss[loss=0.2465, simple_loss=0.3181, pruned_loss=0.08741, over 1609417.70 frames. ], batch size: 26, lr: 8.47e-03, grad_scale: 8.0 +2023-02-06 07:30:07,764 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69722.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:30:12,770 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:17,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3056, 1.9154, 2.9685, 2.3307, 2.5736, 2.0375, 1.6024, 1.3260], + device='cuda:3'), covar=tensor([0.3174, 0.3309, 0.0872, 0.2030, 0.1645, 0.1799, 0.1549, 0.3449], + device='cuda:3'), in_proj_covar=tensor([0.0858, 0.0825, 0.0702, 0.0808, 0.0911, 0.0762, 0.0689, 0.0738], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:30:18,113 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-06 07:30:20,813 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.703e+02 3.249e+02 3.895e+02 8.845e+02, threshold=6.498e+02, percent-clipped=2.0 +2023-02-06 07:30:26,987 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:30,984 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=69756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:36,718 INFO [train.py:901] (3/4) Epoch 9, batch 5100, loss[loss=0.2851, simple_loss=0.3453, pruned_loss=0.1125, over 7973.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3184, pruned_loss=0.08737, over 1608284.54 frames. ], batch size: 21, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:30:44,228 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69776.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:30:59,043 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:01,166 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6104, 1.6385, 4.3853, 1.9857, 2.4877, 4.9894, 4.9586, 4.3099], + device='cuda:3'), covar=tensor([0.1017, 0.1745, 0.0289, 0.1916, 0.1013, 0.0201, 0.0263, 0.0599], + device='cuda:3'), in_proj_covar=tensor([0.0254, 0.0288, 0.0249, 0.0277, 0.0265, 0.0229, 0.0305, 0.0288], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:31:01,898 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:11,530 INFO [train.py:901] (3/4) Epoch 9, batch 5150, loss[loss=0.2783, simple_loss=0.3419, pruned_loss=0.1074, over 8321.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3177, pruned_loss=0.08676, over 1607938.79 frames. ], batch size: 25, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:31:29,714 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.853e+02 2.410e+02 3.240e+02 3.896e+02 9.119e+02, threshold=6.481e+02, percent-clipped=3.0 +2023-02-06 07:31:32,782 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69845.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:31:43,080 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 07:31:46,671 INFO [train.py:901] (3/4) Epoch 9, batch 5200, loss[loss=0.2155, simple_loss=0.3055, pruned_loss=0.06274, over 8358.00 frames. ], tot_loss[loss=0.2461, simple_loss=0.3181, pruned_loss=0.08708, over 1608778.26 frames. 
], batch size: 24, lr: 8.46e-03, grad_scale: 8.0 +2023-02-06 07:31:50,876 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=69871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:02,093 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3839, 2.0424, 3.0951, 2.4626, 2.8018, 2.1868, 1.7577, 1.5370], + device='cuda:3'), covar=tensor([0.3159, 0.3544, 0.0933, 0.2427, 0.1749, 0.1934, 0.1478, 0.3500], + device='cuda:3'), in_proj_covar=tensor([0.0853, 0.0820, 0.0700, 0.0807, 0.0907, 0.0758, 0.0686, 0.0735], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:32:06,754 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69895.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:11,374 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:17,761 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 07:32:20,221 INFO [train.py:901] (3/4) Epoch 9, batch 5250, loss[loss=0.2558, simple_loss=0.3176, pruned_loss=0.09696, over 8127.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3173, pruned_loss=0.08648, over 1607742.88 frames. ], batch size: 22, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:32:23,773 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69920.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:27,865 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:38,251 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 2.909e+02 3.504e+02 4.160e+02 7.603e+02, threshold=7.007e+02, percent-clipped=5.0 +2023-02-06 07:32:50,714 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:32:55,297 INFO [train.py:901] (3/4) Epoch 9, batch 5300, loss[loss=0.2205, simple_loss=0.3122, pruned_loss=0.06438, over 8201.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.316, pruned_loss=0.08525, over 1609653.45 frames. ], batch size: 23, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:33:05,000 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=69978.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:33:08,278 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=69983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:22,934 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70003.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:33:24,857 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:31,223 INFO [train.py:901] (3/4) Epoch 9, batch 5350, loss[loss=0.231, simple_loss=0.3187, pruned_loss=0.07164, over 8302.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3179, pruned_loss=0.0867, over 1610318.94 frames. 
], batch size: 23, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:33:34,730 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7474, 1.3408, 1.5657, 1.2517, 0.9410, 1.3696, 1.4245, 1.4334], + device='cuda:3'), covar=tensor([0.0502, 0.1186, 0.1687, 0.1381, 0.0574, 0.1413, 0.0681, 0.0599], + device='cuda:3'), in_proj_covar=tensor([0.0105, 0.0159, 0.0198, 0.0162, 0.0109, 0.0167, 0.0121, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 07:33:42,009 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:33:45,285 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70036.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:33:48,396 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.965e+02 3.484e+02 4.155e+02 9.515e+02, threshold=6.968e+02, percent-clipped=2.0 +2023-02-06 07:33:57,317 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:05,195 INFO [train.py:901] (3/4) Epoch 9, batch 5400, loss[loss=0.2297, simple_loss=0.3087, pruned_loss=0.07535, over 8134.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3191, pruned_loss=0.08716, over 1613347.64 frames. ], batch size: 22, lr: 8.45e-03, grad_scale: 8.0 +2023-02-06 07:34:14,822 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:31,560 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70101.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:40,952 INFO [train.py:901] (3/4) Epoch 9, batch 5450, loss[loss=0.2312, simple_loss=0.3214, pruned_loss=0.07057, over 8526.00 frames. ], tot_loss[loss=0.247, simple_loss=0.3193, pruned_loss=0.08732, over 1612200.57 frames. ], batch size: 28, lr: 8.44e-03, grad_scale: 8.0 +2023-02-06 07:34:48,368 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70126.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:49,037 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70127.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:49,627 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:34:58,814 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.682e+02 3.191e+02 4.046e+02 1.028e+03, threshold=6.382e+02, percent-clipped=4.0 +2023-02-06 07:35:04,325 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 07:35:05,834 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70151.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:35:06,536 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70152.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:35:15,856 INFO [train.py:901] (3/4) Epoch 9, batch 5500, loss[loss=0.2289, simple_loss=0.3064, pruned_loss=0.07565, over 8032.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.319, pruned_loss=0.08692, over 1612632.14 frames. 
], batch size: 22, lr: 8.44e-03, grad_scale: 16.0 +2023-02-06 07:35:50,278 INFO [train.py:901] (3/4) Epoch 9, batch 5550, loss[loss=0.2046, simple_loss=0.2654, pruned_loss=0.07187, over 7825.00 frames. ], tot_loss[loss=0.247, simple_loss=0.3191, pruned_loss=0.08741, over 1604741.98 frames. ], batch size: 19, lr: 8.44e-03, grad_scale: 16.0 +2023-02-06 07:36:07,873 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.484e+02 3.031e+02 3.937e+02 9.276e+02, threshold=6.062e+02, percent-clipped=2.0 +2023-02-06 07:36:14,989 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 07:36:24,759 INFO [train.py:901] (3/4) Epoch 9, batch 5600, loss[loss=0.2215, simple_loss=0.3112, pruned_loss=0.06586, over 8772.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3178, pruned_loss=0.08624, over 1606427.36 frames. ], batch size: 30, lr: 8.43e-03, grad_scale: 16.0 +2023-02-06 07:36:34,074 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=70278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:36:35,515 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4376, 1.9076, 3.1307, 1.2357, 2.4180, 1.9012, 1.7539, 2.1065], + device='cuda:3'), covar=tensor([0.1830, 0.2046, 0.0700, 0.3846, 0.1398, 0.2768, 0.1635, 0.1960], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0496, 0.0524, 0.0564, 0.0600, 0.0537, 0.0463, 0.0603], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:36:59,382 INFO [train.py:901] (3/4) Epoch 9, batch 5650, loss[loss=0.2091, simple_loss=0.2967, pruned_loss=0.06074, over 8336.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3168, pruned_loss=0.08531, over 1608201.37 frames. ], batch size: 25, lr: 8.43e-03, grad_scale: 8.0 +2023-02-06 07:37:08,098 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 07:37:18,053 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.609e+02 3.248e+02 4.005e+02 8.106e+02, threshold=6.497e+02, percent-clipped=5.0 +2023-02-06 07:37:32,963 INFO [train.py:901] (3/4) Epoch 9, batch 5700, loss[loss=0.2348, simple_loss=0.3204, pruned_loss=0.07459, over 8354.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3172, pruned_loss=0.08583, over 1608505.09 frames. 
], batch size: 24, lr: 8.43e-03, grad_scale: 8.0 +2023-02-06 07:37:56,393 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4986, 1.9737, 3.0514, 2.5826, 2.8320, 2.2576, 1.7896, 1.4267], + device='cuda:3'), covar=tensor([0.2987, 0.3699, 0.0900, 0.1860, 0.1414, 0.1699, 0.1336, 0.3393], + device='cuda:3'), in_proj_covar=tensor([0.0861, 0.0829, 0.0707, 0.0806, 0.0905, 0.0765, 0.0686, 0.0737], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:38:02,546 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3907, 4.3666, 3.9422, 1.7798, 3.9078, 3.9656, 3.9607, 3.6345], + device='cuda:3'), covar=tensor([0.0918, 0.0692, 0.1192, 0.5735, 0.0900, 0.0998, 0.1467, 0.0863], + device='cuda:3'), in_proj_covar=tensor([0.0449, 0.0353, 0.0367, 0.0468, 0.0365, 0.0350, 0.0362, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:38:02,667 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70407.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 07:38:08,013 INFO [train.py:901] (3/4) Epoch 9, batch 5750, loss[loss=0.2474, simple_loss=0.3282, pruned_loss=0.08335, over 7967.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.3158, pruned_loss=0.08528, over 1608197.02 frames. ], batch size: 21, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:38:12,143 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 07:38:20,280 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70432.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 07:38:27,529 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.028e+02 2.898e+02 3.376e+02 4.229e+02 8.555e+02, threshold=6.753e+02, percent-clipped=3.0 +2023-02-06 07:38:43,381 INFO [train.py:901] (3/4) Epoch 9, batch 5800, loss[loss=0.2056, simple_loss=0.2807, pruned_loss=0.06525, over 7230.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3158, pruned_loss=0.08548, over 1608727.32 frames. ], batch size: 16, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:38:48,296 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:38:48,579 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.60 vs. limit=5.0 +2023-02-06 07:39:18,643 INFO [train.py:901] (3/4) Epoch 9, batch 5850, loss[loss=0.2384, simple_loss=0.3154, pruned_loss=0.08076, over 8561.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3156, pruned_loss=0.08556, over 1611300.28 frames. 
], batch size: 49, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:39:28,974 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4293, 1.8240, 1.8591, 1.0135, 2.0406, 1.4563, 0.4423, 1.6526], + device='cuda:3'), covar=tensor([0.0306, 0.0174, 0.0131, 0.0281, 0.0196, 0.0501, 0.0447, 0.0132], + device='cuda:3'), in_proj_covar=tensor([0.0372, 0.0297, 0.0246, 0.0357, 0.0285, 0.0446, 0.0340, 0.0326], + device='cuda:3'), out_proj_covar=tensor([1.1024e-04, 8.5580e-05, 7.1640e-05, 1.0383e-04, 8.3951e-05, 1.4133e-04, + 1.0086e-04, 9.5879e-05], device='cuda:3') +2023-02-06 07:39:37,365 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.477e+02 3.501e+02 4.376e+02 8.995e+02, threshold=7.001e+02, percent-clipped=4.0 +2023-02-06 07:39:48,209 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6663, 2.2284, 3.7538, 2.8076, 3.0813, 2.5042, 1.9785, 1.7846], + device='cuda:3'), covar=tensor([0.3150, 0.3703, 0.0964, 0.2366, 0.1780, 0.1846, 0.1514, 0.3922], + device='cuda:3'), in_proj_covar=tensor([0.0853, 0.0819, 0.0703, 0.0805, 0.0900, 0.0761, 0.0683, 0.0733], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:39:53,322 INFO [train.py:901] (3/4) Epoch 9, batch 5900, loss[loss=0.232, simple_loss=0.3133, pruned_loss=0.07531, over 8244.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3163, pruned_loss=0.0861, over 1611842.07 frames. ], batch size: 24, lr: 8.42e-03, grad_scale: 8.0 +2023-02-06 07:40:08,010 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:40:27,231 INFO [train.py:901] (3/4) Epoch 9, batch 5950, loss[loss=0.226, simple_loss=0.2933, pruned_loss=0.07936, over 7427.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3169, pruned_loss=0.08652, over 1613804.52 frames. ], batch size: 17, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:40:32,706 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=70622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:40:45,946 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.730e+02 3.193e+02 3.849e+02 7.953e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 07:41:02,078 INFO [train.py:901] (3/4) Epoch 9, batch 6000, loss[loss=0.2804, simple_loss=0.327, pruned_loss=0.1168, over 7653.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3174, pruned_loss=0.08712, over 1613437.90 frames. ], batch size: 19, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:41:02,079 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 07:41:14,589 INFO [train.py:935] (3/4) Epoch 9, validation: loss=0.1952, simple_loss=0.2947, pruned_loss=0.0479, over 944034.00 frames. +2023-02-06 07:41:14,590 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 07:41:49,711 INFO [train.py:901] (3/4) Epoch 9, batch 6050, loss[loss=0.1739, simple_loss=0.2633, pruned_loss=0.04229, over 7716.00 frames. ], tot_loss[loss=0.2469, simple_loss=0.3182, pruned_loss=0.08779, over 1615986.65 frames. 
], batch size: 18, lr: 8.41e-03, grad_scale: 8.0 +2023-02-06 07:42:04,740 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=70737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:42:07,993 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.822e+02 3.602e+02 4.348e+02 1.269e+03, threshold=7.203e+02, percent-clipped=6.0 +2023-02-06 07:42:10,338 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6266, 1.9746, 2.0503, 1.0682, 2.2595, 1.4308, 0.7064, 1.7749], + device='cuda:3'), covar=tensor([0.0403, 0.0187, 0.0173, 0.0368, 0.0178, 0.0537, 0.0453, 0.0180], + device='cuda:3'), in_proj_covar=tensor([0.0378, 0.0301, 0.0249, 0.0360, 0.0287, 0.0450, 0.0344, 0.0329], + device='cuda:3'), out_proj_covar=tensor([1.1221e-04, 8.6868e-05, 7.2050e-05, 1.0463e-04, 8.4396e-05, 1.4254e-04, + 1.0218e-04, 9.6955e-05], device='cuda:3') +2023-02-06 07:42:19,440 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 07:42:20,525 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6407, 1.9292, 3.0358, 1.4075, 2.4197, 2.0482, 1.7563, 2.1208], + device='cuda:3'), covar=tensor([0.1424, 0.1870, 0.0628, 0.3303, 0.1222, 0.2390, 0.1441, 0.1861], + device='cuda:3'), in_proj_covar=tensor([0.0478, 0.0495, 0.0525, 0.0567, 0.0603, 0.0541, 0.0464, 0.0604], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:42:24,368 INFO [train.py:901] (3/4) Epoch 9, batch 6100, loss[loss=0.1823, simple_loss=0.2558, pruned_loss=0.05438, over 7432.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3168, pruned_loss=0.08646, over 1610369.73 frames. ], batch size: 17, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:42:42,096 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 07:43:00,374 INFO [train.py:901] (3/4) Epoch 9, batch 6150, loss[loss=0.2216, simple_loss=0.2929, pruned_loss=0.07512, over 7442.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3157, pruned_loss=0.08574, over 1609391.53 frames. ], batch size: 17, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:43:01,905 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9412, 2.5413, 3.1930, 1.2257, 2.9630, 1.6375, 1.6670, 1.7611], + device='cuda:3'), covar=tensor([0.0473, 0.0241, 0.0146, 0.0475, 0.0368, 0.0607, 0.0514, 0.0335], + device='cuda:3'), in_proj_covar=tensor([0.0376, 0.0300, 0.0247, 0.0360, 0.0288, 0.0449, 0.0343, 0.0328], + device='cuda:3'), out_proj_covar=tensor([1.1154e-04, 8.6542e-05, 7.1460e-05, 1.0460e-04, 8.4599e-05, 1.4198e-04, + 1.0185e-04, 9.6633e-05], device='cuda:3') +2023-02-06 07:43:18,322 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.692e+02 3.232e+02 3.879e+02 7.941e+02, threshold=6.463e+02, percent-clipped=1.0 +2023-02-06 07:43:19,254 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:43:33,936 INFO [train.py:901] (3/4) Epoch 9, batch 6200, loss[loss=0.199, simple_loss=0.2858, pruned_loss=0.05606, over 8231.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3158, pruned_loss=0.0856, over 1611370.33 frames. 
], batch size: 22, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:43:36,229 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=70868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:44:09,711 INFO [train.py:901] (3/4) Epoch 9, batch 6250, loss[loss=0.2277, simple_loss=0.3124, pruned_loss=0.07153, over 8472.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3168, pruned_loss=0.08643, over 1613639.07 frames. ], batch size: 27, lr: 8.40e-03, grad_scale: 8.0 +2023-02-06 07:44:28,454 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.792e+02 3.423e+02 4.432e+02 1.474e+03, threshold=6.847e+02, percent-clipped=7.0 +2023-02-06 07:44:43,930 INFO [train.py:901] (3/4) Epoch 9, batch 6300, loss[loss=0.2817, simple_loss=0.3482, pruned_loss=0.1077, over 8809.00 frames. ], tot_loss[loss=0.245, simple_loss=0.3168, pruned_loss=0.08656, over 1614634.69 frames. ], batch size: 40, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:45:03,882 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=70993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:16,145 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:19,449 INFO [train.py:901] (3/4) Epoch 9, batch 6350, loss[loss=0.2376, simple_loss=0.3139, pruned_loss=0.08066, over 8661.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3167, pruned_loss=0.08614, over 1615915.82 frames. ], batch size: 34, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:45:21,646 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:23,710 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4348, 1.6901, 2.7321, 1.2619, 1.9245, 1.7835, 1.4957, 1.8343], + device='cuda:3'), covar=tensor([0.1805, 0.2269, 0.0758, 0.3817, 0.1592, 0.2833, 0.1747, 0.2078], + device='cuda:3'), in_proj_covar=tensor([0.0486, 0.0501, 0.0528, 0.0573, 0.0612, 0.0547, 0.0465, 0.0609], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:45:38,849 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.814e+02 3.293e+02 4.210e+02 8.338e+02, threshold=6.585e+02, percent-clipped=5.0 +2023-02-06 07:45:42,976 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:45:54,196 INFO [train.py:901] (3/4) Epoch 9, batch 6400, loss[loss=0.3193, simple_loss=0.3767, pruned_loss=0.1309, over 8317.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.3186, pruned_loss=0.08695, over 1620198.08 frames. ], batch size: 25, lr: 8.39e-03, grad_scale: 8.0 +2023-02-06 07:46:28,849 INFO [train.py:901] (3/4) Epoch 9, batch 6450, loss[loss=0.2938, simple_loss=0.357, pruned_loss=0.1153, over 7166.00 frames. ], tot_loss[loss=0.247, simple_loss=0.3195, pruned_loss=0.08727, over 1621893.98 frames. ], batch size: 72, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:46:48,400 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.655e+02 3.350e+02 4.272e+02 1.011e+03, threshold=6.701e+02, percent-clipped=3.0 +2023-02-06 07:47:03,598 INFO [train.py:901] (3/4) Epoch 9, batch 6500, loss[loss=0.2776, simple_loss=0.3419, pruned_loss=0.1066, over 8547.00 frames. ], tot_loss[loss=0.2466, simple_loss=0.3189, pruned_loss=0.08718, over 1618441.42 frames. 
], batch size: 49, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:47:03,834 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0460, 3.1333, 3.5822, 2.2983, 1.9850, 3.6516, 0.6792, 2.2465], + device='cuda:3'), covar=tensor([0.1828, 0.1137, 0.0396, 0.2738, 0.4301, 0.0291, 0.4360, 0.2031], + device='cuda:3'), in_proj_covar=tensor([0.0157, 0.0158, 0.0094, 0.0208, 0.0242, 0.0096, 0.0158, 0.0156], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:47:37,713 INFO [train.py:901] (3/4) Epoch 9, batch 6550, loss[loss=0.2033, simple_loss=0.2749, pruned_loss=0.06583, over 7805.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3176, pruned_loss=0.08627, over 1618187.15 frames. ], batch size: 19, lr: 8.38e-03, grad_scale: 4.0 +2023-02-06 07:47:40,261 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 07:47:50,023 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 07:47:58,021 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.500e+02 3.444e+02 4.178e+02 7.414e+02, threshold=6.887e+02, percent-clipped=1.0 +2023-02-06 07:48:10,531 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 07:48:13,165 INFO [train.py:901] (3/4) Epoch 9, batch 6600, loss[loss=0.2182, simple_loss=0.302, pruned_loss=0.06726, over 7647.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3175, pruned_loss=0.08673, over 1612670.79 frames. ], batch size: 19, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:48:21,096 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 07:48:47,048 INFO [train.py:901] (3/4) Epoch 9, batch 6650, loss[loss=0.1975, simple_loss=0.2725, pruned_loss=0.06125, over 7638.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3163, pruned_loss=0.08599, over 1611421.97 frames. ], batch size: 19, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:49:05,722 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.641e+02 3.214e+02 4.234e+02 1.005e+03, threshold=6.427e+02, percent-clipped=4.0 +2023-02-06 07:49:13,932 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:49:21,688 INFO [train.py:901] (3/4) Epoch 9, batch 6700, loss[loss=0.2443, simple_loss=0.3055, pruned_loss=0.09151, over 7780.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3182, pruned_loss=0.08658, over 1615505.64 frames. ], batch size: 19, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:49:41,042 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:49:56,538 INFO [train.py:901] (3/4) Epoch 9, batch 6750, loss[loss=0.2197, simple_loss=0.2859, pruned_loss=0.07676, over 7971.00 frames. ], tot_loss[loss=0.2469, simple_loss=0.3189, pruned_loss=0.08745, over 1612816.61 frames. ], batch size: 21, lr: 8.37e-03, grad_scale: 4.0 +2023-02-06 07:50:15,374 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.899e+02 3.032e+02 3.821e+02 4.704e+02 1.129e+03, threshold=7.641e+02, percent-clipped=7.0 +2023-02-06 07:50:23,380 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-06 07:50:30,842 INFO [train.py:901] (3/4) Epoch 9, batch 6800, loss[loss=0.263, simple_loss=0.3226, pruned_loss=0.1017, over 7932.00 frames. ], tot_loss[loss=0.2467, simple_loss=0.3186, pruned_loss=0.08744, over 1613673.97 frames. ], batch size: 20, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:50:32,161 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1969, 4.1062, 3.7817, 1.7854, 3.7366, 3.7593, 3.8164, 3.5428], + device='cuda:3'), covar=tensor([0.0817, 0.0582, 0.1177, 0.5123, 0.0840, 0.0859, 0.1282, 0.0770], + device='cuda:3'), in_proj_covar=tensor([0.0453, 0.0360, 0.0367, 0.0469, 0.0370, 0.0355, 0.0364, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:50:33,614 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:50:55,237 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2178, 1.9294, 2.9005, 2.3276, 2.6060, 2.1213, 1.6555, 1.2853], + device='cuda:3'), covar=tensor([0.3499, 0.3542, 0.0989, 0.2146, 0.1704, 0.1889, 0.1554, 0.3537], + device='cuda:3'), in_proj_covar=tensor([0.0856, 0.0819, 0.0702, 0.0806, 0.0899, 0.0763, 0.0685, 0.0739], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:51:01,265 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=71507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:51:06,468 INFO [train.py:901] (3/4) Epoch 9, batch 6850, loss[loss=0.2366, simple_loss=0.3185, pruned_loss=0.07737, over 8497.00 frames. ], tot_loss[loss=0.2463, simple_loss=0.3181, pruned_loss=0.08727, over 1610991.19 frames. ], batch size: 28, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:51:14,496 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 07:51:25,243 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.628e+02 3.217e+02 4.054e+02 6.964e+02, threshold=6.433e+02, percent-clipped=0.0 +2023-02-06 07:51:40,042 INFO [train.py:901] (3/4) Epoch 9, batch 6900, loss[loss=0.3301, simple_loss=0.3946, pruned_loss=0.1328, over 8414.00 frames. ], tot_loss[loss=0.2476, simple_loss=0.3193, pruned_loss=0.08797, over 1610634.57 frames. ], batch size: 49, lr: 8.36e-03, grad_scale: 8.0 +2023-02-06 07:51:57,276 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8290, 5.9095, 5.1423, 2.1481, 5.1838, 5.5818, 5.4829, 5.1943], + device='cuda:3'), covar=tensor([0.0598, 0.0433, 0.0864, 0.4899, 0.0656, 0.0704, 0.1132, 0.0589], + device='cuda:3'), in_proj_covar=tensor([0.0450, 0.0361, 0.0366, 0.0471, 0.0368, 0.0354, 0.0367, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:52:15,382 INFO [train.py:901] (3/4) Epoch 9, batch 6950, loss[loss=0.2541, simple_loss=0.3368, pruned_loss=0.08567, over 8333.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.319, pruned_loss=0.08759, over 1606884.54 frames. 
], batch size: 25, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:52:22,399 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3384, 2.6344, 3.0086, 1.3990, 3.0962, 1.9526, 1.4954, 2.0246], + device='cuda:3'), covar=tensor([0.0526, 0.0218, 0.0231, 0.0516, 0.0294, 0.0542, 0.0654, 0.0294], + device='cuda:3'), in_proj_covar=tensor([0.0377, 0.0303, 0.0250, 0.0360, 0.0292, 0.0452, 0.0346, 0.0326], + device='cuda:3'), out_proj_covar=tensor([1.1122e-04, 8.6778e-05, 7.2309e-05, 1.0411e-04, 8.5738e-05, 1.4291e-04, + 1.0245e-04, 9.5758e-05], device='cuda:3') +2023-02-06 07:52:23,470 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 07:52:26,278 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1861, 1.1958, 3.2740, 0.9687, 2.8262, 2.7308, 2.9801, 2.8682], + device='cuda:3'), covar=tensor([0.0687, 0.3959, 0.0856, 0.3610, 0.1512, 0.1069, 0.0735, 0.0880], + device='cuda:3'), in_proj_covar=tensor([0.0448, 0.0558, 0.0559, 0.0515, 0.0585, 0.0496, 0.0491, 0.0546], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:52:29,643 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:52:35,520 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.622e+02 3.284e+02 3.978e+02 8.428e+02, threshold=6.567e+02, percent-clipped=2.0 +2023-02-06 07:52:50,437 INFO [train.py:901] (3/4) Epoch 9, batch 7000, loss[loss=0.3034, simple_loss=0.3711, pruned_loss=0.1178, over 8471.00 frames. ], tot_loss[loss=0.2471, simple_loss=0.3194, pruned_loss=0.08736, over 1608667.76 frames. ], batch size: 29, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:52:56,864 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 07:53:24,883 INFO [train.py:901] (3/4) Epoch 9, batch 7050, loss[loss=0.2351, simple_loss=0.3258, pruned_loss=0.0722, over 8463.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3184, pruned_loss=0.0864, over 1612295.51 frames. ], batch size: 25, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:53:32,430 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71725.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:53:45,153 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.886e+02 3.338e+02 4.007e+02 6.250e+02, threshold=6.676e+02, percent-clipped=0.0 +2023-02-06 07:53:50,708 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:53:53,323 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6669, 2.0694, 2.1496, 1.1867, 2.3812, 1.5073, 0.6562, 1.8163], + device='cuda:3'), covar=tensor([0.0345, 0.0161, 0.0128, 0.0299, 0.0166, 0.0494, 0.0450, 0.0159], + device='cuda:3'), in_proj_covar=tensor([0.0371, 0.0297, 0.0247, 0.0356, 0.0286, 0.0445, 0.0340, 0.0324], + device='cuda:3'), out_proj_covar=tensor([1.0957e-04, 8.5037e-05, 7.1500e-05, 1.0269e-04, 8.3832e-05, 1.4036e-04, + 1.0047e-04, 9.5186e-05], device='cuda:3') +2023-02-06 07:53:59,291 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=71763.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:54:00,448 INFO [train.py:901] (3/4) Epoch 9, batch 7100, loss[loss=0.2072, simple_loss=0.2902, pruned_loss=0.06211, over 7800.00 frames. 
], tot_loss[loss=0.2453, simple_loss=0.318, pruned_loss=0.0863, over 1611882.81 frames. ], batch size: 20, lr: 8.35e-03, grad_scale: 8.0 +2023-02-06 07:54:10,698 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3710, 1.5094, 2.1915, 1.1515, 1.5181, 1.5741, 1.3717, 1.2476], + device='cuda:3'), covar=tensor([0.1567, 0.1888, 0.0708, 0.3437, 0.1497, 0.2799, 0.1704, 0.1849], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0496, 0.0529, 0.0572, 0.0604, 0.0546, 0.0463, 0.0608], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:54:16,145 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=71788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:54:28,670 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5821, 1.8430, 1.9516, 1.0013, 2.1296, 1.3538, 0.5156, 1.6704], + device='cuda:3'), covar=tensor([0.0353, 0.0175, 0.0168, 0.0359, 0.0214, 0.0558, 0.0464, 0.0176], + device='cuda:3'), in_proj_covar=tensor([0.0375, 0.0300, 0.0252, 0.0360, 0.0292, 0.0448, 0.0343, 0.0330], + device='cuda:3'), out_proj_covar=tensor([1.1062e-04, 8.5958e-05, 7.2734e-05, 1.0415e-04, 8.5610e-05, 1.4137e-04, + 1.0129e-04, 9.7004e-05], device='cuda:3') +2023-02-06 07:54:31,426 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1472, 1.0719, 1.2392, 1.1182, 0.8081, 1.2932, 0.0599, 0.9015], + device='cuda:3'), covar=tensor([0.2947, 0.1994, 0.0730, 0.1477, 0.5142, 0.0783, 0.3947, 0.1981], + device='cuda:3'), in_proj_covar=tensor([0.0159, 0.0158, 0.0093, 0.0207, 0.0245, 0.0096, 0.0157, 0.0156], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 07:54:34,526 INFO [train.py:901] (3/4) Epoch 9, batch 7150, loss[loss=0.2449, simple_loss=0.323, pruned_loss=0.08346, over 8326.00 frames. ], tot_loss[loss=0.2472, simple_loss=0.3192, pruned_loss=0.08762, over 1609592.56 frames. ], batch size: 25, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:54:54,740 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.560e+02 3.246e+02 4.043e+02 1.359e+03, threshold=6.493e+02, percent-clipped=7.0 +2023-02-06 07:55:10,759 INFO [train.py:901] (3/4) Epoch 9, batch 7200, loss[loss=0.2671, simple_loss=0.3368, pruned_loss=0.09866, over 6467.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3203, pruned_loss=0.08805, over 1611674.50 frames. ], batch size: 72, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:55:30,900 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-06 07:55:43,959 INFO [train.py:901] (3/4) Epoch 9, batch 7250, loss[loss=0.2332, simple_loss=0.3208, pruned_loss=0.0728, over 8462.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.3183, pruned_loss=0.08653, over 1614008.94 frames. ], batch size: 27, lr: 8.34e-03, grad_scale: 8.0 +2023-02-06 07:55:58,954 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=71937.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:56:02,878 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.597e+02 3.277e+02 3.984e+02 9.565e+02, threshold=6.554e+02, percent-clipped=6.0 +2023-02-06 07:56:19,574 INFO [train.py:901] (3/4) Epoch 9, batch 7300, loss[loss=0.2791, simple_loss=0.3432, pruned_loss=0.1075, over 6927.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3172, pruned_loss=0.08624, over 1608311.24 frames. 
], batch size: 71, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:56:28,842 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=71978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:56:29,640 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4880, 2.9715, 1.7911, 2.2255, 2.2624, 1.6031, 2.1336, 2.2257], + device='cuda:3'), covar=tensor([0.1346, 0.0325, 0.1084, 0.0649, 0.0586, 0.1322, 0.0945, 0.0914], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0241, 0.0316, 0.0302, 0.0307, 0.0324, 0.0343, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 07:56:48,133 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3982, 2.7901, 1.6904, 2.0232, 2.1279, 1.5202, 1.8685, 1.9336], + device='cuda:3'), covar=tensor([0.1335, 0.0277, 0.0984, 0.0617, 0.0571, 0.1258, 0.0918, 0.0847], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0242, 0.0316, 0.0302, 0.0307, 0.0324, 0.0344, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 07:56:52,665 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1401, 1.2659, 3.2954, 0.9703, 2.8616, 2.7562, 3.0137, 2.8944], + device='cuda:3'), covar=tensor([0.0755, 0.3923, 0.0734, 0.3492, 0.1526, 0.0988, 0.0726, 0.0929], + device='cuda:3'), in_proj_covar=tensor([0.0446, 0.0553, 0.0548, 0.0517, 0.0587, 0.0493, 0.0484, 0.0547], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 07:56:54,487 INFO [train.py:901] (3/4) Epoch 9, batch 7350, loss[loss=0.2613, simple_loss=0.3343, pruned_loss=0.09415, over 8352.00 frames. ], tot_loss[loss=0.2457, simple_loss=0.318, pruned_loss=0.08674, over 1608178.62 frames. ], batch size: 24, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:57:02,505 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 07:57:13,528 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.925e+02 3.749e+02 4.804e+02 1.068e+03, threshold=7.499e+02, percent-clipped=9.0 +2023-02-06 07:57:22,479 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 07:57:30,036 INFO [train.py:901] (3/4) Epoch 9, batch 7400, loss[loss=0.2491, simple_loss=0.3343, pruned_loss=0.08195, over 8369.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3176, pruned_loss=0.08612, over 1610489.94 frames. 
], batch size: 24, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:57:42,992 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6860, 5.6848, 4.9514, 2.2768, 5.0216, 5.3403, 5.2222, 4.9861], + device='cuda:3'), covar=tensor([0.0589, 0.0487, 0.0876, 0.4526, 0.0648, 0.0686, 0.1218, 0.0589], + device='cuda:3'), in_proj_covar=tensor([0.0450, 0.0354, 0.0368, 0.0468, 0.0364, 0.0352, 0.0363, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 07:57:50,402 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:57:56,613 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4988, 2.7395, 1.7046, 2.0277, 2.0544, 1.4376, 1.8745, 2.0744], + device='cuda:3'), covar=tensor([0.1116, 0.0258, 0.0962, 0.0590, 0.0613, 0.1270, 0.0909, 0.0813], + device='cuda:3'), in_proj_covar=tensor([0.0342, 0.0238, 0.0313, 0.0299, 0.0303, 0.0320, 0.0339, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 07:58:03,869 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 07:58:05,138 INFO [train.py:901] (3/4) Epoch 9, batch 7450, loss[loss=0.221, simple_loss=0.2979, pruned_loss=0.07205, over 7643.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3164, pruned_loss=0.08491, over 1610822.40 frames. ], batch size: 19, lr: 8.33e-03, grad_scale: 8.0 +2023-02-06 07:58:23,964 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.410e+02 3.229e+02 3.860e+02 9.903e+02, threshold=6.459e+02, percent-clipped=1.0 +2023-02-06 07:58:26,830 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 07:58:38,698 INFO [train.py:901] (3/4) Epoch 9, batch 7500, loss[loss=0.2149, simple_loss=0.2893, pruned_loss=0.07028, over 7929.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3147, pruned_loss=0.08421, over 1610157.63 frames. ], batch size: 20, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:09,506 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-06 07:59:15,028 INFO [train.py:901] (3/4) Epoch 9, batch 7550, loss[loss=0.3165, simple_loss=0.3759, pruned_loss=0.1286, over 8462.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.3144, pruned_loss=0.08443, over 1607708.12 frames. ], batch size: 29, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:33,688 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.819e+02 3.433e+02 4.309e+02 8.597e+02, threshold=6.865e+02, percent-clipped=4.0 +2023-02-06 07:59:48,158 INFO [train.py:901] (3/4) Epoch 9, batch 7600, loss[loss=0.2289, simple_loss=0.3143, pruned_loss=0.07172, over 8110.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3166, pruned_loss=0.0858, over 1610736.41 frames. ], batch size: 23, lr: 8.32e-03, grad_scale: 8.0 +2023-02-06 07:59:58,719 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:22,660 INFO [train.py:901] (3/4) Epoch 9, batch 7650, loss[loss=0.2342, simple_loss=0.3015, pruned_loss=0.08351, over 8086.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3171, pruned_loss=0.0865, over 1611476.79 frames. 
], batch size: 21, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:00:42,771 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.706e+02 3.178e+02 3.983e+02 6.818e+02, threshold=6.357e+02, percent-clipped=0.0 +2023-02-06 08:00:43,589 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:46,999 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:00:57,579 INFO [train.py:901] (3/4) Epoch 9, batch 7700, loss[loss=0.2698, simple_loss=0.3435, pruned_loss=0.09803, over 8518.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3177, pruned_loss=0.08696, over 1612987.43 frames. ], batch size: 29, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:03,946 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:09,771 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 08:01:18,751 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:01:32,218 INFO [train.py:901] (3/4) Epoch 9, batch 7750, loss[loss=0.2535, simple_loss=0.3101, pruned_loss=0.09842, over 7813.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3174, pruned_loss=0.0865, over 1613389.76 frames. ], batch size: 20, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:01:53,040 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.870e+02 2.670e+02 3.267e+02 4.054e+02 1.108e+03, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 08:02:08,196 INFO [train.py:901] (3/4) Epoch 9, batch 7800, loss[loss=0.2701, simple_loss=0.3452, pruned_loss=0.09751, over 8472.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3164, pruned_loss=0.0858, over 1613047.18 frames. ], batch size: 27, lr: 8.31e-03, grad_scale: 8.0 +2023-02-06 08:02:08,406 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2657, 1.3891, 1.1990, 1.8600, 0.7823, 1.1068, 1.1644, 1.4558], + device='cuda:3'), covar=tensor([0.1042, 0.0930, 0.1332, 0.0549, 0.1273, 0.1706, 0.0980, 0.0901], + device='cuda:3'), in_proj_covar=tensor([0.0238, 0.0216, 0.0257, 0.0215, 0.0219, 0.0254, 0.0259, 0.0224], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 08:02:25,494 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:02:37,535 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2095, 1.4248, 1.2202, 1.9323, 0.6544, 1.0761, 1.2786, 1.5471], + device='cuda:3'), covar=tensor([0.1156, 0.0923, 0.1444, 0.0591, 0.1432, 0.1825, 0.0976, 0.0760], + device='cuda:3'), in_proj_covar=tensor([0.0241, 0.0219, 0.0257, 0.0216, 0.0222, 0.0256, 0.0261, 0.0226], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 08:02:41,259 INFO [train.py:901] (3/4) Epoch 9, batch 7850, loss[loss=0.2479, simple_loss=0.329, pruned_loss=0.0834, over 8462.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3157, pruned_loss=0.08506, over 1614616.11 frames. 
], batch size: 25, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:02:59,530 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.530e+02 3.201e+02 3.890e+02 8.475e+02, threshold=6.403e+02, percent-clipped=6.0 +2023-02-06 08:03:14,039 INFO [train.py:901] (3/4) Epoch 9, batch 7900, loss[loss=0.2572, simple_loss=0.3293, pruned_loss=0.09255, over 8454.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3163, pruned_loss=0.08517, over 1613624.97 frames. ], batch size: 27, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:03:37,901 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 08:03:41,422 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72606.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:03:44,706 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4158, 1.7080, 2.8419, 1.2537, 1.9970, 1.7526, 1.4332, 1.8856], + device='cuda:3'), covar=tensor([0.1697, 0.2115, 0.0626, 0.3756, 0.1425, 0.2795, 0.1910, 0.1998], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0496, 0.0531, 0.0572, 0.0609, 0.0540, 0.0469, 0.0611], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:03:47,073 INFO [train.py:901] (3/4) Epoch 9, batch 7950, loss[loss=0.2053, simple_loss=0.2902, pruned_loss=0.06025, over 8360.00 frames. ], tot_loss[loss=0.2434, simple_loss=0.3163, pruned_loss=0.08523, over 1611600.58 frames. ], batch size: 24, lr: 8.30e-03, grad_scale: 8.0 +2023-02-06 08:04:05,356 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.498e+02 3.176e+02 4.184e+02 8.861e+02, threshold=6.353e+02, percent-clipped=6.0 +2023-02-06 08:04:11,429 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72652.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:19,759 INFO [train.py:901] (3/4) Epoch 9, batch 8000, loss[loss=0.2327, simple_loss=0.3253, pruned_loss=0.07006, over 8335.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3162, pruned_loss=0.08519, over 1608277.53 frames. ], batch size: 25, lr: 8.29e-03, grad_scale: 8.0 +2023-02-06 08:04:27,787 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:34,992 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=72688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:04:49,794 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0070, 1.4963, 1.6821, 1.5194, 0.9662, 1.4694, 1.6323, 1.4176], + device='cuda:3'), covar=tensor([0.0523, 0.1240, 0.1659, 0.1356, 0.0592, 0.1467, 0.0691, 0.0630], + device='cuda:3'), in_proj_covar=tensor([0.0105, 0.0157, 0.0196, 0.0161, 0.0107, 0.0167, 0.0120, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 08:04:52,985 INFO [train.py:901] (3/4) Epoch 9, batch 8050, loss[loss=0.1872, simple_loss=0.2641, pruned_loss=0.0551, over 7796.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.3145, pruned_loss=0.0853, over 1594742.41 frames. 
], batch size: 19, lr: 8.29e-03, grad_scale: 8.0 +2023-02-06 08:05:11,528 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.635e+02 3.102e+02 3.711e+02 7.462e+02, threshold=6.205e+02, percent-clipped=1.0 +2023-02-06 08:05:25,745 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 08:05:31,306 INFO [train.py:901] (3/4) Epoch 10, batch 0, loss[loss=0.2893, simple_loss=0.3549, pruned_loss=0.1119, over 8334.00 frames. ], tot_loss[loss=0.2893, simple_loss=0.3549, pruned_loss=0.1119, over 8334.00 frames. ], batch size: 26, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:05:31,306 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 08:05:43,258 INFO [train.py:935] (3/4) Epoch 10, validation: loss=0.1954, simple_loss=0.295, pruned_loss=0.0479, over 944034.00 frames. +2023-02-06 08:05:43,259 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 08:05:57,129 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 08:06:07,501 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 08:06:17,960 INFO [train.py:901] (3/4) Epoch 10, batch 50, loss[loss=0.2573, simple_loss=0.3393, pruned_loss=0.08767, over 8370.00 frames. ], tot_loss[loss=0.2455, simple_loss=0.3174, pruned_loss=0.08676, over 364930.87 frames. ], batch size: 24, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:06:21,761 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=72803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:06:31,234 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 08:06:49,407 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.716e+02 3.124e+02 3.887e+02 7.160e+02, threshold=6.248e+02, percent-clipped=5.0 +2023-02-06 08:06:52,303 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 08:06:52,961 INFO [train.py:901] (3/4) Epoch 10, batch 100, loss[loss=0.2313, simple_loss=0.287, pruned_loss=0.08778, over 7540.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3181, pruned_loss=0.08631, over 645951.51 frames. ], batch size: 18, lr: 7.88e-03, grad_scale: 8.0 +2023-02-06 08:07:03,804 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=72862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:07:22,340 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=72887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:07:30,273 INFO [train.py:901] (3/4) Epoch 10, batch 150, loss[loss=0.3054, simple_loss=0.3563, pruned_loss=0.1272, over 6942.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3179, pruned_loss=0.08635, over 858725.85 frames. 
], batch size: 72, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:07:33,245 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4276, 1.9590, 3.0648, 2.4452, 2.7128, 2.1886, 1.7219, 1.5277], + device='cuda:3'), covar=tensor([0.3343, 0.3725, 0.0992, 0.2115, 0.1774, 0.1951, 0.1633, 0.3644], + device='cuda:3'), in_proj_covar=tensor([0.0865, 0.0828, 0.0700, 0.0817, 0.0905, 0.0763, 0.0689, 0.0744], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:07:36,683 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6625, 1.8115, 4.4198, 1.9189, 2.3901, 5.1683, 4.9663, 4.5097], + device='cuda:3'), covar=tensor([0.0946, 0.1446, 0.0250, 0.1846, 0.1066, 0.0154, 0.0274, 0.0516], + device='cuda:3'), in_proj_covar=tensor([0.0252, 0.0289, 0.0251, 0.0279, 0.0264, 0.0230, 0.0312, 0.0285], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:07:40,081 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72912.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:08:01,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.888e+02 2.670e+02 3.307e+02 4.288e+02 9.841e+02, threshold=6.614e+02, percent-clipped=3.0 +2023-02-06 08:08:04,559 INFO [train.py:901] (3/4) Epoch 10, batch 200, loss[loss=0.2462, simple_loss=0.3196, pruned_loss=0.08642, over 8480.00 frames. ], tot_loss[loss=0.2452, simple_loss=0.3183, pruned_loss=0.086, over 1024857.49 frames. ], batch size: 26, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:08:29,437 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=72982.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:08:41,016 INFO [train.py:901] (3/4) Epoch 10, batch 250, loss[loss=0.276, simple_loss=0.3299, pruned_loss=0.1111, over 7234.00 frames. ], tot_loss[loss=0.2451, simple_loss=0.3184, pruned_loss=0.08589, over 1160010.25 frames. ], batch size: 71, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:08:47,842 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 08:08:56,862 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 08:09:02,436 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2092, 1.4875, 1.6353, 1.3010, 1.1510, 1.3149, 1.9112, 1.7327], + device='cuda:3'), covar=tensor([0.0537, 0.1309, 0.1726, 0.1432, 0.0590, 0.1669, 0.0657, 0.0596], + device='cuda:3'), in_proj_covar=tensor([0.0106, 0.0155, 0.0196, 0.0160, 0.0107, 0.0167, 0.0120, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 08:09:07,478 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 08:09:12,572 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.688e+02 3.158e+02 3.760e+02 5.735e+02, threshold=6.316e+02, percent-clipped=0.0 +2023-02-06 08:09:16,048 INFO [train.py:901] (3/4) Epoch 10, batch 300, loss[loss=0.2718, simple_loss=0.3469, pruned_loss=0.0984, over 8465.00 frames. ], tot_loss[loss=0.2464, simple_loss=0.3194, pruned_loss=0.08667, over 1262735.35 frames. 
], batch size: 29, lr: 7.87e-03, grad_scale: 8.0 +2023-02-06 08:09:23,774 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:09:40,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:09:51,614 INFO [train.py:901] (3/4) Epoch 10, batch 350, loss[loss=0.2571, simple_loss=0.331, pruned_loss=0.0916, over 8485.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3188, pruned_loss=0.08648, over 1341612.61 frames. ], batch size: 28, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:09:55,108 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5779, 2.0039, 3.3170, 1.2913, 2.4886, 1.9879, 1.6003, 2.2716], + device='cuda:3'), covar=tensor([0.1634, 0.2196, 0.0646, 0.3948, 0.1335, 0.2700, 0.1783, 0.2042], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0495, 0.0534, 0.0567, 0.0607, 0.0540, 0.0466, 0.0605], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:10:23,506 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 2.632e+02 3.058e+02 3.924e+02 7.931e+02, threshold=6.116e+02, percent-clipped=5.0 +2023-02-06 08:10:26,911 INFO [train.py:901] (3/4) Epoch 10, batch 400, loss[loss=0.2828, simple_loss=0.3587, pruned_loss=0.1035, over 8281.00 frames. ], tot_loss[loss=0.2459, simple_loss=0.3191, pruned_loss=0.08632, over 1405767.97 frames. ], batch size: 23, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:10:49,317 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6423, 2.8483, 2.0756, 2.2507, 2.2970, 1.6824, 2.1599, 2.2509], + device='cuda:3'), covar=tensor([0.1493, 0.0338, 0.0986, 0.0603, 0.0615, 0.1347, 0.1089, 0.1046], + device='cuda:3'), in_proj_covar=tensor([0.0343, 0.0231, 0.0310, 0.0297, 0.0300, 0.0318, 0.0335, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:11:01,334 INFO [train.py:901] (3/4) Epoch 10, batch 450, loss[loss=0.2106, simple_loss=0.2936, pruned_loss=0.06377, over 8363.00 frames. ], tot_loss[loss=0.245, simple_loss=0.3185, pruned_loss=0.08574, over 1451257.16 frames. ], batch size: 24, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:11:33,882 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.629e+02 3.140e+02 3.877e+02 8.143e+02, threshold=6.279e+02, percent-clipped=4.0 +2023-02-06 08:11:35,980 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3387, 4.3707, 3.9569, 1.8801, 3.8700, 3.9728, 4.0107, 3.5145], + device='cuda:3'), covar=tensor([0.0803, 0.0475, 0.0819, 0.4683, 0.0751, 0.0732, 0.1113, 0.0910], + device='cuda:3'), in_proj_covar=tensor([0.0461, 0.0358, 0.0375, 0.0477, 0.0373, 0.0357, 0.0367, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:11:37,155 INFO [train.py:901] (3/4) Epoch 10, batch 500, loss[loss=0.2394, simple_loss=0.314, pruned_loss=0.08246, over 8607.00 frames. ], tot_loss[loss=0.2462, simple_loss=0.3194, pruned_loss=0.08649, over 1489094.26 frames. 
], batch size: 39, lr: 7.86e-03, grad_scale: 16.0 +2023-02-06 08:11:42,548 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:12:10,509 INFO [train.py:901] (3/4) Epoch 10, batch 550, loss[loss=0.2344, simple_loss=0.3283, pruned_loss=0.07025, over 8345.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3183, pruned_loss=0.08575, over 1517327.81 frames. ], batch size: 26, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:12:18,668 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8724, 1.4507, 3.3626, 1.3260, 2.1776, 3.7091, 3.7486, 3.0972], + device='cuda:3'), covar=tensor([0.1079, 0.1483, 0.0350, 0.1957, 0.1005, 0.0218, 0.0413, 0.0619], + device='cuda:3'), in_proj_covar=tensor([0.0255, 0.0290, 0.0252, 0.0282, 0.0265, 0.0231, 0.0314, 0.0290], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:12:19,359 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:12:19,472 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1424, 1.7776, 2.5613, 2.1647, 2.3666, 2.0106, 1.5754, 0.9963], + device='cuda:3'), covar=tensor([0.3465, 0.3393, 0.0990, 0.1950, 0.1569, 0.1809, 0.1555, 0.3493], + device='cuda:3'), in_proj_covar=tensor([0.0860, 0.0831, 0.0701, 0.0817, 0.0907, 0.0763, 0.0690, 0.0744], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:12:23,931 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1592, 1.6813, 1.6202, 1.3534, 1.0703, 1.4270, 1.8789, 1.8116], + device='cuda:3'), covar=tensor([0.0488, 0.1172, 0.1779, 0.1375, 0.0599, 0.1541, 0.0678, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0105, 0.0157, 0.0197, 0.0161, 0.0108, 0.0168, 0.0120, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 08:12:29,157 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73326.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:12:41,609 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.475e+02 3.100e+02 3.629e+02 1.040e+03, threshold=6.201e+02, percent-clipped=3.0 +2023-02-06 08:12:44,822 INFO [train.py:901] (3/4) Epoch 10, batch 600, loss[loss=0.2851, simple_loss=0.3432, pruned_loss=0.1135, over 8498.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3192, pruned_loss=0.08604, over 1542106.81 frames. ], batch size: 28, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:12:56,196 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 08:13:01,725 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73371.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:13:20,007 INFO [train.py:901] (3/4) Epoch 10, batch 650, loss[loss=0.2635, simple_loss=0.3371, pruned_loss=0.09498, over 8099.00 frames. ], tot_loss[loss=0.2458, simple_loss=0.3188, pruned_loss=0.08642, over 1557417.10 frames. 
], batch size: 23, lr: 7.85e-03, grad_scale: 16.0 +2023-02-06 08:13:50,088 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73441.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:13:51,168 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.848e+02 2.469e+02 3.040e+02 3.840e+02 6.530e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 08:13:54,587 INFO [train.py:901] (3/4) Epoch 10, batch 700, loss[loss=0.2034, simple_loss=0.2714, pruned_loss=0.06769, over 7716.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3172, pruned_loss=0.08496, over 1572039.56 frames. ], batch size: 18, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:14:31,478 INFO [train.py:901] (3/4) Epoch 10, batch 750, loss[loss=0.2127, simple_loss=0.2878, pruned_loss=0.06875, over 8129.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3151, pruned_loss=0.08433, over 1573584.20 frames. ], batch size: 22, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:14:45,811 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 08:14:54,756 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 08:15:02,253 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.756e+02 3.307e+02 3.958e+02 8.111e+02, threshold=6.615e+02, percent-clipped=6.0 +2023-02-06 08:15:02,469 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9652, 2.4020, 1.9216, 2.9245, 1.3617, 1.6049, 1.7550, 2.4539], + device='cuda:3'), covar=tensor([0.0871, 0.0837, 0.1116, 0.0385, 0.1294, 0.1622, 0.1250, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0246, 0.0221, 0.0264, 0.0220, 0.0223, 0.0260, 0.0267, 0.0228], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 08:15:05,712 INFO [train.py:901] (3/4) Epoch 10, batch 800, loss[loss=0.2469, simple_loss=0.329, pruned_loss=0.08239, over 8475.00 frames. ], tot_loss[loss=0.2421, simple_loss=0.3152, pruned_loss=0.08446, over 1583977.61 frames. ], batch size: 29, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:15:17,222 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3076, 1.4621, 4.5116, 1.6966, 3.9550, 3.8265, 4.0746, 3.9473], + device='cuda:3'), covar=tensor([0.0534, 0.3752, 0.0448, 0.3161, 0.1188, 0.0892, 0.0537, 0.0651], + device='cuda:3'), in_proj_covar=tensor([0.0444, 0.0556, 0.0557, 0.0510, 0.0585, 0.0496, 0.0488, 0.0552], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:15:41,988 INFO [train.py:901] (3/4) Epoch 10, batch 850, loss[loss=0.2432, simple_loss=0.3201, pruned_loss=0.08312, over 8319.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.315, pruned_loss=0.08429, over 1589420.76 frames. 
], batch size: 26, lr: 7.84e-03, grad_scale: 16.0 +2023-02-06 08:15:49,886 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:03,001 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73627.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:13,771 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.847e+02 3.470e+02 4.482e+02 1.720e+03, threshold=6.940e+02, percent-clipped=10.0 +2023-02-06 08:16:17,263 INFO [train.py:901] (3/4) Epoch 10, batch 900, loss[loss=0.2108, simple_loss=0.2849, pruned_loss=0.06835, over 7433.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3142, pruned_loss=0.08373, over 1596054.95 frames. ], batch size: 17, lr: 7.83e-03, grad_scale: 16.0 +2023-02-06 08:16:20,227 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73652.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:22,225 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73655.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:16:52,130 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=73697.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:16:52,448 INFO [train.py:901] (3/4) Epoch 10, batch 950, loss[loss=0.2635, simple_loss=0.3294, pruned_loss=0.09878, over 7973.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3153, pruned_loss=0.08452, over 1601168.40 frames. ], batch size: 21, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:17:10,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=73722.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:17:18,303 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 08:17:22,464 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5648, 1.4651, 4.7039, 1.9797, 4.1250, 3.9310, 4.2771, 4.1150], + device='cuda:3'), covar=tensor([0.0508, 0.4522, 0.0417, 0.3194, 0.1079, 0.0782, 0.0521, 0.0577], + device='cuda:3'), in_proj_covar=tensor([0.0448, 0.0560, 0.0560, 0.0509, 0.0590, 0.0501, 0.0490, 0.0554], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:17:24,917 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.751e+02 3.323e+02 4.211e+02 1.163e+03, threshold=6.645e+02, percent-clipped=9.0 +2023-02-06 08:17:27,464 INFO [train.py:901] (3/4) Epoch 10, batch 1000, loss[loss=0.2259, simple_loss=0.3048, pruned_loss=0.07355, over 8236.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3152, pruned_loss=0.08456, over 1601692.94 frames. ], batch size: 22, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:17:28,907 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=73750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:17:42,245 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=73770.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:17:50,797 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 08:18:00,658 INFO [train.py:901] (3/4) Epoch 10, batch 1050, loss[loss=0.2137, simple_loss=0.3016, pruned_loss=0.06288, over 8137.00 frames. ], tot_loss[loss=0.2436, simple_loss=0.3168, pruned_loss=0.08517, over 1607262.95 frames. 
], batch size: 22, lr: 7.83e-03, grad_scale: 8.0 +2023-02-06 08:18:01,390 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 08:18:34,182 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.634e+02 3.058e+02 3.903e+02 1.179e+03, threshold=6.116e+02, percent-clipped=2.0 +2023-02-06 08:18:36,809 INFO [train.py:901] (3/4) Epoch 10, batch 1100, loss[loss=0.2692, simple_loss=0.3409, pruned_loss=0.09878, over 8502.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3172, pruned_loss=0.08531, over 1608590.79 frames. ], batch size: 26, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:04,659 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6684, 1.8221, 2.2689, 1.4947, 1.0646, 2.4506, 0.3860, 1.2111], + device='cuda:3'), covar=tensor([0.2590, 0.1628, 0.0539, 0.2781, 0.4683, 0.0430, 0.3624, 0.2414], + device='cuda:3'), in_proj_covar=tensor([0.0163, 0.0162, 0.0095, 0.0215, 0.0252, 0.0097, 0.0160, 0.0162], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:19:09,833 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 08:19:10,503 INFO [train.py:901] (3/4) Epoch 10, batch 1150, loss[loss=0.2619, simple_loss=0.3275, pruned_loss=0.09814, over 7819.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3174, pruned_loss=0.08524, over 1612420.12 frames. ], batch size: 20, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:42,449 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.356e+02 2.791e+02 3.726e+02 1.227e+03, threshold=5.583e+02, percent-clipped=4.0 +2023-02-06 08:19:45,137 INFO [train.py:901] (3/4) Epoch 10, batch 1200, loss[loss=0.2527, simple_loss=0.3283, pruned_loss=0.08855, over 8355.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3178, pruned_loss=0.0849, over 1619916.12 frames. ], batch size: 26, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:19:48,562 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=73952.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:20,063 INFO [train.py:901] (3/4) Epoch 10, batch 1250, loss[loss=0.2684, simple_loss=0.3378, pruned_loss=0.09955, over 8651.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3157, pruned_loss=0.0838, over 1616521.38 frames. ], batch size: 34, lr: 7.82e-03, grad_scale: 8.0 +2023-02-06 08:20:25,445 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-02-06 08:20:39,873 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:20:48,615 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4670, 2.0140, 3.0592, 2.4509, 2.7987, 2.2015, 1.7767, 1.6208], + device='cuda:3'), covar=tensor([0.3052, 0.3350, 0.0987, 0.2063, 0.1501, 0.1840, 0.1488, 0.3389], + device='cuda:3'), in_proj_covar=tensor([0.0858, 0.0823, 0.0706, 0.0812, 0.0903, 0.0765, 0.0686, 0.0741], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:20:51,572 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.519e+02 3.075e+02 3.983e+02 7.817e+02, threshold=6.150e+02, percent-clipped=4.0 +2023-02-06 08:20:54,941 INFO [train.py:901] (3/4) Epoch 10, batch 1300, loss[loss=0.2394, simple_loss=0.2918, pruned_loss=0.0935, over 7564.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3147, pruned_loss=0.08341, over 1614318.85 frames. ], batch size: 18, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:20:57,207 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:07,836 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:19,127 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:26,840 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:21:29,354 INFO [train.py:901] (3/4) Epoch 10, batch 1350, loss[loss=0.2447, simple_loss=0.3144, pruned_loss=0.08754, over 7934.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3159, pruned_loss=0.08424, over 1616930.26 frames. ], batch size: 20, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:21:30,434 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. limit=5.0 +2023-02-06 08:21:59,541 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.773e+02 3.448e+02 4.052e+02 8.675e+02, threshold=6.895e+02, percent-clipped=5.0 +2023-02-06 08:22:02,258 INFO [train.py:901] (3/4) Epoch 10, batch 1400, loss[loss=0.2718, simple_loss=0.3566, pruned_loss=0.09354, over 8499.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3176, pruned_loss=0.08556, over 1616592.74 frames. ], batch size: 26, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:22:02,453 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4191, 1.8563, 1.7866, 1.0533, 1.8047, 1.3870, 0.4856, 1.6794], + device='cuda:3'), covar=tensor([0.0362, 0.0198, 0.0165, 0.0339, 0.0271, 0.0553, 0.0507, 0.0171], + device='cuda:3'), in_proj_covar=tensor([0.0366, 0.0303, 0.0256, 0.0357, 0.0286, 0.0447, 0.0340, 0.0330], + device='cuda:3'), out_proj_covar=tensor([1.0716e-04, 8.6524e-05, 7.3501e-05, 1.0265e-04, 8.3262e-05, 1.4058e-04, + 9.9860e-05, 9.6205e-05], device='cuda:3') +2023-02-06 08:22:38,033 INFO [train.py:901] (3/4) Epoch 10, batch 1450, loss[loss=0.2649, simple_loss=0.3268, pruned_loss=0.1015, over 8230.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3174, pruned_loss=0.08528, over 1617916.94 frames. ], batch size: 22, lr: 7.81e-03, grad_scale: 8.0 +2023-02-06 08:22:41,667 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 08:22:45,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:23:01,080 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 08:23:09,187 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.838e+02 2.525e+02 3.045e+02 3.954e+02 1.310e+03, threshold=6.089e+02, percent-clipped=4.0 +2023-02-06 08:23:11,853 INFO [train.py:901] (3/4) Epoch 10, batch 1500, loss[loss=0.2377, simple_loss=0.3036, pruned_loss=0.08591, over 7647.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3167, pruned_loss=0.08454, over 1620472.10 frames. ], batch size: 19, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:23:18,344 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:23:46,601 INFO [train.py:901] (3/4) Epoch 10, batch 1550, loss[loss=0.2011, simple_loss=0.2825, pruned_loss=0.05987, over 8202.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3158, pruned_loss=0.08396, over 1618108.51 frames. ], batch size: 23, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:23:57,170 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.1988, 1.9870, 5.3194, 2.2856, 4.7568, 4.5022, 4.9448, 4.8103], + device='cuda:3'), covar=tensor([0.0494, 0.3975, 0.0389, 0.3031, 0.1008, 0.0843, 0.0441, 0.0497], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0564, 0.0567, 0.0517, 0.0588, 0.0505, 0.0491, 0.0556], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:24:05,684 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:24:17,381 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2428, 1.4793, 2.1607, 1.0974, 1.4665, 1.4432, 1.3249, 1.5371], + device='cuda:3'), covar=tensor([0.1712, 0.2091, 0.0803, 0.3578, 0.1666, 0.2929, 0.1805, 0.1980], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0500, 0.0527, 0.0565, 0.0608, 0.0540, 0.0463, 0.0604], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:24:19,868 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.540e+02 3.095e+02 3.981e+02 6.537e+02, threshold=6.190e+02, percent-clipped=3.0 +2023-02-06 08:24:22,697 INFO [train.py:901] (3/4) Epoch 10, batch 1600, loss[loss=0.215, simple_loss=0.292, pruned_loss=0.06897, over 7813.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3144, pruned_loss=0.08297, over 1620529.86 frames. 
], batch size: 20, lr: 7.80e-03, grad_scale: 8.0 +2023-02-06 08:24:22,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:24:28,415 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1478, 2.1872, 1.6057, 1.8696, 1.8340, 1.2332, 1.5636, 1.7096], + device='cuda:3'), covar=tensor([0.1179, 0.0332, 0.1032, 0.0524, 0.0645, 0.1388, 0.0830, 0.0751], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0239, 0.0316, 0.0303, 0.0307, 0.0323, 0.0342, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:24:56,948 INFO [train.py:901] (3/4) Epoch 10, batch 1650, loss[loss=0.2432, simple_loss=0.3124, pruned_loss=0.08704, over 8239.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3134, pruned_loss=0.08313, over 1619294.20 frames. ], batch size: 22, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:25:00,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3922, 1.8744, 2.8610, 2.3186, 2.6666, 2.1692, 1.7836, 1.2072], + device='cuda:3'), covar=tensor([0.3251, 0.3474, 0.0963, 0.2161, 0.1484, 0.1926, 0.1550, 0.3525], + device='cuda:3'), in_proj_covar=tensor([0.0859, 0.0828, 0.0708, 0.0817, 0.0907, 0.0767, 0.0689, 0.0742], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:25:12,623 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0981, 2.1467, 1.5839, 1.9353, 1.7417, 1.2529, 1.4724, 1.7238], + device='cuda:3'), covar=tensor([0.1265, 0.0350, 0.1103, 0.0451, 0.0693, 0.1430, 0.0884, 0.0782], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0238, 0.0316, 0.0302, 0.0307, 0.0322, 0.0342, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:25:18,074 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74426.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:25:30,268 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.508e+02 3.008e+02 3.971e+02 8.483e+02, threshold=6.016e+02, percent-clipped=6.0 +2023-02-06 08:25:32,863 INFO [train.py:901] (3/4) Epoch 10, batch 1700, loss[loss=0.3005, simple_loss=0.3535, pruned_loss=0.1237, over 8598.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3148, pruned_loss=0.08412, over 1616172.82 frames. ], batch size: 31, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:25:44,218 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:00,776 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:03,007 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 08:26:05,906 INFO [train.py:901] (3/4) Epoch 10, batch 1750, loss[loss=0.2115, simple_loss=0.2793, pruned_loss=0.0718, over 7281.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3158, pruned_loss=0.08475, over 1618854.95 frames. 
], batch size: 16, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:26:36,857 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74541.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:26:38,773 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.804e+02 3.517e+02 4.698e+02 1.546e+03, threshold=7.034e+02, percent-clipped=7.0 +2023-02-06 08:26:41,533 INFO [train.py:901] (3/4) Epoch 10, batch 1800, loss[loss=0.2937, simple_loss=0.3526, pruned_loss=0.1173, over 8469.00 frames. ], tot_loss[loss=0.2445, simple_loss=0.3172, pruned_loss=0.08592, over 1618589.30 frames. ], batch size: 25, lr: 7.79e-03, grad_scale: 8.0 +2023-02-06 08:26:45,353 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. limit=2.0 +2023-02-06 08:26:48,525 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6259, 4.7035, 4.1805, 1.8592, 4.0957, 4.2373, 4.3348, 3.9276], + device='cuda:3'), covar=tensor([0.0815, 0.0444, 0.0893, 0.4722, 0.0787, 0.0888, 0.0971, 0.0721], + device='cuda:3'), in_proj_covar=tensor([0.0450, 0.0358, 0.0380, 0.0469, 0.0372, 0.0354, 0.0363, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:27:14,908 INFO [train.py:901] (3/4) Epoch 10, batch 1850, loss[loss=0.1992, simple_loss=0.2662, pruned_loss=0.06611, over 8024.00 frames. ], tot_loss[loss=0.244, simple_loss=0.317, pruned_loss=0.08549, over 1621349.45 frames. ], batch size: 22, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:27:17,787 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:27:25,263 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74613.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:27:46,970 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.251e+02 2.722e+02 3.219e+02 4.226e+02 1.097e+03, threshold=6.437e+02, percent-clipped=2.0 +2023-02-06 08:27:50,400 INFO [train.py:901] (3/4) Epoch 10, batch 1900, loss[loss=0.2065, simple_loss=0.2755, pruned_loss=0.06872, over 7784.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.3161, pruned_loss=0.08486, over 1621497.91 frames. ], batch size: 19, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:28:13,885 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 08:28:25,274 INFO [train.py:901] (3/4) Epoch 10, batch 1950, loss[loss=0.2544, simple_loss=0.3148, pruned_loss=0.09699, over 7245.00 frames. ], tot_loss[loss=0.2429, simple_loss=0.316, pruned_loss=0.08489, over 1622243.61 frames. ], batch size: 16, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:28:25,960 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 08:28:38,209 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=74717.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:28:43,967 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 08:28:56,744 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.465e+02 3.030e+02 3.717e+02 6.494e+02, threshold=6.060e+02, percent-clipped=3.0 +2023-02-06 08:28:59,513 INFO [train.py:901] (3/4) Epoch 10, batch 2000, loss[loss=0.2095, simple_loss=0.2924, pruned_loss=0.06328, over 7936.00 frames. 
], tot_loss[loss=0.2441, simple_loss=0.317, pruned_loss=0.08555, over 1618654.83 frames. ], batch size: 20, lr: 7.78e-03, grad_scale: 8.0 +2023-02-06 08:29:34,165 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:29:34,602 INFO [train.py:901] (3/4) Epoch 10, batch 2050, loss[loss=0.2908, simple_loss=0.3453, pruned_loss=0.1181, over 6857.00 frames. ], tot_loss[loss=0.2449, simple_loss=0.3178, pruned_loss=0.086, over 1618415.77 frames. ], batch size: 71, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:29:36,988 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. limit=5.0 +2023-02-06 08:29:50,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:29:56,002 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 08:30:04,619 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.471e+02 3.084e+02 4.282e+02 1.276e+03, threshold=6.169e+02, percent-clipped=5.0 +2023-02-06 08:30:07,351 INFO [train.py:901] (3/4) Epoch 10, batch 2100, loss[loss=0.2445, simple_loss=0.3322, pruned_loss=0.07845, over 8335.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.317, pruned_loss=0.08538, over 1616129.66 frames. ], batch size: 25, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:30:43,224 INFO [train.py:901] (3/4) Epoch 10, batch 2150, loss[loss=0.2531, simple_loss=0.3368, pruned_loss=0.08466, over 8732.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.318, pruned_loss=0.08577, over 1612130.27 frames. ], batch size: 30, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:31:02,639 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:09,371 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8337, 2.3314, 3.2091, 2.0484, 1.6852, 3.2149, 0.6773, 1.8944], + device='cuda:3'), covar=tensor([0.2489, 0.2272, 0.0420, 0.2691, 0.4667, 0.0424, 0.4540, 0.2141], + device='cuda:3'), in_proj_covar=tensor([0.0163, 0.0165, 0.0092, 0.0213, 0.0255, 0.0097, 0.0161, 0.0158], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:31:13,918 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.626e+02 3.226e+02 3.775e+02 6.882e+02, threshold=6.451e+02, percent-clipped=1.0 +2023-02-06 08:31:16,707 INFO [train.py:901] (3/4) Epoch 10, batch 2200, loss[loss=0.2626, simple_loss=0.3292, pruned_loss=0.09795, over 8091.00 frames. ], tot_loss[loss=0.2437, simple_loss=0.3171, pruned_loss=0.08515, over 1614239.14 frames. ], batch size: 21, lr: 7.77e-03, grad_scale: 8.0 +2023-02-06 08:31:22,704 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=74957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:33,585 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=74973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:39,395 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=74982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:31:50,364 INFO [train.py:901] (3/4) Epoch 10, batch 2250, loss[loss=0.1943, simple_loss=0.2654, pruned_loss=0.0616, over 7700.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.316, pruned_loss=0.08457, over 1613989.79 frames. 
], batch size: 18, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:31:50,561 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=74998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:32:23,117 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.748e+02 3.468e+02 4.709e+02 1.048e+03, threshold=6.936e+02, percent-clipped=3.0 +2023-02-06 08:32:25,869 INFO [train.py:901] (3/4) Epoch 10, batch 2300, loss[loss=0.232, simple_loss=0.3194, pruned_loss=0.07235, over 8457.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3148, pruned_loss=0.08434, over 1611517.81 frames. ], batch size: 27, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:32:42,311 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:32:59,567 INFO [train.py:901] (3/4) Epoch 10, batch 2350, loss[loss=0.2277, simple_loss=0.3139, pruned_loss=0.07079, over 8479.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3148, pruned_loss=0.08408, over 1613106.76 frames. ], batch size: 29, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:33:01,993 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 08:33:33,027 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.526e+02 3.215e+02 4.182e+02 1.054e+03, threshold=6.430e+02, percent-clipped=5.0 +2023-02-06 08:33:35,803 INFO [train.py:901] (3/4) Epoch 10, batch 2400, loss[loss=0.2929, simple_loss=0.354, pruned_loss=0.1159, over 8558.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3147, pruned_loss=0.08375, over 1615803.30 frames. ], batch size: 31, lr: 7.76e-03, grad_scale: 8.0 +2023-02-06 08:34:08,759 INFO [train.py:901] (3/4) Epoch 10, batch 2450, loss[loss=0.1791, simple_loss=0.2525, pruned_loss=0.05284, over 7435.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3142, pruned_loss=0.08395, over 1614042.44 frames. ], batch size: 17, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:34:40,830 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.613e+02 3.092e+02 4.227e+02 1.037e+03, threshold=6.184e+02, percent-clipped=5.0 +2023-02-06 08:34:43,532 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:34:44,709 INFO [train.py:901] (3/4) Epoch 10, batch 2500, loss[loss=0.2405, simple_loss=0.3245, pruned_loss=0.07828, over 8327.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3155, pruned_loss=0.08444, over 1617321.47 frames. ], batch size: 26, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:00,245 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:11,672 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=75288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:18,077 INFO [train.py:901] (3/4) Epoch 10, batch 2550, loss[loss=0.219, simple_loss=0.3004, pruned_loss=0.06874, over 8089.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3161, pruned_loss=0.08463, over 1618243.99 frames. 
], batch size: 21, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:33,985 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([6.0762, 1.8090, 6.1553, 2.2722, 5.4912, 5.1438, 5.7588, 5.5907], + device='cuda:3'), covar=tensor([0.0306, 0.3633, 0.0249, 0.2810, 0.0852, 0.0594, 0.0339, 0.0382], + device='cuda:3'), in_proj_covar=tensor([0.0448, 0.0559, 0.0566, 0.0516, 0.0589, 0.0506, 0.0492, 0.0555], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:35:36,598 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:38,120 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:35:49,134 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.664e+02 3.245e+02 3.791e+02 6.757e+02, threshold=6.490e+02, percent-clipped=2.0 +2023-02-06 08:35:51,832 INFO [train.py:901] (3/4) Epoch 10, batch 2600, loss[loss=0.2248, simple_loss=0.3055, pruned_loss=0.07208, over 8032.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.315, pruned_loss=0.08412, over 1616765.70 frames. ], batch size: 22, lr: 7.75e-03, grad_scale: 8.0 +2023-02-06 08:35:55,417 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:02,041 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7759, 3.3924, 2.0943, 2.4757, 2.3785, 1.6668, 2.2819, 2.9004], + device='cuda:3'), covar=tensor([0.1553, 0.0313, 0.1127, 0.0790, 0.0877, 0.1649, 0.1224, 0.0827], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0236, 0.0314, 0.0299, 0.0308, 0.0322, 0.0341, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:36:08,176 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7406, 2.3621, 4.8716, 1.3838, 3.3047, 2.4743, 1.7964, 2.9993], + device='cuda:3'), covar=tensor([0.1516, 0.2181, 0.0550, 0.3649, 0.1369, 0.2444, 0.1688, 0.2015], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0498, 0.0532, 0.0566, 0.0605, 0.0544, 0.0464, 0.0605], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:36:19,405 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:27,352 INFO [train.py:901] (3/4) Epoch 10, batch 2650, loss[loss=0.2226, simple_loss=0.3087, pruned_loss=0.06824, over 8335.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3151, pruned_loss=0.0839, over 1611200.18 frames. ], batch size: 26, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:36:56,752 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:36:58,551 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.723e+02 3.413e+02 4.384e+02 8.455e+02, threshold=6.827e+02, percent-clipped=3.0 +2023-02-06 08:37:01,351 INFO [train.py:901] (3/4) Epoch 10, batch 2700, loss[loss=0.2218, simple_loss=0.3061, pruned_loss=0.06872, over 8348.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.315, pruned_loss=0.08408, over 1611299.87 frames. 
], batch size: 26, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:37:37,692 INFO [train.py:901] (3/4) Epoch 10, batch 2750, loss[loss=0.2258, simple_loss=0.2863, pruned_loss=0.08269, over 7704.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.3159, pruned_loss=0.08434, over 1612209.58 frames. ], batch size: 18, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:37:47,098 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5190, 1.5507, 1.7463, 1.4181, 1.0536, 1.7764, 0.1128, 1.1393], + device='cuda:3'), covar=tensor([0.3443, 0.2033, 0.0750, 0.1700, 0.4681, 0.0626, 0.3392, 0.1987], + device='cuda:3'), in_proj_covar=tensor([0.0164, 0.0165, 0.0094, 0.0217, 0.0258, 0.0099, 0.0164, 0.0161], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:37:55,834 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5685, 2.0803, 3.0805, 2.4039, 2.7122, 2.2636, 1.8141, 1.4458], + device='cuda:3'), covar=tensor([0.3205, 0.3540, 0.0950, 0.2377, 0.1881, 0.2050, 0.1639, 0.3893], + device='cuda:3'), in_proj_covar=tensor([0.0863, 0.0840, 0.0711, 0.0823, 0.0920, 0.0779, 0.0697, 0.0755], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:38:01,676 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6985, 1.9127, 1.6417, 2.3382, 1.1649, 1.3771, 1.6587, 1.8568], + device='cuda:3'), covar=tensor([0.0855, 0.0871, 0.1087, 0.0454, 0.1226, 0.1530, 0.0918, 0.0800], + device='cuda:3'), in_proj_covar=tensor([0.0245, 0.0220, 0.0265, 0.0220, 0.0225, 0.0257, 0.0265, 0.0226], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 08:38:06,718 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 08:38:08,183 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.897e+02 2.609e+02 3.111e+02 3.957e+02 1.084e+03, threshold=6.223e+02, percent-clipped=3.0 +2023-02-06 08:38:10,727 INFO [train.py:901] (3/4) Epoch 10, batch 2800, loss[loss=0.313, simple_loss=0.3589, pruned_loss=0.1335, over 7511.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3156, pruned_loss=0.08436, over 1610786.07 frames. ], batch size: 71, lr: 7.74e-03, grad_scale: 8.0 +2023-02-06 08:38:39,098 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:38:45,072 INFO [train.py:901] (3/4) Epoch 10, batch 2850, loss[loss=0.2427, simple_loss=0.3078, pruned_loss=0.08886, over 8241.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3163, pruned_loss=0.08499, over 1612998.06 frames. ], batch size: 22, lr: 7.73e-03, grad_scale: 8.0 +2023-02-06 08:38:48,925 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 08:39:09,362 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=75632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:16,276 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:17,406 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.670e+02 3.172e+02 3.749e+02 6.038e+02, threshold=6.343e+02, percent-clipped=0.0 +2023-02-06 08:39:19,599 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6291, 1.8016, 2.1682, 1.5886, 1.1674, 2.1866, 0.2707, 1.1894], + device='cuda:3'), covar=tensor([0.3079, 0.2000, 0.0551, 0.2451, 0.5213, 0.0558, 0.3963, 0.2325], + device='cuda:3'), in_proj_covar=tensor([0.0164, 0.0164, 0.0093, 0.0217, 0.0257, 0.0099, 0.0163, 0.0160], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:39:20,067 INFO [train.py:901] (3/4) Epoch 10, batch 2900, loss[loss=0.2128, simple_loss=0.2894, pruned_loss=0.06808, over 8089.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3163, pruned_loss=0.0853, over 1608894.34 frames. ], batch size: 21, lr: 7.73e-03, grad_scale: 8.0 +2023-02-06 08:39:20,527 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 08:39:32,778 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:49,793 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 08:39:53,286 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:39:53,747 INFO [train.py:901] (3/4) Epoch 10, batch 2950, loss[loss=0.25, simple_loss=0.3234, pruned_loss=0.08824, over 8357.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3164, pruned_loss=0.08526, over 1610770.85 frames. ], batch size: 24, lr: 7.73e-03, grad_scale: 16.0 +2023-02-06 08:39:58,782 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:11,389 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:26,686 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.480e+02 3.030e+02 3.596e+02 1.304e+03, threshold=6.060e+02, percent-clipped=4.0 +2023-02-06 08:40:28,982 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=75747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:40:29,500 INFO [train.py:901] (3/4) Epoch 10, batch 3000, loss[loss=0.2117, simple_loss=0.2944, pruned_loss=0.06453, over 8446.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3169, pruned_loss=0.08502, over 1617791.09 frames. ], batch size: 25, lr: 7.73e-03, grad_scale: 16.0 +2023-02-06 08:40:29,500 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 08:40:41,876 INFO [train.py:935] (3/4) Epoch 10, validation: loss=0.1918, simple_loss=0.2916, pruned_loss=0.04599, over 944034.00 frames. +2023-02-06 08:40:41,877 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 08:41:14,531 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. 
limit=2.0 +2023-02-06 08:41:15,547 INFO [train.py:901] (3/4) Epoch 10, batch 3050, loss[loss=0.253, simple_loss=0.3287, pruned_loss=0.0887, over 8230.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3166, pruned_loss=0.08479, over 1618255.65 frames. ], batch size: 22, lr: 7.72e-03, grad_scale: 16.0 +2023-02-06 08:41:47,920 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.518e+02 3.138e+02 4.468e+02 1.006e+03, threshold=6.276e+02, percent-clipped=13.0 +2023-02-06 08:41:50,035 INFO [train.py:901] (3/4) Epoch 10, batch 3100, loss[loss=0.264, simple_loss=0.334, pruned_loss=0.097, over 8449.00 frames. ], tot_loss[loss=0.2438, simple_loss=0.3175, pruned_loss=0.08501, over 1618636.20 frames. ], batch size: 27, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:41:55,058 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2018, 1.8264, 2.6671, 2.2186, 2.3815, 2.0467, 1.6678, 1.1922], + device='cuda:3'), covar=tensor([0.3205, 0.3210, 0.0926, 0.2040, 0.1401, 0.1831, 0.1507, 0.3284], + device='cuda:3'), in_proj_covar=tensor([0.0868, 0.0848, 0.0715, 0.0828, 0.0919, 0.0781, 0.0699, 0.0757], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:42:25,563 INFO [train.py:901] (3/4) Epoch 10, batch 3150, loss[loss=0.2652, simple_loss=0.3334, pruned_loss=0.0985, over 7025.00 frames. ], tot_loss[loss=0.2453, simple_loss=0.3189, pruned_loss=0.08587, over 1616052.43 frames. ], batch size: 71, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:42:48,067 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5198, 1.3147, 4.7204, 1.9027, 4.1354, 3.9185, 4.2148, 4.1434], + device='cuda:3'), covar=tensor([0.0488, 0.4205, 0.0406, 0.2801, 0.0936, 0.0725, 0.0478, 0.0510], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0562, 0.0572, 0.0521, 0.0599, 0.0510, 0.0497, 0.0560], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:42:57,478 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.584e+02 3.323e+02 3.941e+02 8.938e+02, threshold=6.646e+02, percent-clipped=3.0 +2023-02-06 08:42:59,542 INFO [train.py:901] (3/4) Epoch 10, batch 3200, loss[loss=0.2417, simple_loss=0.3253, pruned_loss=0.07906, over 8365.00 frames. ], tot_loss[loss=0.246, simple_loss=0.3193, pruned_loss=0.08639, over 1618699.67 frames. ], batch size: 24, lr: 7.72e-03, grad_scale: 8.0 +2023-02-06 08:43:09,325 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=75961.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:28,326 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=75986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:28,984 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4294, 2.7781, 1.9521, 2.2626, 2.3791, 1.5072, 2.1076, 2.2391], + device='cuda:3'), covar=tensor([0.1416, 0.0305, 0.0990, 0.0640, 0.0621, 0.1429, 0.0950, 0.0969], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0237, 0.0314, 0.0301, 0.0312, 0.0326, 0.0343, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:43:36,360 INFO [train.py:901] (3/4) Epoch 10, batch 3250, loss[loss=0.2004, simple_loss=0.2906, pruned_loss=0.05512, over 8190.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3174, pruned_loss=0.08475, over 1620868.58 frames. 
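The `scaling.py ... Whitening: num_groups=..., metric=... vs. limit=...` lines report how far a layer's activations are from being "white" (covariance proportional to the identity); a corrective penalty applies only when the metric exceeds the limit. The function below computes one standard whiteness measure consistent with the logged values, equal to 1.0 for perfectly white features and growing with correlation; icefall's exact formula in `scaling.py` may differ in detail, so treat this as an illustrative stand-in.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    """Mean over channel groups of E[eig^2] / (E[eig])^2 of the covariance."""
    n, c = x.shape                       # x: (num_frames, num_channels)
    assert c % num_groups == 0
    x = x.reshape(n, num_groups, c // num_groups)
    x = x - x.mean(dim=0, keepdim=True)  # center each channel
    metrics = []
    for g in range(num_groups):
        cov = x[:, g, :].T @ x[:, g, :] / n      # per-group covariance
        eig = torch.linalg.eigvalsh(cov)         # its (real) eigenvalues
        metrics.append((eig.pow(2).mean() / eig.mean().pow(2)).item())
    return float(sum(metrics) / len(metrics))
```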
], batch size: 23, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:43:41,258 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=76003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:43:57,880 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=76028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:44:09,009 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.688e+02 3.300e+02 3.989e+02 9.835e+02, threshold=6.601e+02, percent-clipped=4.0 +2023-02-06 08:44:11,012 INFO [train.py:901] (3/4) Epoch 10, batch 3300, loss[loss=0.2072, simple_loss=0.2888, pruned_loss=0.06282, over 8242.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3168, pruned_loss=0.08432, over 1620352.65 frames. ], batch size: 22, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:44:20,564 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0487, 1.2527, 4.3076, 1.6657, 3.7421, 3.6135, 3.8992, 3.7322], + device='cuda:3'), covar=tensor([0.0517, 0.3977, 0.0496, 0.2995, 0.1179, 0.0787, 0.0528, 0.0658], + device='cuda:3'), in_proj_covar=tensor([0.0449, 0.0556, 0.0564, 0.0516, 0.0591, 0.0504, 0.0491, 0.0555], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:44:47,577 INFO [train.py:901] (3/4) Epoch 10, batch 3350, loss[loss=0.2267, simple_loss=0.2962, pruned_loss=0.07856, over 8081.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3161, pruned_loss=0.0838, over 1620714.62 frames. ], batch size: 21, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:44:58,403 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8148, 1.4091, 2.8436, 1.2694, 2.0706, 3.0092, 3.1255, 2.5398], + device='cuda:3'), covar=tensor([0.0941, 0.1362, 0.0393, 0.1992, 0.0821, 0.0311, 0.0571, 0.0724], + device='cuda:3'), in_proj_covar=tensor([0.0255, 0.0293, 0.0252, 0.0283, 0.0268, 0.0233, 0.0318, 0.0288], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:45:18,633 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.652e+02 3.239e+02 4.192e+02 7.352e+02, threshold=6.477e+02, percent-clipped=1.0 +2023-02-06 08:45:20,663 INFO [train.py:901] (3/4) Epoch 10, batch 3400, loss[loss=0.2264, simple_loss=0.2999, pruned_loss=0.07642, over 7979.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3179, pruned_loss=0.085, over 1622729.81 frames. ], batch size: 21, lr: 7.71e-03, grad_scale: 8.0 +2023-02-06 08:45:55,812 INFO [train.py:901] (3/4) Epoch 10, batch 3450, loss[loss=0.2303, simple_loss=0.318, pruned_loss=0.0713, over 8325.00 frames. ], tot_loss[loss=0.2447, simple_loss=0.3182, pruned_loss=0.08558, over 1618876.53 frames. ], batch size: 25, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:46:30,137 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.394e+02 3.045e+02 3.881e+02 9.338e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-06 08:46:32,218 INFO [train.py:901] (3/4) Epoch 10, batch 3500, loss[loss=0.2622, simple_loss=0.3323, pruned_loss=0.09603, over 8145.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.3161, pruned_loss=0.08438, over 1614108.00 frames. ], batch size: 22, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:46:48,062 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-06 08:46:57,629 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 08:46:59,307 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76287.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:47:06,735 INFO [train.py:901] (3/4) Epoch 10, batch 3550, loss[loss=0.2477, simple_loss=0.3177, pruned_loss=0.08884, over 8465.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3156, pruned_loss=0.08389, over 1616847.97 frames. ], batch size: 25, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:47:17,274 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:47:39,380 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76341.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:47:42,024 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.850e+02 2.725e+02 3.470e+02 4.316e+02 7.747e+02, threshold=6.941e+02, percent-clipped=6.0 +2023-02-06 08:47:44,160 INFO [train.py:901] (3/4) Epoch 10, batch 3600, loss[loss=0.2336, simple_loss=0.3176, pruned_loss=0.07477, over 8483.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3154, pruned_loss=0.08343, over 1618181.90 frames. ], batch size: 25, lr: 7.70e-03, grad_scale: 8.0 +2023-02-06 08:48:18,380 INFO [train.py:901] (3/4) Epoch 10, batch 3650, loss[loss=0.2642, simple_loss=0.3377, pruned_loss=0.09535, over 8461.00 frames. ], tot_loss[loss=0.2405, simple_loss=0.3143, pruned_loss=0.08337, over 1613804.77 frames. ], batch size: 27, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:50,989 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.530e+02 3.057e+02 3.624e+02 8.995e+02, threshold=6.114e+02, percent-clipped=3.0 +2023-02-06 08:48:52,995 INFO [train.py:901] (3/4) Epoch 10, batch 3700, loss[loss=0.2097, simple_loss=0.2816, pruned_loss=0.06894, over 7441.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3128, pruned_loss=0.08282, over 1611380.40 frames. ], batch size: 17, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:48:54,933 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 08:49:28,890 INFO [train.py:901] (3/4) Epoch 10, batch 3750, loss[loss=0.2752, simple_loss=0.3523, pruned_loss=0.09898, over 8331.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3131, pruned_loss=0.08286, over 1611842.60 frames. ], batch size: 26, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:00,301 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.698e+02 2.765e+02 3.416e+02 4.278e+02 1.031e+03, threshold=6.832e+02, percent-clipped=4.0 +2023-02-06 08:50:02,995 INFO [train.py:901] (3/4) Epoch 10, batch 3800, loss[loss=0.2347, simple_loss=0.3095, pruned_loss=0.07992, over 8696.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3148, pruned_loss=0.08406, over 1612201.04 frames. 
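The `WARNING ... Exclude cut with ID ... from training. Duration: ...` lines show the data pipeline discarding utterances whose duration falls outside the range the recipe can batch efficiently: both very long cuts (25-33 s in this log) and, after speed perturbation (the `_sp0.9`/`_sp1.1` suffixes), cuts shorter than a second. A minimal sketch of such a filter; the bounds here are placeholders, not the recipe's actual limits:

```python
import logging

MIN_SECS, MAX_SECS = 1.0, 20.0  # hypothetical bounds; tune per recipe

def keep_cut(cut_id: str, duration: float) -> bool:
    """Return False (and warn, matching the log format) for unusable cuts."""
    if not (MIN_SECS <= duration <= MAX_SECS):
        logging.warning(
            "Exclude cut with ID %s from training. Duration: %s",
            cut_id, duration,
        )
        return False
    return True
```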
], batch size: 34, lr: 7.69e-03, grad_scale: 8.0 +2023-02-06 08:50:19,507 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4247, 2.7478, 1.6708, 2.2350, 2.1334, 1.3666, 1.8651, 2.1654], + device='cuda:3'), covar=tensor([0.1494, 0.0326, 0.1155, 0.0657, 0.0697, 0.1543, 0.1126, 0.0982], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0236, 0.0312, 0.0297, 0.0304, 0.0323, 0.0339, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:50:38,441 INFO [train.py:901] (3/4) Epoch 10, batch 3850, loss[loss=0.2856, simple_loss=0.3567, pruned_loss=0.1073, over 8517.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3145, pruned_loss=0.0836, over 1616258.50 frames. ], batch size: 28, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:50:59,044 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 08:51:00,490 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76631.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:51:09,921 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.563e+02 3.093e+02 4.191e+02 1.151e+03, threshold=6.187e+02, percent-clipped=5.0 +2023-02-06 08:51:11,978 INFO [train.py:901] (3/4) Epoch 10, batch 3900, loss[loss=0.2276, simple_loss=0.2701, pruned_loss=0.09257, over 7690.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.3139, pruned_loss=0.08322, over 1617591.71 frames. ], batch size: 18, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:51:17,437 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:51:38,047 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=76685.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:51:48,068 INFO [train.py:901] (3/4) Epoch 10, batch 3950, loss[loss=0.2595, simple_loss=0.3429, pruned_loss=0.08801, over 8475.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3136, pruned_loss=0.08298, over 1615771.14 frames. ], batch size: 27, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:52:19,564 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.430e+02 3.097e+02 3.693e+02 7.444e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-06 08:52:20,454 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76746.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:52:21,609 INFO [train.py:901] (3/4) Epoch 10, batch 4000, loss[loss=0.2778, simple_loss=0.3435, pruned_loss=0.106, over 8518.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3131, pruned_loss=0.08271, over 1614210.20 frames. ], batch size: 26, lr: 7.68e-03, grad_scale: 8.0 +2023-02-06 08:52:37,416 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:52:56,080 INFO [train.py:901] (3/4) Epoch 10, batch 4050, loss[loss=0.2448, simple_loss=0.3141, pruned_loss=0.08775, over 7814.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3153, pruned_loss=0.08389, over 1617152.86 frames. 
], batch size: 20, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:52:57,687 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=76800.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:53:29,319 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.637e+02 3.294e+02 4.061e+02 9.505e+02, threshold=6.587e+02, percent-clipped=7.0 +2023-02-06 08:53:31,235 INFO [train.py:901] (3/4) Epoch 10, batch 4100, loss[loss=0.3016, simple_loss=0.3686, pruned_loss=0.1173, over 8528.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3155, pruned_loss=0.08417, over 1612530.27 frames. ], batch size: 26, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:53:37,245 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=76857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:53:48,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3093, 2.1928, 1.5613, 1.8468, 1.7695, 1.3555, 1.5931, 1.6723], + device='cuda:3'), covar=tensor([0.1121, 0.0346, 0.1014, 0.0575, 0.0661, 0.1331, 0.0866, 0.0798], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0234, 0.0311, 0.0297, 0.0304, 0.0325, 0.0339, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 08:54:04,757 INFO [train.py:901] (3/4) Epoch 10, batch 4150, loss[loss=0.2521, simple_loss=0.3166, pruned_loss=0.09375, over 8604.00 frames. ], tot_loss[loss=0.2402, simple_loss=0.314, pruned_loss=0.08321, over 1612066.36 frames. ], batch size: 49, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:54:10,375 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.40 vs. limit=5.0 +2023-02-06 08:54:34,616 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.39 vs. limit=5.0 +2023-02-06 08:54:38,768 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.505e+02 2.967e+02 3.617e+02 8.554e+02, threshold=5.933e+02, percent-clipped=2.0 +2023-02-06 08:54:39,588 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7885, 5.9240, 5.1105, 2.5127, 5.1098, 5.5245, 5.5325, 5.1544], + device='cuda:3'), covar=tensor([0.0577, 0.0419, 0.0852, 0.4177, 0.0682, 0.0522, 0.1011, 0.0604], + device='cuda:3'), in_proj_covar=tensor([0.0451, 0.0362, 0.0372, 0.0471, 0.0366, 0.0357, 0.0364, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 08:54:40,857 INFO [train.py:901] (3/4) Epoch 10, batch 4200, loss[loss=0.2278, simple_loss=0.2959, pruned_loss=0.07986, over 7921.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3142, pruned_loss=0.08325, over 1612181.68 frames. ], batch size: 20, lr: 7.67e-03, grad_scale: 8.0 +2023-02-06 08:55:00,913 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 08:55:14,235 INFO [train.py:901] (3/4) Epoch 10, batch 4250, loss[loss=0.2428, simple_loss=0.309, pruned_loss=0.08832, over 7935.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3141, pruned_loss=0.08362, over 1610166.60 frames. ], batch size: 20, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:17,201 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77002.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:23,802 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 08:55:34,055 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77027.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 08:55:34,065 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:46,496 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.530e+02 3.131e+02 3.743e+02 6.568e+02, threshold=6.262e+02, percent-clipped=1.0 +2023-02-06 08:55:48,447 INFO [train.py:901] (3/4) Epoch 10, batch 4300, loss[loss=0.2938, simple_loss=0.3419, pruned_loss=0.1228, over 7932.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3145, pruned_loss=0.08411, over 1607397.57 frames. ], batch size: 20, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:55:52,725 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:55:55,462 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77056.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:56:12,959 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77081.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 08:56:19,608 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7061, 1.4660, 1.6781, 1.5411, 1.1981, 1.5980, 2.2503, 2.0383], + device='cuda:3'), covar=tensor([0.0487, 0.1380, 0.1814, 0.1403, 0.0594, 0.1569, 0.0637, 0.0610], + device='cuda:3'), in_proj_covar=tensor([0.0103, 0.0157, 0.0196, 0.0162, 0.0106, 0.0166, 0.0119, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 08:56:23,978 INFO [train.py:901] (3/4) Epoch 10, batch 4350, loss[loss=0.2562, simple_loss=0.3236, pruned_loss=0.09437, over 8467.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3143, pruned_loss=0.08362, over 1608527.68 frames. ], batch size: 25, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:56:30,758 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8375, 2.8150, 3.2914, 2.0378, 1.6409, 3.2102, 0.6250, 2.0254], + device='cuda:3'), covar=tensor([0.2296, 0.1192, 0.0380, 0.2496, 0.4393, 0.0488, 0.3918, 0.1942], + device='cuda:3'), in_proj_covar=tensor([0.0159, 0.0160, 0.0091, 0.0207, 0.0251, 0.0099, 0.0159, 0.0154], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0001, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 08:56:33,989 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:56:53,837 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 08:56:54,997 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.751e+02 3.331e+02 4.491e+02 1.022e+03, threshold=6.663e+02, percent-clipped=8.0 +2023-02-06 08:56:57,038 INFO [train.py:901] (3/4) Epoch 10, batch 4400, loss[loss=0.2145, simple_loss=0.2986, pruned_loss=0.06523, over 8457.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3155, pruned_loss=0.08403, over 1613771.95 frames. ], batch size: 25, lr: 7.66e-03, grad_scale: 8.0 +2023-02-06 08:57:33,159 INFO [train.py:901] (3/4) Epoch 10, batch 4450, loss[loss=0.2032, simple_loss=0.297, pruned_loss=0.05472, over 8102.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3146, pruned_loss=0.08356, over 1613601.18 frames. 
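The `zipformer.py:1185 ... warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop={...}` lines record stochastic layer dropout: each encoder stack has its own warmup window, and on each batch a (usually empty) random subset of its layers is skipped. The sketch below conveys only the shape of such a schedule; the probabilities and the linear interpolation are invented for illustration and do not reproduce icefall's actual rule.

```python
import random

def choose_layers_to_drop(batch_count: float, warmup_begin: float,
                          warmup_end: float, num_layers: int,
                          steady_p: float = 0.075) -> set:
    """Pick a random subset of layers to skip this batch (illustrative only)."""
    if batch_count >= warmup_end:
        p = steady_p                          # rare drops once warmed up
    elif batch_count <= warmup_begin:
        p = 0.5                               # drop aggressively at the start
    else:                                     # interpolate across the window
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = 0.5 + frac * (steady_p - 0.5)
    return {i for i in range(num_layers) if random.random() < p}
```

At `batch_count` ≈ 75k, far past every `warmup_end` of 4000, a small steady-state probability yields `num_to_drop=0` on most batches and an occasional single dropped layer, matching the log.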
], batch size: 23, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:57:35,381 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:57:36,009 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 08:57:54,205 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77229.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:04,751 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.784e+02 3.289e+02 4.035e+02 8.452e+02, threshold=6.579e+02, percent-clipped=2.0 +2023-02-06 08:58:06,781 INFO [train.py:901] (3/4) Epoch 10, batch 4500, loss[loss=0.2005, simple_loss=0.2852, pruned_loss=0.05791, over 7815.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3142, pruned_loss=0.08352, over 1611654.91 frames. ], batch size: 20, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:27,560 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 08:58:31,892 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6339, 2.0190, 2.1925, 1.1292, 2.3803, 1.6433, 0.6466, 1.8400], + device='cuda:3'), covar=tensor([0.0398, 0.0199, 0.0189, 0.0366, 0.0226, 0.0494, 0.0512, 0.0176], + device='cuda:3'), in_proj_covar=tensor([0.0373, 0.0305, 0.0262, 0.0370, 0.0299, 0.0456, 0.0347, 0.0338], + device='cuda:3'), out_proj_covar=tensor([1.0826e-04, 8.6417e-05, 7.4624e-05, 1.0589e-04, 8.6720e-05, 1.4185e-04, + 1.0121e-04, 9.8024e-05], device='cuda:3') +2023-02-06 08:58:37,994 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:39,411 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7990, 1.9578, 5.9218, 2.1209, 5.1425, 4.9236, 5.4706, 5.2578], + device='cuda:3'), covar=tensor([0.0486, 0.4009, 0.0329, 0.3298, 0.1149, 0.0809, 0.0528, 0.0561], + device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0567, 0.0569, 0.0526, 0.0600, 0.0508, 0.0501, 0.0568], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:58:41,458 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:58:43,379 INFO [train.py:901] (3/4) Epoch 10, batch 4550, loss[loss=0.2805, simple_loss=0.3523, pruned_loss=0.1043, over 8464.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3128, pruned_loss=0.08244, over 1611123.99 frames. 
], batch size: 27, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:58:50,337 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0967, 1.2541, 1.2117, 0.6730, 1.2442, 0.9928, 0.1374, 1.1749], + device='cuda:3'), covar=tensor([0.0241, 0.0181, 0.0161, 0.0279, 0.0218, 0.0553, 0.0438, 0.0161], + device='cuda:3'), in_proj_covar=tensor([0.0374, 0.0308, 0.0263, 0.0372, 0.0301, 0.0460, 0.0350, 0.0340], + device='cuda:3'), out_proj_covar=tensor([1.0883e-04, 8.7470e-05, 7.5054e-05, 1.0640e-04, 8.7437e-05, 1.4295e-04, + 1.0188e-04, 9.8406e-05], device='cuda:3') +2023-02-06 08:58:55,664 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 08:59:14,833 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.639e+02 3.213e+02 4.072e+02 8.769e+02, threshold=6.426e+02, percent-clipped=3.0 +2023-02-06 08:59:16,946 INFO [train.py:901] (3/4) Epoch 10, batch 4600, loss[loss=0.2316, simple_loss=0.3169, pruned_loss=0.07313, over 8366.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3147, pruned_loss=0.08399, over 1610798.26 frames. ], batch size: 24, lr: 7.65e-03, grad_scale: 8.0 +2023-02-06 08:59:35,820 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4965, 2.0777, 3.0278, 2.4343, 2.7155, 2.2159, 1.7866, 1.3400], + device='cuda:3'), covar=tensor([0.3326, 0.3489, 0.1050, 0.2146, 0.1687, 0.1866, 0.1521, 0.3760], + device='cuda:3'), in_proj_covar=tensor([0.0854, 0.0835, 0.0703, 0.0822, 0.0908, 0.0767, 0.0691, 0.0746], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 08:59:43,210 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 08:59:50,930 INFO [train.py:901] (3/4) Epoch 10, batch 4650, loss[loss=0.2277, simple_loss=0.3019, pruned_loss=0.07678, over 8427.00 frames. ], tot_loss[loss=0.2428, simple_loss=0.3158, pruned_loss=0.08488, over 1615083.75 frames. ], batch size: 48, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:25,440 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.953e+02 3.048e+02 3.591e+02 4.434e+02 8.168e+02, threshold=7.182e+02, percent-clipped=8.0 +2023-02-06 09:00:27,540 INFO [train.py:901] (3/4) Epoch 10, batch 4700, loss[loss=0.2478, simple_loss=0.331, pruned_loss=0.08237, over 8491.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3148, pruned_loss=0.08446, over 1612631.17 frames. ], batch size: 29, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:00:33,885 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:00:47,328 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2385, 1.6383, 1.6912, 1.3851, 1.0655, 1.5196, 1.7866, 1.7266], + device='cuda:3'), covar=tensor([0.0499, 0.1159, 0.1608, 0.1358, 0.0601, 0.1517, 0.0666, 0.0579], + device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0194, 0.0160, 0.0106, 0.0165, 0.0118, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 09:01:02,748 INFO [train.py:901] (3/4) Epoch 10, batch 4750, loss[loss=0.2268, simple_loss=0.2832, pruned_loss=0.08518, over 7545.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3148, pruned_loss=0.08397, over 1617681.23 frames. 
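The `attn_weights_entropy` tensors are a diagnostic: the entropy of each attention head's distribution over keys, where a low value means peaky, near-one-hot attention (note the occasional head near 0.1-0.6 above) and a high value means diffuse attention. Computing such a statistic is straightforward; the function name here is ours, but the math is standard Shannon entropy:

```python
import torch

def attn_entropy(attn_weights: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    """Entropy per head, averaged over query positions.

    attn_weights: (..., num_heads, query_len, key_len); rows sum to 1.
    """
    p = attn_weights.clamp_min(eps)          # avoid log(0)
    ent = -(p * p.log()).sum(dim=-1)         # Shannon entropy over keys
    return ent.mean(dim=-1)                  # average over queries
```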
], batch size: 18, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:28,497 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 09:01:30,530 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 09:01:35,881 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.648e+02 3.312e+02 4.103e+02 1.054e+03, threshold=6.623e+02, percent-clipped=5.0 +2023-02-06 09:01:37,937 INFO [train.py:901] (3/4) Epoch 10, batch 4800, loss[loss=0.2253, simple_loss=0.2937, pruned_loss=0.07846, over 8085.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3156, pruned_loss=0.08479, over 1613865.31 frames. ], batch size: 21, lr: 7.64e-03, grad_scale: 8.0 +2023-02-06 09:01:54,061 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,103 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:01:54,648 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:00,287 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 09:02:10,902 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:11,384 INFO [train.py:901] (3/4) Epoch 10, batch 4850, loss[loss=0.2554, simple_loss=0.3345, pruned_loss=0.08814, over 8670.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.3159, pruned_loss=0.08523, over 1611591.02 frames. ], batch size: 39, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:16,290 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 09:02:38,007 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:38,638 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:42,018 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77639.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:02:46,052 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.616e+02 3.128e+02 3.870e+02 7.279e+02, threshold=6.256e+02, percent-clipped=1.0 +2023-02-06 09:02:48,055 INFO [train.py:901] (3/4) Epoch 10, batch 4900, loss[loss=0.1961, simple_loss=0.2736, pruned_loss=0.05928, over 7813.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3156, pruned_loss=0.08488, over 1615573.42 frames. ], batch size: 20, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:02:59,929 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.50 vs. limit=5.0 +2023-02-06 09:03:00,785 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.62 vs. 
limit=5.0 +2023-02-06 09:03:14,724 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1184, 1.6416, 1.6248, 1.2497, 1.1221, 1.4903, 1.8783, 1.6840], + device='cuda:3'), covar=tensor([0.0493, 0.1177, 0.1670, 0.1401, 0.0591, 0.1414, 0.0670, 0.0589], + device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0195, 0.0160, 0.0105, 0.0165, 0.0118, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006], + device='cuda:3') +2023-02-06 09:03:15,420 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77688.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:20,005 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=77695.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:03:21,727 INFO [train.py:901] (3/4) Epoch 10, batch 4950, loss[loss=0.3122, simple_loss=0.3639, pruned_loss=0.1303, over 7009.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3153, pruned_loss=0.08507, over 1608952.01 frames. ], batch size: 71, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:38,878 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 09:03:54,524 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.777e+02 2.775e+02 3.348e+02 4.012e+02 9.680e+02, threshold=6.695e+02, percent-clipped=4.0 +2023-02-06 09:03:57,189 INFO [train.py:901] (3/4) Epoch 10, batch 5000, loss[loss=0.2839, simple_loss=0.3513, pruned_loss=0.1082, over 8356.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.3148, pruned_loss=0.08487, over 1605906.24 frames. ], batch size: 24, lr: 7.63e-03, grad_scale: 8.0 +2023-02-06 09:03:58,711 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:01,945 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=77754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:14,753 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 09:04:30,716 INFO [train.py:901] (3/4) Epoch 10, batch 5050, loss[loss=0.2161, simple_loss=0.2922, pruned_loss=0.07001, over 7924.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3157, pruned_loss=0.0857, over 1606972.35 frames. ], batch size: 20, lr: 7.62e-03, grad_scale: 8.0 +2023-02-06 09:04:39,315 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 09:04:51,132 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:04:52,878 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 09:05:02,962 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.592e+02 3.051e+02 4.098e+02 9.089e+02, threshold=6.102e+02, percent-clipped=4.0 +2023-02-06 09:05:05,646 INFO [train.py:901] (3/4) Epoch 10, batch 5100, loss[loss=0.2765, simple_loss=0.3495, pruned_loss=0.1017, over 8250.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3154, pruned_loss=0.08493, over 1610905.79 frames. 
], batch size: 24, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:05:09,145 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:05:18,408 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6723, 2.3490, 4.4508, 1.2302, 3.0375, 2.0552, 1.6136, 2.6140], + device='cuda:3'), covar=tensor([0.1689, 0.2095, 0.0678, 0.3820, 0.1495, 0.2809, 0.1780, 0.2306], + device='cuda:3'), in_proj_covar=tensor([0.0488, 0.0510, 0.0534, 0.0576, 0.0613, 0.0556, 0.0467, 0.0608], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 09:05:40,062 INFO [train.py:901] (3/4) Epoch 10, batch 5150, loss[loss=0.2871, simple_loss=0.3455, pruned_loss=0.1144, over 8300.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3157, pruned_loss=0.08474, over 1614750.10 frames. ], batch size: 23, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:05:53,326 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.97 vs. limit=5.0 +2023-02-06 09:06:11,276 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=77944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:11,738 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.968e+02 2.805e+02 3.349e+02 3.898e+02 8.134e+02, threshold=6.697e+02, percent-clipped=4.0 +2023-02-06 09:06:13,811 INFO [train.py:901] (3/4) Epoch 10, batch 5200, loss[loss=0.2443, simple_loss=0.3241, pruned_loss=0.08218, over 8340.00 frames. ], tot_loss[loss=0.2442, simple_loss=0.3174, pruned_loss=0.08554, over 1613068.47 frames. ], batch size: 26, lr: 7.62e-03, grad_scale: 16.0 +2023-02-06 09:06:29,756 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=77969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:35,813 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=77978.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:06:50,971 INFO [train.py:901] (3/4) Epoch 10, batch 5250, loss[loss=0.2205, simple_loss=0.3037, pruned_loss=0.06862, over 8638.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3161, pruned_loss=0.08458, over 1611551.02 frames. ], batch size: 27, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:06:56,857 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-06 09:06:57,789 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:00,626 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78010.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:14,857 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:17,424 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:19,992 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:07:23,912 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.672e+02 3.378e+02 4.041e+02 9.848e+02, threshold=6.756e+02, percent-clipped=3.0 +2023-02-06 09:07:25,980 INFO [train.py:901] (3/4) Epoch 10, batch 5300, loss[loss=0.2556, simple_loss=0.3178, pruned_loss=0.09674, over 7969.00 frames. ], tot_loss[loss=0.2418, simple_loss=0.3156, pruned_loss=0.08402, over 1613407.29 frames. ], batch size: 21, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:07:57,654 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:08:00,804 INFO [train.py:901] (3/4) Epoch 10, batch 5350, loss[loss=0.215, simple_loss=0.2934, pruned_loss=0.06825, over 8083.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.3158, pruned_loss=0.08426, over 1613090.67 frames. ], batch size: 21, lr: 7.61e-03, grad_scale: 16.0 +2023-02-06 09:08:14,893 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9184, 2.4017, 2.7082, 1.3964, 3.0455, 1.4914, 1.2621, 1.6903], + device='cuda:3'), covar=tensor([0.0545, 0.0259, 0.0250, 0.0499, 0.0260, 0.0724, 0.0599, 0.0381], + device='cuda:3'), in_proj_covar=tensor([0.0369, 0.0305, 0.0261, 0.0367, 0.0294, 0.0453, 0.0343, 0.0330], + device='cuda:3'), out_proj_covar=tensor([1.0693e-04, 8.6489e-05, 7.4541e-05, 1.0460e-04, 8.5137e-05, 1.4056e-04, + 9.9680e-05, 9.4978e-05], device='cuda:3') +2023-02-06 09:08:34,365 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.713e+02 3.238e+02 4.266e+02 6.892e+02, threshold=6.476e+02, percent-clipped=1.0 +2023-02-06 09:08:35,735 INFO [train.py:901] (3/4) Epoch 10, batch 5400, loss[loss=0.2412, simple_loss=0.3251, pruned_loss=0.0787, over 8358.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.316, pruned_loss=0.0845, over 1611687.44 frames. ], batch size: 24, lr: 7.61e-03, grad_scale: 8.0 +2023-02-06 09:08:40,013 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:08:41,605 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.59 vs. limit=5.0 +2023-02-06 09:09:08,700 INFO [train.py:901] (3/4) Epoch 10, batch 5450, loss[loss=0.2441, simple_loss=0.3291, pruned_loss=0.07958, over 8510.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3148, pruned_loss=0.08407, over 1607554.50 frames. 
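The learning rate in these entries decays smoothly from 7.78e-03 toward 7.56e-03, which is the signature of icefall's Eden schedule: a power-law decay in both the batch index and the epoch. The formula below is the commonly cited Eden rule; the `lr_batches`/`lr_epochs` constants are typical defaults and have not been checked against this particular run:

```python
def eden_lr(base_lr: float, batch: int, epoch: int,
            lr_batches: float = 5000.0, lr_epochs: float = 6.0) -> float:
    """Eden-style LR: asymptotically ~batch^-0.5 and ~epoch^-0.5 decay."""
    batch_factor = ((batch ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * batch_factor * epoch_factor
```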
], batch size: 29, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:43,432 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.697e+02 3.396e+02 4.413e+02 8.943e+02, threshold=6.791e+02, percent-clipped=7.0 +2023-02-06 09:09:44,805 INFO [train.py:901] (3/4) Epoch 10, batch 5500, loss[loss=0.2238, simple_loss=0.2935, pruned_loss=0.07703, over 7550.00 frames. ], tot_loss[loss=0.2423, simple_loss=0.3155, pruned_loss=0.08454, over 1610180.59 frames. ], batch size: 18, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:09:46,319 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:09:46,830 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 09:10:18,553 INFO [train.py:901] (3/4) Epoch 10, batch 5550, loss[loss=0.2959, simple_loss=0.3541, pruned_loss=0.1188, over 8336.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3157, pruned_loss=0.08417, over 1611775.32 frames. ], batch size: 26, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:52,577 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.563e+02 3.102e+02 4.076e+02 7.679e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 09:10:54,679 INFO [train.py:901] (3/4) Epoch 10, batch 5600, loss[loss=0.2866, simple_loss=0.3487, pruned_loss=0.1122, over 8504.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3155, pruned_loss=0.08419, over 1615403.37 frames. ], batch size: 26, lr: 7.60e-03, grad_scale: 8.0 +2023-02-06 09:10:55,587 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:07,553 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3601, 4.3484, 3.9148, 2.1086, 3.8130, 3.8497, 3.9472, 3.6474], + device='cuda:3'), covar=tensor([0.0839, 0.0567, 0.1085, 0.4802, 0.0971, 0.1014, 0.1281, 0.0892], + device='cuda:3'), in_proj_covar=tensor([0.0454, 0.0357, 0.0371, 0.0472, 0.0366, 0.0361, 0.0368, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 09:11:12,345 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:28,351 INFO [train.py:901] (3/4) Epoch 10, batch 5650, loss[loss=0.1939, simple_loss=0.2699, pruned_loss=0.05894, over 7974.00 frames. ], tot_loss[loss=0.2425, simple_loss=0.3159, pruned_loss=0.08452, over 1615396.19 frames. ], batch size: 21, lr: 7.59e-03, grad_scale: 8.0 +2023-02-06 09:11:32,098 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1651, 4.1612, 3.7446, 1.7980, 3.6194, 3.7120, 3.7899, 3.4478], + device='cuda:3'), covar=tensor([0.0862, 0.0592, 0.1082, 0.5009, 0.0947, 0.0883, 0.1246, 0.0884], + device='cuda:3'), in_proj_covar=tensor([0.0454, 0.0357, 0.0371, 0.0473, 0.0365, 0.0361, 0.0368, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 09:11:36,902 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78410.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:11:48,304 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875
+2023-02-06 09:11:54,466 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:12:02,391 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.443e+02 2.913e+02 3.480e+02 5.594e+02, threshold=5.826e+02, percent-clipped=0.0
+2023-02-06 09:12:03,752 INFO [train.py:901] (3/4) Epoch 10, batch 5700, loss[loss=0.227, simple_loss=0.3079, pruned_loss=0.0731, over 8553.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3145, pruned_loss=0.08335, over 1616802.01 frames. ], batch size: 31, lr: 7.59e-03, grad_scale: 8.0
+2023-02-06 09:12:16,136 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2719, 1.3394, 2.3408, 1.1599, 1.9579, 2.4648, 2.6130, 2.1186],
+ device='cuda:3'), covar=tensor([0.1049, 0.1269, 0.0467, 0.2072, 0.0757, 0.0401, 0.0595, 0.0791],
+ device='cuda:3'), in_proj_covar=tensor([0.0257, 0.0294, 0.0255, 0.0285, 0.0269, 0.0233, 0.0326, 0.0285],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 09:12:25,604 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78478.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:12:39,149 INFO [train.py:901] (3/4) Epoch 10, batch 5750, loss[loss=0.2394, simple_loss=0.3168, pruned_loss=0.08101, over 8640.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3153, pruned_loss=0.08352, over 1618760.22 frames. ], batch size: 34, lr: 7.59e-03, grad_scale: 8.0
+2023-02-06 09:12:42,075 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4549, 2.6954, 1.7977, 2.2452, 2.2515, 1.3644, 2.0552, 2.2295],
+ device='cuda:3'), covar=tensor([0.1519, 0.0360, 0.1099, 0.0652, 0.0645, 0.1480, 0.0993, 0.0865],
+ device='cuda:3'), in_proj_covar=tensor([0.0342, 0.0229, 0.0309, 0.0297, 0.0303, 0.0322, 0.0335, 0.0308],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 09:12:53,268 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 09:13:11,252 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.793e+02 3.478e+02 4.404e+02 1.244e+03, threshold=6.955e+02, percent-clipped=11.0
+2023-02-06 09:13:12,618 INFO [train.py:901] (3/4) Epoch 10, batch 5800, loss[loss=0.2141, simple_loss=0.2981, pruned_loss=0.06499, over 8117.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3166, pruned_loss=0.08433, over 1619492.49 frames. ], batch size: 23, lr: 7.59e-03, grad_scale: 8.0
+2023-02-06 09:13:17,537 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 09:13:29,738 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6286, 2.7733, 1.8584, 2.2147, 2.1678, 1.4898, 1.9914, 2.2811],
+ device='cuda:3'), covar=tensor([0.1263, 0.0318, 0.0935, 0.0563, 0.0609, 0.1255, 0.0864, 0.0771],
+ device='cuda:3'), in_proj_covar=tensor([0.0340, 0.0229, 0.0308, 0.0295, 0.0302, 0.0319, 0.0334, 0.0308],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 09:13:31,655 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78574.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:13:45,431 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78594.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:13:48,031 INFO [train.py:901] (3/4) Epoch 10, batch 5850, loss[loss=0.2114, simple_loss=0.2817, pruned_loss=0.07061, over 7538.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3157, pruned_loss=0.08412, over 1620126.42 frames. ], batch size: 18, lr: 7.58e-03, grad_scale: 8.0
+2023-02-06 09:14:19,898 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.589e+02 3.164e+02 4.281e+02 9.296e+02, threshold=6.329e+02, percent-clipped=5.0
+2023-02-06 09:14:21,268 INFO [train.py:901] (3/4) Epoch 10, batch 5900, loss[loss=0.1951, simple_loss=0.279, pruned_loss=0.05565, over 8132.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3147, pruned_loss=0.0838, over 1618878.88 frames. ], batch size: 22, lr: 7.58e-03, grad_scale: 8.0
+2023-02-06 09:14:24,992 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 09:14:29,629 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 09:14:30,103 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0698, 1.4140, 4.2799, 1.6386, 3.6624, 3.5626, 3.8462, 3.7061],
+ device='cuda:3'), covar=tensor([0.0618, 0.4139, 0.0490, 0.3134, 0.1316, 0.0809, 0.0596, 0.0707],
+ device='cuda:3'), in_proj_covar=tensor([0.0464, 0.0557, 0.0561, 0.0514, 0.0590, 0.0497, 0.0495, 0.0562],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:14:49,658 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0336, 1.2557, 1.1691, 0.5706, 1.1973, 0.9332, 0.1554, 1.1824],
+ device='cuda:3'), covar=tensor([0.0226, 0.0190, 0.0160, 0.0315, 0.0209, 0.0566, 0.0448, 0.0166],
+ device='cuda:3'), in_proj_covar=tensor([0.0379, 0.0308, 0.0265, 0.0376, 0.0303, 0.0462, 0.0351, 0.0336],
+ device='cuda:3'), out_proj_covar=tensor([1.0992e-04, 8.7306e-05, 7.5411e-05, 1.0730e-04, 8.7486e-05, 1.4334e-04,
+ 1.0197e-04, 9.6504e-05], device='cuda:3')
+2023-02-06 09:14:57,574 INFO [train.py:901] (3/4) Epoch 10, batch 5950, loss[loss=0.1658, simple_loss=0.2408, pruned_loss=0.04544, over 7645.00 frames. ], tot_loss[loss=0.2414, simple_loss=0.3151, pruned_loss=0.08386, over 1618133.08 frames. ], batch size: 19, lr: 7.58e-03, grad_scale: 8.0
+2023-02-06 09:15:05,422 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78709.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:15:30,045 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.430e+02 2.939e+02 3.954e+02 7.661e+02, threshold=5.878e+02, percent-clipped=3.0
+2023-02-06 09:15:31,441 INFO [train.py:901] (3/4) Epoch 10, batch 6000, loss[loss=0.2146, simple_loss=0.3078, pruned_loss=0.06066, over 8290.00 frames. ], tot_loss[loss=0.2396, simple_loss=0.3136, pruned_loss=0.08286, over 1615553.46 frames. ], batch size: 23, lr: 7.58e-03, grad_scale: 8.0
+2023-02-06 09:15:31,441 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 09:15:43,950 INFO [train.py:935] (3/4) Epoch 10, validation: loss=0.1914, simple_loss=0.2907, pruned_loss=0.04604, over 944034.00 frames.
+2023-02-06 09:15:43,951 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 09:16:18,415 INFO [train.py:901] (3/4) Epoch 10, batch 6050, loss[loss=0.1765, simple_loss=0.2605, pruned_loss=0.0462, over 7925.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3142, pruned_loss=0.08345, over 1613989.02 frames. ], batch size: 20, lr: 7.58e-03, grad_scale: 8.0
+2023-02-06 09:16:35,968 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:16:52,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.842e+02 3.348e+02 4.641e+02 9.072e+02, threshold=6.696e+02, percent-clipped=15.0
+2023-02-06 09:16:54,176 INFO [train.py:901] (3/4) Epoch 10, batch 6100, loss[loss=0.2606, simple_loss=0.338, pruned_loss=0.09159, over 8120.00 frames. ], tot_loss[loss=0.2409, simple_loss=0.3149, pruned_loss=0.0834, over 1618608.48 frames. ], batch size: 22, lr: 7.57e-03, grad_scale: 8.0
+2023-02-06 09:17:16,950 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 09:17:24,368 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 09:17:27,721 INFO [train.py:901] (3/4) Epoch 10, batch 6150, loss[loss=0.181, simple_loss=0.2641, pruned_loss=0.04892, over 8254.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3149, pruned_loss=0.08354, over 1619933.04 frames. ], batch size: 22, lr: 7.57e-03, grad_scale: 8.0
+2023-02-06 09:17:41,309 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=78918.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:17:54,629 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=78937.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:18:01,033 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.486e+02 3.076e+02 3.632e+02 7.166e+02, threshold=6.152e+02, percent-clipped=1.0
+2023-02-06 09:18:02,457 INFO [train.py:901] (3/4) Epoch 10, batch 6200, loss[loss=0.1862, simple_loss=0.2557, pruned_loss=0.05838, over 7431.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3146, pruned_loss=0.08331, over 1619322.62 frames. ], batch size: 17, lr: 7.57e-03, grad_scale: 8.0
+2023-02-06 09:18:15,656 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=78965.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:18:28,061 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=78983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:18:32,907 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=78990.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:18:38,057 INFO [train.py:901] (3/4) Epoch 10, batch 6250, loss[loss=0.2549, simple_loss=0.3253, pruned_loss=0.09231, over 8109.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.315, pruned_loss=0.08362, over 1613486.75 frames. ], batch size: 23, lr: 7.57e-03, grad_scale: 8.0
+2023-02-06 09:19:01,795 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79033.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:19:10,136 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.509e+02 3.177e+02 4.128e+02 1.006e+03, threshold=6.355e+02, percent-clipped=7.0
+2023-02-06 09:19:11,551 INFO [train.py:901] (3/4) Epoch 10, batch 6300, loss[loss=0.26, simple_loss=0.3389, pruned_loss=0.09051, over 8338.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3142, pruned_loss=0.08371, over 1610917.19 frames. ], batch size: 25, lr: 7.56e-03, grad_scale: 8.0
+2023-02-06 09:19:31,078 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8235, 1.6755, 5.9251, 2.2614, 5.2484, 4.8669, 5.4601, 5.2708],
+ device='cuda:3'), covar=tensor([0.0413, 0.4186, 0.0306, 0.3056, 0.0938, 0.0741, 0.0415, 0.0505],
+ device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0566, 0.0568, 0.0518, 0.0599, 0.0503, 0.0499, 0.0567],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:19:41,955 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1151, 4.0943, 3.7748, 1.8549, 3.6383, 3.8316, 3.7351, 3.5197],
+ device='cuda:3'), covar=tensor([0.0916, 0.0674, 0.1079, 0.4875, 0.0920, 0.1029, 0.1315, 0.0816],
+ device='cuda:3'), in_proj_covar=tensor([0.0451, 0.0354, 0.0367, 0.0470, 0.0361, 0.0357, 0.0366, 0.0315],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:19:47,657 INFO [train.py:901] (3/4) Epoch 10, batch 6350, loss[loss=0.3072, simple_loss=0.3627, pruned_loss=0.1259, over 8102.00 frames. ], tot_loss[loss=0.2416, simple_loss=0.315, pruned_loss=0.08413, over 1611521.18 frames. ], batch size: 23, lr: 7.56e-03, grad_scale: 8.0
+2023-02-06 09:19:54,818 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0
+2023-02-06 09:20:00,072 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9925, 1.6724, 2.1953, 1.8914, 2.0774, 1.9291, 1.5794, 0.7578],
+ device='cuda:3'), covar=tensor([0.3814, 0.3412, 0.1167, 0.2142, 0.1536, 0.1871, 0.1557, 0.3433],
+ device='cuda:3'), in_proj_covar=tensor([0.0865, 0.0842, 0.0701, 0.0815, 0.0911, 0.0772, 0.0685, 0.0742],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:20:20,611 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.998e+02 3.636e+02 4.667e+02 1.201e+03, threshold=7.271e+02, percent-clipped=11.0
+2023-02-06 09:20:21,298 INFO [train.py:901] (3/4) Epoch 10, batch 6400, loss[loss=0.2098, simple_loss=0.2833, pruned_loss=0.06812, over 7415.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3152, pruned_loss=0.08436, over 1614061.40 frames. ], batch size: 17, lr: 7.56e-03, grad_scale: 8.0
+2023-02-06 09:20:32,950 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8779, 6.0307, 5.2259, 2.5411, 5.0615, 5.6144, 5.4407, 5.1855],
+ device='cuda:3'), covar=tensor([0.0569, 0.0423, 0.1095, 0.4661, 0.0755, 0.0645, 0.1109, 0.0581],
+ device='cuda:3'), in_proj_covar=tensor([0.0447, 0.0351, 0.0364, 0.0465, 0.0360, 0.0353, 0.0362, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:20:54,147 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79193.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:20:57,417 INFO [train.py:901] (3/4) Epoch 10, batch 6450, loss[loss=0.2219, simple_loss=0.3027, pruned_loss=0.07056, over 8342.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3139, pruned_loss=0.08372, over 1611603.25 frames. ], batch size: 26, lr: 7.56e-03, grad_scale: 8.0
+2023-02-06 09:21:12,209 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79218.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:21:31,626 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.577e+02 3.130e+02 4.050e+02 7.383e+02, threshold=6.260e+02, percent-clipped=1.0
+2023-02-06 09:21:32,338 INFO [train.py:901] (3/4) Epoch 10, batch 6500, loss[loss=0.2421, simple_loss=0.3205, pruned_loss=0.08184, over 8508.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.314, pruned_loss=0.08312, over 1614545.54 frames. ], batch size: 26, lr: 7.55e-03, grad_scale: 8.0
+2023-02-06 09:21:59,871 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79289.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:22:06,998 INFO [train.py:901] (3/4) Epoch 10, batch 6550, loss[loss=0.2657, simple_loss=0.3392, pruned_loss=0.09607, over 8337.00 frames. ], tot_loss[loss=0.2413, simple_loss=0.3152, pruned_loss=0.08364, over 1615622.67 frames. ], batch size: 26, lr: 7.55e-03, grad_scale: 8.0
+2023-02-06 09:22:11,158 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79303.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:22:18,373 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79314.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:22:27,892 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:22:36,559 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-06 09:22:41,238 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.767e+02 3.312e+02 4.239e+02 1.073e+03, threshold=6.623e+02, percent-clipped=3.0
+2023-02-06 09:22:41,948 INFO [train.py:901] (3/4) Epoch 10, batch 6600, loss[loss=0.2251, simple_loss=0.3, pruned_loss=0.07511, over 8078.00 frames. ], tot_loss[loss=0.2426, simple_loss=0.3164, pruned_loss=0.08436, over 1616324.95 frames. ], batch size: 21, lr: 7.55e-03, grad_scale: 8.0
+2023-02-06 09:22:44,149 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79351.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:22:53,889 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-06 09:23:04,711 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79382.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:23:11,911 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79393.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 09:23:15,196 INFO [train.py:901] (3/4) Epoch 10, batch 6650, loss[loss=0.2344, simple_loss=0.3173, pruned_loss=0.07572, over 8574.00 frames. ], tot_loss[loss=0.241, simple_loss=0.3152, pruned_loss=0.08337, over 1616442.05 frames. ], batch size: 34, lr: 7.55e-03, grad_scale: 8.0
+2023-02-06 09:23:18,285 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0
+2023-02-06 09:23:47,677 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79442.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:23:50,865 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.666e+02 3.220e+02 4.193e+02 8.839e+02, threshold=6.440e+02, percent-clipped=3.0
+2023-02-06 09:23:51,577 INFO [train.py:901] (3/4) Epoch 10, batch 6700, loss[loss=0.2094, simple_loss=0.2745, pruned_loss=0.07215, over 7686.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3147, pruned_loss=0.08335, over 1612674.17 frames. ], batch size: 18, lr: 7.54e-03, grad_scale: 8.0
+2023-02-06 09:24:24,667 INFO [train.py:901] (3/4) Epoch 10, batch 6750, loss[loss=0.2257, simple_loss=0.3049, pruned_loss=0.07324, over 8083.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3144, pruned_loss=0.08384, over 1614028.12 frames. ], batch size: 21, lr: 7.54e-03, grad_scale: 8.0
+2023-02-06 09:24:34,604 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9538, 2.1219, 1.6216, 2.6216, 1.0834, 1.4189, 1.7543, 2.0803],
+ device='cuda:3'), covar=tensor([0.0668, 0.0772, 0.1094, 0.0359, 0.1318, 0.1507, 0.1013, 0.0770],
+ device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0212, 0.0255, 0.0216, 0.0219, 0.0252, 0.0259, 0.0224],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 09:24:35,571 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-02-06 09:25:00,368 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.662e+02 3.188e+02 4.113e+02 8.575e+02, threshold=6.376e+02, percent-clipped=4.0
+2023-02-06 09:25:01,059 INFO [train.py:901] (3/4) Epoch 10, batch 6800, loss[loss=0.1946, simple_loss=0.269, pruned_loss=0.06007, over 8242.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3136, pruned_loss=0.08331, over 1613503.05 frames. ], batch size: 22, lr: 7.54e-03, grad_scale: 8.0
+2023-02-06 09:25:11,664 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-06 09:25:13,797 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5251, 4.4781, 4.0663, 1.8746, 3.9859, 4.1472, 4.1831, 3.8277],
+ device='cuda:3'), covar=tensor([0.0770, 0.0539, 0.0921, 0.5337, 0.0815, 0.1042, 0.1218, 0.0734],
+ device='cuda:3'), in_proj_covar=tensor([0.0453, 0.0355, 0.0371, 0.0474, 0.0368, 0.0358, 0.0366, 0.0317],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:25:35,811 INFO [train.py:901] (3/4) Epoch 10, batch 6850, loss[loss=0.2854, simple_loss=0.3483, pruned_loss=0.1112, over 8626.00 frames. ], tot_loss[loss=0.2407, simple_loss=0.3146, pruned_loss=0.08347, over 1617290.15 frames. ], batch size: 49, lr: 7.54e-03, grad_scale: 8.0
+2023-02-06 09:25:39,361 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79603.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:25:48,391 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5549, 1.9603, 3.5254, 1.1972, 2.4195, 2.0737, 1.6275, 2.1386],
+ device='cuda:3'), covar=tensor([0.1672, 0.2045, 0.0675, 0.3817, 0.1554, 0.2556, 0.1688, 0.2261],
+ device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0504, 0.0526, 0.0569, 0.0613, 0.0548, 0.0461, 0.0603],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:25:59,641 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-06 09:26:10,489 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.544e+02 2.480e+02 2.958e+02 3.519e+02 6.592e+02, threshold=5.916e+02, percent-clipped=1.0
+2023-02-06 09:26:10,589 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79647.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:26:11,119 INFO [train.py:901] (3/4) Epoch 10, batch 6900, loss[loss=0.261, simple_loss=0.3168, pruned_loss=0.1026, over 7920.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.3167, pruned_loss=0.08485, over 1622498.91 frames. ], batch size: 20, lr: 7.53e-03, grad_scale: 8.0
+2023-02-06 09:26:30,984 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79675.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:26:44,404 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79695.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:26:46,400 INFO [train.py:901] (3/4) Epoch 10, batch 6950, loss[loss=0.2537, simple_loss=0.3254, pruned_loss=0.09095, over 8101.00 frames. ], tot_loss[loss=0.2427, simple_loss=0.3161, pruned_loss=0.08464, over 1621646.21 frames. ], batch size: 23, lr: 7.53e-03, grad_scale: 8.0
+2023-02-06 09:26:46,619 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=79698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:27:03,561 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=79723.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:27:05,486 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79726.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:27:10,652 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-06 09:27:12,700 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79737.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:27:19,257 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.770e+02 3.379e+02 4.019e+02 1.115e+03, threshold=6.759e+02, percent-clipped=8.0
+2023-02-06 09:27:19,981 INFO [train.py:901] (3/4) Epoch 10, batch 7000, loss[loss=0.2291, simple_loss=0.316, pruned_loss=0.07108, over 8640.00 frames. ], tot_loss[loss=0.2424, simple_loss=0.316, pruned_loss=0.08443, over 1622557.69 frames. ], batch size: 39, lr: 7.53e-03, grad_scale: 8.0
+2023-02-06 09:27:30,363 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79762.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:27:39,405 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0827, 3.9727, 3.6987, 1.8993, 3.6664, 3.8004, 3.7657, 3.4459],
+ device='cuda:3'), covar=tensor([0.0924, 0.0711, 0.1072, 0.4937, 0.0857, 0.0928, 0.1492, 0.0838],
+ device='cuda:3'), in_proj_covar=tensor([0.0457, 0.0357, 0.0373, 0.0471, 0.0367, 0.0356, 0.0363, 0.0316],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:27:43,455 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=79780.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:27:55,335 INFO [train.py:901] (3/4) Epoch 10, batch 7050, loss[loss=0.224, simple_loss=0.2963, pruned_loss=0.07587, over 8232.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3156, pruned_loss=0.0839, over 1623112.98 frames. ], batch size: 22, lr: 7.53e-03, grad_scale: 8.0
+2023-02-06 09:28:04,460 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6255, 1.3599, 1.6620, 1.2551, 0.8866, 1.4158, 1.4067, 1.4046],
+ device='cuda:3'), covar=tensor([0.0538, 0.1247, 0.1642, 0.1402, 0.0617, 0.1498, 0.0714, 0.0613],
+ device='cuda:3'), in_proj_covar=tensor([0.0102, 0.0152, 0.0193, 0.0159, 0.0105, 0.0164, 0.0118, 0.0138],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0006, 0.0006],
+ device='cuda:3')
+2023-02-06 09:28:04,467 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79810.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:28:11,471 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 09:28:25,536 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79841.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:28:29,357 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.704e+02 3.361e+02 4.306e+02 1.362e+03, threshold=6.722e+02, percent-clipped=5.0
+2023-02-06 09:28:30,073 INFO [train.py:901] (3/4) Epoch 10, batch 7100, loss[loss=0.1777, simple_loss=0.2549, pruned_loss=0.05028, over 7427.00 frames. ], tot_loss[loss=0.2411, simple_loss=0.3148, pruned_loss=0.08371, over 1617922.62 frames. ], batch size: 17, lr: 7.53e-03, grad_scale: 8.0
+2023-02-06 09:28:32,888 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=79852.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 09:29:06,027 INFO [train.py:901] (3/4) Epoch 10, batch 7150, loss[loss=0.2949, simple_loss=0.3698, pruned_loss=0.1101, over 8541.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3136, pruned_loss=0.08297, over 1616942.04 frames. ], batch size: 49, lr: 7.52e-03, grad_scale: 8.0
+2023-02-06 09:29:39,481 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.702e+02 3.262e+02 4.332e+02 1.613e+03, threshold=6.525e+02, percent-clipped=3.0
+2023-02-06 09:29:39,577 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=79947.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:29:40,186 INFO [train.py:901] (3/4) Epoch 10, batch 7200, loss[loss=0.2505, simple_loss=0.3039, pruned_loss=0.09858, over 7534.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3133, pruned_loss=0.08232, over 1619910.39 frames. ], batch size: 18, lr: 7.52e-03, grad_scale: 8.0
+2023-02-06 09:30:06,714 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0
+2023-02-06 09:30:13,856 INFO [train.py:901] (3/4) Epoch 10, batch 7250, loss[loss=0.2428, simple_loss=0.317, pruned_loss=0.08433, over 8292.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3133, pruned_loss=0.08232, over 1618802.37 frames. ], batch size: 23, lr: 7.52e-03, grad_scale: 8.0
+2023-02-06 09:30:30,524 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80018.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:30:31,018 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80019.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:30:47,885 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80043.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:30:50,288 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.755e+02 3.243e+02 3.993e+02 1.489e+03, threshold=6.485e+02, percent-clipped=9.0
+2023-02-06 09:30:50,942 INFO [train.py:901] (3/4) Epoch 10, batch 7300, loss[loss=0.2269, simple_loss=0.2978, pruned_loss=0.07801, over 7694.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3147, pruned_loss=0.08323, over 1617509.15 frames. ], batch size: 18, lr: 7.52e-03, grad_scale: 8.0
+2023-02-06 09:31:00,591 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80062.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:03,354 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80066.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:20,005 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80091.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:24,179 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80097.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:24,595 INFO [train.py:901] (3/4) Epoch 10, batch 7350, loss[loss=0.2319, simple_loss=0.3075, pruned_loss=0.0782, over 8109.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3144, pruned_loss=0.08311, over 1615758.41 frames. ], batch size: 23, lr: 7.51e-03, grad_scale: 8.0
+2023-02-06 09:31:31,673 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80108.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 09:31:32,533 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0
+2023-02-06 09:31:42,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80122.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:44,300 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80124.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:50,390 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80133.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 09:31:50,992 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:31:52,503 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 09:31:56,728 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-06 09:31:59,432 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.519e+02 3.343e+02 4.224e+02 9.659e+02, threshold=6.686e+02, percent-clipped=6.0
+2023-02-06 09:32:00,139 INFO [train.py:901] (3/4) Epoch 10, batch 7400, loss[loss=0.266, simple_loss=0.3294, pruned_loss=0.1013, over 6380.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3154, pruned_loss=0.08375, over 1612460.84 frames. ], batch size: 14, lr: 7.51e-03, grad_scale: 8.0
+2023-02-06 09:32:16,188 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-06 09:32:29,723 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5231, 1.5811, 4.7177, 2.0032, 4.1198, 3.9323, 4.3101, 4.1576],
+ device='cuda:3'), covar=tensor([0.0498, 0.3921, 0.0404, 0.2981, 0.1012, 0.0770, 0.0466, 0.0561],
+ device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0559, 0.0562, 0.0511, 0.0584, 0.0493, 0.0490, 0.0556],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:32:34,297 INFO [train.py:901] (3/4) Epoch 10, batch 7450, loss[loss=0.2043, simple_loss=0.2771, pruned_loss=0.06576, over 7207.00 frames. ], tot_loss[loss=0.2422, simple_loss=0.316, pruned_loss=0.08418, over 1613483.01 frames. ], batch size: 16, lr: 7.51e-03, grad_scale: 8.0
+2023-02-06 09:32:54,347 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-06 09:33:02,512 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80239.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:33:09,145 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.637e+02 3.217e+02 3.901e+02 6.824e+02, threshold=6.433e+02, percent-clipped=2.0
+2023-02-06 09:33:09,864 INFO [train.py:901] (3/4) Epoch 10, batch 7500, loss[loss=0.1906, simple_loss=0.2682, pruned_loss=0.05656, over 7427.00 frames. ], tot_loss[loss=0.2406, simple_loss=0.3146, pruned_loss=0.0833, over 1615449.83 frames. ], batch size: 17, lr: 7.51e-03, grad_scale: 8.0
+2023-02-06 09:33:43,942 INFO [train.py:901] (3/4) Epoch 10, batch 7550, loss[loss=0.2583, simple_loss=0.3269, pruned_loss=0.09488, over 8133.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3141, pruned_loss=0.0828, over 1615152.80 frames. ], batch size: 22, lr: 7.50e-03, grad_scale: 8.0
+2023-02-06 09:33:46,247 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:33:57,962 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80318.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:34:15,056 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80343.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:34:17,459 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.399e+02 2.908e+02 3.933e+02 1.078e+03, threshold=5.816e+02, percent-clipped=3.0
+2023-02-06 09:34:18,143 INFO [train.py:901] (3/4) Epoch 10, batch 7600, loss[loss=0.22, simple_loss=0.303, pruned_loss=0.06847, over 8081.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3135, pruned_loss=0.08269, over 1613173.15 frames. ], batch size: 21, lr: 7.50e-03, grad_scale: 8.0
+2023-02-06 09:34:25,955 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0
+2023-02-06 09:34:49,203 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80390.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:34:54,344 INFO [train.py:901] (3/4) Epoch 10, batch 7650, loss[loss=0.2389, simple_loss=0.3257, pruned_loss=0.07609, over 8618.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3139, pruned_loss=0.08258, over 1616075.91 frames. ], batch size: 34, lr: 7.50e-03, grad_scale: 8.0
+2023-02-06 09:35:03,333 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80411.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:35:03,594 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.58 vs. limit=5.0
+2023-02-06 09:35:06,123 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80415.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:35:27,280 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.621e+02 3.149e+02 3.913e+02 9.838e+02, threshold=6.298e+02, percent-clipped=6.0
+2023-02-06 09:35:27,987 INFO [train.py:901] (3/4) Epoch 10, batch 7700, loss[loss=0.2406, simple_loss=0.3168, pruned_loss=0.08217, over 8575.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3138, pruned_loss=0.08231, over 1620990.37 frames. ], batch size: 49, lr: 7.50e-03, grad_scale: 8.0
+2023-02-06 09:35:51,541 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80481.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:35:52,936 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0093, 2.3667, 1.8152, 2.8703, 1.3238, 1.6092, 2.1483, 2.5088],
+ device='cuda:3'), covar=tensor([0.0816, 0.0896, 0.1142, 0.0471, 0.1137, 0.1471, 0.0927, 0.0751],
+ device='cuda:3'), in_proj_covar=tensor([0.0240, 0.0215, 0.0256, 0.0219, 0.0219, 0.0254, 0.0257, 0.0228],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 09:36:01,534 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=80495.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:36:03,351 INFO [train.py:901] (3/4) Epoch 10, batch 7750, loss[loss=0.261, simple_loss=0.3342, pruned_loss=0.09389, over 8746.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3145, pruned_loss=0.08265, over 1623546.82 frames. ], batch size: 39, lr: 7.49e-03, grad_scale: 8.0
+2023-02-06 09:36:06,761 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-06 09:36:13,279 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80512.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:36:18,618 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=80520.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:36:32,807 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 09:36:36,357 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.713e+02 3.406e+02 4.090e+02 8.759e+02, threshold=6.812e+02, percent-clipped=3.0
+2023-02-06 09:36:37,066 INFO [train.py:901] (3/4) Epoch 10, batch 7800, loss[loss=0.2379, simple_loss=0.3, pruned_loss=0.08789, over 7524.00 frames. ], tot_loss[loss=0.2397, simple_loss=0.3137, pruned_loss=0.0828, over 1620645.37 frames. ], batch size: 18, lr: 7.49e-03, grad_scale: 8.0
+2023-02-06 09:37:09,869 INFO [train.py:901] (3/4) Epoch 10, batch 7850, loss[loss=0.2356, simple_loss=0.319, pruned_loss=0.07607, over 8632.00 frames. ], tot_loss[loss=0.2404, simple_loss=0.3143, pruned_loss=0.08319, over 1619221.71 frames. ], batch size: 34, lr: 7.49e-03, grad_scale: 8.0
+2023-02-06 09:37:41,490 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:37:42,730 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.556e+02 3.372e+02 4.255e+02 7.191e+02, threshold=6.744e+02, percent-clipped=1.0
+2023-02-06 09:37:43,428 INFO [train.py:901] (3/4) Epoch 10, batch 7900, loss[loss=0.2666, simple_loss=0.3262, pruned_loss=0.1035, over 6799.00 frames. ], tot_loss[loss=0.2394, simple_loss=0.3135, pruned_loss=0.08267, over 1618880.98 frames. ], batch size: 72, lr: 7.49e-03, grad_scale: 8.0
+2023-02-06 09:38:16,939 INFO [train.py:901] (3/4) Epoch 10, batch 7950, loss[loss=0.1894, simple_loss=0.2666, pruned_loss=0.0561, over 7812.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3131, pruned_loss=0.08215, over 1617566.85 frames. ], batch size: 20, lr: 7.49e-03, grad_scale: 8.0
+2023-02-06 09:38:26,479 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6273, 2.9033, 2.1997, 2.3204, 2.3881, 1.7411, 2.3686, 2.3159],
+ device='cuda:3'), covar=tensor([0.1198, 0.0261, 0.0766, 0.0535, 0.0561, 0.1180, 0.0757, 0.0727],
+ device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0229, 0.0305, 0.0291, 0.0300, 0.0319, 0.0336, 0.0301],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 09:38:46,817 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6267, 1.3291, 4.8127, 1.6636, 4.2036, 3.9934, 4.3395, 4.1875],
+ device='cuda:3'), covar=tensor([0.0466, 0.4446, 0.0473, 0.3400, 0.1134, 0.0823, 0.0449, 0.0577],
+ device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0560, 0.0566, 0.0514, 0.0591, 0.0494, 0.0493, 0.0561],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:38:50,733 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.443e+02 2.660e+02 3.023e+02 3.700e+02 9.606e+02, threshold=6.046e+02, percent-clipped=2.0
+2023-02-06 09:38:51,443 INFO [train.py:901] (3/4) Epoch 10, batch 8000, loss[loss=0.2476, simple_loss=0.3326, pruned_loss=0.08126, over 8513.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.313, pruned_loss=0.08204, over 1614850.59 frames. ], batch size: 26, lr: 7.48e-03, grad_scale: 8.0
+2023-02-06 09:38:55,688 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80754.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:38:56,306 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80755.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:38:59,816 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80760.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:39:20,375 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=80791.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:39:24,989 INFO [train.py:901] (3/4) Epoch 10, batch 8050, loss[loss=0.2111, simple_loss=0.2742, pruned_loss=0.07403, over 7191.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3129, pruned_loss=0.0825, over 1609155.26 frames. ], batch size: 16, lr: 7.48e-03, grad_scale: 8.0
+2023-02-06 09:39:43,570 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80825.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:39:58,215 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 09:40:01,797 INFO [train.py:901] (3/4) Epoch 11, batch 0, loss[loss=0.225, simple_loss=0.3047, pruned_loss=0.07262, over 8187.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3047, pruned_loss=0.07262, over 8187.00 frames. ], batch size: 23, lr: 7.14e-03, grad_scale: 8.0
+2023-02-06 09:40:01,798 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 09:40:13,089 INFO [train.py:935] (3/4) Epoch 11, validation: loss=0.1907, simple_loss=0.2907, pruned_loss=0.04534, over 944034.00 frames.
+2023-02-06 09:40:13,090 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 09:40:22,706 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6534, 1.3598, 1.5494, 1.2320, 0.8954, 1.2778, 1.4888, 1.2497],
+ device='cuda:3'), covar=tensor([0.0567, 0.1317, 0.1830, 0.1515, 0.0631, 0.1656, 0.0750, 0.0696],
+ device='cuda:3'), in_proj_covar=tensor([0.0103, 0.0155, 0.0197, 0.0161, 0.0107, 0.0166, 0.0119, 0.0140],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:3')
+2023-02-06 09:40:23,924 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.899e+02 3.439e+02 4.416e+02 1.589e+03, threshold=6.879e+02, percent-clipped=9.0
+2023-02-06 09:40:27,456 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 09:40:30,153 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=80856.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:40:39,880 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80870.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:40:47,941 INFO [train.py:901] (3/4) Epoch 11, batch 50, loss[loss=0.2576, simple_loss=0.3266, pruned_loss=0.09428, over 8104.00 frames. ], tot_loss[loss=0.2492, simple_loss=0.3227, pruned_loss=0.08785, over 365636.04 frames. ], batch size: 23, lr: 7.14e-03, grad_scale: 8.0
+2023-02-06 09:40:52,491 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.56 vs. limit=2.0
+2023-02-06 09:41:03,915 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 09:41:04,376 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0
+2023-02-06 09:41:24,348 INFO [train.py:901] (3/4) Epoch 11, batch 100, loss[loss=0.3025, simple_loss=0.3559, pruned_loss=0.1246, over 6834.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3164, pruned_loss=0.08583, over 639730.86 frames. ], batch size: 71, lr: 7.14e-03, grad_scale: 8.0
+2023-02-06 09:41:29,236 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 09:41:30,717 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80940.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:41:35,311 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.925e+02 2.679e+02 3.187e+02 3.933e+02 1.063e+03, threshold=6.374e+02, percent-clipped=2.0
+2023-02-06 09:41:51,853 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=80971.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:41:58,391 INFO [train.py:901] (3/4) Epoch 11, batch 150, loss[loss=0.2523, simple_loss=0.3155, pruned_loss=0.09455, over 7781.00 frames. ], tot_loss[loss=0.2432, simple_loss=0.3163, pruned_loss=0.08502, over 857947.16 frames. ], batch size: 19, lr: 7.13e-03, grad_scale: 8.0
+2023-02-06 09:42:23,722 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81016.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:42:34,641 INFO [train.py:901] (3/4) Epoch 11, batch 200, loss[loss=0.2, simple_loss=0.2715, pruned_loss=0.06421, over 7234.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3152, pruned_loss=0.08404, over 1029717.70 frames. ], batch size: 16, lr: 7.13e-03, grad_scale: 8.0
+2023-02-06 09:42:37,864 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0
+2023-02-06 09:42:42,984 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81041.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:42:47,008 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.935e+02 2.662e+02 3.186e+02 4.005e+02 8.686e+02, threshold=6.371e+02, percent-clipped=5.0
+2023-02-06 09:43:10,553 INFO [train.py:901] (3/4) Epoch 11, batch 250, loss[loss=0.2235, simple_loss=0.3097, pruned_loss=0.06862, over 8496.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3125, pruned_loss=0.08242, over 1155030.50 frames. ], batch size: 28, lr: 7.13e-03, grad_scale: 8.0
+2023-02-06 09:43:21,546 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 09:43:22,306 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81098.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:43:31,350 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 09:43:42,708 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81126.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:43:46,010 INFO [train.py:901] (3/4) Epoch 11, batch 300, loss[loss=0.2591, simple_loss=0.3385, pruned_loss=0.08983, over 8337.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3114, pruned_loss=0.08218, over 1252914.29 frames. ], batch size: 26, lr: 7.13e-03, grad_scale: 16.0
+2023-02-06 09:43:48,256 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:43:48,874 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81135.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:43:57,132 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.697e+02 3.136e+02 4.054e+02 9.565e+02, threshold=6.271e+02, percent-clipped=1.0
+2023-02-06 09:44:00,772 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81151.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:44:22,513 INFO [train.py:901] (3/4) Epoch 11, batch 350, loss[loss=0.2383, simple_loss=0.3092, pruned_loss=0.08364, over 8297.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3108, pruned_loss=0.08149, over 1333622.79 frames. ], batch size: 23, lr: 7.13e-03, grad_scale: 8.0
+2023-02-06 09:44:32,495 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-06 09:44:33,017 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81196.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:44:39,230 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6449, 2.1736, 3.4061, 2.5706, 2.9781, 2.3903, 1.9563, 1.7628],
+ device='cuda:3'), covar=tensor([0.3868, 0.4305, 0.1191, 0.2749, 0.2100, 0.2078, 0.1671, 0.4466],
+ device='cuda:3'), in_proj_covar=tensor([0.0892, 0.0863, 0.0720, 0.0833, 0.0935, 0.0794, 0.0704, 0.0762],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:44:44,354 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81213.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:44:46,358 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8999, 1.5767, 1.7299, 1.4951, 1.0280, 1.5015, 1.7516, 1.4664],
+ device='cuda:3'), covar=tensor([0.0483, 0.1079, 0.1544, 0.1210, 0.0539, 0.1306, 0.0647, 0.0566],
+ device='cuda:3'), in_proj_covar=tensor([0.0102, 0.0155, 0.0195, 0.0159, 0.0106, 0.0164, 0.0118, 0.0138],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:3')
+2023-02-06 09:44:49,728 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81221.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:44:53,754 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81227.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:44:56,288 INFO [train.py:901] (3/4) Epoch 11, batch 400, loss[loss=0.1798, simple_loss=0.2576, pruned_loss=0.05099, over 7433.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3109, pruned_loss=0.08169, over 1390377.65 frames. ], batch size: 17, lr: 7.12e-03, grad_scale: 8.0
+2023-02-06 09:45:00,800 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0
+2023-02-06 09:45:08,650 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.601e+02 3.216e+02 4.274e+02 6.931e+02, threshold=6.433e+02, percent-clipped=2.0
+2023-02-06 09:45:10,286 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81250.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:45:11,745 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81252.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:45:23,639 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0302, 1.2231, 1.1934, 0.4990, 1.1566, 1.0142, 0.0877, 1.1693],
+ device='cuda:3'), covar=tensor([0.0259, 0.0225, 0.0194, 0.0375, 0.0226, 0.0685, 0.0512, 0.0208],
+ device='cuda:3'), in_proj_covar=tensor([0.0373, 0.0304, 0.0259, 0.0372, 0.0293, 0.0457, 0.0345, 0.0339],
+ device='cuda:3'), out_proj_covar=tensor([1.0753e-04, 8.5703e-05, 7.3022e-05, 1.0561e-04, 8.3944e-05, 1.4109e-04,
+ 9.9503e-05, 9.6983e-05], device='cuda:3')
+2023-02-06 09:45:29,082 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4654, 2.6907, 2.0021, 2.1983, 2.2158, 1.6664, 2.1498, 2.1968],
+ device='cuda:3'), covar=tensor([0.1366, 0.0355, 0.0879, 0.0532, 0.0549, 0.1249, 0.0831, 0.0782],
+ device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0233, 0.0308, 0.0296, 0.0303, 0.0325, 0.0338, 0.0304],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 09:45:32,912 INFO [train.py:901] (3/4) Epoch 11, batch 450, loss[loss=0.2424, simple_loss=0.3194, pruned_loss=0.08265, over 8357.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3106, pruned_loss=0.08139, over 1438840.23 frames. ], batch size: 24, lr: 7.12e-03, grad_scale: 8.0
+2023-02-06 09:45:57,085 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81317.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:45:58,455 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81319.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:46:06,255 INFO [train.py:901] (3/4) Epoch 11, batch 500, loss[loss=0.2014, simple_loss=0.2876, pruned_loss=0.05762, over 8287.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3114, pruned_loss=0.08118, over 1483854.84 frames. ], batch size: 23, lr: 7.12e-03, grad_scale: 8.0
+2023-02-06 09:46:07,904 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0
+2023-02-06 09:46:17,547 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.501e+02 3.364e+02 4.069e+02 6.845e+02, threshold=6.728e+02, percent-clipped=2.0
+2023-02-06 09:46:40,092 INFO [train.py:901] (3/4) Epoch 11, batch 550, loss[loss=0.2561, simple_loss=0.3327, pruned_loss=0.08972, over 8101.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3123, pruned_loss=0.08212, over 1512111.99 frames. ], batch size: 23, lr: 7.12e-03, grad_scale: 8.0
+2023-02-06 09:47:04,542 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3174, 2.0021, 2.9504, 2.3722, 2.7488, 2.1589, 1.8077, 1.5979],
+ device='cuda:3'), covar=tensor([0.3705, 0.3710, 0.1076, 0.2199, 0.1676, 0.2161, 0.1696, 0.3643],
+ device='cuda:3'), in_proj_covar=tensor([0.0886, 0.0853, 0.0717, 0.0828, 0.0927, 0.0789, 0.0701, 0.0759],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:47:15,699 INFO [train.py:901] (3/4) Epoch 11, batch 600, loss[loss=0.252, simple_loss=0.3368, pruned_loss=0.08353, over 8701.00 frames. ], tot_loss[loss=0.2399, simple_loss=0.3138, pruned_loss=0.08296, over 1538012.94 frames. ], batch size: 30, lr: 7.11e-03, grad_scale: 8.0
+2023-02-06 09:47:27,354 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.633e+02 3.080e+02 3.885e+02 6.931e+02, threshold=6.160e+02, percent-clipped=1.0
+2023-02-06 09:47:27,538 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0654, 1.4802, 3.4083, 1.5396, 2.0991, 3.7727, 3.7573, 3.1910],
+ device='cuda:3'), covar=tensor([0.0948, 0.1403, 0.0308, 0.1707, 0.0970, 0.0197, 0.0434, 0.0530],
+ device='cuda:3'), in_proj_covar=tensor([0.0257, 0.0286, 0.0247, 0.0278, 0.0264, 0.0226, 0.0322, 0.0280],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002],
+ device='cuda:3')
+2023-02-06 09:47:27,561 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1910, 1.4819, 1.5399, 1.3157, 1.1298, 1.3378, 1.7501, 1.5815],
+ device='cuda:3'), covar=tensor([0.0472, 0.1232, 0.1755, 0.1418, 0.0614, 0.1524, 0.0710, 0.0588],
+ device='cuda:3'), in_proj_covar=tensor([0.0102, 0.0155, 0.0196, 0.0159, 0.0106, 0.0165, 0.0118, 0.0139],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:3')
+2023-02-06 09:47:27,976 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-06 09:47:35,462 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 09:47:42,472 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81469.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:47:48,472 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81478.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:47:50,439 INFO [train.py:901] (3/4) Epoch 11, batch 650, loss[loss=0.2011, simple_loss=0.2827, pruned_loss=0.05973, over 7659.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3126, pruned_loss=0.08233, over 1553284.53 frames. ], batch size: 19, lr: 7.11e-03, grad_scale: 8.0
+2023-02-06 09:47:59,450 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81494.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:48:05,537 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1027, 2.1790, 1.6680, 1.9632, 1.7408, 1.3274, 1.6584, 1.6490],
+ device='cuda:3'), covar=tensor([0.1208, 0.0345, 0.1004, 0.0470, 0.0559, 0.1322, 0.0878, 0.0749],
+ device='cuda:3'), in_proj_covar=tensor([0.0340, 0.0228, 0.0304, 0.0295, 0.0297, 0.0318, 0.0332, 0.0300],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 09:48:08,335 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81506.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:48:26,931 INFO [train.py:901] (3/4) Epoch 11, batch 700, loss[loss=0.1942, simple_loss=0.2867, pruned_loss=0.05085, over 7974.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3117, pruned_loss=0.08087, over 1567765.15 frames. ], batch size: 21, lr: 7.11e-03, grad_scale: 8.0
+2023-02-06 09:48:27,091 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81531.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:48:28,479 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6499, 2.1947, 4.4205, 1.2362, 3.1392, 2.2144, 1.8686, 2.6481],
+ device='cuda:3'), covar=tensor([0.2013, 0.2590, 0.0877, 0.4747, 0.1882, 0.3189, 0.1973, 0.3174],
+ device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0504, 0.0526, 0.0567, 0.0609, 0.0541, 0.0466, 0.0608],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:48:38,763 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.541e+02 3.049e+02 3.626e+02 6.264e+02, threshold=6.097e+02, percent-clipped=1.0
+2023-02-06 09:49:01,392 INFO [train.py:901] (3/4) Epoch 11, batch 750, loss[loss=0.2964, simple_loss=0.3665, pruned_loss=0.1131, over 8487.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3112, pruned_loss=0.08095, over 1579232.54 frames. ], batch size: 25, lr: 7.11e-03, grad_scale: 8.0
+2023-02-06 09:49:07,835 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-06 09:49:10,214 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81593.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:49:10,588 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-06 09:49:21,647 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1770, 1.8451, 2.8891, 2.2043, 2.5301, 2.0575, 1.5646, 1.2575],
+ device='cuda:3'), covar=tensor([0.4059, 0.4036, 0.1005, 0.2583, 0.1972, 0.2185, 0.1846, 0.3929],
+ device='cuda:3'), in_proj_covar=tensor([0.0885, 0.0855, 0.0718, 0.0833, 0.0931, 0.0790, 0.0701, 0.0761],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:49:24,851 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 09:49:25,693 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0279, 2.4530, 1.9142, 2.9684, 1.3005, 1.6386, 2.0013, 2.3925],
+ device='cuda:3'), covar=tensor([0.0814, 0.0758, 0.1068, 0.0396, 0.1296, 0.1505, 0.1036, 0.0870],
+ device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0212, 0.0254, 0.0216, 0.0216, 0.0254, 0.0257, 0.0226],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 09:49:29,029 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7910, 6.0302, 5.0784, 2.4316, 5.2542, 5.5888, 5.6593, 5.2912],
+ device='cuda:3'), covar=tensor([0.0583, 0.0323, 0.0863, 0.4476, 0.0630, 0.0581, 0.0833, 0.0492],
+ device='cuda:3'), in_proj_covar=tensor([0.0465, 0.0368, 0.0379, 0.0479, 0.0373, 0.0365, 0.0367, 0.0322],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:49:33,727 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 09:49:36,455 INFO [train.py:901] (3/4) Epoch 11, batch 800, loss[loss=0.2155, simple_loss=0.2924, pruned_loss=0.06937, over 7939.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3111, pruned_loss=0.08119, over 1583001.09 frames. ], batch size: 20, lr: 7.11e-03, grad_scale: 8.0
+2023-02-06 09:49:49,270 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.494e+02 2.971e+02 3.970e+02 9.403e+02, threshold=5.941e+02, percent-clipped=2.0
+2023-02-06 09:49:58,275 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81661.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:49:59,657 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=81663.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:50:11,545 INFO [train.py:901] (3/4) Epoch 11, batch 850, loss[loss=0.2052, simple_loss=0.2888, pruned_loss=0.06083, over 8384.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3118, pruned_loss=0.0816, over 1594676.96 frames. ], batch size: 49, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:50:16,502 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=81688.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:50:46,069 INFO [train.py:901] (3/4) Epoch 11, batch 900, loss[loss=0.2129, simple_loss=0.2903, pruned_loss=0.06771, over 7809.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3125, pruned_loss=0.08156, over 1599236.56 frames. ], batch size: 20, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:50:58,840 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.543e+02 3.289e+02 4.286e+02 9.063e+02, threshold=6.577e+02, percent-clipped=7.0
+2023-02-06 09:51:08,746 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2108, 1.2419, 1.4928, 1.1132, 0.7880, 1.2958, 1.1356, 0.9238],
+ device='cuda:3'), covar=tensor([0.0608, 0.1330, 0.1772, 0.1581, 0.0616, 0.1617, 0.0769, 0.0734],
+ device='cuda:3'), in_proj_covar=tensor([0.0102, 0.0154, 0.0195, 0.0159, 0.0106, 0.0165, 0.0118, 0.0139],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006],
+ device='cuda:3')
+2023-02-06 09:51:18,686 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81776.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:51:20,088 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=81778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:51:21,842 INFO [train.py:901] (3/4) Epoch 11, batch 950, loss[loss=0.2431, simple_loss=0.3187, pruned_loss=0.08373, over 8470.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3125, pruned_loss=0.0811, over 1604467.00 frames. ], batch size: 49, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:51:37,578 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3897, 1.4915, 1.6818, 1.2885, 0.8943, 1.8169, 0.1272, 1.0957],
+ device='cuda:3'), covar=tensor([0.3137, 0.1797, 0.0580, 0.1897, 0.4455, 0.0490, 0.3237, 0.1825],
+ device='cuda:3'), in_proj_covar=tensor([0.0163, 0.0164, 0.0094, 0.0210, 0.0253, 0.0102, 0.0162, 0.0159],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 09:51:51,848 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 09:51:56,006 INFO [train.py:901] (3/4) Epoch 11, batch 1000, loss[loss=0.2455, simple_loss=0.3206, pruned_loss=0.08517, over 8424.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3124, pruned_loss=0.08076, over 1610248.77 frames. ], batch size: 49, lr: 7.10e-03, grad_scale: 8.0
+2023-02-06 09:52:06,324 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5837, 1.9478, 3.1758, 1.3985, 2.2479, 1.9928, 1.6575, 2.0878],
+ device='cuda:3'), covar=tensor([0.1568, 0.2063, 0.0599, 0.3821, 0.1498, 0.2640, 0.1705, 0.2076],
+ device='cuda:3'), in_proj_covar=tensor([0.0485, 0.0515, 0.0537, 0.0582, 0.0618, 0.0552, 0.0474, 0.0614],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:52:07,432 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.713e+02 3.211e+02 4.023e+02 7.481e+02, threshold=6.422e+02, percent-clipped=3.0
+2023-02-06 09:52:08,395 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=81849.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:52:13,878 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 09:52:22,679 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 09:52:27,183 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 09:52:27,397 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=81874.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:52:31,947 INFO [train.py:901] (3/4) Epoch 11, batch 1050, loss[loss=0.319, simple_loss=0.3738, pruned_loss=0.1321, over 8693.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3116, pruned_loss=0.08077, over 1611270.98 frames. ], batch size: 34, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:52:39,042 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 09:53:06,135 INFO [train.py:901] (3/4) Epoch 11, batch 1100, loss[loss=0.3085, simple_loss=0.3606, pruned_loss=0.1282, over 8244.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3117, pruned_loss=0.08085, over 1613527.67 frames. ], batch size: 24, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:53:07,788 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3908, 2.0717, 3.0042, 2.3414, 2.7489, 2.2595, 1.7951, 1.4688],
+ device='cuda:3'), covar=tensor([0.3520, 0.3754, 0.1132, 0.2417, 0.1727, 0.2004, 0.1590, 0.3868],
+ device='cuda:3'), in_proj_covar=tensor([0.0872, 0.0841, 0.0711, 0.0821, 0.0912, 0.0780, 0.0692, 0.0752],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:53:18,507 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.436e+02 2.887e+02 3.709e+02 9.106e+02, threshold=5.774e+02, percent-clipped=2.0
+2023-02-06 09:53:38,028 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0
+2023-02-06 09:53:41,608 INFO [train.py:901] (3/4) Epoch 11, batch 1150, loss[loss=0.2456, simple_loss=0.3054, pruned_loss=0.09286, over 7540.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3112, pruned_loss=0.08046, over 1614746.75 frames. ], batch size: 18, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:53:51,771 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 09:54:00,459 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82005.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:10,936 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-02-06 09:54:14,795 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7325, 2.2057, 3.6091, 2.7413, 3.1646, 2.3823, 1.8904, 1.9776],
+ device='cuda:3'), covar=tensor([0.3574, 0.4423, 0.1151, 0.2470, 0.1798, 0.2006, 0.1591, 0.3981],
+ device='cuda:3'), in_proj_covar=tensor([0.0868, 0.0838, 0.0708, 0.0816, 0.0905, 0.0776, 0.0688, 0.0747],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:54:17,074 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.06 vs. limit=2.0
+2023-02-06 09:54:17,962 INFO [train.py:901] (3/4) Epoch 11, batch 1200, loss[loss=0.1825, simple_loss=0.2516, pruned_loss=0.05669, over 7541.00 frames. ], tot_loss[loss=0.2359, simple_loss=0.311, pruned_loss=0.08044, over 1617763.92 frames. ], batch size: 18, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:54:18,727 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:18,848 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82032.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:20,238 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82034.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:29,493 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.664e+02 3.172e+02 3.772e+02 1.117e+03, threshold=6.345e+02, percent-clipped=5.0
+2023-02-06 09:54:36,791 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82057.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:38,209 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82059.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:48,002 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82073.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:54:53,473 INFO [train.py:901] (3/4) Epoch 11, batch 1250, loss[loss=0.2148, simple_loss=0.2837, pruned_loss=0.07296, over 7982.00 frames. ], tot_loss[loss=0.2354, simple_loss=0.3102, pruned_loss=0.08034, over 1612508.71 frames. ], batch size: 21, lr: 7.09e-03, grad_scale: 8.0
+2023-02-06 09:55:29,351 INFO [train.py:901] (3/4) Epoch 11, batch 1300, loss[loss=0.2203, simple_loss=0.3017, pruned_loss=0.06948, over 8460.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3107, pruned_loss=0.08038, over 1614594.79 frames. ], batch size: 25, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:55:32,254 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3291, 1.8509, 2.7581, 2.1767, 2.4933, 2.1484, 1.7650, 1.1028],
+ device='cuda:3'), covar=tensor([0.3734, 0.3773, 0.1026, 0.2352, 0.1757, 0.2145, 0.1589, 0.3953],
+ device='cuda:3'), in_proj_covar=tensor([0.0880, 0.0849, 0.0713, 0.0824, 0.0914, 0.0785, 0.0696, 0.0754],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 09:55:40,431 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82147.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 09:55:40,862 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.583e+02 3.223e+02 4.179e+02 7.623e+02, threshold=6.447e+02, percent-clipped=2.0
+2023-02-06 09:55:46,226 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1411, 4.1224, 3.7376, 2.0275, 3.6906, 3.6491, 3.8235, 3.4041],
+ device='cuda:3'), covar=tensor([0.0796, 0.0549, 0.0883, 0.4576, 0.0846, 0.1179, 0.1068, 0.0914],
+ device='cuda:3'), in_proj_covar=tensor([0.0460, 0.0366, 0.0375, 0.0476, 0.0370, 0.0363, 0.0366, 0.0320],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 09:56:03,684 INFO [train.py:901] (3/4) Epoch 11, batch 1350, loss[loss=0.2358, simple_loss=0.3033, pruned_loss=0.08415, over 7546.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3111, pruned_loss=0.08089, over 1615073.11 frames. ], batch size: 18, lr: 7.08e-03, grad_scale: 8.0
+2023-02-06 09:56:38,846 INFO [train.py:901] (3/4) Epoch 11, batch 1400, loss[loss=0.232, simple_loss=0.3186, pruned_loss=0.07274, over 8257.00 frames.
], tot_loss[loss=0.2366, simple_loss=0.3116, pruned_loss=0.08081, over 1619317.89 frames. ], batch size: 24, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:56:51,062 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.627e+02 3.119e+02 3.954e+02 1.224e+03, threshold=6.238e+02, percent-clipped=1.0 +2023-02-06 09:57:13,115 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5285, 2.9762, 2.6060, 4.0183, 1.7814, 1.9793, 2.4934, 3.1341], + device='cuda:3'), covar=tensor([0.0780, 0.0916, 0.0985, 0.0273, 0.1263, 0.1549, 0.1133, 0.0825], + device='cuda:3'), in_proj_covar=tensor([0.0241, 0.0214, 0.0255, 0.0219, 0.0218, 0.0256, 0.0254, 0.0225], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 09:57:13,607 INFO [train.py:901] (3/4) Epoch 11, batch 1450, loss[loss=0.1769, simple_loss=0.2494, pruned_loss=0.05217, over 8051.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3109, pruned_loss=0.08023, over 1619072.70 frames. ], batch size: 20, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:57:27,756 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 09:57:46,369 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.72 vs. limit=5.0 +2023-02-06 09:57:48,797 INFO [train.py:901] (3/4) Epoch 11, batch 1500, loss[loss=0.2498, simple_loss=0.3246, pruned_loss=0.0875, over 8686.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3115, pruned_loss=0.08111, over 1615053.81 frames. ], batch size: 39, lr: 7.08e-03, grad_scale: 8.0 +2023-02-06 09:58:01,381 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.943e+02 2.743e+02 3.193e+02 4.270e+02 9.879e+02, threshold=6.387e+02, percent-clipped=7.0 +2023-02-06 09:58:02,224 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:13,199 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:24,634 INFO [train.py:901] (3/4) Epoch 11, batch 1550, loss[loss=0.2275, simple_loss=0.3119, pruned_loss=0.0716, over 8357.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3104, pruned_loss=0.08029, over 1614918.40 frames. ], batch size: 24, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 09:58:39,990 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82403.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:50,129 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:50,255 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9734, 2.3106, 1.9199, 2.9556, 1.4449, 1.6103, 1.9668, 2.3744], + device='cuda:3'), covar=tensor([0.0825, 0.0903, 0.1069, 0.0438, 0.1184, 0.1521, 0.1059, 0.0866], + device='cuda:3'), in_proj_covar=tensor([0.0239, 0.0212, 0.0251, 0.0216, 0.0216, 0.0252, 0.0253, 0.0223], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 09:58:57,865 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:58:59,756 INFO [train.py:901] (3/4) Epoch 11, batch 1600, loss[loss=0.1958, simple_loss=0.282, pruned_loss=0.05478, over 8342.00 frames. 
], tot_loss[loss=0.2358, simple_loss=0.3109, pruned_loss=0.08037, over 1620563.34 frames. ], batch size: 26, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 09:59:13,002 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.328e+02 2.878e+02 3.468e+02 7.869e+02, threshold=5.757e+02, percent-clipped=2.0 +2023-02-06 09:59:24,128 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 09:59:36,407 INFO [train.py:901] (3/4) Epoch 11, batch 1650, loss[loss=0.2993, simple_loss=0.3511, pruned_loss=0.1237, over 7084.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3115, pruned_loss=0.08123, over 1617160.41 frames. ], batch size: 71, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 09:59:57,314 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82511.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:00:11,612 INFO [train.py:901] (3/4) Epoch 11, batch 1700, loss[loss=0.2211, simple_loss=0.3096, pruned_loss=0.06629, over 8330.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3114, pruned_loss=0.08076, over 1616480.18 frames. ], batch size: 25, lr: 7.07e-03, grad_scale: 8.0 +2023-02-06 10:00:12,448 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:00:17,635 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 10:00:23,205 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.517e+02 3.185e+02 4.066e+02 8.085e+02, threshold=6.370e+02, percent-clipped=5.0 +2023-02-06 10:00:47,531 INFO [train.py:901] (3/4) Epoch 11, batch 1750, loss[loss=0.2437, simple_loss=0.31, pruned_loss=0.08871, over 8101.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3103, pruned_loss=0.07977, over 1614846.84 frames. ], batch size: 23, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:00:52,546 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:01:00,903 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5835, 2.6284, 1.8516, 2.3696, 2.3068, 1.5328, 2.1350, 2.1850], + device='cuda:3'), covar=tensor([0.1328, 0.0344, 0.0993, 0.0559, 0.0578, 0.1390, 0.0882, 0.1025], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0234, 0.0313, 0.0298, 0.0300, 0.0324, 0.0340, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:01:23,316 INFO [train.py:901] (3/4) Epoch 11, batch 1800, loss[loss=0.2496, simple_loss=0.3124, pruned_loss=0.09344, over 7660.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3113, pruned_loss=0.08045, over 1620898.46 frames. ], batch size: 19, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:01:35,825 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.598e+02 3.107e+02 4.193e+02 1.199e+03, threshold=6.213e+02, percent-clipped=8.0 +2023-02-06 10:01:58,586 INFO [train.py:901] (3/4) Epoch 11, batch 1850, loss[loss=0.2222, simple_loss=0.2831, pruned_loss=0.08068, over 7540.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3113, pruned_loss=0.08046, over 1620457.39 frames. 
], batch size: 18, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:02:18,370 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82708.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:02:21,804 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1708, 1.7396, 3.5177, 1.3570, 2.2372, 3.8566, 3.9452, 3.3205], + device='cuda:3'), covar=tensor([0.0938, 0.1414, 0.0302, 0.2139, 0.1010, 0.0222, 0.0433, 0.0559], + device='cuda:3'), in_proj_covar=tensor([0.0258, 0.0294, 0.0255, 0.0285, 0.0270, 0.0232, 0.0330, 0.0288], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 10:02:26,598 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:02:32,178 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7302, 2.3016, 3.7062, 2.7124, 3.0467, 2.4883, 1.9347, 1.7813], + device='cuda:3'), covar=tensor([0.3586, 0.4223, 0.1041, 0.2713, 0.2033, 0.2022, 0.1680, 0.4483], + device='cuda:3'), in_proj_covar=tensor([0.0887, 0.0862, 0.0727, 0.0836, 0.0932, 0.0793, 0.0704, 0.0761], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:02:34,011 INFO [train.py:901] (3/4) Epoch 11, batch 1900, loss[loss=0.2613, simple_loss=0.3358, pruned_loss=0.09337, over 8189.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3114, pruned_loss=0.08043, over 1618784.17 frames. ], batch size: 23, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:02:43,169 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 10:02:43,697 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:02:45,528 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.442e+02 3.142e+02 3.936e+02 6.780e+02, threshold=6.284e+02, percent-clipped=1.0 +2023-02-06 10:03:08,869 INFO [train.py:901] (3/4) Epoch 11, batch 1950, loss[loss=0.2185, simple_loss=0.2824, pruned_loss=0.07733, over 7219.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3111, pruned_loss=0.08108, over 1616772.43 frames. ], batch size: 16, lr: 7.06e-03, grad_scale: 8.0 +2023-02-06 10:03:12,343 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 10:03:13,861 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=82788.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:03:17,231 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2825, 2.6651, 3.3086, 1.2897, 3.2717, 1.9563, 1.5596, 1.9596], + device='cuda:3'), covar=tensor([0.0495, 0.0222, 0.0161, 0.0527, 0.0253, 0.0597, 0.0644, 0.0355], + device='cuda:3'), in_proj_covar=tensor([0.0382, 0.0317, 0.0266, 0.0376, 0.0304, 0.0468, 0.0352, 0.0347], + device='cuda:3'), out_proj_covar=tensor([1.0988e-04, 8.9128e-05, 7.4872e-05, 1.0638e-04, 8.6597e-05, 1.4414e-04, + 1.0123e-04, 9.9186e-05], device='cuda:3') +2023-02-06 10:03:26,496 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-06 10:03:32,228 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=82813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:03:39,053 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:03:45,036 INFO [train.py:901] (3/4) Epoch 11, batch 2000, loss[loss=0.1845, simple_loss=0.2569, pruned_loss=0.05609, over 7232.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3108, pruned_loss=0.08119, over 1617263.28 frames. ], batch size: 16, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:03:47,029 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 10:03:56,671 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.675e+02 3.279e+02 3.987e+02 1.082e+03, threshold=6.559e+02, percent-clipped=7.0 +2023-02-06 10:04:01,562 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82855.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:04:09,121 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4032, 1.5936, 4.2324, 1.8957, 2.1880, 4.9950, 4.9699, 4.2482], + device='cuda:3'), covar=tensor([0.0960, 0.1639, 0.0299, 0.1888, 0.1243, 0.0164, 0.0309, 0.0584], + device='cuda:3'), in_proj_covar=tensor([0.0253, 0.0289, 0.0249, 0.0279, 0.0266, 0.0228, 0.0323, 0.0283], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 10:04:12,622 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5913, 4.5005, 3.9710, 2.1390, 4.0446, 4.1952, 4.1550, 3.8852], + device='cuda:3'), covar=tensor([0.0678, 0.0596, 0.1098, 0.4451, 0.0794, 0.0767, 0.1157, 0.0908], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0375, 0.0382, 0.0483, 0.0375, 0.0366, 0.0369, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:04:16,807 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5234, 2.1808, 4.1012, 1.3543, 3.0201, 2.0957, 1.6974, 2.6984], + device='cuda:3'), covar=tensor([0.1712, 0.1986, 0.0758, 0.3873, 0.1461, 0.2704, 0.1811, 0.2374], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0512, 0.0534, 0.0574, 0.0613, 0.0545, 0.0468, 0.0607], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:04:19,320 INFO [train.py:901] (3/4) Epoch 11, batch 2050, loss[loss=0.2378, simple_loss=0.3076, pruned_loss=0.08402, over 7924.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3111, pruned_loss=0.08156, over 1613740.11 frames. ], batch size: 20, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:04:55,299 INFO [train.py:901] (3/4) Epoch 11, batch 2100, loss[loss=0.2094, simple_loss=0.2811, pruned_loss=0.06887, over 7521.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3108, pruned_loss=0.08092, over 1617179.46 frames. 
], batch size: 18, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:04:55,378 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=82931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:05:07,191 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.489e+02 3.174e+02 3.706e+02 9.083e+02, threshold=6.348e+02, percent-clipped=2.0 +2023-02-06 10:05:22,111 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=82970.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:05:29,198 INFO [train.py:901] (3/4) Epoch 11, batch 2150, loss[loss=0.2569, simple_loss=0.3321, pruned_loss=0.09084, over 8242.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3116, pruned_loss=0.0808, over 1618964.47 frames. ], batch size: 24, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:05:29,349 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=82981.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:06:04,069 INFO [train.py:901] (3/4) Epoch 11, batch 2200, loss[loss=0.2199, simple_loss=0.308, pruned_loss=0.06595, over 8333.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3107, pruned_loss=0.08069, over 1615142.97 frames. ], batch size: 26, lr: 7.05e-03, grad_scale: 8.0 +2023-02-06 10:06:15,764 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:06:16,953 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.510e+02 3.092e+02 4.104e+02 1.639e+03, threshold=6.185e+02, percent-clipped=4.0 +2023-02-06 10:06:31,480 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4783, 1.7633, 1.8712, 1.1696, 1.9312, 1.2755, 0.4757, 1.6638], + device='cuda:3'), covar=tensor([0.0306, 0.0199, 0.0164, 0.0292, 0.0230, 0.0553, 0.0502, 0.0158], + device='cuda:3'), in_proj_covar=tensor([0.0381, 0.0316, 0.0264, 0.0374, 0.0302, 0.0467, 0.0350, 0.0346], + device='cuda:3'), out_proj_covar=tensor([1.0941e-04, 8.8922e-05, 7.4357e-05, 1.0567e-04, 8.5977e-05, 1.4398e-04, + 1.0076e-04, 9.8853e-05], device='cuda:3') +2023-02-06 10:06:38,957 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83079.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:06:40,009 INFO [train.py:901] (3/4) Epoch 11, batch 2250, loss[loss=0.2529, simple_loss=0.3332, pruned_loss=0.08631, over 8574.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.313, pruned_loss=0.08223, over 1616867.10 frames. ], batch size: 39, lr: 7.04e-03, grad_scale: 8.0 +2023-02-06 10:06:55,779 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:07:13,675 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5053, 1.8658, 2.8178, 1.2950, 2.0712, 1.9439, 1.6109, 1.7876], + device='cuda:3'), covar=tensor([0.1707, 0.2021, 0.0745, 0.3961, 0.1523, 0.2661, 0.1859, 0.2077], + device='cuda:3'), in_proj_covar=tensor([0.0476, 0.0507, 0.0529, 0.0568, 0.0608, 0.0538, 0.0463, 0.0600], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:07:14,109 INFO [train.py:901] (3/4) Epoch 11, batch 2300, loss[loss=0.2296, simple_loss=0.2952, pruned_loss=0.08201, over 7437.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.312, pruned_loss=0.08191, over 1614060.53 frames. 
], batch size: 17, lr: 7.04e-03, grad_scale: 8.0 +2023-02-06 10:07:25,657 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.480e+02 3.199e+02 4.275e+02 9.806e+02, threshold=6.398e+02, percent-clipped=6.0 +2023-02-06 10:07:48,927 INFO [train.py:901] (3/4) Epoch 11, batch 2350, loss[loss=0.2265, simple_loss=0.3103, pruned_loss=0.07129, over 8458.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.3132, pruned_loss=0.08223, over 1614734.94 frames. ], batch size: 27, lr: 7.04e-03, grad_scale: 16.0 +2023-02-06 10:08:12,077 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83214.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:08:20,049 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83226.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:08:23,038 INFO [train.py:901] (3/4) Epoch 11, batch 2400, loss[loss=0.244, simple_loss=0.3142, pruned_loss=0.08688, over 8246.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3137, pruned_loss=0.08267, over 1610794.40 frames. ], batch size: 24, lr: 7.04e-03, grad_scale: 16.0 +2023-02-06 10:08:35,105 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.359e+02 2.853e+02 3.666e+02 7.740e+02, threshold=5.706e+02, percent-clipped=1.0 +2023-02-06 10:08:37,260 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83251.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:08:58,651 INFO [train.py:901] (3/4) Epoch 11, batch 2450, loss[loss=0.2923, simple_loss=0.3546, pruned_loss=0.115, over 7048.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3134, pruned_loss=0.08232, over 1612673.01 frames. ], batch size: 72, lr: 7.04e-03, grad_scale: 16.0 +2023-02-06 10:09:13,733 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:13,800 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83302.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:29,008 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83325.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:30,530 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:09:33,054 INFO [train.py:901] (3/4) Epoch 11, batch 2500, loss[loss=0.1914, simple_loss=0.2702, pruned_loss=0.05628, over 7211.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3123, pruned_loss=0.08178, over 1612254.02 frames. ], batch size: 16, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:09:44,607 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.634e+02 3.143e+02 3.904e+02 7.323e+02, threshold=6.285e+02, percent-clipped=4.0 +2023-02-06 10:10:05,062 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2309, 1.6152, 4.3742, 1.9974, 2.5066, 5.0682, 4.9881, 4.4969], + device='cuda:3'), covar=tensor([0.1046, 0.1591, 0.0271, 0.1772, 0.1012, 0.0169, 0.0390, 0.0455], + device='cuda:3'), in_proj_covar=tensor([0.0260, 0.0297, 0.0255, 0.0289, 0.0273, 0.0235, 0.0334, 0.0287], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 10:10:07,382 INFO [train.py:901] (3/4) Epoch 11, batch 2550, loss[loss=0.2063, simple_loss=0.2706, pruned_loss=0.07107, over 7249.00 frames. 
], tot_loss[loss=0.2384, simple_loss=0.3125, pruned_loss=0.08212, over 1611312.19 frames. ], batch size: 16, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:10:11,154 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 10:10:13,590 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3431, 2.1422, 1.6772, 1.9357, 1.7255, 1.2510, 1.6469, 1.7536], + device='cuda:3'), covar=tensor([0.1111, 0.0345, 0.0941, 0.0465, 0.0637, 0.1341, 0.0907, 0.0712], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0236, 0.0315, 0.0297, 0.0304, 0.0327, 0.0343, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:10:43,103 INFO [train.py:901] (3/4) Epoch 11, batch 2600, loss[loss=0.2336, simple_loss=0.3192, pruned_loss=0.07403, over 8425.00 frames. ], tot_loss[loss=0.2392, simple_loss=0.3132, pruned_loss=0.08259, over 1613997.88 frames. ], batch size: 27, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:10:49,500 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83440.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:10:54,705 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.607e+02 3.192e+02 4.372e+02 8.439e+02, threshold=6.384e+02, percent-clipped=10.0 +2023-02-06 10:11:17,510 INFO [train.py:901] (3/4) Epoch 11, batch 2650, loss[loss=0.224, simple_loss=0.3085, pruned_loss=0.06973, over 8299.00 frames. ], tot_loss[loss=0.2381, simple_loss=0.3122, pruned_loss=0.08201, over 1616291.17 frames. ], batch size: 23, lr: 7.03e-03, grad_scale: 16.0 +2023-02-06 10:11:25,738 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5378, 1.5010, 2.8592, 1.2980, 2.0259, 3.0046, 3.1136, 2.5775], + device='cuda:3'), covar=tensor([0.1027, 0.1362, 0.0350, 0.1945, 0.0822, 0.0288, 0.0499, 0.0623], + device='cuda:3'), in_proj_covar=tensor([0.0257, 0.0295, 0.0252, 0.0286, 0.0268, 0.0232, 0.0330, 0.0284], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0002], + device='cuda:3') +2023-02-06 10:11:33,051 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0246, 1.3066, 3.1753, 1.0406, 2.7531, 2.6073, 2.8782, 2.7462], + device='cuda:3'), covar=tensor([0.0797, 0.3659, 0.0759, 0.3591, 0.1409, 0.1011, 0.0713, 0.0905], + device='cuda:3'), in_proj_covar=tensor([0.0482, 0.0572, 0.0584, 0.0530, 0.0604, 0.0506, 0.0502, 0.0577], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:11:49,224 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1199, 4.0890, 3.6991, 2.0786, 3.6998, 3.7164, 3.7480, 3.2752], + device='cuda:3'), covar=tensor([0.0955, 0.0710, 0.1249, 0.4211, 0.0906, 0.0785, 0.1429, 0.0900], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0368, 0.0376, 0.0477, 0.0370, 0.0366, 0.0367, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:11:52,412 INFO [train.py:901] (3/4) Epoch 11, batch 2700, loss[loss=0.2704, simple_loss=0.3231, pruned_loss=0.1088, over 7697.00 frames. ], tot_loss[loss=0.238, simple_loss=0.3123, pruned_loss=0.08184, over 1614263.94 frames. 
], batch size: 18, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:12:04,666 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.578e+02 3.131e+02 4.095e+02 6.916e+02, threshold=6.263e+02, percent-clipped=2.0 +2023-02-06 10:12:11,629 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:12:23,378 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5902, 2.0263, 2.1834, 1.0665, 2.1342, 1.4808, 0.6239, 1.7592], + device='cuda:3'), covar=tensor([0.0448, 0.0216, 0.0170, 0.0382, 0.0298, 0.0651, 0.0574, 0.0219], + device='cuda:3'), in_proj_covar=tensor([0.0375, 0.0315, 0.0259, 0.0372, 0.0299, 0.0461, 0.0349, 0.0341], + device='cuda:3'), out_proj_covar=tensor([1.0767e-04, 8.8357e-05, 7.2768e-05, 1.0509e-04, 8.5232e-05, 1.4174e-04, + 1.0048e-04, 9.7331e-05], device='cuda:3') +2023-02-06 10:12:27,318 INFO [train.py:901] (3/4) Epoch 11, batch 2750, loss[loss=0.2198, simple_loss=0.2987, pruned_loss=0.07047, over 8254.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3135, pruned_loss=0.08275, over 1610403.99 frames. ], batch size: 24, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:13:03,334 INFO [train.py:901] (3/4) Epoch 11, batch 2800, loss[loss=0.2523, simple_loss=0.3315, pruned_loss=0.08654, over 8112.00 frames. ], tot_loss[loss=0.24, simple_loss=0.3139, pruned_loss=0.08305, over 1611515.62 frames. ], batch size: 23, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:13:13,811 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=83646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:13:15,055 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.535e+02 3.136e+02 3.769e+02 1.201e+03, threshold=6.273e+02, percent-clipped=3.0 +2023-02-06 10:13:32,688 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83673.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:13:35,279 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7089, 3.7057, 1.8691, 2.2741, 2.5163, 1.6123, 2.3215, 2.7677], + device='cuda:3'), covar=tensor([0.1937, 0.0467, 0.1406, 0.1000, 0.0880, 0.1797, 0.1462, 0.1188], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0235, 0.0313, 0.0294, 0.0301, 0.0323, 0.0338, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:13:37,605 INFO [train.py:901] (3/4) Epoch 11, batch 2850, loss[loss=0.3247, simple_loss=0.402, pruned_loss=0.1237, over 8315.00 frames. ], tot_loss[loss=0.2415, simple_loss=0.3151, pruned_loss=0.08397, over 1609640.78 frames. ], batch size: 25, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:13:47,914 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83696.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:05,642 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83721.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:12,967 INFO [train.py:901] (3/4) Epoch 11, batch 2900, loss[loss=0.2449, simple_loss=0.3237, pruned_loss=0.08304, over 8344.00 frames. ], tot_loss[loss=0.2401, simple_loss=0.3138, pruned_loss=0.08324, over 1610778.11 frames. 
], batch size: 26, lr: 7.02e-03, grad_scale: 16.0 +2023-02-06 10:14:25,269 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.545e+02 3.159e+02 4.165e+02 9.643e+02, threshold=6.318e+02, percent-clipped=5.0 +2023-02-06 10:14:27,059 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 10:14:34,296 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=83761.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:46,314 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83778.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:14:48,174 INFO [train.py:901] (3/4) Epoch 11, batch 2950, loss[loss=0.2822, simple_loss=0.346, pruned_loss=0.1092, over 8556.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3141, pruned_loss=0.08275, over 1613006.23 frames. ], batch size: 49, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:14:53,615 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 10:15:22,303 INFO [train.py:901] (3/4) Epoch 11, batch 3000, loss[loss=0.2464, simple_loss=0.319, pruned_loss=0.08692, over 7969.00 frames. ], tot_loss[loss=0.2391, simple_loss=0.3136, pruned_loss=0.08232, over 1609388.08 frames. ], batch size: 21, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:15:22,303 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 10:15:34,550 INFO [train.py:935] (3/4) Epoch 11, validation: loss=0.1889, simple_loss=0.2886, pruned_loss=0.04461, over 944034.00 frames. +2023-02-06 10:15:34,551 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 10:15:46,618 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.511e+02 2.977e+02 3.600e+02 5.313e+02, threshold=5.953e+02, percent-clipped=0.0 +2023-02-06 10:16:10,357 INFO [train.py:901] (3/4) Epoch 11, batch 3050, loss[loss=0.224, simple_loss=0.3039, pruned_loss=0.072, over 7982.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3134, pruned_loss=0.08153, over 1612731.94 frames. ], batch size: 21, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:16:30,257 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 10:16:43,146 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=83929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:16:44,283 INFO [train.py:901] (3/4) Epoch 11, batch 3100, loss[loss=0.2415, simple_loss=0.3245, pruned_loss=0.0792, over 8426.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3135, pruned_loss=0.08141, over 1613180.70 frames. ], batch size: 27, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:16:55,423 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 2.748e+02 3.262e+02 3.755e+02 7.942e+02, threshold=6.525e+02, percent-clipped=1.0 +2023-02-06 10:17:00,086 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=83954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:08,766 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=83967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:18,437 INFO [train.py:901] (3/4) Epoch 11, batch 3150, loss[loss=0.242, simple_loss=0.311, pruned_loss=0.08651, over 7513.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3133, pruned_loss=0.08175, over 1615799.30 frames. 
], batch size: 18, lr: 7.01e-03, grad_scale: 16.0 +2023-02-06 10:17:24,696 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9406, 1.5614, 2.1707, 1.8499, 2.0328, 1.8711, 1.5473, 0.7085], + device='cuda:3'), covar=tensor([0.3966, 0.3710, 0.1223, 0.2162, 0.1587, 0.2084, 0.1588, 0.3699], + device='cuda:3'), in_proj_covar=tensor([0.0884, 0.0865, 0.0726, 0.0837, 0.0935, 0.0794, 0.0701, 0.0763], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:17:34,200 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:44,270 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:17:53,260 INFO [train.py:901] (3/4) Epoch 11, batch 3200, loss[loss=0.2409, simple_loss=0.3197, pruned_loss=0.08103, over 8357.00 frames. ], tot_loss[loss=0.239, simple_loss=0.3132, pruned_loss=0.08235, over 1615350.62 frames. ], batch size: 24, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:18:01,384 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:18:05,771 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.726e+02 3.369e+02 4.220e+02 9.302e+02, threshold=6.739e+02, percent-clipped=4.0 +2023-02-06 10:18:27,190 INFO [train.py:901] (3/4) Epoch 11, batch 3250, loss[loss=0.2758, simple_loss=0.3401, pruned_loss=0.1057, over 8418.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3139, pruned_loss=0.08255, over 1615838.16 frames. ], batch size: 39, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:18:50,444 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84115.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:18:55,082 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:18:55,570 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 10:19:01,829 INFO [train.py:901] (3/4) Epoch 11, batch 3300, loss[loss=0.2185, simple_loss=0.2944, pruned_loss=0.07124, over 8131.00 frames. ], tot_loss[loss=0.2384, simple_loss=0.3125, pruned_loss=0.08214, over 1614642.98 frames. ], batch size: 22, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:19:13,378 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.729e+02 3.101e+02 4.103e+02 8.191e+02, threshold=6.202e+02, percent-clipped=3.0 +2023-02-06 10:19:29,807 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3552, 2.4085, 1.6963, 2.0339, 1.8745, 1.3680, 1.7447, 1.8826], + device='cuda:3'), covar=tensor([0.1300, 0.0309, 0.1030, 0.0539, 0.0616, 0.1351, 0.0921, 0.0872], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0233, 0.0314, 0.0293, 0.0301, 0.0322, 0.0337, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:19:35,417 INFO [train.py:901] (3/4) Epoch 11, batch 3350, loss[loss=0.2058, simple_loss=0.2931, pruned_loss=0.05929, over 7421.00 frames. ], tot_loss[loss=0.2382, simple_loss=0.3123, pruned_loss=0.08205, over 1612880.07 frames. 
], batch size: 17, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:20:01,020 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84217.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:20:10,209 INFO [train.py:901] (3/4) Epoch 11, batch 3400, loss[loss=0.2111, simple_loss=0.2953, pruned_loss=0.06344, over 8021.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3125, pruned_loss=0.08201, over 1612729.09 frames. ], batch size: 22, lr: 7.00e-03, grad_scale: 8.0 +2023-02-06 10:20:15,228 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84237.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:20:23,162 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.356e+02 2.553e+02 3.068e+02 3.977e+02 7.727e+02, threshold=6.137e+02, percent-clipped=2.0 +2023-02-06 10:20:26,657 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:20:28,773 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7117, 2.0700, 2.3469, 1.4968, 2.3637, 1.6635, 0.7921, 1.8619], + device='cuda:3'), covar=tensor([0.0419, 0.0228, 0.0159, 0.0371, 0.0246, 0.0572, 0.0512, 0.0200], + device='cuda:3'), in_proj_covar=tensor([0.0389, 0.0323, 0.0268, 0.0386, 0.0309, 0.0472, 0.0358, 0.0352], + device='cuda:3'), out_proj_covar=tensor([1.1150e-04, 9.0472e-05, 7.4890e-05, 1.0921e-04, 8.7879e-05, 1.4481e-04, + 1.0276e-04, 1.0054e-04], device='cuda:3') +2023-02-06 10:20:32,809 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0144, 2.4370, 2.9105, 1.4340, 2.8654, 1.7872, 1.5916, 1.9714], + device='cuda:3'), covar=tensor([0.0542, 0.0280, 0.0170, 0.0502, 0.0317, 0.0653, 0.0544, 0.0329], + device='cuda:3'), in_proj_covar=tensor([0.0389, 0.0323, 0.0267, 0.0386, 0.0309, 0.0472, 0.0358, 0.0352], + device='cuda:3'), out_proj_covar=tensor([1.1149e-04, 9.0398e-05, 7.4871e-05, 1.0912e-04, 8.7882e-05, 1.4486e-04, + 1.0264e-04, 1.0054e-04], device='cuda:3') +2023-02-06 10:20:34,982 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 10:20:45,376 INFO [train.py:901] (3/4) Epoch 11, batch 3450, loss[loss=0.2299, simple_loss=0.2999, pruned_loss=0.07996, over 8106.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3125, pruned_loss=0.08232, over 1612559.94 frames. ], batch size: 23, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:21:06,407 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84311.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:21:20,277 INFO [train.py:901] (3/4) Epoch 11, batch 3500, loss[loss=0.2402, simple_loss=0.3099, pruned_loss=0.08529, over 8237.00 frames. ], tot_loss[loss=0.2387, simple_loss=0.3134, pruned_loss=0.082, over 1617697.96 frames. ], batch size: 22, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:21:31,078 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:21:32,273 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.912e+02 2.703e+02 3.166e+02 4.187e+02 8.001e+02, threshold=6.332e+02, percent-clipped=6.0 +2023-02-06 10:21:36,488 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:21:48,769 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-06 10:21:49,639 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0208, 2.2910, 1.6838, 2.7455, 1.2696, 1.3904, 1.8729, 2.3012], + device='cuda:3'), covar=tensor([0.0757, 0.0867, 0.1055, 0.0447, 0.1246, 0.1637, 0.1010, 0.0752], + device='cuda:3'), in_proj_covar=tensor([0.0240, 0.0219, 0.0258, 0.0222, 0.0221, 0.0255, 0.0259, 0.0225], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 10:21:54,138 INFO [train.py:901] (3/4) Epoch 11, batch 3550, loss[loss=0.2616, simple_loss=0.3313, pruned_loss=0.0959, over 8320.00 frames. ], tot_loss[loss=0.2393, simple_loss=0.3142, pruned_loss=0.08214, over 1621565.26 frames. ], batch size: 26, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:22:25,867 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84426.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:22:28,970 INFO [train.py:901] (3/4) Epoch 11, batch 3600, loss[loss=0.2478, simple_loss=0.306, pruned_loss=0.09484, over 8083.00 frames. ], tot_loss[loss=0.238, simple_loss=0.313, pruned_loss=0.08156, over 1619071.27 frames. ], batch size: 21, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:22:41,776 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.788e+02 3.447e+02 4.179e+02 1.001e+03, threshold=6.895e+02, percent-clipped=4.0 +2023-02-06 10:22:48,585 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:22:50,733 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:03,534 INFO [train.py:901] (3/4) Epoch 11, batch 3650, loss[loss=0.2463, simple_loss=0.3128, pruned_loss=0.08991, over 8244.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3128, pruned_loss=0.0809, over 1621902.35 frames. ], batch size: 22, lr: 6.99e-03, grad_scale: 8.0 +2023-02-06 10:23:10,232 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1844, 1.4697, 1.5459, 1.2929, 0.9941, 1.3998, 1.8024, 1.8560], + device='cuda:3'), covar=tensor([0.0495, 0.1147, 0.1719, 0.1433, 0.0593, 0.1423, 0.0665, 0.0556], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0158, 0.0105, 0.0163, 0.0117, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 10:23:11,597 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84493.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:28,893 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84518.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:37,223 INFO [train.py:901] (3/4) Epoch 11, batch 3700, loss[loss=0.2013, simple_loss=0.2702, pruned_loss=0.06625, over 7537.00 frames. ], tot_loss[loss=0.2386, simple_loss=0.3138, pruned_loss=0.0817, over 1619676.40 frames. ], batch size: 18, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:23:46,131 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:23:48,592 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 10:23:49,869 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.648e+02 3.219e+02 3.938e+02 7.332e+02, threshold=6.437e+02, percent-clipped=1.0 +2023-02-06 10:23:58,482 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:24:07,287 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:24:11,840 INFO [train.py:901] (3/4) Epoch 11, batch 3750, loss[loss=0.2225, simple_loss=0.3153, pruned_loss=0.06485, over 8511.00 frames. ], tot_loss[loss=0.2377, simple_loss=0.3132, pruned_loss=0.08108, over 1619146.83 frames. ], batch size: 26, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:24:23,996 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:24:46,870 INFO [train.py:901] (3/4) Epoch 11, batch 3800, loss[loss=0.2575, simple_loss=0.3342, pruned_loss=0.09044, over 8348.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3122, pruned_loss=0.08067, over 1617601.01 frames. ], batch size: 26, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:24:58,773 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.607e+02 3.118e+02 4.251e+02 1.041e+03, threshold=6.237e+02, percent-clipped=4.0 +2023-02-06 10:25:00,935 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:13,124 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0 +2023-02-06 10:25:18,146 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84676.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:21,362 INFO [train.py:901] (3/4) Epoch 11, batch 3850, loss[loss=0.2207, simple_loss=0.2915, pruned_loss=0.07492, over 8082.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3116, pruned_loss=0.08049, over 1618621.92 frames. ], batch size: 21, lr: 6.98e-03, grad_scale: 8.0 +2023-02-06 10:25:22,267 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:32,829 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:39,751 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:43,784 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:47,043 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:25:51,556 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 10:25:55,492 INFO [train.py:901] (3/4) Epoch 11, batch 3900, loss[loss=0.2348, simple_loss=0.3151, pruned_loss=0.07729, over 8506.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3115, pruned_loss=0.08094, over 1615737.72 frames. 
], batch size: 49, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:26:03,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:26:08,299 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.619e+02 3.238e+02 3.926e+02 9.069e+02, threshold=6.476e+02, percent-clipped=5.0 +2023-02-06 10:26:30,324 INFO [train.py:901] (3/4) Epoch 11, batch 3950, loss[loss=0.3095, simple_loss=0.369, pruned_loss=0.125, over 8187.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3113, pruned_loss=0.08115, over 1610605.15 frames. ], batch size: 23, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:26:34,650 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9746, 3.8898, 2.4758, 2.6841, 2.9394, 1.9685, 2.8417, 2.9739], + device='cuda:3'), covar=tensor([0.1833, 0.0361, 0.1100, 0.0779, 0.0747, 0.1462, 0.1065, 0.1181], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0238, 0.0322, 0.0297, 0.0305, 0.0324, 0.0345, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:26:52,790 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=84813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:26:56,206 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84818.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:27:04,815 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:27:05,276 INFO [train.py:901] (3/4) Epoch 11, batch 4000, loss[loss=0.255, simple_loss=0.3286, pruned_loss=0.0907, over 8767.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3106, pruned_loss=0.08066, over 1610095.76 frames. ], batch size: 39, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:27:17,172 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.608e+02 2.990e+02 3.694e+02 8.393e+02, threshold=5.981e+02, percent-clipped=2.0 +2023-02-06 10:27:21,554 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84855.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:27:39,667 INFO [train.py:901] (3/4) Epoch 11, batch 4050, loss[loss=0.2618, simple_loss=0.3383, pruned_loss=0.0926, over 8677.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.3118, pruned_loss=0.08091, over 1611779.18 frames. ], batch size: 34, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:27:44,487 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:15,554 INFO [train.py:901] (3/4) Epoch 11, batch 4100, loss[loss=0.2366, simple_loss=0.3198, pruned_loss=0.07669, over 8465.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3122, pruned_loss=0.08129, over 1611141.86 frames. 
], batch size: 28, lr: 6.97e-03, grad_scale: 8.0 +2023-02-06 10:28:16,485 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84932.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:27,737 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.518e+02 2.978e+02 3.788e+02 7.594e+02, threshold=5.956e+02, percent-clipped=4.0 +2023-02-06 10:28:33,385 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84957.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:41,370 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:41,453 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=84969.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:43,738 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 10:28:49,610 INFO [train.py:901] (3/4) Epoch 11, batch 4150, loss[loss=0.2737, simple_loss=0.3377, pruned_loss=0.1048, over 8462.00 frames. ], tot_loss[loss=0.2383, simple_loss=0.3132, pruned_loss=0.08175, over 1613792.71 frames. ], batch size: 25, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:28:51,802 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6832, 1.5698, 2.8573, 1.3700, 2.0725, 3.0391, 3.1450, 2.5703], + device='cuda:3'), covar=tensor([0.1021, 0.1345, 0.0335, 0.1873, 0.0786, 0.0294, 0.0516, 0.0687], + device='cuda:3'), in_proj_covar=tensor([0.0262, 0.0298, 0.0257, 0.0289, 0.0271, 0.0237, 0.0337, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:28:54,617 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8570, 2.2605, 3.6622, 2.5877, 3.0195, 2.5534, 1.9852, 1.7601], + device='cuda:3'), covar=tensor([0.3264, 0.4194, 0.1141, 0.2797, 0.2065, 0.2039, 0.1590, 0.4306], + device='cuda:3'), in_proj_covar=tensor([0.0878, 0.0862, 0.0728, 0.0829, 0.0931, 0.0790, 0.0703, 0.0760], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:28:58,518 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=84994.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:28:59,079 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=84995.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:29:04,350 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:29:23,818 INFO [train.py:901] (3/4) Epoch 11, batch 4200, loss[loss=0.2324, simple_loss=0.3017, pruned_loss=0.0816, over 8129.00 frames. ], tot_loss[loss=0.2375, simple_loss=0.3123, pruned_loss=0.08138, over 1609570.69 frames. ], batch size: 22, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:29:36,445 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.581e+02 3.261e+02 3.967e+02 9.417e+02, threshold=6.523e+02, percent-clipped=7.0 +2023-02-06 10:29:47,918 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-06 10:29:50,139 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:29:58,035 INFO [train.py:901] (3/4) Epoch 11, batch 4250, loss[loss=0.3327, simple_loss=0.3779, pruned_loss=0.1438, over 7072.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.312, pruned_loss=0.0811, over 1611045.99 frames. ], batch size: 71, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:30:06,871 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:10,079 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 10:30:18,082 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85110.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:25,989 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0428, 1.2400, 3.2135, 1.0528, 2.7681, 2.6825, 2.8981, 2.7574], + device='cuda:3'), covar=tensor([0.0822, 0.3818, 0.0793, 0.3454, 0.1466, 0.0993, 0.0754, 0.0918], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0570, 0.0574, 0.0522, 0.0601, 0.0513, 0.0502, 0.0572], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:30:28,613 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:32,505 INFO [train.py:901] (3/4) Epoch 11, batch 4300, loss[loss=0.2045, simple_loss=0.2672, pruned_loss=0.07091, over 7223.00 frames. ], tot_loss[loss=0.2378, simple_loss=0.3125, pruned_loss=0.08153, over 1611768.03 frames. ], batch size: 16, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:30:33,970 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:30:45,182 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.616e+02 3.014e+02 4.154e+02 7.931e+02, threshold=6.027e+02, percent-clipped=5.0 +2023-02-06 10:30:54,029 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:31:06,961 INFO [train.py:901] (3/4) Epoch 11, batch 4350, loss[loss=0.2182, simple_loss=0.2845, pruned_loss=0.07592, over 7784.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3112, pruned_loss=0.0808, over 1613194.83 frames. ], batch size: 19, lr: 6.96e-03, grad_scale: 8.0 +2023-02-06 10:31:40,296 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 10:31:41,575 INFO [train.py:901] (3/4) Epoch 11, batch 4400, loss[loss=0.2486, simple_loss=0.3152, pruned_loss=0.09097, over 7936.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3105, pruned_loss=0.0797, over 1612906.63 frames. ], batch size: 20, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:31:54,340 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.553e+02 3.172e+02 3.669e+02 6.483e+02, threshold=6.345e+02, percent-clipped=4.0 +2023-02-06 10:32:01,485 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:05,695 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. 
limit=2.0 +2023-02-06 10:32:14,076 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:16,478 INFO [train.py:901] (3/4) Epoch 11, batch 4450, loss[loss=0.263, simple_loss=0.3403, pruned_loss=0.09282, over 8738.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3096, pruned_loss=0.07931, over 1613831.80 frames. ], batch size: 34, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:32:18,781 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:22,680 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 10:32:38,778 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:32:41,052 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 10:32:50,699 INFO [train.py:901] (3/4) Epoch 11, batch 4500, loss[loss=0.2046, simple_loss=0.2856, pruned_loss=0.0618, over 8082.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3085, pruned_loss=0.07856, over 1613839.88 frames. ], batch size: 21, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:33:03,395 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.629e+02 3.227e+02 4.085e+02 1.162e+03, threshold=6.455e+02, percent-clipped=2.0 +2023-02-06 10:33:11,826 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5369, 1.8618, 1.8697, 1.1460, 1.9502, 1.2774, 0.4158, 1.6965], + device='cuda:3'), covar=tensor([0.0306, 0.0186, 0.0169, 0.0299, 0.0251, 0.0572, 0.0521, 0.0166], + device='cuda:3'), in_proj_covar=tensor([0.0385, 0.0321, 0.0268, 0.0378, 0.0307, 0.0467, 0.0353, 0.0351], + device='cuda:3'), out_proj_covar=tensor([1.1011e-04, 8.9668e-05, 7.5148e-05, 1.0646e-04, 8.7415e-05, 1.4280e-04, + 1.0103e-04, 1.0028e-04], device='cuda:3') +2023-02-06 10:33:15,850 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 10:33:16,063 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85366.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:33:26,538 INFO [train.py:901] (3/4) Epoch 11, batch 4550, loss[loss=0.2868, simple_loss=0.3475, pruned_loss=0.1131, over 8274.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3101, pruned_loss=0.07982, over 1613786.88 frames. ], batch size: 23, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:33:33,518 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85391.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:33:59,382 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:01,129 INFO [train.py:901] (3/4) Epoch 11, batch 4600, loss[loss=0.2304, simple_loss=0.3158, pruned_loss=0.07247, over 7980.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3118, pruned_loss=0.08108, over 1612574.38 frames. 
], batch size: 21, lr: 6.95e-03, grad_scale: 8.0 +2023-02-06 10:34:11,935 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:13,789 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.573e+02 3.214e+02 4.149e+02 1.527e+03, threshold=6.427e+02, percent-clipped=2.0 +2023-02-06 10:34:28,035 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:33,581 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:36,251 INFO [train.py:901] (3/4) Epoch 11, batch 4650, loss[loss=0.2225, simple_loss=0.2976, pruned_loss=0.07372, over 8079.00 frames. ], tot_loss[loss=0.237, simple_loss=0.3115, pruned_loss=0.08119, over 1613811.09 frames. ], batch size: 21, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:34:50,386 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85501.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:34:56,543 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 10:35:11,112 INFO [train.py:901] (3/4) Epoch 11, batch 4700, loss[loss=0.248, simple_loss=0.3213, pruned_loss=0.08729, over 8497.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3111, pruned_loss=0.08107, over 1612232.69 frames. ], batch size: 26, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:35:12,710 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:22,559 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:23,053 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 2.022e+02 2.812e+02 3.491e+02 4.674e+02 1.006e+03, threshold=6.983e+02, percent-clipped=9.0 +2023-02-06 10:35:27,296 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4854, 1.8595, 3.0988, 1.2602, 2.3331, 1.9380, 1.5759, 2.1265], + device='cuda:3'), covar=tensor([0.1665, 0.2307, 0.0642, 0.3848, 0.1470, 0.2734, 0.1926, 0.2046], + device='cuda:3'), in_proj_covar=tensor([0.0488, 0.0518, 0.0532, 0.0579, 0.0618, 0.0555, 0.0475, 0.0612], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:35:30,014 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85558.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:45,765 INFO [train.py:901] (3/4) Epoch 11, batch 4750, loss[loss=0.1673, simple_loss=0.2438, pruned_loss=0.0454, over 7420.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.3101, pruned_loss=0.08076, over 1605123.72 frames. ], batch size: 17, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:35:47,965 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:35:53,311 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:36:09,830 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 10:36:11,809 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-06 10:36:13,760 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 10:36:20,650 INFO [train.py:901] (3/4) Epoch 11, batch 4800, loss[loss=0.2464, simple_loss=0.3201, pruned_loss=0.08637, over 8583.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.3121, pruned_loss=0.08241, over 1606744.24 frames. ], batch size: 34, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:36:28,782 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85643.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:36:32,777 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.628e+02 3.255e+02 4.281e+02 8.051e+02, threshold=6.510e+02, percent-clipped=3.0 +2023-02-06 10:36:40,045 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-06 10:36:55,320 INFO [train.py:901] (3/4) Epoch 11, batch 4850, loss[loss=0.2268, simple_loss=0.2994, pruned_loss=0.07712, over 7420.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3109, pruned_loss=0.08126, over 1609447.42 frames. ], batch size: 17, lr: 6.94e-03, grad_scale: 8.0 +2023-02-06 10:36:57,569 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:37:01,303 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 10:37:11,264 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6885, 1.6425, 3.0818, 1.2533, 2.1832, 3.3483, 3.3623, 2.8650], + device='cuda:3'), covar=tensor([0.1080, 0.1437, 0.0393, 0.2137, 0.0888, 0.0282, 0.0656, 0.0664], + device='cuda:3'), in_proj_covar=tensor([0.0260, 0.0294, 0.0256, 0.0290, 0.0268, 0.0234, 0.0336, 0.0288], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:37:15,342 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:37:30,196 INFO [train.py:901] (3/4) Epoch 11, batch 4900, loss[loss=0.2581, simple_loss=0.3326, pruned_loss=0.09178, over 8195.00 frames. ], tot_loss[loss=0.2374, simple_loss=0.3116, pruned_loss=0.0816, over 1613422.44 frames. ], batch size: 23, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:37:36,032 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-06 10:37:42,889 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.544e+02 3.151e+02 4.004e+02 8.063e+02, threshold=6.301e+02, percent-clipped=5.0 +2023-02-06 10:38:04,659 INFO [train.py:901] (3/4) Epoch 11, batch 4950, loss[loss=0.1985, simple_loss=0.2771, pruned_loss=0.05993, over 7915.00 frames. ], tot_loss[loss=0.2373, simple_loss=0.3114, pruned_loss=0.08162, over 1613507.59 frames. ], batch size: 20, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:38:11,395 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85790.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:39,606 INFO [train.py:901] (3/4) Epoch 11, batch 5000, loss[loss=0.2056, simple_loss=0.2863, pruned_loss=0.06244, over 7633.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3104, pruned_loss=0.08105, over 1609294.71 frames. 
], batch size: 19, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:38:46,527 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85840.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:49,824 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85845.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:51,925 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=85848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:38:52,299 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.585e+02 3.219e+02 4.097e+02 8.363e+02, threshold=6.438e+02, percent-clipped=6.0 +2023-02-06 10:39:03,550 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:08,904 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=85873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:13,945 INFO [train.py:901] (3/4) Epoch 11, batch 5050, loss[loss=0.221, simple_loss=0.2949, pruned_loss=0.0736, over 8089.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.311, pruned_loss=0.08161, over 1606055.54 frames. ], batch size: 21, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:39:21,269 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85892.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:22,012 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85893.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:30,144 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:39:39,821 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 10:39:48,590 INFO [train.py:901] (3/4) Epoch 11, batch 5100, loss[loss=0.2648, simple_loss=0.3397, pruned_loss=0.09497, over 8730.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3108, pruned_loss=0.0813, over 1611685.29 frames. ], batch size: 30, lr: 6.93e-03, grad_scale: 8.0 +2023-02-06 10:40:00,815 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85948.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:40:01,252 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.570e+02 3.113e+02 3.980e+02 6.838e+02, threshold=6.226e+02, percent-clipped=2.0 +2023-02-06 10:40:08,941 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=85960.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:19,557 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=85975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:23,488 INFO [train.py:901] (3/4) Epoch 11, batch 5150, loss[loss=0.2563, simple_loss=0.3262, pruned_loss=0.09324, over 8364.00 frames. ], tot_loss[loss=0.2368, simple_loss=0.311, pruned_loss=0.08137, over 1607387.19 frames. 
], batch size: 24, lr: 6.92e-03, grad_scale: 8.0 +2023-02-06 10:40:27,616 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=85987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:31,743 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4315, 2.1928, 3.2873, 2.6117, 2.7069, 2.4128, 1.8056, 1.7264], + device='cuda:3'), covar=tensor([0.4352, 0.4518, 0.1300, 0.2592, 0.2418, 0.2452, 0.2045, 0.4559], + device='cuda:3'), in_proj_covar=tensor([0.0886, 0.0864, 0.0734, 0.0834, 0.0938, 0.0796, 0.0701, 0.0770], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:40:42,274 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:51,394 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:40:59,435 INFO [train.py:901] (3/4) Epoch 11, batch 5200, loss[loss=0.2473, simple_loss=0.3123, pruned_loss=0.09116, over 7545.00 frames. ], tot_loss[loss=0.2361, simple_loss=0.3105, pruned_loss=0.08083, over 1611305.09 frames. ], batch size: 18, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:12,340 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.648e+02 3.082e+02 3.913e+02 1.007e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-06 10:41:27,743 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86070.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:35,327 INFO [train.py:901] (3/4) Epoch 11, batch 5250, loss[loss=0.2176, simple_loss=0.3038, pruned_loss=0.06574, over 8317.00 frames. ], tot_loss[loss=0.2365, simple_loss=0.3107, pruned_loss=0.08116, over 1612051.62 frames. ], batch size: 25, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:41:39,715 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:41:40,963 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 10:41:50,755 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:10,643 INFO [train.py:901] (3/4) Epoch 11, batch 5300, loss[loss=0.2434, simple_loss=0.3248, pruned_loss=0.08099, over 8347.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.3105, pruned_loss=0.08097, over 1616006.95 frames. ], batch size: 26, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:23,760 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.570e+02 3.118e+02 4.195e+02 8.045e+02, threshold=6.237e+02, percent-clipped=4.0 +2023-02-06 10:42:32,856 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:42:46,495 INFO [train.py:901] (3/4) Epoch 11, batch 5350, loss[loss=0.2545, simple_loss=0.3225, pruned_loss=0.09326, over 7929.00 frames. ], tot_loss[loss=0.2355, simple_loss=0.3097, pruned_loss=0.08064, over 1612806.94 frames. 
], batch size: 20, lr: 6.92e-03, grad_scale: 16.0 +2023-02-06 10:42:50,720 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:12,424 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86216.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:22,276 INFO [train.py:901] (3/4) Epoch 11, batch 5400, loss[loss=0.254, simple_loss=0.3277, pruned_loss=0.0902, over 8243.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3096, pruned_loss=0.08001, over 1618087.00 frames. ], batch size: 22, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:43:26,576 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86237.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:29,461 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86241.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:34,673 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.471e+02 3.223e+02 4.268e+02 9.619e+02, threshold=6.446e+02, percent-clipped=7.0 +2023-02-06 10:43:44,504 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:45,132 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86264.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:43:57,388 INFO [train.py:901] (3/4) Epoch 11, batch 5450, loss[loss=0.2291, simple_loss=0.2953, pruned_loss=0.08144, over 7248.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3089, pruned_loss=0.07979, over 1616216.42 frames. ], batch size: 16, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:03,068 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:05,819 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86292.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:44:25,136 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86319.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:34,081 INFO [train.py:901] (3/4) Epoch 11, batch 5500, loss[loss=0.2835, simple_loss=0.3676, pruned_loss=0.09975, over 8588.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3079, pruned_loss=0.07916, over 1613226.73 frames. ], batch size: 31, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:44:34,720 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 10:44:39,101 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. 
limit=2.0 +2023-02-06 10:44:40,912 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5287, 1.4304, 2.3091, 1.2206, 2.1145, 2.4792, 2.5651, 2.1028], + device='cuda:3'), covar=tensor([0.0890, 0.1184, 0.0474, 0.1960, 0.0666, 0.0429, 0.0725, 0.0807], + device='cuda:3'), in_proj_covar=tensor([0.0263, 0.0297, 0.0258, 0.0289, 0.0271, 0.0236, 0.0338, 0.0291], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:44:46,144 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.494e+02 3.013e+02 3.770e+02 8.759e+02, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 10:44:48,463 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:52,690 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:44:56,794 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:09,176 INFO [train.py:901] (3/4) Epoch 11, batch 5550, loss[loss=0.2619, simple_loss=0.3453, pruned_loss=0.08926, over 8326.00 frames. ], tot_loss[loss=0.2357, simple_loss=0.3102, pruned_loss=0.08056, over 1612774.67 frames. ], batch size: 26, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:10,778 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86383.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:27,859 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86407.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:45:33,154 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:44,325 INFO [train.py:901] (3/4) Epoch 11, batch 5600, loss[loss=0.3065, simple_loss=0.3603, pruned_loss=0.1263, over 8513.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3104, pruned_loss=0.08079, over 1613212.06 frames. ], batch size: 49, lr: 6.91e-03, grad_scale: 16.0 +2023-02-06 10:45:44,405 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:46,520 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86434.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:45:57,241 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.527e+02 3.003e+02 3.802e+02 9.548e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-06 10:46:03,247 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:06,613 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:17,346 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:18,539 INFO [train.py:901] (3/4) Epoch 11, batch 5650, loss[loss=0.2277, simple_loss=0.307, pruned_loss=0.07418, over 8254.00 frames. ], tot_loss[loss=0.2371, simple_loss=0.3118, pruned_loss=0.08117, over 1615966.06 frames. ], batch size: 24, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:46:39,891 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-06 10:46:52,899 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86529.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:46:53,988 INFO [train.py:901] (3/4) Epoch 11, batch 5700, loss[loss=0.2452, simple_loss=0.3246, pruned_loss=0.08293, over 8452.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3113, pruned_loss=0.08096, over 1619823.43 frames. ], batch size: 27, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:04,208 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:06,030 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.473e+02 3.032e+02 3.837e+02 8.433e+02, threshold=6.065e+02, percent-clipped=5.0 +2023-02-06 10:47:13,106 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0048, 1.6344, 1.3373, 1.6056, 1.3242, 1.1586, 1.2548, 1.3635], + device='cuda:3'), covar=tensor([0.1018, 0.0368, 0.1107, 0.0455, 0.0642, 0.1334, 0.0839, 0.0679], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0233, 0.0314, 0.0296, 0.0304, 0.0322, 0.0341, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 10:47:28,587 INFO [train.py:901] (3/4) Epoch 11, batch 5750, loss[loss=0.2138, simple_loss=0.2903, pruned_loss=0.06865, over 7539.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3112, pruned_loss=0.08067, over 1619279.43 frames. ], batch size: 18, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:47:40,235 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:42,134 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 10:47:47,613 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:47:47,754 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:03,810 INFO [train.py:901] (3/4) Epoch 11, batch 5800, loss[loss=0.2144, simple_loss=0.2794, pruned_loss=0.07475, over 7191.00 frames. ], tot_loss[loss=0.236, simple_loss=0.3105, pruned_loss=0.0808, over 1615149.33 frames. ], batch size: 16, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:05,413 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:48:17,063 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.802e+02 2.625e+02 3.434e+02 4.363e+02 1.044e+03, threshold=6.867e+02, percent-clipped=16.0 +2023-02-06 10:48:26,847 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86663.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:48:39,354 INFO [train.py:901] (3/4) Epoch 11, batch 5850, loss[loss=0.2511, simple_loss=0.3309, pruned_loss=0.08564, over 7966.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3097, pruned_loss=0.08015, over 1616679.73 frames. 
], batch size: 21, lr: 6.90e-03, grad_scale: 16.0 +2023-02-06 10:48:44,304 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86688.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:48:45,663 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86690.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:02,117 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:07,944 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:13,225 INFO [train.py:901] (3/4) Epoch 11, batch 5900, loss[loss=0.2882, simple_loss=0.3503, pruned_loss=0.113, over 8516.00 frames. ], tot_loss[loss=0.2363, simple_loss=0.3109, pruned_loss=0.08083, over 1614196.58 frames. ], batch size: 26, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:16,684 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:25,725 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.878e+02 2.650e+02 3.002e+02 3.837e+02 8.505e+02, threshold=6.004e+02, percent-clipped=1.0 +2023-02-06 10:49:33,361 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:49:48,257 INFO [train.py:901] (3/4) Epoch 11, batch 5950, loss[loss=0.1979, simple_loss=0.2751, pruned_loss=0.06039, over 7535.00 frames. ], tot_loss[loss=0.2369, simple_loss=0.3115, pruned_loss=0.08116, over 1616564.47 frames. ], batch size: 18, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:49:51,983 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:01,806 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. limit=5.0 +2023-02-06 10:50:03,643 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:03,789 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:05,873 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 10:50:06,871 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:09,091 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:18,541 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6134, 1.3920, 1.7146, 1.3763, 0.8372, 1.4256, 1.4424, 1.4769], + device='cuda:3'), covar=tensor([0.0541, 0.1236, 0.1696, 0.1342, 0.0606, 0.1468, 0.0735, 0.0609], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0159, 0.0104, 0.0164, 0.0116, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 10:50:20,479 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=86827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:22,934 INFO [train.py:901] (3/4) Epoch 11, batch 6000, loss[loss=0.278, simple_loss=0.3408, pruned_loss=0.1076, over 7980.00 frames. 
], tot_loss[loss=0.2371, simple_loss=0.3119, pruned_loss=0.08115, over 1619792.49 frames. ], batch size: 21, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:50:22,934 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 10:50:31,615 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3859, 1.7238, 2.6999, 1.2412, 1.9670, 1.6964, 1.5291, 1.8898], + device='cuda:3'), covar=tensor([0.1796, 0.2731, 0.0709, 0.4337, 0.1793, 0.3026, 0.2039, 0.2408], + device='cuda:3'), in_proj_covar=tensor([0.0490, 0.0522, 0.0538, 0.0586, 0.0620, 0.0555, 0.0473, 0.0616], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:50:35,330 INFO [train.py:935] (3/4) Epoch 11, validation: loss=0.1887, simple_loss=0.2887, pruned_loss=0.04439, over 944034.00 frames. +2023-02-06 10:50:35,330 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 10:50:36,208 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86832.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:50:39,275 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 10:50:47,363 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.431e+02 2.934e+02 3.566e+02 7.044e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-06 10:51:10,322 INFO [train.py:901] (3/4) Epoch 11, batch 6050, loss[loss=0.2104, simple_loss=0.2995, pruned_loss=0.06066, over 8313.00 frames. ], tot_loss[loss=0.2379, simple_loss=0.3125, pruned_loss=0.08161, over 1613929.42 frames. ], batch size: 25, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:20,527 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=86896.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:35,573 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:39,098 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=86922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:45,227 INFO [train.py:901] (3/4) Epoch 11, batch 6100, loss[loss=0.2352, simple_loss=0.3167, pruned_loss=0.0768, over 8368.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3112, pruned_loss=0.08101, over 1611675.43 frames. ], batch size: 24, lr: 6.89e-03, grad_scale: 16.0 +2023-02-06 10:51:53,491 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=86942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:51:58,237 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.604e+02 3.114e+02 3.901e+02 9.212e+02, threshold=6.229e+02, percent-clipped=4.0 +2023-02-06 10:52:06,857 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-06 10:52:06,982 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6514, 1.6099, 5.7067, 2.1371, 5.1500, 4.7941, 5.3158, 5.1630], + device='cuda:3'), covar=tensor([0.0362, 0.4588, 0.0318, 0.3392, 0.0892, 0.0797, 0.0386, 0.0451], + device='cuda:3'), in_proj_covar=tensor([0.0495, 0.0584, 0.0590, 0.0543, 0.0612, 0.0523, 0.0515, 0.0586], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 10:52:19,169 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=86979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:20,335 INFO [train.py:901] (3/4) Epoch 11, batch 6150, loss[loss=0.2263, simple_loss=0.3024, pruned_loss=0.07508, over 8505.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3099, pruned_loss=0.08002, over 1613193.88 frames. ], batch size: 26, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:52:36,922 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87004.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:52:47,759 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87020.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 10:52:55,888 INFO [train.py:901] (3/4) Epoch 11, batch 6200, loss[loss=0.1907, simple_loss=0.2728, pruned_loss=0.05434, over 7830.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3088, pruned_loss=0.07911, over 1611398.62 frames. ], batch size: 20, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:53:07,899 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.592e+02 3.192e+02 4.476e+02 1.804e+03, threshold=6.384e+02, percent-clipped=5.0 +2023-02-06 10:53:14,457 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:53:30,917 INFO [train.py:901] (3/4) Epoch 11, batch 6250, loss[loss=0.2075, simple_loss=0.2852, pruned_loss=0.06491, over 7941.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3091, pruned_loss=0.07968, over 1607958.52 frames. ], batch size: 20, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:06,554 INFO [train.py:901] (3/4) Epoch 11, batch 6300, loss[loss=0.25, simple_loss=0.3219, pruned_loss=0.08901, over 8080.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3098, pruned_loss=0.0801, over 1610455.68 frames. 
], batch size: 21, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:11,435 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7790, 2.0552, 3.4982, 1.4377, 2.6832, 2.2522, 1.7813, 2.3896], + device='cuda:3'), covar=tensor([0.1497, 0.2208, 0.0694, 0.3731, 0.1396, 0.2581, 0.1727, 0.2207], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0516, 0.0532, 0.0576, 0.0610, 0.0551, 0.0467, 0.0610], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:54:17,459 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8675, 1.5376, 3.3349, 1.3863, 2.2188, 3.5407, 3.6327, 3.0300], + device='cuda:3'), covar=tensor([0.1123, 0.1520, 0.0298, 0.1894, 0.0949, 0.0260, 0.0460, 0.0646], + device='cuda:3'), in_proj_covar=tensor([0.0260, 0.0293, 0.0255, 0.0288, 0.0269, 0.0231, 0.0334, 0.0289], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 10:54:19,292 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.563e+02 3.017e+02 3.734e+02 8.364e+02, threshold=6.034e+02, percent-clipped=3.0 +2023-02-06 10:54:36,427 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87173.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:38,325 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87176.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:39,797 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:41,627 INFO [train.py:901] (3/4) Epoch 11, batch 6350, loss[loss=0.2036, simple_loss=0.2933, pruned_loss=0.05697, over 7812.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3085, pruned_loss=0.07942, over 1607227.91 frames. ], batch size: 20, lr: 6.88e-03, grad_scale: 16.0 +2023-02-06 10:54:41,989 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 10:54:53,187 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87198.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:54:57,265 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:16,800 INFO [train.py:901] (3/4) Epoch 11, batch 6400, loss[loss=0.2484, simple_loss=0.3203, pruned_loss=0.08828, over 8466.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3075, pruned_loss=0.07844, over 1609806.79 frames. 
], batch size: 25, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:23,182 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87240.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:28,864 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87248.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:55:29,363 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.577e+02 3.020e+02 3.786e+02 7.428e+02, threshold=6.041e+02, percent-clipped=2.0 +2023-02-06 10:55:35,650 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1282, 1.4926, 1.5870, 1.2252, 0.9135, 1.3353, 1.6698, 1.5996], + device='cuda:3'), covar=tensor([0.0481, 0.1192, 0.1709, 0.1440, 0.0614, 0.1537, 0.0685, 0.0594], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0158, 0.0104, 0.0163, 0.0116, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 10:55:42,987 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6526, 2.0305, 2.0985, 1.3532, 2.3074, 1.4173, 0.7383, 1.7557], + device='cuda:3'), covar=tensor([0.0452, 0.0243, 0.0221, 0.0405, 0.0262, 0.0699, 0.0544, 0.0243], + device='cuda:3'), in_proj_covar=tensor([0.0383, 0.0322, 0.0266, 0.0378, 0.0308, 0.0464, 0.0349, 0.0341], + device='cuda:3'), out_proj_covar=tensor([1.0949e-04, 8.9885e-05, 7.4330e-05, 1.0629e-04, 8.7286e-05, 1.4163e-04, + 9.9369e-05, 9.6785e-05], device='cuda:3') +2023-02-06 10:55:51,534 INFO [train.py:901] (3/4) Epoch 11, batch 6450, loss[loss=0.248, simple_loss=0.3329, pruned_loss=0.08152, over 8570.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3077, pruned_loss=0.07838, over 1614161.53 frames. ], batch size: 31, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:55:59,190 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:14,184 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:27,324 INFO [train.py:901] (3/4) Epoch 11, batch 6500, loss[loss=0.2256, simple_loss=0.3118, pruned_loss=0.06971, over 8481.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3075, pruned_loss=0.07819, over 1612095.63 frames. ], batch size: 28, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:56:32,334 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:39,860 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.605e+02 3.245e+02 4.169e+02 7.875e+02, threshold=6.489e+02, percent-clipped=5.0 +2023-02-06 10:56:44,216 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87355.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:56:50,424 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87364.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:57:01,889 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87380.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:57:02,433 INFO [train.py:901] (3/4) Epoch 11, batch 6550, loss[loss=0.2715, simple_loss=0.3395, pruned_loss=0.1017, over 8561.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3075, pruned_loss=0.07881, over 1612589.78 frames. 
], batch size: 31, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:17,754 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 10:57:37,071 INFO [train.py:901] (3/4) Epoch 11, batch 6600, loss[loss=0.2212, simple_loss=0.3002, pruned_loss=0.07105, over 8022.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3086, pruned_loss=0.07918, over 1613197.22 frames. ], batch size: 22, lr: 6.87e-03, grad_scale: 16.0 +2023-02-06 10:57:37,776 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 10:57:50,090 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.293e+02 2.790e+02 3.732e+02 8.562e+02, threshold=5.581e+02, percent-clipped=1.0 +2023-02-06 10:58:11,506 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:11,542 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87479.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 10:58:12,690 INFO [train.py:901] (3/4) Epoch 11, batch 6650, loss[loss=0.1821, simple_loss=0.2706, pruned_loss=0.04685, over 7975.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3082, pruned_loss=0.07886, over 1617182.27 frames. ], batch size: 21, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:13,572 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8878, 1.7780, 1.7289, 1.6112, 1.1398, 1.6939, 2.0674, 1.8912], + device='cuda:3'), covar=tensor([0.0441, 0.1124, 0.1766, 0.1341, 0.0583, 0.1411, 0.0650, 0.0597], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0159, 0.0104, 0.0164, 0.0116, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 10:58:35,737 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.56 vs. limit=5.0 +2023-02-06 10:58:41,528 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:47,482 INFO [train.py:901] (3/4) Epoch 11, batch 6700, loss[loss=0.2897, simple_loss=0.365, pruned_loss=0.1072, over 8676.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.3084, pruned_loss=0.07927, over 1618180.05 frames. ], batch size: 34, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:58:58,345 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:58:59,467 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.493e+02 3.158e+02 4.170e+02 8.693e+02, threshold=6.316e+02, percent-clipped=8.0 +2023-02-06 10:59:16,930 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87572.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:22,970 INFO [train.py:901] (3/4) Epoch 11, batch 6750, loss[loss=0.2349, simple_loss=0.3057, pruned_loss=0.08199, over 7975.00 frames. ], tot_loss[loss=0.2336, simple_loss=0.3082, pruned_loss=0.07949, over 1613605.21 frames. 
], batch size: 21, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 10:59:30,579 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:37,571 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:40,790 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:41,382 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2084, 4.1569, 3.8240, 1.9203, 3.7103, 3.7744, 3.7622, 3.4085], + device='cuda:3'), covar=tensor([0.0781, 0.0690, 0.1053, 0.4864, 0.0837, 0.1038, 0.1473, 0.0916], + device='cuda:3'), in_proj_covar=tensor([0.0460, 0.0374, 0.0380, 0.0480, 0.0376, 0.0376, 0.0377, 0.0334], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 10:59:43,424 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:52,295 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=87623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 10:59:56,922 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 10:59:57,597 INFO [train.py:901] (3/4) Epoch 11, batch 6800, loss[loss=0.2119, simple_loss=0.2833, pruned_loss=0.07026, over 7803.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3069, pruned_loss=0.07895, over 1609012.00 frames. ], batch size: 19, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:01,251 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:00:10,523 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.375e+02 2.980e+02 3.798e+02 7.616e+02, threshold=5.961e+02, percent-clipped=2.0 +2023-02-06 11:00:32,377 INFO [train.py:901] (3/4) Epoch 11, batch 6850, loss[loss=0.2637, simple_loss=0.3307, pruned_loss=0.09838, over 7811.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.308, pruned_loss=0.07934, over 1606045.10 frames. ], batch size: 20, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:00:45,155 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 11:00:50,748 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87707.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:01:01,874 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87724.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:01:06,421 INFO [train.py:901] (3/4) Epoch 11, batch 6900, loss[loss=0.1973, simple_loss=0.2801, pruned_loss=0.05723, over 7431.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.3095, pruned_loss=0.07987, over 1611507.17 frames. 
], batch size: 17, lr: 6.86e-03, grad_scale: 16.0 +2023-02-06 11:01:10,025 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87735.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:01:19,188 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.628e+02 3.043e+02 4.130e+02 7.700e+02, threshold=6.086e+02, percent-clipped=2.0 +2023-02-06 11:01:26,844 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87760.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:01:41,614 INFO [train.py:901] (3/4) Epoch 11, batch 6950, loss[loss=0.2524, simple_loss=0.323, pruned_loss=0.09093, over 8358.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3091, pruned_loss=0.07946, over 1614634.32 frames. ], batch size: 24, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:01:52,562 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 11:02:11,618 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87823.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:16,819 INFO [train.py:901] (3/4) Epoch 11, batch 7000, loss[loss=0.2325, simple_loss=0.3116, pruned_loss=0.07674, over 8329.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.3101, pruned_loss=0.07982, over 1616561.07 frames. ], batch size: 25, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:02:22,314 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:29,501 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.548e+02 3.185e+02 4.052e+02 9.283e+02, threshold=6.369e+02, percent-clipped=6.0 +2023-02-06 11:02:39,540 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3190, 1.6900, 4.5116, 1.8880, 2.6639, 5.2775, 5.1678, 4.5062], + device='cuda:3'), covar=tensor([0.1120, 0.1581, 0.0249, 0.1995, 0.0986, 0.0134, 0.0243, 0.0560], + device='cuda:3'), in_proj_covar=tensor([0.0261, 0.0295, 0.0259, 0.0292, 0.0271, 0.0232, 0.0338, 0.0288], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 11:02:41,527 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:02:42,338 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8939, 1.4508, 1.5565, 1.3416, 0.8683, 1.2632, 1.4806, 1.4542], + device='cuda:3'), covar=tensor([0.0482, 0.1268, 0.1616, 0.1386, 0.0606, 0.1525, 0.0720, 0.0622], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0159, 0.0104, 0.0164, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 11:02:51,623 INFO [train.py:901] (3/4) Epoch 11, batch 7050, loss[loss=0.2642, simple_loss=0.324, pruned_loss=0.1022, over 7920.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3095, pruned_loss=0.07948, over 1616932.34 frames. ], batch size: 20, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:03:26,705 INFO [train.py:901] (3/4) Epoch 11, batch 7100, loss[loss=0.2036, simple_loss=0.2783, pruned_loss=0.06439, over 7814.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3082, pruned_loss=0.07859, over 1614051.93 frames. 
], batch size: 20, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:03:30,183 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6746, 4.7588, 4.1974, 1.9870, 4.1164, 4.2744, 4.2407, 3.8967], + device='cuda:3'), covar=tensor([0.0780, 0.0586, 0.1066, 0.4993, 0.0800, 0.0723, 0.1374, 0.0805], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0376, 0.0384, 0.0484, 0.0379, 0.0377, 0.0380, 0.0337], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:03:31,608 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87938.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:36,826 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:38,774 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.732e+02 3.356e+02 4.654e+02 1.650e+03, threshold=6.712e+02, percent-clipped=12.0 +2023-02-06 11:03:40,145 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:48,965 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=87963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:03:51,559 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=87967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:00,730 INFO [train.py:901] (3/4) Epoch 11, batch 7150, loss[loss=0.2106, simple_loss=0.3023, pruned_loss=0.05946, over 8284.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3083, pruned_loss=0.07849, over 1614082.69 frames. ], batch size: 23, lr: 6.85e-03, grad_scale: 16.0 +2023-02-06 11:04:01,630 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=87982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:05,830 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=87988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:04:09,952 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6212, 2.6765, 1.7103, 2.1412, 2.1842, 1.4015, 2.1207, 2.2012], + device='cuda:3'), covar=tensor([0.1240, 0.0325, 0.1148, 0.0617, 0.0640, 0.1370, 0.0856, 0.0748], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0234, 0.0317, 0.0297, 0.0300, 0.0317, 0.0341, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 11:04:36,664 INFO [train.py:901] (3/4) Epoch 11, batch 7200, loss[loss=0.206, simple_loss=0.2773, pruned_loss=0.06732, over 8097.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3085, pruned_loss=0.07835, over 1619459.53 frames. 
], batch size: 21, lr: 6.84e-03, grad_scale: 32.0 +2023-02-06 11:04:49,434 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.591e+02 3.086e+02 3.706e+02 9.715e+02, threshold=6.172e+02, percent-clipped=2.0 +2023-02-06 11:04:57,782 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:00,587 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3544, 1.2746, 1.4926, 1.1588, 0.7761, 1.2931, 1.1630, 1.0793], + device='cuda:3'), covar=tensor([0.0518, 0.1204, 0.1699, 0.1406, 0.0588, 0.1475, 0.0696, 0.0645], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0158, 0.0103, 0.0164, 0.0115, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0006], + device='cuda:3') +2023-02-06 11:05:01,290 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:11,823 INFO [train.py:901] (3/4) Epoch 11, batch 7250, loss[loss=0.2104, simple_loss=0.2928, pruned_loss=0.06403, over 8320.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3097, pruned_loss=0.07939, over 1619329.39 frames. ], batch size: 25, lr: 6.84e-03, grad_scale: 32.0 +2023-02-06 11:05:12,643 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:21,492 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:37,915 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88118.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:39,357 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:05:46,919 INFO [train.py:901] (3/4) Epoch 11, batch 7300, loss[loss=0.2018, simple_loss=0.2902, pruned_loss=0.05665, over 7537.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3088, pruned_loss=0.07886, over 1616693.02 frames. ], batch size: 18, lr: 6.84e-03, grad_scale: 32.0 +2023-02-06 11:06:00,699 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.414e+02 2.958e+02 3.757e+02 7.369e+02, threshold=5.915e+02, percent-clipped=2.0 +2023-02-06 11:06:22,821 INFO [train.py:901] (3/4) Epoch 11, batch 7350, loss[loss=0.2379, simple_loss=0.3229, pruned_loss=0.07644, over 8522.00 frames. ], tot_loss[loss=0.2343, simple_loss=0.3098, pruned_loss=0.07943, over 1615140.94 frames. ], batch size: 39, lr: 6.84e-03, grad_scale: 32.0 +2023-02-06 11:06:32,251 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 11:06:32,470 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88194.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:06:33,350 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0 +2023-02-06 11:06:50,343 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:06:51,484 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 11:06:58,201 INFO [train.py:901] (3/4) Epoch 11, batch 7400, loss[loss=0.2645, simple_loss=0.3369, pruned_loss=0.09599, over 8507.00 frames. 
], tot_loss[loss=0.2366, simple_loss=0.312, pruned_loss=0.08063, over 1616548.88 frames. ], batch size: 26, lr: 6.84e-03, grad_scale: 16.0 +2023-02-06 11:07:03,172 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88238.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:07:11,883 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.577e+02 3.074e+02 3.691e+02 9.024e+02, threshold=6.148e+02, percent-clipped=4.0 +2023-02-06 11:07:20,903 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:07:32,879 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 11:07:33,591 INFO [train.py:901] (3/4) Epoch 11, batch 7450, loss[loss=0.21, simple_loss=0.2915, pruned_loss=0.0642, over 7927.00 frames. ], tot_loss[loss=0.2353, simple_loss=0.311, pruned_loss=0.07974, over 1618069.45 frames. ], batch size: 20, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:07:37,182 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1276, 2.4100, 1.9176, 2.9112, 1.3158, 1.6194, 1.7540, 2.5014], + device='cuda:3'), covar=tensor([0.0750, 0.0731, 0.0996, 0.0391, 0.1224, 0.1405, 0.1140, 0.0726], + device='cuda:3'), in_proj_covar=tensor([0.0237, 0.0212, 0.0256, 0.0217, 0.0218, 0.0255, 0.0255, 0.0223], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 11:07:53,288 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=88309.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:07:58,566 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88317.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:01,839 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:05,927 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3888, 1.9917, 2.8914, 2.3053, 2.6534, 2.1514, 1.7777, 1.3893], + device='cuda:3'), covar=tensor([0.3802, 0.4016, 0.1220, 0.2769, 0.1791, 0.2305, 0.1633, 0.4245], + device='cuda:3'), in_proj_covar=tensor([0.0888, 0.0864, 0.0732, 0.0843, 0.0929, 0.0801, 0.0703, 0.0765], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 11:08:07,741 INFO [train.py:901] (3/4) Epoch 11, batch 7500, loss[loss=0.2512, simple_loss=0.3212, pruned_loss=0.09063, over 7701.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3098, pruned_loss=0.0792, over 1613576.29 frames. 
], batch size: 18, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:08:13,316 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:15,896 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:19,204 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:20,935 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.783e+02 2.827e+02 3.509e+02 4.304e+02 1.282e+03, threshold=7.018e+02, percent-clipped=8.0 +2023-02-06 11:08:29,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:08:42,590 INFO [train.py:901] (3/4) Epoch 11, batch 7550, loss[loss=0.1941, simple_loss=0.2822, pruned_loss=0.053, over 8105.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.3101, pruned_loss=0.07998, over 1611676.33 frames. ], batch size: 23, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:08:46,446 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 11:09:08,829 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5983, 4.5608, 4.1573, 2.1379, 4.0580, 4.0161, 4.2373, 3.6686], + device='cuda:3'), covar=tensor([0.0822, 0.0617, 0.0996, 0.4906, 0.0956, 0.0954, 0.1397, 0.1025], + device='cuda:3'), in_proj_covar=tensor([0.0469, 0.0374, 0.0388, 0.0483, 0.0381, 0.0379, 0.0383, 0.0337], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:09:17,377 INFO [train.py:901] (3/4) Epoch 11, batch 7600, loss[loss=0.2204, simple_loss=0.2943, pruned_loss=0.07324, over 7812.00 frames. ], tot_loss[loss=0.2367, simple_loss=0.3116, pruned_loss=0.08083, over 1613642.04 frames. ], batch size: 20, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:09:31,032 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.301e+02 2.441e+02 2.975e+02 3.888e+02 6.138e+02, threshold=5.951e+02, percent-clipped=0.0 +2023-02-06 11:09:39,152 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:09:51,512 INFO [train.py:901] (3/4) Epoch 11, batch 7650, loss[loss=0.2308, simple_loss=0.3088, pruned_loss=0.07641, over 8623.00 frames. ], tot_loss[loss=0.2364, simple_loss=0.3115, pruned_loss=0.08068, over 1614356.49 frames. ], batch size: 34, lr: 6.83e-03, grad_scale: 16.0 +2023-02-06 11:10:26,450 INFO [train.py:901] (3/4) Epoch 11, batch 7700, loss[loss=0.2607, simple_loss=0.3277, pruned_loss=0.09682, over 8460.00 frames. ], tot_loss[loss=0.2366, simple_loss=0.3114, pruned_loss=0.08089, over 1615219.94 frames. ], batch size: 25, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:10:39,159 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 11:10:39,709 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.472e+02 3.053e+02 3.571e+02 8.603e+02, threshold=6.105e+02, percent-clipped=3.0 +2023-02-06 11:10:58,417 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88577.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:11:00,813 INFO [train.py:901] (3/4) Epoch 11, batch 7750, loss[loss=0.1868, simple_loss=0.2576, pruned_loss=0.05798, over 7803.00 frames. ], tot_loss[loss=0.238, simple_loss=0.3124, pruned_loss=0.08181, over 1613220.44 frames. ], batch size: 19, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:11:36,333 INFO [train.py:901] (3/4) Epoch 11, batch 7800, loss[loss=0.2474, simple_loss=0.3277, pruned_loss=0.08353, over 8338.00 frames. ], tot_loss[loss=0.2372, simple_loss=0.3118, pruned_loss=0.08131, over 1612927.92 frames. ], batch size: 25, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:11:48,834 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.685e+02 3.345e+02 4.152e+02 1.012e+03, threshold=6.690e+02, percent-clipped=6.0 +2023-02-06 11:11:50,872 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=88653.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:12:09,486 INFO [train.py:901] (3/4) Epoch 11, batch 7850, loss[loss=0.282, simple_loss=0.3406, pruned_loss=0.1117, over 7047.00 frames. ], tot_loss[loss=0.2362, simple_loss=0.311, pruned_loss=0.08072, over 1612579.49 frames. ], batch size: 71, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:12:42,901 INFO [train.py:901] (3/4) Epoch 11, batch 7900, loss[loss=0.2174, simple_loss=0.2964, pruned_loss=0.06923, over 7656.00 frames. ], tot_loss[loss=0.2356, simple_loss=0.3107, pruned_loss=0.0802, over 1612632.21 frames. ], batch size: 19, lr: 6.82e-03, grad_scale: 16.0 +2023-02-06 11:12:55,416 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.490e+02 3.060e+02 3.735e+02 6.734e+02, threshold=6.120e+02, percent-clipped=1.0 +2023-02-06 11:13:07,262 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=88768.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:13:15,806 INFO [train.py:901] (3/4) Epoch 11, batch 7950, loss[loss=0.1753, simple_loss=0.2546, pruned_loss=0.04797, over 7793.00 frames. ], tot_loss[loss=0.235, simple_loss=0.3105, pruned_loss=0.0797, over 1615892.90 frames. ], batch size: 19, lr: 6.81e-03, grad_scale: 16.0 +2023-02-06 11:13:16,456 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 11:13:49,354 INFO [train.py:901] (3/4) Epoch 11, batch 8000, loss[loss=0.2674, simple_loss=0.3335, pruned_loss=0.1007, over 7198.00 frames. ], tot_loss[loss=0.2344, simple_loss=0.3098, pruned_loss=0.07948, over 1612802.21 frames. ], batch size: 71, lr: 6.81e-03, grad_scale: 16.0 +2023-02-06 11:13:50,917 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=88833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:14:02,010 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.603e+02 3.071e+02 3.730e+02 8.421e+02, threshold=6.141e+02, percent-clipped=3.0 +2023-02-06 11:14:07,240 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=88858.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:14:22,234 INFO [train.py:901] (3/4) Epoch 11, batch 8050, loss[loss=0.1992, simple_loss=0.2677, pruned_loss=0.06536, over 7547.00 frames. 
], tot_loss[loss=0.2314, simple_loss=0.3066, pruned_loss=0.07812, over 1598174.39 frames. ], batch size: 18, lr: 6.81e-03, grad_scale: 16.0 +2023-02-06 11:14:54,629 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 11:14:58,680 INFO [train.py:901] (3/4) Epoch 12, batch 0, loss[loss=0.2195, simple_loss=0.2811, pruned_loss=0.07894, over 7686.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2811, pruned_loss=0.07894, over 7686.00 frames. ], batch size: 18, lr: 6.52e-03, grad_scale: 16.0 +2023-02-06 11:14:58,680 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 11:15:09,781 INFO [train.py:935] (3/4) Epoch 12, validation: loss=0.1897, simple_loss=0.2896, pruned_loss=0.04486, over 944034.00 frames. +2023-02-06 11:15:09,782 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 11:15:23,307 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 11:15:35,202 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.690e+02 3.540e+02 4.339e+02 7.249e+02, threshold=7.080e+02, percent-clipped=5.0 +2023-02-06 11:15:44,676 INFO [train.py:901] (3/4) Epoch 12, batch 50, loss[loss=0.2552, simple_loss=0.3047, pruned_loss=0.1028, over 7539.00 frames. ], tot_loss[loss=0.2433, simple_loss=0.3161, pruned_loss=0.08526, over 363240.91 frames. ], batch size: 18, lr: 6.52e-03, grad_scale: 16.0 +2023-02-06 11:15:46,145 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6161, 4.6040, 4.1706, 2.0155, 4.1848, 4.2146, 4.1708, 4.0384], + device='cuda:3'), covar=tensor([0.0898, 0.0554, 0.1092, 0.5233, 0.0843, 0.0791, 0.1422, 0.0770], + device='cuda:3'), in_proj_covar=tensor([0.0463, 0.0370, 0.0383, 0.0479, 0.0374, 0.0374, 0.0376, 0.0332], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:15:57,435 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 11:16:01,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7729, 1.9797, 2.1010, 1.4489, 2.1686, 1.6226, 0.5232, 1.8762], + device='cuda:3'), covar=tensor([0.0297, 0.0184, 0.0157, 0.0273, 0.0229, 0.0514, 0.0547, 0.0134], + device='cuda:3'), in_proj_covar=tensor([0.0388, 0.0327, 0.0272, 0.0382, 0.0315, 0.0474, 0.0358, 0.0353], + device='cuda:3'), out_proj_covar=tensor([1.1061e-04, 9.0904e-05, 7.5933e-05, 1.0699e-04, 8.9075e-05, 1.4421e-04, + 1.0177e-04, 1.0004e-04], device='cuda:3') +2023-02-06 11:16:19,065 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 11:16:19,764 INFO [train.py:901] (3/4) Epoch 12, batch 100, loss[loss=0.2743, simple_loss=0.351, pruned_loss=0.09882, over 8357.00 frames. ], tot_loss[loss=0.2395, simple_loss=0.3143, pruned_loss=0.08239, over 641770.83 frames. 
], batch size: 24, lr: 6.52e-03, grad_scale: 16.0 +2023-02-06 11:16:26,515 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89024.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:16:32,538 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4058, 3.0644, 2.4566, 4.0021, 1.7122, 2.0324, 2.3172, 3.3624], + device='cuda:3'), covar=tensor([0.0801, 0.0828, 0.0879, 0.0278, 0.1249, 0.1486, 0.1250, 0.0767], + device='cuda:3'), in_proj_covar=tensor([0.0235, 0.0216, 0.0255, 0.0216, 0.0220, 0.0253, 0.0258, 0.0222], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 11:16:33,261 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9067, 2.5040, 4.3484, 1.5502, 3.0707, 2.3745, 1.9405, 2.9502], + device='cuda:3'), covar=tensor([0.1471, 0.1996, 0.0719, 0.3611, 0.1411, 0.2604, 0.1630, 0.2097], + device='cuda:3'), in_proj_covar=tensor([0.0485, 0.0522, 0.0532, 0.0581, 0.0621, 0.0561, 0.0474, 0.0612], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:16:40,630 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:16:43,454 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89049.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:16:43,913 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.771e+02 3.256e+02 4.152e+02 1.357e+03, threshold=6.512e+02, percent-clipped=1.0 +2023-02-06 11:16:54,736 INFO [train.py:901] (3/4) Epoch 12, batch 150, loss[loss=0.2189, simple_loss=0.3053, pruned_loss=0.0663, over 8289.00 frames. ], tot_loss[loss=0.2358, simple_loss=0.311, pruned_loss=0.08031, over 857116.09 frames. ], batch size: 23, lr: 6.52e-03, grad_scale: 16.0 +2023-02-06 11:17:17,997 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.11 vs. limit=5.0 +2023-02-06 11:17:29,009 INFO [train.py:901] (3/4) Epoch 12, batch 200, loss[loss=0.2425, simple_loss=0.3177, pruned_loss=0.08369, over 8583.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3095, pruned_loss=0.07898, over 1028140.64 frames. ], batch size: 39, lr: 6.52e-03, grad_scale: 16.0 +2023-02-06 11:17:42,733 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8294, 2.4390, 3.1006, 2.2112, 1.6773, 3.2806, 0.8121, 2.1712], + device='cuda:3'), covar=tensor([0.2246, 0.1440, 0.0415, 0.2049, 0.3504, 0.0324, 0.3434, 0.1699], + device='cuda:3'), in_proj_covar=tensor([0.0165, 0.0168, 0.0101, 0.0212, 0.0253, 0.0104, 0.0164, 0.0163], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:17:53,942 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.699e+02 2.712e+02 3.423e+02 4.383e+02 1.008e+03, threshold=6.845e+02, percent-clipped=3.0 +2023-02-06 11:18:03,555 INFO [train.py:901] (3/4) Epoch 12, batch 250, loss[loss=0.2191, simple_loss=0.3131, pruned_loss=0.06259, over 8203.00 frames. ], tot_loss[loss=0.2349, simple_loss=0.311, pruned_loss=0.0794, over 1164819.44 frames. 
], batch size: 23, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:18:05,197 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1589, 2.3944, 1.9195, 2.9437, 1.3912, 1.5644, 1.9140, 2.6026], + device='cuda:3'), covar=tensor([0.0657, 0.0802, 0.0921, 0.0408, 0.1069, 0.1426, 0.0984, 0.0656], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0215, 0.0254, 0.0214, 0.0218, 0.0252, 0.0258, 0.0222], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 11:18:13,242 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 11:18:17,197 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 11:18:20,886 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:18:22,854 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 11:18:40,051 INFO [train.py:901] (3/4) Epoch 12, batch 300, loss[loss=0.2438, simple_loss=0.3216, pruned_loss=0.08298, over 8200.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3102, pruned_loss=0.07878, over 1266562.73 frames. ], batch size: 23, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:19:05,002 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.536e+02 3.052e+02 3.921e+02 6.584e+02, threshold=6.103e+02, percent-clipped=0.0 +2023-02-06 11:19:14,500 INFO [train.py:901] (3/4) Epoch 12, batch 350, loss[loss=0.2452, simple_loss=0.3285, pruned_loss=0.08098, over 8598.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3102, pruned_loss=0.0791, over 1343223.36 frames. ], batch size: 34, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:19:49,369 INFO [train.py:901] (3/4) Epoch 12, batch 400, loss[loss=0.2435, simple_loss=0.3219, pruned_loss=0.08252, over 8517.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.31, pruned_loss=0.07877, over 1405925.07 frames. ], batch size: 26, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:20:14,268 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.417e+02 2.965e+02 3.513e+02 5.511e+02, threshold=5.929e+02, percent-clipped=0.0 +2023-02-06 11:20:24,232 INFO [train.py:901] (3/4) Epoch 12, batch 450, loss[loss=0.2592, simple_loss=0.3456, pruned_loss=0.08642, over 8628.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3091, pruned_loss=0.07806, over 1456342.35 frames. ], batch size: 34, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:20:40,990 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:20:43,203 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:20:58,861 INFO [train.py:901] (3/4) Epoch 12, batch 500, loss[loss=0.2457, simple_loss=0.3229, pruned_loss=0.08424, over 8497.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3088, pruned_loss=0.07812, over 1490978.39 frames. 
], batch size: 26, lr: 6.51e-03, grad_scale: 16.0 +2023-02-06 11:21:19,380 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89443.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:21:24,108 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.539e+02 3.031e+02 3.696e+02 8.346e+02, threshold=6.063e+02, percent-clipped=3.0 +2023-02-06 11:21:29,669 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89457.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:21:34,342 INFO [train.py:901] (3/4) Epoch 12, batch 550, loss[loss=0.2301, simple_loss=0.303, pruned_loss=0.07857, over 8598.00 frames. ], tot_loss[loss=0.2339, simple_loss=0.3101, pruned_loss=0.07884, over 1524270.36 frames. ], batch size: 31, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:21:38,942 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 11:22:02,591 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:22:09,201 INFO [train.py:901] (3/4) Epoch 12, batch 600, loss[loss=0.2595, simple_loss=0.3216, pruned_loss=0.09866, over 7821.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3089, pruned_loss=0.07827, over 1546202.93 frames. ], batch size: 20, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:22:17,854 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:22:21,115 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89531.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:22:24,200 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-06 11:22:26,581 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 11:22:34,504 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.630e+02 3.047e+02 3.733e+02 1.036e+03, threshold=6.094e+02, percent-clipped=2.0 +2023-02-06 11:22:44,045 INFO [train.py:901] (3/4) Epoch 12, batch 650, loss[loss=0.2059, simple_loss=0.2787, pruned_loss=0.0665, over 7910.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.309, pruned_loss=0.07864, over 1562382.66 frames. ], batch size: 20, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:22:56,505 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8355, 2.1187, 2.2826, 1.5067, 2.3522, 1.4652, 0.7327, 1.9795], + device='cuda:3'), covar=tensor([0.0390, 0.0208, 0.0146, 0.0352, 0.0252, 0.0579, 0.0630, 0.0176], + device='cuda:3'), in_proj_covar=tensor([0.0390, 0.0326, 0.0274, 0.0381, 0.0315, 0.0470, 0.0355, 0.0351], + device='cuda:3'), out_proj_covar=tensor([1.1118e-04, 9.0633e-05, 7.6560e-05, 1.0674e-04, 8.8918e-05, 1.4283e-04, + 1.0081e-04, 9.8998e-05], device='cuda:3') +2023-02-06 11:23:18,869 INFO [train.py:901] (3/4) Epoch 12, batch 700, loss[loss=0.2446, simple_loss=0.3225, pruned_loss=0.08335, over 8469.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3091, pruned_loss=0.07863, over 1573933.49 frames. 
], batch size: 25, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:23:30,411 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1573, 1.1061, 1.2899, 1.1062, 0.9172, 1.2985, 0.0526, 0.9671], + device='cuda:3'), covar=tensor([0.2563, 0.1823, 0.0598, 0.1319, 0.3719, 0.0622, 0.3230, 0.1634], + device='cuda:3'), in_proj_covar=tensor([0.0169, 0.0171, 0.0102, 0.0217, 0.0258, 0.0107, 0.0166, 0.0169], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:23:40,430 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:23:43,633 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.821e+02 3.296e+02 4.031e+02 9.579e+02, threshold=6.593e+02, percent-clipped=5.0 +2023-02-06 11:23:51,386 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1943, 1.6239, 4.1432, 1.5804, 2.4900, 4.6174, 4.9666, 3.6526], + device='cuda:3'), covar=tensor([0.1318, 0.1827, 0.0399, 0.2512, 0.1194, 0.0320, 0.0389, 0.0968], + device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0300, 0.0263, 0.0296, 0.0276, 0.0238, 0.0346, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 11:23:53,839 INFO [train.py:901] (3/4) Epoch 12, batch 750, loss[loss=0.2167, simple_loss=0.2898, pruned_loss=0.07185, over 7986.00 frames. ], tot_loss[loss=0.2342, simple_loss=0.3101, pruned_loss=0.07914, over 1586109.63 frames. ], batch size: 21, lr: 6.50e-03, grad_scale: 16.0 +2023-02-06 11:24:11,509 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 11:24:17,640 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:24:20,275 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 11:24:28,197 INFO [train.py:901] (3/4) Epoch 12, batch 800, loss[loss=0.2443, simple_loss=0.3075, pruned_loss=0.09054, over 7795.00 frames. ], tot_loss[loss=0.2346, simple_loss=0.3104, pruned_loss=0.07942, over 1594438.72 frames. ], batch size: 20, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:24:32,436 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8248, 1.8720, 2.2956, 1.7479, 1.2547, 2.2499, 0.4210, 1.4868], + device='cuda:3'), covar=tensor([0.2377, 0.1771, 0.0440, 0.1949, 0.4031, 0.0473, 0.3415, 0.1999], + device='cuda:3'), in_proj_covar=tensor([0.0170, 0.0171, 0.0103, 0.0218, 0.0259, 0.0107, 0.0167, 0.0169], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:24:43,681 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:24:53,390 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.628e+02 3.285e+02 4.121e+02 9.349e+02, threshold=6.571e+02, percent-clipped=6.0 +2023-02-06 11:24:59,660 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:25:02,836 INFO [train.py:901] (3/4) Epoch 12, batch 850, loss[loss=0.2242, simple_loss=0.3061, pruned_loss=0.07114, over 8124.00 frames. 
], tot_loss[loss=0.2342, simple_loss=0.31, pruned_loss=0.07925, over 1598303.88 frames. ], batch size: 22, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:25:18,005 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:25:19,288 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:25:28,676 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89801.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 11:25:34,025 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2678, 2.7366, 3.1373, 1.4909, 3.1724, 1.8059, 1.5106, 2.2718], + device='cuda:3'), covar=tensor([0.0518, 0.0238, 0.0182, 0.0547, 0.0343, 0.0609, 0.0705, 0.0350], + device='cuda:3'), in_proj_covar=tensor([0.0391, 0.0330, 0.0275, 0.0384, 0.0317, 0.0474, 0.0356, 0.0353], + device='cuda:3'), out_proj_covar=tensor([1.1120e-04, 9.1706e-05, 7.6565e-05, 1.0750e-04, 8.9654e-05, 1.4412e-04, + 1.0104e-04, 9.9689e-05], device='cuda:3') +2023-02-06 11:25:37,790 INFO [train.py:901] (3/4) Epoch 12, batch 900, loss[loss=0.1896, simple_loss=0.2731, pruned_loss=0.05307, over 7419.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3081, pruned_loss=0.07836, over 1599467.58 frames. ], batch size: 17, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:26:03,293 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.446e+02 3.021e+02 3.729e+02 6.397e+02, threshold=6.041e+02, percent-clipped=0.0 +2023-02-06 11:26:03,492 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:26:11,313 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9408, 2.4265, 1.8451, 2.9307, 1.3432, 1.6373, 1.8552, 2.4089], + device='cuda:3'), covar=tensor([0.0847, 0.0802, 0.0929, 0.0376, 0.1214, 0.1487, 0.1201, 0.0783], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0213, 0.0255, 0.0215, 0.0218, 0.0252, 0.0258, 0.0218], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 11:26:11,802 INFO [train.py:901] (3/4) Epoch 12, batch 950, loss[loss=0.214, simple_loss=0.2933, pruned_loss=0.06736, over 8248.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3084, pruned_loss=0.07875, over 1600293.13 frames. 
], batch size: 22, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:26:16,404 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=89871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:26:24,567 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=89883.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:26:33,238 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6463, 2.0862, 3.4979, 2.3435, 2.9424, 2.3629, 1.9285, 1.6245], + device='cuda:3'), covar=tensor([0.3956, 0.4685, 0.1283, 0.3046, 0.2112, 0.2317, 0.1732, 0.4852], + device='cuda:3'), in_proj_covar=tensor([0.0895, 0.0876, 0.0743, 0.0853, 0.0936, 0.0806, 0.0708, 0.0772], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 11:26:38,586 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:26:38,628 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=89902.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:26:39,095 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 11:26:46,289 INFO [train.py:901] (3/4) Epoch 12, batch 1000, loss[loss=0.218, simple_loss=0.2997, pruned_loss=0.06814, over 8626.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3086, pruned_loss=0.07811, over 1606421.94 frames. ], batch size: 34, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:26:47,815 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89916.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:26:48,420 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9029, 2.8816, 3.5416, 2.0510, 1.6141, 3.6204, 0.7300, 2.0845], + device='cuda:3'), covar=tensor([0.2271, 0.1139, 0.0325, 0.2536, 0.4123, 0.0233, 0.3374, 0.1855], + device='cuda:3'), in_proj_covar=tensor([0.0170, 0.0172, 0.0103, 0.0218, 0.0259, 0.0107, 0.0166, 0.0169], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:26:55,721 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=89927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:27:11,393 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.648e+02 3.254e+02 4.081e+02 9.414e+02, threshold=6.507e+02, percent-clipped=7.0 +2023-02-06 11:27:11,438 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 11:27:13,888 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 11:27:20,836 INFO [train.py:901] (3/4) Epoch 12, batch 1050, loss[loss=0.252, simple_loss=0.3215, pruned_loss=0.09122, over 7804.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3091, pruned_loss=0.07795, over 1610218.86 frames. 
], batch size: 20, lr: 6.49e-03, grad_scale: 8.0 +2023-02-06 11:27:22,382 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1888, 1.3145, 3.3599, 0.9754, 2.8980, 2.8408, 3.0537, 2.9315], + device='cuda:3'), covar=tensor([0.0748, 0.3676, 0.0802, 0.3505, 0.1475, 0.1007, 0.0695, 0.0837], + device='cuda:3'), in_proj_covar=tensor([0.0501, 0.0586, 0.0593, 0.0543, 0.0620, 0.0528, 0.0520, 0.0592], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 11:27:24,334 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 11:27:35,858 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=89986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:27:56,234 INFO [train.py:901] (3/4) Epoch 12, batch 1100, loss[loss=0.2422, simple_loss=0.3142, pruned_loss=0.08511, over 8109.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3091, pruned_loss=0.07869, over 1608731.16 frames. ], batch size: 23, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:28:04,628 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3516, 2.6791, 1.7758, 2.1158, 2.1074, 1.3839, 2.0033, 1.9898], + device='cuda:3'), covar=tensor([0.1459, 0.0362, 0.1174, 0.0668, 0.0704, 0.1498, 0.0918, 0.0890], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0240, 0.0316, 0.0297, 0.0301, 0.0323, 0.0336, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 11:28:16,470 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:28:22,898 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.536e+02 3.046e+02 3.976e+02 6.882e+02, threshold=6.092e+02, percent-clipped=1.0 +2023-02-06 11:28:31,021 INFO [train.py:901] (3/4) Epoch 12, batch 1150, loss[loss=0.3008, simple_loss=0.3556, pruned_loss=0.123, over 8374.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3077, pruned_loss=0.07844, over 1606246.55 frames. ], batch size: 24, lr: 6.48e-03, grad_scale: 4.0 +2023-02-06 11:28:34,438 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 11:28:41,701 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 11:29:01,109 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:03,695 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2404, 3.1180, 2.8948, 1.6450, 2.8758, 2.7797, 2.8689, 2.6597], + device='cuda:3'), covar=tensor([0.1334, 0.0926, 0.1396, 0.4609, 0.1187, 0.1451, 0.1809, 0.1385], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0383, 0.0390, 0.0492, 0.0392, 0.0389, 0.0383, 0.0340], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:29:05,599 INFO [train.py:901] (3/4) Epoch 12, batch 1200, loss[loss=0.2171, simple_loss=0.2917, pruned_loss=0.07126, over 8024.00 frames. ], tot_loss[loss=0.2351, simple_loss=0.31, pruned_loss=0.08012, over 1605743.99 frames. 
], batch size: 22, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:29:19,563 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:30,353 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90148.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:32,963 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.449e+02 3.099e+02 4.282e+02 6.791e+02, threshold=6.197e+02, percent-clipped=4.0 +2023-02-06 11:29:36,564 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:37,303 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:29:41,831 INFO [train.py:901] (3/4) Epoch 12, batch 1250, loss[loss=0.2557, simple_loss=0.3342, pruned_loss=0.08861, over 8564.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.3092, pruned_loss=0.07956, over 1605837.42 frames. ], batch size: 34, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:29:47,523 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90172.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:29:55,711 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:05,421 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90197.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 11:30:17,140 INFO [train.py:901] (3/4) Epoch 12, batch 1300, loss[loss=0.2405, simple_loss=0.3268, pruned_loss=0.07712, over 8483.00 frames. ], tot_loss[loss=0.234, simple_loss=0.3096, pruned_loss=0.07922, over 1615102.04 frames. ], batch size: 25, lr: 6.48e-03, grad_scale: 8.0 +2023-02-06 11:30:26,147 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90227.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:37,536 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:43,728 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:30:44,956 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.408e+02 3.209e+02 4.069e+02 1.568e+03, threshold=6.418e+02, percent-clipped=9.0 +2023-02-06 11:30:47,242 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3950, 4.3374, 3.8743, 2.0545, 3.8447, 3.9102, 3.9359, 3.6049], + device='cuda:3'), covar=tensor([0.0772, 0.0598, 0.1036, 0.4607, 0.0897, 0.0961, 0.1261, 0.0903], + device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0380, 0.0388, 0.0487, 0.0388, 0.0386, 0.0379, 0.0338], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:30:53,234 INFO [train.py:901] (3/4) Epoch 12, batch 1350, loss[loss=0.2965, simple_loss=0.3537, pruned_loss=0.1197, over 8445.00 frames. ], tot_loss[loss=0.2348, simple_loss=0.3101, pruned_loss=0.07975, over 1614796.11 frames. 
], batch size: 27, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:30:55,570 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:31:05,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8910, 1.5490, 1.8037, 1.6497, 1.0764, 1.6264, 2.0056, 1.9891], + device='cuda:3'), covar=tensor([0.0451, 0.1275, 0.1641, 0.1330, 0.0628, 0.1492, 0.0689, 0.0575], + device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0153, 0.0193, 0.0159, 0.0103, 0.0164, 0.0117, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 11:31:09,729 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3301, 1.4662, 1.3235, 1.8553, 0.7390, 1.2127, 1.2310, 1.5346], + device='cuda:3'), covar=tensor([0.0959, 0.0894, 0.1223, 0.0610, 0.1314, 0.1494, 0.0905, 0.0767], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0212, 0.0257, 0.0215, 0.0217, 0.0250, 0.0257, 0.0217], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 11:31:28,606 INFO [train.py:901] (3/4) Epoch 12, batch 1400, loss[loss=0.2337, simple_loss=0.3122, pruned_loss=0.07756, over 8291.00 frames. ], tot_loss[loss=0.2345, simple_loss=0.3101, pruned_loss=0.0795, over 1618004.21 frames. ], batch size: 23, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:31:36,251 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6528, 1.6784, 2.0206, 1.6633, 1.0561, 2.0353, 0.2286, 1.4061], + device='cuda:3'), covar=tensor([0.2775, 0.1568, 0.0615, 0.1746, 0.4281, 0.0538, 0.3405, 0.1631], + device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0172, 0.0104, 0.0219, 0.0256, 0.0107, 0.0164, 0.0168], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:31:47,922 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:31:54,618 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.398e+02 2.808e+02 3.540e+02 8.131e+02, threshold=5.617e+02, percent-clipped=1.0 +2023-02-06 11:32:03,601 INFO [train.py:901] (3/4) Epoch 12, batch 1450, loss[loss=0.2197, simple_loss=0.3076, pruned_loss=0.06589, over 8501.00 frames. ], tot_loss[loss=0.2341, simple_loss=0.31, pruned_loss=0.07908, over 1618588.43 frames. ], batch size: 28, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:32:07,901 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:32:08,430 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-06 11:32:13,169 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6164, 1.4240, 3.1170, 1.1312, 2.1186, 3.3477, 3.5268, 2.6100], + device='cuda:3'), covar=tensor([0.1393, 0.1783, 0.0490, 0.2592, 0.1245, 0.0375, 0.0566, 0.0978], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0296, 0.0260, 0.0293, 0.0275, 0.0236, 0.0346, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 11:32:21,875 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5853, 2.1110, 3.4062, 1.3438, 2.5734, 2.0639, 1.6916, 2.6150], + device='cuda:3'), covar=tensor([0.1668, 0.2243, 0.0731, 0.4018, 0.1417, 0.2684, 0.1819, 0.1872], + device='cuda:3'), in_proj_covar=tensor([0.0487, 0.0519, 0.0533, 0.0582, 0.0622, 0.0558, 0.0473, 0.0618], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:32:38,148 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:32:38,592 INFO [train.py:901] (3/4) Epoch 12, batch 1500, loss[loss=0.2205, simple_loss=0.3019, pruned_loss=0.06957, over 8473.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.3098, pruned_loss=0.07889, over 1615565.12 frames. ], batch size: 25, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:32:55,146 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:33:04,320 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.462e+02 2.993e+02 3.898e+02 9.256e+02, threshold=5.985e+02, percent-clipped=2.0 +2023-02-06 11:33:12,494 INFO [train.py:901] (3/4) Epoch 12, batch 1550, loss[loss=0.2241, simple_loss=0.3129, pruned_loss=0.06758, over 8423.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3081, pruned_loss=0.07829, over 1609254.41 frames. ], batch size: 27, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:33:21,457 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:33:33,067 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90492.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:33:48,747 INFO [train.py:901] (3/4) Epoch 12, batch 1600, loss[loss=0.2512, simple_loss=0.3299, pruned_loss=0.08624, over 8616.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3065, pruned_loss=0.07721, over 1605711.43 frames. ], batch size: 34, lr: 6.47e-03, grad_scale: 8.0 +2023-02-06 11:33:48,918 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:15,464 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.712e+02 3.378e+02 4.197e+02 8.231e+02, threshold=6.755e+02, percent-clipped=6.0 +2023-02-06 11:34:23,534 INFO [train.py:901] (3/4) Epoch 12, batch 1650, loss[loss=0.2356, simple_loss=0.3181, pruned_loss=0.0766, over 8482.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3077, pruned_loss=0.07735, over 1609752.09 frames. 
], batch size: 49, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:34:23,750 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1996, 1.1375, 1.3108, 1.1736, 0.9551, 1.3239, 0.0892, 1.0164], + device='cuda:3'), covar=tensor([0.2276, 0.1757, 0.0709, 0.1297, 0.4060, 0.0675, 0.3034, 0.1684], + device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0172, 0.0104, 0.0219, 0.0256, 0.0108, 0.0165, 0.0168], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:34:29,142 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1393, 2.5278, 2.9058, 1.3837, 3.0622, 1.7367, 1.4227, 2.1150], + device='cuda:3'), covar=tensor([0.0564, 0.0238, 0.0225, 0.0561, 0.0324, 0.0594, 0.0630, 0.0349], + device='cuda:3'), in_proj_covar=tensor([0.0394, 0.0333, 0.0277, 0.0386, 0.0317, 0.0475, 0.0358, 0.0356], + device='cuda:3'), out_proj_covar=tensor([1.1205e-04, 9.2375e-05, 7.7212e-05, 1.0820e-04, 8.9284e-05, 1.4384e-04, + 1.0188e-04, 1.0050e-04], device='cuda:3') +2023-02-06 11:34:44,125 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:47,058 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90598.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:49,232 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5017, 2.1400, 3.5194, 1.3764, 2.5639, 1.9125, 1.7765, 2.3335], + device='cuda:3'), covar=tensor([0.1609, 0.1872, 0.0614, 0.3545, 0.1386, 0.2761, 0.1642, 0.2169], + device='cuda:3'), in_proj_covar=tensor([0.0484, 0.0518, 0.0532, 0.0580, 0.0617, 0.0556, 0.0470, 0.0612], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 11:34:53,826 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:34:58,209 INFO [train.py:901] (3/4) Epoch 12, batch 1700, loss[loss=0.2525, simple_loss=0.3328, pruned_loss=0.08615, over 8572.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3081, pruned_loss=0.07816, over 1603162.81 frames. ], batch size: 31, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:35:04,330 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:35:24,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.486e+02 2.952e+02 3.646e+02 6.764e+02, threshold=5.904e+02, percent-clipped=1.0 +2023-02-06 11:35:33,331 INFO [train.py:901] (3/4) Epoch 12, batch 1750, loss[loss=0.1939, simple_loss=0.2663, pruned_loss=0.06082, over 7794.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3085, pruned_loss=0.07816, over 1607538.91 frames. 
], batch size: 19, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:35:39,571 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1715, 1.8014, 1.3939, 1.6481, 1.5867, 1.2521, 1.4467, 1.5092], + device='cuda:3'), covar=tensor([0.0855, 0.0344, 0.0825, 0.0385, 0.0466, 0.0952, 0.0642, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0244, 0.0320, 0.0302, 0.0305, 0.0328, 0.0344, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 11:36:04,662 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:36:07,401 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:36:08,038 INFO [train.py:901] (3/4) Epoch 12, batch 1800, loss[loss=0.2526, simple_loss=0.3236, pruned_loss=0.0908, over 8305.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.3088, pruned_loss=0.0782, over 1609135.16 frames. ], batch size: 23, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:36:28,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8310, 3.6785, 2.3756, 2.7113, 2.9928, 1.9271, 2.6544, 2.9083], + device='cuda:3'), covar=tensor([0.1794, 0.0356, 0.1101, 0.0786, 0.0626, 0.1362, 0.1084, 0.1011], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0244, 0.0321, 0.0303, 0.0305, 0.0328, 0.0344, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 11:36:35,309 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.625e+02 3.119e+02 3.569e+02 7.012e+02, threshold=6.239e+02, percent-clipped=2.0 +2023-02-06 11:36:43,322 INFO [train.py:901] (3/4) Epoch 12, batch 1850, loss[loss=0.2562, simple_loss=0.3279, pruned_loss=0.09221, over 8517.00 frames. ], tot_loss[loss=0.2324, simple_loss=0.3085, pruned_loss=0.07811, over 1611524.55 frames. ], batch size: 39, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:37:17,708 INFO [train.py:901] (3/4) Epoch 12, batch 1900, loss[loss=0.1762, simple_loss=0.2491, pruned_loss=0.05158, over 7684.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3081, pruned_loss=0.07739, over 1615083.77 frames. ], batch size: 18, lr: 6.46e-03, grad_scale: 8.0 +2023-02-06 11:37:22,472 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90821.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:27,323 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90828.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:38,160 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.04 vs. limit=5.0 +2023-02-06 11:37:44,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.569e+02 3.031e+02 3.632e+02 7.649e+02, threshold=6.063e+02, percent-clipped=2.0 +2023-02-06 11:37:47,231 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-06 11:37:48,699 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=90858.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:50,847 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0510, 2.7015, 3.5246, 2.0702, 1.7102, 3.4099, 0.6147, 2.0542], + device='cuda:3'), covar=tensor([0.1488, 0.1255, 0.0322, 0.2327, 0.4032, 0.0378, 0.3964, 0.2233], + device='cuda:3'), in_proj_covar=tensor([0.0169, 0.0172, 0.0104, 0.0218, 0.0255, 0.0108, 0.0165, 0.0169], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 11:37:52,154 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:52,633 INFO [train.py:901] (3/4) Epoch 12, batch 1950, loss[loss=0.1937, simple_loss=0.2712, pruned_loss=0.05812, over 7514.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3072, pruned_loss=0.07741, over 1612941.91 frames. ], batch size: 18, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:37:55,453 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=90867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:37:59,332 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 11:38:10,328 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:38:19,030 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 11:38:27,978 INFO [train.py:901] (3/4) Epoch 12, batch 2000, loss[loss=0.3009, simple_loss=0.349, pruned_loss=0.1264, over 7081.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3073, pruned_loss=0.07733, over 1617756.95 frames. ], batch size: 72, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:38:43,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90936.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:38:54,924 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.641e+02 3.163e+02 4.034e+02 9.087e+02, threshold=6.326e+02, percent-clipped=9.0 +2023-02-06 11:39:02,896 INFO [train.py:901] (3/4) Epoch 12, batch 2050, loss[loss=0.1899, simple_loss=0.2707, pruned_loss=0.05452, over 7241.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3082, pruned_loss=0.07779, over 1618323.00 frames. 
], batch size: 16, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:39:03,757 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=90965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:39:09,926 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=90973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:39:21,822 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=90990.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:39:27,126 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9301, 1.4286, 1.5764, 1.3308, 0.8767, 1.3736, 1.5269, 1.3470], + device='cuda:3'), covar=tensor([0.0486, 0.1240, 0.1687, 0.1390, 0.0590, 0.1545, 0.0710, 0.0645], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0190, 0.0159, 0.0103, 0.0162, 0.0116, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 11:39:34,308 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-02-06 11:39:38,682 INFO [train.py:901] (3/4) Epoch 12, batch 2100, loss[loss=0.2121, simple_loss=0.2963, pruned_loss=0.06389, over 8190.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3074, pruned_loss=0.07706, over 1620459.31 frames. ], batch size: 23, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:40:04,173 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.877e+02 2.659e+02 3.265e+02 4.247e+02 8.349e+02, threshold=6.531e+02, percent-clipped=2.0 +2023-02-06 11:40:12,109 INFO [train.py:901] (3/4) Epoch 12, batch 2150, loss[loss=0.219, simple_loss=0.3069, pruned_loss=0.06559, over 8458.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3079, pruned_loss=0.07753, over 1619251.18 frames. ], batch size: 25, lr: 6.45e-03, grad_scale: 8.0 +2023-02-06 11:40:26,819 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:40:44,045 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:40:47,223 INFO [train.py:901] (3/4) Epoch 12, batch 2200, loss[loss=0.1672, simple_loss=0.2493, pruned_loss=0.04256, over 7929.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3061, pruned_loss=0.07642, over 1618347.49 frames. ], batch size: 20, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:41:13,716 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.751e+02 3.546e+02 4.173e+02 9.054e+02, threshold=7.092e+02, percent-clipped=3.0 +2023-02-06 11:41:21,758 INFO [train.py:901] (3/4) Epoch 12, batch 2250, loss[loss=0.2154, simple_loss=0.2958, pruned_loss=0.06756, over 8091.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3062, pruned_loss=0.07681, over 1616142.74 frames. ], batch size: 21, lr: 6.44e-03, grad_scale: 8.0 +2023-02-06 11:41:41,125 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:41:54,522 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91211.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 11:41:56,410 INFO [train.py:901] (3/4) Epoch 12, batch 2300, loss[loss=0.1988, simple_loss=0.2662, pruned_loss=0.06576, over 7817.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3078, pruned_loss=0.07825, over 1614617.83 frames. 
], batch size: 20, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:41:58,521 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:07,362 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91229.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:23,427 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.635e+02 3.142e+02 4.194e+02 9.102e+02, threshold=6.284e+02, percent-clipped=2.0
+2023-02-06 11:42:24,994 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:42:31,681 INFO [train.py:901] (3/4) Epoch 12, batch 2350, loss[loss=0.2164, simple_loss=0.3036, pruned_loss=0.06466, over 8675.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3073, pruned_loss=0.07747, over 1613314.46 frames. ], batch size: 34, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:42:57,737 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91303.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:43:05,701 INFO [train.py:901] (3/4) Epoch 12, batch 2400, loss[loss=0.1972, simple_loss=0.2702, pruned_loss=0.06212, over 7430.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3073, pruned_loss=0.07782, over 1612965.58 frames. ], batch size: 17, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:43:14,307 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:43:32,246 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.547e+02 3.046e+02 3.774e+02 7.420e+02, threshold=6.092e+02, percent-clipped=3.0
+2023-02-06 11:43:35,851 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6764, 1.3500, 3.9265, 1.3822, 3.3287, 3.2033, 3.5120, 3.3263],
+ device='cuda:3'), covar=tensor([0.0783, 0.4432, 0.0584, 0.3844, 0.1262, 0.1006, 0.0716, 0.0854],
+ device='cuda:3'), in_proj_covar=tensor([0.0498, 0.0580, 0.0588, 0.0536, 0.0609, 0.0524, 0.0518, 0.0583],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 11:43:41,055 INFO [train.py:901] (3/4) Epoch 12, batch 2450, loss[loss=0.1805, simple_loss=0.2678, pruned_loss=0.04654, over 8029.00 frames. ], tot_loss[loss=0.2316, simple_loss=0.3068, pruned_loss=0.07819, over 1606146.66 frames. ], batch size: 22, lr: 6.44e-03, grad_scale: 8.0
+2023-02-06 11:44:06,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6209, 1.9626, 2.0254, 1.2314, 2.2008, 1.4680, 0.4702, 1.8566],
+ device='cuda:3'), covar=tensor([0.0402, 0.0248, 0.0197, 0.0396, 0.0244, 0.0688, 0.0633, 0.0186],
+ device='cuda:3'), in_proj_covar=tensor([0.0397, 0.0332, 0.0280, 0.0384, 0.0317, 0.0473, 0.0362, 0.0359],
+ device='cuda:3'), out_proj_covar=tensor([1.1284e-04, 9.1987e-05, 7.7832e-05, 1.0725e-04, 8.9322e-05, 1.4282e-04,
+ 1.0273e-04, 1.0109e-04], device='cuda:3')
+2023-02-06 11:44:11,801 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4701, 2.0938, 2.9892, 2.3800, 2.7899, 2.2855, 1.8690, 1.5290],
+ device='cuda:3'), covar=tensor([0.3880, 0.3774, 0.1264, 0.2729, 0.1837, 0.2213, 0.1733, 0.4100],
+ device='cuda:3'), in_proj_covar=tensor([0.0883, 0.0871, 0.0740, 0.0850, 0.0937, 0.0804, 0.0704, 0.0765],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 11:44:15,047 INFO [train.py:901] (3/4) Epoch 12, batch 2500, loss[loss=0.2654, simple_loss=0.329, pruned_loss=0.1009, over 8593.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.307, pruned_loss=0.07871, over 1604798.23 frames. ], batch size: 49, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:44:41,736 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.576e+02 3.186e+02 4.386e+02 8.083e+02, threshold=6.372e+02, percent-clipped=11.0
+2023-02-06 11:44:50,272 INFO [train.py:901] (3/4) Epoch 12, batch 2550, loss[loss=0.2294, simple_loss=0.2995, pruned_loss=0.07966, over 7976.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.3076, pruned_loss=0.07904, over 1606017.66 frames. ], batch size: 21, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:45:24,404 INFO [train.py:901] (3/4) Epoch 12, batch 2600, loss[loss=0.1621, simple_loss=0.2467, pruned_loss=0.03873, over 7555.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.308, pruned_loss=0.07863, over 1609568.04 frames. ], batch size: 18, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:45:50,006 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.880e+02 3.430e+02 4.544e+02 8.443e+02, threshold=6.860e+02, percent-clipped=9.0
+2023-02-06 11:45:56,378 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91560.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:45:58,883 INFO [train.py:901] (3/4) Epoch 12, batch 2650, loss[loss=0.2537, simple_loss=0.3233, pruned_loss=0.0921, over 8089.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3089, pruned_loss=0.07886, over 1615121.60 frames. ], batch size: 21, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:46:11,921 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=91582.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:29,376 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=91607.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:33,855 INFO [train.py:901] (3/4) Epoch 12, batch 2700, loss[loss=0.2168, simple_loss=0.3025, pruned_loss=0.06555, over 8126.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3081, pruned_loss=0.07867, over 1611848.66 frames. ], batch size: 22, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:46:35,604 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-02-06 11:46:55,953 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91647.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:46:59,292 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.823e+02 2.691e+02 3.205e+02 3.908e+02 7.628e+02, threshold=6.410e+02, percent-clipped=2.0
+2023-02-06 11:47:08,035 INFO [train.py:901] (3/4) Epoch 12, batch 2750, loss[loss=0.2977, simple_loss=0.3587, pruned_loss=0.1184, over 7258.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3073, pruned_loss=0.07722, over 1614581.64 frames. ], batch size: 73, lr: 6.43e-03, grad_scale: 8.0
+2023-02-06 11:47:43,514 INFO [train.py:901] (3/4) Epoch 12, batch 2800, loss[loss=0.2386, simple_loss=0.3065, pruned_loss=0.08533, over 8360.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3091, pruned_loss=0.078, over 1619633.41 frames. ], batch size: 24, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:48:08,849 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.634e+02 3.181e+02 3.784e+02 9.192e+02, threshold=6.362e+02, percent-clipped=3.0
+2023-02-06 11:48:13,228 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-06 11:48:15,803 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=91762.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:48:16,916 INFO [train.py:901] (3/4) Epoch 12, batch 2850, loss[loss=0.2717, simple_loss=0.332, pruned_loss=0.1057, over 8598.00 frames. ], tot_loss[loss=0.2332, simple_loss=0.3096, pruned_loss=0.07834, over 1619388.01 frames. ], batch size: 34, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:48:52,947 INFO [train.py:901] (3/4) Epoch 12, batch 2900, loss[loss=0.222, simple_loss=0.3172, pruned_loss=0.06341, over 8341.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3086, pruned_loss=0.07795, over 1615387.24 frames. ], batch size: 26, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:49:18,830 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.538e+02 3.175e+02 3.875e+02 8.885e+02, threshold=6.349e+02, percent-clipped=4.0
+2023-02-06 11:49:22,155 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 11:49:26,763 INFO [train.py:901] (3/4) Epoch 12, batch 2950, loss[loss=0.1864, simple_loss=0.2533, pruned_loss=0.0597, over 7693.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3086, pruned_loss=0.07778, over 1616601.37 frames. ], batch size: 18, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:49:49,031 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. limit=2.0
+2023-02-06 11:49:54,045 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=91904.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:49:57,399 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:50:00,614 INFO [train.py:901] (3/4) Epoch 12, batch 3000, loss[loss=0.2086, simple_loss=0.2852, pruned_loss=0.06606, over 7967.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3087, pruned_loss=0.07859, over 1612258.12 frames. ], batch size: 21, lr: 6.42e-03, grad_scale: 8.0
+2023-02-06 11:50:00,614 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 11:50:12,504 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7803, 3.7573, 3.4949, 1.8087, 3.4380, 3.4474, 3.5115, 3.1931],
+ device='cuda:3'), covar=tensor([0.1039, 0.0604, 0.0927, 0.5351, 0.0853, 0.0995, 0.1122, 0.0993],
+ device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0379, 0.0388, 0.0489, 0.0379, 0.0383, 0.0379, 0.0330],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 11:50:13,628 INFO [train.py:935] (3/4) Epoch 12, validation: loss=0.1868, simple_loss=0.2871, pruned_loss=0.04323, over 944034.00 frames.
+2023-02-06 11:50:13,628 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 11:50:40,667 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.361e+02 2.883e+02 3.802e+02 7.578e+02, threshold=5.767e+02, percent-clipped=3.0
+2023-02-06 11:50:49,092 INFO [train.py:901] (3/4) Epoch 12, batch 3050, loss[loss=0.1932, simple_loss=0.2815, pruned_loss=0.05244, over 8243.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3075, pruned_loss=0.07815, over 1607443.91 frames. ], batch size: 22, lr: 6.41e-03, grad_scale: 8.0
+2023-02-06 11:50:56,133 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91973.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:51:14,056 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=91999.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:51:25,124 INFO [train.py:901] (3/4) Epoch 12, batch 3100, loss[loss=0.2327, simple_loss=0.3139, pruned_loss=0.07571, over 7964.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3081, pruned_loss=0.07771, over 1609432.02 frames. ], batch size: 21, lr: 6.41e-03, grad_scale: 8.0
+2023-02-06 11:51:28,114 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92018.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:51:28,796 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92019.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:51:30,841 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0357, 1.6173, 4.1655, 1.8514, 3.7039, 3.5074, 3.7946, 3.6520],
+ device='cuda:3'), covar=tensor([0.0557, 0.3816, 0.0564, 0.3187, 0.1053, 0.0794, 0.0514, 0.0604],
+ device='cuda:3'), in_proj_covar=tensor([0.0497, 0.0578, 0.0590, 0.0535, 0.0616, 0.0527, 0.0519, 0.0581],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 11:51:45,786 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92043.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:51:47,899 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5434, 1.9067, 2.1538, 1.1062, 2.2239, 1.2954, 0.6700, 1.6528],
+ device='cuda:3'), covar=tensor([0.0533, 0.0248, 0.0166, 0.0443, 0.0270, 0.0689, 0.0673, 0.0234],
+ device='cuda:3'), in_proj_covar=tensor([0.0395, 0.0330, 0.0280, 0.0388, 0.0318, 0.0476, 0.0358, 0.0357],
+ device='cuda:3'), out_proj_covar=tensor([1.1205e-04, 9.1684e-05, 7.7773e-05, 1.0840e-04, 8.9401e-05, 1.4369e-04,
+ 1.0151e-04, 1.0049e-04], device='cuda:3')
+2023-02-06 11:51:51,734 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.663e+02 3.347e+02 4.142e+02 7.838e+02, threshold=6.695e+02, percent-clipped=5.0
+2023-02-06 11:52:01,127 INFO [train.py:901] (3/4) Epoch 12, batch 3150, loss[loss=0.3046, simple_loss=0.3541, pruned_loss=0.1275, over 6891.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3088, pruned_loss=0.07826, over 1610812.40 frames. ], batch size: 72, lr: 6.41e-03, grad_scale: 16.0
+2023-02-06 11:52:35,766 INFO [train.py:901] (3/4) Epoch 12, batch 3200, loss[loss=0.2236, simple_loss=0.3068, pruned_loss=0.07016, over 8329.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3073, pruned_loss=0.07718, over 1609383.88 frames. ], batch size: 26, lr: 6.41e-03, grad_scale: 16.0
+2023-02-06 11:52:55,510 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.20 vs. limit=5.0
+2023-02-06 11:53:02,008 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.674e+02 3.226e+02 3.971e+02 7.397e+02, threshold=6.453e+02, percent-clipped=3.0
+2023-02-06 11:53:10,361 INFO [train.py:901] (3/4) Epoch 12, batch 3250, loss[loss=0.189, simple_loss=0.2593, pruned_loss=0.05933, over 7540.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3064, pruned_loss=0.0773, over 1608353.74 frames. ], batch size: 18, lr: 6.41e-03, grad_scale: 16.0
+2023-02-06 11:53:46,148 INFO [train.py:901] (3/4) Epoch 12, batch 3300, loss[loss=0.2335, simple_loss=0.3086, pruned_loss=0.07919, over 8037.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3058, pruned_loss=0.07665, over 1606857.81 frames. ], batch size: 22, lr: 6.41e-03, grad_scale: 16.0
+2023-02-06 11:53:53,879 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-06 11:54:11,039 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.357e+02 2.935e+02 3.680e+02 6.719e+02, threshold=5.870e+02, percent-clipped=1.0
+2023-02-06 11:54:11,767 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92253.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:54:19,108 INFO [train.py:901] (3/4) Epoch 12, batch 3350, loss[loss=0.2548, simple_loss=0.3216, pruned_loss=0.09404, over 8599.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3057, pruned_loss=0.07691, over 1610332.07 frames. ], batch size: 31, lr: 6.40e-03, grad_scale: 16.0
+2023-02-06 11:54:27,348 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92275.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:54:27,949 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9915, 1.4370, 4.2659, 1.7144, 2.3151, 4.8244, 4.8726, 4.1062],
+ device='cuda:3'), covar=tensor([0.1407, 0.1963, 0.0319, 0.2338, 0.1303, 0.0217, 0.0471, 0.0718],
+ device='cuda:3'), in_proj_covar=tensor([0.0270, 0.0299, 0.0265, 0.0297, 0.0280, 0.0238, 0.0351, 0.0294],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 11:54:45,173 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92300.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:54:50,801 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3982, 1.5794, 2.3075, 1.2405, 1.6079, 1.6754, 1.4566, 1.5578],
+ device='cuda:3'), covar=tensor([0.1741, 0.2324, 0.0723, 0.3919, 0.1560, 0.2831, 0.1984, 0.1822],
+ device='cuda:3'), in_proj_covar=tensor([0.0494, 0.0532, 0.0540, 0.0585, 0.0624, 0.0563, 0.0481, 0.0616],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 11:54:55,335 INFO [train.py:901] (3/4) Epoch 12, batch 3400, loss[loss=0.2145, simple_loss=0.2989, pruned_loss=0.06503, over 8469.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3062, pruned_loss=0.07718, over 1609575.19 frames. ], batch size: 39, lr: 6.40e-03, grad_scale: 16.0
+2023-02-06 11:54:57,465 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92317.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:55:03,718 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4870, 1.8689, 4.3596, 1.8758, 2.4779, 4.8896, 4.9879, 4.2636],
+ device='cuda:3'), covar=tensor([0.0890, 0.1312, 0.0239, 0.1898, 0.1012, 0.0184, 0.0305, 0.0574],
+ device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0299, 0.0265, 0.0296, 0.0279, 0.0239, 0.0351, 0.0294],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 11:55:15,994 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92343.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 11:55:21,861 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.364e+02 2.893e+02 3.659e+02 6.777e+02, threshold=5.785e+02, percent-clipped=2.0
+2023-02-06 11:55:29,921 INFO [train.py:901] (3/4) Epoch 12, batch 3450, loss[loss=0.2144, simple_loss=0.3063, pruned_loss=0.0612, over 8650.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3074, pruned_loss=0.0774, over 1613524.99 frames. ], batch size: 39, lr: 6.40e-03, grad_scale: 16.0
+2023-02-06 11:55:30,197 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0935, 1.7462, 2.4279, 2.0376, 2.3593, 2.0065, 1.6724, 0.9435],
+ device='cuda:3'), covar=tensor([0.4231, 0.3776, 0.1238, 0.2391, 0.1697, 0.2240, 0.1655, 0.4000],
+ device='cuda:3'), in_proj_covar=tensor([0.0892, 0.0872, 0.0733, 0.0847, 0.0936, 0.0805, 0.0706, 0.0767],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 11:55:32,885 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92368.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:56:04,899 INFO [train.py:901] (3/4) Epoch 12, batch 3500, loss[loss=0.2007, simple_loss=0.2799, pruned_loss=0.06077, over 7809.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3062, pruned_loss=0.07704, over 1612175.71 frames. ], batch size: 19, lr: 6.40e-03, grad_scale: 8.0
+2023-02-06 11:56:18,412 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92432.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:56:29,127 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 11:56:33,046 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.438e+02 2.928e+02 3.742e+02 8.211e+02, threshold=5.856e+02, percent-clipped=5.0
+2023-02-06 11:56:36,516 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=92458.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:56:40,251 INFO [train.py:901] (3/4) Epoch 12, batch 3550, loss[loss=0.2419, simple_loss=0.3112, pruned_loss=0.08629, over 6879.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.307, pruned_loss=0.07756, over 1610731.89 frames. ], batch size: 71, lr: 6.40e-03, grad_scale: 8.0
+2023-02-06 11:56:55,154 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0869, 1.2648, 1.1495, 0.6811, 1.2087, 0.9679, 0.1269, 1.2034],
+ device='cuda:3'), covar=tensor([0.0254, 0.0232, 0.0198, 0.0346, 0.0260, 0.0687, 0.0531, 0.0212],
+ device='cuda:3'), in_proj_covar=tensor([0.0400, 0.0332, 0.0282, 0.0393, 0.0325, 0.0481, 0.0359, 0.0362],
+ device='cuda:3'), out_proj_covar=tensor([1.1358e-04, 9.2051e-05, 7.8060e-05, 1.0973e-04, 9.1301e-05, 1.4537e-04,
+ 1.0191e-04, 1.0198e-04], device='cuda:3')
+2023-02-06 11:57:12,359 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0
+2023-02-06 11:57:14,696 INFO [train.py:901] (3/4) Epoch 12, batch 3600, loss[loss=0.2259, simple_loss=0.3069, pruned_loss=0.07241, over 8289.00 frames. ], tot_loss[loss=0.232, simple_loss=0.308, pruned_loss=0.07799, over 1613765.00 frames. ], batch size: 23, lr: 6.40e-03, grad_scale: 8.0
+2023-02-06 11:57:39,233 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6191, 5.7111, 5.0174, 2.1753, 5.1108, 5.2870, 5.2999, 4.9150],
+ device='cuda:3'), covar=tensor([0.0558, 0.0394, 0.0831, 0.4851, 0.0628, 0.0838, 0.1051, 0.0737],
+ device='cuda:3'), in_proj_covar=tensor([0.0465, 0.0376, 0.0387, 0.0486, 0.0380, 0.0383, 0.0376, 0.0331],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 11:57:42,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.628e+02 3.055e+02 4.234e+02 9.851e+02, threshold=6.109e+02, percent-clipped=7.0
+2023-02-06 11:57:50,884 INFO [train.py:901] (3/4) Epoch 12, batch 3650, loss[loss=0.2332, simple_loss=0.3152, pruned_loss=0.0756, over 8540.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.308, pruned_loss=0.07751, over 1615221.80 frames. ], batch size: 49, lr: 6.39e-03, grad_scale: 8.0
+2023-02-06 11:58:23,906 INFO [train.py:901] (3/4) Epoch 12, batch 3700, loss[loss=0.2292, simple_loss=0.3092, pruned_loss=0.07458, over 7936.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.308, pruned_loss=0.07779, over 1614774.30 frames. ], batch size: 20, lr: 6.39e-03, grad_scale: 8.0
+2023-02-06 11:58:28,529 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 11:58:31,414 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92624.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:58:44,258 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:58:48,450 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92649.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:58:50,894 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.476e+02 3.116e+02 4.152e+02 8.400e+02, threshold=6.233e+02, percent-clipped=9.0
+2023-02-06 11:58:54,573 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0
+2023-02-06 11:58:59,706 INFO [train.py:901] (3/4) Epoch 12, batch 3750, loss[loss=0.2503, simple_loss=0.3322, pruned_loss=0.08419, over 8193.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3096, pruned_loss=0.07829, over 1618219.85 frames. ], batch size: 23, lr: 6.39e-03, grad_scale: 8.0
+2023-02-06 11:59:02,538 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7153, 4.7171, 4.2409, 2.0082, 4.2838, 4.1806, 4.3835, 3.9339],
+ device='cuda:3'), covar=tensor([0.0677, 0.0465, 0.0994, 0.4808, 0.0738, 0.0918, 0.1125, 0.0829],
+ device='cuda:3'), in_proj_covar=tensor([0.0474, 0.0382, 0.0393, 0.0493, 0.0382, 0.0385, 0.0381, 0.0334],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 11:59:17,077 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92688.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:59:33,692 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9882, 1.9167, 6.0215, 2.2751, 5.4626, 5.0208, 5.5550, 5.5081],
+ device='cuda:3'), covar=tensor([0.0384, 0.3948, 0.0360, 0.3100, 0.0865, 0.0739, 0.0414, 0.0404],
+ device='cuda:3'), in_proj_covar=tensor([0.0505, 0.0581, 0.0604, 0.0542, 0.0623, 0.0535, 0.0527, 0.0587],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 11:59:34,461 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92713.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:59:34,944 INFO [train.py:901] (3/4) Epoch 12, batch 3800, loss[loss=0.2279, simple_loss=0.3106, pruned_loss=0.07256, over 8585.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3075, pruned_loss=0.07703, over 1615119.66 frames. ], batch size: 39, lr: 6.39e-03, grad_scale: 8.0
+2023-02-06 11:59:35,191 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=92714.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 11:59:52,028 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92738.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 11:59:52,783 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=92739.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 12:00:02,126 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.558e+02 2.972e+02 3.756e+02 9.318e+02, threshold=5.944e+02, percent-clipped=5.0
+2023-02-06 12:00:09,485 INFO [train.py:901] (3/4) Epoch 12, batch 3850, loss[loss=0.1991, simple_loss=0.2744, pruned_loss=0.06191, over 7965.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.308, pruned_loss=0.07714, over 1616262.66 frames. ], batch size: 21, lr: 6.39e-03, grad_scale: 8.0
+2023-02-06 12:00:32,435 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4036, 1.9697, 2.9917, 2.3210, 2.6821, 2.2136, 1.7724, 1.2835],
+ device='cuda:3'), covar=tensor([0.3965, 0.4014, 0.1178, 0.2837, 0.2038, 0.2369, 0.1739, 0.4474],
+ device='cuda:3'), in_proj_covar=tensor([0.0897, 0.0870, 0.0728, 0.0852, 0.0935, 0.0804, 0.0705, 0.0765],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 12:00:33,553 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 12:00:45,118 INFO [train.py:901] (3/4) Epoch 12, batch 3900, loss[loss=0.2259, simple_loss=0.3074, pruned_loss=0.07221, over 8439.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3084, pruned_loss=0.07751, over 1621003.44 frames. ], batch size: 27, lr: 6.39e-03, grad_scale: 8.0
+2023-02-06 12:01:08,875 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92849.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:01:11,288 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.538e+02 2.989e+02 3.922e+02 7.912e+02, threshold=5.979e+02, percent-clipped=3.0
+2023-02-06 12:01:18,864 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6249, 4.6207, 4.1306, 1.9746, 4.1487, 4.1858, 4.3050, 3.8537],
+ device='cuda:3'), covar=tensor([0.0746, 0.0576, 0.1035, 0.4819, 0.0740, 0.0975, 0.1326, 0.0816],
+ device='cuda:3'), in_proj_covar=tensor([0.0464, 0.0378, 0.0388, 0.0484, 0.0377, 0.0380, 0.0376, 0.0333],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 12:01:19,449 INFO [train.py:901] (3/4) Epoch 12, batch 3950, loss[loss=0.2519, simple_loss=0.3331, pruned_loss=0.08541, over 8626.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3076, pruned_loss=0.07705, over 1622705.39 frames. ], batch size: 34, lr: 6.38e-03, grad_scale: 8.0
+2023-02-06 12:01:25,440 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9958, 1.5981, 1.7020, 1.2224, 0.9259, 1.4931, 1.8098, 1.5944],
+ device='cuda:3'), covar=tensor([0.0466, 0.1181, 0.1690, 0.1428, 0.0583, 0.1489, 0.0657, 0.0624],
+ device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0159, 0.0104, 0.0163, 0.0116, 0.0138],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 12:01:54,556 INFO [train.py:901] (3/4) Epoch 12, batch 4000, loss[loss=0.2248, simple_loss=0.3161, pruned_loss=0.06675, over 8234.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3061, pruned_loss=0.07648, over 1618385.56 frames. ], batch size: 24, lr: 6.38e-03, grad_scale: 8.0
+2023-02-06 12:01:56,842 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92917.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:02:18,353 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92949.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:02:18,569 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-06 12:02:20,900 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.373e+02 3.059e+02 3.649e+02 8.513e+02, threshold=6.118e+02, percent-clipped=6.0
+2023-02-06 12:02:28,381 INFO [train.py:901] (3/4) Epoch 12, batch 4050, loss[loss=0.2306, simple_loss=0.3155, pruned_loss=0.07287, over 8103.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3064, pruned_loss=0.07719, over 1619045.98 frames. ], batch size: 23, lr: 6.38e-03, grad_scale: 8.0
+2023-02-06 12:02:44,203 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=92987.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:02:48,271 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=92993.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:03:03,713 INFO [train.py:901] (3/4) Epoch 12, batch 4100, loss[loss=0.2223, simple_loss=0.3026, pruned_loss=0.07103, over 8362.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3078, pruned_loss=0.07806, over 1617956.59 frames. ], batch size: 24, lr: 6.38e-03, grad_scale: 8.0
+2023-02-06 12:03:13,908 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93028.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:03:21,395 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8073, 2.4988, 3.2132, 1.9499, 1.7003, 3.1587, 0.6211, 1.9671],
+ device='cuda:3'), covar=tensor([0.2129, 0.1614, 0.0357, 0.2481, 0.4230, 0.0419, 0.3636, 0.1957],
+ device='cuda:3'), in_proj_covar=tensor([0.0168, 0.0172, 0.0102, 0.0214, 0.0251, 0.0106, 0.0160, 0.0166],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 12:03:30,605 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.418e+02 3.048e+02 3.757e+02 7.047e+02, threshold=6.097e+02, percent-clipped=3.0
+2023-02-06 12:03:31,419 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93054.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:03:37,929 INFO [train.py:901] (3/4) Epoch 12, batch 4150, loss[loss=0.2526, simple_loss=0.3319, pruned_loss=0.08663, over 8558.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.308, pruned_loss=0.07813, over 1616028.34 frames. ], batch size: 49, lr: 6.38e-03, grad_scale: 8.0
+2023-02-06 12:03:50,874 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:04:04,576 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93102.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:04:12,238 INFO [train.py:901] (3/4) Epoch 12, batch 4200, loss[loss=0.2157, simple_loss=0.2912, pruned_loss=0.07008, over 8132.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.306, pruned_loss=0.07709, over 1612246.82 frames. ], batch size: 22, lr: 6.38e-03, grad_scale: 8.0
+2023-02-06 12:04:24,958 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 12:04:26,416 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93133.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:04:40,123 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.563e+02 2.943e+02 3.717e+02 8.503e+02, threshold=5.885e+02, percent-clipped=3.0
+2023-02-06 12:04:47,435 INFO [train.py:901] (3/4) Epoch 12, batch 4250, loss[loss=0.2186, simple_loss=0.2946, pruned_loss=0.07129, over 7970.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.3089, pruned_loss=0.07864, over 1617290.31 frames. ], batch size: 21, lr: 6.37e-03, grad_scale: 8.0
+2023-02-06 12:04:48,801 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 12:05:06,772 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93193.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:05:09,360 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93197.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:05:21,311 INFO [train.py:901] (3/4) Epoch 12, batch 4300, loss[loss=0.234, simple_loss=0.3166, pruned_loss=0.07569, over 8352.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.3087, pruned_loss=0.07829, over 1617002.96 frames. ], batch size: 24, lr: 6.37e-03, grad_scale: 8.0
+2023-02-06 12:05:48,569 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.718e+02 3.236e+02 4.116e+02 1.260e+03, threshold=6.473e+02, percent-clipped=7.0
+2023-02-06 12:05:54,515 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93261.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:05:56,509 INFO [train.py:901] (3/4) Epoch 12, batch 4350, loss[loss=0.2282, simple_loss=0.301, pruned_loss=0.07767, over 8073.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.3089, pruned_loss=0.07845, over 1615941.44 frames. ], batch size: 21, lr: 6.37e-03, grad_scale: 8.0
+2023-02-06 12:06:15,792 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93293.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:06:16,412 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 12:06:25,937 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93308.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:06:29,909 INFO [train.py:901] (3/4) Epoch 12, batch 4400, loss[loss=0.2405, simple_loss=0.3144, pruned_loss=0.08332, over 7944.00 frames. ], tot_loss[loss=0.2337, simple_loss=0.3095, pruned_loss=0.07897, over 1617044.88 frames. ], batch size: 20, lr: 6.37e-03, grad_scale: 8.0
+2023-02-06 12:06:41,630 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4970, 2.6102, 1.8106, 2.2980, 2.1380, 1.4248, 1.9869, 2.0989],
+ device='cuda:3'), covar=tensor([0.1290, 0.0355, 0.1016, 0.0546, 0.0622, 0.1339, 0.0928, 0.0851],
+ device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0234, 0.0313, 0.0296, 0.0297, 0.0323, 0.0341, 0.0301],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 12:06:46,156 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93337.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:06:58,339 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.551e+02 2.995e+02 3.715e+02 7.484e+02, threshold=5.990e+02, percent-clipped=1.0
+2023-02-06 12:06:58,364 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 12:07:01,783 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93358.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:07:05,646 INFO [train.py:901] (3/4) Epoch 12, batch 4450, loss[loss=0.1933, simple_loss=0.2639, pruned_loss=0.06133, over 7265.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3081, pruned_loss=0.07826, over 1615678.26 frames. ], batch size: 16, lr: 6.37e-03, grad_scale: 8.0
+2023-02-06 12:07:11,755 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:07:12,583 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8276, 2.3191, 3.5414, 2.7117, 3.1265, 2.4666, 2.0427, 1.6181],
+ device='cuda:3'), covar=tensor([0.3547, 0.4274, 0.1233, 0.2613, 0.1900, 0.2177, 0.1596, 0.4622],
+ device='cuda:3'), in_proj_covar=tensor([0.0886, 0.0868, 0.0725, 0.0847, 0.0927, 0.0798, 0.0700, 0.0762],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 12:07:14,530 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93376.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:07:19,344 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93383.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:07:29,365 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93398.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:07:36,075 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93408.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:07:39,914 INFO [train.py:901] (3/4) Epoch 12, batch 4500, loss[loss=0.2799, simple_loss=0.3513, pruned_loss=0.1043, over 8037.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3075, pruned_loss=0.07791, over 1611494.23 frames. ], batch size: 22, lr: 6.37e-03, grad_scale: 8.0
+2023-02-06 12:07:50,717 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 12:08:06,003 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93452.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:08:06,478 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.576e+02 3.193e+02 4.187e+02 6.619e+02, threshold=6.386e+02, percent-clipped=4.0
+2023-02-06 12:08:06,708 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93453.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:08:13,869 INFO [train.py:901] (3/4) Epoch 12, batch 4550, loss[loss=0.2153, simple_loss=0.2819, pruned_loss=0.07439, over 7805.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3059, pruned_loss=0.07686, over 1612718.60 frames. ], batch size: 20, lr: 6.36e-03, grad_scale: 8.0
+2023-02-06 12:08:24,172 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=93477.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:08:24,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93478.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:08:31,900 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93487.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:08:49,671 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93513.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:08:50,192 INFO [train.py:901] (3/4) Epoch 12, batch 4600, loss[loss=0.2502, simple_loss=0.3415, pruned_loss=0.07947, over 8521.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3063, pruned_loss=0.07715, over 1612632.35 frames. ], batch size: 28, lr: 6.36e-03, grad_scale: 8.0
+2023-02-06 12:09:16,614 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.455e+02 3.020e+02 4.052e+02 9.299e+02, threshold=6.041e+02, percent-clipped=5.0
+2023-02-06 12:09:24,869 INFO [train.py:901] (3/4) Epoch 12, batch 4650, loss[loss=0.2691, simple_loss=0.3224, pruned_loss=0.1079, over 7428.00 frames. ], tot_loss[loss=0.2299, simple_loss=0.3061, pruned_loss=0.07688, over 1610803.71 frames. ], batch size: 17, lr: 6.36e-03, grad_scale: 8.0
+2023-02-06 12:09:25,068 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93564.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:09:41,914 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93589.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:09:45,159 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=93592.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:09:45,361 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 12:09:47,217 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7625, 2.0680, 2.2287, 1.2634, 2.3246, 1.5674, 0.6279, 1.8997],
+ device='cuda:3'), covar=tensor([0.0426, 0.0209, 0.0158, 0.0437, 0.0236, 0.0666, 0.0651, 0.0189],
+ device='cuda:3'), in_proj_covar=tensor([0.0398, 0.0332, 0.0277, 0.0394, 0.0322, 0.0481, 0.0361, 0.0363],
+ device='cuda:3'), out_proj_covar=tensor([1.1275e-04, 9.1637e-05, 7.6628e-05, 1.0989e-04, 9.0315e-05, 1.4519e-04,
+ 1.0213e-04, 1.0190e-04], device='cuda:3')
+2023-02-06 12:09:59,879 INFO [train.py:901] (3/4) Epoch 12, batch 4700, loss[loss=0.2087, simple_loss=0.2738, pruned_loss=0.07179, over 7556.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.304, pruned_loss=0.07588, over 1606825.85 frames. ], batch size: 18, lr: 6.36e-03, grad_scale: 8.0
+2023-02-06 12:10:12,702 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93632.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:10:20,659 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6977, 4.6703, 4.1840, 1.8683, 4.2274, 4.2540, 4.3006, 4.0374],
+ device='cuda:3'), covar=tensor([0.0752, 0.0618, 0.1073, 0.5621, 0.0790, 0.0889, 0.1487, 0.0743],
+ device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0389, 0.0395, 0.0493, 0.0387, 0.0390, 0.0384, 0.0336],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 12:10:25,039 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.59 vs. limit=2.0
+2023-02-06 12:10:26,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.370e+02 2.939e+02 3.568e+02 8.447e+02, threshold=5.879e+02, percent-clipped=4.0
+2023-02-06 12:10:29,424 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93657.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:10:33,909 INFO [train.py:901] (3/4) Epoch 12, batch 4750, loss[loss=0.2375, simple_loss=0.3221, pruned_loss=0.07648, over 8330.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3045, pruned_loss=0.07585, over 1605647.63 frames. ], batch size: 26, lr: 6.36e-03, grad_scale: 8.0
+2023-02-06 12:10:34,129 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93664.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:10:36,648 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4173, 1.4585, 2.4097, 1.2722, 2.3046, 2.5580, 2.6300, 2.1512],
+ device='cuda:3'), covar=tensor([0.0933, 0.1140, 0.0380, 0.1788, 0.0611, 0.0356, 0.0638, 0.0737],
+ device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0300, 0.0264, 0.0295, 0.0278, 0.0240, 0.0354, 0.0294],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 12:10:51,743 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93689.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:11:00,451 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 12:11:02,479 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 12:11:04,768 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:11:09,462 INFO [train.py:901] (3/4) Epoch 12, batch 4800, loss[loss=0.2908, simple_loss=0.3593, pruned_loss=0.1111, over 8505.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3046, pruned_loss=0.07617, over 1603277.35 frames. ], batch size: 28, lr: 6.35e-03, grad_scale: 8.0
+2023-02-06 12:11:23,301 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93733.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:11:24,216 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0
+2023-02-06 12:11:30,156 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93743.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:11:36,832 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.519e+02 2.967e+02 3.635e+02 7.460e+02, threshold=5.934e+02, percent-clipped=2.0
+2023-02-06 12:11:44,120 INFO [train.py:901] (3/4) Epoch 12, batch 4850, loss[loss=0.1997, simple_loss=0.271, pruned_loss=0.06418, over 7794.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.305, pruned_loss=0.07603, over 1607404.29 frames. ], batch size: 19, lr: 6.35e-03, grad_scale: 8.0
+2023-02-06 12:11:47,089 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93768.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:11:47,776 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93769.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:11:53,081 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 12:12:00,547 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93788.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:12:04,712 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:12:18,639 INFO [train.py:901] (3/4) Epoch 12, batch 4900, loss[loss=0.1961, simple_loss=0.2853, pruned_loss=0.05349, over 8249.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3056, pruned_loss=0.07655, over 1607564.41 frames. ], batch size: 22, lr: 6.35e-03, grad_scale: 8.0
+2023-02-06 12:12:42,683 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=93848.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:12:45,726 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.389e+02 2.920e+02 3.679e+02 7.315e+02, threshold=5.841e+02, percent-clipped=3.0
+2023-02-06 12:12:53,910 INFO [train.py:901] (3/4) Epoch 12, batch 4950, loss[loss=0.2473, simple_loss=0.3271, pruned_loss=0.08375, over 8353.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3058, pruned_loss=0.07674, over 1611113.37 frames. ], batch size: 24, lr: 6.35e-03, grad_scale: 8.0
+2023-02-06 12:13:00,036 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=93873.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:13:20,749 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4208, 1.5762, 4.5512, 2.0270, 4.0430, 3.8550, 4.1757, 4.1064],
+ device='cuda:3'), covar=tensor([0.0485, 0.4123, 0.0477, 0.3200, 0.0955, 0.0822, 0.0509, 0.0508],
+ device='cuda:3'), in_proj_covar=tensor([0.0497, 0.0569, 0.0584, 0.0528, 0.0603, 0.0516, 0.0506, 0.0569],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 12:13:27,059 INFO [train.py:901] (3/4) Epoch 12, batch 5000, loss[loss=0.1851, simple_loss=0.2635, pruned_loss=0.05336, over 7242.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3075, pruned_loss=0.07796, over 1609804.80 frames. ], batch size: 16, lr: 6.35e-03, grad_scale: 8.0
+2023-02-06 12:13:55,385 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.574e+02 3.082e+02 3.748e+02 7.333e+02, threshold=6.165e+02, percent-clipped=4.0
+2023-02-06 12:13:56,536 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-06 12:14:02,955 INFO [train.py:901] (3/4) Epoch 12, batch 5050, loss[loss=0.2004, simple_loss=0.2695, pruned_loss=0.06565, over 7645.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3068, pruned_loss=0.07783, over 1607569.87 frames. ], batch size: 19, lr: 6.35e-03, grad_scale: 8.0
+2023-02-06 12:14:03,320 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.44 vs. limit=5.0
+2023-02-06 12:14:27,670 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=93999.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:14:31,165 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 12:14:38,587 INFO [train.py:901] (3/4) Epoch 12, batch 5100, loss[loss=0.26, simple_loss=0.3308, pruned_loss=0.09461, over 8434.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3065, pruned_loss=0.07751, over 1605829.58 frames. ], batch size: 49, lr: 6.34e-03, grad_scale: 8.0
+2023-02-06 12:15:02,451 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-06 12:15:04,423 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 12:15:05,345 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.576e+02 2.962e+02 4.029e+02 5.912e+02, threshold=5.924e+02, percent-clipped=0.0
+2023-02-06 12:15:10,266 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4502, 4.3446, 3.9718, 1.7993, 4.0055, 3.9614, 4.0140, 3.6560],
+ device='cuda:3'), covar=tensor([0.0680, 0.0561, 0.0895, 0.4722, 0.0749, 0.0914, 0.1185, 0.0819],
+ device='cuda:3'), in_proj_covar=tensor([0.0463, 0.0379, 0.0389, 0.0486, 0.0382, 0.0386, 0.0380, 0.0334],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 12:15:13,505 INFO [train.py:901] (3/4) Epoch 12, batch 5150, loss[loss=0.2666, simple_loss=0.3305, pruned_loss=0.1014, over 6635.00 frames. ], tot_loss[loss=0.233, simple_loss=0.3082, pruned_loss=0.07896, over 1604655.59 frames. ], batch size: 71, lr: 6.34e-03, grad_scale: 8.0
+2023-02-06 12:15:47,480 INFO [train.py:901] (3/4) Epoch 12, batch 5200, loss[loss=0.2296, simple_loss=0.3024, pruned_loss=0.07836, over 8069.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3087, pruned_loss=0.0789, over 1610807.30 frames. ], batch size: 21, lr: 6.34e-03, grad_scale: 8.0
+2023-02-06 12:15:53,864 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4960, 1.5020, 2.5946, 1.0542, 2.0121, 2.7670, 3.0128, 2.0112],
+ device='cuda:3'), covar=tensor([0.1469, 0.1676, 0.0588, 0.2856, 0.1066, 0.0564, 0.0760, 0.1338],
+ device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0297, 0.0262, 0.0292, 0.0276, 0.0238, 0.0349, 0.0293],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 12:15:54,504 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7350, 1.3157, 3.9497, 1.4108, 3.4511, 3.3229, 3.5423, 3.4434],
+ device='cuda:3'), covar=tensor([0.0663, 0.3928, 0.0577, 0.3379, 0.1287, 0.0876, 0.0610, 0.0715],
+ device='cuda:3'), in_proj_covar=tensor([0.0496, 0.0573, 0.0586, 0.0529, 0.0608, 0.0518, 0.0512, 0.0570],
+ device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 12:15:59,924 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94132.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:16:14,667 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.571e+02 3.074e+02 4.467e+02 8.286e+02, threshold=6.149e+02, percent-clipped=7.0
+2023-02-06 12:16:21,915 INFO [train.py:901] (3/4) Epoch 12, batch 5250, loss[loss=0.2052, simple_loss=0.2734, pruned_loss=0.06854, over 7693.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3085, pruned_loss=0.07828, over 1609630.80 frames. ], batch size: 18, lr: 6.34e-03, grad_scale: 8.0
+2023-02-06 12:16:25,903 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 12:16:57,676 INFO [train.py:901] (3/4) Epoch 12, batch 5300, loss[loss=0.2343, simple_loss=0.3103, pruned_loss=0.07914, over 8370.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3086, pruned_loss=0.0782, over 1612314.35 frames. ], batch size: 24, lr: 6.34e-03, grad_scale: 8.0
+2023-02-06 12:17:13,244 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 12:17:15,489 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94241.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 12:17:19,507 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:17:23,393 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.628e+02 3.237e+02 4.138e+02 9.258e+02, threshold=6.473e+02, percent-clipped=5.0
+2023-02-06 12:17:31,605 INFO [train.py:901] (3/4) Epoch 12, batch 5350, loss[loss=0.1978, simple_loss=0.283, pruned_loss=0.05631, over 8024.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3076, pruned_loss=0.07704, over 1617347.41 frames. ], batch size: 22, lr: 6.34e-03, grad_scale: 8.0
+2023-02-06 12:17:42,450 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6736, 2.0557, 3.5063, 1.4325, 2.5802, 2.1867, 1.7113, 2.4312],
+ device='cuda:3'), covar=tensor([0.1620, 0.2132, 0.0659, 0.3846, 0.1454, 0.2568, 0.1734, 0.2098],
+ device='cuda:3'), in_proj_covar=tensor([0.0487, 0.0521, 0.0532, 0.0582, 0.0614, 0.0555, 0.0475, 0.0613],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 12:17:55,700 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0625, 1.3083, 1.3787, 1.0823, 0.8666, 1.1714, 1.6240, 1.2757],
+ device='cuda:3'), covar=tensor([0.0588, 0.1898, 0.2682, 0.2081, 0.0793, 0.2273, 0.0897, 0.0888],
+ device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0152, 0.0193, 0.0158, 0.0103, 0.0163, 0.0116, 0.0139],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 12:18:05,214 INFO [train.py:901] (3/4) Epoch 12, batch 5400, loss[loss=0.1988, simple_loss=0.2791, pruned_loss=0.05926, over 7929.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.3089, pruned_loss=0.07774, over 1616931.17 frames. ], batch size: 20, lr: 6.33e-03, grad_scale: 8.0
+2023-02-06 12:18:25,503 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94343.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:18:32,249 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.485e+02 2.978e+02 4.110e+02 9.009e+02, threshold=5.957e+02, percent-clipped=6.0
+2023-02-06 12:18:39,968 INFO [train.py:901] (3/4) Epoch 12, batch 5450, loss[loss=0.2108, simple_loss=0.2739, pruned_loss=0.0739, over 7711.00 frames. ], tot_loss[loss=0.232, simple_loss=0.3081, pruned_loss=0.07795, over 1612718.89 frames. ], batch size: 18, lr: 6.33e-03, grad_scale: 8.0
+2023-02-06 12:18:40,821 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2339, 2.1901, 1.7267, 2.0089, 1.7514, 1.3726, 1.6521, 1.6649],
+ device='cuda:3'), covar=tensor([0.1209, 0.0334, 0.1005, 0.0473, 0.0641, 0.1360, 0.0849, 0.0739],
+ device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0232, 0.0311, 0.0293, 0.0296, 0.0320, 0.0334, 0.0301],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 12:18:41,427 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=94366.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:19:12,394 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 12:19:15,761 INFO [train.py:901] (3/4) Epoch 12, batch 5500, loss[loss=0.2448, simple_loss=0.3164, pruned_loss=0.08664, over 8249.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3072, pruned_loss=0.07707, over 1611766.26 frames. ], batch size: 24, lr: 6.33e-03, grad_scale: 16.0
+2023-02-06 12:19:28,082 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1878, 1.6324, 3.4929, 1.4795, 2.4264, 3.9516, 3.8927, 3.4107],
+ device='cuda:3'), covar=tensor([0.0868, 0.1419, 0.0317, 0.1866, 0.0909, 0.0202, 0.0417, 0.0532],
+ device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0297, 0.0260, 0.0291, 0.0274, 0.0235, 0.0347, 0.0290],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 12:19:43,163 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.406e+02 2.798e+02 3.361e+02 6.650e+02, threshold=5.597e+02, percent-clipped=1.0
+2023-02-06 12:19:45,385 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94458.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:19:46,245 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 12:19:49,116 INFO [train.py:901] (3/4) Epoch 12, batch 5550, loss[loss=0.2703, simple_loss=0.3452, pruned_loss=0.09769, over 8298.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3068, pruned_loss=0.07668, over 1611619.59 frames. ], batch size: 23, lr: 6.33e-03, grad_scale: 4.0
+2023-02-06 12:20:16,695 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94503.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:20:17,403 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3274, 1.9453, 2.8588, 2.2408, 2.5821, 2.1698, 1.7880, 1.3455],
+ device='cuda:3'), covar=tensor([0.4125, 0.4154, 0.1268, 0.2807, 0.2025, 0.2317, 0.1728, 0.4355],
+ device='cuda:3'), in_proj_covar=tensor([0.0893, 0.0874, 0.0730, 0.0856, 0.0945, 0.0804, 0.0705, 0.0767],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 12:20:24,042 INFO [train.py:901] (3/4) Epoch 12, batch 5600, loss[loss=0.2323, simple_loss=0.3148, pruned_loss=0.07492, over 8100.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3074, pruned_loss=0.07687, over 1612365.68 frames. ], batch size: 23, lr: 6.33e-03, grad_scale: 8.0
+2023-02-06 12:20:35,252 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94528.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:20:54,482 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.995e+02 2.675e+02 3.313e+02 4.214e+02 1.006e+03, threshold=6.626e+02, percent-clipped=7.0
+2023-02-06 12:20:58,134 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1181, 3.8851, 2.2404, 2.7781, 3.0002, 1.6844, 2.7335, 2.8778],
+ device='cuda:3'), covar=tensor([0.1496, 0.0263, 0.1107, 0.0714, 0.0561, 0.1425, 0.0924, 0.0915],
+ device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0234, 0.0314, 0.0296, 0.0299, 0.0322, 0.0336, 0.0302],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 12:21:00,666 INFO [train.py:901] (3/4) Epoch 12, batch 5650, loss[loss=0.2094, simple_loss=0.301, pruned_loss=0.05891, over 8326.00 frames. ], tot_loss[loss=0.2314, simple_loss=0.3077, pruned_loss=0.07753, over 1609013.58 frames. ], batch size: 26, lr: 6.33e-03, grad_scale: 8.0
+2023-02-06 12:21:15,258 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94585.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 12:21:21,330 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 12:21:35,767 INFO [train.py:901] (3/4) Epoch 12, batch 5700, loss[loss=0.1897, simple_loss=0.2637, pruned_loss=0.05784, over 7710.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3072, pruned_loss=0.07762, over 1610659.43 frames. ], batch size: 18, lr: 6.32e-03, grad_scale: 8.0
+2023-02-06 12:22:04,739 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.327e+02 3.036e+02 3.801e+02 7.493e+02, threshold=6.072e+02, percent-clipped=2.0
+2023-02-06 12:22:10,807 INFO [train.py:901] (3/4) Epoch 12, batch 5750, loss[loss=0.2184, simple_loss=0.3012, pruned_loss=0.06784, over 8494.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3076, pruned_loss=0.07749, over 1612818.34 frames. ], batch size: 28, lr: 6.32e-03, grad_scale: 8.0
+2023-02-06 12:22:26,193 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-06 12:22:35,722 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94700.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 12:22:42,476 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=94710.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:22:45,147 INFO [train.py:901] (3/4) Epoch 12, batch 5800, loss[loss=0.2521, simple_loss=0.3381, pruned_loss=0.08303, over 8097.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3081, pruned_loss=0.07777, over 1620594.85 frames. ], batch size: 23, lr: 6.32e-03, grad_scale: 8.0
+2023-02-06 12:22:45,333 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94714.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:22:53,246 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0457, 2.5360, 3.0082, 1.2213, 3.2041, 1.7252, 1.4095, 1.9940],
+ device='cuda:3'), covar=tensor([0.0657, 0.0292, 0.0218, 0.0579, 0.0259, 0.0679, 0.0715, 0.0404],
+ device='cuda:3'), in_proj_covar=tensor([0.0396, 0.0330, 0.0278, 0.0388, 0.0322, 0.0477, 0.0359, 0.0360],
+ device='cuda:3'), out_proj_covar=tensor([1.1189e-04, 9.0728e-05, 7.6748e-05, 1.0780e-04, 9.0071e-05, 1.4371e-04,
+ 1.0137e-04, 1.0096e-04], device='cuda:3')
+2023-02-06 12:23:02,554 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94739.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:23:10,572 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7012, 2.2224, 3.5906, 2.7971, 3.2809, 2.3575, 1.9010, 1.7302],
+ device='cuda:3'), covar=tensor([0.3845, 0.4470, 0.1205, 0.2470, 0.1702, 0.2265, 0.1814, 0.4310],
+ device='cuda:3'), in_proj_covar=tensor([0.0891, 0.0875, 0.0728, 0.0853, 0.0935, 0.0802, 0.0704, 0.0768],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 12:23:13,636 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.575e+02 3.288e+02 4.021e+02 7.847e+02, threshold=6.576e+02, percent-clipped=2.0
+2023-02-06 12:23:19,964 INFO [train.py:901] (3/4) Epoch 12, batch 5850, loss[loss=0.29, simple_loss=0.36, pruned_loss=0.11, over 8355.00 frames. ], tot_loss[loss=0.2333, simple_loss=0.3087, pruned_loss=0.079, over 1617058.94 frames. ], batch size: 24, lr: 6.32e-03, grad_scale: 8.0
+2023-02-06 12:23:33,717 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1343, 1.7580, 1.8979, 1.7086, 1.1861, 1.8171, 2.2780, 2.2956],
+ device='cuda:3'), covar=tensor([0.0357, 0.1184, 0.1616, 0.1290, 0.0591, 0.1401, 0.0572, 0.0493],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0157, 0.0103, 0.0160, 0.0113, 0.0137],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 12:23:54,261 INFO [train.py:901] (3/4) Epoch 12, batch 5900, loss[loss=0.3139, simple_loss=0.3649, pruned_loss=0.1315, over 6886.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.309, pruned_loss=0.07899, over 1617019.59 frames. ], batch size: 71, lr: 6.32e-03, grad_scale: 8.0
+2023-02-06 12:24:01,764 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=94825.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:24:22,275 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.604e+02 3.248e+02 4.213e+02 6.479e+02, threshold=6.496e+02, percent-clipped=0.0
+2023-02-06 12:24:28,378 INFO [train.py:901] (3/4) Epoch 12, batch 5950, loss[loss=0.1842, simple_loss=0.2704, pruned_loss=0.049, over 8105.00 frames. ], tot_loss[loss=0.2322, simple_loss=0.308, pruned_loss=0.07824, over 1618281.03 frames. ], batch size: 23, lr: 6.32e-03, grad_scale: 8.0
+2023-02-06 12:24:29,416 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-02-06 12:25:03,798 INFO [train.py:901] (3/4) Epoch 12, batch 6000, loss[loss=0.2338, simple_loss=0.3091, pruned_loss=0.0792, over 8029.00 frames. ], tot_loss[loss=0.2318, simple_loss=0.3074, pruned_loss=0.07813, over 1611925.32 frames. ], batch size: 22, lr: 6.31e-03, grad_scale: 8.0
+2023-02-06 12:25:03,799 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 12:25:16,946 INFO [train.py:935] (3/4) Epoch 12, validation: loss=0.1862, simple_loss=0.286, pruned_loss=0.04318, over 944034.00 frames.
+2023-02-06 12:25:16,947 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 12:25:18,116 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-06 12:25:44,724 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.442e+02 2.970e+02 3.787e+02 9.017e+02, threshold=5.940e+02, percent-clipped=3.0
+2023-02-06 12:25:45,524 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=94956.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 12:25:50,750 INFO [train.py:901] (3/4) Epoch 12, batch 6050, loss[loss=0.2169, simple_loss=0.3009, pruned_loss=0.06643, over 8109.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3064, pruned_loss=0.07744, over 1612213.20 frames. ], batch size: 23, lr: 6.31e-03, grad_scale: 8.0
+2023-02-06 12:26:02,541 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=94981.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 12:26:25,539 INFO [train.py:901] (3/4) Epoch 12, batch 6100, loss[loss=0.2364, simple_loss=0.3154, pruned_loss=0.07867, over 8332.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3055, pruned_loss=0.07662, over 1609790.52 frames. ], batch size: 26, lr: 6.31e-03, grad_scale: 8.0
+2023-02-06 12:26:50,863 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2291, 1.1348, 1.2510, 1.1681, 0.9671, 1.3085, 0.0377, 0.8575],
+ device='cuda:3'), covar=tensor([0.2362, 0.1682, 0.0646, 0.1291, 0.3990, 0.0718, 0.3303, 0.1678],
+ device='cuda:3'), in_proj_covar=tensor([0.0169, 0.0172, 0.0101, 0.0213, 0.0253, 0.0106, 0.0164, 0.0165],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 12:26:54,005 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-06 12:26:54,667 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.590e+02 3.216e+02 4.301e+02 8.648e+02, threshold=6.433e+02, percent-clipped=2.0
+2023-02-06 12:27:00,777 INFO [train.py:901] (3/4) Epoch 12, batch 6150, loss[loss=0.1982, simple_loss=0.2824, pruned_loss=0.05701, over 7461.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.3065, pruned_loss=0.07715, over 1612513.01 frames. ], batch size: 17, lr: 6.31e-03, grad_scale: 8.0
+2023-02-06 12:27:12,227 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95081.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:27:29,627 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95106.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 12:27:34,680 INFO [train.py:901] (3/4) Epoch 12, batch 6200, loss[loss=0.1985, simple_loss=0.2895, pruned_loss=0.05369, over 8363.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3061, pruned_loss=0.07674, over 1610719.47 frames. ], batch size: 24, lr: 6.31e-03, grad_scale: 8.0
+2023-02-06 12:27:41,101 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95123.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 12:28:04,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.519e+02 2.980e+02 3.798e+02 7.393e+02, threshold=5.961e+02, percent-clipped=2.0
+2023-02-06 12:28:10,299 INFO [train.py:901] (3/4) Epoch 12, batch 6250, loss[loss=0.1977, simple_loss=0.2777, pruned_loss=0.05889, over 7921.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3063, pruned_loss=0.07702, over 1611432.34 frames. ], batch size: 20, lr: 6.31e-03, grad_scale: 8.0
+2023-02-06 12:28:13,980 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs.
limit=2.0 +2023-02-06 12:28:19,684 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9548, 1.3009, 6.0890, 1.8797, 5.2893, 5.1425, 5.6211, 5.4562], + device='cuda:3'), covar=tensor([0.0424, 0.4883, 0.0348, 0.3499, 0.1078, 0.0766, 0.0430, 0.0489], + device='cuda:3'), in_proj_covar=tensor([0.0511, 0.0583, 0.0596, 0.0543, 0.0625, 0.0534, 0.0525, 0.0586], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 12:28:21,778 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2422, 1.8678, 2.5667, 2.0488, 2.3352, 2.1267, 1.7204, 1.0751], + device='cuda:3'), covar=tensor([0.3798, 0.3696, 0.1137, 0.2398, 0.1685, 0.2146, 0.1724, 0.3796], + device='cuda:3'), in_proj_covar=tensor([0.0899, 0.0884, 0.0740, 0.0867, 0.0947, 0.0813, 0.0713, 0.0776], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 12:28:43,827 INFO [train.py:901] (3/4) Epoch 12, batch 6300, loss[loss=0.1777, simple_loss=0.2541, pruned_loss=0.05071, over 6792.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3069, pruned_loss=0.07735, over 1610744.92 frames. ], batch size: 15, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:28:46,414 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.81 vs. limit=5.0 +2023-02-06 12:29:13,414 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.657e+02 3.224e+02 4.358e+02 1.571e+03, threshold=6.448e+02, percent-clipped=5.0 +2023-02-06 12:29:20,982 INFO [train.py:901] (3/4) Epoch 12, batch 6350, loss[loss=0.1971, simple_loss=0.2783, pruned_loss=0.05792, over 7811.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3059, pruned_loss=0.07674, over 1610991.49 frames. ], batch size: 20, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:29:30,724 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:36,279 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:29:50,022 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7003, 2.2473, 4.4569, 1.3815, 3.0658, 2.1979, 1.7208, 2.9461], + device='cuda:3'), covar=tensor([0.1747, 0.2423, 0.0622, 0.4133, 0.1583, 0.2896, 0.1940, 0.2168], + device='cuda:3'), in_proj_covar=tensor([0.0493, 0.0531, 0.0537, 0.0586, 0.0621, 0.0559, 0.0478, 0.0618], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:29:55,395 INFO [train.py:901] (3/4) Epoch 12, batch 6400, loss[loss=0.1848, simple_loss=0.2542, pruned_loss=0.05771, over 7548.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3048, pruned_loss=0.07636, over 1613477.46 frames. 
], batch size: 18, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:30:23,572 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.403e+02 2.937e+02 3.904e+02 6.682e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 12:30:25,105 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95357.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:30:27,148 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4907, 1.8567, 3.1499, 1.2608, 2.2969, 1.8913, 1.6033, 2.1055], + device='cuda:3'), covar=tensor([0.1700, 0.2514, 0.0807, 0.4079, 0.1716, 0.2843, 0.1936, 0.2265], + device='cuda:3'), in_proj_covar=tensor([0.0487, 0.0526, 0.0533, 0.0579, 0.0617, 0.0555, 0.0472, 0.0610], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:30:29,662 INFO [train.py:901] (3/4) Epoch 12, batch 6450, loss[loss=0.1783, simple_loss=0.2508, pruned_loss=0.05288, over 7711.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3054, pruned_loss=0.07678, over 1614171.83 frames. ], batch size: 18, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:05,024 INFO [train.py:901] (3/4) Epoch 12, batch 6500, loss[loss=0.245, simple_loss=0.3271, pruned_loss=0.08149, over 8352.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3045, pruned_loss=0.07606, over 1615696.21 frames. ], batch size: 26, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:14,613 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2993, 1.5124, 2.1958, 1.1947, 1.5286, 1.5736, 1.4098, 1.4440], + device='cuda:3'), covar=tensor([0.1776, 0.2251, 0.0803, 0.3942, 0.1593, 0.2929, 0.1886, 0.1886], + device='cuda:3'), in_proj_covar=tensor([0.0489, 0.0527, 0.0533, 0.0580, 0.0618, 0.0555, 0.0473, 0.0611], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:31:31,884 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.431e+02 2.857e+02 3.846e+02 1.801e+03, threshold=5.713e+02, percent-clipped=8.0 +2023-02-06 12:31:35,004 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-06 12:31:37,946 INFO [train.py:901] (3/4) Epoch 12, batch 6550, loss[loss=0.1958, simple_loss=0.2798, pruned_loss=0.05587, over 7818.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3048, pruned_loss=0.07642, over 1613534.05 frames. ], batch size: 20, lr: 6.30e-03, grad_scale: 8.0 +2023-02-06 12:31:40,440 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.27 vs. limit=5.0 +2023-02-06 12:31:40,702 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95467.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:31:59,683 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6090, 1.5353, 3.7856, 1.5347, 3.3233, 3.1801, 3.4455, 3.3266], + device='cuda:3'), covar=tensor([0.0646, 0.3952, 0.0646, 0.3480, 0.1168, 0.0922, 0.0599, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0512, 0.0581, 0.0594, 0.0544, 0.0622, 0.0532, 0.0520, 0.0582], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 12:32:06,855 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 12:32:13,528 INFO [train.py:901] (3/4) Epoch 12, batch 6600, loss[loss=0.2668, simple_loss=0.3366, pruned_loss=0.09851, over 8695.00 frames. ], tot_loss[loss=0.2304, simple_loss=0.306, pruned_loss=0.07737, over 1610824.17 frames. ], batch size: 49, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:25,471 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 12:32:25,784 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 12:32:40,261 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.522e+02 3.078e+02 3.913e+02 8.021e+02, threshold=6.157e+02, percent-clipped=7.0 +2023-02-06 12:32:43,802 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4036, 1.4585, 1.7728, 1.3284, 0.9833, 1.8000, 0.0973, 1.1471], + device='cuda:3'), covar=tensor([0.2774, 0.1591, 0.0491, 0.1559, 0.4159, 0.0495, 0.2727, 0.1554], + device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0173, 0.0103, 0.0216, 0.0256, 0.0108, 0.0163, 0.0166], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 12:32:46,202 INFO [train.py:901] (3/4) Epoch 12, batch 6650, loss[loss=0.1996, simple_loss=0.2724, pruned_loss=0.06343, over 7784.00 frames. ], tot_loss[loss=0.2315, simple_loss=0.3069, pruned_loss=0.07808, over 1608351.10 frames. ], batch size: 19, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:32:59,165 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95582.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:33:15,566 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-06 12:33:21,233 INFO [train.py:901] (3/4) Epoch 12, batch 6700, loss[loss=0.2725, simple_loss=0.352, pruned_loss=0.09652, over 8253.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3064, pruned_loss=0.07714, over 1612442.56 frames. ], batch size: 24, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:33:27,481 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:33,703 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:35,042 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7400, 1.4583, 2.7492, 1.2271, 2.0572, 3.0201, 3.1263, 2.5812], + device='cuda:3'), covar=tensor([0.1048, 0.1433, 0.0413, 0.2104, 0.0875, 0.0279, 0.0486, 0.0620], + device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0297, 0.0261, 0.0291, 0.0272, 0.0236, 0.0347, 0.0288], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 12:33:37,106 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:33:50,489 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.832e+02 2.656e+02 3.142e+02 4.011e+02 7.522e+02, threshold=6.284e+02, percent-clipped=4.0 +2023-02-06 12:33:56,567 INFO [train.py:901] (3/4) Epoch 12, batch 6750, loss[loss=0.2856, simple_loss=0.3566, pruned_loss=0.1073, over 8112.00 frames. ], tot_loss[loss=0.2317, simple_loss=0.3081, pruned_loss=0.07768, over 1617907.48 frames. 
], batch size: 23, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:22,169 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95701.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:30,630 INFO [train.py:901] (3/4) Epoch 12, batch 6800, loss[loss=0.2312, simple_loss=0.3012, pruned_loss=0.08056, over 8042.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3068, pruned_loss=0.07666, over 1616799.33 frames. ], batch size: 20, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:34:40,734 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 12:34:47,118 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:34:53,250 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:00,257 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.510e+02 2.822e+02 3.564e+02 9.162e+02, threshold=5.644e+02, percent-clipped=3.0 +2023-02-06 12:35:06,300 INFO [train.py:901] (3/4) Epoch 12, batch 6850, loss[loss=0.2682, simple_loss=0.3369, pruned_loss=0.09976, over 8287.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.3079, pruned_loss=0.07718, over 1617618.21 frames. ], batch size: 23, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:19,168 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5142, 1.9533, 2.0645, 1.0885, 2.1615, 1.3626, 0.5381, 1.7969], + device='cuda:3'), covar=tensor([0.0454, 0.0231, 0.0181, 0.0476, 0.0295, 0.0758, 0.0616, 0.0213], + device='cuda:3'), in_proj_covar=tensor([0.0394, 0.0329, 0.0282, 0.0393, 0.0325, 0.0483, 0.0360, 0.0360], + device='cuda:3'), out_proj_covar=tensor([1.1132e-04, 8.9943e-05, 7.7810e-05, 1.0918e-04, 9.0896e-05, 1.4543e-04, + 1.0159e-04, 1.0083e-04], device='cuda:3') +2023-02-06 12:35:25,954 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.13 vs. limit=5.0 +2023-02-06 12:35:26,864 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 12:35:40,362 INFO [train.py:901] (3/4) Epoch 12, batch 6900, loss[loss=0.2359, simple_loss=0.3192, pruned_loss=0.0763, over 8198.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3077, pruned_loss=0.07716, over 1614838.83 frames. 
], batch size: 23, lr: 6.29e-03, grad_scale: 8.0 +2023-02-06 12:35:41,923 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=95816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:35:43,310 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4298, 1.9556, 3.2263, 1.2407, 2.4173, 1.8936, 1.5310, 2.1895], + device='cuda:3'), covar=tensor([0.1792, 0.2302, 0.0715, 0.4229, 0.1573, 0.3055, 0.2004, 0.2188], + device='cuda:3'), in_proj_covar=tensor([0.0488, 0.0526, 0.0531, 0.0580, 0.0616, 0.0554, 0.0476, 0.0610], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:35:48,560 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3033, 1.7393, 2.7281, 1.1676, 2.0214, 1.7081, 1.4783, 1.8323], + device='cuda:3'), covar=tensor([0.1938, 0.2251, 0.0827, 0.4202, 0.1630, 0.3076, 0.1989, 0.2228], + device='cuda:3'), in_proj_covar=tensor([0.0487, 0.0525, 0.0530, 0.0579, 0.0615, 0.0554, 0.0475, 0.0609], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:35:51,215 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95830.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:35:57,285 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95838.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:36:08,028 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.693e+02 3.422e+02 4.342e+02 1.062e+03, threshold=6.843e+02, percent-clipped=12.0 +2023-02-06 12:36:14,248 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=95863.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:36:14,702 INFO [train.py:901] (3/4) Epoch 12, batch 6950, loss[loss=0.1962, simple_loss=0.2653, pruned_loss=0.06351, over 7204.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3074, pruned_loss=0.07717, over 1609990.38 frames. ], batch size: 16, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:36:15,458 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:36:34,590 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 12:36:48,737 INFO [train.py:901] (3/4) Epoch 12, batch 7000, loss[loss=0.2182, simple_loss=0.2852, pruned_loss=0.07558, over 7659.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3075, pruned_loss=0.07709, over 1610088.68 frames. ], batch size: 19, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:17,422 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.501e+02 3.116e+02 3.850e+02 8.001e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-06 12:37:19,590 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.88 vs. limit=5.0 +2023-02-06 12:37:23,279 INFO [train.py:901] (3/4) Epoch 12, batch 7050, loss[loss=0.2128, simple_loss=0.2895, pruned_loss=0.06806, over 7813.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3059, pruned_loss=0.07626, over 1609214.86 frames. 
], batch size: 20, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:37:24,105 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3328, 1.2573, 4.5483, 1.7853, 3.9650, 3.8053, 4.0922, 3.9460], + device='cuda:3'), covar=tensor([0.0531, 0.4811, 0.0462, 0.3358, 0.1139, 0.0806, 0.0519, 0.0616], + device='cuda:3'), in_proj_covar=tensor([0.0504, 0.0580, 0.0592, 0.0543, 0.0618, 0.0530, 0.0521, 0.0582], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 12:37:34,014 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=95979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:44,076 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=95993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:45,560 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.99 vs. limit=5.0 +2023-02-06 12:37:47,413 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=95998.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:50,529 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:37:58,492 INFO [train.py:901] (3/4) Epoch 12, batch 7100, loss[loss=0.1877, simple_loss=0.2732, pruned_loss=0.05106, over 7814.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3057, pruned_loss=0.07668, over 1610754.82 frames. ], batch size: 20, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:01,410 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:07,003 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:26,966 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.539e+02 3.029e+02 4.080e+02 8.783e+02, threshold=6.058e+02, percent-clipped=4.0 +2023-02-06 12:38:33,147 INFO [train.py:901] (3/4) Epoch 12, batch 7150, loss[loss=0.2597, simple_loss=0.3281, pruned_loss=0.09564, over 6863.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3064, pruned_loss=0.07703, over 1612535.97 frames. ], batch size: 72, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:38:38,725 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96072.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:54,802 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:56,840 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:38:58,153 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:39:08,660 INFO [train.py:901] (3/4) Epoch 12, batch 7200, loss[loss=0.2409, simple_loss=0.3013, pruned_loss=0.09023, over 7648.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.3069, pruned_loss=0.07732, over 1614158.01 frames. 
], batch size: 19, lr: 6.28e-03, grad_scale: 8.0 +2023-02-06 12:39:36,231 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.446e+02 3.002e+02 3.633e+02 6.248e+02, threshold=6.005e+02, percent-clipped=1.0 +2023-02-06 12:39:42,864 INFO [train.py:901] (3/4) Epoch 12, batch 7250, loss[loss=0.2726, simple_loss=0.3313, pruned_loss=0.1069, over 7053.00 frames. ], tot_loss[loss=0.2305, simple_loss=0.3067, pruned_loss=0.07716, over 1612808.38 frames. ], batch size: 71, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:39:49,510 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96174.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:39:55,436 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:39:56,073 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.3712, 5.4501, 4.8297, 2.3942, 4.8252, 5.0049, 5.1298, 4.5741], + device='cuda:3'), covar=tensor([0.0643, 0.0429, 0.0869, 0.4640, 0.0730, 0.0891, 0.0897, 0.0861], + device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0381, 0.0393, 0.0484, 0.0385, 0.0385, 0.0382, 0.0334], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:40:14,154 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:40:17,295 INFO [train.py:901] (3/4) Epoch 12, batch 7300, loss[loss=0.1525, simple_loss=0.2327, pruned_loss=0.03609, over 7451.00 frames. ], tot_loss[loss=0.2308, simple_loss=0.307, pruned_loss=0.07729, over 1610761.53 frames. ], batch size: 17, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:40:44,977 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8803, 2.2958, 3.1687, 1.6942, 2.6925, 2.1997, 2.0891, 2.5223], + device='cuda:3'), covar=tensor([0.1308, 0.1742, 0.0555, 0.3080, 0.1224, 0.2105, 0.1361, 0.1672], + device='cuda:3'), in_proj_covar=tensor([0.0488, 0.0526, 0.0535, 0.0578, 0.0620, 0.0556, 0.0475, 0.0610], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:40:45,404 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.567e+02 3.297e+02 4.044e+02 1.170e+03, threshold=6.593e+02, percent-clipped=7.0 +2023-02-06 12:40:51,427 INFO [train.py:901] (3/4) Epoch 12, batch 7350, loss[loss=0.2264, simple_loss=0.3096, pruned_loss=0.07157, over 8676.00 frames. ], tot_loss[loss=0.2306, simple_loss=0.3069, pruned_loss=0.07719, over 1610913.59 frames. ], batch size: 34, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:41:09,304 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96289.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:41:15,842 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 12:41:26,495 INFO [train.py:901] (3/4) Epoch 12, batch 7400, loss[loss=0.2319, simple_loss=0.3012, pruned_loss=0.08132, over 5953.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3049, pruned_loss=0.07582, over 1603073.81 frames. 
], batch size: 13, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:41:33,417 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:36,481 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 12:41:40,821 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2151, 4.1925, 3.7369, 1.7999, 3.6612, 3.6653, 3.7835, 3.3967], + device='cuda:3'), covar=tensor([0.0742, 0.0588, 0.1058, 0.4461, 0.0832, 0.0959, 0.1215, 0.0996], + device='cuda:3'), in_proj_covar=tensor([0.0467, 0.0381, 0.0390, 0.0483, 0.0381, 0.0384, 0.0380, 0.0333], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:41:42,992 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4725, 1.2222, 4.6670, 1.6821, 4.0911, 3.8708, 4.1990, 4.0948], + device='cuda:3'), covar=tensor([0.0554, 0.5206, 0.0477, 0.3836, 0.1156, 0.0911, 0.0575, 0.0614], + device='cuda:3'), in_proj_covar=tensor([0.0512, 0.0589, 0.0604, 0.0549, 0.0627, 0.0535, 0.0530, 0.0592], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 12:41:47,013 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:47,081 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:52,329 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:41:55,367 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.475e+02 3.182e+02 4.307e+02 9.281e+02, threshold=6.365e+02, percent-clipped=3.0 +2023-02-06 12:42:01,560 INFO [train.py:901] (3/4) Epoch 12, batch 7450, loss[loss=0.2839, simple_loss=0.3562, pruned_loss=0.1058, over 8593.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3055, pruned_loss=0.07619, over 1606269.78 frames. ], batch size: 39, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:42:09,242 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:42:15,394 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 12:42:31,442 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96408.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:42:35,242 INFO [train.py:901] (3/4) Epoch 12, batch 7500, loss[loss=0.2087, simple_loss=0.2845, pruned_loss=0.06642, over 7446.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.3061, pruned_loss=0.07647, over 1605040.49 frames. 
], batch size: 17, lr: 6.27e-03, grad_scale: 8.0 +2023-02-06 12:42:54,779 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:43:02,270 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9825, 3.9371, 2.5234, 2.7884, 3.0862, 2.1596, 2.6499, 2.8535], + device='cuda:3'), covar=tensor([0.1560, 0.0352, 0.0911, 0.0708, 0.0578, 0.1178, 0.0988, 0.1045], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0240, 0.0323, 0.0301, 0.0306, 0.0325, 0.0343, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 12:43:03,968 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.684e+02 3.354e+02 4.069e+02 8.964e+02, threshold=6.707e+02, percent-clipped=7.0 +2023-02-06 12:43:05,486 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:43:09,792 INFO [train.py:901] (3/4) Epoch 12, batch 7550, loss[loss=0.2089, simple_loss=0.2827, pruned_loss=0.06755, over 7712.00 frames. ], tot_loss[loss=0.2307, simple_loss=0.3073, pruned_loss=0.07709, over 1607447.25 frames. ], batch size: 18, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:43:15,331 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2297, 1.1140, 1.3010, 1.1424, 0.9524, 1.3005, 0.1093, 1.0364], + device='cuda:3'), covar=tensor([0.2282, 0.1694, 0.0614, 0.1129, 0.3634, 0.0648, 0.2861, 0.1479], + device='cuda:3'), in_proj_covar=tensor([0.0169, 0.0173, 0.0103, 0.0216, 0.0254, 0.0109, 0.0164, 0.0166], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 12:43:42,990 INFO [train.py:901] (3/4) Epoch 12, batch 7600, loss[loss=0.2354, simple_loss=0.3006, pruned_loss=0.08517, over 7969.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3078, pruned_loss=0.07742, over 1608080.55 frames. ], batch size: 21, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:43:52,531 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:05,572 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96545.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:44:11,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.342e+02 2.706e+02 3.173e+02 4.121e+02 9.971e+02, threshold=6.345e+02, percent-clipped=8.0 +2023-02-06 12:44:13,340 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:18,542 INFO [train.py:901] (3/4) Epoch 12, batch 7650, loss[loss=0.1737, simple_loss=0.2489, pruned_loss=0.04926, over 7307.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3079, pruned_loss=0.07727, over 1608262.98 frames. 
], batch size: 16, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:44:23,310 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96570.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 12:44:29,851 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:47,080 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:44:53,025 INFO [train.py:901] (3/4) Epoch 12, batch 7700, loss[loss=0.2051, simple_loss=0.2955, pruned_loss=0.0573, over 8246.00 frames. ], tot_loss[loss=0.2309, simple_loss=0.3077, pruned_loss=0.07701, over 1611791.08 frames. ], batch size: 24, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:45:02,659 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5304, 1.5354, 1.8010, 1.3933, 1.0914, 1.7802, 0.1228, 1.0994], + device='cuda:3'), covar=tensor([0.2212, 0.1503, 0.0551, 0.1439, 0.3617, 0.0505, 0.3326, 0.1711], + device='cuda:3'), in_proj_covar=tensor([0.0169, 0.0172, 0.0103, 0.0215, 0.0255, 0.0109, 0.0163, 0.0166], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 12:45:12,730 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:45:21,241 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.521e+02 3.004e+02 3.630e+02 7.905e+02, threshold=6.007e+02, percent-clipped=3.0 +2023-02-06 12:45:23,931 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 12:45:27,894 INFO [train.py:901] (3/4) Epoch 12, batch 7750, loss[loss=0.2858, simple_loss=0.3618, pruned_loss=0.1048, over 8578.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.308, pruned_loss=0.07709, over 1615568.58 frames. ], batch size: 31, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:45:42,858 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:02,093 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96713.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:02,573 INFO [train.py:901] (3/4) Epoch 12, batch 7800, loss[loss=0.2254, simple_loss=0.3129, pruned_loss=0.06895, over 8712.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3065, pruned_loss=0.07646, over 1615695.03 frames. ], batch size: 34, lr: 6.26e-03, grad_scale: 16.0 +2023-02-06 12:46:15,972 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.80 vs. 
limit=5.0 +2023-02-06 12:46:19,264 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:23,938 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3268, 1.3055, 2.1454, 0.9648, 2.0054, 2.3087, 2.5568, 1.6859], + device='cuda:3'), covar=tensor([0.1258, 0.1468, 0.0697, 0.2732, 0.0932, 0.0596, 0.0782, 0.1216], + device='cuda:3'), in_proj_covar=tensor([0.0268, 0.0299, 0.0263, 0.0291, 0.0274, 0.0238, 0.0354, 0.0289], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 12:46:28,441 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=96752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:46:30,315 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.519e+02 3.193e+02 4.174e+02 8.059e+02, threshold=6.386e+02, percent-clipped=4.0 +2023-02-06 12:46:31,382 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 12:46:36,566 INFO [train.py:901] (3/4) Epoch 12, batch 7850, loss[loss=0.2426, simple_loss=0.3124, pruned_loss=0.08645, over 7474.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.306, pruned_loss=0.0765, over 1614826.01 frames. ], batch size: 71, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:46:56,866 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=96794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:01,900 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:10,034 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:10,493 INFO [train.py:901] (3/4) Epoch 12, batch 7900, loss[loss=0.3025, simple_loss=0.3599, pruned_loss=0.1226, over 6763.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3061, pruned_loss=0.0767, over 1615799.17 frames. ], batch size: 72, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:47:27,488 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:47:38,783 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.595e+02 3.080e+02 3.878e+02 8.124e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 12:47:44,792 INFO [train.py:901] (3/4) Epoch 12, batch 7950, loss[loss=0.2161, simple_loss=0.2815, pruned_loss=0.07537, over 7798.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3077, pruned_loss=0.07805, over 1615856.77 frames. ], batch size: 19, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:47:47,003 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=96867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:07,435 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=96898.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:17,698 INFO [train.py:901] (3/4) Epoch 12, batch 8000, loss[loss=0.2301, simple_loss=0.3054, pruned_loss=0.07738, over 8131.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3078, pruned_loss=0.0784, over 1615500.14 frames. 
], batch size: 22, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:48:23,698 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=96923.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:48:36,034 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 12:48:45,040 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.377e+02 3.280e+02 4.266e+02 7.100e+02, threshold=6.559e+02, percent-clipped=4.0 +2023-02-06 12:48:51,263 INFO [train.py:901] (3/4) Epoch 12, batch 8050, loss[loss=0.2045, simple_loss=0.2737, pruned_loss=0.06764, over 7542.00 frames. ], tot_loss[loss=0.2301, simple_loss=0.3054, pruned_loss=0.07745, over 1606441.35 frames. ], batch size: 18, lr: 6.25e-03, grad_scale: 16.0 +2023-02-06 12:49:06,772 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.88 vs. limit=5.0 +2023-02-06 12:49:24,805 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 12:49:29,791 INFO [train.py:901] (3/4) Epoch 13, batch 0, loss[loss=0.2443, simple_loss=0.3268, pruned_loss=0.08092, over 8279.00 frames. ], tot_loss[loss=0.2443, simple_loss=0.3268, pruned_loss=0.08092, over 8279.00 frames. ], batch size: 23, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:49:29,791 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 12:49:40,735 INFO [train.py:935] (3/4) Epoch 13, validation: loss=0.1867, simple_loss=0.2865, pruned_loss=0.04345, over 944034.00 frames. +2023-02-06 12:49:40,737 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 12:49:41,811 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 12:49:55,391 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 12:49:55,526 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:14,915 INFO [train.py:901] (3/4) Epoch 13, batch 50, loss[loss=0.2891, simple_loss=0.3588, pruned_loss=0.1097, over 8461.00 frames. ], tot_loss[loss=0.2347, simple_loss=0.3118, pruned_loss=0.07875, over 368319.19 frames. ], batch size: 25, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:50:20,337 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.833e+02 3.357e+02 4.758e+02 6.927e+02, threshold=6.715e+02, percent-clipped=2.0 +2023-02-06 12:50:21,954 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:29,188 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 12:50:41,097 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:50:50,959 INFO [train.py:901] (3/4) Epoch 13, batch 100, loss[loss=0.192, simple_loss=0.2661, pruned_loss=0.05891, over 7645.00 frames. ], tot_loss[loss=0.2296, simple_loss=0.3076, pruned_loss=0.07576, over 638995.58 frames. ], batch size: 19, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:50:52,984 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 12:51:09,074 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:18,986 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:24,701 INFO [train.py:901] (3/4) Epoch 13, batch 150, loss[loss=0.3121, simple_loss=0.3723, pruned_loss=0.1259, over 7965.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.3084, pruned_loss=0.07698, over 853655.18 frames. ], batch size: 21, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:51:25,593 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97148.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:51:30,111 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.477e+02 2.848e+02 3.342e+02 7.997e+02, threshold=5.696e+02, percent-clipped=2.0 +2023-02-06 12:51:39,289 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 12:51:42,869 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2196, 1.3318, 4.3649, 1.9613, 2.4477, 4.9879, 4.8606, 4.2995], + device='cuda:3'), covar=tensor([0.1139, 0.1786, 0.0265, 0.1880, 0.1052, 0.0175, 0.0380, 0.0553], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0302, 0.0265, 0.0293, 0.0275, 0.0239, 0.0359, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 12:51:58,451 INFO [train.py:901] (3/4) Epoch 13, batch 200, loss[loss=0.2113, simple_loss=0.3023, pruned_loss=0.06012, over 8502.00 frames. ], tot_loss[loss=0.2323, simple_loss=0.3091, pruned_loss=0.07772, over 1024861.43 frames. ], batch size: 26, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:52:09,220 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8791, 2.0406, 1.7347, 2.5880, 1.2185, 1.5438, 1.6695, 2.1109], + device='cuda:3'), covar=tensor([0.0702, 0.0833, 0.0914, 0.0418, 0.1135, 0.1379, 0.1008, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0211, 0.0251, 0.0214, 0.0213, 0.0251, 0.0254, 0.0218], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 12:52:18,286 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2879, 1.8430, 4.2585, 1.6778, 2.2979, 4.7124, 4.8725, 3.8358], + device='cuda:3'), covar=tensor([0.1229, 0.1624, 0.0375, 0.2413, 0.1284, 0.0295, 0.0532, 0.0840], + device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0301, 0.0263, 0.0292, 0.0274, 0.0238, 0.0356, 0.0291], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 12:52:23,162 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 12:52:33,410 INFO [train.py:901] (3/4) Epoch 13, batch 250, loss[loss=0.262, simple_loss=0.3433, pruned_loss=0.09033, over 8136.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3081, pruned_loss=0.07699, over 1155770.70 frames. 
], batch size: 22, lr: 6.00e-03, grad_scale: 16.0 +2023-02-06 12:52:37,629 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97253.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:52:38,759 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.455e+02 3.117e+02 3.819e+02 7.824e+02, threshold=6.233e+02, percent-clipped=7.0 +2023-02-06 12:52:46,021 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 12:52:53,683 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 12:52:54,016 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:52:54,531 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 12:53:06,365 INFO [train.py:901] (3/4) Epoch 13, batch 300, loss[loss=0.2122, simple_loss=0.2949, pruned_loss=0.06474, over 8033.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3076, pruned_loss=0.07723, over 1254459.02 frames. ], batch size: 22, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:53:06,760 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.96 vs. limit=2.0 +2023-02-06 12:53:41,549 INFO [train.py:901] (3/4) Epoch 13, batch 350, loss[loss=0.212, simple_loss=0.2755, pruned_loss=0.07425, over 7223.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.3088, pruned_loss=0.07806, over 1338238.96 frames. ], batch size: 16, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:53:46,922 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.508e+02 3.076e+02 3.709e+02 6.548e+02, threshold=6.153e+02, percent-clipped=1.0 +2023-02-06 12:53:50,484 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:51,768 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:53:53,849 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:54:11,903 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:54:15,154 INFO [train.py:901] (3/4) Epoch 13, batch 400, loss[loss=0.2378, simple_loss=0.3155, pruned_loss=0.0801, over 8354.00 frames. ], tot_loss[loss=0.2321, simple_loss=0.3084, pruned_loss=0.07791, over 1401299.61 frames. 
], batch size: 24, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:54:34,010 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6294, 1.9554, 3.0811, 1.3921, 2.2014, 1.9449, 1.7403, 2.0855], + device='cuda:3'), covar=tensor([0.1576, 0.2087, 0.0713, 0.3890, 0.1662, 0.2861, 0.1728, 0.2190], + device='cuda:3'), in_proj_covar=tensor([0.0486, 0.0525, 0.0531, 0.0577, 0.0620, 0.0554, 0.0476, 0.0611], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 12:54:45,026 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3119, 1.9570, 2.8687, 2.3532, 2.5764, 2.2264, 1.8486, 1.3288], + device='cuda:3'), covar=tensor([0.4214, 0.4219, 0.1354, 0.2781, 0.2100, 0.2457, 0.1709, 0.4649], + device='cuda:3'), in_proj_covar=tensor([0.0890, 0.0878, 0.0730, 0.0857, 0.0935, 0.0807, 0.0702, 0.0768], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 12:54:51,721 INFO [train.py:901] (3/4) Epoch 13, batch 450, loss[loss=0.2239, simple_loss=0.3078, pruned_loss=0.06997, over 8468.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.3084, pruned_loss=0.07768, over 1452629.52 frames. ], batch size: 29, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:54:57,098 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.318e+02 2.836e+02 3.756e+02 7.381e+02, threshold=5.672e+02, percent-clipped=3.0 +2023-02-06 12:55:13,262 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:26,761 INFO [train.py:901] (3/4) Epoch 13, batch 500, loss[loss=0.2442, simple_loss=0.3143, pruned_loss=0.08702, over 8494.00 frames. ], tot_loss[loss=0.231, simple_loss=0.3077, pruned_loss=0.07711, over 1479630.95 frames. ], batch size: 28, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:55:29,814 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6676, 1.6793, 2.0572, 1.5650, 1.0717, 2.0488, 0.2267, 1.3067], + device='cuda:3'), covar=tensor([0.2053, 0.1727, 0.0465, 0.1739, 0.4392, 0.0478, 0.3140, 0.1858], + device='cuda:3'), in_proj_covar=tensor([0.0172, 0.0173, 0.0103, 0.0220, 0.0259, 0.0110, 0.0164, 0.0170], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 12:55:35,492 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97509.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:55:52,969 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97534.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:01,768 INFO [train.py:901] (3/4) Epoch 13, batch 550, loss[loss=0.2071, simple_loss=0.2784, pruned_loss=0.06795, over 7220.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.307, pruned_loss=0.07666, over 1510812.06 frames. ], batch size: 16, lr: 5.99e-03, grad_scale: 16.0 +2023-02-06 12:56:07,720 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.533e+02 3.037e+02 3.770e+02 9.997e+02, threshold=6.074e+02, percent-clipped=4.0 +2023-02-06 12:56:20,094 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:36,760 INFO [train.py:901] (3/4) Epoch 13, batch 600, loss[loss=0.2453, simple_loss=0.3163, pruned_loss=0.08718, over 8329.00 frames. 
], tot_loss[loss=0.2291, simple_loss=0.3063, pruned_loss=0.07596, over 1536447.55 frames. ], batch size: 25, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:56:53,704 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97622.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:56:55,695 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 12:56:55,847 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8103, 1.4041, 2.8084, 1.3680, 2.0539, 2.9893, 3.1072, 2.5624], + device='cuda:3'), covar=tensor([0.0934, 0.1425, 0.0388, 0.1967, 0.0843, 0.0312, 0.0563, 0.0669], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0299, 0.0264, 0.0292, 0.0275, 0.0239, 0.0356, 0.0291], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 12:57:10,256 INFO [train.py:901] (3/4) Epoch 13, batch 650, loss[loss=0.2082, simple_loss=0.2971, pruned_loss=0.05968, over 8486.00 frames. ], tot_loss[loss=0.2298, simple_loss=0.3074, pruned_loss=0.07612, over 1557918.55 frames. ], batch size: 29, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:57:16,275 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.537e+02 2.925e+02 3.842e+02 7.324e+02, threshold=5.850e+02, percent-clipped=4.0 +2023-02-06 12:57:37,848 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5588, 2.6390, 1.8416, 2.1740, 2.0637, 1.3505, 1.8773, 2.1150], + device='cuda:3'), covar=tensor([0.1427, 0.0351, 0.1069, 0.0647, 0.0783, 0.1489, 0.1077, 0.0957], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0238, 0.0323, 0.0303, 0.0306, 0.0327, 0.0345, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 12:57:42,539 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97692.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 12:57:45,737 INFO [train.py:901] (3/4) Epoch 13, batch 700, loss[loss=0.2292, simple_loss=0.3041, pruned_loss=0.07719, over 8134.00 frames. ], tot_loss[loss=0.2295, simple_loss=0.307, pruned_loss=0.07599, over 1572071.72 frames. ], batch size: 22, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:57:50,154 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.51 vs. limit=5.0 +2023-02-06 12:57:51,262 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:57:54,515 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97709.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:57:59,092 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 12:58:10,718 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:12,619 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:13,391 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97737.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:19,716 INFO [train.py:901] (3/4) Epoch 13, batch 750, loss[loss=0.214, simple_loss=0.2956, pruned_loss=0.06621, over 7967.00 frames. 
], tot_loss[loss=0.229, simple_loss=0.3059, pruned_loss=0.07604, over 1580195.51 frames. ], batch size: 21, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:58:25,054 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.478e+02 2.997e+02 3.995e+02 8.399e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-06 12:58:27,379 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=97758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:58:39,716 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 12:58:49,000 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 12:58:51,777 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9459, 1.4653, 3.3285, 1.3771, 2.3818, 3.6763, 3.7254, 2.9850], + device='cuda:3'), covar=tensor([0.1144, 0.1679, 0.0397, 0.2135, 0.0953, 0.0304, 0.0561, 0.0829], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0302, 0.0267, 0.0294, 0.0278, 0.0242, 0.0359, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 12:58:54,231 INFO [train.py:901] (3/4) Epoch 13, batch 800, loss[loss=0.2621, simple_loss=0.3194, pruned_loss=0.1024, over 7822.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3053, pruned_loss=0.07564, over 1589825.79 frames. ], batch size: 20, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:59:10,089 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:14,047 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:29,481 INFO [train.py:901] (3/4) Epoch 13, batch 850, loss[loss=0.1745, simple_loss=0.2632, pruned_loss=0.04293, over 8079.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3041, pruned_loss=0.07492, over 1593674.50 frames. ], batch size: 21, lr: 5.98e-03, grad_scale: 16.0 +2023-02-06 12:59:32,323 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=97851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 12:59:35,502 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.627e+02 3.254e+02 4.246e+02 9.834e+02, threshold=6.507e+02, percent-clipped=8.0 +2023-02-06 13:00:03,798 INFO [train.py:901] (3/4) Epoch 13, batch 900, loss[loss=0.2025, simple_loss=0.2765, pruned_loss=0.06425, over 7424.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3036, pruned_loss=0.07465, over 1596221.18 frames. ], batch size: 17, lr: 5.98e-03, grad_scale: 8.0 +2023-02-06 13:00:05,028 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 13:00:09,705 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=97906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:00:18,218 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=97917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:00:39,219 INFO [train.py:901] (3/4) Epoch 13, batch 950, loss[loss=0.2344, simple_loss=0.3091, pruned_loss=0.07988, over 8580.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3051, pruned_loss=0.07559, over 1604868.05 frames. 
], batch size: 39, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:00:45,305 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.593e+02 3.202e+02 4.020e+02 7.231e+02, threshold=6.403e+02, percent-clipped=2.0 +2023-02-06 13:00:55,368 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1193, 1.9304, 3.1044, 1.5900, 2.3529, 3.3990, 3.4343, 2.9883], + device='cuda:3'), covar=tensor([0.0919, 0.1265, 0.0438, 0.1851, 0.1027, 0.0249, 0.0556, 0.0522], + device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0300, 0.0266, 0.0292, 0.0276, 0.0241, 0.0358, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:01:08,779 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 13:01:11,125 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=97993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:13,715 INFO [train.py:901] (3/4) Epoch 13, batch 1000, loss[loss=0.2718, simple_loss=0.3475, pruned_loss=0.098, over 8287.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.305, pruned_loss=0.07566, over 1606662.24 frames. ], batch size: 23, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:01:29,781 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:39,957 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:01:43,183 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98036.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:01:44,385 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 13:01:50,454 INFO [train.py:901] (3/4) Epoch 13, batch 1050, loss[loss=0.2593, simple_loss=0.3272, pruned_loss=0.09568, over 8438.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3048, pruned_loss=0.07559, over 1606945.93 frames. ], batch size: 27, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:01:56,534 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.707e+02 2.365e+02 2.893e+02 3.782e+02 5.594e+02, threshold=5.785e+02, percent-clipped=0.0 +2023-02-06 13:01:57,234 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 13:02:10,160 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98075.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:13,453 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:24,727 INFO [train.py:901] (3/4) Epoch 13, batch 1100, loss[loss=0.2318, simple_loss=0.313, pruned_loss=0.07526, over 8330.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3032, pruned_loss=0.07495, over 1602087.35 frames. 
], batch size: 25, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:02:26,971 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98100.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:30,280 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98105.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:31,655 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:43,618 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98124.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:49,146 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98132.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:02:59,486 INFO [train.py:901] (3/4) Epoch 13, batch 1150, loss[loss=0.2216, simple_loss=0.3007, pruned_loss=0.07125, over 7797.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3025, pruned_loss=0.07476, over 1595818.88 frames. ], batch size: 20, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:03:02,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98151.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:03:05,443 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 13:03:06,089 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.603e+02 3.101e+02 3.825e+02 7.832e+02, threshold=6.203e+02, percent-clipped=6.0 +2023-02-06 13:03:14,955 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7495, 2.0904, 2.2223, 1.2843, 2.3562, 1.5883, 0.6715, 1.8264], + device='cuda:3'), covar=tensor([0.0424, 0.0202, 0.0178, 0.0393, 0.0233, 0.0608, 0.0540, 0.0208], + device='cuda:3'), in_proj_covar=tensor([0.0402, 0.0337, 0.0289, 0.0397, 0.0325, 0.0484, 0.0360, 0.0362], + device='cuda:3'), out_proj_covar=tensor([1.1293e-04, 9.2114e-05, 7.9625e-05, 1.0987e-04, 9.0458e-05, 1.4467e-04, + 1.0148e-04, 1.0106e-04], device='cuda:3') +2023-02-06 13:03:34,172 INFO [train.py:901] (3/4) Epoch 13, batch 1200, loss[loss=0.2169, simple_loss=0.3035, pruned_loss=0.06513, over 8197.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.303, pruned_loss=0.07489, over 1599434.78 frames. ], batch size: 23, lr: 5.97e-03, grad_scale: 8.0 +2023-02-06 13:03:46,029 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 13:04:06,227 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2360, 1.1279, 3.3161, 1.0154, 2.8992, 2.7873, 2.9858, 2.9069], + device='cuda:3'), covar=tensor([0.0808, 0.4294, 0.0814, 0.4007, 0.1443, 0.1142, 0.0821, 0.0922], + device='cuda:3'), in_proj_covar=tensor([0.0510, 0.0587, 0.0598, 0.0544, 0.0618, 0.0531, 0.0522, 0.0579], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:04:08,153 INFO [train.py:901] (3/4) Epoch 13, batch 1250, loss[loss=0.1888, simple_loss=0.2716, pruned_loss=0.05304, over 8107.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3043, pruned_loss=0.07499, over 1607816.71 frames. 
], batch size: 21, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:04:10,209 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98250.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:04:14,079 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.568e+02 3.066e+02 4.053e+02 1.440e+03, threshold=6.132e+02, percent-clipped=8.0 +2023-02-06 13:04:36,959 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:04:43,430 INFO [train.py:901] (3/4) Epoch 13, batch 1300, loss[loss=0.3315, simple_loss=0.3863, pruned_loss=0.1383, over 8759.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3049, pruned_loss=0.07514, over 1611686.98 frames. ], batch size: 30, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:04:44,922 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6068, 1.9531, 3.3564, 2.4926, 2.7591, 2.2891, 1.8838, 1.6206], + device='cuda:3'), covar=tensor([0.4203, 0.4749, 0.1264, 0.2908, 0.2221, 0.2517, 0.2001, 0.4584], + device='cuda:3'), in_proj_covar=tensor([0.0893, 0.0883, 0.0741, 0.0861, 0.0939, 0.0810, 0.0706, 0.0771], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:04:54,307 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6807, 1.3593, 1.5193, 1.2235, 0.9275, 1.2782, 1.4881, 1.5585], + device='cuda:3'), covar=tensor([0.0485, 0.1263, 0.1743, 0.1398, 0.0565, 0.1523, 0.0684, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0193, 0.0158, 0.0102, 0.0164, 0.0116, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:04:54,336 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98313.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:05:12,876 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4252, 2.7277, 1.9510, 2.2405, 2.1811, 1.6113, 1.8965, 2.1924], + device='cuda:3'), covar=tensor([0.1194, 0.0285, 0.0986, 0.0537, 0.0575, 0.1203, 0.0864, 0.0696], + device='cuda:3'), in_proj_covar=tensor([0.0338, 0.0231, 0.0313, 0.0295, 0.0296, 0.0317, 0.0334, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:05:16,646 INFO [train.py:901] (3/4) Epoch 13, batch 1350, loss[loss=0.2697, simple_loss=0.3451, pruned_loss=0.09716, over 8135.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.304, pruned_loss=0.07519, over 1608375.17 frames. ], batch size: 22, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:05:23,233 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.492e+02 3.102e+02 3.697e+02 5.327e+02, threshold=6.205e+02, percent-clipped=0.0 +2023-02-06 13:05:29,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98365.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:05:52,230 INFO [train.py:901] (3/4) Epoch 13, batch 1400, loss[loss=0.1912, simple_loss=0.2783, pruned_loss=0.05206, over 7659.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3041, pruned_loss=0.07587, over 1606972.15 frames. 
], batch size: 19, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:05:59,420 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98407.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:06:14,075 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:06:16,859 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98432.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:06:26,901 INFO [train.py:901] (3/4) Epoch 13, batch 1450, loss[loss=0.2429, simple_loss=0.3159, pruned_loss=0.08494, over 8368.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3057, pruned_loss=0.07653, over 1610212.84 frames. ], batch size: 24, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:06:32,939 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.293e+02 2.812e+02 3.491e+02 8.118e+02, threshold=5.625e+02, percent-clipped=1.0 +2023-02-06 13:06:34,308 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 13:06:41,227 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98468.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:07:02,158 INFO [train.py:901] (3/4) Epoch 13, batch 1500, loss[loss=0.1805, simple_loss=0.2612, pruned_loss=0.04989, over 7801.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3051, pruned_loss=0.07618, over 1611751.02 frames. ], batch size: 20, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:07:28,254 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4710, 2.0045, 3.4034, 1.2493, 2.6813, 1.9756, 1.5726, 2.4062], + device='cuda:3'), covar=tensor([0.1853, 0.2211, 0.0753, 0.4215, 0.1461, 0.2978, 0.2013, 0.2055], + device='cuda:3'), in_proj_covar=tensor([0.0492, 0.0532, 0.0538, 0.0590, 0.0626, 0.0561, 0.0483, 0.0613], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:07:36,072 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8568, 3.8280, 3.4577, 1.9397, 3.3880, 3.4124, 3.4671, 3.2818], + device='cuda:3'), covar=tensor([0.0995, 0.0677, 0.1162, 0.4813, 0.1131, 0.1162, 0.1436, 0.1002], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0382, 0.0391, 0.0483, 0.0383, 0.0388, 0.0380, 0.0337], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:07:37,387 INFO [train.py:901] (3/4) Epoch 13, batch 1550, loss[loss=0.2425, simple_loss=0.3082, pruned_loss=0.08843, over 8247.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3033, pruned_loss=0.07519, over 1609914.85 frames. ], batch size: 22, lr: 5.96e-03, grad_scale: 8.0 +2023-02-06 13:07:43,378 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.583e+02 3.208e+02 4.119e+02 6.608e+02, threshold=6.417e+02, percent-clipped=3.0 +2023-02-06 13:07:46,952 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98561.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:07:47,836 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.33 vs. 
limit=5.0 +2023-02-06 13:08:02,054 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:11,762 INFO [train.py:901] (3/4) Epoch 13, batch 1600, loss[loss=0.2201, simple_loss=0.3035, pruned_loss=0.06832, over 8345.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.303, pruned_loss=0.07458, over 1614030.91 frames. ], batch size: 26, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:08:27,713 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6983, 1.6161, 2.1721, 1.5981, 1.2522, 2.1569, 0.3905, 1.3040], + device='cuda:3'), covar=tensor([0.2253, 0.1595, 0.0553, 0.1719, 0.3787, 0.0467, 0.3247, 0.1911], + device='cuda:3'), in_proj_covar=tensor([0.0172, 0.0175, 0.0104, 0.0221, 0.0262, 0.0112, 0.0164, 0.0168], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 13:08:29,744 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:46,812 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98646.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:08:47,299 INFO [train.py:901] (3/4) Epoch 13, batch 1650, loss[loss=0.1996, simple_loss=0.2817, pruned_loss=0.05882, over 7931.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3042, pruned_loss=0.07518, over 1615401.44 frames. ], batch size: 20, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:08:53,412 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.462e+02 2.942e+02 3.707e+02 8.113e+02, threshold=5.885e+02, percent-clipped=6.0 +2023-02-06 13:09:07,752 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.69 vs. limit=5.0 +2023-02-06 13:09:20,792 INFO [train.py:901] (3/4) Epoch 13, batch 1700, loss[loss=0.2178, simple_loss=0.301, pruned_loss=0.06725, over 8467.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3043, pruned_loss=0.07519, over 1616510.00 frames. ], batch size: 25, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:09:49,369 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1171, 1.0593, 1.2627, 1.1790, 0.9048, 1.2453, 0.0665, 0.9368], + device='cuda:3'), covar=tensor([0.2627, 0.1875, 0.0737, 0.1498, 0.3815, 0.0776, 0.3122, 0.1676], + device='cuda:3'), in_proj_covar=tensor([0.0173, 0.0175, 0.0104, 0.0220, 0.0260, 0.0112, 0.0163, 0.0167], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 13:09:57,388 INFO [train.py:901] (3/4) Epoch 13, batch 1750, loss[loss=0.2097, simple_loss=0.2884, pruned_loss=0.06552, over 8102.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3036, pruned_loss=0.0747, over 1617124.54 frames. 
], batch size: 23, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:10:03,433 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.663e+02 3.311e+02 3.905e+02 7.561e+02, threshold=6.622e+02, percent-clipped=6.0 +2023-02-06 13:10:15,123 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98772.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:10:27,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4961, 1.1617, 4.7080, 1.6897, 4.0695, 3.9597, 4.2709, 4.1059], + device='cuda:3'), covar=tensor([0.0579, 0.4843, 0.0413, 0.3708, 0.1164, 0.0907, 0.0526, 0.0615], + device='cuda:3'), in_proj_covar=tensor([0.0510, 0.0584, 0.0597, 0.0550, 0.0620, 0.0532, 0.0522, 0.0585], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:10:31,875 INFO [train.py:901] (3/4) Epoch 13, batch 1800, loss[loss=0.2155, simple_loss=0.2927, pruned_loss=0.06918, over 8648.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3034, pruned_loss=0.07488, over 1614983.67 frames. ], batch size: 34, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:01,380 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=98839.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:06,994 INFO [train.py:901] (3/4) Epoch 13, batch 1850, loss[loss=0.288, simple_loss=0.35, pruned_loss=0.113, over 8040.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3046, pruned_loss=0.0759, over 1616533.89 frames. ], batch size: 22, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:13,499 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.419e+02 2.914e+02 4.067e+02 1.078e+03, threshold=5.828e+02, percent-clipped=2.0 +2023-02-06 13:11:18,923 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=98864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:34,565 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=98887.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:41,190 INFO [train.py:901] (3/4) Epoch 13, batch 1900, loss[loss=0.2791, simple_loss=0.3504, pruned_loss=0.1039, over 8727.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3041, pruned_loss=0.07501, over 1617408.44 frames. 
], batch size: 30, lr: 5.95e-03, grad_scale: 8.0 +2023-02-06 13:11:46,604 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=98905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:48,684 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98908.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:11:51,822 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:12:02,109 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0693, 2.4922, 2.8431, 1.3775, 3.0546, 1.7781, 1.4855, 2.0655], + device='cuda:3'), covar=tensor([0.0697, 0.0284, 0.0229, 0.0609, 0.0351, 0.0726, 0.0710, 0.0404], + device='cuda:3'), in_proj_covar=tensor([0.0400, 0.0335, 0.0291, 0.0399, 0.0327, 0.0485, 0.0361, 0.0364], + device='cuda:3'), out_proj_covar=tensor([1.1216e-04, 9.1455e-05, 8.0008e-05, 1.1021e-04, 9.0817e-05, 1.4455e-04, + 1.0175e-04, 1.0132e-04], device='cuda:3') +2023-02-06 13:12:04,112 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9202, 1.3815, 1.6020, 1.2226, 0.9618, 1.5121, 2.1214, 1.8572], + device='cuda:3'), covar=tensor([0.0454, 0.1749, 0.2420, 0.1875, 0.0668, 0.2089, 0.0745, 0.0668], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0153, 0.0192, 0.0158, 0.0102, 0.0164, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:12:11,197 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 13:12:15,142 INFO [train.py:901] (3/4) Epoch 13, batch 1950, loss[loss=0.2295, simple_loss=0.2948, pruned_loss=0.08212, over 7698.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.305, pruned_loss=0.07528, over 1621599.16 frames. ], batch size: 18, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:12:21,303 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.445e+02 3.079e+02 3.874e+02 6.986e+02, threshold=6.158e+02, percent-clipped=4.0 +2023-02-06 13:12:23,945 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 13:12:33,016 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2223, 1.5460, 1.6062, 1.4674, 1.0731, 1.4408, 1.7546, 1.4104], + device='cuda:3'), covar=tensor([0.0518, 0.1158, 0.1618, 0.1286, 0.0607, 0.1422, 0.0670, 0.0615], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0157, 0.0102, 0.0163, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:12:44,575 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 13:12:45,395 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=98989.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:12:50,409 INFO [train.py:901] (3/4) Epoch 13, batch 2000, loss[loss=0.2684, simple_loss=0.3502, pruned_loss=0.09331, over 8488.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.3055, pruned_loss=0.0761, over 1619339.03 frames. 
], batch size: 49, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:12:51,992 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7567, 2.0136, 2.1923, 1.2083, 2.4054, 1.5355, 0.7095, 1.9520], + device='cuda:3'), covar=tensor([0.0484, 0.0243, 0.0195, 0.0448, 0.0250, 0.0663, 0.0612, 0.0233], + device='cuda:3'), in_proj_covar=tensor([0.0398, 0.0334, 0.0289, 0.0398, 0.0324, 0.0482, 0.0360, 0.0363], + device='cuda:3'), out_proj_covar=tensor([1.1172e-04, 9.1195e-05, 7.9623e-05, 1.0982e-04, 9.0171e-05, 1.4381e-04, + 1.0126e-04, 1.0103e-04], device='cuda:3') +2023-02-06 13:13:06,441 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:13:24,250 INFO [train.py:901] (3/4) Epoch 13, batch 2050, loss[loss=0.197, simple_loss=0.2699, pruned_loss=0.06208, over 7705.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3054, pruned_loss=0.07563, over 1621362.50 frames. ], batch size: 18, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:13:30,116 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.527e+02 3.267e+02 4.166e+02 9.227e+02, threshold=6.535e+02, percent-clipped=8.0 +2023-02-06 13:13:39,430 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:13:58,771 INFO [train.py:901] (3/4) Epoch 13, batch 2100, loss[loss=0.2024, simple_loss=0.2897, pruned_loss=0.05752, over 8024.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3047, pruned_loss=0.07526, over 1618748.19 frames. ], batch size: 22, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:14:00,877 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3436, 1.6991, 2.7029, 1.1487, 2.0320, 1.6173, 1.4158, 1.9488], + device='cuda:3'), covar=tensor([0.1820, 0.2051, 0.0719, 0.3989, 0.1514, 0.2993, 0.1943, 0.1955], + device='cuda:3'), in_proj_covar=tensor([0.0491, 0.0533, 0.0539, 0.0592, 0.0624, 0.0560, 0.0485, 0.0613], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:14:17,686 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2331, 1.6570, 4.4151, 2.1878, 2.4457, 5.0811, 5.0039, 4.3230], + device='cuda:3'), covar=tensor([0.1093, 0.1655, 0.0272, 0.1752, 0.1055, 0.0158, 0.0329, 0.0582], + device='cuda:3'), in_proj_covar=tensor([0.0269, 0.0300, 0.0268, 0.0291, 0.0276, 0.0237, 0.0357, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:14:31,273 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:14:33,883 INFO [train.py:901] (3/4) Epoch 13, batch 2150, loss[loss=0.2427, simple_loss=0.3212, pruned_loss=0.08212, over 8674.00 frames. ], tot_loss[loss=0.2289, simple_loss=0.306, pruned_loss=0.07593, over 1620410.05 frames. ], batch size: 39, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:14:39,911 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.393e+02 2.767e+02 3.323e+02 5.467e+02, threshold=5.533e+02, percent-clipped=0.0 +2023-02-06 13:14:48,073 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:07,844 INFO [train.py:901] (3/4) Epoch 13, batch 2200, loss[loss=0.2114, simple_loss=0.2817, pruned_loss=0.0705, over 7254.00 frames. 
], tot_loss[loss=0.2304, simple_loss=0.3067, pruned_loss=0.07705, over 1621057.73 frames. ], batch size: 16, lr: 5.94e-03, grad_scale: 8.0 +2023-02-06 13:15:39,425 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 13:15:43,036 INFO [train.py:901] (3/4) Epoch 13, batch 2250, loss[loss=0.2335, simple_loss=0.3068, pruned_loss=0.08006, over 7969.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3066, pruned_loss=0.07673, over 1621761.25 frames. ], batch size: 21, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:15:43,157 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:46,460 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:15:48,887 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.623e+02 3.377e+02 4.135e+02 6.545e+02, threshold=6.753e+02, percent-clipped=6.0 +2023-02-06 13:15:49,683 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:02,600 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:09,216 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:16,493 INFO [train.py:901] (3/4) Epoch 13, batch 2300, loss[loss=0.2243, simple_loss=0.3169, pruned_loss=0.06588, over 8451.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3059, pruned_loss=0.07621, over 1619694.83 frames. ], batch size: 27, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:16:19,363 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:43,129 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99333.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:49,191 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99342.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:16:52,410 INFO [train.py:901] (3/4) Epoch 13, batch 2350, loss[loss=0.2131, simple_loss=0.2829, pruned_loss=0.07164, over 7968.00 frames. ], tot_loss[loss=0.229, simple_loss=0.3061, pruned_loss=0.07596, over 1622109.76 frames. ], batch size: 21, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:16:58,394 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.470e+02 3.074e+02 3.865e+02 1.080e+03, threshold=6.149e+02, percent-clipped=3.0 +2023-02-06 13:17:06,750 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99367.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:10,177 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:26,585 INFO [train.py:901] (3/4) Epoch 13, batch 2400, loss[loss=0.1901, simple_loss=0.2643, pruned_loss=0.05794, over 7802.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3051, pruned_loss=0.07541, over 1622690.43 frames. 
], batch size: 19, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:17:26,707 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4593, 4.4578, 4.0334, 1.8967, 4.0300, 3.9797, 4.0854, 3.8506], + device='cuda:3'), covar=tensor([0.0765, 0.0573, 0.1038, 0.4563, 0.0804, 0.0861, 0.1182, 0.0721], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0393, 0.0397, 0.0497, 0.0391, 0.0394, 0.0384, 0.0343], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:17:37,540 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:17:56,001 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:18:01,826 INFO [train.py:901] (3/4) Epoch 13, batch 2450, loss[loss=0.2314, simple_loss=0.3063, pruned_loss=0.07831, over 7973.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3046, pruned_loss=0.07537, over 1619027.38 frames. ], batch size: 21, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:02,728 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99448.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:18:08,608 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.541e+02 3.157e+02 3.793e+02 6.756e+02, threshold=6.314e+02, percent-clipped=3.0 +2023-02-06 13:18:29,568 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99486.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:18:36,513 INFO [train.py:901] (3/4) Epoch 13, batch 2500, loss[loss=0.203, simple_loss=0.2849, pruned_loss=0.0606, over 8075.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3055, pruned_loss=0.07581, over 1621318.05 frames. ], batch size: 21, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:18:57,697 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:10,261 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0725, 1.5749, 3.2589, 1.5242, 2.3058, 3.5581, 3.6377, 3.0154], + device='cuda:3'), covar=tensor([0.0963, 0.1477, 0.0335, 0.1888, 0.0970, 0.0265, 0.0496, 0.0648], + device='cuda:3'), in_proj_covar=tensor([0.0274, 0.0304, 0.0271, 0.0294, 0.0282, 0.0242, 0.0361, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:19:10,744 INFO [train.py:901] (3/4) Epoch 13, batch 2550, loss[loss=0.2609, simple_loss=0.3275, pruned_loss=0.09717, over 8351.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3046, pruned_loss=0.0752, over 1620802.08 frames. ], batch size: 24, lr: 5.93e-03, grad_scale: 8.0 +2023-02-06 13:19:17,196 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.420e+02 2.977e+02 3.875e+02 7.325e+02, threshold=5.954e+02, percent-clipped=4.0 +2023-02-06 13:19:37,759 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99586.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:41,078 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:19:45,636 INFO [train.py:901] (3/4) Epoch 13, batch 2600, loss[loss=0.2017, simple_loss=0.2703, pruned_loss=0.06653, over 7534.00 frames. 
], tot_loss[loss=0.2289, simple_loss=0.3052, pruned_loss=0.07633, over 1617758.13 frames. ], batch size: 18, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:19:47,529 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 13:19:52,602 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:03,523 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99623.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:06,915 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:08,162 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:19,558 INFO [train.py:901] (3/4) Epoch 13, batch 2650, loss[loss=0.2444, simple_loss=0.3318, pruned_loss=0.07853, over 8357.00 frames. ], tot_loss[loss=0.2297, simple_loss=0.3067, pruned_loss=0.07634, over 1619094.69 frames. ], batch size: 24, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:20,455 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:23,645 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:25,432 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.509e+02 2.403e+02 3.099e+02 4.031e+02 8.160e+02, threshold=6.198e+02, percent-clipped=1.0 +2023-02-06 13:20:47,384 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:54,454 INFO [train.py:901] (3/4) Epoch 13, batch 2700, loss[loss=0.2454, simple_loss=0.3164, pruned_loss=0.08719, over 8460.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3056, pruned_loss=0.07625, over 1615711.49 frames. ], batch size: 29, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:20:55,216 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=99698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:20:59,197 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:00,473 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:16,804 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:27,698 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:28,866 INFO [train.py:901] (3/4) Epoch 13, batch 2750, loss[loss=0.2145, simple_loss=0.2952, pruned_loss=0.0669, over 7972.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3044, pruned_loss=0.07541, over 1613968.45 frames. 
], batch size: 21, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:21:34,775 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.508e+02 3.194e+02 3.866e+02 8.318e+02, threshold=6.387e+02, percent-clipped=3.0 +2023-02-06 13:21:49,875 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5484, 2.0050, 3.2217, 1.3132, 2.4767, 1.9078, 1.6576, 2.2513], + device='cuda:3'), covar=tensor([0.1718, 0.2109, 0.0693, 0.3962, 0.1510, 0.2855, 0.1782, 0.2177], + device='cuda:3'), in_proj_covar=tensor([0.0489, 0.0530, 0.0539, 0.0588, 0.0622, 0.0559, 0.0481, 0.0612], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:21:52,465 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:21:53,980 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99784.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:03,192 INFO [train.py:901] (3/4) Epoch 13, batch 2800, loss[loss=0.1977, simple_loss=0.2655, pruned_loss=0.06501, over 7552.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3036, pruned_loss=0.07527, over 1611382.66 frames. ], batch size: 18, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:06,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99801.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:11,458 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:22:26,108 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99830.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:22:37,608 INFO [train.py:901] (3/4) Epoch 13, batch 2850, loss[loss=0.2261, simple_loss=0.3013, pruned_loss=0.07546, over 8194.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.304, pruned_loss=0.07554, over 1613099.33 frames. ], batch size: 23, lr: 5.92e-03, grad_scale: 8.0 +2023-02-06 13:22:43,878 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.486e+02 2.909e+02 3.673e+02 9.445e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-06 13:23:11,490 INFO [train.py:901] (3/4) Epoch 13, batch 2900, loss[loss=0.2389, simple_loss=0.2998, pruned_loss=0.08894, over 7934.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3041, pruned_loss=0.07611, over 1611832.31 frames. ], batch size: 20, lr: 5.92e-03, grad_scale: 16.0 +2023-02-06 13:23:11,689 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99897.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:34,895 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99930.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:45,024 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=99945.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:23:46,142 INFO [train.py:901] (3/4) Epoch 13, batch 2950, loss[loss=0.2422, simple_loss=0.3164, pruned_loss=0.08398, over 8249.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3051, pruned_loss=0.07614, over 1615572.31 frames. 
], batch size: 24, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:23:48,998 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=99951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:23:52,274 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 13:23:52,905 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.762e+02 3.280e+02 4.150e+02 8.176e+02, threshold=6.560e+02, percent-clipped=12.0 +2023-02-06 13:23:57,303 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=99962.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:14,500 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=99987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:21,167 INFO [train.py:901] (3/4) Epoch 13, batch 3000, loss[loss=0.2449, simple_loss=0.3257, pruned_loss=0.08205, over 8505.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3044, pruned_loss=0.07498, over 1616562.02 frames. ], batch size: 26, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:24:21,167 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 13:24:33,564 INFO [train.py:935] (3/4) Epoch 13, validation: loss=0.1841, simple_loss=0.2841, pruned_loss=0.04204, over 944034.00 frames. +2023-02-06 13:24:33,565 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 13:24:37,726 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:24:42,517 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2548, 1.2473, 1.4249, 1.3184, 0.7209, 1.3012, 1.2310, 0.9432], + device='cuda:3'), covar=tensor([0.0523, 0.1246, 0.1704, 0.1293, 0.0579, 0.1478, 0.0664, 0.0689], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:24:54,616 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100025.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:24:55,295 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:05,694 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:07,828 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:09,024 INFO [train.py:901] (3/4) Epoch 13, batch 3050, loss[loss=0.2146, simple_loss=0.3007, pruned_loss=0.06424, over 8516.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3041, pruned_loss=0.07467, over 1617547.09 frames. 
], batch size: 28, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:25:15,849 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.522e+02 3.008e+02 4.207e+02 1.157e+03, threshold=6.017e+02, percent-clipped=6.0 +2023-02-06 13:25:16,790 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100057.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:22,893 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:24,274 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4077, 1.4132, 4.5142, 1.6677, 3.9290, 3.7039, 4.0566, 3.9632], + device='cuda:3'), covar=tensor([0.0499, 0.4631, 0.0491, 0.3779, 0.1209, 0.0992, 0.0595, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0516, 0.0591, 0.0609, 0.0558, 0.0634, 0.0546, 0.0538, 0.0591], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:25:34,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100082.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:25:41,470 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5726, 1.5598, 2.0316, 1.5314, 1.1694, 2.0494, 0.3045, 1.2675], + device='cuda:3'), covar=tensor([0.2260, 0.1800, 0.0483, 0.1554, 0.3869, 0.0588, 0.2871, 0.1612], + device='cuda:3'), in_proj_covar=tensor([0.0173, 0.0177, 0.0106, 0.0222, 0.0260, 0.0112, 0.0166, 0.0170], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 13:25:44,733 INFO [train.py:901] (3/4) Epoch 13, batch 3100, loss[loss=0.2613, simple_loss=0.3435, pruned_loss=0.08953, over 8337.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3056, pruned_loss=0.07571, over 1621455.52 frames. ], batch size: 25, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:19,849 INFO [train.py:901] (3/4) Epoch 13, batch 3150, loss[loss=0.1956, simple_loss=0.2684, pruned_loss=0.06135, over 7798.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3058, pruned_loss=0.07561, over 1622238.61 frames. ], batch size: 20, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:24,151 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:25,928 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.491e+02 3.036e+02 4.077e+02 6.258e+02, threshold=6.072e+02, percent-clipped=1.0 +2023-02-06 13:26:26,821 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100157.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:41,739 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:26:55,157 INFO [train.py:901] (3/4) Epoch 13, batch 3200, loss[loss=0.1974, simple_loss=0.277, pruned_loss=0.05894, over 7800.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3052, pruned_loss=0.07512, over 1623995.09 frames. 
], batch size: 19, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:26:58,262 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100201.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:27:05,247 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0656, 1.5003, 1.5873, 1.4132, 0.8729, 1.3586, 1.6882, 1.7437], + device='cuda:3'), covar=tensor([0.0496, 0.1298, 0.1749, 0.1397, 0.0628, 0.1594, 0.0719, 0.0574], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0191, 0.0158, 0.0102, 0.0163, 0.0114, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:27:15,352 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100226.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:27:29,402 INFO [train.py:901] (3/4) Epoch 13, batch 3250, loss[loss=0.232, simple_loss=0.2918, pruned_loss=0.08609, over 7769.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3057, pruned_loss=0.07553, over 1621393.27 frames. ], batch size: 19, lr: 5.91e-03, grad_scale: 16.0 +2023-02-06 13:27:34,944 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7865, 5.8909, 5.0720, 2.2439, 5.1893, 5.5666, 5.3079, 5.2733], + device='cuda:3'), covar=tensor([0.0463, 0.0341, 0.0833, 0.4390, 0.0599, 0.0577, 0.1022, 0.0482], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0387, 0.0391, 0.0490, 0.0388, 0.0392, 0.0385, 0.0342], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:27:35,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.430e+02 2.991e+02 3.670e+02 7.489e+02, threshold=5.982e+02, percent-clipped=4.0 +2023-02-06 13:27:35,622 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:04,524 INFO [train.py:901] (3/4) Epoch 13, batch 3300, loss[loss=0.1854, simple_loss=0.2661, pruned_loss=0.05228, over 8239.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.075, over 1619080.53 frames. ], batch size: 22, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:28:07,518 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:22,246 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:24,891 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100326.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:35,233 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 13:28:39,553 INFO [train.py:901] (3/4) Epoch 13, batch 3350, loss[loss=0.2702, simple_loss=0.3298, pruned_loss=0.1053, over 6775.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3045, pruned_loss=0.07536, over 1614020.67 frames. 
], batch size: 71, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:28:39,774 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:28:45,562 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.599e+02 3.166e+02 3.997e+02 7.990e+02, threshold=6.333e+02, percent-clipped=6.0 +2023-02-06 13:28:54,365 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100369.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:29:13,504 INFO [train.py:901] (3/4) Epoch 13, batch 3400, loss[loss=0.2413, simple_loss=0.321, pruned_loss=0.08082, over 8459.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3047, pruned_loss=0.07513, over 1617238.24 frames. ], batch size: 25, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:29:25,260 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100413.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:29,923 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:42,788 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100438.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:29:44,445 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-06 13:29:48,800 INFO [train.py:901] (3/4) Epoch 13, batch 3450, loss[loss=0.2127, simple_loss=0.3004, pruned_loss=0.06256, over 7807.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3047, pruned_loss=0.07518, over 1618293.91 frames. ], batch size: 20, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:29:54,814 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.523e+02 3.011e+02 4.006e+02 7.808e+02, threshold=6.023e+02, percent-clipped=2.0 +2023-02-06 13:30:14,320 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100484.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:30:23,422 INFO [train.py:901] (3/4) Epoch 13, batch 3500, loss[loss=0.2387, simple_loss=0.3194, pruned_loss=0.079, over 8134.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3042, pruned_loss=0.07488, over 1618786.10 frames. ], batch size: 22, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:30:53,068 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 13:30:58,421 INFO [train.py:901] (3/4) Epoch 13, batch 3550, loss[loss=0.2231, simple_loss=0.3106, pruned_loss=0.06777, over 8254.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3047, pruned_loss=0.07498, over 1621386.10 frames. ], batch size: 24, lr: 5.90e-03, grad_scale: 16.0 +2023-02-06 13:31:04,451 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.564e+02 3.091e+02 3.906e+02 9.185e+02, threshold=6.182e+02, percent-clipped=3.0 +2023-02-06 13:31:33,168 INFO [train.py:901] (3/4) Epoch 13, batch 3600, loss[loss=0.2172, simple_loss=0.3007, pruned_loss=0.06689, over 8283.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3058, pruned_loss=0.07538, over 1625399.75 frames. 
], batch size: 23, lr: 5.89e-03, grad_scale: 16.0 +2023-02-06 13:31:35,334 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:31:56,141 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2395, 1.7909, 1.4323, 1.6352, 1.4869, 1.2687, 1.4671, 1.5239], + device='cuda:3'), covar=tensor([0.0864, 0.0334, 0.0709, 0.0373, 0.0509, 0.0945, 0.0640, 0.0594], + device='cuda:3'), in_proj_covar=tensor([0.0341, 0.0233, 0.0317, 0.0297, 0.0296, 0.0322, 0.0339, 0.0301], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:32:08,080 INFO [train.py:901] (3/4) Epoch 13, batch 3650, loss[loss=0.2869, simple_loss=0.3476, pruned_loss=0.1131, over 8710.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3049, pruned_loss=0.07534, over 1619566.21 frames. ], batch size: 34, lr: 5.89e-03, grad_scale: 16.0 +2023-02-06 13:32:12,319 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8478, 1.6415, 1.8281, 1.6396, 1.1271, 1.6457, 2.2850, 1.9637], + device='cuda:3'), covar=tensor([0.0462, 0.1282, 0.1669, 0.1361, 0.0638, 0.1517, 0.0630, 0.0626], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0152, 0.0191, 0.0158, 0.0102, 0.0163, 0.0115, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:32:14,110 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.362e+02 3.080e+02 3.827e+02 7.938e+02, threshold=6.161e+02, percent-clipped=3.0 +2023-02-06 13:32:17,467 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 13:32:34,201 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.74 vs. limit=5.0 +2023-02-06 13:32:43,157 INFO [train.py:901] (3/4) Epoch 13, batch 3700, loss[loss=0.2397, simple_loss=0.3162, pruned_loss=0.08158, over 8030.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3053, pruned_loss=0.07557, over 1619830.35 frames. ], batch size: 22, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:32:52,912 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 13:32:55,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:32:57,152 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 13:33:13,284 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100740.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:33:16,645 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4090, 2.6976, 2.0342, 2.1881, 2.0584, 1.5926, 1.8904, 2.0627], + device='cuda:3'), covar=tensor([0.1454, 0.0335, 0.0894, 0.0586, 0.0743, 0.1388, 0.1058, 0.0911], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0234, 0.0318, 0.0299, 0.0300, 0.0322, 0.0340, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:33:17,849 INFO [train.py:901] (3/4) Epoch 13, batch 3750, loss[loss=0.2234, simple_loss=0.3044, pruned_loss=0.07119, over 8295.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3042, pruned_loss=0.07491, over 1611969.65 frames. 
], batch size: 23, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:33:18,689 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=100748.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:33:24,691 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.543e+02 3.029e+02 3.909e+02 6.778e+02, threshold=6.059e+02, percent-clipped=2.0 +2023-02-06 13:33:30,137 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=100764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:33:30,993 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100765.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:33:52,954 INFO [train.py:901] (3/4) Epoch 13, batch 3800, loss[loss=0.1928, simple_loss=0.271, pruned_loss=0.05733, over 7930.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3048, pruned_loss=0.07499, over 1613769.44 frames. ], batch size: 20, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:33:53,763 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3932, 1.3845, 4.5598, 1.7442, 4.1263, 3.8271, 4.0993, 4.0075], + device='cuda:3'), covar=tensor([0.0522, 0.4133, 0.0422, 0.3303, 0.0888, 0.0775, 0.0480, 0.0536], + device='cuda:3'), in_proj_covar=tensor([0.0518, 0.0595, 0.0619, 0.0557, 0.0633, 0.0546, 0.0537, 0.0597], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:34:00,247 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 13:34:27,590 INFO [train.py:901] (3/4) Epoch 13, batch 3850, loss[loss=0.2164, simple_loss=0.2895, pruned_loss=0.07168, over 7711.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.305, pruned_loss=0.07539, over 1612504.06 frames. ], batch size: 18, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:34:34,506 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.985e+02 2.830e+02 3.312e+02 3.730e+02 7.453e+02, threshold=6.624e+02, percent-clipped=3.0 +2023-02-06 13:34:50,099 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=100879.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:35:01,753 INFO [train.py:901] (3/4) Epoch 13, batch 3900, loss[loss=0.2117, simple_loss=0.2886, pruned_loss=0.06741, over 7816.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3051, pruned_loss=0.07528, over 1611872.97 frames. ], batch size: 20, lr: 5.89e-03, grad_scale: 8.0 +2023-02-06 13:35:01,759 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 13:35:33,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1735, 1.8785, 2.6404, 2.0880, 2.4468, 2.0670, 1.7852, 1.3088], + device='cuda:3'), covar=tensor([0.4175, 0.3976, 0.1330, 0.2818, 0.1987, 0.2467, 0.1681, 0.4246], + device='cuda:3'), in_proj_covar=tensor([0.0891, 0.0887, 0.0734, 0.0862, 0.0941, 0.0813, 0.0705, 0.0776], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:35:37,125 INFO [train.py:901] (3/4) Epoch 13, batch 3950, loss[loss=0.2036, simple_loss=0.2689, pruned_loss=0.06913, over 7793.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.3044, pruned_loss=0.07494, over 1605479.46 frames. 
], batch size: 19, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:35:44,004 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.489e+02 3.011e+02 3.855e+02 9.802e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-06 13:35:54,142 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=100971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:36:10,612 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7894, 2.2705, 4.8835, 2.7370, 4.4809, 4.2453, 4.5772, 4.5191], + device='cuda:3'), covar=tensor([0.0413, 0.3369, 0.0515, 0.2784, 0.0829, 0.0726, 0.0424, 0.0463], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0588, 0.0612, 0.0551, 0.0630, 0.0543, 0.0533, 0.0593], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:36:11,899 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=100996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:36:12,365 INFO [train.py:901] (3/4) Epoch 13, batch 4000, loss[loss=0.2331, simple_loss=0.3103, pruned_loss=0.07793, over 8338.00 frames. ], tot_loss[loss=0.228, simple_loss=0.305, pruned_loss=0.0755, over 1606342.97 frames. ], batch size: 26, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:36:20,492 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-06 13:36:47,684 INFO [train.py:901] (3/4) Epoch 13, batch 4050, loss[loss=0.2185, simple_loss=0.3019, pruned_loss=0.06758, over 8743.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3054, pruned_loss=0.07582, over 1608819.07 frames. ], batch size: 30, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:36:54,254 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.645e+02 3.184e+02 3.816e+02 9.518e+02, threshold=6.368e+02, percent-clipped=3.0 +2023-02-06 13:37:18,351 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101092.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:37:21,619 INFO [train.py:901] (3/4) Epoch 13, batch 4100, loss[loss=0.2464, simple_loss=0.3148, pruned_loss=0.08899, over 8081.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3056, pruned_loss=0.07568, over 1611857.03 frames. ], batch size: 21, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:37:41,404 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101125.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:37:47,946 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:37:55,651 INFO [train.py:901] (3/4) Epoch 13, batch 4150, loss[loss=0.2367, simple_loss=0.3249, pruned_loss=0.07423, over 8192.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3056, pruned_loss=0.07573, over 1610008.59 frames. 
], batch size: 23, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:38:02,992 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.783e+02 3.401e+02 4.642e+02 1.010e+03, threshold=6.803e+02, percent-clipped=7.0 +2023-02-06 13:38:05,283 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:38:05,891 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2825, 4.1699, 3.7782, 1.8403, 3.7992, 3.7811, 3.7676, 3.4950], + device='cuda:3'), covar=tensor([0.0670, 0.0485, 0.0895, 0.4393, 0.0713, 0.0828, 0.1173, 0.0840], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0391, 0.0401, 0.0500, 0.0394, 0.0395, 0.0389, 0.0344], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 13:38:06,622 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3080, 1.1640, 1.4252, 1.1442, 0.6540, 1.2030, 1.1315, 1.1511], + device='cuda:3'), covar=tensor([0.0581, 0.1780, 0.2350, 0.1761, 0.0671, 0.2025, 0.0796, 0.0723], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0192, 0.0157, 0.0102, 0.0164, 0.0116, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:38:30,242 INFO [train.py:901] (3/4) Epoch 13, batch 4200, loss[loss=0.22, simple_loss=0.2949, pruned_loss=0.07257, over 8355.00 frames. ], tot_loss[loss=0.228, simple_loss=0.3051, pruned_loss=0.0755, over 1614327.42 frames. ], batch size: 24, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:38:37,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101207.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:38:39,766 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-06 13:38:54,606 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 13:39:05,859 INFO [train.py:901] (3/4) Epoch 13, batch 4250, loss[loss=0.2054, simple_loss=0.2831, pruned_loss=0.06381, over 7932.00 frames. ], tot_loss[loss=0.2293, simple_loss=0.3057, pruned_loss=0.07648, over 1610592.33 frames. ], batch size: 20, lr: 5.88e-03, grad_scale: 8.0 +2023-02-06 13:39:12,480 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 2.514e+02 3.154e+02 3.992e+02 7.648e+02, threshold=6.307e+02, percent-clipped=3.0 +2023-02-06 13:39:16,501 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 13:39:40,012 INFO [train.py:901] (3/4) Epoch 13, batch 4300, loss[loss=0.2229, simple_loss=0.3046, pruned_loss=0.0706, over 8503.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3054, pruned_loss=0.07592, over 1613145.40 frames. ], batch size: 26, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:39:58,266 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 13:40:01,018 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.62 vs. limit=2.0 +2023-02-06 13:40:14,581 INFO [train.py:901] (3/4) Epoch 13, batch 4350, loss[loss=0.2213, simple_loss=0.2922, pruned_loss=0.07517, over 7807.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3046, pruned_loss=0.07551, over 1610940.74 frames. 
], batch size: 20, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:14,813 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1293, 1.9405, 2.4209, 1.8341, 1.6675, 2.4740, 1.1199, 2.0625], + device='cuda:3'), covar=tensor([0.2603, 0.1502, 0.0479, 0.1799, 0.2995, 0.0428, 0.2515, 0.1446], + device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0176, 0.0107, 0.0219, 0.0256, 0.0111, 0.0164, 0.0170], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 13:40:21,344 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.682e+02 3.184e+02 4.441e+02 9.358e+02, threshold=6.368e+02, percent-clipped=11.0 +2023-02-06 13:40:49,426 INFO [train.py:901] (3/4) Epoch 13, batch 4400, loss[loss=0.2267, simple_loss=0.3053, pruned_loss=0.07407, over 7814.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.306, pruned_loss=0.07615, over 1614401.06 frames. ], batch size: 20, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:40:49,433 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 13:41:23,441 INFO [train.py:901] (3/4) Epoch 13, batch 4450, loss[loss=0.2073, simple_loss=0.302, pruned_loss=0.0563, over 8107.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3053, pruned_loss=0.07583, over 1613655.63 frames. ], batch size: 23, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:41:28,823 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 13:41:30,657 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 2.700e+02 3.319e+02 4.103e+02 1.285e+03, threshold=6.638e+02, percent-clipped=3.0 +2023-02-06 13:41:34,866 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101463.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:41:38,577 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=101469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:41:52,069 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101488.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:41:57,886 INFO [train.py:901] (3/4) Epoch 13, batch 4500, loss[loss=0.2583, simple_loss=0.3251, pruned_loss=0.0957, over 8445.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3043, pruned_loss=0.07546, over 1610539.74 frames. ], batch size: 27, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:42:22,192 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 13:42:33,107 INFO [train.py:901] (3/4) Epoch 13, batch 4550, loss[loss=0.2018, simple_loss=0.2866, pruned_loss=0.0585, over 7808.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3028, pruned_loss=0.07495, over 1605907.53 frames. 
], batch size: 20, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:42:39,887 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.403e+02 2.986e+02 3.546e+02 6.918e+02, threshold=5.973e+02, percent-clipped=1.0 +2023-02-06 13:42:50,291 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9708, 1.6854, 2.1663, 1.8544, 2.0014, 1.9421, 1.7198, 0.7386], + device='cuda:3'), covar=tensor([0.4874, 0.4022, 0.1529, 0.2607, 0.1927, 0.2481, 0.1863, 0.4157], + device='cuda:3'), in_proj_covar=tensor([0.0894, 0.0893, 0.0747, 0.0869, 0.0947, 0.0819, 0.0710, 0.0779], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:42:58,932 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=101584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:43:08,193 INFO [train.py:901] (3/4) Epoch 13, batch 4600, loss[loss=0.2331, simple_loss=0.311, pruned_loss=0.0776, over 8086.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.303, pruned_loss=0.07484, over 1611063.44 frames. ], batch size: 21, lr: 5.87e-03, grad_scale: 8.0 +2023-02-06 13:43:42,601 INFO [train.py:901] (3/4) Epoch 13, batch 4650, loss[loss=0.194, simple_loss=0.2786, pruned_loss=0.05473, over 8289.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3026, pruned_loss=0.07469, over 1602826.94 frames. ], batch size: 23, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:43:49,456 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.500e+02 2.989e+02 3.844e+02 7.619e+02, threshold=5.978e+02, percent-clipped=4.0 +2023-02-06 13:43:53,084 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1774, 1.4815, 1.7274, 1.4315, 1.0002, 1.4253, 1.6361, 1.7376], + device='cuda:3'), covar=tensor([0.0445, 0.1264, 0.1594, 0.1328, 0.0573, 0.1489, 0.0681, 0.0591], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:44:17,684 INFO [train.py:901] (3/4) Epoch 13, batch 4700, loss[loss=0.2233, simple_loss=0.3124, pruned_loss=0.06709, over 8322.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3019, pruned_loss=0.07409, over 1605434.04 frames. ], batch size: 25, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:44:21,754 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:44:52,092 INFO [train.py:901] (3/4) Epoch 13, batch 4750, loss[loss=0.259, simple_loss=0.3229, pruned_loss=0.09756, over 8024.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3031, pruned_loss=0.07444, over 1613947.95 frames. ], batch size: 22, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:44:59,504 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.558e+02 3.081e+02 3.778e+02 8.564e+02, threshold=6.162e+02, percent-clipped=2.0 +2023-02-06 13:45:21,976 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 13:45:24,392 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 13:45:27,097 INFO [train.py:901] (3/4) Epoch 13, batch 4800, loss[loss=0.1806, simple_loss=0.2507, pruned_loss=0.05524, over 7720.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3033, pruned_loss=0.07494, over 1613788.09 frames. 
], batch size: 18, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:45:57,597 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=101840.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:46:01,949 INFO [train.py:901] (3/4) Epoch 13, batch 4850, loss[loss=0.2153, simple_loss=0.2859, pruned_loss=0.07233, over 7544.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.304, pruned_loss=0.07541, over 1613478.21 frames. ], batch size: 18, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:46:08,645 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.589e+02 3.137e+02 3.918e+02 7.572e+02, threshold=6.274e+02, percent-clipped=4.0 +2023-02-06 13:46:14,079 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 13:46:14,950 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=101865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:46:36,897 INFO [train.py:901] (3/4) Epoch 13, batch 4900, loss[loss=0.2701, simple_loss=0.3404, pruned_loss=0.09988, over 8320.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3048, pruned_loss=0.07536, over 1617843.54 frames. ], batch size: 25, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:46:52,175 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 13:47:06,384 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=101938.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 13:47:11,284 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 13:47:12,287 INFO [train.py:901] (3/4) Epoch 13, batch 4950, loss[loss=0.2503, simple_loss=0.3121, pruned_loss=0.0943, over 8133.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3036, pruned_loss=0.07475, over 1615943.77 frames. ], batch size: 22, lr: 5.86e-03, grad_scale: 8.0 +2023-02-06 13:47:15,216 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3568, 2.4712, 1.7360, 2.0981, 1.9437, 1.3744, 1.7630, 1.8690], + device='cuda:3'), covar=tensor([0.1258, 0.0347, 0.0974, 0.0501, 0.0558, 0.1347, 0.0873, 0.0757], + device='cuda:3'), in_proj_covar=tensor([0.0343, 0.0233, 0.0314, 0.0293, 0.0296, 0.0321, 0.0339, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:47:19,104 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.432e+02 3.023e+02 3.670e+02 7.494e+02, threshold=6.046e+02, percent-clipped=3.0 +2023-02-06 13:47:30,008 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9846, 1.7684, 1.9721, 1.7739, 0.9893, 1.9108, 2.0279, 2.1629], + device='cuda:3'), covar=tensor([0.0424, 0.1135, 0.1513, 0.1276, 0.0583, 0.1337, 0.0623, 0.0527], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0191, 0.0157, 0.0102, 0.0163, 0.0114, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:47:46,752 INFO [train.py:901] (3/4) Epoch 13, batch 5000, loss[loss=0.2388, simple_loss=0.3151, pruned_loss=0.08122, over 8135.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3031, pruned_loss=0.07405, over 1618446.45 frames. 
], batch size: 22, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:47:53,376 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:48:22,479 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102046.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:48:23,077 INFO [train.py:901] (3/4) Epoch 13, batch 5050, loss[loss=0.1836, simple_loss=0.2569, pruned_loss=0.05518, over 7640.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3026, pruned_loss=0.07449, over 1613346.98 frames. ], batch size: 19, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:48:29,968 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.626e+02 3.300e+02 4.185e+02 9.088e+02, threshold=6.599e+02, percent-clipped=3.0 +2023-02-06 13:48:50,354 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-06 13:48:54,012 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 13:48:56,651 INFO [train.py:901] (3/4) Epoch 13, batch 5100, loss[loss=0.2231, simple_loss=0.3093, pruned_loss=0.06845, over 8503.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.302, pruned_loss=0.07436, over 1611603.82 frames. ], batch size: 26, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:49:31,561 INFO [train.py:901] (3/4) Epoch 13, batch 5150, loss[loss=0.2204, simple_loss=0.3026, pruned_loss=0.06907, over 8612.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3021, pruned_loss=0.07446, over 1611954.01 frames. ], batch size: 31, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:49:38,299 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.413e+02 2.853e+02 3.425e+02 7.647e+02, threshold=5.706e+02, percent-clipped=3.0 +2023-02-06 13:49:42,467 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:49:50,399 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 13:50:06,641 INFO [train.py:901] (3/4) Epoch 13, batch 5200, loss[loss=0.2053, simple_loss=0.2975, pruned_loss=0.05661, over 8503.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.302, pruned_loss=0.07476, over 1607808.63 frames. ], batch size: 28, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:50:41,849 INFO [train.py:901] (3/4) Epoch 13, batch 5250, loss[loss=0.209, simple_loss=0.2899, pruned_loss=0.06406, over 8462.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.303, pruned_loss=0.07476, over 1610898.26 frames. ], batch size: 25, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:50:48,577 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.565e+02 3.047e+02 3.925e+02 1.157e+03, threshold=6.094e+02, percent-clipped=6.0 +2023-02-06 13:50:51,017 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. limit=5.0 +2023-02-06 13:50:53,900 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 13:51:06,840 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102282.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:51:16,471 INFO [train.py:901] (3/4) Epoch 13, batch 5300, loss[loss=0.2186, simple_loss=0.3013, pruned_loss=0.0679, over 8498.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3029, pruned_loss=0.07494, over 1609070.48 frames. 
], batch size: 28, lr: 5.85e-03, grad_scale: 8.0 +2023-02-06 13:51:51,008 INFO [train.py:901] (3/4) Epoch 13, batch 5350, loss[loss=0.2105, simple_loss=0.2916, pruned_loss=0.06477, over 7927.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3023, pruned_loss=0.07438, over 1613046.94 frames. ], batch size: 20, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:51:52,484 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=102349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:51:52,613 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0792, 1.6458, 1.3778, 1.5342, 1.3170, 1.2369, 1.2616, 1.3869], + device='cuda:3'), covar=tensor([0.1048, 0.0482, 0.1225, 0.0551, 0.0761, 0.1446, 0.0913, 0.0671], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0236, 0.0318, 0.0297, 0.0298, 0.0324, 0.0345, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:51:57,792 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.535e+02 3.049e+02 3.805e+02 7.372e+02, threshold=6.098e+02, percent-clipped=2.0 +2023-02-06 13:52:08,202 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0 +2023-02-06 13:52:26,058 INFO [train.py:901] (3/4) Epoch 13, batch 5400, loss[loss=0.2176, simple_loss=0.2987, pruned_loss=0.06827, over 8505.00 frames. ], tot_loss[loss=0.226, simple_loss=0.303, pruned_loss=0.0745, over 1611697.67 frames. ], batch size: 39, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:52:26,255 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102397.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:52:40,303 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:52:56,887 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102442.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:53:00,140 INFO [train.py:901] (3/4) Epoch 13, batch 5450, loss[loss=0.1985, simple_loss=0.2747, pruned_loss=0.0612, over 7664.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3036, pruned_loss=0.07512, over 1612910.73 frames. ], batch size: 19, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:53:07,658 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.724e+02 3.222e+02 3.900e+02 7.023e+02, threshold=6.444e+02, percent-clipped=3.0 +2023-02-06 13:53:12,602 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=102464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:53:34,976 INFO [train.py:901] (3/4) Epoch 13, batch 5500, loss[loss=0.2134, simple_loss=0.3011, pruned_loss=0.06284, over 8572.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.303, pruned_loss=0.07434, over 1617046.64 frames. ], batch size: 31, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:53:41,588 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 13:54:03,104 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7895, 1.6289, 2.8478, 1.3005, 2.1314, 3.1236, 3.1766, 2.6567], + device='cuda:3'), covar=tensor([0.1002, 0.1249, 0.0401, 0.2035, 0.0883, 0.0283, 0.0535, 0.0613], + device='cuda:3'), in_proj_covar=tensor([0.0268, 0.0300, 0.0265, 0.0296, 0.0277, 0.0237, 0.0360, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 13:54:09,472 INFO [train.py:901] (3/4) Epoch 13, batch 5550, loss[loss=0.2326, simple_loss=0.3037, pruned_loss=0.0807, over 7787.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3039, pruned_loss=0.07537, over 1617659.32 frames. ], batch size: 19, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:54:15,944 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.279e+02 3.010e+02 3.933e+02 6.976e+02, threshold=6.019e+02, percent-clipped=1.0 +2023-02-06 13:54:43,194 INFO [train.py:901] (3/4) Epoch 13, batch 5600, loss[loss=0.2004, simple_loss=0.2882, pruned_loss=0.05624, over 8105.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3047, pruned_loss=0.07597, over 1615506.73 frames. ], batch size: 23, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:54:44,705 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8219, 1.1465, 1.3444, 1.1078, 0.9728, 1.1021, 1.6591, 1.5532], + device='cuda:3'), covar=tensor([0.0609, 0.1861, 0.2455, 0.1893, 0.0738, 0.2194, 0.0825, 0.0733], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0156, 0.0101, 0.0162, 0.0113, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 13:55:04,665 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5794, 1.9012, 1.9876, 1.0410, 2.0470, 1.4711, 0.4060, 1.8645], + device='cuda:3'), covar=tensor([0.0285, 0.0191, 0.0144, 0.0340, 0.0216, 0.0564, 0.0527, 0.0141], + device='cuda:3'), in_proj_covar=tensor([0.0406, 0.0341, 0.0293, 0.0401, 0.0332, 0.0490, 0.0368, 0.0373], + device='cuda:3'), out_proj_covar=tensor([1.1347e-04, 9.3025e-05, 7.9922e-05, 1.1031e-04, 9.1562e-05, 1.4528e-04, + 1.0327e-04, 1.0324e-04], device='cuda:3') +2023-02-06 13:55:18,276 INFO [train.py:901] (3/4) Epoch 13, batch 5650, loss[loss=0.2872, simple_loss=0.3502, pruned_loss=0.1121, over 8100.00 frames. ], tot_loss[loss=0.23, simple_loss=0.3059, pruned_loss=0.07701, over 1615927.48 frames. ], batch size: 23, lr: 5.84e-03, grad_scale: 8.0 +2023-02-06 13:55:22,473 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102653.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:55:24,851 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.730e+02 3.267e+02 4.266e+02 8.129e+02, threshold=6.534e+02, percent-clipped=5.0 +2023-02-06 13:55:39,202 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102678.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 13:55:43,627 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 13:55:52,498 INFO [train.py:901] (3/4) Epoch 13, batch 5700, loss[loss=0.1974, simple_loss=0.2809, pruned_loss=0.057, over 7657.00 frames. ], tot_loss[loss=0.2285, simple_loss=0.3048, pruned_loss=0.07614, over 1619413.13 frames. 
], batch size: 19, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:56:08,740 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=102720.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:56:25,991 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=102745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:56:27,756 INFO [train.py:901] (3/4) Epoch 13, batch 5750, loss[loss=0.2045, simple_loss=0.2775, pruned_loss=0.06572, over 7639.00 frames. ], tot_loss[loss=0.2303, simple_loss=0.3063, pruned_loss=0.07715, over 1620874.27 frames. ], batch size: 19, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:56:34,421 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.889e+02 2.514e+02 3.075e+02 4.012e+02 7.214e+02, threshold=6.150e+02, percent-clipped=2.0 +2023-02-06 13:56:45,826 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.39 vs. limit=5.0 +2023-02-06 13:56:47,305 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 13:56:49,636 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6399, 2.2588, 3.2867, 2.7333, 2.9792, 2.3982, 2.1057, 2.0409], + device='cuda:3'), covar=tensor([0.3747, 0.4084, 0.1501, 0.2753, 0.2310, 0.2442, 0.1642, 0.4338], + device='cuda:3'), in_proj_covar=tensor([0.0894, 0.0889, 0.0744, 0.0870, 0.0943, 0.0815, 0.0705, 0.0778], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 13:56:52,945 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7605, 2.0583, 2.2514, 1.2527, 2.3119, 1.6350, 0.6490, 1.9400], + device='cuda:3'), covar=tensor([0.0459, 0.0233, 0.0153, 0.0448, 0.0266, 0.0650, 0.0624, 0.0213], + device='cuda:3'), in_proj_covar=tensor([0.0407, 0.0342, 0.0295, 0.0403, 0.0334, 0.0492, 0.0369, 0.0374], + device='cuda:3'), out_proj_covar=tensor([1.1393e-04, 9.3301e-05, 8.0550e-05, 1.1094e-04, 9.2128e-05, 1.4583e-04, + 1.0362e-04, 1.0363e-04], device='cuda:3') +2023-02-06 13:57:01,391 INFO [train.py:901] (3/4) Epoch 13, batch 5800, loss[loss=0.2846, simple_loss=0.3564, pruned_loss=0.1064, over 8280.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3053, pruned_loss=0.07602, over 1619032.90 frames. ], batch size: 23, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:57:36,616 INFO [train.py:901] (3/4) Epoch 13, batch 5850, loss[loss=0.2241, simple_loss=0.2985, pruned_loss=0.07489, over 8237.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3044, pruned_loss=0.07522, over 1616303.32 frames. ], batch size: 22, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:57:43,166 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.218e+02 2.874e+02 3.517e+02 7.476e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-06 13:57:52,584 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102869.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:58:11,324 INFO [train.py:901] (3/4) Epoch 13, batch 5900, loss[loss=0.1878, simple_loss=0.2724, pruned_loss=0.05157, over 7430.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3044, pruned_loss=0.07525, over 1614830.41 frames. ], batch size: 17, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:58:16,412 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.09 vs. 
limit=5.0 +2023-02-06 13:58:16,872 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=102905.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 13:58:42,656 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.08 vs. limit=5.0 +2023-02-06 13:58:46,267 INFO [train.py:901] (3/4) Epoch 13, batch 5950, loss[loss=0.2513, simple_loss=0.3211, pruned_loss=0.09077, over 7813.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3048, pruned_loss=0.07535, over 1616623.52 frames. ], batch size: 20, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:58:52,894 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.537e+02 3.124e+02 4.010e+02 1.248e+03, threshold=6.247e+02, percent-clipped=9.0 +2023-02-06 13:58:59,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0013, 2.5798, 2.9731, 1.3530, 3.1329, 1.7155, 1.5145, 1.9367], + device='cuda:3'), covar=tensor([0.0756, 0.0327, 0.0243, 0.0637, 0.0439, 0.0780, 0.0799, 0.0508], + device='cuda:3'), in_proj_covar=tensor([0.0410, 0.0346, 0.0296, 0.0406, 0.0334, 0.0496, 0.0372, 0.0376], + device='cuda:3'), out_proj_covar=tensor([1.1459e-04, 9.4281e-05, 8.0591e-05, 1.1148e-04, 9.2291e-05, 1.4726e-04, + 1.0437e-04, 1.0401e-04], device='cuda:3') +2023-02-06 13:59:21,495 INFO [train.py:901] (3/4) Epoch 13, batch 6000, loss[loss=0.201, simple_loss=0.2854, pruned_loss=0.05833, over 8569.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3051, pruned_loss=0.07521, over 1618365.07 frames. ], batch size: 39, lr: 5.83e-03, grad_scale: 16.0 +2023-02-06 13:59:21,495 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 13:59:36,606 INFO [train.py:935] (3/4) Epoch 13, validation: loss=0.1836, simple_loss=0.2836, pruned_loss=0.04176, over 944034.00 frames. +2023-02-06 13:59:36,607 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 14:00:11,030 INFO [train.py:901] (3/4) Epoch 13, batch 6050, loss[loss=0.1824, simple_loss=0.2691, pruned_loss=0.04785, over 8142.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3039, pruned_loss=0.07452, over 1616545.17 frames. ], batch size: 22, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:00:14,465 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9622, 2.6055, 3.6925, 1.8973, 1.5600, 3.6286, 0.6700, 2.1874], + device='cuda:3'), covar=tensor([0.2033, 0.1578, 0.0341, 0.3040, 0.4093, 0.0385, 0.3335, 0.1883], + device='cuda:3'), in_proj_covar=tensor([0.0172, 0.0173, 0.0106, 0.0218, 0.0254, 0.0110, 0.0164, 0.0170], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 14:00:18,307 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.701e+02 2.480e+02 3.014e+02 3.999e+02 8.436e+02, threshold=6.027e+02, percent-clipped=4.0 +2023-02-06 14:00:21,130 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8318, 5.8201, 5.1099, 2.2076, 5.1363, 5.6698, 5.4283, 5.2389], + device='cuda:3'), covar=tensor([0.0567, 0.0422, 0.0910, 0.5203, 0.0732, 0.0704, 0.1154, 0.0641], + device='cuda:3'), in_proj_covar=tensor([0.0479, 0.0391, 0.0397, 0.0495, 0.0391, 0.0392, 0.0389, 0.0343], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:00:45,627 INFO [train.py:901] (3/4) Epoch 13, batch 6100, loss[loss=0.2337, simple_loss=0.3124, pruned_loss=0.07752, over 8289.00 frames. 
], tot_loss[loss=0.2261, simple_loss=0.3038, pruned_loss=0.07423, over 1611879.41 frames. ], batch size: 23, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:00:58,589 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103116.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:01:14,184 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 14:01:19,569 INFO [train.py:901] (3/4) Epoch 13, batch 6150, loss[loss=0.2231, simple_loss=0.3069, pruned_loss=0.06966, over 8472.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.304, pruned_loss=0.0748, over 1611259.49 frames. ], batch size: 29, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:01:21,559 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103150.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:01:26,195 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.434e+02 3.117e+02 4.172e+02 7.466e+02, threshold=6.235e+02, percent-clipped=2.0 +2023-02-06 14:01:46,190 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7871, 1.5065, 2.7516, 1.3005, 2.0098, 2.9485, 2.9966, 2.4841], + device='cuda:3'), covar=tensor([0.1044, 0.1468, 0.0463, 0.2101, 0.0988, 0.0326, 0.0688, 0.0722], + device='cuda:3'), in_proj_covar=tensor([0.0268, 0.0303, 0.0265, 0.0297, 0.0278, 0.0239, 0.0361, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:01:51,720 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2969, 1.5950, 4.3056, 2.0908, 2.3578, 4.9692, 4.9713, 4.2574], + device='cuda:3'), covar=tensor([0.1103, 0.1621, 0.0297, 0.1738, 0.1153, 0.0177, 0.0404, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0267, 0.0301, 0.0264, 0.0296, 0.0277, 0.0239, 0.0359, 0.0294], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:01:55,079 INFO [train.py:901] (3/4) Epoch 13, batch 6200, loss[loss=0.1981, simple_loss=0.2838, pruned_loss=0.05622, over 7972.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3036, pruned_loss=0.07486, over 1614531.89 frames. ], batch size: 21, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:02:06,162 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:02:30,996 INFO [train.py:901] (3/4) Epoch 13, batch 6250, loss[loss=0.2414, simple_loss=0.3128, pruned_loss=0.08499, over 7977.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3021, pruned_loss=0.07396, over 1609100.67 frames. ], batch size: 21, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:02:32,364 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103249.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:02:37,840 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.482e+02 2.950e+02 3.630e+02 6.819e+02, threshold=5.900e+02, percent-clipped=4.0 +2023-02-06 14:03:06,056 INFO [train.py:901] (3/4) Epoch 13, batch 6300, loss[loss=0.2121, simple_loss=0.2828, pruned_loss=0.07069, over 7921.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3023, pruned_loss=0.0744, over 1607620.57 frames. 
], batch size: 20, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:03:15,730 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6991, 1.6925, 2.3100, 1.6318, 1.2381, 2.2767, 0.2585, 1.3065], + device='cuda:3'), covar=tensor([0.2329, 0.1396, 0.0384, 0.1632, 0.3415, 0.0393, 0.2876, 0.1749], + device='cuda:3'), in_proj_covar=tensor([0.0173, 0.0173, 0.0107, 0.0221, 0.0256, 0.0110, 0.0166, 0.0173], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 14:03:21,245 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 14:03:27,928 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:03:40,545 INFO [train.py:901] (3/4) Epoch 13, batch 6350, loss[loss=0.2249, simple_loss=0.2829, pruned_loss=0.08346, over 7216.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3037, pruned_loss=0.07538, over 1606373.56 frames. ], batch size: 16, lr: 5.82e-03, grad_scale: 16.0 +2023-02-06 14:03:48,225 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.547e+02 3.093e+02 3.716e+02 8.603e+02, threshold=6.185e+02, percent-clipped=3.0 +2023-02-06 14:03:53,004 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:04:06,349 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103384.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:04:14,878 INFO [train.py:901] (3/4) Epoch 13, batch 6400, loss[loss=0.2029, simple_loss=0.2883, pruned_loss=0.05874, over 8652.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3032, pruned_loss=0.07508, over 1607335.36 frames. ], batch size: 34, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:04:49,443 INFO [train.py:901] (3/4) Epoch 13, batch 6450, loss[loss=0.2406, simple_loss=0.3028, pruned_loss=0.08922, over 7929.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3024, pruned_loss=0.07473, over 1606250.33 frames. ], batch size: 20, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:04:56,168 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.890e+02 2.528e+02 3.186e+02 3.863e+02 6.544e+02, threshold=6.372e+02, percent-clipped=1.0 +2023-02-06 14:04:58,241 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103460.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:05:13,647 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1337, 1.3480, 1.6735, 1.2600, 0.9632, 1.4540, 1.7768, 1.9002], + device='cuda:3'), covar=tensor([0.0465, 0.1339, 0.1722, 0.1460, 0.0625, 0.1541, 0.0693, 0.0562], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0155, 0.0100, 0.0161, 0.0113, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 14:05:22,247 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:05:24,240 INFO [train.py:901] (3/4) Epoch 13, batch 6500, loss[loss=0.2099, simple_loss=0.2728, pruned_loss=0.07346, over 7654.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3031, pruned_loss=0.07497, over 1610821.17 frames. 
], batch size: 19, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:05:58,663 INFO [train.py:901] (3/4) Epoch 13, batch 6550, loss[loss=0.2568, simple_loss=0.3385, pruned_loss=0.08757, over 8519.00 frames. ], tot_loss[loss=0.2283, simple_loss=0.3051, pruned_loss=0.07579, over 1614521.93 frames. ], batch size: 28, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:06:05,482 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.442e+02 3.089e+02 4.027e+02 9.292e+02, threshold=6.177e+02, percent-clipped=8.0 +2023-02-06 14:06:12,688 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1665, 4.1179, 3.7717, 1.8509, 3.6687, 3.6915, 3.7393, 3.5703], + device='cuda:3'), covar=tensor([0.0743, 0.0643, 0.1134, 0.4844, 0.0909, 0.1060, 0.1341, 0.0882], + device='cuda:3'), in_proj_covar=tensor([0.0476, 0.0389, 0.0399, 0.0498, 0.0394, 0.0392, 0.0385, 0.0343], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:06:18,304 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103575.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:24,184 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 14:06:24,394 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103584.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:34,118 INFO [train.py:901] (3/4) Epoch 13, batch 6600, loss[loss=0.2249, simple_loss=0.3045, pruned_loss=0.07267, over 8499.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.3055, pruned_loss=0.07609, over 1617221.88 frames. ], batch size: 28, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:06:36,425 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9306, 1.3090, 1.5124, 1.2818, 0.8941, 1.3296, 1.6834, 1.3148], + device='cuda:3'), covar=tensor([0.0494, 0.1275, 0.1676, 0.1374, 0.0609, 0.1563, 0.0681, 0.0721], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0154, 0.0100, 0.0161, 0.0114, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 14:06:37,032 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9964, 1.8706, 2.0584, 1.9109, 1.1002, 1.9155, 2.4064, 2.5282], + device='cuda:3'), covar=tensor([0.0434, 0.1071, 0.1510, 0.1146, 0.0582, 0.1308, 0.0569, 0.0495], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0150, 0.0189, 0.0154, 0.0100, 0.0161, 0.0114, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0008, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 14:06:42,479 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:42,503 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:06:43,680 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. 
Duration: 0.9681875 +2023-02-06 14:06:49,834 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:07:02,408 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1997, 1.8436, 2.7486, 2.1631, 2.4787, 2.1147, 1.7960, 1.3589], + device='cuda:3'), covar=tensor([0.4381, 0.4014, 0.1287, 0.2951, 0.2116, 0.2436, 0.1796, 0.4211], + device='cuda:3'), in_proj_covar=tensor([0.0896, 0.0887, 0.0742, 0.0866, 0.0945, 0.0822, 0.0706, 0.0774], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:07:07,875 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:07:09,056 INFO [train.py:901] (3/4) Epoch 13, batch 6650, loss[loss=0.2265, simple_loss=0.3235, pruned_loss=0.06472, over 8370.00 frames. ], tot_loss[loss=0.2292, simple_loss=0.3061, pruned_loss=0.0762, over 1615849.45 frames. ], batch size: 24, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:07:16,596 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.459e+02 2.800e+02 3.637e+02 6.016e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-06 14:07:43,988 INFO [train.py:901] (3/4) Epoch 13, batch 6700, loss[loss=0.2391, simple_loss=0.3033, pruned_loss=0.08742, over 7799.00 frames. ], tot_loss[loss=0.2279, simple_loss=0.3046, pruned_loss=0.07562, over 1617243.71 frames. ], batch size: 19, lr: 5.81e-03, grad_scale: 16.0 +2023-02-06 14:08:06,486 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=103728.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:08:18,549 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 14:08:19,450 INFO [train.py:901] (3/4) Epoch 13, batch 6750, loss[loss=0.3149, simple_loss=0.3723, pruned_loss=0.1288, over 6986.00 frames. ], tot_loss[loss=0.2287, simple_loss=0.3054, pruned_loss=0.07603, over 1613600.67 frames. ], batch size: 72, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:08:24,989 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0240, 2.1888, 1.7721, 2.7399, 1.2875, 1.6759, 1.8839, 2.2350], + device='cuda:3'), covar=tensor([0.0714, 0.0809, 0.1036, 0.0359, 0.1168, 0.1367, 0.0958, 0.0773], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0205, 0.0250, 0.0208, 0.0213, 0.0249, 0.0252, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:08:26,133 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.551e+02 3.234e+02 3.983e+02 1.044e+03, threshold=6.469e+02, percent-clipped=6.0 +2023-02-06 14:08:26,993 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:08:44,326 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:08:53,558 INFO [train.py:901] (3/4) Epoch 13, batch 6800, loss[loss=0.239, simple_loss=0.3295, pruned_loss=0.07427, over 8462.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3041, pruned_loss=0.07542, over 1610263.69 frames. ], batch size: 27, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:08:58,344 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-06 14:09:17,167 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103831.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:19,856 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7840, 1.8954, 1.6255, 2.2802, 1.0653, 1.5168, 1.6154, 1.9413], + device='cuda:3'), covar=tensor([0.0683, 0.0769, 0.0965, 0.0418, 0.1175, 0.1272, 0.0899, 0.0750], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0205, 0.0251, 0.0208, 0.0213, 0.0249, 0.0253, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:09:25,231 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=103843.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:09:27,549 INFO [train.py:901] (3/4) Epoch 13, batch 6850, loss[loss=0.2351, simple_loss=0.2998, pruned_loss=0.08518, over 7306.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3043, pruned_loss=0.0756, over 1609375.63 frames. ], batch size: 16, lr: 5.80e-03, grad_scale: 16.0 +2023-02-06 14:09:33,735 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103856.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:34,154 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.955e+02 2.670e+02 3.153e+02 3.957e+02 9.275e+02, threshold=6.306e+02, percent-clipped=2.0 +2023-02-06 14:09:40,250 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=103865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:09:44,804 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 14:09:46,354 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5149, 2.2537, 3.3628, 2.4197, 2.9927, 2.4981, 2.1913, 1.8290], + device='cuda:3'), covar=tensor([0.4572, 0.4857, 0.1515, 0.3349, 0.2489, 0.2589, 0.1830, 0.5036], + device='cuda:3'), in_proj_covar=tensor([0.0899, 0.0887, 0.0742, 0.0866, 0.0948, 0.0822, 0.0705, 0.0777], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:09:57,532 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=103890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:10:00,226 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=103894.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:10:02,112 INFO [train.py:901] (3/4) Epoch 13, batch 6900, loss[loss=0.2107, simple_loss=0.2838, pruned_loss=0.06885, over 8141.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3044, pruned_loss=0.07513, over 1615526.52 frames. ], batch size: 22, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:10:35,839 INFO [train.py:901] (3/4) Epoch 13, batch 6950, loss[loss=0.2845, simple_loss=0.3425, pruned_loss=0.1133, over 7207.00 frames. ], tot_loss[loss=0.227, simple_loss=0.3045, pruned_loss=0.07473, over 1619128.58 frames. ], batch size: 72, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:10:43,033 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.539e+02 3.074e+02 3.917e+02 9.810e+02, threshold=6.147e+02, percent-clipped=9.0 +2023-02-06 14:10:53,174 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-06 14:11:00,841 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4803, 1.8819, 2.7402, 1.3211, 1.9788, 1.8422, 1.6186, 1.8311], + device='cuda:3'), covar=tensor([0.1706, 0.2038, 0.0712, 0.3750, 0.1614, 0.2734, 0.1855, 0.2096], + device='cuda:3'), in_proj_covar=tensor([0.0496, 0.0537, 0.0531, 0.0588, 0.0620, 0.0559, 0.0483, 0.0613], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:11:09,734 INFO [train.py:901] (3/4) Epoch 13, batch 7000, loss[loss=0.23, simple_loss=0.311, pruned_loss=0.07446, over 8429.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3027, pruned_loss=0.07354, over 1616851.66 frames. ], batch size: 27, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:11:18,202 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104008.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:11:19,230 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 14:11:23,547 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:11:35,611 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7800, 1.9041, 1.5897, 2.3279, 1.0481, 1.3614, 1.6014, 1.9076], + device='cuda:3'), covar=tensor([0.0698, 0.0731, 0.0979, 0.0418, 0.1121, 0.1412, 0.0895, 0.0728], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0208, 0.0252, 0.0210, 0.0215, 0.0252, 0.0256, 0.0215], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:11:43,224 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.45 vs. limit=5.0 +2023-02-06 14:11:44,810 INFO [train.py:901] (3/4) Epoch 13, batch 7050, loss[loss=0.2237, simple_loss=0.293, pruned_loss=0.0772, over 7547.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3026, pruned_loss=0.07326, over 1614163.76 frames. ], batch size: 18, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:11:52,789 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.385e+02 2.879e+02 3.637e+02 6.044e+02, threshold=5.759e+02, percent-clipped=0.0 +2023-02-06 14:11:58,854 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=104067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:01,822 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 14:12:18,820 INFO [train.py:901] (3/4) Epoch 13, batch 7100, loss[loss=0.1841, simple_loss=0.2632, pruned_loss=0.05247, over 7436.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.303, pruned_loss=0.07366, over 1612508.69 frames. 
], batch size: 17, lr: 5.80e-03, grad_scale: 8.0 +2023-02-06 14:12:21,102 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104099.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:12:22,946 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104102.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:37,519 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104124.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:12:39,433 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104127.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:12:46,609 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 14:12:47,820 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. limit=2.0 +2023-02-06 14:12:52,697 INFO [train.py:901] (3/4) Epoch 13, batch 7150, loss[loss=0.1754, simple_loss=0.2468, pruned_loss=0.05196, over 7524.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.305, pruned_loss=0.07498, over 1612450.15 frames. ], batch size: 18, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:13:00,089 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.541e+02 2.991e+02 4.071e+02 7.912e+02, threshold=5.982e+02, percent-clipped=4.0 +2023-02-06 14:13:27,755 INFO [train.py:901] (3/4) Epoch 13, batch 7200, loss[loss=0.1597, simple_loss=0.2408, pruned_loss=0.03925, over 7539.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3039, pruned_loss=0.07401, over 1613099.40 frames. ], batch size: 18, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:13:42,140 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104217.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:13:55,691 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104238.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:13:58,506 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:14:01,688 INFO [train.py:901] (3/4) Epoch 13, batch 7250, loss[loss=0.2294, simple_loss=0.3075, pruned_loss=0.07566, over 7928.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3037, pruned_loss=0.07448, over 1611642.13 frames. ], batch size: 20, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:14:09,625 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.469e+02 3.063e+02 3.939e+02 8.277e+02, threshold=6.126e+02, percent-clipped=7.0 +2023-02-06 14:14:32,858 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0496, 2.3369, 1.8356, 2.8817, 1.5123, 1.4846, 1.9000, 2.2587], + device='cuda:3'), covar=tensor([0.0695, 0.0797, 0.0974, 0.0370, 0.1173, 0.1502, 0.0975, 0.0791], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0207, 0.0251, 0.0209, 0.0213, 0.0251, 0.0255, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:14:37,320 INFO [train.py:901] (3/4) Epoch 13, batch 7300, loss[loss=0.2071, simple_loss=0.2748, pruned_loss=0.06974, over 7713.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3033, pruned_loss=0.07427, over 1611629.58 frames. ], batch size: 18, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:11,544 INFO [train.py:901] (3/4) Epoch 13, batch 7350, loss[loss=0.1929, simple_loss=0.2715, pruned_loss=0.05712, over 7431.00 frames. 
], tot_loss[loss=0.2254, simple_loss=0.3028, pruned_loss=0.07396, over 1612466.07 frames. ], batch size: 17, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:14,977 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:15,799 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:19,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.439e+02 3.043e+02 3.823e+02 6.373e+02, threshold=6.086e+02, percent-clipped=2.0 +2023-02-06 14:15:19,894 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:15:33,260 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 14:15:46,158 INFO [train.py:901] (3/4) Epoch 13, batch 7400, loss[loss=0.2319, simple_loss=0.3047, pruned_loss=0.07957, over 8099.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3029, pruned_loss=0.07357, over 1613699.89 frames. ], batch size: 23, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:15:53,249 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 14:15:56,728 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=104411.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:21,099 INFO [train.py:901] (3/4) Epoch 13, batch 7450, loss[loss=0.2449, simple_loss=0.3215, pruned_loss=0.08414, over 8345.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3038, pruned_loss=0.07408, over 1616158.83 frames. ], batch size: 26, lr: 5.79e-03, grad_scale: 8.0 +2023-02-06 14:16:29,258 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.488e+02 2.494e+02 3.000e+02 3.814e+02 1.100e+03, threshold=5.999e+02, percent-clipped=4.0 +2023-02-06 14:16:33,242 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 14:16:35,471 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104467.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:39,584 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:40,181 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:55,894 INFO [train.py:901] (3/4) Epoch 13, batch 7500, loss[loss=0.2974, simple_loss=0.3452, pruned_loss=0.1248, over 6752.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.305, pruned_loss=0.07512, over 1617465.80 frames. 
], batch size: 71, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:16:56,789 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:16:56,807 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104498.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:14,584 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:15,883 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2688, 3.1481, 2.9087, 1.5703, 2.8594, 2.8558, 2.9305, 2.7393], + device='cuda:3'), covar=tensor([0.1046, 0.0866, 0.1357, 0.4578, 0.1121, 0.1341, 0.1565, 0.1098], + device='cuda:3'), in_proj_covar=tensor([0.0470, 0.0385, 0.0394, 0.0487, 0.0386, 0.0389, 0.0378, 0.0338], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:17:16,626 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=104526.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:17:30,659 INFO [train.py:901] (3/4) Epoch 13, batch 7550, loss[loss=0.2104, simple_loss=0.2751, pruned_loss=0.07284, over 7545.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3044, pruned_loss=0.07523, over 1613562.03 frames. ], batch size: 18, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:17:37,880 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.482e+02 3.042e+02 4.105e+02 9.709e+02, threshold=6.085e+02, percent-clipped=7.0 +2023-02-06 14:18:03,967 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6276, 1.5185, 2.8760, 1.1037, 2.1624, 3.1724, 3.4033, 2.2901], + device='cuda:3'), covar=tensor([0.1565, 0.1745, 0.0505, 0.2756, 0.1003, 0.0417, 0.0575, 0.1264], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0303, 0.0265, 0.0295, 0.0278, 0.0239, 0.0359, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:18:05,105 INFO [train.py:901] (3/4) Epoch 13, batch 7600, loss[loss=0.2201, simple_loss=0.2954, pruned_loss=0.07237, over 7647.00 frames. ], tot_loss[loss=0.2278, simple_loss=0.3048, pruned_loss=0.07543, over 1614857.04 frames. 
], batch size: 19, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:13,227 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:19,840 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0771, 1.2808, 4.2848, 1.5278, 3.7086, 3.5071, 3.8343, 3.7173], + device='cuda:3'), covar=tensor([0.0606, 0.4834, 0.0519, 0.3959, 0.1201, 0.1109, 0.0610, 0.0694], + device='cuda:3'), in_proj_covar=tensor([0.0522, 0.0585, 0.0609, 0.0555, 0.0632, 0.0542, 0.0528, 0.0591], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:18:30,785 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104634.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:18:34,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1644, 2.1642, 1.5354, 1.8157, 1.7760, 1.3803, 1.5263, 1.6746], + device='cuda:3'), covar=tensor([0.1389, 0.0339, 0.1178, 0.0579, 0.0745, 0.1389, 0.0961, 0.0850], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0238, 0.0323, 0.0299, 0.0302, 0.0325, 0.0344, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:18:40,154 INFO [train.py:901] (3/4) Epoch 13, batch 7650, loss[loss=0.2528, simple_loss=0.3386, pruned_loss=0.08348, over 8261.00 frames. ], tot_loss[loss=0.2282, simple_loss=0.3055, pruned_loss=0.07542, over 1619919.85 frames. ], batch size: 24, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:18:47,609 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.372e+02 2.638e+02 3.280e+02 4.340e+02 1.130e+03, threshold=6.560e+02, percent-clipped=9.0 +2023-02-06 14:19:14,843 INFO [train.py:901] (3/4) Epoch 13, batch 7700, loss[loss=0.1951, simple_loss=0.2709, pruned_loss=0.05964, over 7778.00 frames. ], tot_loss[loss=0.2288, simple_loss=0.306, pruned_loss=0.07576, over 1615491.87 frames. ], batch size: 19, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:33,193 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104723.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:37,764 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 14:19:37,984 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104730.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:49,832 INFO [train.py:901] (3/4) Epoch 13, batch 7750, loss[loss=0.3057, simple_loss=0.3737, pruned_loss=0.1189, over 8185.00 frames. ], tot_loss[loss=0.2291, simple_loss=0.3063, pruned_loss=0.0759, over 1621867.44 frames. 
], batch size: 23, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:19:50,602 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104748.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:55,980 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:19:57,811 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.403e+02 2.530e+02 2.944e+02 3.392e+02 9.198e+02, threshold=5.888e+02, percent-clipped=3.0 +2023-02-06 14:20:09,967 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5093, 2.6021, 1.7688, 2.1893, 2.2515, 1.5936, 2.0421, 2.1101], + device='cuda:3'), covar=tensor([0.1535, 0.0395, 0.1174, 0.0682, 0.0673, 0.1490, 0.1006, 0.0942], + device='cuda:3'), in_proj_covar=tensor([0.0345, 0.0233, 0.0316, 0.0292, 0.0296, 0.0319, 0.0335, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:20:12,809 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 14:20:14,006 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=104782.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:24,830 INFO [train.py:901] (3/4) Epoch 13, batch 7800, loss[loss=0.1805, simple_loss=0.2632, pruned_loss=0.04894, over 7654.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.3046, pruned_loss=0.07482, over 1619880.65 frames. ], batch size: 19, lr: 5.78e-03, grad_scale: 8.0 +2023-02-06 14:20:31,894 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=104807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:20:57,827 INFO [train.py:901] (3/4) Epoch 13, batch 7850, loss[loss=0.2354, simple_loss=0.3231, pruned_loss=0.07382, over 8239.00 frames. ], tot_loss[loss=0.2284, simple_loss=0.3055, pruned_loss=0.07561, over 1617147.08 frames. ], batch size: 24, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:05,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.619e+02 3.074e+02 4.074e+02 1.012e+03, threshold=6.148e+02, percent-clipped=5.0 +2023-02-06 14:21:30,905 INFO [train.py:901] (3/4) Epoch 13, batch 7900, loss[loss=0.1728, simple_loss=0.2524, pruned_loss=0.04666, over 7563.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3049, pruned_loss=0.07562, over 1611263.32 frames. ], batch size: 18, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:21:36,645 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-06 14:22:04,229 INFO [train.py:901] (3/4) Epoch 13, batch 7950, loss[loss=0.2152, simple_loss=0.3011, pruned_loss=0.06462, over 8501.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3048, pruned_loss=0.07569, over 1612172.30 frames. ], batch size: 26, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:22:11,312 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.521e+02 3.027e+02 3.866e+02 6.555e+02, threshold=6.053e+02, percent-clipped=2.0 +2023-02-06 14:22:37,770 INFO [train.py:901] (3/4) Epoch 13, batch 8000, loss[loss=0.1953, simple_loss=0.2765, pruned_loss=0.05709, over 8196.00 frames. ], tot_loss[loss=0.2276, simple_loss=0.3043, pruned_loss=0.07548, over 1612784.90 frames. ], batch size: 23, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:23:03,700 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. 
limit=2.0 +2023-02-06 14:23:10,573 INFO [train.py:901] (3/4) Epoch 13, batch 8050, loss[loss=0.2229, simple_loss=0.2908, pruned_loss=0.07754, over 7921.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3033, pruned_loss=0.07518, over 1607230.97 frames. ], batch size: 20, lr: 5.77e-03, grad_scale: 8.0 +2023-02-06 14:23:18,080 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.417e+02 2.946e+02 3.621e+02 6.025e+02, threshold=5.892e+02, percent-clipped=0.0 +2023-02-06 14:23:50,191 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 14:23:54,152 INFO [train.py:901] (3/4) Epoch 14, batch 0, loss[loss=0.2044, simple_loss=0.2758, pruned_loss=0.06653, over 6362.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2758, pruned_loss=0.06653, over 6362.00 frames. ], batch size: 14, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:23:54,152 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 14:24:01,350 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4844, 1.7656, 2.6542, 1.2601, 2.0029, 1.7611, 1.5451, 1.9716], + device='cuda:3'), covar=tensor([0.1835, 0.2770, 0.0878, 0.4598, 0.1903, 0.3282, 0.2268, 0.2249], + device='cuda:3'), in_proj_covar=tensor([0.0499, 0.0545, 0.0539, 0.0600, 0.0623, 0.0566, 0.0493, 0.0619], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:24:05,196 INFO [train.py:935] (3/4) Epoch 14, validation: loss=0.184, simple_loss=0.2839, pruned_loss=0.04201, over 944034.00 frames. +2023-02-06 14:24:05,196 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 14:24:16,397 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.78 vs. limit=2.0 +2023-02-06 14:24:21,240 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 14:24:22,036 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9531, 1.9720, 6.0110, 2.1719, 5.4394, 5.0932, 5.6257, 5.5358], + device='cuda:3'), covar=tensor([0.0446, 0.3965, 0.0315, 0.3144, 0.0857, 0.0745, 0.0427, 0.0432], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0580, 0.0608, 0.0551, 0.0632, 0.0536, 0.0523, 0.0589], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:24:38,591 INFO [train.py:901] (3/4) Epoch 14, batch 50, loss[loss=0.2136, simple_loss=0.2973, pruned_loss=0.06495, over 8132.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3038, pruned_loss=0.07469, over 362428.86 frames. ], batch size: 22, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:24:54,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5074, 1.4227, 2.8409, 1.2459, 1.9743, 3.0760, 3.1405, 2.5635], + device='cuda:3'), covar=tensor([0.1145, 0.1479, 0.0363, 0.2033, 0.0916, 0.0277, 0.0542, 0.0660], + device='cuda:3'), in_proj_covar=tensor([0.0272, 0.0304, 0.0265, 0.0295, 0.0281, 0.0241, 0.0362, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:24:54,762 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-06 14:24:58,151 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.684e+02 3.092e+02 3.835e+02 7.852e+02, threshold=6.183e+02, percent-clipped=3.0 +2023-02-06 14:25:02,467 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4093, 1.9497, 3.4299, 1.2591, 2.3934, 1.8706, 1.5633, 2.3986], + device='cuda:3'), covar=tensor([0.1879, 0.2389, 0.0704, 0.4016, 0.1793, 0.3059, 0.1953, 0.2245], + device='cuda:3'), in_proj_covar=tensor([0.0498, 0.0541, 0.0535, 0.0597, 0.0623, 0.0565, 0.0490, 0.0615], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:25:14,422 INFO [train.py:901] (3/4) Epoch 14, batch 100, loss[loss=0.1892, simple_loss=0.2811, pruned_loss=0.04869, over 8245.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3024, pruned_loss=0.07311, over 639188.60 frames. ], batch size: 24, lr: 5.56e-03, grad_scale: 8.0 +2023-02-06 14:25:17,795 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 14:25:48,643 INFO [train.py:901] (3/4) Epoch 14, batch 150, loss[loss=0.2633, simple_loss=0.3435, pruned_loss=0.09158, over 8439.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3038, pruned_loss=0.07482, over 854952.57 frames. ], batch size: 49, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:08,300 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.384e+02 2.990e+02 3.742e+02 5.781e+02, threshold=5.980e+02, percent-clipped=0.0 +2023-02-06 14:26:14,015 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 14:26:22,055 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.69 vs. limit=2.0 +2023-02-06 14:26:23,065 INFO [train.py:901] (3/4) Epoch 14, batch 200, loss[loss=0.1992, simple_loss=0.2745, pruned_loss=0.06188, over 7505.00 frames. ], tot_loss[loss=0.2294, simple_loss=0.3056, pruned_loss=0.07658, over 1022460.62 frames. ], batch size: 18, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:26:44,058 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 14:26:47,394 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2637, 1.2555, 1.5220, 1.2052, 0.6965, 1.3536, 1.2151, 1.1551], + device='cuda:3'), covar=tensor([0.0572, 0.1227, 0.1650, 0.1400, 0.0582, 0.1435, 0.0645, 0.0646], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0150, 0.0188, 0.0155, 0.0100, 0.0160, 0.0112, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 14:26:58,940 INFO [train.py:901] (3/4) Epoch 14, batch 250, loss[loss=0.2046, simple_loss=0.2933, pruned_loss=0.05798, over 8097.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.305, pruned_loss=0.07613, over 1154638.52 frames. ], batch size: 23, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:07,605 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 14:27:15,946 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-06 14:27:18,050 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.546e+02 3.157e+02 4.204e+02 9.163e+02, threshold=6.313e+02, percent-clipped=6.0 +2023-02-06 14:27:31,869 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3632, 1.5628, 1.6668, 0.9219, 1.7273, 1.3087, 0.2403, 1.5681], + device='cuda:3'), covar=tensor([0.0357, 0.0272, 0.0187, 0.0379, 0.0246, 0.0695, 0.0629, 0.0204], + device='cuda:3'), in_proj_covar=tensor([0.0406, 0.0344, 0.0296, 0.0400, 0.0330, 0.0488, 0.0365, 0.0372], + device='cuda:3'), out_proj_covar=tensor([1.1341e-04, 9.3573e-05, 8.0427e-05, 1.0972e-04, 9.0763e-05, 1.4409e-04, + 1.0227e-04, 1.0270e-04], device='cuda:3') +2023-02-06 14:27:33,663 INFO [train.py:901] (3/4) Epoch 14, batch 300, loss[loss=0.2387, simple_loss=0.3248, pruned_loss=0.07635, over 8112.00 frames. ], tot_loss[loss=0.2286, simple_loss=0.3054, pruned_loss=0.07589, over 1257718.26 frames. ], batch size: 23, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:27:52,816 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:27:58,028 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1906, 1.1977, 3.3236, 0.9394, 2.9199, 2.7720, 3.0000, 2.9347], + device='cuda:3'), covar=tensor([0.0763, 0.4188, 0.0898, 0.3959, 0.1458, 0.1239, 0.0785, 0.0864], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0586, 0.0618, 0.0557, 0.0635, 0.0539, 0.0530, 0.0597], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:28:04,217 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8089, 1.3026, 3.9411, 1.2733, 3.4617, 3.2885, 3.5078, 3.4515], + device='cuda:3'), covar=tensor([0.0551, 0.4403, 0.0620, 0.3909, 0.1185, 0.1010, 0.0637, 0.0669], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0586, 0.0617, 0.0557, 0.0634, 0.0538, 0.0529, 0.0596], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:28:09,627 INFO [train.py:901] (3/4) Epoch 14, batch 350, loss[loss=0.2041, simple_loss=0.2766, pruned_loss=0.06579, over 8077.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3045, pruned_loss=0.07504, over 1338813.07 frames. ], batch size: 21, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:28:28,604 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.329e+02 2.437e+02 2.818e+02 3.446e+02 5.751e+02, threshold=5.636e+02, percent-clipped=0.0 +2023-02-06 14:28:43,599 INFO [train.py:901] (3/4) Epoch 14, batch 400, loss[loss=0.2792, simple_loss=0.3478, pruned_loss=0.1053, over 8495.00 frames. ], tot_loss[loss=0.2281, simple_loss=0.3055, pruned_loss=0.07534, over 1402855.13 frames. ], batch size: 26, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:00,993 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:13,246 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105520.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:29:20,741 INFO [train.py:901] (3/4) Epoch 14, batch 450, loss[loss=0.2617, simple_loss=0.3354, pruned_loss=0.09393, over 8738.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3043, pruned_loss=0.07386, over 1452541.12 frames. 
], batch size: 30, lr: 5.55e-03, grad_scale: 8.0 +2023-02-06 14:29:36,118 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6555, 1.3112, 1.5372, 1.2647, 0.8657, 1.3252, 1.4651, 1.2558], + device='cuda:3'), covar=tensor([0.0528, 0.1288, 0.1782, 0.1431, 0.0619, 0.1540, 0.0723, 0.0689], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0101, 0.0161, 0.0114, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 14:29:40,039 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.497e+02 2.804e+02 3.770e+02 6.336e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 14:29:55,206 INFO [train.py:901] (3/4) Epoch 14, batch 500, loss[loss=0.2225, simple_loss=0.3013, pruned_loss=0.07184, over 8578.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3034, pruned_loss=0.07377, over 1485579.46 frames. ], batch size: 39, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:29,396 INFO [train.py:901] (3/4) Epoch 14, batch 550, loss[loss=0.1938, simple_loss=0.2857, pruned_loss=0.05099, over 8519.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3048, pruned_loss=0.07441, over 1519036.30 frames. ], batch size: 39, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:30:50,301 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.442e+02 2.933e+02 3.700e+02 8.163e+02, threshold=5.867e+02, percent-clipped=3.0 +2023-02-06 14:31:05,192 INFO [train.py:901] (3/4) Epoch 14, batch 600, loss[loss=0.2358, simple_loss=0.3162, pruned_loss=0.07765, over 7118.00 frames. ], tot_loss[loss=0.2272, simple_loss=0.305, pruned_loss=0.07466, over 1538016.19 frames. ], batch size: 71, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:18,452 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 14:31:39,816 INFO [train.py:901] (3/4) Epoch 14, batch 650, loss[loss=0.1663, simple_loss=0.2495, pruned_loss=0.04155, over 7424.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3042, pruned_loss=0.074, over 1555848.64 frames. ], batch size: 17, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:31:54,391 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:01,339 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.402e+02 3.000e+02 3.711e+02 7.109e+02, threshold=6.000e+02, percent-clipped=4.0 +2023-02-06 14:32:17,053 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.04 vs. limit=5.0 +2023-02-06 14:32:17,354 INFO [train.py:901] (3/4) Epoch 14, batch 700, loss[loss=0.2617, simple_loss=0.3267, pruned_loss=0.09833, over 8807.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3032, pruned_loss=0.07317, over 1570987.51 frames. ], batch size: 40, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:32:37,894 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:42,734 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=105817.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:32:51,388 INFO [train.py:901] (3/4) Epoch 14, batch 750, loss[loss=0.2316, simple_loss=0.3137, pruned_loss=0.07474, over 8036.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3032, pruned_loss=0.07322, over 1583808.93 frames. 
], batch size: 22, lr: 5.54e-03, grad_scale: 8.0 +2023-02-06 14:32:51,599 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4914, 2.9059, 2.4308, 4.0247, 1.6558, 2.1925, 2.4732, 3.2536], + device='cuda:3'), covar=tensor([0.0785, 0.0874, 0.0933, 0.0270, 0.1248, 0.1385, 0.1104, 0.0725], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0204, 0.0252, 0.0210, 0.0212, 0.0253, 0.0258, 0.0216], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:33:03,871 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:06,443 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 14:33:11,301 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.459e+02 2.898e+02 3.725e+02 7.154e+02, threshold=5.796e+02, percent-clipped=4.0 +2023-02-06 14:33:15,493 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=105864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:16,057 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 14:33:16,241 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105865.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:33:27,180 INFO [train.py:901] (3/4) Epoch 14, batch 800, loss[loss=0.1943, simple_loss=0.2736, pruned_loss=0.05749, over 7527.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3027, pruned_loss=0.07315, over 1585298.99 frames. ], batch size: 18, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:33:40,346 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2518, 3.1608, 2.9432, 1.6536, 2.8665, 2.8783, 2.9169, 2.7276], + device='cuda:3'), covar=tensor([0.1117, 0.0838, 0.1376, 0.4587, 0.1159, 0.1294, 0.1578, 0.1193], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0391, 0.0405, 0.0504, 0.0396, 0.0398, 0.0384, 0.0346], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:33:48,448 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.46 vs. limit=5.0 +2023-02-06 14:33:50,560 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 14:33:55,100 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 14:34:02,175 INFO [train.py:901] (3/4) Epoch 14, batch 850, loss[loss=0.2161, simple_loss=0.2899, pruned_loss=0.07118, over 7986.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3036, pruned_loss=0.0735, over 1598706.51 frames. ], batch size: 21, lr: 5.54e-03, grad_scale: 16.0 +2023-02-06 14:34:20,961 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.803e+02 2.477e+02 2.961e+02 4.061e+02 6.411e+02, threshold=5.921e+02, percent-clipped=4.0 +2023-02-06 14:34:24,608 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105963.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:36,588 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=105979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:34:37,083 INFO [train.py:901] (3/4) Epoch 14, batch 900, loss[loss=0.2055, simple_loss=0.2894, pruned_loss=0.06075, over 8479.00 frames. 
], tot_loss[loss=0.2255, simple_loss=0.3038, pruned_loss=0.07363, over 1602307.45 frames. ], batch size: 25, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:03,388 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7760, 1.7323, 2.2112, 1.7119, 1.4160, 2.2612, 0.6290, 1.5945], + device='cuda:3'), covar=tensor([0.2566, 0.1247, 0.0467, 0.1426, 0.3073, 0.0425, 0.2960, 0.1678], + device='cuda:3'), in_proj_covar=tensor([0.0174, 0.0177, 0.0108, 0.0220, 0.0256, 0.0111, 0.0164, 0.0174], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 14:35:14,895 INFO [train.py:901] (3/4) Epoch 14, batch 950, loss[loss=0.2093, simple_loss=0.2938, pruned_loss=0.06241, over 8361.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3037, pruned_loss=0.07375, over 1606652.06 frames. ], batch size: 24, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:35:33,973 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.617e+02 3.202e+02 4.119e+02 6.844e+02, threshold=6.403e+02, percent-clipped=3.0 +2023-02-06 14:35:38,937 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 14:35:49,299 INFO [train.py:901] (3/4) Epoch 14, batch 1000, loss[loss=0.1975, simple_loss=0.2817, pruned_loss=0.05666, over 8618.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.304, pruned_loss=0.07379, over 1612763.74 frames. ], batch size: 34, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:00,606 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:14,311 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 14:36:20,078 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:26,949 INFO [train.py:901] (3/4) Epoch 14, batch 1050, loss[loss=0.208, simple_loss=0.2824, pruned_loss=0.06674, over 7807.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.303, pruned_loss=0.0728, over 1607502.69 frames. ], batch size: 20, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:36:26,961 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 14:36:37,977 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:43,499 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:36:46,250 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.417e+02 2.951e+02 3.593e+02 9.096e+02, threshold=5.903e+02, percent-clipped=2.0 +2023-02-06 14:36:48,432 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:01,617 INFO [train.py:901] (3/4) Epoch 14, batch 1100, loss[loss=0.2059, simple_loss=0.2837, pruned_loss=0.06405, over 8473.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3027, pruned_loss=0.07285, over 1606087.23 frames. 
], batch size: 25, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:29,710 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:35,892 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 14:37:38,717 INFO [train.py:901] (3/4) Epoch 14, batch 1150, loss[loss=0.204, simple_loss=0.2839, pruned_loss=0.06209, over 8372.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3029, pruned_loss=0.07315, over 1614533.42 frames. ], batch size: 24, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:37:42,396 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106235.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:46,539 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 14:37:49,101 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106244.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:37:58,390 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.475e+02 3.133e+02 3.919e+02 6.906e+02, threshold=6.266e+02, percent-clipped=3.0 +2023-02-06 14:37:59,994 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106260.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:06,207 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106269.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:10,866 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106276.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:13,355 INFO [train.py:901] (3/4) Epoch 14, batch 1200, loss[loss=0.2157, simple_loss=0.297, pruned_loss=0.06723, over 8256.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3035, pruned_loss=0.07388, over 1615856.25 frames. ], batch size: 24, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:38:17,561 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:38:42,813 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9232, 1.5422, 3.4700, 1.3393, 2.3965, 3.8419, 3.7639, 3.2493], + device='cuda:3'), covar=tensor([0.1030, 0.1543, 0.0318, 0.2034, 0.0934, 0.0214, 0.0597, 0.0639], + device='cuda:3'), in_proj_covar=tensor([0.0275, 0.0307, 0.0267, 0.0296, 0.0284, 0.0245, 0.0365, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:38:47,946 INFO [train.py:901] (3/4) Epoch 14, batch 1250, loss[loss=0.2282, simple_loss=0.3121, pruned_loss=0.07215, over 8495.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3023, pruned_loss=0.07345, over 1615104.39 frames. ], batch size: 26, lr: 5.53e-03, grad_scale: 16.0 +2023-02-06 14:39:05,980 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106354.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:39:08,474 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.539e+02 3.303e+02 4.386e+02 1.450e+03, threshold=6.607e+02, percent-clipped=4.0 +2023-02-06 14:39:24,634 INFO [train.py:901] (3/4) Epoch 14, batch 1300, loss[loss=0.2168, simple_loss=0.3046, pruned_loss=0.06446, over 8101.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3023, pruned_loss=0.07283, over 1618841.12 frames. 
], batch size: 23, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:39:55,747 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4600, 1.6950, 1.6817, 1.1576, 1.7529, 1.3771, 0.3390, 1.5647], + device='cuda:3'), covar=tensor([0.0331, 0.0228, 0.0188, 0.0299, 0.0252, 0.0565, 0.0575, 0.0169], + device='cuda:3'), in_proj_covar=tensor([0.0410, 0.0346, 0.0302, 0.0404, 0.0337, 0.0491, 0.0366, 0.0376], + device='cuda:3'), out_proj_covar=tensor([1.1432e-04, 9.3765e-05, 8.2173e-05, 1.1042e-04, 9.2543e-05, 1.4467e-04, + 1.0230e-04, 1.0355e-04], device='cuda:3') +2023-02-06 14:39:58,993 INFO [train.py:901] (3/4) Epoch 14, batch 1350, loss[loss=0.2185, simple_loss=0.2878, pruned_loss=0.07459, over 7547.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3025, pruned_loss=0.07266, over 1618455.37 frames. ], batch size: 18, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:40:05,441 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:19,201 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.550e+02 3.060e+02 3.665e+02 8.767e+02, threshold=6.121e+02, percent-clipped=1.0 +2023-02-06 14:40:29,790 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=106472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:40:35,138 INFO [train.py:901] (3/4) Epoch 14, batch 1400, loss[loss=0.2346, simple_loss=0.3128, pruned_loss=0.07819, over 8071.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3027, pruned_loss=0.07286, over 1617245.75 frames. ], batch size: 21, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:05,449 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-06 14:41:07,387 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106525.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:10,539 INFO [train.py:901] (3/4) Epoch 14, batch 1450, loss[loss=0.1767, simple_loss=0.2541, pruned_loss=0.04965, over 7433.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3031, pruned_loss=0.07351, over 1616624.79 frames. ], batch size: 17, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:41:11,257 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 14:41:12,179 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:24,665 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106550.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:27,430 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106554.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,522 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106557.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:41:29,949 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.831e+02 2.546e+02 3.123e+02 4.151e+02 8.254e+02, threshold=6.246e+02, percent-clipped=6.0 +2023-02-06 14:41:47,575 INFO [train.py:901] (3/4) Epoch 14, batch 1500, loss[loss=0.2989, simple_loss=0.3668, pruned_loss=0.1155, over 8321.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3044, pruned_loss=0.0741, over 1617376.11 frames. 
], batch size: 26, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,569 INFO [train.py:901] (3/4) Epoch 14, batch 1550, loss[loss=0.2041, simple_loss=0.2901, pruned_loss=0.0591, over 8577.00 frames. ], tot_loss[loss=0.2274, simple_loss=0.3053, pruned_loss=0.07472, over 1618240.17 frames. ], batch size: 31, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:42:22,652 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:42:41,334 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.593e+02 3.196e+02 4.114e+02 8.054e+02, threshold=6.391e+02, percent-clipped=4.0 +2023-02-06 14:42:52,897 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7645, 1.5509, 3.1884, 1.4433, 2.2536, 3.3791, 3.4973, 2.8722], + device='cuda:3'), covar=tensor([0.1136, 0.1521, 0.0318, 0.1989, 0.0889, 0.0270, 0.0511, 0.0631], + device='cuda:3'), in_proj_covar=tensor([0.0270, 0.0303, 0.0263, 0.0293, 0.0281, 0.0242, 0.0362, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 14:42:56,721 INFO [train.py:901] (3/4) Epoch 14, batch 1600, loss[loss=0.3067, simple_loss=0.361, pruned_loss=0.1262, over 7246.00 frames. ], tot_loss[loss=0.2267, simple_loss=0.3042, pruned_loss=0.07466, over 1616960.37 frames. ], batch size: 73, lr: 5.52e-03, grad_scale: 16.0 +2023-02-06 14:43:10,342 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:32,335 INFO [train.py:901] (3/4) Epoch 14, batch 1650, loss[loss=0.2642, simple_loss=0.3261, pruned_loss=0.1011, over 7925.00 frames. ], tot_loss[loss=0.2259, simple_loss=0.3033, pruned_loss=0.07423, over 1616434.82 frames. ], batch size: 20, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:43:35,230 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2103, 1.4391, 1.6079, 1.3092, 0.8929, 1.3430, 1.8061, 1.5602], + device='cuda:3'), covar=tensor([0.0501, 0.1297, 0.1836, 0.1503, 0.0634, 0.1599, 0.0671, 0.0678], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0157, 0.0101, 0.0162, 0.0113, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 14:43:42,575 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106745.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:43:51,899 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.493e+02 3.038e+02 4.078e+02 1.080e+03, threshold=6.076e+02, percent-clipped=3.0 +2023-02-06 14:43:52,347 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0 +2023-02-06 14:43:53,551 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3324, 1.5564, 1.6296, 0.8554, 1.6841, 1.2210, 0.2867, 1.4929], + device='cuda:3'), covar=tensor([0.0337, 0.0277, 0.0230, 0.0374, 0.0246, 0.0666, 0.0591, 0.0195], + device='cuda:3'), in_proj_covar=tensor([0.0412, 0.0349, 0.0304, 0.0406, 0.0338, 0.0492, 0.0370, 0.0375], + device='cuda:3'), out_proj_covar=tensor([1.1478e-04, 9.4922e-05, 8.2619e-05, 1.1079e-04, 9.2894e-05, 1.4505e-04, + 1.0328e-04, 1.0334e-04], device='cuda:3') +2023-02-06 14:44:06,421 INFO [train.py:901] (3/4) Epoch 14, batch 1700, loss[loss=0.2305, simple_loss=0.2847, pruned_loss=0.08818, over 4742.00 frames. 
], tot_loss[loss=0.225, simple_loss=0.3032, pruned_loss=0.07342, over 1619272.64 frames. ], batch size: 10, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:28,951 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=106810.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:31,562 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106813.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:33,565 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=106816.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:44:43,438 INFO [train.py:901] (3/4) Epoch 14, batch 1750, loss[loss=0.2058, simple_loss=0.2902, pruned_loss=0.06067, over 8033.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3016, pruned_loss=0.07219, over 1618360.16 frames. ], batch size: 22, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:44:47,852 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=106835.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:45:04,124 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.358e+02 2.865e+02 3.554e+02 7.426e+02, threshold=5.730e+02, percent-clipped=3.0 +2023-02-06 14:45:18,437 INFO [train.py:901] (3/4) Epoch 14, batch 1800, loss[loss=0.2355, simple_loss=0.3045, pruned_loss=0.08326, over 8091.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3018, pruned_loss=0.0729, over 1618945.96 frames. ], batch size: 21, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:45:54,581 INFO [train.py:901] (3/4) Epoch 14, batch 1850, loss[loss=0.2453, simple_loss=0.3171, pruned_loss=0.08675, over 8539.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.302, pruned_loss=0.07322, over 1619895.07 frames. ], batch size: 39, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:45:55,506 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=106931.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:16,024 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.570e+02 3.068e+02 3.847e+02 1.325e+03, threshold=6.136e+02, percent-clipped=4.0 +2023-02-06 14:46:29,532 INFO [train.py:901] (3/4) Epoch 14, batch 1900, loss[loss=0.2046, simple_loss=0.2919, pruned_loss=0.05869, over 8192.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3023, pruned_loss=0.07381, over 1616337.37 frames. ], batch size: 23, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:46:43,881 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107001.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:46:47,083 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 14:46:59,775 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 14:47:01,258 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107026.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:03,728 INFO [train.py:901] (3/4) Epoch 14, batch 1950, loss[loss=0.2684, simple_loss=0.3363, pruned_loss=0.1003, over 7306.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3027, pruned_loss=0.07443, over 1615000.90 frames. ], batch size: 71, lr: 5.51e-03, grad_scale: 4.0 +2023-02-06 14:47:19,873 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-06 14:47:26,064 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.386e+02 2.840e+02 3.483e+02 6.138e+02, threshold=5.681e+02, percent-clipped=1.0 +2023-02-06 14:47:31,992 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107069.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:47:39,106 INFO [train.py:901] (3/4) Epoch 14, batch 2000, loss[loss=0.2838, simple_loss=0.3471, pruned_loss=0.1102, over 6911.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3022, pruned_loss=0.07384, over 1614702.86 frames. ], batch size: 71, lr: 5.51e-03, grad_scale: 8.0 +2023-02-06 14:47:48,662 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:12,604 INFO [train.py:901] (3/4) Epoch 14, batch 2050, loss[loss=0.2138, simple_loss=0.3003, pruned_loss=0.06368, over 7982.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3011, pruned_loss=0.07309, over 1612140.16 frames. ], batch size: 21, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:19,570 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1142, 2.1226, 1.4805, 1.8694, 1.6709, 1.1875, 1.5346, 1.7518], + device='cuda:3'), covar=tensor([0.1459, 0.0525, 0.1351, 0.0597, 0.0900, 0.1743, 0.1170, 0.0873], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0233, 0.0320, 0.0296, 0.0300, 0.0322, 0.0339, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 14:48:23,057 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6546, 1.6071, 2.8238, 1.3266, 2.0662, 3.0054, 3.1201, 2.5189], + device='cuda:3'), covar=tensor([0.1148, 0.1378, 0.0395, 0.2038, 0.0978, 0.0315, 0.0640, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0304, 0.0264, 0.0292, 0.0279, 0.0241, 0.0362, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 14:48:26,620 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3863, 2.0853, 2.9523, 2.4140, 2.7481, 2.2648, 2.0185, 1.5179], + device='cuda:3'), covar=tensor([0.4624, 0.4324, 0.1378, 0.2749, 0.2262, 0.2476, 0.1847, 0.4495], + device='cuda:3'), in_proj_covar=tensor([0.0897, 0.0898, 0.0746, 0.0868, 0.0956, 0.0825, 0.0710, 0.0779], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:48:27,953 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0482, 2.2690, 1.7945, 2.8680, 1.3564, 1.6281, 1.9536, 2.3844], + device='cuda:3'), covar=tensor([0.0674, 0.0836, 0.0911, 0.0345, 0.1214, 0.1385, 0.1030, 0.0773], + device='cuda:3'), in_proj_covar=tensor([0.0226, 0.0202, 0.0245, 0.0207, 0.0210, 0.0247, 0.0251, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:48:33,212 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107158.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:48:34,368 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.411e+02 3.055e+02 3.713e+02 7.642e+02, threshold=6.109e+02, percent-clipped=4.0 +2023-02-06 14:48:42,102 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107170.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:49,408 INFO 
[train.py:901] (3/4) Epoch 14, batch 2100, loss[loss=0.2903, simple_loss=0.3625, pruned_loss=0.1091, over 8479.00 frames. ], tot_loss[loss=0.2252, simple_loss=0.3026, pruned_loss=0.07392, over 1609099.16 frames. ], batch size: 28, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:48:54,366 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:48:57,041 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7076, 2.2531, 4.1344, 1.5074, 2.8590, 2.2904, 1.9217, 2.8058], + device='cuda:3'), covar=tensor([0.1709, 0.2347, 0.0665, 0.3880, 0.1717, 0.2775, 0.1797, 0.2264], + device='cuda:3'), in_proj_covar=tensor([0.0496, 0.0542, 0.0535, 0.0592, 0.0618, 0.0559, 0.0487, 0.0614], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:49:00,227 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1421, 1.9135, 3.2128, 1.4294, 2.3839, 3.5109, 3.5400, 2.9972], + device='cuda:3'), covar=tensor([0.1048, 0.1408, 0.0379, 0.2218, 0.1044, 0.0240, 0.0578, 0.0582], + device='cuda:3'), in_proj_covar=tensor([0.0272, 0.0305, 0.0266, 0.0293, 0.0281, 0.0242, 0.0363, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 14:49:11,188 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:49:23,196 INFO [train.py:901] (3/4) Epoch 14, batch 2150, loss[loss=0.1798, simple_loss=0.2525, pruned_loss=0.05357, over 7247.00 frames. ], tot_loss[loss=0.2257, simple_loss=0.303, pruned_loss=0.07424, over 1608142.47 frames. ], batch size: 16, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:49:44,426 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.615e+02 3.041e+02 3.823e+02 8.460e+02, threshold=6.081e+02, percent-clipped=1.0 +2023-02-06 14:49:58,898 INFO [train.py:901] (3/4) Epoch 14, batch 2200, loss[loss=0.2071, simple_loss=0.286, pruned_loss=0.06412, over 8038.00 frames. ], tot_loss[loss=0.2268, simple_loss=0.3041, pruned_loss=0.07471, over 1609605.19 frames. ], batch size: 22, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:23,271 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1015, 2.6548, 3.1281, 1.3803, 3.1913, 1.7991, 1.4588, 2.1736], + device='cuda:3'), covar=tensor([0.0579, 0.0287, 0.0204, 0.0617, 0.0352, 0.0706, 0.0720, 0.0425], + device='cuda:3'), in_proj_covar=tensor([0.0411, 0.0351, 0.0306, 0.0407, 0.0337, 0.0496, 0.0372, 0.0378], + device='cuda:3'), out_proj_covar=tensor([1.1456e-04, 9.5359e-05, 8.3165e-05, 1.1110e-04, 9.2411e-05, 1.4607e-04, + 1.0394e-04, 1.0421e-04], device='cuda:3') +2023-02-06 14:50:34,527 INFO [train.py:901] (3/4) Epoch 14, batch 2250, loss[loss=0.2022, simple_loss=0.2718, pruned_loss=0.06632, over 7976.00 frames. ], tot_loss[loss=0.2273, simple_loss=0.3045, pruned_loss=0.075, over 1609186.06 frames. ], batch size: 21, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:50:54,548 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.599e+02 3.319e+02 4.071e+02 1.027e+03, threshold=6.637e+02, percent-clipped=7.0 +2023-02-06 14:51:08,893 INFO [train.py:901] (3/4) Epoch 14, batch 2300, loss[loss=0.2956, simple_loss=0.3483, pruned_loss=0.1214, over 6998.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3036, pruned_loss=0.07457, over 1609464.19 frames. 
], batch size: 71, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:51:27,523 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9032, 1.4611, 3.3001, 1.5010, 2.3623, 3.5211, 3.6749, 3.0481], + device='cuda:3'), covar=tensor([0.1098, 0.1649, 0.0341, 0.1976, 0.0991, 0.0241, 0.0440, 0.0567], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0303, 0.0266, 0.0292, 0.0280, 0.0242, 0.0363, 0.0293], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 14:51:33,161 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 14:51:33,643 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0211, 2.2877, 1.7719, 2.8012, 1.2754, 1.5706, 1.9432, 2.3707], + device='cuda:3'), covar=tensor([0.0714, 0.0784, 0.0930, 0.0361, 0.1187, 0.1385, 0.0965, 0.0790], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0206, 0.0249, 0.0211, 0.0214, 0.0252, 0.0254, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:51:44,729 INFO [train.py:901] (3/4) Epoch 14, batch 2350, loss[loss=0.2203, simple_loss=0.297, pruned_loss=0.07176, over 7977.00 frames. ], tot_loss[loss=0.225, simple_loss=0.3025, pruned_loss=0.07375, over 1610720.60 frames. ], batch size: 21, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:00,477 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5539, 2.2221, 3.4441, 2.6090, 2.9494, 2.3434, 2.0018, 1.7222], + device='cuda:3'), covar=tensor([0.3948, 0.4024, 0.1325, 0.3271, 0.2342, 0.2396, 0.1699, 0.4690], + device='cuda:3'), in_proj_covar=tensor([0.0892, 0.0893, 0.0745, 0.0866, 0.0953, 0.0822, 0.0709, 0.0776], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:52:04,939 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.358e+02 2.889e+02 3.449e+02 7.134e+02, threshold=5.779e+02, percent-clipped=1.0 +2023-02-06 14:52:18,380 INFO [train.py:901] (3/4) Epoch 14, batch 2400, loss[loss=0.296, simple_loss=0.3532, pruned_loss=0.1194, over 8338.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.3033, pruned_loss=0.07462, over 1609408.59 frames. ], batch size: 26, lr: 5.50e-03, grad_scale: 8.0 +2023-02-06 14:52:34,429 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107502.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:52:43,465 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:53,751 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107528.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:52:54,997 INFO [train.py:901] (3/4) Epoch 14, batch 2450, loss[loss=0.2394, simple_loss=0.3163, pruned_loss=0.08127, over 8774.00 frames. ], tot_loss[loss=0.226, simple_loss=0.3033, pruned_loss=0.07439, over 1617939.26 frames. ], batch size: 30, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:16,528 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.477e+02 3.089e+02 4.011e+02 1.178e+03, threshold=6.179e+02, percent-clipped=8.0 +2023-02-06 14:53:29,784 INFO [train.py:901] (3/4) Epoch 14, batch 2500, loss[loss=0.2296, simple_loss=0.3014, pruned_loss=0.07892, over 7220.00 frames. 
], tot_loss[loss=0.2261, simple_loss=0.3035, pruned_loss=0.07437, over 1623559.62 frames. ], batch size: 16, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:53:54,964 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7437, 1.6863, 2.1938, 1.5708, 1.3343, 2.2385, 0.4341, 1.3779], + device='cuda:3'), covar=tensor([0.2051, 0.1476, 0.0417, 0.1436, 0.3140, 0.0354, 0.2577, 0.1668], + device='cuda:3'), in_proj_covar=tensor([0.0173, 0.0177, 0.0108, 0.0218, 0.0261, 0.0112, 0.0162, 0.0174], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 14:53:55,583 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107617.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:54:03,479 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:03,974 INFO [train.py:901] (3/4) Epoch 14, batch 2550, loss[loss=0.1798, simple_loss=0.2644, pruned_loss=0.04764, over 7972.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.303, pruned_loss=0.07424, over 1622480.55 frames. ], batch size: 21, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:54:12,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6594, 1.5313, 2.8518, 1.2899, 2.0190, 3.0166, 3.1217, 2.5803], + device='cuda:3'), covar=tensor([0.1089, 0.1445, 0.0399, 0.2063, 0.0960, 0.0283, 0.0618, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0303, 0.0266, 0.0293, 0.0280, 0.0241, 0.0361, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 14:54:17,609 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6681, 2.0425, 2.2260, 1.2356, 2.3093, 1.5180, 0.6655, 1.8248], + device='cuda:3'), covar=tensor([0.0521, 0.0280, 0.0215, 0.0522, 0.0359, 0.0754, 0.0762, 0.0324], + device='cuda:3'), in_proj_covar=tensor([0.0413, 0.0353, 0.0309, 0.0409, 0.0341, 0.0499, 0.0375, 0.0381], + device='cuda:3'), out_proj_covar=tensor([1.1511e-04, 9.5815e-05, 8.3996e-05, 1.1152e-04, 9.3479e-05, 1.4706e-04, + 1.0461e-04, 1.0489e-04], device='cuda:3') +2023-02-06 14:54:26,274 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.642e+02 3.253e+02 4.518e+02 1.030e+03, threshold=6.506e+02, percent-clipped=5.0 +2023-02-06 14:54:37,784 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:54:39,579 INFO [train.py:901] (3/4) Epoch 14, batch 2600, loss[loss=0.2387, simple_loss=0.3043, pruned_loss=0.08655, over 7975.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3039, pruned_loss=0.07491, over 1621971.22 frames. ], batch size: 21, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:55:12,836 INFO [train.py:901] (3/4) Epoch 14, batch 2650, loss[loss=0.259, simple_loss=0.341, pruned_loss=0.08849, over 8260.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3043, pruned_loss=0.0747, over 1618865.67 frames. 
], batch size: 24, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:55:30,857 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=107755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:55:34,865 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.443e+02 2.980e+02 3.881e+02 9.981e+02, threshold=5.960e+02, percent-clipped=6.0 +2023-02-06 14:55:38,610 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2276, 2.6675, 3.1845, 1.5690, 3.2567, 2.0565, 1.6669, 2.3041], + device='cuda:3'), covar=tensor([0.0660, 0.0325, 0.0166, 0.0591, 0.0283, 0.0614, 0.0651, 0.0395], + device='cuda:3'), in_proj_covar=tensor([0.0415, 0.0353, 0.0308, 0.0411, 0.0341, 0.0499, 0.0376, 0.0381], + device='cuda:3'), out_proj_covar=tensor([1.1548e-04, 9.5921e-05, 8.3857e-05, 1.1200e-04, 9.3445e-05, 1.4695e-04, + 1.0487e-04, 1.0510e-04], device='cuda:3') +2023-02-06 14:55:49,934 INFO [train.py:901] (3/4) Epoch 14, batch 2700, loss[loss=0.2507, simple_loss=0.321, pruned_loss=0.09024, over 6675.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3036, pruned_loss=0.07431, over 1615728.45 frames. ], batch size: 71, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:23,701 INFO [train.py:901] (3/4) Epoch 14, batch 2750, loss[loss=0.2016, simple_loss=0.2858, pruned_loss=0.05874, over 8235.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.3038, pruned_loss=0.07473, over 1611110.30 frames. ], batch size: 22, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:56:44,697 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.404e+02 2.918e+02 3.592e+02 1.217e+03, threshold=5.837e+02, percent-clipped=4.0 +2023-02-06 14:56:53,331 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=107872.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:56:54,149 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107873.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 14:56:59,458 INFO [train.py:901] (3/4) Epoch 14, batch 2800, loss[loss=0.2502, simple_loss=0.3195, pruned_loss=0.09045, over 8293.00 frames. ], tot_loss[loss=0.2266, simple_loss=0.304, pruned_loss=0.07458, over 1614204.77 frames. ], batch size: 23, lr: 5.49e-03, grad_scale: 8.0 +2023-02-06 14:57:03,888 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=107885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:12,678 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107898.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 14:57:20,767 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=107910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:57:33,815 INFO [train.py:901] (3/4) Epoch 14, batch 2850, loss[loss=0.2556, simple_loss=0.3184, pruned_loss=0.09643, over 7655.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.3038, pruned_loss=0.07446, over 1615954.75 frames. ], batch size: 19, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:57:54,102 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.478e+02 3.087e+02 3.919e+02 8.173e+02, threshold=6.173e+02, percent-clipped=5.0 +2023-02-06 14:58:08,145 INFO [train.py:901] (3/4) Epoch 14, batch 2900, loss[loss=0.2139, simple_loss=0.289, pruned_loss=0.06936, over 7805.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.3037, pruned_loss=0.07425, over 1615017.36 frames. 
], batch size: 19, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:58:12,782 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=107987.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:22,939 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6555, 2.2638, 4.3758, 1.5019, 2.8710, 2.2304, 1.7507, 2.9022], + device='cuda:3'), covar=tensor([0.1662, 0.2307, 0.0641, 0.3814, 0.1642, 0.2688, 0.1826, 0.2116], + device='cuda:3'), in_proj_covar=tensor([0.0493, 0.0541, 0.0533, 0.0586, 0.0616, 0.0554, 0.0485, 0.0610], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 14:58:27,879 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 14:58:37,447 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7362, 2.2860, 3.4620, 2.6267, 3.2545, 2.3851, 2.1777, 1.7451], + device='cuda:3'), covar=tensor([0.4050, 0.4415, 0.1397, 0.2985, 0.2104, 0.2407, 0.1635, 0.4807], + device='cuda:3'), in_proj_covar=tensor([0.0900, 0.0898, 0.0742, 0.0868, 0.0951, 0.0828, 0.0710, 0.0780], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 14:58:38,642 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 14:58:44,582 INFO [train.py:901] (3/4) Epoch 14, batch 2950, loss[loss=0.261, simple_loss=0.3303, pruned_loss=0.09586, over 7022.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.3043, pruned_loss=0.07431, over 1615759.73 frames. ], batch size: 71, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:59:04,838 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.696e+02 3.199e+02 4.019e+02 8.231e+02, threshold=6.398e+02, percent-clipped=3.0 +2023-02-06 14:59:06,751 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 14:59:18,173 INFO [train.py:901] (3/4) Epoch 14, batch 3000, loss[loss=0.2092, simple_loss=0.2818, pruned_loss=0.0683, over 7233.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3038, pruned_loss=0.0739, over 1612876.80 frames. 
], batch size: 16, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 14:59:18,173 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 14:59:23,432 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3310, 1.5503, 1.5647, 0.9796, 1.5631, 1.1875, 0.3254, 1.4875], + device='cuda:3'), covar=tensor([0.0395, 0.0332, 0.0266, 0.0429, 0.0390, 0.0887, 0.0768, 0.0262], + device='cuda:3'), in_proj_covar=tensor([0.0419, 0.0356, 0.0310, 0.0411, 0.0342, 0.0500, 0.0379, 0.0383], + device='cuda:3'), out_proj_covar=tensor([1.1663e-04, 9.6576e-05, 8.4118e-05, 1.1208e-04, 9.3709e-05, 1.4718e-04, + 1.0580e-04, 1.0561e-04], device='cuda:3') +2023-02-06 14:59:26,783 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6903, 1.7671, 1.5901, 2.1931, 1.1565, 1.4240, 1.5784, 1.7851], + device='cuda:3'), covar=tensor([0.0702, 0.0898, 0.0985, 0.0485, 0.1194, 0.1357, 0.0881, 0.0769], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0205, 0.0249, 0.0211, 0.0213, 0.0249, 0.0253, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 14:59:30,506 INFO [train.py:935] (3/4) Epoch 14, validation: loss=0.1827, simple_loss=0.283, pruned_loss=0.04121, over 944034.00 frames. +2023-02-06 14:59:30,506 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 14:59:43,702 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108099.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:05,760 INFO [train.py:901] (3/4) Epoch 14, batch 3050, loss[loss=0.1955, simple_loss=0.2572, pruned_loss=0.06691, over 7411.00 frames. ], tot_loss[loss=0.2256, simple_loss=0.3033, pruned_loss=0.07398, over 1612510.78 frames. ], batch size: 17, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:00:10,682 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:00:28,076 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.670e+02 3.118e+02 3.835e+02 7.160e+02, threshold=6.236e+02, percent-clipped=1.0 +2023-02-06 15:00:41,680 INFO [train.py:901] (3/4) Epoch 14, batch 3100, loss[loss=0.2592, simple_loss=0.332, pruned_loss=0.09315, over 8525.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3026, pruned_loss=0.07355, over 1613593.60 frames. ], batch size: 49, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:00:55,329 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9783, 4.0946, 2.5475, 2.8650, 2.9461, 2.5033, 2.7372, 2.9781], + device='cuda:3'), covar=tensor([0.1637, 0.0282, 0.0848, 0.0701, 0.0631, 0.1019, 0.1001, 0.1148], + device='cuda:3'), in_proj_covar=tensor([0.0343, 0.0229, 0.0318, 0.0296, 0.0296, 0.0319, 0.0339, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:01:04,698 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108214.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:15,963 INFO [train.py:901] (3/4) Epoch 14, batch 3150, loss[loss=0.2545, simple_loss=0.3248, pruned_loss=0.09204, over 8553.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3022, pruned_loss=0.07327, over 1614717.57 frames. ], batch size: 31, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:01:17,031 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-06 15:01:24,681 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:26,343 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 15:01:37,168 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.570e+02 3.163e+02 4.155e+02 7.848e+02, threshold=6.326e+02, percent-clipped=5.0 +2023-02-06 15:01:43,480 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:01:51,515 INFO [train.py:901] (3/4) Epoch 14, batch 3200, loss[loss=0.2172, simple_loss=0.2999, pruned_loss=0.06726, over 8497.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3024, pruned_loss=0.07346, over 1615418.26 frames. ], batch size: 26, lr: 5.48e-03, grad_scale: 8.0 +2023-02-06 15:02:23,930 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1488, 1.8137, 2.7127, 2.2068, 2.4423, 2.0843, 1.7321, 1.3205], + device='cuda:3'), covar=tensor([0.4553, 0.4258, 0.1336, 0.2731, 0.2088, 0.2598, 0.1788, 0.4236], + device='cuda:3'), in_proj_covar=tensor([0.0901, 0.0900, 0.0740, 0.0868, 0.0958, 0.0830, 0.0712, 0.0779], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:02:25,774 INFO [train.py:901] (3/4) Epoch 14, batch 3250, loss[loss=0.2472, simple_loss=0.3252, pruned_loss=0.0846, over 8447.00 frames. ], tot_loss[loss=0.2253, simple_loss=0.3031, pruned_loss=0.07376, over 1618634.78 frames. ], batch size: 29, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:02:35,709 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108343.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:02:47,025 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.638e+02 3.239e+02 4.086e+02 1.012e+03, threshold=6.478e+02, percent-clipped=4.0 +2023-02-06 15:03:02,204 INFO [train.py:901] (3/4) Epoch 14, batch 3300, loss[loss=0.2116, simple_loss=0.2921, pruned_loss=0.06554, over 8693.00 frames. ], tot_loss[loss=0.2247, simple_loss=0.3029, pruned_loss=0.0733, over 1620438.14 frames. ], batch size: 34, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:05,089 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8472, 1.5232, 3.9552, 1.3662, 3.4918, 3.2805, 3.5768, 3.4843], + device='cuda:3'), covar=tensor([0.0587, 0.4162, 0.0630, 0.3958, 0.1203, 0.1027, 0.0637, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0528, 0.0601, 0.0616, 0.0567, 0.0641, 0.0550, 0.0535, 0.0606], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:03:11,254 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:27,909 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:36,466 INFO [train.py:901] (3/4) Epoch 14, batch 3350, loss[loss=0.2016, simple_loss=0.2823, pruned_loss=0.06041, over 8115.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3027, pruned_loss=0.07347, over 1617198.64 frames. 
], batch size: 23, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:03:49,804 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:03:57,198 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.656e+02 3.299e+02 4.467e+02 8.781e+02, threshold=6.597e+02, percent-clipped=5.0 +2023-02-06 15:04:04,263 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=108470.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:10,800 INFO [train.py:901] (3/4) Epoch 14, batch 3400, loss[loss=0.1845, simple_loss=0.2713, pruned_loss=0.04889, over 7915.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.3028, pruned_loss=0.07368, over 1615581.42 frames. ], batch size: 20, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:04:22,163 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=108495.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:04:46,706 INFO [train.py:901] (3/4) Epoch 14, batch 3450, loss[loss=0.2231, simple_loss=0.2956, pruned_loss=0.0753, over 7704.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.3033, pruned_loss=0.07374, over 1617711.69 frames. ], batch size: 18, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:07,907 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.481e+02 3.055e+02 3.627e+02 7.933e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-06 15:05:21,068 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.76 vs. limit=5.0 +2023-02-06 15:05:21,985 INFO [train.py:901] (3/4) Epoch 14, batch 3500, loss[loss=0.2236, simple_loss=0.3086, pruned_loss=0.06927, over 8035.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3025, pruned_loss=0.07334, over 1613718.49 frames. ], batch size: 22, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:05:29,155 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 15:05:31,989 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:05:57,720 INFO [train.py:901] (3/4) Epoch 14, batch 3550, loss[loss=0.2399, simple_loss=0.3208, pruned_loss=0.07951, over 8365.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3021, pruned_loss=0.07327, over 1614560.83 frames. ], batch size: 24, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:17,928 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.417e+02 3.151e+02 4.175e+02 8.210e+02, threshold=6.301e+02, percent-clipped=3.0 +2023-02-06 15:06:31,412 INFO [train.py:901] (3/4) Epoch 14, batch 3600, loss[loss=0.2386, simple_loss=0.3163, pruned_loss=0.08041, over 8461.00 frames. ], tot_loss[loss=0.224, simple_loss=0.302, pruned_loss=0.07293, over 1616129.61 frames. ], batch size: 25, lr: 5.47e-03, grad_scale: 8.0 +2023-02-06 15:06:36,134 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108687.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:06:54,657 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-06 15:07:07,042 INFO [train.py:901] (3/4) Epoch 14, batch 3650, loss[loss=0.1844, simple_loss=0.265, pruned_loss=0.0519, over 7217.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3018, pruned_loss=0.07292, over 1613957.73 frames. 
], batch size: 16, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:27,807 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.861e+02 2.655e+02 3.191e+02 3.880e+02 8.243e+02, threshold=6.382e+02, percent-clipped=2.0 +2023-02-06 15:07:30,614 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 15:07:37,170 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 15:07:41,536 INFO [train.py:901] (3/4) Epoch 14, batch 3700, loss[loss=0.1842, simple_loss=0.2723, pruned_loss=0.04801, over 8240.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3009, pruned_loss=0.07249, over 1615099.04 frames. ], batch size: 22, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:07:41,783 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.7121, 0.8143, 0.8171, 0.3574, 0.8171, 0.6461, 0.0897, 0.7642], + device='cuda:3'), covar=tensor([0.0214, 0.0186, 0.0159, 0.0282, 0.0203, 0.0406, 0.0416, 0.0157], + device='cuda:3'), in_proj_covar=tensor([0.0414, 0.0350, 0.0306, 0.0406, 0.0338, 0.0494, 0.0372, 0.0378], + device='cuda:3'), out_proj_covar=tensor([1.1519e-04, 9.4633e-05, 8.3079e-05, 1.1040e-04, 9.2383e-05, 1.4526e-04, + 1.0375e-04, 1.0391e-04], device='cuda:3') +2023-02-06 15:07:50,772 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108794.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:07:56,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:01,083 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=108809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:08:15,867 INFO [train.py:901] (3/4) Epoch 14, batch 3750, loss[loss=0.2286, simple_loss=0.3123, pruned_loss=0.07248, over 8506.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3001, pruned_loss=0.07212, over 1611623.30 frames. ], batch size: 28, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:08:16,011 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8223, 1.2649, 3.9606, 1.4251, 3.5317, 3.2798, 3.5136, 3.4004], + device='cuda:3'), covar=tensor([0.0620, 0.4378, 0.0577, 0.3705, 0.1146, 0.1079, 0.0668, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0606, 0.0621, 0.0567, 0.0644, 0.0553, 0.0543, 0.0606], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:08:37,495 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.414e+02 2.846e+02 3.664e+02 8.039e+02, threshold=5.692e+02, percent-clipped=5.0 +2023-02-06 15:08:51,947 INFO [train.py:901] (3/4) Epoch 14, batch 3800, loss[loss=0.1912, simple_loss=0.2697, pruned_loss=0.05631, over 8239.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3005, pruned_loss=0.07256, over 1611135.77 frames. 
], batch size: 22, lr: 5.46e-03, grad_scale: 8.0 +2023-02-06 15:09:01,185 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8802, 1.6069, 3.1736, 1.1164, 2.2497, 3.4485, 3.7343, 2.6303], + device='cuda:3'), covar=tensor([0.1436, 0.1899, 0.0504, 0.2945, 0.1286, 0.0454, 0.0534, 0.1215], + device='cuda:3'), in_proj_covar=tensor([0.0274, 0.0305, 0.0268, 0.0298, 0.0284, 0.0247, 0.0369, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:09:12,261 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=108909.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:20,730 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4135, 1.9341, 2.8162, 2.3080, 2.6886, 2.2571, 1.9396, 1.3709], + device='cuda:3'), covar=tensor([0.4195, 0.4401, 0.1391, 0.2755, 0.1846, 0.2349, 0.1785, 0.4232], + device='cuda:3'), in_proj_covar=tensor([0.0894, 0.0897, 0.0743, 0.0871, 0.0950, 0.0827, 0.0710, 0.0778], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:09:26,380 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6495, 1.7935, 1.6467, 2.2523, 1.0409, 1.3868, 1.6513, 1.9160], + device='cuda:3'), covar=tensor([0.0890, 0.0922, 0.1053, 0.0478, 0.1209, 0.1626, 0.0920, 0.0793], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0208, 0.0252, 0.0214, 0.0216, 0.0252, 0.0258, 0.0215], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 15:09:26,887 INFO [train.py:901] (3/4) Epoch 14, batch 3850, loss[loss=0.206, simple_loss=0.2777, pruned_loss=0.06716, over 7921.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3016, pruned_loss=0.07282, over 1612618.02 frames. ], batch size: 20, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:09:33,935 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=108939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:09:35,942 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 15:09:49,061 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.574e+02 3.020e+02 4.517e+02 9.725e+02, threshold=6.039e+02, percent-clipped=15.0 +2023-02-06 15:10:04,288 INFO [train.py:901] (3/4) Epoch 14, batch 3900, loss[loss=0.2217, simple_loss=0.2962, pruned_loss=0.07355, over 7919.00 frames. ], tot_loss[loss=0.2243, simple_loss=0.3022, pruned_loss=0.07324, over 1614755.34 frames. ], batch size: 20, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:39,028 INFO [train.py:901] (3/4) Epoch 14, batch 3950, loss[loss=0.2313, simple_loss=0.3004, pruned_loss=0.08115, over 8088.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3018, pruned_loss=0.073, over 1613627.44 frames. ], batch size: 21, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:10:49,250 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. 
limit=2.0 +2023-02-06 15:10:51,040 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1105, 1.6364, 3.2913, 1.3920, 2.2517, 3.5429, 3.6599, 3.0362], + device='cuda:3'), covar=tensor([0.0968, 0.1571, 0.0336, 0.2210, 0.0999, 0.0257, 0.0489, 0.0659], + device='cuda:3'), in_proj_covar=tensor([0.0272, 0.0305, 0.0269, 0.0298, 0.0285, 0.0248, 0.0368, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:10:53,915 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 15:10:56,306 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:10:56,962 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109055.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:10:59,097 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:00,257 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.458e+02 2.966e+02 3.777e+02 8.079e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-06 15:11:05,994 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7235, 3.0302, 2.5588, 4.0816, 1.7098, 2.2908, 2.5771, 3.3531], + device='cuda:3'), covar=tensor([0.0602, 0.0827, 0.0833, 0.0201, 0.1174, 0.1322, 0.1016, 0.0726], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0207, 0.0250, 0.0211, 0.0214, 0.0251, 0.0255, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 15:11:14,753 INFO [train.py:901] (3/4) Epoch 14, batch 4000, loss[loss=0.2444, simple_loss=0.3235, pruned_loss=0.08264, over 8464.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3011, pruned_loss=0.07206, over 1613731.18 frames. ], batch size: 29, lr: 5.46e-03, grad_scale: 16.0 +2023-02-06 15:11:17,679 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109083.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:11:22,496 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9406, 2.4188, 3.4241, 1.4938, 1.5399, 3.5992, 0.4034, 1.9958], + device='cuda:3'), covar=tensor([0.1738, 0.1510, 0.0469, 0.3241, 0.4012, 0.0283, 0.3273, 0.1752], + device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0176, 0.0108, 0.0217, 0.0259, 0.0111, 0.0162, 0.0173], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 15:11:42,460 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8288, 1.8266, 2.2767, 1.6099, 1.2662, 2.3974, 0.4233, 1.3237], + device='cuda:3'), covar=tensor([0.1986, 0.1329, 0.0466, 0.1725, 0.3688, 0.0334, 0.2919, 0.1856], + device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0175, 0.0107, 0.0217, 0.0258, 0.0111, 0.0161, 0.0172], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 15:11:50,548 INFO [train.py:901] (3/4) Epoch 14, batch 4050, loss[loss=0.1792, simple_loss=0.2579, pruned_loss=0.05031, over 7646.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3011, pruned_loss=0.07228, over 1611168.79 frames. 
], batch size: 19, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:06,805 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:11,645 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.858e+02 2.362e+02 2.684e+02 3.543e+02 7.215e+02, threshold=5.369e+02, percent-clipped=4.0 +2023-02-06 15:12:16,025 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:26,279 INFO [train.py:901] (3/4) Epoch 14, batch 4100, loss[loss=0.2269, simple_loss=0.2978, pruned_loss=0.07798, over 8085.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3025, pruned_loss=0.07251, over 1613973.54 frames. ], batch size: 21, lr: 5.45e-03, grad_scale: 16.0 +2023-02-06 15:12:33,985 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:12:50,497 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:13:02,499 INFO [train.py:901] (3/4) Epoch 14, batch 4150, loss[loss=0.1987, simple_loss=0.2654, pruned_loss=0.06599, over 7705.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3032, pruned_loss=0.07316, over 1617599.16 frames. ], batch size: 18, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:13:06,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5490, 2.1386, 3.2965, 1.3425, 2.5427, 1.9926, 1.8047, 2.3087], + device='cuda:3'), covar=tensor([0.1814, 0.2066, 0.0773, 0.3976, 0.1539, 0.2833, 0.1797, 0.2228], + device='cuda:3'), in_proj_covar=tensor([0.0500, 0.0553, 0.0542, 0.0597, 0.0622, 0.0565, 0.0490, 0.0620], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 15:13:20,630 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4776, 1.6567, 4.4461, 1.9859, 2.2719, 5.0303, 5.0526, 4.3496], + device='cuda:3'), covar=tensor([0.0953, 0.1604, 0.0228, 0.1921, 0.1257, 0.0154, 0.0286, 0.0530], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0304, 0.0268, 0.0298, 0.0283, 0.0246, 0.0366, 0.0294], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:13:23,981 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.677e+02 3.078e+02 3.893e+02 8.547e+02, threshold=6.157e+02, percent-clipped=10.0 +2023-02-06 15:13:28,941 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:13:35,716 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 15:13:37,137 INFO [train.py:901] (3/4) Epoch 14, batch 4200, loss[loss=0.2668, simple_loss=0.328, pruned_loss=0.1028, over 7396.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3017, pruned_loss=0.07237, over 1614320.50 frames. 
], batch size: 72, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:13:59,676 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109310.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:01,000 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:01,568 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 15:14:14,550 INFO [train.py:901] (3/4) Epoch 14, batch 4250, loss[loss=0.2276, simple_loss=0.3056, pruned_loss=0.07479, over 8420.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3013, pruned_loss=0.07201, over 1611599.55 frames. ], batch size: 48, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:14:18,150 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:18,812 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:14:35,684 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.488e+02 3.016e+02 3.845e+02 8.299e+02, threshold=6.033e+02, percent-clipped=4.0 +2023-02-06 15:14:48,340 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 15:14:48,668 INFO [train.py:901] (3/4) Epoch 14, batch 4300, loss[loss=0.2474, simple_loss=0.3272, pruned_loss=0.08377, over 8557.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3025, pruned_loss=0.07336, over 1609044.75 frames. ], batch size: 31, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:15:01,893 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109399.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:15:16,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3011, 1.9228, 2.7917, 2.2216, 2.6426, 2.2204, 1.8794, 1.3102], + device='cuda:3'), covar=tensor([0.4567, 0.4334, 0.1335, 0.2979, 0.1963, 0.2473, 0.1752, 0.4548], + device='cuda:3'), in_proj_covar=tensor([0.0902, 0.0899, 0.0745, 0.0877, 0.0953, 0.0829, 0.0714, 0.0784], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:15:24,488 INFO [train.py:901] (3/4) Epoch 14, batch 4350, loss[loss=0.198, simple_loss=0.2676, pruned_loss=0.06424, over 7806.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3018, pruned_loss=0.07318, over 1604305.18 frames. ], batch size: 19, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:15:34,088 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 15:15:47,274 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.678e+02 3.270e+02 4.253e+02 1.326e+03, threshold=6.540e+02, percent-clipped=8.0 +2023-02-06 15:16:00,545 INFO [train.py:901] (3/4) Epoch 14, batch 4400, loss[loss=0.2214, simple_loss=0.298, pruned_loss=0.07236, over 8357.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3015, pruned_loss=0.07287, over 1607389.98 frames. 
], batch size: 24, lr: 5.45e-03, grad_scale: 8.0 +2023-02-06 15:16:11,101 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6490, 1.4634, 1.5543, 1.2895, 0.9670, 1.3798, 1.5669, 1.2060], + device='cuda:3'), covar=tensor([0.0556, 0.1231, 0.1634, 0.1399, 0.0649, 0.1456, 0.0702, 0.0693], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0156, 0.0101, 0.0162, 0.0113, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 15:16:15,756 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 15:16:24,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109514.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:16:31,894 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:35,898 INFO [train.py:901] (3/4) Epoch 14, batch 4450, loss[loss=0.2291, simple_loss=0.3112, pruned_loss=0.07352, over 8500.00 frames. ], tot_loss[loss=0.2236, simple_loss=0.3014, pruned_loss=0.07291, over 1608295.26 frames. ], batch size: 26, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:16:45,978 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 15:16:49,990 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:55,370 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:16:58,634 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.456e+02 2.864e+02 3.608e+02 1.087e+03, threshold=5.728e+02, percent-clipped=4.0 +2023-02-06 15:17:04,034 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 15:17:11,135 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 15:17:12,486 INFO [train.py:901] (3/4) Epoch 14, batch 4500, loss[loss=0.241, simple_loss=0.3154, pruned_loss=0.08325, over 8477.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3018, pruned_loss=0.07319, over 1611165.48 frames. ], batch size: 27, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:17:24,283 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109597.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:17:47,002 INFO [train.py:901] (3/4) Epoch 14, batch 4550, loss[loss=0.2322, simple_loss=0.3145, pruned_loss=0.075, over 8300.00 frames. ], tot_loss[loss=0.2244, simple_loss=0.3022, pruned_loss=0.07332, over 1607124.78 frames. 
], batch size: 49, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:17:57,570 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1075, 1.7513, 2.3800, 1.9820, 2.2705, 2.0595, 1.7891, 1.0720], + device='cuda:3'), covar=tensor([0.5057, 0.4707, 0.1748, 0.3132, 0.2193, 0.2671, 0.1819, 0.4983], + device='cuda:3'), in_proj_covar=tensor([0.0898, 0.0897, 0.0743, 0.0872, 0.0951, 0.0827, 0.0711, 0.0780], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:18:05,731 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109656.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:09,030 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.640e+02 3.232e+02 4.162e+02 9.021e+02, threshold=6.464e+02, percent-clipped=8.0 +2023-02-06 15:18:16,725 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109671.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:21,359 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=109677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:22,045 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3521, 1.8795, 3.3587, 1.4418, 2.3959, 3.6991, 3.6886, 3.1884], + device='cuda:3'), covar=tensor([0.0836, 0.1405, 0.0353, 0.2104, 0.1019, 0.0206, 0.0503, 0.0540], + device='cuda:3'), in_proj_covar=tensor([0.0276, 0.0307, 0.0271, 0.0301, 0.0288, 0.0249, 0.0372, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:18:22,792 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3415, 1.6223, 1.6825, 1.0205, 1.7057, 1.2529, 0.3047, 1.5666], + device='cuda:3'), covar=tensor([0.0360, 0.0244, 0.0204, 0.0335, 0.0275, 0.0636, 0.0612, 0.0179], + device='cuda:3'), in_proj_covar=tensor([0.0414, 0.0352, 0.0302, 0.0409, 0.0340, 0.0496, 0.0372, 0.0378], + device='cuda:3'), out_proj_covar=tensor([1.1513e-04, 9.5241e-05, 8.1927e-05, 1.1133e-04, 9.2941e-05, 1.4597e-04, + 1.0355e-04, 1.0382e-04], device='cuda:3') +2023-02-06 15:18:23,263 INFO [train.py:901] (3/4) Epoch 14, batch 4600, loss[loss=0.2655, simple_loss=0.3362, pruned_loss=0.09741, over 8493.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3019, pruned_loss=0.07324, over 1610804.39 frames. ], batch size: 28, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:18:23,340 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:18:56,947 INFO [train.py:901] (3/4) Epoch 14, batch 4650, loss[loss=0.2505, simple_loss=0.3361, pruned_loss=0.08246, over 8017.00 frames. ], tot_loss[loss=0.224, simple_loss=0.302, pruned_loss=0.073, over 1608784.80 frames. ], batch size: 22, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:19:18,715 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.556e+02 3.032e+02 3.907e+02 9.020e+02, threshold=6.065e+02, percent-clipped=4.0 +2023-02-06 15:19:19,811 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 15:19:24,816 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109770.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:19:25,451 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109771.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:19:31,312 INFO [train.py:901] (3/4) Epoch 14, batch 4700, loss[loss=0.2474, simple_loss=0.3286, pruned_loss=0.08315, over 8532.00 frames. ], tot_loss[loss=0.224, simple_loss=0.3019, pruned_loss=0.07311, over 1608625.47 frames. ], batch size: 49, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:19:37,765 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-06 15:19:42,837 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=109795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:19:42,862 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109795.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:19:56,334 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. limit=2.0 +2023-02-06 15:20:06,516 INFO [train.py:901] (3/4) Epoch 14, batch 4750, loss[loss=0.2266, simple_loss=0.3126, pruned_loss=0.07032, over 8607.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.3033, pruned_loss=0.0738, over 1612695.61 frames. ], batch size: 34, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:20:10,490 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 15:20:12,430 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 15:20:26,961 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.643e+02 3.166e+02 4.371e+02 1.104e+03, threshold=6.332e+02, percent-clipped=5.0 +2023-02-06 15:20:40,299 INFO [train.py:901] (3/4) Epoch 14, batch 4800, loss[loss=0.2721, simple_loss=0.3383, pruned_loss=0.1029, over 7984.00 frames. ], tot_loss[loss=0.2258, simple_loss=0.3035, pruned_loss=0.07402, over 1612750.82 frames. ], batch size: 21, lr: 5.44e-03, grad_scale: 8.0 +2023-02-06 15:20:52,070 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-06 15:20:56,651 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-06 15:21:03,220 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 15:21:14,262 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=109927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:16,047 INFO [train.py:901] (3/4) Epoch 14, batch 4850, loss[loss=0.217, simple_loss=0.302, pruned_loss=0.06604, over 7985.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3028, pruned_loss=0.07344, over 1615073.12 frames. ], batch size: 21, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:21:23,121 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. 
limit=2.0 +2023-02-06 15:21:23,510 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=109941.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:31,143 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=109952.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:21:34,569 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3931, 1.7834, 2.6525, 1.2630, 1.8147, 1.7236, 1.5267, 1.8160], + device='cuda:3'), covar=tensor([0.1972, 0.2308, 0.0841, 0.4142, 0.1809, 0.3088, 0.2003, 0.2305], + device='cuda:3'), in_proj_covar=tensor([0.0498, 0.0549, 0.0538, 0.0595, 0.0620, 0.0563, 0.0487, 0.0617], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 15:21:37,039 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.854e+02 3.344e+02 7.947e+02, threshold=5.708e+02, percent-clipped=2.0 +2023-02-06 15:21:49,880 INFO [train.py:901] (3/4) Epoch 14, batch 4900, loss[loss=0.2363, simple_loss=0.3183, pruned_loss=0.07714, over 8465.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3023, pruned_loss=0.07348, over 1614453.06 frames. ], batch size: 25, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:22:18,845 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3610, 1.3193, 2.3429, 1.2213, 2.0450, 2.5078, 2.5968, 2.1125], + device='cuda:3'), covar=tensor([0.1037, 0.1243, 0.0453, 0.1963, 0.0743, 0.0401, 0.0697, 0.0727], + device='cuda:3'), in_proj_covar=tensor([0.0271, 0.0303, 0.0267, 0.0296, 0.0283, 0.0245, 0.0367, 0.0292], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 15:22:19,457 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:20,954 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7240, 1.9346, 2.1488, 1.3619, 2.2068, 1.5208, 0.5058, 1.7793], + device='cuda:3'), covar=tensor([0.0397, 0.0262, 0.0194, 0.0377, 0.0280, 0.0719, 0.0670, 0.0233], + device='cuda:3'), in_proj_covar=tensor([0.0419, 0.0356, 0.0305, 0.0411, 0.0343, 0.0503, 0.0376, 0.0381], + device='cuda:3'), out_proj_covar=tensor([1.1645e-04, 9.6243e-05, 8.2705e-05, 1.1197e-04, 9.3788e-05, 1.4787e-04, + 1.0478e-04, 1.0453e-04], device='cuda:3') +2023-02-06 15:22:24,325 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:26,032 INFO [train.py:901] (3/4) Epoch 14, batch 4950, loss[loss=0.1952, simple_loss=0.2641, pruned_loss=0.06319, over 7443.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3009, pruned_loss=0.07247, over 1613133.76 frames. 
], batch size: 17, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:22:30,967 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110035.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:41,737 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:42,420 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110052.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:45,151 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:48,336 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.585e+02 3.180e+02 4.032e+02 7.448e+02, threshold=6.360e+02, percent-clipped=3.0 +2023-02-06 15:22:49,155 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:22:58,272 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110076.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:00,771 INFO [train.py:901] (3/4) Epoch 14, batch 5000, loss[loss=0.2423, simple_loss=0.3197, pruned_loss=0.08239, over 8639.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3006, pruned_loss=0.07262, over 1612177.94 frames. ], batch size: 39, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:23:33,490 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:34,636 INFO [train.py:901] (3/4) Epoch 14, batch 5050, loss[loss=0.237, simple_loss=0.3191, pruned_loss=0.07748, over 8357.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2993, pruned_loss=0.07195, over 1609763.48 frames. ], batch size: 24, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:23:36,757 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110133.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:38,747 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:23:43,176 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 15:23:57,271 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.889e+02 3.601e+02 4.263e+02 9.587e+02, threshold=7.203e+02, percent-clipped=6.0 +2023-02-06 15:24:09,951 INFO [train.py:901] (3/4) Epoch 14, batch 5100, loss[loss=0.1834, simple_loss=0.2627, pruned_loss=0.05207, over 7294.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2984, pruned_loss=0.07179, over 1607220.61 frames. ], batch size: 16, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:24:42,801 INFO [train.py:901] (3/4) Epoch 14, batch 5150, loss[loss=0.2204, simple_loss=0.3034, pruned_loss=0.0687, over 8257.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3005, pruned_loss=0.07306, over 1612423.32 frames. 
], batch size: 24, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:25:05,057 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.455e+02 3.012e+02 3.817e+02 9.599e+02, threshold=6.024e+02, percent-clipped=2.0 +2023-02-06 15:25:17,480 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7429, 1.7245, 2.4194, 1.3566, 1.1567, 2.4234, 0.3445, 1.3936], + device='cuda:3'), covar=tensor([0.1883, 0.1473, 0.0413, 0.2127, 0.4064, 0.0390, 0.2675, 0.1812], + device='cuda:3'), in_proj_covar=tensor([0.0169, 0.0173, 0.0105, 0.0214, 0.0258, 0.0111, 0.0160, 0.0170], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 15:25:19,324 INFO [train.py:901] (3/4) Epoch 14, batch 5200, loss[loss=0.2047, simple_loss=0.2727, pruned_loss=0.06837, over 7784.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3011, pruned_loss=0.07327, over 1615048.25 frames. ], batch size: 19, lr: 5.43e-03, grad_scale: 8.0 +2023-02-06 15:25:20,641 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1819, 4.1403, 3.7746, 2.0198, 3.7221, 3.7884, 3.7750, 3.4194], + device='cuda:3'), covar=tensor([0.0800, 0.0642, 0.1333, 0.4219, 0.0855, 0.0944, 0.1437, 0.0984], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0397, 0.0401, 0.0495, 0.0393, 0.0398, 0.0387, 0.0346], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 15:25:25,793 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 15:25:27,241 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-06 15:25:33,778 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110301.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:39,486 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 15:25:41,060 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110312.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:47,127 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:53,177 INFO [train.py:901] (3/4) Epoch 14, batch 5250, loss[loss=0.2157, simple_loss=0.303, pruned_loss=0.06415, over 8461.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.2996, pruned_loss=0.07248, over 1611630.02 frames. ], batch size: 25, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:25:57,582 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:25:58,393 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110337.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:15,513 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.532e+02 3.204e+02 3.879e+02 8.466e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 15:26:28,850 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110379.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:29,482 INFO [train.py:901] (3/4) Epoch 14, batch 5300, loss[loss=0.208, simple_loss=0.2754, pruned_loss=0.07033, over 7686.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3007, pruned_loss=0.07286, over 1615116.32 frames. 
], batch size: 18, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:26:37,624 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.00 vs. limit=2.0 +2023-02-06 15:26:39,496 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:48,923 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110406.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:26:56,337 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:27:03,181 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5145, 2.6893, 1.9311, 2.2258, 2.2755, 1.6579, 2.0189, 2.0635], + device='cuda:3'), covar=tensor([0.1304, 0.0332, 0.1010, 0.0512, 0.0620, 0.1292, 0.0923, 0.0814], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0231, 0.0318, 0.0295, 0.0296, 0.0324, 0.0339, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:27:05,003 INFO [train.py:901] (3/4) Epoch 14, batch 5350, loss[loss=0.2039, simple_loss=0.2981, pruned_loss=0.05485, over 8327.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3009, pruned_loss=0.07261, over 1618356.76 frames. ], batch size: 25, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:27:13,486 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-06 15:27:25,505 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.452e+02 3.047e+02 3.791e+02 6.566e+02, threshold=6.094e+02, percent-clipped=2.0 +2023-02-06 15:27:32,804 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:27:36,873 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:27:38,768 INFO [train.py:901] (3/4) Epoch 14, batch 5400, loss[loss=0.211, simple_loss=0.2807, pruned_loss=0.07062, over 7416.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3001, pruned_loss=0.07173, over 1618698.85 frames. ], batch size: 17, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:27:48,333 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:28:08,461 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110521.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:28:14,356 INFO [train.py:901] (3/4) Epoch 14, batch 5450, loss[loss=0.2756, simple_loss=0.3545, pruned_loss=0.09832, over 8517.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2997, pruned_loss=0.07132, over 1618628.98 frames. ], batch size: 28, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:28:30,476 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-06 15:28:34,934 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 2.429e+02 2.846e+02 3.589e+02 7.640e+02, threshold=5.692e+02, percent-clipped=1.0 +2023-02-06 15:28:39,770 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1750, 2.1058, 1.5989, 1.8528, 1.7278, 1.3873, 1.5794, 1.6071], + device='cuda:3'), covar=tensor([0.1145, 0.0412, 0.1107, 0.0489, 0.0717, 0.1427, 0.0889, 0.0753], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0235, 0.0322, 0.0297, 0.0298, 0.0328, 0.0344, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:28:47,542 INFO [train.py:901] (3/4) Epoch 14, batch 5500, loss[loss=0.2226, simple_loss=0.3066, pruned_loss=0.06933, over 8258.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3008, pruned_loss=0.07204, over 1614865.28 frames. ], batch size: 24, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:28:50,374 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3996, 2.8374, 1.8687, 2.1561, 2.2578, 1.6966, 2.0903, 2.1484], + device='cuda:3'), covar=tensor([0.1849, 0.0397, 0.1197, 0.0792, 0.0734, 0.1520, 0.1245, 0.0971], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0234, 0.0322, 0.0297, 0.0297, 0.0328, 0.0343, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:28:52,324 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:28:55,821 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110592.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:23,305 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:23,844 INFO [train.py:901] (3/4) Epoch 14, batch 5550, loss[loss=0.1861, simple_loss=0.2628, pruned_loss=0.0547, over 7785.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3002, pruned_loss=0.07152, over 1616123.27 frames. ], batch size: 19, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:29:28,291 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. limit=5.0 +2023-02-06 15:29:34,020 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:35,489 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4173, 2.7224, 1.9736, 2.1236, 2.2878, 1.5854, 2.0282, 1.9689], + device='cuda:3'), covar=tensor([0.1482, 0.0363, 0.1012, 0.0655, 0.0656, 0.1479, 0.1023, 0.0896], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0235, 0.0323, 0.0298, 0.0298, 0.0329, 0.0345, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:29:44,466 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.421e+02 3.120e+02 3.692e+02 1.093e+03, threshold=6.240e+02, percent-clipped=9.0 +2023-02-06 15:29:47,080 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110665.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:29:56,913 INFO [train.py:901] (3/4) Epoch 14, batch 5600, loss[loss=0.2036, simple_loss=0.2941, pruned_loss=0.05657, over 8110.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3021, pruned_loss=0.0728, over 1616260.51 frames. 
], batch size: 23, lr: 5.42e-03, grad_scale: 8.0 +2023-02-06 15:29:56,987 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:30,943 INFO [train.py:901] (3/4) Epoch 14, batch 5650, loss[loss=0.1914, simple_loss=0.2793, pruned_loss=0.05174, over 8359.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3026, pruned_loss=0.07333, over 1619521.04 frames. ], batch size: 24, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:30:33,090 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 15:30:47,222 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:49,874 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:54,025 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:30:54,498 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.502e+02 3.092e+02 3.638e+02 5.778e+02, threshold=6.185e+02, percent-clipped=0.0 +2023-02-06 15:31:04,325 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:05,721 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110777.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:07,495 INFO [train.py:901] (3/4) Epoch 14, batch 5700, loss[loss=0.2434, simple_loss=0.3135, pruned_loss=0.08667, over 8346.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3011, pruned_loss=0.07215, over 1619644.68 frames. ], batch size: 25, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:31:07,675 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110780.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:17,913 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=110795.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:22,870 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110802.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:40,945 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 15:31:41,631 INFO [train.py:901] (3/4) Epoch 14, batch 5750, loss[loss=0.1754, simple_loss=0.2643, pruned_loss=0.04329, over 8254.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3002, pruned_loss=0.07163, over 1618302.37 frames. 
], batch size: 22, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:31:51,514 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:31:54,992 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=110848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:04,408 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.447e+02 3.019e+02 3.853e+02 7.521e+02, threshold=6.038e+02, percent-clipped=3.0 +2023-02-06 15:32:10,146 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:14,049 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=110873.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:18,667 INFO [train.py:901] (3/4) Epoch 14, batch 5800, loss[loss=0.2543, simple_loss=0.3356, pruned_loss=0.08655, over 8550.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2991, pruned_loss=0.07127, over 1614984.61 frames. ], batch size: 31, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:32:22,950 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:32:50,169 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5574, 2.7754, 1.7900, 2.2524, 2.3325, 1.5706, 2.1448, 2.1442], + device='cuda:3'), covar=tensor([0.1354, 0.0306, 0.1107, 0.0542, 0.0635, 0.1385, 0.0895, 0.0845], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0231, 0.0321, 0.0294, 0.0296, 0.0326, 0.0341, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:32:53,261 INFO [train.py:901] (3/4) Epoch 14, batch 5850, loss[loss=0.2118, simple_loss=0.3031, pruned_loss=0.06025, over 8516.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2988, pruned_loss=0.0713, over 1613642.99 frames. ], batch size: 28, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:33:02,551 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 15:33:14,136 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.498e+02 3.098e+02 4.112e+02 1.106e+03, threshold=6.195e+02, percent-clipped=10.0 +2023-02-06 15:33:22,883 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=110973.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:33:26,427 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=110976.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:33:28,981 INFO [train.py:901] (3/4) Epoch 14, batch 5900, loss[loss=0.2309, simple_loss=0.3102, pruned_loss=0.07574, over 7681.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2986, pruned_loss=0.07121, over 1614222.84 frames. 
], batch size: 18, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:33:38,586 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5579, 1.8870, 2.0345, 1.0914, 2.1092, 1.3744, 0.5218, 1.7820], + device='cuda:3'), covar=tensor([0.0470, 0.0271, 0.0203, 0.0453, 0.0298, 0.0676, 0.0695, 0.0225], + device='cuda:3'), in_proj_covar=tensor([0.0415, 0.0358, 0.0305, 0.0406, 0.0345, 0.0499, 0.0372, 0.0377], + device='cuda:3'), out_proj_covar=tensor([1.1526e-04, 9.6830e-05, 8.2572e-05, 1.1029e-04, 9.4174e-05, 1.4647e-04, + 1.0367e-04, 1.0325e-04], device='cuda:3') +2023-02-06 15:33:54,412 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:03,738 INFO [train.py:901] (3/4) Epoch 14, batch 5950, loss[loss=0.2018, simple_loss=0.2814, pruned_loss=0.06109, over 7527.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2996, pruned_loss=0.07119, over 1616337.73 frames. ], batch size: 18, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:34:07,780 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111036.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:11,073 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111041.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:17,723 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111051.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:24,233 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.648e+02 3.047e+02 4.016e+02 7.772e+02, threshold=6.093e+02, percent-clipped=5.0 +2023-02-06 15:34:24,461 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111061.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:34,724 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111076.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:34:38,001 INFO [train.py:901] (3/4) Epoch 14, batch 6000, loss[loss=0.2026, simple_loss=0.2965, pruned_loss=0.05433, over 8228.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2995, pruned_loss=0.07118, over 1611896.23 frames. ], batch size: 49, lr: 5.41e-03, grad_scale: 8.0 +2023-02-06 15:34:38,001 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 15:34:50,549 INFO [train.py:935] (3/4) Epoch 14, validation: loss=0.1818, simple_loss=0.2816, pruned_loss=0.04094, over 944034.00 frames. +2023-02-06 15:34:50,550 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 15:34:51,683 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 15:34:56,293 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:35:03,698 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111098.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:35:11,768 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 15:35:27,231 INFO [train.py:901] (3/4) Epoch 14, batch 6050, loss[loss=0.2393, simple_loss=0.3139, pruned_loss=0.08231, over 8668.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2987, pruned_loss=0.07115, over 1611114.12 frames. 
], batch size: 34, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:35:49,432 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.432e+02 2.876e+02 3.526e+02 5.542e+02, threshold=5.752e+02, percent-clipped=0.0 +2023-02-06 15:35:59,319 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-06 15:36:01,590 INFO [train.py:901] (3/4) Epoch 14, batch 6100, loss[loss=0.2448, simple_loss=0.3161, pruned_loss=0.08677, over 8657.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.3001, pruned_loss=0.07149, over 1618487.81 frames. ], batch size: 39, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:36:15,954 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 15:36:24,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111213.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:36:37,111 INFO [train.py:901] (3/4) Epoch 14, batch 6150, loss[loss=0.1927, simple_loss=0.279, pruned_loss=0.05315, over 8025.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2995, pruned_loss=0.07134, over 1617321.79 frames. ], batch size: 22, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:36:37,873 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111230.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:36:46,729 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:36:59,331 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.474e+02 3.213e+02 4.029e+02 8.079e+02, threshold=6.426e+02, percent-clipped=5.0 +2023-02-06 15:37:11,859 INFO [train.py:901] (3/4) Epoch 14, batch 6200, loss[loss=0.2423, simple_loss=0.3208, pruned_loss=0.08186, over 8548.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3006, pruned_loss=0.07262, over 1618676.02 frames. ], batch size: 31, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:37:38,515 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:37:40,663 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8070, 1.7805, 2.3952, 1.8691, 1.2957, 2.4401, 0.4847, 1.4100], + device='cuda:3'), covar=tensor([0.2221, 0.1555, 0.0387, 0.1432, 0.3680, 0.0435, 0.2939, 0.1715], + device='cuda:3'), in_proj_covar=tensor([0.0173, 0.0175, 0.0108, 0.0216, 0.0259, 0.0112, 0.0163, 0.0172], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 15:37:45,102 INFO [train.py:901] (3/4) Epoch 14, batch 6250, loss[loss=0.24, simple_loss=0.3242, pruned_loss=0.07793, over 8286.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3015, pruned_loss=0.07333, over 1618090.04 frames. ], batch size: 23, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:37:54,001 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. 
limit=2.0 +2023-02-06 15:37:55,169 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:37:55,819 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:38:08,441 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.286e+02 2.818e+02 3.691e+02 1.208e+03, threshold=5.637e+02, percent-clipped=2.0 +2023-02-06 15:38:13,382 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:38:20,509 INFO [train.py:901] (3/4) Epoch 14, batch 6300, loss[loss=0.2063, simple_loss=0.2848, pruned_loss=0.06391, over 8035.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3011, pruned_loss=0.07288, over 1619928.26 frames. ], batch size: 20, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:38:55,237 INFO [train.py:901] (3/4) Epoch 14, batch 6350, loss[loss=0.2047, simple_loss=0.2838, pruned_loss=0.06276, over 7804.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3016, pruned_loss=0.07327, over 1616367.43 frames. ], batch size: 20, lr: 5.40e-03, grad_scale: 4.0 +2023-02-06 15:38:56,128 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3884, 2.3974, 1.6140, 2.0972, 2.0935, 1.3247, 1.7959, 1.9521], + device='cuda:3'), covar=tensor([0.1519, 0.0398, 0.1186, 0.0625, 0.0615, 0.1575, 0.0979, 0.0913], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0232, 0.0323, 0.0293, 0.0298, 0.0326, 0.0341, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:38:58,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111435.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:05,112 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3728, 1.3681, 2.3064, 1.1901, 2.1363, 2.4975, 2.6198, 2.1153], + device='cuda:3'), covar=tensor([0.1045, 0.1301, 0.0492, 0.2062, 0.0740, 0.0409, 0.0702, 0.0755], + device='cuda:3'), in_proj_covar=tensor([0.0275, 0.0307, 0.0269, 0.0299, 0.0285, 0.0245, 0.0371, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:39:10,426 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1003, 2.9025, 2.1376, 2.4754, 2.4328, 1.9268, 2.2800, 2.7069], + device='cuda:3'), covar=tensor([0.1277, 0.0358, 0.0928, 0.0668, 0.0638, 0.1190, 0.0982, 0.0798], + device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0232, 0.0323, 0.0294, 0.0298, 0.0326, 0.0341, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:39:17,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.340e+02 2.891e+02 3.552e+02 9.934e+02, threshold=5.783e+02, percent-clipped=8.0 +2023-02-06 15:39:23,083 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111469.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:30,958 INFO [train.py:901] (3/4) Epoch 14, batch 6400, loss[loss=0.2246, simple_loss=0.3078, pruned_loss=0.07071, over 8644.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3013, pruned_loss=0.07315, over 1616004.70 frames. 
], batch size: 39, lr: 5.40e-03, grad_scale: 8.0 +2023-02-06 15:39:35,210 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111486.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:39:40,668 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111494.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:40:05,255 INFO [train.py:901] (3/4) Epoch 14, batch 6450, loss[loss=0.1915, simple_loss=0.267, pruned_loss=0.05795, over 6777.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3, pruned_loss=0.07241, over 1612968.63 frames. ], batch size: 15, lr: 5.40e-03, grad_scale: 8.0 +2023-02-06 15:40:26,319 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.418e+02 3.184e+02 3.807e+02 1.482e+03, threshold=6.367e+02, percent-clipped=8.0 +2023-02-06 15:40:39,077 INFO [train.py:901] (3/4) Epoch 14, batch 6500, loss[loss=0.1917, simple_loss=0.2666, pruned_loss=0.05842, over 7201.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3007, pruned_loss=0.07313, over 1607905.26 frames. ], batch size: 16, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:40:44,376 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:40:55,096 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111601.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:41:12,108 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:41:14,602 INFO [train.py:901] (3/4) Epoch 14, batch 6550, loss[loss=0.1822, simple_loss=0.2598, pruned_loss=0.0523, over 7252.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3015, pruned_loss=0.07343, over 1609317.74 frames. ], batch size: 16, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:41:24,419 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 15:41:35,852 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.348e+02 2.515e+02 3.055e+02 3.900e+02 7.605e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-06 15:41:43,341 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 15:41:47,955 INFO [train.py:901] (3/4) Epoch 14, batch 6600, loss[loss=0.2141, simple_loss=0.2972, pruned_loss=0.06543, over 8322.00 frames. ], tot_loss[loss=0.2248, simple_loss=0.3021, pruned_loss=0.07374, over 1612303.51 frames. ], batch size: 25, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:41:55,651 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:42:03,625 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:42:14,398 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111716.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:42:23,468 INFO [train.py:901] (3/4) Epoch 14, batch 6650, loss[loss=0.1982, simple_loss=0.2682, pruned_loss=0.06408, over 7696.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3014, pruned_loss=0.0731, over 1613396.19 frames. 
], batch size: 18, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:42:45,230 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.391e+02 3.105e+02 3.860e+02 7.189e+02, threshold=6.209e+02, percent-clipped=3.0 +2023-02-06 15:42:48,109 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3387, 1.4785, 1.3992, 1.7758, 0.8025, 1.2000, 1.3355, 1.5258], + device='cuda:3'), covar=tensor([0.0884, 0.0872, 0.1062, 0.0565, 0.1151, 0.1503, 0.0820, 0.0711], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0207, 0.0256, 0.0215, 0.0216, 0.0253, 0.0261, 0.0217], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 15:42:49,649 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 15:42:51,481 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3815, 1.6061, 1.6305, 0.9985, 1.6769, 1.3095, 0.2509, 1.5786], + device='cuda:3'), covar=tensor([0.0334, 0.0266, 0.0249, 0.0388, 0.0330, 0.0705, 0.0638, 0.0184], + device='cuda:3'), in_proj_covar=tensor([0.0409, 0.0354, 0.0302, 0.0405, 0.0339, 0.0493, 0.0369, 0.0376], + device='cuda:3'), out_proj_covar=tensor([1.1356e-04, 9.5802e-05, 8.1746e-05, 1.1006e-04, 9.2653e-05, 1.4440e-04, + 1.0266e-04, 1.0282e-04], device='cuda:3') +2023-02-06 15:42:57,306 INFO [train.py:901] (3/4) Epoch 14, batch 6700, loss[loss=0.2488, simple_loss=0.3292, pruned_loss=0.08419, over 8256.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3011, pruned_loss=0.07289, over 1614432.67 frames. ], batch size: 24, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:43:16,632 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=111809.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:43:32,517 INFO [train.py:901] (3/4) Epoch 14, batch 6750, loss[loss=0.2035, simple_loss=0.2809, pruned_loss=0.06308, over 7656.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3002, pruned_loss=0.07239, over 1609079.74 frames. ], batch size: 19, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:43:32,588 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=111830.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:43:54,029 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.559e+02 3.020e+02 4.182e+02 1.269e+03, threshold=6.039e+02, percent-clipped=6.0 +2023-02-06 15:44:02,386 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 15:44:02,882 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 15:44:07,193 INFO [train.py:901] (3/4) Epoch 14, batch 6800, loss[loss=0.1931, simple_loss=0.2707, pruned_loss=0.05776, over 7796.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.2994, pruned_loss=0.07189, over 1610711.76 frames. ], batch size: 19, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:44:40,418 INFO [train.py:901] (3/4) Epoch 14, batch 6850, loss[loss=0.2365, simple_loss=0.308, pruned_loss=0.08253, over 7648.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.2997, pruned_loss=0.07205, over 1610259.52 frames. ], batch size: 19, lr: 5.39e-03, grad_scale: 8.0 +2023-02-06 15:44:51,193 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 15:44:52,737 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=111945.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:45:01,428 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=111958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:45:03,874 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.542e+02 3.126e+02 4.226e+02 8.027e+02, threshold=6.251e+02, percent-clipped=7.0 +2023-02-06 15:45:16,603 INFO [train.py:901] (3/4) Epoch 14, batch 6900, loss[loss=0.214, simple_loss=0.2942, pruned_loss=0.06692, over 8135.00 frames. ], tot_loss[loss=0.223, simple_loss=0.301, pruned_loss=0.07247, over 1616005.79 frames. ], batch size: 22, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:45:18,711 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=111983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:45:50,586 INFO [train.py:901] (3/4) Epoch 14, batch 6950, loss[loss=0.2131, simple_loss=0.2905, pruned_loss=0.06783, over 7435.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3023, pruned_loss=0.07271, over 1619023.33 frames. ], batch size: 17, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:45:58,670 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 15:46:13,925 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.419e+02 2.987e+02 3.531e+02 6.552e+02, threshold=5.974e+02, percent-clipped=1.0 +2023-02-06 15:46:25,966 INFO [train.py:901] (3/4) Epoch 14, batch 7000, loss[loss=0.1618, simple_loss=0.2536, pruned_loss=0.03499, over 7817.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3021, pruned_loss=0.07229, over 1618888.41 frames. ], batch size: 20, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:46:28,776 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:46:28,795 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9601, 1.5486, 3.2727, 1.3706, 2.2101, 3.5448, 3.6978, 3.0141], + device='cuda:3'), covar=tensor([0.1119, 0.1647, 0.0342, 0.2206, 0.1068, 0.0244, 0.0489, 0.0621], + device='cuda:3'), in_proj_covar=tensor([0.0277, 0.0306, 0.0268, 0.0298, 0.0284, 0.0244, 0.0369, 0.0295], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:46:41,421 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7261, 3.7104, 3.4115, 1.8816, 3.2765, 3.4287, 3.3830, 3.2273], + device='cuda:3'), covar=tensor([0.1019, 0.0787, 0.1147, 0.5031, 0.1064, 0.1233, 0.1432, 0.0961], + device='cuda:3'), in_proj_covar=tensor([0.0485, 0.0397, 0.0403, 0.0500, 0.0396, 0.0400, 0.0390, 0.0346], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 15:46:59,913 INFO [train.py:901] (3/4) Epoch 14, batch 7050, loss[loss=0.2676, simple_loss=0.3407, pruned_loss=0.09726, over 8528.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.3024, pruned_loss=0.07262, over 1618736.87 frames. 
], batch size: 28, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:47:15,936 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112153.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 15:47:21,801 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.608e+02 3.153e+02 4.211e+02 1.237e+03, threshold=6.307e+02, percent-clipped=12.0 +2023-02-06 15:47:35,265 INFO [train.py:901] (3/4) Epoch 14, batch 7100, loss[loss=0.1908, simple_loss=0.277, pruned_loss=0.05236, over 8029.00 frames. ], tot_loss[loss=0.2233, simple_loss=0.3021, pruned_loss=0.0722, over 1620084.34 frames. ], batch size: 22, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:47:50,285 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:47:59,153 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 15:48:07,018 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0384, 1.6476, 1.4590, 1.6509, 1.4181, 1.2473, 1.2675, 1.3756], + device='cuda:3'), covar=tensor([0.1017, 0.0406, 0.1084, 0.0423, 0.0593, 0.1321, 0.0836, 0.0635], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0234, 0.0323, 0.0298, 0.0301, 0.0329, 0.0345, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 15:48:07,693 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112226.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:48:10,129 INFO [train.py:901] (3/4) Epoch 14, batch 7150, loss[loss=0.258, simple_loss=0.3345, pruned_loss=0.09075, over 8334.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3008, pruned_loss=0.07181, over 1618059.39 frames. ], batch size: 26, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:48:16,275 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2875, 1.2592, 3.3618, 1.1052, 2.9969, 2.7585, 3.0479, 2.9630], + device='cuda:3'), covar=tensor([0.0635, 0.3751, 0.0817, 0.3534, 0.1304, 0.1138, 0.0704, 0.0802], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0599, 0.0625, 0.0566, 0.0638, 0.0550, 0.0542, 0.0603], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 15:48:31,549 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.371e+02 2.859e+02 3.664e+02 7.587e+02, threshold=5.717e+02, percent-clipped=3.0 +2023-02-06 15:48:35,310 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-06 15:48:35,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112268.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:48:43,215 INFO [train.py:901] (3/4) Epoch 14, batch 7200, loss[loss=0.2435, simple_loss=0.3218, pruned_loss=0.08255, over 8028.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3016, pruned_loss=0.07219, over 1618293.46 frames. 
], batch size: 22, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:48:51,521 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1421, 1.7214, 1.8261, 1.6144, 1.1200, 1.7126, 2.0916, 1.9072], + device='cuda:3'), covar=tensor([0.0442, 0.1070, 0.1432, 0.1197, 0.0522, 0.1253, 0.0535, 0.0506], + device='cuda:3'), in_proj_covar=tensor([0.0095, 0.0150, 0.0187, 0.0153, 0.0099, 0.0159, 0.0113, 0.0135], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 15:49:18,427 INFO [train.py:901] (3/4) Epoch 14, batch 7250, loss[loss=0.1932, simple_loss=0.2683, pruned_loss=0.05904, over 7710.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3005, pruned_loss=0.07171, over 1613255.26 frames. ], batch size: 18, lr: 5.38e-03, grad_scale: 8.0 +2023-02-06 15:49:26,297 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6142, 1.4119, 1.5097, 1.2667, 0.8803, 1.3427, 1.5431, 1.2321], + device='cuda:3'), covar=tensor([0.0495, 0.1207, 0.1618, 0.1373, 0.0534, 0.1430, 0.0666, 0.0653], + device='cuda:3'), in_proj_covar=tensor([0.0095, 0.0151, 0.0188, 0.0154, 0.0100, 0.0160, 0.0114, 0.0135], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0008, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 15:49:38,652 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5893, 4.5557, 4.1628, 1.9395, 4.0906, 4.2370, 4.1752, 4.0238], + device='cuda:3'), covar=tensor([0.0615, 0.0475, 0.0868, 0.4478, 0.0739, 0.0712, 0.1149, 0.0591], + device='cuda:3'), in_proj_covar=tensor([0.0483, 0.0397, 0.0405, 0.0502, 0.0397, 0.0400, 0.0392, 0.0346], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 15:49:39,904 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.708e+02 3.212e+02 3.989e+02 8.387e+02, threshold=6.424e+02, percent-clipped=5.0 +2023-02-06 15:49:52,050 INFO [train.py:901] (3/4) Epoch 14, batch 7300, loss[loss=0.2281, simple_loss=0.3107, pruned_loss=0.07272, over 8323.00 frames. ], tot_loss[loss=0.223, simple_loss=0.301, pruned_loss=0.07251, over 1613178.28 frames. ], batch size: 25, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:50:11,942 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112407.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:50:26,720 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:50:28,014 INFO [train.py:901] (3/4) Epoch 14, batch 7350, loss[loss=0.1941, simple_loss=0.2815, pruned_loss=0.05331, over 7658.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.301, pruned_loss=0.07269, over 1613019.17 frames. ], batch size: 19, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:50:40,030 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 15:50:49,906 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.462e+02 2.972e+02 3.682e+02 1.093e+03, threshold=5.943e+02, percent-clipped=5.0 +2023-02-06 15:50:59,939 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 15:51:02,056 INFO [train.py:901] (3/4) Epoch 14, batch 7400, loss[loss=0.2346, simple_loss=0.2982, pruned_loss=0.08549, over 7426.00 frames. 
], tot_loss[loss=0.2232, simple_loss=0.3011, pruned_loss=0.07267, over 1616631.71 frames. ], batch size: 17, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:51:10,094 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.73 vs. limit=2.0 +2023-02-06 15:51:32,627 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112524.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:51:37,189 INFO [train.py:901] (3/4) Epoch 14, batch 7450, loss[loss=0.2167, simple_loss=0.3006, pruned_loss=0.06645, over 8338.00 frames. ], tot_loss[loss=0.2231, simple_loss=0.3014, pruned_loss=0.07246, over 1614955.12 frames. ], batch size: 26, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:51:41,773 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 15:51:46,946 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:51:51,003 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112549.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 15:51:59,932 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.473e+02 3.112e+02 3.710e+02 6.215e+02, threshold=6.224e+02, percent-clipped=1.0 +2023-02-06 15:52:13,216 INFO [train.py:901] (3/4) Epoch 14, batch 7500, loss[loss=0.1953, simple_loss=0.2868, pruned_loss=0.05187, over 8283.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3004, pruned_loss=0.07218, over 1611603.90 frames. ], batch size: 23, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:52:13,389 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:52:23,793 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:52:47,704 INFO [train.py:901] (3/4) Epoch 14, batch 7550, loss[loss=0.2116, simple_loss=0.2883, pruned_loss=0.06742, over 7967.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3004, pruned_loss=0.07199, over 1611660.91 frames. ], batch size: 21, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:52:51,318 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:53:11,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.402e+02 2.890e+02 3.643e+02 7.164e+02, threshold=5.781e+02, percent-clipped=3.0 +2023-02-06 15:53:23,629 INFO [train.py:901] (3/4) Epoch 14, batch 7600, loss[loss=0.1611, simple_loss=0.2398, pruned_loss=0.04119, over 7544.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2995, pruned_loss=0.07139, over 1613712.51 frames. ], batch size: 18, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:53:40,436 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6497, 1.3335, 1.5820, 1.2452, 0.8778, 1.3308, 1.5954, 1.3270], + device='cuda:3'), covar=tensor([0.0498, 0.1223, 0.1654, 0.1399, 0.0596, 0.1504, 0.0659, 0.0634], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0155, 0.0101, 0.0161, 0.0115, 0.0137], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 15:53:57,111 INFO [train.py:901] (3/4) Epoch 14, batch 7650, loss[loss=0.2502, simple_loss=0.317, pruned_loss=0.09167, over 7539.00 frames. 
], tot_loss[loss=0.2216, simple_loss=0.2998, pruned_loss=0.07172, over 1611825.16 frames. ], batch size: 18, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:54:11,117 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:54:18,468 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.546e+02 2.941e+02 3.649e+02 7.123e+02, threshold=5.882e+02, percent-clipped=5.0 +2023-02-06 15:54:32,365 INFO [train.py:901] (3/4) Epoch 14, batch 7700, loss[loss=0.2145, simple_loss=0.3033, pruned_loss=0.06284, over 8304.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3006, pruned_loss=0.0716, over 1615633.85 frames. ], batch size: 25, lr: 5.37e-03, grad_scale: 8.0 +2023-02-06 15:54:44,372 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 15:54:45,579 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=112799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:54:52,889 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 15:55:03,119 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=112824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:55:06,911 INFO [train.py:901] (3/4) Epoch 14, batch 7750, loss[loss=0.1753, simple_loss=0.2648, pruned_loss=0.04286, over 7798.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3008, pruned_loss=0.07158, over 1618362.72 frames. ], batch size: 19, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:55:28,275 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.453e+02 3.172e+02 4.245e+02 8.131e+02, threshold=6.343e+02, percent-clipped=10.0 +2023-02-06 15:55:30,956 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=112866.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:55:41,029 INFO [train.py:901] (3/4) Epoch 14, batch 7800, loss[loss=0.2146, simple_loss=0.2865, pruned_loss=0.07136, over 5962.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3003, pruned_loss=0.07147, over 1613811.99 frames. ], batch size: 13, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:56:12,171 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112924.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:16,242 INFO [train.py:901] (3/4) Epoch 14, batch 7850, loss[loss=0.2113, simple_loss=0.3042, pruned_loss=0.05914, over 8105.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3007, pruned_loss=0.07159, over 1615730.69 frames. 
], batch size: 23, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:56:22,361 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:22,438 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=112939.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:29,118 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7186, 1.9662, 2.2491, 1.5717, 2.2693, 1.4986, 0.9113, 1.9008], + device='cuda:3'), covar=tensor([0.0457, 0.0259, 0.0181, 0.0369, 0.0278, 0.0662, 0.0601, 0.0232], + device='cuda:3'), in_proj_covar=tensor([0.0415, 0.0355, 0.0302, 0.0407, 0.0342, 0.0495, 0.0368, 0.0380], + device='cuda:3'), out_proj_covar=tensor([1.1508e-04, 9.5920e-05, 8.1591e-05, 1.1046e-04, 9.3161e-05, 1.4483e-04, + 1.0230e-04, 1.0379e-04], device='cuda:3') +2023-02-06 15:56:37,783 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.504e+02 3.067e+02 3.726e+02 7.698e+02, threshold=6.135e+02, percent-clipped=2.0 +2023-02-06 15:56:49,070 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=112979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:56:49,646 INFO [train.py:901] (3/4) Epoch 14, batch 7900, loss[loss=0.2142, simple_loss=0.3034, pruned_loss=0.06249, over 8322.00 frames. ], tot_loss[loss=0.223, simple_loss=0.3014, pruned_loss=0.0723, over 1618686.10 frames. ], batch size: 25, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:57:22,235 INFO [train.py:901] (3/4) Epoch 14, batch 7950, loss[loss=0.2696, simple_loss=0.3409, pruned_loss=0.09918, over 8689.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3004, pruned_loss=0.07179, over 1619926.94 frames. ], batch size: 34, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:57:28,322 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113039.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:57:38,049 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113054.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:57:43,248 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.654e+02 3.191e+02 4.041e+02 1.304e+03, threshold=6.382e+02, percent-clipped=5.0 +2023-02-06 15:57:55,258 INFO [train.py:901] (3/4) Epoch 14, batch 8000, loss[loss=0.2463, simple_loss=0.3163, pruned_loss=0.08813, over 7109.00 frames. ], tot_loss[loss=0.2232, simple_loss=0.3014, pruned_loss=0.07252, over 1620114.00 frames. ], batch size: 71, lr: 5.36e-03, grad_scale: 8.0 +2023-02-06 15:58:04,704 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:23,492 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113122.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:28,568 INFO [train.py:901] (3/4) Epoch 14, batch 8050, loss[loss=0.2123, simple_loss=0.2834, pruned_loss=0.07058, over 7191.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3001, pruned_loss=0.07267, over 1599384.71 frames. 
], batch size: 16, lr: 5.36e-03, grad_scale: 16.0 +2023-02-06 15:58:40,131 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113147.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 15:58:49,778 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.323e+02 2.856e+02 3.288e+02 8.076e+02, threshold=5.712e+02, percent-clipped=1.0 +2023-02-06 15:59:01,701 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 15:59:06,257 INFO [train.py:901] (3/4) Epoch 15, batch 0, loss[loss=0.2239, simple_loss=0.3013, pruned_loss=0.07324, over 7653.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3013, pruned_loss=0.07324, over 7653.00 frames. ], batch size: 19, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 15:59:06,258 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 15:59:17,268 INFO [train.py:935] (3/4) Epoch 15, validation: loss=0.1825, simple_loss=0.283, pruned_loss=0.04098, over 944034.00 frames. +2023-02-06 15:59:17,269 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 15:59:32,293 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 15:59:48,307 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-06 15:59:49,088 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 15:59:51,454 INFO [train.py:901] (3/4) Epoch 15, batch 50, loss[loss=0.2187, simple_loss=0.2839, pruned_loss=0.0768, over 7798.00 frames. ], tot_loss[loss=0.2302, simple_loss=0.3091, pruned_loss=0.07564, over 366937.13 frames. ], batch size: 19, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:00:08,694 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 16:00:21,179 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113252.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:00:27,788 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.549e+02 3.077e+02 3.582e+02 9.445e+02, threshold=6.153e+02, percent-clipped=5.0 +2023-02-06 16:00:28,490 INFO [train.py:901] (3/4) Epoch 15, batch 100, loss[loss=0.2551, simple_loss=0.3256, pruned_loss=0.09233, over 8648.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.3065, pruned_loss=0.07443, over 643929.27 frames. ], batch size: 34, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:00:29,906 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-06 16:00:39,309 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1489, 4.1223, 3.7704, 1.8390, 3.6618, 3.7192, 3.6675, 3.5576], + device='cuda:3'), covar=tensor([0.0998, 0.0688, 0.1259, 0.5006, 0.1108, 0.1163, 0.1536, 0.0997], + device='cuda:3'), in_proj_covar=tensor([0.0484, 0.0404, 0.0403, 0.0503, 0.0402, 0.0400, 0.0393, 0.0352], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:00:41,981 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:00:50,244 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:00,331 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113310.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:02,087 INFO [train.py:901] (3/4) Epoch 15, batch 150, loss[loss=0.2041, simple_loss=0.2785, pruned_loss=0.06488, over 7436.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3026, pruned_loss=0.07261, over 861101.22 frames. ], batch size: 17, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:01:06,867 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113320.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:17,345 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113335.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:28,771 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:01:32,821 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6673, 1.9836, 2.1819, 1.2116, 2.2587, 1.4563, 0.6638, 1.8959], + device='cuda:3'), covar=tensor([0.0499, 0.0269, 0.0207, 0.0457, 0.0282, 0.0666, 0.0617, 0.0239], + device='cuda:3'), in_proj_covar=tensor([0.0416, 0.0358, 0.0304, 0.0408, 0.0343, 0.0497, 0.0370, 0.0380], + device='cuda:3'), out_proj_covar=tensor([1.1544e-04, 9.6722e-05, 8.2157e-05, 1.1080e-04, 9.3424e-05, 1.4533e-04, + 1.0253e-04, 1.0406e-04], device='cuda:3') +2023-02-06 16:01:37,328 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.511e+02 3.032e+02 4.146e+02 1.005e+03, threshold=6.064e+02, percent-clipped=3.0 +2023-02-06 16:01:38,025 INFO [train.py:901] (3/4) Epoch 15, batch 200, loss[loss=0.2244, simple_loss=0.3133, pruned_loss=0.06776, over 8485.00 frames. ], tot_loss[loss=0.2249, simple_loss=0.3028, pruned_loss=0.07349, over 1030001.13 frames. ], batch size: 26, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:01:46,251 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:02:01,351 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:02:11,081 INFO [train.py:901] (3/4) Epoch 15, batch 250, loss[loss=0.1693, simple_loss=0.2551, pruned_loss=0.0418, over 7451.00 frames. ], tot_loss[loss=0.2269, simple_loss=0.3045, pruned_loss=0.07463, over 1157373.24 frames. ], batch size: 17, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:02:19,373 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-06 16:02:28,604 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 16:02:43,772 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.666e+02 3.062e+02 4.026e+02 8.735e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 16:02:44,413 INFO [train.py:901] (3/4) Epoch 15, batch 300, loss[loss=0.2359, simple_loss=0.3136, pruned_loss=0.07911, over 8022.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.3049, pruned_loss=0.07507, over 1261826.14 frames. ], batch size: 22, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:03:15,226 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 16:03:19,322 INFO [train.py:901] (3/4) Epoch 15, batch 350, loss[loss=0.201, simple_loss=0.2877, pruned_loss=0.05716, over 8245.00 frames. ], tot_loss[loss=0.2245, simple_loss=0.3023, pruned_loss=0.07338, over 1339681.15 frames. ], batch size: 24, lr: 5.17e-03, grad_scale: 16.0 +2023-02-06 16:03:52,045 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.415e+02 3.115e+02 3.728e+02 6.919e+02, threshold=6.229e+02, percent-clipped=2.0 +2023-02-06 16:03:52,739 INFO [train.py:901] (3/4) Epoch 15, batch 400, loss[loss=0.2325, simple_loss=0.3079, pruned_loss=0.07854, over 7804.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2996, pruned_loss=0.07153, over 1398639.05 frames. ], batch size: 20, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:04:17,492 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113596.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:04:28,817 INFO [train.py:901] (3/4) Epoch 15, batch 450, loss[loss=0.2063, simple_loss=0.2853, pruned_loss=0.06365, over 7976.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2997, pruned_loss=0.0709, over 1451634.76 frames. ], batch size: 21, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:04:30,185 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7327, 1.3363, 1.5570, 1.2376, 0.7854, 1.3351, 1.4750, 1.4432], + device='cuda:3'), covar=tensor([0.0558, 0.1296, 0.1717, 0.1454, 0.0640, 0.1518, 0.0733, 0.0631], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0155, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 16:04:43,286 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:04:56,118 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113654.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:01,032 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.616e+02 3.268e+02 4.141e+02 9.119e+02, threshold=6.536e+02, percent-clipped=2.0 +2023-02-06 16:05:01,750 INFO [train.py:901] (3/4) Epoch 15, batch 500, loss[loss=0.2409, simple_loss=0.3182, pruned_loss=0.08182, over 6685.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.301, pruned_loss=0.07188, over 1487024.32 frames. 
], batch size: 72, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:05:02,560 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=113664.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:12,319 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113679.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:35,427 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=113711.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:05:36,635 INFO [train.py:901] (3/4) Epoch 15, batch 550, loss[loss=0.2547, simple_loss=0.3299, pruned_loss=0.08977, over 8388.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.302, pruned_loss=0.07291, over 1516906.70 frames. ], batch size: 49, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:06:09,825 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.516e+02 3.119e+02 4.209e+02 9.524e+02, threshold=6.239e+02, percent-clipped=4.0 +2023-02-06 16:06:10,537 INFO [train.py:901] (3/4) Epoch 15, batch 600, loss[loss=0.2092, simple_loss=0.3011, pruned_loss=0.05867, over 8246.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3022, pruned_loss=0.07306, over 1542550.81 frames. ], batch size: 24, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:06:17,854 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1596, 1.7261, 4.1750, 1.8828, 2.6157, 4.6166, 4.6122, 4.0830], + device='cuda:3'), covar=tensor([0.1065, 0.1578, 0.0234, 0.1837, 0.0976, 0.0180, 0.0432, 0.0469], + device='cuda:3'), in_proj_covar=tensor([0.0278, 0.0308, 0.0273, 0.0301, 0.0288, 0.0247, 0.0377, 0.0299], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:06:24,219 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 16:06:44,320 INFO [train.py:901] (3/4) Epoch 15, batch 650, loss[loss=0.2036, simple_loss=0.2778, pruned_loss=0.06473, over 7813.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3027, pruned_loss=0.07325, over 1561481.66 frames. ], batch size: 20, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:06:53,837 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3080, 1.4280, 1.3680, 1.8076, 0.6500, 1.1637, 1.2675, 1.4219], + device='cuda:3'), covar=tensor([0.0892, 0.0799, 0.1113, 0.0558, 0.1180, 0.1462, 0.0808, 0.0732], + device='cuda:3'), in_proj_covar=tensor([0.0226, 0.0201, 0.0247, 0.0207, 0.0208, 0.0244, 0.0249, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0004, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:07:19,220 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.270e+02 2.767e+02 3.649e+02 9.673e+02, threshold=5.535e+02, percent-clipped=4.0 +2023-02-06 16:07:19,888 INFO [train.py:901] (3/4) Epoch 15, batch 700, loss[loss=0.2454, simple_loss=0.3319, pruned_loss=0.07948, over 8520.00 frames. ], tot_loss[loss=0.2246, simple_loss=0.3027, pruned_loss=0.07331, over 1573998.24 frames. ], batch size: 28, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:07:53,445 INFO [train.py:901] (3/4) Epoch 15, batch 750, loss[loss=0.2053, simple_loss=0.2952, pruned_loss=0.05775, over 8471.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3013, pruned_loss=0.07204, over 1581497.59 frames. 
], batch size: 25, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:08:11,157 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 16:08:14,525 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8059, 6.0211, 5.0989, 2.6121, 5.2075, 5.6455, 5.5510, 5.1759], + device='cuda:3'), covar=tensor([0.0637, 0.0397, 0.0999, 0.4168, 0.0711, 0.0706, 0.1074, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0399, 0.0398, 0.0497, 0.0396, 0.0396, 0.0383, 0.0346], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:08:20,441 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 16:08:29,183 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.267e+02 2.237e+02 2.791e+02 3.511e+02 6.350e+02, threshold=5.582e+02, percent-clipped=4.0 +2023-02-06 16:08:29,880 INFO [train.py:901] (3/4) Epoch 15, batch 800, loss[loss=0.2013, simple_loss=0.2758, pruned_loss=0.06334, over 7808.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2996, pruned_loss=0.07108, over 1586189.97 frames. ], batch size: 20, lr: 5.16e-03, grad_scale: 16.0 +2023-02-06 16:08:32,849 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=113967.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:08:40,790 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=113979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:08:45,869 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8783, 1.6255, 2.1374, 1.8035, 1.9738, 1.9022, 1.5874, 0.7036], + device='cuda:3'), covar=tensor([0.4479, 0.3877, 0.1480, 0.2777, 0.1966, 0.2581, 0.1812, 0.4306], + device='cuda:3'), in_proj_covar=tensor([0.0894, 0.0906, 0.0746, 0.0876, 0.0943, 0.0833, 0.0715, 0.0786], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 16:08:50,096 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=113992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:09:01,983 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114008.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:09:05,302 INFO [train.py:901] (3/4) Epoch 15, batch 850, loss[loss=0.1906, simple_loss=0.2702, pruned_loss=0.05549, over 7698.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2999, pruned_loss=0.07097, over 1596305.92 frames. ], batch size: 18, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:09:17,861 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 16:09:39,413 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.394e+02 2.826e+02 3.443e+02 6.296e+02, threshold=5.653e+02, percent-clipped=1.0 +2023-02-06 16:09:40,794 INFO [train.py:901] (3/4) Epoch 15, batch 900, loss[loss=0.2713, simple_loss=0.335, pruned_loss=0.1039, over 6229.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3005, pruned_loss=0.07187, over 1591161.08 frames. 
], batch size: 72, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:10:02,632 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:10:15,168 INFO [train.py:901] (3/4) Epoch 15, batch 950, loss[loss=0.2282, simple_loss=0.3092, pruned_loss=0.07355, over 8609.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3001, pruned_loss=0.07191, over 1596263.70 frames. ], batch size: 39, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:10:20,921 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 16:10:21,911 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:10:36,823 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114145.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:10:37,014 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 16:10:39,440 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 16:10:49,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.449e+02 2.913e+02 3.851e+02 8.356e+02, threshold=5.826e+02, percent-clipped=3.0 +2023-02-06 16:10:49,926 INFO [train.py:901] (3/4) Epoch 15, batch 1000, loss[loss=0.2394, simple_loss=0.3294, pruned_loss=0.07473, over 8598.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3002, pruned_loss=0.0713, over 1604212.39 frames. ], batch size: 31, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:11:14,211 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 16:11:20,816 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 16:11:25,581 INFO [train.py:901] (3/4) Epoch 15, batch 1050, loss[loss=0.2546, simple_loss=0.3324, pruned_loss=0.08836, over 8326.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.3007, pruned_loss=0.07206, over 1606216.54 frames. ], batch size: 25, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:11:25,604 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-06 16:11:28,873 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0865, 1.1991, 4.1531, 1.8322, 2.3344, 4.7275, 4.7916, 3.9983], + device='cuda:3'), covar=tensor([0.1152, 0.2079, 0.0295, 0.2049, 0.1333, 0.0191, 0.0407, 0.0599], + device='cuda:3'), in_proj_covar=tensor([0.0277, 0.0306, 0.0270, 0.0299, 0.0285, 0.0246, 0.0372, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:11:39,884 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6878, 1.3607, 1.4972, 1.2663, 0.7885, 1.2846, 1.4729, 1.3093], + device='cuda:3'), covar=tensor([0.0532, 0.1282, 0.1724, 0.1382, 0.0634, 0.1515, 0.0732, 0.0628], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0102, 0.0161, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 16:11:57,601 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.504e+02 3.058e+02 3.938e+02 1.189e+03, threshold=6.116e+02, percent-clipped=4.0 +2023-02-06 16:11:58,320 INFO [train.py:901] (3/4) Epoch 15, batch 1100, loss[loss=0.2102, simple_loss=0.2966, pruned_loss=0.06189, over 8603.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3018, pruned_loss=0.07298, over 1610596.37 frames. ], batch size: 31, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:12:11,298 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-06 16:12:26,182 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 16:12:33,000 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 16:12:33,901 INFO [train.py:901] (3/4) Epoch 15, batch 1150, loss[loss=0.2077, simple_loss=0.2872, pruned_loss=0.06408, over 7812.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2999, pruned_loss=0.07134, over 1613223.00 frames. ], batch size: 20, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:12:38,620 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 16:12:59,531 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114350.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:07,357 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.463e+02 3.139e+02 3.955e+02 6.139e+02, threshold=6.277e+02, percent-clipped=1.0 +2023-02-06 16:13:07,982 INFO [train.py:901] (3/4) Epoch 15, batch 1200, loss[loss=0.3208, simple_loss=0.3721, pruned_loss=0.1347, over 6937.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2991, pruned_loss=0.0709, over 1612044.99 frames. ], batch size: 73, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:13:16,147 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114375.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:18,729 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114379.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:36,382 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114404.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:13:42,789 INFO [train.py:901] (3/4) Epoch 15, batch 1250, loss[loss=0.2164, simple_loss=0.2826, pruned_loss=0.07513, over 8246.00 frames. 
], tot_loss[loss=0.222, simple_loss=0.3001, pruned_loss=0.07196, over 1609625.58 frames. ], batch size: 22, lr: 5.15e-03, grad_scale: 16.0 +2023-02-06 16:14:16,856 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.591e+02 3.148e+02 4.129e+02 1.085e+03, threshold=6.295e+02, percent-clipped=6.0 +2023-02-06 16:14:17,473 INFO [train.py:901] (3/4) Epoch 15, batch 1300, loss[loss=0.268, simple_loss=0.3309, pruned_loss=0.1025, over 8591.00 frames. ], tot_loss[loss=0.221, simple_loss=0.299, pruned_loss=0.07147, over 1607283.79 frames. ], batch size: 34, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:14:35,225 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=114489.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:14:51,247 INFO [train.py:901] (3/4) Epoch 15, batch 1350, loss[loss=0.2316, simple_loss=0.3001, pruned_loss=0.08159, over 8609.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.2997, pruned_loss=0.07182, over 1611434.71 frames. ], batch size: 34, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:15:08,699 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0933, 4.1167, 3.7384, 1.8032, 3.6220, 3.7600, 3.7862, 3.5045], + device='cuda:3'), covar=tensor([0.0872, 0.0597, 0.1062, 0.5033, 0.0933, 0.0940, 0.1294, 0.0863], + device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0398, 0.0399, 0.0498, 0.0395, 0.0398, 0.0381, 0.0348], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:15:12,372 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 16:15:26,462 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.434e+02 2.903e+02 3.628e+02 5.826e+02, threshold=5.807e+02, percent-clipped=0.0 +2023-02-06 16:15:27,129 INFO [train.py:901] (3/4) Epoch 15, batch 1400, loss[loss=0.2396, simple_loss=0.3192, pruned_loss=0.07998, over 8472.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2989, pruned_loss=0.07139, over 1609640.01 frames. ], batch size: 27, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:15:54,550 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=114604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:16:00,693 INFO [train.py:901] (3/4) Epoch 15, batch 1450, loss[loss=0.2086, simple_loss=0.2977, pruned_loss=0.05975, over 8519.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2992, pruned_loss=0.07115, over 1612665.70 frames. ], batch size: 28, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:16:08,838 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 16:16:36,177 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.414e+02 3.068e+02 3.744e+02 6.619e+02, threshold=6.136e+02, percent-clipped=3.0 +2023-02-06 16:16:36,891 INFO [train.py:901] (3/4) Epoch 15, batch 1500, loss[loss=0.1618, simple_loss=0.2456, pruned_loss=0.03897, over 7799.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2997, pruned_loss=0.0714, over 1615042.65 frames. 
], batch size: 19, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:16:48,664 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6716, 1.3285, 4.7826, 1.7290, 4.2219, 3.9756, 4.2854, 4.1885], + device='cuda:3'), covar=tensor([0.0548, 0.5103, 0.0462, 0.4144, 0.1126, 0.0851, 0.0624, 0.0562], + device='cuda:3'), in_proj_covar=tensor([0.0549, 0.0609, 0.0634, 0.0576, 0.0651, 0.0554, 0.0548, 0.0614], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 16:16:58,918 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4760, 1.9983, 3.3402, 1.3954, 2.4363, 2.0319, 1.6619, 2.3441], + device='cuda:3'), covar=tensor([0.1800, 0.2301, 0.0639, 0.3978, 0.1506, 0.2858, 0.1971, 0.2029], + device='cuda:3'), in_proj_covar=tensor([0.0498, 0.0551, 0.0537, 0.0601, 0.0622, 0.0565, 0.0493, 0.0621], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:17:00,198 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2336, 2.5171, 3.0243, 1.5662, 3.1149, 1.7234, 1.5786, 1.9989], + device='cuda:3'), covar=tensor([0.0638, 0.0317, 0.0224, 0.0542, 0.0339, 0.0665, 0.0720, 0.0440], + device='cuda:3'), in_proj_covar=tensor([0.0415, 0.0354, 0.0302, 0.0406, 0.0338, 0.0493, 0.0366, 0.0378], + device='cuda:3'), out_proj_covar=tensor([1.1498e-04, 9.5558e-05, 8.1249e-05, 1.1026e-04, 9.1970e-05, 1.4356e-04, + 1.0150e-04, 1.0324e-04], device='cuda:3') +2023-02-06 16:17:09,890 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.89 vs. limit=2.0 +2023-02-06 16:17:11,513 INFO [train.py:901] (3/4) Epoch 15, batch 1550, loss[loss=0.1705, simple_loss=0.2634, pruned_loss=0.03884, over 7654.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2981, pruned_loss=0.07097, over 1609582.21 frames. ], batch size: 19, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:17:26,182 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:17:45,709 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.278e+02 2.828e+02 3.736e+02 6.971e+02, threshold=5.655e+02, percent-clipped=1.0 +2023-02-06 16:17:46,443 INFO [train.py:901] (3/4) Epoch 15, batch 1600, loss[loss=0.2205, simple_loss=0.3055, pruned_loss=0.06778, over 7971.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2987, pruned_loss=0.0707, over 1610934.67 frames. ], batch size: 21, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:18:22,446 INFO [train.py:901] (3/4) Epoch 15, batch 1650, loss[loss=0.1925, simple_loss=0.2698, pruned_loss=0.0576, over 7700.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3005, pruned_loss=0.07191, over 1614095.45 frames. ], batch size: 18, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:18:55,126 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=114860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:18:56,281 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.261e+02 2.429e+02 2.845e+02 3.384e+02 6.803e+02, threshold=5.691e+02, percent-clipped=1.0 +2023-02-06 16:18:56,970 INFO [train.py:901] (3/4) Epoch 15, batch 1700, loss[loss=0.231, simple_loss=0.3176, pruned_loss=0.07222, over 8481.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.3, pruned_loss=0.07115, over 1616157.81 frames. 
], batch size: 29, lr: 5.14e-03, grad_scale: 16.0 +2023-02-06 16:19:12,812 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=114885.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:19:16,112 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=114889.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:19:32,915 INFO [train.py:901] (3/4) Epoch 15, batch 1750, loss[loss=0.34, simple_loss=0.3881, pruned_loss=0.1459, over 6884.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.299, pruned_loss=0.07096, over 1609615.92 frames. ], batch size: 71, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:19:45,243 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7897, 2.2079, 4.9147, 2.7232, 4.4538, 4.2373, 4.5664, 4.4661], + device='cuda:3'), covar=tensor([0.0508, 0.3758, 0.0459, 0.3028, 0.0898, 0.0726, 0.0511, 0.0467], + device='cuda:3'), in_proj_covar=tensor([0.0553, 0.0611, 0.0639, 0.0581, 0.0653, 0.0560, 0.0553, 0.0618], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 16:20:06,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.435e+02 3.025e+02 3.758e+02 7.531e+02, threshold=6.050e+02, percent-clipped=3.0 +2023-02-06 16:20:07,576 INFO [train.py:901] (3/4) Epoch 15, batch 1800, loss[loss=0.213, simple_loss=0.2797, pruned_loss=0.07315, over 7531.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2977, pruned_loss=0.07022, over 1609548.06 frames. ], batch size: 18, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:20:43,790 INFO [train.py:901] (3/4) Epoch 15, batch 1850, loss[loss=0.26, simple_loss=0.3252, pruned_loss=0.09739, over 6880.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2988, pruned_loss=0.07152, over 1605210.00 frames. ], batch size: 71, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:21:17,801 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.743e+02 2.659e+02 3.189e+02 4.139e+02 1.250e+03, threshold=6.379e+02, percent-clipped=4.0 +2023-02-06 16:21:18,506 INFO [train.py:901] (3/4) Epoch 15, batch 1900, loss[loss=0.2337, simple_loss=0.3231, pruned_loss=0.07209, over 8288.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2972, pruned_loss=0.07033, over 1601203.31 frames. ], batch size: 49, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:21:28,840 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:21:46,755 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:21:50,085 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 16:21:53,561 INFO [train.py:901] (3/4) Epoch 15, batch 1950, loss[loss=0.1991, simple_loss=0.2829, pruned_loss=0.05765, over 8029.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2972, pruned_loss=0.06984, over 1606899.50 frames. ], batch size: 22, lr: 5.13e-03, grad_scale: 32.0 +2023-02-06 16:22:04,500 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-06 16:22:11,847 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7177, 1.7371, 3.2811, 1.3457, 2.2960, 3.6407, 3.9731, 2.6709], + device='cuda:3'), covar=tensor([0.1649, 0.1965, 0.0482, 0.2800, 0.1260, 0.0389, 0.0548, 0.1185], + device='cuda:3'), in_proj_covar=tensor([0.0277, 0.0305, 0.0270, 0.0297, 0.0285, 0.0245, 0.0373, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:22:23,207 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 16:22:28,430 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.421e+02 3.112e+02 3.916e+02 6.433e+02, threshold=6.224e+02, percent-clipped=1.0 +2023-02-06 16:22:29,136 INFO [train.py:901] (3/4) Epoch 15, batch 2000, loss[loss=0.182, simple_loss=0.2636, pruned_loss=0.0502, over 7640.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.297, pruned_loss=0.06993, over 1602079.93 frames. ], batch size: 19, lr: 5.13e-03, grad_scale: 32.0 +2023-02-06 16:22:47,905 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 16:22:49,770 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:22:54,580 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1762, 2.1900, 1.5359, 1.7287, 1.7879, 1.3323, 1.5974, 1.5655], + device='cuda:3'), covar=tensor([0.1336, 0.0343, 0.1193, 0.0610, 0.0693, 0.1457, 0.0911, 0.0864], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0229, 0.0324, 0.0302, 0.0300, 0.0330, 0.0345, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:22:58,872 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9914, 2.0459, 1.9592, 2.6205, 1.1141, 1.5996, 1.8721, 2.0883], + device='cuda:3'), covar=tensor([0.0688, 0.0875, 0.0860, 0.0397, 0.1188, 0.1326, 0.0827, 0.0791], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0203, 0.0248, 0.0211, 0.0211, 0.0247, 0.0253, 0.0212], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:23:03,505 INFO [train.py:901] (3/4) Epoch 15, batch 2050, loss[loss=0.205, simple_loss=0.2901, pruned_loss=0.05989, over 8610.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2977, pruned_loss=0.07019, over 1607066.52 frames. ], batch size: 31, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:23:18,038 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115233.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:23:36,045 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-06 16:23:39,485 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.382e+02 2.963e+02 3.753e+02 6.860e+02, threshold=5.925e+02, percent-clipped=2.0 +2023-02-06 16:23:39,506 INFO [train.py:901] (3/4) Epoch 15, batch 2100, loss[loss=0.2468, simple_loss=0.3381, pruned_loss=0.07772, over 8510.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2986, pruned_loss=0.0706, over 1607639.49 frames. 
], batch size: 26, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:24:05,868 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3731, 1.4976, 4.6142, 1.7709, 4.0869, 3.8698, 4.1212, 3.9783], + device='cuda:3'), covar=tensor([0.0549, 0.3986, 0.0408, 0.3174, 0.0994, 0.0813, 0.0500, 0.0614], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0598, 0.0628, 0.0568, 0.0643, 0.0552, 0.0542, 0.0609], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 16:24:13,868 INFO [train.py:901] (3/4) Epoch 15, batch 2150, loss[loss=0.2091, simple_loss=0.298, pruned_loss=0.06008, over 8377.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2988, pruned_loss=0.07072, over 1612090.96 frames. ], batch size: 49, lr: 5.13e-03, grad_scale: 16.0 +2023-02-06 16:24:29,064 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0236, 1.2165, 1.2125, 0.6937, 1.1871, 0.9839, 0.0852, 1.2032], + device='cuda:3'), covar=tensor([0.0381, 0.0292, 0.0271, 0.0432, 0.0356, 0.0845, 0.0700, 0.0260], + device='cuda:3'), in_proj_covar=tensor([0.0427, 0.0365, 0.0312, 0.0420, 0.0349, 0.0507, 0.0380, 0.0392], + device='cuda:3'), out_proj_covar=tensor([1.1842e-04, 9.8345e-05, 8.3735e-05, 1.1414e-04, 9.4909e-05, 1.4778e-04, + 1.0527e-04, 1.0719e-04], device='cuda:3') +2023-02-06 16:24:37,887 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115348.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:24:49,119 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.532e+02 3.093e+02 4.065e+02 1.254e+03, threshold=6.185e+02, percent-clipped=7.0 +2023-02-06 16:24:49,140 INFO [train.py:901] (3/4) Epoch 15, batch 2200, loss[loss=0.2225, simple_loss=0.3027, pruned_loss=0.0711, over 8505.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2993, pruned_loss=0.07089, over 1615655.59 frames. ], batch size: 26, lr: 5.12e-03, grad_scale: 16.0 +2023-02-06 16:25:07,044 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:07,721 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:12,316 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3352, 1.8791, 4.4324, 1.8835, 2.5576, 4.8668, 5.0431, 4.1926], + device='cuda:3'), covar=tensor([0.1117, 0.1696, 0.0255, 0.2043, 0.1037, 0.0205, 0.0430, 0.0618], + device='cuda:3'), in_proj_covar=tensor([0.0278, 0.0309, 0.0272, 0.0300, 0.0287, 0.0248, 0.0378, 0.0300], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:25:24,253 INFO [train.py:901] (3/4) Epoch 15, batch 2250, loss[loss=0.2183, simple_loss=0.3102, pruned_loss=0.06318, over 8251.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3004, pruned_loss=0.07107, over 1617946.75 frames. ], batch size: 24, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:25:48,108 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115448.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:48,945 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:25:58,283 INFO [train.py:901] (3/4) Epoch 15, batch 2300, loss[loss=0.2197, simple_loss=0.3003, pruned_loss=0.06952, over 8453.00 frames. 
], tot_loss[loss=0.2197, simple_loss=0.2991, pruned_loss=0.07012, over 1615400.37 frames. ], batch size: 25, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:25:58,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.315e+02 2.502e+02 3.175e+02 3.927e+02 9.067e+02, threshold=6.350e+02, percent-clipped=5.0 +2023-02-06 16:26:04,769 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.5548, 5.5955, 4.9327, 2.6844, 4.9751, 5.3469, 5.1526, 5.0687], + device='cuda:3'), covar=tensor([0.0553, 0.0376, 0.0947, 0.3858, 0.0690, 0.0735, 0.0999, 0.0626], + device='cuda:3'), in_proj_covar=tensor([0.0491, 0.0405, 0.0407, 0.0506, 0.0406, 0.0405, 0.0392, 0.0353], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:26:07,556 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:26:17,353 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2619, 1.3393, 1.5711, 1.2066, 0.7290, 1.3602, 1.1861, 1.0990], + device='cuda:3'), covar=tensor([0.0541, 0.1215, 0.1625, 0.1404, 0.0565, 0.1445, 0.0713, 0.0651], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0156, 0.0102, 0.0162, 0.0115, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 16:26:34,675 INFO [train.py:901] (3/4) Epoch 15, batch 2350, loss[loss=0.2164, simple_loss=0.309, pruned_loss=0.06196, over 8034.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.299, pruned_loss=0.07037, over 1617308.55 frames. ], batch size: 22, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:26:52,706 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 16:27:09,317 INFO [train.py:901] (3/4) Epoch 15, batch 2400, loss[loss=0.2323, simple_loss=0.3151, pruned_loss=0.07468, over 8315.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2986, pruned_loss=0.07047, over 1618432.63 frames. ], batch size: 25, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:09,522 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115563.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:10,002 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.542e+02 3.047e+02 3.524e+02 9.073e+02, threshold=6.095e+02, percent-clipped=1.0 +2023-02-06 16:27:14,077 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2263, 3.1311, 2.8626, 1.5364, 2.8279, 2.8368, 2.8919, 2.7490], + device='cuda:3'), covar=tensor([0.1147, 0.0881, 0.1437, 0.4902, 0.1213, 0.1437, 0.1495, 0.1315], + device='cuda:3'), in_proj_covar=tensor([0.0493, 0.0405, 0.0406, 0.0506, 0.0404, 0.0404, 0.0391, 0.0352], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:27:39,712 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115604.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:27:42,703 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 16:27:45,573 INFO [train.py:901] (3/4) Epoch 15, batch 2450, loss[loss=0.1878, simple_loss=0.2669, pruned_loss=0.05429, over 7929.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2982, pruned_loss=0.07007, over 1616082.71 frames. 
], batch size: 20, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:27:56,559 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115629.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:28:19,897 INFO [train.py:901] (3/4) Epoch 15, batch 2500, loss[loss=0.2644, simple_loss=0.3327, pruned_loss=0.098, over 8333.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.2986, pruned_loss=0.07052, over 1616592.12 frames. ], batch size: 25, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:28:20,562 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.367e+02 2.686e+02 3.697e+02 9.165e+02, threshold=5.372e+02, percent-clipped=5.0 +2023-02-06 16:28:21,755 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 16:28:36,131 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0 +2023-02-06 16:28:40,554 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5712, 1.9536, 2.0226, 1.1269, 2.0992, 1.4782, 0.5292, 1.8419], + device='cuda:3'), covar=tensor([0.0495, 0.0277, 0.0214, 0.0478, 0.0298, 0.0709, 0.0715, 0.0223], + device='cuda:3'), in_proj_covar=tensor([0.0420, 0.0358, 0.0307, 0.0413, 0.0343, 0.0499, 0.0372, 0.0383], + device='cuda:3'), out_proj_covar=tensor([1.1643e-04, 9.6288e-05, 8.2497e-05, 1.1198e-04, 9.3219e-05, 1.4539e-04, + 1.0298e-04, 1.0469e-04], device='cuda:3') +2023-02-06 16:28:41,954 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5721, 1.7693, 2.7540, 1.4342, 1.9663, 1.9077, 1.5761, 1.8240], + device='cuda:3'), covar=tensor([0.1806, 0.2277, 0.0798, 0.4104, 0.1742, 0.3024, 0.2001, 0.2178], + device='cuda:3'), in_proj_covar=tensor([0.0495, 0.0549, 0.0539, 0.0602, 0.0621, 0.0565, 0.0495, 0.0617], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:28:55,235 INFO [train.py:901] (3/4) Epoch 15, batch 2550, loss[loss=0.1904, simple_loss=0.2751, pruned_loss=0.05287, over 7971.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2969, pruned_loss=0.06964, over 1614120.90 frames. ], batch size: 21, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:29:04,479 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.67 vs. limit=2.0 +2023-02-06 16:29:08,942 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115732.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:29:09,204 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.32 vs. limit=5.0 +2023-02-06 16:29:09,603 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=115733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:29:30,406 INFO [train.py:901] (3/4) Epoch 15, batch 2600, loss[loss=0.1915, simple_loss=0.2871, pruned_loss=0.04792, over 7980.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2979, pruned_loss=0.06982, over 1620579.57 frames. ], batch size: 21, lr: 5.12e-03, grad_scale: 8.0 +2023-02-06 16:29:31,072 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.427e+02 3.148e+02 3.839e+02 8.607e+02, threshold=6.296e+02, percent-clipped=3.0 +2023-02-06 16:30:04,163 INFO [train.py:901] (3/4) Epoch 15, batch 2650, loss[loss=0.2828, simple_loss=0.3495, pruned_loss=0.108, over 8137.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2997, pruned_loss=0.07083, over 1626248.02 frames. 
], batch size: 22, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:30:08,503 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=115819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:27,409 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=115844.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:29,377 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115847.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:30,054 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=115848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:30:39,733 INFO [train.py:901] (3/4) Epoch 15, batch 2700, loss[loss=0.2095, simple_loss=0.3035, pruned_loss=0.05782, over 8196.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3, pruned_loss=0.0713, over 1619059.45 frames. ], batch size: 23, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:30:40,394 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.337e+02 2.718e+02 3.606e+02 6.832e+02, threshold=5.436e+02, percent-clipped=3.0 +2023-02-06 16:31:12,010 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=115910.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 16:31:13,864 INFO [train.py:901] (3/4) Epoch 15, batch 2750, loss[loss=0.22, simple_loss=0.3071, pruned_loss=0.06643, over 8494.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2991, pruned_loss=0.0712, over 1612043.53 frames. ], batch size: 26, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:31:32,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6684, 3.6259, 3.3342, 1.9074, 3.2431, 3.2448, 3.3025, 3.0498], + device='cuda:3'), covar=tensor([0.1010, 0.0760, 0.1123, 0.4665, 0.0951, 0.1305, 0.1404, 0.1115], + device='cuda:3'), in_proj_covar=tensor([0.0485, 0.0401, 0.0404, 0.0503, 0.0397, 0.0400, 0.0388, 0.0349], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:31:44,881 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8900, 1.4732, 3.5428, 1.3497, 2.4214, 3.9044, 3.9623, 3.3485], + device='cuda:3'), covar=tensor([0.1100, 0.1691, 0.0298, 0.2114, 0.1040, 0.0222, 0.0483, 0.0593], + device='cuda:3'), in_proj_covar=tensor([0.0277, 0.0304, 0.0270, 0.0296, 0.0287, 0.0247, 0.0376, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:31:49,503 INFO [train.py:901] (3/4) Epoch 15, batch 2800, loss[loss=0.2616, simple_loss=0.3244, pruned_loss=0.09943, over 6982.00 frames. ], tot_loss[loss=0.2216, simple_loss=0.3004, pruned_loss=0.07138, over 1618037.19 frames. ], batch size: 71, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:31:50,151 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.517e+02 2.986e+02 3.677e+02 9.071e+02, threshold=5.972e+02, percent-clipped=5.0 +2023-02-06 16:32:24,937 INFO [train.py:901] (3/4) Epoch 15, batch 2850, loss[loss=0.219, simple_loss=0.3, pruned_loss=0.069, over 8144.00 frames. ], tot_loss[loss=0.2201, simple_loss=0.2985, pruned_loss=0.0708, over 1611430.85 frames. 
], batch size: 22, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:32:35,416 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6726, 1.4014, 1.5169, 1.1965, 0.8048, 1.3042, 1.4921, 1.2559], + device='cuda:3'), covar=tensor([0.0541, 0.1272, 0.1734, 0.1514, 0.0623, 0.1593, 0.0756, 0.0702], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0156, 0.0101, 0.0162, 0.0115, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 16:32:38,092 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:32:46,508 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2164, 2.4458, 2.0273, 2.7612, 1.4548, 1.8509, 2.1437, 2.3798], + device='cuda:3'), covar=tensor([0.0623, 0.0718, 0.0838, 0.0398, 0.1105, 0.1148, 0.0863, 0.0715], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0203, 0.0249, 0.0211, 0.0212, 0.0250, 0.0253, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:33:00,841 INFO [train.py:901] (3/4) Epoch 15, batch 2900, loss[loss=0.2272, simple_loss=0.3183, pruned_loss=0.06807, over 8727.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2991, pruned_loss=0.07078, over 1615498.50 frames. ], batch size: 30, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:33:01,416 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.452e+02 2.959e+02 3.782e+02 6.842e+02, threshold=5.917e+02, percent-clipped=3.0 +2023-02-06 16:33:29,120 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:29,805 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:35,327 INFO [train.py:901] (3/4) Epoch 15, batch 2950, loss[loss=0.1984, simple_loss=0.277, pruned_loss=0.05985, over 8101.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3, pruned_loss=0.0714, over 1615747.87 frames. ], batch size: 21, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:33:36,706 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 16:33:42,120 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116123.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:45,596 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:46,298 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:33:49,699 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:34:08,977 INFO [train.py:901] (3/4) Epoch 15, batch 3000, loss[loss=0.2595, simple_loss=0.3236, pruned_loss=0.09766, over 6890.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2993, pruned_loss=0.07086, over 1617088.55 frames. ], batch size: 72, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:34:08,977 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 16:34:21,680 INFO [train.py:935] (3/4) Epoch 15, validation: loss=0.1808, simple_loss=0.2809, pruned_loss=0.04034, over 944034.00 frames. 
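A note on the recurring "Clipping_scale=2.0, grad-norm quartiles ... threshold ... percent-clipped" lines that optim.py emits throughout this log: the five numbers are the min/25%/50%/75%/max of recent per-step gradient norms, and the clipping threshold tracks the median. In the entry immediately below, for example, threshold=6.253e+02 is 2.0 (the clipping scale) times the median norm 3.127e+02, up to rounding. The sketch that follows illustrates that mechanism only; the class name, history length, and reporting details are assumptions for illustration, not icefall's actual ScaledAdam implementation.

# Illustrative sketch only; not icefall's ScaledAdam code.
from collections import deque

import torch


class QuartileGradClipper:
    """Clip gradients against clipping_scale times the median of recent grad norms."""

    def __init__(self, clipping_scale: float = 2.0, history: int = 1000):
        self.clipping_scale = clipping_scale  # the "Clipping_scale=2.0" in the log
        self.norms = deque(maxlen=history)  # recent total gradient norms
        self.num_steps = 0
        self.num_clipped = 0

    def clip_(self, parameters) -> float:
        params = [p for p in parameters if p.grad is not None]
        # Total L2 norm over all gradients for this step.
        total = torch.linalg.vector_norm(
            torch.stack([torch.linalg.vector_norm(p.grad) for p in params])
        ).item()
        self.norms.append(total)

        # Quartiles of the recent grad-norm history, as reported in the log.
        hist = torch.tensor(list(self.norms))
        quartiles = torch.quantile(hist, torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        threshold = self.clipping_scale * quartiles[2].item()  # scale * median

        self.num_steps += 1
        if total > threshold:
            self.num_clipped += 1
            # Rescale every gradient so the total norm equals the threshold.
            for p in params:
                p.grad.mul_(threshold / (total + 1e-20))
        return threshold

Under these assumptions, the "percent-clipped" figure in each log entry would simply be 100 * num_clipped / num_steps over the reporting window, reset after each report.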
+2023-02-06 16:34:21,681 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 16:34:22,359 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.826e+02 2.534e+02 3.127e+02 3.845e+02 7.463e+02, threshold=6.253e+02, percent-clipped=8.0 +2023-02-06 16:34:32,180 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4163, 1.8979, 2.9264, 1.2432, 2.0932, 1.7428, 1.6060, 1.9688], + device='cuda:3'), covar=tensor([0.2115, 0.2523, 0.0941, 0.4674, 0.1983, 0.3590, 0.2330, 0.2797], + device='cuda:3'), in_proj_covar=tensor([0.0497, 0.0552, 0.0542, 0.0600, 0.0624, 0.0567, 0.0495, 0.0619], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:34:57,899 INFO [train.py:901] (3/4) Epoch 15, batch 3050, loss[loss=0.2213, simple_loss=0.3095, pruned_loss=0.06649, over 8465.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2991, pruned_loss=0.07073, over 1613737.08 frames. ], batch size: 29, lr: 5.11e-03, grad_scale: 8.0 +2023-02-06 16:35:15,576 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4940, 2.9261, 2.3267, 3.5779, 1.8509, 2.0552, 2.3633, 2.9175], + device='cuda:3'), covar=tensor([0.0627, 0.0608, 0.0805, 0.0311, 0.0999, 0.1242, 0.0961, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0202, 0.0248, 0.0211, 0.0211, 0.0249, 0.0254, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:35:26,166 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116254.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 16:35:31,957 INFO [train.py:901] (3/4) Epoch 15, batch 3100, loss[loss=0.2059, simple_loss=0.2847, pruned_loss=0.06356, over 8323.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3003, pruned_loss=0.07151, over 1615393.26 frames. ], batch size: 26, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:35:32,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.573e+02 3.095e+02 3.865e+02 1.142e+03, threshold=6.190e+02, percent-clipped=3.0 +2023-02-06 16:36:06,934 INFO [train.py:901] (3/4) Epoch 15, batch 3150, loss[loss=0.2187, simple_loss=0.3095, pruned_loss=0.06393, over 8244.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2996, pruned_loss=0.07129, over 1615148.73 frames. ], batch size: 24, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:36:27,210 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116341.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:36:41,980 INFO [train.py:901] (3/4) Epoch 15, batch 3200, loss[loss=0.2379, simple_loss=0.3193, pruned_loss=0.07827, over 8238.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3013, pruned_loss=0.07222, over 1617128.73 frames. ], batch size: 24, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:36:43,354 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.524e+02 3.304e+02 3.942e+02 1.206e+03, threshold=6.608e+02, percent-clipped=2.0 +2023-02-06 16:36:46,747 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116369.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 16:36:51,222 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:36:53,699 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. 
limit=2.0 +2023-02-06 16:37:06,199 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2065, 2.3764, 1.9313, 2.9154, 1.3228, 1.7236, 1.9179, 2.3453], + device='cuda:3'), covar=tensor([0.0655, 0.0690, 0.0915, 0.0315, 0.1174, 0.1287, 0.1011, 0.0726], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0203, 0.0250, 0.0212, 0.0211, 0.0250, 0.0257, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:37:12,416 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9141, 1.5706, 3.0968, 1.3280, 2.0140, 3.3353, 3.4704, 2.8711], + device='cuda:3'), covar=tensor([0.0969, 0.1582, 0.0356, 0.2044, 0.1097, 0.0267, 0.0546, 0.0585], + device='cuda:3'), in_proj_covar=tensor([0.0277, 0.0306, 0.0272, 0.0299, 0.0288, 0.0248, 0.0377, 0.0299], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:37:16,507 INFO [train.py:901] (3/4) Epoch 15, batch 3250, loss[loss=0.2101, simple_loss=0.281, pruned_loss=0.06957, over 7929.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.3001, pruned_loss=0.07147, over 1615355.35 frames. ], batch size: 20, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:37:52,491 INFO [train.py:901] (3/4) Epoch 15, batch 3300, loss[loss=0.2014, simple_loss=0.2899, pruned_loss=0.05642, over 8597.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3, pruned_loss=0.07146, over 1616921.18 frames. ], batch size: 34, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:37:53,156 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.388e+02 2.875e+02 3.716e+02 9.209e+02, threshold=5.750e+02, percent-clipped=3.0 +2023-02-06 16:37:53,300 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116464.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:37:55,237 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116467.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:38:02,554 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116478.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:38:12,020 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116491.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:38:26,452 INFO [train.py:901] (3/4) Epoch 15, batch 3350, loss[loss=0.2335, simple_loss=0.3062, pruned_loss=0.0804, over 7430.00 frames. ], tot_loss[loss=0.221, simple_loss=0.2998, pruned_loss=0.0711, over 1616201.98 frames. ], batch size: 17, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:38:33,251 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116523.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:39:02,052 INFO [train.py:901] (3/4) Epoch 15, batch 3400, loss[loss=0.2142, simple_loss=0.2882, pruned_loss=0.07006, over 7717.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2999, pruned_loss=0.07087, over 1619205.41 frames. 
], batch size: 18, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:39:02,726 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.566e+02 3.149e+02 4.104e+02 8.501e+02, threshold=6.298e+02, percent-clipped=7.0 +2023-02-06 16:39:14,876 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116582.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:39:22,204 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116593.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:39:36,209 INFO [train.py:901] (3/4) Epoch 15, batch 3450, loss[loss=0.2883, simple_loss=0.3446, pruned_loss=0.116, over 7648.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2997, pruned_loss=0.07086, over 1613626.10 frames. ], batch size: 19, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:39:44,406 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116625.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 16:39:51,718 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:40:01,115 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116650.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 16:40:03,772 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7471, 1.9021, 1.9238, 1.3507, 2.0447, 1.5649, 1.0602, 1.7764], + device='cuda:3'), covar=tensor([0.0342, 0.0227, 0.0162, 0.0348, 0.0241, 0.0481, 0.0543, 0.0189], + device='cuda:3'), in_proj_covar=tensor([0.0424, 0.0363, 0.0311, 0.0419, 0.0351, 0.0507, 0.0375, 0.0385], + device='cuda:3'), out_proj_covar=tensor([1.1716e-04, 9.7643e-05, 8.3560e-05, 1.1352e-04, 9.5440e-05, 1.4759e-04, + 1.0361e-04, 1.0489e-04], device='cuda:3') +2023-02-06 16:40:10,039 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.06 vs. limit=5.0 +2023-02-06 16:40:10,185 INFO [train.py:901] (3/4) Epoch 15, batch 3500, loss[loss=0.2214, simple_loss=0.3098, pruned_loss=0.06647, over 8458.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2986, pruned_loss=0.07035, over 1608594.07 frames. ], batch size: 29, lr: 5.10e-03, grad_scale: 8.0 +2023-02-06 16:40:10,850 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.398e+02 2.936e+02 3.935e+02 9.560e+02, threshold=5.871e+02, percent-clipped=3.0 +2023-02-06 16:40:13,184 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5216, 1.7959, 2.7437, 1.3413, 1.8978, 1.9325, 1.6026, 1.7547], + device='cuda:3'), covar=tensor([0.1820, 0.2484, 0.0874, 0.4404, 0.1749, 0.3059, 0.2146, 0.2244], + device='cuda:3'), in_proj_covar=tensor([0.0503, 0.0557, 0.0547, 0.0609, 0.0628, 0.0571, 0.0502, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:40:26,417 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116685.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:40:35,608 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 16:40:44,884 INFO [train.py:901] (3/4) Epoch 15, batch 3550, loss[loss=0.2143, simple_loss=0.2816, pruned_loss=0.07349, over 7974.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2982, pruned_loss=0.07018, over 1605787.69 frames. 
], batch size: 21, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:40:50,947 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116722.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:06,496 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8267, 1.7149, 3.1800, 1.4903, 2.2567, 3.3976, 3.4920, 2.9208], + device='cuda:3'), covar=tensor([0.1074, 0.1414, 0.0325, 0.1881, 0.0871, 0.0268, 0.0563, 0.0584], + device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0308, 0.0272, 0.0304, 0.0291, 0.0251, 0.0380, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:41:08,580 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:17,775 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-06 16:41:19,408 INFO [train.py:901] (3/4) Epoch 15, batch 3600, loss[loss=0.1525, simple_loss=0.2379, pruned_loss=0.03358, over 7659.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2986, pruned_loss=0.07045, over 1603994.82 frames. ], batch size: 19, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:41:20,116 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.627e+02 3.005e+02 3.918e+02 8.490e+02, threshold=6.010e+02, percent-clipped=4.0 +2023-02-06 16:41:25,799 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116772.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:45,864 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4198, 1.5138, 4.2007, 1.9824, 2.3786, 4.7154, 4.8110, 3.9344], + device='cuda:3'), covar=tensor([0.1031, 0.1964, 0.0329, 0.2139, 0.1273, 0.0282, 0.0491, 0.0703], + device='cuda:3'), in_proj_covar=tensor([0.0279, 0.0307, 0.0273, 0.0303, 0.0291, 0.0251, 0.0379, 0.0301], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:41:47,426 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116800.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:52,898 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116808.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:41:56,184 INFO [train.py:901] (3/4) Epoch 15, batch 3650, loss[loss=0.247, simple_loss=0.3118, pruned_loss=0.09112, over 4947.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2986, pruned_loss=0.07066, over 1602060.76 frames. ], batch size: 11, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:42:00,908 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=116820.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:13,592 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116838.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:21,035 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=116849.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:30,284 INFO [train.py:901] (3/4) Epoch 15, batch 3700, loss[loss=0.2101, simple_loss=0.2865, pruned_loss=0.06687, over 7805.00 frames. ], tot_loss[loss=0.2217, simple_loss=0.3005, pruned_loss=0.0714, over 1605703.49 frames. 
], batch size: 20, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:42:30,494 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116863.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:30,966 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.301e+02 2.797e+02 3.414e+02 8.630e+02, threshold=5.595e+02, percent-clipped=3.0 +2023-02-06 16:42:33,141 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116867.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:42:36,572 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 16:42:38,094 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=116874.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:43:06,649 INFO [train.py:901] (3/4) Epoch 15, batch 3750, loss[loss=0.2304, simple_loss=0.3092, pruned_loss=0.07577, over 8484.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3008, pruned_loss=0.07092, over 1613855.17 frames. ], batch size: 28, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:43:13,700 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116923.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:43:23,707 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8535, 1.6324, 2.4713, 1.6647, 1.1508, 2.4401, 0.5627, 1.4068], + device='cuda:3'), covar=tensor([0.1834, 0.1659, 0.0386, 0.1623, 0.3637, 0.0438, 0.2389, 0.1723], + device='cuda:3'), in_proj_covar=tensor([0.0174, 0.0181, 0.0112, 0.0216, 0.0260, 0.0116, 0.0163, 0.0177], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 16:43:38,613 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 16:43:40,820 INFO [train.py:901] (3/4) Epoch 15, batch 3800, loss[loss=0.2364, simple_loss=0.3285, pruned_loss=0.07221, over 8110.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.3022, pruned_loss=0.07175, over 1616116.58 frames. ], batch size: 23, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:43:41,471 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.512e+02 2.989e+02 3.697e+02 7.171e+02, threshold=5.977e+02, percent-clipped=7.0 +2023-02-06 16:43:52,421 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=116980.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:43:53,910 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=116982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:44:15,608 INFO [train.py:901] (3/4) Epoch 15, batch 3850, loss[loss=0.2417, simple_loss=0.3329, pruned_loss=0.07519, over 8468.00 frames. ], tot_loss[loss=0.2234, simple_loss=0.3023, pruned_loss=0.0722, over 1614950.62 frames. ], batch size: 25, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:44:42,587 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 16:44:46,207 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:44:46,539 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.81 vs. limit=5.0 +2023-02-06 16:44:50,965 INFO [train.py:901] (3/4) Epoch 15, batch 3900, loss[loss=0.2321, simple_loss=0.3129, pruned_loss=0.07567, over 8518.00 frames. 
], tot_loss[loss=0.2229, simple_loss=0.3016, pruned_loss=0.07213, over 1615937.18 frames. ], batch size: 26, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:44:51,623 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.460e+02 2.428e+02 3.027e+02 3.797e+02 6.654e+02, threshold=6.053e+02, percent-clipped=2.0 +2023-02-06 16:44:53,044 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:45:03,712 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117081.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:45:12,954 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117095.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:45:17,736 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2029, 2.1961, 1.7440, 1.9266, 1.8768, 1.4421, 1.5920, 1.6767], + device='cuda:3'), covar=tensor([0.1304, 0.0372, 0.1092, 0.0542, 0.0637, 0.1389, 0.0954, 0.0747], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0233, 0.0327, 0.0306, 0.0303, 0.0333, 0.0349, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:45:24,922 INFO [train.py:901] (3/4) Epoch 15, batch 3950, loss[loss=0.2193, simple_loss=0.2981, pruned_loss=0.07025, over 8104.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3012, pruned_loss=0.07221, over 1610196.18 frames. ], batch size: 21, lr: 5.09e-03, grad_scale: 8.0 +2023-02-06 16:45:34,216 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 16:46:01,077 INFO [train.py:901] (3/4) Epoch 15, batch 4000, loss[loss=0.19, simple_loss=0.2669, pruned_loss=0.05652, over 8072.00 frames. ], tot_loss[loss=0.2208, simple_loss=0.2998, pruned_loss=0.07093, over 1614299.95 frames. ], batch size: 21, lr: 5.08e-03, grad_scale: 8.0 +2023-02-06 16:46:01,776 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.473e+02 2.992e+02 3.534e+02 5.115e+02, threshold=5.984e+02, percent-clipped=0.0 +2023-02-06 16:46:01,907 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117164.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:46:12,550 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:46:13,871 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117181.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:46:15,165 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2244, 2.4470, 4.4150, 2.6890, 3.9708, 3.7364, 4.0914, 3.9953], + device='cuda:3'), covar=tensor([0.0663, 0.3042, 0.0679, 0.2973, 0.1041, 0.0899, 0.0517, 0.0579], + device='cuda:3'), in_proj_covar=tensor([0.0554, 0.0602, 0.0633, 0.0571, 0.0654, 0.0556, 0.0548, 0.0613], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 16:46:17,466 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 16:46:29,855 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117204.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:46:35,569 INFO [train.py:901] (3/4) Epoch 15, batch 4050, loss[loss=0.2034, simple_loss=0.2829, pruned_loss=0.06191, over 8104.00 frames. 
], tot_loss[loss=0.2221, simple_loss=0.3007, pruned_loss=0.07181, over 1612428.18 frames. ], batch size: 23, lr: 5.08e-03, grad_scale: 8.0 +2023-02-06 16:46:53,307 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117238.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:46:53,913 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117239.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:47:11,625 INFO [train.py:901] (3/4) Epoch 15, batch 4100, loss[loss=0.1861, simple_loss=0.2833, pruned_loss=0.0445, over 8457.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2999, pruned_loss=0.07112, over 1613265.32 frames. ], batch size: 27, lr: 5.08e-03, grad_scale: 8.0 +2023-02-06 16:47:11,825 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117263.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:47:12,285 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.506e+02 3.096e+02 3.742e+02 9.544e+02, threshold=6.191e+02, percent-clipped=4.0 +2023-02-06 16:47:22,932 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:47:46,643 INFO [train.py:901] (3/4) Epoch 15, batch 4150, loss[loss=0.2104, simple_loss=0.2974, pruned_loss=0.06171, over 8338.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2992, pruned_loss=0.07078, over 1613485.77 frames. ], batch size: 26, lr: 5.08e-03, grad_scale: 8.0 +2023-02-06 16:48:10,097 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=117347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:48:12,979 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117351.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:48:21,503 INFO [train.py:901] (3/4) Epoch 15, batch 4200, loss[loss=0.1793, simple_loss=0.2668, pruned_loss=0.04591, over 8127.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2975, pruned_loss=0.06977, over 1611728.40 frames. ], batch size: 22, lr: 5.08e-03, grad_scale: 8.0 +2023-02-06 16:48:22,820 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.404e+02 2.907e+02 3.383e+02 1.073e+03, threshold=5.814e+02, percent-clipped=1.0 +2023-02-06 16:48:31,971 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:48:40,576 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 16:48:48,352 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9186, 1.1611, 3.1051, 1.1031, 2.7302, 2.6292, 2.8171, 2.7271], + device='cuda:3'), covar=tensor([0.0853, 0.4060, 0.0959, 0.3856, 0.1348, 0.1036, 0.0717, 0.0860], + device='cuda:3'), in_proj_covar=tensor([0.0550, 0.0605, 0.0628, 0.0571, 0.0650, 0.0552, 0.0546, 0.0610], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 16:48:57,040 INFO [train.py:901] (3/4) Epoch 15, batch 4250, loss[loss=0.2109, simple_loss=0.296, pruned_loss=0.0629, over 8348.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2975, pruned_loss=0.06951, over 1616344.29 frames. ], batch size: 50, lr: 5.08e-03, grad_scale: 16.0 +2023-02-06 16:49:03,724 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-06 16:49:14,107 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:49:30,910 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117462.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:49:31,427 INFO [train.py:901] (3/4) Epoch 15, batch 4300, loss[loss=0.1803, simple_loss=0.2688, pruned_loss=0.04591, over 6006.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2987, pruned_loss=0.0703, over 1620764.32 frames. ], batch size: 13, lr: 5.08e-03, grad_scale: 16.0 +2023-02-06 16:49:32,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.479e+02 3.115e+02 3.892e+02 7.815e+02, threshold=6.229e+02, percent-clipped=5.0 +2023-02-06 16:50:07,599 INFO [train.py:901] (3/4) Epoch 15, batch 4350, loss[loss=0.2488, simple_loss=0.3229, pruned_loss=0.08736, over 8781.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.3, pruned_loss=0.07112, over 1618910.26 frames. ], batch size: 39, lr: 5.08e-03, grad_scale: 16.0 +2023-02-06 16:50:23,038 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117535.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:50:34,438 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 16:50:36,329 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 16:50:40,554 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117560.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:50:41,781 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3821, 4.2999, 3.9470, 2.2348, 3.9046, 3.9104, 4.0187, 3.6576], + device='cuda:3'), covar=tensor([0.0775, 0.0614, 0.1027, 0.4406, 0.0873, 0.1279, 0.1267, 0.0853], + device='cuda:3'), in_proj_covar=tensor([0.0489, 0.0408, 0.0411, 0.0509, 0.0402, 0.0409, 0.0393, 0.0355], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:50:42,368 INFO [train.py:901] (3/4) Epoch 15, batch 4400, loss[loss=0.246, simple_loss=0.3149, pruned_loss=0.08855, over 8713.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2999, pruned_loss=0.07145, over 1617784.30 frames. ], batch size: 39, lr: 5.08e-03, grad_scale: 16.0 +2023-02-06 16:50:43,036 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.383e+02 3.124e+02 3.901e+02 9.506e+02, threshold=6.248e+02, percent-clipped=7.0 +2023-02-06 16:50:55,792 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:51:08,235 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4944, 4.4134, 3.9946, 1.9257, 4.0292, 3.9449, 4.0329, 3.8037], + device='cuda:3'), covar=tensor([0.0800, 0.0677, 0.1176, 0.4584, 0.0877, 0.0841, 0.1383, 0.0700], + device='cuda:3'), in_proj_covar=tensor([0.0483, 0.0403, 0.0405, 0.0503, 0.0396, 0.0405, 0.0388, 0.0350], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:51:17,968 INFO [train.py:901] (3/4) Epoch 15, batch 4450, loss[loss=0.2285, simple_loss=0.3058, pruned_loss=0.07563, over 8483.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2997, pruned_loss=0.07144, over 1615406.82 frames. 
], batch size: 27, lr: 5.07e-03, grad_scale: 16.0 +2023-02-06 16:51:17,982 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 16:51:52,096 INFO [train.py:901] (3/4) Epoch 15, batch 4500, loss[loss=0.2348, simple_loss=0.3204, pruned_loss=0.07459, over 8604.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3004, pruned_loss=0.0716, over 1617463.45 frames. ], batch size: 49, lr: 5.07e-03, grad_scale: 16.0 +2023-02-06 16:51:52,740 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.480e+02 2.963e+02 4.043e+02 1.091e+03, threshold=5.927e+02, percent-clipped=5.0 +2023-02-06 16:52:11,249 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=117691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:52:11,855 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 16:52:16,201 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117698.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:52:17,579 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6489, 2.7541, 2.4023, 3.8202, 1.7045, 1.8377, 2.3206, 3.1148], + device='cuda:3'), covar=tensor([0.0627, 0.0894, 0.0914, 0.0273, 0.1236, 0.1522, 0.1135, 0.0809], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0205, 0.0253, 0.0213, 0.0213, 0.0250, 0.0258, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:52:27,011 INFO [train.py:901] (3/4) Epoch 15, batch 4550, loss[loss=0.1734, simple_loss=0.2485, pruned_loss=0.04911, over 7689.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2992, pruned_loss=0.07068, over 1618383.66 frames. ], batch size: 18, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:53:02,114 INFO [train.py:901] (3/4) Epoch 15, batch 4600, loss[loss=0.2254, simple_loss=0.3121, pruned_loss=0.0693, over 8356.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.3, pruned_loss=0.07124, over 1618645.96 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:53:03,483 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.311e+02 2.848e+02 3.671e+02 5.923e+02, threshold=5.697e+02, percent-clipped=0.0 +2023-02-06 16:53:31,669 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=117806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:53:34,339 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2643, 1.2323, 1.5005, 1.1744, 0.7373, 1.2847, 1.2227, 1.2601], + device='cuda:3'), covar=tensor([0.0535, 0.1382, 0.1692, 0.1456, 0.0572, 0.1582, 0.0717, 0.0630], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0156, 0.0101, 0.0162, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 16:53:36,042 INFO [train.py:901] (3/4) Epoch 15, batch 4650, loss[loss=0.2909, simple_loss=0.3489, pruned_loss=0.1165, over 8355.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.3008, pruned_loss=0.07226, over 1615071.76 frames. ], batch size: 24, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:53:43,323 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. 
limit=2.0 +2023-02-06 16:53:52,453 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7456, 1.8436, 1.8492, 1.5129, 1.9384, 1.5192, 1.1319, 1.8089], + device='cuda:3'), covar=tensor([0.0382, 0.0241, 0.0181, 0.0349, 0.0244, 0.0488, 0.0559, 0.0190], + device='cuda:3'), in_proj_covar=tensor([0.0418, 0.0359, 0.0310, 0.0414, 0.0344, 0.0504, 0.0375, 0.0381], + device='cuda:3'), out_proj_covar=tensor([1.1551e-04, 9.6559e-05, 8.3003e-05, 1.1208e-04, 9.3504e-05, 1.4687e-04, + 1.0354e-04, 1.0346e-04], device='cuda:3') +2023-02-06 16:54:11,642 INFO [train.py:901] (3/4) Epoch 15, batch 4700, loss[loss=0.2454, simple_loss=0.3224, pruned_loss=0.0842, over 8285.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3004, pruned_loss=0.07168, over 1614551.91 frames. ], batch size: 23, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:54:12,886 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.509e+02 3.109e+02 4.231e+02 8.316e+02, threshold=6.217e+02, percent-clipped=12.0 +2023-02-06 16:54:46,551 INFO [train.py:901] (3/4) Epoch 15, batch 4750, loss[loss=0.2781, simple_loss=0.3471, pruned_loss=0.1046, over 8461.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3007, pruned_loss=0.07232, over 1612883.95 frames. ], batch size: 25, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:55:10,056 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3105, 2.8568, 2.2683, 3.8463, 1.9222, 1.8777, 2.3688, 3.0527], + device='cuda:3'), covar=tensor([0.0749, 0.0856, 0.0914, 0.0281, 0.1119, 0.1388, 0.1037, 0.0769], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0204, 0.0251, 0.0212, 0.0211, 0.0248, 0.0255, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 16:55:11,955 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 16:55:15,282 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 16:55:16,043 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=117954.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:55:22,513 INFO [train.py:901] (3/4) Epoch 15, batch 4800, loss[loss=0.2298, simple_loss=0.302, pruned_loss=0.07877, over 8596.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3007, pruned_loss=0.07195, over 1617177.90 frames. ], batch size: 34, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:55:23,938 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 2.482e+02 3.121e+02 4.555e+02 1.692e+03, threshold=6.242e+02, percent-clipped=8.0 +2023-02-06 16:55:33,837 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=117979.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:55:57,739 INFO [train.py:901] (3/4) Epoch 15, batch 4850, loss[loss=0.2478, simple_loss=0.3228, pruned_loss=0.08641, over 8442.00 frames. ], tot_loss[loss=0.2237, simple_loss=0.3019, pruned_loss=0.07275, over 1622932.73 frames. ], batch size: 29, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:56:07,046 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-06 16:56:29,212 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118058.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:56:31,989 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:56:32,449 INFO [train.py:901] (3/4) Epoch 15, batch 4900, loss[loss=0.2246, simple_loss=0.287, pruned_loss=0.0811, over 7800.00 frames. ], tot_loss[loss=0.2241, simple_loss=0.3021, pruned_loss=0.07306, over 1620391.74 frames. ], batch size: 19, lr: 5.07e-03, grad_scale: 8.0 +2023-02-06 16:56:33,728 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.453e+02 2.951e+02 3.688e+02 9.605e+02, threshold=5.903e+02, percent-clipped=5.0 +2023-02-06 16:56:50,262 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:57:07,604 INFO [train.py:901] (3/4) Epoch 15, batch 4950, loss[loss=0.2758, simple_loss=0.3409, pruned_loss=0.1053, over 7964.00 frames. ], tot_loss[loss=0.2239, simple_loss=0.3019, pruned_loss=0.0729, over 1619566.35 frames. ], batch size: 21, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:57:09,214 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3143, 2.2271, 1.7137, 1.9750, 1.8475, 1.4149, 1.7138, 1.7156], + device='cuda:3'), covar=tensor([0.1159, 0.0339, 0.1061, 0.0505, 0.0619, 0.1436, 0.0914, 0.0699], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0228, 0.0324, 0.0300, 0.0300, 0.0331, 0.0345, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 16:57:42,121 INFO [train.py:901] (3/4) Epoch 15, batch 5000, loss[loss=0.2149, simple_loss=0.3026, pruned_loss=0.06358, over 8289.00 frames. ], tot_loss[loss=0.2228, simple_loss=0.3008, pruned_loss=0.0724, over 1615934.69 frames. ], batch size: 23, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:57:43,378 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.421e+02 2.910e+02 3.813e+02 6.624e+02, threshold=5.820e+02, percent-clipped=4.0 +2023-02-06 16:57:58,591 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118186.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:58:17,614 INFO [train.py:901] (3/4) Epoch 15, batch 5050, loss[loss=0.2384, simple_loss=0.3224, pruned_loss=0.07724, over 8324.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2993, pruned_loss=0.07157, over 1610368.79 frames. ], batch size: 25, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:58:23,569 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 16:58:43,443 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 16:58:52,547 INFO [train.py:901] (3/4) Epoch 15, batch 5100, loss[loss=0.2192, simple_loss=0.2975, pruned_loss=0.07047, over 6009.00 frames. ], tot_loss[loss=0.2219, simple_loss=0.3003, pruned_loss=0.0718, over 1613475.21 frames. ], batch size: 13, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:58:53,819 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.591e+02 3.125e+02 3.877e+02 7.785e+02, threshold=6.249e+02, percent-clipped=4.0 +2023-02-06 16:58:57,519 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-06 16:59:09,115 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:59:23,817 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118307.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 16:59:27,782 INFO [train.py:901] (3/4) Epoch 15, batch 5150, loss[loss=0.1957, simple_loss=0.2803, pruned_loss=0.05557, over 7930.00 frames. ], tot_loss[loss=0.222, simple_loss=0.3005, pruned_loss=0.07174, over 1615152.28 frames. ], batch size: 20, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 16:59:42,970 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4794, 1.8063, 2.7146, 1.3359, 2.0055, 1.8236, 1.5625, 1.9587], + device='cuda:3'), covar=tensor([0.1777, 0.2257, 0.0881, 0.4016, 0.1662, 0.2965, 0.2047, 0.2087], + device='cuda:3'), in_proj_covar=tensor([0.0497, 0.0551, 0.0535, 0.0604, 0.0625, 0.0564, 0.0494, 0.0616], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 16:59:51,101 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:00:02,426 INFO [train.py:901] (3/4) Epoch 15, batch 5200, loss[loss=0.226, simple_loss=0.3111, pruned_loss=0.0705, over 8481.00 frames. ], tot_loss[loss=0.2221, simple_loss=0.3006, pruned_loss=0.07178, over 1617437.89 frames. ], batch size: 25, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:00:03,692 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.269e+02 2.811e+02 3.673e+02 9.088e+02, threshold=5.623e+02, percent-clipped=2.0 +2023-02-06 17:00:13,700 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8043, 1.6067, 2.0958, 1.7892, 1.9532, 1.8067, 1.5474, 0.8077], + device='cuda:3'), covar=tensor([0.4920, 0.4031, 0.1403, 0.2733, 0.2025, 0.2546, 0.1959, 0.4150], + device='cuda:3'), in_proj_covar=tensor([0.0915, 0.0922, 0.0758, 0.0891, 0.0960, 0.0844, 0.0722, 0.0799], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 17:00:22,811 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 17:00:29,681 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:00:37,917 INFO [train.py:901] (3/4) Epoch 15, batch 5250, loss[loss=0.213, simple_loss=0.3021, pruned_loss=0.06191, over 8499.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2992, pruned_loss=0.07066, over 1615706.69 frames. ], batch size: 26, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:00:46,149 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 17:00:47,619 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4953, 1.3454, 4.7479, 1.7992, 4.1055, 3.9561, 4.3050, 4.1326], + device='cuda:3'), covar=tensor([0.0619, 0.4678, 0.0456, 0.3545, 0.1113, 0.0929, 0.0536, 0.0659], + device='cuda:3'), in_proj_covar=tensor([0.0556, 0.0609, 0.0639, 0.0580, 0.0655, 0.0560, 0.0554, 0.0616], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 17:01:12,979 INFO [train.py:901] (3/4) Epoch 15, batch 5300, loss[loss=0.1983, simple_loss=0.2907, pruned_loss=0.05293, over 8552.00 frames. 
], tot_loss[loss=0.2195, simple_loss=0.2981, pruned_loss=0.07048, over 1610343.42 frames. ], batch size: 31, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:01:14,347 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.534e+02 2.995e+02 3.765e+02 8.916e+02, threshold=5.991e+02, percent-clipped=4.0 +2023-02-06 17:01:24,824 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 17:01:47,927 INFO [train.py:901] (3/4) Epoch 15, batch 5350, loss[loss=0.1833, simple_loss=0.2554, pruned_loss=0.05559, over 7694.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2972, pruned_loss=0.06991, over 1607865.93 frames. ], batch size: 18, lr: 5.06e-03, grad_scale: 8.0 +2023-02-06 17:01:50,886 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118517.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:01:52,212 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2343, 3.1707, 2.8550, 1.6433, 2.8870, 2.8930, 2.8879, 2.8009], + device='cuda:3'), covar=tensor([0.1363, 0.0962, 0.1805, 0.4868, 0.1203, 0.1272, 0.1863, 0.1193], + device='cuda:3'), in_proj_covar=tensor([0.0487, 0.0402, 0.0408, 0.0505, 0.0401, 0.0410, 0.0391, 0.0354], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 17:02:01,054 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118530.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:02:24,461 INFO [train.py:901] (3/4) Epoch 15, batch 5400, loss[loss=0.2271, simple_loss=0.3106, pruned_loss=0.07181, over 8518.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2978, pruned_loss=0.07007, over 1609866.61 frames. ], batch size: 28, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:02:25,792 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.478e+02 2.903e+02 3.717e+02 8.291e+02, threshold=5.806e+02, percent-clipped=5.0 +2023-02-06 17:02:53,355 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-02-06 17:02:55,830 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1352, 1.6046, 1.7091, 1.4171, 0.9288, 1.4803, 1.7878, 1.5853], + device='cuda:3'), covar=tensor([0.0504, 0.1197, 0.1652, 0.1365, 0.0581, 0.1467, 0.0644, 0.0638], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0157, 0.0101, 0.0162, 0.0113, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 17:02:58,969 INFO [train.py:901] (3/4) Epoch 15, batch 5450, loss[loss=0.2192, simple_loss=0.288, pruned_loss=0.07516, over 7651.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2974, pruned_loss=0.06993, over 1609763.98 frames. 
], batch size: 19, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:03:11,222 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118631.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:21,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118645.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:24,936 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=118649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:26,157 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:03:34,862 INFO [train.py:901] (3/4) Epoch 15, batch 5500, loss[loss=0.1869, simple_loss=0.2712, pruned_loss=0.05123, over 8083.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2962, pruned_loss=0.06968, over 1604268.22 frames. ], batch size: 21, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:03:36,244 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.592e+02 3.113e+02 3.610e+02 8.755e+02, threshold=6.227e+02, percent-clipped=2.0 +2023-02-06 17:03:38,383 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 17:03:54,400 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:09,096 INFO [train.py:901] (3/4) Epoch 15, batch 5550, loss[loss=0.1868, simple_loss=0.2732, pruned_loss=0.05016, over 8291.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2963, pruned_loss=0.06966, over 1605412.89 frames. ], batch size: 23, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:04:32,310 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118746.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:44,981 INFO [train.py:901] (3/4) Epoch 15, batch 5600, loss[loss=0.1943, simple_loss=0.2682, pruned_loss=0.06022, over 7703.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2972, pruned_loss=0.06986, over 1606150.98 frames. ], batch size: 18, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:04:46,293 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.537e+02 3.218e+02 3.925e+02 9.216e+02, threshold=6.435e+02, percent-clipped=4.0 +2023-02-06 17:04:47,201 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:04:52,585 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:05:09,078 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118798.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:05:14,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=118806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:05:19,156 INFO [train.py:901] (3/4) Epoch 15, batch 5650, loss[loss=0.1747, simple_loss=0.2549, pruned_loss=0.04726, over 7438.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2962, pruned_loss=0.06915, over 1608552.75 frames. ], batch size: 17, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:05:43,436 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 17:05:53,490 INFO [train.py:901] (3/4) Epoch 15, batch 5700, loss[loss=0.2724, simple_loss=0.3327, pruned_loss=0.106, over 8704.00 frames. 
], tot_loss[loss=0.2185, simple_loss=0.2975, pruned_loss=0.06974, over 1612944.54 frames. ], batch size: 34, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:05:54,818 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.491e+02 2.972e+02 3.726e+02 7.690e+02, threshold=5.944e+02, percent-clipped=5.0 +2023-02-06 17:06:00,481 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5782, 1.9761, 2.1534, 1.2919, 2.2577, 1.4939, 0.6516, 1.7905], + device='cuda:3'), covar=tensor([0.0491, 0.0254, 0.0227, 0.0423, 0.0275, 0.0651, 0.0659, 0.0253], + device='cuda:3'), in_proj_covar=tensor([0.0415, 0.0357, 0.0307, 0.0412, 0.0342, 0.0500, 0.0371, 0.0380], + device='cuda:3'), out_proj_covar=tensor([1.1472e-04, 9.6024e-05, 8.2225e-05, 1.1124e-04, 9.2625e-05, 1.4529e-04, + 1.0207e-04, 1.0334e-04], device='cuda:3') +2023-02-06 17:06:21,168 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=118901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:06:29,112 INFO [train.py:901] (3/4) Epoch 15, batch 5750, loss[loss=0.2437, simple_loss=0.3229, pruned_loss=0.08227, over 8525.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2986, pruned_loss=0.0703, over 1615677.73 frames. ], batch size: 39, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:06:38,203 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=118926.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:06:46,365 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 17:06:58,316 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 17:06:59,552 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6538, 1.3995, 4.8540, 1.8851, 4.3123, 4.0480, 4.3911, 4.2677], + device='cuda:3'), covar=tensor([0.0599, 0.4492, 0.0433, 0.3469, 0.1000, 0.0855, 0.0515, 0.0590], + device='cuda:3'), in_proj_covar=tensor([0.0554, 0.0602, 0.0632, 0.0572, 0.0649, 0.0552, 0.0549, 0.0614], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 17:07:04,200 INFO [train.py:901] (3/4) Epoch 15, batch 5800, loss[loss=0.2144, simple_loss=0.3, pruned_loss=0.06442, over 8473.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2967, pruned_loss=0.06934, over 1610753.17 frames. ], batch size: 27, lr: 5.05e-03, grad_scale: 8.0 +2023-02-06 17:07:05,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.317e+02 2.944e+02 4.100e+02 6.996e+02, threshold=5.887e+02, percent-clipped=4.0 +2023-02-06 17:07:26,177 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=118993.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:07:32,096 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119002.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:07:39,886 INFO [train.py:901] (3/4) Epoch 15, batch 5850, loss[loss=0.207, simple_loss=0.282, pruned_loss=0.06595, over 8731.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2957, pruned_loss=0.06882, over 1612811.00 frames. 
], batch size: 30, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:07:46,173 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:07:49,457 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:02,813 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:13,684 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:14,146 INFO [train.py:901] (3/4) Epoch 15, batch 5900, loss[loss=0.1863, simple_loss=0.2544, pruned_loss=0.05912, over 7675.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2953, pruned_loss=0.06899, over 1613451.99 frames. ], batch size: 18, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:08:15,366 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.486e+02 2.938e+02 3.942e+02 7.909e+02, threshold=5.877e+02, percent-clipped=6.0 +2023-02-06 17:08:30,151 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:34,143 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:44,779 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119108.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:46,723 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119111.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:08:47,874 INFO [train.py:901] (3/4) Epoch 15, batch 5950, loss[loss=0.1886, simple_loss=0.2746, pruned_loss=0.05127, over 8237.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2967, pruned_loss=0.06982, over 1613335.42 frames. ], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:09:22,903 INFO [train.py:901] (3/4) Epoch 15, batch 6000, loss[loss=0.2518, simple_loss=0.3309, pruned_loss=0.08637, over 8027.00 frames. ], tot_loss[loss=0.2197, simple_loss=0.2982, pruned_loss=0.07059, over 1616132.51 frames. ], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:09:22,904 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 17:09:35,676 INFO [train.py:935] (3/4) Epoch 15, validation: loss=0.181, simple_loss=0.2808, pruned_loss=0.04056, over 944034.00 frames. +2023-02-06 17:09:35,677 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 17:09:37,095 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.469e+02 2.578e+02 3.120e+02 3.956e+02 1.218e+03, threshold=6.240e+02, percent-clipped=5.0 +2023-02-06 17:10:05,848 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7048, 2.0120, 2.1307, 1.3582, 2.2088, 1.5077, 0.6823, 1.9988], + device='cuda:3'), covar=tensor([0.0452, 0.0245, 0.0194, 0.0456, 0.0302, 0.0647, 0.0663, 0.0229], + device='cuda:3'), in_proj_covar=tensor([0.0417, 0.0359, 0.0309, 0.0415, 0.0344, 0.0502, 0.0373, 0.0383], + device='cuda:3'), out_proj_covar=tensor([1.1503e-04, 9.6344e-05, 8.2818e-05, 1.1194e-04, 9.3071e-05, 1.4615e-04, + 1.0260e-04, 1.0407e-04], device='cuda:3') +2023-02-06 17:10:10,481 INFO [train.py:901] (3/4) Epoch 15, batch 6050, loss[loss=0.2087, simple_loss=0.2853, pruned_loss=0.06603, over 7972.00 frames. 
], tot_loss[loss=0.2198, simple_loss=0.2977, pruned_loss=0.071, over 1611578.35 frames. ], batch size: 21, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:10:30,807 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3125, 2.6059, 2.2356, 3.3410, 1.8787, 2.0586, 2.4669, 2.8539], + device='cuda:3'), covar=tensor([0.0725, 0.0808, 0.0776, 0.0422, 0.0980, 0.1183, 0.0912, 0.0702], + device='cuda:3'), in_proj_covar=tensor([0.0235, 0.0203, 0.0248, 0.0211, 0.0211, 0.0249, 0.0254, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 17:10:44,318 INFO [train.py:901] (3/4) Epoch 15, batch 6100, loss[loss=0.2447, simple_loss=0.3257, pruned_loss=0.08189, over 8543.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2984, pruned_loss=0.0715, over 1610015.33 frames. ], batch size: 39, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:10:45,657 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.463e+02 3.114e+02 4.132e+02 8.492e+02, threshold=6.229e+02, percent-clipped=7.0 +2023-02-06 17:11:03,738 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-06 17:11:13,015 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.50 vs. limit=5.0 +2023-02-06 17:11:18,268 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 17:11:20,322 INFO [train.py:901] (3/4) Epoch 15, batch 6150, loss[loss=0.2392, simple_loss=0.3278, pruned_loss=0.07526, over 8457.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.2994, pruned_loss=0.07207, over 1610831.38 frames. ], batch size: 25, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:11:54,707 INFO [train.py:901] (3/4) Epoch 15, batch 6200, loss[loss=0.3297, simple_loss=0.3721, pruned_loss=0.1436, over 7100.00 frames. ], tot_loss[loss=0.2224, simple_loss=0.2997, pruned_loss=0.07258, over 1609740.57 frames. ], batch size: 72, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:11:55,643 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:11:56,081 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.347e+02 3.204e+02 3.871e+02 7.576e+02, threshold=6.408e+02, percent-clipped=2.0 +2023-02-06 17:12:14,478 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:12:30,388 INFO [train.py:901] (3/4) Epoch 15, batch 6250, loss[loss=0.2563, simple_loss=0.334, pruned_loss=0.08927, over 8498.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.298, pruned_loss=0.07163, over 1609946.04 frames. ], batch size: 26, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:12:47,168 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119437.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:12:59,471 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119455.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:13:04,852 INFO [train.py:901] (3/4) Epoch 15, batch 6300, loss[loss=0.2117, simple_loss=0.2927, pruned_loss=0.06531, over 8018.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2985, pruned_loss=0.0713, over 1610985.62 frames. 
], batch size: 22, lr: 5.04e-03, grad_scale: 8.0 +2023-02-06 17:13:06,147 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.517e+02 3.087e+02 3.932e+02 1.134e+03, threshold=6.173e+02, percent-clipped=3.0 +2023-02-06 17:13:41,045 INFO [train.py:901] (3/4) Epoch 15, batch 6350, loss[loss=0.2525, simple_loss=0.3399, pruned_loss=0.08255, over 8510.00 frames. ], tot_loss[loss=0.2215, simple_loss=0.2996, pruned_loss=0.07167, over 1612396.46 frames. ], batch size: 26, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:13:44,744 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 17:13:53,143 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2728, 1.4301, 1.6391, 1.3867, 0.9404, 1.5487, 1.7431, 1.9128], + device='cuda:3'), covar=tensor([0.0455, 0.1301, 0.1742, 0.1400, 0.0608, 0.1519, 0.0701, 0.0551], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0157, 0.0101, 0.0163, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 17:13:53,793 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:14:07,839 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119552.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:14:15,091 INFO [train.py:901] (3/4) Epoch 15, batch 6400, loss[loss=0.2237, simple_loss=0.3107, pruned_loss=0.06836, over 8532.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2989, pruned_loss=0.07142, over 1606997.16 frames. ], batch size: 28, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:14:16,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 3.023e+02 3.752e+02 7.818e+02, threshold=6.047e+02, percent-clipped=4.0 +2023-02-06 17:14:20,031 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119570.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:14:49,977 INFO [train.py:901] (3/4) Epoch 15, batch 6450, loss[loss=0.1949, simple_loss=0.2891, pruned_loss=0.05031, over 8287.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2981, pruned_loss=0.07084, over 1607108.15 frames. ], batch size: 23, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:15:24,245 INFO [train.py:901] (3/4) Epoch 15, batch 6500, loss[loss=0.2417, simple_loss=0.3203, pruned_loss=0.08155, over 8323.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2987, pruned_loss=0.07097, over 1610500.84 frames. ], batch size: 25, lr: 5.03e-03, grad_scale: 8.0 +2023-02-06 17:15:25,570 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.561e+02 2.888e+02 3.578e+02 6.995e+02, threshold=5.776e+02, percent-clipped=4.0 +2023-02-06 17:15:38,583 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=119683.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:15:58,717 INFO [train.py:901] (3/4) Epoch 15, batch 6550, loss[loss=0.1989, simple_loss=0.2833, pruned_loss=0.05726, over 8458.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2975, pruned_loss=0.06984, over 1616243.91 frames. 
], batch size: 25, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:16:03,800 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8732, 1.6076, 2.0465, 1.7394, 1.8072, 1.9003, 1.6507, 0.6721], + device='cuda:3'), covar=tensor([0.5235, 0.4461, 0.1651, 0.2941, 0.2410, 0.2562, 0.1882, 0.4618], + device='cuda:3'), in_proj_covar=tensor([0.0912, 0.0920, 0.0758, 0.0884, 0.0955, 0.0843, 0.0717, 0.0795], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 17:16:04,741 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-06 17:16:29,732 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 17:16:34,457 INFO [train.py:901] (3/4) Epoch 15, batch 6600, loss[loss=0.2211, simple_loss=0.2961, pruned_loss=0.07301, over 7941.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2979, pruned_loss=0.07008, over 1614405.32 frames. ], batch size: 20, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:16:35,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.456e+02 2.938e+02 3.854e+02 9.901e+02, threshold=5.877e+02, percent-clipped=5.0 +2023-02-06 17:16:47,891 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 17:17:05,469 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119808.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:08,620 INFO [train.py:901] (3/4) Epoch 15, batch 6650, loss[loss=0.1793, simple_loss=0.2616, pruned_loss=0.04851, over 7810.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2985, pruned_loss=0.07029, over 1614528.26 frames. ], batch size: 19, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:17:12,198 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6893, 5.7390, 4.9652, 2.5794, 5.0903, 5.4340, 5.2849, 5.1207], + device='cuda:3'), covar=tensor([0.0557, 0.0411, 0.0988, 0.4240, 0.0724, 0.0769, 0.1118, 0.0621], + device='cuda:3'), in_proj_covar=tensor([0.0494, 0.0409, 0.0414, 0.0508, 0.0404, 0.0411, 0.0394, 0.0358], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 17:17:17,680 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=119826.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:22,300 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:36,352 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=119851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:17:44,400 INFO [train.py:901] (3/4) Epoch 15, batch 6700, loss[loss=0.2292, simple_loss=0.318, pruned_loss=0.07023, over 8393.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2993, pruned_loss=0.07073, over 1609346.97 frames. ], batch size: 49, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:17:45,754 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.601e+02 2.951e+02 3.516e+02 8.618e+02, threshold=5.902e+02, percent-clipped=2.0 +2023-02-06 17:17:51,656 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 17:17:53,450 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=119876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:18:19,555 INFO [train.py:901] (3/4) Epoch 15, batch 6750, loss[loss=0.1685, simple_loss=0.2481, pruned_loss=0.04447, over 7711.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2981, pruned_loss=0.07024, over 1609828.02 frames. ], batch size: 18, lr: 5.03e-03, grad_scale: 16.0 +2023-02-06 17:18:55,244 INFO [train.py:901] (3/4) Epoch 15, batch 6800, loss[loss=0.2482, simple_loss=0.3253, pruned_loss=0.08554, over 8241.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2983, pruned_loss=0.07012, over 1611383.60 frames. ], batch size: 24, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:18:57,356 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.559e+02 3.032e+02 3.835e+02 7.300e+02, threshold=6.064e+02, percent-clipped=2.0 +2023-02-06 17:19:03,569 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 17:19:06,737 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 17:19:15,434 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=119991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:19:24,246 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-06 17:19:32,095 INFO [train.py:901] (3/4) Epoch 15, batch 6850, loss[loss=0.2276, simple_loss=0.307, pruned_loss=0.07411, over 7789.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2974, pruned_loss=0.06934, over 1614221.41 frames. ], batch size: 19, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:19:41,651 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=120027.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:19:53,404 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 17:20:01,658 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9746, 2.2164, 1.8292, 2.6922, 1.2061, 1.5716, 1.9173, 2.1928], + device='cuda:3'), covar=tensor([0.0718, 0.0762, 0.0967, 0.0405, 0.1225, 0.1445, 0.0893, 0.0800], + device='cuda:3'), in_proj_covar=tensor([0.0235, 0.0204, 0.0248, 0.0214, 0.0212, 0.0252, 0.0255, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 17:20:02,347 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4857, 1.4123, 1.7690, 1.4197, 1.0656, 1.7849, 0.2089, 1.1747], + device='cuda:3'), covar=tensor([0.2026, 0.1532, 0.0496, 0.1191, 0.3643, 0.0563, 0.2709, 0.1516], + device='cuda:3'), in_proj_covar=tensor([0.0176, 0.0182, 0.0115, 0.0220, 0.0263, 0.0117, 0.0164, 0.0179], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 17:20:06,210 INFO [train.py:901] (3/4) Epoch 15, batch 6900, loss[loss=0.2098, simple_loss=0.2852, pruned_loss=0.06717, over 7698.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2979, pruned_loss=0.06938, over 1614335.24 frames. 
], batch size: 18, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:20:07,524 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.397e+02 2.973e+02 3.506e+02 9.980e+02, threshold=5.947e+02, percent-clipped=2.0 +2023-02-06 17:20:42,259 INFO [train.py:901] (3/4) Epoch 15, batch 6950, loss[loss=0.2405, simple_loss=0.3241, pruned_loss=0.07846, over 8245.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2987, pruned_loss=0.06967, over 1618585.71 frames. ], batch size: 24, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:21:02,421 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=120142.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:21:03,576 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 17:21:16,267 INFO [train.py:901] (3/4) Epoch 15, batch 7000, loss[loss=0.2174, simple_loss=0.3006, pruned_loss=0.06715, over 8252.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2982, pruned_loss=0.06918, over 1616898.11 frames. ], batch size: 24, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:21:17,612 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.303e+02 2.879e+02 3.620e+02 6.461e+02, threshold=5.757e+02, percent-clipped=3.0 +2023-02-06 17:21:51,887 INFO [train.py:901] (3/4) Epoch 15, batch 7050, loss[loss=0.2269, simple_loss=0.3184, pruned_loss=0.06769, over 8505.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2974, pruned_loss=0.06904, over 1614052.53 frames. ], batch size: 29, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:22:11,813 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 17:22:15,017 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:22:26,167 INFO [train.py:901] (3/4) Epoch 15, batch 7100, loss[loss=0.2091, simple_loss=0.296, pruned_loss=0.06113, over 8281.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2971, pruned_loss=0.06909, over 1612572.27 frames. ], batch size: 23, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:22:27,488 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.428e+02 3.078e+02 4.147e+02 9.225e+02, threshold=6.156e+02, percent-clipped=10.0 +2023-02-06 17:22:32,379 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:22:57,826 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.13 vs. limit=5.0 +2023-02-06 17:23:00,855 INFO [train.py:901] (3/4) Epoch 15, batch 7150, loss[loss=0.2271, simple_loss=0.3084, pruned_loss=0.07292, over 8318.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2982, pruned_loss=0.06949, over 1614562.39 frames. 
], batch size: 25, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:23:27,589 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8500, 1.5196, 1.8089, 1.3795, 0.8531, 1.6071, 1.5796, 1.4948], + device='cuda:3'), covar=tensor([0.0466, 0.1116, 0.1495, 0.1272, 0.0586, 0.1323, 0.0646, 0.0577], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 17:23:27,656 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5744, 1.8630, 3.1093, 1.4402, 2.2574, 2.0184, 1.6615, 2.0920], + device='cuda:3'), covar=tensor([0.1741, 0.2373, 0.0659, 0.4049, 0.1539, 0.2846, 0.2011, 0.2219], + device='cuda:3'), in_proj_covar=tensor([0.0499, 0.0552, 0.0537, 0.0603, 0.0626, 0.0565, 0.0495, 0.0619], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 17:23:31,185 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 17:23:35,467 INFO [train.py:901] (3/4) Epoch 15, batch 7200, loss[loss=0.2488, simple_loss=0.3187, pruned_loss=0.08942, over 6748.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2984, pruned_loss=0.06981, over 1613109.16 frames. ], batch size: 72, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:23:36,812 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.418e+02 2.853e+02 3.692e+02 6.645e+02, threshold=5.707e+02, percent-clipped=2.0 +2023-02-06 17:24:00,235 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=120398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:24:10,185 INFO [train.py:901] (3/4) Epoch 15, batch 7250, loss[loss=0.2677, simple_loss=0.3286, pruned_loss=0.1033, over 6960.00 frames. ], tot_loss[loss=0.22, simple_loss=0.2988, pruned_loss=0.0706, over 1611512.88 frames. ], batch size: 72, lr: 5.02e-03, grad_scale: 16.0 +2023-02-06 17:24:17,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=120423.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:24:29,231 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5587, 2.0064, 3.2217, 1.3625, 2.3108, 1.9713, 1.6704, 2.3746], + device='cuda:3'), covar=tensor([0.1823, 0.2368, 0.0744, 0.4220, 0.1669, 0.2878, 0.2019, 0.2152], + device='cuda:3'), in_proj_covar=tensor([0.0501, 0.0554, 0.0539, 0.0609, 0.0629, 0.0568, 0.0498, 0.0621], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 17:24:45,982 INFO [train.py:901] (3/4) Epoch 15, batch 7300, loss[loss=0.1937, simple_loss=0.2764, pruned_loss=0.05554, over 8138.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2988, pruned_loss=0.07092, over 1611218.01 frames. 
], batch size: 22, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:24:47,343 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.594e+02 2.423e+02 2.925e+02 3.483e+02 5.889e+02, threshold=5.849e+02, percent-clipped=3.0 +2023-02-06 17:25:18,689 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0046, 1.7202, 2.5919, 1.4356, 2.1268, 2.9097, 2.8342, 2.5904], + device='cuda:3'), covar=tensor([0.0793, 0.1282, 0.0665, 0.1726, 0.1594, 0.0249, 0.0746, 0.0476], + device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0306, 0.0272, 0.0301, 0.0285, 0.0248, 0.0379, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 17:25:20,537 INFO [train.py:901] (3/4) Epoch 15, batch 7350, loss[loss=0.211, simple_loss=0.3065, pruned_loss=0.0577, over 8594.00 frames. ], tot_loss[loss=0.2204, simple_loss=0.2989, pruned_loss=0.07093, over 1610991.64 frames. ], batch size: 31, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:25:27,945 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0432, 1.8336, 3.4366, 1.2609, 2.2922, 3.8738, 3.8743, 3.2560], + device='cuda:3'), covar=tensor([0.1137, 0.1527, 0.0374, 0.2332, 0.1061, 0.0225, 0.0514, 0.0603], + device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0306, 0.0271, 0.0301, 0.0285, 0.0248, 0.0379, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 17:25:45,380 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 17:25:56,247 INFO [train.py:901] (3/4) Epoch 15, batch 7400, loss[loss=0.2447, simple_loss=0.3158, pruned_loss=0.08678, over 8598.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.299, pruned_loss=0.07077, over 1610029.01 frames. ], batch size: 31, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:25:57,541 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.487e+02 3.190e+02 4.160e+02 9.613e+02, threshold=6.380e+02, percent-clipped=9.0 +2023-02-06 17:26:04,636 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 17:26:30,894 INFO [train.py:901] (3/4) Epoch 15, batch 7450, loss[loss=0.2717, simple_loss=0.3425, pruned_loss=0.1005, over 8505.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2989, pruned_loss=0.07076, over 1608653.93 frames. ], batch size: 26, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:26:42,796 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 17:26:47,953 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.26 vs. limit=5.0 +2023-02-06 17:27:06,390 INFO [train.py:901] (3/4) Epoch 15, batch 7500, loss[loss=0.2002, simple_loss=0.291, pruned_loss=0.05474, over 8474.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2994, pruned_loss=0.07092, over 1613187.49 frames. 
], batch size: 25, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:27:07,747 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.388e+02 2.853e+02 3.831e+02 7.536e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 17:27:27,368 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=120694.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 17:27:40,252 INFO [train.py:901] (3/4) Epoch 15, batch 7550, loss[loss=0.2088, simple_loss=0.2976, pruned_loss=0.06006, over 8467.00 frames. ], tot_loss[loss=0.2218, simple_loss=0.3002, pruned_loss=0.0717, over 1609858.19 frames. ], batch size: 25, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:27:40,386 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6138, 4.6149, 4.0836, 2.0223, 4.1019, 4.1562, 4.2351, 4.0075], + device='cuda:3'), covar=tensor([0.0793, 0.0579, 0.1216, 0.4346, 0.1003, 0.0857, 0.1338, 0.0730], + device='cuda:3'), in_proj_covar=tensor([0.0494, 0.0409, 0.0411, 0.0510, 0.0403, 0.0411, 0.0397, 0.0358], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 17:28:14,828 INFO [train.py:901] (3/4) Epoch 15, batch 7600, loss[loss=0.2252, simple_loss=0.3061, pruned_loss=0.07212, over 8489.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2979, pruned_loss=0.07064, over 1603914.31 frames. ], batch size: 26, lr: 5.01e-03, grad_scale: 16.0 +2023-02-06 17:28:16,204 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.426e+02 3.048e+02 3.965e+02 8.844e+02, threshold=6.096e+02, percent-clipped=6.0 +2023-02-06 17:28:50,143 INFO [train.py:901] (3/4) Epoch 15, batch 7650, loss[loss=0.2081, simple_loss=0.2759, pruned_loss=0.07015, over 7190.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2985, pruned_loss=0.07136, over 1606228.02 frames. ], batch size: 16, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:29:25,340 INFO [train.py:901] (3/4) Epoch 15, batch 7700, loss[loss=0.2433, simple_loss=0.3222, pruned_loss=0.08221, over 8253.00 frames. ], tot_loss[loss=0.2203, simple_loss=0.2982, pruned_loss=0.07115, over 1604575.99 frames. ], batch size: 24, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:29:27,393 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.503e+02 3.087e+02 4.175e+02 9.539e+02, threshold=6.174e+02, percent-clipped=7.0 +2023-02-06 17:29:52,776 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 17:30:01,557 INFO [train.py:901] (3/4) Epoch 15, batch 7750, loss[loss=0.1843, simple_loss=0.2649, pruned_loss=0.05181, over 7536.00 frames. ], tot_loss[loss=0.2199, simple_loss=0.2984, pruned_loss=0.0707, over 1604781.65 frames. ], batch size: 18, lr: 5.01e-03, grad_scale: 8.0 +2023-02-06 17:30:23,664 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 17:30:36,085 INFO [train.py:901] (3/4) Epoch 15, batch 7800, loss[loss=0.212, simple_loss=0.2906, pruned_loss=0.06671, over 6740.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.298, pruned_loss=0.07039, over 1606143.24 frames. 
], batch size: 71, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:30:38,105 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.815e+02 2.376e+02 2.783e+02 3.266e+02 5.993e+02, threshold=5.565e+02, percent-clipped=0.0
+2023-02-06 17:30:42,285 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6579, 1.2498, 1.4933, 1.1913, 0.8254, 1.3442, 1.4826, 1.2332],
+ device='cuda:3'), covar=tensor([0.0514, 0.1329, 0.1727, 0.1446, 0.0610, 0.1621, 0.0686, 0.0706],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0099, 0.0161, 0.0112, 0.0139],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 17:31:09,466 INFO [train.py:901] (3/4) Epoch 15, batch 7850, loss[loss=0.2244, simple_loss=0.3125, pruned_loss=0.06815, over 8515.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.2994, pruned_loss=0.07136, over 1611866.99 frames. ], batch size: 28, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:31:14,865 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121021.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:31:26,044 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121038.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:31:37,296 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8821, 1.5981, 3.3041, 1.2708, 2.2088, 3.6807, 3.9968, 2.7608],
+ device='cuda:3'), covar=tensor([0.1362, 0.1943, 0.0460, 0.2630, 0.1256, 0.0348, 0.0594, 0.0984],
+ device='cuda:3'), in_proj_covar=tensor([0.0278, 0.0307, 0.0271, 0.0299, 0.0286, 0.0247, 0.0375, 0.0297],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 17:31:42,563 INFO [train.py:901] (3/4) Epoch 15, batch 7900, loss[loss=0.2419, simple_loss=0.3313, pruned_loss=0.07628, over 8557.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.2995, pruned_loss=0.07155, over 1609617.04 frames. ], batch size: 31, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:31:44,514 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 3.139e+02 4.114e+02 1.036e+03, threshold=6.279e+02, percent-clipped=8.0
+2023-02-06 17:31:49,953 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.90 vs. limit=5.0
+2023-02-06 17:32:15,975 INFO [train.py:901] (3/4) Epoch 15, batch 7950, loss[loss=0.2101, simple_loss=0.3018, pruned_loss=0.05922, over 8264.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.2996, pruned_loss=0.07159, over 1612012.15 frames. ], batch size: 24, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:32:42,024 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121153.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:32:47,156 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7880, 5.8799, 5.1164, 2.4483, 5.2191, 5.5134, 5.4241, 5.3719],
+ device='cuda:3'), covar=tensor([0.0473, 0.0349, 0.0849, 0.4255, 0.0579, 0.0755, 0.0978, 0.0564],
+ device='cuda:3'), in_proj_covar=tensor([0.0491, 0.0404, 0.0409, 0.0513, 0.0401, 0.0407, 0.0391, 0.0357],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 17:32:48,355 INFO [train.py:901] (3/4) Epoch 15, batch 8000, loss[loss=0.2233, simple_loss=0.312, pruned_loss=0.0673, over 8434.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.2992, pruned_loss=0.07159, over 1611413.95 frames. ], batch size: 27, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:32:50,387 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.992e+02 3.696e+02 7.694e+02, threshold=5.984e+02, percent-clipped=2.0
+2023-02-06 17:33:01,515 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121182.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:33:22,842 INFO [train.py:901] (3/4) Epoch 15, batch 8050, loss[loss=0.1762, simple_loss=0.2552, pruned_loss=0.04856, over 7436.00 frames. ], tot_loss[loss=0.2209, simple_loss=0.2982, pruned_loss=0.0718, over 1599046.49 frames. ], batch size: 17, lr: 5.00e-03, grad_scale: 8.0
+2023-02-06 17:33:55,969 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-06 17:34:00,924 INFO [train.py:901] (3/4) Epoch 16, batch 0, loss[loss=0.2111, simple_loss=0.2903, pruned_loss=0.06598, over 7934.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2903, pruned_loss=0.06598, over 7934.00 frames. ], batch size: 20, lr: 4.84e-03, grad_scale: 8.0
+2023-02-06 17:34:00,924 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 17:34:11,044 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5534, 1.6174, 2.7253, 1.2827, 2.0351, 2.9180, 3.0640, 2.4904],
+ device='cuda:3'), covar=tensor([0.1363, 0.1608, 0.0449, 0.2492, 0.0954, 0.0374, 0.0562, 0.0779],
+ device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0309, 0.0271, 0.0301, 0.0288, 0.0248, 0.0377, 0.0297],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 17:34:11,910 INFO [train.py:935] (3/4) Epoch 16, validation: loss=0.1795, simple_loss=0.2801, pruned_loss=0.03944, over 944034.00 frames.
+2023-02-06 17:34:11,911 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 17:34:17,802 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3515, 1.2192, 4.5560, 1.7789, 3.9919, 3.7995, 4.0643, 3.9755],
+ device='cuda:3'), covar=tensor([0.0541, 0.5060, 0.0503, 0.3724, 0.1176, 0.1040, 0.0552, 0.0693],
+ device='cuda:3'), in_proj_covar=tensor([0.0558, 0.0611, 0.0632, 0.0582, 0.0653, 0.0565, 0.0555, 0.0614],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 17:34:24,911 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.543e+02 3.194e+02 4.084e+02 8.334e+02, threshold=6.389e+02, percent-clipped=7.0
+2023-02-06 17:34:26,238 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-06 17:34:41,512 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0
+2023-02-06 17:34:47,408 INFO [train.py:901] (3/4) Epoch 16, batch 50, loss[loss=0.1723, simple_loss=0.2499, pruned_loss=0.04737, over 7636.00 frames. ], tot_loss[loss=0.2225, simple_loss=0.3007, pruned_loss=0.07218, over 367337.95 frames. ], batch size: 17, lr: 4.84e-03, grad_scale: 8.0
+2023-02-06 17:35:00,275 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-06 17:35:02,267 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-06 17:35:09,805 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121329.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 17:35:20,725 INFO [train.py:901] (3/4) Epoch 16, batch 100, loss[loss=0.2265, simple_loss=0.308, pruned_loss=0.07249, over 8707.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2988, pruned_loss=0.07026, over 642660.39 frames. ], batch size: 34, lr: 4.84e-03, grad_scale: 8.0
+2023-02-06 17:35:24,733 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-06 17:35:33,289 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121365.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:35:33,868 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.470e+02 2.913e+02 3.674e+02 6.203e+02, threshold=5.826e+02, percent-clipped=0.0
+2023-02-06 17:35:52,045 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5543, 1.9568, 3.1784, 1.3876, 2.2633, 1.9641, 1.5747, 2.3887],
+ device='cuda:3'), covar=tensor([0.1875, 0.2464, 0.0827, 0.4150, 0.1838, 0.3051, 0.2194, 0.2086],
+ device='cuda:3'), in_proj_covar=tensor([0.0508, 0.0560, 0.0542, 0.0614, 0.0637, 0.0580, 0.0507, 0.0627],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 17:35:53,851 INFO [train.py:901] (3/4) Epoch 16, batch 150, loss[loss=0.2394, simple_loss=0.3268, pruned_loss=0.07605, over 8468.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.299, pruned_loss=0.06941, over 857593.24 frames. ], batch size: 25, lr: 4.84e-03, grad_scale: 8.0
+2023-02-06 17:36:00,966 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5444, 1.4724, 1.7692, 1.3293, 1.1252, 1.8033, 0.1125, 1.1261],
+ device='cuda:3'), covar=tensor([0.1920, 0.1563, 0.0471, 0.1209, 0.3303, 0.0523, 0.2538, 0.1442],
+ device='cuda:3'), in_proj_covar=tensor([0.0174, 0.0180, 0.0112, 0.0215, 0.0257, 0.0116, 0.0163, 0.0176],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 17:36:04,299 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121409.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:36:15,673 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121425.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:36:21,674 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121434.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:36:30,081 INFO [train.py:901] (3/4) Epoch 16, batch 200, loss[loss=0.227, simple_loss=0.3058, pruned_loss=0.07411, over 8226.00 frames. ], tot_loss[loss=0.2212, simple_loss=0.3009, pruned_loss=0.07071, over 1027932.51 frames. ], batch size: 22, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:36:33,180 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.05 vs. limit=5.0
+2023-02-06 17:36:43,676 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.385e+02 2.940e+02 3.661e+02 7.455e+02, threshold=5.881e+02, percent-clipped=4.0
+2023-02-06 17:36:53,376 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:36:56,052 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7507, 1.9571, 1.5966, 2.2661, 1.0091, 1.3918, 1.6588, 1.9508],
+ device='cuda:3'), covar=tensor([0.0773, 0.0703, 0.1001, 0.0432, 0.1184, 0.1396, 0.0832, 0.0729],
+ device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0206, 0.0252, 0.0215, 0.0216, 0.0252, 0.0257, 0.0214],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 17:36:59,470 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3172, 2.4370, 1.6009, 1.9283, 2.0117, 1.3868, 1.8784, 1.7277],
+ device='cuda:3'), covar=tensor([0.1498, 0.0364, 0.1275, 0.0706, 0.0665, 0.1545, 0.0932, 0.1045],
+ device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0233, 0.0326, 0.0303, 0.0301, 0.0330, 0.0341, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 17:37:04,013 INFO [train.py:901] (3/4) Epoch 16, batch 250, loss[loss=0.2485, simple_loss=0.3332, pruned_loss=0.08185, over 8108.00 frames. ], tot_loss[loss=0.2213, simple_loss=0.3008, pruned_loss=0.07094, over 1160244.94 frames. ], batch size: 23, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:37:07,578 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5990, 2.2039, 4.3672, 1.4660, 2.9332, 2.2986, 1.6450, 2.7851],
+ device='cuda:3'), covar=tensor([0.1885, 0.2464, 0.0666, 0.4353, 0.1911, 0.3095, 0.2247, 0.2503],
+ device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0559, 0.0542, 0.0612, 0.0636, 0.0578, 0.0505, 0.0627],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 17:37:18,650 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-06 17:37:24,828 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121526.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:37:28,150 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-06 17:37:39,702 INFO [train.py:901] (3/4) Epoch 16, batch 300, loss[loss=0.2444, simple_loss=0.3168, pruned_loss=0.08596, over 6738.00 frames. ], tot_loss[loss=0.2207, simple_loss=0.2998, pruned_loss=0.07079, over 1257060.05 frames. ], batch size: 72, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:37:54,073 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.529e+02 3.079e+02 3.820e+02 7.739e+02, threshold=6.158e+02, percent-clipped=5.0
+2023-02-06 17:38:14,124 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7689, 1.8765, 1.6449, 2.2936, 1.0090, 1.4008, 1.6756, 1.9196],
+ device='cuda:3'), covar=tensor([0.0744, 0.0754, 0.0995, 0.0498, 0.1178, 0.1499, 0.0784, 0.0763],
+ device='cuda:3'), in_proj_covar=tensor([0.0235, 0.0205, 0.0250, 0.0214, 0.0214, 0.0251, 0.0254, 0.0213],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 17:38:14,588 INFO [train.py:901] (3/4) Epoch 16, batch 350, loss[loss=0.2161, simple_loss=0.2916, pruned_loss=0.07032, over 8137.00 frames. ], tot_loss[loss=0.2211, simple_loss=0.3006, pruned_loss=0.07086, over 1338692.82 frames. ], batch size: 22, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:38:45,978 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121641.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:38:49,789 INFO [train.py:901] (3/4) Epoch 16, batch 400, loss[loss=0.2135, simple_loss=0.2933, pruned_loss=0.0668, over 8496.00 frames. ], tot_loss[loss=0.2227, simple_loss=0.3019, pruned_loss=0.07177, over 1405372.18 frames. ], batch size: 28, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:39:04,295 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.467e+02 3.087e+02 3.761e+02 6.357e+02, threshold=6.175e+02, percent-clipped=1.0
+2023-02-06 17:39:09,177 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121673.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 17:39:25,132 INFO [train.py:901] (3/4) Epoch 16, batch 450, loss[loss=0.2007, simple_loss=0.2859, pruned_loss=0.05777, over 8075.00 frames. ], tot_loss[loss=0.2229, simple_loss=0.302, pruned_loss=0.07189, over 1455911.98 frames. ], batch size: 21, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:39:52,426 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121736.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:39:59,043 INFO [train.py:901] (3/4) Epoch 16, batch 500, loss[loss=0.2294, simple_loss=0.3073, pruned_loss=0.07572, over 7648.00 frames. ], tot_loss[loss=0.2223, simple_loss=0.3017, pruned_loss=0.07148, over 1495433.27 frames. ], batch size: 19, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:40:00,111 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.07 vs. limit=5.0
+2023-02-06 17:40:10,963 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121761.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:40:14,769 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.435e+02 2.838e+02 3.555e+02 6.989e+02, threshold=5.677e+02, percent-clipped=1.0
+2023-02-06 17:40:17,014 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=121769.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:40:29,902 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121788.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 17:40:35,813 INFO [train.py:901] (3/4) Epoch 16, batch 550, loss[loss=0.2087, simple_loss=0.3011, pruned_loss=0.05811, over 8186.00 frames. ], tot_loss[loss=0.2222, simple_loss=0.3012, pruned_loss=0.0716, over 1523519.43 frames. ], batch size: 23, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:41:02,700 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9622, 1.3578, 3.2984, 1.4503, 2.2825, 3.5974, 3.6939, 3.0414],
+ device='cuda:3'), covar=tensor([0.1066, 0.1846, 0.0336, 0.2091, 0.1043, 0.0233, 0.0453, 0.0625],
+ device='cuda:3'), in_proj_covar=tensor([0.0281, 0.0310, 0.0273, 0.0301, 0.0292, 0.0248, 0.0381, 0.0298],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 17:41:09,178 INFO [train.py:901] (3/4) Epoch 16, batch 600, loss[loss=0.2248, simple_loss=0.3091, pruned_loss=0.07024, over 8326.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2999, pruned_loss=0.07058, over 1547135.38 frames. ], batch size: 25, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:41:15,570 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-06 17:41:22,439 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.425e+02 3.086e+02 4.175e+02 1.417e+03, threshold=6.173e+02, percent-clipped=9.0
+2023-02-06 17:41:26,596 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-06 17:41:36,803 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=121884.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:41:44,738 INFO [train.py:901] (3/4) Epoch 16, batch 650, loss[loss=0.2138, simple_loss=0.2879, pruned_loss=0.06988, over 8085.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2997, pruned_loss=0.07034, over 1565996.41 frames. ], batch size: 21, lr: 4.83e-03, grad_scale: 8.0
+2023-02-06 17:41:45,634 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=121897.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:41:48,937 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0
+2023-02-06 17:42:02,839 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=121922.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:42:18,674 INFO [train.py:901] (3/4) Epoch 16, batch 700, loss[loss=0.1676, simple_loss=0.2503, pruned_loss=0.04247, over 7534.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2982, pruned_loss=0.07006, over 1572237.64 frames. ], batch size: 18, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:42:32,105 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.496e+02 2.978e+02 3.542e+02 1.118e+03, threshold=5.957e+02, percent-clipped=1.0
+2023-02-06 17:42:33,588 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=121968.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:42:41,116 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-06 17:42:53,716 INFO [train.py:901] (3/4) Epoch 16, batch 750, loss[loss=0.1594, simple_loss=0.2401, pruned_loss=0.03938, over 7546.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2969, pruned_loss=0.06916, over 1584304.15 frames. ], batch size: 18, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:43:14,263 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-06 17:43:14,505 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0867, 1.2457, 1.1871, 0.6881, 1.2433, 1.0392, 0.0861, 1.2010],
+ device='cuda:3'), covar=tensor([0.0317, 0.0285, 0.0241, 0.0449, 0.0323, 0.0756, 0.0639, 0.0259],
+ device='cuda:3'), in_proj_covar=tensor([0.0411, 0.0355, 0.0306, 0.0409, 0.0341, 0.0495, 0.0361, 0.0377],
+ device='cuda:3'), out_proj_covar=tensor([1.1340e-04, 9.4978e-05, 8.1782e-05, 1.1026e-04, 9.1961e-05, 1.4363e-04,
+ 9.9360e-05, 1.0221e-04], device='cuda:3')
+2023-02-06 17:43:23,862 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-06 17:43:28,671 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122044.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 17:43:29,778 INFO [train.py:901] (3/4) Epoch 16, batch 800, loss[loss=0.2039, simple_loss=0.2751, pruned_loss=0.06635, over 7640.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2965, pruned_loss=0.06907, over 1592096.67 frames. ], batch size: 19, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:43:43,079 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.422e+02 2.925e+02 3.576e+02 6.712e+02, threshold=5.851e+02, percent-clipped=2.0
+2023-02-06 17:43:45,458 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122069.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 17:44:03,131 INFO [train.py:901] (3/4) Epoch 16, batch 850, loss[loss=0.2097, simple_loss=0.2877, pruned_loss=0.06589, over 8242.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2972, pruned_loss=0.06974, over 1598232.10 frames. ], batch size: 22, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:44:16,336 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7203, 1.6773, 2.3584, 1.5675, 1.2741, 2.3092, 0.3061, 1.2939],
+ device='cuda:3'), covar=tensor([0.2087, 0.1463, 0.0347, 0.1565, 0.2962, 0.0451, 0.2667, 0.1694],
+ device='cuda:3'), in_proj_covar=tensor([0.0174, 0.0179, 0.0112, 0.0212, 0.0255, 0.0115, 0.0161, 0.0176],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 17:44:31,619 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122135.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:44:34,942 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:44:38,760 INFO [train.py:901] (3/4) Epoch 16, batch 900, loss[loss=0.1967, simple_loss=0.283, pruned_loss=0.0552, over 8297.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2961, pruned_loss=0.06915, over 1599208.65 frames. ], batch size: 23, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:44:48,954 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6269, 1.6029, 2.0885, 1.3849, 1.1584, 2.0807, 0.2537, 1.1739],
+ device='cuda:3'), covar=tensor([0.2261, 0.1551, 0.0420, 0.1635, 0.3412, 0.0474, 0.3006, 0.1640],
+ device='cuda:3'), in_proj_covar=tensor([0.0174, 0.0179, 0.0112, 0.0212, 0.0255, 0.0115, 0.0161, 0.0176],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 17:44:52,333 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122165.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:44:52,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.482e+02 3.085e+02 4.013e+02 7.148e+02, threshold=6.170e+02, percent-clipped=4.0
+2023-02-06 17:45:01,233 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-06 17:45:12,901 INFO [train.py:901] (3/4) Epoch 16, batch 950, loss[loss=0.2427, simple_loss=0.3174, pruned_loss=0.08394, over 8338.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2958, pruned_loss=0.06902, over 1600185.85 frames. ], batch size: 25, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:45:24,863 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122213.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:45:40,122 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-06 17:45:49,028 INFO [train.py:901] (3/4) Epoch 16, batch 1000, loss[loss=0.211, simple_loss=0.2911, pruned_loss=0.06548, over 8516.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2959, pruned_loss=0.06934, over 1602423.71 frames. ], batch size: 28, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:46:03,410 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.462e+02 3.004e+02 3.600e+02 8.525e+02, threshold=6.009e+02, percent-clipped=4.0
+2023-02-06 17:46:14,166 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-06 17:46:23,692 INFO [train.py:901] (3/4) Epoch 16, batch 1050, loss[loss=0.1815, simple_loss=0.2529, pruned_loss=0.05507, over 7281.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2953, pruned_loss=0.06903, over 1601356.06 frames. ], batch size: 16, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:46:26,429 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-06 17:46:34,620 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122312.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:46:51,293 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122337.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:46:57,871 INFO [train.py:901] (3/4) Epoch 16, batch 1100, loss[loss=0.186, simple_loss=0.2676, pruned_loss=0.05216, over 7977.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2947, pruned_loss=0.06895, over 1604906.80 frames. ], batch size: 21, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:47:12,620 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.699e+02 3.204e+02 3.982e+02 8.590e+02, threshold=6.408e+02, percent-clipped=5.0
+2023-02-06 17:47:33,538 INFO [train.py:901] (3/4) Epoch 16, batch 1150, loss[loss=0.2489, simple_loss=0.3131, pruned_loss=0.09238, over 6818.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2953, pruned_loss=0.06852, over 1607366.74 frames. ], batch size: 73, lr: 4.82e-03, grad_scale: 8.0
+2023-02-06 17:47:38,316 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-06 17:47:54,844 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122427.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:48:07,552 INFO [train.py:901] (3/4) Epoch 16, batch 1200, loss[loss=0.1652, simple_loss=0.2475, pruned_loss=0.04145, over 7199.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2956, pruned_loss=0.06852, over 1606571.85 frames. ], batch size: 16, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:48:21,993 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.417e+02 3.007e+02 3.779e+02 1.089e+03, threshold=6.013e+02, percent-clipped=2.0
+2023-02-06 17:48:31,806 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:48:36,695 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122486.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:48:43,455 INFO [train.py:901] (3/4) Epoch 16, batch 1250, loss[loss=0.2335, simple_loss=0.3147, pruned_loss=0.07616, over 8110.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2967, pruned_loss=0.0689, over 1608966.03 frames. ], batch size: 23, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:49:19,089 INFO [train.py:901] (3/4) Epoch 16, batch 1300, loss[loss=0.1873, simple_loss=0.262, pruned_loss=0.05633, over 7697.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2955, pruned_loss=0.06782, over 1612095.02 frames. ], batch size: 18, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:49:26,970 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122557.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:49:28,693 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 17:49:32,897 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5983, 2.2247, 3.2339, 2.5426, 2.9849, 2.4062, 2.0723, 1.9092],
+ device='cuda:3'), covar=tensor([0.4495, 0.4728, 0.1570, 0.3248, 0.2437, 0.2512, 0.1751, 0.4834],
+ device='cuda:3'), in_proj_covar=tensor([0.0902, 0.0916, 0.0754, 0.0887, 0.0950, 0.0835, 0.0716, 0.0792],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 17:49:33,313 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 3.105e+02 3.703e+02 6.719e+02, threshold=6.210e+02, percent-clipped=4.0
+2023-02-06 17:49:54,913 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122594.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:49:56,059 INFO [train.py:901] (3/4) Epoch 16, batch 1350, loss[loss=0.249, simple_loss=0.3228, pruned_loss=0.08767, over 7967.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2955, pruned_loss=0.06779, over 1614676.39 frames. ], batch size: 21, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:50:08,293 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4905, 2.7150, 1.8660, 2.2644, 2.0754, 1.4612, 2.0199, 2.0865],
+ device='cuda:3'), covar=tensor([0.1522, 0.0396, 0.1199, 0.0630, 0.0789, 0.1612, 0.1034, 0.0883],
+ device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0238, 0.0332, 0.0307, 0.0305, 0.0332, 0.0348, 0.0316],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 17:50:31,465 INFO [train.py:901] (3/4) Epoch 16, batch 1400, loss[loss=0.1777, simple_loss=0.2573, pruned_loss=0.04906, over 7549.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2949, pruned_loss=0.06763, over 1612035.68 frames. ], batch size: 18, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:50:34,470 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4284, 2.0214, 3.2609, 1.2885, 2.3678, 1.8989, 1.5484, 2.2150],
+ device='cuda:3'), covar=tensor([0.2118, 0.2384, 0.0777, 0.4559, 0.1841, 0.3237, 0.2321, 0.2449],
+ device='cuda:3'), in_proj_covar=tensor([0.0508, 0.0560, 0.0542, 0.0610, 0.0631, 0.0575, 0.0503, 0.0624],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 17:50:45,932 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.319e+02 2.799e+02 3.491e+02 7.123e+02, threshold=5.597e+02, percent-clipped=1.0
+2023-02-06 17:50:49,435 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122672.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:50:55,480 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122681.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:50:56,987 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122683.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:51:05,777 INFO [train.py:901] (3/4) Epoch 16, batch 1450, loss[loss=0.2843, simple_loss=0.3437, pruned_loss=0.1125, over 7233.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2952, pruned_loss=0.0675, over 1614121.24 frames. ], batch size: 72, lr: 4.81e-03, grad_scale: 4.0
+2023-02-06 17:51:12,730 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 17:51:15,655 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122708.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:51:42,667 INFO [train.py:901] (3/4) Epoch 16, batch 1500, loss[loss=0.2447, simple_loss=0.3146, pruned_loss=0.08739, over 7056.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.297, pruned_loss=0.06864, over 1611746.73 frames. ], batch size: 72, lr: 4.81e-03, grad_scale: 4.0
+2023-02-06 17:51:56,875 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.290e+02 2.515e+02 3.024e+02 4.111e+02 8.238e+02, threshold=6.047e+02, percent-clipped=9.0
+2023-02-06 17:52:05,878 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1281, 1.8832, 2.6330, 2.1749, 2.4811, 2.1582, 1.8369, 1.4413],
+ device='cuda:3'), covar=tensor([0.4468, 0.4014, 0.1375, 0.2856, 0.2013, 0.2387, 0.1744, 0.4107],
+ device='cuda:3'), in_proj_covar=tensor([0.0909, 0.0923, 0.0758, 0.0893, 0.0952, 0.0840, 0.0720, 0.0796],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 17:52:16,422 INFO [train.py:901] (3/4) Epoch 16, batch 1550, loss[loss=0.2503, simple_loss=0.315, pruned_loss=0.09281, over 7919.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2973, pruned_loss=0.06917, over 1611185.39 frames. ], batch size: 20, lr: 4.81e-03, grad_scale: 4.0
+2023-02-06 17:52:16,636 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122796.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:52:21,764 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 17:52:41,664 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=122830.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:52:41,855 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7446, 2.3904, 3.4862, 2.6722, 3.2087, 2.6483, 2.3344, 1.8374],
+ device='cuda:3'), covar=tensor([0.4452, 0.4693, 0.1523, 0.3378, 0.2219, 0.2386, 0.1639, 0.5022],
+ device='cuda:3'), in_proj_covar=tensor([0.0907, 0.0921, 0.0758, 0.0893, 0.0949, 0.0838, 0.0717, 0.0794],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 17:52:52,388 INFO [train.py:901] (3/4) Epoch 16, batch 1600, loss[loss=0.2556, simple_loss=0.3284, pruned_loss=0.09136, over 8395.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.06901, over 1612684.40 frames. ], batch size: 49, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:52:55,439 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122850.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:53:07,647 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.555e+02 3.178e+02 4.067e+02 1.179e+03, threshold=6.355e+02, percent-clipped=12.0
+2023-02-06 17:53:13,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:53:23,677 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=122890.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:53:27,661 INFO [train.py:901] (3/4) Epoch 16, batch 1650, loss[loss=0.1958, simple_loss=0.2712, pruned_loss=0.06025, over 7229.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2962, pruned_loss=0.06807, over 1615295.78 frames. ], batch size: 16, lr: 4.81e-03, grad_scale: 8.0
+2023-02-06 17:53:28,081 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 17:53:28,525 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7921, 1.7045, 1.8369, 1.5236, 1.1766, 1.5281, 2.1011, 2.1835],
+ device='cuda:3'), covar=tensor([0.0436, 0.1141, 0.1681, 0.1403, 0.0592, 0.1449, 0.0640, 0.0536],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0150, 0.0188, 0.0155, 0.0099, 0.0161, 0.0113, 0.0138],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 17:53:49,582 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=122928.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:54:02,349 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=122945.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:54:02,852 INFO [train.py:901] (3/4) Epoch 16, batch 1700, loss[loss=0.215, simple_loss=0.2923, pruned_loss=0.0689, over 7788.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2955, pruned_loss=0.0679, over 1609115.23 frames. ], batch size: 19, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:54:08,407 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=122953.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:54:11,198 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1822, 2.5791, 3.0070, 1.6398, 3.2052, 1.7112, 1.3960, 1.9816],
+ device='cuda:3'), covar=tensor([0.0631, 0.0321, 0.0192, 0.0562, 0.0270, 0.0797, 0.0721, 0.0437],
+ device='cuda:3'), in_proj_covar=tensor([0.0417, 0.0360, 0.0313, 0.0413, 0.0346, 0.0504, 0.0368, 0.0386],
+ device='cuda:3'), out_proj_covar=tensor([1.1497e-04, 9.6637e-05, 8.3694e-05, 1.1138e-04, 9.3238e-05, 1.4601e-04,
+ 1.0098e-04, 1.0457e-04], device='cuda:3')
+2023-02-06 17:54:17,605 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.342e+02 2.881e+02 3.479e+02 7.679e+02, threshold=5.763e+02, percent-clipped=3.0
+2023-02-06 17:54:38,077 INFO [train.py:901] (3/4) Epoch 16, batch 1750, loss[loss=0.2455, simple_loss=0.3244, pruned_loss=0.08327, over 8544.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2949, pruned_loss=0.06772, over 1606917.43 frames. ], batch size: 49, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:55:12,116 INFO [train.py:901] (3/4) Epoch 16, batch 1800, loss[loss=0.1739, simple_loss=0.253, pruned_loss=0.04739, over 7444.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2966, pruned_loss=0.06863, over 1612886.08 frames. ], batch size: 17, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:55:16,362 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123052.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:55:27,703 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.489e+02 2.922e+02 3.750e+02 7.056e+02, threshold=5.843e+02, percent-clipped=4.0
+2023-02-06 17:55:35,411 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123077.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:55:48,780 INFO [train.py:901] (3/4) Epoch 16, batch 1850, loss[loss=0.2438, simple_loss=0.3161, pruned_loss=0.08571, over 8134.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2949, pruned_loss=0.06788, over 1612412.10 frames. ], batch size: 22, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:56:22,235 INFO [train.py:901] (3/4) Epoch 16, batch 1900, loss[loss=0.2435, simple_loss=0.3241, pruned_loss=0.08143, over 8360.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2955, pruned_loss=0.068, over 1617268.57 frames. ], batch size: 24, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:56:36,269 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.569e+02 3.077e+02 4.069e+02 9.708e+02, threshold=6.154e+02, percent-clipped=7.0
+2023-02-06 17:56:57,742 INFO [train.py:901] (3/4) Epoch 16, batch 1950, loss[loss=0.2626, simple_loss=0.3451, pruned_loss=0.09008, over 8334.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2967, pruned_loss=0.06841, over 1622019.40 frames. ], batch size: 26, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:56:59,132 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 17:57:01,388 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123201.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:57:11,338 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 17:57:18,815 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123226.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:57:24,081 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123234.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:57:30,732 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 17:57:32,102 INFO [train.py:901] (3/4) Epoch 16, batch 2000, loss[loss=0.2468, simple_loss=0.3212, pruned_loss=0.08617, over 8699.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.06895, over 1618324.22 frames. ], batch size: 49, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:57:46,349 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.202e+02 2.631e+02 3.355e+02 6.225e+02, threshold=5.263e+02, percent-clipped=1.0
+2023-02-06 17:58:05,880 INFO [train.py:901] (3/4) Epoch 16, batch 2050, loss[loss=0.1882, simple_loss=0.2717, pruned_loss=0.05235, over 7975.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2961, pruned_loss=0.06844, over 1615426.84 frames. ], batch size: 21, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:58:31,864 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123332.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:58:41,131 INFO [train.py:901] (3/4) Epoch 16, batch 2100, loss[loss=0.2367, simple_loss=0.3196, pruned_loss=0.07691, over 8555.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2964, pruned_loss=0.06831, over 1616799.98 frames. ], batch size: 39, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:58:43,351 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 17:58:54,983 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.517e+02 3.000e+02 3.631e+02 1.037e+03, threshold=6.000e+02, percent-clipped=6.0
+2023-02-06 17:59:14,280 INFO [train.py:901] (3/4) Epoch 16, batch 2150, loss[loss=0.2654, simple_loss=0.3331, pruned_loss=0.09879, over 8644.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2975, pruned_loss=0.06912, over 1615663.64 frames. ], batch size: 34, lr: 4.80e-03, grad_scale: 8.0
+2023-02-06 17:59:19,821 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4155, 2.1045, 2.8701, 2.3416, 2.8812, 2.3871, 2.0711, 1.4895],
+ device='cuda:3'), covar=tensor([0.4623, 0.4486, 0.1530, 0.3299, 0.2048, 0.2612, 0.1825, 0.4845],
+ device='cuda:3'), in_proj_covar=tensor([0.0920, 0.0928, 0.0763, 0.0900, 0.0964, 0.0846, 0.0720, 0.0803],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 17:59:22,909 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9811, 6.1516, 5.3109, 2.3863, 5.3429, 5.6790, 5.7180, 5.3766],
+ device='cuda:3'), covar=tensor([0.0496, 0.0310, 0.0806, 0.4227, 0.0664, 0.0741, 0.0889, 0.0557],
+ device='cuda:3'), in_proj_covar=tensor([0.0493, 0.0410, 0.0410, 0.0511, 0.0404, 0.0410, 0.0397, 0.0358],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 17:59:50,133 INFO [train.py:901] (3/4) Epoch 16, batch 2200, loss[loss=0.217, simple_loss=0.3046, pruned_loss=0.06465, over 8620.00 frames. ], tot_loss[loss=0.2198, simple_loss=0.299, pruned_loss=0.07026, over 1612725.72 frames. ], batch size: 31, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 17:59:50,273 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:00:04,144 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.694e+02 3.295e+02 4.036e+02 1.292e+03, threshold=6.590e+02, percent-clipped=6.0
+2023-02-06 18:00:23,391 INFO [train.py:901] (3/4) Epoch 16, batch 2250, loss[loss=0.2132, simple_loss=0.2961, pruned_loss=0.06509, over 8521.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.298, pruned_loss=0.06932, over 1616787.91 frames. ], batch size: 39, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:00:46,373 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0
+2023-02-06 18:00:58,616 INFO [train.py:901] (3/4) Epoch 16, batch 2300, loss[loss=0.2053, simple_loss=0.2738, pruned_loss=0.06843, over 7779.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2974, pruned_loss=0.06938, over 1611716.19 frames. ], batch size: 19, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:01:13,226 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.374e+02 2.935e+02 3.719e+02 2.594e+03, threshold=5.871e+02, percent-clipped=2.0
+2023-02-06 18:01:22,769 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5673, 1.9405, 2.0205, 1.3365, 2.1448, 1.5189, 0.5487, 1.8223],
+ device='cuda:3'), covar=tensor([0.0476, 0.0269, 0.0215, 0.0420, 0.0328, 0.0769, 0.0701, 0.0244],
+ device='cuda:3'), in_proj_covar=tensor([0.0423, 0.0364, 0.0313, 0.0416, 0.0350, 0.0507, 0.0372, 0.0389],
+ device='cuda:3'), out_proj_covar=tensor([1.1640e-04, 9.7522e-05, 8.3455e-05, 1.1214e-04, 9.4408e-05, 1.4701e-04,
+ 1.0201e-04, 1.0529e-04], device='cuda:3')
+2023-02-06 18:01:32,632 INFO [train.py:901] (3/4) Epoch 16, batch 2350, loss[loss=0.199, simple_loss=0.2808, pruned_loss=0.05863, over 8470.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2958, pruned_loss=0.06847, over 1614329.27 frames. ], batch size: 29, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:01:38,853 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=123605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:01:55,687 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=123630.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:02:06,132 INFO [train.py:901] (3/4) Epoch 16, batch 2400, loss[loss=0.2412, simple_loss=0.3116, pruned_loss=0.08533, over 8027.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2955, pruned_loss=0.06831, over 1615557.47 frames. ], batch size: 22, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:02:22,333 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.321e+02 3.011e+02 3.485e+02 7.740e+02, threshold=6.021e+02, percent-clipped=5.0
+2023-02-06 18:02:28,436 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123676.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:02:42,480 INFO [train.py:901] (3/4) Epoch 16, batch 2450, loss[loss=0.1905, simple_loss=0.2728, pruned_loss=0.05409, over 8248.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2946, pruned_loss=0.06785, over 1611394.38 frames. ], batch size: 22, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:03:15,350 INFO [train.py:901] (3/4) Epoch 16, batch 2500, loss[loss=0.2233, simple_loss=0.3012, pruned_loss=0.07271, over 8196.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2964, pruned_loss=0.06894, over 1612202.54 frames. ], batch size: 23, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:03:29,367 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.388e+02 3.009e+02 3.987e+02 1.163e+03, threshold=6.019e+02, percent-clipped=7.0
+2023-02-06 18:03:46,951 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=123790.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:03:47,744 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123791.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:03:50,919 INFO [train.py:901] (3/4) Epoch 16, batch 2550, loss[loss=0.2164, simple_loss=0.2907, pruned_loss=0.07103, over 8245.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2964, pruned_loss=0.06927, over 1611271.86 frames. ], batch size: 22, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:04:24,938 INFO [train.py:901] (3/4) Epoch 16, batch 2600, loss[loss=0.2248, simple_loss=0.2974, pruned_loss=0.07609, over 7345.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2974, pruned_loss=0.06959, over 1616160.04 frames. ], batch size: 71, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:04:38,926 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.835e+02 2.447e+02 2.814e+02 3.524e+02 5.517e+02, threshold=5.629e+02, percent-clipped=0.0
+2023-02-06 18:04:54,351 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=123890.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:04:58,763 INFO [train.py:901] (3/4) Epoch 16, batch 2650, loss[loss=0.2489, simple_loss=0.3244, pruned_loss=0.08672, over 8356.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.297, pruned_loss=0.06939, over 1615481.72 frames. ], batch size: 24, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:05:01,631 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0451, 1.6423, 1.4557, 1.5388, 1.3707, 1.2762, 1.2873, 1.3011],
+ device='cuda:3'), covar=tensor([0.1054, 0.0410, 0.1145, 0.0520, 0.0734, 0.1386, 0.0895, 0.0775],
+ device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0233, 0.0328, 0.0304, 0.0302, 0.0334, 0.0348, 0.0313],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 18:05:06,373 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=123905.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:05:34,176 INFO [train.py:901] (3/4) Epoch 16, batch 2700, loss[loss=0.2904, simple_loss=0.3462, pruned_loss=0.1173, over 6890.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2971, pruned_loss=0.06965, over 1615652.87 frames. ], batch size: 71, lr: 4.79e-03, grad_scale: 8.0
+2023-02-06 18:05:48,214 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.455e+02 3.188e+02 4.135e+02 8.908e+02, threshold=6.377e+02, percent-clipped=7.0
+2023-02-06 18:06:04,519 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4956, 2.6632, 2.2524, 3.7071, 1.6563, 2.0663, 2.2615, 2.9004],
+ device='cuda:3'), covar=tensor([0.0668, 0.0951, 0.0813, 0.0355, 0.1137, 0.1237, 0.1117, 0.0748],
+ device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0203, 0.0250, 0.0211, 0.0211, 0.0249, 0.0254, 0.0213],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 18:06:07,605 INFO [train.py:901] (3/4) Epoch 16, batch 2750, loss[loss=0.1828, simple_loss=0.2541, pruned_loss=0.05569, over 7401.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2959, pruned_loss=0.06889, over 1615182.53 frames. ], batch size: 17, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:06:24,199 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7251, 1.6874, 2.2521, 1.6049, 1.1798, 2.3056, 0.4050, 1.2268],
+ device='cuda:3'), covar=tensor([0.2279, 0.1521, 0.0420, 0.1500, 0.3443, 0.0371, 0.2653, 0.1846],
+ device='cuda:3'), in_proj_covar=tensor([0.0176, 0.0183, 0.0115, 0.0215, 0.0259, 0.0118, 0.0165, 0.0178],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 18:06:45,091 INFO [train.py:901] (3/4) Epoch 16, batch 2800, loss[loss=0.1845, simple_loss=0.2593, pruned_loss=0.05487, over 7931.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2951, pruned_loss=0.06861, over 1605325.89 frames. ], batch size: 20, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:06:46,015 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124047.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:06:50,843 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124054.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:06:59,429 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 2.578e+02 3.039e+02 4.001e+02 1.196e+03, threshold=6.079e+02, percent-clipped=5.0
+2023-02-06 18:06:59,558 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5184, 4.4482, 4.0015, 2.3614, 3.9963, 4.1703, 4.1252, 3.9249],
+ device='cuda:3'), covar=tensor([0.0652, 0.0563, 0.0967, 0.4182, 0.0745, 0.0872, 0.1145, 0.0762],
+ device='cuda:3'), in_proj_covar=tensor([0.0494, 0.0409, 0.0415, 0.0513, 0.0405, 0.0411, 0.0401, 0.0358],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 18:07:03,060 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124072.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:07:08,263 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124080.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:07:18,991 INFO [train.py:901] (3/4) Epoch 16, batch 2850, loss[loss=0.1709, simple_loss=0.2528, pruned_loss=0.04454, over 7807.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2954, pruned_loss=0.06872, over 1605552.89 frames. ], batch size: 20, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:07:55,339 INFO [train.py:901] (3/4) Epoch 16, batch 2900, loss[loss=0.2196, simple_loss=0.2925, pruned_loss=0.0733, over 8083.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2955, pruned_loss=0.06846, over 1609150.47 frames. ], batch size: 21, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:08:06,271 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124161.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:08:07,977 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0
+2023-02-06 18:08:08,495 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.07 vs. limit=5.0
+2023-02-06 18:08:10,038 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.456e+02 3.206e+02 4.387e+02 8.191e+02, threshold=6.412e+02, percent-clipped=4.0
+2023-02-06 18:08:22,884 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124186.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:08:29,505 INFO [train.py:901] (3/4) Epoch 16, batch 2950, loss[loss=0.1937, simple_loss=0.2589, pruned_loss=0.06426, over 7199.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2968, pruned_loss=0.06975, over 1610834.59 frames. ], batch size: 16, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:08:35,642 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 18:08:50,767 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0465, 1.5379, 1.6570, 1.3965, 0.9538, 1.4907, 1.6393, 1.5421],
+ device='cuda:3'), covar=tensor([0.0476, 0.1198, 0.1671, 0.1420, 0.0594, 0.1454, 0.0680, 0.0619],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0157, 0.0100, 0.0162, 0.0113, 0.0140],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 18:08:55,342 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124234.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:09:03,841 INFO [train.py:901] (3/4) Epoch 16, batch 3000, loss[loss=0.2062, simple_loss=0.2886, pruned_loss=0.06188, over 8539.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2971, pruned_loss=0.07009, over 1609297.92 frames. ], batch size: 49, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:09:03,841 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 18:09:16,269 INFO [train.py:935] (3/4) Epoch 16, validation: loss=0.1794, simple_loss=0.2796, pruned_loss=0.03958, over 944034.00 frames.
+2023-02-06 18:09:16,270 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 18:09:32,708 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.393e+02 2.939e+02 3.627e+02 1.404e+03, threshold=5.877e+02, percent-clipped=2.0
+2023-02-06 18:09:49,110 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124290.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:09:51,099 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0880, 1.9272, 3.1759, 1.3869, 2.2438, 3.3944, 3.4440, 2.8864],
+ device='cuda:3'), covar=tensor([0.0932, 0.1294, 0.0341, 0.1977, 0.0928, 0.0257, 0.0571, 0.0536],
+ device='cuda:3'), in_proj_covar=tensor([0.0277, 0.0305, 0.0271, 0.0297, 0.0288, 0.0248, 0.0377, 0.0295],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-06 18:09:52,920 INFO [train.py:901] (3/4) Epoch 16, batch 3050, loss[loss=0.1786, simple_loss=0.2565, pruned_loss=0.05037, over 7533.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.297, pruned_loss=0.07022, over 1609191.00 frames. ], batch size: 18, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:10:24,200 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9423, 1.6991, 2.0870, 1.8296, 2.0010, 1.9423, 1.7153, 0.7502],
+ device='cuda:3'), covar=tensor([0.4946, 0.4114, 0.1571, 0.2927, 0.1993, 0.2445, 0.1744, 0.4426],
+ device='cuda:3'), in_proj_covar=tensor([0.0915, 0.0924, 0.0756, 0.0894, 0.0961, 0.0847, 0.0723, 0.0796],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 18:10:25,956 INFO [train.py:901] (3/4) Epoch 16, batch 3100, loss[loss=0.2203, simple_loss=0.2828, pruned_loss=0.07892, over 7240.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2974, pruned_loss=0.07015, over 1611656.48 frames. ], batch size: 16, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:10:28,053 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124349.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:10:39,328 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124366.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 18:10:39,801 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.464e+02 2.975e+02 4.095e+02 1.383e+03, threshold=5.950e+02, percent-clipped=6.0
+2023-02-06 18:11:01,473 INFO [train.py:901] (3/4) Epoch 16, batch 3150, loss[loss=0.2041, simple_loss=0.2841, pruned_loss=0.06207, over 8341.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2963, pruned_loss=0.06947, over 1610665.98 frames. ], batch size: 26, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:11:02,939 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124398.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:11:21,450 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124424.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:11:36,597 INFO [train.py:901] (3/4) Epoch 16, batch 3200, loss[loss=0.2413, simple_loss=0.3201, pruned_loss=0.08125, over 8618.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2967, pruned_loss=0.0699, over 1611397.24 frames. ], batch size: 31, lr: 4.78e-03, grad_scale: 8.0
+2023-02-06 18:11:39,414 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124450.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:11:43,517 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.10 vs. limit=5.0
+2023-02-06 18:11:50,435 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.552e+02 3.102e+02 3.772e+02 6.284e+02, threshold=6.205e+02, percent-clipped=3.0
+2023-02-06 18:12:07,430 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0811, 1.7756, 1.9944, 1.7257, 0.8817, 1.7987, 2.3463, 2.1964],
+ device='cuda:3'), covar=tensor([0.0430, 0.1195, 0.1599, 0.1317, 0.0615, 0.1412, 0.0590, 0.0553],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0157, 0.0100, 0.0162, 0.0113, 0.0140],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 18:12:09,966 INFO [train.py:901] (3/4) Epoch 16, batch 3250, loss[loss=0.2215, simple_loss=0.3003, pruned_loss=0.07137, over 8248.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2967, pruned_loss=0.06979, over 1613791.65 frames. ], batch size: 24, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:12:23,042 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124513.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:12:40,938 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124539.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:12:45,402 INFO [train.py:901] (3/4) Epoch 16, batch 3300, loss[loss=0.193, simple_loss=0.2731, pruned_loss=0.05649, over 7433.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2956, pruned_loss=0.06865, over 1614496.90 frames. ], batch size: 17, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:12:59,420 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.425e+02 2.919e+02 3.659e+02 6.879e+02, threshold=5.837e+02, percent-clipped=1.0
+2023-02-06 18:13:18,844 INFO [train.py:901] (3/4) Epoch 16, batch 3350, loss[loss=0.2139, simple_loss=0.2945, pruned_loss=0.06662, over 8766.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2958, pruned_loss=0.06885, over 1613320.91 frames. ], batch size: 40, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:13:23,904 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5625, 1.4887, 4.4647, 1.8164, 2.4828, 5.1599, 5.2045, 4.4719],
+ device='cuda:3'), covar=tensor([0.1025, 0.1927, 0.0308, 0.2032, 0.1245, 0.0173, 0.0379, 0.0549],
+ device='cuda:3'), in_proj_covar=tensor([0.0279, 0.0308, 0.0275, 0.0300, 0.0292, 0.0249, 0.0382, 0.0299],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 18:13:25,356 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124605.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:13:32,076 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4170, 2.0678, 2.8788, 2.3051, 2.7743, 2.3419, 1.9973, 1.4785],
+ device='cuda:3'), covar=tensor([0.4703, 0.4700, 0.1564, 0.3138, 0.2066, 0.2705, 0.1805, 0.4986],
+ device='cuda:3'), in_proj_covar=tensor([0.0913, 0.0923, 0.0758, 0.0894, 0.0960, 0.0846, 0.0722, 0.0797],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 18:13:43,506 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124630.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:13:46,713 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124634.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:13:54,715 INFO [train.py:901] (3/4) Epoch 16, batch 3400, loss[loss=0.1842, simple_loss=0.2585, pruned_loss=0.05494, over 7704.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2958, pruned_loss=0.06865, over 1614230.49 frames. ], batch size: 18, lr: 4.77e-03, grad_scale: 8.0
+2023-02-06 18:14:01,621 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124656.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:14:08,826 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.420e+02 3.011e+02 3.525e+02 7.222e+02, threshold=6.022e+02, percent-clipped=3.0
+2023-02-06 18:14:15,310 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3009, 1.2363, 2.3815, 1.1717, 1.9868, 2.5004, 2.6338, 2.1202],
+ device='cuda:3'), covar=tensor([0.1189, 0.1456, 0.0469, 0.2136, 0.0856, 0.0396, 0.0630, 0.0729],
+ device='cuda:3'), in_proj_covar=tensor([0.0279, 0.0309, 0.0274, 0.0300, 0.0291, 0.0249, 0.0382, 0.0297],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-06 18:14:18,584 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124681.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:14:19,314 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7180, 1.6056, 2.8059, 1.2887, 2.0994, 3.0166, 3.1337, 2.5291],
+ device='cuda:3'), covar=tensor([0.1151, 0.1470, 0.0425, 0.2135, 0.0897, 0.0319, 0.0600, 0.0644],
+ device='cuda:3'), in_proj_covar=tensor([0.0278, 0.0308, 0.0273, 0.0299, 0.0290, 0.0249, 0.0381, 0.0297],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-06 18:14:28,873 INFO [train.py:901] (3/4) Epoch 16, batch 3450, loss[loss=0.2493, simple_loss=0.3269, pruned_loss=0.08585, over 8486.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2973, pruned_loss=0.0696, over 1611790.48 frames. ], batch size: 28, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:14:38,586 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124710.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 18:15:05,353 INFO [train.py:901] (3/4) Epoch 16, batch 3500, loss[loss=0.1857, simple_loss=0.2604, pruned_loss=0.05549, over 7441.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2983, pruned_loss=0.06973, over 1617121.06 frames. ], batch size: 17, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:15:07,651 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124749.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:20,546 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.534e+02 3.082e+02 3.894e+02 7.146e+02, threshold=6.164e+02, percent-clipped=3.0
+2023-02-06 18:15:22,093 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124769.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:38,229 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 18:15:38,978 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:39,090 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:39,728 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=124795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:15:40,182 INFO [train.py:901] (3/4) Epoch 16, batch 3550, loss[loss=0.1922, simple_loss=0.2632, pruned_loss=0.06064, over 7714.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2975, pruned_loss=0.06962, over 1610771.07 frames. ], batch size: 18, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:15:56,879 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=124820.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:16:00,137 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124825.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 18:16:08,892 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=124838.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:16:14,229 INFO [train.py:901] (3/4) Epoch 16, batch 3600, loss[loss=0.1992, simple_loss=0.2878, pruned_loss=0.05526, over 8584.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2969, pruned_loss=0.0692, over 1609920.79 frames. ], batch size: 39, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:16:18,570 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5878, 1.2718, 4.7640, 1.7670, 4.1870, 3.9795, 4.2845, 4.1440],
+ device='cuda:3'), covar=tensor([0.0623, 0.5202, 0.0511, 0.4057, 0.1246, 0.1029, 0.0604, 0.0750],
+ device='cuda:3'), in_proj_covar=tensor([0.0561, 0.0615, 0.0638, 0.0590, 0.0663, 0.0570, 0.0562, 0.0626],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 18:16:30,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.338e+02 2.977e+02 3.463e+02 8.977e+02, threshold=5.954e+02, percent-clipped=2.0
+2023-02-06 18:16:50,929 INFO [train.py:901] (3/4) Epoch 16, batch 3650, loss[loss=0.284, simple_loss=0.3517, pruned_loss=0.1081, over 8675.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.298, pruned_loss=0.06961, over 1612717.79 frames. ], batch size: 39, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:16:59,897 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=124909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:17:24,956 INFO [train.py:901] (3/4) Epoch 16, batch 3700, loss[loss=0.2047, simple_loss=0.2746, pruned_loss=0.06739, over 7203.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2983, pruned_loss=0.0699, over 1611382.90 frames. ], batch size: 16, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:17:38,859 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 18:17:40,140 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.880e+02 2.643e+02 3.299e+02 4.315e+02 1.525e+03, threshold=6.598e+02, percent-clipped=10.0
+2023-02-06 18:18:01,618 INFO [train.py:901] (3/4) Epoch 16, batch 3750, loss[loss=0.2271, simple_loss=0.3081, pruned_loss=0.07304, over 8330.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2978, pruned_loss=0.06946, over 1613973.21 frames. ], batch size: 25, lr: 4.77e-03, grad_scale: 16.0
+2023-02-06 18:18:04,451 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125000.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:07,878 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125005.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:18,222 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0
+2023-02-06 18:18:20,988 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125025.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:24,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125030.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:18:35,177 INFO [train.py:901] (3/4) Epoch 16, batch 3800, loss[loss=0.23, simple_loss=0.3102, pruned_loss=0.07489, over 8190.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2975, pruned_loss=0.06954, over 1609724.56 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:18:49,286 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.284e+02 2.854e+02 3.651e+02 7.015e+02, threshold=5.709e+02, percent-clipped=3.0
+2023-02-06 18:18:54,970 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8690, 1.6999, 2.4847, 1.6210, 1.2718, 2.5083, 0.3763, 1.4624],
+ device='cuda:3'), covar=tensor([0.1970, 0.1534, 0.0376, 0.1727, 0.3242, 0.0433, 0.2879, 0.1534],
+ device='cuda:3'), in_proj_covar=tensor([0.0171, 0.0180, 0.0112, 0.0211, 0.0256, 0.0115, 0.0161, 0.0175],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 18:18:58,968 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125081.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 18:19:10,722 INFO [train.py:901] (3/4) Epoch 16, batch 3850, loss[loss=0.2316, simple_loss=0.3172, pruned_loss=0.07298, over 8248.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2974, pruned_loss=0.06919, over 1614903.65 frames. ], batch size: 24, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:19:18,425 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125106.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 18:19:24,394 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:41,021 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:45,073 INFO [train.py:901] (3/4) Epoch 16, batch 3900, loss[loss=0.2366, simple_loss=0.3056, pruned_loss=0.08384, over 8596.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2982, pruned_loss=0.06981, over 1617364.02 frames. ], batch size: 31, lr: 4.76e-03, grad_scale: 16.0
+2023-02-06 18:19:45,089 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 18:19:45,237 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125146.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:56,173 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9632, 1.1436, 1.5638, 0.8988, 1.1882, 1.1228, 1.0067, 1.1052],
+ device='cuda:3'), covar=tensor([0.1222, 0.1634, 0.0602, 0.2869, 0.1174, 0.2116, 0.1498, 0.1614],
+ device='cuda:3'), in_proj_covar=tensor([0.0506, 0.0562, 0.0544, 0.0614, 0.0633, 0.0574, 0.0502, 0.0621],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 18:19:58,196 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125165.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 18:19:58,452 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs.
limit=2.0 +2023-02-06 18:19:59,306 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.507e+02 2.888e+02 3.601e+02 7.393e+02, threshold=5.777e+02, percent-clipped=3.0 +2023-02-06 18:20:09,607 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125182.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:20:14,434 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4192, 4.4085, 3.8779, 1.8553, 3.8834, 4.0086, 4.0566, 3.7005], + device='cuda:3'), covar=tensor([0.0800, 0.0593, 0.1169, 0.5230, 0.0938, 0.0860, 0.1206, 0.0934], + device='cuda:3'), in_proj_covar=tensor([0.0492, 0.0410, 0.0409, 0.0514, 0.0402, 0.0410, 0.0398, 0.0357], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:20:15,221 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125190.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:20:19,100 INFO [train.py:901] (3/4) Epoch 16, batch 3950, loss[loss=0.2128, simple_loss=0.287, pruned_loss=0.06924, over 7695.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2975, pruned_loss=0.06958, over 1611532.66 frames. ], batch size: 18, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:20:20,129 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.19 vs. limit=5.0 +2023-02-06 18:20:35,589 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.28 vs. limit=5.0 +2023-02-06 18:20:55,580 INFO [train.py:901] (3/4) Epoch 16, batch 4000, loss[loss=0.1904, simple_loss=0.2744, pruned_loss=0.05319, over 8140.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2979, pruned_loss=0.0696, over 1614315.59 frames. ], batch size: 22, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:21:09,913 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.424e+02 2.747e+02 3.530e+02 7.172e+02, threshold=5.495e+02, percent-clipped=3.0 +2023-02-06 18:21:29,134 INFO [train.py:901] (3/4) Epoch 16, batch 4050, loss[loss=0.1912, simple_loss=0.289, pruned_loss=0.04676, over 8282.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2982, pruned_loss=0.06995, over 1609653.49 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:21:29,965 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125297.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:05,148 INFO [train.py:901] (3/4) Epoch 16, batch 4100, loss[loss=0.2394, simple_loss=0.3149, pruned_loss=0.08195, over 8025.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2984, pruned_loss=0.0703, over 1608796.31 frames. ], batch size: 22, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:22:19,363 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.458e+02 2.941e+02 3.398e+02 7.943e+02, threshold=5.881e+02, percent-clipped=6.0 +2023-02-06 18:22:22,358 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125371.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:24,212 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125374.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:37,727 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:38,921 INFO [train.py:901] (3/4) Epoch 16, batch 4150, loss[loss=0.2543, simple_loss=0.3403, pruned_loss=0.08415, over 8328.00 frames. 
], tot_loss[loss=0.2208, simple_loss=0.2998, pruned_loss=0.07096, over 1612297.64 frames. ], batch size: 48, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:22:39,124 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:39,144 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125396.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:22:55,757 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125421.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:09,082 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:14,199 INFO [train.py:901] (3/4) Epoch 16, batch 4200, loss[loss=0.2275, simple_loss=0.3014, pruned_loss=0.07679, over 8194.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.298, pruned_loss=0.07013, over 1608237.03 frames. ], batch size: 23, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:23:21,476 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.78 vs. limit=5.0 +2023-02-06 18:23:29,130 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.508e+02 2.881e+02 3.373e+02 7.881e+02, threshold=5.761e+02, percent-clipped=2.0 +2023-02-06 18:23:39,930 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 18:23:44,591 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:23:48,646 INFO [train.py:901] (3/4) Epoch 16, batch 4250, loss[loss=0.2038, simple_loss=0.2911, pruned_loss=0.05824, over 8455.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2982, pruned_loss=0.06997, over 1609419.43 frames. ], batch size: 25, lr: 4.76e-03, grad_scale: 16.0 +2023-02-06 18:23:51,792 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 18:24:01,578 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 18:24:23,121 INFO [train.py:901] (3/4) Epoch 16, batch 4300, loss[loss=0.2056, simple_loss=0.276, pruned_loss=0.06756, over 7930.00 frames. ], tot_loss[loss=0.2195, simple_loss=0.2987, pruned_loss=0.07019, over 1612124.65 frames. 
], batch size: 20, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:24:28,632 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125553.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:24:37,293 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7192, 2.0450, 2.1638, 1.4136, 2.3070, 1.5729, 0.6222, 1.9859], + device='cuda:3'), covar=tensor([0.0514, 0.0274, 0.0214, 0.0462, 0.0262, 0.0691, 0.0756, 0.0216], + device='cuda:3'), in_proj_covar=tensor([0.0428, 0.0361, 0.0311, 0.0418, 0.0353, 0.0511, 0.0372, 0.0390], + device='cuda:3'), out_proj_covar=tensor([1.1756e-04, 9.6357e-05, 8.2811e-05, 1.1225e-04, 9.5326e-05, 1.4791e-04, + 1.0189e-04, 1.0563e-04], device='cuda:3') +2023-02-06 18:24:38,405 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.516e+02 3.115e+02 4.119e+02 8.810e+02, threshold=6.231e+02, percent-clipped=6.0 +2023-02-06 18:24:46,754 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125578.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:24:58,872 INFO [train.py:901] (3/4) Epoch 16, batch 4350, loss[loss=0.2086, simple_loss=0.2977, pruned_loss=0.05971, over 8016.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2975, pruned_loss=0.06999, over 1603429.88 frames. ], batch size: 22, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:25:04,019 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6396, 1.9773, 2.1231, 1.3207, 2.2770, 1.5240, 0.6736, 1.8565], + device='cuda:3'), covar=tensor([0.0524, 0.0267, 0.0186, 0.0419, 0.0270, 0.0677, 0.0708, 0.0230], + device='cuda:3'), in_proj_covar=tensor([0.0430, 0.0363, 0.0313, 0.0420, 0.0355, 0.0513, 0.0373, 0.0391], + device='cuda:3'), out_proj_covar=tensor([1.1805e-04, 9.6888e-05, 8.3259e-05, 1.1272e-04, 9.5760e-05, 1.4852e-04, + 1.0234e-04, 1.0598e-04], device='cuda:3') +2023-02-06 18:25:05,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:25:11,536 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6217, 1.8850, 1.9990, 1.2165, 2.1517, 1.3751, 0.5412, 1.8028], + device='cuda:3'), covar=tensor([0.0471, 0.0325, 0.0248, 0.0478, 0.0314, 0.0876, 0.0739, 0.0264], + device='cuda:3'), in_proj_covar=tensor([0.0429, 0.0362, 0.0312, 0.0419, 0.0354, 0.0511, 0.0373, 0.0391], + device='cuda:3'), out_proj_covar=tensor([1.1776e-04, 9.6691e-05, 8.3003e-05, 1.1244e-04, 9.5586e-05, 1.4802e-04, + 1.0216e-04, 1.0578e-04], device='cuda:3') +2023-02-06 18:25:16,539 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:25:33,364 INFO [train.py:901] (3/4) Epoch 16, batch 4400, loss[loss=0.1712, simple_loss=0.2595, pruned_loss=0.04149, over 7805.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2956, pruned_loss=0.06896, over 1601151.40 frames. ], batch size: 19, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:25:34,016 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-06 18:25:48,652 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.489e+02 3.156e+02 3.927e+02 6.760e+02, threshold=6.312e+02, percent-clipped=2.0 +2023-02-06 18:25:53,923 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. 
limit=2.0 +2023-02-06 18:26:09,567 INFO [train.py:901] (3/4) Epoch 16, batch 4450, loss[loss=0.1885, simple_loss=0.2791, pruned_loss=0.04899, over 8475.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2955, pruned_loss=0.06883, over 1601913.54 frames. ], batch size: 25, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:26:14,207 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 18:26:24,219 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:38,220 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125738.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:41,764 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=125743.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:26:43,639 INFO [train.py:901] (3/4) Epoch 16, batch 4500, loss[loss=0.1767, simple_loss=0.2672, pruned_loss=0.04311, over 8026.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.0687, over 1605178.35 frames. ], batch size: 22, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:26:57,806 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.361e+02 2.740e+02 3.373e+02 6.169e+02, threshold=5.479e+02, percent-clipped=0.0 +2023-02-06 18:27:04,077 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 18:27:10,715 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125783.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:27:15,515 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5302, 1.5138, 1.8285, 1.3190, 1.1538, 1.8142, 0.1883, 1.1363], + device='cuda:3'), covar=tensor([0.2078, 0.1426, 0.0507, 0.1189, 0.3324, 0.0498, 0.2446, 0.1496], + device='cuda:3'), in_proj_covar=tensor([0.0175, 0.0182, 0.0115, 0.0214, 0.0259, 0.0118, 0.0165, 0.0177], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 18:27:19,273 INFO [train.py:901] (3/4) Epoch 16, batch 4550, loss[loss=0.2111, simple_loss=0.2951, pruned_loss=0.0635, over 8464.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2976, pruned_loss=0.06953, over 1609250.31 frames. ], batch size: 25, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:27:45,704 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:27:54,454 INFO [train.py:901] (3/4) Epoch 16, batch 4600, loss[loss=0.2735, simple_loss=0.3354, pruned_loss=0.1058, over 6626.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2985, pruned_loss=0.07005, over 1611905.68 frames. 
], batch size: 71, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:27:59,460 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125853.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:05,128 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=125861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:08,976 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.490e+02 3.040e+02 3.897e+02 1.241e+03, threshold=6.080e+02, percent-clipped=8.0 +2023-02-06 18:28:22,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=125886.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:30,022 INFO [train.py:901] (3/4) Epoch 16, batch 4650, loss[loss=0.2644, simple_loss=0.3468, pruned_loss=0.091, over 8647.00 frames. ], tot_loss[loss=0.2205, simple_loss=0.2997, pruned_loss=0.07065, over 1612044.42 frames. ], batch size: 39, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:28:31,600 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=125898.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:28:40,004 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8711, 1.6409, 1.7015, 1.5358, 1.1289, 1.6368, 1.7903, 1.5468], + device='cuda:3'), covar=tensor([0.0571, 0.0963, 0.1344, 0.1154, 0.0613, 0.1138, 0.0702, 0.0548], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0156, 0.0101, 0.0162, 0.0114, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 18:28:59,490 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3157, 2.0089, 2.8019, 2.1678, 2.6523, 2.2432, 1.9690, 1.2988], + device='cuda:3'), covar=tensor([0.4691, 0.4760, 0.1488, 0.3164, 0.2171, 0.2632, 0.1723, 0.5145], + device='cuda:3'), in_proj_covar=tensor([0.0915, 0.0922, 0.0760, 0.0896, 0.0959, 0.0846, 0.0723, 0.0799], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 18:29:06,099 INFO [train.py:901] (3/4) Epoch 16, batch 4700, loss[loss=0.22, simple_loss=0.3154, pruned_loss=0.06223, over 8324.00 frames. ], tot_loss[loss=0.2202, simple_loss=0.2992, pruned_loss=0.07064, over 1607736.58 frames. ], batch size: 25, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:29:18,975 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=125965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:29:20,230 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.580e+02 3.138e+02 4.127e+02 1.212e+03, threshold=6.277e+02, percent-clipped=5.0 +2023-02-06 18:29:39,835 INFO [train.py:901] (3/4) Epoch 16, batch 4750, loss[loss=0.1754, simple_loss=0.2579, pruned_loss=0.04648, over 6816.00 frames. ], tot_loss[loss=0.2206, simple_loss=0.2995, pruned_loss=0.07087, over 1607869.52 frames. ], batch size: 15, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:29:55,979 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126016.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:11,192 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 18:30:13,722 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-06 18:30:16,326 INFO [train.py:901] (3/4) Epoch 16, batch 4800, loss[loss=0.2246, simple_loss=0.3044, pruned_loss=0.07242, over 8474.00 frames. ], tot_loss[loss=0.2196, simple_loss=0.2986, pruned_loss=0.07029, over 1612117.65 frames. ], batch size: 25, lr: 4.75e-03, grad_scale: 16.0 +2023-02-06 18:30:21,713 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-06 18:30:31,319 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.301e+02 2.788e+02 3.330e+02 6.705e+02, threshold=5.575e+02, percent-clipped=2.0 +2023-02-06 18:30:40,407 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:45,002 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:46,456 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:30:51,047 INFO [train.py:901] (3/4) Epoch 16, batch 4850, loss[loss=0.1961, simple_loss=0.2724, pruned_loss=0.05985, over 7654.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2969, pruned_loss=0.06902, over 1612599.38 frames. ], batch size: 19, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:30:59,987 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126109.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:01,812 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 18:31:03,309 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126114.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:19,031 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126134.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:26,988 INFO [train.py:901] (3/4) Epoch 16, batch 4900, loss[loss=0.1868, simple_loss=0.2712, pruned_loss=0.05117, over 7418.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2955, pruned_loss=0.06804, over 1612493.39 frames. ], batch size: 17, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:31:32,561 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126154.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:31:41,755 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.384e+02 3.140e+02 3.836e+02 7.587e+02, threshold=6.281e+02, percent-clipped=5.0 +2023-02-06 18:31:49,039 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 18:31:50,106 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126179.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:32:01,581 INFO [train.py:901] (3/4) Epoch 16, batch 4950, loss[loss=0.2073, simple_loss=0.2904, pruned_loss=0.06214, over 8498.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2968, pruned_loss=0.06878, over 1616924.99 frames. ], batch size: 26, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:32:06,047 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126202.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:32:35,836 INFO [train.py:901] (3/4) Epoch 16, batch 5000, loss[loss=0.2675, simple_loss=0.3261, pruned_loss=0.1044, over 6983.00 frames. 
], tot_loss[loss=0.2169, simple_loss=0.2964, pruned_loss=0.06873, over 1614959.54 frames. ], batch size: 71, lr: 4.74e-03, grad_scale: 16.0 +2023-02-06 18:32:50,295 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.421e+02 2.802e+02 3.540e+02 7.456e+02, threshold=5.603e+02, percent-clipped=2.0 +2023-02-06 18:33:10,451 INFO [train.py:901] (3/4) Epoch 16, batch 5050, loss[loss=0.1731, simple_loss=0.2495, pruned_loss=0.04837, over 7777.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2963, pruned_loss=0.06882, over 1612634.22 frames. ], batch size: 19, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:33:38,227 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126336.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:33:41,473 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 18:33:44,843 INFO [train.py:901] (3/4) Epoch 16, batch 5100, loss[loss=0.2461, simple_loss=0.3165, pruned_loss=0.08781, over 8359.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2951, pruned_loss=0.06842, over 1610969.45 frames. ], batch size: 24, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:33:55,181 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126360.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:33:56,645 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126361.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:34:01,121 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.574e+02 2.967e+02 3.773e+02 8.448e+02, threshold=5.934e+02, percent-clipped=7.0 +2023-02-06 18:34:20,690 INFO [train.py:901] (3/4) Epoch 16, batch 5150, loss[loss=0.2122, simple_loss=0.2873, pruned_loss=0.06856, over 8092.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2955, pruned_loss=0.06837, over 1614226.16 frames. ], batch size: 21, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:34:22,778 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126398.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:34:40,810 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4925, 1.4455, 1.7957, 1.2278, 1.0852, 1.7873, 0.1369, 1.1403], + device='cuda:3'), covar=tensor([0.2144, 0.1487, 0.0475, 0.1364, 0.3319, 0.0454, 0.2652, 0.1556], + device='cuda:3'), in_proj_covar=tensor([0.0175, 0.0181, 0.0113, 0.0213, 0.0259, 0.0117, 0.0164, 0.0178], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 18:34:42,900 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1970, 2.1217, 1.6684, 1.9053, 1.7682, 1.4306, 1.6178, 1.6417], + device='cuda:3'), covar=tensor([0.1323, 0.0454, 0.1182, 0.0482, 0.0678, 0.1553, 0.0893, 0.0878], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0233, 0.0326, 0.0301, 0.0300, 0.0332, 0.0344, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 18:34:54,883 INFO [train.py:901] (3/4) Epoch 16, batch 5200, loss[loss=0.2003, simple_loss=0.2789, pruned_loss=0.06086, over 8128.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2959, pruned_loss=0.06862, over 1616898.90 frames. 
], batch size: 22, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:35:03,386 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126458.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:10,023 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.450e+02 2.961e+02 4.009e+02 9.502e+02, threshold=5.923e+02, percent-clipped=8.0 +2023-02-06 18:35:10,207 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126468.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:15,119 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126475.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:15,314 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.09 vs. limit=5.0 +2023-02-06 18:35:21,834 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126483.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:35:30,964 INFO [train.py:901] (3/4) Epoch 16, batch 5250, loss[loss=0.2925, simple_loss=0.3518, pruned_loss=0.1166, over 8108.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2969, pruned_loss=0.06899, over 1616589.31 frames. ], batch size: 23, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:35:39,842 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 18:36:05,600 INFO [train.py:901] (3/4) Epoch 16, batch 5300, loss[loss=0.1902, simple_loss=0.2679, pruned_loss=0.05621, over 7563.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2963, pruned_loss=0.06903, over 1614748.95 frames. ], batch size: 18, lr: 4.74e-03, grad_scale: 8.0 +2023-02-06 18:36:20,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.415e+02 2.951e+02 3.953e+02 1.148e+03, threshold=5.902e+02, percent-clipped=4.0 +2023-02-06 18:36:21,856 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5332, 1.9769, 3.2356, 1.3112, 2.3705, 1.8359, 1.7942, 2.1876], + device='cuda:3'), covar=tensor([0.2028, 0.2677, 0.0963, 0.4947, 0.1890, 0.3540, 0.2414, 0.2654], + device='cuda:3'), in_proj_covar=tensor([0.0500, 0.0554, 0.0541, 0.0612, 0.0627, 0.0566, 0.0497, 0.0617], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:36:41,560 INFO [train.py:901] (3/4) Epoch 16, batch 5350, loss[loss=0.1856, simple_loss=0.2637, pruned_loss=0.05377, over 7691.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2961, pruned_loss=0.06908, over 1612034.39 frames. ], batch size: 18, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:16,921 INFO [train.py:901] (3/4) Epoch 16, batch 5400, loss[loss=0.202, simple_loss=0.2753, pruned_loss=0.06432, over 7810.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2956, pruned_loss=0.06913, over 1604888.48 frames. 
], batch size: 20, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:37:24,787 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6766, 2.2975, 3.3842, 2.6001, 3.1937, 2.5307, 2.3010, 1.8122], + device='cuda:3'), covar=tensor([0.4687, 0.5277, 0.1771, 0.3317, 0.2219, 0.2850, 0.1813, 0.5288], + device='cuda:3'), in_proj_covar=tensor([0.0907, 0.0915, 0.0756, 0.0891, 0.0957, 0.0841, 0.0717, 0.0795], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 18:37:32,197 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.767e+02 2.413e+02 2.875e+02 3.758e+02 9.843e+02, threshold=5.751e+02, percent-clipped=6.0 +2023-02-06 18:37:51,440 INFO [train.py:901] (3/4) Epoch 16, batch 5450, loss[loss=0.2168, simple_loss=0.3073, pruned_loss=0.06311, over 8195.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.296, pruned_loss=0.06896, over 1606738.92 frames. ], batch size: 23, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:15,727 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2291, 1.9808, 2.6900, 2.2095, 2.6089, 2.2322, 1.9587, 1.2170], + device='cuda:3'), covar=tensor([0.4607, 0.4177, 0.1592, 0.3008, 0.2020, 0.2410, 0.1604, 0.4844], + device='cuda:3'), in_proj_covar=tensor([0.0912, 0.0922, 0.0760, 0.0898, 0.0962, 0.0847, 0.0721, 0.0799], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 18:38:17,608 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=126731.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:24,946 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:28,282 INFO [train.py:901] (3/4) Epoch 16, batch 5500, loss[loss=0.2128, simple_loss=0.2728, pruned_loss=0.07635, over 6832.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2967, pruned_loss=0.06877, over 1609564.76 frames. ], batch size: 15, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:38:29,001 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 18:38:35,373 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=126756.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:38:44,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.451e+02 2.886e+02 3.496e+02 8.391e+02, threshold=5.772e+02, percent-clipped=4.0 +2023-02-06 18:39:02,241 INFO [train.py:901] (3/4) Epoch 16, batch 5550, loss[loss=0.2649, simple_loss=0.3487, pruned_loss=0.09059, over 8135.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2969, pruned_loss=0.06846, over 1613845.89 frames. ], batch size: 22, lr: 4.73e-03, grad_scale: 4.0 +2023-02-06 18:39:13,435 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=126812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:30,333 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=126834.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:38,318 INFO [train.py:901] (3/4) Epoch 16, batch 5600, loss[loss=0.2655, simple_loss=0.3419, pruned_loss=0.09452, over 8030.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2968, pruned_loss=0.06845, over 1616485.66 frames. 
], batch size: 22, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:39:45,819 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126857.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:39:54,359 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.374e+02 2.959e+02 4.088e+02 8.002e+02, threshold=5.917e+02, percent-clipped=4.0 +2023-02-06 18:40:12,816 INFO [train.py:901] (3/4) Epoch 16, batch 5650, loss[loss=0.2586, simple_loss=0.3162, pruned_loss=0.1004, over 7214.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.297, pruned_loss=0.0688, over 1613461.87 frames. ], batch size: 71, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:40:27,756 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 18:40:30,279 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7921, 1.6437, 5.9298, 2.0847, 5.3118, 4.9304, 5.4746, 5.3434], + device='cuda:3'), covar=tensor([0.0457, 0.4577, 0.0406, 0.3647, 0.1000, 0.0858, 0.0494, 0.0507], + device='cuda:3'), in_proj_covar=tensor([0.0554, 0.0610, 0.0631, 0.0581, 0.0658, 0.0562, 0.0553, 0.0618], + device='cuda:3'), out_proj_covar=tensor([0.0001, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 18:40:33,398 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 18:40:33,516 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=126927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:40:48,559 INFO [train.py:901] (3/4) Epoch 16, batch 5700, loss[loss=0.2239, simple_loss=0.3024, pruned_loss=0.07269, over 8494.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2968, pruned_loss=0.06896, over 1616982.99 frames. ], batch size: 26, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:40:52,882 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6508, 2.1287, 1.6337, 2.7517, 1.3087, 1.4477, 1.9937, 2.1387], + device='cuda:3'), covar=tensor([0.1049, 0.0902, 0.1231, 0.0451, 0.1191, 0.1717, 0.0930, 0.0975], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0202, 0.0250, 0.0213, 0.0210, 0.0249, 0.0254, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 18:40:56,307 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6988, 1.9510, 2.1548, 1.3301, 2.2808, 1.4868, 0.6064, 1.8380], + device='cuda:3'), covar=tensor([0.0495, 0.0303, 0.0251, 0.0479, 0.0278, 0.0696, 0.0746, 0.0258], + device='cuda:3'), in_proj_covar=tensor([0.0431, 0.0369, 0.0318, 0.0423, 0.0356, 0.0518, 0.0376, 0.0396], + device='cuda:3'), out_proj_covar=tensor([1.1826e-04, 9.8715e-05, 8.4573e-05, 1.1356e-04, 9.5934e-05, 1.5007e-04, + 1.0287e-04, 1.0721e-04], device='cuda:3') +2023-02-06 18:41:02,489 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.66 vs. 
limit=2.0 +2023-02-06 18:41:04,181 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.583e+02 3.205e+02 4.543e+02 7.570e+02, threshold=6.410e+02, percent-clipped=11.0 +2023-02-06 18:41:20,982 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9536, 1.5031, 1.1205, 1.3455, 1.2174, 1.0158, 1.1760, 1.1098], + device='cuda:3'), covar=tensor([0.1121, 0.0497, 0.1356, 0.0618, 0.0832, 0.1634, 0.0992, 0.0870], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0235, 0.0328, 0.0305, 0.0302, 0.0337, 0.0347, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 18:41:22,819 INFO [train.py:901] (3/4) Epoch 16, batch 5750, loss[loss=0.2458, simple_loss=0.3185, pruned_loss=0.08652, over 8109.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2967, pruned_loss=0.06878, over 1613988.59 frames. ], batch size: 21, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:41:39,590 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 18:41:56,555 INFO [train.py:901] (3/4) Epoch 16, batch 5800, loss[loss=0.1785, simple_loss=0.2522, pruned_loss=0.05242, over 7788.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2961, pruned_loss=0.06851, over 1618324.94 frames. ], batch size: 19, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:14,367 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.425e+02 2.951e+02 3.537e+02 6.549e+02, threshold=5.902e+02, percent-clipped=1.0 +2023-02-06 18:42:24,910 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 18:42:33,212 INFO [train.py:901] (3/4) Epoch 16, batch 5850, loss[loss=0.15, simple_loss=0.2246, pruned_loss=0.03772, over 6799.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2948, pruned_loss=0.06763, over 1615190.98 frames. ], batch size: 15, lr: 4.73e-03, grad_scale: 8.0 +2023-02-06 18:42:45,194 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127113.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:02,053 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127138.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:07,130 INFO [train.py:901] (3/4) Epoch 16, batch 5900, loss[loss=0.2196, simple_loss=0.2943, pruned_loss=0.07244, over 7432.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2954, pruned_loss=0.06833, over 1616155.73 frames. ], batch size: 17, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:09,359 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5794, 2.3695, 3.4861, 2.2396, 2.9312, 3.8930, 3.7876, 3.5073], + device='cuda:3'), covar=tensor([0.0786, 0.1227, 0.0567, 0.1555, 0.1327, 0.0209, 0.0543, 0.0453], + device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0312, 0.0275, 0.0301, 0.0293, 0.0250, 0.0384, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 18:43:23,002 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.337e+02 2.920e+02 3.581e+02 1.365e+03, threshold=5.840e+02, percent-clipped=5.0 +2023-02-06 18:43:27,094 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. 
limit=2.0 +2023-02-06 18:43:30,612 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:34,126 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127183.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:43:42,722 INFO [train.py:901] (3/4) Epoch 16, batch 5950, loss[loss=0.1887, simple_loss=0.2821, pruned_loss=0.04764, over 8300.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2954, pruned_loss=0.06765, over 1618478.47 frames. ], batch size: 23, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:43:51,307 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127208.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:17,704 INFO [train.py:901] (3/4) Epoch 16, batch 6000, loss[loss=0.2102, simple_loss=0.2823, pruned_loss=0.06901, over 7541.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2945, pruned_loss=0.06736, over 1614254.81 frames. ], batch size: 18, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:44:17,704 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 18:44:29,966 INFO [train.py:935] (3/4) Epoch 16, validation: loss=0.1793, simple_loss=0.2799, pruned_loss=0.03935, over 944034.00 frames. +2023-02-06 18:44:29,967 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 18:44:44,467 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:44:45,671 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.282e+02 2.976e+02 3.659e+02 8.304e+02, threshold=5.951e+02, percent-clipped=2.0 +2023-02-06 18:44:55,051 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8930, 1.5371, 3.4926, 1.3841, 2.2860, 3.8422, 3.8683, 3.3013], + device='cuda:3'), covar=tensor([0.1161, 0.1767, 0.0303, 0.2167, 0.1061, 0.0215, 0.0526, 0.0552], + device='cuda:3'), in_proj_covar=tensor([0.0280, 0.0311, 0.0274, 0.0301, 0.0293, 0.0250, 0.0383, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 18:45:01,839 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127293.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:45:03,665 INFO [train.py:901] (3/4) Epoch 16, batch 6050, loss[loss=0.2272, simple_loss=0.3111, pruned_loss=0.07164, over 8193.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2958, pruned_loss=0.06802, over 1617537.25 frames. ], batch size: 23, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:08,719 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 18:45:27,055 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 18:45:39,308 INFO [train.py:901] (3/4) Epoch 16, batch 6100, loss[loss=0.2516, simple_loss=0.3326, pruned_loss=0.08529, over 7268.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2955, pruned_loss=0.06855, over 1614942.55 frames. ], batch size: 72, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:45:46,283 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.75 vs. 
limit=2.0 +2023-02-06 18:45:55,482 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.555e+02 2.947e+02 3.627e+02 8.036e+02, threshold=5.895e+02, percent-clipped=1.0 +2023-02-06 18:46:09,096 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 18:46:13,727 INFO [train.py:901] (3/4) Epoch 16, batch 6150, loss[loss=0.216, simple_loss=0.3053, pruned_loss=0.06338, over 8248.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2952, pruned_loss=0.06883, over 1612805.58 frames. ], batch size: 24, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:36,722 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6506, 2.0894, 3.3349, 1.4214, 2.4790, 2.0655, 1.7737, 2.4372], + device='cuda:3'), covar=tensor([0.1816, 0.2457, 0.0764, 0.4287, 0.1704, 0.2862, 0.1970, 0.2107], + device='cuda:3'), in_proj_covar=tensor([0.0504, 0.0557, 0.0543, 0.0612, 0.0625, 0.0567, 0.0500, 0.0618], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:46:48,825 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127445.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:46:49,327 INFO [train.py:901] (3/4) Epoch 16, batch 6200, loss[loss=0.2228, simple_loss=0.2974, pruned_loss=0.07408, over 8017.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2932, pruned_loss=0.06788, over 1611549.76 frames. ], batch size: 22, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:46:59,667 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-06 18:47:04,720 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.656e+02 3.320e+02 4.256e+02 8.643e+02, threshold=6.639e+02, percent-clipped=4.0 +2023-02-06 18:47:23,437 INFO [train.py:901] (3/4) Epoch 16, batch 6250, loss[loss=0.2612, simple_loss=0.3364, pruned_loss=0.09299, over 7108.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2927, pruned_loss=0.06762, over 1610018.94 frames. ], batch size: 71, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:33,309 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 18:47:44,502 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9612, 2.3740, 3.5055, 1.9408, 1.6964, 3.3835, 0.6386, 2.1179], + device='cuda:3'), covar=tensor([0.1867, 0.1610, 0.0308, 0.2332, 0.3285, 0.0479, 0.3070, 0.1788], + device='cuda:3'), in_proj_covar=tensor([0.0176, 0.0186, 0.0116, 0.0215, 0.0261, 0.0121, 0.0167, 0.0181], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 18:47:57,839 INFO [train.py:901] (3/4) Epoch 16, batch 6300, loss[loss=0.2011, simple_loss=0.2894, pruned_loss=0.05636, over 8352.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.293, pruned_loss=0.06767, over 1612899.82 frames. 
], batch size: 24, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:47:58,588 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:00,675 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:14,536 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.653e+02 3.258e+02 3.936e+02 6.732e+02, threshold=6.516e+02, percent-clipped=2.0 +2023-02-06 18:48:17,993 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=127574.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:48:32,719 INFO [train.py:901] (3/4) Epoch 16, batch 6350, loss[loss=0.2109, simple_loss=0.2846, pruned_loss=0.06857, over 8192.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2934, pruned_loss=0.06795, over 1607389.45 frames. ], batch size: 23, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:48:43,692 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127611.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:07,014 INFO [train.py:901] (3/4) Epoch 16, batch 6400, loss[loss=0.1893, simple_loss=0.2686, pruned_loss=0.05498, over 7419.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2955, pruned_loss=0.06913, over 1605880.67 frames. ], batch size: 17, lr: 4.72e-03, grad_scale: 8.0 +2023-02-06 18:49:13,036 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0 +2023-02-06 18:49:24,191 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.402e+02 3.034e+02 3.710e+02 8.847e+02, threshold=6.069e+02, percent-clipped=1.0 +2023-02-06 18:49:32,559 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:49:43,220 INFO [train.py:901] (3/4) Epoch 16, batch 6450, loss[loss=0.2165, simple_loss=0.2853, pruned_loss=0.07386, over 5574.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.295, pruned_loss=0.06861, over 1607323.20 frames. ], batch size: 12, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:04,152 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127726.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:50:17,013 INFO [train.py:901] (3/4) Epoch 16, batch 6500, loss[loss=0.2687, simple_loss=0.3315, pruned_loss=0.1029, over 7978.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2965, pruned_loss=0.06976, over 1609424.75 frames. ], batch size: 21, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:32,625 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.427e+02 3.150e+02 4.006e+02 1.604e+03, threshold=6.301e+02, percent-clipped=4.0 +2023-02-06 18:50:42,621 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-06 18:50:48,410 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127789.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:50:52,950 INFO [train.py:901] (3/4) Epoch 16, batch 6550, loss[loss=0.2285, simple_loss=0.3189, pruned_loss=0.06909, over 8499.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2977, pruned_loss=0.07006, over 1613551.97 frames. 
], batch size: 26, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:50:53,787 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:50:56,242 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 18:51:17,239 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 18:51:27,459 INFO [train.py:901] (3/4) Epoch 16, batch 6600, loss[loss=0.173, simple_loss=0.2407, pruned_loss=0.05264, over 7541.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2969, pruned_loss=0.07018, over 1611481.19 frames. ], batch size: 18, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:51:36,809 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 18:51:37,277 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 18:51:42,294 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:42,776 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.405e+02 2.899e+02 3.574e+02 1.034e+03, threshold=5.799e+02, percent-clipped=3.0 +2023-02-06 18:51:57,085 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=127890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:51:57,613 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=127891.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:52:00,755 INFO [train.py:901] (3/4) Epoch 16, batch 6650, loss[loss=0.2394, simple_loss=0.3247, pruned_loss=0.07702, over 8611.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2971, pruned_loss=0.06992, over 1605710.77 frames. ], batch size: 39, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:07,586 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=127904.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 18:52:15,487 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1910, 3.1438, 2.9218, 1.7834, 2.8318, 2.8748, 2.8879, 2.6818], + device='cuda:3'), covar=tensor([0.1286, 0.0891, 0.1325, 0.4359, 0.1210, 0.1282, 0.1702, 0.1246], + device='cuda:3'), in_proj_covar=tensor([0.0498, 0.0411, 0.0415, 0.0515, 0.0406, 0.0415, 0.0406, 0.0357], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:52:31,598 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5194, 1.3899, 4.7208, 1.8525, 4.1656, 3.8890, 4.2381, 4.1157], + device='cuda:3'), covar=tensor([0.0573, 0.5243, 0.0449, 0.3940, 0.1076, 0.0919, 0.0568, 0.0693], + device='cuda:3'), in_proj_covar=tensor([0.0566, 0.0622, 0.0644, 0.0591, 0.0672, 0.0572, 0.0567, 0.0631], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 18:52:36,161 INFO [train.py:901] (3/4) Epoch 16, batch 6700, loss[loss=0.2196, simple_loss=0.3032, pruned_loss=0.068, over 8253.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2973, pruned_loss=0.06979, over 1610514.43 frames. 
], batch size: 24, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:52:52,429 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.543e+02 2.898e+02 3.564e+02 8.195e+02, threshold=5.796e+02, percent-clipped=3.0 +2023-02-06 18:53:01,213 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=127982.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:10,442 INFO [train.py:901] (3/4) Epoch 16, batch 6750, loss[loss=0.2708, simple_loss=0.3292, pruned_loss=0.1063, over 7075.00 frames. ], tot_loss[loss=0.2185, simple_loss=0.2975, pruned_loss=0.06975, over 1614410.97 frames. ], batch size: 73, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:18,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:19,184 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:32,893 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128024.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:53:47,637 INFO [train.py:901] (3/4) Epoch 16, batch 6800, loss[loss=0.2226, simple_loss=0.3091, pruned_loss=0.06805, over 8245.00 frames. ], tot_loss[loss=0.2192, simple_loss=0.2982, pruned_loss=0.07011, over 1615388.51 frames. ], batch size: 24, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:53:48,774 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 18:53:51,065 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 18:54:04,017 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.604e+02 3.143e+02 4.008e+02 8.483e+02, threshold=6.287e+02, percent-clipped=3.0 +2023-02-06 18:54:22,246 INFO [train.py:901] (3/4) Epoch 16, batch 6850, loss[loss=0.2094, simple_loss=0.2762, pruned_loss=0.07126, over 7798.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2962, pruned_loss=0.06897, over 1611743.78 frames. ], batch size: 19, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:54:37,690 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-06 18:54:40,698 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 18:54:45,024 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9277, 2.5807, 2.0425, 2.2804, 2.3344, 1.8908, 2.1624, 2.3787], + device='cuda:3'), covar=tensor([0.0946, 0.0272, 0.0768, 0.0465, 0.0472, 0.1026, 0.0674, 0.0696], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0231, 0.0323, 0.0297, 0.0299, 0.0330, 0.0341, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 18:54:49,919 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1544, 1.2402, 1.5212, 1.1720, 0.6724, 1.3204, 1.1067, 1.0433], + device='cuda:3'), covar=tensor([0.0555, 0.1264, 0.1696, 0.1441, 0.0598, 0.1535, 0.0712, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0156, 0.0100, 0.0163, 0.0114, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 18:54:53,397 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128139.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:54,710 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128141.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:54:58,077 INFO [train.py:901] (3/4) Epoch 16, batch 6900, loss[loss=0.221, simple_loss=0.3073, pruned_loss=0.06735, over 8320.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2971, pruned_loss=0.0693, over 1615853.23 frames. ], batch size: 25, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:08,138 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128160.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:55:14,244 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.605e+02 3.172e+02 3.868e+02 9.306e+02, threshold=6.344e+02, percent-clipped=5.0 +2023-02-06 18:55:25,866 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128185.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 18:55:32,799 INFO [train.py:901] (3/4) Epoch 16, batch 6950, loss[loss=0.1889, simple_loss=0.2781, pruned_loss=0.04981, over 7939.00 frames. ], tot_loss[loss=0.2182, simple_loss=0.2976, pruned_loss=0.06943, over 1618139.42 frames. ], batch size: 20, lr: 4.71e-03, grad_scale: 8.0 +2023-02-06 18:55:43,520 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:55:48,028 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 18:55:58,347 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128234.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:07,205 INFO [train.py:901] (3/4) Epoch 16, batch 7000, loss[loss=0.2469, simple_loss=0.321, pruned_loss=0.08643, over 8498.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2963, pruned_loss=0.06865, over 1620656.23 frames. 
], batch size: 26, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:56:08,134 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5408, 1.9953, 3.3828, 1.3428, 2.6539, 2.1129, 1.6388, 2.4693], + device='cuda:3'), covar=tensor([0.1869, 0.2579, 0.0767, 0.4279, 0.1577, 0.2747, 0.2088, 0.2265], + device='cuda:3'), in_proj_covar=tensor([0.0502, 0.0559, 0.0539, 0.0609, 0.0621, 0.0565, 0.0499, 0.0615], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:56:15,612 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:19,641 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:24,001 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.690e+02 3.457e+02 5.056e+02 8.270e+02, threshold=6.915e+02, percent-clipped=6.0 +2023-02-06 18:56:27,837 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-06 18:56:36,191 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128287.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:56:42,581 INFO [train.py:901] (3/4) Epoch 16, batch 7050, loss[loss=0.2403, simple_loss=0.3049, pruned_loss=0.08783, over 7922.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2977, pruned_loss=0.06905, over 1623695.14 frames. ], batch size: 20, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:03,876 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:11,324 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128338.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:16,466 INFO [train.py:901] (3/4) Epoch 16, batch 7100, loss[loss=0.1929, simple_loss=0.2679, pruned_loss=0.05897, over 7656.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2975, pruned_loss=0.0689, over 1623471.01 frames. ], batch size: 19, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:57:18,650 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:33,889 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.456e+02 3.083e+02 3.766e+02 8.441e+02, threshold=6.166e+02, percent-clipped=2.0 +2023-02-06 18:57:52,034 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128395.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:57:52,518 INFO [train.py:901] (3/4) Epoch 16, batch 7150, loss[loss=0.2293, simple_loss=0.3088, pruned_loss=0.07485, over 8548.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2958, pruned_loss=0.0679, over 1620439.35 frames. ], batch size: 49, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:09,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128420.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:17,227 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128431.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:58:27,265 INFO [train.py:901] (3/4) Epoch 16, batch 7200, loss[loss=0.1887, simple_loss=0.2703, pruned_loss=0.05351, over 7913.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.296, pruned_loss=0.06752, over 1623578.65 frames. 
], batch size: 20, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:58:41,650 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 18:58:42,535 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.378e+02 2.905e+02 3.370e+02 6.119e+02, threshold=5.810e+02, percent-clipped=0.0 +2023-02-06 18:58:45,615 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4782, 1.9043, 3.2190, 1.3267, 2.3601, 1.9322, 1.6306, 2.3371], + device='cuda:3'), covar=tensor([0.1873, 0.2572, 0.0701, 0.4403, 0.1672, 0.2994, 0.2168, 0.2072], + device='cuda:3'), in_proj_covar=tensor([0.0511, 0.0569, 0.0550, 0.0619, 0.0635, 0.0577, 0.0510, 0.0625], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:58:55,486 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2100, 1.3761, 3.3336, 1.0914, 2.9446, 2.7663, 3.0495, 2.9229], + device='cuda:3'), covar=tensor([0.0798, 0.4016, 0.0872, 0.4089, 0.1421, 0.1167, 0.0739, 0.0944], + device='cuda:3'), in_proj_covar=tensor([0.0563, 0.0622, 0.0644, 0.0591, 0.0672, 0.0575, 0.0566, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 18:58:58,272 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0691, 2.3111, 3.3613, 1.8778, 2.8489, 2.3963, 2.1220, 2.6598], + device='cuda:3'), covar=tensor([0.1382, 0.1978, 0.0593, 0.3258, 0.1252, 0.2191, 0.1631, 0.1789], + device='cuda:3'), in_proj_covar=tensor([0.0511, 0.0569, 0.0549, 0.0619, 0.0634, 0.0577, 0.0510, 0.0625], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 18:59:02,791 INFO [train.py:901] (3/4) Epoch 16, batch 7250, loss[loss=0.2197, simple_loss=0.3085, pruned_loss=0.06546, over 8359.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2969, pruned_loss=0.06776, over 1625304.70 frames. ], batch size: 24, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:13,779 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128512.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:31,344 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 18:59:37,227 INFO [train.py:901] (3/4) Epoch 16, batch 7300, loss[loss=0.1927, simple_loss=0.2638, pruned_loss=0.06079, over 7436.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2959, pruned_loss=0.06744, over 1621770.26 frames. 
], batch size: 17, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 18:59:42,187 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5002, 2.4065, 1.7210, 2.1337, 2.0125, 1.2941, 1.9391, 2.0829], + device='cuda:3'), covar=tensor([0.1092, 0.0338, 0.1054, 0.0527, 0.0610, 0.1481, 0.0834, 0.0686], + device='cuda:3'), in_proj_covar=tensor([0.0344, 0.0230, 0.0322, 0.0298, 0.0298, 0.0327, 0.0339, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 18:59:52,610 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.470e+02 2.980e+02 3.722e+02 1.252e+03, threshold=5.960e+02, percent-clipped=4.0 +2023-02-06 19:00:02,294 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:12,483 INFO [train.py:901] (3/4) Epoch 16, batch 7350, loss[loss=0.2658, simple_loss=0.3407, pruned_loss=0.09544, over 8366.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2952, pruned_loss=0.06713, over 1615328.49 frames. ], batch size: 24, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:19,523 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=128605.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:21,507 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:31,403 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 19:00:36,137 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=128630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:00:46,278 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7360, 1.6777, 2.6644, 1.3433, 2.1404, 2.8703, 2.9503, 2.4999], + device='cuda:3'), covar=tensor([0.1025, 0.1349, 0.0540, 0.2022, 0.1032, 0.0335, 0.0699, 0.0620], + device='cuda:3'), in_proj_covar=tensor([0.0282, 0.0310, 0.0273, 0.0302, 0.0293, 0.0252, 0.0385, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 19:00:47,434 INFO [train.py:901] (3/4) Epoch 16, batch 7400, loss[loss=0.2316, simple_loss=0.3141, pruned_loss=0.07453, over 8641.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2962, pruned_loss=0.06774, over 1619465.79 frames. ], batch size: 34, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:00:49,525 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 19:01:02,831 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.610e+02 3.305e+02 3.788e+02 1.058e+03, threshold=6.610e+02, percent-clipped=7.0 +2023-02-06 19:01:11,772 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128682.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:01:21,079 INFO [train.py:901] (3/4) Epoch 16, batch 7450, loss[loss=0.1917, simple_loss=0.2708, pruned_loss=0.05629, over 7652.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2952, pruned_loss=0.06735, over 1618325.39 frames. ], batch size: 19, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:01:30,646 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-06 19:01:47,212 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.98 vs. 
limit=2.0 +2023-02-06 19:01:56,807 INFO [train.py:901] (3/4) Epoch 16, batch 7500, loss[loss=0.1798, simple_loss=0.2581, pruned_loss=0.05075, over 7817.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.295, pruned_loss=0.06729, over 1621068.86 frames. ], batch size: 20, lr: 4.70e-03, grad_scale: 8.0 +2023-02-06 19:02:13,138 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.212e+02 2.417e+02 2.923e+02 3.614e+02 6.549e+02, threshold=5.847e+02, percent-clipped=0.0 +2023-02-06 19:02:17,186 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=128775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:31,139 INFO [train.py:901] (3/4) Epoch 16, batch 7550, loss[loss=0.2342, simple_loss=0.3086, pruned_loss=0.0799, over 8453.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2949, pruned_loss=0.06752, over 1619967.48 frames. ], batch size: 27, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:02:32,029 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128797.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:02:33,329 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:03:07,389 INFO [train.py:901] (3/4) Epoch 16, batch 7600, loss[loss=0.2347, simple_loss=0.3215, pruned_loss=0.07391, over 8323.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2964, pruned_loss=0.06817, over 1621163.88 frames. ], batch size: 25, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:23,260 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.439e+02 3.123e+02 4.017e+02 8.994e+02, threshold=6.245e+02, percent-clipped=5.0 +2023-02-06 19:03:31,022 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7818, 1.4109, 1.5416, 1.2889, 0.9417, 1.3846, 1.6470, 1.3446], + device='cuda:3'), covar=tensor([0.0535, 0.1304, 0.1708, 0.1486, 0.0624, 0.1558, 0.0718, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0190, 0.0156, 0.0100, 0.0161, 0.0113, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 19:03:32,443 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2942, 1.5018, 1.3394, 1.8266, 0.7514, 1.1730, 1.3584, 1.4917], + device='cuda:3'), covar=tensor([0.0981, 0.0774, 0.1038, 0.0545, 0.1191, 0.1527, 0.0755, 0.0774], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0200, 0.0246, 0.0209, 0.0207, 0.0246, 0.0249, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 19:03:38,585 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=128890.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:03:42,464 INFO [train.py:901] (3/4) Epoch 16, batch 7650, loss[loss=0.2033, simple_loss=0.2885, pruned_loss=0.05908, over 7812.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2965, pruned_loss=0.06842, over 1619754.68 frames. ], batch size: 20, lr: 4.69e-03, grad_scale: 16.0 +2023-02-06 19:03:50,518 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=128908.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:04:17,494 INFO [train.py:901] (3/4) Epoch 16, batch 7700, loss[loss=0.2145, simple_loss=0.2896, pruned_loss=0.06965, over 7421.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2976, pruned_loss=0.06922, over 1620931.61 frames. 
], batch size: 17, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:04:34,543 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.361e+02 3.016e+02 3.880e+02 7.767e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 19:04:42,146 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 19:04:52,875 INFO [train.py:901] (3/4) Epoch 16, batch 7750, loss[loss=0.1564, simple_loss=0.2441, pruned_loss=0.03436, over 7972.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2963, pruned_loss=0.06782, over 1624021.11 frames. ], batch size: 21, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:05:10,498 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3455, 1.6917, 1.6238, 0.9709, 1.6462, 1.3152, 0.3040, 1.4997], + device='cuda:3'), covar=tensor([0.0488, 0.0340, 0.0282, 0.0516, 0.0477, 0.0865, 0.0829, 0.0277], + device='cuda:3'), in_proj_covar=tensor([0.0427, 0.0365, 0.0315, 0.0423, 0.0351, 0.0512, 0.0375, 0.0394], + device='cuda:3'), out_proj_covar=tensor([1.1699e-04, 9.7428e-05, 8.3572e-05, 1.1348e-04, 9.4393e-05, 1.4788e-04, + 1.0258e-04, 1.0646e-04], device='cuda:3') +2023-02-06 19:05:13,130 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2179, 1.0926, 1.3023, 1.0648, 0.9966, 1.3317, 0.0475, 0.9198], + device='cuda:3'), covar=tensor([0.2046, 0.1659, 0.0578, 0.1168, 0.3210, 0.0606, 0.2592, 0.1529], + device='cuda:3'), in_proj_covar=tensor([0.0177, 0.0185, 0.0115, 0.0216, 0.0263, 0.0120, 0.0165, 0.0179], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 19:05:26,231 INFO [train.py:901] (3/4) Epoch 16, batch 7800, loss[loss=0.2281, simple_loss=0.3133, pruned_loss=0.07143, over 8302.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2953, pruned_loss=0.06783, over 1620353.39 frames. ], batch size: 23, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:05:31,078 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129053.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:05:34,341 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5014, 1.8932, 2.0097, 1.1100, 2.0695, 1.2916, 0.5681, 1.5855], + device='cuda:3'), covar=tensor([0.0547, 0.0325, 0.0238, 0.0555, 0.0371, 0.0929, 0.0813, 0.0328], + device='cuda:3'), in_proj_covar=tensor([0.0428, 0.0366, 0.0315, 0.0422, 0.0351, 0.0512, 0.0375, 0.0394], + device='cuda:3'), out_proj_covar=tensor([1.1713e-04, 9.7452e-05, 8.3565e-05, 1.1332e-04, 9.4473e-05, 1.4777e-04, + 1.0254e-04, 1.0635e-04], device='cuda:3') +2023-02-06 19:05:41,933 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.423e+02 2.949e+02 3.975e+02 9.373e+02, threshold=5.898e+02, percent-clipped=5.0 +2023-02-06 19:05:48,217 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:00,661 INFO [train.py:901] (3/4) Epoch 16, batch 7850, loss[loss=0.1861, simple_loss=0.2688, pruned_loss=0.0517, over 8242.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2951, pruned_loss=0.06743, over 1621604.04 frames. 
], batch size: 22, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:32,322 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129143.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:34,273 INFO [train.py:901] (3/4) Epoch 16, batch 7900, loss[loss=0.2038, simple_loss=0.2772, pruned_loss=0.0652, over 7550.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2961, pruned_loss=0.06831, over 1616396.93 frames. ], batch size: 18, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:06:34,508 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129146.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:06:51,032 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.289e+02 2.786e+02 3.620e+02 6.776e+02, threshold=5.572e+02, percent-clipped=2.0 +2023-02-06 19:06:51,881 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:03,904 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3900, 2.9278, 2.2936, 4.0516, 1.6338, 1.9502, 2.4587, 3.0239], + device='cuda:3'), covar=tensor([0.0774, 0.0840, 0.0908, 0.0260, 0.1179, 0.1433, 0.1120, 0.0840], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0202, 0.0249, 0.0211, 0.0208, 0.0248, 0.0252, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 19:07:08,442 INFO [train.py:901] (3/4) Epoch 16, batch 7950, loss[loss=0.2726, simple_loss=0.3377, pruned_loss=0.1038, over 7073.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.297, pruned_loss=0.06862, over 1614542.76 frames. ], batch size: 71, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:12,208 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5870, 1.3814, 1.5855, 1.2860, 0.8637, 1.4103, 1.4846, 1.3593], + device='cuda:3'), covar=tensor([0.0556, 0.1268, 0.1698, 0.1457, 0.0599, 0.1537, 0.0714, 0.0626], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0100, 0.0161, 0.0113, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 19:07:39,192 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7744, 1.9292, 1.7480, 2.3355, 1.0670, 1.4949, 1.6015, 1.9377], + device='cuda:3'), covar=tensor([0.0732, 0.0723, 0.0863, 0.0421, 0.1053, 0.1363, 0.0823, 0.0788], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0202, 0.0248, 0.0211, 0.0208, 0.0246, 0.0252, 0.0212], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 19:07:42,911 INFO [train.py:901] (3/4) Epoch 16, batch 8000, loss[loss=0.2155, simple_loss=0.2896, pruned_loss=0.0707, over 7975.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2969, pruned_loss=0.06858, over 1618291.30 frames. 
], batch size: 21, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:07:43,887 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4406, 1.3765, 1.7471, 1.2363, 1.2120, 1.7265, 0.3380, 1.1712], + device='cuda:3'), covar=tensor([0.1915, 0.1673, 0.0515, 0.1323, 0.3266, 0.0583, 0.2657, 0.1734], + device='cuda:3'), in_proj_covar=tensor([0.0177, 0.0185, 0.0115, 0.0217, 0.0265, 0.0121, 0.0166, 0.0181], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 19:07:47,179 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129252.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:07:51,191 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129258.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:07:59,068 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.511e+02 2.964e+02 3.601e+02 8.820e+02, threshold=5.927e+02, percent-clipped=6.0 +2023-02-06 19:08:16,579 INFO [train.py:901] (3/4) Epoch 16, batch 8050, loss[loss=0.2056, simple_loss=0.2875, pruned_loss=0.06187, over 7932.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.297, pruned_loss=0.06976, over 1600879.01 frames. ], batch size: 20, lr: 4.69e-03, grad_scale: 8.0 +2023-02-06 19:08:52,564 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 19:08:56,566 INFO [train.py:901] (3/4) Epoch 17, batch 0, loss[loss=0.211, simple_loss=0.2939, pruned_loss=0.06409, over 8501.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2939, pruned_loss=0.06409, over 8501.00 frames. ], batch size: 26, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:08:56,566 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 19:09:04,437 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5294, 1.8075, 2.6434, 1.3617, 1.9923, 1.7787, 1.6473, 1.9159], + device='cuda:3'), covar=tensor([0.1749, 0.2552, 0.0806, 0.4270, 0.1841, 0.3175, 0.2158, 0.2310], + device='cuda:3'), in_proj_covar=tensor([0.0504, 0.0561, 0.0543, 0.0612, 0.0631, 0.0570, 0.0501, 0.0620], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 19:09:07,559 INFO [train.py:935] (3/4) Epoch 17, validation: loss=0.1792, simple_loss=0.2794, pruned_loss=0.03944, over 944034.00 frames. +2023-02-06 19:09:07,559 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 19:09:19,459 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-06 19:09:21,130 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
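The recurring `INFO [optim.py:369] Clipping_scale=2.0, grad-norm quartiles ... threshold=..., percent-clipped=...` lines record adaptive gradient clipping: the five values after "grad-norm quartiles" summarize the distribution of recent gradient norms (min / 25% / median / 75% / max), and each logged threshold equals `Clipping_scale` times the running median; for instance the entry just above has median 2.964e+02 and threshold 5.927e+02 ≈ 2.0 × 2.964e+02. `percent-clipped` is then the share of recent batches whose gradients exceeded the threshold. A hedged sketch of that mechanism follows; the class and its interface are illustrative, not icefall's actual API.

```python
import torch
from collections import deque

class AdaptiveGradClipper:
    """Clip gradient norms at clipping_scale * median of recent norms.

    Illustrative reconstruction of the behaviour suggested by the
    optim.py log lines; the real implementation may differ in detail.
    """

    def __init__(self, clipping_scale: float = 2.0, history: int = 128):
        self.clipping_scale = clipping_scale
        self.norms = deque(maxlen=history)  # recent global grad norms

    def __call__(self, parameters) -> torch.Tensor:
        params = [p for p in parameters if p.grad is not None]
        if not params:
            return torch.tensor(0.0)
        # Global norm over all parameters, as clip_grad_norm_ computes it.
        norm = torch.norm(torch.stack([p.grad.detach().norm() for p in params]))
        self.norms.append(float(norm))
        median = sorted(self.norms)[len(self.norms) // 2]
        threshold = self.clipping_scale * median
        if float(norm) > threshold:
            for p in params:
                p.grad.mul_(threshold / float(norm))
        return norm
```

Logging the quartiles rather than a bare threshold makes runaway gradients visible at a glance: in this log the window's maximum occasionally spikes past 1e+03 while the median stays near 3e+02, so only a few percent of batches are ever clipped.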
Duration: 28.72225 +2023-02-06 19:09:29,749 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3253, 2.0446, 2.8571, 2.2827, 2.6763, 2.2141, 2.0088, 1.4447], + device='cuda:3'), covar=tensor([0.4332, 0.4471, 0.1583, 0.3347, 0.2167, 0.2697, 0.1698, 0.4935], + device='cuda:3'), in_proj_covar=tensor([0.0906, 0.0918, 0.0758, 0.0892, 0.0954, 0.0845, 0.0719, 0.0792], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 19:09:33,860 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129367.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:09:35,633 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.551e+02 3.127e+02 3.678e+02 8.568e+02, threshold=6.254e+02, percent-clipped=4.0 +2023-02-06 19:09:41,817 INFO [train.py:901] (3/4) Epoch 17, batch 50, loss[loss=0.2647, simple_loss=0.3376, pruned_loss=0.09591, over 8539.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.3011, pruned_loss=0.07295, over 366709.10 frames. ], batch size: 31, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:09:54,005 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 19:10:18,436 INFO [train.py:901] (3/4) Epoch 17, batch 100, loss[loss=0.1771, simple_loss=0.2569, pruned_loss=0.04864, over 7538.00 frames. ], tot_loss[loss=0.2214, simple_loss=0.3, pruned_loss=0.0714, over 648442.29 frames. ], batch size: 18, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:10:18,444 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 19:10:19,955 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129431.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:10:32,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8311, 3.5636, 2.3212, 2.7993, 2.6851, 1.8648, 2.7401, 2.9734], + device='cuda:3'), covar=tensor([0.1829, 0.0373, 0.1272, 0.0782, 0.0834, 0.1751, 0.1169, 0.1340], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0235, 0.0328, 0.0301, 0.0299, 0.0335, 0.0342, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 19:10:44,166 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5781, 1.8779, 1.9934, 1.1489, 2.0953, 1.4733, 0.5638, 1.7397], + device='cuda:3'), covar=tensor([0.0510, 0.0300, 0.0222, 0.0537, 0.0378, 0.0804, 0.0753, 0.0297], + device='cuda:3'), in_proj_covar=tensor([0.0425, 0.0365, 0.0315, 0.0422, 0.0348, 0.0513, 0.0374, 0.0390], + device='cuda:3'), out_proj_covar=tensor([1.1630e-04, 9.7486e-05, 8.3561e-05, 1.1323e-04, 9.3624e-05, 1.4825e-04, + 1.0240e-04, 1.0518e-04], device='cuda:3') +2023-02-06 19:10:46,023 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.389e+02 2.489e+02 3.062e+02 3.657e+02 7.822e+02, threshold=6.124e+02, percent-clipped=4.0 +2023-02-06 19:10:52,173 INFO [train.py:901] (3/4) Epoch 17, batch 150, loss[loss=0.2027, simple_loss=0.2833, pruned_loss=0.06104, over 8239.00 frames. ], tot_loss[loss=0.2187, simple_loss=0.2977, pruned_loss=0.06981, over 863154.27 frames. 
], batch size: 22, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:18,275 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129514.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:29,036 INFO [train.py:901] (3/4) Epoch 17, batch 200, loss[loss=0.2129, simple_loss=0.2899, pruned_loss=0.06795, over 8246.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2969, pruned_loss=0.06898, over 1034535.66 frames. ], batch size: 22, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:11:36,256 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:11:57,079 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.455e+02 2.902e+02 3.926e+02 7.649e+02, threshold=5.804e+02, percent-clipped=5.0 +2023-02-06 19:12:03,435 INFO [train.py:901] (3/4) Epoch 17, batch 250, loss[loss=0.2356, simple_loss=0.2987, pruned_loss=0.08622, over 7543.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2968, pruned_loss=0.06866, over 1160498.26 frames. ], batch size: 18, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:09,641 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 19:12:11,839 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0009, 1.6155, 1.3565, 1.5270, 1.3683, 1.2118, 1.1997, 1.3220], + device='cuda:3'), covar=tensor([0.1156, 0.0464, 0.1296, 0.0514, 0.0694, 0.1540, 0.0924, 0.0822], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0236, 0.0330, 0.0303, 0.0301, 0.0337, 0.0344, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 19:12:18,388 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 19:12:33,734 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=129623.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:12:38,235 INFO [train.py:901] (3/4) Epoch 17, batch 300, loss[loss=0.2222, simple_loss=0.3039, pruned_loss=0.07026, over 8620.00 frames. ], tot_loss[loss=0.2178, simple_loss=0.2974, pruned_loss=0.06916, over 1262215.49 frames. ], batch size: 39, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:12:39,095 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129630.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:12:53,545 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=129648.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:13:08,147 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.453e+02 3.064e+02 3.747e+02 1.027e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-06 19:13:14,342 INFO [train.py:901] (3/4) Epoch 17, batch 350, loss[loss=0.2197, simple_loss=0.302, pruned_loss=0.06864, over 7821.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2972, pruned_loss=0.06902, over 1341007.42 frames. ], batch size: 20, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:13:47,828 INFO [train.py:901] (3/4) Epoch 17, batch 400, loss[loss=0.2034, simple_loss=0.2803, pruned_loss=0.06321, over 7922.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2979, pruned_loss=0.06949, over 1403327.53 frames. 
], batch size: 20, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:08,552 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9923, 1.0053, 1.1689, 0.9452, 0.6421, 1.0410, 1.0131, 0.9155], + device='cuda:3'), covar=tensor([0.0516, 0.0907, 0.1261, 0.1075, 0.0504, 0.1090, 0.0587, 0.0499], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0155, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 19:14:17,992 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.355e+02 2.898e+02 3.830e+02 8.224e+02, threshold=5.797e+02, percent-clipped=7.0 +2023-02-06 19:14:21,452 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129775.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:14:24,068 INFO [train.py:901] (3/4) Epoch 17, batch 450, loss[loss=0.2421, simple_loss=0.3231, pruned_loss=0.08053, over 8496.00 frames. ], tot_loss[loss=0.2188, simple_loss=0.2985, pruned_loss=0.06953, over 1456989.13 frames. ], batch size: 26, lr: 4.54e-03, grad_scale: 8.0 +2023-02-06 19:14:26,520 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-06 19:14:39,892 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0 +2023-02-06 19:14:44,133 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 19:14:44,535 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=129809.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:14:58,031 INFO [train.py:901] (3/4) Epoch 17, batch 500, loss[loss=0.2247, simple_loss=0.3057, pruned_loss=0.07184, over 8027.00 frames. ], tot_loss[loss=0.2193, simple_loss=0.2991, pruned_loss=0.06974, over 1493917.96 frames. ], batch size: 22, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:28,005 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.377e+02 2.910e+02 3.862e+02 1.132e+03, threshold=5.820e+02, percent-clipped=8.0 +2023-02-06 19:15:35,670 INFO [train.py:901] (3/4) Epoch 17, batch 550, loss[loss=0.2625, simple_loss=0.3197, pruned_loss=0.1027, over 5136.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2991, pruned_loss=0.06989, over 1522423.12 frames. ], batch size: 11, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:15:43,353 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=129890.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:16:10,114 INFO [train.py:901] (3/4) Epoch 17, batch 600, loss[loss=0.2331, simple_loss=0.3133, pruned_loss=0.0764, over 8503.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2981, pruned_loss=0.06928, over 1542727.45 frames. ], batch size: 26, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:16:19,728 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
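The `INFO [scaling.py:679] Whitening: num_groups=..., num_channels=..., metric=... vs. limit=...` lines above track a decorrelation diagnostic on intermediate activations: a statistic of the per-group channel covariance that equals 1.0 when the features are perfectly white (covariance proportional to the identity) and grows as channels become correlated, apparently with a correction applied only when the metric exceeds the limit (an inference from the "vs. limit" phrasing). The function below is an assumed reconstruction of such a metric, written to match the num_groups/num_channels fields in the log rather than icefall's exact code.

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """Return ~1.0 for white features, larger for correlated ones.

    x: (num_frames, num_channels); channels are split into num_groups
    groups and a covariance is estimated independently per group.
    """
    num_frames, num_channels = x.shape
    cpg = num_channels // num_groups  # channels per group
    xg = x.reshape(num_frames, num_groups, cpg).permute(1, 0, 2)
    cov = xg.transpose(1, 2) @ xg / num_frames  # (num_groups, cpg, cpg)
    mean_sq = (cov ** 2).mean()                      # energy of all entries
    mean_diag = cov.diagonal(dim1=1, dim2=2).mean()  # average channel variance
    # For cov = sigma^2 * I the ratio below is exactly 1; off-diagonal
    # correlation adds energy to mean_sq and pushes the metric above 1.
    return mean_sq * cpg / (mean_diag ** 2 + 1e-20)
```

Read this way, the `num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0` entry above says those activations were close to white and within bounds, while the `num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0` entry checks a single full-width covariance and tolerates more correlation before intervening.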
Duration: 29.816625 +2023-02-06 19:16:24,107 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9763, 1.5262, 1.6402, 1.4143, 0.9762, 1.4784, 1.8029, 1.6941], + device='cuda:3'), covar=tensor([0.0539, 0.1200, 0.1637, 0.1390, 0.0611, 0.1483, 0.0668, 0.0581], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0190, 0.0156, 0.0100, 0.0162, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 19:16:38,509 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.845e+02 2.576e+02 2.936e+02 3.639e+02 7.352e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-06 19:16:41,362 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=129974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:16:44,756 INFO [train.py:901] (3/4) Epoch 17, batch 650, loss[loss=0.2503, simple_loss=0.3264, pruned_loss=0.08707, over 8389.00 frames. ], tot_loss[loss=0.2194, simple_loss=0.2994, pruned_loss=0.06972, over 1564566.95 frames. ], batch size: 49, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:09,828 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8374, 1.8721, 2.4074, 1.6410, 1.3895, 2.4131, 0.4497, 1.4563], + device='cuda:3'), covar=tensor([0.2032, 0.1371, 0.0340, 0.1512, 0.2952, 0.0380, 0.2431, 0.1576], + device='cuda:3'), in_proj_covar=tensor([0.0178, 0.0185, 0.0115, 0.0217, 0.0265, 0.0121, 0.0165, 0.0179], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 19:17:11,757 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2704, 1.5050, 4.4315, 2.0564, 2.5877, 5.0329, 5.0175, 4.3335], + device='cuda:3'), covar=tensor([0.1173, 0.1802, 0.0326, 0.1910, 0.1070, 0.0201, 0.0491, 0.0572], + device='cuda:3'), in_proj_covar=tensor([0.0279, 0.0306, 0.0270, 0.0298, 0.0291, 0.0251, 0.0384, 0.0296], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 19:17:16,416 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130018.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:16,512 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5805, 1.8531, 2.7591, 1.4458, 2.1225, 1.9073, 1.6593, 2.0071], + device='cuda:3'), covar=tensor([0.1861, 0.2314, 0.0814, 0.4103, 0.1571, 0.2898, 0.2059, 0.2012], + device='cuda:3'), in_proj_covar=tensor([0.0505, 0.0560, 0.0542, 0.0610, 0.0628, 0.0568, 0.0502, 0.0617], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 19:17:17,845 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:17:23,793 INFO [train.py:901] (3/4) Epoch 17, batch 700, loss[loss=0.2163, simple_loss=0.2981, pruned_loss=0.06726, over 8455.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2978, pruned_loss=0.0691, over 1577827.23 frames. ], batch size: 27, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:17:51,871 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.350e+02 2.811e+02 3.683e+02 1.098e+03, threshold=5.622e+02, percent-clipped=6.0 +2023-02-06 19:17:58,279 INFO [train.py:901] (3/4) Epoch 17, batch 750, loss[loss=0.2099, simple_loss=0.2795, pruned_loss=0.07018, over 8078.00 frames. 
], tot_loss[loss=0.2164, simple_loss=0.2964, pruned_loss=0.06818, over 1588430.18 frames. ], batch size: 21, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:04,874 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3173, 1.6599, 1.7081, 1.0659, 1.7431, 1.2676, 0.2634, 1.5247], + device='cuda:3'), covar=tensor([0.0382, 0.0290, 0.0223, 0.0377, 0.0324, 0.0724, 0.0642, 0.0235], + device='cuda:3'), in_proj_covar=tensor([0.0421, 0.0362, 0.0309, 0.0418, 0.0346, 0.0507, 0.0370, 0.0388], + device='cuda:3'), out_proj_covar=tensor([1.1516e-04, 9.6632e-05, 8.1938e-05, 1.1221e-04, 9.3132e-05, 1.4651e-04, + 1.0125e-04, 1.0452e-04], device='cuda:3') +2023-02-06 19:18:05,572 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130089.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:18:08,253 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 19:18:19,439 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 19:18:36,029 INFO [train.py:901] (3/4) Epoch 17, batch 800, loss[loss=0.1967, simple_loss=0.263, pruned_loss=0.06523, over 7430.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2972, pruned_loss=0.06891, over 1594890.80 frames. ], batch size: 17, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:18:46,672 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7547, 1.7647, 2.3822, 1.6487, 1.3446, 2.3157, 0.4775, 1.4308], + device='cuda:3'), covar=tensor([0.1818, 0.1368, 0.0352, 0.1422, 0.3154, 0.0458, 0.2478, 0.1565], + device='cuda:3'), in_proj_covar=tensor([0.0178, 0.0186, 0.0116, 0.0218, 0.0265, 0.0122, 0.0167, 0.0181], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 19:18:48,084 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130146.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 19:18:52,760 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130153.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:19:04,223 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.363e+02 2.676e+02 3.408e+02 8.560e+02, threshold=5.353e+02, percent-clipped=3.0 +2023-02-06 19:19:05,151 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130171.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 19:19:10,476 INFO [train.py:901] (3/4) Epoch 17, batch 850, loss[loss=0.1843, simple_loss=0.255, pruned_loss=0.05678, over 7542.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2974, pruned_loss=0.06885, over 1602095.72 frames. ], batch size: 18, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:19:47,566 INFO [train.py:901] (3/4) Epoch 17, batch 900, loss[loss=0.2467, simple_loss=0.3159, pruned_loss=0.08872, over 8104.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2955, pruned_loss=0.06801, over 1605344.34 frames. 
], batch size: 23, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:15,376 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130268.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:20:16,501 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.765e+02 2.489e+02 3.023e+02 3.878e+02 8.176e+02, threshold=6.045e+02, percent-clipped=7.0 +2023-02-06 19:20:22,805 INFO [train.py:901] (3/4) Epoch 17, batch 950, loss[loss=0.2315, simple_loss=0.3107, pruned_loss=0.07608, over 8500.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.295, pruned_loss=0.06795, over 1609312.87 frames. ], batch size: 26, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:20:29,213 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3286, 1.5530, 2.1803, 1.1824, 1.4831, 1.5535, 1.3553, 1.5950], + device='cuda:3'), covar=tensor([0.2021, 0.2514, 0.0950, 0.4402, 0.2028, 0.3327, 0.2346, 0.2151], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0564, 0.0545, 0.0612, 0.0632, 0.0570, 0.0504, 0.0619], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 19:20:43,395 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 19:20:57,189 INFO [train.py:901] (3/4) Epoch 17, batch 1000, loss[loss=0.2128, simple_loss=0.2863, pruned_loss=0.06963, over 7527.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2959, pruned_loss=0.06858, over 1608223.91 frames. ], batch size: 18, lr: 4.53e-03, grad_scale: 8.0 +2023-02-06 19:21:04,876 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0060, 2.2516, 1.9841, 3.0233, 1.4144, 1.7702, 2.0804, 2.4024], + device='cuda:3'), covar=tensor([0.0780, 0.0844, 0.0981, 0.0336, 0.1200, 0.1288, 0.0972, 0.0767], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0202, 0.0252, 0.0213, 0.0210, 0.0250, 0.0255, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 19:21:09,223 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130345.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:20,028 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 19:21:21,979 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130362.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:23,327 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130364.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:27,498 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.680e+02 3.059e+02 3.924e+02 8.380e+02, threshold=6.118e+02, percent-clipped=2.0 +2023-02-06 19:21:27,752 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130370.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:21:33,152 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 19:21:33,588 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 19:21:33,831 INFO [train.py:901] (3/4) Epoch 17, batch 1050, loss[loss=0.1828, simple_loss=0.2617, pruned_loss=0.05193, over 7434.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2946, pruned_loss=0.06864, over 1607168.56 frames. 
], batch size: 17, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:21:49,962 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130402.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:08,452 INFO [train.py:901] (3/4) Epoch 17, batch 1100, loss[loss=0.1871, simple_loss=0.2712, pruned_loss=0.05145, over 8131.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2952, pruned_loss=0.06823, over 1611280.28 frames. ], batch size: 22, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:22:14,705 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8550, 1.6691, 3.3807, 1.5279, 2.4737, 3.7235, 3.8197, 3.1989], + device='cuda:3'), covar=tensor([0.1173, 0.1522, 0.0324, 0.1970, 0.0957, 0.0223, 0.0394, 0.0543], + device='cuda:3'), in_proj_covar=tensor([0.0282, 0.0307, 0.0272, 0.0301, 0.0293, 0.0252, 0.0386, 0.0296], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 19:22:23,047 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130450.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:27,202 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:38,665 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.545e+02 2.978e+02 3.676e+02 6.168e+02, threshold=5.956e+02, percent-clipped=1.0 +2023-02-06 19:22:44,133 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130477.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:45,340 INFO [train.py:901] (3/4) Epoch 17, batch 1150, loss[loss=0.176, simple_loss=0.2712, pruned_loss=0.04037, over 8025.00 frames. ], tot_loss[loss=0.215, simple_loss=0.295, pruned_loss=0.0675, over 1613094.99 frames. ], batch size: 22, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:22:45,511 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:22:45,963 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 19:23:16,172 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130524.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:19,416 INFO [train.py:901] (3/4) Epoch 17, batch 1200, loss[loss=0.2156, simple_loss=0.2913, pruned_loss=0.06992, over 8350.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2958, pruned_loss=0.06776, over 1615119.48 frames. ], batch size: 48, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:23:33,403 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130549.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:45,146 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130566.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:23:47,779 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.314e+02 2.862e+02 3.617e+02 1.013e+03, threshold=5.724e+02, percent-clipped=2.0 +2023-02-06 19:23:53,883 INFO [train.py:901] (3/4) Epoch 17, batch 1250, loss[loss=0.2665, simple_loss=0.3346, pruned_loss=0.09926, over 7808.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2955, pruned_loss=0.06789, over 1616631.59 frames. 
], batch size: 20, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:23:57,469 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=130583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:24:07,984 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5464, 1.8089, 1.8689, 1.1671, 1.9739, 1.3577, 0.4692, 1.8300], + device='cuda:3'), covar=tensor([0.0435, 0.0272, 0.0208, 0.0425, 0.0289, 0.0778, 0.0714, 0.0237], + device='cuda:3'), in_proj_covar=tensor([0.0427, 0.0365, 0.0312, 0.0424, 0.0350, 0.0516, 0.0377, 0.0394], + device='cuda:3'), out_proj_covar=tensor([1.1675e-04, 9.7352e-05, 8.2703e-05, 1.1359e-04, 9.3933e-05, 1.4911e-04, + 1.0301e-04, 1.0598e-04], device='cuda:3') +2023-02-06 19:24:30,834 INFO [train.py:901] (3/4) Epoch 17, batch 1300, loss[loss=0.2795, simple_loss=0.3565, pruned_loss=0.1013, over 8472.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2955, pruned_loss=0.0679, over 1614970.27 frames. ], batch size: 29, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:24:35,110 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2881, 2.6293, 3.0395, 1.5457, 3.3451, 1.8869, 1.5978, 2.3040], + device='cuda:3'), covar=tensor([0.0688, 0.0361, 0.0213, 0.0637, 0.0304, 0.0719, 0.0755, 0.0470], + device='cuda:3'), in_proj_covar=tensor([0.0426, 0.0365, 0.0313, 0.0424, 0.0348, 0.0516, 0.0376, 0.0393], + device='cuda:3'), out_proj_covar=tensor([1.1640e-04, 9.7338e-05, 8.2955e-05, 1.1361e-04, 9.3577e-05, 1.4901e-04, + 1.0280e-04, 1.0569e-04], device='cuda:3') +2023-02-06 19:24:59,335 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.380e+02 3.126e+02 3.675e+02 7.509e+02, threshold=6.253e+02, percent-clipped=2.0 +2023-02-06 19:25:05,687 INFO [train.py:901] (3/4) Epoch 17, batch 1350, loss[loss=0.1694, simple_loss=0.2495, pruned_loss=0.0446, over 7650.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2958, pruned_loss=0.06746, over 1617629.26 frames. ], batch size: 19, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:25:43,062 INFO [train.py:901] (3/4) Epoch 17, batch 1400, loss[loss=0.2005, simple_loss=0.2695, pruned_loss=0.06569, over 7786.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.295, pruned_loss=0.06734, over 1616019.58 frames. ], batch size: 19, lr: 4.52e-03, grad_scale: 8.0 +2023-02-06 19:25:46,048 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:25:47,394 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=130735.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:25:54,827 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130746.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:03,100 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130758.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:04,431 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=130760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 19:26:11,021 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.401e+02 2.607e+02 3.260e+02 4.191e+02 1.113e+03, threshold=6.520e+02, percent-clipped=3.0 +2023-02-06 19:26:17,374 INFO [train.py:901] (3/4) Epoch 17, batch 1450, loss[loss=0.2223, simple_loss=0.3063, pruned_loss=0.06918, over 8294.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2961, pruned_loss=0.06794, over 1615198.92 frames. 
], batch size: 23, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:26:20,718 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-06 19:26:27,816 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130794.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:32,216 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130800.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:26:34,999 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7542, 1.7420, 3.9742, 1.4235, 3.4967, 3.3667, 3.6523, 3.4932],
+ device='cuda:3'), covar=tensor([0.0699, 0.3900, 0.0726, 0.3916, 0.1319, 0.1023, 0.0607, 0.0758],
+ device='cuda:3'), in_proj_covar=tensor([0.0570, 0.0627, 0.0650, 0.0595, 0.0676, 0.0580, 0.0573, 0.0637],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 19:26:54,259 INFO [train.py:901] (3/4) Epoch 17, batch 1500, loss[loss=0.2133, simple_loss=0.2919, pruned_loss=0.06735, over 7528.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2949, pruned_loss=0.06754, over 1613390.70 frames. ], batch size: 18, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:27:17,129 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:27:22,932 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.580e+02 2.370e+02 2.974e+02 3.638e+02 1.375e+03, threshold=5.949e+02, percent-clipped=1.0
+2023-02-06 19:27:29,127 INFO [train.py:901] (3/4) Epoch 17, batch 1550, loss[loss=0.2656, simple_loss=0.3229, pruned_loss=0.1042, over 7003.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2957, pruned_loss=0.06843, over 1612847.41 frames. ], batch size: 72, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:27:50,134 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130909.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:27:50,699 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130910.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:27:54,327 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=130915.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:28:02,601 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=130927.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:28:03,805 INFO [train.py:901] (3/4) Epoch 17, batch 1600, loss[loss=0.2325, simple_loss=0.3082, pruned_loss=0.07838, over 8239.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2962, pruned_loss=0.06823, over 1614994.64 frames. ], batch size: 22, lr: 4.52e-03, grad_scale: 8.0
+2023-02-06 19:28:34,757 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.345e+02 2.992e+02 3.546e+02 8.486e+02, threshold=5.983e+02, percent-clipped=5.0
+2023-02-06 19:28:40,942 INFO [train.py:901] (3/4) Epoch 17, batch 1650, loss[loss=0.2485, simple_loss=0.3273, pruned_loss=0.08485, over 8356.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2956, pruned_loss=0.06818, over 1614551.44 frames. ], batch size: 24, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:28:57,386 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-06 19:29:13,492 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131025.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:29:16,140 INFO [train.py:901] (3/4) Epoch 17, batch 1700, loss[loss=0.2162, simple_loss=0.2942, pruned_loss=0.06913, over 8083.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2958, pruned_loss=0.06781, over 1617694.35 frames. ], batch size: 21, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:29:25,392 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131042.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:29:46,943 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.451e+02 3.155e+02 3.823e+02 7.811e+02, threshold=6.311e+02, percent-clipped=3.0
+2023-02-06 19:29:53,069 INFO [train.py:901] (3/4) Epoch 17, batch 1750, loss[loss=0.1907, simple_loss=0.2775, pruned_loss=0.05197, over 7541.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2978, pruned_loss=0.06905, over 1619974.20 frames. ], batch size: 18, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:29:58,739 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1166, 1.8030, 1.9344, 1.8847, 1.2422, 1.8050, 2.6368, 2.5759],
+ device='cuda:3'), covar=tensor([0.0434, 0.1115, 0.1613, 0.1322, 0.0579, 0.1399, 0.0510, 0.0495],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0164, 0.0115, 0.0140],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 19:30:19,627 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131117.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:30:27,911 INFO [train.py:901] (3/4) Epoch 17, batch 1800, loss[loss=0.1803, simple_loss=0.2732, pruned_loss=0.04368, over 8467.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2971, pruned_loss=0.06822, over 1617642.03 frames. ], batch size: 25, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:30:37,105 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:30:52,694 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131165.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:30:53,406 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1035, 1.6419, 1.4801, 1.6594, 1.4499, 1.3981, 1.3038, 1.3834],
+ device='cuda:3'), covar=tensor([0.1047, 0.0416, 0.1123, 0.0448, 0.0662, 0.1275, 0.0832, 0.0713],
+ device='cuda:3'), in_proj_covar=tensor([0.0347, 0.0230, 0.0324, 0.0298, 0.0296, 0.0326, 0.0338, 0.0311],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 19:30:55,955 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.745e+02 3.356e+02 4.683e+02 1.105e+03, threshold=6.712e+02, percent-clipped=11.0
+2023-02-06 19:30:56,921 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131171.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:03,685 INFO [train.py:901] (3/4) Epoch 17, batch 1850, loss[loss=0.2085, simple_loss=0.2701, pruned_loss=0.07344, over 7693.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2976, pruned_loss=0.06887, over 1620649.70 frames. ], batch size: 18, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:31:06,774 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0423, 2.2888, 1.8884, 2.7621, 1.4530, 1.6627, 1.9738, 2.3050],
+ device='cuda:3'), covar=tensor([0.0708, 0.0778, 0.0988, 0.0416, 0.1133, 0.1391, 0.0919, 0.0820],
+ device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0198, 0.0248, 0.0211, 0.0208, 0.0246, 0.0250, 0.0210],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 19:31:12,470 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131190.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:13,837 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131192.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:16,719 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131196.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:31:39,542 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5173, 2.2885, 3.2820, 2.6473, 3.1491, 2.5276, 2.2312, 1.8713],
+ device='cuda:3'), covar=tensor([0.5071, 0.5195, 0.1760, 0.3310, 0.2386, 0.2624, 0.1768, 0.5050],
+ device='cuda:3'), in_proj_covar=tensor([0.0921, 0.0928, 0.0773, 0.0899, 0.0962, 0.0853, 0.0723, 0.0798],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 19:31:39,955 INFO [train.py:901] (3/4) Epoch 17, batch 1900, loss[loss=0.1919, simple_loss=0.2776, pruned_loss=0.05307, over 8125.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2967, pruned_loss=0.06828, over 1615387.55 frames. ], batch size: 22, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:32:08,100 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.313e+02 2.955e+02 3.582e+02 5.685e+02, threshold=5.910e+02, percent-clipped=0.0
+2023-02-06 19:32:08,131 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-06 19:32:14,127 INFO [train.py:901] (3/4) Epoch 17, batch 1950, loss[loss=0.2129, simple_loss=0.3013, pruned_loss=0.0622, over 8499.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2963, pruned_loss=0.06794, over 1616380.84 frames. ], batch size: 26, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:32:15,756 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131281.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:19,626 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-06 19:32:28,929 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131298.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:35,156 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131306.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:39,941 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-06 19:32:47,534 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131323.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:32:51,199 INFO [train.py:901] (3/4) Epoch 17, batch 2000, loss[loss=0.2366, simple_loss=0.3219, pruned_loss=0.07567, over 8247.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2966, pruned_loss=0.06838, over 1616525.93 frames. ], batch size: 24, lr: 4.51e-03, grad_scale: 16.0
+2023-02-06 19:33:19,858 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.510e+02 3.128e+02 3.622e+02 6.098e+02, threshold=6.257e+02, percent-clipped=1.0
+2023-02-06 19:33:25,352 INFO [train.py:901] (3/4) Epoch 17, batch 2050, loss[loss=0.2181, simple_loss=0.2983, pruned_loss=0.06896, over 8449.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2956, pruned_loss=0.06785, over 1613476.85 frames. ], batch size: 25, lr: 4.51e-03, grad_scale: 8.0
+2023-02-06 19:34:00,685 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131427.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:34:01,952 INFO [train.py:901] (3/4) Epoch 17, batch 2100, loss[loss=0.1735, simple_loss=0.2594, pruned_loss=0.04378, over 7670.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2941, pruned_loss=0.06748, over 1610781.27 frames. ], batch size: 19, lr: 4.51e-03, grad_scale: 8.0
+2023-02-06 19:34:06,129 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131434.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:34:31,410 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.457e+02 2.884e+02 3.530e+02 8.686e+02, threshold=5.767e+02, percent-clipped=1.0
+2023-02-06 19:34:36,973 INFO [train.py:901] (3/4) Epoch 17, batch 2150, loss[loss=0.2275, simple_loss=0.3225, pruned_loss=0.06627, over 8457.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2952, pruned_loss=0.06754, over 1614854.70 frames. ], batch size: 27, lr: 4.51e-03, grad_scale: 8.0
+2023-02-06 19:34:58,677 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131510.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:35:12,359 INFO [train.py:901] (3/4) Epoch 17, batch 2200, loss[loss=0.2036, simple_loss=0.2869, pruned_loss=0.06015, over 8106.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2953, pruned_loss=0.06748, over 1617402.99 frames. ], batch size: 23, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:35:18,094 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131536.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:35:36,119 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6287, 2.2649, 1.7649, 4.0719, 1.6589, 1.5764, 2.3884, 2.7373],
+ device='cuda:3'), covar=tensor([0.1662, 0.1376, 0.2015, 0.0262, 0.1569, 0.2124, 0.1270, 0.0895],
+ device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0198, 0.0247, 0.0211, 0.0208, 0.0246, 0.0253, 0.0209],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 19:35:43,530 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.550e+02 3.248e+02 4.465e+02 1.208e+03, threshold=6.496e+02, percent-clipped=6.0
+2023-02-06 19:35:49,218 INFO [train.py:901] (3/4) Epoch 17, batch 2250, loss[loss=0.208, simple_loss=0.2721, pruned_loss=0.07188, over 7670.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2951, pruned_loss=0.06736, over 1617844.40 frames. ], batch size: 18, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:36:23,880 INFO [train.py:901] (3/4) Epoch 17, batch 2300, loss[loss=0.2361, simple_loss=0.316, pruned_loss=0.07815, over 8589.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2956, pruned_loss=0.06778, over 1618239.86 frames. ], batch size: 49, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:36:24,090 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3940, 2.7847, 2.5277, 3.9990, 1.8065, 2.2366, 2.5158, 3.1038],
+ device='cuda:3'), covar=tensor([0.0733, 0.0813, 0.0844, 0.0326, 0.1174, 0.1225, 0.1034, 0.0728],
+ device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0199, 0.0249, 0.0212, 0.0209, 0.0247, 0.0254, 0.0210],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-06 19:36:40,759 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131651.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:36:55,869 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.285e+02 2.553e+02 3.001e+02 3.824e+02 6.268e+02, threshold=6.003e+02, percent-clipped=0.0
+2023-02-06 19:37:01,535 INFO [train.py:901] (3/4) Epoch 17, batch 2350, loss[loss=0.1809, simple_loss=0.2639, pruned_loss=0.049, over 8095.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2947, pruned_loss=0.06763, over 1613317.65 frames. ], batch size: 21, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:37:16,027 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6169, 1.6684, 2.0811, 1.4626, 1.2026, 2.0709, 0.3023, 1.2574],
+ device='cuda:3'), covar=tensor([0.1972, 0.1230, 0.0374, 0.1265, 0.2920, 0.0425, 0.2449, 0.1440],
+ device='cuda:3'), in_proj_covar=tensor([0.0179, 0.0183, 0.0114, 0.0216, 0.0260, 0.0121, 0.0166, 0.0179],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 19:37:21,843 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0
+2023-02-06 19:37:35,923 INFO [train.py:901] (3/4) Epoch 17, batch 2400, loss[loss=0.1773, simple_loss=0.2753, pruned_loss=0.03964, over 8228.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2957, pruned_loss=0.06825, over 1614344.07 frames. ], batch size: 22, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:37:40,426 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2101, 2.0955, 1.6241, 1.8712, 1.7213, 1.4074, 1.7119, 1.6669],
+ device='cuda:3'), covar=tensor([0.1184, 0.0421, 0.1185, 0.0523, 0.0667, 0.1355, 0.0831, 0.0815],
+ device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0233, 0.0324, 0.0299, 0.0297, 0.0328, 0.0340, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 19:38:06,374 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.467e+02 3.155e+02 3.892e+02 8.269e+02, threshold=6.310e+02, percent-clipped=4.0
+2023-02-06 19:38:06,491 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131771.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:38:12,217 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131778.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:38:12,839 INFO [train.py:901] (3/4) Epoch 17, batch 2450, loss[loss=0.1991, simple_loss=0.291, pruned_loss=0.05357, over 8524.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2947, pruned_loss=0.06753, over 1613234.97 frames. ], batch size: 49, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:38:47,888 INFO [train.py:901] (3/4) Epoch 17, batch 2500, loss[loss=0.2254, simple_loss=0.304, pruned_loss=0.07339, over 8361.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2953, pruned_loss=0.0679, over 1612441.81 frames. ], batch size: 24, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:38:58,548 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8229, 1.4194, 1.6071, 1.3538, 0.9564, 1.3725, 1.7327, 1.5522],
+ device='cuda:3'), covar=tensor([0.0498, 0.1215, 0.1570, 0.1362, 0.0560, 0.1430, 0.0659, 0.0597],
+ device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0156, 0.0099, 0.0161, 0.0114, 0.0138],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 19:39:05,482 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=131854.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:17,118 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.761e+02 2.481e+02 2.929e+02 3.320e+02 7.417e+02, threshold=5.858e+02, percent-clipped=2.0
+2023-02-06 19:39:20,127 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=131875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:22,846 INFO [train.py:901] (3/4) Epoch 17, batch 2550, loss[loss=0.1764, simple_loss=0.2687, pruned_loss=0.04205, over 7968.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2956, pruned_loss=0.06857, over 1608793.91 frames. ], batch size: 21, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:39:29,642 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131886.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:34,611 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131893.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:39:42,812 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9878, 1.7735, 3.2824, 1.5438, 2.4676, 3.5448, 3.6580, 2.9738],
+ device='cuda:3'), covar=tensor([0.1133, 0.1619, 0.0480, 0.2085, 0.1126, 0.0332, 0.0691, 0.0667],
+ device='cuda:3'), in_proj_covar=tensor([0.0282, 0.0308, 0.0275, 0.0302, 0.0292, 0.0252, 0.0388, 0.0299],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-06 19:39:45,590 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=131907.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:40:00,833 INFO [train.py:901] (3/4) Epoch 17, batch 2600, loss[loss=0.1845, simple_loss=0.2658, pruned_loss=0.05164, over 8601.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2949, pruned_loss=0.06777, over 1609733.79 frames. ], batch size: 31, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:40:03,026 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=131932.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:40:28,771 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=131969.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:40:29,953 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.887e+02 3.716e+02 6.826e+02, threshold=5.774e+02, percent-clipped=1.0
+2023-02-06 19:40:35,438 INFO [train.py:901] (3/4) Epoch 17, batch 2650, loss[loss=0.1951, simple_loss=0.2942, pruned_loss=0.048, over 8707.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.295, pruned_loss=0.06796, over 1611599.36 frames. ], batch size: 34, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:40:52,328 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0743, 1.2587, 1.2483, 0.6507, 1.2397, 1.0401, 0.0696, 1.2267],
+ device='cuda:3'), covar=tensor([0.0339, 0.0318, 0.0260, 0.0435, 0.0391, 0.0853, 0.0665, 0.0271],
+ device='cuda:3'), in_proj_covar=tensor([0.0426, 0.0368, 0.0315, 0.0425, 0.0351, 0.0509, 0.0376, 0.0391],
+ out_proj_covar=tensor([1.1651e-04, 9.7932e-05, 8.3442e-05, 1.1367e-04, 9.4395e-05, 1.4685e-04,
+ 1.0248e-04, 1.0514e-04], device='cuda:3')
+2023-02-06 19:41:13,503 INFO [train.py:901] (3/4) Epoch 17, batch 2700, loss[loss=0.2512, simple_loss=0.3192, pruned_loss=0.09157, over 7290.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2961, pruned_loss=0.06862, over 1614537.59 frames. ], batch size: 71, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:41:27,368 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132049.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:41:31,122 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 19:41:42,497 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.450e+02 3.248e+02 4.102e+02 1.137e+03, threshold=6.496e+02, percent-clipped=12.0
+2023-02-06 19:41:43,394 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132072.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:41:48,263 INFO [train.py:901] (3/4) Epoch 17, batch 2750, loss[loss=0.2625, simple_loss=0.3283, pruned_loss=0.09832, over 7027.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2948, pruned_loss=0.06814, over 1606695.25 frames. ], batch size: 71, lr: 4.50e-03, grad_scale: 8.0
+2023-02-06 19:42:25,027 INFO [train.py:901] (3/4) Epoch 17, batch 2800, loss[loss=0.2451, simple_loss=0.3249, pruned_loss=0.08269, over 8196.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2944, pruned_loss=0.06766, over 1609159.30 frames. ], batch size: 23, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:42:33,150 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7811, 5.7650, 5.0813, 2.5214, 5.0979, 5.6509, 5.4435, 5.2952],
+ device='cuda:3'), covar=tensor([0.0587, 0.0467, 0.1030, 0.4846, 0.0790, 0.0795, 0.1099, 0.0616],
+ device='cuda:3'), in_proj_covar=tensor([0.0505, 0.0414, 0.0418, 0.0512, 0.0407, 0.0416, 0.0402, 0.0360],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 19:42:35,387 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:42:40,216 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132149.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:42:52,729 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132167.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:42:55,269 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.217e+02 2.865e+02 3.623e+02 1.020e+03, threshold=5.730e+02, percent-clipped=3.0
+2023-02-06 19:42:57,334 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132174.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:43:00,624 INFO [train.py:901] (3/4) Epoch 17, batch 2850, loss[loss=0.19, simple_loss=0.2716, pruned_loss=0.05421, over 8134.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2947, pruned_loss=0.06784, over 1603957.43 frames. ], batch size: 22, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:43:10,452 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-06 19:43:29,199 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132219.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:43:33,397 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132225.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:43:35,747 INFO [train.py:901] (3/4) Epoch 17, batch 2900, loss[loss=0.2235, simple_loss=0.3001, pruned_loss=0.07344, over 7810.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2945, pruned_loss=0.06732, over 1604358.02 frames. ], batch size: 20, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:43:52,939 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132250.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:44:08,371 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.407e+02 2.887e+02 3.454e+02 7.005e+02, threshold=5.774e+02, percent-clipped=2.0
+2023-02-06 19:44:09,818 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-06 19:44:13,741 INFO [train.py:901] (3/4) Epoch 17, batch 2950, loss[loss=0.2033, simple_loss=0.2787, pruned_loss=0.06398, over 8076.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2941, pruned_loss=0.06736, over 1604501.00 frames. ], batch size: 21, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:44:36,062 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-06 19:44:48,315 INFO [train.py:901] (3/4) Epoch 17, batch 3000, loss[loss=0.1823, simple_loss=0.2776, pruned_loss=0.04353, over 8235.00 frames. ], tot_loss[loss=0.2159, simple_loss=0.2958, pruned_loss=0.06806, over 1609704.87 frames. ], batch size: 22, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:44:48,315 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-06 19:44:56,140 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5615, 1.8382, 2.5538, 1.4134, 2.1176, 1.8400, 1.6952, 1.9575],
+ device='cuda:3'), covar=tensor([0.1635, 0.2621, 0.0882, 0.4086, 0.1636, 0.2763, 0.2056, 0.2194],
+ device='cuda:3'), in_proj_covar=tensor([0.0509, 0.0566, 0.0546, 0.0619, 0.0634, 0.0574, 0.0510, 0.0623],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 19:45:00,591 INFO [train.py:935] (3/4) Epoch 17, validation: loss=0.1786, simple_loss=0.2786, pruned_loss=0.03928, over 944034.00 frames.
+2023-02-06 19:45:00,592 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-06 19:45:04,438 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132334.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:45:07,390 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2420, 1.9449, 2.6472, 2.1425, 2.5084, 2.2126, 1.9167, 1.3171],
+ device='cuda:3'), covar=tensor([0.4945, 0.4688, 0.1716, 0.3283, 0.2352, 0.2597, 0.1779, 0.5029],
+ device='cuda:3'), in_proj_covar=tensor([0.0931, 0.0938, 0.0776, 0.0908, 0.0973, 0.0858, 0.0727, 0.0809],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 19:45:12,169 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7014, 1.5161, 1.7091, 1.4608, 0.9044, 1.4948, 1.5055, 1.3479],
+ device='cuda:3'), covar=tensor([0.0498, 0.1303, 0.1668, 0.1372, 0.0591, 0.1549, 0.0691, 0.0673],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0157, 0.0100, 0.0163, 0.0115, 0.0140],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0008, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-06 19:45:31,440 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.547e+02 2.492e+02 3.005e+02 3.786e+02 8.313e+02, threshold=6.010e+02, percent-clipped=11.0
+2023-02-06 19:45:37,097 INFO [train.py:901] (3/4) Epoch 17, batch 3050, loss[loss=0.2309, simple_loss=0.3092, pruned_loss=0.07632, over 8337.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2966, pruned_loss=0.06853, over 1609848.85 frames. ], batch size: 26, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:45:48,262 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132393.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:46:04,209 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=132416.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:46:12,936 INFO [train.py:901] (3/4) Epoch 17, batch 3100, loss[loss=0.2224, simple_loss=0.3015, pruned_loss=0.07163, over 7550.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2971, pruned_loss=0.06864, over 1611601.38 frames. ], batch size: 18, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:46:41,891 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.762e+02 2.340e+02 2.843e+02 3.195e+02 7.960e+02, threshold=5.685e+02, percent-clipped=6.0
+2023-02-06 19:46:47,328 INFO [train.py:901] (3/4) Epoch 17, batch 3150, loss[loss=0.2118, simple_loss=0.2873, pruned_loss=0.06819, over 8025.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2965, pruned_loss=0.06826, over 1615628.02 frames. ], batch size: 22, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:46:48,924 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2933, 2.3478, 1.7253, 2.0679, 2.0290, 1.4948, 1.8877, 1.8946],
+ device='cuda:3'), covar=tensor([0.1376, 0.0343, 0.1101, 0.0550, 0.0690, 0.1409, 0.0801, 0.0870],
+ device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0233, 0.0324, 0.0300, 0.0297, 0.0328, 0.0339, 0.0315],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 19:47:09,742 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132508.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:47:23,940 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.67 vs. limit=5.0
+2023-02-06 19:47:24,993 INFO [train.py:901] (3/4) Epoch 17, batch 3200, loss[loss=0.2131, simple_loss=0.3052, pruned_loss=0.06044, over 8324.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2965, pruned_loss=0.06821, over 1613790.78 frames. ], batch size: 25, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:47:26,576 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=132531.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:47:54,177 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.495e+02 3.112e+02 3.824e+02 1.248e+03, threshold=6.223e+02, percent-clipped=6.0
+2023-02-06 19:47:59,505 INFO [train.py:901] (3/4) Epoch 17, batch 3250, loss[loss=0.2478, simple_loss=0.3214, pruned_loss=0.08712, over 8495.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2973, pruned_loss=0.06883, over 1614726.67 frames. ], batch size: 26, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:48:07,401 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132590.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:48:11,633 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3109, 2.3225, 1.7602, 2.1398, 1.9774, 1.5932, 1.7774, 1.9775],
+ device='cuda:3'), covar=tensor([0.1391, 0.0399, 0.1191, 0.0545, 0.0674, 0.1467, 0.0907, 0.0826],
+ device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0233, 0.0324, 0.0301, 0.0298, 0.0330, 0.0341, 0.0315],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 19:48:26,272 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132615.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:48:37,516 INFO [train.py:901] (3/4) Epoch 17, batch 3300, loss[loss=0.2274, simple_loss=0.316, pruned_loss=0.06941, over 8288.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2971, pruned_loss=0.06841, over 1616571.19 frames. ], batch size: 23, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:49:06,785 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.345e+02 2.532e+02 2.971e+02 3.744e+02 7.972e+02, threshold=5.942e+02, percent-clipped=3.0
+2023-02-06 19:49:11,878 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132678.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:49:12,432 INFO [train.py:901] (3/4) Epoch 17, batch 3350, loss[loss=0.2106, simple_loss=0.2947, pruned_loss=0.06329, over 8129.00 frames. ], tot_loss[loss=0.2176, simple_loss=0.2975, pruned_loss=0.06884, over 1619595.22 frames. ], batch size: 22, lr: 4.49e-03, grad_scale: 8.0
+2023-02-06 19:49:49,258 INFO [train.py:901] (3/4) Epoch 17, batch 3400, loss[loss=0.2207, simple_loss=0.2958, pruned_loss=0.07283, over 8511.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2962, pruned_loss=0.06814, over 1622745.89 frames. ], batch size: 28, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:49:55,926 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4044, 4.3321, 3.9637, 2.1160, 3.8900, 3.9864, 3.9527, 3.7960],
+ device='cuda:3'), covar=tensor([0.0766, 0.0549, 0.0977, 0.4562, 0.0889, 0.0915, 0.1197, 0.0812],
+ device='cuda:3'), in_proj_covar=tensor([0.0501, 0.0414, 0.0421, 0.0513, 0.0406, 0.0414, 0.0400, 0.0358],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 19:50:01,004 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-06 19:50:04,442 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.35 vs. limit=5.0
+2023-02-06 19:50:14,691 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:14,772 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132764.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:19,439 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.442e+02 2.969e+02 4.012e+02 9.663e+02, threshold=5.937e+02, percent-clipped=5.0
+2023-02-06 19:50:24,934 INFO [train.py:901] (3/4) Epoch 17, batch 3450, loss[loss=0.1739, simple_loss=0.2536, pruned_loss=0.04708, over 7701.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2967, pruned_loss=0.06881, over 1620167.13 frames. ], batch size: 18, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:50:30,882 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=132787.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:32,220 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132789.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:33,510 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7143, 1.9634, 2.0801, 1.4118, 2.1518, 1.4942, 0.7907, 1.8268],
+ device='cuda:3'), covar=tensor([0.0488, 0.0314, 0.0225, 0.0469, 0.0322, 0.0715, 0.0696, 0.0277],
+ device='cuda:3'), in_proj_covar=tensor([0.0431, 0.0370, 0.0315, 0.0427, 0.0353, 0.0510, 0.0378, 0.0392],
+ out_proj_covar=tensor([1.1769e-04, 9.8406e-05, 8.3563e-05, 1.1444e-04, 9.4984e-05, 1.4697e-04,
+ 1.0310e-04, 1.0527e-04], device='cuda:3')
+2023-02-06 19:50:47,682 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=132812.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:50:59,803 INFO [train.py:901] (3/4) Epoch 17, batch 3500, loss[loss=0.1908, simple_loss=0.2672, pruned_loss=0.05718, over 7539.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2953, pruned_loss=0.06842, over 1616883.73 frames. ], batch size: 18, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:51:13,840 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-06 19:51:31,530 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.401e+02 3.009e+02 3.970e+02 8.620e+02, threshold=6.019e+02, percent-clipped=6.0
+2023-02-06 19:51:37,025 INFO [train.py:901] (3/4) Epoch 17, batch 3550, loss[loss=0.2904, simple_loss=0.3559, pruned_loss=0.1124, over 8358.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2961, pruned_loss=0.06851, over 1617672.31 frames. ], batch size: 26, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:52:11,133 INFO [train.py:901] (3/4) Epoch 17, batch 3600, loss[loss=0.2082, simple_loss=0.2822, pruned_loss=0.06713, over 7426.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2959, pruned_loss=0.06869, over 1616290.22 frames. ], batch size: 17, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:52:11,369 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6612, 1.9914, 2.1402, 1.2096, 2.2121, 1.4671, 0.6997, 1.8457],
+ device='cuda:3'), covar=tensor([0.0522, 0.0289, 0.0233, 0.0562, 0.0341, 0.0817, 0.0751, 0.0306],
+ device='cuda:3'), in_proj_covar=tensor([0.0434, 0.0375, 0.0319, 0.0431, 0.0358, 0.0515, 0.0380, 0.0396],
+ out_proj_covar=tensor([1.1865e-04, 9.9762e-05, 8.4682e-05, 1.1547e-04, 9.6214e-05, 1.4857e-04,
+ 1.0381e-04, 1.0618e-04], device='cuda:3')
+2023-02-06 19:52:40,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8003, 2.1533, 2.3243, 1.4721, 2.3900, 1.6633, 0.7500, 1.9388],
+ device='cuda:3'), covar=tensor([0.0487, 0.0225, 0.0208, 0.0421, 0.0245, 0.0629, 0.0649, 0.0254],
+ device='cuda:3'), in_proj_covar=tensor([0.0432, 0.0373, 0.0318, 0.0429, 0.0356, 0.0512, 0.0378, 0.0394],
+ out_proj_covar=tensor([1.1802e-04, 9.9351e-05, 8.4339e-05, 1.1490e-04, 9.5670e-05, 1.4765e-04,
+ 1.0323e-04, 1.0562e-04], device='cuda:3')
+2023-02-06 19:52:41,877 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.442e+02 2.775e+02 3.418e+02 6.006e+02, threshold=5.549e+02, percent-clipped=0.0
+2023-02-06 19:52:48,333 INFO [train.py:901] (3/4) Epoch 17, batch 3650, loss[loss=0.2058, simple_loss=0.2795, pruned_loss=0.06608, over 8236.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2954, pruned_loss=0.06815, over 1616327.75 frames. ], batch size: 22, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:53:18,533 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133022.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:53:21,778 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-06 19:53:23,087 INFO [train.py:901] (3/4) Epoch 17, batch 3700, loss[loss=0.1959, simple_loss=0.2628, pruned_loss=0.06449, over 7234.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2959, pruned_loss=0.06855, over 1618420.06 frames. ], batch size: 16, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:53:37,038 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-06 19:53:53,566 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.419e+02 3.081e+02 4.194e+02 7.364e+02, threshold=6.162e+02, percent-clipped=6.0
+2023-02-06 19:53:59,120 INFO [train.py:901] (3/4) Epoch 17, batch 3750, loss[loss=0.1887, simple_loss=0.2665, pruned_loss=0.05542, over 7702.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2949, pruned_loss=0.06796, over 1613155.23 frames. ], batch size: 18, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:54:10,364 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.4014, 5.3689, 4.8049, 2.3585, 4.7977, 5.1126, 4.9971, 4.7938],
+ device='cuda:3'), covar=tensor([0.0594, 0.0460, 0.0888, 0.4790, 0.0833, 0.0820, 0.1105, 0.0660],
+ device='cuda:3'), in_proj_covar=tensor([0.0504, 0.0412, 0.0419, 0.0514, 0.0408, 0.0416, 0.0400, 0.0360],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 19:54:21,508 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:54:35,509 INFO [train.py:901] (3/4) Epoch 17, batch 3800, loss[loss=0.2159, simple_loss=0.2972, pruned_loss=0.06727, over 8142.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2966, pruned_loss=0.06905, over 1615119.64 frames. ], batch size: 22, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:54:41,291 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133137.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:55:04,572 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.594e+02 3.054e+02 3.718e+02 6.772e+02, threshold=6.108e+02, percent-clipped=5.0
+2023-02-06 19:55:09,934 INFO [train.py:901] (3/4) Epoch 17, batch 3850, loss[loss=0.2159, simple_loss=0.2836, pruned_loss=0.07409, over 7935.00 frames. ], tot_loss[loss=0.217, simple_loss=0.2963, pruned_loss=0.06885, over 1614638.64 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:55:31,129 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-06 19:55:39,721 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2184, 1.9849, 2.7190, 2.1985, 2.5836, 2.1919, 1.9340, 1.4101],
+ device='cuda:3'), covar=tensor([0.4652, 0.4631, 0.1616, 0.3500, 0.2494, 0.2773, 0.1841, 0.4904],
+ device='cuda:3'), in_proj_covar=tensor([0.0918, 0.0931, 0.0770, 0.0902, 0.0968, 0.0850, 0.0721, 0.0800],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 19:55:43,019 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133223.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:55:46,893 INFO [train.py:901] (3/4) Epoch 17, batch 3900, loss[loss=0.177, simple_loss=0.2694, pruned_loss=0.04226, over 8192.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2974, pruned_loss=0.06931, over 1614076.07 frames. ], batch size: 23, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:56:15,759 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.486e+02 2.968e+02 4.028e+02 1.073e+03, threshold=5.936e+02, percent-clipped=5.0
+2023-02-06 19:56:21,114 INFO [train.py:901] (3/4) Epoch 17, batch 3950, loss[loss=0.2264, simple_loss=0.3012, pruned_loss=0.07583, over 7797.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.2967, pruned_loss=0.06914, over 1614163.91 frames. ], batch size: 20, lr: 4.48e-03, grad_scale: 8.0
+2023-02-06 19:56:53,886 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.94 vs. limit=5.0
+2023-02-06 19:56:56,947 INFO [train.py:901] (3/4) Epoch 17, batch 4000, loss[loss=0.2139, simple_loss=0.3, pruned_loss=0.06389, over 8513.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2959, pruned_loss=0.06851, over 1613539.17 frames. ], batch size: 29, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:57:27,408 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.526e+02 3.333e+02 3.995e+02 7.649e+02, threshold=6.666e+02, percent-clipped=5.0
+2023-02-06 19:57:32,346 INFO [train.py:901] (3/4) Epoch 17, batch 4050, loss[loss=0.2041, simple_loss=0.2967, pruned_loss=0.05574, over 8108.00 frames. ], tot_loss[loss=0.2172, simple_loss=0.2967, pruned_loss=0.0688, over 1620234.72 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:57:41,378 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133392.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 19:57:42,113 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133393.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:57:42,760 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133394.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:57:59,639 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133418.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:58:07,686 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0
+2023-02-06 19:58:07,783 INFO [train.py:901] (3/4) Epoch 17, batch 4100, loss[loss=0.2462, simple_loss=0.3212, pruned_loss=0.08555, over 8245.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2973, pruned_loss=0.0693, over 1621214.22 frames. ], batch size: 22, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:58:40,085 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.513e+02 2.919e+02 3.658e+02 1.440e+03, threshold=5.839e+02, percent-clipped=2.0
+2023-02-06 19:58:45,031 INFO [train.py:901] (3/4) Epoch 17, batch 4150, loss[loss=0.2361, simple_loss=0.3178, pruned_loss=0.07719, over 8339.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2959, pruned_loss=0.06853, over 1618938.61 frames. ], batch size: 26, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:58:45,262 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=133479.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:58:54,027 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133492.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:59:02,306 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=133504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 19:59:14,062 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5481, 1.2989, 4.6971, 1.7695, 4.1855, 3.8705, 4.3173, 4.1590],
+ device='cuda:3'), covar=tensor([0.0512, 0.4964, 0.0537, 0.4051, 0.1143, 0.1102, 0.0502, 0.0641],
+ device='cuda:3'), in_proj_covar=tensor([0.0571, 0.0622, 0.0662, 0.0595, 0.0671, 0.0582, 0.0570, 0.0635],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 19:59:19,507 INFO [train.py:901] (3/4) Epoch 17, batch 4200, loss[loss=0.216, simple_loss=0.2949, pruned_loss=0.06855, over 8032.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2966, pruned_loss=0.06879, over 1620686.58 frames. ], batch size: 22, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:59:32,473 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-06 19:59:39,304 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7836, 1.3565, 3.9194, 1.4563, 3.5124, 3.2559, 3.5393, 3.4348],
+ device='cuda:3'), covar=tensor([0.0611, 0.4457, 0.0730, 0.4140, 0.1129, 0.1115, 0.0694, 0.0782],
+ device='cuda:3'), in_proj_covar=tensor([0.0573, 0.0623, 0.0664, 0.0595, 0.0673, 0.0583, 0.0572, 0.0636],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 19:59:51,074 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.565e+02 3.135e+02 3.827e+02 1.180e+03, threshold=6.269e+02, percent-clipped=6.0
+2023-02-06 19:59:56,778 INFO [train.py:901] (3/4) Epoch 17, batch 4250, loss[loss=0.1793, simple_loss=0.2718, pruned_loss=0.04337, over 8240.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2962, pruned_loss=0.06832, over 1623159.08 frames. ], batch size: 22, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 19:59:57,447 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-06 20:00:03,426 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0
+2023-02-06 20:00:05,318 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.74 vs. limit=5.0
+2023-02-06 20:00:30,990 INFO [train.py:901] (3/4) Epoch 17, batch 4300, loss[loss=0.2131, simple_loss=0.3088, pruned_loss=0.05871, over 8513.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2956, pruned_loss=0.06823, over 1620805.39 frames. ], batch size: 26, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:01:00,697 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=133670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:01:01,925 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.551e+02 3.118e+02 3.976e+02 6.360e+02, threshold=6.236e+02, percent-clipped=1.0
+2023-02-06 20:01:06,900 INFO [train.py:901] (3/4) Epoch 17, batch 4350, loss[loss=0.2874, simple_loss=0.3548, pruned_loss=0.11, over 6974.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2958, pruned_loss=0.06819, over 1619702.66 frames. ], batch size: 72, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:01:18,441 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5316, 2.0159, 3.3476, 1.3606, 2.5541, 2.0222, 1.5561, 2.5582],
+ device='cuda:3'), covar=tensor([0.1791, 0.2352, 0.0702, 0.4218, 0.1552, 0.2892, 0.2217, 0.2002],
+ device='cuda:3'), in_proj_covar=tensor([0.0510, 0.0571, 0.0550, 0.0621, 0.0637, 0.0576, 0.0514, 0.0627],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-06 20:01:25,375 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-06 20:01:31,292 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-06 20:01:43,135 INFO [train.py:901] (3/4) Epoch 17, batch 4400, loss[loss=0.1984, simple_loss=0.3001, pruned_loss=0.04835, over 8362.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.297, pruned_loss=0.06814, over 1624287.77 frames. ], batch size: 24, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:01:48,106 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133736.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 20:01:49,422 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133738.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:02:12,878 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.579e+02 3.148e+02 3.884e+02 8.584e+02, threshold=6.297e+02, percent-clipped=6.0
+2023-02-06 20:02:13,681 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-06 20:02:18,533 INFO [train.py:901] (3/4) Epoch 17, batch 4450, loss[loss=0.2024, simple_loss=0.2815, pruned_loss=0.06166, over 8191.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2948, pruned_loss=0.06746, over 1621929.07 frames. ], batch size: 23, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:02:55,024 INFO [train.py:901] (3/4) Epoch 17, batch 4500, loss[loss=0.1758, simple_loss=0.2511, pruned_loss=0.05028, over 7431.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2939, pruned_loss=0.06767, over 1615437.56 frames. ], batch size: 17, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:03:00,079 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=133836.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:03:10,432 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133851.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 20:03:10,911 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-06 20:03:11,739 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133853.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:03:24,313 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.330e+02 2.856e+02 3.592e+02 8.327e+02, threshold=5.711e+02, percent-clipped=1.0
+2023-02-06 20:03:29,183 INFO [train.py:901] (3/4) Epoch 17, batch 4550, loss[loss=0.1821, simple_loss=0.275, pruned_loss=0.04465, over 8136.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2943, pruned_loss=0.06774, over 1612479.25 frames. ], batch size: 22, lr: 4.47e-03, grad_scale: 8.0
+2023-02-06 20:03:32,459 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.68 vs. limit=5.0
+2023-02-06 20:04:04,549 INFO [train.py:901] (3/4) Epoch 17, batch 4600, loss[loss=0.2564, simple_loss=0.3239, pruned_loss=0.0944, over 8620.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2953, pruned_loss=0.06849, over 1613030.42 frames. ], batch size: 49, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:04:19,774 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0
+2023-02-06 20:04:21,359 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=133951.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:04:35,425 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.660e+02 2.386e+02 2.834e+02 3.772e+02 7.696e+02, threshold=5.668e+02, percent-clipped=3.0
+2023-02-06 20:04:40,234 INFO [train.py:901] (3/4) Epoch 17, batch 4650, loss[loss=0.2239, simple_loss=0.3056, pruned_loss=0.07117, over 8564.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2952, pruned_loss=0.06821, over 1615490.37 frames. ], batch size: 39, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:05:06,465 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134014.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:05:07,195 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134015.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:05:16,664 INFO [train.py:901] (3/4) Epoch 17, batch 4700, loss[loss=0.1916, simple_loss=0.2752, pruned_loss=0.05395, over 8037.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2935, pruned_loss=0.0673, over 1613786.71 frames. ], batch size: 22, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:05:48,988 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.331e+02 2.674e+02 3.349e+02 6.559e+02, threshold=5.348e+02, percent-clipped=3.0
+2023-02-06 20:05:53,966 INFO [train.py:901] (3/4) Epoch 17, batch 4750, loss[loss=0.2063, simple_loss=0.2828, pruned_loss=0.06494, over 8089.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2934, pruned_loss=0.06696, over 1612906.66 frames. ], batch size: 21, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:06:13,302 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134107.0, num_to_drop=1, layers_to_drop={1}
+2023-02-06 20:06:14,648 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134109.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:06:17,896 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-06 20:06:20,573 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-06 20:06:28,183 INFO [train.py:901] (3/4) Epoch 17, batch 4800, loss[loss=0.2413, simple_loss=0.3176, pruned_loss=0.08252, over 8600.00 frames. ], tot_loss[loss=0.2147, simple_loss=0.2942, pruned_loss=0.06761, over 1612609.38 frames. ], batch size: 31, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:06:29,214 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134129.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:06:31,288 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134132.0, num_to_drop=1, layers_to_drop={0}
+2023-02-06 20:06:32,691 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134134.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:07:00,734 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.352e+02 2.869e+02 3.488e+02 8.440e+02, threshold=5.739e+02, percent-clipped=9.0
+2023-02-06 20:07:06,361 INFO [train.py:901] (3/4) Epoch 17, batch 4850, loss[loss=0.2071, simple_loss=0.2968, pruned_loss=0.05865, over 8027.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2944, pruned_loss=0.06782, over 1609679.56 frames. ], batch size: 22, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:07:14,663 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-06 20:07:26,226 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134207.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:07:41,127 INFO [train.py:901] (3/4) Epoch 17, batch 4900, loss[loss=0.2301, simple_loss=0.3119, pruned_loss=0.07417, over 8600.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2935, pruned_loss=0.06721, over 1611906.62 frames. ], batch size: 31, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:07:43,514 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134232.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:08:13,116 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.657e+02 3.351e+02 4.707e+02 1.168e+03, threshold=6.701e+02, percent-clipped=12.0
+2023-02-06 20:08:17,820 INFO [train.py:901] (3/4) Epoch 17, batch 4950, loss[loss=0.2425, simple_loss=0.3099, pruned_loss=0.08755, over 7325.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2942, pruned_loss=0.06722, over 1614214.95 frames. ], batch size: 71, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:08:49,559 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:08:52,984 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134327.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:08:54,217 INFO [train.py:901] (3/4) Epoch 17, batch 5000, loss[loss=0.2131, simple_loss=0.2934, pruned_loss=0.06643, over 7821.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2932, pruned_loss=0.06667, over 1611285.41 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:09:09,866 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5217, 2.6304, 1.7614, 2.2010, 2.1037, 1.6039, 2.0367, 2.0537],
+ device='cuda:3'), covar=tensor([0.1563, 0.0382, 0.1214, 0.0672, 0.0704, 0.1442, 0.1020, 0.0991],
+ device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0234, 0.0327, 0.0302, 0.0297, 0.0332, 0.0342, 0.0315],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 20:09:15,213 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134359.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:09:24,853 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.361e+02 2.656e+02 3.405e+02 6.362e+02, threshold=5.311e+02, percent-clipped=0.0
+2023-02-06 20:09:30,487 INFO [train.py:901] (3/4) Epoch 17, batch 5050, loss[loss=0.1919, simple_loss=0.2792, pruned_loss=0.05228, over 8727.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2933, pruned_loss=0.06656, over 1613870.92 frames. ], batch size: 34, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:09:31,735 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0
+2023-02-06 20:09:35,089 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134385.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:09:54,066 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134410.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:09:58,691 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-06 20:10:05,232 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8241, 2.3682, 3.4101, 1.9598, 1.8128, 3.4570, 0.7376, 2.1078],
+ device='cuda:3'), covar=tensor([0.1733, 0.1544, 0.0338, 0.1983, 0.2931, 0.0284, 0.2654, 0.1527],
+ device='cuda:3'), in_proj_covar=tensor([0.0180, 0.0186, 0.0117, 0.0221, 0.0264, 0.0124, 0.0168, 0.0184],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 20:10:07,165 INFO [train.py:901] (3/4) Epoch 17, batch 5100, loss[loss=0.2074, simple_loss=0.2963, pruned_loss=0.05927, over 8475.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2936, pruned_loss=0.06662, over 1619237.10 frames. ], batch size: 25, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:10:12,406 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8376, 1.9213, 2.4189, 1.6315, 1.4369, 2.4703, 0.5126, 1.5166],
+ device='cuda:3'), covar=tensor([0.2203, 0.1264, 0.0577, 0.1751, 0.3115, 0.0631, 0.2619, 0.2023],
+ device='cuda:3'), in_proj_covar=tensor([0.0179, 0.0185, 0.0116, 0.0220, 0.0263, 0.0123, 0.0167, 0.0183],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-06 20:10:36,989 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.384e+02 2.769e+02 3.675e+02 1.185e+03, threshold=5.538e+02, percent-clipped=9.0
+2023-02-06 20:10:38,569 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134474.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:10:42,699 INFO [train.py:901] (3/4) Epoch 17, batch 5150, loss[loss=0.2084, simple_loss=0.2833, pruned_loss=0.06675, over 7803.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2938, pruned_loss=0.06661, over 1616438.96 frames. ], batch size: 20, lr: 4.46e-03, grad_scale: 8.0
+2023-02-06 20:11:20,275 INFO [train.py:901] (3/4) Epoch 17, batch 5200, loss[loss=0.238, simple_loss=0.3203, pruned_loss=0.07779, over 7966.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2939, pruned_loss=0.0668, over 1615415.32 frames. ], batch size: 21, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:11:49,978 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.225e+02 2.783e+02 3.706e+02 1.482e+03, threshold=5.567e+02, percent-clipped=8.0
+2023-02-06 20:11:54,881 INFO [train.py:901] (3/4) Epoch 17, batch 5250, loss[loss=0.2142, simple_loss=0.287, pruned_loss=0.07073, over 8749.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2943, pruned_loss=0.06695, over 1617707.43 frames. ], batch size: 39, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:11:57,589 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-06 20:11:57,995 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0
+2023-02-06 20:12:09,111 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8209, 1.5183, 3.9370, 1.4696, 3.4683, 3.2522, 3.6014, 3.4439],
+ device='cuda:3'), covar=tensor([0.0657, 0.4559, 0.0692, 0.4150, 0.1322, 0.1038, 0.0707, 0.0821],
+ device='cuda:3'), in_proj_covar=tensor([0.0581, 0.0621, 0.0663, 0.0593, 0.0674, 0.0579, 0.0574, 0.0641],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 20:12:31,033 INFO [train.py:901] (3/4) Epoch 17, batch 5300, loss[loss=0.1668, simple_loss=0.2433, pruned_loss=0.04518, over 8083.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.293, pruned_loss=0.06582, over 1619746.35 frames. ], batch size: 21, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:12:34,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5559, 1.6357, 4.7133, 1.8390, 4.2120, 3.9543, 4.3136, 4.1564],
+ device='cuda:3'), covar=tensor([0.0563, 0.4632, 0.0520, 0.3911, 0.1066, 0.0989, 0.0585, 0.0648],
+ device='cuda:3'), in_proj_covar=tensor([0.0583, 0.0623, 0.0664, 0.0595, 0.0676, 0.0580, 0.0576, 0.0642],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 20:12:48,147 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3296, 2.0347, 2.7951, 2.2996, 2.7163, 2.2400, 2.0358, 1.5836],
+ device='cuda:3'), covar=tensor([0.4490, 0.4382, 0.1499, 0.2733, 0.1904, 0.2546, 0.1670, 0.4380],
+ device='cuda:3'), in_proj_covar=tensor([0.0913, 0.0927, 0.0771, 0.0901, 0.0965, 0.0847, 0.0719, 0.0798],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 20:12:58,386 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134666.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:13:00,526 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5354, 2.6066, 1.8485, 2.2988, 2.1406, 1.6099, 2.0856, 2.0891],
+ device='cuda:3'), covar=tensor([0.1349, 0.0342, 0.1142, 0.0558, 0.0727, 0.1519, 0.0965, 0.0890],
+ device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0232, 0.0325, 0.0301, 0.0296, 0.0330, 0.0340, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 20:13:01,795 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=134671.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:13:02,357 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.333e+02 2.884e+02 3.429e+02 1.143e+03, threshold=5.769e+02, percent-clipped=6.0
+2023-02-06 20:13:05,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0608, 1.6424, 1.3805, 1.6355, 1.3703, 1.2459, 1.3468, 1.3257],
+ device='cuda:3'), covar=tensor([0.1027, 0.0420, 0.1168, 0.0483, 0.0670, 0.1356, 0.0785, 0.0817],
+ device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0232, 0.0325, 0.0301, 0.0296, 0.0331, 0.0340, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 20:13:07,134 INFO [train.py:901] (3/4) Epoch 17, batch 5350, loss[loss=0.1923, simple_loss=0.2857, pruned_loss=0.04941, over 8328.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2946, pruned_loss=0.0664, over 1620233.77 frames. ], batch size: 25, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:13:26,623 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6881, 2.0253, 2.2013, 1.2669, 2.3570, 1.5055, 0.6950, 1.8840],
+ device='cuda:3'), covar=tensor([0.0572, 0.0339, 0.0238, 0.0617, 0.0361, 0.0820, 0.0771, 0.0279],
+ device='cuda:3'), in_proj_covar=tensor([0.0438, 0.0376, 0.0320, 0.0434, 0.0359, 0.0520, 0.0380, 0.0397],
+ out_proj_covar=tensor([1.1944e-04, 1.0013e-04, 8.4772e-05, 1.1600e-04, 9.6398e-05, 1.5007e-04,
+ 1.0359e-04, 1.0650e-04], device='cuda:3')
+2023-02-06 20:13:43,340 INFO [train.py:901] (3/4) Epoch 17, batch 5400, loss[loss=0.1863, simple_loss=0.2683, pruned_loss=0.05217, over 7966.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.296, pruned_loss=0.06644, over 1624560.85 frames. ], batch size: 21, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:13:44,314 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=134730.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:01,981 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=134755.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:14,310 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.337e+02 2.988e+02 3.635e+02 1.067e+03, threshold=5.976e+02, percent-clipped=7.0
+2023-02-06 20:14:18,986 INFO [train.py:901] (3/4) Epoch 17, batch 5450, loss[loss=0.2495, simple_loss=0.3262, pruned_loss=0.08637, over 8114.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2964, pruned_loss=0.06739, over 1625101.56 frames. ], batch size: 23, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:14:20,487 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134781.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:24,001 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=134786.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:14:54,786 INFO [train.py:901] (3/4) Epoch 17, batch 5500, loss[loss=0.2591, simple_loss=0.3253, pruned_loss=0.09649, over 8464.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2959, pruned_loss=0.06717, over 1624671.60 frames. ], batch size: 27, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:14:55,407 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-06 20:15:00,395 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5458, 1.7794, 1.8423, 1.1823, 1.9423, 1.3355, 0.4458, 1.7254],
+ device='cuda:3'), covar=tensor([0.0439, 0.0309, 0.0212, 0.0466, 0.0311, 0.0794, 0.0731, 0.0221],
+ device='cuda:3'), in_proj_covar=tensor([0.0434, 0.0373, 0.0316, 0.0428, 0.0355, 0.0514, 0.0375, 0.0394],
+ out_proj_covar=tensor([1.1841e-04, 9.9318e-05, 8.3595e-05, 1.1440e-04, 9.4999e-05, 1.4805e-04,
+ 1.0213e-04, 1.0556e-04], device='cuda:3')
+2023-02-06 20:15:25,536 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.381e+02 2.895e+02 3.783e+02 8.489e+02, threshold=5.790e+02, percent-clipped=3.0
+2023-02-06 20:15:31,376 INFO [train.py:901] (3/4) Epoch 17, batch 5550, loss[loss=0.2204, simple_loss=0.3064, pruned_loss=0.06725, over 8193.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2946, pruned_loss=0.06684, over 1621194.61 frames. ], batch size: 23, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:15:32,207 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=134880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-06 20:15:42,878 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0
+2023-02-06 20:15:58,385 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.99 vs. limit=5.0
+2023-02-06 20:16:06,822 INFO [train.py:901] (3/4) Epoch 17, batch 5600, loss[loss=0.2287, simple_loss=0.3243, pruned_loss=0.06654, over 8200.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2927, pruned_loss=0.06612, over 1613378.42 frames. ], batch size: 23, lr: 4.45e-03, grad_scale: 8.0
+2023-02-06 20:16:12,376 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7991, 1.5461, 3.1347, 1.4668, 2.2472, 3.3293, 3.4894, 2.8430],
+ device='cuda:3'), covar=tensor([0.1177, 0.1699, 0.0387, 0.2046, 0.0993, 0.0280, 0.0600, 0.0620],
+ device='cuda:3'), in_proj_covar=tensor([0.0282, 0.0311, 0.0276, 0.0302, 0.0295, 0.0255, 0.0390, 0.0301],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 20:16:23,704 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1942, 1.6711, 1.2816, 1.6319, 1.4202, 1.1197, 1.4553, 1.4826],
+ device='cuda:3'), covar=tensor([0.0740, 0.0322, 0.0822, 0.0351, 0.0526, 0.1039, 0.0581, 0.0523],
+ device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0232, 0.0323, 0.0301, 0.0296, 0.0330, 0.0340, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-06 20:16:32,068 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8936, 1.6042, 2.0412, 1.7622, 1.9864, 1.9169, 1.6966, 0.8155],
+ device='cuda:3'), covar=tensor([0.5265, 0.4401, 0.1740, 0.3189, 0.2257, 0.2761, 0.1888, 0.4774],
+ device='cuda:3'), in_proj_covar=tensor([0.0914, 0.0929, 0.0773, 0.0902, 0.0972, 0.0849, 0.0721, 0.0800],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-06 20:16:38,748 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.724e+02 3.292e+02 4.135e+02 9.276e+02, threshold=6.584e+02, percent-clipped=7.0
+2023-02-06 20:16:42,891 INFO [train.py:901] (3/4) Epoch 17, batch 5650, loss[loss=0.1782, simple_loss=0.2635, pruned_loss=0.04642, over 7648.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2939, pruned_loss=0.0668, over 1612821.64 frames. ], batch size: 19, lr: 4.45e-03, grad_scale: 4.0
+2023-02-06 20:17:04,390 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-06 20:17:18,720 INFO [train.py:901] (3/4) Epoch 17, batch 5700, loss[loss=0.1631, simple_loss=0.25, pruned_loss=0.03813, over 6433.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2941, pruned_loss=0.06744, over 1609699.92 frames.
], batch size: 14, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:17:24,519 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135037.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:28,020 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:42,577 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:45,955 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135067.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:17:49,719 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.516e+02 3.214e+02 3.973e+02 1.283e+03, threshold=6.427e+02, percent-clipped=6.0 +2023-02-06 20:17:53,701 INFO [train.py:901] (3/4) Epoch 17, batch 5750, loss[loss=0.1808, simple_loss=0.2621, pruned_loss=0.04976, over 8146.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2936, pruned_loss=0.06678, over 1609526.79 frames. ], batch size: 22, lr: 4.45e-03, grad_scale: 4.0 +2023-02-06 20:18:11,550 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 20:18:13,321 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 20:18:17,698 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4121, 4.3276, 3.9699, 2.1688, 3.8278, 3.9263, 3.9946, 3.6313], + device='cuda:3'), covar=tensor([0.0751, 0.0589, 0.1089, 0.4785, 0.0905, 0.1033, 0.1360, 0.0826], + device='cuda:3'), in_proj_covar=tensor([0.0508, 0.0417, 0.0423, 0.0525, 0.0414, 0.0417, 0.0407, 0.0364], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:18:30,185 INFO [train.py:901] (3/4) Epoch 17, batch 5800, loss[loss=0.2541, simple_loss=0.3218, pruned_loss=0.09325, over 7652.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2942, pruned_loss=0.06691, over 1615542.77 frames. ], batch size: 19, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:18:47,013 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5582, 2.2719, 3.2262, 2.6258, 3.0367, 2.4565, 2.1654, 1.7491], + device='cuda:3'), covar=tensor([0.4784, 0.4928, 0.1493, 0.3151, 0.2405, 0.2598, 0.1780, 0.5085], + device='cuda:3'), in_proj_covar=tensor([0.0925, 0.0938, 0.0778, 0.0908, 0.0978, 0.0856, 0.0726, 0.0804], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 20:19:00,545 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.313e+02 2.882e+02 3.681e+02 6.576e+02, threshold=5.764e+02, percent-clipped=1.0 +2023-02-06 20:19:04,567 INFO [train.py:901] (3/4) Epoch 17, batch 5850, loss[loss=0.2558, simple_loss=0.3363, pruned_loss=0.08762, over 8461.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2952, pruned_loss=0.06704, over 1618999.52 frames. 
], batch size: 29, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:19:12,154 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135189.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:37,646 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135224.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:19:41,092 INFO [train.py:901] (3/4) Epoch 17, batch 5900, loss[loss=0.2213, simple_loss=0.3112, pruned_loss=0.06569, over 8490.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2945, pruned_loss=0.06693, over 1616004.41 frames. ], batch size: 29, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:12,437 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.357e+02 3.084e+02 3.660e+02 6.807e+02, threshold=6.167e+02, percent-clipped=2.0 +2023-02-06 20:20:16,628 INFO [train.py:901] (3/4) Epoch 17, batch 5950, loss[loss=0.2179, simple_loss=0.3086, pruned_loss=0.06362, over 8448.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2948, pruned_loss=0.06692, over 1621118.28 frames. ], batch size: 27, lr: 4.44e-03, grad_scale: 4.0 +2023-02-06 20:20:52,281 INFO [train.py:901] (3/4) Epoch 17, batch 6000, loss[loss=0.1973, simple_loss=0.2793, pruned_loss=0.05765, over 7924.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.295, pruned_loss=0.06762, over 1623099.20 frames. ], batch size: 20, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:20:52,281 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 20:21:02,022 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8176, 3.7988, 3.4541, 2.1810, 3.3243, 3.4635, 3.5014, 3.2135], + device='cuda:3'), covar=tensor([0.0914, 0.0594, 0.1009, 0.4503, 0.1072, 0.1039, 0.1206, 0.1085], + device='cuda:3'), in_proj_covar=tensor([0.0501, 0.0412, 0.0417, 0.0518, 0.0406, 0.0410, 0.0401, 0.0360], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:21:05,418 INFO [train.py:935] (3/4) Epoch 17, validation: loss=0.1774, simple_loss=0.2777, pruned_loss=0.03857, over 944034.00 frames. +2023-02-06 20:21:05,419 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 20:21:12,601 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135339.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:21:16,156 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5055, 1.4629, 1.8156, 1.3372, 1.1673, 1.8067, 0.1558, 1.1759], + device='cuda:3'), covar=tensor([0.1807, 0.1359, 0.0419, 0.1048, 0.2831, 0.0464, 0.2172, 0.1297], + device='cuda:3'), in_proj_covar=tensor([0.0176, 0.0184, 0.0115, 0.0216, 0.0260, 0.0123, 0.0165, 0.0180], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 20:21:36,686 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.411e+02 3.026e+02 3.580e+02 8.983e+02, threshold=6.051e+02, percent-clipped=2.0 +2023-02-06 20:21:40,882 INFO [train.py:901] (3/4) Epoch 17, batch 6050, loss[loss=0.1723, simple_loss=0.245, pruned_loss=0.04983, over 7222.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2936, pruned_loss=0.06766, over 1611796.47 frames. 
], batch size: 16, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:21:41,759 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8007, 3.6668, 3.3783, 1.8870, 3.3123, 3.3082, 3.4261, 3.0040], + device='cuda:3'), covar=tensor([0.0927, 0.0710, 0.1183, 0.4684, 0.1038, 0.1236, 0.1330, 0.1051], + device='cuda:3'), in_proj_covar=tensor([0.0501, 0.0412, 0.0416, 0.0517, 0.0406, 0.0410, 0.0400, 0.0359], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:22:16,317 INFO [train.py:901] (3/4) Epoch 17, batch 6100, loss[loss=0.2412, simple_loss=0.3119, pruned_loss=0.08521, over 8251.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2947, pruned_loss=0.06831, over 1609843.55 frames. ], batch size: 22, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:22:47,563 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.459e+02 2.890e+02 3.783e+02 6.848e+02, threshold=5.780e+02, percent-clipped=3.0 +2023-02-06 20:22:49,621 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 20:22:52,257 INFO [train.py:901] (3/4) Epoch 17, batch 6150, loss[loss=0.1717, simple_loss=0.2487, pruned_loss=0.04739, over 7207.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2958, pruned_loss=0.06885, over 1613224.32 frames. ], batch size: 16, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:22:55,588 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8227, 5.9181, 5.0377, 2.5738, 5.1921, 5.6083, 5.3850, 5.2298], + device='cuda:3'), covar=tensor([0.0487, 0.0337, 0.0859, 0.3877, 0.0712, 0.0581, 0.0964, 0.0506], + device='cuda:3'), in_proj_covar=tensor([0.0502, 0.0413, 0.0418, 0.0519, 0.0409, 0.0413, 0.0402, 0.0360], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:23:26,588 INFO [train.py:901] (3/4) Epoch 17, batch 6200, loss[loss=0.2241, simple_loss=0.3004, pruned_loss=0.07395, over 8246.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2951, pruned_loss=0.06787, over 1615888.49 frames. ], batch size: 24, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:23:29,372 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135533.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:23:57,601 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.470e+02 3.035e+02 3.550e+02 6.137e+02, threshold=6.070e+02, percent-clipped=1.0 +2023-02-06 20:24:01,728 INFO [train.py:901] (3/4) Epoch 17, batch 6250, loss[loss=0.2047, simple_loss=0.2718, pruned_loss=0.06881, over 7787.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2955, pruned_loss=0.06803, over 1617948.93 frames. ], batch size: 19, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:13,157 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135595.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:29,700 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 20:24:30,226 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:36,951 INFO [train.py:901] (3/4) Epoch 17, batch 6300, loss[loss=0.1937, simple_loss=0.2791, pruned_loss=0.05409, over 8288.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2946, pruned_loss=0.06777, over 1614732.43 frames. 
], batch size: 23, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:24:49,740 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:24:49,776 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=135648.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:25:07,385 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.700e+02 3.426e+02 4.477e+02 8.691e+02, threshold=6.853e+02, percent-clipped=8.0 +2023-02-06 20:25:11,443 INFO [train.py:901] (3/4) Epoch 17, batch 6350, loss[loss=0.192, simple_loss=0.269, pruned_loss=0.0575, over 7925.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2961, pruned_loss=0.06871, over 1619475.13 frames. ], batch size: 20, lr: 4.44e-03, grad_scale: 8.0 +2023-02-06 20:25:46,617 INFO [train.py:901] (3/4) Epoch 17, batch 6400, loss[loss=0.1923, simple_loss=0.2934, pruned_loss=0.04565, over 8615.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2961, pruned_loss=0.06854, over 1618232.17 frames. ], batch size: 39, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:16,681 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.216e+02 2.648e+02 3.143e+02 6.334e+02, threshold=5.295e+02, percent-clipped=0.0 +2023-02-06 20:26:20,488 INFO [train.py:901] (3/4) Epoch 17, batch 6450, loss[loss=0.2302, simple_loss=0.327, pruned_loss=0.06671, over 8765.00 frames. ], tot_loss[loss=0.2174, simple_loss=0.2967, pruned_loss=0.06904, over 1620103.19 frames. ], batch size: 40, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:26:39,796 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4075, 1.4852, 1.3913, 1.8570, 0.7958, 1.2171, 1.2660, 1.4597], + device='cuda:3'), covar=tensor([0.0841, 0.0773, 0.1070, 0.0473, 0.1059, 0.1427, 0.0781, 0.0737], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0198, 0.0248, 0.0211, 0.0209, 0.0248, 0.0256, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 20:26:56,417 INFO [train.py:901] (3/4) Epoch 17, batch 6500, loss[loss=0.2046, simple_loss=0.2956, pruned_loss=0.05678, over 8465.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2974, pruned_loss=0.06961, over 1620651.60 frames. ], batch size: 25, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:27,349 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.443e+02 3.095e+02 4.367e+02 8.897e+02, threshold=6.190e+02, percent-clipped=12.0 +2023-02-06 20:27:31,534 INFO [train.py:901] (3/4) Epoch 17, batch 6550, loss[loss=0.1992, simple_loss=0.2863, pruned_loss=0.05607, over 8240.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2961, pruned_loss=0.06879, over 1620039.55 frames. 
], batch size: 24, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:27:44,287 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2695, 1.6942, 1.7343, 1.0023, 1.6618, 1.2885, 0.2779, 1.6631], + device='cuda:3'), covar=tensor([0.0415, 0.0257, 0.0199, 0.0445, 0.0317, 0.0686, 0.0672, 0.0199], + device='cuda:3'), in_proj_covar=tensor([0.0430, 0.0372, 0.0320, 0.0425, 0.0353, 0.0512, 0.0374, 0.0395], + device='cuda:3'), out_proj_covar=tensor([1.1723e-04, 9.8929e-05, 8.4846e-05, 1.1351e-04, 9.4554e-05, 1.4739e-04, + 1.0180e-04, 1.0568e-04], device='cuda:3') +2023-02-06 20:27:48,288 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=135904.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:27:56,446 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 20:28:06,536 INFO [train.py:901] (3/4) Epoch 17, batch 6600, loss[loss=0.2158, simple_loss=0.2968, pruned_loss=0.06739, over 7652.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2966, pruned_loss=0.06896, over 1620879.90 frames. ], batch size: 19, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:06,734 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=135929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:16,347 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 20:28:36,543 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.554e+02 2.943e+02 3.634e+02 1.271e+03, threshold=5.887e+02, percent-clipped=2.0 +2023-02-06 20:28:40,563 INFO [train.py:901] (3/4) Epoch 17, batch 6650, loss[loss=0.193, simple_loss=0.2891, pruned_loss=0.04846, over 5553.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06793, over 1618121.58 frames. ], batch size: 12, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:28:49,903 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=135992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:28:56,445 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136000.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:04,534 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136012.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:29:12,172 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.23 vs. limit=5.0 +2023-02-06 20:29:16,447 INFO [train.py:901] (3/4) Epoch 17, batch 6700, loss[loss=0.2024, simple_loss=0.2829, pruned_loss=0.06097, over 7800.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2947, pruned_loss=0.06839, over 1613887.61 frames. 
], batch size: 20, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:29:45,909 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2051, 1.3303, 1.6784, 1.2971, 0.6859, 1.4415, 1.2582, 1.0939], + device='cuda:3'), covar=tensor([0.0519, 0.1224, 0.1531, 0.1359, 0.0568, 0.1437, 0.0634, 0.0657], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0100, 0.0162, 0.0114, 0.0138], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 20:29:47,790 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.437e+02 3.090e+02 3.837e+02 8.578e+02, threshold=6.181e+02, percent-clipped=4.0 +2023-02-06 20:29:50,647 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5957, 2.0000, 3.3157, 1.4313, 2.3147, 2.0682, 1.6708, 2.4304], + device='cuda:3'), covar=tensor([0.1861, 0.2525, 0.0780, 0.4344, 0.1914, 0.3003, 0.2197, 0.2200], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0568, 0.0542, 0.0612, 0.0632, 0.0572, 0.0507, 0.0618], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:29:51,766 INFO [train.py:901] (3/4) Epoch 17, batch 6750, loss[loss=0.1974, simple_loss=0.2758, pruned_loss=0.05947, over 7802.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2944, pruned_loss=0.06822, over 1608039.88 frames. ], batch size: 20, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:09,234 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 20:30:11,742 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136107.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:26,348 INFO [train.py:901] (3/4) Epoch 17, batch 6800, loss[loss=0.2335, simple_loss=0.3089, pruned_loss=0.07906, over 8511.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.295, pruned_loss=0.06759, over 1612558.25 frames. ], batch size: 26, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:30:35,093 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 20:30:48,380 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136160.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:30:57,853 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.691e+02 3.204e+02 3.737e+02 8.793e+02, threshold=6.409e+02, percent-clipped=5.0 +2023-02-06 20:31:01,825 INFO [train.py:901] (3/4) Epoch 17, batch 6850, loss[loss=0.1745, simple_loss=0.2467, pruned_loss=0.05121, over 7436.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2956, pruned_loss=0.06803, over 1607542.84 frames. ], batch size: 17, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:31:22,727 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 20:31:37,000 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 20:31:37,305 INFO [train.py:901] (3/4) Epoch 17, batch 6900, loss[loss=0.1769, simple_loss=0.2732, pruned_loss=0.04024, over 8359.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2952, pruned_loss=0.06751, over 1610023.95 frames. 
], batch size: 24, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:08,492 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.868e+02 2.541e+02 3.415e+02 4.318e+02 7.722e+02, threshold=6.831e+02, percent-clipped=4.0 +2023-02-06 20:32:12,496 INFO [train.py:901] (3/4) Epoch 17, batch 6950, loss[loss=0.2142, simple_loss=0.2872, pruned_loss=0.07058, over 7973.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2958, pruned_loss=0.06864, over 1606719.80 frames. ], batch size: 21, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:32,956 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 20:32:48,142 INFO [train.py:901] (3/4) Epoch 17, batch 7000, loss[loss=0.2341, simple_loss=0.3202, pruned_loss=0.07396, over 8198.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2952, pruned_loss=0.06793, over 1609991.69 frames. ], batch size: 23, lr: 4.43e-03, grad_scale: 8.0 +2023-02-06 20:32:54,378 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7176, 1.5892, 2.9021, 1.3893, 2.1545, 3.1031, 3.2111, 2.6794], + device='cuda:3'), covar=tensor([0.1104, 0.1428, 0.0361, 0.2038, 0.0885, 0.0291, 0.0551, 0.0612], + device='cuda:3'), in_proj_covar=tensor([0.0286, 0.0311, 0.0277, 0.0306, 0.0296, 0.0254, 0.0392, 0.0300], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 20:32:58,260 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136344.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:04,331 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136353.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:06,280 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136356.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:11,076 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136363.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:18,223 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.499e+02 2.956e+02 3.583e+02 7.307e+02, threshold=5.911e+02, percent-clipped=2.0 +2023-02-06 20:33:22,383 INFO [train.py:901] (3/4) Epoch 17, batch 7050, loss[loss=0.2354, simple_loss=0.2954, pruned_loss=0.08769, over 6658.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2952, pruned_loss=0.06811, over 1606905.48 frames. ], batch size: 71, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:33:29,399 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:33:41,802 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0751, 1.6419, 1.3577, 1.5827, 1.3255, 1.2200, 1.2486, 1.2977], + device='cuda:3'), covar=tensor([0.1001, 0.0436, 0.1168, 0.0518, 0.0695, 0.1412, 0.0871, 0.0728], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0229, 0.0322, 0.0297, 0.0293, 0.0327, 0.0338, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 20:33:57,954 INFO [train.py:901] (3/4) Epoch 17, batch 7100, loss[loss=0.2423, simple_loss=0.33, pruned_loss=0.07729, over 8331.00 frames. ], tot_loss[loss=0.2162, simple_loss=0.2961, pruned_loss=0.06814, over 1612254.20 frames. 
], batch size: 26, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:18,668 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:26,517 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136471.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:34:27,626 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.503e+02 2.917e+02 3.905e+02 1.004e+03, threshold=5.834e+02, percent-clipped=4.0 +2023-02-06 20:34:31,744 INFO [train.py:901] (3/4) Epoch 17, batch 7150, loss[loss=0.2379, simple_loss=0.313, pruned_loss=0.08133, over 8601.00 frames. ], tot_loss[loss=0.218, simple_loss=0.2976, pruned_loss=0.06917, over 1616874.37 frames. ], batch size: 34, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:34:50,071 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:35:07,636 INFO [train.py:901] (3/4) Epoch 17, batch 7200, loss[loss=0.1931, simple_loss=0.2887, pruned_loss=0.04877, over 8188.00 frames. ], tot_loss[loss=0.2171, simple_loss=0.2965, pruned_loss=0.06891, over 1607314.32 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:35:09,680 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7393, 2.1044, 6.0518, 2.4896, 5.1437, 5.0479, 5.6279, 5.5959], + device='cuda:3'), covar=tensor([0.0870, 0.5658, 0.0649, 0.3975, 0.1710, 0.1254, 0.0701, 0.0621], + device='cuda:3'), in_proj_covar=tensor([0.0570, 0.0615, 0.0648, 0.0586, 0.0667, 0.0565, 0.0567, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 20:35:37,959 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.498e+02 3.072e+02 3.698e+02 8.742e+02, threshold=6.145e+02, percent-clipped=2.0 +2023-02-06 20:35:42,145 INFO [train.py:901] (3/4) Epoch 17, batch 7250, loss[loss=0.2475, simple_loss=0.3209, pruned_loss=0.08709, over 8515.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2961, pruned_loss=0.06875, over 1610048.71 frames. ], batch size: 49, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:11,363 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136619.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:17,806 INFO [train.py:901] (3/4) Epoch 17, batch 7300, loss[loss=0.2026, simple_loss=0.2865, pruned_loss=0.05933, over 8198.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2949, pruned_loss=0.06809, over 1608265.43 frames. 
], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:36:40,629 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:36:42,658 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1499, 1.4233, 3.5065, 1.4176, 2.3037, 3.8393, 3.9423, 3.3164], + device='cuda:3'), covar=tensor([0.1070, 0.1981, 0.0354, 0.2394, 0.1149, 0.0234, 0.0393, 0.0567], + device='cuda:3'), in_proj_covar=tensor([0.0285, 0.0314, 0.0278, 0.0307, 0.0296, 0.0255, 0.0392, 0.0299], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 20:36:48,514 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.506e+02 2.969e+02 3.762e+02 7.100e+02, threshold=5.939e+02, percent-clipped=2.0 +2023-02-06 20:36:52,585 INFO [train.py:901] (3/4) Epoch 17, batch 7350, loss[loss=0.1927, simple_loss=0.2811, pruned_loss=0.05214, over 8473.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2943, pruned_loss=0.0671, over 1609132.30 frames. ], batch size: 27, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:05,621 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=136697.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:15,657 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 20:37:16,515 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 20:37:17,972 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:27,014 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136727.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:28,132 INFO [train.py:901] (3/4) Epoch 17, batch 7400, loss[loss=0.1944, simple_loss=0.2745, pruned_loss=0.05716, over 8037.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2944, pruned_loss=0.06758, over 1610594.77 frames. ], batch size: 22, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:37:35,014 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 20:37:36,608 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136740.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:45,396 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136752.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:37:59,292 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.787e+02 2.354e+02 2.898e+02 3.777e+02 7.037e+02, threshold=5.795e+02, percent-clipped=3.0 +2023-02-06 20:38:03,299 INFO [train.py:901] (3/4) Epoch 17, batch 7450, loss[loss=0.2173, simple_loss=0.3047, pruned_loss=0.06493, over 8105.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2949, pruned_loss=0.0679, over 1610602.62 frames. ], batch size: 23, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:38:16,573 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 20:38:26,057 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=136812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:34,159 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=136824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:38:37,330 INFO [train.py:901] (3/4) Epoch 17, batch 7500, loss[loss=0.2639, simple_loss=0.3396, pruned_loss=0.09406, over 8023.00 frames. ], tot_loss[loss=0.2155, simple_loss=0.2954, pruned_loss=0.06776, over 1610368.66 frames. ], batch size: 22, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:39:09,459 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.462e+02 2.866e+02 3.948e+02 7.787e+02, threshold=5.732e+02, percent-clipped=6.0 +2023-02-06 20:39:11,061 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=136875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:13,444 INFO [train.py:901] (3/4) Epoch 17, batch 7550, loss[loss=0.2227, simple_loss=0.3093, pruned_loss=0.06803, over 8345.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2962, pruned_loss=0.06834, over 1610565.64 frames. ], batch size: 24, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:39:28,622 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=136900.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:39:49,027 INFO [train.py:901] (3/4) Epoch 17, batch 7600, loss[loss=0.1916, simple_loss=0.2595, pruned_loss=0.06185, over 7802.00 frames. ], tot_loss[loss=0.2168, simple_loss=0.2968, pruned_loss=0.06834, over 1617522.34 frames. ], batch size: 19, lr: 4.42e-03, grad_scale: 8.0 +2023-02-06 20:40:15,221 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6681, 2.0071, 3.3530, 1.4586, 2.4595, 2.0644, 1.6676, 2.3999], + device='cuda:3'), covar=tensor([0.1757, 0.2364, 0.0700, 0.4227, 0.1657, 0.2886, 0.2123, 0.2172], + device='cuda:3'), in_proj_covar=tensor([0.0509, 0.0571, 0.0545, 0.0614, 0.0635, 0.0576, 0.0509, 0.0619], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:40:21,096 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.516e+02 2.945e+02 3.717e+02 7.457e+02, threshold=5.891e+02, percent-clipped=6.0 +2023-02-06 20:40:22,736 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2608, 1.9705, 2.6926, 2.1662, 2.5828, 2.2735, 1.9641, 1.3063], + device='cuda:3'), covar=tensor([0.4773, 0.4601, 0.1549, 0.3122, 0.2128, 0.2528, 0.1692, 0.4509], + device='cuda:3'), in_proj_covar=tensor([0.0920, 0.0937, 0.0775, 0.0902, 0.0969, 0.0853, 0.0723, 0.0798], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 20:40:25,200 INFO [train.py:901] (3/4) Epoch 17, batch 7650, loss[loss=0.2162, simple_loss=0.2972, pruned_loss=0.0676, over 7659.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2943, pruned_loss=0.06685, over 1616656.88 frames. 
], batch size: 19, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:40:38,676 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6656, 2.0539, 3.1985, 1.4227, 2.4051, 2.1356, 1.6966, 2.3841], + device='cuda:3'), covar=tensor([0.1738, 0.2316, 0.0758, 0.4280, 0.1650, 0.2893, 0.2108, 0.2034], + device='cuda:3'), in_proj_covar=tensor([0.0510, 0.0572, 0.0546, 0.0615, 0.0636, 0.0577, 0.0509, 0.0620], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:40:43,281 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:40:58,549 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 20:41:00,049 INFO [train.py:901] (3/4) Epoch 17, batch 7700, loss[loss=0.2195, simple_loss=0.2825, pruned_loss=0.07826, over 8090.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2943, pruned_loss=0.06746, over 1612966.80 frames. ], batch size: 21, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:26,497 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 20:41:26,688 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:41:30,557 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.598e+02 3.111e+02 3.900e+02 8.834e+02, threshold=6.222e+02, percent-clipped=1.0 +2023-02-06 20:41:34,727 INFO [train.py:901] (3/4) Epoch 17, batch 7750, loss[loss=0.2297, simple_loss=0.3055, pruned_loss=0.07695, over 8189.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2933, pruned_loss=0.06701, over 1615655.54 frames. ], batch size: 23, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:41:36,314 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6534, 1.5843, 2.1462, 1.5642, 1.1661, 2.1762, 0.5345, 1.3088], + device='cuda:3'), covar=tensor([0.1816, 0.1355, 0.0381, 0.1268, 0.3218, 0.0461, 0.2301, 0.1567], + device='cuda:3'), in_proj_covar=tensor([0.0176, 0.0183, 0.0116, 0.0216, 0.0263, 0.0123, 0.0166, 0.0182], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 20:41:44,965 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137093.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:03,585 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:09,491 INFO [train.py:901] (3/4) Epoch 17, batch 7800, loss[loss=0.1976, simple_loss=0.2702, pruned_loss=0.06253, over 7422.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2937, pruned_loss=0.06674, over 1618354.19 frames. ], batch size: 17, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:42:36,424 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=137168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:42:39,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.454e+02 2.768e+02 3.488e+02 7.043e+02, threshold=5.537e+02, percent-clipped=4.0 +2023-02-06 20:42:41,436 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. 
limit=2.0 +2023-02-06 20:42:43,619 INFO [train.py:901] (3/4) Epoch 17, batch 7850, loss[loss=0.2305, simple_loss=0.313, pruned_loss=0.07403, over 8597.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.293, pruned_loss=0.06633, over 1616998.24 frames. ], batch size: 50, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:16,604 INFO [train.py:901] (3/4) Epoch 17, batch 7900, loss[loss=0.178, simple_loss=0.2684, pruned_loss=0.04379, over 7978.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2938, pruned_loss=0.06688, over 1616699.90 frames. ], batch size: 21, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:45,781 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.478e+02 3.005e+02 3.961e+02 6.905e+02, threshold=6.010e+02, percent-clipped=7.0 +2023-02-06 20:43:49,853 INFO [train.py:901] (3/4) Epoch 17, batch 7950, loss[loss=0.192, simple_loss=0.2676, pruned_loss=0.05822, over 6757.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2931, pruned_loss=0.0666, over 1611583.83 frames. ], batch size: 15, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:43:52,843 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=137283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:23,211 INFO [train.py:901] (3/4) Epoch 17, batch 8000, loss[loss=0.1963, simple_loss=0.2717, pruned_loss=0.06044, over 7707.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2942, pruned_loss=0.06713, over 1614087.75 frames. ], batch size: 18, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:44:52,977 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.583e+02 3.026e+02 3.684e+02 1.341e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-06 20:44:55,368 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:44:57,246 INFO [train.py:901] (3/4) Epoch 17, batch 8050, loss[loss=0.1723, simple_loss=0.2534, pruned_loss=0.04558, over 7558.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2937, pruned_loss=0.06729, over 1603334.35 frames. ], batch size: 18, lr: 4.41e-03, grad_scale: 16.0 +2023-02-06 20:45:12,517 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:45:29,328 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 20:45:34,948 INFO [train.py:901] (3/4) Epoch 18, batch 0, loss[loss=0.2158, simple_loss=0.3054, pruned_loss=0.0631, over 8492.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.3054, pruned_loss=0.0631, over 8492.00 frames. ], batch size: 26, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:45:34,948 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 20:45:46,125 INFO [train.py:935] (3/4) Epoch 18, validation: loss=0.1783, simple_loss=0.2784, pruned_loss=0.03907, over 944034.00 frames. +2023-02-06 20:45:46,126 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 20:46:00,859 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 20:46:20,802 INFO [train.py:901] (3/4) Epoch 18, batch 50, loss[loss=0.1835, simple_loss=0.2735, pruned_loss=0.04675, over 8137.00 frames. ], tot_loss[loss=0.2157, simple_loss=0.2956, pruned_loss=0.06793, over 361913.60 frames. 
], batch size: 22, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:29,004 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 2.698e+02 3.585e+02 4.414e+02 8.769e+02, threshold=7.169e+02, percent-clipped=9.0 +2023-02-06 20:46:35,902 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 20:46:56,059 INFO [train.py:901] (3/4) Epoch 18, batch 100, loss[loss=0.2028, simple_loss=0.2849, pruned_loss=0.06037, over 7807.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2992, pruned_loss=0.06878, over 641096.18 frames. ], batch size: 19, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:46:58,818 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 20:47:16,436 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=137539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:31,280 INFO [train.py:901] (3/4) Epoch 18, batch 150, loss[loss=0.2022, simple_loss=0.2824, pruned_loss=0.06097, over 8506.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.297, pruned_loss=0.06728, over 858558.07 frames. ], batch size: 26, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:47:33,505 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=137564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:47:39,699 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.369e+02 2.797e+02 3.885e+02 6.122e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-06 20:47:49,999 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-06 20:47:53,234 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9776, 1.7295, 2.1315, 1.9315, 2.0739, 1.9802, 1.7779, 0.7718], + device='cuda:3'), covar=tensor([0.5041, 0.4237, 0.1728, 0.2831, 0.1984, 0.2668, 0.1825, 0.4393], + device='cuda:3'), in_proj_covar=tensor([0.0922, 0.0939, 0.0773, 0.0909, 0.0973, 0.0855, 0.0725, 0.0802], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 20:48:07,693 INFO [train.py:901] (3/4) Epoch 18, batch 200, loss[loss=0.225, simple_loss=0.3113, pruned_loss=0.06936, over 8640.00 frames. ], tot_loss[loss=0.2158, simple_loss=0.2968, pruned_loss=0.06738, over 1028115.37 frames. ], batch size: 34, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:44,082 INFO [train.py:901] (3/4) Epoch 18, batch 250, loss[loss=0.243, simple_loss=0.3131, pruned_loss=0.08649, over 8365.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2982, pruned_loss=0.06856, over 1161983.04 frames. ], batch size: 24, lr: 4.28e-03, grad_scale: 16.0 +2023-02-06 20:48:52,405 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.467e+02 3.008e+02 3.586e+02 6.135e+02, threshold=6.015e+02, percent-clipped=1.0 +2023-02-06 20:48:55,946 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 20:49:03,701 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 20:49:19,875 INFO [train.py:901] (3/4) Epoch 18, batch 300, loss[loss=0.2411, simple_loss=0.3235, pruned_loss=0.07937, over 8506.00 frames. ], tot_loss[loss=0.2163, simple_loss=0.2972, pruned_loss=0.06772, over 1265609.94 frames. 
], batch size: 26, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:49:44,086 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-06 20:49:55,786 INFO [train.py:901] (3/4) Epoch 18, batch 350, loss[loss=0.1966, simple_loss=0.282, pruned_loss=0.05559, over 8287.00 frames. ], tot_loss[loss=0.2167, simple_loss=0.2978, pruned_loss=0.06775, over 1349319.51 frames. ], batch size: 23, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:05,695 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.555e+02 3.034e+02 3.752e+02 7.695e+02, threshold=6.069e+02, percent-clipped=3.0 +2023-02-06 20:50:32,304 INFO [train.py:901] (3/4) Epoch 18, batch 400, loss[loss=0.2552, simple_loss=0.3346, pruned_loss=0.08789, over 8178.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.2976, pruned_loss=0.06764, over 1413554.84 frames. ], batch size: 23, lr: 4.28e-03, grad_scale: 8.0 +2023-02-06 20:50:40,683 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2947, 4.3901, 3.9359, 2.1268, 3.8163, 3.8926, 3.9472, 3.7090], + device='cuda:3'), covar=tensor([0.0917, 0.0619, 0.1274, 0.5113, 0.1026, 0.1284, 0.1504, 0.0913], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0418, 0.0422, 0.0523, 0.0413, 0.0421, 0.0410, 0.0366], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:50:46,323 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6265, 1.6176, 2.0528, 1.4473, 1.1033, 2.0925, 0.3403, 1.2086], + device='cuda:3'), covar=tensor([0.1877, 0.1384, 0.0411, 0.1330, 0.3310, 0.0486, 0.2349, 0.1527], + device='cuda:3'), in_proj_covar=tensor([0.0178, 0.0184, 0.0118, 0.0219, 0.0263, 0.0124, 0.0166, 0.0182], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 20:51:08,172 INFO [train.py:901] (3/4) Epoch 18, batch 450, loss[loss=0.2269, simple_loss=0.3078, pruned_loss=0.07295, over 8132.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2977, pruned_loss=0.06777, over 1461251.89 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:16,924 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.504e+02 3.016e+02 3.557e+02 6.367e+02, threshold=6.032e+02, percent-clipped=3.0 +2023-02-06 20:51:43,038 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137910.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:51:43,595 INFO [train.py:901] (3/4) Epoch 18, batch 500, loss[loss=0.2517, simple_loss=0.3185, pruned_loss=0.0925, over 7113.00 frames. ], tot_loss[loss=0.2173, simple_loss=0.2977, pruned_loss=0.06844, over 1495639.09 frames. 
], batch size: 72, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:51:45,197 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=137913.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:52:05,361 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5739, 2.7911, 1.8344, 2.4021, 2.1802, 1.6903, 2.1387, 2.3009], + device='cuda:3'), covar=tensor([0.1454, 0.0326, 0.1133, 0.0626, 0.0727, 0.1381, 0.1025, 0.0953], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0234, 0.0326, 0.0300, 0.0297, 0.0330, 0.0342, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 20:52:20,558 INFO [train.py:901] (3/4) Epoch 18, batch 550, loss[loss=0.2012, simple_loss=0.2688, pruned_loss=0.06682, over 7559.00 frames. ], tot_loss[loss=0.2165, simple_loss=0.297, pruned_loss=0.06799, over 1522339.44 frames. ], batch size: 18, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:52:29,490 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.606e+02 3.197e+02 3.974e+02 7.545e+02, threshold=6.394e+02, percent-clipped=3.0 +2023-02-06 20:52:56,990 INFO [train.py:901] (3/4) Epoch 18, batch 600, loss[loss=0.2396, simple_loss=0.313, pruned_loss=0.08315, over 8476.00 frames. ], tot_loss[loss=0.2156, simple_loss=0.2964, pruned_loss=0.06741, over 1549478.57 frames. ], batch size: 25, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:09,287 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9406, 2.1874, 1.9236, 2.5721, 1.3324, 1.5756, 1.9051, 2.1786], + device='cuda:3'), covar=tensor([0.0685, 0.0683, 0.0881, 0.0381, 0.1012, 0.1252, 0.0817, 0.0660], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0199, 0.0251, 0.0212, 0.0208, 0.0249, 0.0254, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 20:53:11,892 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-06 20:53:32,788 INFO [train.py:901] (3/4) Epoch 18, batch 650, loss[loss=0.2502, simple_loss=0.3306, pruned_loss=0.08483, over 8500.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2959, pruned_loss=0.06717, over 1565460.34 frames. ], batch size: 26, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:53:43,406 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.461e+02 2.865e+02 3.365e+02 7.739e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-06 20:54:00,540 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 20:54:09,454 INFO [train.py:901] (3/4) Epoch 18, batch 700, loss[loss=0.2081, simple_loss=0.2895, pruned_loss=0.06332, over 8345.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2955, pruned_loss=0.06714, over 1581449.69 frames. ], batch size: 26, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:29,539 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-06 20:54:30,790 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6389, 1.5037, 1.9995, 1.4039, 1.1818, 2.1049, 0.2763, 1.2455], + device='cuda:3'), covar=tensor([0.1958, 0.1695, 0.0487, 0.1498, 0.3385, 0.0459, 0.2684, 0.1778], + device='cuda:3'), in_proj_covar=tensor([0.0178, 0.0184, 0.0117, 0.0217, 0.0262, 0.0123, 0.0165, 0.0182], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 20:54:44,128 INFO [train.py:901] (3/4) Epoch 18, batch 750, loss[loss=0.228, simple_loss=0.2956, pruned_loss=0.08018, over 6860.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.2955, pruned_loss=0.06744, over 1589642.44 frames. ], batch size: 72, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:54:53,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.332e+02 3.041e+02 3.730e+02 6.216e+02, threshold=6.081e+02, percent-clipped=3.0 +2023-02-06 20:54:58,184 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 20:55:08,084 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 20:55:19,814 INFO [train.py:901] (3/4) Epoch 18, batch 800, loss[loss=0.2011, simple_loss=0.2997, pruned_loss=0.05127, over 8192.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.295, pruned_loss=0.06663, over 1594952.88 frames. ], batch size: 23, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:55:49,563 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138254.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:51,620 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138257.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:55:54,224 INFO [train.py:901] (3/4) Epoch 18, batch 850, loss[loss=0.207, simple_loss=0.2753, pruned_loss=0.06941, over 7420.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2957, pruned_loss=0.06642, over 1602594.87 frames. ], batch size: 17, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:03,045 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.796e+02 2.308e+02 2.906e+02 3.562e+02 8.427e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 20:56:13,563 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138288.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:56:30,836 INFO [train.py:901] (3/4) Epoch 18, batch 900, loss[loss=0.2328, simple_loss=0.3045, pruned_loss=0.08056, over 8027.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2947, pruned_loss=0.06643, over 1600993.87 frames. ], batch size: 22, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:56:35,147 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6134, 1.3773, 1.6638, 1.3281, 0.8626, 1.3857, 1.3964, 1.2684], + device='cuda:3'), covar=tensor([0.0545, 0.1247, 0.1653, 0.1443, 0.0586, 0.1510, 0.0743, 0.0672], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 20:56:50,631 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:05,377 INFO [train.py:901] (3/4) Epoch 18, batch 950, loss[loss=0.1848, simple_loss=0.2633, pruned_loss=0.05318, over 7440.00 frames. 
], tot_loss[loss=0.2138, simple_loss=0.2946, pruned_loss=0.06656, over 1602980.12 frames. ], batch size: 17, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:10,976 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138369.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:13,023 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138372.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 20:57:14,179 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.844e+02 2.531e+02 3.020e+02 3.937e+02 8.991e+02, threshold=6.039e+02, percent-clipped=7.0 +2023-02-06 20:57:14,413 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3434, 2.4702, 2.3584, 3.9205, 1.6390, 2.1254, 2.4571, 3.1930], + device='cuda:3'), covar=tensor([0.0766, 0.0981, 0.0864, 0.0247, 0.1157, 0.1225, 0.1071, 0.0608], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0200, 0.0251, 0.0212, 0.0207, 0.0249, 0.0253, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 20:57:29,249 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 20:57:40,379 INFO [train.py:901] (3/4) Epoch 18, batch 1000, loss[loss=0.2038, simple_loss=0.276, pruned_loss=0.06582, over 7212.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2942, pruned_loss=0.06623, over 1609995.43 frames. ], batch size: 16, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:57:56,233 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6051, 2.0240, 3.3401, 1.3371, 2.4253, 1.9443, 1.6931, 2.4827], + device='cuda:3'), covar=tensor([0.1895, 0.2566, 0.0938, 0.4490, 0.1854, 0.3162, 0.2126, 0.2265], + device='cuda:3'), in_proj_covar=tensor([0.0511, 0.0574, 0.0544, 0.0618, 0.0636, 0.0577, 0.0510, 0.0624], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:58:05,480 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 20:58:16,697 INFO [train.py:901] (3/4) Epoch 18, batch 1050, loss[loss=0.1897, simple_loss=0.2703, pruned_loss=0.05453, over 8081.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2936, pruned_loss=0.06632, over 1603052.18 frames. ], batch size: 21, lr: 4.27e-03, grad_scale: 8.0 +2023-02-06 20:58:18,832 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 20:58:25,546 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.454e+02 3.228e+02 4.133e+02 8.765e+02, threshold=6.456e+02, percent-clipped=4.0 +2023-02-06 20:58:51,055 INFO [train.py:901] (3/4) Epoch 18, batch 1100, loss[loss=0.2511, simple_loss=0.3337, pruned_loss=0.08426, over 8587.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2942, pruned_loss=0.0669, over 1604564.23 frames. 
], batch size: 31, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:58:56,219 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7870, 5.8938, 5.1480, 2.4869, 5.1133, 5.5669, 5.4438, 5.3661], + device='cuda:3'), covar=tensor([0.0483, 0.0392, 0.0856, 0.4209, 0.0677, 0.0777, 0.1080, 0.0557], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0419, 0.0419, 0.0517, 0.0409, 0.0422, 0.0407, 0.0365], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 20:59:26,908 INFO [train.py:901] (3/4) Epoch 18, batch 1150, loss[loss=0.1956, simple_loss=0.2835, pruned_loss=0.05382, over 8491.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06721, over 1604550.44 frames. ], batch size: 26, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 20:59:29,623 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 20:59:35,879 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 2.366e+02 2.909e+02 3.553e+02 5.350e+02, threshold=5.817e+02, percent-clipped=0.0 +2023-02-06 20:59:44,962 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5679, 1.9722, 2.9823, 1.3080, 2.1473, 1.8179, 1.7064, 2.0877], + device='cuda:3'), covar=tensor([0.2134, 0.2598, 0.0907, 0.4988, 0.2059, 0.3711, 0.2488, 0.2572], + device='cuda:3'), in_proj_covar=tensor([0.0516, 0.0577, 0.0550, 0.0625, 0.0642, 0.0583, 0.0515, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:00:02,035 INFO [train.py:901] (3/4) Epoch 18, batch 1200, loss[loss=0.2239, simple_loss=0.3033, pruned_loss=0.07219, over 8492.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2947, pruned_loss=0.06717, over 1609689.24 frames. ], batch size: 28, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:04,276 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9849, 1.7520, 6.1094, 2.3064, 5.6025, 5.1944, 5.6526, 5.6008], + device='cuda:3'), covar=tensor([0.0407, 0.4113, 0.0274, 0.3323, 0.0775, 0.0742, 0.0454, 0.0426], + device='cuda:3'), in_proj_covar=tensor([0.0582, 0.0618, 0.0662, 0.0591, 0.0674, 0.0577, 0.0569, 0.0642], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:00:11,865 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138625.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:13,950 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=138628.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:16,606 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138632.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:29,705 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:31,812 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=138653.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:00:37,162 INFO [train.py:901] (3/4) Epoch 18, batch 1250, loss[loss=0.1918, simple_loss=0.2796, pruned_loss=0.05205, over 8031.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.295, pruned_loss=0.06712, over 1613538.39 frames. 
], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:00:46,118 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5122, 4.5686, 4.1135, 1.9228, 4.0357, 4.1232, 4.1930, 3.9734], + device='cuda:3'), covar=tensor([0.0726, 0.0511, 0.0944, 0.4779, 0.0789, 0.0777, 0.1137, 0.0689], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0420, 0.0421, 0.0521, 0.0412, 0.0424, 0.0409, 0.0367], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:00:47,269 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.310e+02 2.834e+02 3.613e+02 5.274e+02, threshold=5.668e+02, percent-clipped=0.0 +2023-02-06 21:00:54,247 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=138684.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:04,840 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138699.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:08,295 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138704.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:12,860 INFO [train.py:901] (3/4) Epoch 18, batch 1300, loss[loss=0.2302, simple_loss=0.3125, pruned_loss=0.07395, over 8243.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.295, pruned_loss=0.06698, over 1615570.88 frames. ], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:15,853 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:34,957 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4120, 1.6681, 2.7226, 1.2165, 1.9353, 1.7962, 1.3695, 1.8572], + device='cuda:3'), covar=tensor([0.1887, 0.2362, 0.0731, 0.4470, 0.1796, 0.3151, 0.2324, 0.2135], + device='cuda:3'), in_proj_covar=tensor([0.0514, 0.0577, 0.0549, 0.0624, 0.0643, 0.0583, 0.0513, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:01:37,544 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138747.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:01:38,938 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9351, 2.0789, 1.8108, 2.6196, 1.1733, 1.5314, 1.7936, 2.1198], + device='cuda:3'), covar=tensor([0.0757, 0.0741, 0.0949, 0.0361, 0.1098, 0.1350, 0.0882, 0.0738], + device='cuda:3'), in_proj_covar=tensor([0.0236, 0.0202, 0.0254, 0.0214, 0.0210, 0.0251, 0.0256, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 21:01:47,850 INFO [train.py:901] (3/4) Epoch 18, batch 1350, loss[loss=0.1826, simple_loss=0.2669, pruned_loss=0.04914, over 8140.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2948, pruned_loss=0.06687, over 1618504.73 frames. ], batch size: 22, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:01:56,594 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.404e+02 2.906e+02 3.545e+02 6.613e+02, threshold=5.812e+02, percent-clipped=4.0 +2023-02-06 21:02:15,443 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=138799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:02:23,336 INFO [train.py:901] (3/4) Epoch 18, batch 1400, loss[loss=0.1738, simple_loss=0.2511, pruned_loss=0.04826, over 7444.00 frames. 
], tot_loss[loss=0.2157, simple_loss=0.2963, pruned_loss=0.06758, over 1622924.09 frames. ], batch size: 17, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:02:43,933 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.26 vs. limit=5.0 +2023-02-06 21:02:57,621 INFO [train.py:901] (3/4) Epoch 18, batch 1450, loss[loss=0.1927, simple_loss=0.2612, pruned_loss=0.06208, over 7685.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2951, pruned_loss=0.06684, over 1622276.55 frames. ], batch size: 18, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:06,392 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.331e+02 2.491e+02 3.050e+02 4.246e+02 7.467e+02, threshold=6.100e+02, percent-clipped=3.0 +2023-02-06 21:03:07,079 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 21:03:33,392 INFO [train.py:901] (3/4) Epoch 18, batch 1500, loss[loss=0.2233, simple_loss=0.2885, pruned_loss=0.07909, over 7248.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2943, pruned_loss=0.06645, over 1619186.19 frames. ], batch size: 16, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:03:44,791 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=138927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:08,397 INFO [train.py:901] (3/4) Epoch 18, batch 1550, loss[loss=0.2165, simple_loss=0.2993, pruned_loss=0.06685, over 6694.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2941, pruned_loss=0.06654, over 1616480.81 frames. ], batch size: 71, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:17,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.366e+02 2.933e+02 3.736e+02 6.367e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-06 21:04:38,197 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139003.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:04:43,436 INFO [train.py:901] (3/4) Epoch 18, batch 1600, loss[loss=0.2136, simple_loss=0.2906, pruned_loss=0.06835, over 8471.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2935, pruned_loss=0.06664, over 1615047.16 frames. 
], batch size: 27, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:04:54,110 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4013, 1.6978, 4.6063, 1.8080, 4.1115, 3.8583, 4.1632, 4.0426], + device='cuda:3'), covar=tensor([0.0519, 0.3806, 0.0391, 0.3588, 0.0928, 0.0825, 0.0493, 0.0576], + device='cuda:3'), in_proj_covar=tensor([0.0587, 0.0623, 0.0665, 0.0593, 0.0679, 0.0582, 0.0573, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:04:56,950 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139028.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:07,017 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:10,368 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:15,387 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139055.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:18,032 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:19,996 INFO [train.py:901] (3/4) Epoch 18, batch 1650, loss[loss=0.2197, simple_loss=0.304, pruned_loss=0.06766, over 8106.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2941, pruned_loss=0.06709, over 1613933.07 frames. ], batch size: 23, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:05:28,835 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.375e+02 2.907e+02 3.508e+02 7.626e+02, threshold=5.813e+02, percent-clipped=3.0 +2023-02-06 21:05:33,246 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139080.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:05:37,215 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0476, 1.6291, 1.4347, 1.5850, 1.3303, 1.2500, 1.1932, 1.2435], + device='cuda:3'), covar=tensor([0.1094, 0.0457, 0.1234, 0.0532, 0.0745, 0.1427, 0.0982, 0.0878], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0236, 0.0328, 0.0302, 0.0298, 0.0331, 0.0343, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:05:54,418 INFO [train.py:901] (3/4) Epoch 18, batch 1700, loss[loss=0.2467, simple_loss=0.3104, pruned_loss=0.09144, over 8353.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2939, pruned_loss=0.06693, over 1616798.15 frames. ], batch size: 24, lr: 4.26e-03, grad_scale: 8.0 +2023-02-06 21:06:28,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139158.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:30,617 INFO [train.py:901] (3/4) Epoch 18, batch 1750, loss[loss=0.212, simple_loss=0.2921, pruned_loss=0.06593, over 8258.00 frames. ], tot_loss[loss=0.214, simple_loss=0.294, pruned_loss=0.06704, over 1620804.60 frames. 
], batch size: 22, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:06:32,195 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:06:39,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.310e+02 2.913e+02 3.912e+02 7.750e+02, threshold=5.826e+02, percent-clipped=6.0 +2023-02-06 21:06:39,808 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:05,725 INFO [train.py:901] (3/4) Epoch 18, batch 1800, loss[loss=0.215, simple_loss=0.2827, pruned_loss=0.07367, over 8084.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2932, pruned_loss=0.06636, over 1616637.89 frames. ], batch size: 21, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:37,630 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:40,842 INFO [train.py:901] (3/4) Epoch 18, batch 1850, loss[loss=0.2437, simple_loss=0.322, pruned_loss=0.08264, over 7466.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2928, pruned_loss=0.06588, over 1614430.04 frames. ], batch size: 71, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:07:47,457 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8560, 1.6598, 5.9678, 2.1389, 5.3316, 5.0205, 5.5330, 5.4022], + device='cuda:3'), covar=tensor([0.0481, 0.4743, 0.0357, 0.3916, 0.1137, 0.0899, 0.0490, 0.0531], + device='cuda:3'), in_proj_covar=tensor([0.0585, 0.0623, 0.0664, 0.0592, 0.0676, 0.0581, 0.0573, 0.0647], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:07:49,485 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139271.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:07:51,406 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.274e+02 2.776e+02 3.369e+02 8.658e+02, threshold=5.552e+02, percent-clipped=2.0 +2023-02-06 21:07:53,731 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9829, 3.6081, 2.2036, 2.8818, 2.7377, 1.9633, 2.8070, 2.9874], + device='cuda:3'), covar=tensor([0.1686, 0.0446, 0.1252, 0.0849, 0.0764, 0.1486, 0.1161, 0.1274], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0234, 0.0324, 0.0299, 0.0297, 0.0329, 0.0340, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:07:58,399 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3327, 2.0219, 2.7147, 2.2286, 2.7254, 2.2639, 2.0074, 1.5580], + device='cuda:3'), covar=tensor([0.4753, 0.4583, 0.1662, 0.3605, 0.2235, 0.2593, 0.1820, 0.4734], + device='cuda:3'), in_proj_covar=tensor([0.0919, 0.0938, 0.0777, 0.0909, 0.0976, 0.0856, 0.0726, 0.0802], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:08:05,262 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139294.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:14,583 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139307.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:08:17,142 INFO [train.py:901] (3/4) Epoch 18, batch 1900, loss[loss=0.2071, simple_loss=0.2823, pruned_loss=0.06599, over 8072.00 frames. 
], tot_loss[loss=0.2132, simple_loss=0.2935, pruned_loss=0.06643, over 1613442.84 frames. ], batch size: 21, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:52,413 INFO [train.py:901] (3/4) Epoch 18, batch 1950, loss[loss=0.2405, simple_loss=0.3209, pruned_loss=0.08002, over 8517.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2926, pruned_loss=0.06611, over 1613621.89 frames. ], batch size: 26, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:08:55,252 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 21:09:01,254 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.421e+02 2.964e+02 3.877e+02 7.962e+02, threshold=5.927e+02, percent-clipped=5.0 +2023-02-06 21:09:08,117 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 21:09:11,192 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139386.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:28,244 INFO [train.py:901] (3/4) Epoch 18, batch 2000, loss[loss=0.2436, simple_loss=0.3302, pruned_loss=0.07848, over 8592.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2931, pruned_loss=0.06665, over 1610072.26 frames. ], batch size: 31, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:09:28,249 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 21:09:30,557 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139414.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:33,906 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139419.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:42,134 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139430.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:48,228 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:51,662 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139444.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:09:54,542 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0 +2023-02-06 21:09:59,144 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139455.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:10:02,980 INFO [train.py:901] (3/4) Epoch 18, batch 2050, loss[loss=0.252, simple_loss=0.3192, pruned_loss=0.09236, over 8593.00 frames. ], tot_loss[loss=0.215, simple_loss=0.2947, pruned_loss=0.06766, over 1613512.07 frames. ], batch size: 39, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:10:12,679 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.679e+02 2.515e+02 3.080e+02 3.592e+02 7.733e+02, threshold=6.160e+02, percent-clipped=3.0 +2023-02-06 21:10:39,816 INFO [train.py:901] (3/4) Epoch 18, batch 2100, loss[loss=0.2651, simple_loss=0.3345, pruned_loss=0.09781, over 8090.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2933, pruned_loss=0.06713, over 1609337.38 frames. 
], batch size: 21, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:10:46,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1932, 1.2317, 1.5255, 1.2245, 0.7033, 1.2822, 1.2344, 1.0630], + device='cuda:3'), covar=tensor([0.0603, 0.1316, 0.1664, 0.1476, 0.0568, 0.1579, 0.0709, 0.0698], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 21:10:53,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3801, 1.6075, 1.6556, 1.0063, 1.7128, 1.2809, 0.2647, 1.5535], + device='cuda:3'), covar=tensor([0.0448, 0.0290, 0.0269, 0.0445, 0.0374, 0.0840, 0.0778, 0.0252], + device='cuda:3'), in_proj_covar=tensor([0.0437, 0.0377, 0.0325, 0.0432, 0.0365, 0.0522, 0.0384, 0.0405], + device='cuda:3'), out_proj_covar=tensor([1.1910e-04, 9.9955e-05, 8.6009e-05, 1.1499e-04, 9.7570e-05, 1.4966e-04, + 1.0457e-04, 1.0862e-04], device='cuda:3') +2023-02-06 21:10:58,025 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0152, 2.3799, 3.5288, 2.0819, 1.7838, 3.5234, 0.5530, 2.1821], + device='cuda:3'), covar=tensor([0.1393, 0.1413, 0.0274, 0.2010, 0.3163, 0.0431, 0.2752, 0.1617], + device='cuda:3'), in_proj_covar=tensor([0.0179, 0.0188, 0.0118, 0.0216, 0.0264, 0.0126, 0.0165, 0.0184], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 21:11:01,675 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 21:11:15,316 INFO [train.py:901] (3/4) Epoch 18, batch 2150, loss[loss=0.2117, simple_loss=0.3128, pruned_loss=0.05535, over 8352.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2948, pruned_loss=0.06801, over 1612146.68 frames. ], batch size: 24, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:11:24,962 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.487e+02 3.024e+02 3.808e+02 9.008e+02, threshold=6.048e+02, percent-clipped=4.0 +2023-02-06 21:11:33,584 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-06 21:11:34,710 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139589.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,091 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:43,161 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:11:50,319 INFO [train.py:901] (3/4) Epoch 18, batch 2200, loss[loss=0.1906, simple_loss=0.2738, pruned_loss=0.0537, over 7700.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2938, pruned_loss=0.0674, over 1609357.88 frames. 
], batch size: 18, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:10,497 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139638.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:13,213 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139642.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:19,363 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139651.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:22,903 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1327, 2.5972, 3.0222, 1.4251, 3.1791, 1.8064, 1.5009, 2.0301], + device='cuda:3'), covar=tensor([0.0741, 0.0344, 0.0223, 0.0741, 0.0317, 0.0782, 0.0855, 0.0570], + device='cuda:3'), in_proj_covar=tensor([0.0428, 0.0369, 0.0320, 0.0424, 0.0357, 0.0512, 0.0377, 0.0398], + device='cuda:3'), out_proj_covar=tensor([1.1676e-04, 9.7882e-05, 8.4682e-05, 1.1285e-04, 9.5403e-05, 1.4668e-04, + 1.0260e-04, 1.0672e-04], device='cuda:3') +2023-02-06 21:12:26,632 INFO [train.py:901] (3/4) Epoch 18, batch 2250, loss[loss=0.2489, simple_loss=0.3259, pruned_loss=0.08596, over 8452.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.293, pruned_loss=0.06673, over 1612440.19 frames. ], batch size: 27, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:12:31,134 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139667.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:12:36,169 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.519e+02 3.270e+02 4.475e+02 8.912e+02, threshold=6.540e+02, percent-clipped=11.0 +2023-02-06 21:12:53,999 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2411, 1.6664, 4.4257, 1.9752, 3.9950, 3.6750, 4.0088, 3.8841], + device='cuda:3'), covar=tensor([0.0556, 0.4083, 0.0553, 0.3575, 0.0959, 0.0967, 0.0540, 0.0608], + device='cuda:3'), in_proj_covar=tensor([0.0585, 0.0621, 0.0667, 0.0595, 0.0675, 0.0580, 0.0574, 0.0647], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:13:01,644 INFO [train.py:901] (3/4) Epoch 18, batch 2300, loss[loss=0.2409, simple_loss=0.3127, pruned_loss=0.08459, over 6676.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.294, pruned_loss=0.06711, over 1615651.40 frames. ], batch size: 72, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:04,669 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139715.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:05,410 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5931, 2.0265, 2.1908, 1.4002, 2.2786, 1.4640, 0.6298, 1.8135], + device='cuda:3'), covar=tensor([0.0548, 0.0281, 0.0196, 0.0465, 0.0313, 0.0823, 0.0720, 0.0271], + device='cuda:3'), in_proj_covar=tensor([0.0431, 0.0371, 0.0322, 0.0427, 0.0359, 0.0517, 0.0380, 0.0401], + device='cuda:3'), out_proj_covar=tensor([1.1755e-04, 9.8298e-05, 8.5310e-05, 1.1363e-04, 9.5936e-05, 1.4809e-04, + 1.0338e-04, 1.0728e-04], device='cuda:3') +2023-02-06 21:13:32,006 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139753.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:37,207 INFO [train.py:901] (3/4) Epoch 18, batch 2350, loss[loss=0.209, simple_loss=0.3017, pruned_loss=0.05818, over 8327.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2948, pruned_loss=0.06746, over 1617550.88 frames. 
], batch size: 25, lr: 4.25e-03, grad_scale: 8.0 +2023-02-06 21:13:40,621 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=139766.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:13:47,224 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.439e+02 2.945e+02 3.859e+02 6.515e+02, threshold=5.891e+02, percent-clipped=0.0 +2023-02-06 21:13:55,509 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:09,008 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:11,597 INFO [train.py:901] (3/4) Epoch 18, batch 2400, loss[loss=0.1913, simple_loss=0.2746, pruned_loss=0.05397, over 8247.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2945, pruned_loss=0.06767, over 1617829.14 frames. ], batch size: 22, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:20,253 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139822.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:21,638 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:37,679 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5603, 2.6682, 1.8719, 2.3490, 2.3171, 1.6105, 2.3208, 2.3756], + device='cuda:3'), covar=tensor([0.1453, 0.0398, 0.1236, 0.0711, 0.0746, 0.1507, 0.0947, 0.1003], + device='cuda:3'), in_proj_covar=tensor([0.0346, 0.0233, 0.0322, 0.0299, 0.0294, 0.0328, 0.0337, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:14:44,092 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2091, 2.5252, 2.9150, 1.6505, 2.9760, 1.7359, 1.5179, 2.0835], + device='cuda:3'), covar=tensor([0.0688, 0.0326, 0.0228, 0.0655, 0.0365, 0.0845, 0.0835, 0.0543], + device='cuda:3'), in_proj_covar=tensor([0.0432, 0.0372, 0.0322, 0.0427, 0.0358, 0.0518, 0.0379, 0.0401], + device='cuda:3'), out_proj_covar=tensor([1.1782e-04, 9.8596e-05, 8.5355e-05, 1.1361e-04, 9.5606e-05, 1.4841e-04, + 1.0307e-04, 1.0736e-04], device='cuda:3') +2023-02-06 21:14:48,492 INFO [train.py:901] (3/4) Epoch 18, batch 2450, loss[loss=0.1911, simple_loss=0.2713, pruned_loss=0.05541, over 7642.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2947, pruned_loss=0.06757, over 1618535.46 frames. ], batch size: 19, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:14:53,495 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=139868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:14:58,179 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.793e+02 2.359e+02 2.854e+02 3.442e+02 8.627e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-06 21:15:00,747 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-06 21:15:23,626 INFO [train.py:901] (3/4) Epoch 18, batch 2500, loss[loss=0.2435, simple_loss=0.3157, pruned_loss=0.08564, over 6846.00 frames. ], tot_loss[loss=0.2152, simple_loss=0.295, pruned_loss=0.06772, over 1615627.25 frames. 
], batch size: 71, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:15:39,668 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139933.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:15:47,236 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=139944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:00,232 INFO [train.py:901] (3/4) Epoch 18, batch 2550, loss[loss=0.2317, simple_loss=0.3079, pruned_loss=0.07777, over 8087.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2939, pruned_loss=0.06725, over 1618967.82 frames. ], batch size: 21, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:07,334 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=139971.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:09,801 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.329e+02 2.906e+02 3.594e+02 7.294e+02, threshold=5.811e+02, percent-clipped=3.0 +2023-02-06 21:16:25,070 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=139996.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:35,499 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140009.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:36,613 INFO [train.py:901] (3/4) Epoch 18, batch 2600, loss[loss=0.2226, simple_loss=0.2895, pruned_loss=0.07783, over 7534.00 frames. ], tot_loss[loss=0.2149, simple_loss=0.2943, pruned_loss=0.06775, over 1616782.06 frames. ], batch size: 18, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:16:39,140 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-06 21:16:44,583 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140022.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:16:52,715 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140034.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:01,557 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140047.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:03,005 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140048.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:07,127 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5774, 2.5952, 1.8959, 2.2587, 2.1330, 1.5765, 2.1624, 2.1786], + device='cuda:3'), covar=tensor([0.1423, 0.0334, 0.1102, 0.0588, 0.0726, 0.1417, 0.0894, 0.0956], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0234, 0.0323, 0.0300, 0.0296, 0.0329, 0.0339, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:17:10,539 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:17:11,785 INFO [train.py:901] (3/4) Epoch 18, batch 2650, loss[loss=0.2191, simple_loss=0.3108, pruned_loss=0.06373, over 8542.00 frames. ], tot_loss[loss=0.2151, simple_loss=0.2949, pruned_loss=0.06767, over 1619492.71 frames. 
], batch size: 49, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:22,349 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.571e+02 2.973e+02 3.666e+02 6.732e+02, threshold=5.945e+02, percent-clipped=3.0 +2023-02-06 21:17:47,905 INFO [train.py:901] (3/4) Epoch 18, batch 2700, loss[loss=0.2172, simple_loss=0.2825, pruned_loss=0.07595, over 7812.00 frames. ], tot_loss[loss=0.2153, simple_loss=0.2949, pruned_loss=0.06784, over 1616934.02 frames. ], batch size: 20, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:17:55,845 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140121.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:00,544 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:02,536 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140131.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:16,416 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140151.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:23,213 INFO [train.py:901] (3/4) Epoch 18, batch 2750, loss[loss=0.2349, simple_loss=0.312, pruned_loss=0.07889, over 8469.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.2943, pruned_loss=0.06768, over 1614067.07 frames. ], batch size: 25, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:18:23,409 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9052, 1.6262, 1.6741, 1.4617, 0.9031, 1.4904, 1.7145, 1.3970], + device='cuda:3'), covar=tensor([0.0487, 0.1184, 0.1612, 0.1315, 0.0601, 0.1472, 0.0679, 0.0638], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0113, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 21:18:27,490 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140166.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:28,899 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140168.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:18:33,791 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.336e+02 2.919e+02 3.807e+02 8.313e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-06 21:18:50,343 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3048, 2.1642, 1.6565, 1.9160, 1.7713, 1.4013, 1.6179, 1.6774], + device='cuda:3'), covar=tensor([0.1228, 0.0358, 0.1059, 0.0544, 0.0687, 0.1490, 0.0927, 0.0833], + device='cuda:3'), in_proj_covar=tensor([0.0349, 0.0235, 0.0323, 0.0302, 0.0296, 0.0331, 0.0340, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:19:00,759 INFO [train.py:901] (3/4) Epoch 18, batch 2800, loss[loss=0.2009, simple_loss=0.28, pruned_loss=0.06093, over 7808.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2932, pruned_loss=0.06687, over 1611931.92 frames. 
], batch size: 20, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:01,504 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140212.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:25,733 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140246.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:35,893 INFO [train.py:901] (3/4) Epoch 18, batch 2850, loss[loss=0.2236, simple_loss=0.3108, pruned_loss=0.06826, over 8186.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2928, pruned_loss=0.06643, over 1611243.79 frames. ], batch size: 23, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:19:39,583 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140266.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:45,685 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.447e+02 2.919e+02 3.574e+02 5.806e+02, threshold=5.838e+02, percent-clipped=0.0 +2023-02-06 21:19:47,313 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140277.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:50,917 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140281.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:52,321 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140283.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:19:58,486 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([6.0935, 1.4863, 6.2642, 2.3208, 5.6586, 5.3322, 5.7747, 5.6794], + device='cuda:3'), covar=tensor([0.0484, 0.4730, 0.0346, 0.3383, 0.0997, 0.0820, 0.0506, 0.0512], + device='cuda:3'), in_proj_covar=tensor([0.0587, 0.0620, 0.0666, 0.0596, 0.0676, 0.0581, 0.0577, 0.0647], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:20:06,056 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3624, 2.0409, 1.6640, 2.0562, 1.7801, 1.3040, 1.7302, 1.8456], + device='cuda:3'), covar=tensor([0.1135, 0.0444, 0.1216, 0.0480, 0.0766, 0.1651, 0.0929, 0.0724], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0235, 0.0322, 0.0302, 0.0297, 0.0331, 0.0340, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:20:07,428 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140304.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:10,080 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5964, 1.4178, 4.7481, 1.8592, 4.2550, 4.0171, 4.3025, 4.1694], + device='cuda:3'), covar=tensor([0.0501, 0.4606, 0.0477, 0.3447, 0.1055, 0.0844, 0.0532, 0.0582], + device='cuda:3'), in_proj_covar=tensor([0.0587, 0.0619, 0.0666, 0.0597, 0.0675, 0.0581, 0.0578, 0.0647], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:20:10,419 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-06 21:20:11,947 INFO [train.py:901] (3/4) Epoch 18, batch 2900, loss[loss=0.2337, simple_loss=0.3245, pruned_loss=0.07143, over 8292.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2946, pruned_loss=0.06717, over 1614183.14 frames. 
], batch size: 23, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:13,912 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 21:20:14,963 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:23,768 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:25,189 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140329.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:33,504 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:20:44,300 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 21:20:47,991 INFO [train.py:901] (3/4) Epoch 18, batch 2950, loss[loss=0.2172, simple_loss=0.3044, pruned_loss=0.06496, over 6970.00 frames. ], tot_loss[loss=0.2146, simple_loss=0.2948, pruned_loss=0.06719, over 1612979.23 frames. ], batch size: 71, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:20:57,359 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.671e+02 3.280e+02 4.327e+02 7.160e+02, threshold=6.561e+02, percent-clipped=5.0 +2023-02-06 21:21:07,285 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8792, 1.6273, 2.0345, 1.8003, 1.8249, 1.9083, 1.6209, 0.7655], + device='cuda:3'), covar=tensor([0.4981, 0.4190, 0.1647, 0.2740, 0.2122, 0.2739, 0.1955, 0.4234], + device='cuda:3'), in_proj_covar=tensor([0.0923, 0.0943, 0.0781, 0.0908, 0.0975, 0.0862, 0.0727, 0.0806], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:21:18,955 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:21:23,797 INFO [train.py:901] (3/4) Epoch 18, batch 3000, loss[loss=0.1994, simple_loss=0.2981, pruned_loss=0.05029, over 8296.00 frames. ], tot_loss[loss=0.2166, simple_loss=0.2964, pruned_loss=0.06836, over 1613433.70 frames. ], batch size: 23, lr: 4.24e-03, grad_scale: 8.0 +2023-02-06 21:21:23,798 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 21:21:32,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7231, 1.8227, 1.6959, 2.2445, 1.1686, 1.4624, 1.6605, 1.7863], + device='cuda:3'), covar=tensor([0.0753, 0.0846, 0.0925, 0.0438, 0.1124, 0.1317, 0.0829, 0.0853], + device='cuda:3'), in_proj_covar=tensor([0.0235, 0.0201, 0.0252, 0.0214, 0.0209, 0.0251, 0.0258, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 21:21:37,682 INFO [train.py:935] (3/4) Epoch 18, validation: loss=0.1773, simple_loss=0.2774, pruned_loss=0.03861, over 944034.00 frames. +2023-02-06 21:21:37,683 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 21:22:14,087 INFO [train.py:901] (3/4) Epoch 18, batch 3050, loss[loss=0.2374, simple_loss=0.3136, pruned_loss=0.0806, over 8505.00 frames. ], tot_loss[loss=0.2175, simple_loss=0.297, pruned_loss=0.069, over 1615569.84 frames. 
], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:16,897 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:21,664 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140472.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:24,218 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.663e+02 3.172e+02 4.119e+02 9.916e+02, threshold=6.345e+02, percent-clipped=7.0 +2023-02-06 21:22:42,899 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140502.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:46,271 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140507.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:22:48,875 INFO [train.py:901] (3/4) Epoch 18, batch 3100, loss[loss=0.1947, simple_loss=0.2668, pruned_loss=0.0613, over 7975.00 frames. ], tot_loss[loss=0.2186, simple_loss=0.2979, pruned_loss=0.0697, over 1616591.62 frames. ], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:22:56,952 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140522.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:00,971 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:01,034 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140527.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:07,883 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140537.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:09,293 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140539.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:15,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140547.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:25,516 INFO [train.py:901] (3/4) Epoch 18, batch 3150, loss[loss=0.2787, simple_loss=0.3473, pruned_loss=0.105, over 8322.00 frames. ], tot_loss[loss=0.2179, simple_loss=0.2976, pruned_loss=0.06913, over 1617756.45 frames. 
], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:23:26,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140562.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:27,692 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140564.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:34,931 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.438e+02 2.948e+02 4.263e+02 1.019e+03, threshold=5.895e+02, percent-clipped=4.0 +2023-02-06 21:23:38,588 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140580.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:40,639 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:43,426 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:23:59,142 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140608.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:01,013 INFO [train.py:901] (3/4) Epoch 18, batch 3200, loss[loss=0.2524, simple_loss=0.3349, pruned_loss=0.08499, over 8299.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2962, pruned_loss=0.06805, over 1617664.63 frames. ], batch size: 23, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:06,861 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 21:24:07,891 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140621.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:24:11,471 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3929, 2.4014, 1.6415, 2.2370, 2.1595, 1.3464, 1.9928, 2.1175], + device='cuda:3'), covar=tensor([0.1409, 0.0406, 0.1337, 0.0623, 0.0751, 0.1675, 0.0997, 0.0843], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0233, 0.0323, 0.0302, 0.0296, 0.0330, 0.0339, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:24:36,825 INFO [train.py:901] (3/4) Epoch 18, batch 3250, loss[loss=0.1738, simple_loss=0.2602, pruned_loss=0.04373, over 8287.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2941, pruned_loss=0.06686, over 1616227.20 frames. ], batch size: 23, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:24:46,453 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.256e+02 2.889e+02 3.448e+02 6.536e+02, threshold=5.777e+02, percent-clipped=1.0 +2023-02-06 21:25:13,080 INFO [train.py:901] (3/4) Epoch 18, batch 3300, loss[loss=0.2352, simple_loss=0.3357, pruned_loss=0.06736, over 8250.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2949, pruned_loss=0.06688, over 1619962.13 frames. 
], batch size: 24, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:27,703 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2777, 1.3472, 3.3684, 1.0527, 3.0032, 2.8168, 3.1028, 3.0101], + device='cuda:3'), covar=tensor([0.0820, 0.3922, 0.0812, 0.3967, 0.1369, 0.1105, 0.0757, 0.0836], + device='cuda:3'), in_proj_covar=tensor([0.0589, 0.0621, 0.0667, 0.0596, 0.0677, 0.0579, 0.0575, 0.0644], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:25:27,797 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0875, 1.2379, 1.1690, 0.7705, 1.1716, 1.0144, 0.1183, 1.1874], + device='cuda:3'), covar=tensor([0.0334, 0.0331, 0.0288, 0.0432, 0.0373, 0.0896, 0.0706, 0.0273], + device='cuda:3'), in_proj_covar=tensor([0.0431, 0.0371, 0.0319, 0.0427, 0.0356, 0.0516, 0.0375, 0.0395], + device='cuda:3'), out_proj_covar=tensor([1.1729e-04, 9.8323e-05, 8.4337e-05, 1.1388e-04, 9.4896e-05, 1.4785e-04, + 1.0217e-04, 1.0561e-04], device='cuda:3') +2023-02-06 21:25:30,571 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140736.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:33,929 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140741.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:39,311 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140749.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:25:40,431 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 21:25:47,450 INFO [train.py:901] (3/4) Epoch 18, batch 3350, loss[loss=0.2394, simple_loss=0.3233, pruned_loss=0.07773, over 8287.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2948, pruned_loss=0.06661, over 1620544.50 frames. ], batch size: 23, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:25:57,594 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.504e+02 2.969e+02 3.727e+02 7.020e+02, threshold=5.938e+02, percent-clipped=2.0 +2023-02-06 21:26:23,949 INFO [train.py:901] (3/4) Epoch 18, batch 3400, loss[loss=0.1682, simple_loss=0.2503, pruned_loss=0.04308, over 7967.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2941, pruned_loss=0.0663, over 1615088.82 frames. ], batch size: 21, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:35,827 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140827.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:42,132 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140836.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:46,754 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140843.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:52,093 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140851.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:26:58,805 INFO [train.py:901] (3/4) Epoch 18, batch 3450, loss[loss=0.2333, simple_loss=0.3162, pruned_loss=0.07519, over 8522.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2942, pruned_loss=0.06654, over 1614739.84 frames. 
], batch size: 28, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:26:59,032 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140861.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:01,073 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140864.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:03,764 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=140868.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:05,721 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=140871.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:27:08,268 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.419e+02 3.065e+02 3.703e+02 6.567e+02, threshold=6.131e+02, percent-clipped=3.0 +2023-02-06 21:27:34,153 INFO [train.py:901] (3/4) Epoch 18, batch 3500, loss[loss=0.2461, simple_loss=0.3134, pruned_loss=0.08939, over 8511.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2935, pruned_loss=0.0665, over 1611611.89 frames. ], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:27:51,059 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 21:27:51,204 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:09,211 INFO [train.py:901] (3/4) Epoch 18, batch 3550, loss[loss=0.2078, simple_loss=0.2836, pruned_loss=0.066, over 7432.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2923, pruned_loss=0.0658, over 1608069.50 frames. ], batch size: 17, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:11,375 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9096, 2.0866, 1.9111, 2.6008, 1.2424, 1.5863, 1.8921, 2.1324], + device='cuda:3'), covar=tensor([0.0784, 0.0836, 0.0844, 0.0374, 0.1016, 0.1314, 0.0780, 0.0750], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0200, 0.0251, 0.0213, 0.0206, 0.0249, 0.0255, 0.0212], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 21:28:12,016 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=140965.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:12,767 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140966.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:18,745 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.456e+02 3.083e+02 3.681e+02 6.081e+02, threshold=6.167e+02, percent-clipped=0.0 +2023-02-06 21:28:26,462 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=140986.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:30,642 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=140992.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:28:44,285 INFO [train.py:901] (3/4) Epoch 18, batch 3600, loss[loss=0.2023, simple_loss=0.278, pruned_loss=0.06325, over 7235.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2928, pruned_loss=0.06633, over 1608844.65 frames. 
], batch size: 16, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:28:49,259 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141017.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:20,370 INFO [train.py:901] (3/4) Epoch 18, batch 3650, loss[loss=0.2482, simple_loss=0.3331, pruned_loss=0.08168, over 8341.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2931, pruned_loss=0.0666, over 1612628.65 frames. ], batch size: 26, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:30,821 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.345e+02 2.956e+02 3.633e+02 6.454e+02, threshold=5.912e+02, percent-clipped=1.0 +2023-02-06 21:29:37,715 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141085.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:29:55,733 INFO [train.py:901] (3/4) Epoch 18, batch 3700, loss[loss=0.1511, simple_loss=0.2372, pruned_loss=0.03252, over 7814.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2923, pruned_loss=0.06599, over 1613029.23 frames. ], batch size: 20, lr: 4.23e-03, grad_scale: 8.0 +2023-02-06 21:29:57,139 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 21:30:02,958 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:20,676 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141145.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:31,544 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4701, 4.4693, 4.0860, 2.1671, 3.9659, 4.1073, 4.1986, 3.7660], + device='cuda:3'), covar=tensor([0.0726, 0.0603, 0.0976, 0.4473, 0.0771, 0.1100, 0.1183, 0.0904], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0422, 0.0420, 0.0522, 0.0412, 0.0422, 0.0408, 0.0369], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:30:32,112 INFO [train.py:901] (3/4) Epoch 18, batch 3750, loss[loss=0.2388, simple_loss=0.3242, pruned_loss=0.0767, over 8620.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2936, pruned_loss=0.06653, over 1616286.12 frames. ], batch size: 39, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:30:32,288 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141161.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:39,106 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141171.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:30:41,858 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.679e+02 3.309e+02 4.099e+02 7.455e+02, threshold=6.618e+02, percent-clipped=7.0 +2023-02-06 21:31:00,278 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141200.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:07,419 INFO [train.py:901] (3/4) Epoch 18, batch 3800, loss[loss=0.2204, simple_loss=0.282, pruned_loss=0.07939, over 7717.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2932, pruned_loss=0.06627, over 1613381.97 frames. 
], batch size: 18, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:15,017 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141222.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:29,266 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141242.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:32,639 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:39,638 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.87 vs. limit=5.0 +2023-02-06 21:31:42,608 INFO [train.py:901] (3/4) Epoch 18, batch 3850, loss[loss=0.2165, simple_loss=0.2878, pruned_loss=0.07261, over 7548.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2936, pruned_loss=0.06625, over 1613411.44 frames. ], batch size: 18, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:31:46,907 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141267.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:31:52,711 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.500e+02 3.018e+02 3.684e+02 7.912e+02, threshold=6.036e+02, percent-clipped=1.0 +2023-02-06 21:31:52,871 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2431, 4.2206, 3.8823, 2.1143, 3.7894, 3.9317, 3.8103, 3.5400], + device='cuda:3'), covar=tensor([0.0921, 0.0633, 0.1036, 0.4300, 0.0856, 0.0796, 0.1314, 0.0713], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0424, 0.0420, 0.0522, 0.0414, 0.0422, 0.0406, 0.0369], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:31:55,473 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141279.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:00,516 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141286.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:03,623 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-06 21:32:03,896 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 21:32:17,071 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141309.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:18,348 INFO [train.py:901] (3/4) Epoch 18, batch 3900, loss[loss=0.2255, simple_loss=0.303, pruned_loss=0.07397, over 7667.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2935, pruned_loss=0.06629, over 1609317.72 frames. ], batch size: 19, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:32:42,404 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:32:52,481 INFO [train.py:901] (3/4) Epoch 18, batch 3950, loss[loss=0.198, simple_loss=0.2864, pruned_loss=0.05476, over 8038.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2944, pruned_loss=0.06716, over 1607672.38 frames. 
], batch size: 22, lr: 4.22e-03, grad_scale: 8.0 +2023-02-06 21:33:02,707 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.421e+02 2.990e+02 3.795e+02 7.053e+02, threshold=5.979e+02, percent-clipped=3.0 +2023-02-06 21:33:15,870 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141394.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:27,611 INFO [train.py:901] (3/4) Epoch 18, batch 4000, loss[loss=0.21, simple_loss=0.2893, pruned_loss=0.06533, over 8250.00 frames. ], tot_loss[loss=0.2145, simple_loss=0.2945, pruned_loss=0.06727, over 1609975.70 frames. ], batch size: 24, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:33:37,287 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141424.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:33:40,604 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4467, 2.0246, 2.8318, 2.2280, 2.7563, 2.3317, 2.1187, 1.4034], + device='cuda:3'), covar=tensor([0.4695, 0.4711, 0.1691, 0.3437, 0.2337, 0.2760, 0.1803, 0.4944], + device='cuda:3'), in_proj_covar=tensor([0.0921, 0.0942, 0.0779, 0.0906, 0.0979, 0.0858, 0.0725, 0.0804], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:33:44,517 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141435.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:33:45,250 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7547, 2.2944, 4.3595, 1.4503, 3.1233, 2.3369, 1.8232, 2.8410], + device='cuda:3'), covar=tensor([0.1720, 0.2328, 0.0751, 0.4185, 0.1589, 0.2854, 0.1999, 0.2393], + device='cuda:3'), in_proj_covar=tensor([0.0510, 0.0575, 0.0549, 0.0620, 0.0637, 0.0578, 0.0512, 0.0626], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:33:58,720 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141456.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:00,772 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:02,041 INFO [train.py:901] (3/4) Epoch 18, batch 4050, loss[loss=0.2105, simple_loss=0.282, pruned_loss=0.06945, over 7970.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2931, pruned_loss=0.06644, over 1603521.40 frames. 
], batch size: 21, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:34:02,224 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9762, 3.7907, 2.4053, 2.8277, 2.6954, 2.0885, 2.8131, 2.9013], + device='cuda:3'), covar=tensor([0.1575, 0.0333, 0.1041, 0.0735, 0.0840, 0.1355, 0.1036, 0.1098], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0234, 0.0326, 0.0306, 0.0299, 0.0331, 0.0341, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:34:02,886 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0530, 1.4626, 1.5936, 1.3612, 0.9845, 1.3908, 1.8034, 1.5561], + device='cuda:3'), covar=tensor([0.0506, 0.1230, 0.1721, 0.1405, 0.0601, 0.1485, 0.0680, 0.0642], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0157, 0.0099, 0.0162, 0.0114, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 21:34:12,698 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.514e+02 3.146e+02 4.229e+02 8.641e+02, threshold=6.293e+02, percent-clipped=9.0 +2023-02-06 21:34:16,935 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141481.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:22,711 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0 +2023-02-06 21:34:34,632 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141505.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:34:38,481 INFO [train.py:901] (3/4) Epoch 18, batch 4100, loss[loss=0.2096, simple_loss=0.2933, pruned_loss=0.06298, over 8579.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2929, pruned_loss=0.06619, over 1606361.08 frames. ], batch size: 39, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:34:41,368 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5474, 1.5368, 2.9211, 1.3390, 2.2125, 3.1490, 3.2399, 2.7623], + device='cuda:3'), covar=tensor([0.1158, 0.1460, 0.0346, 0.1903, 0.0798, 0.0268, 0.0557, 0.0582], + device='cuda:3'), in_proj_covar=tensor([0.0285, 0.0313, 0.0276, 0.0309, 0.0297, 0.0256, 0.0398, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 21:34:53,730 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8249, 1.8360, 2.5503, 1.6796, 1.3739, 2.5487, 0.6691, 1.5096], + device='cuda:3'), covar=tensor([0.2025, 0.1493, 0.0364, 0.1493, 0.3151, 0.0386, 0.2057, 0.1531], + device='cuda:3'), in_proj_covar=tensor([0.0180, 0.0188, 0.0119, 0.0216, 0.0261, 0.0127, 0.0164, 0.0183], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 21:35:00,227 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141542.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:13,079 INFO [train.py:901] (3/4) Epoch 18, batch 4150, loss[loss=0.238, simple_loss=0.3044, pruned_loss=0.08583, over 8486.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2924, pruned_loss=0.06604, over 1609075.50 frames. 
], batch size: 29, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:17,346 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141567.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:35:22,680 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.507e+02 2.964e+02 3.952e+02 7.900e+02, threshold=5.928e+02, percent-clipped=3.0 +2023-02-06 21:35:48,912 INFO [train.py:901] (3/4) Epoch 18, batch 4200, loss[loss=0.2014, simple_loss=0.2843, pruned_loss=0.05928, over 8565.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2931, pruned_loss=0.06606, over 1609364.96 frames. ], batch size: 34, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:35:55,793 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141620.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:02,339 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 21:36:16,008 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141650.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:23,890 INFO [train.py:901] (3/4) Epoch 18, batch 4250, loss[loss=0.2218, simple_loss=0.2934, pruned_loss=0.07513, over 8458.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.294, pruned_loss=0.06671, over 1611123.31 frames. ], batch size: 27, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:36:24,612 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 21:36:33,262 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.491e+02 2.994e+02 3.932e+02 8.485e+02, threshold=5.988e+02, percent-clipped=6.0 +2023-02-06 21:36:33,494 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141675.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:36,884 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141680.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:44,013 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141691.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:54,160 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141705.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:36:58,030 INFO [train.py:901] (3/4) Epoch 18, batch 4300, loss[loss=0.219, simple_loss=0.2946, pruned_loss=0.07166, over 8103.00 frames. ], tot_loss[loss=0.2143, simple_loss=0.2946, pruned_loss=0.06699, over 1609095.09 frames. ], batch size: 23, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:32,932 INFO [train.py:901] (3/4) Epoch 18, batch 4350, loss[loss=0.2452, simple_loss=0.3201, pruned_loss=0.08514, over 8186.00 frames. ], tot_loss[loss=0.2134, simple_loss=0.2936, pruned_loss=0.06661, over 1611614.71 frames. ], batch size: 23, lr: 4.22e-03, grad_scale: 16.0 +2023-02-06 21:37:38,873 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-06 21:37:43,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.620e+02 3.197e+02 4.150e+02 9.266e+02, threshold=6.393e+02, percent-clipped=5.0 +2023-02-06 21:37:46,102 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141779.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:37:54,218 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 21:38:02,437 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=141803.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:04,522 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141806.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:38:07,896 INFO [train.py:901] (3/4) Epoch 18, batch 4400, loss[loss=0.2047, simple_loss=0.2923, pruned_loss=0.05858, over 8653.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2932, pruned_loss=0.06649, over 1611120.85 frames. ], batch size: 34, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:36,615 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 21:38:43,845 INFO [train.py:901] (3/4) Epoch 18, batch 4450, loss[loss=0.1771, simple_loss=0.2761, pruned_loss=0.03908, over 8318.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2938, pruned_loss=0.0672, over 1611500.19 frames. ], batch size: 25, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:38:53,330 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.507e+02 2.868e+02 3.524e+02 7.777e+02, threshold=5.735e+02, percent-clipped=2.0 +2023-02-06 21:38:54,257 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=141876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:07,102 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141894.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:39:11,833 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=141901.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:18,273 INFO [train.py:901] (3/4) Epoch 18, batch 4500, loss[loss=0.2189, simple_loss=0.3004, pruned_loss=0.06868, over 7800.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2931, pruned_loss=0.06635, over 1609240.11 frames. ], batch size: 20, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:39:23,247 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=141918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:27,796 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-06 21:39:34,740 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141934.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:42,678 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=141946.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:39:53,388 INFO [train.py:901] (3/4) Epoch 18, batch 4550, loss[loss=0.1944, simple_loss=0.2833, pruned_loss=0.05278, over 8693.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2929, pruned_loss=0.06619, over 1608167.75 frames. ], batch size: 39, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:40:03,505 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.488e+02 2.920e+02 3.454e+02 6.371e+02, threshold=5.840e+02, percent-clipped=2.0 +2023-02-06 21:40:29,745 INFO [train.py:901] (3/4) Epoch 18, batch 4600, loss[loss=0.2331, simple_loss=0.3209, pruned_loss=0.07263, over 8735.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2937, pruned_loss=0.06667, over 1612498.28 frames. ], batch size: 30, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:02,726 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. 
limit=2.0 +2023-02-06 21:41:04,181 INFO [train.py:901] (3/4) Epoch 18, batch 4650, loss[loss=0.256, simple_loss=0.3217, pruned_loss=0.09516, over 8454.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.293, pruned_loss=0.06673, over 1609616.65 frames. ], batch size: 49, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:05,109 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142062.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:13,897 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.464e+02 2.389e+02 2.901e+02 3.503e+02 7.256e+02, threshold=5.801e+02, percent-clipped=3.0 +2023-02-06 21:41:23,668 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142087.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:41:39,475 INFO [train.py:901] (3/4) Epoch 18, batch 4700, loss[loss=0.1961, simple_loss=0.2822, pruned_loss=0.05503, over 8128.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2935, pruned_loss=0.06675, over 1610213.18 frames. ], batch size: 22, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:41:56,995 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0633, 1.7518, 2.3450, 1.9917, 2.2753, 2.0686, 1.7817, 1.1140], + device='cuda:3'), covar=tensor([0.5105, 0.4439, 0.1817, 0.3108, 0.2143, 0.2627, 0.1874, 0.4687], + device='cuda:3'), in_proj_covar=tensor([0.0927, 0.0947, 0.0783, 0.0910, 0.0981, 0.0863, 0.0731, 0.0810], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:42:06,020 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142150.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 21:42:13,363 INFO [train.py:901] (3/4) Epoch 18, batch 4750, loss[loss=0.2112, simple_loss=0.3037, pruned_loss=0.05934, over 8243.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2926, pruned_loss=0.06623, over 1611077.23 frames. 
], batch size: 24, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:42:17,759 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8379, 3.8235, 3.4999, 1.8099, 3.3235, 3.4630, 3.4206, 3.2352], + device='cuda:3'), covar=tensor([0.0952, 0.0666, 0.1115, 0.4673, 0.1076, 0.1045, 0.1443, 0.0891], + device='cuda:3'), in_proj_covar=tensor([0.0509, 0.0425, 0.0419, 0.0520, 0.0412, 0.0418, 0.0403, 0.0366], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:42:23,435 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142174.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:23,891 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.398e+02 2.792e+02 3.541e+02 9.190e+02, threshold=5.585e+02, percent-clipped=4.0 +2023-02-06 21:42:24,100 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142175.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:42:28,133 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6059, 1.6297, 5.8639, 2.4031, 4.7510, 4.8536, 5.3907, 5.3763], + device='cuda:3'), covar=tensor([0.1154, 0.7281, 0.0870, 0.4435, 0.2693, 0.1476, 0.1018, 0.0936], + device='cuda:3'), in_proj_covar=tensor([0.0594, 0.0625, 0.0670, 0.0600, 0.0682, 0.0582, 0.0575, 0.0642], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:42:30,579 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 21:42:32,654 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 21:42:41,670 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142199.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:42:49,348 INFO [train.py:901] (3/4) Epoch 18, batch 4800, loss[loss=0.2325, simple_loss=0.3195, pruned_loss=0.07272, over 8290.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2934, pruned_loss=0.06642, over 1617924.45 frames. ], batch size: 23, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:00,603 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9126, 2.5312, 3.5496, 1.8875, 1.8610, 3.4586, 0.6896, 2.0893], + device='cuda:3'), covar=tensor([0.1477, 0.1384, 0.0268, 0.1898, 0.3149, 0.0357, 0.2675, 0.1391], + device='cuda:3'), in_proj_covar=tensor([0.0182, 0.0189, 0.0121, 0.0219, 0.0266, 0.0128, 0.0165, 0.0184], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 21:43:23,866 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 21:43:24,513 INFO [train.py:901] (3/4) Epoch 18, batch 4850, loss[loss=0.284, simple_loss=0.3437, pruned_loss=0.1121, over 6969.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.293, pruned_loss=0.0666, over 1606278.20 frames. 
], batch size: 72, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:43:33,968 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.522e+02 3.053e+02 3.876e+02 6.315e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-06 21:43:36,096 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:45,106 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142290.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:43:59,066 INFO [train.py:901] (3/4) Epoch 18, batch 4900, loss[loss=0.1849, simple_loss=0.2698, pruned_loss=0.04999, over 7918.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2926, pruned_loss=0.06641, over 1606670.81 frames. ], batch size: 20, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:34,497 INFO [train.py:901] (3/4) Epoch 18, batch 4950, loss[loss=0.1996, simple_loss=0.2915, pruned_loss=0.05387, over 8247.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2924, pruned_loss=0.0662, over 1609049.17 frames. ], batch size: 22, lr: 4.21e-03, grad_scale: 16.0 +2023-02-06 21:44:41,669 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7789, 1.8649, 5.9070, 2.5511, 5.2288, 5.0057, 5.4233, 5.2965], + device='cuda:3'), covar=tensor([0.0479, 0.4784, 0.0391, 0.3402, 0.1070, 0.0937, 0.0513, 0.0528], + device='cuda:3'), in_proj_covar=tensor([0.0588, 0.0620, 0.0664, 0.0597, 0.0676, 0.0578, 0.0571, 0.0639], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:44:44,958 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.451e+02 2.943e+02 3.789e+02 7.945e+02, threshold=5.886e+02, percent-clipped=1.0 +2023-02-06 21:44:57,238 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142393.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:05,916 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=142405.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:45:09,747 INFO [train.py:901] (3/4) Epoch 18, batch 5000, loss[loss=0.2227, simple_loss=0.3081, pruned_loss=0.06864, over 8497.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.294, pruned_loss=0.06727, over 1611529.51 frames. ], batch size: 28, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:31,134 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-02-06 21:45:33,586 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1724, 1.4100, 4.3582, 1.5839, 3.8949, 3.6532, 3.9281, 3.8111], + device='cuda:3'), covar=tensor([0.0553, 0.4387, 0.0486, 0.3984, 0.0963, 0.0863, 0.0527, 0.0641], + device='cuda:3'), in_proj_covar=tensor([0.0587, 0.0619, 0.0663, 0.0594, 0.0676, 0.0577, 0.0569, 0.0639], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:45:44,314 INFO [train.py:901] (3/4) Epoch 18, batch 5050, loss[loss=0.21, simple_loss=0.3041, pruned_loss=0.05795, over 8471.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2942, pruned_loss=0.06704, over 1608627.92 frames. 
], batch size: 25, lr: 4.21e-03, grad_scale: 8.0 +2023-02-06 21:45:54,476 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.384e+02 2.804e+02 3.417e+02 5.925e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-06 21:45:59,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9847, 1.7794, 3.3857, 1.5590, 2.3201, 3.6521, 3.7158, 3.1556], + device='cuda:3'), covar=tensor([0.1135, 0.1466, 0.0296, 0.1847, 0.0949, 0.0223, 0.0545, 0.0528], + device='cuda:3'), in_proj_covar=tensor([0.0286, 0.0312, 0.0276, 0.0306, 0.0298, 0.0256, 0.0397, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 21:46:04,022 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 21:46:19,170 INFO [train.py:901] (3/4) Epoch 18, batch 5100, loss[loss=0.2147, simple_loss=0.3057, pruned_loss=0.0618, over 8034.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2939, pruned_loss=0.06604, over 1612545.65 frames. ], batch size: 22, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:46:27,950 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6834, 1.9063, 2.1013, 1.4406, 2.1777, 1.4728, 0.6602, 1.9125], + device='cuda:3'), covar=tensor([0.0523, 0.0322, 0.0246, 0.0476, 0.0353, 0.0814, 0.0743, 0.0253], + device='cuda:3'), in_proj_covar=tensor([0.0427, 0.0369, 0.0316, 0.0425, 0.0357, 0.0514, 0.0370, 0.0393], + device='cuda:3'), out_proj_covar=tensor([1.1599e-04, 9.7765e-05, 8.3674e-05, 1.1317e-04, 9.5103e-05, 1.4723e-04, + 1.0050e-04, 1.0481e-04], device='cuda:3') +2023-02-06 21:46:54,249 INFO [train.py:901] (3/4) Epoch 18, batch 5150, loss[loss=0.244, simple_loss=0.2923, pruned_loss=0.09786, over 7692.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.294, pruned_loss=0.0666, over 1614305.19 frames. ], batch size: 18, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:04,404 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.481e+02 3.004e+02 4.323e+02 1.197e+03, threshold=6.009e+02, percent-clipped=7.0 +2023-02-06 21:47:14,699 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142591.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:47:29,008 INFO [train.py:901] (3/4) Epoch 18, batch 5200, loss[loss=0.1993, simple_loss=0.285, pruned_loss=0.05684, over 8493.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2937, pruned_loss=0.06667, over 1613567.77 frames. ], batch size: 49, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:47:45,306 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8932, 6.0141, 5.2165, 2.4069, 5.3027, 5.6407, 5.6043, 5.3861], + device='cuda:3'), covar=tensor([0.0494, 0.0396, 0.0841, 0.4331, 0.0656, 0.0670, 0.0919, 0.0647], + device='cuda:3'), in_proj_covar=tensor([0.0507, 0.0421, 0.0419, 0.0519, 0.0411, 0.0417, 0.0401, 0.0368], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:47:55,313 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:02,991 INFO [train.py:901] (3/4) Epoch 18, batch 5250, loss[loss=0.1837, simple_loss=0.2729, pruned_loss=0.04723, over 7801.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2934, pruned_loss=0.06642, over 1615509.85 frames. 
], batch size: 20, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:03,207 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=142661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:03,673 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 21:48:13,210 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142674.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:14,366 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.428e+02 2.491e+02 3.102e+02 3.692e+02 6.533e+02, threshold=6.204e+02, percent-clipped=2.0 +2023-02-06 21:48:21,254 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=142686.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:48:37,897 INFO [train.py:901] (3/4) Epoch 18, batch 5300, loss[loss=0.2286, simple_loss=0.3092, pruned_loss=0.07399, over 8606.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2936, pruned_loss=0.06642, over 1613757.85 frames. ], batch size: 39, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:48:38,137 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3200, 2.6234, 3.1644, 1.6351, 3.2556, 1.9452, 1.5528, 2.2521], + device='cuda:3'), covar=tensor([0.0704, 0.0386, 0.0202, 0.0715, 0.0383, 0.0738, 0.0829, 0.0495], + device='cuda:3'), in_proj_covar=tensor([0.0429, 0.0369, 0.0318, 0.0427, 0.0357, 0.0516, 0.0370, 0.0395], + device='cuda:3'), out_proj_covar=tensor([1.1662e-04, 9.7769e-05, 8.4124e-05, 1.1369e-04, 9.4975e-05, 1.4790e-04, + 1.0063e-04, 1.0539e-04], device='cuda:3') +2023-02-06 21:48:53,423 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0329, 2.1574, 2.2985, 1.6022, 2.3115, 1.7206, 1.6565, 1.9343], + device='cuda:3'), covar=tensor([0.0520, 0.0348, 0.0231, 0.0480, 0.0366, 0.0571, 0.0632, 0.0363], + device='cuda:3'), in_proj_covar=tensor([0.0431, 0.0371, 0.0320, 0.0429, 0.0359, 0.0518, 0.0372, 0.0396], + device='cuda:3'), out_proj_covar=tensor([1.1717e-04, 9.8130e-05, 8.4633e-05, 1.1421e-04, 9.5457e-05, 1.4854e-04, + 1.0109e-04, 1.0578e-04], device='cuda:3') +2023-02-06 21:48:59,520 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=142742.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:49:12,900 INFO [train.py:901] (3/4) Epoch 18, batch 5350, loss[loss=0.181, simple_loss=0.2664, pruned_loss=0.04781, over 8092.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2937, pruned_loss=0.06618, over 1612572.45 frames. ], batch size: 21, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:49:22,760 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.581e+02 3.011e+02 3.651e+02 7.168e+02, threshold=6.023e+02, percent-clipped=3.0 +2023-02-06 21:49:43,696 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. limit=2.0 +2023-02-06 21:49:48,113 INFO [train.py:901] (3/4) Epoch 18, batch 5400, loss[loss=0.1909, simple_loss=0.2744, pruned_loss=0.0537, over 8035.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2927, pruned_loss=0.06587, over 1612939.17 frames. ], batch size: 22, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:49:49,774 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-02-06 21:50:06,772 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0776, 1.8613, 2.3683, 1.9619, 2.2161, 2.1334, 1.9153, 1.1151], + device='cuda:3'), covar=tensor([0.5215, 0.4523, 0.1760, 0.3762, 0.2694, 0.3042, 0.1908, 0.5179], + device='cuda:3'), in_proj_covar=tensor([0.0927, 0.0943, 0.0776, 0.0911, 0.0984, 0.0863, 0.0731, 0.0811], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:50:22,753 INFO [train.py:901] (3/4) Epoch 18, batch 5450, loss[loss=0.1988, simple_loss=0.2742, pruned_loss=0.06165, over 7808.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2912, pruned_loss=0.06524, over 1609336.47 frames. ], batch size: 19, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:50:33,557 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.381e+02 3.003e+02 4.378e+02 7.690e+02, threshold=6.006e+02, percent-clipped=4.0 +2023-02-06 21:50:50,005 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 21:50:58,894 INFO [train.py:901] (3/4) Epoch 18, batch 5500, loss[loss=0.2456, simple_loss=0.3288, pruned_loss=0.08114, over 8105.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2928, pruned_loss=0.06558, over 1616099.07 frames. ], batch size: 23, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:14,989 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=142935.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:51:33,202 INFO [train.py:901] (3/4) Epoch 18, batch 5550, loss[loss=0.1772, simple_loss=0.2581, pruned_loss=0.04818, over 7794.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2922, pruned_loss=0.06549, over 1615478.62 frames. ], batch size: 19, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:51:43,333 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.398e+02 2.938e+02 3.826e+02 1.126e+03, threshold=5.876e+02, percent-clipped=10.0 +2023-02-06 21:52:08,277 INFO [train.py:901] (3/4) Epoch 18, batch 5600, loss[loss=0.2323, simple_loss=0.321, pruned_loss=0.07186, over 8745.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2924, pruned_loss=0.06511, over 1616646.92 frames. ], batch size: 30, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:19,261 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-06 21:52:23,148 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4079, 1.6268, 2.0960, 1.3101, 1.3602, 1.6222, 1.5251, 1.3743], + device='cuda:3'), covar=tensor([0.1697, 0.2155, 0.0865, 0.3903, 0.1940, 0.3148, 0.1983, 0.2206], + device='cuda:3'), in_proj_covar=tensor([0.0512, 0.0573, 0.0549, 0.0619, 0.0634, 0.0580, 0.0512, 0.0624], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:52:36,271 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:52:43,531 INFO [train.py:901] (3/4) Epoch 18, batch 5650, loss[loss=0.2192, simple_loss=0.3025, pruned_loss=0.06799, over 8504.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2924, pruned_loss=0.06573, over 1614447.80 frames. 
], batch size: 26, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:52:54,545 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.315e+02 3.071e+02 3.627e+02 7.364e+02, threshold=6.141e+02, percent-clipped=4.0 +2023-02-06 21:53:00,411 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 21:53:01,153 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143086.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:53:18,744 INFO [train.py:901] (3/4) Epoch 18, batch 5700, loss[loss=0.2027, simple_loss=0.2853, pruned_loss=0.0601, over 8597.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2928, pruned_loss=0.06592, over 1610557.73 frames. ], batch size: 34, lr: 4.20e-03, grad_scale: 8.0 +2023-02-06 21:53:37,151 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5621, 3.0140, 2.5252, 4.1493, 1.7803, 2.0983, 2.4482, 3.1943], + device='cuda:3'), covar=tensor([0.0718, 0.0764, 0.0837, 0.0264, 0.1157, 0.1271, 0.1007, 0.0783], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0200, 0.0252, 0.0214, 0.0209, 0.0249, 0.0255, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 21:53:41,104 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1070, 2.1148, 1.7255, 1.9611, 1.6263, 1.4329, 1.5815, 1.6560], + device='cuda:3'), covar=tensor([0.1424, 0.0427, 0.1112, 0.0500, 0.0771, 0.1544, 0.0926, 0.0853], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0235, 0.0324, 0.0303, 0.0295, 0.0330, 0.0341, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 21:53:53,719 INFO [train.py:901] (3/4) Epoch 18, batch 5750, loss[loss=0.2217, simple_loss=0.3018, pruned_loss=0.07074, over 8320.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2929, pruned_loss=0.06638, over 1607871.88 frames. ], batch size: 25, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:53:56,196 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.88 vs. limit=5.0 +2023-02-06 21:54:04,018 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.423e+02 2.839e+02 3.621e+02 5.889e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 21:54:04,717 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 21:54:21,859 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143201.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:54:28,665 INFO [train.py:901] (3/4) Epoch 18, batch 5800, loss[loss=0.2014, simple_loss=0.2681, pruned_loss=0.06729, over 7790.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2922, pruned_loss=0.06635, over 1604627.26 frames. 
], batch size: 19, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:54:43,951 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7307, 4.7012, 4.1307, 2.1601, 4.1229, 4.2550, 4.2593, 4.0227], + device='cuda:3'), covar=tensor([0.0638, 0.0465, 0.1063, 0.4132, 0.0825, 0.0867, 0.1132, 0.0765], + device='cuda:3'), in_proj_covar=tensor([0.0506, 0.0421, 0.0420, 0.0517, 0.0412, 0.0419, 0.0402, 0.0368], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:55:04,389 INFO [train.py:901] (3/4) Epoch 18, batch 5850, loss[loss=0.2144, simple_loss=0.3069, pruned_loss=0.06101, over 8327.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2909, pruned_loss=0.0653, over 1603942.50 frames. ], batch size: 25, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:15,559 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.467e+02 2.892e+02 3.630e+02 6.628e+02, threshold=5.783e+02, percent-clipped=2.0 +2023-02-06 21:55:36,578 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143306.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:55:39,811 INFO [train.py:901] (3/4) Epoch 18, batch 5900, loss[loss=0.1718, simple_loss=0.2496, pruned_loss=0.04705, over 7814.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.291, pruned_loss=0.06531, over 1605815.93 frames. ], batch size: 20, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:55:53,317 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143331.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:56:03,176 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4175, 2.2042, 2.8650, 2.3740, 2.8075, 2.3665, 2.1212, 1.6175], + device='cuda:3'), covar=tensor([0.4796, 0.4199, 0.1559, 0.3644, 0.2409, 0.2858, 0.1853, 0.4749], + device='cuda:3'), in_proj_covar=tensor([0.0929, 0.0946, 0.0777, 0.0915, 0.0981, 0.0866, 0.0729, 0.0812], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 21:56:14,464 INFO [train.py:901] (3/4) Epoch 18, batch 5950, loss[loss=0.2255, simple_loss=0.2964, pruned_loss=0.07725, over 7938.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2915, pruned_loss=0.06554, over 1609368.30 frames. ], batch size: 20, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:25,158 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.388e+02 2.875e+02 3.741e+02 7.794e+02, threshold=5.749e+02, percent-clipped=3.0 +2023-02-06 21:56:49,478 INFO [train.py:901] (3/4) Epoch 18, batch 6000, loss[loss=0.2492, simple_loss=0.3307, pruned_loss=0.0838, over 8191.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.291, pruned_loss=0.06504, over 1609910.24 frames. ], batch size: 23, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:56:49,478 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 21:57:03,428 INFO [train.py:935] (3/4) Epoch 18, validation: loss=0.1765, simple_loss=0.2767, pruned_loss=0.03814, over 944034.00 frames. 
+2023-02-06 21:57:03,430 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 21:57:07,209 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8703, 2.2209, 1.8615, 2.9224, 1.3488, 1.6298, 2.1462, 2.2582], + device='cuda:3'), covar=tensor([0.0794, 0.0849, 0.0901, 0.0394, 0.1267, 0.1465, 0.0960, 0.0924], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0199, 0.0251, 0.0213, 0.0208, 0.0247, 0.0254, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 21:57:08,527 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143418.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:57:33,582 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8268, 3.8303, 3.5193, 1.7805, 3.3813, 3.4574, 3.5239, 3.2794], + device='cuda:3'), covar=tensor([0.1028, 0.0695, 0.1245, 0.4789, 0.1040, 0.1166, 0.1439, 0.1016], + device='cuda:3'), in_proj_covar=tensor([0.0515, 0.0428, 0.0427, 0.0527, 0.0419, 0.0425, 0.0410, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:57:35,795 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=143457.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:38,434 INFO [train.py:901] (3/4) Epoch 18, batch 6050, loss[loss=0.2196, simple_loss=0.3057, pruned_loss=0.06678, over 8534.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2921, pruned_loss=0.06575, over 1614331.68 frames. ], batch size: 31, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:57:47,832 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-06 21:57:48,575 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.412e+02 3.060e+02 4.409e+02 1.030e+03, threshold=6.120e+02, percent-clipped=9.0 +2023-02-06 21:57:53,565 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=143482.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 21:57:59,612 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=143491.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 21:58:02,442 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8193, 1.3424, 1.6927, 1.2655, 0.9149, 1.3981, 1.6920, 1.6352], + device='cuda:3'), covar=tensor([0.0533, 0.1348, 0.1704, 0.1494, 0.0588, 0.1509, 0.0689, 0.0611], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0099, 0.0161, 0.0113, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 21:58:06,564 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9038, 2.0710, 1.8915, 2.5853, 1.1359, 1.6022, 1.8523, 2.0804], + device='cuda:3'), covar=tensor([0.0731, 0.0811, 0.0863, 0.0393, 0.1110, 0.1308, 0.0786, 0.0750], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0199, 0.0250, 0.0213, 0.0208, 0.0247, 0.0254, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 21:58:13,403 INFO [train.py:901] (3/4) Epoch 18, batch 6100, loss[loss=0.229, simple_loss=0.3109, pruned_loss=0.07354, over 8590.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2906, pruned_loss=0.06511, over 1607848.52 frames. 
], batch size: 49, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:58:22,902 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6444, 4.6191, 4.1613, 2.2282, 4.1099, 4.1642, 4.1923, 3.9431], + device='cuda:3'), covar=tensor([0.0633, 0.0495, 0.0958, 0.4278, 0.0888, 0.1004, 0.1246, 0.0783], + device='cuda:3'), in_proj_covar=tensor([0.0512, 0.0426, 0.0425, 0.0524, 0.0418, 0.0423, 0.0408, 0.0374], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 21:58:39,433 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 21:58:40,456 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.20 vs. limit=5.0 +2023-02-06 21:58:49,839 INFO [train.py:901] (3/4) Epoch 18, batch 6150, loss[loss=0.2148, simple_loss=0.2997, pruned_loss=0.06496, over 8457.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2906, pruned_loss=0.06515, over 1610795.07 frames. ], batch size: 27, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:00,211 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.359e+02 3.030e+02 3.820e+02 7.737e+02, threshold=6.061e+02, percent-clipped=3.0 +2023-02-06 21:59:25,630 INFO [train.py:901] (3/4) Epoch 18, batch 6200, loss[loss=0.2525, simple_loss=0.3095, pruned_loss=0.09777, over 8240.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2911, pruned_loss=0.06549, over 1615935.04 frames. ], batch size: 22, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 21:59:56,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8593, 2.1973, 1.7967, 2.7517, 1.9580, 1.8293, 2.3111, 2.4106], + device='cuda:3'), covar=tensor([0.1183, 0.0900, 0.1541, 0.0391, 0.0996, 0.1396, 0.0682, 0.0658], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0200, 0.0252, 0.0213, 0.0209, 0.0249, 0.0255, 0.0214], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 22:00:01,325 INFO [train.py:901] (3/4) Epoch 18, batch 6250, loss[loss=0.2071, simple_loss=0.2925, pruned_loss=0.06083, over 8686.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2914, pruned_loss=0.06577, over 1612670.52 frames. 
], batch size: 34, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:00:07,083 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9228, 2.3943, 1.9107, 2.3336, 2.1763, 1.7562, 2.0850, 2.2970], + device='cuda:3'), covar=tensor([0.1002, 0.0395, 0.1023, 0.0509, 0.0634, 0.1222, 0.0826, 0.0799], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0234, 0.0325, 0.0304, 0.0295, 0.0330, 0.0341, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:00:12,419 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.779e+02 2.460e+02 3.089e+02 4.040e+02 1.017e+03, threshold=6.178e+02, percent-clipped=5.0 +2023-02-06 22:00:20,714 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4929, 1.7137, 1.8549, 1.2662, 1.9255, 1.3494, 0.5131, 1.7039], + device='cuda:3'), covar=tensor([0.0465, 0.0318, 0.0234, 0.0437, 0.0297, 0.0808, 0.0686, 0.0219], + device='cuda:3'), in_proj_covar=tensor([0.0436, 0.0372, 0.0322, 0.0433, 0.0362, 0.0523, 0.0380, 0.0398], + device='cuda:3'), out_proj_covar=tensor([1.1826e-04, 9.8424e-05, 8.5340e-05, 1.1531e-04, 9.6311e-05, 1.4985e-04, + 1.0310e-04, 1.0596e-04], device='cuda:3') +2023-02-06 22:00:37,036 INFO [train.py:901] (3/4) Epoch 18, batch 6300, loss[loss=0.1951, simple_loss=0.2771, pruned_loss=0.05649, over 7815.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2915, pruned_loss=0.06555, over 1612197.79 frames. ], batch size: 20, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:11,904 INFO [train.py:901] (3/4) Epoch 18, batch 6350, loss[loss=0.2105, simple_loss=0.2645, pruned_loss=0.0782, over 7724.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2905, pruned_loss=0.06533, over 1610428.50 frames. ], batch size: 18, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:13,342 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143762.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:01:22,540 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.204e+02 2.882e+02 3.589e+02 6.333e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 22:01:47,363 INFO [train.py:901] (3/4) Epoch 18, batch 6400, loss[loss=0.1841, simple_loss=0.2677, pruned_loss=0.0503, over 8136.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2904, pruned_loss=0.065, over 1612526.11 frames. ], batch size: 22, lr: 4.19e-03, grad_scale: 8.0 +2023-02-06 22:01:49,681 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. 
limit=2.0 +2023-02-06 22:01:55,546 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3924, 1.5980, 1.6181, 1.1039, 1.6401, 1.2757, 0.3194, 1.6246], + device='cuda:3'), covar=tensor([0.0406, 0.0297, 0.0297, 0.0416, 0.0359, 0.0834, 0.0731, 0.0221], + device='cuda:3'), in_proj_covar=tensor([0.0437, 0.0373, 0.0322, 0.0435, 0.0362, 0.0524, 0.0380, 0.0399], + device='cuda:3'), out_proj_covar=tensor([1.1853e-04, 9.8686e-05, 8.5205e-05, 1.1575e-04, 9.6397e-05, 1.5007e-04, + 1.0315e-04, 1.0630e-04], device='cuda:3') +2023-02-06 22:02:04,437 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=143835.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:02:20,759 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1598, 1.5234, 3.4009, 1.5094, 2.3034, 3.7997, 3.8881, 3.2563], + device='cuda:3'), covar=tensor([0.1052, 0.1880, 0.0364, 0.2219, 0.1253, 0.0216, 0.0437, 0.0547], + device='cuda:3'), in_proj_covar=tensor([0.0283, 0.0313, 0.0277, 0.0306, 0.0295, 0.0254, 0.0399, 0.0297], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 22:02:21,899 INFO [train.py:901] (3/4) Epoch 18, batch 6450, loss[loss=0.1998, simple_loss=0.2815, pruned_loss=0.05907, over 7787.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2906, pruned_loss=0.06521, over 1613216.58 frames. ], batch size: 19, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:02:33,451 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.452e+02 2.973e+02 3.704e+02 1.405e+03, threshold=5.946e+02, percent-clipped=1.0 +2023-02-06 22:02:34,284 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143877.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:02:39,644 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-06 22:02:57,286 INFO [train.py:901] (3/4) Epoch 18, batch 6500, loss[loss=0.2115, simple_loss=0.2718, pruned_loss=0.07559, over 7705.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2913, pruned_loss=0.06599, over 1611208.41 frames. ], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:24,066 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=143950.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:03:31,360 INFO [train.py:901] (3/4) Epoch 18, batch 6550, loss[loss=0.1867, simple_loss=0.2593, pruned_loss=0.05706, over 7426.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2927, pruned_loss=0.06684, over 1609791.81 frames. ], batch size: 17, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:03:38,659 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2621, 1.9408, 2.6944, 2.2022, 2.5946, 2.2269, 2.0036, 1.4081], + device='cuda:3'), covar=tensor([0.4851, 0.4585, 0.1626, 0.3363, 0.2297, 0.2761, 0.1850, 0.4904], + device='cuda:3'), in_proj_covar=tensor([0.0928, 0.0945, 0.0779, 0.0913, 0.0982, 0.0866, 0.0731, 0.0809], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 22:03:41,849 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.926e+02 2.526e+02 3.154e+02 3.765e+02 8.734e+02, threshold=6.308e+02, percent-clipped=5.0 +2023-02-06 22:03:48,834 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-06 22:04:08,861 INFO [train.py:901] (3/4) Epoch 18, batch 6600, loss[loss=0.1825, simple_loss=0.2663, pruned_loss=0.04938, over 7667.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2922, pruned_loss=0.06619, over 1606928.52 frames. ], batch size: 19, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:10,898 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 22:04:25,261 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-06 22:04:34,007 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0 +2023-02-06 22:04:43,666 INFO [train.py:901] (3/4) Epoch 18, batch 6650, loss[loss=0.2294, simple_loss=0.3053, pruned_loss=0.0768, over 8196.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2925, pruned_loss=0.06605, over 1610402.19 frames. ], batch size: 23, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:04:54,721 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.298e+02 3.022e+02 3.555e+02 7.360e+02, threshold=6.043e+02, percent-clipped=4.0 +2023-02-06 22:05:19,670 INFO [train.py:901] (3/4) Epoch 18, batch 6700, loss[loss=0.2145, simple_loss=0.3038, pruned_loss=0.06259, over 8511.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2919, pruned_loss=0.06573, over 1607755.48 frames. ], batch size: 28, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:05:34,398 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144133.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:05:51,944 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144158.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:05:53,841 INFO [train.py:901] (3/4) Epoch 18, batch 6750, loss[loss=0.1764, simple_loss=0.2558, pruned_loss=0.04853, over 7552.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2927, pruned_loss=0.06599, over 1613084.50 frames. ], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:03,939 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.293e+02 3.003e+02 3.717e+02 7.578e+02, threshold=6.007e+02, percent-clipped=1.0 +2023-02-06 22:06:25,435 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=144206.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:06:27,268 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 22:06:28,567 INFO [train.py:901] (3/4) Epoch 18, batch 6800, loss[loss=0.1879, simple_loss=0.263, pruned_loss=0.05642, over 7704.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2919, pruned_loss=0.06564, over 1613504.69 frames. 
], batch size: 18, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:06:41,515 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4432, 1.7106, 2.6692, 1.2892, 1.8131, 1.7772, 1.5059, 1.8609], + device='cuda:3'), covar=tensor([0.1999, 0.2517, 0.0848, 0.4400, 0.2070, 0.3159, 0.2307, 0.2342], + device='cuda:3'), in_proj_covar=tensor([0.0514, 0.0580, 0.0551, 0.0625, 0.0641, 0.0586, 0.0517, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:06:42,847 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=144231.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:07:03,973 INFO [train.py:901] (3/4) Epoch 18, batch 6850, loss[loss=0.2369, simple_loss=0.3097, pruned_loss=0.08203, over 8454.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2921, pruned_loss=0.06569, over 1615932.43 frames. ], batch size: 27, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:07:13,994 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.379e+02 2.937e+02 3.634e+02 6.722e+02, threshold=5.873e+02, percent-clipped=2.0 +2023-02-06 22:07:17,332 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-06 22:07:38,062 INFO [train.py:901] (3/4) Epoch 18, batch 6900, loss[loss=0.2871, simple_loss=0.3526, pruned_loss=0.1108, over 6692.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2925, pruned_loss=0.06563, over 1614839.12 frames. ], batch size: 71, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:13,483 INFO [train.py:901] (3/4) Epoch 18, batch 6950, loss[loss=0.2384, simple_loss=0.3166, pruned_loss=0.0801, over 8323.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2922, pruned_loss=0.06595, over 1610382.18 frames. ], batch size: 25, lr: 4.18e-03, grad_scale: 8.0 +2023-02-06 22:08:24,083 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.398e+02 2.919e+02 3.864e+02 7.610e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-06 22:08:25,464 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 22:08:47,781 INFO [train.py:901] (3/4) Epoch 18, batch 7000, loss[loss=0.1795, simple_loss=0.2562, pruned_loss=0.05139, over 7939.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2924, pruned_loss=0.06624, over 1605958.43 frames. 
], batch size: 20, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:08:49,931 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7882, 1.5069, 3.9771, 1.5666, 3.5182, 3.2446, 3.6244, 3.5020], + device='cuda:3'), covar=tensor([0.0696, 0.3943, 0.0606, 0.3622, 0.1178, 0.1008, 0.0630, 0.0757], + device='cuda:3'), in_proj_covar=tensor([0.0596, 0.0632, 0.0676, 0.0603, 0.0685, 0.0584, 0.0582, 0.0652], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 22:09:01,097 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=144429.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:09:05,741 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7110, 1.3394, 2.8403, 1.4055, 2.0292, 3.0354, 3.2238, 2.5899], + device='cuda:3'), covar=tensor([0.1111, 0.1758, 0.0385, 0.2105, 0.0952, 0.0289, 0.0570, 0.0625], + device='cuda:3'), in_proj_covar=tensor([0.0283, 0.0314, 0.0278, 0.0307, 0.0295, 0.0255, 0.0400, 0.0298], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 22:09:22,578 INFO [train.py:901] (3/4) Epoch 18, batch 7050, loss[loss=0.242, simple_loss=0.3167, pruned_loss=0.0837, over 8643.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2927, pruned_loss=0.06686, over 1603041.08 frames. ], batch size: 34, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:09:34,230 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.214e+02 2.534e+02 2.937e+02 3.689e+02 8.247e+02, threshold=5.874e+02, percent-clipped=3.0 +2023-02-06 22:09:58,439 INFO [train.py:901] (3/4) Epoch 18, batch 7100, loss[loss=0.2064, simple_loss=0.2892, pruned_loss=0.06182, over 8107.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2936, pruned_loss=0.06714, over 1604518.96 frames. ], batch size: 23, lr: 4.18e-03, grad_scale: 16.0 +2023-02-06 22:10:33,587 INFO [train.py:901] (3/4) Epoch 18, batch 7150, loss[loss=0.1981, simple_loss=0.274, pruned_loss=0.06113, over 7804.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2933, pruned_loss=0.06686, over 1604497.22 frames. ], batch size: 19, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:10:43,975 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.263e+02 2.906e+02 3.662e+02 1.305e+03, threshold=5.813e+02, percent-clipped=7.0 +2023-02-06 22:11:00,102 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.28 vs. limit=5.0 +2023-02-06 22:11:10,032 INFO [train.py:901] (3/4) Epoch 18, batch 7200, loss[loss=0.2209, simple_loss=0.3124, pruned_loss=0.06463, over 8498.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2932, pruned_loss=0.06666, over 1607566.23 frames. ], batch size: 26, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:12,568 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. 
limit=2.0 +2023-02-06 22:11:20,760 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8449, 1.5399, 1.7402, 1.3869, 1.0665, 1.5492, 1.7458, 1.4154], + device='cuda:3'), covar=tensor([0.0555, 0.1214, 0.1588, 0.1384, 0.0586, 0.1420, 0.0684, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0157, 0.0099, 0.0161, 0.0113, 0.0139], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 22:11:44,457 INFO [train.py:901] (3/4) Epoch 18, batch 7250, loss[loss=0.1895, simple_loss=0.2503, pruned_loss=0.06439, over 7433.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2927, pruned_loss=0.06639, over 1606149.18 frames. ], batch size: 17, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:11:54,472 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.379e+02 2.816e+02 3.627e+02 9.857e+02, threshold=5.632e+02, percent-clipped=4.0 +2023-02-06 22:12:19,766 INFO [train.py:901] (3/4) Epoch 18, batch 7300, loss[loss=0.1723, simple_loss=0.2496, pruned_loss=0.04752, over 7442.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2936, pruned_loss=0.06668, over 1610554.60 frames. ], batch size: 17, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:12:50,210 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4622, 2.5666, 1.7689, 2.3449, 2.0009, 1.4354, 2.0315, 2.0531], + device='cuda:3'), covar=tensor([0.1503, 0.0350, 0.1281, 0.0507, 0.0758, 0.1598, 0.0984, 0.0834], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0233, 0.0324, 0.0301, 0.0294, 0.0328, 0.0340, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:12:53,982 INFO [train.py:901] (3/4) Epoch 18, batch 7350, loss[loss=0.2056, simple_loss=0.3029, pruned_loss=0.05412, over 8195.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2927, pruned_loss=0.06627, over 1612618.41 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:13:02,927 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=144773.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:13:04,753 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.484e+02 2.992e+02 3.514e+02 8.978e+02, threshold=5.985e+02, percent-clipped=6.0 +2023-02-06 22:13:08,084 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 22:13:26,895 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 22:13:28,903 INFO [train.py:901] (3/4) Epoch 18, batch 7400, loss[loss=0.1843, simple_loss=0.2531, pruned_loss=0.05778, over 7715.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2929, pruned_loss=0.06579, over 1614646.14 frames. ], batch size: 18, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:04,309 INFO [train.py:901] (3/4) Epoch 18, batch 7450, loss[loss=0.1648, simple_loss=0.2499, pruned_loss=0.03989, over 7660.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2942, pruned_loss=0.06641, over 1619195.41 frames. ], batch size: 19, lr: 4.17e-03, grad_scale: 16.0 +2023-02-06 22:14:07,796 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 22:14:07,992 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2286, 2.1732, 1.5901, 1.9522, 1.7977, 1.3284, 1.7028, 1.5497], + device='cuda:3'), covar=tensor([0.1298, 0.0369, 0.1094, 0.0472, 0.0594, 0.1405, 0.0858, 0.0835], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0233, 0.0324, 0.0302, 0.0295, 0.0329, 0.0342, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:14:14,578 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.437e+02 2.433e+02 3.083e+02 4.140e+02 9.921e+02, threshold=6.167e+02, percent-clipped=3.0 +2023-02-06 22:14:23,403 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=144888.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:14:38,536 INFO [train.py:901] (3/4) Epoch 18, batch 7500, loss[loss=0.1984, simple_loss=0.2949, pruned_loss=0.05092, over 8710.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2943, pruned_loss=0.06619, over 1620002.96 frames. ], batch size: 34, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:14,116 INFO [train.py:901] (3/4) Epoch 18, batch 7550, loss[loss=0.2145, simple_loss=0.2886, pruned_loss=0.07026, over 7932.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2941, pruned_loss=0.06618, over 1619682.51 frames. ], batch size: 20, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:24,760 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.361e+02 2.893e+02 3.293e+02 8.578e+02, threshold=5.785e+02, percent-clipped=2.0 +2023-02-06 22:15:48,835 INFO [train.py:901] (3/4) Epoch 18, batch 7600, loss[loss=0.1662, simple_loss=0.2466, pruned_loss=0.04288, over 7656.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2944, pruned_loss=0.06603, over 1619649.96 frames. ], batch size: 19, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:15:51,056 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:12,837 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:24,429 INFO [train.py:901] (3/4) Epoch 18, batch 7650, loss[loss=0.1893, simple_loss=0.2759, pruned_loss=0.05137, over 7194.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2933, pruned_loss=0.06575, over 1617136.09 frames. ], batch size: 16, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:16:35,693 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.409e+02 3.204e+02 3.806e+02 7.453e+02, threshold=6.408e+02, percent-clipped=5.0 +2023-02-06 22:16:40,604 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145084.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:16:57,519 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6101, 1.8592, 2.0250, 1.2311, 2.1007, 1.3965, 0.7057, 1.8114], + device='cuda:3'), covar=tensor([0.0771, 0.0446, 0.0417, 0.0756, 0.0499, 0.1048, 0.1014, 0.0362], + device='cuda:3'), in_proj_covar=tensor([0.0440, 0.0379, 0.0325, 0.0433, 0.0364, 0.0525, 0.0381, 0.0404], + device='cuda:3'), out_proj_covar=tensor([1.1951e-04, 1.0020e-04, 8.5967e-05, 1.1516e-04, 9.6754e-05, 1.5029e-04, + 1.0339e-04, 1.0806e-04], device='cuda:3') +2023-02-06 22:16:58,566 INFO [train.py:901] (3/4) Epoch 18, batch 7700, loss[loss=0.2211, simple_loss=0.2849, pruned_loss=0.07867, over 7668.00 frames. 
], tot_loss[loss=0.2128, simple_loss=0.293, pruned_loss=0.06629, over 1613850.30 frames. ], batch size: 19, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:16,304 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-06 22:17:17,791 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1405, 1.2312, 1.2382, 0.9323, 1.2655, 1.0308, 0.3743, 1.1838], + device='cuda:3'), covar=tensor([0.0335, 0.0273, 0.0226, 0.0340, 0.0310, 0.0567, 0.0597, 0.0218], + device='cuda:3'), in_proj_covar=tensor([0.0438, 0.0377, 0.0323, 0.0431, 0.0362, 0.0523, 0.0379, 0.0403], + device='cuda:3'), out_proj_covar=tensor([1.1892e-04, 9.9597e-05, 8.5574e-05, 1.1453e-04, 9.6450e-05, 1.4958e-04, + 1.0290e-04, 1.0766e-04], device='cuda:3') +2023-02-06 22:17:21,903 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145144.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:33,670 INFO [train.py:901] (3/4) Epoch 18, batch 7750, loss[loss=0.2895, simple_loss=0.3512, pruned_loss=0.114, over 8294.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2943, pruned_loss=0.06727, over 1610405.02 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:17:40,052 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145169.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:17:45,202 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.617e+02 3.101e+02 3.765e+02 9.296e+02, threshold=6.202e+02, percent-clipped=3.0 +2023-02-06 22:18:08,802 INFO [train.py:901] (3/4) Epoch 18, batch 7800, loss[loss=0.2065, simple_loss=0.3028, pruned_loss=0.05513, over 8289.00 frames. ], tot_loss[loss=0.214, simple_loss=0.2941, pruned_loss=0.06697, over 1608694.94 frames. ], batch size: 23, lr: 4.17e-03, grad_scale: 8.0 +2023-02-06 22:18:42,838 INFO [train.py:901] (3/4) Epoch 18, batch 7850, loss[loss=0.1984, simple_loss=0.2813, pruned_loss=0.05774, over 8256.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2929, pruned_loss=0.06643, over 1609611.20 frames. ], batch size: 24, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:18:53,262 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.477e+02 2.948e+02 3.643e+02 1.044e+03, threshold=5.895e+02, percent-clipped=9.0 +2023-02-06 22:19:16,116 INFO [train.py:901] (3/4) Epoch 18, batch 7900, loss[loss=0.1769, simple_loss=0.252, pruned_loss=0.05088, over 7409.00 frames. ], tot_loss[loss=0.2131, simple_loss=0.2932, pruned_loss=0.06651, over 1610214.09 frames. ], batch size: 17, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:47,152 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145358.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:19:49,082 INFO [train.py:901] (3/4) Epoch 18, batch 7950, loss[loss=0.2181, simple_loss=0.3002, pruned_loss=0.06804, over 8202.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2932, pruned_loss=0.0659, over 1614938.29 frames. ], batch size: 23, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:19:59,831 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.389e+02 3.012e+02 3.869e+02 1.111e+03, threshold=6.025e+02, percent-clipped=3.0 +2023-02-06 22:20:04,931 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.12 vs. 
limit=5.0 +2023-02-06 22:20:07,942 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:08,012 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145389.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:23,113 INFO [train.py:901] (3/4) Epoch 18, batch 8000, loss[loss=0.2332, simple_loss=0.3164, pruned_loss=0.07499, over 8469.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2929, pruned_loss=0.06578, over 1616857.62 frames. ], batch size: 27, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:20:25,589 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-06 22:20:31,461 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4496, 1.6331, 2.1508, 1.3343, 1.4948, 1.7080, 1.4948, 1.4456], + device='cuda:3'), covar=tensor([0.1765, 0.2268, 0.0977, 0.4150, 0.1852, 0.3147, 0.2122, 0.1979], + device='cuda:3'), in_proj_covar=tensor([0.0515, 0.0578, 0.0548, 0.0625, 0.0638, 0.0583, 0.0515, 0.0628], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:20:34,672 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145428.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:20:57,109 INFO [train.py:901] (3/4) Epoch 18, batch 8050, loss[loss=0.1799, simple_loss=0.2564, pruned_loss=0.05177, over 7227.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2916, pruned_loss=0.06615, over 1590907.86 frames. ], batch size: 16, lr: 4.16e-03, grad_scale: 8.0 +2023-02-06 22:21:05,665 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145473.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:21:08,161 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.350e+02 2.866e+02 3.408e+02 5.747e+02, threshold=5.732e+02, percent-clipped=0.0 +2023-02-06 22:21:29,237 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 22:21:34,893 INFO [train.py:901] (3/4) Epoch 19, batch 0, loss[loss=0.244, simple_loss=0.3149, pruned_loss=0.08652, over 8094.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3149, pruned_loss=0.08652, over 8094.00 frames. ], batch size: 23, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:21:34,894 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 22:21:46,548 INFO [train.py:935] (3/4) Epoch 19, validation: loss=0.1782, simple_loss=0.2779, pruned_loss=0.03928, over 944034.00 frames. +2023-02-06 22:21:46,549 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 22:21:54,199 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145504.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:03,060 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 22:22:22,466 INFO [train.py:901] (3/4) Epoch 19, batch 50, loss[loss=0.1729, simple_loss=0.2506, pruned_loss=0.04758, over 7437.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2906, pruned_loss=0.06452, over 365881.72 frames. 
], batch size: 17, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:22,665 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145543.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:22:23,353 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9624, 1.6352, 3.5683, 1.5994, 2.4965, 3.8896, 4.0418, 3.2806], + device='cuda:3'), covar=tensor([0.1178, 0.1704, 0.0322, 0.1954, 0.1017, 0.0227, 0.0548, 0.0549], + device='cuda:3'), in_proj_covar=tensor([0.0281, 0.0312, 0.0275, 0.0303, 0.0293, 0.0253, 0.0396, 0.0296], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 22:22:25,796 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 22:22:40,517 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 22:22:42,317 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 22:22:45,196 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.340e+02 2.977e+02 3.641e+02 7.952e+02, threshold=5.953e+02, percent-clipped=6.0 +2023-02-06 22:22:56,255 INFO [train.py:901] (3/4) Epoch 19, batch 100, loss[loss=0.2519, simple_loss=0.3172, pruned_loss=0.09335, over 7190.00 frames. ], tot_loss[loss=0.2142, simple_loss=0.2949, pruned_loss=0.06675, over 644152.78 frames. ], batch size: 71, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:22:59,350 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6888, 2.2384, 4.0837, 1.5684, 3.0325, 2.1772, 1.7843, 2.8595], + device='cuda:3'), covar=tensor([0.1827, 0.2572, 0.0750, 0.4519, 0.1734, 0.3180, 0.2190, 0.2400], + device='cuda:3'), in_proj_covar=tensor([0.0515, 0.0578, 0.0548, 0.0628, 0.0635, 0.0585, 0.0517, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:23:01,906 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 22:23:10,096 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145612.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:20,686 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7143, 4.6867, 4.1813, 2.1754, 4.1330, 4.3431, 4.2578, 4.0546], + device='cuda:3'), covar=tensor([0.0823, 0.0504, 0.0949, 0.4601, 0.0820, 0.0932, 0.1165, 0.0786], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0426, 0.0429, 0.0527, 0.0414, 0.0426, 0.0408, 0.0373], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:23:32,314 INFO [train.py:901] (3/4) Epoch 19, batch 150, loss[loss=0.2341, simple_loss=0.3166, pruned_loss=0.0758, over 8518.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2934, pruned_loss=0.06613, over 859786.24 frames. 
], batch size: 39, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:23:40,877 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2843, 1.9879, 2.7535, 2.2431, 2.6228, 2.2465, 1.9931, 1.4403], + device='cuda:3'), covar=tensor([0.5175, 0.4912, 0.1710, 0.3468, 0.2318, 0.2919, 0.2021, 0.5114], + device='cuda:3'), in_proj_covar=tensor([0.0925, 0.0945, 0.0773, 0.0911, 0.0976, 0.0864, 0.0728, 0.0808], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 22:23:46,187 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:23:57,046 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.454e+02 2.969e+02 3.777e+02 1.176e+03, threshold=5.938e+02, percent-clipped=4.0 +2023-02-06 22:24:07,976 INFO [train.py:901] (3/4) Epoch 19, batch 200, loss[loss=0.2443, simple_loss=0.3178, pruned_loss=0.08538, over 8290.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2936, pruned_loss=0.06582, over 1027557.08 frames. ], batch size: 23, lr: 4.05e-03, grad_scale: 8.0 +2023-02-06 22:24:33,093 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8301, 3.6354, 2.1712, 2.6877, 2.6134, 1.9447, 2.6852, 2.8790], + device='cuda:3'), covar=tensor([0.1778, 0.0322, 0.1137, 0.0735, 0.0771, 0.1356, 0.1078, 0.1096], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0234, 0.0323, 0.0301, 0.0296, 0.0328, 0.0339, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:24:33,108 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145729.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:35,731 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145733.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:43,207 INFO [train.py:901] (3/4) Epoch 19, batch 250, loss[loss=0.2205, simple_loss=0.315, pruned_loss=0.06298, over 8476.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.293, pruned_loss=0.0654, over 1160192.41 frames. ], batch size: 25, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:24:51,130 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145754.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:55,236 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145760.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:24:58,398 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-06 22:25:06,970 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-06 22:25:07,541 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.430e+02 2.432e+02 3.022e+02 3.893e+02 7.688e+02, threshold=6.043e+02, percent-clipped=6.0 +2023-02-06 22:25:13,307 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145785.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:18,656 INFO [train.py:901] (3/4) Epoch 19, batch 300, loss[loss=0.1872, simple_loss=0.2658, pruned_loss=0.05429, over 7657.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2926, pruned_loss=0.06565, over 1256809.30 frames. 
], batch size: 19, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:22,945 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=145799.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:39,974 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=145824.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:25:51,137 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=145839.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:25:53,701 INFO [train.py:901] (3/4) Epoch 19, batch 350, loss[loss=0.1824, simple_loss=0.2571, pruned_loss=0.05385, over 7237.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2926, pruned_loss=0.06566, over 1332178.87 frames. ], batch size: 16, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:25:57,438 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=145848.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:26:17,676 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.383e+02 2.952e+02 3.795e+02 9.100e+02, threshold=5.904e+02, percent-clipped=6.0 +2023-02-06 22:26:30,044 INFO [train.py:901] (3/4) Epoch 19, batch 400, loss[loss=0.1669, simple_loss=0.2462, pruned_loss=0.04378, over 7265.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2919, pruned_loss=0.0653, over 1391886.34 frames. ], batch size: 16, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:26:32,903 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4785, 4.4931, 4.0582, 2.0804, 3.9984, 4.0311, 4.0399, 3.8046], + device='cuda:3'), covar=tensor([0.0764, 0.0554, 0.1121, 0.4451, 0.0870, 0.1030, 0.1219, 0.0855], + device='cuda:3'), in_proj_covar=tensor([0.0511, 0.0422, 0.0425, 0.0524, 0.0411, 0.0424, 0.0402, 0.0370], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:26:44,749 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8481, 1.6002, 1.7718, 1.5984, 1.0511, 1.5789, 2.1593, 2.0253], + device='cuda:3'), covar=tensor([0.0462, 0.1298, 0.1670, 0.1380, 0.0598, 0.1534, 0.0663, 0.0592], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0161, 0.0112, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 22:27:04,033 INFO [train.py:901] (3/4) Epoch 19, batch 450, loss[loss=0.1922, simple_loss=0.2689, pruned_loss=0.05774, over 7655.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.293, pruned_loss=0.06573, over 1443996.00 frames. ], batch size: 19, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:12,922 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=145956.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:27:23,355 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.02 vs. limit=5.0 +2023-02-06 22:27:28,517 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.474e+02 2.839e+02 3.457e+02 5.406e+02, threshold=5.677e+02, percent-clipped=0.0 +2023-02-06 22:27:40,188 INFO [train.py:901] (3/4) Epoch 19, batch 500, loss[loss=0.2249, simple_loss=0.3088, pruned_loss=0.07056, over 8335.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2922, pruned_loss=0.06515, over 1482659.05 frames. 
], batch size: 25, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:27:50,097 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146005.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:03,432 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5306, 1.4883, 4.7346, 1.7133, 4.1607, 3.9086, 4.2830, 4.1205], + device='cuda:3'), covar=tensor([0.0549, 0.4619, 0.0453, 0.4250, 0.1094, 0.0929, 0.0561, 0.0662], + device='cuda:3'), in_proj_covar=tensor([0.0597, 0.0627, 0.0673, 0.0605, 0.0687, 0.0591, 0.0585, 0.0649], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:28:15,840 INFO [train.py:901] (3/4) Epoch 19, batch 550, loss[loss=0.194, simple_loss=0.284, pruned_loss=0.05204, over 7984.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2931, pruned_loss=0.06567, over 1515477.92 frames. ], batch size: 21, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:19,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8710, 3.8067, 3.5169, 1.7261, 3.4315, 3.4617, 3.4560, 3.1911], + device='cuda:3'), covar=tensor([0.0974, 0.0701, 0.1196, 0.4780, 0.1018, 0.1082, 0.1513, 0.0938], + device='cuda:3'), in_proj_covar=tensor([0.0519, 0.0428, 0.0430, 0.0530, 0.0416, 0.0430, 0.0411, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:28:35,122 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146071.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:28:38,933 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.555e+02 2.557e+02 3.049e+02 4.000e+02 8.642e+02, threshold=6.099e+02, percent-clipped=4.0 +2023-02-06 22:28:50,772 INFO [train.py:901] (3/4) Epoch 19, batch 600, loss[loss=0.2025, simple_loss=0.2659, pruned_loss=0.06954, over 7423.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2927, pruned_loss=0.06576, over 1539898.90 frames. ], batch size: 17, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:28:54,564 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8010, 1.4121, 4.0110, 1.3354, 3.5468, 3.3350, 3.6546, 3.5170], + device='cuda:3'), covar=tensor([0.0651, 0.4172, 0.0603, 0.3921, 0.1276, 0.1037, 0.0598, 0.0754], + device='cuda:3'), in_proj_covar=tensor([0.0594, 0.0624, 0.0670, 0.0600, 0.0684, 0.0588, 0.0582, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 22:28:59,452 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146104.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:03,473 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.8072, 4.7968, 4.3500, 2.3859, 4.2896, 4.3196, 4.4180, 4.0022], + device='cuda:3'), covar=tensor([0.0626, 0.0468, 0.0923, 0.3826, 0.0740, 0.0762, 0.1097, 0.0711], + device='cuda:3'), in_proj_covar=tensor([0.0517, 0.0425, 0.0427, 0.0526, 0.0413, 0.0427, 0.0407, 0.0373], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:29:11,441 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-06 22:29:11,618 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146120.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:17,651 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146129.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:26,673 INFO [train.py:901] (3/4) Epoch 19, batch 650, loss[loss=0.1869, simple_loss=0.2674, pruned_loss=0.05316, over 7805.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2904, pruned_loss=0.06498, over 1552080.25 frames. ], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:29:42,925 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:29:49,766 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.807e+02 2.628e+02 2.995e+02 3.912e+02 8.872e+02, threshold=5.991e+02, percent-clipped=7.0 +2023-02-06 22:29:53,935 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146183.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:30:00,627 INFO [train.py:901] (3/4) Epoch 19, batch 700, loss[loss=0.2086, simple_loss=0.2956, pruned_loss=0.06082, over 8682.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2915, pruned_loss=0.06524, over 1569826.47 frames. ], batch size: 34, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:37,726 INFO [train.py:901] (3/4) Epoch 19, batch 750, loss[loss=0.192, simple_loss=0.2838, pruned_loss=0.05012, over 8456.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2906, pruned_loss=0.06472, over 1580215.47 frames. ], batch size: 27, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:30:58,059 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-06 22:31:00,738 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.187e+02 2.733e+02 3.387e+02 1.037e+03, threshold=5.466e+02, percent-clipped=4.0 +2023-02-06 22:31:06,864 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-06 22:31:11,497 INFO [train.py:901] (3/4) Epoch 19, batch 800, loss[loss=0.2447, simple_loss=0.3123, pruned_loss=0.08851, over 7331.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2914, pruned_loss=0.06498, over 1588505.02 frames. ], batch size: 71, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:14,845 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146298.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:31:15,544 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1837, 1.2098, 1.6307, 1.1091, 0.7427, 1.3278, 1.2406, 1.0451], + device='cuda:3'), covar=tensor([0.0564, 0.1175, 0.1556, 0.1435, 0.0522, 0.1432, 0.0657, 0.0706], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0099, 0.0162, 0.0113, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 22:31:17,173 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0 +2023-02-06 22:31:35,767 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146327.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:31:47,226 INFO [train.py:901] (3/4) Epoch 19, batch 850, loss[loss=0.2151, simple_loss=0.2994, pruned_loss=0.06536, over 7941.00 frames. 
], tot_loss[loss=0.2119, simple_loss=0.2925, pruned_loss=0.06571, over 1594632.87 frames. ], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:31:54,229 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146352.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:10,850 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146376.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:11,293 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.470e+02 3.071e+02 3.941e+02 1.675e+03, threshold=6.141e+02, percent-clipped=6.0 +2023-02-06 22:32:22,253 INFO [train.py:901] (3/4) Epoch 19, batch 900, loss[loss=0.2146, simple_loss=0.2969, pruned_loss=0.06612, over 8527.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2931, pruned_loss=0.06598, over 1597670.61 frames. ], batch size: 28, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:32:27,825 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146401.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:32:41,220 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7796, 2.1955, 4.0524, 1.5989, 2.9715, 2.3416, 1.8015, 2.7749], + device='cuda:3'), covar=tensor([0.1904, 0.2787, 0.0893, 0.4678, 0.1857, 0.3048, 0.2326, 0.2533], + device='cuda:3'), in_proj_covar=tensor([0.0516, 0.0580, 0.0550, 0.0630, 0.0639, 0.0584, 0.0518, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:32:56,373 INFO [train.py:901] (3/4) Epoch 19, batch 950, loss[loss=0.215, simple_loss=0.2834, pruned_loss=0.07327, over 7931.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2937, pruned_loss=0.06599, over 1602730.46 frames. ], batch size: 20, lr: 4.04e-03, grad_scale: 8.0 +2023-02-06 22:33:09,723 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.54 vs. limit=5.0 +2023-02-06 22:33:20,835 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0297, 1.5541, 1.9206, 1.4919, 1.0539, 1.5143, 2.0479, 2.2620], + device='cuda:3'), covar=tensor([0.0426, 0.1298, 0.1592, 0.1474, 0.0622, 0.1616, 0.0638, 0.0549], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0112, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 22:33:21,314 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.324e+02 2.987e+02 4.077e+02 9.877e+02, threshold=5.974e+02, percent-clipped=4.0 +2023-02-06 22:33:22,702 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-06 22:33:32,162 INFO [train.py:901] (3/4) Epoch 19, batch 1000, loss[loss=0.1794, simple_loss=0.2614, pruned_loss=0.04874, over 7924.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2918, pruned_loss=0.06462, over 1605038.07 frames. ], batch size: 20, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:33:44,468 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146511.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:33:54,581 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-06 22:34:00,614 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. 
limit=2.0 +2023-02-06 22:34:06,370 INFO [train.py:901] (3/4) Epoch 19, batch 1050, loss[loss=0.2237, simple_loss=0.2999, pruned_loss=0.07376, over 8133.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2919, pruned_loss=0.06494, over 1604278.41 frames. ], batch size: 22, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:07,093 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-06 22:34:14,961 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146554.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 22:34:31,572 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.403e+02 2.837e+02 3.508e+02 6.242e+02, threshold=5.674e+02, percent-clipped=1.0 +2023-02-06 22:34:34,618 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146579.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 22:34:44,087 INFO [train.py:901] (3/4) Epoch 19, batch 1100, loss[loss=0.2179, simple_loss=0.3002, pruned_loss=0.06782, over 8475.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2908, pruned_loss=0.06433, over 1606859.40 frames. ], batch size: 29, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:34:55,193 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146609.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:06,979 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=146626.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:18,374 INFO [train.py:901] (3/4) Epoch 19, batch 1150, loss[loss=0.1817, simple_loss=0.2639, pruned_loss=0.04975, over 7919.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2921, pruned_loss=0.06548, over 1606230.17 frames. ], batch size: 20, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:35:19,117 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-06 22:35:19,269 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:35:42,411 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.484e+02 2.879e+02 3.755e+02 5.922e+02, threshold=5.758e+02, percent-clipped=3.0 +2023-02-06 22:35:53,854 INFO [train.py:901] (3/4) Epoch 19, batch 1200, loss[loss=0.2106, simple_loss=0.3029, pruned_loss=0.05918, over 8247.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2933, pruned_loss=0.06631, over 1611935.36 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:28,994 INFO [train.py:901] (3/4) Epoch 19, batch 1250, loss[loss=0.2095, simple_loss=0.2967, pruned_loss=0.06117, over 8289.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2934, pruned_loss=0.06663, over 1609242.52 frames. ], batch size: 23, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:36:37,770 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 22:36:52,651 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.364e+02 2.471e+02 2.976e+02 4.092e+02 7.603e+02, threshold=5.951e+02, percent-clipped=4.0 +2023-02-06 22:37:04,310 INFO [train.py:901] (3/4) Epoch 19, batch 1300, loss[loss=0.2057, simple_loss=0.2755, pruned_loss=0.06793, over 8248.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2938, pruned_loss=0.06631, over 1617078.24 frames. 
], batch size: 22, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:40,718 INFO [train.py:901] (3/4) Epoch 19, batch 1350, loss[loss=0.2384, simple_loss=0.316, pruned_loss=0.08042, over 8668.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2947, pruned_loss=0.0667, over 1619874.81 frames. ], batch size: 34, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:37:53,742 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=146862.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:37:58,968 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-06 22:38:03,895 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.302e+02 2.844e+02 3.659e+02 6.626e+02, threshold=5.688e+02, percent-clipped=1.0 +2023-02-06 22:38:07,784 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=146882.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:15,240 INFO [train.py:901] (3/4) Epoch 19, batch 1400, loss[loss=0.1789, simple_loss=0.2504, pruned_loss=0.05367, over 7232.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2927, pruned_loss=0.06549, over 1617376.85 frames. ], batch size: 16, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:38:25,963 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=146907.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:38:43,246 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 22:38:52,626 INFO [train.py:901] (3/4) Epoch 19, batch 1450, loss[loss=0.2243, simple_loss=0.3098, pruned_loss=0.06938, over 8493.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2929, pruned_loss=0.06532, over 1618273.54 frames. ], batch size: 28, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:38:56,663 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-06 22:38:59,402 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146953.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:16,190 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.810e+02 2.362e+02 2.962e+02 3.993e+02 1.525e+03, threshold=5.923e+02, percent-clipped=6.0 +2023-02-06 22:39:22,647 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8179, 1.3296, 3.9756, 1.4941, 3.4959, 3.2869, 3.5701, 3.4785], + device='cuda:3'), covar=tensor([0.0674, 0.4620, 0.0630, 0.4001, 0.1261, 0.1125, 0.0732, 0.0761], + device='cuda:3'), in_proj_covar=tensor([0.0602, 0.0635, 0.0677, 0.0609, 0.0692, 0.0594, 0.0590, 0.0651], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:39:23,967 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=146988.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:39:27,289 INFO [train.py:901] (3/4) Epoch 19, batch 1500, loss[loss=0.215, simple_loss=0.3107, pruned_loss=0.05965, over 8471.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2933, pruned_loss=0.06534, over 1619336.01 frames. ], batch size: 25, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:03,257 INFO [train.py:901] (3/4) Epoch 19, batch 1550, loss[loss=0.1684, simple_loss=0.255, pruned_loss=0.04087, over 8040.00 frames. ], tot_loss[loss=0.2135, simple_loss=0.2945, pruned_loss=0.0662, over 1617612.48 frames. 
], batch size: 20, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:22,655 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:40:28,398 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.434e+02 2.984e+02 3.600e+02 8.495e+02, threshold=5.968e+02, percent-clipped=1.0 +2023-02-06 22:40:39,456 INFO [train.py:901] (3/4) Epoch 19, batch 1600, loss[loss=0.157, simple_loss=0.2389, pruned_loss=0.03757, over 7692.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2936, pruned_loss=0.06599, over 1617410.69 frames. ], batch size: 18, lr: 4.03e-03, grad_scale: 16.0 +2023-02-06 22:40:46,379 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147103.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:41:14,549 INFO [train.py:901] (3/4) Epoch 19, batch 1650, loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05671, over 8365.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2936, pruned_loss=0.06564, over 1617389.61 frames. ], batch size: 24, lr: 4.03e-03, grad_scale: 8.0 +2023-02-06 22:41:40,974 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.588e+02 2.354e+02 2.709e+02 3.474e+02 7.081e+02, threshold=5.418e+02, percent-clipped=1.0 +2023-02-06 22:41:51,226 INFO [train.py:901] (3/4) Epoch 19, batch 1700, loss[loss=0.2037, simple_loss=0.2819, pruned_loss=0.06282, over 7931.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2923, pruned_loss=0.06524, over 1613871.44 frames. ], batch size: 20, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:41:52,242 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2810, 1.9985, 2.6667, 2.2142, 2.6295, 2.2693, 1.9671, 1.4201], + device='cuda:3'), covar=tensor([0.5086, 0.4506, 0.1777, 0.3217, 0.2321, 0.2685, 0.1871, 0.4819], + device='cuda:3'), in_proj_covar=tensor([0.0936, 0.0953, 0.0788, 0.0917, 0.0984, 0.0869, 0.0730, 0.0810], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 22:42:00,572 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147206.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:25,937 INFO [train.py:901] (3/4) Epoch 19, batch 1750, loss[loss=0.218, simple_loss=0.286, pruned_loss=0.07494, over 7425.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2927, pruned_loss=0.06543, over 1613518.79 frames. 
], batch size: 17, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:42:37,958 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147259.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:42:41,411 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3247, 1.5336, 4.5012, 1.7842, 4.0033, 3.7327, 4.0653, 3.9743], + device='cuda:3'), covar=tensor([0.0629, 0.4704, 0.0547, 0.4224, 0.1059, 0.0999, 0.0597, 0.0677], + device='cuda:3'), in_proj_covar=tensor([0.0601, 0.0636, 0.0677, 0.0612, 0.0692, 0.0596, 0.0591, 0.0653], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:42:46,384 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6186, 4.6796, 4.1612, 1.9747, 4.1225, 4.2847, 4.1475, 4.1049], + device='cuda:3'), covar=tensor([0.0702, 0.0456, 0.0985, 0.4706, 0.0833, 0.0776, 0.1288, 0.0716], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0423, 0.0428, 0.0526, 0.0415, 0.0427, 0.0410, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:42:51,055 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.529e+02 3.043e+02 3.569e+02 7.736e+02, threshold=6.085e+02, percent-clipped=5.0 +2023-02-06 22:43:03,014 INFO [train.py:901] (3/4) Epoch 19, batch 1800, loss[loss=0.2497, simple_loss=0.3352, pruned_loss=0.08213, over 8580.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2932, pruned_loss=0.06631, over 1613527.12 frames. ], batch size: 31, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:22,541 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147321.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:24,605 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147324.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,349 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147328.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:27,659 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-06 22:43:37,466 INFO [train.py:901] (3/4) Epoch 19, batch 1850, loss[loss=0.2079, simple_loss=0.2969, pruned_loss=0.05949, over 8606.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2929, pruned_loss=0.06612, over 1617896.35 frames. ], batch size: 34, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:43:41,699 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147349.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:43:48,568 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147359.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:02,401 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.300e+02 2.823e+02 3.606e+02 1.006e+03, threshold=5.645e+02, percent-clipped=2.0 +2023-02-06 22:44:02,738 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-06 22:44:06,673 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:12,517 INFO [train.py:901] (3/4) Epoch 19, batch 1900, loss[loss=0.1888, simple_loss=0.2707, pruned_loss=0.05343, over 7657.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2924, pruned_loss=0.06581, over 1619990.29 frames. 
], batch size: 19, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:37,287 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147425.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:44,937 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-06 22:44:49,664 INFO [train.py:901] (3/4) Epoch 19, batch 1950, loss[loss=0.2447, simple_loss=0.3112, pruned_loss=0.08903, over 7409.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2913, pruned_loss=0.0648, over 1615467.71 frames. ], batch size: 17, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:44:55,965 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147452.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:44:56,513 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-06 22:45:13,743 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.289e+02 2.862e+02 3.830e+02 8.439e+02, threshold=5.724e+02, percent-clipped=6.0 +2023-02-06 22:45:15,250 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-06 22:45:24,868 INFO [train.py:901] (3/4) Epoch 19, batch 2000, loss[loss=0.254, simple_loss=0.3214, pruned_loss=0.09327, over 8250.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2917, pruned_loss=0.06509, over 1614761.35 frames. ], batch size: 49, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:01,777 INFO [train.py:901] (3/4) Epoch 19, batch 2050, loss[loss=0.264, simple_loss=0.3287, pruned_loss=0.09965, over 8116.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2919, pruned_loss=0.0654, over 1610124.62 frames. ], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:09,084 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.88 vs. limit=5.0 +2023-02-06 22:46:25,334 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147577.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:25,778 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.500e+02 2.918e+02 3.445e+02 6.516e+02, threshold=5.836e+02, percent-clipped=2.0 +2023-02-06 22:46:36,254 INFO [train.py:901] (3/4) Epoch 19, batch 2100, loss[loss=0.2196, simple_loss=0.308, pruned_loss=0.06559, over 8337.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2921, pruned_loss=0.06553, over 1611228.92 frames. ], batch size: 25, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:46:42,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147602.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:43,441 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147603.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:46:47,320 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.54 vs. limit=5.0 +2023-02-06 22:47:12,091 INFO [train.py:901] (3/4) Epoch 19, batch 2150, loss[loss=0.2031, simple_loss=0.2967, pruned_loss=0.05478, over 8284.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2919, pruned_loss=0.06523, over 1616067.64 frames. 
], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:47:28,883 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6189, 2.7647, 1.8508, 2.2570, 2.2561, 1.6024, 2.2068, 2.1647], + device='cuda:3'), covar=tensor([0.1460, 0.0320, 0.1095, 0.0689, 0.0773, 0.1423, 0.0894, 0.0920], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0234, 0.0325, 0.0303, 0.0301, 0.0331, 0.0341, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:47:31,900 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-06 22:47:33,564 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147672.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:36,914 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147677.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:47:37,423 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.398e+02 3.174e+02 3.852e+02 9.466e+02, threshold=6.348e+02, percent-clipped=6.0 +2023-02-06 22:47:47,720 INFO [train.py:901] (3/4) Epoch 19, batch 2200, loss[loss=0.2418, simple_loss=0.3216, pruned_loss=0.081, over 8590.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2918, pruned_loss=0.0654, over 1614156.52 frames. ], batch size: 31, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:04,720 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147718.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:21,945 INFO [train.py:901] (3/4) Epoch 19, batch 2250, loss[loss=0.2029, simple_loss=0.2936, pruned_loss=0.0561, over 8193.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2928, pruned_loss=0.06631, over 1613077.79 frames. ], batch size: 23, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:41,110 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147769.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:47,101 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.392e+02 3.089e+02 3.849e+02 9.613e+02, threshold=6.179e+02, percent-clipped=2.0 +2023-02-06 22:48:53,337 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147787.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:48:56,992 INFO [train.py:901] (3/4) Epoch 19, batch 2300, loss[loss=0.2321, simple_loss=0.3139, pruned_loss=0.07517, over 8237.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2913, pruned_loss=0.06566, over 1611446.26 frames. ], batch size: 24, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:48:58,974 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=147796.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:24,358 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147833.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:30,987 INFO [train.py:901] (3/4) Epoch 19, batch 2350, loss[loss=0.1903, simple_loss=0.2835, pruned_loss=0.04861, over 8466.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2923, pruned_loss=0.06595, over 1616105.32 frames. 
], batch size: 25, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:49:53,249 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147875.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:49:55,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.451e+02 2.984e+02 3.607e+02 1.132e+03, threshold=5.968e+02, percent-clipped=4.0 +2023-02-06 22:50:01,541 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147884.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:05,018 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4158, 2.5909, 1.6685, 2.2295, 2.1190, 1.3880, 2.0405, 2.1437], + device='cuda:3'), covar=tensor([0.1671, 0.0464, 0.1386, 0.0726, 0.0829, 0.1917, 0.1240, 0.1104], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0233, 0.0322, 0.0300, 0.0298, 0.0327, 0.0338, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:50:07,523 INFO [train.py:901] (3/4) Epoch 19, batch 2400, loss[loss=0.1922, simple_loss=0.2807, pruned_loss=0.0518, over 8242.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2928, pruned_loss=0.06649, over 1616385.85 frames. ], batch size: 24, lr: 4.02e-03, grad_scale: 8.0 +2023-02-06 22:50:17,641 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.74 vs. limit=5.0 +2023-02-06 22:50:20,040 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=147911.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:32,261 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=147929.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:50:41,466 INFO [train.py:901] (3/4) Epoch 19, batch 2450, loss[loss=0.2043, simple_loss=0.2893, pruned_loss=0.05963, over 8254.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2924, pruned_loss=0.06663, over 1614785.27 frames. ], batch size: 24, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:03,065 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=147974.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:05,437 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.599e+02 2.990e+02 3.557e+02 6.406e+02, threshold=5.981e+02, percent-clipped=1.0 +2023-02-06 22:51:15,599 INFO [train.py:901] (3/4) Epoch 19, batch 2500, loss[loss=0.1793, simple_loss=0.2622, pruned_loss=0.04815, over 7546.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2917, pruned_loss=0.06565, over 1616927.94 frames. 
], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:20,621 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=147999.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:37,737 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148021.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:51:41,237 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5903, 1.8823, 2.6904, 1.5111, 1.9188, 1.9048, 1.7766, 1.7907], + device='cuda:3'), covar=tensor([0.1707, 0.2194, 0.0796, 0.3922, 0.1696, 0.3046, 0.1865, 0.2256], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0577, 0.0548, 0.0626, 0.0634, 0.0583, 0.0518, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 22:51:52,315 INFO [train.py:901] (3/4) Epoch 19, batch 2550, loss[loss=0.1865, simple_loss=0.2529, pruned_loss=0.06006, over 7546.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2912, pruned_loss=0.06511, over 1618487.13 frames. ], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:51:52,571 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148043.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:09,288 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148068.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:52:15,629 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.379e+02 2.867e+02 3.516e+02 7.047e+02, threshold=5.734e+02, percent-clipped=3.0 +2023-02-06 22:52:17,130 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1578, 2.5964, 3.0119, 1.4964, 3.0418, 1.7512, 1.5628, 2.1448], + device='cuda:3'), covar=tensor([0.0871, 0.0380, 0.0287, 0.0929, 0.0449, 0.0993, 0.0921, 0.0517], + device='cuda:3'), in_proj_covar=tensor([0.0441, 0.0380, 0.0329, 0.0435, 0.0363, 0.0526, 0.0383, 0.0404], + device='cuda:3'), out_proj_covar=tensor([1.1934e-04, 1.0026e-04, 8.7018e-05, 1.1568e-04, 9.6228e-05, 1.5023e-04, + 1.0354e-04, 1.0795e-04], device='cuda:3') +2023-02-06 22:52:24,508 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2724, 3.8797, 2.5636, 3.1709, 3.0461, 1.8980, 3.0832, 3.2573], + device='cuda:3'), covar=tensor([0.1404, 0.0335, 0.1032, 0.0597, 0.0785, 0.1549, 0.1023, 0.0781], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0236, 0.0326, 0.0305, 0.0302, 0.0333, 0.0343, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:52:26,280 INFO [train.py:901] (3/4) Epoch 19, batch 2600, loss[loss=0.181, simple_loss=0.2731, pruned_loss=0.04444, over 8254.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2911, pruned_loss=0.0651, over 1617145.03 frames. ], batch size: 24, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:52:57,240 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148136.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:00,042 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148140.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:01,854 INFO [train.py:901] (3/4) Epoch 19, batch 2650, loss[loss=0.2345, simple_loss=0.3119, pruned_loss=0.07857, over 7934.00 frames. ], tot_loss[loss=0.2123, simple_loss=0.2926, pruned_loss=0.06594, over 1615817.06 frames. 
], batch size: 20, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:16,797 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:18,195 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148167.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:24,870 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148177.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:25,453 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 2.384e+02 2.853e+02 3.529e+02 7.126e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-06 22:53:27,177 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-06 22:53:35,198 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148192.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:53:35,672 INFO [train.py:901] (3/4) Epoch 19, batch 2700, loss[loss=0.1641, simple_loss=0.2347, pruned_loss=0.04679, over 7541.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2927, pruned_loss=0.06575, over 1617604.56 frames. ], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:53:38,591 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2588, 2.1611, 1.5433, 1.9157, 1.8476, 1.3644, 1.7821, 1.6606], + device='cuda:3'), covar=tensor([0.1352, 0.0372, 0.1234, 0.0519, 0.0711, 0.1511, 0.0890, 0.0954], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0237, 0.0327, 0.0306, 0.0302, 0.0333, 0.0344, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 22:53:54,176 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148219.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:03,924 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8784, 1.9935, 1.7642, 2.3344, 1.0981, 1.6122, 1.6870, 1.8839], + device='cuda:3'), covar=tensor([0.0704, 0.0713, 0.0939, 0.0435, 0.1110, 0.1297, 0.0868, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0198, 0.0252, 0.0214, 0.0208, 0.0248, 0.0256, 0.0213], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 22:54:11,924 INFO [train.py:901] (3/4) Epoch 19, batch 2750, loss[loss=0.1833, simple_loss=0.2526, pruned_loss=0.05701, over 7431.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2908, pruned_loss=0.06504, over 1616599.02 frames. ], batch size: 17, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:33,010 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148273.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:36,062 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.484e+02 2.895e+02 4.098e+02 9.310e+02, threshold=5.790e+02, percent-clipped=8.0 +2023-02-06 22:54:45,410 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:54:45,920 INFO [train.py:901] (3/4) Epoch 19, batch 2800, loss[loss=0.2503, simple_loss=0.3224, pruned_loss=0.08916, over 8603.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2922, pruned_loss=0.06631, over 1617596.09 frames. 
], batch size: 39, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:54:54,040 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148305.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:13,962 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148334.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:19,843 INFO [train.py:901] (3/4) Epoch 19, batch 2850, loss[loss=0.2036, simple_loss=0.2865, pruned_loss=0.06036, over 8301.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2924, pruned_loss=0.06599, over 1620944.46 frames. ], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:55:29,881 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.81 vs. limit=5.0 +2023-02-06 22:55:46,057 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.512e+02 2.931e+02 3.824e+02 7.566e+02, threshold=5.862e+02, percent-clipped=4.0 +2023-02-06 22:55:52,986 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148388.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:55,653 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148392.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:55:56,118 INFO [train.py:901] (3/4) Epoch 19, batch 2900, loss[loss=0.2341, simple_loss=0.3143, pruned_loss=0.07691, over 8690.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.294, pruned_loss=0.06664, over 1627339.40 frames. ], batch size: 39, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:10,221 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-06 22:56:12,664 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148417.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:29,357 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-06 22:56:29,949 INFO [train.py:901] (3/4) Epoch 19, batch 2950, loss[loss=0.2347, simple_loss=0.3089, pruned_loss=0.08026, over 8455.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2944, pruned_loss=0.06638, over 1623481.55 frames. ], batch size: 49, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:56:32,823 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148447.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:56:52,418 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9193, 1.5057, 1.7214, 1.3146, 0.9127, 1.4773, 1.6314, 1.3743], + device='cuda:3'), covar=tensor([0.0537, 0.1279, 0.1633, 0.1431, 0.0628, 0.1525, 0.0692, 0.0678], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 22:56:54,933 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.514e+02 3.009e+02 3.973e+02 7.443e+02, threshold=6.017e+02, percent-clipped=3.0 +2023-02-06 22:57:06,345 INFO [train.py:901] (3/4) Epoch 19, batch 3000, loss[loss=0.1887, simple_loss=0.283, pruned_loss=0.04726, over 8094.00 frames. ], tot_loss[loss=0.2137, simple_loss=0.2943, pruned_loss=0.06654, over 1617063.78 frames. 
], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:57:06,345 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 22:57:22,669 INFO [train.py:935] (3/4) Epoch 19, validation: loss=0.1752, simple_loss=0.2756, pruned_loss=0.03738, over 944034.00 frames. +2023-02-06 22:57:22,671 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 22:57:36,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.77 vs. limit=5.0 +2023-02-06 22:57:38,595 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148516.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:57:56,920 INFO [train.py:901] (3/4) Epoch 19, batch 3050, loss[loss=0.2456, simple_loss=0.3001, pruned_loss=0.09556, over 7783.00 frames. ], tot_loss[loss=0.2136, simple_loss=0.2943, pruned_loss=0.06646, over 1611781.42 frames. ], batch size: 19, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:00,732 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148548.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:17,764 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148573.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:21,054 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.398e+02 2.811e+02 3.727e+02 6.995e+02, threshold=5.622e+02, percent-clipped=3.0 +2023-02-06 22:58:30,234 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148590.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:58:32,157 INFO [train.py:901] (3/4) Epoch 19, batch 3100, loss[loss=0.1988, simple_loss=0.2844, pruned_loss=0.0566, over 7538.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2931, pruned_loss=0.06588, over 1612540.75 frames. ], batch size: 18, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:58:49,337 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148615.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:09,445 INFO [train.py:901] (3/4) Epoch 19, batch 3150, loss[loss=0.2544, simple_loss=0.3423, pruned_loss=0.08324, over 8295.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.292, pruned_loss=0.06538, over 1611269.28 frames. ], batch size: 23, lr: 4.01e-03, grad_scale: 8.0 +2023-02-06 22:59:10,320 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=148644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:13,428 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148649.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,357 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:26,420 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=148669.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 22:59:32,307 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.358e+02 3.073e+02 3.824e+02 9.523e+02, threshold=6.146e+02, percent-clipped=8.0 +2023-02-06 22:59:42,404 INFO [train.py:901] (3/4) Epoch 19, batch 3200, loss[loss=0.1754, simple_loss=0.2523, pruned_loss=0.04924, over 7529.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.293, pruned_loss=0.06613, over 1616454.26 frames. 
], batch size: 18, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:12,970 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=148734.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:19,660 INFO [train.py:901] (3/4) Epoch 19, batch 3250, loss[loss=0.2049, simple_loss=0.2819, pruned_loss=0.06398, over 8046.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2923, pruned_loss=0.06559, over 1617090.09 frames. ], batch size: 22, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:00:26,696 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7691, 1.6179, 1.7971, 1.6861, 1.0202, 1.6695, 2.1029, 1.9852], + device='cuda:3'), covar=tensor([0.0488, 0.1343, 0.1747, 0.1404, 0.0625, 0.1534, 0.0658, 0.0620], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0154, 0.0191, 0.0158, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:00:34,083 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148764.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:43,250 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.443e+02 3.073e+02 4.112e+02 8.183e+02, threshold=6.146e+02, percent-clipped=4.0 +2023-02-06 23:00:52,318 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148791.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:00:53,597 INFO [train.py:901] (3/4) Epoch 19, batch 3300, loss[loss=0.2312, simple_loss=0.3086, pruned_loss=0.07689, over 7550.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2917, pruned_loss=0.06452, over 1617937.87 frames. ], batch size: 18, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:28,241 INFO [train.py:901] (3/4) Epoch 19, batch 3350, loss[loss=0.2614, simple_loss=0.3278, pruned_loss=0.09753, over 8367.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2923, pruned_loss=0.06473, over 1619260.48 frames. ], batch size: 24, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:01:41,953 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=148860.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:01:53,953 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.291e+02 2.864e+02 3.449e+02 6.722e+02, threshold=5.728e+02, percent-clipped=1.0 +2023-02-06 23:02:00,138 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7269, 1.9879, 2.2387, 1.4356, 2.2228, 1.5926, 0.6926, 1.9461], + device='cuda:3'), covar=tensor([0.0524, 0.0304, 0.0237, 0.0515, 0.0385, 0.0835, 0.0743, 0.0286], + device='cuda:3'), in_proj_covar=tensor([0.0440, 0.0378, 0.0328, 0.0435, 0.0362, 0.0528, 0.0382, 0.0405], + device='cuda:3'), out_proj_covar=tensor([1.1921e-04, 9.9853e-05, 8.6656e-05, 1.1564e-04, 9.6069e-05, 1.5083e-04, + 1.0330e-04, 1.0814e-04], device='cuda:3') +2023-02-06 23:02:04,174 INFO [train.py:901] (3/4) Epoch 19, batch 3400, loss[loss=0.2279, simple_loss=0.3122, pruned_loss=0.07184, over 8464.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2925, pruned_loss=0.06508, over 1619048.36 frames. 
], batch size: 25, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:02:11,211 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1610, 1.8834, 2.5098, 2.0856, 2.4380, 2.1517, 1.8686, 1.3040], + device='cuda:3'), covar=tensor([0.5132, 0.4772, 0.1922, 0.3538, 0.2392, 0.2747, 0.1852, 0.5086], + device='cuda:3'), in_proj_covar=tensor([0.0925, 0.0947, 0.0781, 0.0913, 0.0976, 0.0867, 0.0726, 0.0807], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:02:13,160 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148906.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:02:31,055 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.61 vs. limit=5.0 +2023-02-06 23:02:38,070 INFO [train.py:901] (3/4) Epoch 19, batch 3450, loss[loss=0.2265, simple_loss=0.3062, pruned_loss=0.07343, over 8312.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2923, pruned_loss=0.06509, over 1617932.34 frames. ], batch size: 49, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:02:40,625 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-06 23:03:01,927 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=148975.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:04,405 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.340e+02 2.956e+02 3.727e+02 1.104e+03, threshold=5.912e+02, percent-clipped=3.0 +2023-02-06 23:03:14,141 INFO [train.py:901] (3/4) Epoch 19, batch 3500, loss[loss=0.2235, simple_loss=0.3007, pruned_loss=0.07319, over 8191.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2914, pruned_loss=0.06458, over 1616263.77 frames. ], batch size: 23, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:28,308 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149013.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:33,350 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149020.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:03:35,938 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-06 23:03:48,892 INFO [train.py:901] (3/4) Epoch 19, batch 3550, loss[loss=0.181, simple_loss=0.2466, pruned_loss=0.05773, over 7431.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2915, pruned_loss=0.06466, over 1614090.89 frames. ], batch size: 17, lr: 4.00e-03, grad_scale: 4.0 +2023-02-06 23:03:50,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149045.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,084 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149078.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:13,643 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.461e+02 3.087e+02 3.824e+02 7.251e+02, threshold=6.175e+02, percent-clipped=6.0 +2023-02-06 23:04:25,648 INFO [train.py:901] (3/4) Epoch 19, batch 3600, loss[loss=0.2148, simple_loss=0.2954, pruned_loss=0.06711, over 8661.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2912, pruned_loss=0.06478, over 1610226.80 frames. 
], batch size: 34, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:04:49,820 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149128.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:04:59,740 INFO [train.py:901] (3/4) Epoch 19, batch 3650, loss[loss=0.2399, simple_loss=0.3166, pruned_loss=0.08157, over 8539.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2907, pruned_loss=0.06433, over 1611486.81 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:13,229 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149162.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:24,388 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.296e+02 2.731e+02 3.488e+02 6.725e+02, threshold=5.462e+02, percent-clipped=1.0 +2023-02-06 23:05:30,734 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149187.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:35,229 INFO [train.py:901] (3/4) Epoch 19, batch 3700, loss[loss=0.2143, simple_loss=0.2991, pruned_loss=0.06474, over 8514.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.291, pruned_loss=0.0641, over 1613303.84 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:05:35,415 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149193.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:05:38,069 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:06:02,793 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149231.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:10,467 INFO [train.py:901] (3/4) Epoch 19, batch 3750, loss[loss=0.2184, simple_loss=0.2838, pruned_loss=0.07645, over 7251.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.06504, over 1614975.30 frames. ], batch size: 16, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:06:19,375 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149256.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:06:22,872 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-06 23:06:25,464 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4838, 1.9657, 2.9042, 1.3287, 2.1684, 1.8872, 1.6523, 2.0763], + device='cuda:3'), covar=tensor([0.1902, 0.2304, 0.0782, 0.4399, 0.1775, 0.3022, 0.2125, 0.2325], + device='cuda:3'), in_proj_covar=tensor([0.0517, 0.0581, 0.0552, 0.0628, 0.0638, 0.0588, 0.0521, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:06:34,566 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.258e+02 2.530e+02 3.028e+02 3.831e+02 7.632e+02, threshold=6.056e+02, percent-clipped=6.0 +2023-02-06 23:06:44,222 INFO [train.py:901] (3/4) Epoch 19, batch 3800, loss[loss=0.2258, simple_loss=0.3151, pruned_loss=0.06827, over 8469.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2928, pruned_loss=0.06513, over 1616470.48 frames. ], batch size: 25, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:20,713 INFO [train.py:901] (3/4) Epoch 19, batch 3850, loss[loss=0.2892, simple_loss=0.3506, pruned_loss=0.1139, over 6879.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2935, pruned_loss=0.0657, over 1613613.70 frames. 
], batch size: 71, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:21,609 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4383, 2.6254, 1.9297, 2.2381, 2.2031, 1.6450, 1.9826, 2.0134], + device='cuda:3'), covar=tensor([0.1567, 0.0368, 0.1092, 0.0596, 0.0735, 0.1393, 0.1175, 0.1041], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0239, 0.0327, 0.0304, 0.0300, 0.0331, 0.0342, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:07:42,390 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-06 23:07:45,096 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.409e+02 2.948e+02 3.728e+02 6.848e+02, threshold=5.896e+02, percent-clipped=3.0 +2023-02-06 23:07:46,036 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5838, 1.3477, 2.0644, 1.7102, 1.7308, 1.5204, 1.3506, 0.8186], + device='cuda:3'), covar=tensor([0.6602, 0.5530, 0.1980, 0.3533, 0.2934, 0.4234, 0.2907, 0.4698], + device='cuda:3'), in_proj_covar=tensor([0.0930, 0.0955, 0.0787, 0.0918, 0.0981, 0.0871, 0.0729, 0.0812], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:07:48,708 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149384.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:07:54,629 INFO [train.py:901] (3/4) Epoch 19, batch 3900, loss[loss=0.1759, simple_loss=0.2654, pruned_loss=0.04321, over 8578.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2938, pruned_loss=0.06587, over 1617694.96 frames. ], batch size: 31, lr: 4.00e-03, grad_scale: 8.0 +2023-02-06 23:07:58,841 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6101, 5.6948, 5.0898, 2.3480, 5.0463, 5.4073, 5.1635, 4.9841], + device='cuda:3'), covar=tensor([0.0525, 0.0392, 0.0934, 0.4364, 0.0634, 0.0648, 0.1069, 0.0630], + device='cuda:3'), in_proj_covar=tensor([0.0518, 0.0427, 0.0429, 0.0530, 0.0419, 0.0433, 0.0411, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:07:58,986 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6571, 2.2948, 3.8832, 1.5863, 2.8974, 2.1607, 1.9444, 2.7889], + device='cuda:3'), covar=tensor([0.1885, 0.2303, 0.0807, 0.4132, 0.1821, 0.3120, 0.1991, 0.2450], + device='cuda:3'), in_proj_covar=tensor([0.0514, 0.0581, 0.0551, 0.0625, 0.0636, 0.0585, 0.0519, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:08:06,571 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149409.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:07,213 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6477, 1.4323, 4.8894, 1.8457, 4.2301, 3.9868, 4.3809, 4.2226], + device='cuda:3'), covar=tensor([0.0630, 0.5065, 0.0469, 0.4074, 0.1180, 0.0956, 0.0617, 0.0698], + device='cuda:3'), in_proj_covar=tensor([0.0600, 0.0635, 0.0676, 0.0608, 0.0686, 0.0593, 0.0584, 0.0649], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:08:27,992 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4955, 1.7287, 1.8209, 1.1951, 1.9142, 1.3211, 0.4503, 1.6795], + 
device='cuda:3'), covar=tensor([0.0495, 0.0368, 0.0294, 0.0543, 0.0425, 0.0925, 0.0808, 0.0264], + device='cuda:3'), in_proj_covar=tensor([0.0440, 0.0381, 0.0329, 0.0438, 0.0363, 0.0529, 0.0383, 0.0406], + device='cuda:3'), out_proj_covar=tensor([1.1911e-04, 1.0064e-04, 8.6970e-05, 1.1644e-04, 9.6128e-05, 1.5106e-04, + 1.0364e-04, 1.0857e-04], device='cuda:3') +2023-02-06 23:08:31,940 INFO [train.py:901] (3/4) Epoch 19, batch 3950, loss[loss=0.1892, simple_loss=0.2558, pruned_loss=0.06134, over 7264.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2925, pruned_loss=0.06485, over 1614744.72 frames. ], batch size: 16, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:08:36,337 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=149449.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:53,030 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=149474.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:08:56,233 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.497e+02 2.881e+02 4.050e+02 6.266e+02, threshold=5.763e+02, percent-clipped=1.0 +2023-02-06 23:09:05,724 INFO [train.py:901] (3/4) Epoch 19, batch 4000, loss[loss=0.1817, simple_loss=0.2626, pruned_loss=0.05044, over 7811.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2916, pruned_loss=0.06443, over 1612431.95 frames. ], batch size: 19, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:09:32,416 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149532.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:09:40,134 INFO [train.py:901] (3/4) Epoch 19, batch 4050, loss[loss=0.2035, simple_loss=0.2906, pruned_loss=0.05818, over 8644.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2928, pruned_loss=0.06495, over 1616624.63 frames. ], batch size: 34, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:05,796 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.470e+02 3.003e+02 4.246e+02 8.728e+02, threshold=6.007e+02, percent-clipped=8.0 +2023-02-06 23:10:08,590 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149583.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:10:15,179 INFO [train.py:901] (3/4) Epoch 19, batch 4100, loss[loss=0.2184, simple_loss=0.2763, pruned_loss=0.08024, over 7789.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.293, pruned_loss=0.06507, over 1613722.81 frames. ], batch size: 19, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:49,879 INFO [train.py:901] (3/4) Epoch 19, batch 4150, loss[loss=0.2007, simple_loss=0.2921, pruned_loss=0.0547, over 8615.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2916, pruned_loss=0.06387, over 1611313.49 frames. 
], batch size: 31, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:10:58,400 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2780, 1.2979, 1.5416, 1.2076, 0.7075, 1.3897, 1.2327, 1.1507], + device='cuda:3'), covar=tensor([0.0553, 0.1250, 0.1669, 0.1443, 0.0573, 0.1484, 0.0698, 0.0644], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0161, 0.0113, 0.0140], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:11:16,655 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.273e+02 2.791e+02 3.594e+02 5.057e+02, threshold=5.582e+02, percent-clipped=0.0 +2023-02-06 23:11:26,114 INFO [train.py:901] (3/4) Epoch 19, batch 4200, loss[loss=0.2608, simple_loss=0.3378, pruned_loss=0.09189, over 8514.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2908, pruned_loss=0.06342, over 1611296.62 frames. ], batch size: 28, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:11:36,588 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-06 23:11:59,494 INFO [train.py:901] (3/4) Epoch 19, batch 4250, loss[loss=0.1903, simple_loss=0.2836, pruned_loss=0.04853, over 8032.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2898, pruned_loss=0.0627, over 1610741.39 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:12:00,910 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-06 23:12:14,479 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=149764.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:12:25,319 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.327e+02 2.444e+02 3.025e+02 3.928e+02 1.033e+03, threshold=6.050e+02, percent-clipped=5.0 +2023-02-06 23:12:35,589 INFO [train.py:901] (3/4) Epoch 19, batch 4300, loss[loss=0.2217, simple_loss=0.3215, pruned_loss=0.06099, over 8341.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2912, pruned_loss=0.06342, over 1611840.20 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:04,942 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3862, 1.3385, 1.7475, 1.2651, 1.0806, 1.7373, 0.2660, 1.1047], + device='cuda:3'), covar=tensor([0.1892, 0.1341, 0.0424, 0.0963, 0.2865, 0.0470, 0.2133, 0.1318], + device='cuda:3'), in_proj_covar=tensor([0.0186, 0.0195, 0.0124, 0.0222, 0.0272, 0.0133, 0.0171, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 23:13:10,090 INFO [train.py:901] (3/4) Epoch 19, batch 4350, loss[loss=0.208, simple_loss=0.2836, pruned_loss=0.06622, over 8482.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2908, pruned_loss=0.06369, over 1608777.15 frames. ], batch size: 27, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:13:33,152 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-06 23:13:33,237 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149876.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:13:35,197 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.416e+02 2.972e+02 3.761e+02 1.184e+03, threshold=5.944e+02, percent-clipped=4.0 +2023-02-06 23:13:44,579 INFO [train.py:901] (3/4) Epoch 19, batch 4400, loss[loss=0.2163, simple_loss=0.3044, pruned_loss=0.06405, over 8288.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2909, pruned_loss=0.06393, over 1607348.29 frames. ], batch size: 23, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:09,977 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=149927.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:11,337 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5322, 2.3720, 1.7594, 2.1458, 2.0457, 1.6167, 1.8867, 1.8504], + device='cuda:3'), covar=tensor([0.1194, 0.0330, 0.1055, 0.0495, 0.0592, 0.1227, 0.0809, 0.0870], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0239, 0.0330, 0.0307, 0.0302, 0.0334, 0.0344, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:14:14,577 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-06 23:14:20,881 INFO [train.py:901] (3/4) Epoch 19, batch 4450, loss[loss=0.2344, simple_loss=0.3064, pruned_loss=0.08124, over 8031.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2928, pruned_loss=0.06493, over 1615025.87 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:14:23,726 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4217, 2.3785, 1.6780, 2.1683, 2.0495, 1.4579, 1.8511, 1.8228], + device='cuda:3'), covar=tensor([0.1437, 0.0385, 0.1286, 0.0534, 0.0671, 0.1532, 0.1009, 0.0979], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0239, 0.0331, 0.0307, 0.0303, 0.0334, 0.0345, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:14:39,765 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3122, 1.3908, 1.3032, 1.8059, 0.7827, 1.2124, 1.2472, 1.4300], + device='cuda:3'), covar=tensor([0.0954, 0.0833, 0.1055, 0.0521, 0.1118, 0.1469, 0.0828, 0.0745], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0199, 0.0250, 0.0215, 0.0207, 0.0249, 0.0255, 0.0212], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 23:14:44,878 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.522e+02 2.925e+02 4.193e+02 1.036e+03, threshold=5.849e+02, percent-clipped=7.0 +2023-02-06 23:14:53,315 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=149991.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:14:54,467 INFO [train.py:901] (3/4) Epoch 19, batch 4500, loss[loss=0.2162, simple_loss=0.3091, pruned_loss=0.0617, over 8327.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2915, pruned_loss=0.06451, over 1615697.28 frames. ], batch size: 25, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:08,402 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-06 23:15:31,621 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150042.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:15:32,129 INFO [train.py:901] (3/4) Epoch 19, batch 4550, loss[loss=0.2216, simple_loss=0.306, pruned_loss=0.06855, over 8247.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2914, pruned_loss=0.06441, over 1617317.06 frames. ], batch size: 24, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:15:35,177 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 23:15:56,356 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.399e+02 2.811e+02 3.428e+02 5.502e+02, threshold=5.622e+02, percent-clipped=0.0 +2023-02-06 23:16:05,758 INFO [train.py:901] (3/4) Epoch 19, batch 4600, loss[loss=0.1833, simple_loss=0.2613, pruned_loss=0.05269, over 7653.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.29, pruned_loss=0.06374, over 1608469.24 frames. ], batch size: 19, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:16:08,477 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150097.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:16:15,971 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150108.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:16:41,502 INFO [train.py:901] (3/4) Epoch 19, batch 4650, loss[loss=0.1675, simple_loss=0.2611, pruned_loss=0.03699, over 8027.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2896, pruned_loss=0.06352, over 1608336.90 frames. ], batch size: 22, lr: 3.99e-03, grad_scale: 8.0 +2023-02-06 23:16:47,967 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6196, 1.4860, 1.7236, 1.3370, 0.7963, 1.5379, 1.5824, 1.3919], + device='cuda:3'), covar=tensor([0.0546, 0.1242, 0.1630, 0.1446, 0.0579, 0.1446, 0.0659, 0.0647], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0159, 0.0100, 0.0162, 0.0113, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:17:06,556 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.474e+02 2.856e+02 3.464e+02 8.049e+02, threshold=5.712e+02, percent-clipped=3.0 +2023-02-06 23:17:13,069 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 23:17:16,085 INFO [train.py:901] (3/4) Epoch 19, batch 4700, loss[loss=0.1795, simple_loss=0.2591, pruned_loss=0.04995, over 8253.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2889, pruned_loss=0.06354, over 1598375.69 frames. ], batch size: 22, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:22,191 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 23:17:36,661 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150223.0, num_to_drop=1, layers_to_drop={1} +2023-02-06 23:17:50,822 INFO [train.py:901] (3/4) Epoch 19, batch 4750, loss[loss=0.2395, simple_loss=0.3267, pruned_loss=0.07612, over 8486.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2901, pruned_loss=0.06407, over 1602992.67 frames. 
], batch size: 28, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:17:53,804 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150247.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:12,325 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150272.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:13,463 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-06 23:18:15,512 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-06 23:18:16,850 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.315e+02 2.829e+02 3.523e+02 6.730e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-06 23:18:23,094 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2229, 1.8051, 3.2996, 1.3875, 2.2597, 3.5792, 3.7129, 3.0720], + device='cuda:3'), covar=tensor([0.1018, 0.1502, 0.0392, 0.2187, 0.1112, 0.0226, 0.0518, 0.0572], + device='cuda:3'), in_proj_covar=tensor([0.0289, 0.0317, 0.0287, 0.0312, 0.0301, 0.0265, 0.0406, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:18:25,835 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150292.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:26,364 INFO [train.py:901] (3/4) Epoch 19, batch 4800, loss[loss=0.2208, simple_loss=0.3096, pruned_loss=0.06603, over 8461.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2892, pruned_loss=0.06295, over 1608182.58 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:18:29,958 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150298.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:46,558 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150323.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:18:52,217 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-06 23:19:00,006 INFO [train.py:901] (3/4) Epoch 19, batch 4850, loss[loss=0.2434, simple_loss=0.3139, pruned_loss=0.08641, over 8103.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2906, pruned_loss=0.06382, over 1612119.05 frames. ], batch size: 23, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:19:05,347 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-06 23:19:27,015 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.476e+02 2.899e+02 3.621e+02 6.951e+02, threshold=5.799e+02, percent-clipped=6.0 +2023-02-06 23:19:36,191 INFO [train.py:901] (3/4) Epoch 19, batch 4900, loss[loss=0.186, simple_loss=0.2624, pruned_loss=0.05478, over 7513.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2894, pruned_loss=0.06334, over 1612893.12 frames. 
], batch size: 18, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:19:39,851 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6274, 1.6635, 1.7622, 1.4573, 1.8032, 1.4709, 0.9245, 1.6055], + device='cuda:3'), covar=tensor([0.0369, 0.0281, 0.0197, 0.0323, 0.0274, 0.0546, 0.0609, 0.0197], + device='cuda:3'), in_proj_covar=tensor([0.0439, 0.0379, 0.0328, 0.0435, 0.0361, 0.0523, 0.0384, 0.0406], + device='cuda:3'), out_proj_covar=tensor([1.1875e-04, 1.0024e-04, 8.6658e-05, 1.1544e-04, 9.5794e-05, 1.4929e-04, + 1.0392e-04, 1.0839e-04], device='cuda:3') +2023-02-06 23:20:07,723 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150441.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:20:09,001 INFO [train.py:901] (3/4) Epoch 19, batch 4950, loss[loss=0.1912, simple_loss=0.2789, pruned_loss=0.05173, over 8461.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2911, pruned_loss=0.06502, over 1613115.57 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:33,700 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.356e+02 2.775e+02 3.573e+02 1.033e+03, threshold=5.550e+02, percent-clipped=4.0 +2023-02-06 23:20:33,934 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150479.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:20:43,986 INFO [train.py:901] (3/4) Epoch 19, batch 5000, loss[loss=0.1841, simple_loss=0.2701, pruned_loss=0.04904, over 8072.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2903, pruned_loss=0.06442, over 1613514.08 frames. ], batch size: 21, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:20:52,078 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150504.0, num_to_drop=1, layers_to_drop={0} +2023-02-06 23:21:07,769 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.43 vs. limit=5.0 +2023-02-06 23:21:16,239 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.64 vs. limit=5.0 +2023-02-06 23:21:17,801 INFO [train.py:901] (3/4) Epoch 19, batch 5050, loss[loss=0.1827, simple_loss=0.2651, pruned_loss=0.05014, over 7934.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2905, pruned_loss=0.06461, over 1611177.93 frames. ], batch size: 20, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:21:25,939 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150555.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:26,624 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150556.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:21:40,931 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-06 23:21:41,605 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.501e+02 3.000e+02 3.972e+02 7.212e+02, threshold=5.999e+02, percent-clipped=3.0 +2023-02-06 23:21:51,771 INFO [train.py:901] (3/4) Epoch 19, batch 5100, loss[loss=0.2293, simple_loss=0.3075, pruned_loss=0.07553, over 8578.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2899, pruned_loss=0.06464, over 1607700.24 frames. 
], batch size: 31, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:18,178 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9825, 1.6936, 3.3769, 1.3127, 2.3825, 3.6323, 3.7270, 3.1182], + device='cuda:3'), covar=tensor([0.1198, 0.1603, 0.0313, 0.2263, 0.0945, 0.0259, 0.0592, 0.0582], + device='cuda:3'), in_proj_covar=tensor([0.0290, 0.0317, 0.0287, 0.0312, 0.0301, 0.0265, 0.0406, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-06 23:22:23,321 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150636.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:22:25,331 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2362, 1.4159, 3.3611, 1.0884, 2.9856, 2.8225, 3.0523, 2.9457], + device='cuda:3'), covar=tensor([0.0802, 0.4133, 0.0773, 0.4029, 0.1357, 0.1113, 0.0777, 0.0976], + device='cuda:3'), in_proj_covar=tensor([0.0598, 0.0633, 0.0672, 0.0610, 0.0685, 0.0589, 0.0591, 0.0650], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:22:27,867 INFO [train.py:901] (3/4) Epoch 19, batch 5150, loss[loss=0.2222, simple_loss=0.2986, pruned_loss=0.07286, over 8074.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2907, pruned_loss=0.06499, over 1611736.66 frames. ], batch size: 21, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:22:51,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.510e+02 3.215e+02 4.688e+02 9.098e+02, threshold=6.429e+02, percent-clipped=11.0 +2023-02-06 23:23:01,327 INFO [train.py:901] (3/4) Epoch 19, batch 5200, loss[loss=0.2065, simple_loss=0.288, pruned_loss=0.06256, over 8504.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2916, pruned_loss=0.06547, over 1608084.03 frames. ], batch size: 26, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:38,106 INFO [train.py:901] (3/4) Epoch 19, batch 5250, loss[loss=0.2097, simple_loss=0.283, pruned_loss=0.06819, over 7785.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2913, pruned_loss=0.06527, over 1609927.38 frames. ], batch size: 19, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:23:40,643 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-06 23:23:42,646 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:23:43,387 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=150751.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:00,396 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5871, 2.4971, 1.7690, 2.2051, 2.0994, 1.4845, 2.1252, 2.0513], + device='cuda:3'), covar=tensor([0.1348, 0.0428, 0.1198, 0.0613, 0.0724, 0.1581, 0.0889, 0.1064], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0236, 0.0326, 0.0305, 0.0299, 0.0330, 0.0340, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:24:01,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.351e+02 2.565e+02 3.080e+02 4.191e+02 1.354e+03, threshold=6.160e+02, percent-clipped=9.0 +2023-02-06 23:24:10,893 INFO [train.py:901] (3/4) Epoch 19, batch 5300, loss[loss=0.2229, simple_loss=0.3138, pruned_loss=0.06593, over 8137.00 frames. 
], tot_loss[loss=0.2132, simple_loss=0.2936, pruned_loss=0.06645, over 1615366.74 frames. ], batch size: 22, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:24:11,729 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8172, 1.4750, 3.9499, 1.4206, 3.4345, 3.2646, 3.5443, 3.4128], + device='cuda:3'), covar=tensor([0.0675, 0.4661, 0.0694, 0.4299, 0.1374, 0.1092, 0.0705, 0.0864], + device='cuda:3'), in_proj_covar=tensor([0.0600, 0.0636, 0.0672, 0.0611, 0.0689, 0.0591, 0.0592, 0.0651], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:24:12,434 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7221, 2.1246, 4.2063, 1.5310, 3.2035, 2.2873, 1.7021, 2.9496], + device='cuda:3'), covar=tensor([0.1983, 0.2842, 0.0741, 0.4708, 0.1621, 0.3149, 0.2457, 0.2232], + device='cuda:3'), in_proj_covar=tensor([0.0517, 0.0584, 0.0554, 0.0633, 0.0642, 0.0589, 0.0523, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:24:13,044 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1415, 1.6446, 1.3489, 1.7330, 1.3320, 1.1872, 1.3971, 1.5043], + device='cuda:3'), covar=tensor([0.0908, 0.0470, 0.1423, 0.0461, 0.0778, 0.1671, 0.0863, 0.0669], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0236, 0.0327, 0.0305, 0.0300, 0.0330, 0.0341, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:24:13,364 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-06 23:24:23,656 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=150812.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:31,992 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-06 23:24:34,793 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-06 23:24:41,666 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=150837.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:24:46,253 INFO [train.py:901] (3/4) Epoch 19, batch 5350, loss[loss=0.2606, simple_loss=0.3372, pruned_loss=0.09198, over 8440.00 frames. ], tot_loss[loss=0.2139, simple_loss=0.2941, pruned_loss=0.06689, over 1612484.13 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 8.0 +2023-02-06 23:25:10,968 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.499e+02 2.979e+02 3.723e+02 8.863e+02, threshold=5.959e+02, percent-clipped=1.0 +2023-02-06 23:25:20,511 INFO [train.py:901] (3/4) Epoch 19, batch 5400, loss[loss=0.2259, simple_loss=0.312, pruned_loss=0.06994, over 8462.00 frames. ], tot_loss[loss=0.2141, simple_loss=0.2943, pruned_loss=0.06693, over 1609748.87 frames. ], batch size: 25, lr: 3.98e-03, grad_scale: 16.0 +2023-02-06 23:25:24,749 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=150899.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:25:25,114 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. 
limit=2.0 +2023-02-06 23:25:37,930 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=150918.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:25:55,449 INFO [train.py:901] (3/4) Epoch 19, batch 5450, loss[loss=0.2214, simple_loss=0.3021, pruned_loss=0.07042, over 8426.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2925, pruned_loss=0.0663, over 1608512.45 frames. ], batch size: 27, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:26:02,824 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6298, 1.8750, 2.0850, 1.3774, 2.1803, 1.4566, 0.6699, 1.9042], + device='cuda:3'), covar=tensor([0.0601, 0.0333, 0.0267, 0.0612, 0.0393, 0.0929, 0.0871, 0.0304], + device='cuda:3'), in_proj_covar=tensor([0.0441, 0.0381, 0.0331, 0.0436, 0.0364, 0.0525, 0.0384, 0.0406], + device='cuda:3'), out_proj_covar=tensor([1.1939e-04, 1.0089e-04, 8.7397e-05, 1.1569e-04, 9.6622e-05, 1.4985e-04, + 1.0410e-04, 1.0853e-04], device='cuda:3') +2023-02-06 23:26:21,492 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3495, 1.5410, 2.1960, 1.2632, 1.6400, 1.6073, 1.3806, 1.6126], + device='cuda:3'), covar=tensor([0.1954, 0.2747, 0.0817, 0.4487, 0.1761, 0.3416, 0.2338, 0.2078], + device='cuda:3'), in_proj_covar=tensor([0.0515, 0.0582, 0.0552, 0.0630, 0.0639, 0.0588, 0.0522, 0.0628], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:26:22,613 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.347e+02 2.658e+02 3.430e+02 7.604e+02, threshold=5.316e+02, percent-clipped=2.0 +2023-02-06 23:26:25,524 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. limit=5.0 +2023-02-06 23:26:28,442 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-06 23:26:31,922 INFO [train.py:901] (3/4) Epoch 19, batch 5500, loss[loss=0.1978, simple_loss=0.2921, pruned_loss=0.05177, over 8749.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2929, pruned_loss=0.06617, over 1612352.20 frames. ], batch size: 40, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:26:33,480 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5029, 2.3387, 3.3298, 2.5584, 3.0782, 2.4826, 2.1823, 1.9220], + device='cuda:3'), covar=tensor([0.5085, 0.4842, 0.1625, 0.3465, 0.2349, 0.2863, 0.1890, 0.5207], + device='cuda:3'), in_proj_covar=tensor([0.0925, 0.0955, 0.0782, 0.0916, 0.0977, 0.0867, 0.0733, 0.0810], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:26:41,796 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151007.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:26:46,653 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151014.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:26:58,978 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151032.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:27:03,484 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-06 23:27:06,206 INFO [train.py:901] (3/4) Epoch 19, batch 5550, loss[loss=0.2303, simple_loss=0.3115, pruned_loss=0.07458, over 8587.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2928, pruned_loss=0.06609, over 1609315.19 frames. 
], batch size: 31, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:27:16,585 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4901, 1.7577, 1.8457, 1.2219, 1.9088, 1.3217, 0.4638, 1.7428], + device='cuda:3'), covar=tensor([0.0540, 0.0351, 0.0287, 0.0510, 0.0417, 0.0911, 0.0832, 0.0238], + device='cuda:3'), in_proj_covar=tensor([0.0443, 0.0383, 0.0332, 0.0438, 0.0366, 0.0527, 0.0385, 0.0408], + device='cuda:3'), out_proj_covar=tensor([1.2002e-04, 1.0132e-04, 8.7803e-05, 1.1629e-04, 9.6974e-05, 1.5046e-04, + 1.0436e-04, 1.0897e-04], device='cuda:3') +2023-02-06 23:27:29,975 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6552, 2.2595, 1.6530, 3.6420, 1.7133, 1.4968, 2.2255, 2.5339], + device='cuda:3'), covar=tensor([0.1596, 0.1186, 0.2026, 0.0408, 0.1418, 0.2116, 0.1242, 0.0959], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0199, 0.0249, 0.0214, 0.0207, 0.0250, 0.0254, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 23:27:32,505 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.454e+02 3.027e+02 4.195e+02 6.901e+02, threshold=6.054e+02, percent-clipped=7.0 +2023-02-06 23:27:42,415 INFO [train.py:901] (3/4) Epoch 19, batch 5600, loss[loss=0.2131, simple_loss=0.2934, pruned_loss=0.06642, over 8291.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2921, pruned_loss=0.06564, over 1609635.00 frames. ], batch size: 23, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:27:43,144 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151094.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:28:15,312 INFO [train.py:901] (3/4) Epoch 19, batch 5650, loss[loss=0.2055, simple_loss=0.2732, pruned_loss=0.06888, over 7540.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.292, pruned_loss=0.06545, over 1610432.13 frames. ], batch size: 18, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:28:22,682 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.64 vs. limit=2.0 +2023-02-06 23:28:31,577 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-06 23:28:39,660 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.685e+02 3.149e+02 3.866e+02 8.044e+02, threshold=6.298e+02, percent-clipped=3.0 +2023-02-06 23:28:50,395 INFO [train.py:901] (3/4) Epoch 19, batch 5700, loss[loss=0.1993, simple_loss=0.2806, pruned_loss=0.05896, over 8254.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.292, pruned_loss=0.06552, over 1614222.58 frames. 
], batch size: 24, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:28:57,644 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151202.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:02,319 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151209.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:09,074 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0734, 2.4482, 2.5644, 1.6031, 2.7467, 1.8066, 1.6114, 2.0833], + device='cuda:3'), covar=tensor([0.0673, 0.0364, 0.0271, 0.0643, 0.0414, 0.0741, 0.0793, 0.0493], + device='cuda:3'), in_proj_covar=tensor([0.0444, 0.0383, 0.0332, 0.0439, 0.0368, 0.0527, 0.0385, 0.0408], + device='cuda:3'), out_proj_covar=tensor([1.2013e-04, 1.0119e-04, 8.7776e-05, 1.1647e-04, 9.7671e-05, 1.5051e-04, + 1.0412e-04, 1.0901e-04], device='cuda:3') +2023-02-06 23:29:15,623 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2880, 1.3546, 3.3717, 1.0406, 3.0103, 2.8033, 3.0875, 3.0009], + device='cuda:3'), covar=tensor([0.0754, 0.4118, 0.0776, 0.4073, 0.1300, 0.1087, 0.0726, 0.0835], + device='cuda:3'), in_proj_covar=tensor([0.0600, 0.0635, 0.0673, 0.0607, 0.0686, 0.0588, 0.0592, 0.0650], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:29:24,756 INFO [train.py:901] (3/4) Epoch 19, batch 5750, loss[loss=0.2081, simple_loss=0.2957, pruned_loss=0.06026, over 8105.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2916, pruned_loss=0.06517, over 1615994.65 frames. ], batch size: 23, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:29:30,822 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4839, 1.4968, 1.7963, 1.3779, 1.1574, 1.8473, 0.1957, 1.1724], + device='cuda:3'), covar=tensor([0.1950, 0.1412, 0.0430, 0.0975, 0.2825, 0.0477, 0.2080, 0.1249], + device='cuda:3'), in_proj_covar=tensor([0.0186, 0.0194, 0.0123, 0.0221, 0.0268, 0.0133, 0.0168, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 23:29:36,097 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-06 23:29:37,495 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151262.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:43,021 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151270.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:29:48,610 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.389e+02 2.918e+02 3.727e+02 7.769e+02, threshold=5.836e+02, percent-clipped=3.0 +2023-02-06 23:29:58,863 INFO [train.py:901] (3/4) Epoch 19, batch 5800, loss[loss=0.1992, simple_loss=0.2654, pruned_loss=0.06648, over 7253.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2915, pruned_loss=0.06479, over 1614455.76 frames. 
], batch size: 16, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:30:00,357 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151295.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:04,356 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151300.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:05,089 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6947, 2.2727, 4.0656, 1.4925, 3.0428, 2.2478, 1.8402, 2.8316], + device='cuda:3'), covar=tensor([0.2004, 0.2787, 0.0797, 0.4937, 0.1804, 0.3286, 0.2360, 0.2523], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0580, 0.0552, 0.0629, 0.0637, 0.0587, 0.0521, 0.0626], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:30:16,599 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151316.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:34,588 INFO [train.py:901] (3/4) Epoch 19, batch 5850, loss[loss=0.2084, simple_loss=0.2942, pruned_loss=0.06129, over 8021.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2918, pruned_loss=0.06459, over 1613675.96 frames. ], batch size: 22, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:30:57,554 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151377.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:30:58,652 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.316e+02 2.176e+02 2.714e+02 3.221e+02 1.387e+03, threshold=5.429e+02, percent-clipped=3.0 +2023-02-06 23:31:08,064 INFO [train.py:901] (3/4) Epoch 19, batch 5900, loss[loss=0.1485, simple_loss=0.2272, pruned_loss=0.03485, over 7420.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2912, pruned_loss=0.06402, over 1612998.11 frames. ], batch size: 17, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:31:15,412 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-06 23:31:44,473 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0852, 2.4011, 4.5415, 1.7514, 3.3520, 2.5307, 2.1674, 3.2864], + device='cuda:3'), covar=tensor([0.1625, 0.2612, 0.0661, 0.4413, 0.1514, 0.2922, 0.2007, 0.2191], + device='cuda:3'), in_proj_covar=tensor([0.0516, 0.0583, 0.0555, 0.0632, 0.0640, 0.0590, 0.0523, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:31:44,905 INFO [train.py:901] (3/4) Epoch 19, batch 5950, loss[loss=0.1633, simple_loss=0.2384, pruned_loss=0.04413, over 7275.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2926, pruned_loss=0.06486, over 1619423.16 frames. ], batch size: 16, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:31:59,863 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151465.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:32:09,190 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.424e+02 3.104e+02 3.851e+02 8.156e+02, threshold=6.208e+02, percent-clipped=3.0 +2023-02-06 23:32:16,814 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151490.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:32:18,557 INFO [train.py:901] (3/4) Epoch 19, batch 6000, loss[loss=0.2491, simple_loss=0.3209, pruned_loss=0.08866, over 8485.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2936, pruned_loss=0.06573, over 1619370.81 frames. 
], batch size: 28, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:32:18,557 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 23:32:32,009 INFO [train.py:935] (3/4) Epoch 19, validation: loss=0.1763, simple_loss=0.2764, pruned_loss=0.03805, over 944034.00 frames. +2023-02-06 23:32:32,011 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 23:32:56,714 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5008, 1.7739, 1.8603, 1.2415, 1.9487, 1.3027, 0.5707, 1.7151], + device='cuda:3'), covar=tensor([0.0619, 0.0408, 0.0344, 0.0614, 0.0500, 0.1053, 0.0896, 0.0357], + device='cuda:3'), in_proj_covar=tensor([0.0444, 0.0380, 0.0332, 0.0439, 0.0368, 0.0527, 0.0386, 0.0406], + device='cuda:3'), out_proj_covar=tensor([1.2020e-04, 1.0041e-04, 8.7768e-05, 1.1665e-04, 9.7573e-05, 1.5037e-04, + 1.0445e-04, 1.0841e-04], device='cuda:3') +2023-02-06 23:33:00,162 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8227, 1.8076, 2.4950, 1.6195, 1.2856, 2.4314, 0.3938, 1.4154], + device='cuda:3'), covar=tensor([0.1735, 0.1215, 0.0270, 0.1443, 0.2930, 0.0406, 0.2317, 0.1361], + device='cuda:3'), in_proj_covar=tensor([0.0185, 0.0193, 0.0123, 0.0221, 0.0268, 0.0133, 0.0168, 0.0187], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 23:33:06,942 INFO [train.py:901] (3/4) Epoch 19, batch 6050, loss[loss=0.2381, simple_loss=0.3069, pruned_loss=0.08462, over 7927.00 frames. ], tot_loss[loss=0.2138, simple_loss=0.2947, pruned_loss=0.06641, over 1624047.43 frames. ], batch size: 20, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:33:09,101 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151546.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:33:32,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.535e+02 3.172e+02 3.888e+02 8.825e+02, threshold=6.343e+02, percent-clipped=4.0 +2023-02-06 23:33:42,767 INFO [train.py:901] (3/4) Epoch 19, batch 6100, loss[loss=0.1804, simple_loss=0.2768, pruned_loss=0.04205, over 8254.00 frames. ], tot_loss[loss=0.213, simple_loss=0.294, pruned_loss=0.06605, over 1622705.88 frames. ], batch size: 24, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:33:56,050 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1017, 1.2608, 4.5059, 1.8547, 2.4346, 5.1034, 5.1315, 4.4278], + device='cuda:3'), covar=tensor([0.1190, 0.1956, 0.0249, 0.1890, 0.1138, 0.0146, 0.0360, 0.0472], + device='cuda:3'), in_proj_covar=tensor([0.0290, 0.0319, 0.0286, 0.0311, 0.0303, 0.0262, 0.0405, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:34:07,680 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-06 23:34:10,839 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151633.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:17,587 INFO [train.py:901] (3/4) Epoch 19, batch 6150, loss[loss=0.2103, simple_loss=0.2921, pruned_loss=0.0643, over 8502.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2924, pruned_loss=0.06545, over 1615756.65 frames. 
], batch size: 26, lr: 3.97e-03, grad_scale: 16.0 +2023-02-06 23:34:18,379 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151644.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:28,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151658.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:30,158 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=151660.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:30,981 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151661.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:34:43,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.691e+02 2.320e+02 2.846e+02 3.654e+02 5.745e+02, threshold=5.693e+02, percent-clipped=0.0 +2023-02-06 23:34:51,570 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-06 23:34:53,938 INFO [train.py:901] (3/4) Epoch 19, batch 6200, loss[loss=0.1996, simple_loss=0.2829, pruned_loss=0.05812, over 7922.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2917, pruned_loss=0.06512, over 1608167.51 frames. ], batch size: 20, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:35:02,696 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151706.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:28,537 INFO [train.py:901] (3/4) Epoch 19, batch 6250, loss[loss=0.2047, simple_loss=0.2882, pruned_loss=0.06062, over 8475.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.2925, pruned_loss=0.06582, over 1611668.62 frames. ], batch size: 29, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:35:39,360 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151759.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:50,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=151775.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:35:53,501 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.555e+02 3.246e+02 4.070e+02 8.549e+02, threshold=6.492e+02, percent-clipped=6.0 +2023-02-06 23:36:03,717 INFO [train.py:901] (3/4) Epoch 19, batch 6300, loss[loss=0.2185, simple_loss=0.2986, pruned_loss=0.06921, over 8701.00 frames. ], tot_loss[loss=0.2124, simple_loss=0.2925, pruned_loss=0.06613, over 1612935.57 frames. ], batch size: 49, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:36:22,220 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=151819.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:36:39,103 INFO [train.py:901] (3/4) Epoch 19, batch 6350, loss[loss=0.2329, simple_loss=0.3077, pruned_loss=0.07906, over 8034.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2915, pruned_loss=0.06549, over 1610357.65 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:37:03,141 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 2.376e+02 2.921e+02 3.593e+02 6.855e+02, threshold=5.841e+02, percent-clipped=1.0 +2023-02-06 23:37:13,208 INFO [train.py:901] (3/4) Epoch 19, batch 6400, loss[loss=0.1769, simple_loss=0.2558, pruned_loss=0.04898, over 7642.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2912, pruned_loss=0.06497, over 1608784.87 frames. 
], batch size: 19, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:37:24,581 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0174, 1.5874, 1.3011, 1.4862, 1.2773, 1.1557, 1.2316, 1.3217], + device='cuda:3'), covar=tensor([0.1088, 0.0458, 0.1304, 0.0533, 0.0755, 0.1455, 0.0891, 0.0723], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0234, 0.0327, 0.0303, 0.0298, 0.0330, 0.0343, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:37:30,644 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=151917.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:37:30,810 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.85 vs. limit=2.0 +2023-02-06 23:37:38,746 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8901, 1.3243, 1.5513, 1.2633, 0.8955, 1.3404, 1.5247, 1.5225], + device='cuda:3'), covar=tensor([0.0487, 0.1338, 0.1752, 0.1507, 0.0636, 0.1583, 0.0740, 0.0660], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:37:40,462 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-06 23:37:42,194 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.17 vs. limit=2.0 +2023-02-06 23:37:48,037 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=151942.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:37:48,526 INFO [train.py:901] (3/4) Epoch 19, batch 6450, loss[loss=0.1715, simple_loss=0.2456, pruned_loss=0.04869, over 7677.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2913, pruned_loss=0.06506, over 1606700.10 frames. ], batch size: 18, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:38:13,513 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.409e+02 2.943e+02 3.710e+02 6.232e+02, threshold=5.887e+02, percent-clipped=1.0 +2023-02-06 23:38:15,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6127, 2.7310, 1.9602, 2.3459, 2.3164, 1.6397, 2.1324, 2.2429], + device='cuda:3'), covar=tensor([0.1603, 0.0456, 0.1239, 0.0632, 0.0731, 0.1593, 0.1038, 0.1043], + device='cuda:3'), in_proj_covar=tensor([0.0350, 0.0235, 0.0329, 0.0305, 0.0300, 0.0333, 0.0345, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:38:23,080 INFO [train.py:901] (3/4) Epoch 19, batch 6500, loss[loss=0.1925, simple_loss=0.2827, pruned_loss=0.05113, over 8358.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2914, pruned_loss=0.06505, over 1608082.54 frames. 
], batch size: 24, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:38:40,001 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152015.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:46,042 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8054, 3.7936, 3.4708, 1.7158, 3.3841, 3.4838, 3.4558, 3.2887], + device='cuda:3'), covar=tensor([0.0988, 0.0665, 0.1209, 0.5085, 0.1005, 0.1020, 0.1476, 0.0994], + device='cuda:3'), in_proj_covar=tensor([0.0518, 0.0429, 0.0429, 0.0532, 0.0419, 0.0432, 0.0412, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:38:51,604 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:38:58,598 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152040.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:00,457 INFO [train.py:901] (3/4) Epoch 19, batch 6550, loss[loss=0.2015, simple_loss=0.2785, pruned_loss=0.06224, over 8138.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2892, pruned_loss=0.06407, over 1607029.88 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:39:04,937 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152050.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:09,220 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152056.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:39:21,598 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-06 23:39:24,906 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.379e+02 2.761e+02 3.695e+02 7.678e+02, threshold=5.522e+02, percent-clipped=3.0 +2023-02-06 23:39:34,313 INFO [train.py:901] (3/4) Epoch 19, batch 6600, loss[loss=0.219, simple_loss=0.3036, pruned_loss=0.06724, over 8346.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2896, pruned_loss=0.06404, over 1608779.11 frames. ], batch size: 26, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:39:39,641 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-06 23:40:09,007 INFO [train.py:901] (3/4) Epoch 19, batch 6650, loss[loss=0.1891, simple_loss=0.2685, pruned_loss=0.05487, over 7807.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2913, pruned_loss=0.06511, over 1606713.48 frames. ], batch size: 20, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:40:23,462 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152163.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:40:24,962 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152165.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:40:34,183 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.686e+02 3.265e+02 3.895e+02 8.931e+02, threshold=6.531e+02, percent-clipped=7.0 +2023-02-06 23:40:44,526 INFO [train.py:901] (3/4) Epoch 19, batch 6700, loss[loss=0.1931, simple_loss=0.2804, pruned_loss=0.0529, over 8508.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2911, pruned_loss=0.06459, over 1612017.04 frames. 
], batch size: 26, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:41:05,142 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6649, 2.2871, 4.1516, 1.4591, 3.0901, 2.1730, 1.8292, 2.9585], + device='cuda:3'), covar=tensor([0.1947, 0.2477, 0.0839, 0.4414, 0.1746, 0.3256, 0.2165, 0.2392], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0581, 0.0548, 0.0627, 0.0637, 0.0586, 0.0521, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:41:06,355 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3877, 1.3513, 4.5270, 1.6582, 4.0043, 3.7586, 4.0803, 3.9779], + device='cuda:3'), covar=tensor([0.0570, 0.4981, 0.0579, 0.4145, 0.1180, 0.1035, 0.0607, 0.0679], + device='cuda:3'), in_proj_covar=tensor([0.0593, 0.0624, 0.0666, 0.0601, 0.0676, 0.0582, 0.0583, 0.0643], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:41:19,458 INFO [train.py:901] (3/4) Epoch 19, batch 6750, loss[loss=0.1926, simple_loss=0.2699, pruned_loss=0.05761, over 8080.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2919, pruned_loss=0.06493, over 1614567.35 frames. ], batch size: 21, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:41:19,600 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152243.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:44,696 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152278.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:45,124 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.378e+02 2.909e+02 3.491e+02 6.752e+02, threshold=5.817e+02, percent-clipped=2.0 +2023-02-06 23:41:51,504 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5678, 1.4854, 1.9700, 1.3229, 1.1507, 1.9581, 0.4107, 1.3209], + device='cuda:3'), covar=tensor([0.1796, 0.1144, 0.0372, 0.1208, 0.2658, 0.0438, 0.2186, 0.1363], + device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0195, 0.0125, 0.0223, 0.0270, 0.0134, 0.0169, 0.0187], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 23:41:53,513 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152291.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:41:54,070 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-06 23:41:54,749 INFO [train.py:901] (3/4) Epoch 19, batch 6800, loss[loss=0.1883, simple_loss=0.281, pruned_loss=0.04776, over 8320.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2918, pruned_loss=0.06501, over 1612526.67 frames. ], batch size: 25, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:42:29,090 INFO [train.py:901] (3/4) Epoch 19, batch 6850, loss[loss=0.1896, simple_loss=0.2717, pruned_loss=0.05377, over 7817.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2912, pruned_loss=0.06478, over 1604740.66 frames. ], batch size: 20, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:42:43,967 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-06 23:42:54,742 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.344e+02 3.012e+02 3.839e+02 8.073e+02, threshold=6.025e+02, percent-clipped=5.0 +2023-02-06 23:43:05,120 INFO [train.py:901] (3/4) Epoch 19, batch 6900, loss[loss=0.2211, simple_loss=0.2982, pruned_loss=0.07199, over 8125.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2906, pruned_loss=0.06436, over 1607382.38 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:43:25,517 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152421.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:43:40,387 INFO [train.py:901] (3/4) Epoch 19, batch 6950, loss[loss=0.1893, simple_loss=0.2719, pruned_loss=0.05334, over 8232.00 frames. ], tot_loss[loss=0.209, simple_loss=0.29, pruned_loss=0.06397, over 1608707.43 frames. ], batch size: 22, lr: 3.96e-03, grad_scale: 16.0 +2023-02-06 23:43:42,613 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152446.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:43:53,790 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-06 23:43:53,944 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152463.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:44:05,254 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.443e+02 3.132e+02 3.706e+02 6.613e+02, threshold=6.264e+02, percent-clipped=2.0 +2023-02-06 23:44:14,624 INFO [train.py:901] (3/4) Epoch 19, batch 7000, loss[loss=0.2056, simple_loss=0.293, pruned_loss=0.05914, over 8504.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.289, pruned_loss=0.06369, over 1605634.99 frames. ], batch size: 26, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:44:28,407 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9092, 1.5322, 1.8257, 1.5153, 1.0233, 1.5049, 2.0638, 1.7146], + device='cuda:3'), covar=tensor([0.0441, 0.1262, 0.1669, 0.1423, 0.0594, 0.1513, 0.0635, 0.0655], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0191, 0.0158, 0.0100, 0.0162, 0.0113, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:44:44,333 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152534.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:44:51,112 INFO [train.py:901] (3/4) Epoch 19, batch 7050, loss[loss=0.2243, simple_loss=0.304, pruned_loss=0.07224, over 8190.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2891, pruned_loss=0.06386, over 1608327.72 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:45:02,290 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152559.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:15,723 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.420e+02 2.800e+02 3.429e+02 5.549e+02, threshold=5.599e+02, percent-clipped=0.0 +2023-02-06 23:45:21,283 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152587.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:25,396 INFO [train.py:901] (3/4) Epoch 19, batch 7100, loss[loss=0.1903, simple_loss=0.2681, pruned_loss=0.05628, over 7916.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2875, pruned_loss=0.0629, over 1604124.05 frames. 
], batch size: 20, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:45:30,679 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152600.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:35,449 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152607.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:45:56,367 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152635.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:46:01,876 INFO [train.py:901] (3/4) Epoch 19, batch 7150, loss[loss=0.2231, simple_loss=0.299, pruned_loss=0.07363, over 8626.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.289, pruned_loss=0.0641, over 1604817.43 frames. ], batch size: 39, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:46:27,178 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.485e+02 2.441e+02 2.885e+02 3.630e+02 1.043e+03, threshold=5.770e+02, percent-clipped=5.0 +2023-02-06 23:46:36,620 INFO [train.py:901] (3/4) Epoch 19, batch 7200, loss[loss=0.1841, simple_loss=0.261, pruned_loss=0.05356, over 7533.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2895, pruned_loss=0.06434, over 1604625.17 frames. ], batch size: 18, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:46:42,732 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152702.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:09,163 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6712, 1.5737, 2.8550, 1.3457, 2.1168, 3.0425, 3.1525, 2.5893], + device='cuda:3'), covar=tensor([0.1183, 0.1445, 0.0402, 0.2116, 0.0955, 0.0305, 0.0679, 0.0639], + device='cuda:3'), in_proj_covar=tensor([0.0292, 0.0320, 0.0285, 0.0313, 0.0301, 0.0262, 0.0408, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:47:12,599 INFO [train.py:901] (3/4) Epoch 19, batch 7250, loss[loss=0.2, simple_loss=0.2794, pruned_loss=0.06032, over 8089.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2911, pruned_loss=0.06492, over 1610322.06 frames. ], batch size: 21, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:47:13,461 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=152744.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:15,662 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3127, 1.9815, 2.7338, 2.2167, 2.7024, 2.2445, 2.0573, 1.3745], + device='cuda:3'), covar=tensor([0.5213, 0.4690, 0.1799, 0.3520, 0.2372, 0.3154, 0.1942, 0.5250], + device='cuda:3'), in_proj_covar=tensor([0.0929, 0.0957, 0.0783, 0.0923, 0.0981, 0.0872, 0.0740, 0.0817], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:47:17,701 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152750.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:47:37,386 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 2.392e+02 2.877e+02 3.488e+02 7.359e+02, threshold=5.753e+02, percent-clipped=2.0 +2023-02-06 23:47:47,610 INFO [train.py:901] (3/4) Epoch 19, batch 7300, loss[loss=0.1788, simple_loss=0.2584, pruned_loss=0.04956, over 7974.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2906, pruned_loss=0.06475, over 1613693.90 frames. 
], batch size: 21, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:47:57,329 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152807.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:48:04,701 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0741, 2.3895, 1.9881, 2.8882, 1.4799, 1.6284, 2.0449, 2.3649], + device='cuda:3'), covar=tensor([0.0741, 0.0748, 0.0846, 0.0341, 0.1119, 0.1328, 0.0903, 0.0733], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0210, 0.0203, 0.0246, 0.0249, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 23:48:21,875 INFO [train.py:901] (3/4) Epoch 19, batch 7350, loss[loss=0.188, simple_loss=0.276, pruned_loss=0.05003, over 8247.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2902, pruned_loss=0.06435, over 1616101.88 frames. ], batch size: 22, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:48:46,701 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-06 23:48:48,161 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.571e+02 3.070e+02 4.184e+02 8.940e+02, threshold=6.140e+02, percent-clipped=8.0 +2023-02-06 23:48:58,049 INFO [train.py:901] (3/4) Epoch 19, batch 7400, loss[loss=0.2123, simple_loss=0.2834, pruned_loss=0.07056, over 7795.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2887, pruned_loss=0.06385, over 1611507.93 frames. ], batch size: 19, lr: 3.95e-03, grad_scale: 32.0 +2023-02-06 23:49:07,696 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-06 23:49:18,789 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=152922.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:32,918 INFO [train.py:901] (3/4) Epoch 19, batch 7450, loss[loss=0.199, simple_loss=0.281, pruned_loss=0.05848, over 7799.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2881, pruned_loss=0.06384, over 1607973.23 frames. ], batch size: 19, lr: 3.95e-03, grad_scale: 32.0 +2023-02-06 23:49:33,722 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152944.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:38,522 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=152951.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:44,011 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=152958.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:49:46,568 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-06 23:49:47,483 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4104, 2.1987, 3.2063, 2.4499, 2.9260, 2.4812, 2.1511, 1.6732], + device='cuda:3'), covar=tensor([0.5327, 0.5009, 0.1788, 0.3706, 0.2677, 0.2840, 0.1897, 0.5341], + device='cuda:3'), in_proj_covar=tensor([0.0924, 0.0953, 0.0779, 0.0917, 0.0979, 0.0867, 0.0736, 0.0811], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0001, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:49:58,925 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.506e+02 3.079e+02 4.075e+02 8.166e+02, threshold=6.159e+02, percent-clipped=5.0 +2023-02-06 23:50:01,984 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=152983.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:08,330 INFO [train.py:901] (3/4) Epoch 19, batch 7500, loss[loss=0.2245, simple_loss=0.3171, pruned_loss=0.06597, over 8311.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2891, pruned_loss=0.06381, over 1610714.45 frames. ], batch size: 25, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:50:11,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7684, 1.5907, 3.1615, 1.4241, 2.1959, 3.3799, 3.5115, 2.9237], + device='cuda:3'), covar=tensor([0.1188, 0.1565, 0.0386, 0.2056, 0.1030, 0.0279, 0.0651, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0320, 0.0287, 0.0313, 0.0304, 0.0263, 0.0409, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-06 23:50:17,498 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153006.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:21,635 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6316, 1.3778, 1.5702, 1.2564, 0.8745, 1.3735, 1.5042, 1.2349], + device='cuda:3'), covar=tensor([0.0570, 0.1236, 0.1703, 0.1479, 0.0649, 0.1445, 0.0778, 0.0701], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0160, 0.0112, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:50:34,853 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153031.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:42,701 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-06 23:50:42,933 INFO [train.py:901] (3/4) Epoch 19, batch 7550, loss[loss=0.1879, simple_loss=0.2794, pruned_loss=0.04818, over 7973.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2899, pruned_loss=0.06415, over 1611207.27 frames. ], batch size: 21, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:50:48,594 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4107, 1.4808, 1.3583, 1.8849, 0.8122, 1.2555, 1.3554, 1.4377], + device='cuda:3'), covar=tensor([0.0874, 0.0742, 0.1006, 0.0458, 0.1021, 0.1308, 0.0657, 0.0757], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0211, 0.0203, 0.0246, 0.0250, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-06 23:50:49,461 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.25 vs. 
limit=5.0 +2023-02-06 23:50:53,914 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153059.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:50:58,700 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153066.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:08,483 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.431e+02 2.980e+02 3.688e+02 7.634e+02, threshold=5.960e+02, percent-clipped=2.0 +2023-02-06 23:51:14,777 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153088.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:15,125 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-06 23:51:18,065 INFO [train.py:901] (3/4) Epoch 19, batch 7600, loss[loss=0.2309, simple_loss=0.3173, pruned_loss=0.07226, over 8105.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2907, pruned_loss=0.06479, over 1610690.60 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:51:28,505 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9987, 1.6570, 6.0708, 2.1418, 5.5534, 5.0436, 5.5855, 5.5511], + device='cuda:3'), covar=tensor([0.0511, 0.4754, 0.0400, 0.4062, 0.0969, 0.0888, 0.0476, 0.0505], + device='cuda:3'), in_proj_covar=tensor([0.0594, 0.0624, 0.0665, 0.0599, 0.0679, 0.0584, 0.0581, 0.0642], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0001, 0.0002], + device='cuda:3') +2023-02-06 23:51:47,774 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153135.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:51:53,092 INFO [train.py:901] (3/4) Epoch 19, batch 7650, loss[loss=0.2186, simple_loss=0.3062, pruned_loss=0.06548, over 8334.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.291, pruned_loss=0.06473, over 1615626.83 frames. ], batch size: 48, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:52:08,632 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-06 23:52:17,641 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153178.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:18,733 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.290e+02 2.780e+02 3.362e+02 7.829e+02, threshold=5.561e+02, percent-clipped=2.0 +2023-02-06 23:52:27,555 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-06 23:52:28,394 INFO [train.py:901] (3/4) Epoch 19, batch 7700, loss[loss=0.1753, simple_loss=0.2677, pruned_loss=0.04145, over 8290.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2897, pruned_loss=0.06413, over 1609928.12 frames. ], batch size: 23, lr: 3.95e-03, grad_scale: 16.0 +2023-02-06 23:52:35,489 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:35,511 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153203.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:52:35,889 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-06 23:52:57,462 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. 
Duration: 27.02225 +2023-02-06 23:53:03,345 INFO [train.py:901] (3/4) Epoch 19, batch 7750, loss[loss=0.2112, simple_loss=0.2989, pruned_loss=0.06179, over 8325.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2913, pruned_loss=0.06517, over 1613185.06 frames. ], batch size: 25, lr: 3.94e-03, grad_scale: 16.0 +2023-02-06 23:53:28,922 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.456e+02 3.001e+02 3.725e+02 8.940e+02, threshold=6.003e+02, percent-clipped=11.0 +2023-02-06 23:53:37,741 INFO [train.py:901] (3/4) Epoch 19, batch 7800, loss[loss=0.2008, simple_loss=0.2906, pruned_loss=0.05545, over 8136.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2917, pruned_loss=0.0653, over 1613937.43 frames. ], batch size: 22, lr: 3.94e-03, grad_scale: 16.0 +2023-02-06 23:53:39,918 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153296.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:53:53,366 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153315.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:53:58,001 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153322.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:09,679 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153340.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:11,469 INFO [train.py:901] (3/4) Epoch 19, batch 7850, loss[loss=0.2025, simple_loss=0.2879, pruned_loss=0.0585, over 8700.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2907, pruned_loss=0.06463, over 1609239.98 frames. ], batch size: 34, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:54:14,341 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6786, 1.3891, 1.5600, 1.3347, 0.8897, 1.3394, 1.5158, 1.2030], + device='cuda:3'), covar=tensor([0.0587, 0.1248, 0.1698, 0.1486, 0.0624, 0.1480, 0.0739, 0.0728], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0113, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-06 23:54:14,372 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153347.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:54:16,434 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1569, 1.2967, 1.3224, 0.9677, 1.3537, 1.0379, 0.3955, 1.2780], + device='cuda:3'), covar=tensor([0.0357, 0.0250, 0.0219, 0.0356, 0.0288, 0.0622, 0.0623, 0.0208], + device='cuda:3'), in_proj_covar=tensor([0.0439, 0.0379, 0.0335, 0.0440, 0.0366, 0.0527, 0.0386, 0.0407], + device='cuda:3'), out_proj_covar=tensor([1.1879e-04, 9.9666e-05, 8.8673e-05, 1.1697e-04, 9.6864e-05, 1.5016e-04, + 1.0446e-04, 1.0865e-04], device='cuda:3') +2023-02-06 23:54:23,027 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7282, 2.0569, 2.2579, 1.4490, 2.3580, 1.5666, 0.6758, 1.8817], + device='cuda:3'), covar=tensor([0.0581, 0.0316, 0.0216, 0.0518, 0.0335, 0.0824, 0.0814, 0.0279], + device='cuda:3'), in_proj_covar=tensor([0.0440, 0.0379, 0.0335, 0.0440, 0.0366, 0.0527, 0.0386, 0.0407], + device='cuda:3'), out_proj_covar=tensor([1.1887e-04, 9.9728e-05, 8.8735e-05, 1.1695e-04, 9.6969e-05, 1.5010e-04, + 1.0448e-04, 1.0863e-04], device='cuda:3') +2023-02-06 23:54:36,624 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.457e+02 2.874e+02 3.581e+02 1.670e+03, threshold=5.749e+02, percent-clipped=9.0 
+2023-02-06 23:54:44,306 INFO [train.py:901] (3/4) Epoch 19, batch 7900, loss[loss=0.2463, simple_loss=0.3218, pruned_loss=0.08538, over 8504.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2902, pruned_loss=0.06455, over 1608476.92 frames. ], batch size: 26, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:55:15,520 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153439.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:17,981 INFO [train.py:901] (3/4) Epoch 19, batch 7950, loss[loss=0.2276, simple_loss=0.3058, pruned_loss=0.07472, over 7184.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2911, pruned_loss=0.06487, over 1613113.28 frames. ], batch size: 71, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:55:28,823 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153459.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:41,876 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153479.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:43,176 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.434e+02 3.034e+02 3.983e+02 8.510e+02, threshold=6.068e+02, percent-clipped=6.0 +2023-02-06 23:55:45,345 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153484.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:55:50,998 INFO [train.py:901] (3/4) Epoch 19, batch 8000, loss[loss=0.2261, simple_loss=0.3008, pruned_loss=0.0757, over 7536.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2919, pruned_loss=0.06545, over 1611815.00 frames. ], batch size: 18, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:56:10,562 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-06 23:56:25,153 INFO [train.py:901] (3/4) Epoch 19, batch 8050, loss[loss=0.223, simple_loss=0.2874, pruned_loss=0.07934, over 7423.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2903, pruned_loss=0.06468, over 1603997.26 frames. ], batch size: 17, lr: 3.94e-03, grad_scale: 8.0 +2023-02-06 23:56:58,871 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-06 23:57:04,940 INFO [train.py:901] (3/4) Epoch 20, batch 0, loss[loss=0.2154, simple_loss=0.2806, pruned_loss=0.0751, over 7446.00 frames. ], tot_loss[loss=0.2154, simple_loss=0.2806, pruned_loss=0.0751, over 7446.00 frames. ], batch size: 17, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:57:04,940 INFO [train.py:926] (3/4) Computing validation loss +2023-02-06 23:57:16,940 INFO [train.py:935] (3/4) Epoch 20, validation: loss=0.1757, simple_loss=0.276, pruned_loss=0.03766, over 944034.00 frames. +2023-02-06 23:57:16,940 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-06 23:57:20,455 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.577e+02 3.496e+02 4.495e+02 1.164e+03, threshold=6.992e+02, percent-clipped=12.0 +2023-02-06 23:57:29,438 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153594.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:57:31,325 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-06 23:57:51,322 INFO [train.py:901] (3/4) Epoch 20, batch 50, loss[loss=0.1987, simple_loss=0.2669, pruned_loss=0.06527, over 7789.00 frames. ], tot_loss[loss=0.2177, simple_loss=0.2985, pruned_loss=0.06843, over 366435.38 frames. 
], batch size: 19, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:58:01,107 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153640.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:58:06,579 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-06 23:58:27,829 INFO [train.py:901] (3/4) Epoch 20, batch 100, loss[loss=0.225, simple_loss=0.311, pruned_loss=0.06955, over 8254.00 frames. ], tot_loss[loss=0.2144, simple_loss=0.2962, pruned_loss=0.06632, over 649833.87 frames. ], batch size: 24, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:58:29,262 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-06 23:58:31,356 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.446e+02 2.844e+02 3.351e+02 7.473e+02, threshold=5.688e+02, percent-clipped=2.0 +2023-02-06 23:58:58,603 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3320, 1.3825, 4.5247, 1.8005, 3.9787, 3.8102, 4.0744, 3.9831], + device='cuda:3'), covar=tensor([0.0594, 0.5304, 0.0517, 0.4069, 0.1120, 0.0916, 0.0576, 0.0619], + device='cuda:3'), in_proj_covar=tensor([0.0607, 0.0639, 0.0679, 0.0613, 0.0691, 0.0595, 0.0593, 0.0655], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-06 23:59:03,142 INFO [train.py:901] (3/4) Epoch 20, batch 150, loss[loss=0.2342, simple_loss=0.3215, pruned_loss=0.07342, over 8256.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2955, pruned_loss=0.06541, over 868139.31 frames. ], batch size: 24, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:59:06,136 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9614, 2.4661, 3.7082, 2.1013, 1.8262, 3.6384, 0.5502, 2.1793], + device='cuda:3'), covar=tensor([0.1285, 0.1245, 0.0222, 0.1611, 0.2881, 0.0220, 0.2658, 0.1365], + device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0194, 0.0124, 0.0221, 0.0270, 0.0133, 0.0169, 0.0186], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-06 23:59:23,320 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153755.0, num_to_drop=0, layers_to_drop=set() +2023-02-06 23:59:39,296 INFO [train.py:901] (3/4) Epoch 20, batch 200, loss[loss=0.2259, simple_loss=0.3048, pruned_loss=0.07345, over 8493.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2956, pruned_loss=0.06509, over 1039247.45 frames. ], batch size: 29, lr: 3.84e-03, grad_scale: 8.0 +2023-02-06 23:59:42,529 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.177e+02 2.784e+02 3.416e+02 8.818e+02, threshold=5.569e+02, percent-clipped=1.0 +2023-02-06 23:59:43,938 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=153783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:15,037 INFO [train.py:901] (3/4) Epoch 20, batch 250, loss[loss=0.2223, simple_loss=0.3001, pruned_loss=0.07227, over 8082.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2958, pruned_loss=0.06487, over 1172136.61 frames. ], batch size: 21, lr: 3.84e-03, grad_scale: 8.0 +2023-02-07 00:00:26,530 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-07 00:00:31,631 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=153850.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:34,735 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 00:00:48,275 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=153875.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:00:48,775 INFO [train.py:901] (3/4) Epoch 20, batch 300, loss[loss=0.2158, simple_loss=0.3042, pruned_loss=0.0637, over 8246.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2942, pruned_loss=0.06468, over 1273559.96 frames. ], batch size: 24, lr: 3.84e-03, grad_scale: 8.0 +2023-02-07 00:00:52,004 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.425e+02 2.846e+02 3.739e+02 1.062e+03, threshold=5.691e+02, percent-clipped=2.0 +2023-02-07 00:01:05,174 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=153898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:01:24,550 INFO [train.py:901] (3/4) Epoch 20, batch 350, loss[loss=0.2022, simple_loss=0.2781, pruned_loss=0.06311, over 8082.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2932, pruned_loss=0.06478, over 1349212.59 frames. ], batch size: 21, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:01:35,761 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=153941.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:01:36,640 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 00:01:59,290 INFO [train.py:901] (3/4) Epoch 20, batch 400, loss[loss=0.2044, simple_loss=0.291, pruned_loss=0.05886, over 8182.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2933, pruned_loss=0.06433, over 1408883.80 frames. ], batch size: 23, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:02:02,798 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.367e+02 2.483e+02 2.937e+02 3.652e+02 9.410e+02, threshold=5.874e+02, percent-clipped=4.0 +2023-02-07 00:02:22,963 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 00:02:25,627 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:02:36,305 INFO [train.py:901] (3/4) Epoch 20, batch 450, loss[loss=0.2553, simple_loss=0.3213, pruned_loss=0.09462, over 8680.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2926, pruned_loss=0.06407, over 1455860.17 frames. ], batch size: 34, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:02:44,052 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154036.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:05,786 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154067.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:03:11,785 INFO [train.py:901] (3/4) Epoch 20, batch 500, loss[loss=0.1947, simple_loss=0.2773, pruned_loss=0.05602, over 7811.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2888, pruned_loss=0.06239, over 1487931.49 frames. 
], batch size: 20, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:03:15,235 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.123e+02 2.274e+02 2.685e+02 3.204e+02 7.760e+02, threshold=5.371e+02, percent-clipped=3.0 +2023-02-07 00:03:24,423 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:29,962 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:03:46,377 INFO [train.py:901] (3/4) Epoch 20, batch 550, loss[loss=0.2271, simple_loss=0.3155, pruned_loss=0.06932, over 8461.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2877, pruned_loss=0.06244, over 1509784.18 frames. ], batch size: 29, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:03:49,724 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-07 00:03:59,235 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-07 00:04:07,821 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:04:23,278 INFO [train.py:901] (3/4) Epoch 20, batch 600, loss[loss=0.2084, simple_loss=0.2933, pruned_loss=0.0617, over 8400.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2898, pruned_loss=0.06414, over 1536219.00 frames. ], batch size: 29, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:04:25,538 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:04:26,655 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.477e+02 2.962e+02 3.836e+02 8.919e+02, threshold=5.925e+02, percent-clipped=6.0 +2023-02-07 00:04:45,284 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 00:04:57,543 INFO [train.py:901] (3/4) Epoch 20, batch 650, loss[loss=0.1856, simple_loss=0.2621, pruned_loss=0.0545, over 7799.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2896, pruned_loss=0.06396, over 1551230.52 frames. ], batch size: 20, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:04:58,453 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4708, 2.6731, 1.8885, 2.3777, 2.2222, 1.6098, 2.0473, 2.3061], + device='cuda:3'), covar=tensor([0.1608, 0.0399, 0.1130, 0.0706, 0.0759, 0.1456, 0.1112, 0.0977], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0237, 0.0332, 0.0307, 0.0302, 0.0335, 0.0346, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:05:06,566 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:14,622 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 00:05:34,074 INFO [train.py:901] (3/4) Epoch 20, batch 700, loss[loss=0.2528, simple_loss=0.3245, pruned_loss=0.0906, over 8732.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2895, pruned_loss=0.06372, over 1566244.48 frames. 
], batch size: 30, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:05:37,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.357e+02 2.958e+02 3.586e+02 6.466e+02, threshold=5.915e+02, percent-clipped=2.0 +2023-02-07 00:05:40,241 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154285.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:54,040 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154304.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:05:57,548 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:06:08,843 INFO [train.py:901] (3/4) Epoch 20, batch 750, loss[loss=0.2017, simple_loss=0.2888, pruned_loss=0.05726, over 8458.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2898, pruned_loss=0.06382, over 1575207.54 frames. ], batch size: 27, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:06:11,930 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 00:06:28,503 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154355.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:06:33,651 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 00:06:43,001 INFO [train.py:901] (3/4) Epoch 20, batch 800, loss[loss=0.2018, simple_loss=0.2816, pruned_loss=0.06095, over 8088.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2897, pruned_loss=0.06385, over 1582884.73 frames. ], batch size: 21, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:06:43,010 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 00:06:47,166 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.337e+02 2.441e+02 3.052e+02 3.711e+02 8.675e+02, threshold=6.104e+02, percent-clipped=3.0 +2023-02-07 00:07:01,166 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:08,302 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154411.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:07:19,181 INFO [train.py:901] (3/4) Epoch 20, batch 850, loss[loss=0.199, simple_loss=0.2773, pruned_loss=0.06029, over 7787.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2891, pruned_loss=0.06345, over 1586278.73 frames. ], batch size: 19, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:07:27,239 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:32,660 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:07:52,873 INFO [train.py:901] (3/4) Epoch 20, batch 900, loss[loss=0.192, simple_loss=0.2794, pruned_loss=0.0523, over 8343.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.29, pruned_loss=0.06393, over 1594485.43 frames. 
], batch size: 25, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:07:56,205 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.768e+02 2.439e+02 2.923e+02 3.686e+02 1.072e+03, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 00:07:58,423 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3951, 1.5458, 1.4165, 1.8346, 0.7521, 1.2239, 1.2722, 1.5190], + device='cuda:3'), covar=tensor([0.0878, 0.0710, 0.0987, 0.0494, 0.1106, 0.1407, 0.0773, 0.0668], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0195, 0.0246, 0.0209, 0.0205, 0.0248, 0.0249, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 00:07:59,847 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5339, 1.7275, 2.7442, 1.3907, 2.0994, 1.9140, 1.5445, 2.0028], + device='cuda:3'), covar=tensor([0.1951, 0.2584, 0.0841, 0.4663, 0.1731, 0.3158, 0.2455, 0.2186], + device='cuda:3'), in_proj_covar=tensor([0.0519, 0.0586, 0.0553, 0.0631, 0.0639, 0.0587, 0.0524, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 00:08:04,402 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:08:29,153 INFO [train.py:901] (3/4) Epoch 20, batch 950, loss[loss=0.2368, simple_loss=0.3057, pruned_loss=0.08393, over 7203.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2903, pruned_loss=0.06447, over 1596793.48 frames. ], batch size: 71, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:08:29,366 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154526.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:08:48,746 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154553.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:08:54,161 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:00,292 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2569, 1.6401, 4.1327, 1.9265, 2.3267, 4.7160, 4.8614, 4.0594], + device='cuda:3'), covar=tensor([0.1171, 0.1962, 0.0301, 0.2030, 0.1389, 0.0192, 0.0339, 0.0557], + device='cuda:3'), in_proj_covar=tensor([0.0288, 0.0318, 0.0286, 0.0310, 0.0302, 0.0259, 0.0403, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:09:01,512 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 00:09:04,234 INFO [train.py:901] (3/4) Epoch 20, batch 1000, loss[loss=0.2647, simple_loss=0.3293, pruned_loss=0.1, over 7463.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2904, pruned_loss=0.06453, over 1597772.70 frames. 
], batch size: 73, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:09:07,495 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.511e+02 3.044e+02 3.807e+02 8.767e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 00:09:08,984 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:23,942 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3446, 4.3802, 3.9111, 2.0228, 3.7587, 3.9291, 3.9340, 3.7252], + device='cuda:3'), covar=tensor([0.0819, 0.0637, 0.1137, 0.4818, 0.0963, 0.1265, 0.1458, 0.1007], + device='cuda:3'), in_proj_covar=tensor([0.0522, 0.0429, 0.0433, 0.0537, 0.0423, 0.0437, 0.0417, 0.0374], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 00:09:35,153 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 00:09:38,960 INFO [train.py:901] (3/4) Epoch 20, batch 1050, loss[loss=0.1984, simple_loss=0.2912, pruned_loss=0.05282, over 8439.00 frames. ], tot_loss[loss=0.211, simple_loss=0.2915, pruned_loss=0.06528, over 1602402.06 frames. ], batch size: 27, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:09:49,447 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 00:09:53,678 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154646.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:54,882 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:09:59,106 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:01,446 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154656.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:14,711 INFO [train.py:901] (3/4) Epoch 20, batch 1100, loss[loss=0.2099, simple_loss=0.2997, pruned_loss=0.06003, over 8737.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2913, pruned_loss=0.06529, over 1606832.81 frames. ], batch size: 30, lr: 3.83e-03, grad_scale: 8.0 +2023-02-07 00:10:18,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.678e+02 2.486e+02 3.103e+02 3.988e+02 8.246e+02, threshold=6.206e+02, percent-clipped=6.0 +2023-02-07 00:10:18,345 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154681.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:29,751 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:30,331 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154699.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:10:48,868 INFO [train.py:901] (3/4) Epoch 20, batch 1150, loss[loss=0.224, simple_loss=0.3167, pruned_loss=0.06562, over 8193.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2917, pruned_loss=0.06603, over 1608305.58 frames. 
], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:10:53,708 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0483, 1.5382, 1.7031, 1.3993, 1.0009, 1.5040, 1.8686, 1.6838], + device='cuda:3'), covar=tensor([0.0518, 0.1188, 0.1570, 0.1401, 0.0594, 0.1353, 0.0667, 0.0598], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0157, 0.0099, 0.0160, 0.0112, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0009, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:10:57,844 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154738.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:10:59,082 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 00:11:16,274 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:19,807 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:25,038 INFO [train.py:901] (3/4) Epoch 20, batch 1200, loss[loss=0.2369, simple_loss=0.3181, pruned_loss=0.07781, over 8485.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2912, pruned_loss=0.06531, over 1610265.62 frames. ], batch size: 28, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:11:28,377 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.412e+02 2.746e+02 3.577e+02 9.067e+02, threshold=5.492e+02, percent-clipped=2.0 +2023-02-07 00:11:29,299 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154782.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:11:46,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154807.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:11:47,770 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154809.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:51,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154814.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:11:53,284 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154817.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:11:59,126 INFO [train.py:901] (3/4) Epoch 20, batch 1250, loss[loss=0.1698, simple_loss=0.248, pruned_loss=0.04577, over 7696.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2908, pruned_loss=0.06496, over 1610108.88 frames. 
], batch size: 18, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:05,337 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154834.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:06,467 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:11,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:12:22,329 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9034, 1.8693, 2.9553, 2.3275, 2.6816, 1.9321, 1.6404, 1.3753], + device='cuda:3'), covar=tensor([0.6783, 0.5662, 0.1839, 0.3646, 0.2719, 0.4138, 0.2959, 0.5465], + device='cuda:3'), in_proj_covar=tensor([0.0939, 0.0966, 0.0791, 0.0928, 0.0984, 0.0877, 0.0740, 0.0819], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 00:12:23,576 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1498, 1.5871, 4.3534, 1.6900, 3.8801, 3.6620, 3.9387, 3.8138], + device='cuda:3'), covar=tensor([0.0602, 0.4242, 0.0507, 0.3760, 0.1069, 0.0895, 0.0564, 0.0686], + device='cuda:3'), in_proj_covar=tensor([0.0599, 0.0621, 0.0673, 0.0599, 0.0679, 0.0587, 0.0586, 0.0651], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 00:12:34,985 INFO [train.py:901] (3/4) Epoch 20, batch 1300, loss[loss=0.1782, simple_loss=0.2615, pruned_loss=0.04744, over 7426.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2908, pruned_loss=0.06461, over 1612282.52 frames. ], batch size: 17, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:12:38,324 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.654e+02 2.433e+02 3.191e+02 3.995e+02 7.235e+02, threshold=6.381e+02, percent-clipped=6.0 +2023-02-07 00:13:09,379 INFO [train.py:901] (3/4) Epoch 20, batch 1350, loss[loss=0.1991, simple_loss=0.2939, pruned_loss=0.0521, over 8504.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2896, pruned_loss=0.0639, over 1612831.96 frames. ], batch size: 26, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:27,087 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=154951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:29,755 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=154954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:44,748 INFO [train.py:901] (3/4) Epoch 20, batch 1400, loss[loss=0.1903, simple_loss=0.2735, pruned_loss=0.05351, over 8028.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06281, over 1611927.68 frames. 
], batch size: 22, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:13:47,803 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=154979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:48,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.419e+02 2.969e+02 3.620e+02 8.609e+02, threshold=5.938e+02, percent-clipped=3.0 +2023-02-07 00:13:55,293 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=154990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:13:58,160 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=154994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:16,373 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:19,579 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:20,734 INFO [train.py:901] (3/4) Epoch 20, batch 1450, loss[loss=0.1916, simple_loss=0.2787, pruned_loss=0.05223, over 8250.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2903, pruned_loss=0.0643, over 1611719.35 frames. ], batch size: 24, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:29,167 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 00:14:33,296 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:36,500 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:14:50,892 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155070.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 00:14:55,250 INFO [train.py:901] (3/4) Epoch 20, batch 1500, loss[loss=0.182, simple_loss=0.2705, pruned_loss=0.04677, over 8289.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2908, pruned_loss=0.06436, over 1614771.51 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:14:58,584 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.482e+02 3.072e+02 3.822e+02 6.990e+02, threshold=6.143e+02, percent-clipped=2.0 +2023-02-07 00:14:59,297 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:02,793 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155087.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:08,997 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155095.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 00:15:09,714 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.31 vs. limit=5.0 +2023-02-07 00:15:15,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:15:30,486 INFO [train.py:901] (3/4) Epoch 20, batch 1550, loss[loss=0.2331, simple_loss=0.3131, pruned_loss=0.07655, over 8335.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2906, pruned_loss=0.06392, over 1616293.98 frames. 
], batch size: 25, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:15:36,915 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2490, 1.4299, 1.5798, 1.2905, 1.0394, 1.3399, 1.8369, 1.5547], + device='cuda:3'), covar=tensor([0.0497, 0.1295, 0.1734, 0.1495, 0.0624, 0.1583, 0.0680, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0189, 0.0157, 0.0100, 0.0160, 0.0111, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:16:04,708 INFO [train.py:901] (3/4) Epoch 20, batch 1600, loss[loss=0.1901, simple_loss=0.278, pruned_loss=0.05105, over 8506.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2898, pruned_loss=0.0635, over 1616026.64 frames. ], batch size: 39, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:08,765 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.295e+02 2.863e+02 3.431e+02 6.352e+02, threshold=5.726e+02, percent-clipped=1.0 +2023-02-07 00:16:20,605 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:27,178 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:16:40,714 INFO [train.py:901] (3/4) Epoch 20, batch 1650, loss[loss=0.2151, simple_loss=0.2917, pruned_loss=0.06928, over 8675.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2902, pruned_loss=0.06373, over 1616674.64 frames. ], batch size: 39, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:16:45,136 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:17:15,948 INFO [train.py:901] (3/4) Epoch 20, batch 1700, loss[loss=0.1957, simple_loss=0.2761, pruned_loss=0.05761, over 8330.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2907, pruned_loss=0.06359, over 1616742.53 frames. ], batch size: 26, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:17:19,376 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.383e+02 2.759e+02 3.259e+02 7.427e+02, threshold=5.517e+02, percent-clipped=3.0 +2023-02-07 00:17:20,956 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5713, 1.9262, 2.1474, 1.2181, 2.1857, 1.4510, 0.6486, 1.8653], + device='cuda:3'), covar=tensor([0.0662, 0.0381, 0.0281, 0.0597, 0.0398, 0.0847, 0.0901, 0.0289], + device='cuda:3'), in_proj_covar=tensor([0.0443, 0.0384, 0.0338, 0.0442, 0.0370, 0.0529, 0.0388, 0.0409], + device='cuda:3'), out_proj_covar=tensor([1.1964e-04, 1.0103e-04, 8.9166e-05, 1.1711e-04, 9.7986e-05, 1.5067e-04, + 1.0512e-04, 1.0900e-04], device='cuda:3') +2023-02-07 00:17:44,038 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0257, 1.9727, 1.9945, 1.9546, 1.0684, 1.7095, 2.2004, 1.9617], + device='cuda:3'), covar=tensor([0.0405, 0.1118, 0.1544, 0.1229, 0.0600, 0.1396, 0.0596, 0.0579], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0151, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:17:51,288 INFO [train.py:901] (3/4) Epoch 20, batch 1750, loss[loss=0.2074, simple_loss=0.3027, pruned_loss=0.056, over 8464.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2909, pruned_loss=0.06401, over 1616907.34 frames. 
], batch size: 27, lr: 3.82e-03, grad_scale: 16.0 +2023-02-07 00:18:00,382 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:11,548 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0371, 3.5327, 2.0791, 2.6974, 2.6826, 1.9655, 2.5395, 2.9122], + device='cuda:3'), covar=tensor([0.1627, 0.0335, 0.1196, 0.0746, 0.0676, 0.1396, 0.1095, 0.1047], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0235, 0.0330, 0.0303, 0.0300, 0.0335, 0.0343, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:18:17,126 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:18:27,007 INFO [train.py:901] (3/4) Epoch 20, batch 1800, loss[loss=0.1991, simple_loss=0.2812, pruned_loss=0.0585, over 7810.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2911, pruned_loss=0.06433, over 1617261.51 frames. ], batch size: 20, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:18:31,096 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.586e+02 2.965e+02 3.772e+02 7.314e+02, threshold=5.929e+02, percent-clipped=8.0 +2023-02-07 00:18:34,027 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:01,130 INFO [train.py:901] (3/4) Epoch 20, batch 1850, loss[loss=0.21, simple_loss=0.2924, pruned_loss=0.06381, over 7260.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2908, pruned_loss=0.06402, over 1617627.48 frames. ], batch size: 16, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:04,533 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,142 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:20,184 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155453.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:36,702 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155475.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:37,259 INFO [train.py:901] (3/4) Epoch 20, batch 1900, loss[loss=0.2273, simple_loss=0.3123, pruned_loss=0.07113, over 8294.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2908, pruned_loss=0.06408, over 1615302.96 frames. ], batch size: 23, lr: 3.82e-03, grad_scale: 8.0 +2023-02-07 00:19:38,773 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:19:41,336 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.441e+02 2.899e+02 3.473e+02 6.405e+02, threshold=5.799e+02, percent-clipped=1.0 +2023-02-07 00:20:11,844 INFO [train.py:901] (3/4) Epoch 20, batch 1950, loss[loss=0.2122, simple_loss=0.2953, pruned_loss=0.06452, over 8253.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2907, pruned_loss=0.06428, over 1616257.79 frames. ], batch size: 24, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:13,302 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-07 00:20:26,415 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155546.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:20:26,928 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 00:20:46,968 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 00:20:47,674 INFO [train.py:901] (3/4) Epoch 20, batch 2000, loss[loss=0.1807, simple_loss=0.2709, pruned_loss=0.0452, over 7963.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2901, pruned_loss=0.06395, over 1616395.20 frames. ], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:20:51,754 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.363e+02 2.911e+02 3.881e+02 1.027e+03, threshold=5.822e+02, percent-clipped=2.0 +2023-02-07 00:21:09,815 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 00:21:20,952 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:22,871 INFO [train.py:901] (3/4) Epoch 20, batch 2050, loss[loss=0.1977, simple_loss=0.2637, pruned_loss=0.06585, over 7430.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2905, pruned_loss=0.06406, over 1618081.87 frames. ], batch size: 17, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:21:34,546 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1219, 2.3541, 1.9056, 2.8044, 1.3529, 1.6127, 2.0006, 2.2957], + device='cuda:3'), covar=tensor([0.0689, 0.0694, 0.0841, 0.0384, 0.1136, 0.1348, 0.0949, 0.0718], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0198, 0.0248, 0.0213, 0.0205, 0.0250, 0.0253, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 00:21:57,538 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:21:58,080 INFO [train.py:901] (3/4) Epoch 20, batch 2100, loss[loss=0.1628, simple_loss=0.2493, pruned_loss=0.03814, over 7651.00 frames. ], tot_loss[loss=0.208, simple_loss=0.289, pruned_loss=0.06349, over 1614199.16 frames. 
], batch size: 19, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:02,102 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.564e+02 2.968e+02 3.686e+02 8.256e+02, threshold=5.935e+02, percent-clipped=7.0 +2023-02-07 00:22:15,400 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7790, 1.7764, 2.4174, 1.6730, 1.3151, 2.3663, 0.4631, 1.4305], + device='cuda:3'), covar=tensor([0.1809, 0.1176, 0.0354, 0.1234, 0.3010, 0.0415, 0.2440, 0.1695], + device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0193, 0.0124, 0.0220, 0.0268, 0.0134, 0.0170, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 00:22:22,176 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:22:31,067 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0941, 1.6142, 1.4054, 1.6292, 1.3860, 1.2565, 1.3158, 1.3494], + device='cuda:3'), covar=tensor([0.1017, 0.0444, 0.1226, 0.0497, 0.0676, 0.1415, 0.0761, 0.0744], + device='cuda:3'), in_proj_covar=tensor([0.0348, 0.0235, 0.0328, 0.0302, 0.0296, 0.0331, 0.0338, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:22:33,482 INFO [train.py:901] (3/4) Epoch 20, batch 2150, loss[loss=0.2345, simple_loss=0.3208, pruned_loss=0.07406, over 8460.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2887, pruned_loss=0.06372, over 1610075.75 frames. ], batch size: 25, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:22:39,034 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:22:54,736 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8294, 3.7972, 3.4506, 1.8218, 3.3369, 3.4577, 3.3846, 3.3260], + device='cuda:3'), covar=tensor([0.0871, 0.0625, 0.1126, 0.4980, 0.0950, 0.0998, 0.1386, 0.0972], + device='cuda:3'), in_proj_covar=tensor([0.0516, 0.0426, 0.0431, 0.0533, 0.0420, 0.0433, 0.0415, 0.0373], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 00:23:07,321 INFO [train.py:901] (3/4) Epoch 20, batch 2200, loss[loss=0.1752, simple_loss=0.253, pruned_loss=0.04867, over 7786.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2889, pruned_loss=0.06368, over 1608435.00 frames. ], batch size: 19, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:23:08,431 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 00:23:12,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.519e+02 2.939e+02 3.787e+02 7.175e+02, threshold=5.878e+02, percent-clipped=4.0 +2023-02-07 00:23:26,037 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:26,115 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=155802.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:38,414 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155819.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:23:43,082 INFO [train.py:901] (3/4) Epoch 20, batch 2250, loss[loss=0.2341, simple_loss=0.3051, pruned_loss=0.08156, over 8035.00 frames. 
], tot_loss[loss=0.208, simple_loss=0.2887, pruned_loss=0.06361, over 1606766.22 frames. ], batch size: 22, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:23:44,831 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=155827.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:24:17,866 INFO [train.py:901] (3/4) Epoch 20, batch 2300, loss[loss=0.2341, simple_loss=0.3176, pruned_loss=0.07529, over 8592.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2884, pruned_loss=0.0637, over 1602645.69 frames. ], batch size: 39, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:24:21,977 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.500e+02 2.966e+02 3.753e+02 6.656e+02, threshold=5.933e+02, percent-clipped=3.0 +2023-02-07 00:24:54,617 INFO [train.py:901] (3/4) Epoch 20, batch 2350, loss[loss=0.2506, simple_loss=0.3197, pruned_loss=0.09072, over 7044.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2886, pruned_loss=0.06343, over 1606251.86 frames. ], batch size: 71, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:25:00,029 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=155934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:25:17,382 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5636, 2.5427, 1.8854, 2.4057, 2.2568, 1.5240, 2.1762, 2.3038], + device='cuda:3'), covar=tensor([0.1421, 0.0404, 0.1231, 0.0565, 0.0660, 0.1615, 0.0864, 0.0823], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0235, 0.0331, 0.0305, 0.0298, 0.0334, 0.0341, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:25:23,234 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=155967.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:25:23,382 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2970, 1.6389, 1.6868, 0.9690, 1.7155, 1.3678, 0.2351, 1.5323], + device='cuda:3'), covar=tensor([0.0513, 0.0336, 0.0289, 0.0511, 0.0396, 0.0860, 0.0883, 0.0268], + device='cuda:3'), in_proj_covar=tensor([0.0444, 0.0382, 0.0336, 0.0441, 0.0368, 0.0528, 0.0389, 0.0410], + device='cuda:3'), out_proj_covar=tensor([1.1986e-04, 1.0052e-04, 8.8717e-05, 1.1676e-04, 9.7577e-05, 1.5040e-04, + 1.0548e-04, 1.0922e-04], device='cuda:3') +2023-02-07 00:25:29,310 INFO [train.py:901] (3/4) Epoch 20, batch 2400, loss[loss=0.1765, simple_loss=0.2468, pruned_loss=0.05311, over 7918.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2888, pruned_loss=0.06317, over 1612511.16 frames. ], batch size: 20, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:25:33,221 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.377e+02 2.729e+02 3.502e+02 6.388e+02, threshold=5.458e+02, percent-clipped=1.0 +2023-02-07 00:26:01,013 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156019.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:05,601 INFO [train.py:901] (3/4) Epoch 20, batch 2450, loss[loss=0.2114, simple_loss=0.2957, pruned_loss=0.0636, over 8088.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2885, pruned_loss=0.06302, over 1613662.18 frames. ], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:26:40,968 INFO [train.py:901] (3/4) Epoch 20, batch 2500, loss[loss=0.1811, simple_loss=0.2633, pruned_loss=0.04949, over 8098.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2891, pruned_loss=0.06367, over 1617080.58 frames. 
], batch size: 21, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:26:45,015 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.463e+02 3.105e+02 3.826e+02 1.382e+03, threshold=6.210e+02, percent-clipped=11.0 +2023-02-07 00:26:45,217 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:26:49,178 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156088.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:15,803 INFO [train.py:901] (3/4) Epoch 20, batch 2550, loss[loss=0.2142, simple_loss=0.3033, pruned_loss=0.06254, over 8501.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2899, pruned_loss=0.06423, over 1615815.24 frames. ], batch size: 26, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:27:21,367 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:23,384 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2322, 3.1725, 2.9060, 1.6139, 2.8403, 2.8928, 2.8449, 2.7534], + device='cuda:3'), covar=tensor([0.1202, 0.0878, 0.1439, 0.4785, 0.1092, 0.1206, 0.1668, 0.1120], + device='cuda:3'), in_proj_covar=tensor([0.0517, 0.0428, 0.0431, 0.0531, 0.0421, 0.0434, 0.0419, 0.0375], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 00:27:29,848 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:27:45,120 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 00:27:50,757 INFO [train.py:901] (3/4) Epoch 20, batch 2600, loss[loss=0.2378, simple_loss=0.3189, pruned_loss=0.07832, over 8326.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2903, pruned_loss=0.06481, over 1610096.87 frames. ], batch size: 25, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:27:54,657 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.376e+02 3.118e+02 3.808e+02 9.704e+02, threshold=6.236e+02, percent-clipped=5.0 +2023-02-07 00:28:00,399 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:17,469 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:24,592 INFO [train.py:901] (3/4) Epoch 20, batch 2650, loss[loss=0.178, simple_loss=0.2591, pruned_loss=0.04845, over 8338.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2894, pruned_loss=0.06405, over 1609655.28 frames. ], batch size: 25, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:28:30,661 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:28:49,206 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:29:00,108 INFO [train.py:901] (3/4) Epoch 20, batch 2700, loss[loss=0.1973, simple_loss=0.2843, pruned_loss=0.0551, over 8471.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2903, pruned_loss=0.06458, over 1609528.57 frames. 
], batch size: 25, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:29:04,083 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.401e+02 3.078e+02 3.829e+02 8.557e+02, threshold=6.156e+02, percent-clipped=4.0 +2023-02-07 00:29:23,254 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156308.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:29:35,128 INFO [train.py:901] (3/4) Epoch 20, batch 2750, loss[loss=0.2187, simple_loss=0.3033, pruned_loss=0.06706, over 8515.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2906, pruned_loss=0.06443, over 1616576.50 frames. ], batch size: 28, lr: 3.81e-03, grad_scale: 8.0 +2023-02-07 00:29:38,039 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9900, 1.6253, 1.8019, 1.3587, 1.0116, 1.5171, 1.7884, 1.7623], + device='cuda:3'), covar=tensor([0.0507, 0.1245, 0.1625, 0.1461, 0.0589, 0.1535, 0.0671, 0.0579], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:29:43,525 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156338.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:01,814 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156363.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:10,361 INFO [train.py:901] (3/4) Epoch 20, batch 2800, loss[loss=0.2191, simple_loss=0.298, pruned_loss=0.07013, over 8292.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2905, pruned_loss=0.06442, over 1617776.46 frames. ], batch size: 23, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:30:15,855 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.534e+02 2.983e+02 3.648e+02 6.974e+02, threshold=5.966e+02, percent-clipped=1.0 +2023-02-07 00:30:20,860 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:23,642 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8141, 1.7938, 2.4446, 1.6072, 1.3837, 2.4012, 0.4825, 1.5218], + device='cuda:3'), covar=tensor([0.1554, 0.1260, 0.0339, 0.1244, 0.2727, 0.0404, 0.2332, 0.1358], + device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0193, 0.0124, 0.0221, 0.0268, 0.0134, 0.0168, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 00:30:38,827 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156415.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:30:46,250 INFO [train.py:901] (3/4) Epoch 20, batch 2850, loss[loss=0.1554, simple_loss=0.2391, pruned_loss=0.03584, over 7553.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.06445, over 1619468.26 frames. ], batch size: 18, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:30:50,403 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156432.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:31:20,791 INFO [train.py:901] (3/4) Epoch 20, batch 2900, loss[loss=0.1967, simple_loss=0.285, pruned_loss=0.05425, over 8368.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2911, pruned_loss=0.06445, over 1619990.58 frames. 
], batch size: 24, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:31:26,323 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.692e+02 2.409e+02 2.783e+02 3.401e+02 8.568e+02, threshold=5.566e+02, percent-clipped=1.0 +2023-02-07 00:31:50,390 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:31:53,705 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 00:31:57,132 INFO [train.py:901] (3/4) Epoch 20, batch 2950, loss[loss=0.2675, simple_loss=0.3424, pruned_loss=0.09631, over 8586.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2917, pruned_loss=0.06489, over 1616955.36 frames. ], batch size: 31, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:32:08,284 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156542.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:11,752 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6990, 1.3449, 1.5073, 1.2558, 0.9342, 1.3273, 1.4783, 1.3448], + device='cuda:3'), covar=tensor([0.0588, 0.1243, 0.1682, 0.1443, 0.0640, 0.1511, 0.0742, 0.0681], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0112, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:32:11,760 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:25,906 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0666, 1.6080, 1.3096, 1.5607, 1.3810, 1.2094, 1.2475, 1.3766], + device='cuda:3'), covar=tensor([0.1095, 0.0444, 0.1272, 0.0577, 0.0702, 0.1471, 0.0942, 0.0798], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0237, 0.0331, 0.0307, 0.0299, 0.0335, 0.0344, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:32:31,023 INFO [train.py:901] (3/4) Epoch 20, batch 3000, loss[loss=0.2126, simple_loss=0.2991, pruned_loss=0.06303, over 8463.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2913, pruned_loss=0.06476, over 1612132.87 frames. ], batch size: 29, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:32:31,024 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 00:32:43,174 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7724, 1.5753, 2.7012, 1.4726, 2.1976, 2.8561, 3.0427, 2.4964], + device='cuda:3'), covar=tensor([0.1179, 0.1687, 0.0402, 0.2144, 0.0932, 0.0325, 0.0551, 0.0556], + device='cuda:3'), in_proj_covar=tensor([0.0292, 0.0323, 0.0287, 0.0315, 0.0305, 0.0262, 0.0410, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 00:32:46,833 INFO [train.py:935] (3/4) Epoch 20, validation: loss=0.1756, simple_loss=0.2756, pruned_loss=0.03779, over 944034.00 frames. 
+2023-02-07 00:32:46,834 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-07 00:32:48,394 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:32:48,562 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5466, 1.9525, 3.0746, 1.4183, 2.2971, 2.0697, 1.7626, 2.2232], + device='cuda:3'), covar=tensor([0.1831, 0.2590, 0.0844, 0.4434, 0.1880, 0.3113, 0.2168, 0.2316], + device='cuda:3'), in_proj_covar=tensor([0.0523, 0.0595, 0.0558, 0.0638, 0.0647, 0.0598, 0.0531, 0.0635], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 00:32:51,796 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.571e+02 2.420e+02 3.007e+02 3.801e+02 6.408e+02, threshold=6.014e+02, percent-clipped=4.0 +2023-02-07 00:33:22,164 INFO [train.py:901] (3/4) Epoch 20, batch 3050, loss[loss=0.2406, simple_loss=0.3141, pruned_loss=0.08349, over 7249.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2909, pruned_loss=0.06463, over 1606973.75 frames. ], batch size: 16, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:33:40,595 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=156652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:33:57,463 INFO [train.py:901] (3/4) Epoch 20, batch 3100, loss[loss=0.2324, simple_loss=0.3181, pruned_loss=0.07342, over 8505.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2909, pruned_loss=0.0646, over 1607669.49 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:34:02,290 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.428e+02 2.992e+02 3.732e+02 8.006e+02, threshold=5.985e+02, percent-clipped=5.0 +2023-02-07 00:34:09,238 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:34:31,696 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 00:34:31,974 INFO [train.py:901] (3/4) Epoch 20, batch 3150, loss[loss=0.2411, simple_loss=0.3181, pruned_loss=0.08204, over 8342.00 frames. ], tot_loss[loss=0.2117, simple_loss=0.2925, pruned_loss=0.06546, over 1614405.16 frames. ], batch size: 26, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:01,272 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:01,309 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=156767.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:07,244 INFO [train.py:901] (3/4) Epoch 20, batch 3200, loss[loss=0.2257, simple_loss=0.3208, pruned_loss=0.06534, over 8320.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2922, pruned_loss=0.06456, over 1612810.97 frames. ], batch size: 25, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:11,880 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.830e+02 2.338e+02 2.875e+02 3.612e+02 1.133e+03, threshold=5.749e+02, percent-clipped=4.0 +2023-02-07 00:35:24,621 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. 
limit=2.0 +2023-02-07 00:35:26,504 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156803.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:35:41,814 INFO [train.py:901] (3/4) Epoch 20, batch 3250, loss[loss=0.2377, simple_loss=0.3199, pruned_loss=0.07773, over 8460.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2908, pruned_loss=0.06402, over 1612999.40 frames. ], batch size: 29, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:35:43,290 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:36:17,056 INFO [train.py:901] (3/4) Epoch 20, batch 3300, loss[loss=0.209, simple_loss=0.2803, pruned_loss=0.06883, over 7657.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2912, pruned_loss=0.06388, over 1615427.55 frames. ], batch size: 19, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:36:21,772 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.341e+02 2.967e+02 3.887e+02 7.432e+02, threshold=5.934e+02, percent-clipped=7.0 +2023-02-07 00:36:32,803 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0921, 1.5702, 1.7268, 1.4091, 0.9467, 1.5121, 1.8032, 1.5386], + device='cuda:3'), covar=tensor([0.0483, 0.1243, 0.1649, 0.1418, 0.0613, 0.1470, 0.0675, 0.0629], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0100, 0.0161, 0.0112, 0.0141], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:36:35,550 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=156903.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:36:51,529 INFO [train.py:901] (3/4) Epoch 20, batch 3350, loss[loss=0.2061, simple_loss=0.292, pruned_loss=0.06012, over 8181.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2915, pruned_loss=0.06405, over 1612454.97 frames. ], batch size: 23, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:37:05,904 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 00:37:07,172 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=156949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:37:25,819 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=156974.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:37:27,098 INFO [train.py:901] (3/4) Epoch 20, batch 3400, loss[loss=0.1954, simple_loss=0.2877, pruned_loss=0.05152, over 8290.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.292, pruned_loss=0.06447, over 1609640.52 frames. 
], batch size: 23, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:37:31,899 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.508e+02 3.011e+02 3.882e+02 8.239e+02, threshold=6.022e+02, percent-clipped=6.0 +2023-02-07 00:37:49,723 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2447, 1.2982, 1.6492, 1.1829, 0.7287, 1.3727, 1.2482, 1.1455], + device='cuda:3'), covar=tensor([0.0596, 0.1320, 0.1699, 0.1485, 0.0558, 0.1511, 0.0690, 0.0665], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0158, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:37:59,125 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9489, 1.4965, 1.7376, 1.3212, 0.9098, 1.5065, 1.7388, 1.4381], + device='cuda:3'), covar=tensor([0.0523, 0.1222, 0.1658, 0.1446, 0.0610, 0.1445, 0.0672, 0.0645], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0161, 0.0112, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 00:38:01,290 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:03,918 INFO [train.py:901] (3/4) Epoch 20, batch 3450, loss[loss=0.1738, simple_loss=0.2508, pruned_loss=0.04837, over 7790.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2905, pruned_loss=0.06394, over 1609947.24 frames. ], batch size: 19, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:38:05,354 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:11,405 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6557, 1.6563, 2.3460, 1.5952, 1.2321, 2.2521, 0.3963, 1.3237], + device='cuda:3'), covar=tensor([0.1967, 0.1326, 0.0337, 0.1216, 0.2932, 0.0441, 0.2239, 0.1363], + device='cuda:3'), in_proj_covar=tensor([0.0184, 0.0191, 0.0123, 0.0216, 0.0265, 0.0132, 0.0165, 0.0186], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 00:38:18,902 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:38:38,200 INFO [train.py:901] (3/4) Epoch 20, batch 3500, loss[loss=0.207, simple_loss=0.3021, pruned_loss=0.05592, over 8250.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2915, pruned_loss=0.06458, over 1608178.26 frames. ], batch size: 24, lr: 3.80e-03, grad_scale: 8.0 +2023-02-07 00:38:43,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.548e+02 3.004e+02 3.939e+02 7.448e+02, threshold=6.007e+02, percent-clipped=9.0 +2023-02-07 00:39:02,219 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 00:39:03,072 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 00:39:13,060 INFO [train.py:901] (3/4) Epoch 20, batch 3550, loss[loss=0.2454, simple_loss=0.3247, pruned_loss=0.08306, over 8547.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2922, pruned_loss=0.06457, over 1613904.56 frames. 
], batch size: 49, lr: 3.80e-03, grad_scale: 8.0
+2023-02-07 00:39:37,686 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157160.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:39:48,298 INFO [train.py:901] (3/4) Epoch 20, batch 3600, loss[loss=0.2453, simple_loss=0.3197, pruned_loss=0.0854, over 8195.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.292, pruned_loss=0.06429, over 1619354.85 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:39:53,036 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.446e+02 2.923e+02 3.668e+02 9.434e+02, threshold=5.847e+02, percent-clipped=4.0
+2023-02-07 00:40:10,601 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0
+2023-02-07 00:40:15,595 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1907, 1.3203, 1.5385, 1.2874, 0.7802, 1.3400, 1.2250, 1.0758],
+ device='cuda:3'), covar=tensor([0.0566, 0.1314, 0.1660, 0.1405, 0.0540, 0.1519, 0.0705, 0.0677],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0100, 0.0161, 0.0112, 0.0141],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-07 00:40:24,180 INFO [train.py:901] (3/4) Epoch 20, batch 3650, loss[loss=0.2597, simple_loss=0.3476, pruned_loss=0.08594, over 8257.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.293, pruned_loss=0.065, over 1621699.37 frames. ], batch size: 24, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:40:24,362 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157226.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:40:38,564 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157247.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:40:42,194 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0
+2023-02-07 00:40:58,619 INFO [train.py:901] (3/4) Epoch 20, batch 3700, loss[loss=0.2159, simple_loss=0.2986, pruned_loss=0.06663, over 8143.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2918, pruned_loss=0.06493, over 1610632.10 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:41:03,188 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.545e+02 3.038e+02 3.849e+02 9.039e+02, threshold=6.076e+02, percent-clipped=6.0
+2023-02-07 00:41:05,242 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-07 00:41:33,573 INFO [train.py:901] (3/4) Epoch 20, batch 3750, loss[loss=0.202, simple_loss=0.2903, pruned_loss=0.05683, over 8256.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2913, pruned_loss=0.06448, over 1611541.04 frames. ], batch size: 24, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:41:58,956 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157362.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:05,447 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:06,809 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157374.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:07,964 INFO [train.py:901] (3/4) Epoch 20, batch 3800, loss[loss=0.2442, simple_loss=0.3294, pruned_loss=0.07953, over 8186.00 frames. ], tot_loss[loss=0.21, simple_loss=0.291, pruned_loss=0.0645, over 1609880.54 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:42:12,517 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.302e+02 2.981e+02 3.884e+02 7.104e+02, threshold=5.962e+02, percent-clipped=4.0
+2023-02-07 00:42:25,022 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157400.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:42:29,223 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.90 vs. limit=5.0
+2023-02-07 00:42:39,605 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157422.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:42:42,787 INFO [train.py:901] (3/4) Epoch 20, batch 3850, loss[loss=0.2284, simple_loss=0.3108, pruned_loss=0.07306, over 8546.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.291, pruned_loss=0.06494, over 1609511.42 frames. ], batch size: 34, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:43:05,834 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1790, 3.0395, 2.8570, 1.5678, 2.7983, 2.9365, 2.7447, 2.7424],
+ device='cuda:3'), covar=tensor([0.1115, 0.0866, 0.1368, 0.4432, 0.1103, 0.1276, 0.1699, 0.1021],
+ device='cuda:3'), in_proj_covar=tensor([0.0511, 0.0421, 0.0426, 0.0526, 0.0415, 0.0428, 0.0413, 0.0371],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 00:43:09,760 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-07 00:43:17,668 INFO [train.py:901] (3/4) Epoch 20, batch 3900, loss[loss=0.2085, simple_loss=0.296, pruned_loss=0.06045, over 8199.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2917, pruned_loss=0.0654, over 1614012.37 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:43:21,792 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157482.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:22,210 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.513e+02 3.153e+02 3.900e+02 7.255e+02, threshold=6.305e+02, percent-clipped=5.0
+2023-02-07 00:43:24,996 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157487.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:37,148 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:39,410 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157507.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:43:43,558 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2229, 1.1142, 3.3580, 1.0407, 2.9400, 2.8141, 3.0934, 2.9717],
+ device='cuda:3'), covar=tensor([0.0808, 0.4651, 0.0900, 0.4512, 0.1480, 0.1195, 0.0824, 0.0940],
+ device='cuda:3'), in_proj_covar=tensor([0.0612, 0.0635, 0.0687, 0.0616, 0.0697, 0.0603, 0.0599, 0.0665],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 00:43:51,268 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157524.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:43:52,538 INFO [train.py:901] (3/4) Epoch 20, batch 3950, loss[loss=0.1727, simple_loss=0.2519, pruned_loss=0.04675, over 7792.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2914, pruned_loss=0.06546, over 1609085.45 frames. ], batch size: 19, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:44:28,451 INFO [train.py:901] (3/4) Epoch 20, batch 4000, loss[loss=0.2244, simple_loss=0.3019, pruned_loss=0.07342, over 8291.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2911, pruned_loss=0.06472, over 1609460.75 frames. ], batch size: 49, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:44:33,891 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.914e+02 2.441e+02 3.259e+02 3.960e+02 7.383e+02, threshold=6.518e+02, percent-clipped=3.0
+2023-02-07 00:44:36,132 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157586.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:44:57,788 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157618.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:44:58,434 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157619.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:45:03,493 INFO [train.py:901] (3/4) Epoch 20, batch 4050, loss[loss=0.186, simple_loss=0.2752, pruned_loss=0.04844, over 7970.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2922, pruned_loss=0.06536, over 1611348.92 frames. ], batch size: 21, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:45:15,102 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:45:38,035 INFO [train.py:901] (3/4) Epoch 20, batch 4100, loss[loss=0.2032, simple_loss=0.2883, pruned_loss=0.05906, over 8076.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2926, pruned_loss=0.06513, over 1616362.25 frames. ], batch size: 21, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:45:42,588 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.673e+02 2.468e+02 3.178e+02 4.268e+02 8.149e+02, threshold=6.355e+02, percent-clipped=4.0
+2023-02-07 00:46:07,478 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:12,790 INFO [train.py:901] (3/4) Epoch 20, batch 4150, loss[loss=0.2125, simple_loss=0.3046, pruned_loss=0.06014, over 8319.00 frames. ], tot_loss[loss=0.2115, simple_loss=0.2928, pruned_loss=0.06506, over 1619811.31 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:46:25,108 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157743.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:25,658 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:40,586 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157766.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:46:41,946 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157768.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:46:45,980 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1163, 1.8762, 2.5280, 2.0870, 2.4467, 2.1887, 1.8969, 1.2868],
+ device='cuda:3'), covar=tensor([0.5228, 0.4801, 0.1786, 0.3284, 0.2350, 0.2933, 0.1926, 0.4992],
+ device='cuda:3'), in_proj_covar=tensor([0.0934, 0.0966, 0.0788, 0.0931, 0.0984, 0.0882, 0.0737, 0.0815],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-07 00:46:46,942 INFO [train.py:901] (3/4) Epoch 20, batch 4200, loss[loss=0.2576, simple_loss=0.3293, pruned_loss=0.09294, over 8470.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2921, pruned_loss=0.06475, over 1616044.84 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:46:52,350 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.385e+02 2.811e+02 3.577e+02 7.269e+02, threshold=5.621e+02, percent-clipped=2.0
+2023-02-07 00:47:08,774 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-07 00:47:10,323 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5561, 1.3610, 4.7029, 1.8389, 4.1433, 3.8901, 4.2643, 4.1023],
+ device='cuda:3'), covar=tensor([0.0579, 0.5293, 0.0521, 0.3960, 0.1099, 0.0973, 0.0625, 0.0690],
+ device='cuda:3'), in_proj_covar=tensor([0.0619, 0.0640, 0.0694, 0.0622, 0.0703, 0.0610, 0.0605, 0.0670],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 00:47:23,327 INFO [train.py:901] (3/4) Epoch 20, batch 4250, loss[loss=0.1922, simple_loss=0.2815, pruned_loss=0.05145, over 8548.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2912, pruned_loss=0.06402, over 1618362.59 frames. ], batch size: 31, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:47:26,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8085, 1.3568, 1.6003, 1.2590, 0.8786, 1.3764, 1.6529, 1.5484],
+ device='cuda:3'), covar=tensor([0.0560, 0.1299, 0.1790, 0.1532, 0.0616, 0.1535, 0.0714, 0.0649],
+ device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0159, 0.0100, 0.0162, 0.0113, 0.0142],
+ device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007],
+ device='cuda:3')
+2023-02-07 00:47:28,293 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:32,306 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-07 00:47:46,380 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157859.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:53,374 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157868.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:47:55,481 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=157871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:58,177 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=157875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:47:58,647 INFO [train.py:901] (3/4) Epoch 20, batch 4300, loss[loss=0.2211, simple_loss=0.2913, pruned_loss=0.07542, over 8282.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2904, pruned_loss=0.06398, over 1613494.11 frames. ], batch size: 23, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:48:02,059 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157881.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:48:03,194 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.270e+02 2.745e+02 3.400e+02 8.203e+02, threshold=5.491e+02, percent-clipped=7.0
+2023-02-07 00:48:15,326 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=157900.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:48:17,997 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1262, 1.9398, 2.8973, 1.8821, 2.5056, 3.1667, 3.1194, 2.8964],
+ device='cuda:3'), covar=tensor([0.0888, 0.1349, 0.0580, 0.1551, 0.1299, 0.0237, 0.0697, 0.0412],
+ device='cuda:3'), in_proj_covar=tensor([0.0292, 0.0318, 0.0285, 0.0311, 0.0300, 0.0262, 0.0407, 0.0303],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-07 00:48:24,971 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-07 00:48:33,499 INFO [train.py:901] (3/4) Epoch 20, batch 4350, loss[loss=0.2174, simple_loss=0.2986, pruned_loss=0.0681, over 8329.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2904, pruned_loss=0.06434, over 1610990.38 frames. ], batch size: 25, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:48:36,292 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=157930.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:48:48,941 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0
+2023-02-07 00:49:04,096 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-07 00:49:08,270 INFO [train.py:901] (3/4) Epoch 20, batch 4400, loss[loss=0.1821, simple_loss=0.2633, pruned_loss=0.05039, over 8132.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2903, pruned_loss=0.06416, over 1615787.47 frames. ], batch size: 22, lr: 3.79e-03, grad_scale: 8.0
+2023-02-07 00:49:13,805 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.443e+02 2.894e+02 3.714e+02 1.238e+03, threshold=5.788e+02, percent-clipped=6.0
+2023-02-07 00:49:14,000 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=157983.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 00:49:44,325 INFO [train.py:901] (3/4) Epoch 20, batch 4450, loss[loss=0.2091, simple_loss=0.2808, pruned_loss=0.06866, over 7912.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2909, pruned_loss=0.06453, over 1616119.83 frames. ], batch size: 20, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:49:45,693 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-07 00:49:57,299 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158045.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:18,894 INFO [train.py:901] (3/4) Epoch 20, batch 4500, loss[loss=0.3174, simple_loss=0.3671, pruned_loss=0.1339, over 6626.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2912, pruned_loss=0.06464, over 1616918.59 frames. ], batch size: 72, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:50:23,590 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.382e+02 2.908e+02 3.384e+02 7.082e+02, threshold=5.816e+02, percent-clipped=5.0
+2023-02-07 00:50:27,967 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158089.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:39,145 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-07 00:50:45,180 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158114.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:45,855 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158115.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:50:53,563 INFO [train.py:901] (3/4) Epoch 20, batch 4550, loss[loss=0.1717, simple_loss=0.259, pruned_loss=0.04227, over 7930.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2911, pruned_loss=0.0648, over 1614350.26 frames. ], batch size: 20, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:51:01,051 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158137.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:51:02,913 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:51:02,963 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158140.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:51:18,409 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158162.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:51:28,236 INFO [train.py:901] (3/4) Epoch 20, batch 4600, loss[loss=0.214, simple_loss=0.2977, pruned_loss=0.06513, over 8555.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2912, pruned_loss=0.06464, over 1614902.78 frames. ], batch size: 31, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:51:32,831 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.472e+02 2.506e+02 3.217e+02 3.763e+02 8.986e+02, threshold=6.435e+02, percent-clipped=3.0
+2023-02-07 00:51:54,921 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158215.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:52:01,862 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4623, 1.4911, 1.8236, 1.2713, 1.1478, 1.8160, 0.1664, 1.1738],
+ device='cuda:3'), covar=tensor([0.1653, 0.1301, 0.0356, 0.1086, 0.2979, 0.0460, 0.2207, 0.1273],
+ device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0195, 0.0124, 0.0219, 0.0268, 0.0134, 0.0167, 0.0189],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-07 00:52:03,088 INFO [train.py:901] (3/4) Epoch 20, batch 4650, loss[loss=0.1901, simple_loss=0.2783, pruned_loss=0.05096, over 8334.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2898, pruned_loss=0.06377, over 1610278.27 frames. ], batch size: 26, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:52:12,064 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158239.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:52:30,111 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158264.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:52:37,869 INFO [train.py:901] (3/4) Epoch 20, batch 4700, loss[loss=0.2405, simple_loss=0.3142, pruned_loss=0.0834, over 8110.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2902, pruned_loss=0.06446, over 1613344.51 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:52:42,603 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.408e+02 3.012e+02 4.119e+02 1.091e+03, threshold=6.025e+02, percent-clipped=3.0
+2023-02-07 00:52:55,812 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158301.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:53:12,689 INFO [train.py:901] (3/4) Epoch 20, batch 4750, loss[loss=0.1748, simple_loss=0.2577, pruned_loss=0.04595, over 8105.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2895, pruned_loss=0.06381, over 1611602.06 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 8.0
+2023-02-07 00:53:12,915 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158326.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:53:15,564 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158330.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:53:40,915 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-07 00:53:43,656 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-07 00:53:48,267 INFO [train.py:901] (3/4) Epoch 20, batch 4800, loss[loss=0.1974, simple_loss=0.2686, pruned_loss=0.06316, over 7659.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2902, pruned_loss=0.06409, over 1613103.97 frames. ], batch size: 19, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:53:52,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.386e+02 2.729e+02 3.445e+02 7.258e+02, threshold=5.458e+02, percent-clipped=2.0
+2023-02-07 00:54:06,935 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:54:22,904 INFO [train.py:901] (3/4) Epoch 20, batch 4850, loss[loss=0.2045, simple_loss=0.2991, pruned_loss=0.05493, over 8361.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2913, pruned_loss=0.06446, over 1611225.42 frames. ], batch size: 24, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:54:23,848 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9798, 1.7012, 2.0451, 1.7808, 1.9490, 2.0244, 1.8039, 0.7428],
+ device='cuda:3'), covar=tensor([0.5151, 0.4448, 0.1810, 0.3343, 0.2358, 0.2655, 0.1722, 0.4795],
+ device='cuda:3'), in_proj_covar=tensor([0.0934, 0.0966, 0.0788, 0.0933, 0.0990, 0.0882, 0.0740, 0.0820],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-07 00:54:33,540 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-07 00:54:35,079 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5113, 1.6842, 4.5182, 1.8792, 2.5420, 5.0670, 5.1462, 4.3605],
+ device='cuda:3'), covar=tensor([0.1120, 0.1896, 0.0239, 0.2023, 0.1124, 0.0189, 0.0495, 0.0586],
+ device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0320, 0.0287, 0.0313, 0.0303, 0.0263, 0.0410, 0.0305],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-07 00:54:51,919 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0850, 1.6846, 3.2848, 1.4883, 2.2847, 3.5651, 3.7064, 3.0124],
+ device='cuda:3'), covar=tensor([0.1145, 0.1751, 0.0416, 0.2192, 0.1141, 0.0261, 0.0619, 0.0596],
+ device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0319, 0.0286, 0.0312, 0.0303, 0.0262, 0.0409, 0.0305],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-07 00:54:57,240 INFO [train.py:901] (3/4) Epoch 20, batch 4900, loss[loss=0.2256, simple_loss=0.3042, pruned_loss=0.07346, over 8298.00 frames. ], tot_loss[loss=0.2116, simple_loss=0.2924, pruned_loss=0.06539, over 1615735.30 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:55:02,468 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.481e+02 3.123e+02 4.208e+02 8.958e+02, threshold=6.246e+02, percent-clipped=7.0
+2023-02-07 00:55:03,227 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158484.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:55:32,883 INFO [train.py:901] (3/4) Epoch 20, batch 4950, loss[loss=0.2199, simple_loss=0.3019, pruned_loss=0.06899, over 8208.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2918, pruned_loss=0.06496, over 1613264.68 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:56:07,753 INFO [train.py:901] (3/4) Epoch 20, batch 5000, loss[loss=0.2094, simple_loss=0.2904, pruned_loss=0.06416, over 8024.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2913, pruned_loss=0.06491, over 1612527.14 frames. ], batch size: 22, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:56:12,217 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.361e+02 2.881e+02 3.667e+02 7.563e+02, threshold=5.761e+02, percent-clipped=2.0
+2023-02-07 00:56:12,594 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.14 vs. limit=5.0
+2023-02-07 00:56:14,507 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158586.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:56:23,828 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158599.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:56:32,813 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158611.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:56:42,908 INFO [train.py:901] (3/4) Epoch 20, batch 5050, loss[loss=0.1956, simple_loss=0.2688, pruned_loss=0.06117, over 7954.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.292, pruned_loss=0.06532, over 1612590.42 frames. ], batch size: 21, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:56:54,150 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1521, 1.9316, 2.5161, 1.6399, 1.5107, 2.4995, 1.1737, 2.0180],
+ device='cuda:3'), covar=tensor([0.2023, 0.1460, 0.0416, 0.1553, 0.2630, 0.0431, 0.2071, 0.1289],
+ device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0197, 0.0127, 0.0222, 0.0273, 0.0135, 0.0171, 0.0192],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-07 00:57:10,225 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14
+2023-02-07 00:57:17,762 INFO [train.py:901] (3/4) Epoch 20, batch 5100, loss[loss=0.1931, simple_loss=0.2721, pruned_loss=0.05707, over 7806.00 frames. ], tot_loss[loss=0.2108, simple_loss=0.2914, pruned_loss=0.06513, over 1604788.31 frames. ], batch size: 20, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:57:23,331 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.884e+02 2.670e+02 3.233e+02 3.910e+02 8.185e+02, threshold=6.466e+02, percent-clipped=7.0
+2023-02-07 00:57:23,581 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1761, 1.0210, 1.2324, 1.0791, 0.9017, 1.2587, 0.0815, 0.9393],
+ device='cuda:3'), covar=tensor([0.1557, 0.1609, 0.0564, 0.0813, 0.2969, 0.0614, 0.2219, 0.1411],
+ device='cuda:3'), in_proj_covar=tensor([0.0186, 0.0195, 0.0125, 0.0219, 0.0270, 0.0134, 0.0168, 0.0190],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-07 00:57:28,316 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7847, 2.6272, 1.9942, 2.3344, 2.2484, 1.7431, 2.1691, 2.3452],
+ device='cuda:3'), covar=tensor([0.1501, 0.0394, 0.1034, 0.0643, 0.0707, 0.1429, 0.0975, 0.0979],
+ device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0239, 0.0336, 0.0312, 0.0304, 0.0341, 0.0346, 0.0321],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-07 00:57:53,851 INFO [train.py:901] (3/4) Epoch 20, batch 5150, loss[loss=0.2328, simple_loss=0.3173, pruned_loss=0.07412, over 8486.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2908, pruned_loss=0.06476, over 1608762.02 frames. ], batch size: 28, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:58:08,321 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=158746.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:58:28,394 INFO [train.py:901] (3/4) Epoch 20, batch 5200, loss[loss=0.1867, simple_loss=0.2516, pruned_loss=0.06091, over 7697.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2896, pruned_loss=0.0643, over 1610493.25 frames. ], batch size: 18, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:58:33,213 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.433e+02 2.837e+02 3.461e+02 7.505e+02, threshold=5.673e+02, percent-clipped=2.0
+2023-02-07 00:58:41,636 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158795.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:58:46,634 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158801.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:03,969 INFO [train.py:901] (3/4) Epoch 20, batch 5250, loss[loss=0.1952, simple_loss=0.2813, pruned_loss=0.05459, over 8280.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2897, pruned_loss=0.06434, over 1613770.89 frames. ], batch size: 23, lr: 3.78e-03, grad_scale: 16.0
+2023-02-07 00:59:11,274 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-07 00:59:22,876 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=158853.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 00:59:24,328 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=158855.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:28,230 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=158861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:38,579 INFO [train.py:901] (3/4) Epoch 20, batch 5300, loss[loss=0.2043, simple_loss=0.2794, pruned_loss=0.06464, over 8086.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2892, pruned_loss=0.06393, over 1613877.76 frames. ], batch size: 21, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 00:59:41,443 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=158880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 00:59:43,352 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.336e+02 2.792e+02 3.296e+02 7.091e+02, threshold=5.585e+02, percent-clipped=2.0
+2023-02-07 01:00:13,211 INFO [train.py:901] (3/4) Epoch 20, batch 5350, loss[loss=0.168, simple_loss=0.2567, pruned_loss=0.03968, over 7802.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2878, pruned_loss=0.06336, over 1612988.05 frames. ], batch size: 20, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:00:27,517 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7090, 1.9772, 2.0225, 1.4617, 2.1574, 1.4425, 0.5529, 1.8156],
+ device='cuda:3'), covar=tensor([0.0516, 0.0331, 0.0323, 0.0464, 0.0356, 0.0869, 0.0821, 0.0265],
+ device='cuda:3'), in_proj_covar=tensor([0.0453, 0.0388, 0.0339, 0.0441, 0.0370, 0.0533, 0.0395, 0.0415],
+ out_proj_covar=tensor([1.2208e-04, 1.0214e-04, 8.9355e-05, 1.1664e-04, 9.7860e-05, 1.5158e-04,
+ 1.0671e-04, 1.1064e-04], device='cuda:3')
+2023-02-07 01:00:48,001 INFO [train.py:901] (3/4) Epoch 20, batch 5400, loss[loss=0.1857, simple_loss=0.2676, pruned_loss=0.05196, over 8084.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2877, pruned_loss=0.0635, over 1608445.01 frames. ], batch size: 21, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:00:52,647 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.394e+02 2.966e+02 3.887e+02 6.953e+02, threshold=5.932e+02, percent-clipped=4.0
+2023-02-07 01:01:09,403 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7914, 1.4513, 4.1242, 1.8916, 3.2393, 3.2417, 3.7197, 3.6344],
+ device='cuda:3'), covar=tensor([0.1422, 0.6595, 0.1114, 0.4678, 0.2378, 0.1772, 0.1153, 0.1157],
+ device='cuda:3'), in_proj_covar=tensor([0.0609, 0.0629, 0.0674, 0.0610, 0.0689, 0.0595, 0.0595, 0.0663],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 01:01:22,917 INFO [train.py:901] (3/4) Epoch 20, batch 5450, loss[loss=0.1822, simple_loss=0.2599, pruned_loss=0.05227, over 8235.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2877, pruned_loss=0.06328, over 1608796.19 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:01:34,635 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-07 01:01:57,423 INFO [train.py:901] (3/4) Epoch 20, batch 5500, loss[loss=0.194, simple_loss=0.2717, pruned_loss=0.05818, over 7967.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2887, pruned_loss=0.06357, over 1608928.03 frames. ], batch size: 21, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:02:00,064 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-07 01:02:02,832 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.298e+02 2.656e+02 3.222e+02 6.486e+02, threshold=5.312e+02, percent-clipped=1.0
+2023-02-07 01:02:05,795 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159087.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:27,308 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159117.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:31,195 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7198, 2.0110, 2.1424, 1.3616, 2.2204, 1.5909, 0.6841, 1.8299],
+ device='cuda:3'), covar=tensor([0.0566, 0.0333, 0.0250, 0.0551, 0.0331, 0.0760, 0.0847, 0.0285],
+ device='cuda:3'), in_proj_covar=tensor([0.0451, 0.0385, 0.0337, 0.0441, 0.0367, 0.0530, 0.0393, 0.0414],
+ out_proj_covar=tensor([1.2158e-04, 1.0114e-04, 8.8887e-05, 1.1675e-04, 9.7214e-05, 1.5071e-04,
+ 1.0623e-04, 1.1032e-04], device='cuda:3')
+2023-02-07 01:02:32,989 INFO [train.py:901] (3/4) Epoch 20, batch 5550, loss[loss=0.1942, simple_loss=0.2802, pruned_loss=0.05405, over 8316.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2877, pruned_loss=0.06244, over 1612931.40 frames. ], batch size: 26, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:02:41,928 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159139.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:44,193 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159142.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:46,023 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159145.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:02:53,805 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-07 01:03:08,170 INFO [train.py:901] (3/4) Epoch 20, batch 5600, loss[loss=0.2296, simple_loss=0.3129, pruned_loss=0.07317, over 8243.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2884, pruned_loss=0.06277, over 1612501.60 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:03:09,025 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159177.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:03:12,919 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.638e+02 2.419e+02 2.780e+02 3.445e+02 7.739e+02, threshold=5.561e+02, percent-clipped=2.0
+2023-02-07 01:03:23,293 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159197.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 01:03:43,999 INFO [train.py:901] (3/4) Epoch 20, batch 5650, loss[loss=0.2096, simple_loss=0.2974, pruned_loss=0.06093, over 8304.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2896, pruned_loss=0.06339, over 1611093.63 frames. ], batch size: 48, lr: 3.77e-03, grad_scale: 16.0
+2023-02-07 01:04:03,412 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159254.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:04:04,621 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-07 01:04:07,504 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159260.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:04:19,056 INFO [train.py:901] (3/4) Epoch 20, batch 5700, loss[loss=0.2238, simple_loss=0.3054, pruned_loss=0.0711, over 8456.00 frames. ], tot_loss[loss=0.2106, simple_loss=0.2916, pruned_loss=0.06477, over 1612241.80 frames. ], batch size: 25, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:04:25,333 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.576e+02 3.260e+02 4.013e+02 6.441e+02, threshold=6.520e+02, percent-clipped=4.0
+2023-02-07 01:04:42,295 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159308.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:04:45,043 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159312.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 01:04:49,923 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2740, 1.4182, 1.2491, 1.8256, 0.7150, 1.1469, 1.3129, 1.4090],
+ device='cuda:3'), covar=tensor([0.1041, 0.0844, 0.1285, 0.0566, 0.1153, 0.1556, 0.0744, 0.0790],
+ device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0212, 0.0205, 0.0247, 0.0249, 0.0207],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-07 01:04:53,104 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.92 vs. limit=2.0
+2023-02-07 01:04:54,516 INFO [train.py:901] (3/4) Epoch 20, batch 5750, loss[loss=0.2164, simple_loss=0.2977, pruned_loss=0.06752, over 8506.00 frames. ], tot_loss[loss=0.2105, simple_loss=0.2918, pruned_loss=0.06463, over 1615127.41 frames. ], batch size: 28, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:05:03,867 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-07 01:05:06,395 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.44 vs. limit=5.0
+2023-02-07 01:05:09,318 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-07 01:05:29,351 INFO [train.py:901] (3/4) Epoch 20, batch 5800, loss[loss=0.2635, simple_loss=0.3355, pruned_loss=0.09577, over 8515.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2916, pruned_loss=0.06447, over 1610076.81 frames. ], batch size: 28, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:05:35,572 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.438e+02 2.992e+02 3.849e+02 1.447e+03, threshold=5.984e+02, percent-clipped=4.0
+2023-02-07 01:06:04,888 INFO [train.py:901] (3/4) Epoch 20, batch 5850, loss[loss=0.1792, simple_loss=0.2707, pruned_loss=0.04387, over 8290.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2925, pruned_loss=0.06472, over 1613521.75 frames. ], batch size: 23, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:06:08,469 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159431.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:06:39,988 INFO [train.py:901] (3/4) Epoch 20, batch 5900, loss[loss=0.1785, simple_loss=0.2788, pruned_loss=0.03911, over 8299.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2903, pruned_loss=0.06349, over 1611385.29 frames. ], batch size: 23, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:06:45,387 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 01:06:45,624 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.454e+02 2.951e+02 3.822e+02 7.063e+02, threshold=5.901e+02, percent-clipped=2.0
+2023-02-07 01:07:04,113 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159510.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:07:08,830 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159516.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:07:12,109 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159521.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:07:15,456 INFO [train.py:901] (3/4) Epoch 20, batch 5950, loss[loss=0.255, simple_loss=0.3169, pruned_loss=0.09657, over 6897.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2913, pruned_loss=0.06387, over 1613999.34 frames. ], batch size: 71, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:07:15,837 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0
+2023-02-07 01:07:22,429 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159535.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:07:26,381 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159541.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:07:29,669 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159546.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:07:42,011 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7727, 2.0561, 2.2235, 1.3510, 2.3251, 1.5725, 0.7114, 1.9475],
+ device='cuda:3'), covar=tensor([0.0675, 0.0350, 0.0291, 0.0625, 0.0405, 0.0858, 0.0896, 0.0327],
+ device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0386, 0.0337, 0.0444, 0.0369, 0.0533, 0.0392, 0.0415],
+ out_proj_covar=tensor([1.2254e-04, 1.0147e-04, 8.8862e-05, 1.1734e-04, 9.7634e-05, 1.5151e-04,
+ 1.0602e-04, 1.1062e-04], device='cuda:3')
+2023-02-07 01:07:45,565 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159568.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 01:07:50,935 INFO [train.py:901] (3/4) Epoch 20, batch 6000, loss[loss=0.219, simple_loss=0.2866, pruned_loss=0.07571, over 8092.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2916, pruned_loss=0.06433, over 1611527.95 frames. ], batch size: 21, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:07:50,936 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-07 01:08:04,187 INFO [train.py:935] (3/4) Epoch 20, validation: loss=0.175, simple_loss=0.275, pruned_loss=0.03755, over 944034.00 frames.
+2023-02-07 01:08:04,188 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB
+2023-02-07 01:08:09,549 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.653e+02 2.504e+02 2.869e+02 3.482e+02 8.370e+02, threshold=5.739e+02, percent-clipped=5.0
+2023-02-07 01:08:15,897 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159593.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 01:08:38,922 INFO [train.py:901] (3/4) Epoch 20, batch 6050, loss[loss=0.1711, simple_loss=0.2632, pruned_loss=0.0395, over 8029.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2907, pruned_loss=0.06342, over 1613933.18 frames. ], batch size: 22, lr: 3.77e-03, grad_scale: 8.0
+2023-02-07 01:08:45,955 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159636.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:08:51,467 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3978, 1.4691, 1.3962, 1.8067, 0.7336, 1.2183, 1.2456, 1.4873],
+ device='cuda:3'), covar=tensor([0.0823, 0.0814, 0.0918, 0.0509, 0.1115, 0.1441, 0.0779, 0.0671],
+ device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0204, 0.0245, 0.0248, 0.0206],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-07 01:08:55,602 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159649.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:08:57,636 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159652.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:09:14,771 INFO [train.py:901] (3/4) Epoch 20, batch 6100, loss[loss=0.2241, simple_loss=0.3225, pruned_loss=0.06289, over 8321.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2918, pruned_loss=0.06443, over 1610201.73 frames. ], batch size: 25, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:09:20,998 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.453e+02 2.842e+02 3.745e+02 1.322e+03, threshold=5.684e+02, percent-clipped=4.0
+2023-02-07 01:09:35,012 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4970, 2.3204, 3.2205, 2.6036, 3.0529, 2.4772, 2.2345, 1.8241],
+ device='cuda:3'), covar=tensor([0.4819, 0.5039, 0.1835, 0.3382, 0.2340, 0.2962, 0.1757, 0.5284],
+ device='cuda:3'), in_proj_covar=tensor([0.0937, 0.0971, 0.0795, 0.0937, 0.0987, 0.0884, 0.0741, 0.0820],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-07 01:09:41,568 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-07 01:09:50,003 INFO [train.py:901] (3/4) Epoch 20, batch 6150, loss[loss=0.1814, simple_loss=0.2718, pruned_loss=0.04554, over 8457.00 frames. ], tot_loss[loss=0.2107, simple_loss=0.2925, pruned_loss=0.06442, over 1616904.35 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:10:18,367 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=159767.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:10:24,871 INFO [train.py:901] (3/4) Epoch 20, batch 6200, loss[loss=0.2052, simple_loss=0.2915, pruned_loss=0.05938, over 7919.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2916, pruned_loss=0.0639, over 1617185.77 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:10:30,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.429e+02 3.094e+02 3.753e+02 7.329e+02, threshold=6.188e+02, percent-clipped=3.0
+2023-02-07 01:10:31,129 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8423, 1.6766, 2.4408, 1.5460, 1.2641, 2.3390, 0.5263, 1.4287],
+ device='cuda:3'), covar=tensor([0.1675, 0.1472, 0.0368, 0.1396, 0.3138, 0.0458, 0.2456, 0.1557],
+ device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0195, 0.0125, 0.0221, 0.0270, 0.0133, 0.0169, 0.0189],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-07 01:10:43,484 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159802.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:10:55,864 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1881, 1.9367, 2.6791, 2.1503, 2.4954, 2.2223, 1.9782, 1.3866],
+ device='cuda:3'), covar=tensor([0.5637, 0.5021, 0.1856, 0.3720, 0.2595, 0.3017, 0.1996, 0.5489],
+ device='cuda:3'), in_proj_covar=tensor([0.0934, 0.0969, 0.0794, 0.0934, 0.0988, 0.0882, 0.0739, 0.0820],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-07 01:11:00,337 INFO [train.py:901] (3/4) Epoch 20, batch 6250, loss[loss=0.1992, simple_loss=0.2695, pruned_loss=0.06444, over 7227.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2907, pruned_loss=0.06344, over 1617189.81 frames. ], batch size: 16, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:11:01,219 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159827.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:11:32,486 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159873.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:11:34,374 INFO [train.py:901] (3/4) Epoch 20, batch 6300, loss[loss=0.1776, simple_loss=0.2606, pruned_loss=0.04736, over 8089.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2911, pruned_loss=0.06379, over 1615824.89 frames. ], batch size: 21, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:11:40,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.354e+02 2.951e+02 3.644e+02 9.166e+02, threshold=5.902e+02, percent-clipped=5.0
+2023-02-07 01:11:45,865 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=159892.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:12:00,084 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4528, 2.3262, 1.6734, 2.0722, 1.9840, 1.4067, 1.8940, 1.9700],
+ device='cuda:3'), covar=tensor([0.1435, 0.0408, 0.1246, 0.0638, 0.0724, 0.1625, 0.0951, 0.0945],
+ device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0238, 0.0332, 0.0310, 0.0301, 0.0338, 0.0345, 0.0316],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-07 01:12:03,495 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=159917.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:12:09,073 INFO [train.py:901] (3/4) Epoch 20, batch 6350, loss[loss=0.2083, simple_loss=0.2813, pruned_loss=0.06763, over 7539.00 frames. ], tot_loss[loss=0.2102, simple_loss=0.2916, pruned_loss=0.06441, over 1616855.91 frames. ], batch size: 18, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:12:10,588 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=159928.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:12:43,479 INFO [train.py:901] (3/4) Epoch 20, batch 6400, loss[loss=0.2429, simple_loss=0.3184, pruned_loss=0.08377, over 7093.00 frames. ], tot_loss[loss=0.2101, simple_loss=0.2918, pruned_loss=0.06425, over 1615964.83 frames. ], batch size: 71, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:12:45,737 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5813, 2.3479, 3.3039, 2.6157, 3.2374, 2.6456, 2.3332, 1.9996],
+ device='cuda:3'), covar=tensor([0.5236, 0.4911, 0.1919, 0.3544, 0.2312, 0.2778, 0.1776, 0.5171],
+ device='cuda:3'), in_proj_covar=tensor([0.0939, 0.0976, 0.0800, 0.0939, 0.0993, 0.0886, 0.0744, 0.0824],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-07 01:12:48,765 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 2.436e+02 2.995e+02 3.881e+02 8.346e+02, threshold=5.989e+02, percent-clipped=6.0
+2023-02-07 01:12:55,755 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=159993.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:13:16,788 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160023.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:13:18,630 INFO [train.py:901] (3/4) Epoch 20, batch 6450, loss[loss=0.2244, simple_loss=0.3053, pruned_loss=0.07172, over 7524.00 frames. ], tot_loss[loss=0.2099, simple_loss=0.2914, pruned_loss=0.06421, over 1610682.46 frames. ], batch size: 18, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:13:34,496 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160048.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:13:49,644 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0
+2023-02-07 01:13:53,995 INFO [train.py:901] (3/4) Epoch 20, batch 6500, loss[loss=0.2268, simple_loss=0.3116, pruned_loss=0.07101, over 8464.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2914, pruned_loss=0.06433, over 1613759.08 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:13:54,365 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0
+2023-02-07 01:13:55,562 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2763, 2.0354, 1.5470, 1.9357, 1.7188, 1.3227, 1.6108, 1.6712],
+ device='cuda:3'), covar=tensor([0.1406, 0.0529, 0.1491, 0.0493, 0.0719, 0.1689, 0.0946, 0.0905],
+ device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0237, 0.0334, 0.0310, 0.0302, 0.0338, 0.0345, 0.0317],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-07 01:13:59,464 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.613e+02 3.061e+02 4.120e+02 1.100e+03, threshold=6.122e+02, percent-clipped=8.0
+2023-02-07 01:14:16,523 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160108.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:14:20,060 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7410, 1.7848, 2.2852, 1.5349, 1.2548, 2.2231, 0.4200, 1.3971],
+ device='cuda:3'), covar=tensor([0.1961, 0.1181, 0.0380, 0.1218, 0.2842, 0.0436, 0.2339, 0.1347],
+ device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0195, 0.0126, 0.0221, 0.0271, 0.0133, 0.0169, 0.0189],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-07 01:14:24,659 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0363, 2.2239, 1.8593, 2.8301, 1.4002, 1.5671, 1.9867, 2.2142],
+ device='cuda:3'), covar=tensor([0.0737, 0.0764, 0.0890, 0.0389, 0.1168, 0.1391, 0.0971, 0.0777],
+ device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0212, 0.0203, 0.0246, 0.0248, 0.0206],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-07 01:14:29,720 INFO [train.py:901] (3/4) Epoch 20, batch 6550, loss[loss=0.2354, simple_loss=0.3227, pruned_loss=0.07407, over 8647.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2911, pruned_loss=0.06449, over 1609996.03 frames. ], batch size: 34, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:14:53,086 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-07 01:15:05,564 INFO [train.py:901] (3/4) Epoch 20, batch 6600, loss[loss=0.2241, simple_loss=0.3102, pruned_loss=0.06905, over 8502.00 frames. ], tot_loss[loss=0.2095, simple_loss=0.2911, pruned_loss=0.06401, over 1615460.09 frames. ], batch size: 29, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:15:10,796 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.488e+02 3.067e+02 3.982e+02 8.719e+02, threshold=6.134e+02, percent-clipped=3.0
+2023-02-07 01:15:12,115 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-07 01:15:33,423 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:15:39,331 INFO [train.py:901] (3/4) Epoch 20, batch 6650, loss[loss=0.217, simple_loss=0.2975, pruned_loss=0.06823, over 8459.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2913, pruned_loss=0.06377, over 1617188.84 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:16:12,499 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160272.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:16:15,124 INFO [train.py:901] (3/4) Epoch 20, batch 6700, loss[loss=0.1903, simple_loss=0.2739, pruned_loss=0.0533, over 8085.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2909, pruned_loss=0.06379, over 1617883.41 frames. ], batch size: 21, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:16:20,313 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0
+2023-02-07 01:16:20,502 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.299e+02 2.819e+02 3.357e+02 8.975e+02, threshold=5.638e+02, percent-clipped=4.0
+2023-02-07 01:16:48,911 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160325.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 01:16:49,340 INFO [train.py:901] (3/4) Epoch 20, batch 6750, loss[loss=0.2956, simple_loss=0.3606, pruned_loss=0.1153, over 8491.00 frames. ], tot_loss[loss=0.2104, simple_loss=0.2918, pruned_loss=0.06447, over 1612291.03 frames. ], batch size: 26, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:16:53,604 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160332.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:17:09,921 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-07 01:17:16,340 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160364.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:17:22,340 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-07 01:17:23,988 INFO [train.py:901] (3/4) Epoch 20, batch 6800, loss[loss=0.2077, simple_loss=0.295, pruned_loss=0.06022, over 8032.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2911, pruned_loss=0.0639, over 1613178.50 frames. ], batch size: 22, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:17:28,103 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-07 01:17:29,319 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.510e+02 3.096e+02 3.947e+02 9.727e+02, threshold=6.192e+02, percent-clipped=5.0
+2023-02-07 01:17:32,259 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160387.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:17:33,656 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:17:59,210 INFO [train.py:901] (3/4) Epoch 20, batch 6850, loss[loss=0.1855, simple_loss=0.2719, pruned_loss=0.04951, over 7968.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2899, pruned_loss=0.06367, over 1607803.69 frames. ], batch size: 21, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:18:05,667 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0
+2023-02-07 01:18:18,941 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4148, 2.6893, 2.2661, 3.7807, 1.7159, 2.2513, 2.4446, 2.9410],
+ device='cuda:3'), covar=tensor([0.0726, 0.0845, 0.0835, 0.0345, 0.1128, 0.1192, 0.1005, 0.0681],
+ device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0197, 0.0245, 0.0213, 0.0205, 0.0247, 0.0250, 0.0207],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-07 01:18:19,449 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-07 01:18:34,189 INFO [train.py:901] (3/4) Epoch 20, batch 6900, loss[loss=0.1939, simple_loss=0.2758, pruned_loss=0.056, over 7924.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2893, pruned_loss=0.06317, over 1608288.04 frames. ], batch size: 20, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:18:39,573 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.333e+02 2.912e+02 3.495e+02 9.213e+02, threshold=5.824e+02, percent-clipped=3.0
+2023-02-07 01:19:04,551 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0
+2023-02-07 01:19:08,580 INFO [train.py:901] (3/4) Epoch 20, batch 6950, loss[loss=0.3009, simple_loss=0.3551, pruned_loss=0.1234, over 8475.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2893, pruned_loss=0.0629, over 1613306.01 frames. ], batch size: 27, lr: 3.76e-03, grad_scale: 8.0
+2023-02-07 01:19:12,204 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1683, 1.0430, 1.2888, 0.9986, 0.9417, 1.3321, 0.0496, 0.8824],
+ device='cuda:3'), covar=tensor([0.1594, 0.1367, 0.0464, 0.0844, 0.2756, 0.0510, 0.2220, 0.1254],
+ device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0195, 0.0126, 0.0222, 0.0271, 0.0134, 0.0169, 0.0189],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-07 01:19:16,603 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-07 01:19:30,214 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-07 01:19:31,076 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2496, 2.1253, 1.5735, 1.8192, 1.6724, 1.3683, 1.6236, 1.6217],
+ device='cuda:3'), covar=tensor([0.1429, 0.0447, 0.1329, 0.0637, 0.0813, 0.1616, 0.0990, 0.0915],
+ device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0238, 0.0335, 0.0311, 0.0303, 0.0340, 0.0347, 0.0319],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-07 01:19:42,958 INFO [train.py:901] (3/4) Epoch 20, batch 7000, loss[loss=0.2164, simple_loss=0.2859, pruned_loss=0.07347, over 7810.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2881, pruned_loss=0.06249, over 1613928.43 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:19:48,348 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.497e+02 2.987e+02 3.377e+02 5.985e+02, threshold=5.974e+02, percent-clipped=1.0
+2023-02-07 01:19:49,239 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3772, 2.1368, 2.7895, 2.2984, 2.7846, 2.3427, 2.1362, 1.5378],
+ device='cuda:3'), covar=tensor([0.5047, 0.4964, 0.1971, 0.3723, 0.2379, 0.3042, 0.1865, 0.5677],
+ device='cuda:3'), in_proj_covar=tensor([0.0927, 0.0966, 0.0792, 0.0929, 0.0982, 0.0879, 0.0738, 0.0817],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-07 01:19:52,055 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160588.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:20:09,568 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160613.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:20:18,198 INFO [train.py:901] (3/4) Epoch 20, batch 7050, loss[loss=0.21, simple_loss=0.2794, pruned_loss=0.07033, over 5113.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2874, pruned_loss=0.06247, over 1607737.82 frames. ], batch size: 11, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:20:24,706 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0
+2023-02-07 01:20:30,580 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=160643.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:20:48,809 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=160668.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:20:49,342 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=160669.0, num_to_drop=1, layers_to_drop={0}
+2023-02-07 01:20:53,995 INFO [train.py:901] (3/4) Epoch 20, batch 7100, loss[loss=0.2, simple_loss=0.2855, pruned_loss=0.05723, over 8087.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2875, pruned_loss=0.06274, over 1601602.75 frames. ], batch size: 21, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:20:59,623 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.520e+02 2.814e+02 3.523e+02 7.232e+02, threshold=5.628e+02, percent-clipped=2.0
+2023-02-07 01:21:02,177 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0
+2023-02-07 01:21:29,432 INFO [train.py:901] (3/4) Epoch 20, batch 7150, loss[loss=0.2079, simple_loss=0.2843, pruned_loss=0.06575, over 7923.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2879, pruned_loss=0.06303, over 1600879.09 frames. ], batch size: 20, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:21:30,763 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5781, 4.6074, 4.1382, 2.1091, 4.0878, 4.1785, 4.1740, 3.9947],
+ device='cuda:3'), covar=tensor([0.0653, 0.0485, 0.0939, 0.4349, 0.0818, 0.1064, 0.1178, 0.0975],
+ device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0432, 0.0435, 0.0536, 0.0424, 0.0440, 0.0423, 0.0381],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 01:21:49,542 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0
+2023-02-07 01:22:04,615 INFO [train.py:901] (3/4) Epoch 20, batch 7200, loss[loss=0.1924, simple_loss=0.2639, pruned_loss=0.06049, over 7804.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2895, pruned_loss=0.06388, over 1606682.71 frames. ], batch size: 19, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:22:09,775 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.432e+02 3.066e+02 3.972e+02 8.502e+02, threshold=6.132e+02, percent-clipped=3.0
+2023-02-07 01:22:09,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=160784.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 01:22:18,950 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-02-07 01:22:22,742 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0595, 1.6378, 1.3954, 1.5661, 1.3161, 1.2293, 1.3741, 1.2863],
+ device='cuda:3'), covar=tensor([0.0998, 0.0442, 0.1285, 0.0477, 0.0741, 0.1516, 0.0774, 0.0755],
+ device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0235, 0.0332, 0.0306, 0.0301, 0.0336, 0.0344, 0.0317],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-07 01:22:39,217 INFO [train.py:901] (3/4) Epoch 20, batch 7250, loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.05871, over 8128.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2889, pruned_loss=0.0639, over 1605378.43 frames. ], batch size: 22, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:23:13,935 INFO [train.py:901] (3/4) Epoch 20, batch 7300, loss[loss=0.1817, simple_loss=0.2642, pruned_loss=0.04964, over 8075.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2896, pruned_loss=0.06369, over 1607464.61 frames. ], batch size: 21, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:23:19,310 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.519e+02 2.885e+02 3.982e+02 8.183e+02, threshold=5.771e+02, percent-clipped=5.0
+2023-02-07 01:23:28,337 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4664, 1.8355, 1.8923, 1.1722, 1.9826, 1.4182, 0.4108, 1.7493],
+ device='cuda:3'), covar=tensor([0.0556, 0.0338, 0.0295, 0.0556, 0.0358, 0.0891, 0.0872, 0.0264],
+ device='cuda:3'), in_proj_covar=tensor([0.0450, 0.0384, 0.0339, 0.0440, 0.0369, 0.0534, 0.0392, 0.0412],
+ out_proj_covar=tensor([1.2107e-04, 1.0064e-04, 8.9304e-05, 1.1617e-04, 9.7327e-05, 1.5183e-04,
+ 1.0604e-04, 1.0947e-04], device='cuda:3')
+2023-02-07 01:23:44,248 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160919.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:23:48,778 INFO [train.py:901] (3/4) Epoch 20, batch 7350, loss[loss=0.23, simple_loss=0.313, pruned_loss=0.07347, over 8611.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2887, pruned_loss=0.0631, over 1606002.62 frames. ], batch size: 34, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:23:51,525 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8358, 1.3972, 3.9957, 1.4551, 3.4797, 3.3004, 3.6361, 3.4871],
+ device='cuda:3'), covar=tensor([0.0610, 0.4677, 0.0558, 0.4132, 0.1181, 0.1024, 0.0623, 0.0745],
+ device='cuda:3'), in_proj_covar=tensor([0.0612, 0.0630, 0.0679, 0.0615, 0.0696, 0.0594, 0.0595, 0.0665],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 01:24:16,151 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-07 01:24:20,003 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0
+2023-02-07 01:24:24,317 INFO [train.py:901] (3/4) Epoch 20, batch 7400, loss[loss=0.1921, simple_loss=0.2621, pruned_loss=0.06102, over 6355.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2878, pruned_loss=0.06219, over 1606324.42 frames. ], batch size: 14, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:24:29,887 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7141, 5.8760, 5.1575, 2.4566, 5.2466, 5.5449, 5.3966, 5.3247],
+ device='cuda:3'), covar=tensor([0.0524, 0.0421, 0.0968, 0.4388, 0.0677, 0.0747, 0.1170, 0.0633],
+ device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0432, 0.0433, 0.0536, 0.0427, 0.0438, 0.0421, 0.0380],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-07 01:24:29,932 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=160983.0, num_to_drop=0, layers_to_drop=set()
+2023-02-07 01:24:30,419 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.344e+02 3.002e+02 3.673e+02 6.079e+02, threshold=6.004e+02, percent-clipped=1.0
+2023-02-07 01:24:37,303 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-07 01:24:59,976 INFO [train.py:901] (3/4) Epoch 20, batch 7450, loss[loss=0.2, simple_loss=0.2852, pruned_loss=0.05745, over 8461.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2868, pruned_loss=0.06112, over 1608752.71 frames. ], batch size: 25, lr: 3.75e-03, grad_scale: 8.0
+2023-02-07 01:25:10,081 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161040.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 01:25:10,606 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7739, 1.6177, 2.8946, 1.3397, 2.1906, 3.0555, 3.2397, 2.5702],
+ device='cuda:3'), covar=tensor([0.1175, 0.1542, 0.0373, 0.2196, 0.0906, 0.0338, 0.0693, 0.0619],
+ device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0319, 0.0284, 0.0313, 0.0303, 0.0261, 0.0410, 0.0301],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-07 01:25:16,129 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-07 01:25:28,627 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161065.0, num_to_drop=1, layers_to_drop={1}
+2023-02-07 01:25:35,936 INFO [train.py:901] (3/4) Epoch 20, batch 7500, loss[loss=0.2437, simple_loss=0.3177, pruned_loss=0.08487, over 7154.00 frames.
], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06198, over 1606682.97 frames. ], batch size: 72, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:25:41,428 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.792e+02 2.441e+02 3.010e+02 3.756e+02 8.900e+02, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:26:11,145 INFO [train.py:901] (3/4) Epoch 20, batch 7550, loss[loss=0.2251, simple_loss=0.294, pruned_loss=0.07813, over 7426.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2883, pruned_loss=0.06234, over 1609891.26 frames. ], batch size: 17, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:23,187 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 01:26:46,339 INFO [train.py:901] (3/4) Epoch 20, batch 7600, loss[loss=0.1977, simple_loss=0.2811, pruned_loss=0.05719, over 8107.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.289, pruned_loss=0.06265, over 1608808.85 frames. ], batch size: 23, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:26:51,651 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.460e+02 3.037e+02 4.113e+02 9.859e+02, threshold=6.074e+02, percent-clipped=9.0 +2023-02-07 01:26:57,672 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5348, 2.1618, 3.1496, 1.6927, 1.6050, 3.0425, 0.8274, 2.0432], + device='cuda:3'), covar=tensor([0.1672, 0.1256, 0.0317, 0.1873, 0.2918, 0.0611, 0.2334, 0.1505], + device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0194, 0.0125, 0.0220, 0.0269, 0.0134, 0.0168, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 01:27:20,295 INFO [train.py:901] (3/4) Epoch 20, batch 7650, loss[loss=0.1647, simple_loss=0.2499, pruned_loss=0.03974, over 7697.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.289, pruned_loss=0.06266, over 1609526.01 frames. ], batch size: 18, lr: 3.75e-03, grad_scale: 8.0 +2023-02-07 01:27:25,754 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:36,306 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:45,609 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:27:54,270 INFO [train.py:901] (3/4) Epoch 20, batch 7700, loss[loss=0.1871, simple_loss=0.2726, pruned_loss=0.05075, over 8333.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2909, pruned_loss=0.0638, over 1612187.20 frames. ], batch size: 26, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:27:59,457 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.411e+02 2.987e+02 3.572e+02 6.786e+02, threshold=5.975e+02, percent-clipped=3.0 +2023-02-07 01:28:25,738 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 01:28:29,788 INFO [train.py:901] (3/4) Epoch 20, batch 7750, loss[loss=0.2096, simple_loss=0.2999, pruned_loss=0.05961, over 8296.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.29, pruned_loss=0.06385, over 1607927.98 frames. 
], batch size: 23, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:28:30,572 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:05,271 INFO [train.py:901] (3/4) Epoch 20, batch 7800, loss[loss=0.2269, simple_loss=0.3137, pruned_loss=0.07002, over 8699.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2904, pruned_loss=0.0636, over 1613308.87 frames. ], batch size: 49, lr: 3.75e-03, grad_scale: 16.0 +2023-02-07 01:29:06,855 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:10,616 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.429e+02 2.909e+02 3.732e+02 6.331e+02, threshold=5.818e+02, percent-clipped=2.0 +2023-02-07 01:29:30,832 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0 +2023-02-07 01:29:34,443 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:29:39,043 INFO [train.py:901] (3/4) Epoch 20, batch 7850, loss[loss=0.1939, simple_loss=0.2773, pruned_loss=0.05524, over 7793.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2898, pruned_loss=0.06359, over 1613007.03 frames. ], batch size: 19, lr: 3.74e-03, grad_scale: 16.0 +2023-02-07 01:29:49,660 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:30:12,438 INFO [train.py:901] (3/4) Epoch 20, batch 7900, loss[loss=0.2114, simple_loss=0.2987, pruned_loss=0.06203, over 8516.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2899, pruned_loss=0.06381, over 1610859.12 frames. ], batch size: 26, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:30:13,739 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:30:18,875 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.320e+02 2.352e+02 2.923e+02 4.060e+02 8.940e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 01:30:45,510 INFO [train.py:901] (3/4) Epoch 20, batch 7950, loss[loss=0.2114, simple_loss=0.2942, pruned_loss=0.06436, over 8641.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2908, pruned_loss=0.06369, over 1613967.45 frames. ], batch size: 39, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:18,030 INFO [train.py:901] (3/4) Epoch 20, batch 8000, loss[loss=0.2115, simple_loss=0.2983, pruned_loss=0.06236, over 8100.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2911, pruned_loss=0.06412, over 1612185.59 frames. ], batch size: 23, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:19,439 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:23,859 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.449e+02 3.108e+02 3.740e+02 8.675e+02, threshold=6.215e+02, percent-clipped=6.0 +2023-02-07 01:31:29,415 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:31:51,325 INFO [train.py:901] (3/4) Epoch 20, batch 8050, loss[loss=0.267, simple_loss=0.338, pruned_loss=0.09802, over 7078.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2896, pruned_loss=0.06327, over 1604803.51 frames. 
], batch size: 71, lr: 3.74e-03, grad_scale: 8.0 +2023-02-07 01:31:57,140 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:25,034 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 01:32:32,190 INFO [train.py:901] (3/4) Epoch 21, batch 0, loss[loss=0.1906, simple_loss=0.2642, pruned_loss=0.05849, over 7795.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2642, pruned_loss=0.05849, over 7795.00 frames. ], batch size: 19, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:32:32,191 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 01:32:44,208 INFO [train.py:935] (3/4) Epoch 21, validation: loss=0.1763, simple_loss=0.2762, pruned_loss=0.03818, over 944034.00 frames. +2023-02-07 01:32:44,209 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-07 01:32:44,415 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161659.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:32:59,367 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 01:33:02,227 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.415e+02 2.918e+02 3.924e+02 7.413e+02, threshold=5.835e+02, percent-clipped=4.0 +2023-02-07 01:33:07,973 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:11,610 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:18,541 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9778, 1.4283, 3.1166, 1.5126, 2.7504, 2.6726, 2.8854, 2.8178], + device='cuda:3'), covar=tensor([0.0787, 0.3572, 0.0970, 0.3741, 0.1163, 0.0939, 0.0649, 0.0724], + device='cuda:3'), in_proj_covar=tensor([0.0610, 0.0623, 0.0673, 0.0608, 0.0690, 0.0590, 0.0591, 0.0656], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:33:18,588 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161708.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:19,108 INFO [train.py:901] (3/4) Epoch 21, batch 50, loss[loss=0.2562, simple_loss=0.3316, pruned_loss=0.09043, over 6692.00 frames. ], tot_loss[loss=0.2129, simple_loss=0.2923, pruned_loss=0.06673, over 364698.48 frames. ], batch size: 71, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:33:29,249 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:33:32,475 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 01:33:56,008 INFO [train.py:901] (3/4) Epoch 21, batch 100, loss[loss=0.2384, simple_loss=0.3166, pruned_loss=0.08012, over 8188.00 frames. ], tot_loss[loss=0.2127, simple_loss=0.2934, pruned_loss=0.06598, over 642771.43 frames. ], batch size: 23, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:33:57,255 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 01:33:58,658 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:34:14,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.511e+02 2.964e+02 4.065e+02 7.207e+02, threshold=5.927e+02, percent-clipped=4.0 +2023-02-07 01:34:18,655 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 01:34:30,768 INFO [train.py:901] (3/4) Epoch 21, batch 150, loss[loss=0.2042, simple_loss=0.2892, pruned_loss=0.05959, over 8686.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2925, pruned_loss=0.06481, over 859084.86 frames. ], batch size: 39, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:34:33,277 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:34:39,725 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=161822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:34:47,267 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=161833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:35:06,325 INFO [train.py:901] (3/4) Epoch 21, batch 200, loss[loss=0.2115, simple_loss=0.3026, pruned_loss=0.06019, over 8228.00 frames. ], tot_loss[loss=0.2121, simple_loss=0.293, pruned_loss=0.06558, over 1022462.16 frames. ], batch size: 22, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:35:19,121 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:35:23,716 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.494e+02 2.791e+02 3.613e+02 7.338e+02, threshold=5.582e+02, percent-clipped=1.0 +2023-02-07 01:35:41,056 INFO [train.py:901] (3/4) Epoch 21, batch 250, loss[loss=0.2105, simple_loss=0.3029, pruned_loss=0.05904, over 8343.00 frames. ], tot_loss[loss=0.2114, simple_loss=0.2921, pruned_loss=0.06532, over 1152197.25 frames. ], batch size: 25, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:35:47,951 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 01:35:57,070 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 01:36:00,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=161937.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:08,760 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:15,273 INFO [train.py:901] (3/4) Epoch 21, batch 300, loss[loss=0.2137, simple_loss=0.286, pruned_loss=0.07071, over 8315.00 frames. ], tot_loss[loss=0.2113, simple_loss=0.2924, pruned_loss=0.06507, over 1257244.23 frames. ], batch size: 25, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:36:19,027 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=161964.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:20,642 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. 
limit=2.0 +2023-02-07 01:36:26,620 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161974.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:33,765 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.407e+02 2.839e+02 3.558e+02 8.067e+02, threshold=5.678e+02, percent-clipped=5.0 +2023-02-07 01:36:36,647 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=161989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:36:51,875 INFO [train.py:901] (3/4) Epoch 21, batch 350, loss[loss=0.2583, simple_loss=0.3322, pruned_loss=0.0922, over 8495.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.291, pruned_loss=0.06379, over 1338754.49 frames. ], batch size: 26, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:36:57,892 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 01:37:25,807 INFO [train.py:901] (3/4) Epoch 21, batch 400, loss[loss=0.2118, simple_loss=0.3056, pruned_loss=0.05895, over 8441.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2906, pruned_loss=0.06407, over 1400170.25 frames. ], batch size: 25, lr: 3.65e-03, grad_scale: 8.0 +2023-02-07 01:37:44,475 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.323e+02 2.796e+02 3.394e+02 5.024e+02, threshold=5.592e+02, percent-clipped=0.0 +2023-02-07 01:37:52,930 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:02,159 INFO [train.py:901] (3/4) Epoch 21, batch 450, loss[loss=0.2069, simple_loss=0.2936, pruned_loss=0.06015, over 8026.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.2905, pruned_loss=0.06415, over 1449267.45 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:38:15,016 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 01:38:20,160 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:37,315 INFO [train.py:901] (3/4) Epoch 21, batch 500, loss[loss=0.2097, simple_loss=0.2901, pruned_loss=0.06465, over 8438.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2903, pruned_loss=0.0635, over 1491404.17 frames. ], batch size: 49, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:38:37,566 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:50,067 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:38:55,581 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.501e+02 2.975e+02 3.750e+02 9.376e+02, threshold=5.950e+02, percent-clipped=8.0 +2023-02-07 01:39:01,442 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162193.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:13,726 INFO [train.py:901] (3/4) Epoch 21, batch 550, loss[loss=0.1825, simple_loss=0.2634, pruned_loss=0.05083, over 7651.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2907, pruned_loss=0.06345, over 1523568.19 frames. 
], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:39:20,161 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162218.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:36,019 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8667, 3.7768, 3.4590, 1.7037, 3.3814, 3.4512, 3.4129, 3.2589], + device='cuda:3'), covar=tensor([0.0920, 0.0713, 0.1216, 0.5146, 0.1067, 0.1265, 0.1389, 0.0972], + device='cuda:3'), in_proj_covar=tensor([0.0515, 0.0432, 0.0431, 0.0532, 0.0421, 0.0435, 0.0414, 0.0377], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:39:42,296 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:39:48,783 INFO [train.py:901] (3/4) Epoch 21, batch 600, loss[loss=0.2355, simple_loss=0.3142, pruned_loss=0.07842, over 8287.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2906, pruned_loss=0.06393, over 1539165.04 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:39:49,653 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5275, 1.4593, 4.7526, 1.8027, 4.2468, 3.9677, 4.3158, 4.1645], + device='cuda:3'), covar=tensor([0.0566, 0.4475, 0.0435, 0.3690, 0.0936, 0.0920, 0.0541, 0.0608], + device='cuda:3'), in_proj_covar=tensor([0.0618, 0.0635, 0.0684, 0.0616, 0.0700, 0.0601, 0.0600, 0.0667], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:40:02,423 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 01:40:06,587 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.365e+02 2.932e+02 3.412e+02 7.385e+02, threshold=5.863e+02, percent-clipped=2.0 +2023-02-07 01:40:08,827 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2584, 2.5785, 3.0501, 1.5906, 3.3683, 2.0633, 1.4933, 2.0758], + device='cuda:3'), covar=tensor([0.0836, 0.0409, 0.0263, 0.0813, 0.0381, 0.0796, 0.0969, 0.0636], + device='cuda:3'), in_proj_covar=tensor([0.0447, 0.0383, 0.0335, 0.0437, 0.0367, 0.0530, 0.0387, 0.0411], + device='cuda:3'), out_proj_covar=tensor([1.2021e-04, 1.0044e-04, 8.8259e-05, 1.1543e-04, 9.6905e-05, 1.5057e-04, + 1.0474e-04, 1.0932e-04], device='cuda:3') +2023-02-07 01:40:11,435 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162292.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:40:22,966 INFO [train.py:901] (3/4) Epoch 21, batch 650, loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.0566, over 8463.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2904, pruned_loss=0.06374, over 1557183.85 frames. ], batch size: 29, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:40:59,244 INFO [train.py:901] (3/4) Epoch 21, batch 700, loss[loss=0.2106, simple_loss=0.2954, pruned_loss=0.06295, over 8095.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.289, pruned_loss=0.06308, over 1571540.48 frames. ], batch size: 21, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:41:17,771 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.443e+02 3.111e+02 4.032e+02 8.821e+02, threshold=6.222e+02, percent-clipped=5.0 +2023-02-07 01:41:34,560 INFO [train.py:901] (3/4) Epoch 21, batch 750, loss[loss=0.1918, simple_loss=0.2823, pruned_loss=0.05067, over 8331.00 frames. 
], tot_loss[loss=0.2075, simple_loss=0.2891, pruned_loss=0.06297, over 1584371.75 frames. ], batch size: 26, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:41:40,438 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 01:41:45,476 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 01:41:54,312 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 01:41:56,381 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:42:01,036 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 01:42:11,060 INFO [train.py:901] (3/4) Epoch 21, batch 800, loss[loss=0.1825, simple_loss=0.2763, pruned_loss=0.04433, over 8371.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2907, pruned_loss=0.06343, over 1594838.13 frames. ], batch size: 24, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:42:29,948 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.455e+02 2.861e+02 3.570e+02 7.084e+02, threshold=5.721e+02, percent-clipped=3.0 +2023-02-07 01:42:47,158 INFO [train.py:901] (3/4) Epoch 21, batch 850, loss[loss=0.2286, simple_loss=0.3085, pruned_loss=0.0744, over 8539.00 frames. ], tot_loss[loss=0.2094, simple_loss=0.291, pruned_loss=0.06393, over 1599913.53 frames. ], batch size: 49, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:43:01,483 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162529.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:07,213 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2786, 2.1616, 2.8466, 2.2845, 2.7500, 2.3698, 2.0895, 1.5047], + device='cuda:3'), covar=tensor([0.5343, 0.4872, 0.1853, 0.3660, 0.2228, 0.2913, 0.1925, 0.5147], + device='cuda:3'), in_proj_covar=tensor([0.0936, 0.0967, 0.0791, 0.0929, 0.0983, 0.0878, 0.0738, 0.0815], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 01:43:16,274 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:21,103 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:24,455 INFO [train.py:901] (3/4) Epoch 21, batch 900, loss[loss=0.2246, simple_loss=0.3087, pruned_loss=0.07026, over 8037.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2907, pruned_loss=0.0631, over 1609091.54 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:43:34,388 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:43:42,639 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.507e+02 2.319e+02 2.838e+02 3.637e+02 1.203e+03, threshold=5.677e+02, percent-clipped=5.0 +2023-02-07 01:43:49,236 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:44:00,581 INFO [train.py:901] (3/4) Epoch 21, batch 950, loss[loss=0.2005, simple_loss=0.2821, pruned_loss=0.05943, over 8124.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2891, pruned_loss=0.06222, over 1612486.00 frames. 
], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:44:07,073 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6620, 1.9450, 2.8628, 1.4383, 2.1450, 1.9242, 1.7456, 2.0041], + device='cuda:3'), covar=tensor([0.1868, 0.2528, 0.0948, 0.4577, 0.1800, 0.3332, 0.2245, 0.2441], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0592, 0.0550, 0.0632, 0.0639, 0.0591, 0.0528, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:44:14,218 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 01:44:18,637 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5349, 2.3121, 3.3298, 2.5882, 3.0993, 2.4987, 2.3143, 1.9527], + device='cuda:3'), covar=tensor([0.5109, 0.4952, 0.1691, 0.3676, 0.2437, 0.2838, 0.1776, 0.5224], + device='cuda:3'), in_proj_covar=tensor([0.0939, 0.0970, 0.0793, 0.0931, 0.0985, 0.0879, 0.0740, 0.0818], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 01:44:35,839 INFO [train.py:901] (3/4) Epoch 21, batch 1000, loss[loss=0.1965, simple_loss=0.2745, pruned_loss=0.05925, over 8138.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2899, pruned_loss=0.06249, over 1615832.25 frames. ], batch size: 22, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:44:48,951 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 01:44:55,205 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.438e+02 2.954e+02 4.014e+02 9.557e+02, threshold=5.908e+02, percent-clipped=3.0 +2023-02-07 01:45:01,391 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 01:45:11,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162708.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:45:12,207 INFO [train.py:901] (3/4) Epoch 21, batch 1050, loss[loss=0.2211, simple_loss=0.3045, pruned_loss=0.06881, over 8483.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.289, pruned_loss=0.06214, over 1613855.12 frames. ], batch size: 29, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:45:46,489 INFO [train.py:901] (3/4) Epoch 21, batch 1100, loss[loss=0.1888, simple_loss=0.2745, pruned_loss=0.05149, over 5909.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2886, pruned_loss=0.06221, over 1611510.90 frames. ], batch size: 13, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:46:06,018 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.501e+02 3.059e+02 3.494e+02 1.150e+03, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 01:46:14,530 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 01:46:23,789 INFO [train.py:901] (3/4) Epoch 21, batch 1150, loss[loss=0.2178, simple_loss=0.2809, pruned_loss=0.07742, over 7656.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06186, over 1612070.70 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:46:24,672 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:29,077 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-07 01:46:29,779 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 01:46:42,983 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:50,071 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=162845.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:46:59,719 INFO [train.py:901] (3/4) Epoch 21, batch 1200, loss[loss=0.1699, simple_loss=0.2442, pruned_loss=0.04779, over 7813.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.06177, over 1612201.00 frames. ], batch size: 19, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:47:09,539 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=162873.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:47:17,459 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.368e+02 3.051e+02 3.779e+02 6.869e+02, threshold=6.103e+02, percent-clipped=3.0 +2023-02-07 01:47:36,404 INFO [train.py:901] (3/4) Epoch 21, batch 1250, loss[loss=0.2251, simple_loss=0.3124, pruned_loss=0.06891, over 8281.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2889, pruned_loss=0.062, over 1615124.45 frames. ], batch size: 23, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:48:11,285 INFO [train.py:901] (3/4) Epoch 21, batch 1300, loss[loss=0.1577, simple_loss=0.2302, pruned_loss=0.04263, over 7219.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2883, pruned_loss=0.06133, over 1614843.71 frames. ], batch size: 16, lr: 3.64e-03, grad_scale: 8.0 +2023-02-07 01:48:14,770 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=162964.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:27,747 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 01:48:28,485 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.260e+02 2.727e+02 3.317e+02 5.773e+02, threshold=5.453e+02, percent-clipped=0.0 +2023-02-07 01:48:29,597 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0 +2023-02-07 01:48:30,748 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=162988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:31,454 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=162989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:48:44,803 INFO [train.py:901] (3/4) Epoch 21, batch 1350, loss[loss=0.2348, simple_loss=0.3093, pruned_loss=0.08016, over 8506.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2892, pruned_loss=0.06204, over 1617829.04 frames. ], batch size: 26, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:48:45,179 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 01:49:09,957 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.02 vs. limit=2.0 +2023-02-07 01:49:21,732 INFO [train.py:901] (3/4) Epoch 21, batch 1400, loss[loss=0.1944, simple_loss=0.2655, pruned_loss=0.06162, over 6801.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2888, pruned_loss=0.06224, over 1617057.38 frames. 
], batch size: 15, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:49:39,406 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.469e+02 3.010e+02 4.050e+02 1.060e+03, threshold=6.020e+02, percent-clipped=5.0 +2023-02-07 01:49:43,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3689, 2.7826, 2.2230, 3.9031, 1.7324, 2.1861, 2.4217, 2.8234], + device='cuda:3'), covar=tensor([0.0705, 0.0820, 0.0847, 0.0257, 0.1131, 0.1196, 0.1011, 0.0808], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0205, 0.0246, 0.0250, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 01:49:46,305 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 01:49:55,443 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:49:55,931 INFO [train.py:901] (3/4) Epoch 21, batch 1450, loss[loss=0.2436, simple_loss=0.3111, pruned_loss=0.08803, over 8454.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2884, pruned_loss=0.06221, over 1615121.02 frames. ], batch size: 27, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:50:28,463 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 01:50:32,151 INFO [train.py:901] (3/4) Epoch 21, batch 1500, loss[loss=0.2076, simple_loss=0.2911, pruned_loss=0.06205, over 8317.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2893, pruned_loss=0.06226, over 1617778.84 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:50:50,508 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.250e+02 2.722e+02 3.392e+02 6.898e+02, threshold=5.444e+02, percent-clipped=4.0 +2023-02-07 01:50:53,288 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163189.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:06,788 INFO [train.py:901] (3/4) Epoch 21, batch 1550, loss[loss=0.1988, simple_loss=0.2837, pruned_loss=0.057, over 8342.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.288, pruned_loss=0.06186, over 1617830.41 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:51:31,323 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:42,268 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=163258.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:51:42,810 INFO [train.py:901] (3/4) Epoch 21, batch 1600, loss[loss=0.24, simple_loss=0.3272, pruned_loss=0.07635, over 8209.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2893, pruned_loss=0.06224, over 1618154.69 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:51:50,625 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163269.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:52:00,874 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.380e+02 3.009e+02 4.081e+02 9.131e+02, threshold=6.018e+02, percent-clipped=6.0 +2023-02-07 01:52:14,547 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163304.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:52:16,022 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. 
limit=2.0 +2023-02-07 01:52:17,726 INFO [train.py:901] (3/4) Epoch 21, batch 1650, loss[loss=0.2282, simple_loss=0.3015, pruned_loss=0.07739, over 8095.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2905, pruned_loss=0.06316, over 1622375.46 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:52:33,941 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-07 01:52:51,343 INFO [train.py:901] (3/4) Epoch 21, batch 1700, loss[loss=0.2124, simple_loss=0.2976, pruned_loss=0.06358, over 8567.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2907, pruned_loss=0.06362, over 1621539.07 frames. ], batch size: 39, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:53:09,964 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.344e+02 2.897e+02 3.678e+02 1.033e+03, threshold=5.793e+02, percent-clipped=5.0 +2023-02-07 01:53:27,424 INFO [train.py:901] (3/4) Epoch 21, batch 1750, loss[loss=0.2166, simple_loss=0.2991, pruned_loss=0.06699, over 8078.00 frames. ], tot_loss[loss=0.21, simple_loss=0.2913, pruned_loss=0.06434, over 1624457.61 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 8.0 +2023-02-07 01:53:56,335 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:54:01,076 INFO [train.py:901] (3/4) Epoch 21, batch 1800, loss[loss=0.1859, simple_loss=0.2822, pruned_loss=0.04478, over 8192.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2897, pruned_loss=0.06326, over 1620676.09 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:54:18,722 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.661e+02 3.025e+02 4.067e+02 7.408e+02, threshold=6.049e+02, percent-clipped=6.0 +2023-02-07 01:54:18,997 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6878, 2.0932, 3.3439, 1.5222, 2.4391, 2.1809, 1.7899, 2.5225], + device='cuda:3'), covar=tensor([0.1790, 0.2662, 0.0777, 0.4449, 0.1795, 0.3083, 0.2252, 0.2209], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0592, 0.0550, 0.0633, 0.0640, 0.0589, 0.0528, 0.0628], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:54:37,342 INFO [train.py:901] (3/4) Epoch 21, batch 1850, loss[loss=0.1847, simple_loss=0.2716, pruned_loss=0.0489, over 8063.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2883, pruned_loss=0.06281, over 1620716.99 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:54:53,885 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4132, 1.7849, 1.4498, 2.8162, 1.2101, 1.2730, 1.9618, 1.9108], + device='cuda:3'), covar=tensor([0.1633, 0.1231, 0.1864, 0.0386, 0.1338, 0.2104, 0.1018, 0.1099], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0212, 0.0204, 0.0245, 0.0250, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 01:55:11,697 INFO [train.py:901] (3/4) Epoch 21, batch 1900, loss[loss=0.1984, simple_loss=0.2895, pruned_loss=0.05369, over 8508.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2865, pruned_loss=0.06165, over 1614572.30 frames. 
], batch size: 29, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:12,593 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:17,239 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:21,422 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.29 vs. limit=5.0 +2023-02-07 01:55:26,434 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 01:55:29,012 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.410e+02 2.798e+02 3.588e+02 7.290e+02, threshold=5.595e+02, percent-clipped=1.0 +2023-02-07 01:55:29,219 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:35,269 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7422, 2.0969, 3.4214, 1.8240, 1.6916, 3.3481, 0.6509, 2.0454], + device='cuda:3'), covar=tensor([0.1530, 0.1536, 0.0242, 0.1830, 0.2774, 0.0320, 0.2481, 0.1548], + device='cuda:3'), in_proj_covar=tensor([0.0187, 0.0194, 0.0126, 0.0219, 0.0270, 0.0132, 0.0168, 0.0188], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 01:55:35,900 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4143, 1.6078, 2.1277, 1.2753, 1.4619, 1.7172, 1.3971, 1.4630], + device='cuda:3'), covar=tensor([0.1960, 0.2466, 0.0931, 0.4536, 0.2011, 0.3286, 0.2445, 0.2256], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0592, 0.0550, 0.0634, 0.0639, 0.0590, 0.0528, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:55:37,702 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 01:55:40,619 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=163602.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:55:42,636 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8860, 3.3892, 1.7379, 2.5819, 2.5200, 1.6189, 2.4514, 2.8796], + device='cuda:3'), covar=tensor([0.1656, 0.0397, 0.1589, 0.0829, 0.0890, 0.1850, 0.1218, 0.0922], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0234, 0.0332, 0.0306, 0.0296, 0.0333, 0.0344, 0.0317], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 01:55:45,160 INFO [train.py:901] (3/4) Epoch 21, batch 1950, loss[loss=0.2075, simple_loss=0.2879, pruned_loss=0.06357, over 8278.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.06176, over 1615827.50 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:55:58,525 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-07 01:56:04,067 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7090, 4.6653, 4.1730, 2.1589, 4.1310, 4.3424, 4.3134, 4.2361], + device='cuda:3'), covar=tensor([0.0755, 0.0542, 0.1101, 0.4672, 0.0838, 0.0849, 0.1218, 0.0743], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0428, 0.0429, 0.0534, 0.0420, 0.0435, 0.0416, 0.0378], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:56:05,942 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 01:56:14,480 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7881, 5.8473, 5.0382, 2.6875, 5.1213, 5.6627, 5.3839, 5.4402], + device='cuda:3'), covar=tensor([0.0483, 0.0428, 0.0841, 0.3804, 0.0678, 0.0835, 0.1012, 0.0552], + device='cuda:3'), in_proj_covar=tensor([0.0513, 0.0428, 0.0429, 0.0534, 0.0421, 0.0435, 0.0416, 0.0379], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 01:56:21,641 INFO [train.py:901] (3/4) Epoch 21, batch 2000, loss[loss=0.2383, simple_loss=0.3025, pruned_loss=0.08702, over 8250.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2878, pruned_loss=0.06218, over 1619690.86 frames. ], batch size: 22, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:56:39,059 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.546e+02 3.013e+02 3.975e+02 6.874e+02, threshold=6.025e+02, percent-clipped=4.0 +2023-02-07 01:56:55,192 INFO [train.py:901] (3/4) Epoch 21, batch 2050, loss[loss=0.2284, simple_loss=0.3082, pruned_loss=0.07433, over 8292.00 frames. ], tot_loss[loss=0.2086, simple_loss=0.2898, pruned_loss=0.06366, over 1618380.60 frames. ], batch size: 23, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:57:00,616 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=163717.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:57:30,311 INFO [train.py:901] (3/4) Epoch 21, batch 2100, loss[loss=0.1886, simple_loss=0.2674, pruned_loss=0.05488, over 8080.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.06343, over 1619069.87 frames. ], batch size: 21, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:57:48,636 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.575e+02 2.946e+02 3.630e+02 8.805e+02, threshold=5.893e+02, percent-clipped=3.0 +2023-02-07 01:58:04,871 INFO [train.py:901] (3/4) Epoch 21, batch 2150, loss[loss=0.1999, simple_loss=0.2627, pruned_loss=0.06853, over 7175.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2894, pruned_loss=0.06346, over 1613645.30 frames. 
], batch size: 16, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:58:14,693 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:58:25,291 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0771, 1.3082, 1.6345, 1.3825, 1.0083, 1.4775, 1.7709, 1.4668], + device='cuda:3'), covar=tensor([0.0525, 0.1401, 0.1778, 0.1508, 0.0640, 0.1576, 0.0736, 0.0736], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0158, 0.0099, 0.0163, 0.0113, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 01:58:31,303 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 01:58:39,335 INFO [train.py:901] (3/4) Epoch 21, batch 2200, loss[loss=0.2309, simple_loss=0.2982, pruned_loss=0.08178, over 8468.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.29, pruned_loss=0.06413, over 1613143.51 frames. ], batch size: 25, lr: 3.63e-03, grad_scale: 16.0 +2023-02-07 01:58:58,248 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.475e+02 2.987e+02 3.670e+02 7.762e+02, threshold=5.973e+02, percent-clipped=3.0 +2023-02-07 01:59:15,141 INFO [train.py:901] (3/4) Epoch 21, batch 2250, loss[loss=0.1731, simple_loss=0.2601, pruned_loss=0.04303, over 8320.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.2905, pruned_loss=0.06396, over 1616006.88 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 01:59:49,152 INFO [train.py:901] (3/4) Epoch 21, batch 2300, loss[loss=0.2106, simple_loss=0.2832, pruned_loss=0.06901, over 8094.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2897, pruned_loss=0.06367, over 1617054.26 frames. ], batch size: 21, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 01:59:58,906 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=163973.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:00:08,071 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 2.361e+02 2.889e+02 3.736e+02 8.411e+02, threshold=5.778e+02, percent-clipped=4.0 +2023-02-07 02:00:17,858 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=163998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:00:26,188 INFO [train.py:901] (3/4) Epoch 21, batch 2350, loss[loss=0.2047, simple_loss=0.2817, pruned_loss=0.0638, over 7933.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2877, pruned_loss=0.06261, over 1615316.10 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:00:56,717 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7338, 1.9938, 2.1500, 1.4585, 2.2873, 1.6256, 0.8384, 1.9879], + device='cuda:3'), covar=tensor([0.0564, 0.0344, 0.0253, 0.0539, 0.0358, 0.0781, 0.0777, 0.0288], + device='cuda:3'), in_proj_covar=tensor([0.0446, 0.0382, 0.0333, 0.0436, 0.0368, 0.0524, 0.0385, 0.0411], + device='cuda:3'), out_proj_covar=tensor([1.2005e-04, 1.0031e-04, 8.7860e-05, 1.1536e-04, 9.6964e-05, 1.4837e-04, + 1.0417e-04, 1.0914e-04], device='cuda:3') +2023-02-07 02:01:01,227 INFO [train.py:901] (3/4) Epoch 21, batch 2400, loss[loss=0.2326, simple_loss=0.307, pruned_loss=0.07909, over 8541.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2888, pruned_loss=0.06355, over 1611147.32 frames. 
], batch size: 39, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:01:19,280 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.352e+02 2.419e+02 2.926e+02 3.800e+02 6.132e+02, threshold=5.852e+02, percent-clipped=4.0 +2023-02-07 02:01:37,286 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:01:37,736 INFO [train.py:901] (3/4) Epoch 21, batch 2450, loss[loss=0.2523, simple_loss=0.3155, pruned_loss=0.09448, over 8505.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2898, pruned_loss=0.06419, over 1612400.07 frames. ], batch size: 26, lr: 3.62e-03, grad_scale: 16.0 +2023-02-07 02:02:12,735 INFO [train.py:901] (3/4) Epoch 21, batch 2500, loss[loss=0.1854, simple_loss=0.2756, pruned_loss=0.0476, over 8473.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2891, pruned_loss=0.06355, over 1613054.73 frames. ], batch size: 25, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:02:22,148 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164173.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:02:30,792 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.421e+02 3.174e+02 4.025e+02 1.090e+03, threshold=6.349e+02, percent-clipped=9.0 +2023-02-07 02:02:46,233 INFO [train.py:901] (3/4) Epoch 21, batch 2550, loss[loss=0.1718, simple_loss=0.2608, pruned_loss=0.04139, over 7249.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2876, pruned_loss=0.06295, over 1609191.10 frames. ], batch size: 16, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:03:22,644 INFO [train.py:901] (3/4) Epoch 21, batch 2600, loss[loss=0.1903, simple_loss=0.2828, pruned_loss=0.04892, over 8489.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2875, pruned_loss=0.06257, over 1609317.56 frames. ], batch size: 28, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:03:40,891 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.272e+02 2.670e+02 3.622e+02 6.852e+02, threshold=5.341e+02, percent-clipped=1.0 +2023-02-07 02:03:56,826 INFO [train.py:901] (3/4) Epoch 21, batch 2650, loss[loss=0.1731, simple_loss=0.2651, pruned_loss=0.04057, over 8073.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2881, pruned_loss=0.06267, over 1610519.81 frames. ], batch size: 21, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:04:33,157 INFO [train.py:901] (3/4) Epoch 21, batch 2700, loss[loss=0.2075, simple_loss=0.289, pruned_loss=0.06299, over 8249.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2876, pruned_loss=0.06228, over 1610431.51 frames. ], batch size: 24, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:04:46,948 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:04:52,076 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.228e+02 2.697e+02 3.361e+02 7.045e+02, threshold=5.394e+02, percent-clipped=4.0 +2023-02-07 02:05:07,797 INFO [train.py:901] (3/4) Epoch 21, batch 2750, loss[loss=0.206, simple_loss=0.2983, pruned_loss=0.05686, over 8245.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.288, pruned_loss=0.06246, over 1610398.67 frames. 
], batch size: 22, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:05:36,819 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:05:41,734 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9170, 1.7711, 2.0226, 1.7124, 0.9962, 1.7679, 2.4432, 2.3696], + device='cuda:3'), covar=tensor([0.0426, 0.1151, 0.1550, 0.1320, 0.0596, 0.1405, 0.0549, 0.0523], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0158, 0.0098, 0.0162, 0.0112, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 02:05:42,233 INFO [train.py:901] (3/4) Epoch 21, batch 2800, loss[loss=0.1437, simple_loss=0.231, pruned_loss=0.02818, over 7807.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2865, pruned_loss=0.06175, over 1607950.01 frames. ], batch size: 20, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:02,588 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.305e+02 2.813e+02 3.760e+02 7.507e+02, threshold=5.625e+02, percent-clipped=3.0 +2023-02-07 02:06:18,053 INFO [train.py:901] (3/4) Epoch 21, batch 2850, loss[loss=0.2143, simple_loss=0.2918, pruned_loss=0.06839, over 7796.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2885, pruned_loss=0.06272, over 1615882.36 frames. ], batch size: 19, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:21,042 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6798, 2.4324, 3.3583, 2.5613, 3.1754, 2.6153, 2.5100, 1.8962], + device='cuda:3'), covar=tensor([0.5207, 0.5133, 0.1995, 0.4022, 0.2635, 0.3068, 0.1750, 0.5872], + device='cuda:3'), in_proj_covar=tensor([0.0937, 0.0972, 0.0798, 0.0931, 0.0989, 0.0884, 0.0742, 0.0821], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:06:23,426 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:06:51,365 INFO [train.py:901] (3/4) Epoch 21, batch 2900, loss[loss=0.2584, simple_loss=0.3415, pruned_loss=0.08771, over 8735.00 frames. ], tot_loss[loss=0.209, simple_loss=0.2902, pruned_loss=0.06393, over 1611802.48 frames. ], batch size: 30, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:06:56,989 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:09,769 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-07 02:07:11,685 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.599e+02 3.265e+02 4.069e+02 1.074e+03, threshold=6.531e+02, percent-clipped=8.0 +2023-02-07 02:07:11,869 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:19,683 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7367, 5.8870, 5.2058, 2.3808, 5.1924, 5.6106, 5.4515, 5.3385], + device='cuda:3'), covar=tensor([0.0493, 0.0351, 0.0799, 0.4101, 0.0650, 0.0614, 0.0917, 0.0550], + device='cuda:3'), in_proj_covar=tensor([0.0512, 0.0429, 0.0430, 0.0529, 0.0420, 0.0434, 0.0414, 0.0379], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:07:19,723 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:07:28,728 INFO [train.py:901] (3/4) Epoch 21, batch 2950, loss[loss=0.1888, simple_loss=0.264, pruned_loss=0.05681, over 7433.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2912, pruned_loss=0.06419, over 1610203.66 frames. ], batch size: 17, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:07:44,453 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:08:02,297 INFO [train.py:901] (3/4) Epoch 21, batch 3000, loss[loss=0.2091, simple_loss=0.2929, pruned_loss=0.0627, over 8360.00 frames. ], tot_loss[loss=0.2098, simple_loss=0.2913, pruned_loss=0.06413, over 1615483.73 frames. ], batch size: 24, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:08:02,298 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 02:08:15,064 INFO [train.py:935] (3/4) Epoch 21, validation: loss=0.1742, simple_loss=0.2744, pruned_loss=0.03706, over 944034.00 frames. +2023-02-07 02:08:15,066 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-07 02:08:24,718 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.3909, 1.6651, 5.7723, 2.4351, 4.6937, 4.7345, 5.3509, 5.2561], + device='cuda:3'), covar=tensor([0.1188, 0.7288, 0.1016, 0.4635, 0.2117, 0.1824, 0.0929, 0.0820], + device='cuda:3'), in_proj_covar=tensor([0.0632, 0.0645, 0.0687, 0.0626, 0.0708, 0.0609, 0.0605, 0.0674], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:08:26,763 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164676.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:08:33,566 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.380e+02 2.886e+02 3.399e+02 6.002e+02, threshold=5.772e+02, percent-clipped=0.0 +2023-02-07 02:08:49,851 INFO [train.py:901] (3/4) Epoch 21, batch 3050, loss[loss=0.1774, simple_loss=0.256, pruned_loss=0.04935, over 8034.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2888, pruned_loss=0.06282, over 1611977.11 frames. 
], batch size: 22, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:08:59,359 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:09:08,430 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4812, 1.7970, 1.8879, 1.2758, 1.9414, 1.4471, 0.4544, 1.7705], + device='cuda:3'), covar=tensor([0.0535, 0.0330, 0.0240, 0.0508, 0.0367, 0.0837, 0.0828, 0.0244], + device='cuda:3'), in_proj_covar=tensor([0.0452, 0.0385, 0.0340, 0.0443, 0.0373, 0.0531, 0.0389, 0.0416], + device='cuda:3'), out_proj_covar=tensor([1.2149e-04, 1.0118e-04, 8.9656e-05, 1.1709e-04, 9.8351e-05, 1.5044e-04, + 1.0530e-04, 1.1056e-04], device='cuda:3') +2023-02-07 02:09:25,495 INFO [train.py:901] (3/4) Epoch 21, batch 3100, loss[loss=0.1903, simple_loss=0.2717, pruned_loss=0.05449, over 8030.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2891, pruned_loss=0.06287, over 1614678.83 frames. ], batch size: 22, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:09:29,023 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=164764.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:09:35,995 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 02:09:43,646 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.375e+02 2.980e+02 3.572e+02 8.800e+02, threshold=5.960e+02, percent-clipped=5.0 +2023-02-07 02:09:59,122 INFO [train.py:901] (3/4) Epoch 21, batch 3150, loss[loss=0.2208, simple_loss=0.3107, pruned_loss=0.06545, over 8349.00 frames. ], tot_loss[loss=0.2087, simple_loss=0.2904, pruned_loss=0.06345, over 1616596.84 frames. ], batch size: 24, lr: 3.62e-03, grad_scale: 8.0 +2023-02-07 02:10:08,649 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:19,448 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=164837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:26,874 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:10:34,945 INFO [train.py:901] (3/4) Epoch 21, batch 3200, loss[loss=0.2106, simple_loss=0.2921, pruned_loss=0.06456, over 8472.00 frames. ], tot_loss[loss=0.2089, simple_loss=0.2909, pruned_loss=0.06348, over 1615684.21 frames. ], batch size: 27, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:10:54,101 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.482e+02 2.324e+02 2.650e+02 3.384e+02 7.808e+02, threshold=5.299e+02, percent-clipped=1.0 +2023-02-07 02:10:54,541 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.95 vs. limit=5.0 +2023-02-07 02:10:55,683 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=164888.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:09,469 INFO [train.py:901] (3/4) Epoch 21, batch 3250, loss[loss=0.2419, simple_loss=0.3229, pruned_loss=0.08051, over 7081.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2932, pruned_loss=0.06433, over 1620340.31 frames. 
], batch size: 71, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:11:12,439 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=164913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:23,958 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:30,775 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=164940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:11:44,789 INFO [train.py:901] (3/4) Epoch 21, batch 3300, loss[loss=0.2284, simple_loss=0.3066, pruned_loss=0.07509, over 8359.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2918, pruned_loss=0.06384, over 1617095.29 frames. ], batch size: 24, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:05,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.295e+02 2.742e+02 3.217e+02 7.829e+02, threshold=5.483e+02, percent-clipped=4.0 +2023-02-07 02:12:20,617 INFO [train.py:901] (3/4) Epoch 21, batch 3350, loss[loss=0.1725, simple_loss=0.2585, pruned_loss=0.04329, over 7786.00 frames. ], tot_loss[loss=0.2096, simple_loss=0.2917, pruned_loss=0.06382, over 1621616.48 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:12:28,066 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165020.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:30,882 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8629, 3.4097, 2.1966, 2.7023, 2.5683, 1.8109, 2.4194, 3.0346], + device='cuda:3'), covar=tensor([0.1939, 0.0453, 0.1416, 0.0914, 0.0882, 0.1845, 0.1426, 0.1026], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0235, 0.0333, 0.0307, 0.0297, 0.0333, 0.0342, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 02:12:45,358 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165045.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:46,097 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5434, 1.5020, 1.8276, 1.2703, 1.3126, 1.8273, 0.2509, 1.2717], + device='cuda:3'), covar=tensor([0.1519, 0.1341, 0.0449, 0.0909, 0.2497, 0.0402, 0.2103, 0.1284], + device='cuda:3'), in_proj_covar=tensor([0.0185, 0.0194, 0.0127, 0.0220, 0.0269, 0.0133, 0.0169, 0.0189], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 02:12:52,357 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:12:54,944 INFO [train.py:901] (3/4) Epoch 21, batch 3400, loss[loss=0.2291, simple_loss=0.3077, pruned_loss=0.07527, over 8575.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2915, pruned_loss=0.06389, over 1619182.18 frames. 
], batch size: 49, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:13:15,619 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 2.300e+02 2.821e+02 3.884e+02 1.046e+03, threshold=5.643e+02, percent-clipped=8.0 +2023-02-07 02:13:20,662 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:31,321 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165108.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:31,893 INFO [train.py:901] (3/4) Epoch 21, batch 3450, loss[loss=0.1971, simple_loss=0.2828, pruned_loss=0.05566, over 7654.00 frames. ], tot_loss[loss=0.2097, simple_loss=0.2918, pruned_loss=0.06384, over 1617521.00 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:13:38,107 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:49,291 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165135.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:13:53,300 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:14:05,178 INFO [train.py:901] (3/4) Epoch 21, batch 3500, loss[loss=0.2068, simple_loss=0.2915, pruned_loss=0.06105, over 7808.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2903, pruned_loss=0.06321, over 1618841.15 frames. ], batch size: 20, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:14:06,056 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9328, 1.6607, 2.0503, 1.8088, 1.9263, 1.9657, 1.7829, 0.7899], + device='cuda:3'), covar=tensor([0.5253, 0.4394, 0.1969, 0.3267, 0.2409, 0.2869, 0.1852, 0.4921], + device='cuda:3'), in_proj_covar=tensor([0.0940, 0.0974, 0.0797, 0.0933, 0.0994, 0.0888, 0.0745, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:14:10,643 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 02:14:24,648 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.546e+02 2.436e+02 2.745e+02 3.695e+02 8.606e+02, threshold=5.490e+02, percent-clipped=3.0 +2023-02-07 02:14:41,285 INFO [train.py:901] (3/4) Epoch 21, batch 3550, loss[loss=0.1815, simple_loss=0.2576, pruned_loss=0.05272, over 7425.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2893, pruned_loss=0.06243, over 1617186.65 frames. ], batch size: 17, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:14:51,647 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165223.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:15,592 INFO [train.py:901] (3/4) Epoch 21, batch 3600, loss[loss=0.198, simple_loss=0.2756, pruned_loss=0.06017, over 7272.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06225, over 1614002.41 frames. ], batch size: 16, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:15:34,163 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.347e+02 2.942e+02 3.699e+02 7.087e+02, threshold=5.884e+02, percent-clipped=2.0 +2023-02-07 02:15:35,260 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. 
limit=2.0 +2023-02-07 02:15:44,481 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:15:51,141 INFO [train.py:901] (3/4) Epoch 21, batch 3650, loss[loss=0.2296, simple_loss=0.3195, pruned_loss=0.06989, over 8326.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2872, pruned_loss=0.0618, over 1612536.58 frames. ], batch size: 25, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:15:52,693 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:03,312 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165326.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:06,796 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3351, 2.0477, 2.8301, 2.2739, 2.6546, 2.2897, 2.0330, 1.5175], + device='cuda:3'), covar=tensor([0.5073, 0.4825, 0.1881, 0.3623, 0.2648, 0.3192, 0.2031, 0.5417], + device='cuda:3'), in_proj_covar=tensor([0.0936, 0.0970, 0.0793, 0.0932, 0.0993, 0.0885, 0.0744, 0.0821], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:16:10,820 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:16,764 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 02:16:23,307 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 02:16:25,987 INFO [train.py:901] (3/4) Epoch 21, batch 3700, loss[loss=0.2045, simple_loss=0.2825, pruned_loss=0.06326, over 7785.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2872, pruned_loss=0.06194, over 1612944.39 frames. ], batch size: 19, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:16:27,417 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1559, 2.0950, 2.1285, 1.9795, 1.2456, 1.8663, 2.5104, 2.6234], + device='cuda:3'), covar=tensor([0.0417, 0.1085, 0.1584, 0.1277, 0.0545, 0.1372, 0.0550, 0.0493], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0189, 0.0159, 0.0099, 0.0162, 0.0113, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 02:16:44,014 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.402e+02 2.885e+02 3.854e+02 8.848e+02, threshold=5.771e+02, percent-clipped=5.0 +2023-02-07 02:16:47,619 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165391.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:16:57,948 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-07 02:16:59,571 INFO [train.py:901] (3/4) Epoch 21, batch 3750, loss[loss=0.226, simple_loss=0.317, pruned_loss=0.06751, over 8443.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2883, pruned_loss=0.06252, over 1614723.09 frames. 
], batch size: 48, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:17:03,226 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2729, 2.1777, 1.7220, 1.9363, 1.8515, 1.4309, 1.7483, 1.6870], + device='cuda:3'), covar=tensor([0.1402, 0.0432, 0.1289, 0.0631, 0.0732, 0.1639, 0.0940, 0.0944], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0237, 0.0337, 0.0312, 0.0300, 0.0336, 0.0347, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 02:17:04,491 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:23,266 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7428, 2.4885, 3.2674, 2.6840, 3.1129, 2.5732, 2.4856, 2.3465], + device='cuda:3'), covar=tensor([0.3564, 0.3998, 0.1558, 0.3005, 0.1789, 0.2552, 0.1445, 0.4012], + device='cuda:3'), in_proj_covar=tensor([0.0933, 0.0967, 0.0791, 0.0930, 0.0988, 0.0883, 0.0740, 0.0816], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:17:36,098 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2025, 3.0883, 2.8901, 1.6524, 2.8111, 2.8961, 2.8229, 2.7846], + device='cuda:3'), covar=tensor([0.1399, 0.0950, 0.1476, 0.5083, 0.1239, 0.1626, 0.1771, 0.1286], + device='cuda:3'), in_proj_covar=tensor([0.0516, 0.0429, 0.0430, 0.0533, 0.0422, 0.0436, 0.0415, 0.0379], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:17:36,650 INFO [train.py:901] (3/4) Epoch 21, batch 3800, loss[loss=0.264, simple_loss=0.3367, pruned_loss=0.0957, over 8442.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2879, pruned_loss=0.06248, over 1610320.21 frames. ], batch size: 27, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:17:50,402 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165479.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:54,326 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:17:54,908 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.401e+02 2.925e+02 3.673e+02 6.793e+02, threshold=5.851e+02, percent-clipped=2.0 +2023-02-07 02:18:01,776 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 02:18:07,162 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:18:10,359 INFO [train.py:901] (3/4) Epoch 21, batch 3850, loss[loss=0.1872, simple_loss=0.2756, pruned_loss=0.04934, over 8284.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2877, pruned_loss=0.06275, over 1605475.39 frames. ], batch size: 23, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:18:18,548 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 02:18:46,113 INFO [train.py:901] (3/4) Epoch 21, batch 3900, loss[loss=0.1698, simple_loss=0.2639, pruned_loss=0.03784, over 8080.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2871, pruned_loss=0.06202, over 1603984.02 frames. 
], batch size: 21, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:18:53,056 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:19:05,131 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.182e+02 2.809e+02 3.459e+02 6.713e+02, threshold=5.619e+02, percent-clipped=4.0 +2023-02-07 02:19:12,372 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 02:19:14,925 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=165600.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:19:20,727 INFO [train.py:901] (3/4) Epoch 21, batch 3950, loss[loss=0.286, simple_loss=0.3383, pruned_loss=0.1169, over 7186.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2885, pruned_loss=0.06261, over 1607276.34 frames. ], batch size: 71, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:19:27,201 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.13 vs. limit=5.0 +2023-02-07 02:19:29,802 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8213, 1.5789, 1.9388, 1.6752, 1.7911, 1.8316, 1.6289, 0.8019], + device='cuda:3'), covar=tensor([0.5108, 0.4551, 0.1849, 0.3153, 0.2323, 0.2829, 0.1862, 0.4633], + device='cuda:3'), in_proj_covar=tensor([0.0939, 0.0973, 0.0796, 0.0938, 0.0993, 0.0888, 0.0745, 0.0822], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:19:55,187 INFO [train.py:901] (3/4) Epoch 21, batch 4000, loss[loss=0.215, simple_loss=0.3077, pruned_loss=0.06119, over 8333.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2893, pruned_loss=0.06283, over 1610165.88 frames. ], batch size: 26, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:20:00,882 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2049, 1.8242, 4.2005, 1.5290, 2.5612, 4.6656, 5.0755, 3.5167], + device='cuda:3'), covar=tensor([0.1586, 0.2039, 0.0449, 0.2957, 0.1226, 0.0325, 0.0405, 0.1085], + device='cuda:3'), in_proj_covar=tensor([0.0292, 0.0320, 0.0287, 0.0315, 0.0306, 0.0263, 0.0413, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 02:20:15,749 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.370e+02 2.936e+02 3.785e+02 6.204e+02, threshold=5.872e+02, percent-clipped=2.0 +2023-02-07 02:20:31,251 INFO [train.py:901] (3/4) Epoch 21, batch 4050, loss[loss=0.2038, simple_loss=0.2935, pruned_loss=0.05705, over 8327.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2894, pruned_loss=0.06278, over 1611393.85 frames. ], batch size: 26, lr: 3.61e-03, grad_scale: 8.0 +2023-02-07 02:21:04,772 INFO [train.py:901] (3/4) Epoch 21, batch 4100, loss[loss=0.195, simple_loss=0.2885, pruned_loss=0.05076, over 8330.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2896, pruned_loss=0.06276, over 1614736.04 frames. 
], batch size: 25, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:21:11,132 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:21:24,836 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.510e+02 3.105e+02 3.860e+02 6.931e+02, threshold=6.209e+02, percent-clipped=6.0 +2023-02-07 02:21:41,907 INFO [train.py:901] (3/4) Epoch 21, batch 4150, loss[loss=0.174, simple_loss=0.2696, pruned_loss=0.03918, over 8194.00 frames. ], tot_loss[loss=0.2085, simple_loss=0.2907, pruned_loss=0.06312, over 1620060.53 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:21:50,140 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=165821.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:13,904 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=165856.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:15,083 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 02:22:15,769 INFO [train.py:901] (3/4) Epoch 21, batch 4200, loss[loss=0.218, simple_loss=0.2998, pruned_loss=0.06806, over 8514.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2906, pruned_loss=0.06301, over 1621252.95 frames. ], batch size: 26, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:22:30,517 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=165881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:22:33,700 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.320e+02 2.907e+02 3.705e+02 7.802e+02, threshold=5.814e+02, percent-clipped=2.0 +2023-02-07 02:22:37,059 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 02:22:50,804 INFO [train.py:901] (3/4) Epoch 21, batch 4250, loss[loss=0.1803, simple_loss=0.2706, pruned_loss=0.04495, over 8364.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2906, pruned_loss=0.06291, over 1617674.61 frames. ], batch size: 24, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:22:53,703 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=165913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:23:26,545 INFO [train.py:901] (3/4) Epoch 21, batch 4300, loss[loss=0.2079, simple_loss=0.2933, pruned_loss=0.06123, over 7928.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2896, pruned_loss=0.0627, over 1612701.35 frames. 
], batch size: 20, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:23:44,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.284e+02 2.728e+02 3.396e+02 7.954e+02, threshold=5.457e+02, percent-clipped=4.0 +2023-02-07 02:23:56,036 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4940, 1.9286, 3.1428, 1.2444, 2.3686, 1.7217, 1.6871, 2.2980], + device='cuda:3'), covar=tensor([0.2317, 0.2865, 0.0907, 0.5361, 0.2167, 0.4018, 0.2717, 0.2730], + device='cuda:3'), in_proj_covar=tensor([0.0521, 0.0595, 0.0552, 0.0637, 0.0643, 0.0591, 0.0534, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:23:57,382 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9081, 1.7524, 2.8155, 2.0906, 2.3867, 1.8958, 1.6060, 1.1960], + device='cuda:3'), covar=tensor([0.7262, 0.6342, 0.2004, 0.4073, 0.3351, 0.4422, 0.2992, 0.5938], + device='cuda:3'), in_proj_covar=tensor([0.0940, 0.0976, 0.0797, 0.0937, 0.0993, 0.0888, 0.0746, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:24:01,652 INFO [train.py:901] (3/4) Epoch 21, batch 4350, loss[loss=0.2056, simple_loss=0.2876, pruned_loss=0.06184, over 8635.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2891, pruned_loss=0.06222, over 1611756.13 frames. ], batch size: 34, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:11,728 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 02:24:15,248 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:24:16,541 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5857, 1.8985, 2.9844, 1.4429, 2.2746, 1.9743, 1.6596, 2.2500], + device='cuda:3'), covar=tensor([0.1888, 0.2581, 0.0870, 0.4398, 0.1802, 0.3126, 0.2311, 0.2178], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0593, 0.0551, 0.0635, 0.0641, 0.0590, 0.0533, 0.0631], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:24:36,834 INFO [train.py:901] (3/4) Epoch 21, batch 4400, loss[loss=0.2288, simple_loss=0.3104, pruned_loss=0.07358, over 8647.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2883, pruned_loss=0.06174, over 1612918.96 frames. ], batch size: 39, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:24:45,737 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166072.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:24:54,405 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 02:24:55,056 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.482e+02 3.095e+02 3.863e+02 7.424e+02, threshold=6.191e+02, percent-clipped=10.0 +2023-02-07 02:25:10,656 INFO [train.py:901] (3/4) Epoch 21, batch 4450, loss[loss=0.1632, simple_loss=0.2504, pruned_loss=0.03802, over 7924.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2873, pruned_loss=0.06108, over 1612396.14 frames. 
], batch size: 20, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:25:12,801 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166112.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:22,692 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7575, 1.9169, 2.0498, 1.3692, 2.1261, 1.5489, 0.5650, 1.8501], + device='cuda:3'), covar=tensor([0.0543, 0.0352, 0.0267, 0.0529, 0.0414, 0.0782, 0.0903, 0.0283], + device='cuda:3'), in_proj_covar=tensor([0.0454, 0.0387, 0.0341, 0.0441, 0.0374, 0.0533, 0.0391, 0.0415], + device='cuda:3'), out_proj_covar=tensor([1.2196e-04, 1.0144e-04, 8.9743e-05, 1.1657e-04, 9.8624e-05, 1.5080e-04, + 1.0559e-04, 1.1017e-04], device='cuda:3') +2023-02-07 02:25:27,716 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 02:25:45,433 INFO [train.py:901] (3/4) Epoch 21, batch 4500, loss[loss=0.1808, simple_loss=0.2648, pruned_loss=0.04845, over 8760.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2884, pruned_loss=0.06215, over 1613541.81 frames. ], batch size: 30, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:25:50,227 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 02:25:50,295 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:25:50,362 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:05,003 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.688e+02 3.191e+02 4.415e+02 1.086e+03, threshold=6.382e+02, percent-clipped=9.0 +2023-02-07 02:26:20,597 INFO [train.py:901] (3/4) Epoch 21, batch 4550, loss[loss=0.1706, simple_loss=0.2505, pruned_loss=0.04536, over 7806.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2892, pruned_loss=0.06278, over 1613049.38 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:26:27,576 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5290, 2.3374, 3.2954, 2.6069, 2.9950, 2.5334, 2.2165, 1.9468], + device='cuda:3'), covar=tensor([0.4965, 0.5113, 0.1743, 0.3536, 0.2496, 0.2925, 0.1892, 0.5111], + device='cuda:3'), in_proj_covar=tensor([0.0935, 0.0972, 0.0794, 0.0932, 0.0989, 0.0885, 0.0741, 0.0820], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:26:33,042 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166227.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:26:54,695 INFO [train.py:901] (3/4) Epoch 21, batch 4600, loss[loss=0.1955, simple_loss=0.2859, pruned_loss=0.05256, over 8371.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2888, pruned_loss=0.06273, over 1610044.92 frames. 
], batch size: 24, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:10,560 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166280.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:14,137 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166284.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:15,243 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.551e+02 3.310e+02 4.080e+02 7.820e+02, threshold=6.621e+02, percent-clipped=4.0 +2023-02-07 02:27:30,334 INFO [train.py:901] (3/4) Epoch 21, batch 4650, loss[loss=0.187, simple_loss=0.2621, pruned_loss=0.05596, over 7701.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2892, pruned_loss=0.06315, over 1612534.98 frames. ], batch size: 18, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:27:30,540 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:27:41,202 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:28:03,638 INFO [train.py:901] (3/4) Epoch 21, batch 4700, loss[loss=0.1824, simple_loss=0.2663, pruned_loss=0.0493, over 7804.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2882, pruned_loss=0.06277, over 1610743.48 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 16.0 +2023-02-07 02:28:23,900 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.420e+02 2.373e+02 2.801e+02 3.877e+02 1.145e+03, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 02:28:40,136 INFO [train.py:901] (3/4) Epoch 21, batch 4750, loss[loss=0.2231, simple_loss=0.2834, pruned_loss=0.0814, over 7272.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2877, pruned_loss=0.0627, over 1608999.83 frames. ], batch size: 16, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:28:45,009 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166416.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:28:52,942 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 02:28:55,082 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 02:29:14,177 INFO [train.py:901] (3/4) Epoch 21, batch 4800, loss[loss=0.2473, simple_loss=0.3243, pruned_loss=0.08518, over 8522.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2876, pruned_loss=0.06224, over 1607204.72 frames. ], batch size: 28, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:30,706 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166483.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:33,193 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 2.407e+02 2.819e+02 3.849e+02 8.316e+02, threshold=5.639e+02, percent-clipped=5.0 +2023-02-07 02:29:39,661 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-07 02:29:42,737 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:43,962 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-07 02:29:48,834 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166508.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:29:49,314 INFO [train.py:901] (3/4) Epoch 21, batch 4850, loss[loss=0.1858, simple_loss=0.2742, pruned_loss=0.04869, over 8196.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2869, pruned_loss=0.06131, over 1610209.08 frames. ], batch size: 23, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:29:49,390 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:03,258 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:05,914 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166531.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:30:09,308 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166536.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:17,942 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:21,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.49 vs. limit=5.0 +2023-02-07 02:30:24,635 INFO [train.py:901] (3/4) Epoch 21, batch 4900, loss[loss=0.2063, simple_loss=0.2679, pruned_loss=0.07238, over 7800.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06081, over 1608964.18 frames. ], batch size: 19, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:30:26,219 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166561.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:30:43,068 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.429e+02 3.059e+02 4.014e+02 7.599e+02, threshold=6.119e+02, percent-clipped=4.0 +2023-02-07 02:30:54,281 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5980, 1.5983, 2.1266, 1.4428, 1.2600, 2.0788, 0.3388, 1.3022], + device='cuda:3'), covar=tensor([0.1841, 0.1338, 0.0397, 0.1217, 0.2891, 0.0465, 0.2358, 0.1354], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0196, 0.0128, 0.0223, 0.0272, 0.0136, 0.0172, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 02:30:58,618 INFO [train.py:901] (3/4) Epoch 21, batch 4950, loss[loss=0.2027, simple_loss=0.2766, pruned_loss=0.06439, over 8334.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.287, pruned_loss=0.06168, over 1610523.17 frames. ], batch size: 26, lr: 3.60e-03, grad_scale: 8.0 +2023-02-07 02:31:09,545 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:34,524 INFO [train.py:901] (3/4) Epoch 21, batch 5000, loss[loss=0.1821, simple_loss=0.2677, pruned_loss=0.04827, over 8086.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2868, pruned_loss=0.0615, over 1607220.05 frames. 
], batch size: 21, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:31:41,051 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:31:52,864 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.262e+02 2.770e+02 3.475e+02 7.586e+02, threshold=5.540e+02, percent-clipped=2.0 +2023-02-07 02:32:07,640 INFO [train.py:901] (3/4) Epoch 21, batch 5050, loss[loss=0.248, simple_loss=0.3237, pruned_loss=0.08612, over 8476.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2867, pruned_loss=0.06145, over 1609245.11 frames. ], batch size: 28, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:23,023 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 02:32:38,361 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:32:42,947 INFO [train.py:901] (3/4) Epoch 21, batch 5100, loss[loss=0.1796, simple_loss=0.2444, pruned_loss=0.05739, over 7703.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2872, pruned_loss=0.06202, over 1611638.31 frames. ], batch size: 18, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:32:59,528 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:00,920 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:02,731 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.724e+02 2.499e+02 3.045e+02 3.729e+02 1.083e+03, threshold=6.090e+02, percent-clipped=5.0 +2023-02-07 02:33:02,981 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166787.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:33:17,653 INFO [train.py:901] (3/4) Epoch 21, batch 5150, loss[loss=0.2367, simple_loss=0.3117, pruned_loss=0.08081, over 8352.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2883, pruned_loss=0.06269, over 1615350.85 frames. ], batch size: 49, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:19,982 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166812.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 02:33:35,605 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9209, 2.2532, 1.8007, 2.8571, 1.3543, 1.4973, 1.9218, 2.2096], + device='cuda:3'), covar=tensor([0.0848, 0.0707, 0.0934, 0.0348, 0.1092, 0.1362, 0.0936, 0.0837], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0194, 0.0241, 0.0210, 0.0203, 0.0241, 0.0249, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 02:33:40,945 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:33:52,921 INFO [train.py:901] (3/4) Epoch 21, batch 5200, loss[loss=0.1845, simple_loss=0.2731, pruned_loss=0.04798, over 8242.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2876, pruned_loss=0.06208, over 1612888.65 frames. ], batch size: 24, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:33:54,692 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.05 vs. 
limit=5.0 +2023-02-07 02:34:01,101 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:08,926 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=166880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:13,418 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 2.417e+02 2.893e+02 3.464e+02 9.071e+02, threshold=5.787e+02, percent-clipped=3.0 +2023-02-07 02:34:17,473 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=166893.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:22,854 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 02:34:25,858 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=166905.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:34:28,364 INFO [train.py:901] (3/4) Epoch 21, batch 5250, loss[loss=0.1893, simple_loss=0.2819, pruned_loss=0.04837, over 8250.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2878, pruned_loss=0.06195, over 1613001.76 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:34:53,832 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=166947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:00,988 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:01,477 INFO [train.py:901] (3/4) Epoch 21, batch 5300, loss[loss=0.2517, simple_loss=0.32, pruned_loss=0.09168, over 6884.00 frames. ], tot_loss[loss=0.2092, simple_loss=0.291, pruned_loss=0.06365, over 1616084.96 frames. ], batch size: 72, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:21,119 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=166986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:21,588 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.349e+02 2.996e+02 3.802e+02 6.845e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-07 02:35:22,372 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6293, 2.5336, 1.8554, 2.3821, 2.2130, 1.5784, 2.2421, 2.2866], + device='cuda:3'), covar=tensor([0.1491, 0.0426, 0.1226, 0.0634, 0.0733, 0.1535, 0.0879, 0.0929], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0235, 0.0335, 0.0308, 0.0298, 0.0334, 0.0344, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 02:35:37,482 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:35:37,983 INFO [train.py:901] (3/4) Epoch 21, batch 5350, loss[loss=0.1806, simple_loss=0.2669, pruned_loss=0.04718, over 8090.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2888, pruned_loss=0.06275, over 1612722.18 frames. ], batch size: 21, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:35:58,870 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167040.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:12,094 INFO [train.py:901] (3/4) Epoch 21, batch 5400, loss[loss=0.2113, simple_loss=0.2951, pruned_loss=0.06372, over 8336.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2885, pruned_loss=0.06276, over 1606652.74 frames. 
], batch size: 25, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:16,470 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167065.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:32,162 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.459e+02 3.013e+02 3.547e+02 6.118e+02, threshold=6.026e+02, percent-clipped=1.0 +2023-02-07 02:36:39,933 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:47,942 INFO [train.py:901] (3/4) Epoch 21, batch 5450, loss[loss=0.2064, simple_loss=0.2941, pruned_loss=0.05935, over 8199.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2879, pruned_loss=0.06244, over 1609019.92 frames. ], batch size: 23, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:36:55,795 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:36:58,551 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:01,216 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:37:12,850 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 02:37:19,305 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8170, 1.3315, 4.0005, 1.5078, 3.5507, 3.3960, 3.6615, 3.5390], + device='cuda:3'), covar=tensor([0.0741, 0.4533, 0.0607, 0.4129, 0.1234, 0.0954, 0.0677, 0.0777], + device='cuda:3'), in_proj_covar=tensor([0.0630, 0.0643, 0.0695, 0.0629, 0.0707, 0.0605, 0.0605, 0.0677], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:37:24,075 INFO [train.py:901] (3/4) Epoch 21, batch 5500, loss[loss=0.2381, simple_loss=0.3183, pruned_loss=0.07895, over 8645.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2876, pruned_loss=0.06215, over 1613946.02 frames. ], batch size: 34, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:37:43,657 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.538e+02 3.099e+02 3.967e+02 8.838e+02, threshold=6.197e+02, percent-clipped=3.0 +2023-02-07 02:37:58,448 INFO [train.py:901] (3/4) Epoch 21, batch 5550, loss[loss=0.2313, simple_loss=0.3143, pruned_loss=0.07416, over 8667.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2876, pruned_loss=0.06139, over 1617678.72 frames. 
], batch size: 34, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:38:01,331 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:02,595 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:21,134 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:22,453 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:23,135 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167242.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:34,480 INFO [train.py:901] (3/4) Epoch 21, batch 5600, loss[loss=0.1688, simple_loss=0.2568, pruned_loss=0.04036, over 8130.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2865, pruned_loss=0.06107, over 1616363.39 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 8.0 +2023-02-07 02:38:34,957 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.48 vs. limit=5.0 +2023-02-07 02:38:36,021 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6893, 1.3350, 4.8641, 1.7589, 4.3081, 4.0161, 4.3971, 4.2417], + device='cuda:3'), covar=tensor([0.0574, 0.5153, 0.0452, 0.4298, 0.1050, 0.0896, 0.0602, 0.0635], + device='cuda:3'), in_proj_covar=tensor([0.0629, 0.0637, 0.0691, 0.0626, 0.0701, 0.0602, 0.0601, 0.0672], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:38:38,123 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167264.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:40,083 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167267.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:54,594 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.626e+02 2.492e+02 3.097e+02 3.838e+02 7.086e+02, threshold=6.194e+02, percent-clipped=1.0 +2023-02-07 02:38:54,819 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:38:56,077 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:39:08,036 INFO [train.py:901] (3/4) Epoch 21, batch 5650, loss[loss=0.2134, simple_loss=0.2985, pruned_loss=0.06411, over 8083.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2868, pruned_loss=0.06131, over 1612568.76 frames. ], batch size: 21, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:39:18,774 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 02:39:27,191 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.24 vs. 
limit=5.0 +2023-02-07 02:39:30,412 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8836, 2.3655, 3.8202, 1.8124, 2.9781, 2.4208, 1.9759, 2.8981], + device='cuda:3'), covar=tensor([0.1823, 0.2633, 0.0836, 0.4267, 0.1633, 0.2926, 0.2228, 0.2256], + device='cuda:3'), in_proj_covar=tensor([0.0521, 0.0594, 0.0552, 0.0637, 0.0637, 0.0588, 0.0530, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:39:44,505 INFO [train.py:901] (3/4) Epoch 21, batch 5700, loss[loss=0.2071, simple_loss=0.2898, pruned_loss=0.06219, over 8241.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2869, pruned_loss=0.06158, over 1609349.47 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:04,660 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.585e+02 3.206e+02 3.925e+02 8.506e+02, threshold=6.412e+02, percent-clipped=6.0 +2023-02-07 02:40:07,027 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6410, 2.3186, 3.0175, 2.5130, 2.9381, 2.5443, 2.3884, 2.2308], + device='cuda:3'), covar=tensor([0.3514, 0.3721, 0.1536, 0.2723, 0.1853, 0.2417, 0.1489, 0.3546], + device='cuda:3'), in_proj_covar=tensor([0.0938, 0.0972, 0.0796, 0.0939, 0.0990, 0.0888, 0.0743, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 02:40:10,473 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.17 vs. limit=5.0 +2023-02-07 02:40:16,545 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:18,408 INFO [train.py:901] (3/4) Epoch 21, batch 5750, loss[loss=0.2013, simple_loss=0.2871, pruned_loss=0.05779, over 8247.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.285, pruned_loss=0.0604, over 1609203.55 frames. ], batch size: 22, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:19,352 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 02:40:21,795 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9976, 1.7838, 3.2683, 1.2540, 2.4778, 3.6055, 3.8871, 2.6330], + device='cuda:3'), covar=tensor([0.1439, 0.1891, 0.0470, 0.2785, 0.1079, 0.0371, 0.0596, 0.1045], + device='cuda:3'), in_proj_covar=tensor([0.0294, 0.0320, 0.0287, 0.0314, 0.0307, 0.0263, 0.0415, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 02:40:24,259 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 02:40:47,703 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167450.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:53,629 INFO [train.py:901] (3/4) Epoch 21, batch 5800, loss[loss=0.1773, simple_loss=0.259, pruned_loss=0.04774, over 8074.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2852, pruned_loss=0.06034, over 1606791.69 frames. 
], batch size: 21, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:40:55,798 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:40:59,205 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167466.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:00,731 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:05,556 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.33 vs. limit=5.0 +2023-02-07 02:41:11,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4963, 1.7867, 2.6676, 1.3680, 1.8819, 1.8364, 1.5067, 1.9185], + device='cuda:3'), covar=tensor([0.1926, 0.2623, 0.0845, 0.4664, 0.2068, 0.3326, 0.2507, 0.2367], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0593, 0.0552, 0.0635, 0.0637, 0.0587, 0.0528, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:41:15,042 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.466e+02 2.953e+02 3.603e+02 7.254e+02, threshold=5.907e+02, percent-clipped=1.0 +2023-02-07 02:41:17,996 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:20,771 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:41:28,620 INFO [train.py:901] (3/4) Epoch 21, batch 5850, loss[loss=0.193, simple_loss=0.2833, pruned_loss=0.05136, over 8464.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2855, pruned_loss=0.06061, over 1608687.81 frames. ], batch size: 39, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:41:37,415 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 02:41:37,824 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:03,761 INFO [train.py:901] (3/4) Epoch 21, batch 5900, loss[loss=0.2047, simple_loss=0.2919, pruned_loss=0.05878, over 7921.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2857, pruned_loss=0.06113, over 1604280.84 frames. 
], batch size: 20, lr: 3.59e-03, grad_scale: 4.0 +2023-02-07 02:42:14,756 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9690, 1.8479, 6.1123, 2.2116, 5.4462, 5.1614, 5.6579, 5.5538], + device='cuda:3'), covar=tensor([0.0465, 0.4594, 0.0369, 0.3896, 0.1089, 0.0905, 0.0486, 0.0480], + device='cuda:3'), in_proj_covar=tensor([0.0630, 0.0640, 0.0695, 0.0632, 0.0704, 0.0606, 0.0607, 0.0680], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:42:16,866 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:19,700 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:42:25,696 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.257e+02 2.859e+02 3.440e+02 7.059e+02, threshold=5.718e+02, percent-clipped=2.0 +2023-02-07 02:42:40,379 INFO [train.py:901] (3/4) Epoch 21, batch 5950, loss[loss=0.2071, simple_loss=0.275, pruned_loss=0.06956, over 7700.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2858, pruned_loss=0.0612, over 1606834.32 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 4.0 +2023-02-07 02:43:14,053 INFO [train.py:901] (3/4) Epoch 21, batch 6000, loss[loss=0.1759, simple_loss=0.2595, pruned_loss=0.04612, over 7700.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2868, pruned_loss=0.06144, over 1610556.47 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:43:14,054 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 02:43:26,394 INFO [train.py:935] (3/4) Epoch 21, validation: loss=0.174, simple_loss=0.2741, pruned_loss=0.03692, over 944034.00 frames. +2023-02-07 02:43:26,395 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-07 02:43:28,719 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:45,658 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:43:47,404 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.382e+02 2.918e+02 3.609e+02 5.587e+02, threshold=5.837e+02, percent-clipped=0.0 +2023-02-07 02:44:01,969 INFO [train.py:901] (3/4) Epoch 21, batch 6050, loss[loss=0.1734, simple_loss=0.2598, pruned_loss=0.0435, over 7536.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2872, pruned_loss=0.06127, over 1612769.39 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:06,718 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-07 02:44:22,063 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167737.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:38,028 INFO [train.py:901] (3/4) Epoch 21, batch 6100, loss[loss=0.1603, simple_loss=0.2451, pruned_loss=0.03774, over 7539.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2863, pruned_loss=0.06122, over 1612603.78 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:44:56,057 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=167785.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:44:57,194 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-07 02:44:58,540 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.390e+02 3.045e+02 3.849e+02 6.701e+02, threshold=6.089e+02, percent-clipped=2.0 +2023-02-07 02:45:02,123 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=167794.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:13,116 INFO [train.py:901] (3/4) Epoch 21, batch 6150, loss[loss=0.2096, simple_loss=0.2916, pruned_loss=0.06379, over 8106.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2871, pruned_loss=0.06164, over 1613779.36 frames. ], batch size: 23, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:30,424 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:33,104 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=167837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,358 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:45:48,812 INFO [train.py:901] (3/4) Epoch 21, batch 6200, loss[loss=0.1892, simple_loss=0.2689, pruned_loss=0.05471, over 7946.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2877, pruned_loss=0.06199, over 1617824.04 frames. ], batch size: 20, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:45:51,057 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=167862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:09,470 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.266e+02 2.776e+02 3.727e+02 8.167e+02, threshold=5.552e+02, percent-clipped=4.0 +2023-02-07 02:46:16,722 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 02:46:23,391 INFO [train.py:901] (3/4) Epoch 21, batch 6250, loss[loss=0.1896, simple_loss=0.2884, pruned_loss=0.04537, over 8363.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2876, pruned_loss=0.06203, over 1614113.09 frames. ], batch size: 24, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:46:23,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=167909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:46:58,909 INFO [train.py:901] (3/4) Epoch 21, batch 6300, loss[loss=0.2199, simple_loss=0.3115, pruned_loss=0.06419, over 8337.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2875, pruned_loss=0.06227, over 1610644.23 frames. ], batch size: 26, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:47:16,164 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-07 02:47:20,723 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.475e+02 2.869e+02 3.545e+02 9.430e+02, threshold=5.737e+02, percent-clipped=7.0 +2023-02-07 02:47:35,237 INFO [train.py:901] (3/4) Epoch 21, batch 6350, loss[loss=0.1907, simple_loss=0.2659, pruned_loss=0.05781, over 7688.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2884, pruned_loss=0.06232, over 1615117.33 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:06,026 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:09,823 INFO [train.py:901] (3/4) Epoch 21, batch 6400, loss[loss=0.1943, simple_loss=0.2812, pruned_loss=0.05374, over 7981.00 frames. 
], tot_loss[loss=0.2065, simple_loss=0.2887, pruned_loss=0.06213, over 1618830.80 frames. ], batch size: 21, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:25,125 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168081.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:48:30,484 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.236e+02 2.639e+02 3.603e+02 6.999e+02, threshold=5.279e+02, percent-clipped=2.0 +2023-02-07 02:48:45,447 INFO [train.py:901] (3/4) Epoch 21, batch 6450, loss[loss=0.2383, simple_loss=0.3132, pruned_loss=0.08166, over 8637.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2883, pruned_loss=0.0623, over 1614866.99 frames. ], batch size: 34, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:48:59,389 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:19,828 INFO [train.py:901] (3/4) Epoch 21, batch 6500, loss[loss=0.2413, simple_loss=0.312, pruned_loss=0.08529, over 8519.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2885, pruned_loss=0.06342, over 1610833.68 frames. ], batch size: 26, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:49:24,789 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:41,355 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.960e+02 2.483e+02 3.129e+02 4.081e+02 1.148e+03, threshold=6.258e+02, percent-clipped=13.0 +2023-02-07 02:49:42,167 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168190.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:46,106 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:49:54,743 INFO [train.py:901] (3/4) Epoch 21, batch 6550, loss[loss=0.1627, simple_loss=0.232, pruned_loss=0.04667, over 7436.00 frames. ], tot_loss[loss=0.207, simple_loss=0.288, pruned_loss=0.063, over 1610337.20 frames. ], batch size: 17, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:19,982 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:20,482 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 02:50:24,649 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:50:29,933 INFO [train.py:901] (3/4) Epoch 21, batch 6600, loss[loss=0.2149, simple_loss=0.2858, pruned_loss=0.07196, over 7538.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2873, pruned_loss=0.06267, over 1607534.94 frames. ], batch size: 18, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:50:32,313 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 02:50:38,745 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 02:50:50,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.414e+02 2.830e+02 3.481e+02 7.637e+02, threshold=5.659e+02, percent-clipped=3.0 +2023-02-07 02:51:05,095 INFO [train.py:901] (3/4) Epoch 21, batch 6650, loss[loss=0.2553, simple_loss=0.3203, pruned_loss=0.09515, over 8543.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2885, pruned_loss=0.06325, over 1608696.86 frames. 
], batch size: 49, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:51:40,101 INFO [train.py:901] (3/4) Epoch 21, batch 6700, loss[loss=0.2027, simple_loss=0.2871, pruned_loss=0.05913, over 8291.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2884, pruned_loss=0.0635, over 1606075.83 frames. ], batch size: 23, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:00,449 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.306e+02 2.933e+02 3.476e+02 6.537e+02, threshold=5.866e+02, percent-clipped=2.0 +2023-02-07 02:52:04,737 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7272, 2.0911, 3.2437, 1.6001, 2.4926, 2.1022, 1.9036, 2.4404], + device='cuda:3'), covar=tensor([0.1827, 0.2507, 0.0859, 0.4193, 0.1870, 0.3016, 0.2045, 0.2461], + device='cuda:3'), in_proj_covar=tensor([0.0524, 0.0600, 0.0556, 0.0641, 0.0644, 0.0591, 0.0532, 0.0632], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:52:05,989 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:14,914 INFO [train.py:901] (3/4) Epoch 21, batch 6750, loss[loss=0.2147, simple_loss=0.3026, pruned_loss=0.06347, over 8019.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2897, pruned_loss=0.0639, over 1610582.54 frames. ], batch size: 22, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:19,505 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-07 02:52:45,403 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:52:50,752 INFO [train.py:901] (3/4) Epoch 21, batch 6800, loss[loss=0.175, simple_loss=0.2516, pruned_loss=0.04914, over 8078.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2895, pruned_loss=0.06316, over 1615466.72 frames. ], batch size: 21, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:52:58,523 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 02:53:04,349 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:12,373 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.333e+02 2.834e+02 3.373e+02 7.883e+02, threshold=5.669e+02, percent-clipped=5.0 +2023-02-07 02:53:20,354 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168500.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:26,321 INFO [train.py:901] (3/4) Epoch 21, batch 6850, loss[loss=0.1626, simple_loss=0.2394, pruned_loss=0.04288, over 7807.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2896, pruned_loss=0.06278, over 1616050.19 frames. ], batch size: 19, lr: 3.58e-03, grad_scale: 8.0 +2023-02-07 02:53:28,543 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:37,578 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:53:45,921 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 02:54:00,742 INFO [train.py:901] (3/4) Epoch 21, batch 6900, loss[loss=0.1881, simple_loss=0.2751, pruned_loss=0.05053, over 7279.00 frames. 
], tot_loss[loss=0.2083, simple_loss=0.2904, pruned_loss=0.06309, over 1620248.26 frames. ], batch size: 16, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:22,253 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.460e+02 2.867e+02 3.613e+02 6.820e+02, threshold=5.733e+02, percent-clipped=1.0 +2023-02-07 02:54:23,301 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.48 vs. limit=5.0 +2023-02-07 02:54:26,393 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=168595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:54:26,687 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0 +2023-02-07 02:54:35,737 INFO [train.py:901] (3/4) Epoch 21, batch 6950, loss[loss=0.181, simple_loss=0.2603, pruned_loss=0.05083, over 8240.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2891, pruned_loss=0.06263, over 1613504.33 frames. ], batch size: 22, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:54:45,634 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=2.02 vs. limit=2.0 +2023-02-07 02:54:53,393 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 02:55:10,656 INFO [train.py:901] (3/4) Epoch 21, batch 7000, loss[loss=0.1671, simple_loss=0.2422, pruned_loss=0.04598, over 7256.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2894, pruned_loss=0.06268, over 1613967.29 frames. ], batch size: 16, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:55:12,903 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4774, 1.4158, 1.8645, 1.3017, 1.1004, 1.7971, 0.2100, 1.1668], + device='cuda:3'), covar=tensor([0.1469, 0.1265, 0.0333, 0.0963, 0.2689, 0.0415, 0.2134, 0.1214], + device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0195, 0.0126, 0.0221, 0.0270, 0.0135, 0.0170, 0.0191], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 02:55:31,356 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.444e+02 3.041e+02 3.968e+02 8.528e+02, threshold=6.083e+02, percent-clipped=8.0 +2023-02-07 02:55:35,109 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4995, 1.8808, 2.9407, 1.3826, 2.1508, 1.8856, 1.5736, 2.1275], + device='cuda:3'), covar=tensor([0.1961, 0.2452, 0.0742, 0.4451, 0.1820, 0.3127, 0.2296, 0.2236], + device='cuda:3'), in_proj_covar=tensor([0.0522, 0.0597, 0.0552, 0.0638, 0.0640, 0.0590, 0.0530, 0.0627], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:55:41,825 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1708, 1.4892, 1.7930, 1.4435, 0.9929, 1.5785, 1.8869, 1.6132], + device='cuda:3'), covar=tensor([0.0476, 0.1295, 0.1671, 0.1444, 0.0582, 0.1475, 0.0650, 0.0651], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0158, 0.0098, 0.0163, 0.0112, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 02:55:45,700 INFO [train.py:901] (3/4) Epoch 21, batch 7050, loss[loss=0.2096, simple_loss=0.2994, pruned_loss=0.05992, over 8298.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2891, pruned_loss=0.06269, over 1616187.76 frames. 
], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:55:46,104 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.84 vs. limit=5.0 +2023-02-07 02:55:46,596 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=168710.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:55:50,178 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. limit=5.0 +2023-02-07 02:56:19,948 INFO [train.py:901] (3/4) Epoch 21, batch 7100, loss[loss=0.197, simple_loss=0.281, pruned_loss=0.0565, over 8193.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2888, pruned_loss=0.06219, over 1618470.09 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:56:26,887 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:40,683 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.605e+02 3.011e+02 3.811e+02 1.077e+03, threshold=6.022e+02, percent-clipped=4.0 +2023-02-07 02:56:41,625 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4212, 1.4140, 1.8104, 1.2249, 1.0933, 1.7578, 0.2441, 1.1032], + device='cuda:3'), covar=tensor([0.1709, 0.1369, 0.0402, 0.1119, 0.2956, 0.0498, 0.2198, 0.1324], + device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0222, 0.0271, 0.0136, 0.0172, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 02:56:43,715 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:56:48,506 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2769, 2.6096, 2.9525, 1.5425, 3.1219, 1.9544, 1.4909, 2.0759], + device='cuda:3'), covar=tensor([0.0827, 0.0407, 0.0298, 0.0870, 0.0492, 0.0897, 0.0974, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0453, 0.0391, 0.0342, 0.0443, 0.0375, 0.0534, 0.0389, 0.0417], + device='cuda:3'), out_proj_covar=tensor([1.2173e-04, 1.0258e-04, 9.0082e-05, 1.1684e-04, 9.8664e-05, 1.5105e-04, + 1.0518e-04, 1.1060e-04], device='cuda:3') +2023-02-07 02:56:55,260 INFO [train.py:901] (3/4) Epoch 21, batch 7150, loss[loss=0.2161, simple_loss=0.2969, pruned_loss=0.06761, over 8087.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2893, pruned_loss=0.06273, over 1614225.65 frames. ], batch size: 21, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:57:07,096 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.35 vs. limit=5.0 +2023-02-07 02:57:07,568 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9979, 2.3669, 3.4640, 1.9082, 2.9001, 2.3573, 2.1804, 2.7613], + device='cuda:3'), covar=tensor([0.1577, 0.2448, 0.0779, 0.3651, 0.1536, 0.2710, 0.1855, 0.2244], + device='cuda:3'), in_proj_covar=tensor([0.0525, 0.0603, 0.0558, 0.0643, 0.0645, 0.0596, 0.0534, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:57:29,819 INFO [train.py:901] (3/4) Epoch 21, batch 7200, loss[loss=0.2011, simple_loss=0.2826, pruned_loss=0.0598, over 8028.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2891, pruned_loss=0.06256, over 1614029.38 frames. ], batch size: 22, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:57:32,851 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. 
limit=5.0 +2023-02-07 02:57:44,676 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0992, 2.3359, 1.8821, 2.8873, 1.4203, 1.6255, 1.9061, 2.2501], + device='cuda:3'), covar=tensor([0.0664, 0.0703, 0.0904, 0.0303, 0.1011, 0.1322, 0.0874, 0.0714], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0197, 0.0244, 0.0212, 0.0206, 0.0247, 0.0249, 0.0209], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 02:57:51,137 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 2.343e+02 3.196e+02 4.097e+02 7.456e+02, threshold=6.392e+02, percent-clipped=6.0 +2023-02-07 02:57:56,895 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 02:58:04,698 INFO [train.py:901] (3/4) Epoch 21, batch 7250, loss[loss=0.185, simple_loss=0.2777, pruned_loss=0.04621, over 8248.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2899, pruned_loss=0.06344, over 1610352.03 frames. ], batch size: 24, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:58:18,030 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5887, 2.4669, 1.7769, 2.1420, 2.1718, 1.5487, 2.0238, 2.0143], + device='cuda:3'), covar=tensor([0.1487, 0.0463, 0.1311, 0.0644, 0.0685, 0.1588, 0.0898, 0.1092], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0237, 0.0337, 0.0309, 0.0302, 0.0338, 0.0347, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 02:58:40,072 INFO [train.py:901] (3/4) Epoch 21, batch 7300, loss[loss=0.1923, simple_loss=0.2782, pruned_loss=0.05314, over 8592.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2903, pruned_loss=0.06399, over 1609290.35 frames. ], batch size: 34, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:58:44,986 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=168966.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:58:58,205 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168985.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:58:59,441 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=168987.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 02:58:59,470 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0509, 1.3011, 1.0745, 1.9265, 0.8540, 1.0271, 1.3629, 1.3856], + device='cuda:3'), covar=tensor([0.1701, 0.1115, 0.2175, 0.0472, 0.1309, 0.2043, 0.0852, 0.1050], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0198, 0.0245, 0.0213, 0.0207, 0.0248, 0.0250, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 02:59:00,600 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.375e+02 2.880e+02 4.111e+02 9.346e+02, threshold=5.760e+02, percent-clipped=6.0 +2023-02-07 02:59:02,085 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=168991.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 02:59:14,684 INFO [train.py:901] (3/4) Epoch 21, batch 7350, loss[loss=0.174, simple_loss=0.2661, pruned_loss=0.04097, over 8232.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2889, pruned_loss=0.06308, over 1608493.80 frames. 
], batch size: 22, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:59:35,045 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 02:59:36,091 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.50 vs. limit=2.0 +2023-02-07 02:59:44,846 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7974, 2.0935, 3.2542, 1.5820, 2.6012, 2.1485, 1.8645, 2.4972], + device='cuda:3'), covar=tensor([0.1774, 0.2492, 0.0833, 0.4362, 0.1737, 0.2910, 0.2207, 0.2256], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0603, 0.0558, 0.0644, 0.0645, 0.0596, 0.0534, 0.0632], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 02:59:49,830 INFO [train.py:901] (3/4) Epoch 21, batch 7400, loss[loss=0.199, simple_loss=0.2789, pruned_loss=0.05956, over 8134.00 frames. ], tot_loss[loss=0.2078, simple_loss=0.2892, pruned_loss=0.06316, over 1610457.48 frames. ], batch size: 22, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 02:59:53,390 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 03:00:10,721 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 2.322e+02 3.020e+02 4.298e+02 1.187e+03, threshold=6.039e+02, percent-clipped=6.0 +2023-02-07 03:00:19,215 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169100.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:00:25,151 INFO [train.py:901] (3/4) Epoch 21, batch 7450, loss[loss=0.2151, simple_loss=0.2988, pruned_loss=0.06569, over 8611.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2885, pruned_loss=0.06236, over 1611839.54 frames. ], batch size: 39, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:00:33,881 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 03:00:42,579 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169134.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:01:01,173 INFO [train.py:901] (3/4) Epoch 21, batch 7500, loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05797, over 8187.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2889, pruned_loss=0.06281, over 1608460.52 frames. ], batch size: 23, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:01:13,526 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169177.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:01:21,454 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.287e+02 2.739e+02 3.438e+02 5.948e+02, threshold=5.478e+02, percent-clipped=0.0 +2023-02-07 03:01:35,748 INFO [train.py:901] (3/4) Epoch 21, batch 7550, loss[loss=0.1902, simple_loss=0.2764, pruned_loss=0.05201, over 8039.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2887, pruned_loss=0.06254, over 1611080.94 frames. ], batch size: 22, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:02:09,758 INFO [train.py:901] (3/4) Epoch 21, batch 7600, loss[loss=0.241, simple_loss=0.3022, pruned_loss=0.08991, over 6396.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2877, pruned_loss=0.06219, over 1607188.87 frames. 
], batch size: 14, lr: 3.57e-03, grad_scale: 8.0 +2023-02-07 03:02:32,180 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.243e+02 2.742e+02 3.349e+02 1.012e+03, threshold=5.485e+02, percent-clipped=5.0 +2023-02-07 03:02:45,868 INFO [train.py:901] (3/4) Epoch 21, batch 7650, loss[loss=0.2106, simple_loss=0.2954, pruned_loss=0.06287, over 8512.00 frames. ], tot_loss[loss=0.2079, simple_loss=0.2895, pruned_loss=0.0632, over 1612743.42 frames. ], batch size: 28, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:03:00,452 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169329.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:03:01,835 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169331.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:03:21,300 INFO [train.py:901] (3/4) Epoch 21, batch 7700, loss[loss=0.1853, simple_loss=0.2733, pruned_loss=0.04861, over 8087.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.289, pruned_loss=0.06259, over 1613180.61 frames. ], batch size: 21, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:03:37,726 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.47 vs. limit=5.0 +2023-02-07 03:03:42,196 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.349e+02 2.901e+02 3.736e+02 6.675e+02, threshold=5.802e+02, percent-clipped=6.0 +2023-02-07 03:03:44,229 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 03:03:45,051 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2742, 2.1070, 1.5777, 1.9207, 1.7727, 1.3987, 1.6431, 1.6573], + device='cuda:3'), covar=tensor([0.1427, 0.0401, 0.1287, 0.0551, 0.0703, 0.1499, 0.1024, 0.0932], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0236, 0.0335, 0.0307, 0.0301, 0.0337, 0.0345, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:03:57,027 INFO [train.py:901] (3/4) Epoch 21, batch 7750, loss[loss=0.1727, simple_loss=0.2491, pruned_loss=0.04818, over 7411.00 frames. ], tot_loss[loss=0.2091, simple_loss=0.2905, pruned_loss=0.06381, over 1616967.35 frames. ], batch size: 17, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:04:21,907 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:22,016 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:23,342 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169446.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:04:32,649 INFO [train.py:901] (3/4) Epoch 21, batch 7800, loss[loss=0.2418, simple_loss=0.3169, pruned_loss=0.08339, over 8523.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.289, pruned_loss=0.06272, over 1617294.63 frames. 
], batch size: 31, lr: 3.57e-03, grad_scale: 16.0 +2023-02-07 03:04:45,398 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169478.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:04:52,668 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.145e+02 2.738e+02 3.428e+02 8.790e+02, threshold=5.476e+02, percent-clipped=3.0 +2023-02-07 03:05:06,018 INFO [train.py:901] (3/4) Epoch 21, batch 7850, loss[loss=0.1703, simple_loss=0.2541, pruned_loss=0.04328, over 7428.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2888, pruned_loss=0.06286, over 1619892.02 frames. ], batch size: 17, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:05:14,138 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:05:39,268 INFO [train.py:901] (3/4) Epoch 21, batch 7900, loss[loss=0.2304, simple_loss=0.3083, pruned_loss=0.07629, over 8496.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2888, pruned_loss=0.06263, over 1621239.05 frames. ], batch size: 26, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:05:39,438 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:05:52,127 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5171, 1.3855, 4.6836, 1.7636, 4.1344, 3.8780, 4.2235, 4.1009], + device='cuda:3'), covar=tensor([0.0584, 0.5072, 0.0539, 0.4299, 0.1164, 0.1009, 0.0677, 0.0703], + device='cuda:3'), in_proj_covar=tensor([0.0631, 0.0641, 0.0695, 0.0627, 0.0711, 0.0612, 0.0612, 0.0676], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:05:59,284 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.405e+02 2.884e+02 3.520e+02 8.387e+02, threshold=5.767e+02, percent-clipped=5.0 +2023-02-07 03:06:02,041 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169593.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:11,879 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6381, 2.0740, 3.3006, 1.5385, 2.3757, 2.0570, 1.7803, 2.4972], + device='cuda:3'), covar=tensor([0.1909, 0.2513, 0.0733, 0.4470, 0.1882, 0.3176, 0.2262, 0.2151], + device='cuda:3'), in_proj_covar=tensor([0.0523, 0.0601, 0.0556, 0.0637, 0.0642, 0.0591, 0.0531, 0.0632], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:06:12,853 INFO [train.py:901] (3/4) Epoch 21, batch 7950, loss[loss=0.2026, simple_loss=0.2917, pruned_loss=0.05672, over 8289.00 frames. ], tot_loss[loss=0.207, simple_loss=0.2892, pruned_loss=0.0624, over 1623976.50 frames. ], batch size: 23, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:06:31,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=169636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:33,347 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:06:46,590 INFO [train.py:901] (3/4) Epoch 21, batch 8000, loss[loss=0.2094, simple_loss=0.2966, pruned_loss=0.06105, over 8442.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2886, pruned_loss=0.06188, over 1626375.54 frames. 
], batch size: 27, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:07:06,440 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 2.194e+02 2.844e+02 3.383e+02 6.688e+02, threshold=5.687e+02, percent-clipped=2.0 +2023-02-07 03:07:07,193 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9645, 6.2066, 5.3653, 2.8261, 5.5033, 5.8398, 5.7468, 5.6420], + device='cuda:3'), covar=tensor([0.0601, 0.0359, 0.0901, 0.4022, 0.0670, 0.0798, 0.1017, 0.0555], + device='cuda:3'), in_proj_covar=tensor([0.0521, 0.0428, 0.0431, 0.0531, 0.0422, 0.0440, 0.0421, 0.0380], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:07:12,044 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9254, 1.6912, 2.0071, 1.7820, 1.9455, 1.9477, 1.8138, 0.8431], + device='cuda:3'), covar=tensor([0.5879, 0.4993, 0.2137, 0.3783, 0.2636, 0.3234, 0.2034, 0.5197], + device='cuda:3'), in_proj_covar=tensor([0.0945, 0.0977, 0.0803, 0.0946, 0.0998, 0.0892, 0.0746, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 03:07:12,147 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-07 03:07:14,031 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:07:15,422 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169702.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 03:07:19,705 INFO [train.py:901] (3/4) Epoch 21, batch 8050, loss[loss=0.2136, simple_loss=0.2913, pruned_loss=0.06796, over 7548.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2872, pruned_loss=0.06259, over 1596067.97 frames. ], batch size: 18, lr: 3.56e-03, grad_scale: 16.0 +2023-02-07 03:07:30,627 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:07:32,001 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169727.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:07:52,999 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 03:07:58,222 INFO [train.py:901] (3/4) Epoch 22, batch 0, loss[loss=0.215, simple_loss=0.282, pruned_loss=0.07399, over 8360.00 frames. ], tot_loss[loss=0.215, simple_loss=0.282, pruned_loss=0.07399, over 8360.00 frames. ], batch size: 24, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:07:58,222 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 03:08:05,067 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6925, 1.8454, 1.6380, 2.2675, 1.2054, 1.5084, 1.7158, 1.7848], + device='cuda:3'), covar=tensor([0.0781, 0.0772, 0.0893, 0.0447, 0.1150, 0.1266, 0.0797, 0.0854], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0213, 0.0207, 0.0246, 0.0250, 0.0209], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:08:09,347 INFO [train.py:935] (3/4) Epoch 22, validation: loss=0.1743, simple_loss=0.2746, pruned_loss=0.03702, over 944034.00 frames. 
+2023-02-07 03:08:09,348 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6736MB +2023-02-07 03:08:12,912 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169747.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:08:17,065 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7565, 1.7369, 2.0514, 2.0126, 1.0072, 1.7397, 2.1643, 2.2175], + device='cuda:3'), covar=tensor([0.0443, 0.1195, 0.1585, 0.1243, 0.0557, 0.1389, 0.0602, 0.0594], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0160, 0.0100, 0.0164, 0.0113, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 03:08:24,251 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 03:08:25,072 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:08:36,896 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1764, 2.1214, 1.7692, 1.9052, 1.7247, 1.5180, 1.6511, 1.6754], + device='cuda:3'), covar=tensor([0.1354, 0.0425, 0.1150, 0.0585, 0.0772, 0.1419, 0.0973, 0.0962], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0234, 0.0331, 0.0305, 0.0298, 0.0332, 0.0340, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0003, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:08:42,193 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.425e+02 2.482e+02 2.980e+02 3.558e+02 1.069e+03, threshold=5.959e+02, percent-clipped=8.0 +2023-02-07 03:08:44,174 INFO [train.py:901] (3/4) Epoch 22, batch 50, loss[loss=0.2316, simple_loss=0.3177, pruned_loss=0.07269, over 8578.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2881, pruned_loss=0.06026, over 365480.65 frames. ], batch size: 31, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:08:54,109 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=169804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:01,061 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 03:09:02,047 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:19,169 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169840.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:20,325 INFO [train.py:901] (3/4) Epoch 22, batch 100, loss[loss=0.1882, simple_loss=0.2827, pruned_loss=0.04691, over 8103.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2851, pruned_loss=0.05918, over 645952.47 frames. ], batch size: 23, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:09:23,123 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
Duration: 0.97725 +2023-02-07 03:09:25,371 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:42,071 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:09:52,438 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2508, 4.2211, 3.8602, 1.8648, 3.7830, 3.8197, 3.7984, 3.5986], + device='cuda:3'), covar=tensor([0.0765, 0.0511, 0.1008, 0.4722, 0.0853, 0.1003, 0.1224, 0.0820], + device='cuda:3'), in_proj_covar=tensor([0.0519, 0.0428, 0.0429, 0.0531, 0.0423, 0.0440, 0.0421, 0.0381], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:09:52,903 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.356e+02 3.069e+02 3.800e+02 7.981e+02, threshold=6.138e+02, percent-clipped=3.0 +2023-02-07 03:09:55,643 INFO [train.py:901] (3/4) Epoch 22, batch 150, loss[loss=0.1824, simple_loss=0.2696, pruned_loss=0.04766, over 8519.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.287, pruned_loss=0.06005, over 866423.93 frames. ], batch size: 26, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:09:55,887 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=169892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:10:12,771 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=169917.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:10:30,755 INFO [train.py:901] (3/4) Epoch 22, batch 200, loss[loss=0.2586, simple_loss=0.3201, pruned_loss=0.09856, over 7382.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2881, pruned_loss=0.06144, over 1030858.94 frames. ], batch size: 71, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:10:53,648 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.89 vs. limit=5.0 +2023-02-07 03:10:58,713 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=169983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:11:02,622 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.362e+02 2.871e+02 3.395e+02 8.094e+02, threshold=5.742e+02, percent-clipped=2.0 +2023-02-07 03:11:04,635 INFO [train.py:901] (3/4) Epoch 22, batch 250, loss[loss=0.213, simple_loss=0.2969, pruned_loss=0.06458, over 8352.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.06218, over 1162269.03 frames. ], batch size: 24, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:11:17,877 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 03:11:26,137 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 03:11:41,665 INFO [train.py:901] (3/4) Epoch 22, batch 300, loss[loss=0.2123, simple_loss=0.3004, pruned_loss=0.06214, over 8468.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.06155, over 1259569.14 frames. 
], batch size: 25, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:11:56,572 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:13,699 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.365e+02 2.486e+02 2.821e+02 3.492e+02 6.452e+02, threshold=5.641e+02, percent-clipped=3.0 +2023-02-07 03:12:15,180 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:15,770 INFO [train.py:901] (3/4) Epoch 22, batch 350, loss[loss=0.2059, simple_loss=0.2794, pruned_loss=0.06625, over 7810.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2878, pruned_loss=0.06135, over 1342937.55 frames. ], batch size: 19, lr: 3.48e-03, grad_scale: 16.0 +2023-02-07 03:12:19,957 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170098.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:27,058 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:12:49,705 INFO [train.py:901] (3/4) Epoch 22, batch 400, loss[loss=0.1984, simple_loss=0.2847, pruned_loss=0.05609, over 8581.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2888, pruned_loss=0.06203, over 1406129.78 frames. ], batch size: 49, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:12:53,701 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170148.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:13:20,105 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2170, 1.9318, 2.6405, 2.2161, 2.5810, 2.2298, 2.0411, 1.3851], + device='cuda:3'), covar=tensor([0.5788, 0.5278, 0.1984, 0.3609, 0.2494, 0.3158, 0.1948, 0.5553], + device='cuda:3'), in_proj_covar=tensor([0.0942, 0.0975, 0.0800, 0.0942, 0.0992, 0.0890, 0.0744, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 03:13:22,570 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.277e+02 2.821e+02 3.460e+02 6.418e+02, threshold=5.643e+02, percent-clipped=3.0 +2023-02-07 03:13:24,660 INFO [train.py:901] (3/4) Epoch 22, batch 450, loss[loss=0.1941, simple_loss=0.2815, pruned_loss=0.05338, over 7809.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.06164, over 1448129.53 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:13:34,394 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170206.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:13:46,329 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:13:58,274 INFO [train.py:901] (3/4) Epoch 22, batch 500, loss[loss=0.2448, simple_loss=0.3289, pruned_loss=0.08033, over 8321.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.06172, over 1486760.35 frames. 
], batch size: 26, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:14:13,729 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:14:31,697 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.523e+02 2.263e+02 2.770e+02 3.716e+02 6.957e+02, threshold=5.540e+02, percent-clipped=5.0 +2023-02-07 03:14:34,528 INFO [train.py:901] (3/4) Epoch 22, batch 550, loss[loss=0.1748, simple_loss=0.2567, pruned_loss=0.04645, over 7918.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2868, pruned_loss=0.06126, over 1517596.41 frames. ], batch size: 20, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:15:00,839 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3279, 2.7033, 2.0968, 3.6997, 1.7358, 1.9750, 2.3456, 2.7546], + device='cuda:3'), covar=tensor([0.0765, 0.0912, 0.0930, 0.0324, 0.1149, 0.1325, 0.0942, 0.0807], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0196, 0.0243, 0.0213, 0.0206, 0.0245, 0.0248, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:15:08,205 INFO [train.py:901] (3/4) Epoch 22, batch 600, loss[loss=0.2165, simple_loss=0.2979, pruned_loss=0.06759, over 8454.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2872, pruned_loss=0.06127, over 1542065.52 frames. ], batch size: 27, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:15:16,556 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170354.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:15:27,512 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 03:15:34,302 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:15:40,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.463e+02 3.010e+02 3.561e+02 9.437e+02, threshold=6.021e+02, percent-clipped=1.0 +2023-02-07 03:15:42,758 INFO [train.py:901] (3/4) Epoch 22, batch 650, loss[loss=0.2187, simple_loss=0.2879, pruned_loss=0.07481, over 5183.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2881, pruned_loss=0.06206, over 1551342.08 frames. ], batch size: 11, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:15:52,785 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6699, 5.7583, 5.0811, 2.5760, 5.1022, 5.4673, 5.3841, 5.2596], + device='cuda:3'), covar=tensor([0.0547, 0.0352, 0.0855, 0.4245, 0.0718, 0.0780, 0.1078, 0.0578], + device='cuda:3'), in_proj_covar=tensor([0.0524, 0.0430, 0.0428, 0.0530, 0.0422, 0.0441, 0.0423, 0.0381], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:15:53,430 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170407.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:17,646 INFO [train.py:901] (3/4) Epoch 22, batch 700, loss[loss=0.193, simple_loss=0.2673, pruned_loss=0.05939, over 7781.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2891, pruned_loss=0.06262, over 1567534.74 frames. 
], batch size: 19, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:16:31,459 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:42,960 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170479.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:43,713 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:49,771 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170487.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:16:50,917 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.347e+02 2.936e+02 3.672e+02 5.936e+02, threshold=5.871e+02, percent-clipped=0.0 +2023-02-07 03:16:52,905 INFO [train.py:901] (3/4) Epoch 22, batch 750, loss[loss=0.2054, simple_loss=0.2961, pruned_loss=0.05733, over 8289.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2885, pruned_loss=0.06195, over 1581851.71 frames. ], batch size: 23, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:17:01,712 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:11,627 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170519.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:13,547 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:14,719 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 03:17:23,956 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 03:17:27,404 INFO [train.py:901] (3/4) Epoch 22, batch 800, loss[loss=0.1813, simple_loss=0.2589, pruned_loss=0.05186, over 7662.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2882, pruned_loss=0.06196, over 1590501.88 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:17:28,960 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170544.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:30,549 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 03:17:57,595 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:17:58,762 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.234e+02 2.598e+02 3.180e+02 6.753e+02, threshold=5.195e+02, percent-clipped=1.0 +2023-02-07 03:18:00,808 INFO [train.py:901] (3/4) Epoch 22, batch 850, loss[loss=0.2129, simple_loss=0.2898, pruned_loss=0.06801, over 8244.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2876, pruned_loss=0.0615, over 1598379.19 frames. 
], batch size: 22, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:18:14,746 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6712, 1.6587, 2.4391, 1.6166, 1.2823, 2.4578, 0.5210, 1.4898], + device='cuda:3'), covar=tensor([0.1741, 0.1376, 0.0312, 0.1450, 0.2977, 0.0438, 0.2278, 0.1441], + device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0222, 0.0270, 0.0135, 0.0171, 0.0193], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 03:18:31,335 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 03:18:36,995 INFO [train.py:901] (3/4) Epoch 22, batch 900, loss[loss=0.1942, simple_loss=0.2586, pruned_loss=0.06493, over 7270.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2878, pruned_loss=0.06175, over 1601764.96 frames. ], batch size: 16, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:18:53,611 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5174, 1.8155, 1.9423, 1.2605, 2.0883, 1.4233, 0.6339, 1.6745], + device='cuda:3'), covar=tensor([0.0616, 0.0366, 0.0288, 0.0579, 0.0394, 0.0864, 0.0830, 0.0340], + device='cuda:3'), in_proj_covar=tensor([0.0454, 0.0394, 0.0345, 0.0444, 0.0375, 0.0534, 0.0390, 0.0419], + device='cuda:3'), out_proj_covar=tensor([1.2187e-04, 1.0338e-04, 9.0653e-05, 1.1683e-04, 9.8477e-05, 1.5078e-04, + 1.0542e-04, 1.1114e-04], device='cuda:3') +2023-02-07 03:19:09,382 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.381e+02 2.827e+02 3.296e+02 7.509e+02, threshold=5.655e+02, percent-clipped=4.0 +2023-02-07 03:19:11,447 INFO [train.py:901] (3/4) Epoch 22, batch 950, loss[loss=0.2083, simple_loss=0.2882, pruned_loss=0.06422, over 7800.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2879, pruned_loss=0.06174, over 1607672.02 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 16.0 +2023-02-07 03:19:13,664 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7215, 1.4668, 2.8841, 1.3732, 2.2215, 3.1148, 3.2219, 2.6404], + device='cuda:3'), covar=tensor([0.1158, 0.1565, 0.0350, 0.2000, 0.0788, 0.0275, 0.0546, 0.0571], + device='cuda:3'), in_proj_covar=tensor([0.0296, 0.0323, 0.0286, 0.0317, 0.0310, 0.0265, 0.0420, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:19:43,585 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 03:19:46,359 INFO [train.py:901] (3/4) Epoch 22, batch 1000, loss[loss=0.1645, simple_loss=0.2549, pruned_loss=0.03707, over 8081.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2874, pruned_loss=0.06135, over 1612892.67 frames. 
], batch size: 21, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:19:49,225 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5163, 2.4509, 1.8182, 2.1670, 2.0686, 1.5913, 1.9695, 2.0503], + device='cuda:3'), covar=tensor([0.1383, 0.0424, 0.1181, 0.0610, 0.0794, 0.1460, 0.1010, 0.0979], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0235, 0.0331, 0.0308, 0.0299, 0.0334, 0.0341, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:20:12,107 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=170778.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:20:17,096 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 03:20:19,771 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 2.326e+02 2.890e+02 3.504e+02 6.405e+02, threshold=5.779e+02, percent-clipped=4.0 +2023-02-07 03:20:21,023 INFO [train.py:901] (3/4) Epoch 22, batch 1050, loss[loss=0.1845, simple_loss=0.2611, pruned_loss=0.05394, over 7796.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2884, pruned_loss=0.06193, over 1614595.34 frames. ], batch size: 19, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:20:28,489 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 03:20:28,711 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=170803.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:20:41,786 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170823.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:20:55,953 INFO [train.py:901] (3/4) Epoch 22, batch 1100, loss[loss=0.2107, simple_loss=0.2933, pruned_loss=0.06406, over 8353.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2882, pruned_loss=0.0617, over 1615122.71 frames. ], batch size: 24, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:21:27,510 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6784, 2.4404, 1.8264, 2.3011, 2.2409, 1.6214, 2.1149, 2.1278], + device='cuda:3'), covar=tensor([0.1283, 0.0422, 0.1163, 0.0516, 0.0697, 0.1446, 0.0873, 0.0861], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0237, 0.0334, 0.0310, 0.0300, 0.0336, 0.0344, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:21:29,315 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.576e+02 3.127e+02 3.706e+02 1.049e+03, threshold=6.255e+02, percent-clipped=5.0 +2023-02-07 03:21:30,686 INFO [train.py:901] (3/4) Epoch 22, batch 1150, loss[loss=0.2526, simple_loss=0.3328, pruned_loss=0.08623, over 8355.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2894, pruned_loss=0.06289, over 1617274.20 frames. ], batch size: 26, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:21:37,435 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875 +2023-02-07 03:21:45,387 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3695, 1.3658, 2.3097, 1.2390, 2.2321, 2.5123, 2.6165, 1.9612], + device='cuda:3'), covar=tensor([0.1215, 0.1433, 0.0521, 0.2110, 0.0750, 0.0437, 0.0705, 0.0924], + device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0320, 0.0283, 0.0314, 0.0306, 0.0262, 0.0415, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 03:21:52,885 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4102, 1.4726, 1.3943, 1.7769, 0.7351, 1.2438, 1.2318, 1.4726], + device='cuda:3'), covar=tensor([0.0823, 0.0774, 0.0981, 0.0509, 0.1104, 0.1295, 0.0804, 0.0712], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0198, 0.0247, 0.0215, 0.0209, 0.0249, 0.0251, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:21:56,818 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=170931.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:22:01,709 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=170938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:22:04,216 INFO [train.py:901] (3/4) Epoch 22, batch 1200, loss[loss=0.2472, simple_loss=0.3268, pruned_loss=0.08379, over 8448.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2889, pruned_loss=0.06309, over 1614756.28 frames. ], batch size: 27, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:22:07,062 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=170946.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:22:38,803 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 2.384e+02 2.807e+02 3.549e+02 5.873e+02, threshold=5.615e+02, percent-clipped=0.0 +2023-02-07 03:22:40,087 INFO [train.py:901] (3/4) Epoch 22, batch 1250, loss[loss=0.205, simple_loss=0.2885, pruned_loss=0.06075, over 8462.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06227, over 1615130.43 frames. ], batch size: 25, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:22:57,673 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7468, 4.7555, 4.2517, 2.2510, 4.1379, 4.3561, 4.3417, 4.1118], + device='cuda:3'), covar=tensor([0.0684, 0.0490, 0.1035, 0.4284, 0.0878, 0.0809, 0.1113, 0.0666], + device='cuda:3'), in_proj_covar=tensor([0.0522, 0.0430, 0.0430, 0.0530, 0.0422, 0.0441, 0.0420, 0.0382], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:22:58,785 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 03:23:14,553 INFO [train.py:901] (3/4) Epoch 22, batch 1300, loss[loss=0.2009, simple_loss=0.2806, pruned_loss=0.06063, over 7963.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2872, pruned_loss=0.06187, over 1617509.09 frames. 
], batch size: 21, lr: 3.47e-03, grad_scale: 8.0 +2023-02-07 03:23:17,512 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171046.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:23:47,495 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.382e+02 2.988e+02 3.753e+02 7.309e+02, threshold=5.975e+02, percent-clipped=5.0 +2023-02-07 03:23:48,840 INFO [train.py:901] (3/4) Epoch 22, batch 1350, loss[loss=0.1719, simple_loss=0.2646, pruned_loss=0.03957, over 8240.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06154, over 1614500.83 frames. ], batch size: 22, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:24:01,690 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171110.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:24:02,363 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3512, 1.6471, 4.5607, 1.6400, 4.0059, 3.7720, 4.0796, 3.9376], + device='cuda:3'), covar=tensor([0.0651, 0.4645, 0.0596, 0.4459, 0.1309, 0.1122, 0.0675, 0.0763], + device='cuda:3'), in_proj_covar=tensor([0.0625, 0.0637, 0.0688, 0.0620, 0.0704, 0.0604, 0.0606, 0.0672], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:24:23,450 INFO [train.py:901] (3/4) Epoch 22, batch 1400, loss[loss=0.2233, simple_loss=0.308, pruned_loss=0.06932, over 8623.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.06184, over 1615656.03 frames. ], batch size: 31, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:24:23,822 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-07 03:24:55,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 3.047e+02 3.835e+02 9.203e+02, threshold=6.094e+02, percent-clipped=3.0 +2023-02-07 03:24:57,483 INFO [train.py:901] (3/4) Epoch 22, batch 1450, loss[loss=0.2573, simple_loss=0.3349, pruned_loss=0.08979, over 8498.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.289, pruned_loss=0.06237, over 1617172.71 frames. ], batch size: 28, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:24:58,893 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:25:06,227 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 03:25:12,520 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:25:16,688 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171219.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:25:32,454 INFO [train.py:901] (3/4) Epoch 22, batch 1500, loss[loss=0.2323, simple_loss=0.31, pruned_loss=0.07729, over 8727.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2896, pruned_loss=0.06287, over 1619756.51 frames. 
], batch size: 34, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:26:04,593 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.477e+02 2.962e+02 3.885e+02 1.079e+03, threshold=5.924e+02, percent-clipped=2.0 +2023-02-07 03:26:04,686 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171290.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:26:05,961 INFO [train.py:901] (3/4) Epoch 22, batch 1550, loss[loss=0.1948, simple_loss=0.2793, pruned_loss=0.05509, over 7972.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2899, pruned_loss=0.06317, over 1619643.00 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:26:12,947 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:26:30,106 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:26:31,051 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 03:26:40,691 INFO [train.py:901] (3/4) Epoch 22, batch 1600, loss[loss=0.1831, simple_loss=0.2572, pruned_loss=0.05448, over 7178.00 frames. ], tot_loss[loss=0.208, simple_loss=0.2895, pruned_loss=0.06323, over 1614838.74 frames. ], batch size: 16, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:26:48,987 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1590, 1.0538, 1.2495, 1.0879, 0.9889, 1.2727, 0.1259, 0.9964], + device='cuda:3'), covar=tensor([0.1608, 0.1262, 0.0546, 0.0766, 0.2451, 0.0603, 0.2121, 0.1134], + device='cuda:3'), in_proj_covar=tensor([0.0188, 0.0196, 0.0127, 0.0222, 0.0269, 0.0135, 0.0170, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 03:26:55,769 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171363.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:27:13,639 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.350e+02 2.510e+02 3.045e+02 3.987e+02 6.104e+02, threshold=6.090e+02, percent-clipped=2.0 +2023-02-07 03:27:15,007 INFO [train.py:901] (3/4) Epoch 22, batch 1650, loss[loss=0.2131, simple_loss=0.2973, pruned_loss=0.06445, over 8337.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2895, pruned_loss=0.06289, over 1613669.78 frames. ], batch size: 26, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:27:24,112 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171405.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:27:28,874 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8924, 3.5096, 2.1912, 2.6366, 2.6612, 1.9164, 2.6313, 2.9003], + device='cuda:3'), covar=tensor([0.1976, 0.0415, 0.1314, 0.0869, 0.0826, 0.1615, 0.1187, 0.1137], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0238, 0.0337, 0.0311, 0.0302, 0.0340, 0.0346, 0.0321], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:27:51,031 INFO [train.py:901] (3/4) Epoch 22, batch 1700, loss[loss=0.1741, simple_loss=0.2466, pruned_loss=0.05078, over 7238.00 frames. ], tot_loss[loss=0.2082, simple_loss=0.2897, pruned_loss=0.06332, over 1608333.94 frames. 
], batch size: 16, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:27:59,282 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171454.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:28:01,709 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0 +2023-02-07 03:28:24,568 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.429e+02 3.050e+02 3.629e+02 7.357e+02, threshold=6.100e+02, percent-clipped=3.0 +2023-02-07 03:28:25,936 INFO [train.py:901] (3/4) Epoch 22, batch 1750, loss[loss=0.2356, simple_loss=0.3121, pruned_loss=0.07951, over 7069.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2891, pruned_loss=0.06304, over 1610298.66 frames. ], batch size: 72, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:28:42,139 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171516.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:28:58,898 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.60 vs. limit=2.0 +2023-02-07 03:29:00,470 INFO [train.py:901] (3/4) Epoch 22, batch 1800, loss[loss=0.1881, simple_loss=0.2709, pruned_loss=0.05264, over 8356.00 frames. ], tot_loss[loss=0.2088, simple_loss=0.2902, pruned_loss=0.06371, over 1614674.92 frames. ], batch size: 24, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:29:11,511 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:29:19,688 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:29:34,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.408e+02 2.801e+02 3.784e+02 7.831e+02, threshold=5.602e+02, percent-clipped=2.0 +2023-02-07 03:29:35,958 INFO [train.py:901] (3/4) Epoch 22, batch 1850, loss[loss=0.1682, simple_loss=0.2393, pruned_loss=0.04852, over 7701.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2899, pruned_loss=0.0634, over 1618674.94 frames. ], batch size: 18, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:30:10,047 INFO [train.py:901] (3/4) Epoch 22, batch 1900, loss[loss=0.2315, simple_loss=0.3113, pruned_loss=0.07578, over 8633.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2895, pruned_loss=0.06288, over 1617146.21 frames. ], batch size: 39, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:30:24,228 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171661.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:30:32,191 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171673.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:30:36,776 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 03:30:41,592 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171686.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:30:44,064 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.518e+02 3.035e+02 3.649e+02 9.576e+02, threshold=6.070e+02, percent-clipped=4.0 +2023-02-07 03:30:45,465 INFO [train.py:901] (3/4) Epoch 22, batch 1950, loss[loss=0.199, simple_loss=0.2865, pruned_loss=0.05575, over 8286.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2894, pruned_loss=0.06289, over 1620290.84 frames. 
], batch size: 23, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:30:48,018 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 03:30:56,320 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171707.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:31:07,793 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 03:31:20,039 INFO [train.py:901] (3/4) Epoch 22, batch 2000, loss[loss=0.267, simple_loss=0.3318, pruned_loss=0.1011, over 8336.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2893, pruned_loss=0.06296, over 1617561.33 frames. ], batch size: 25, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:31:43,122 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9660, 1.5355, 4.3191, 1.9760, 2.4276, 4.9398, 4.9795, 4.2037], + device='cuda:3'), covar=tensor([0.1422, 0.1984, 0.0348, 0.2042, 0.1369, 0.0202, 0.0476, 0.0606], + device='cuda:3'), in_proj_covar=tensor([0.0295, 0.0323, 0.0285, 0.0318, 0.0307, 0.0264, 0.0419, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 03:31:53,990 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.301e+02 2.928e+02 3.706e+02 6.798e+02, threshold=5.855e+02, percent-clipped=1.0 +2023-02-07 03:31:55,404 INFO [train.py:901] (3/4) Epoch 22, batch 2050, loss[loss=0.2072, simple_loss=0.2953, pruned_loss=0.0595, over 8493.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2883, pruned_loss=0.06215, over 1617886.40 frames. ], batch size: 28, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:32:17,606 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:19,747 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171825.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:31,149 INFO [train.py:901] (3/4) Epoch 22, batch 2100, loss[loss=0.1759, simple_loss=0.2594, pruned_loss=0.04618, over 8083.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.06215, over 1622566.32 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:32:36,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171850.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:43,493 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=171860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:32:45,785 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6816, 2.3374, 3.5365, 1.9011, 1.6855, 3.3766, 0.5116, 2.0398], + device='cuda:3'), covar=tensor([0.1710, 0.1300, 0.0243, 0.1986, 0.3054, 0.0359, 0.2739, 0.1631], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0198, 0.0127, 0.0223, 0.0272, 0.0137, 0.0171, 0.0194], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 03:33:05,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.927e+02 2.505e+02 2.999e+02 3.749e+02 9.868e+02, threshold=5.998e+02, percent-clipped=7.0 +2023-02-07 03:33:06,894 INFO [train.py:901] (3/4) Epoch 22, batch 2150, loss[loss=0.1906, simple_loss=0.2842, pruned_loss=0.04846, over 8193.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2886, pruned_loss=0.06193, over 1627096.93 frames. 
], batch size: 23, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:33:33,131 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=171929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:33:42,593 INFO [train.py:901] (3/4) Epoch 22, batch 2200, loss[loss=0.216, simple_loss=0.292, pruned_loss=0.06995, over 7810.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.289, pruned_loss=0.0624, over 1621799.24 frames. ], batch size: 20, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:33:51,040 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=171954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:33:51,697 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=171955.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:34:04,374 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-07 03:34:05,585 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=171975.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:34:15,550 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 2.362e+02 2.812e+02 3.623e+02 6.076e+02, threshold=5.624e+02, percent-clipped=1.0 +2023-02-07 03:34:16,940 INFO [train.py:901] (3/4) Epoch 22, batch 2250, loss[loss=0.2031, simple_loss=0.2867, pruned_loss=0.05976, over 8091.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06153, over 1620536.72 frames. ], batch size: 21, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:34:17,294 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 03:34:54,269 INFO [train.py:901] (3/4) Epoch 22, batch 2300, loss[loss=0.1998, simple_loss=0.2752, pruned_loss=0.06218, over 7537.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06136, over 1621040.42 frames. ], batch size: 18, lr: 3.46e-03, grad_scale: 8.0 +2023-02-07 03:35:19,363 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0919, 3.6844, 2.3623, 2.8504, 2.8510, 2.1988, 2.7833, 2.9858], + device='cuda:3'), covar=tensor([0.1607, 0.0330, 0.1066, 0.0729, 0.0705, 0.1337, 0.1086, 0.1135], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0233, 0.0331, 0.0307, 0.0299, 0.0336, 0.0343, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:35:20,116 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172078.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:35:28,297 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.401e+02 3.005e+02 3.667e+02 7.010e+02, threshold=6.010e+02, percent-clipped=1.0 +2023-02-07 03:35:29,621 INFO [train.py:901] (3/4) Epoch 22, batch 2350, loss[loss=0.2447, simple_loss=0.3231, pruned_loss=0.0831, over 8464.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2883, pruned_loss=0.06139, over 1623780.09 frames. ], batch size: 25, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:35:37,311 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:36:01,305 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172136.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:36:05,214 INFO [train.py:901] (3/4) Epoch 22, batch 2400, loss[loss=0.2246, simple_loss=0.3037, pruned_loss=0.07277, over 8025.00 frames. 
], tot_loss[loss=0.2072, simple_loss=0.2896, pruned_loss=0.06244, over 1621581.84 frames. ], batch size: 22, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:36:39,691 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.658e+02 3.455e+02 4.348e+02 7.809e+02, threshold=6.910e+02, percent-clipped=6.0 +2023-02-07 03:36:41,123 INFO [train.py:901] (3/4) Epoch 22, batch 2450, loss[loss=0.1764, simple_loss=0.2619, pruned_loss=0.04547, over 7814.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2889, pruned_loss=0.06226, over 1622260.38 frames. ], batch size: 20, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:36:45,732 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5043, 2.3823, 3.2297, 2.5999, 3.1115, 2.5333, 2.2662, 1.8451], + device='cuda:3'), covar=tensor([0.5078, 0.4854, 0.1900, 0.3322, 0.2331, 0.2848, 0.1817, 0.5222], + device='cuda:3'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0997, 0.0895, 0.0748, 0.0828], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 03:37:08,138 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172231.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:37:16,955 INFO [train.py:901] (3/4) Epoch 22, batch 2500, loss[loss=0.2279, simple_loss=0.3122, pruned_loss=0.07181, over 8286.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.06219, over 1623976.69 frames. ], batch size: 23, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:37:26,701 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:37:37,000 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9841, 2.1893, 1.8616, 2.7790, 1.3247, 1.6252, 1.9022, 2.1884], + device='cuda:3'), covar=tensor([0.0775, 0.0815, 0.0842, 0.0399, 0.1049, 0.1263, 0.0911, 0.0781], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0197, 0.0243, 0.0215, 0.0206, 0.0248, 0.0251, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:37:50,850 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.288e+02 2.722e+02 3.540e+02 9.975e+02, threshold=5.443e+02, percent-clipped=1.0 +2023-02-07 03:37:52,252 INFO [train.py:901] (3/4) Epoch 22, batch 2550, loss[loss=0.162, simple_loss=0.2386, pruned_loss=0.0427, over 7918.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2882, pruned_loss=0.06205, over 1621788.82 frames. ], batch size: 20, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:37:56,722 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:38:25,419 INFO [train.py:901] (3/4) Epoch 22, batch 2600, loss[loss=0.1994, simple_loss=0.2864, pruned_loss=0.05624, over 8360.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.288, pruned_loss=0.0621, over 1618987.54 frames. ], batch size: 24, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:38:58,398 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.484e+02 3.096e+02 3.957e+02 1.134e+03, threshold=6.191e+02, percent-clipped=6.0 +2023-02-07 03:39:00,474 INFO [train.py:901] (3/4) Epoch 22, batch 2650, loss[loss=0.2063, simple_loss=0.2944, pruned_loss=0.05913, over 8497.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2878, pruned_loss=0.06177, over 1620902.46 frames. 
], batch size: 28, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:39:10,806 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3537, 2.8790, 2.0315, 3.9142, 1.7669, 2.1339, 2.4177, 2.9585], + device='cuda:3'), covar=tensor([0.0691, 0.0716, 0.0941, 0.0268, 0.1023, 0.1160, 0.0903, 0.0736], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0197, 0.0244, 0.0216, 0.0206, 0.0248, 0.0251, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:39:16,288 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172414.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:39:35,347 INFO [train.py:901] (3/4) Epoch 22, batch 2700, loss[loss=0.2064, simple_loss=0.2961, pruned_loss=0.05839, over 8258.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2891, pruned_loss=0.06281, over 1619778.11 frames. ], batch size: 24, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:40:02,839 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:40:09,203 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-07 03:40:09,437 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.345e+02 2.798e+02 3.767e+02 1.133e+03, threshold=5.596e+02, percent-clipped=4.0 +2023-02-07 03:40:10,842 INFO [train.py:901] (3/4) Epoch 22, batch 2750, loss[loss=0.1774, simple_loss=0.2512, pruned_loss=0.05181, over 7531.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2892, pruned_loss=0.06285, over 1620899.61 frames. ], batch size: 18, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:40:45,667 INFO [train.py:901] (3/4) Epoch 22, batch 2800, loss[loss=0.2041, simple_loss=0.2951, pruned_loss=0.05653, over 8350.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2897, pruned_loss=0.06327, over 1621690.54 frames. ], batch size: 24, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:41:18,219 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.794e+02 2.395e+02 2.840e+02 3.614e+02 7.820e+02, threshold=5.680e+02, percent-clipped=6.0 +2023-02-07 03:41:20,376 INFO [train.py:901] (3/4) Epoch 22, batch 2850, loss[loss=0.1908, simple_loss=0.2719, pruned_loss=0.05482, over 7965.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2894, pruned_loss=0.06303, over 1619815.58 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:41:23,221 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=172595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:41:25,863 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2406, 4.1922, 3.8434, 1.9447, 3.7611, 3.7824, 3.7957, 3.5669], + device='cuda:3'), covar=tensor([0.0804, 0.0610, 0.1111, 0.4603, 0.1004, 0.1104, 0.1428, 0.0850], + device='cuda:3'), in_proj_covar=tensor([0.0520, 0.0431, 0.0427, 0.0532, 0.0423, 0.0443, 0.0424, 0.0383], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:41:37,778 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:41:44,849 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. 
limit=2.0 +2023-02-07 03:41:56,011 INFO [train.py:901] (3/4) Epoch 22, batch 2900, loss[loss=0.1826, simple_loss=0.2626, pruned_loss=0.05127, over 7775.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2881, pruned_loss=0.06224, over 1615767.11 frames. ], batch size: 19, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:41:57,554 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:42:15,797 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172670.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:42:24,163 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 03:42:28,901 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.482e+02 2.975e+02 3.907e+02 6.756e+02, threshold=5.949e+02, percent-clipped=4.0 +2023-02-07 03:42:30,283 INFO [train.py:901] (3/4) Epoch 22, batch 2950, loss[loss=0.2197, simple_loss=0.2958, pruned_loss=0.07175, over 8607.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06192, over 1611220.43 frames. ], batch size: 31, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:42:32,552 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:42:56,537 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4763, 2.3384, 3.1924, 2.5508, 3.0085, 2.4604, 2.2771, 1.7491], + device='cuda:3'), covar=tensor([0.5451, 0.4849, 0.1890, 0.3648, 0.2383, 0.2935, 0.1773, 0.5670], + device='cuda:3'), in_proj_covar=tensor([0.0944, 0.0978, 0.0805, 0.0942, 0.0995, 0.0896, 0.0745, 0.0827], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 03:43:05,661 INFO [train.py:901] (3/4) Epoch 22, batch 3000, loss[loss=0.1642, simple_loss=0.2373, pruned_loss=0.04551, over 7696.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06205, over 1611076.19 frames. ], batch size: 18, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:43:05,662 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 03:43:17,969 INFO [train.py:935] (3/4) Epoch 22, validation: loss=0.1735, simple_loss=0.2739, pruned_loss=0.03659, over 944034.00 frames. +2023-02-07 03:43:17,970 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 03:43:25,648 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172752.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:43:51,448 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.429e+02 2.191e+02 2.765e+02 3.574e+02 6.067e+02, threshold=5.530e+02, percent-clipped=1.0 +2023-02-07 03:43:52,760 INFO [train.py:901] (3/4) Epoch 22, batch 3050, loss[loss=0.2046, simple_loss=0.2869, pruned_loss=0.06109, over 7804.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.06168, over 1608552.45 frames. 
], batch size: 19, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:44:23,922 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3330, 1.4385, 1.3104, 1.7636, 0.7131, 1.2455, 1.2589, 1.4297], + device='cuda:3'), covar=tensor([0.0913, 0.0812, 0.1065, 0.0509, 0.1180, 0.1360, 0.0787, 0.0828], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0198, 0.0246, 0.0217, 0.0208, 0.0249, 0.0252, 0.0211], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:44:26,388 INFO [train.py:901] (3/4) Epoch 22, batch 3100, loss[loss=0.1997, simple_loss=0.2838, pruned_loss=0.0578, over 8343.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2868, pruned_loss=0.06147, over 1605652.84 frames. ], batch size: 26, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:44:32,707 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=172851.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:40,606 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=172863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:50,780 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=172876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:44:59,807 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.451e+02 3.163e+02 4.463e+02 7.617e+02, threshold=6.327e+02, percent-clipped=7.0 +2023-02-07 03:45:01,202 INFO [train.py:901] (3/4) Epoch 22, batch 3150, loss[loss=0.2208, simple_loss=0.2947, pruned_loss=0.07346, over 8086.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2864, pruned_loss=0.06136, over 1607632.46 frames. ], batch size: 21, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:45:33,032 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5012, 1.9916, 3.0941, 1.3646, 2.3603, 1.8802, 1.5760, 2.3887], + device='cuda:3'), covar=tensor([0.1904, 0.2446, 0.0886, 0.4448, 0.1713, 0.3194, 0.2283, 0.2099], + device='cuda:3'), in_proj_covar=tensor([0.0530, 0.0605, 0.0558, 0.0642, 0.0645, 0.0591, 0.0534, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:45:35,482 INFO [train.py:901] (3/4) Epoch 22, batch 3200, loss[loss=0.1864, simple_loss=0.2674, pruned_loss=0.05272, over 7684.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2863, pruned_loss=0.06114, over 1608660.47 frames. 
], batch size: 18, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:45:40,969 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1164, 2.3779, 1.9318, 2.8809, 1.3371, 1.7087, 2.0368, 2.3168], + device='cuda:3'), covar=tensor([0.0702, 0.0708, 0.0863, 0.0342, 0.1108, 0.1196, 0.0803, 0.0750], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0198, 0.0246, 0.0217, 0.0207, 0.0249, 0.0252, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 03:45:47,744 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:45:53,317 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1481, 1.4690, 1.7247, 1.3757, 1.0114, 1.4572, 1.9118, 1.6447], + device='cuda:3'), covar=tensor([0.0490, 0.1238, 0.1611, 0.1461, 0.0584, 0.1482, 0.0637, 0.0622], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0099, 0.0163, 0.0111, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 03:46:06,753 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=172987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:46:09,263 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.554e+02 2.964e+02 3.773e+02 6.891e+02, threshold=5.928e+02, percent-clipped=2.0 +2023-02-07 03:46:10,583 INFO [train.py:901] (3/4) Epoch 22, batch 3250, loss[loss=0.1968, simple_loss=0.2951, pruned_loss=0.04924, over 8107.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06117, over 1611530.68 frames. ], batch size: 23, lr: 3.45e-03, grad_scale: 16.0 +2023-02-07 03:46:28,200 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1481, 3.7593, 2.4460, 3.0307, 2.7491, 1.9777, 2.8994, 3.2702], + device='cuda:3'), covar=tensor([0.1492, 0.0308, 0.1059, 0.0719, 0.0747, 0.1550, 0.0993, 0.0804], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0235, 0.0336, 0.0312, 0.0302, 0.0341, 0.0348, 0.0321], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 03:46:45,375 INFO [train.py:901] (3/4) Epoch 22, batch 3300, loss[loss=0.223, simple_loss=0.295, pruned_loss=0.07552, over 8576.00 frames. ], tot_loss[loss=0.205, simple_loss=0.287, pruned_loss=0.06153, over 1612227.59 frames. ], batch size: 31, lr: 3.45e-03, grad_scale: 8.0 +2023-02-07 03:47:07,565 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:17,930 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.506e+02 2.831e+02 3.669e+02 6.075e+02, threshold=5.662e+02, percent-clipped=1.0 +2023-02-07 03:47:18,594 INFO [train.py:901] (3/4) Epoch 22, batch 3350, loss[loss=0.23, simple_loss=0.2974, pruned_loss=0.08131, over 8442.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2869, pruned_loss=0.06215, over 1607838.65 frames. 
], batch size: 27, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:47:22,044 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:26,752 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:47:54,975 INFO [train.py:901] (3/4) Epoch 22, batch 3400, loss[loss=0.2365, simple_loss=0.3179, pruned_loss=0.07753, over 8467.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2878, pruned_loss=0.06286, over 1608496.60 frames. ], batch size: 25, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:48:12,781 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173168.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:48:21,518 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4243, 1.3941, 4.6049, 1.7235, 4.1181, 3.8267, 4.1927, 4.0490], + device='cuda:3'), covar=tensor([0.0520, 0.4527, 0.0420, 0.3773, 0.0965, 0.0903, 0.0511, 0.0567], + device='cuda:3'), in_proj_covar=tensor([0.0625, 0.0636, 0.0685, 0.0618, 0.0700, 0.0604, 0.0603, 0.0669], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:48:28,294 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 2.494e+02 3.128e+02 3.771e+02 6.972e+02, threshold=6.255e+02, percent-clipped=4.0 +2023-02-07 03:48:28,961 INFO [train.py:901] (3/4) Epoch 22, batch 3450, loss[loss=0.2302, simple_loss=0.3101, pruned_loss=0.07516, over 8187.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2875, pruned_loss=0.06236, over 1612225.76 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:48:39,550 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173207.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:48:42,430 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173211.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:49:05,730 INFO [train.py:901] (3/4) Epoch 22, batch 3500, loss[loss=0.2278, simple_loss=0.294, pruned_loss=0.0808, over 7795.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.288, pruned_loss=0.0619, over 1615885.92 frames. ], batch size: 19, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:49:24,789 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 03:49:38,931 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.642e+02 3.082e+02 3.788e+02 9.506e+02, threshold=6.164e+02, percent-clipped=4.0 +2023-02-07 03:49:39,650 INFO [train.py:901] (3/4) Epoch 22, batch 3550, loss[loss=0.187, simple_loss=0.2635, pruned_loss=0.05523, over 7789.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.0617, over 1610769.45 frames. 
], batch size: 19, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:49:50,462 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6431, 4.6264, 4.1446, 2.1806, 4.0783, 4.0308, 4.2194, 4.0107], + device='cuda:3'), covar=tensor([0.0686, 0.0482, 0.1055, 0.4241, 0.0921, 0.1018, 0.1150, 0.0712], + device='cuda:3'), in_proj_covar=tensor([0.0523, 0.0434, 0.0432, 0.0536, 0.0427, 0.0447, 0.0426, 0.0386], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:50:00,033 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173322.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:03,372 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:06,898 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:10,302 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8483, 1.6707, 1.9815, 1.6103, 1.3421, 1.6810, 2.3483, 1.9483], + device='cuda:3'), covar=tensor([0.0477, 0.1236, 0.1607, 0.1423, 0.0571, 0.1424, 0.0625, 0.0640], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 03:50:14,773 INFO [train.py:901] (3/4) Epoch 22, batch 3600, loss[loss=0.2201, simple_loss=0.3104, pruned_loss=0.06491, over 8517.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2877, pruned_loss=0.06192, over 1609835.17 frames. ], batch size: 28, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:50:24,994 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173356.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:26,362 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:43,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:50:48,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.398e+02 3.034e+02 4.459e+02 8.281e+02, threshold=6.068e+02, percent-clipped=7.0 +2023-02-07 03:50:49,310 INFO [train.py:901] (3/4) Epoch 22, batch 3650, loss[loss=0.1933, simple_loss=0.266, pruned_loss=0.06032, over 6861.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.289, pruned_loss=0.06289, over 1612675.91 frames. ], batch size: 15, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:23,355 INFO [train.py:901] (3/4) Epoch 22, batch 3700, loss[loss=0.177, simple_loss=0.2692, pruned_loss=0.04238, over 8595.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2868, pruned_loss=0.06166, over 1607664.39 frames. ], batch size: 34, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:24,744 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 03:51:42,310 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:51:51,708 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.51 vs. 
limit=2.0 +2023-02-07 03:51:57,922 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.519e+02 2.931e+02 3.909e+02 7.363e+02, threshold=5.861e+02, percent-clipped=2.0 +2023-02-07 03:51:58,522 INFO [train.py:901] (3/4) Epoch 22, batch 3750, loss[loss=0.1933, simple_loss=0.2669, pruned_loss=0.05987, over 7511.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2876, pruned_loss=0.06179, over 1611239.57 frames. ], batch size: 18, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:51:58,723 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:52:11,078 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173509.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:52:12,783 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:52:18,145 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5054, 1.5620, 4.7165, 1.8994, 4.1906, 3.9105, 4.2916, 4.1480], + device='cuda:3'), covar=tensor([0.0556, 0.4484, 0.0473, 0.3633, 0.0990, 0.0915, 0.0560, 0.0583], + device='cuda:3'), in_proj_covar=tensor([0.0628, 0.0640, 0.0689, 0.0621, 0.0702, 0.0607, 0.0605, 0.0674], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 03:52:22,066 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2405, 1.3183, 1.6391, 1.2736, 0.7019, 1.4088, 1.2238, 1.0522], + device='cuda:3'), covar=tensor([0.0535, 0.1197, 0.1561, 0.1385, 0.0531, 0.1448, 0.0654, 0.0686], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0157, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 03:52:23,509 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 03:52:32,208 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0920, 1.4729, 1.7602, 1.4275, 0.9137, 1.5233, 1.8301, 1.6102], + device='cuda:3'), covar=tensor([0.0508, 0.1292, 0.1656, 0.1430, 0.0601, 0.1483, 0.0655, 0.0650], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0157, 0.0099, 0.0162, 0.0111, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 03:52:32,730 INFO [train.py:901] (3/4) Epoch 22, batch 3800, loss[loss=0.1698, simple_loss=0.2567, pruned_loss=0.04148, over 8093.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.288, pruned_loss=0.06172, over 1610556.59 frames. ], batch size: 21, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:52:35,956 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.98 vs. 
limit=5.0 +2023-02-07 03:52:41,041 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0921, 1.0143, 1.1949, 0.9358, 0.9093, 1.2178, 0.0721, 0.8570], + device='cuda:3'), covar=tensor([0.1490, 0.1330, 0.0509, 0.0861, 0.2467, 0.0548, 0.2233, 0.1294], + device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0198, 0.0127, 0.0220, 0.0267, 0.0135, 0.0170, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 03:52:58,747 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173578.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:07,973 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.467e+02 3.140e+02 3.842e+02 8.904e+02, threshold=6.281e+02, percent-clipped=2.0 +2023-02-07 03:53:08,696 INFO [train.py:901] (3/4) Epoch 22, batch 3850, loss[loss=0.1878, simple_loss=0.2788, pruned_loss=0.04846, over 8114.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2876, pruned_loss=0.06139, over 1613228.76 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:53:16,530 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173603.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:30,772 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 03:53:33,550 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:36,894 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=173632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:53:43,568 INFO [train.py:901] (3/4) Epoch 22, batch 3900, loss[loss=0.1873, simple_loss=0.2825, pruned_loss=0.04601, over 8094.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.288, pruned_loss=0.0616, over 1614986.04 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:54:02,782 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7865, 1.7292, 2.4458, 1.4401, 1.2670, 2.4007, 0.4499, 1.3951], + device='cuda:3'), covar=tensor([0.2080, 0.1255, 0.0382, 0.1505, 0.2967, 0.0391, 0.2337, 0.1512], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0198, 0.0128, 0.0221, 0.0269, 0.0136, 0.0171, 0.0193], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 03:54:03,324 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173671.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:54:17,229 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.938e+02 2.507e+02 2.945e+02 3.654e+02 8.206e+02, threshold=5.890e+02, percent-clipped=3.0 +2023-02-07 03:54:17,888 INFO [train.py:901] (3/4) Epoch 22, batch 3950, loss[loss=0.2131, simple_loss=0.293, pruned_loss=0.06663, over 8030.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2867, pruned_loss=0.06141, over 1614328.83 frames. ], batch size: 22, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:54:53,286 INFO [train.py:901] (3/4) Epoch 22, batch 4000, loss[loss=0.1952, simple_loss=0.2835, pruned_loss=0.05348, over 8114.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2873, pruned_loss=0.06144, over 1614441.93 frames. 
], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:55:23,150 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:55:26,124 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.329e+02 2.821e+02 3.599e+02 1.045e+03, threshold=5.642e+02, percent-clipped=6.0 +2023-02-07 03:55:26,793 INFO [train.py:901] (3/4) Epoch 22, batch 4050, loss[loss=0.2227, simple_loss=0.3027, pruned_loss=0.07139, over 8322.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2871, pruned_loss=0.06108, over 1613100.93 frames. ], batch size: 26, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:02,761 INFO [train.py:901] (3/4) Epoch 22, batch 4100, loss[loss=0.22, simple_loss=0.3075, pruned_loss=0.06628, over 8203.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2858, pruned_loss=0.06101, over 1609116.37 frames. ], batch size: 23, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:10,366 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173853.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 03:56:31,252 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=173883.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:56:36,362 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.656e+02 2.329e+02 2.764e+02 3.605e+02 7.317e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 03:56:37,019 INFO [train.py:901] (3/4) Epoch 22, batch 4150, loss[loss=0.1564, simple_loss=0.2343, pruned_loss=0.03923, over 7420.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2858, pruned_loss=0.0611, over 1606909.92 frames. ], batch size: 17, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:56:45,820 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6209, 1.6527, 2.4174, 1.3777, 1.1182, 2.3485, 0.4211, 1.4023], + device='cuda:3'), covar=tensor([0.1770, 0.1053, 0.0271, 0.1586, 0.2720, 0.0335, 0.2105, 0.1293], + device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0196, 0.0127, 0.0220, 0.0267, 0.0135, 0.0171, 0.0191], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 03:56:47,601 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=173908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:57:12,185 INFO [train.py:901] (3/4) Epoch 22, batch 4200, loss[loss=0.1649, simple_loss=0.2457, pruned_loss=0.04202, over 7414.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06102, over 1611749.32 frames. ], batch size: 17, lr: 3.44e-03, grad_scale: 8.0 +2023-02-07 03:57:28,914 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 03:57:29,777 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=173968.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 03:57:35,170 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=173976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:57:46,762 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.784e+02 2.391e+02 3.056e+02 3.931e+02 9.713e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 03:57:46,783 INFO [train.py:901] (3/4) Epoch 22, batch 4250, loss[loss=0.2062, simple_loss=0.2885, pruned_loss=0.06197, over 8590.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2866, pruned_loss=0.06137, over 1611954.26 frames. 
], batch size: 34, lr: 3.44e-03, grad_scale: 4.0 +2023-02-07 03:57:55,806 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 03:58:15,721 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174033.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:17,063 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174035.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:17,880 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 03:58:21,636 INFO [train.py:901] (3/4) Epoch 22, batch 4300, loss[loss=0.222, simple_loss=0.3152, pruned_loss=0.06438, over 8501.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2863, pruned_loss=0.06096, over 1612992.98 frames. ], batch size: 28, lr: 3.44e-03, grad_scale: 4.0 +2023-02-07 03:58:21,849 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:25,741 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9363, 1.3705, 1.6095, 1.3357, 0.8370, 1.4241, 1.7579, 1.4498], + device='cuda:3'), covar=tensor([0.0533, 0.1260, 0.1667, 0.1441, 0.0621, 0.1459, 0.0684, 0.0690], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0158, 0.0099, 0.0163, 0.0111, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 03:58:40,410 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:56,914 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 03:58:57,380 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.427e+02 2.775e+02 3.458e+02 5.995e+02, threshold=5.550e+02, percent-clipped=0.0 +2023-02-07 03:58:57,401 INFO [train.py:901] (3/4) Epoch 22, batch 4350, loss[loss=0.2108, simple_loss=0.2991, pruned_loss=0.06127, over 8099.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2866, pruned_loss=0.06126, over 1611612.81 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 4.0 +2023-02-07 03:59:25,036 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 03:59:32,547 INFO [train.py:901] (3/4) Epoch 22, batch 4400, loss[loss=0.2206, simple_loss=0.3016, pruned_loss=0.06977, over 8438.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2872, pruned_loss=0.06187, over 1608976.37 frames. ], batch size: 49, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 03:59:42,831 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:00:06,456 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 04:00:07,758 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.623e+02 3.065e+02 3.902e+02 1.119e+03, threshold=6.129e+02, percent-clipped=5.0 +2023-02-07 04:00:07,777 INFO [train.py:901] (3/4) Epoch 22, batch 4450, loss[loss=0.2424, simple_loss=0.3242, pruned_loss=0.08031, over 8590.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2884, pruned_loss=0.06229, over 1612549.34 frames. 
], batch size: 49, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:00:30,152 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174224.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:00:41,754 INFO [train.py:901] (3/4) Epoch 22, batch 4500, loss[loss=0.263, simple_loss=0.3216, pruned_loss=0.1022, over 8646.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2885, pruned_loss=0.06307, over 1610581.93 frames. ], batch size: 39, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:00:46,574 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174249.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:00:52,509 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4210, 2.8131, 2.2737, 3.7890, 1.9254, 1.9821, 2.6038, 2.9455], + device='cuda:3'), covar=tensor([0.0692, 0.0762, 0.0796, 0.0334, 0.0998, 0.1225, 0.0853, 0.0786], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0199, 0.0246, 0.0217, 0.0209, 0.0248, 0.0251, 0.0210], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 04:00:56,994 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 04:00:58,613 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 04:00:58,684 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 04:01:03,189 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1907, 1.0428, 1.2902, 0.9929, 0.9666, 1.3259, 0.0783, 0.9708], + device='cuda:3'), covar=tensor([0.1543, 0.1422, 0.0479, 0.0907, 0.2632, 0.0525, 0.2123, 0.1276], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0197, 0.0128, 0.0220, 0.0268, 0.0136, 0.0170, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:01:17,050 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.456e+02 2.524e+02 3.306e+02 4.354e+02 7.569e+02, threshold=6.612e+02, percent-clipped=6.0 +2023-02-07 04:01:17,071 INFO [train.py:901] (3/4) Epoch 22, batch 4550, loss[loss=0.2222, simple_loss=0.2889, pruned_loss=0.07779, over 7690.00 frames. ], tot_loss[loss=0.2093, simple_loss=0.2905, pruned_loss=0.06402, over 1612247.31 frames. ], batch size: 18, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:23,308 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:01:28,575 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:01:51,056 INFO [train.py:901] (3/4) Epoch 22, batch 4600, loss[loss=0.2232, simple_loss=0.3059, pruned_loss=0.07026, over 8465.00 frames. ], tot_loss[loss=0.2084, simple_loss=0.2898, pruned_loss=0.06352, over 1611664.00 frames. ], batch size: 25, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:01:53,058 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 04:01:54,748 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:12,026 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:15,442 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:16,890 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:02:25,982 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.714e+02 2.375e+02 2.973e+02 3.873e+02 1.031e+03, threshold=5.946e+02, percent-clipped=3.0 +2023-02-07 04:02:26,002 INFO [train.py:901] (3/4) Epoch 22, batch 4650, loss[loss=0.2157, simple_loss=0.3037, pruned_loss=0.06387, over 8476.00 frames. ], tot_loss[loss=0.2077, simple_loss=0.2893, pruned_loss=0.06304, over 1607222.17 frames. ], batch size: 25, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:02:34,565 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4056, 1.5959, 2.1269, 1.3112, 1.5282, 1.6107, 1.4700, 1.4714], + device='cuda:3'), covar=tensor([0.2043, 0.2808, 0.1123, 0.4675, 0.2025, 0.3608, 0.2441, 0.2261], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0604, 0.0557, 0.0642, 0.0645, 0.0590, 0.0534, 0.0629], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:02:37,944 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:02,511 INFO [train.py:901] (3/4) Epoch 22, batch 4700, loss[loss=0.1922, simple_loss=0.2668, pruned_loss=0.0588, over 7782.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2885, pruned_loss=0.06228, over 1606623.82 frames. ], batch size: 19, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:37,053 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.374e+02 2.923e+02 3.899e+02 9.329e+02, threshold=5.846e+02, percent-clipped=2.0 +2023-02-07 04:03:37,074 INFO [train.py:901] (3/4) Epoch 22, batch 4750, loss[loss=0.2283, simple_loss=0.3134, pruned_loss=0.07158, over 8466.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06183, over 1612208.01 frames. ], batch size: 25, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:03:37,264 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:38,611 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:03:43,328 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:04:04,462 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 04:04:06,509 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 04:04:12,377 INFO [train.py:901] (3/4) Epoch 22, batch 4800, loss[loss=0.2093, simple_loss=0.288, pruned_loss=0.06527, over 8018.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.287, pruned_loss=0.06133, over 1609067.74 frames. 
], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:21,847 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 04:04:46,102 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.398e+02 2.995e+02 3.860e+02 8.125e+02, threshold=5.990e+02, percent-clipped=3.0 +2023-02-07 04:04:46,123 INFO [train.py:901] (3/4) Epoch 22, batch 4850, loss[loss=0.252, simple_loss=0.3197, pruned_loss=0.09211, over 6827.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2869, pruned_loss=0.06178, over 1603864.28 frames. ], batch size: 71, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:04:55,425 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 04:05:02,314 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:20,319 INFO [train.py:901] (3/4) Epoch 22, batch 4900, loss[loss=0.1753, simple_loss=0.2555, pruned_loss=0.04757, over 8256.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2867, pruned_loss=0.06168, over 1602642.20 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:05:23,130 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:29,306 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:05:50,583 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1145, 1.8760, 2.3938, 2.0375, 2.3121, 2.0518, 1.9565, 1.5840], + device='cuda:3'), covar=tensor([0.4105, 0.4066, 0.1608, 0.2972, 0.2100, 0.2563, 0.1530, 0.4040], + device='cuda:3'), in_proj_covar=tensor([0.0936, 0.0977, 0.0802, 0.0942, 0.0993, 0.0891, 0.0746, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:05:56,489 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.582e+02 3.121e+02 3.821e+02 7.682e+02, threshold=6.242e+02, percent-clipped=2.0 +2023-02-07 04:05:56,510 INFO [train.py:901] (3/4) Epoch 22, batch 4950, loss[loss=0.2257, simple_loss=0.3038, pruned_loss=0.0738, over 7812.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2878, pruned_loss=0.06194, over 1606093.79 frames. ], batch size: 20, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:17,791 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=174723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:30,646 INFO [train.py:901] (3/4) Epoch 22, batch 5000, loss[loss=0.2043, simple_loss=0.2844, pruned_loss=0.06209, over 8561.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2875, pruned_loss=0.06226, over 1608016.60 frames. 
], batch size: 31, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:06:35,014 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,251 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:36,400 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:44,698 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:51,019 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174768.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:54,585 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174773.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:06:55,972 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:07:07,673 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.731e+02 2.477e+02 2.928e+02 3.612e+02 7.754e+02, threshold=5.856e+02, percent-clipped=3.0 +2023-02-07 04:07:07,694 INFO [train.py:901] (3/4) Epoch 22, batch 5050, loss[loss=0.2195, simple_loss=0.2955, pruned_loss=0.07171, over 8199.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.287, pruned_loss=0.06224, over 1604803.22 frames. ], batch size: 23, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:17,140 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 04:07:30,596 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6605, 2.0383, 3.2507, 1.4223, 2.3745, 1.9276, 1.7818, 2.2862], + device='cuda:3'), covar=tensor([0.1842, 0.2564, 0.0803, 0.4480, 0.1943, 0.3399, 0.2197, 0.2487], + device='cuda:3'), in_proj_covar=tensor([0.0528, 0.0605, 0.0556, 0.0643, 0.0646, 0.0591, 0.0536, 0.0632], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:07:34,490 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 04:07:41,819 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 04:07:42,662 INFO [train.py:901] (3/4) Epoch 22, batch 5100, loss[loss=0.2279, simple_loss=0.2926, pruned_loss=0.08167, over 8032.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2874, pruned_loss=0.0624, over 1608385.31 frames. 
], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:07:52,852 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8038, 1.5677, 1.7191, 1.4907, 0.9152, 1.5200, 1.6233, 1.5659], + device='cuda:3'), covar=tensor([0.0521, 0.1201, 0.1556, 0.1322, 0.0599, 0.1402, 0.0694, 0.0661], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0191, 0.0160, 0.0100, 0.0164, 0.0113, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 04:07:58,154 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=174865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:03,642 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=174872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:15,021 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 04:08:17,819 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.498e+02 3.130e+02 3.757e+02 7.363e+02, threshold=6.259e+02, percent-clipped=3.0 +2023-02-07 04:08:17,839 INFO [train.py:901] (3/4) Epoch 22, batch 5150, loss[loss=0.187, simple_loss=0.2723, pruned_loss=0.05083, over 8239.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2878, pruned_loss=0.06273, over 1609076.40 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:08:20,514 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1690, 1.0793, 1.2917, 1.0251, 0.9527, 1.3181, 0.0509, 0.8618], + device='cuda:3'), covar=tensor([0.1978, 0.1401, 0.0514, 0.0892, 0.2804, 0.0552, 0.2152, 0.1384], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0197, 0.0128, 0.0220, 0.0268, 0.0136, 0.0170, 0.0194], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:08:21,151 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=174897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:08:41,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1983, 1.0853, 1.3280, 1.0475, 1.0233, 1.3230, 0.0629, 0.8866], + device='cuda:3'), covar=tensor([0.1425, 0.1441, 0.0453, 0.0752, 0.2515, 0.0527, 0.1937, 0.1269], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0197, 0.0128, 0.0219, 0.0268, 0.0136, 0.0169, 0.0193], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:08:52,066 INFO [train.py:901] (3/4) Epoch 22, batch 5200, loss[loss=0.2347, simple_loss=0.328, pruned_loss=0.07069, over 8345.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2873, pruned_loss=0.06224, over 1611394.80 frames. 
], batch size: 26, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:09:26,430 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2194, 3.0928, 2.8304, 1.5781, 2.8262, 2.9383, 2.7986, 2.8076], + device='cuda:3'), covar=tensor([0.1173, 0.0808, 0.1426, 0.4536, 0.1143, 0.1194, 0.1559, 0.1097], + device='cuda:3'), in_proj_covar=tensor([0.0523, 0.0433, 0.0432, 0.0535, 0.0425, 0.0444, 0.0424, 0.0387], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:09:26,992 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.432e+02 2.854e+02 3.739e+02 7.258e+02, threshold=5.708e+02, percent-clipped=1.0 +2023-02-07 04:09:27,013 INFO [train.py:901] (3/4) Epoch 22, batch 5250, loss[loss=0.1623, simple_loss=0.2499, pruned_loss=0.03738, over 7242.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2885, pruned_loss=0.0624, over 1609738.89 frames. ], batch size: 16, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:09:31,810 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 04:09:44,064 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:09:49,561 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175024.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:01,590 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:02,001 INFO [train.py:901] (3/4) Epoch 22, batch 5300, loss[loss=0.2801, simple_loss=0.3438, pruned_loss=0.1082, over 8139.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2878, pruned_loss=0.06233, over 1609470.06 frames. ], batch size: 22, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:07,101 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:19,116 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:10:35,758 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.519e+02 3.149e+02 3.909e+02 1.075e+03, threshold=6.297e+02, percent-clipped=6.0 +2023-02-07 04:10:35,778 INFO [train.py:901] (3/4) Epoch 22, batch 5350, loss[loss=0.1785, simple_loss=0.259, pruned_loss=0.04899, over 7811.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.06147, over 1610359.78 frames. ], batch size: 20, lr: 3.43e-03, grad_scale: 8.0 +2023-02-07 04:10:57,903 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:08,811 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9398, 1.7539, 3.1665, 1.3903, 2.4349, 3.4613, 3.6308, 2.6468], + device='cuda:3'), covar=tensor([0.1340, 0.1794, 0.0439, 0.2584, 0.1075, 0.0358, 0.0666, 0.0953], + device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0321, 0.0285, 0.0313, 0.0308, 0.0263, 0.0418, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 04:11:11,826 INFO [train.py:901] (3/4) Epoch 22, batch 5400, loss[loss=0.2178, simple_loss=0.3052, pruned_loss=0.06522, over 8141.00 frames. 
], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06183, over 1602576.61 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:11:14,643 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:22,726 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:39,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:11:46,276 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.426e+02 2.833e+02 4.034e+02 1.686e+03, threshold=5.665e+02, percent-clipped=5.0 +2023-02-07 04:11:46,296 INFO [train.py:901] (3/4) Epoch 22, batch 5450, loss[loss=0.2216, simple_loss=0.3005, pruned_loss=0.07135, over 8502.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2873, pruned_loss=0.06181, over 1607190.19 frames. ], batch size: 26, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:11:52,130 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4512, 2.3507, 3.1439, 2.4694, 2.9878, 2.4873, 2.2845, 1.7961], + device='cuda:3'), covar=tensor([0.5247, 0.4828, 0.1986, 0.3790, 0.2807, 0.2786, 0.1796, 0.5478], + device='cuda:3'), in_proj_covar=tensor([0.0937, 0.0977, 0.0801, 0.0943, 0.0991, 0.0888, 0.0745, 0.0822], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:12:02,450 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7477, 1.6605, 2.4890, 2.0177, 2.1925, 1.7734, 1.5220, 1.0403], + device='cuda:3'), covar=tensor([0.7060, 0.5768, 0.1975, 0.3873, 0.3146, 0.4303, 0.2896, 0.5578], + device='cuda:3'), in_proj_covar=tensor([0.0937, 0.0977, 0.0801, 0.0944, 0.0992, 0.0889, 0.0746, 0.0822], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:12:04,558 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 04:12:09,775 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:12:20,269 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 04:12:22,329 INFO [train.py:901] (3/4) Epoch 22, batch 5500, loss[loss=0.1883, simple_loss=0.2823, pruned_loss=0.04714, over 8506.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2876, pruned_loss=0.06149, over 1613341.71 frames. ], batch size: 28, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:12:56,614 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.376e+02 2.848e+02 3.508e+02 8.289e+02, threshold=5.697e+02, percent-clipped=6.0 +2023-02-07 04:12:56,635 INFO [train.py:901] (3/4) Epoch 22, batch 5550, loss[loss=0.2287, simple_loss=0.3058, pruned_loss=0.07582, over 8462.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2886, pruned_loss=0.06178, over 1617540.41 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:13:31,796 INFO [train.py:901] (3/4) Epoch 22, batch 5600, loss[loss=0.1612, simple_loss=0.2424, pruned_loss=0.04002, over 7281.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2881, pruned_loss=0.06149, over 1614216.72 frames. 
], batch size: 16, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:03,823 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:06,408 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.532e+02 3.141e+02 4.135e+02 1.836e+03, threshold=6.283e+02, percent-clipped=10.0 +2023-02-07 04:14:06,429 INFO [train.py:901] (3/4) Epoch 22, batch 5650, loss[loss=0.2137, simple_loss=0.3065, pruned_loss=0.06043, over 8495.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2883, pruned_loss=0.0617, over 1614132.89 frames. ], batch size: 28, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:20,103 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:22,663 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 04:14:23,537 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5216, 1.4471, 1.8216, 1.2275, 1.1766, 1.7797, 0.1659, 1.1349], + device='cuda:3'), covar=tensor([0.1557, 0.1218, 0.0399, 0.0980, 0.2549, 0.0486, 0.2042, 0.1368], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0196, 0.0128, 0.0219, 0.0265, 0.0136, 0.0167, 0.0193], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:14:37,562 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:40,743 INFO [train.py:901] (3/4) Epoch 22, batch 5700, loss[loss=0.2107, simple_loss=0.2862, pruned_loss=0.06762, over 8140.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.289, pruned_loss=0.06209, over 1616326.69 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:14:56,165 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:14:59,072 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 04:15:00,499 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-07 04:15:15,518 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.452e+02 2.878e+02 3.661e+02 5.836e+02, threshold=5.755e+02, percent-clipped=0.0 +2023-02-07 04:15:15,539 INFO [train.py:901] (3/4) Epoch 22, batch 5750, loss[loss=0.1979, simple_loss=0.2827, pruned_loss=0.05652, over 7807.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2881, pruned_loss=0.06164, over 1616672.67 frames. ], batch size: 20, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:15:20,437 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:22,434 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175501.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:15:27,207 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-07 04:15:33,338 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8835, 2.0550, 1.7579, 2.4809, 1.3674, 1.6010, 1.9864, 2.0917], + device='cuda:3'), covar=tensor([0.0749, 0.0741, 0.0890, 0.0488, 0.0998, 0.1212, 0.0694, 0.0760], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0197, 0.0246, 0.0215, 0.0207, 0.0247, 0.0251, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 04:15:36,028 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.0708, 3.9656, 3.6404, 2.0417, 3.5276, 3.6880, 3.6524, 3.4998], + device='cuda:3'), covar=tensor([0.0801, 0.0663, 0.1081, 0.4561, 0.0990, 0.1075, 0.1252, 0.0892], + device='cuda:3'), in_proj_covar=tensor([0.0524, 0.0437, 0.0433, 0.0539, 0.0427, 0.0445, 0.0425, 0.0388], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:15:50,255 INFO [train.py:901] (3/4) Epoch 22, batch 5800, loss[loss=0.2171, simple_loss=0.3083, pruned_loss=0.06295, over 8513.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.288, pruned_loss=0.06176, over 1618016.78 frames. ], batch size: 28, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:09,024 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175569.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:16:25,902 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.268e+02 2.280e+02 2.739e+02 3.457e+02 6.413e+02, threshold=5.479e+02, percent-clipped=3.0 +2023-02-07 04:16:25,923 INFO [train.py:901] (3/4) Epoch 22, batch 5850, loss[loss=0.2446, simple_loss=0.3378, pruned_loss=0.07574, over 8615.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2879, pruned_loss=0.06112, over 1622957.00 frames. ], batch size: 39, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:16:42,237 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:00,357 INFO [train.py:901] (3/4) Epoch 22, batch 5900, loss[loss=0.2074, simple_loss=0.283, pruned_loss=0.06596, over 7928.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2886, pruned_loss=0.06173, over 1621766.08 frames. ], batch size: 20, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:17:12,165 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8002, 2.1839, 3.4400, 1.7517, 1.7762, 3.4661, 0.7301, 2.0025], + device='cuda:3'), covar=tensor([0.1547, 0.1361, 0.0279, 0.2011, 0.2785, 0.0317, 0.2381, 0.1643], + device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0195, 0.0128, 0.0219, 0.0264, 0.0136, 0.0167, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:17:29,301 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:17:35,254 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.520e+02 3.043e+02 3.699e+02 9.671e+02, threshold=6.086e+02, percent-clipped=7.0 +2023-02-07 04:17:35,275 INFO [train.py:901] (3/4) Epoch 22, batch 5950, loss[loss=0.1526, simple_loss=0.2337, pruned_loss=0.03575, over 7526.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2885, pruned_loss=0.06186, over 1619003.06 frames. 
], batch size: 18, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:17:45,508 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5019, 1.3378, 2.3898, 1.2324, 2.2262, 2.5660, 2.7056, 2.1914], + device='cuda:3'), covar=tensor([0.1040, 0.1362, 0.0401, 0.2097, 0.0699, 0.0357, 0.0585, 0.0690], + device='cuda:3'), in_proj_covar=tensor([0.0294, 0.0321, 0.0285, 0.0313, 0.0309, 0.0263, 0.0419, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 04:18:00,065 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175728.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:02,750 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:08,432 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4415, 1.3784, 1.7511, 1.1841, 1.1201, 1.7255, 0.2186, 1.1135], + device='cuda:3'), covar=tensor([0.1883, 0.1264, 0.0414, 0.0918, 0.2502, 0.0526, 0.2040, 0.1329], + device='cuda:3'), in_proj_covar=tensor([0.0189, 0.0195, 0.0128, 0.0219, 0.0264, 0.0136, 0.0167, 0.0191], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:18:09,571 INFO [train.py:901] (3/4) Epoch 22, batch 6000, loss[loss=0.2158, simple_loss=0.3007, pruned_loss=0.06545, over 8133.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2869, pruned_loss=0.06121, over 1607740.87 frames. ], batch size: 22, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:18:09,571 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 04:18:14,236 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7187, 1.5688, 3.9134, 1.5948, 3.4397, 3.2110, 3.5029, 3.3989], + device='cuda:3'), covar=tensor([0.0724, 0.4690, 0.0555, 0.4512, 0.1294, 0.1104, 0.0713, 0.0865], + device='cuda:3'), in_proj_covar=tensor([0.0629, 0.0644, 0.0697, 0.0626, 0.0709, 0.0606, 0.0608, 0.0678], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:18:21,633 INFO [train.py:935] (3/4) Epoch 22, validation: loss=0.1729, simple_loss=0.2732, pruned_loss=0.03632, over 944034.00 frames. +2023-02-07 04:18:21,634 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 04:18:31,485 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:18:56,217 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.478e+02 2.934e+02 3.623e+02 7.032e+02, threshold=5.869e+02, percent-clipped=2.0 +2023-02-07 04:18:56,237 INFO [train.py:901] (3/4) Epoch 22, batch 6050, loss[loss=0.2408, simple_loss=0.3287, pruned_loss=0.0765, over 8583.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2862, pruned_loss=0.06126, over 1602312.15 frames. 
], batch size: 31, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:18,113 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3617, 2.1588, 2.7533, 2.2827, 2.7420, 2.3907, 2.1675, 1.4991], + device='cuda:3'), covar=tensor([0.5201, 0.4721, 0.1968, 0.3692, 0.2538, 0.2982, 0.1901, 0.5324], + device='cuda:3'), in_proj_covar=tensor([0.0943, 0.0981, 0.0808, 0.0947, 0.0999, 0.0898, 0.0750, 0.0828], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:19:31,869 INFO [train.py:901] (3/4) Epoch 22, batch 6100, loss[loss=0.1758, simple_loss=0.2686, pruned_loss=0.04154, over 8329.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.287, pruned_loss=0.06162, over 1610070.92 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:19:32,676 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=175843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:35,614 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:51,984 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:52,703 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:19:56,546 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 04:20:07,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.365e+02 2.974e+02 3.880e+02 6.577e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 04:20:07,229 INFO [train.py:901] (3/4) Epoch 22, batch 6150, loss[loss=0.2095, simple_loss=0.2837, pruned_loss=0.06764, over 8585.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2859, pruned_loss=0.06107, over 1611044.59 frames. ], batch size: 39, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:10,659 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175897.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:11,996 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:40,378 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=175940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:41,486 INFO [train.py:901] (3/4) Epoch 22, batch 6200, loss[loss=0.247, simple_loss=0.3228, pruned_loss=0.0856, over 8512.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2858, pruned_loss=0.06121, over 1609831.21 frames. ], batch size: 26, lr: 3.42e-03, grad_scale: 8.0 +2023-02-07 04:20:53,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=175958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:56,487 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. 
limit=2.0 +2023-02-07 04:20:56,918 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=175963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:20:58,221 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=175965.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:21:15,649 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.329e+02 2.882e+02 3.634e+02 1.217e+03, threshold=5.765e+02, percent-clipped=6.0 +2023-02-07 04:21:15,670 INFO [train.py:901] (3/4) Epoch 22, batch 6250, loss[loss=0.1975, simple_loss=0.2878, pruned_loss=0.05355, over 8441.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2864, pruned_loss=0.06173, over 1610315.13 frames. ], batch size: 27, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:21:51,349 INFO [train.py:901] (3/4) Epoch 22, batch 6300, loss[loss=0.1889, simple_loss=0.2669, pruned_loss=0.05544, over 7214.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.285, pruned_loss=0.06099, over 1610783.30 frames. ], batch size: 16, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:12,731 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176072.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:20,800 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5398, 4.5156, 4.1030, 2.3634, 4.0397, 4.1127, 4.0921, 3.9813], + device='cuda:3'), covar=tensor([0.0614, 0.0524, 0.0975, 0.3840, 0.0749, 0.0892, 0.1189, 0.0753], + device='cuda:3'), in_proj_covar=tensor([0.0531, 0.0437, 0.0435, 0.0542, 0.0427, 0.0446, 0.0427, 0.0388], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:22:26,687 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.297e+02 2.795e+02 3.577e+02 6.374e+02, threshold=5.590e+02, percent-clipped=1.0 +2023-02-07 04:22:26,707 INFO [train.py:901] (3/4) Epoch 22, batch 6350, loss[loss=0.226, simple_loss=0.3151, pruned_loss=0.06845, over 8327.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2854, pruned_loss=0.06078, over 1613066.50 frames. ], batch size: 25, lr: 3.42e-03, grad_scale: 16.0 +2023-02-07 04:22:34,429 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,208 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:22:51,828 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:01,398 INFO [train.py:901] (3/4) Epoch 22, batch 6400, loss[loss=0.1817, simple_loss=0.2664, pruned_loss=0.04851, over 8087.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2855, pruned_loss=0.06034, over 1616605.57 frames. 
], batch size: 21, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:08,435 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:33,464 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176187.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:23:36,651 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.375e+02 2.869e+02 3.334e+02 7.002e+02, threshold=5.738e+02, percent-clipped=1.0 +2023-02-07 04:23:36,672 INFO [train.py:901] (3/4) Epoch 22, batch 6450, loss[loss=0.1789, simple_loss=0.2756, pruned_loss=0.04117, over 8472.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.06139, over 1615673.92 frames. ], batch size: 25, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:23:52,613 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:09,921 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:11,685 INFO [train.py:901] (3/4) Epoch 22, batch 6500, loss[loss=0.2382, simple_loss=0.3126, pruned_loss=0.08188, over 7972.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2879, pruned_loss=0.06157, over 1617047.76 frames. ], batch size: 21, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:12,464 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:22,288 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 04:24:23,884 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176260.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:34,086 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:24:45,630 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.325e+02 2.725e+02 3.404e+02 5.159e+02, threshold=5.450e+02, percent-clipped=0.0 +2023-02-07 04:24:45,651 INFO [train.py:901] (3/4) Epoch 22, batch 6550, loss[loss=0.1945, simple_loss=0.2908, pruned_loss=0.0491, over 8100.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2889, pruned_loss=0.06231, over 1620426.94 frames. ], batch size: 23, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:24:57,260 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176307.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:09,352 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 04:25:21,087 INFO [train.py:901] (3/4) Epoch 22, batch 6600, loss[loss=0.1685, simple_loss=0.2535, pruned_loss=0.04176, over 7920.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2887, pruned_loss=0.06214, over 1619297.42 frames. ], batch size: 20, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:25:29,283 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 04:25:30,400 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. 
limit=2.0 +2023-02-07 04:25:32,747 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:25:55,380 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.510e+02 3.110e+02 4.060e+02 7.968e+02, threshold=6.221e+02, percent-clipped=4.0 +2023-02-07 04:25:55,401 INFO [train.py:901] (3/4) Epoch 22, batch 6650, loss[loss=0.2199, simple_loss=0.2943, pruned_loss=0.07278, over 8083.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2885, pruned_loss=0.0624, over 1616513.79 frames. ], batch size: 21, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:26:17,170 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:31,229 INFO [train.py:901] (3/4) Epoch 22, batch 6700, loss[loss=0.2249, simple_loss=0.3126, pruned_loss=0.06861, over 8326.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2887, pruned_loss=0.06238, over 1619638.56 frames. ], batch size: 26, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:26:32,112 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:26:41,199 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-07 04:26:49,604 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:00,366 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:27:05,583 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.672e+02 3.290e+02 4.002e+02 8.131e+02, threshold=6.579e+02, percent-clipped=6.0 +2023-02-07 04:27:05,603 INFO [train.py:901] (3/4) Epoch 22, batch 6750, loss[loss=0.1962, simple_loss=0.2634, pruned_loss=0.06453, over 7795.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2888, pruned_loss=0.06249, over 1621132.63 frames. ], batch size: 19, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:41,470 INFO [train.py:901] (3/4) Epoch 22, batch 6800, loss[loss=0.2024, simple_loss=0.2956, pruned_loss=0.05464, over 8470.00 frames. ], tot_loss[loss=0.2076, simple_loss=0.2893, pruned_loss=0.06295, over 1621734.80 frames. ], batch size: 25, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:27:44,982 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 04:28:16,786 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.317e+02 3.026e+02 3.783e+02 8.757e+02, threshold=6.052e+02, percent-clipped=1.0 +2023-02-07 04:28:16,807 INFO [train.py:901] (3/4) Epoch 22, batch 6850, loss[loss=0.1829, simple_loss=0.2613, pruned_loss=0.05226, over 7690.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2879, pruned_loss=0.06231, over 1616772.77 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:24,732 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:31,628 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:34,702 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 04:28:34,771 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176619.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:48,420 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:28:50,268 INFO [train.py:901] (3/4) Epoch 22, batch 6900, loss[loss=0.2295, simple_loss=0.307, pruned_loss=0.07603, over 8202.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2869, pruned_loss=0.06171, over 1612699.92 frames. ], batch size: 23, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:28:51,038 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:03,491 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4498, 1.9328, 4.5930, 2.2573, 4.1446, 3.8751, 4.1985, 4.0874], + device='cuda:3'), covar=tensor([0.0544, 0.3956, 0.0542, 0.3413, 0.0989, 0.0900, 0.0543, 0.0575], + device='cuda:3'), in_proj_covar=tensor([0.0631, 0.0637, 0.0694, 0.0625, 0.0709, 0.0605, 0.0603, 0.0678], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:29:17,268 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:20,592 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:26,753 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.293e+02 2.439e+02 3.078e+02 3.806e+02 5.995e+02, threshold=6.157e+02, percent-clipped=0.0 +2023-02-07 04:29:26,773 INFO [train.py:901] (3/4) Epoch 22, batch 6950, loss[loss=0.2007, simple_loss=0.2721, pruned_loss=0.06461, over 7423.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2871, pruned_loss=0.06158, over 1614621.69 frames. ], batch size: 17, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:29:35,484 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=176703.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:44,272 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 04:29:46,549 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176719.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:29:56,857 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:30:02,055 INFO [train.py:901] (3/4) Epoch 22, batch 7000, loss[loss=0.211, simple_loss=0.2974, pruned_loss=0.06229, over 8471.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2874, pruned_loss=0.06181, over 1609596.07 frames. 
], batch size: 25, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:30:36,087 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4425, 1.6295, 2.1095, 1.3386, 1.5632, 1.6929, 1.4927, 1.4970], + device='cuda:3'), covar=tensor([0.1979, 0.2694, 0.1007, 0.4462, 0.1952, 0.3400, 0.2409, 0.2172], + device='cuda:3'), in_proj_covar=tensor([0.0527, 0.0606, 0.0558, 0.0646, 0.0650, 0.0595, 0.0539, 0.0634], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:30:37,819 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.601e+02 2.400e+02 2.923e+02 3.703e+02 8.900e+02, threshold=5.847e+02, percent-clipped=5.0 +2023-02-07 04:30:37,839 INFO [train.py:901] (3/4) Epoch 22, batch 7050, loss[loss=0.2068, simple_loss=0.2933, pruned_loss=0.06015, over 8245.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2867, pruned_loss=0.06158, over 1608771.43 frames. ], batch size: 22, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:02,298 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 04:31:03,287 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:12,380 INFO [train.py:901] (3/4) Epoch 22, batch 7100, loss[loss=0.1967, simple_loss=0.287, pruned_loss=0.05317, over 8247.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2874, pruned_loss=0.06208, over 1609304.17 frames. ], batch size: 24, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:31:31,801 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=176871.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:31:46,099 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.316e+02 2.836e+02 3.633e+02 7.093e+02, threshold=5.673e+02, percent-clipped=3.0 +2023-02-07 04:31:46,120 INFO [train.py:901] (3/4) Epoch 22, batch 7150, loss[loss=0.2455, simple_loss=0.3164, pruned_loss=0.08731, over 8618.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2879, pruned_loss=0.06194, over 1614423.61 frames. ], batch size: 34, lr: 3.41e-03, grad_scale: 16.0 +2023-02-07 04:32:16,359 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8422, 2.0764, 2.1663, 1.3314, 2.3197, 1.5603, 0.7302, 2.0001], + device='cuda:3'), covar=tensor([0.0522, 0.0319, 0.0273, 0.0602, 0.0368, 0.0807, 0.0828, 0.0321], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0392, 0.0346, 0.0447, 0.0381, 0.0536, 0.0392, 0.0422], + device='cuda:3'), out_proj_covar=tensor([1.2187e-04, 1.0283e-04, 9.0824e-05, 1.1753e-04, 1.0017e-04, 1.5111e-04, + 1.0546e-04, 1.1169e-04], device='cuda:3') +2023-02-07 04:32:22,258 INFO [train.py:901] (3/4) Epoch 22, batch 7200, loss[loss=0.2203, simple_loss=0.2965, pruned_loss=0.07207, over 7815.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.288, pruned_loss=0.06254, over 1608265.42 frames. 
], batch size: 20, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:23,124 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=176943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:35,679 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8893, 2.1566, 2.2664, 1.4002, 2.3973, 1.6648, 0.7988, 1.9788], + device='cuda:3'), covar=tensor([0.0574, 0.0348, 0.0279, 0.0616, 0.0417, 0.0894, 0.0845, 0.0351], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0392, 0.0346, 0.0447, 0.0380, 0.0536, 0.0392, 0.0422], + device='cuda:3'), out_proj_covar=tensor([1.2193e-04, 1.0279e-04, 9.0864e-05, 1.1763e-04, 1.0010e-04, 1.5109e-04, + 1.0549e-04, 1.1176e-04], device='cuda:3') +2023-02-07 04:32:44,917 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176975.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:52,859 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=176987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:54,982 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=176990.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:32:56,138 INFO [train.py:901] (3/4) Epoch 22, batch 7250, loss[loss=0.1627, simple_loss=0.2455, pruned_loss=0.03998, over 7544.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.2885, pruned_loss=0.06253, over 1614138.07 frames. ], batch size: 18, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:32:56,794 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.387e+02 2.385e+02 2.852e+02 3.441e+02 7.839e+02, threshold=5.703e+02, percent-clipped=2.0 +2023-02-07 04:33:02,409 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:14,113 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177015.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:21,897 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177027.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:33:22,934 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 04:33:28,004 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9767, 6.0051, 5.1647, 2.7313, 5.2391, 5.6318, 5.3811, 5.5174], + device='cuda:3'), covar=tensor([0.0416, 0.0337, 0.0810, 0.4057, 0.0707, 0.0911, 0.0971, 0.0598], + device='cuda:3'), in_proj_covar=tensor([0.0528, 0.0438, 0.0432, 0.0541, 0.0427, 0.0448, 0.0428, 0.0388], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:33:31,983 INFO [train.py:901] (3/4) Epoch 22, batch 7300, loss[loss=0.1762, simple_loss=0.2653, pruned_loss=0.04353, over 8506.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2885, pruned_loss=0.06216, over 1617217.71 frames. ], batch size: 28, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:06,492 INFO [train.py:901] (3/4) Epoch 22, batch 7350, loss[loss=0.2419, simple_loss=0.3157, pruned_loss=0.084, over 8339.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06189, over 1620712.47 frames. 
], batch size: 26, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:07,163 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.532e+02 3.310e+02 4.342e+02 9.656e+02, threshold=6.621e+02, percent-clipped=7.0 +2023-02-07 04:34:13,493 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177102.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:26,059 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 04:34:31,739 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:33,086 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177129.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:36,854 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 04:34:42,503 INFO [train.py:901] (3/4) Epoch 22, batch 7400, loss[loss=0.1501, simple_loss=0.232, pruned_loss=0.03411, over 7788.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2881, pruned_loss=0.06179, over 1620258.94 frames. ], batch size: 19, lr: 3.41e-03, grad_scale: 8.0 +2023-02-07 04:34:42,675 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:34:48,000 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 04:34:59,914 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2992, 2.0291, 2.7801, 2.2402, 2.7465, 2.3239, 2.0969, 1.4641], + device='cuda:3'), covar=tensor([0.5447, 0.5114, 0.1908, 0.3691, 0.2445, 0.2858, 0.1853, 0.5333], + device='cuda:3'), in_proj_covar=tensor([0.0941, 0.0974, 0.0805, 0.0942, 0.0992, 0.0891, 0.0748, 0.0823], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:35:16,501 INFO [train.py:901] (3/4) Epoch 22, batch 7450, loss[loss=0.1867, simple_loss=0.2772, pruned_loss=0.04808, over 8246.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2883, pruned_loss=0.06211, over 1619666.32 frames. ], batch size: 24, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:35:17,191 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 2.327e+02 2.972e+02 3.761e+02 7.589e+02, threshold=5.944e+02, percent-clipped=3.0 +2023-02-07 04:35:21,647 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:27,651 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 04:35:32,259 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:38,465 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:35:51,500 INFO [train.py:901] (3/4) Epoch 22, batch 7500, loss[loss=0.1937, simple_loss=0.2773, pruned_loss=0.05505, over 7926.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2881, pruned_loss=0.06253, over 1617749.83 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:36:25,359 INFO [train.py:901] (3/4) Epoch 22, batch 7550, loss[loss=0.1573, simple_loss=0.248, pruned_loss=0.03327, over 7799.00 frames. 
], tot_loss[loss=0.2069, simple_loss=0.2883, pruned_loss=0.06272, over 1619365.43 frames. ], batch size: 19, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:36:26,052 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.508e+02 3.019e+02 3.781e+02 7.904e+02, threshold=6.039e+02, percent-clipped=4.0 +2023-02-07 04:36:51,643 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177330.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:36:59,617 INFO [train.py:901] (3/4) Epoch 22, batch 7600, loss[loss=0.1796, simple_loss=0.2503, pruned_loss=0.05446, over 7700.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06201, over 1615027.67 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:03,240 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7553, 1.5616, 1.9757, 1.6903, 1.8748, 1.7211, 1.6163, 1.1679], + device='cuda:3'), covar=tensor([0.3888, 0.3664, 0.1562, 0.2628, 0.1920, 0.2505, 0.1563, 0.3770], + device='cuda:3'), in_proj_covar=tensor([0.0943, 0.0978, 0.0810, 0.0947, 0.0997, 0.0896, 0.0752, 0.0826], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:37:11,458 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:29,860 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:35,784 INFO [train.py:901] (3/4) Epoch 22, batch 7650, loss[loss=0.1839, simple_loss=0.2574, pruned_loss=0.05521, over 7702.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2878, pruned_loss=0.0626, over 1613593.24 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:37:36,440 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.921e+02 2.559e+02 3.074e+02 4.315e+02 1.263e+03, threshold=6.148e+02, percent-clipped=10.0 +2023-02-07 04:37:39,972 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:37:57,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177423.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:08,849 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2427, 2.5310, 2.8434, 1.4309, 3.1564, 1.7635, 1.5620, 2.1291], + device='cuda:3'), covar=tensor([0.0760, 0.0435, 0.0265, 0.0849, 0.0336, 0.0812, 0.0972, 0.0532], + device='cuda:3'), in_proj_covar=tensor([0.0454, 0.0394, 0.0345, 0.0448, 0.0380, 0.0536, 0.0392, 0.0423], + device='cuda:3'), out_proj_covar=tensor([1.2149e-04, 1.0329e-04, 9.0714e-05, 1.1787e-04, 1.0002e-04, 1.5118e-04, + 1.0543e-04, 1.1198e-04], device='cuda:3') +2023-02-07 04:38:09,928 INFO [train.py:901] (3/4) Epoch 22, batch 7700, loss[loss=0.1788, simple_loss=0.2566, pruned_loss=0.05048, over 7789.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2875, pruned_loss=0.06253, over 1616027.62 frames. 
], batch size: 19, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:30,448 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:31,729 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177473.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:38:38,602 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 04:38:45,983 INFO [train.py:901] (3/4) Epoch 22, batch 7750, loss[loss=0.2818, simple_loss=0.3428, pruned_loss=0.1104, over 8275.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2864, pruned_loss=0.062, over 1612209.84 frames. ], batch size: 48, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:38:46,654 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.486e+02 3.125e+02 4.090e+02 1.041e+03, threshold=6.251e+02, percent-clipped=8.0 +2023-02-07 04:39:20,413 INFO [train.py:901] (3/4) Epoch 22, batch 7800, loss[loss=0.1674, simple_loss=0.245, pruned_loss=0.04485, over 7420.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2866, pruned_loss=0.06167, over 1614716.05 frames. ], batch size: 17, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:35,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8806, 2.0172, 1.6968, 2.5699, 1.2547, 1.5188, 1.9769, 1.9824], + device='cuda:3'), covar=tensor([0.0768, 0.0779, 0.0937, 0.0379, 0.1075, 0.1414, 0.0776, 0.0838], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0195, 0.0244, 0.0213, 0.0206, 0.0244, 0.0248, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 04:39:39,801 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177571.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,858 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:49,890 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:51,164 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=177588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:39:53,636 INFO [train.py:901] (3/4) Epoch 22, batch 7850, loss[loss=0.2205, simple_loss=0.3093, pruned_loss=0.06583, over 8275.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2879, pruned_loss=0.06179, over 1615820.19 frames. 
], batch size: 23, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:39:54,290 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.387e+02 2.753e+02 3.373e+02 6.542e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 04:39:57,206 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6599, 1.9879, 3.2408, 1.5190, 2.4972, 2.0957, 1.6478, 2.4990], + device='cuda:3'), covar=tensor([0.1786, 0.2498, 0.0769, 0.4340, 0.1619, 0.2939, 0.2287, 0.2070], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0609, 0.0558, 0.0650, 0.0651, 0.0598, 0.0541, 0.0636], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:40:06,550 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177611.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:40:26,677 INFO [train.py:901] (3/4) Epoch 22, batch 7900, loss[loss=0.2036, simple_loss=0.2809, pruned_loss=0.06316, over 7922.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.288, pruned_loss=0.06226, over 1614715.25 frames. ], batch size: 20, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:40:29,524 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2044, 1.9972, 2.5697, 2.1545, 2.5571, 2.2729, 2.0330, 1.3727], + device='cuda:3'), covar=tensor([0.5816, 0.4973, 0.2064, 0.3761, 0.2597, 0.3206, 0.1987, 0.5583], + device='cuda:3'), in_proj_covar=tensor([0.0950, 0.0987, 0.0813, 0.0955, 0.1005, 0.0904, 0.0757, 0.0833], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:40:53,554 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:41:00,017 INFO [train.py:901] (3/4) Epoch 22, batch 7950, loss[loss=0.1989, simple_loss=0.2855, pruned_loss=0.05618, over 8504.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2883, pruned_loss=0.0624, over 1615648.01 frames. ], batch size: 26, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:00,674 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.424e+02 2.966e+02 3.766e+02 9.319e+02, threshold=5.931e+02, percent-clipped=7.0 +2023-02-07 04:41:31,920 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5778, 2.4977, 1.7084, 2.1640, 2.0916, 1.6141, 1.9838, 2.0703], + device='cuda:3'), covar=tensor([0.1472, 0.0411, 0.1386, 0.0697, 0.0769, 0.1505, 0.1079, 0.1013], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0230, 0.0331, 0.0307, 0.0296, 0.0336, 0.0342, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 04:41:33,735 INFO [train.py:901] (3/4) Epoch 22, batch 8000, loss[loss=0.223, simple_loss=0.3093, pruned_loss=0.06838, over 8459.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.06185, over 1615193.84 frames. 
], batch size: 25, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:41:35,290 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6827, 2.2295, 4.0108, 1.5675, 2.9712, 2.2022, 1.8348, 2.9715], + device='cuda:3'), covar=tensor([0.1907, 0.2692, 0.0708, 0.4577, 0.1797, 0.3182, 0.2281, 0.2199], + device='cuda:3'), in_proj_covar=tensor([0.0528, 0.0607, 0.0557, 0.0649, 0.0650, 0.0596, 0.0539, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:42:06,693 INFO [train.py:901] (3/4) Epoch 22, batch 8050, loss[loss=0.1506, simple_loss=0.2344, pruned_loss=0.03336, over 7560.00 frames. ], tot_loss[loss=0.2068, simple_loss=0.288, pruned_loss=0.06286, over 1600607.65 frames. ], batch size: 18, lr: 3.40e-03, grad_scale: 8.0 +2023-02-07 04:42:07,276 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.308e+02 2.923e+02 3.618e+02 1.070e+03, threshold=5.846e+02, percent-clipped=4.0 +2023-02-07 04:42:10,816 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:22,396 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=177815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:42:39,732 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 04:42:44,826 INFO [train.py:901] (3/4) Epoch 23, batch 0, loss[loss=0.2313, simple_loss=0.3042, pruned_loss=0.07921, over 7071.00 frames. ], tot_loss[loss=0.2313, simple_loss=0.3042, pruned_loss=0.07921, over 7071.00 frames. ], batch size: 71, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:42:44,827 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 04:42:56,156 INFO [train.py:935] (3/4) Epoch 23, validation: loss=0.1743, simple_loss=0.274, pruned_loss=0.0373, over 944034.00 frames. +2023-02-07 04:42:56,157 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 04:43:08,361 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:10,566 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=177844.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:12,396 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 04:43:26,747 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:28,099 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=177869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:43:32,006 INFO [train.py:901] (3/4) Epoch 23, batch 50, loss[loss=0.2142, simple_loss=0.3026, pruned_loss=0.06293, over 8465.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2869, pruned_loss=0.05999, over 365944.79 frames. 
], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:43:42,596 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6474, 1.9588, 3.0464, 1.4756, 2.2364, 1.8927, 1.7532, 2.0086], + device='cuda:3'), covar=tensor([0.1805, 0.2404, 0.0752, 0.4286, 0.1741, 0.3219, 0.2156, 0.2262], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0648, 0.0648, 0.0594, 0.0538, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:43:45,279 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.650e+02 3.149e+02 3.939e+02 1.519e+03, threshold=6.298e+02, percent-clipped=14.0 +2023-02-07 04:43:46,692 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 04:44:01,128 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=177915.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:44:02,081 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 04:44:07,963 INFO [train.py:901] (3/4) Epoch 23, batch 100, loss[loss=0.1745, simple_loss=0.2616, pruned_loss=0.04364, over 7979.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2867, pruned_loss=0.05988, over 642922.10 frames. ], batch size: 21, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:09,366 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 04:44:15,318 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7007, 1.8244, 1.6461, 2.2277, 1.0577, 1.4643, 1.6593, 1.8106], + device='cuda:3'), covar=tensor([0.0732, 0.0767, 0.0853, 0.0503, 0.1100, 0.1270, 0.0784, 0.0761], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0214, 0.0207, 0.0246, 0.0250, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 04:44:42,229 INFO [train.py:901] (3/4) Epoch 23, batch 150, loss[loss=0.1974, simple_loss=0.2716, pruned_loss=0.06162, over 7921.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.06098, over 859526.82 frames. ], batch size: 20, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:44:49,547 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8990, 1.6798, 2.5958, 1.5699, 2.2462, 2.8278, 2.7608, 2.5573], + device='cuda:3'), covar=tensor([0.0951, 0.1372, 0.0615, 0.1695, 0.1409, 0.0251, 0.0759, 0.0439], + device='cuda:3'), in_proj_covar=tensor([0.0300, 0.0323, 0.0287, 0.0318, 0.0313, 0.0269, 0.0424, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 04:44:54,934 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.352e+02 3.015e+02 3.767e+02 5.945e+02, threshold=6.031e+02, percent-clipped=0.0 +2023-02-07 04:44:55,351 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 04:45:18,299 INFO [train.py:901] (3/4) Epoch 23, batch 200, loss[loss=0.1752, simple_loss=0.2537, pruned_loss=0.04838, over 8090.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2877, pruned_loss=0.06071, over 1028673.18 frames. 
], batch size: 21, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:45:19,113 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:22,634 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178030.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:45:53,029 INFO [train.py:901] (3/4) Epoch 23, batch 250, loss[loss=0.2337, simple_loss=0.315, pruned_loss=0.07621, over 8334.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2874, pruned_loss=0.06, over 1162124.13 frames. ], batch size: 25, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:04,760 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 04:46:06,105 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.380e+02 2.804e+02 3.484e+02 6.736e+02, threshold=5.609e+02, percent-clipped=2.0 +2023-02-07 04:46:12,815 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 04:46:28,460 INFO [train.py:901] (3/4) Epoch 23, batch 300, loss[loss=0.1822, simple_loss=0.2831, pruned_loss=0.04061, over 8107.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2862, pruned_loss=0.0596, over 1265007.51 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:46:40,070 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:40,631 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178142.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:45,457 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6537, 2.6425, 1.9476, 2.3719, 2.2096, 1.6585, 2.1661, 2.2700], + device='cuda:3'), covar=tensor([0.1549, 0.0422, 0.1200, 0.0636, 0.0710, 0.1451, 0.1004, 0.0908], + device='cuda:3'), in_proj_covar=tensor([0.0352, 0.0231, 0.0332, 0.0308, 0.0297, 0.0337, 0.0342, 0.0313], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 04:46:52,877 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:46:53,827 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 04:46:54,588 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 04:46:59,078 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1294, 1.8806, 4.4243, 1.8897, 2.5689, 5.0586, 5.1758, 4.3939], + device='cuda:3'), covar=tensor([0.1392, 0.1741, 0.0281, 0.2140, 0.1212, 0.0176, 0.0399, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0299, 0.0322, 0.0288, 0.0316, 0.0312, 0.0267, 0.0423, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 04:47:03,749 INFO [train.py:901] (3/4) Epoch 23, batch 350, loss[loss=0.2036, simple_loss=0.2809, pruned_loss=0.06316, over 7968.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2864, pruned_loss=0.06005, over 1342250.12 frames. 
], batch size: 21, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:16,039 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.905e+02 3.451e+02 8.072e+02, threshold=5.809e+02, percent-clipped=5.0 +2023-02-07 04:47:23,327 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7134, 1.4207, 3.1431, 1.4717, 2.3686, 3.4041, 3.5171, 2.9318], + device='cuda:3'), covar=tensor([0.1259, 0.1752, 0.0342, 0.2077, 0.0888, 0.0269, 0.0527, 0.0564], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0320, 0.0286, 0.0315, 0.0311, 0.0266, 0.0421, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 04:47:38,680 INFO [train.py:901] (3/4) Epoch 23, batch 400, loss[loss=0.1789, simple_loss=0.2464, pruned_loss=0.05574, over 7269.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2856, pruned_loss=0.06033, over 1399766.28 frames. ], batch size: 16, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:47:43,369 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 04:47:51,835 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6549, 2.0132, 3.1962, 1.4849, 2.3338, 2.0732, 1.7549, 2.3691], + device='cuda:3'), covar=tensor([0.1887, 0.2596, 0.0900, 0.4469, 0.1927, 0.3122, 0.2217, 0.2298], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0605, 0.0556, 0.0645, 0.0647, 0.0592, 0.0537, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:48:02,278 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,034 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:15,476 INFO [train.py:901] (3/4) Epoch 23, batch 450, loss[loss=0.2059, simple_loss=0.2962, pruned_loss=0.0578, over 8109.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2865, pruned_loss=0.06033, over 1452169.20 frames. ], batch size: 23, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:48:17,125 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0536, 2.1843, 1.8563, 2.9105, 1.2826, 1.7186, 2.0798, 2.2121], + device='cuda:3'), covar=tensor([0.0664, 0.0730, 0.0832, 0.0286, 0.1141, 0.1205, 0.0810, 0.0716], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0197, 0.0245, 0.0215, 0.0207, 0.0246, 0.0250, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 04:48:23,160 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:27,631 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.308e+02 2.812e+02 3.532e+02 1.107e+03, threshold=5.624e+02, percent-clipped=2.0 +2023-02-07 04:48:40,223 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:48:50,187 INFO [train.py:901] (3/4) Epoch 23, batch 500, loss[loss=0.2429, simple_loss=0.3144, pruned_loss=0.08564, over 8599.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.287, pruned_loss=0.06112, over 1487173.43 frames. 
], batch size: 49, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:25,956 INFO [train.py:901] (3/4) Epoch 23, batch 550, loss[loss=0.2353, simple_loss=0.3025, pruned_loss=0.08403, over 7117.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.289, pruned_loss=0.0622, over 1514468.55 frames. ], batch size: 72, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:49:39,372 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.748e+02 2.448e+02 3.105e+02 3.761e+02 9.562e+02, threshold=6.211e+02, percent-clipped=5.0 +2023-02-07 04:49:42,455 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178397.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:49:59,317 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:01,203 INFO [train.py:901] (3/4) Epoch 23, batch 600, loss[loss=0.1834, simple_loss=0.2564, pruned_loss=0.0552, over 7543.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2875, pruned_loss=0.06115, over 1532315.54 frames. ], batch size: 18, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:14,795 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 04:50:33,529 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178470.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:50:36,800 INFO [train.py:901] (3/4) Epoch 23, batch 650, loss[loss=0.2029, simple_loss=0.2911, pruned_loss=0.05738, over 8034.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06067, over 1551054.57 frames. ], batch size: 22, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:50:49,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.230e+02 2.701e+02 3.368e+02 8.641e+02, threshold=5.402e+02, percent-clipped=2.0 +2023-02-07 04:51:04,377 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:12,440 INFO [train.py:901] (3/4) Epoch 23, batch 700, loss[loss=0.2141, simple_loss=0.2971, pruned_loss=0.06555, over 8327.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2863, pruned_loss=0.06038, over 1568285.60 frames. ], batch size: 26, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:16,061 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=178530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:18,193 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2770, 1.9764, 2.5863, 2.1825, 2.4594, 2.3051, 2.0915, 1.3351], + device='cuda:3'), covar=tensor([0.5302, 0.4965, 0.2016, 0.3759, 0.2448, 0.3061, 0.1900, 0.5525], + device='cuda:3'), in_proj_covar=tensor([0.0941, 0.0986, 0.0813, 0.0952, 0.0997, 0.0899, 0.0754, 0.0831], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:51:21,529 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:33,978 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=178555.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:51:47,512 INFO [train.py:901] (3/4) Epoch 23, batch 750, loss[loss=0.1929, simple_loss=0.2736, pruned_loss=0.05606, over 7938.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2863, pruned_loss=0.06065, over 1580725.48 frames. 
], batch size: 20, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:51:49,830 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9614, 2.3510, 3.7240, 2.0576, 1.8404, 3.7245, 0.6560, 2.1897], + device='cuda:3'), covar=tensor([0.1253, 0.1200, 0.0203, 0.1656, 0.2713, 0.0207, 0.2210, 0.1454], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0221, 0.0270, 0.0137, 0.0171, 0.0196], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:51:59,472 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6867, 2.1051, 3.2292, 1.4622, 2.5117, 2.0934, 1.8505, 2.5167], + device='cuda:3'), covar=tensor([0.1944, 0.2565, 0.0828, 0.4639, 0.1898, 0.3219, 0.2246, 0.2436], + device='cuda:3'), in_proj_covar=tensor([0.0527, 0.0606, 0.0555, 0.0645, 0.0649, 0.0594, 0.0536, 0.0634], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:52:00,640 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.529e+02 2.988e+02 3.531e+02 9.866e+02, threshold=5.976e+02, percent-clipped=5.0 +2023-02-07 04:52:03,332 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 04:52:12,887 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 04:52:24,030 INFO [train.py:901] (3/4) Epoch 23, batch 800, loss[loss=0.212, simple_loss=0.2921, pruned_loss=0.06595, over 8603.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2859, pruned_loss=0.06034, over 1592966.05 frames. ], batch size: 49, lr: 3.32e-03, grad_scale: 8.0 +2023-02-07 04:52:32,114 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178637.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:52:38,300 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6212, 1.7610, 1.5358, 2.3060, 1.0612, 1.3801, 1.6575, 1.7814], + device='cuda:3'), covar=tensor([0.0787, 0.0756, 0.0941, 0.0402, 0.1069, 0.1313, 0.0779, 0.0822], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0197, 0.0245, 0.0214, 0.0206, 0.0245, 0.0249, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 04:52:57,753 INFO [train.py:901] (3/4) Epoch 23, batch 850, loss[loss=0.1728, simple_loss=0.2448, pruned_loss=0.05041, over 7254.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2872, pruned_loss=0.06152, over 1600386.23 frames. 
], batch size: 16, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:10,561 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.561e+02 2.992e+02 3.918e+02 1.040e+03, threshold=5.984e+02, percent-clipped=6.0 +2023-02-07 04:53:24,478 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178712.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:53:26,492 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178715.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:53:31,391 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6584, 4.7361, 4.2283, 2.1496, 4.0617, 4.3015, 4.2710, 4.1646], + device='cuda:3'), covar=tensor([0.0756, 0.0524, 0.1029, 0.4865, 0.0864, 0.0970, 0.1253, 0.0727], + device='cuda:3'), in_proj_covar=tensor([0.0535, 0.0440, 0.0435, 0.0544, 0.0426, 0.0448, 0.0431, 0.0389], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 04:53:34,044 INFO [train.py:901] (3/4) Epoch 23, batch 900, loss[loss=0.1684, simple_loss=0.255, pruned_loss=0.04092, over 8478.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2864, pruned_loss=0.06109, over 1607685.01 frames. ], batch size: 25, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:53:55,222 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.99 vs. limit=5.0 +2023-02-07 04:54:03,477 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1028, 1.4747, 4.2817, 1.9540, 2.3092, 4.8924, 4.9831, 4.1743], + device='cuda:3'), covar=tensor([0.1299, 0.2042, 0.0290, 0.2009, 0.1434, 0.0167, 0.0404, 0.0570], + device='cuda:3'), in_proj_covar=tensor([0.0295, 0.0321, 0.0286, 0.0315, 0.0310, 0.0267, 0.0422, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 04:54:09,415 INFO [train.py:901] (3/4) Epoch 23, batch 950, loss[loss=0.2272, simple_loss=0.3038, pruned_loss=0.07526, over 7799.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2876, pruned_loss=0.06161, over 1611948.38 frames. ], batch size: 20, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:54:18,557 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:21,848 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.330e+02 2.907e+02 3.544e+02 9.473e+02, threshold=5.814e+02, percent-clipped=4.0 +2023-02-07 04:54:22,773 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4422, 1.6929, 1.7706, 1.0702, 1.8525, 1.2123, 0.6087, 1.5417], + device='cuda:3'), covar=tensor([0.0653, 0.0387, 0.0280, 0.0620, 0.0384, 0.0983, 0.0900, 0.0340], + device='cuda:3'), in_proj_covar=tensor([0.0453, 0.0395, 0.0347, 0.0448, 0.0380, 0.0536, 0.0393, 0.0425], + device='cuda:3'), out_proj_covar=tensor([1.2096e-04, 1.0363e-04, 9.1189e-05, 1.1773e-04, 9.9773e-05, 1.5107e-04, + 1.0597e-04, 1.1251e-04], device='cuda:3') +2023-02-07 04:54:35,804 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 04:54:37,117 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:54:45,365 INFO [train.py:901] (3/4) Epoch 23, batch 1000, loss[loss=0.2409, simple_loss=0.3141, pruned_loss=0.08382, over 8654.00 frames. 
], tot_loss[loss=0.2045, simple_loss=0.2868, pruned_loss=0.06109, over 1613952.49 frames. ], batch size: 34, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:12,377 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 04:55:21,365 INFO [train.py:901] (3/4) Epoch 23, batch 1050, loss[loss=0.2036, simple_loss=0.2722, pruned_loss=0.0675, over 7286.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2875, pruned_loss=0.06113, over 1618899.20 frames. ], batch size: 16, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 04:55:25,412 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 04:55:33,396 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 2.332e+02 2.695e+02 3.454e+02 6.847e+02, threshold=5.390e+02, percent-clipped=5.0 +2023-02-07 04:55:46,653 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=178912.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:55:56,202 INFO [train.py:901] (3/4) Epoch 23, batch 1100, loss[loss=0.1926, simple_loss=0.2828, pruned_loss=0.05118, over 8348.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2873, pruned_loss=0.06112, over 1621576.03 frames. ], batch size: 24, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:55:59,151 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=178929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:00,122 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 04:56:32,129 INFO [train.py:901] (3/4) Epoch 23, batch 1150, loss[loss=0.1782, simple_loss=0.2574, pruned_loss=0.04953, over 8078.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2859, pruned_loss=0.06017, over 1621181.64 frames. ], batch size: 21, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:56:36,267 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 04:56:36,342 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=178981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:56:45,237 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.628e+02 3.162e+02 4.177e+02 1.087e+03, threshold=6.324e+02, percent-clipped=6.0 +2023-02-07 04:57:07,131 INFO [train.py:901] (3/4) Epoch 23, batch 1200, loss[loss=0.2351, simple_loss=0.3114, pruned_loss=0.07935, over 7263.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2868, pruned_loss=0.06098, over 1619474.23 frames. 
], batch size: 71, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:29,093 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:57:31,038 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179059.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 04:57:38,236 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5501, 1.5777, 2.1062, 1.2279, 1.1525, 2.0821, 0.2572, 1.2184], + device='cuda:3'), covar=tensor([0.1822, 0.1141, 0.0367, 0.1375, 0.2529, 0.0368, 0.1983, 0.1317], + device='cuda:3'), in_proj_covar=tensor([0.0191, 0.0199, 0.0129, 0.0219, 0.0267, 0.0136, 0.0169, 0.0193], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 04:57:42,801 INFO [train.py:901] (3/4) Epoch 23, batch 1250, loss[loss=0.2324, simple_loss=0.3101, pruned_loss=0.07738, over 8106.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.06164, over 1617535.74 frames. ], batch size: 23, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:57:55,990 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.289e+02 2.896e+02 3.686e+02 5.954e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 04:57:58,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179096.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:19,010 INFO [train.py:901] (3/4) Epoch 23, batch 1300, loss[loss=0.1997, simple_loss=0.282, pruned_loss=0.05872, over 8491.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2867, pruned_loss=0.06123, over 1612033.47 frames. ], batch size: 26, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:58:20,053 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2991, 1.9387, 2.5406, 2.1068, 2.4828, 2.3089, 2.1299, 1.2964], + device='cuda:3'), covar=tensor([0.5150, 0.4990, 0.1881, 0.3784, 0.2452, 0.2901, 0.1811, 0.5381], + device='cuda:3'), in_proj_covar=tensor([0.0938, 0.0985, 0.0810, 0.0949, 0.0994, 0.0899, 0.0754, 0.0828], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 04:58:24,099 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:51,902 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179171.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:58:53,997 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179174.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 04:58:54,481 INFO [train.py:901] (3/4) Epoch 23, batch 1350, loss[loss=0.1864, simple_loss=0.2688, pruned_loss=0.05197, over 8076.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2856, pruned_loss=0.06064, over 1610713.36 frames. 
], batch size: 21, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:01,717 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179185.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:07,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.184e+02 2.635e+02 3.098e+02 5.270e+02, threshold=5.271e+02, percent-clipped=0.0 +2023-02-07 04:59:20,409 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179210.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:30,640 INFO [train.py:901] (3/4) Epoch 23, batch 1400, loss[loss=0.1725, simple_loss=0.2469, pruned_loss=0.04906, over 7628.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2859, pruned_loss=0.06066, over 1612554.97 frames. ], batch size: 19, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 04:59:47,155 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 04:59:53,440 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179256.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:00:06,540 INFO [train.py:901] (3/4) Epoch 23, batch 1450, loss[loss=0.2627, simple_loss=0.3379, pruned_loss=0.09373, over 8305.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06118, over 1614532.69 frames. ], batch size: 23, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:00:16,936 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 05:00:19,769 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.293e+02 2.971e+02 3.774e+02 8.745e+02, threshold=5.941e+02, percent-clipped=9.0 +2023-02-07 05:00:43,616 INFO [train.py:901] (3/4) Epoch 23, batch 1500, loss[loss=0.2363, simple_loss=0.3221, pruned_loss=0.07521, over 7978.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2865, pruned_loss=0.0608, over 1615819.45 frames. ], batch size: 21, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:00:49,416 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1295, 4.1130, 3.7293, 2.0829, 3.6297, 3.7424, 3.7397, 3.6195], + device='cuda:3'), covar=tensor([0.0858, 0.0615, 0.1022, 0.4406, 0.0960, 0.1073, 0.1287, 0.0907], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0443, 0.0438, 0.0548, 0.0431, 0.0453, 0.0435, 0.0391], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:01:03,395 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:08,426 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-07 05:01:16,374 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179371.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:01:18,847 INFO [train.py:901] (3/4) Epoch 23, batch 1550, loss[loss=0.2054, simple_loss=0.2882, pruned_loss=0.06132, over 8122.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2867, pruned_loss=0.06137, over 1613254.84 frames. 
], batch size: 22, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:20,472 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:21,734 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179379.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:31,101 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.349e+02 2.958e+02 3.969e+02 7.808e+02, threshold=5.916e+02, percent-clipped=1.0 +2023-02-07 05:01:54,019 INFO [train.py:901] (3/4) Epoch 23, batch 1600, loss[loss=0.2673, simple_loss=0.337, pruned_loss=0.09879, over 8331.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2867, pruned_loss=0.06121, over 1609795.23 frames. ], batch size: 26, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:01:56,481 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179427.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:01:58,505 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179430.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:02:14,535 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179452.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:02:16,570 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179455.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:02:21,383 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179462.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:02:31,191 INFO [train.py:901] (3/4) Epoch 23, batch 1650, loss[loss=0.2174, simple_loss=0.3102, pruned_loss=0.06229, over 8512.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.286, pruned_loss=0.0607, over 1609311.48 frames. ], batch size: 26, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:02:41,801 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8727, 2.2088, 1.7158, 2.7724, 1.3058, 1.4818, 2.1740, 2.2391], + device='cuda:3'), covar=tensor([0.0946, 0.0898, 0.1154, 0.0452, 0.1171, 0.1560, 0.0845, 0.0761], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0199, 0.0245, 0.0215, 0.0207, 0.0248, 0.0251, 0.0209], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:02:43,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.367e+02 2.783e+02 3.381e+02 8.055e+02, threshold=5.566e+02, percent-clipped=4.0 +2023-02-07 05:02:50,738 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:06,276 INFO [train.py:901] (3/4) Epoch 23, batch 1700, loss[loss=0.1711, simple_loss=0.2539, pruned_loss=0.04411, over 7246.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2873, pruned_loss=0.06171, over 1608505.59 frames. ], batch size: 16, lr: 3.31e-03, grad_scale: 16.0 +2023-02-07 05:03:08,705 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:03:42,331 INFO [train.py:901] (3/4) Epoch 23, batch 1750, loss[loss=0.171, simple_loss=0.2546, pruned_loss=0.04367, over 8127.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2872, pruned_loss=0.06141, over 1612441.30 frames. 
], batch size: 22, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:03:56,238 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.481e+02 2.857e+02 3.517e+02 8.396e+02, threshold=5.713e+02, percent-clipped=3.0 +2023-02-07 05:04:17,978 INFO [train.py:901] (3/4) Epoch 23, batch 1800, loss[loss=0.1702, simple_loss=0.2478, pruned_loss=0.04633, over 7266.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06055, over 1611851.88 frames. ], batch size: 16, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:04:19,572 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=179627.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:04:33,217 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 05:04:37,268 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=179652.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:04:38,535 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179654.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:04:54,476 INFO [train.py:901] (3/4) Epoch 23, batch 1850, loss[loss=0.2285, simple_loss=0.3034, pruned_loss=0.07683, over 8434.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2874, pruned_loss=0.0612, over 1610422.71 frames. ], batch size: 27, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:07,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.311e+02 2.831e+02 3.615e+02 8.108e+02, threshold=5.663e+02, percent-clipped=6.0 +2023-02-07 05:05:28,522 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:05:29,813 INFO [train.py:901] (3/4) Epoch 23, batch 1900, loss[loss=0.2262, simple_loss=0.2998, pruned_loss=0.07633, over 8352.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2877, pruned_loss=0.06158, over 1611457.30 frames. ], batch size: 49, lr: 3.31e-03, grad_scale: 8.0 +2023-02-07 05:05:59,899 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 05:06:05,578 INFO [train.py:901] (3/4) Epoch 23, batch 1950, loss[loss=0.1734, simple_loss=0.2701, pruned_loss=0.03839, over 8501.00 frames. ], tot_loss[loss=0.2062, simple_loss=0.2882, pruned_loss=0.06205, over 1611283.83 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:06:12,655 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 05:06:12,867 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4005, 1.4243, 1.4002, 1.7798, 0.7188, 1.2752, 1.3035, 1.5013], + device='cuda:3'), covar=tensor([0.0946, 0.0966, 0.1041, 0.0522, 0.1237, 0.1497, 0.0822, 0.0769], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0198, 0.0242, 0.0213, 0.0206, 0.0245, 0.0250, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:06:19,480 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.621e+02 2.457e+02 2.986e+02 3.643e+02 8.972e+02, threshold=5.972e+02, percent-clipped=4.0 +2023-02-07 05:06:28,061 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179806.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:06:31,257 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-07 05:06:34,179 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:06:41,610 INFO [train.py:901] (3/4) Epoch 23, batch 2000, loss[loss=0.183, simple_loss=0.2566, pruned_loss=0.05477, over 7424.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.0612, over 1611535.16 frames. ], batch size: 17, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:06:50,603 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179838.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:06:56,181 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8963, 2.4399, 4.1873, 1.6844, 3.1493, 2.5766, 2.0343, 3.1101], + device='cuda:3'), covar=tensor([0.2024, 0.2965, 0.0897, 0.5163, 0.1882, 0.3199, 0.2573, 0.2384], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0607, 0.0556, 0.0645, 0.0649, 0.0594, 0.0537, 0.0631], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:07:16,416 INFO [train.py:901] (3/4) Epoch 23, batch 2050, loss[loss=0.2086, simple_loss=0.3022, pruned_loss=0.05753, over 8104.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2871, pruned_loss=0.06145, over 1610705.39 frames. ], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:07:30,035 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.444e+02 2.856e+02 3.794e+02 1.051e+03, threshold=5.713e+02, percent-clipped=7.0 +2023-02-07 05:07:49,561 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=179921.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:07:50,922 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9226, 3.8452, 3.5500, 2.4685, 3.4613, 3.4750, 3.5698, 3.2952], + device='cuda:3'), covar=tensor([0.0801, 0.0705, 0.1082, 0.3713, 0.0893, 0.1257, 0.1213, 0.1091], + device='cuda:3'), in_proj_covar=tensor([0.0530, 0.0440, 0.0435, 0.0542, 0.0429, 0.0448, 0.0432, 0.0389], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:07:51,616 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179924.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:07:52,161 INFO [train.py:901] (3/4) Epoch 23, batch 2100, loss[loss=0.1797, simple_loss=0.2781, pruned_loss=0.04062, over 8500.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2871, pruned_loss=0.06141, over 1613077.24 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:08:04,852 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=179942.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:08:25,020 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5442, 2.8685, 2.4621, 4.0888, 1.7569, 2.1304, 2.7892, 3.0580], + device='cuda:3'), covar=tensor([0.0662, 0.0800, 0.0758, 0.0244, 0.1078, 0.1142, 0.0787, 0.0733], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0198, 0.0243, 0.0214, 0.0206, 0.0246, 0.0249, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:08:27,544 INFO [train.py:901] (3/4) Epoch 23, batch 2150, loss[loss=0.2009, simple_loss=0.2677, pruned_loss=0.06701, over 7706.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2875, pruned_loss=0.06201, over 1611954.65 frames. 
], batch size: 18, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:08:41,580 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.311e+02 2.940e+02 3.642e+02 8.826e+02, threshold=5.880e+02, percent-clipped=6.0 +2023-02-07 05:08:44,589 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=179998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:09:05,728 INFO [train.py:901] (3/4) Epoch 23, batch 2200, loss[loss=0.2057, simple_loss=0.3005, pruned_loss=0.05549, over 8194.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2875, pruned_loss=0.06191, over 1611537.22 frames. ], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:09:18,806 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:09:23,721 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5726, 1.8327, 2.6756, 1.4196, 1.9283, 1.9675, 1.6087, 1.9086], + device='cuda:3'), covar=tensor([0.1841, 0.2639, 0.0808, 0.4665, 0.1953, 0.3135, 0.2424, 0.2181], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0610, 0.0557, 0.0648, 0.0650, 0.0595, 0.0539, 0.0633], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:09:40,654 INFO [train.py:901] (3/4) Epoch 23, batch 2250, loss[loss=0.2184, simple_loss=0.2999, pruned_loss=0.06847, over 6461.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2883, pruned_loss=0.06212, over 1614136.93 frames. ], batch size: 71, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:09:53,773 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.377e+02 2.815e+02 3.570e+02 6.536e+02, threshold=5.630e+02, percent-clipped=1.0 +2023-02-07 05:09:54,026 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:07,872 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180113.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:12,118 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180119.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:16,774 INFO [train.py:901] (3/4) Epoch 23, batch 2300, loss[loss=0.2142, simple_loss=0.2989, pruned_loss=0.06477, over 8500.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2879, pruned_loss=0.06181, over 1613744.98 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:10:40,104 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180158.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:10:52,678 INFO [train.py:901] (3/4) Epoch 23, batch 2350, loss[loss=0.2017, simple_loss=0.2964, pruned_loss=0.05347, over 8192.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2885, pruned_loss=0.06206, over 1617121.72 frames. 
], batch size: 23, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:10:54,329 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180177.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:11:05,884 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 2.451e+02 2.928e+02 3.544e+02 9.883e+02, threshold=5.856e+02, percent-clipped=4.0 +2023-02-07 05:11:11,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180202.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:11:25,820 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180223.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:11:27,097 INFO [train.py:901] (3/4) Epoch 23, batch 2400, loss[loss=0.1934, simple_loss=0.2856, pruned_loss=0.05063, over 8241.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06139, over 1618038.02 frames. ], batch size: 24, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:11:59,473 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180268.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:12:02,920 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:12:04,126 INFO [train.py:901] (3/4) Epoch 23, batch 2450, loss[loss=0.2316, simple_loss=0.307, pruned_loss=0.07814, over 8235.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2874, pruned_loss=0.0616, over 1613591.05 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:12:12,627 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180286.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:12:18,007 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.501e+02 2.918e+02 3.866e+02 1.157e+03, threshold=5.835e+02, percent-clipped=6.0 +2023-02-07 05:12:39,632 INFO [train.py:901] (3/4) Epoch 23, batch 2500, loss[loss=0.1936, simple_loss=0.2814, pruned_loss=0.05293, over 8129.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2888, pruned_loss=0.06188, over 1615652.38 frames. ], batch size: 22, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:13:00,480 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180354.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:12,934 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:16,784 INFO [train.py:901] (3/4) Epoch 23, batch 2550, loss[loss=0.2036, simple_loss=0.2824, pruned_loss=0.06238, over 7974.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2884, pruned_loss=0.06164, over 1615993.29 frames. 
], batch size: 21, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:13:22,407 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180383.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:13:25,750 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:29,886 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.741e+02 2.435e+02 3.031e+02 3.942e+02 1.076e+03, threshold=6.063e+02, percent-clipped=1.0 +2023-02-07 05:13:30,150 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:35,751 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180401.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:13:51,966 INFO [train.py:901] (3/4) Epoch 23, batch 2600, loss[loss=0.2001, simple_loss=0.2857, pruned_loss=0.05725, over 8471.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2877, pruned_loss=0.06156, over 1615933.06 frames. ], batch size: 25, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:14:28,406 INFO [train.py:901] (3/4) Epoch 23, batch 2650, loss[loss=0.1912, simple_loss=0.2626, pruned_loss=0.05993, over 7693.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06127, over 1612664.42 frames. ], batch size: 18, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:14:42,182 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.334e+02 2.331e+02 2.876e+02 3.734e+02 9.435e+02, threshold=5.753e+02, percent-clipped=4.0 +2023-02-07 05:14:48,432 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180503.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:14:51,945 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5148, 1.5015, 2.1046, 1.4162, 1.1048, 2.0273, 0.2731, 1.2973], + device='cuda:3'), covar=tensor([0.1796, 0.1385, 0.0434, 0.1133, 0.2809, 0.0429, 0.2066, 0.1195], + device='cuda:3'), in_proj_covar=tensor([0.0192, 0.0199, 0.0129, 0.0222, 0.0269, 0.0136, 0.0170, 0.0194], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 05:15:04,091 INFO [train.py:901] (3/4) Epoch 23, batch 2700, loss[loss=0.1867, simple_loss=0.2708, pruned_loss=0.05134, over 7797.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2853, pruned_loss=0.06074, over 1613783.73 frames. ], batch size: 19, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:15:07,059 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180529.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:24,142 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:33,269 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:39,689 INFO [train.py:901] (3/4) Epoch 23, batch 2750, loss[loss=0.1948, simple_loss=0.284, pruned_loss=0.05284, over 8568.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2857, pruned_loss=0.06087, over 1611703.61 frames. 
], batch size: 39, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:15:48,799 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180588.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:15:53,497 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.355e+02 2.814e+02 3.432e+02 9.125e+02, threshold=5.629e+02, percent-clipped=4.0 +2023-02-07 05:16:15,675 INFO [train.py:901] (3/4) Epoch 23, batch 2800, loss[loss=0.2073, simple_loss=0.2829, pruned_loss=0.06588, over 7654.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2853, pruned_loss=0.06016, over 1614183.91 frames. ], batch size: 19, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:16:26,293 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180639.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 05:16:38,634 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180657.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:16:43,330 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180664.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 05:16:50,750 INFO [train.py:901] (3/4) Epoch 23, batch 2850, loss[loss=0.2146, simple_loss=0.2976, pruned_loss=0.06583, over 8496.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2864, pruned_loss=0.06028, over 1618315.96 frames. ], batch size: 26, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:16:55,707 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:16:55,730 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:04,507 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.469e+02 3.037e+02 3.866e+02 9.714e+02, threshold=6.075e+02, percent-clipped=7.0 +2023-02-07 05:17:07,454 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:07,605 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0392, 1.1507, 1.1437, 0.8680, 1.1526, 0.9835, 0.1785, 1.1501], + device='cuda:3'), covar=tensor([0.0425, 0.0407, 0.0372, 0.0504, 0.0468, 0.1046, 0.0890, 0.0343], + device='cuda:3'), in_proj_covar=tensor([0.0456, 0.0394, 0.0348, 0.0445, 0.0380, 0.0534, 0.0393, 0.0422], + device='cuda:3'), out_proj_covar=tensor([1.2181e-04, 1.0316e-04, 9.1228e-05, 1.1701e-04, 9.9895e-05, 1.5036e-04, + 1.0583e-04, 1.1171e-04], device='cuda:3') +2023-02-07 05:17:27,380 INFO [train.py:901] (3/4) Epoch 23, batch 2900, loss[loss=0.2017, simple_loss=0.2976, pruned_loss=0.05289, over 8493.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2863, pruned_loss=0.06083, over 1615881.23 frames. ], batch size: 29, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:17:45,109 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:52,141 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180759.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:17:59,469 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 05:18:02,965 INFO [train.py:901] (3/4) Epoch 23, batch 2950, loss[loss=0.2492, simple_loss=0.3206, pruned_loss=0.08894, over 7288.00 frames. 
], tot_loss[loss=0.2055, simple_loss=0.2875, pruned_loss=0.0617, over 1616272.08 frames. ], batch size: 73, lr: 3.30e-03, grad_scale: 8.0 +2023-02-07 05:18:09,335 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:18:16,024 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.356e+02 2.925e+02 3.942e+02 6.480e+02, threshold=5.850e+02, percent-clipped=1.0 +2023-02-07 05:18:19,052 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8811, 1.3033, 4.3250, 1.8694, 2.4969, 4.9625, 5.0280, 4.2085], + device='cuda:3'), covar=tensor([0.1404, 0.2114, 0.0282, 0.2048, 0.1191, 0.0165, 0.0371, 0.0557], + device='cuda:3'), in_proj_covar=tensor([0.0295, 0.0318, 0.0285, 0.0314, 0.0308, 0.0267, 0.0420, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 05:18:30,304 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=180813.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:18:33,373 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 05:18:38,135 INFO [train.py:901] (3/4) Epoch 23, batch 3000, loss[loss=0.1757, simple_loss=0.2645, pruned_loss=0.04346, over 8330.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2874, pruned_loss=0.06144, over 1613730.72 frames. ], batch size: 26, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:18:38,135 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 05:18:50,536 INFO [train.py:935] (3/4) Epoch 23, validation: loss=0.1735, simple_loss=0.2731, pruned_loss=0.03696, over 944034.00 frames. +2023-02-07 05:18:50,537 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 05:18:57,234 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 05:19:03,699 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=180843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:19:26,991 INFO [train.py:901] (3/4) Epoch 23, batch 3050, loss[loss=0.2724, simple_loss=0.331, pruned_loss=0.1069, over 7057.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2865, pruned_loss=0.06148, over 1608892.50 frames. ], batch size: 71, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:19:40,679 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.515e+02 3.107e+02 3.968e+02 1.139e+03, threshold=6.214e+02, percent-clipped=7.0 +2023-02-07 05:20:02,332 INFO [train.py:901] (3/4) Epoch 23, batch 3100, loss[loss=0.2211, simple_loss=0.3054, pruned_loss=0.06838, over 8250.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2872, pruned_loss=0.06159, over 1609134.92 frames. ], batch size: 48, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:20:07,184 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=180932.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:20:11,450 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=180938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:20:29,340 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=180963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:20:38,167 INFO [train.py:901] (3/4) Epoch 23, batch 3150, loss[loss=0.19, simple_loss=0.2927, pruned_loss=0.04364, over 8113.00 frames. 
], tot_loss[loss=0.2046, simple_loss=0.2867, pruned_loss=0.06119, over 1612941.81 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:20:51,969 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.467e+02 3.042e+02 3.660e+02 1.036e+03, threshold=6.084e+02, percent-clipped=2.0 +2023-02-07 05:21:14,471 INFO [train.py:901] (3/4) Epoch 23, batch 3200, loss[loss=0.1875, simple_loss=0.2794, pruned_loss=0.04784, over 8543.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2872, pruned_loss=0.06159, over 1611693.26 frames. ], batch size: 39, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:21:20,225 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8827, 1.8372, 1.8910, 1.7179, 1.0108, 1.7628, 2.2356, 1.8370], + device='cuda:3'), covar=tensor([0.0434, 0.1135, 0.1570, 0.1330, 0.0588, 0.1319, 0.0618, 0.0638], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0188, 0.0159, 0.0099, 0.0161, 0.0111, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 05:21:29,945 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181047.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:21:45,872 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181069.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:21:49,883 INFO [train.py:901] (3/4) Epoch 23, batch 3250, loss[loss=0.2459, simple_loss=0.3171, pruned_loss=0.08738, over 8467.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2884, pruned_loss=0.06223, over 1620816.62 frames. ], batch size: 27, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:21:57,726 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5924, 1.5747, 4.7790, 1.8290, 4.2585, 3.9546, 4.3450, 4.1704], + device='cuda:3'), covar=tensor([0.0537, 0.4413, 0.0450, 0.3841, 0.1133, 0.0917, 0.0535, 0.0621], + device='cuda:3'), in_proj_covar=tensor([0.0642, 0.0648, 0.0707, 0.0640, 0.0717, 0.0613, 0.0612, 0.0689], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:22:03,753 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.376e+02 2.917e+02 3.369e+02 6.745e+02, threshold=5.834e+02, percent-clipped=1.0 +2023-02-07 05:22:04,708 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:22:04,822 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181094.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:22:26,226 INFO [train.py:901] (3/4) Epoch 23, batch 3300, loss[loss=0.2232, simple_loss=0.2969, pruned_loss=0.07472, over 8085.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06127, over 1616526.13 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:23:01,365 INFO [train.py:901] (3/4) Epoch 23, batch 3350, loss[loss=0.1918, simple_loss=0.264, pruned_loss=0.05975, over 7795.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2871, pruned_loss=0.06158, over 1616470.06 frames. 
], batch size: 19, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:23:10,457 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181187.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:23:14,976 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.358e+02 3.053e+02 3.666e+02 9.674e+02, threshold=6.107e+02, percent-clipped=1.0 +2023-02-07 05:23:26,432 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:23:38,223 INFO [train.py:901] (3/4) Epoch 23, batch 3400, loss[loss=0.1876, simple_loss=0.2677, pruned_loss=0.05377, over 7444.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2875, pruned_loss=0.06143, over 1613884.15 frames. ], batch size: 17, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:23:39,864 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6662, 1.6096, 2.1634, 1.4629, 1.1946, 2.0774, 0.2959, 1.2983], + device='cuda:3'), covar=tensor([0.1573, 0.1256, 0.0322, 0.0962, 0.2542, 0.0396, 0.1941, 0.1210], + device='cuda:3'), in_proj_covar=tensor([0.0192, 0.0198, 0.0129, 0.0220, 0.0270, 0.0137, 0.0170, 0.0193], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 05:23:46,806 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.49 vs. limit=2.0 +2023-02-07 05:24:13,227 INFO [train.py:901] (3/4) Epoch 23, batch 3450, loss[loss=0.2414, simple_loss=0.3071, pruned_loss=0.08786, over 8497.00 frames. ], tot_loss[loss=0.2066, simple_loss=0.2886, pruned_loss=0.0623, over 1615006.25 frames. ], batch size: 29, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:24:22,689 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7388, 1.9743, 1.7221, 2.5963, 1.2695, 1.5654, 2.0428, 2.1066], + device='cuda:3'), covar=tensor([0.0993, 0.0851, 0.1096, 0.0442, 0.1056, 0.1418, 0.0784, 0.0814], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0197, 0.0243, 0.0213, 0.0204, 0.0244, 0.0249, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:24:27,414 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.916e+02 2.466e+02 2.960e+02 3.783e+02 8.296e+02, threshold=5.920e+02, percent-clipped=4.0 +2023-02-07 05:24:32,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:24:33,736 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181303.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:24:42,799 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181315.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:24:49,554 INFO [train.py:901] (3/4) Epoch 23, batch 3500, loss[loss=0.1775, simple_loss=0.2611, pruned_loss=0.047, over 8033.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2881, pruned_loss=0.06174, over 1619133.15 frames. 
], batch size: 22, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:24:52,757 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181328.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:25:05,780 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6559, 1.9624, 2.0307, 1.2931, 2.0741, 1.4667, 0.9316, 1.8508], + device='cuda:3'), covar=tensor([0.0765, 0.0448, 0.0337, 0.0770, 0.0500, 0.1036, 0.0927, 0.0373], + device='cuda:3'), in_proj_covar=tensor([0.0456, 0.0394, 0.0348, 0.0451, 0.0381, 0.0537, 0.0394, 0.0424], + device='cuda:3'), out_proj_covar=tensor([1.2173e-04, 1.0319e-04, 9.1233e-05, 1.1864e-04, 1.0006e-04, 1.5137e-04, + 1.0606e-04, 1.1202e-04], device='cuda:3') +2023-02-07 05:25:07,629 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 05:25:25,791 INFO [train.py:901] (3/4) Epoch 23, batch 3550, loss[loss=0.18, simple_loss=0.266, pruned_loss=0.04693, over 8333.00 frames. ], tot_loss[loss=0.2074, simple_loss=0.2894, pruned_loss=0.06267, over 1618425.52 frames. ], batch size: 25, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:25:32,319 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2339, 2.0479, 2.6686, 2.2215, 2.6604, 2.2999, 2.0970, 1.5134], + device='cuda:3'), covar=tensor([0.5503, 0.5071, 0.1998, 0.3645, 0.2560, 0.3097, 0.1923, 0.5582], + device='cuda:3'), in_proj_covar=tensor([0.0949, 0.0992, 0.0815, 0.0957, 0.1003, 0.0905, 0.0757, 0.0834], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 05:25:39,015 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.351e+02 2.882e+02 3.469e+02 9.271e+02, threshold=5.765e+02, percent-clipped=2.0 +2023-02-07 05:26:00,043 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0516, 2.2069, 1.8437, 2.8538, 1.3900, 1.6985, 2.0821, 2.3571], + device='cuda:3'), covar=tensor([0.0697, 0.0798, 0.0888, 0.0336, 0.1103, 0.1285, 0.0847, 0.0720], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0197, 0.0243, 0.0213, 0.0204, 0.0244, 0.0250, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:26:01,197 INFO [train.py:901] (3/4) Epoch 23, batch 3600, loss[loss=0.2359, simple_loss=0.3158, pruned_loss=0.07793, over 8291.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2891, pruned_loss=0.06264, over 1616753.94 frames. ], batch size: 23, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:26:16,016 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5006, 1.4114, 1.8664, 1.3338, 1.1471, 1.7855, 0.2383, 1.2063], + device='cuda:3'), covar=tensor([0.1794, 0.1586, 0.0390, 0.0943, 0.2658, 0.0520, 0.2132, 0.1319], + device='cuda:3'), in_proj_covar=tensor([0.0192, 0.0198, 0.0129, 0.0218, 0.0268, 0.0136, 0.0169, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 05:26:30,562 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181465.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:26:37,776 INFO [train.py:901] (3/4) Epoch 23, batch 3650, loss[loss=0.1877, simple_loss=0.2761, pruned_loss=0.04961, over 8071.00 frames. ], tot_loss[loss=0.2071, simple_loss=0.2892, pruned_loss=0.06252, over 1619035.10 frames. 
], batch size: 21, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:26:48,304 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181490.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:26:50,917 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.424e+02 2.919e+02 3.720e+02 6.119e+02, threshold=5.839e+02, percent-clipped=1.0 +2023-02-07 05:27:11,122 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 05:27:12,461 INFO [train.py:901] (3/4) Epoch 23, batch 3700, loss[loss=0.1802, simple_loss=0.2624, pruned_loss=0.04905, over 7920.00 frames. ], tot_loss[loss=0.2069, simple_loss=0.2885, pruned_loss=0.06263, over 1613718.20 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 8.0 +2023-02-07 05:27:17,712 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-07 05:27:30,291 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:27:37,401 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=181558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:27:49,553 INFO [train.py:901] (3/4) Epoch 23, batch 3750, loss[loss=0.1803, simple_loss=0.2608, pruned_loss=0.04992, over 7713.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2883, pruned_loss=0.06256, over 1613927.00 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:27:55,393 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=181583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:28:02,817 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.354e+02 2.844e+02 3.677e+02 7.170e+02, threshold=5.688e+02, percent-clipped=4.0 +2023-02-07 05:28:24,873 INFO [train.py:901] (3/4) Epoch 23, batch 3800, loss[loss=0.2009, simple_loss=0.2889, pruned_loss=0.05643, over 7796.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2866, pruned_loss=0.06126, over 1613307.73 frames. ], batch size: 20, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:28:44,349 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4866, 1.8369, 1.4145, 2.9105, 1.4827, 1.3632, 2.1997, 2.0931], + device='cuda:3'), covar=tensor([0.1612, 0.1382, 0.2032, 0.0410, 0.1313, 0.2047, 0.0961, 0.1044], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0198, 0.0246, 0.0215, 0.0207, 0.0247, 0.0252, 0.0209], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:28:47,153 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([6.0775, 1.7351, 6.2023, 2.1984, 5.5424, 5.1447, 5.7177, 5.6536], + device='cuda:3'), covar=tensor([0.0416, 0.4584, 0.0289, 0.3733, 0.0870, 0.0784, 0.0410, 0.0485], + device='cuda:3'), in_proj_covar=tensor([0.0642, 0.0650, 0.0706, 0.0641, 0.0721, 0.0617, 0.0616, 0.0688], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:28:49,221 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181659.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:29:00,794 INFO [train.py:901] (3/4) Epoch 23, batch 3850, loss[loss=0.2142, simple_loss=0.2982, pruned_loss=0.06504, over 8577.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2879, pruned_loss=0.0616, over 1617256.45 frames. 
], batch size: 31, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:29:14,859 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.406e+02 2.361e+02 2.900e+02 3.650e+02 9.007e+02, threshold=5.800e+02, percent-clipped=7.0 +2023-02-07 05:29:22,393 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 05:29:36,633 INFO [train.py:901] (3/4) Epoch 23, batch 3900, loss[loss=0.196, simple_loss=0.2773, pruned_loss=0.05735, over 8329.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2869, pruned_loss=0.06125, over 1619240.95 frames. ], batch size: 26, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:29:42,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8312, 1.5425, 1.9153, 1.5912, 1.0442, 1.6605, 2.1678, 2.0195], + device='cuda:3'), covar=tensor([0.0423, 0.1295, 0.1617, 0.1424, 0.0613, 0.1430, 0.0645, 0.0588], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0151, 0.0188, 0.0159, 0.0100, 0.0161, 0.0111, 0.0142], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 05:30:10,472 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=181774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:30:11,006 INFO [train.py:901] (3/4) Epoch 23, batch 3950, loss[loss=0.1942, simple_loss=0.2798, pruned_loss=0.05427, over 7980.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2873, pruned_loss=0.06163, over 1616886.69 frames. ], batch size: 21, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:30:26,257 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 2.359e+02 2.788e+02 3.393e+02 6.824e+02, threshold=5.575e+02, percent-clipped=4.0 +2023-02-07 05:30:47,704 INFO [train.py:901] (3/4) Epoch 23, batch 4000, loss[loss=0.1858, simple_loss=0.2642, pruned_loss=0.0537, over 7689.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.06166, over 1618662.43 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:31:04,380 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2615, 1.2734, 3.3976, 1.0601, 3.0094, 2.8397, 3.0940, 3.0165], + device='cuda:3'), covar=tensor([0.0827, 0.4081, 0.0842, 0.4250, 0.1478, 0.1170, 0.0804, 0.0921], + device='cuda:3'), in_proj_covar=tensor([0.0640, 0.0650, 0.0706, 0.0641, 0.0717, 0.0615, 0.0613, 0.0686], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:31:22,586 INFO [train.py:901] (3/4) Epoch 23, batch 4050, loss[loss=0.1868, simple_loss=0.2683, pruned_loss=0.05266, over 7531.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2868, pruned_loss=0.06101, over 1614972.58 frames. ], batch size: 18, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:31:34,384 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=181892.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:31:35,721 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 2.508e+02 2.885e+02 3.954e+02 8.020e+02, threshold=5.770e+02, percent-clipped=6.0 +2023-02-07 05:31:59,838 INFO [train.py:901] (3/4) Epoch 23, batch 4100, loss[loss=0.1779, simple_loss=0.2549, pruned_loss=0.05052, over 7652.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2873, pruned_loss=0.06109, over 1612857.54 frames. 
], batch size: 19, lr: 3.29e-03, grad_scale: 16.0 +2023-02-07 05:32:10,663 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1384, 3.7161, 2.4093, 2.8959, 2.7128, 1.9738, 2.8905, 2.9192], + device='cuda:3'), covar=tensor([0.1602, 0.0375, 0.1160, 0.0710, 0.0677, 0.1521, 0.0996, 0.1156], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0235, 0.0340, 0.0312, 0.0303, 0.0340, 0.0347, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 05:32:13,920 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1673, 1.2292, 1.4830, 1.1560, 0.7217, 1.2935, 1.2607, 1.0388], + device='cuda:3'), covar=tensor([0.0663, 0.1309, 0.1720, 0.1525, 0.0598, 0.1480, 0.0732, 0.0727], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0190, 0.0159, 0.0100, 0.0163, 0.0112, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 05:32:34,924 INFO [train.py:901] (3/4) Epoch 23, batch 4150, loss[loss=0.1614, simple_loss=0.2415, pruned_loss=0.04061, over 7798.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.287, pruned_loss=0.06093, over 1611842.95 frames. ], batch size: 19, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:32:48,428 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.356e+02 2.929e+02 3.956e+02 6.697e+02, threshold=5.858e+02, percent-clipped=3.0 +2023-02-07 05:32:49,332 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=181996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:32:58,730 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:33:11,753 INFO [train.py:901] (3/4) Epoch 23, batch 4200, loss[loss=0.2106, simple_loss=0.2893, pruned_loss=0.06598, over 8664.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2864, pruned_loss=0.06072, over 1608999.27 frames. ], batch size: 49, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:33:16,205 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182030.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:33:17,893 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.37 vs. limit=5.0 +2023-02-07 05:33:25,771 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 05:33:33,433 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:33:47,611 INFO [train.py:901] (3/4) Epoch 23, batch 4250, loss[loss=0.2069, simple_loss=0.2988, pruned_loss=0.05753, over 8480.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.287, pruned_loss=0.06085, over 1614165.83 frames. ], batch size: 27, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:33:48,141 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-07 05:33:49,026 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 05:34:01,341 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.416e+02 2.989e+02 3.588e+02 6.339e+02, threshold=5.979e+02, percent-clipped=2.0 +2023-02-07 05:34:06,984 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5044, 1.6369, 2.1783, 1.3887, 1.4892, 1.7300, 1.5019, 1.5009], + device='cuda:3'), covar=tensor([0.1883, 0.2516, 0.0984, 0.4291, 0.1959, 0.3318, 0.2380, 0.2132], + device='cuda:3'), in_proj_covar=tensor([0.0523, 0.0608, 0.0552, 0.0647, 0.0644, 0.0596, 0.0539, 0.0628], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:34:22,741 INFO [train.py:901] (3/4) Epoch 23, batch 4300, loss[loss=0.209, simple_loss=0.2937, pruned_loss=0.06219, over 8323.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.287, pruned_loss=0.06072, over 1615595.65 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:34:58,709 INFO [train.py:901] (3/4) Epoch 23, batch 4350, loss[loss=0.213, simple_loss=0.2988, pruned_loss=0.06357, over 8605.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2858, pruned_loss=0.0605, over 1609800.21 frames. ], batch size: 31, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:35:12,957 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5072, 1.4514, 4.7371, 1.8121, 4.1204, 3.9452, 4.2582, 4.1202], + device='cuda:3'), covar=tensor([0.0604, 0.4887, 0.0484, 0.4108, 0.1167, 0.0951, 0.0609, 0.0692], + device='cuda:3'), in_proj_covar=tensor([0.0642, 0.0653, 0.0709, 0.0642, 0.0720, 0.0617, 0.0616, 0.0689], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:35:13,490 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.346e+02 2.960e+02 3.931e+02 9.702e+02, threshold=5.919e+02, percent-clipped=9.0 +2023-02-07 05:35:21,958 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 05:35:34,697 INFO [train.py:901] (3/4) Epoch 23, batch 4400, loss[loss=0.2298, simple_loss=0.3054, pruned_loss=0.07709, over 8734.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.06027, over 1609514.58 frames. ], batch size: 30, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:35:50,851 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6036, 1.3312, 1.5518, 1.2396, 0.9240, 1.3904, 1.4619, 1.2343], + device='cuda:3'), covar=tensor([0.0540, 0.1311, 0.1721, 0.1468, 0.0597, 0.1495, 0.0705, 0.0718], + device='cuda:3'), in_proj_covar=tensor([0.0096, 0.0152, 0.0189, 0.0160, 0.0100, 0.0163, 0.0111, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 05:35:55,268 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 05:36:03,255 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182263.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:36:05,077 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 05:36:11,442 INFO [train.py:901] (3/4) Epoch 23, batch 4450, loss[loss=0.2352, simple_loss=0.3133, pruned_loss=0.07851, over 8238.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.286, pruned_loss=0.06053, over 1612467.19 frames. 
], batch size: 24, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:36:20,488 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182288.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:36:26,042 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.606e+02 3.225e+02 4.349e+02 9.132e+02, threshold=6.449e+02, percent-clipped=7.0 +2023-02-07 05:36:29,003 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:36:47,067 INFO [train.py:901] (3/4) Epoch 23, batch 4500, loss[loss=0.2092, simple_loss=0.2993, pruned_loss=0.05958, over 8346.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2877, pruned_loss=0.06164, over 1612787.09 frames. ], batch size: 24, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:36:56,825 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 05:36:57,584 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182340.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:37:23,621 INFO [train.py:901] (3/4) Epoch 23, batch 4550, loss[loss=0.2331, simple_loss=0.304, pruned_loss=0.08112, over 8089.00 frames. ], tot_loss[loss=0.2058, simple_loss=0.2878, pruned_loss=0.06192, over 1614385.35 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:37:37,490 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.347e+02 2.810e+02 3.651e+02 9.685e+02, threshold=5.619e+02, percent-clipped=2.0 +2023-02-07 05:37:38,401 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0520, 3.7536, 2.1400, 2.8572, 2.7662, 1.6881, 2.8486, 3.1920], + device='cuda:3'), covar=tensor([0.1837, 0.0373, 0.1368, 0.0793, 0.0817, 0.1922, 0.1222, 0.0995], + device='cuda:3'), in_proj_covar=tensor([0.0357, 0.0234, 0.0338, 0.0311, 0.0299, 0.0340, 0.0346, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 05:37:59,265 INFO [train.py:901] (3/4) Epoch 23, batch 4600, loss[loss=0.1837, simple_loss=0.2727, pruned_loss=0.04734, over 8094.00 frames. ], tot_loss[loss=0.2056, simple_loss=0.2877, pruned_loss=0.06179, over 1610881.52 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:38:03,826 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2241, 1.8924, 2.5916, 1.5918, 1.6029, 2.5342, 1.2400, 2.0375], + device='cuda:3'), covar=tensor([0.1586, 0.1107, 0.0278, 0.1238, 0.2016, 0.0359, 0.1446, 0.1069], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0200, 0.0129, 0.0220, 0.0270, 0.0137, 0.0169, 0.0192], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 05:38:20,367 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182455.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:38:34,855 INFO [train.py:901] (3/4) Epoch 23, batch 4650, loss[loss=0.2014, simple_loss=0.2923, pruned_loss=0.05524, over 8548.00 frames. ], tot_loss[loss=0.2052, simple_loss=0.2873, pruned_loss=0.06153, over 1614246.17 frames. 
], batch size: 39, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:38:44,332 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7305, 1.5792, 1.8083, 1.5883, 1.3127, 1.5363, 2.3795, 1.7230], + device='cuda:3'), covar=tensor([0.0447, 0.1249, 0.1681, 0.1411, 0.0558, 0.1500, 0.0561, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0101, 0.0163, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 05:38:45,815 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8022, 1.7299, 3.9640, 1.4635, 3.5091, 3.2962, 3.6103, 3.4973], + device='cuda:3'), covar=tensor([0.0699, 0.3961, 0.0605, 0.4261, 0.1137, 0.0985, 0.0642, 0.0754], + device='cuda:3'), in_proj_covar=tensor([0.0639, 0.0646, 0.0702, 0.0634, 0.0712, 0.0609, 0.0609, 0.0682], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:38:50,619 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.204e+02 2.647e+02 3.638e+02 6.712e+02, threshold=5.294e+02, percent-clipped=7.0 +2023-02-07 05:39:05,479 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7979, 2.0061, 1.8014, 2.6283, 1.0558, 1.6061, 1.8085, 2.0599], + device='cuda:3'), covar=tensor([0.0777, 0.0776, 0.0910, 0.0366, 0.1169, 0.1308, 0.0877, 0.0843], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0199, 0.0247, 0.0216, 0.0208, 0.0249, 0.0252, 0.0209], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:39:12,442 INFO [train.py:901] (3/4) Epoch 23, batch 4700, loss[loss=0.1803, simple_loss=0.2769, pruned_loss=0.0419, over 8127.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06125, over 1614370.46 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:39:17,394 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182532.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:39:47,034 INFO [train.py:901] (3/4) Epoch 23, batch 4750, loss[loss=0.1982, simple_loss=0.2904, pruned_loss=0.05299, over 8145.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2851, pruned_loss=0.05999, over 1617802.11 frames. ], batch size: 22, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:40:01,607 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.297e+02 2.902e+02 3.418e+02 7.225e+02, threshold=5.805e+02, percent-clipped=3.0 +2023-02-07 05:40:05,943 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 05:40:08,827 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 05:40:11,797 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7276, 1.4960, 4.9266, 1.8532, 4.4532, 4.0633, 4.4198, 4.3639], + device='cuda:3'), covar=tensor([0.0551, 0.4627, 0.0390, 0.3969, 0.0861, 0.0856, 0.0554, 0.0551], + device='cuda:3'), in_proj_covar=tensor([0.0642, 0.0648, 0.0704, 0.0635, 0.0714, 0.0611, 0.0612, 0.0684], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:40:24,101 INFO [train.py:901] (3/4) Epoch 23, batch 4800, loss[loss=0.2509, simple_loss=0.3202, pruned_loss=0.09082, over 8459.00 frames. 
], tot_loss[loss=0.2033, simple_loss=0.2857, pruned_loss=0.06047, over 1610826.98 frames. ], batch size: 29, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:40:36,436 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182643.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:40:59,177 INFO [train.py:901] (3/4) Epoch 23, batch 4850, loss[loss=0.1801, simple_loss=0.2556, pruned_loss=0.05231, over 7691.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2849, pruned_loss=0.06017, over 1610846.35 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:41:00,611 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 05:41:13,254 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 2.288e+02 2.781e+02 3.814e+02 7.165e+02, threshold=5.562e+02, percent-clipped=4.0 +2023-02-07 05:41:25,712 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=182711.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:41:36,258 INFO [train.py:901] (3/4) Epoch 23, batch 4900, loss[loss=0.1923, simple_loss=0.2714, pruned_loss=0.05663, over 8092.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2848, pruned_loss=0.06009, over 1610071.30 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:41:44,994 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=182736.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:42:00,363 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182758.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:42:12,966 INFO [train.py:901] (3/4) Epoch 23, batch 4950, loss[loss=0.1942, simple_loss=0.2902, pruned_loss=0.04907, over 8575.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2847, pruned_loss=0.05999, over 1611985.83 frames. ], batch size: 39, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:42:27,042 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.423e+02 2.989e+02 3.745e+02 1.524e+03, threshold=5.977e+02, percent-clipped=7.0 +2023-02-07 05:42:48,225 INFO [train.py:901] (3/4) Epoch 23, batch 5000, loss[loss=0.214, simple_loss=0.3011, pruned_loss=0.06341, over 8438.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2856, pruned_loss=0.06009, over 1614341.99 frames. ], batch size: 29, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:43:25,232 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:43:25,719 INFO [train.py:901] (3/4) Epoch 23, batch 5050, loss[loss=0.214, simple_loss=0.2854, pruned_loss=0.07129, over 7972.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2852, pruned_loss=0.05989, over 1610355.09 frames. ], batch size: 21, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:43:26,556 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=182876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:43:29,545 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=182880.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:43:40,723 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.376e+02 2.932e+02 3.646e+02 6.966e+02, threshold=5.864e+02, percent-clipped=3.0 +2023-02-07 05:43:46,344 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-07 05:44:01,707 INFO [train.py:901] (3/4) Epoch 23, batch 5100, loss[loss=0.1904, simple_loss=0.2606, pruned_loss=0.06008, over 7521.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2851, pruned_loss=0.05972, over 1612374.74 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:44:03,884 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0574, 1.4890, 1.6605, 1.4264, 0.9404, 1.4381, 1.7609, 1.5765], + device='cuda:3'), covar=tensor([0.0572, 0.1267, 0.1696, 0.1429, 0.0622, 0.1483, 0.0708, 0.0651], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0101, 0.0163, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 05:44:38,860 INFO [train.py:901] (3/4) Epoch 23, batch 5150, loss[loss=0.2174, simple_loss=0.3056, pruned_loss=0.06457, over 8518.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2862, pruned_loss=0.06031, over 1612059.81 frames. ], batch size: 26, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:44:50,218 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=182991.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:44:53,605 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.308e+02 2.409e+02 2.843e+02 3.449e+02 6.604e+02, threshold=5.686e+02, percent-clipped=1.0 +2023-02-07 05:45:07,250 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183014.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:45:09,905 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7970, 1.6668, 1.9717, 1.7603, 1.0982, 1.7096, 2.3219, 2.2980], + device='cuda:3'), covar=tensor([0.0466, 0.1221, 0.1633, 0.1349, 0.0600, 0.1393, 0.0576, 0.0544], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0007], + device='cuda:3') +2023-02-07 05:45:14,591 INFO [train.py:901] (3/4) Epoch 23, batch 5200, loss[loss=0.1758, simple_loss=0.2464, pruned_loss=0.05263, over 7708.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06052, over 1612350.14 frames. ], batch size: 18, lr: 3.28e-03, grad_scale: 8.0 +2023-02-07 05:45:24,446 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183039.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:45:46,578 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 05:45:50,597 INFO [train.py:901] (3/4) Epoch 23, batch 5250, loss[loss=0.233, simple_loss=0.3154, pruned_loss=0.07533, over 8639.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2861, pruned_loss=0.06076, over 1610956.15 frames. ], batch size: 31, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:45:55,149 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 05:46:05,166 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.492e+02 2.942e+02 3.798e+02 7.403e+02, threshold=5.885e+02, percent-clipped=3.0 +2023-02-07 05:46:27,067 INFO [train.py:901] (3/4) Epoch 23, batch 5300, loss[loss=0.2056, simple_loss=0.2745, pruned_loss=0.06834, over 7529.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2859, pruned_loss=0.06136, over 1610093.88 frames. 
], batch size: 18, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:46:29,212 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3579, 1.4414, 1.3981, 1.7547, 0.7789, 1.2575, 1.3240, 1.4655], + device='cuda:3'), covar=tensor([0.0923, 0.0880, 0.0997, 0.0510, 0.1039, 0.1383, 0.0744, 0.0763], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0196, 0.0243, 0.0213, 0.0205, 0.0244, 0.0249, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:46:32,528 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 05:46:59,015 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.55 vs. limit=5.0 +2023-02-07 05:47:02,893 INFO [train.py:901] (3/4) Epoch 23, batch 5350, loss[loss=0.2, simple_loss=0.2911, pruned_loss=0.05449, over 8656.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2861, pruned_loss=0.06146, over 1607801.34 frames. ], batch size: 39, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:47:17,714 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.502e+02 3.193e+02 3.793e+02 7.809e+02, threshold=6.385e+02, percent-clipped=1.0 +2023-02-07 05:47:34,672 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183218.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:47:38,864 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:47:39,488 INFO [train.py:901] (3/4) Epoch 23, batch 5400, loss[loss=0.1857, simple_loss=0.2693, pruned_loss=0.05107, over 7966.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2867, pruned_loss=0.06138, over 1612742.10 frames. ], batch size: 21, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:47:55,900 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183247.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:48:13,031 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:48:14,972 INFO [train.py:901] (3/4) Epoch 23, batch 5450, loss[loss=0.2312, simple_loss=0.3091, pruned_loss=0.07662, over 8670.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2858, pruned_loss=0.0615, over 1608147.94 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:48:22,247 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6519, 2.3439, 3.1093, 2.5912, 3.0844, 2.5380, 2.4070, 1.9372], + device='cuda:3'), covar=tensor([0.5418, 0.5090, 0.2232, 0.3693, 0.2606, 0.3045, 0.1826, 0.5581], + device='cuda:3'), in_proj_covar=tensor([0.0941, 0.0986, 0.0806, 0.0947, 0.0995, 0.0898, 0.0751, 0.0829], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 05:48:30,414 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.334e+02 2.819e+02 3.622e+02 6.725e+02, threshold=5.637e+02, percent-clipped=1.0 +2023-02-07 05:48:41,154 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 05:48:52,618 INFO [train.py:901] (3/4) Epoch 23, batch 5500, loss[loss=0.1833, simple_loss=0.2556, pruned_loss=0.05547, over 7541.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2857, pruned_loss=0.06106, over 1612197.04 frames. 
], batch size: 18, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:48:58,324 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183333.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:49:02,368 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:49:14,868 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 05:49:27,074 INFO [train.py:901] (3/4) Epoch 23, batch 5550, loss[loss=0.1841, simple_loss=0.2594, pruned_loss=0.05439, over 7538.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.285, pruned_loss=0.06083, over 1607505.79 frames. ], batch size: 18, lr: 3.27e-03, grad_scale: 4.0 +2023-02-07 05:49:41,517 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.428e+02 3.119e+02 4.010e+02 1.058e+03, threshold=6.238e+02, percent-clipped=9.0 +2023-02-07 05:50:03,271 INFO [train.py:901] (3/4) Epoch 23, batch 5600, loss[loss=0.182, simple_loss=0.2641, pruned_loss=0.04993, over 7643.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2859, pruned_loss=0.06106, over 1609766.35 frames. ], batch size: 19, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:50:04,085 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183426.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:50:12,352 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7803, 1.5949, 3.9934, 1.4160, 3.4962, 3.3153, 3.6226, 3.5116], + device='cuda:3'), covar=tensor([0.0773, 0.4314, 0.0599, 0.4115, 0.1210, 0.1029, 0.0708, 0.0817], + device='cuda:3'), in_proj_covar=tensor([0.0643, 0.0650, 0.0704, 0.0636, 0.0713, 0.0613, 0.0611, 0.0686], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:50:39,038 INFO [train.py:901] (3/4) Epoch 23, batch 5650, loss[loss=0.213, simple_loss=0.3015, pruned_loss=0.06227, over 8282.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2868, pruned_loss=0.06159, over 1610818.60 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:50:51,426 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 05:50:53,308 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 2.300e+02 3.084e+02 3.921e+02 7.530e+02, threshold=6.168e+02, percent-clipped=4.0 +2023-02-07 05:51:14,122 INFO [train.py:901] (3/4) Epoch 23, batch 5700, loss[loss=0.2198, simple_loss=0.3017, pruned_loss=0.0689, over 8581.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2868, pruned_loss=0.06154, over 1610026.82 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:51:50,332 INFO [train.py:901] (3/4) Epoch 23, batch 5750, loss[loss=0.1435, simple_loss=0.2281, pruned_loss=0.02945, over 7647.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2878, pruned_loss=0.06148, over 1612161.25 frames. ], batch size: 19, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:51:57,139 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 05:52:00,900 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183589.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:01,866 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. 
limit=5.0 +2023-02-07 05:52:05,032 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=183595.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:05,458 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.334e+02 3.030e+02 3.740e+02 1.347e+03, threshold=6.060e+02, percent-clipped=7.0 +2023-02-07 05:52:18,049 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:22,097 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=183620.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:25,347 INFO [train.py:901] (3/4) Epoch 23, batch 5800, loss[loss=0.2061, simple_loss=0.2953, pruned_loss=0.05843, over 8721.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.287, pruned_loss=0.06079, over 1614548.57 frames. ], batch size: 34, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:52:30,175 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:52:51,797 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7035, 2.3068, 4.8760, 2.8343, 4.4542, 4.1932, 4.5333, 4.4107], + device='cuda:3'), covar=tensor([0.0627, 0.3610, 0.0470, 0.3124, 0.0898, 0.0848, 0.0512, 0.0572], + device='cuda:3'), in_proj_covar=tensor([0.0642, 0.0650, 0.0704, 0.0636, 0.0713, 0.0614, 0.0610, 0.0689], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:53:01,082 INFO [train.py:901] (3/4) Epoch 23, batch 5850, loss[loss=0.1814, simple_loss=0.2698, pruned_loss=0.04656, over 7935.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2849, pruned_loss=0.05986, over 1611856.32 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:53:16,205 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.232e+02 2.367e+02 2.798e+02 3.640e+02 5.951e+02, threshold=5.597e+02, percent-clipped=0.0 +2023-02-07 05:53:36,781 INFO [train.py:901] (3/4) Epoch 23, batch 5900, loss[loss=0.1886, simple_loss=0.2794, pruned_loss=0.0489, over 7964.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.284, pruned_loss=0.05962, over 1602167.71 frames. ], batch size: 21, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:53:38,965 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6086, 1.3061, 2.8254, 1.3247, 2.1269, 3.0399, 3.2209, 2.5875], + device='cuda:3'), covar=tensor([0.1194, 0.1746, 0.0395, 0.2159, 0.0904, 0.0314, 0.0549, 0.0591], + device='cuda:3'), in_proj_covar=tensor([0.0294, 0.0319, 0.0285, 0.0313, 0.0311, 0.0267, 0.0422, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 05:53:39,003 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7191, 1.9354, 1.6105, 2.3468, 1.0734, 1.4166, 1.7628, 1.9095], + device='cuda:3'), covar=tensor([0.0767, 0.0698, 0.0884, 0.0356, 0.1054, 0.1358, 0.0709, 0.0641], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0197, 0.0245, 0.0214, 0.0206, 0.0246, 0.0250, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:53:59,045 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 05:54:08,430 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183770.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:54:11,755 INFO [train.py:901] (3/4) Epoch 23, batch 5950, loss[loss=0.24, simple_loss=0.3304, pruned_loss=0.07479, over 8706.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2856, pruned_loss=0.06061, over 1602048.80 frames. ], batch size: 49, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:54:23,702 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2112, 4.1800, 3.8054, 2.0811, 3.7159, 3.6937, 3.7109, 3.5530], + device='cuda:3'), covar=tensor([0.0724, 0.0523, 0.0890, 0.3946, 0.0843, 0.0962, 0.1229, 0.0797], + device='cuda:3'), in_proj_covar=tensor([0.0524, 0.0441, 0.0426, 0.0536, 0.0427, 0.0443, 0.0427, 0.0384], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:54:27,016 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 2.784e+02 3.423e+02 5.836e+02, threshold=5.567e+02, percent-clipped=2.0 +2023-02-07 05:54:38,318 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0842, 1.4015, 4.2707, 1.7780, 2.5365, 4.8507, 4.9552, 4.1259], + device='cuda:3'), covar=tensor([0.1269, 0.2048, 0.0303, 0.2142, 0.1144, 0.0190, 0.0476, 0.0545], + device='cuda:3'), in_proj_covar=tensor([0.0293, 0.0317, 0.0283, 0.0311, 0.0308, 0.0265, 0.0420, 0.0300], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 05:54:43,514 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2536, 3.2253, 3.0242, 1.5917, 2.9427, 2.8704, 2.9430, 2.7922], + device='cuda:3'), covar=tensor([0.1155, 0.0862, 0.1228, 0.4410, 0.1070, 0.1402, 0.1563, 0.1060], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0443, 0.0428, 0.0539, 0.0429, 0.0445, 0.0428, 0.0385], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 05:54:47,617 INFO [train.py:901] (3/4) Epoch 23, batch 6000, loss[loss=0.2147, simple_loss=0.2973, pruned_loss=0.06603, over 8499.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2852, pruned_loss=0.06074, over 1602448.69 frames. ], batch size: 26, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:54:47,618 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 05:55:00,698 INFO [train.py:935] (3/4) Epoch 23, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03597, over 944034.00 frames. +2023-02-07 05:55:00,699 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 05:55:25,814 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=183860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:55:32,453 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-07 05:55:36,137 INFO [train.py:901] (3/4) Epoch 23, batch 6050, loss[loss=0.2108, simple_loss=0.2956, pruned_loss=0.06304, over 8131.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2873, pruned_loss=0.06202, over 1607515.96 frames. 
], batch size: 22, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:55:43,230 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=183885.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:55:50,619 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.494e+02 2.465e+02 3.097e+02 3.782e+02 8.398e+02, threshold=6.194e+02, percent-clipped=6.0 +2023-02-07 05:56:02,800 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 05:56:11,859 INFO [train.py:901] (3/4) Epoch 23, batch 6100, loss[loss=0.1909, simple_loss=0.2815, pruned_loss=0.05018, over 8111.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2876, pruned_loss=0.06192, over 1608823.97 frames. ], batch size: 23, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:56:32,460 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 05:56:47,367 INFO [train.py:901] (3/4) Epoch 23, batch 6150, loss[loss=0.1977, simple_loss=0.2588, pruned_loss=0.06828, over 6813.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2869, pruned_loss=0.061, over 1609979.68 frames. ], batch size: 15, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:56:48,176 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=183976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:56:51,963 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 05:57:01,786 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.512e+02 2.876e+02 3.577e+02 6.799e+02, threshold=5.752e+02, percent-clipped=2.0 +2023-02-07 05:57:22,985 INFO [train.py:901] (3/4) Epoch 23, batch 6200, loss[loss=0.1828, simple_loss=0.264, pruned_loss=0.05076, over 7801.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2864, pruned_loss=0.06051, over 1611766.85 frames. ], batch size: 20, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:57:25,507 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1148, 1.8632, 2.3998, 2.0772, 2.2928, 2.1883, 1.9644, 1.2464], + device='cuda:3'), covar=tensor([0.5283, 0.4614, 0.1864, 0.3345, 0.2317, 0.2954, 0.1836, 0.4819], + device='cuda:3'), in_proj_covar=tensor([0.0947, 0.0992, 0.0809, 0.0953, 0.0999, 0.0900, 0.0752, 0.0830], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 05:57:54,995 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:57:59,607 INFO [train.py:901] (3/4) Epoch 23, batch 6250, loss[loss=0.1965, simple_loss=0.2632, pruned_loss=0.06489, over 7435.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2872, pruned_loss=0.06111, over 1611728.10 frames. 
], batch size: 17, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:58:08,019 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7507, 1.9434, 1.6940, 2.2926, 0.9526, 1.5661, 1.7501, 1.8797], + device='cuda:3'), covar=tensor([0.0765, 0.0699, 0.0885, 0.0430, 0.1097, 0.1311, 0.0728, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0198, 0.0246, 0.0215, 0.0207, 0.0247, 0.0250, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 05:58:11,446 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:58:14,643 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.340e+02 2.866e+02 3.425e+02 5.984e+02, threshold=5.731e+02, percent-clipped=3.0 +2023-02-07 05:58:34,481 INFO [train.py:901] (3/4) Epoch 23, batch 6300, loss[loss=0.2206, simple_loss=0.3023, pruned_loss=0.0695, over 8438.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2866, pruned_loss=0.06095, over 1612177.23 frames. ], batch size: 29, lr: 3.27e-03, grad_scale: 8.0 +2023-02-07 05:58:45,793 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:04,561 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184166.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:06,581 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0589, 1.5771, 1.3631, 1.7085, 1.4114, 1.2313, 1.4128, 1.3976], + device='cuda:3'), covar=tensor([0.1122, 0.0492, 0.1472, 0.0486, 0.0767, 0.1681, 0.0846, 0.0702], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0231, 0.0331, 0.0305, 0.0297, 0.0337, 0.0342, 0.0314], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 05:59:10,600 INFO [train.py:901] (3/4) Epoch 23, batch 6350, loss[loss=0.1965, simple_loss=0.2729, pruned_loss=0.06002, over 7801.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2855, pruned_loss=0.0604, over 1611780.17 frames. ], batch size: 19, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 05:59:12,233 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0849, 3.7741, 2.3834, 2.9905, 2.9019, 2.2976, 2.8346, 3.2360], + device='cuda:3'), covar=tensor([0.1575, 0.0305, 0.1094, 0.0725, 0.0665, 0.1279, 0.1004, 0.0894], + device='cuda:3'), in_proj_covar=tensor([0.0351, 0.0232, 0.0331, 0.0305, 0.0297, 0.0337, 0.0342, 0.0315], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0003, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 05:59:25,790 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.298e+02 2.703e+02 3.593e+02 9.198e+02, threshold=5.406e+02, percent-clipped=6.0 +2023-02-07 05:59:32,345 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184204.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 05:59:46,838 INFO [train.py:901] (3/4) Epoch 23, batch 6400, loss[loss=0.1939, simple_loss=0.2553, pruned_loss=0.06623, over 7714.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2852, pruned_loss=0.06019, over 1612650.66 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:00:22,069 INFO [train.py:901] (3/4) Epoch 23, batch 6450, loss[loss=0.18, simple_loss=0.269, pruned_loss=0.04547, over 8336.00 frames. 
], tot_loss[loss=0.2036, simple_loss=0.2858, pruned_loss=0.0607, over 1611877.26 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:00:37,219 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.429e+02 3.055e+02 3.904e+02 7.071e+02, threshold=6.109e+02, percent-clipped=5.0 +2023-02-07 06:00:53,120 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1750, 1.6743, 4.5154, 2.0954, 2.6170, 5.1092, 5.1645, 4.4289], + device='cuda:3'), covar=tensor([0.1181, 0.1824, 0.0244, 0.1923, 0.1102, 0.0176, 0.0308, 0.0503], + device='cuda:3'), in_proj_covar=tensor([0.0295, 0.0319, 0.0285, 0.0314, 0.0311, 0.0267, 0.0423, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:00:54,551 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:00:58,465 INFO [train.py:901] (3/4) Epoch 23, batch 6500, loss[loss=0.1825, simple_loss=0.2624, pruned_loss=0.05134, over 8085.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2847, pruned_loss=0.05987, over 1609096.99 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:01:13,657 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184347.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:30,749 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184372.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:32,667 INFO [train.py:901] (3/4) Epoch 23, batch 6550, loss[loss=0.2059, simple_loss=0.2917, pruned_loss=0.06002, over 8604.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2857, pruned_loss=0.06055, over 1609253.24 frames. ], batch size: 34, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:01:34,223 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:01:48,099 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.251e+02 2.720e+02 3.518e+02 7.175e+02, threshold=5.440e+02, percent-clipped=6.0 +2023-02-07 06:01:51,595 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 06:02:00,103 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184412.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:02:09,823 INFO [train.py:901] (3/4) Epoch 23, batch 6600, loss[loss=0.2065, simple_loss=0.2934, pruned_loss=0.05982, over 8328.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.285, pruned_loss=0.06041, over 1606318.66 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:02:09,838 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 06:02:45,212 INFO [train.py:901] (3/4) Epoch 23, batch 6650, loss[loss=0.1807, simple_loss=0.2568, pruned_loss=0.05231, over 7679.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2843, pruned_loss=0.05998, over 1609751.81 frames. 
], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:00,393 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.187e+02 2.636e+02 3.150e+02 7.164e+02, threshold=5.273e+02, percent-clipped=1.0 +2023-02-07 06:03:06,034 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184504.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:03:14,722 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 06:03:21,193 INFO [train.py:901] (3/4) Epoch 23, batch 6700, loss[loss=0.1788, simple_loss=0.2651, pruned_loss=0.04621, over 7716.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2834, pruned_loss=0.05963, over 1610759.34 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:22,776 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184527.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:03:32,199 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.10 vs. limit=5.0 +2023-02-07 06:03:56,976 INFO [train.py:901] (3/4) Epoch 23, batch 6750, loss[loss=0.184, simple_loss=0.264, pruned_loss=0.05195, over 8077.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2846, pruned_loss=0.06014, over 1609531.70 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:03:57,226 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184575.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:04:02,293 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 06:04:11,516 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.299e+02 2.705e+02 3.689e+02 1.087e+03, threshold=5.410e+02, percent-clipped=6.0 +2023-02-07 06:04:14,465 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184600.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:04:30,945 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 06:04:32,276 INFO [train.py:901] (3/4) Epoch 23, batch 6800, loss[loss=0.2388, simple_loss=0.3241, pruned_loss=0.07677, over 8457.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2862, pruned_loss=0.06129, over 1610480.01 frames. ], batch size: 27, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:04:46,626 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. limit=2.0 +2023-02-07 06:05:08,933 INFO [train.py:901] (3/4) Epoch 23, batch 6850, loss[loss=0.1583, simple_loss=0.2346, pruned_loss=0.04101, over 7545.00 frames. ], tot_loss[loss=0.2053, simple_loss=0.2872, pruned_loss=0.0617, over 1609606.39 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:05:14,193 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2901, 2.0580, 2.6517, 2.1868, 2.6346, 2.2875, 2.1304, 1.4172], + device='cuda:3'), covar=tensor([0.5292, 0.4780, 0.2038, 0.3959, 0.2541, 0.3068, 0.2039, 0.5270], + device='cuda:3'), in_proj_covar=tensor([0.0943, 0.0990, 0.0807, 0.0950, 0.0997, 0.0898, 0.0752, 0.0828], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:05:19,335 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 06:05:23,525 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.780e+02 2.645e+02 3.100e+02 4.179e+02 7.238e+02, threshold=6.201e+02, percent-clipped=8.0 +2023-02-07 06:05:40,715 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184721.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:05:43,468 INFO [train.py:901] (3/4) Epoch 23, batch 6900, loss[loss=0.2362, simple_loss=0.3237, pruned_loss=0.07435, over 8614.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2877, pruned_loss=0.06152, over 1612955.68 frames. ], batch size: 31, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:06:21,332 INFO [train.py:901] (3/4) Epoch 23, batch 6950, loss[loss=0.1962, simple_loss=0.2745, pruned_loss=0.05893, over 7917.00 frames. ], tot_loss[loss=0.2063, simple_loss=0.2882, pruned_loss=0.06218, over 1610329.06 frames. ], batch size: 20, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:06:27,129 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=184783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:06:29,047 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 06:06:35,999 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 2.334e+02 2.863e+02 3.573e+02 6.345e+02, threshold=5.727e+02, percent-clipped=1.0 +2023-02-07 06:06:38,088 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9184, 3.8292, 3.5237, 1.8861, 3.4632, 3.4442, 3.3920, 3.3605], + device='cuda:3'), covar=tensor([0.0846, 0.0660, 0.1174, 0.4372, 0.1016, 0.1137, 0.1615, 0.0843], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0447, 0.0431, 0.0544, 0.0434, 0.0448, 0.0430, 0.0389], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:06:44,106 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.24 vs. limit=5.0 +2023-02-07 06:06:44,532 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=184808.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:06:56,221 INFO [train.py:901] (3/4) Epoch 23, batch 7000, loss[loss=0.2316, simple_loss=0.3174, pruned_loss=0.07293, over 8660.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2869, pruned_loss=0.06164, over 1606528.40 frames. ], batch size: 34, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:07:03,960 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184836.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:12,028 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=184848.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:32,116 INFO [train.py:901] (3/4) Epoch 23, batch 7050, loss[loss=0.2077, simple_loss=0.2942, pruned_loss=0.06061, over 8085.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2852, pruned_loss=0.06074, over 1601730.12 frames. ], batch size: 21, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:07:38,586 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:07:48,063 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.510e+02 2.247e+02 2.854e+02 3.580e+02 1.056e+03, threshold=5.709e+02, percent-clipped=4.0 +2023-02-07 06:07:53,395 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.42 vs. 
limit=5.0 +2023-02-07 06:08:08,157 INFO [train.py:901] (3/4) Epoch 23, batch 7100, loss[loss=0.1797, simple_loss=0.2537, pruned_loss=0.05281, over 7699.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2845, pruned_loss=0.06007, over 1601052.34 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:08:30,175 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=184957.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:08:34,372 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=184963.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:08:42,468 INFO [train.py:901] (3/4) Epoch 23, batch 7150, loss[loss=0.2181, simple_loss=0.3073, pruned_loss=0.0644, over 8319.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2848, pruned_loss=0.05993, over 1604542.79 frames. ], batch size: 25, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:08:53,468 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9672, 1.6897, 1.9956, 1.8309, 1.9866, 2.0483, 1.8517, 0.8516], + device='cuda:3'), covar=tensor([0.5659, 0.4597, 0.2041, 0.3398, 0.2368, 0.2837, 0.1907, 0.4907], + device='cuda:3'), in_proj_covar=tensor([0.0942, 0.0991, 0.0806, 0.0950, 0.0997, 0.0896, 0.0753, 0.0829], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:08:58,831 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.323e+02 2.664e+02 3.243e+02 7.163e+02, threshold=5.329e+02, percent-clipped=2.0 +2023-02-07 06:09:20,383 INFO [train.py:901] (3/4) Epoch 23, batch 7200, loss[loss=0.228, simple_loss=0.3014, pruned_loss=0.0773, over 8138.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2856, pruned_loss=0.06068, over 1605169.16 frames. 
], batch size: 22, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:29,457 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9016, 1.5262, 1.6953, 1.3935, 0.8318, 1.4837, 1.6749, 1.4970], + device='cuda:3'), covar=tensor([0.0543, 0.1232, 0.1731, 0.1469, 0.0601, 0.1488, 0.0688, 0.0673], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0102, 0.0164, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 06:09:35,034 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9658, 1.3185, 1.5239, 1.2179, 0.9491, 1.3849, 1.7648, 1.4913], + device='cuda:3'), covar=tensor([0.0556, 0.1600, 0.2321, 0.1872, 0.0679, 0.1940, 0.0732, 0.0719], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0102, 0.0164, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 06:09:35,779 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7736, 2.2158, 3.5612, 1.9484, 1.6405, 3.4408, 0.5730, 2.1481], + device='cuda:3'), covar=tensor([0.1477, 0.1291, 0.0245, 0.1589, 0.2771, 0.0342, 0.2357, 0.1363], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0195, 0.0129, 0.0219, 0.0268, 0.0136, 0.0168, 0.0191], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 06:09:44,873 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6926, 1.7458, 2.0414, 1.7287, 1.0271, 1.7077, 2.1028, 2.0191], + device='cuda:3'), covar=tensor([0.0449, 0.1212, 0.1572, 0.1354, 0.0615, 0.1463, 0.0661, 0.0605], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 06:09:44,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8353, 2.2703, 3.6969, 1.7680, 1.5870, 3.6889, 0.5202, 2.1583], + device='cuda:3'), covar=tensor([0.1346, 0.1302, 0.0192, 0.1925, 0.2904, 0.0216, 0.2464, 0.1406], + device='cuda:3'), in_proj_covar=tensor([0.0190, 0.0196, 0.0129, 0.0220, 0.0268, 0.0136, 0.0169, 0.0191], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 06:09:54,644 INFO [train.py:901] (3/4) Epoch 23, batch 7250, loss[loss=0.2372, simple_loss=0.3158, pruned_loss=0.07933, over 7525.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2862, pruned_loss=0.06101, over 1606224.86 frames. 
], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:09:56,898 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185078.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:06,367 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:09,714 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.341e+02 2.701e+02 3.625e+02 6.528e+02, threshold=5.401e+02, percent-clipped=8.0 +2023-02-07 06:10:25,011 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:10:30,392 INFO [train.py:901] (3/4) Epoch 23, batch 7300, loss[loss=0.1826, simple_loss=0.272, pruned_loss=0.04655, over 8532.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2857, pruned_loss=0.06082, over 1608002.78 frames. ], batch size: 49, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:10:46,959 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:06,505 INFO [train.py:901] (3/4) Epoch 23, batch 7350, loss[loss=0.1673, simple_loss=0.2495, pruned_loss=0.04259, over 7543.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2854, pruned_loss=0.06053, over 1609529.31 frames. ], batch size: 18, lr: 3.26e-03, grad_scale: 8.0 +2023-02-07 06:11:19,725 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 06:11:21,070 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 2.380e+02 2.863e+02 3.556e+02 7.708e+02, threshold=5.726e+02, percent-clipped=6.0 +2023-02-07 06:11:38,636 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185219.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:41,245 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 06:11:42,632 INFO [train.py:901] (3/4) Epoch 23, batch 7400, loss[loss=0.1915, simple_loss=0.2852, pruned_loss=0.04893, over 8036.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2845, pruned_loss=0.06011, over 1607517.64 frames. ], batch size: 22, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:11:44,815 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185228.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:11:53,922 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9974, 1.3215, 1.6215, 1.3105, 0.9467, 1.4409, 1.7741, 1.7983], + device='cuda:3'), covar=tensor([0.0609, 0.1760, 0.2348, 0.1867, 0.0703, 0.2035, 0.0777, 0.0663], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0159, 0.0101, 0.0163, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 06:11:56,742 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185244.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:18,683 INFO [train.py:901] (3/4) Epoch 23, batch 7450, loss[loss=0.1577, simple_loss=0.2412, pruned_loss=0.03713, over 7663.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2838, pruned_loss=0.05991, over 1604692.79 frames. 
], batch size: 19, lr: 3.26e-03, grad_scale: 16.0 +2023-02-07 06:12:18,956 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6245, 1.8315, 2.6464, 1.4621, 1.9474, 2.0010, 1.6515, 1.9283], + device='cuda:3'), covar=tensor([0.1878, 0.2480, 0.0817, 0.4504, 0.1906, 0.3099, 0.2287, 0.2208], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0611, 0.0554, 0.0646, 0.0647, 0.0595, 0.0541, 0.0632], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:12:21,617 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 06:12:33,462 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.310e+02 2.954e+02 3.827e+02 6.869e+02, threshold=5.908e+02, percent-clipped=4.0 +2023-02-07 06:12:37,079 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:12:53,820 INFO [train.py:901] (3/4) Epoch 23, batch 7500, loss[loss=0.1903, simple_loss=0.2623, pruned_loss=0.0591, over 7451.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2837, pruned_loss=0.05953, over 1607062.74 frames. ], batch size: 17, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:05,840 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-07 06:13:08,254 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185343.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:13:22,347 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9594, 2.2554, 1.7334, 2.8490, 1.2551, 1.5424, 2.1004, 2.2161], + device='cuda:3'), covar=tensor([0.0778, 0.0850, 0.0952, 0.0343, 0.1192, 0.1397, 0.0801, 0.0754], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0199, 0.0244, 0.0214, 0.0206, 0.0247, 0.0250, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 06:13:31,751 INFO [train.py:901] (3/4) Epoch 23, batch 7550, loss[loss=0.2206, simple_loss=0.3073, pruned_loss=0.06694, over 8327.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2845, pruned_loss=0.05927, over 1612749.80 frames. ], batch size: 25, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:13:39,221 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 06:13:46,072 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.461e+02 3.059e+02 3.860e+02 7.244e+02, threshold=6.118e+02, percent-clipped=3.0 +2023-02-07 06:14:00,489 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185416.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:04,590 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185422.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:06,608 INFO [train.py:901] (3/4) Epoch 23, batch 7600, loss[loss=0.1765, simple_loss=0.2529, pruned_loss=0.05002, over 7706.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2843, pruned_loss=0.05901, over 1612584.02 frames. ], batch size: 18, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:09,581 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.84 vs. 
limit=5.0 +2023-02-07 06:14:37,967 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8281, 6.0913, 5.2008, 2.6546, 5.3572, 5.6406, 5.4923, 5.5105], + device='cuda:3'), covar=tensor([0.0557, 0.0350, 0.0884, 0.4129, 0.0771, 0.0838, 0.1153, 0.0513], + device='cuda:3'), in_proj_covar=tensor([0.0535, 0.0450, 0.0439, 0.0549, 0.0439, 0.0452, 0.0433, 0.0394], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:14:41,963 INFO [train.py:901] (3/4) Epoch 23, batch 7650, loss[loss=0.1802, simple_loss=0.2625, pruned_loss=0.04893, over 7538.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2849, pruned_loss=0.0596, over 1611342.42 frames. ], batch size: 18, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:14:50,200 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:54,420 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:14:57,817 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.760e+02 2.478e+02 3.100e+02 3.999e+02 8.387e+02, threshold=6.200e+02, percent-clipped=6.0 +2023-02-07 06:15:17,442 INFO [train.py:901] (3/4) Epoch 23, batch 7700, loss[loss=0.1789, simple_loss=0.2656, pruned_loss=0.04613, over 8141.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.06001, over 1614733.12 frames. ], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:15:25,727 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:15:37,341 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 06:15:53,099 INFO [train.py:901] (3/4) Epoch 23, batch 7750, loss[loss=0.244, simple_loss=0.3317, pruned_loss=0.07814, over 8761.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2854, pruned_loss=0.05961, over 1615249.39 frames. ], batch size: 30, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:15:56,275 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-07 06:16:08,175 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.519e+02 2.336e+02 2.905e+02 3.607e+02 6.527e+02, threshold=5.810e+02, percent-clipped=2.0 +2023-02-07 06:16:10,499 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185599.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:11,963 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2563, 2.0032, 2.6598, 2.1471, 2.5204, 2.2771, 2.0983, 1.4063], + device='cuda:3'), covar=tensor([0.5486, 0.5102, 0.1980, 0.3651, 0.2680, 0.3085, 0.1938, 0.5237], + device='cuda:3'), in_proj_covar=tensor([0.0940, 0.0982, 0.0803, 0.0949, 0.0991, 0.0896, 0.0750, 0.0824], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:16:15,294 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,342 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:16:28,831 INFO [train.py:901] (3/4) Epoch 23, batch 7800, loss[loss=0.1907, simple_loss=0.2866, pruned_loss=0.04737, over 8324.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2865, pruned_loss=0.05985, over 1620062.39 frames. ], batch size: 25, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:01,069 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185672.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:02,902 INFO [train.py:901] (3/4) Epoch 23, batch 7850, loss[loss=0.233, simple_loss=0.3051, pruned_loss=0.08045, over 6441.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2859, pruned_loss=0.05972, over 1614435.62 frames. ], batch size: 72, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:17:17,283 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.434e+02 2.983e+02 3.607e+02 9.941e+02, threshold=5.966e+02, percent-clipped=5.0 +2023-02-07 06:17:18,193 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185697.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:17:28,603 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7898, 2.3479, 4.2244, 1.6817, 3.1637, 2.3884, 1.9724, 3.1379], + device='cuda:3'), covar=tensor([0.1841, 0.2889, 0.0669, 0.4649, 0.1740, 0.3044, 0.2354, 0.2232], + device='cuda:3'), in_proj_covar=tensor([0.0523, 0.0609, 0.0551, 0.0643, 0.0646, 0.0591, 0.0539, 0.0630], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:17:37,217 INFO [train.py:901] (3/4) Epoch 23, batch 7900, loss[loss=0.173, simple_loss=0.2549, pruned_loss=0.0455, over 8032.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2851, pruned_loss=0.05952, over 1612913.44 frames. ], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:11,080 INFO [train.py:901] (3/4) Epoch 23, batch 7950, loss[loss=0.1918, simple_loss=0.2827, pruned_loss=0.05046, over 8594.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2853, pruned_loss=0.05944, over 1616134.55 frames. 
], batch size: 31, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:12,603 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185777.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:23,314 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:25,077 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.254e+02 2.775e+02 3.427e+02 8.244e+02, threshold=5.550e+02, percent-clipped=2.0 +2023-02-07 06:18:39,402 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=185817.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:18:40,110 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:18:44,665 INFO [train.py:901] (3/4) Epoch 23, batch 8000, loss[loss=0.2372, simple_loss=0.3146, pruned_loss=0.07985, over 8029.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2848, pruned_loss=0.05961, over 1611788.61 frames. ], batch size: 22, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:18:48,036 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=185830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:09,886 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=185862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:15,915 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.03 vs. limit=5.0 +2023-02-07 06:19:17,209 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 06:19:18,260 INFO [train.py:901] (3/4) Epoch 23, batch 8050, loss[loss=0.2826, simple_loss=0.3309, pruned_loss=0.1172, over 6741.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2845, pruned_loss=0.06059, over 1590295.39 frames. ], batch size: 71, lr: 3.25e-03, grad_scale: 16.0 +2023-02-07 06:19:26,781 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=185887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:19:32,791 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.662e+02 3.318e+02 4.159e+02 9.358e+02, threshold=6.635e+02, percent-clipped=7.0 +2023-02-07 06:19:51,668 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 06:19:57,763 INFO [train.py:901] (3/4) Epoch 24, batch 0, loss[loss=0.2454, simple_loss=0.3025, pruned_loss=0.09414, over 7435.00 frames. ], tot_loss[loss=0.2454, simple_loss=0.3025, pruned_loss=0.09414, over 7435.00 frames. ], batch size: 17, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:19:57,763 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 06:20:01,674 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6368, 1.4279, 1.6905, 1.4116, 0.9696, 1.4481, 1.6668, 1.3033], + device='cuda:3'), covar=tensor([0.0699, 0.1376, 0.1795, 0.1529, 0.0628, 0.1595, 0.0710, 0.0729], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0160, 0.0101, 0.0164, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 06:20:09,065 INFO [train.py:935] (3/4) Epoch 24, validation: loss=0.1731, simple_loss=0.2733, pruned_loss=0.03644, over 944034.00 frames. 
+2023-02-07 06:20:09,067 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 06:20:23,918 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 06:20:35,545 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=185945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:20:44,027 INFO [train.py:901] (3/4) Epoch 24, batch 50, loss[loss=0.2284, simple_loss=0.3115, pruned_loss=0.07264, over 8031.00 frames. ], tot_loss[loss=0.2148, simple_loss=0.297, pruned_loss=0.06633, over 367463.87 frames. ], batch size: 22, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:20:57,553 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 06:21:11,401 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.437e+02 2.851e+02 3.663e+02 1.155e+03, threshold=5.702e+02, percent-clipped=3.0 +2023-02-07 06:21:20,557 INFO [train.py:901] (3/4) Epoch 24, batch 100, loss[loss=0.1777, simple_loss=0.2544, pruned_loss=0.05055, over 7659.00 frames. ], tot_loss[loss=0.2075, simple_loss=0.2885, pruned_loss=0.06325, over 640742.45 frames. ], batch size: 19, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:21:22,597 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 06:21:41,725 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3743, 1.5325, 4.5862, 1.6966, 4.0837, 3.8445, 4.1886, 4.0775], + device='cuda:3'), covar=tensor([0.0579, 0.4663, 0.0494, 0.4094, 0.1057, 0.0958, 0.0564, 0.0716], + device='cuda:3'), in_proj_covar=tensor([0.0645, 0.0655, 0.0708, 0.0641, 0.0720, 0.0620, 0.0615, 0.0690], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:21:56,095 INFO [train.py:901] (3/4) Epoch 24, batch 150, loss[loss=0.1811, simple_loss=0.2634, pruned_loss=0.04941, over 7536.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2854, pruned_loss=0.06035, over 858025.18 frames. ], batch size: 18, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:00,573 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-07 06:22:03,342 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.42 vs. limit=2.0 +2023-02-07 06:22:07,734 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7043, 1.9046, 2.1094, 1.3311, 2.2127, 1.5317, 0.6705, 1.8670], + device='cuda:3'), covar=tensor([0.0610, 0.0394, 0.0315, 0.0653, 0.0422, 0.0919, 0.0930, 0.0330], + device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0402, 0.0354, 0.0455, 0.0385, 0.0545, 0.0398, 0.0428], + device='cuda:3'), out_proj_covar=tensor([1.2331e-04, 1.0515e-04, 9.2994e-05, 1.1942e-04, 1.0126e-04, 1.5330e-04, + 1.0710e-04, 1.1294e-04], device='cuda:3') +2023-02-07 06:22:21,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.384e+02 2.880e+02 3.401e+02 7.597e+02, threshold=5.761e+02, percent-clipped=1.0 +2023-02-07 06:22:30,271 INFO [train.py:901] (3/4) Epoch 24, batch 200, loss[loss=0.2057, simple_loss=0.2924, pruned_loss=0.05953, over 8513.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2859, pruned_loss=0.06045, over 1027395.60 frames. 
], batch size: 26, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:22:35,409 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9434, 1.5973, 3.5158, 1.6221, 2.3839, 3.8721, 3.9683, 3.3299], + device='cuda:3'), covar=tensor([0.1178, 0.1747, 0.0309, 0.2015, 0.1114, 0.0215, 0.0405, 0.0523], + device='cuda:3'), in_proj_covar=tensor([0.0297, 0.0321, 0.0286, 0.0317, 0.0314, 0.0269, 0.0426, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:22:38,155 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:22:40,060 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:01,369 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-07 06:23:05,572 INFO [train.py:901] (3/4) Epoch 24, batch 250, loss[loss=0.1945, simple_loss=0.2769, pruned_loss=0.05602, over 7426.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.286, pruned_loss=0.06068, over 1158341.44 frames. ], batch size: 17, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:07,697 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186161.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:23:16,549 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 06:23:18,806 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186176.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:25,623 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 06:23:32,348 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.402e+02 3.098e+02 3.972e+02 8.418e+02, threshold=6.197e+02, percent-clipped=5.0 +2023-02-07 06:23:36,037 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186201.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:23:40,499 INFO [train.py:901] (3/4) Epoch 24, batch 300, loss[loss=0.1907, simple_loss=0.2755, pruned_loss=0.05293, over 8016.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2878, pruned_loss=0.06204, over 1258826.88 frames. ], batch size: 22, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:23:53,003 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186226.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,549 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:00,864 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 06:24:13,910 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6017, 2.7110, 1.8848, 2.2640, 2.0858, 1.7403, 2.1768, 2.1626], + device='cuda:3'), covar=tensor([0.1474, 0.0379, 0.1176, 0.0649, 0.0798, 0.1427, 0.0955, 0.1029], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0235, 0.0337, 0.0311, 0.0303, 0.0342, 0.0350, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 06:24:15,084 INFO [train.py:901] (3/4) Epoch 24, batch 350, loss[loss=0.1896, simple_loss=0.2781, pruned_loss=0.05052, over 8257.00 frames. 
], tot_loss[loss=0.2052, simple_loss=0.2872, pruned_loss=0.06159, over 1340501.25 frames. ], batch size: 24, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:24:24,620 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3693, 4.3175, 3.9572, 2.1764, 3.8595, 3.9213, 3.8915, 3.7857], + device='cuda:3'), covar=tensor([0.0685, 0.0563, 0.0920, 0.3869, 0.0807, 0.0940, 0.1186, 0.0723], + device='cuda:3'), in_proj_covar=tensor([0.0526, 0.0445, 0.0431, 0.0541, 0.0431, 0.0446, 0.0426, 0.0389], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:24:28,140 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186276.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:24:28,717 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:24:42,187 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 2.428e+02 2.971e+02 3.348e+02 5.777e+02, threshold=5.941e+02, percent-clipped=0.0 +2023-02-07 06:24:47,695 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1423, 4.0725, 3.6894, 2.1238, 3.6168, 3.7531, 3.6756, 3.5816], + device='cuda:3'), covar=tensor([0.0788, 0.0596, 0.1114, 0.4231, 0.0884, 0.1031, 0.1273, 0.0838], + device='cuda:3'), in_proj_covar=tensor([0.0527, 0.0445, 0.0431, 0.0541, 0.0431, 0.0446, 0.0426, 0.0389], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:24:50,334 INFO [train.py:901] (3/4) Epoch 24, batch 400, loss[loss=0.2679, simple_loss=0.3453, pruned_loss=0.09528, over 8755.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2878, pruned_loss=0.06184, over 1404560.02 frames. ], batch size: 30, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:26,090 INFO [train.py:901] (3/4) Epoch 24, batch 450, loss[loss=0.2033, simple_loss=0.2749, pruned_loss=0.06587, over 7704.00 frames. ], tot_loss[loss=0.2059, simple_loss=0.2881, pruned_loss=0.06187, over 1454087.77 frames. ], batch size: 18, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:25:52,936 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.487e+02 2.919e+02 3.580e+02 7.824e+02, threshold=5.839e+02, percent-clipped=3.0 +2023-02-07 06:26:02,022 INFO [train.py:901] (3/4) Epoch 24, batch 500, loss[loss=0.2191, simple_loss=0.3061, pruned_loss=0.06607, over 8346.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2876, pruned_loss=0.0617, over 1488925.60 frames. ], batch size: 26, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:23,367 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:37,824 INFO [train.py:901] (3/4) Epoch 24, batch 550, loss[loss=0.2157, simple_loss=0.2974, pruned_loss=0.067, over 6709.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2874, pruned_loss=0.06128, over 1520260.34 frames. 
], batch size: 71, lr: 3.18e-03, grad_scale: 16.0 +2023-02-07 06:26:40,778 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:26:55,567 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2670, 2.5659, 2.8975, 1.6380, 3.2173, 1.8870, 1.5409, 2.2326], + device='cuda:3'), covar=tensor([0.0835, 0.0396, 0.0278, 0.0851, 0.0485, 0.0981, 0.0991, 0.0519], + device='cuda:3'), in_proj_covar=tensor([0.0458, 0.0399, 0.0351, 0.0449, 0.0382, 0.0539, 0.0393, 0.0426], + device='cuda:3'), out_proj_covar=tensor([1.2231e-04, 1.0434e-04, 9.2115e-05, 1.1794e-04, 1.0032e-04, 1.5167e-04, + 1.0568e-04, 1.1243e-04], device='cuda:3') +2023-02-07 06:27:01,108 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186492.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:01,726 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:03,580 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.350e+02 3.005e+02 3.846e+02 7.955e+02, threshold=6.011e+02, percent-clipped=1.0 +2023-02-07 06:27:12,590 INFO [train.py:901] (3/4) Epoch 24, batch 600, loss[loss=0.291, simple_loss=0.3382, pruned_loss=0.1219, over 6693.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2857, pruned_loss=0.06006, over 1541236.29 frames. ], batch size: 71, lr: 3.17e-03, grad_scale: 16.0 +2023-02-07 06:27:19,661 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:21,530 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186520.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:23,338 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.23 vs. limit=5.0 +2023-02-07 06:27:26,202 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 06:27:29,796 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186532.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 06:27:31,744 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186535.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:40,312 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8007, 1.4581, 3.3051, 1.4340, 2.3563, 3.6099, 3.6790, 3.0820], + device='cuda:3'), covar=tensor([0.1276, 0.1802, 0.0346, 0.2070, 0.1036, 0.0221, 0.0493, 0.0543], + device='cuda:3'), in_proj_covar=tensor([0.0297, 0.0321, 0.0286, 0.0314, 0.0312, 0.0268, 0.0424, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:27:46,476 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186557.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:27:46,540 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186557.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 06:27:47,019 INFO [train.py:901] (3/4) Epoch 24, batch 650, loss[loss=0.2213, simple_loss=0.2951, pruned_loss=0.07375, over 8515.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2875, pruned_loss=0.06115, over 1557936.32 frames. 
], batch size: 26, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:27:52,098 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9608, 1.5018, 3.4251, 1.5942, 2.3993, 3.7962, 3.8836, 3.2232], + device='cuda:3'), covar=tensor([0.1228, 0.1920, 0.0340, 0.2087, 0.1118, 0.0230, 0.0447, 0.0579], + device='cuda:3'), in_proj_covar=tensor([0.0296, 0.0321, 0.0285, 0.0314, 0.0311, 0.0268, 0.0423, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:28:01,240 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186577.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:07,496 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:15,660 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.811e+02 2.377e+02 2.753e+02 3.513e+02 8.271e+02, threshold=5.505e+02, percent-clipped=2.0 +2023-02-07 06:28:20,750 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186604.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:23,403 INFO [train.py:901] (3/4) Epoch 24, batch 700, loss[loss=0.204, simple_loss=0.2873, pruned_loss=0.06029, over 8505.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2863, pruned_loss=0.06064, over 1572670.10 frames. ], batch size: 26, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:28:33,223 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186621.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:43,766 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186635.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:28:59,667 INFO [train.py:901] (3/4) Epoch 24, batch 750, loss[loss=0.2092, simple_loss=0.288, pruned_loss=0.06523, over 7641.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2862, pruned_loss=0.06053, over 1581725.20 frames. ], batch size: 19, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:11,913 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 06:29:16,211 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9031, 1.2017, 3.2556, 1.2399, 2.5441, 2.6090, 2.8868, 2.8882], + device='cuda:3'), covar=tensor([0.2066, 0.6575, 0.1659, 0.5671, 0.2907, 0.2224, 0.1708, 0.1680], + device='cuda:3'), in_proj_covar=tensor([0.0645, 0.0653, 0.0707, 0.0637, 0.0721, 0.0620, 0.0613, 0.0689], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:29:21,540 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 06:29:27,092 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.532e+02 3.077e+02 4.008e+02 9.294e+02, threshold=6.153e+02, percent-clipped=8.0 +2023-02-07 06:29:31,907 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 06:29:35,750 INFO [train.py:901] (3/4) Epoch 24, batch 800, loss[loss=0.1945, simple_loss=0.2742, pruned_loss=0.0574, over 8025.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2854, pruned_loss=0.06045, over 1586898.24 frames. 
], batch size: 22, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:29:48,937 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:29:56,174 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186736.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:11,912 INFO [train.py:901] (3/4) Epoch 24, batch 850, loss[loss=0.1961, simple_loss=0.2713, pruned_loss=0.06046, over 7547.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2848, pruned_loss=0.06, over 1594676.69 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:30:29,503 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:30:39,062 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.556e+02 2.330e+02 2.764e+02 3.350e+02 7.186e+02, threshold=5.528e+02, percent-clipped=2.0 +2023-02-07 06:30:47,648 INFO [train.py:901] (3/4) Epoch 24, batch 900, loss[loss=0.1822, simple_loss=0.2695, pruned_loss=0.04741, over 8654.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2862, pruned_loss=0.06026, over 1601651.19 frames. ], batch size: 49, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:05,956 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186833.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:08,596 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:15,807 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1957, 1.9233, 2.5277, 2.1055, 2.5675, 2.2363, 2.0337, 1.3529], + device='cuda:3'), covar=tensor([0.5532, 0.4881, 0.1990, 0.3321, 0.2150, 0.3016, 0.1950, 0.5049], + device='cuda:3'), in_proj_covar=tensor([0.0948, 0.0992, 0.0812, 0.0958, 0.0998, 0.0901, 0.0756, 0.0830], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:31:24,264 INFO [train.py:901] (3/4) Epoch 24, batch 950, loss[loss=0.2412, simple_loss=0.3149, pruned_loss=0.08374, over 8546.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2856, pruned_loss=0.06022, over 1608027.94 frames. ], batch size: 28, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:31:24,460 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:24,519 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186858.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:30,140 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2245, 1.6051, 1.7984, 1.4736, 1.0601, 1.6167, 1.8617, 2.0123], + device='cuda:3'), covar=tensor([0.0520, 0.1159, 0.1656, 0.1383, 0.0578, 0.1363, 0.0656, 0.0547], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0152, 0.0189, 0.0159, 0.0100, 0.0162, 0.0111, 0.0143], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0009, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 06:31:39,987 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186879.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:43,476 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-07 06:31:48,338 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186891.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:50,408 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0207, 3.6879, 2.1949, 2.7348, 2.5795, 2.0220, 2.5102, 2.9876], + device='cuda:3'), covar=tensor([0.1684, 0.0296, 0.1158, 0.0791, 0.0821, 0.1482, 0.1182, 0.1114], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0310, 0.0301, 0.0341, 0.0346, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 06:31:52,264 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 2.324e+02 2.850e+02 3.567e+02 7.043e+02, threshold=5.700e+02, percent-clipped=2.0 +2023-02-07 06:31:53,155 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:55,091 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:31:59,852 INFO [train.py:901] (3/4) Epoch 24, batch 1000, loss[loss=0.2125, simple_loss=0.2814, pruned_loss=0.07184, over 7707.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2857, pruned_loss=0.0601, over 1612627.37 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:32:05,641 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=186916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,432 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:15,510 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=186929.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:19,252 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0830, 3.5775, 2.2029, 2.7767, 2.6189, 1.9580, 2.6456, 2.9826], + device='cuda:3'), covar=tensor([0.1872, 0.0366, 0.1265, 0.0822, 0.0902, 0.1636, 0.1265, 0.1204], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0235, 0.0336, 0.0310, 0.0301, 0.0341, 0.0347, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 06:32:20,508 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 06:32:29,428 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=186948.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:32,169 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:32:33,326 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 06:32:36,112 INFO [train.py:901] (3/4) Epoch 24, batch 1050, loss[loss=0.2062, simple_loss=0.288, pruned_loss=0.06215, over 8589.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2848, pruned_loss=0.05942, over 1616517.92 frames. 
], batch size: 39, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:00,063 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3481, 1.5486, 4.5712, 1.6472, 4.0473, 3.7871, 4.1109, 3.9750], + device='cuda:3'), covar=tensor([0.0584, 0.4634, 0.0496, 0.4178, 0.1034, 0.0983, 0.0583, 0.0651], + device='cuda:3'), in_proj_covar=tensor([0.0642, 0.0649, 0.0703, 0.0634, 0.0718, 0.0618, 0.0610, 0.0686], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:33:01,572 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=186992.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:02,864 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=186994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:04,729 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.690e+02 2.475e+02 2.949e+02 3.829e+02 9.793e+02, threshold=5.897e+02, percent-clipped=8.0 +2023-02-07 06:33:12,439 INFO [train.py:901] (3/4) Epoch 24, batch 1100, loss[loss=0.1645, simple_loss=0.243, pruned_loss=0.04295, over 7699.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2857, pruned_loss=0.05992, over 1620118.18 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:12,697 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7399, 2.0176, 3.2091, 1.5495, 2.4530, 2.1300, 1.7608, 2.4307], + device='cuda:3'), covar=tensor([0.1833, 0.2679, 0.0943, 0.4682, 0.1887, 0.3204, 0.2393, 0.2271], + device='cuda:3'), in_proj_covar=tensor([0.0527, 0.0615, 0.0556, 0.0650, 0.0653, 0.0600, 0.0544, 0.0634], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:33:18,326 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:19,044 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:37,544 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:38,320 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:45,690 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 06:33:47,128 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:48,356 INFO [train.py:901] (3/4) Epoch 24, batch 1150, loss[loss=0.2134, simple_loss=0.3002, pruned_loss=0.06327, over 8137.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05984, over 1623626.52 frames. 
], batch size: 22, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:33:49,929 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0021, 1.5835, 3.3638, 1.5941, 2.4158, 3.6386, 3.7304, 3.1861], + device='cuda:3'), covar=tensor([0.1088, 0.1734, 0.0309, 0.1912, 0.1020, 0.0220, 0.0526, 0.0473], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0324, 0.0288, 0.0317, 0.0315, 0.0270, 0.0427, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:33:52,034 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187063.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:33:57,505 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187071.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:34:02,535 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6082, 2.4596, 1.8177, 2.3147, 2.0762, 1.5570, 2.0641, 2.1053], + device='cuda:3'), covar=tensor([0.1375, 0.0403, 0.1184, 0.0577, 0.0760, 0.1560, 0.0984, 0.0962], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0234, 0.0336, 0.0309, 0.0301, 0.0341, 0.0346, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 06:34:16,146 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.205e+02 2.742e+02 3.279e+02 6.267e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 06:34:24,629 INFO [train.py:901] (3/4) Epoch 24, batch 1200, loss[loss=0.1999, simple_loss=0.2917, pruned_loss=0.05408, over 8682.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2844, pruned_loss=0.05886, over 1623332.22 frames. ], batch size: 34, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:34:57,400 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:00,007 INFO [train.py:901] (3/4) Epoch 24, batch 1250, loss[loss=0.2264, simple_loss=0.3081, pruned_loss=0.07233, over 8452.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2843, pruned_loss=0.05909, over 1620875.35 frames. ], batch size: 27, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:15,149 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:19,762 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187186.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:27,942 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 2.417e+02 2.916e+02 3.659e+02 9.833e+02, threshold=5.832e+02, percent-clipped=6.0 +2023-02-07 06:35:30,894 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187202.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:35,746 INFO [train.py:901] (3/4) Epoch 24, batch 1300, loss[loss=0.2302, simple_loss=0.3098, pruned_loss=0.07534, over 8104.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.05918, over 1620923.50 frames. ], batch size: 23, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:35:35,979 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187208.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:35:51,429 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.01 vs. 
limit=5.0 +2023-02-07 06:35:53,902 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187233.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:05,785 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187250.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:10,945 INFO [train.py:901] (3/4) Epoch 24, batch 1350, loss[loss=0.1959, simple_loss=0.2914, pruned_loss=0.05018, over 8602.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2853, pruned_loss=0.0592, over 1623487.70 frames. ], batch size: 31, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:20,764 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187272.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:21,335 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:22,852 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187275.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,321 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:39,750 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.391e+02 3.088e+02 3.702e+02 1.176e+03, threshold=6.175e+02, percent-clipped=8.0 +2023-02-07 06:36:41,428 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187300.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:46,713 INFO [train.py:901] (3/4) Epoch 24, batch 1400, loss[loss=0.2093, simple_loss=0.2828, pruned_loss=0.06794, over 8446.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05965, over 1620613.81 frames. ], batch size: 27, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:36:52,969 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187317.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:54,426 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187319.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:36:59,284 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187325.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:03,370 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4911, 1.8041, 2.5281, 1.4101, 1.8583, 1.8147, 1.5969, 1.8015], + device='cuda:3'), covar=tensor([0.2046, 0.2510, 0.1094, 0.4492, 0.2032, 0.3284, 0.2380, 0.2498], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0617, 0.0558, 0.0652, 0.0653, 0.0600, 0.0545, 0.0636], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:37:05,350 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4513, 3.7062, 2.3885, 3.0134, 3.0181, 2.2089, 3.2048, 3.1182], + device='cuda:3'), covar=tensor([0.1357, 0.0359, 0.1090, 0.0657, 0.0665, 0.1366, 0.0836, 0.0963], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0233, 0.0335, 0.0308, 0.0300, 0.0338, 0.0346, 0.0316], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 06:37:12,871 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187344.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:16,708 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. 
limit=2.0 +2023-02-07 06:37:21,744 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 06:37:22,427 INFO [train.py:901] (3/4) Epoch 24, batch 1450, loss[loss=0.1891, simple_loss=0.2861, pruned_loss=0.04609, over 8334.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.285, pruned_loss=0.06022, over 1614323.92 frames. ], batch size: 26, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:37:42,336 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187387.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:43,074 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:44,408 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:49,534 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.482e+02 2.870e+02 4.012e+02 8.494e+02, threshold=5.740e+02, percent-clipped=8.0 +2023-02-07 06:37:51,822 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:37:56,987 INFO [train.py:901] (3/4) Epoch 24, batch 1500, loss[loss=0.2157, simple_loss=0.2926, pruned_loss=0.06939, over 8513.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2849, pruned_loss=0.05966, over 1614666.08 frames. ], batch size: 28, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:22,160 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:33,247 INFO [train.py:901] (3/4) Epoch 24, batch 1550, loss[loss=0.1796, simple_loss=0.2592, pruned_loss=0.05001, over 7928.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2844, pruned_loss=0.05951, over 1617237.93 frames. ], batch size: 20, lr: 3.17e-03, grad_scale: 4.0 +2023-02-07 06:38:39,570 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:38:59,888 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.508e+02 2.355e+02 2.764e+02 3.622e+02 7.454e+02, threshold=5.529e+02, percent-clipped=4.0 +2023-02-07 06:39:02,835 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:06,537 INFO [train.py:901] (3/4) Epoch 24, batch 1600, loss[loss=0.1773, simple_loss=0.2521, pruned_loss=0.05122, over 7704.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2841, pruned_loss=0.05961, over 1614667.47 frames. ], batch size: 18, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:11,512 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:22,052 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187530.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:27,623 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:30,383 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=187541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:39:41,434 INFO [train.py:901] (3/4) Epoch 24, batch 1650, loss[loss=0.2627, simple_loss=0.3359, pruned_loss=0.09481, over 8199.00 frames. 
], tot_loss[loss=0.202, simple_loss=0.2845, pruned_loss=0.05977, over 1615359.10 frames. ], batch size: 23, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:39:52,525 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:09,686 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.451e+02 2.921e+02 3.516e+02 7.853e+02, threshold=5.842e+02, percent-clipped=7.0 +2023-02-07 06:40:09,875 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187598.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:10,450 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9383, 1.4648, 4.3670, 2.3224, 2.4764, 5.0112, 5.1181, 4.3954], + device='cuda:3'), covar=tensor([0.1350, 0.1917, 0.0245, 0.1810, 0.1224, 0.0179, 0.0407, 0.0539], + device='cuda:3'), in_proj_covar=tensor([0.0295, 0.0319, 0.0283, 0.0312, 0.0311, 0.0267, 0.0422, 0.0301], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:40:16,433 INFO [train.py:901] (3/4) Epoch 24, batch 1700, loss[loss=0.1788, simple_loss=0.256, pruned_loss=0.05083, over 7429.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2849, pruned_loss=0.05973, over 1615141.07 frames. ], batch size: 17, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:40,841 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187644.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:40:50,899 INFO [train.py:901] (3/4) Epoch 24, batch 1750, loss[loss=0.1748, simple_loss=0.2582, pruned_loss=0.04573, over 7651.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2849, pruned_loss=0.05948, over 1617793.19 frames. ], batch size: 19, lr: 3.17e-03, grad_scale: 8.0 +2023-02-07 06:40:58,581 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187669.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:18,565 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.502e+02 3.000e+02 3.757e+02 9.885e+02, threshold=5.999e+02, percent-clipped=2.0 +2023-02-07 06:41:25,257 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 06:41:26,170 INFO [train.py:901] (3/4) Epoch 24, batch 1800, loss[loss=0.1544, simple_loss=0.2301, pruned_loss=0.03935, over 7438.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2849, pruned_loss=0.05983, over 1614622.51 frames. ], batch size: 17, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:43,300 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187734.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:41:59,225 INFO [train.py:901] (3/4) Epoch 24, batch 1850, loss[loss=0.2109, simple_loss=0.2802, pruned_loss=0.07084, over 7813.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2866, pruned_loss=0.06116, over 1612417.30 frames. 
], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:41:59,465 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187758.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:09,324 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=187771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:18,457 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:27,406 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=187796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:42:28,592 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.405e+02 2.492e+02 2.912e+02 4.002e+02 8.326e+02, threshold=5.824e+02, percent-clipped=6.0 +2023-02-07 06:42:36,372 INFO [train.py:901] (3/4) Epoch 24, batch 1900, loss[loss=0.2084, simple_loss=0.2923, pruned_loss=0.06226, over 8462.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.287, pruned_loss=0.06126, over 1611880.35 frames. ], batch size: 27, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:42:42,662 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1963, 4.1491, 3.7970, 2.0240, 3.7227, 3.8338, 3.6693, 3.6600], + device='cuda:3'), covar=tensor([0.0729, 0.0546, 0.0882, 0.4356, 0.0927, 0.1220, 0.1243, 0.0921], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0543, 0.0435, 0.0448, 0.0428, 0.0391], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:42:49,917 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.46 vs. limit=2.0 +2023-02-07 06:43:02,033 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 06:43:05,614 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 06:43:05,823 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187849.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:11,796 INFO [train.py:901] (3/4) Epoch 24, batch 1950, loss[loss=0.189, simple_loss=0.2626, pruned_loss=0.05776, over 8079.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2858, pruned_loss=0.06063, over 1609393.71 frames. ], batch size: 21, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:43:18,404 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 06:43:22,598 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187874.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:28,014 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187881.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:30,765 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=187885.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:43:39,205 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.274e+02 2.375e+02 2.745e+02 3.412e+02 6.105e+02, threshold=5.491e+02, percent-clipped=1.0 +2023-02-07 06:43:39,237 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. 
Duration: 25.0944375 +2023-02-07 06:43:42,660 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9279, 2.1853, 3.6833, 1.8491, 1.8683, 3.6838, 0.7303, 2.1330], + device='cuda:3'), covar=tensor([0.1268, 0.1510, 0.0293, 0.1655, 0.2533, 0.0246, 0.2120, 0.1268], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0200, 0.0132, 0.0222, 0.0273, 0.0137, 0.0171, 0.0194], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 06:43:46,291 INFO [train.py:901] (3/4) Epoch 24, batch 2000, loss[loss=0.2075, simple_loss=0.2973, pruned_loss=0.05881, over 8466.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2854, pruned_loss=0.06024, over 1606253.12 frames. ], batch size: 29, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:21,546 INFO [train.py:901] (3/4) Epoch 24, batch 2050, loss[loss=0.1985, simple_loss=0.2863, pruned_loss=0.05532, over 8634.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2848, pruned_loss=0.06004, over 1602660.45 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:44:23,780 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4771, 1.8438, 1.9038, 1.0824, 1.9728, 1.4512, 0.4341, 1.7129], + device='cuda:3'), covar=tensor([0.0549, 0.0372, 0.0298, 0.0624, 0.0452, 0.0949, 0.0944, 0.0316], + device='cuda:3'), in_proj_covar=tensor([0.0458, 0.0400, 0.0354, 0.0450, 0.0384, 0.0539, 0.0394, 0.0425], + device='cuda:3'), out_proj_covar=tensor([1.2219e-04, 1.0450e-04, 9.3072e-05, 1.1817e-04, 1.0087e-04, 1.5174e-04, + 1.0582e-04, 1.1198e-04], device='cuda:3') +2023-02-07 06:44:42,298 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187989.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:46,914 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=187996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:48,824 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.387e+02 2.958e+02 3.531e+02 6.524e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 06:44:51,434 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188000.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:44:56,705 INFO [train.py:901] (3/4) Epoch 24, batch 2100, loss[loss=0.2149, simple_loss=0.306, pruned_loss=0.06188, over 8746.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2862, pruned_loss=0.06089, over 1607340.63 frames. ], batch size: 30, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:45:24,480 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3202, 2.0035, 2.5382, 2.2119, 2.4943, 2.3708, 2.1909, 1.3055], + device='cuda:3'), covar=tensor([0.5150, 0.4640, 0.1885, 0.3476, 0.2413, 0.2965, 0.1902, 0.5254], + device='cuda:3'), in_proj_covar=tensor([0.0954, 0.1003, 0.0820, 0.0967, 0.1004, 0.0911, 0.0763, 0.0837], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:45:32,170 INFO [train.py:901] (3/4) Epoch 24, batch 2150, loss[loss=0.1898, simple_loss=0.2679, pruned_loss=0.05591, over 8468.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2862, pruned_loss=0.06075, over 1612585.20 frames. 
], batch size: 27, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:45:35,777 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2697, 2.0424, 2.7943, 2.3799, 2.7379, 2.2871, 2.0779, 1.4398], + device='cuda:3'), covar=tensor([0.5583, 0.5112, 0.1971, 0.3624, 0.2435, 0.3281, 0.2000, 0.5709], + device='cuda:3'), in_proj_covar=tensor([0.0954, 0.1003, 0.0820, 0.0967, 0.1004, 0.0911, 0.0763, 0.0837], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:45:58,768 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.464e+02 3.048e+02 3.692e+02 7.821e+02, threshold=6.095e+02, percent-clipped=5.0 +2023-02-07 06:46:03,903 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:05,702 INFO [train.py:901] (3/4) Epoch 24, batch 2200, loss[loss=0.2213, simple_loss=0.298, pruned_loss=0.07231, over 8085.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2855, pruned_loss=0.06003, over 1616064.68 frames. ], batch size: 21, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:46:21,948 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188130.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:46:41,366 INFO [train.py:901] (3/4) Epoch 24, batch 2250, loss[loss=0.1691, simple_loss=0.2606, pruned_loss=0.0388, over 7821.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2838, pruned_loss=0.05911, over 1616074.75 frames. ], batch size: 20, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:09,411 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 2.349e+02 3.047e+02 3.898e+02 9.680e+02, threshold=6.095e+02, percent-clipped=4.0 +2023-02-07 06:47:16,303 INFO [train.py:901] (3/4) Epoch 24, batch 2300, loss[loss=0.1664, simple_loss=0.2472, pruned_loss=0.0428, over 7557.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.283, pruned_loss=0.05873, over 1616247.48 frames. ], batch size: 18, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:47:42,370 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:47,131 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:50,573 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=188256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:47:51,707 INFO [train.py:901] (3/4) Epoch 24, batch 2350, loss[loss=0.2417, simple_loss=0.3259, pruned_loss=0.07877, over 8341.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2831, pruned_loss=0.05942, over 1611120.43 frames. 
], batch size: 26, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:00,185 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188270.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:05,507 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188277.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:08,191 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=188281.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:20,104 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.514e+02 3.085e+02 3.939e+02 8.316e+02, threshold=6.171e+02, percent-clipped=4.0 +2023-02-07 06:48:22,398 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188301.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:48:27,100 INFO [train.py:901] (3/4) Epoch 24, batch 2400, loss[loss=0.2053, simple_loss=0.2781, pruned_loss=0.06627, over 7646.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2837, pruned_loss=0.05989, over 1609708.28 frames. ], batch size: 19, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:48:30,046 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8272, 1.5501, 3.3976, 1.4940, 2.3495, 3.7683, 3.9518, 3.2998], + device='cuda:3'), covar=tensor([0.1400, 0.1940, 0.0388, 0.2389, 0.1260, 0.0261, 0.0543, 0.0537], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0323, 0.0285, 0.0315, 0.0314, 0.0270, 0.0425, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 06:49:00,630 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0 +2023-02-07 06:49:02,252 INFO [train.py:901] (3/4) Epoch 24, batch 2450, loss[loss=0.231, simple_loss=0.2995, pruned_loss=0.08127, over 7602.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2841, pruned_loss=0.0599, over 1605042.95 frames. ], batch size: 71, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:49:30,925 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.263e+02 2.982e+02 3.612e+02 7.179e+02, threshold=5.965e+02, percent-clipped=1.0 +2023-02-07 06:49:38,097 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 06:49:38,407 INFO [train.py:901] (3/4) Epoch 24, batch 2500, loss[loss=0.205, simple_loss=0.2928, pruned_loss=0.05858, over 8106.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2842, pruned_loss=0.05966, over 1608367.92 frames. ], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:11,884 INFO [train.py:901] (3/4) Epoch 24, batch 2550, loss[loss=0.2131, simple_loss=0.2905, pruned_loss=0.06782, over 8449.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2834, pruned_loss=0.05942, over 1611961.05 frames. ], batch size: 27, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:50:40,459 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.347e+02 2.538e+02 2.905e+02 3.766e+02 9.788e+02, threshold=5.809e+02, percent-clipped=4.0 +2023-02-07 06:50:41,317 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:50:47,918 INFO [train.py:901] (3/4) Epoch 24, batch 2600, loss[loss=0.1928, simple_loss=0.2704, pruned_loss=0.05762, over 8139.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2828, pruned_loss=0.05888, over 1614221.37 frames. 
], batch size: 22, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:21,921 INFO [train.py:901] (3/4) Epoch 24, batch 2650, loss[loss=0.2184, simple_loss=0.2999, pruned_loss=0.06847, over 8266.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2829, pruned_loss=0.05925, over 1612896.20 frames. ], batch size: 24, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:51:31,890 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.54 vs. limit=2.0 +2023-02-07 06:51:41,777 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0 +2023-02-07 06:51:48,604 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.561e+02 2.338e+02 2.924e+02 3.924e+02 7.774e+02, threshold=5.847e+02, percent-clipped=4.0 +2023-02-07 06:51:55,400 INFO [train.py:901] (3/4) Epoch 24, batch 2700, loss[loss=0.1943, simple_loss=0.2823, pruned_loss=0.05313, over 8260.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2836, pruned_loss=0.05966, over 1615260.54 frames. ], batch size: 24, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:02,969 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9497, 6.2311, 5.3247, 2.9968, 5.4800, 5.8345, 5.6134, 5.6454], + device='cuda:3'), covar=tensor([0.0420, 0.0314, 0.0808, 0.3839, 0.0667, 0.0739, 0.0957, 0.0540], + device='cuda:3'), in_proj_covar=tensor([0.0521, 0.0439, 0.0426, 0.0535, 0.0426, 0.0441, 0.0421, 0.0386], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 06:52:21,732 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188645.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:52:31,269 INFO [train.py:901] (3/4) Epoch 24, batch 2750, loss[loss=0.2124, simple_loss=0.3018, pruned_loss=0.06144, over 8703.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.284, pruned_loss=0.06012, over 1614482.93 frames. ], batch size: 34, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:52:57,783 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.419e+02 2.375e+02 3.087e+02 4.139e+02 1.460e+03, threshold=6.174e+02, percent-clipped=4.0 +2023-02-07 06:53:05,388 INFO [train.py:901] (3/4) Epoch 24, batch 2800, loss[loss=0.2051, simple_loss=0.2942, pruned_loss=0.058, over 8292.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2842, pruned_loss=0.06026, over 1611135.06 frames. ], batch size: 23, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:23,062 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1930, 2.0390, 2.6821, 2.1691, 2.6398, 2.2743, 2.0911, 1.3987], + device='cuda:3'), covar=tensor([0.5541, 0.4838, 0.2089, 0.3992, 0.2561, 0.3396, 0.2015, 0.5702], + device='cuda:3'), in_proj_covar=tensor([0.0947, 0.0992, 0.0814, 0.0956, 0.0995, 0.0905, 0.0756, 0.0832], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 06:53:40,611 INFO [train.py:901] (3/4) Epoch 24, batch 2850, loss[loss=0.2125, simple_loss=0.2949, pruned_loss=0.06508, over 8456.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2841, pruned_loss=0.06007, over 1612650.22 frames. 
], batch size: 27, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:53:42,193 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188760.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:07,932 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.346e+02 3.064e+02 3.754e+02 6.997e+02, threshold=6.129e+02, percent-clipped=3.0 +2023-02-07 06:54:14,851 INFO [train.py:901] (3/4) Epoch 24, batch 2900, loss[loss=0.2036, simple_loss=0.2919, pruned_loss=0.05758, over 8472.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2845, pruned_loss=0.06009, over 1613821.56 frames. ], batch size: 25, lr: 3.16e-03, grad_scale: 8.0 +2023-02-07 06:54:39,478 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=188843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:54:48,793 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4132, 1.5009, 1.3963, 1.8309, 0.7662, 1.2634, 1.3931, 1.4876], + device='cuda:3'), covar=tensor([0.0822, 0.0703, 0.0922, 0.0490, 0.1088, 0.1326, 0.0646, 0.0732], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0197, 0.0242, 0.0214, 0.0205, 0.0246, 0.0249, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 06:54:51,387 INFO [train.py:901] (3/4) Epoch 24, batch 2950, loss[loss=0.2183, simple_loss=0.3059, pruned_loss=0.06532, over 8280.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2853, pruned_loss=0.06044, over 1609798.76 frames. ], batch size: 23, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:54:51,396 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 06:55:19,114 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.217e+02 2.685e+02 3.700e+02 9.567e+02, threshold=5.370e+02, percent-clipped=4.0 +2023-02-07 06:55:25,889 INFO [train.py:901] (3/4) Epoch 24, batch 3000, loss[loss=0.1726, simple_loss=0.264, pruned_loss=0.04056, over 7970.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2856, pruned_loss=0.06067, over 1611555.26 frames. ], batch size: 21, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:55:25,889 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 06:55:39,554 INFO [train.py:935] (3/4) Epoch 24, validation: loss=0.1724, simple_loss=0.2726, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 06:55:39,555 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 06:56:06,004 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188947.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:07,364 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=188949.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:13,969 INFO [train.py:901] (3/4) Epoch 24, batch 3050, loss[loss=0.2196, simple_loss=0.3095, pruned_loss=0.06483, over 8193.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2867, pruned_loss=0.06108, over 1614933.05 frames. 
], batch size: 23, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:14,159 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=188958.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:56:41,562 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.422e+02 3.010e+02 3.817e+02 9.746e+02, threshold=6.020e+02, percent-clipped=4.0 +2023-02-07 06:56:49,126 INFO [train.py:901] (3/4) Epoch 24, batch 3100, loss[loss=0.2726, simple_loss=0.3531, pruned_loss=0.09601, over 8191.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2871, pruned_loss=0.06133, over 1613062.25 frames. ], batch size: 23, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:56:54,808 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189016.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:12,122 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189041.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:16,980 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189048.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:23,537 INFO [train.py:901] (3/4) Epoch 24, batch 3150, loss[loss=0.2403, simple_loss=0.3192, pruned_loss=0.0807, over 7219.00 frames. ], tot_loss[loss=0.2057, simple_loss=0.2877, pruned_loss=0.0618, over 1609102.71 frames. ], batch size: 71, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:57:40,666 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189082.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:50,853 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:57:51,369 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.687e+02 2.394e+02 2.917e+02 3.565e+02 6.979e+02, threshold=5.834e+02, percent-clipped=3.0 +2023-02-07 06:57:59,743 INFO [train.py:901] (3/4) Epoch 24, batch 3200, loss[loss=0.1944, simple_loss=0.287, pruned_loss=0.05084, over 8244.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2884, pruned_loss=0.06196, over 1613391.61 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 8.0 +2023-02-07 06:58:33,957 INFO [train.py:901] (3/4) Epoch 24, batch 3250, loss[loss=0.2789, simple_loss=0.3339, pruned_loss=0.1119, over 6921.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2883, pruned_loss=0.06184, over 1613517.18 frames. ], batch size: 71, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:01,533 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.438e+02 3.003e+02 3.759e+02 6.490e+02, threshold=6.005e+02, percent-clipped=4.0 +2023-02-07 06:59:08,532 INFO [train.py:901] (3/4) Epoch 24, batch 3300, loss[loss=0.1899, simple_loss=0.278, pruned_loss=0.05088, over 7813.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.287, pruned_loss=0.06129, over 1616435.36 frames. ], batch size: 20, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:12,861 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:30,906 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 06:59:44,170 INFO [train.py:901] (3/4) Epoch 24, batch 3350, loss[loss=0.1849, simple_loss=0.2587, pruned_loss=0.05553, over 7796.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2863, pruned_loss=0.06093, over 1612848.20 frames. 
], batch size: 19, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 06:59:49,412 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189266.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:05,166 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8460, 1.8539, 1.9398, 1.6534, 0.9637, 1.8121, 2.1949, 1.9790], + device='cuda:3'), covar=tensor([0.0432, 0.1111, 0.1612, 0.1348, 0.0586, 0.1342, 0.0593, 0.0591], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 07:00:05,777 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189291.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:07,055 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189293.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:10,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.367e+02 2.967e+02 3.575e+02 9.298e+02, threshold=5.934e+02, percent-clipped=5.0 +2023-02-07 07:00:17,750 INFO [train.py:901] (3/4) Epoch 24, batch 3400, loss[loss=0.2061, simple_loss=0.2809, pruned_loss=0.0656, over 7781.00 frames. ], tot_loss[loss=0.2044, simple_loss=0.2863, pruned_loss=0.06125, over 1610982.97 frames. ], batch size: 19, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:19,422 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 07:00:50,350 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-07 07:00:52,451 INFO [train.py:901] (3/4) Epoch 24, batch 3450, loss[loss=0.1773, simple_loss=0.2594, pruned_loss=0.04762, over 7804.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2856, pruned_loss=0.06109, over 1608113.75 frames. 
], batch size: 19, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:00:57,500 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:00:58,320 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1031, 1.8480, 2.3794, 2.0636, 2.3866, 2.1672, 1.9562, 1.1682], + device='cuda:3'), covar=tensor([0.5806, 0.5028, 0.1995, 0.3678, 0.2368, 0.3071, 0.1950, 0.5307], + device='cuda:3'), in_proj_covar=tensor([0.0944, 0.0992, 0.0811, 0.0957, 0.0995, 0.0903, 0.0755, 0.0828], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:01:04,474 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8268, 1.4600, 3.5408, 1.6039, 2.5422, 3.8565, 4.0365, 3.3080], + device='cuda:3'), covar=tensor([0.1314, 0.1791, 0.0301, 0.2094, 0.0951, 0.0229, 0.0551, 0.0535], + device='cuda:3'), in_proj_covar=tensor([0.0299, 0.0323, 0.0285, 0.0316, 0.0314, 0.0270, 0.0426, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 07:01:16,613 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:20,611 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.399e+02 2.884e+02 3.624e+02 7.571e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 07:01:20,796 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3430, 1.3971, 4.5668, 1.8059, 4.0262, 3.8171, 4.1225, 3.9664], + device='cuda:3'), covar=tensor([0.0688, 0.5001, 0.0527, 0.4165, 0.1131, 0.0883, 0.0624, 0.0747], + device='cuda:3'), in_proj_covar=tensor([0.0650, 0.0657, 0.0716, 0.0647, 0.0725, 0.0622, 0.0618, 0.0697], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:01:26,260 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:27,406 INFO [train.py:901] (3/4) Epoch 24, batch 3500, loss[loss=0.1911, simple_loss=0.2841, pruned_loss=0.04903, over 8342.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2861, pruned_loss=0.06151, over 1609451.38 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:01:27,611 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:40,501 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-07 07:01:40,600 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189426.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:50,805 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189441.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:01:58,236 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5580, 1.2878, 2.8876, 1.5319, 2.1634, 3.0635, 3.2443, 2.6255], + device='cuda:3'), covar=tensor([0.1269, 0.1770, 0.0369, 0.2029, 0.0913, 0.0339, 0.0635, 0.0632], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0322, 0.0285, 0.0316, 0.0314, 0.0270, 0.0427, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 07:02:03,505 INFO [train.py:901] (3/4) Epoch 24, batch 3550, loss[loss=0.2022, simple_loss=0.2889, pruned_loss=0.05777, over 8500.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2859, pruned_loss=0.06117, over 1608046.31 frames. ], batch size: 29, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:31,305 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.509e+02 2.981e+02 3.708e+02 7.370e+02, threshold=5.962e+02, percent-clipped=4.0 +2023-02-07 07:02:33,858 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-07 07:02:37,645 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:02:38,137 INFO [train.py:901] (3/4) Epoch 24, batch 3600, loss[loss=0.1773, simple_loss=0.251, pruned_loss=0.05182, over 7245.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2849, pruned_loss=0.06013, over 1609383.82 frames. ], batch size: 16, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:02:45,842 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189519.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:02:58,695 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 07:02:59,867 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9342, 1.6059, 1.8304, 1.4907, 0.9266, 1.6409, 1.8388, 1.5772], + device='cuda:3'), covar=tensor([0.0519, 0.1177, 0.1557, 0.1349, 0.0588, 0.1409, 0.0615, 0.0620], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0190, 0.0160, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 07:03:01,354 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-07 07:03:01,842 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189541.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:12,159 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189556.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:03:13,414 INFO [train.py:901] (3/4) Epoch 24, batch 3650, loss[loss=0.202, simple_loss=0.2652, pruned_loss=0.06936, over 7270.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2846, pruned_loss=0.06035, over 1610084.64 frames. ], batch size: 16, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:41,107 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-07 07:03:41,750 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 2.496e+02 2.930e+02 3.600e+02 6.319e+02, threshold=5.860e+02, percent-clipped=2.0 +2023-02-07 07:03:48,376 INFO [train.py:901] (3/4) Epoch 24, batch 3700, loss[loss=0.1695, simple_loss=0.2518, pruned_loss=0.04364, over 7534.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2838, pruned_loss=0.05977, over 1609967.32 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:03:49,802 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:23,128 INFO [train.py:901] (3/4) Epoch 24, batch 3750, loss[loss=0.1934, simple_loss=0.2659, pruned_loss=0.06051, over 7423.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2855, pruned_loss=0.06019, over 1608823.03 frames. ], batch size: 17, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:23,299 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189658.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:26,014 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:27,231 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189664.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:42,765 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189687.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:43,976 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189689.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:04:51,127 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.609e+02 3.129e+02 4.249e+02 7.016e+02, threshold=6.258e+02, percent-clipped=8.0 +2023-02-07 07:04:57,798 INFO [train.py:901] (3/4) Epoch 24, batch 3800, loss[loss=0.211, simple_loss=0.2983, pruned_loss=0.06185, over 8529.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2856, pruned_loss=0.0603, over 1608295.00 frames. ], batch size: 28, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:04:58,621 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189709.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:05,504 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2360, 2.0832, 2.7121, 2.2613, 2.7157, 2.3233, 2.0942, 1.5750], + device='cuda:3'), covar=tensor([0.5462, 0.4860, 0.2007, 0.3742, 0.2277, 0.3120, 0.1809, 0.5237], + device='cuda:3'), in_proj_covar=tensor([0.0946, 0.0994, 0.0812, 0.0961, 0.0999, 0.0905, 0.0756, 0.0831], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:05:07,479 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:09,545 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189725.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:24,369 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=189746.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:32,060 INFO [train.py:901] (3/4) Epoch 24, batch 3850, loss[loss=0.1979, simple_loss=0.2861, pruned_loss=0.05485, over 8531.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2861, pruned_loss=0.06024, over 1609566.02 frames. 
], batch size: 31, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:05:35,656 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189763.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:47,661 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 07:05:53,013 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189788.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:59,101 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:05:59,534 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.407e+02 2.910e+02 3.432e+02 8.251e+02, threshold=5.819e+02, percent-clipped=1.0 +2023-02-07 07:06:06,326 INFO [train.py:901] (3/4) Epoch 24, batch 3900, loss[loss=0.1808, simple_loss=0.2744, pruned_loss=0.04357, over 8257.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2857, pruned_loss=0.05976, over 1613164.42 frames. ], batch size: 24, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:10,783 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189812.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:17,542 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189822.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:18,893 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189824.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:27,526 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=189837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:06:27,536 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9016, 1.7240, 2.5373, 1.7540, 1.3629, 2.4634, 0.5287, 1.6667], + device='cuda:3'), covar=tensor([0.1337, 0.0993, 0.0247, 0.0997, 0.2169, 0.0289, 0.1930, 0.1044], + device='cuda:3'), in_proj_covar=tensor([0.0191, 0.0200, 0.0130, 0.0221, 0.0269, 0.0137, 0.0170, 0.0194], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:06:42,146 INFO [train.py:901] (3/4) Epoch 24, batch 3950, loss[loss=0.2028, simple_loss=0.269, pruned_loss=0.06828, over 7558.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05979, over 1611498.54 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:06:45,569 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=189863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:07:07,915 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 07:07:09,586 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.387e+02 3.217e+02 3.997e+02 8.874e+02, threshold=6.434e+02, percent-clipped=5.0 +2023-02-07 07:07:16,321 INFO [train.py:901] (3/4) Epoch 24, batch 4000, loss[loss=0.1721, simple_loss=0.2469, pruned_loss=0.04868, over 7539.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2853, pruned_loss=0.05956, over 1617681.23 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:07:51,147 INFO [train.py:901] (3/4) Epoch 24, batch 4050, loss[loss=0.195, simple_loss=0.2841, pruned_loss=0.05296, over 8241.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2856, pruned_loss=0.059, over 1622909.52 frames. 
], batch size: 22, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:08:02,816 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7630, 1.9822, 2.0886, 1.5063, 2.2304, 1.5678, 0.8914, 1.9579], + device='cuda:3'), covar=tensor([0.0614, 0.0355, 0.0273, 0.0579, 0.0400, 0.0820, 0.0907, 0.0309], + device='cuda:3'), in_proj_covar=tensor([0.0455, 0.0396, 0.0353, 0.0450, 0.0381, 0.0534, 0.0394, 0.0425], + device='cuda:3'), out_proj_covar=tensor([1.2137e-04, 1.0340e-04, 9.2506e-05, 1.1828e-04, 1.0005e-04, 1.4993e-04, + 1.0600e-04, 1.1207e-04], device='cuda:3') +2023-02-07 07:08:05,479 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=189978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:07,504 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=189981.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:18,671 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.334e+02 2.770e+02 3.399e+02 1.124e+03, threshold=5.539e+02, percent-clipped=1.0 +2023-02-07 07:08:22,558 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:26,239 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:08:27,378 INFO [train.py:901] (3/4) Epoch 24, batch 4100, loss[loss=0.2115, simple_loss=0.291, pruned_loss=0.06597, over 8189.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2846, pruned_loss=0.05894, over 1618907.99 frames. ], batch size: 23, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:08:36,503 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 07:09:02,428 INFO [train.py:901] (3/4) Epoch 24, batch 4150, loss[loss=0.172, simple_loss=0.2513, pruned_loss=0.04636, over 7538.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2846, pruned_loss=0.0591, over 1617337.39 frames. ], batch size: 18, lr: 3.15e-03, grad_scale: 16.0 +2023-02-07 07:09:08,098 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:15,374 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 07:09:17,890 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190080.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:25,156 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:30,574 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.686e+02 2.377e+02 2.724e+02 3.400e+02 7.023e+02, threshold=5.448e+02, percent-clipped=3.0 +2023-02-07 07:09:35,503 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190105.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:37,386 INFO [train.py:901] (3/4) Epoch 24, batch 4200, loss[loss=0.2011, simple_loss=0.2863, pruned_loss=0.05792, over 8334.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05843, over 1615817.35 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:09:43,678 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190117.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:09:48,188 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. 
Duration: 25.035 +2023-02-07 07:09:54,431 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6470, 2.3985, 3.2340, 2.5799, 3.2192, 2.7131, 2.5188, 1.9671], + device='cuda:3'), covar=tensor([0.5210, 0.5127, 0.1932, 0.3935, 0.2324, 0.2751, 0.1675, 0.5401], + device='cuda:3'), in_proj_covar=tensor([0.0943, 0.0992, 0.0813, 0.0960, 0.1000, 0.0905, 0.0757, 0.0830], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:10:04,794 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7101, 1.5220, 1.8000, 1.6072, 1.7612, 1.7471, 1.6042, 0.7777], + device='cuda:3'), covar=tensor([0.5234, 0.4323, 0.2016, 0.3199, 0.2343, 0.2913, 0.1824, 0.4725], + device='cuda:3'), in_proj_covar=tensor([0.0943, 0.0992, 0.0814, 0.0960, 0.1001, 0.0905, 0.0756, 0.0830], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:10:10,652 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 07:10:11,424 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190157.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:10:11,869 INFO [train.py:901] (3/4) Epoch 24, batch 4250, loss[loss=0.1859, simple_loss=0.2697, pruned_loss=0.05108, over 7200.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2837, pruned_loss=0.05865, over 1613450.95 frames. ], batch size: 16, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:19,365 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190169.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:28,691 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:40,140 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 2.309e+02 2.865e+02 3.517e+02 8.092e+02, threshold=5.730e+02, percent-clipped=6.0 +2023-02-07 07:10:45,775 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190205.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:10:47,629 INFO [train.py:901] (3/4) Epoch 24, batch 4300, loss[loss=0.1791, simple_loss=0.2733, pruned_loss=0.04245, over 8338.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2843, pruned_loss=0.05891, over 1611201.49 frames. ], batch size: 25, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:10:56,762 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. limit=5.0 +2023-02-07 07:11:05,307 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190234.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:21,909 INFO [train.py:901] (3/4) Epoch 24, batch 4350, loss[loss=0.1673, simple_loss=0.2455, pruned_loss=0.04457, over 7704.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2845, pruned_loss=0.05846, over 1615030.30 frames. 
], batch size: 18, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:11:22,805 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190259.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:11:31,583 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0395, 1.7960, 2.3215, 1.9731, 2.2876, 2.0917, 1.9157, 1.1675], + device='cuda:3'), covar=tensor([0.5926, 0.4921, 0.2024, 0.3981, 0.2582, 0.3308, 0.2153, 0.5548], + device='cuda:3'), in_proj_covar=tensor([0.0948, 0.0994, 0.0815, 0.0963, 0.1003, 0.0906, 0.0758, 0.0832], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:11:40,177 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 07:11:50,403 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.430e+02 2.823e+02 3.493e+02 1.012e+03, threshold=5.646e+02, percent-clipped=3.0 +2023-02-07 07:11:51,035 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 07:11:55,518 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4063, 1.6688, 1.6622, 1.0776, 1.6745, 1.3207, 0.3080, 1.6422], + device='cuda:3'), covar=tensor([0.0504, 0.0391, 0.0386, 0.0552, 0.0456, 0.0962, 0.0908, 0.0283], + device='cuda:3'), in_proj_covar=tensor([0.0456, 0.0397, 0.0353, 0.0450, 0.0382, 0.0535, 0.0395, 0.0426], + device='cuda:3'), out_proj_covar=tensor([1.2174e-04, 1.0369e-04, 9.2694e-05, 1.1833e-04, 1.0045e-04, 1.4998e-04, + 1.0619e-04, 1.1234e-04], device='cuda:3') +2023-02-07 07:11:57,336 INFO [train.py:901] (3/4) Epoch 24, batch 4400, loss[loss=0.2109, simple_loss=0.2909, pruned_loss=0.06546, over 7917.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2855, pruned_loss=0.05917, over 1613385.32 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:05,647 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9920, 3.5047, 2.2953, 2.9548, 2.8979, 2.0271, 2.8742, 2.9223], + device='cuda:3'), covar=tensor([0.1905, 0.0420, 0.1262, 0.0742, 0.0690, 0.1435, 0.1061, 0.1154], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0235, 0.0338, 0.0312, 0.0299, 0.0343, 0.0346, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 07:12:15,563 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:12:16,330 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7977, 2.0574, 2.1874, 1.3855, 2.3233, 1.5745, 0.7933, 1.9770], + device='cuda:3'), covar=tensor([0.0670, 0.0421, 0.0357, 0.0718, 0.0470, 0.1019, 0.0923, 0.0361], + device='cuda:3'), in_proj_covar=tensor([0.0457, 0.0398, 0.0354, 0.0451, 0.0384, 0.0536, 0.0395, 0.0427], + device='cuda:3'), out_proj_covar=tensor([1.2195e-04, 1.0388e-04, 9.2742e-05, 1.1853e-04, 1.0082e-04, 1.5028e-04, + 1.0625e-04, 1.1252e-04], device='cuda:3') +2023-02-07 07:12:21,443 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 07:12:23,132 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 07:12:32,981 INFO [train.py:901] (3/4) Epoch 24, batch 4450, loss[loss=0.1894, simple_loss=0.2726, pruned_loss=0.05307, over 7977.00 frames. 
], tot_loss[loss=0.1992, simple_loss=0.283, pruned_loss=0.05774, over 1610081.00 frames. ], batch size: 21, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:12:43,413 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:00,241 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.418e+02 2.201e+02 2.691e+02 3.403e+02 6.534e+02, threshold=5.381e+02, percent-clipped=2.0 +2023-02-07 07:13:00,487 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190398.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:07,779 INFO [train.py:901] (3/4) Epoch 24, batch 4500, loss[loss=0.217, simple_loss=0.2973, pruned_loss=0.0683, over 8504.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05845, over 1610112.82 frames. ], batch size: 29, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:14,945 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:17,443 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 07:13:29,012 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190437.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:33,599 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8486, 1.7571, 2.4474, 1.6792, 1.3552, 2.4097, 0.4028, 1.5399], + device='cuda:3'), covar=tensor([0.1549, 0.1241, 0.0302, 0.1161, 0.2542, 0.0354, 0.1997, 0.1176], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0222, 0.0272, 0.0138, 0.0171, 0.0196], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:13:42,697 INFO [train.py:901] (3/4) Epoch 24, batch 4550, loss[loss=0.2043, simple_loss=0.2731, pruned_loss=0.0677, over 7695.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2838, pruned_loss=0.05877, over 1614715.38 frames. ], batch size: 18, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:13:44,284 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 07:13:44,879 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:13:45,494 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190462.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:02,627 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190486.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:10,720 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.430e+02 2.973e+02 3.981e+02 9.647e+02, threshold=5.946e+02, percent-clipped=9.0 +2023-02-07 07:14:12,834 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190501.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:14:17,565 INFO [train.py:901] (3/4) Epoch 24, batch 4600, loss[loss=0.1895, simple_loss=0.2698, pruned_loss=0.0546, over 8117.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2841, pruned_loss=0.05949, over 1612301.93 frames. 
], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:14:21,251 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190513.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:14:40,894 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 07:14:50,269 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0697, 1.2528, 1.2248, 0.8118, 1.2308, 1.0721, 0.1380, 1.1887], + device='cuda:3'), covar=tensor([0.0444, 0.0424, 0.0380, 0.0574, 0.0525, 0.1017, 0.0901, 0.0346], + device='cuda:3'), in_proj_covar=tensor([0.0458, 0.0399, 0.0356, 0.0454, 0.0385, 0.0538, 0.0397, 0.0429], + device='cuda:3'), out_proj_covar=tensor([1.2233e-04, 1.0417e-04, 9.3240e-05, 1.1914e-04, 1.0106e-04, 1.5096e-04, + 1.0660e-04, 1.1314e-04], device='cuda:3') +2023-02-07 07:14:54,259 INFO [train.py:901] (3/4) Epoch 24, batch 4650, loss[loss=0.1769, simple_loss=0.2644, pruned_loss=0.0447, over 8511.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2835, pruned_loss=0.05893, over 1613331.32 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:06,809 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.8114, 0.7230, 0.8892, 0.7701, 0.6584, 0.8965, 0.1178, 0.7437], + device='cuda:3'), covar=tensor([0.0996, 0.0856, 0.0348, 0.0573, 0.1662, 0.0378, 0.1483, 0.0886], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0221, 0.0270, 0.0138, 0.0170, 0.0195], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:15:22,379 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.616e+02 2.469e+02 2.987e+02 3.787e+02 1.231e+03, threshold=5.974e+02, percent-clipped=5.0 +2023-02-07 07:15:29,196 INFO [train.py:901] (3/4) Epoch 24, batch 4700, loss[loss=0.2197, simple_loss=0.3057, pruned_loss=0.06686, over 8494.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2841, pruned_loss=0.05938, over 1610374.23 frames. ], batch size: 26, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:15:29,951 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190609.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:15:34,413 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190616.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:15:41,744 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:15:42,480 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190628.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:15:43,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6033, 1.9816, 3.2678, 1.4189, 2.5512, 2.0301, 1.7599, 2.4487], + device='cuda:3'), covar=tensor([0.2082, 0.2884, 0.0880, 0.4920, 0.1915, 0.3380, 0.2430, 0.2474], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0616, 0.0556, 0.0654, 0.0651, 0.0600, 0.0548, 0.0637], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:16:04,014 INFO [train.py:901] (3/4) Epoch 24, batch 4750, loss[loss=0.2181, simple_loss=0.2958, pruned_loss=0.07014, over 8591.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2837, pruned_loss=0.059, over 1615893.97 frames. 
], batch size: 31, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:16:18,718 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190678.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:20,754 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 07:16:22,896 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 07:16:29,412 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=190693.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:16:32,691 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.279e+02 2.789e+02 3.393e+02 7.815e+02, threshold=5.578e+02, percent-clipped=3.0 +2023-02-07 07:16:40,394 INFO [train.py:901] (3/4) Epoch 24, batch 4800, loss[loss=0.1842, simple_loss=0.2658, pruned_loss=0.05128, over 8252.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.283, pruned_loss=0.05894, over 1605804.14 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:13,016 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 07:17:15,032 INFO [train.py:901] (3/4) Epoch 24, batch 4850, loss[loss=0.1978, simple_loss=0.2823, pruned_loss=0.0567, over 8034.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2824, pruned_loss=0.05877, over 1604207.56 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:15,935 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1631, 2.4893, 3.8186, 2.0891, 2.0770, 3.8026, 0.7764, 2.3649], + device='cuda:3'), covar=tensor([0.1353, 0.1203, 0.0194, 0.1641, 0.2292, 0.0231, 0.2048, 0.1180], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0199, 0.0130, 0.0220, 0.0270, 0.0138, 0.0169, 0.0195], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:17:17,936 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:40,144 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190793.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:17:43,508 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.462e+02 2.278e+02 2.775e+02 3.178e+02 7.824e+02, threshold=5.550e+02, percent-clipped=3.0 +2023-02-07 07:17:50,723 INFO [train.py:901] (3/4) Epoch 24, batch 4900, loss[loss=0.1673, simple_loss=0.2429, pruned_loss=0.04586, over 7166.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2818, pruned_loss=0.05848, over 1605917.00 frames. ], batch size: 16, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:17:53,677 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2428, 2.3705, 1.9616, 2.9693, 1.4063, 1.6434, 2.1664, 2.2649], + device='cuda:3'), covar=tensor([0.0644, 0.0761, 0.0835, 0.0329, 0.1119, 0.1312, 0.0836, 0.0851], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0199, 0.0245, 0.0215, 0.0206, 0.0248, 0.0252, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 07:17:55,387 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 07:18:09,308 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. 
limit=2.0 +2023-02-07 07:18:25,852 INFO [train.py:901] (3/4) Epoch 24, batch 4950, loss[loss=0.221, simple_loss=0.2924, pruned_loss=0.07478, over 7547.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2826, pruned_loss=0.05861, over 1608898.48 frames. ], batch size: 72, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:18:36,136 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190872.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:18:39,588 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=190876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:45,201 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=190884.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:18:53,851 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190897.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:18:54,307 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.393e+02 2.890e+02 3.701e+02 7.772e+02, threshold=5.780e+02, percent-clipped=4.0 +2023-02-07 07:19:01,824 INFO [train.py:901] (3/4) Epoch 24, batch 5000, loss[loss=0.2179, simple_loss=0.2895, pruned_loss=0.07314, over 8036.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2822, pruned_loss=0.05862, over 1607340.76 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:02,720 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=190909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:19,904 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1329, 2.3296, 1.8118, 2.9009, 1.3216, 1.6380, 2.1025, 2.3234], + device='cuda:3'), covar=tensor([0.0715, 0.0745, 0.0904, 0.0348, 0.1134, 0.1266, 0.0811, 0.0751], + device='cuda:3'), in_proj_covar=tensor([0.0234, 0.0199, 0.0246, 0.0216, 0.0206, 0.0248, 0.0253, 0.0208], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 07:19:33,491 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190953.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:19:36,599 INFO [train.py:901] (3/4) Epoch 24, batch 5050, loss[loss=0.1894, simple_loss=0.2725, pruned_loss=0.05312, over 7826.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2828, pruned_loss=0.05888, over 1609986.51 frames. ], batch size: 20, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:19:45,290 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=190971.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:19:51,186 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-07 07:20:04,896 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.504e+02 2.521e+02 3.221e+02 4.359e+02 8.705e+02, threshold=6.442e+02, percent-clipped=11.0 +2023-02-07 07:20:05,164 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1377, 1.9913, 2.6263, 2.2391, 2.5168, 2.2225, 2.0171, 1.4668], + device='cuda:3'), covar=tensor([0.5441, 0.4883, 0.1827, 0.3372, 0.2351, 0.3144, 0.1981, 0.5078], + device='cuda:3'), in_proj_covar=tensor([0.0947, 0.0994, 0.0812, 0.0963, 0.1001, 0.0906, 0.0755, 0.0830], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:20:11,616 INFO [train.py:901] (3/4) Epoch 24, batch 5100, loss[loss=0.1873, simple_loss=0.284, pruned_loss=0.04524, over 8654.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.284, pruned_loss=0.05955, over 1607991.68 frames. ], batch size: 34, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:31,920 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:33,352 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6833, 1.6382, 2.1245, 1.6258, 1.3324, 2.1436, 0.4960, 1.4543], + device='cuda:3'), covar=tensor([0.1601, 0.1234, 0.0392, 0.0802, 0.2371, 0.0354, 0.1643, 0.1040], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0200, 0.0130, 0.0221, 0.0271, 0.0138, 0.0171, 0.0195], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:20:40,071 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191049.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:20:40,224 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 07:20:46,727 INFO [train.py:901] (3/4) Epoch 24, batch 5150, loss[loss=0.1931, simple_loss=0.2739, pruned_loss=0.05617, over 8236.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05944, over 1611398.78 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:20:53,683 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191068.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:20:57,803 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:00,592 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-07 07:21:05,749 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:05,786 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:13,667 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.286e+02 2.691e+02 3.617e+02 7.196e+02, threshold=5.383e+02, percent-clipped=2.0 +2023-02-07 07:21:20,834 INFO [train.py:901] (3/4) Epoch 24, batch 5200, loss[loss=0.1957, simple_loss=0.2844, pruned_loss=0.05347, over 8195.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2849, pruned_loss=0.05948, over 1613693.11 frames. 
], batch size: 23, lr: 3.14e-03, grad_scale: 16.0 +2023-02-07 07:21:38,118 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:50,042 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 07:21:50,823 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:52,273 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191152.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:55,614 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:21:56,093 INFO [train.py:901] (3/4) Epoch 24, batch 5250, loss[loss=0.2445, simple_loss=0.3053, pruned_loss=0.09183, over 6753.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2833, pruned_loss=0.05877, over 1607775.67 frames. ], batch size: 71, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:22:25,865 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.418e+02 2.847e+02 3.981e+02 6.971e+02, threshold=5.694e+02, percent-clipped=11.0 +2023-02-07 07:22:28,316 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191203.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:22:31,590 INFO [train.py:901] (3/4) Epoch 24, batch 5300, loss[loss=0.1844, simple_loss=0.2748, pruned_loss=0.04699, over 8133.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2835, pruned_loss=0.05869, over 1611323.63 frames. ], batch size: 22, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:07,057 INFO [train.py:901] (3/4) Epoch 24, batch 5350, loss[loss=0.245, simple_loss=0.3051, pruned_loss=0.09247, over 7078.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2831, pruned_loss=0.05888, over 1608494.28 frames. ], batch size: 71, lr: 3.14e-03, grad_scale: 8.0 +2023-02-07 07:23:31,507 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-07 07:23:36,917 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.294e+02 2.754e+02 3.342e+02 1.056e+03, threshold=5.508e+02, percent-clipped=2.0 +2023-02-07 07:23:40,179 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 07:23:42,385 INFO [train.py:901] (3/4) Epoch 24, batch 5400, loss[loss=0.2381, simple_loss=0.3168, pruned_loss=0.07968, over 8512.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05882, over 1611608.06 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:23:52,739 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 07:23:53,147 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191324.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:24:05,746 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191342.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:11,124 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191349.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:24:17,037 INFO [train.py:901] (3/4) Epoch 24, batch 5450, loss[loss=0.1787, simple_loss=0.256, pruned_loss=0.05073, over 7813.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05929, over 1611612.85 frames. 
], batch size: 19, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:22,004 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9251, 1.7560, 2.5715, 1.6179, 1.4599, 2.5168, 0.4557, 1.6047], + device='cuda:3'), covar=tensor([0.1565, 0.1260, 0.0303, 0.1281, 0.2571, 0.0381, 0.2089, 0.1335], + device='cuda:3'), in_proj_covar=tensor([0.0195, 0.0202, 0.0131, 0.0224, 0.0274, 0.0140, 0.0172, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:24:23,304 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:34,971 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=191383.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:24:39,468 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 07:24:46,090 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.294e+02 2.951e+02 3.676e+02 7.135e+02, threshold=5.902e+02, percent-clipped=5.0 +2023-02-07 07:24:48,440 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4361, 1.5481, 2.1084, 1.3406, 1.4301, 1.7001, 1.4582, 1.4715], + device='cuda:3'), covar=tensor([0.1960, 0.2633, 0.1029, 0.4475, 0.2086, 0.3262, 0.2457, 0.2355], + device='cuda:3'), in_proj_covar=tensor([0.0530, 0.0617, 0.0556, 0.0657, 0.0652, 0.0599, 0.0548, 0.0637], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:24:52,410 INFO [train.py:901] (3/4) Epoch 24, batch 5500, loss[loss=0.1959, simple_loss=0.2825, pruned_loss=0.05467, over 8544.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2847, pruned_loss=0.05943, over 1613145.31 frames. ], batch size: 31, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:24:52,650 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191408.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:07,823 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:10,099 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:28,128 INFO [train.py:901] (3/4) Epoch 24, batch 5550, loss[loss=0.1931, simple_loss=0.2892, pruned_loss=0.04852, over 8246.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2848, pruned_loss=0.05906, over 1617623.33 frames. 
], batch size: 24, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:25:36,927 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0198, 1.2471, 1.2311, 0.6799, 1.2200, 1.0328, 0.0736, 1.1949], + device='cuda:3'), covar=tensor([0.0451, 0.0415, 0.0387, 0.0614, 0.0474, 0.1025, 0.0867, 0.0372], + device='cuda:3'), in_proj_covar=tensor([0.0463, 0.0404, 0.0359, 0.0456, 0.0388, 0.0541, 0.0401, 0.0433], + device='cuda:3'), out_proj_covar=tensor([1.2354e-04, 1.0540e-04, 9.4053e-05, 1.1961e-04, 1.0190e-04, 1.5177e-04, + 1.0760e-04, 1.1426e-04], device='cuda:3') +2023-02-07 07:25:49,506 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8877, 1.7417, 2.4573, 1.6190, 1.4619, 2.3754, 0.4144, 1.5429], + device='cuda:3'), covar=tensor([0.1743, 0.1418, 0.0363, 0.1204, 0.2455, 0.0477, 0.2084, 0.1251], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0223, 0.0273, 0.0139, 0.0172, 0.0196], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:25:53,334 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191494.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:25:57,921 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.420e+02 3.039e+02 3.989e+02 7.925e+02, threshold=6.078e+02, percent-clipped=5.0 +2023-02-07 07:26:03,379 INFO [train.py:901] (3/4) Epoch 24, batch 5600, loss[loss=0.2249, simple_loss=0.3056, pruned_loss=0.07207, over 8500.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.05923, over 1616243.51 frames. ], batch size: 28, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:23,607 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6959, 1.9403, 2.0634, 1.3387, 2.1438, 1.5895, 0.5505, 1.9201], + device='cuda:3'), covar=tensor([0.0612, 0.0385, 0.0324, 0.0614, 0.0469, 0.0856, 0.0972, 0.0313], + device='cuda:3'), in_proj_covar=tensor([0.0464, 0.0404, 0.0359, 0.0457, 0.0389, 0.0542, 0.0401, 0.0434], + device='cuda:3'), out_proj_covar=tensor([1.2366e-04, 1.0560e-04, 9.4065e-05, 1.1995e-04, 1.0212e-04, 1.5210e-04, + 1.0776e-04, 1.1433e-04], device='cuda:3') +2023-02-07 07:26:29,661 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191545.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:30,967 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191547.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:26:38,449 INFO [train.py:901] (3/4) Epoch 24, batch 5650, loss[loss=0.2196, simple_loss=0.307, pruned_loss=0.06608, over 8360.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2843, pruned_loss=0.05903, over 1613841.99 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:26:45,398 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 07:27:08,570 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.481e+02 2.901e+02 3.753e+02 9.237e+02, threshold=5.802e+02, percent-clipped=5.0 +2023-02-07 07:27:14,232 INFO [train.py:901] (3/4) Epoch 24, batch 5700, loss[loss=0.2313, simple_loss=0.3171, pruned_loss=0.07279, over 8512.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.285, pruned_loss=0.05907, over 1614943.16 frames. 
], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:15,046 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:49,746 INFO [train.py:901] (3/4) Epoch 24, batch 5750, loss[loss=0.1578, simple_loss=0.2497, pruned_loss=0.03293, over 8470.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2854, pruned_loss=0.05904, over 1617026.28 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:27:52,731 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:27:53,293 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 07:28:19,025 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.394e+02 2.726e+02 3.524e+02 6.240e+02, threshold=5.452e+02, percent-clipped=4.0 +2023-02-07 07:28:25,087 INFO [train.py:901] (3/4) Epoch 24, batch 5800, loss[loss=0.2415, simple_loss=0.3142, pruned_loss=0.08442, over 8313.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2856, pruned_loss=0.05913, over 1616790.62 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:28:31,560 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5897, 2.7255, 3.2109, 1.9392, 3.2991, 2.0983, 1.8413, 2.4559], + device='cuda:3'), covar=tensor([0.0675, 0.0417, 0.0247, 0.0734, 0.0441, 0.0747, 0.0908, 0.0456], + device='cuda:3'), in_proj_covar=tensor([0.0465, 0.0405, 0.0360, 0.0459, 0.0389, 0.0543, 0.0402, 0.0434], + device='cuda:3'), out_proj_covar=tensor([1.2401e-04, 1.0586e-04, 9.4376e-05, 1.2048e-04, 1.0221e-04, 1.5225e-04, + 1.0794e-04, 1.1443e-04], device='cuda:3') +2023-02-07 07:28:38,095 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=191727.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:28:57,750 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3233, 4.3009, 3.9843, 2.0134, 3.8288, 3.9665, 3.9196, 3.8130], + device='cuda:3'), covar=tensor([0.0788, 0.0575, 0.0855, 0.4504, 0.0884, 0.1002, 0.1225, 0.0790], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0454, 0.0439, 0.0553, 0.0436, 0.0455, 0.0435, 0.0398], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:28:59,277 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.11 vs. limit=5.0 +2023-02-07 07:28:59,639 INFO [train.py:901] (3/4) Epoch 24, batch 5850, loss[loss=0.1899, simple_loss=0.279, pruned_loss=0.05043, over 8195.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2847, pruned_loss=0.05859, over 1619303.39 frames. 
], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:26,690 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6964, 2.3452, 3.7300, 1.4838, 2.8350, 1.9876, 1.8540, 2.6077], + device='cuda:3'), covar=tensor([0.2201, 0.2858, 0.1243, 0.5217, 0.2085, 0.3933, 0.2628, 0.3047], + device='cuda:3'), in_proj_covar=tensor([0.0531, 0.0616, 0.0556, 0.0653, 0.0651, 0.0601, 0.0548, 0.0636], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:29:29,158 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.223e+02 2.821e+02 3.422e+02 9.012e+02, threshold=5.641e+02, percent-clipped=8.0 +2023-02-07 07:29:30,134 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:34,726 INFO [train.py:901] (3/4) Epoch 24, batch 5900, loss[loss=0.2147, simple_loss=0.2894, pruned_loss=0.07, over 7229.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2852, pruned_loss=0.05878, over 1616893.26 frames. ], batch size: 72, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:29:48,215 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191826.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:29:59,347 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=191842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:10,944 INFO [train.py:901] (3/4) Epoch 24, batch 5950, loss[loss=0.1975, simple_loss=0.2799, pruned_loss=0.05751, over 8338.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2858, pruned_loss=0.05979, over 1615948.55 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:16,035 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:33,605 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191890.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:30:40,232 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.416e+02 2.338e+02 2.991e+02 3.628e+02 7.270e+02, threshold=5.982e+02, percent-clipped=3.0 +2023-02-07 07:30:45,681 INFO [train.py:901] (3/4) Epoch 24, batch 6000, loss[loss=0.1852, simple_loss=0.2598, pruned_loss=0.05532, over 7690.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.0599, over 1611887.34 frames. ], batch size: 18, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:30:45,681 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 07:30:58,017 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9285, 1.2031, 3.0531, 1.1635, 2.6803, 2.5223, 2.8321, 2.6813], + device='cuda:3'), covar=tensor([0.0653, 0.3510, 0.0452, 0.3585, 0.1053, 0.0926, 0.0626, 0.0668], + device='cuda:3'), in_proj_covar=tensor([0.0648, 0.0653, 0.0710, 0.0643, 0.0720, 0.0617, 0.0618, 0.0690], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:31:01,021 INFO [train.py:935] (3/4) Epoch 24, validation: loss=0.1718, simple_loss=0.2718, pruned_loss=0.0359, over 944034.00 frames. 
+2023-02-07 07:31:01,022 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 07:31:08,198 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=191918.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:24,848 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=191943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:31:35,317 INFO [train.py:901] (3/4) Epoch 24, batch 6050, loss[loss=0.1935, simple_loss=0.2777, pruned_loss=0.05463, over 8080.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.285, pruned_loss=0.0597, over 1611076.27 frames. ], batch size: 21, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:32:04,601 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.415e+02 2.742e+02 3.441e+02 8.508e+02, threshold=5.485e+02, percent-clipped=2.0 +2023-02-07 07:32:11,924 INFO [train.py:901] (3/4) Epoch 24, batch 6100, loss[loss=0.2027, simple_loss=0.2923, pruned_loss=0.05657, over 8283.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2845, pruned_loss=0.0593, over 1613204.38 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:32:12,376 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.77 vs. limit=5.0 +2023-02-07 07:32:17,377 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-07 07:32:34,063 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 07:32:46,753 INFO [train.py:901] (3/4) Epoch 24, batch 6150, loss[loss=0.1953, simple_loss=0.2671, pruned_loss=0.06176, over 8198.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2836, pruned_loss=0.0589, over 1613747.79 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:33:15,565 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192098.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:16,714 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.367e+02 2.762e+02 3.348e+02 6.106e+02, threshold=5.524e+02, percent-clipped=2.0 +2023-02-07 07:33:22,017 INFO [train.py:901] (3/4) Epoch 24, batch 6200, loss[loss=0.2006, simple_loss=0.2836, pruned_loss=0.05877, over 8018.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2835, pruned_loss=0.05894, over 1608573.88 frames. ], batch size: 22, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:33:30,663 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192120.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:32,771 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192123.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:33:40,932 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 07:33:56,606 INFO [train.py:901] (3/4) Epoch 24, batch 6250, loss[loss=0.1979, simple_loss=0.2877, pruned_loss=0.05404, over 8344.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2833, pruned_loss=0.05917, over 1606811.74 frames. ], batch size: 26, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:34:05,369 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. 
limit=2.0 +2023-02-07 07:34:07,843 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192173.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:26,650 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.364e+02 2.949e+02 3.646e+02 8.976e+02, threshold=5.898e+02, percent-clipped=7.0 +2023-02-07 07:34:33,010 INFO [train.py:901] (3/4) Epoch 24, batch 6300, loss[loss=0.2619, simple_loss=0.3455, pruned_loss=0.08915, over 8449.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2833, pruned_loss=0.05889, over 1609791.02 frames. ], batch size: 50, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:34:52,685 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192237.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:54,069 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192239.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:34:56,871 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3079, 2.0290, 2.6485, 2.1906, 2.6542, 2.3285, 2.1529, 1.5260], + device='cuda:3'), covar=tensor([0.5595, 0.4920, 0.1943, 0.3710, 0.2482, 0.3188, 0.1913, 0.5480], + device='cuda:3'), in_proj_covar=tensor([0.0949, 0.0998, 0.0817, 0.0970, 0.1005, 0.0910, 0.0760, 0.0833], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:34:59,519 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1549, 1.2481, 1.5408, 1.1980, 0.7738, 1.3428, 1.1816, 0.9400], + device='cuda:3'), covar=tensor([0.0634, 0.1292, 0.1746, 0.1501, 0.0569, 0.1495, 0.0712, 0.0767], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0188, 0.0159, 0.0100, 0.0163, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 07:35:07,368 INFO [train.py:901] (3/4) Epoch 24, batch 6350, loss[loss=0.1954, simple_loss=0.2778, pruned_loss=0.05647, over 7819.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2831, pruned_loss=0.05912, over 1606251.48 frames. ], batch size: 20, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:35:36,831 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.340e+02 2.329e+02 2.896e+02 3.640e+02 5.459e+02, threshold=5.791e+02, percent-clipped=0.0 +2023-02-07 07:35:38,467 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4679, 2.4258, 1.8156, 2.2005, 2.1305, 1.4724, 1.9313, 1.9191], + device='cuda:3'), covar=tensor([0.1618, 0.0469, 0.1349, 0.0713, 0.0766, 0.1867, 0.1209, 0.1163], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0235, 0.0337, 0.0310, 0.0301, 0.0342, 0.0346, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 07:35:43,003 INFO [train.py:901] (3/4) Epoch 24, batch 6400, loss[loss=0.1997, simple_loss=0.2972, pruned_loss=0.05114, over 8289.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05918, over 1608042.11 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:19,118 INFO [train.py:901] (3/4) Epoch 24, batch 6450, loss[loss=0.2928, simple_loss=0.3534, pruned_loss=0.1161, over 7176.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2846, pruned_loss=0.05975, over 1610720.04 frames. 
], batch size: 72, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:49,114 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.491e+02 2.965e+02 3.858e+02 7.678e+02, threshold=5.930e+02, percent-clipped=7.0 +2023-02-07 07:36:54,692 INFO [train.py:901] (3/4) Epoch 24, batch 6500, loss[loss=0.1799, simple_loss=0.2704, pruned_loss=0.04476, over 8200.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2844, pruned_loss=0.05962, over 1611783.34 frames. ], batch size: 23, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:36:56,155 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192410.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:00,676 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192417.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:25,015 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192451.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:29,462 INFO [train.py:901] (3/4) Epoch 24, batch 6550, loss[loss=0.2038, simple_loss=0.2858, pruned_loss=0.06085, over 8462.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2842, pruned_loss=0.05913, over 1618901.54 frames. ], batch size: 25, lr: 3.13e-03, grad_scale: 8.0 +2023-02-07 07:37:33,493 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-07 07:37:33,655 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192464.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:37:36,747 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.70 vs. limit=5.0 +2023-02-07 07:37:48,317 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 07:37:58,367 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.534e+02 2.233e+02 2.734e+02 3.455e+02 6.558e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 07:38:03,873 INFO [train.py:901] (3/4) Epoch 24, batch 6600, loss[loss=0.2571, simple_loss=0.3444, pruned_loss=0.08491, over 8197.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2854, pruned_loss=0.06018, over 1615975.38 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:38:08,113 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 07:38:10,834 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:17,072 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8361, 1.3759, 4.0038, 1.4337, 3.6234, 3.3529, 3.6762, 3.5119], + device='cuda:3'), covar=tensor([0.0707, 0.4596, 0.0612, 0.4270, 0.1194, 0.1038, 0.0599, 0.0764], + device='cuda:3'), in_proj_covar=tensor([0.0645, 0.0655, 0.0709, 0.0638, 0.0718, 0.0613, 0.0616, 0.0685], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:38:39,627 INFO [train.py:901] (3/4) Epoch 24, batch 6650, loss[loss=0.1974, simple_loss=0.2879, pruned_loss=0.05346, over 8625.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2855, pruned_loss=0.06008, over 1617258.48 frames. 
], batch size: 39, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:38:53,938 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:55,137 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192581.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:38:55,969 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4844, 1.4302, 1.8184, 1.2018, 1.1138, 1.7954, 0.1637, 1.1660], + device='cuda:3'), covar=tensor([0.1468, 0.1282, 0.0401, 0.0876, 0.2501, 0.0434, 0.1919, 0.1161], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0199, 0.0130, 0.0220, 0.0271, 0.0138, 0.0171, 0.0196], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:38:56,512 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:08,650 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.365e+02 2.876e+02 3.746e+02 9.522e+02, threshold=5.752e+02, percent-clipped=3.0 +2023-02-07 07:39:14,188 INFO [train.py:901] (3/4) Epoch 24, batch 6700, loss[loss=0.1736, simple_loss=0.25, pruned_loss=0.04862, over 7303.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05967, over 1619573.10 frames. ], batch size: 16, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:39:31,225 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192632.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:32,523 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192634.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:39:48,725 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-07 07:39:49,028 INFO [train.py:901] (3/4) Epoch 24, batch 6750, loss[loss=0.1708, simple_loss=0.2519, pruned_loss=0.04488, over 7712.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2849, pruned_loss=0.05907, over 1621467.74 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:40:00,543 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5585, 1.8678, 1.9225, 1.2180, 1.9951, 1.4624, 0.4139, 1.8687], + device='cuda:3'), covar=tensor([0.0476, 0.0281, 0.0236, 0.0479, 0.0411, 0.0794, 0.0855, 0.0224], + device='cuda:3'), in_proj_covar=tensor([0.0458, 0.0398, 0.0353, 0.0451, 0.0383, 0.0534, 0.0397, 0.0427], + device='cuda:3'), out_proj_covar=tensor([1.2197e-04, 1.0390e-04, 9.2514e-05, 1.1836e-04, 1.0065e-04, 1.4965e-04, + 1.0652e-04, 1.1246e-04], device='cuda:3') +2023-02-07 07:40:12,033 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192691.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:15,245 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192696.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:16,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:17,823 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 2.601e+02 3.163e+02 3.841e+02 9.507e+02, threshold=6.325e+02, percent-clipped=3.0 +2023-02-07 07:40:23,396 INFO [train.py:901] (3/4) Epoch 24, batch 6800, loss[loss=0.2353, simple_loss=0.319, pruned_loss=0.07584, over 7931.00 frames. 
], tot_loss[loss=0.2022, simple_loss=0.2853, pruned_loss=0.05955, over 1625813.67 frames. ], batch size: 20, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:40:24,828 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 07:40:56,187 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192753.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:56,813 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:40:59,526 INFO [train.py:901] (3/4) Epoch 24, batch 6850, loss[loss=0.1699, simple_loss=0.2586, pruned_loss=0.04063, over 8498.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2839, pruned_loss=0.05907, over 1621199.52 frames. ], batch size: 29, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:41:01,650 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:41:08,693 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3574, 2.3958, 1.8677, 2.2421, 2.0326, 1.5814, 1.8252, 1.9135], + device='cuda:3'), covar=tensor([0.1381, 0.0371, 0.1196, 0.0549, 0.0639, 0.1537, 0.1039, 0.1035], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0235, 0.0337, 0.0308, 0.0300, 0.0341, 0.0348, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 07:41:15,298 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 07:41:26,177 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:41:29,430 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.230e+02 2.864e+02 3.611e+02 9.090e+02, threshold=5.729e+02, percent-clipped=1.0 +2023-02-07 07:41:35,202 INFO [train.py:901] (3/4) Epoch 24, batch 6900, loss[loss=0.2352, simple_loss=0.3218, pruned_loss=0.07434, over 8243.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2842, pruned_loss=0.05906, over 1618106.70 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:41:54,069 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:03,105 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=192849.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:42:08,876 INFO [train.py:901] (3/4) Epoch 24, batch 6950, loss[loss=0.2127, simple_loss=0.2892, pruned_loss=0.06809, over 8649.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.285, pruned_loss=0.05998, over 1615452.66 frames. ], batch size: 34, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:42:10,323 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192860.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:17,158 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:21,982 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192876.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:23,176 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-07 07:42:30,047 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192888.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:38,071 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6397, 5.7330, 5.0501, 2.6932, 5.0999, 5.4684, 5.2999, 5.2810], + device='cuda:3'), covar=tensor([0.0551, 0.0344, 0.0811, 0.3936, 0.0719, 0.0908, 0.0994, 0.0604], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0444, 0.0431, 0.0543, 0.0431, 0.0447, 0.0427, 0.0390], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:42:38,579 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.384e+02 2.955e+02 3.597e+02 9.319e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 07:42:44,702 INFO [train.py:901] (3/4) Epoch 24, batch 7000, loss[loss=0.2061, simple_loss=0.3009, pruned_loss=0.05565, over 8323.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2841, pruned_loss=0.05963, over 1614277.47 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:42:46,230 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=192910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:42:48,262 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192913.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:15,540 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192952.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:16,866 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=192954.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:19,408 INFO [train.py:901] (3/4) Epoch 24, batch 7050, loss[loss=0.1888, simple_loss=0.2644, pruned_loss=0.0566, over 7699.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2847, pruned_loss=0.05972, over 1613835.34 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:43:24,405 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5893, 1.6314, 2.1446, 1.3675, 1.1976, 2.1620, 0.3459, 1.3548], + device='cuda:3'), covar=tensor([0.1410, 0.1059, 0.0299, 0.1015, 0.2234, 0.0327, 0.1763, 0.1095], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0199, 0.0129, 0.0221, 0.0272, 0.0139, 0.0171, 0.0196], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 07:43:32,658 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:33,236 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=192978.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:34,071 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=192979.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:43:48,978 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.388e+02 2.407e+02 3.090e+02 3.925e+02 9.689e+02, threshold=6.179e+02, percent-clipped=7.0 +2023-02-07 07:43:54,440 INFO [train.py:901] (3/4) Epoch 24, batch 7100, loss[loss=0.2108, simple_loss=0.3032, pruned_loss=0.05925, over 8452.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2837, pruned_loss=0.05941, over 1609705.06 frames. 
], batch size: 27, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:44:14,236 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193035.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:14,978 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2031, 1.6193, 4.3434, 1.6255, 3.9152, 3.5908, 3.9264, 3.8364], + device='cuda:3'), covar=tensor([0.0587, 0.4415, 0.0582, 0.4374, 0.1129, 0.0981, 0.0622, 0.0657], + device='cuda:3'), in_proj_covar=tensor([0.0646, 0.0657, 0.0711, 0.0642, 0.0721, 0.0617, 0.0619, 0.0690], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:44:20,314 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6401, 2.3600, 4.0275, 1.5596, 3.0216, 2.3147, 1.8303, 2.8987], + device='cuda:3'), covar=tensor([0.2050, 0.2660, 0.0890, 0.4728, 0.1852, 0.3202, 0.2464, 0.2445], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0619, 0.0557, 0.0653, 0.0651, 0.0601, 0.0549, 0.0639], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:44:29,519 INFO [train.py:901] (3/4) Epoch 24, batch 7150, loss[loss=0.1992, simple_loss=0.2819, pruned_loss=0.05823, over 8447.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.283, pruned_loss=0.05892, over 1609379.02 frames. ], batch size: 24, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:44:30,377 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193059.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:40,889 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-02-07 07:44:54,306 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:56,946 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193097.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:44:58,924 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.272e+02 2.945e+02 3.915e+02 7.728e+02, threshold=5.890e+02, percent-clipped=4.0 +2023-02-07 07:45:05,031 INFO [train.py:901] (3/4) Epoch 24, batch 7200, loss[loss=0.1949, simple_loss=0.2829, pruned_loss=0.05346, over 8191.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2841, pruned_loss=0.05944, over 1609110.77 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 8.0 +2023-02-07 07:45:05,478 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. 
limit=2.0 +2023-02-07 07:45:16,880 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:21,518 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:34,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:34,289 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:39,619 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193157.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:45:40,080 INFO [train.py:901] (3/4) Epoch 24, batch 7250, loss[loss=0.2287, simple_loss=0.3167, pruned_loss=0.0704, over 8305.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2834, pruned_loss=0.05907, over 1610606.58 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:45:45,721 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193166.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:03,209 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:04,351 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193193.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:46:08,899 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.389e+02 2.780e+02 3.377e+02 1.311e+03, threshold=5.561e+02, percent-clipped=2.0 +2023-02-07 07:46:14,404 INFO [train.py:901] (3/4) Epoch 24, batch 7300, loss[loss=0.1842, simple_loss=0.2743, pruned_loss=0.04709, over 8246.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2836, pruned_loss=0.05923, over 1608945.39 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:46:17,214 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193212.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:29,134 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193229.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:46:48,076 INFO [train.py:901] (3/4) Epoch 24, batch 7350, loss[loss=0.1487, simple_loss=0.2291, pruned_loss=0.03417, over 8232.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2844, pruned_loss=0.05971, over 1608665.41 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:46:54,300 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1081, 2.3511, 1.9054, 2.9419, 1.4728, 1.7017, 2.1996, 2.2881], + device='cuda:3'), covar=tensor([0.0713, 0.0762, 0.0890, 0.0311, 0.0960, 0.1189, 0.0703, 0.0658], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0197, 0.0245, 0.0214, 0.0205, 0.0247, 0.0251, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 07:47:11,631 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-07 07:47:17,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.477e+02 2.971e+02 3.853e+02 6.522e+02, threshold=5.942e+02, percent-clipped=4.0 +2023-02-07 07:47:19,303 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193302.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:47:23,212 INFO [train.py:901] (3/4) Epoch 24, batch 7400, loss[loss=0.2009, simple_loss=0.286, pruned_loss=0.05792, over 8147.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2835, pruned_loss=0.05906, over 1606748.73 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:47:24,067 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193308.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:47:31,912 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 07:47:51,689 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1816, 4.1672, 3.7497, 1.9809, 3.6805, 3.8286, 3.7224, 3.7002], + device='cuda:3'), covar=tensor([0.0757, 0.0565, 0.1004, 0.4499, 0.0875, 0.0988, 0.1302, 0.0775], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0455, 0.0444, 0.0557, 0.0443, 0.0458, 0.0439, 0.0398], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:47:52,397 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:47:58,366 INFO [train.py:901] (3/4) Epoch 24, batch 7450, loss[loss=0.2017, simple_loss=0.295, pruned_loss=0.05415, over 8465.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2841, pruned_loss=0.05973, over 1607058.78 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:48:09,531 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 07:48:09,632 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1826, 3.0924, 2.8693, 1.9365, 2.7619, 2.8524, 2.8603, 2.7818], + device='cuda:3'), covar=tensor([0.0847, 0.0779, 0.1025, 0.3561, 0.0980, 0.1202, 0.1273, 0.0931], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0454, 0.0444, 0.0557, 0.0442, 0.0457, 0.0438, 0.0398], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:48:10,325 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193374.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:11,141 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. 
limit=2.0 +2023-02-07 07:48:27,885 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1446, 4.1379, 3.7277, 1.9555, 3.6367, 3.7587, 3.7430, 3.6623], + device='cuda:3'), covar=tensor([0.0793, 0.0599, 0.1088, 0.4655, 0.0951, 0.1295, 0.1310, 0.0761], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0454, 0.0443, 0.0556, 0.0442, 0.0456, 0.0437, 0.0397], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:48:28,441 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.340e+02 2.930e+02 4.048e+02 8.147e+02, threshold=5.861e+02, percent-clipped=5.0 +2023-02-07 07:48:30,638 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193403.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:32,906 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:34,028 INFO [train.py:901] (3/4) Epoch 24, batch 7500, loss[loss=0.1315, simple_loss=0.2123, pruned_loss=0.02536, over 7424.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2852, pruned_loss=0.0605, over 1608101.25 frames. ], batch size: 17, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:48:34,154 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3516, 2.2789, 2.1554, 1.2615, 2.0785, 2.1203, 2.0994, 2.0415], + device='cuda:3'), covar=tensor([0.1022, 0.0865, 0.1037, 0.3598, 0.0935, 0.1244, 0.1391, 0.0961], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0454, 0.0442, 0.0556, 0.0441, 0.0456, 0.0436, 0.0397], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:48:50,715 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:48:56,223 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:09,713 INFO [train.py:901] (3/4) Epoch 24, batch 7550, loss[loss=0.2713, simple_loss=0.3465, pruned_loss=0.09812, over 8627.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2849, pruned_loss=0.06028, over 1605137.65 frames. 
], batch size: 39, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:49:16,840 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193468.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:19,413 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193472.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:49:34,407 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193493.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:49:35,003 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1928, 4.0957, 3.7969, 1.8801, 3.7202, 3.8205, 3.7167, 3.6955], + device='cuda:3'), covar=tensor([0.0802, 0.0598, 0.0984, 0.4773, 0.0947, 0.1036, 0.1316, 0.0779], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0449, 0.0438, 0.0551, 0.0437, 0.0452, 0.0433, 0.0394], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:49:39,065 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.434e+02 2.474e+02 3.046e+02 3.751e+02 6.843e+02, threshold=6.092e+02, percent-clipped=3.0 +2023-02-07 07:49:45,274 INFO [train.py:901] (3/4) Epoch 24, batch 7600, loss[loss=0.2202, simple_loss=0.2923, pruned_loss=0.07401, over 8289.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.2856, pruned_loss=0.06051, over 1610862.43 frames. ], batch size: 23, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:49:51,993 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193518.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:50:16,721 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8259, 3.7515, 3.4506, 1.8553, 3.3886, 3.5096, 3.3688, 3.3838], + device='cuda:3'), covar=tensor([0.0912, 0.0676, 0.1068, 0.4710, 0.0966, 0.1085, 0.1451, 0.0962], + device='cuda:3'), in_proj_covar=tensor([0.0533, 0.0449, 0.0437, 0.0551, 0.0436, 0.0452, 0.0433, 0.0394], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:50:19,357 INFO [train.py:901] (3/4) Epoch 24, batch 7650, loss[loss=0.1959, simple_loss=0.2862, pruned_loss=0.05276, over 8458.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2866, pruned_loss=0.06105, over 1612944.79 frames. ], batch size: 25, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:50:24,438 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193564.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:50:27,915 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. 
limit=2.0 +2023-02-07 07:50:30,315 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193573.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:50:30,442 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9537, 3.4315, 2.2775, 2.8235, 2.7460, 1.9900, 2.6294, 2.8579], + device='cuda:3'), covar=tensor([0.1856, 0.0413, 0.1209, 0.0802, 0.0750, 0.1570, 0.1283, 0.1305], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0235, 0.0337, 0.0308, 0.0301, 0.0342, 0.0347, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 07:50:41,159 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193589.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:50:48,584 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.626e+02 3.196e+02 4.372e+02 7.437e+02, threshold=6.392e+02, percent-clipped=4.0 +2023-02-07 07:50:53,957 INFO [train.py:901] (3/4) Epoch 24, batch 7700, loss[loss=0.1749, simple_loss=0.2474, pruned_loss=0.05124, over 7552.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2865, pruned_loss=0.06093, over 1613238.53 frames. ], batch size: 18, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:51:11,794 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193633.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:14,886 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 07:51:20,929 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193646.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:29,073 INFO [train.py:901] (3/4) Epoch 24, batch 7750, loss[loss=0.1996, simple_loss=0.2774, pruned_loss=0.06086, over 7961.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2871, pruned_loss=0.06137, over 1613452.43 frames. ], batch size: 21, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:51:50,186 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193688.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:51:58,097 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.447e+02 2.679e+02 3.147e+02 3.999e+02 8.742e+02, threshold=6.294e+02, percent-clipped=3.0 +2023-02-07 07:52:03,354 INFO [train.py:901] (3/4) Epoch 24, batch 7800, loss[loss=0.1978, simple_loss=0.2928, pruned_loss=0.05135, over 8025.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2857, pruned_loss=0.06055, over 1612492.97 frames. ], batch size: 22, lr: 3.12e-03, grad_scale: 16.0 +2023-02-07 07:52:05,695 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. limit=2.0 +2023-02-07 07:52:07,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1547, 1.7945, 3.4854, 1.5926, 2.3793, 3.8180, 3.9751, 3.3235], + device='cuda:3'), covar=tensor([0.1116, 0.1657, 0.0327, 0.2073, 0.1119, 0.0240, 0.0613, 0.0539], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0322, 0.0287, 0.0316, 0.0315, 0.0272, 0.0429, 0.0302], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 07:52:37,270 INFO [train.py:901] (3/4) Epoch 24, batch 7850, loss[loss=0.2228, simple_loss=0.304, pruned_loss=0.07085, over 8511.00 frames. ], tot_loss[loss=0.2045, simple_loss=0.2865, pruned_loss=0.06124, over 1609736.37 frames. 
], batch size: 26, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:52:39,505 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:52:41,748 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 07:52:48,222 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:52:54,180 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193783.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:53:05,050 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193799.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:53:05,510 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.471e+02 2.801e+02 3.652e+02 8.352e+02, threshold=5.603e+02, percent-clipped=2.0 +2023-02-07 07:53:05,882 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7261, 1.7391, 2.5619, 2.0430, 2.2813, 1.8183, 1.5619, 1.1427], + device='cuda:3'), covar=tensor([0.8337, 0.6720, 0.2411, 0.4561, 0.3816, 0.5087, 0.3579, 0.6439], + device='cuda:3'), in_proj_covar=tensor([0.0950, 0.1001, 0.0824, 0.0969, 0.1009, 0.0911, 0.0762, 0.0836], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 07:53:10,834 INFO [train.py:901] (3/4) Epoch 24, batch 7900, loss[loss=0.2261, simple_loss=0.2908, pruned_loss=0.08072, over 7527.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2867, pruned_loss=0.0612, over 1616631.52 frames. ], batch size: 18, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:53:16,270 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193816.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:53:30,971 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5105, 2.3070, 4.0129, 1.5491, 2.8277, 2.0554, 1.8772, 2.7359], + device='cuda:3'), covar=tensor([0.2639, 0.3003, 0.0905, 0.5468, 0.2219, 0.4026, 0.2822, 0.2831], + device='cuda:3'), in_proj_covar=tensor([0.0528, 0.0615, 0.0554, 0.0649, 0.0650, 0.0597, 0.0546, 0.0634], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:53:43,829 INFO [train.py:901] (3/4) Epoch 24, batch 7950, loss[loss=0.2074, simple_loss=0.3066, pruned_loss=0.05413, over 8304.00 frames. ], tot_loss[loss=0.2046, simple_loss=0.2871, pruned_loss=0.06106, over 1619945.75 frames. 
], batch size: 23, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:53:48,055 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2475, 3.2101, 2.9319, 1.6984, 2.8723, 2.8881, 2.8773, 2.8479], + device='cuda:3'), covar=tensor([0.1131, 0.0816, 0.1286, 0.4329, 0.1163, 0.1642, 0.1507, 0.1095], + device='cuda:3'), in_proj_covar=tensor([0.0533, 0.0451, 0.0437, 0.0552, 0.0438, 0.0454, 0.0433, 0.0396], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 07:54:11,271 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193898.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:12,511 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.494e+02 3.061e+02 3.521e+02 6.741e+02, threshold=6.122e+02, percent-clipped=2.0 +2023-02-07 07:54:13,964 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=193902.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:17,826 INFO [train.py:901] (3/4) Epoch 24, batch 8000, loss[loss=0.195, simple_loss=0.2679, pruned_loss=0.06103, over 7426.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2867, pruned_loss=0.06063, over 1614335.05 frames. ], batch size: 17, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:33,398 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=193931.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:54:42,133 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=193944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:54:51,058 INFO [train.py:901] (3/4) Epoch 24, batch 8050, loss[loss=0.1661, simple_loss=0.2562, pruned_loss=0.03795, over 5966.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2839, pruned_loss=0.06, over 1591724.55 frames. ], batch size: 13, lr: 3.11e-03, grad_scale: 16.0 +2023-02-07 07:54:58,169 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=193969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:55:03,441 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=193977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:55:23,316 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 07:55:28,454 INFO [train.py:901] (3/4) Epoch 25, batch 0, loss[loss=0.2473, simple_loss=0.3091, pruned_loss=0.09273, over 8141.00 frames. ], tot_loss[loss=0.2473, simple_loss=0.3091, pruned_loss=0.09273, over 8141.00 frames. ], batch size: 22, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:55:28,454 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 07:55:39,670 INFO [train.py:935] (3/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2724, pruned_loss=0.03604, over 944034.00 frames. +2023-02-07 07:55:39,672 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 07:55:46,488 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.577e+02 3.086e+02 3.975e+02 9.885e+02, threshold=6.172e+02, percent-clipped=3.0 +2023-02-07 07:55:57,050 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 07:56:00,149 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:15,973 INFO [train.py:901] (3/4) Epoch 25, batch 50, loss[loss=0.2244, simple_loss=0.308, pruned_loss=0.07046, over 8106.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2808, pruned_loss=0.05579, over 365068.30 frames. ], batch size: 23, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:56:17,563 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:32,518 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 07:56:51,125 INFO [train.py:901] (3/4) Epoch 25, batch 100, loss[loss=0.2492, simple_loss=0.337, pruned_loss=0.0807, over 8281.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2852, pruned_loss=0.06028, over 640271.74 frames. ], batch size: 23, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:56:51,283 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194090.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:52,674 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:56:55,675 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 07:56:57,725 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.655e+02 3.251e+02 4.247e+02 7.218e+02, threshold=6.502e+02, percent-clipped=2.0 +2023-02-07 07:57:19,066 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 07:57:22,506 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 07:57:25,375 INFO [train.py:901] (3/4) Epoch 25, batch 150, loss[loss=0.1934, simple_loss=0.2822, pruned_loss=0.05236, over 8293.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2832, pruned_loss=0.05917, over 855942.28 frames. ], batch size: 23, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:57:35,089 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194154.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:57:52,150 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194179.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:57:58,884 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194187.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 07:58:00,655 INFO [train.py:901] (3/4) Epoch 25, batch 200, loss[loss=0.1699, simple_loss=0.2425, pruned_loss=0.04861, over 6794.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2838, pruned_loss=0.05928, over 1020252.45 frames. ], batch size: 15, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:58:07,396 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.343e+02 2.842e+02 3.543e+02 5.999e+02, threshold=5.685e+02, percent-clipped=0.0 +2023-02-07 07:58:16,692 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194212.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 07:58:35,372 INFO [train.py:901] (3/4) Epoch 25, batch 250, loss[loss=0.1622, simple_loss=0.2541, pruned_loss=0.03509, over 7964.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2844, pruned_loss=0.05963, over 1153430.35 frames. 
], batch size: 21, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:58:39,449 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194246.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:58:49,482 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 07:58:58,163 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-07 07:59:08,685 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-07 07:59:09,586 INFO [train.py:901] (3/4) Epoch 25, batch 300, loss[loss=0.2221, simple_loss=0.3094, pruned_loss=0.06741, over 8438.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.05881, over 1257804.65 frames. ], batch size: 49, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:59:17,104 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.595e+02 2.353e+02 2.857e+02 3.504e+02 7.851e+02, threshold=5.715e+02, percent-clipped=2.0 +2023-02-07 07:59:20,167 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7971, 1.3972, 3.4294, 1.5654, 2.5322, 3.6718, 3.7961, 3.1588], + device='cuda:3'), covar=tensor([0.1304, 0.1864, 0.0290, 0.2045, 0.0866, 0.0237, 0.0584, 0.0539], + device='cuda:3'), in_proj_covar=tensor([0.0298, 0.0322, 0.0287, 0.0316, 0.0316, 0.0273, 0.0429, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 07:59:43,445 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 07:59:45,900 INFO [train.py:901] (3/4) Epoch 25, batch 350, loss[loss=0.2254, simple_loss=0.306, pruned_loss=0.07242, over 7927.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2853, pruned_loss=0.05912, over 1340854.27 frames. ], batch size: 20, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 07:59:51,529 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194348.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:00,427 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194361.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:08,062 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194371.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:09,455 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:14,173 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194380.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:00:20,655 INFO [train.py:901] (3/4) Epoch 25, batch 400, loss[loss=0.1886, simple_loss=0.2707, pruned_loss=0.05327, over 7801.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2859, pruned_loss=0.05989, over 1401370.25 frames. ], batch size: 20, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 08:00:27,619 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.445e+02 3.013e+02 3.982e+02 8.525e+02, threshold=6.027e+02, percent-clipped=7.0 +2023-02-07 08:00:29,489 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.45 vs. 
limit=2.0 +2023-02-07 08:00:52,194 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194434.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:00:56,959 INFO [train.py:901] (3/4) Epoch 25, batch 450, loss[loss=0.193, simple_loss=0.2812, pruned_loss=0.05241, over 8444.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05977, over 1446515.95 frames. ], batch size: 25, lr: 3.05e-03, grad_scale: 16.0 +2023-02-07 08:01:30,915 INFO [train.py:901] (3/4) Epoch 25, batch 500, loss[loss=0.2424, simple_loss=0.3207, pruned_loss=0.08211, over 8689.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.286, pruned_loss=0.06025, over 1486849.89 frames. ], batch size: 34, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:01:37,840 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.459e+02 3.156e+02 4.025e+02 7.800e+02, threshold=6.312e+02, percent-clipped=3.0 +2023-02-07 08:02:06,149 INFO [train.py:901] (3/4) Epoch 25, batch 550, loss[loss=0.1834, simple_loss=0.2759, pruned_loss=0.04549, over 8327.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2849, pruned_loss=0.05942, over 1514495.81 frames. ], batch size: 25, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:02:13,482 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194549.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:02:30,143 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2709, 2.4915, 2.7376, 1.6652, 2.9476, 1.7354, 1.6576, 2.2254], + device='cuda:3'), covar=tensor([0.0751, 0.0419, 0.0327, 0.0737, 0.0485, 0.0923, 0.0862, 0.0503], + device='cuda:3'), in_proj_covar=tensor([0.0459, 0.0401, 0.0355, 0.0450, 0.0387, 0.0537, 0.0398, 0.0431], + device='cuda:3'), out_proj_covar=tensor([1.2215e-04, 1.0461e-04, 9.3020e-05, 1.1807e-04, 1.0144e-04, 1.5059e-04, + 1.0673e-04, 1.1351e-04], device='cuda:3') +2023-02-07 08:02:42,129 INFO [train.py:901] (3/4) Epoch 25, batch 600, loss[loss=0.222, simple_loss=0.2903, pruned_loss=0.07689, over 7095.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2845, pruned_loss=0.05931, over 1537019.48 frames. ], batch size: 72, lr: 3.04e-03, grad_scale: 16.0 +2023-02-07 08:02:48,738 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.361e+02 2.970e+02 3.663e+02 1.001e+03, threshold=5.941e+02, percent-clipped=3.0 +2023-02-07 08:03:01,136 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 08:03:01,340 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:10,866 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0725, 1.3927, 1.7152, 1.4078, 1.0345, 1.4931, 1.8987, 1.6922], + device='cuda:3'), covar=tensor([0.0517, 0.1349, 0.1676, 0.1472, 0.0618, 0.1584, 0.0691, 0.0669], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:03:16,782 INFO [train.py:901] (3/4) Epoch 25, batch 650, loss[loss=0.1852, simple_loss=0.2771, pruned_loss=0.04664, over 8360.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2836, pruned_loss=0.05901, over 1553532.67 frames. 
], batch size: 24, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:03:18,074 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194642.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:23,750 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4859, 2.2874, 2.8627, 2.5076, 2.8610, 2.4733, 2.3559, 2.1383], + device='cuda:3'), covar=tensor([0.3679, 0.3644, 0.1590, 0.2724, 0.1724, 0.2348, 0.1434, 0.3505], + device='cuda:3'), in_proj_covar=tensor([0.0942, 0.0995, 0.0814, 0.0961, 0.1001, 0.0904, 0.0754, 0.0830], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:03:25,638 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194652.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:45,808 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194680.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:03:52,562 INFO [train.py:901] (3/4) Epoch 25, batch 700, loss[loss=0.1932, simple_loss=0.2795, pruned_loss=0.05344, over 8112.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2845, pruned_loss=0.05913, over 1570411.70 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:04:00,038 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.448e+02 2.849e+02 3.638e+02 5.412e+02, threshold=5.698e+02, percent-clipped=0.0 +2023-02-07 08:04:09,652 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194715.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:04:16,431 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194724.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:04:27,405 INFO [train.py:901] (3/4) Epoch 25, batch 750, loss[loss=0.2111, simple_loss=0.2922, pruned_loss=0.06497, over 8282.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2846, pruned_loss=0.05928, over 1580083.11 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:04:49,459 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 08:04:58,544 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 08:05:03,377 INFO [train.py:901] (3/4) Epoch 25, batch 800, loss[loss=0.2021, simple_loss=0.3002, pruned_loss=0.05205, over 8105.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2851, pruned_loss=0.05958, over 1587988.27 frames. 
], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:05:07,606 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194795.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:11,591 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.325e+02 2.417e+02 2.991e+02 3.771e+02 6.788e+02, threshold=5.982e+02, percent-clipped=2.0 +2023-02-07 08:05:14,543 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=194805.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:31,800 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:31,829 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=194830.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:05:37,857 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=194839.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:05:38,297 INFO [train.py:901] (3/4) Epoch 25, batch 850, loss[loss=0.2169, simple_loss=0.2954, pruned_loss=0.06919, over 8180.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2856, pruned_loss=0.06014, over 1590782.78 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:05:49,042 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7610, 2.6533, 1.9986, 2.5045, 2.3015, 1.6735, 2.1990, 2.3087], + device='cuda:3'), covar=tensor([0.1479, 0.0441, 0.1179, 0.0625, 0.0835, 0.1649, 0.1107, 0.1069], + device='cuda:3'), in_proj_covar=tensor([0.0353, 0.0234, 0.0335, 0.0307, 0.0298, 0.0339, 0.0344, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 08:05:56,613 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=194865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:06:14,016 INFO [train.py:901] (3/4) Epoch 25, batch 900, loss[loss=0.191, simple_loss=0.2858, pruned_loss=0.04814, over 8464.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2849, pruned_loss=0.06001, over 1589640.56 frames. ], batch size: 27, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:06:22,179 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.549e+02 2.491e+02 2.923e+02 3.701e+02 8.623e+02, threshold=5.846e+02, percent-clipped=3.0 +2023-02-07 08:06:49,618 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-07 08:06:49,724 INFO [train.py:901] (3/4) Epoch 25, batch 950, loss[loss=0.2063, simple_loss=0.2929, pruned_loss=0.05984, over 8292.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2845, pruned_loss=0.05978, over 1596253.63 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:07:02,589 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7096, 1.7951, 1.9630, 1.9247, 1.2333, 1.7781, 2.4222, 2.2586], + device='cuda:3'), covar=tensor([0.0493, 0.1201, 0.1640, 0.1312, 0.0558, 0.1409, 0.0560, 0.0570], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0191, 0.0160, 0.0101, 0.0164, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:07:19,538 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-07 08:07:24,291 INFO [train.py:901] (3/4) Epoch 25, batch 1000, loss[loss=0.2164, simple_loss=0.3022, pruned_loss=0.06528, over 8759.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2844, pruned_loss=0.05976, over 1600291.69 frames. ], batch size: 30, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:07:29,064 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=194996.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:07:32,166 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 2.651e+02 3.101e+02 3.894e+02 6.477e+02, threshold=6.202e+02, percent-clipped=4.0 +2023-02-07 08:07:51,786 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195029.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:07:54,473 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 08:07:59,733 INFO [train.py:901] (3/4) Epoch 25, batch 1050, loss[loss=0.1892, simple_loss=0.2807, pruned_loss=0.04883, over 8242.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2862, pruned_loss=0.06084, over 1608161.65 frames. ], batch size: 24, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:08:06,394 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 08:08:07,162 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195051.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:14,490 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195062.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:08:23,833 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195076.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:23,887 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9451, 1.7170, 2.0448, 1.8572, 1.9982, 1.9778, 1.7992, 0.8681], + device='cuda:3'), covar=tensor([0.5534, 0.4369, 0.2036, 0.3387, 0.2222, 0.3098, 0.2030, 0.4703], + device='cuda:3'), in_proj_covar=tensor([0.0950, 0.1003, 0.0824, 0.0968, 0.1009, 0.0914, 0.0761, 0.0836], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:08:31,308 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195086.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:33,316 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:33,850 INFO [train.py:901] (3/4) Epoch 25, batch 1100, loss[loss=0.1602, simple_loss=0.2339, pruned_loss=0.04323, over 7646.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.286, pruned_loss=0.0609, over 1610486.67 frames. 
], batch size: 19, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:08:36,611 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8732, 1.4713, 3.5001, 1.4737, 2.4276, 3.8007, 3.9229, 3.2786], + device='cuda:3'), covar=tensor([0.1223, 0.1843, 0.0307, 0.2110, 0.1008, 0.0241, 0.0561, 0.0532], + device='cuda:3'), in_proj_covar=tensor([0.0300, 0.0324, 0.0288, 0.0317, 0.0316, 0.0275, 0.0430, 0.0304], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 08:08:37,387 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195095.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:08:41,229 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.550e+02 3.152e+02 4.111e+02 6.650e+02, threshold=6.304e+02, percent-clipped=3.0 +2023-02-07 08:08:48,180 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:48,202 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195111.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:08:53,479 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9089, 3.8774, 3.5129, 2.0384, 3.4447, 3.5263, 3.5150, 3.3789], + device='cuda:3'), covar=tensor([0.0883, 0.0645, 0.1110, 0.4146, 0.1068, 0.1255, 0.1294, 0.0957], + device='cuda:3'), in_proj_covar=tensor([0.0531, 0.0450, 0.0432, 0.0545, 0.0434, 0.0452, 0.0426, 0.0396], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:08:54,917 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195120.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:09:09,221 INFO [train.py:901] (3/4) Epoch 25, batch 1150, loss[loss=0.2549, simple_loss=0.3374, pruned_loss=0.08616, over 8483.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2853, pruned_loss=0.06042, over 1609383.51 frames. ], batch size: 29, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:09:16,851 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 08:09:43,696 INFO [train.py:901] (3/4) Epoch 25, batch 1200, loss[loss=0.18, simple_loss=0.271, pruned_loss=0.0445, over 8034.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2863, pruned_loss=0.06059, over 1614880.21 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:09:44,707 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 08:09:51,815 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.229e+02 2.843e+02 3.492e+02 1.399e+03, threshold=5.685e+02, percent-clipped=2.0 +2023-02-07 08:09:57,121 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:10:18,384 INFO [train.py:901] (3/4) Epoch 25, batch 1250, loss[loss=0.1666, simple_loss=0.2535, pruned_loss=0.03983, over 8086.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2862, pruned_loss=0.06095, over 1615127.70 frames. ], batch size: 21, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:10:34,136 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.76 vs. limit=5.0 +2023-02-07 08:10:53,103 INFO [train.py:901] (3/4) Epoch 25, batch 1300, loss[loss=0.1899, simple_loss=0.287, pruned_loss=0.04638, over 8030.00 frames. 
], tot_loss[loss=0.2037, simple_loss=0.2858, pruned_loss=0.06077, over 1613352.42 frames. ], batch size: 22, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:11:00,279 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.452e+02 2.850e+02 4.025e+02 1.071e+03, threshold=5.700e+02, percent-clipped=7.0 +2023-02-07 08:11:16,286 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:26,834 INFO [train.py:901] (3/4) Epoch 25, batch 1350, loss[loss=0.2694, simple_loss=0.3557, pruned_loss=0.09157, over 8187.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2863, pruned_loss=0.06109, over 1613402.25 frames. ], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:11:45,671 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:50,113 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:11:57,384 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-07 08:12:02,400 INFO [train.py:901] (3/4) Epoch 25, batch 1400, loss[loss=0.1881, simple_loss=0.2698, pruned_loss=0.05316, over 7650.00 frames. ], tot_loss[loss=0.2043, simple_loss=0.2867, pruned_loss=0.06096, over 1616819.97 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:12:03,997 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195392.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:10,479 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.570e+02 2.915e+02 3.833e+02 8.465e+02, threshold=5.831e+02, percent-clipped=6.0 +2023-02-07 08:12:13,227 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195406.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:12:15,768 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=195410.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:31,395 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:12:36,118 INFO [train.py:901] (3/4) Epoch 25, batch 1450, loss[loss=0.1702, simple_loss=0.2516, pruned_loss=0.04443, over 7810.00 frames. ], tot_loss[loss=0.206, simple_loss=0.2882, pruned_loss=0.06196, over 1617204.38 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:12:36,363 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6792, 1.6418, 2.2517, 1.4687, 1.3452, 2.2681, 0.5001, 1.4737], + device='cuda:3'), covar=tensor([0.1901, 0.1216, 0.0386, 0.1180, 0.2273, 0.0362, 0.1930, 0.1280], + device='cuda:3'), in_proj_covar=tensor([0.0195, 0.0201, 0.0130, 0.0221, 0.0273, 0.0139, 0.0171, 0.0196], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:12:44,148 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 08:13:10,181 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195488.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:13:11,407 INFO [train.py:901] (3/4) Epoch 25, batch 1500, loss[loss=0.1805, simple_loss=0.2655, pruned_loss=0.04781, over 8688.00 frames. 
], tot_loss[loss=0.2048, simple_loss=0.2875, pruned_loss=0.06106, over 1617406.31 frames. ], batch size: 34, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:13:19,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.401e+02 3.375e+02 4.255e+02 1.024e+03, threshold=6.749e+02, percent-clipped=12.0 +2023-02-07 08:13:33,938 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195521.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:13:46,247 INFO [train.py:901] (3/4) Epoch 25, batch 1550, loss[loss=0.1976, simple_loss=0.2909, pruned_loss=0.05218, over 8254.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2862, pruned_loss=0.06008, over 1617633.90 frames. ], batch size: 24, lr: 3.04e-03, grad_scale: 4.0 +2023-02-07 08:13:51,916 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:14,119 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:21,360 INFO [train.py:901] (3/4) Epoch 25, batch 1600, loss[loss=0.1987, simple_loss=0.2857, pruned_loss=0.05587, over 8497.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.287, pruned_loss=0.06006, over 1622883.47 frames. ], batch size: 28, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:14:29,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.317e+02 3.105e+02 3.813e+02 7.132e+02, threshold=6.211e+02, percent-clipped=3.0 +2023-02-07 08:14:32,324 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:14:37,017 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1195, 2.0197, 2.6098, 2.1342, 2.6243, 2.2297, 2.0598, 1.5487], + device='cuda:3'), covar=tensor([0.5967, 0.5298, 0.2123, 0.4324, 0.2879, 0.3138, 0.2153, 0.5681], + device='cuda:3'), in_proj_covar=tensor([0.0954, 0.1009, 0.0826, 0.0977, 0.1019, 0.0919, 0.0765, 0.0842], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:14:55,975 INFO [train.py:901] (3/4) Epoch 25, batch 1650, loss[loss=0.2177, simple_loss=0.2914, pruned_loss=0.07201, over 7063.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.2862, pruned_loss=0.06009, over 1615488.11 frames. ], batch size: 71, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:15:29,744 INFO [train.py:901] (3/4) Epoch 25, batch 1700, loss[loss=0.21, simple_loss=0.2775, pruned_loss=0.07127, over 7664.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2855, pruned_loss=0.05968, over 1616854.08 frames. ], batch size: 19, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:15:38,030 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.590e+02 3.116e+02 3.996e+02 7.880e+02, threshold=6.232e+02, percent-clipped=2.0 +2023-02-07 08:16:05,339 INFO [train.py:901] (3/4) Epoch 25, batch 1750, loss[loss=0.2205, simple_loss=0.3073, pruned_loss=0.0668, over 8200.00 frames. ], tot_loss[loss=0.2032, simple_loss=0.286, pruned_loss=0.06026, over 1616652.48 frames. 
], batch size: 23, lr: 3.04e-03, grad_scale: 8.0 +2023-02-07 08:16:06,318 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1678, 2.4064, 1.9945, 2.9936, 1.4866, 1.8196, 2.2196, 2.3431], + device='cuda:3'), covar=tensor([0.0606, 0.0725, 0.0827, 0.0303, 0.1001, 0.1147, 0.0713, 0.0773], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0213, 0.0205, 0.0245, 0.0249, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 08:16:09,112 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195744.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:15,704 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=195754.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:17,276 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2385, 2.0174, 2.6215, 2.1033, 2.6571, 2.3072, 2.0839, 1.4801], + device='cuda:3'), covar=tensor([0.5607, 0.4943, 0.1983, 0.3859, 0.2469, 0.3009, 0.1920, 0.5303], + device='cuda:3'), in_proj_covar=tensor([0.0953, 0.1008, 0.0825, 0.0976, 0.1016, 0.0917, 0.0764, 0.0841], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:16:26,012 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195769.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:16:28,849 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0703, 1.2772, 1.2161, 0.6946, 1.2331, 1.0595, 0.0841, 1.2170], + device='cuda:3'), covar=tensor([0.0470, 0.0424, 0.0380, 0.0626, 0.0443, 0.1068, 0.0979, 0.0343], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0406, 0.0362, 0.0458, 0.0390, 0.0546, 0.0403, 0.0435], + device='cuda:3'), out_proj_covar=tensor([1.2384e-04, 1.0602e-04, 9.4739e-05, 1.2015e-04, 1.0234e-04, 1.5297e-04, + 1.0806e-04, 1.1439e-04], device='cuda:3') +2023-02-07 08:16:31,518 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195777.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:16:40,107 INFO [train.py:901] (3/4) Epoch 25, batch 1800, loss[loss=0.1873, simple_loss=0.2733, pruned_loss=0.05068, over 7975.00 frames. ], tot_loss[loss=0.204, simple_loss=0.2866, pruned_loss=0.06074, over 1616732.35 frames. ], batch size: 21, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:16:48,983 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.456e+02 2.857e+02 3.484e+02 7.816e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-07 08:16:49,212 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:16:50,554 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=195804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:07,907 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=195829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:15,240 INFO [train.py:901] (3/4) Epoch 25, batch 1850, loss[loss=0.2021, simple_loss=0.2862, pruned_loss=0.05896, over 8253.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2861, pruned_loss=0.05983, over 1619352.19 frames. 
], batch size: 24, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:36,413 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=195869.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:17:50,154 INFO [train.py:901] (3/4) Epoch 25, batch 1900, loss[loss=0.1745, simple_loss=0.2674, pruned_loss=0.04073, over 8138.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2853, pruned_loss=0.05965, over 1615068.47 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:17:58,370 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.976e+02 2.686e+02 3.045e+02 3.689e+02 8.196e+02, threshold=6.090e+02, percent-clipped=3.0 +2023-02-07 08:18:24,458 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 08:18:25,117 INFO [train.py:901] (3/4) Epoch 25, batch 1950, loss[loss=0.1883, simple_loss=0.2676, pruned_loss=0.05452, over 8134.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2847, pruned_loss=0.0587, over 1618003.36 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:18:37,846 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 08:18:56,450 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.95 vs. limit=2.0 +2023-02-07 08:18:57,275 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 08:18:58,139 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6216, 2.4902, 1.7838, 2.3610, 2.2035, 1.5570, 2.1236, 2.2218], + device='cuda:3'), covar=tensor([0.1520, 0.0431, 0.1280, 0.0634, 0.0743, 0.1622, 0.1113, 0.0929], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0237, 0.0341, 0.0312, 0.0302, 0.0345, 0.0349, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 08:19:00,585 INFO [train.py:901] (3/4) Epoch 25, batch 2000, loss[loss=0.2045, simple_loss=0.2707, pruned_loss=0.0692, over 7686.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2852, pruned_loss=0.05893, over 1620769.77 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:19:09,742 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 2.344e+02 2.823e+02 3.287e+02 7.423e+02, threshold=5.646e+02, percent-clipped=4.0 +2023-02-07 08:19:36,107 INFO [train.py:901] (3/4) Epoch 25, batch 2050, loss[loss=0.1659, simple_loss=0.2398, pruned_loss=0.04604, over 7523.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05887, over 1616615.32 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:19:55,113 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-07 08:19:56,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4346, 1.5731, 2.1603, 1.3458, 1.4942, 1.7041, 1.4463, 1.5042], + device='cuda:3'), covar=tensor([0.1984, 0.2667, 0.0882, 0.4769, 0.2134, 0.3571, 0.2508, 0.2284], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0624, 0.0557, 0.0660, 0.0656, 0.0603, 0.0551, 0.0640], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:20:11,106 INFO [train.py:901] (3/4) Epoch 25, batch 2100, loss[loss=0.1802, simple_loss=0.2631, pruned_loss=0.04869, over 7655.00 frames. 
], tot_loss[loss=0.2012, simple_loss=0.2844, pruned_loss=0.059, over 1615949.27 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:20,386 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.403e+02 2.946e+02 3.659e+02 8.101e+02, threshold=5.892e+02, percent-clipped=3.0 +2023-02-07 08:20:35,890 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=196125.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:20:46,048 INFO [train.py:901] (3/4) Epoch 25, batch 2150, loss[loss=0.1856, simple_loss=0.2598, pruned_loss=0.05576, over 7417.00 frames. ], tot_loss[loss=0.202, simple_loss=0.285, pruned_loss=0.05945, over 1614199.11 frames. ], batch size: 17, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:20:54,033 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=196150.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:21:22,082 INFO [train.py:901] (3/4) Epoch 25, batch 2200, loss[loss=0.1719, simple_loss=0.268, pruned_loss=0.03791, over 8358.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2848, pruned_loss=0.05971, over 1615740.37 frames. ], batch size: 24, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:21:30,646 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.555e+02 3.213e+02 4.289e+02 6.887e+02, threshold=6.426e+02, percent-clipped=5.0 +2023-02-07 08:21:37,557 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8582, 5.9972, 5.1801, 2.8007, 5.2756, 5.6252, 5.4541, 5.4553], + device='cuda:3'), covar=tensor([0.0564, 0.0376, 0.1027, 0.4090, 0.0811, 0.0708, 0.1172, 0.0532], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0449, 0.0435, 0.0546, 0.0433, 0.0452, 0.0427, 0.0396], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:21:56,957 INFO [train.py:901] (3/4) Epoch 25, batch 2250, loss[loss=0.2207, simple_loss=0.2941, pruned_loss=0.07364, over 8140.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05943, over 1614899.20 frames. ], batch size: 22, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:07,582 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9233, 1.2666, 1.5932, 1.1689, 0.9793, 1.3648, 1.7420, 1.6937], + device='cuda:3'), covar=tensor([0.0583, 0.1784, 0.2493, 0.1985, 0.0689, 0.2091, 0.0788, 0.0737], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0190, 0.0160, 0.0100, 0.0164, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:22:12,519 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6221, 2.4294, 1.7827, 2.2582, 2.1681, 1.6403, 2.0791, 2.0668], + device='cuda:3'), covar=tensor([0.1335, 0.0438, 0.1319, 0.0598, 0.0700, 0.1551, 0.0968, 0.1010], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0238, 0.0341, 0.0313, 0.0303, 0.0345, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 08:22:32,049 INFO [train.py:901] (3/4) Epoch 25, batch 2300, loss[loss=0.2014, simple_loss=0.2913, pruned_loss=0.05572, over 8463.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2835, pruned_loss=0.05854, over 1612118.12 frames. 
], batch size: 29, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:22:40,946 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.794e+02 3.530e+02 9.865e+02, threshold=5.587e+02, percent-clipped=2.0 +2023-02-07 08:23:04,030 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9971, 1.0135, 0.9637, 1.2059, 0.6122, 0.9100, 0.9585, 1.0361], + device='cuda:3'), covar=tensor([0.0599, 0.0552, 0.0695, 0.0438, 0.0795, 0.0913, 0.0514, 0.0521], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0194, 0.0242, 0.0210, 0.0203, 0.0243, 0.0247, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 08:23:07,221 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:07,751 INFO [train.py:901] (3/4) Epoch 25, batch 2350, loss[loss=0.2314, simple_loss=0.3169, pruned_loss=0.07291, over 8522.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2833, pruned_loss=0.05909, over 1606760.97 frames. ], batch size: 28, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:34,290 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:23:42,081 INFO [train.py:901] (3/4) Epoch 25, batch 2400, loss[loss=0.175, simple_loss=0.2689, pruned_loss=0.04058, over 8096.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2822, pruned_loss=0.05844, over 1609851.21 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:23:50,280 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.420e+02 2.902e+02 3.432e+02 7.434e+02, threshold=5.805e+02, percent-clipped=2.0 +2023-02-07 08:24:17,356 INFO [train.py:901] (3/4) Epoch 25, batch 2450, loss[loss=0.1568, simple_loss=0.2384, pruned_loss=0.03761, over 7958.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2829, pruned_loss=0.05854, over 1612223.92 frames. ], batch size: 21, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:24:51,934 INFO [train.py:901] (3/4) Epoch 25, batch 2500, loss[loss=0.2908, simple_loss=0.3703, pruned_loss=0.1057, over 8746.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2844, pruned_loss=0.05904, over 1614952.05 frames. ], batch size: 30, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:00,802 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.834e+02 2.398e+02 2.858e+02 3.242e+02 5.404e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 08:25:18,288 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1270, 1.3190, 1.5836, 1.2510, 0.7935, 1.3795, 1.0707, 0.9159], + device='cuda:3'), covar=tensor([0.0609, 0.1291, 0.1648, 0.1515, 0.0566, 0.1535, 0.0758, 0.0768], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0101, 0.0164, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:25:27,002 INFO [train.py:901] (3/4) Epoch 25, batch 2550, loss[loss=0.2241, simple_loss=0.3011, pruned_loss=0.07351, over 8342.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2847, pruned_loss=0.05911, over 1614895.48 frames. 
], batch size: 26, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:25:54,370 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3158, 2.0886, 2.6558, 2.2647, 2.6392, 2.3477, 2.1812, 1.6311], + device='cuda:3'), covar=tensor([0.5731, 0.5161, 0.2238, 0.4185, 0.2666, 0.3405, 0.1983, 0.5576], + device='cuda:3'), in_proj_covar=tensor([0.0952, 0.1004, 0.0824, 0.0974, 0.1014, 0.0915, 0.0761, 0.0837], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:26:02,150 INFO [train.py:901] (3/4) Epoch 25, batch 2600, loss[loss=0.2023, simple_loss=0.2882, pruned_loss=0.05817, over 8344.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2848, pruned_loss=0.0591, over 1615856.21 frames. ], batch size: 26, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:26:06,425 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196596.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:26:07,752 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6130, 5.7218, 4.9959, 2.6510, 5.0705, 5.4191, 5.2825, 5.2059], + device='cuda:3'), covar=tensor([0.0502, 0.0385, 0.0961, 0.3869, 0.0751, 0.0775, 0.1035, 0.0622], + device='cuda:3'), in_proj_covar=tensor([0.0530, 0.0447, 0.0433, 0.0542, 0.0434, 0.0450, 0.0424, 0.0396], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:26:10,242 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.366e+02 2.911e+02 3.287e+02 8.101e+02, threshold=5.822e+02, percent-clipped=1.0 +2023-02-07 08:26:13,505 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-07 08:26:37,069 INFO [train.py:901] (3/4) Epoch 25, batch 2650, loss[loss=0.2055, simple_loss=0.295, pruned_loss=0.05797, over 8660.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2849, pruned_loss=0.05919, over 1616639.56 frames. ], batch size: 34, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:08,210 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196683.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:12,709 INFO [train.py:901] (3/4) Epoch 25, batch 2700, loss[loss=0.1855, simple_loss=0.2512, pruned_loss=0.05995, over 7545.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2849, pruned_loss=0.05968, over 1612399.96 frames. ], batch size: 18, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:20,578 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.452e+02 2.909e+02 3.648e+02 8.771e+02, threshold=5.818e+02, percent-clipped=3.0 +2023-02-07 08:27:33,980 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196722.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:27:46,991 INFO [train.py:901] (3/4) Epoch 25, batch 2750, loss[loss=0.1806, simple_loss=0.2627, pruned_loss=0.04925, over 5159.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2854, pruned_loss=0.05991, over 1612316.48 frames. 
], batch size: 11, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:27:55,343 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3898, 2.5895, 3.0553, 1.7441, 3.1697, 1.8855, 1.6407, 2.2959], + device='cuda:3'), covar=tensor([0.0801, 0.0499, 0.0322, 0.0858, 0.0599, 0.0962, 0.0945, 0.0604], + device='cuda:3'), in_proj_covar=tensor([0.0465, 0.0404, 0.0360, 0.0456, 0.0388, 0.0543, 0.0402, 0.0432], + device='cuda:3'), out_proj_covar=tensor([1.2356e-04, 1.0535e-04, 9.4351e-05, 1.1950e-04, 1.0162e-04, 1.5227e-04, + 1.0762e-04, 1.1362e-04], device='cuda:3') +2023-02-07 08:28:11,176 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.82 vs. limit=5.0 +2023-02-07 08:28:22,167 INFO [train.py:901] (3/4) Epoch 25, batch 2800, loss[loss=0.1722, simple_loss=0.2545, pruned_loss=0.04495, over 8056.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.05993, over 1616015.61 frames. ], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:28:27,860 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196797.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:28,544 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196798.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:31,148 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.445e+02 2.946e+02 3.604e+02 6.151e+02, threshold=5.892e+02, percent-clipped=2.0 +2023-02-07 08:28:52,423 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-07 08:28:54,960 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=196837.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:28:56,676 INFO [train.py:901] (3/4) Epoch 25, batch 2850, loss[loss=0.2438, simple_loss=0.3184, pruned_loss=0.08461, over 8185.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2863, pruned_loss=0.05996, over 1618850.52 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:19,399 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:32,387 INFO [train.py:901] (3/4) Epoch 25, batch 2900, loss[loss=0.207, simple_loss=0.2814, pruned_loss=0.06629, over 7656.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2856, pruned_loss=0.05937, over 1618159.77 frames. ], batch size: 19, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:29:39,455 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=196899.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:29:41,309 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.504e+02 3.053e+02 3.742e+02 6.617e+02, threshold=6.106e+02, percent-clipped=2.0 +2023-02-07 08:30:08,124 INFO [train.py:901] (3/4) Epoch 25, batch 2950, loss[loss=0.2115, simple_loss=0.3071, pruned_loss=0.05789, over 8536.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2846, pruned_loss=0.05869, over 1617448.15 frames. ], batch size: 28, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:08,202 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=196940.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:30:08,828 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 08:30:19,844 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=2.01 vs. 
limit=2.0 +2023-02-07 08:30:42,487 INFO [train.py:901] (3/4) Epoch 25, batch 3000, loss[loss=0.1907, simple_loss=0.2644, pruned_loss=0.05848, over 7926.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2853, pruned_loss=0.05928, over 1617236.59 frames. ], batch size: 20, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:30:42,487 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 08:30:55,640 INFO [train.py:935] (3/4) Epoch 25, validation: loss=0.1722, simple_loss=0.2721, pruned_loss=0.03618, over 944034.00 frames. +2023-02-07 08:30:55,642 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 08:31:03,953 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.269e+02 2.477e+02 2.955e+02 3.925e+02 7.788e+02, threshold=5.910e+02, percent-clipped=1.0 +2023-02-07 08:31:30,700 INFO [train.py:901] (3/4) Epoch 25, batch 3050, loss[loss=0.1827, simple_loss=0.2754, pruned_loss=0.045, over 8293.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2857, pruned_loss=0.05957, over 1619554.31 frames. ], batch size: 23, lr: 3.03e-03, grad_scale: 8.0 +2023-02-07 08:31:40,541 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197054.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:41,167 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197055.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:31:57,870 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197079.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:05,206 INFO [train.py:901] (3/4) Epoch 25, batch 3100, loss[loss=0.1822, simple_loss=0.2672, pruned_loss=0.04862, over 7649.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2854, pruned_loss=0.05935, over 1620954.34 frames. ], batch size: 19, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:07,488 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197093.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:13,236 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.503e+02 2.425e+02 3.089e+02 3.818e+02 7.102e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-07 08:32:24,976 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:40,194 INFO [train.py:901] (3/4) Epoch 25, batch 3150, loss[loss=0.2017, simple_loss=0.297, pruned_loss=0.05314, over 8324.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2851, pruned_loss=0.05932, over 1617538.60 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:32:40,983 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:32:42,603 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-07 08:33:15,303 INFO [train.py:901] (3/4) Epoch 25, batch 3200, loss[loss=0.2103, simple_loss=0.2976, pruned_loss=0.06154, over 8187.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2859, pruned_loss=0.05957, over 1618984.52 frames. 
], batch size: 23, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:23,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.400e+02 2.739e+02 3.315e+02 1.024e+03, threshold=5.479e+02, percent-clipped=5.0 +2023-02-07 08:33:33,160 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:33:50,045 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-07 08:33:50,304 INFO [train.py:901] (3/4) Epoch 25, batch 3250, loss[loss=0.2337, simple_loss=0.3175, pruned_loss=0.07494, over 8757.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2856, pruned_loss=0.05938, over 1615570.19 frames. ], batch size: 30, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:33:52,476 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:02,150 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197256.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:24,835 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4037, 1.1125, 2.5034, 0.9687, 2.2354, 2.1026, 2.2916, 2.2289], + device='cuda:3'), covar=tensor([0.0838, 0.3195, 0.0964, 0.3742, 0.1116, 0.0960, 0.0699, 0.0798], + device='cuda:3'), in_proj_covar=tensor([0.0649, 0.0653, 0.0711, 0.0643, 0.0724, 0.0615, 0.0620, 0.0693], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:34:25,380 INFO [train.py:901] (3/4) Epoch 25, batch 3300, loss[loss=0.2832, simple_loss=0.3396, pruned_loss=0.1134, over 7009.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2851, pruned_loss=0.05932, over 1612944.66 frames. ], batch size: 71, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:34:34,244 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.560e+02 3.230e+02 4.212e+02 8.703e+02, threshold=6.460e+02, percent-clipped=10.0 +2023-02-07 08:34:40,504 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:54,231 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197331.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:34:57,730 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197336.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:00,243 INFO [train.py:901] (3/4) Epoch 25, batch 3350, loss[loss=0.1607, simple_loss=0.2417, pruned_loss=0.03985, over 7925.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05892, over 1612355.05 frames. 
], batch size: 20, lr: 3.02e-03, grad_scale: 8.0 +2023-02-07 08:35:13,336 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197358.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:35:17,449 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2263, 1.1026, 1.2707, 0.9949, 1.0389, 1.3109, 0.0507, 0.8881], + device='cuda:3'), covar=tensor([0.1545, 0.1269, 0.0519, 0.0786, 0.2443, 0.0573, 0.2133, 0.1253], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0200, 0.0130, 0.0220, 0.0271, 0.0139, 0.0170, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:35:36,172 INFO [train.py:901] (3/4) Epoch 25, batch 3400, loss[loss=0.1902, simple_loss=0.2571, pruned_loss=0.06162, over 7728.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2843, pruned_loss=0.05905, over 1615528.69 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:35:39,765 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3853, 1.4058, 1.8088, 1.1061, 1.0446, 1.8178, 0.1574, 1.0577], + device='cuda:3'), covar=tensor([0.1615, 0.1234, 0.0397, 0.1164, 0.2600, 0.0378, 0.1954, 0.1344], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0199, 0.0130, 0.0220, 0.0271, 0.0138, 0.0170, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:35:44,272 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.558e+02 2.489e+02 3.044e+02 3.734e+02 7.163e+02, threshold=6.087e+02, percent-clipped=2.0 +2023-02-07 08:36:06,305 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197433.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:36:11,059 INFO [train.py:901] (3/4) Epoch 25, batch 3450, loss[loss=0.2255, simple_loss=0.3127, pruned_loss=0.06909, over 8708.00 frames. ], tot_loss[loss=0.2027, simple_loss=0.2856, pruned_loss=0.05991, over 1614284.57 frames. ], batch size: 34, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:16,733 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1794, 3.8087, 2.3586, 3.0273, 2.7172, 2.0071, 2.7649, 3.1128], + device='cuda:3'), covar=tensor([0.1480, 0.0265, 0.1040, 0.0673, 0.0753, 0.1417, 0.1036, 0.1036], + device='cuda:3'), in_proj_covar=tensor([0.0357, 0.0236, 0.0340, 0.0311, 0.0300, 0.0345, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 08:36:46,262 INFO [train.py:901] (3/4) Epoch 25, batch 3500, loss[loss=0.1795, simple_loss=0.2565, pruned_loss=0.05124, over 7520.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2852, pruned_loss=0.05949, over 1613849.49 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:36:54,904 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.387e+02 2.953e+02 3.537e+02 5.869e+02, threshold=5.907e+02, percent-clipped=0.0 +2023-02-07 08:37:02,043 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197512.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:07,264 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-07 08:37:17,735 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:19,778 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197537.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:21,593 INFO [train.py:901] (3/4) Epoch 25, batch 3550, loss[loss=0.196, simple_loss=0.2744, pruned_loss=0.05885, over 7512.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2847, pruned_loss=0.05943, over 1617707.40 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:37:22,462 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8621, 1.7177, 2.1249, 1.5178, 1.5836, 2.1729, 1.0101, 1.7083], + device='cuda:3'), covar=tensor([0.1160, 0.0915, 0.0316, 0.0808, 0.1678, 0.0294, 0.1540, 0.1089], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0221, 0.0273, 0.0139, 0.0172, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:37:24,416 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0914, 1.9034, 2.3828, 2.0456, 2.3190, 2.1936, 1.9890, 1.1526], + device='cuda:3'), covar=tensor([0.5669, 0.4702, 0.1981, 0.3605, 0.2389, 0.2935, 0.1904, 0.5143], + device='cuda:3'), in_proj_covar=tensor([0.0951, 0.1006, 0.0822, 0.0974, 0.1014, 0.0914, 0.0763, 0.0840], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:37:37,367 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197563.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:52,449 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1201, 2.4169, 1.9089, 2.9099, 1.4489, 1.7872, 2.2127, 2.3708], + device='cuda:3'), covar=tensor([0.0719, 0.0719, 0.0878, 0.0367, 0.1084, 0.1243, 0.0747, 0.0740], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0194, 0.0245, 0.0212, 0.0204, 0.0247, 0.0248, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 08:37:54,507 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:37:56,270 INFO [train.py:901] (3/4) Epoch 25, batch 3600, loss[loss=0.1961, simple_loss=0.2685, pruned_loss=0.06187, over 7444.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2837, pruned_loss=0.05939, over 1607838.86 frames. ], batch size: 17, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:05,202 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.311e+02 2.881e+02 3.803e+02 6.346e+02, threshold=5.762e+02, percent-clipped=1.0 +2023-02-07 08:38:12,109 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197612.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:13,509 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=197614.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,222 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=197639.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:38:31,671 INFO [train.py:901] (3/4) Epoch 25, batch 3650, loss[loss=0.2218, simple_loss=0.301, pruned_loss=0.07128, over 8080.00 frames. 
], tot_loss[loss=0.2025, simple_loss=0.285, pruned_loss=0.06, over 1607024.51 frames. ], batch size: 21, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:38:40,962 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8605, 2.4264, 3.7933, 1.8691, 1.9298, 3.7648, 0.7353, 2.2244], + device='cuda:3'), covar=tensor([0.1372, 0.1059, 0.0163, 0.1573, 0.2292, 0.0177, 0.1956, 0.1139], + device='cuda:3'), in_proj_covar=tensor([0.0194, 0.0201, 0.0131, 0.0221, 0.0273, 0.0139, 0.0171, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:38:54,176 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6006, 1.9974, 2.8779, 1.5056, 2.1246, 2.0172, 1.6838, 2.1962], + device='cuda:3'), covar=tensor([0.1842, 0.2450, 0.0849, 0.4354, 0.1875, 0.3034, 0.2292, 0.2172], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0621, 0.0557, 0.0659, 0.0654, 0.0603, 0.0549, 0.0637], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:39:06,710 INFO [train.py:901] (3/4) Epoch 25, batch 3700, loss[loss=0.2053, simple_loss=0.2973, pruned_loss=0.05668, over 8330.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2852, pruned_loss=0.05993, over 1605851.24 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:39:09,548 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 08:39:15,747 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.484e+02 2.942e+02 3.783e+02 7.174e+02, threshold=5.884e+02, percent-clipped=5.0 +2023-02-07 08:39:35,848 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5322, 1.2556, 4.7567, 1.8306, 4.2597, 3.9412, 4.3319, 4.1983], + device='cuda:3'), covar=tensor([0.0570, 0.5257, 0.0453, 0.4017, 0.1074, 0.0945, 0.0555, 0.0646], + device='cuda:3'), in_proj_covar=tensor([0.0655, 0.0660, 0.0721, 0.0650, 0.0730, 0.0624, 0.0626, 0.0701], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:39:40,719 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 08:39:43,096 INFO [train.py:901] (3/4) Epoch 25, batch 3750, loss[loss=0.1904, simple_loss=0.2656, pruned_loss=0.05757, over 7702.00 frames. ], tot_loss[loss=0.2028, simple_loss=0.2857, pruned_loss=0.05989, over 1607779.40 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:09,411 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197777.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:40:18,200 INFO [train.py:901] (3/4) Epoch 25, batch 3800, loss[loss=0.1898, simple_loss=0.2677, pruned_loss=0.05593, over 7402.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2858, pruned_loss=0.06022, over 1606802.40 frames. ], batch size: 17, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:26,493 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.549e+02 3.044e+02 3.681e+02 9.424e+02, threshold=6.087e+02, percent-clipped=5.0 +2023-02-07 08:40:34,308 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 08:40:43,133 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. 
limit=2.0 +2023-02-07 08:40:53,471 INFO [train.py:901] (3/4) Epoch 25, batch 3850, loss[loss=0.2281, simple_loss=0.3115, pruned_loss=0.07234, over 8518.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2856, pruned_loss=0.05979, over 1611595.91 frames. ], batch size: 26, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:40:57,614 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4460, 1.4876, 1.4389, 1.8378, 0.8381, 1.2963, 1.3845, 1.4749], + device='cuda:3'), covar=tensor([0.0845, 0.0766, 0.1029, 0.0494, 0.1090, 0.1404, 0.0712, 0.0738], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0211, 0.0205, 0.0247, 0.0247, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 08:41:12,905 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 08:41:19,585 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197878.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:28,515 INFO [train.py:901] (3/4) Epoch 25, batch 3900, loss[loss=0.2102, simple_loss=0.2973, pruned_loss=0.06153, over 8325.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.285, pruned_loss=0.05926, over 1612114.24 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:41:29,975 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197892.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:41:36,383 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.445e+02 2.982e+02 3.609e+02 8.629e+02, threshold=5.963e+02, percent-clipped=3.0 +2023-02-07 08:41:39,827 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=197907.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:41:41,926 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=197910.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:02,716 INFO [train.py:901] (3/4) Epoch 25, batch 3950, loss[loss=0.174, simple_loss=0.246, pruned_loss=0.05095, over 7521.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.285, pruned_loss=0.05916, over 1611273.54 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:27,075 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5809, 1.3424, 1.6094, 1.2645, 0.9811, 1.3886, 1.5305, 1.3197], + device='cuda:3'), covar=tensor([0.0591, 0.1373, 0.1755, 0.1520, 0.0615, 0.1613, 0.0740, 0.0699], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:42:37,829 INFO [train.py:901] (3/4) Epoch 25, batch 4000, loss[loss=0.2012, simple_loss=0.2701, pruned_loss=0.06613, over 7697.00 frames. ], tot_loss[loss=0.2038, simple_loss=0.2866, pruned_loss=0.06045, over 1611811.21 frames. 
], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:42:40,144 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=197993.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:42:47,778 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.312e+02 2.768e+02 3.562e+02 7.475e+02, threshold=5.536e+02, percent-clipped=2.0 +2023-02-07 08:43:01,564 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198022.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:14,007 INFO [train.py:901] (3/4) Epoch 25, batch 4050, loss[loss=0.1361, simple_loss=0.2186, pruned_loss=0.02682, over 7542.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2849, pruned_loss=0.05953, over 1614290.77 frames. ], batch size: 18, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:24,465 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0303, 2.3652, 3.6526, 1.9117, 2.9062, 2.4122, 1.9570, 2.8174], + device='cuda:3'), covar=tensor([0.1673, 0.2507, 0.0974, 0.4108, 0.1796, 0.2956, 0.2267, 0.2488], + device='cuda:3'), in_proj_covar=tensor([0.0534, 0.0622, 0.0557, 0.0659, 0.0656, 0.0604, 0.0552, 0.0640], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:43:32,401 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6845, 1.6236, 2.2770, 1.4612, 1.3202, 2.2557, 0.4305, 1.3795], + device='cuda:3'), covar=tensor([0.1519, 0.1309, 0.0309, 0.1103, 0.2400, 0.0418, 0.1973, 0.1430], + device='cuda:3'), in_proj_covar=tensor([0.0193, 0.0201, 0.0131, 0.0220, 0.0272, 0.0139, 0.0170, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:43:48,797 INFO [train.py:901] (3/4) Epoch 25, batch 4100, loss[loss=0.2341, simple_loss=0.3136, pruned_loss=0.07733, over 8537.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2849, pruned_loss=0.05928, over 1618011.56 frames. ], batch size: 28, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:43:55,131 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198099.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:43:57,000 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.413e+02 2.876e+02 3.434e+02 5.292e+02, threshold=5.752e+02, percent-clipped=1.0 +2023-02-07 08:44:04,779 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5797, 2.5838, 1.8720, 2.2808, 2.2070, 1.5644, 2.0243, 2.1851], + device='cuda:3'), covar=tensor([0.1511, 0.0469, 0.1312, 0.0671, 0.0775, 0.1783, 0.1070, 0.1012], + device='cuda:3'), in_proj_covar=tensor([0.0360, 0.0238, 0.0343, 0.0315, 0.0304, 0.0346, 0.0352, 0.0324], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 08:44:24,272 INFO [train.py:901] (3/4) Epoch 25, batch 4150, loss[loss=0.2328, simple_loss=0.3032, pruned_loss=0.08118, over 7809.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.284, pruned_loss=0.05875, over 1610910.76 frames. 
], batch size: 20, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:44:27,219 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4768, 2.0101, 3.9344, 1.5185, 2.7634, 2.1025, 1.5787, 2.7986], + device='cuda:3'), covar=tensor([0.2350, 0.3188, 0.0917, 0.5171, 0.2171, 0.3715, 0.2858, 0.2603], + device='cuda:3'), in_proj_covar=tensor([0.0535, 0.0624, 0.0558, 0.0661, 0.0657, 0.0606, 0.0552, 0.0641], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:44:29,933 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198148.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 08:44:33,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5308, 1.5665, 4.7651, 1.7606, 4.2417, 3.9351, 4.2965, 4.1825], + device='cuda:3'), covar=tensor([0.0610, 0.4706, 0.0499, 0.4368, 0.1085, 0.0934, 0.0559, 0.0645], + device='cuda:3'), in_proj_covar=tensor([0.0652, 0.0657, 0.0718, 0.0645, 0.0726, 0.0621, 0.0625, 0.0695], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:44:44,867 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4743, 1.3634, 2.3572, 1.2822, 2.3750, 2.5335, 2.7102, 2.1241], + device='cuda:3'), covar=tensor([0.1112, 0.1372, 0.0384, 0.2070, 0.0614, 0.0389, 0.0722, 0.0723], + device='cuda:3'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0318, 0.0318, 0.0275, 0.0434, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 08:44:47,427 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198173.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 08:44:58,965 INFO [train.py:901] (3/4) Epoch 25, batch 4200, loss[loss=0.1659, simple_loss=0.2462, pruned_loss=0.04283, over 7431.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2845, pruned_loss=0.05869, over 1613415.37 frames. ], batch size: 17, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:44:59,122 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5551, 1.4399, 4.7586, 1.7232, 4.2448, 3.9220, 4.3231, 4.1756], + device='cuda:3'), covar=tensor([0.0542, 0.4869, 0.0485, 0.4397, 0.1004, 0.0958, 0.0495, 0.0611], + device='cuda:3'), in_proj_covar=tensor([0.0649, 0.0654, 0.0714, 0.0642, 0.0723, 0.0618, 0.0622, 0.0692], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:45:08,029 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.351e+02 3.091e+02 3.845e+02 7.201e+02, threshold=6.182e+02, percent-clipped=4.0 +2023-02-07 08:45:09,394 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 08:45:25,548 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7463, 2.0542, 2.9456, 1.5705, 2.3219, 1.9935, 1.8332, 2.0761], + device='cuda:3'), covar=tensor([0.1802, 0.2531, 0.0852, 0.4485, 0.1784, 0.3367, 0.2210, 0.2345], + device='cuda:3'), in_proj_covar=tensor([0.0534, 0.0624, 0.0558, 0.0661, 0.0656, 0.0607, 0.0552, 0.0640], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:45:33,125 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-07 08:45:35,175 INFO [train.py:901] (3/4) Epoch 25, batch 4250, loss[loss=0.1653, simple_loss=0.2553, pruned_loss=0.03765, over 8286.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2836, pruned_loss=0.05779, over 1616143.73 frames. ], batch size: 23, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:45:41,587 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198249.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:43,798 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 08:45:44,708 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198254.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:45:59,331 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198274.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:02,100 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198278.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:09,935 INFO [train.py:901] (3/4) Epoch 25, batch 4300, loss[loss=0.1759, simple_loss=0.2704, pruned_loss=0.04067, over 8468.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2835, pruned_loss=0.05783, over 1612127.78 frames. ], batch size: 25, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:46:18,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.314e+02 2.735e+02 3.533e+02 6.805e+02, threshold=5.471e+02, percent-clipped=1.0 +2023-02-07 08:46:19,821 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198303.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:46:45,627 INFO [train.py:901] (3/4) Epoch 25, batch 4350, loss[loss=0.1845, simple_loss=0.2729, pruned_loss=0.04808, over 8498.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2816, pruned_loss=0.05656, over 1611370.51 frames. ], batch size: 26, lr: 3.02e-03, grad_scale: 16.0 +2023-02-07 08:46:56,198 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2406, 2.6063, 2.8091, 1.6638, 3.0888, 1.9090, 1.6065, 2.1963], + device='cuda:3'), covar=tensor([0.1078, 0.0531, 0.0483, 0.1034, 0.0650, 0.1112, 0.1068, 0.0632], + device='cuda:3'), in_proj_covar=tensor([0.0464, 0.0404, 0.0361, 0.0458, 0.0390, 0.0545, 0.0400, 0.0432], + device='cuda:3'), out_proj_covar=tensor([1.2334e-04, 1.0525e-04, 9.4623e-05, 1.2014e-04, 1.0228e-04, 1.5263e-04, + 1.0718e-04, 1.1366e-04], device='cuda:3') +2023-02-07 08:47:04,264 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 08:47:06,465 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198369.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:47:21,574 INFO [train.py:901] (3/4) Epoch 25, batch 4400, loss[loss=0.1789, simple_loss=0.2709, pruned_loss=0.04343, over 8333.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2831, pruned_loss=0.05751, over 1613332.58 frames. 
], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:27,079 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6509, 1.8575, 2.6673, 1.5103, 1.9649, 1.9965, 1.6439, 1.9616], + device='cuda:3'), covar=tensor([0.2026, 0.2799, 0.0985, 0.4653, 0.2111, 0.3433, 0.2543, 0.2379], + device='cuda:3'), in_proj_covar=tensor([0.0534, 0.0622, 0.0557, 0.0659, 0.0655, 0.0605, 0.0552, 0.0639], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:47:29,518 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.496e+02 2.935e+02 3.768e+02 7.665e+02, threshold=5.870e+02, percent-clipped=6.0 +2023-02-07 08:47:45,273 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 08:47:49,446 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4368, 2.6092, 2.9324, 1.5996, 3.1990, 2.0327, 1.6012, 2.2975], + device='cuda:3'), covar=tensor([0.0889, 0.0545, 0.0321, 0.0924, 0.0582, 0.0882, 0.0987, 0.0568], + device='cuda:3'), in_proj_covar=tensor([0.0464, 0.0404, 0.0362, 0.0457, 0.0391, 0.0543, 0.0399, 0.0431], + device='cuda:3'), out_proj_covar=tensor([1.2331e-04, 1.0517e-04, 9.4670e-05, 1.1996e-04, 1.0243e-04, 1.5217e-04, + 1.0701e-04, 1.1337e-04], device='cuda:3') +2023-02-07 08:47:56,744 INFO [train.py:901] (3/4) Epoch 25, batch 4450, loss[loss=0.2252, simple_loss=0.3002, pruned_loss=0.0751, over 8510.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.0579, over 1611811.12 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:47:58,933 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198443.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:48:29,551 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8912, 1.7698, 2.7626, 2.0891, 2.4636, 1.8546, 1.6797, 1.3473], + device='cuda:3'), covar=tensor([0.7301, 0.6213, 0.2198, 0.4607, 0.3369, 0.4609, 0.3002, 0.5935], + device='cuda:3'), in_proj_covar=tensor([0.0946, 0.0999, 0.0816, 0.0969, 0.1011, 0.0911, 0.0756, 0.0836], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 08:48:31,963 INFO [train.py:901] (3/4) Epoch 25, batch 4500, loss[loss=0.2318, simple_loss=0.3035, pruned_loss=0.08011, over 7918.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2829, pruned_loss=0.05797, over 1610222.10 frames. ], batch size: 20, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:48:40,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.274e+02 2.771e+02 3.541e+02 5.802e+02, threshold=5.543e+02, percent-clipped=0.0 +2023-02-07 08:48:40,467 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-07 08:48:46,360 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8046, 1.7560, 2.5071, 1.6519, 1.3861, 2.4974, 0.4806, 1.5122], + device='cuda:3'), covar=tensor([0.1980, 0.1372, 0.0356, 0.1389, 0.2803, 0.0423, 0.2106, 0.1544], + device='cuda:3'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0222, 0.0275, 0.0141, 0.0172, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:48:51,919 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198517.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:48:57,525 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9416, 6.0824, 5.2148, 2.4821, 5.3840, 5.6867, 5.4468, 5.5536], + device='cuda:3'), covar=tensor([0.0442, 0.0397, 0.0869, 0.4184, 0.0681, 0.0606, 0.1093, 0.0514], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0450, 0.0438, 0.0547, 0.0435, 0.0454, 0.0426, 0.0398], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:49:08,826 INFO [train.py:901] (3/4) Epoch 25, batch 4550, loss[loss=0.2123, simple_loss=0.3017, pruned_loss=0.06146, over 8467.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05839, over 1613966.58 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:22,068 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198558.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:49:44,787 INFO [train.py:901] (3/4) Epoch 25, batch 4600, loss[loss=0.1786, simple_loss=0.2726, pruned_loss=0.04229, over 8475.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2849, pruned_loss=0.05929, over 1611931.57 frames. ], batch size: 25, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:49:52,974 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.563e+02 2.449e+02 2.940e+02 3.432e+02 8.422e+02, threshold=5.881e+02, percent-clipped=6.0 +2023-02-07 08:50:09,324 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198625.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:14,649 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198633.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:19,158 INFO [train.py:901] (3/4) Epoch 25, batch 4650, loss[loss=0.1979, simple_loss=0.2888, pruned_loss=0.05356, over 8353.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2851, pruned_loss=0.05984, over 1610244.92 frames. ], batch size: 24, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:50:26,833 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:50:40,270 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5866, 1.7190, 2.0912, 1.6429, 1.0400, 1.7585, 2.3028, 1.8707], + device='cuda:3'), covar=tensor([0.0471, 0.1223, 0.1527, 0.1378, 0.0589, 0.1426, 0.0603, 0.0634], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:50:54,512 INFO [train.py:901] (3/4) Epoch 25, batch 4700, loss[loss=0.1963, simple_loss=0.2866, pruned_loss=0.05302, over 8473.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2847, pruned_loss=0.0596, over 1606458.05 frames. 
], batch size: 29, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:51:03,371 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.164e+02 2.735e+02 3.323e+02 7.623e+02, threshold=5.470e+02, percent-clipped=2.0 +2023-02-07 08:51:29,739 INFO [train.py:901] (3/4) Epoch 25, batch 4750, loss[loss=0.2367, simple_loss=0.3268, pruned_loss=0.07331, over 8293.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2841, pruned_loss=0.0593, over 1608294.32 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:51:31,918 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3188, 2.6424, 2.9796, 1.7075, 3.2201, 1.9776, 1.5958, 2.3523], + device='cuda:3'), covar=tensor([0.0862, 0.0445, 0.0366, 0.0881, 0.0411, 0.0950, 0.0960, 0.0525], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0404, 0.0362, 0.0457, 0.0391, 0.0544, 0.0400, 0.0431], + device='cuda:3'), out_proj_covar=tensor([1.2369e-04, 1.0541e-04, 9.4773e-05, 1.1975e-04, 1.0267e-04, 1.5243e-04, + 1.0723e-04, 1.1324e-04], device='cuda:3') +2023-02-07 08:51:42,014 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 08:51:45,378 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 08:52:05,183 INFO [train.py:901] (3/4) Epoch 25, batch 4800, loss[loss=0.2412, simple_loss=0.3246, pruned_loss=0.07893, over 8516.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2844, pruned_loss=0.05979, over 1608267.70 frames. ], batch size: 39, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:52:13,383 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 2.392e+02 2.917e+02 3.409e+02 6.169e+02, threshold=5.835e+02, percent-clipped=3.0 +2023-02-07 08:52:22,058 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=198814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:36,110 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 08:52:39,636 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=198839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:40,102 INFO [train.py:901] (3/4) Epoch 25, batch 4850, loss[loss=0.2281, simple_loss=0.2869, pruned_loss=0.08465, over 7804.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2827, pruned_loss=0.05903, over 1602677.72 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:52:55,413 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198861.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:52:56,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5743, 1.4835, 1.8236, 1.3124, 1.2636, 1.8296, 0.2669, 1.3046], + device='cuda:3'), covar=tensor([0.1689, 0.1124, 0.0419, 0.0764, 0.2548, 0.0450, 0.1953, 0.1197], + device='cuda:3'), in_proj_covar=tensor([0.0195, 0.0201, 0.0131, 0.0220, 0.0273, 0.0141, 0.0171, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 08:53:01,733 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198870.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:15,870 INFO [train.py:901] (3/4) Epoch 25, batch 4900, loss[loss=0.3222, simple_loss=0.3709, pruned_loss=0.1368, over 6607.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2828, pruned_loss=0.05913, over 1601992.87 frames. 
], batch size: 71, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:53:24,157 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198901.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:24,667 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.754e+02 2.376e+02 2.954e+02 3.660e+02 6.336e+02, threshold=5.908e+02, percent-clipped=3.0 +2023-02-07 08:53:30,960 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1411, 3.5628, 2.5001, 2.9718, 2.6661, 2.0650, 2.6660, 3.1398], + device='cuda:3'), covar=tensor([0.1790, 0.0458, 0.1082, 0.0718, 0.0817, 0.1518, 0.1213, 0.1112], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0235, 0.0339, 0.0311, 0.0300, 0.0343, 0.0346, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 08:53:50,032 INFO [train.py:901] (3/4) Epoch 25, batch 4950, loss[loss=0.2183, simple_loss=0.2939, pruned_loss=0.07136, over 8018.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2824, pruned_loss=0.05859, over 1604890.11 frames. ], batch size: 22, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:53:54,448 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=198945.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:53:54,544 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5383, 1.8620, 2.9300, 1.3927, 2.0556, 1.9546, 1.5759, 2.1766], + device='cuda:3'), covar=tensor([0.1996, 0.2734, 0.0888, 0.4782, 0.2146, 0.3174, 0.2486, 0.2364], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0622, 0.0557, 0.0661, 0.0657, 0.0603, 0.0553, 0.0641], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 08:54:15,962 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=198976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:16,531 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=198977.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:54:25,252 INFO [train.py:901] (3/4) Epoch 25, batch 5000, loss[loss=0.1776, simple_loss=0.264, pruned_loss=0.04559, over 7981.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05803, over 1603765.16 frames. ], batch size: 21, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:54:33,917 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 2.360e+02 2.883e+02 3.509e+02 6.136e+02, threshold=5.766e+02, percent-clipped=1.0 +2023-02-07 08:54:59,851 INFO [train.py:901] (3/4) Epoch 25, batch 5050, loss[loss=0.2051, simple_loss=0.299, pruned_loss=0.05563, over 8297.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2837, pruned_loss=0.05919, over 1606262.09 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:55:14,355 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 08:55:28,312 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.55 vs. limit=2.0 +2023-02-07 08:55:35,772 INFO [train.py:901] (3/4) Epoch 25, batch 5100, loss[loss=0.203, simple_loss=0.2895, pruned_loss=0.05827, over 8894.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2841, pruned_loss=0.05929, over 1605185.84 frames. 
], batch size: 40, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:55:37,432 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199092.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:55:44,130 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.470e+02 3.005e+02 3.768e+02 7.063e+02, threshold=6.010e+02, percent-clipped=5.0 +2023-02-07 08:56:11,855 INFO [train.py:901] (3/4) Epoch 25, batch 5150, loss[loss=0.1647, simple_loss=0.2543, pruned_loss=0.0376, over 8076.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05939, over 1603368.89 frames. ], batch size: 21, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:56:47,046 INFO [train.py:901] (3/4) Epoch 25, batch 5200, loss[loss=0.1973, simple_loss=0.2807, pruned_loss=0.057, over 8099.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2836, pruned_loss=0.05874, over 1606095.67 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:56:49,910 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:56:50,569 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1230, 1.4630, 1.6608, 1.3598, 1.0057, 1.4716, 1.9706, 1.9906], + device='cuda:3'), covar=tensor([0.0559, 0.1698, 0.2287, 0.1864, 0.0673, 0.1951, 0.0733, 0.0641], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0100, 0.0164, 0.0113, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 08:56:55,021 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 2.381e+02 2.894e+02 3.514e+02 1.206e+03, threshold=5.788e+02, percent-clipped=6.0 +2023-02-07 08:57:04,084 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199214.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:12,777 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 08:57:17,169 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199232.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:22,320 INFO [train.py:901] (3/4) Epoch 25, batch 5250, loss[loss=0.191, simple_loss=0.2833, pruned_loss=0.04936, over 8748.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2841, pruned_loss=0.05928, over 1605226.70 frames. ], batch size: 30, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:57:25,800 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199245.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:34,835 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199257.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:34,966 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.56 vs. limit=2.0 +2023-02-07 08:57:56,837 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199289.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:57:57,413 INFO [train.py:901] (3/4) Epoch 25, batch 5300, loss[loss=0.2179, simple_loss=0.3019, pruned_loss=0.06699, over 8193.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.05898, over 1609200.52 frames. 
], batch size: 23, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:58:05,707 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.313e+02 2.718e+02 3.488e+02 6.386e+02, threshold=5.437e+02, percent-clipped=3.0 +2023-02-07 08:58:25,239 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199329.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:32,735 INFO [train.py:901] (3/4) Epoch 25, batch 5350, loss[loss=0.1886, simple_loss=0.2721, pruned_loss=0.05255, over 7815.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2843, pruned_loss=0.05887, over 1610706.96 frames. ], batch size: 20, lr: 3.01e-03, grad_scale: 16.0 +2023-02-07 08:58:38,545 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199348.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:47,620 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199360.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:58:57,362 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199373.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:02,728 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0420, 1.6887, 3.2331, 1.5889, 2.5960, 3.5232, 3.6285, 3.0459], + device='cuda:3'), covar=tensor([0.1112, 0.1703, 0.0392, 0.1941, 0.1122, 0.0236, 0.0649, 0.0488], + device='cuda:3'), in_proj_covar=tensor([0.0302, 0.0323, 0.0288, 0.0316, 0.0318, 0.0274, 0.0432, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 08:59:08,731 INFO [train.py:901] (3/4) Epoch 25, batch 5400, loss[loss=0.2148, simple_loss=0.2926, pruned_loss=0.06853, over 8508.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2838, pruned_loss=0.05863, over 1613009.40 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 32.0 +2023-02-07 08:59:18,150 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.323e+02 2.292e+02 2.858e+02 3.757e+02 5.815e+02, threshold=5.716e+02, percent-clipped=3.0 +2023-02-07 08:59:18,353 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199404.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:28,357 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 08:59:42,053 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8226, 2.0247, 2.1264, 1.4310, 2.2157, 1.5474, 0.7791, 1.9728], + device='cuda:3'), covar=tensor([0.0739, 0.0383, 0.0313, 0.0735, 0.0520, 0.1043, 0.1052, 0.0384], + device='cuda:3'), in_proj_covar=tensor([0.0462, 0.0401, 0.0359, 0.0456, 0.0389, 0.0541, 0.0399, 0.0430], + device='cuda:3'), out_proj_covar=tensor([1.2274e-04, 1.0438e-04, 9.3970e-05, 1.1961e-04, 1.0184e-04, 1.5163e-04, + 1.0682e-04, 1.1304e-04], device='cuda:3') +2023-02-07 08:59:43,202 INFO [train.py:901] (3/4) Epoch 25, batch 5450, loss[loss=0.2212, simple_loss=0.2998, pruned_loss=0.07135, over 8350.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2844, pruned_loss=0.05868, over 1610516.87 frames. ], batch size: 26, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:00:08,085 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. 
Duration: 31.02225 +2023-02-07 09:00:08,234 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199476.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:00:17,975 INFO [train.py:901] (3/4) Epoch 25, batch 5500, loss[loss=0.2289, simple_loss=0.2923, pruned_loss=0.08273, over 7652.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2844, pruned_loss=0.05849, over 1616009.48 frames. ], batch size: 19, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:00:28,263 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.278e+02 2.767e+02 3.622e+02 8.817e+02, threshold=5.534e+02, percent-clipped=3.0 +2023-02-07 09:00:33,817 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199512.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:00:49,506 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8012, 1.6129, 2.4045, 1.5510, 1.3801, 2.3483, 0.3820, 1.5050], + device='cuda:3'), covar=tensor([0.1428, 0.1460, 0.0367, 0.1182, 0.2360, 0.0455, 0.2082, 0.1383], + device='cuda:3'), in_proj_covar=tensor([0.0196, 0.0201, 0.0132, 0.0221, 0.0276, 0.0142, 0.0173, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 09:00:52,119 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:00:53,322 INFO [train.py:901] (3/4) Epoch 25, batch 5550, loss[loss=0.2054, simple_loss=0.2783, pruned_loss=0.06623, over 7226.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.05847, over 1614123.42 frames. ], batch size: 16, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:01:02,092 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199553.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:01:24,436 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199585.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:01:27,625 INFO [train.py:901] (3/4) Epoch 25, batch 5600, loss[loss=0.1913, simple_loss=0.2865, pruned_loss=0.048, over 8467.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2843, pruned_loss=0.05879, over 1613004.96 frames. ], batch size: 29, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:01:38,060 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.538e+02 3.116e+02 4.016e+02 1.228e+03, threshold=6.232e+02, percent-clipped=11.0 +2023-02-07 09:01:39,616 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3438, 1.6388, 4.5413, 1.8131, 4.0402, 3.7922, 4.1245, 3.9975], + device='cuda:3'), covar=tensor([0.0633, 0.4578, 0.0566, 0.4079, 0.1074, 0.0923, 0.0554, 0.0653], + device='cuda:3'), in_proj_covar=tensor([0.0654, 0.0654, 0.0722, 0.0644, 0.0727, 0.0618, 0.0621, 0.0697], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:01:43,188 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199610.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:01:47,365 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199616.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:03,339 INFO [train.py:901] (3/4) Epoch 25, batch 5650, loss[loss=0.2223, simple_loss=0.2977, pruned_loss=0.07341, over 7655.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.0585, over 1616417.80 frames. 
], batch size: 19, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:02:04,211 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199641.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:13,531 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:14,005 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 09:02:18,334 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:36,572 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199685.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:02:39,924 INFO [train.py:901] (3/4) Epoch 25, batch 5700, loss[loss=0.1929, simple_loss=0.2763, pruned_loss=0.05479, over 8105.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2818, pruned_loss=0.05739, over 1608621.59 frames. ], batch size: 23, lr: 3.01e-03, grad_scale: 8.0 +2023-02-07 09:02:49,769 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.681e+02 2.200e+02 2.647e+02 3.419e+02 7.306e+02, threshold=5.294e+02, percent-clipped=3.0 +2023-02-07 09:02:52,382 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 09:03:16,065 INFO [train.py:901] (3/4) Epoch 25, batch 5750, loss[loss=0.2074, simple_loss=0.2893, pruned_loss=0.06274, over 8504.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2812, pruned_loss=0.05745, over 1607598.67 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:03:21,551 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 09:03:29,026 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3270, 1.6845, 4.5384, 2.0032, 2.6995, 5.2323, 5.2726, 4.4949], + device='cuda:3'), covar=tensor([0.1201, 0.1925, 0.0272, 0.1978, 0.1117, 0.0193, 0.0523, 0.0573], + device='cuda:3'), in_proj_covar=tensor([0.0302, 0.0323, 0.0287, 0.0317, 0.0318, 0.0274, 0.0432, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 09:03:30,822 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:03:50,495 INFO [train.py:901] (3/4) Epoch 25, batch 5800, loss[loss=0.1707, simple_loss=0.2468, pruned_loss=0.04726, over 7447.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.282, pruned_loss=0.05777, over 1611493.31 frames. 
], batch size: 17, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:04:00,808 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.348e+02 2.869e+02 3.742e+02 6.332e+02, threshold=5.738e+02, percent-clipped=6.0 +2023-02-07 09:04:11,738 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199820.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:04:13,183 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2667, 3.6550, 2.2275, 2.8563, 2.9105, 1.9557, 2.9344, 3.0855], + device='cuda:3'), covar=tensor([0.1507, 0.0383, 0.1162, 0.0715, 0.0696, 0.1430, 0.0938, 0.0877], + device='cuda:3'), in_proj_covar=tensor([0.0357, 0.0236, 0.0340, 0.0311, 0.0301, 0.0342, 0.0346, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 09:04:26,580 INFO [train.py:901] (3/4) Epoch 25, batch 5850, loss[loss=0.2087, simple_loss=0.2957, pruned_loss=0.06089, over 8459.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2817, pruned_loss=0.05761, over 1611115.25 frames. ], batch size: 25, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:04:37,361 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199856.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:04:51,708 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199877.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:01,012 INFO [train.py:901] (3/4) Epoch 25, batch 5900, loss[loss=0.2807, simple_loss=0.3628, pruned_loss=0.09931, over 8489.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2818, pruned_loss=0.05752, over 1611229.82 frames. ], batch size: 28, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:05:05,825 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=199897.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:05:10,363 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 2.352e+02 2.828e+02 3.481e+02 7.421e+02, threshold=5.657e+02, percent-clipped=3.0 +2023-02-07 09:05:13,988 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=199909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:26,460 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=199927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:31,354 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=199934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:32,031 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:05:35,262 INFO [train.py:901] (3/4) Epoch 25, batch 5950, loss[loss=0.2239, simple_loss=0.3195, pruned_loss=0.06415, over 8366.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2827, pruned_loss=0.05791, over 1616177.59 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:05:58,240 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=199971.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:06:11,076 INFO [train.py:901] (3/4) Epoch 25, batch 6000, loss[loss=0.2416, simple_loss=0.3123, pruned_loss=0.08552, over 7438.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.05826, over 1612337.59 frames. 
], batch size: 72, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:06:11,076 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 09:06:23,699 INFO [train.py:935] (3/4) Epoch 25, validation: loss=0.1725, simple_loss=0.2721, pruned_loss=0.03643, over 944034.00 frames. +2023-02-07 09:06:23,700 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 09:06:34,575 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.373e+02 2.952e+02 3.581e+02 7.260e+02, threshold=5.903e+02, percent-clipped=4.0 +2023-02-07 09:06:38,841 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6335, 2.0183, 1.6808, 2.7727, 1.2849, 1.4175, 2.0246, 2.0415], + device='cuda:3'), covar=tensor([0.1049, 0.0902, 0.1192, 0.0382, 0.1186, 0.1673, 0.0917, 0.0869], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0194, 0.0244, 0.0211, 0.0204, 0.0247, 0.0248, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 09:06:40,192 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200012.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:06:46,328 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0136, 2.2876, 1.9495, 2.8929, 1.5722, 1.7560, 2.3108, 2.3450], + device='cuda:3'), covar=tensor([0.0723, 0.0763, 0.0823, 0.0321, 0.0942, 0.1244, 0.0630, 0.0762], + device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0194, 0.0244, 0.0211, 0.0205, 0.0247, 0.0248, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 09:06:53,225 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 09:06:59,664 INFO [train.py:901] (3/4) Epoch 25, batch 6050, loss[loss=0.1764, simple_loss=0.2533, pruned_loss=0.04974, over 7528.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2816, pruned_loss=0.05846, over 1608669.71 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:07:32,409 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4364, 2.5390, 1.8939, 2.2685, 2.1243, 1.6325, 2.1132, 2.1915], + device='cuda:3'), covar=tensor([0.1684, 0.0481, 0.1197, 0.0679, 0.0779, 0.1630, 0.0983, 0.1040], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0239, 0.0344, 0.0315, 0.0305, 0.0346, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 09:07:35,022 INFO [train.py:901] (3/4) Epoch 25, batch 6100, loss[loss=0.2136, simple_loss=0.2977, pruned_loss=0.06475, over 8415.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2813, pruned_loss=0.05831, over 1606502.66 frames. 
], batch size: 49, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:07:36,028 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6618, 1.9188, 3.0574, 1.4426, 2.3403, 2.0241, 1.7522, 2.2422], + device='cuda:3'), covar=tensor([0.2251, 0.3111, 0.1066, 0.5438, 0.2173, 0.3824, 0.2728, 0.2971], + device='cuda:3'), in_proj_covar=tensor([0.0533, 0.0624, 0.0558, 0.0660, 0.0658, 0.0606, 0.0554, 0.0640], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:07:45,350 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.318e+02 2.345e+02 2.959e+02 3.596e+02 7.197e+02, threshold=5.919e+02, percent-clipped=3.0 +2023-02-07 09:07:54,349 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 09:08:05,126 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2490, 3.1066, 2.9051, 1.6401, 2.8747, 2.8894, 2.8054, 2.8165], + device='cuda:3'), covar=tensor([0.1061, 0.0787, 0.1246, 0.4139, 0.1050, 0.1159, 0.1599, 0.1017], + device='cuda:3'), in_proj_covar=tensor([0.0533, 0.0450, 0.0436, 0.0547, 0.0434, 0.0454, 0.0430, 0.0398], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:08:05,860 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200133.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:11,250 INFO [train.py:901] (3/4) Epoch 25, batch 6150, loss[loss=0.2056, simple_loss=0.2598, pruned_loss=0.07575, over 7697.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2805, pruned_loss=0.05792, over 1607738.59 frames. ], batch size: 18, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:08:23,401 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200158.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:46,118 INFO [train.py:901] (3/4) Epoch 25, batch 6200, loss[loss=0.2552, simple_loss=0.3219, pruned_loss=0.0942, over 6514.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2793, pruned_loss=0.05729, over 1599686.37 frames. ], batch size: 72, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:08:47,079 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:08:48,286 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5691, 2.6180, 1.7647, 2.2466, 2.0978, 1.5022, 2.0418, 2.2214], + device='cuda:3'), covar=tensor([0.1628, 0.0427, 0.1286, 0.0691, 0.0846, 0.1745, 0.1074, 0.0966], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0239, 0.0345, 0.0316, 0.0305, 0.0347, 0.0351, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 09:08:49,563 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200195.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:08:55,699 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 2.314e+02 2.821e+02 3.535e+02 6.331e+02, threshold=5.643e+02, percent-clipped=2.0 +2023-02-07 09:09:01,020 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.16 vs. 
limit=5.0 +2023-02-07 09:09:05,004 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:09:13,111 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200227.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:09:21,664 INFO [train.py:901] (3/4) Epoch 25, batch 6250, loss[loss=0.2159, simple_loss=0.2993, pruned_loss=0.06626, over 8201.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2806, pruned_loss=0.05762, over 1606498.80 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:09:29,782 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200252.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:09:41,368 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200268.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:09:43,284 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:09:56,025 INFO [train.py:901] (3/4) Epoch 25, batch 6300, loss[loss=0.1774, simple_loss=0.2669, pruned_loss=0.04392, over 8241.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2822, pruned_loss=0.05858, over 1609501.96 frames. ], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:09:58,153 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200293.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:10:00,758 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200297.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:10:06,123 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 2.537e+02 3.046e+02 4.211e+02 7.306e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 09:10:31,233 INFO [train.py:901] (3/4) Epoch 25, batch 6350, loss[loss=0.2072, simple_loss=0.2897, pruned_loss=0.06228, over 8294.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2822, pruned_loss=0.05851, over 1612932.81 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:10:42,963 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 09:11:03,765 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200386.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:11:06,376 INFO [train.py:901] (3/4) Epoch 25, batch 6400, loss[loss=0.2102, simple_loss=0.2964, pruned_loss=0.06203, over 8362.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2824, pruned_loss=0.05849, over 1613782.09 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:11:15,859 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.247e+02 2.600e+02 3.696e+02 8.014e+02, threshold=5.200e+02, percent-clipped=2.0 +2023-02-07 09:11:19,412 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8493, 2.4165, 3.7372, 2.0199, 2.0621, 3.7536, 0.7859, 2.1790], + device='cuda:3'), covar=tensor([0.1317, 0.1420, 0.0230, 0.1549, 0.2179, 0.0262, 0.2085, 0.1432], + device='cuda:3'), in_proj_covar=tensor([0.0195, 0.0200, 0.0131, 0.0219, 0.0274, 0.0141, 0.0171, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 09:11:40,859 INFO [train.py:901] (3/4) Epoch 25, batch 6450, loss[loss=0.1849, simple_loss=0.2766, pruned_loss=0.04658, over 8366.00 frames. 
], tot_loss[loss=0.1992, simple_loss=0.282, pruned_loss=0.05816, over 1613198.54 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:11:44,528 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.63 vs. limit=2.0 +2023-02-07 09:12:16,057 INFO [train.py:901] (3/4) Epoch 25, batch 6500, loss[loss=0.1686, simple_loss=0.2551, pruned_loss=0.04104, over 7659.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2826, pruned_loss=0.05797, over 1611914.58 frames. ], batch size: 19, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:12:26,027 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 2.181e+02 2.613e+02 3.190e+02 4.719e+02, threshold=5.226e+02, percent-clipped=0.0 +2023-02-07 09:12:49,416 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200539.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:12:49,927 INFO [train.py:901] (3/4) Epoch 25, batch 6550, loss[loss=0.2662, simple_loss=0.3315, pruned_loss=0.1005, over 6894.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2842, pruned_loss=0.05864, over 1611077.74 frames. ], batch size: 71, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:13:09,855 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 09:13:26,032 INFO [train.py:901] (3/4) Epoch 25, batch 6600, loss[loss=0.2183, simple_loss=0.3056, pruned_loss=0.06551, over 8468.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2834, pruned_loss=0.05856, over 1610126.27 frames. ], batch size: 29, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:13:30,830 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 09:13:35,558 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.321e+02 2.722e+02 3.541e+02 8.507e+02, threshold=5.445e+02, percent-clipped=6.0 +2023-02-07 09:14:00,770 INFO [train.py:901] (3/4) Epoch 25, batch 6650, loss[loss=0.2222, simple_loss=0.3028, pruned_loss=0.07084, over 8493.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05932, over 1614540.76 frames. ], batch size: 26, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:14:01,602 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=200641.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:02,424 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200642.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:10,430 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200654.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:14:16,377 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=200663.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:19,893 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200667.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:14:35,348 INFO [train.py:901] (3/4) Epoch 25, batch 6700, loss[loss=0.185, simple_loss=0.2791, pruned_loss=0.04546, over 8186.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.0588, over 1611937.52 frames. 
], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:14:45,630 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 2.443e+02 2.859e+02 3.397e+02 5.440e+02, threshold=5.717e+02, percent-clipped=0.0 +2023-02-07 09:15:01,497 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5476, 1.7562, 1.8828, 1.1593, 1.9237, 1.3855, 0.3930, 1.7843], + device='cuda:3'), covar=tensor([0.0609, 0.0430, 0.0360, 0.0656, 0.0438, 0.1055, 0.0978, 0.0346], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0403, 0.0363, 0.0460, 0.0393, 0.0546, 0.0402, 0.0432], + device='cuda:3'), out_proj_covar=tensor([1.2363e-04, 1.0499e-04, 9.4921e-05, 1.2055e-04, 1.0303e-04, 1.5292e-04, + 1.0762e-04, 1.1364e-04], device='cuda:3') +2023-02-07 09:15:10,910 INFO [train.py:901] (3/4) Epoch 25, batch 6750, loss[loss=0.1885, simple_loss=0.2746, pruned_loss=0.05115, over 8254.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2842, pruned_loss=0.05819, over 1616038.12 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:15:11,172 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2541, 2.0555, 2.7066, 2.2356, 2.7215, 2.3302, 2.1551, 1.5863], + device='cuda:3'), covar=tensor([0.5799, 0.5067, 0.2007, 0.3774, 0.2534, 0.3304, 0.1967, 0.5459], + device='cuda:3'), in_proj_covar=tensor([0.0956, 0.1007, 0.0823, 0.0978, 0.1019, 0.0918, 0.0766, 0.0841], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 09:15:11,517 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 09:15:22,795 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=200756.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:15:47,008 INFO [train.py:901] (3/4) Epoch 25, batch 6800, loss[loss=0.2396, simple_loss=0.3206, pruned_loss=0.07935, over 8656.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2843, pruned_loss=0.0585, over 1617863.91 frames. ], batch size: 34, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:15:51,862 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 09:15:56,787 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.322e+02 2.853e+02 3.502e+02 6.162e+02, threshold=5.706e+02, percent-clipped=1.0 +2023-02-07 09:16:21,863 INFO [train.py:901] (3/4) Epoch 25, batch 6850, loss[loss=0.1961, simple_loss=0.2904, pruned_loss=0.05092, over 8250.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2841, pruned_loss=0.05832, over 1617694.97 frames. ], batch size: 24, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:16:40,949 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 09:16:56,587 INFO [train.py:901] (3/4) Epoch 25, batch 6900, loss[loss=0.1917, simple_loss=0.2834, pruned_loss=0.05006, over 8138.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2845, pruned_loss=0.05863, over 1619439.86 frames. 
], batch size: 22, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:17:06,815 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.244e+02 2.770e+02 3.533e+02 6.127e+02, threshold=5.541e+02, percent-clipped=2.0 +2023-02-07 09:17:11,222 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=200910.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:17:28,201 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=200935.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:17:31,354 INFO [train.py:901] (3/4) Epoch 25, batch 6950, loss[loss=0.2407, simple_loss=0.3315, pruned_loss=0.07494, over 8099.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2848, pruned_loss=0.05906, over 1617234.51 frames. ], batch size: 23, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:17:50,969 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 09:17:58,881 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1109, 1.1912, 1.2533, 0.9340, 1.2491, 1.0160, 0.3468, 1.2149], + device='cuda:3'), covar=tensor([0.0422, 0.0347, 0.0259, 0.0401, 0.0376, 0.0680, 0.0796, 0.0248], + device='cuda:3'), in_proj_covar=tensor([0.0467, 0.0405, 0.0363, 0.0462, 0.0393, 0.0547, 0.0404, 0.0434], + device='cuda:3'), out_proj_covar=tensor([1.2390e-04, 1.0556e-04, 9.5089e-05, 1.2130e-04, 1.0309e-04, 1.5310e-04, + 1.0798e-04, 1.1394e-04], device='cuda:3') +2023-02-07 09:18:07,788 INFO [train.py:901] (3/4) Epoch 25, batch 7000, loss[loss=0.2063, simple_loss=0.2914, pruned_loss=0.06054, over 8477.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2853, pruned_loss=0.05956, over 1617243.85 frames. ], batch size: 39, lr: 3.00e-03, grad_scale: 8.0 +2023-02-07 09:18:12,093 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9177, 2.1177, 1.7998, 2.6768, 1.2628, 1.5737, 1.9578, 2.0656], + device='cuda:3'), covar=tensor([0.0731, 0.0775, 0.0878, 0.0357, 0.1101, 0.1391, 0.0767, 0.0764], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0195, 0.0244, 0.0211, 0.0204, 0.0247, 0.0248, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 09:18:17,568 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.593e+02 3.026e+02 3.851e+02 8.547e+02, threshold=6.052e+02, percent-clipped=7.0 +2023-02-07 09:18:19,776 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:23,201 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201012.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:40,488 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201037.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:18:42,298 INFO [train.py:901] (3/4) Epoch 25, batch 7050, loss[loss=0.1969, simple_loss=0.2839, pruned_loss=0.05497, over 8465.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2847, pruned_loss=0.05948, over 1614933.70 frames. ], batch size: 29, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:16,835 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201089.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:19:17,389 INFO [train.py:901] (3/4) Epoch 25, batch 7100, loss[loss=0.1596, simple_loss=0.2484, pruned_loss=0.03541, over 7797.00 frames. 
], tot_loss[loss=0.2006, simple_loss=0.2836, pruned_loss=0.05879, over 1615251.49 frames. ], batch size: 19, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:26,871 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.246e+02 2.728e+02 3.277e+02 5.322e+02, threshold=5.456e+02, percent-clipped=0.0 +2023-02-07 09:19:40,021 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201122.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:19:52,248 INFO [train.py:901] (3/4) Epoch 25, batch 7150, loss[loss=0.1949, simple_loss=0.2861, pruned_loss=0.05183, over 8194.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2835, pruned_loss=0.05853, over 1617387.62 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:19:53,558 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.19 vs. limit=5.0 +2023-02-07 09:20:28,441 INFO [train.py:901] (3/4) Epoch 25, batch 7200, loss[loss=0.2318, simple_loss=0.3246, pruned_loss=0.06946, over 8607.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05887, over 1618212.54 frames. ], batch size: 31, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:20:34,999 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-07 09:20:38,240 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.475e+02 3.123e+02 4.294e+02 9.608e+02, threshold=6.246e+02, percent-clipped=8.0 +2023-02-07 09:20:56,018 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 09:21:03,479 INFO [train.py:901] (3/4) Epoch 25, batch 7250, loss[loss=0.1899, simple_loss=0.2754, pruned_loss=0.05218, over 8227.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2831, pruned_loss=0.05843, over 1617218.57 frames. ], batch size: 22, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:21:25,201 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:21:37,431 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4100, 1.1846, 2.3715, 1.2690, 2.1519, 2.5659, 2.7119, 2.1620], + device='cuda:3'), covar=tensor([0.1144, 0.1665, 0.0462, 0.2123, 0.0784, 0.0406, 0.0856, 0.0716], + device='cuda:3'), in_proj_covar=tensor([0.0303, 0.0325, 0.0290, 0.0319, 0.0318, 0.0276, 0.0436, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 09:21:37,959 INFO [train.py:901] (3/4) Epoch 25, batch 7300, loss[loss=0.1885, simple_loss=0.2772, pruned_loss=0.04991, over 7974.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2824, pruned_loss=0.05769, over 1617006.39 frames. 
], batch size: 21, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:21:39,379 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201292.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:21:48,706 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.341e+02 2.809e+02 3.464e+02 9.506e+02, threshold=5.617e+02, percent-clipped=4.0 +2023-02-07 09:21:50,349 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2907, 2.0939, 2.6779, 2.2630, 2.6630, 2.4165, 2.2056, 1.6202], + device='cuda:3'), covar=tensor([0.5737, 0.5416, 0.2202, 0.4071, 0.2614, 0.3355, 0.1972, 0.5719], + device='cuda:3'), in_proj_covar=tensor([0.0957, 0.1008, 0.0826, 0.0979, 0.1021, 0.0918, 0.0767, 0.0844], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 09:22:13,163 INFO [train.py:901] (3/4) Epoch 25, batch 7350, loss[loss=0.2221, simple_loss=0.3057, pruned_loss=0.0693, over 8470.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2823, pruned_loss=0.05723, over 1614189.20 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:22:27,893 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.86 vs. limit=5.0 +2023-02-07 09:22:39,016 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 09:22:40,539 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201378.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:22:48,513 INFO [train.py:901] (3/4) Epoch 25, batch 7400, loss[loss=0.1617, simple_loss=0.2438, pruned_loss=0.03976, over 7806.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2822, pruned_loss=0.05715, over 1616324.07 frames. ], batch size: 19, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:22:57,496 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201403.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:22:57,970 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.231e+02 2.880e+02 3.857e+02 7.685e+02, threshold=5.759e+02, percent-clipped=5.0 +2023-02-07 09:22:58,695 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 09:23:19,303 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201433.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:23:23,998 INFO [train.py:901] (3/4) Epoch 25, batch 7450, loss[loss=0.1866, simple_loss=0.2734, pruned_loss=0.04991, over 8246.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.282, pruned_loss=0.05756, over 1615611.35 frames. ], batch size: 22, lr: 2.99e-03, grad_scale: 16.0 +2023-02-07 09:23:37,754 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 09:23:59,779 INFO [train.py:901] (3/4) Epoch 25, batch 7500, loss[loss=0.1815, simple_loss=0.263, pruned_loss=0.05006, over 7642.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2824, pruned_loss=0.05769, over 1612692.07 frames. 
], batch size: 19, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:24:09,788 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 2.281e+02 2.758e+02 3.564e+02 6.593e+02, threshold=5.515e+02, percent-clipped=6.0 +2023-02-07 09:24:12,067 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5918, 2.4303, 1.7493, 2.2954, 1.9835, 1.4005, 1.9916, 2.2198], + device='cuda:3'), covar=tensor([0.1436, 0.0419, 0.1411, 0.0534, 0.0867, 0.1862, 0.1126, 0.0878], + device='cuda:3'), in_proj_covar=tensor([0.0360, 0.0239, 0.0345, 0.0316, 0.0304, 0.0348, 0.0352, 0.0324], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 09:24:34,709 INFO [train.py:901] (3/4) Epoch 25, batch 7550, loss[loss=0.1992, simple_loss=0.2894, pruned_loss=0.05454, over 8194.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.05838, over 1610674.60 frames. ], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:24:40,275 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201548.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:09,416 INFO [train.py:901] (3/4) Epoch 25, batch 7600, loss[loss=0.2022, simple_loss=0.2899, pruned_loss=0.05727, over 8401.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05838, over 1608291.83 frames. ], batch size: 48, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:25:20,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 2.467e+02 2.939e+02 3.909e+02 7.265e+02, threshold=5.878e+02, percent-clipped=5.0 +2023-02-07 09:25:27,306 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201615.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:41,314 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=201636.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:25:43,930 INFO [train.py:901] (3/4) Epoch 25, batch 7650, loss[loss=0.2134, simple_loss=0.2959, pruned_loss=0.06542, over 8252.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2842, pruned_loss=0.05898, over 1614650.69 frames. ], batch size: 24, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:00,386 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201662.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:12,135 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:19,113 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.46 vs. limit=5.0 +2023-02-07 09:26:19,457 INFO [train.py:901] (3/4) Epoch 25, batch 7700, loss[loss=0.2088, simple_loss=0.2898, pruned_loss=0.06392, over 8491.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.0589, over 1613179.70 frames. ], batch size: 29, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:20,539 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 09:26:21,002 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8481, 1.4468, 4.0161, 1.5218, 3.5634, 3.3616, 3.6822, 3.5540], + device='cuda:3'), covar=tensor([0.0631, 0.4269, 0.0616, 0.4066, 0.1182, 0.0924, 0.0602, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0658, 0.0656, 0.0722, 0.0648, 0.0736, 0.0627, 0.0626, 0.0699], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:26:30,382 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.306e+02 2.805e+02 3.732e+02 7.115e+02, threshold=5.609e+02, percent-clipped=1.0 +2023-02-07 09:26:43,965 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=201724.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:48,029 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201730.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:26:49,246 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 09:26:54,629 INFO [train.py:901] (3/4) Epoch 25, batch 7750, loss[loss=0.2136, simple_loss=0.2882, pruned_loss=0.0695, over 8094.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2828, pruned_loss=0.05851, over 1613697.33 frames. ], batch size: 21, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:26:58,887 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7403, 2.0158, 2.1406, 1.3730, 2.2457, 1.5611, 0.6780, 2.0467], + device='cuda:3'), covar=tensor([0.0766, 0.0417, 0.0331, 0.0744, 0.0517, 0.0951, 0.1125, 0.0356], + device='cuda:3'), in_proj_covar=tensor([0.0463, 0.0402, 0.0360, 0.0458, 0.0391, 0.0543, 0.0402, 0.0431], + device='cuda:3'), out_proj_covar=tensor([1.2294e-04, 1.0476e-04, 9.3949e-05, 1.1998e-04, 1.0235e-04, 1.5191e-04, + 1.0748e-04, 1.1315e-04], device='cuda:3') +2023-02-07 09:27:02,274 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=201751.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:27:11,230 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9165, 2.3803, 3.8143, 1.9296, 2.0013, 3.7819, 0.6122, 2.2191], + device='cuda:3'), covar=tensor([0.1254, 0.1101, 0.0155, 0.1454, 0.2193, 0.0182, 0.2023, 0.1157], + device='cuda:3'), in_proj_covar=tensor([0.0197, 0.0201, 0.0130, 0.0221, 0.0274, 0.0141, 0.0171, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 09:27:14,639 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5742, 1.7547, 1.5719, 2.1232, 1.1308, 1.3729, 1.6726, 1.7324], + device='cuda:3'), covar=tensor([0.0797, 0.0812, 0.0954, 0.0565, 0.1056, 0.1338, 0.0719, 0.0719], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0211, 0.0204, 0.0247, 0.0248, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 09:27:29,980 INFO [train.py:901] (3/4) Epoch 25, batch 7800, loss[loss=0.1813, simple_loss=0.2724, pruned_loss=0.04516, over 8197.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2821, pruned_loss=0.0582, over 1612202.27 frames. 
], batch size: 23, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:27:40,037 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201804.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:27:40,476 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.301e+02 2.955e+02 3.831e+02 1.047e+03, threshold=5.910e+02, percent-clipped=5.0 +2023-02-07 09:27:56,325 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=201829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:28:03,384 INFO [train.py:901] (3/4) Epoch 25, batch 7850, loss[loss=0.1533, simple_loss=0.2344, pruned_loss=0.03615, over 7702.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05875, over 1617468.26 frames. ], batch size: 18, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:28:36,528 INFO [train.py:901] (3/4) Epoch 25, batch 7900, loss[loss=0.2092, simple_loss=0.2923, pruned_loss=0.06302, over 8321.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2835, pruned_loss=0.05874, over 1615095.53 frames. ], batch size: 25, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:28:47,146 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.505e+02 3.187e+02 3.787e+02 7.491e+02, threshold=6.375e+02, percent-clipped=2.0 +2023-02-07 09:28:48,375 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-07 09:29:01,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1713, 1.5959, 1.7886, 1.5139, 0.9969, 1.5965, 1.8338, 1.7758], + device='cuda:3'), covar=tensor([0.0529, 0.1275, 0.1651, 0.1406, 0.0628, 0.1476, 0.0680, 0.0621], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:29:09,579 INFO [train.py:901] (3/4) Epoch 25, batch 7950, loss[loss=0.168, simple_loss=0.2564, pruned_loss=0.03983, over 7927.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2843, pruned_loss=0.0593, over 1616528.73 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 4.0 +2023-02-07 09:29:37,534 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1699, 1.4317, 1.6516, 1.3730, 0.9685, 1.4347, 1.8946, 1.7247], + device='cuda:3'), covar=tensor([0.0520, 0.1215, 0.1705, 0.1441, 0.0613, 0.1467, 0.0671, 0.0595], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0160, 0.0100, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:29:40,347 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=201986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:42,800 INFO [train.py:901] (3/4) Epoch 25, batch 8000, loss[loss=0.1548, simple_loss=0.2385, pruned_loss=0.03553, over 7800.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2834, pruned_loss=0.05897, over 1614659.21 frames. 
], batch size: 19, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:29:54,398 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.313e+02 2.298e+02 3.131e+02 3.789e+02 6.155e+02, threshold=6.263e+02, percent-clipped=0.0 +2023-02-07 09:29:54,504 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202006.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:55,326 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202007.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:55,946 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202008.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:29:58,064 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:06,543 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:12,416 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202032.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:17,710 INFO [train.py:901] (3/4) Epoch 25, batch 8050, loss[loss=0.1916, simple_loss=0.2728, pruned_loss=0.05523, over 7942.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2833, pruned_loss=0.05944, over 1599617.34 frames. ], batch size: 20, lr: 2.99e-03, grad_scale: 8.0 +2023-02-07 09:30:36,925 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:30:50,486 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 09:30:55,056 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 09:30:55,323 INFO [train.py:901] (3/4) Epoch 26, batch 0, loss[loss=0.1994, simple_loss=0.2694, pruned_loss=0.06474, over 7521.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2694, pruned_loss=0.06474, over 7521.00 frames. ], batch size: 18, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:30:55,323 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 09:31:05,242 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0304, 1.6960, 1.3927, 1.5034, 1.4471, 1.2325, 1.3616, 1.3770], + device='cuda:3'), covar=tensor([0.1092, 0.0340, 0.0957, 0.0432, 0.0618, 0.1315, 0.0860, 0.0646], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0239, 0.0341, 0.0314, 0.0303, 0.0346, 0.0350, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 09:31:06,909 INFO [train.py:935] (3/4) Epoch 26, validation: loss=0.1717, simple_loss=0.2716, pruned_loss=0.03591, over 944034.00 frames. +2023-02-07 09:31:06,910 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 09:31:21,599 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-07 09:31:29,809 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.361e+02 2.411e+02 2.993e+02 3.956e+02 9.314e+02, threshold=5.987e+02, percent-clipped=4.0 +2023-02-07 09:31:40,833 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:31:41,325 INFO [train.py:901] (3/4) Epoch 26, batch 50, loss[loss=0.1995, simple_loss=0.2839, pruned_loss=0.05755, over 8606.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2834, pruned_loss=0.05896, over 367187.02 frames. ], batch size: 39, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:31:52,559 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202138.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:31:55,752 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-07 09:32:15,980 INFO [train.py:901] (3/4) Epoch 26, batch 100, loss[loss=0.1725, simple_loss=0.2526, pruned_loss=0.04619, over 7921.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2819, pruned_loss=0.05807, over 640503.17 frames. ], batch size: 20, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:32:18,612 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 09:32:23,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:32:40,583 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.435e+02 2.962e+02 3.649e+02 8.375e+02, threshold=5.925e+02, percent-clipped=4.0 +2023-02-07 09:32:51,110 INFO [train.py:901] (3/4) Epoch 26, batch 150, loss[loss=0.2015, simple_loss=0.2858, pruned_loss=0.05856, over 7814.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2852, pruned_loss=0.05963, over 856463.60 frames. ], batch size: 20, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:33:22,416 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8294, 1.6228, 3.9887, 1.5708, 3.5536, 3.3472, 3.6626, 3.5796], + device='cuda:3'), covar=tensor([0.0697, 0.4104, 0.0628, 0.4134, 0.1161, 0.0996, 0.0633, 0.0706], + device='cuda:3'), in_proj_covar=tensor([0.0660, 0.0653, 0.0721, 0.0646, 0.0730, 0.0626, 0.0625, 0.0699], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:33:26,394 INFO [train.py:901] (3/4) Epoch 26, batch 200, loss[loss=0.184, simple_loss=0.2653, pruned_loss=0.05135, over 8030.00 frames. ], tot_loss[loss=0.2035, simple_loss=0.2861, pruned_loss=0.06047, over 1023931.05 frames. ], batch size: 20, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:33:49,943 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.393e+02 2.408e+02 2.928e+02 3.669e+02 9.390e+02, threshold=5.857e+02, percent-clipped=3.0 +2023-02-07 09:34:01,569 INFO [train.py:901] (3/4) Epoch 26, batch 250, loss[loss=0.1942, simple_loss=0.2771, pruned_loss=0.05564, over 8109.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2849, pruned_loss=0.05945, over 1157042.39 frames. ], batch size: 23, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:34:09,766 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 09:34:19,972 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-07 09:34:22,771 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=202352.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:35,413 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-07 09:34:36,401 INFO [train.py:901] (3/4) Epoch 26, batch 300, loss[loss=0.1897, simple_loss=0.278, pruned_loss=0.05065, over 8033.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2843, pruned_loss=0.05957, over 1253007.37 frames. ], batch size: 22, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:34:40,021 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202377.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:52,245 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:57,265 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202402.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:34:59,615 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.339e+02 2.510e+02 3.033e+02 3.572e+02 1.183e+03, threshold=6.066e+02, percent-clipped=2.0 +2023-02-07 09:35:08,715 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:10,555 INFO [train.py:901] (3/4) Epoch 26, batch 350, loss[loss=0.1607, simple_loss=0.2439, pruned_loss=0.03876, over 7787.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2853, pruned_loss=0.05998, over 1340892.93 frames. ], batch size: 19, lr: 2.93e-03, grad_scale: 4.0 +2023-02-07 09:35:22,062 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6431, 2.4444, 3.2450, 2.6471, 3.3510, 2.6138, 2.4653, 2.0904], + device='cuda:3'), covar=tensor([0.5526, 0.5189, 0.2164, 0.3881, 0.2440, 0.3185, 0.1885, 0.5746], + device='cuda:3'), in_proj_covar=tensor([0.0954, 0.1006, 0.0820, 0.0977, 0.1015, 0.0916, 0.0764, 0.0840], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 09:35:23,413 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:40,995 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202464.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:43,118 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=202467.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:35:46,424 INFO [train.py:901] (3/4) Epoch 26, batch 400, loss[loss=0.1808, simple_loss=0.2672, pruned_loss=0.04723, over 8085.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2837, pruned_loss=0.05913, over 1397363.71 frames. 
], batch size: 21, lr: 2.93e-03, grad_scale: 8.0 +2023-02-07 09:35:46,710 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0829, 1.8538, 2.3068, 2.0006, 2.3043, 2.1213, 1.9867, 1.2644], + device='cuda:3'), covar=tensor([0.5478, 0.4875, 0.2125, 0.3768, 0.2635, 0.3294, 0.1937, 0.5152], + device='cuda:3'), in_proj_covar=tensor([0.0953, 0.1005, 0.0820, 0.0977, 0.1015, 0.0915, 0.0763, 0.0840], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 09:36:09,159 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7375, 1.8842, 1.6189, 2.0595, 1.3290, 1.5276, 1.7374, 1.8982], + device='cuda:3'), covar=tensor([0.0646, 0.0692, 0.0831, 0.0610, 0.1032, 0.1030, 0.0634, 0.0595], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0212, 0.0206, 0.0246, 0.0249, 0.0207], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 09:36:11,049 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.432e+02 2.504e+02 3.071e+02 3.633e+02 8.131e+02, threshold=6.142e+02, percent-clipped=3.0 +2023-02-07 09:36:21,079 INFO [train.py:901] (3/4) Epoch 26, batch 450, loss[loss=0.1977, simple_loss=0.2935, pruned_loss=0.05094, over 8651.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2844, pruned_loss=0.05964, over 1445618.40 frames. ], batch size: 34, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:36:40,725 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.45 vs. limit=2.0 +2023-02-07 09:36:55,498 INFO [train.py:901] (3/4) Epoch 26, batch 500, loss[loss=0.2032, simple_loss=0.2848, pruned_loss=0.06079, over 8508.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.283, pruned_loss=0.05886, over 1480018.65 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:19,246 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.631e+02 2.396e+02 2.962e+02 4.085e+02 8.069e+02, threshold=5.924e+02, percent-clipped=6.0 +2023-02-07 09:37:29,360 INFO [train.py:901] (3/4) Epoch 26, batch 550, loss[loss=0.1764, simple_loss=0.269, pruned_loss=0.04195, over 8338.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2847, pruned_loss=0.06007, over 1513062.66 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:37:41,179 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-07 09:37:52,230 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-07 09:38:01,539 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5422, 1.2000, 1.3634, 1.1475, 0.9549, 1.2378, 1.5266, 1.4919], + device='cuda:3'), covar=tensor([0.0637, 0.1323, 0.1742, 0.1485, 0.0684, 0.1489, 0.0786, 0.0537], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0153, 0.0190, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:38:05,143 INFO [train.py:901] (3/4) Epoch 26, batch 600, loss[loss=0.2263, simple_loss=0.3095, pruned_loss=0.07155, over 8555.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2855, pruned_loss=0.06066, over 1534549.89 frames. ], batch size: 31, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:21,735 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-07 09:38:29,016 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.445e+02 2.916e+02 3.512e+02 6.749e+02, threshold=5.833e+02, percent-clipped=3.0 +2023-02-07 09:38:38,961 INFO [train.py:901] (3/4) Epoch 26, batch 650, loss[loss=0.2065, simple_loss=0.289, pruned_loss=0.06202, over 8247.00 frames. ], tot_loss[loss=0.2041, simple_loss=0.2864, pruned_loss=0.06087, over 1552896.76 frames. ], batch size: 24, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:38:39,879 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=202723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:38:57,386 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=202748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:39:01,544 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=5.04 vs. limit=5.0 +2023-02-07 09:39:14,899 INFO [train.py:901] (3/4) Epoch 26, batch 700, loss[loss=0.2253, simple_loss=0.3016, pruned_loss=0.07452, over 8467.00 frames. ], tot_loss[loss=0.2026, simple_loss=0.2852, pruned_loss=0.05999, over 1562140.66 frames. ], batch size: 49, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:39:38,616 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.871e+02 2.498e+02 3.029e+02 3.750e+02 8.351e+02, threshold=6.058e+02, percent-clipped=3.0 +2023-02-07 09:39:49,883 INFO [train.py:901] (3/4) Epoch 26, batch 750, loss[loss=0.216, simple_loss=0.2887, pruned_loss=0.07165, over 7828.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2842, pruned_loss=0.0594, over 1573746.37 frames. ], batch size: 20, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:05,052 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 09:40:08,006 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=202848.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:40:13,827 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 09:40:24,780 INFO [train.py:901] (3/4) Epoch 26, batch 800, loss[loss=0.1656, simple_loss=0.2404, pruned_loss=0.04538, over 7804.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05812, over 1585552.73 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:40:26,981 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1400, 1.4002, 1.7577, 1.3117, 1.0176, 1.4993, 1.8443, 1.5401], + device='cuda:3'), covar=tensor([0.0521, 0.1374, 0.1748, 0.1598, 0.0617, 0.1485, 0.0716, 0.0730], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0191, 0.0161, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:40:49,966 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.489e+02 2.818e+02 3.827e+02 7.280e+02, threshold=5.635e+02, percent-clipped=3.0 +2023-02-07 09:40:59,909 INFO [train.py:901] (3/4) Epoch 26, batch 850, loss[loss=0.2248, simple_loss=0.3019, pruned_loss=0.07386, over 8486.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2835, pruned_loss=0.05889, over 1589329.83 frames. 
], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:09,138 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1539, 3.5933, 2.3215, 2.9895, 2.9845, 1.9515, 3.0360, 3.0754], + device='cuda:3'), covar=tensor([0.1655, 0.0438, 0.1134, 0.0718, 0.0687, 0.1576, 0.0973, 0.1038], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0239, 0.0343, 0.0315, 0.0303, 0.0348, 0.0351, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 09:41:33,357 INFO [train.py:901] (3/4) Epoch 26, batch 900, loss[loss=0.2067, simple_loss=0.2937, pruned_loss=0.05982, over 8485.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2839, pruned_loss=0.05897, over 1596527.07 frames. ], batch size: 49, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:41:58,979 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.742e+02 3.265e+02 4.005e+02 6.934e+02, threshold=6.531e+02, percent-clipped=5.0 +2023-02-07 09:42:08,858 INFO [train.py:901] (3/4) Epoch 26, batch 950, loss[loss=0.2072, simple_loss=0.2908, pruned_loss=0.06176, over 8455.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2829, pruned_loss=0.05844, over 1599880.78 frames. ], batch size: 27, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:19,052 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8138, 1.6793, 1.9250, 1.7281, 1.0067, 1.7650, 2.2090, 2.0195], + device='cuda:3'), covar=tensor([0.0471, 0.1235, 0.1625, 0.1375, 0.0631, 0.1442, 0.0631, 0.0610], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:42:31,714 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 09:42:32,517 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:42:43,152 INFO [train.py:901] (3/4) Epoch 26, batch 1000, loss[loss=0.1825, simple_loss=0.2709, pruned_loss=0.04705, over 8499.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2833, pruned_loss=0.05897, over 1604498.85 frames. ], batch size: 26, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:42:50,158 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 09:43:04,725 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203103.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:43:05,257 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 09:43:07,153 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 2.348e+02 2.857e+02 3.355e+02 6.976e+02, threshold=5.714e+02, percent-clipped=1.0 +2023-02-07 09:43:17,240 INFO [train.py:901] (3/4) Epoch 26, batch 1050, loss[loss=0.183, simple_loss=0.2619, pruned_loss=0.05199, over 7788.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05885, over 1604334.08 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:43:18,682 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-07 09:43:38,435 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1131, 1.0041, 1.1952, 0.9385, 0.9092, 1.1956, 0.0984, 0.8764], + device='cuda:3'), covar=tensor([0.1440, 0.1052, 0.0478, 0.0710, 0.2231, 0.0505, 0.1853, 0.1249], + device='cuda:3'), in_proj_covar=tensor([0.0197, 0.0202, 0.0132, 0.0222, 0.0277, 0.0143, 0.0171, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 09:43:53,220 INFO [train.py:901] (3/4) Epoch 26, batch 1100, loss[loss=0.2036, simple_loss=0.3013, pruned_loss=0.05289, over 8247.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05833, over 1608401.22 frames. ], batch size: 24, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:01,157 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 09:44:06,848 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203192.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:44:16,724 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.532e+02 2.511e+02 2.912e+02 3.711e+02 8.666e+02, threshold=5.824e+02, percent-clipped=4.0 +2023-02-07 09:44:24,304 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.6789, 5.8124, 4.9837, 2.6354, 5.0361, 5.4798, 5.1901, 5.3203], + device='cuda:3'), covar=tensor([0.0581, 0.0330, 0.0905, 0.4273, 0.0743, 0.0777, 0.1025, 0.0554], + device='cuda:3'), in_proj_covar=tensor([0.0530, 0.0450, 0.0437, 0.0548, 0.0435, 0.0452, 0.0427, 0.0394], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:44:27,579 INFO [train.py:901] (3/4) Epoch 26, batch 1150, loss[loss=0.2235, simple_loss=0.3111, pruned_loss=0.06794, over 8488.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05831, over 1610454.27 frames. ], batch size: 28, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:44:27,585 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 09:45:02,743 INFO [train.py:901] (3/4) Epoch 26, batch 1200, loss[loss=0.1792, simple_loss=0.2775, pruned_loss=0.04042, over 8460.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05817, over 1616516.20 frames. ], batch size: 25, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:45:27,280 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203307.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:45:27,743 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.402e+02 2.806e+02 3.306e+02 6.331e+02, threshold=5.612e+02, percent-clipped=2.0 +2023-02-07 09:45:35,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2231, 1.5130, 1.2190, 2.6293, 1.1110, 1.1820, 1.7404, 1.7030], + device='cuda:3'), covar=tensor([0.1725, 0.1459, 0.2175, 0.0435, 0.1447, 0.2191, 0.1085, 0.1168], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0194, 0.0246, 0.0212, 0.0205, 0.0247, 0.0249, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 09:45:37,087 INFO [train.py:901] (3/4) Epoch 26, batch 1250, loss[loss=0.1892, simple_loss=0.2772, pruned_loss=0.05063, over 8243.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2829, pruned_loss=0.05819, over 1614427.42 frames. 
], batch size: 22, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:46:12,727 INFO [train.py:901] (3/4) Epoch 26, batch 1300, loss[loss=0.1756, simple_loss=0.2625, pruned_loss=0.04435, over 8239.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05883, over 1617521.51 frames. ], batch size: 22, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:46:31,894 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203400.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:46:37,064 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.437e+02 2.919e+02 3.429e+02 9.499e+02, threshold=5.838e+02, percent-clipped=5.0 +2023-02-07 09:46:46,434 INFO [train.py:901] (3/4) Epoch 26, batch 1350, loss[loss=0.1954, simple_loss=0.2821, pruned_loss=0.05435, over 8131.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2847, pruned_loss=0.05905, over 1621421.65 frames. ], batch size: 22, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:04,180 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203447.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:22,495 INFO [train.py:901] (3/4) Epoch 26, batch 1400, loss[loss=0.1866, simple_loss=0.2761, pruned_loss=0.04851, over 8744.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2836, pruned_loss=0.05844, over 1621704.54 frames. ], batch size: 39, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:47:24,885 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-07 09:47:31,496 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203485.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:47,752 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.419e+02 2.906e+02 3.589e+02 5.599e+02, threshold=5.812e+02, percent-clipped=0.0 +2023-02-07 09:47:52,601 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203515.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:47:54,406 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 09:47:57,076 INFO [train.py:901] (3/4) Epoch 26, batch 1450, loss[loss=0.1948, simple_loss=0.2851, pruned_loss=0.05226, over 8250.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2837, pruned_loss=0.05835, over 1622426.29 frames. ], batch size: 24, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:01,590 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. 
limit=2.0 +2023-02-07 09:48:10,648 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6799, 2.0623, 3.1423, 1.5447, 2.2281, 2.1883, 1.7631, 2.4594], + device='cuda:3'), covar=tensor([0.1866, 0.2548, 0.0846, 0.4724, 0.2003, 0.3070, 0.2351, 0.2096], + device='cuda:3'), in_proj_covar=tensor([0.0533, 0.0623, 0.0554, 0.0659, 0.0654, 0.0602, 0.0552, 0.0637], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 09:48:18,113 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0171, 1.8597, 2.3658, 1.9911, 2.2687, 2.1178, 1.9113, 1.1056], + device='cuda:3'), covar=tensor([0.5706, 0.4929, 0.2046, 0.3797, 0.2463, 0.3268, 0.1979, 0.5467], + device='cuda:3'), in_proj_covar=tensor([0.0962, 0.1018, 0.0830, 0.0988, 0.1025, 0.0926, 0.0772, 0.0849], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 09:48:24,036 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:48:24,762 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203563.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:48:31,364 INFO [train.py:901] (3/4) Epoch 26, batch 1500, loss[loss=0.2772, simple_loss=0.3434, pruned_loss=0.1055, over 6593.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05847, over 1618024.95 frames. ], batch size: 71, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:48:33,569 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-07 09:48:42,961 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203588.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:48:56,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.262e+02 2.668e+02 3.517e+02 8.500e+02, threshold=5.335e+02, percent-clipped=2.0 +2023-02-07 09:49:06,820 INFO [train.py:901] (3/4) Epoch 26, batch 1550, loss[loss=0.2107, simple_loss=0.2948, pruned_loss=0.06333, over 8511.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2832, pruned_loss=0.05873, over 1619165.43 frames. ], batch size: 49, lr: 2.92e-03, grad_scale: 4.0 +2023-02-07 09:49:15,916 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6470, 1.9878, 2.0458, 1.2893, 2.1271, 1.5870, 0.5533, 1.8963], + device='cuda:3'), covar=tensor([0.0628, 0.0386, 0.0316, 0.0697, 0.0384, 0.0848, 0.0913, 0.0343], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0405, 0.0359, 0.0457, 0.0392, 0.0545, 0.0399, 0.0432], + device='cuda:3'), out_proj_covar=tensor([1.2384e-04, 1.0548e-04, 9.3703e-05, 1.1969e-04, 1.0256e-04, 1.5250e-04, + 1.0683e-04, 1.1347e-04], device='cuda:3') +2023-02-07 09:49:40,410 INFO [train.py:901] (3/4) Epoch 26, batch 1600, loss[loss=0.2415, simple_loss=0.3186, pruned_loss=0.08226, over 8452.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05913, over 1619959.68 frames. ], batch size: 27, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:05,089 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.410e+02 3.021e+02 3.901e+02 1.362e+03, threshold=6.042e+02, percent-clipped=8.0 +2023-02-07 09:50:15,007 INFO [train.py:901] (3/4) Epoch 26, batch 1650, loss[loss=0.21, simple_loss=0.2823, pruned_loss=0.06886, over 7777.00 frames. 
], tot_loss[loss=0.2012, simple_loss=0.2843, pruned_loss=0.05907, over 1618018.95 frames. ], batch size: 19, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:44,787 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:48,721 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203771.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:50:49,155 INFO [train.py:901] (3/4) Epoch 26, batch 1700, loss[loss=0.1666, simple_loss=0.2429, pruned_loss=0.04514, over 7441.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2828, pruned_loss=0.05805, over 1618271.00 frames. ], batch size: 17, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:50:57,400 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203784.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:05,472 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203796.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:14,297 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.478e+02 3.017e+02 3.791e+02 8.735e+02, threshold=6.035e+02, percent-clipped=4.0 +2023-02-07 09:51:20,893 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=203818.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:23,317 INFO [train.py:901] (3/4) Epoch 26, batch 1750, loss[loss=0.1883, simple_loss=0.281, pruned_loss=0.04781, over 8583.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2842, pruned_loss=0.05904, over 1617409.12 frames. ], batch size: 34, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:51:28,102 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=203829.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:38,328 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=203843.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:54,076 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:51:58,819 INFO [train.py:901] (3/4) Epoch 26, batch 1800, loss[loss=0.171, simple_loss=0.2641, pruned_loss=0.03895, over 8110.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.284, pruned_loss=0.05858, over 1618199.15 frames. 
], batch size: 23, lr: 2.92e-03, grad_scale: 8.0 +2023-02-07 09:52:11,901 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2085, 1.4669, 1.7112, 1.3857, 1.0831, 1.5770, 1.9889, 1.7846], + device='cuda:3'), covar=tensor([0.0557, 0.1560, 0.2080, 0.1774, 0.0724, 0.1778, 0.0773, 0.0702], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0188, 0.0160, 0.0099, 0.0162, 0.0112, 0.0144], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:52:14,504 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8756, 1.4764, 3.4147, 1.5526, 2.3769, 3.8479, 3.9485, 3.3229], + device='cuda:3'), covar=tensor([0.1265, 0.1938, 0.0337, 0.2110, 0.1096, 0.0236, 0.0449, 0.0536], + device='cuda:3'), in_proj_covar=tensor([0.0301, 0.0321, 0.0288, 0.0317, 0.0316, 0.0273, 0.0431, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 09:52:23,002 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.441e+02 2.799e+02 3.336e+02 4.977e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 09:52:29,701 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=203918.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:52:32,265 INFO [train.py:901] (3/4) Epoch 26, batch 1850, loss[loss=0.194, simple_loss=0.2781, pruned_loss=0.05498, over 7922.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2831, pruned_loss=0.05853, over 1617414.03 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:52:47,219 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 09:52:47,700 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=203944.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:53:07,704 INFO [train.py:901] (3/4) Epoch 26, batch 1900, loss[loss=0.1973, simple_loss=0.2785, pruned_loss=0.05801, over 8519.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2835, pruned_loss=0.05897, over 1617997.52 frames. ], batch size: 28, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:33,465 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.507e+02 3.073e+02 4.108e+02 9.647e+02, threshold=6.146e+02, percent-clipped=9.0 +2023-02-07 09:53:36,910 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 09:53:42,921 INFO [train.py:901] (3/4) Epoch 26, batch 1950, loss[loss=0.1995, simple_loss=0.2805, pruned_loss=0.05922, over 7427.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2844, pruned_loss=0.05927, over 1621727.03 frames. ], batch size: 17, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:53:49,444 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 09:54:07,164 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 09:54:17,189 INFO [train.py:901] (3/4) Epoch 26, batch 2000, loss[loss=0.1977, simple_loss=0.2848, pruned_loss=0.05533, over 8322.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05886, over 1619316.57 frames. 
], batch size: 25, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:34,347 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204095.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:43,733 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.388e+02 3.050e+02 3.690e+02 7.171e+02, threshold=6.101e+02, percent-clipped=4.0 +2023-02-07 09:54:44,461 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:54:53,066 INFO [train.py:901] (3/4) Epoch 26, batch 2050, loss[loss=0.2143, simple_loss=0.3025, pruned_loss=0.063, over 8103.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.0579, over 1619205.81 frames. ], batch size: 23, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:54:57,142 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204128.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:26,604 INFO [train.py:901] (3/4) Epoch 26, batch 2100, loss[loss=0.1594, simple_loss=0.2469, pruned_loss=0.03597, over 7915.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2833, pruned_loss=0.05772, over 1618180.82 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:55:29,666 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7254, 1.7009, 1.7872, 1.6406, 0.8962, 1.7066, 2.1316, 1.9364], + device='cuda:3'), covar=tensor([0.0447, 0.1240, 0.1649, 0.1397, 0.0616, 0.1470, 0.0666, 0.0613], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0100, 0.0164, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 09:55:33,650 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204181.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:47,793 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:52,931 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 2.312e+02 2.797e+02 3.552e+02 6.063e+02, threshold=5.595e+02, percent-clipped=0.0 +2023-02-07 09:55:53,728 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204209.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:55:56,894 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. limit=2.0 +2023-02-07 09:56:02,441 INFO [train.py:901] (3/4) Epoch 26, batch 2150, loss[loss=0.1812, simple_loss=0.2577, pruned_loss=0.05231, over 7808.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2832, pruned_loss=0.0575, over 1618400.78 frames. 
], batch size: 19, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:56:03,969 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:04,624 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:17,182 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:56:29,894 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204262.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 09:56:36,283 INFO [train.py:901] (3/4) Epoch 26, batch 2200, loss[loss=0.232, simple_loss=0.3086, pruned_loss=0.07769, over 8747.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2829, pruned_loss=0.05744, over 1620304.77 frames. ], batch size: 30, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:01,394 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.362e+02 3.099e+02 4.074e+02 1.599e+03, threshold=6.197e+02, percent-clipped=8.0 +2023-02-07 09:57:11,827 INFO [train.py:901] (3/4) Epoch 26, batch 2250, loss[loss=0.1872, simple_loss=0.2793, pruned_loss=0.0476, over 8486.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2836, pruned_loss=0.05775, over 1617540.03 frames. ], batch size: 28, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:13,367 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:57:46,492 INFO [train.py:901] (3/4) Epoch 26, batch 2300, loss[loss=0.1518, simple_loss=0.238, pruned_loss=0.03279, over 6412.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05787, over 1609012.79 frames. ], batch size: 14, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:57:50,096 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204377.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 09:58:10,722 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.304e+02 2.813e+02 3.713e+02 7.684e+02, threshold=5.626e+02, percent-clipped=3.0 +2023-02-07 09:58:21,053 INFO [train.py:901] (3/4) Epoch 26, batch 2350, loss[loss=0.2124, simple_loss=0.2842, pruned_loss=0.07031, over 7811.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2843, pruned_loss=0.0589, over 1611182.79 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:58:33,863 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204439.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:58:57,208 INFO [train.py:901] (3/4) Epoch 26, batch 2400, loss[loss=0.1925, simple_loss=0.2894, pruned_loss=0.04776, over 8188.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2852, pruned_loss=0.0591, over 1615371.30 frames. 
], batch size: 23, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:02,881 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204480.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:16,088 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204499.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:20,395 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:22,322 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.551e+02 2.904e+02 3.805e+02 7.023e+02, threshold=5.807e+02, percent-clipped=3.0 +2023-02-07 09:59:32,142 INFO [train.py:901] (3/4) Epoch 26, batch 2450, loss[loss=0.2057, simple_loss=0.2666, pruned_loss=0.07235, over 8038.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2842, pruned_loss=0.05871, over 1618969.96 frames. ], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 09:59:33,746 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204524.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:34,344 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=204525.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 09:59:56,604 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:08,716 INFO [train.py:901] (3/4) Epoch 26, batch 2500, loss[loss=0.212, simple_loss=0.2851, pruned_loss=0.06942, over 5582.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2828, pruned_loss=0.0582, over 1616646.55 frames. ], batch size: 12, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:15,148 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:25,202 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1299, 3.6762, 2.3664, 2.9687, 2.9299, 2.0909, 2.9921, 3.0343], + device='cuda:3'), covar=tensor([0.1767, 0.0366, 0.1120, 0.0727, 0.0747, 0.1585, 0.0977, 0.1044], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0240, 0.0341, 0.0312, 0.0303, 0.0346, 0.0348, 0.0321], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 10:00:31,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204605.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:00:33,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.483e+02 3.074e+02 3.585e+02 8.993e+02, threshold=6.148e+02, percent-clipped=7.0 +2023-02-07 10:00:43,170 INFO [train.py:901] (3/4) Epoch 26, batch 2550, loss[loss=0.1821, simple_loss=0.2692, pruned_loss=0.0475, over 8082.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05856, over 1620476.72 frames. 
], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:00:50,560 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204633.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 10:00:55,141 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=204640.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:01:07,860 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204658.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 10:01:18,448 INFO [train.py:901] (3/4) Epoch 26, batch 2600, loss[loss=0.1823, simple_loss=0.2598, pruned_loss=0.05243, over 7694.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2839, pruned_loss=0.05881, over 1619574.47 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:01:43,342 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.840e+02 2.465e+02 3.094e+02 3.874e+02 9.576e+02, threshold=6.187e+02, percent-clipped=4.0 +2023-02-07 10:01:52,895 INFO [train.py:901] (3/4) Epoch 26, batch 2650, loss[loss=0.2051, simple_loss=0.2929, pruned_loss=0.05862, over 8033.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2834, pruned_loss=0.0585, over 1620715.57 frames. ], batch size: 22, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:14,670 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 10:02:28,028 INFO [train.py:901] (3/4) Epoch 26, batch 2700, loss[loss=0.1833, simple_loss=0.2554, pruned_loss=0.05567, over 7542.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2843, pruned_loss=0.05912, over 1622256.26 frames. ], batch size: 18, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:02:53,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.711e+02 2.359e+02 2.865e+02 3.674e+02 6.992e+02, threshold=5.730e+02, percent-clipped=1.0 +2023-02-07 10:02:55,450 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:04,002 INFO [train.py:901] (3/4) Epoch 26, batch 2750, loss[loss=0.2118, simple_loss=0.3012, pruned_loss=0.0612, over 8663.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.284, pruned_loss=0.05907, over 1617549.62 frames. ], batch size: 39, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:13,287 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204835.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:16,024 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=204839.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:03:38,123 INFO [train.py:901] (3/4) Epoch 26, batch 2800, loss[loss=0.2317, simple_loss=0.3096, pruned_loss=0.07694, over 7810.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05875, over 1615766.53 frames. 
], batch size: 20, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:03:56,005 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=204896.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:04,627 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.285e+02 3.108e+02 3.828e+02 9.944e+02, threshold=6.216e+02, percent-clipped=6.0 +2023-02-07 10:04:09,689 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6721, 1.4857, 4.8639, 1.9238, 4.2599, 4.0229, 4.3458, 4.2878], + device='cuda:3'), covar=tensor([0.0615, 0.5345, 0.0461, 0.4302, 0.1187, 0.0991, 0.0620, 0.0708], + device='cuda:3'), in_proj_covar=tensor([0.0666, 0.0664, 0.0731, 0.0651, 0.0745, 0.0630, 0.0631, 0.0707], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:04:13,938 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=204921.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:04:14,419 INFO [train.py:901] (3/4) Epoch 26, batch 2850, loss[loss=0.2071, simple_loss=0.2839, pruned_loss=0.06513, over 8356.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2837, pruned_loss=0.05856, over 1618140.95 frames. ], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:04:48,525 INFO [train.py:901] (3/4) Epoch 26, batch 2900, loss[loss=0.1953, simple_loss=0.2714, pruned_loss=0.05959, over 8193.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.284, pruned_loss=0.05906, over 1616061.79 frames. ], batch size: 23, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:13,380 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.393e+02 3.052e+02 3.991e+02 9.487e+02, threshold=6.105e+02, percent-clipped=5.0 +2023-02-07 10:05:20,487 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 10:05:23,875 INFO [train.py:901] (3/4) Epoch 26, batch 2950, loss[loss=0.1858, simple_loss=0.2699, pruned_loss=0.05085, over 8096.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2839, pruned_loss=0.05845, over 1617167.50 frames. ], batch size: 21, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:24,057 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0973, 1.6200, 3.3945, 1.6451, 2.6531, 3.7091, 3.8485, 3.1979], + device='cuda:3'), covar=tensor([0.1140, 0.1729, 0.0334, 0.1973, 0.0921, 0.0239, 0.0590, 0.0517], + device='cuda:3'), in_proj_covar=tensor([0.0302, 0.0326, 0.0289, 0.0318, 0.0321, 0.0275, 0.0434, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 10:05:54,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5980, 1.5612, 2.0818, 1.3120, 1.1887, 2.0643, 0.3153, 1.2980], + device='cuda:3'), covar=tensor([0.1406, 0.1248, 0.0349, 0.1045, 0.2412, 0.0379, 0.1896, 0.1237], + device='cuda:3'), in_proj_covar=tensor([0.0196, 0.0203, 0.0131, 0.0222, 0.0276, 0.0143, 0.0172, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 10:05:58,487 INFO [train.py:901] (3/4) Epoch 26, batch 3000, loss[loss=0.1619, simple_loss=0.2429, pruned_loss=0.04047, over 7646.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2843, pruned_loss=0.05871, over 1614401.94 frames. 
], batch size: 19, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:05:58,488 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 10:06:11,416 INFO [train.py:935] (3/4) Epoch 26, validation: loss=0.1716, simple_loss=0.2713, pruned_loss=0.03593, over 944034.00 frames. +2023-02-07 10:06:11,417 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 10:06:20,127 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-02-07 10:06:36,708 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.246e+02 2.785e+02 3.735e+02 7.523e+02, threshold=5.571e+02, percent-clipped=3.0 +2023-02-07 10:06:46,004 INFO [train.py:901] (3/4) Epoch 26, batch 3050, loss[loss=0.186, simple_loss=0.2721, pruned_loss=0.04992, over 8243.00 frames. ], tot_loss[loss=0.2025, simple_loss=0.2856, pruned_loss=0.05972, over 1611666.33 frames. ], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:06:49,445 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205127.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:22,789 INFO [train.py:901] (3/4) Epoch 26, batch 3100, loss[loss=0.21, simple_loss=0.3084, pruned_loss=0.05584, over 8248.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2844, pruned_loss=0.05922, over 1609008.59 frames. ], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:07:26,346 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6888, 1.3161, 4.9125, 1.9676, 4.4346, 4.0520, 4.4343, 4.3356], + device='cuda:3'), covar=tensor([0.0584, 0.4987, 0.0438, 0.3991, 0.0980, 0.0866, 0.0514, 0.0622], + device='cuda:3'), in_proj_covar=tensor([0.0666, 0.0663, 0.0730, 0.0652, 0.0743, 0.0630, 0.0631, 0.0708], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:07:30,228 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205183.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:48,156 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.327e+02 2.997e+02 4.038e+02 1.256e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 10:07:53,695 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205216.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:07:53,941 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 10:07:57,549 INFO [train.py:901] (3/4) Epoch 26, batch 3150, loss[loss=0.197, simple_loss=0.2827, pruned_loss=0.0556, over 8245.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05892, over 1612641.67 frames. ], batch size: 24, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:33,477 INFO [train.py:901] (3/4) Epoch 26, batch 3200, loss[loss=0.2438, simple_loss=0.3143, pruned_loss=0.08664, over 7042.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2846, pruned_loss=0.05938, over 1616188.36 frames. 
], batch size: 72, lr: 2.91e-03, grad_scale: 8.0 +2023-02-07 10:08:52,245 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:08:58,818 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.527e+02 3.010e+02 3.735e+02 6.895e+02, threshold=6.021e+02, percent-clipped=2.0 +2023-02-07 10:09:09,102 INFO [train.py:901] (3/4) Epoch 26, batch 3250, loss[loss=0.1821, simple_loss=0.268, pruned_loss=0.04804, over 7807.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2845, pruned_loss=0.05908, over 1612776.60 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:09:18,634 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4850, 2.0778, 2.2240, 2.0423, 1.4607, 2.0947, 2.3831, 2.1916], + device='cuda:3'), covar=tensor([0.0535, 0.0887, 0.1227, 0.1117, 0.0632, 0.1080, 0.0652, 0.0488], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0160, 0.0100, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 10:09:40,705 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5947, 2.0848, 3.2324, 1.4982, 2.4925, 2.0029, 1.7204, 2.4863], + device='cuda:3'), covar=tensor([0.1961, 0.2622, 0.0854, 0.4594, 0.1932, 0.3269, 0.2460, 0.2257], + device='cuda:3'), in_proj_covar=tensor([0.0534, 0.0622, 0.0555, 0.0657, 0.0654, 0.0602, 0.0553, 0.0635], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:09:43,188 INFO [train.py:901] (3/4) Epoch 26, batch 3300, loss[loss=0.1668, simple_loss=0.245, pruned_loss=0.04435, over 7198.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2836, pruned_loss=0.05896, over 1610367.17 frames. ], batch size: 16, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:10:10,314 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.313e+02 2.653e+02 3.358e+02 9.214e+02, threshold=5.305e+02, percent-clipped=4.0 +2023-02-07 10:10:17,141 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 10:10:20,066 INFO [train.py:901] (3/4) Epoch 26, batch 3350, loss[loss=0.2812, simple_loss=0.349, pruned_loss=0.1067, over 8650.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05926, over 1606461.58 frames. ], batch size: 34, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:10:34,608 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3630, 1.7093, 2.6412, 1.2983, 1.9247, 1.8790, 1.4357, 1.9871], + device='cuda:3'), covar=tensor([0.2281, 0.2832, 0.1038, 0.5323, 0.2128, 0.3427, 0.2794, 0.2421], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0627, 0.0559, 0.0663, 0.0659, 0.0606, 0.0558, 0.0641], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:10:54,105 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205471.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:10:54,722 INFO [train.py:901] (3/4) Epoch 26, batch 3400, loss[loss=0.1881, simple_loss=0.2656, pruned_loss=0.05527, over 7412.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2842, pruned_loss=0.05953, over 1607466.57 frames. 
], batch size: 17, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:20,313 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.409e+02 2.883e+02 3.635e+02 7.106e+02, threshold=5.767e+02, percent-clipped=3.0 +2023-02-07 10:11:30,470 INFO [train.py:901] (3/4) Epoch 26, batch 3450, loss[loss=0.1765, simple_loss=0.2638, pruned_loss=0.04456, over 8499.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.283, pruned_loss=0.05901, over 1606453.28 frames. ], batch size: 28, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:11:53,164 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205554.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:11:57,170 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=205560.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:05,236 INFO [train.py:901] (3/4) Epoch 26, batch 3500, loss[loss=0.1814, simple_loss=0.2691, pruned_loss=0.04684, over 7917.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2829, pruned_loss=0.05911, over 1608141.14 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:12:10,378 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205579.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:15,150 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205586.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:12:24,736 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 10:12:30,113 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.208e+02 2.714e+02 3.358e+02 5.744e+02, threshold=5.428e+02, percent-clipped=0.0 +2023-02-07 10:12:35,814 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. limit=2.0 +2023-02-07 10:12:39,511 INFO [train.py:901] (3/4) Epoch 26, batch 3550, loss[loss=0.2019, simple_loss=0.2821, pruned_loss=0.06084, over 8533.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2834, pruned_loss=0.05905, over 1610807.81 frames. ], batch size: 39, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:15,379 INFO [train.py:901] (3/4) Epoch 26, batch 3600, loss[loss=0.1777, simple_loss=0.265, pruned_loss=0.04525, over 8032.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2834, pruned_loss=0.05874, over 1612403.94 frames. ], batch size: 22, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:13:16,213 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205673.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:17,486 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=205675.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:13:19,668 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.46 vs. 
limit=2.0 +2023-02-07 10:13:20,867 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7875, 1.4905, 3.1625, 1.4907, 2.3004, 3.4191, 3.5543, 2.8975], + device='cuda:3'), covar=tensor([0.1235, 0.1749, 0.0353, 0.2078, 0.0943, 0.0271, 0.0628, 0.0617], + device='cuda:3'), in_proj_covar=tensor([0.0304, 0.0328, 0.0291, 0.0320, 0.0322, 0.0277, 0.0437, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 10:13:39,661 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.295e+02 2.882e+02 3.730e+02 8.207e+02, threshold=5.763e+02, percent-clipped=6.0 +2023-02-07 10:13:49,104 INFO [train.py:901] (3/4) Epoch 26, batch 3650, loss[loss=0.191, simple_loss=0.2826, pruned_loss=0.04967, over 8272.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2827, pruned_loss=0.05815, over 1612671.49 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:18,433 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205762.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:14:24,803 INFO [train.py:901] (3/4) Epoch 26, batch 3700, loss[loss=0.2087, simple_loss=0.2977, pruned_loss=0.05984, over 8470.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2832, pruned_loss=0.05842, over 1614886.55 frames. ], batch size: 25, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:14:27,593 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:14:49,668 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 2.377e+02 2.968e+02 3.727e+02 1.221e+03, threshold=5.937e+02, percent-clipped=5.0 +2023-02-07 10:14:59,197 INFO [train.py:901] (3/4) Epoch 26, batch 3750, loss[loss=0.1951, simple_loss=0.2785, pruned_loss=0.05585, over 7803.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05813, over 1612057.55 frames. ], batch size: 19, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:12,917 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205842.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:31,310 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205867.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:15:34,018 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1962, 1.0706, 1.2958, 1.0381, 0.9956, 1.3326, 0.0517, 0.8846], + device='cuda:3'), covar=tensor([0.1443, 0.1272, 0.0529, 0.0659, 0.2287, 0.0523, 0.1881, 0.1225], + device='cuda:3'), in_proj_covar=tensor([0.0196, 0.0202, 0.0132, 0.0221, 0.0274, 0.0144, 0.0171, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 10:15:34,433 INFO [train.py:901] (3/4) Epoch 26, batch 3800, loss[loss=0.2272, simple_loss=0.3039, pruned_loss=0.0752, over 8094.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2825, pruned_loss=0.05793, over 1612340.34 frames. 
], batch size: 21, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:15:59,218 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.381e+02 2.847e+02 3.364e+02 6.986e+02, threshold=5.694e+02, percent-clipped=1.0 +2023-02-07 10:15:59,380 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=205908.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:08,813 INFO [train.py:901] (3/4) Epoch 26, batch 3850, loss[loss=0.2481, simple_loss=0.3187, pruned_loss=0.08878, over 6990.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2813, pruned_loss=0.05757, over 1605159.60 frames. ], batch size: 72, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:16:15,137 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=205931.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:20,592 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7045, 1.4683, 2.8164, 1.2651, 2.1774, 3.0153, 3.2478, 2.5609], + device='cuda:3'), covar=tensor([0.1305, 0.1783, 0.0403, 0.2411, 0.0957, 0.0343, 0.0624, 0.0657], + device='cuda:3'), in_proj_covar=tensor([0.0305, 0.0328, 0.0291, 0.0321, 0.0322, 0.0279, 0.0438, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 10:16:23,271 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5822, 2.1291, 3.1757, 1.4104, 2.3860, 1.9809, 1.7477, 2.4972], + device='cuda:3'), covar=tensor([0.2093, 0.2839, 0.1003, 0.4838, 0.2174, 0.3466, 0.2454, 0.2484], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0626, 0.0558, 0.0662, 0.0657, 0.0605, 0.0557, 0.0641], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:16:29,172 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 10:16:32,102 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=205956.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:16:42,797 INFO [train.py:901] (3/4) Epoch 26, batch 3900, loss[loss=0.1909, simple_loss=0.2623, pruned_loss=0.05977, over 7420.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2817, pruned_loss=0.05764, over 1607438.50 frames. ], batch size: 17, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:09,995 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.441e+02 2.892e+02 3.706e+02 7.796e+02, threshold=5.785e+02, percent-clipped=3.0 +2023-02-07 10:17:16,700 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206017.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:17:18,467 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. limit=2.0 +2023-02-07 10:17:20,038 INFO [train.py:901] (3/4) Epoch 26, batch 3950, loss[loss=0.2066, simple_loss=0.2978, pruned_loss=0.05764, over 8508.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2815, pruned_loss=0.05741, over 1608144.69 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:53,915 INFO [train.py:901] (3/4) Epoch 26, batch 4000, loss[loss=0.1699, simple_loss=0.2558, pruned_loss=0.04197, over 7541.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2821, pruned_loss=0.05788, over 1606292.74 frames. 
], batch size: 18, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:17:55,452 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206074.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:18,675 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206106.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:18:19,947 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.865e+02 2.407e+02 2.986e+02 3.556e+02 8.558e+02, threshold=5.971e+02, percent-clipped=6.0 +2023-02-07 10:18:23,572 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8433, 1.6026, 3.4688, 1.6266, 2.4373, 3.7911, 3.9112, 3.2710], + device='cuda:3'), covar=tensor([0.1321, 0.1892, 0.0299, 0.2092, 0.1071, 0.0221, 0.0434, 0.0532], + device='cuda:3'), in_proj_covar=tensor([0.0303, 0.0326, 0.0291, 0.0320, 0.0321, 0.0278, 0.0437, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 10:18:26,939 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7493, 1.4962, 4.9204, 1.9067, 4.4671, 4.1298, 4.4864, 4.3241], + device='cuda:3'), covar=tensor([0.0533, 0.4474, 0.0422, 0.3939, 0.0874, 0.0817, 0.0448, 0.0596], + device='cuda:3'), in_proj_covar=tensor([0.0659, 0.0655, 0.0721, 0.0644, 0.0734, 0.0620, 0.0621, 0.0696], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:18:29,517 INFO [train.py:901] (3/4) Epoch 26, batch 4050, loss[loss=0.2178, simple_loss=0.2935, pruned_loss=0.07101, over 8341.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.282, pruned_loss=0.0579, over 1605860.93 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:18:37,178 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206132.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:03,795 INFO [train.py:901] (3/4) Epoch 26, batch 4100, loss[loss=0.1465, simple_loss=0.2242, pruned_loss=0.0344, over 7430.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05815, over 1607606.75 frames. ], batch size: 17, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:28,869 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.590e+02 2.376e+02 2.755e+02 3.418e+02 9.873e+02, threshold=5.510e+02, percent-clipped=4.0 +2023-02-07 10:19:31,076 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0154, 1.4538, 1.7690, 1.3808, 1.0314, 1.5107, 1.8456, 1.5225], + device='cuda:3'), covar=tensor([0.0502, 0.1265, 0.1668, 0.1445, 0.0563, 0.1400, 0.0645, 0.0696], + device='cuda:3'), in_proj_covar=tensor([0.0097, 0.0153, 0.0189, 0.0160, 0.0100, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 10:19:35,179 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206215.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,253 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206221.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:19:39,709 INFO [train.py:901] (3/4) Epoch 26, batch 4150, loss[loss=0.1844, simple_loss=0.2654, pruned_loss=0.05169, over 7803.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2819, pruned_loss=0.05767, over 1605497.28 frames. 
], batch size: 19, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:19:52,402 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.34 vs. limit=5.0 +2023-02-07 10:20:00,473 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 10:20:00,792 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206252.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:20:10,914 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9107, 3.8136, 3.5168, 2.4486, 3.4081, 3.5534, 3.4663, 3.4046], + device='cuda:3'), covar=tensor([0.0831, 0.0718, 0.1049, 0.3607, 0.0913, 0.1170, 0.1387, 0.0883], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0455, 0.0441, 0.0553, 0.0436, 0.0459, 0.0436, 0.0400], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:20:14,202 INFO [train.py:901] (3/4) Epoch 26, batch 4200, loss[loss=0.2404, simple_loss=0.313, pruned_loss=0.08387, over 7155.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2833, pruned_loss=0.05851, over 1604348.62 frames. ], batch size: 73, lr: 2.90e-03, grad_scale: 16.0 +2023-02-07 10:20:22,970 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 10:20:38,388 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.475e+02 2.335e+02 2.968e+02 3.755e+02 9.805e+02, threshold=5.936e+02, percent-clipped=3.0 +2023-02-07 10:20:44,918 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 10:20:49,136 INFO [train.py:901] (3/4) Epoch 26, batch 4250, loss[loss=0.1875, simple_loss=0.274, pruned_loss=0.05053, over 7927.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.284, pruned_loss=0.05883, over 1607991.61 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:21:03,041 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0792, 1.2298, 1.1906, 0.8119, 1.2339, 1.0341, 0.0733, 1.2030], + device='cuda:3'), covar=tensor([0.0447, 0.0420, 0.0394, 0.0603, 0.0492, 0.1106, 0.0964, 0.0368], + device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0409, 0.0364, 0.0461, 0.0395, 0.0555, 0.0406, 0.0440], + device='cuda:3'), out_proj_covar=tensor([1.2541e-04, 1.0671e-04, 9.5195e-05, 1.2081e-04, 1.0329e-04, 1.5543e-04, + 1.0858e-04, 1.1577e-04], device='cuda:3') +2023-02-07 10:21:21,331 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206367.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:24,640 INFO [train.py:901] (3/4) Epoch 26, batch 4300, loss[loss=0.1766, simple_loss=0.2674, pruned_loss=0.04293, over 8285.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2828, pruned_loss=0.05801, over 1607951.56 frames. 
], batch size: 23, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:21:26,011 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1860, 3.1359, 2.8919, 1.6363, 2.7418, 2.9201, 2.8417, 2.8132], + device='cuda:3'), covar=tensor([0.1197, 0.0850, 0.1366, 0.4868, 0.1352, 0.1627, 0.1587, 0.1106], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0457, 0.0443, 0.0556, 0.0440, 0.0462, 0.0439, 0.0402], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:21:26,056 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2403, 1.2340, 3.3552, 1.1525, 2.9509, 2.8132, 3.0727, 2.9669], + device='cuda:3'), covar=tensor([0.0860, 0.4641, 0.0856, 0.4275, 0.1362, 0.1104, 0.0808, 0.0925], + device='cuda:3'), in_proj_covar=tensor([0.0661, 0.0655, 0.0722, 0.0646, 0.0735, 0.0622, 0.0623, 0.0696], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:21:35,889 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206388.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:50,295 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.331e+02 2.890e+02 3.800e+02 6.492e+02, threshold=5.781e+02, percent-clipped=2.0 +2023-02-07 10:21:53,279 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206413.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:56,607 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206418.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:21:59,346 INFO [train.py:901] (3/4) Epoch 26, batch 4350, loss[loss=0.1715, simple_loss=0.2535, pruned_loss=0.04473, over 7918.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2825, pruned_loss=0.05789, over 1607868.34 frames. ], batch size: 20, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:18,983 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 10:22:20,066 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.86 vs. limit=5.0 +2023-02-07 10:22:20,824 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. limit=5.0 +2023-02-07 10:22:32,200 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2861, 3.2084, 2.9825, 1.6508, 2.8906, 2.8746, 2.8587, 2.7472], + device='cuda:3'), covar=tensor([0.1067, 0.0799, 0.1224, 0.4465, 0.1163, 0.1258, 0.1669, 0.1131], + device='cuda:3'), in_proj_covar=tensor([0.0536, 0.0454, 0.0439, 0.0552, 0.0435, 0.0457, 0.0434, 0.0399], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:22:34,794 INFO [train.py:901] (3/4) Epoch 26, batch 4400, loss[loss=0.177, simple_loss=0.2539, pruned_loss=0.0501, over 8084.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2828, pruned_loss=0.05829, over 1608619.59 frames. 
], batch size: 21, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:22:36,358 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3580, 1.6302, 1.6105, 1.0822, 1.6853, 1.3179, 0.3114, 1.6042], + device='cuda:3'), covar=tensor([0.0540, 0.0403, 0.0348, 0.0576, 0.0447, 0.0956, 0.0989, 0.0306], + device='cuda:3'), in_proj_covar=tensor([0.0468, 0.0405, 0.0361, 0.0457, 0.0392, 0.0550, 0.0401, 0.0436], + device='cuda:3'), out_proj_covar=tensor([1.2424e-04, 1.0562e-04, 9.4419e-05, 1.1984e-04, 1.0259e-04, 1.5377e-04, + 1.0723e-04, 1.1472e-04], device='cuda:3') +2023-02-07 10:22:38,337 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206477.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:22:55,679 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206502.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:00,162 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.619e+02 3.000e+02 3.925e+02 8.429e+02, threshold=6.000e+02, percent-clipped=7.0 +2023-02-07 10:23:00,191 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 10:23:04,479 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.54 vs. limit=2.0 +2023-02-07 10:23:08,814 INFO [train.py:901] (3/4) Epoch 26, batch 4450, loss[loss=0.1832, simple_loss=0.2779, pruned_loss=0.04428, over 8349.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.05817, over 1613244.77 frames. ], batch size: 26, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:16,260 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206533.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:34,156 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=206559.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:23:44,259 INFO [train.py:901] (3/4) Epoch 26, batch 4500, loss[loss=0.176, simple_loss=0.2656, pruned_loss=0.04318, over 8101.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.05784, over 1616447.61 frames. ], batch size: 23, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:23:55,209 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 10:24:07,630 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8055, 1.4304, 3.9757, 1.3893, 3.5111, 3.3092, 3.5834, 3.4618], + device='cuda:3'), covar=tensor([0.0758, 0.4750, 0.0691, 0.4676, 0.1269, 0.1070, 0.0760, 0.0805], + device='cuda:3'), in_proj_covar=tensor([0.0662, 0.0655, 0.0723, 0.0648, 0.0735, 0.0623, 0.0623, 0.0697], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:24:10,074 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.435e+02 2.363e+02 2.961e+02 3.499e+02 6.135e+02, threshold=5.921e+02, percent-clipped=1.0 +2023-02-07 10:24:18,689 INFO [train.py:901] (3/4) Epoch 26, batch 4550, loss[loss=0.2009, simple_loss=0.2907, pruned_loss=0.05552, over 8502.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2825, pruned_loss=0.05784, over 1613517.63 frames. 
], batch size: 26, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:19,502 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206623.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:35,787 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206648.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:24:51,534 INFO [train.py:901] (3/4) Epoch 26, batch 4600, loss[loss=0.1723, simple_loss=0.2604, pruned_loss=0.04208, over 8248.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2832, pruned_loss=0.05817, over 1611670.38 frames. ], batch size: 24, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:24:53,054 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=206674.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:25:01,031 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1585, 4.0759, 3.7570, 1.8330, 3.6205, 3.7582, 3.7882, 3.5729], + device='cuda:3'), covar=tensor([0.0775, 0.0560, 0.1038, 0.4874, 0.0941, 0.1000, 0.1258, 0.0801], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0452, 0.0438, 0.0550, 0.0434, 0.0456, 0.0434, 0.0399], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:25:18,502 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.466e+02 2.342e+02 2.811e+02 3.625e+02 9.770e+02, threshold=5.622e+02, percent-clipped=5.0 +2023-02-07 10:25:21,726 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. limit=2.0 +2023-02-07 10:25:23,276 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-07 10:25:28,319 INFO [train.py:901] (3/4) Epoch 26, batch 4650, loss[loss=0.2478, simple_loss=0.3336, pruned_loss=0.08095, over 8421.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05805, over 1615313.35 frames. ], batch size: 49, lr: 2.90e-03, grad_scale: 8.0 +2023-02-07 10:25:40,975 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-07 10:26:02,069 INFO [train.py:901] (3/4) Epoch 26, batch 4700, loss[loss=0.1821, simple_loss=0.2732, pruned_loss=0.04547, over 8089.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05804, over 1611766.58 frames. 
], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:08,426 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3997, 1.6889, 1.6604, 1.1293, 1.6784, 1.4004, 0.2480, 1.6040], + device='cuda:3'), covar=tensor([0.0656, 0.0426, 0.0386, 0.0641, 0.0556, 0.1110, 0.1017, 0.0368], + device='cuda:3'), in_proj_covar=tensor([0.0466, 0.0402, 0.0360, 0.0455, 0.0389, 0.0547, 0.0399, 0.0434], + device='cuda:3'), out_proj_covar=tensor([1.2370e-04, 1.0477e-04, 9.4011e-05, 1.1921e-04, 1.0177e-04, 1.5321e-04, + 1.0686e-04, 1.1408e-04], device='cuda:3') +2023-02-07 10:26:13,620 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206789.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:28,921 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.591e+02 2.506e+02 2.890e+02 3.298e+02 6.611e+02, threshold=5.779e+02, percent-clipped=3.0 +2023-02-07 10:26:32,508 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206814.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:26:34,594 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2322, 2.0497, 2.6171, 2.2415, 2.5609, 2.3093, 2.0790, 1.4125], + device='cuda:3'), covar=tensor([0.5512, 0.4864, 0.2120, 0.3628, 0.2464, 0.3003, 0.1955, 0.5105], + device='cuda:3'), in_proj_covar=tensor([0.0952, 0.1003, 0.0824, 0.0979, 0.1014, 0.0917, 0.0764, 0.0838], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:26:37,699 INFO [train.py:901] (3/4) Epoch 26, batch 4750, loss[loss=0.2001, simple_loss=0.2849, pruned_loss=0.05766, over 8243.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05813, over 1614472.17 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:26:53,345 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-07 10:26:55,380 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 10:26:58,852 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=206852.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:27:00,991 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-07 10:27:12,097 INFO [train.py:901] (3/4) Epoch 26, batch 4800, loss[loss=0.1783, simple_loss=0.2695, pruned_loss=0.04355, over 8295.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05788, over 1617812.63 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:21,155 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 10:27:37,299 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.410e+02 2.886e+02 3.541e+02 7.542e+02, threshold=5.772e+02, percent-clipped=6.0 +2023-02-07 10:27:46,734 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 10:27:47,378 INFO [train.py:901] (3/4) Epoch 26, batch 4850, loss[loss=0.2223, simple_loss=0.2979, pruned_loss=0.07333, over 6864.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05773, over 1615536.02 frames. 
], batch size: 71, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:27:52,971 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=206930.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:10,406 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=206955.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:28:21,662 INFO [train.py:901] (3/4) Epoch 26, batch 4900, loss[loss=0.1457, simple_loss=0.2255, pruned_loss=0.03296, over 7446.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2821, pruned_loss=0.05807, over 1610544.66 frames. ], batch size: 17, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:28:25,971 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-07 10:28:46,039 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.596e+02 2.535e+02 3.142e+02 3.836e+02 8.051e+02, threshold=6.285e+02, percent-clipped=2.0 +2023-02-07 10:28:55,275 INFO [train.py:901] (3/4) Epoch 26, batch 4950, loss[loss=0.219, simple_loss=0.3079, pruned_loss=0.06507, over 8585.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2831, pruned_loss=0.05858, over 1611036.40 frames. ], batch size: 31, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:04,281 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9658, 1.5412, 1.7296, 1.3719, 1.1019, 1.5243, 1.8306, 1.4391], + device='cuda:3'), covar=tensor([0.0524, 0.1249, 0.1619, 0.1454, 0.0578, 0.1429, 0.0654, 0.0650], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0189, 0.0160, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 10:29:32,338 INFO [train.py:901] (3/4) Epoch 26, batch 5000, loss[loss=0.1562, simple_loss=0.2318, pruned_loss=0.04028, over 7558.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2824, pruned_loss=0.05837, over 1606758.85 frames. ], batch size: 18, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:29:35,482 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-07 10:29:57,380 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 2.413e+02 2.985e+02 3.933e+02 1.062e+03, threshold=5.970e+02, percent-clipped=3.0 +2023-02-07 10:30:06,447 INFO [train.py:901] (3/4) Epoch 26, batch 5050, loss[loss=0.2019, simple_loss=0.2741, pruned_loss=0.06486, over 7921.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2828, pruned_loss=0.05814, over 1613572.73 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:13,973 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5961, 2.5471, 1.8323, 2.3127, 2.2084, 1.5878, 2.1073, 2.2548], + device='cuda:3'), covar=tensor([0.1522, 0.0434, 0.1242, 0.0667, 0.0781, 0.1572, 0.1059, 0.0901], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0240, 0.0341, 0.0313, 0.0302, 0.0348, 0.0349, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 10:30:24,761 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 10:30:42,639 INFO [train.py:901] (3/4) Epoch 26, batch 5100, loss[loss=0.2538, simple_loss=0.3171, pruned_loss=0.09521, over 6972.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2836, pruned_loss=0.05819, over 1617499.84 frames. 
], batch size: 71, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:30:58,171 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=207194.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:30:59,446 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207196.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:31:05,086 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0394, 1.5530, 3.5985, 1.4769, 2.5073, 3.9715, 4.0467, 3.4175], + device='cuda:3'), covar=tensor([0.1138, 0.1825, 0.0329, 0.2093, 0.1010, 0.0213, 0.0538, 0.0512], + device='cuda:3'), in_proj_covar=tensor([0.0300, 0.0324, 0.0287, 0.0316, 0.0317, 0.0274, 0.0432, 0.0303], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 10:31:08,239 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.087e+02 2.633e+02 3.622e+02 6.552e+02, threshold=5.265e+02, percent-clipped=1.0 +2023-02-07 10:31:16,944 INFO [train.py:901] (3/4) Epoch 26, batch 5150, loss[loss=0.1744, simple_loss=0.2495, pruned_loss=0.04966, over 7215.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2834, pruned_loss=0.05807, over 1618434.66 frames. ], batch size: 16, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:31:52,794 INFO [train.py:901] (3/4) Epoch 26, batch 5200, loss[loss=0.195, simple_loss=0.276, pruned_loss=0.057, over 8290.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2833, pruned_loss=0.05827, over 1615485.12 frames. ], batch size: 23, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:10,485 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-07 10:32:17,988 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.620e+02 2.585e+02 3.464e+02 4.468e+02 1.375e+03, threshold=6.928e+02, percent-clipped=16.0 +2023-02-07 10:32:19,441 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207311.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:32:19,977 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 10:32:26,691 INFO [train.py:901] (3/4) Epoch 26, batch 5250, loss[loss=0.2141, simple_loss=0.3137, pruned_loss=0.05728, over 8522.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.05869, over 1614165.30 frames. ], batch size: 26, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:32:52,569 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9962, 2.0334, 1.7903, 2.2936, 1.6121, 1.7455, 2.0285, 2.0891], + device='cuda:3'), covar=tensor([0.0628, 0.0710, 0.0764, 0.0645, 0.0946, 0.1065, 0.0591, 0.0645], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0193, 0.0244, 0.0212, 0.0202, 0.0245, 0.0248, 0.0204], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 10:33:00,341 INFO [train.py:901] (3/4) Epoch 26, batch 5300, loss[loss=0.1927, simple_loss=0.2703, pruned_loss=0.05757, over 7972.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2846, pruned_loss=0.05982, over 1604382.57 frames. 
], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:33:27,787 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.392e+02 2.913e+02 3.782e+02 6.658e+02, threshold=5.826e+02, percent-clipped=0.0 +2023-02-07 10:33:36,853 INFO [train.py:901] (3/4) Epoch 26, batch 5350, loss[loss=0.1697, simple_loss=0.2624, pruned_loss=0.0385, over 7923.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2832, pruned_loss=0.05878, over 1606867.99 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:10,288 INFO [train.py:901] (3/4) Epoch 26, batch 5400, loss[loss=0.1991, simple_loss=0.2928, pruned_loss=0.05269, over 8034.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05875, over 1610054.56 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:37,323 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.344e+02 3.061e+02 4.157e+02 9.885e+02, threshold=6.122e+02, percent-clipped=8.0 +2023-02-07 10:34:46,158 INFO [train.py:901] (3/4) Epoch 26, batch 5450, loss[loss=0.1896, simple_loss=0.2745, pruned_loss=0.05232, over 8092.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2848, pruned_loss=0.05932, over 1608752.02 frames. ], batch size: 21, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:34:57,844 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=207538.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:06,615 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 10:35:17,363 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207567.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:20,581 INFO [train.py:901] (3/4) Epoch 26, batch 5500, loss[loss=0.2175, simple_loss=0.3018, pruned_loss=0.06657, over 8253.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2852, pruned_loss=0.05939, over 1614767.68 frames. ], batch size: 24, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:35:34,400 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207592.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:35:47,181 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.395e+02 2.975e+02 3.460e+02 7.775e+02, threshold=5.949e+02, percent-clipped=2.0 +2023-02-07 10:35:56,712 INFO [train.py:901] (3/4) Epoch 26, batch 5550, loss[loss=0.1747, simple_loss=0.256, pruned_loss=0.04665, over 7427.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.284, pruned_loss=0.05843, over 1615383.43 frames. ], batch size: 17, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:17,892 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=207653.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:36:30,415 INFO [train.py:901] (3/4) Epoch 26, batch 5600, loss[loss=0.177, simple_loss=0.2609, pruned_loss=0.04653, over 7656.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2842, pruned_loss=0.05856, over 1612670.13 frames. ], batch size: 19, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:36:55,073 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.412e+02 3.079e+02 3.750e+02 8.490e+02, threshold=6.158e+02, percent-clipped=5.0 +2023-02-07 10:37:04,607 INFO [train.py:901] (3/4) Epoch 26, batch 5650, loss[loss=0.1889, simple_loss=0.281, pruned_loss=0.04844, over 8018.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2847, pruned_loss=0.05864, over 1611575.09 frames. 
], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:37:12,976 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 10:37:40,854 INFO [train.py:901] (3/4) Epoch 26, batch 5700, loss[loss=0.1655, simple_loss=0.2478, pruned_loss=0.04162, over 8136.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2834, pruned_loss=0.05766, over 1612074.04 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:05,994 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.232e+02 2.912e+02 3.330e+02 6.698e+02, threshold=5.824e+02, percent-clipped=1.0 +2023-02-07 10:38:14,800 INFO [train.py:901] (3/4) Epoch 26, batch 5750, loss[loss=0.1913, simple_loss=0.2739, pruned_loss=0.05429, over 7813.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.283, pruned_loss=0.05741, over 1609687.99 frames. ], batch size: 20, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:38:16,880 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 10:38:50,532 INFO [train.py:901] (3/4) Epoch 26, batch 5800, loss[loss=0.191, simple_loss=0.2834, pruned_loss=0.0493, over 8521.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.283, pruned_loss=0.05778, over 1611191.07 frames. ], batch size: 28, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:15,995 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.614e+02 3.148e+02 4.020e+02 8.026e+02, threshold=6.297e+02, percent-clipped=4.0 +2023-02-07 10:39:16,244 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=207909.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:24,804 INFO [train.py:901] (3/4) Epoch 26, batch 5850, loss[loss=0.1993, simple_loss=0.2761, pruned_loss=0.0613, over 8240.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.05687, over 1613315.53 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:39:32,992 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=207934.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:39:59,800 INFO [train.py:901] (3/4) Epoch 26, batch 5900, loss[loss=0.1855, simple_loss=0.2709, pruned_loss=0.05002, over 8135.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05749, over 1611637.15 frames. ], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:15,814 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1025, 1.0132, 1.6575, 0.9797, 1.6203, 1.8166, 1.8783, 1.5642], + device='cuda:3'), covar=tensor([0.0939, 0.1264, 0.0614, 0.1644, 0.1294, 0.0354, 0.0696, 0.0549], + device='cuda:3'), in_proj_covar=tensor([0.0300, 0.0324, 0.0290, 0.0316, 0.0317, 0.0275, 0.0435, 0.0305], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 10:40:26,851 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.451e+02 2.961e+02 3.656e+02 5.483e+02, threshold=5.923e+02, percent-clipped=0.0 +2023-02-07 10:40:35,634 INFO [train.py:901] (3/4) Epoch 26, batch 5950, loss[loss=0.2277, simple_loss=0.2935, pruned_loss=0.08098, over 8028.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2838, pruned_loss=0.05836, over 1612252.51 frames. 
], batch size: 22, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:40:54,934 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:41:09,815 INFO [train.py:901] (3/4) Epoch 26, batch 6000, loss[loss=0.2082, simple_loss=0.3003, pruned_loss=0.05799, over 8462.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2851, pruned_loss=0.05895, over 1617394.51 frames. ], batch size: 27, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:41:09,816 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 10:41:24,447 INFO [train.py:935] (3/4) Epoch 26, validation: loss=0.1721, simple_loss=0.2717, pruned_loss=0.03627, over 944034.00 frames. +2023-02-07 10:41:24,447 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 10:41:32,217 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8224, 2.1165, 3.6276, 1.9873, 1.7424, 3.5156, 0.7053, 2.2308], + device='cuda:3'), covar=tensor([0.1275, 0.1339, 0.0194, 0.1445, 0.2461, 0.0356, 0.1987, 0.1200], + device='cuda:3'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0222, 0.0276, 0.0144, 0.0170, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 10:41:51,028 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.308e+02 2.837e+02 3.630e+02 6.769e+02, threshold=5.675e+02, percent-clipped=2.0 +2023-02-07 10:42:00,869 INFO [train.py:901] (3/4) Epoch 26, batch 6050, loss[loss=0.1902, simple_loss=0.2751, pruned_loss=0.05266, over 8463.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.284, pruned_loss=0.05827, over 1616226.56 frames. ], batch size: 25, lr: 2.89e-03, grad_scale: 8.0 +2023-02-07 10:42:25,848 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5735, 4.6845, 4.1541, 2.0924, 3.9861, 4.2052, 4.1380, 3.9446], + device='cuda:3'), covar=tensor([0.0698, 0.0468, 0.0919, 0.4684, 0.0936, 0.0999, 0.1172, 0.0752], + device='cuda:3'), in_proj_covar=tensor([0.0529, 0.0446, 0.0435, 0.0541, 0.0427, 0.0451, 0.0427, 0.0395], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:42:36,037 INFO [train.py:901] (3/4) Epoch 26, batch 6100, loss[loss=0.1986, simple_loss=0.2856, pruned_loss=0.05582, over 8367.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2842, pruned_loss=0.0584, over 1615330.01 frames. ], batch size: 24, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:42:48,574 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-07 10:43:01,339 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.422e+02 2.947e+02 3.994e+02 1.088e+03, threshold=5.894e+02, percent-clipped=8.0 +2023-02-07 10:43:06,118 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7827, 5.9567, 5.1737, 2.4583, 5.1495, 5.5582, 5.5100, 5.3864], + device='cuda:3'), covar=tensor([0.0549, 0.0363, 0.0849, 0.4397, 0.0765, 0.0837, 0.0925, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0532, 0.0450, 0.0438, 0.0545, 0.0429, 0.0454, 0.0429, 0.0397], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:43:10,773 INFO [train.py:901] (3/4) Epoch 26, batch 6150, loss[loss=0.2071, simple_loss=0.2929, pruned_loss=0.06061, over 8293.00 frames. 
], tot_loss[loss=0.2006, simple_loss=0.2842, pruned_loss=0.05849, over 1610855.02 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:45,582 INFO [train.py:901] (3/4) Epoch 26, batch 6200, loss[loss=0.1993, simple_loss=0.296, pruned_loss=0.05129, over 8291.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2836, pruned_loss=0.05817, over 1610964.84 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:43:53,059 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208283.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:44:10,220 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 2.196e+02 2.837e+02 3.308e+02 7.178e+02, threshold=5.674e+02, percent-clipped=2.0 +2023-02-07 10:44:19,005 INFO [train.py:901] (3/4) Epoch 26, batch 6250, loss[loss=0.1684, simple_loss=0.2515, pruned_loss=0.04268, over 7786.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.05769, over 1610298.14 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:44:55,642 INFO [train.py:901] (3/4) Epoch 26, batch 6300, loss[loss=0.2082, simple_loss=0.2904, pruned_loss=0.06296, over 8491.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2812, pruned_loss=0.0573, over 1607402.89 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:45:10,475 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208394.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:45:20,546 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.443e+02 2.919e+02 3.618e+02 1.192e+03, threshold=5.838e+02, percent-clipped=3.0 +2023-02-07 10:45:29,140 INFO [train.py:901] (3/4) Epoch 26, batch 6350, loss[loss=0.1941, simple_loss=0.2904, pruned_loss=0.04892, over 8531.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2819, pruned_loss=0.05735, over 1611504.66 frames. ], batch size: 28, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:04,931 INFO [train.py:901] (3/4) Epoch 26, batch 6400, loss[loss=0.1878, simple_loss=0.2685, pruned_loss=0.05353, over 8085.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05702, over 1611503.34 frames. ], batch size: 21, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:46:30,402 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1924, 2.0307, 2.5969, 2.2068, 2.5546, 2.2975, 2.1081, 1.4935], + device='cuda:3'), covar=tensor([0.5774, 0.4940, 0.2161, 0.3864, 0.2755, 0.3165, 0.1941, 0.5423], + device='cuda:3'), in_proj_covar=tensor([0.0962, 0.1013, 0.0831, 0.0985, 0.1022, 0.0923, 0.0770, 0.0847], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:46:30,800 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.580e+02 3.188e+02 3.813e+02 6.849e+02, threshold=6.376e+02, percent-clipped=3.0 +2023-02-07 10:46:31,022 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208509.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:46:39,653 INFO [train.py:901] (3/4) Epoch 26, batch 6450, loss[loss=0.226, simple_loss=0.3071, pruned_loss=0.0725, over 6989.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.0576, over 1609397.28 frames. ], batch size: 71, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:13,749 INFO [train.py:901] (3/4) Epoch 26, batch 6500, loss[loss=0.2332, simple_loss=0.3186, pruned_loss=0.07391, over 8608.00 frames. 
], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05774, over 1607958.39 frames. ], batch size: 31, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:29,483 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=208594.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:39,234 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.371e+02 2.869e+02 3.528e+02 8.936e+02, threshold=5.738e+02, percent-clipped=3.0 +2023-02-07 10:47:44,315 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3702, 2.1046, 2.7205, 2.1995, 2.7364, 2.3954, 2.2323, 1.5409], + device='cuda:3'), covar=tensor([0.5627, 0.5089, 0.2253, 0.4340, 0.2738, 0.3176, 0.2003, 0.5788], + device='cuda:3'), in_proj_covar=tensor([0.0968, 0.1019, 0.0835, 0.0991, 0.1025, 0.0928, 0.0773, 0.0852], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:47:48,765 INFO [train.py:901] (3/4) Epoch 26, batch 6550, loss[loss=0.2108, simple_loss=0.3029, pruned_loss=0.05928, over 8593.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2831, pruned_loss=0.05805, over 1614932.43 frames. ], batch size: 31, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:47:52,200 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208627.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:47:56,170 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 10:48:12,891 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 10:48:22,502 INFO [train.py:901] (3/4) Epoch 26, batch 6600, loss[loss=0.1663, simple_loss=0.2433, pruned_loss=0.04463, over 7916.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.0575, over 1616405.31 frames. ], batch size: 20, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:48:49,206 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 2.581e+02 2.930e+02 3.571e+02 6.165e+02, threshold=5.859e+02, percent-clipped=2.0 +2023-02-07 10:48:58,720 INFO [train.py:901] (3/4) Epoch 26, batch 6650, loss[loss=0.1968, simple_loss=0.2884, pruned_loss=0.05256, over 8470.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2823, pruned_loss=0.05766, over 1613663.84 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:12,762 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=208742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:28,863 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208765.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:29,602 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3274, 2.0315, 2.6515, 2.1645, 2.7104, 2.3626, 2.2431, 1.5501], + device='cuda:3'), covar=tensor([0.6104, 0.5353, 0.2108, 0.4395, 0.2777, 0.3346, 0.2155, 0.5636], + device='cuda:3'), in_proj_covar=tensor([0.0967, 0.1017, 0.0834, 0.0989, 0.1024, 0.0927, 0.0771, 0.0849], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:49:33,535 INFO [train.py:901] (3/4) Epoch 26, batch 6700, loss[loss=0.2212, simple_loss=0.3084, pruned_loss=0.06698, over 8578.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.0581, over 1609762.05 frames. 
], batch size: 31, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:49:46,230 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=208790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:49:59,642 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.530e+02 3.053e+02 4.076e+02 9.744e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-07 10:50:03,319 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5593, 2.7392, 2.3183, 4.0746, 1.5890, 2.0815, 2.3919, 2.7491], + device='cuda:3'), covar=tensor([0.0734, 0.0809, 0.0867, 0.0239, 0.1129, 0.1266, 0.0983, 0.0854], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0212, 0.0203, 0.0246, 0.0250, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 10:50:09,367 INFO [train.py:901] (3/4) Epoch 26, batch 6750, loss[loss=0.1827, simple_loss=0.2726, pruned_loss=0.04638, over 8202.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2823, pruned_loss=0.05793, over 1610184.41 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:30,942 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 10:50:43,658 INFO [train.py:901] (3/4) Epoch 26, batch 6800, loss[loss=0.1769, simple_loss=0.2671, pruned_loss=0.04335, over 8129.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.283, pruned_loss=0.0586, over 1611211.09 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:50:57,051 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-07 10:51:08,812 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.587e+02 2.376e+02 2.847e+02 3.449e+02 1.016e+03, threshold=5.694e+02, percent-clipped=2.0 +2023-02-07 10:51:18,808 INFO [train.py:901] (3/4) Epoch 26, batch 6850, loss[loss=0.1577, simple_loss=0.2301, pruned_loss=0.04267, over 7441.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2833, pruned_loss=0.05867, over 1613178.29 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 16.0 +2023-02-07 10:51:19,502 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-07 10:51:30,532 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=208938.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:51:33,406 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8943, 2.0770, 1.6912, 2.6685, 1.3175, 1.5487, 2.0720, 2.1566], + device='cuda:3'), covar=tensor([0.0778, 0.0847, 0.0915, 0.0371, 0.1087, 0.1360, 0.0767, 0.0737], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0212, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 10:51:37,688 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4684, 2.2740, 2.9733, 2.4319, 3.0269, 2.4590, 2.3175, 1.9407], + device='cuda:3'), covar=tensor([0.5517, 0.5193, 0.2081, 0.4106, 0.2897, 0.3435, 0.2034, 0.5665], + device='cuda:3'), in_proj_covar=tensor([0.0961, 0.1014, 0.0830, 0.0986, 0.1020, 0.0924, 0.0769, 0.0847], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:51:42,436 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8787, 1.6703, 1.9666, 1.7582, 1.9098, 1.9430, 1.7716, 0.8648], + device='cuda:3'), covar=tensor([0.6098, 0.4990, 0.2199, 0.3442, 0.2602, 0.3065, 0.2153, 0.5183], + device='cuda:3'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1019, 0.0924, 0.0769, 0.0846], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:51:45,311 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-07 10:51:54,560 INFO [train.py:901] (3/4) Epoch 26, batch 6900, loss[loss=0.1819, simple_loss=0.2586, pruned_loss=0.05266, over 7816.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05836, over 1606368.54 frames. ], batch size: 20, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:10,629 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0042, 3.5404, 2.1098, 2.7385, 2.6528, 2.0948, 2.6362, 2.9078], + device='cuda:3'), covar=tensor([0.1755, 0.0421, 0.1266, 0.0790, 0.0737, 0.1437, 0.1125, 0.1027], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0239, 0.0339, 0.0310, 0.0302, 0.0345, 0.0347, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 10:52:12,016 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=208998.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:12,732 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9817, 1.7198, 2.0228, 1.8437, 1.9855, 2.0192, 1.9096, 0.8316], + device='cuda:3'), covar=tensor([0.5764, 0.4978, 0.2181, 0.3629, 0.2581, 0.3069, 0.1929, 0.5258], + device='cuda:3'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1018, 0.0923, 0.0769, 0.0846], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:52:20,218 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.430e+02 2.933e+02 3.890e+02 9.541e+02, threshold=5.866e+02, percent-clipped=7.0 +2023-02-07 10:52:23,042 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-07 10:52:23,724 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5847, 4.5698, 4.2065, 2.3173, 4.0463, 4.1599, 4.1399, 3.9968], + device='cuda:3'), covar=tensor([0.0651, 0.0479, 0.0879, 0.3991, 0.0861, 0.0995, 0.1229, 0.0792], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0456, 0.0442, 0.0552, 0.0436, 0.0460, 0.0436, 0.0402], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:52:28,984 INFO [train.py:901] (3/4) Epoch 26, batch 6950, loss[loss=0.2103, simple_loss=0.3007, pruned_loss=0.05999, over 8300.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2831, pruned_loss=0.05872, over 1606264.27 frames. ], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:52:29,857 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209023.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:51,125 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209053.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:52:51,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9073, 1.8787, 3.2125, 2.3961, 2.8499, 2.0089, 1.6951, 1.5427], + device='cuda:3'), covar=tensor([0.8159, 0.6996, 0.2301, 0.4347, 0.3360, 0.4791, 0.3151, 0.6285], + device='cuda:3'), in_proj_covar=tensor([0.0961, 0.1013, 0.0829, 0.0986, 0.1017, 0.0922, 0.0770, 0.0847], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 10:53:04,211 INFO [train.py:901] (3/4) Epoch 26, batch 7000, loss[loss=0.2254, simple_loss=0.2982, pruned_loss=0.0763, over 8337.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2822, pruned_loss=0.05844, over 1605814.57 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:30,453 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.766e+02 2.543e+02 3.075e+02 4.223e+02 1.225e+03, threshold=6.150e+02, percent-clipped=4.0 +2023-02-07 10:53:38,406 INFO [train.py:901] (3/4) Epoch 26, batch 7050, loss[loss=0.2047, simple_loss=0.2957, pruned_loss=0.05682, over 8512.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2828, pruned_loss=0.05837, over 1604398.28 frames. ], batch size: 26, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:53:44,277 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5548, 1.8468, 2.6219, 1.4947, 1.9314, 1.8944, 1.6492, 1.9820], + device='cuda:3'), covar=tensor([0.2002, 0.2577, 0.0972, 0.4626, 0.1955, 0.3390, 0.2456, 0.2283], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0633, 0.0563, 0.0668, 0.0660, 0.0610, 0.0561, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 10:54:15,161 INFO [train.py:901] (3/4) Epoch 26, batch 7100, loss[loss=0.1998, simple_loss=0.2914, pruned_loss=0.05408, over 8483.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2822, pruned_loss=0.05778, over 1604614.03 frames. 
], batch size: 49, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:54:42,204 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.719e+02 2.538e+02 3.057e+02 3.964e+02 1.199e+03, threshold=6.114e+02, percent-clipped=9.0 +2023-02-07 10:54:49,198 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209220.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:54:50,429 INFO [train.py:901] (3/4) Epoch 26, batch 7150, loss[loss=0.1759, simple_loss=0.2558, pruned_loss=0.04801, over 7784.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2823, pruned_loss=0.05788, over 1603397.23 frames. ], batch size: 19, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:25,278 INFO [train.py:901] (3/4) Epoch 26, batch 7200, loss[loss=0.1899, simple_loss=0.2662, pruned_loss=0.05679, over 7434.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2818, pruned_loss=0.0577, over 1601282.43 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:55:26,109 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209273.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:51,840 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209309.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:55:52,281 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.329e+02 2.733e+02 3.562e+02 6.414e+02, threshold=5.467e+02, percent-clipped=2.0 +2023-02-07 10:55:56,508 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209316.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:00,334 INFO [train.py:901] (3/4) Epoch 26, batch 7250, loss[loss=0.2088, simple_loss=0.2998, pruned_loss=0.0589, over 8544.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.282, pruned_loss=0.05792, over 1601070.50 frames. ], batch size: 31, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:08,455 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209334.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:56:33,562 INFO [train.py:901] (3/4) Epoch 26, batch 7300, loss[loss=0.1929, simple_loss=0.2678, pruned_loss=0.05901, over 7417.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2833, pruned_loss=0.05864, over 1600770.17 frames. ], batch size: 17, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:56:59,843 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.463e+02 3.055e+02 3.934e+02 7.151e+02, threshold=6.111e+02, percent-clipped=5.0 +2023-02-07 10:57:06,726 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 10:57:09,426 INFO [train.py:901] (3/4) Epoch 26, batch 7350, loss[loss=0.2331, simple_loss=0.3098, pruned_loss=0.07824, over 8460.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2835, pruned_loss=0.05882, over 1600406.83 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:26,326 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 10:57:42,840 INFO [train.py:901] (3/4) Epoch 26, batch 7400, loss[loss=0.1818, simple_loss=0.2682, pruned_loss=0.04769, over 8294.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2814, pruned_loss=0.05809, over 1596554.60 frames. 
], batch size: 23, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:57:51,016 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209484.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:04,804 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 10:58:09,472 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.528e+02 2.534e+02 3.042e+02 3.812e+02 9.347e+02, threshold=6.084e+02, percent-clipped=5.0 +2023-02-07 10:58:17,131 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6119, 1.4769, 2.2261, 1.3966, 1.1435, 2.1537, 0.3913, 1.2974], + device='cuda:3'), covar=tensor([0.1693, 0.1391, 0.0370, 0.1265, 0.2707, 0.0394, 0.1919, 0.1369], + device='cuda:3'), in_proj_covar=tensor([0.0197, 0.0205, 0.0133, 0.0224, 0.0276, 0.0144, 0.0172, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 10:58:17,541 INFO [train.py:901] (3/4) Epoch 26, batch 7450, loss[loss=0.2294, simple_loss=0.3255, pruned_loss=0.06668, over 8335.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2826, pruned_loss=0.05864, over 1601775.13 frames. ], batch size: 25, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:58:22,475 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209528.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:47,411 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209564.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:58:52,745 INFO [train.py:901] (3/4) Epoch 26, batch 7500, loss[loss=0.1916, simple_loss=0.2884, pruned_loss=0.04744, over 8246.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2823, pruned_loss=0.05842, over 1606623.27 frames. ], batch size: 22, lr: 2.88e-03, grad_scale: 8.0 +2023-02-07 10:59:18,694 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.713e+02 2.436e+02 2.983e+02 3.503e+02 8.056e+02, threshold=5.967e+02, percent-clipped=5.0 +2023-02-07 10:59:24,167 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209617.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:27,339 INFO [train.py:901] (3/4) Epoch 26, batch 7550, loss[loss=0.1912, simple_loss=0.2842, pruned_loss=0.04911, over 8104.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.282, pruned_loss=0.05821, over 1605838.22 frames. ], batch size: 23, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 10:59:28,771 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209624.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:54,107 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209660.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 10:59:55,498 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8262, 3.7884, 3.4574, 1.8401, 3.3441, 3.5108, 3.3769, 3.3188], + device='cuda:3'), covar=tensor([0.0982, 0.0718, 0.1317, 0.4716, 0.1040, 0.1181, 0.1416, 0.0971], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0456, 0.0443, 0.0554, 0.0436, 0.0462, 0.0436, 0.0403], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:00:01,941 INFO [train.py:901] (3/4) Epoch 26, batch 7600, loss[loss=0.187, simple_loss=0.2752, pruned_loss=0.04942, over 8469.00 frames. 
], tot_loss[loss=0.1991, simple_loss=0.2823, pruned_loss=0.05795, over 1607564.03 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:07,008 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209679.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:25,119 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=209706.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:00:27,679 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.404e+02 2.880e+02 3.478e+02 6.437e+02, threshold=5.761e+02, percent-clipped=3.0 +2023-02-07 11:00:35,718 INFO [train.py:901] (3/4) Epoch 26, batch 7650, loss[loss=0.1969, simple_loss=0.2638, pruned_loss=0.06493, over 7441.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2816, pruned_loss=0.05756, over 1605908.32 frames. ], batch size: 17, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:00:43,088 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209732.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:00,269 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.09 vs. limit=5.0 +2023-02-07 11:01:10,730 INFO [train.py:901] (3/4) Epoch 26, batch 7700, loss[loss=0.2003, simple_loss=0.276, pruned_loss=0.06231, over 7820.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2816, pruned_loss=0.05769, over 1606673.86 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:12,911 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209775.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:01:14,128 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 11:01:23,013 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0 +2023-02-07 11:01:36,859 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.688e+02 3.083e+02 3.850e+02 9.382e+02, threshold=6.167e+02, percent-clipped=8.0 +2023-02-07 11:01:39,195 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5479, 2.0327, 3.2012, 1.4252, 2.4187, 1.9873, 1.6329, 2.4396], + device='cuda:3'), covar=tensor([0.2087, 0.2739, 0.0915, 0.4888, 0.1969, 0.3283, 0.2601, 0.2346], + device='cuda:3'), in_proj_covar=tensor([0.0537, 0.0631, 0.0560, 0.0666, 0.0659, 0.0610, 0.0559, 0.0644], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:01:44,892 INFO [train.py:901] (3/4) Epoch 26, batch 7750, loss[loss=0.1986, simple_loss=0.2833, pruned_loss=0.05692, over 8342.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.282, pruned_loss=0.05793, over 1612198.32 frames. ], batch size: 25, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:01:48,919 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209828.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:19,753 INFO [train.py:901] (3/4) Epoch 26, batch 7800, loss[loss=0.181, simple_loss=0.2727, pruned_loss=0.04462, over 8610.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05798, over 1613499.24 frames. 
], batch size: 34, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:02:19,851 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209872.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:02:44,107 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7069, 1.5542, 4.8874, 1.8305, 4.3733, 4.1534, 4.4291, 4.3458], + device='cuda:3'), covar=tensor([0.0590, 0.5128, 0.0467, 0.4125, 0.1057, 0.0930, 0.0586, 0.0639], + device='cuda:3'), in_proj_covar=tensor([0.0672, 0.0666, 0.0734, 0.0658, 0.0742, 0.0633, 0.0633, 0.0709], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:02:45,312 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.465e+02 2.962e+02 3.430e+02 5.705e+02, threshold=5.924e+02, percent-clipped=0.0 +2023-02-07 11:02:53,248 INFO [train.py:901] (3/4) Epoch 26, batch 7850, loss[loss=0.217, simple_loss=0.2972, pruned_loss=0.06842, over 8022.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.284, pruned_loss=0.05862, over 1615114.32 frames. ], batch size: 22, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:01,894 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209935.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:07,221 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209943.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:18,183 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=209960.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:23,281 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=209968.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:25,898 INFO [train.py:901] (3/4) Epoch 26, batch 7900, loss[loss=0.1867, simple_loss=0.2731, pruned_loss=0.05021, over 8502.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.284, pruned_loss=0.0585, over 1619053.98 frames. ], batch size: 29, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:03:35,880 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=209987.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:36,569 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=209988.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:51,906 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.305e+02 2.795e+02 3.387e+02 5.942e+02, threshold=5.591e+02, percent-clipped=1.0 +2023-02-07 11:03:54,074 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210013.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:03:59,748 INFO [train.py:901] (3/4) Epoch 26, batch 7950, loss[loss=0.246, simple_loss=0.3255, pruned_loss=0.08321, over 8482.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05836, over 1618039.62 frames. 
], batch size: 29, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:06,009 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210031.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:18,599 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210050.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:22,777 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210056.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:33,087 INFO [train.py:901] (3/4) Epoch 26, batch 8000, loss[loss=0.1945, simple_loss=0.2712, pruned_loss=0.05892, over 7938.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2836, pruned_loss=0.05823, over 1614708.14 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:04:35,214 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210075.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:40,671 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210083.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:04:58,001 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.537e+02 2.316e+02 2.819e+02 3.710e+02 9.270e+02, threshold=5.638e+02, percent-clipped=7.0 +2023-02-07 11:05:05,833 INFO [train.py:901] (3/4) Epoch 26, batch 8050, loss[loss=0.173, simple_loss=0.2518, pruned_loss=0.04712, over 7931.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2816, pruned_loss=0.05767, over 1598547.63 frames. ], batch size: 20, lr: 2.87e-03, grad_scale: 8.0 +2023-02-07 11:05:38,130 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-07 11:05:43,130 INFO [train.py:901] (3/4) Epoch 27, batch 0, loss[loss=0.1826, simple_loss=0.2734, pruned_loss=0.0459, over 8085.00 frames. ], tot_loss[loss=0.1826, simple_loss=0.2734, pruned_loss=0.0459, over 8085.00 frames. ], batch size: 21, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:05:43,131 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 11:05:54,191 INFO [train.py:935] (3/4) Epoch 27, validation: loss=0.172, simple_loss=0.2713, pruned_loss=0.03628, over 944034.00 frames. +2023-02-07 11:05:54,192 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 11:06:01,185 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210165.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:08,374 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-07 11:06:24,774 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210199.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:28,655 INFO [train.py:901] (3/4) Epoch 27, batch 50, loss[loss=0.1982, simple_loss=0.2846, pruned_loss=0.0559, over 8291.00 frames. ], tot_loss[loss=0.2051, simple_loss=0.2875, pruned_loss=0.06129, over 366851.73 frames. ], batch size: 23, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:06:33,574 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.449e+02 2.417e+02 2.930e+02 3.516e+02 7.088e+02, threshold=5.860e+02, percent-clipped=5.0 +2023-02-07 11:06:41,910 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-07 11:06:43,320 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210224.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:06:56,862 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210243.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:04,542 INFO [train.py:901] (3/4) Epoch 27, batch 100, loss[loss=0.2247, simple_loss=0.2944, pruned_loss=0.07755, over 8110.00 frames. ], tot_loss[loss=0.2036, simple_loss=0.2861, pruned_loss=0.0605, over 644245.89 frames. ], batch size: 23, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:05,182 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-07 11:07:13,375 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210268.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:07:38,001 INFO [train.py:901] (3/4) Epoch 27, batch 150, loss[loss=0.1972, simple_loss=0.2885, pruned_loss=0.05293, over 8337.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2857, pruned_loss=0.06022, over 859768.71 frames. ], batch size: 26, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:07:41,159 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.352e+02 2.905e+02 3.661e+02 1.089e+03, threshold=5.811e+02, percent-clipped=3.0 +2023-02-07 11:07:41,642 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 11:08:02,852 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210339.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:14,089 INFO [train.py:901] (3/4) Epoch 27, batch 200, loss[loss=0.1972, simple_loss=0.2861, pruned_loss=0.05413, over 8142.00 frames. ], tot_loss[loss=0.2033, simple_loss=0.286, pruned_loss=0.06037, over 1026167.35 frames. ], batch size: 22, lr: 2.82e-03, grad_scale: 8.0 +2023-02-07 11:08:20,482 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210364.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:48,338 INFO [train.py:901] (3/4) Epoch 27, batch 250, loss[loss=0.2112, simple_loss=0.2806, pruned_loss=0.07084, over 8030.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.2861, pruned_loss=0.06005, over 1160280.03 frames. ], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:08:49,372 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-07 11:08:51,580 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.304e+02 2.819e+02 3.559e+02 6.263e+02, threshold=5.638e+02, percent-clipped=1.0 +2023-02-07 11:08:57,565 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-07 11:08:57,631 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210419.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:08:59,067 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210421.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:06,462 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-07 11:09:10,764 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1814, 1.9584, 2.3994, 2.0757, 2.4266, 2.2602, 2.0921, 1.2885], + device='cuda:3'), covar=tensor([0.5996, 0.5028, 0.2125, 0.4177, 0.2780, 0.3143, 0.2060, 0.5525], + device='cuda:3'), in_proj_covar=tensor([0.0965, 0.1017, 0.0828, 0.0985, 0.1021, 0.0926, 0.0772, 0.0847], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 11:09:16,573 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210446.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:09:23,009 INFO [train.py:901] (3/4) Epoch 27, batch 300, loss[loss=0.2043, simple_loss=0.2899, pruned_loss=0.05938, over 7963.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2851, pruned_loss=0.05944, over 1260302.23 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:09:57,343 INFO [train.py:901] (3/4) Epoch 27, batch 350, loss[loss=0.2381, simple_loss=0.2969, pruned_loss=0.08963, over 6933.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2845, pruned_loss=0.05968, over 1336567.14 frames. ], batch size: 72, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:10:00,691 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.321e+02 2.740e+02 3.479e+02 7.751e+02, threshold=5.481e+02, percent-clipped=4.0 +2023-02-07 11:10:06,930 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3261, 1.7899, 1.3037, 2.9443, 1.2926, 1.2798, 2.0209, 1.9266], + device='cuda:3'), covar=tensor([0.1617, 0.1395, 0.2040, 0.0381, 0.1481, 0.2104, 0.1069, 0.1156], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0194, 0.0246, 0.0211, 0.0202, 0.0245, 0.0249, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 11:10:16,907 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=210534.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:10:29,881 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-07 11:10:30,766 INFO [train.py:901] (3/4) Epoch 27, batch 400, loss[loss=0.2203, simple_loss=0.3081, pruned_loss=0.06624, over 8806.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2843, pruned_loss=0.05939, over 1396326.47 frames. ], batch size: 39, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:06,884 INFO [train.py:901] (3/4) Epoch 27, batch 450, loss[loss=0.1952, simple_loss=0.2895, pruned_loss=0.05049, over 8257.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.284, pruned_loss=0.05919, over 1444575.80 frames. 
], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:10,233 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.445e+02 3.096e+02 3.744e+02 6.670e+02, threshold=6.192e+02, percent-clipped=5.0 +2023-02-07 11:11:28,589 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1553, 4.1590, 3.7924, 1.9938, 3.6365, 3.8246, 3.7037, 3.7087], + device='cuda:3'), covar=tensor([0.0831, 0.0570, 0.1064, 0.4203, 0.0908, 0.1106, 0.1334, 0.0850], + device='cuda:3'), in_proj_covar=tensor([0.0540, 0.0458, 0.0442, 0.0555, 0.0437, 0.0460, 0.0436, 0.0405], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:11:37,206 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210650.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:11:40,169 INFO [train.py:901] (3/4) Epoch 27, batch 500, loss[loss=0.1963, simple_loss=0.279, pruned_loss=0.0568, over 8466.00 frames. ], tot_loss[loss=0.2019, simple_loss=0.2844, pruned_loss=0.05972, over 1483367.95 frames. ], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:11:55,133 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8492, 2.1155, 2.2222, 1.3768, 2.3101, 1.6862, 0.6794, 2.0625], + device='cuda:3'), covar=tensor([0.0603, 0.0364, 0.0295, 0.0711, 0.0438, 0.0903, 0.0992, 0.0292], + device='cuda:3'), in_proj_covar=tensor([0.0469, 0.0406, 0.0360, 0.0458, 0.0392, 0.0548, 0.0402, 0.0436], + device='cuda:3'), out_proj_covar=tensor([1.2454e-04, 1.0560e-04, 9.4168e-05, 1.2007e-04, 1.0262e-04, 1.5318e-04, + 1.0753e-04, 1.1439e-04], device='cuda:3') +2023-02-07 11:12:01,202 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210684.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:12,486 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-07 11:12:12,805 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210700.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:12:15,962 INFO [train.py:901] (3/4) Epoch 27, batch 550, loss[loss=0.2214, simple_loss=0.3045, pruned_loss=0.06912, over 8560.00 frames. ], tot_loss[loss=0.2022, simple_loss=0.2851, pruned_loss=0.05967, over 1517817.55 frames. ], batch size: 31, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:12:19,368 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.605e+02 2.336e+02 2.792e+02 3.793e+02 8.487e+02, threshold=5.584e+02, percent-clipped=3.0 +2023-02-07 11:12:50,313 INFO [train.py:901] (3/4) Epoch 27, batch 600, loss[loss=0.1633, simple_loss=0.2464, pruned_loss=0.04007, over 7787.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2847, pruned_loss=0.05909, over 1541933.06 frames. ], batch size: 19, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:08,463 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=210782.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:11,593 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-07 11:13:13,655 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=210790.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:23,578 INFO [train.py:901] (3/4) Epoch 27, batch 650, loss[loss=0.1716, simple_loss=0.2637, pruned_loss=0.03973, over 8568.00 frames. 
], tot_loss[loss=0.202, simple_loss=0.2852, pruned_loss=0.0594, over 1558847.36 frames. ], batch size: 31, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:13:28,262 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.351e+02 2.894e+02 3.474e+02 6.032e+02, threshold=5.788e+02, percent-clipped=3.0 +2023-02-07 11:13:32,519 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=210815.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:13:59,777 INFO [train.py:901] (3/4) Epoch 27, batch 700, loss[loss=0.2108, simple_loss=0.294, pruned_loss=0.06386, over 8507.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05909, over 1568796.05 frames. ], batch size: 29, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:29,003 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0935, 2.2452, 1.9591, 2.7723, 1.2612, 1.7138, 2.0821, 2.2090], + device='cuda:3'), covar=tensor([0.0722, 0.0781, 0.0797, 0.0320, 0.1069, 0.1210, 0.0712, 0.0755], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0193, 0.0245, 0.0210, 0.0202, 0.0245, 0.0248, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 11:14:32,721 INFO [train.py:901] (3/4) Epoch 27, batch 750, loss[loss=0.2137, simple_loss=0.2936, pruned_loss=0.06694, over 8282.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2851, pruned_loss=0.05952, over 1580678.26 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:14:35,973 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.536e+02 2.996e+02 3.960e+02 1.304e+03, threshold=5.993e+02, percent-clipped=7.0 +2023-02-07 11:14:54,994 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-07 11:15:04,238 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-07 11:15:08,844 INFO [train.py:901] (3/4) Epoch 27, batch 800, loss[loss=0.2242, simple_loss=0.3004, pruned_loss=0.07398, over 8528.00 frames. ], tot_loss[loss=0.2024, simple_loss=0.2854, pruned_loss=0.05969, over 1584929.96 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:15:11,731 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-07 11:15:34,999 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=210994.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:38,615 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-07 11:15:40,390 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211002.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:15:42,223 INFO [train.py:901] (3/4) Epoch 27, batch 850, loss[loss=0.1989, simple_loss=0.2877, pruned_loss=0.05512, over 7936.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2846, pruned_loss=0.05917, over 1594669.94 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:15:45,636 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.581e+02 2.265e+02 2.725e+02 3.482e+02 8.151e+02, threshold=5.450e+02, percent-clipped=2.0 +2023-02-07 11:15:46,884 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. 
limit=2.0 +2023-02-07 11:15:57,723 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211028.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:10,470 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211044.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:17,677 INFO [train.py:901] (3/4) Epoch 27, batch 900, loss[loss=0.1767, simple_loss=0.2633, pruned_loss=0.0451, over 7926.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05884, over 1596256.43 frames. ], batch size: 20, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:51,937 INFO [train.py:901] (3/4) Epoch 27, batch 950, loss[loss=0.1883, simple_loss=0.2779, pruned_loss=0.04934, over 8067.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2836, pruned_loss=0.05875, over 1600012.05 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:16:54,834 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211109.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:16:55,251 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.525e+02 2.483e+02 2.981e+02 4.008e+02 9.530e+02, threshold=5.961e+02, percent-clipped=10.0 +2023-02-07 11:17:06,170 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211126.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:11,860 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-07 11:17:17,582 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211143.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:18,072 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-07 11:17:26,813 INFO [train.py:901] (3/4) Epoch 27, batch 1000, loss[loss=0.2034, simple_loss=0.2936, pruned_loss=0.0566, over 8501.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2844, pruned_loss=0.05867, over 1610582.12 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 16.0 +2023-02-07 11:17:30,479 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211159.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:17:46,838 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0 +2023-02-07 11:17:53,305 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-07 11:18:03,302 INFO [train.py:901] (3/4) Epoch 27, batch 1050, loss[loss=0.1674, simple_loss=0.2578, pruned_loss=0.03854, over 8356.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2842, pruned_loss=0.05817, over 1610298.45 frames. ], batch size: 24, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:18:05,227 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-07 11:18:07,261 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.494e+02 3.070e+02 3.818e+02 8.233e+02, threshold=6.140e+02, percent-clipped=4.0 +2023-02-07 11:18:27,174 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211241.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:18:36,464 INFO [train.py:901] (3/4) Epoch 27, batch 1100, loss[loss=0.2057, simple_loss=0.2874, pruned_loss=0.06197, over 8133.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2846, pruned_loss=0.0586, over 1613835.47 frames. 
], batch size: 22, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:13,343 INFO [train.py:901] (3/4) Epoch 27, batch 1150, loss[loss=0.2068, simple_loss=0.2976, pruned_loss=0.05803, over 8494.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2851, pruned_loss=0.05865, over 1620002.22 frames. ], batch size: 28, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:15,967 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-07 11:19:17,152 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-07 11:19:17,279 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.371e+02 2.782e+02 3.549e+02 6.262e+02, threshold=5.564e+02, percent-clipped=1.0 +2023-02-07 11:19:40,861 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211346.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:19:46,770 INFO [train.py:901] (3/4) Epoch 27, batch 1200, loss[loss=0.2175, simple_loss=0.3016, pruned_loss=0.06669, over 8361.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2844, pruned_loss=0.05832, over 1618381.28 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:19:53,754 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211365.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:08,488 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3353, 2.3336, 1.6183, 2.1830, 1.8421, 1.3785, 1.7966, 1.9986], + device='cuda:3'), covar=tensor([0.1709, 0.0536, 0.1486, 0.0650, 0.0940, 0.2027, 0.1242, 0.1026], + device='cuda:3'), in_proj_covar=tensor([0.0363, 0.0241, 0.0343, 0.0314, 0.0305, 0.0347, 0.0349, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 11:20:11,160 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:18,556 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211399.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:21,056 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8646, 3.8546, 3.5602, 1.9036, 3.3891, 3.5272, 3.4894, 3.3279], + device='cuda:3'), covar=tensor([0.0920, 0.0648, 0.1102, 0.4469, 0.1009, 0.0928, 0.1368, 0.0909], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0459, 0.0446, 0.0557, 0.0440, 0.0463, 0.0437, 0.0407], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:20:21,774 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211404.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:22,042 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-07 11:20:22,321 INFO [train.py:901] (3/4) Epoch 27, batch 1250, loss[loss=0.1706, simple_loss=0.2454, pruned_loss=0.04794, over 7443.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2835, pruned_loss=0.05759, over 1617229.90 frames. 
], batch size: 17, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:20:26,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.344e+02 2.922e+02 3.484e+02 6.390e+02, threshold=5.843e+02, percent-clipped=2.0 +2023-02-07 11:20:29,709 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211415.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:35,526 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211424.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:39,484 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=211430.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:46,235 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211440.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:20:49,935 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-07 11:20:56,235 INFO [train.py:901] (3/4) Epoch 27, batch 1300, loss[loss=0.2219, simple_loss=0.3109, pruned_loss=0.06645, over 8673.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2832, pruned_loss=0.0578, over 1617683.80 frames. ], batch size: 34, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:20:59,852 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5114, 1.7986, 2.6295, 1.4145, 1.8647, 1.8650, 1.5765, 1.8985], + device='cuda:3'), covar=tensor([0.2032, 0.2553, 0.0912, 0.4809, 0.2124, 0.3379, 0.2499, 0.2332], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0632, 0.0564, 0.0669, 0.0659, 0.0610, 0.0560, 0.0644], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:21:00,467 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211461.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:21:23,172 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6407, 1.4740, 2.8740, 1.3556, 2.2196, 3.0789, 3.2546, 2.6306], + device='cuda:3'), covar=tensor([0.1311, 0.1692, 0.0375, 0.2235, 0.0894, 0.0309, 0.0567, 0.0565], + device='cuda:3'), in_proj_covar=tensor([0.0307, 0.0327, 0.0293, 0.0321, 0.0320, 0.0278, 0.0437, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 11:21:24,574 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211497.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:21:30,515 INFO [train.py:901] (3/4) Epoch 27, batch 1350, loss[loss=0.2132, simple_loss=0.2961, pruned_loss=0.06519, over 8540.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05772, over 1615348.08 frames. ], batch size: 39, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:21:34,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.433e+02 2.859e+02 3.519e+02 6.900e+02, threshold=5.717e+02, percent-clipped=5.0 +2023-02-07 11:21:43,624 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211522.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:22:05,945 INFO [train.py:901] (3/4) Epoch 27, batch 1400, loss[loss=0.1833, simple_loss=0.2827, pruned_loss=0.04191, over 8326.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2834, pruned_loss=0.05806, over 1614544.60 frames. 
], batch size: 25, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:22:39,557 INFO [train.py:901] (3/4) Epoch 27, batch 1450, loss[loss=0.2362, simple_loss=0.3156, pruned_loss=0.07839, over 8282.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05885, over 1609846.20 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:22:43,640 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.739e+02 3.417e+02 5.363e+02 1.739e+03, threshold=6.835e+02, percent-clipped=22.0 +2023-02-07 11:22:45,697 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-07 11:23:15,887 INFO [train.py:901] (3/4) Epoch 27, batch 1500, loss[loss=0.2511, simple_loss=0.3263, pruned_loss=0.08797, over 6848.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2846, pruned_loss=0.05901, over 1611023.14 frames. ], batch size: 71, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:23:49,650 INFO [train.py:901] (3/4) Epoch 27, batch 1550, loss[loss=0.2093, simple_loss=0.2898, pruned_loss=0.06441, over 8508.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2839, pruned_loss=0.05852, over 1613890.49 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:23:53,681 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.372e+02 3.027e+02 3.476e+02 5.786e+02, threshold=6.054e+02, percent-clipped=0.0 +2023-02-07 11:23:57,840 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=211717.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:15,294 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=211742.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:19,425 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211748.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:24,774 INFO [train.py:901] (3/4) Epoch 27, batch 1600, loss[loss=0.2176, simple_loss=0.2993, pruned_loss=0.06796, over 8510.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05833, over 1612134.07 frames. ], batch size: 26, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:24:28,362 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4382, 1.3280, 1.6845, 1.1169, 1.1612, 1.6779, 0.5713, 1.2757], + device='cuda:3'), covar=tensor([0.1376, 0.0927, 0.0405, 0.0879, 0.1858, 0.0394, 0.1682, 0.1174], + device='cuda:3'), in_proj_covar=tensor([0.0197, 0.0203, 0.0133, 0.0221, 0.0275, 0.0144, 0.0172, 0.0197], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 11:24:38,874 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=211774.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:24:59,722 INFO [train.py:901] (3/4) Epoch 27, batch 1650, loss[loss=0.1895, simple_loss=0.2774, pruned_loss=0.05078, over 8077.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2843, pruned_loss=0.05871, over 1614276.01 frames. ], batch size: 21, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:25:03,544 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. 
limit=2.0 +2023-02-07 11:25:03,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.547e+02 3.089e+02 3.889e+02 1.356e+03, threshold=6.177e+02, percent-clipped=3.0 +2023-02-07 11:25:34,307 INFO [train.py:901] (3/4) Epoch 27, batch 1700, loss[loss=0.1792, simple_loss=0.2711, pruned_loss=0.04366, over 8108.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2852, pruned_loss=0.05885, over 1616675.68 frames. ], batch size: 23, lr: 2.81e-03, grad_scale: 8.0 +2023-02-07 11:25:39,902 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211863.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:25:57,119 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0430, 1.3917, 3.4540, 1.6206, 2.2973, 3.8617, 3.9792, 3.3377], + device='cuda:3'), covar=tensor([0.1150, 0.1965, 0.0353, 0.2145, 0.1187, 0.0229, 0.0408, 0.0503], + device='cuda:3'), in_proj_covar=tensor([0.0307, 0.0329, 0.0295, 0.0322, 0.0323, 0.0279, 0.0440, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 11:25:59,112 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=211889.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:26:09,566 INFO [train.py:901] (3/4) Epoch 27, batch 1750, loss[loss=0.1803, simple_loss=0.2714, pruned_loss=0.04465, over 7964.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2839, pruned_loss=0.0587, over 1611296.96 frames. ], batch size: 21, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:26:13,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.324e+02 2.460e+02 2.972e+02 3.773e+02 5.726e+02, threshold=5.944e+02, percent-clipped=0.0 +2023-02-07 11:26:24,664 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.82 vs. limit=5.0 +2023-02-07 11:26:43,420 INFO [train.py:901] (3/4) Epoch 27, batch 1800, loss[loss=0.2058, simple_loss=0.2947, pruned_loss=0.05846, over 8139.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2826, pruned_loss=0.05797, over 1611762.93 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:27:03,808 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 11:27:20,497 INFO [train.py:901] (3/4) Epoch 27, batch 1850, loss[loss=0.2343, simple_loss=0.3185, pruned_loss=0.07503, over 8623.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2821, pruned_loss=0.05759, over 1611915.49 frames. ], batch size: 34, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:27:24,575 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.252e+02 2.767e+02 3.484e+02 5.487e+02, threshold=5.534e+02, percent-clipped=0.0 +2023-02-07 11:27:54,162 INFO [train.py:901] (3/4) Epoch 27, batch 1900, loss[loss=0.1839, simple_loss=0.2668, pruned_loss=0.05049, over 7965.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2831, pruned_loss=0.05796, over 1611523.16 frames. 
], batch size: 21, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:28:23,913 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8756, 3.8071, 3.4894, 1.7032, 3.4175, 3.5218, 3.4084, 3.3921], + device='cuda:3'), covar=tensor([0.1045, 0.0739, 0.1261, 0.4810, 0.1026, 0.1131, 0.1610, 0.0837], + device='cuda:3'), in_proj_covar=tensor([0.0540, 0.0456, 0.0442, 0.0552, 0.0438, 0.0460, 0.0437, 0.0402], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:28:25,185 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-07 11:28:28,380 INFO [train.py:901] (3/4) Epoch 27, batch 1950, loss[loss=0.2144, simple_loss=0.2966, pruned_loss=0.06608, over 8026.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2829, pruned_loss=0.05801, over 1611866.37 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:28:33,126 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.484e+02 3.059e+02 3.727e+02 7.478e+02, threshold=6.119e+02, percent-clipped=3.0 +2023-02-07 11:28:38,414 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-07 11:28:39,362 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212119.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:28:41,243 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212122.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 11:28:56,767 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212144.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:28:57,282 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-07 11:28:57,487 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212145.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:29:03,980 INFO [train.py:901] (3/4) Epoch 27, batch 2000, loss[loss=0.1921, simple_loss=0.2884, pruned_loss=0.0479, over 8199.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2825, pruned_loss=0.05788, over 1609208.96 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:29:14,158 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212170.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:29:37,643 INFO [train.py:901] (3/4) Epoch 27, batch 2050, loss[loss=0.1969, simple_loss=0.2817, pruned_loss=0.05603, over 7807.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05706, over 1611598.65 frames. 
], batch size: 19, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:29:41,738 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.364e+02 2.966e+02 3.655e+02 9.314e+02, threshold=5.932e+02, percent-clipped=4.0 +2023-02-07 11:29:53,497 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1772, 1.4405, 1.6567, 1.4002, 0.9701, 1.4435, 1.6569, 1.8064], + device='cuda:3'), covar=tensor([0.0531, 0.1272, 0.1702, 0.1508, 0.0635, 0.1492, 0.0743, 0.0591], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0163, 0.0102, 0.0164, 0.0113, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 11:30:13,817 INFO [train.py:901] (3/4) Epoch 27, batch 2100, loss[loss=0.2081, simple_loss=0.2928, pruned_loss=0.0617, over 8286.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05671, over 1614419.69 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:30:24,389 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4495, 4.3515, 4.0356, 2.1940, 3.8794, 4.1054, 3.8758, 3.9300], + device='cuda:3'), covar=tensor([0.0622, 0.0497, 0.0908, 0.4095, 0.0817, 0.0761, 0.1127, 0.0684], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0456, 0.0442, 0.0554, 0.0438, 0.0459, 0.0437, 0.0404], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:30:25,137 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212271.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:30:42,130 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-07 11:30:47,851 INFO [train.py:901] (3/4) Epoch 27, batch 2150, loss[loss=0.1817, simple_loss=0.266, pruned_loss=0.04873, over 8294.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2807, pruned_loss=0.05648, over 1614413.21 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:30:48,031 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0038, 1.6436, 1.3808, 1.5212, 1.3057, 1.2434, 1.2722, 1.2565], + device='cuda:3'), covar=tensor([0.1233, 0.0510, 0.1310, 0.0614, 0.0799, 0.1599, 0.1007, 0.0879], + device='cuda:3'), in_proj_covar=tensor([0.0365, 0.0240, 0.0341, 0.0316, 0.0304, 0.0349, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 11:30:51,762 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.722e+02 2.395e+02 2.764e+02 3.582e+02 6.444e+02, threshold=5.527e+02, percent-clipped=1.0 +2023-02-07 11:31:22,777 INFO [train.py:901] (3/4) Epoch 27, batch 2200, loss[loss=0.2306, simple_loss=0.3122, pruned_loss=0.07455, over 8615.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2815, pruned_loss=0.0571, over 1616560.66 frames. ], batch size: 34, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:31:57,344 INFO [train.py:901] (3/4) Epoch 27, batch 2250, loss[loss=0.2245, simple_loss=0.3138, pruned_loss=0.06765, over 8735.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2817, pruned_loss=0.05699, over 1616521.47 frames. 
], batch size: 34, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:32:01,575 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.617e+02 2.301e+02 2.815e+02 3.457e+02 5.141e+02, threshold=5.631e+02, percent-clipped=0.0 +2023-02-07 11:32:31,465 INFO [train.py:901] (3/4) Epoch 27, batch 2300, loss[loss=0.2208, simple_loss=0.3148, pruned_loss=0.06337, over 8245.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05692, over 1614289.56 frames. ], batch size: 24, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:32:33,511 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212457.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:32:39,373 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212466.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:33:07,929 INFO [train.py:901] (3/4) Epoch 27, batch 2350, loss[loss=0.218, simple_loss=0.3042, pruned_loss=0.06589, over 8594.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05695, over 1614699.87 frames. ], batch size: 31, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:33:12,148 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.366e+02 2.801e+02 3.492e+02 6.818e+02, threshold=5.601e+02, percent-clipped=4.0 +2023-02-07 11:33:42,852 INFO [train.py:901] (3/4) Epoch 27, batch 2400, loss[loss=0.2292, simple_loss=0.3061, pruned_loss=0.07612, over 8489.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2813, pruned_loss=0.05739, over 1616609.50 frames. ], batch size: 39, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:01,680 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212581.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:34:04,909 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8510, 3.7569, 3.4445, 1.8894, 3.4064, 3.3910, 3.3267, 3.2490], + device='cuda:3'), covar=tensor([0.0890, 0.0703, 0.1184, 0.4564, 0.0970, 0.1243, 0.1497, 0.0927], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0460, 0.0444, 0.0558, 0.0440, 0.0464, 0.0439, 0.0406], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:34:19,818 INFO [train.py:901] (3/4) Epoch 27, batch 2450, loss[loss=0.1775, simple_loss=0.2616, pruned_loss=0.04675, over 8460.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2801, pruned_loss=0.05729, over 1610576.97 frames. ], batch size: 29, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:23,906 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.730e+02 2.406e+02 2.879e+02 3.948e+02 9.646e+02, threshold=5.757e+02, percent-clipped=9.0 +2023-02-07 11:34:26,763 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212615.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:34:46,276 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 11:34:54,052 INFO [train.py:901] (3/4) Epoch 27, batch 2500, loss[loss=0.1735, simple_loss=0.2529, pruned_loss=0.04705, over 7813.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2795, pruned_loss=0.05702, over 1608891.68 frames. 
], batch size: 19, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:34:59,682 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3378, 1.2231, 2.3695, 1.1936, 2.0826, 2.5119, 2.7211, 2.1034], + device='cuda:3'), covar=tensor([0.1296, 0.1625, 0.0449, 0.2416, 0.0829, 0.0407, 0.0716, 0.0740], + device='cuda:3'), in_proj_covar=tensor([0.0305, 0.0327, 0.0292, 0.0321, 0.0320, 0.0277, 0.0437, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 11:35:28,281 INFO [train.py:901] (3/4) Epoch 27, batch 2550, loss[loss=0.1954, simple_loss=0.2839, pruned_loss=0.05343, over 8455.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2798, pruned_loss=0.05735, over 1615036.67 frames. ], batch size: 27, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:35:33,066 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.333e+02 2.985e+02 3.926e+02 7.498e+02, threshold=5.971e+02, percent-clipped=4.0 +2023-02-07 11:35:37,327 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212716.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:35:46,883 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212729.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:35:47,587 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212730.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:04,562 INFO [train.py:901] (3/4) Epoch 27, batch 2600, loss[loss=0.1743, simple_loss=0.2584, pruned_loss=0.04508, over 7654.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2793, pruned_loss=0.05678, over 1612852.13 frames. ], batch size: 19, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:36:09,618 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. 
limit=2.0 +2023-02-07 11:36:18,127 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4447, 1.2635, 2.4162, 1.2838, 2.2564, 2.5630, 2.7158, 2.1771], + device='cuda:3'), covar=tensor([0.1117, 0.1486, 0.0429, 0.2142, 0.0725, 0.0366, 0.0642, 0.0657], + device='cuda:3'), in_proj_covar=tensor([0.0305, 0.0327, 0.0292, 0.0320, 0.0320, 0.0277, 0.0438, 0.0307], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 11:36:21,581 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4126, 3.8848, 2.7120, 3.0485, 3.0885, 2.3593, 3.1560, 3.2899], + device='cuda:3'), covar=tensor([0.1631, 0.0378, 0.1009, 0.0733, 0.0698, 0.1499, 0.0996, 0.1079], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0238, 0.0337, 0.0312, 0.0301, 0.0345, 0.0346, 0.0319], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0002, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 11:36:29,686 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=212792.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:33,817 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8157, 1.5838, 2.4004, 1.5992, 1.2643, 2.3237, 0.5192, 1.4915], + device='cuda:3'), covar=tensor([0.1407, 0.1335, 0.0298, 0.1019, 0.2472, 0.0327, 0.1838, 0.1270], + device='cuda:3'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0276, 0.0144, 0.0171, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 11:36:35,853 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=212801.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:36:38,506 INFO [train.py:901] (3/4) Epoch 27, batch 2650, loss[loss=0.254, simple_loss=0.3149, pruned_loss=0.0966, over 6668.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2799, pruned_loss=0.05688, over 1616460.46 frames. ], batch size: 71, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:36:43,306 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.695e+02 2.517e+02 2.957e+02 3.589e+02 7.428e+02, threshold=5.913e+02, percent-clipped=3.0 +2023-02-07 11:37:02,814 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212837.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 11:37:04,100 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9686, 1.7922, 2.5842, 1.5869, 1.4334, 2.4932, 0.5282, 1.6161], + device='cuda:3'), covar=tensor([0.1359, 0.1309, 0.0300, 0.1147, 0.2261, 0.0358, 0.1776, 0.1240], + device='cuda:3'), in_proj_covar=tensor([0.0198, 0.0205, 0.0134, 0.0223, 0.0276, 0.0145, 0.0172, 0.0198], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 11:37:14,954 INFO [train.py:901] (3/4) Epoch 27, batch 2700, loss[loss=0.2023, simple_loss=0.2787, pruned_loss=0.06294, over 7531.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2802, pruned_loss=0.05712, over 1618091.62 frames. ], batch size: 18, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:37:19,850 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=212862.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 11:37:49,368 INFO [train.py:901] (3/4) Epoch 27, batch 2750, loss[loss=0.1937, simple_loss=0.2877, pruned_loss=0.04989, over 8287.00 frames. 
], tot_loss[loss=0.1972, simple_loss=0.2799, pruned_loss=0.05725, over 1613753.52 frames. ], batch size: 23, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:37:53,337 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.521e+02 2.387e+02 2.942e+02 3.576e+02 8.277e+02, threshold=5.883e+02, percent-clipped=4.0 +2023-02-07 11:37:56,794 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=212916.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:38:25,584 INFO [train.py:901] (3/4) Epoch 27, batch 2800, loss[loss=0.1968, simple_loss=0.2869, pruned_loss=0.05331, over 8338.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2819, pruned_loss=0.05821, over 1610909.85 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:38:43,404 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5024, 1.4566, 2.1358, 1.2056, 0.9674, 2.1111, 0.3153, 1.2403], + device='cuda:3'), covar=tensor([0.1624, 0.1292, 0.0315, 0.1208, 0.2633, 0.0318, 0.1779, 0.1333], + device='cuda:3'), in_proj_covar=tensor([0.0199, 0.0205, 0.0134, 0.0224, 0.0277, 0.0145, 0.0172, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 11:38:46,067 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=212986.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:38:58,530 INFO [train.py:901] (3/4) Epoch 27, batch 2850, loss[loss=0.222, simple_loss=0.3085, pruned_loss=0.06779, over 8133.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2816, pruned_loss=0.05822, over 1608485.28 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:39:02,632 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 2.420e+02 3.040e+02 3.738e+02 9.771e+02, threshold=6.080e+02, percent-clipped=4.0 +2023-02-07 11:39:02,866 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213011.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:39:03,498 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9100, 1.5536, 1.7360, 1.4490, 1.0322, 1.5554, 1.6922, 1.4316], + device='cuda:3'), covar=tensor([0.0547, 0.1228, 0.1556, 0.1431, 0.0614, 0.1421, 0.0719, 0.0682], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0162, 0.0101, 0.0164, 0.0112, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 11:39:33,429 INFO [train.py:901] (3/4) Epoch 27, batch 2900, loss[loss=0.2022, simple_loss=0.2907, pruned_loss=0.05689, over 8742.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2821, pruned_loss=0.0578, over 1614193.24 frames. ], batch size: 30, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:39:36,855 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213060.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:39:46,829 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213073.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:40:08,834 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.88 vs. limit=5.0 +2023-02-07 11:40:09,156 INFO [train.py:901] (3/4) Epoch 27, batch 2950, loss[loss=0.1654, simple_loss=0.2461, pruned_loss=0.0424, over 7804.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.05766, over 1613560.11 frames. 
], batch size: 19, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:40:12,510 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-07 11:40:13,189 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 2.292e+02 2.734e+02 3.601e+02 6.803e+02, threshold=5.467e+02, percent-clipped=1.0 +2023-02-07 11:40:30,261 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213136.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:40:42,820 INFO [train.py:901] (3/4) Epoch 27, batch 3000, loss[loss=0.2126, simple_loss=0.3076, pruned_loss=0.05885, over 8251.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2838, pruned_loss=0.05847, over 1613979.52 frames. ], batch size: 24, lr: 2.80e-03, grad_scale: 8.0 +2023-02-07 11:40:42,820 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 11:40:56,476 INFO [train.py:935] (3/4) Epoch 27, validation: loss=0.171, simple_loss=0.2706, pruned_loss=0.03572, over 944034.00 frames. +2023-02-07 11:40:56,477 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 11:41:08,340 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213172.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:10,350 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213175.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:19,878 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213188.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:25,955 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213197.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:41:30,815 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-07 11:41:31,708 INFO [train.py:901] (3/4) Epoch 27, batch 3050, loss[loss=0.23, simple_loss=0.319, pruned_loss=0.07046, over 8593.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05871, over 1613713.15 frames. ], batch size: 31, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:41:36,535 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.283e+02 2.877e+02 3.649e+02 6.604e+02, threshold=5.754e+02, percent-clipped=7.0 +2023-02-07 11:41:40,805 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7503, 1.3720, 2.8886, 1.3796, 2.2604, 3.0692, 3.2497, 2.6188], + device='cuda:3'), covar=tensor([0.1142, 0.1667, 0.0357, 0.2208, 0.0916, 0.0299, 0.0660, 0.0520], + device='cuda:3'), in_proj_covar=tensor([0.0306, 0.0328, 0.0293, 0.0321, 0.0321, 0.0279, 0.0438, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 11:42:04,116 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=213251.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:42:06,436 INFO [train.py:901] (3/4) Epoch 27, batch 3100, loss[loss=0.2362, simple_loss=0.3119, pruned_loss=0.08024, over 8038.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05867, over 1612767.67 frames. ], batch size: 22, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:42:29,244 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 11:42:40,166 INFO [train.py:901] (3/4) Epoch 27, batch 3150, loss[loss=0.1946, simple_loss=0.2847, pruned_loss=0.05221, over 8503.00 frames. 
], tot_loss[loss=0.2004, simple_loss=0.2837, pruned_loss=0.0585, over 1612909.52 frames. ], batch size: 26, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:42:44,226 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.764e+02 2.557e+02 3.186e+02 3.836e+02 1.080e+03, threshold=6.372e+02, percent-clipped=6.0 +2023-02-07 11:42:57,218 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5108, 2.2988, 3.1217, 2.4875, 3.0659, 2.5480, 2.4039, 1.9242], + device='cuda:3'), covar=tensor([0.5840, 0.5518, 0.2139, 0.4117, 0.2613, 0.3069, 0.1902, 0.5869], + device='cuda:3'), in_proj_covar=tensor([0.0965, 0.1016, 0.0828, 0.0989, 0.1022, 0.0927, 0.0770, 0.0850], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 11:43:15,308 INFO [train.py:901] (3/4) Epoch 27, batch 3200, loss[loss=0.1772, simple_loss=0.2624, pruned_loss=0.04605, over 7810.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.05791, over 1617840.67 frames. ], batch size: 20, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:43:38,615 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6973, 4.7742, 4.2390, 2.1557, 4.1661, 4.4166, 4.3573, 4.2565], + device='cuda:3'), covar=tensor([0.0658, 0.0453, 0.0977, 0.4103, 0.0834, 0.0815, 0.1035, 0.0603], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0460, 0.0447, 0.0557, 0.0441, 0.0464, 0.0437, 0.0405], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:43:46,357 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5489, 2.4115, 3.1255, 2.5571, 3.2423, 2.6112, 2.4838, 1.9423], + device='cuda:3'), covar=tensor([0.5867, 0.5387, 0.2285, 0.4233, 0.2635, 0.3295, 0.1883, 0.6082], + device='cuda:3'), in_proj_covar=tensor([0.0968, 0.1021, 0.0831, 0.0992, 0.1026, 0.0930, 0.0772, 0.0854], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 11:43:48,886 INFO [train.py:901] (3/4) Epoch 27, batch 3250, loss[loss=0.1982, simple_loss=0.2858, pruned_loss=0.05531, over 8454.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.05813, over 1616374.56 frames. ], batch size: 25, lr: 2.80e-03, grad_scale: 16.0 +2023-02-07 11:43:52,809 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.478e+02 2.885e+02 3.413e+02 5.983e+02, threshold=5.770e+02, percent-clipped=0.0 +2023-02-07 11:44:07,290 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213431.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:17,687 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213444.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:21,743 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-07 11:44:25,453 INFO [train.py:901] (3/4) Epoch 27, batch 3300, loss[loss=0.1579, simple_loss=0.2473, pruned_loss=0.03424, over 7664.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2836, pruned_loss=0.05802, over 1616268.64 frames. 
], batch size: 19, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:44:26,274 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213456.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:35,225 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213469.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:44:58,305 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7933, 2.4912, 4.0026, 1.6170, 3.0698, 2.3095, 1.9770, 2.9168], + device='cuda:3'), covar=tensor([0.2214, 0.2831, 0.1020, 0.5150, 0.1973, 0.3553, 0.2698, 0.2703], + device='cuda:3'), in_proj_covar=tensor([0.0538, 0.0634, 0.0567, 0.0670, 0.0660, 0.0608, 0.0561, 0.0645], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:44:59,366 INFO [train.py:901] (3/4) Epoch 27, batch 3350, loss[loss=0.2072, simple_loss=0.2954, pruned_loss=0.05944, over 8628.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05764, over 1609778.73 frames. ], batch size: 39, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:45:00,990 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=213507.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:45:03,448 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.606e+02 3.102e+02 3.998e+02 8.787e+02, threshold=6.203e+02, percent-clipped=8.0 +2023-02-07 11:45:18,441 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=213532.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:45:34,511 INFO [train.py:901] (3/4) Epoch 27, batch 3400, loss[loss=0.1894, simple_loss=0.2824, pruned_loss=0.0482, over 8283.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2824, pruned_loss=0.0579, over 1609132.50 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:45:54,959 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.70 vs. limit=2.0 +2023-02-07 11:45:55,326 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=213584.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:46:09,623 INFO [train.py:901] (3/4) Epoch 27, batch 3450, loss[loss=0.1908, simple_loss=0.2775, pruned_loss=0.05202, over 8079.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.285, pruned_loss=0.05895, over 1610545.96 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:46:13,709 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 2.297e+02 2.616e+02 3.439e+02 9.820e+02, threshold=5.232e+02, percent-clipped=1.0 +2023-02-07 11:46:13,985 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1895, 1.9211, 2.4392, 2.0793, 2.5129, 2.2477, 2.1121, 1.3486], + device='cuda:3'), covar=tensor([0.5876, 0.4998, 0.2189, 0.3641, 0.2529, 0.3058, 0.1948, 0.5447], + device='cuda:3'), in_proj_covar=tensor([0.0964, 0.1019, 0.0829, 0.0988, 0.1019, 0.0929, 0.0770, 0.0851], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 11:46:44,530 INFO [train.py:901] (3/4) Epoch 27, batch 3500, loss[loss=0.1861, simple_loss=0.2642, pruned_loss=0.05398, over 7710.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2855, pruned_loss=0.05939, over 1612165.82 frames. 
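The `zipformer.py:2431` "attn_weights_entropy" dumps above summarise how peaked each attention head's weight distribution is (low entropy ≈ the head attends to a single frame, high entropy ≈ weights are spread out). One way to compute such a diagnostic; the exact reduction axes are an assumption:

```python
import torch

def attn_weights_entropy(attn_weights, eps=1e-20):
    # attn_weights: (..., num_heads, query_len, key_len), rows sum to 1.
    p = attn_weights.clamp(min=eps)
    ent = -(p * p.log()).sum(dim=-1)  # entropy over the attended (key) axis
    return ent.mean(dim=-1)           # averaged over query positions
```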
], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:46:57,107 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-07 11:47:11,033 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-07 11:47:20,454 INFO [train.py:901] (3/4) Epoch 27, batch 3550, loss[loss=0.1984, simple_loss=0.2818, pruned_loss=0.0575, over 7977.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.284, pruned_loss=0.05865, over 1607025.99 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:24,356 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 2.484e+02 3.157e+02 3.893e+02 8.912e+02, threshold=6.313e+02, percent-clipped=7.0 +2023-02-07 11:47:55,119 INFO [train.py:901] (3/4) Epoch 27, batch 3600, loss[loss=0.233, simple_loss=0.3224, pruned_loss=0.07185, over 8035.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2833, pruned_loss=0.05842, over 1606240.19 frames. ], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:47:56,230 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.85 vs. limit=5.0 +2023-02-07 11:48:31,565 INFO [train.py:901] (3/4) Epoch 27, batch 3650, loss[loss=0.1552, simple_loss=0.2369, pruned_loss=0.03673, over 7536.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.282, pruned_loss=0.05756, over 1603847.07 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:48:35,656 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.435e+02 3.005e+02 4.000e+02 1.001e+03, threshold=6.009e+02, percent-clipped=1.0 +2023-02-07 11:49:05,221 INFO [train.py:901] (3/4) Epoch 27, batch 3700, loss[loss=0.1776, simple_loss=0.2655, pruned_loss=0.04481, over 7815.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05722, over 1603352.91 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:49:11,344 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-07 11:49:18,805 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3404, 1.6284, 1.6717, 1.0116, 1.7048, 1.3768, 0.2562, 1.5775], + device='cuda:3'), covar=tensor([0.0594, 0.0422, 0.0330, 0.0600, 0.0487, 0.0979, 0.1008, 0.0340], + device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0408, 0.0362, 0.0458, 0.0393, 0.0547, 0.0404, 0.0438], + device='cuda:3'), out_proj_covar=tensor([1.2525e-04, 1.0608e-04, 9.4603e-05, 1.1988e-04, 1.0296e-04, 1.5253e-04, + 1.0774e-04, 1.1502e-04], device='cuda:3') +2023-02-07 11:49:40,755 INFO [train.py:901] (3/4) Epoch 27, batch 3750, loss[loss=0.2211, simple_loss=0.3015, pruned_loss=0.07035, over 8661.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2817, pruned_loss=0.05757, over 1608925.73 frames. ], batch size: 34, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:49:44,686 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.240e+02 2.670e+02 3.453e+02 6.024e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 11:49:57,383 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=213928.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:50:15,102 INFO [train.py:901] (3/4) Epoch 27, batch 3800, loss[loss=0.1807, simple_loss=0.2569, pruned_loss=0.05221, over 7436.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05842, over 1610746.85 frames. 
], batch size: 17, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:50:51,696 INFO [train.py:901] (3/4) Epoch 27, batch 3850, loss[loss=0.1883, simple_loss=0.2721, pruned_loss=0.0523, over 7943.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2825, pruned_loss=0.058, over 1611537.43 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:50:55,678 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.477e+02 2.325e+02 2.987e+02 3.815e+02 9.366e+02, threshold=5.974e+02, percent-clipped=6.0 +2023-02-07 11:51:19,440 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214043.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:51:21,268 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-07 11:51:27,208 INFO [train.py:901] (3/4) Epoch 27, batch 3900, loss[loss=0.1746, simple_loss=0.2674, pruned_loss=0.04092, over 7921.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2827, pruned_loss=0.05799, over 1610195.84 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:00,517 INFO [train.py:901] (3/4) Epoch 27, batch 3950, loss[loss=0.1803, simple_loss=0.2677, pruned_loss=0.04645, over 7924.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2833, pruned_loss=0.05794, over 1613385.87 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:52:04,375 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.377e+02 2.717e+02 3.364e+02 5.097e+02, threshold=5.435e+02, percent-clipped=0.0 +2023-02-07 11:52:30,744 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214147.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:52:36,017 INFO [train.py:901] (3/4) Epoch 27, batch 4000, loss[loss=0.184, simple_loss=0.2646, pruned_loss=0.05169, over 7924.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05775, over 1614329.84 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:53:10,790 INFO [train.py:901] (3/4) Epoch 27, batch 4050, loss[loss=0.2117, simple_loss=0.2824, pruned_loss=0.0705, over 8074.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2838, pruned_loss=0.05845, over 1617251.06 frames. ], batch size: 21, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:53:14,930 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.649e+02 2.379e+02 2.958e+02 3.648e+02 7.596e+02, threshold=5.915e+02, percent-clipped=3.0 +2023-02-07 11:53:31,866 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=214236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:53:41,647 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-07 11:53:45,410 INFO [train.py:901] (3/4) Epoch 27, batch 4100, loss[loss=0.174, simple_loss=0.2551, pruned_loss=0.04645, over 7553.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.05789, over 1617004.84 frames. 
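The `scaling.py:679` "Whitening: ... metric=X vs. limit=Y" checks measure how far a layer's activations are from having a white (identity-like) covariance; a penalty kicks in only when the metric exceeds the limit, which is why most lines report "metric ... vs. limit" and nothing else. A plausible reconstruction of such a metric — mean(eigenvalue²)/mean(eigenvalue)² of the per-group channel covariance, 1.0 for perfectly white features — computed via traces so no eigendecomposition is needed; this formula is an assumption, not necessarily k2's exact one:

```python
import torch

def whitening_metric(x, num_groups=1):
    # x: (num_frames, num_channels); channels are split into groups.
    n, c = x.shape
    d = c // num_groups
    xs = x.reshape(n, num_groups, d).transpose(0, 1)      # (groups, n, d)
    cov = xs.transpose(1, 2) @ xs / n                     # per-group covariance
    mean_eig = torch.diagonal(cov, dim1=1, dim2=2).mean()        # trace(C)/d
    mean_eig_sq = (cov ** 2).sum(dim=(1, 2)).mean() / d          # trace(C^2)/d
    return mean_eig_sq / mean_eig ** 2   # >= 1.0; logged as "metric"
```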
], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:53:57,019 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1951, 1.9211, 2.3630, 2.0602, 2.4189, 2.2186, 2.0560, 1.2038], + device='cuda:3'), covar=tensor([0.5826, 0.4996, 0.2256, 0.3717, 0.2363, 0.3378, 0.1855, 0.5301], + device='cuda:3'), in_proj_covar=tensor([0.0966, 0.1019, 0.0831, 0.0988, 0.1024, 0.0928, 0.0768, 0.0849], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 11:54:17,258 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214299.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:54:21,105 INFO [train.py:901] (3/4) Epoch 27, batch 4150, loss[loss=0.164, simple_loss=0.2364, pruned_loss=0.04581, over 7512.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2837, pruned_loss=0.05865, over 1618760.41 frames. ], batch size: 18, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:54:25,184 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 2.522e+02 2.957e+02 3.518e+02 6.524e+02, threshold=5.913e+02, percent-clipped=2.0 +2023-02-07 11:54:34,232 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:54:55,413 INFO [train.py:901] (3/4) Epoch 27, batch 4200, loss[loss=0.214, simple_loss=0.3121, pruned_loss=0.05796, over 8451.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2841, pruned_loss=0.05893, over 1618648.16 frames. ], batch size: 27, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:55:15,716 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-07 11:55:30,770 INFO [train.py:901] (3/4) Epoch 27, batch 4250, loss[loss=0.18, simple_loss=0.2723, pruned_loss=0.04389, over 8296.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2845, pruned_loss=0.05936, over 1621470.33 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:55:34,782 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.646e+02 2.351e+02 2.930e+02 3.605e+02 8.966e+02, threshold=5.860e+02, percent-clipped=4.0 +2023-02-07 11:55:40,120 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-07 11:55:52,924 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.43 vs. limit=2.0 +2023-02-07 11:56:04,673 INFO [train.py:901] (3/4) Epoch 27, batch 4300, loss[loss=0.214, simple_loss=0.297, pruned_loss=0.06552, over 8360.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.283, pruned_loss=0.05871, over 1614850.34 frames. ], batch size: 24, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:56:29,280 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214491.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:56:40,082 INFO [train.py:901] (3/4) Epoch 27, batch 4350, loss[loss=0.1906, simple_loss=0.2768, pruned_loss=0.05217, over 8027.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2845, pruned_loss=0.05937, over 1614869.31 frames. 
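Note that the running `tot_loss[... over N frames.]` counters carry fractional frame counts (e.g. 1618760.41): old batches are decayed rather than dropped, so the count is a weighted sum over recent history. A sketch of such a tracker; the decay constant is an assumption and this is not icefall's own MetricsTracker:

```python
class RunningLoss:  # hypothetical; the recipe uses its own tracker class
    def __init__(self, decay=0.98):
        self.decay = decay      # assumed per-batch decay of old statistics
        self.loss_sum = 0.0
        self.frames = 0.0       # becomes fractional, as in the log

    def update(self, batch_loss_sum, batch_frames):
        self.loss_sum = self.loss_sum * self.decay + batch_loss_sum
        self.frames = self.frames * self.decay + batch_frames

    @property
    def loss(self):
        return self.loss_sum / max(self.frames, 1.0)
```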
], batch size: 22, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:56:44,889 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.457e+02 2.989e+02 4.041e+02 8.697e+02, threshold=5.978e+02, percent-clipped=4.0 +2023-02-07 11:57:04,813 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0700, 2.3348, 3.7626, 2.0193, 2.0954, 3.6865, 0.7107, 2.1703], + device='cuda:3'), covar=tensor([0.1022, 0.1203, 0.0181, 0.1544, 0.2166, 0.0243, 0.1957, 0.1308], + device='cuda:3'), in_proj_covar=tensor([0.0200, 0.0205, 0.0136, 0.0223, 0.0277, 0.0145, 0.0172, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-07 11:57:11,210 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-07 11:57:11,498 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-07 11:57:14,883 INFO [train.py:901] (3/4) Epoch 27, batch 4400, loss[loss=0.1846, simple_loss=0.2766, pruned_loss=0.04629, over 8183.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2828, pruned_loss=0.05816, over 1615562.72 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:26,154 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0621, 2.9533, 2.8082, 4.2541, 1.8094, 2.5591, 2.7482, 3.2468], + device='cuda:3'), covar=tensor([0.0513, 0.0680, 0.0694, 0.0197, 0.1056, 0.1009, 0.0839, 0.0653], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0211, 0.0202, 0.0245, 0.0248, 0.0203], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 11:57:32,006 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=214580.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:46,454 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0477, 2.7142, 4.0171, 1.7613, 3.0975, 2.2679, 2.4027, 2.7096], + device='cuda:3'), covar=tensor([0.1985, 0.2267, 0.0944, 0.4725, 0.1759, 0.3539, 0.2324, 0.2679], + device='cuda:3'), in_proj_covar=tensor([0.0536, 0.0630, 0.0562, 0.0666, 0.0656, 0.0605, 0.0560, 0.0641], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 11:57:49,420 INFO [train.py:901] (3/4) Epoch 27, batch 4450, loss[loss=0.2194, simple_loss=0.3113, pruned_loss=0.06379, over 8105.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2812, pruned_loss=0.05757, over 1612320.50 frames. ], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:57:50,297 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214606.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:57:51,446 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-07 11:57:53,336 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.435e+02 2.910e+02 3.675e+02 1.096e+03, threshold=5.821e+02, percent-clipped=3.0 +2023-02-07 11:57:54,950 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.42 vs. limit=5.0 +2023-02-07 11:58:25,065 INFO [train.py:901] (3/4) Epoch 27, batch 4500, loss[loss=0.1941, simple_loss=0.2843, pruned_loss=0.05199, over 8109.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2802, pruned_loss=0.05718, over 1610561.96 frames. 
], batch size: 23, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:58:49,087 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-07 11:58:51,961 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=214695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 11:58:58,573 INFO [train.py:901] (3/4) Epoch 27, batch 4550, loss[loss=0.2045, simple_loss=0.294, pruned_loss=0.05747, over 8458.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.279, pruned_loss=0.05635, over 1607307.29 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:59:03,198 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.259e+02 2.795e+02 3.667e+02 7.490e+02, threshold=5.591e+02, percent-clipped=6.0 +2023-02-07 11:59:15,587 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-07 11:59:34,573 INFO [train.py:901] (3/4) Epoch 27, batch 4600, loss[loss=0.1505, simple_loss=0.2334, pruned_loss=0.03382, over 7801.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2812, pruned_loss=0.05776, over 1606728.61 frames. ], batch size: 20, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 11:59:43,925 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.7349, 5.8518, 5.1430, 2.5481, 5.1298, 5.5537, 5.3479, 5.3638], + device='cuda:3'), covar=tensor([0.0515, 0.0369, 0.0765, 0.4067, 0.0713, 0.0679, 0.1019, 0.0509], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0462, 0.0447, 0.0556, 0.0443, 0.0466, 0.0440, 0.0406], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:00:07,855 INFO [train.py:901] (3/4) Epoch 27, batch 4650, loss[loss=0.2577, simple_loss=0.3209, pruned_loss=0.09727, over 6557.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2825, pruned_loss=0.05849, over 1607885.81 frames. ], batch size: 71, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:11,914 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.452e+02 3.083e+02 3.974e+02 1.018e+03, threshold=6.165e+02, percent-clipped=5.0 +2023-02-07 12:00:43,657 INFO [train.py:901] (3/4) Epoch 27, batch 4700, loss[loss=0.2325, simple_loss=0.3106, pruned_loss=0.07719, over 7061.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2825, pruned_loss=0.05866, over 1610728.14 frames. ], batch size: 72, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:00:48,569 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214862.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:05,463 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214887.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:17,192 INFO [train.py:901] (3/4) Epoch 27, batch 4750, loss[loss=0.2439, simple_loss=0.3333, pruned_loss=0.07725, over 8483.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05879, over 1614422.18 frames. ], batch size: 25, lr: 2.79e-03, grad_scale: 16.0 +2023-02-07 12:01:21,119 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.799e+02 2.444e+02 3.016e+02 3.790e+02 1.117e+03, threshold=6.032e+02, percent-clipped=6.0 +2023-02-07 12:01:27,694 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.80 vs. limit=2.0 +2023-02-07 12:01:42,268 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. 
Duration: 26.8349375 +2023-02-07 12:01:45,079 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-07 12:01:49,027 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=214951.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:01:51,315 INFO [train.py:901] (3/4) Epoch 27, batch 4800, loss[loss=0.2046, simple_loss=0.306, pruned_loss=0.05162, over 8329.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05814, over 1611036.02 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:07,903 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=214976.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:02:27,032 INFO [train.py:901] (3/4) Epoch 27, batch 4850, loss[loss=0.2303, simple_loss=0.3089, pruned_loss=0.0758, over 8251.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2817, pruned_loss=0.058, over 1609038.50 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:02:31,208 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.607e+02 2.294e+02 2.705e+02 3.274e+02 6.085e+02, threshold=5.409e+02, percent-clipped=1.0 +2023-02-07 12:02:36,649 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-07 12:03:01,830 INFO [train.py:901] (3/4) Epoch 27, batch 4900, loss[loss=0.2149, simple_loss=0.3044, pruned_loss=0.06269, over 8247.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2811, pruned_loss=0.05721, over 1611595.67 frames. ], batch size: 24, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:07,092 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.45 vs. limit=5.0 +2023-02-07 12:03:34,914 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5989, 1.7658, 1.8956, 1.3147, 1.9500, 1.4345, 0.4845, 1.8549], + device='cuda:3'), covar=tensor([0.0622, 0.0401, 0.0352, 0.0585, 0.0444, 0.0992, 0.1017, 0.0317], + device='cuda:3'), in_proj_covar=tensor([0.0473, 0.0410, 0.0364, 0.0460, 0.0395, 0.0552, 0.0404, 0.0440], + device='cuda:3'), out_proj_covar=tensor([1.2553e-04, 1.0663e-04, 9.4976e-05, 1.2026e-04, 1.0346e-04, 1.5417e-04, + 1.0780e-04, 1.1539e-04], device='cuda:3') +2023-02-07 12:03:37,414 INFO [train.py:901] (3/4) Epoch 27, batch 4950, loss[loss=0.1853, simple_loss=0.2741, pruned_loss=0.04819, over 8476.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2815, pruned_loss=0.05746, over 1614913.96 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:03:41,324 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.323e+02 2.858e+02 3.502e+02 9.819e+02, threshold=5.716e+02, percent-clipped=5.0 +2023-02-07 12:03:46,706 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8318, 1.4101, 4.0038, 1.4990, 3.4951, 3.3499, 3.6097, 3.5358], + device='cuda:3'), covar=tensor([0.0682, 0.4799, 0.0700, 0.4394, 0.1391, 0.1048, 0.0728, 0.0803], + device='cuda:3'), in_proj_covar=tensor([0.0680, 0.0675, 0.0745, 0.0662, 0.0755, 0.0643, 0.0643, 0.0723], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:04:10,562 INFO [train.py:901] (3/4) Epoch 27, batch 5000, loss[loss=0.2112, simple_loss=0.2987, pruned_loss=0.06184, over 8594.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2824, pruned_loss=0.0583, over 1613046.88 frames. 
], batch size: 39, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:04:40,485 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6172, 2.5312, 1.9040, 2.3056, 2.1988, 1.6469, 2.1035, 2.1803], + device='cuda:3'), covar=tensor([0.1696, 0.0472, 0.1261, 0.0674, 0.0833, 0.1648, 0.1104, 0.1053], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0242, 0.0342, 0.0313, 0.0302, 0.0347, 0.0350, 0.0324], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:04:47,122 INFO [train.py:901] (3/4) Epoch 27, batch 5050, loss[loss=0.2576, simple_loss=0.3399, pruned_loss=0.08765, over 8315.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2834, pruned_loss=0.05819, over 1616717.56 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:04:50,091 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-07 12:04:50,998 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.415e+02 2.920e+02 3.667e+02 5.760e+02, threshold=5.840e+02, percent-clipped=1.0 +2023-02-07 12:05:10,165 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-07 12:05:15,553 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215248.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:05:20,048 INFO [train.py:901] (3/4) Epoch 27, batch 5100, loss[loss=0.2021, simple_loss=0.2923, pruned_loss=0.05599, over 8500.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2846, pruned_loss=0.05853, over 1620674.02 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:54,330 INFO [train.py:901] (3/4) Epoch 27, batch 5150, loss[loss=0.1723, simple_loss=0.2595, pruned_loss=0.04257, over 7662.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2835, pruned_loss=0.05772, over 1623462.25 frames. ], batch size: 19, lr: 2.78e-03, grad_scale: 32.0 +2023-02-07 12:05:59,259 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.789e+02 2.449e+02 2.868e+02 3.492e+02 6.640e+02, threshold=5.736e+02, percent-clipped=1.0 +2023-02-07 12:06:22,707 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215343.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:06:28,166 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215351.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:06:30,701 INFO [train.py:901] (3/4) Epoch 27, batch 5200, loss[loss=0.2197, simple_loss=0.2924, pruned_loss=0.07349, over 6791.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2826, pruned_loss=0.05761, over 1612692.80 frames. ], batch size: 15, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:06:58,936 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5580, 1.8535, 2.8897, 1.4454, 2.1458, 2.0027, 1.5556, 2.2442], + device='cuda:3'), covar=tensor([0.2015, 0.2901, 0.0870, 0.5014, 0.2045, 0.3421, 0.2635, 0.2304], + device='cuda:3'), in_proj_covar=tensor([0.0541, 0.0637, 0.0568, 0.0673, 0.0663, 0.0613, 0.0567, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:07:05,198 INFO [train.py:901] (3/4) Epoch 27, batch 5250, loss[loss=0.1612, simple_loss=0.2473, pruned_loss=0.03753, over 7797.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2826, pruned_loss=0.05744, over 1611685.41 frames. 
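The `zipformer.py:1185` records come from stochastic layer dropping: on most batches nothing is dropped (`num_to_drop=0, layers_to_drop=set()`), but occasionally a whole encoder layer is skipped for that batch (`num_to_drop=1, layers_to_drop={0}`), a regulariser for deep stacks. A toy version of that per-batch decision; the drop probability and uniform layer choice are assumptions, not the Zipformer schedule:

```python
import random

def pick_layers_to_drop(num_layers, p_drop=0.05):
    # Rarely drop one random layer for this batch (assumed rate);
    # returns set() on most batches, occasionally e.g. {0} or {1}.
    if random.random() < p_drop:
        return {random.randrange(num_layers)}
    return set()
```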
], batch size: 19, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:07:09,826 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.650e+02 2.358e+02 2.790e+02 3.638e+02 8.125e+02, threshold=5.579e+02, percent-clipped=3.0 +2023-02-07 12:07:12,595 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-07 12:07:17,503 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3961, 2.3083, 1.7756, 2.1466, 1.9153, 1.5138, 1.8683, 1.9199], + device='cuda:3'), covar=tensor([0.1541, 0.0461, 0.1248, 0.0571, 0.0770, 0.1634, 0.1002, 0.0988], + device='cuda:3'), in_proj_covar=tensor([0.0362, 0.0242, 0.0344, 0.0314, 0.0304, 0.0348, 0.0351, 0.0324], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:07:28,890 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9810, 1.5807, 1.7576, 1.4804, 0.9627, 1.5794, 1.7506, 1.6051], + device='cuda:3'), covar=tensor([0.0547, 0.1222, 0.1592, 0.1433, 0.0571, 0.1446, 0.0676, 0.0664], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0188, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 12:07:40,340 INFO [train.py:901] (3/4) Epoch 27, batch 5300, loss[loss=0.2123, simple_loss=0.3113, pruned_loss=0.05663, over 8450.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.281, pruned_loss=0.0567, over 1613388.19 frames. ], batch size: 27, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:13,770 INFO [train.py:901] (3/4) Epoch 27, batch 5350, loss[loss=0.2278, simple_loss=0.3125, pruned_loss=0.07159, over 8607.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.282, pruned_loss=0.05727, over 1613945.73 frames. ], batch size: 34, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:18,669 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.346e+02 2.455e+02 2.847e+02 3.988e+02 1.267e+03, threshold=5.693e+02, percent-clipped=12.0 +2023-02-07 12:08:25,719 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215521.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:08:48,885 INFO [train.py:901] (3/4) Epoch 27, batch 5400, loss[loss=0.1607, simple_loss=0.2346, pruned_loss=0.04338, over 7411.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05813, over 1610315.17 frames. 
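The `grad_scale` field in the batch records is the dynamic loss scale for mixed-precision training: it doubles after a stretch of stable steps (8.0 → 16.0 → 32.0 earlier in this log) and is halved when a step produces inf/NaN gradients (back to 16.0 here). A sketch of the standard `torch.cuda.amp` wiring; the recipe's actual integration may differ:

```python
import torch

scaler = torch.cuda.amp.GradScaler(init_scale=8.0, growth_interval=2000)

def training_step(model, optimizer, batch):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(batch)          # placeholder forward pass
    scaler.scale(loss).backward()
    scaler.step(optimizer)           # skipped if grads contain inf/NaN
    scaler.update()                  # doubles or halves the scale
    return scaler.get_scale()        # the value logged as grad_scale
```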
], batch size: 17, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:08:52,411 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3095, 2.1414, 1.7271, 2.0144, 1.7381, 1.4515, 1.6593, 1.7078], + device='cuda:3'), covar=tensor([0.1281, 0.0419, 0.1194, 0.0499, 0.0768, 0.1561, 0.0971, 0.0851], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0241, 0.0343, 0.0312, 0.0303, 0.0347, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:08:57,735 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215566.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:09:08,912 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215583.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:09:14,846 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215592.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:09:23,335 INFO [train.py:901] (3/4) Epoch 27, batch 5450, loss[loss=0.1791, simple_loss=0.269, pruned_loss=0.0446, over 8252.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2822, pruned_loss=0.05821, over 1608047.20 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:27,892 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.259e+02 2.540e+02 3.136e+02 3.819e+02 8.555e+02, threshold=6.272e+02, percent-clipped=5.0 +2023-02-07 12:09:47,134 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8185, 1.5923, 1.7617, 1.4263, 0.9923, 1.5695, 1.6813, 1.6166], + device='cuda:3'), covar=tensor([0.0535, 0.1177, 0.1513, 0.1394, 0.0587, 0.1383, 0.0688, 0.0603], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 12:09:56,871 INFO [train.py:901] (3/4) Epoch 27, batch 5500, loss[loss=0.2027, simple_loss=0.2923, pruned_loss=0.05648, over 8549.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05825, over 1611332.14 frames. ], batch size: 31, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:09:56,881 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-07 12:10:20,607 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215687.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:10:25,966 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215695.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:10:32,544 INFO [train.py:901] (3/4) Epoch 27, batch 5550, loss[loss=0.1991, simple_loss=0.2799, pruned_loss=0.05913, over 8438.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05754, over 1613202.45 frames. ], batch size: 27, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:10:34,105 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215707.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:10:37,242 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.599e+02 2.445e+02 2.973e+02 3.969e+02 8.778e+02, threshold=5.947e+02, percent-clipped=4.0 +2023-02-07 12:11:06,807 INFO [train.py:901] (3/4) Epoch 27, batch 5600, loss[loss=0.1766, simple_loss=0.2472, pruned_loss=0.053, over 7521.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2805, pruned_loss=0.0568, over 1605824.73 frames. 
], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:40,025 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215802.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:11:41,824 INFO [train.py:901] (3/4) Epoch 27, batch 5650, loss[loss=0.2136, simple_loss=0.3024, pruned_loss=0.06241, over 8486.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2813, pruned_loss=0.05732, over 1609868.07 frames. ], batch size: 27, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:11:46,131 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215810.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:11:47,203 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.511e+02 2.361e+02 2.799e+02 3.308e+02 5.877e+02, threshold=5.598e+02, percent-clipped=0.0 +2023-02-07 12:11:50,148 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9567, 1.1992, 1.1486, 0.7620, 1.1762, 0.9627, 0.1445, 1.1226], + device='cuda:3'), covar=tensor([0.0675, 0.0502, 0.0514, 0.0761, 0.0637, 0.1417, 0.1211, 0.0445], + device='cuda:3'), in_proj_covar=tensor([0.0473, 0.0410, 0.0363, 0.0459, 0.0394, 0.0552, 0.0403, 0.0439], + device='cuda:3'), out_proj_covar=tensor([1.2555e-04, 1.0640e-04, 9.4739e-05, 1.2018e-04, 1.0317e-04, 1.5401e-04, + 1.0772e-04, 1.1511e-04], device='cuda:3') +2023-02-07 12:12:04,971 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-07 12:12:11,223 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=215847.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:16,648 INFO [train.py:901] (3/4) Epoch 27, batch 5700, loss[loss=0.2119, simple_loss=0.2983, pruned_loss=0.06275, over 8387.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2814, pruned_loss=0.05745, over 1609604.12 frames. ], batch size: 49, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:23,694 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:12:51,896 INFO [train.py:901] (3/4) Epoch 27, batch 5750, loss[loss=0.2191, simple_loss=0.3066, pruned_loss=0.06581, over 8496.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2812, pruned_loss=0.05722, over 1606944.65 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:12:56,022 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215910.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:12:57,182 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.395e+02 2.899e+02 3.864e+02 7.116e+02, threshold=5.798e+02, percent-clipped=7.0 +2023-02-07 12:13:07,811 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=215927.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:10,334 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-07 12:13:26,385 INFO [train.py:901] (3/4) Epoch 27, batch 5800, loss[loss=0.1929, simple_loss=0.2859, pruned_loss=0.04991, over 8504.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2811, pruned_loss=0.05772, over 1602170.49 frames. 
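The recurring `WARNING [train.py:1067] Exclude cut with ID ... Duration: ...` lines are a duration filter on the training cuts: every excluded utterance in this log is either under about 1 s or over about 25 s. A sketch of such a filter using lhotse's `CutSet.filter`; the exact bounds here are inferred from the excluded durations, not taken from the recipe:

```python
import logging

MIN_SECS, MAX_SECS = 1.0, 25.0  # assumed bounds, inferred from this log

def remove_short_and_long_utt(cut) -> bool:
    keep = MIN_SECS <= cut.duration <= MAX_SECS
    if not keep:
        logging.warning(f"Exclude cut with ID {cut.id} from training. "
                        f"Duration: {cut.duration}")
    return keep

# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```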
], batch size: 28, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:13:31,953 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=215963.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:13:43,176 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=215980.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:13:47,606 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1227, 1.4452, 1.6251, 1.2780, 0.9871, 1.4343, 1.8033, 1.5849], + device='cuda:3'), covar=tensor([0.0513, 0.1308, 0.1765, 0.1589, 0.0610, 0.1595, 0.0692, 0.0699], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 12:13:48,928 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=215988.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:14:01,069 INFO [train.py:901] (3/4) Epoch 27, batch 5850, loss[loss=0.1612, simple_loss=0.2588, pruned_loss=0.03175, over 7958.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05726, over 1608148.49 frames. ], batch size: 21, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:05,651 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.428e+02 2.871e+02 3.760e+02 7.078e+02, threshold=5.742e+02, percent-clipped=9.0 +2023-02-07 12:14:16,038 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216025.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:14:27,445 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216042.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:30,146 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216046.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:36,738 INFO [train.py:901] (3/4) Epoch 27, batch 5900, loss[loss=0.1891, simple_loss=0.2862, pruned_loss=0.04599, over 8337.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2809, pruned_loss=0.05692, over 1609189.32 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:14:38,998 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216058.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:14:44,145 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216066.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:14:55,477 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216083.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:15:00,979 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216091.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:15:10,148 INFO [train.py:901] (3/4) Epoch 27, batch 5950, loss[loss=0.2225, simple_loss=0.3094, pruned_loss=0.06785, over 8470.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2808, pruned_loss=0.05665, over 1609999.19 frames. ], batch size: 25, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:15,772 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.411e+02 2.864e+02 3.625e+02 8.908e+02, threshold=5.728e+02, percent-clipped=5.0 +2023-02-07 12:15:46,883 INFO [train.py:901] (3/4) Epoch 27, batch 6000, loss[loss=0.1789, simple_loss=0.252, pruned_loss=0.05289, over 7530.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.05691, over 1613742.83 frames. 
], batch size: 18, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:15:46,883 INFO [train.py:926] (3/4) Computing validation loss +2023-02-07 12:15:59,961 INFO [train.py:935] (3/4) Epoch 27, validation: loss=0.1711, simple_loss=0.2711, pruned_loss=0.03554, over 944034.00 frames. +2023-02-07 12:15:59,962 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6747MB +2023-02-07 12:16:06,493 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.98 vs. limit=5.0 +2023-02-07 12:16:25,739 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216191.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:16:35,300 INFO [train.py:901] (3/4) Epoch 27, batch 6050, loss[loss=0.1893, simple_loss=0.2736, pruned_loss=0.05248, over 8084.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.281, pruned_loss=0.0569, over 1610702.12 frames. ], batch size: 21, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:16:40,116 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.565e+02 3.207e+02 4.227e+02 9.285e+02, threshold=6.415e+02, percent-clipped=9.0 +2023-02-07 12:16:56,676 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216236.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:09,872 INFO [train.py:901] (3/4) Epoch 27, batch 6100, loss[loss=0.2124, simple_loss=0.2979, pruned_loss=0.06347, over 8410.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2825, pruned_loss=0.05758, over 1614560.44 frames. ], batch size: 49, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:14,011 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216261.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:28,451 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216281.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:17:39,620 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216298.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:44,976 INFO [train.py:901] (3/4) Epoch 27, batch 6150, loss[loss=0.1732, simple_loss=0.259, pruned_loss=0.04373, over 8460.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2828, pruned_loss=0.0578, over 1618981.18 frames. ], batch size: 49, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:17:44,988 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. 
Duration: 0.92225 +2023-02-07 12:17:45,830 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216306.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:17:45,853 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216306.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:17:49,772 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.311e+02 2.985e+02 4.036e+02 8.594e+02, threshold=5.970e+02, percent-clipped=2.0 +2023-02-07 12:17:52,545 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3880, 4.3762, 3.9122, 1.9864, 3.8636, 4.0532, 3.9214, 3.8535], + device='cuda:3'), covar=tensor([0.0770, 0.0595, 0.1119, 0.4745, 0.0946, 0.1030, 0.1358, 0.0801], + device='cuda:3'), in_proj_covar=tensor([0.0549, 0.0469, 0.0451, 0.0564, 0.0449, 0.0471, 0.0447, 0.0411], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:17:57,198 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216323.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:18,525 INFO [train.py:901] (3/4) Epoch 27, batch 6200, loss[loss=0.1697, simple_loss=0.256, pruned_loss=0.0417, over 8029.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.283, pruned_loss=0.05757, over 1621599.44 frames. ], batch size: 22, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:42,476 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216390.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:18:53,898 INFO [train.py:901] (3/4) Epoch 27, batch 6250, loss[loss=0.2272, simple_loss=0.2997, pruned_loss=0.07733, over 8589.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2841, pruned_loss=0.05816, over 1623607.44 frames. ], batch size: 31, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:18:58,453 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.643e+02 2.470e+02 2.901e+02 3.405e+02 7.374e+02, threshold=5.803e+02, percent-clipped=1.0 +2023-02-07 12:19:19,291 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8517, 3.7931, 3.4254, 1.8337, 3.3462, 3.4229, 3.3493, 3.3294], + device='cuda:3'), covar=tensor([0.0838, 0.0658, 0.1152, 0.4817, 0.0997, 0.1113, 0.1400, 0.0955], + device='cuda:3'), in_proj_covar=tensor([0.0545, 0.0465, 0.0448, 0.0560, 0.0446, 0.0467, 0.0444, 0.0406], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:19:27,772 INFO [train.py:901] (3/4) Epoch 27, batch 6300, loss[loss=0.1766, simple_loss=0.259, pruned_loss=0.04712, over 8505.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05873, over 1620049.89 frames. ], batch size: 26, lr: 2.78e-03, grad_scale: 16.0 +2023-02-07 12:20:01,955 INFO [train.py:901] (3/4) Epoch 27, batch 6350, loss[loss=0.178, simple_loss=0.2548, pruned_loss=0.05061, over 7534.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2841, pruned_loss=0.05872, over 1618537.23 frames. 
], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:02,151 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=216505.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:07,853 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.511e+02 2.994e+02 4.018e+02 7.521e+02, threshold=5.987e+02, percent-clipped=5.0 +2023-02-07 12:20:36,668 INFO [train.py:901] (3/4) Epoch 27, batch 6400, loss[loss=0.1849, simple_loss=0.2689, pruned_loss=0.05051, over 6378.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2847, pruned_loss=0.05903, over 1613794.71 frames. ], batch size: 14, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:20:41,615 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216562.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:20:53,533 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.9590, 1.6492, 1.3751, 1.5475, 1.2591, 1.2713, 1.2067, 1.2791], + device='cuda:3'), covar=tensor([0.1318, 0.0558, 0.1453, 0.0650, 0.0935, 0.1694, 0.1108, 0.0902], + device='cuda:3'), in_proj_covar=tensor([0.0361, 0.0241, 0.0342, 0.0312, 0.0303, 0.0347, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:20:58,186 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216587.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:10,669 INFO [train.py:901] (3/4) Epoch 27, batch 6450, loss[loss=0.1794, simple_loss=0.2585, pruned_loss=0.05017, over 7921.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.285, pruned_loss=0.05883, over 1615244.60 frames. ], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:13,549 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216609.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:21:16,161 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.469e+02 2.882e+02 3.609e+02 7.919e+02, threshold=5.765e+02, percent-clipped=2.0 +2023-02-07 12:21:46,025 INFO [train.py:901] (3/4) Epoch 27, batch 6500, loss[loss=0.1985, simple_loss=0.2867, pruned_loss=0.05509, over 8808.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2846, pruned_loss=0.05873, over 1614233.49 frames. 
], batch size: 40, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:21:46,207 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.1387, 1.3504, 1.5677, 1.2887, 0.7877, 1.3331, 1.1612, 1.0580], + device='cuda:3'), covar=tensor([0.0685, 0.1257, 0.1636, 0.1448, 0.0582, 0.1482, 0.0741, 0.0710], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-07 12:21:58,505 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216673.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:22:14,731 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4293, 1.8358, 1.4141, 2.8180, 1.3120, 1.2585, 2.0727, 1.9728], + device='cuda:3'), covar=tensor([0.1540, 0.1319, 0.1895, 0.0387, 0.1329, 0.2177, 0.0907, 0.0940], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0193, 0.0244, 0.0212, 0.0203, 0.0245, 0.0249, 0.0203], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 12:22:19,974 INFO [train.py:901] (3/4) Epoch 27, batch 6550, loss[loss=0.1362, simple_loss=0.2185, pruned_loss=0.027, over 7258.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2843, pruned_loss=0.05873, over 1616225.52 frames. ], batch size: 16, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:25,119 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.409e+02 2.544e+02 2.876e+02 3.743e+02 6.730e+02, threshold=5.752e+02, percent-clipped=5.0 +2023-02-07 12:22:32,752 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:22:54,580 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-07 12:22:55,723 INFO [train.py:901] (3/4) Epoch 27, batch 6600, loss[loss=0.2434, simple_loss=0.2955, pruned_loss=0.09563, over 7677.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2839, pruned_loss=0.05894, over 1614970.20 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:22:59,997 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=216761.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:09,884 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([0.8831, 1.2137, 0.9669, 1.2068, 0.9797, 0.8958, 1.0413, 1.0853], + device='cuda:3'), covar=tensor([0.0848, 0.0407, 0.1079, 0.0444, 0.0656, 0.1250, 0.0671, 0.0544], + device='cuda:3'), in_proj_covar=tensor([0.0362, 0.0241, 0.0342, 0.0313, 0.0304, 0.0347, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:23:12,957 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-07 12:23:17,022 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=216786.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:23:29,549 INFO [train.py:901] (3/4) Epoch 27, batch 6650, loss[loss=0.1918, simple_loss=0.2799, pruned_loss=0.05186, over 8317.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.283, pruned_loss=0.05911, over 1609873.69 frames. 
], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:23:34,797 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.735e+02 2.571e+02 3.099e+02 3.859e+02 9.745e+02, threshold=6.199e+02, percent-clipped=7.0 +2023-02-07 12:24:03,769 INFO [train.py:901] (3/4) Epoch 27, batch 6700, loss[loss=0.1967, simple_loss=0.2869, pruned_loss=0.05327, over 8503.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2841, pruned_loss=0.05956, over 1607513.59 frames. ], batch size: 28, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:32,231 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3137, 2.0228, 2.5799, 2.2238, 2.5376, 2.3550, 2.2214, 1.4322], + device='cuda:3'), covar=tensor([0.5587, 0.5161, 0.2101, 0.3763, 0.2420, 0.3323, 0.1956, 0.5318], + device='cuda:3'), in_proj_covar=tensor([0.0966, 0.1021, 0.0834, 0.0992, 0.1031, 0.0931, 0.0773, 0.0853], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 12:24:38,620 INFO [train.py:901] (3/4) Epoch 27, batch 6750, loss[loss=0.217, simple_loss=0.3058, pruned_loss=0.06406, over 8179.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2839, pruned_loss=0.05923, over 1609562.39 frames. ], batch size: 23, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:24:40,694 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5953, 4.6121, 4.1354, 2.1224, 4.0814, 4.1831, 4.1770, 4.0567], + device='cuda:3'), covar=tensor([0.0663, 0.0459, 0.0939, 0.4524, 0.0835, 0.0963, 0.1146, 0.0736], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0464, 0.0445, 0.0556, 0.0442, 0.0464, 0.0439, 0.0403], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:24:43,892 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.479e+02 3.006e+02 3.687e+02 6.813e+02, threshold=6.012e+02, percent-clipped=1.0 +2023-02-07 12:24:49,475 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1488, 3.7295, 2.4318, 2.9863, 2.8886, 2.1408, 2.8964, 3.0408], + device='cuda:3'), covar=tensor([0.1689, 0.0355, 0.1099, 0.0728, 0.0777, 0.1408, 0.0961, 0.1081], + device='cuda:3'), in_proj_covar=tensor([0.0363, 0.0243, 0.0343, 0.0314, 0.0306, 0.0348, 0.0352, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:24:59,756 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-07 12:25:11,172 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=216953.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:12,358 INFO [train.py:901] (3/4) Epoch 27, batch 6800, loss[loss=0.2211, simple_loss=0.3068, pruned_loss=0.06771, over 8636.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2845, pruned_loss=0.0594, over 1607727.33 frames. ], batch size: 34, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:24,113 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-07 12:25:32,329 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=216983.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:25:47,374 INFO [train.py:901] (3/4) Epoch 27, batch 6850, loss[loss=0.2285, simple_loss=0.3151, pruned_loss=0.07097, over 8553.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2836, pruned_loss=0.05885, over 1607661.55 frames. 
], batch size: 49, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:25:52,583 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 2.408e+02 3.097e+02 3.751e+02 9.876e+02, threshold=6.193e+02, percent-clipped=4.0 +2023-02-07 12:25:55,366 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217017.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:26:10,970 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-07 12:26:21,077 INFO [train.py:901] (3/4) Epoch 27, batch 6900, loss[loss=0.1819, simple_loss=0.2628, pruned_loss=0.05047, over 7532.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2826, pruned_loss=0.05862, over 1607688.95 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:26:29,754 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217067.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:30,512 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217068.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:26:56,822 INFO [train.py:901] (3/4) Epoch 27, batch 6950, loss[loss=0.2269, simple_loss=0.3005, pruned_loss=0.07669, over 8630.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2836, pruned_loss=0.05893, over 1610068.09 frames. ], batch size: 34, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:02,041 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.303e+02 2.670e+02 3.410e+02 6.861e+02, threshold=5.340e+02, percent-clipped=1.0 +2023-02-07 12:27:12,678 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.05 vs. limit=5.0 +2023-02-07 12:27:15,674 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217132.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:27:19,477 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-07 12:27:30,834 INFO [train.py:901] (3/4) Epoch 27, batch 7000, loss[loss=0.2152, simple_loss=0.3005, pruned_loss=0.06501, over 8495.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2834, pruned_loss=0.05849, over 1615309.87 frames. ], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:27:47,592 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5094, 1.6136, 1.6512, 1.2173, 1.7276, 1.3524, 0.6669, 1.6291], + device='cuda:3'), covar=tensor([0.0505, 0.0344, 0.0264, 0.0503, 0.0328, 0.0717, 0.0823, 0.0274], + device='cuda:3'), in_proj_covar=tensor([0.0471, 0.0407, 0.0362, 0.0457, 0.0392, 0.0550, 0.0401, 0.0438], + device='cuda:3'), out_proj_covar=tensor([1.2482e-04, 1.0563e-04, 9.4482e-05, 1.1964e-04, 1.0279e-04, 1.5348e-04, + 1.0697e-04, 1.1495e-04], device='cuda:3') +2023-02-07 12:27:49,482 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217182.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:28:05,127 INFO [train.py:901] (3/4) Epoch 27, batch 7050, loss[loss=0.1998, simple_loss=0.2932, pruned_loss=0.0532, over 8344.00 frames. ], tot_loss[loss=0.2014, simple_loss=0.2848, pruned_loss=0.059, over 1617040.15 frames. 
], batch size: 26, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:28:11,277 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.356e+02 3.046e+02 3.591e+02 8.726e+02, threshold=6.092e+02, percent-clipped=6.0 +2023-02-07 12:28:20,121 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9743, 1.6608, 6.1321, 2.1862, 5.5409, 5.1090, 5.6851, 5.5327], + device='cuda:3'), covar=tensor([0.0527, 0.4638, 0.0352, 0.3880, 0.0942, 0.0851, 0.0442, 0.0497], + device='cuda:3'), in_proj_covar=tensor([0.0674, 0.0662, 0.0731, 0.0652, 0.0741, 0.0627, 0.0634, 0.0714], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-07 12:28:40,055 INFO [train.py:901] (3/4) Epoch 27, batch 7100, loss[loss=0.1753, simple_loss=0.264, pruned_loss=0.04327, over 8569.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.05809, over 1614682.59 frames. ], batch size: 31, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:14,482 INFO [train.py:901] (3/4) Epoch 27, batch 7150, loss[loss=0.1724, simple_loss=0.2558, pruned_loss=0.04455, over 8087.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05838, over 1612884.66 frames. ], batch size: 21, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:29:19,657 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.328e+02 2.444e+02 3.123e+02 4.113e+02 1.134e+03, threshold=6.246e+02, percent-clipped=7.0 +2023-02-07 12:29:27,896 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217324.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:29,711 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217327.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:45,935 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217349.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:29:49,810 INFO [train.py:901] (3/4) Epoch 27, batch 7200, loss[loss=0.1976, simple_loss=0.2871, pruned_loss=0.05407, over 8463.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2839, pruned_loss=0.05814, over 1613830.14 frames. ], batch size: 25, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:04,008 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2343, 2.0307, 2.6297, 2.2235, 2.6579, 2.3151, 2.1566, 1.6344], + device='cuda:3'), covar=tensor([0.5805, 0.5260, 0.2150, 0.3763, 0.2622, 0.3194, 0.1901, 0.5523], + device='cuda:3'), in_proj_covar=tensor([0.0965, 0.1022, 0.0832, 0.0992, 0.1030, 0.0928, 0.0770, 0.0851], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-07 12:30:11,975 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217388.0, num_to_drop=1, layers_to_drop={0} +2023-02-07 12:30:23,037 INFO [train.py:901] (3/4) Epoch 27, batch 7250, loss[loss=0.2028, simple_loss=0.2711, pruned_loss=0.06727, over 7209.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2837, pruned_loss=0.05766, over 1614357.20 frames. 
], batch size: 16, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:30:23,876 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217406.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:28,402 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.296e+02 2.784e+02 3.610e+02 7.832e+02, threshold=5.568e+02, percent-clipped=2.0 +2023-02-07 12:30:28,621 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217413.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:30:45,931 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217438.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:49,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217442.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:30:58,443 INFO [train.py:901] (3/4) Epoch 27, batch 7300, loss[loss=0.1618, simple_loss=0.2469, pruned_loss=0.0384, over 7812.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.282, pruned_loss=0.05677, over 1613788.05 frames. ], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:04,059 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217463.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:33,103 INFO [train.py:901] (3/4) Epoch 27, batch 7350, loss[loss=0.1907, simple_loss=0.269, pruned_loss=0.05624, over 8026.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2809, pruned_loss=0.05629, over 1613767.95 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:31:36,623 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217510.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:31:38,502 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.322e+02 2.888e+02 3.768e+02 6.651e+02, threshold=5.777e+02, percent-clipped=4.0 +2023-02-07 12:31:59,842 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-07 12:32:07,068 INFO [train.py:901] (3/4) Epoch 27, batch 7400, loss[loss=0.1931, simple_loss=0.2871, pruned_loss=0.04955, over 8585.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2825, pruned_loss=0.05693, over 1616582.70 frames. ], batch size: 34, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:19,126 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-07 12:32:42,456 INFO [train.py:901] (3/4) Epoch 27, batch 7450, loss[loss=0.1823, simple_loss=0.2657, pruned_loss=0.04944, over 7808.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2832, pruned_loss=0.05756, over 1618707.06 frames. 
], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:32:43,318 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1086, 1.8639, 3.5409, 1.8013, 2.7502, 3.8456, 3.9512, 3.3388], + device='cuda:3'), covar=tensor([0.1166, 0.1654, 0.0292, 0.1943, 0.0816, 0.0223, 0.0645, 0.0546], + device='cuda:3'), in_proj_covar=tensor([0.0306, 0.0322, 0.0291, 0.0318, 0.0320, 0.0276, 0.0437, 0.0306], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-07 12:32:47,778 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.771e+02 2.478e+02 3.262e+02 4.062e+02 8.102e+02, threshold=6.523e+02, percent-clipped=5.0 +2023-02-07 12:32:58,353 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-07 12:33:05,996 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-07 12:33:16,130 INFO [train.py:901] (3/4) Epoch 27, batch 7500, loss[loss=0.215, simple_loss=0.2924, pruned_loss=0.0688, over 7809.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2827, pruned_loss=0.05742, over 1617179.72 frames. ], batch size: 20, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:34,977 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217682.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:46,542 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=217698.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:33:51,442 INFO [train.py:901] (3/4) Epoch 27, batch 7550, loss[loss=0.2277, simple_loss=0.3017, pruned_loss=0.07688, over 8118.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2824, pruned_loss=0.05747, over 1617145.70 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:33:56,749 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.428e+02 3.024e+02 3.911e+02 8.560e+02, threshold=6.047e+02, percent-clipped=1.0 +2023-02-07 12:34:03,671 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=217723.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:21,847 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217750.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:34:25,190 INFO [train.py:901] (3/4) Epoch 27, batch 7600, loss[loss=0.1513, simple_loss=0.2374, pruned_loss=0.03262, over 8240.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05721, over 1617586.67 frames. ], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:01,502 INFO [train.py:901] (3/4) Epoch 27, batch 7650, loss[loss=0.1855, simple_loss=0.265, pruned_loss=0.05298, over 7540.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2811, pruned_loss=0.05705, over 1618436.06 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:06,799 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.640e+02 2.541e+02 2.896e+02 3.920e+02 6.720e+02, threshold=5.793e+02, percent-clipped=4.0 +2023-02-07 12:35:35,074 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=217854.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:35:35,687 INFO [train.py:901] (3/4) Epoch 27, batch 7700, loss[loss=0.1765, simple_loss=0.274, pruned_loss=0.03948, over 8033.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2813, pruned_loss=0.05717, over 1613834.22 frames. 
], batch size: 22, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:35:42,371 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217865.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:35:54,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-07 12:36:05,141 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-07 12:36:10,562 INFO [train.py:901] (3/4) Epoch 27, batch 7750, loss[loss=0.1629, simple_loss=0.2342, pruned_loss=0.04581, over 7540.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2813, pruned_loss=0.057, over 1610421.82 frames. ], batch size: 18, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:15,959 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.659e+02 2.515e+02 3.033e+02 3.634e+02 8.452e+02, threshold=6.066e+02, percent-clipped=4.0 +2023-02-07 12:36:18,822 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=217916.0, num_to_drop=1, layers_to_drop={1} +2023-02-07 12:36:45,564 INFO [train.py:901] (3/4) Epoch 27, batch 7800, loss[loss=0.2362, simple_loss=0.3003, pruned_loss=0.08605, over 7661.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2817, pruned_loss=0.05723, over 1607444.12 frames. ], batch size: 19, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:36:55,074 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=217969.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:08,180 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7386, 2.7129, 1.8797, 2.3569, 2.2935, 1.7560, 2.2425, 2.4249], + device='cuda:3'), covar=tensor([0.1510, 0.0367, 0.1259, 0.0645, 0.0737, 0.1487, 0.0964, 0.1028], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0241, 0.0341, 0.0312, 0.0303, 0.0346, 0.0349, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-07 12:37:17,817 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7202, 1.7929, 1.6191, 2.3440, 1.0346, 1.4660, 1.7044, 1.8479], + device='cuda:3'), covar=tensor([0.0748, 0.0865, 0.0933, 0.0389, 0.1147, 0.1317, 0.0784, 0.0707], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0193, 0.0245, 0.0212, 0.0202, 0.0245, 0.0249, 0.0203], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-07 12:37:19,652 INFO [train.py:901] (3/4) Epoch 27, batch 7850, loss[loss=0.157, simple_loss=0.2344, pruned_loss=0.03982, over 7233.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.281, pruned_loss=0.05672, over 1607754.84 frames. ], batch size: 16, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:37:24,965 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.277e+02 2.828e+02 3.912e+02 8.712e+02, threshold=5.655e+02, percent-clipped=7.0 +2023-02-07 12:37:33,523 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218026.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:37:52,846 INFO [train.py:901] (3/4) Epoch 27, batch 7900, loss[loss=0.206, simple_loss=0.2966, pruned_loss=0.05765, over 8513.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2822, pruned_loss=0.05795, over 1606329.14 frames. ], batch size: 28, lr: 2.77e-03, grad_scale: 8.0 +2023-02-07 12:38:23,930 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. 
limit=2.0 +2023-02-07 12:38:26,198 INFO [train.py:901] (3/4) Epoch 27, batch 7950, loss[loss=0.2413, simple_loss=0.323, pruned_loss=0.07981, over 8511.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2833, pruned_loss=0.05848, over 1606962.93 frames. ], batch size: 28, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:38:28,874 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-07 12:38:31,693 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.382e+02 2.553e+02 3.230e+02 4.059e+02 8.354e+02, threshold=6.459e+02, percent-clipped=5.0 +2023-02-07 12:38:35,241 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218118.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:37,398 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218121.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:50,521 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218141.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:53,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218146.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:38:59,458 INFO [train.py:901] (3/4) Epoch 27, batch 8000, loss[loss=0.2205, simple_loss=0.3045, pruned_loss=0.06829, over 8188.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2828, pruned_loss=0.05848, over 1605455.68 frames. ], batch size: 23, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:29,225 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218200.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:32,351 INFO [train.py:901] (3/4) Epoch 27, batch 8050, loss[loss=0.1665, simple_loss=0.2599, pruned_loss=0.03658, over 7917.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2819, pruned_loss=0.05795, over 1599146.51 frames. 
], batch size: 20, lr: 2.76e-03, grad_scale: 8.0 +2023-02-07 12:39:38,070 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.285e+02 2.948e+02 3.498e+02 7.136e+02, threshold=5.897e+02, percent-clipped=2.0 +2023-02-07 12:39:46,279 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218225.0, num_to_drop=0, layers_to_drop=set() +2023-02-07 12:39:48,253 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218228.0, num_to_drop=0, layers_to_drop=set() diff --git a/log/log-train-2023-02-08-23-42-53-0 b/log/log-train-2023-02-08-23-42-53-0 new file mode 100644 index 0000000000000000000000000000000000000000..0f1d0d90d0bfa5f598261893b8303f15395accc5 --- /dev/null +++ b/log/log-train-2023-02-08-23-42-53-0 @@ -0,0 +1,2806 @@ +2023-02-08 23:42:53,785 INFO [train.py:973] (0/4) Training started +2023-02-08 23:42:53,792 INFO [train.py:983] (0/4) Device: cuda:0 +2023-02-08 23:42:53,850 INFO [train.py:992] (0/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r8n07', 'IP address': '10.1.8.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 28, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-08 23:42:53,850 INFO [train.py:994] (0/4) About to create model +2023-02-08 23:42:54,160 INFO [zipformer.py:402] (0/4) At encoder stack 4, which has downsampling_factor=2, we will combine the 
outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-08 23:42:54,172 INFO [train.py:998] (0/4) Number of model parameters: 20697573 +2023-02-08 23:42:54,388 INFO [checkpoint.py:112] (0/4) Loading checkpoint from pruned_transducer_stateless7_streaming/exp/v1/epoch-27.pt +2023-02-08 23:42:58,649 INFO [checkpoint.py:131] (0/4) Loading averaged model +2023-02-08 23:43:03,619 INFO [train.py:1013] (0/4) Using DDP +2023-02-08 23:43:03,869 INFO [train.py:1030] (0/4) Loading optimizer state dict +2023-02-08 23:43:04,236 INFO [train.py:1038] (0/4) Loading scheduler state dict +2023-02-08 23:43:04,237 INFO [asr_datamodule.py:420] (0/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-08 23:43:04,302 INFO [asr_datamodule.py:224] (0/4) Enable MUSAN +2023-02-08 23:43:04,302 INFO [asr_datamodule.py:225] (0/4) About to get Musan cuts +2023-02-08 23:43:05,967 INFO [asr_datamodule.py:249] (0/4) Enable SpecAugment +2023-02-08 23:43:05,967 INFO [asr_datamodule.py:250] (0/4) Time warp factor: 80 +2023-02-08 23:43:05,967 INFO [asr_datamodule.py:260] (0/4) Num frame mask: 10 +2023-02-08 23:43:05,967 INFO [asr_datamodule.py:273] (0/4) About to create train dataset +2023-02-08 23:43:05,967 INFO [asr_datamodule.py:300] (0/4) Using DynamicBucketingSampler. +2023-02-08 23:43:06,271 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-08 23:43:08,591 INFO [asr_datamodule.py:316] (0/4) About to create train dataloader +2023-02-08 23:43:08,591 INFO [asr_datamodule.py:430] (0/4) About to get dev-clean cuts +2023-02-08 23:43:08,593 INFO [asr_datamodule.py:437] (0/4) About to get dev-other cuts +2023-02-08 23:43:08,594 INFO [asr_datamodule.py:347] (0/4) About to create dev dataset +2023-02-08 23:43:08,942 INFO [asr_datamodule.py:364] (0/4) About to create dev dataloader +2023-02-08 23:43:08,942 INFO [train.py:1122] (0/4) Loading grad scaler state dict +2023-02-08 23:43:20,390 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-08 23:43:25,772 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-08 23:43:26,064 INFO [train.py:901] (0/4) Epoch 28, batch 0, loss[loss=0.296, simple_loss=0.3346, pruned_loss=0.1286, over 7975.00 frames. ], tot_loss[loss=0.296, simple_loss=0.3346, pruned_loss=0.1286, over 7975.00 frames. ], batch size: 21, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:43:26,064 INFO [train.py:926] (0/4) Computing validation loss +2023-02-08 23:43:38,193 INFO [train.py:935] (0/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2712, pruned_loss=0.03579, over 944034.00 frames. +2023-02-08 23:43:38,195 INFO [train.py:936] (0/4) Maximum memory allocated so far is 5970MB +2023-02-08 23:43:48,590 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218250.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:43:55,323 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-08 23:43:59,124 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. 
Duration: 28.72225 +2023-02-08 23:43:59,192 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218260.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:44:12,579 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6264, 2.3861, 3.0217, 2.5372, 3.0846, 2.5499, 2.5257, 2.2612], + device='cuda:0'), covar=tensor([0.4481, 0.4732, 0.2180, 0.3421, 0.2352, 0.3004, 0.1662, 0.5073], + device='cuda:0'), in_proj_covar=tensor([0.0956, 0.1016, 0.0823, 0.0986, 0.1019, 0.0922, 0.0763, 0.0846], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-08 23:44:26,826 INFO [train.py:901] (0/4) Epoch 28, batch 50, loss[loss=0.1862, simple_loss=0.2835, pruned_loss=0.04448, over 8464.00 frames. ], tot_loss[loss=0.2055, simple_loss=0.2893, pruned_loss=0.06078, over 364890.37 frames. ], batch size: 27, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:44:44,822 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-08 23:44:48,222 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.503e+02 3.099e+02 3.838e+02 3.677e+03, threshold=6.198e+02, percent-clipped=7.0 +2023-02-08 23:45:09,768 INFO [train.py:901] (0/4) Epoch 28, batch 100, loss[loss=0.1674, simple_loss=0.2468, pruned_loss=0.04395, over 7561.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2865, pruned_loss=0.05978, over 646121.59 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:45:12,260 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-08 23:45:42,220 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218375.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:45:52,962 INFO [train.py:901] (0/4) Epoch 28, batch 150, loss[loss=0.2359, simple_loss=0.3088, pruned_loss=0.08148, over 8034.00 frames. ], tot_loss[loss=0.2034, simple_loss=0.2867, pruned_loss=0.06005, over 858215.43 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:46:01,161 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218397.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:46:12,821 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.274e+02 2.796e+02 3.416e+02 5.816e+02, threshold=5.591e+02, percent-clipped=0.0 +2023-02-08 23:46:19,646 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218422.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:46:32,308 INFO [train.py:901] (0/4) Epoch 28, batch 200, loss[loss=0.1937, simple_loss=0.2924, pruned_loss=0.04749, over 8349.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2845, pruned_loss=0.05843, over 1025052.38 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:46:50,630 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218462.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:47:10,726 INFO [train.py:901] (0/4) Epoch 28, batch 250, loss[loss=0.1891, simple_loss=0.2563, pruned_loss=0.06098, over 7428.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2837, pruned_loss=0.05733, over 1160062.83 frames. ], batch size: 17, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:47:23,075 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. 
Duration: 28.0944375 +2023-02-08 23:47:31,292 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.405e+02 2.917e+02 3.543e+02 7.929e+02, threshold=5.833e+02, percent-clipped=6.0 +2023-02-08 23:47:33,433 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-08 23:47:41,366 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218527.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:47:48,877 INFO [train.py:901] (0/4) Epoch 28, batch 300, loss[loss=0.197, simple_loss=0.2891, pruned_loss=0.05243, over 8493.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2843, pruned_loss=0.05866, over 1260762.45 frames. ], batch size: 49, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:47:53,404 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218544.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:14,321 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218572.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:18,159 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218577.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:25,708 INFO [train.py:901] (0/4) Epoch 28, batch 350, loss[loss=0.2105, simple_loss=0.3012, pruned_loss=0.05992, over 8585.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2822, pruned_loss=0.05781, over 1336986.70 frames. ], batch size: 34, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:48:28,663 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218592.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:29,481 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6227, 1.5054, 2.0516, 1.7558, 1.7864, 1.6938, 1.4986, 0.8300], + device='cuda:0'), covar=tensor([0.7343, 0.5809, 0.2471, 0.4114, 0.3473, 0.4639, 0.3089, 0.5412], + device='cuda:0'), in_proj_covar=tensor([0.0959, 0.1017, 0.0823, 0.0986, 0.1018, 0.0922, 0.0763, 0.0846], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-08 23:48:43,881 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.330e+02 2.853e+02 3.797e+02 9.826e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-08 23:48:59,299 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218631.0, num_to_drop=1, layers_to_drop={1} +2023-02-08 23:49:04,811 INFO [train.py:901] (0/4) Epoch 28, batch 400, loss[loss=0.1772, simple_loss=0.2641, pruned_loss=0.04513, over 7942.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2825, pruned_loss=0.05786, over 1396074.49 frames. 
], batch size: 20, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:49:16,388 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6811, 2.0789, 3.2086, 1.5722, 2.3738, 2.1141, 1.8590, 2.5064], + device='cuda:0'), covar=tensor([0.1923, 0.2714, 0.0874, 0.4743, 0.2005, 0.3306, 0.2403, 0.2150], + device='cuda:0'), in_proj_covar=tensor([0.0542, 0.0637, 0.0566, 0.0672, 0.0660, 0.0615, 0.0568, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-08 23:49:17,857 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218656.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:49:19,981 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218659.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:49:40,570 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218687.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:49:41,135 INFO [train.py:901] (0/4) Epoch 28, batch 450, loss[loss=0.1908, simple_loss=0.2847, pruned_loss=0.04842, over 8508.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.283, pruned_loss=0.05778, over 1451021.87 frames. ], batch size: 28, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:49:59,791 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.362e+02 2.836e+02 3.643e+02 9.062e+02, threshold=5.672e+02, percent-clipped=2.0 +2023-02-08 23:50:18,544 INFO [train.py:901] (0/4) Epoch 28, batch 500, loss[loss=0.2342, simple_loss=0.3033, pruned_loss=0.08257, over 7133.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2831, pruned_loss=0.05748, over 1486358.08 frames. ], batch size: 74, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:50:57,136 INFO [train.py:901] (0/4) Epoch 28, batch 550, loss[loss=0.1882, simple_loss=0.2682, pruned_loss=0.05414, over 8236.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.05717, over 1509607.17 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:51:05,270 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4702, 2.3310, 2.9924, 2.4696, 3.0249, 2.5198, 2.4219, 1.7875], + device='cuda:0'), covar=tensor([0.5789, 0.5441, 0.2227, 0.4242, 0.2618, 0.3391, 0.1913, 0.5993], + device='cuda:0'), in_proj_covar=tensor([0.0963, 0.1021, 0.0829, 0.0991, 0.1024, 0.0928, 0.0768, 0.0849], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-08 23:51:16,042 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.392e+02 2.925e+02 3.560e+02 1.211e+03, threshold=5.850e+02, percent-clipped=4.0 +2023-02-08 23:51:29,379 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5935, 2.4140, 1.8025, 2.3168, 2.2072, 1.5912, 2.0444, 2.1015], + device='cuda:0'), covar=tensor([0.1451, 0.0447, 0.1272, 0.0650, 0.0731, 0.1688, 0.1132, 0.0958], + device='cuda:0'), in_proj_covar=tensor([0.0359, 0.0244, 0.0342, 0.0315, 0.0305, 0.0349, 0.0352, 0.0325], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-08 23:51:30,166 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218833.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:51:33,463 INFO [train.py:901] (0/4) Epoch 28, batch 600, loss[loss=0.214, simple_loss=0.2959, pruned_loss=0.06604, over 8336.00 frames. 
], tot_loss[loss=0.1972, simple_loss=0.281, pruned_loss=0.05668, over 1533606.42 frames. ], batch size: 26, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:51:33,882 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.63 vs. limit=5.0 +2023-02-08 23:51:53,211 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218858.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:51:56,582 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-08 23:52:04,139 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218871.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:06,129 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0 +2023-02-08 23:52:10,236 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5477, 1.7941, 1.8550, 1.2147, 1.9219, 1.3951, 0.4486, 1.7726], + device='cuda:0'), covar=tensor([0.0614, 0.0435, 0.0358, 0.0587, 0.0470, 0.0983, 0.0938, 0.0308], + device='cuda:0'), in_proj_covar=tensor([0.0474, 0.0412, 0.0366, 0.0458, 0.0395, 0.0553, 0.0402, 0.0442], + device='cuda:0'), out_proj_covar=tensor([1.2539e-04, 1.0696e-04, 9.5266e-05, 1.1974e-04, 1.0344e-04, 1.5414e-04, + 1.0732e-04, 1.1585e-04], device='cuda:0') +2023-02-08 23:52:18,546 INFO [train.py:901] (0/4) Epoch 28, batch 650, loss[loss=0.1839, simple_loss=0.2706, pruned_loss=0.04864, over 8017.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2803, pruned_loss=0.05598, over 1552200.91 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:40,037 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.221e+02 2.637e+02 3.403e+02 7.509e+02, threshold=5.274e+02, percent-clipped=1.0 +2023-02-08 23:52:41,074 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218915.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:54,633 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.6832, 5.8679, 5.1021, 2.3930, 5.1107, 5.4242, 5.3913, 5.2674], + device='cuda:0'), covar=tensor([0.0507, 0.0355, 0.0864, 0.4357, 0.0752, 0.0790, 0.1115, 0.0622], + device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0461, 0.0445, 0.0555, 0.0444, 0.0463, 0.0439, 0.0406], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-08 23:52:55,955 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218936.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:57,374 INFO [train.py:901] (0/4) Epoch 28, batch 700, loss[loss=0.1864, simple_loss=0.2611, pruned_loss=0.05586, over 7975.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2808, pruned_loss=0.05619, over 1566190.66 frames. 
], batch size: 21, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:59,057 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218940.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:01,212 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218943.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:18,874 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218968.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:31,053 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218983.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:33,195 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218986.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:34,536 INFO [train.py:901] (0/4) Epoch 28, batch 750, loss[loss=0.1806, simple_loss=0.2642, pruned_loss=0.04848, over 7916.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2813, pruned_loss=0.05646, over 1572632.23 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:53:46,831 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219002.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:55,131 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.280e+02 2.810e+02 3.388e+02 7.203e+02, threshold=5.620e+02, percent-clipped=6.0 +2023-02-08 23:53:55,168 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-08 23:54:04,609 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-08 23:54:12,582 INFO [train.py:901] (0/4) Epoch 28, batch 800, loss[loss=0.1721, simple_loss=0.2583, pruned_loss=0.04292, over 7789.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.282, pruned_loss=0.05683, over 1580976.92 frames. ], batch size: 19, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:12,699 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2624, 3.1842, 2.9474, 1.5694, 2.8709, 2.9205, 2.8565, 2.8156], + device='cuda:0'), covar=tensor([0.1242, 0.0909, 0.1444, 0.4479, 0.1322, 0.1274, 0.1789, 0.1076], + device='cuda:0'), in_proj_covar=tensor([0.0541, 0.0460, 0.0445, 0.0555, 0.0444, 0.0464, 0.0438, 0.0405], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-08 23:54:13,423 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219039.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:22,041 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219051.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:49,147 INFO [train.py:901] (0/4) Epoch 28, batch 850, loss[loss=0.2083, simple_loss=0.2938, pruned_loss=0.06135, over 8740.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2805, pruned_loss=0.05644, over 1585766.61 frames. 
], batch size: 30, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:50,763 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219090.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:55:10,259 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.432e+02 3.183e+02 3.929e+02 8.024e+02, threshold=6.365e+02, percent-clipped=6.0 +2023-02-08 23:55:11,264 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8674, 1.6176, 1.9257, 1.7205, 1.8721, 1.9158, 1.7581, 0.8200], + device='cuda:0'), covar=tensor([0.6272, 0.4921, 0.2099, 0.3569, 0.2518, 0.3441, 0.2198, 0.5030], + device='cuda:0'), in_proj_covar=tensor([0.0965, 0.1023, 0.0830, 0.0991, 0.1022, 0.0928, 0.0769, 0.0848], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-08 23:55:15,822 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-08 23:55:27,553 INFO [train.py:901] (0/4) Epoch 28, batch 900, loss[loss=0.1909, simple_loss=0.2914, pruned_loss=0.04519, over 8231.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.05616, over 1593382.53 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:03,866 INFO [train.py:901] (0/4) Epoch 28, batch 950, loss[loss=0.2058, simple_loss=0.2939, pruned_loss=0.05886, over 8689.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2817, pruned_loss=0.05684, over 1602282.37 frames. ], batch size: 34, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:15,323 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0877, 1.9938, 6.2693, 2.4705, 5.6672, 5.2764, 5.8188, 5.6754], + device='cuda:0'), covar=tensor([0.0483, 0.4403, 0.0374, 0.3639, 0.0978, 0.0884, 0.0474, 0.0545], + device='cuda:0'), in_proj_covar=tensor([0.0676, 0.0659, 0.0729, 0.0654, 0.0739, 0.0631, 0.0637, 0.0711], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-08 23:56:22,894 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.524e+02 3.053e+02 4.249e+02 9.516e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-08 23:56:29,698 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219221.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:56:34,861 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-08 23:56:40,765 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7189, 1.8562, 5.9275, 2.0158, 5.2637, 4.9490, 5.4503, 5.3133], + device='cuda:0'), covar=tensor([0.0624, 0.4981, 0.0367, 0.4498, 0.1078, 0.0978, 0.0561, 0.0600], + device='cuda:0'), in_proj_covar=tensor([0.0676, 0.0659, 0.0727, 0.0653, 0.0739, 0.0631, 0.0636, 0.0711], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-08 23:56:43,597 INFO [train.py:901] (0/4) Epoch 28, batch 1000, loss[loss=0.2239, simple_loss=0.3036, pruned_loss=0.07208, over 8489.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2807, pruned_loss=0.05631, over 1603294.50 frames. 
], batch size: 49, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:46,632 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219242.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:04,474 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219267.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:11,557 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-08 23:57:19,442 INFO [train.py:901] (0/4) Epoch 28, batch 1050, loss[loss=0.2185, simple_loss=0.2972, pruned_loss=0.06986, over 8325.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.282, pruned_loss=0.0571, over 1608197.17 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:57:23,688 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-08 23:57:33,335 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219307.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:38,279 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.456e+02 2.957e+02 3.788e+02 8.190e+02, threshold=5.915e+02, percent-clipped=1.0 +2023-02-08 23:57:47,814 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219327.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:51,441 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219332.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:52,299 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-08 23:57:56,812 INFO [train.py:901] (0/4) Epoch 28, batch 1100, loss[loss=0.2467, simple_loss=0.3354, pruned_loss=0.07904, over 8436.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.05708, over 1609352.41 frames. ], batch size: 49, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:03,515 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219346.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:13,599 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219360.0, num_to_drop=1, layers_to_drop={1} +2023-02-08 23:58:30,131 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219383.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:33,468 INFO [train.py:901] (0/4) Epoch 28, batch 1150, loss[loss=0.2096, simple_loss=0.2874, pruned_loss=0.06595, over 8328.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05695, over 1612926.11 frames. ], batch size: 26, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:37,159 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-08 23:58:52,525 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.386e+02 3.071e+02 3.782e+02 1.293e+03, threshold=6.141e+02, percent-clipped=2.0 +2023-02-08 23:59:07,136 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219434.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:10,013 INFO [train.py:901] (0/4) Epoch 28, batch 1200, loss[loss=0.1971, simple_loss=0.2766, pruned_loss=0.05879, over 8428.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05633, over 1611297.89 frames. 
], batch size: 27, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:13,064 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219442.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:27,998 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219461.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:47,599 INFO [train.py:901] (0/4) Epoch 28, batch 1250, loss[loss=0.1942, simple_loss=0.2664, pruned_loss=0.06101, over 7924.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.05762, over 1613696.01 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:48,524 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7254, 1.5520, 1.8877, 1.5681, 1.0701, 1.5234, 2.2716, 1.9866], + device='cuda:0'), covar=tensor([0.0481, 0.1301, 0.1677, 0.1492, 0.0633, 0.1562, 0.0636, 0.0634], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0188, 0.0161, 0.0101, 0.0163, 0.0112, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-08 23:59:55,038 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:02,357 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219508.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:06,606 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.357e+02 2.809e+02 3.466e+02 7.121e+02, threshold=5.618e+02, percent-clipped=3.0 +2023-02-09 00:00:23,412 INFO [train.py:901] (0/4) Epoch 28, batch 1300, loss[loss=0.2167, simple_loss=0.2952, pruned_loss=0.06905, over 7713.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2829, pruned_loss=0.05792, over 1612069.41 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-09 00:00:31,539 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:42,923 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219565.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:01:02,349 INFO [train.py:901] (0/4) Epoch 28, batch 1350, loss[loss=0.1643, simple_loss=0.2549, pruned_loss=0.03688, over 8026.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2816, pruned_loss=0.05758, over 1610847.70 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:01:22,008 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.365e+02 2.856e+02 3.377e+02 7.819e+02, threshold=5.713e+02, percent-clipped=4.0 +2023-02-09 00:01:39,687 INFO [train.py:901] (0/4) Epoch 28, batch 1400, loss[loss=0.2366, simple_loss=0.3182, pruned_loss=0.07753, over 8508.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.05719, over 1614188.86 frames. ], batch size: 28, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:09,818 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219680.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:15,338 INFO [train.py:901] (0/4) Epoch 28, batch 1450, loss[loss=0.1723, simple_loss=0.259, pruned_loss=0.04278, over 8258.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05699, over 1614928.54 frames. 
], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:23,496 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:25,407 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 00:02:28,358 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219704.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:02:36,731 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.298e+02 2.874e+02 3.536e+02 7.746e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-09 00:02:39,224 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:43,538 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:54,217 INFO [train.py:901] (0/4) Epoch 28, batch 1500, loss[loss=0.1433, simple_loss=0.2275, pruned_loss=0.02951, over 7701.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2803, pruned_loss=0.05658, over 1611657.45 frames. ], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:57,287 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:05,694 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219754.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:23,849 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219779.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:25,258 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219781.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:29,874 INFO [train.py:901] (0/4) Epoch 28, batch 1550, loss[loss=0.2023, simple_loss=0.2875, pruned_loss=0.05851, over 8195.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2813, pruned_loss=0.05718, over 1613628.49 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:03:42,636 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:49,438 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.435e+02 2.945e+02 3.827e+02 6.900e+02, threshold=5.889e+02, percent-clipped=4.0 +2023-02-09 00:03:54,629 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219819.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:03:55,294 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8715, 1.6249, 1.9362, 1.6347, 1.1190, 1.6063, 2.3021, 1.9932], + device='cuda:0'), covar=tensor([0.0461, 0.1280, 0.1639, 0.1434, 0.0601, 0.1507, 0.0600, 0.0629], + device='cuda:0'), in_proj_covar=tensor([0.0098, 0.0152, 0.0188, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:0'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 00:04:03,141 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:08,594 INFO [train.py:901] (0/4) Epoch 28, batch 1600, loss[loss=0.1969, simple_loss=0.2904, pruned_loss=0.05171, over 8205.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05672, over 1616144.04 frames. 
], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:04:18,700 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219852.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:35,344 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:44,473 INFO [train.py:901] (0/4) Epoch 28, batch 1650, loss[loss=0.2121, simple_loss=0.2987, pruned_loss=0.06278, over 7967.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2817, pruned_loss=0.05673, over 1616758.70 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:02,695 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.482e+02 2.898e+02 3.443e+02 5.647e+02, threshold=5.797e+02, percent-clipped=0.0 +2023-02-09 00:05:20,276 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219936.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:21,472 INFO [train.py:901] (0/4) Epoch 28, batch 1700, loss[loss=0.2434, simple_loss=0.3174, pruned_loss=0.08471, over 8256.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2822, pruned_loss=0.0571, over 1617528.50 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:39,132 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219961.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:43,293 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219967.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:57,790 INFO [train.py:901] (0/4) Epoch 28, batch 1750, loss[loss=0.1933, simple_loss=0.2809, pruned_loss=0.05286, over 8468.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2824, pruned_loss=0.05773, over 1613424.06 frames. ], batch size: 29, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:06:06,483 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-220000.pt +2023-02-09 00:06:17,589 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.339e+02 2.848e+02 3.606e+02 1.047e+03, threshold=5.695e+02, percent-clipped=4.0 +2023-02-09 00:06:34,442 INFO [train.py:901] (0/4) Epoch 28, batch 1800, loss[loss=0.1724, simple_loss=0.251, pruned_loss=0.04689, over 7441.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2818, pruned_loss=0.05751, over 1610054.88 frames. ], batch size: 17, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:02,974 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220075.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:07:11,952 INFO [train.py:901] (0/4) Epoch 28, batch 1850, loss[loss=0.1872, simple_loss=0.2839, pruned_loss=0.04523, over 8510.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05729, over 1609669.63 frames. 
], batch size: 28, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:20,533 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220100.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:07:23,916 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220105.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:30,314 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.328e+02 2.682e+02 3.608e+02 8.535e+02, threshold=5.364e+02, percent-clipped=7.0 +2023-02-09 00:07:31,755 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:38,071 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220125.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:47,183 INFO [train.py:901] (0/4) Epoch 28, batch 1900, loss[loss=0.1691, simple_loss=0.2561, pruned_loss=0.0411, over 8082.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2826, pruned_loss=0.05763, over 1607992.86 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:19,381 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 00:08:25,774 INFO [train.py:901] (0/4) Epoch 28, batch 1950, loss[loss=0.1956, simple_loss=0.2808, pruned_loss=0.05524, over 8659.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2827, pruned_loss=0.05764, over 1615282.15 frames. ], batch size: 34, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:33,000 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 00:08:44,726 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.461e+02 2.916e+02 3.869e+02 7.609e+02, threshold=5.833e+02, percent-clipped=8.0 +2023-02-09 00:08:48,264 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220219.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:51,076 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:53,655 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 00:09:01,371 INFO [train.py:901] (0/4) Epoch 28, batch 2000, loss[loss=0.185, simple_loss=0.2778, pruned_loss=0.04611, over 8659.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05697, over 1611478.66 frames. ], batch size: 34, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:02,868 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:07,805 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220247.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:08,589 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220248.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:29,136 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:37,448 INFO [train.py:901] (0/4) Epoch 28, batch 2050, loss[loss=0.1587, simple_loss=0.2234, pruned_loss=0.04699, over 7215.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.05738, over 1609546.39 frames. 
], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:58,202 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.398e+02 2.757e+02 3.324e+02 6.340e+02, threshold=5.514e+02, percent-clipped=2.0 +2023-02-09 00:10:04,008 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0528, 1.8484, 2.2540, 2.0131, 2.2987, 2.1768, 1.9984, 1.1996], + device='cuda:0'), covar=tensor([0.6445, 0.5493, 0.2402, 0.4102, 0.2644, 0.3520, 0.2208, 0.5832], + device='cuda:0'), in_proj_covar=tensor([0.0970, 0.1029, 0.0835, 0.0997, 0.1028, 0.0934, 0.0773, 0.0853], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 00:10:12,780 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220334.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:10:15,446 INFO [train.py:901] (0/4) Epoch 28, batch 2100, loss[loss=0.1915, simple_loss=0.2794, pruned_loss=0.05175, over 8484.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.05719, over 1613570.54 frames. ], batch size: 27, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:10:20,361 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-09 00:10:42,395 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-09 00:10:51,259 INFO [train.py:901] (0/4) Epoch 28, batch 2150, loss[loss=0.2516, simple_loss=0.321, pruned_loss=0.09108, over 7158.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05681, over 1619032.83 frames. ], batch size: 71, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:11,486 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.504e+02 2.973e+02 4.041e+02 1.001e+03, threshold=5.945e+02, percent-clipped=8.0 +2023-02-09 00:11:28,330 INFO [train.py:901] (0/4) Epoch 28, batch 2200, loss[loss=0.2231, simple_loss=0.2951, pruned_loss=0.07548, over 8244.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2818, pruned_loss=0.05697, over 1620314.54 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:36,284 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220449.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:11:44,024 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220460.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:03,388 INFO [train.py:901] (0/4) Epoch 28, batch 2250, loss[loss=0.1985, simple_loss=0.2958, pruned_loss=0.05054, over 8105.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2812, pruned_loss=0.05632, over 1615471.78 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:09,263 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220496.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:15,028 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.66 vs. limit=5.0 +2023-02-09 00:12:22,282 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.331e+02 2.835e+02 3.325e+02 7.200e+02, threshold=5.671e+02, percent-clipped=3.0 +2023-02-09 00:12:27,588 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220521.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:41,523 INFO [train.py:901] (0/4) Epoch 28, batch 2300, loss[loss=0.1959, simple_loss=0.289, pruned_loss=0.05134, over 8365.00 frames. 
], tot_loss[loss=0.1972, simple_loss=0.2816, pruned_loss=0.05639, over 1621164.04 frames. ], batch size: 49, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:44,383 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8158, 1.3745, 3.9786, 1.3943, 3.5300, 3.2704, 3.6238, 3.5143], + device='cuda:0'), covar=tensor([0.0698, 0.4875, 0.0669, 0.4688, 0.1230, 0.1120, 0.0713, 0.0803], + device='cuda:0'), in_proj_covar=tensor([0.0682, 0.0670, 0.0739, 0.0664, 0.0752, 0.0639, 0.0646, 0.0722], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:12:51,192 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0895, 1.7666, 3.9761, 1.9364, 2.6087, 4.4770, 4.6323, 3.9311], + device='cuda:0'), covar=tensor([0.1308, 0.1961, 0.0364, 0.2015, 0.1246, 0.0211, 0.0448, 0.0537], + device='cuda:0'), in_proj_covar=tensor([0.0308, 0.0327, 0.0293, 0.0321, 0.0323, 0.0277, 0.0441, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 00:12:59,743 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:06,023 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220573.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:07,464 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220575.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:16,600 INFO [train.py:901] (0/4) Epoch 28, batch 2350, loss[loss=0.1815, simple_loss=0.2773, pruned_loss=0.04291, over 8253.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2809, pruned_loss=0.05583, over 1621554.49 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:13:18,225 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220590.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:18,798 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220591.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:35,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.370e+02 2.329e+02 2.956e+02 3.826e+02 8.837e+02, threshold=5.912e+02, percent-clipped=4.0 +2023-02-09 00:13:36,675 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220615.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:40,250 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:53,608 INFO [train.py:901] (0/4) Epoch 28, batch 2400, loss[loss=0.1998, simple_loss=0.2873, pruned_loss=0.05617, over 8190.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2801, pruned_loss=0.05545, over 1619166.02 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:16,673 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220669.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:29,716 INFO [train.py:901] (0/4) Epoch 28, batch 2450, loss[loss=0.2272, simple_loss=0.3243, pruned_loss=0.06504, over 8667.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2797, pruned_loss=0.05549, over 1617024.14 frames. 
], batch size: 39, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:42,734 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220706.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:48,792 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.507e+02 3.309e+02 3.917e+02 8.053e+02, threshold=6.618e+02, percent-clipped=4.0 +2023-02-09 00:15:03,106 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220735.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:15:05,107 INFO [train.py:901] (0/4) Epoch 28, batch 2500, loss[loss=0.162, simple_loss=0.2483, pruned_loss=0.03781, over 7975.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2799, pruned_loss=0.05605, over 1617653.02 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:15:15,106 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6774, 2.7947, 2.5845, 4.2547, 1.7169, 2.0548, 2.8229, 2.9358], + device='cuda:0'), covar=tensor([0.0611, 0.0728, 0.0693, 0.0162, 0.0983, 0.1195, 0.0744, 0.0767], + device='cuda:0'), in_proj_covar=tensor([0.0229, 0.0193, 0.0242, 0.0211, 0.0202, 0.0245, 0.0249, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 00:15:42,749 INFO [train.py:901] (0/4) Epoch 28, batch 2550, loss[loss=0.1972, simple_loss=0.2678, pruned_loss=0.06333, over 7805.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2812, pruned_loss=0.05655, over 1623033.75 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:02,766 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.505e+02 3.011e+02 3.782e+02 1.017e+03, threshold=6.023e+02, percent-clipped=3.0 +2023-02-09 00:16:06,730 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220820.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:14,507 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220831.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:19,228 INFO [train.py:901] (0/4) Epoch 28, batch 2600, loss[loss=0.173, simple_loss=0.2595, pruned_loss=0.04324, over 8294.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2808, pruned_loss=0.05616, over 1617615.46 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:20,651 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220840.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:24,305 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:32,242 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220856.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:57,370 INFO [train.py:901] (0/4) Epoch 28, batch 2650, loss[loss=0.1876, simple_loss=0.2769, pruned_loss=0.04914, over 8436.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05633, over 1618781.93 frames. 
], batch size: 49, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:12,897 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1226, 3.6129, 2.3302, 2.9803, 2.8377, 2.3000, 3.0700, 3.0147], + device='cuda:0'), covar=tensor([0.1789, 0.0469, 0.1173, 0.0782, 0.0817, 0.1373, 0.1033, 0.1230], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0242, 0.0339, 0.0311, 0.0301, 0.0345, 0.0346, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 00:17:16,288 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.381e+02 2.801e+02 3.642e+02 5.464e+02, threshold=5.602e+02, percent-clipped=0.0 +2023-02-09 00:17:17,797 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220917.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:21,283 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220922.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:32,916 INFO [train.py:901] (0/4) Epoch 28, batch 2700, loss[loss=0.2024, simple_loss=0.2907, pruned_loss=0.05702, over 8543.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2807, pruned_loss=0.05646, over 1618232.27 frames. ], batch size: 49, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:50,491 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220962.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,034 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,531 INFO [train.py:901] (0/4) Epoch 28, batch 2750, loss[loss=0.2087, simple_loss=0.2902, pruned_loss=0.06362, over 8472.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05671, over 1619039.57 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:11,791 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:29,681 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221013.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:31,038 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 2.908e+02 3.517e+02 7.342e+02, threshold=5.816e+02, percent-clipped=5.0 +2023-02-09 00:18:31,980 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221016.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:43,270 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221032.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:47,247 INFO [train.py:901] (0/4) Epoch 28, batch 2800, loss[loss=0.2039, simple_loss=0.2906, pruned_loss=0.05864, over 8573.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2822, pruned_loss=0.0571, over 1618897.49 frames. 
], batch size: 31, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:57,821 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9179, 1.7399, 2.5532, 1.6717, 1.4627, 2.4605, 0.4679, 1.5924], + device='cuda:0'), covar=tensor([0.1438, 0.1232, 0.0331, 0.1035, 0.2047, 0.0435, 0.1924, 0.1215], + device='cuda:0'), in_proj_covar=tensor([0.0201, 0.0205, 0.0136, 0.0224, 0.0277, 0.0146, 0.0174, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 00:19:22,679 INFO [train.py:901] (0/4) Epoch 28, batch 2850, loss[loss=0.1787, simple_loss=0.2794, pruned_loss=0.03906, over 8245.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2822, pruned_loss=0.05665, over 1619132.58 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:19:43,237 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.366e+02 2.856e+02 3.627e+02 6.501e+02, threshold=5.713e+02, percent-clipped=2.0 +2023-02-09 00:19:53,977 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221128.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:00,673 INFO [train.py:901] (0/4) Epoch 28, batch 2900, loss[loss=0.1982, simple_loss=0.286, pruned_loss=0.05518, over 8647.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2813, pruned_loss=0.05675, over 1616715.40 frames. ], batch size: 34, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:04,302 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5009, 2.0651, 3.9150, 1.5069, 2.7408, 2.0627, 1.6690, 2.7888], + device='cuda:0'), covar=tensor([0.2345, 0.3123, 0.0971, 0.5157, 0.2265, 0.3673, 0.2804, 0.2654], + device='cuda:0'), in_proj_covar=tensor([0.0541, 0.0639, 0.0566, 0.0673, 0.0663, 0.0612, 0.0566, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:20:32,275 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 00:20:33,728 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221184.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:36,504 INFO [train.py:901] (0/4) Epoch 28, batch 2950, loss[loss=0.1739, simple_loss=0.2656, pruned_loss=0.04112, over 8106.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05696, over 1614921.43 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:37,474 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4501, 1.8103, 3.2131, 1.3541, 2.5218, 1.9583, 1.5299, 2.5262], + device='cuda:0'), covar=tensor([0.2254, 0.3100, 0.0853, 0.5411, 0.1924, 0.3599, 0.2959, 0.2232], + device='cuda:0'), in_proj_covar=tensor([0.0541, 0.0640, 0.0567, 0.0674, 0.0663, 0.0612, 0.0566, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:20:55,458 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.299e+02 2.993e+02 3.879e+02 1.208e+03, threshold=5.985e+02, percent-clipped=10.0 +2023-02-09 00:21:01,652 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.29 vs. limit=5.0 +2023-02-09 00:21:13,554 INFO [train.py:901] (0/4) Epoch 28, batch 3000, loss[loss=0.228, simple_loss=0.3153, pruned_loss=0.07031, over 8500.00 frames. 
], tot_loss[loss=0.1967, simple_loss=0.2806, pruned_loss=0.05643, over 1613096.98 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:21:13,555 INFO [train.py:926] (0/4) Computing validation loss +2023-02-09 00:21:31,976 INFO [train.py:935] (0/4) Epoch 28, validation: loss=0.1712, simple_loss=0.2708, pruned_loss=0.03578, over 944034.00 frames. +2023-02-09 00:21:31,978 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6461MB +2023-02-09 00:21:43,325 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1626, 1.4157, 4.3623, 1.4793, 3.8622, 3.6158, 3.9692, 3.8573], + device='cuda:0'), covar=tensor([0.0649, 0.4843, 0.0548, 0.4486, 0.1157, 0.1036, 0.0591, 0.0668], + device='cuda:0'), in_proj_covar=tensor([0.0674, 0.0660, 0.0725, 0.0653, 0.0739, 0.0630, 0.0637, 0.0709], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:21:54,456 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:10,149 INFO [train.py:901] (0/4) Epoch 28, batch 3050, loss[loss=0.2069, simple_loss=0.2864, pruned_loss=0.06366, over 8656.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2808, pruned_loss=0.05704, over 1613487.55 frames. ], batch size: 34, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:22:10,377 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221288.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:11,515 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-09 00:22:18,089 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221299.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:24,685 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7662, 2.2128, 3.1447, 1.6448, 2.5510, 2.1037, 1.9599, 2.5354], + device='cuda:0'), covar=tensor([0.1980, 0.2519, 0.0981, 0.4606, 0.1990, 0.3227, 0.2439, 0.2411], + device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0635, 0.0563, 0.0669, 0.0658, 0.0607, 0.0563, 0.0643], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:22:28,234 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221313.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:29,365 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.361e+02 2.830e+02 3.600e+02 1.199e+03, threshold=5.660e+02, percent-clipped=4.0 +2023-02-09 00:22:45,363 INFO [train.py:901] (0/4) Epoch 28, batch 3100, loss[loss=0.192, simple_loss=0.2824, pruned_loss=0.05083, over 7931.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05669, over 1620113.01 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:18,469 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221381.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:20,627 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221384.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:23,256 INFO [train.py:901] (0/4) Epoch 28, batch 3150, loss[loss=0.1696, simple_loss=0.2558, pruned_loss=0.04175, over 7799.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2802, pruned_loss=0.05608, over 1614450.94 frames. 
], batch size: 19, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:31,509 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3646, 1.6571, 4.3639, 2.0338, 2.5727, 4.9578, 5.1040, 4.3416], + device='cuda:0'), covar=tensor([0.1137, 0.1964, 0.0311, 0.2052, 0.1275, 0.0208, 0.0325, 0.0577], + device='cuda:0'), in_proj_covar=tensor([0.0308, 0.0329, 0.0294, 0.0323, 0.0326, 0.0278, 0.0443, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 00:23:34,619 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8240, 2.4468, 1.8630, 2.3329, 2.1932, 1.6652, 2.1581, 2.3412], + device='cuda:0'), covar=tensor([0.1515, 0.0569, 0.1398, 0.0634, 0.0813, 0.1807, 0.1037, 0.0986], + device='cuda:0'), in_proj_covar=tensor([0.0362, 0.0246, 0.0344, 0.0315, 0.0305, 0.0350, 0.0351, 0.0325], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 00:23:38,999 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221409.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:43,027 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.344e+02 3.031e+02 3.872e+02 9.124e+02, threshold=6.062e+02, percent-clipped=5.0 +2023-02-09 00:24:00,282 INFO [train.py:901] (0/4) Epoch 28, batch 3200, loss[loss=0.1766, simple_loss=0.2594, pruned_loss=0.04691, over 8088.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2787, pruned_loss=0.05574, over 1611026.98 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:21,671 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9736, 1.7573, 2.6323, 1.5759, 2.2426, 2.9209, 2.8914, 2.6292], + device='cuda:0'), covar=tensor([0.0982, 0.1491, 0.0680, 0.1806, 0.1915, 0.0275, 0.0795, 0.0486], + device='cuda:0'), in_proj_covar=tensor([0.0308, 0.0329, 0.0294, 0.0323, 0.0325, 0.0278, 0.0442, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 00:24:35,521 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1301, 1.6255, 3.5677, 1.5989, 2.5533, 3.9475, 4.0311, 3.3768], + device='cuda:0'), covar=tensor([0.1162, 0.1814, 0.0277, 0.2056, 0.0990, 0.0199, 0.0467, 0.0537], + device='cuda:0'), in_proj_covar=tensor([0.0307, 0.0328, 0.0294, 0.0323, 0.0325, 0.0278, 0.0441, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 00:24:36,745 INFO [train.py:901] (0/4) Epoch 28, batch 3250, loss[loss=0.1931, simple_loss=0.2829, pruned_loss=0.05168, over 8521.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2792, pruned_loss=0.05584, over 1614488.84 frames. ], batch size: 28, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:56,686 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.800e+02 3.771e+02 8.910e+02, threshold=5.600e+02, percent-clipped=3.0 +2023-02-09 00:25:04,051 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:12,970 INFO [train.py:901] (0/4) Epoch 28, batch 3300, loss[loss=0.1993, simple_loss=0.2878, pruned_loss=0.05537, over 8526.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2792, pruned_loss=0.05637, over 1611675.03 frames. 
], batch size: 49, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:25:25,047 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221555.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:42,956 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:48,453 INFO [train.py:901] (0/4) Epoch 28, batch 3350, loss[loss=0.2024, simple_loss=0.291, pruned_loss=0.05689, over 8326.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2798, pruned_loss=0.05687, over 1606553.50 frames. ], batch size: 25, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:09,950 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.531e+02 3.062e+02 3.663e+02 8.444e+02, threshold=6.124e+02, percent-clipped=3.0 +2023-02-09 00:26:21,012 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6479, 1.8784, 2.0107, 1.3941, 2.1073, 1.5492, 0.5913, 1.9292], + device='cuda:0'), covar=tensor([0.0663, 0.0442, 0.0340, 0.0658, 0.0452, 0.1010, 0.1080, 0.0340], + device='cuda:0'), in_proj_covar=tensor([0.0475, 0.0412, 0.0365, 0.0461, 0.0396, 0.0552, 0.0404, 0.0444], + device='cuda:0'), out_proj_covar=tensor([1.2578e-04, 1.0687e-04, 9.5010e-05, 1.2038e-04, 1.0348e-04, 1.5392e-04, + 1.0800e-04, 1.1642e-04], device='cuda:0') +2023-02-09 00:26:26,097 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221637.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:26:26,622 INFO [train.py:901] (0/4) Epoch 28, batch 3400, loss[loss=0.1699, simple_loss=0.2533, pruned_loss=0.04326, over 7816.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2801, pruned_loss=0.05684, over 1608273.32 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:43,893 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:27:02,344 INFO [train.py:901] (0/4) Epoch 28, batch 3450, loss[loss=0.2083, simple_loss=0.2927, pruned_loss=0.06191, over 8349.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2788, pruned_loss=0.05605, over 1604426.71 frames. ], batch size: 49, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:21,424 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.306e+02 2.763e+02 3.583e+02 8.756e+02, threshold=5.526e+02, percent-clipped=3.0 +2023-02-09 00:27:39,503 INFO [train.py:901] (0/4) Epoch 28, batch 3500, loss[loss=0.1632, simple_loss=0.2348, pruned_loss=0.04582, over 7692.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2797, pruned_loss=0.05628, over 1607155.77 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:56,086 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-09 00:28:03,519 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 00:28:07,252 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:28:15,815 INFO [train.py:901] (0/4) Epoch 28, batch 3550, loss[loss=0.2211, simple_loss=0.3205, pruned_loss=0.06085, over 8492.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2794, pruned_loss=0.05619, over 1602427.12 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:28:35,058 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. 
limit=2.0 +2023-02-09 00:28:35,275 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.405e+02 2.949e+02 3.672e+02 8.337e+02, threshold=5.897e+02, percent-clipped=3.0 +2023-02-09 00:28:52,589 INFO [train.py:901] (0/4) Epoch 28, batch 3600, loss[loss=0.2102, simple_loss=0.2922, pruned_loss=0.06413, over 8501.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2813, pruned_loss=0.05711, over 1606910.89 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:08,876 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 00:29:09,928 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.7645, 5.9094, 5.0535, 2.5310, 5.1744, 5.6339, 5.3676, 5.4777], + device='cuda:0'), covar=tensor([0.0509, 0.0367, 0.0927, 0.4739, 0.0755, 0.0737, 0.1032, 0.0583], + device='cuda:0'), in_proj_covar=tensor([0.0542, 0.0457, 0.0448, 0.0560, 0.0444, 0.0465, 0.0441, 0.0406], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:29:15,424 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221869.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:28,482 INFO [train.py:901] (0/4) Epoch 28, batch 3650, loss[loss=0.2051, simple_loss=0.2806, pruned_loss=0.06479, over 7966.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2824, pruned_loss=0.05791, over 1608775.83 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:42,354 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:47,071 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.399e+02 3.022e+02 3.885e+02 8.966e+02, threshold=6.044e+02, percent-clipped=2.0 +2023-02-09 00:30:02,964 INFO [train.py:901] (0/4) Epoch 28, batch 3700, loss[loss=0.198, simple_loss=0.2701, pruned_loss=0.06296, over 7422.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2823, pruned_loss=0.05788, over 1611396.39 frames. ], batch size: 17, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:05,071 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 00:30:38,665 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:30:41,397 INFO [train.py:901] (0/4) Epoch 28, batch 3750, loss[loss=0.1759, simple_loss=0.2616, pruned_loss=0.04511, over 7542.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2828, pruned_loss=0.0578, over 1612069.50 frames. 
], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:49,687 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-222000.pt +2023-02-09 00:31:01,421 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.662e+02 3.142e+02 4.083e+02 1.270e+03, threshold=6.284e+02, percent-clipped=8.0 +2023-02-09 00:31:04,457 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9512, 1.5667, 3.4561, 1.5127, 2.4054, 3.8660, 3.9736, 3.2908], + device='cuda:0'), covar=tensor([0.1266, 0.1938, 0.0360, 0.2311, 0.1124, 0.0230, 0.0524, 0.0539], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0330, 0.0295, 0.0325, 0.0326, 0.0279, 0.0445, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 00:31:10,203 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222027.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:31:17,966 INFO [train.py:901] (0/4) Epoch 28, batch 3800, loss[loss=0.2042, simple_loss=0.2831, pruned_loss=0.06262, over 7255.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05839, over 1610243.42 frames. ], batch size: 16, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:31:55,281 INFO [train.py:901] (0/4) Epoch 28, batch 3850, loss[loss=0.1866, simple_loss=0.2596, pruned_loss=0.0568, over 8093.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2837, pruned_loss=0.05878, over 1606633.22 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:32:11,760 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 00:32:13,842 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.208e+02 2.768e+02 3.453e+02 7.901e+02, threshold=5.537e+02, percent-clipped=1.0 +2023-02-09 00:32:15,426 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.2507, 4.1669, 3.8616, 1.9038, 3.7820, 3.8502, 3.7439, 3.6906], + device='cuda:0'), covar=tensor([0.0726, 0.0561, 0.1116, 0.4858, 0.0906, 0.0938, 0.1351, 0.0790], + device='cuda:0'), in_proj_covar=tensor([0.0545, 0.0459, 0.0448, 0.0560, 0.0444, 0.0467, 0.0442, 0.0408], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:32:17,497 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222120.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:32:30,078 INFO [train.py:901] (0/4) Epoch 28, batch 3900, loss[loss=0.2084, simple_loss=0.2936, pruned_loss=0.06166, over 8469.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2837, pruned_loss=0.05883, over 1606805.16 frames. ], batch size: 27, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:32:30,479 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-09 00:33:06,448 INFO [train.py:901] (0/4) Epoch 28, batch 3950, loss[loss=0.1643, simple_loss=0.2481, pruned_loss=0.04025, over 8134.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2836, pruned_loss=0.05851, over 1609648.82 frames. 
], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:17,880 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:20,808 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6373, 2.7079, 1.9992, 2.3767, 2.1495, 1.8191, 2.1315, 2.2742], + device='cuda:0'), covar=tensor([0.1554, 0.0421, 0.1148, 0.0642, 0.0809, 0.1510, 0.1127, 0.1039], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0245, 0.0342, 0.0314, 0.0304, 0.0347, 0.0350, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 00:33:26,088 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.338e+02 2.821e+02 3.606e+02 1.107e+03, threshold=5.643e+02, percent-clipped=4.0 +2023-02-09 00:33:31,796 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:35,387 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3160, 1.2365, 1.5758, 1.0207, 1.0607, 1.5776, 0.6428, 1.2180], + device='cuda:0'), covar=tensor([0.1326, 0.1083, 0.0439, 0.0884, 0.1916, 0.0424, 0.1555, 0.1118], + device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0225, 0.0279, 0.0147, 0.0176, 0.0200], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 00:33:37,688 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 00:33:40,240 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:42,146 INFO [train.py:901] (0/4) Epoch 28, batch 4000, loss[loss=0.2204, simple_loss=0.308, pruned_loss=0.06642, over 8477.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05915, over 1610894.82 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:43,704 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:51,741 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:01,389 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:17,514 INFO [train.py:901] (0/4) Epoch 28, batch 4050, loss[loss=0.2177, simple_loss=0.3042, pruned_loss=0.06559, over 8290.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2853, pruned_loss=0.05916, over 1615082.72 frames. ], batch size: 23, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:34:38,324 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.414e+02 3.092e+02 4.009e+02 1.246e+03, threshold=6.184e+02, percent-clipped=7.0 +2023-02-09 00:34:54,244 INFO [train.py:901] (0/4) Epoch 28, batch 4100, loss[loss=0.179, simple_loss=0.2568, pruned_loss=0.05063, over 7545.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2845, pruned_loss=0.05904, over 1610454.72 frames. 
], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:14,884 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:17,634 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:29,384 INFO [train.py:901] (0/4) Epoch 28, batch 4150, loss[loss=0.2012, simple_loss=0.2887, pruned_loss=0.05686, over 8028.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2832, pruned_loss=0.05808, over 1610359.00 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:49,105 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.330e+02 2.692e+02 3.176e+02 6.436e+02, threshold=5.384e+02, percent-clipped=1.0 +2023-02-09 00:36:07,146 INFO [train.py:901] (0/4) Epoch 28, batch 4200, loss[loss=0.2236, simple_loss=0.3048, pruned_loss=0.07119, over 8492.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2826, pruned_loss=0.05753, over 1610192.25 frames. ], batch size: 29, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:14,730 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 00:36:37,845 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 00:36:41,351 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:36:42,590 INFO [train.py:901] (0/4) Epoch 28, batch 4250, loss[loss=0.1868, simple_loss=0.274, pruned_loss=0.04982, over 7967.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.0574, over 1605790.74 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:44,947 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222491.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:36:53,806 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-09 00:37:00,853 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.539e+02 3.193e+02 4.198e+02 8.289e+02, threshold=6.386e+02, percent-clipped=5.0 +2023-02-09 00:37:01,767 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222516.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:18,009 INFO [train.py:901] (0/4) Epoch 28, batch 4300, loss[loss=0.2172, simple_loss=0.3056, pruned_loss=0.06443, over 8348.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2832, pruned_loss=0.05796, over 1607590.58 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:20,160 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:25,005 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222547.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:39,459 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222567.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:54,080 INFO [train.py:901] (0/4) Epoch 28, batch 4350, loss[loss=0.1986, simple_loss=0.2783, pruned_loss=0.05947, over 8072.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.05765, over 1610175.12 frames. 
], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:54,230 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222588.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:11,695 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 00:38:13,114 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.501e+02 2.979e+02 3.614e+02 7.360e+02, threshold=5.959e+02, percent-clipped=2.0 +2023-02-09 00:38:18,814 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:29,011 INFO [train.py:901] (0/4) Epoch 28, batch 4400, loss[loss=0.2005, simple_loss=0.2796, pruned_loss=0.06067, over 8601.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2818, pruned_loss=0.05735, over 1609770.23 frames. ], batch size: 39, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:38:36,668 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:40,087 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0886, 2.2916, 3.1219, 1.9720, 2.6934, 2.3639, 2.1569, 2.6087], + device='cuda:0'), covar=tensor([0.1427, 0.2311, 0.0753, 0.3569, 0.1407, 0.2523, 0.1891, 0.2005], + device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0640, 0.0568, 0.0675, 0.0664, 0.0613, 0.0567, 0.0647], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 00:38:47,084 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:54,372 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 00:39:01,980 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222682.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:04,790 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1300, 1.4884, 1.7517, 1.4515, 0.9658, 1.5601, 1.7238, 1.5928], + device='cuda:0'), covar=tensor([0.0507, 0.1205, 0.1623, 0.1442, 0.0583, 0.1414, 0.0686, 0.0662], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 00:39:05,979 INFO [train.py:901] (0/4) Epoch 28, batch 4450, loss[loss=0.1872, simple_loss=0.2705, pruned_loss=0.05193, over 7248.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.0581, over 1613068.89 frames. ], batch size: 16, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:24,970 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.353e+02 2.798e+02 3.446e+02 6.111e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-09 00:39:38,914 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.93 vs. limit=5.0 +2023-02-09 00:39:41,229 INFO [train.py:901] (0/4) Epoch 28, batch 4500, loss[loss=0.1849, simple_loss=0.2776, pruned_loss=0.04609, over 8197.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2824, pruned_loss=0.05756, over 1606037.32 frames. 
], batch size: 23, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:44,237 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:45,367 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 00:39:58,788 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-09 00:40:02,744 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222767.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:40:18,336 INFO [train.py:901] (0/4) Epoch 28, batch 4550, loss[loss=0.1499, simple_loss=0.2305, pruned_loss=0.03463, over 7217.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.05733, over 1605768.31 frames. ], batch size: 16, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:40:26,128 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4168, 2.7160, 2.2108, 3.6696, 1.6491, 2.0618, 2.5615, 2.7453], + device='cuda:0'), covar=tensor([0.0706, 0.0731, 0.0824, 0.0344, 0.1090, 0.1183, 0.0788, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0214, 0.0205, 0.0248, 0.0251, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 00:40:37,183 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.324e+02 2.721e+02 3.677e+02 6.861e+02, threshold=5.442e+02, percent-clipped=4.0 +2023-02-09 00:40:53,695 INFO [train.py:901] (0/4) Epoch 28, batch 4600, loss[loss=0.1726, simple_loss=0.2564, pruned_loss=0.04444, over 7807.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2809, pruned_loss=0.0568, over 1606321.03 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:41:11,253 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9197, 1.6382, 2.4832, 1.9894, 2.2561, 1.8889, 1.7476, 1.2160], + device='cuda:0'), covar=tensor([0.7987, 0.6572, 0.2500, 0.4451, 0.3347, 0.5029, 0.2977, 0.5864], + device='cuda:0'), in_proj_covar=tensor([0.0965, 0.1029, 0.0833, 0.0996, 0.1023, 0.0934, 0.0772, 0.0852], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 00:41:27,920 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222885.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:41:29,998 INFO [train.py:901] (0/4) Epoch 28, batch 4650, loss[loss=0.169, simple_loss=0.2487, pruned_loss=0.04462, over 7801.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2815, pruned_loss=0.05737, over 1601179.40 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 16.0 +2023-02-09 00:41:50,685 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 3.099e+02 3.500e+02 7.849e+02, threshold=6.198e+02, percent-clipped=6.0 +2023-02-09 00:41:53,084 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222918.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:02,693 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222932.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:06,739 INFO [train.py:901] (0/4) Epoch 28, batch 4700, loss[loss=0.2068, simple_loss=0.2764, pruned_loss=0.06866, over 7546.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2812, pruned_loss=0.05734, over 1605184.67 frames. 
], batch size: 18, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:06,963 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222938.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:10,436 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222943.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:20,644 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222958.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:24,246 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222963.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:40,836 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:41,331 INFO [train.py:901] (0/4) Epoch 28, batch 4750, loss[loss=0.2459, simple_loss=0.3148, pruned_loss=0.08847, over 6866.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2814, pruned_loss=0.05755, over 1605715.76 frames. ], batch size: 71, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:50,012 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223000.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:51,379 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:54,709 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 00:42:58,165 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 00:43:02,878 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.412e+02 2.807e+02 3.833e+02 7.869e+02, threshold=5.613e+02, percent-clipped=5.0 +2023-02-09 00:43:18,664 INFO [train.py:901] (0/4) Epoch 28, batch 4800, loss[loss=0.1958, simple_loss=0.2858, pruned_loss=0.05288, over 8468.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2815, pruned_loss=0.05766, over 1607974.01 frames. ], batch size: 29, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:43:25,194 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223047.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:36,303 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1303, 1.5493, 1.8562, 1.4022, 0.9916, 1.5782, 1.8549, 1.6843], + device='cuda:0'), covar=tensor([0.0538, 0.1224, 0.1577, 0.1537, 0.0597, 0.1463, 0.0675, 0.0647], + device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 00:43:48,814 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 00:43:49,662 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:53,652 INFO [train.py:901] (0/4) Epoch 28, batch 4850, loss[loss=0.1629, simple_loss=0.2474, pruned_loss=0.03918, over 7922.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2817, pruned_loss=0.05746, over 1611843.73 frames. 
], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:13,747 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.508e+02 3.332e+02 4.408e+02 9.671e+02, threshold=6.663e+02, percent-clipped=7.0 +2023-02-09 00:44:31,161 INFO [train.py:901] (0/4) Epoch 28, batch 4900, loss[loss=0.1736, simple_loss=0.2483, pruned_loss=0.04942, over 7419.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.0572, over 1608959.24 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:57,024 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 00:45:07,000 INFO [train.py:901] (0/4) Epoch 28, batch 4950, loss[loss=0.2179, simple_loss=0.3011, pruned_loss=0.06741, over 8027.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2823, pruned_loss=0.05702, over 1612808.58 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:09,246 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223191.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:45:26,410 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.334e+02 2.712e+02 3.560e+02 9.309e+02, threshold=5.424e+02, percent-clipped=3.0 +2023-02-09 00:45:42,287 INFO [train.py:901] (0/4) Epoch 28, batch 5000, loss[loss=0.2226, simple_loss=0.307, pruned_loss=0.0691, over 8132.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2809, pruned_loss=0.05638, over 1609447.49 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:56,107 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:14,348 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223281.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:18,927 INFO [train.py:901] (0/4) Epoch 28, batch 5050, loss[loss=0.1782, simple_loss=0.2579, pruned_loss=0.04919, over 7416.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05676, over 1611356.68 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:28,800 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223302.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:29,640 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223303.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:32,879 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 00:46:38,379 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.274e+02 2.931e+02 3.573e+02 6.090e+02, threshold=5.862e+02, percent-clipped=1.0 +2023-02-09 00:46:42,968 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-09 00:46:46,945 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223328.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:48,957 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223331.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:53,557 INFO [train.py:901] (0/4) Epoch 28, batch 5100, loss[loss=0.2065, simple_loss=0.2991, pruned_loss=0.05697, over 8249.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05635, over 1615566.31 frames. 
], batch size: 24, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:46:59,472 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223346.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:47:31,193 INFO [train.py:901] (0/4) Epoch 28, batch 5150, loss[loss=0.2021, simple_loss=0.2903, pruned_loss=0.05694, over 8449.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05675, over 1618656.53 frames. ], batch size: 27, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:47:50,499 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.349e+02 2.964e+02 3.516e+02 1.122e+03, threshold=5.928e+02, percent-clipped=3.0
+2023-02-09 00:47:51,405 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223417.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:47:57,694 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:47:59,104 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5509, 4.5448, 4.1312, 2.2812, 4.0106, 4.2136, 4.1434, 3.9839],
+ device='cuda:0'), covar=tensor([0.0705, 0.0466, 0.0966, 0.4431, 0.0870, 0.0990, 0.1244, 0.0723],
+ device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0454, 0.0449, 0.0554, 0.0443, 0.0463, 0.0441, 0.0407],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 00:48:05,974 INFO [train.py:901] (0/4) Epoch 28, batch 5200, loss[loss=0.2003, simple_loss=0.2952, pruned_loss=0.05276, over 8248.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2823, pruned_loss=0.05766, over 1617835.73 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:48:11,634 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223446.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:48:22,132 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223461.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:48:27,856 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6321, 2.4981, 3.1709, 2.6014, 3.1135, 2.7440, 2.6066, 2.0509],
+ device='cuda:0'), covar=tensor([0.5867, 0.5269, 0.2179, 0.4308, 0.2904, 0.3153, 0.1881, 0.5961],
+ device='cuda:0'), in_proj_covar=tensor([0.0965, 0.1031, 0.0836, 0.0999, 0.1026, 0.0936, 0.0773, 0.0855],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-09 00:48:31,235 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25
+2023-02-09 00:48:44,077 INFO [train.py:901] (0/4) Epoch 28, batch 5250, loss[loss=0.2291, simple_loss=0.315, pruned_loss=0.07161, over 8475.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2835, pruned_loss=0.05849, over 1619757.39 frames. ], batch size: 49, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:49:03,773 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.237e+02 2.837e+02 3.561e+02 7.405e+02, threshold=5.674e+02, percent-clipped=6.0
+2023-02-09 00:49:17,141 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223535.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 00:49:19,101 INFO [train.py:901] (0/4) Epoch 28, batch 5300, loss[loss=0.21, simple_loss=0.294, pruned_loss=0.06301, over 8108.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2834, pruned_loss=0.05855, over 1620881.39 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:49:21,379 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223541.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:49:55,534 INFO [train.py:901] (0/4) Epoch 28, batch 5350, loss[loss=0.2048, simple_loss=0.2893, pruned_loss=0.06008, over 8100.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2835, pruned_loss=0.05876, over 1618661.53 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:50:01,806 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223596.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:50:15,668 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.840e+02 3.657e+02 7.209e+02, threshold=5.681e+02, percent-clipped=3.0
+2023-02-09 00:50:30,881 INFO [train.py:901] (0/4) Epoch 28, batch 5400, loss[loss=0.2009, simple_loss=0.2788, pruned_loss=0.0615, over 7984.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05871, over 1620250.11 frames. ], batch size: 21, lr: 2.68e-03, grad_scale: 4.0
+2023-02-09 00:50:39,719 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223650.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 00:50:55,924 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223673.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:51:06,378 INFO [train.py:901] (0/4) Epoch 28, batch 5450, loss[loss=0.1988, simple_loss=0.2845, pruned_loss=0.05658, over 8312.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2829, pruned_loss=0.05778, over 1621939.29 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 4.0
+2023-02-09 00:51:13,820 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223698.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:51:16,779 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223702.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:51:28,424 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.405e+02 2.886e+02 3.694e+02 6.837e+02, threshold=5.773e+02, percent-clipped=3.0
+2023-02-09 00:51:28,696 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223717.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:51:31,421 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225
+2023-02-09 00:51:36,553 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223727.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:51:44,550 INFO [train.py:901] (0/4) Epoch 28, batch 5500, loss[loss=0.192, simple_loss=0.2596, pruned_loss=0.0622, over 7794.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2822, pruned_loss=0.05751, over 1622449.36 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 4.0
+2023-02-09 00:51:47,335 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223742.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:52:18,976 INFO [train.py:901] (0/4) Epoch 28, batch 5550, loss[loss=0.1742, simple_loss=0.2529, pruned_loss=0.04779, over 7421.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.283, pruned_loss=0.05815, over 1612300.98 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 4.0
+2023-02-09 00:52:25,548 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223797.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:52:31,280 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5968, 1.6202, 1.8008, 1.5473, 0.9526, 1.5382, 1.9773, 1.9972],
+ device='cuda:0'), covar=tensor([0.0510, 0.1215, 0.1688, 0.1523, 0.0639, 0.1498, 0.0706, 0.0613],
+ device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0162, 0.0102, 0.0164, 0.0113, 0.0147],
+ device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-09 00:52:39,667 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.462e+02 3.010e+02 3.574e+02 1.274e+03, threshold=6.020e+02, percent-clipped=3.0
+2023-02-09 00:52:43,470 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:52:55,575 INFO [train.py:901] (0/4) Epoch 28, batch 5600, loss[loss=0.2162, simple_loss=0.2882, pruned_loss=0.07208, over 7528.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2815, pruned_loss=0.05739, over 1612808.11 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:52:57,099 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6402, 1.9195, 2.8794, 1.5098, 2.2032, 2.0176, 1.7194, 2.1842],
+ device='cuda:0'), covar=tensor([0.2089, 0.2655, 0.1017, 0.4833, 0.1924, 0.3365, 0.2444, 0.2260],
+ device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0636, 0.0565, 0.0670, 0.0662, 0.0611, 0.0563, 0.0645],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 00:53:31,702 INFO [train.py:901] (0/4) Epoch 28, batch 5650, loss[loss=0.1864, simple_loss=0.2776, pruned_loss=0.04762, over 8468.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.05737, over 1613076.75 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:53:41,215 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875
+2023-02-09 00:53:44,200 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223906.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 00:53:51,307 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.313e+02 2.789e+02 3.752e+02 1.102e+03, threshold=5.578e+02, percent-clipped=3.0
+2023-02-09 00:53:52,429 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0
+2023-02-09 00:54:01,611 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223931.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 00:54:06,150 INFO [train.py:901] (0/4) Epoch 28, batch 5700, loss[loss=0.2215, simple_loss=0.3144, pruned_loss=0.06434, over 8500.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05786, over 1614888.93 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:54:07,515 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223940.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:54:43,236 INFO [train.py:901] (0/4) Epoch 28, batch 5750, loss[loss=0.1958, simple_loss=0.2881, pruned_loss=0.05172, over 8526.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05717, over 1614870.42 frames. ], batch size: 28, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:54:48,705 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875
+2023-02-09 00:54:51,646 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-224000.pt
+2023-02-09 00:55:04,150 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.263e+02 2.713e+02 3.241e+02 8.661e+02, threshold=5.425e+02, percent-clipped=3.0
+2023-02-09 00:55:18,775 INFO [train.py:901] (0/4) Epoch 28, batch 5800, loss[loss=0.1758, simple_loss=0.2593, pruned_loss=0.04609, over 7804.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2827, pruned_loss=0.05738, over 1615326.94 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:55:30,600 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224055.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:55:55,798 INFO [train.py:901] (0/4) Epoch 28, batch 5850, loss[loss=0.1995, simple_loss=0.2888, pruned_loss=0.05509, over 8455.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2822, pruned_loss=0.05697, over 1613611.79 frames. ], batch size: 27, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:56:15,668 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.538e+02 3.148e+02 4.118e+02 7.183e+02, threshold=6.296e+02, percent-clipped=12.0
+2023-02-09 00:56:30,207 INFO [train.py:901] (0/4) Epoch 28, batch 5900, loss[loss=0.2252, simple_loss=0.2879, pruned_loss=0.08129, over 7782.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2824, pruned_loss=0.0568, over 1617531.29 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:57:06,196 INFO [train.py:901] (0/4) Epoch 28, batch 5950, loss[loss=0.2349, simple_loss=0.3047, pruned_loss=0.08262, over 7030.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2832, pruned_loss=0.05717, over 1619420.47 frames. ], batch size: 71, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:57:16,703 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0
+2023-02-09 00:57:18,144 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0
+2023-02-09 00:57:19,333 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5794, 1.4633, 1.8288, 1.2343, 1.2518, 1.8056, 0.2428, 1.1796],
+ device='cuda:0'), covar=tensor([0.1459, 0.1122, 0.0382, 0.0771, 0.2259, 0.0436, 0.1763, 0.1147],
+ device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0207, 0.0138, 0.0224, 0.0279, 0.0148, 0.0175, 0.0200],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-09 00:57:28,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.486e+02 3.110e+02 3.888e+02 7.674e+02, threshold=6.220e+02, percent-clipped=4.0
+2023-02-09 00:57:28,889 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-09 00:57:37,738 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224230.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:57:43,112 INFO [train.py:901] (0/4) Epoch 28, batch 6000, loss[loss=0.2005, simple_loss=0.2864, pruned_loss=0.05724, over 8256.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.05784, over 1614447.06 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:57:43,113 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-09 00:57:56,803 INFO [train.py:935] (0/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2708, pruned_loss=0.03603, over 944034.00 frames.
+2023-02-09 00:57:56,804 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6461MB
+2023-02-09 00:58:05,912 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0
+2023-02-09 00:58:33,320 INFO [train.py:901] (0/4) Epoch 28, batch 6050, loss[loss=0.1902, simple_loss=0.2655, pruned_loss=0.05746, over 7811.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2831, pruned_loss=0.05803, over 1613756.31 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:58:38,452 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0678, 1.4641, 1.7030, 1.3440, 0.9198, 1.4432, 1.8238, 1.7271],
+ device='cuda:0'), covar=tensor([0.0578, 0.1272, 0.1731, 0.1556, 0.0616, 0.1530, 0.0736, 0.0607],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146],
+ device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-09 00:58:49,962 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224311.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:58:54,048 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.462e+02 3.109e+02 3.867e+02 1.260e+03, threshold=6.217e+02, percent-clipped=5.0
+2023-02-09 00:59:08,880 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224336.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 00:59:10,076 INFO [train.py:901] (0/4) Epoch 28, batch 6100, loss[loss=0.2167, simple_loss=0.2988, pruned_loss=0.06733, over 8635.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2843, pruned_loss=0.05899, over 1614512.72 frames. ], batch size: 34, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:59:26,306 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225
+2023-02-09 00:59:46,740 INFO [train.py:901] (0/4) Epoch 28, batch 6150, loss[loss=0.1938, simple_loss=0.2919, pruned_loss=0.04787, over 8302.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2833, pruned_loss=0.058, over 1613450.07 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 00:59:48,616 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.20 vs. limit=5.0
+2023-02-09 00:59:54,613 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6321, 1.9381, 2.0185, 1.4154, 2.1901, 1.4671, 0.7790, 1.9347],
+ device='cuda:0'), covar=tensor([0.0795, 0.0460, 0.0347, 0.0739, 0.0530, 0.1108, 0.1014, 0.0451],
+ device='cuda:0'), in_proj_covar=tensor([0.0471, 0.0409, 0.0363, 0.0458, 0.0393, 0.0550, 0.0402, 0.0440],
+ device='cuda:0'), out_proj_covar=tensor([1.2457e-04, 1.0580e-04, 9.4517e-05, 1.1960e-04, 1.0283e-04, 1.5344e-04,
+ 1.0723e-04, 1.1521e-04], device='cuda:0')
+2023-02-09 01:00:06,959 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.362e+02 2.823e+02 3.455e+02 8.158e+02, threshold=5.645e+02, percent-clipped=2.0
+2023-02-09 01:00:21,356 INFO [train.py:901] (0/4) Epoch 28, batch 6200, loss[loss=0.163, simple_loss=0.2362, pruned_loss=0.04487, over 7554.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.05763, over 1609299.28 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 01:00:58,230 INFO [train.py:901] (0/4) Epoch 28, batch 6250, loss[loss=0.2008, simple_loss=0.268, pruned_loss=0.06681, over 7690.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2816, pruned_loss=0.05709, over 1609660.09 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0
+2023-02-09 01:00:59,404 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.64 vs. limit=5.0
+2023-02-09 01:01:18,582 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.593e+02 3.043e+02 4.250e+02 9.084e+02, threshold=6.087e+02, percent-clipped=11.0
+2023-02-09 01:01:33,266 INFO [train.py:901] (0/4) Epoch 28, batch 6300, loss[loss=0.2024, simple_loss=0.2855, pruned_loss=0.05963, over 8238.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2819, pruned_loss=0.05735, over 1606921.67 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:01:50,186 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1874, 1.9654, 2.3491, 2.0481, 2.2690, 2.2513, 2.1395, 1.2472],
+ device='cuda:0'), covar=tensor([0.5665, 0.4698, 0.2163, 0.3920, 0.2546, 0.3311, 0.1964, 0.5177],
+ device='cuda:0'), in_proj_covar=tensor([0.0971, 0.1034, 0.0841, 0.1006, 0.1029, 0.0940, 0.0776, 0.0857],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-09 01:02:00,131 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224574.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:02:04,365 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224580.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:02:04,427 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0468, 1.5952, 1.4030, 1.5269, 1.3399, 1.2642, 1.2585, 1.2987],
+ device='cuda:0'), covar=tensor([0.1218, 0.0575, 0.1389, 0.0656, 0.0807, 0.1646, 0.0996, 0.0910],
+ device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0245, 0.0342, 0.0315, 0.0303, 0.0348, 0.0352, 0.0325],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:02:10,454 INFO [train.py:901] (0/4) Epoch 28, batch 6350, loss[loss=0.2095, simple_loss=0.3048, pruned_loss=0.05715, over 8357.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2832, pruned_loss=0.05817, over 1611737.79 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:02:30,929 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.315e+02 2.720e+02 3.259e+02 6.733e+02, threshold=5.440e+02, percent-clipped=2.0
+2023-02-09 01:02:45,886 INFO [train.py:901] (0/4) Epoch 28, batch 6400, loss[loss=0.2031, simple_loss=0.2815, pruned_loss=0.06234, over 7802.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.0576, over 1613843.66 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:03:21,511 INFO [train.py:901] (0/4) Epoch 28, batch 6450, loss[loss=0.2208, simple_loss=0.3042, pruned_loss=0.06868, over 8545.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.05737, over 1614994.14 frames. ], batch size: 31, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:03:22,404 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224689.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:03:39,013 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2485, 2.0321, 2.5615, 2.1747, 2.5083, 2.3316, 2.1828, 1.3518],
+ device='cuda:0'), covar=tensor([0.5633, 0.4948, 0.2015, 0.3914, 0.2493, 0.3392, 0.1991, 0.5555],
+ device='cuda:0'), in_proj_covar=tensor([0.0966, 0.1030, 0.0837, 0.1000, 0.1025, 0.0935, 0.0773, 0.0852],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-09 01:03:43,003 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.303e+02 2.784e+02 3.485e+02 7.082e+02, threshold=5.567e+02, percent-clipped=7.0
+2023-02-09 01:03:57,598 INFO [train.py:901] (0/4) Epoch 28, batch 6500, loss[loss=0.2041, simple_loss=0.2934, pruned_loss=0.05739, over 8467.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.0574, over 1616698.59 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:04:02,118 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224744.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:04:11,957 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224758.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:04:32,262 INFO [train.py:901] (0/4) Epoch 28, batch 6550, loss[loss=0.2297, simple_loss=0.3035, pruned_loss=0.07791, over 8304.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.284, pruned_loss=0.05857, over 1616443.94 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:04:47,796 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92
+2023-02-09 01:04:53,997 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.489e+02 3.184e+02 3.768e+02 7.222e+02, threshold=6.368e+02, percent-clipped=1.0
+2023-02-09 01:05:08,028 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875
+2023-02-09 01:05:09,352 INFO [train.py:901] (0/4) Epoch 28, batch 6600, loss[loss=0.2287, simple_loss=0.3122, pruned_loss=0.07264, over 8353.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2844, pruned_loss=0.05894, over 1618167.46 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:05:26,402 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.74 vs. limit=5.0
+2023-02-09 01:05:44,398 INFO [train.py:901] (0/4) Epoch 28, batch 6650, loss[loss=0.202, simple_loss=0.2892, pruned_loss=0.0574, over 8468.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05769, over 1611817.62 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:05:50,128 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8817, 2.6525, 2.0483, 2.4025, 2.3412, 1.8501, 2.2144, 2.4096],
+ device='cuda:0'), covar=tensor([0.1430, 0.0416, 0.1179, 0.0660, 0.0723, 0.1412, 0.1021, 0.1028],
+ device='cuda:0'), in_proj_covar=tensor([0.0362, 0.0247, 0.0346, 0.0317, 0.0304, 0.0350, 0.0354, 0.0327],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:06:04,780 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.463e+02 2.971e+02 3.895e+02 9.422e+02, threshold=5.941e+02, percent-clipped=4.0
+2023-02-09 01:06:10,943 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224924.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:06:21,167 INFO [train.py:901] (0/4) Epoch 28, batch 6700, loss[loss=0.2165, simple_loss=0.3105, pruned_loss=0.06124, over 8524.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2828, pruned_loss=0.05776, over 1607211.10 frames. ], batch size: 28, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:06:22,611 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1769, 4.1845, 3.7510, 2.0396, 3.6818, 3.8016, 3.7203, 3.6368],
+ device='cuda:0'), covar=tensor([0.0756, 0.0539, 0.1030, 0.4327, 0.0962, 0.1039, 0.1274, 0.0874],
+ device='cuda:0'), in_proj_covar=tensor([0.0550, 0.0462, 0.0456, 0.0566, 0.0448, 0.0471, 0.0448, 0.0415],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:06:26,245 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224945.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:06:30,454 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8926, 1.6114, 2.8635, 1.4053, 2.3342, 3.0604, 3.2527, 2.6365],
+ device='cuda:0'), covar=tensor([0.1051, 0.1575, 0.0377, 0.2091, 0.0877, 0.0290, 0.0568, 0.0548],
+ device='cuda:0'), in_proj_covar=tensor([0.0307, 0.0326, 0.0294, 0.0322, 0.0324, 0.0276, 0.0441, 0.0306],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-09 01:06:44,453 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224970.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:06:57,025 INFO [train.py:901] (0/4) Epoch 28, batch 6750, loss[loss=0.1386, simple_loss=0.2222, pruned_loss=0.02747, over 7436.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2817, pruned_loss=0.05642, over 1611217.80 frames. ], batch size: 17, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:06:57,472 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0
+2023-02-09 01:06:58,667 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7239, 2.5333, 1.9188, 2.2940, 2.2105, 1.6946, 2.1574, 2.2097],
+ device='cuda:0'), covar=tensor([0.1551, 0.0467, 0.1176, 0.0721, 0.0780, 0.1602, 0.1089, 0.1144],
+ device='cuda:0'), in_proj_covar=tensor([0.0362, 0.0247, 0.0345, 0.0317, 0.0304, 0.0350, 0.0354, 0.0327],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:07:01,390 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4382, 2.6919, 3.0253, 1.7242, 3.3417, 2.1555, 1.5604, 2.4029],
+ device='cuda:0'), covar=tensor([0.0941, 0.0418, 0.0400, 0.0994, 0.0551, 0.0874, 0.1172, 0.0613],
+ device='cuda:0'), in_proj_covar=tensor([0.0473, 0.0409, 0.0364, 0.0459, 0.0396, 0.0552, 0.0403, 0.0442],
+ device='cuda:0'), out_proj_covar=tensor([1.2520e-04, 1.0596e-04, 9.4811e-05, 1.2003e-04, 1.0343e-04, 1.5400e-04,
+ 1.0766e-04, 1.1576e-04], device='cuda:0')
+2023-02-09 01:07:17,011 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.343e+02 2.979e+02 3.883e+02 6.136e+02, threshold=5.958e+02, percent-clipped=2.0
+2023-02-09 01:07:28,014 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875
+2023-02-09 01:07:32,087 INFO [train.py:901] (0/4) Epoch 28, batch 6800, loss[loss=0.2307, simple_loss=0.3102, pruned_loss=0.07565, over 8366.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2824, pruned_loss=0.05687, over 1613586.86 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:07:32,960 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225039.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:07:57,284 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6084, 1.6745, 1.9955, 1.6981, 1.0419, 1.7599, 2.1559, 1.9747],
+ device='cuda:0'), covar=tensor([0.0513, 0.1235, 0.1581, 0.1427, 0.0627, 0.1406, 0.0685, 0.0620],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0147],
+ device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-09 01:08:08,384 INFO [train.py:901] (0/4) Epoch 28, batch 6850, loss[loss=0.1674, simple_loss=0.2613, pruned_loss=0.03672, over 8030.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2827, pruned_loss=0.05733, over 1607846.70 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:08:08,456 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225088.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:08:18,039 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225102.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:08:18,712 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125
+2023-02-09 01:08:28,499 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.283e+02 2.996e+02 3.907e+02 8.918e+02, threshold=5.992e+02, percent-clipped=3.0
+2023-02-09 01:08:31,362 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225121.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:08:42,882 INFO [train.py:901] (0/4) Epoch 28, batch 6900, loss[loss=0.1742, simple_loss=0.2595, pruned_loss=0.04441, over 7922.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05684, over 1606909.67 frames. ], batch size: 20, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:08:47,495 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0
+2023-02-09 01:08:58,650 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225160.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:09:18,985 INFO [train.py:901] (0/4) Epoch 28, batch 6950, loss[loss=0.2179, simple_loss=0.2991, pruned_loss=0.06833, over 8582.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.281, pruned_loss=0.0573, over 1606123.48 frames. ], batch size: 39, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:09:30,334 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225203.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:09:30,870 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775
+2023-02-09 01:09:40,019 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.456e+02 2.946e+02 3.977e+02 8.721e+02, threshold=5.892e+02, percent-clipped=6.0
+2023-02-09 01:09:40,242 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225217.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:09:54,782 INFO [train.py:901] (0/4) Epoch 28, batch 7000, loss[loss=0.2124, simple_loss=0.2877, pruned_loss=0.06852, over 7641.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2811, pruned_loss=0.05744, over 1609044.07 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:10:31,206 INFO [train.py:901] (0/4) Epoch 28, batch 7050, loss[loss=0.2148, simple_loss=0.2995, pruned_loss=0.06504, over 8468.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2807, pruned_loss=0.05738, over 1610005.39 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:10:36,318 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225295.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:10:41,256 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.6841, 4.6875, 4.1896, 2.3639, 4.0843, 4.3287, 4.2530, 4.1532],
+ device='cuda:0'), covar=tensor([0.0643, 0.0440, 0.0932, 0.4201, 0.0846, 0.0927, 0.1060, 0.0758],
+ device='cuda:0'), in_proj_covar=tensor([0.0549, 0.0463, 0.0457, 0.0564, 0.0448, 0.0470, 0.0447, 0.0415],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:10:52,627 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.402e+02 2.844e+02 3.449e+02 6.425e+02, threshold=5.688e+02, percent-clipped=2.0
+2023-02-09 01:10:55,656 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225320.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:11:08,211 INFO [train.py:901] (0/4) Epoch 28, batch 7100, loss[loss=0.223, simple_loss=0.3143, pruned_loss=0.06583, over 8709.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.281, pruned_loss=0.0571, over 1613871.16 frames. ], batch size: 34, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:11:43,051 INFO [train.py:901] (0/4) Epoch 28, batch 7150, loss[loss=0.2217, simple_loss=0.3075, pruned_loss=0.06792, over 8342.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.281, pruned_loss=0.05712, over 1611726.18 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:12:05,413 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.377e+02 2.906e+02 3.542e+02 6.036e+02, threshold=5.811e+02, percent-clipped=2.0
+2023-02-09 01:12:21,605 INFO [train.py:901] (0/4) Epoch 28, batch 7200, loss[loss=0.1367, simple_loss=0.2272, pruned_loss=0.02312, over 8078.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2811, pruned_loss=0.05692, over 1614685.87 frames. ], batch size: 21, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:12:35,170 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225457.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:12:36,600 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225459.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:12:40,500 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225465.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:12:44,932 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5998, 2.0467, 3.1856, 1.5041, 2.4172, 2.0034, 1.7246, 2.4690],
+ device='cuda:0'), covar=tensor([0.1981, 0.2728, 0.1087, 0.4797, 0.1952, 0.3429, 0.2542, 0.2353],
+ device='cuda:0'), in_proj_covar=tensor([0.0538, 0.0638, 0.0563, 0.0673, 0.0663, 0.0614, 0.0563, 0.0647],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:12:46,198 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225473.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:12:51,117 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225480.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:12:53,940 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225484.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:12:56,555 INFO [train.py:901] (0/4) Epoch 28, batch 7250, loss[loss=0.1823, simple_loss=0.2499, pruned_loss=0.05732, over 7702.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05772, over 1616793.77 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:13:03,605 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225498.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:13:07,601 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225504.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:13:16,363 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.486e+02 3.022e+02 3.617e+02 8.325e+02, threshold=6.044e+02, percent-clipped=6.0
+2023-02-09 01:13:21,951 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225523.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:13:32,772 INFO [train.py:901] (0/4) Epoch 28, batch 7300, loss[loss=0.1806, simple_loss=0.2635, pruned_loss=0.04879, over 7710.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2834, pruned_loss=0.0578, over 1620594.37 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:13:34,183 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225540.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:13:56,395 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0
+2023-02-09 01:14:00,398 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225577.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 01:14:02,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225580.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:14:07,683 INFO [train.py:901] (0/4) Epoch 28, batch 7350, loss[loss=0.219, simple_loss=0.2936, pruned_loss=0.07218, over 8593.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2832, pruned_loss=0.05823, over 1621177.20 frames. ], batch size: 34, lr: 2.67e-03, grad_scale: 8.0
+2023-02-09 01:14:24,556 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125
+2023-02-09 01:14:27,898 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.380e+02 2.753e+02 3.463e+02 7.224e+02, threshold=5.506e+02, percent-clipped=3.0
+2023-02-09 01:14:29,532 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225619.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:14:42,951 INFO [train.py:901] (0/4) Epoch 28, batch 7400, loss[loss=0.1818, simple_loss=0.2706, pruned_loss=0.04651, over 7800.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2832, pruned_loss=0.05846, over 1621525.63 frames. ], batch size: 20, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:14:42,964 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95
+2023-02-09 01:15:18,728 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2776, 1.3571, 3.3919, 1.1195, 3.0127, 2.8042, 3.0669, 2.9808],
+ device='cuda:0'), covar=tensor([0.0832, 0.4317, 0.0821, 0.4472, 0.1326, 0.1144, 0.0799, 0.0952],
+ device='cuda:0'), in_proj_covar=tensor([0.0683, 0.0664, 0.0735, 0.0660, 0.0747, 0.0636, 0.0645, 0.0717],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:15:19,317 INFO [train.py:901] (0/4) Epoch 28, batch 7450, loss[loss=0.2015, simple_loss=0.2876, pruned_loss=0.05774, over 8107.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2831, pruned_loss=0.05867, over 1619114.97 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:15:23,699 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7783, 1.8613, 2.1604, 1.8481, 0.9873, 1.9939, 2.2077, 2.4095],
+ device='cuda:0'), covar=tensor([0.0467, 0.1139, 0.1533, 0.1308, 0.0581, 0.1288, 0.0633, 0.0517],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0113, 0.0146],
+ device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-09 01:15:25,013 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875
+2023-02-09 01:15:40,024 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.396e+02 3.007e+02 3.866e+02 7.466e+02, threshold=6.014e+02, percent-clipped=6.0
+2023-02-09 01:15:54,483 INFO [train.py:901] (0/4) Epoch 28, batch 7500, loss[loss=0.2199, simple_loss=0.298, pruned_loss=0.07088, over 8330.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05813, over 1620068.30 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:16:32,508 INFO [train.py:901] (0/4) Epoch 28, batch 7550, loss[loss=0.1925, simple_loss=0.2977, pruned_loss=0.04362, over 8301.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2839, pruned_loss=0.0587, over 1617038.25 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:16:39,082 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4354, 2.3442, 1.7879, 2.0663, 2.0678, 1.5216, 1.9837, 2.0210],
+ device='cuda:0'), covar=tensor([0.1604, 0.0453, 0.1262, 0.0716, 0.0741, 0.1659, 0.1016, 0.1010],
+ device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0244, 0.0341, 0.0312, 0.0300, 0.0345, 0.0348, 0.0320],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:16:41,875 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225801.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:16:52,725 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.388e+02 3.127e+02 4.485e+02 1.321e+03, threshold=6.254e+02, percent-clipped=11.0
+2023-02-09 01:16:57,627 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225824.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:05,959 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225836.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:07,145 INFO [train.py:901] (0/4) Epoch 28, batch 7600, loss[loss=0.1683, simple_loss=0.2402, pruned_loss=0.04813, over 7802.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2835, pruned_loss=0.05834, over 1620414.21 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:17:23,282 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225861.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:27,275 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225867.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:34,161 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225875.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:37,629 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225880.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:40,886 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225884.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:17:43,480 INFO [train.py:901] (0/4) Epoch 28, batch 7650, loss[loss=0.1984, simple_loss=0.2751, pruned_loss=0.06083, over 7811.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2827, pruned_loss=0.05836, over 1619007.11 frames. ], batch size: 20, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:17:51,611 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225900.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:18:01,187 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225913.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:18:03,351 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225916.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:18:03,835 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.353e+02 2.789e+02 3.444e+02 7.654e+02, threshold=5.579e+02, percent-clipped=1.0
+2023-02-09 01:18:06,735 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225921.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 01:18:18,649 INFO [train.py:901] (0/4) Epoch 28, batch 7700, loss[loss=0.1995, simple_loss=0.2877, pruned_loss=0.05567, over 7277.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05754, over 1618470.04 frames. ], batch size: 16, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:18:19,539 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225939.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:18:31,345 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0
+2023-02-09 01:18:44,258 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225
+2023-02-09 01:18:44,513 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4937, 2.4042, 3.0701, 2.5460, 3.0578, 2.5027, 2.4929, 2.0680],
+ device='cuda:0'), covar=tensor([0.5765, 0.4993, 0.2206, 0.4159, 0.2672, 0.3410, 0.1828, 0.5536],
+ device='cuda:0'), in_proj_covar=tensor([0.0964, 0.1031, 0.0836, 0.0999, 0.1025, 0.0936, 0.0772, 0.0855],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-09 01:18:49,254 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225982.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:18:53,148 INFO [train.py:901] (0/4) Epoch 28, batch 7750, loss[loss=0.2002, simple_loss=0.281, pruned_loss=0.05967, over 8685.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05718, over 1619903.93 frames. ], batch size: 49, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:19:01,902 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225999.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:19:02,550 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-226000.pt
+2023-02-09 01:19:15,708 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.439e+02 2.815e+02 3.514e+02 7.333e+02, threshold=5.630e+02, percent-clipped=1.0
+2023-02-09 01:19:29,663 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226036.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 01:19:30,729 INFO [train.py:901] (0/4) Epoch 28, batch 7800, loss[loss=0.2396, simple_loss=0.3157, pruned_loss=0.0818, over 8125.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05694, over 1615171.85 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:19:45,505 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226059.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:20:05,563 INFO [train.py:901] (0/4) Epoch 28, batch 7850, loss[loss=0.1676, simple_loss=0.2582, pruned_loss=0.03855, over 7963.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2795, pruned_loss=0.05646, over 1607744.62 frames. ], batch size: 21, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:20:25,276 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.405e+02 3.030e+02 3.828e+02 1.208e+03, threshold=6.060e+02, percent-clipped=4.0
+2023-02-09 01:20:39,662 INFO [train.py:901] (0/4) Epoch 28, batch 7900, loss[loss=0.1904, simple_loss=0.2816, pruned_loss=0.04958, over 8473.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2799, pruned_loss=0.0563, over 1611916.71 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:21:02,890 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226172.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:13,382 INFO [train.py:901] (0/4) Epoch 28, batch 7950, loss[loss=0.2032, simple_loss=0.2693, pruned_loss=0.06852, over 7650.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05682, over 1615593.82 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 16.0
+2023-02-09 01:21:18,316 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226195.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:19,697 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226197.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:33,033 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.474e+02 2.920e+02 3.612e+02 7.690e+02, threshold=5.839e+02, percent-clipped=4.0
+2023-02-09 01:21:35,323 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226220.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:36,616 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6945, 1.5298, 1.9924, 1.5056, 1.1324, 1.6367, 2.2157, 1.9904],
+ device='cuda:0'), covar=tensor([0.0484, 0.1264, 0.1595, 0.1471, 0.0616, 0.1406, 0.0622, 0.0625],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0146],
+ device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-09 01:21:37,935 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:47,691 INFO [train.py:901] (0/4) Epoch 28, batch 8000, loss[loss=0.1667, simple_loss=0.2482, pruned_loss=0.04255, over 8098.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05696, over 1618603.81 frames. ], batch size: 21, lr: 2.66e-03, grad_scale: 16.0
+2023-02-09 01:21:47,907 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226238.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:52,819 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2729, 1.0326, 2.3812, 0.9525, 2.1051, 2.0315, 2.1874, 2.1365],
+ device='cuda:0'), covar=tensor([0.0831, 0.3163, 0.1015, 0.3644, 0.1071, 0.0969, 0.0683, 0.0774],
+ device='cuda:0'), in_proj_covar=tensor([0.0688, 0.0668, 0.0738, 0.0664, 0.0753, 0.0641, 0.0648, 0.0721],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:21:57,001 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226251.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:21:59,706 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226255.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:22:00,948 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226257.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:22:05,129 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226263.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:22:17,266 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226280.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:22:22,557 INFO [train.py:901] (0/4) Epoch 28, batch 8050, loss[loss=0.1689, simple_loss=0.2442, pruned_loss=0.04685, over 7797.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2806, pruned_loss=0.05756, over 1596546.09 frames. ], batch size: 19, lr: 2.66e-03, grad_scale: 16.0
+2023-02-09 01:22:25,375 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226292.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 01:22:30,122 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3759, 1.5350, 4.5984, 1.7259, 4.0937, 3.8397, 4.1943, 4.0757],
+ device='cuda:0'), covar=tensor([0.0630, 0.4538, 0.0540, 0.4313, 0.1092, 0.0967, 0.0546, 0.0679],
+ device='cuda:0'), in_proj_covar=tensor([0.0685, 0.0665, 0.0735, 0.0661, 0.0749, 0.0638, 0.0644, 0.0718],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:22:37,474 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0
+2023-02-09 01:22:42,674 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.434e+02 3.095e+02 3.696e+02 6.520e+02, threshold=6.190e+02, percent-clipped=3.0
+2023-02-09 01:22:42,894 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226317.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 01:22:45,974 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-28.pt
+2023-02-09 01:22:57,715 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-09 01:23:01,693 INFO [train.py:901] (0/4) Epoch 29, batch 0, loss[loss=0.2242, simple_loss=0.3075, pruned_loss=0.07048, over 8626.00 frames. ], tot_loss[loss=0.2242, simple_loss=0.3075, pruned_loss=0.07048, over 8626.00 frames. ], batch size: 34, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:23:01,694 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-09 01:23:13,268 INFO [train.py:935] (0/4) Epoch 29, validation: loss=0.1705, simple_loss=0.2705, pruned_loss=0.03528, over 944034.00 frames.
+2023-02-09 01:23:13,269 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6461MB
+2023-02-09 01:23:19,034 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9453, 1.9733, 2.3098, 2.0591, 1.1589, 1.9153, 2.4261, 2.6898],
+ device='cuda:0'), covar=tensor([0.0432, 0.1033, 0.1455, 0.1195, 0.0547, 0.1273, 0.0547, 0.0482],
+ device='cuda:0'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0146],
+ device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:0')
+2023-02-09 01:23:26,241 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226339.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:23:26,283 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7866, 1.7119, 2.2612, 1.4629, 1.4218, 2.2626, 0.4294, 1.4404],
+ device='cuda:0'), covar=tensor([0.1493, 0.1191, 0.0377, 0.1049, 0.2274, 0.0386, 0.1855, 0.1213],
+ device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0206, 0.0138, 0.0224, 0.0278, 0.0147, 0.0173, 0.0200],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:0')
+2023-02-09 01:23:29,665 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-09 01:23:49,913 INFO [train.py:901] (0/4) Epoch 29, batch 50, loss[loss=0.1936, simple_loss=0.275, pruned_loss=0.05607, over 7805.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2798, pruned_loss=0.05705, over 366295.24 frames. ], batch size: 20, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:23:50,840 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:24:06,035 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-09 01:24:12,507 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226403.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:24:22,930 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.293e+02 2.936e+02 3.721e+02 6.222e+02, threshold=5.872e+02, percent-clipped=1.0
+2023-02-09 01:24:25,771 INFO [train.py:901] (0/4) Epoch 29, batch 100, loss[loss=0.1716, simple_loss=0.2597, pruned_loss=0.04176, over 8194.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05763, over 644294.06 frames. ], batch size: 23, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:24:30,610 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-09 01:25:02,686 INFO [train.py:901] (0/4) Epoch 29, batch 150, loss[loss=0.1783, simple_loss=0.2628, pruned_loss=0.04695, over 7978.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05811, over 859065.55 frames. ], batch size: 21, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:25:34,581 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.438e+02 2.916e+02 4.111e+02 7.524e+02, threshold=5.832e+02, percent-clipped=2.0
+2023-02-09 01:25:35,520 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226518.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:25:37,497 INFO [train.py:901] (0/4) Epoch 29, batch 200, loss[loss=0.1851, simple_loss=0.2753, pruned_loss=0.04747, over 8456.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2836, pruned_loss=0.05867, over 1026619.96 frames. ], batch size: 27, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:25:51,888 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6616, 2.6352, 1.9779, 2.3662, 2.2181, 1.7185, 2.1191, 2.2531],
+ device='cuda:0'), covar=tensor([0.1536, 0.0451, 0.1310, 0.0673, 0.0884, 0.1637, 0.1112, 0.1050],
+ device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0245, 0.0342, 0.0313, 0.0301, 0.0348, 0.0349, 0.0322],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:26:13,674 INFO [train.py:901] (0/4) Epoch 29, batch 250, loss[loss=0.2013, simple_loss=0.2844, pruned_loss=0.05907, over 8466.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.0583, over 1155270.58 frames. ], batch size: 29, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:26:26,050 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-09 01:26:30,591 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7062, 2.1310, 3.1264, 1.5714, 2.3866, 2.1799, 1.7875, 2.4580],
+ device='cuda:0'), covar=tensor([0.1950, 0.2700, 0.0972, 0.4613, 0.1928, 0.3280, 0.2506, 0.2210],
+ device='cuda:0'), in_proj_covar=tensor([0.0542, 0.0640, 0.0564, 0.0674, 0.0666, 0.0616, 0.0566, 0.0648],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:26:31,144 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:26:31,306 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226595.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:26:33,831 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-09 01:26:42,336 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6777, 2.5754, 1.8712, 2.3065, 2.1963, 1.5843, 2.1701, 2.2334],
+ device='cuda:0'), covar=tensor([0.1525, 0.0418, 0.1212, 0.0682, 0.0828, 0.1666, 0.1007, 0.1028],
+ device='cuda:0'), in_proj_covar=tensor([0.0355, 0.0244, 0.0341, 0.0312, 0.0300, 0.0347, 0.0348, 0.0320],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:26:44,587 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2205, 2.0075, 2.4509, 2.0891, 2.4209, 2.2949, 2.1250, 1.2896],
+ device='cuda:0'), covar=tensor([0.5572, 0.5012, 0.2225, 0.3966, 0.2524, 0.3276, 0.1943, 0.5578],
+ device='cuda:0'), in_proj_covar=tensor([0.0966, 0.1031, 0.0838, 0.0999, 0.1026, 0.0936, 0.0773, 0.0852],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-09 01:26:46,394 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.514e+02 3.003e+02 3.645e+02 8.891e+02, threshold=6.006e+02, percent-clipped=9.0
+2023-02-09 01:26:48,787 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226620.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:26:49,258 INFO [train.py:901] (0/4) Epoch 29, batch 300, loss[loss=0.2184, simple_loss=0.2968, pruned_loss=0.06995, over 8504.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.284, pruned_loss=0.05842, over 1261071.08 frames. ], batch size: 28, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:26:54,272 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226628.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:27:12,144 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226653.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:27:25,801 INFO [train.py:901] (0/4) Epoch 29, batch 350, loss[loss=0.2007, simple_loss=0.2814, pruned_loss=0.06002, over 7648.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2839, pruned_loss=0.05799, over 1343812.25 frames. ], batch size: 19, lr: 2.62e-03, grad_scale: 16.0
+2023-02-09 01:27:32,882 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.36 vs. limit=2.0
+2023-02-09 01:27:54,191 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226710.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:27:58,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.400e+02 2.862e+02 3.557e+02 6.632e+02, threshold=5.725e+02, percent-clipped=2.0
+2023-02-09 01:28:01,714 INFO [train.py:901] (0/4) Epoch 29, batch 400, loss[loss=0.2033, simple_loss=0.2942, pruned_loss=0.05622, over 8328.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.283, pruned_loss=0.05705, over 1405327.29 frames. ], batch size: 25, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:28:14,639 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0703, 1.6300, 1.4250, 1.5331, 1.3449, 1.3110, 1.3372, 1.3354],
+ device='cuda:0'), covar=tensor([0.1295, 0.0537, 0.1464, 0.0694, 0.0848, 0.1710, 0.0985, 0.0897],
+ device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0244, 0.0342, 0.0313, 0.0301, 0.0347, 0.0349, 0.0321],
+ device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:0')
+2023-02-09 01:28:37,572 INFO [train.py:901] (0/4) Epoch 29, batch 450, loss[loss=0.214, simple_loss=0.3032, pruned_loss=0.06238, over 8333.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2814, pruned_loss=0.05639, over 1448392.96 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:28:39,871 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226774.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:28:49,335 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2443, 1.4132, 3.3987, 1.1712, 3.0212, 2.8805, 3.1149, 3.0326],
+ device='cuda:0'), covar=tensor([0.0873, 0.3946, 0.0820, 0.4186, 0.1423, 0.1070, 0.0773, 0.0916],
+ device='cuda:0'), in_proj_covar=tensor([0.0683, 0.0664, 0.0736, 0.0658, 0.0746, 0.0633, 0.0644, 0.0715],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:28:58,215 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226799.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:29:11,134 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.449e+02 2.964e+02 3.856e+02 9.700e+02, threshold=5.929e+02, percent-clipped=9.0
+2023-02-09 01:29:13,796 INFO [train.py:901] (0/4) Epoch 29, batch 500, loss[loss=0.2723, simple_loss=0.3349, pruned_loss=0.1048, over 7011.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.05666, over 1485104.72 frames. ], batch size: 72, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:29:13,946 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2326, 1.4471, 3.3871, 1.1588, 3.0283, 2.8408, 3.0815, 3.0260],
+ device='cuda:0'), covar=tensor([0.0857, 0.4046, 0.0825, 0.4321, 0.1270, 0.1059, 0.0814, 0.0871],
+ device='cuda:0'), in_proj_covar=tensor([0.0683, 0.0663, 0.0736, 0.0658, 0.0746, 0.0633, 0.0645, 0.0715],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:0')
+2023-02-09 01:29:48,217 INFO [train.py:901] (0/4) Epoch 29, batch 550, loss[loss=0.1695, simple_loss=0.2529, pruned_loss=0.04308, over 7810.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.281, pruned_loss=0.05632, over 1516348.00 frames. ], batch size: 20, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:30:21,909 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.486e+02 3.156e+02 4.092e+02 1.034e+03, threshold=6.313e+02, percent-clipped=6.0
+2023-02-09 01:30:24,630 INFO [train.py:901] (0/4) Epoch 29, batch 600, loss[loss=0.1869, simple_loss=0.2715, pruned_loss=0.05113, over 7790.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2821, pruned_loss=0.05686, over 1538741.34 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:30:28,553 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.05 vs. limit=5.0
+2023-02-09 01:30:35,120 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3189, 1.5849, 4.2729, 2.1680, 2.7086, 4.7743, 4.8944, 4.1803],
+ device='cuda:0'), covar=tensor([0.1152, 0.2008, 0.0333, 0.1815, 0.1135, 0.0230, 0.0595, 0.0592],
+ device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0327, 0.0295, 0.0325, 0.0327, 0.0278, 0.0444, 0.0309],
+ device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:0')
+2023-02-09 01:30:38,681 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3731, 2.3144, 3.0256, 2.5518, 3.1210, 2.5001, 2.3061, 2.0561],
+ device='cuda:0'), covar=tensor([0.6014, 0.5550, 0.2296, 0.4432, 0.2778, 0.3512, 0.2024, 0.5895],
+ device='cuda:0'), in_proj_covar=tensor([0.0965, 0.1033, 0.0838, 0.0999, 0.1026, 0.0936, 0.0772, 0.0852],
+ device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:0')
+2023-02-09 01:30:43,085 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-09 01:30:56,518 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226966.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:30:59,760 INFO [train.py:901] (0/4) Epoch 29, batch 650, loss[loss=0.1747, simple_loss=0.2479, pruned_loss=0.05069, over 7710.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.0565, over 1559149.99 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:31:13,854 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226991.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:31:25,436 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227007.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 01:31:32,755 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.425e+02 2.942e+02 3.859e+02 6.314e+02, threshold=5.885e+02, percent-clipped=1.0
+2023-02-09 01:31:36,265 INFO [train.py:901] (0/4) Epoch 29, batch 700, loss[loss=0.1673, simple_loss=0.2623, pruned_loss=0.03615, over 8238.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2821, pruned_loss=0.05663, over 1572679.32 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:31:56,849 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6340, 1.7491, 1.5192, 2.3159, 0.9861, 1.4209, 1.6697, 1.8173],
+ device='cuda:0'), covar=tensor([0.0900, 0.0833, 0.1134, 0.0385, 0.1107, 0.1418, 0.0814, 0.0773],
+ device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0214, 0.0204, 0.0249, 0.0251, 0.0206],
+ device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:0')
+2023-02-09 01:32:12,700 INFO [train.py:901] (0/4) Epoch 29, batch 750, loss[loss=0.2002, simple_loss=0.2889, pruned_loss=0.05579, over 8103.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.282, pruned_loss=0.05663, over 1581478.93 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 16.0
+2023-02-09 01:32:31,397 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625
+2023-02-09 01:32:40,245 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training.
Duration: 26.32775 +2023-02-09 01:32:44,348 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.547e+02 3.055e+02 4.023e+02 1.198e+03, threshold=6.109e+02, percent-clipped=3.0 +2023-02-09 01:32:47,763 INFO [train.py:901] (0/4) Epoch 29, batch 800, loss[loss=0.1771, simple_loss=0.2738, pruned_loss=0.0402, over 8250.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.281, pruned_loss=0.05624, over 1590400.40 frames. ], batch size: 24, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:33:24,585 INFO [train.py:901] (0/4) Epoch 29, batch 850, loss[loss=0.2077, simple_loss=0.293, pruned_loss=0.0612, over 8454.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05642, over 1595832.33 frames. ], batch size: 49, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:33:57,034 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.463e+02 2.886e+02 3.949e+02 8.845e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 01:33:59,158 INFO [train.py:901] (0/4) Epoch 29, batch 900, loss[loss=0.1557, simple_loss=0.2491, pruned_loss=0.03114, over 7525.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2802, pruned_loss=0.05596, over 1604060.00 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:34:35,617 INFO [train.py:901] (0/4) Epoch 29, batch 950, loss[loss=0.1853, simple_loss=0.2706, pruned_loss=0.04997, over 8027.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2812, pruned_loss=0.05658, over 1608748.34 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:04,862 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-09 01:35:09,007 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.581e+02 3.033e+02 3.905e+02 1.035e+03, threshold=6.066e+02, percent-clipped=4.0 +2023-02-09 01:35:11,169 INFO [train.py:901] (0/4) Epoch 29, batch 1000, loss[loss=0.2136, simple_loss=0.2821, pruned_loss=0.07253, over 8239.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2814, pruned_loss=0.05664, over 1614332.78 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:32,311 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:35:39,825 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-09 01:35:47,253 INFO [train.py:901] (0/4) Epoch 29, batch 1050, loss[loss=0.2214, simple_loss=0.3149, pruned_loss=0.06394, over 8181.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2821, pruned_loss=0.05681, over 1617999.33 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:52,715 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-09 01:36:22,054 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.373e+02 3.020e+02 3.651e+02 1.051e+03, threshold=6.040e+02, percent-clipped=1.0 +2023-02-09 01:36:23,043 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8655, 1.6742, 2.4783, 1.6818, 1.4407, 2.3766, 0.4703, 1.6063], + device='cuda:0'), covar=tensor([0.1566, 0.1420, 0.0368, 0.1038, 0.2380, 0.0474, 0.2041, 0.1296], + device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0224, 0.0278, 0.0147, 0.0173, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 01:36:24,330 INFO [train.py:901] (0/4) Epoch 29, batch 1100, loss[loss=0.1907, simple_loss=0.2655, pruned_loss=0.05795, over 7663.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2807, pruned_loss=0.05615, over 1616235.04 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:36:30,390 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227429.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:52,864 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9699, 2.1102, 2.2823, 2.0353, 1.1796, 2.0424, 2.3046, 2.6270], + device='cuda:0'), covar=tensor([0.0421, 0.1094, 0.1570, 0.1287, 0.0573, 0.1307, 0.0614, 0.0504], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0163, 0.0102, 0.0164, 0.0114, 0.0147], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 01:36:54,943 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3139, 2.1695, 1.7918, 2.0512, 1.8562, 1.5364, 1.7885, 1.7845], + device='cuda:0'), covar=tensor([0.1206, 0.0443, 0.1128, 0.0533, 0.0672, 0.1478, 0.0848, 0.0780], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0246, 0.0344, 0.0314, 0.0303, 0.0349, 0.0352, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 01:36:56,368 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227466.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:59,550 INFO [train.py:901] (0/4) Epoch 29, batch 1150, loss[loss=0.1907, simple_loss=0.2761, pruned_loss=0.05263, over 8501.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2808, pruned_loss=0.05587, over 1617699.69 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:06,476 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-09 01:37:34,267 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.320e+02 2.698e+02 3.625e+02 7.425e+02, threshold=5.396e+02, percent-clipped=3.0 +2023-02-09 01:37:36,428 INFO [train.py:901] (0/4) Epoch 29, batch 1200, loss[loss=0.1741, simple_loss=0.2557, pruned_loss=0.04626, over 7925.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2796, pruned_loss=0.05522, over 1612576.80 frames. 
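The `WARNING [train.py:1067] ... Exclude cut with ID ... from training. Duration: ...` lines show lhotse cuts being filtered out before batching; every duration reported in this log is either under about one second or over roughly 25 seconds. A sketch of the kind of duration filter the recipe applies, with illustrative bounds that are assumptions rather than the recipe's exact values:

```python
import logging

def remove_short_and_long_utt(c) -> bool:
    # Keep only cuts in a trainable duration range; too-short cuts carry
    # almost no supervision and very long ones risk OOM. The bounds here
    # are illustrative assumptions, not the recipe's verbatim values.
    if c.duration < 1.0 or c.duration > 20.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# train_cuts is a lhotse CutSet; filter() keeps cuts where the predicate holds.
train_cuts = train_cuts.filter(remove_short_and_long_utt)
```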
], batch size: 20, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:57,387 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5858, 2.0861, 3.2078, 1.4730, 2.5849, 2.0802, 1.6716, 2.5348], + device='cuda:0'), covar=tensor([0.2032, 0.2620, 0.0985, 0.4832, 0.1890, 0.3492, 0.2566, 0.2298], + device='cuda:0'), in_proj_covar=tensor([0.0540, 0.0640, 0.0565, 0.0672, 0.0667, 0.0618, 0.0566, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:38:11,533 INFO [train.py:901] (0/4) Epoch 29, batch 1250, loss[loss=0.2216, simple_loss=0.302, pruned_loss=0.07065, over 8575.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2803, pruned_loss=0.05606, over 1613472.91 frames. ], batch size: 31, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:38:46,455 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.398e+02 2.828e+02 3.393e+02 7.704e+02, threshold=5.657e+02, percent-clipped=4.0 +2023-02-09 01:38:48,672 INFO [train.py:901] (0/4) Epoch 29, batch 1300, loss[loss=0.1667, simple_loss=0.2556, pruned_loss=0.03889, over 8074.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.28, pruned_loss=0.05573, over 1618466.34 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:39:24,557 INFO [train.py:901] (0/4) Epoch 29, batch 1350, loss[loss=0.163, simple_loss=0.2449, pruned_loss=0.04053, over 7546.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2805, pruned_loss=0.05624, over 1620929.17 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:39:52,816 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.7221, 1.4707, 4.9020, 1.8136, 4.4577, 4.0955, 4.4297, 4.3435], + device='cuda:0'), covar=tensor([0.0544, 0.4749, 0.0440, 0.4298, 0.0857, 0.0827, 0.0518, 0.0586], + device='cuda:0'), in_proj_covar=tensor([0.0680, 0.0664, 0.0737, 0.0657, 0.0745, 0.0632, 0.0641, 0.0714], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:39:52,826 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227711.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:39:58,209 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.335e+02 2.785e+02 3.650e+02 1.055e+03, threshold=5.570e+02, percent-clipped=4.0 +2023-02-09 01:40:00,382 INFO [train.py:901] (0/4) Epoch 29, batch 1400, loss[loss=0.1891, simple_loss=0.2735, pruned_loss=0.05234, over 8477.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2793, pruned_loss=0.05566, over 1620420.49 frames. ], batch size: 27, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:40:01,320 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=227722.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:02,248 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.97 vs. 
limit=5.0 +2023-02-09 01:40:11,989 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2493, 3.6080, 2.5076, 3.1598, 2.9288, 2.1827, 2.9538, 3.1283], + device='cuda:0'), covar=tensor([0.1778, 0.0461, 0.1106, 0.0664, 0.0834, 0.1480, 0.1098, 0.1180], + device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0247, 0.0344, 0.0315, 0.0304, 0.0348, 0.0352, 0.0324], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 01:40:20,617 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=227747.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:38,107 INFO [train.py:901] (0/4) Epoch 29, batch 1450, loss[loss=0.1845, simple_loss=0.2571, pruned_loss=0.05598, over 7242.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2792, pruned_loss=0.05601, over 1616089.67 frames. ], batch size: 16, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:40:39,473 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227773.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:47,996 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 01:41:11,905 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.369e+02 2.795e+02 3.434e+02 1.018e+03, threshold=5.589e+02, percent-clipped=3.0 +2023-02-09 01:41:14,107 INFO [train.py:901] (0/4) Epoch 29, batch 1500, loss[loss=0.1935, simple_loss=0.2836, pruned_loss=0.05174, over 8604.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2805, pruned_loss=0.05629, over 1619357.28 frames. ], batch size: 34, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:41:31,885 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:41:48,977 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.64 vs. limit=5.0 +2023-02-09 01:41:51,483 INFO [train.py:901] (0/4) Epoch 29, batch 1550, loss[loss=0.1772, simple_loss=0.2631, pruned_loss=0.04565, over 7975.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05674, over 1619696.12 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:42:04,423 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227888.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:42:25,548 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.452e+02 3.275e+02 4.864e+02 1.208e+03, threshold=6.551e+02, percent-clipped=17.0 +2023-02-09 01:42:27,701 INFO [train.py:901] (0/4) Epoch 29, batch 1600, loss[loss=0.2186, simple_loss=0.3073, pruned_loss=0.06493, over 8609.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2805, pruned_loss=0.05636, over 1618512.46 frames. ], batch size: 31, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:04,959 INFO [train.py:901] (0/4) Epoch 29, batch 1650, loss[loss=0.1921, simple_loss=0.2789, pruned_loss=0.05262, over 8194.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2811, pruned_loss=0.05652, over 1617926.25 frames. 
], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:26,177 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-228000.pt +2023-02-09 01:43:39,885 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.401e+02 2.687e+02 3.396e+02 6.045e+02, threshold=5.374e+02, percent-clipped=0.0 +2023-02-09 01:43:42,082 INFO [train.py:901] (0/4) Epoch 29, batch 1700, loss[loss=0.183, simple_loss=0.2663, pruned_loss=0.04983, over 8082.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2808, pruned_loss=0.05613, over 1617164.64 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:45,197 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228025.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:06,274 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228055.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:18,095 INFO [train.py:901] (0/4) Epoch 29, batch 1750, loss[loss=0.1927, simple_loss=0.2675, pruned_loss=0.05901, over 7926.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.0564, over 1613731.31 frames. ], batch size: 20, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:44:38,634 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:52,736 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.425e+02 2.925e+02 3.463e+02 6.679e+02, threshold=5.849e+02, percent-clipped=2.0 +2023-02-09 01:44:55,477 INFO [train.py:901] (0/4) Epoch 29, batch 1800, loss[loss=0.2156, simple_loss=0.2903, pruned_loss=0.07042, over 8575.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2813, pruned_loss=0.05668, over 1613733.06 frames. ], batch size: 49, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:45:11,249 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:12,124 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-09 01:45:28,616 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228169.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:29,250 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228170.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:29,719 INFO [train.py:901] (0/4) Epoch 29, batch 1850, loss[loss=0.2177, simple_loss=0.2926, pruned_loss=0.0714, over 7925.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2827, pruned_loss=0.05758, over 1618168.49 frames. ], batch size: 20, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:45:42,838 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228189.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:46:03,754 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.509e+02 2.932e+02 3.424e+02 5.958e+02, threshold=5.864e+02, percent-clipped=1.0 +2023-02-09 01:46:05,871 INFO [train.py:901] (0/4) Epoch 29, batch 1900, loss[loss=0.1533, simple_loss=0.2347, pruned_loss=0.03592, over 7770.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2822, pruned_loss=0.05723, over 1616225.78 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:46:29,148 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.43 vs. 
limit=5.0 +2023-02-09 01:46:32,895 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-09 01:46:38,538 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 01:46:41,408 INFO [train.py:901] (0/4) Epoch 29, batch 1950, loss[loss=0.193, simple_loss=0.2885, pruned_loss=0.04875, over 8447.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2818, pruned_loss=0.05726, over 1615060.74 frames. ], batch size: 27, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:46:50,974 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 01:47:05,321 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228304.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:10,124 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 01:47:16,245 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.422e+02 3.056e+02 3.762e+02 8.552e+02, threshold=6.111e+02, percent-clipped=4.0 +2023-02-09 01:47:18,246 INFO [train.py:901] (0/4) Epoch 29, batch 2000, loss[loss=0.1997, simple_loss=0.2869, pruned_loss=0.05621, over 8524.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05725, over 1613990.64 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:47:36,281 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228347.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:39,115 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:44,568 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2928, 2.5967, 2.9491, 1.5997, 3.2690, 1.8484, 1.4964, 2.1899], + device='cuda:0'), covar=tensor([0.0861, 0.0431, 0.0293, 0.0891, 0.0535, 0.0949, 0.1181, 0.0642], + device='cuda:0'), in_proj_covar=tensor([0.0476, 0.0414, 0.0369, 0.0460, 0.0397, 0.0552, 0.0403, 0.0443], + device='cuda:0'), out_proj_covar=tensor([1.2611e-04, 1.0730e-04, 9.6024e-05, 1.2017e-04, 1.0384e-04, 1.5379e-04, + 1.0767e-04, 1.1589e-04], device='cuda:0') +2023-02-09 01:47:52,204 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228369.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:53,548 INFO [train.py:901] (0/4) Epoch 29, batch 2050, loss[loss=0.1871, simple_loss=0.2757, pruned_loss=0.04929, over 8460.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.281, pruned_loss=0.05676, over 1615077.39 frames. ], batch size: 29, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:48:25,964 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.333e+02 2.866e+02 3.600e+02 5.490e+02, threshold=5.733e+02, percent-clipped=0.0 +2023-02-09 01:48:28,131 INFO [train.py:901] (0/4) Epoch 29, batch 2100, loss[loss=0.2378, simple_loss=0.3115, pruned_loss=0.08203, over 8479.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05689, over 1613202.16 frames. 
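The `scaling.py` lines of the form `Whitening: num_groups=..., num_channels=..., metric=... vs. limit=...` are a diagnostic on how close each channel group's activation covariance is to a scaled identity; the associated penalty only activates once the metric exceeds the limit, which is why most logged values sit below it. A hedged reconstruction of one such metric (it equals 1.0 for perfectly white features; this is an interpretation, not `scaling.py` verbatim):

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    # x: (num_frames, num_channels) activations. For each channel group,
    # form the covariance C and score sum(C**2) / (n * mean(diag C)**2),
    # which is 1.0 when C is proportional to the identity and grows as
    # the channels become correlated or unevenly scaled.
    num_frames, num_channels = x.shape
    n = num_channels // num_groups
    xg = x.reshape(num_frames, num_groups, n)
    scores = []
    for k in range(num_groups):
        feats = xg[:, k, :]
        feats = feats - feats.mean(dim=0)
        cov = (feats.T @ feats) / num_frames
        scores.append((cov ** 2).sum() / (n * cov.diag().mean() ** 2))
    return torch.stack(scores).mean().item()

x = torch.randn(1000, 96)
print(f"Whitening: num_groups=8, num_channels=96, metric={whitening_metric(x, 8):.2f}")
```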
], batch size: 39, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:48:32,356 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:43,922 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228442.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:48,021 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228447.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:48:50,751 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228451.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:59,127 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5728, 1.8847, 2.8606, 1.5386, 2.2596, 1.9846, 1.6822, 2.3051], + device='cuda:0'), covar=tensor([0.2136, 0.2904, 0.1090, 0.4937, 0.1999, 0.3548, 0.2676, 0.2298], + device='cuda:0'), in_proj_covar=tensor([0.0543, 0.0642, 0.0568, 0.0675, 0.0667, 0.0617, 0.0566, 0.0650], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:49:04,310 INFO [train.py:901] (0/4) Epoch 29, batch 2150, loss[loss=0.1867, simple_loss=0.2658, pruned_loss=0.05381, over 7813.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2819, pruned_loss=0.05705, over 1611736.15 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:49:05,173 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5864, 1.5336, 4.7961, 1.8327, 4.2762, 4.0413, 4.3369, 4.2031], + device='cuda:0'), covar=tensor([0.0557, 0.4658, 0.0474, 0.4107, 0.1070, 0.0888, 0.0529, 0.0659], + device='cuda:0'), in_proj_covar=tensor([0.0686, 0.0669, 0.0744, 0.0664, 0.0750, 0.0639, 0.0647, 0.0721], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:49:14,153 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:49:37,708 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.659e+02 3.270e+02 3.951e+02 1.171e+03, threshold=6.540e+02, percent-clipped=10.0 +2023-02-09 01:49:39,913 INFO [train.py:901] (0/4) Epoch 29, batch 2200, loss[loss=0.1756, simple_loss=0.2739, pruned_loss=0.03863, over 8470.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05665, over 1611788.95 frames. ], batch size: 25, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:49:47,027 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3508, 1.5309, 4.5631, 1.7009, 4.0246, 3.8058, 4.1351, 4.0057], + device='cuda:0'), covar=tensor([0.0624, 0.4602, 0.0518, 0.4356, 0.1065, 0.0940, 0.0594, 0.0668], + device='cuda:0'), in_proj_covar=tensor([0.0684, 0.0666, 0.0742, 0.0662, 0.0747, 0.0637, 0.0644, 0.0719], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:50:06,281 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228557.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:08,437 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228560.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:16,509 INFO [train.py:901] (0/4) Epoch 29, batch 2250, loss[loss=0.1664, simple_loss=0.2476, pruned_loss=0.04262, over 8053.00 frames. 
], tot_loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05624, over 1607187.19 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:50:26,603 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228585.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:44,222 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0412, 1.7336, 4.3838, 1.7604, 3.0075, 4.9105, 5.2309, 3.9115], + device='cuda:0'), covar=tensor([0.1679, 0.2424, 0.0378, 0.2653, 0.1054, 0.0282, 0.0476, 0.0911], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0327, 0.0297, 0.0325, 0.0329, 0.0279, 0.0447, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 01:50:50,343 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.350e+02 2.877e+02 3.583e+02 6.549e+02, threshold=5.755e+02, percent-clipped=1.0 +2023-02-09 01:50:52,571 INFO [train.py:901] (0/4) Epoch 29, batch 2300, loss[loss=0.1656, simple_loss=0.2449, pruned_loss=0.04313, over 7418.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2793, pruned_loss=0.05591, over 1607966.75 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:50:53,452 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5404, 1.4211, 1.8357, 1.2778, 1.1819, 1.8064, 0.3342, 1.2266], + device='cuda:0'), covar=tensor([0.1227, 0.1133, 0.0387, 0.0788, 0.2232, 0.0480, 0.1681, 0.1121], + device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0206, 0.0138, 0.0223, 0.0278, 0.0149, 0.0173, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 01:51:06,312 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-09 01:51:22,789 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.2360, 2.1809, 5.3969, 2.3506, 4.8815, 4.5692, 4.9305, 4.8032], + device='cuda:0'), covar=tensor([0.0576, 0.4258, 0.0439, 0.4040, 0.0991, 0.0991, 0.0528, 0.0627], + device='cuda:0'), in_proj_covar=tensor([0.0686, 0.0669, 0.0746, 0.0666, 0.0751, 0.0640, 0.0647, 0.0722], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:51:28,773 INFO [train.py:901] (0/4) Epoch 29, batch 2350, loss[loss=0.2456, simple_loss=0.3225, pruned_loss=0.08436, over 8770.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.281, pruned_loss=0.05704, over 1610128.05 frames. ], batch size: 40, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:51:43,129 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228691.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:51:45,871 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228695.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:02,330 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.434e+02 3.194e+02 3.873e+02 7.294e+02, threshold=6.388e+02, percent-clipped=4.0 +2023-02-09 01:52:04,277 INFO [train.py:901] (0/4) Epoch 29, batch 2400, loss[loss=0.2158, simple_loss=0.2979, pruned_loss=0.06689, over 8237.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2808, pruned_loss=0.0573, over 1604690.68 frames. 
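The frequent `attn_weights_entropy` tensor dumps are another health check, this time on the attention distributions: an entropy-style statistic per head that collapses toward zero if a head degenerates to attending a single position. A sketch of that kind of statistic, offered as an interpretation rather than the module's exact computation:

```python
import torch

def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
    # attn: (num_heads, query_len, key_len) attention weights whose rows
    # sum to 1. Returns one entropy value per head, averaged over queries;
    # values near zero mean a head has collapsed onto a single key position.
    eps = 1.0e-20
    ent = -(attn * (attn + eps).log()).sum(dim=-1)  # (num_heads, query_len)
    return ent.mean(dim=-1)

attn = torch.softmax(torch.randn(8, 50, 50), dim=-1)
print(attn_weights_entropy(attn))
```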
], batch size: 24, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:52:17,436 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228740.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:34,952 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228765.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:39,605 INFO [train.py:901] (0/4) Epoch 29, batch 2450, loss[loss=0.1819, simple_loss=0.2671, pruned_loss=0.04833, over 8478.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2802, pruned_loss=0.05686, over 1607090.21 frames. ], batch size: 25, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:52:55,034 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228791.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:53:05,501 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228806.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:08,258 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228810.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:10,301 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228813.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:13,500 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.278e+02 2.630e+02 3.527e+02 5.802e+02, threshold=5.259e+02, percent-clipped=0.0 +2023-02-09 01:53:16,190 INFO [train.py:901] (0/4) Epoch 29, batch 2500, loss[loss=0.1982, simple_loss=0.2844, pruned_loss=0.05602, over 8236.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.281, pruned_loss=0.05769, over 1607495.52 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:53:28,185 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228838.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:49,912 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-09 01:53:51,004 INFO [train.py:901] (0/4) Epoch 29, batch 2550, loss[loss=0.1909, simple_loss=0.2715, pruned_loss=0.05511, over 8144.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2804, pruned_loss=0.05715, over 1606500.94 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:54:17,030 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228906.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:54:25,706 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.320e+02 2.761e+02 3.420e+02 6.403e+02, threshold=5.523e+02, percent-clipped=2.0 +2023-02-09 01:54:27,396 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6885, 2.1127, 3.1900, 1.5771, 2.5054, 2.1521, 1.7461, 2.5489], + device='cuda:0'), covar=tensor([0.1850, 0.2622, 0.0816, 0.4543, 0.1754, 0.3054, 0.2376, 0.2143], + device='cuda:0'), in_proj_covar=tensor([0.0546, 0.0644, 0.0569, 0.0679, 0.0670, 0.0619, 0.0568, 0.0652], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:54:27,845 INFO [train.py:901] (0/4) Epoch 29, batch 2600, loss[loss=0.2027, simple_loss=0.2865, pruned_loss=0.05941, over 8668.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2802, pruned_loss=0.05698, over 1610469.30 frames. 
], batch size: 34, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:54:51,304 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228953.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:55:03,877 INFO [train.py:901] (0/4) Epoch 29, batch 2650, loss[loss=0.2036, simple_loss=0.2884, pruned_loss=0.05941, over 8597.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2808, pruned_loss=0.05717, over 1610205.78 frames. ], batch size: 40, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:55:27,205 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5833, 1.4838, 4.8111, 1.8809, 4.3191, 3.9860, 4.3465, 4.2360], + device='cuda:0'), covar=tensor([0.0536, 0.4667, 0.0457, 0.4062, 0.0903, 0.0863, 0.0474, 0.0598], + device='cuda:0'), in_proj_covar=tensor([0.0681, 0.0663, 0.0741, 0.0662, 0.0743, 0.0635, 0.0640, 0.0715], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:55:37,363 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.477e+02 2.922e+02 3.574e+02 8.790e+02, threshold=5.845e+02, percent-clipped=2.0 +2023-02-09 01:55:39,871 INFO [train.py:901] (0/4) Epoch 29, batch 2700, loss[loss=0.197, simple_loss=0.2895, pruned_loss=0.05219, over 8336.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2803, pruned_loss=0.05663, over 1611910.41 frames. ], batch size: 26, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:11,113 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229062.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:14,606 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229066.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:17,759 INFO [train.py:901] (0/4) Epoch 29, batch 2750, loss[loss=0.2003, simple_loss=0.2967, pruned_loss=0.05193, over 8513.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2785, pruned_loss=0.05545, over 1608919.74 frames. ], batch size: 28, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:29,435 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229087.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:32,358 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229091.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:51,304 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.406e+02 2.857e+02 3.824e+02 7.570e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-09 01:56:53,439 INFO [train.py:901] (0/4) Epoch 29, batch 2800, loss[loss=0.2345, simple_loss=0.3008, pruned_loss=0.08412, over 8459.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2804, pruned_loss=0.05641, over 1614778.50 frames. 
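The `zipformer.py:1185` lines report, for each encoder stack, its warmup window in batches (`warmup_begin`/`warmup_end`) and whether any layers are stochastically skipped on the current step; most steps show `num_to_drop=0, layers_to_drop=set()`, with an occasional `num_to_drop=1` as above. A hypothetical sketch of that kind of layerdrop decision, with drop rates that are assumptions:

```python
import random

def pick_layers_to_drop(num_layers: int, in_warmup: bool,
                        base_rate: float = 0.005,
                        warmup_rate: float = 0.075) -> set:
    # Skip each layer independently with a small probability, higher while
    # the stack is still inside its warmup window; most steps drop nothing,
    # matching the frequent "num_to_drop=0, layers_to_drop=set()" lines.
    rate = warmup_rate if in_warmup else base_rate
    return {i for i in range(num_layers) if random.random() < rate}

to_drop = pick_layers_to_drop(num_layers=12, in_warmup=False)
print(f"num_to_drop={len(to_drop)}, layers_to_drop={to_drop}")
```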
], batch size: 29, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:54,332 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8966, 1.3038, 1.5905, 1.2705, 0.9694, 1.4194, 1.7587, 1.7331], + device='cuda:0'), covar=tensor([0.0601, 0.1745, 0.2306, 0.1908, 0.0678, 0.1994, 0.0765, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0162, 0.0102, 0.0163, 0.0114, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 01:57:24,927 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229162.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:57:31,026 INFO [train.py:901] (0/4) Epoch 29, batch 2850, loss[loss=0.1899, simple_loss=0.293, pruned_loss=0.04339, over 8107.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2804, pruned_loss=0.056, over 1614802.80 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:57:43,233 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229187.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:58:03,961 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9678, 1.6639, 3.2255, 1.5202, 2.4197, 3.5325, 3.7035, 3.0050], + device='cuda:0'), covar=tensor([0.1295, 0.1791, 0.0409, 0.2231, 0.1061, 0.0268, 0.0600, 0.0595], + device='cuda:0'), in_proj_covar=tensor([0.0314, 0.0330, 0.0299, 0.0327, 0.0331, 0.0282, 0.0451, 0.0314], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 01:58:05,222 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.443e+02 3.095e+02 3.828e+02 9.615e+02, threshold=6.189e+02, percent-clipped=4.0 +2023-02-09 01:58:07,411 INFO [train.py:901] (0/4) Epoch 29, batch 2900, loss[loss=0.1825, simple_loss=0.2752, pruned_loss=0.04493, over 8030.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2797, pruned_loss=0.0556, over 1613522.77 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:58:21,868 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 01:58:23,785 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9101, 1.4579, 1.6447, 1.4206, 0.9587, 1.5137, 1.7389, 1.4557], + device='cuda:0'), covar=tensor([0.0554, 0.1334, 0.1762, 0.1509, 0.0645, 0.1552, 0.0718, 0.0722], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0103, 0.0165, 0.0114, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 01:58:44,222 INFO [train.py:901] (0/4) Epoch 29, batch 2950, loss[loss=0.1658, simple_loss=0.2527, pruned_loss=0.03947, over 7815.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2804, pruned_loss=0.05614, over 1616755.75 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:58:47,089 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-09 01:59:02,251 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229297.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:59:17,270 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.481e+02 3.005e+02 3.741e+02 9.617e+02, threshold=6.010e+02, percent-clipped=3.0 +2023-02-09 01:59:17,395 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229318.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:59:19,280 INFO [train.py:901] (0/4) Epoch 29, batch 3000, loss[loss=0.1777, simple_loss=0.2523, pruned_loss=0.05156, over 7423.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05671, over 1618580.77 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:59:19,280 INFO [train.py:926] (0/4) Computing validation loss +2023-02-09 01:59:34,615 INFO [train.py:935] (0/4) Epoch 29, validation: loss=0.17, simple_loss=0.2699, pruned_loss=0.03504, over 944034.00 frames. +2023-02-09 01:59:34,615 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6641MB +2023-02-09 01:59:54,815 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8301, 3.7935, 3.4467, 1.8664, 3.3768, 3.4806, 3.3639, 3.3052], + device='cuda:0'), covar=tensor([0.1006, 0.0697, 0.1241, 0.5341, 0.1088, 0.1230, 0.1506, 0.0959], + device='cuda:0'), in_proj_covar=tensor([0.0550, 0.0464, 0.0457, 0.0568, 0.0446, 0.0471, 0.0447, 0.0415], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 01:59:58,445 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8194, 1.7237, 2.4974, 1.5656, 1.4234, 2.4261, 0.5007, 1.5305], + device='cuda:0'), covar=tensor([0.1746, 0.1166, 0.0333, 0.1164, 0.2216, 0.0372, 0.1898, 0.1228], + device='cuda:0'), in_proj_covar=tensor([0.0205, 0.0208, 0.0139, 0.0225, 0.0281, 0.0149, 0.0174, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 02:00:09,411 INFO [train.py:901] (0/4) Epoch 29, batch 3050, loss[loss=0.1716, simple_loss=0.2652, pruned_loss=0.03902, over 8103.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2821, pruned_loss=0.05696, over 1618155.04 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:00:40,598 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229412.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:00:44,468 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.505e+02 2.815e+02 3.570e+02 7.212e+02, threshold=5.630e+02, percent-clipped=4.0 +2023-02-09 02:00:46,507 INFO [train.py:901] (0/4) Epoch 29, batch 3100, loss[loss=0.2054, simple_loss=0.2906, pruned_loss=0.06005, over 7969.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2817, pruned_loss=0.05681, over 1617732.86 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:01:15,197 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229461.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:01:21,910 INFO [train.py:901] (0/4) Epoch 29, batch 3150, loss[loss=0.2013, simple_loss=0.2903, pruned_loss=0.05612, over 8283.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.283, pruned_loss=0.05754, over 1613936.09 frames. 
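The `Computing validation loss` block just above shows the trainer pausing to run the same criterion over held-out data (`validation: loss=0.17 ... over 944034.00 frames`) and to report peak GPU memory. A sketch of such a pass, where `compute_loss` is a hypothetical helper returning the summed loss and frame count for one batch:

```python
import torch

def run_validation(model, valid_dl, compute_loss, device) -> float:
    # Average the training criterion over the dev set without gradients,
    # then report per-frame loss and the peak-memory figure seen in the log.
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for batch in valid_dl:
            loss_sum, num_frames = compute_loss(model, batch, device)
            tot_loss += loss_sum.item()
            tot_frames += num_frames
    model.train()
    peak_mb = torch.cuda.max_memory_allocated(device) // (1024 * 1024)
    print(f"validation: loss={tot_loss / tot_frames:.4f}")
    print(f"Maximum memory allocated so far is {peak_mb}MB")
    return tot_loss / tot_frames
```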
], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:01:37,529 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-09 02:01:39,330 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:01:43,780 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1657, 2.0005, 2.5793, 2.1926, 2.5352, 2.2728, 2.1193, 1.4613], + device='cuda:0'), covar=tensor([0.6413, 0.5623, 0.2282, 0.4061, 0.2689, 0.3436, 0.1971, 0.5787], + device='cuda:0'), in_proj_covar=tensor([0.0969, 0.1034, 0.0838, 0.1004, 0.1028, 0.0940, 0.0776, 0.0856], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:01:56,472 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.412e+02 3.144e+02 3.923e+02 1.015e+03, threshold=6.289e+02, percent-clipped=11.0 +2023-02-09 02:01:58,573 INFO [train.py:901] (0/4) Epoch 29, batch 3200, loss[loss=0.2266, simple_loss=0.2979, pruned_loss=0.07764, over 8081.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05767, over 1614515.47 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:02:02,164 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9003, 1.9884, 1.7518, 2.6117, 1.3230, 1.6360, 1.9810, 2.0676], + device='cuda:0'), covar=tensor([0.0718, 0.0779, 0.0825, 0.0350, 0.1027, 0.1255, 0.0741, 0.0755], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0195, 0.0244, 0.0212, 0.0203, 0.0246, 0.0250, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 02:02:04,247 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9435, 1.9362, 6.0733, 2.3791, 5.4891, 5.1090, 5.5977, 5.4760], + device='cuda:0'), covar=tensor([0.0442, 0.4398, 0.0362, 0.3520, 0.0867, 0.0896, 0.0438, 0.0495], + device='cuda:0'), in_proj_covar=tensor([0.0680, 0.0659, 0.0739, 0.0655, 0.0740, 0.0633, 0.0639, 0.0714], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:02:05,805 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6805, 1.9662, 2.0296, 1.4225, 2.1144, 1.5831, 0.5426, 1.9460], + device='cuda:0'), covar=tensor([0.0667, 0.0387, 0.0368, 0.0630, 0.0437, 0.1000, 0.1010, 0.0309], + device='cuda:0'), in_proj_covar=tensor([0.0479, 0.0413, 0.0371, 0.0463, 0.0399, 0.0555, 0.0407, 0.0444], + device='cuda:0'), out_proj_covar=tensor([1.2680e-04, 1.0684e-04, 9.6549e-05, 1.2098e-04, 1.0451e-04, 1.5445e-04, + 1.0855e-04, 1.1610e-04], device='cuda:0') +2023-02-09 02:02:35,462 INFO [train.py:901] (0/4) Epoch 29, batch 3250, loss[loss=0.2153, simple_loss=0.2992, pruned_loss=0.06563, over 8195.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05755, over 1613288.28 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:02:49,927 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.10 vs. limit=5.0 +2023-02-09 02:02:52,613 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:02:58,035 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-09 02:03:09,567 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.257e+02 2.746e+02 3.374e+02 9.131e+02, threshold=5.492e+02, percent-clipped=1.0 +2023-02-09 02:03:11,682 INFO [train.py:901] (0/4) Epoch 29, batch 3300, loss[loss=0.2102, simple_loss=0.2945, pruned_loss=0.06297, over 8615.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.282, pruned_loss=0.05668, over 1616198.63 frames. ], batch size: 31, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:03:20,264 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 02:03:41,131 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:45,542 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229668.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:47,452 INFO [train.py:901] (0/4) Epoch 29, batch 3350, loss[loss=0.2328, simple_loss=0.3109, pruned_loss=0.07737, over 7221.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.281, pruned_loss=0.05619, over 1614235.40 frames. ], batch size: 71, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:03:52,168 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229677.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 02:04:03,326 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229693.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:04:20,879 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.561e+02 2.994e+02 3.779e+02 7.703e+02, threshold=5.989e+02, percent-clipped=7.0 +2023-02-09 02:04:22,315 INFO [train.py:901] (0/4) Epoch 29, batch 3400, loss[loss=0.1724, simple_loss=0.2662, pruned_loss=0.03935, over 8565.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2806, pruned_loss=0.05598, over 1614757.85 frames. ], batch size: 31, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:04:59,066 INFO [train.py:901] (0/4) Epoch 29, batch 3450, loss[loss=0.2145, simple_loss=0.2988, pruned_loss=0.06507, over 8438.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2812, pruned_loss=0.05658, over 1615343.96 frames. ], batch size: 27, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:05:00,795 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2770, 2.1802, 2.6897, 2.3675, 2.6671, 2.4068, 2.2436, 1.5763], + device='cuda:0'), covar=tensor([0.5981, 0.5102, 0.2181, 0.3502, 0.2405, 0.3131, 0.1971, 0.5338], + device='cuda:0'), in_proj_covar=tensor([0.0969, 0.1033, 0.0840, 0.1004, 0.1028, 0.0941, 0.0776, 0.0856], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:05:03,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229777.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:23,702 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:33,250 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.314e+02 2.735e+02 3.470e+02 1.051e+03, threshold=5.470e+02, percent-clipped=3.0 +2023-02-09 02:05:34,602 INFO [train.py:901] (0/4) Epoch 29, batch 3500, loss[loss=0.2011, simple_loss=0.262, pruned_loss=0.07006, over 7540.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2805, pruned_loss=0.05609, over 1614768.14 frames. 
], batch size: 18, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:05:37,546 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2398, 2.1019, 2.6357, 2.2972, 2.6471, 2.3653, 2.2057, 1.4828], + device='cuda:0'), covar=tensor([0.5964, 0.5248, 0.2156, 0.3933, 0.2708, 0.3188, 0.1859, 0.5687], + device='cuda:0'), in_proj_covar=tensor([0.0970, 0.1034, 0.0841, 0.1005, 0.1029, 0.0941, 0.0776, 0.0857], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:05:38,249 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7366, 2.0826, 2.0836, 1.4098, 2.3027, 1.5722, 0.7556, 2.0377], + device='cuda:0'), covar=tensor([0.0699, 0.0390, 0.0374, 0.0709, 0.0379, 0.0983, 0.0987, 0.0334], + device='cuda:0'), in_proj_covar=tensor([0.0482, 0.0416, 0.0374, 0.0466, 0.0402, 0.0558, 0.0410, 0.0448], + device='cuda:0'), out_proj_covar=tensor([1.2768e-04, 1.0758e-04, 9.7337e-05, 1.2193e-04, 1.0517e-04, 1.5537e-04, + 1.0928e-04, 1.1737e-04], device='cuda:0') +2023-02-09 02:05:46,931 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229839.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:58,502 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 02:06:12,022 INFO [train.py:901] (0/4) Epoch 29, batch 3550, loss[loss=0.2026, simple_loss=0.2969, pruned_loss=0.05416, over 8356.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.05678, over 1613501.36 frames. ], batch size: 24, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:06:41,736 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229912.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:06:46,517 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.466e+02 3.015e+02 3.666e+02 8.686e+02, threshold=6.030e+02, percent-clipped=2.0 +2023-02-09 02:06:47,440 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229920.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:06:47,948 INFO [train.py:901] (0/4) Epoch 29, batch 3600, loss[loss=0.2126, simple_loss=0.2878, pruned_loss=0.06871, over 7967.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2814, pruned_loss=0.05679, over 1610248.24 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:07:00,991 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229939.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:11,738 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229954.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:24,209 INFO [train.py:901] (0/4) Epoch 29, batch 3650, loss[loss=0.2239, simple_loss=0.3069, pruned_loss=0.07047, over 8540.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2813, pruned_loss=0.05618, over 1616181.17 frames. 
], batch size: 49, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:07:45,869 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-230000.pt +2023-02-09 02:07:50,399 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230005.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:00,967 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.398e+02 2.862e+02 3.372e+02 7.881e+02, threshold=5.724e+02, percent-clipped=3.0 +2023-02-09 02:08:01,714 INFO [train.py:901] (0/4) Epoch 29, batch 3700, loss[loss=0.1704, simple_loss=0.2449, pruned_loss=0.04798, over 7235.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2804, pruned_loss=0.05569, over 1608890.94 frames. ], batch size: 16, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:08:02,482 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230021.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 02:08:06,541 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 02:08:10,879 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230033.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:25,550 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230054.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:28,467 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230058.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:37,280 INFO [train.py:901] (0/4) Epoch 29, batch 3750, loss[loss=0.2031, simple_loss=0.2854, pruned_loss=0.06039, over 7236.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2814, pruned_loss=0.05633, over 1610345.77 frames. ], batch size: 16, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:13,043 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.469e+02 3.110e+02 3.966e+02 1.066e+03, threshold=6.219e+02, percent-clipped=4.0 +2023-02-09 02:09:13,768 INFO [train.py:901] (0/4) Epoch 29, batch 3800, loss[loss=0.1867, simple_loss=0.2883, pruned_loss=0.04253, over 8576.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2817, pruned_loss=0.05677, over 1610188.18 frames. ], batch size: 31, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:24,893 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230136.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 02:09:47,387 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230168.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:09:49,391 INFO [train.py:901] (0/4) Epoch 29, batch 3850, loss[loss=0.1723, simple_loss=0.2667, pruned_loss=0.03894, over 8457.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2806, pruned_loss=0.05629, over 1610577.67 frames. 
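The two `checkpoint.py:75` lines in this stretch save `checkpoint-228000.pt` and then `checkpoint-230000.pt`, exactly 2000 batches apart, i.e. batch-interval checkpoints taken independently of epoch boundaries. A minimal sketch of that schedule; the fields stored in the dict are assumptions:

```python
import torch

def maybe_save_checkpoint(model, optimizer, batch_idx: int, exp_dir: str,
                          save_every: int = 2000) -> None:
    # Save on a fixed batch interval, matching the 228000 -> 230000 spacing
    # in the log; what goes into the dict is an illustrative assumption.
    if batch_idx % save_every != 0:
        return
    path = f"{exp_dir}/checkpoint-{batch_idx}.pt"
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "batch_idx": batch_idx,
        },
        path,
    )
    print(f"Saving checkpoint to {path}")
```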
], batch size: 29, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:53,108 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230176.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:10,924 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8438, 6.0499, 5.1340, 2.6813, 5.2436, 5.6243, 5.4981, 5.4166], + device='cuda:0'), covar=tensor([0.0503, 0.0333, 0.0950, 0.4047, 0.0781, 0.0760, 0.1049, 0.0571], + device='cuda:0'), in_proj_covar=tensor([0.0549, 0.0464, 0.0457, 0.0567, 0.0448, 0.0470, 0.0446, 0.0415], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:10:11,021 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230201.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:12,898 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 02:10:17,573 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230210.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:24,398 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.511e+02 3.297e+02 3.973e+02 1.066e+03, threshold=6.594e+02, percent-clipped=6.0 +2023-02-09 02:10:25,133 INFO [train.py:901] (0/4) Epoch 29, batch 3900, loss[loss=0.1933, simple_loss=0.2699, pruned_loss=0.05832, over 8230.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2805, pruned_loss=0.05586, over 1614249.97 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 4.0 +2023-02-09 02:10:36,235 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:47,182 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.86 vs. limit=5.0 +2023-02-09 02:10:51,860 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:54,125 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230259.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:02,671 INFO [train.py:901] (0/4) Epoch 29, batch 3950, loss[loss=0.2125, simple_loss=0.293, pruned_loss=0.06599, over 7973.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2813, pruned_loss=0.05676, over 1610478.31 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 4.0 +2023-02-09 02:11:30,515 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230310.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:37,968 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.564e+02 3.192e+02 4.107e+02 9.729e+02, threshold=6.384e+02, percent-clipped=2.0 +2023-02-09 02:11:38,729 INFO [train.py:901] (0/4) Epoch 29, batch 4000, loss[loss=0.1533, simple_loss=0.2348, pruned_loss=0.03591, over 7803.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05654, over 1615244.40 frames. 
], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:11:49,766 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230335.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:59,726 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230349.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:12:16,083 INFO [train.py:901] (0/4) Epoch 29, batch 4050, loss[loss=0.1578, simple_loss=0.243, pruned_loss=0.03629, over 6410.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2811, pruned_loss=0.05639, over 1615089.15 frames. ], batch size: 14, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:12:16,266 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:12:31,119 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230392.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:12:48,570 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230417.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:12:50,325 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.354e+02 2.823e+02 3.644e+02 9.834e+02, threshold=5.645e+02, percent-clipped=2.0 +2023-02-09 02:12:51,009 INFO [train.py:901] (0/4) Epoch 29, batch 4100, loss[loss=0.2335, simple_loss=0.3244, pruned_loss=0.07134, over 8406.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.28, pruned_loss=0.05548, over 1612408.25 frames. ], batch size: 49, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:13:22,445 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230464.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:13:26,979 INFO [train.py:901] (0/4) Epoch 29, batch 4150, loss[loss=0.2079, simple_loss=0.2983, pruned_loss=0.05875, over 8445.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2808, pruned_loss=0.05589, over 1611139.02 frames. ], batch size: 29, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:13:38,479 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:13:56,241 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230512.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:01,802 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.471e+02 3.120e+02 4.398e+02 1.014e+03, threshold=6.241e+02, percent-clipped=11.0 +2023-02-09 02:14:02,507 INFO [train.py:901] (0/4) Epoch 29, batch 4200, loss[loss=0.1973, simple_loss=0.2904, pruned_loss=0.05214, over 8499.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05665, over 1614681.91 frames. ], batch size: 28, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:14:16,946 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 02:14:17,803 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230543.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:24,154 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230551.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:38,390 INFO [train.py:901] (0/4) Epoch 29, batch 4250, loss[loss=0.2273, simple_loss=0.3099, pruned_loss=0.07236, over 8453.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2827, pruned_loss=0.05684, over 1619119.17 frames. 
], batch size: 27, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:14:41,209 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 02:15:01,588 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230603.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:13,978 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.532e+02 3.108e+02 3.832e+02 7.900e+02, threshold=6.217e+02, percent-clipped=4.0 +2023-02-09 02:15:14,745 INFO [train.py:901] (0/4) Epoch 29, batch 4300, loss[loss=0.1876, simple_loss=0.2747, pruned_loss=0.05024, over 8454.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2822, pruned_loss=0.05691, over 1608757.92 frames. ], batch size: 25, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:15:19,507 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230627.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:19,559 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230627.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:37,561 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230652.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:51,740 INFO [train.py:901] (0/4) Epoch 29, batch 4350, loss[loss=0.1967, simple_loss=0.2813, pruned_loss=0.05599, over 8472.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05629, over 1607732.78 frames. ], batch size: 27, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:15:55,720 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7986, 1.6106, 2.4034, 1.6173, 1.3628, 2.3109, 0.5958, 1.5157], + device='cuda:0'), covar=tensor([0.1599, 0.1222, 0.0353, 0.1084, 0.2189, 0.0404, 0.1785, 0.1208], + device='cuda:0'), in_proj_covar=tensor([0.0203, 0.0206, 0.0138, 0.0225, 0.0281, 0.0149, 0.0174, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 02:16:17,542 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 02:16:27,839 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230718.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:29,092 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.462e+02 3.042e+02 3.743e+02 1.027e+03, threshold=6.085e+02, percent-clipped=1.0 +2023-02-09 02:16:29,367 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230720.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:29,857 INFO [train.py:901] (0/4) Epoch 29, batch 4400, loss[loss=0.1851, simple_loss=0.272, pruned_loss=0.04906, over 8197.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2817, pruned_loss=0.0567, over 1608827.72 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:16:47,649 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230745.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:57,348 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 02:17:06,612 INFO [train.py:901] (0/4) Epoch 29, batch 4450, loss[loss=0.208, simple_loss=0.2836, pruned_loss=0.06623, over 7792.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05642, over 1611355.12 frames. 
], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:17:36,261 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230810.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:17:43,247 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.373e+02 2.913e+02 3.542e+02 8.795e+02, threshold=5.826e+02, percent-clipped=1.0 +2023-02-09 02:17:43,993 INFO [train.py:901] (0/4) Epoch 29, batch 4500, loss[loss=0.2012, simple_loss=0.2946, pruned_loss=0.05389, over 8362.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05676, over 1606534.47 frames. ], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:17:49,324 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8671, 1.3384, 3.4779, 1.5047, 2.5216, 3.8311, 3.9474, 3.3121], + device='cuda:0'), covar=tensor([0.1298, 0.2140, 0.0336, 0.2223, 0.1034, 0.0228, 0.0514, 0.0550], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0328, 0.0296, 0.0325, 0.0328, 0.0279, 0.0447, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:17:51,408 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:17:54,914 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 02:18:19,720 INFO [train.py:901] (0/4) Epoch 29, batch 4550, loss[loss=0.2397, simple_loss=0.33, pruned_loss=0.07473, over 8568.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.05745, over 1608830.36 frames. ], batch size: 31, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:18:28,427 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:31,629 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230887.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:37,203 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230895.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:46,429 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:46,496 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:55,063 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.340e+02 2.935e+02 3.730e+02 9.176e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-09 02:18:55,765 INFO [train.py:901] (0/4) Epoch 29, batch 4600, loss[loss=0.1911, simple_loss=0.2873, pruned_loss=0.04744, over 8252.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05703, over 1608952.06 frames. ], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:19:13,598 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230945.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:31,775 INFO [train.py:901] (0/4) Epoch 29, batch 4650, loss[loss=0.1691, simple_loss=0.2494, pruned_loss=0.04444, over 7431.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2823, pruned_loss=0.05725, over 1611780.75 frames. 
], batch size: 17, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:19:33,916 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230974.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:42,745 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8410, 6.0267, 5.2571, 2.2642, 5.2201, 5.6602, 5.5304, 5.4150], + device='cuda:0'), covar=tensor([0.0522, 0.0335, 0.0859, 0.4343, 0.0672, 0.0671, 0.0980, 0.0587], + device='cuda:0'), in_proj_covar=tensor([0.0544, 0.0460, 0.0451, 0.0563, 0.0442, 0.0466, 0.0441, 0.0411], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:19:51,180 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230999.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:53,303 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:59,428 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231010.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:20:06,388 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.812e+02 2.522e+02 3.157e+02 3.845e+02 7.559e+02, threshold=6.314e+02, percent-clipped=7.0 +2023-02-09 02:20:07,123 INFO [train.py:901] (0/4) Epoch 29, batch 4700, loss[loss=0.1945, simple_loss=0.2754, pruned_loss=0.05687, over 8199.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2821, pruned_loss=0.05697, over 1609069.62 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:20:43,873 INFO [train.py:901] (0/4) Epoch 29, batch 4750, loss[loss=0.1709, simple_loss=0.2528, pruned_loss=0.04451, over 7438.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05719, over 1603534.91 frames. ], batch size: 17, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:00,622 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 02:21:02,795 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 02:21:09,910 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.48 vs. limit=2.0 +2023-02-09 02:21:18,722 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.440e+02 2.924e+02 3.615e+02 6.392e+02, threshold=5.847e+02, percent-clipped=1.0 +2023-02-09 02:21:19,439 INFO [train.py:901] (0/4) Epoch 29, batch 4800, loss[loss=0.172, simple_loss=0.2689, pruned_loss=0.03753, over 8353.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2807, pruned_loss=0.05646, over 1608578.33 frames. ], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:43,701 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231154.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:21:55,946 INFO [train.py:901] (0/4) Epoch 29, batch 4850, loss[loss=0.2115, simple_loss=0.2924, pruned_loss=0.06528, over 8124.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2803, pruned_loss=0.05617, over 1609018.06 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:55,955 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-09 02:22:17,890 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231201.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:31,032 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.490e+02 3.133e+02 4.186e+02 8.287e+02, threshold=6.266e+02, percent-clipped=6.0 +2023-02-09 02:22:31,779 INFO [train.py:901] (0/4) Epoch 29, batch 4900, loss[loss=0.164, simple_loss=0.2505, pruned_loss=0.03871, over 8073.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2803, pruned_loss=0.05621, over 1608271.40 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:22:35,629 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231226.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:54,370 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:59,240 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231258.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:04,620 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:06,612 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231269.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:06,651 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231269.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:07,930 INFO [train.py:901] (0/4) Epoch 29, batch 4950, loss[loss=0.206, simple_loss=0.3014, pruned_loss=0.05535, over 8105.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2813, pruned_loss=0.05647, over 1608515.98 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:17,277 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231283.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:22,765 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231291.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:43,161 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.476e+02 2.910e+02 3.581e+02 7.956e+02, threshold=5.820e+02, percent-clipped=2.0 +2023-02-09 02:23:43,855 INFO [train.py:901] (0/4) Epoch 29, batch 5000, loss[loss=0.1778, simple_loss=0.2606, pruned_loss=0.04747, over 7555.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2813, pruned_loss=0.05638, over 1612126.84 frames. ], batch size: 18, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:45,426 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4399, 1.8375, 4.5518, 2.3178, 2.6304, 5.2358, 5.2794, 4.5416], + device='cuda:0'), covar=tensor([0.1094, 0.1770, 0.0226, 0.1735, 0.1120, 0.0149, 0.0321, 0.0542], + device='cuda:0'), in_proj_covar=tensor([0.0313, 0.0330, 0.0299, 0.0327, 0.0328, 0.0281, 0.0450, 0.0312], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:24:17,067 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:24:19,695 INFO [train.py:901] (0/4) Epoch 29, batch 5050, loss[loss=0.1998, simple_loss=0.2902, pruned_loss=0.05468, over 8517.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2821, pruned_loss=0.05682, over 1610772.05 frames. 
], batch size: 26, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:24:37,813 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 02:24:55,717 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.315e+02 2.860e+02 3.491e+02 8.708e+02, threshold=5.721e+02, percent-clipped=8.0 +2023-02-09 02:24:56,378 INFO [train.py:901] (0/4) Epoch 29, batch 5100, loss[loss=0.2494, simple_loss=0.3194, pruned_loss=0.08976, over 8461.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2818, pruned_loss=0.05691, over 1607182.84 frames. ], batch size: 29, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:32,302 INFO [train.py:901] (0/4) Epoch 29, batch 5150, loss[loss=0.1885, simple_loss=0.2859, pruned_loss=0.0455, over 8283.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.05695, over 1608446.74 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:52,008 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 02:26:07,354 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-09 02:26:08,165 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.313e+02 2.816e+02 3.592e+02 7.666e+02, threshold=5.632e+02, percent-clipped=6.0 +2023-02-09 02:26:08,945 INFO [train.py:901] (0/4) Epoch 29, batch 5200, loss[loss=0.1736, simple_loss=0.2587, pruned_loss=0.0442, over 8077.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2805, pruned_loss=0.05632, over 1608953.10 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:26:12,110 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:30,191 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231550.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:39,129 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 02:26:44,750 INFO [train.py:901] (0/4) Epoch 29, batch 5250, loss[loss=0.2057, simple_loss=0.2896, pruned_loss=0.06089, over 7918.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2802, pruned_loss=0.0565, over 1610060.70 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:15,801 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231613.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:20,504 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.485e+02 2.947e+02 3.559e+02 7.815e+02, threshold=5.893e+02, percent-clipped=2.0 +2023-02-09 02:27:21,231 INFO [train.py:901] (0/4) Epoch 29, batch 5300, loss[loss=0.1826, simple_loss=0.2701, pruned_loss=0.04757, over 8187.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.05641, over 1612536.56 frames. 
], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:22,862 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:41,027 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:46,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9348, 1.5509, 6.0300, 2.2123, 5.4737, 5.0706, 5.5846, 5.4680], + device='cuda:0'), covar=tensor([0.0426, 0.5158, 0.0371, 0.3952, 0.0939, 0.0917, 0.0450, 0.0493], + device='cuda:0'), in_proj_covar=tensor([0.0682, 0.0660, 0.0742, 0.0653, 0.0739, 0.0632, 0.0640, 0.0717], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:27:47,757 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0527, 2.1687, 1.8955, 2.9144, 1.2611, 1.6214, 2.0182, 2.1699], + device='cuda:0'), covar=tensor([0.0768, 0.0842, 0.0849, 0.0382, 0.1107, 0.1417, 0.0920, 0.0725], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0213, 0.0202, 0.0245, 0.0250, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 02:27:53,033 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231665.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:54,370 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1555, 1.4116, 4.3720, 1.6290, 3.8591, 3.6706, 3.9315, 3.8396], + device='cuda:0'), covar=tensor([0.0718, 0.4746, 0.0576, 0.4121, 0.1126, 0.0956, 0.0689, 0.0750], + device='cuda:0'), in_proj_covar=tensor([0.0682, 0.0660, 0.0742, 0.0653, 0.0739, 0.0632, 0.0640, 0.0717], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:27:57,017 INFO [train.py:901] (0/4) Epoch 29, batch 5350, loss[loss=0.1959, simple_loss=0.2908, pruned_loss=0.05048, over 8492.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2814, pruned_loss=0.05665, over 1614671.44 frames. ], batch size: 29, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:32,683 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.535e+02 3.138e+02 3.956e+02 6.651e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-09 02:28:33,426 INFO [train.py:901] (0/4) Epoch 29, batch 5400, loss[loss=0.1893, simple_loss=0.2788, pruned_loss=0.04987, over 8038.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2826, pruned_loss=0.05733, over 1617545.11 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:38,412 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:29:09,678 INFO [train.py:901] (0/4) Epoch 29, batch 5450, loss[loss=0.2117, simple_loss=0.3007, pruned_loss=0.06132, over 8426.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2816, pruned_loss=0.05723, over 1614468.91 frames. 
], batch size: 49, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:24,014 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1350, 3.5783, 2.4751, 3.0847, 2.8372, 2.2351, 2.8074, 3.1271], + device='cuda:0'), covar=tensor([0.2013, 0.0427, 0.1149, 0.0718, 0.0855, 0.1469, 0.1215, 0.1208], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0244, 0.0343, 0.0315, 0.0302, 0.0348, 0.0351, 0.0320], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 02:29:29,509 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 02:29:43,744 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7857, 2.0096, 2.1026, 1.4492, 2.1911, 1.5023, 0.7864, 2.0391], + device='cuda:0'), covar=tensor([0.0733, 0.0476, 0.0359, 0.0705, 0.0572, 0.0904, 0.1031, 0.0362], + device='cuda:0'), in_proj_covar=tensor([0.0476, 0.0417, 0.0371, 0.0465, 0.0400, 0.0556, 0.0408, 0.0445], + device='cuda:0'), out_proj_covar=tensor([1.2593e-04, 1.0787e-04, 9.6390e-05, 1.2157e-04, 1.0474e-04, 1.5474e-04, + 1.0873e-04, 1.1633e-04], device='cuda:0') +2023-02-09 02:29:44,874 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.416e+02 2.826e+02 3.521e+02 6.915e+02, threshold=5.653e+02, percent-clipped=1.0 +2023-02-09 02:29:45,630 INFO [train.py:901] (0/4) Epoch 29, batch 5500, loss[loss=0.189, simple_loss=0.2784, pruned_loss=0.04981, over 8485.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2815, pruned_loss=0.05688, over 1617484.67 frames. ], batch size: 27, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:48,666 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6459, 1.6687, 2.0279, 1.6645, 0.9578, 1.7234, 2.1791, 2.0699], + device='cuda:0'), covar=tensor([0.0485, 0.1241, 0.1621, 0.1457, 0.0587, 0.1405, 0.0649, 0.0599], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 02:30:21,379 INFO [train.py:901] (0/4) Epoch 29, batch 5550, loss[loss=0.2045, simple_loss=0.2816, pruned_loss=0.06371, over 8091.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.0566, over 1614144.66 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:30:49,882 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-09 02:30:56,573 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.336e+02 2.917e+02 3.506e+02 1.057e+03, threshold=5.834e+02, percent-clipped=5.0 +2023-02-09 02:30:57,331 INFO [train.py:901] (0/4) Epoch 29, batch 5600, loss[loss=0.1897, simple_loss=0.2814, pruned_loss=0.04903, over 8192.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2811, pruned_loss=0.05681, over 1611912.04 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:31:24,827 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.45 vs. limit=2.0 +2023-02-09 02:31:34,506 INFO [train.py:901] (0/4) Epoch 29, batch 5650, loss[loss=0.2293, simple_loss=0.3146, pruned_loss=0.07205, over 8503.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2814, pruned_loss=0.05726, over 1611143.00 frames. 
], batch size: 28, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:31:38,829 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 02:31:43,813 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:31:54,968 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-232000.pt +2023-02-09 02:32:02,829 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:02,968 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:10,314 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.351e+02 2.719e+02 3.536e+02 6.635e+02, threshold=5.438e+02, percent-clipped=1.0 +2023-02-09 02:32:11,022 INFO [train.py:901] (0/4) Epoch 29, batch 5700, loss[loss=0.204, simple_loss=0.2847, pruned_loss=0.06161, over 8345.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.05745, over 1613788.31 frames. ], batch size: 26, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:45,200 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 02:32:47,267 INFO [train.py:901] (0/4) Epoch 29, batch 5750, loss[loss=0.2152, simple_loss=0.294, pruned_loss=0.06823, over 8039.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2819, pruned_loss=0.05727, over 1614977.14 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:54,536 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6225, 1.9772, 2.0584, 1.2776, 2.1144, 1.4829, 0.7621, 1.8773], + device='cuda:0'), covar=tensor([0.0979, 0.0453, 0.0405, 0.0936, 0.0609, 0.1149, 0.1185, 0.0453], + device='cuda:0'), in_proj_covar=tensor([0.0478, 0.0418, 0.0372, 0.0466, 0.0401, 0.0559, 0.0408, 0.0446], + device='cuda:0'), out_proj_covar=tensor([1.2665e-04, 1.0797e-04, 9.6710e-05, 1.2186e-04, 1.0495e-04, 1.5560e-04, + 1.0867e-04, 1.1668e-04], device='cuda:0') +2023-02-09 02:33:07,936 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7668, 1.6595, 2.6190, 2.0065, 2.3291, 1.8366, 1.5825, 1.1863], + device='cuda:0'), covar=tensor([0.7853, 0.6698, 0.2371, 0.4515, 0.3585, 0.4947, 0.3152, 0.6126], + device='cuda:0'), in_proj_covar=tensor([0.0971, 0.1035, 0.0840, 0.1003, 0.1028, 0.0939, 0.0777, 0.0856], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:33:23,816 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 2.417e+02 3.013e+02 3.730e+02 1.097e+03, threshold=6.026e+02, percent-clipped=6.0 +2023-02-09 02:33:24,549 INFO [train.py:901] (0/4) Epoch 29, batch 5800, loss[loss=0.2019, simple_loss=0.2659, pruned_loss=0.06893, over 7667.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2806, pruned_loss=0.05636, over 1612646.63 frames. 
], batch size: 19, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:33:26,837 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232124.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:33:38,337 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0390, 1.5291, 3.4891, 1.5293, 2.5792, 3.8269, 3.9890, 3.1680], + device='cuda:0'), covar=tensor([0.1215, 0.1931, 0.0395, 0.2192, 0.1059, 0.0280, 0.0616, 0.0656], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0328, 0.0295, 0.0325, 0.0326, 0.0279, 0.0446, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:33:46,045 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7572, 2.0132, 2.1030, 1.3522, 2.2454, 1.5057, 0.7929, 1.9807], + device='cuda:0'), covar=tensor([0.0850, 0.0451, 0.0392, 0.0822, 0.0466, 0.1088, 0.1124, 0.0451], + device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0417, 0.0371, 0.0465, 0.0400, 0.0558, 0.0407, 0.0445], + device='cuda:0'), out_proj_covar=tensor([1.2632e-04, 1.0779e-04, 9.6523e-05, 1.2158e-04, 1.0470e-04, 1.5528e-04, + 1.0856e-04, 1.1647e-04], device='cuda:0') +2023-02-09 02:33:59,553 INFO [train.py:901] (0/4) Epoch 29, batch 5850, loss[loss=0.1926, simple_loss=0.2895, pruned_loss=0.04791, over 8108.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2805, pruned_loss=0.05595, over 1613049.27 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:31,421 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1689, 1.5936, 1.8187, 1.5007, 1.0339, 1.5623, 1.8069, 1.5332], + device='cuda:0'), covar=tensor([0.0516, 0.1262, 0.1584, 0.1449, 0.0584, 0.1472, 0.0709, 0.0698], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0115, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 02:34:34,651 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.421e+02 2.869e+02 3.503e+02 7.290e+02, threshold=5.737e+02, percent-clipped=3.0 +2023-02-09 02:34:35,349 INFO [train.py:901] (0/4) Epoch 29, batch 5900, loss[loss=0.1741, simple_loss=0.2639, pruned_loss=0.04217, over 8177.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2804, pruned_loss=0.05583, over 1611903.02 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:44,583 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8963, 1.3841, 6.0628, 2.0948, 5.4078, 5.1272, 5.5627, 5.4423], + device='cuda:0'), covar=tensor([0.0552, 0.5445, 0.0414, 0.4368, 0.1186, 0.0918, 0.0576, 0.0625], + device='cuda:0'), in_proj_covar=tensor([0.0688, 0.0667, 0.0748, 0.0659, 0.0747, 0.0637, 0.0643, 0.0724], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:35:06,599 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:35:10,669 INFO [train.py:901] (0/4) Epoch 29, batch 5950, loss[loss=0.2252, simple_loss=0.3077, pruned_loss=0.07138, over 8105.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2791, pruned_loss=0.0553, over 1611126.03 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:26,282 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-02-09 02:35:46,611 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.503e+02 2.837e+02 3.700e+02 9.228e+02, threshold=5.675e+02, percent-clipped=4.0 +2023-02-09 02:35:47,368 INFO [train.py:901] (0/4) Epoch 29, batch 6000, loss[loss=0.191, simple_loss=0.2784, pruned_loss=0.05175, over 8461.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2792, pruned_loss=0.05519, over 1611239.32 frames. ], batch size: 27, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:47,369 INFO [train.py:926] (0/4) Computing validation loss +2023-02-09 02:36:01,206 INFO [train.py:935] (0/4) Epoch 29, validation: loss=0.1708, simple_loss=0.2701, pruned_loss=0.03577, over 944034.00 frames. +2023-02-09 02:36:01,207 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6641MB +2023-02-09 02:36:11,926 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8985, 1.5618, 5.9886, 2.2296, 5.4040, 4.9144, 5.4415, 5.3674], + device='cuda:0'), covar=tensor([0.0437, 0.5182, 0.0448, 0.3955, 0.1056, 0.1077, 0.0529, 0.0551], + device='cuda:0'), in_proj_covar=tensor([0.0687, 0.0666, 0.0747, 0.0658, 0.0745, 0.0637, 0.0641, 0.0724], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:36:21,626 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9643, 1.5858, 2.8302, 1.4927, 2.2298, 3.0208, 3.1826, 2.6053], + device='cuda:0'), covar=tensor([0.1058, 0.1584, 0.0359, 0.2044, 0.0930, 0.0300, 0.0611, 0.0543], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0328, 0.0297, 0.0326, 0.0326, 0.0279, 0.0447, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:36:37,560 INFO [train.py:901] (0/4) Epoch 29, batch 6050, loss[loss=0.1926, simple_loss=0.284, pruned_loss=0.05064, over 8664.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2792, pruned_loss=0.05528, over 1614339.02 frames. ], batch size: 34, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:36:44,021 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=232380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:36:57,858 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232399.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:37:02,410 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232405.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:37:12,732 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.608e+02 3.073e+02 4.031e+02 7.869e+02, threshold=6.145e+02, percent-clipped=3.0 +2023-02-09 02:37:13,453 INFO [train.py:901] (0/4) Epoch 29, batch 6100, loss[loss=0.1855, simple_loss=0.2781, pruned_loss=0.04646, over 8185.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2791, pruned_loss=0.05502, over 1612608.25 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:37:27,130 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 02:37:50,003 INFO [train.py:901] (0/4) Epoch 29, batch 6150, loss[loss=0.1788, simple_loss=0.2769, pruned_loss=0.04032, over 8468.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2802, pruned_loss=0.05561, over 1617715.54 frames. 
], batch size: 27, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:38:00,015 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8321, 1.7823, 2.3212, 1.4778, 1.4698, 2.2391, 0.5455, 1.4776], + device='cuda:0'), covar=tensor([0.1389, 0.1026, 0.0289, 0.0891, 0.1963, 0.0496, 0.1514, 0.1068], + device='cuda:0'), in_proj_covar=tensor([0.0203, 0.0205, 0.0137, 0.0224, 0.0279, 0.0149, 0.0172, 0.0201], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 02:38:06,448 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232494.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:38:25,294 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.548e+02 2.901e+02 3.636e+02 6.365e+02, threshold=5.801e+02, percent-clipped=1.0 +2023-02-09 02:38:25,874 INFO [train.py:901] (0/4) Epoch 29, batch 6200, loss[loss=0.2931, simple_loss=0.3529, pruned_loss=0.1167, over 8107.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2811, pruned_loss=0.05603, over 1613681.94 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:38:36,674 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232536.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:38:53,542 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.6834, 2.4804, 3.1373, 2.6399, 3.0118, 2.6251, 2.6283, 2.3612], + device='cuda:0'), covar=tensor([0.4261, 0.4103, 0.1884, 0.3291, 0.2402, 0.2799, 0.1634, 0.4422], + device='cuda:0'), in_proj_covar=tensor([0.0974, 0.1036, 0.0843, 0.1005, 0.1030, 0.0939, 0.0779, 0.0857], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:39:02,789 INFO [train.py:901] (0/4) Epoch 29, batch 6250, loss[loss=0.2336, simple_loss=0.3014, pruned_loss=0.08291, over 6832.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2802, pruned_loss=0.05573, over 1611208.02 frames. 
], batch size: 71, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:39:07,150 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1018, 1.5152, 1.7611, 1.4383, 1.0908, 1.4858, 1.9594, 1.9014], + device='cuda:0'), covar=tensor([0.0553, 0.1243, 0.1600, 0.1465, 0.0590, 0.1496, 0.0700, 0.0588], + device='cuda:0'), in_proj_covar=tensor([0.0100, 0.0155, 0.0190, 0.0162, 0.0102, 0.0164, 0.0114, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 02:39:16,237 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4373, 1.6263, 4.5153, 2.0979, 2.5329, 5.1199, 5.2045, 4.3642], + device='cuda:0'), covar=tensor([0.1184, 0.2015, 0.0260, 0.1948, 0.1136, 0.0171, 0.0558, 0.0587], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0327, 0.0295, 0.0326, 0.0325, 0.0278, 0.0445, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:39:23,936 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7354, 1.3364, 2.8664, 1.4242, 2.2266, 3.0868, 3.2614, 2.6324], + device='cuda:0'), covar=tensor([0.1256, 0.1814, 0.0401, 0.2243, 0.0933, 0.0321, 0.0748, 0.0587], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0326, 0.0295, 0.0326, 0.0325, 0.0278, 0.0445, 0.0309], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:39:30,142 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232609.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:39:37,831 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.332e+02 2.762e+02 3.491e+02 8.673e+02, threshold=5.523e+02, percent-clipped=4.0 +2023-02-09 02:39:39,195 INFO [train.py:901] (0/4) Epoch 29, batch 6300, loss[loss=0.2006, simple_loss=0.2929, pruned_loss=0.05414, over 8460.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2805, pruned_loss=0.05572, over 1615247.96 frames. ], batch size: 27, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:39:56,315 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9437, 1.7693, 2.2406, 1.8796, 2.2462, 2.0471, 1.9275, 1.1763], + device='cuda:0'), covar=tensor([0.6623, 0.5491, 0.2343, 0.4522, 0.2864, 0.3611, 0.2213, 0.5940], + device='cuda:0'), in_proj_covar=tensor([0.0974, 0.1036, 0.0844, 0.1006, 0.1030, 0.0940, 0.0780, 0.0858], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:40:12,366 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6234, 1.9754, 2.8556, 1.5141, 2.0966, 2.0544, 1.7364, 2.2115], + device='cuda:0'), covar=tensor([0.2121, 0.2827, 0.1104, 0.4944, 0.2318, 0.3426, 0.2661, 0.2441], + device='cuda:0'), in_proj_covar=tensor([0.0544, 0.0641, 0.0565, 0.0674, 0.0667, 0.0614, 0.0569, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:40:14,953 INFO [train.py:901] (0/4) Epoch 29, batch 6350, loss[loss=0.185, simple_loss=0.2755, pruned_loss=0.04729, over 8581.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2798, pruned_loss=0.05568, over 1614151.32 frames. 
], batch size: 49, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:40:50,679 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.465e+02 3.018e+02 3.778e+02 1.284e+03, threshold=6.036e+02, percent-clipped=3.0 +2023-02-09 02:40:51,328 INFO [train.py:901] (0/4) Epoch 29, batch 6400, loss[loss=0.2403, simple_loss=0.3069, pruned_loss=0.08691, over 6730.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2801, pruned_loss=0.05577, over 1611370.33 frames. ], batch size: 72, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:40:53,617 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232724.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:41:08,038 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232743.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:41:28,304 INFO [train.py:901] (0/4) Epoch 29, batch 6450, loss[loss=0.1535, simple_loss=0.2384, pruned_loss=0.03429, over 7448.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2805, pruned_loss=0.05575, over 1615133.99 frames. ], batch size: 17, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:05,017 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.358e+02 3.084e+02 4.266e+02 9.590e+02, threshold=6.168e+02, percent-clipped=5.0 +2023-02-09 02:42:05,757 INFO [train.py:901] (0/4) Epoch 29, batch 6500, loss[loss=0.1683, simple_loss=0.247, pruned_loss=0.04475, over 7662.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2794, pruned_loss=0.05528, over 1611046.93 frames. ], batch size: 19, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:17,434 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232838.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:24,002 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.76 vs. limit=2.0 +2023-02-09 02:42:24,518 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6978, 2.5387, 1.7848, 2.3300, 2.3528, 1.6027, 2.2563, 2.1677], + device='cuda:0'), covar=tensor([0.1570, 0.0475, 0.1430, 0.0753, 0.0696, 0.1683, 0.1039, 0.1103], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0246, 0.0347, 0.0316, 0.0303, 0.0351, 0.0352, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 02:42:31,256 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232858.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:31,462 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-09 02:42:40,789 INFO [train.py:901] (0/4) Epoch 29, batch 6550, loss[loss=0.2322, simple_loss=0.3147, pruned_loss=0.07484, over 8550.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2796, pruned_loss=0.05572, over 1614820.02 frames. ], batch size: 39, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:47,141 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232880.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:47,799 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-09 02:43:06,958 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1684, 3.4627, 2.2495, 2.9697, 2.8099, 2.0810, 2.7673, 3.0802], + device='cuda:0'), covar=tensor([0.1832, 0.0449, 0.1261, 0.0771, 0.0780, 0.1577, 0.1105, 0.1121], + device='cuda:0'), in_proj_covar=tensor([0.0359, 0.0247, 0.0348, 0.0317, 0.0304, 0.0352, 0.0353, 0.0322], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 02:43:08,169 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 02:43:16,623 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.398e+02 2.846e+02 3.696e+02 7.042e+02, threshold=5.692e+02, percent-clipped=2.0 +2023-02-09 02:43:17,344 INFO [train.py:901] (0/4) Epoch 29, batch 6600, loss[loss=0.2123, simple_loss=0.3004, pruned_loss=0.06215, over 8513.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2794, pruned_loss=0.05533, over 1616344.77 frames. ], batch size: 26, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:43:40,641 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232953.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:43:52,846 INFO [train.py:901] (0/4) Epoch 29, batch 6650, loss[loss=0.1813, simple_loss=0.2672, pruned_loss=0.04769, over 7236.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.279, pruned_loss=0.0551, over 1612117.80 frames. ], batch size: 16, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:43:59,988 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=232980.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:10,273 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232995.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:17,502 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233005.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:19,561 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233008.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:28,483 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.388e+02 2.820e+02 3.481e+02 6.998e+02, threshold=5.640e+02, percent-clipped=2.0 +2023-02-09 02:44:28,503 INFO [train.py:901] (0/4) Epoch 29, batch 6700, loss[loss=0.2007, simple_loss=0.2792, pruned_loss=0.06109, over 7974.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2793, pruned_loss=0.05531, over 1612187.66 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:44:40,632 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233036.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:55,145 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8108, 1.8096, 2.8235, 2.1158, 2.5325, 1.8888, 1.6550, 1.4097], + device='cuda:0'), covar=tensor([0.8017, 0.6309, 0.2340, 0.4629, 0.3372, 0.4954, 0.3270, 0.6080], + device='cuda:0'), in_proj_covar=tensor([0.0971, 0.1034, 0.0842, 0.1003, 0.1027, 0.0939, 0.0777, 0.0856], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:45:06,286 INFO [train.py:901] (0/4) Epoch 29, batch 6750, loss[loss=0.1714, simple_loss=0.2617, pruned_loss=0.04053, over 7659.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2793, pruned_loss=0.05551, over 1611923.77 frames. 
], batch size: 19, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:45:30,771 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 02:45:38,442 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233114.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:45:43,345 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.536e+02 3.042e+02 3.988e+02 8.675e+02, threshold=6.084e+02, percent-clipped=8.0 +2023-02-09 02:45:43,372 INFO [train.py:901] (0/4) Epoch 29, batch 6800, loss[loss=0.2106, simple_loss=0.29, pruned_loss=0.0656, over 8657.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2803, pruned_loss=0.05599, over 1612065.93 frames. ], batch size: 34, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:45:56,017 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:46:18,739 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1090, 2.2280, 1.8983, 2.7773, 1.5059, 1.7463, 2.1091, 2.3079], + device='cuda:0'), covar=tensor([0.0710, 0.0769, 0.0841, 0.0456, 0.1009, 0.1261, 0.0830, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0196, 0.0246, 0.0215, 0.0203, 0.0248, 0.0250, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 02:46:19,939 INFO [train.py:901] (0/4) Epoch 29, batch 6850, loss[loss=0.161, simple_loss=0.2431, pruned_loss=0.03949, over 7431.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.281, pruned_loss=0.05631, over 1611796.98 frames. ], batch size: 17, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:46:22,005 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-09 02:46:34,886 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8695, 1.3047, 3.1834, 1.5968, 2.3883, 3.4260, 3.5257, 2.9721], + device='cuda:0'), covar=tensor([0.1117, 0.1905, 0.0306, 0.1954, 0.0949, 0.0253, 0.0597, 0.0499], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0328, 0.0297, 0.0327, 0.0328, 0.0280, 0.0449, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 02:46:45,157 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6611, 2.5672, 1.7731, 2.3940, 2.1715, 1.5173, 2.1325, 2.2996], + device='cuda:0'), covar=tensor([0.1594, 0.0432, 0.1457, 0.0701, 0.0849, 0.1839, 0.1067, 0.0936], + device='cuda:0'), in_proj_covar=tensor([0.0358, 0.0246, 0.0346, 0.0315, 0.0302, 0.0350, 0.0351, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 02:46:46,494 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233209.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:46:48,583 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2040, 1.9926, 2.4490, 2.0699, 2.4608, 2.3378, 2.1502, 1.3418], + device='cuda:0'), covar=tensor([0.5866, 0.5315, 0.2338, 0.4053, 0.2614, 0.3197, 0.1959, 0.5776], + device='cuda:0'), in_proj_covar=tensor([0.0975, 0.1037, 0.0844, 0.1010, 0.1030, 0.0943, 0.0780, 0.0859], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:46:55,106 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.470e+02 3.068e+02 3.817e+02 7.038e+02, threshold=6.136e+02, percent-clipped=5.0 +2023-02-09 02:46:55,128 INFO [train.py:901] (0/4) Epoch 29, batch 6900, loss[loss=0.1869, simple_loss=0.2802, pruned_loss=0.04678, over 8473.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.05616, over 1608805.96 frames. ], batch size: 25, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:47:00,415 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2685, 2.1874, 2.6720, 2.2348, 2.7323, 2.4309, 2.1829, 1.5836], + device='cuda:0'), covar=tensor([0.5902, 0.5219, 0.2310, 0.4312, 0.2792, 0.3090, 0.1997, 0.5656], + device='cuda:0'), in_proj_covar=tensor([0.0974, 0.1037, 0.0844, 0.1009, 0.1030, 0.0943, 0.0779, 0.0858], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:47:04,474 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233234.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:16,287 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233251.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:31,602 INFO [train.py:901] (0/4) Epoch 29, batch 6950, loss[loss=0.2078, simple_loss=0.2987, pruned_loss=0.05846, over 8446.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05654, over 1609964.06 frames. ], batch size: 27, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:47:33,688 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-09 02:47:35,282 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:35,910 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233277.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:48:07,355 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.388e+02 2.860e+02 3.725e+02 6.106e+02, threshold=5.720e+02, percent-clipped=0.0 +2023-02-09 02:48:07,382 INFO [train.py:901] (0/4) Epoch 29, batch 7000, loss[loss=0.1918, simple_loss=0.2676, pruned_loss=0.058, over 8234.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.281, pruned_loss=0.05687, over 1611101.31 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:48:30,364 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233352.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:48:44,316 INFO [train.py:901] (0/4) Epoch 29, batch 7050, loss[loss=0.2151, simple_loss=0.289, pruned_loss=0.07055, over 8128.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2807, pruned_loss=0.05651, over 1610248.08 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:48:51,048 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:49:21,590 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6380, 2.1303, 3.2595, 1.4988, 2.5805, 2.0586, 1.7797, 2.5283], + device='cuda:0'), covar=tensor([0.2020, 0.2825, 0.0941, 0.5056, 0.1923, 0.3437, 0.2642, 0.2416], + device='cuda:0'), in_proj_covar=tensor([0.0543, 0.0641, 0.0566, 0.0676, 0.0664, 0.0615, 0.0568, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:49:22,018 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.400e+02 3.088e+02 3.796e+02 6.683e+02, threshold=6.176e+02, percent-clipped=2.0 +2023-02-09 02:49:22,039 INFO [train.py:901] (0/4) Epoch 29, batch 7100, loss[loss=0.1762, simple_loss=0.268, pruned_loss=0.04224, over 8367.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.05681, over 1611682.62 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:49:54,761 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233467.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:49:57,422 INFO [train.py:901] (0/4) Epoch 29, batch 7150, loss[loss=0.1942, simple_loss=0.2743, pruned_loss=0.05704, over 8084.00 frames. ], tot_loss[loss=0.198, simple_loss=0.282, pruned_loss=0.05698, over 1611316.74 frames. 
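The recurring `WARNING [train.py:1067] Exclude cut with ID ... from training. Duration: ...` entries above come from a duration filter applied to the training cuts. Below is a minimal sketch of such a filter; `Cut` is a hypothetical stand-in for a lhotse cut, and the bounds are inferred from this log (every excluded utterance is shorter than about 1 s or longer than about 25 s), not taken from the recipe itself.

```python
import logging
from dataclasses import dataclass

@dataclass
class Cut:
    id: str          # hypothetical stand-in for a lhotse cut
    duration: float  # seconds

MIN_SECONDS = 1.0   # assumed lower bound, inferred from the log
MAX_SECONDS = 25.0  # assumed upper bound, inferred from the log

def keep_cut(cut: Cut) -> bool:
    if MIN_SECONDS <= cut.duration <= MAX_SECONDS:
        return True
    logging.warning(
        "Exclude cut with ID %s from training. Duration: %s",
        cut.id, cut.duration,
    )
    return False

cuts = [Cut("ok-utt", 8.2), Cut("too-long", 26.438875), Cut("too-short", 0.95)]
train_cuts = [c for c in cuts if keep_cut(c)]  # keeps only "ok-utt"
```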
], batch size: 21, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:50:09,678 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9558, 2.0340, 1.7538, 2.5798, 1.2078, 1.5703, 1.8853, 2.0208], + device='cuda:0'), covar=tensor([0.0662, 0.0787, 0.0877, 0.0377, 0.1039, 0.1229, 0.0817, 0.0790], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0214, 0.0202, 0.0246, 0.0249, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 02:50:14,767 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:50:23,099 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.0871, 2.9746, 2.8020, 1.6875, 2.7092, 2.8329, 2.7283, 2.7069], + device='cuda:0'), covar=tensor([0.1212, 0.0877, 0.1226, 0.4502, 0.1196, 0.1345, 0.1726, 0.1139], + device='cuda:0'), in_proj_covar=tensor([0.0549, 0.0462, 0.0454, 0.0563, 0.0446, 0.0470, 0.0445, 0.0414], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:50:23,183 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1792, 1.3147, 1.5209, 1.2748, 0.8641, 1.3342, 1.2380, 1.0709], + device='cuda:0'), covar=tensor([0.0664, 0.1218, 0.1603, 0.1419, 0.0573, 0.1419, 0.0698, 0.0683], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 02:50:34,193 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.385e+02 2.900e+02 3.377e+02 5.605e+02, threshold=5.800e+02, percent-clipped=0.0 +2023-02-09 02:50:34,214 INFO [train.py:901] (0/4) Epoch 29, batch 7200, loss[loss=0.1804, simple_loss=0.2637, pruned_loss=0.04854, over 8086.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2805, pruned_loss=0.0565, over 1613352.73 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:10,566 INFO [train.py:901] (0/4) Epoch 29, batch 7250, loss[loss=0.2027, simple_loss=0.289, pruned_loss=0.05822, over 8025.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2813, pruned_loss=0.05646, over 1616070.20 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:11,761 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-09 02:51:36,858 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8373, 2.3145, 3.7364, 1.8545, 1.9429, 3.6822, 0.7785, 2.1054], + device='cuda:0'), covar=tensor([0.1255, 0.1081, 0.0202, 0.1577, 0.2131, 0.0261, 0.1958, 0.1315], + device='cuda:0'), in_proj_covar=tensor([0.0204, 0.0207, 0.0138, 0.0227, 0.0281, 0.0149, 0.0174, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 02:51:46,283 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.384e+02 2.959e+02 3.398e+02 1.041e+03, threshold=5.918e+02, percent-clipped=4.0 +2023-02-09 02:51:46,303 INFO [train.py:901] (0/4) Epoch 29, batch 7300, loss[loss=0.1774, simple_loss=0.2607, pruned_loss=0.04699, over 7714.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.281, pruned_loss=0.05662, over 1615240.84 frames. 
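In the `INFO [optim.py:369] Clipping_scale=2.0, grad-norm quartiles ...` entries, the five numbers are the (min, 25%, 50%, 75%, max) quantiles of recent gradient norms, and the logged threshold works out to `Clipping_scale` times the median (for example 2.0 × 2.860e+02 = 5.720e+02 a few entries up). The following is a sketch consistent with those numbers, with illustrative names rather than icefall's actual optimizer API:

```python
import torch

class GradNormClipper:
    """Keeps a window of recent grad norms; clips at scale * median."""
    def __init__(self, clipping_scale: float = 2.0, window: int = 100):
        self.scale, self.window, self.norms = clipping_scale, window, []

    def clip_(self, params) -> None:
        params = [p for p in params if p.grad is not None]
        norm = torch.norm(torch.stack([p.grad.norm() for p in params])).item()
        self.norms = (self.norms + [norm])[-self.window:]
        t = torch.tensor(self.norms)
        q = [t.quantile(v).item() for v in (0.0, 0.25, 0.5, 0.75, 1.0)]
        threshold = self.scale * q[2]  # Clipping_scale * median
        pct = 100.0 * (t > threshold).float().mean().item()
        print(f"Clipping_scale={self.scale}, grad-norm quartiles "
              f"{' '.join(f'{v:.3e}' for v in q)}, "
              f"threshold={threshold:.3e}, percent-clipped={pct}")
        if norm > threshold:  # rescale this step's gradients
            for p in params:
                p.grad.mul_(threshold / norm)

m = torch.nn.Linear(4, 4)
m(torch.randn(2, 4)).sum().backward()
GradNormClipper().clip_(m.parameters())
```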
], batch size: 18, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:46,371 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233621.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:52:13,025 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233657.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:52:22,374 INFO [train.py:901] (0/4) Epoch 29, batch 7350, loss[loss=0.1858, simple_loss=0.2756, pruned_loss=0.04805, over 7971.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2798, pruned_loss=0.05625, over 1613875.80 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:52:27,578 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4210, 1.6763, 2.1933, 1.3726, 1.5647, 1.6876, 1.5445, 1.6111], + device='cuda:0'), covar=tensor([0.2107, 0.2678, 0.1037, 0.4803, 0.2158, 0.3642, 0.2574, 0.2363], + device='cuda:0'), in_proj_covar=tensor([0.0546, 0.0644, 0.0569, 0.0678, 0.0667, 0.0619, 0.0570, 0.0651], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:52:28,055 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 02:52:42,955 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0339, 1.8179, 6.2189, 2.2718, 5.6506, 5.1838, 5.7236, 5.6503], + device='cuda:0'), covar=tensor([0.0476, 0.4696, 0.0321, 0.4225, 0.1005, 0.0938, 0.0490, 0.0527], + device='cuda:0'), in_proj_covar=tensor([0.0695, 0.0671, 0.0756, 0.0663, 0.0755, 0.0642, 0.0650, 0.0733], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:52:48,257 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 02:52:58,118 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.663e+02 3.074e+02 4.100e+02 9.512e+02, threshold=6.147e+02, percent-clipped=7.0 +2023-02-09 02:52:58,138 INFO [train.py:901] (0/4) Epoch 29, batch 7400, loss[loss=0.1905, simple_loss=0.2707, pruned_loss=0.05514, over 7971.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2798, pruned_loss=0.05623, over 1610530.89 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:52:59,689 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:03,233 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:08,960 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233736.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:18,939 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233748.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:21,107 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233751.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:32,423 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 02:53:36,092 INFO [train.py:901] (0/4) Epoch 29, batch 7450, loss[loss=0.2089, simple_loss=0.3072, pruned_loss=0.05528, over 8106.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2799, pruned_loss=0.05658, over 1605796.04 frames. 
], batch size: 23, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:53:39,820 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:42,697 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7129, 1.7525, 2.0571, 1.7955, 1.1489, 1.7548, 2.2841, 1.9774], + device='cuda:0'), covar=tensor([0.0514, 0.1224, 0.1613, 0.1374, 0.0594, 0.1446, 0.0617, 0.0627], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0115, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 02:54:12,667 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.228e+02 2.637e+02 3.402e+02 7.399e+02, threshold=5.273e+02, percent-clipped=2.0 +2023-02-09 02:54:12,687 INFO [train.py:901] (0/4) Epoch 29, batch 7500, loss[loss=0.2013, simple_loss=0.2905, pruned_loss=0.05606, over 8467.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2806, pruned_loss=0.05673, over 1608222.29 frames. ], batch size: 25, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:54:48,750 INFO [train.py:901] (0/4) Epoch 29, batch 7550, loss[loss=0.1721, simple_loss=0.2625, pruned_loss=0.04089, over 8039.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2802, pruned_loss=0.0565, over 1606517.10 frames. ], batch size: 22, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:55:00,149 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0178, 2.1833, 1.8192, 2.7821, 1.3613, 1.6806, 2.0979, 2.1963], + device='cuda:0'), covar=tensor([0.0702, 0.0789, 0.0869, 0.0324, 0.1055, 0.1221, 0.0734, 0.0710], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0195, 0.0244, 0.0213, 0.0202, 0.0246, 0.0249, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 02:55:24,356 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.424e+02 2.935e+02 3.534e+02 7.288e+02, threshold=5.870e+02, percent-clipped=3.0 +2023-02-09 02:55:24,377 INFO [train.py:901] (0/4) Epoch 29, batch 7600, loss[loss=0.2332, simple_loss=0.3064, pruned_loss=0.08003, over 7135.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05633, over 1611218.29 frames. ], batch size: 71, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:55:24,646 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3400, 2.2195, 3.2535, 2.4921, 2.9282, 2.1850, 2.1814, 2.2245], + device='cuda:0'), covar=tensor([0.5841, 0.5798, 0.2265, 0.4531, 0.3170, 0.4357, 0.2550, 0.5149], + device='cuda:0'), in_proj_covar=tensor([0.0974, 0.1037, 0.0845, 0.1009, 0.1033, 0.0945, 0.0781, 0.0859], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 02:56:01,032 INFO [train.py:901] (0/4) Epoch 29, batch 7650, loss[loss=0.1686, simple_loss=0.2547, pruned_loss=0.04129, over 7918.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2803, pruned_loss=0.05587, over 1612928.38 frames. 
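The `attn_weights_entropy = tensor([...])` dumps here are per-head diagnostics: eight values, one per attention head. A plausible reading is the entropy of each head's attention distribution averaged over query positions (low values flag heads that collapse onto a few frames); the accompanying `in_proj_covar`/`out_proj_covar` tensors are separate projection statistics not covered by this sketch.

```python
import torch

def attn_entropy(attn: torch.Tensor) -> torch.Tensor:
    """attn: (num_heads, tgt_len, src_len); each row sums to 1."""
    p = attn.clamp_min(1e-20)                       # avoid log(0)
    return -(p * p.log()).sum(dim=-1).mean(dim=-1)  # one value per head

attn = torch.softmax(torch.randn(8, 10, 10), dim=-1)
print(attn_entropy(attn))  # 8 per-head entropies, like the tensors above
```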
], batch size: 20, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:56:12,976 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8693, 1.4151, 4.0154, 1.4108, 3.6252, 3.3274, 3.6614, 3.5706], + device='cuda:0'), covar=tensor([0.0646, 0.4566, 0.0606, 0.4501, 0.1052, 0.1074, 0.0661, 0.0704], + device='cuda:0'), in_proj_covar=tensor([0.0693, 0.0670, 0.0754, 0.0662, 0.0751, 0.0641, 0.0649, 0.0728], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 02:56:16,585 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233992.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:22,137 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-234000.pt +2023-02-09 02:56:23,675 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234001.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:35,521 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234017.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:38,037 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.208e+02 2.731e+02 3.331e+02 6.993e+02, threshold=5.462e+02, percent-clipped=2.0 +2023-02-09 02:56:38,057 INFO [train.py:901] (0/4) Epoch 29, batch 7700, loss[loss=0.1589, simple_loss=0.246, pruned_loss=0.0359, over 7451.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2811, pruned_loss=0.05596, over 1611192.28 frames. ], batch size: 17, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:56:49,496 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 02:56:53,148 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234043.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:12,864 INFO [train.py:901] (0/4) Epoch 29, batch 7750, loss[loss=0.1715, simple_loss=0.2449, pruned_loss=0.04905, over 5995.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2804, pruned_loss=0.056, over 1610937.67 frames. ], batch size: 13, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:57:13,646 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234072.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:45,241 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:48,454 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.355e+02 2.809e+02 3.505e+02 7.382e+02, threshold=5.617e+02, percent-clipped=2.0 +2023-02-09 02:57:48,495 INFO [train.py:901] (0/4) Epoch 29, batch 7800, loss[loss=0.1906, simple_loss=0.2827, pruned_loss=0.04924, over 8288.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2804, pruned_loss=0.05611, over 1608957.17 frames. ], batch size: 23, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:58:24,432 INFO [train.py:901] (0/4) Epoch 29, batch 7850, loss[loss=0.1637, simple_loss=0.2528, pruned_loss=0.03729, over 7965.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2798, pruned_loss=0.05577, over 1607945.35 frames. 
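The `INFO [checkpoint.py:75] Saving checkpoint to .../checkpoint-234000.pt` entry above, together with `checkpoint-236000.pt` further down and the `epoch-29.pt` save just below, suggests a numbered checkpoint every 2000 batches plus one checkpoint per epoch. A hedged sketch of that cadence follows; the helper names are illustrative, not icefall's checkpoint utilities:

```python
from pathlib import Path
import torch

EXP_DIR = Path("pruned_transducer_stateless7_streaming/exp/v1")
SAVE_EVERY_N_BATCHES = 2000  # inferred from the logged batch counts

def maybe_save_batch_checkpoint(model: torch.nn.Module, batch_count: int) -> None:
    if batch_count % SAVE_EVERY_N_BATCHES == 0:
        EXP_DIR.mkdir(parents=True, exist_ok=True)
        torch.save(model.state_dict(), EXP_DIR / f"checkpoint-{batch_count}.pt")

def save_epoch_checkpoint(model: torch.nn.Module, epoch: int) -> None:
    EXP_DIR.mkdir(parents=True, exist_ok=True)
    torch.save(model.state_dict(), EXP_DIR / f"epoch-{epoch}.pt")
```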
], batch size: 21, lr: 2.57e-03, grad_scale: 8.0
+2023-02-09 02:58:36,023 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234187.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 02:58:58,932 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234220.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 02:58:59,506 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.264e+02 2.745e+02 3.500e+02 8.048e+02, threshold=5.490e+02, percent-clipped=4.0
+2023-02-09 02:58:59,527 INFO [train.py:901] (0/4) Epoch 29, batch 7900, loss[loss=0.2015, simple_loss=0.2842, pruned_loss=0.0594, over 8661.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2799, pruned_loss=0.05592, over 1605850.28 frames. ], batch size: 34, lr: 2.57e-03, grad_scale: 8.0
+2023-02-09 02:59:19,152 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234249.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 02:59:34,070 INFO [train.py:901] (0/4) Epoch 29, batch 7950, loss[loss=0.2022, simple_loss=0.2935, pruned_loss=0.05548, over 8454.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.28, pruned_loss=0.05637, over 1608682.20 frames. ], batch size: 27, lr: 2.57e-03, grad_scale: 8.0
+2023-02-09 02:59:40,221 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.25 vs. limit=5.0
+2023-02-09 03:00:10,399 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.366e+02 2.676e+02 3.633e+02 8.832e+02, threshold=5.352e+02, percent-clipped=6.0
+2023-02-09 03:00:10,419 INFO [train.py:901] (0/4) Epoch 29, batch 8000, loss[loss=0.1868, simple_loss=0.2726, pruned_loss=0.05044, over 8197.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2804, pruned_loss=0.05648, over 1610456.57 frames. ], batch size: 23, lr: 2.57e-03, grad_scale: 8.0
+2023-02-09 03:00:44,627 INFO [train.py:901] (0/4) Epoch 29, batch 8050, loss[loss=0.2566, simple_loss=0.3258, pruned_loss=0.09373, over 7192.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2789, pruned_loss=0.05623, over 1587524.57 frames. ], batch size: 73, lr: 2.57e-03, grad_scale: 8.0
+2023-02-09 03:00:45,534 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234372.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:00:55,734 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234387.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:01:02,586 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234397.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:01:07,577 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-29.pt
+2023-02-09 03:01:20,130 WARNING [train.py:1067] (0/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-09 03:01:23,764 INFO [train.py:901] (0/4) Epoch 30, batch 0, loss[loss=0.2073, simple_loss=0.2937, pruned_loss=0.06046, over 8037.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2937, pruned_loss=0.06046, over 8037.00 frames. ], batch size: 22, lr: 2.53e-03, grad_scale: 8.0
+2023-02-09 03:01:23,764 INFO [train.py:926] (0/4) Computing validation loss
+2023-02-09 03:01:35,943 INFO [train.py:935] (0/4) Epoch 30, validation: loss=0.1704, simple_loss=0.27, pruned_loss=0.03537, over 944034.00 frames.
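At the epoch boundary above, the trainer saves `epoch-29.pt`, starts epoch 30, and immediately computes a validation loss over the full dev set (about 944k frames here) before resuming training. A minimal sketch of that step; `compute_loss` is a hypothetical per-batch function returning a summed loss and a frame count:

```python
import torch

@torch.no_grad()
def validate(model: torch.nn.Module, valid_loader, compute_loss) -> float:
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    for batch in valid_loader:
        loss, num_frames = compute_loss(model, batch)
        tot_loss += float(loss)
        tot_frames += num_frames
    model.train()
    return tot_loss / tot_frames  # e.g. the validation loss=0.1704 above
```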
+2023-02-09 03:01:35,944 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6641MB +2023-02-09 03:01:47,823 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.332e+02 2.743e+02 3.464e+02 7.498e+02, threshold=5.486e+02, percent-clipped=3.0 +2023-02-09 03:01:51,469 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9942, 2.3435, 1.8958, 2.8816, 1.4111, 1.6784, 2.0272, 2.2555], + device='cuda:0'), covar=tensor([0.0718, 0.0695, 0.0856, 0.0328, 0.1090, 0.1231, 0.0846, 0.0751], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0246, 0.0214, 0.0203, 0.0247, 0.0251, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 03:01:52,029 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-09 03:02:04,490 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234443.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:12,501 INFO [train.py:901] (0/4) Epoch 30, batch 50, loss[loss=0.1856, simple_loss=0.266, pruned_loss=0.05259, over 8090.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2799, pruned_loss=0.05627, over 362826.06 frames. ], batch size: 21, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:02:23,117 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234468.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:25,364 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3674, 1.5841, 1.5956, 1.0792, 1.6665, 1.2743, 0.2578, 1.5735], + device='cuda:0'), covar=tensor([0.0579, 0.0468, 0.0412, 0.0612, 0.0491, 0.1130, 0.1053, 0.0340], + device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0418, 0.0373, 0.0466, 0.0401, 0.0558, 0.0407, 0.0445], + device='cuda:0'), out_proj_covar=tensor([1.2634e-04, 1.0819e-04, 9.7132e-05, 1.2162e-04, 1.0500e-04, 1.5531e-04, + 1.0858e-04, 1.1626e-04], device='cuda:0') +2023-02-09 03:02:28,117 WARNING [train.py:1067] (0/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-09 03:02:49,607 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234502.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:50,908 INFO [train.py:901] (0/4) Epoch 30, batch 100, loss[loss=0.1854, simple_loss=0.2796, pruned_loss=0.04561, over 8505.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2808, pruned_loss=0.05533, over 643515.93 frames. ], batch size: 28, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:02:54,593 WARNING [train.py:1067] (0/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-09 03:03:03,349 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.294e+02 2.794e+02 3.449e+02 7.855e+02, threshold=5.588e+02, percent-clipped=7.0 +2023-02-09 03:03:28,104 INFO [train.py:901] (0/4) Epoch 30, batch 150, loss[loss=0.2164, simple_loss=0.3061, pruned_loss=0.06339, over 8495.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2815, pruned_loss=0.05617, over 862624.66 frames. ], batch size: 26, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:03:29,553 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. 
limit=2.0 +2023-02-09 03:03:35,358 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:03:56,912 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234593.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:04:04,596 INFO [train.py:901] (0/4) Epoch 30, batch 200, loss[loss=0.2299, simple_loss=0.3148, pruned_loss=0.0725, over 8601.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2822, pruned_loss=0.05598, over 1032107.50 frames. ], batch size: 34, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:04:07,725 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-09 03:04:16,947 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.329e+02 2.797e+02 3.759e+02 1.341e+03, threshold=5.593e+02, percent-clipped=8.0 +2023-02-09 03:04:40,212 INFO [train.py:901] (0/4) Epoch 30, batch 250, loss[loss=0.191, simple_loss=0.2654, pruned_loss=0.05833, over 7977.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2809, pruned_loss=0.05543, over 1161714.73 frames. ], batch size: 21, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:04:48,444 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-09 03:04:50,788 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7406, 1.5806, 2.4708, 1.3913, 1.2110, 2.3883, 0.4361, 1.4633], + device='cuda:0'), covar=tensor([0.1579, 0.1298, 0.0327, 0.1261, 0.2591, 0.0420, 0.1909, 0.1376], + device='cuda:0'), in_proj_covar=tensor([0.0203, 0.0208, 0.0139, 0.0227, 0.0280, 0.0149, 0.0174, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 03:04:57,629 WARNING [train.py:1067] (0/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-09 03:04:58,551 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234679.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:04,648 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234687.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:16,625 INFO [train.py:901] (0/4) Epoch 30, batch 300, loss[loss=0.1753, simple_loss=0.2529, pruned_loss=0.04884, over 7714.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2822, pruned_loss=0.05646, over 1267197.18 frames. 
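The `INFO [scaling.py:679] Whitening: num_groups=..., metric=M vs. limit=L` lines are diagnostics from a module that nudges channel activations toward an identity (white) covariance; a message appears when the measured metric approaches its limit. The exact formula lives in icefall's scaling.py; the proxy below is only an assumption that matches the behavior (it equals 1.0 for perfectly white activations and grows with covariance anisotropy):

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    """x: (num_frames, num_channels); channels split into equal groups."""
    metrics = []
    for g in x.chunk(num_groups, dim=1):
        g = g - g.mean(dim=0)
        cov = (g.T @ g) / g.shape[0]  # per-group covariance
        n = cov.shape[0]
        metrics.append((cov @ cov).trace() / n / (cov.trace() / n) ** 2)
    return float(torch.stack(metrics).mean())

x = torch.randn(2000, 96)                 # near-white activations
print(whitening_metric(x, num_groups=8))  # ~1.0, vs. limit=2.0
```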
], batch size: 18, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:05:19,753 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234708.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:29,382 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.381e+02 2.888e+02 3.640e+02 7.253e+02, threshold=5.776e+02, percent-clipped=4.0 +2023-02-09 03:05:45,734 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:46,523 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8849, 2.1276, 2.1313, 1.5118, 2.2766, 1.6825, 0.6870, 2.1398], + device='cuda:0'), covar=tensor([0.0623, 0.0365, 0.0374, 0.0656, 0.0497, 0.0976, 0.1016, 0.0339], + device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0416, 0.0372, 0.0465, 0.0400, 0.0555, 0.0405, 0.0443], + device='cuda:0'), out_proj_covar=tensor([1.2618e-04, 1.0768e-04, 9.6869e-05, 1.2151e-04, 1.0482e-04, 1.5420e-04, + 1.0793e-04, 1.1575e-04], device='cuda:0') +2023-02-09 03:05:52,647 INFO [train.py:901] (0/4) Epoch 30, batch 350, loss[loss=0.2019, simple_loss=0.2934, pruned_loss=0.0552, over 8354.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05701, over 1340121.49 frames. ], batch size: 24, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:05:55,588 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234758.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:14,224 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234783.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:27,364 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234800.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:30,086 INFO [train.py:901] (0/4) Epoch 30, batch 400, loss[loss=0.172, simple_loss=0.261, pruned_loss=0.04149, over 8129.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.282, pruned_loss=0.05721, over 1402342.04 frames. ], batch size: 22, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:06:42,231 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.513e+02 2.926e+02 3.697e+02 1.204e+03, threshold=5.852e+02, percent-clipped=7.0 +2023-02-09 03:07:06,512 INFO [train.py:901] (0/4) Epoch 30, batch 450, loss[loss=0.2098, simple_loss=0.2947, pruned_loss=0.06246, over 8235.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2828, pruned_loss=0.05726, over 1449261.75 frames. ], batch size: 22, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:07:38,964 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234900.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:07:42,332 INFO [train.py:901] (0/4) Epoch 30, batch 500, loss[loss=0.176, simple_loss=0.2614, pruned_loss=0.04532, over 8109.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2822, pruned_loss=0.057, over 1490830.31 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:07:54,773 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.393e+02 2.948e+02 3.833e+02 6.284e+02, threshold=5.896e+02, percent-clipped=1.0 +2023-02-09 03:08:04,868 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234935.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:18,296 INFO [train.py:901] (0/4) Epoch 30, batch 550, loss[loss=0.1888, simple_loss=0.2802, pruned_loss=0.04866, over 8030.00 frames. 
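The `grad_scale` field at the end of each train.py:901 entry is the dynamic loss scale of mixed-precision training; it doubles from 8.0 to 16.0 at batch 600 just below, which is what happens after a long overflow-free stretch. Here is a minimal sketch using PyTorch's stock `torch.cuda.amp.GradScaler` (it assumes a CUDA device, as in this run; the `growth_interval` value is illustrative):

```python
import torch

model = torch.nn.Linear(10, 10).cuda()
opt = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(init_scale=8.0, growth_factor=2.0,
                                   growth_interval=600)

x = torch.randn(4, 10, device="cuda")
with torch.cuda.amp.autocast():
    loss = model(x).pow(2).mean()
scaler.scale(loss).backward()  # gradients carry the current grad_scale
scaler.step(opt)               # unscales; skips the step on inf/nan grads
scaler.update()                # grows or shrinks the scale
print(scaler.get_scale())      # the value logged as grad_scale
```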
], tot_loss[loss=0.1983, simple_loss=0.2821, pruned_loss=0.05719, over 1518835.36 frames. ], batch size: 22, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:08:22,859 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234960.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:26,380 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234964.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:34,795 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234976.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:36,247 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234978.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:44,125 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234989.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:54,540 INFO [train.py:901] (0/4) Epoch 30, batch 600, loss[loss=0.1781, simple_loss=0.2587, pruned_loss=0.04879, over 7530.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.05754, over 1541779.07 frames. ], batch size: 18, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:09:06,006 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.485e+02 2.961e+02 3.544e+02 6.861e+02, threshold=5.922e+02, percent-clipped=1.0 +2023-02-09 03:09:10,820 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-09 03:09:13,646 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235031.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:09:29,975 INFO [train.py:901] (0/4) Epoch 30, batch 650, loss[loss=0.2099, simple_loss=0.2892, pruned_loss=0.06531, over 8676.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2811, pruned_loss=0.05663, over 1554121.66 frames. ], batch size: 39, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:09:54,561 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235088.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:05,667 INFO [train.py:901] (0/4) Epoch 30, batch 700, loss[loss=0.1978, simple_loss=0.2859, pruned_loss=0.05491, over 8445.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.05693, over 1565056.16 frames. ], batch size: 27, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:10:12,457 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-09 03:10:17,681 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.477e+02 3.055e+02 3.959e+02 7.285e+02, threshold=6.109e+02, percent-clipped=6.0 +2023-02-09 03:10:33,428 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:34,922 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235146.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:41,067 INFO [train.py:901] (0/4) Epoch 30, batch 750, loss[loss=0.1833, simple_loss=0.2652, pruned_loss=0.05067, over 8068.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.05689, over 1581164.82 frames. ], batch size: 21, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:10:59,030 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625 +2023-02-09 03:10:59,130 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8029, 3.7904, 3.4769, 1.9211, 3.3639, 3.6310, 3.3456, 3.4628], + device='cuda:0'), covar=tensor([0.0924, 0.0681, 0.1081, 0.4458, 0.1032, 0.0976, 0.1442, 0.0870], + device='cuda:0'), in_proj_covar=tensor([0.0551, 0.0461, 0.0451, 0.0562, 0.0445, 0.0470, 0.0446, 0.0414], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:11:08,059 WARNING [train.py:1067] (0/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-09 03:11:17,105 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:11:17,666 INFO [train.py:901] (0/4) Epoch 30, batch 800, loss[loss=0.1642, simple_loss=0.2657, pruned_loss=0.03138, over 8477.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.05712, over 1586358.78 frames. ], batch size: 25, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:11:30,476 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.417e+02 2.836e+02 3.328e+02 8.160e+02, threshold=5.671e+02, percent-clipped=2.0 +2023-02-09 03:11:46,746 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235244.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:11:53,481 INFO [train.py:901] (0/4) Epoch 30, batch 850, loss[loss=0.1765, simple_loss=0.2616, pruned_loss=0.04569, over 8447.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2808, pruned_loss=0.05665, over 1592312.02 frames. ], batch size: 25, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:11:57,047 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235259.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:12:07,386 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235273.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:12:20,601 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4006, 4.4626, 4.0228, 1.9658, 3.9069, 4.1098, 4.0051, 3.9094], + device='cuda:0'), covar=tensor([0.0909, 0.0628, 0.1258, 0.4720, 0.1050, 0.0941, 0.1362, 0.0753], + device='cuda:0'), in_proj_covar=tensor([0.0547, 0.0458, 0.0448, 0.0559, 0.0443, 0.0467, 0.0443, 0.0411], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:12:25,088 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-09 03:12:29,564 INFO [train.py:901] (0/4) Epoch 30, batch 900, loss[loss=0.1756, simple_loss=0.2462, pruned_loss=0.05247, over 7700.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2795, pruned_loss=0.056, over 1595774.43 frames. 
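Each train.py:901 entry pairs the current batch's `loss[..., over F frames.]` with a slowly moving `tot_loss[..., over ~1.6e6 frames.]`. That reads as a frame-weighted running average with exponential decay; the decay constant below is an assumption, but 0.995 with roughly 8k frames per batch gives a steady-state window of about 1.6M frames, which matches the logged counts.

```python
class RunningLoss:
    """Frame-weighted running average with exponential decay (assumed)."""
    def __init__(self, decay: float = 0.995):
        self.decay, self.loss_sum, self.frames = decay, 0.0, 0.0

    def update(self, batch_loss: float, batch_frames: float) -> float:
        self.loss_sum = self.decay * self.loss_sum + batch_loss * batch_frames
        self.frames = self.decay * self.frames + batch_frames
        return self.loss_sum / self.frames  # the logged tot_loss

avg = RunningLoss()
print(avg.update(0.1756, 7700.0))  # first batch: equals the batch loss
```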
], batch size: 18, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:12:41,045 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235320.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:12:41,595 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.438e+02 3.006e+02 3.865e+02 6.238e+02, threshold=6.012e+02, percent-clipped=6.0 +2023-02-09 03:12:42,348 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235322.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:13:05,247 INFO [train.py:901] (0/4) Epoch 30, batch 950, loss[loss=0.171, simple_loss=0.2557, pruned_loss=0.04315, over 7803.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2793, pruned_loss=0.05583, over 1599378.95 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:13:08,753 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235359.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:13:21,825 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235378.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:13:32,455 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5605, 2.4003, 1.8674, 2.2590, 2.0533, 1.6181, 1.9828, 2.0866], + device='cuda:0'), covar=tensor([0.1565, 0.0465, 0.1280, 0.0626, 0.0822, 0.1757, 0.1118, 0.1038], + device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0243, 0.0344, 0.0314, 0.0302, 0.0349, 0.0350, 0.0321], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:13:32,976 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-09 03:13:39,419 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235402.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:13:40,150 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.5449, 2.4417, 3.1056, 2.6228, 3.1045, 2.6498, 2.5314, 2.0022], + device='cuda:0'), covar=tensor([0.5552, 0.5187, 0.2350, 0.4245, 0.2808, 0.3328, 0.1941, 0.6041], + device='cuda:0'), in_proj_covar=tensor([0.0978, 0.1045, 0.0853, 0.1015, 0.1038, 0.0949, 0.0782, 0.0865], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:13:40,609 INFO [train.py:901] (0/4) Epoch 30, batch 1000, loss[loss=0.1707, simple_loss=0.2535, pruned_loss=0.04399, over 7792.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2796, pruned_loss=0.05626, over 1602075.43 frames. ], batch size: 19, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:13:52,258 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.479e+02 3.055e+02 4.205e+02 7.814e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-09 03:13:56,469 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235427.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:02,538 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235435.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:03,948 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235437.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:08,428 WARNING [train.py:1067] (0/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-09 03:14:15,931 INFO [train.py:901] (0/4) Epoch 30, batch 1050, loss[loss=0.1591, simple_loss=0.2449, pruned_loss=0.03661, over 8075.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05653, over 1606725.03 frames. ], batch size: 21, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:14:19,405 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235459.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:21,148 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-09 03:14:36,648 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:41,816 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-09 03:14:50,593 INFO [train.py:901] (0/4) Epoch 30, batch 1100, loss[loss=0.1506, simple_loss=0.2313, pruned_loss=0.03493, over 7791.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05654, over 1610569.37 frames. ], batch size: 19, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:14:59,101 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235515.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:15:03,872 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.444e+02 3.135e+02 3.900e+02 6.752e+02, threshold=6.270e+02, percent-clipped=2.0 +2023-02-09 03:15:04,840 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7765, 2.0389, 2.0618, 1.3847, 2.1555, 1.6180, 0.5849, 1.9683], + device='cuda:0'), covar=tensor([0.0674, 0.0452, 0.0398, 0.0683, 0.0504, 0.1057, 0.1118, 0.0321], + device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0421, 0.0375, 0.0470, 0.0404, 0.0561, 0.0410, 0.0448], + device='cuda:0'), out_proj_covar=tensor([1.2799e-04, 1.0897e-04, 9.7569e-05, 1.2288e-04, 1.0573e-04, 1.5611e-04, + 1.0930e-04, 1.1726e-04], device='cuda:0') +2023-02-09 03:15:09,972 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.32 vs. limit=5.0 +2023-02-09 03:15:12,933 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0 +2023-02-09 03:15:17,351 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235540.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:15:26,820 INFO [train.py:901] (0/4) Epoch 30, batch 1150, loss[loss=0.2247, simple_loss=0.2992, pruned_loss=0.07514, over 8195.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2804, pruned_loss=0.05593, over 1617035.56 frames. ], batch size: 48, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:15:35,846 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-09 03:15:39,059 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.67 vs. limit=2.0 +2023-02-09 03:16:03,360 INFO [train.py:901] (0/4) Epoch 30, batch 1200, loss[loss=0.1983, simple_loss=0.2877, pruned_loss=0.05443, over 8378.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.281, pruned_loss=0.0562, over 1618357.61 frames. 
], batch size: 48, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:16:11,480 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235615.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:16:12,739 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235617.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:16:16,062 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.378e+02 2.945e+02 3.640e+02 8.540e+02, threshold=5.890e+02, percent-clipped=4.0 +2023-02-09 03:16:30,043 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235640.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:16:32,813 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3656, 2.1501, 2.6754, 2.3381, 2.6840, 2.3647, 2.2672, 1.5579], + device='cuda:0'), covar=tensor([0.5541, 0.4734, 0.1961, 0.3552, 0.2332, 0.3175, 0.1937, 0.5086], + device='cuda:0'), in_proj_covar=tensor([0.0970, 0.1037, 0.0846, 0.1008, 0.1030, 0.0943, 0.0777, 0.0860], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:16:39,494 INFO [train.py:901] (0/4) Epoch 30, batch 1250, loss[loss=0.1926, simple_loss=0.2728, pruned_loss=0.05621, over 7239.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2819, pruned_loss=0.05668, over 1617900.65 frames. ], batch size: 16, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:17:06,201 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235691.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:07,630 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235693.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:10,373 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235697.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:15,251 INFO [train.py:901] (0/4) Epoch 30, batch 1300, loss[loss=0.1986, simple_loss=0.2974, pruned_loss=0.04992, over 8486.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2823, pruned_loss=0.05683, over 1619721.29 frames. 
], batch size: 29, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:17:23,814 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235716.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:25,211 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235718.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:27,378 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2181, 2.1280, 2.6664, 2.2846, 2.6427, 2.3201, 2.1744, 1.5727], + device='cuda:0'), covar=tensor([0.6022, 0.5076, 0.2208, 0.3950, 0.2786, 0.3349, 0.2016, 0.5665], + device='cuda:0'), in_proj_covar=tensor([0.0964, 0.1030, 0.0841, 0.1002, 0.1025, 0.0939, 0.0773, 0.0855], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:17:27,735 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.444e+02 2.774e+02 3.314e+02 6.214e+02, threshold=5.548e+02, percent-clipped=2.0 +2023-02-09 03:17:27,829 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235722.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:17:34,617 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235732.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:35,440 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. limit=5.0 +2023-02-09 03:17:50,119 INFO [train.py:901] (0/4) Epoch 30, batch 1350, loss[loss=0.1677, simple_loss=0.2434, pruned_loss=0.04593, over 7549.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2828, pruned_loss=0.05682, over 1621018.32 frames. ], batch size: 18, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:18:26,589 INFO [train.py:901] (0/4) Epoch 30, batch 1400, loss[loss=0.1669, simple_loss=0.2449, pruned_loss=0.04442, over 7660.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2809, pruned_loss=0.0563, over 1618278.91 frames. ], batch size: 19, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:18:30,253 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235809.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:18:39,093 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.338e+02 2.741e+02 3.583e+02 7.907e+02, threshold=5.482e+02, percent-clipped=6.0 +2023-02-09 03:18:49,570 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235837.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:19:00,849 INFO [train.py:901] (0/4) Epoch 30, batch 1450, loss[loss=0.1797, simple_loss=0.2774, pruned_loss=0.04097, over 8278.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2806, pruned_loss=0.05593, over 1616056.42 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:19:07,593 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 03:19:22,545 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. limit=2.0 +2023-02-09 03:19:38,202 INFO [train.py:901] (0/4) Epoch 30, batch 1500, loss[loss=0.1915, simple_loss=0.2818, pruned_loss=0.05059, over 8435.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2809, pruned_loss=0.05601, over 1617228.40 frames. ], batch size: 27, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:19:42,088 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. 
limit=2.0 +2023-02-09 03:19:51,220 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.306e+02 2.900e+02 3.560e+02 8.272e+02, threshold=5.801e+02, percent-clipped=7.0 +2023-02-09 03:19:56,742 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-09 03:20:14,193 INFO [train.py:901] (0/4) Epoch 30, batch 1550, loss[loss=0.1904, simple_loss=0.282, pruned_loss=0.04938, over 8646.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2814, pruned_loss=0.05597, over 1620647.57 frames. ], batch size: 34, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:20:17,116 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2851, 1.6362, 4.3037, 1.8736, 2.4658, 4.8549, 5.0367, 4.2033], + device='cuda:0'), covar=tensor([0.1123, 0.1985, 0.0280, 0.2017, 0.1256, 0.0197, 0.0552, 0.0587], + device='cuda:0'), in_proj_covar=tensor([0.0309, 0.0329, 0.0296, 0.0329, 0.0327, 0.0282, 0.0447, 0.0310], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 03:20:22,061 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 03:20:27,562 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8766, 1.8553, 2.5143, 1.5957, 1.4477, 2.5130, 0.4707, 1.5506], + device='cuda:0'), covar=tensor([0.1473, 0.1063, 0.0310, 0.1190, 0.2225, 0.0302, 0.1829, 0.1366], + device='cuda:0'), in_proj_covar=tensor([0.0203, 0.0208, 0.0139, 0.0226, 0.0280, 0.0149, 0.0175, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 03:20:38,887 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235988.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:20:47,546 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-236000.pt +2023-02-09 03:20:51,300 INFO [train.py:901] (0/4) Epoch 30, batch 1600, loss[loss=0.189, simple_loss=0.2697, pruned_loss=0.05422, over 8077.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2813, pruned_loss=0.05574, over 1621820.12 frames. 
], batch size: 21, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:20:57,863 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236013.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:21:00,115 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2784, 2.0693, 1.6197, 1.9605, 1.7270, 1.4842, 1.6713, 1.7173], + device='cuda:0'), covar=tensor([0.1399, 0.0472, 0.1401, 0.0581, 0.0808, 0.1577, 0.0982, 0.0964], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0246, 0.0349, 0.0317, 0.0304, 0.0352, 0.0354, 0.0325], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:21:04,933 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.693e+02 3.134e+02 4.092e+02 8.333e+02, threshold=6.267e+02, percent-clipped=7.0 +2023-02-09 03:21:15,461 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236036.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:21:19,095 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236041.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:21:28,393 INFO [train.py:901] (0/4) Epoch 30, batch 1650, loss[loss=0.1616, simple_loss=0.2419, pruned_loss=0.04066, over 7452.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2807, pruned_loss=0.05559, over 1619205.66 frames. ], batch size: 17, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:21:56,809 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236093.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:22:04,824 INFO [train.py:901] (0/4) Epoch 30, batch 1700, loss[loss=0.1749, simple_loss=0.2515, pruned_loss=0.04917, over 7541.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2802, pruned_loss=0.05504, over 1619674.91 frames. ], batch size: 18, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:22:15,388 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236118.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:22:17,735 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.438e+02 2.804e+02 3.459e+02 5.840e+02, threshold=5.608e+02, percent-clipped=0.0 +2023-02-09 03:22:34,518 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.59 vs. limit=5.0 +2023-02-09 03:22:40,513 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236153.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:22:41,159 INFO [train.py:901] (0/4) Epoch 30, batch 1750, loss[loss=0.19, simple_loss=0.2806, pruned_loss=0.04972, over 8537.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2809, pruned_loss=0.05539, over 1623601.95 frames. 
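The `zipformer.py:1185` entries report stochastic layer skipping: usually `num_to_drop=0, layers_to_drop=set()`, with an occasional `num_to_drop=1, layers_to_drop={0}` as in the two entries above. A hedged sketch of such a schedule follows; the probabilities and the post-warmup behavior are guesses, not the recipe's real rule.

```python
import random

def pick_layers_to_drop(batch_count: float, warmup_begin: float,
                        warmup_end: float, num_layers: int,
                        base_rate: float = 0.05) -> set:
    if batch_count < warmup_end:
        # assumed: anneal from an aggressive rate down to base_rate
        frac = (warmup_end - batch_count) / (warmup_end - warmup_begin)
        rate = base_rate + 0.5 * max(0.0, min(frac, 1.0))
    else:
        rate = base_rate  # rare drops persist after warmup, as logged
    drop = {i for i in range(num_layers) if random.random() < rate}
    print(f"num_to_drop={len(drop)}, layers_to_drop={drop or set()}")
    return drop

pick_layers_to_drop(235378.0, 1333.3, 2000.0, num_layers=2)
```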
], batch size: 49, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:22:42,651 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236156.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:23:07,926 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1638, 2.0365, 2.4921, 2.1803, 2.5490, 2.2743, 2.1450, 1.5526], + device='cuda:0'), covar=tensor([0.5919, 0.5270, 0.2284, 0.4119, 0.2696, 0.3352, 0.2027, 0.5365], + device='cuda:0'), in_proj_covar=tensor([0.0972, 0.1037, 0.0848, 0.1008, 0.1032, 0.0946, 0.0778, 0.0860], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:23:16,218 INFO [train.py:901] (0/4) Epoch 30, batch 1800, loss[loss=0.2484, simple_loss=0.327, pruned_loss=0.08493, over 8506.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.282, pruned_loss=0.05615, over 1624405.25 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:23:29,387 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.503e+02 3.165e+02 3.852e+02 7.294e+02, threshold=6.329e+02, percent-clipped=5.0 +2023-02-09 03:23:52,507 INFO [train.py:901] (0/4) Epoch 30, batch 1850, loss[loss=0.2371, simple_loss=0.3321, pruned_loss=0.07103, over 8249.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2812, pruned_loss=0.05629, over 1619653.95 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:24:03,974 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236268.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:24:28,813 INFO [train.py:901] (0/4) Epoch 30, batch 1900, loss[loss=0.1686, simple_loss=0.2388, pruned_loss=0.04919, over 7436.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.281, pruned_loss=0.05616, over 1620042.86 frames. ], batch size: 17, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:24:41,170 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-09 03:24:41,427 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.325e+02 3.004e+02 3.832e+02 8.674e+02, threshold=6.008e+02, percent-clipped=3.0 +2023-02-09 03:25:01,722 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 03:25:05,199 INFO [train.py:901] (0/4) Epoch 30, batch 1950, loss[loss=0.1892, simple_loss=0.2793, pruned_loss=0.04955, over 8244.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2811, pruned_loss=0.05618, over 1617187.41 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:25:13,610 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-09 03:25:24,198 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:25:25,020 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5628, 1.7792, 1.8273, 1.1939, 1.8609, 1.4281, 0.4719, 1.7322], + device='cuda:0'), covar=tensor([0.0629, 0.0423, 0.0342, 0.0643, 0.0561, 0.0976, 0.1039, 0.0322], + device='cuda:0'), in_proj_covar=tensor([0.0477, 0.0418, 0.0373, 0.0465, 0.0400, 0.0555, 0.0405, 0.0442], + device='cuda:0'), out_proj_covar=tensor([1.2627e-04, 1.0814e-04, 9.7134e-05, 1.2130e-04, 1.0478e-04, 1.5435e-04, + 1.0794e-04, 1.1561e-04], device='cuda:0') +2023-02-09 03:25:25,042 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4645, 1.9320, 2.8471, 1.4609, 2.0689, 1.9098, 1.6111, 2.1767], + device='cuda:0'), covar=tensor([0.2133, 0.2703, 0.0985, 0.4923, 0.2147, 0.3563, 0.2628, 0.2339], + device='cuda:0'), in_proj_covar=tensor([0.0546, 0.0646, 0.0569, 0.0681, 0.0670, 0.0622, 0.0573, 0.0651], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:25:33,332 WARNING [train.py:1067] (0/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 03:25:41,050 INFO [train.py:901] (0/4) Epoch 30, batch 2000, loss[loss=0.1937, simple_loss=0.2997, pruned_loss=0.04389, over 8291.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2811, pruned_loss=0.05649, over 1613739.73 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:25:43,360 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2414, 2.0935, 2.6512, 2.2318, 2.6299, 2.3472, 2.1872, 1.6319], + device='cuda:0'), covar=tensor([0.5935, 0.5443, 0.2208, 0.4291, 0.2860, 0.3388, 0.2090, 0.5733], + device='cuda:0'), in_proj_covar=tensor([0.0970, 0.1036, 0.0846, 0.1007, 0.1032, 0.0947, 0.0778, 0.0857], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:25:46,893 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236412.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:25:53,562 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.406e+02 2.956e+02 3.756e+02 9.982e+02, threshold=5.913e+02, percent-clipped=8.0 +2023-02-09 03:26:04,554 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236437.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:26:16,760 INFO [train.py:901] (0/4) Epoch 30, batch 2050, loss[loss=0.1881, simple_loss=0.2679, pruned_loss=0.05419, over 8079.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05651, over 1612990.03 frames. ], batch size: 21, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:26:47,589 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:26:53,533 INFO [train.py:901] (0/4) Epoch 30, batch 2100, loss[loss=0.1941, simple_loss=0.277, pruned_loss=0.0556, over 8071.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2811, pruned_loss=0.05636, over 1617949.22 frames. 
], batch size: 21, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:27:06,244 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.477e+02 3.089e+02 3.892e+02 8.089e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-09 03:27:07,847 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236524.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:27:25,249 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:27:28,498 INFO [train.py:901] (0/4) Epoch 30, batch 2150, loss[loss=0.1924, simple_loss=0.2883, pruned_loss=0.04822, over 8499.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2806, pruned_loss=0.05603, over 1616884.01 frames. ], batch size: 28, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:27:43,268 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8322, 5.9470, 5.2509, 2.5706, 5.2709, 5.6565, 5.2700, 5.5096], + device='cuda:0'), covar=tensor([0.0647, 0.0373, 0.0925, 0.4341, 0.0784, 0.0874, 0.1249, 0.0535], + device='cuda:0'), in_proj_covar=tensor([0.0551, 0.0461, 0.0450, 0.0564, 0.0446, 0.0473, 0.0449, 0.0413], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:27:57,013 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4373, 2.3682, 1.7829, 2.1054, 1.9725, 1.5737, 1.8140, 1.8930], + device='cuda:0'), covar=tensor([0.1506, 0.0406, 0.1184, 0.0617, 0.0772, 0.1504, 0.1053, 0.1066], + device='cuda:0'), in_proj_covar=tensor([0.0361, 0.0245, 0.0349, 0.0317, 0.0304, 0.0353, 0.0355, 0.0325], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:28:04,589 INFO [train.py:901] (0/4) Epoch 30, batch 2200, loss[loss=0.168, simple_loss=0.2365, pruned_loss=0.04972, over 7241.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2803, pruned_loss=0.05568, over 1611760.66 frames. ], batch size: 16, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:28:18,769 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.435e+02 2.816e+02 3.564e+02 9.413e+02, threshold=5.632e+02, percent-clipped=3.0 +2023-02-09 03:28:30,761 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.3249, 1.3216, 4.5482, 1.6845, 4.0209, 3.7824, 4.0983, 4.0139], + device='cuda:0'), covar=tensor([0.0805, 0.5347, 0.0633, 0.4716, 0.1310, 0.1136, 0.0724, 0.0796], + device='cuda:0'), in_proj_covar=tensor([0.0695, 0.0674, 0.0754, 0.0668, 0.0759, 0.0646, 0.0653, 0.0730], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:28:40,462 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0502, 2.2312, 1.7870, 2.8310, 1.4212, 1.5935, 2.1260, 2.2545], + device='cuda:0'), covar=tensor([0.0727, 0.0754, 0.0928, 0.0315, 0.1112, 0.1364, 0.0801, 0.0732], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0196, 0.0245, 0.0216, 0.0202, 0.0248, 0.0252, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 03:28:40,976 INFO [train.py:901] (0/4) Epoch 30, batch 2250, loss[loss=0.1927, simple_loss=0.2959, pruned_loss=0.04477, over 8309.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2807, pruned_loss=0.05555, over 1617382.61 frames. 
], batch size: 25, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:29:04,196 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-02-09 03:29:16,927 INFO [train.py:901] (0/4) Epoch 30, batch 2300, loss[loss=0.1818, simple_loss=0.2809, pruned_loss=0.04136, over 8587.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2804, pruned_loss=0.05604, over 1613803.23 frames. ], batch size: 34, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:29:29,253 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.489e+02 3.036e+02 4.215e+02 7.962e+02, threshold=6.071e+02, percent-clipped=6.0 +2023-02-09 03:29:50,634 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236751.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:29:52,586 INFO [train.py:901] (0/4) Epoch 30, batch 2350, loss[loss=0.181, simple_loss=0.2738, pruned_loss=0.04412, over 8244.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2807, pruned_loss=0.05628, over 1614675.40 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:30:08,618 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:30:29,061 INFO [train.py:901] (0/4) Epoch 30, batch 2400, loss[loss=0.1693, simple_loss=0.2594, pruned_loss=0.03958, over 7807.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.28, pruned_loss=0.05622, over 1610348.22 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:30:42,227 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.281e+02 2.685e+02 3.729e+02 8.099e+02, threshold=5.371e+02, percent-clipped=9.0 +2023-02-09 03:31:05,197 INFO [train.py:901] (0/4) Epoch 30, batch 2450, loss[loss=0.1892, simple_loss=0.2803, pruned_loss=0.04902, over 8198.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.05669, over 1614812.19 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:31:39,710 INFO [train.py:901] (0/4) Epoch 30, batch 2500, loss[loss=0.228, simple_loss=0.3087, pruned_loss=0.07368, over 8356.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2795, pruned_loss=0.0564, over 1612544.47 frames. ], batch size: 26, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:31:43,945 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236910.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:31:52,883 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.373e+02 3.086e+02 3.767e+02 7.222e+02, threshold=6.171e+02, percent-clipped=6.0 +2023-02-09 03:32:13,826 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6575, 2.3598, 1.8584, 2.2895, 2.1930, 1.5829, 2.0645, 2.0152], + device='cuda:0'), covar=tensor([0.1281, 0.0433, 0.1259, 0.0541, 0.0715, 0.1630, 0.0951, 0.0933], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0242, 0.0345, 0.0314, 0.0302, 0.0350, 0.0351, 0.0320], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:32:16,587 INFO [train.py:901] (0/4) Epoch 30, batch 2550, loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05686, over 8422.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2797, pruned_loss=0.05655, over 1613259.32 frames. 
], batch size: 27, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:32:54,193 INFO [train.py:901] (0/4) Epoch 30, batch 2600, loss[loss=0.1602, simple_loss=0.2512, pruned_loss=0.03457, over 8105.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2801, pruned_loss=0.05635, over 1615964.14 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:33:06,914 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.458e+02 3.021e+02 3.974e+02 8.394e+02, threshold=6.042e+02, percent-clipped=5.0 +2023-02-09 03:33:12,331 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.50 vs. limit=5.0 +2023-02-09 03:33:30,302 INFO [train.py:901] (0/4) Epoch 30, batch 2650, loss[loss=0.1914, simple_loss=0.2805, pruned_loss=0.05113, over 8644.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2815, pruned_loss=0.05706, over 1617523.94 frames. ], batch size: 27, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:34:06,427 INFO [train.py:901] (0/4) Epoch 30, batch 2700, loss[loss=0.1813, simple_loss=0.2535, pruned_loss=0.05461, over 7936.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.0572, over 1612550.81 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:34:07,587 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-09 03:34:18,973 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.464e+02 3.015e+02 4.068e+02 7.247e+02, threshold=6.030e+02, percent-clipped=1.0 +2023-02-09 03:34:41,482 INFO [train.py:901] (0/4) Epoch 30, batch 2750, loss[loss=0.1886, simple_loss=0.2637, pruned_loss=0.05677, over 7925.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.05669, over 1610995.49 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:18,240 INFO [train.py:901] (0/4) Epoch 30, batch 2800, loss[loss=0.2242, simple_loss=0.3071, pruned_loss=0.07064, over 8468.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2795, pruned_loss=0.05622, over 1607240.44 frames. ], batch size: 49, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:20,505 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6027, 1.3965, 1.7360, 1.2951, 0.9700, 1.4766, 1.5414, 1.3398], + device='cuda:0'), covar=tensor([0.0601, 0.1289, 0.1586, 0.1492, 0.0584, 0.1494, 0.0720, 0.0711], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 03:35:31,338 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.300e+02 2.824e+02 3.573e+02 8.919e+02, threshold=5.648e+02, percent-clipped=3.0 +2023-02-09 03:35:45,285 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([6.0150, 1.7386, 6.1547, 2.2430, 5.5787, 5.1944, 5.6744, 5.5627], + device='cuda:0'), covar=tensor([0.0495, 0.4598, 0.0369, 0.3831, 0.1004, 0.0840, 0.0523, 0.0543], + device='cuda:0'), in_proj_covar=tensor([0.0694, 0.0671, 0.0752, 0.0670, 0.0756, 0.0645, 0.0655, 0.0729], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:35:48,282 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 03:35:53,019 INFO [train.py:901] (0/4) Epoch 30, batch 2850, loss[loss=0.1942, simple_loss=0.281, pruned_loss=0.05374, over 8332.00 frames. 
], tot_loss[loss=0.1963, simple_loss=0.2796, pruned_loss=0.05645, over 1606593.71 frames. ], batch size: 26, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:53,086 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237254.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:36:18,934 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.88 vs. limit=2.0 +2023-02-09 03:36:27,598 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-09 03:36:29,228 INFO [train.py:901] (0/4) Epoch 30, batch 2900, loss[loss=0.2138, simple_loss=0.305, pruned_loss=0.06134, over 8456.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2801, pruned_loss=0.05695, over 1608026.24 frames. ], batch size: 29, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:36:42,593 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.592e+02 3.021e+02 4.387e+02 8.419e+02, threshold=6.042e+02, percent-clipped=5.0 +2023-02-09 03:37:04,020 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 03:37:05,370 INFO [train.py:901] (0/4) Epoch 30, batch 2950, loss[loss=0.2294, simple_loss=0.3099, pruned_loss=0.07448, over 7549.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.28, pruned_loss=0.05641, over 1609751.87 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:37:15,785 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237369.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:37:36,198 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8772, 6.0635, 5.2368, 2.4435, 5.3370, 5.7064, 5.4105, 5.5050], + device='cuda:0'), covar=tensor([0.0463, 0.0258, 0.0775, 0.3980, 0.0660, 0.0648, 0.0880, 0.0400], + device='cuda:0'), in_proj_covar=tensor([0.0550, 0.0461, 0.0450, 0.0562, 0.0445, 0.0473, 0.0447, 0.0415], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:37:40,335 INFO [train.py:901] (0/4) Epoch 30, batch 3000, loss[loss=0.2191, simple_loss=0.2994, pruned_loss=0.06944, over 8732.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2806, pruned_loss=0.05684, over 1611549.79 frames. ], batch size: 40, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:37:40,336 INFO [train.py:926] (0/4) Computing validation loss +2023-02-09 03:37:54,060 INFO [train.py:935] (0/4) Epoch 30, validation: loss=0.1704, simple_loss=0.2697, pruned_loss=0.0356, over 944034.00 frames. +2023-02-09 03:37:54,061 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6641MB +2023-02-09 03:38:07,363 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.369e+02 2.918e+02 3.560e+02 6.316e+02, threshold=5.836e+02, percent-clipped=1.0 +2023-02-09 03:38:31,177 INFO [train.py:901] (0/4) Epoch 30, batch 3050, loss[loss=0.2181, simple_loss=0.2995, pruned_loss=0.06835, over 8527.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2806, pruned_loss=0.05682, over 1614406.21 frames. ], batch size: 28, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:39:07,128 INFO [train.py:901] (0/4) Epoch 30, batch 3100, loss[loss=0.1846, simple_loss=0.2776, pruned_loss=0.04577, over 8479.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05636, over 1618555.37 frames. 
], batch size: 29, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:39:10,835 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237509.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:39:15,046 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3600, 1.4252, 1.3633, 1.8380, 0.6558, 1.2817, 1.2733, 1.4887], + device='cuda:0'), covar=tensor([0.0977, 0.0846, 0.1007, 0.0533, 0.1157, 0.1325, 0.0756, 0.0725], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0194, 0.0245, 0.0215, 0.0202, 0.0246, 0.0249, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 03:39:19,648 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.429e+02 3.016e+02 3.485e+02 6.483e+02, threshold=6.032e+02, percent-clipped=4.0 +2023-02-09 03:39:31,253 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7721, 1.6754, 2.7015, 2.0637, 2.4307, 1.7977, 1.6028, 1.2677], + device='cuda:0'), covar=tensor([0.8015, 0.7085, 0.2508, 0.4520, 0.3450, 0.5048, 0.3163, 0.6399], + device='cuda:0'), in_proj_covar=tensor([0.0967, 0.1036, 0.0847, 0.1009, 0.1030, 0.0948, 0.0778, 0.0860], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:39:43,983 INFO [train.py:901] (0/4) Epoch 30, batch 3150, loss[loss=0.1635, simple_loss=0.2472, pruned_loss=0.03985, over 7800.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.28, pruned_loss=0.05569, over 1619032.72 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:40:03,671 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237581.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:40:21,021 INFO [train.py:901] (0/4) Epoch 30, batch 3200, loss[loss=0.1826, simple_loss=0.2698, pruned_loss=0.04771, over 8473.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2796, pruned_loss=0.05543, over 1615685.68 frames. ], batch size: 29, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:40:33,294 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.320e+02 2.861e+02 3.592e+02 8.186e+02, threshold=5.722e+02, percent-clipped=5.0 +2023-02-09 03:40:33,442 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7564, 1.5058, 3.0988, 1.4049, 2.4777, 3.3995, 3.5157, 2.8282], + device='cuda:0'), covar=tensor([0.1255, 0.1869, 0.0366, 0.2265, 0.0906, 0.0309, 0.0556, 0.0706], + device='cuda:0'), in_proj_covar=tensor([0.0309, 0.0329, 0.0298, 0.0328, 0.0330, 0.0283, 0.0449, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 03:40:35,513 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=237625.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:40:52,677 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=237650.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:40:56,035 INFO [train.py:901] (0/4) Epoch 30, batch 3250, loss[loss=0.1723, simple_loss=0.2598, pruned_loss=0.04242, over 7929.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.28, pruned_loss=0.05586, over 1612775.25 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:41:32,182 INFO [train.py:901] (0/4) Epoch 30, batch 3300, loss[loss=0.1962, simple_loss=0.2838, pruned_loss=0.05426, over 8462.00 frames. 
], tot_loss[loss=0.1948, simple_loss=0.2789, pruned_loss=0.05537, over 1613520.21 frames. ], batch size: 49, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:41:45,796 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.392e+02 2.907e+02 3.818e+02 6.093e+02, threshold=5.813e+02, percent-clipped=2.0 +2023-02-09 03:42:03,991 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.0134, 1.6253, 1.3197, 1.5233, 1.3201, 1.2190, 1.2460, 1.2873], + device='cuda:0'), covar=tensor([0.1121, 0.0510, 0.1413, 0.0571, 0.0716, 0.1560, 0.0955, 0.0812], + device='cuda:0'), in_proj_covar=tensor([0.0354, 0.0242, 0.0343, 0.0314, 0.0299, 0.0347, 0.0348, 0.0319], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:42:07,978 INFO [train.py:901] (0/4) Epoch 30, batch 3350, loss[loss=0.1782, simple_loss=0.2682, pruned_loss=0.04415, over 8130.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2787, pruned_loss=0.05501, over 1613184.54 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:42:35,991 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.93 vs. limit=2.0 +2023-02-09 03:42:40,705 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([0.9640, 1.6231, 1.3090, 1.4556, 1.2767, 1.1467, 1.1364, 1.2276], + device='cuda:0'), covar=tensor([0.1416, 0.0616, 0.1577, 0.0759, 0.1001, 0.1981, 0.1280, 0.0998], + device='cuda:0'), in_proj_covar=tensor([0.0356, 0.0243, 0.0344, 0.0314, 0.0300, 0.0348, 0.0349, 0.0320], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:42:44,185 INFO [train.py:901] (0/4) Epoch 30, batch 3400, loss[loss=0.1609, simple_loss=0.2467, pruned_loss=0.03755, over 8248.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2785, pruned_loss=0.05529, over 1614121.56 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:42:47,129 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237808.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:42:57,408 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.484e+02 3.245e+02 4.483e+02 9.283e+02, threshold=6.490e+02, percent-clipped=12.0 +2023-02-09 03:43:19,448 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237853.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:43:20,122 INFO [train.py:901] (0/4) Epoch 30, batch 3450, loss[loss=0.2164, simple_loss=0.3059, pruned_loss=0.06348, over 8103.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2788, pruned_loss=0.05543, over 1610162.67 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:43:56,091 INFO [train.py:901] (0/4) Epoch 30, batch 3500, loss[loss=0.19, simple_loss=0.2836, pruned_loss=0.04819, over 8248.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2791, pruned_loss=0.05547, over 1612494.09 frames. 
], batch size: 24, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:43:57,001 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5042, 1.3965, 1.8392, 1.2246, 1.1329, 1.8150, 0.2806, 1.1662], + device='cuda:0'), covar=tensor([0.1318, 0.1196, 0.0371, 0.0743, 0.2311, 0.0443, 0.1737, 0.1035], + device='cuda:0'), in_proj_covar=tensor([0.0204, 0.0207, 0.0139, 0.0224, 0.0279, 0.0149, 0.0174, 0.0201], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 03:44:08,739 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.505e+02 3.010e+02 3.725e+02 8.965e+02, threshold=6.019e+02, percent-clipped=4.0 +2023-02-09 03:44:11,008 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237925.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:44:14,972 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237930.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:44:22,354 WARNING [train.py:1067] (0/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 03:44:32,818 INFO [train.py:901] (0/4) Epoch 30, batch 3550, loss[loss=0.1828, simple_loss=0.2779, pruned_loss=0.04383, over 8506.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2797, pruned_loss=0.05538, over 1617647.16 frames. ], batch size: 28, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:44:43,175 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237968.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:44:56,017 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7688, 2.2935, 3.6842, 1.5662, 2.8009, 2.2381, 1.8036, 2.8550], + device='cuda:0'), covar=tensor([0.1999, 0.2578, 0.1014, 0.4836, 0.1972, 0.3410, 0.2623, 0.2378], + device='cuda:0'), in_proj_covar=tensor([0.0545, 0.0642, 0.0568, 0.0677, 0.0668, 0.0617, 0.0571, 0.0648], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:45:05,696 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-238000.pt +2023-02-09 03:45:09,421 INFO [train.py:901] (0/4) Epoch 30, batch 3600, loss[loss=0.2271, simple_loss=0.3175, pruned_loss=0.06829, over 8499.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2805, pruned_loss=0.05551, over 1619058.85 frames. ], batch size: 39, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:45:22,459 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.286e+02 2.832e+02 3.360e+02 7.556e+02, threshold=5.664e+02, percent-clipped=2.0 +2023-02-09 03:45:35,422 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238040.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:45:40,895 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.8419, 6.0660, 5.1922, 2.4631, 5.2583, 5.6974, 5.4899, 5.5048], + device='cuda:0'), covar=tensor([0.0650, 0.0378, 0.0965, 0.4432, 0.0766, 0.0761, 0.1134, 0.0670], + device='cuda:0'), in_proj_covar=tensor([0.0551, 0.0463, 0.0452, 0.0563, 0.0445, 0.0475, 0.0451, 0.0416], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:45:44,902 INFO [train.py:901] (0/4) Epoch 30, batch 3650, loss[loss=0.2574, simple_loss=0.3272, pruned_loss=0.09381, over 7279.00 frames. 
], tot_loss[loss=0.1957, simple_loss=0.2803, pruned_loss=0.05552, over 1619148.87 frames. ], batch size: 78, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:05,857 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:12,779 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238092.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:20,738 INFO [train.py:901] (0/4) Epoch 30, batch 3700, loss[loss=0.2346, simple_loss=0.321, pruned_loss=0.07413, over 8355.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2805, pruned_loss=0.05592, over 1615037.34 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:21,662 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4945, 1.3938, 1.8343, 1.1742, 1.1071, 1.8042, 0.2364, 1.1560], + device='cuda:0'), covar=tensor([0.1400, 0.1175, 0.0371, 0.0824, 0.2318, 0.0410, 0.1717, 0.1065], + device='cuda:0'), in_proj_covar=tensor([0.0204, 0.0207, 0.0139, 0.0225, 0.0279, 0.0149, 0.0175, 0.0201], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 03:46:29,078 WARNING [train.py:1067] (0/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 03:46:33,267 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.352e+02 3.001e+02 3.686e+02 7.575e+02, threshold=6.003e+02, percent-clipped=3.0 +2023-02-09 03:46:45,999 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.1812, 1.4480, 1.7390, 1.4084, 0.7422, 1.5291, 1.1660, 1.2058], + device='cuda:0'), covar=tensor([0.0602, 0.1157, 0.1471, 0.1377, 0.0537, 0.1335, 0.0663, 0.0645], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0193, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 03:46:50,339 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:56,066 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238152.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:57,373 INFO [train.py:901] (0/4) Epoch 30, batch 3750, loss[loss=0.2219, simple_loss=0.3072, pruned_loss=0.06834, over 8765.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05641, over 1615925.74 frames. ], batch size: 30, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:57,656 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4308, 2.3509, 2.9914, 2.5296, 2.9241, 2.5248, 2.3755, 1.8210], + device='cuda:0'), covar=tensor([0.5623, 0.5255, 0.2191, 0.4072, 0.2893, 0.3253, 0.1994, 0.5875], + device='cuda:0'), in_proj_covar=tensor([0.0971, 0.1038, 0.0848, 0.1012, 0.1033, 0.0949, 0.0780, 0.0860], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 03:47:33,681 INFO [train.py:901] (0/4) Epoch 30, batch 3800, loss[loss=0.2026, simple_loss=0.286, pruned_loss=0.05962, over 7918.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05696, over 1612469.83 frames. 
], batch size: 20, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:47:46,041 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.427e+02 2.911e+02 3.474e+02 7.215e+02, threshold=5.821e+02, percent-clipped=2.0 +2023-02-09 03:47:47,593 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238224.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:47:48,257 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7788, 1.4433, 1.7020, 1.3485, 1.0641, 1.4630, 1.6871, 1.5041], + device='cuda:0'), covar=tensor([0.0631, 0.1334, 0.1694, 0.1501, 0.0627, 0.1554, 0.0733, 0.0676], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0193, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 03:48:05,496 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238249.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:09,474 INFO [train.py:901] (0/4) Epoch 30, batch 3850, loss[loss=0.1917, simple_loss=0.2823, pruned_loss=0.0505, over 8192.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05742, over 1615671.95 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:48:18,890 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238267.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:23,697 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238274.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:37,706 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 03:48:39,926 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238296.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:48:45,178 INFO [train.py:901] (0/4) Epoch 30, batch 3900, loss[loss=0.1725, simple_loss=0.2492, pruned_loss=0.04795, over 7787.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2822, pruned_loss=0.05682, over 1621772.59 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:48:57,810 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238321.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:48:58,293 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.361e+02 2.887e+02 3.538e+02 6.169e+02, threshold=5.773e+02, percent-clipped=2.0 +2023-02-09 03:49:12,284 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0 +2023-02-09 03:49:20,478 INFO [train.py:901] (0/4) Epoch 30, batch 3950, loss[loss=0.1946, simple_loss=0.2855, pruned_loss=0.05182, over 8322.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2817, pruned_loss=0.05668, over 1619176.25 frames. 
], batch size: 25, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:49:45,699 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2721, 1.4851, 3.4129, 1.2055, 3.0455, 2.8799, 3.1516, 3.0664], + device='cuda:0'), covar=tensor([0.0805, 0.3704, 0.0760, 0.4123, 0.1302, 0.1040, 0.0716, 0.0855], + device='cuda:0'), in_proj_covar=tensor([0.0695, 0.0670, 0.0755, 0.0667, 0.0755, 0.0644, 0.0653, 0.0728], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:49:46,435 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238389.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:49:56,967 INFO [train.py:901] (0/4) Epoch 30, batch 4000, loss[loss=0.1991, simple_loss=0.287, pruned_loss=0.05564, over 7978.00 frames. ], tot_loss[loss=0.197, simple_loss=0.281, pruned_loss=0.05654, over 1615781.56 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:50:08,209 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8740, 1.9629, 1.9309, 1.6209, 2.0390, 1.7018, 1.2167, 1.9257], + device='cuda:0'), covar=tensor([0.0509, 0.0365, 0.0290, 0.0514, 0.0396, 0.0694, 0.0827, 0.0312], + device='cuda:0'), in_proj_covar=tensor([0.0484, 0.0424, 0.0380, 0.0470, 0.0407, 0.0564, 0.0411, 0.0450], + device='cuda:0'), out_proj_covar=tensor([1.2813e-04, 1.0972e-04, 9.8935e-05, 1.2272e-04, 1.0644e-04, 1.5702e-04, + 1.0953e-04, 1.1764e-04], device='cuda:0') +2023-02-09 03:50:09,962 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.354e+02 2.920e+02 3.674e+02 8.815e+02, threshold=5.839e+02, percent-clipped=5.0 +2023-02-09 03:50:12,702 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:50:20,358 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238436.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:50:21,775 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238438.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:50:32,675 INFO [train.py:901] (0/4) Epoch 30, batch 4050, loss[loss=0.1929, simple_loss=0.2854, pruned_loss=0.05023, over 8505.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.05644, over 1616021.86 frames. ], batch size: 26, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:50:47,615 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.0585, 1.5911, 4.3878, 1.9620, 3.5370, 3.4464, 3.9099, 3.8893], + device='cuda:0'), covar=tensor([0.1358, 0.6995, 0.1174, 0.5876, 0.2219, 0.1923, 0.1223, 0.1098], + device='cuda:0'), in_proj_covar=tensor([0.0697, 0.0673, 0.0757, 0.0669, 0.0757, 0.0646, 0.0655, 0.0731], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:50:49,068 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.16 vs. limit=5.0 +2023-02-09 03:50:57,402 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238488.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:08,980 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 03:51:09,342 INFO [train.py:901] (0/4) Epoch 30, batch 4100, loss[loss=0.1655, simple_loss=0.2574, pruned_loss=0.03682, over 8348.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2803, pruned_loss=0.0562, over 1614098.89 frames. 
], batch size: 24, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:51:21,790 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.287e+02 2.925e+02 3.934e+02 1.031e+03, threshold=5.850e+02, percent-clipped=7.0 +2023-02-09 03:51:22,750 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238523.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:35,966 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:41,378 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238548.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:43,266 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238551.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:45,014 INFO [train.py:901] (0/4) Epoch 30, batch 4150, loss[loss=0.2282, simple_loss=0.3147, pruned_loss=0.07084, over 8322.00 frames. ], tot_loss[loss=0.197, simple_loss=0.281, pruned_loss=0.05651, over 1616288.42 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:52:11,306 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.4225, 1.6885, 4.5892, 1.8184, 4.1228, 3.7819, 4.1051, 3.9935], + device='cuda:0'), covar=tensor([0.0615, 0.4587, 0.0492, 0.4330, 0.0977, 0.0978, 0.0621, 0.0717], + device='cuda:0'), in_proj_covar=tensor([0.0698, 0.0675, 0.0760, 0.0672, 0.0760, 0.0650, 0.0658, 0.0734], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:52:19,910 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238603.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:52:20,455 INFO [train.py:901] (0/4) Epoch 30, batch 4200, loss[loss=0.195, simple_loss=0.2654, pruned_loss=0.06226, over 7538.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05663, over 1616141.68 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:52:33,715 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.494e+02 3.256e+02 4.447e+02 1.288e+03, threshold=6.511e+02, percent-clipped=8.0 +2023-02-09 03:52:41,508 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 03:52:50,657 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238645.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:52:56,587 INFO [train.py:901] (0/4) Epoch 30, batch 4250, loss[loss=0.1948, simple_loss=0.2901, pruned_loss=0.04978, over 8320.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05706, over 1622936.83 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:53:05,087 WARNING [train.py:1067] (0/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 03:53:07,965 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238670.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:53:27,915 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238699.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:53:31,134 INFO [train.py:901] (0/4) Epoch 30, batch 4300, loss[loss=0.2745, simple_loss=0.3333, pruned_loss=0.1079, over 8617.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2806, pruned_loss=0.05696, over 1621512.84 frames. 
], batch size: 31, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:53:44,805 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.303e+02 2.743e+02 3.342e+02 6.438e+02, threshold=5.486e+02, percent-clipped=0.0 +2023-02-09 03:54:06,887 INFO [train.py:901] (0/4) Epoch 30, batch 4350, loss[loss=0.1982, simple_loss=0.2894, pruned_loss=0.05354, over 8462.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2804, pruned_loss=0.05662, over 1622870.72 frames. ], batch size: 27, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:54:27,363 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238782.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:54:36,398 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 03:54:37,987 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238797.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:54:42,555 INFO [train.py:901] (0/4) Epoch 30, batch 4400, loss[loss=0.2368, simple_loss=0.3177, pruned_loss=0.07796, over 8613.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2805, pruned_loss=0.05638, over 1621789.99 frames. ], batch size: 49, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:54:44,953 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238807.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:54:55,878 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.573e+02 3.023e+02 3.983e+02 6.680e+02, threshold=6.046e+02, percent-clipped=2.0 +2023-02-09 03:54:56,101 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238822.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:03,128 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238832.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:11,355 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5269, 1.8455, 2.6670, 1.4934, 1.9359, 1.9563, 1.6229, 2.0488], + device='cuda:0'), covar=tensor([0.1959, 0.2884, 0.1027, 0.4800, 0.2089, 0.3302, 0.2658, 0.2265], + device='cuda:0'), in_proj_covar=tensor([0.0549, 0.0647, 0.0571, 0.0681, 0.0673, 0.0620, 0.0574, 0.0653], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 03:55:15,276 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 03:55:18,603 INFO [train.py:901] (0/4) Epoch 30, batch 4450, loss[loss=0.2153, simple_loss=0.2995, pruned_loss=0.06552, over 8194.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2817, pruned_loss=0.05667, over 1624724.27 frames. 
], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:55:18,810 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4105, 2.7049, 2.2149, 3.8600, 1.6787, 2.1006, 2.4816, 2.6318], + device='cuda:0'), covar=tensor([0.0722, 0.0788, 0.0842, 0.0271, 0.1081, 0.1178, 0.0944, 0.0894], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0195, 0.0246, 0.0215, 0.0203, 0.0248, 0.0250, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 03:55:22,519 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238859.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:40,886 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238884.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:50,458 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238897.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:55:55,020 INFO [train.py:901] (0/4) Epoch 30, batch 4500, loss[loss=0.2059, simple_loss=0.2804, pruned_loss=0.06566, over 8636.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.05743, over 1616014.79 frames. ], batch size: 34, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:56:07,440 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.335e+02 2.828e+02 3.474e+02 8.376e+02, threshold=5.656e+02, percent-clipped=3.0 +2023-02-09 03:56:08,204 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 03:56:13,481 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.80 vs. limit=5.0 +2023-02-09 03:56:30,985 INFO [train.py:901] (0/4) Epoch 30, batch 4550, loss[loss=0.1779, simple_loss=0.2642, pruned_loss=0.04586, over 8244.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2833, pruned_loss=0.05745, over 1618388.74 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:56:31,145 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238954.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:57:04,986 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3302, 2.5069, 2.1409, 2.9067, 2.0551, 2.1385, 2.3347, 2.5417], + device='cuda:0'), covar=tensor([0.0617, 0.0666, 0.0697, 0.0477, 0.0824, 0.0937, 0.0651, 0.0603], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0195, 0.0246, 0.0215, 0.0203, 0.0247, 0.0251, 0.0206], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 03:57:06,050 INFO [train.py:901] (0/4) Epoch 30, batch 4600, loss[loss=0.202, simple_loss=0.2791, pruned_loss=0.06244, over 7814.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2837, pruned_loss=0.0579, over 1618426.68 frames. 
], batch size: 20, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:57:09,651 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5524, 1.8304, 1.8616, 1.2853, 1.9588, 1.3926, 0.4692, 1.7581], + device='cuda:0'), covar=tensor([0.0718, 0.0463, 0.0345, 0.0687, 0.0503, 0.1229, 0.1126, 0.0359], + device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0421, 0.0379, 0.0468, 0.0403, 0.0562, 0.0408, 0.0450], + device='cuda:0'), out_proj_covar=tensor([1.2768e-04, 1.0874e-04, 9.8625e-05, 1.2225e-04, 1.0537e-04, 1.5617e-04, + 1.0883e-04, 1.1764e-04], device='cuda:0') +2023-02-09 03:57:19,156 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.347e+02 2.832e+02 3.443e+02 5.144e+02, threshold=5.665e+02, percent-clipped=0.0 +2023-02-09 03:57:34,216 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239043.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:57:41,235 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.3116, 2.0913, 1.6625, 2.0456, 1.7298, 1.4090, 1.6878, 1.7573], + device='cuda:0'), covar=tensor([0.1405, 0.0484, 0.1421, 0.0560, 0.0845, 0.1783, 0.1090, 0.0932], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0247, 0.0347, 0.0317, 0.0304, 0.0351, 0.0352, 0.0322], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 03:57:41,732 INFO [train.py:901] (0/4) Epoch 30, batch 4650, loss[loss=0.1749, simple_loss=0.2712, pruned_loss=0.03935, over 8188.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.285, pruned_loss=0.05864, over 1615969.62 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:57:43,630 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 03:57:47,058 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-09 03:57:52,089 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1778, 2.3195, 1.8647, 2.8788, 1.3582, 1.6882, 2.1708, 2.3730], + device='cuda:0'), covar=tensor([0.0616, 0.0714, 0.0789, 0.0331, 0.1012, 0.1188, 0.0795, 0.0631], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0214, 0.0203, 0.0246, 0.0249, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 03:58:17,765 INFO [train.py:901] (0/4) Epoch 30, batch 4700, loss[loss=0.2061, simple_loss=0.2888, pruned_loss=0.06168, over 8280.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.284, pruned_loss=0.05828, over 1616589.96 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:30,953 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.353e+02 2.866e+02 3.941e+02 8.957e+02, threshold=5.733e+02, percent-clipped=8.0 +2023-02-09 03:58:52,470 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239153.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:58:52,926 INFO [train.py:901] (0/4) Epoch 30, batch 4750, loss[loss=0.2249, simple_loss=0.3131, pruned_loss=0.06839, over 8513.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2846, pruned_loss=0.05882, over 1620103.88 frames. 
], batch size: 49, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:55,963 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239158.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:59:10,887 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239178.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:59:12,057 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 03:59:14,175 WARNING [train.py:1067] (0/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 03:59:28,649 INFO [train.py:901] (0/4) Epoch 30, batch 4800, loss[loss=0.2203, simple_loss=0.3034, pruned_loss=0.06855, over 8037.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2843, pruned_loss=0.05838, over 1621861.16 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:59:41,693 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.393e+02 3.010e+02 3.751e+02 7.640e+02, threshold=6.020e+02, percent-clipped=2.0 +2023-02-09 04:00:04,582 INFO [train.py:901] (0/4) Epoch 30, batch 4850, loss[loss=0.1998, simple_loss=0.2838, pruned_loss=0.05793, over 8499.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.284, pruned_loss=0.05782, over 1619725.03 frames. ], batch size: 28, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:00:06,728 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 04:00:36,497 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239298.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:00:40,440 INFO [train.py:901] (0/4) Epoch 30, batch 4900, loss[loss=0.2152, simple_loss=0.2918, pruned_loss=0.06934, over 8605.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2842, pruned_loss=0.05796, over 1620357.71 frames. ], batch size: 31, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:00:53,055 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.412e+02 2.818e+02 3.519e+02 1.028e+03, threshold=5.635e+02, percent-clipped=4.0 +2023-02-09 04:00:59,134 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 04:01:15,935 INFO [train.py:901] (0/4) Epoch 30, batch 4950, loss[loss=0.2206, simple_loss=0.3098, pruned_loss=0.06569, over 8347.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2832, pruned_loss=0.05771, over 1621328.00 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:51,710 INFO [train.py:901] (0/4) Epoch 30, batch 5000, loss[loss=0.2138, simple_loss=0.2962, pruned_loss=0.06566, over 7985.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2842, pruned_loss=0.05861, over 1617263.14 frames. 
], batch size: 21, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:58,133 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239413.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:01:58,923 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239414.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:05,006 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.513e+02 3.095e+02 3.810e+02 1.179e+03, threshold=6.190e+02, percent-clipped=9.0 +2023-02-09 04:02:17,713 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239439.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:29,049 INFO [train.py:901] (0/4) Epoch 30, batch 5050, loss[loss=0.1865, simple_loss=0.2533, pruned_loss=0.0598, over 7690.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2826, pruned_loss=0.05793, over 1614418.64 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:02:48,703 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7304, 2.6137, 1.8918, 2.4601, 2.2640, 1.6293, 2.2536, 2.2942], + device='cuda:0'), covar=tensor([0.1540, 0.0456, 0.1317, 0.0687, 0.0802, 0.1625, 0.0980, 0.0969], + device='cuda:0'), in_proj_covar=tensor([0.0360, 0.0246, 0.0346, 0.0316, 0.0303, 0.0348, 0.0350, 0.0322], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 04:02:52,733 WARNING [train.py:1067] (0/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 04:03:05,858 INFO [train.py:901] (0/4) Epoch 30, batch 5100, loss[loss=0.2029, simple_loss=0.2803, pruned_loss=0.0628, over 7531.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05716, over 1613945.50 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:20,038 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.525e+02 3.230e+02 3.994e+02 1.175e+03, threshold=6.461e+02, percent-clipped=6.0 +2023-02-09 04:03:27,188 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5782, 1.7662, 2.1709, 1.4655, 1.6052, 1.8024, 1.6806, 1.5654], + device='cuda:0'), covar=tensor([0.1925, 0.2588, 0.0955, 0.4532, 0.1954, 0.3480, 0.2415, 0.2159], + device='cuda:0'), in_proj_covar=tensor([0.0545, 0.0645, 0.0568, 0.0680, 0.0669, 0.0619, 0.0571, 0.0649], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:03:42,222 INFO [train.py:901] (0/4) Epoch 30, batch 5150, loss[loss=0.1851, simple_loss=0.265, pruned_loss=0.05256, over 7649.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05754, over 1611879.85 frames. ], batch size: 19, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:12,495 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0238, 1.5340, 1.8493, 1.4445, 1.0074, 1.5748, 1.7809, 1.5646], + device='cuda:0'), covar=tensor([0.0592, 0.1241, 0.1611, 0.1422, 0.0620, 0.1415, 0.0687, 0.0674], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 04:04:18,739 INFO [train.py:901] (0/4) Epoch 30, batch 5200, loss[loss=0.1715, simple_loss=0.2556, pruned_loss=0.04371, over 7714.00 frames. 
], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.05751, over 1608897.61 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:31,920 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.360e+02 2.794e+02 3.430e+02 1.458e+03, threshold=5.587e+02, percent-clipped=2.0 +2023-02-09 04:04:44,694 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=239640.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:04:54,718 INFO [train.py:901] (0/4) Epoch 30, batch 5250, loss[loss=0.2335, simple_loss=0.309, pruned_loss=0.07901, over 8506.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.0571, over 1605094.33 frames. ], batch size: 49, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:56,133 WARNING [train.py:1067] (0/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 04:05:05,211 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239669.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:05:07,208 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8868, 1.4030, 3.4899, 1.6036, 2.6044, 3.7883, 3.8965, 3.2898], + device='cuda:0'), covar=tensor([0.1281, 0.2004, 0.0281, 0.2034, 0.0944, 0.0213, 0.0461, 0.0488], + device='cuda:0'), in_proj_covar=tensor([0.0310, 0.0329, 0.0297, 0.0329, 0.0331, 0.0284, 0.0451, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 04:05:09,931 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.5703, 4.5905, 4.1255, 2.2682, 4.0198, 4.2254, 4.1512, 4.0960], + device='cuda:0'), covar=tensor([0.0669, 0.0500, 0.1014, 0.4258, 0.0880, 0.1024, 0.1200, 0.0726], + device='cuda:0'), in_proj_covar=tensor([0.0556, 0.0465, 0.0455, 0.0569, 0.0450, 0.0478, 0.0453, 0.0417], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:05:23,284 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239694.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:05:29,843 INFO [train.py:901] (0/4) Epoch 30, batch 5300, loss[loss=0.1962, simple_loss=0.2887, pruned_loss=0.05192, over 8675.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2818, pruned_loss=0.05678, over 1604859.99 frames. ], batch size: 34, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:05:32,367 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 04:05:43,734 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.433e+02 2.937e+02 3.850e+02 7.663e+02, threshold=5.875e+02, percent-clipped=5.0 +2023-02-09 04:06:04,875 INFO [train.py:901] (0/4) Epoch 30, batch 5350, loss[loss=0.1557, simple_loss=0.2437, pruned_loss=0.03385, over 7459.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2832, pruned_loss=0.05776, over 1610947.73 frames. 
], batch size: 17, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:09,871 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3142, 1.4518, 4.6537, 2.2860, 2.6634, 5.2752, 5.3445, 4.6383], + device='cuda:0'), covar=tensor([0.1252, 0.2158, 0.0214, 0.1797, 0.1092, 0.0154, 0.0477, 0.0515], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0330, 0.0297, 0.0330, 0.0332, 0.0284, 0.0453, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 04:06:41,526 INFO [train.py:901] (0/4) Epoch 30, batch 5400, loss[loss=0.1883, simple_loss=0.2787, pruned_loss=0.04895, over 8649.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2831, pruned_loss=0.05792, over 1611206.72 frames. ], batch size: 34, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:43,090 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8632, 1.6662, 2.5077, 1.6434, 1.3825, 2.4617, 0.5059, 1.5625], + device='cuda:0'), covar=tensor([0.1309, 0.1159, 0.0409, 0.1020, 0.2189, 0.0393, 0.2094, 0.1164], + device='cuda:0'), in_proj_covar=tensor([0.0204, 0.0207, 0.0138, 0.0224, 0.0280, 0.0149, 0.0176, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:06:55,033 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.371e+02 2.881e+02 3.522e+02 8.420e+02, threshold=5.763e+02, percent-clipped=7.0 +2023-02-09 04:06:59,886 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0 +2023-02-09 04:07:17,636 INFO [train.py:901] (0/4) Epoch 30, batch 5450, loss[loss=0.1582, simple_loss=0.2409, pruned_loss=0.03774, over 7694.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2829, pruned_loss=0.05765, over 1615932.82 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:07:49,860 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 04:07:53,962 INFO [train.py:901] (0/4) Epoch 30, batch 5500, loss[loss=0.1565, simple_loss=0.2544, pruned_loss=0.02934, over 8507.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2827, pruned_loss=0.05734, over 1617039.42 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:08,771 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.378e+02 3.012e+02 4.037e+02 9.246e+02, threshold=6.023e+02, percent-clipped=5.0 +2023-02-09 04:08:30,325 INFO [train.py:901] (0/4) Epoch 30, batch 5550, loss[loss=0.1765, simple_loss=0.2618, pruned_loss=0.04564, over 8031.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2827, pruned_loss=0.05737, over 1615188.12 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:50,819 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:09:02,725 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-240000.pt +2023-02-09 04:09:07,150 INFO [train.py:901] (0/4) Epoch 30, batch 5600, loss[loss=0.1787, simple_loss=0.262, pruned_loss=0.04774, over 7931.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2822, pruned_loss=0.05706, over 1613533.10 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:15,199 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-09 04:09:21,043 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.479e+02 2.939e+02 3.472e+02 8.474e+02, threshold=5.878e+02, percent-clipped=2.0 +2023-02-09 04:09:21,981 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7995, 1.6835, 2.1671, 1.3943, 1.3946, 2.0633, 0.3297, 1.3638], + device='cuda:0'), covar=tensor([0.1124, 0.1018, 0.0332, 0.0828, 0.1944, 0.0450, 0.1709, 0.1205], + device='cuda:0'), in_proj_covar=tensor([0.0204, 0.0206, 0.0138, 0.0224, 0.0279, 0.0149, 0.0175, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:09:42,594 INFO [train.py:901] (0/4) Epoch 30, batch 5650, loss[loss=0.1726, simple_loss=0.2556, pruned_loss=0.04482, over 8096.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05717, over 1611902.04 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:44,739 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9330, 6.2015, 5.4875, 2.8059, 5.5223, 5.8396, 5.6441, 5.8126], + device='cuda:0'), covar=tensor([0.0640, 0.0366, 0.0890, 0.4272, 0.0770, 0.0689, 0.1033, 0.0521], + device='cuda:0'), in_proj_covar=tensor([0.0558, 0.0466, 0.0456, 0.0572, 0.0453, 0.0480, 0.0456, 0.0419], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:09:59,638 WARNING [train.py:1067] (0/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 04:10:14,064 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240099.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:10:17,325 INFO [train.py:901] (0/4) Epoch 30, batch 5700, loss[loss=0.2179, simple_loss=0.2955, pruned_loss=0.07015, over 7919.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05725, over 1613377.69 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:10:20,770 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4495, 2.8418, 2.3499, 4.1239, 1.5418, 2.0853, 2.5841, 2.7046], + device='cuda:0'), covar=tensor([0.0817, 0.0807, 0.0896, 0.0246, 0.1142, 0.1262, 0.0902, 0.0848], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0193, 0.0243, 0.0213, 0.0201, 0.0245, 0.0247, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:10:32,009 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 2.506e+02 3.162e+02 4.194e+02 1.225e+03, threshold=6.325e+02, percent-clipped=8.0 +2023-02-09 04:10:53,043 INFO [train.py:901] (0/4) Epoch 30, batch 5750, loss[loss=0.2035, simple_loss=0.2827, pruned_loss=0.06213, over 7974.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.282, pruned_loss=0.05759, over 1617643.34 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:04,197 WARNING [train.py:1067] (0/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. 
Duration: 29.1166875 +2023-02-09 04:11:21,805 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2912, 3.3446, 2.3786, 2.8542, 2.6306, 2.2258, 2.5845, 2.9498], + device='cuda:0'), covar=tensor([0.1432, 0.0381, 0.1088, 0.0639, 0.0721, 0.1327, 0.0975, 0.0988], + device='cuda:0'), in_proj_covar=tensor([0.0357, 0.0245, 0.0344, 0.0314, 0.0301, 0.0346, 0.0349, 0.0318], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 04:11:28,592 INFO [train.py:901] (0/4) Epoch 30, batch 5800, loss[loss=0.1569, simple_loss=0.2391, pruned_loss=0.03735, over 7548.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2812, pruned_loss=0.05733, over 1616714.26 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:35,160 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.84 vs. limit=5.0 +2023-02-09 04:11:39,945 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.4506, 2.7569, 2.1749, 3.9227, 1.6435, 1.9861, 2.4649, 2.6724], + device='cuda:0'), covar=tensor([0.0660, 0.0763, 0.0766, 0.0229, 0.1041, 0.1176, 0.0849, 0.0755], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0194, 0.0244, 0.0213, 0.0201, 0.0245, 0.0247, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:11:42,453 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.400e+02 2.667e+02 3.487e+02 8.848e+02, threshold=5.334e+02, percent-clipped=2.0 +2023-02-09 04:12:04,254 INFO [train.py:901] (0/4) Epoch 30, batch 5850, loss[loss=0.1998, simple_loss=0.2893, pruned_loss=0.05516, over 8455.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2796, pruned_loss=0.05614, over 1614462.56 frames. ], batch size: 27, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:33,887 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.8867, 2.1861, 3.6173, 1.9044, 1.8993, 3.5346, 0.8307, 2.2529], + device='cuda:0'), covar=tensor([0.1106, 0.1227, 0.0233, 0.1447, 0.2223, 0.0300, 0.1967, 0.1209], + device='cuda:0'), in_proj_covar=tensor([0.0205, 0.0208, 0.0139, 0.0225, 0.0281, 0.0150, 0.0176, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:12:39,820 INFO [train.py:901] (0/4) Epoch 30, batch 5900, loss[loss=0.208, simple_loss=0.2904, pruned_loss=0.06287, over 8323.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2803, pruned_loss=0.05648, over 1613305.04 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:52,667 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=2.86 vs. limit=5.0 +2023-02-09 04:12:53,716 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.330e+02 2.970e+02 3.920e+02 1.059e+03, threshold=5.939e+02, percent-clipped=6.0 +2023-02-09 04:13:15,508 INFO [train.py:901] (0/4) Epoch 30, batch 5950, loss[loss=0.2329, simple_loss=0.3108, pruned_loss=0.07746, over 8444.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2803, pruned_loss=0.05631, over 1613540.00 frames. 
], batch size: 27, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:16,436 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=240355.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:18,461 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240358.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:33,813 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=240380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:50,597 INFO [train.py:901] (0/4) Epoch 30, batch 6000, loss[loss=0.1747, simple_loss=0.26, pruned_loss=0.0447, over 8034.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2804, pruned_loss=0.05628, over 1614785.18 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:50,598 INFO [train.py:926] (0/4) Computing validation loss +2023-02-09 04:14:04,295 INFO [train.py:935] (0/4) Epoch 30, validation: loss=0.1701, simple_loss=0.2695, pruned_loss=0.03536, over 944034.00 frames. +2023-02-09 04:14:04,296 INFO [train.py:936] (0/4) Maximum memory allocated so far is 6641MB +2023-02-09 04:14:17,955 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.377e+02 3.122e+02 3.554e+02 6.850e+02, threshold=6.243e+02, percent-clipped=2.0 +2023-02-09 04:14:29,600 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2262, 3.1078, 2.9233, 1.6731, 2.8112, 2.9317, 2.7988, 2.8134], + device='cuda:0'), covar=tensor([0.1154, 0.0803, 0.1242, 0.4299, 0.1290, 0.1324, 0.1709, 0.1022], + device='cuda:0'), in_proj_covar=tensor([0.0556, 0.0464, 0.0455, 0.0568, 0.0451, 0.0479, 0.0453, 0.0417], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:14:29,703 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4059, 1.4855, 1.3513, 1.7625, 0.7112, 1.2594, 1.2877, 1.4817], + device='cuda:0'), covar=tensor([0.0852, 0.0766, 0.0981, 0.0493, 0.1120, 0.1327, 0.0768, 0.0745], + device='cuda:0'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0214, 0.0202, 0.0246, 0.0248, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:14:39,922 INFO [train.py:901] (0/4) Epoch 30, batch 6050, loss[loss=0.1992, simple_loss=0.2776, pruned_loss=0.0604, over 8086.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2812, pruned_loss=0.05652, over 1618848.16 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:16,413 INFO [train.py:901] (0/4) Epoch 30, batch 6100, loss[loss=0.1644, simple_loss=0.2542, pruned_loss=0.03733, over 8104.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2813, pruned_loss=0.05638, over 1619769.23 frames. 
], batch size: 23, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:30,262 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 2.992e+02 3.767e+02 7.583e+02, threshold=5.983e+02, percent-clipped=4.0 +2023-02-09 04:15:38,795 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.2124, 1.0458, 1.3049, 1.0218, 0.9789, 1.2801, 0.1294, 0.9607], + device='cuda:0'), covar=tensor([0.1461, 0.1330, 0.0504, 0.0630, 0.2353, 0.0564, 0.1859, 0.1062], + device='cuda:0'), in_proj_covar=tensor([0.0205, 0.0207, 0.0139, 0.0224, 0.0280, 0.0149, 0.0175, 0.0202], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:15:40,577 WARNING [train.py:1067] (0/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 04:15:51,551 INFO [train.py:901] (0/4) Epoch 30, batch 6150, loss[loss=0.1554, simple_loss=0.2268, pruned_loss=0.04194, over 7536.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2813, pruned_loss=0.05634, over 1621960.83 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:28,475 INFO [train.py:901] (0/4) Epoch 30, batch 6200, loss[loss=0.2063, simple_loss=0.2744, pruned_loss=0.06913, over 7654.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.281, pruned_loss=0.05627, over 1620316.36 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:44,278 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.533e+02 2.968e+02 3.901e+02 6.917e+02, threshold=5.935e+02, percent-clipped=4.0 +2023-02-09 04:16:58,423 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2286, 2.0045, 2.6286, 2.2474, 2.6961, 2.3272, 2.2072, 1.6493], + device='cuda:0'), covar=tensor([0.6129, 0.5663, 0.2464, 0.4353, 0.2871, 0.3653, 0.2032, 0.5807], + device='cuda:0'), in_proj_covar=tensor([0.0970, 0.1037, 0.0852, 0.1010, 0.1033, 0.0947, 0.0779, 0.0858], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 04:17:01,144 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0755, 2.2803, 1.9065, 2.8856, 1.3662, 1.8010, 2.1214, 2.2552], + device='cuda:0'), covar=tensor([0.0712, 0.0711, 0.0831, 0.0330, 0.1082, 0.1185, 0.0829, 0.0734], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0214, 0.0203, 0.0247, 0.0248, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:17:05,856 INFO [train.py:901] (0/4) Epoch 30, batch 6250, loss[loss=0.218, simple_loss=0.3021, pruned_loss=0.06696, over 8498.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2799, pruned_loss=0.05575, over 1620593.53 frames. 
], batch size: 28, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:26,883 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.2720, 3.2049, 2.9396, 1.7279, 2.8540, 2.9502, 2.7816, 2.8207], + device='cuda:0'), covar=tensor([0.1027, 0.0736, 0.1148, 0.3931, 0.1145, 0.1194, 0.1504, 0.0991], + device='cuda:0'), in_proj_covar=tensor([0.0556, 0.0465, 0.0455, 0.0568, 0.0451, 0.0480, 0.0451, 0.0418], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:17:39,922 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=240702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:17:41,233 INFO [train.py:901] (0/4) Epoch 30, batch 6300, loss[loss=0.2136, simple_loss=0.3027, pruned_loss=0.06229, over 8035.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2808, pruned_loss=0.05611, over 1617510.13 frames. ], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:54,934 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.569e+02 3.101e+02 4.376e+02 1.063e+03, threshold=6.203e+02, percent-clipped=9.0 +2023-02-09 04:18:17,094 INFO [train.py:901] (0/4) Epoch 30, batch 6350, loss[loss=0.1506, simple_loss=0.2377, pruned_loss=0.0318, over 7802.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2797, pruned_loss=0.05586, over 1616166.45 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:18:53,328 INFO [train.py:901] (0/4) Epoch 30, batch 6400, loss[loss=0.2055, simple_loss=0.2947, pruned_loss=0.05819, over 8613.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2797, pruned_loss=0.05597, over 1608919.95 frames. ], batch size: 34, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:19:02,609 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240817.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:19:07,065 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.422e+02 2.800e+02 3.642e+02 5.918e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-09 04:19:28,687 INFO [train.py:901] (0/4) Epoch 30, batch 6450, loss[loss=0.1955, simple_loss=0.2909, pruned_loss=0.05001, over 8240.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2797, pruned_loss=0.05592, over 1613119.12 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:19:33,400 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0338, 1.3229, 3.4877, 1.6750, 2.5026, 3.8481, 4.0302, 3.3386], + device='cuda:0'), covar=tensor([0.1149, 0.2003, 0.0279, 0.1925, 0.1027, 0.0211, 0.0442, 0.0495], + device='cuda:0'), in_proj_covar=tensor([0.0308, 0.0329, 0.0295, 0.0328, 0.0329, 0.0283, 0.0450, 0.0308], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 04:20:03,777 INFO [train.py:901] (0/4) Epoch 30, batch 6500, loss[loss=0.1888, simple_loss=0.2842, pruned_loss=0.04667, over 8350.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.28, pruned_loss=0.05587, over 1616188.71 frames. 
], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:20:17,923 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.582e+02 3.161e+02 3.840e+02 1.025e+03, threshold=6.322e+02, percent-clipped=7.0 +2023-02-09 04:20:24,408 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.7075, 1.5771, 1.9053, 1.5589, 1.1230, 1.6901, 2.2240, 1.9365], + device='cuda:0'), covar=tensor([0.0517, 0.1278, 0.1622, 0.1452, 0.0612, 0.1441, 0.0675, 0.0651], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0154, 0.0191, 0.0162, 0.0102, 0.0164, 0.0113, 0.0148], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:0') +2023-02-09 04:20:36,288 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240950.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:20:38,871 INFO [train.py:901] (0/4) Epoch 30, batch 6550, loss[loss=0.1846, simple_loss=0.2598, pruned_loss=0.05469, over 7245.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2789, pruned_loss=0.05575, over 1609999.80 frames. ], batch size: 16, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:00,578 WARNING [train.py:1067] (0/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 04:21:15,891 INFO [train.py:901] (0/4) Epoch 30, batch 6600, loss[loss=0.1919, simple_loss=0.2855, pruned_loss=0.04917, over 8583.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2803, pruned_loss=0.05621, over 1613655.08 frames. ], batch size: 39, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:16,038 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241004.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:21:20,066 WARNING [train.py:1067] (0/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 04:21:29,847 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.233e+02 3.026e+02 3.930e+02 1.368e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-09 04:21:45,179 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([4.1935, 1.5310, 4.3732, 1.6733, 3.9083, 3.6316, 3.9550, 3.8905], + device='cuda:0'), covar=tensor([0.0704, 0.4504, 0.0543, 0.4375, 0.1052, 0.1089, 0.0627, 0.0689], + device='cuda:0'), in_proj_covar=tensor([0.0704, 0.0671, 0.0757, 0.0676, 0.0760, 0.0649, 0.0658, 0.0735], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:21:51,501 INFO [train.py:901] (0/4) Epoch 30, batch 6650, loss[loss=0.2216, simple_loss=0.2917, pruned_loss=0.07571, over 8234.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2796, pruned_loss=0.05584, over 1609782.68 frames. 
], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:58,701 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.3520, 3.5420, 2.3212, 3.1787, 3.0676, 2.0436, 2.9109, 3.0457], + device='cuda:0'), covar=tensor([0.1703, 0.0451, 0.1314, 0.0653, 0.0678, 0.1656, 0.1003, 0.1150], + device='cuda:0'), in_proj_covar=tensor([0.0364, 0.0248, 0.0349, 0.0318, 0.0305, 0.0352, 0.0353, 0.0323], + device='cuda:0'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:0') +2023-02-09 04:22:04,833 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241073.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:23,575 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:27,626 INFO [train.py:901] (0/4) Epoch 30, batch 6700, loss[loss=0.2475, simple_loss=0.3359, pruned_loss=0.07956, over 8506.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.05626, over 1608365.89 frames. ], batch size: 28, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:22:42,108 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.246e+02 2.834e+02 3.422e+02 9.903e+02, threshold=5.667e+02, percent-clipped=4.0 +2023-02-09 04:23:04,033 INFO [train.py:901] (0/4) Epoch 30, batch 6750, loss[loss=0.1977, simple_loss=0.2842, pruned_loss=0.05564, over 8440.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2798, pruned_loss=0.05572, over 1608974.12 frames. ], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:14,120 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 04:23:15,321 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5599, 1.8399, 2.7557, 1.4883, 2.0376, 1.9285, 1.5869, 2.1018], + device='cuda:0'), covar=tensor([0.2121, 0.2897, 0.0927, 0.4976, 0.1976, 0.3534, 0.2672, 0.2111], + device='cuda:0'), in_proj_covar=tensor([0.0548, 0.0650, 0.0570, 0.0681, 0.0675, 0.0624, 0.0576, 0.0653], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:23:39,181 INFO [train.py:901] (0/4) Epoch 30, batch 6800, loss[loss=0.1884, simple_loss=0.2823, pruned_loss=0.04729, over 8471.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2808, pruned_loss=0.05613, over 1612483.20 frames. ], batch size: 49, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:42,695 WARNING [train.py:1067] (0/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 04:23:43,123 INFO [scaling.py:679] (0/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-09 04:23:53,781 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.178e+02 2.682e+02 3.526e+02 7.087e+02, threshold=5.364e+02, percent-clipped=2.0 +2023-02-09 04:24:15,461 INFO [train.py:901] (0/4) Epoch 30, batch 6850, loss[loss=0.1656, simple_loss=0.2451, pruned_loss=0.04306, over 7794.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2804, pruned_loss=0.05619, over 1607364.90 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:24:34,765 WARNING [train.py:1067] (0/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-09 04:24:35,653 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.9604, 2.4268, 3.8549, 2.0509, 2.0966, 3.8153, 0.9057, 2.2975], + device='cuda:0'), covar=tensor([0.1245, 0.0918, 0.0219, 0.1352, 0.1796, 0.0262, 0.1686, 0.1042], + device='cuda:0'), in_proj_covar=tensor([0.0203, 0.0207, 0.0138, 0.0223, 0.0277, 0.0149, 0.0174, 0.0201], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:24:43,868 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241294.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:24:47,961 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.9932, 1.4838, 3.5528, 1.7244, 2.5799, 3.9107, 4.0319, 3.3728], + device='cuda:0'), covar=tensor([0.1268, 0.1956, 0.0302, 0.2063, 0.0982, 0.0212, 0.0586, 0.0525], + device='cuda:0'), in_proj_covar=tensor([0.0311, 0.0331, 0.0299, 0.0331, 0.0332, 0.0286, 0.0455, 0.0311], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:0') +2023-02-09 04:24:50,577 INFO [train.py:901] (0/4) Epoch 30, batch 6900, loss[loss=0.232, simple_loss=0.3029, pruned_loss=0.08059, over 7800.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.28, pruned_loss=0.0565, over 1606188.68 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:05,718 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.527e+02 3.094e+02 3.969e+02 8.004e+02, threshold=6.188e+02, percent-clipped=9.0 +2023-02-09 04:25:17,363 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8215, 2.1144, 2.1104, 1.3454, 2.3089, 1.5704, 0.7396, 2.0135], + device='cuda:0'), covar=tensor([0.0763, 0.0439, 0.0368, 0.0791, 0.0511, 0.1069, 0.1140, 0.0416], + device='cuda:0'), in_proj_covar=tensor([0.0483, 0.0418, 0.0376, 0.0466, 0.0404, 0.0560, 0.0406, 0.0447], + device='cuda:0'), out_proj_covar=tensor([1.2769e-04, 1.0758e-04, 9.7909e-05, 1.2150e-04, 1.0537e-04, 1.5562e-04, + 1.0836e-04, 1.1663e-04], device='cuda:0') +2023-02-09 04:25:22,104 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241348.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:25:26,916 INFO [train.py:901] (0/4) Epoch 30, batch 6950, loss[loss=0.191, simple_loss=0.2788, pruned_loss=0.05158, over 8470.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2787, pruned_loss=0.05568, over 1608616.37 frames. 
], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:33,886 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.0900, 2.2476, 1.8765, 2.7252, 1.2627, 1.7681, 2.0609, 2.1980], + device='cuda:0'), covar=tensor([0.0679, 0.0674, 0.0845, 0.0358, 0.1099, 0.1259, 0.0798, 0.0725], + device='cuda:0'), in_proj_covar=tensor([0.0230, 0.0193, 0.0244, 0.0213, 0.0201, 0.0246, 0.0247, 0.0203], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:25:41,892 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5253, 1.4284, 1.7510, 1.3720, 0.8982, 1.4763, 1.4893, 1.4174], + device='cuda:0'), covar=tensor([0.0651, 0.1276, 0.1635, 0.1473, 0.0587, 0.1460, 0.0716, 0.0689], + device='cuda:0'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0164, 0.0114, 0.0149], + device='cuda:0'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:0') +2023-02-09 04:25:46,583 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 04:25:46,725 WARNING [train.py:1067] (0/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 04:26:04,049 INFO [train.py:901] (0/4) Epoch 30, batch 7000, loss[loss=0.2221, simple_loss=0.3056, pruned_loss=0.06927, over 8559.00 frames. ], tot_loss[loss=0.1933, simple_loss=0.2773, pruned_loss=0.05468, over 1608569.52 frames. ], batch size: 34, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:07,751 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241409.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:26:17,960 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.441e+02 2.932e+02 3.651e+02 7.920e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-09 04:26:18,126 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241424.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:26:30,068 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8597, 1.6180, 2.1748, 1.4300, 1.5682, 2.1548, 1.1769, 1.7183], + device='cuda:0'), covar=tensor([0.1342, 0.1036, 0.0385, 0.0890, 0.1728, 0.0492, 0.1475, 0.1178], + device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0206, 0.0137, 0.0222, 0.0276, 0.0149, 0.0174, 0.0200], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:26:40,283 INFO [train.py:901] (0/4) Epoch 30, batch 7050, loss[loss=0.1517, simple_loss=0.2394, pruned_loss=0.03198, over 5477.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2783, pruned_loss=0.05486, over 1609369.77 frames. ], batch size: 12, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:46,805 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241463.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:26:53,092 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-09 04:27:16,742 INFO [train.py:901] (0/4) Epoch 30, batch 7100, loss[loss=0.1956, simple_loss=0.2747, pruned_loss=0.05825, over 8130.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2785, pruned_loss=0.05476, over 1611838.17 frames. 
], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:27:30,724 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.381e+02 2.857e+02 3.660e+02 8.579e+02, threshold=5.714e+02, percent-clipped=3.0 +2023-02-09 04:27:32,322 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.5212, 1.8899, 2.5191, 1.4509, 1.9141, 1.8314, 1.6482, 1.9635], + device='cuda:0'), covar=tensor([0.2040, 0.2685, 0.1093, 0.4777, 0.2110, 0.3548, 0.2552, 0.2359], + device='cuda:0'), in_proj_covar=tensor([0.0545, 0.0648, 0.0566, 0.0675, 0.0670, 0.0620, 0.0573, 0.0649], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:27:51,599 INFO [train.py:901] (0/4) Epoch 30, batch 7150, loss[loss=0.22, simple_loss=0.3201, pruned_loss=0.05998, over 8248.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2784, pruned_loss=0.05473, over 1611311.47 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:15,816 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.6129, 2.0167, 3.0178, 1.4143, 2.3147, 1.8747, 1.7275, 2.3476], + device='cuda:0'), covar=tensor([0.2269, 0.3070, 0.1055, 0.5424, 0.2275, 0.4150, 0.2992, 0.2603], + device='cuda:0'), in_proj_covar=tensor([0.0548, 0.0652, 0.0569, 0.0680, 0.0674, 0.0625, 0.0578, 0.0654], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:28:28,711 INFO [train.py:901] (0/4) Epoch 30, batch 7200, loss[loss=0.2531, simple_loss=0.3286, pruned_loss=0.08877, over 8515.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2796, pruned_loss=0.05565, over 1614372.07 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:36,103 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=192, metric=1.53 vs. limit=2.0 +2023-02-09 04:28:43,490 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.259e+02 2.765e+02 3.853e+02 1.030e+03, threshold=5.530e+02, percent-clipped=3.0 +2023-02-09 04:28:53,993 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241639.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:03,966 INFO [train.py:901] (0/4) Epoch 30, batch 7250, loss[loss=0.2058, simple_loss=0.2965, pruned_loss=0.05752, over 8254.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2803, pruned_loss=0.0561, over 1614799.12 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:11,988 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241665.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:29:30,402 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241690.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:29:40,036 INFO [train.py:901] (0/4) Epoch 30, batch 7300, loss[loss=0.201, simple_loss=0.2797, pruned_loss=0.06111, over 8133.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2799, pruned_loss=0.05595, over 1614346.47 frames. 
], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:50,490 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241719.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:53,738 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.349e+02 2.997e+02 3.899e+02 6.597e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-09 04:29:57,751 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.8154, 1.7114, 2.3189, 1.4897, 1.4181, 2.2802, 0.5452, 1.4200], + device='cuda:0'), covar=tensor([0.1420, 0.1004, 0.0323, 0.0961, 0.1989, 0.0413, 0.1493, 0.1195], + device='cuda:0'), in_proj_covar=tensor([0.0202, 0.0205, 0.0137, 0.0222, 0.0275, 0.0149, 0.0173, 0.0199], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:0') +2023-02-09 04:30:08,895 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:15,574 INFO [train.py:901] (0/4) Epoch 30, batch 7350, loss[loss=0.2143, simple_loss=0.2999, pruned_loss=0.06441, over 8361.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2792, pruned_loss=0.0558, over 1612450.95 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:25,404 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241768.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:39,744 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 04:30:51,563 INFO [train.py:901] (0/4) Epoch 30, batch 7400, loss[loss=0.2203, simple_loss=0.3065, pruned_loss=0.06708, over 8596.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2796, pruned_loss=0.05605, over 1611780.99 frames. ], batch size: 39, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:51,676 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8442, 1.4195, 4.1725, 1.8043, 3.2977, 3.2204, 3.6642, 3.6728], + device='cuda:0'), covar=tensor([0.1390, 0.6801, 0.1510, 0.5688, 0.2550, 0.2233, 0.1261, 0.1135], + device='cuda:0'), in_proj_covar=tensor([0.0703, 0.0675, 0.0762, 0.0679, 0.0764, 0.0652, 0.0663, 0.0739], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:30:59,775 WARNING [train.py:1067] (0/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 04:31:04,083 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241821.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:05,935 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.497e+02 3.037e+02 3.880e+02 5.984e+02, threshold=6.074e+02, percent-clipped=0.0 +2023-02-09 04:31:23,879 INFO [scaling.py:679] (0/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-09 04:31:24,335 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241849.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:31:27,626 INFO [train.py:901] (0/4) Epoch 30, batch 7450, loss[loss=0.1567, simple_loss=0.2566, pruned_loss=0.02843, over 7984.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2804, pruned_loss=0.05627, over 1611896.96 frames. 
], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:31:27,785 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([5.9045, 1.6115, 6.0880, 2.2883, 5.5277, 5.1245, 5.5892, 5.5409], + device='cuda:0'), covar=tensor([0.0576, 0.5228, 0.0373, 0.4178, 0.0948, 0.0911, 0.0550, 0.0570], + device='cuda:0'), in_proj_covar=tensor([0.0701, 0.0674, 0.0760, 0.0677, 0.0762, 0.0650, 0.0661, 0.0737], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:31:40,211 WARNING [train.py:1067] (0/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 04:31:48,172 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:54,445 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.7274, 2.2556, 3.4727, 1.4686, 2.7185, 2.0147, 1.8845, 2.5370], + device='cuda:0'), covar=tensor([0.2482, 0.3480, 0.1305, 0.6111, 0.2504, 0.4633, 0.3148, 0.3378], + device='cuda:0'), in_proj_covar=tensor([0.0547, 0.0651, 0.0568, 0.0680, 0.0672, 0.0622, 0.0575, 0.0651], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:32:02,789 INFO [train.py:901] (0/4) Epoch 30, batch 7500, loss[loss=0.1578, simple_loss=0.2259, pruned_loss=0.04481, over 7232.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2806, pruned_loss=0.05626, over 1609661.98 frames. ], batch size: 16, lr: 2.49e-03, grad_scale: 16.0 +2023-02-09 04:32:18,963 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.491e+02 2.852e+02 3.531e+02 9.058e+02, threshold=5.704e+02, percent-clipped=2.0 +2023-02-09 04:32:35,936 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1020, 2.3110, 1.9909, 2.9153, 1.3697, 1.7598, 2.3116, 2.2704], + device='cuda:0'), covar=tensor([0.0741, 0.0840, 0.0832, 0.0336, 0.1104, 0.1315, 0.0754, 0.0826], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0195, 0.0245, 0.0215, 0.0203, 0.0248, 0.0250, 0.0205], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:32:39,986 INFO [train.py:901] (0/4) Epoch 30, batch 7550, loss[loss=0.2069, simple_loss=0.2942, pruned_loss=0.05978, over 8294.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.28, pruned_loss=0.05579, over 1610689.72 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:32:58,713 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.2510, 2.6974, 2.0918, 3.5277, 1.7191, 1.8834, 2.4517, 2.6250], + device='cuda:0'), covar=tensor([0.0722, 0.0747, 0.0840, 0.0342, 0.1021, 0.1197, 0.0804, 0.0732], + device='cuda:0'), in_proj_covar=tensor([0.0232, 0.0194, 0.0244, 0.0214, 0.0203, 0.0247, 0.0249, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:33:01,505 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241983.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:33:13,525 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/checkpoint-242000.pt +2023-02-09 04:33:17,223 INFO [train.py:901] (0/4) Epoch 30, batch 7600, loss[loss=0.1883, simple_loss=0.282, pruned_loss=0.0473, over 8122.00 frames. 
], tot_loss[loss=0.1957, simple_loss=0.28, pruned_loss=0.0557, over 1615687.58 frames. ], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:33:32,888 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.414e+02 3.070e+02 3.745e+02 6.631e+02, threshold=6.140e+02, percent-clipped=3.0 +2023-02-09 04:33:54,463 INFO [train.py:901] (0/4) Epoch 30, batch 7650, loss[loss=0.2587, simple_loss=0.337, pruned_loss=0.09022, over 8248.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2823, pruned_loss=0.05719, over 1614151.89 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:07,141 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([1.4090, 1.5177, 1.4411, 1.7525, 0.7306, 1.3210, 1.4029, 1.4890], + device='cuda:0'), covar=tensor([0.0870, 0.0777, 0.0896, 0.0537, 0.1101, 0.1323, 0.0669, 0.0709], + device='cuda:0'), in_proj_covar=tensor([0.0233, 0.0195, 0.0245, 0.0215, 0.0203, 0.0248, 0.0250, 0.0204], + device='cuda:0'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:0') +2023-02-09 04:34:25,725 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:34:29,807 INFO [train.py:901] (0/4) Epoch 30, batch 7700, loss[loss=0.1897, simple_loss=0.2896, pruned_loss=0.04493, over 8321.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2813, pruned_loss=0.0563, over 1617746.45 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:44,314 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.431e+02 3.028e+02 3.722e+02 6.918e+02, threshold=6.057e+02, percent-clipped=1.0 +2023-02-09 04:34:54,349 WARNING [train.py:1067] (0/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 04:34:55,305 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:05,990 INFO [train.py:901] (0/4) Epoch 30, batch 7750, loss[loss=0.2333, simple_loss=0.3193, pruned_loss=0.07362, over 8200.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.05627, over 1611284.84 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:13,206 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242164.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:13,754 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242165.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:18,585 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242171.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:34,895 INFO [zipformer.py:1185] (0/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242193.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:35:42,660 INFO [train.py:901] (0/4) Epoch 30, batch 7800, loss[loss=0.1963, simple_loss=0.2648, pruned_loss=0.0639, over 7186.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2795, pruned_loss=0.05599, over 1608636.03 frames. 
], batch size: 16, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:51,706 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([2.1357, 1.8584, 2.2485, 1.9612, 2.2590, 2.1818, 2.0335, 1.2226], + device='cuda:0'), covar=tensor([0.5652, 0.5103, 0.2281, 0.3968, 0.2728, 0.3508, 0.1974, 0.5645], + device='cuda:0'), in_proj_covar=tensor([0.0974, 0.1039, 0.0856, 0.1017, 0.1036, 0.0951, 0.0782, 0.0863], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:0') +2023-02-09 04:35:57,874 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.396e+02 3.014e+02 3.960e+02 8.063e+02, threshold=6.029e+02, percent-clipped=4.0 +2023-02-09 04:36:11,187 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242244.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:17,995 INFO [train.py:901] (0/4) Epoch 30, batch 7850, loss[loss=0.1708, simple_loss=0.2543, pruned_loss=0.04366, over 7780.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2791, pruned_loss=0.05569, over 1608453.92 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:36,022 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:52,303 INFO [train.py:901] (0/4) Epoch 30, batch 7900, loss[loss=0.1967, simple_loss=0.2762, pruned_loss=0.05864, over 7919.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2787, pruned_loss=0.05501, over 1605762.04 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:54,993 INFO [zipformer.py:1185] (0/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242308.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:37:06,477 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.359e+02 2.894e+02 3.889e+02 1.272e+03, threshold=5.788e+02, percent-clipped=10.0 +2023-02-09 04:37:08,017 INFO [zipformer.py:1185] (0/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242327.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:26,300 INFO [train.py:901] (0/4) Epoch 30, batch 7950, loss[loss=0.1998, simple_loss=0.2874, pruned_loss=0.05612, over 8331.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2797, pruned_loss=0.05528, over 1609491.47 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:37:26,518 INFO [zipformer.py:1185] (0/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242354.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:43,564 INFO [zipformer.py:1185] (0/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242379.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:52,887 INFO [zipformer.py:2431] (0/4) attn_weights_entropy = tensor([3.8609, 3.7839, 3.5400, 1.6979, 3.4169, 3.4573, 3.3236, 3.3466], + device='cuda:0'), covar=tensor([0.0859, 0.0619, 0.0999, 0.4684, 0.0947, 0.1241, 0.1428, 0.0926], + device='cuda:0'), in_proj_covar=tensor([0.0560, 0.0467, 0.0458, 0.0571, 0.0453, 0.0483, 0.0454, 0.0421], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:0') +2023-02-09 04:38:00,592 INFO [train.py:901] (0/4) Epoch 30, batch 8000, loss[loss=0.1605, simple_loss=0.2426, pruned_loss=0.03921, over 7706.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05638, over 1607380.98 frames. 
], batch size: 18, lr: 2.49e-03, grad_scale: 8.0
+2023-02-09 04:38:14,874 INFO [optim.py:369] (0/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.478e+02 2.968e+02 3.707e+02 1.083e+03, threshold=5.936e+02, percent-clipped=5.0
+2023-02-09 04:38:35,236 INFO [train.py:901] (0/4) Epoch 30, batch 8050, loss[loss=0.2099, simple_loss=0.2922, pruned_loss=0.06385, over 7218.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2804, pruned_loss=0.05697, over 1593580.48 frames. ], batch size: 72, lr: 2.49e-03, grad_scale: 8.0
+2023-02-09 04:38:58,267 INFO [checkpoint.py:75] (0/4) Saving checkpoint to pruned_transducer_stateless7_streaming/exp/v1/epoch-30.pt
+2023-02-09 04:38:59,103 INFO [train.py:1165] (0/4) Done!
diff --git a/log/log-train-2023-02-08-23-42-53-1 b/log/log-train-2023-02-08-23-42-53-1
new file mode 100644
index 0000000000000000000000000000000000000000..c3ab23c6f2fd14a51088080b583def6323c5ba39
--- /dev/null
+++ b/log/log-train-2023-02-08-23-42-53-1
@@ -0,0 +1,2920 @@
+2023-02-08 23:42:53,784 INFO [train.py:973] (1/4) Training started
+2023-02-08 23:42:53,784 INFO [train.py:983] (1/4) Device: cuda:1
+2023-02-08 23:42:53,850 INFO [train.py:992] (1/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r8n07', 'IP address': '10.1.8.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 28, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+2023-02-08 23:42:53,850 INFO [train.py:994] (1/4) About to create model
+2023-02-08 23:42:54,155 INFO [zipformer.py:402] (1/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+2023-02-08 23:42:54,167 INFO [train.py:998] (1/4) Number of model parameters: 20697573
+2023-02-08 23:42:54,168 INFO [checkpoint.py:112] (1/4) Loading checkpoint from pruned_transducer_stateless7_streaming/exp/v1/epoch-27.pt
+2023-02-08 23:43:03,616 INFO [train.py:1013] (1/4) Using DDP
+2023-02-08 23:43:03,872 INFO [train.py:1030] (1/4) Loading optimizer state dict
+2023-02-08 23:43:04,090 INFO [train.py:1038] (1/4) Loading scheduler state dict
+2023-02-08 23:43:04,091 INFO [asr_datamodule.py:420] (1/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts
+2023-02-08 23:43:04,266 INFO [asr_datamodule.py:224] (1/4) Enable MUSAN
+2023-02-08 23:43:04,267 INFO [asr_datamodule.py:225] (1/4) About to get Musan cuts
+2023-02-08 23:43:05,805 INFO [asr_datamodule.py:249] (1/4) Enable SpecAugment
+2023-02-08 23:43:05,805 INFO [asr_datamodule.py:250] (1/4) Time warp factor: 80
+2023-02-08 23:43:05,805 INFO [asr_datamodule.py:260] (1/4) Num frame mask: 10
+2023-02-08 23:43:05,805 INFO [asr_datamodule.py:273] (1/4) About to create train dataset
+2023-02-08 23:43:05,806 INFO [asr_datamodule.py:300] (1/4) Using DynamicBucketingSampler.
+2023-02-08 23:43:05,825 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-08 23:43:07,885 INFO [asr_datamodule.py:316] (1/4) About to create train dataloader
+2023-02-08 23:43:07,885 INFO [asr_datamodule.py:430] (1/4) About to get dev-clean cuts
+2023-02-08 23:43:07,910 INFO [asr_datamodule.py:437] (1/4) About to get dev-other cuts
+2023-02-08 23:43:07,932 INFO [asr_datamodule.py:347] (1/4) About to create dev dataset
+2023-02-08 23:43:08,281 INFO [asr_datamodule.py:364] (1/4) About to create dev dataloader
+2023-02-08 23:43:08,281 INFO [train.py:1122] (1/4) Loading grad scaler state dict
+2023-02-08 23:43:20,414 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775
+2023-02-08 23:43:25,772 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0
+2023-02-08 23:43:26,066 INFO [train.py:901] (1/4) Epoch 28, batch 0, loss[loss=0.2683, simple_loss=0.3408, pruned_loss=0.09788, over 8194.00 frames. ], tot_loss[loss=0.2683, simple_loss=0.3408, pruned_loss=0.09788, over 8194.00 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:43:26,066 INFO [train.py:926] (1/4) Computing validation loss
+2023-02-08 23:43:38,187 INFO [train.py:935] (1/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2712, pruned_loss=0.03579, over 944034.00 frames.
+2023-02-08 23:43:38,189 INFO [train.py:936] (1/4) Maximum memory allocated so far is 5898MB
+2023-02-08 23:43:48,585 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218250.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:43:59,122 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225
+2023-02-08 23:43:59,194 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218260.0, num_to_drop=1, layers_to_drop={0}
+2023-02-08 23:44:26,822 INFO [train.py:901] (1/4) Epoch 28, batch 50, loss[loss=0.1753, simple_loss=0.2634, pruned_loss=0.04356, over 8243.00 frames.
], tot_loss[loss=0.2011, simple_loss=0.2837, pruned_loss=0.05927, over 366290.63 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:44:45,178 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375
+2023-02-08 23:44:46,084 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7173, 2.6415, 1.8021, 2.3582, 2.3274, 1.5716, 2.2139, 2.3342],
+ device='cuda:1'), covar=tensor([0.1510, 0.0464, 0.1334, 0.0670, 0.0686, 0.1713, 0.0987, 0.1043],
+ device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0240, 0.0340, 0.0311, 0.0303, 0.0345, 0.0348, 0.0321],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-08 23:44:48,212 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.503e+02 3.099e+02 3.838e+02 3.677e+03, threshold=6.198e+02, percent-clipped=7.0
+2023-02-08 23:45:01,314 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0
+2023-02-08 23:45:09,775 INFO [train.py:901] (1/4) Epoch 28, batch 100, loss[loss=0.204, simple_loss=0.296, pruned_loss=0.05604, over 8363.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2829, pruned_loss=0.05853, over 642385.34 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:45:12,270 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725
+2023-02-08 23:45:42,208 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218375.0, num_to_drop=1, layers_to_drop={1}
+2023-02-08 23:45:52,957 INFO [train.py:901] (1/4) Epoch 28, batch 150, loss[loss=0.2282, simple_loss=0.3142, pruned_loss=0.07105, over 8648.00 frames. ], tot_loss[loss=0.2011, simple_loss=0.2838, pruned_loss=0.05914, over 858455.84 frames. ], batch size: 48, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:46:01,151 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218397.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:46:01,340 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0
+2023-02-08 23:46:10,824 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2140, 1.7323, 1.8397, 1.6457, 1.2663, 1.7026, 2.0329, 1.8486],
+ device='cuda:1'), covar=tensor([0.0560, 0.1178, 0.1600, 0.1380, 0.0582, 0.1404, 0.0639, 0.0606],
+ device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0112, 0.0146],
+ device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008],
+ device='cuda:1')
+2023-02-08 23:46:12,821 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.274e+02 2.796e+02 3.416e+02 5.816e+02, threshold=5.591e+02, percent-clipped=0.0
+2023-02-08 23:46:19,712 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218422.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:46:32,311 INFO [train.py:901] (1/4) Epoch 28, batch 200, loss[loss=0.1961, simple_loss=0.2857, pruned_loss=0.05328, over 8462.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2831, pruned_loss=0.05882, over 1026204.14 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:46:50,626 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218462.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:47:10,724 INFO [train.py:901] (1/4) Epoch 28, batch 250, loss[loss=0.2152, simple_loss=0.2936, pruned_loss=0.06837, over 8369.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.283, pruned_loss=0.05827, over 1158780.76 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 16.0
+2023-02-08 23:47:23,080 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375
+2023-02-08 23:47:31,296 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.405e+02 2.917e+02 3.543e+02 7.929e+02, threshold=5.833e+02, percent-clipped=6.0
+2023-02-08 23:47:33,450 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45
+2023-02-08 23:47:41,359 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218527.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:47:48,876 INFO [train.py:901] (1/4) Epoch 28, batch 300, loss[loss=0.1857, simple_loss=0.2791, pruned_loss=0.04613, over 8472.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05837, over 1262917.92 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 16.0
+2023-02-08 23:47:53,401 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218544.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:48:14,318 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218572.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:48:18,152 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218577.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:48:25,696 INFO [train.py:901] (1/4) Epoch 28, batch 350, loss[loss=0.2123, simple_loss=0.2819, pruned_loss=0.07134, over 7539.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05854, over 1340446.29 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 16.0
+2023-02-08 23:48:28,669 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218592.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:48:29,467 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7931, 1.8094, 2.7621, 2.1656, 2.4975, 1.8807, 1.6488, 1.4396],
+ device='cuda:1'), covar=tensor([0.7822, 0.6316, 0.2247, 0.4567, 0.3559, 0.4729, 0.3172, 0.5885],
+ device='cuda:1'), in_proj_covar=tensor([0.0959, 0.1017, 0.0823, 0.0986, 0.1018, 0.0922, 0.0763, 0.0846],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-08 23:48:43,884 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.330e+02 2.853e+02 3.797e+02 9.826e+02, threshold=5.707e+02, percent-clipped=4.0
+2023-02-08 23:48:59,283 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218631.0, num_to_drop=1, layers_to_drop={0}
+2023-02-08 23:49:04,797 INFO [train.py:901] (1/4) Epoch 28, batch 400, loss[loss=0.1695, simple_loss=0.2504, pruned_loss=0.0443, over 7546.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05845, over 1401020.76 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 16.0
+2023-02-08 23:49:16,380 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5696, 1.8973, 2.8968, 1.4347, 2.0187, 1.9120, 1.6802, 2.1809],
+ device='cuda:1'), covar=tensor([0.2055, 0.2812, 0.0889, 0.4805, 0.2163, 0.3463, 0.2544, 0.2484],
+ device='cuda:1'), in_proj_covar=tensor([0.0542, 0.0637, 0.0566, 0.0672, 0.0660, 0.0615, 0.0568, 0.0648],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-08 23:49:17,842 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218656.0, num_to_drop=1, layers_to_drop={1}
+2023-02-08 23:49:19,964 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218659.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:49:40,564 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218687.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:49:41,126 INFO [train.py:901] (1/4) Epoch 28, batch 450, loss[loss=0.1768, simple_loss=0.269, pruned_loss=0.04226, over 8138.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.285, pruned_loss=0.05896, over 1452349.09 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 16.0
+2023-02-08 23:49:59,791 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.362e+02 2.836e+02 3.643e+02 9.062e+02, threshold=5.672e+02, percent-clipped=2.0
+2023-02-08 23:50:18,541 INFO [train.py:901] (1/4) Epoch 28, batch 500, loss[loss=0.174, simple_loss=0.2656, pruned_loss=0.04113, over 8479.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.284, pruned_loss=0.05823, over 1490572.80 frames. ], batch size: 27, lr: 2.71e-03, grad_scale: 16.0
+2023-02-08 23:50:57,129 INFO [train.py:901] (1/4) Epoch 28, batch 550, loss[loss=0.1905, simple_loss=0.2721, pruned_loss=0.05442, over 7928.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2846, pruned_loss=0.05871, over 1517920.14 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:51:05,247 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4076, 2.3219, 3.0593, 2.5460, 3.0690, 2.5139, 2.3124, 1.7083],
+ device='cuda:1'), covar=tensor([0.6236, 0.5386, 0.2420, 0.4133, 0.2828, 0.3352, 0.2121, 0.6004],
+ device='cuda:1'), in_proj_covar=tensor([0.0963, 0.1021, 0.0829, 0.0991, 0.1024, 0.0928, 0.0768, 0.0849],
+ device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:1')
+2023-02-08 23:51:16,044 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.392e+02 2.925e+02 3.560e+02 1.211e+03, threshold=5.850e+02, percent-clipped=4.0
+2023-02-08 23:51:29,357 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1711, 3.4918, 2.3107, 2.9802, 2.8350, 2.1765, 2.6837, 2.9419],
+ device='cuda:1'), covar=tensor([0.1682, 0.0415, 0.1238, 0.0789, 0.0818, 0.1523, 0.1278, 0.1230],
+ device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0244, 0.0342, 0.0315, 0.0305, 0.0349, 0.0352, 0.0325],
+ device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:1')
+2023-02-08 23:51:30,152 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218833.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:51:33,469 INFO [train.py:901] (1/4) Epoch 28, batch 600, loss[loss=0.184, simple_loss=0.2694, pruned_loss=0.0493, over 8236.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2845, pruned_loss=0.05846, over 1541127.97 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:51:33,892 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.17 vs. limit=5.0
+2023-02-08 23:51:53,184 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218858.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:51:56,576 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625
+2023-02-08 23:52:04,137 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218871.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:52:06,122 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.05 vs. limit=5.0
+2023-02-08 23:52:10,229 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3698, 1.6333, 1.6217, 1.1148, 1.6769, 1.3299, 0.3001, 1.5929],
+ device='cuda:1'), covar=tensor([0.0621, 0.0430, 0.0356, 0.0593, 0.0446, 0.1029, 0.1016, 0.0308],
+ device='cuda:1'), in_proj_covar=tensor([0.0474, 0.0412, 0.0366, 0.0458, 0.0395, 0.0553, 0.0402, 0.0442],
+ device='cuda:1'), out_proj_covar=tensor([1.2539e-04, 1.0696e-04, 9.5266e-05, 1.1974e-04, 1.0344e-04, 1.5414e-04,
+ 1.0732e-04, 1.1585e-04], device='cuda:1')
+2023-02-08 23:52:18,545 INFO [train.py:901] (1/4) Epoch 28, batch 650, loss[loss=0.1632, simple_loss=0.2592, pruned_loss=0.03357, over 8502.00 frames. ], tot_loss[loss=0.199, simple_loss=0.283, pruned_loss=0.05745, over 1557046.85 frames. ], batch size: 26, lr: 2.71e-03, grad_scale: 8.0
+2023-02-08 23:52:40,036 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.221e+02 2.637e+02 3.403e+02 7.509e+02, threshold=5.274e+02, percent-clipped=1.0
+2023-02-08 23:52:41,041 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218915.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:52:54,627 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6974, 4.7304, 4.2054, 2.2003, 4.1287, 4.3007, 4.2105, 4.1061],
+ device='cuda:1'), covar=tensor([0.0586, 0.0468, 0.0951, 0.4330, 0.0844, 0.0885, 0.1253, 0.0765],
+ device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0461, 0.0445, 0.0555, 0.0444, 0.0463, 0.0439, 0.0406],
+ device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:1')
+2023-02-08 23:52:55,955 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218936.0, num_to_drop=0, layers_to_drop=set()
+2023-02-08 23:52:57,371 INFO [train.py:901] (1/4) Epoch 28, batch 700, loss[loss=0.1813, simple_loss=0.2578, pruned_loss=0.05242, over 7544.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.283, pruned_loss=0.05793, over 1567736.63 frames.
], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:59,037 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218940.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:01,198 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218943.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:18,855 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218968.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:31,040 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218983.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:33,178 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218986.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:34,534 INFO [train.py:901] (1/4) Epoch 28, batch 750, loss[loss=0.1927, simple_loss=0.2686, pruned_loss=0.05835, over 7545.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2837, pruned_loss=0.05842, over 1577188.68 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:53:46,816 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219002.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:55,126 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.280e+02 2.810e+02 3.388e+02 7.203e+02, threshold=5.620e+02, percent-clipped=6.0 +2023-02-08 23:53:55,158 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-08 23:54:04,597 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-08 23:54:12,570 INFO [train.py:901] (1/4) Epoch 28, batch 800, loss[loss=0.1816, simple_loss=0.2669, pruned_loss=0.04815, over 7766.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2845, pruned_loss=0.05868, over 1580938.24 frames. ], batch size: 19, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:12,691 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7407, 5.8447, 5.1602, 2.5476, 5.0919, 5.5450, 5.3198, 5.3865], + device='cuda:1'), covar=tensor([0.0503, 0.0426, 0.0913, 0.4093, 0.0803, 0.0730, 0.1134, 0.0585], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0460, 0.0445, 0.0555, 0.0444, 0.0464, 0.0438, 0.0405], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-08 23:54:13,416 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219039.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:22,016 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219051.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:49,148 INFO [train.py:901] (1/4) Epoch 28, batch 850, loss[loss=0.1716, simple_loss=0.2641, pruned_loss=0.03951, over 8294.00 frames. ], tot_loss[loss=0.201, simple_loss=0.285, pruned_loss=0.05857, over 1594305.23 frames. 
], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:50,761 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219090.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:55:10,252 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.432e+02 3.183e+02 3.929e+02 8.024e+02, threshold=6.365e+02, percent-clipped=6.0 +2023-02-08 23:55:11,241 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4880, 2.3518, 3.1349, 2.5101, 2.9512, 2.6153, 2.4291, 1.9589], + device='cuda:1'), covar=tensor([0.5672, 0.5160, 0.1918, 0.3882, 0.2629, 0.3065, 0.1945, 0.5452], + device='cuda:1'), in_proj_covar=tensor([0.0965, 0.1023, 0.0830, 0.0991, 0.1022, 0.0928, 0.0769, 0.0848], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-08 23:55:15,816 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-08 23:55:27,539 INFO [train.py:901] (1/4) Epoch 28, batch 900, loss[loss=0.2437, simple_loss=0.3295, pruned_loss=0.07898, over 8636.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2844, pruned_loss=0.05882, over 1601735.97 frames. ], batch size: 31, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:03,870 INFO [train.py:901] (1/4) Epoch 28, batch 950, loss[loss=0.1828, simple_loss=0.2698, pruned_loss=0.04792, over 8291.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2838, pruned_loss=0.05843, over 1605752.73 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:15,303 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3571, 1.6068, 4.5913, 1.5764, 4.0353, 3.8244, 4.1084, 4.0109], + device='cuda:1'), covar=tensor([0.0639, 0.4558, 0.0545, 0.4689, 0.1139, 0.1019, 0.0644, 0.0673], + device='cuda:1'), in_proj_covar=tensor([0.0676, 0.0659, 0.0729, 0.0654, 0.0739, 0.0631, 0.0637, 0.0711], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-08 23:56:22,890 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.524e+02 3.053e+02 4.249e+02 9.516e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-08 23:56:29,702 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219221.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:56:34,848 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-08 23:56:40,761 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1144, 1.5782, 4.3410, 1.5518, 3.8073, 3.6311, 3.9262, 3.7924], + device='cuda:1'), covar=tensor([0.0821, 0.4518, 0.0605, 0.4531, 0.1263, 0.1001, 0.0678, 0.0770], + device='cuda:1'), in_proj_covar=tensor([0.0676, 0.0659, 0.0727, 0.0653, 0.0739, 0.0631, 0.0636, 0.0711], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-08 23:56:43,596 INFO [train.py:901] (1/4) Epoch 28, batch 1000, loss[loss=0.1735, simple_loss=0.2642, pruned_loss=0.0414, over 8196.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.0586, over 1605568.02 frames. 
], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:46,608 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219242.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:04,452 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219267.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:11,554 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-08 23:57:19,431 INFO [train.py:901] (1/4) Epoch 28, batch 1050, loss[loss=0.2094, simple_loss=0.3056, pruned_loss=0.05661, over 8345.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05799, over 1609162.29 frames. ], batch size: 26, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:57:23,672 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-08 23:57:33,307 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219307.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:38,278 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.456e+02 2.957e+02 3.788e+02 8.190e+02, threshold=5.915e+02, percent-clipped=1.0 +2023-02-08 23:57:47,811 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219327.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:52,097 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219332.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:52,298 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-08 23:57:56,808 INFO [train.py:901] (1/4) Epoch 28, batch 1100, loss[loss=0.1987, simple_loss=0.2831, pruned_loss=0.05716, over 7407.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2841, pruned_loss=0.05842, over 1614870.02 frames. ], batch size: 17, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:03,512 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219346.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:13,591 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219360.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:58:30,134 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219383.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:33,473 INFO [train.py:901] (1/4) Epoch 28, batch 1150, loss[loss=0.2219, simple_loss=0.2858, pruned_loss=0.079, over 5956.00 frames. ], tot_loss[loss=0.199, simple_loss=0.283, pruned_loss=0.05747, over 1616299.61 frames. ], batch size: 13, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:37,163 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-08 23:58:52,532 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.386e+02 3.071e+02 3.782e+02 1.293e+03, threshold=6.141e+02, percent-clipped=2.0 +2023-02-08 23:59:07,138 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219434.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:10,012 INFO [train.py:901] (1/4) Epoch 28, batch 1200, loss[loss=0.1741, simple_loss=0.252, pruned_loss=0.04812, over 7817.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2824, pruned_loss=0.0571, over 1614923.54 frames. 
], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:13,031 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219442.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:27,991 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219461.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:47,588 INFO [train.py:901] (1/4) Epoch 28, batch 1250, loss[loss=0.1883, simple_loss=0.2865, pruned_loss=0.04502, over 8313.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2831, pruned_loss=0.05744, over 1617371.50 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:48,512 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7991, 1.7608, 1.9405, 1.7749, 0.9464, 1.6197, 2.2662, 2.3378], + device='cuda:1'), covar=tensor([0.0457, 0.1228, 0.1640, 0.1395, 0.0609, 0.1522, 0.0612, 0.0521], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0188, 0.0161, 0.0101, 0.0163, 0.0112, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-08 23:59:55,027 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:02,350 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219508.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:06,612 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.357e+02 2.809e+02 3.466e+02 7.121e+02, threshold=5.618e+02, percent-clipped=3.0 +2023-02-09 00:00:23,407 INFO [train.py:901] (1/4) Epoch 28, batch 1300, loss[loss=0.1791, simple_loss=0.2596, pruned_loss=0.04932, over 7450.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2826, pruned_loss=0.05742, over 1617401.36 frames. ], batch size: 17, lr: 2.71e-03, grad_scale: 8.0 +2023-02-09 00:00:25,004 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9567, 2.0423, 1.7615, 2.7417, 1.3098, 1.5590, 1.9620, 2.0295], + device='cuda:1'), covar=tensor([0.0724, 0.0826, 0.0895, 0.0385, 0.1039, 0.1318, 0.0821, 0.0854], + device='cuda:1'), in_proj_covar=tensor([0.0227, 0.0191, 0.0240, 0.0209, 0.0199, 0.0242, 0.0245, 0.0202], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 00:00:31,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:42,920 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219565.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:01:02,333 INFO [train.py:901] (1/4) Epoch 28, batch 1350, loss[loss=0.1871, simple_loss=0.2701, pruned_loss=0.05202, over 7805.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2835, pruned_loss=0.0579, over 1619426.85 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:01:22,005 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.365e+02 2.856e+02 3.377e+02 7.819e+02, threshold=5.713e+02, percent-clipped=4.0 +2023-02-09 00:01:39,663 INFO [train.py:901] (1/4) Epoch 28, batch 1400, loss[loss=0.1699, simple_loss=0.2609, pruned_loss=0.03942, over 8032.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05724, over 1618286.34 frames. 
], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:09,798 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219680.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:15,336 INFO [train.py:901] (1/4) Epoch 28, batch 1450, loss[loss=0.1869, simple_loss=0.2796, pruned_loss=0.04708, over 7962.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2814, pruned_loss=0.05694, over 1617854.88 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:23,473 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:25,414 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 00:02:28,352 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219704.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:02:33,319 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7187, 5.8762, 5.0332, 2.8170, 5.1107, 5.5225, 5.2685, 5.3230], + device='cuda:1'), covar=tensor([0.0513, 0.0302, 0.0806, 0.3771, 0.0797, 0.0905, 0.1025, 0.0647], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0460, 0.0448, 0.0555, 0.0444, 0.0465, 0.0439, 0.0406], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:02:36,724 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.298e+02 2.874e+02 3.536e+02 7.746e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-09 00:02:39,218 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:43,524 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:54,214 INFO [train.py:901] (1/4) Epoch 28, batch 1500, loss[loss=0.1445, simple_loss=0.2279, pruned_loss=0.03056, over 7661.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.05679, over 1616305.37 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:57,276 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:05,681 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219754.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:23,841 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219779.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:25,269 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219781.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:29,879 INFO [train.py:901] (1/4) Epoch 28, batch 1550, loss[loss=0.1752, simple_loss=0.2495, pruned_loss=0.05039, over 7289.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2823, pruned_loss=0.05687, over 1611910.38 frames. 
], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:03:42,605 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:49,435 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.435e+02 2.945e+02 3.827e+02 6.900e+02, threshold=5.889e+02, percent-clipped=4.0 +2023-02-09 00:03:54,630 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219819.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:03:55,263 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6912, 1.4363, 1.6250, 1.3552, 0.9637, 1.4316, 1.5694, 1.2620], + device='cuda:1'), covar=tensor([0.0620, 0.1349, 0.1737, 0.1582, 0.0657, 0.1549, 0.0754, 0.0768], + device='cuda:1'), in_proj_covar=tensor([0.0098, 0.0152, 0.0188, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:1'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 00:04:03,136 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:08,596 INFO [train.py:901] (1/4) Epoch 28, batch 1600, loss[loss=0.1983, simple_loss=0.2852, pruned_loss=0.0557, over 8465.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2831, pruned_loss=0.05693, over 1618696.92 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:04:18,703 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219852.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:35,335 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:44,469 INFO [train.py:901] (1/4) Epoch 28, batch 1650, loss[loss=0.2518, simple_loss=0.3326, pruned_loss=0.08546, over 8660.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2831, pruned_loss=0.05671, over 1617238.94 frames. ], batch size: 39, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:02,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.482e+02 2.898e+02 3.443e+02 5.647e+02, threshold=5.797e+02, percent-clipped=0.0 +2023-02-09 00:05:20,267 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219936.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:21,462 INFO [train.py:901] (1/4) Epoch 28, batch 1700, loss[loss=0.2103, simple_loss=0.2864, pruned_loss=0.06712, over 8599.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2829, pruned_loss=0.05695, over 1616611.69 frames. ], batch size: 31, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:39,120 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219961.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:43,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219967.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:57,778 INFO [train.py:901] (1/4) Epoch 28, batch 1750, loss[loss=0.1651, simple_loss=0.2531, pruned_loss=0.0385, over 7558.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2826, pruned_loss=0.05628, over 1617141.64 frames. 
], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:06:17,587 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.339e+02 2.848e+02 3.606e+02 1.047e+03, threshold=5.695e+02, percent-clipped=4.0 +2023-02-09 00:06:34,450 INFO [train.py:901] (1/4) Epoch 28, batch 1800, loss[loss=0.1605, simple_loss=0.2625, pruned_loss=0.0292, over 8257.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2823, pruned_loss=0.05667, over 1615792.92 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:02,975 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220075.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:07:11,957 INFO [train.py:901] (1/4) Epoch 28, batch 1850, loss[loss=0.1954, simple_loss=0.2831, pruned_loss=0.05385, over 8340.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2832, pruned_loss=0.05738, over 1619743.24 frames. ], batch size: 26, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:20,513 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220100.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:07:23,908 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220105.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:30,304 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.328e+02 2.682e+02 3.608e+02 8.535e+02, threshold=5.364e+02, percent-clipped=7.0 +2023-02-09 00:07:31,749 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:38,071 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220125.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:47,173 INFO [train.py:901] (1/4) Epoch 28, batch 1900, loss[loss=0.2061, simple_loss=0.2969, pruned_loss=0.05764, over 8455.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2823, pruned_loss=0.05727, over 1615359.26 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:19,360 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 00:08:25,760 INFO [train.py:901] (1/4) Epoch 28, batch 1950, loss[loss=0.261, simple_loss=0.3313, pruned_loss=0.09537, over 7113.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2822, pruned_loss=0.05748, over 1606342.87 frames. ], batch size: 71, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:32,984 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 00:08:44,716 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.461e+02 2.916e+02 3.869e+02 7.609e+02, threshold=5.833e+02, percent-clipped=8.0 +2023-02-09 00:08:48,263 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220219.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:51,065 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:53,656 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 00:09:01,373 INFO [train.py:901] (1/4) Epoch 28, batch 2000, loss[loss=0.1839, simple_loss=0.2746, pruned_loss=0.04655, over 8511.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2807, pruned_loss=0.05641, over 1604694.27 frames. 
], batch size: 28, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:02,862 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:07,808 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220247.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:08,581 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220248.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:29,128 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:37,451 INFO [train.py:901] (1/4) Epoch 28, batch 2050, loss[loss=0.2185, simple_loss=0.3066, pruned_loss=0.0652, over 8505.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2815, pruned_loss=0.05618, over 1610568.54 frames. ], batch size: 28, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:58,200 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.398e+02 2.757e+02 3.324e+02 6.340e+02, threshold=5.514e+02, percent-clipped=2.0 +2023-02-09 00:10:03,993 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9910, 1.8462, 2.2612, 1.9992, 2.2198, 2.1288, 1.9405, 1.1763], + device='cuda:1'), covar=tensor([0.6027, 0.4949, 0.2184, 0.4005, 0.2760, 0.3324, 0.1994, 0.5320], + device='cuda:1'), in_proj_covar=tensor([0.0970, 0.1029, 0.0835, 0.0997, 0.1028, 0.0934, 0.0773, 0.0853], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 00:10:12,766 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220334.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:10:15,436 INFO [train.py:901] (1/4) Epoch 28, batch 2100, loss[loss=0.1995, simple_loss=0.2803, pruned_loss=0.05933, over 8637.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.281, pruned_loss=0.05607, over 1614825.54 frames. ], batch size: 49, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:10:20,359 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-09 00:10:42,392 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-09 00:10:51,259 INFO [train.py:901] (1/4) Epoch 28, batch 2150, loss[loss=0.1655, simple_loss=0.2522, pruned_loss=0.03939, over 7449.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2821, pruned_loss=0.05708, over 1618847.65 frames. ], batch size: 17, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:11,484 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.504e+02 2.973e+02 4.041e+02 1.001e+03, threshold=5.945e+02, percent-clipped=8.0 +2023-02-09 00:11:28,328 INFO [train.py:901] (1/4) Epoch 28, batch 2200, loss[loss=0.1991, simple_loss=0.2837, pruned_loss=0.05731, over 8338.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05699, over 1620495.91 frames. ], batch size: 26, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:36,288 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220449.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:11:44,017 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220460.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:03,395 INFO [train.py:901] (1/4) Epoch 28, batch 2250, loss[loss=0.2148, simple_loss=0.3033, pruned_loss=0.06315, over 8321.00 frames. 
], tot_loss[loss=0.1982, simple_loss=0.2819, pruned_loss=0.05723, over 1621023.59 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:09,254 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220496.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:15,026 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.42 vs. limit=5.0 +2023-02-09 00:12:22,281 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.331e+02 2.835e+02 3.325e+02 7.200e+02, threshold=5.671e+02, percent-clipped=3.0 +2023-02-09 00:12:27,560 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220521.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:41,520 INFO [train.py:901] (1/4) Epoch 28, batch 2300, loss[loss=0.1599, simple_loss=0.2435, pruned_loss=0.03819, over 7973.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.05621, over 1622945.87 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:44,383 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6690, 1.5456, 4.8827, 1.8673, 4.3126, 3.9983, 4.4269, 4.2989], + device='cuda:1'), covar=tensor([0.0597, 0.5088, 0.0485, 0.4448, 0.1143, 0.1031, 0.0584, 0.0675], + device='cuda:1'), in_proj_covar=tensor([0.0682, 0.0670, 0.0739, 0.0664, 0.0752, 0.0639, 0.0646, 0.0722], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:12:51,184 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2257, 1.1535, 2.1148, 1.1871, 2.0109, 2.2442, 2.3944, 1.9438], + device='cuda:1'), covar=tensor([0.1168, 0.1439, 0.0486, 0.1936, 0.0895, 0.0387, 0.0731, 0.0634], + device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0327, 0.0293, 0.0321, 0.0323, 0.0277, 0.0441, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:12:59,738 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:06,020 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220573.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:07,454 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220575.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:16,608 INFO [train.py:901] (1/4) Epoch 28, batch 2350, loss[loss=0.1805, simple_loss=0.2717, pruned_loss=0.04465, over 8478.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2808, pruned_loss=0.05617, over 1624755.70 frames. 
], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:13:18,215 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220590.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:18,797 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220591.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:35,684 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.370e+02 2.329e+02 2.956e+02 3.826e+02 8.837e+02, threshold=5.912e+02, percent-clipped=4.0 +2023-02-09 00:13:36,669 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220615.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:40,249 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:53,609 INFO [train.py:901] (1/4) Epoch 28, batch 2400, loss[loss=0.2175, simple_loss=0.2837, pruned_loss=0.07566, over 7697.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2806, pruned_loss=0.05635, over 1622221.92 frames. ], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:16,668 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220669.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:29,705 INFO [train.py:901] (1/4) Epoch 28, batch 2450, loss[loss=0.1975, simple_loss=0.2793, pruned_loss=0.05786, over 7989.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2801, pruned_loss=0.0562, over 1619836.04 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:42,736 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220706.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:48,793 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.507e+02 3.309e+02 3.917e+02 8.053e+02, threshold=6.618e+02, percent-clipped=4.0 +2023-02-09 00:15:03,092 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220735.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:15:05,104 INFO [train.py:901] (1/4) Epoch 28, batch 2500, loss[loss=0.2054, simple_loss=0.2727, pruned_loss=0.06907, over 7797.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2801, pruned_loss=0.05612, over 1621520.13 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:15:15,098 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3933, 1.4261, 1.3815, 1.8062, 0.7363, 1.2280, 1.3276, 1.4626], + device='cuda:1'), covar=tensor([0.0836, 0.0774, 0.0950, 0.0464, 0.1112, 0.1357, 0.0689, 0.0731], + device='cuda:1'), in_proj_covar=tensor([0.0229, 0.0193, 0.0242, 0.0211, 0.0202, 0.0245, 0.0249, 0.0204], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 00:15:42,737 INFO [train.py:901] (1/4) Epoch 28, batch 2550, loss[loss=0.2284, simple_loss=0.3069, pruned_loss=0.07492, over 8033.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2809, pruned_loss=0.05691, over 1622148.72 frames. 
], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:02,764 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.505e+02 3.011e+02 3.782e+02 1.017e+03, threshold=6.023e+02, percent-clipped=3.0 +2023-02-09 00:16:06,710 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220820.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:14,502 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220831.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:19,226 INFO [train.py:901] (1/4) Epoch 28, batch 2600, loss[loss=0.2093, simple_loss=0.2923, pruned_loss=0.06313, over 8638.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2817, pruned_loss=0.05753, over 1618730.13 frames. ], batch size: 34, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:20,649 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220840.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:24,294 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:32,235 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220856.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:57,368 INFO [train.py:901] (1/4) Epoch 28, batch 2650, loss[loss=0.1976, simple_loss=0.2817, pruned_loss=0.05672, over 8246.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2804, pruned_loss=0.05657, over 1619829.29 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:12,884 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5345, 2.4328, 1.7452, 2.1874, 2.1024, 1.5653, 2.0734, 2.1044], + device='cuda:1'), covar=tensor([0.1517, 0.0505, 0.1330, 0.0684, 0.0753, 0.1602, 0.1006, 0.1022], + device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0242, 0.0339, 0.0311, 0.0301, 0.0345, 0.0346, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:17:16,289 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.381e+02 2.801e+02 3.642e+02 5.464e+02, threshold=5.602e+02, percent-clipped=0.0 +2023-02-09 00:17:17,796 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220917.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:21,293 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220922.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:32,910 INFO [train.py:901] (1/4) Epoch 28, batch 2700, loss[loss=0.2108, simple_loss=0.3032, pruned_loss=0.05916, over 8264.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2811, pruned_loss=0.05685, over 1619055.77 frames. 
], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:37,481 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6231, 1.4215, 1.6771, 1.2868, 0.9104, 1.4221, 1.4750, 1.3853], + device='cuda:1'), covar=tensor([0.0619, 0.1250, 0.1615, 0.1559, 0.0610, 0.1461, 0.0742, 0.0687], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0162, 0.0102, 0.0163, 0.0113, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 00:17:50,477 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220962.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,043 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,535 INFO [train.py:901] (1/4) Epoch 28, batch 2750, loss[loss=0.1794, simple_loss=0.2699, pruned_loss=0.04445, over 8497.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2809, pruned_loss=0.05725, over 1614361.09 frames. ], batch size: 26, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:11,782 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:29,675 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221013.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:31,038 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 2.908e+02 3.517e+02 7.342e+02, threshold=5.816e+02, percent-clipped=5.0 +2023-02-09 00:18:31,965 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221016.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:43,261 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221032.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:47,244 INFO [train.py:901] (1/4) Epoch 28, batch 2800, loss[loss=0.1901, simple_loss=0.2792, pruned_loss=0.05052, over 8023.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2793, pruned_loss=0.05627, over 1607516.44 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:57,816 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8694, 1.7236, 2.3799, 1.5668, 1.5091, 2.2745, 0.4322, 1.4893], + device='cuda:1'), covar=tensor([0.1346, 0.1386, 0.0320, 0.0908, 0.2094, 0.0404, 0.1774, 0.1172], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0205, 0.0136, 0.0224, 0.0277, 0.0146, 0.0174, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:19:22,673 INFO [train.py:901] (1/4) Epoch 28, batch 2850, loss[loss=0.1997, simple_loss=0.2813, pruned_loss=0.05902, over 7988.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2803, pruned_loss=0.05671, over 1607164.15 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:19:43,232 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.366e+02 2.856e+02 3.627e+02 6.501e+02, threshold=5.713e+02, percent-clipped=2.0 +2023-02-09 00:19:53,962 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221128.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:00,677 INFO [train.py:901] (1/4) Epoch 28, batch 2900, loss[loss=0.209, simple_loss=0.3066, pruned_loss=0.05565, over 8103.00 frames. 
], tot_loss[loss=0.1982, simple_loss=0.2815, pruned_loss=0.05747, over 1611841.38 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:04,292 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4066, 1.5967, 2.1386, 1.3625, 1.5570, 1.6746, 1.5080, 1.5194], + device='cuda:1'), covar=tensor([0.2119, 0.2826, 0.1015, 0.4961, 0.2137, 0.3683, 0.2703, 0.2295], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0639, 0.0566, 0.0673, 0.0663, 0.0612, 0.0566, 0.0647], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:20:32,274 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 00:20:33,725 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221184.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:36,502 INFO [train.py:901] (1/4) Epoch 28, batch 2950, loss[loss=0.1738, simple_loss=0.2506, pruned_loss=0.04851, over 7783.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05815, over 1613526.16 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:43,328 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0329, 1.1978, 1.5635, 1.0058, 1.1348, 1.2310, 1.1161, 1.1427], + device='cuda:1'), covar=tensor([0.1473, 0.2008, 0.0780, 0.3504, 0.1605, 0.2606, 0.1905, 0.2017], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0640, 0.0567, 0.0674, 0.0663, 0.0612, 0.0567, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:20:55,456 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.299e+02 2.993e+02 3.879e+02 1.208e+03, threshold=5.985e+02, percent-clipped=10.0 +2023-02-09 00:21:01,649 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.04 vs. limit=5.0 +2023-02-09 00:21:13,540 INFO [train.py:901] (1/4) Epoch 28, batch 3000, loss[loss=0.1673, simple_loss=0.2462, pruned_loss=0.04419, over 7923.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2812, pruned_loss=0.0575, over 1607359.71 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:21:13,541 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 00:21:31,972 INFO [train.py:935] (1/4) Epoch 28, validation: loss=0.1712, simple_loss=0.2708, pruned_loss=0.03578, over 944034.00 frames. +2023-02-09 00:21:31,973 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 00:21:47,605 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2137, 1.0783, 1.3128, 1.0302, 0.9604, 1.3275, 0.0885, 0.9492], + device='cuda:1'), covar=tensor([0.1516, 0.1226, 0.0499, 0.0708, 0.2366, 0.0554, 0.1891, 0.1131], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0205, 0.0137, 0.0225, 0.0278, 0.0147, 0.0174, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:21:54,455 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:21:55,519 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-09 00:22:03,528 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.65 vs. 
limit=5.0 +2023-02-09 00:22:10,139 INFO [train.py:901] (1/4) Epoch 28, batch 3050, loss[loss=0.1868, simple_loss=0.2537, pruned_loss=0.05998, over 7703.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2828, pruned_loss=0.0582, over 1612634.16 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:22:10,359 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221288.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:18,082 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221299.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:28,217 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221313.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:29,357 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.361e+02 2.830e+02 3.600e+02 1.199e+03, threshold=5.660e+02, percent-clipped=4.0 +2023-02-09 00:22:37,155 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.7967, 2.2239, 3.7181, 1.6816, 1.9645, 3.7088, 0.7593, 2.1232], + device='cuda:1'), covar=tensor([0.1424, 0.1097, 0.0195, 0.1649, 0.1986, 0.0220, 0.1851, 0.1184], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0205, 0.0137, 0.0224, 0.0277, 0.0146, 0.0174, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:22:45,360 INFO [train.py:901] (1/4) Epoch 28, batch 3100, loss[loss=0.1815, simple_loss=0.2644, pruned_loss=0.04928, over 7527.00 frames. ], tot_loss[loss=0.2, simple_loss=0.283, pruned_loss=0.0585, over 1613693.17 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:18,456 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221381.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:20,619 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221384.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:23,254 INFO [train.py:901] (1/4) Epoch 28, batch 3150, loss[loss=0.1941, simple_loss=0.2698, pruned_loss=0.0592, over 7217.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2821, pruned_loss=0.0578, over 1612835.35 frames. 
], batch size: 16, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:31,492 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9000, 1.5283, 3.1493, 1.5252, 2.3499, 3.3569, 3.5257, 2.8851], + device='cuda:1'), covar=tensor([0.1215, 0.1797, 0.0328, 0.2115, 0.0979, 0.0270, 0.0520, 0.0546], + device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0329, 0.0294, 0.0323, 0.0326, 0.0278, 0.0443, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:23:34,610 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5038, 2.3821, 1.8393, 2.2864, 2.0388, 1.5431, 2.0329, 2.2461], + device='cuda:1'), covar=tensor([0.1621, 0.0660, 0.1592, 0.0668, 0.0908, 0.2036, 0.1199, 0.0999], + device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0246, 0.0344, 0.0315, 0.0305, 0.0350, 0.0351, 0.0325], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:23:38,987 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221409.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:43,026 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.344e+02 3.031e+02 3.872e+02 9.124e+02, threshold=6.062e+02, percent-clipped=5.0 +2023-02-09 00:24:00,287 INFO [train.py:901] (1/4) Epoch 28, batch 3200, loss[loss=0.1826, simple_loss=0.2722, pruned_loss=0.04647, over 8440.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2821, pruned_loss=0.05778, over 1610285.09 frames. ], batch size: 29, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:21,674 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0606, 1.5746, 3.5073, 1.7013, 2.6178, 3.8521, 3.9248, 3.3329], + device='cuda:1'), covar=tensor([0.1115, 0.1824, 0.0307, 0.1928, 0.0939, 0.0219, 0.0613, 0.0543], + device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0329, 0.0294, 0.0323, 0.0325, 0.0278, 0.0442, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:24:36,745 INFO [train.py:901] (1/4) Epoch 28, batch 3250, loss[loss=0.1887, simple_loss=0.2546, pruned_loss=0.06138, over 7218.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.281, pruned_loss=0.05709, over 1606491.85 frames. ], batch size: 16, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:56,686 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.800e+02 3.771e+02 8.910e+02, threshold=5.600e+02, percent-clipped=3.0 +2023-02-09 00:25:04,045 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:12,972 INFO [train.py:901] (1/4) Epoch 28, batch 3300, loss[loss=0.1888, simple_loss=0.2817, pruned_loss=0.04798, over 8697.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2807, pruned_loss=0.05683, over 1605824.85 frames. 
], batch size: 39, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:25:23,656 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6337, 1.9780, 3.0082, 1.4662, 2.3178, 2.0464, 1.7151, 2.3745], + device='cuda:1'), covar=tensor([0.1940, 0.2849, 0.0860, 0.4889, 0.1838, 0.3286, 0.2559, 0.2117], + device='cuda:1'), in_proj_covar=tensor([0.0536, 0.0634, 0.0561, 0.0668, 0.0657, 0.0605, 0.0561, 0.0641], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:25:25,041 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221555.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:42,943 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:48,454 INFO [train.py:901] (1/4) Epoch 28, batch 3350, loss[loss=0.1783, simple_loss=0.2653, pruned_loss=0.04566, over 8341.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2814, pruned_loss=0.05653, over 1614886.61 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:25:51,504 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3730, 2.5094, 2.0939, 4.0300, 1.5370, 1.8806, 2.4098, 2.7962], + device='cuda:1'), covar=tensor([0.0763, 0.0889, 0.0952, 0.0236, 0.1183, 0.1332, 0.0999, 0.0797], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0194, 0.0243, 0.0212, 0.0202, 0.0245, 0.0250, 0.0204], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 00:26:09,958 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.531e+02 3.062e+02 3.663e+02 8.444e+02, threshold=6.124e+02, percent-clipped=3.0 +2023-02-09 00:26:26,095 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221637.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:26:26,629 INFO [train.py:901] (1/4) Epoch 28, batch 3400, loss[loss=0.1955, simple_loss=0.2915, pruned_loss=0.0498, over 8254.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2822, pruned_loss=0.05699, over 1620452.72 frames. ], batch size: 24, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:29,006 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5744, 1.3229, 2.4160, 1.3511, 2.2285, 2.5786, 2.7376, 2.2162], + device='cuda:1'), covar=tensor([0.1057, 0.1491, 0.0382, 0.2027, 0.0752, 0.0369, 0.0664, 0.0616], + device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0329, 0.0294, 0.0323, 0.0325, 0.0278, 0.0442, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:26:36,014 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0394, 1.6035, 1.7695, 1.4126, 0.9441, 1.5015, 1.8135, 1.5182], + device='cuda:1'), covar=tensor([0.0549, 0.1235, 0.1626, 0.1473, 0.0607, 0.1552, 0.0711, 0.0689], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0163, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 00:26:43,876 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:27:02,341 INFO [train.py:901] (1/4) Epoch 28, batch 3450, loss[loss=0.236, simple_loss=0.3239, pruned_loss=0.07402, over 8501.00 frames. 
], tot_loss[loss=0.1981, simple_loss=0.282, pruned_loss=0.05715, over 1619167.76 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:20,948 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7083, 2.3446, 3.9722, 1.6216, 3.0908, 2.3490, 1.8767, 2.9512], + device='cuda:1'), covar=tensor([0.1929, 0.2715, 0.0875, 0.4557, 0.1643, 0.3250, 0.2409, 0.2327], + device='cuda:1'), in_proj_covar=tensor([0.0537, 0.0635, 0.0563, 0.0670, 0.0660, 0.0608, 0.0562, 0.0644], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:27:21,422 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.306e+02 2.763e+02 3.583e+02 8.756e+02, threshold=5.526e+02, percent-clipped=3.0 +2023-02-09 00:27:39,500 INFO [train.py:901] (1/4) Epoch 28, batch 3500, loss[loss=0.2059, simple_loss=0.2778, pruned_loss=0.06702, over 7518.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2831, pruned_loss=0.05781, over 1620207.11 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:42,565 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-09 00:28:03,519 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 00:28:07,250 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:28:15,816 INFO [train.py:901] (1/4) Epoch 28, batch 3550, loss[loss=0.2092, simple_loss=0.2874, pruned_loss=0.06549, over 8467.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2822, pruned_loss=0.05726, over 1621237.60 frames. ], batch size: 25, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:28:35,272 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.405e+02 2.949e+02 3.672e+02 8.337e+02, threshold=5.897e+02, percent-clipped=3.0 +2023-02-09 00:28:36,052 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8400, 3.7830, 3.4334, 1.8744, 3.4122, 3.5386, 3.3277, 3.3814], + device='cuda:1'), covar=tensor([0.0880, 0.0671, 0.1262, 0.5060, 0.1046, 0.1071, 0.1633, 0.0861], + device='cuda:1'), in_proj_covar=tensor([0.0542, 0.0457, 0.0448, 0.0560, 0.0444, 0.0465, 0.0441, 0.0406], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:28:52,587 INFO [train.py:901] (1/4) Epoch 28, batch 3600, loss[loss=0.2194, simple_loss=0.2988, pruned_loss=0.07, over 8280.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05788, over 1617634.92 frames. 
], batch size: 23, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:07,255 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6132, 1.3976, 1.6110, 1.2939, 0.9420, 1.4198, 1.5008, 1.3981], + device='cuda:1'), covar=tensor([0.0603, 0.1278, 0.1712, 0.1545, 0.0612, 0.1495, 0.0718, 0.0674], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 00:29:08,665 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7985, 2.2619, 3.8612, 1.6707, 2.8742, 2.3096, 1.9083, 2.9688], + device='cuda:1'), covar=tensor([0.1949, 0.2826, 0.0790, 0.4753, 0.1923, 0.3256, 0.2505, 0.2283], + device='cuda:1'), in_proj_covar=tensor([0.0536, 0.0634, 0.0563, 0.0670, 0.0659, 0.0607, 0.0561, 0.0642], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:29:15,450 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221869.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:28,481 INFO [train.py:901] (1/4) Epoch 28, batch 3650, loss[loss=0.1602, simple_loss=0.2516, pruned_loss=0.03441, over 7948.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2826, pruned_loss=0.05794, over 1613840.82 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:42,354 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:47,072 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.399e+02 3.022e+02 3.885e+02 8.966e+02, threshold=6.044e+02, percent-clipped=2.0 +2023-02-09 00:29:53,450 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0245, 2.5395, 2.6110, 1.5260, 2.8696, 1.8116, 1.5200, 2.1281], + device='cuda:1'), covar=tensor([0.1217, 0.0502, 0.0512, 0.1193, 0.0682, 0.1181, 0.1282, 0.0720], + device='cuda:1'), in_proj_covar=tensor([0.0470, 0.0409, 0.0362, 0.0458, 0.0393, 0.0548, 0.0401, 0.0440], + device='cuda:1'), out_proj_covar=tensor([1.2435e-04, 1.0588e-04, 9.4179e-05, 1.1959e-04, 1.0268e-04, 1.5285e-04, + 1.0707e-04, 1.1512e-04], device='cuda:1') +2023-02-09 00:29:54,449 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-09 00:29:57,465 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9869, 1.7595, 6.1676, 2.4403, 5.5986, 5.1184, 5.6715, 5.6340], + device='cuda:1'), covar=tensor([0.0469, 0.4849, 0.0361, 0.3793, 0.0890, 0.0895, 0.0475, 0.0465], + device='cuda:1'), in_proj_covar=tensor([0.0681, 0.0670, 0.0737, 0.0662, 0.0748, 0.0637, 0.0645, 0.0718], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:30:02,963 INFO [train.py:901] (1/4) Epoch 28, batch 3700, loss[loss=0.1969, simple_loss=0.2846, pruned_loss=0.05457, over 8641.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2826, pruned_loss=0.05812, over 1611447.56 frames. ], batch size: 27, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:05,070 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-09 00:30:38,683 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:30:41,396 INFO [train.py:901] (1/4) Epoch 28, batch 3750, loss[loss=0.1794, simple_loss=0.277, pruned_loss=0.04085, over 8105.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.05812, over 1608756.08 frames. ], batch size: 23, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:31:01,419 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.662e+02 3.142e+02 4.083e+02 1.270e+03, threshold=6.284e+02, percent-clipped=8.0 +2023-02-09 00:31:04,444 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3661, 1.9851, 4.0061, 1.9487, 2.6106, 4.5899, 4.6994, 3.9494], + device='cuda:1'), covar=tensor([0.1136, 0.1756, 0.0347, 0.2022, 0.1265, 0.0196, 0.0363, 0.0542], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0330, 0.0295, 0.0325, 0.0326, 0.0279, 0.0445, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:31:10,190 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222027.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:31:17,962 INFO [train.py:901] (1/4) Epoch 28, batch 3800, loss[loss=0.2121, simple_loss=0.2766, pruned_loss=0.0738, over 7796.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.05813, over 1613365.27 frames. ], batch size: 19, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:31:55,279 INFO [train.py:901] (1/4) Epoch 28, batch 3850, loss[loss=0.1995, simple_loss=0.2922, pruned_loss=0.05338, over 8365.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2838, pruned_loss=0.0582, over 1613745.73 frames. ], batch size: 24, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:32:11,759 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 00:32:13,842 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.208e+02 2.768e+02 3.453e+02 7.901e+02, threshold=5.537e+02, percent-clipped=1.0 +2023-02-09 00:32:15,428 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.2619, 5.2335, 4.7221, 2.5798, 4.6875, 4.9655, 4.8841, 4.7320], + device='cuda:1'), covar=tensor([0.0541, 0.0438, 0.0987, 0.4649, 0.0782, 0.0966, 0.1104, 0.0630], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0459, 0.0448, 0.0560, 0.0444, 0.0467, 0.0442, 0.0408], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:32:17,524 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222120.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:32:30,075 INFO [train.py:901] (1/4) Epoch 28, batch 3900, loss[loss=0.1971, simple_loss=0.2827, pruned_loss=0.0558, over 8478.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2837, pruned_loss=0.05779, over 1617853.70 frames. 
], batch size: 29, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:00,581 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4504, 2.3132, 1.7460, 2.2364, 1.8939, 1.3792, 1.8596, 2.1575], + device='cuda:1'), covar=tensor([0.1520, 0.0513, 0.1417, 0.0622, 0.0939, 0.2045, 0.1221, 0.0844], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0245, 0.0343, 0.0315, 0.0305, 0.0348, 0.0350, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:33:06,447 INFO [train.py:901] (1/4) Epoch 28, batch 3950, loss[loss=0.2116, simple_loss=0.2981, pruned_loss=0.06254, over 8488.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2841, pruned_loss=0.05814, over 1617515.90 frames. ], batch size: 25, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:17,877 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:20,797 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1081, 1.6695, 1.5357, 1.5300, 1.3756, 1.3786, 1.3705, 1.3013], + device='cuda:1'), covar=tensor([0.1196, 0.0565, 0.1343, 0.0661, 0.0940, 0.1570, 0.0962, 0.0897], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0245, 0.0342, 0.0314, 0.0304, 0.0347, 0.0350, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:33:26,088 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.338e+02 2.821e+02 3.606e+02 1.107e+03, threshold=5.643e+02, percent-clipped=4.0 +2023-02-09 00:33:31,794 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:35,381 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4065, 1.4052, 1.7628, 1.0889, 1.0275, 1.7759, 0.2735, 1.1721], + device='cuda:1'), covar=tensor([0.1465, 0.1087, 0.0420, 0.1033, 0.2355, 0.0402, 0.1727, 0.1249], + device='cuda:1'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0225, 0.0279, 0.0147, 0.0176, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:33:37,686 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-09 00:33:40,241 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:42,144 INFO [train.py:901] (1/4) Epoch 28, batch 4000, loss[loss=0.1709, simple_loss=0.2492, pruned_loss=0.04635, over 7930.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2827, pruned_loss=0.05738, over 1618893.55 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:43,719 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:51,744 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:01,378 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:17,514 INFO [train.py:901] (1/4) Epoch 28, batch 4050, loss[loss=0.2327, simple_loss=0.2996, pruned_loss=0.08289, over 8031.00 frames. 
], tot_loss[loss=0.2002, simple_loss=0.2841, pruned_loss=0.05813, over 1625762.80 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:34:38,323 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.414e+02 3.092e+02 4.009e+02 1.246e+03, threshold=6.184e+02, percent-clipped=7.0 +2023-02-09 00:34:54,250 INFO [train.py:901] (1/4) Epoch 28, batch 4100, loss[loss=0.1904, simple_loss=0.2814, pruned_loss=0.04971, over 8138.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2838, pruned_loss=0.05825, over 1622846.05 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:14,871 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:17,641 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:29,374 INFO [train.py:901] (1/4) Epoch 28, batch 4150, loss[loss=0.1685, simple_loss=0.2594, pruned_loss=0.03877, over 8297.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.284, pruned_loss=0.05824, over 1621943.95 frames. ], batch size: 23, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:49,101 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.330e+02 2.692e+02 3.176e+02 6.436e+02, threshold=5.384e+02, percent-clipped=1.0 +2023-02-09 00:36:07,144 INFO [train.py:901] (1/4) Epoch 28, batch 4200, loss[loss=0.2717, simple_loss=0.3446, pruned_loss=0.09941, over 8475.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2839, pruned_loss=0.05821, over 1620269.24 frames. ], batch size: 49, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:13,556 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0066, 2.1649, 1.8311, 2.9064, 1.3259, 1.7382, 2.1631, 2.1450], + device='cuda:1'), covar=tensor([0.0776, 0.0871, 0.0879, 0.0349, 0.1189, 0.1269, 0.0818, 0.0891], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0195, 0.0244, 0.0214, 0.0204, 0.0247, 0.0251, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 00:36:14,729 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 00:36:37,838 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 00:36:41,343 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:36:42,592 INFO [train.py:901] (1/4) Epoch 28, batch 4250, loss[loss=0.1986, simple_loss=0.2894, pruned_loss=0.05388, over 8296.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2832, pruned_loss=0.05789, over 1617585.17 frames. ], batch size: 23, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:44,926 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222491.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:00,852 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.539e+02 3.193e+02 4.198e+02 8.289e+02, threshold=6.386e+02, percent-clipped=5.0 +2023-02-09 00:37:01,758 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222516.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:18,018 INFO [train.py:901] (1/4) Epoch 28, batch 4300, loss[loss=0.2152, simple_loss=0.306, pruned_loss=0.06219, over 8342.00 frames. 
], tot_loss[loss=0.2, simple_loss=0.2836, pruned_loss=0.05821, over 1616226.42 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:20,160 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:25,011 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222547.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:30,690 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2308, 1.5549, 1.5441, 1.0325, 1.5668, 1.2369, 0.3241, 1.5137], + device='cuda:1'), covar=tensor([0.0813, 0.0488, 0.0455, 0.0744, 0.0632, 0.1197, 0.1225, 0.0408], + device='cuda:1'), in_proj_covar=tensor([0.0471, 0.0410, 0.0362, 0.0458, 0.0394, 0.0549, 0.0402, 0.0441], + device='cuda:1'), out_proj_covar=tensor([1.2467e-04, 1.0614e-04, 9.4104e-05, 1.1964e-04, 1.0311e-04, 1.5315e-04, + 1.0740e-04, 1.1552e-04], device='cuda:1') +2023-02-09 00:37:39,468 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222567.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:54,079 INFO [train.py:901] (1/4) Epoch 28, batch 4350, loss[loss=0.18, simple_loss=0.2718, pruned_loss=0.04412, over 8088.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2838, pruned_loss=0.05779, over 1617856.19 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:54,214 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222588.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:11,696 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 00:38:13,114 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.501e+02 2.979e+02 3.614e+02 7.360e+02, threshold=5.959e+02, percent-clipped=2.0 +2023-02-09 00:38:18,808 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:29,009 INFO [train.py:901] (1/4) Epoch 28, batch 4400, loss[loss=0.1796, simple_loss=0.2602, pruned_loss=0.04947, over 7662.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.283, pruned_loss=0.05759, over 1613455.15 frames. 
], batch size: 19, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:38:32,613 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0626, 1.2874, 1.5934, 1.2129, 1.0463, 1.3656, 1.9803, 1.7456], + device='cuda:1'), covar=tensor([0.0554, 0.1713, 0.2338, 0.1890, 0.0672, 0.2074, 0.0737, 0.0656], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 00:38:36,656 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:47,075 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:47,604 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2461, 4.2269, 3.8282, 1.9135, 3.7440, 3.8974, 3.7935, 3.6074], + device='cuda:1'), covar=tensor([0.0683, 0.0529, 0.1039, 0.4788, 0.0821, 0.0797, 0.1279, 0.0723], + device='cuda:1'), in_proj_covar=tensor([0.0544, 0.0458, 0.0450, 0.0559, 0.0444, 0.0464, 0.0440, 0.0407], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:38:54,364 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 00:39:01,989 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222682.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:05,978 INFO [train.py:901] (1/4) Epoch 28, batch 4450, loss[loss=0.1782, simple_loss=0.2638, pruned_loss=0.04634, over 7778.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.282, pruned_loss=0.0571, over 1611835.05 frames. ], batch size: 19, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:24,976 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.353e+02 2.798e+02 3.446e+02 6.111e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-09 00:39:29,406 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2121, 3.4334, 2.2462, 2.9502, 2.6937, 2.0638, 2.6917, 2.9465], + device='cuda:1'), covar=tensor([0.1616, 0.0456, 0.1236, 0.0653, 0.0837, 0.1502, 0.1063, 0.1112], + device='cuda:1'), in_proj_covar=tensor([0.0361, 0.0247, 0.0344, 0.0317, 0.0306, 0.0351, 0.0351, 0.0324], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:39:41,225 INFO [train.py:901] (1/4) Epoch 28, batch 4500, loss[loss=0.2085, simple_loss=0.2962, pruned_loss=0.06037, over 8110.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2818, pruned_loss=0.05689, over 1614388.29 frames. 
], batch size: 23, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:42,067 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5547, 2.1519, 3.2068, 1.3924, 2.3990, 1.9895, 1.7621, 2.6317], + device='cuda:1'), covar=tensor([0.1968, 0.2563, 0.0802, 0.4750, 0.1950, 0.3355, 0.2507, 0.2111], + device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0638, 0.0566, 0.0673, 0.0662, 0.0611, 0.0565, 0.0645], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:39:43,394 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8535, 3.8357, 3.4241, 1.8662, 3.3643, 3.4798, 3.4535, 3.3598], + device='cuda:1'), covar=tensor([0.0869, 0.0698, 0.1188, 0.4639, 0.1020, 0.1182, 0.1404, 0.1010], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0457, 0.0449, 0.0557, 0.0442, 0.0463, 0.0439, 0.0406], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:39:44,228 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:45,367 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 00:40:02,731 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222767.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:40:18,345 INFO [train.py:901] (1/4) Epoch 28, batch 4550, loss[loss=0.1801, simple_loss=0.2697, pruned_loss=0.04524, over 8493.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2806, pruned_loss=0.05634, over 1609865.46 frames. ], batch size: 28, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:40:37,191 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.324e+02 2.721e+02 3.677e+02 6.861e+02, threshold=5.442e+02, percent-clipped=4.0 +2023-02-09 00:40:53,695 INFO [train.py:901] (1/4) Epoch 28, batch 4600, loss[loss=0.2213, simple_loss=0.3049, pruned_loss=0.06886, over 8592.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05637, over 1613162.45 frames. ], batch size: 31, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:41:27,918 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222885.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:41:30,003 INFO [train.py:901] (1/4) Epoch 28, batch 4650, loss[loss=0.1896, simple_loss=0.2762, pruned_loss=0.05155, over 8240.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05674, over 1616170.14 frames. 
], batch size: 24, lr: 2.68e-03, grad_scale: 16.0 +2023-02-09 00:41:30,921 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8038, 1.9692, 2.0730, 1.4910, 2.2161, 1.5406, 0.7883, 2.0164], + device='cuda:1'), covar=tensor([0.0657, 0.0417, 0.0380, 0.0701, 0.0519, 0.0983, 0.1033, 0.0364], + device='cuda:1'), in_proj_covar=tensor([0.0472, 0.0411, 0.0364, 0.0459, 0.0396, 0.0552, 0.0402, 0.0442], + device='cuda:1'), out_proj_covar=tensor([1.2487e-04, 1.0650e-04, 9.4666e-05, 1.2003e-04, 1.0351e-04, 1.5409e-04, + 1.0734e-04, 1.1564e-04], device='cuda:1') +2023-02-09 00:41:50,684 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 3.099e+02 3.500e+02 7.849e+02, threshold=6.198e+02, percent-clipped=6.0 +2023-02-09 00:41:53,071 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222918.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:41:55,969 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8962, 2.4200, 3.7817, 2.0278, 2.1350, 3.7456, 0.6897, 2.3652], + device='cuda:1'), covar=tensor([0.1412, 0.1057, 0.0187, 0.1351, 0.2060, 0.0210, 0.2027, 0.1081], + device='cuda:1'), in_proj_covar=tensor([0.0202, 0.0206, 0.0138, 0.0225, 0.0279, 0.0148, 0.0176, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:42:02,696 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222932.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:06,748 INFO [train.py:901] (1/4) Epoch 28, batch 4700, loss[loss=0.1945, simple_loss=0.2699, pruned_loss=0.05953, over 7533.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05624, over 1614505.40 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:06,964 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222938.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:10,432 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222943.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:20,638 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222958.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:24,246 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222963.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:40,837 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:41,329 INFO [train.py:901] (1/4) Epoch 28, batch 4750, loss[loss=0.1795, simple_loss=0.2641, pruned_loss=0.04748, over 7803.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2803, pruned_loss=0.05638, over 1615903.05 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:50,010 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223000.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:52,055 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:54,708 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 00:42:58,161 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-09 00:43:02,879 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.412e+02 2.807e+02 3.833e+02 7.869e+02, threshold=5.613e+02, percent-clipped=5.0 +2023-02-09 00:43:18,663 INFO [train.py:901] (1/4) Epoch 28, batch 4800, loss[loss=0.1847, simple_loss=0.2683, pruned_loss=0.05051, over 8294.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.281, pruned_loss=0.05668, over 1620181.79 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:43:25,185 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223047.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:32,780 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3418, 2.0858, 1.6313, 1.9802, 1.7038, 1.4370, 1.6787, 1.7668], + device='cuda:1'), covar=tensor([0.1411, 0.0494, 0.1360, 0.0559, 0.0801, 0.1609, 0.0953, 0.0897], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0244, 0.0340, 0.0313, 0.0302, 0.0346, 0.0347, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:43:47,828 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.46 vs. limit=5.0 +2023-02-09 00:43:48,814 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 00:43:49,658 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:53,653 INFO [train.py:901] (1/4) Epoch 28, batch 4850, loss[loss=0.2073, simple_loss=0.2989, pruned_loss=0.05785, over 8496.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.281, pruned_loss=0.05659, over 1618410.76 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:10,873 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-09 00:44:13,745 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.508e+02 3.332e+02 4.408e+02 9.671e+02, threshold=6.663e+02, percent-clipped=7.0 +2023-02-09 00:44:31,160 INFO [train.py:901] (1/4) Epoch 28, batch 4900, loss[loss=0.2047, simple_loss=0.2874, pruned_loss=0.06105, over 8508.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05718, over 1615253.50 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:48,259 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5654, 1.3283, 2.6775, 1.0628, 2.1553, 2.8374, 3.1801, 2.0839], + device='cuda:1'), covar=tensor([0.1575, 0.2205, 0.0542, 0.3099, 0.1098, 0.0486, 0.0653, 0.1213], + device='cuda:1'), in_proj_covar=tensor([0.0305, 0.0326, 0.0292, 0.0320, 0.0322, 0.0275, 0.0439, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:44:48,925 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1287, 1.4870, 4.3781, 1.6174, 3.8831, 3.6280, 3.9623, 3.8861], + device='cuda:1'), covar=tensor([0.0695, 0.4689, 0.0590, 0.4119, 0.1118, 0.1029, 0.0631, 0.0671], + device='cuda:1'), in_proj_covar=tensor([0.0689, 0.0674, 0.0743, 0.0667, 0.0750, 0.0643, 0.0649, 0.0722], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:45:07,006 INFO [train.py:901] (1/4) Epoch 28, batch 4950, loss[loss=0.1787, simple_loss=0.259, pruned_loss=0.04917, over 7799.00 frames. 
], tot_loss[loss=0.1977, simple_loss=0.2818, pruned_loss=0.05686, over 1615840.12 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:09,273 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223191.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:45:22,625 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1861, 2.5715, 3.6051, 2.0958, 3.0445, 2.5871, 2.3162, 2.9306], + device='cuda:1'), covar=tensor([0.1550, 0.2162, 0.0812, 0.3661, 0.1412, 0.2527, 0.1986, 0.2107], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0638, 0.0565, 0.0673, 0.0664, 0.0612, 0.0565, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:45:26,420 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.334e+02 2.712e+02 3.560e+02 9.309e+02, threshold=5.424e+02, percent-clipped=3.0 +2023-02-09 00:45:42,295 INFO [train.py:901] (1/4) Epoch 28, batch 5000, loss[loss=0.1827, simple_loss=0.2574, pruned_loss=0.05401, over 7424.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2822, pruned_loss=0.05704, over 1614837.59 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:56,134 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:14,345 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223281.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:18,935 INFO [train.py:901] (1/4) Epoch 28, batch 5050, loss[loss=0.1777, simple_loss=0.2701, pruned_loss=0.04264, over 8101.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05724, over 1617506.70 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:28,802 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223302.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:29,633 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223303.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:32,889 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 00:46:38,387 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.274e+02 2.931e+02 3.573e+02 6.090e+02, threshold=5.862e+02, percent-clipped=1.0 +2023-02-09 00:46:46,948 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223328.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:48,965 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223331.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:53,555 INFO [train.py:901] (1/4) Epoch 28, batch 5100, loss[loss=0.1947, simple_loss=0.2828, pruned_loss=0.05329, over 8027.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2813, pruned_loss=0.0564, over 1617448.15 frames. 
], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:59,471 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223346.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:26,338 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2672, 1.6976, 4.3074, 2.1577, 2.5365, 4.8962, 5.0360, 4.2654], + device='cuda:1'), covar=tensor([0.1180, 0.1882, 0.0271, 0.1904, 0.1216, 0.0182, 0.0408, 0.0534], + device='cuda:1'), in_proj_covar=tensor([0.0305, 0.0325, 0.0292, 0.0319, 0.0322, 0.0274, 0.0439, 0.0307], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 00:47:31,192 INFO [train.py:901] (1/4) Epoch 28, batch 5150, loss[loss=0.2071, simple_loss=0.2875, pruned_loss=0.06329, over 8576.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2806, pruned_loss=0.05629, over 1616708.48 frames. ], batch size: 49, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:47:37,584 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5113, 1.7809, 1.8343, 1.2664, 1.8924, 1.4045, 0.5099, 1.7596], + device='cuda:1'), covar=tensor([0.0634, 0.0426, 0.0355, 0.0630, 0.0417, 0.1035, 0.1004, 0.0324], + device='cuda:1'), in_proj_covar=tensor([0.0469, 0.0409, 0.0362, 0.0456, 0.0394, 0.0549, 0.0400, 0.0438], + device='cuda:1'), out_proj_covar=tensor([1.2407e-04, 1.0587e-04, 9.4242e-05, 1.1912e-04, 1.0306e-04, 1.5314e-04, + 1.0684e-04, 1.1481e-04], device='cuda:1') +2023-02-09 00:47:38,172 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6105, 4.6190, 4.1862, 2.3797, 4.1176, 4.1828, 4.1793, 4.0159], + device='cuda:1'), covar=tensor([0.0733, 0.0520, 0.0954, 0.3824, 0.0837, 0.0934, 0.1261, 0.0642], + device='cuda:1'), in_proj_covar=tensor([0.0539, 0.0454, 0.0449, 0.0554, 0.0443, 0.0463, 0.0441, 0.0407], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:47:43,144 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.31 vs. limit=5.0 +2023-02-09 00:47:50,489 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.349e+02 2.964e+02 3.516e+02 1.122e+03, threshold=5.928e+02, percent-clipped=3.0 +2023-02-09 00:47:51,397 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223417.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:57,693 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:58,560 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4589, 1.3858, 1.7919, 1.2243, 1.0445, 1.7805, 0.2501, 1.1598], + device='cuda:1'), covar=tensor([0.1365, 0.1238, 0.0422, 0.0854, 0.2574, 0.0442, 0.1905, 0.1140], + device='cuda:1'), in_proj_covar=tensor([0.0200, 0.0206, 0.0137, 0.0224, 0.0279, 0.0148, 0.0175, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:48:05,964 INFO [train.py:901] (1/4) Epoch 28, batch 5200, loss[loss=0.188, simple_loss=0.2602, pruned_loss=0.05793, over 6839.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05697, over 1618836.30 frames. 
], batch size: 15, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:48:11,623 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223446.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:22,805 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223461.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:31,242 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 00:48:44,075 INFO [train.py:901] (1/4) Epoch 28, batch 5250, loss[loss=0.2114, simple_loss=0.2922, pruned_loss=0.06533, over 7695.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05699, over 1613139.79 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:49:03,771 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.237e+02 2.837e+02 3.561e+02 7.405e+02, threshold=5.674e+02, percent-clipped=6.0 +2023-02-09 00:49:17,138 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223535.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:49:19,101 INFO [train.py:901] (1/4) Epoch 28, batch 5300, loss[loss=0.2256, simple_loss=0.3078, pruned_loss=0.07171, over 8356.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2813, pruned_loss=0.0567, over 1613357.80 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:49:21,372 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:49:22,687 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1115, 2.0258, 2.5869, 2.2100, 2.5727, 2.2362, 2.0445, 1.4844], + device='cuda:1'), covar=tensor([0.5787, 0.4884, 0.1987, 0.3830, 0.2456, 0.3138, 0.1966, 0.5397], + device='cuda:1'), in_proj_covar=tensor([0.0966, 0.1030, 0.0835, 0.0999, 0.1026, 0.0936, 0.0773, 0.0855], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 00:49:55,542 INFO [train.py:901] (1/4) Epoch 28, batch 5350, loss[loss=0.2405, simple_loss=0.3208, pruned_loss=0.08011, over 7245.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2826, pruned_loss=0.05788, over 1608285.55 frames. 
], batch size: 73, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:50:01,803 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223596.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:50:04,677 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8428, 1.5018, 4.0046, 1.4328, 3.5620, 3.3245, 3.6187, 3.5409], + device='cuda:1'), covar=tensor([0.0738, 0.4515, 0.0608, 0.4482, 0.1184, 0.1063, 0.0743, 0.0771], + device='cuda:1'), in_proj_covar=tensor([0.0679, 0.0663, 0.0734, 0.0656, 0.0737, 0.0632, 0.0640, 0.0712], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:50:15,659 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.840e+02 3.657e+02 7.209e+02, threshold=5.681e+02, percent-clipped=3.0 +2023-02-09 00:50:30,412 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2841, 2.0376, 2.5978, 2.2233, 2.5509, 2.3488, 2.2000, 1.4277], + device='cuda:1'), covar=tensor([0.5604, 0.4992, 0.2158, 0.3906, 0.2600, 0.3240, 0.1931, 0.5664], + device='cuda:1'), in_proj_covar=tensor([0.0967, 0.1031, 0.0837, 0.1000, 0.1027, 0.0938, 0.0774, 0.0856], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 00:50:30,870 INFO [train.py:901] (1/4) Epoch 28, batch 5400, loss[loss=0.1927, simple_loss=0.2651, pruned_loss=0.06012, over 8078.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.0582, over 1611779.05 frames. ], batch size: 21, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:50:39,702 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223650.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:50:55,924 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223673.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:03,756 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2919, 3.3097, 2.3535, 2.7445, 2.5293, 2.1670, 2.5896, 2.9725], + device='cuda:1'), covar=tensor([0.1353, 0.0339, 0.1052, 0.0714, 0.0758, 0.1417, 0.0958, 0.0922], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0244, 0.0342, 0.0315, 0.0303, 0.0347, 0.0350, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:51:06,360 INFO [train.py:901] (1/4) Epoch 28, batch 5450, loss[loss=0.1785, simple_loss=0.2548, pruned_loss=0.05109, over 7552.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2833, pruned_loss=0.05804, over 1609888.13 frames. 
], batch size: 18, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:51:13,800 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:16,752 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:28,428 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.405e+02 2.886e+02 3.694e+02 6.837e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 00:51:28,657 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:29,453 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2626, 2.0976, 2.7042, 2.3057, 2.7474, 2.3579, 2.1487, 1.5821], + device='cuda:1'), covar=tensor([0.5747, 0.5260, 0.2281, 0.3831, 0.2531, 0.3336, 0.1941, 0.5558], + device='cuda:1'), in_proj_covar=tensor([0.0966, 0.1029, 0.0837, 0.0998, 0.1025, 0.0934, 0.0772, 0.0855], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 00:51:31,414 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 00:51:35,317 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-09 00:51:36,523 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223727.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:44,560 INFO [train.py:901] (1/4) Epoch 28, batch 5500, loss[loss=0.2057, simple_loss=0.2932, pruned_loss=0.05905, over 8249.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.282, pruned_loss=0.05769, over 1607409.53 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:51:47,326 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:18,971 INFO [train.py:901] (1/4) Epoch 28, batch 5550, loss[loss=0.1621, simple_loss=0.2413, pruned_loss=0.04145, over 7442.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2829, pruned_loss=0.05831, over 1610939.24 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:52:20,246 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-09 00:52:25,540 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223797.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:39,665 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.462e+02 3.010e+02 3.574e+02 1.274e+03, threshold=6.020e+02, percent-clipped=3.0 +2023-02-09 00:52:43,459 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223822.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:55,577 INFO [train.py:901] (1/4) Epoch 28, batch 5600, loss[loss=0.1921, simple_loss=0.2725, pruned_loss=0.05581, over 7930.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2818, pruned_loss=0.05767, over 1609302.20 frames. 
], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:53:05,100 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5479, 1.4594, 1.8226, 1.2895, 1.2063, 1.8376, 0.2561, 1.2281], + device='cuda:1'), covar=tensor([0.1384, 0.1256, 0.0408, 0.0725, 0.2252, 0.0440, 0.1819, 0.1098], + device='cuda:1'), in_proj_covar=tensor([0.0201, 0.0205, 0.0137, 0.0223, 0.0278, 0.0147, 0.0174, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 00:53:31,691 INFO [train.py:901] (1/4) Epoch 28, batch 5650, loss[loss=0.1885, simple_loss=0.2805, pruned_loss=0.04828, over 8763.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2817, pruned_loss=0.05785, over 1610209.30 frames. ], batch size: 30, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:53:41,197 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 00:53:44,191 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223906.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:53:44,786 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7421, 1.8255, 1.5942, 2.3379, 1.0076, 1.4730, 1.6512, 1.7496], + device='cuda:1'), covar=tensor([0.0769, 0.0696, 0.0870, 0.0388, 0.1039, 0.1253, 0.0744, 0.0755], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0196, 0.0246, 0.0215, 0.0205, 0.0249, 0.0252, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 00:53:51,298 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.313e+02 2.789e+02 3.752e+02 1.102e+03, threshold=5.578e+02, percent-clipped=3.0 +2023-02-09 00:54:01,602 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223931.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:54:06,155 INFO [train.py:901] (1/4) Epoch 28, batch 5700, loss[loss=0.2186, simple_loss=0.3107, pruned_loss=0.06322, over 8500.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.282, pruned_loss=0.05777, over 1613612.23 frames. ], batch size: 28, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:54:07,519 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223940.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:54:10,615 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.52 vs. limit=2.0 +2023-02-09 00:54:16,420 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 00:54:43,234 INFO [train.py:901] (1/4) Epoch 28, batch 5750, loss[loss=0.2151, simple_loss=0.2935, pruned_loss=0.06838, over 7660.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2817, pruned_loss=0.05749, over 1612065.23 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:54:48,708 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 00:55:04,150 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.263e+02 2.713e+02 3.241e+02 8.661e+02, threshold=5.425e+02, percent-clipped=3.0 +2023-02-09 00:55:18,773 INFO [train.py:901] (1/4) Epoch 28, batch 5800, loss[loss=0.1879, simple_loss=0.2743, pruned_loss=0.05073, over 8615.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2812, pruned_loss=0.05684, over 1611567.70 frames. 
], batch size: 39, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:55:22,317 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7124, 1.6448, 5.9280, 2.2830, 5.3101, 5.0035, 5.4546, 5.3643], + device='cuda:1'), covar=tensor([0.0551, 0.5123, 0.0422, 0.4025, 0.1039, 0.0950, 0.0497, 0.0566], + device='cuda:1'), in_proj_covar=tensor([0.0683, 0.0665, 0.0737, 0.0661, 0.0742, 0.0636, 0.0643, 0.0718], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:55:30,597 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224055.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:55:31,242 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2370, 3.1652, 2.9191, 1.6450, 2.8845, 2.8981, 2.7830, 2.8658], + device='cuda:1'), covar=tensor([0.1122, 0.0783, 0.1252, 0.4489, 0.1167, 0.1246, 0.1616, 0.1052], + device='cuda:1'), in_proj_covar=tensor([0.0546, 0.0458, 0.0452, 0.0560, 0.0446, 0.0467, 0.0444, 0.0410], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:55:43,054 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7045, 2.1086, 3.2581, 1.5313, 2.4701, 2.1368, 1.8436, 2.5085], + device='cuda:1'), covar=tensor([0.1941, 0.2713, 0.0815, 0.4771, 0.1879, 0.3222, 0.2458, 0.2345], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0636, 0.0564, 0.0671, 0.0663, 0.0612, 0.0563, 0.0647], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:55:47,081 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4811, 2.3167, 1.7653, 2.1434, 1.9236, 1.4495, 1.8958, 1.9611], + device='cuda:1'), covar=tensor([0.1569, 0.0478, 0.1390, 0.0690, 0.0814, 0.1727, 0.1015, 0.1027], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0243, 0.0341, 0.0314, 0.0303, 0.0346, 0.0350, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 00:55:55,788 INFO [train.py:901] (1/4) Epoch 28, batch 5850, loss[loss=0.2104, simple_loss=0.2849, pruned_loss=0.06794, over 8295.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2814, pruned_loss=0.05649, over 1613458.04 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:56:15,667 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.538e+02 3.148e+02 4.118e+02 7.183e+02, threshold=6.296e+02, percent-clipped=12.0 +2023-02-09 00:56:30,204 INFO [train.py:901] (1/4) Epoch 28, batch 5900, loss[loss=0.1821, simple_loss=0.2528, pruned_loss=0.05577, over 7779.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05717, over 1613728.54 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:06,180 INFO [train.py:901] (1/4) Epoch 28, batch 5950, loss[loss=0.1811, simple_loss=0.2737, pruned_loss=0.04423, over 8240.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.281, pruned_loss=0.05703, over 1611639.03 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:25,370 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. 
limit=2.0 +2023-02-09 00:57:28,301 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.486e+02 3.110e+02 3.888e+02 7.674e+02, threshold=6.220e+02, percent-clipped=4.0 +2023-02-09 00:57:37,734 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224230.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:57:43,109 INFO [train.py:901] (1/4) Epoch 28, batch 6000, loss[loss=0.1825, simple_loss=0.2637, pruned_loss=0.05065, over 7940.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2808, pruned_loss=0.05669, over 1611932.37 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:43,110 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 00:57:56,800 INFO [train.py:935] (1/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2708, pruned_loss=0.03603, over 944034.00 frames. +2023-02-09 00:57:56,801 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 00:57:59,119 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0119, 1.8217, 2.2815, 1.9509, 2.2758, 2.1002, 1.9133, 1.2537], + device='cuda:1'), covar=tensor([0.6243, 0.5460, 0.2100, 0.3952, 0.2582, 0.3489, 0.2087, 0.5456], + device='cuda:1'), in_proj_covar=tensor([0.0973, 0.1035, 0.0841, 0.1004, 0.1028, 0.0940, 0.0776, 0.0859], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 00:58:07,094 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7658, 5.9471, 5.2271, 2.3518, 5.2449, 5.5306, 5.3643, 5.3950], + device='cuda:1'), covar=tensor([0.0455, 0.0297, 0.0725, 0.4238, 0.0659, 0.0680, 0.0897, 0.0565], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0457, 0.0451, 0.0559, 0.0445, 0.0466, 0.0442, 0.0408], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:58:33,310 INFO [train.py:901] (1/4) Epoch 28, batch 6050, loss[loss=0.1988, simple_loss=0.2858, pruned_loss=0.05594, over 8644.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05628, over 1613146.54 frames. ], batch size: 34, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:58:43,545 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6209, 2.0269, 3.2759, 1.5017, 2.4673, 2.1456, 1.7335, 2.5762], + device='cuda:1'), covar=tensor([0.1884, 0.2800, 0.0787, 0.4623, 0.1895, 0.3155, 0.2438, 0.2256], + device='cuda:1'), in_proj_covar=tensor([0.0541, 0.0637, 0.0565, 0.0671, 0.0665, 0.0614, 0.0565, 0.0649], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 00:58:48,812 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-09 00:58:49,932 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224311.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:58:52,994 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-09 00:58:54,040 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.462e+02 3.109e+02 3.867e+02 1.260e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-09 00:59:08,857 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224336.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:59:10,069 INFO [train.py:901] (1/4) Epoch 28, batch 6100, loss[loss=0.194, simple_loss=0.2689, pruned_loss=0.05953, over 7812.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2818, pruned_loss=0.05682, over 1612663.13 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:59:26,305 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 00:59:46,740 INFO [train.py:901] (1/4) Epoch 28, batch 6150, loss[loss=0.1994, simple_loss=0.2891, pruned_loss=0.05488, over 8354.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05641, over 1612053.10 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:59:54,603 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0684, 2.2492, 2.2886, 1.5890, 2.3963, 1.8230, 1.7306, 2.0423], + device='cuda:1'), covar=tensor([0.0675, 0.0430, 0.0344, 0.0705, 0.0485, 0.0716, 0.0780, 0.0465], + device='cuda:1'), in_proj_covar=tensor([0.0471, 0.0409, 0.0363, 0.0458, 0.0393, 0.0550, 0.0402, 0.0440], + device='cuda:1'), out_proj_covar=tensor([1.2457e-04, 1.0580e-04, 9.4517e-05, 1.1960e-04, 1.0283e-04, 1.5344e-04, + 1.0723e-04, 1.1521e-04], device='cuda:1') +2023-02-09 01:00:06,960 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.362e+02 2.823e+02 3.455e+02 8.158e+02, threshold=5.645e+02, percent-clipped=2.0 +2023-02-09 01:00:21,354 INFO [train.py:901] (1/4) Epoch 28, batch 6200, loss[loss=0.1771, simple_loss=0.2664, pruned_loss=0.04387, over 8088.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2818, pruned_loss=0.05648, over 1616770.29 frames. ], batch size: 21, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:00:58,233 INFO [train.py:901] (1/4) Epoch 28, batch 6250, loss[loss=0.1814, simple_loss=0.2563, pruned_loss=0.05326, over 7527.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2823, pruned_loss=0.05691, over 1618630.33 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:00:59,402 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.45 vs. limit=5.0 +2023-02-09 01:01:18,580 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.593e+02 3.043e+02 4.250e+02 9.084e+02, threshold=6.087e+02, percent-clipped=11.0 +2023-02-09 01:01:33,267 INFO [train.py:901] (1/4) Epoch 28, batch 6300, loss[loss=0.1907, simple_loss=0.282, pruned_loss=0.04971, over 8286.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2815, pruned_loss=0.05636, over 1616286.82 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:02:00,128 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224574.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:02:04,359 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:02:10,460 INFO [train.py:901] (1/4) Epoch 28, batch 6350, loss[loss=0.2133, simple_loss=0.2951, pruned_loss=0.06572, over 8118.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2806, pruned_loss=0.05617, over 1609053.31 frames. 
], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:02:30,928 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.315e+02 2.720e+02 3.259e+02 6.733e+02, threshold=5.440e+02, percent-clipped=2.0 +2023-02-09 01:02:45,883 INFO [train.py:901] (1/4) Epoch 28, batch 6400, loss[loss=0.2312, simple_loss=0.3186, pruned_loss=0.07189, over 8461.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2806, pruned_loss=0.0563, over 1608084.70 frames. ], batch size: 27, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:03:16,424 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 01:03:21,510 INFO [train.py:901] (1/4) Epoch 28, batch 6450, loss[loss=0.1982, simple_loss=0.2902, pruned_loss=0.05306, over 8299.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2798, pruned_loss=0.05606, over 1604204.40 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:03:22,396 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224689.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:03:43,002 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.303e+02 2.784e+02 3.485e+02 7.082e+02, threshold=5.567e+02, percent-clipped=7.0 +2023-02-09 01:03:44,831 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 01:03:56,389 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2256, 2.4539, 2.7808, 1.7958, 3.0627, 1.8836, 1.4732, 2.2746], + device='cuda:1'), covar=tensor([0.0979, 0.0547, 0.0353, 0.0933, 0.0615, 0.0996, 0.1236, 0.0662], + device='cuda:1'), in_proj_covar=tensor([0.0475, 0.0412, 0.0366, 0.0461, 0.0396, 0.0555, 0.0406, 0.0444], + device='cuda:1'), out_proj_covar=tensor([1.2572e-04, 1.0680e-04, 9.5128e-05, 1.2062e-04, 1.0360e-04, 1.5473e-04, + 1.0836e-04, 1.1623e-04], device='cuda:1') +2023-02-09 01:03:57,597 INFO [train.py:901] (1/4) Epoch 28, batch 6500, loss[loss=0.2125, simple_loss=0.3005, pruned_loss=0.06227, over 8539.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2795, pruned_loss=0.05579, over 1605898.32 frames. ], batch size: 31, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:04:02,104 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:04:11,961 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224758.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:04:32,262 INFO [train.py:901] (1/4) Epoch 28, batch 6550, loss[loss=0.2184, simple_loss=0.2984, pruned_loss=0.06919, over 8495.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2806, pruned_loss=0.05632, over 1608716.32 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:04:47,798 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 01:04:53,995 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.489e+02 3.184e+02 3.768e+02 7.222e+02, threshold=6.368e+02, percent-clipped=1.0 +2023-02-09 01:05:08,029 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 01:05:09,348 INFO [train.py:901] (1/4) Epoch 28, batch 6600, loss[loss=0.1895, simple_loss=0.2749, pruned_loss=0.05201, over 8028.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2804, pruned_loss=0.05602, over 1609609.67 frames. 
], batch size: 22, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:05:17,179 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4246, 1.3394, 1.6728, 1.0999, 1.0877, 1.6595, 0.2891, 1.1237], + device='cuda:1'), covar=tensor([0.1427, 0.1124, 0.0396, 0.0887, 0.2167, 0.0449, 0.1815, 0.1241], + device='cuda:1'), in_proj_covar=tensor([0.0204, 0.0209, 0.0138, 0.0226, 0.0280, 0.0149, 0.0176, 0.0201], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 01:05:44,396 INFO [train.py:901] (1/4) Epoch 28, batch 6650, loss[loss=0.1734, simple_loss=0.2683, pruned_loss=0.0392, over 8186.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.0568, over 1614147.38 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:04,780 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.463e+02 2.971e+02 3.895e+02 9.422e+02, threshold=5.941e+02, percent-clipped=4.0 +2023-02-09 01:06:10,964 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224924.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:11,638 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7797, 5.9266, 5.1626, 2.7804, 5.2192, 5.6425, 5.4208, 5.3638], + device='cuda:1'), covar=tensor([0.0495, 0.0357, 0.0846, 0.3890, 0.0774, 0.0755, 0.1030, 0.0656], + device='cuda:1'), in_proj_covar=tensor([0.0550, 0.0462, 0.0456, 0.0566, 0.0448, 0.0471, 0.0448, 0.0414], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:06:17,461 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.81 vs. limit=2.0 +2023-02-09 01:06:21,167 INFO [train.py:901] (1/4) Epoch 28, batch 6700, loss[loss=0.1709, simple_loss=0.2534, pruned_loss=0.04418, over 5562.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2826, pruned_loss=0.05753, over 1613511.49 frames. ], batch size: 12, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:26,239 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224945.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:30,484 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1341, 1.3725, 1.5987, 1.3455, 0.8524, 1.4105, 1.2273, 1.0411], + device='cuda:1'), covar=tensor([0.0633, 0.1240, 0.1603, 0.1444, 0.0548, 0.1501, 0.0714, 0.0727], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0161, 0.0102, 0.0164, 0.0113, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 01:06:44,443 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224970.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:57,024 INFO [train.py:901] (1/4) Epoch 28, batch 6750, loss[loss=0.1634, simple_loss=0.2402, pruned_loss=0.04329, over 7711.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2821, pruned_loss=0.05728, over 1614080.48 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:57,469 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. 
limit=2.0 +2023-02-09 01:06:58,659 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8177, 2.5921, 1.9183, 2.4556, 2.3775, 1.6330, 2.3214, 2.3318], + device='cuda:1'), covar=tensor([0.1534, 0.0446, 0.1280, 0.0694, 0.0783, 0.1700, 0.1022, 0.1120], + device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0247, 0.0345, 0.0317, 0.0304, 0.0350, 0.0354, 0.0327], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 01:07:01,382 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4169, 2.7419, 3.0060, 1.8004, 3.3448, 2.0369, 1.5247, 2.4366], + device='cuda:1'), covar=tensor([0.1008, 0.0434, 0.0419, 0.0928, 0.0463, 0.1084, 0.1160, 0.0644], + device='cuda:1'), in_proj_covar=tensor([0.0473, 0.0409, 0.0364, 0.0459, 0.0396, 0.0552, 0.0403, 0.0442], + device='cuda:1'), out_proj_covar=tensor([1.2520e-04, 1.0596e-04, 9.4811e-05, 1.2003e-04, 1.0343e-04, 1.5400e-04, + 1.0766e-04, 1.1576e-04], device='cuda:1') +2023-02-09 01:07:17,010 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.343e+02 2.979e+02 3.883e+02 6.136e+02, threshold=5.958e+02, percent-clipped=2.0 +2023-02-09 01:07:28,015 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 01:07:32,093 INFO [train.py:901] (1/4) Epoch 28, batch 6800, loss[loss=0.2155, simple_loss=0.2994, pruned_loss=0.06583, over 8592.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.05729, over 1615736.71 frames. ], batch size: 39, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:07:32,951 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225039.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:07:57,277 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5590, 1.7740, 2.0839, 1.7803, 1.2093, 1.8710, 2.3378, 1.9050], + device='cuda:1'), covar=tensor([0.0549, 0.1185, 0.1525, 0.1400, 0.0627, 0.1368, 0.0651, 0.0665], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 01:08:08,382 INFO [train.py:901] (1/4) Epoch 28, batch 6850, loss[loss=0.185, simple_loss=0.2594, pruned_loss=0.05526, over 7799.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2821, pruned_loss=0.0571, over 1618913.25 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:08:08,454 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225088.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:18,039 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225102.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:18,737 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 01:08:28,500 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.283e+02 2.996e+02 3.907e+02 8.918e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-09 01:08:31,371 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225121.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:42,882 INFO [train.py:901] (1/4) Epoch 28, batch 6900, loss[loss=0.1636, simple_loss=0.2531, pruned_loss=0.03704, over 5096.00 frames. 
], tot_loss[loss=0.1989, simple_loss=0.2831, pruned_loss=0.05735, over 1617152.45 frames. ], batch size: 11, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:08:47,492 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-09 01:08:58,668 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225160.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:18,975 INFO [train.py:901] (1/4) Epoch 28, batch 6950, loss[loss=0.1647, simple_loss=0.2527, pruned_loss=0.03838, over 8075.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2851, pruned_loss=0.05827, over 1620000.87 frames. ], batch size: 21, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:09:30,328 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:30,860 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 01:09:40,023 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.456e+02 2.946e+02 3.977e+02 8.721e+02, threshold=5.892e+02, percent-clipped=6.0 +2023-02-09 01:09:40,234 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225217.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:54,781 INFO [train.py:901] (1/4) Epoch 28, batch 7000, loss[loss=0.1804, simple_loss=0.261, pruned_loss=0.04993, over 7276.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2836, pruned_loss=0.0577, over 1620087.09 frames. ], batch size: 16, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:10:31,216 INFO [train.py:901] (1/4) Epoch 28, batch 7050, loss[loss=0.2251, simple_loss=0.3025, pruned_loss=0.0738, over 8502.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2828, pruned_loss=0.05769, over 1618061.93 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:10:36,306 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225295.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:10:41,258 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.9223, 6.2328, 5.3009, 2.9806, 5.4099, 5.7729, 5.5764, 5.6207], + device='cuda:1'), covar=tensor([0.0557, 0.0343, 0.0922, 0.3670, 0.0780, 0.0998, 0.0913, 0.0623], + device='cuda:1'), in_proj_covar=tensor([0.0549, 0.0463, 0.0457, 0.0564, 0.0448, 0.0470, 0.0447, 0.0415], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:10:52,623 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.402e+02 2.844e+02 3.449e+02 6.425e+02, threshold=5.688e+02, percent-clipped=2.0 +2023-02-09 01:10:55,647 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225320.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:11:08,201 INFO [train.py:901] (1/4) Epoch 28, batch 7100, loss[loss=0.1772, simple_loss=0.2673, pruned_loss=0.0435, over 8135.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.281, pruned_loss=0.05722, over 1617486.22 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:11:43,048 INFO [train.py:901] (1/4) Epoch 28, batch 7150, loss[loss=0.218, simple_loss=0.2883, pruned_loss=0.0738, over 8495.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2816, pruned_loss=0.05749, over 1611046.62 frames. 
], batch size: 29, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:12:05,408 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.377e+02 2.906e+02 3.542e+02 6.036e+02, threshold=5.811e+02, percent-clipped=2.0 +2023-02-09 01:12:21,600 INFO [train.py:901] (1/4) Epoch 28, batch 7200, loss[loss=0.1876, simple_loss=0.2738, pruned_loss=0.05068, over 8497.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2836, pruned_loss=0.0585, over 1614532.05 frames. ], batch size: 29, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:12:35,171 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225457.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:36,591 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225459.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:40,499 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225465.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:44,913 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7479, 2.2924, 4.0086, 1.5688, 2.9574, 2.3713, 1.8556, 2.9702], + device='cuda:1'), covar=tensor([0.1976, 0.3044, 0.0885, 0.5084, 0.1957, 0.3375, 0.2664, 0.2483], + device='cuda:1'), in_proj_covar=tensor([0.0538, 0.0638, 0.0563, 0.0673, 0.0663, 0.0614, 0.0563, 0.0647], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:12:46,185 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225473.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:51,110 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225480.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:53,925 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:56,557 INFO [train.py:901] (1/4) Epoch 28, batch 7250, loss[loss=0.1716, simple_loss=0.2465, pruned_loss=0.0484, over 7422.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2833, pruned_loss=0.05854, over 1610752.48 frames. ], batch size: 17, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:13:03,596 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:07,602 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225504.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:16,360 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.486e+02 3.022e+02 3.617e+02 8.325e+02, threshold=6.044e+02, percent-clipped=6.0 +2023-02-09 01:13:21,985 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225523.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:32,777 INFO [train.py:901] (1/4) Epoch 28, batch 7300, loss[loss=0.1863, simple_loss=0.2719, pruned_loss=0.05032, over 8106.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2826, pruned_loss=0.05836, over 1610361.63 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:13:34,181 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225540.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:56,385 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. 
limit=2.0 +2023-02-09 01:14:00,391 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225577.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:14:02,407 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:07,681 INFO [train.py:901] (1/4) Epoch 28, batch 7350, loss[loss=0.1762, simple_loss=0.2674, pruned_loss=0.04255, over 8199.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2832, pruned_loss=0.05825, over 1614690.48 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:14:24,562 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 01:14:27,903 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.380e+02 2.753e+02 3.463e+02 7.224e+02, threshold=5.506e+02, percent-clipped=3.0 +2023-02-09 01:14:29,522 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225619.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:42,948 INFO [train.py:901] (1/4) Epoch 28, batch 7400, loss[loss=0.2312, simple_loss=0.3117, pruned_loss=0.07532, over 8515.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2841, pruned_loss=0.0588, over 1614644.66 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:14:42,988 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 01:14:48,673 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8986, 1.3675, 3.4473, 1.5843, 2.4874, 3.7593, 3.8671, 3.2414], + device='cuda:1'), covar=tensor([0.1294, 0.1998, 0.0307, 0.2090, 0.0949, 0.0221, 0.0553, 0.0510], + device='cuda:1'), in_proj_covar=tensor([0.0309, 0.0327, 0.0295, 0.0325, 0.0326, 0.0277, 0.0443, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 01:15:18,721 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3690, 1.4273, 4.5486, 1.7071, 4.0674, 3.7875, 4.1112, 3.9935], + device='cuda:1'), covar=tensor([0.0638, 0.4786, 0.0529, 0.4489, 0.1040, 0.0994, 0.0637, 0.0674], + device='cuda:1'), in_proj_covar=tensor([0.0683, 0.0664, 0.0735, 0.0660, 0.0747, 0.0636, 0.0645, 0.0717], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:15:19,316 INFO [train.py:901] (1/4) Epoch 28, batch 7450, loss[loss=0.205, simple_loss=0.2802, pruned_loss=0.06495, over 8512.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2836, pruned_loss=0.05822, over 1617414.04 frames. ], batch size: 28, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:15:23,690 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8564, 1.4565, 1.7034, 1.3191, 0.8566, 1.4468, 1.6096, 1.4301], + device='cuda:1'), covar=tensor([0.0610, 0.1292, 0.1649, 0.1528, 0.0639, 0.1546, 0.0762, 0.0714], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0113, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 01:15:25,007 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-09 01:15:40,020 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.396e+02 3.007e+02 3.866e+02 7.466e+02, threshold=6.014e+02, percent-clipped=6.0 +2023-02-09 01:15:54,482 INFO [train.py:901] (1/4) Epoch 28, batch 7500, loss[loss=0.2037, simple_loss=0.2903, pruned_loss=0.05849, over 8504.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.05763, over 1618491.53 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:16:32,496 INFO [train.py:901] (1/4) Epoch 28, batch 7550, loss[loss=0.1883, simple_loss=0.2716, pruned_loss=0.05246, over 7805.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2827, pruned_loss=0.05769, over 1616623.74 frames. ], batch size: 20, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:16:39,079 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6091, 2.5587, 1.8263, 2.3047, 2.2754, 1.5783, 2.2017, 2.3069], + device='cuda:1'), covar=tensor([0.1512, 0.0444, 0.1315, 0.0728, 0.0759, 0.1695, 0.0935, 0.0925], + device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0244, 0.0341, 0.0312, 0.0300, 0.0345, 0.0348, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 01:16:41,874 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225801.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:16:52,725 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.388e+02 3.127e+02 4.485e+02 1.321e+03, threshold=6.254e+02, percent-clipped=11.0 +2023-02-09 01:16:57,629 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225824.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:05,945 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225836.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:07,143 INFO [train.py:901] (1/4) Epoch 28, batch 7600, loss[loss=0.229, simple_loss=0.3036, pruned_loss=0.07726, over 7202.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2812, pruned_loss=0.0571, over 1613812.74 frames. ], batch size: 72, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:17:23,280 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225861.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:27,273 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225867.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:34,159 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:37,623 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225880.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:40,887 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225884.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:43,480 INFO [train.py:901] (1/4) Epoch 28, batch 7650, loss[loss=0.2333, simple_loss=0.3158, pruned_loss=0.0754, over 8503.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05771, over 1616027.30 frames. 
], batch size: 26, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:17:51,603 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225900.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:01,180 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225913.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:03,341 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225916.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:03,835 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.353e+02 2.789e+02 3.444e+02 7.654e+02, threshold=5.579e+02, percent-clipped=1.0 +2023-02-09 01:18:06,734 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225921.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:18:18,657 INFO [train.py:901] (1/4) Epoch 28, batch 7700, loss[loss=0.2104, simple_loss=0.2949, pruned_loss=0.06298, over 8502.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2824, pruned_loss=0.05749, over 1617082.98 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:18:19,535 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225939.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:31,341 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-09 01:18:44,239 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 01:18:44,498 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1811, 1.8964, 2.3376, 2.0163, 2.2997, 2.1941, 2.1226, 1.1985], + device='cuda:1'), covar=tensor([0.5704, 0.5020, 0.2238, 0.4272, 0.2647, 0.3639, 0.2069, 0.5580], + device='cuda:1'), in_proj_covar=tensor([0.0964, 0.1031, 0.0836, 0.0999, 0.1025, 0.0936, 0.0772, 0.0855], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 01:18:49,257 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225982.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:53,157 INFO [train.py:901] (1/4) Epoch 28, batch 7750, loss[loss=0.1796, simple_loss=0.252, pruned_loss=0.05362, over 7537.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2817, pruned_loss=0.05728, over 1612558.87 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:19:01,892 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225999.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:19:15,711 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.439e+02 2.815e+02 3.514e+02 7.333e+02, threshold=5.630e+02, percent-clipped=1.0 +2023-02-09 01:19:29,663 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226036.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:19:30,737 INFO [train.py:901] (1/4) Epoch 28, batch 7800, loss[loss=0.221, simple_loss=0.3007, pruned_loss=0.07066, over 8365.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05777, over 1612618.11 frames. 
], batch size: 24, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:19:45,497 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226059.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:20:05,553 INFO [train.py:901] (1/4) Epoch 28, batch 7850, loss[loss=0.1852, simple_loss=0.2793, pruned_loss=0.04552, over 8557.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.0572, over 1612788.47 frames. ], batch size: 49, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:20:25,265 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.405e+02 3.030e+02 3.828e+02 1.208e+03, threshold=6.060e+02, percent-clipped=4.0 +2023-02-09 01:20:39,665 INFO [train.py:901] (1/4) Epoch 28, batch 7900, loss[loss=0.1912, simple_loss=0.2677, pruned_loss=0.05729, over 7539.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.0571, over 1614936.44 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:21:02,886 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226172.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:13,381 INFO [train.py:901] (1/4) Epoch 28, batch 7950, loss[loss=0.2241, simple_loss=0.2979, pruned_loss=0.07514, over 8526.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2832, pruned_loss=0.05843, over 1615826.55 frames. ], batch size: 29, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:21:18,307 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226195.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:19,690 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226197.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:33,033 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.474e+02 2.920e+02 3.612e+02 7.690e+02, threshold=5.839e+02, percent-clipped=4.0 +2023-02-09 01:21:35,313 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226220.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:36,607 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6988, 1.5245, 1.8921, 1.4947, 0.9918, 1.5310, 2.1294, 2.1314], + device='cuda:1'), covar=tensor([0.0516, 0.1267, 0.1688, 0.1486, 0.0661, 0.1524, 0.0687, 0.0604], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0146], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 01:21:37,935 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226224.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:47,691 INFO [train.py:901] (1/4) Epoch 28, batch 8000, loss[loss=0.1858, simple_loss=0.2786, pruned_loss=0.04651, over 8753.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2823, pruned_loss=0.05764, over 1615090.82 frames. 
], batch size: 30, lr: 2.66e-03, grad_scale: 16.0 +2023-02-09 01:21:47,901 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226238.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:52,830 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2483, 1.2622, 3.4030, 1.1220, 2.9754, 2.8547, 3.0878, 2.9984], + device='cuda:1'), covar=tensor([0.0871, 0.4569, 0.0810, 0.4448, 0.1372, 0.1160, 0.0801, 0.0943], + device='cuda:1'), in_proj_covar=tensor([0.0688, 0.0668, 0.0738, 0.0664, 0.0753, 0.0641, 0.0648, 0.0721], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:21:57,007 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226251.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:59,697 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226255.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:00,947 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226257.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:05,095 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226263.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:17,270 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:22,557 INFO [train.py:901] (1/4) Epoch 28, batch 8050, loss[loss=0.1933, simple_loss=0.2678, pruned_loss=0.05941, over 7561.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2815, pruned_loss=0.05767, over 1598682.53 frames. ], batch size: 18, lr: 2.66e-03, grad_scale: 16.0 +2023-02-09 01:22:25,397 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226292.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:22:30,115 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.3853, 1.5715, 4.6016, 1.7221, 4.0940, 3.7898, 4.1811, 4.0590], + device='cuda:1'), covar=tensor([0.0597, 0.4339, 0.0510, 0.4347, 0.1080, 0.1027, 0.0522, 0.0679], + device='cuda:1'), in_proj_covar=tensor([0.0685, 0.0665, 0.0735, 0.0661, 0.0749, 0.0638, 0.0644, 0.0718], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:22:37,471 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-09 01:22:42,673 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.434e+02 3.095e+02 3.696e+02 6.520e+02, threshold=6.190e+02, percent-clipped=3.0 +2023-02-09 01:22:42,888 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226317.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:22:57,851 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-09 01:23:01,703 INFO [train.py:901] (1/4) Epoch 29, batch 0, loss[loss=0.1891, simple_loss=0.274, pruned_loss=0.05211, over 7929.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.274, pruned_loss=0.05211, over 7929.00 frames. ], batch size: 20, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:23:01,703 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 01:23:13,264 INFO [train.py:935] (1/4) Epoch 29, validation: loss=0.1705, simple_loss=0.2705, pruned_loss=0.03528, over 944034.00 frames. 
+2023-02-09 01:23:13,265 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 01:23:26,225 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226339.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:23:29,655 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-09 01:23:39,590 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-09 01:23:49,914 INFO [train.py:901] (1/4) Epoch 29, batch 50, loss[loss=0.2237, simple_loss=0.3086, pruned_loss=0.06942, over 8769.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2855, pruned_loss=0.06025, over 368168.85 frames. ], batch size: 40, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:23:50,830 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226372.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:23:53,661 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7005, 1.9195, 1.9411, 1.4428, 2.0929, 1.4167, 0.5997, 1.9377], + device='cuda:1'), covar=tensor([0.0660, 0.0430, 0.0361, 0.0603, 0.0452, 0.1049, 0.1091, 0.0319], + device='cuda:1'), in_proj_covar=tensor([0.0478, 0.0414, 0.0368, 0.0462, 0.0398, 0.0555, 0.0406, 0.0443], + device='cuda:1'), out_proj_covar=tensor([1.2657e-04, 1.0724e-04, 9.5925e-05, 1.2073e-04, 1.0402e-04, 1.5462e-04, + 1.0826e-04, 1.1606e-04], device='cuda:1') +2023-02-09 01:24:06,019 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-09 01:24:12,517 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226403.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:24:22,934 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.293e+02 2.936e+02 3.721e+02 6.222e+02, threshold=5.872e+02, percent-clipped=1.0 +2023-02-09 01:24:25,777 INFO [train.py:901] (1/4) Epoch 29, batch 100, loss[loss=0.1604, simple_loss=0.2575, pruned_loss=0.03164, over 8252.00 frames. ], tot_loss[loss=0.2037, simple_loss=0.2874, pruned_loss=0.05998, over 648804.17 frames. ], batch size: 24, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:24:30,614 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-09 01:24:33,769 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1517, 1.9584, 2.4465, 2.0914, 2.3912, 2.2323, 2.0838, 1.3477], + device='cuda:1'), covar=tensor([0.5865, 0.5081, 0.2267, 0.4116, 0.2792, 0.3427, 0.2031, 0.5554], + device='cuda:1'), in_proj_covar=tensor([0.0969, 0.1034, 0.0840, 0.1003, 0.1030, 0.0940, 0.0776, 0.0858], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 01:25:02,686 INFO [train.py:901] (1/4) Epoch 29, batch 150, loss[loss=0.1671, simple_loss=0.2458, pruned_loss=0.04416, over 7800.00 frames. ], tot_loss[loss=0.2047, simple_loss=0.2873, pruned_loss=0.06105, over 860979.21 frames. 
], batch size: 19, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:25:34,579 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.438e+02 2.916e+02 4.111e+02 7.524e+02, threshold=5.832e+02, percent-clipped=2.0 +2023-02-09 01:25:35,513 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226518.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:25:37,496 INFO [train.py:901] (1/4) Epoch 29, batch 200, loss[loss=0.1507, simple_loss=0.2344, pruned_loss=0.03355, over 7687.00 frames. ], tot_loss[loss=0.2031, simple_loss=0.286, pruned_loss=0.06008, over 1027035.00 frames. ], batch size: 18, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:12,529 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1822, 1.3298, 1.6916, 1.3065, 0.7367, 1.4611, 1.2196, 1.0664], + device='cuda:1'), covar=tensor([0.0658, 0.1310, 0.1672, 0.1477, 0.0580, 0.1483, 0.0696, 0.0745], + device='cuda:1'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 01:26:12,615 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0023, 1.7448, 2.0307, 1.8333, 1.9825, 2.0665, 1.9610, 0.9098], + device='cuda:1'), covar=tensor([0.5906, 0.5143, 0.2409, 0.4039, 0.2604, 0.3538, 0.2069, 0.5407], + device='cuda:1'), in_proj_covar=tensor([0.0965, 0.1030, 0.0837, 0.0999, 0.1025, 0.0935, 0.0773, 0.0852], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 01:26:13,673 INFO [train.py:901] (1/4) Epoch 29, batch 250, loss[loss=0.1976, simple_loss=0.2815, pruned_loss=0.05689, over 8321.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2848, pruned_loss=0.05925, over 1159601.88 frames. ], batch size: 25, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:26,058 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-09 01:26:31,141 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:31,292 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:33,830 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-09 01:26:46,400 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.514e+02 3.003e+02 3.645e+02 8.891e+02, threshold=6.006e+02, percent-clipped=9.0 +2023-02-09 01:26:48,779 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:49,245 INFO [train.py:901] (1/4) Epoch 29, batch 300, loss[loss=0.2207, simple_loss=0.2999, pruned_loss=0.07076, over 8345.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.2846, pruned_loss=0.059, over 1267199.19 frames. 
], batch size: 26, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:54,267 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226628.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:12,774 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226653.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:25,791 INFO [train.py:901] (1/4) Epoch 29, batch 350, loss[loss=0.1972, simple_loss=0.2897, pruned_loss=0.05234, over 8489.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2837, pruned_loss=0.05892, over 1342437.11 frames. ], batch size: 29, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:27:45,003 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2152, 2.4861, 2.7637, 1.6010, 3.1946, 1.7945, 1.5835, 2.1695], + device='cuda:1'), covar=tensor([0.0991, 0.0515, 0.0376, 0.0994, 0.0532, 0.1077, 0.1133, 0.0772], + device='cuda:1'), in_proj_covar=tensor([0.0483, 0.0418, 0.0372, 0.0466, 0.0401, 0.0559, 0.0408, 0.0447], + device='cuda:1'), out_proj_covar=tensor([1.2785e-04, 1.0831e-04, 9.7018e-05, 1.2160e-04, 1.0475e-04, 1.5584e-04, + 1.0894e-04, 1.1705e-04], device='cuda:1') +2023-02-09 01:27:46,743 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4106, 1.6284, 2.0587, 1.3741, 1.4570, 1.6930, 1.4946, 1.4616], + device='cuda:1'), covar=tensor([0.2030, 0.2704, 0.1106, 0.4664, 0.2238, 0.3566, 0.2616, 0.2289], + device='cuda:1'), in_proj_covar=tensor([0.0540, 0.0637, 0.0562, 0.0671, 0.0664, 0.0614, 0.0564, 0.0646], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 01:27:54,190 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226710.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:58,932 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.400e+02 2.862e+02 3.557e+02 6.632e+02, threshold=5.725e+02, percent-clipped=2.0 +2023-02-09 01:28:01,712 INFO [train.py:901] (1/4) Epoch 29, batch 400, loss[loss=0.1865, simple_loss=0.2779, pruned_loss=0.04753, over 8094.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2839, pruned_loss=0.05831, over 1406346.10 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:28:37,571 INFO [train.py:901] (1/4) Epoch 29, batch 450, loss[loss=0.1422, simple_loss=0.2252, pruned_loss=0.02963, over 7803.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2832, pruned_loss=0.05756, over 1452706.22 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:28:38,565 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=1.99 vs. limit=5.0 +2023-02-09 01:28:39,871 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226774.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:28:53,563 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5361, 1.4623, 1.8303, 1.2147, 1.1548, 1.8367, 0.3182, 1.2536], + device='cuda:1'), covar=tensor([0.1430, 0.1111, 0.0366, 0.0739, 0.2285, 0.0397, 0.1696, 0.1023], + device='cuda:1'), in_proj_covar=tensor([0.0202, 0.0208, 0.0138, 0.0224, 0.0280, 0.0148, 0.0174, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 01:28:56,601 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.33 vs. 
limit=2.0 +2023-02-09 01:28:58,207 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226799.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:29:11,132 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.449e+02 2.964e+02 3.856e+02 9.700e+02, threshold=5.929e+02, percent-clipped=9.0 +2023-02-09 01:29:13,796 INFO [train.py:901] (1/4) Epoch 29, batch 500, loss[loss=0.2672, simple_loss=0.3269, pruned_loss=0.1037, over 6707.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2827, pruned_loss=0.05755, over 1488248.61 frames. ], batch size: 72, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:29:48,217 INFO [train.py:901] (1/4) Epoch 29, batch 550, loss[loss=0.1997, simple_loss=0.2839, pruned_loss=0.05779, over 8286.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05719, over 1518498.43 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:29:51,497 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-09 01:30:21,906 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.486e+02 3.156e+02 4.092e+02 1.034e+03, threshold=6.313e+02, percent-clipped=6.0 +2023-02-09 01:30:24,623 INFO [train.py:901] (1/4) Epoch 29, batch 600, loss[loss=0.1872, simple_loss=0.2647, pruned_loss=0.05489, over 7966.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2831, pruned_loss=0.0575, over 1544731.24 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:30:28,540 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.68 vs. limit=5.0 +2023-02-09 01:30:35,116 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4575, 2.0329, 4.6417, 2.1331, 2.6919, 5.1658, 5.3467, 4.5549], + device='cuda:1'), covar=tensor([0.1208, 0.1662, 0.0194, 0.1901, 0.1037, 0.0175, 0.0262, 0.0515], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0327, 0.0295, 0.0325, 0.0327, 0.0278, 0.0444, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 01:30:38,671 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6248, 2.3991, 3.0892, 2.5560, 3.1225, 2.6279, 2.5479, 1.9046], + device='cuda:1'), covar=tensor([0.5799, 0.5635, 0.2290, 0.4229, 0.2789, 0.3108, 0.1782, 0.5928], + device='cuda:1'), in_proj_covar=tensor([0.0965, 0.1033, 0.0838, 0.0999, 0.1026, 0.0936, 0.0772, 0.0852], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 01:30:43,053 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-09 01:30:56,503 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226966.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:30:59,753 INFO [train.py:901] (1/4) Epoch 29, batch 650, loss[loss=0.199, simple_loss=0.2932, pruned_loss=0.05238, over 8360.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05773, over 1554561.09 frames. 
], batch size: 26, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:31:13,843 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:31:25,429 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227007.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:31:32,762 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.425e+02 2.942e+02 3.859e+02 6.314e+02, threshold=5.885e+02, percent-clipped=1.0 +2023-02-09 01:31:36,261 INFO [train.py:901] (1/4) Epoch 29, batch 700, loss[loss=0.2022, simple_loss=0.2996, pruned_loss=0.05238, over 8513.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2829, pruned_loss=0.05778, over 1563043.17 frames. ], batch size: 39, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:31:56,845 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9901, 2.2028, 1.8207, 2.8653, 1.4800, 1.6873, 2.2431, 2.2785], + device='cuda:1'), covar=tensor([0.0927, 0.0794, 0.1090, 0.0442, 0.1040, 0.1451, 0.0774, 0.0793], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0214, 0.0204, 0.0249, 0.0251, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 01:32:12,699 INFO [train.py:901] (1/4) Epoch 29, batch 750, loss[loss=0.2129, simple_loss=0.2977, pruned_loss=0.06407, over 8348.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05779, over 1576683.50 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:32:31,396 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-09 01:32:40,246 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-09 01:32:44,348 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.547e+02 3.055e+02 4.023e+02 1.198e+03, threshold=6.109e+02, percent-clipped=3.0 +2023-02-09 01:32:47,760 INFO [train.py:901] (1/4) Epoch 29, batch 800, loss[loss=0.1892, simple_loss=0.2596, pruned_loss=0.05939, over 7707.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2827, pruned_loss=0.05771, over 1584632.88 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:33:24,580 INFO [train.py:901] (1/4) Epoch 29, batch 850, loss[loss=0.1964, simple_loss=0.2782, pruned_loss=0.05731, over 8245.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.281, pruned_loss=0.05656, over 1585892.69 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:33:57,024 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.463e+02 2.886e+02 3.949e+02 8.845e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 01:33:59,146 INFO [train.py:901] (1/4) Epoch 29, batch 900, loss[loss=0.2122, simple_loss=0.2836, pruned_loss=0.07035, over 8070.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2804, pruned_loss=0.05628, over 1593686.63 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:34:35,613 INFO [train.py:901] (1/4) Epoch 29, batch 950, loss[loss=0.2221, simple_loss=0.3004, pruned_loss=0.07194, over 8300.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.05677, over 1601952.00 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:04,861 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. 
Duration: 25.3818125 +2023-02-09 01:35:09,005 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.581e+02 3.033e+02 3.905e+02 1.035e+03, threshold=6.066e+02, percent-clipped=4.0 +2023-02-09 01:35:11,178 INFO [train.py:901] (1/4) Epoch 29, batch 1000, loss[loss=0.1891, simple_loss=0.2601, pruned_loss=0.05903, over 7533.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.282, pruned_loss=0.05671, over 1610323.46 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:32,312 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:35:39,825 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-09 01:35:47,252 INFO [train.py:901] (1/4) Epoch 29, batch 1050, loss[loss=0.1921, simple_loss=0.2673, pruned_loss=0.05844, over 5966.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2815, pruned_loss=0.05619, over 1609886.90 frames. ], batch size: 13, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:52,716 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-09 01:36:22,051 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.373e+02 3.020e+02 3.651e+02 1.051e+03, threshold=6.040e+02, percent-clipped=1.0 +2023-02-09 01:36:23,015 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2415, 1.0778, 1.3289, 1.0621, 1.0117, 1.3375, 0.1632, 1.0650], + device='cuda:1'), covar=tensor([0.1385, 0.1299, 0.0489, 0.0646, 0.2302, 0.0542, 0.1733, 0.1071], + device='cuda:1'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0224, 0.0278, 0.0147, 0.0173, 0.0199], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 01:36:24,319 INFO [train.py:901] (1/4) Epoch 29, batch 1100, loss[loss=0.143, simple_loss=0.2283, pruned_loss=0.02888, over 7707.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2819, pruned_loss=0.05634, over 1616144.86 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:36:30,384 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227429.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:56,358 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227466.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:59,076 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8390, 1.4732, 1.6590, 1.4191, 1.0318, 1.5080, 1.7080, 1.5099], + device='cuda:1'), covar=tensor([0.0585, 0.1354, 0.1823, 0.1570, 0.0631, 0.1581, 0.0744, 0.0713], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0163, 0.0102, 0.0164, 0.0114, 0.0147], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 01:36:59,547 INFO [train.py:901] (1/4) Epoch 29, batch 1150, loss[loss=0.1808, simple_loss=0.2756, pruned_loss=0.04296, over 8293.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2809, pruned_loss=0.05619, over 1618672.02 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:06,458 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. 
Duration: 27.8166875
[training log condensed: 2023-02-09 01:37:34 to 01:58:47, rank (1/4). Epoch 29, batches 1200 to 2950, one train.py:901 record every 50 batches; tot_loss stays in 0.195 to 0.199 (simple_loss 0.280 to 0.283, pruned_loss 0.055 to 0.058) over roughly 1.62M frames; lr 2.61e-03, dropping to 2.60e-03 near batch 2100; grad_scale 8.0, raised to 16.0 at batch 2850. optim.py:369 reports grad-norm quartiles with Clipping_scale=2.0 and thresholds between 5.259e+02 and 6.551e+02 (percent-clipped 0.0 to 17.0). Interleaved zipformer.py diagnostics: attn_weights_entropy tensors, per-layer warmup counters (batch_count 227711 to 229187, occasional layers_to_drop), and scaling.py whitening metrics vs. their limits. Over-long cuts excluded from training: 8291-282929-0007-9590 (25.85 s), 7357-94126-0021-44397_sp0.9 (27.51 s), 7357-94126-0014-44390 (27.92 s), 4964-30587-0040-138716_sp0.9 (25.09 s).]
+2023-02-09 01:58:47,081 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
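WARNING records like the one above come from a duration filter applied to the training cuts before batching: utterances too short to train the pruned transducer loss on, or too long to fit comfortably in GPU memory, are dropped and logged. A minimal sketch of such a filter, assuming lhotse's `CutSet` API; the 1 s and 25 s bounds are illustrative guesses read off the excluded durations in this log, and the recipe's own `train.py` defines the real limits:

```python
import logging

def remove_short_and_long_utt(c) -> bool:
    # Drop cuts outside an assumed [1.0 s, 25.0 s] window, logging them
    # in the same format as the WARNING lines in this training log.
    if c.duration < 1.0 or c.duration > 25.0:
        logging.warning(
            f"Exclude cut with ID {c.id} from training. Duration: {c.duration}"
        )
        return False
    return True

# train_cuts would be a lhotse CutSet; CutSet.filter() is lazy, so each
# exclusion is logged when the sampler first touches the offending cut:
# train_cuts = train_cuts.filter(remove_short_and_long_utt)
```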
[training log condensed: 01:58:47 to 02:23:07. Epoch 29, batches 2950 to 4950 continue the same pattern; tot_loss 0.196 to 0.199; lr falls to 2.59e-03 near batch 3900; grad_scale moves between 16.0, 4.0 and 8.0. At batch 3000 a validation pass reports loss=0.17, simple_loss=0.2699, pruned_loss=0.03504 over 944034.00 frames, and maximum memory allocated so far is 6668MB. Grad-norm clipping thresholds range from 5.470e+02 to 6.594e+02. Further cuts excluded from training: over-short 2411-132532-0017-25057_sp1.1 (0.97 s), 3557-8342-0013-71585_sp1.1 (0.84 s), 3033-130750-0096-107983 (0.83 s); over-long 453-131332-0000-131866_sp0.9 (25.33 s), 7255-291500-0005-9467 (25.04 s), 1914-133440-0024-53073_sp0.9 (25.24 s), 5622-44585-0006-50425_sp0.9 (28.64 s), 2929-85685-0079-61403_sp0.9 (33.04 s), 7255-291500-0003-9465 (26.83 s), 5622-44585-0006-50425 (25.78 s), 3972-170212-0014-103914 (26.21 s).]
+2023-02-09 02:22:31,032 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.490e+02 3.133e+02 4.186e+02 8.287e+02, threshold=6.266e+02, percent-clipped=6.0
], batch size: 34, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:17,261 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231283.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:22,755 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231291.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:43,160 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.476e+02 2.910e+02 3.581e+02 7.956e+02, threshold=5.820e+02, percent-clipped=2.0 +2023-02-09 02:23:43,853 INFO [train.py:901] (1/4) Epoch 29, batch 5000, loss[loss=0.2223, simple_loss=0.3109, pruned_loss=0.06691, over 8608.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.0572, over 1618724.65 frames. ], batch size: 49, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:45,410 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5184, 1.2902, 2.2214, 1.2852, 2.2205, 2.3914, 2.5369, 2.0270], + device='cuda:1'), covar=tensor([0.1032, 0.1401, 0.0467, 0.1959, 0.0809, 0.0413, 0.0834, 0.0684], + device='cuda:1'), in_proj_covar=tensor([0.0313, 0.0330, 0.0299, 0.0327, 0.0328, 0.0281, 0.0450, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 02:24:17,059 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:24:19,691 INFO [train.py:901] (1/4) Epoch 29, batch 5050, loss[loss=0.2384, simple_loss=0.3168, pruned_loss=0.07997, over 8545.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05671, over 1620821.60 frames. ], batch size: 49, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:24:37,781 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 02:24:55,715 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.315e+02 2.860e+02 3.491e+02 8.708e+02, threshold=5.721e+02, percent-clipped=8.0 +2023-02-09 02:24:56,368 INFO [train.py:901] (1/4) Epoch 29, batch 5100, loss[loss=0.1817, simple_loss=0.2513, pruned_loss=0.05603, over 7443.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05663, over 1619078.84 frames. ], batch size: 17, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:32,299 INFO [train.py:901] (1/4) Epoch 29, batch 5150, loss[loss=0.2292, simple_loss=0.3197, pruned_loss=0.0693, over 8142.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2805, pruned_loss=0.05648, over 1619096.59 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:52,005 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 02:26:07,335 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-09 02:26:08,155 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.313e+02 2.816e+02 3.592e+02 7.666e+02, threshold=5.632e+02, percent-clipped=6.0 +2023-02-09 02:26:08,940 INFO [train.py:901] (1/4) Epoch 29, batch 5200, loss[loss=0.1977, simple_loss=0.2909, pruned_loss=0.05229, over 8248.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2808, pruned_loss=0.05662, over 1618155.92 frames. 
], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:26:12,095 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:30,200 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231550.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:39,125 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 02:26:44,754 INFO [train.py:901] (1/4) Epoch 29, batch 5250, loss[loss=0.2286, simple_loss=0.3081, pruned_loss=0.07453, over 8508.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.05661, over 1614795.43 frames. ], batch size: 28, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:15,796 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231613.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:20,503 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.485e+02 2.947e+02 3.559e+02 7.815e+02, threshold=5.893e+02, percent-clipped=2.0 +2023-02-09 02:27:21,229 INFO [train.py:901] (1/4) Epoch 29, batch 5300, loss[loss=0.1937, simple_loss=0.2774, pruned_loss=0.05505, over 8122.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2806, pruned_loss=0.05654, over 1613525.46 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:22,849 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:41,043 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:46,366 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1019, 1.5176, 4.2855, 1.7818, 3.8045, 3.5911, 3.8826, 3.7929], + device='cuda:1'), covar=tensor([0.0711, 0.4739, 0.0599, 0.4157, 0.1098, 0.1000, 0.0625, 0.0704], + device='cuda:1'), in_proj_covar=tensor([0.0682, 0.0660, 0.0742, 0.0653, 0.0739, 0.0632, 0.0640, 0.0717], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:27:47,752 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4444, 2.6305, 2.2468, 3.9822, 1.6662, 2.0165, 2.4005, 2.6487], + device='cuda:1'), covar=tensor([0.0701, 0.0846, 0.0785, 0.0243, 0.1060, 0.1237, 0.0958, 0.0808], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0213, 0.0202, 0.0245, 0.0250, 0.0204], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 02:27:53,030 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231665.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:54,362 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1421, 1.5280, 4.3469, 1.5999, 3.8276, 3.6086, 3.9179, 3.8180], + device='cuda:1'), covar=tensor([0.0697, 0.4473, 0.0558, 0.4373, 0.1105, 0.1005, 0.0664, 0.0724], + device='cuda:1'), in_proj_covar=tensor([0.0682, 0.0660, 0.0742, 0.0653, 0.0739, 0.0632, 0.0640, 0.0717], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:27:57,008 INFO [train.py:901] (1/4) Epoch 29, batch 5350, loss[loss=0.2253, simple_loss=0.303, pruned_loss=0.07375, over 8490.00 frames. 
], tot_loss[loss=0.1971, simple_loss=0.281, pruned_loss=0.0566, over 1609454.88 frames. ], batch size: 39, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:32,684 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.535e+02 3.138e+02 3.956e+02 6.651e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-09 02:28:33,431 INFO [train.py:901] (1/4) Epoch 29, batch 5400, loss[loss=0.1947, simple_loss=0.2867, pruned_loss=0.05136, over 8667.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2811, pruned_loss=0.05701, over 1610618.20 frames. ], batch size: 34, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:38,406 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:29:09,675 INFO [train.py:901] (1/4) Epoch 29, batch 5450, loss[loss=0.2381, simple_loss=0.3165, pruned_loss=0.07981, over 8501.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2796, pruned_loss=0.05621, over 1608447.62 frames. ], batch size: 26, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:24,004 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4943, 2.3222, 1.7413, 2.2055, 1.9091, 1.4661, 1.9577, 1.9472], + device='cuda:1'), covar=tensor([0.1374, 0.0506, 0.1298, 0.0612, 0.0917, 0.1744, 0.1005, 0.0977], + device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0244, 0.0343, 0.0315, 0.0302, 0.0348, 0.0351, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 02:29:29,513 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 02:29:43,731 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7087, 1.9612, 2.0345, 1.3549, 2.2141, 1.4849, 0.7296, 1.9187], + device='cuda:1'), covar=tensor([0.0761, 0.0448, 0.0348, 0.0771, 0.0508, 0.1034, 0.1120, 0.0445], + device='cuda:1'), in_proj_covar=tensor([0.0476, 0.0417, 0.0371, 0.0465, 0.0400, 0.0556, 0.0408, 0.0445], + device='cuda:1'), out_proj_covar=tensor([1.2593e-04, 1.0787e-04, 9.6390e-05, 1.2157e-04, 1.0474e-04, 1.5474e-04, + 1.0873e-04, 1.1633e-04], device='cuda:1') +2023-02-09 02:29:44,878 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.416e+02 2.826e+02 3.521e+02 6.915e+02, threshold=5.653e+02, percent-clipped=1.0 +2023-02-09 02:29:45,630 INFO [train.py:901] (1/4) Epoch 29, batch 5500, loss[loss=0.1606, simple_loss=0.246, pruned_loss=0.03761, over 7928.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2797, pruned_loss=0.05657, over 1606132.12 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:48,659 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8179, 1.4463, 1.8473, 1.3759, 0.8932, 1.5761, 1.5897, 1.5470], + device='cuda:1'), covar=tensor([0.0590, 0.1299, 0.1650, 0.1505, 0.0624, 0.1454, 0.0753, 0.0679], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 02:30:21,383 INFO [train.py:901] (1/4) Epoch 29, batch 5550, loss[loss=0.2009, simple_loss=0.2822, pruned_loss=0.05978, over 7967.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2799, pruned_loss=0.05667, over 1611209.73 frames. 
], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:30:49,872 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-09 02:30:56,571 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.336e+02 2.917e+02 3.506e+02 1.057e+03, threshold=5.834e+02, percent-clipped=5.0 +2023-02-09 02:30:57,329 INFO [train.py:901] (1/4) Epoch 29, batch 5600, loss[loss=0.2183, simple_loss=0.2938, pruned_loss=0.07139, over 8101.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2804, pruned_loss=0.05698, over 1611387.78 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:31:24,823 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-09 02:31:34,496 INFO [train.py:901] (1/4) Epoch 29, batch 5650, loss[loss=0.1655, simple_loss=0.2509, pruned_loss=0.04008, over 7787.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2798, pruned_loss=0.05663, over 1608139.87 frames. ], batch size: 19, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:31:38,801 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 02:31:43,800 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:02,828 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:02,957 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:10,312 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.351e+02 2.719e+02 3.536e+02 6.635e+02, threshold=5.438e+02, percent-clipped=1.0 +2023-02-09 02:32:11,021 INFO [train.py:901] (1/4) Epoch 29, batch 5700, loss[loss=0.2154, simple_loss=0.301, pruned_loss=0.06491, over 8372.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.281, pruned_loss=0.05671, over 1612058.09 frames. ], batch size: 48, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:45,188 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 02:32:47,252 INFO [train.py:901] (1/4) Epoch 29, batch 5750, loss[loss=0.2078, simple_loss=0.3015, pruned_loss=0.05705, over 8479.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05694, over 1616047.65 frames. 
], batch size: 25, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:54,527 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9678, 2.4320, 3.6936, 1.8814, 2.0717, 3.6739, 0.6280, 2.1687], + device='cuda:1'), covar=tensor([0.1306, 0.1120, 0.0265, 0.1477, 0.2078, 0.0279, 0.2083, 0.1371], + device='cuda:1'), in_proj_covar=tensor([0.0202, 0.0205, 0.0137, 0.0224, 0.0278, 0.0149, 0.0172, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 02:33:07,927 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7615, 2.3720, 4.1958, 1.7043, 3.1033, 2.3737, 1.9881, 3.0565], + device='cuda:1'), covar=tensor([0.1976, 0.2792, 0.0798, 0.4746, 0.1822, 0.3288, 0.2412, 0.2334], + device='cuda:1'), in_proj_covar=tensor([0.0544, 0.0642, 0.0566, 0.0676, 0.0667, 0.0615, 0.0569, 0.0647], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:33:23,822 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 2.417e+02 3.013e+02 3.730e+02 1.097e+03, threshold=6.026e+02, percent-clipped=6.0 +2023-02-09 02:33:24,554 INFO [train.py:901] (1/4) Epoch 29, batch 5800, loss[loss=0.1635, simple_loss=0.2591, pruned_loss=0.03401, over 7805.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.282, pruned_loss=0.05637, over 1618985.95 frames. ], batch size: 20, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:33:26,837 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232124.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:33:38,341 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9566, 1.5531, 3.1182, 1.2422, 2.3358, 3.3790, 3.6511, 2.5642], + device='cuda:1'), covar=tensor([0.1393, 0.2112, 0.0458, 0.2937, 0.1189, 0.0397, 0.0580, 0.0963], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0328, 0.0295, 0.0325, 0.0326, 0.0279, 0.0446, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 02:33:46,044 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7270, 2.0172, 2.0971, 1.3719, 2.1897, 1.5377, 0.6943, 2.0128], + device='cuda:1'), covar=tensor([0.0694, 0.0412, 0.0337, 0.0706, 0.0452, 0.1061, 0.1044, 0.0383], + device='cuda:1'), in_proj_covar=tensor([0.0477, 0.0417, 0.0371, 0.0465, 0.0400, 0.0558, 0.0407, 0.0445], + device='cuda:1'), out_proj_covar=tensor([1.2632e-04, 1.0779e-04, 9.6523e-05, 1.2158e-04, 1.0470e-04, 1.5528e-04, + 1.0856e-04, 1.1647e-04], device='cuda:1') +2023-02-09 02:33:59,562 INFO [train.py:901] (1/4) Epoch 29, batch 5850, loss[loss=0.1607, simple_loss=0.2504, pruned_loss=0.03545, over 7917.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2806, pruned_loss=0.05555, over 1618849.16 frames. ], batch size: 20, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:34,659 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.421e+02 2.869e+02 3.503e+02 7.290e+02, threshold=5.737e+02, percent-clipped=3.0 +2023-02-09 02:34:35,359 INFO [train.py:901] (1/4) Epoch 29, batch 5900, loss[loss=0.1946, simple_loss=0.2802, pruned_loss=0.05456, over 8579.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2805, pruned_loss=0.05587, over 1617900.65 frames. ], batch size: 31, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:41,344 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.96 vs. 
limit=5.0 +2023-02-09 02:34:48,937 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.27 vs. limit=2.0 +2023-02-09 02:35:06,627 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:35:10,677 INFO [train.py:901] (1/4) Epoch 29, batch 5950, loss[loss=0.2386, simple_loss=0.3183, pruned_loss=0.07947, over 8696.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2807, pruned_loss=0.05629, over 1614119.03 frames. ], batch size: 34, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:13,134 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.48 vs. limit=2.0 +2023-02-09 02:35:18,589 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2459, 2.0533, 2.6541, 2.2403, 2.6238, 2.3249, 2.1597, 1.6032], + device='cuda:1'), covar=tensor([0.5741, 0.4924, 0.2065, 0.3643, 0.2452, 0.2974, 0.1845, 0.5137], + device='cuda:1'), in_proj_covar=tensor([0.0978, 0.1039, 0.0845, 0.1010, 0.1033, 0.0942, 0.0781, 0.0860], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 02:35:46,600 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.503e+02 2.837e+02 3.700e+02 9.228e+02, threshold=5.675e+02, percent-clipped=4.0 +2023-02-09 02:35:47,347 INFO [train.py:901] (1/4) Epoch 29, batch 6000, loss[loss=0.1991, simple_loss=0.2878, pruned_loss=0.05516, over 7976.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.05659, over 1612740.05 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:47,348 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 02:35:55,517 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8517, 3.8202, 3.5342, 2.0041, 3.3654, 3.5613, 3.3866, 3.4506], + device='cuda:1'), covar=tensor([0.0781, 0.0456, 0.0801, 0.4722, 0.0927, 0.0802, 0.1201, 0.0827], + device='cuda:1'), in_proj_covar=tensor([0.0543, 0.0459, 0.0448, 0.0560, 0.0441, 0.0467, 0.0441, 0.0410], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:36:01,202 INFO [train.py:935] (1/4) Epoch 29, validation: loss=0.1708, simple_loss=0.2701, pruned_loss=0.03577, over 944034.00 frames. +2023-02-09 02:36:01,203 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 02:36:15,989 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2381, 3.1057, 2.9208, 1.5526, 2.8526, 2.9256, 2.8683, 2.7649], + device='cuda:1'), covar=tensor([0.1266, 0.0921, 0.1470, 0.4689, 0.1203, 0.1288, 0.1765, 0.1188], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0460, 0.0450, 0.0562, 0.0442, 0.0469, 0.0442, 0.0412], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:36:37,559 INFO [train.py:901] (1/4) Epoch 29, batch 6050, loss[loss=0.1804, simple_loss=0.2644, pruned_loss=0.04814, over 8098.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2795, pruned_loss=0.05615, over 1610569.11 frames. 
], batch size: 21, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:36:44,009 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=232380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:36:57,843 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232399.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:37:02,394 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232405.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:37:12,730 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.608e+02 3.073e+02 4.031e+02 7.869e+02, threshold=6.145e+02, percent-clipped=3.0 +2023-02-09 02:37:13,450 INFO [train.py:901] (1/4) Epoch 29, batch 6100, loss[loss=0.1813, simple_loss=0.2714, pruned_loss=0.04562, over 8501.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2793, pruned_loss=0.05602, over 1612706.04 frames. ], batch size: 29, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:37:27,131 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 02:37:50,002 INFO [train.py:901] (1/4) Epoch 29, batch 6150, loss[loss=0.1854, simple_loss=0.2782, pruned_loss=0.04629, over 8027.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2811, pruned_loss=0.05687, over 1613595.95 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:38:06,444 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232494.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:38:25,292 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.548e+02 2.901e+02 3.636e+02 6.365e+02, threshold=5.801e+02, percent-clipped=1.0 +2023-02-09 02:38:25,864 INFO [train.py:901] (1/4) Epoch 29, batch 6200, loss[loss=0.2513, simple_loss=0.2941, pruned_loss=0.1042, over 7545.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2812, pruned_loss=0.05733, over 1609900.51 frames. ], batch size: 18, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:38:36,670 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232536.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:38:48,576 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3802, 2.3067, 1.7615, 2.0910, 1.8869, 1.5306, 1.8283, 1.8634], + device='cuda:1'), covar=tensor([0.1675, 0.0547, 0.1389, 0.0728, 0.0887, 0.1726, 0.1115, 0.1036], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0245, 0.0345, 0.0315, 0.0303, 0.0351, 0.0352, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 02:38:53,536 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0387, 1.8769, 2.2903, 1.9838, 2.2571, 2.1262, 1.9782, 1.2492], + device='cuda:1'), covar=tensor([0.5694, 0.4925, 0.2150, 0.3733, 0.2632, 0.3429, 0.2065, 0.5424], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1036, 0.0843, 0.1005, 0.1030, 0.0939, 0.0779, 0.0857], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 02:39:02,789 INFO [train.py:901] (1/4) Epoch 29, batch 6250, loss[loss=0.1875, simple_loss=0.276, pruned_loss=0.04949, over 8505.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2808, pruned_loss=0.05701, over 1611075.19 frames. 
], batch size: 29, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:39:07,143 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7176, 1.3933, 1.6855, 1.3020, 0.8677, 1.4555, 1.6166, 1.5169], + device='cuda:1'), covar=tensor([0.0617, 0.1309, 0.1654, 0.1542, 0.0621, 0.1547, 0.0754, 0.0648], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0155, 0.0190, 0.0162, 0.0102, 0.0164, 0.0114, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 02:39:16,229 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1499, 1.7330, 3.4065, 1.6320, 2.5564, 3.7839, 3.8250, 3.2950], + device='cuda:1'), covar=tensor([0.1221, 0.1843, 0.0377, 0.2167, 0.1171, 0.0236, 0.0636, 0.0520], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0327, 0.0295, 0.0326, 0.0325, 0.0278, 0.0445, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 02:39:23,932 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8692, 1.3578, 3.3814, 1.6740, 2.5381, 3.6615, 3.7769, 3.1746], + device='cuda:1'), covar=tensor([0.1200, 0.1965, 0.0286, 0.1959, 0.0854, 0.0221, 0.0585, 0.0524], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0326, 0.0295, 0.0326, 0.0325, 0.0278, 0.0445, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 02:39:30,146 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232609.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:39:37,831 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.332e+02 2.762e+02 3.491e+02 8.673e+02, threshold=5.523e+02, percent-clipped=4.0 +2023-02-09 02:39:39,193 INFO [train.py:901] (1/4) Epoch 29, batch 6300, loss[loss=0.1938, simple_loss=0.2737, pruned_loss=0.05702, over 7977.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2799, pruned_loss=0.05623, over 1610700.94 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:39:56,294 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5243, 2.3571, 3.0775, 2.4237, 2.9755, 2.6019, 2.5561, 1.9150], + device='cuda:1'), covar=tensor([0.5884, 0.5797, 0.2423, 0.4811, 0.3070, 0.3704, 0.1855, 0.6350], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1036, 0.0844, 0.1006, 0.1030, 0.0940, 0.0780, 0.0858], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 02:40:12,352 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6792, 2.3063, 3.8685, 1.5454, 2.7314, 2.2825, 1.7754, 2.9525], + device='cuda:1'), covar=tensor([0.2089, 0.2859, 0.0862, 0.5037, 0.2223, 0.3414, 0.2755, 0.2489], + device='cuda:1'), in_proj_covar=tensor([0.0544, 0.0641, 0.0565, 0.0674, 0.0667, 0.0614, 0.0569, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:40:14,953 INFO [train.py:901] (1/4) Epoch 29, batch 6350, loss[loss=0.2349, simple_loss=0.315, pruned_loss=0.07737, over 8723.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2795, pruned_loss=0.05571, over 1612201.21 frames. 
], batch size: 34, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:40:50,678 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.465e+02 3.018e+02 3.778e+02 1.284e+03, threshold=6.036e+02, percent-clipped=3.0 +2023-02-09 02:40:51,324 INFO [train.py:901] (1/4) Epoch 29, batch 6400, loss[loss=0.203, simple_loss=0.2967, pruned_loss=0.05469, over 8490.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2804, pruned_loss=0.05638, over 1613833.59 frames. ], batch size: 26, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:40:53,598 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232724.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:41:08,031 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232743.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:41:28,293 INFO [train.py:901] (1/4) Epoch 29, batch 6450, loss[loss=0.2147, simple_loss=0.3077, pruned_loss=0.06083, over 8285.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2806, pruned_loss=0.05617, over 1615831.34 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:05,016 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.358e+02 3.084e+02 4.266e+02 9.590e+02, threshold=6.168e+02, percent-clipped=5.0 +2023-02-09 02:42:05,753 INFO [train.py:901] (1/4) Epoch 29, batch 6500, loss[loss=0.2111, simple_loss=0.2913, pruned_loss=0.06551, over 8777.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2817, pruned_loss=0.05626, over 1625208.79 frames. ], batch size: 32, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:17,436 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232838.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:24,000 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-09 02:42:24,514 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6111, 2.5591, 1.8500, 2.2811, 2.1940, 1.6029, 2.1214, 2.1680], + device='cuda:1'), covar=tensor([0.1638, 0.0470, 0.1334, 0.0682, 0.0786, 0.1681, 0.1056, 0.1073], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0246, 0.0347, 0.0316, 0.0303, 0.0351, 0.0352, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 02:42:31,253 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232858.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:31,450 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-09 02:42:40,786 INFO [train.py:901] (1/4) Epoch 29, batch 6550, loss[loss=0.2392, simple_loss=0.3183, pruned_loss=0.08002, over 8624.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2814, pruned_loss=0.05648, over 1621748.51 frames. ], batch size: 34, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:47,143 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232880.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:47,794 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-09 02:43:06,964 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3169, 2.1320, 1.6854, 1.9234, 1.8060, 1.4451, 1.7889, 1.6710], + device='cuda:1'), covar=tensor([0.1414, 0.0459, 0.1292, 0.0624, 0.0767, 0.1688, 0.0969, 0.0960], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0247, 0.0348, 0.0317, 0.0304, 0.0352, 0.0353, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 02:43:08,193 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 02:43:16,620 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.398e+02 2.846e+02 3.696e+02 7.042e+02, threshold=5.692e+02, percent-clipped=2.0 +2023-02-09 02:43:17,343 INFO [train.py:901] (1/4) Epoch 29, batch 6600, loss[loss=0.1658, simple_loss=0.2471, pruned_loss=0.04226, over 7528.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2816, pruned_loss=0.05649, over 1620464.15 frames. ], batch size: 18, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:43:40,629 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232953.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:43:52,852 INFO [train.py:901] (1/4) Epoch 29, batch 6650, loss[loss=0.224, simple_loss=0.3094, pruned_loss=0.06934, over 8130.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.281, pruned_loss=0.05626, over 1618967.03 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:43:59,974 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=232980.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:10,260 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232995.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:17,501 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233005.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:19,561 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233008.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:28,483 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.388e+02 2.820e+02 3.481e+02 6.998e+02, threshold=5.640e+02, percent-clipped=2.0 +2023-02-09 02:44:28,503 INFO [train.py:901] (1/4) Epoch 29, batch 6700, loss[loss=0.2413, simple_loss=0.3242, pruned_loss=0.07917, over 8140.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2805, pruned_loss=0.05644, over 1617560.09 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:44:40,615 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233036.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:55,114 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7863, 2.2937, 4.1267, 1.5822, 2.9034, 2.4044, 1.8503, 3.0555], + device='cuda:1'), covar=tensor([0.2044, 0.2798, 0.0867, 0.4943, 0.2043, 0.3241, 0.2610, 0.2414], + device='cuda:1'), in_proj_covar=tensor([0.0543, 0.0640, 0.0565, 0.0675, 0.0665, 0.0614, 0.0567, 0.0646], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:45:06,272 INFO [train.py:901] (1/4) Epoch 29, batch 6750, loss[loss=0.1809, simple_loss=0.255, pruned_loss=0.05339, over 7434.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2802, pruned_loss=0.05662, over 1613743.00 frames. 
], batch size: 17, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:45:30,751 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 02:45:38,411 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233114.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:45:43,344 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.536e+02 3.042e+02 3.988e+02 8.675e+02, threshold=6.084e+02, percent-clipped=8.0 +2023-02-09 02:45:43,365 INFO [train.py:901] (1/4) Epoch 29, batch 6800, loss[loss=0.2006, simple_loss=0.2821, pruned_loss=0.05953, over 8036.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05707, over 1618543.29 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:45:56,010 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:46:18,744 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9831, 2.1065, 1.8776, 2.8041, 1.2466, 1.6967, 2.0195, 2.1690], + device='cuda:1'), covar=tensor([0.0786, 0.0889, 0.0872, 0.0343, 0.1122, 0.1349, 0.0843, 0.0847], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0196, 0.0246, 0.0215, 0.0203, 0.0248, 0.0250, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 02:46:19,947 INFO [train.py:901] (1/4) Epoch 29, batch 6850, loss[loss=0.2109, simple_loss=0.2919, pruned_loss=0.06494, over 8298.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.282, pruned_loss=0.0571, over 1619924.08 frames. ], batch size: 49, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:46:22,009 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. 
Duration: 27.0318125 +2023-02-09 02:46:34,900 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2071, 0.9843, 1.7164, 1.0036, 1.6162, 1.8617, 1.9569, 1.6180], + device='cuda:1'), covar=tensor([0.0814, 0.1213, 0.0483, 0.1627, 0.1230, 0.0322, 0.0714, 0.0482], + device='cuda:1'), in_proj_covar=tensor([0.0311, 0.0328, 0.0297, 0.0327, 0.0328, 0.0280, 0.0449, 0.0311], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 02:46:45,171 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7516, 2.3997, 1.8196, 2.1084, 1.9939, 1.7960, 1.9841, 2.1385], + device='cuda:1'), covar=tensor([0.1114, 0.0367, 0.1020, 0.0587, 0.0695, 0.1204, 0.0815, 0.0797], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0246, 0.0346, 0.0315, 0.0302, 0.0350, 0.0351, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 02:46:46,498 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233209.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:46:48,575 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3139, 2.1186, 2.6516, 2.2511, 2.8275, 2.4278, 2.2250, 1.6703], + device='cuda:1'), covar=tensor([0.6076, 0.5326, 0.2215, 0.3976, 0.2487, 0.3391, 0.2011, 0.5669], + device='cuda:1'), in_proj_covar=tensor([0.0975, 0.1037, 0.0844, 0.1010, 0.1030, 0.0943, 0.0780, 0.0859], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 02:46:55,110 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.470e+02 3.068e+02 3.817e+02 7.038e+02, threshold=6.136e+02, percent-clipped=5.0 +2023-02-09 02:46:55,130 INFO [train.py:901] (1/4) Epoch 29, batch 6900, loss[loss=0.2028, simple_loss=0.2823, pruned_loss=0.06166, over 8472.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2829, pruned_loss=0.05756, over 1616916.09 frames. ], batch size: 25, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:47:00,399 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3088, 2.1718, 2.7090, 2.2736, 2.7794, 2.4767, 2.2489, 1.6585], + device='cuda:1'), covar=tensor([0.5920, 0.5346, 0.2340, 0.4471, 0.2694, 0.3379, 0.2150, 0.6005], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1037, 0.0844, 0.1009, 0.1030, 0.0943, 0.0779, 0.0858], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 02:47:04,467 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233234.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:16,275 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233251.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:31,602 INFO [train.py:901] (1/4) Epoch 29, batch 6950, loss[loss=0.1911, simple_loss=0.2831, pruned_loss=0.04961, over 8568.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2815, pruned_loss=0.0568, over 1615657.46 frames. ], batch size: 31, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:47:33,664 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-09 02:47:35,285 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:35,913 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233277.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:48:07,351 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.388e+02 2.860e+02 3.725e+02 6.106e+02, threshold=5.720e+02, percent-clipped=0.0 +2023-02-09 02:48:07,372 INFO [train.py:901] (1/4) Epoch 29, batch 7000, loss[loss=0.1765, simple_loss=0.2646, pruned_loss=0.04419, over 8133.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2809, pruned_loss=0.05611, over 1614988.44 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:48:30,364 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233352.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:48:44,307 INFO [train.py:901] (1/4) Epoch 29, batch 7050, loss[loss=0.1946, simple_loss=0.2861, pruned_loss=0.0516, over 8601.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05642, over 1614360.70 frames. ], batch size: 49, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:48:51,037 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:49:21,596 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7459, 2.1680, 3.2097, 1.6462, 2.3571, 2.1361, 1.8785, 2.4197], + device='cuda:1'), covar=tensor([0.1959, 0.2647, 0.0916, 0.4612, 0.2101, 0.3340, 0.2389, 0.2365], + device='cuda:1'), in_proj_covar=tensor([0.0543, 0.0641, 0.0566, 0.0676, 0.0664, 0.0615, 0.0568, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:49:22,015 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.400e+02 3.088e+02 3.796e+02 6.683e+02, threshold=6.176e+02, percent-clipped=2.0 +2023-02-09 02:49:22,035 INFO [train.py:901] (1/4) Epoch 29, batch 7100, loss[loss=0.1924, simple_loss=0.2654, pruned_loss=0.05968, over 7799.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2816, pruned_loss=0.05654, over 1615128.16 frames. ], batch size: 19, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:49:54,755 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233467.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:49:57,410 INFO [train.py:901] (1/4) Epoch 29, batch 7150, loss[loss=0.1838, simple_loss=0.264, pruned_loss=0.05182, over 8343.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05656, over 1615190.33 frames. 
], batch size: 26, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:50:09,462 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6847, 1.8138, 1.5934, 2.0907, 1.1661, 1.4391, 1.6985, 1.8358], + device='cuda:1'), covar=tensor([0.0676, 0.0698, 0.0847, 0.0529, 0.0937, 0.1170, 0.0647, 0.0673], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0214, 0.0202, 0.0246, 0.0249, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 02:50:14,761 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:50:23,099 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.2107, 4.2012, 3.8223, 1.9349, 3.7108, 3.8307, 3.7338, 3.6313], + device='cuda:1'), covar=tensor([0.0821, 0.0572, 0.1028, 0.4607, 0.1023, 0.1037, 0.1262, 0.0868], + device='cuda:1'), in_proj_covar=tensor([0.0549, 0.0462, 0.0454, 0.0563, 0.0446, 0.0470, 0.0445, 0.0414], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:50:23,177 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7465, 1.4069, 1.7419, 1.3001, 1.0299, 1.4625, 1.6864, 1.4023], + device='cuda:1'), covar=tensor([0.0612, 0.1300, 0.1652, 0.1542, 0.0595, 0.1493, 0.0703, 0.0702], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:1') +2023-02-09 02:50:34,185 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.385e+02 2.900e+02 3.377e+02 5.605e+02, threshold=5.800e+02, percent-clipped=0.0 +2023-02-09 02:50:34,205 INFO [train.py:901] (1/4) Epoch 29, batch 7200, loss[loss=0.2193, simple_loss=0.3109, pruned_loss=0.06385, over 8631.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2801, pruned_loss=0.05586, over 1613997.09 frames. ], batch size: 31, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:10,563 INFO [train.py:901] (1/4) Epoch 29, batch 7250, loss[loss=0.1825, simple_loss=0.2691, pruned_loss=0.04799, over 7665.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2799, pruned_loss=0.05546, over 1616488.77 frames. ], batch size: 19, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:11,748 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.64 vs. limit=2.0 +2023-02-09 02:51:36,848 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6820, 1.5487, 2.3052, 1.3880, 1.2535, 2.2468, 0.4204, 1.3742], + device='cuda:1'), covar=tensor([0.1658, 0.1254, 0.0341, 0.1190, 0.2393, 0.0417, 0.1898, 0.1300], + device='cuda:1'), in_proj_covar=tensor([0.0204, 0.0207, 0.0138, 0.0227, 0.0281, 0.0149, 0.0174, 0.0202], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 02:51:46,281 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.384e+02 2.959e+02 3.398e+02 1.041e+03, threshold=5.918e+02, percent-clipped=4.0 +2023-02-09 02:51:46,302 INFO [train.py:901] (1/4) Epoch 29, batch 7300, loss[loss=0.1895, simple_loss=0.2759, pruned_loss=0.05153, over 8144.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2802, pruned_loss=0.05583, over 1620407.65 frames. 
], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:46,382 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233621.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:52:13,017 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233657.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:52:22,375 INFO [train.py:901] (1/4) Epoch 29, batch 7350, loss[loss=0.1944, simple_loss=0.2807, pruned_loss=0.05408, over 8485.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.279, pruned_loss=0.05527, over 1619015.66 frames. ], batch size: 29, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:52:27,563 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4095, 1.6244, 2.1472, 1.3420, 1.5022, 1.6668, 1.4785, 1.5471], + device='cuda:1'), covar=tensor([0.2173, 0.2915, 0.1131, 0.5200, 0.2335, 0.3830, 0.2701, 0.2370], + device='cuda:1'), in_proj_covar=tensor([0.0546, 0.0644, 0.0569, 0.0678, 0.0667, 0.0619, 0.0570, 0.0651], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:52:28,052 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 02:52:42,944 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1033, 1.5688, 4.2711, 1.8378, 3.7968, 3.5713, 3.8903, 3.7707], + device='cuda:1'), covar=tensor([0.0719, 0.4839, 0.0645, 0.4265, 0.1144, 0.1071, 0.0681, 0.0771], + device='cuda:1'), in_proj_covar=tensor([0.0695, 0.0671, 0.0756, 0.0663, 0.0755, 0.0642, 0.0650, 0.0733], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:52:48,258 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 02:52:58,115 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.663e+02 3.074e+02 4.100e+02 9.512e+02, threshold=6.147e+02, percent-clipped=7.0 +2023-02-09 02:52:58,135 INFO [train.py:901] (1/4) Epoch 29, batch 7400, loss[loss=0.1636, simple_loss=0.2484, pruned_loss=0.03945, over 7802.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2798, pruned_loss=0.05557, over 1617561.14 frames. ], batch size: 20, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:52:59,684 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:03,229 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:08,951 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233736.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:18,927 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233748.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:21,097 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233751.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:32,421 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 02:53:36,088 INFO [train.py:901] (1/4) Epoch 29, batch 7450, loss[loss=0.1647, simple_loss=0.2489, pruned_loss=0.04026, over 7697.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2792, pruned_loss=0.05537, over 1618274.61 frames. 
], batch size: 18, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:53:39,814 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:42,689 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8681, 1.6643, 2.0635, 1.6713, 1.1512, 1.7382, 2.3247, 1.9902], + device='cuda:1'), covar=tensor([0.0471, 0.1260, 0.1664, 0.1481, 0.0600, 0.1454, 0.0615, 0.0640], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0115, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:1') +2023-02-09 02:54:12,671 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.228e+02 2.637e+02 3.402e+02 7.399e+02, threshold=5.273e+02, percent-clipped=2.0 +2023-02-09 02:54:12,691 INFO [train.py:901] (1/4) Epoch 29, batch 7500, loss[loss=0.2001, simple_loss=0.2766, pruned_loss=0.06184, over 7646.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2802, pruned_loss=0.05578, over 1612878.64 frames. ], batch size: 19, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:54:48,746 INFO [train.py:901] (1/4) Epoch 29, batch 7550, loss[loss=0.2394, simple_loss=0.3248, pruned_loss=0.07704, over 8503.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05631, over 1612350.17 frames. ], batch size: 28, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:55:00,147 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0978, 2.2597, 1.8181, 2.9466, 1.2767, 1.6688, 1.9746, 2.2693], + device='cuda:1'), covar=tensor([0.0681, 0.0735, 0.0907, 0.0320, 0.1214, 0.1259, 0.0908, 0.0749], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0195, 0.0244, 0.0213, 0.0202, 0.0246, 0.0249, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 02:55:24,355 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.424e+02 2.935e+02 3.534e+02 7.288e+02, threshold=5.870e+02, percent-clipped=3.0 +2023-02-09 02:55:24,375 INFO [train.py:901] (1/4) Epoch 29, batch 7600, loss[loss=0.2141, simple_loss=0.3057, pruned_loss=0.06122, over 8629.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2806, pruned_loss=0.0561, over 1613517.63 frames. ], batch size: 34, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:55:24,644 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7858, 1.6374, 2.4937, 1.9340, 2.1004, 1.7796, 1.6213, 1.0602], + device='cuda:1'), covar=tensor([0.7715, 0.6241, 0.2340, 0.4205, 0.3277, 0.4801, 0.3088, 0.5738], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1037, 0.0845, 0.1009, 0.1033, 0.0945, 0.0781, 0.0859], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 02:56:01,024 INFO [train.py:901] (1/4) Epoch 29, batch 7650, loss[loss=0.179, simple_loss=0.2671, pruned_loss=0.04546, over 8136.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2793, pruned_loss=0.05542, over 1613505.16 frames. 
], batch size: 22, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:56:12,957 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.7651, 1.5176, 3.9392, 1.6118, 3.4989, 3.2767, 3.5713, 3.5013], + device='cuda:1'), covar=tensor([0.0781, 0.4638, 0.0768, 0.4381, 0.1182, 0.1138, 0.0736, 0.0773], + device='cuda:1'), in_proj_covar=tensor([0.0693, 0.0670, 0.0754, 0.0662, 0.0751, 0.0641, 0.0649, 0.0728], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 02:56:16,597 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233992.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:23,674 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234001.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:35,522 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234017.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:38,038 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.208e+02 2.731e+02 3.331e+02 6.993e+02, threshold=5.462e+02, percent-clipped=2.0 +2023-02-09 02:56:38,058 INFO [train.py:901] (1/4) Epoch 29, batch 7700, loss[loss=0.1491, simple_loss=0.239, pruned_loss=0.02962, over 8084.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2785, pruned_loss=0.05482, over 1616098.02 frames. ], batch size: 21, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:56:49,501 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 02:56:53,146 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234043.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:12,872 INFO [train.py:901] (1/4) Epoch 29, batch 7750, loss[loss=0.1756, simple_loss=0.2658, pruned_loss=0.04276, over 8199.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2791, pruned_loss=0.05523, over 1618554.54 frames. ], batch size: 23, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:57:13,650 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234072.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:45,242 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:48,459 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.355e+02 2.809e+02 3.505e+02 7.382e+02, threshold=5.617e+02, percent-clipped=2.0 +2023-02-09 02:57:48,479 INFO [train.py:901] (1/4) Epoch 29, batch 7800, loss[loss=0.1683, simple_loss=0.2598, pruned_loss=0.03845, over 7813.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2799, pruned_loss=0.05568, over 1616590.15 frames. ], batch size: 20, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:58:24,431 INFO [train.py:901] (1/4) Epoch 29, batch 7850, loss[loss=0.1745, simple_loss=0.2503, pruned_loss=0.04937, over 7420.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2798, pruned_loss=0.0554, over 1614045.91 frames. 
], batch size: 17, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:58:36,023 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234187.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:58:58,930 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234220.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:58:59,498 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.264e+02 2.745e+02 3.500e+02 8.048e+02, threshold=5.490e+02, percent-clipped=4.0 +2023-02-09 02:58:59,518 INFO [train.py:901] (1/4) Epoch 29, batch 7900, loss[loss=0.1822, simple_loss=0.273, pruned_loss=0.04569, over 8200.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.28, pruned_loss=0.05555, over 1617310.01 frames. ], batch size: 23, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:59:19,147 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234249.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:59:34,076 INFO [train.py:901] (1/4) Epoch 29, batch 7950, loss[loss=0.201, simple_loss=0.2804, pruned_loss=0.06074, over 7538.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2808, pruned_loss=0.0561, over 1614860.23 frames. ], batch size: 18, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:59:40,221 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.06 vs. limit=5.0 +2023-02-09 03:00:10,405 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.366e+02 2.676e+02 3.633e+02 8.832e+02, threshold=5.352e+02, percent-clipped=6.0 +2023-02-09 03:00:10,425 INFO [train.py:901] (1/4) Epoch 29, batch 8000, loss[loss=0.2021, simple_loss=0.3027, pruned_loss=0.05077, over 8690.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2799, pruned_loss=0.05542, over 1614472.08 frames. ], batch size: 34, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 03:00:44,624 INFO [train.py:901] (1/4) Epoch 29, batch 8050, loss[loss=0.1949, simple_loss=0.256, pruned_loss=0.06687, over 6783.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2791, pruned_loss=0.05622, over 1585591.66 frames. ], batch size: 15, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 03:00:45,524 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234372.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:00:55,735 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234387.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:01:02,574 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234397.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:01:19,720 WARNING [train.py:1067] (1/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-09 03:01:23,762 INFO [train.py:901] (1/4) Epoch 30, batch 0, loss[loss=0.2403, simple_loss=0.3177, pruned_loss=0.08151, over 8632.00 frames. ], tot_loss[loss=0.2403, simple_loss=0.3177, pruned_loss=0.08151, over 8632.00 frames. ], batch size: 39, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:01:23,762 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 03:01:35,938 INFO [train.py:935] (1/4) Epoch 30, validation: loss=0.1704, simple_loss=0.27, pruned_loss=0.03537, over 944034.00 frames. 
+2023-02-09 03:01:35,940 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 03:01:47,831 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.332e+02 2.743e+02 3.464e+02 7.498e+02, threshold=5.486e+02, percent-clipped=3.0 +2023-02-09 03:01:52,025 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-09 03:02:04,488 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234443.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:12,494 INFO [train.py:901] (1/4) Epoch 30, batch 50, loss[loss=0.1816, simple_loss=0.2727, pruned_loss=0.04528, over 8615.00 frames. ], tot_loss[loss=0.2029, simple_loss=0.2853, pruned_loss=0.06021, over 363475.76 frames. ], batch size: 34, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:02:23,084 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234468.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:28,111 WARNING [train.py:1067] (1/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-09 03:02:49,595 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234502.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:50,896 INFO [train.py:901] (1/4) Epoch 30, batch 100, loss[loss=0.2183, simple_loss=0.3055, pruned_loss=0.06558, over 8436.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2799, pruned_loss=0.05586, over 638730.58 frames. ], batch size: 27, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:02:54,610 WARNING [train.py:1067] (1/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-09 03:03:03,348 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.294e+02 2.794e+02 3.449e+02 7.855e+02, threshold=5.588e+02, percent-clipped=7.0 +2023-02-09 03:03:28,105 INFO [train.py:901] (1/4) Epoch 30, batch 150, loss[loss=0.2084, simple_loss=0.2897, pruned_loss=0.06357, over 8466.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2799, pruned_loss=0.05559, over 855098.37 frames. ], batch size: 27, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:03:35,360 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:03:56,911 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234593.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:04:04,594 INFO [train.py:901] (1/4) Epoch 30, batch 200, loss[loss=0.2026, simple_loss=0.2659, pruned_loss=0.06962, over 7679.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05617, over 1018631.40 frames. 
], batch size: 18, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:04:13,772 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3840, 2.2420, 2.7550, 2.4035, 2.7920, 2.3990, 2.3253, 1.8752], + device='cuda:1'), covar=tensor([0.5113, 0.4780, 0.2166, 0.3974, 0.2633, 0.3296, 0.1885, 0.5357], + device='cuda:1'), in_proj_covar=tensor([0.0973, 0.1039, 0.0847, 0.1012, 0.1034, 0.0945, 0.0781, 0.0861], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:04:16,945 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.329e+02 2.797e+02 3.759e+02 1.341e+03, threshold=5.593e+02, percent-clipped=8.0 +2023-02-09 03:04:40,208 INFO [train.py:901] (1/4) Epoch 30, batch 250, loss[loss=0.2019, simple_loss=0.2967, pruned_loss=0.05356, over 8467.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2821, pruned_loss=0.05664, over 1152826.29 frames. ], batch size: 49, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:04:48,445 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-09 03:04:50,779 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8793, 1.7619, 2.5063, 1.5498, 1.3686, 2.5033, 0.5607, 1.4806], + device='cuda:1'), covar=tensor([0.1462, 0.1104, 0.0329, 0.1126, 0.2349, 0.0366, 0.1752, 0.1345], + device='cuda:1'), in_proj_covar=tensor([0.0203, 0.0208, 0.0139, 0.0227, 0.0280, 0.0149, 0.0174, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 03:04:57,626 WARNING [train.py:1067] (1/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-09 03:04:58,540 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234679.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:04,642 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234687.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:16,612 INFO [train.py:901] (1/4) Epoch 30, batch 300, loss[loss=0.2095, simple_loss=0.2996, pruned_loss=0.05967, over 8469.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2806, pruned_loss=0.05543, over 1255419.62 frames. ], batch size: 25, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:05:19,746 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234708.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:29,385 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.381e+02 2.888e+02 3.640e+02 7.253e+02, threshold=5.776e+02, percent-clipped=4.0 +2023-02-09 03:05:45,724 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:46,513 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6887, 2.0188, 2.0530, 1.4652, 2.1167, 1.5871, 0.5952, 1.9508], + device='cuda:1'), covar=tensor([0.0635, 0.0345, 0.0345, 0.0583, 0.0430, 0.1021, 0.1035, 0.0307], + device='cuda:1'), in_proj_covar=tensor([0.0477, 0.0416, 0.0372, 0.0465, 0.0400, 0.0555, 0.0405, 0.0443], + device='cuda:1'), out_proj_covar=tensor([1.2618e-04, 1.0768e-04, 9.6869e-05, 1.2151e-04, 1.0482e-04, 1.5420e-04, + 1.0793e-04, 1.1575e-04], device='cuda:1') +2023-02-09 03:05:52,652 INFO [train.py:901] (1/4) Epoch 30, batch 350, loss[loss=0.1691, simple_loss=0.2424, pruned_loss=0.04796, over 7700.00 frames. 
], tot_loss[loss=0.1981, simple_loss=0.2826, pruned_loss=0.05685, over 1334439.22 frames. ], batch size: 18, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:05:55,585 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234758.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:14,218 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234783.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:27,360 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234800.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:30,081 INFO [train.py:901] (1/4) Epoch 30, batch 400, loss[loss=0.2154, simple_loss=0.3002, pruned_loss=0.06531, over 8345.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2842, pruned_loss=0.05797, over 1397492.24 frames. ], batch size: 25, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:06:42,234 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.513e+02 2.926e+02 3.697e+02 1.204e+03, threshold=5.852e+02, percent-clipped=7.0 +2023-02-09 03:07:06,512 INFO [train.py:901] (1/4) Epoch 30, batch 450, loss[loss=0.2136, simple_loss=0.3005, pruned_loss=0.06329, over 8034.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.283, pruned_loss=0.05737, over 1446058.74 frames. ], batch size: 22, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:07:38,965 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234900.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:07:42,320 INFO [train.py:901] (1/4) Epoch 30, batch 500, loss[loss=0.1888, simple_loss=0.282, pruned_loss=0.04777, over 8291.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2812, pruned_loss=0.05639, over 1484775.32 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:07:54,764 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.393e+02 2.948e+02 3.833e+02 6.284e+02, threshold=5.896e+02, percent-clipped=1.0 +2023-02-09 03:08:04,859 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234935.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:18,293 INFO [train.py:901] (1/4) Epoch 30, batch 550, loss[loss=0.1707, simple_loss=0.2566, pruned_loss=0.04236, over 7652.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2805, pruned_loss=0.05627, over 1510495.19 frames. ], batch size: 19, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:08:22,852 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234960.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:26,367 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234964.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:34,787 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234976.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:36,243 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234978.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:44,116 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234989.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:54,539 INFO [train.py:901] (1/4) Epoch 30, batch 600, loss[loss=0.1669, simple_loss=0.2614, pruned_loss=0.03619, over 8099.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2802, pruned_loss=0.05578, over 1533013.76 frames. 
], batch size: 21, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:09:06,004 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.485e+02 2.961e+02 3.544e+02 6.861e+02, threshold=5.922e+02, percent-clipped=1.0 +2023-02-09 03:09:10,828 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-09 03:09:13,652 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235031.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:09:29,246 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.86 vs. limit=5.0 +2023-02-09 03:09:29,966 INFO [train.py:901] (1/4) Epoch 30, batch 650, loss[loss=0.1964, simple_loss=0.2834, pruned_loss=0.05472, over 8142.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2806, pruned_loss=0.05601, over 1552392.52 frames. ], batch size: 22, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:09:54,554 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235088.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:05,672 INFO [train.py:901] (1/4) Epoch 30, batch 700, loss[loss=0.2366, simple_loss=0.3224, pruned_loss=0.07538, over 8809.00 frames. ], tot_loss[loss=0.197, simple_loss=0.281, pruned_loss=0.05648, over 1563447.09 frames. ], batch size: 49, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:10:17,689 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.477e+02 3.055e+02 3.959e+02 7.285e+02, threshold=6.109e+02, percent-clipped=6.0 +2023-02-09 03:10:33,427 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:34,917 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235146.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:41,061 INFO [train.py:901] (1/4) Epoch 30, batch 750, loss[loss=0.1793, simple_loss=0.2652, pruned_loss=0.04668, over 8090.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05636, over 1578541.78 frames. ], batch size: 21, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:10:59,027 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-09 03:10:59,126 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.2884, 3.1866, 3.0118, 1.5755, 2.9365, 3.0243, 2.8355, 2.8990], + device='cuda:1'), covar=tensor([0.1085, 0.0767, 0.1232, 0.4222, 0.1108, 0.1088, 0.1595, 0.0999], + device='cuda:1'), in_proj_covar=tensor([0.0551, 0.0461, 0.0451, 0.0562, 0.0445, 0.0470, 0.0446, 0.0414], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:11:08,051 WARNING [train.py:1067] (1/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-09 03:11:17,084 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:11:17,649 INFO [train.py:901] (1/4) Epoch 30, batch 800, loss[loss=0.2101, simple_loss=0.2862, pruned_loss=0.06697, over 8034.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2802, pruned_loss=0.05573, over 1586679.17 frames. 
], batch size: 22, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:11:30,473 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.417e+02 2.836e+02 3.328e+02 8.160e+02, threshold=5.671e+02, percent-clipped=2.0 +2023-02-09 03:11:46,749 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235244.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:11:53,486 INFO [train.py:901] (1/4) Epoch 30, batch 850, loss[loss=0.181, simple_loss=0.2526, pruned_loss=0.05465, over 7218.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2805, pruned_loss=0.05608, over 1591198.77 frames. ], batch size: 16, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:11:57,054 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235259.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:12:07,383 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235273.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:12:29,574 INFO [train.py:901] (1/4) Epoch 30, batch 900, loss[loss=0.1821, simple_loss=0.2702, pruned_loss=0.04705, over 8021.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2801, pruned_loss=0.05588, over 1597086.79 frames. ], batch size: 22, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:12:38,254 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.86 vs. limit=2.0 +2023-02-09 03:12:41,044 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235320.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:12:41,602 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.438e+02 3.006e+02 3.865e+02 6.238e+02, threshold=6.012e+02, percent-clipped=6.0 +2023-02-09 03:12:42,346 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235322.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:13:02,599 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.1578, 1.6297, 4.3463, 1.6001, 3.8023, 3.6321, 3.8832, 3.8467], + device='cuda:1'), covar=tensor([0.0655, 0.4631, 0.0567, 0.4768, 0.1261, 0.1046, 0.0705, 0.0749], + device='cuda:1'), in_proj_covar=tensor([0.0696, 0.0673, 0.0758, 0.0667, 0.0755, 0.0645, 0.0652, 0.0731], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:13:05,256 INFO [train.py:901] (1/4) Epoch 30, batch 950, loss[loss=0.2201, simple_loss=0.3023, pruned_loss=0.06888, over 8251.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2804, pruned_loss=0.05587, over 1602285.29 frames. 
], batch size: 24, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:13:08,759 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235359.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:13:19,186 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9994, 1.7726, 2.0110, 1.8878, 1.9301, 2.0388, 1.9328, 0.8221], + device='cuda:1'), covar=tensor([0.5611, 0.4636, 0.2389, 0.3567, 0.2520, 0.3382, 0.1991, 0.5126], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1040, 0.0849, 0.1011, 0.1033, 0.0946, 0.0779, 0.0862], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:13:21,830 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235378.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:13:32,984 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-09 03:13:39,423 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235402.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:13:40,618 INFO [train.py:901] (1/4) Epoch 30, batch 1000, loss[loss=0.1963, simple_loss=0.2839, pruned_loss=0.05428, over 8503.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2801, pruned_loss=0.05561, over 1608308.73 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:13:52,264 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.479e+02 3.055e+02 4.205e+02 7.814e+02, threshold=6.110e+02, percent-clipped=3.0 +2023-02-09 03:13:56,457 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235427.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:02,519 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235435.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:03,943 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235437.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:08,437 WARNING [train.py:1067] (1/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-09 03:14:14,981 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-09 03:14:15,939 INFO [train.py:901] (1/4) Epoch 30, batch 1050, loss[loss=0.2113, simple_loss=0.2848, pruned_loss=0.06888, over 7926.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2816, pruned_loss=0.05672, over 1615227.77 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:14:16,045 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8399, 3.8216, 3.4613, 1.7332, 3.4025, 3.5198, 3.3925, 3.3891], + device='cuda:1'), covar=tensor([0.0952, 0.0636, 0.1138, 0.4971, 0.0988, 0.0970, 0.1406, 0.0886], + device='cuda:1'), in_proj_covar=tensor([0.0550, 0.0459, 0.0450, 0.0562, 0.0444, 0.0469, 0.0444, 0.0412], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:14:19,402 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235459.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:21,156 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-09 03:14:36,653 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:14:50,591 INFO [train.py:901] (1/4) Epoch 30, batch 1100, loss[loss=0.1522, simple_loss=0.2341, pruned_loss=0.03516, over 7243.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2814, pruned_loss=0.05624, over 1615858.18 frames. ], batch size: 16, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:14:59,086 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235515.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:15:03,870 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.444e+02 3.135e+02 3.900e+02 6.752e+02, threshold=6.270e+02, percent-clipped=2.0 +2023-02-09 03:15:11,142 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8347, 2.0298, 2.1213, 1.4164, 2.2656, 1.5181, 0.7881, 2.0208], + device='cuda:1'), covar=tensor([0.0736, 0.0422, 0.0342, 0.0745, 0.0504, 0.1048, 0.1146, 0.0395], + device='cuda:1'), in_proj_covar=tensor([0.0483, 0.0422, 0.0375, 0.0471, 0.0404, 0.0561, 0.0410, 0.0448], + device='cuda:1'), out_proj_covar=tensor([1.2799e-04, 1.0900e-04, 9.7585e-05, 1.2291e-04, 1.0577e-04, 1.5615e-04, + 1.0930e-04, 1.1730e-04], device='cuda:1') +2023-02-09 03:15:17,338 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235540.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:15:26,821 INFO [train.py:901] (1/4) Epoch 30, batch 1150, loss[loss=0.2288, simple_loss=0.3031, pruned_loss=0.07719, over 8559.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2808, pruned_loss=0.05605, over 1615611.61 frames. ], batch size: 31, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:15:35,848 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-09 03:16:03,362 INFO [train.py:901] (1/4) Epoch 30, batch 1200, loss[loss=0.2317, simple_loss=0.3005, pruned_loss=0.0815, over 7071.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2798, pruned_loss=0.05554, over 1611479.74 frames. ], batch size: 72, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:16:11,496 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235615.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:16:12,764 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235617.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:16:16,062 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.378e+02 2.945e+02 3.640e+02 8.540e+02, threshold=5.890e+02, percent-clipped=4.0 +2023-02-09 03:16:30,035 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235640.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:16:39,500 INFO [train.py:901] (1/4) Epoch 30, batch 1250, loss[loss=0.1697, simple_loss=0.2457, pruned_loss=0.04681, over 7798.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.28, pruned_loss=0.05585, over 1610450.54 frames. ], batch size: 19, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:16:43,892 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.41 vs. 
limit=5.0 +2023-02-09 03:17:06,188 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235691.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:07,625 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235693.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:10,371 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235697.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:15,249 INFO [train.py:901] (1/4) Epoch 30, batch 1300, loss[loss=0.2129, simple_loss=0.2923, pruned_loss=0.0668, over 8559.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2799, pruned_loss=0.05595, over 1612438.41 frames. ], batch size: 31, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:17:23,826 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235716.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:25,224 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235718.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:27,370 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8874, 1.6291, 2.0042, 1.8248, 1.9829, 1.9368, 1.7952, 0.9337], + device='cuda:1'), covar=tensor([0.6176, 0.5030, 0.2169, 0.3366, 0.2544, 0.3175, 0.1997, 0.5157], + device='cuda:1'), in_proj_covar=tensor([0.0964, 0.1030, 0.0841, 0.1002, 0.1025, 0.0939, 0.0773, 0.0855], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:17:27,734 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.444e+02 2.774e+02 3.314e+02 6.214e+02, threshold=5.548e+02, percent-clipped=2.0 +2023-02-09 03:17:27,830 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235722.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:17:34,611 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235732.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:17:35,435 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. limit=5.0 +2023-02-09 03:17:50,118 INFO [train.py:901] (1/4) Epoch 30, batch 1350, loss[loss=0.2301, simple_loss=0.3164, pruned_loss=0.07189, over 8078.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2803, pruned_loss=0.05636, over 1611856.12 frames. ], batch size: 21, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:18:26,587 INFO [train.py:901] (1/4) Epoch 30, batch 1400, loss[loss=0.2005, simple_loss=0.2888, pruned_loss=0.05615, over 8464.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2804, pruned_loss=0.05615, over 1615237.79 frames. ], batch size: 25, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:18:30,246 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235809.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:18:39,097 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.338e+02 2.741e+02 3.583e+02 7.907e+02, threshold=5.482e+02, percent-clipped=6.0 +2023-02-09 03:18:49,551 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235837.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:19:00,850 INFO [train.py:901] (1/4) Epoch 30, batch 1450, loss[loss=0.1882, simple_loss=0.2721, pruned_loss=0.05217, over 8322.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2812, pruned_loss=0.05655, over 1612215.26 frames. 
], batch size: 25, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:19:07,596 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 03:19:22,541 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-09 03:19:38,200 INFO [train.py:901] (1/4) Epoch 30, batch 1500, loss[loss=0.1992, simple_loss=0.2861, pruned_loss=0.05619, over 8600.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2803, pruned_loss=0.05621, over 1612906.34 frames. ], batch size: 31, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:19:42,078 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-09 03:19:51,220 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.306e+02 2.900e+02 3.560e+02 8.272e+02, threshold=5.801e+02, percent-clipped=7.0 +2023-02-09 03:19:56,726 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0 +2023-02-09 03:20:14,191 INFO [train.py:901] (1/4) Epoch 30, batch 1550, loss[loss=0.1654, simple_loss=0.2501, pruned_loss=0.04032, over 7931.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2795, pruned_loss=0.05524, over 1616972.12 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:20:17,111 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9759, 1.3081, 4.3793, 1.8590, 2.4596, 4.9438, 5.1100, 4.3298], + device='cuda:1'), covar=tensor([0.1272, 0.2164, 0.0253, 0.1930, 0.1185, 0.0187, 0.0350, 0.0535], + device='cuda:1'), in_proj_covar=tensor([0.0309, 0.0329, 0.0296, 0.0329, 0.0327, 0.0282, 0.0447, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 03:20:22,056 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-09 03:20:27,550 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8345, 1.6783, 2.3292, 1.5375, 1.4515, 2.3214, 0.8273, 1.6094], + device='cuda:1'), covar=tensor([0.1319, 0.1116, 0.0333, 0.1019, 0.2105, 0.0372, 0.1625, 0.1322], + device='cuda:1'), in_proj_covar=tensor([0.0203, 0.0208, 0.0139, 0.0226, 0.0280, 0.0149, 0.0175, 0.0202], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 03:20:38,870 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235988.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:20:51,290 INFO [train.py:901] (1/4) Epoch 30, batch 1600, loss[loss=0.213, simple_loss=0.2928, pruned_loss=0.06662, over 8707.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2798, pruned_loss=0.05602, over 1612836.85 frames. 
], batch size: 34, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:20:57,833 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236013.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:21:00,082 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6685, 2.5886, 1.8893, 2.3892, 2.2872, 1.7172, 2.1876, 2.2460], + device='cuda:1'), covar=tensor([0.1622, 0.0436, 0.1319, 0.0730, 0.0787, 0.1604, 0.1130, 0.1207], + device='cuda:1'), in_proj_covar=tensor([0.0360, 0.0246, 0.0349, 0.0317, 0.0304, 0.0352, 0.0354, 0.0325], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 03:21:04,932 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.693e+02 3.134e+02 4.092e+02 8.333e+02, threshold=6.267e+02, percent-clipped=7.0 +2023-02-09 03:21:15,453 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236036.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:21:19,088 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236041.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:21:28,375 INFO [train.py:901] (1/4) Epoch 30, batch 1650, loss[loss=0.2464, simple_loss=0.3238, pruned_loss=0.08455, over 8793.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2809, pruned_loss=0.05648, over 1615341.60 frames. ], batch size: 30, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:21:56,788 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236093.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:22:04,819 INFO [train.py:901] (1/4) Epoch 30, batch 1700, loss[loss=0.19, simple_loss=0.2738, pruned_loss=0.05311, over 8659.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2799, pruned_loss=0.05602, over 1614246.06 frames. ], batch size: 34, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:22:15,382 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236118.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:22:17,735 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.438e+02 2.804e+02 3.459e+02 5.840e+02, threshold=5.608e+02, percent-clipped=0.0 +2023-02-09 03:22:34,511 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.57 vs. limit=5.0 +2023-02-09 03:22:40,514 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236153.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:22:41,159 INFO [train.py:901] (1/4) Epoch 30, batch 1750, loss[loss=0.208, simple_loss=0.2938, pruned_loss=0.06111, over 8282.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2794, pruned_loss=0.05585, over 1612033.75 frames. 
], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:22:42,647 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236156.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:23:07,920 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.1658, 1.8849, 2.2689, 2.0297, 2.2960, 2.2425, 2.1033, 1.2357], + device='cuda:1'), covar=tensor([0.6077, 0.5246, 0.2346, 0.4049, 0.2686, 0.3353, 0.1944, 0.5681], + device='cuda:1'), in_proj_covar=tensor([0.0972, 0.1037, 0.0848, 0.1008, 0.1032, 0.0946, 0.0778, 0.0860], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:23:16,214 INFO [train.py:901] (1/4) Epoch 30, batch 1800, loss[loss=0.2166, simple_loss=0.2998, pruned_loss=0.06671, over 8326.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2805, pruned_loss=0.05637, over 1614181.84 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:23:19,551 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.97 vs. limit=2.0 +2023-02-09 03:23:29,387 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.503e+02 3.165e+02 3.852e+02 7.294e+02, threshold=6.329e+02, percent-clipped=5.0 +2023-02-09 03:23:31,000 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0119, 1.8651, 2.2916, 2.0561, 2.2698, 2.1026, 1.9442, 1.1661], + device='cuda:1'), covar=tensor([0.5786, 0.4762, 0.2083, 0.3596, 0.2524, 0.3044, 0.1960, 0.5089], + device='cuda:1'), in_proj_covar=tensor([0.0970, 0.1035, 0.0846, 0.1005, 0.1030, 0.0945, 0.0776, 0.0858], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:23:52,507 INFO [train.py:901] (1/4) Epoch 30, batch 1850, loss[loss=0.2698, simple_loss=0.3361, pruned_loss=0.1017, over 8037.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2814, pruned_loss=0.05701, over 1609464.61 frames. ], batch size: 22, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:24:03,962 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236268.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:24:28,815 INFO [train.py:901] (1/4) Epoch 30, batch 1900, loss[loss=0.2639, simple_loss=0.3319, pruned_loss=0.09792, over 7012.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2803, pruned_loss=0.05589, over 1609195.02 frames. ], batch size: 71, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:24:41,167 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-09 03:24:41,427 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.325e+02 3.004e+02 3.832e+02 8.674e+02, threshold=6.008e+02, percent-clipped=3.0 +2023-02-09 03:25:01,724 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 03:25:05,198 INFO [train.py:901] (1/4) Epoch 30, batch 1950, loss[loss=0.2028, simple_loss=0.2906, pruned_loss=0.0575, over 8626.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2796, pruned_loss=0.0558, over 1607616.54 frames. ], batch size: 34, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:25:13,614 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390 from training. 
Duration: 27.92 +2023-02-09 03:25:24,199 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:25:25,016 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.4642, 2.6992, 2.9344, 1.8536, 3.0928, 2.0342, 1.5577, 2.4628], + device='cuda:1'), covar=tensor([0.0948, 0.0431, 0.0388, 0.0853, 0.0594, 0.0944, 0.1126, 0.0565], + device='cuda:1'), in_proj_covar=tensor([0.0477, 0.0418, 0.0373, 0.0465, 0.0400, 0.0555, 0.0405, 0.0442], + device='cuda:1'), out_proj_covar=tensor([1.2627e-04, 1.0814e-04, 9.7134e-05, 1.2130e-04, 1.0478e-04, 1.5435e-04, + 1.0794e-04, 1.1561e-04], device='cuda:1') +2023-02-09 03:25:25,037 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9132, 2.4360, 4.0263, 1.7841, 2.9242, 2.5063, 2.0197, 2.9639], + device='cuda:1'), covar=tensor([0.1856, 0.2643, 0.0811, 0.4669, 0.2041, 0.3176, 0.2386, 0.2454], + device='cuda:1'), in_proj_covar=tensor([0.0546, 0.0646, 0.0569, 0.0681, 0.0670, 0.0622, 0.0573, 0.0651], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:25:33,317 WARNING [train.py:1067] (1/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 03:25:41,055 INFO [train.py:901] (1/4) Epoch 30, batch 2000, loss[loss=0.2228, simple_loss=0.3101, pruned_loss=0.06776, over 8428.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05669, over 1612982.30 frames. ], batch size: 49, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:25:43,357 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.5495, 2.3322, 2.9697, 2.4442, 3.0063, 2.5585, 2.4850, 2.0552], + device='cuda:1'), covar=tensor([0.5905, 0.5540, 0.2410, 0.4527, 0.3071, 0.3500, 0.1916, 0.5863], + device='cuda:1'), in_proj_covar=tensor([0.0970, 0.1036, 0.0846, 0.1007, 0.1032, 0.0947, 0.0778, 0.0857], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:25:46,881 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236412.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:25:53,568 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.406e+02 2.956e+02 3.756e+02 9.982e+02, threshold=5.913e+02, percent-clipped=8.0 +2023-02-09 03:26:04,555 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236437.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:26:16,258 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6411, 1.8380, 1.5513, 2.3107, 1.0815, 1.4348, 1.7368, 1.8640], + device='cuda:1'), covar=tensor([0.0777, 0.0709, 0.0969, 0.0388, 0.1024, 0.1208, 0.0685, 0.0707], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0215, 0.0201, 0.0247, 0.0251, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 03:26:16,752 INFO [train.py:901] (1/4) Epoch 30, batch 2050, loss[loss=0.2255, simple_loss=0.306, pruned_loss=0.0725, over 8033.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.281, pruned_loss=0.05658, over 1610228.77 frames. 
], batch size: 22, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:26:28,372 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9628, 1.6045, 1.3744, 1.4800, 1.3030, 1.2765, 1.2209, 1.2280], + device='cuda:1'), covar=tensor([0.1326, 0.0576, 0.1483, 0.0661, 0.0862, 0.1678, 0.1151, 0.1018], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0245, 0.0348, 0.0316, 0.0304, 0.0351, 0.0354, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 03:26:47,592 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:26:53,532 INFO [train.py:901] (1/4) Epoch 30, batch 2100, loss[loss=0.203, simple_loss=0.2931, pruned_loss=0.05648, over 8501.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2807, pruned_loss=0.05623, over 1612352.01 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:27:06,242 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.477e+02 3.089e+02 3.892e+02 8.089e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-09 03:27:07,829 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236524.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:27:16,738 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3959, 1.2257, 2.3856, 1.3472, 2.2797, 2.5541, 2.7361, 2.1584], + device='cuda:1'), covar=tensor([0.1177, 0.1554, 0.0402, 0.2098, 0.0718, 0.0382, 0.0584, 0.0659], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0330, 0.0298, 0.0329, 0.0329, 0.0282, 0.0449, 0.0312], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 03:27:25,243 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:27:28,497 INFO [train.py:901] (1/4) Epoch 30, batch 2150, loss[loss=0.1915, simple_loss=0.2713, pruned_loss=0.05589, over 8125.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2793, pruned_loss=0.05588, over 1606341.41 frames. ], batch size: 22, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:27:59,354 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=3.84 vs. limit=5.0 +2023-02-09 03:28:04,587 INFO [train.py:901] (1/4) Epoch 30, batch 2200, loss[loss=0.2174, simple_loss=0.303, pruned_loss=0.06585, over 8630.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2791, pruned_loss=0.05595, over 1603490.27 frames. ], batch size: 31, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:28:06,910 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.7066, 4.7671, 4.3072, 2.0619, 4.2077, 4.3884, 4.2324, 4.2384], + device='cuda:1'), covar=tensor([0.0703, 0.0474, 0.0971, 0.4941, 0.0912, 0.0851, 0.1253, 0.0626], + device='cuda:1'), in_proj_covar=tensor([0.0552, 0.0462, 0.0450, 0.0565, 0.0447, 0.0474, 0.0450, 0.0415], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:28:18,768 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.435e+02 2.816e+02 3.564e+02 9.413e+02, threshold=5.632e+02, percent-clipped=3.0 +2023-02-09 03:28:40,981 INFO [train.py:901] (1/4) Epoch 30, batch 2250, loss[loss=0.1784, simple_loss=0.2594, pruned_loss=0.04873, over 8076.00 frames. 
], tot_loss[loss=0.1956, simple_loss=0.2796, pruned_loss=0.05582, over 1605946.31 frames. ], batch size: 21, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:29:04,195 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. limit=2.0 +2023-02-09 03:29:16,926 INFO [train.py:901] (1/4) Epoch 30, batch 2300, loss[loss=0.2113, simple_loss=0.3033, pruned_loss=0.05968, over 8510.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2807, pruned_loss=0.05613, over 1611641.64 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:29:29,254 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.489e+02 3.036e+02 4.215e+02 7.962e+02, threshold=6.071e+02, percent-clipped=6.0 +2023-02-09 03:29:50,614 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236751.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:29:52,574 INFO [train.py:901] (1/4) Epoch 30, batch 2350, loss[loss=0.1962, simple_loss=0.2703, pruned_loss=0.06106, over 7537.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2801, pruned_loss=0.05575, over 1611804.78 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:30:08,614 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:30:29,051 INFO [train.py:901] (1/4) Epoch 30, batch 2400, loss[loss=0.1842, simple_loss=0.2602, pruned_loss=0.05409, over 8091.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2798, pruned_loss=0.05563, over 1613849.59 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:30:42,217 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.281e+02 2.685e+02 3.729e+02 8.099e+02, threshold=5.371e+02, percent-clipped=9.0 +2023-02-09 03:31:05,192 INFO [train.py:901] (1/4) Epoch 30, batch 2450, loss[loss=0.1642, simple_loss=0.2466, pruned_loss=0.04088, over 7823.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2793, pruned_loss=0.05584, over 1618223.26 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:31:39,710 INFO [train.py:901] (1/4) Epoch 30, batch 2500, loss[loss=0.2336, simple_loss=0.3268, pruned_loss=0.07022, over 8326.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2792, pruned_loss=0.05556, over 1623317.47 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:31:43,942 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236910.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:31:52,882 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.373e+02 3.086e+02 3.767e+02 7.222e+02, threshold=6.171e+02, percent-clipped=6.0 +2023-02-09 03:32:13,801 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2241, 3.6912, 2.4005, 2.9577, 2.8906, 2.0935, 2.8889, 3.0697], + device='cuda:1'), covar=tensor([0.1719, 0.0342, 0.1183, 0.0847, 0.0878, 0.1533, 0.1182, 0.1123], + device='cuda:1'), in_proj_covar=tensor([0.0356, 0.0242, 0.0345, 0.0314, 0.0302, 0.0350, 0.0351, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 03:32:16,579 INFO [train.py:901] (1/4) Epoch 30, batch 2550, loss[loss=0.2058, simple_loss=0.2963, pruned_loss=0.05761, over 8524.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2791, pruned_loss=0.05557, over 1621857.70 frames. 
], batch size: 28, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:32:54,184 INFO [train.py:901] (1/4) Epoch 30, batch 2600, loss[loss=0.1726, simple_loss=0.2479, pruned_loss=0.04865, over 7542.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2794, pruned_loss=0.05596, over 1617818.69 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:33:06,912 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.458e+02 3.021e+02 3.974e+02 8.394e+02, threshold=6.042e+02, percent-clipped=5.0 +2023-02-09 03:33:12,329 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.20 vs. limit=5.0 +2023-02-09 03:33:30,295 INFO [train.py:901] (1/4) Epoch 30, batch 2650, loss[loss=0.2024, simple_loss=0.2805, pruned_loss=0.0622, over 7809.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2801, pruned_loss=0.05654, over 1617198.44 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:34:06,433 INFO [train.py:901] (1/4) Epoch 30, batch 2700, loss[loss=0.2104, simple_loss=0.2841, pruned_loss=0.0683, over 7802.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2796, pruned_loss=0.05574, over 1617721.87 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:34:07,585 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-09 03:34:18,974 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.464e+02 3.015e+02 4.068e+02 7.247e+02, threshold=6.030e+02, percent-clipped=1.0 +2023-02-09 03:34:41,475 INFO [train.py:901] (1/4) Epoch 30, batch 2750, loss[loss=0.2001, simple_loss=0.2876, pruned_loss=0.05629, over 8454.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2792, pruned_loss=0.05587, over 1611989.69 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:18,232 INFO [train.py:901] (1/4) Epoch 30, batch 2800, loss[loss=0.2099, simple_loss=0.2959, pruned_loss=0.06197, over 8500.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2798, pruned_loss=0.05593, over 1615713.01 frames. ], batch size: 26, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:20,498 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.6165, 1.5643, 1.9457, 1.5748, 1.1128, 1.6162, 2.2187, 1.9313], + device='cuda:1'), covar=tensor([0.0491, 0.1242, 0.1581, 0.1430, 0.0580, 0.1489, 0.0619, 0.0642], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:1') +2023-02-09 03:35:31,339 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.300e+02 2.824e+02 3.573e+02 8.919e+02, threshold=5.648e+02, percent-clipped=3.0 +2023-02-09 03:35:45,297 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4908, 1.7256, 4.6785, 1.9196, 4.1804, 3.9278, 4.2386, 4.1565], + device='cuda:1'), covar=tensor([0.0521, 0.4184, 0.0519, 0.4209, 0.0936, 0.0866, 0.0545, 0.0560], + device='cuda:1'), in_proj_covar=tensor([0.0694, 0.0671, 0.0752, 0.0670, 0.0756, 0.0645, 0.0655, 0.0729], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:35:48,278 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 03:35:53,018 INFO [train.py:901] (1/4) Epoch 30, batch 2850, loss[loss=0.2034, simple_loss=0.2895, pruned_loss=0.05862, over 8530.00 frames. 
], tot_loss[loss=0.1965, simple_loss=0.2808, pruned_loss=0.05609, over 1620034.24 frames. ], batch size: 28, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:53,088 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237254.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:36:18,912 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.90 vs. limit=2.0 +2023-02-09 03:36:27,596 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.66 vs. limit=2.0 +2023-02-09 03:36:29,225 INFO [train.py:901] (1/4) Epoch 30, batch 2900, loss[loss=0.2085, simple_loss=0.2916, pruned_loss=0.0627, over 8187.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2814, pruned_loss=0.05646, over 1616145.89 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:36:42,591 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.592e+02 3.021e+02 4.387e+02 8.419e+02, threshold=6.042e+02, percent-clipped=5.0 +2023-02-09 03:37:04,019 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 03:37:05,361 INFO [train.py:901] (1/4) Epoch 30, batch 2950, loss[loss=0.209, simple_loss=0.2974, pruned_loss=0.06028, over 8134.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2809, pruned_loss=0.05598, over 1613564.72 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:37:15,788 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237369.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:37:36,194 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.4164, 4.3709, 4.0072, 1.7580, 3.9007, 4.0637, 3.9700, 3.8776], + device='cuda:1'), covar=tensor([0.0686, 0.0500, 0.0874, 0.4888, 0.0862, 0.0882, 0.1181, 0.0782], + device='cuda:1'), in_proj_covar=tensor([0.0550, 0.0461, 0.0450, 0.0562, 0.0445, 0.0473, 0.0447, 0.0415], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:37:40,336 INFO [train.py:901] (1/4) Epoch 30, batch 3000, loss[loss=0.2384, simple_loss=0.3142, pruned_loss=0.0813, over 7150.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2819, pruned_loss=0.05614, over 1617308.45 frames. ], batch size: 72, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:37:40,336 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 03:37:54,066 INFO [train.py:935] (1/4) Epoch 30, validation: loss=0.1704, simple_loss=0.2697, pruned_loss=0.0356, over 944034.00 frames. +2023-02-09 03:37:54,068 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 03:38:07,361 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.369e+02 2.918e+02 3.560e+02 6.316e+02, threshold=5.836e+02, percent-clipped=1.0 +2023-02-09 03:38:31,180 INFO [train.py:901] (1/4) Epoch 30, batch 3050, loss[loss=0.2209, simple_loss=0.3, pruned_loss=0.07089, over 8511.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2826, pruned_loss=0.05683, over 1617184.89 frames. 
], batch size: 26, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:39:01,958 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.1002, 1.2307, 1.2146, 0.9091, 1.1907, 1.0299, 0.1550, 1.1988], + device='cuda:1'), covar=tensor([0.0474, 0.0453, 0.0433, 0.0580, 0.0514, 0.1025, 0.1020, 0.0375], + device='cuda:1'), in_proj_covar=tensor([0.0483, 0.0422, 0.0376, 0.0467, 0.0403, 0.0562, 0.0409, 0.0446], + device='cuda:1'), out_proj_covar=tensor([1.2778e-04, 1.0912e-04, 9.7950e-05, 1.2182e-04, 1.0531e-04, 1.5642e-04, + 1.0891e-04, 1.1676e-04], device='cuda:1') +2023-02-09 03:39:07,118 INFO [train.py:901] (1/4) Epoch 30, batch 3100, loss[loss=0.2204, simple_loss=0.3058, pruned_loss=0.06749, over 8462.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.284, pruned_loss=0.05784, over 1620642.22 frames. ], batch size: 27, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:39:10,846 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237509.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:39:14,042 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-09 03:39:19,646 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.429e+02 3.016e+02 3.485e+02 6.483e+02, threshold=6.032e+02, percent-clipped=4.0 +2023-02-09 03:39:20,786 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 03:39:43,982 INFO [train.py:901] (1/4) Epoch 30, batch 3150, loss[loss=0.1394, simple_loss=0.225, pruned_loss=0.0269, over 7420.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.282, pruned_loss=0.05674, over 1621157.06 frames. ], batch size: 17, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:40:03,668 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237581.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:40:21,025 INFO [train.py:901] (1/4) Epoch 30, batch 3200, loss[loss=0.1716, simple_loss=0.259, pruned_loss=0.04206, over 7968.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2799, pruned_loss=0.05587, over 1618623.15 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:40:33,293 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.320e+02 2.861e+02 3.592e+02 8.186e+02, threshold=5.722e+02, percent-clipped=5.0 +2023-02-09 03:40:35,497 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=237625.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:40:52,664 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=237650.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:40:56,035 INFO [train.py:901] (1/4) Epoch 30, batch 3250, loss[loss=0.1809, simple_loss=0.2721, pruned_loss=0.04489, over 8331.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05649, over 1617168.69 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:41:32,173 INFO [train.py:901] (1/4) Epoch 30, batch 3300, loss[loss=0.224, simple_loss=0.3031, pruned_loss=0.07248, over 8027.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05679, over 1616825.86 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:41:44,070 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.40 vs. 
limit=5.0 +2023-02-09 03:41:45,786 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.392e+02 2.907e+02 3.818e+02 6.093e+02, threshold=5.813e+02, percent-clipped=2.0 +2023-02-09 03:42:07,968 INFO [train.py:901] (1/4) Epoch 30, batch 3350, loss[loss=0.2616, simple_loss=0.335, pruned_loss=0.09406, over 8439.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2831, pruned_loss=0.05824, over 1613341.44 frames. ], batch size: 29, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:42:44,184 INFO [train.py:901] (1/4) Epoch 30, batch 3400, loss[loss=0.1586, simple_loss=0.26, pruned_loss=0.02856, over 8098.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2818, pruned_loss=0.05773, over 1607978.91 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:42:47,126 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237808.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:42:57,410 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.484e+02 3.245e+02 4.483e+02 9.283e+02, threshold=6.490e+02, percent-clipped=12.0 +2023-02-09 03:43:00,482 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3675, 2.6897, 2.8445, 1.6912, 3.1676, 1.9905, 1.5556, 2.4707], + device='cuda:1'), covar=tensor([0.1024, 0.0473, 0.0368, 0.1001, 0.0640, 0.1001, 0.1151, 0.0614], + device='cuda:1'), in_proj_covar=tensor([0.0486, 0.0425, 0.0380, 0.0470, 0.0406, 0.0566, 0.0410, 0.0449], + device='cuda:1'), out_proj_covar=tensor([1.2850e-04, 1.0992e-04, 9.8809e-05, 1.2263e-04, 1.0614e-04, 1.5748e-04, + 1.0928e-04, 1.1749e-04], device='cuda:1') +2023-02-09 03:43:19,446 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237853.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:43:20,122 INFO [train.py:901] (1/4) Epoch 30, batch 3450, loss[loss=0.1908, simple_loss=0.2823, pruned_loss=0.04966, over 8603.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2832, pruned_loss=0.05781, over 1614054.38 frames. ], batch size: 34, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:43:56,089 INFO [train.py:901] (1/4) Epoch 30, batch 3500, loss[loss=0.1978, simple_loss=0.2972, pruned_loss=0.04924, over 8556.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2819, pruned_loss=0.05651, over 1613923.71 frames. ], batch size: 31, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:44:08,739 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.505e+02 3.010e+02 3.725e+02 8.965e+02, threshold=6.019e+02, percent-clipped=4.0 +2023-02-09 03:44:11,005 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237925.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:44:11,725 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6720, 1.3476, 2.8927, 1.4714, 2.4408, 3.0822, 3.2882, 2.6489], + device='cuda:1'), covar=tensor([0.1216, 0.1784, 0.0338, 0.2045, 0.0758, 0.0332, 0.0606, 0.0560], + device='cuda:1'), in_proj_covar=tensor([0.0308, 0.0327, 0.0296, 0.0326, 0.0328, 0.0281, 0.0447, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 03:44:14,958 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237930.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:44:22,365 WARNING [train.py:1067] (1/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. 
Duration: 25.3333125 +2023-02-09 03:44:32,802 INFO [train.py:901] (1/4) Epoch 30, batch 3550, loss[loss=0.1629, simple_loss=0.2493, pruned_loss=0.03831, over 7649.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2817, pruned_loss=0.05649, over 1610454.33 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:44:43,164 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237968.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:44:49,855 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 03:45:09,425 INFO [train.py:901] (1/4) Epoch 30, batch 3600, loss[loss=0.1965, simple_loss=0.282, pruned_loss=0.05545, over 8095.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2816, pruned_loss=0.05644, over 1613150.87 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:45:22,458 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.286e+02 2.832e+02 3.360e+02 7.556e+02, threshold=5.664e+02, percent-clipped=2.0 +2023-02-09 03:45:35,405 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238040.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:45:40,890 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5868, 4.5811, 4.1491, 1.9989, 4.0586, 4.2165, 4.0431, 4.0435], + device='cuda:1'), covar=tensor([0.0646, 0.0494, 0.0892, 0.4661, 0.0804, 0.1036, 0.1325, 0.0784], + device='cuda:1'), in_proj_covar=tensor([0.0551, 0.0463, 0.0452, 0.0563, 0.0445, 0.0475, 0.0451, 0.0416], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:45:44,902 INFO [train.py:901] (1/4) Epoch 30, batch 3650, loss[loss=0.1706, simple_loss=0.2671, pruned_loss=0.03707, over 8322.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2792, pruned_loss=0.05541, over 1606961.09 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:05,849 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:12,778 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238092.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:20,739 INFO [train.py:901] (1/4) Epoch 30, batch 3700, loss[loss=0.2164, simple_loss=0.3042, pruned_loss=0.06429, over 8284.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2796, pruned_loss=0.0553, over 1610438.63 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:29,085 WARNING [train.py:1067] (1/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-09 03:46:33,265 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.352e+02 3.001e+02 3.686e+02 7.575e+02, threshold=6.003e+02, percent-clipped=3.0 +2023-02-09 03:46:50,316 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:54,655 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6847, 1.3491, 2.9028, 1.5354, 2.4384, 3.1466, 3.2587, 2.7065], + device='cuda:1'), covar=tensor([0.1222, 0.1809, 0.0320, 0.1978, 0.0749, 0.0290, 0.0672, 0.0570], + device='cuda:1'), in_proj_covar=tensor([0.0310, 0.0330, 0.0298, 0.0328, 0.0331, 0.0283, 0.0451, 0.0310], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 03:46:56,061 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238152.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:57,365 INFO [train.py:901] (1/4) Epoch 30, batch 3750, loss[loss=0.2065, simple_loss=0.2825, pruned_loss=0.06526, over 8462.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2801, pruned_loss=0.05573, over 1610552.65 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:47:19,946 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6128, 2.0890, 3.0547, 1.4521, 2.4032, 2.0392, 1.7148, 2.3986], + device='cuda:1'), covar=tensor([0.2126, 0.2556, 0.1036, 0.5031, 0.1992, 0.3497, 0.2581, 0.2411], + device='cuda:1'), in_proj_covar=tensor([0.0547, 0.0644, 0.0569, 0.0679, 0.0671, 0.0620, 0.0573, 0.0649], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:47:33,683 INFO [train.py:901] (1/4) Epoch 30, batch 3800, loss[loss=0.2096, simple_loss=0.3011, pruned_loss=0.05908, over 8481.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2806, pruned_loss=0.05615, over 1609981.47 frames. 
], batch size: 26, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:47:46,040 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.427e+02 2.911e+02 3.474e+02 7.215e+02, threshold=5.821e+02, percent-clipped=2.0 +2023-02-09 03:47:47,585 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238224.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:47:55,413 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2692, 2.0714, 2.6733, 2.2678, 2.6972, 2.3682, 2.1535, 1.5413], + device='cuda:1'), covar=tensor([0.6018, 0.5115, 0.2136, 0.4215, 0.2642, 0.3379, 0.2141, 0.5819], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1041, 0.0851, 0.1016, 0.1037, 0.0951, 0.0782, 0.0863], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:48:05,483 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238249.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:05,502 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0495, 1.2521, 1.2025, 0.7285, 1.2192, 1.0608, 0.0614, 1.1949], + device='cuda:1'), covar=tensor([0.0516, 0.0421, 0.0405, 0.0678, 0.0489, 0.1037, 0.1007, 0.0373], + device='cuda:1'), in_proj_covar=tensor([0.0482, 0.0422, 0.0378, 0.0468, 0.0405, 0.0561, 0.0408, 0.0446], + device='cuda:1'), out_proj_covar=tensor([1.2749e-04, 1.0912e-04, 9.8345e-05, 1.2207e-04, 1.0605e-04, 1.5607e-04, + 1.0864e-04, 1.1678e-04], device='cuda:1') +2023-02-09 03:48:09,471 INFO [train.py:901] (1/4) Epoch 30, batch 3850, loss[loss=0.1854, simple_loss=0.2605, pruned_loss=0.05517, over 8241.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2817, pruned_loss=0.05672, over 1613784.92 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:48:18,888 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238267.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:23,698 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238274.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:37,707 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 03:48:39,907 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238296.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:48:45,176 INFO [train.py:901] (1/4) Epoch 30, batch 3900, loss[loss=0.2054, simple_loss=0.2776, pruned_loss=0.06656, over 6009.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2795, pruned_loss=0.05596, over 1609245.17 frames. ], batch size: 13, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:48:57,800 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238321.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:48:58,294 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.361e+02 2.887e+02 3.538e+02 6.169e+02, threshold=5.773e+02, percent-clipped=2.0 +2023-02-09 03:49:20,478 INFO [train.py:901] (1/4) Epoch 30, batch 3950, loss[loss=0.1847, simple_loss=0.26, pruned_loss=0.05467, over 7280.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.05614, over 1609298.22 frames. 
], batch size: 16, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:49:46,426 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238389.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:49:56,957 INFO [train.py:901] (1/4) Epoch 30, batch 4000, loss[loss=0.1692, simple_loss=0.2558, pruned_loss=0.04128, over 8083.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2802, pruned_loss=0.05587, over 1610859.68 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:50:09,960 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.354e+02 2.920e+02 3.674e+02 8.815e+02, threshold=5.839e+02, percent-clipped=5.0 +2023-02-09 03:50:12,702 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:50:20,360 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238436.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:50:21,771 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238438.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:50:32,677 INFO [train.py:901] (1/4) Epoch 30, batch 4050, loss[loss=0.1716, simple_loss=0.2543, pruned_loss=0.04446, over 7642.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2808, pruned_loss=0.05625, over 1609655.99 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:50:57,404 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238488.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:09,340 INFO [train.py:901] (1/4) Epoch 30, batch 4100, loss[loss=0.2408, simple_loss=0.3139, pruned_loss=0.08383, over 7242.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2811, pruned_loss=0.05665, over 1606990.11 frames. ], batch size: 73, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:51:21,790 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.287e+02 2.925e+02 3.934e+02 1.031e+03, threshold=5.850e+02, percent-clipped=7.0 +2023-02-09 03:51:22,755 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238523.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:35,957 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:41,373 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238548.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:43,263 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238551.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:45,004 INFO [train.py:901] (1/4) Epoch 30, batch 4150, loss[loss=0.1925, simple_loss=0.2678, pruned_loss=0.05863, over 6829.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2814, pruned_loss=0.05678, over 1609262.73 frames. ], batch size: 15, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:52:19,900 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238603.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:52:20,442 INFO [train.py:901] (1/4) Epoch 30, batch 4200, loss[loss=0.1816, simple_loss=0.2497, pruned_loss=0.05676, over 7244.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2818, pruned_loss=0.05742, over 1603646.38 frames. 
], batch size: 16, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:52:33,705 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.494e+02 3.256e+02 4.447e+02 1.288e+03, threshold=6.511e+02, percent-clipped=8.0 +2023-02-09 03:52:41,497 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 03:52:50,658 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238645.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:52:56,580 INFO [train.py:901] (1/4) Epoch 30, batch 4250, loss[loss=0.2019, simple_loss=0.2834, pruned_loss=0.06021, over 8497.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2812, pruned_loss=0.05675, over 1608011.88 frames. ], batch size: 29, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:53:02,319 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.13 vs. limit=2.0 +2023-02-09 03:53:05,078 WARNING [train.py:1067] (1/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 03:53:07,961 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238670.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:53:25,820 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7088, 1.4702, 1.7653, 1.3857, 0.9311, 1.4908, 1.5622, 1.4024], + device='cuda:1'), covar=tensor([0.0598, 0.1276, 0.1607, 0.1408, 0.0622, 0.1458, 0.0737, 0.0679], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0162, 0.0102, 0.0164, 0.0113, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 03:53:27,923 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238699.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:53:29,417 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5116, 1.8601, 2.6793, 1.4410, 1.9951, 1.8378, 1.6209, 2.0204], + device='cuda:1'), covar=tensor([0.1904, 0.2760, 0.0913, 0.4812, 0.1968, 0.3582, 0.2503, 0.2390], + device='cuda:1'), in_proj_covar=tensor([0.0546, 0.0644, 0.0569, 0.0679, 0.0671, 0.0619, 0.0572, 0.0650], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:53:31,133 INFO [train.py:901] (1/4) Epoch 30, batch 4300, loss[loss=0.2264, simple_loss=0.3013, pruned_loss=0.07572, over 8139.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05668, over 1608015.87 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:53:44,805 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.303e+02 2.743e+02 3.342e+02 6.438e+02, threshold=5.486e+02, percent-clipped=0.0 +2023-02-09 03:54:06,887 INFO [train.py:901] (1/4) Epoch 30, batch 4350, loss[loss=0.225, simple_loss=0.3084, pruned_loss=0.0708, over 8592.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2823, pruned_loss=0.05711, over 1614456.11 frames. ], batch size: 31, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:54:27,360 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238782.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:54:36,398 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. 
Duration: 28.638875 +2023-02-09 03:54:37,978 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238797.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:54:42,562 INFO [train.py:901] (1/4) Epoch 30, batch 4400, loss[loss=0.1973, simple_loss=0.2882, pruned_loss=0.05322, over 8703.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05655, over 1613913.17 frames. ], batch size: 50, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:54:44,935 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238807.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:54:55,879 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.573e+02 3.023e+02 3.983e+02 6.680e+02, threshold=6.046e+02, percent-clipped=2.0 +2023-02-09 03:54:56,101 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238822.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:03,795 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238832.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:15,276 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 03:55:18,601 INFO [train.py:901] (1/4) Epoch 30, batch 4450, loss[loss=0.237, simple_loss=0.3097, pruned_loss=0.08209, over 8357.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.057, over 1612877.47 frames. ], batch size: 24, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:55:22,523 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238859.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:26,395 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-09 03:55:40,870 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238884.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:50,464 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238897.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:55:55,009 INFO [train.py:901] (1/4) Epoch 30, batch 4500, loss[loss=0.2221, simple_loss=0.3005, pruned_loss=0.07183, over 7818.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2816, pruned_loss=0.05736, over 1611604.29 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:56:07,440 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.335e+02 2.828e+02 3.474e+02 8.376e+02, threshold=5.656e+02, percent-clipped=3.0 +2023-02-09 03:56:08,192 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 03:56:11,163 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0690, 2.1847, 1.8533, 2.9498, 1.3361, 1.7110, 2.1239, 2.1270], + device='cuda:1'), covar=tensor([0.0709, 0.0816, 0.0823, 0.0324, 0.1131, 0.1302, 0.0828, 0.0895], + device='cuda:1'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0215, 0.0203, 0.0247, 0.0250, 0.0206], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 03:56:30,982 INFO [train.py:901] (1/4) Epoch 30, batch 4550, loss[loss=0.1857, simple_loss=0.2737, pruned_loss=0.04878, over 8043.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05672, over 1612779.39 frames. 
], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:56:31,132 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238954.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:56:37,965 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 03:56:38,545 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7990, 1.4414, 1.7780, 1.4188, 0.9343, 1.5145, 1.6238, 1.6493], + device='cuda:1'), covar=tensor([0.0589, 0.1332, 0.1610, 0.1477, 0.0611, 0.1459, 0.0730, 0.0623], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:1') +2023-02-09 03:57:06,050 INFO [train.py:901] (1/4) Epoch 30, batch 4600, loss[loss=0.2224, simple_loss=0.3036, pruned_loss=0.07059, over 8464.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2809, pruned_loss=0.05679, over 1612786.20 frames. ], batch size: 27, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:57:19,157 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.347e+02 2.832e+02 3.443e+02 5.144e+02, threshold=5.665e+02, percent-clipped=0.0 +2023-02-09 03:57:20,227 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0 +2023-02-09 03:57:34,218 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239043.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:57:41,731 INFO [train.py:901] (1/4) Epoch 30, batch 4650, loss[loss=0.1905, simple_loss=0.2705, pruned_loss=0.05523, over 7550.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2825, pruned_loss=0.05725, over 1616391.32 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:57:46,069 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3915, 2.2031, 2.9908, 2.3507, 2.7729, 2.4569, 2.2709, 1.6448], + device='cuda:1'), covar=tensor([0.5989, 0.5514, 0.2101, 0.4274, 0.3001, 0.3379, 0.2112, 0.6099], + device='cuda:1'), in_proj_covar=tensor([0.0972, 0.1038, 0.0851, 0.1013, 0.1035, 0.0949, 0.0783, 0.0861], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 03:57:58,377 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([3.8126, 1.4945, 4.0087, 1.5125, 3.5616, 3.3732, 3.6474, 3.5481], + device='cuda:1'), covar=tensor([0.0772, 0.4488, 0.0705, 0.4353, 0.1238, 0.1035, 0.0652, 0.0806], + device='cuda:1'), in_proj_covar=tensor([0.0700, 0.0674, 0.0759, 0.0672, 0.0758, 0.0649, 0.0658, 0.0733], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 03:58:03,513 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0245, 1.5481, 1.7916, 1.4785, 0.9841, 1.5821, 1.8632, 1.7113], + device='cuda:1'), covar=tensor([0.0562, 0.1279, 0.1719, 0.1470, 0.0603, 0.1491, 0.0682, 0.0650], + device='cuda:1'), in_proj_covar=tensor([0.0100, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:1') +2023-02-09 03:58:17,765 INFO [train.py:901] (1/4) Epoch 30, batch 4700, loss[loss=0.2679, simple_loss=0.3469, pruned_loss=0.09441, over 8783.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2823, pruned_loss=0.05741, over 1612326.72 frames. 
], batch size: 30, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:30,951 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.353e+02 2.866e+02 3.941e+02 8.957e+02, threshold=5.733e+02, percent-clipped=8.0 +2023-02-09 03:58:52,011 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-09 03:58:52,478 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239153.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:58:52,925 INFO [train.py:901] (1/4) Epoch 30, batch 4750, loss[loss=0.2204, simple_loss=0.3127, pruned_loss=0.06398, over 8589.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.283, pruned_loss=0.05777, over 1612117.68 frames. ], batch size: 31, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:55,954 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239158.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:58:58,518 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8054, 1.3590, 3.3473, 1.5854, 2.3317, 3.6512, 3.7509, 3.1430], + device='cuda:1'), covar=tensor([0.1274, 0.1992, 0.0305, 0.1958, 0.1038, 0.0242, 0.0546, 0.0488], + device='cuda:1'), in_proj_covar=tensor([0.0309, 0.0329, 0.0296, 0.0329, 0.0330, 0.0282, 0.0450, 0.0309], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 03:59:05,427 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3565, 3.6514, 2.6844, 3.2422, 3.0178, 2.1633, 3.1167, 3.2306], + device='cuda:1'), covar=tensor([0.1693, 0.0480, 0.1085, 0.0705, 0.0855, 0.1583, 0.1063, 0.1120], + device='cuda:1'), in_proj_covar=tensor([0.0360, 0.0246, 0.0347, 0.0317, 0.0304, 0.0351, 0.0351, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 03:59:10,882 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239178.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:59:12,054 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 03:59:14,171 WARNING [train.py:1067] (1/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 03:59:28,642 INFO [train.py:901] (1/4) Epoch 30, batch 4800, loss[loss=0.2162, simple_loss=0.2837, pruned_loss=0.07437, over 7796.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2807, pruned_loss=0.05708, over 1608916.73 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:59:41,698 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.393e+02 3.010e+02 3.751e+02 7.640e+02, threshold=6.020e+02, percent-clipped=2.0 +2023-02-09 04:00:04,581 INFO [train.py:901] (1/4) Epoch 30, batch 4850, loss[loss=0.1723, simple_loss=0.2529, pruned_loss=0.04588, over 8090.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2798, pruned_loss=0.05625, over 1606998.71 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:00:06,723 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 04:00:36,498 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239298.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:00:40,436 INFO [train.py:901] (1/4) Epoch 30, batch 4900, loss[loss=0.2328, simple_loss=0.317, pruned_loss=0.07427, over 8367.00 frames. 
], tot_loss[loss=0.1963, simple_loss=0.2799, pruned_loss=0.05633, over 1608380.38 frames. ], batch size: 24, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:00:53,055 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.412e+02 2.818e+02 3.519e+02 1.028e+03, threshold=5.635e+02, percent-clipped=4.0 +2023-02-09 04:01:04,924 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.5903, 4.5591, 4.1846, 2.0673, 4.0596, 4.1810, 4.1123, 4.1042], + device='cuda:1'), covar=tensor([0.0646, 0.0447, 0.0846, 0.4379, 0.0876, 0.0951, 0.1149, 0.0690], + device='cuda:1'), in_proj_covar=tensor([0.0556, 0.0465, 0.0454, 0.0568, 0.0450, 0.0479, 0.0453, 0.0418], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:01:15,933 INFO [train.py:901] (1/4) Epoch 30, batch 4950, loss[loss=0.2027, simple_loss=0.2932, pruned_loss=0.05613, over 8097.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05707, over 1609126.45 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:51,704 INFO [train.py:901] (1/4) Epoch 30, batch 5000, loss[loss=0.286, simple_loss=0.3484, pruned_loss=0.1118, over 8619.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05673, over 1612773.79 frames. ], batch size: 34, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:58,097 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239413.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:01:58,889 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239414.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:04,994 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.513e+02 3.095e+02 3.810e+02 1.179e+03, threshold=6.190e+02, percent-clipped=9.0 +2023-02-09 04:02:07,460 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6968, 2.6006, 1.9468, 2.4453, 2.2459, 1.7441, 2.2068, 2.2409], + device='cuda:1'), covar=tensor([0.1557, 0.0478, 0.1229, 0.0607, 0.0767, 0.1453, 0.1039, 0.1136], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0246, 0.0345, 0.0315, 0.0302, 0.0348, 0.0349, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 04:02:17,689 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239439.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:19,893 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([0.9928, 1.5780, 1.3673, 1.5247, 1.3087, 1.3073, 1.2992, 1.2690], + device='cuda:1'), covar=tensor([0.1282, 0.0568, 0.1534, 0.0630, 0.0822, 0.1645, 0.1005, 0.0928], + device='cuda:1'), in_proj_covar=tensor([0.0359, 0.0246, 0.0345, 0.0315, 0.0302, 0.0348, 0.0349, 0.0321], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 04:02:29,032 INFO [train.py:901] (1/4) Epoch 30, batch 5050, loss[loss=0.191, simple_loss=0.2717, pruned_loss=0.05519, over 8132.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.05637, over 1612198.67 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:02:52,709 WARNING [train.py:1067] (1/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-09 04:03:05,838 INFO [train.py:901] (1/4) Epoch 30, batch 5100, loss[loss=0.2165, simple_loss=0.3071, pruned_loss=0.06296, over 8289.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05666, over 1615121.31 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:20,035 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.525e+02 3.230e+02 3.994e+02 1.175e+03, threshold=6.461e+02, percent-clipped=6.0 +2023-02-09 04:03:42,221 INFO [train.py:901] (1/4) Epoch 30, batch 5150, loss[loss=0.2026, simple_loss=0.2828, pruned_loss=0.06121, over 8009.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2821, pruned_loss=0.05733, over 1616016.40 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:53,438 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.8533, 1.8797, 2.1881, 1.9089, 1.1947, 1.8852, 2.5035, 2.2813], + device='cuda:1'), covar=tensor([0.0510, 0.1126, 0.1564, 0.1328, 0.0553, 0.1309, 0.0557, 0.0574], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:1') +2023-02-09 04:04:18,743 INFO [train.py:901] (1/4) Epoch 30, batch 5200, loss[loss=0.1866, simple_loss=0.2806, pruned_loss=0.04631, over 8099.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05658, over 1614908.57 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:31,919 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.360e+02 2.794e+02 3.430e+02 1.458e+03, threshold=5.587e+02, percent-clipped=2.0 +2023-02-09 04:04:44,695 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=239640.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:04:54,717 INFO [train.py:901] (1/4) Epoch 30, batch 5250, loss[loss=0.1615, simple_loss=0.2378, pruned_loss=0.04267, over 7531.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2797, pruned_loss=0.05605, over 1610552.29 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:56,121 WARNING [train.py:1067] (1/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 04:05:05,209 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239669.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:05:23,273 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239694.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:05:29,842 INFO [train.py:901] (1/4) Epoch 30, batch 5300, loss[loss=0.1935, simple_loss=0.2897, pruned_loss=0.04865, over 8253.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2797, pruned_loss=0.05589, over 1612435.53 frames. 
], batch size: 24, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:05:43,733 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.433e+02 2.937e+02 3.850e+02 7.663e+02, threshold=5.875e+02, percent-clipped=5.0 +2023-02-09 04:05:46,043 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9648, 2.1016, 1.8191, 2.6330, 1.1814, 1.6974, 1.9952, 2.0340], + device='cuda:1'), covar=tensor([0.0713, 0.0697, 0.0808, 0.0355, 0.1015, 0.1177, 0.0732, 0.0783], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0194, 0.0243, 0.0213, 0.0201, 0.0245, 0.0248, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:05:54,319 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6752, 1.9697, 2.8880, 1.5690, 2.2802, 2.0684, 1.7650, 2.3367], + device='cuda:1'), covar=tensor([0.1921, 0.2744, 0.1055, 0.4809, 0.1995, 0.3321, 0.2577, 0.2310], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0645, 0.0568, 0.0679, 0.0669, 0.0619, 0.0572, 0.0649], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:06:04,873 INFO [train.py:901] (1/4) Epoch 30, batch 5350, loss[loss=0.1639, simple_loss=0.2465, pruned_loss=0.0406, over 8088.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2807, pruned_loss=0.05631, over 1618284.89 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:22,341 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.2374, 2.2277, 1.7690, 1.9823, 1.7203, 1.6035, 1.6465, 1.6713], + device='cuda:1'), covar=tensor([0.1343, 0.0478, 0.1280, 0.0547, 0.0819, 0.1531, 0.0990, 0.0878], + device='cuda:1'), in_proj_covar=tensor([0.0358, 0.0245, 0.0345, 0.0315, 0.0301, 0.0348, 0.0349, 0.0320], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 04:06:40,969 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5914, 1.4081, 1.6787, 1.3143, 0.9400, 1.4597, 1.5584, 1.2371], + device='cuda:1'), covar=tensor([0.0605, 0.1279, 0.1740, 0.1508, 0.0585, 0.1485, 0.0697, 0.0736], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0154, 0.0191, 0.0163, 0.0102, 0.0165, 0.0113, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 04:06:41,534 INFO [train.py:901] (1/4) Epoch 30, batch 5400, loss[loss=0.2013, simple_loss=0.2916, pruned_loss=0.05545, over 8294.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05646, over 1615580.93 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:55,040 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.371e+02 2.881e+02 3.522e+02 8.420e+02, threshold=5.763e+02, percent-clipped=7.0 +2023-02-09 04:07:17,634 INFO [train.py:901] (1/4) Epoch 30, batch 5450, loss[loss=0.195, simple_loss=0.2792, pruned_loss=0.0554, over 8516.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2809, pruned_loss=0.05679, over 1616326.49 frames. ], batch size: 49, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:07:49,889 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 04:07:53,966 INFO [train.py:901] (1/4) Epoch 30, batch 5500, loss[loss=0.2186, simple_loss=0.3012, pruned_loss=0.06801, over 8333.00 frames. 
], tot_loss[loss=0.1959, simple_loss=0.2798, pruned_loss=0.05599, over 1616828.57 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:08,775 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.378e+02 3.012e+02 4.037e+02 9.246e+02, threshold=6.023e+02, percent-clipped=5.0 +2023-02-09 04:08:30,327 INFO [train.py:901] (1/4) Epoch 30, batch 5550, loss[loss=0.2096, simple_loss=0.2917, pruned_loss=0.06369, over 8655.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2799, pruned_loss=0.05594, over 1618591.72 frames. ], batch size: 39, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:50,819 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:09:07,157 INFO [train.py:901] (1/4) Epoch 30, batch 5600, loss[loss=0.1941, simple_loss=0.2863, pruned_loss=0.05096, over 7801.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2796, pruned_loss=0.0561, over 1615123.27 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:15,197 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-09 04:09:21,041 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.479e+02 2.939e+02 3.472e+02 8.474e+02, threshold=5.878e+02, percent-clipped=2.0 +2023-02-09 04:09:21,976 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7661, 1.6381, 2.1826, 1.3567, 1.3280, 2.1067, 0.3791, 1.3646], + device='cuda:1'), covar=tensor([0.1221, 0.1125, 0.0320, 0.0826, 0.2114, 0.0411, 0.1620, 0.1106], + device='cuda:1'), in_proj_covar=tensor([0.0204, 0.0206, 0.0138, 0.0224, 0.0279, 0.0149, 0.0175, 0.0202], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 04:09:42,585 INFO [train.py:901] (1/4) Epoch 30, batch 5650, loss[loss=0.1891, simple_loss=0.2669, pruned_loss=0.05569, over 8483.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2794, pruned_loss=0.05625, over 1610459.82 frames. ], batch size: 28, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:44,731 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.7909, 5.9617, 5.2655, 2.4983, 5.2260, 5.6000, 5.4579, 5.4859], + device='cuda:1'), covar=tensor([0.0515, 0.0303, 0.0768, 0.4337, 0.0745, 0.0857, 0.0887, 0.0591], + device='cuda:1'), in_proj_covar=tensor([0.0558, 0.0466, 0.0456, 0.0572, 0.0453, 0.0480, 0.0456, 0.0419], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:09:59,646 WARNING [train.py:1067] (1/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 04:10:14,055 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240099.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:10:17,325 INFO [train.py:901] (1/4) Epoch 30, batch 5700, loss[loss=0.2492, simple_loss=0.3206, pruned_loss=0.08888, over 7025.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2803, pruned_loss=0.05648, over 1611066.84 frames. 
], batch size: 72, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:10:20,763 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.4629, 1.7624, 1.4237, 2.8166, 1.1935, 1.3121, 2.1206, 1.8955], + device='cuda:1'), covar=tensor([0.1481, 0.1277, 0.1749, 0.0377, 0.1316, 0.1976, 0.0906, 0.0998], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0193, 0.0243, 0.0213, 0.0201, 0.0245, 0.0247, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:10:31,996 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 2.506e+02 3.162e+02 4.194e+02 1.225e+03, threshold=6.325e+02, percent-clipped=8.0 +2023-02-09 04:10:53,042 INFO [train.py:901] (1/4) Epoch 30, batch 5750, loss[loss=0.2127, simple_loss=0.2989, pruned_loss=0.06329, over 8512.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.28, pruned_loss=0.05676, over 1609822.28 frames. ], batch size: 28, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:04,196 WARNING [train.py:1067] (1/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 04:11:21,800 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.0628, 1.6432, 1.3762, 1.5671, 1.2983, 1.2633, 1.2583, 1.3631], + device='cuda:1'), covar=tensor([0.1221, 0.0563, 0.1541, 0.0636, 0.0929, 0.1724, 0.1127, 0.0889], + device='cuda:1'), in_proj_covar=tensor([0.0357, 0.0245, 0.0344, 0.0314, 0.0301, 0.0346, 0.0349, 0.0318], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 04:11:28,591 INFO [train.py:901] (1/4) Epoch 30, batch 5800, loss[loss=0.1809, simple_loss=0.2655, pruned_loss=0.04819, over 8519.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05712, over 1610945.73 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:35,160 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-09 04:11:39,938 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.8090, 2.0015, 1.6733, 2.5408, 1.1572, 1.4901, 1.9198, 1.9927], + device='cuda:1'), covar=tensor([0.0751, 0.0776, 0.0904, 0.0387, 0.1117, 0.1359, 0.0747, 0.0764], + device='cuda:1'), in_proj_covar=tensor([0.0230, 0.0194, 0.0244, 0.0213, 0.0201, 0.0245, 0.0247, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:11:42,452 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.400e+02 2.667e+02 3.487e+02 8.848e+02, threshold=5.334e+02, percent-clipped=2.0 +2023-02-09 04:12:04,252 INFO [train.py:901] (1/4) Epoch 30, batch 5850, loss[loss=0.1798, simple_loss=0.2611, pruned_loss=0.04926, over 7783.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2818, pruned_loss=0.05697, over 1614978.43 frames. 
], batch size: 19, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:33,881 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5846, 1.4636, 1.8548, 1.2296, 1.3094, 1.7900, 0.2565, 1.2083], + device='cuda:1'), covar=tensor([0.1414, 0.1249, 0.0427, 0.0841, 0.2209, 0.0499, 0.1733, 0.1168], + device='cuda:1'), in_proj_covar=tensor([0.0205, 0.0208, 0.0139, 0.0225, 0.0281, 0.0150, 0.0176, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 04:12:39,826 INFO [train.py:901] (1/4) Epoch 30, batch 5900, loss[loss=0.1936, simple_loss=0.2778, pruned_loss=0.05471, over 8367.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05741, over 1613794.05 frames. ], batch size: 24, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:52,667 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0 +2023-02-09 04:12:53,715 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.330e+02 2.970e+02 3.920e+02 1.059e+03, threshold=5.939e+02, percent-clipped=6.0 +2023-02-09 04:13:15,506 INFO [train.py:901] (1/4) Epoch 30, batch 5950, loss[loss=0.2099, simple_loss=0.2975, pruned_loss=0.06117, over 8452.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.282, pruned_loss=0.05674, over 1609992.19 frames. ], batch size: 27, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:16,423 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=240355.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:18,456 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240358.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:33,797 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=240380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:50,593 INFO [train.py:901] (1/4) Epoch 30, batch 6000, loss[loss=0.1835, simple_loss=0.2761, pruned_loss=0.04548, over 8332.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2809, pruned_loss=0.05587, over 1606292.35 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:50,593 INFO [train.py:926] (1/4) Computing validation loss +2023-02-09 04:14:04,296 INFO [train.py:935] (1/4) Epoch 30, validation: loss=0.1701, simple_loss=0.2695, pruned_loss=0.03536, over 944034.00 frames. +2023-02-09 04:14:04,297 INFO [train.py:936] (1/4) Maximum memory allocated so far is 6668MB +2023-02-09 04:14:17,955 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.377e+02 3.122e+02 3.554e+02 6.850e+02, threshold=6.243e+02, percent-clipped=2.0 +2023-02-09 04:14:39,925 INFO [train.py:901] (1/4) Epoch 30, batch 6050, loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05804, over 8029.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2801, pruned_loss=0.05562, over 1603057.01 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:04,207 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 04:15:16,411 INFO [train.py:901] (1/4) Epoch 30, batch 6100, loss[loss=0.2127, simple_loss=0.2985, pruned_loss=0.06347, over 8320.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2818, pruned_loss=0.05657, over 1606011.76 frames. 
], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:30,263 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 2.992e+02 3.767e+02 7.583e+02, threshold=5.983e+02, percent-clipped=4.0 +2023-02-09 04:15:40,564 WARNING [train.py:1067] (1/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 04:15:51,549 INFO [train.py:901] (1/4) Epoch 30, batch 6150, loss[loss=0.2158, simple_loss=0.3069, pruned_loss=0.06233, over 8456.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2811, pruned_loss=0.0563, over 1609904.64 frames. ], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:28,465 INFO [train.py:901] (1/4) Epoch 30, batch 6200, loss[loss=0.2062, simple_loss=0.2931, pruned_loss=0.0597, over 8643.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2802, pruned_loss=0.05588, over 1611061.70 frames. ], batch size: 34, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:44,278 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.533e+02 2.968e+02 3.901e+02 6.917e+02, threshold=5.935e+02, percent-clipped=4.0 +2023-02-09 04:17:05,856 INFO [train.py:901] (1/4) Epoch 30, batch 6250, loss[loss=0.2362, simple_loss=0.2999, pruned_loss=0.0863, over 6461.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2792, pruned_loss=0.05564, over 1606996.63 frames. ], batch size: 71, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:34,725 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.9854, 1.6800, 2.2659, 1.6170, 1.1531, 1.9039, 2.4536, 2.4755], + device='cuda:1'), covar=tensor([0.0461, 0.1115, 0.1403, 0.1372, 0.0532, 0.1329, 0.0564, 0.0533], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0113, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 04:17:39,922 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=240702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:17:41,234 INFO [train.py:901] (1/4) Epoch 30, batch 6300, loss[loss=0.1883, simple_loss=0.2783, pruned_loss=0.04912, over 8290.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.28, pruned_loss=0.05538, over 1614230.57 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:54,925 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.569e+02 3.101e+02 4.376e+02 1.063e+03, threshold=6.203e+02, percent-clipped=9.0 +2023-02-09 04:18:17,086 INFO [train.py:901] (1/4) Epoch 30, batch 6350, loss[loss=0.1839, simple_loss=0.2701, pruned_loss=0.04887, over 8502.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2812, pruned_loss=0.0557, over 1621343.07 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:18:41,761 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. 
limit=2.0 +2023-02-09 04:18:47,105 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3376, 2.5560, 2.8251, 1.9135, 3.1411, 1.8352, 1.6043, 2.3070], + device='cuda:1'), covar=tensor([0.0834, 0.0424, 0.0330, 0.0726, 0.0502, 0.0984, 0.0914, 0.0539], + device='cuda:1'), in_proj_covar=tensor([0.0486, 0.0420, 0.0379, 0.0470, 0.0405, 0.0562, 0.0411, 0.0450], + device='cuda:1'), out_proj_covar=tensor([1.2857e-04, 1.0813e-04, 9.8695e-05, 1.2268e-04, 1.0587e-04, 1.5621e-04, + 1.0949e-04, 1.1748e-04], device='cuda:1') +2023-02-09 04:18:53,320 INFO [train.py:901] (1/4) Epoch 30, batch 6400, loss[loss=0.1811, simple_loss=0.2755, pruned_loss=0.04337, over 8250.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2816, pruned_loss=0.05565, over 1621214.25 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:19:02,602 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240817.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:19:07,074 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.422e+02 2.800e+02 3.642e+02 5.918e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-09 04:19:28,686 INFO [train.py:901] (1/4) Epoch 30, batch 6450, loss[loss=0.1896, simple_loss=0.279, pruned_loss=0.05007, over 8469.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2817, pruned_loss=0.0559, over 1621213.71 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:19:33,454 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.2294, 2.5311, 2.7280, 1.6603, 2.9686, 1.8017, 1.5037, 2.1789], + device='cuda:1'), covar=tensor([0.0949, 0.0432, 0.0335, 0.0873, 0.0628, 0.0949, 0.1131, 0.0668], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0419, 0.0378, 0.0468, 0.0404, 0.0561, 0.0409, 0.0448], + device='cuda:1'), out_proj_covar=tensor([1.2815e-04, 1.0786e-04, 9.8505e-05, 1.2213e-04, 1.0561e-04, 1.5575e-04, + 1.0907e-04, 1.1711e-04], device='cuda:1') +2023-02-09 04:19:38,862 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0332, 1.5215, 3.3337, 1.5522, 2.4240, 3.6072, 3.7858, 3.1161], + device='cuda:1'), covar=tensor([0.1176, 0.1852, 0.0293, 0.1972, 0.0983, 0.0221, 0.0446, 0.0503], + device='cuda:1'), in_proj_covar=tensor([0.0309, 0.0329, 0.0295, 0.0328, 0.0329, 0.0283, 0.0450, 0.0308], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:1') +2023-02-09 04:19:42,469 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9092, 1.5420, 1.7613, 1.5055, 0.9926, 1.6105, 1.7769, 1.6862], + device='cuda:1'), covar=tensor([0.0569, 0.1239, 0.1637, 0.1434, 0.0611, 0.1404, 0.0675, 0.0639], + device='cuda:1'), in_proj_covar=tensor([0.0101, 0.0154, 0.0191, 0.0162, 0.0102, 0.0164, 0.0113, 0.0148], + device='cuda:1'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:1') +2023-02-09 04:20:03,783 INFO [train.py:901] (1/4) Epoch 30, batch 6500, loss[loss=0.1761, simple_loss=0.2696, pruned_loss=0.04124, over 8542.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2801, pruned_loss=0.0553, over 1616143.10 frames. 
], batch size: 50, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:20:17,923 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.582e+02 3.161e+02 3.840e+02 1.025e+03, threshold=6.322e+02, percent-clipped=7.0 +2023-02-09 04:20:30,131 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3523, 2.6213, 2.7488, 1.8254, 3.0781, 1.9824, 1.5923, 2.3095], + device='cuda:1'), covar=tensor([0.0994, 0.0467, 0.0367, 0.0892, 0.0551, 0.1007, 0.1097, 0.0606], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0418, 0.0378, 0.0469, 0.0404, 0.0561, 0.0409, 0.0448], + device='cuda:1'), out_proj_covar=tensor([1.2810e-04, 1.0780e-04, 9.8317e-05, 1.2237e-04, 1.0558e-04, 1.5580e-04, + 1.0912e-04, 1.1716e-04], device='cuda:1') +2023-02-09 04:20:36,285 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240950.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:20:38,865 INFO [train.py:901] (1/4) Epoch 30, batch 6550, loss[loss=0.1709, simple_loss=0.2553, pruned_loss=0.04322, over 7667.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2802, pruned_loss=0.05557, over 1614778.64 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:00,572 WARNING [train.py:1067] (1/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 04:21:01,494 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7730, 2.0513, 2.0673, 1.2901, 2.2054, 1.6607, 0.5468, 1.9924], + device='cuda:1'), covar=tensor([0.0725, 0.0427, 0.0389, 0.0726, 0.0470, 0.0971, 0.1052, 0.0330], + device='cuda:1'), in_proj_covar=tensor([0.0485, 0.0419, 0.0377, 0.0469, 0.0405, 0.0561, 0.0409, 0.0449], + device='cuda:1'), out_proj_covar=tensor([1.2821e-04, 1.0787e-04, 9.8151e-05, 1.2251e-04, 1.0565e-04, 1.5593e-04, + 1.0907e-04, 1.1719e-04], device='cuda:1') +2023-02-09 04:21:13,967 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.8493, 5.9230, 5.1801, 2.4982, 5.2345, 5.6587, 5.3041, 5.3440], + device='cuda:1'), covar=tensor([0.0488, 0.0335, 0.0763, 0.4243, 0.0747, 0.0695, 0.1062, 0.0469], + device='cuda:1'), in_proj_covar=tensor([0.0560, 0.0467, 0.0460, 0.0572, 0.0454, 0.0482, 0.0457, 0.0420], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:21:15,890 INFO [train.py:901] (1/4) Epoch 30, batch 6600, loss[loss=0.216, simple_loss=0.2998, pruned_loss=0.06615, over 8260.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2806, pruned_loss=0.05571, over 1616927.53 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:16,033 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241004.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:21:20,061 WARNING [train.py:1067] (1/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 04:21:28,329 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. 
limit=2.0 +2023-02-09 04:21:29,318 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.0216, 2.2276, 1.8338, 2.9153, 1.3656, 1.7722, 2.1484, 2.2212], + device='cuda:1'), covar=tensor([0.0730, 0.0811, 0.0876, 0.0314, 0.1170, 0.1234, 0.0850, 0.0791], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0213, 0.0202, 0.0246, 0.0248, 0.0204], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:21:29,837 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.233e+02 3.026e+02 3.930e+02 1.368e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-09 04:21:51,501 INFO [train.py:901] (1/4) Epoch 30, batch 6650, loss[loss=0.1609, simple_loss=0.2382, pruned_loss=0.04182, over 7445.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.28, pruned_loss=0.05507, over 1618041.48 frames. ], batch size: 17, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:22:04,818 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241073.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:23,560 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:27,625 INFO [train.py:901] (1/4) Epoch 30, batch 6700, loss[loss=0.1915, simple_loss=0.284, pruned_loss=0.04947, over 7967.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2792, pruned_loss=0.05508, over 1619433.33 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:22:42,111 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.246e+02 2.834e+02 3.422e+02 9.903e+02, threshold=5.667e+02, percent-clipped=4.0 +2023-02-09 04:23:04,036 INFO [train.py:901] (1/4) Epoch 30, batch 6750, loss[loss=0.2044, simple_loss=0.2941, pruned_loss=0.05734, over 8340.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2803, pruned_loss=0.05572, over 1625099.05 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:08,862 INFO [scaling.py:679] (1/4) Whitening: num_groups=1, num_channels=256, metric=4.47 vs. limit=5.0 +2023-02-09 04:23:39,180 INFO [train.py:901] (1/4) Epoch 30, batch 6800, loss[loss=0.1821, simple_loss=0.2664, pruned_loss=0.04891, over 6861.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2784, pruned_loss=0.05515, over 1615186.85 frames. ], batch size: 15, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:42,688 WARNING [train.py:1067] (1/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 04:23:53,781 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.178e+02 2.682e+02 3.526e+02 7.087e+02, threshold=5.364e+02, percent-clipped=2.0 +2023-02-09 04:24:00,918 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3955, 2.0931, 1.6495, 2.0411, 1.7790, 1.4440, 1.6849, 1.6756], + device='cuda:1'), covar=tensor([0.1271, 0.0512, 0.1297, 0.0537, 0.0789, 0.1668, 0.0991, 0.0910], + device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0246, 0.0347, 0.0316, 0.0304, 0.0350, 0.0350, 0.0322], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 04:24:15,452 INFO [train.py:901] (1/4) Epoch 30, batch 6850, loss[loss=0.2004, simple_loss=0.2776, pruned_loss=0.06165, over 8495.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2797, pruned_loss=0.05599, over 1613810.37 frames. 
], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:24:34,759 WARNING [train.py:1067] (1/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 04:24:43,872 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241294.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:24:50,575 INFO [train.py:901] (1/4) Epoch 30, batch 6900, loss[loss=0.18, simple_loss=0.2712, pruned_loss=0.04445, over 7976.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2801, pruned_loss=0.05646, over 1614191.55 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:05,717 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.527e+02 3.094e+02 3.969e+02 8.004e+02, threshold=6.188e+02, percent-clipped=9.0 +2023-02-09 04:25:22,104 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241348.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:25:26,913 INFO [train.py:901] (1/4) Epoch 30, batch 6950, loss[loss=0.1788, simple_loss=0.2768, pruned_loss=0.04045, over 8330.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2789, pruned_loss=0.05555, over 1612684.85 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:46,735 WARNING [train.py:1067] (1/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 04:26:04,048 INFO [train.py:901] (1/4) Epoch 30, batch 7000, loss[loss=0.2564, simple_loss=0.3211, pruned_loss=0.09586, over 6845.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.278, pruned_loss=0.05533, over 1604061.51 frames. ], batch size: 72, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:07,754 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241409.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:26:17,959 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.441e+02 2.932e+02 3.651e+02 7.920e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-09 04:26:18,113 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241424.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:26:40,283 INFO [train.py:901] (1/4) Epoch 30, batch 7050, loss[loss=0.2227, simple_loss=0.3037, pruned_loss=0.07085, over 8295.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2781, pruned_loss=0.055, over 1606279.86 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:46,794 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241463.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:27:16,741 INFO [train.py:901] (1/4) Epoch 30, batch 7100, loss[loss=0.1814, simple_loss=0.2796, pruned_loss=0.04159, over 8784.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2785, pruned_loss=0.05491, over 1609471.26 frames. 
], batch size: 30, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:27:27,514 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6208, 2.0340, 3.1831, 1.4859, 2.4828, 2.0563, 1.6912, 2.5217], + device='cuda:1'), covar=tensor([0.2075, 0.2754, 0.0927, 0.4883, 0.1918, 0.3450, 0.2639, 0.2217], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0648, 0.0566, 0.0675, 0.0670, 0.0620, 0.0573, 0.0649], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:27:30,224 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6972, 1.8217, 1.6211, 2.2193, 1.0846, 1.4940, 1.7650, 1.8793], + device='cuda:1'), covar=tensor([0.0777, 0.0691, 0.0861, 0.0456, 0.1024, 0.1226, 0.0669, 0.0677], + device='cuda:1'), in_proj_covar=tensor([0.0231, 0.0193, 0.0244, 0.0213, 0.0202, 0.0245, 0.0247, 0.0203], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:27:30,723 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.381e+02 2.857e+02 3.660e+02 8.579e+02, threshold=5.714e+02, percent-clipped=3.0 +2023-02-09 04:27:51,597 INFO [train.py:901] (1/4) Epoch 30, batch 7150, loss[loss=0.1522, simple_loss=0.2436, pruned_loss=0.03041, over 8083.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2791, pruned_loss=0.05517, over 1612502.23 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:00,173 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.5210, 2.3042, 1.8527, 2.2553, 1.9975, 1.5986, 1.8769, 1.9007], + device='cuda:1'), covar=tensor([0.1395, 0.0465, 0.1208, 0.0567, 0.0795, 0.1584, 0.0997, 0.0966], + device='cuda:1'), in_proj_covar=tensor([0.0362, 0.0246, 0.0348, 0.0317, 0.0305, 0.0351, 0.0352, 0.0323], + device='cuda:1'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:1') +2023-02-09 04:28:28,702 INFO [train.py:901] (1/4) Epoch 30, batch 7200, loss[loss=0.2028, simple_loss=0.2896, pruned_loss=0.05803, over 7822.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2796, pruned_loss=0.0554, over 1614361.25 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:43,487 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.259e+02 2.765e+02 3.853e+02 1.030e+03, threshold=5.530e+02, percent-clipped=3.0 +2023-02-09 04:28:53,983 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241639.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:03,963 INFO [train.py:901] (1/4) Epoch 30, batch 7250, loss[loss=0.2041, simple_loss=0.2909, pruned_loss=0.0587, over 8080.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2795, pruned_loss=0.05546, over 1615135.23 frames. 
], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:11,970 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241665.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:29:26,162 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7270, 1.6586, 2.1353, 1.3172, 1.3340, 2.0943, 0.3057, 1.2471], + device='cuda:1'), covar=tensor([0.1417, 0.0976, 0.0356, 0.1026, 0.2079, 0.0410, 0.1596, 0.1145], + device='cuda:1'), in_proj_covar=tensor([0.0203, 0.0206, 0.0138, 0.0223, 0.0277, 0.0149, 0.0174, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 04:29:30,389 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241690.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:29:40,038 INFO [train.py:901] (1/4) Epoch 30, batch 7300, loss[loss=0.2376, simple_loss=0.324, pruned_loss=0.07564, over 8644.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2792, pruned_loss=0.05557, over 1611703.65 frames. ], batch size: 34, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:50,471 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241719.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:53,736 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.349e+02 2.997e+02 3.899e+02 6.597e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-09 04:30:08,885 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:15,572 INFO [train.py:901] (1/4) Epoch 30, batch 7350, loss[loss=0.2148, simple_loss=0.2954, pruned_loss=0.06708, over 8436.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2804, pruned_loss=0.05631, over 1614046.04 frames. ], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:25,406 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241768.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:27,486 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([4.6287, 4.6607, 4.1177, 2.1871, 4.0695, 4.2207, 4.0801, 4.0761], + device='cuda:1'), covar=tensor([0.0632, 0.0456, 0.0971, 0.4289, 0.0932, 0.0886, 0.1165, 0.0748], + device='cuda:1'), in_proj_covar=tensor([0.0558, 0.0465, 0.0459, 0.0568, 0.0452, 0.0481, 0.0454, 0.0419], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:30:39,746 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 04:30:51,563 INFO [train.py:901] (1/4) Epoch 30, batch 7400, loss[loss=0.1798, simple_loss=0.2597, pruned_loss=0.04997, over 7545.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2802, pruned_loss=0.05607, over 1612180.17 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:59,776 WARNING [train.py:1067] (1/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. 
Duration: 0.95 +2023-02-09 04:31:04,080 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241821.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:05,936 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.497e+02 3.037e+02 3.880e+02 5.984e+02, threshold=6.074e+02, percent-clipped=0.0 +2023-02-09 04:31:24,324 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241849.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:31:27,626 INFO [train.py:901] (1/4) Epoch 30, batch 7450, loss[loss=0.1816, simple_loss=0.2659, pruned_loss=0.04864, over 7557.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2802, pruned_loss=0.05608, over 1610487.30 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:31:40,058 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 04:31:40,202 WARNING [train.py:1067] (1/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 04:31:48,158 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:55,069 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.3951, 1.4843, 1.3942, 1.8365, 0.6602, 1.2781, 1.3194, 1.4871], + device='cuda:1'), covar=tensor([0.0927, 0.0758, 0.0973, 0.0489, 0.1183, 0.1424, 0.0802, 0.0741], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0195, 0.0245, 0.0215, 0.0203, 0.0248, 0.0250, 0.0204], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:32:02,789 INFO [train.py:901] (1/4) Epoch 30, batch 7500, loss[loss=0.1585, simple_loss=0.2343, pruned_loss=0.04136, over 7213.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.0564, over 1612789.71 frames. ], batch size: 16, lr: 2.49e-03, grad_scale: 16.0 +2023-02-09 04:32:18,961 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.491e+02 2.852e+02 3.531e+02 9.058e+02, threshold=5.704e+02, percent-clipped=2.0 +2023-02-09 04:32:39,984 INFO [train.py:901] (1/4) Epoch 30, batch 7550, loss[loss=0.209, simple_loss=0.2906, pruned_loss=0.06368, over 8526.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2803, pruned_loss=0.05576, over 1615080.01 frames. ], batch size: 28, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:33:01,505 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241983.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:33:17,213 INFO [train.py:901] (1/4) Epoch 30, batch 7600, loss[loss=0.198, simple_loss=0.2873, pruned_loss=0.05436, over 8569.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2803, pruned_loss=0.05604, over 1610770.37 frames. 
], batch size: 39, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:33:32,887 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.414e+02 3.070e+02 3.745e+02 6.631e+02, threshold=6.140e+02, percent-clipped=3.0 +2023-02-09 04:33:45,095 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.7358, 2.1518, 3.2086, 1.5649, 2.5382, 2.2152, 1.8694, 2.5831], + device='cuda:1'), covar=tensor([0.1928, 0.2798, 0.0958, 0.4876, 0.1942, 0.3415, 0.2555, 0.2402], + device='cuda:1'), in_proj_covar=tensor([0.0545, 0.0649, 0.0566, 0.0678, 0.0670, 0.0619, 0.0573, 0.0648], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:33:54,462 INFO [train.py:901] (1/4) Epoch 30, batch 7650, loss[loss=0.219, simple_loss=0.2968, pruned_loss=0.07058, over 8604.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.05715, over 1613109.57 frames. ], batch size: 31, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:25,714 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:34:29,810 INFO [train.py:901] (1/4) Epoch 30, batch 7700, loss[loss=0.2164, simple_loss=0.305, pruned_loss=0.06391, over 8453.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2815, pruned_loss=0.05709, over 1615123.51 frames. ], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:44,311 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.431e+02 3.028e+02 3.722e+02 6.918e+02, threshold=6.057e+02, percent-clipped=1.0 +2023-02-09 04:34:46,822 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9706, 1.9936, 2.0131, 1.7179, 2.0576, 1.7498, 1.3681, 1.9653], + device='cuda:1'), covar=tensor([0.0423, 0.0360, 0.0258, 0.0420, 0.0387, 0.0565, 0.0677, 0.0248], + device='cuda:1'), in_proj_covar=tensor([0.0484, 0.0420, 0.0377, 0.0469, 0.0406, 0.0562, 0.0409, 0.0448], + device='cuda:1'), out_proj_covar=tensor([1.2781e-04, 1.0832e-04, 9.8124e-05, 1.2240e-04, 1.0608e-04, 1.5618e-04, + 1.0897e-04, 1.1705e-04], device='cuda:1') +2023-02-09 04:34:54,344 WARNING [train.py:1067] (1/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 04:34:55,280 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:05,987 INFO [train.py:901] (1/4) Epoch 30, batch 7750, loss[loss=0.153, simple_loss=0.2353, pruned_loss=0.03536, over 7233.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2813, pruned_loss=0.05727, over 1613363.92 frames. 
], batch size: 16, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:11,039 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.9729, 2.2865, 1.8917, 2.9406, 1.3246, 1.6330, 2.3040, 2.2071], + device='cuda:1'), covar=tensor([0.0844, 0.0766, 0.0859, 0.0366, 0.1186, 0.1343, 0.0784, 0.0781], + device='cuda:1'), in_proj_covar=tensor([0.0233, 0.0195, 0.0246, 0.0215, 0.0204, 0.0248, 0.0250, 0.0205], + device='cuda:1'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:1') +2023-02-09 04:35:11,679 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([5.5535, 5.6480, 4.9784, 2.5744, 4.9948, 5.2478, 5.2570, 5.1497], + device='cuda:1'), covar=tensor([0.0532, 0.0360, 0.0834, 0.4079, 0.0773, 0.0924, 0.0966, 0.0604], + device='cuda:1'), in_proj_covar=tensor([0.0557, 0.0463, 0.0456, 0.0567, 0.0451, 0.0480, 0.0453, 0.0418], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:1') +2023-02-09 04:35:13,189 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242164.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:13,776 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242165.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:18,598 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242171.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:34,909 INFO [zipformer.py:1185] (1/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242193.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:35:42,638 INFO [train.py:901] (1/4) Epoch 30, batch 7800, loss[loss=0.1666, simple_loss=0.2483, pruned_loss=0.04249, over 7806.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2819, pruned_loss=0.05727, over 1611061.37 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:57,874 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.396e+02 3.014e+02 3.960e+02 8.063e+02, threshold=6.029e+02, percent-clipped=4.0 +2023-02-09 04:36:11,178 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242244.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:18,002 INFO [train.py:901] (1/4) Epoch 30, batch 7850, loss[loss=0.2056, simple_loss=0.2894, pruned_loss=0.06095, over 8186.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2818, pruned_loss=0.05698, over 1614829.71 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:36,019 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:52,298 INFO [train.py:901] (1/4) Epoch 30, batch 7900, loss[loss=0.1835, simple_loss=0.2607, pruned_loss=0.05319, over 7647.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2801, pruned_loss=0.05635, over 1610977.19 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:54,993 INFO [zipformer.py:1185] (1/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242308.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:37:01,360 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-09 04:37:06,479 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.359e+02 2.894e+02 3.889e+02 1.272e+03, threshold=5.788e+02, percent-clipped=10.0 +2023-02-09 04:37:08,036 INFO [zipformer.py:1185] (1/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242327.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:11,720 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.94 vs. limit=2.0 +2023-02-09 04:37:14,231 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([2.3238, 2.1122, 2.6116, 2.2435, 2.6375, 2.4190, 2.1981, 1.5410], + device='cuda:1'), covar=tensor([0.6092, 0.5499, 0.2406, 0.4294, 0.2758, 0.3430, 0.2057, 0.5842], + device='cuda:1'), in_proj_covar=tensor([0.0974, 0.1040, 0.0857, 0.1017, 0.1038, 0.0951, 0.0783, 0.0864], + device='cuda:1'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:1') +2023-02-09 04:37:26,302 INFO [train.py:901] (1/4) Epoch 30, batch 7950, loss[loss=0.2019, simple_loss=0.2958, pruned_loss=0.05397, over 8318.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2807, pruned_loss=0.05708, over 1609311.64 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:37:26,517 INFO [zipformer.py:1185] (1/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242354.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:37,023 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.43 vs. limit=2.0 +2023-02-09 04:37:42,302 INFO [zipformer.py:2431] (1/4) attn_weights_entropy = tensor([1.6532, 1.5411, 2.1142, 1.3838, 1.2176, 2.0756, 0.3489, 1.3051], + device='cuda:1'), covar=tensor([0.1458, 0.1177, 0.0392, 0.0964, 0.2321, 0.0424, 0.1856, 0.1164], + device='cuda:1'), in_proj_covar=tensor([0.0204, 0.0207, 0.0138, 0.0224, 0.0278, 0.0149, 0.0175, 0.0200], + device='cuda:1'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:1') +2023-02-09 04:37:42,514 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 04:37:43,556 INFO [zipformer.py:1185] (1/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242379.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:38:00,602 INFO [train.py:901] (1/4) Epoch 30, batch 8000, loss[loss=0.2404, simple_loss=0.3118, pruned_loss=0.08451, over 8344.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2816, pruned_loss=0.05714, over 1613794.03 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:38:13,197 INFO [scaling.py:679] (1/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0 +2023-02-09 04:38:14,883 INFO [optim.py:369] (1/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.478e+02 2.968e+02 3.707e+02 1.083e+03, threshold=5.936e+02, percent-clipped=5.0 +2023-02-09 04:38:35,235 INFO [train.py:901] (1/4) Epoch 30, batch 8050, loss[loss=0.1985, simple_loss=0.2865, pruned_loss=0.05526, over 7938.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2806, pruned_loss=0.05733, over 1595420.49 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:38:58,134 INFO [train.py:1165] (1/4) Done! 
diff --git a/log/log-train-2023-02-08-23-42-53-2 b/log/log-train-2023-02-08-23-42-53-2 new file mode 100644 index 0000000000000000000000000000000000000000..ce3215f75dd804df101f57aaf2a9d969def195a2 --- /dev/null +++ b/log/log-train-2023-02-08-23-42-53-2 @@ -0,0 +1,2801 @@ +2023-02-08 23:42:53,783 INFO [train.py:973] (2/4) Training started +2023-02-08 23:42:53,784 INFO [train.py:983] (2/4) Device: cuda:2 +2023-02-08 23:42:53,850 INFO [train.py:992] (2/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r8n07', 'IP address': '10.1.8.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 28, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-08 23:42:53,850 INFO [train.py:994] (2/4) About to create model +2023-02-08 23:42:54,155 INFO [zipformer.py:402] (2/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. 
+2023-02-08 23:42:54,167 INFO [train.py:998] (2/4) Number of model parameters: 20697573 +2023-02-08 23:42:54,168 INFO [checkpoint.py:112] (2/4) Loading checkpoint from pruned_transducer_stateless7_streaming/exp/v1/epoch-27.pt +2023-02-08 23:43:03,631 INFO [train.py:1013] (2/4) Using DDP +2023-02-08 23:43:03,870 INFO [train.py:1030] (2/4) Loading optimizer state dict +2023-02-08 23:43:04,090 INFO [train.py:1038] (2/4) Loading scheduler state dict +2023-02-08 23:43:04,090 INFO [asr_datamodule.py:420] (2/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-08 23:43:04,266 INFO [asr_datamodule.py:224] (2/4) Enable MUSAN +2023-02-08 23:43:04,267 INFO [asr_datamodule.py:225] (2/4) About to get Musan cuts +2023-02-08 23:43:05,857 INFO [asr_datamodule.py:249] (2/4) Enable SpecAugment +2023-02-08 23:43:05,857 INFO [asr_datamodule.py:250] (2/4) Time warp factor: 80 +2023-02-08 23:43:05,857 INFO [asr_datamodule.py:260] (2/4) Num frame mask: 10 +2023-02-08 23:43:05,857 INFO [asr_datamodule.py:273] (2/4) About to create train dataset +2023-02-08 23:43:05,858 INFO [asr_datamodule.py:300] (2/4) Using DynamicBucketingSampler. +2023-02-08 23:43:05,878 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-08 23:43:08,031 INFO [asr_datamodule.py:316] (2/4) About to create train dataloader +2023-02-08 23:43:08,032 INFO [asr_datamodule.py:430] (2/4) About to get dev-clean cuts +2023-02-08 23:43:08,033 INFO [asr_datamodule.py:437] (2/4) About to get dev-other cuts +2023-02-08 23:43:08,034 INFO [asr_datamodule.py:347] (2/4) About to create dev dataset +2023-02-08 23:43:08,393 INFO [asr_datamodule.py:364] (2/4) About to create dev dataloader +2023-02-08 23:43:08,394 INFO [train.py:1122] (2/4) Loading grad scaler state dict +2023-02-08 23:43:20,316 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-08 23:43:25,772 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-08 23:43:26,065 INFO [train.py:901] (2/4) Epoch 28, batch 0, loss[loss=0.2635, simple_loss=0.3266, pruned_loss=0.1002, over 7822.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3266, pruned_loss=0.1002, over 7822.00 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:43:26,065 INFO [train.py:926] (2/4) Computing validation loss +2023-02-08 23:43:38,188 INFO [train.py:935] (2/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2712, pruned_loss=0.03579, over 944034.00 frames. +2023-02-08 23:43:38,189 INFO [train.py:936] (2/4) Maximum memory allocated so far is 5838MB +2023-02-08 23:43:48,595 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218250.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:43:59,673 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-08 23:43:59,745 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218260.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:44:16,006 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.41 vs. 
limit=2.0 +2023-02-08 23:44:20,162 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0747, 1.9676, 2.4129, 2.0784, 2.5189, 2.1905, 2.0254, 1.3193], + device='cuda:2'), covar=tensor([0.5907, 0.5106, 0.2178, 0.3912, 0.2506, 0.3281, 0.2050, 0.5704], + device='cuda:2'), in_proj_covar=tensor([0.0955, 0.1015, 0.0823, 0.0986, 0.1019, 0.0922, 0.0763, 0.0846], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-08 23:44:24,442 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.44 vs. limit=5.0 +2023-02-08 23:44:26,829 INFO [train.py:901] (2/4) Epoch 28, batch 50, loss[loss=0.1997, simple_loss=0.2883, pruned_loss=0.05557, over 8463.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2869, pruned_loss=0.06145, over 365673.98 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:44:44,696 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-08 23:44:48,222 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.503e+02 3.099e+02 3.838e+02 3.677e+03, threshold=6.198e+02, percent-clipped=7.0 +2023-02-08 23:44:58,760 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8713, 1.5754, 3.1736, 1.4389, 2.3785, 3.3934, 3.5531, 2.9075], + device='cuda:2'), covar=tensor([0.1193, 0.1724, 0.0332, 0.2177, 0.0963, 0.0262, 0.0610, 0.0592], + device='cuda:2'), in_proj_covar=tensor([0.0306, 0.0324, 0.0291, 0.0320, 0.0322, 0.0277, 0.0438, 0.0306], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-08 23:45:09,774 INFO [train.py:901] (2/4) Epoch 28, batch 100, loss[loss=0.1887, simple_loss=0.2729, pruned_loss=0.05221, over 8240.00 frames. ], tot_loss[loss=0.2018, simple_loss=0.2852, pruned_loss=0.05914, over 646175.08 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:45:12,261 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-08 23:45:23,499 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.87 vs. limit=5.0 +2023-02-08 23:45:42,216 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218375.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:45:52,960 INFO [train.py:901] (2/4) Epoch 28, batch 150, loss[loss=0.2193, simple_loss=0.3072, pruned_loss=0.06568, over 8290.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2849, pruned_loss=0.05852, over 860056.92 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:46:01,141 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218397.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:46:02,731 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.74 vs. 
limit=2.0 +2023-02-08 23:46:12,820 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.274e+02 2.796e+02 3.416e+02 5.816e+02, threshold=5.591e+02, percent-clipped=0.0 +2023-02-08 23:46:19,735 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218422.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:46:21,826 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4735, 1.3436, 1.7566, 1.1345, 1.1462, 1.7312, 0.3269, 1.1473], + device='cuda:2'), covar=tensor([0.1602, 0.1214, 0.0421, 0.0954, 0.2356, 0.0507, 0.1753, 0.1308], + device='cuda:2'), in_proj_covar=tensor([0.0200, 0.0206, 0.0136, 0.0224, 0.0277, 0.0147, 0.0172, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-08 23:46:32,308 INFO [train.py:901] (2/4) Epoch 28, batch 200, loss[loss=0.1943, simple_loss=0.28, pruned_loss=0.05427, over 8241.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2842, pruned_loss=0.0582, over 1030304.90 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:46:50,628 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218462.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:47:10,732 INFO [train.py:901] (2/4) Epoch 28, batch 250, loss[loss=0.1715, simple_loss=0.2499, pruned_loss=0.0465, over 8244.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2839, pruned_loss=0.05824, over 1160043.40 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:47:23,077 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-08 23:47:30,508 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-08 23:47:31,295 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.405e+02 2.917e+02 3.543e+02 7.929e+02, threshold=5.833e+02, percent-clipped=6.0 +2023-02-08 23:47:33,443 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-08 23:47:41,362 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218527.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:47:48,879 INFO [train.py:901] (2/4) Epoch 28, batch 300, loss[loss=0.1924, simple_loss=0.2804, pruned_loss=0.05223, over 8143.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2836, pruned_loss=0.05814, over 1261330.38 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:47:53,405 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218544.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:11,947 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. 
limit=2.0 +2023-02-08 23:48:14,321 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218572.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:18,151 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218577.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:18,211 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6711, 2.2286, 4.1426, 1.6249, 3.1813, 2.3419, 1.6833, 3.3090], + device='cuda:2'), covar=tensor([0.2203, 0.2813, 0.0829, 0.4877, 0.1785, 0.3288, 0.2772, 0.2095], + device='cuda:2'), in_proj_covar=tensor([0.0541, 0.0636, 0.0565, 0.0671, 0.0658, 0.0614, 0.0567, 0.0645], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-08 23:48:25,696 INFO [train.py:901] (2/4) Epoch 28, batch 350, loss[loss=0.1993, simple_loss=0.2818, pruned_loss=0.05839, over 7820.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2838, pruned_loss=0.05868, over 1338721.29 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:48:28,663 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218592.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:32,352 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8331, 1.4151, 1.7126, 1.3324, 1.0089, 1.4982, 1.6372, 1.3791], + device='cuda:2'), covar=tensor([0.0557, 0.1310, 0.1667, 0.1518, 0.0580, 0.1480, 0.0712, 0.0699], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0112, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-08 23:48:43,888 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.330e+02 2.853e+02 3.797e+02 9.826e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-08 23:48:44,821 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3592, 2.1468, 2.7650, 2.3662, 2.8273, 2.4041, 2.2432, 1.6073], + device='cuda:2'), covar=tensor([0.5949, 0.5276, 0.2069, 0.3875, 0.2510, 0.3152, 0.1865, 0.5629], + device='cuda:2'), in_proj_covar=tensor([0.0961, 0.1019, 0.0824, 0.0987, 0.1020, 0.0924, 0.0764, 0.0847], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-08 23:48:59,282 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218631.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:49:02,080 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.0691, 4.0027, 3.6991, 2.2400, 3.5732, 3.6662, 3.6190, 3.5035], + device='cuda:2'), covar=tensor([0.0781, 0.0635, 0.1047, 0.3996, 0.0935, 0.1122, 0.1370, 0.0883], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0461, 0.0446, 0.0556, 0.0444, 0.0463, 0.0439, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-08 23:49:04,807 INFO [train.py:901] (2/4) Epoch 28, batch 400, loss[loss=0.1504, simple_loss=0.2365, pruned_loss=0.03217, over 8252.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2836, pruned_loss=0.05908, over 1402197.13 frames. 
], batch size: 22, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:49:09,174 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4502, 2.3768, 3.1229, 2.5834, 3.0777, 2.5130, 2.3477, 1.8429], + device='cuda:2'), covar=tensor([0.6009, 0.5255, 0.2239, 0.4061, 0.2771, 0.3260, 0.2060, 0.5945], + device='cuda:2'), in_proj_covar=tensor([0.0961, 0.1018, 0.0824, 0.0987, 0.1019, 0.0925, 0.0764, 0.0846], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-08 23:49:17,852 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218656.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:49:19,967 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218659.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:49:40,568 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218687.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:49:41,121 INFO [train.py:901] (2/4) Epoch 28, batch 450, loss[loss=0.1549, simple_loss=0.2413, pruned_loss=0.03426, over 7796.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2827, pruned_loss=0.05885, over 1448765.14 frames. ], batch size: 19, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:49:59,789 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.362e+02 2.836e+02 3.643e+02 9.062e+02, threshold=5.672e+02, percent-clipped=2.0 +2023-02-08 23:50:18,542 INFO [train.py:901] (2/4) Epoch 28, batch 500, loss[loss=0.1613, simple_loss=0.249, pruned_loss=0.03679, over 7539.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2813, pruned_loss=0.05789, over 1485993.51 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:50:37,341 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1509, 4.0630, 3.6793, 2.1175, 3.5538, 3.7770, 3.6208, 3.6920], + device='cuda:2'), covar=tensor([0.0745, 0.0578, 0.1010, 0.4287, 0.0944, 0.0947, 0.1337, 0.0697], + device='cuda:2'), in_proj_covar=tensor([0.0542, 0.0462, 0.0446, 0.0557, 0.0445, 0.0465, 0.0440, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-08 23:50:57,135 INFO [train.py:901] (2/4) Epoch 28, batch 550, loss[loss=0.2036, simple_loss=0.2755, pruned_loss=0.0658, over 8074.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2827, pruned_loss=0.05879, over 1517059.57 frames. ], batch size: 21, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:51:16,043 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.392e+02 2.925e+02 3.560e+02 1.211e+03, threshold=5.850e+02, percent-clipped=4.0 +2023-02-08 23:51:30,169 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218833.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:51:33,478 INFO [train.py:901] (2/4) Epoch 28, batch 600, loss[loss=0.1787, simple_loss=0.2478, pruned_loss=0.0548, over 7539.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2816, pruned_loss=0.05821, over 1533321.52 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:51:53,195 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218858.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:51:56,594 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-08 23:51:59,013 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8156, 1.8353, 1.7045, 2.3518, 1.1245, 1.6052, 1.8483, 1.8676], + device='cuda:2'), covar=tensor([0.0747, 0.0802, 0.0842, 0.0371, 0.0984, 0.1199, 0.0654, 0.0704], + device='cuda:2'), in_proj_covar=tensor([0.0229, 0.0193, 0.0242, 0.0211, 0.0201, 0.0244, 0.0247, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-08 23:52:04,161 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218871.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:18,548 INFO [train.py:901] (2/4) Epoch 28, batch 650, loss[loss=0.1541, simple_loss=0.2461, pruned_loss=0.0311, over 7702.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2796, pruned_loss=0.05718, over 1550122.00 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:40,031 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.221e+02 2.637e+02 3.403e+02 7.509e+02, threshold=5.274e+02, percent-clipped=1.0 +2023-02-08 23:52:41,075 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218915.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:55,980 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218936.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:57,376 INFO [train.py:901] (2/4) Epoch 28, batch 700, loss[loss=0.2091, simple_loss=0.2992, pruned_loss=0.05949, over 8504.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2816, pruned_loss=0.05783, over 1568759.29 frames. ], batch size: 26, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:59,039 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218940.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:01,202 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218943.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:18,864 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218968.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:31,058 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218983.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:33,198 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218986.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:34,532 INFO [train.py:901] (2/4) Epoch 28, batch 750, loss[loss=0.2096, simple_loss=0.2896, pruned_loss=0.06475, over 8255.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2821, pruned_loss=0.05839, over 1578459.95 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:53:46,822 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219002.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:55,127 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.280e+02 2.810e+02 3.388e+02 7.203e+02, threshold=5.620e+02, percent-clipped=6.0 +2023-02-08 23:53:55,167 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-08 23:54:04,603 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-08 23:54:12,581 INFO [train.py:901] (2/4) Epoch 28, batch 800, loss[loss=0.2238, simple_loss=0.3091, pruned_loss=0.06923, over 8103.00 frames. 
], tot_loss[loss=0.1993, simple_loss=0.2816, pruned_loss=0.05844, over 1583476.12 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:13,441 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219039.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:22,035 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219051.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:49,147 INFO [train.py:901] (2/4) Epoch 28, batch 850, loss[loss=0.191, simple_loss=0.2696, pruned_loss=0.05617, over 7926.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2815, pruned_loss=0.0579, over 1588019.05 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:50,777 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219090.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:55:10,262 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.432e+02 3.183e+02 3.929e+02 8.024e+02, threshold=6.365e+02, percent-clipped=6.0 +2023-02-08 23:55:27,563 INFO [train.py:901] (2/4) Epoch 28, batch 900, loss[loss=0.192, simple_loss=0.2689, pruned_loss=0.05757, over 7421.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2813, pruned_loss=0.05792, over 1596406.60 frames. ], batch size: 17, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:55:34,942 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.63 vs. limit=5.0 +2023-02-08 23:56:03,865 INFO [train.py:901] (2/4) Epoch 28, batch 950, loss[loss=0.179, simple_loss=0.2566, pruned_loss=0.05066, over 7713.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2814, pruned_loss=0.05745, over 1601908.35 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:22,890 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.524e+02 3.053e+02 4.249e+02 9.516e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-08 23:56:29,706 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219221.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:56:34,858 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-08 23:56:43,600 INFO [train.py:901] (2/4) Epoch 28, batch 1000, loss[loss=0.1658, simple_loss=0.2452, pruned_loss=0.04319, over 7980.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2799, pruned_loss=0.05662, over 1603380.68 frames. ], batch size: 21, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:46,627 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219242.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:56:47,401 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2993, 2.2642, 3.0140, 2.3807, 2.9722, 2.4137, 2.2323, 1.8164], + device='cuda:2'), covar=tensor([0.6292, 0.5632, 0.2271, 0.4523, 0.2941, 0.3513, 0.2141, 0.6113], + device='cuda:2'), in_proj_covar=tensor([0.0967, 0.1026, 0.0832, 0.0994, 0.1027, 0.0932, 0.0774, 0.0852], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-08 23:57:04,456 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219267.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:11,558 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. 
Duration: 25.285 +2023-02-08 23:57:19,441 INFO [train.py:901] (2/4) Epoch 28, batch 1050, loss[loss=0.1824, simple_loss=0.2564, pruned_loss=0.05425, over 7530.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2798, pruned_loss=0.05659, over 1602463.54 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:57:23,705 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-08 23:57:33,329 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219307.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:38,274 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.456e+02 2.957e+02 3.788e+02 8.190e+02, threshold=5.915e+02, percent-clipped=1.0 +2023-02-08 23:57:47,817 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219327.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:52,096 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219332.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:56,800 INFO [train.py:901] (2/4) Epoch 28, batch 1100, loss[loss=0.2268, simple_loss=0.311, pruned_loss=0.07135, over 8508.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2805, pruned_loss=0.05696, over 1601832.43 frames. ], batch size: 26, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:03,512 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219346.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:12,850 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7590, 1.9769, 5.8661, 2.1061, 5.3045, 4.8292, 5.4118, 5.2699], + device='cuda:2'), covar=tensor([0.0479, 0.4567, 0.0494, 0.4497, 0.1065, 0.0979, 0.0520, 0.0535], + device='cuda:2'), in_proj_covar=tensor([0.0679, 0.0662, 0.0731, 0.0657, 0.0744, 0.0635, 0.0639, 0.0714], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-08 23:58:13,593 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219360.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:58:30,138 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219383.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:33,477 INFO [train.py:901] (2/4) Epoch 28, batch 1150, loss[loss=0.2489, simple_loss=0.3118, pruned_loss=0.09298, over 7251.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2815, pruned_loss=0.05729, over 1597636.90 frames. ], batch size: 71, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:37,161 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-08 23:58:52,533 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.386e+02 3.071e+02 3.782e+02 1.293e+03, threshold=6.141e+02, percent-clipped=2.0 +2023-02-08 23:59:02,425 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.17 vs. limit=5.0 +2023-02-08 23:59:07,140 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219434.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:10,002 INFO [train.py:901] (2/4) Epoch 28, batch 1200, loss[loss=0.1793, simple_loss=0.2664, pruned_loss=0.04609, over 8027.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.282, pruned_loss=0.0573, over 1603418.65 frames. 
], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:13,047 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219442.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:27,991 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219461.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:30,760 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5892, 2.8791, 2.4310, 3.9538, 1.8408, 2.2177, 2.7122, 2.7695], + device='cuda:2'), covar=tensor([0.0687, 0.0821, 0.0748, 0.0308, 0.1070, 0.1209, 0.0893, 0.0854], + device='cuda:2'), in_proj_covar=tensor([0.0227, 0.0192, 0.0241, 0.0209, 0.0200, 0.0243, 0.0246, 0.0202], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-08 23:59:47,602 INFO [train.py:901] (2/4) Epoch 28, batch 1250, loss[loss=0.1804, simple_loss=0.2727, pruned_loss=0.04406, over 7811.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05698, over 1607890.12 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:55,045 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:02,360 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219508.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:06,605 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.357e+02 2.809e+02 3.466e+02 7.121e+02, threshold=5.618e+02, percent-clipped=3.0 +2023-02-09 00:00:20,247 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3929, 2.6987, 2.9361, 1.6697, 3.2752, 2.1245, 1.6339, 2.3047], + device='cuda:2'), covar=tensor([0.0926, 0.0441, 0.0336, 0.0925, 0.0543, 0.0920, 0.1050, 0.0646], + device='cuda:2'), in_proj_covar=tensor([0.0475, 0.0412, 0.0366, 0.0461, 0.0395, 0.0552, 0.0403, 0.0444], + device='cuda:2'), out_proj_covar=tensor([1.2574e-04, 1.0706e-04, 9.5227e-05, 1.2065e-04, 1.0346e-04, 1.5374e-04, + 1.0753e-04, 1.1646e-04], device='cuda:2') +2023-02-09 00:00:23,408 INFO [train.py:901] (2/4) Epoch 28, batch 1300, loss[loss=0.2095, simple_loss=0.3053, pruned_loss=0.05685, over 8468.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.05736, over 1606368.50 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0 +2023-02-09 00:00:31,528 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:42,938 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219565.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:01:02,337 INFO [train.py:901] (2/4) Epoch 28, batch 1350, loss[loss=0.1887, simple_loss=0.2634, pruned_loss=0.05705, over 7940.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2815, pruned_loss=0.05703, over 1606961.30 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:01:22,006 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.365e+02 2.856e+02 3.377e+02 7.819e+02, threshold=5.713e+02, percent-clipped=4.0 +2023-02-09 00:01:39,672 INFO [train.py:901] (2/4) Epoch 28, batch 1400, loss[loss=0.2064, simple_loss=0.2924, pruned_loss=0.06023, over 8465.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05667, over 1610024.88 frames. 
], batch size: 49, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:01:47,197 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7358, 4.7313, 4.2370, 2.3327, 4.1866, 4.2991, 4.2153, 4.1477], + device='cuda:2'), covar=tensor([0.0638, 0.0471, 0.0951, 0.3865, 0.0900, 0.1059, 0.1130, 0.0696], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0459, 0.0447, 0.0555, 0.0442, 0.0464, 0.0438, 0.0405], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:02:09,800 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219680.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:15,332 INFO [train.py:901] (2/4) Epoch 28, batch 1450, loss[loss=0.2335, simple_loss=0.3115, pruned_loss=0.07779, over 7210.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2808, pruned_loss=0.05658, over 1613804.92 frames. ], batch size: 71, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:22,723 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2437, 2.5251, 2.1514, 3.7971, 1.6476, 1.8709, 2.4330, 2.6917], + device='cuda:2'), covar=tensor([0.0831, 0.0860, 0.1024, 0.0284, 0.1092, 0.1387, 0.0891, 0.0831], + device='cuda:2'), in_proj_covar=tensor([0.0226, 0.0191, 0.0240, 0.0208, 0.0199, 0.0242, 0.0245, 0.0202], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 00:02:24,272 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:25,389 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 00:02:28,351 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219704.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:02:33,734 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.52 vs. limit=5.0 +2023-02-09 00:02:36,733 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.298e+02 2.874e+02 3.536e+02 7.746e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-09 00:02:39,220 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:43,507 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:54,214 INFO [train.py:901] (2/4) Epoch 28, batch 1500, loss[loss=0.1848, simple_loss=0.2562, pruned_loss=0.05666, over 7644.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05673, over 1615298.17 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:55,522 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 00:02:57,282 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:00,791 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. 
limit=2.0 +2023-02-09 00:03:05,714 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219754.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:23,845 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219779.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:25,277 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219781.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:29,883 INFO [train.py:901] (2/4) Epoch 28, batch 1550, loss[loss=0.1906, simple_loss=0.2643, pruned_loss=0.05843, over 7247.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2824, pruned_loss=0.05755, over 1615400.98 frames. ], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:03:42,604 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:49,436 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.435e+02 2.945e+02 3.827e+02 6.900e+02, threshold=5.889e+02, percent-clipped=4.0 +2023-02-09 00:03:54,630 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219819.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:04:03,146 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:08,593 INFO [train.py:901] (2/4) Epoch 28, batch 1600, loss[loss=0.1835, simple_loss=0.2727, pruned_loss=0.04719, over 8229.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.05694, over 1617169.30 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:04:18,710 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219852.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:35,327 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:44,470 INFO [train.py:901] (2/4) Epoch 28, batch 1650, loss[loss=0.2327, simple_loss=0.2998, pruned_loss=0.08279, over 7214.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05713, over 1612516.57 frames. 
], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:02,696 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.482e+02 2.898e+02 3.443e+02 5.647e+02, threshold=5.797e+02, percent-clipped=0.0 +2023-02-09 00:05:06,525 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0622, 1.8213, 2.3493, 2.0323, 2.3349, 2.1793, 1.9544, 1.1932], + device='cuda:2'), covar=tensor([0.6152, 0.5302, 0.2051, 0.3865, 0.2601, 0.3317, 0.2063, 0.5637], + device='cuda:2'), in_proj_covar=tensor([0.0967, 0.1026, 0.0832, 0.0998, 0.1026, 0.0933, 0.0774, 0.0853], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:05:09,983 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3391, 2.1298, 2.6953, 2.3028, 2.6782, 2.3553, 2.2362, 1.7915], + device='cuda:2'), covar=tensor([0.5124, 0.4606, 0.2068, 0.3576, 0.2357, 0.3132, 0.1773, 0.4812], + device='cuda:2'), in_proj_covar=tensor([0.0968, 0.1026, 0.0832, 0.0998, 0.1026, 0.0933, 0.0774, 0.0853], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:05:20,265 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219936.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:21,472 INFO [train.py:901] (2/4) Epoch 28, batch 1700, loss[loss=0.2099, simple_loss=0.2784, pruned_loss=0.07072, over 8243.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05689, over 1615003.14 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:39,122 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219961.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:43,291 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219967.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:56,599 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3101, 2.1409, 1.7440, 1.9853, 1.7187, 1.5918, 1.7240, 1.7516], + device='cuda:2'), covar=tensor([0.1289, 0.0484, 0.1278, 0.0526, 0.0833, 0.1522, 0.0960, 0.0911], + device='cuda:2'), in_proj_covar=tensor([0.0360, 0.0245, 0.0344, 0.0316, 0.0307, 0.0350, 0.0351, 0.0326], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 00:05:57,778 INFO [train.py:901] (2/4) Epoch 28, batch 1750, loss[loss=0.2009, simple_loss=0.2932, pruned_loss=0.05428, over 8468.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05652, over 1616327.69 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:06:17,596 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.339e+02 2.848e+02 3.606e+02 1.047e+03, threshold=5.695e+02, percent-clipped=4.0 +2023-02-09 00:06:34,451 INFO [train.py:901] (2/4) Epoch 28, batch 1800, loss[loss=0.1867, simple_loss=0.2715, pruned_loss=0.0509, over 7687.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2809, pruned_loss=0.05696, over 1617416.00 frames. 
], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:06:39,240 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1652, 1.4412, 4.3716, 1.6057, 3.9261, 3.6999, 3.9901, 3.8659], + device='cuda:2'), covar=tensor([0.0719, 0.4677, 0.0574, 0.4393, 0.1140, 0.0958, 0.0635, 0.0736], + device='cuda:2'), in_proj_covar=tensor([0.0681, 0.0668, 0.0738, 0.0661, 0.0750, 0.0638, 0.0643, 0.0720], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:07:02,982 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220075.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:07:11,958 INFO [train.py:901] (2/4) Epoch 28, batch 1850, loss[loss=0.2384, simple_loss=0.3099, pruned_loss=0.08342, over 6970.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05747, over 1615844.59 frames. ], batch size: 71, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:20,524 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220100.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:07:23,911 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220105.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:30,313 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.328e+02 2.682e+02 3.608e+02 8.535e+02, threshold=5.364e+02, percent-clipped=7.0 +2023-02-09 00:07:31,761 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:38,070 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220125.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:47,183 INFO [train.py:901] (2/4) Epoch 28, batch 1900, loss[loss=0.189, simple_loss=0.2788, pruned_loss=0.04966, over 8188.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2817, pruned_loss=0.05709, over 1621159.52 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:19,370 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 00:08:25,770 INFO [train.py:901] (2/4) Epoch 28, batch 1950, loss[loss=0.1864, simple_loss=0.2684, pruned_loss=0.05221, over 7977.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05798, over 1618438.41 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:32,997 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 00:08:44,725 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.461e+02 2.916e+02 3.869e+02 7.609e+02, threshold=5.833e+02, percent-clipped=8.0 +2023-02-09 00:08:48,265 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220219.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:51,058 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:53,663 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 00:09:01,379 INFO [train.py:901] (2/4) Epoch 28, batch 2000, loss[loss=0.1948, simple_loss=0.2643, pruned_loss=0.06261, over 7242.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05749, over 1620265.95 frames. 
], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:02,181 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4653, 1.3539, 2.6893, 1.2221, 2.1691, 2.9162, 3.1884, 2.1610], + device='cuda:2'), covar=tensor([0.1739, 0.2110, 0.0570, 0.2864, 0.1118, 0.0459, 0.0763, 0.1122], + device='cuda:2'), in_proj_covar=tensor([0.0309, 0.0327, 0.0293, 0.0320, 0.0322, 0.0277, 0.0441, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 00:09:02,868 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:07,807 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220247.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:08,577 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220248.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:29,131 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:37,451 INFO [train.py:901] (2/4) Epoch 28, batch 2050, loss[loss=0.1849, simple_loss=0.2854, pruned_loss=0.0422, over 8244.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05719, over 1620625.74 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:58,200 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.398e+02 2.757e+02 3.324e+02 6.340e+02, threshold=5.514e+02, percent-clipped=2.0 +2023-02-09 00:10:12,783 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220334.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:10:15,428 INFO [train.py:901] (2/4) Epoch 28, batch 2100, loss[loss=0.1959, simple_loss=0.2835, pruned_loss=0.05419, over 8191.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.0579, over 1624215.84 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:10:51,250 INFO [train.py:901] (2/4) Epoch 28, batch 2150, loss[loss=0.2212, simple_loss=0.2965, pruned_loss=0.07293, over 8140.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2838, pruned_loss=0.05839, over 1621612.89 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:11,476 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.504e+02 2.973e+02 4.041e+02 1.001e+03, threshold=5.945e+02, percent-clipped=8.0 +2023-02-09 00:11:28,321 INFO [train.py:901] (2/4) Epoch 28, batch 2200, loss[loss=0.2285, simple_loss=0.3105, pruned_loss=0.07328, over 8080.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.05819, over 1618039.85 frames. 
], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:36,282 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220449.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:11:44,020 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220460.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:11:48,267 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6055, 1.8589, 1.9747, 1.3070, 2.0832, 1.4573, 0.5349, 1.9078], + device='cuda:2'), covar=tensor([0.0636, 0.0412, 0.0334, 0.0692, 0.0447, 0.0994, 0.1020, 0.0330], + device='cuda:2'), in_proj_covar=tensor([0.0477, 0.0415, 0.0366, 0.0464, 0.0399, 0.0554, 0.0404, 0.0446], + device='cuda:2'), out_proj_covar=tensor([1.2618e-04, 1.0759e-04, 9.5326e-05, 1.2143e-04, 1.0427e-04, 1.5443e-04, + 1.0803e-04, 1.1695e-04], device='cuda:2') +2023-02-09 00:11:51,008 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6878, 1.8809, 1.9840, 1.4286, 2.1242, 1.5175, 0.7317, 1.9959], + device='cuda:2'), covar=tensor([0.0715, 0.0447, 0.0344, 0.0715, 0.0405, 0.0963, 0.1106, 0.0359], + device='cuda:2'), in_proj_covar=tensor([0.0477, 0.0415, 0.0366, 0.0465, 0.0399, 0.0554, 0.0405, 0.0447], + device='cuda:2'), out_proj_covar=tensor([1.2627e-04, 1.0766e-04, 9.5398e-05, 1.2151e-04, 1.0434e-04, 1.5453e-04, + 1.0809e-04, 1.1701e-04], device='cuda:2') +2023-02-09 00:12:03,395 INFO [train.py:901] (2/4) Epoch 28, batch 2250, loss[loss=0.2269, simple_loss=0.3063, pruned_loss=0.07376, over 7827.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.283, pruned_loss=0.05787, over 1618483.61 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:09,250 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220496.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:22,281 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.331e+02 2.835e+02 3.325e+02 7.200e+02, threshold=5.671e+02, percent-clipped=3.0 +2023-02-09 00:12:24,961 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-09 00:12:27,561 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220521.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:41,520 INFO [train.py:901] (2/4) Epoch 28, batch 2300, loss[loss=0.1923, simple_loss=0.2896, pruned_loss=0.04754, over 8451.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2824, pruned_loss=0.05757, over 1617834.21 frames. ], batch size: 29, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:43,141 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. 
limit=5.0 +2023-02-09 00:12:59,726 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:06,020 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220573.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:07,462 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220575.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:13,909 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7953, 1.6573, 1.8571, 1.6862, 1.0713, 1.6150, 2.3974, 2.0934], + device='cuda:2'), covar=tensor([0.0451, 0.1257, 0.1731, 0.1426, 0.0612, 0.1462, 0.0595, 0.0595], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0153, 0.0189, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 00:13:16,606 INFO [train.py:901] (2/4) Epoch 28, batch 2350, loss[loss=0.2506, simple_loss=0.3298, pruned_loss=0.08565, over 7243.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2837, pruned_loss=0.05856, over 1612780.69 frames. ], batch size: 71, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:13:18,226 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220590.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:18,828 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220591.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:35,684 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.370e+02 2.329e+02 2.956e+02 3.826e+02 8.837e+02, threshold=5.912e+02, percent-clipped=4.0 +2023-02-09 00:13:36,666 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220615.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:40,247 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:53,607 INFO [train.py:901] (2/4) Epoch 28, batch 2400, loss[loss=0.1821, simple_loss=0.267, pruned_loss=0.04865, over 8087.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2828, pruned_loss=0.05796, over 1613259.73 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:01,101 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.32 vs. limit=2.0 +2023-02-09 00:14:16,686 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220669.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:29,715 INFO [train.py:901] (2/4) Epoch 28, batch 2450, loss[loss=0.1773, simple_loss=0.2562, pruned_loss=0.04923, over 7420.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2819, pruned_loss=0.05767, over 1612186.10 frames. ], batch size: 17, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:42,731 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220706.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:48,791 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.507e+02 3.309e+02 3.917e+02 8.053e+02, threshold=6.618e+02, percent-clipped=4.0 +2023-02-09 00:15:03,094 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220735.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:15:05,105 INFO [train.py:901] (2/4) Epoch 28, batch 2500, loss[loss=0.2104, simple_loss=0.3016, pruned_loss=0.05953, over 8202.00 frames. 
], tot_loss[loss=0.197, simple_loss=0.2806, pruned_loss=0.05672, over 1609894.58 frames. ], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:15:15,089 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9384, 1.4507, 1.5897, 1.3703, 1.0203, 1.3807, 1.7554, 1.3954], + device='cuda:2'), covar=tensor([0.0558, 0.1332, 0.1772, 0.1549, 0.0603, 0.1575, 0.0705, 0.0718], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 00:15:42,750 INFO [train.py:901] (2/4) Epoch 28, batch 2550, loss[loss=0.2096, simple_loss=0.2821, pruned_loss=0.06857, over 7928.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05639, over 1615411.39 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:02,765 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.505e+02 3.011e+02 3.782e+02 1.017e+03, threshold=6.023e+02, percent-clipped=3.0 +2023-02-09 00:16:06,706 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220820.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:14,501 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220831.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:19,226 INFO [train.py:901] (2/4) Epoch 28, batch 2600, loss[loss=0.182, simple_loss=0.2536, pruned_loss=0.0552, over 7651.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2805, pruned_loss=0.05671, over 1616423.70 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:20,647 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220840.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:24,289 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:32,229 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220856.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:57,368 INFO [train.py:901] (2/4) Epoch 28, batch 2650, loss[loss=0.1831, simple_loss=0.2745, pruned_loss=0.04589, over 8340.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2806, pruned_loss=0.05651, over 1613835.26 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:01,285 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.76 vs. limit=5.0 +2023-02-09 00:17:16,289 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.381e+02 2.801e+02 3.642e+02 5.464e+02, threshold=5.602e+02, percent-clipped=0.0 +2023-02-09 00:17:17,797 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220917.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:21,290 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220922.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:32,909 INFO [train.py:901] (2/4) Epoch 28, batch 2700, loss[loss=0.1999, simple_loss=0.2839, pruned_loss=0.05795, over 8181.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2822, pruned_loss=0.05765, over 1611543.14 frames. 
], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:50,478 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220962.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,047 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,538 INFO [train.py:901] (2/4) Epoch 28, batch 2750, loss[loss=0.1833, simple_loss=0.2692, pruned_loss=0.04867, over 7544.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2812, pruned_loss=0.05765, over 1612263.05 frames. ], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:11,782 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:29,677 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221013.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:31,037 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 2.908e+02 3.517e+02 7.342e+02, threshold=5.816e+02, percent-clipped=5.0 +2023-02-09 00:18:31,973 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221016.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:35,000 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-09 00:18:43,192 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9000, 3.8341, 3.5310, 1.8400, 3.4705, 3.5392, 3.3920, 3.3822], + device='cuda:2'), covar=tensor([0.0752, 0.0597, 0.0965, 0.3952, 0.0875, 0.1086, 0.1275, 0.0942], + device='cuda:2'), in_proj_covar=tensor([0.0536, 0.0453, 0.0442, 0.0552, 0.0438, 0.0460, 0.0434, 0.0403], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:18:43,281 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221032.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:47,245 INFO [train.py:901] (2/4) Epoch 28, batch 2800, loss[loss=0.1487, simple_loss=0.2272, pruned_loss=0.03512, over 7430.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2796, pruned_loss=0.05674, over 1610519.38 frames. ], batch size: 17, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:19:22,678 INFO [train.py:901] (2/4) Epoch 28, batch 2850, loss[loss=0.1686, simple_loss=0.2609, pruned_loss=0.03814, over 8288.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2805, pruned_loss=0.05686, over 1612968.79 frames. 
], batch size: 23, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:19:36,290 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4658, 1.2762, 2.3995, 1.3612, 2.2346, 2.5020, 2.7109, 2.1620], + device='cuda:2'), covar=tensor([0.1172, 0.1485, 0.0446, 0.2046, 0.0770, 0.0402, 0.0764, 0.0635], + device='cuda:2'), in_proj_covar=tensor([0.0309, 0.0329, 0.0295, 0.0322, 0.0325, 0.0278, 0.0443, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 00:19:43,226 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.366e+02 2.856e+02 3.627e+02 6.501e+02, threshold=5.713e+02, percent-clipped=2.0 +2023-02-09 00:19:44,152 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8124, 1.5132, 1.8487, 1.4904, 1.0519, 1.5485, 2.1185, 1.8273], + device='cuda:2'), covar=tensor([0.0443, 0.1318, 0.1707, 0.1516, 0.0601, 0.1490, 0.0632, 0.0680], + device='cuda:2'), in_proj_covar=tensor([0.0098, 0.0152, 0.0188, 0.0161, 0.0101, 0.0162, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 00:19:45,663 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3917, 2.7688, 2.9380, 1.8567, 3.1878, 2.0215, 1.8269, 2.4002], + device='cuda:2'), covar=tensor([0.0997, 0.0423, 0.0348, 0.0985, 0.0523, 0.1026, 0.1033, 0.0574], + device='cuda:2'), in_proj_covar=tensor([0.0479, 0.0415, 0.0368, 0.0464, 0.0399, 0.0555, 0.0406, 0.0447], + device='cuda:2'), out_proj_covar=tensor([1.2668e-04, 1.0748e-04, 9.5821e-05, 1.2132e-04, 1.0425e-04, 1.5453e-04, + 1.0846e-04, 1.1720e-04], device='cuda:2') +2023-02-09 00:19:53,961 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221128.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:00,662 INFO [train.py:901] (2/4) Epoch 28, batch 2900, loss[loss=0.2034, simple_loss=0.2939, pruned_loss=0.05643, over 8495.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2819, pruned_loss=0.05749, over 1608909.03 frames. ], batch size: 28, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:14,797 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6507, 5.6749, 5.0550, 2.3394, 5.1407, 5.3875, 5.2084, 5.2078], + device='cuda:2'), covar=tensor([0.0489, 0.0357, 0.0821, 0.4420, 0.0702, 0.0744, 0.1037, 0.0624], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0456, 0.0445, 0.0556, 0.0440, 0.0463, 0.0437, 0.0404], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:20:32,280 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 00:20:33,725 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221184.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:36,493 INFO [train.py:901] (2/4) Epoch 28, batch 2950, loss[loss=0.1687, simple_loss=0.2489, pruned_loss=0.04424, over 7806.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2814, pruned_loss=0.05729, over 1607630.45 frames. 
], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:55,456 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.299e+02 2.993e+02 3.879e+02 1.208e+03, threshold=5.985e+02, percent-clipped=10.0 +2023-02-09 00:21:13,540 INFO [train.py:901] (2/4) Epoch 28, batch 3000, loss[loss=0.2281, simple_loss=0.3065, pruned_loss=0.07485, over 8321.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2807, pruned_loss=0.05676, over 1611284.44 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:21:13,541 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 00:21:31,975 INFO [train.py:935] (2/4) Epoch 28, validation: loss=0.1712, simple_loss=0.2708, pruned_loss=0.03578, over 944034.00 frames. +2023-02-09 00:21:31,976 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6510MB +2023-02-09 00:21:54,457 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:01,998 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7522, 1.7392, 1.6383, 2.1061, 1.0557, 1.5030, 1.6533, 1.8220], + device='cuda:2'), covar=tensor([0.0735, 0.0778, 0.0879, 0.0546, 0.1054, 0.1179, 0.0711, 0.0663], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0194, 0.0244, 0.0212, 0.0202, 0.0245, 0.0249, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 00:22:10,137 INFO [train.py:901] (2/4) Epoch 28, batch 3050, loss[loss=0.2035, simple_loss=0.2853, pruned_loss=0.06087, over 6987.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.283, pruned_loss=0.05785, over 1613862.82 frames. ], batch size: 71, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:22:10,366 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221288.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:18,080 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221299.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:28,218 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221313.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:29,357 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.361e+02 2.830e+02 3.600e+02 1.199e+03, threshold=5.660e+02, percent-clipped=4.0 +2023-02-09 00:22:45,361 INFO [train.py:901] (2/4) Epoch 28, batch 3100, loss[loss=0.2137, simple_loss=0.2974, pruned_loss=0.06501, over 8484.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2827, pruned_loss=0.05766, over 1616575.90 frames. 
], batch size: 28, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:18,459 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221381.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:20,619 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221384.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:21,943 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0382, 1.3670, 3.1799, 1.5703, 2.2751, 3.3991, 3.5504, 2.9291], + device='cuda:2'), covar=tensor([0.1028, 0.1883, 0.0314, 0.2029, 0.1026, 0.0243, 0.0453, 0.0516], + device='cuda:2'), in_proj_covar=tensor([0.0309, 0.0330, 0.0294, 0.0323, 0.0326, 0.0278, 0.0443, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 00:23:23,254 INFO [train.py:901] (2/4) Epoch 28, batch 3150, loss[loss=0.2397, simple_loss=0.3194, pruned_loss=0.07997, over 8499.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05738, over 1617280.27 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:27,178 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3609, 1.5762, 2.0058, 1.2751, 1.4591, 1.5650, 1.4320, 1.3772], + device='cuda:2'), covar=tensor([0.2111, 0.2695, 0.1154, 0.5035, 0.2240, 0.3747, 0.2746, 0.2618], + device='cuda:2'), in_proj_covar=tensor([0.0537, 0.0636, 0.0563, 0.0669, 0.0659, 0.0608, 0.0562, 0.0645], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:23:38,989 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221409.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:43,020 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.344e+02 3.031e+02 3.872e+02 9.124e+02, threshold=6.062e+02, percent-clipped=5.0 +2023-02-09 00:24:00,284 INFO [train.py:901] (2/4) Epoch 28, batch 3200, loss[loss=0.2135, simple_loss=0.2849, pruned_loss=0.07111, over 7979.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05782, over 1617519.62 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:20,317 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2941, 2.5559, 2.8377, 1.6528, 3.1946, 1.9329, 1.5424, 2.2745], + device='cuda:2'), covar=tensor([0.0882, 0.0459, 0.0336, 0.0894, 0.0475, 0.0957, 0.1142, 0.0661], + device='cuda:2'), in_proj_covar=tensor([0.0477, 0.0413, 0.0366, 0.0463, 0.0397, 0.0553, 0.0405, 0.0446], + device='cuda:2'), out_proj_covar=tensor([1.2618e-04, 1.0701e-04, 9.5382e-05, 1.2113e-04, 1.0373e-04, 1.5417e-04, + 1.0822e-04, 1.1671e-04], device='cuda:2') +2023-02-09 00:24:36,745 INFO [train.py:901] (2/4) Epoch 28, batch 3250, loss[loss=0.1867, simple_loss=0.2707, pruned_loss=0.05131, over 7525.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2825, pruned_loss=0.05698, over 1614784.49 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:56,686 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.800e+02 3.771e+02 8.910e+02, threshold=5.600e+02, percent-clipped=3.0 +2023-02-09 00:25:04,061 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:12,963 INFO [train.py:901] (2/4) Epoch 28, batch 3300, loss[loss=0.2082, simple_loss=0.2945, pruned_loss=0.06092, over 8365.00 frames. 
], tot_loss[loss=0.1991, simple_loss=0.2833, pruned_loss=0.05742, over 1616501.92 frames. ], batch size: 24, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:25:23,906 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.31 vs. limit=2.0 +2023-02-09 00:25:25,043 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221555.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:42,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:48,454 INFO [train.py:901] (2/4) Epoch 28, batch 3350, loss[loss=0.2246, simple_loss=0.3077, pruned_loss=0.07078, over 8596.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2838, pruned_loss=0.05748, over 1619063.80 frames. ], batch size: 50, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:09,958 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.531e+02 3.062e+02 3.663e+02 8.444e+02, threshold=6.124e+02, percent-clipped=3.0 +2023-02-09 00:26:23,943 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4784, 1.4154, 1.8290, 1.2299, 1.1552, 1.7979, 0.2241, 1.1481], + device='cuda:2'), covar=tensor([0.1530, 0.1140, 0.0374, 0.0820, 0.2272, 0.0447, 0.1789, 0.1113], + device='cuda:2'), in_proj_covar=tensor([0.0203, 0.0208, 0.0138, 0.0226, 0.0281, 0.0148, 0.0176, 0.0201], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 00:26:26,129 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221637.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:26:26,629 INFO [train.py:901] (2/4) Epoch 28, batch 3400, loss[loss=0.1935, simple_loss=0.2847, pruned_loss=0.05116, over 8463.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2833, pruned_loss=0.0575, over 1618110.24 frames. ], batch size: 25, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:43,876 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:27:02,342 INFO [train.py:901] (2/4) Epoch 28, batch 3450, loss[loss=0.2389, simple_loss=0.3171, pruned_loss=0.08033, over 8444.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2835, pruned_loss=0.05745, over 1620447.52 frames. ], batch size: 27, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:21,423 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.306e+02 2.763e+02 3.583e+02 8.756e+02, threshold=5.526e+02, percent-clipped=3.0 +2023-02-09 00:27:39,496 INFO [train.py:901] (2/4) Epoch 28, batch 3500, loss[loss=0.1745, simple_loss=0.2564, pruned_loss=0.04633, over 7808.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2839, pruned_loss=0.05793, over 1616109.87 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:28:03,514 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 00:28:07,244 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:28:15,811 INFO [train.py:901] (2/4) Epoch 28, batch 3550, loss[loss=0.1585, simple_loss=0.2499, pruned_loss=0.03356, over 8514.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2825, pruned_loss=0.05753, over 1614205.57 frames. 
], batch size: 28, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:28:35,273 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.405e+02 2.949e+02 3.672e+02 8.337e+02, threshold=5.897e+02, percent-clipped=3.0 +2023-02-09 00:28:52,586 INFO [train.py:901] (2/4) Epoch 28, batch 3600, loss[loss=0.1704, simple_loss=0.2504, pruned_loss=0.04518, over 7651.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2823, pruned_loss=0.05724, over 1610055.06 frames. ], batch size: 19, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:15,453 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221869.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:20,482 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.7203, 5.8670, 5.1273, 2.5728, 5.1723, 5.5467, 5.3092, 5.3161], + device='cuda:2'), covar=tensor([0.0497, 0.0309, 0.0804, 0.4166, 0.0704, 0.0803, 0.1034, 0.0570], + device='cuda:2'), in_proj_covar=tensor([0.0541, 0.0456, 0.0447, 0.0559, 0.0443, 0.0464, 0.0440, 0.0406], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:29:28,481 INFO [train.py:901] (2/4) Epoch 28, batch 3650, loss[loss=0.1561, simple_loss=0.2444, pruned_loss=0.0339, over 7822.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2838, pruned_loss=0.05837, over 1614627.59 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:42,348 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:47,070 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.399e+02 3.022e+02 3.885e+02 8.966e+02, threshold=6.044e+02, percent-clipped=2.0 +2023-02-09 00:30:02,963 INFO [train.py:901] (2/4) Epoch 28, batch 3700, loss[loss=0.1848, simple_loss=0.2699, pruned_loss=0.04987, over 8028.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.2844, pruned_loss=0.05876, over 1614586.13 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:05,069 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 00:30:29,849 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-09 00:30:38,684 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:30:41,396 INFO [train.py:901] (2/4) Epoch 28, batch 3750, loss[loss=0.228, simple_loss=0.3016, pruned_loss=0.07718, over 6805.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2844, pruned_loss=0.05829, over 1613357.10 frames. ], batch size: 71, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:51,146 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.45 vs. limit=2.0 +2023-02-09 00:31:01,419 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.662e+02 3.142e+02 4.083e+02 1.270e+03, threshold=6.284e+02, percent-clipped=8.0 +2023-02-09 00:31:10,204 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222027.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:31:17,963 INFO [train.py:901] (2/4) Epoch 28, batch 3800, loss[loss=0.2259, simple_loss=0.3045, pruned_loss=0.07361, over 8632.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2841, pruned_loss=0.05833, over 1610893.11 frames. 
], batch size: 31, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:31:55,278 INFO [train.py:901] (2/4) Epoch 28, batch 3850, loss[loss=0.2128, simple_loss=0.2998, pruned_loss=0.06293, over 8321.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2831, pruned_loss=0.05773, over 1613449.80 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:32:06,549 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. limit=2.0 +2023-02-09 00:32:11,779 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 00:32:13,833 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.208e+02 2.768e+02 3.453e+02 7.901e+02, threshold=5.537e+02, percent-clipped=1.0 +2023-02-09 00:32:17,527 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222120.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:32:30,066 INFO [train.py:901] (2/4) Epoch 28, batch 3900, loss[loss=0.1529, simple_loss=0.236, pruned_loss=0.03487, over 8241.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2829, pruned_loss=0.05757, over 1612281.75 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:01,202 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2873, 1.4491, 3.4014, 1.1987, 3.0342, 2.8788, 3.0946, 3.0424], + device='cuda:2'), covar=tensor([0.0800, 0.3931, 0.0749, 0.4133, 0.1269, 0.1052, 0.0793, 0.0856], + device='cuda:2'), in_proj_covar=tensor([0.0682, 0.0669, 0.0734, 0.0662, 0.0746, 0.0634, 0.0642, 0.0717], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:33:06,447 INFO [train.py:901] (2/4) Epoch 28, batch 3950, loss[loss=0.1823, simple_loss=0.2775, pruned_loss=0.04354, over 8238.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2826, pruned_loss=0.05765, over 1611262.72 frames. ], batch size: 24, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:17,880 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:26,088 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.338e+02 2.821e+02 3.606e+02 1.107e+03, threshold=5.643e+02, percent-clipped=4.0 +2023-02-09 00:33:31,798 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:40,239 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:42,144 INFO [train.py:901] (2/4) Epoch 28, batch 4000, loss[loss=0.1806, simple_loss=0.2575, pruned_loss=0.05179, over 7271.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05719, over 1612693.64 frames. 
], batch size: 16, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:43,721 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:51,747 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:01,385 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:02,822 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7763, 1.7010, 2.5079, 2.0080, 2.2339, 1.8808, 1.6434, 1.1309], + device='cuda:2'), covar=tensor([0.7167, 0.6145, 0.2260, 0.3956, 0.3277, 0.4629, 0.2822, 0.5614], + device='cuda:2'), in_proj_covar=tensor([0.0965, 0.1027, 0.0832, 0.0993, 0.1021, 0.0934, 0.0772, 0.0851], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:34:08,192 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6929, 2.0689, 3.1813, 1.5597, 2.5351, 2.0749, 1.8097, 2.4845], + device='cuda:2'), covar=tensor([0.1953, 0.2585, 0.0911, 0.4544, 0.1705, 0.3316, 0.2442, 0.2101], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0638, 0.0565, 0.0673, 0.0664, 0.0611, 0.0566, 0.0645], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:34:09,544 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3066, 2.0625, 1.6375, 1.8789, 1.6425, 1.3848, 1.5785, 1.6901], + device='cuda:2'), covar=tensor([0.1515, 0.0517, 0.1379, 0.0661, 0.0957, 0.1862, 0.1166, 0.0985], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0245, 0.0343, 0.0314, 0.0305, 0.0348, 0.0350, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 00:34:17,514 INFO [train.py:901] (2/4) Epoch 28, batch 4050, loss[loss=0.2357, simple_loss=0.3125, pruned_loss=0.0794, over 6636.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2824, pruned_loss=0.05815, over 1610965.41 frames. ], batch size: 74, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:34:38,321 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.414e+02 3.092e+02 4.009e+02 1.246e+03, threshold=6.184e+02, percent-clipped=7.0 +2023-02-09 00:34:54,250 INFO [train.py:901] (2/4) Epoch 28, batch 4100, loss[loss=0.1901, simple_loss=0.2751, pruned_loss=0.05251, over 8532.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2818, pruned_loss=0.05785, over 1606830.69 frames. ], batch size: 28, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:14,877 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:17,646 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:29,381 INFO [train.py:901] (2/4) Epoch 28, batch 4150, loss[loss=0.1389, simple_loss=0.2204, pruned_loss=0.02867, over 7446.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2813, pruned_loss=0.05715, over 1607815.47 frames. 
], batch size: 17, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:49,108 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.330e+02 2.692e+02 3.176e+02 6.436e+02, threshold=5.384e+02, percent-clipped=1.0 +2023-02-09 00:35:50,176 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.30 vs. limit=5.0 +2023-02-09 00:36:07,144 INFO [train.py:901] (2/4) Epoch 28, batch 4200, loss[loss=0.2025, simple_loss=0.2785, pruned_loss=0.06327, over 7800.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2809, pruned_loss=0.05672, over 1611174.44 frames. ], batch size: 19, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:14,728 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 00:36:37,817 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 00:36:41,346 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:36:42,590 INFO [train.py:901] (2/4) Epoch 28, batch 4250, loss[loss=0.1757, simple_loss=0.252, pruned_loss=0.04975, over 7715.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2806, pruned_loss=0.05666, over 1610349.09 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:44,931 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222491.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:00,852 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.539e+02 3.193e+02 4.198e+02 8.289e+02, threshold=6.386e+02, percent-clipped=5.0 +2023-02-09 00:37:01,766 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222516.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:18,019 INFO [train.py:901] (2/4) Epoch 28, batch 4300, loss[loss=0.1764, simple_loss=0.2667, pruned_loss=0.04306, over 8466.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2805, pruned_loss=0.05643, over 1611972.68 frames. ], batch size: 28, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:20,182 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:25,031 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222547.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:28,747 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 00:37:29,476 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-09 00:37:39,465 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222567.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:54,079 INFO [train.py:901] (2/4) Epoch 28, batch 4350, loss[loss=0.1981, simple_loss=0.2931, pruned_loss=0.05159, over 8462.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.281, pruned_loss=0.05693, over 1610592.07 frames. 
], batch size: 25, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:54,222 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222588.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:59,284 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0976, 1.6514, 1.9131, 1.6104, 1.1771, 1.7402, 1.9083, 1.8171], + device='cuda:2'), covar=tensor([0.0563, 0.1103, 0.1512, 0.1359, 0.0574, 0.1306, 0.0632, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 00:38:05,223 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-09 00:38:11,699 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 00:38:13,114 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.501e+02 2.979e+02 3.614e+02 7.360e+02, threshold=5.959e+02, percent-clipped=2.0 +2023-02-09 00:38:18,809 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:29,009 INFO [train.py:901] (2/4) Epoch 28, batch 4400, loss[loss=0.1904, simple_loss=0.2659, pruned_loss=0.05745, over 7545.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2807, pruned_loss=0.05681, over 1607107.08 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:38:36,663 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:47,084 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:54,364 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 00:39:01,974 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222682.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:05,978 INFO [train.py:901] (2/4) Epoch 28, batch 4450, loss[loss=0.2203, simple_loss=0.3022, pruned_loss=0.06918, over 8459.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.0574, over 1612042.34 frames. ], batch size: 27, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:24,976 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.353e+02 2.798e+02 3.446e+02 6.111e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-09 00:39:41,230 INFO [train.py:901] (2/4) Epoch 28, batch 4500, loss[loss=0.195, simple_loss=0.282, pruned_loss=0.05403, over 8232.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.283, pruned_loss=0.05761, over 1616720.48 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:44,228 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:45,367 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 00:39:53,950 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.89 vs. 
limit=2.0 +2023-02-09 00:40:02,730 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222767.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:40:18,345 INFO [train.py:901] (2/4) Epoch 28, batch 4550, loss[loss=0.2163, simple_loss=0.2867, pruned_loss=0.0729, over 8091.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.05758, over 1616509.33 frames. ], batch size: 21, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:40:22,053 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6435, 2.5259, 1.8776, 2.3071, 2.1598, 1.6833, 2.1190, 2.2022], + device='cuda:2'), covar=tensor([0.1698, 0.0485, 0.1250, 0.0671, 0.0834, 0.1555, 0.1110, 0.1105], + device='cuda:2'), in_proj_covar=tensor([0.0360, 0.0245, 0.0342, 0.0315, 0.0304, 0.0349, 0.0349, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 00:40:37,191 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.324e+02 2.721e+02 3.677e+02 6.861e+02, threshold=5.442e+02, percent-clipped=4.0 +2023-02-09 00:40:40,226 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-09 00:40:53,695 INFO [train.py:901] (2/4) Epoch 28, batch 4600, loss[loss=0.1818, simple_loss=0.2758, pruned_loss=0.04388, over 8626.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2814, pruned_loss=0.0569, over 1615729.17 frames. ], batch size: 34, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:41:27,918 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222885.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:41:30,003 INFO [train.py:901] (2/4) Epoch 28, batch 4650, loss[loss=0.1835, simple_loss=0.2661, pruned_loss=0.05049, over 8351.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05744, over 1618274.60 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 16.0 +2023-02-09 00:41:50,685 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 3.099e+02 3.500e+02 7.849e+02, threshold=6.198e+02, percent-clipped=6.0 +2023-02-09 00:41:53,069 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222918.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:02,696 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222932.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:06,747 INFO [train.py:901] (2/4) Epoch 28, batch 4700, loss[loss=0.1607, simple_loss=0.2517, pruned_loss=0.03482, over 8198.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2826, pruned_loss=0.05755, over 1619270.26 frames. 
], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:06,959 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222938.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:10,454 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222943.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:20,635 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222958.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:24,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222963.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:40,849 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:41,323 INFO [train.py:901] (2/4) Epoch 28, batch 4750, loss[loss=0.2023, simple_loss=0.2826, pruned_loss=0.06102, over 8242.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2837, pruned_loss=0.0586, over 1615480.15 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:50,004 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223000.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:52,051 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:54,707 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 00:42:58,161 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 00:43:02,875 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.412e+02 2.807e+02 3.833e+02 7.869e+02, threshold=5.613e+02, percent-clipped=5.0 +2023-02-09 00:43:18,663 INFO [train.py:901] (2/4) Epoch 28, batch 4800, loss[loss=0.1869, simple_loss=0.2687, pruned_loss=0.05257, over 8242.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05787, over 1620063.31 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:43:25,201 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223047.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:48,815 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 00:43:49,658 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:53,653 INFO [train.py:901] (2/4) Epoch 28, batch 4850, loss[loss=0.1967, simple_loss=0.2821, pruned_loss=0.05568, over 8138.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2826, pruned_loss=0.05785, over 1616510.96 frames. 
], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:13,745 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.508e+02 3.332e+02 4.408e+02 9.671e+02, threshold=6.663e+02, percent-clipped=7.0 +2023-02-09 00:44:18,768 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9317, 1.7748, 2.5227, 1.6317, 1.4921, 2.5336, 0.5955, 1.5696], + device='cuda:2'), covar=tensor([0.1234, 0.0985, 0.0321, 0.0974, 0.1884, 0.0324, 0.1648, 0.1106], + device='cuda:2'), in_proj_covar=tensor([0.0200, 0.0205, 0.0137, 0.0223, 0.0277, 0.0147, 0.0174, 0.0198], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 00:44:21,502 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6875, 1.6574, 1.9187, 1.6838, 1.2611, 1.7676, 2.3381, 2.2037], + device='cuda:2'), covar=tensor([0.0504, 0.1229, 0.1658, 0.1463, 0.0630, 0.1461, 0.0635, 0.0604], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 00:44:31,160 INFO [train.py:901] (2/4) Epoch 28, batch 4900, loss[loss=0.1829, simple_loss=0.2608, pruned_loss=0.05254, over 7694.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2823, pruned_loss=0.05787, over 1616497.45 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:07,006 INFO [train.py:901] (2/4) Epoch 28, batch 4950, loss[loss=0.2069, simple_loss=0.298, pruned_loss=0.05791, over 8358.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2824, pruned_loss=0.05839, over 1612101.42 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:09,279 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223191.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:45:26,418 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.334e+02 2.712e+02 3.560e+02 9.309e+02, threshold=5.424e+02, percent-clipped=3.0 +2023-02-09 00:45:42,295 INFO [train.py:901] (2/4) Epoch 28, batch 5000, loss[loss=0.2367, simple_loss=0.3168, pruned_loss=0.07824, over 8356.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2822, pruned_loss=0.05787, over 1613034.02 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:56,141 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:14,352 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223281.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:18,935 INFO [train.py:901] (2/4) Epoch 28, batch 5050, loss[loss=0.2248, simple_loss=0.3143, pruned_loss=0.06765, over 8316.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2812, pruned_loss=0.05761, over 1605560.07 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:28,802 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223302.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:29,636 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223303.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:32,888 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. 
Duration: 27.14 +2023-02-09 00:46:35,804 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1341, 1.6625, 1.4858, 1.6023, 1.3216, 1.3355, 1.3600, 1.3402], + device='cuda:2'), covar=tensor([0.1128, 0.0524, 0.1339, 0.0584, 0.0812, 0.1489, 0.0888, 0.0825], + device='cuda:2'), in_proj_covar=tensor([0.0357, 0.0243, 0.0339, 0.0314, 0.0303, 0.0345, 0.0347, 0.0321], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 00:46:38,384 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.274e+02 2.931e+02 3.573e+02 6.090e+02, threshold=5.862e+02, percent-clipped=1.0 +2023-02-09 00:46:46,947 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223328.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:48,963 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223331.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:53,548 INFO [train.py:901] (2/4) Epoch 28, batch 5100, loss[loss=0.1988, simple_loss=0.28, pruned_loss=0.05884, over 8106.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2811, pruned_loss=0.05748, over 1605413.82 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:59,469 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223346.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:13,977 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1277, 2.1816, 1.9343, 2.3656, 1.8403, 1.9536, 2.1345, 2.2867], + device='cuda:2'), covar=tensor([0.0618, 0.0689, 0.0739, 0.0559, 0.0794, 0.0918, 0.0544, 0.0573], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0213, 0.0203, 0.0246, 0.0250, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 00:47:31,191 INFO [train.py:901] (2/4) Epoch 28, batch 5150, loss[loss=0.1634, simple_loss=0.2451, pruned_loss=0.04086, over 8143.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2827, pruned_loss=0.05799, over 1611635.79 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:47:37,583 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0500, 1.2226, 1.1790, 0.7642, 1.1943, 0.9894, 0.1226, 1.1953], + device='cuda:2'), covar=tensor([0.0472, 0.0419, 0.0399, 0.0620, 0.0466, 0.1077, 0.0957, 0.0366], + device='cuda:2'), in_proj_covar=tensor([0.0469, 0.0409, 0.0362, 0.0456, 0.0394, 0.0549, 0.0400, 0.0438], + device='cuda:2'), out_proj_covar=tensor([1.2407e-04, 1.0587e-04, 9.4242e-05, 1.1912e-04, 1.0306e-04, 1.5314e-04, + 1.0684e-04, 1.1481e-04], device='cuda:2') +2023-02-09 00:47:38,171 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4234, 4.4026, 4.0601, 2.0759, 3.9367, 3.9835, 4.0305, 3.8404], + device='cuda:2'), covar=tensor([0.0747, 0.0511, 0.1022, 0.4403, 0.0861, 0.1158, 0.1272, 0.0777], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0454, 0.0449, 0.0554, 0.0443, 0.0463, 0.0441, 0.0407], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:47:43,143 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.39 vs. 
limit=5.0 +2023-02-09 00:47:50,491 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.349e+02 2.964e+02 3.516e+02 1.122e+03, threshold=5.928e+02, percent-clipped=3.0 +2023-02-09 00:47:51,402 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223417.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:57,706 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:58,571 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6863, 1.5710, 2.2121, 1.5200, 1.2551, 2.1699, 0.5416, 1.3520], + device='cuda:2'), covar=tensor([0.1376, 0.1281, 0.0368, 0.0958, 0.2324, 0.0453, 0.1817, 0.1234], + device='cuda:2'), in_proj_covar=tensor([0.0200, 0.0206, 0.0137, 0.0224, 0.0279, 0.0148, 0.0175, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 00:48:05,981 INFO [train.py:901] (2/4) Epoch 28, batch 5200, loss[loss=0.2115, simple_loss=0.2973, pruned_loss=0.0629, over 8335.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2831, pruned_loss=0.05807, over 1612381.97 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:48:11,622 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223446.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:22,805 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223461.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:31,226 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 00:48:44,069 INFO [train.py:901] (2/4) Epoch 28, batch 5250, loss[loss=0.2056, simple_loss=0.2871, pruned_loss=0.06206, over 8082.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2826, pruned_loss=0.05759, over 1614006.20 frames. ], batch size: 21, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:49:03,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.237e+02 2.837e+02 3.561e+02 7.405e+02, threshold=5.674e+02, percent-clipped=6.0 +2023-02-09 00:49:17,137 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223535.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:49:19,101 INFO [train.py:901] (2/4) Epoch 28, batch 5300, loss[loss=0.2054, simple_loss=0.2975, pruned_loss=0.05662, over 8514.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05689, over 1617585.67 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:49:21,384 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:49:22,686 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4858, 2.3534, 3.0415, 2.4709, 2.9336, 2.5077, 2.4356, 1.9912], + device='cuda:2'), covar=tensor([0.5781, 0.5409, 0.2346, 0.4249, 0.2997, 0.3311, 0.1822, 0.5875], + device='cuda:2'), in_proj_covar=tensor([0.0966, 0.1030, 0.0835, 0.0999, 0.1026, 0.0936, 0.0773, 0.0855], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:49:55,534 INFO [train.py:901] (2/4) Epoch 28, batch 5350, loss[loss=0.2086, simple_loss=0.2903, pruned_loss=0.06342, over 8106.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05666, over 1612471.31 frames. 
], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:50:01,801 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223596.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:50:04,676 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8558, 1.3815, 3.9884, 1.5163, 3.6004, 3.3596, 3.6363, 3.5540], + device='cuda:2'), covar=tensor([0.0649, 0.4787, 0.0648, 0.4049, 0.1106, 0.0982, 0.0656, 0.0733], + device='cuda:2'), in_proj_covar=tensor([0.0679, 0.0663, 0.0734, 0.0656, 0.0737, 0.0632, 0.0640, 0.0712], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:50:15,667 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.840e+02 3.657e+02 7.209e+02, threshold=5.681e+02, percent-clipped=3.0 +2023-02-09 00:50:30,415 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2953, 2.1691, 2.7466, 2.3284, 2.7037, 2.4173, 2.2411, 1.6247], + device='cuda:2'), covar=tensor([0.5447, 0.5068, 0.2105, 0.4120, 0.2603, 0.3143, 0.1986, 0.5690], + device='cuda:2'), in_proj_covar=tensor([0.0967, 0.1031, 0.0837, 0.1000, 0.1027, 0.0938, 0.0774, 0.0856], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:50:30,879 INFO [train.py:901] (2/4) Epoch 28, batch 5400, loss[loss=0.2167, simple_loss=0.2971, pruned_loss=0.06814, over 8455.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05693, over 1610939.15 frames. ], batch size: 29, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:50:39,707 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223650.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:50:55,924 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223673.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:03,757 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7536, 2.6024, 1.8743, 2.4145, 2.1466, 1.5257, 2.1857, 2.3734], + device='cuda:2'), covar=tensor([0.1597, 0.0459, 0.1353, 0.0704, 0.0865, 0.1806, 0.1133, 0.0990], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0244, 0.0342, 0.0315, 0.0303, 0.0347, 0.0350, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 00:51:06,370 INFO [train.py:901] (2/4) Epoch 28, batch 5450, loss[loss=0.1811, simple_loss=0.2759, pruned_loss=0.04321, over 8087.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2809, pruned_loss=0.05675, over 1609262.18 frames. 
], batch size: 21, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:51:13,798 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:16,756 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:28,420 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.405e+02 2.886e+02 3.694e+02 6.837e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 00:51:28,659 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:29,454 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3530, 2.3193, 3.1246, 2.4956, 3.1266, 2.5084, 2.2948, 1.9194], + device='cuda:2'), covar=tensor([0.6263, 0.5260, 0.2195, 0.4335, 0.2672, 0.3287, 0.2022, 0.5955], + device='cuda:2'), in_proj_covar=tensor([0.0966, 0.1029, 0.0837, 0.0998, 0.1025, 0.0934, 0.0772, 0.0855], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:51:31,411 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 00:51:35,317 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-09 00:51:36,526 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223727.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:44,560 INFO [train.py:901] (2/4) Epoch 28, batch 5500, loss[loss=0.1538, simple_loss=0.2423, pruned_loss=0.03267, over 7928.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.0562, over 1611431.85 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:51:47,330 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:18,970 INFO [train.py:901] (2/4) Epoch 28, batch 5550, loss[loss=0.1697, simple_loss=0.2666, pruned_loss=0.03642, over 8251.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2812, pruned_loss=0.05684, over 1610023.15 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:52:20,252 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-09 00:52:25,546 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223797.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:39,665 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.462e+02 3.010e+02 3.574e+02 1.274e+03, threshold=6.020e+02, percent-clipped=3.0 +2023-02-09 00:52:43,463 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223822.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:55,584 INFO [train.py:901] (2/4) Epoch 28, batch 5600, loss[loss=0.1952, simple_loss=0.2938, pruned_loss=0.04834, over 8246.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2802, pruned_loss=0.05654, over 1607076.04 frames. 
], batch size: 24, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:53:05,096 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4676, 1.3705, 1.7672, 1.2292, 1.0872, 1.7756, 0.2056, 1.0917], + device='cuda:2'), covar=tensor([0.1397, 0.1114, 0.0426, 0.0824, 0.2443, 0.0427, 0.1821, 0.1138], + device='cuda:2'), in_proj_covar=tensor([0.0201, 0.0205, 0.0137, 0.0223, 0.0278, 0.0147, 0.0174, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 00:53:31,691 INFO [train.py:901] (2/4) Epoch 28, batch 5650, loss[loss=0.2042, simple_loss=0.2808, pruned_loss=0.06384, over 7442.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2807, pruned_loss=0.05681, over 1609139.97 frames. ], batch size: 73, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:53:41,201 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 00:53:44,202 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223906.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:53:44,787 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8185, 2.0371, 1.6911, 2.5978, 1.2806, 1.5707, 1.9492, 1.9823], + device='cuda:2'), covar=tensor([0.0769, 0.0705, 0.0866, 0.0384, 0.0984, 0.1265, 0.0693, 0.0739], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0196, 0.0246, 0.0215, 0.0205, 0.0249, 0.0252, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 00:53:51,308 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.313e+02 2.789e+02 3.752e+02 1.102e+03, threshold=5.578e+02, percent-clipped=3.0 +2023-02-09 00:54:01,598 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223931.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:54:06,155 INFO [train.py:901] (2/4) Epoch 28, batch 5700, loss[loss=0.1805, simple_loss=0.2543, pruned_loss=0.05338, over 7439.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.279, pruned_loss=0.0558, over 1607151.44 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:54:07,517 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223940.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:54:10,615 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.55 vs. limit=2.0 +2023-02-09 00:54:16,420 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-09 00:54:43,234 INFO [train.py:901] (2/4) Epoch 28, batch 5750, loss[loss=0.1812, simple_loss=0.2688, pruned_loss=0.04675, over 7820.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2798, pruned_loss=0.05623, over 1610243.06 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:54:48,703 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 00:55:04,150 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.263e+02 2.713e+02 3.241e+02 8.661e+02, threshold=5.425e+02, percent-clipped=3.0 +2023-02-09 00:55:18,773 INFO [train.py:901] (2/4) Epoch 28, batch 5800, loss[loss=0.2128, simple_loss=0.3061, pruned_loss=0.05973, over 8330.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2804, pruned_loss=0.05635, over 1614053.38 frames. 
], batch size: 25, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:55:22,322 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8245, 1.3272, 4.0142, 1.4441, 3.5592, 3.3761, 3.6786, 3.5671], + device='cuda:2'), covar=tensor([0.0728, 0.5003, 0.0727, 0.4545, 0.1296, 0.1154, 0.0640, 0.0814], + device='cuda:2'), in_proj_covar=tensor([0.0683, 0.0665, 0.0737, 0.0661, 0.0742, 0.0636, 0.0643, 0.0718], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:55:30,595 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224055.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:55:31,246 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.9024, 1.2176, 3.2653, 1.1659, 2.5403, 2.5793, 2.9429, 2.9314], + device='cuda:2'), covar=tensor([0.1997, 0.6292, 0.1605, 0.5584, 0.3095, 0.2297, 0.1445, 0.1589], + device='cuda:2'), in_proj_covar=tensor([0.0683, 0.0664, 0.0737, 0.0661, 0.0742, 0.0636, 0.0643, 0.0718], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:55:43,050 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5707, 2.0415, 3.2062, 1.4409, 2.3217, 2.0331, 1.7090, 2.5338], + device='cuda:2'), covar=tensor([0.2018, 0.2663, 0.0885, 0.4796, 0.2097, 0.3356, 0.2531, 0.2282], + device='cuda:2'), in_proj_covar=tensor([0.0541, 0.0636, 0.0564, 0.0671, 0.0663, 0.0612, 0.0563, 0.0647], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:55:47,089 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1008, 3.5491, 2.2909, 2.7533, 2.5942, 2.0025, 2.6284, 2.9399], + device='cuda:2'), covar=tensor([0.1726, 0.0393, 0.1115, 0.0816, 0.0853, 0.1499, 0.1077, 0.1094], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0243, 0.0341, 0.0314, 0.0303, 0.0346, 0.0350, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 00:55:55,795 INFO [train.py:901] (2/4) Epoch 28, batch 5850, loss[loss=0.1945, simple_loss=0.2764, pruned_loss=0.05626, over 7822.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2816, pruned_loss=0.05666, over 1620214.89 frames. ], batch size: 20, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:56:15,667 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.538e+02 3.148e+02 4.118e+02 7.183e+02, threshold=6.296e+02, percent-clipped=12.0 +2023-02-09 00:56:30,204 INFO [train.py:901] (2/4) Epoch 28, batch 5900, loss[loss=0.2651, simple_loss=0.3288, pruned_loss=0.1008, over 6843.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2822, pruned_loss=0.05669, over 1621763.95 frames. ], batch size: 71, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:06,188 INFO [train.py:901] (2/4) Epoch 28, batch 5950, loss[loss=0.188, simple_loss=0.2751, pruned_loss=0.05047, over 8251.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2839, pruned_loss=0.05773, over 1620030.04 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:25,382 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. 
limit=2.0 +2023-02-09 00:57:28,300 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.486e+02 3.110e+02 3.888e+02 7.674e+02, threshold=6.220e+02, percent-clipped=4.0 +2023-02-09 00:57:37,732 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224230.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:57:43,109 INFO [train.py:901] (2/4) Epoch 28, batch 6000, loss[loss=0.1925, simple_loss=0.2833, pruned_loss=0.05087, over 8469.00 frames. ], tot_loss[loss=0.2, simple_loss=0.284, pruned_loss=0.058, over 1622477.31 frames. ], batch size: 27, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:43,110 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 00:57:56,800 INFO [train.py:935] (2/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2708, pruned_loss=0.03603, over 944034.00 frames. +2023-02-09 00:57:56,801 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6724MB +2023-02-09 00:57:59,129 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5185, 2.3920, 3.0804, 2.5031, 2.9313, 2.5671, 2.4108, 2.0320], + device='cuda:2'), covar=tensor([0.5476, 0.5206, 0.2279, 0.4177, 0.2850, 0.3263, 0.1883, 0.5693], + device='cuda:2'), in_proj_covar=tensor([0.0973, 0.1035, 0.0841, 0.1004, 0.1028, 0.0940, 0.0776, 0.0859], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 00:58:07,091 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5370, 4.4987, 4.1451, 2.2485, 4.0916, 4.0985, 4.0877, 3.9364], + device='cuda:2'), covar=tensor([0.0641, 0.0459, 0.0851, 0.4029, 0.0756, 0.1021, 0.1141, 0.0812], + device='cuda:2'), in_proj_covar=tensor([0.0545, 0.0457, 0.0451, 0.0559, 0.0445, 0.0466, 0.0442, 0.0408], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:58:33,310 INFO [train.py:901] (2/4) Epoch 28, batch 6050, loss[loss=0.1761, simple_loss=0.2618, pruned_loss=0.04523, over 7651.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2835, pruned_loss=0.05764, over 1619469.71 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:58:43,552 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6525, 2.0646, 3.2527, 1.4238, 2.4758, 2.1279, 1.7219, 2.6089], + device='cuda:2'), covar=tensor([0.1916, 0.2807, 0.0844, 0.4849, 0.1989, 0.3252, 0.2523, 0.2279], + device='cuda:2'), in_proj_covar=tensor([0.0541, 0.0637, 0.0565, 0.0671, 0.0665, 0.0614, 0.0565, 0.0649], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 00:58:48,811 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.77 vs. limit=2.0 +2023-02-09 00:58:49,931 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224311.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:58:52,994 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. 
limit=2.0 +2023-02-09 00:58:54,045 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.462e+02 3.109e+02 3.867e+02 1.260e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-09 00:59:08,857 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224336.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:59:10,070 INFO [train.py:901] (2/4) Epoch 28, batch 6100, loss[loss=0.1805, simple_loss=0.261, pruned_loss=0.04997, over 7326.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2827, pruned_loss=0.05726, over 1615813.26 frames. ], batch size: 16, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:59:26,304 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 00:59:46,741 INFO [train.py:901] (2/4) Epoch 28, batch 6150, loss[loss=0.1945, simple_loss=0.2809, pruned_loss=0.05405, over 8461.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2828, pruned_loss=0.05737, over 1619520.32 frames. ], batch size: 29, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:59:54,611 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5274, 1.9468, 2.0713, 1.2091, 2.1326, 1.5453, 0.6053, 1.9010], + device='cuda:2'), covar=tensor([0.0767, 0.0423, 0.0293, 0.0744, 0.0478, 0.1060, 0.1004, 0.0367], + device='cuda:2'), in_proj_covar=tensor([0.0471, 0.0409, 0.0363, 0.0458, 0.0393, 0.0550, 0.0402, 0.0440], + device='cuda:2'), out_proj_covar=tensor([1.2457e-04, 1.0580e-04, 9.4517e-05, 1.1960e-04, 1.0283e-04, 1.5344e-04, + 1.0723e-04, 1.1521e-04], device='cuda:2') +2023-02-09 01:00:06,960 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.362e+02 2.823e+02 3.455e+02 8.158e+02, threshold=5.645e+02, percent-clipped=2.0 +2023-02-09 01:00:21,346 INFO [train.py:901] (2/4) Epoch 28, batch 6200, loss[loss=0.1949, simple_loss=0.29, pruned_loss=0.04994, over 8322.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2826, pruned_loss=0.05753, over 1615691.98 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:00:58,234 INFO [train.py:901] (2/4) Epoch 28, batch 6250, loss[loss=0.1702, simple_loss=0.2441, pruned_loss=0.04819, over 7550.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.0577, over 1605664.57 frames. ], batch size: 18, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:00:59,402 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.54 vs. limit=5.0 +2023-02-09 01:01:18,589 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.593e+02 3.043e+02 4.250e+02 9.084e+02, threshold=6.087e+02, percent-clipped=11.0 +2023-02-09 01:01:33,268 INFO [train.py:901] (2/4) Epoch 28, batch 6300, loss[loss=0.1572, simple_loss=0.2386, pruned_loss=0.03792, over 6799.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2819, pruned_loss=0.05724, over 1606961.78 frames. ], batch size: 15, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:02:00,129 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224574.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:02:04,363 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:02:10,460 INFO [train.py:901] (2/4) Epoch 28, batch 6350, loss[loss=0.182, simple_loss=0.2679, pruned_loss=0.0481, over 7817.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2835, pruned_loss=0.05787, over 1612240.34 frames. 
], batch size: 20, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:02:30,928 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.315e+02 2.720e+02 3.259e+02 6.733e+02, threshold=5.440e+02, percent-clipped=2.0 +2023-02-09 01:02:45,884 INFO [train.py:901] (2/4) Epoch 28, batch 6400, loss[loss=0.2331, simple_loss=0.3191, pruned_loss=0.0736, over 8470.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05768, over 1611876.79 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:03:16,422 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 01:03:21,510 INFO [train.py:901] (2/4) Epoch 28, batch 6450, loss[loss=0.1882, simple_loss=0.2812, pruned_loss=0.04763, over 8187.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.282, pruned_loss=0.05788, over 1610991.59 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:03:22,394 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224689.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:03:43,003 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.303e+02 2.784e+02 3.485e+02 7.082e+02, threshold=5.567e+02, percent-clipped=7.0 +2023-02-09 01:03:44,832 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-09 01:03:56,389 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1694, 2.4029, 2.5030, 1.6603, 2.7036, 1.8585, 1.6176, 2.1539], + device='cuda:2'), covar=tensor([0.0929, 0.0451, 0.0346, 0.0834, 0.0531, 0.0870, 0.1093, 0.0594], + device='cuda:2'), in_proj_covar=tensor([0.0475, 0.0412, 0.0366, 0.0461, 0.0396, 0.0555, 0.0406, 0.0444], + device='cuda:2'), out_proj_covar=tensor([1.2572e-04, 1.0680e-04, 9.5128e-05, 1.2062e-04, 1.0360e-04, 1.5473e-04, + 1.0836e-04, 1.1623e-04], device='cuda:2') +2023-02-09 01:03:57,598 INFO [train.py:901] (2/4) Epoch 28, batch 6500, loss[loss=0.1596, simple_loss=0.2365, pruned_loss=0.04136, over 7699.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2814, pruned_loss=0.0573, over 1611520.85 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:04:02,103 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:04:11,987 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224758.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:04:32,256 INFO [train.py:901] (2/4) Epoch 28, batch 6550, loss[loss=0.2419, simple_loss=0.2947, pruned_loss=0.09452, over 7804.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2824, pruned_loss=0.05795, over 1615194.98 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:04:47,798 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 01:04:53,991 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.489e+02 3.184e+02 3.768e+02 7.222e+02, threshold=6.368e+02, percent-clipped=1.0 +2023-02-09 01:05:08,030 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 01:05:09,349 INFO [train.py:901] (2/4) Epoch 28, batch 6600, loss[loss=0.2189, simple_loss=0.2842, pruned_loss=0.0768, over 7655.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2825, pruned_loss=0.05831, over 1609985.26 frames. 
], batch size: 19, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:05:17,204 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1702, 1.8765, 2.5274, 1.6443, 1.6316, 2.5025, 1.3729, 2.0599], + device='cuda:2'), covar=tensor([0.1533, 0.1018, 0.0292, 0.1034, 0.1829, 0.0369, 0.1478, 0.1121], + device='cuda:2'), in_proj_covar=tensor([0.0204, 0.0209, 0.0138, 0.0226, 0.0280, 0.0149, 0.0176, 0.0201], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 01:05:44,396 INFO [train.py:901] (2/4) Epoch 28, batch 6650, loss[loss=0.1533, simple_loss=0.2417, pruned_loss=0.03245, over 7433.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2818, pruned_loss=0.05785, over 1612256.80 frames. ], batch size: 17, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:04,781 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.463e+02 2.971e+02 3.895e+02 9.422e+02, threshold=5.941e+02, percent-clipped=4.0 +2023-02-09 01:06:10,966 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224924.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:11,639 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2118, 4.2120, 3.7742, 1.9960, 3.7221, 3.8509, 3.8392, 3.6960], + device='cuda:2'), covar=tensor([0.0751, 0.0546, 0.1040, 0.4455, 0.0934, 0.1190, 0.1199, 0.0898], + device='cuda:2'), in_proj_covar=tensor([0.0550, 0.0462, 0.0456, 0.0566, 0.0448, 0.0471, 0.0448, 0.0414], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:06:17,465 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.91 vs. limit=2.0 +2023-02-09 01:06:21,167 INFO [train.py:901] (2/4) Epoch 28, batch 6700, loss[loss=0.1621, simple_loss=0.2532, pruned_loss=0.03548, over 7975.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2813, pruned_loss=0.05749, over 1611929.07 frames. ], batch size: 21, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:26,235 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224945.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:30,480 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0196, 1.6339, 1.8353, 1.5700, 0.9385, 1.6735, 1.7976, 1.7082], + device='cuda:2'), covar=tensor([0.0539, 0.1193, 0.1590, 0.1394, 0.0602, 0.1405, 0.0696, 0.0640], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0154, 0.0190, 0.0161, 0.0102, 0.0164, 0.0113, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:06:44,447 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224970.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:57,025 INFO [train.py:901] (2/4) Epoch 28, batch 6750, loss[loss=0.1876, simple_loss=0.2812, pruned_loss=0.04701, over 8337.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2794, pruned_loss=0.05621, over 1607215.69 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:57,468 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. 
limit=2.0 +2023-02-09 01:06:58,657 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0267, 1.6271, 1.3885, 1.5424, 1.3128, 1.2543, 1.2919, 1.2562], + device='cuda:2'), covar=tensor([0.1282, 0.0536, 0.1453, 0.0669, 0.0862, 0.1661, 0.0984, 0.0984], + device='cuda:2'), in_proj_covar=tensor([0.0362, 0.0247, 0.0345, 0.0317, 0.0304, 0.0350, 0.0354, 0.0327], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 01:07:01,383 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3568, 2.6288, 2.9662, 1.7769, 3.1724, 2.0052, 1.6130, 2.3302], + device='cuda:2'), covar=tensor([0.0890, 0.0413, 0.0340, 0.0899, 0.0530, 0.0891, 0.1072, 0.0640], + device='cuda:2'), in_proj_covar=tensor([0.0473, 0.0409, 0.0364, 0.0459, 0.0396, 0.0552, 0.0403, 0.0442], + device='cuda:2'), out_proj_covar=tensor([1.2520e-04, 1.0596e-04, 9.4811e-05, 1.2003e-04, 1.0343e-04, 1.5400e-04, + 1.0766e-04, 1.1576e-04], device='cuda:2') +2023-02-09 01:07:17,010 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.343e+02 2.979e+02 3.883e+02 6.136e+02, threshold=5.958e+02, percent-clipped=2.0 +2023-02-09 01:07:28,015 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 01:07:32,093 INFO [train.py:901] (2/4) Epoch 28, batch 6800, loss[loss=0.1805, simple_loss=0.2637, pruned_loss=0.04859, over 7424.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2793, pruned_loss=0.05578, over 1606362.12 frames. ], batch size: 17, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:07:32,951 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225039.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:07:57,290 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7104, 1.4744, 1.8180, 1.4815, 0.8476, 1.6171, 1.6019, 1.5257], + device='cuda:2'), covar=tensor([0.0591, 0.1298, 0.1601, 0.1465, 0.0609, 0.1421, 0.0725, 0.0655], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:08:08,382 INFO [train.py:901] (2/4) Epoch 28, batch 6850, loss[loss=0.1663, simple_loss=0.2542, pruned_loss=0.03916, over 8242.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.05634, over 1611848.17 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:08:08,458 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225088.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:18,039 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225102.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:18,711 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 01:08:28,500 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.283e+02 2.996e+02 3.907e+02 8.918e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-09 01:08:31,368 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225121.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:42,882 INFO [train.py:901] (2/4) Epoch 28, batch 6900, loss[loss=0.2272, simple_loss=0.3064, pruned_loss=0.07401, over 7816.00 frames. 
], tot_loss[loss=0.1971, simple_loss=0.2805, pruned_loss=0.05684, over 1610354.88 frames. ], batch size: 20, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:08:47,491 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-09 01:08:58,663 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225160.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:18,984 INFO [train.py:901] (2/4) Epoch 28, batch 6950, loss[loss=0.2077, simple_loss=0.2838, pruned_loss=0.06583, over 7430.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2805, pruned_loss=0.05703, over 1608127.67 frames. ], batch size: 17, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:09:30,330 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:30,870 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 01:09:40,023 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.456e+02 2.946e+02 3.977e+02 8.721e+02, threshold=5.892e+02, percent-clipped=6.0 +2023-02-09 01:09:40,234 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225217.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:54,781 INFO [train.py:901] (2/4) Epoch 28, batch 7000, loss[loss=0.2061, simple_loss=0.2886, pruned_loss=0.06185, over 8554.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2798, pruned_loss=0.05634, over 1609553.91 frames. ], batch size: 34, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:10:31,221 INFO [train.py:901] (2/4) Epoch 28, batch 7050, loss[loss=0.2236, simple_loss=0.3085, pruned_loss=0.06937, over 8528.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2811, pruned_loss=0.05688, over 1609482.60 frames. ], batch size: 48, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:10:36,315 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225295.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:10:52,614 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.402e+02 2.844e+02 3.449e+02 6.425e+02, threshold=5.688e+02, percent-clipped=2.0 +2023-02-09 01:10:55,649 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225320.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:11:08,202 INFO [train.py:901] (2/4) Epoch 28, batch 7100, loss[loss=0.1865, simple_loss=0.2826, pruned_loss=0.04524, over 8343.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2813, pruned_loss=0.05632, over 1614161.65 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:11:43,049 INFO [train.py:901] (2/4) Epoch 28, batch 7150, loss[loss=0.2163, simple_loss=0.3053, pruned_loss=0.06366, over 8360.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2807, pruned_loss=0.05618, over 1613436.19 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:12:05,401 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.377e+02 2.906e+02 3.542e+02 6.036e+02, threshold=5.811e+02, percent-clipped=2.0 +2023-02-09 01:12:21,601 INFO [train.py:901] (2/4) Epoch 28, batch 7200, loss[loss=0.1982, simple_loss=0.284, pruned_loss=0.05622, over 8545.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2819, pruned_loss=0.05657, over 1614425.68 frames. 
], batch size: 28, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:12:35,169 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225457.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:36,585 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225459.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:40,503 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225465.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:41,417 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2653, 2.0354, 2.6158, 2.2520, 2.5578, 2.3323, 2.1563, 1.4690], + device='cuda:2'), covar=tensor([0.5662, 0.5186, 0.2121, 0.3815, 0.2577, 0.3256, 0.1950, 0.5483], + device='cuda:2'), in_proj_covar=tensor([0.0969, 0.1033, 0.0841, 0.1000, 0.1028, 0.0938, 0.0775, 0.0855], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 01:12:44,884 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2582, 3.6790, 2.4474, 2.9153, 2.9080, 2.1478, 2.8554, 3.2205], + device='cuda:2'), covar=tensor([0.1532, 0.0372, 0.1055, 0.0735, 0.0771, 0.1416, 0.1017, 0.1032], + device='cuda:2'), in_proj_covar=tensor([0.0362, 0.0247, 0.0345, 0.0317, 0.0304, 0.0350, 0.0353, 0.0325], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 01:12:46,185 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225473.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:47,076 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-09 01:12:51,112 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225480.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:53,922 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:56,557 INFO [train.py:901] (2/4) Epoch 28, batch 7250, loss[loss=0.186, simple_loss=0.265, pruned_loss=0.05354, over 7272.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05722, over 1617612.07 frames. ], batch size: 16, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:13:03,612 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:07,603 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225504.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:16,360 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.486e+02 3.022e+02 3.617e+02 8.325e+02, threshold=6.044e+02, percent-clipped=6.0 +2023-02-09 01:13:21,986 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225523.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:32,777 INFO [train.py:901] (2/4) Epoch 28, batch 7300, loss[loss=0.1863, simple_loss=0.2833, pruned_loss=0.04469, over 8323.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2819, pruned_loss=0.05702, over 1617277.04 frames. 
], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:13:34,184 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225540.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:00,395 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225577.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:14:02,421 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:07,681 INFO [train.py:901] (2/4) Epoch 28, batch 7350, loss[loss=0.1577, simple_loss=0.2421, pruned_loss=0.03663, over 8092.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2812, pruned_loss=0.05681, over 1611998.23 frames. ], batch size: 21, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:14:24,565 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 01:14:27,903 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.380e+02 2.753e+02 3.463e+02 7.224e+02, threshold=5.506e+02, percent-clipped=3.0 +2023-02-09 01:14:29,528 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225619.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:42,945 INFO [train.py:901] (2/4) Epoch 28, batch 7400, loss[loss=0.192, simple_loss=0.2846, pruned_loss=0.04967, over 8475.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2825, pruned_loss=0.05742, over 1615287.15 frames. ], batch size: 27, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:14:42,955 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 01:14:48,689 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2144, 1.4495, 4.2300, 1.9238, 2.4715, 4.7651, 4.8802, 4.1441], + device='cuda:2'), covar=tensor([0.1321, 0.2100, 0.0277, 0.2129, 0.1215, 0.0190, 0.0430, 0.0519], + device='cuda:2'), in_proj_covar=tensor([0.0309, 0.0327, 0.0295, 0.0325, 0.0326, 0.0277, 0.0443, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 01:15:18,724 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.3162, 1.1787, 3.3836, 1.0766, 3.0315, 2.8110, 3.1095, 3.0042], + device='cuda:2'), covar=tensor([0.0738, 0.4493, 0.0825, 0.4473, 0.1235, 0.1135, 0.0741, 0.0877], + device='cuda:2'), in_proj_covar=tensor([0.0683, 0.0664, 0.0735, 0.0660, 0.0747, 0.0636, 0.0645, 0.0717], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:15:19,316 INFO [train.py:901] (2/4) Epoch 28, batch 7450, loss[loss=0.1873, simple_loss=0.2766, pruned_loss=0.04894, over 8194.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2829, pruned_loss=0.05764, over 1617703.65 frames. 
], batch size: 23, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:15:23,689 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8750, 1.6884, 2.1775, 1.7206, 1.2913, 1.9182, 2.4222, 2.2922], + device='cuda:2'), covar=tensor([0.0446, 0.1209, 0.1516, 0.1383, 0.0553, 0.1343, 0.0573, 0.0576], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0160, 0.0101, 0.0162, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:15:25,033 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 01:15:40,013 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.396e+02 3.007e+02 3.866e+02 7.466e+02, threshold=6.014e+02, percent-clipped=6.0 +2023-02-09 01:15:54,482 INFO [train.py:901] (2/4) Epoch 28, batch 7500, loss[loss=0.1797, simple_loss=0.2704, pruned_loss=0.04451, over 8361.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2826, pruned_loss=0.05716, over 1618944.34 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:16:32,496 INFO [train.py:901] (2/4) Epoch 28, batch 7550, loss[loss=0.1936, simple_loss=0.2815, pruned_loss=0.05287, over 8457.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2828, pruned_loss=0.05761, over 1616876.00 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:16:39,078 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4821, 2.5078, 1.8701, 2.1404, 2.0995, 1.6074, 2.0145, 2.1305], + device='cuda:2'), covar=tensor([0.1656, 0.0455, 0.1292, 0.0719, 0.0830, 0.1576, 0.1100, 0.1066], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0244, 0.0341, 0.0312, 0.0300, 0.0345, 0.0348, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 01:16:41,875 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225801.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:16:52,725 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.388e+02 3.127e+02 4.485e+02 1.321e+03, threshold=6.254e+02, percent-clipped=11.0 +2023-02-09 01:16:57,651 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225824.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:05,948 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225836.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:07,133 INFO [train.py:901] (2/4) Epoch 28, batch 7600, loss[loss=0.2088, simple_loss=0.2978, pruned_loss=0.05997, over 8460.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05735, over 1616163.33 frames. 
], batch size: 29, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:17:23,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225861.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:27,277 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225867.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:34,157 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:37,631 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225880.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:40,887 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225884.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:43,480 INFO [train.py:901] (2/4) Epoch 28, batch 7650, loss[loss=0.2363, simple_loss=0.3148, pruned_loss=0.07893, over 8599.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2825, pruned_loss=0.05742, over 1613835.07 frames. ], batch size: 39, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:17:51,600 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225900.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:01,184 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225913.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:03,348 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225916.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:03,835 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.353e+02 2.789e+02 3.444e+02 7.654e+02, threshold=5.579e+02, percent-clipped=1.0 +2023-02-09 01:18:06,735 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225921.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:18:18,658 INFO [train.py:901] (2/4) Epoch 28, batch 7700, loss[loss=0.2505, simple_loss=0.3272, pruned_loss=0.08689, over 8372.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2821, pruned_loss=0.05812, over 1613033.77 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:18:19,536 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225939.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:31,340 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 01:18:44,244 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 01:18:44,500 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4357, 2.3794, 2.9974, 2.4641, 3.0263, 2.4984, 2.4470, 1.9285], + device='cuda:2'), covar=tensor([0.5783, 0.5388, 0.2307, 0.4196, 0.2788, 0.3527, 0.1934, 0.6016], + device='cuda:2'), in_proj_covar=tensor([0.0964, 0.1031, 0.0836, 0.0999, 0.1025, 0.0936, 0.0772, 0.0855], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 01:18:49,255 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225982.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:53,157 INFO [train.py:901] (2/4) Epoch 28, batch 7750, loss[loss=0.2138, simple_loss=0.2956, pruned_loss=0.06603, over 8367.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2807, pruned_loss=0.05708, over 1612316.69 frames. 
], batch size: 24, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:19:01,896 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225999.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:19:15,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.439e+02 2.815e+02 3.514e+02 7.333e+02, threshold=5.630e+02, percent-clipped=1.0 +2023-02-09 01:19:29,668 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226036.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:19:30,737 INFO [train.py:901] (2/4) Epoch 28, batch 7800, loss[loss=0.2565, simple_loss=0.3301, pruned_loss=0.0915, over 8242.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2805, pruned_loss=0.05685, over 1614978.10 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:19:45,517 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226059.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:20:05,554 INFO [train.py:901] (2/4) Epoch 28, batch 7850, loss[loss=0.2002, simple_loss=0.2858, pruned_loss=0.05733, over 8245.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2803, pruned_loss=0.05698, over 1608779.18 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:20:25,274 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.405e+02 3.030e+02 3.828e+02 1.208e+03, threshold=6.060e+02, percent-clipped=4.0 +2023-02-09 01:20:39,675 INFO [train.py:901] (2/4) Epoch 28, batch 7900, loss[loss=0.217, simple_loss=0.304, pruned_loss=0.065, over 8450.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2805, pruned_loss=0.05693, over 1608149.97 frames. ], batch size: 27, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:21:02,887 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226172.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:13,378 INFO [train.py:901] (2/4) Epoch 28, batch 7950, loss[loss=0.1839, simple_loss=0.2483, pruned_loss=0.0598, over 7677.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2804, pruned_loss=0.05732, over 1607338.75 frames. 
], batch size: 18, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:21:18,309 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226195.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:19,697 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226197.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:33,033 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.474e+02 2.920e+02 3.612e+02 7.690e+02, threshold=5.839e+02, percent-clipped=4.0 +2023-02-09 01:21:35,312 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226220.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:36,606 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0449, 1.3951, 1.6572, 1.3155, 0.8969, 1.4264, 1.6309, 1.5062], + device='cuda:2'), covar=tensor([0.0580, 0.1319, 0.1746, 0.1567, 0.0675, 0.1531, 0.0770, 0.0706], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0146], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:21:37,938 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226224.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:47,691 INFO [train.py:901] (2/4) Epoch 28, batch 8000, loss[loss=0.1965, simple_loss=0.2921, pruned_loss=0.05044, over 8337.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.281, pruned_loss=0.05714, over 1613601.75 frames. ], batch size: 25, lr: 2.66e-03, grad_scale: 16.0 +2023-02-09 01:21:47,896 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226238.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:52,826 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5194, 1.5988, 4.7396, 1.8109, 4.1352, 3.9312, 4.2882, 4.1619], + device='cuda:2'), covar=tensor([0.0601, 0.4871, 0.0537, 0.4363, 0.1128, 0.1044, 0.0602, 0.0685], + device='cuda:2'), in_proj_covar=tensor([0.0688, 0.0668, 0.0738, 0.0664, 0.0753, 0.0641, 0.0648, 0.0721], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:21:57,012 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226251.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:59,704 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226255.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:00,950 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226257.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:05,094 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226263.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:17,276 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:22,560 INFO [train.py:901] (2/4) Epoch 28, batch 8050, loss[loss=0.1741, simple_loss=0.2547, pruned_loss=0.04672, over 7551.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2795, pruned_loss=0.05706, over 1599012.39 frames. 
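In the `INFO [optim.py:369] Clipping_scale=2.0, grad-norm quartiles ...` lines, the five numbers appear to be the 0/25/50/75/100th percentiles of recent gradient norms, and the reported threshold is consistently twice the median (e.g. 2.0 × 3.007e+02 = 6.014e+02 in the first such report above), i.e. `threshold = clipping_scale * median`. A hedged sketch of that style of adaptive clipping follows; the buffer length, reporting cadence, and class name are assumptions.

```python
# Sketch of median-based adaptive gradient clipping consistent with the
# [optim.py] reports: threshold = clipping_scale * median of recent
# norms. Buffer length and names are assumptions; counters are kept
# cumulative here for brevity (the log likely resets them per report).
import torch

class AdaptiveGradClipper:
    def __init__(self, clipping_scale: float = 2.0, history: int = 500):
        self.clipping_scale = clipping_scale
        self.history = history
        self.norms: list[float] = []
        self.steps = 0
        self.clipped = 0

    def clip_(self, parameters) -> None:
        grads = [p.grad for p in parameters if p.grad is not None]
        if not grads:
            return
        norm = torch.linalg.vector_norm(
            torch.stack([g.detach().norm() for g in grads])).item()
        self.norms = (self.norms + [norm])[-self.history:]
        threshold = self.clipping_scale * float(
            torch.tensor(self.norms).median())
        self.steps += 1
        if norm > threshold:
            self.clipped += 1
            for g in grads:
                g.mul_(threshold / norm)

    def report(self) -> str:
        q = torch.quantile(torch.tensor(self.norms),
                           torch.tensor([0.0, 0.25, 0.5, 0.75, 1.0]))
        return (f"Clipping_scale={self.clipping_scale}, grad-norm quartiles "
                + " ".join(f"{v:.3e}" for v in q.tolist())
                + f", threshold={self.clipping_scale * q[2].item():.3e}"
                + f", percent-clipped={100 * self.clipped / max(1, self.steps):.1f}")
```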
], batch size: 18, lr: 2.66e-03, grad_scale: 16.0 +2023-02-09 01:22:25,396 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226292.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:22:30,116 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.3583, 1.6217, 4.6187, 1.8371, 4.0655, 3.8899, 4.1854, 4.0557], + device='cuda:2'), covar=tensor([0.0681, 0.4464, 0.0547, 0.4190, 0.1144, 0.0978, 0.0586, 0.0719], + device='cuda:2'), in_proj_covar=tensor([0.0685, 0.0665, 0.0735, 0.0661, 0.0749, 0.0638, 0.0644, 0.0718], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:22:37,472 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-09 01:22:42,673 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.434e+02 3.095e+02 3.696e+02 6.520e+02, threshold=6.190e+02, percent-clipped=3.0 +2023-02-09 01:22:42,888 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226317.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:22:57,909 WARNING [train.py:1067] (2/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-09 01:23:01,703 INFO [train.py:901] (2/4) Epoch 29, batch 0, loss[loss=0.2081, simple_loss=0.2945, pruned_loss=0.0608, over 8577.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2945, pruned_loss=0.0608, over 8577.00 frames. ], batch size: 31, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:23:01,703 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 01:23:13,271 INFO [train.py:935] (2/4) Epoch 29, validation: loss=0.1705, simple_loss=0.2705, pruned_loss=0.03528, over 944034.00 frames. +2023-02-09 01:23:13,272 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6724MB +2023-02-09 01:23:26,221 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226339.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:23:29,656 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-09 01:23:39,591 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.81 vs. limit=5.0 +2023-02-09 01:23:49,914 INFO [train.py:901] (2/4) Epoch 29, batch 50, loss[loss=0.2077, simple_loss=0.2976, pruned_loss=0.05894, over 8343.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2836, pruned_loss=0.0559, over 369403.14 frames. ], batch size: 25, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:23:50,828 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226372.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:23:53,662 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4468, 1.7442, 1.6877, 1.1210, 1.7523, 1.4578, 0.3220, 1.6395], + device='cuda:2'), covar=tensor([0.0542, 0.0433, 0.0366, 0.0557, 0.0501, 0.0938, 0.1000, 0.0321], + device='cuda:2'), in_proj_covar=tensor([0.0478, 0.0414, 0.0368, 0.0462, 0.0398, 0.0555, 0.0406, 0.0443], + device='cuda:2'), out_proj_covar=tensor([1.2657e-04, 1.0724e-04, 9.5925e-05, 1.2073e-04, 1.0402e-04, 1.5462e-04, + 1.0826e-04, 1.1606e-04], device='cuda:2') +2023-02-09 01:24:06,020 WARNING [train.py:1067] (2/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. 
Duration: 0.9409375 +2023-02-09 01:24:12,517 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226403.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:24:22,926 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.293e+02 2.936e+02 3.721e+02 6.222e+02, threshold=5.872e+02, percent-clipped=1.0 +2023-02-09 01:24:25,771 INFO [train.py:901] (2/4) Epoch 29, batch 100, loss[loss=0.1641, simple_loss=0.2391, pruned_loss=0.04452, over 7799.00 frames. ], tot_loss[loss=0.196, simple_loss=0.28, pruned_loss=0.05599, over 643795.97 frames. ], batch size: 19, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:24:30,604 WARNING [train.py:1067] (2/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-09 01:24:33,770 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1790, 1.9889, 2.4592, 2.1453, 2.4405, 2.2875, 2.1389, 1.4472], + device='cuda:2'), covar=tensor([0.5886, 0.5097, 0.2276, 0.3901, 0.2614, 0.3238, 0.1934, 0.5408], + device='cuda:2'), in_proj_covar=tensor([0.0969, 0.1034, 0.0840, 0.1003, 0.1030, 0.0940, 0.0776, 0.0858], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 01:25:02,686 INFO [train.py:901] (2/4) Epoch 29, batch 150, loss[loss=0.2131, simple_loss=0.3018, pruned_loss=0.0622, over 8106.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.0569, over 857782.18 frames. ], batch size: 23, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:25:34,579 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.438e+02 2.916e+02 4.111e+02 7.524e+02, threshold=5.832e+02, percent-clipped=2.0 +2023-02-09 01:25:35,509 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226518.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:25:37,497 INFO [train.py:901] (2/4) Epoch 29, batch 200, loss[loss=0.1809, simple_loss=0.2678, pruned_loss=0.04704, over 8444.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2813, pruned_loss=0.057, over 1024342.56 frames. ], batch size: 27, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:12,528 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8383, 1.6968, 2.2928, 1.7209, 1.0727, 1.8245, 2.3151, 2.4818], + device='cuda:2'), covar=tensor([0.0488, 0.1217, 0.1541, 0.1393, 0.0610, 0.1404, 0.0588, 0.0543], + device='cuda:2'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:26:12,613 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5988, 2.5297, 3.2262, 2.5801, 3.1061, 2.7123, 2.6048, 2.1961], + device='cuda:2'), covar=tensor([0.5575, 0.5163, 0.2189, 0.4515, 0.2883, 0.3357, 0.1777, 0.5690], + device='cuda:2'), in_proj_covar=tensor([0.0965, 0.1030, 0.0837, 0.0999, 0.1025, 0.0935, 0.0773, 0.0852], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 01:26:13,673 INFO [train.py:901] (2/4) Epoch 29, batch 250, loss[loss=0.248, simple_loss=0.3163, pruned_loss=0.08984, over 6709.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2822, pruned_loss=0.05714, over 1153565.74 frames. 
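The `attn_weights_entropy` tensor dumps scattered through the log are a diagnostic: the entropy of each attention head's distribution, where values near 0 mean a head attends to a single position and larger values mean diffuse attention. A minimal sketch of the computation is below; the shapes and helper name are assumptions.

```python
# Sketch of a per-head attention-entropy diagnostic like the
# "attn_weights_entropy" dumps: H = -sum(p * log p) over the keys,
# averaged over query positions. Shapes are assumptions.
import torch

def attn_weights_entropy(attn: torch.Tensor,
                         eps: float = 1e-20) -> torch.Tensor:
    """attn: (num_heads, query_len, key_len), rows summing to 1."""
    h = -(attn * (attn + eps).log()).sum(dim=-1)  # (num_heads, query_len)
    return h.mean(dim=-1)                         # one value per head

# A uniform distribution over 8 keys has entropy log(8) ~= 2.08, while
# a one-hot distribution has entropy ~= 0, which brackets the values
# seen in these dumps.
attn = torch.softmax(torch.randn(4, 10, 8), dim=-1)
print(attn_weights_entropy(attn))
```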
], batch size: 71, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:26,049 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-09 01:26:31,141 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:31,292 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:33,828 WARNING [train.py:1067] (2/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-09 01:26:46,401 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.514e+02 3.003e+02 3.645e+02 8.891e+02, threshold=6.006e+02, percent-clipped=9.0 +2023-02-09 01:26:48,780 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:49,256 INFO [train.py:901] (2/4) Epoch 29, batch 300, loss[loss=0.1948, simple_loss=0.276, pruned_loss=0.05684, over 7804.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2834, pruned_loss=0.05787, over 1257046.00 frames. ], batch size: 20, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:54,305 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226628.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:12,768 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226653.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:25,798 INFO [train.py:901] (2/4) Epoch 29, batch 350, loss[loss=0.1858, simple_loss=0.2709, pruned_loss=0.05031, over 8122.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.283, pruned_loss=0.05757, over 1336590.78 frames. ], batch size: 22, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:27:45,007 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0676, 1.2591, 1.1968, 0.7047, 1.2123, 1.0239, 0.0840, 1.2436], + device='cuda:2'), covar=tensor([0.0512, 0.0449, 0.0427, 0.0698, 0.0527, 0.1151, 0.1034, 0.0408], + device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0418, 0.0372, 0.0466, 0.0401, 0.0559, 0.0408, 0.0447], + device='cuda:2'), out_proj_covar=tensor([1.2785e-04, 1.0831e-04, 9.7018e-05, 1.2160e-04, 1.0475e-04, 1.5584e-04, + 1.0894e-04, 1.1705e-04], device='cuda:2') +2023-02-09 01:27:46,741 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4838, 1.9207, 2.6519, 1.4085, 1.9370, 1.8551, 1.6475, 1.9488], + device='cuda:2'), covar=tensor([0.1981, 0.2499, 0.0861, 0.4686, 0.2000, 0.3358, 0.2450, 0.2273], + device='cuda:2'), in_proj_covar=tensor([0.0540, 0.0637, 0.0562, 0.0671, 0.0664, 0.0614, 0.0564, 0.0646], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:27:54,190 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226710.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:58,932 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.400e+02 2.862e+02 3.557e+02 6.632e+02, threshold=5.725e+02, percent-clipped=2.0 +2023-02-09 01:28:01,712 INFO [train.py:901] (2/4) Epoch 29, batch 400, loss[loss=0.1722, simple_loss=0.2493, pruned_loss=0.04759, over 7708.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2836, pruned_loss=0.05751, over 1398681.69 frames. 
], batch size: 18, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:28:37,571 INFO [train.py:901] (2/4) Epoch 29, batch 450, loss[loss=0.1765, simple_loss=0.247, pruned_loss=0.05304, over 7706.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2819, pruned_loss=0.05643, over 1448358.11 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:28:38,565 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.07 vs. limit=5.0 +2023-02-09 01:28:39,871 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226774.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:28:53,563 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7529, 1.6833, 2.3417, 1.4782, 1.3593, 2.3020, 0.5301, 1.5120], + device='cuda:2'), covar=tensor([0.1481, 0.1149, 0.0305, 0.0973, 0.2342, 0.0352, 0.1769, 0.1171], + device='cuda:2'), in_proj_covar=tensor([0.0202, 0.0208, 0.0138, 0.0224, 0.0280, 0.0148, 0.0174, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 01:28:56,603 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.30 vs. limit=2.0 +2023-02-09 01:28:58,203 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226799.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:29:11,132 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.449e+02 2.964e+02 3.856e+02 9.700e+02, threshold=5.929e+02, percent-clipped=9.0 +2023-02-09 01:29:13,796 INFO [train.py:901] (2/4) Epoch 29, batch 500, loss[loss=0.1818, simple_loss=0.277, pruned_loss=0.0433, over 8658.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2813, pruned_loss=0.05668, over 1480664.78 frames. ], batch size: 39, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:29:48,217 INFO [train.py:901] (2/4) Epoch 29, batch 550, loss[loss=0.1579, simple_loss=0.2431, pruned_loss=0.03632, over 7658.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.05708, over 1513196.46 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:29:51,498 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 01:30:21,915 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.486e+02 3.156e+02 4.092e+02 1.034e+03, threshold=6.313e+02, percent-clipped=6.0 +2023-02-09 01:30:24,633 INFO [train.py:901] (2/4) Epoch 29, batch 600, loss[loss=0.1993, simple_loss=0.2778, pruned_loss=0.06037, over 8095.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2825, pruned_loss=0.0579, over 1532245.38 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:30:28,546 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.94 vs. 
limit=5.0 +2023-02-09 01:30:35,119 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9305, 1.5239, 2.9655, 1.6117, 2.3327, 3.1585, 3.2830, 2.7599], + device='cuda:2'), covar=tensor([0.1018, 0.1581, 0.0330, 0.1815, 0.0819, 0.0290, 0.0692, 0.0520], + device='cuda:2'), in_proj_covar=tensor([0.0310, 0.0327, 0.0295, 0.0325, 0.0327, 0.0278, 0.0444, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 01:30:38,693 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3980, 2.3602, 3.0018, 2.5400, 2.9241, 2.5719, 2.4015, 1.8250], + device='cuda:2'), covar=tensor([0.5700, 0.5274, 0.2400, 0.4214, 0.2771, 0.3210, 0.1859, 0.5897], + device='cuda:2'), in_proj_covar=tensor([0.0965, 0.1033, 0.0838, 0.0999, 0.1026, 0.0936, 0.0772, 0.0852], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 01:30:43,060 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-09 01:30:56,506 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226966.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:30:59,759 INFO [train.py:901] (2/4) Epoch 29, batch 650, loss[loss=0.1843, simple_loss=0.2663, pruned_loss=0.05113, over 8488.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2823, pruned_loss=0.05795, over 1551756.29 frames. ], batch size: 28, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:31:13,843 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:31:25,428 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227007.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:31:32,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.425e+02 2.942e+02 3.859e+02 6.314e+02, threshold=5.885e+02, percent-clipped=1.0 +2023-02-09 01:31:36,262 INFO [train.py:901] (2/4) Epoch 29, batch 700, loss[loss=0.1786, simple_loss=0.2657, pruned_loss=0.04575, over 7546.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2819, pruned_loss=0.05758, over 1563060.05 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:31:56,836 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8107, 2.0766, 1.6764, 2.8157, 1.3253, 1.5601, 2.0102, 2.1714], + device='cuda:2'), covar=tensor([0.0906, 0.0951, 0.1056, 0.0361, 0.1125, 0.1432, 0.0849, 0.0806], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0214, 0.0204, 0.0249, 0.0251, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 01:32:12,690 INFO [train.py:901] (2/4) Epoch 29, batch 750, loss[loss=0.2077, simple_loss=0.2839, pruned_loss=0.06577, over 8579.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2815, pruned_loss=0.05751, over 1571949.29 frames. ], batch size: 34, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:32:31,395 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-09 01:32:40,242 WARNING [train.py:1067] (2/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. 
Duration: 26.32775 +2023-02-09 01:32:44,349 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.547e+02 3.055e+02 4.023e+02 1.198e+03, threshold=6.109e+02, percent-clipped=3.0 +2023-02-09 01:32:47,763 INFO [train.py:901] (2/4) Epoch 29, batch 800, loss[loss=0.1869, simple_loss=0.2828, pruned_loss=0.04544, over 8619.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2814, pruned_loss=0.05688, over 1586508.96 frames. ], batch size: 31, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:33:24,591 INFO [train.py:901] (2/4) Epoch 29, batch 850, loss[loss=0.1505, simple_loss=0.2263, pruned_loss=0.03733, over 7701.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2828, pruned_loss=0.05787, over 1590846.07 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:33:57,033 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.463e+02 2.886e+02 3.949e+02 8.845e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 01:33:59,149 INFO [train.py:901] (2/4) Epoch 29, batch 900, loss[loss=0.1895, simple_loss=0.2783, pruned_loss=0.05035, over 8319.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2826, pruned_loss=0.0575, over 1599168.21 frames. ], batch size: 25, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:34:35,613 INFO [train.py:901] (2/4) Epoch 29, batch 950, loss[loss=0.2137, simple_loss=0.2781, pruned_loss=0.07467, over 7791.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.05781, over 1602385.21 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:04,861 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-09 01:35:09,005 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.581e+02 3.033e+02 3.905e+02 1.035e+03, threshold=6.066e+02, percent-clipped=4.0 +2023-02-09 01:35:11,178 INFO [train.py:901] (2/4) Epoch 29, batch 1000, loss[loss=0.2197, simple_loss=0.3027, pruned_loss=0.06834, over 8508.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2819, pruned_loss=0.05702, over 1606468.65 frames. ], batch size: 28, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:32,311 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:35:39,828 WARNING [train.py:1067] (2/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-09 01:35:47,252 INFO [train.py:901] (2/4) Epoch 29, batch 1050, loss[loss=0.2315, simple_loss=0.3262, pruned_loss=0.06842, over 8703.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2823, pruned_loss=0.05702, over 1612338.37 frames. ], batch size: 34, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:52,713 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403 from training. 
Duration: 29.735 +2023-02-09 01:36:22,044 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.373e+02 3.020e+02 3.651e+02 1.051e+03, threshold=6.040e+02, percent-clipped=1.0 +2023-02-09 01:36:23,012 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8052, 1.6663, 2.4827, 1.6115, 1.3177, 2.3884, 0.5918, 1.5397], + device='cuda:2'), covar=tensor([0.1396, 0.1248, 0.0291, 0.1032, 0.2321, 0.0330, 0.1781, 0.1204], + device='cuda:2'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0224, 0.0278, 0.0147, 0.0173, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 01:36:24,319 INFO [train.py:901] (2/4) Epoch 29, batch 1100, loss[loss=0.1867, simple_loss=0.2753, pruned_loss=0.04911, over 8493.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2828, pruned_loss=0.05719, over 1613946.40 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:36:30,382 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227429.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:56,358 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227466.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:59,075 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8551, 1.4661, 1.6923, 1.3372, 0.9679, 1.4605, 1.5948, 1.6368], + device='cuda:2'), covar=tensor([0.0566, 0.1243, 0.1713, 0.1548, 0.0605, 0.1510, 0.0765, 0.0675], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0163, 0.0102, 0.0164, 0.0114, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:36:59,552 INFO [train.py:901] (2/4) Epoch 29, batch 1150, loss[loss=0.2169, simple_loss=0.2959, pruned_loss=0.06896, over 7811.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2825, pruned_loss=0.05701, over 1614208.34 frames. ], batch size: 20, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:06,449 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-09 01:37:34,264 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.320e+02 2.698e+02 3.625e+02 7.425e+02, threshold=5.396e+02, percent-clipped=3.0 +2023-02-09 01:37:36,419 INFO [train.py:901] (2/4) Epoch 29, batch 1200, loss[loss=0.1718, simple_loss=0.2635, pruned_loss=0.04007, over 8129.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2822, pruned_loss=0.05685, over 1611927.87 frames. 
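The `[zipformer.py:1185] warmup_begin=..., warmup_end=..., batch_count=..., num_to_drop=..., layers_to_drop=...` lines track per-module warmup windows (measured in batches) plus occasional stochastic dropping of whole encoder layers as regularization; in this log `num_to_drop` is usually 0 and occasionally 1. Below is a hedged sketch of one way such a schedule can be wired up. The probabilities and their schedule are pure assumptions; only the bookkeeping mirrors the log fields.

```python
# Hypothetical layer-drop schedule; the probabilities are assumptions.
# During the warmup window the drop probability is higher, then it
# settles at a small residual rate (the log shows num_to_drop=1 only
# occasionally, long after warmup_end).
import random

def layer_drop_prob(batch_count: float, warmup_begin: float,
                    warmup_end: float, initial_p: float = 0.1,
                    final_p: float = 0.01) -> float:
    if batch_count <= warmup_begin:
        return initial_p
    if batch_count >= warmup_end:
        return final_p
    frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
    return initial_p + frac * (final_p - initial_p)

def pick_layers_to_drop(num_layers: int, p: float) -> set:
    return {i for i in range(num_layers) if random.random() < p}

p = layer_drop_prob(batch_count=227429.0, warmup_begin=1333.3,
                    warmup_end=2000.0)
layers_to_drop = pick_layers_to_drop(num_layers=12, p=p)
print(f"num_to_drop={len(layers_to_drop)}, layers_to_drop={layers_to_drop}")
```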
], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:53,823 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9370, 2.0329, 1.7212, 2.4675, 1.1819, 1.5645, 1.7407, 2.0691], + device='cuda:2'), covar=tensor([0.0713, 0.0760, 0.0875, 0.0439, 0.1063, 0.1323, 0.0836, 0.0742], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0195, 0.0244, 0.0213, 0.0203, 0.0247, 0.0250, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 01:38:09,052 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0038, 1.5544, 1.7780, 1.4315, 1.0973, 1.5504, 1.8409, 1.6201], + device='cuda:2'), covar=tensor([0.0549, 0.1289, 0.1674, 0.1467, 0.0591, 0.1492, 0.0706, 0.0661], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0162, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:38:11,530 INFO [train.py:901] (2/4) Epoch 29, batch 1250, loss[loss=0.2271, simple_loss=0.3059, pruned_loss=0.07415, over 8452.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2814, pruned_loss=0.05625, over 1613721.59 frames. ], batch size: 27, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:38:13,776 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1345, 1.6755, 1.5109, 1.6715, 1.4666, 1.4176, 1.4295, 1.3710], + device='cuda:2'), covar=tensor([0.1216, 0.0549, 0.1322, 0.0547, 0.0762, 0.1573, 0.0902, 0.0868], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0246, 0.0344, 0.0314, 0.0303, 0.0349, 0.0351, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 01:38:19,535 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5590, 2.0375, 3.1237, 1.4832, 2.3062, 2.0677, 1.6943, 2.3050], + device='cuda:2'), covar=tensor([0.1931, 0.2742, 0.0815, 0.4790, 0.2019, 0.3305, 0.2511, 0.2353], + device='cuda:2'), in_proj_covar=tensor([0.0539, 0.0639, 0.0564, 0.0672, 0.0667, 0.0617, 0.0565, 0.0648], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:38:46,455 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.398e+02 2.828e+02 3.393e+02 7.704e+02, threshold=5.657e+02, percent-clipped=4.0 +2023-02-09 01:38:48,671 INFO [train.py:901] (2/4) Epoch 29, batch 1300, loss[loss=0.1994, simple_loss=0.2919, pruned_loss=0.05346, over 8490.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2805, pruned_loss=0.05593, over 1610031.25 frames. ], batch size: 28, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:39:24,554 INFO [train.py:901] (2/4) Epoch 29, batch 1350, loss[loss=0.2008, simple_loss=0.2847, pruned_loss=0.05844, over 8376.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.282, pruned_loss=0.05655, over 1614434.47 frames. ], batch size: 49, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:39:52,816 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227711.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:39:58,210 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.335e+02 2.785e+02 3.650e+02 1.055e+03, threshold=5.570e+02, percent-clipped=4.0 +2023-02-09 01:40:00,377 INFO [train.py:901] (2/4) Epoch 29, batch 1400, loss[loss=0.2189, simple_loss=0.3063, pruned_loss=0.06574, over 8537.00 frames. 
], tot_loss[loss=0.1972, simple_loss=0.2818, pruned_loss=0.05635, over 1614935.14 frames. ], batch size: 31, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:40:01,308 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=227722.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:20,601 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=227747.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:38,114 INFO [train.py:901] (2/4) Epoch 29, batch 1450, loss[loss=0.2224, simple_loss=0.2987, pruned_loss=0.07307, over 8498.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2827, pruned_loss=0.05676, over 1617694.96 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:40:39,492 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227773.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:48,019 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 01:41:11,911 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.369e+02 2.795e+02 3.434e+02 1.018e+03, threshold=5.589e+02, percent-clipped=3.0 +2023-02-09 01:41:14,105 INFO [train.py:901] (2/4) Epoch 29, batch 1500, loss[loss=0.1942, simple_loss=0.2854, pruned_loss=0.05155, over 8232.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2821, pruned_loss=0.05681, over 1612947.05 frames. ], batch size: 24, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:41:32,538 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:41:40,143 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2795, 3.1885, 2.9193, 1.6092, 2.8443, 2.9523, 2.9300, 2.8431], + device='cuda:2'), covar=tensor([0.1060, 0.0778, 0.1431, 0.4406, 0.1225, 0.1212, 0.1441, 0.1062], + device='cuda:2'), in_proj_covar=tensor([0.0545, 0.0458, 0.0456, 0.0563, 0.0446, 0.0470, 0.0442, 0.0414], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:41:51,485 INFO [train.py:901] (2/4) Epoch 29, batch 1550, loss[loss=0.176, simple_loss=0.2599, pruned_loss=0.04606, over 7791.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2816, pruned_loss=0.0572, over 1610958.32 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:42:04,415 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227888.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:42:25,548 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.452e+02 3.275e+02 4.864e+02 1.208e+03, threshold=6.551e+02, percent-clipped=17.0 +2023-02-09 01:42:27,699 INFO [train.py:901] (2/4) Epoch 29, batch 1600, loss[loss=0.1886, simple_loss=0.2759, pruned_loss=0.0507, over 7982.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05696, over 1612483.49 frames. 
], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:42:27,873 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2754, 2.6483, 1.9813, 3.9833, 1.5847, 1.8566, 2.5088, 2.6938], + device='cuda:2'), covar=tensor([0.0885, 0.0942, 0.1119, 0.0293, 0.1212, 0.1520, 0.0897, 0.0962], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0213, 0.0204, 0.0247, 0.0251, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 01:43:04,956 INFO [train.py:901] (2/4) Epoch 29, batch 1650, loss[loss=0.1866, simple_loss=0.2751, pruned_loss=0.04902, over 8024.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2824, pruned_loss=0.05721, over 1620933.42 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:39,888 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.401e+02 2.687e+02 3.396e+02 6.045e+02, threshold=5.374e+02, percent-clipped=0.0 +2023-02-09 01:43:42,082 INFO [train.py:901] (2/4) Epoch 29, batch 1700, loss[loss=0.1767, simple_loss=0.2688, pruned_loss=0.04232, over 8517.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2827, pruned_loss=0.05808, over 1617123.43 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:42,948 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8662, 3.8399, 3.4979, 1.9440, 3.4237, 3.5393, 3.4999, 3.3674], + device='cuda:2'), covar=tensor([0.0842, 0.0602, 0.1093, 0.4213, 0.0954, 0.1066, 0.1290, 0.0979], + device='cuda:2'), in_proj_covar=tensor([0.0548, 0.0461, 0.0457, 0.0567, 0.0447, 0.0472, 0.0445, 0.0415], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:43:44,493 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8079, 1.3989, 3.2845, 1.6207, 2.3208, 3.5427, 3.7358, 3.1118], + device='cuda:2'), covar=tensor([0.1273, 0.1929, 0.0339, 0.1990, 0.1069, 0.0268, 0.0551, 0.0514], + device='cuda:2'), in_proj_covar=tensor([0.0312, 0.0329, 0.0298, 0.0327, 0.0329, 0.0280, 0.0450, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 01:43:45,195 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228025.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:06,275 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228055.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:18,096 INFO [train.py:901] (2/4) Epoch 29, batch 1750, loss[loss=0.1522, simple_loss=0.2264, pruned_loss=0.03898, over 7233.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2834, pruned_loss=0.05849, over 1618719.38 frames. 
], batch size: 16, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:44:28,128 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2610, 1.9963, 2.4460, 2.1487, 2.4748, 2.3511, 2.2537, 1.4152], + device='cuda:2'), covar=tensor([0.6336, 0.5331, 0.2383, 0.3823, 0.2612, 0.3505, 0.2015, 0.5819], + device='cuda:2'), in_proj_covar=tensor([0.0974, 0.1038, 0.0842, 0.1005, 0.1030, 0.0942, 0.0776, 0.0857], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 01:44:38,632 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:38,668 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8041, 1.7655, 2.0789, 1.6708, 1.1069, 1.7268, 2.2710, 2.0803], + device='cuda:2'), covar=tensor([0.0459, 0.1244, 0.1580, 0.1373, 0.0578, 0.1409, 0.0599, 0.0606], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0162, 0.0102, 0.0164, 0.0114, 0.0147], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:44:52,744 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.425e+02 2.925e+02 3.463e+02 6.679e+02, threshold=5.849e+02, percent-clipped=2.0 +2023-02-09 01:44:55,484 INFO [train.py:901] (2/4) Epoch 29, batch 1800, loss[loss=0.1931, simple_loss=0.2878, pruned_loss=0.04919, over 8502.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2827, pruned_loss=0.05811, over 1614939.02 frames. ], batch size: 28, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:45:11,245 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:12,123 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.83 vs. limit=2.0 +2023-02-09 01:45:28,612 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228169.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:29,270 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228170.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:29,719 INFO [train.py:901] (2/4) Epoch 29, batch 1850, loss[loss=0.1849, simple_loss=0.2694, pruned_loss=0.05017, over 8252.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2809, pruned_loss=0.05718, over 1614212.37 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:45:42,839 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228189.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:46:03,761 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.509e+02 2.932e+02 3.424e+02 5.958e+02, threshold=5.864e+02, percent-clipped=1.0 +2023-02-09 01:46:05,868 INFO [train.py:901] (2/4) Epoch 29, batch 1900, loss[loss=0.1925, simple_loss=0.2779, pruned_loss=0.05356, over 7652.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2812, pruned_loss=0.05716, over 1617874.51 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:46:29,136 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.37 vs. limit=5.0 +2023-02-09 01:46:32,893 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 01:46:38,539 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. 
Duration: 27.511125 +2023-02-09 01:46:41,416 INFO [train.py:901] (2/4) Epoch 29, batch 1950, loss[loss=0.18, simple_loss=0.2621, pruned_loss=0.04896, over 8314.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2794, pruned_loss=0.05661, over 1614395.53 frames. ], batch size: 25, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:46:50,986 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 01:47:05,322 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228304.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:10,140 WARNING [train.py:1067] (2/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 01:47:16,244 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.422e+02 3.056e+02 3.762e+02 8.552e+02, threshold=6.111e+02, percent-clipped=4.0 +2023-02-09 01:47:18,252 INFO [train.py:901] (2/4) Epoch 29, batch 2000, loss[loss=0.1874, simple_loss=0.2772, pruned_loss=0.0488, over 8103.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2808, pruned_loss=0.05708, over 1619098.31 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:47:36,284 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228347.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:39,106 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:44,567 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7555, 1.9827, 2.1034, 1.4228, 2.2758, 1.5751, 0.7704, 1.9871], + device='cuda:2'), covar=tensor([0.0736, 0.0445, 0.0337, 0.0694, 0.0440, 0.0938, 0.1065, 0.0405], + device='cuda:2'), in_proj_covar=tensor([0.0476, 0.0414, 0.0369, 0.0460, 0.0397, 0.0552, 0.0403, 0.0443], + device='cuda:2'), out_proj_covar=tensor([1.2611e-04, 1.0730e-04, 9.6024e-05, 1.2017e-04, 1.0384e-04, 1.5379e-04, + 1.0767e-04, 1.1589e-04], device='cuda:2') +2023-02-09 01:47:52,203 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228369.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:53,545 INFO [train.py:901] (2/4) Epoch 29, batch 2050, loss[loss=0.2009, simple_loss=0.2829, pruned_loss=0.0594, over 8541.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2818, pruned_loss=0.05739, over 1619454.75 frames. ], batch size: 39, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:48:25,962 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.333e+02 2.866e+02 3.600e+02 5.490e+02, threshold=5.733e+02, percent-clipped=0.0 +2023-02-09 01:48:28,119 INFO [train.py:901] (2/4) Epoch 29, batch 2100, loss[loss=0.1971, simple_loss=0.2954, pruned_loss=0.04941, over 8472.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2821, pruned_loss=0.05703, over 1617634.97 frames. 
], batch size: 25, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:48:32,346 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:44,591 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228442.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:48,016 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228447.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:48:50,742 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228451.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:59,114 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5033, 1.9898, 3.0507, 1.4381, 2.3553, 1.9448, 1.6706, 2.4112], + device='cuda:2'), covar=tensor([0.2221, 0.2913, 0.1091, 0.5151, 0.2113, 0.3664, 0.2762, 0.2583], + device='cuda:2'), in_proj_covar=tensor([0.0543, 0.0642, 0.0568, 0.0675, 0.0667, 0.0617, 0.0566, 0.0650], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:49:04,307 INFO [train.py:901] (2/4) Epoch 29, batch 2150, loss[loss=0.1741, simple_loss=0.2557, pruned_loss=0.04629, over 7914.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05698, over 1615392.48 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:49:05,161 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1460, 1.3109, 4.3350, 1.5743, 3.8431, 3.6451, 3.9125, 3.7922], + device='cuda:2'), covar=tensor([0.0716, 0.5074, 0.0657, 0.4524, 0.1204, 0.1019, 0.0694, 0.0779], + device='cuda:2'), in_proj_covar=tensor([0.0686, 0.0669, 0.0744, 0.0664, 0.0750, 0.0639, 0.0647, 0.0721], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:49:14,150 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:49:37,698 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.659e+02 3.270e+02 3.951e+02 1.171e+03, threshold=6.540e+02, percent-clipped=10.0 +2023-02-09 01:49:39,911 INFO [train.py:901] (2/4) Epoch 29, batch 2200, loss[loss=0.2313, simple_loss=0.3143, pruned_loss=0.07409, over 8503.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2813, pruned_loss=0.05751, over 1610286.55 frames. ], batch size: 28, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:49:47,022 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6773, 1.4042, 4.9122, 1.7918, 4.3140, 4.0788, 4.4351, 4.3103], + device='cuda:2'), covar=tensor([0.0622, 0.4698, 0.0474, 0.4199, 0.1027, 0.0901, 0.0512, 0.0632], + device='cuda:2'), in_proj_covar=tensor([0.0684, 0.0666, 0.0742, 0.0662, 0.0747, 0.0637, 0.0644, 0.0719], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:50:06,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228557.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:08,439 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228560.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:16,508 INFO [train.py:901] (2/4) Epoch 29, batch 2250, loss[loss=0.2016, simple_loss=0.2865, pruned_loss=0.05842, over 8354.00 frames. 
], tot_loss[loss=0.1978, simple_loss=0.2811, pruned_loss=0.05729, over 1607303.59 frames. ], batch size: 49, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:50:26,599 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228585.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:44,246 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7843, 1.7199, 4.2000, 1.5637, 2.5835, 4.6449, 5.1171, 3.6350], + device='cuda:2'), covar=tensor([0.2007, 0.2433, 0.0420, 0.2882, 0.1436, 0.0360, 0.0459, 0.1020], + device='cuda:2'), in_proj_covar=tensor([0.0311, 0.0327, 0.0297, 0.0325, 0.0329, 0.0279, 0.0447, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 01:50:50,350 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.350e+02 2.877e+02 3.583e+02 6.549e+02, threshold=5.755e+02, percent-clipped=1.0 +2023-02-09 01:50:52,578 INFO [train.py:901] (2/4) Epoch 29, batch 2300, loss[loss=0.174, simple_loss=0.2546, pruned_loss=0.04671, over 7242.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05713, over 1608227.19 frames. ], batch size: 16, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:50:53,463 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4665, 1.3946, 1.8049, 1.1952, 1.1198, 1.7806, 0.2927, 1.1643], + device='cuda:2'), covar=tensor([0.1581, 0.1183, 0.0380, 0.0825, 0.2317, 0.0506, 0.1801, 0.1107], + device='cuda:2'), in_proj_covar=tensor([0.0202, 0.0206, 0.0138, 0.0223, 0.0278, 0.0149, 0.0173, 0.0199], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 01:51:06,293 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.73 vs. limit=2.0 +2023-02-09 01:51:22,795 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8420, 1.5850, 4.0234, 1.4677, 3.5753, 3.3958, 3.6417, 3.5036], + device='cuda:2'), covar=tensor([0.0699, 0.4130, 0.0648, 0.4094, 0.1198, 0.0981, 0.0638, 0.0800], + device='cuda:2'), in_proj_covar=tensor([0.0686, 0.0669, 0.0746, 0.0666, 0.0751, 0.0640, 0.0647, 0.0722], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:51:28,772 INFO [train.py:901] (2/4) Epoch 29, batch 2350, loss[loss=0.2061, simple_loss=0.2984, pruned_loss=0.05694, over 8505.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2816, pruned_loss=0.0572, over 1607532.61 frames. ], batch size: 26, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:51:43,128 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228691.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:51:45,885 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228695.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:02,330 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.434e+02 3.194e+02 3.873e+02 7.294e+02, threshold=6.388e+02, percent-clipped=4.0 +2023-02-09 01:52:04,276 INFO [train.py:901] (2/4) Epoch 29, batch 2400, loss[loss=0.1829, simple_loss=0.2663, pruned_loss=0.04975, over 8083.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2806, pruned_loss=0.05636, over 1610654.72 frames. 
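Note that `tot_loss[...]` in the `[train.py:901]` lines is not a plain epoch average: the frame count it is reported "over" hovers around 1.6M rather than growing without bound, which suggests a frame-weighted average with geometric decay over a recent window. A sketch under that assumption follows; the decay factor is a guess.

```python
# Sketch of the frame-weighted running statistic that "tot_loss ...
# over N frames" suggests. With decay d, the effective frame count
# converges to roughly frames_per_batch / (1 - d), matching the
# near-constant ~1.6M frames in this log; d itself is an assumption.
class MetricsTracker:
    def __init__(self, decay: float = 0.99):
        self.decay = decay
        self.loss_sum = 0.0
        self.frames = 0.0

    def update(self, loss: float, num_frames: float) -> None:
        self.loss_sum = self.decay * self.loss_sum + loss * num_frames
        self.frames = self.decay * self.frames + num_frames

    @property
    def tot_loss(self) -> float:
        return self.loss_sum / max(1.0, self.frames)
```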
], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:52:17,432 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228740.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:34,956 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228765.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:39,604 INFO [train.py:901] (2/4) Epoch 29, batch 2450, loss[loss=0.2019, simple_loss=0.295, pruned_loss=0.0544, over 8517.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05633, over 1615150.22 frames. ], batch size: 26, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:52:55,038 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228791.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:53:05,521 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228806.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:08,255 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228810.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:10,297 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228813.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:13,499 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.278e+02 2.630e+02 3.527e+02 5.802e+02, threshold=5.259e+02, percent-clipped=0.0 +2023-02-09 01:53:16,189 INFO [train.py:901] (2/4) Epoch 29, batch 2500, loss[loss=0.2005, simple_loss=0.2961, pruned_loss=0.05244, over 8141.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2816, pruned_loss=0.0569, over 1617033.68 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:53:28,187 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228838.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:49,890 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0 +2023-02-09 01:53:51,003 INFO [train.py:901] (2/4) Epoch 29, batch 2550, loss[loss=0.1791, simple_loss=0.2568, pruned_loss=0.05066, over 7544.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2825, pruned_loss=0.05736, over 1618333.75 frames. ], batch size: 18, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:54:17,016 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228906.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:54:25,696 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.320e+02 2.761e+02 3.420e+02 6.403e+02, threshold=5.523e+02, percent-clipped=2.0 +2023-02-09 01:54:27,394 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0366, 2.3079, 3.1119, 1.9601, 2.6496, 2.3668, 2.1103, 2.6689], + device='cuda:2'), covar=tensor([0.1556, 0.2120, 0.0762, 0.3368, 0.1414, 0.2408, 0.1859, 0.1807], + device='cuda:2'), in_proj_covar=tensor([0.0546, 0.0644, 0.0569, 0.0679, 0.0670, 0.0619, 0.0568, 0.0652], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:54:27,832 INFO [train.py:901] (2/4) Epoch 29, batch 2600, loss[loss=0.2629, simple_loss=0.3266, pruned_loss=0.09961, over 6947.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05738, over 1612206.72 frames. 
], batch size: 71, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:54:51,312 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228953.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:55:03,867 INFO [train.py:901] (2/4) Epoch 29, batch 2650, loss[loss=0.1697, simple_loss=0.245, pruned_loss=0.04722, over 7433.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05683, over 1611252.85 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:55:27,203 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2578, 1.4688, 3.4199, 1.0185, 3.0039, 2.8614, 3.1219, 3.0279], + device='cuda:2'), covar=tensor([0.0767, 0.3820, 0.0780, 0.4308, 0.1308, 0.1051, 0.0697, 0.0841], + device='cuda:2'), in_proj_covar=tensor([0.0681, 0.0663, 0.0741, 0.0662, 0.0743, 0.0635, 0.0640, 0.0715], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 01:55:37,352 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.477e+02 2.922e+02 3.574e+02 8.790e+02, threshold=5.845e+02, percent-clipped=2.0 +2023-02-09 01:55:39,858 INFO [train.py:901] (2/4) Epoch 29, batch 2700, loss[loss=0.2078, simple_loss=0.2886, pruned_loss=0.06347, over 8021.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2818, pruned_loss=0.05685, over 1615349.92 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:11,094 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229062.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:14,607 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229066.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:17,757 INFO [train.py:901] (2/4) Epoch 29, batch 2750, loss[loss=0.1787, simple_loss=0.2466, pruned_loss=0.05537, over 7535.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2812, pruned_loss=0.05663, over 1611531.66 frames. ], batch size: 18, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:29,426 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229087.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:32,344 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229091.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:51,298 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.406e+02 2.857e+02 3.824e+02 7.570e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-09 01:56:53,439 INFO [train.py:901] (2/4) Epoch 29, batch 2800, loss[loss=0.212, simple_loss=0.301, pruned_loss=0.06155, over 8493.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05716, over 1615015.76 frames. 
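The recurring `[scaling.py:679] Whitening: num_groups=..., num_channels=..., metric=... vs. limit=...` lines report how far grouped activations are from having a white (scaled-identity) covariance; when the metric exceeds its limit, a corrective penalty can kick in. One plausible metric with that behavior is sketched below: it equals 1.0 for perfectly white activations and grows as the covariance spectrum becomes uneven. This exact formula is an assumption, not necessarily the recipe's.

```python
# A plausible whitening metric (an assumption): ratio of the mean
# squared eigenvalue of the per-group covariance to the squared mean
# eigenvalue, computed via trace identities. Equals 1.0 for a
# scaled-identity covariance, larger otherwise.
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> torch.Tensor:
    """x: (num_frames, num_channels); channels split into num_groups
    equal groups (num_channels must be divisible by num_groups)."""
    n, c = x.shape
    d = c // num_groups
    xg = x.reshape(n, num_groups, d).transpose(0, 1)        # (G, n, d)
    cov = torch.matmul(xg.transpose(1, 2), xg) / n          # (G, d, d)
    mean_eig = cov.diagonal(dim1=1, dim2=2).mean(dim=-1)    # trace / d
    mean_sq_eig = (cov ** 2).sum(dim=(1, 2)) / d            # ||cov||_F^2 / d
    return (mean_sq_eig / mean_eig ** 2).mean()

# White input stays near the limit's floor of 1.0:
print(whitening_metric(torch.randn(1000, 96), num_groups=8))
```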
], batch size: 28, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:54,325 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0903, 1.5717, 3.5218, 1.6208, 2.4717, 3.9158, 3.9510, 3.3583], + device='cuda:2'), covar=tensor([0.1168, 0.1876, 0.0340, 0.2139, 0.1065, 0.0224, 0.0603, 0.0538], + device='cuda:2'), in_proj_covar=tensor([0.0313, 0.0329, 0.0298, 0.0326, 0.0331, 0.0281, 0.0450, 0.0313], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 01:57:24,941 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229162.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:57:31,020 INFO [train.py:901] (2/4) Epoch 29, batch 2850, loss[loss=0.1882, simple_loss=0.2636, pruned_loss=0.05645, over 7453.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05717, over 1613845.88 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:57:43,221 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229187.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:58:03,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5060, 2.1772, 3.4283, 2.1957, 2.9546, 3.8857, 3.8732, 3.5317], + device='cuda:2'), covar=tensor([0.0962, 0.1567, 0.0583, 0.1753, 0.1611, 0.0214, 0.0718, 0.0441], + device='cuda:2'), in_proj_covar=tensor([0.0314, 0.0330, 0.0299, 0.0327, 0.0331, 0.0282, 0.0451, 0.0314], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 01:58:05,219 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.443e+02 3.095e+02 3.828e+02 9.615e+02, threshold=6.189e+02, percent-clipped=4.0 +2023-02-09 01:58:07,399 INFO [train.py:901] (2/4) Epoch 29, batch 2900, loss[loss=0.1735, simple_loss=0.2581, pruned_loss=0.04445, over 7460.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2807, pruned_loss=0.05658, over 1611215.76 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:58:21,866 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-09 01:58:23,777 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6554, 1.4358, 1.5891, 1.4013, 0.9586, 1.4256, 1.5175, 1.5665], + device='cuda:2'), covar=tensor([0.0632, 0.1275, 0.1735, 0.1454, 0.0625, 0.1514, 0.0769, 0.0641], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0103, 0.0165, 0.0114, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 01:58:44,220 INFO [train.py:901] (2/4) Epoch 29, batch 2950, loss[loss=0.2007, simple_loss=0.2921, pruned_loss=0.05462, over 8557.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2807, pruned_loss=0.05667, over 1612630.45 frames. ], batch size: 31, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:58:47,089 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. 
Duration: 0.7545625 +2023-02-09 01:59:02,249 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229297.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:59:17,278 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.481e+02 3.005e+02 3.741e+02 9.617e+02, threshold=6.010e+02, percent-clipped=3.0 +2023-02-09 01:59:17,404 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229318.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:59:19,291 INFO [train.py:901] (2/4) Epoch 29, batch 3000, loss[loss=0.1752, simple_loss=0.2702, pruned_loss=0.0401, over 8329.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.281, pruned_loss=0.05676, over 1610692.22 frames. ], batch size: 25, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:59:19,291 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 01:59:34,612 INFO [train.py:935] (2/4) Epoch 29, validation: loss=0.17, simple_loss=0.2699, pruned_loss=0.03504, over 944034.00 frames. +2023-02-09 01:59:34,613 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6724MB +2023-02-09 02:00:09,410 INFO [train.py:901] (2/4) Epoch 29, batch 3050, loss[loss=0.1545, simple_loss=0.2331, pruned_loss=0.0379, over 7429.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05705, over 1609378.59 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:00:28,259 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.6088, 4.6313, 4.1370, 2.1592, 4.0827, 4.1978, 4.0871, 4.1293], + device='cuda:2'), covar=tensor([0.0655, 0.0442, 0.0865, 0.4539, 0.0906, 0.1007, 0.1255, 0.0730], + device='cuda:2'), in_proj_covar=tensor([0.0551, 0.0465, 0.0458, 0.0567, 0.0446, 0.0471, 0.0446, 0.0416], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 02:00:40,616 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229412.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:00:41,980 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7599, 1.8162, 1.6789, 2.3371, 1.0521, 1.5613, 1.8260, 1.8770], + device='cuda:2'), covar=tensor([0.0814, 0.0799, 0.0889, 0.0409, 0.1063, 0.1298, 0.0700, 0.0783], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0213, 0.0203, 0.0247, 0.0250, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 02:00:44,471 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.505e+02 2.815e+02 3.570e+02 7.212e+02, threshold=5.630e+02, percent-clipped=4.0 +2023-02-09 02:00:46,515 INFO [train.py:901] (2/4) Epoch 29, batch 3100, loss[loss=0.1887, simple_loss=0.2788, pruned_loss=0.04932, over 8250.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2801, pruned_loss=0.05705, over 1607523.40 frames. 
], batch size: 24, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:00:59,213 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8632, 2.1923, 3.6776, 1.7957, 1.8932, 3.6480, 0.6665, 2.1527], + device='cuda:2'), covar=tensor([0.1105, 0.1201, 0.0260, 0.1619, 0.2052, 0.0296, 0.2095, 0.1357], + device='cuda:2'), in_proj_covar=tensor([0.0203, 0.0208, 0.0138, 0.0224, 0.0280, 0.0149, 0.0173, 0.0201], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 02:01:15,192 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229461.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:01:21,900 INFO [train.py:901] (2/4) Epoch 29, batch 3150, loss[loss=0.1535, simple_loss=0.2372, pruned_loss=0.03491, over 7908.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2807, pruned_loss=0.05714, over 1610512.33 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:01:39,329 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:01:56,471 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.412e+02 3.144e+02 3.923e+02 1.015e+03, threshold=6.289e+02, percent-clipped=11.0 +2023-02-09 02:01:58,573 INFO [train.py:901] (2/4) Epoch 29, batch 3200, loss[loss=0.1995, simple_loss=0.2961, pruned_loss=0.05146, over 8199.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2811, pruned_loss=0.05723, over 1607694.48 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:01:58,993 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.49 vs. limit=2.0 +2023-02-09 02:02:02,523 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.71 vs. limit=2.0 +2023-02-09 02:02:14,562 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.15 vs. limit=5.0 +2023-02-09 02:02:29,972 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7233, 2.5364, 1.8697, 2.3021, 2.2871, 1.5972, 2.2227, 2.1471], + device='cuda:2'), covar=tensor([0.1295, 0.0403, 0.1152, 0.0601, 0.0668, 0.1511, 0.0844, 0.0885], + device='cuda:2'), in_proj_covar=tensor([0.0353, 0.0243, 0.0342, 0.0314, 0.0300, 0.0349, 0.0349, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 02:02:35,461 INFO [train.py:901] (2/4) Epoch 29, batch 3250, loss[loss=0.1814, simple_loss=0.2663, pruned_loss=0.04827, over 7809.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05779, over 1607764.70 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:02:52,624 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:09,575 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.257e+02 2.746e+02 3.374e+02 9.131e+02, threshold=5.492e+02, percent-clipped=1.0 +2023-02-09 02:03:11,692 INFO [train.py:901] (2/4) Epoch 29, batch 3300, loss[loss=0.2623, simple_loss=0.3411, pruned_loss=0.09172, over 8451.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2817, pruned_loss=0.05664, over 1614138.71 frames. 
], batch size: 27, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:03:41,137 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:45,538 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229668.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:47,461 INFO [train.py:901] (2/4) Epoch 29, batch 3350, loss[loss=0.1681, simple_loss=0.248, pruned_loss=0.04412, over 7800.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2811, pruned_loss=0.05666, over 1614147.86 frames. ], batch size: 19, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:03:52,168 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229677.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:04:03,324 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229693.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:04:20,884 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.561e+02 2.994e+02 3.779e+02 7.703e+02, threshold=5.989e+02, percent-clipped=7.0 +2023-02-09 02:04:22,316 INFO [train.py:901] (2/4) Epoch 29, batch 3400, loss[loss=0.2086, simple_loss=0.2951, pruned_loss=0.06101, over 8195.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2812, pruned_loss=0.05697, over 1609990.78 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:04:25,972 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1294, 2.1892, 1.7322, 2.8945, 1.3909, 1.6653, 2.0602, 2.2079], + device='cuda:2'), covar=tensor([0.0646, 0.0737, 0.0939, 0.0323, 0.1084, 0.1281, 0.0856, 0.0815], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0195, 0.0245, 0.0213, 0.0203, 0.0246, 0.0251, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 02:04:59,068 INFO [train.py:901] (2/4) Epoch 29, batch 3450, loss[loss=0.1692, simple_loss=0.2531, pruned_loss=0.04262, over 7544.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2801, pruned_loss=0.05649, over 1608864.43 frames. ], batch size: 18, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:05:03,516 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229777.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:11,078 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5172, 1.8669, 1.8734, 1.2497, 1.9227, 1.4981, 0.4601, 1.8191], + device='cuda:2'), covar=tensor([0.0652, 0.0376, 0.0376, 0.0627, 0.0478, 0.1071, 0.1100, 0.0326], + device='cuda:2'), in_proj_covar=tensor([0.0484, 0.0416, 0.0375, 0.0468, 0.0403, 0.0559, 0.0411, 0.0450], + device='cuda:2'), out_proj_covar=tensor([1.2816e-04, 1.0766e-04, 9.7610e-05, 1.2232e-04, 1.0553e-04, 1.5573e-04, + 1.0969e-04, 1.1767e-04], device='cuda:2') +2023-02-09 02:05:23,715 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:33,258 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.314e+02 2.735e+02 3.470e+02 1.051e+03, threshold=5.470e+02, percent-clipped=3.0 +2023-02-09 02:05:34,611 INFO [train.py:901] (2/4) Epoch 29, batch 3500, loss[loss=0.2224, simple_loss=0.3051, pruned_loss=0.0698, over 8556.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.0569, over 1616614.93 frames. 
], batch size: 39, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:05:46,947 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229839.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:49,139 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.2701, 3.1797, 2.9638, 1.6149, 2.9037, 2.9811, 2.8194, 2.8706], + device='cuda:2'), covar=tensor([0.1117, 0.0825, 0.1325, 0.4602, 0.1149, 0.1130, 0.1738, 0.1002], + device='cuda:2'), in_proj_covar=tensor([0.0547, 0.0462, 0.0455, 0.0567, 0.0446, 0.0470, 0.0445, 0.0413], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 02:05:58,499 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 02:06:12,025 INFO [train.py:901] (2/4) Epoch 29, batch 3550, loss[loss=0.2044, simple_loss=0.2885, pruned_loss=0.06015, over 8089.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05724, over 1613159.59 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:06:15,083 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4334, 1.4293, 1.3917, 1.8533, 0.8119, 1.2738, 1.4452, 1.4776], + device='cuda:2'), covar=tensor([0.0867, 0.0774, 0.0984, 0.0420, 0.1058, 0.1402, 0.0659, 0.0780], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0213, 0.0203, 0.0247, 0.0251, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 02:06:41,728 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229912.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:06:46,512 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.466e+02 3.015e+02 3.666e+02 8.686e+02, threshold=6.030e+02, percent-clipped=2.0 +2023-02-09 02:06:47,430 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229920.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:06:47,947 INFO [train.py:901] (2/4) Epoch 29, batch 3600, loss[loss=0.16, simple_loss=0.2451, pruned_loss=0.03745, over 7805.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2819, pruned_loss=0.05726, over 1616147.14 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:07:00,995 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229939.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:01,976 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 02:07:05,361 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9767, 2.0083, 1.7726, 2.6390, 1.2899, 1.7042, 2.0107, 2.0755], + device='cuda:2'), covar=tensor([0.0691, 0.0751, 0.0817, 0.0363, 0.1061, 0.1213, 0.0707, 0.0773], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0196, 0.0244, 0.0213, 0.0203, 0.0246, 0.0251, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 02:07:11,758 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229954.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:24,209 INFO [train.py:901] (2/4) Epoch 29, batch 3650, loss[loss=0.2126, simple_loss=0.2869, pruned_loss=0.06911, over 8846.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2797, pruned_loss=0.05632, over 1612782.54 frames. 
], batch size: 35, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:07:37,192 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 02:07:38,963 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1825, 2.2792, 1.9212, 2.9871, 1.3796, 1.6995, 2.1718, 2.3070], + device='cuda:2'), covar=tensor([0.0692, 0.0791, 0.0875, 0.0334, 0.1147, 0.1363, 0.0867, 0.0830], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0212, 0.0202, 0.0245, 0.0250, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 02:07:50,393 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230005.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:56,887 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-09 02:08:00,955 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.398e+02 2.862e+02 3.372e+02 7.881e+02, threshold=5.724e+02, percent-clipped=3.0 +2023-02-09 02:08:01,702 INFO [train.py:901] (2/4) Epoch 29, batch 3700, loss[loss=0.1747, simple_loss=0.2618, pruned_loss=0.04384, over 8095.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2798, pruned_loss=0.05589, over 1611766.61 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:08:02,483 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230021.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:08:06,530 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 02:08:07,555 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.21 vs. limit=2.0 +2023-02-09 02:08:10,872 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230033.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:25,156 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230054.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:28,463 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230058.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:37,283 INFO [train.py:901] (2/4) Epoch 29, batch 3750, loss[loss=0.1972, simple_loss=0.2928, pruned_loss=0.05085, over 8454.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2816, pruned_loss=0.05666, over 1615967.73 frames. ], batch size: 27, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:13,042 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.469e+02 3.110e+02 3.966e+02 1.066e+03, threshold=6.219e+02, percent-clipped=4.0 +2023-02-09 02:09:13,774 INFO [train.py:901] (2/4) Epoch 29, batch 3800, loss[loss=0.1633, simple_loss=0.235, pruned_loss=0.04575, over 7710.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2821, pruned_loss=0.0571, over 1613398.87 frames. ], batch size: 18, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:24,880 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230136.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:09:47,382 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230168.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:09:49,388 INFO [train.py:901] (2/4) Epoch 29, batch 3850, loss[loss=0.2093, simple_loss=0.2901, pruned_loss=0.0643, over 8134.00 frames. 
], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05747, over 1612507.23 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:53,095 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230176.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:09:56,411 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7925, 1.9852, 2.0710, 1.4215, 2.2147, 1.5555, 0.6898, 1.9832], + device='cuda:2'), covar=tensor([0.0786, 0.0403, 0.0386, 0.0722, 0.0601, 0.0993, 0.1061, 0.0355], + device='cuda:2'), in_proj_covar=tensor([0.0475, 0.0412, 0.0369, 0.0462, 0.0397, 0.0552, 0.0404, 0.0443], + device='cuda:2'), out_proj_covar=tensor([1.2572e-04, 1.0659e-04, 9.6022e-05, 1.2081e-04, 1.0388e-04, 1.5382e-04, + 1.0775e-04, 1.1608e-04], device='cuda:2') +2023-02-09 02:10:10,986 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230201.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:12,900 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 02:10:17,560 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230210.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:24,395 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.511e+02 3.297e+02 3.973e+02 1.066e+03, threshold=6.594e+02, percent-clipped=6.0 +2023-02-09 02:10:25,130 INFO [train.py:901] (2/4) Epoch 29, batch 3900, loss[loss=0.2242, simple_loss=0.2915, pruned_loss=0.07844, over 8234.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2822, pruned_loss=0.05725, over 1610402.19 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 4.0 +2023-02-09 02:10:36,241 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:51,859 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:54,117 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230259.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:02,669 INFO [train.py:901] (2/4) Epoch 29, batch 3950, loss[loss=0.2095, simple_loss=0.2983, pruned_loss=0.06035, over 8363.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2827, pruned_loss=0.05751, over 1607942.61 frames. ], batch size: 24, lr: 2.59e-03, grad_scale: 4.0 +2023-02-09 02:11:30,499 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230310.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:37,958 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.564e+02 3.192e+02 4.107e+02 9.729e+02, threshold=6.384e+02, percent-clipped=2.0 +2023-02-09 02:11:38,719 INFO [train.py:901] (2/4) Epoch 29, batch 4000, loss[loss=0.2262, simple_loss=0.3, pruned_loss=0.07617, over 8498.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2819, pruned_loss=0.05749, over 1608923.09 frames. 
], batch size: 28, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:11:50,444 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230335.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:59,727 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230349.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:12:16,073 INFO [train.py:901] (2/4) Epoch 29, batch 4050, loss[loss=0.2071, simple_loss=0.2964, pruned_loss=0.05888, over 8469.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2827, pruned_loss=0.05753, over 1614135.22 frames. ], batch size: 27, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:12:16,258 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:12:31,110 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230392.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:12:48,565 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230417.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:12:50,324 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.354e+02 2.823e+02 3.644e+02 9.834e+02, threshold=5.645e+02, percent-clipped=2.0 +2023-02-09 02:12:51,013 INFO [train.py:901] (2/4) Epoch 29, batch 4100, loss[loss=0.2351, simple_loss=0.3187, pruned_loss=0.07579, over 8454.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05691, over 1615563.26 frames. ], batch size: 29, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:13:22,441 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230464.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:13:26,984 INFO [train.py:901] (2/4) Epoch 29, batch 4150, loss[loss=0.1863, simple_loss=0.2729, pruned_loss=0.04991, over 8084.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2811, pruned_loss=0.05705, over 1615338.83 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:13:38,478 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:13:56,267 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230512.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:01,810 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.471e+02 3.120e+02 4.398e+02 1.014e+03, threshold=6.241e+02, percent-clipped=11.0 +2023-02-09 02:14:02,515 INFO [train.py:901] (2/4) Epoch 29, batch 4200, loss[loss=0.1732, simple_loss=0.2638, pruned_loss=0.04129, over 8143.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2824, pruned_loss=0.05724, over 1619780.83 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:14:16,944 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 02:14:17,779 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230543.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:24,143 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230551.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:38,389 INFO [train.py:901] (2/4) Epoch 29, batch 4250, loss[loss=0.1951, simple_loss=0.284, pruned_loss=0.05313, over 8363.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2834, pruned_loss=0.05796, over 1620966.62 frames. 
], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:14:41,195 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 02:15:01,587 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230603.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:13,975 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.532e+02 3.108e+02 3.832e+02 7.900e+02, threshold=6.217e+02, percent-clipped=4.0 +2023-02-09 02:15:14,735 INFO [train.py:901] (2/4) Epoch 29, batch 4300, loss[loss=0.1907, simple_loss=0.2746, pruned_loss=0.0534, over 8029.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.282, pruned_loss=0.05755, over 1615732.23 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:15:19,485 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230627.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:19,526 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230627.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:37,545 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230652.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:51,725 INFO [train.py:901] (2/4) Epoch 29, batch 4350, loss[loss=0.1846, simple_loss=0.2817, pruned_loss=0.04376, over 8651.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2811, pruned_loss=0.05679, over 1616790.64 frames. ], batch size: 34, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:15:55,703 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2337, 1.1303, 1.3362, 1.0333, 1.0145, 1.3440, 0.1069, 0.9429], + device='cuda:2'), covar=tensor([0.1336, 0.1116, 0.0460, 0.0620, 0.2241, 0.0482, 0.1782, 0.1062], + device='cuda:2'), in_proj_covar=tensor([0.0203, 0.0206, 0.0138, 0.0225, 0.0281, 0.0149, 0.0174, 0.0202], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 02:16:17,531 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 02:16:27,826 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230718.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:29,089 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.462e+02 3.042e+02 3.743e+02 1.027e+03, threshold=6.085e+02, percent-clipped=1.0 +2023-02-09 02:16:29,341 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230720.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:29,846 INFO [train.py:901] (2/4) Epoch 29, batch 4400, loss[loss=0.1982, simple_loss=0.2747, pruned_loss=0.06081, over 8086.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2812, pruned_loss=0.05652, over 1615623.72 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:16:47,627 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230745.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:57,339 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 02:17:06,602 INFO [train.py:901] (2/4) Epoch 29, batch 4450, loss[loss=0.1789, simple_loss=0.2705, pruned_loss=0.04359, over 8105.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2822, pruned_loss=0.05726, over 1617904.54 frames. 
], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:17:36,258 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230810.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:17:43,245 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.373e+02 2.913e+02 3.542e+02 8.795e+02, threshold=5.826e+02, percent-clipped=1.0 +2023-02-09 02:17:43,984 INFO [train.py:901] (2/4) Epoch 29, batch 4500, loss[loss=0.2147, simple_loss=0.2902, pruned_loss=0.06958, over 8455.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05735, over 1620653.58 frames. ], batch size: 27, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:17:49,307 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.1027, 1.5578, 4.2744, 2.0914, 2.3984, 4.8225, 4.9827, 4.2242], + device='cuda:2'), covar=tensor([0.1325, 0.2030, 0.0298, 0.1899, 0.1360, 0.0186, 0.0529, 0.0564], + device='cuda:2'), in_proj_covar=tensor([0.0311, 0.0328, 0.0296, 0.0325, 0.0328, 0.0279, 0.0447, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 02:17:51,400 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:17:54,896 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 02:18:19,714 INFO [train.py:901] (2/4) Epoch 29, batch 4550, loss[loss=0.2074, simple_loss=0.2921, pruned_loss=0.06134, over 7801.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2821, pruned_loss=0.05707, over 1620736.27 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:18:28,416 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:31,629 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230887.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:37,202 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230895.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:46,426 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:46,491 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:55,062 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.340e+02 2.935e+02 3.730e+02 9.176e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-09 02:18:55,766 INFO [train.py:901] (2/4) Epoch 29, batch 4600, loss[loss=0.1938, simple_loss=0.2566, pruned_loss=0.06548, over 6777.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2815, pruned_loss=0.05658, over 1619908.51 frames. ], batch size: 15, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:19:13,589 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230945.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:31,772 INFO [train.py:901] (2/4) Epoch 29, batch 4650, loss[loss=0.1595, simple_loss=0.2549, pruned_loss=0.03203, over 7656.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2818, pruned_loss=0.05642, over 1623499.48 frames. 
], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:19:33,908 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230974.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:42,741 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.5072, 4.4387, 4.0427, 1.8592, 3.9853, 4.0409, 3.9886, 3.9466], + device='cuda:2'), covar=tensor([0.0649, 0.0502, 0.1012, 0.4447, 0.0833, 0.0926, 0.1276, 0.0886], + device='cuda:2'), in_proj_covar=tensor([0.0544, 0.0460, 0.0451, 0.0563, 0.0442, 0.0466, 0.0441, 0.0411], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 02:19:51,178 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230999.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:53,283 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:59,428 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231010.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:20:06,387 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.812e+02 2.522e+02 3.157e+02 3.845e+02 7.559e+02, threshold=6.314e+02, percent-clipped=7.0 +2023-02-09 02:20:07,122 INFO [train.py:901] (2/4) Epoch 29, batch 4700, loss[loss=0.1582, simple_loss=0.2412, pruned_loss=0.0376, over 7922.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2805, pruned_loss=0.05597, over 1620409.55 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:20:43,870 INFO [train.py:901] (2/4) Epoch 29, batch 4750, loss[loss=0.1847, simple_loss=0.2696, pruned_loss=0.04985, over 7795.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2804, pruned_loss=0.05579, over 1618895.68 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:00,617 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 02:21:02,795 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 02:21:09,909 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0 +2023-02-09 02:21:18,720 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.440e+02 2.924e+02 3.615e+02 6.392e+02, threshold=5.847e+02, percent-clipped=1.0 +2023-02-09 02:21:19,433 INFO [train.py:901] (2/4) Epoch 29, batch 4800, loss[loss=0.1582, simple_loss=0.2404, pruned_loss=0.03797, over 7699.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2794, pruned_loss=0.05572, over 1616074.65 frames. ], batch size: 18, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:43,700 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231154.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:21:55,936 INFO [train.py:901] (2/4) Epoch 29, batch 4850, loss[loss=0.2196, simple_loss=0.2987, pruned_loss=0.0702, over 8731.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2794, pruned_loss=0.05584, over 1616427.05 frames. ], batch size: 30, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:55,943 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. 
Duration: 26.205 +2023-02-09 02:22:17,913 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231201.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:31,032 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.490e+02 3.133e+02 4.186e+02 8.287e+02, threshold=6.266e+02, percent-clipped=6.0 +2023-02-09 02:22:31,778 INFO [train.py:901] (2/4) Epoch 29, batch 4900, loss[loss=0.1609, simple_loss=0.2438, pruned_loss=0.039, over 7212.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2805, pruned_loss=0.05608, over 1617391.24 frames. ], batch size: 16, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:22:35,618 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231226.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:54,381 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:59,244 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231258.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:04,608 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:06,602 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231269.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:06,638 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231269.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:07,928 INFO [train.py:901] (2/4) Epoch 29, batch 4950, loss[loss=0.2075, simple_loss=0.2868, pruned_loss=0.06416, over 7986.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05696, over 1617134.11 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:17,261 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231283.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:22,755 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231291.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:43,160 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.476e+02 2.910e+02 3.581e+02 7.956e+02, threshold=5.820e+02, percent-clipped=2.0 +2023-02-09 02:23:43,853 INFO [train.py:901] (2/4) Epoch 29, batch 5000, loss[loss=0.2362, simple_loss=0.3158, pruned_loss=0.07831, over 8033.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.282, pruned_loss=0.05719, over 1619604.86 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:45,410 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4017, 1.8914, 3.4697, 1.5671, 2.4228, 3.8538, 3.9678, 3.2851], + device='cuda:2'), covar=tensor([0.0926, 0.1527, 0.0294, 0.2212, 0.1076, 0.0244, 0.0737, 0.0538], + device='cuda:2'), in_proj_covar=tensor([0.0313, 0.0330, 0.0299, 0.0327, 0.0328, 0.0281, 0.0450, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 02:24:17,055 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:24:19,694 INFO [train.py:901] (2/4) Epoch 29, batch 5050, loss[loss=0.1874, simple_loss=0.2776, pruned_loss=0.04857, over 8234.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2822, pruned_loss=0.05693, over 1622192.61 frames. 
], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:24:37,785 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 02:24:55,716 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.315e+02 2.860e+02 3.491e+02 8.708e+02, threshold=5.721e+02, percent-clipped=8.0 +2023-02-09 02:24:56,378 INFO [train.py:901] (2/4) Epoch 29, batch 5100, loss[loss=0.2078, simple_loss=0.292, pruned_loss=0.06183, over 8138.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2835, pruned_loss=0.0574, over 1624916.44 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:32,299 INFO [train.py:901] (2/4) Epoch 29, batch 5150, loss[loss=0.1726, simple_loss=0.2651, pruned_loss=0.04008, over 7908.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.283, pruned_loss=0.05716, over 1625516.59 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:52,007 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-09 02:26:07,336 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-09 02:26:08,162 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.313e+02 2.816e+02 3.592e+02 7.666e+02, threshold=5.632e+02, percent-clipped=6.0 +2023-02-09 02:26:08,942 INFO [train.py:901] (2/4) Epoch 29, batch 5200, loss[loss=0.1763, simple_loss=0.284, pruned_loss=0.03427, over 8338.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2826, pruned_loss=0.05704, over 1620977.43 frames. ], batch size: 26, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:26:12,099 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:30,207 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231550.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:39,121 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 02:26:44,754 INFO [train.py:901] (2/4) Epoch 29, batch 5250, loss[loss=0.2279, simple_loss=0.3115, pruned_loss=0.07215, over 7814.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2824, pruned_loss=0.05741, over 1619571.75 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:15,803 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231613.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:20,495 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.485e+02 2.947e+02 3.559e+02 7.815e+02, threshold=5.893e+02, percent-clipped=2.0 +2023-02-09 02:27:21,220 INFO [train.py:901] (2/4) Epoch 29, batch 5300, loss[loss=0.1854, simple_loss=0.2718, pruned_loss=0.04947, over 8462.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05749, over 1616732.73 frames. 
], batch size: 25, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:22,860 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:41,043 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:46,394 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9296, 1.7459, 6.0953, 2.2886, 5.5153, 5.1635, 5.5991, 5.5385], + device='cuda:2'), covar=tensor([0.0477, 0.4894, 0.0409, 0.4084, 0.1005, 0.0888, 0.0501, 0.0522], + device='cuda:2'), in_proj_covar=tensor([0.0682, 0.0660, 0.0742, 0.0653, 0.0739, 0.0632, 0.0640, 0.0717], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 02:27:47,749 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4491, 1.4540, 1.4330, 1.8353, 0.6556, 1.3218, 1.3416, 1.4954], + device='cuda:2'), covar=tensor([0.0844, 0.0773, 0.0954, 0.0493, 0.1115, 0.1342, 0.0737, 0.0670], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0195, 0.0243, 0.0213, 0.0202, 0.0245, 0.0250, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 02:27:53,027 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231665.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:54,364 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1788, 1.4769, 4.3587, 1.5571, 3.8654, 3.6373, 3.9241, 3.8401], + device='cuda:2'), covar=tensor([0.0629, 0.4776, 0.0553, 0.4366, 0.1125, 0.0967, 0.0612, 0.0693], + device='cuda:2'), in_proj_covar=tensor([0.0682, 0.0660, 0.0742, 0.0653, 0.0739, 0.0632, 0.0640, 0.0717], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 02:27:57,015 INFO [train.py:901] (2/4) Epoch 29, batch 5350, loss[loss=0.2331, simple_loss=0.308, pruned_loss=0.0791, over 7692.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2834, pruned_loss=0.05836, over 1611057.82 frames. ], batch size: 73, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:32,684 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.535e+02 3.138e+02 3.956e+02 6.651e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-09 02:28:33,430 INFO [train.py:901] (2/4) Epoch 29, batch 5400, loss[loss=0.2226, simple_loss=0.2981, pruned_loss=0.0735, over 7640.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05799, over 1611617.89 frames. ], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:38,406 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:29:09,676 INFO [train.py:901] (2/4) Epoch 29, batch 5450, loss[loss=0.1694, simple_loss=0.2614, pruned_loss=0.03865, over 7439.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2819, pruned_loss=0.05734, over 1609752.62 frames. 
], batch size: 17, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:24,004 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.3272, 3.6999, 2.7813, 3.2328, 3.0190, 2.4141, 3.0952, 3.2942], + device='cuda:2'), covar=tensor([0.1594, 0.0471, 0.0954, 0.0659, 0.0722, 0.1386, 0.0984, 0.1032], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0244, 0.0343, 0.0315, 0.0302, 0.0348, 0.0351, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 02:29:29,518 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 02:29:43,734 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7725, 1.9857, 2.0466, 1.4267, 2.1446, 1.5182, 0.7665, 1.9677], + device='cuda:2'), covar=tensor([0.0614, 0.0406, 0.0325, 0.0670, 0.0407, 0.0937, 0.1032, 0.0326], + device='cuda:2'), in_proj_covar=tensor([0.0476, 0.0417, 0.0371, 0.0465, 0.0400, 0.0556, 0.0408, 0.0445], + device='cuda:2'), out_proj_covar=tensor([1.2593e-04, 1.0787e-04, 9.6390e-05, 1.2157e-04, 1.0474e-04, 1.5474e-04, + 1.0873e-04, 1.1633e-04], device='cuda:2') +2023-02-09 02:29:44,879 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.416e+02 2.826e+02 3.521e+02 6.915e+02, threshold=5.653e+02, percent-clipped=1.0 +2023-02-09 02:29:45,631 INFO [train.py:901] (2/4) Epoch 29, batch 5500, loss[loss=0.1522, simple_loss=0.2349, pruned_loss=0.03476, over 6864.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.05748, over 1609365.97 frames. ], batch size: 15, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:48,650 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8465, 1.4875, 1.7592, 1.4295, 0.9165, 1.4966, 1.6624, 1.5675], + device='cuda:2'), covar=tensor([0.0614, 0.1214, 0.1645, 0.1490, 0.0625, 0.1424, 0.0726, 0.0662], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 02:30:21,383 INFO [train.py:901] (2/4) Epoch 29, batch 5550, loss[loss=0.1787, simple_loss=0.2614, pruned_loss=0.04804, over 7665.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2823, pruned_loss=0.05768, over 1608338.78 frames. ], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:30:56,572 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.336e+02 2.917e+02 3.506e+02 1.057e+03, threshold=5.834e+02, percent-clipped=5.0 +2023-02-09 02:30:57,331 INFO [train.py:901] (2/4) Epoch 29, batch 5600, loss[loss=0.1633, simple_loss=0.2607, pruned_loss=0.03298, over 7811.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2815, pruned_loss=0.05734, over 1606234.81 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:31:34,506 INFO [train.py:901] (2/4) Epoch 29, batch 5650, loss[loss=0.1907, simple_loss=0.284, pruned_loss=0.04876, over 8024.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2822, pruned_loss=0.05764, over 1610877.26 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:31:38,805 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-09 02:31:43,799 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:31:58,932 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0 +2023-02-09 02:32:02,831 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:02,956 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:10,323 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.351e+02 2.719e+02 3.536e+02 6.635e+02, threshold=5.438e+02, percent-clipped=1.0 +2023-02-09 02:32:11,030 INFO [train.py:901] (2/4) Epoch 29, batch 5700, loss[loss=0.2099, simple_loss=0.287, pruned_loss=0.06645, over 8358.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.05712, over 1613367.79 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:45,197 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 02:32:47,262 INFO [train.py:901] (2/4) Epoch 29, batch 5750, loss[loss=0.2333, simple_loss=0.3144, pruned_loss=0.07613, over 8534.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.05682, over 1615832.89 frames. ], batch size: 31, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:33:23,822 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 2.417e+02 3.013e+02 3.730e+02 1.097e+03, threshold=6.026e+02, percent-clipped=6.0 +2023-02-09 02:33:24,554 INFO [train.py:901] (2/4) Epoch 29, batch 5800, loss[loss=0.1689, simple_loss=0.25, pruned_loss=0.04387, over 7808.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.05621, over 1615712.49 frames. ], batch size: 19, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:33:26,838 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232124.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:33:35,064 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.6144, 2.2646, 3.1631, 1.6713, 1.7980, 3.1353, 0.8826, 2.1258], + device='cuda:2'), covar=tensor([0.1447, 0.0977, 0.0277, 0.1414, 0.2058, 0.0358, 0.1844, 0.1259], + device='cuda:2'), in_proj_covar=tensor([0.0201, 0.0204, 0.0137, 0.0224, 0.0278, 0.0148, 0.0172, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 02:33:43,977 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.8882, 2.3570, 3.6373, 1.6344, 1.7732, 3.6146, 0.7704, 2.0498], + device='cuda:2'), covar=tensor([0.1303, 0.1035, 0.0208, 0.1666, 0.2260, 0.0251, 0.1875, 0.1366], + device='cuda:2'), in_proj_covar=tensor([0.0201, 0.0204, 0.0137, 0.0224, 0.0278, 0.0148, 0.0172, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 02:33:59,563 INFO [train.py:901] (2/4) Epoch 29, batch 5850, loss[loss=0.188, simple_loss=0.275, pruned_loss=0.05045, over 8473.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2796, pruned_loss=0.05577, over 1613489.56 frames. 
], batch size: 29, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:31,415 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9280, 1.4869, 1.7441, 1.4031, 0.9037, 1.5259, 1.6931, 1.5676], + device='cuda:2'), covar=tensor([0.0540, 0.1315, 0.1641, 0.1491, 0.0617, 0.1480, 0.0713, 0.0654], + device='cuda:2'), in_proj_covar=tensor([0.0100, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0115, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:2') +2023-02-09 02:34:34,659 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.421e+02 2.869e+02 3.503e+02 7.290e+02, threshold=5.737e+02, percent-clipped=3.0 +2023-02-09 02:34:35,360 INFO [train.py:901] (2/4) Epoch 29, batch 5900, loss[loss=0.2149, simple_loss=0.2973, pruned_loss=0.0662, over 8657.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2795, pruned_loss=0.05531, over 1617119.42 frames. ], batch size: 39, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:44,585 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8514, 1.3384, 4.0024, 1.4752, 3.5616, 3.3704, 3.6460, 3.5247], + device='cuda:2'), covar=tensor([0.0738, 0.4669, 0.0655, 0.4356, 0.1190, 0.1015, 0.0701, 0.0805], + device='cuda:2'), in_proj_covar=tensor([0.0688, 0.0667, 0.0748, 0.0659, 0.0747, 0.0637, 0.0643, 0.0724], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 02:35:06,628 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:35:10,677 INFO [train.py:901] (2/4) Epoch 29, batch 5950, loss[loss=0.1975, simple_loss=0.2754, pruned_loss=0.05982, over 8139.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2803, pruned_loss=0.05539, over 1622444.53 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:26,278 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 02:35:46,609 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.503e+02 2.837e+02 3.700e+02 9.228e+02, threshold=5.675e+02, percent-clipped=4.0 +2023-02-09 02:35:47,356 INFO [train.py:901] (2/4) Epoch 29, batch 6000, loss[loss=0.1742, simple_loss=0.2629, pruned_loss=0.04278, over 8734.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2797, pruned_loss=0.05518, over 1616446.95 frames. ], batch size: 30, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:47,357 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 02:36:01,202 INFO [train.py:935] (2/4) Epoch 29, validation: loss=0.1708, simple_loss=0.2701, pruned_loss=0.03577, over 944034.00 frames. +2023-02-09 02:36:01,203 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6724MB +2023-02-09 02:36:14,198 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0 +2023-02-09 02:36:37,559 INFO [train.py:901] (2/4) Epoch 29, batch 6050, loss[loss=0.1959, simple_loss=0.2723, pruned_loss=0.05978, over 7655.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2803, pruned_loss=0.05573, over 1618995.33 frames. 
], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:25:43,360 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2152, 1.9244, 2.3255, 2.0711, 2.3029, 2.2454, 2.1242, 1.1809], + device='cuda:2'), covar=tensor([0.6378, 0.5379, 0.2441, 0.3912, 0.2729, 0.3580, 0.2066, 0.5727], + device='cuda:2'), in_proj_covar=tensor([0.0970, 0.1036, 0.0846, 0.1007, 0.1032, 0.0947, 0.0778, 0.0857], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 03:25:46,887 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236412.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:25:53,568 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.406e+02 2.956e+02 3.756e+02 9.982e+02, threshold=5.913e+02, percent-clipped=8.0 +2023-02-09 03:26:04,553 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236437.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:26:16,285 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6398, 1.8170, 1.6144, 2.3147, 0.9514, 1.4437, 1.6856, 1.8634], + device='cuda:2'), covar=tensor([0.0842, 0.0736, 0.0921, 0.0452, 0.1076, 0.1291, 0.0759, 0.0741], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0215, 0.0201, 0.0247, 0.0251, 0.0206], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 03:26:16,759 INFO [train.py:901] (2/4) Epoch 30, batch 2050, loss[loss=0.2227, simple_loss=0.2969, pruned_loss=0.07422, over 7528.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.282, pruned_loss=0.05624, over 1622654.89 frames. ], batch size: 18, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:26:28,372 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2842, 2.1506, 1.6940, 1.9234, 1.7797, 1.4838, 1.6443, 1.6392], + device='cuda:2'), covar=tensor([0.1431, 0.0392, 0.1324, 0.0612, 0.0798, 0.1580, 0.1085, 0.0990], + device='cuda:2'), in_proj_covar=tensor([0.0359, 0.0245, 0.0348, 0.0316, 0.0304, 0.0351, 0.0354, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 03:26:47,590 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:26:53,532 INFO [train.py:901] (2/4) Epoch 30, batch 2100, loss[loss=0.211, simple_loss=0.3003, pruned_loss=0.06084, over 8302.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2811, pruned_loss=0.05602, over 1621126.82 frames. 
], batch size: 23, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:27:06,242 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.477e+02 3.089e+02 3.892e+02 8.089e+02, threshold=6.178e+02, percent-clipped=3.0 +2023-02-09 03:27:07,829 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236524.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:27:16,738 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0423, 1.5732, 4.4247, 1.9838, 2.4736, 5.1269, 5.1629, 4.4198], + device='cuda:2'), covar=tensor([0.1332, 0.2009, 0.0302, 0.1938, 0.1316, 0.0181, 0.0473, 0.0556], + device='cuda:2'), in_proj_covar=tensor([0.0310, 0.0330, 0.0298, 0.0329, 0.0329, 0.0282, 0.0449, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 03:27:25,241 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:27:28,497 INFO [train.py:901] (2/4) Epoch 30, batch 2150, loss[loss=0.2412, simple_loss=0.3242, pruned_loss=0.07908, over 8575.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2808, pruned_loss=0.05589, over 1619958.30 frames. ], batch size: 31, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:27:59,356 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.98 vs. limit=5.0 +2023-02-09 03:28:04,582 INFO [train.py:901] (2/4) Epoch 30, batch 2200, loss[loss=0.2364, simple_loss=0.3243, pruned_loss=0.07427, over 8519.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2811, pruned_loss=0.0558, over 1621640.41 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:28:06,911 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.2732, 4.2042, 3.8956, 2.1331, 3.7632, 3.9178, 3.7924, 3.7368], + device='cuda:2'), covar=tensor([0.0825, 0.0635, 0.1027, 0.4557, 0.0926, 0.1060, 0.1315, 0.0847], + device='cuda:2'), in_proj_covar=tensor([0.0552, 0.0462, 0.0450, 0.0565, 0.0447, 0.0474, 0.0450, 0.0415], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:28:18,763 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.435e+02 2.816e+02 3.564e+02 9.413e+02, threshold=5.632e+02, percent-clipped=3.0 +2023-02-09 03:28:40,981 INFO [train.py:901] (2/4) Epoch 30, batch 2250, loss[loss=0.1678, simple_loss=0.2526, pruned_loss=0.0415, over 7830.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2812, pruned_loss=0.05635, over 1621689.23 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:29:04,195 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-09 03:29:16,926 INFO [train.py:901] (2/4) Epoch 30, batch 2300, loss[loss=0.1797, simple_loss=0.2667, pruned_loss=0.04636, over 8332.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2816, pruned_loss=0.05664, over 1620613.01 frames. 
], batch size: 26, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:29:29,246 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.489e+02 3.036e+02 4.215e+02 7.962e+02, threshold=6.071e+02, percent-clipped=6.0 +2023-02-09 03:29:50,619 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236751.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:29:52,585 INFO [train.py:901] (2/4) Epoch 30, batch 2350, loss[loss=0.1859, simple_loss=0.2647, pruned_loss=0.05362, over 7522.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2811, pruned_loss=0.05655, over 1617009.35 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:30:08,609 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:30:29,061 INFO [train.py:901] (2/4) Epoch 30, batch 2400, loss[loss=0.1993, simple_loss=0.2867, pruned_loss=0.05597, over 8249.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2809, pruned_loss=0.05613, over 1617563.76 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:30:36,998 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7320, 2.5724, 1.9368, 2.3595, 2.2605, 1.6581, 2.1046, 2.2631], + device='cuda:2'), covar=tensor([0.1553, 0.0453, 0.1221, 0.0711, 0.0827, 0.1641, 0.1142, 0.1058], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0243, 0.0347, 0.0316, 0.0303, 0.0351, 0.0353, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 03:30:42,227 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.281e+02 2.685e+02 3.729e+02 8.099e+02, threshold=5.371e+02, percent-clipped=9.0 +2023-02-09 03:30:43,211 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-09 03:30:53,422 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. limit=2.0 +2023-02-09 03:31:05,203 INFO [train.py:901] (2/4) Epoch 30, batch 2450, loss[loss=0.1607, simple_loss=0.2449, pruned_loss=0.03822, over 7831.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2812, pruned_loss=0.05616, over 1619197.36 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:31:39,710 INFO [train.py:901] (2/4) Epoch 30, batch 2500, loss[loss=0.1908, simple_loss=0.2681, pruned_loss=0.05674, over 7517.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2799, pruned_loss=0.05561, over 1618774.99 frames. 
], batch size: 18, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:31:43,936 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236910.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:31:52,874 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.373e+02 3.086e+02 3.767e+02 7.222e+02, threshold=6.171e+02, percent-clipped=6.0 +2023-02-09 03:32:13,802 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7933, 2.7370, 2.0609, 2.5244, 2.3708, 1.7920, 2.2924, 2.4222], + device='cuda:2'), covar=tensor([0.1511, 0.0405, 0.1187, 0.0665, 0.0773, 0.1595, 0.1031, 0.0896], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0242, 0.0345, 0.0314, 0.0302, 0.0350, 0.0351, 0.0320], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 03:32:16,569 INFO [train.py:901] (2/4) Epoch 30, batch 2550, loss[loss=0.176, simple_loss=0.2607, pruned_loss=0.04561, over 7792.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2802, pruned_loss=0.05544, over 1618742.16 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:32:54,185 INFO [train.py:901] (2/4) Epoch 30, batch 2600, loss[loss=0.1717, simple_loss=0.255, pruned_loss=0.04422, over 7918.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2809, pruned_loss=0.05615, over 1619035.52 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:33:06,912 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.458e+02 3.021e+02 3.974e+02 8.394e+02, threshold=6.042e+02, percent-clipped=5.0 +2023-02-09 03:33:12,328 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.52 vs. limit=5.0 +2023-02-09 03:33:30,300 INFO [train.py:901] (2/4) Epoch 30, batch 2650, loss[loss=0.2105, simple_loss=0.3002, pruned_loss=0.06044, over 8553.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2819, pruned_loss=0.05679, over 1615766.63 frames. ], batch size: 34, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:34:06,433 INFO [train.py:901] (2/4) Epoch 30, batch 2700, loss[loss=0.2068, simple_loss=0.2851, pruned_loss=0.06423, over 7931.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2817, pruned_loss=0.05636, over 1616800.22 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:34:07,584 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 03:34:18,966 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.464e+02 3.015e+02 4.068e+02 7.247e+02, threshold=6.030e+02, percent-clipped=1.0 +2023-02-09 03:34:41,480 INFO [train.py:901] (2/4) Epoch 30, batch 2750, loss[loss=0.2168, simple_loss=0.3009, pruned_loss=0.06632, over 8551.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2813, pruned_loss=0.05605, over 1615442.54 frames. ], batch size: 49, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:18,235 INFO [train.py:901] (2/4) Epoch 30, batch 2800, loss[loss=0.1849, simple_loss=0.2686, pruned_loss=0.05062, over 8031.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.281, pruned_loss=0.05587, over 1618231.56 frames. 
], batch size: 22, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:20,499 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7468, 1.5123, 1.8509, 1.4533, 0.9612, 1.5740, 1.6468, 1.4598], + device='cuda:2'), covar=tensor([0.0574, 0.1255, 0.1547, 0.1441, 0.0612, 0.1474, 0.0712, 0.0681], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:2') +2023-02-09 03:35:31,346 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.300e+02 2.824e+02 3.573e+02 8.919e+02, threshold=5.648e+02, percent-clipped=3.0 +2023-02-09 03:35:45,298 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.1475, 1.3972, 4.3426, 1.6188, 3.8511, 3.6307, 3.9188, 3.8331], + device='cuda:2'), covar=tensor([0.0633, 0.4528, 0.0564, 0.4245, 0.1009, 0.0921, 0.0627, 0.0679], + device='cuda:2'), in_proj_covar=tensor([0.0694, 0.0671, 0.0752, 0.0670, 0.0756, 0.0645, 0.0655, 0.0729], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:35:48,281 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-09 03:35:53,009 INFO [train.py:901] (2/4) Epoch 30, batch 2850, loss[loss=0.2307, simple_loss=0.3039, pruned_loss=0.07877, over 7979.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2813, pruned_loss=0.05606, over 1620512.21 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:35:53,088 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237254.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:36:18,915 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-09 03:36:27,599 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-02-09 03:36:29,225 INFO [train.py:901] (2/4) Epoch 30, batch 2900, loss[loss=0.1829, simple_loss=0.2728, pruned_loss=0.04649, over 8081.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2803, pruned_loss=0.05562, over 1617646.14 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:36:42,591 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.592e+02 3.021e+02 4.387e+02 8.419e+02, threshold=6.042e+02, percent-clipped=5.0 +2023-02-09 03:37:04,021 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 03:37:05,367 INFO [train.py:901] (2/4) Epoch 30, batch 2950, loss[loss=0.1903, simple_loss=0.2709, pruned_loss=0.05486, over 7925.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2792, pruned_loss=0.05531, over 1612466.39 frames. 
], batch size: 20, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:37:15,789 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237369.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:37:36,195 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.8185, 3.8100, 3.4573, 1.6586, 3.3569, 3.4980, 3.3635, 3.4070], + device='cuda:2'), covar=tensor([0.0914, 0.0650, 0.1088, 0.5062, 0.0990, 0.1042, 0.1379, 0.0903], + device='cuda:2'), in_proj_covar=tensor([0.0550, 0.0461, 0.0450, 0.0562, 0.0445, 0.0473, 0.0447, 0.0415], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:37:40,336 INFO [train.py:901] (2/4) Epoch 30, batch 3000, loss[loss=0.191, simple_loss=0.2846, pruned_loss=0.04875, over 8036.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2799, pruned_loss=0.05535, over 1614902.12 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:37:40,336 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 03:37:54,062 INFO [train.py:935] (2/4) Epoch 30, validation: loss=0.1704, simple_loss=0.2697, pruned_loss=0.0356, over 944034.00 frames. +2023-02-09 03:37:54,063 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6724MB +2023-02-09 03:38:07,361 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.369e+02 2.918e+02 3.560e+02 6.316e+02, threshold=5.836e+02, percent-clipped=1.0 +2023-02-09 03:38:31,173 INFO [train.py:901] (2/4) Epoch 30, batch 3050, loss[loss=0.2106, simple_loss=0.2963, pruned_loss=0.06245, over 8519.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2807, pruned_loss=0.05611, over 1612626.28 frames. ], batch size: 31, lr: 2.51e-03, grad_scale: 8.0 +2023-02-09 03:39:01,960 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6649, 1.8697, 1.9584, 1.3810, 2.0666, 1.5081, 0.6072, 1.9567], + device='cuda:2'), covar=tensor([0.0666, 0.0436, 0.0362, 0.0665, 0.0489, 0.1037, 0.1101, 0.0324], + device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0422, 0.0376, 0.0467, 0.0403, 0.0562, 0.0409, 0.0446], + device='cuda:2'), out_proj_covar=tensor([1.2778e-04, 1.0912e-04, 9.7950e-05, 1.2182e-04, 1.0531e-04, 1.5642e-04, + 1.0891e-04, 1.1676e-04], device='cuda:2') +2023-02-09 03:39:07,127 INFO [train.py:901] (2/4) Epoch 30, batch 3100, loss[loss=0.1951, simple_loss=0.2779, pruned_loss=0.05611, over 7813.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.05674, over 1614461.33 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:39:10,845 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237509.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:39:14,041 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0 +2023-02-09 03:39:19,646 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.429e+02 3.016e+02 3.485e+02 6.483e+02, threshold=6.032e+02, percent-clipped=4.0 +2023-02-09 03:39:20,785 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.23 vs. limit=2.0 +2023-02-09 03:39:43,976 INFO [train.py:901] (2/4) Epoch 30, batch 3150, loss[loss=0.1786, simple_loss=0.2715, pruned_loss=0.04289, over 8029.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2814, pruned_loss=0.05733, over 1610250.52 frames. 
], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:40:03,664 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237581.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:40:21,026 INFO [train.py:901] (2/4) Epoch 30, batch 3200, loss[loss=0.1772, simple_loss=0.2667, pruned_loss=0.04389, over 7971.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2808, pruned_loss=0.05727, over 1608555.14 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:40:33,294 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.320e+02 2.861e+02 3.592e+02 8.186e+02, threshold=5.722e+02, percent-clipped=5.0 +2023-02-09 03:40:35,496 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=237625.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:40:52,664 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=237650.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:40:56,035 INFO [train.py:901] (2/4) Epoch 30, batch 3250, loss[loss=0.1684, simple_loss=0.2611, pruned_loss=0.03785, over 7805.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05797, over 1609218.81 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:41:32,176 INFO [train.py:901] (2/4) Epoch 30, batch 3300, loss[loss=0.1805, simple_loss=0.2716, pruned_loss=0.04472, over 7818.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2815, pruned_loss=0.05746, over 1606991.77 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:41:44,069 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.48 vs. limit=5.0 +2023-02-09 03:41:45,795 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.392e+02 2.907e+02 3.818e+02 6.093e+02, threshold=5.813e+02, percent-clipped=2.0 +2023-02-09 03:42:07,977 INFO [train.py:901] (2/4) Epoch 30, batch 3350, loss[loss=0.2113, simple_loss=0.2925, pruned_loss=0.06504, over 8536.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2807, pruned_loss=0.05693, over 1609529.86 frames. ], batch size: 50, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:42:44,184 INFO [train.py:901] (2/4) Epoch 30, batch 3400, loss[loss=0.1783, simple_loss=0.2655, pruned_loss=0.04553, over 8139.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05712, over 1606524.97 frames. 
], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:42:47,123 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237808.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:42:57,410 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.484e+02 3.245e+02 4.483e+02 9.283e+02, threshold=6.490e+02, percent-clipped=12.0 +2023-02-09 03:43:00,484 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2864, 2.5878, 2.6751, 1.7202, 3.0628, 1.8392, 1.5228, 2.4088], + device='cuda:2'), covar=tensor([0.1012, 0.0472, 0.0443, 0.0941, 0.0593, 0.1034, 0.1214, 0.0588], + device='cuda:2'), in_proj_covar=tensor([0.0486, 0.0425, 0.0380, 0.0470, 0.0406, 0.0566, 0.0410, 0.0449], + device='cuda:2'), out_proj_covar=tensor([1.2850e-04, 1.0992e-04, 9.8809e-05, 1.2263e-04, 1.0614e-04, 1.5748e-04, + 1.0928e-04, 1.1749e-04], device='cuda:2') +2023-02-09 03:43:19,449 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237853.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:43:20,120 INFO [train.py:901] (2/4) Epoch 30, batch 3450, loss[loss=0.1508, simple_loss=0.2338, pruned_loss=0.0339, over 7686.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2816, pruned_loss=0.05764, over 1606260.79 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:43:56,090 INFO [train.py:901] (2/4) Epoch 30, batch 3500, loss[loss=0.1994, simple_loss=0.2726, pruned_loss=0.06309, over 8086.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.05825, over 1607807.20 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:44:08,739 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.505e+02 3.010e+02 3.725e+02 8.965e+02, threshold=6.019e+02, percent-clipped=4.0 +2023-02-09 03:44:11,004 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237925.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:44:11,726 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9670, 1.5323, 3.4549, 1.6541, 2.5111, 3.8373, 3.9103, 3.3027], + device='cuda:2'), covar=tensor([0.1189, 0.1874, 0.0311, 0.1967, 0.0985, 0.0214, 0.0499, 0.0502], + device='cuda:2'), in_proj_covar=tensor([0.0308, 0.0327, 0.0296, 0.0326, 0.0328, 0.0281, 0.0447, 0.0308], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 03:44:14,957 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237930.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:44:22,349 WARNING [train.py:1067] (2/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 03:44:32,808 INFO [train.py:901] (2/4) Epoch 30, batch 3550, loss[loss=0.1882, simple_loss=0.2773, pruned_loss=0.04957, over 8240.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2813, pruned_loss=0.05802, over 1609645.68 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:44:43,165 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237968.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:44:49,853 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 03:45:09,426 INFO [train.py:901] (2/4) Epoch 30, batch 3600, loss[loss=0.1924, simple_loss=0.281, pruned_loss=0.05196, over 8467.00 frames. 
], tot_loss[loss=0.1972, simple_loss=0.2802, pruned_loss=0.05711, over 1608158.18 frames. ], batch size: 27, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:45:22,452 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.286e+02 2.832e+02 3.360e+02 7.556e+02, threshold=5.664e+02, percent-clipped=2.0 +2023-02-09 03:45:35,416 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238040.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:45:40,892 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.9340, 3.8232, 3.5632, 1.7145, 3.4635, 3.5312, 3.4380, 3.3734], + device='cuda:2'), covar=tensor([0.0777, 0.0605, 0.0986, 0.4463, 0.0902, 0.1083, 0.1413, 0.0891], + device='cuda:2'), in_proj_covar=tensor([0.0551, 0.0463, 0.0452, 0.0563, 0.0445, 0.0475, 0.0451, 0.0416], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:45:44,902 INFO [train.py:901] (2/4) Epoch 30, batch 3650, loss[loss=0.1602, simple_loss=0.2475, pruned_loss=0.03643, over 7660.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2795, pruned_loss=0.05625, over 1611665.53 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:05,855 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:12,778 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238092.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:20,739 INFO [train.py:901] (2/4) Epoch 30, batch 3700, loss[loss=0.2425, simple_loss=0.3271, pruned_loss=0.07891, over 8104.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2805, pruned_loss=0.05705, over 1612186.26 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:46:29,080 WARNING [train.py:1067] (2/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 03:46:33,265 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.352e+02 3.001e+02 3.686e+02 7.575e+02, threshold=6.003e+02, percent-clipped=3.0 +2023-02-09 03:46:50,317 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:54,656 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6336, 1.2941, 2.8964, 1.4832, 2.3694, 3.1337, 3.2624, 2.6822], + device='cuda:2'), covar=tensor([0.1256, 0.1877, 0.0350, 0.2081, 0.0811, 0.0294, 0.0640, 0.0582], + device='cuda:2'), in_proj_covar=tensor([0.0310, 0.0330, 0.0298, 0.0328, 0.0331, 0.0283, 0.0451, 0.0310], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 03:46:56,063 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238152.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:46:57,361 INFO [train.py:901] (2/4) Epoch 30, batch 3750, loss[loss=0.2192, simple_loss=0.2997, pruned_loss=0.06935, over 8300.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2803, pruned_loss=0.05669, over 1611559.09 frames. 
], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:47:19,947 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5519, 1.8596, 2.7633, 1.5385, 2.1472, 1.9644, 1.6378, 2.0481], + device='cuda:2'), covar=tensor([0.2053, 0.2734, 0.0939, 0.4969, 0.1873, 0.3563, 0.2636, 0.2218], + device='cuda:2'), in_proj_covar=tensor([0.0547, 0.0644, 0.0569, 0.0679, 0.0671, 0.0620, 0.0573, 0.0649], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:47:33,676 INFO [train.py:901] (2/4) Epoch 30, batch 3800, loss[loss=0.2045, simple_loss=0.2967, pruned_loss=0.05617, over 8463.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05657, over 1611613.08 frames. ], batch size: 48, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:47:46,035 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.427e+02 2.911e+02 3.474e+02 7.215e+02, threshold=5.821e+02, percent-clipped=2.0 +2023-02-09 03:47:47,580 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238224.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:47:57,379 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([0.9782, 0.9158, 1.6216, 0.9423, 1.6046, 1.7914, 1.8485, 1.4981], + device='cuda:2'), covar=tensor([0.1051, 0.1305, 0.0596, 0.1733, 0.1224, 0.0393, 0.0753, 0.0609], + device='cuda:2'), in_proj_covar=tensor([0.0308, 0.0328, 0.0297, 0.0327, 0.0329, 0.0281, 0.0449, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 03:48:05,480 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238249.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:09,471 INFO [train.py:901] (2/4) Epoch 30, batch 3850, loss[loss=0.1829, simple_loss=0.278, pruned_loss=0.04383, over 8393.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2802, pruned_loss=0.05671, over 1602689.89 frames. ], batch size: 49, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:48:18,889 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238267.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:23,698 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238274.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:48:38,379 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 03:48:39,907 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238296.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:48:45,173 INFO [train.py:901] (2/4) Epoch 30, batch 3900, loss[loss=0.1939, simple_loss=0.2801, pruned_loss=0.05384, over 8245.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2781, pruned_loss=0.05532, over 1602968.21 frames. 
], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:48:57,793 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238321.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:48:58,284 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.361e+02 2.887e+02 3.538e+02 6.169e+02, threshold=5.773e+02, percent-clipped=2.0 +2023-02-09 03:49:18,556 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7460, 1.6131, 2.2819, 1.4960, 1.3071, 2.2296, 0.5296, 1.3958], + device='cuda:2'), covar=tensor([0.1384, 0.1218, 0.0298, 0.0949, 0.2261, 0.0361, 0.1644, 0.1153], + device='cuda:2'), in_proj_covar=tensor([0.0204, 0.0206, 0.0139, 0.0225, 0.0279, 0.0149, 0.0176, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 03:49:20,478 INFO [train.py:901] (2/4) Epoch 30, batch 3950, loss[loss=0.2101, simple_loss=0.2867, pruned_loss=0.06678, over 8035.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2791, pruned_loss=0.05566, over 1609160.97 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:49:46,425 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238389.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:49:56,962 INFO [train.py:901] (2/4) Epoch 30, batch 4000, loss[loss=0.1873, simple_loss=0.2852, pruned_loss=0.04468, over 8271.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2791, pruned_loss=0.05523, over 1609212.98 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:50:01,961 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5804, 1.6579, 2.0368, 1.6892, 1.1802, 1.7182, 2.2405, 2.1119], + device='cuda:2'), covar=tensor([0.0538, 0.1271, 0.1619, 0.1405, 0.0602, 0.1414, 0.0652, 0.0595], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0162, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:2') +2023-02-09 03:50:09,951 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.354e+02 2.920e+02 3.674e+02 8.815e+02, threshold=5.839e+02, percent-clipped=5.0 +2023-02-09 03:50:12,701 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:50:20,359 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238436.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:50:21,790 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238438.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:50:28,648 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.0360, 2.2901, 3.8171, 2.0302, 1.9341, 3.7657, 0.7991, 2.3087], + device='cuda:2'), covar=tensor([0.1259, 0.1108, 0.0192, 0.1426, 0.2283, 0.0273, 0.1907, 0.1159], + device='cuda:2'), in_proj_covar=tensor([0.0204, 0.0206, 0.0139, 0.0225, 0.0279, 0.0149, 0.0175, 0.0201], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 03:50:32,677 INFO [train.py:901] (2/4) Epoch 30, batch 4050, loss[loss=0.1912, simple_loss=0.2878, pruned_loss=0.04732, over 8489.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.279, pruned_loss=0.05532, over 1605800.52 frames. 
], batch size: 27, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:50:38,558 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.0013, 2.2473, 1.8348, 2.7174, 1.3640, 1.5900, 2.1126, 2.1848], + device='cuda:2'), covar=tensor([0.0680, 0.0628, 0.0814, 0.0330, 0.1005, 0.1286, 0.0747, 0.0657], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0193, 0.0243, 0.0214, 0.0201, 0.0245, 0.0248, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 03:50:57,400 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238488.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:09,340 INFO [train.py:901] (2/4) Epoch 30, batch 4100, loss[loss=0.2144, simple_loss=0.297, pruned_loss=0.0659, over 8237.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2801, pruned_loss=0.05597, over 1607209.14 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:51:21,790 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.287e+02 2.925e+02 3.934e+02 1.031e+03, threshold=5.850e+02, percent-clipped=7.0 +2023-02-09 03:51:22,756 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238523.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:35,961 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:41,372 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238548.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:43,278 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238551.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:51:45,010 INFO [train.py:901] (2/4) Epoch 30, batch 4150, loss[loss=0.2086, simple_loss=0.3009, pruned_loss=0.05812, over 8286.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2809, pruned_loss=0.05626, over 1607798.01 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:51:57,581 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6289, 2.0031, 2.9238, 1.5172, 2.2217, 2.0793, 1.7027, 2.2670], + device='cuda:2'), covar=tensor([0.1947, 0.2711, 0.1055, 0.4821, 0.2099, 0.3254, 0.2509, 0.2530], + device='cuda:2'), in_proj_covar=tensor([0.0548, 0.0646, 0.0571, 0.0682, 0.0675, 0.0622, 0.0574, 0.0653], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:52:11,579 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 03:52:19,895 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238603.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:52:20,452 INFO [train.py:901] (2/4) Epoch 30, batch 4200, loss[loss=0.2051, simple_loss=0.2974, pruned_loss=0.05644, over 8444.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2824, pruned_loss=0.05714, over 1609722.30 frames. 
], batch size: 49, lr: 2.51e-03, grad_scale: 16.0 +2023-02-09 03:52:30,416 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2276, 1.5484, 4.4984, 1.9136, 2.5380, 5.1466, 5.1927, 4.4949], + device='cuda:2'), covar=tensor([0.1194, 0.1965, 0.0233, 0.2112, 0.1127, 0.0149, 0.0358, 0.0471], + device='cuda:2'), in_proj_covar=tensor([0.0309, 0.0329, 0.0296, 0.0328, 0.0329, 0.0282, 0.0449, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 03:52:33,715 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.494e+02 3.256e+02 4.447e+02 1.288e+03, threshold=6.511e+02, percent-clipped=8.0 +2023-02-09 03:52:41,502 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 03:52:50,647 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238645.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:52:56,590 INFO [train.py:901] (2/4) Epoch 30, batch 4250, loss[loss=0.2107, simple_loss=0.2998, pruned_loss=0.06082, over 8291.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2814, pruned_loss=0.05686, over 1605550.30 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:53:02,696 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5432, 1.4310, 1.8233, 1.1526, 1.1676, 1.7811, 0.2143, 1.1898], + device='cuda:2'), covar=tensor([0.1229, 0.0998, 0.0355, 0.0773, 0.2038, 0.0397, 0.1594, 0.0964], + device='cuda:2'), in_proj_covar=tensor([0.0204, 0.0206, 0.0139, 0.0224, 0.0279, 0.0149, 0.0176, 0.0201], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 03:53:05,091 WARNING [train.py:1067] (2/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 03:53:07,989 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238670.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:53:27,928 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238699.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:53:31,128 INFO [train.py:901] (2/4) Epoch 30, batch 4300, loss[loss=0.2408, simple_loss=0.3189, pruned_loss=0.08137, over 8226.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2811, pruned_loss=0.05659, over 1604939.36 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:53:44,805 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.303e+02 2.743e+02 3.342e+02 6.438e+02, threshold=5.486e+02, percent-clipped=0.0 +2023-02-09 03:54:04,291 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2507, 3.0425, 2.3555, 2.7233, 2.6371, 2.2339, 2.4849, 2.8014], + device='cuda:2'), covar=tensor([0.1070, 0.0302, 0.0829, 0.0454, 0.0519, 0.1049, 0.0689, 0.0738], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0243, 0.0345, 0.0314, 0.0301, 0.0348, 0.0349, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 03:54:06,887 INFO [train.py:901] (2/4) Epoch 30, batch 4350, loss[loss=0.2115, simple_loss=0.2938, pruned_loss=0.06462, over 8096.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05651, over 1604844.26 frames. 
], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:54:27,361 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238782.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:54:36,429 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 03:54:37,980 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238797.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:54:42,562 INFO [train.py:901] (2/4) Epoch 30, batch 4400, loss[loss=0.1949, simple_loss=0.2732, pruned_loss=0.0583, over 7942.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2798, pruned_loss=0.05618, over 1604675.51 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:54:44,443 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.91 vs. limit=5.0 +2023-02-09 03:54:44,937 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238807.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:54:53,640 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-09 03:54:55,879 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.573e+02 3.023e+02 3.983e+02 6.680e+02, threshold=6.046e+02, percent-clipped=2.0 +2023-02-09 03:54:56,113 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238822.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:03,788 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238832.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:15,271 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 03:55:18,601 INFO [train.py:901] (2/4) Epoch 30, batch 4450, loss[loss=0.222, simple_loss=0.3003, pruned_loss=0.07182, over 8525.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05695, over 1609558.37 frames. ], batch size: 28, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:55:22,515 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238859.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:40,874 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238884.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:55:50,462 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238897.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 03:55:55,019 INFO [train.py:901] (2/4) Epoch 30, batch 4500, loss[loss=0.164, simple_loss=0.2482, pruned_loss=0.03987, over 7531.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2809, pruned_loss=0.05653, over 1611851.87 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:56:07,440 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.335e+02 2.828e+02 3.474e+02 8.376e+02, threshold=5.656e+02, percent-clipped=3.0 +2023-02-09 03:56:08,185 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983 from training. 
Duration: 0.83 +2023-02-09 03:56:12,490 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5011, 1.3781, 1.7181, 1.2967, 0.8903, 1.4219, 1.4542, 1.3923], + device='cuda:2'), covar=tensor([0.0611, 0.1273, 0.1591, 0.1467, 0.0582, 0.1447, 0.0756, 0.0675], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:2') +2023-02-09 03:56:29,263 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.53 vs. limit=2.0 +2023-02-09 03:56:30,983 INFO [train.py:901] (2/4) Epoch 30, batch 4550, loss[loss=0.2023, simple_loss=0.3013, pruned_loss=0.05164, over 8460.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2813, pruned_loss=0.05703, over 1613681.36 frames. ], batch size: 49, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:56:31,139 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238954.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:57:06,050 INFO [train.py:901] (2/4) Epoch 30, batch 4600, loss[loss=0.2478, simple_loss=0.3189, pruned_loss=0.08831, over 6429.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2813, pruned_loss=0.05698, over 1611304.22 frames. ], batch size: 72, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:57:19,158 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.347e+02 2.832e+02 3.443e+02 5.144e+02, threshold=5.665e+02, percent-clipped=0.0 +2023-02-09 03:57:26,210 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.6960, 5.8614, 5.1091, 2.5732, 5.0925, 5.4613, 5.3149, 5.2603], + device='cuda:2'), covar=tensor([0.0426, 0.0264, 0.0708, 0.4016, 0.0704, 0.0749, 0.0969, 0.0529], + device='cuda:2'), in_proj_covar=tensor([0.0556, 0.0466, 0.0458, 0.0571, 0.0451, 0.0481, 0.0455, 0.0420], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 03:57:34,217 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239043.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:57:40,144 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.92 vs. limit=5.0 +2023-02-09 03:57:41,725 INFO [train.py:901] (2/4) Epoch 30, batch 4650, loss[loss=0.2051, simple_loss=0.2817, pruned_loss=0.06428, over 8301.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2822, pruned_loss=0.05718, over 1617442.91 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:17,765 INFO [train.py:901] (2/4) Epoch 30, batch 4700, loss[loss=0.2065, simple_loss=0.2936, pruned_loss=0.05972, over 8334.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.0574, over 1613490.41 frames. 
], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:25,196 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.0687, 1.3025, 1.2180, 0.7359, 1.2374, 1.0539, 0.1050, 1.2411], + device='cuda:2'), covar=tensor([0.0527, 0.0430, 0.0404, 0.0660, 0.0458, 0.1010, 0.0943, 0.0377], + device='cuda:2'), in_proj_covar=tensor([0.0481, 0.0420, 0.0378, 0.0467, 0.0402, 0.0560, 0.0407, 0.0449], + device='cuda:2'), out_proj_covar=tensor([1.2733e-04, 1.0839e-04, 9.8362e-05, 1.2200e-04, 1.0492e-04, 1.5570e-04, + 1.0844e-04, 1.1746e-04], device='cuda:2') +2023-02-09 03:58:30,952 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.353e+02 2.866e+02 3.941e+02 8.957e+02, threshold=5.733e+02, percent-clipped=8.0 +2023-02-09 03:58:52,478 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239153.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:58:52,925 INFO [train.py:901] (2/4) Epoch 30, batch 4750, loss[loss=0.1899, simple_loss=0.2821, pruned_loss=0.0489, over 8107.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.283, pruned_loss=0.05791, over 1615109.88 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:58:55,954 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239158.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:59:05,580 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.37 vs. limit=5.0 +2023-02-09 03:59:10,883 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239178.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 03:59:12,054 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 03:59:14,171 WARNING [train.py:1067] (2/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775 +2023-02-09 03:59:21,387 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.76 vs. limit=5.0 +2023-02-09 03:59:28,648 INFO [train.py:901] (2/4) Epoch 30, batch 4800, loss[loss=0.2328, simple_loss=0.3013, pruned_loss=0.08218, over 8326.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.05741, over 1615650.97 frames. 
], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 03:59:35,690 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7833, 3.1195, 2.5954, 4.1539, 1.7939, 2.2682, 3.0009, 2.9279], + device='cuda:2'), covar=tensor([0.0619, 0.0694, 0.0701, 0.0196, 0.1017, 0.1147, 0.0687, 0.0758], + device='cuda:2'), in_proj_covar=tensor([0.0233, 0.0195, 0.0245, 0.0215, 0.0203, 0.0247, 0.0250, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 03:59:41,698 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.393e+02 3.010e+02 3.751e+02 7.640e+02, threshold=6.020e+02, percent-clipped=2.0 +2023-02-09 03:59:46,062 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7580, 2.1857, 3.5283, 1.7063, 1.7201, 3.4250, 0.8322, 2.1228], + device='cuda:2'), covar=tensor([0.1227, 0.1005, 0.0178, 0.1368, 0.2242, 0.0230, 0.1781, 0.1114], + device='cuda:2'), in_proj_covar=tensor([0.0203, 0.0205, 0.0138, 0.0223, 0.0276, 0.0148, 0.0174, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 04:00:04,581 INFO [train.py:901] (2/4) Epoch 30, batch 4850, loss[loss=0.1813, simple_loss=0.2751, pruned_loss=0.04377, over 8613.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05671, over 1614597.28 frames. ], batch size: 34, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:00:06,729 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 04:00:36,496 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239298.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:00:40,436 INFO [train.py:901] (2/4) Epoch 30, batch 4900, loss[loss=0.1805, simple_loss=0.2782, pruned_loss=0.0414, over 8482.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05667, over 1617488.67 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:00:53,055 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.412e+02 2.818e+02 3.519e+02 1.028e+03, threshold=5.635e+02, percent-clipped=4.0 +2023-02-09 04:01:15,934 INFO [train.py:901] (2/4) Epoch 30, batch 4950, loss[loss=0.217, simple_loss=0.3046, pruned_loss=0.06467, over 8194.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2804, pruned_loss=0.05639, over 1617013.18 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:43,675 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7239, 4.7514, 4.3081, 2.1725, 4.1832, 4.3844, 4.3104, 4.2338], + device='cuda:2'), covar=tensor([0.0660, 0.0502, 0.0928, 0.4391, 0.0918, 0.0867, 0.1173, 0.0697], + device='cuda:2'), in_proj_covar=tensor([0.0556, 0.0465, 0.0455, 0.0569, 0.0451, 0.0480, 0.0453, 0.0418], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:01:51,704 INFO [train.py:901] (2/4) Epoch 30, batch 5000, loss[loss=0.2193, simple_loss=0.2993, pruned_loss=0.06964, over 8279.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.281, pruned_loss=0.05678, over 1615245.45 frames. 
], batch size: 49, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:58,093 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239413.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:01:58,912 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239414.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:04,998 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.513e+02 3.095e+02 3.810e+02 1.179e+03, threshold=6.190e+02, percent-clipped=9.0 +2023-02-09 04:02:17,682 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239439.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:29,042 INFO [train.py:901] (2/4) Epoch 30, batch 5050, loss[loss=0.1657, simple_loss=0.2446, pruned_loss=0.04344, over 7558.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2811, pruned_loss=0.05728, over 1614132.39 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:02:52,719 WARNING [train.py:1067] (2/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 04:03:05,847 INFO [train.py:901] (2/4) Epoch 30, batch 5100, loss[loss=0.1978, simple_loss=0.2826, pruned_loss=0.05651, over 8197.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2808, pruned_loss=0.05724, over 1610906.22 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:08,876 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8284, 1.6268, 2.4107, 1.4265, 1.3905, 2.3345, 0.4040, 1.4406], + device='cuda:2'), covar=tensor([0.1358, 0.1253, 0.0290, 0.1183, 0.2337, 0.0479, 0.1970, 0.1297], + device='cuda:2'), in_proj_covar=tensor([0.0203, 0.0207, 0.0138, 0.0225, 0.0279, 0.0149, 0.0175, 0.0201], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 04:03:20,027 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.525e+02 3.230e+02 3.994e+02 1.175e+03, threshold=6.461e+02, percent-clipped=6.0 +2023-02-09 04:03:23,165 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.2045, 1.0765, 1.3037, 0.9594, 0.9758, 1.3007, 0.0730, 0.8966], + device='cuda:2'), covar=tensor([0.1393, 0.1241, 0.0533, 0.0652, 0.2171, 0.0583, 0.1865, 0.1158], + device='cuda:2'), in_proj_covar=tensor([0.0203, 0.0207, 0.0138, 0.0225, 0.0279, 0.0149, 0.0175, 0.0202], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 04:03:24,059 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.69 vs. limit=5.0 +2023-02-09 04:03:42,212 INFO [train.py:901] (2/4) Epoch 30, batch 5150, loss[loss=0.2139, simple_loss=0.2999, pruned_loss=0.06399, over 8098.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2819, pruned_loss=0.05787, over 1607862.76 frames. 
], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:44,421 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7109, 1.6206, 2.3126, 1.5186, 1.4124, 2.2223, 0.4094, 1.4359], + device='cuda:2'), covar=tensor([0.1487, 0.1105, 0.0330, 0.0989, 0.2202, 0.0393, 0.1789, 0.1216], + device='cuda:2'), in_proj_covar=tensor([0.0204, 0.0207, 0.0139, 0.0225, 0.0279, 0.0149, 0.0175, 0.0202], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 04:03:47,836 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6023, 2.4914, 1.9174, 2.3192, 2.1394, 1.6458, 2.0959, 2.0516], + device='cuda:2'), covar=tensor([0.1511, 0.0464, 0.1275, 0.0622, 0.0728, 0.1622, 0.0978, 0.1048], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0247, 0.0348, 0.0317, 0.0304, 0.0350, 0.0352, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 04:03:49,473 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 04:04:18,743 INFO [train.py:901] (2/4) Epoch 30, batch 5200, loss[loss=0.2027, simple_loss=0.2896, pruned_loss=0.0579, over 8477.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2811, pruned_loss=0.05702, over 1611224.30 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:31,922 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.360e+02 2.794e+02 3.430e+02 1.458e+03, threshold=5.587e+02, percent-clipped=2.0 +2023-02-09 04:04:44,694 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=239640.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:04:54,717 INFO [train.py:901] (2/4) Epoch 30, batch 5250, loss[loss=0.2159, simple_loss=0.2984, pruned_loss=0.06668, over 8289.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2812, pruned_loss=0.05685, over 1613065.99 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:56,126 WARNING [train.py:1067] (2/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 04:05:05,221 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239669.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:05:23,272 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239694.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:05:29,842 INFO [train.py:901] (2/4) Epoch 30, batch 5300, loss[loss=0.2171, simple_loss=0.3017, pruned_loss=0.06622, over 8764.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2819, pruned_loss=0.05713, over 1614816.44 frames. ], batch size: 30, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:05:43,733 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.433e+02 2.937e+02 3.850e+02 7.663e+02, threshold=5.875e+02, percent-clipped=5.0 +2023-02-09 04:05:51,710 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.31 vs. limit=5.0 +2023-02-09 04:06:04,874 INFO [train.py:901] (2/4) Epoch 30, batch 5350, loss[loss=0.1696, simple_loss=0.258, pruned_loss=0.0406, over 8504.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05739, over 1614735.10 frames. ], batch size: 28, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:41,533 INFO [train.py:901] (2/4) Epoch 30, batch 5400, loss[loss=0.2232, simple_loss=0.3129, pruned_loss=0.06673, over 8343.00 frames. 
], tot_loss[loss=0.1989, simple_loss=0.2827, pruned_loss=0.05756, over 1610162.89 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:55,041 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.371e+02 2.881e+02 3.522e+02 8.420e+02, threshold=5.763e+02, percent-clipped=7.0 +2023-02-09 04:07:01,254 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.4725, 2.3664, 3.0336, 2.4360, 3.1254, 2.5120, 2.3950, 1.9435], + device='cuda:2'), covar=tensor([0.5720, 0.5298, 0.2344, 0.4414, 0.2788, 0.3241, 0.1940, 0.6026], + device='cuda:2'), in_proj_covar=tensor([0.0969, 0.1035, 0.0848, 0.1010, 0.1032, 0.0949, 0.0780, 0.0857], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 04:07:09,548 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5384, 2.3233, 1.7449, 2.2717, 1.9771, 1.5386, 1.9617, 1.9977], + device='cuda:2'), covar=tensor([0.1403, 0.0459, 0.1427, 0.0556, 0.0798, 0.1635, 0.0938, 0.0890], + device='cuda:2'), in_proj_covar=tensor([0.0356, 0.0244, 0.0343, 0.0313, 0.0299, 0.0346, 0.0347, 0.0318], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 04:07:17,645 INFO [train.py:901] (2/4) Epoch 30, batch 5450, loss[loss=0.2139, simple_loss=0.3057, pruned_loss=0.06112, over 8102.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2821, pruned_loss=0.05715, over 1610609.13 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:07:49,868 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 04:07:53,972 INFO [train.py:901] (2/4) Epoch 30, batch 5500, loss[loss=0.2153, simple_loss=0.2986, pruned_loss=0.06596, over 8494.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2822, pruned_loss=0.05738, over 1611064.96 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:08,780 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.378e+02 3.012e+02 4.037e+02 9.246e+02, threshold=6.023e+02, percent-clipped=5.0 +2023-02-09 04:08:30,322 INFO [train.py:901] (2/4) Epoch 30, batch 5550, loss[loss=0.1712, simple_loss=0.2513, pruned_loss=0.04556, over 7542.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.282, pruned_loss=0.05769, over 1609950.18 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:50,818 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:09:07,157 INFO [train.py:901] (2/4) Epoch 30, batch 5600, loss[loss=0.1971, simple_loss=0.295, pruned_loss=0.04959, over 8345.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05775, over 1608414.39 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:21,032 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.479e+02 2.939e+02 3.472e+02 8.474e+02, threshold=5.878e+02, percent-clipped=2.0 +2023-02-09 04:09:42,587 INFO [train.py:901] (2/4) Epoch 30, batch 5650, loss[loss=0.1888, simple_loss=0.2815, pruned_loss=0.04805, over 8580.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2826, pruned_loss=0.05772, over 1610540.00 frames. ], batch size: 31, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:59,655 WARNING [train.py:1067] (2/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. 
Duration: 25.988875 +2023-02-09 04:10:14,051 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240099.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:10:16,487 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=3.88 vs. limit=5.0 +2023-02-09 04:10:17,315 INFO [train.py:901] (2/4) Epoch 30, batch 5700, loss[loss=0.1882, simple_loss=0.2806, pruned_loss=0.04793, over 8197.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05796, over 1614680.18 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:10:32,005 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 2.506e+02 3.162e+02 4.194e+02 1.225e+03, threshold=6.325e+02, percent-clipped=8.0 +2023-02-09 04:10:53,042 INFO [train.py:901] (2/4) Epoch 30, batch 5750, loss[loss=0.2201, simple_loss=0.2962, pruned_loss=0.07199, over 8464.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.05833, over 1614041.00 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:04,197 WARNING [train.py:1067] (2/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 04:11:28,591 INFO [train.py:901] (2/4) Epoch 30, batch 5800, loss[loss=0.2007, simple_loss=0.2801, pruned_loss=0.0607, over 8111.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2815, pruned_loss=0.05753, over 1610555.71 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:42,452 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.400e+02 2.667e+02 3.487e+02 8.848e+02, threshold=5.334e+02, percent-clipped=2.0 +2023-02-09 04:11:43,511 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.75 vs. limit=5.0 +2023-02-09 04:12:04,252 INFO [train.py:901] (2/4) Epoch 30, batch 5850, loss[loss=0.2281, simple_loss=0.3015, pruned_loss=0.07733, over 7289.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2813, pruned_loss=0.05708, over 1608351.21 frames. ], batch size: 73, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:39,827 INFO [train.py:901] (2/4) Epoch 30, batch 5900, loss[loss=0.224, simple_loss=0.308, pruned_loss=0.06995, over 8445.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2805, pruned_loss=0.05653, over 1608165.78 frames. ], batch size: 48, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:53,715 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.330e+02 2.970e+02 3.920e+02 1.059e+03, threshold=5.939e+02, percent-clipped=6.0 +2023-02-09 04:13:13,640 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.9136, 2.0976, 1.7517, 2.8276, 1.3054, 1.6795, 2.1318, 2.1018], + device='cuda:2'), covar=tensor([0.0786, 0.0852, 0.0913, 0.0322, 0.1066, 0.1252, 0.0754, 0.0827], + device='cuda:2'), in_proj_covar=tensor([0.0230, 0.0193, 0.0244, 0.0213, 0.0201, 0.0245, 0.0247, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 04:13:15,499 INFO [train.py:901] (2/4) Epoch 30, batch 5950, loss[loss=0.2152, simple_loss=0.2921, pruned_loss=0.06916, over 7808.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2818, pruned_loss=0.05723, over 1610209.67 frames. 
], batch size: 19, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:16,424 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=240355.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:18,458 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240358.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:33,807 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=240380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:50,594 INFO [train.py:901] (2/4) Epoch 30, batch 6000, loss[loss=0.2247, simple_loss=0.3097, pruned_loss=0.06986, over 8772.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.281, pruned_loss=0.05702, over 1609916.44 frames. ], batch size: 30, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:50,594 INFO [train.py:926] (2/4) Computing validation loss +2023-02-09 04:14:04,302 INFO [train.py:935] (2/4) Epoch 30, validation: loss=0.1701, simple_loss=0.2695, pruned_loss=0.03536, over 944034.00 frames. +2023-02-09 04:14:04,303 INFO [train.py:936] (2/4) Maximum memory allocated so far is 6731MB +2023-02-09 04:14:07,896 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3546, 1.4564, 1.3536, 1.7980, 0.7287, 1.1920, 1.3062, 1.4739], + device='cuda:2'), covar=tensor([0.0856, 0.0755, 0.0939, 0.0454, 0.1129, 0.1385, 0.0738, 0.0686], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0213, 0.0202, 0.0246, 0.0248, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 04:14:17,955 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.377e+02 3.122e+02 3.554e+02 6.850e+02, threshold=6.243e+02, percent-clipped=2.0 +2023-02-09 04:14:39,916 INFO [train.py:901] (2/4) Epoch 30, batch 6050, loss[loss=0.1591, simple_loss=0.2377, pruned_loss=0.04022, over 7687.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2811, pruned_loss=0.05709, over 1609950.23 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:16,411 INFO [train.py:901] (2/4) Epoch 30, batch 6100, loss[loss=0.1812, simple_loss=0.271, pruned_loss=0.04573, over 8468.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2818, pruned_loss=0.05725, over 1615429.95 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:30,263 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 2.992e+02 3.767e+02 7.583e+02, threshold=5.983e+02, percent-clipped=4.0 +2023-02-09 04:15:40,575 WARNING [train.py:1067] (2/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 04:15:51,558 INFO [train.py:901] (2/4) Epoch 30, batch 6150, loss[loss=0.1819, simple_loss=0.2609, pruned_loss=0.05152, over 7788.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.05774, over 1618486.56 frames. 
], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:24,186 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.8890, 1.3625, 3.1437, 1.5413, 2.4173, 3.3681, 3.5146, 2.9468], + device='cuda:2'), covar=tensor([0.1200, 0.1994, 0.0331, 0.2105, 0.0947, 0.0262, 0.0651, 0.0506], + device='cuda:2'), in_proj_covar=tensor([0.0309, 0.0330, 0.0296, 0.0330, 0.0330, 0.0284, 0.0452, 0.0309], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 04:16:28,465 INFO [train.py:901] (2/4) Epoch 30, batch 6200, loss[loss=0.1974, simple_loss=0.2836, pruned_loss=0.05563, over 8331.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2818, pruned_loss=0.05765, over 1616452.32 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:44,278 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.533e+02 2.968e+02 3.901e+02 6.917e+02, threshold=5.935e+02, percent-clipped=4.0 +2023-02-09 04:17:05,850 INFO [train.py:901] (2/4) Epoch 30, batch 6250, loss[loss=0.2322, simple_loss=0.299, pruned_loss=0.08267, over 7968.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.28, pruned_loss=0.05691, over 1611735.74 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:06,787 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5747, 2.9509, 2.5163, 4.0560, 1.5917, 2.1751, 2.5120, 2.7651], + device='cuda:2'), covar=tensor([0.0669, 0.0744, 0.0675, 0.0201, 0.1104, 0.1196, 0.0895, 0.0763], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0214, 0.0203, 0.0247, 0.0248, 0.0205], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 04:17:16,721 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 04:17:39,922 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=240702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:17:41,235 INFO [train.py:901] (2/4) Epoch 30, batch 6300, loss[loss=0.1797, simple_loss=0.2627, pruned_loss=0.04836, over 7519.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2798, pruned_loss=0.05606, over 1608233.25 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:51,941 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=2.93 vs. limit=5.0 +2023-02-09 04:17:54,933 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.569e+02 3.101e+02 4.376e+02 1.063e+03, threshold=6.203e+02, percent-clipped=9.0 +2023-02-09 04:18:17,086 INFO [train.py:901] (2/4) Epoch 30, batch 6350, loss[loss=0.1749, simple_loss=0.2624, pruned_loss=0.04374, over 7804.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2806, pruned_loss=0.05681, over 1610380.86 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:18:53,328 INFO [train.py:901] (2/4) Epoch 30, batch 6400, loss[loss=0.1796, simple_loss=0.2637, pruned_loss=0.0477, over 7534.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.281, pruned_loss=0.05674, over 1613211.69 frames. 
], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:19:02,608 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240817.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:19:07,074 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.422e+02 2.800e+02 3.642e+02 5.918e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-09 04:19:28,696 INFO [train.py:901] (2/4) Epoch 30, batch 6450, loss[loss=0.1796, simple_loss=0.2578, pruned_loss=0.05071, over 7418.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2822, pruned_loss=0.05728, over 1612937.77 frames. ], batch size: 17, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:20:03,783 INFO [train.py:901] (2/4) Epoch 30, batch 6500, loss[loss=0.1867, simple_loss=0.2741, pruned_loss=0.04967, over 8292.00 frames. ], tot_loss[loss=0.198, simple_loss=0.282, pruned_loss=0.05701, over 1609614.08 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:20:16,798 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7850, 1.9019, 1.7383, 2.2631, 0.9621, 1.5467, 1.7388, 1.9229], + device='cuda:2'), covar=tensor([0.0755, 0.0759, 0.0893, 0.0414, 0.1055, 0.1298, 0.0720, 0.0680], + device='cuda:2'), in_proj_covar=tensor([0.0232, 0.0194, 0.0245, 0.0214, 0.0203, 0.0247, 0.0249, 0.0204], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 04:20:17,923 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.582e+02 3.161e+02 3.840e+02 1.025e+03, threshold=6.322e+02, percent-clipped=7.0 +2023-02-09 04:20:36,286 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240950.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:20:38,871 INFO [train.py:901] (2/4) Epoch 30, batch 6550, loss[loss=0.1794, simple_loss=0.2616, pruned_loss=0.04862, over 8499.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2816, pruned_loss=0.05659, over 1611541.24 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:00,575 WARNING [train.py:1067] (2/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 04:21:15,890 INFO [train.py:901] (2/4) Epoch 30, batch 6600, loss[loss=0.2261, simple_loss=0.3191, pruned_loss=0.06659, over 8477.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05639, over 1609323.68 frames. ], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:16,033 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241004.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:21:20,065 WARNING [train.py:1067] (2/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 04:21:27,612 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.39 vs. limit=2.0 +2023-02-09 04:21:29,847 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.233e+02 3.026e+02 3.930e+02 1.368e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-09 04:21:51,501 INFO [train.py:901] (2/4) Epoch 30, batch 6650, loss[loss=0.1904, simple_loss=0.2643, pruned_loss=0.05825, over 7711.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.279, pruned_loss=0.05532, over 1606412.90 frames. 
], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:53,753 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5936, 1.6299, 4.3249, 2.0024, 2.7010, 4.8910, 5.0176, 4.2446], + device='cuda:2'), covar=tensor([0.1015, 0.2016, 0.0276, 0.1937, 0.1059, 0.0174, 0.0454, 0.0562], + device='cuda:2'), in_proj_covar=tensor([0.0311, 0.0331, 0.0299, 0.0331, 0.0332, 0.0285, 0.0455, 0.0311], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 04:22:04,820 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241073.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:07,893 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-09 04:22:08,260 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5694, 1.3533, 2.3470, 1.3002, 2.2301, 2.4906, 2.7051, 2.1164], + device='cuda:2'), covar=tensor([0.1158, 0.1529, 0.0427, 0.2208, 0.0756, 0.0408, 0.0615, 0.0671], + device='cuda:2'), in_proj_covar=tensor([0.0312, 0.0332, 0.0299, 0.0331, 0.0333, 0.0286, 0.0456, 0.0312], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:2') +2023-02-09 04:22:23,569 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:27,617 INFO [train.py:901] (2/4) Epoch 30, batch 6700, loss[loss=0.1711, simple_loss=0.2491, pruned_loss=0.0465, over 7422.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2803, pruned_loss=0.05591, over 1612543.87 frames. ], batch size: 17, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:22:28,144 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.40 vs. limit=2.0 +2023-02-09 04:22:42,111 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.246e+02 2.834e+02 3.422e+02 9.903e+02, threshold=5.667e+02, percent-clipped=4.0 +2023-02-09 04:22:57,376 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0 +2023-02-09 04:23:04,034 INFO [train.py:901] (2/4) Epoch 30, batch 6750, loss[loss=0.182, simple_loss=0.2691, pruned_loss=0.04747, over 7649.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2803, pruned_loss=0.05562, over 1616486.60 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:39,180 INFO [train.py:901] (2/4) Epoch 30, batch 6800, loss[loss=0.2325, simple_loss=0.3177, pruned_loss=0.0737, over 8462.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2816, pruned_loss=0.05616, over 1617884.34 frames. ], batch size: 29, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:42,693 WARNING [train.py:1067] (2/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-09 04:23:53,781 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.178e+02 2.682e+02 3.526e+02 7.087e+02, threshold=5.364e+02, percent-clipped=2.0 +2023-02-09 04:23:54,034 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5598, 1.8589, 2.9053, 1.4399, 2.1353, 1.9807, 1.6017, 2.2127], + device='cuda:2'), covar=tensor([0.2236, 0.3107, 0.0992, 0.5263, 0.2201, 0.3535, 0.2944, 0.2490], + device='cuda:2'), in_proj_covar=tensor([0.0549, 0.0651, 0.0571, 0.0681, 0.0675, 0.0624, 0.0576, 0.0653], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:24:05,764 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.5701, 5.6296, 5.0746, 2.5603, 5.0409, 5.3005, 5.1134, 5.0851], + device='cuda:2'), covar=tensor([0.0595, 0.0418, 0.0881, 0.4161, 0.0772, 0.0859, 0.1049, 0.0634], + device='cuda:2'), in_proj_covar=tensor([0.0556, 0.0466, 0.0458, 0.0568, 0.0451, 0.0480, 0.0454, 0.0419], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:24:15,459 INFO [train.py:901] (2/4) Epoch 30, batch 6850, loss[loss=0.1898, simple_loss=0.2609, pruned_loss=0.05939, over 7244.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2797, pruned_loss=0.05541, over 1617672.29 frames. ], batch size: 16, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:24:34,755 WARNING [train.py:1067] (2/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 04:24:42,628 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.7431, 2.1998, 3.7773, 1.5619, 2.6959, 2.2608, 1.8369, 2.7094], + device='cuda:2'), covar=tensor([0.1996, 0.2910, 0.0987, 0.4786, 0.2167, 0.3441, 0.2611, 0.2869], + device='cuda:2'), in_proj_covar=tensor([0.0548, 0.0650, 0.0569, 0.0679, 0.0674, 0.0623, 0.0575, 0.0653], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:24:43,872 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241294.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:24:50,576 INFO [train.py:901] (2/4) Epoch 30, batch 6900, loss[loss=0.189, simple_loss=0.2557, pruned_loss=0.06116, over 7422.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2797, pruned_loss=0.05527, over 1617383.55 frames. ], batch size: 17, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:05,717 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.527e+02 3.094e+02 3.969e+02 8.004e+02, threshold=6.188e+02, percent-clipped=9.0 +2023-02-09 04:25:22,107 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241348.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:25:26,914 INFO [train.py:901] (2/4) Epoch 30, batch 6950, loss[loss=0.1888, simple_loss=0.279, pruned_loss=0.04935, over 8295.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2787, pruned_loss=0.05512, over 1615863.64 frames. 
], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:32,447 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2211, 1.8594, 2.5541, 1.5130, 1.7054, 2.5068, 1.3166, 2.0969], + device='cuda:2'), covar=tensor([0.1459, 0.1004, 0.0345, 0.1177, 0.1690, 0.0607, 0.1494, 0.1146], + device='cuda:2'), in_proj_covar=tensor([0.0202, 0.0206, 0.0137, 0.0222, 0.0276, 0.0149, 0.0174, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 04:25:46,728 WARNING [train.py:1067] (2/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 04:25:53,380 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.5821, 3.0103, 2.3353, 4.0708, 1.7151, 2.1293, 2.6451, 2.8457], + device='cuda:2'), covar=tensor([0.0655, 0.0667, 0.0768, 0.0193, 0.1019, 0.1242, 0.0882, 0.0799], + device='cuda:2'), in_proj_covar=tensor([0.0231, 0.0193, 0.0244, 0.0213, 0.0201, 0.0246, 0.0247, 0.0203], + device='cuda:2'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:2') +2023-02-09 04:26:04,041 INFO [train.py:901] (2/4) Epoch 30, batch 7000, loss[loss=0.2113, simple_loss=0.2873, pruned_loss=0.06767, over 8084.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2786, pruned_loss=0.05514, over 1614076.03 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:07,764 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241409.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:26:17,959 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.441e+02 2.932e+02 3.651e+02 7.920e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-09 04:26:18,117 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241424.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:26:30,777 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.1433, 1.2801, 1.2182, 0.9190, 1.2189, 1.0583, 0.1497, 1.2229], + device='cuda:2'), covar=tensor([0.0510, 0.0464, 0.0442, 0.0570, 0.0541, 0.1191, 0.0976, 0.0380], + device='cuda:2'), in_proj_covar=tensor([0.0483, 0.0418, 0.0377, 0.0468, 0.0405, 0.0560, 0.0406, 0.0447], + device='cuda:2'), out_proj_covar=tensor([1.2779e-04, 1.0769e-04, 9.8145e-05, 1.2203e-04, 1.0567e-04, 1.5574e-04, + 1.0832e-04, 1.1685e-04], device='cuda:2') +2023-02-09 04:26:40,280 INFO [train.py:901] (2/4) Epoch 30, batch 7050, loss[loss=0.1999, simple_loss=0.2873, pruned_loss=0.05621, over 8557.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.279, pruned_loss=0.05561, over 1613609.28 frames. ], batch size: 31, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:46,391 INFO [scaling.py:679] (2/4) Whitening: num_groups=1, num_channels=256, metric=4.36 vs. limit=5.0 +2023-02-09 04:26:46,797 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241463.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:27:16,738 INFO [train.py:901] (2/4) Epoch 30, batch 7100, loss[loss=0.1949, simple_loss=0.267, pruned_loss=0.06136, over 7530.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2803, pruned_loss=0.05624, over 1615291.19 frames. 
], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:27:28,169 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.3405, 2.1249, 1.6952, 2.0270, 1.7122, 1.4391, 1.6077, 1.6643], + device='cuda:2'), covar=tensor([0.1318, 0.0474, 0.1345, 0.0545, 0.0824, 0.1679, 0.1029, 0.0966], + device='cuda:2'), in_proj_covar=tensor([0.0363, 0.0247, 0.0349, 0.0318, 0.0305, 0.0352, 0.0353, 0.0323], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 04:27:30,723 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.381e+02 2.857e+02 3.660e+02 8.579e+02, threshold=5.714e+02, percent-clipped=3.0 +2023-02-09 04:27:51,598 INFO [train.py:901] (2/4) Epoch 30, batch 7150, loss[loss=0.1617, simple_loss=0.2306, pruned_loss=0.04636, over 7416.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2812, pruned_loss=0.05655, over 1617978.54 frames. ], batch size: 17, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:28,702 INFO [train.py:901] (2/4) Epoch 30, batch 7200, loss[loss=0.1847, simple_loss=0.2623, pruned_loss=0.05358, over 8081.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2809, pruned_loss=0.05614, over 1620207.82 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:43,487 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.259e+02 2.765e+02 3.853e+02 1.030e+03, threshold=5.530e+02, percent-clipped=3.0 +2023-02-09 04:28:53,993 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241639.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:03,963 INFO [train.py:901] (2/4) Epoch 30, batch 7250, loss[loss=0.2025, simple_loss=0.2884, pruned_loss=0.05824, over 7940.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2799, pruned_loss=0.05603, over 1611674.76 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:11,986 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241665.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:29:30,386 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241690.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:29:40,038 INFO [train.py:901] (2/4) Epoch 30, batch 7300, loss[loss=0.1904, simple_loss=0.2801, pruned_loss=0.05041, over 8459.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2794, pruned_loss=0.05546, over 1612661.89 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:50,473 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241719.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:53,738 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.349e+02 2.997e+02 3.899e+02 6.597e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-09 04:30:08,887 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:15,572 INFO [train.py:901] (2/4) Epoch 30, batch 7350, loss[loss=0.1804, simple_loss=0.2594, pruned_loss=0.05072, over 7556.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2796, pruned_loss=0.0557, over 1613680.95 frames. 
], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:25,405 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241768.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:39,742 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 04:30:46,599 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([5.9678, 1.6789, 6.1269, 2.3317, 5.4823, 5.0294, 5.6193, 5.5230], + device='cuda:2'), covar=tensor([0.0486, 0.4753, 0.0460, 0.3804, 0.1068, 0.0897, 0.0512, 0.0571], + device='cuda:2'), in_proj_covar=tensor([0.0703, 0.0676, 0.0762, 0.0680, 0.0765, 0.0653, 0.0664, 0.0740], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:30:51,563 INFO [train.py:901] (2/4) Epoch 30, batch 7400, loss[loss=0.178, simple_loss=0.2738, pruned_loss=0.04109, over 8131.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2793, pruned_loss=0.05559, over 1609285.72 frames. ], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:59,777 WARNING [train.py:1067] (2/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 04:31:04,085 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241821.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:05,936 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.497e+02 3.037e+02 3.880e+02 5.984e+02, threshold=6.074e+02, percent-clipped=0.0 +2023-02-09 04:31:24,334 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241849.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:31:27,627 INFO [train.py:901] (2/4) Epoch 30, batch 7450, loss[loss=0.2036, simple_loss=0.2865, pruned_loss=0.0604, over 8089.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2797, pruned_loss=0.05584, over 1611662.06 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:31:40,210 WARNING [train.py:1067] (2/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 04:31:48,184 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:48,872 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2663, 3.4453, 2.1928, 3.0338, 2.8381, 2.0186, 2.8300, 2.9813], + device='cuda:2'), covar=tensor([0.1607, 0.0430, 0.1322, 0.0663, 0.0689, 0.1580, 0.1027, 0.1083], + device='cuda:2'), in_proj_covar=tensor([0.0361, 0.0245, 0.0348, 0.0317, 0.0304, 0.0350, 0.0351, 0.0322], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 04:32:02,782 INFO [train.py:901] (2/4) Epoch 30, batch 7500, loss[loss=0.18, simple_loss=0.2757, pruned_loss=0.04219, over 8546.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.279, pruned_loss=0.05527, over 1613679.67 frames. ], batch size: 49, lr: 2.49e-03, grad_scale: 16.0 +2023-02-09 04:32:13,912 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.45 vs. 
limit=2.0 +2023-02-09 04:32:18,961 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.491e+02 2.852e+02 3.531e+02 9.058e+02, threshold=5.704e+02, percent-clipped=2.0 +2023-02-09 04:32:28,836 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4864, 1.8396, 1.8614, 1.2239, 1.8812, 1.4411, 0.4677, 1.7283], + device='cuda:2'), covar=tensor([0.0851, 0.0441, 0.0437, 0.0785, 0.0644, 0.1227, 0.1181, 0.0387], + device='cuda:2'), in_proj_covar=tensor([0.0482, 0.0418, 0.0376, 0.0467, 0.0404, 0.0560, 0.0407, 0.0446], + device='cuda:2'), out_proj_covar=tensor([1.2741e-04, 1.0779e-04, 9.7851e-05, 1.2190e-04, 1.0548e-04, 1.5577e-04, + 1.0841e-04, 1.1653e-04], device='cuda:2') +2023-02-09 04:32:39,978 INFO [train.py:901] (2/4) Epoch 30, batch 7550, loss[loss=0.2159, simple_loss=0.3055, pruned_loss=0.06318, over 7970.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2793, pruned_loss=0.05529, over 1612062.64 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:32:46,886 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-09 04:33:01,507 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241983.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:33:03,586 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.4108, 4.4239, 3.9858, 2.0570, 3.8505, 4.0185, 3.9821, 3.8728], + device='cuda:2'), covar=tensor([0.0710, 0.0500, 0.1042, 0.4531, 0.0975, 0.0922, 0.1195, 0.0793], + device='cuda:2'), in_proj_covar=tensor([0.0553, 0.0460, 0.0454, 0.0563, 0.0449, 0.0478, 0.0450, 0.0416], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:33:12,195 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.5067, 1.3927, 1.8031, 1.1717, 1.1405, 1.7676, 0.2382, 1.1102], + device='cuda:2'), covar=tensor([0.1403, 0.1129, 0.0371, 0.0825, 0.2242, 0.0425, 0.1714, 0.1169], + device='cuda:2'), in_proj_covar=tensor([0.0202, 0.0205, 0.0137, 0.0222, 0.0276, 0.0148, 0.0174, 0.0200], + device='cuda:2'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:2') +2023-02-09 04:33:17,220 INFO [train.py:901] (2/4) Epoch 30, batch 7600, loss[loss=0.1823, simple_loss=0.2621, pruned_loss=0.05122, over 7933.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2792, pruned_loss=0.05548, over 1612124.84 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:33:32,888 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.414e+02 3.070e+02 3.745e+02 6.631e+02, threshold=6.140e+02, percent-clipped=3.0 +2023-02-09 04:33:54,462 INFO [train.py:901] (2/4) Epoch 30, batch 7650, loss[loss=0.192, simple_loss=0.2706, pruned_loss=0.05668, over 7973.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2803, pruned_loss=0.05653, over 1610967.39 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:33:56,937 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.28 vs. limit=2.0 +2023-02-09 04:34:25,721 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:34:29,810 INFO [train.py:901] (2/4) Epoch 30, batch 7700, loss[loss=0.1711, simple_loss=0.2555, pruned_loss=0.04335, over 8138.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2806, pruned_loss=0.05645, over 1615597.74 frames. 
], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:39,658 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.4804, 1.3587, 1.4827, 1.3203, 0.9356, 1.4048, 1.4652, 1.4245], + device='cuda:2'), covar=tensor([0.0733, 0.0955, 0.1335, 0.1220, 0.0627, 0.1139, 0.0764, 0.0507], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0154, 0.0191, 0.0162, 0.0102, 0.0164, 0.0113, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 04:34:44,311 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.431e+02 3.028e+02 3.722e+02 6.918e+02, threshold=6.057e+02, percent-clipped=1.0 +2023-02-09 04:34:54,353 WARNING [train.py:1067] (2/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 04:34:55,206 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([4.7330, 1.4205, 4.9688, 1.9274, 4.4741, 4.1622, 4.4823, 4.3872], + device='cuda:2'), covar=tensor([0.0552, 0.4826, 0.0415, 0.4207, 0.0982, 0.0859, 0.0568, 0.0614], + device='cuda:2'), in_proj_covar=tensor([0.0699, 0.0672, 0.0757, 0.0675, 0.0758, 0.0646, 0.0659, 0.0733], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:34:55,291 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:05,979 INFO [train.py:901] (2/4) Epoch 30, batch 7750, loss[loss=0.1943, simple_loss=0.2784, pruned_loss=0.0551, over 8191.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2802, pruned_loss=0.05656, over 1612312.38 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:13,195 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242164.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:13,775 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242165.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:18,607 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242171.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:34,906 INFO [zipformer.py:1185] (2/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242193.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:35:42,648 INFO [train.py:901] (2/4) Epoch 30, batch 7800, loss[loss=0.1728, simple_loss=0.2568, pruned_loss=0.04438, over 7693.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2799, pruned_loss=0.05595, over 1616784.09 frames. 
], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:44,308 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([1.6091, 2.2744, 1.4435, 2.4124, 2.0432, 1.2317, 1.8820, 2.3901], + device='cuda:2'), covar=tensor([0.1394, 0.0478, 0.1551, 0.0595, 0.0871, 0.1947, 0.1180, 0.0690], + device='cuda:2'), in_proj_covar=tensor([0.0358, 0.0243, 0.0343, 0.0313, 0.0301, 0.0347, 0.0347, 0.0319], + device='cuda:2'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:2') +2023-02-09 04:35:57,874 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.396e+02 3.014e+02 3.960e+02 8.063e+02, threshold=6.029e+02, percent-clipped=4.0 +2023-02-09 04:36:11,177 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242244.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:18,002 INFO [train.py:901] (2/4) Epoch 30, batch 7850, loss[loss=0.1925, simple_loss=0.2793, pruned_loss=0.05284, over 8039.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2793, pruned_loss=0.05556, over 1620416.25 frames. ], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:36,021 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:47,919 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.7574, 1.6499, 1.8590, 1.7376, 1.0670, 1.7111, 2.1977, 1.9610], + device='cuda:2'), covar=tensor([0.0509, 0.1252, 0.1721, 0.1448, 0.0609, 0.1438, 0.0668, 0.0653], + device='cuda:2'), in_proj_covar=tensor([0.0101, 0.0154, 0.0191, 0.0163, 0.0102, 0.0164, 0.0113, 0.0148], + device='cuda:2'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:2') +2023-02-09 04:36:52,301 INFO [train.py:901] (2/4) Epoch 30, batch 7900, loss[loss=0.2065, simple_loss=0.2967, pruned_loss=0.05817, over 8507.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2794, pruned_loss=0.05552, over 1616573.08 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:54,992 INFO [zipformer.py:1185] (2/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242308.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:37:06,487 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.359e+02 2.894e+02 3.889e+02 1.272e+03, threshold=5.788e+02, percent-clipped=10.0 +2023-02-09 04:37:08,036 INFO [zipformer.py:1185] (2/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242327.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:20,020 INFO [scaling.py:679] (2/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 04:37:26,309 INFO [train.py:901] (2/4) Epoch 30, batch 7950, loss[loss=0.1805, simple_loss=0.2659, pruned_loss=0.04753, over 8087.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.278, pruned_loss=0.05452, over 1618113.49 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:37:26,519 INFO [zipformer.py:1185] (2/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242354.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:43,567 INFO [zipformer.py:1185] (2/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242379.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:38:00,600 INFO [train.py:901] (2/4) Epoch 30, batch 8000, loss[loss=0.2131, simple_loss=0.3019, pruned_loss=0.06214, over 8239.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2785, pruned_loss=0.05446, over 1618980.99 frames. 
], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:38:14,883 INFO [optim.py:369] (2/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.478e+02 2.968e+02 3.707e+02 1.083e+03, threshold=5.936e+02, percent-clipped=5.0 +2023-02-09 04:38:24,833 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([3.5111, 1.7962, 3.6657, 1.9374, 3.3208, 3.0961, 3.3574, 3.2881], + device='cuda:2'), covar=tensor([0.0829, 0.3610, 0.0953, 0.4376, 0.1016, 0.0992, 0.0697, 0.0734], + device='cuda:2'), in_proj_covar=tensor([0.0705, 0.0677, 0.0764, 0.0680, 0.0764, 0.0652, 0.0663, 0.0740], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:2') +2023-02-09 04:38:28,339 INFO [zipformer.py:2431] (2/4) attn_weights_entropy = tensor([2.2807, 1.9833, 2.4494, 2.1191, 2.5172, 2.3211, 2.1522, 1.3651], + device='cuda:2'), covar=tensor([0.5683, 0.5101, 0.2325, 0.3973, 0.2826, 0.3463, 0.2001, 0.5801], + device='cuda:2'), in_proj_covar=tensor([0.0972, 0.1039, 0.0855, 0.1016, 0.1035, 0.0950, 0.0783, 0.0863], + device='cuda:2'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:2') +2023-02-09 04:38:35,235 INFO [train.py:901] (2/4) Epoch 30, batch 8050, loss[loss=0.2452, simple_loss=0.3112, pruned_loss=0.08959, over 7039.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.279, pruned_loss=0.0558, over 1601086.43 frames. ], batch size: 71, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:38:58,154 INFO [train.py:1165] (2/4) Done! diff --git a/log/log-train-2023-02-08-23-42-53-3 b/log/log-train-2023-02-08-23-42-53-3 new file mode 100644 index 0000000000000000000000000000000000000000..dba67c8911114220b512eb53ccbc5944eb80cf7e --- /dev/null +++ b/log/log-train-2023-02-08-23-42-53-3 @@ -0,0 +1,2623 @@ +2023-02-08 23:42:53,786 INFO [train.py:973] (3/4) Training started +2023-02-08 23:42:53,786 INFO [train.py:983] (3/4) Device: cuda:3 +2023-02-08 23:42:53,850 INFO [train.py:992] (3/4) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'b3d0d34-dirty', 'icefall-git-date': 'Sat Feb 4 14:53:48 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r8n07', 'IP address': '10.1.8.7'}, 'world_size': 4, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 30, 'start_epoch': 28, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 10, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': 
'768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500} +2023-02-08 23:42:53,850 INFO [train.py:994] (3/4) About to create model +2023-02-08 23:42:54,169 INFO [zipformer.py:402] (3/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-08 23:42:54,181 INFO [train.py:998] (3/4) Number of model parameters: 20697573 +2023-02-08 23:42:54,181 INFO [checkpoint.py:112] (3/4) Loading checkpoint from pruned_transducer_stateless7_streaming/exp/v1/epoch-27.pt +2023-02-08 23:43:03,645 INFO [train.py:1013] (3/4) Using DDP +2023-02-08 23:43:03,870 INFO [train.py:1030] (3/4) Loading optimizer state dict +2023-02-08 23:43:04,078 INFO [train.py:1038] (3/4) Loading scheduler state dict +2023-02-08 23:43:04,078 INFO [asr_datamodule.py:420] (3/4) About to get the shuffled train-clean-100, train-clean-360 and train-other-500 cuts +2023-02-08 23:43:04,265 INFO [asr_datamodule.py:224] (3/4) Enable MUSAN +2023-02-08 23:43:04,265 INFO [asr_datamodule.py:225] (3/4) About to get Musan cuts +2023-02-08 23:43:05,836 INFO [asr_datamodule.py:249] (3/4) Enable SpecAugment +2023-02-08 23:43:05,836 INFO [asr_datamodule.py:250] (3/4) Time warp factor: 80 +2023-02-08 23:43:05,836 INFO [asr_datamodule.py:260] (3/4) Num frame mask: 10 +2023-02-08 23:43:05,836 INFO [asr_datamodule.py:273] (3/4) About to create train dataset +2023-02-08 23:43:05,837 INFO [asr_datamodule.py:300] (3/4) Using DynamicBucketingSampler. +2023-02-08 23:43:05,857 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-08 23:43:08,000 INFO [asr_datamodule.py:316] (3/4) About to create train dataloader +2023-02-08 23:43:08,001 INFO [asr_datamodule.py:430] (3/4) About to get dev-clean cuts +2023-02-08 23:43:08,002 INFO [asr_datamodule.py:437] (3/4) About to get dev-other cuts +2023-02-08 23:43:08,003 INFO [asr_datamodule.py:347] (3/4) About to create dev dataset +2023-02-08 23:43:08,360 INFO [asr_datamodule.py:364] (3/4) About to create dev dataloader +2023-02-08 23:43:08,360 INFO [train.py:1122] (3/4) Loading grad scaler state dict +2023-02-08 23:43:20,252 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-08 23:43:25,773 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. limit=2.0 +2023-02-08 23:43:26,062 INFO [train.py:901] (3/4) Epoch 28, batch 0, loss[loss=0.2773, simple_loss=0.3445, pruned_loss=0.105, over 8338.00 frames. ], tot_loss[loss=0.2773, simple_loss=0.3445, pruned_loss=0.105, over 8338.00 frames. 
], batch size: 26, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:43:26,063 INFO [train.py:926] (3/4) Computing validation loss +2023-02-08 23:43:38,187 INFO [train.py:935] (3/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2712, pruned_loss=0.03579, over 944034.00 frames. +2023-02-08 23:43:38,189 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6172MB +2023-02-08 23:43:48,606 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218250.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:43:59,124 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-08 23:43:59,745 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218260.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:44:26,827 INFO [train.py:901] (3/4) Epoch 28, batch 50, loss[loss=0.1722, simple_loss=0.2535, pruned_loss=0.04542, over 7681.00 frames. ], tot_loss[loss=0.2039, simple_loss=0.2884, pruned_loss=0.05966, over 368830.33 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:44:44,702 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-08 23:44:48,220 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.503e+02 3.099e+02 3.838e+02 3.677e+03, threshold=6.198e+02, percent-clipped=7.0 +2023-02-08 23:45:09,768 INFO [train.py:901] (3/4) Epoch 28, batch 100, loss[loss=0.1583, simple_loss=0.2476, pruned_loss=0.03449, over 7250.00 frames. ], tot_loss[loss=0.2021, simple_loss=0.2869, pruned_loss=0.05862, over 646435.40 frames. ], batch size: 16, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:45:12,261 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-08 23:45:21,937 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.67 vs. limit=5.0 +2023-02-08 23:45:42,217 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218375.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:45:52,964 INFO [train.py:901] (3/4) Epoch 28, batch 150, loss[loss=0.2217, simple_loss=0.3054, pruned_loss=0.06899, over 8029.00 frames. ], tot_loss[loss=0.203, simple_loss=0.2872, pruned_loss=0.05942, over 861149.95 frames. ], batch size: 22, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:46:01,150 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218397.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:46:12,823 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.615e+02 2.274e+02 2.796e+02 3.416e+02 5.816e+02, threshold=5.591e+02, percent-clipped=0.0 +2023-02-08 23:46:19,722 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218422.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:46:32,312 INFO [train.py:901] (3/4) Epoch 28, batch 200, loss[loss=0.2025, simple_loss=0.2894, pruned_loss=0.05781, over 8294.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2842, pruned_loss=0.05762, over 1028911.67 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:46:50,636 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218462.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:47:10,726 INFO [train.py:901] (3/4) Epoch 28, batch 250, loss[loss=0.1621, simple_loss=0.2421, pruned_loss=0.04104, over 7655.00 frames. 
], tot_loss[loss=0.1977, simple_loss=0.2819, pruned_loss=0.05681, over 1159440.38 frames. ], batch size: 19, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:47:23,075 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-08 23:47:31,295 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.405e+02 2.917e+02 3.543e+02 7.929e+02, threshold=5.833e+02, percent-clipped=6.0 +2023-02-08 23:47:33,429 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-08 23:47:41,366 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218527.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:47:48,884 INFO [train.py:901] (3/4) Epoch 28, batch 300, loss[loss=0.2228, simple_loss=0.3082, pruned_loss=0.06874, over 8480.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2829, pruned_loss=0.05782, over 1261879.64 frames. ], batch size: 29, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:47:53,404 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218544.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:14,322 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218572.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:18,159 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218577.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:25,696 INFO [train.py:901] (3/4) Epoch 28, batch 350, loss[loss=0.1862, simple_loss=0.2743, pruned_loss=0.04904, over 8590.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2838, pruned_loss=0.05819, over 1342752.24 frames. ], batch size: 49, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:48:28,674 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218592.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:48:29,483 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0123, 2.0399, 3.2701, 2.5324, 2.8620, 2.1814, 1.8781, 1.7359], + device='cuda:3'), covar=tensor([0.7750, 0.6949, 0.2414, 0.4547, 0.3588, 0.4418, 0.2957, 0.6298], + device='cuda:3'), in_proj_covar=tensor([0.0959, 0.1017, 0.0823, 0.0986, 0.1018, 0.0922, 0.0763, 0.0846], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-08 23:48:43,888 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 2.330e+02 2.853e+02 3.797e+02 9.826e+02, threshold=5.707e+02, percent-clipped=4.0 +2023-02-08 23:49:00,083 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218631.0, num_to_drop=1, layers_to_drop={1} +2023-02-08 23:49:04,816 INFO [train.py:901] (3/4) Epoch 28, batch 400, loss[loss=0.1988, simple_loss=0.2864, pruned_loss=0.05563, over 8514.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.2837, pruned_loss=0.05786, over 1409164.74 frames. 
], batch size: 28, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:49:16,414 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5512, 1.8825, 2.8553, 1.4712, 2.0254, 1.9280, 1.7093, 2.2373], + device='cuda:3'), covar=tensor([0.2033, 0.2853, 0.0921, 0.4836, 0.2102, 0.3463, 0.2543, 0.2301], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0637, 0.0566, 0.0672, 0.0660, 0.0615, 0.0568, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-08 23:49:17,853 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218656.0, num_to_drop=1, layers_to_drop={0} +2023-02-08 23:49:19,982 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218659.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:49:40,567 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218687.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:49:41,128 INFO [train.py:901] (3/4) Epoch 28, batch 450, loss[loss=0.198, simple_loss=0.2753, pruned_loss=0.06029, over 8295.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2828, pruned_loss=0.05749, over 1453736.92 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:49:59,789 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.362e+02 2.836e+02 3.643e+02 9.062e+02, threshold=5.672e+02, percent-clipped=2.0 +2023-02-08 23:50:18,546 INFO [train.py:901] (3/4) Epoch 28, batch 500, loss[loss=0.2004, simple_loss=0.2856, pruned_loss=0.05758, over 8547.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2838, pruned_loss=0.05804, over 1491748.28 frames. ], batch size: 39, lr: 2.71e-03, grad_scale: 16.0 +2023-02-08 23:50:57,133 INFO [train.py:901] (3/4) Epoch 28, batch 550, loss[loss=0.1777, simple_loss=0.2683, pruned_loss=0.04354, over 8325.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2823, pruned_loss=0.05738, over 1521291.96 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:51:05,273 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5919, 2.4267, 3.1531, 2.5908, 3.1196, 2.6485, 2.5440, 1.8463], + device='cuda:3'), covar=tensor([0.5409, 0.4994, 0.2192, 0.4103, 0.2785, 0.3351, 0.1891, 0.5938], + device='cuda:3'), in_proj_covar=tensor([0.0963, 0.1021, 0.0829, 0.0991, 0.1024, 0.0928, 0.0768, 0.0849], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-08 23:51:16,041 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.820e+02 2.392e+02 2.925e+02 3.560e+02 1.211e+03, threshold=5.850e+02, percent-clipped=4.0 +2023-02-08 23:51:29,401 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7925, 2.6443, 1.9672, 2.4746, 2.3655, 1.7605, 2.2783, 2.3314], + device='cuda:3'), covar=tensor([0.1487, 0.0434, 0.1228, 0.0648, 0.0771, 0.1549, 0.0982, 0.0974], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0244, 0.0342, 0.0315, 0.0305, 0.0349, 0.0352, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-08 23:51:30,193 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218833.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:51:33,473 INFO [train.py:901] (3/4) Epoch 28, batch 600, loss[loss=0.2595, simple_loss=0.3358, pruned_loss=0.09164, over 8669.00 frames. 
], tot_loss[loss=0.2001, simple_loss=0.2838, pruned_loss=0.05819, over 1544857.98 frames. ], batch size: 34, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:51:33,881 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.79 vs. limit=5.0 +2023-02-08 23:51:53,194 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218858.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:51:56,611 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-08 23:52:04,161 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218871.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:06,123 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.77 vs. limit=5.0 +2023-02-08 23:52:10,233 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6356, 1.9088, 1.9845, 1.4125, 2.1495, 1.4600, 0.6641, 1.9078], + device='cuda:3'), covar=tensor([0.0737, 0.0416, 0.0328, 0.0667, 0.0425, 0.0956, 0.0990, 0.0360], + device='cuda:3'), in_proj_covar=tensor([0.0474, 0.0412, 0.0366, 0.0458, 0.0395, 0.0553, 0.0402, 0.0442], + device='cuda:3'), out_proj_covar=tensor([1.2539e-04, 1.0696e-04, 9.5266e-05, 1.1974e-04, 1.0344e-04, 1.5414e-04, + 1.0732e-04, 1.1585e-04], device='cuda:3') +2023-02-08 23:52:18,546 INFO [train.py:901] (3/4) Epoch 28, batch 650, loss[loss=0.2359, simple_loss=0.3177, pruned_loss=0.07701, over 8533.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2835, pruned_loss=0.05814, over 1558626.51 frames. ], batch size: 31, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:40,038 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.221e+02 2.637e+02 3.403e+02 7.509e+02, threshold=5.274e+02, percent-clipped=1.0 +2023-02-08 23:52:41,079 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218915.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:54,643 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8751, 3.8076, 3.5321, 1.9273, 3.3667, 3.4842, 3.4282, 3.3284], + device='cuda:3'), covar=tensor([0.0832, 0.0595, 0.0951, 0.4371, 0.0978, 0.1191, 0.1368, 0.0952], + device='cuda:3'), in_proj_covar=tensor([0.0540, 0.0461, 0.0445, 0.0555, 0.0444, 0.0463, 0.0439, 0.0406], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-08 23:52:55,975 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=218936.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:52:57,374 INFO [train.py:901] (3/4) Epoch 28, batch 700, loss[loss=0.1914, simple_loss=0.2904, pruned_loss=0.04625, over 8282.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2833, pruned_loss=0.05791, over 1571576.20 frames. 
], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:52:59,064 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218940.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:01,202 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=218943.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:18,867 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=218968.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:31,049 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=218983.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:33,197 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=218986.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:34,531 INFO [train.py:901] (3/4) Epoch 28, batch 750, loss[loss=0.2104, simple_loss=0.2976, pruned_loss=0.06157, over 8246.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2845, pruned_loss=0.05822, over 1583600.64 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:53:46,823 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219002.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:53:55,127 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.438e+02 2.280e+02 2.810e+02 3.388e+02 7.203e+02, threshold=5.620e+02, percent-clipped=6.0 +2023-02-08 23:53:55,168 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-08 23:54:04,608 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-08 23:54:12,584 INFO [train.py:901] (3/4) Epoch 28, batch 800, loss[loss=0.2244, simple_loss=0.2999, pruned_loss=0.07445, over 8359.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2844, pruned_loss=0.05837, over 1591836.39 frames. ], batch size: 24, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:12,707 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2426, 3.1519, 2.9383, 1.6002, 2.8151, 2.9595, 2.7911, 2.8058], + device='cuda:3'), covar=tensor([0.1225, 0.0913, 0.1338, 0.4692, 0.1310, 0.1233, 0.1784, 0.1181], + device='cuda:3'), in_proj_covar=tensor([0.0541, 0.0460, 0.0445, 0.0555, 0.0444, 0.0464, 0.0438, 0.0405], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-08 23:54:13,435 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219039.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:22,038 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219051.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:54:49,140 INFO [train.py:901] (3/4) Epoch 28, batch 850, loss[loss=0.199, simple_loss=0.2879, pruned_loss=0.05504, over 8655.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.284, pruned_loss=0.0581, over 1598090.78 frames. 
], batch size: 39, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:54:50,769 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219090.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:55:10,262 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.432e+02 3.183e+02 3.929e+02 8.024e+02, threshold=6.365e+02, percent-clipped=6.0 +2023-02-08 23:55:11,247 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6794, 2.4975, 3.1594, 2.6463, 3.0651, 2.6764, 2.5707, 2.4329], + device='cuda:3'), covar=tensor([0.4526, 0.4376, 0.1780, 0.3074, 0.2193, 0.2558, 0.1567, 0.4160], + device='cuda:3'), in_proj_covar=tensor([0.0965, 0.1023, 0.0830, 0.0991, 0.1022, 0.0928, 0.0769, 0.0848], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-08 23:55:15,817 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-08 23:55:27,563 INFO [train.py:901] (3/4) Epoch 28, batch 900, loss[loss=0.1898, simple_loss=0.2844, pruned_loss=0.04757, over 7816.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2837, pruned_loss=0.05825, over 1601094.93 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:03,866 INFO [train.py:901] (3/4) Epoch 28, batch 950, loss[loss=0.1683, simple_loss=0.2524, pruned_loss=0.04215, over 7697.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2827, pruned_loss=0.05712, over 1605667.69 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:15,311 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2411, 1.4883, 3.3809, 1.0372, 2.9879, 2.7682, 3.0725, 2.9842], + device='cuda:3'), covar=tensor([0.0898, 0.4131, 0.0825, 0.4573, 0.1365, 0.1233, 0.0833, 0.0928], + device='cuda:3'), in_proj_covar=tensor([0.0676, 0.0659, 0.0729, 0.0654, 0.0739, 0.0631, 0.0637, 0.0711], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-08 23:56:22,891 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.715e+02 2.524e+02 3.053e+02 4.249e+02 9.516e+02, threshold=6.106e+02, percent-clipped=7.0 +2023-02-08 23:56:29,705 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219221.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:56:34,873 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-08 23:56:40,766 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3514, 1.4002, 4.5469, 1.7276, 4.0187, 3.7492, 4.0902, 3.9557], + device='cuda:3'), covar=tensor([0.0657, 0.5109, 0.0537, 0.4381, 0.1153, 0.1049, 0.0633, 0.0736], + device='cuda:3'), in_proj_covar=tensor([0.0676, 0.0659, 0.0727, 0.0653, 0.0739, 0.0631, 0.0636, 0.0711], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-08 23:56:43,595 INFO [train.py:901] (3/4) Epoch 28, batch 1000, loss[loss=0.2055, simple_loss=0.2913, pruned_loss=0.05981, over 8241.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2829, pruned_loss=0.05718, over 1606567.52 frames. 
], batch size: 24, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:56:46,622 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219242.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:04,459 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219267.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:11,566 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-08 23:57:19,442 INFO [train.py:901] (3/4) Epoch 28, batch 1050, loss[loss=0.2046, simple_loss=0.287, pruned_loss=0.0611, over 8458.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2824, pruned_loss=0.05682, over 1613101.38 frames. ], batch size: 27, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:57:23,683 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-08 23:57:33,336 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219307.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:38,279 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.456e+02 2.957e+02 3.788e+02 8.190e+02, threshold=5.915e+02, percent-clipped=1.0 +2023-02-08 23:57:47,827 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219327.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:52,100 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219332.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:57:52,297 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-08 23:57:56,811 INFO [train.py:901] (3/4) Epoch 28, batch 1100, loss[loss=0.2463, simple_loss=0.337, pruned_loss=0.07775, over 8466.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2822, pruned_loss=0.05658, over 1616541.25 frames. ], batch size: 25, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:03,512 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219346.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:13,605 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219360.0, num_to_drop=1, layers_to_drop={1} +2023-02-08 23:58:30,142 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219383.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:58:33,477 INFO [train.py:901] (3/4) Epoch 28, batch 1150, loss[loss=0.2127, simple_loss=0.3006, pruned_loss=0.0624, over 8096.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2824, pruned_loss=0.05708, over 1613806.73 frames. ], batch size: 23, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:58:37,170 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-08 23:58:52,534 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.530e+02 2.386e+02 3.071e+02 3.782e+02 1.293e+03, threshold=6.141e+02, percent-clipped=2.0 +2023-02-08 23:59:07,140 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219434.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:10,014 INFO [train.py:901] (3/4) Epoch 28, batch 1200, loss[loss=0.175, simple_loss=0.253, pruned_loss=0.04852, over 7810.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2814, pruned_loss=0.05638, over 1617818.17 frames. 
], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:13,032 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219442.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:27,990 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219461.0, num_to_drop=0, layers_to_drop=set() +2023-02-08 23:59:47,599 INFO [train.py:901] (3/4) Epoch 28, batch 1250, loss[loss=0.1831, simple_loss=0.2496, pruned_loss=0.05837, over 7709.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2802, pruned_loss=0.05597, over 1613637.68 frames. ], batch size: 18, lr: 2.71e-03, grad_scale: 8.0 +2023-02-08 23:59:48,520 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7938, 1.6702, 1.9062, 1.7113, 0.9458, 1.6368, 2.2519, 2.2658], + device='cuda:3'), covar=tensor([0.0452, 0.1278, 0.1605, 0.1416, 0.0649, 0.1510, 0.0654, 0.0526], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0188, 0.0161, 0.0101, 0.0163, 0.0112, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-08 23:59:55,045 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:02,378 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219508.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:06,615 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.598e+02 2.357e+02 2.809e+02 3.466e+02 7.121e+02, threshold=5.618e+02, percent-clipped=3.0 +2023-02-09 00:00:23,411 INFO [train.py:901] (3/4) Epoch 28, batch 1300, loss[loss=0.1924, simple_loss=0.2824, pruned_loss=0.05124, over 7808.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2823, pruned_loss=0.05685, over 1614870.73 frames. ], batch size: 20, lr: 2.71e-03, grad_scale: 8.0 +2023-02-09 00:00:25,027 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.7824, 2.9232, 2.5733, 4.1047, 1.8562, 2.3339, 2.7348, 2.9686], + device='cuda:3'), covar=tensor([0.0622, 0.0779, 0.0717, 0.0203, 0.0975, 0.1086, 0.0792, 0.0750], + device='cuda:3'), in_proj_covar=tensor([0.0227, 0.0191, 0.0240, 0.0209, 0.0199, 0.0242, 0.0245, 0.0202], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 00:00:31,523 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219549.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:00:42,960 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219565.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:01:02,340 INFO [train.py:901] (3/4) Epoch 28, batch 1350, loss[loss=0.2085, simple_loss=0.277, pruned_loss=0.07001, over 7694.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2824, pruned_loss=0.05705, over 1616427.95 frames. ], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:01:22,006 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.513e+02 2.365e+02 2.856e+02 3.377e+02 7.819e+02, threshold=5.713e+02, percent-clipped=4.0 +2023-02-09 00:01:39,672 INFO [train.py:901] (3/4) Epoch 28, batch 1400, loss[loss=0.1877, simple_loss=0.2677, pruned_loss=0.05387, over 8082.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.05716, over 1614995.59 frames. 
], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:09,801 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219680.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:15,329 INFO [train.py:901] (3/4) Epoch 28, batch 1450, loss[loss=0.1794, simple_loss=0.2502, pruned_loss=0.0543, over 7667.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.0563, over 1613169.78 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:24,266 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:25,400 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85 +2023-02-09 00:02:28,351 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219704.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:02:33,318 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.1875, 3.0798, 2.8727, 1.6156, 2.7867, 2.8906, 2.7197, 2.7696], + device='cuda:3'), covar=tensor([0.1137, 0.0863, 0.1219, 0.4508, 0.1191, 0.1246, 0.1650, 0.1062], + device='cuda:3'), in_proj_covar=tensor([0.0540, 0.0460, 0.0448, 0.0555, 0.0444, 0.0465, 0.0439, 0.0406], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:02:36,733 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.298e+02 2.874e+02 3.536e+02 7.746e+02, threshold=5.748e+02, percent-clipped=3.0 +2023-02-09 00:02:39,220 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:43,526 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:02:54,207 INFO [train.py:901] (3/4) Epoch 28, batch 1500, loss[loss=0.2276, simple_loss=0.3089, pruned_loss=0.07316, over 8548.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2799, pruned_loss=0.0561, over 1615795.38 frames. ], batch size: 31, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:02:57,277 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:05,719 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219754.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:23,844 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219779.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:25,274 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219781.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:29,883 INFO [train.py:901] (3/4) Epoch 28, batch 1550, loss[loss=0.233, simple_loss=0.3129, pruned_loss=0.07653, over 8307.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2797, pruned_loss=0.05626, over 1607980.28 frames. 
], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:03:42,633 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:03:49,436 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.435e+02 2.945e+02 3.827e+02 6.900e+02, threshold=5.889e+02, percent-clipped=4.0 +2023-02-09 00:03:54,634 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219819.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:03:55,271 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5664, 1.5010, 1.7966, 1.3755, 0.9357, 1.5573, 1.5583, 1.4053], + device='cuda:3'), covar=tensor([0.0584, 0.1153, 0.1571, 0.1464, 0.0572, 0.1361, 0.0676, 0.0674], + device='cuda:3'), in_proj_covar=tensor([0.0098, 0.0152, 0.0188, 0.0161, 0.0101, 0.0162, 0.0112, 0.0145], + device='cuda:3'), out_proj_covar=tensor([0.0006, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 00:04:03,145 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:08,591 INFO [train.py:901] (3/4) Epoch 28, batch 1600, loss[loss=0.1936, simple_loss=0.2714, pruned_loss=0.0579, over 7793.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2796, pruned_loss=0.05595, over 1605847.78 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:04:18,707 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=219852.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:35,336 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=219875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:04:44,471 INFO [train.py:901] (3/4) Epoch 28, batch 1650, loss[loss=0.2549, simple_loss=0.3282, pruned_loss=0.09081, over 8536.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2801, pruned_loss=0.05673, over 1606000.28 frames. ], batch size: 49, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:02,695 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 2.482e+02 2.898e+02 3.443e+02 5.647e+02, threshold=5.797e+02, percent-clipped=0.0 +2023-02-09 00:05:20,308 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=219936.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:21,479 INFO [train.py:901] (3/4) Epoch 28, batch 1700, loss[loss=0.1707, simple_loss=0.2572, pruned_loss=0.0421, over 7823.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2805, pruned_loss=0.05682, over 1608587.82 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:05:39,123 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=219961.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:43,286 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=219967.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:05:57,781 INFO [train.py:901] (3/4) Epoch 28, batch 1750, loss[loss=0.2328, simple_loss=0.3233, pruned_loss=0.07114, over 8622.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2805, pruned_loss=0.0568, over 1609755.99 frames. 
], batch size: 31, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:06:17,596 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.415e+02 2.339e+02 2.848e+02 3.606e+02 1.047e+03, threshold=5.695e+02, percent-clipped=4.0 +2023-02-09 00:06:34,452 INFO [train.py:901] (3/4) Epoch 28, batch 1800, loss[loss=0.1711, simple_loss=0.2607, pruned_loss=0.04071, over 8027.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2798, pruned_loss=0.05644, over 1610687.75 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:02,982 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220075.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:07:11,953 INFO [train.py:901] (3/4) Epoch 28, batch 1850, loss[loss=0.2119, simple_loss=0.2825, pruned_loss=0.07064, over 6841.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2815, pruned_loss=0.05683, over 1616688.50 frames. ], batch size: 15, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:07:20,521 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220100.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:07:23,908 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220105.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:30,313 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.328e+02 2.682e+02 3.608e+02 8.535e+02, threshold=5.364e+02, percent-clipped=7.0 +2023-02-09 00:07:31,752 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:38,072 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220125.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:07:47,172 INFO [train.py:901] (3/4) Epoch 28, batch 1900, loss[loss=0.1657, simple_loss=0.2466, pruned_loss=0.04235, over 7785.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2822, pruned_loss=0.05711, over 1618024.59 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:19,369 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 00:08:25,768 INFO [train.py:901] (3/4) Epoch 28, batch 1950, loss[loss=0.174, simple_loss=0.2618, pruned_loss=0.04314, over 8246.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2816, pruned_loss=0.05665, over 1619529.97 frames. ], batch size: 22, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:08:32,998 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 00:08:44,716 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.461e+02 2.916e+02 3.869e+02 7.609e+02, threshold=5.833e+02, percent-clipped=8.0 +2023-02-09 00:08:48,262 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220219.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:51,070 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:08:53,654 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 00:09:01,370 INFO [train.py:901] (3/4) Epoch 28, batch 2000, loss[loss=0.1922, simple_loss=0.2829, pruned_loss=0.05073, over 8492.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2804, pruned_loss=0.05558, over 1619103.65 frames. 
], batch size: 28, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:02,869 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:07,814 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220247.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:08,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220248.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:29,133 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:09:37,452 INFO [train.py:901] (3/4) Epoch 28, batch 2050, loss[loss=0.1516, simple_loss=0.2355, pruned_loss=0.0339, over 7254.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2797, pruned_loss=0.05581, over 1613233.72 frames. ], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:09:58,201 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.398e+02 2.757e+02 3.324e+02 6.340e+02, threshold=5.514e+02, percent-clipped=2.0 +2023-02-09 00:10:03,996 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4675, 2.3891, 3.0722, 2.5694, 2.9423, 2.5385, 2.3516, 1.9293], + device='cuda:3'), covar=tensor([0.5609, 0.5131, 0.2152, 0.3899, 0.2714, 0.3230, 0.1930, 0.5577], + device='cuda:3'), in_proj_covar=tensor([0.0970, 0.1029, 0.0835, 0.0997, 0.1028, 0.0934, 0.0773, 0.0853], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 00:10:12,779 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220334.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:10:15,437 INFO [train.py:901] (3/4) Epoch 28, batch 2100, loss[loss=0.1886, simple_loss=0.2692, pruned_loss=0.05398, over 7905.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2803, pruned_loss=0.05607, over 1614485.37 frames. ], batch size: 20, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:10:20,353 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-09 00:10:42,393 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.59 vs. limit=2.0 +2023-02-09 00:10:51,258 INFO [train.py:901] (3/4) Epoch 28, batch 2150, loss[loss=0.2704, simple_loss=0.3445, pruned_loss=0.09815, over 8589.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2812, pruned_loss=0.05647, over 1616417.39 frames. ], batch size: 34, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:11,485 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.504e+02 2.973e+02 4.041e+02 1.001e+03, threshold=5.945e+02, percent-clipped=8.0 +2023-02-09 00:11:28,331 INFO [train.py:901] (3/4) Epoch 28, batch 2200, loss[loss=0.2017, simple_loss=0.2853, pruned_loss=0.05904, over 8467.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05671, over 1612601.39 frames. ], batch size: 29, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:11:36,287 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220449.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:11:44,017 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220460.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:03,396 INFO [train.py:901] (3/4) Epoch 28, batch 2250, loss[loss=0.2743, simple_loss=0.3386, pruned_loss=0.105, over 7206.00 frames. 
], tot_loss[loss=0.1977, simple_loss=0.2817, pruned_loss=0.05685, over 1614702.28 frames. ], batch size: 72, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:09,262 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220496.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:15,027 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.43 vs. limit=5.0 +2023-02-09 00:12:22,281 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.608e+02 2.331e+02 2.835e+02 3.325e+02 7.200e+02, threshold=5.671e+02, percent-clipped=3.0 +2023-02-09 00:12:27,565 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220521.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:12:41,523 INFO [train.py:901] (3/4) Epoch 28, batch 2300, loss[loss=0.1816, simple_loss=0.2662, pruned_loss=0.04851, over 8344.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2817, pruned_loss=0.057, over 1612323.09 frames. ], batch size: 26, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:12:44,394 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8360, 1.4281, 3.9883, 1.4481, 3.5645, 3.2466, 3.6172, 3.5074], + device='cuda:3'), covar=tensor([0.0661, 0.4735, 0.0671, 0.4530, 0.1209, 0.1126, 0.0704, 0.0765], + device='cuda:3'), in_proj_covar=tensor([0.0682, 0.0670, 0.0739, 0.0664, 0.0752, 0.0639, 0.0646, 0.0722], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:12:51,188 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3801, 1.1766, 2.3311, 1.2983, 2.1837, 2.4952, 2.7022, 2.0701], + device='cuda:3'), covar=tensor([0.1177, 0.1559, 0.0419, 0.2015, 0.0707, 0.0406, 0.0611, 0.0721], + device='cuda:3'), in_proj_covar=tensor([0.0308, 0.0327, 0.0293, 0.0321, 0.0323, 0.0277, 0.0441, 0.0308], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 00:12:59,748 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:06,034 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220573.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:07,482 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220575.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:16,616 INFO [train.py:901] (3/4) Epoch 28, batch 2350, loss[loss=0.1867, simple_loss=0.2878, pruned_loss=0.04283, over 8613.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2818, pruned_loss=0.05713, over 1612678.44 frames. 
], batch size: 31, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:13:18,250 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220590.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:18,835 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220591.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:35,683 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.370e+02 2.329e+02 2.956e+02 3.826e+02 8.837e+02, threshold=5.912e+02, percent-clipped=4.0 +2023-02-09 00:13:36,675 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220615.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:40,248 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:13:53,607 INFO [train.py:901] (3/4) Epoch 28, batch 2400, loss[loss=0.1599, simple_loss=0.249, pruned_loss=0.03541, over 7560.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05669, over 1617060.95 frames. ], batch size: 18, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:16,684 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220669.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:29,716 INFO [train.py:901] (3/4) Epoch 28, batch 2450, loss[loss=0.1781, simple_loss=0.2798, pruned_loss=0.03817, over 8251.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2816, pruned_loss=0.05697, over 1617346.23 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:14:42,732 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220706.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:14:48,782 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.600e+02 2.507e+02 3.309e+02 3.917e+02 8.053e+02, threshold=6.618e+02, percent-clipped=4.0 +2023-02-09 00:15:03,093 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=220735.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:15:05,098 INFO [train.py:901] (3/4) Epoch 28, batch 2500, loss[loss=0.1995, simple_loss=0.2915, pruned_loss=0.05372, over 8340.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2827, pruned_loss=0.05745, over 1617550.65 frames. ], batch size: 26, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:15:15,123 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0298, 1.0437, 0.9850, 1.2303, 0.6495, 0.8976, 1.0149, 1.0678], + device='cuda:3'), covar=tensor([0.0626, 0.0612, 0.0703, 0.0490, 0.0837, 0.1007, 0.0520, 0.0486], + device='cuda:3'), in_proj_covar=tensor([0.0229, 0.0193, 0.0242, 0.0211, 0.0202, 0.0245, 0.0249, 0.0204], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 00:15:42,751 INFO [train.py:901] (3/4) Epoch 28, batch 2550, loss[loss=0.214, simple_loss=0.3034, pruned_loss=0.06231, over 8361.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.283, pruned_loss=0.05803, over 1612792.32 frames. 
], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:02,758 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.505e+02 3.011e+02 3.782e+02 1.017e+03, threshold=6.023e+02, percent-clipped=3.0 +2023-02-09 00:16:06,707 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220820.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:14,508 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220831.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:19,219 INFO [train.py:901] (3/4) Epoch 28, batch 2600, loss[loss=0.212, simple_loss=0.2933, pruned_loss=0.06531, over 7978.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.05818, over 1617593.27 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:16:20,648 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220840.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:24,298 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:32,228 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220856.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:16:57,370 INFO [train.py:901] (3/4) Epoch 28, batch 2650, loss[loss=0.1922, simple_loss=0.2711, pruned_loss=0.05669, over 7657.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2831, pruned_loss=0.05791, over 1619348.06 frames. ], batch size: 19, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:12,920 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5345, 2.3833, 1.6386, 2.1852, 2.0899, 1.5529, 2.1233, 2.1025], + device='cuda:3'), covar=tensor([0.1653, 0.0513, 0.1467, 0.0706, 0.0836, 0.1725, 0.0991, 0.1107], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0242, 0.0339, 0.0311, 0.0301, 0.0345, 0.0346, 0.0321], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 00:17:16,289 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.381e+02 2.801e+02 3.642e+02 5.464e+02, threshold=5.602e+02, percent-clipped=0.0 +2023-02-09 00:17:17,801 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=220917.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:21,291 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=220922.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:17:32,907 INFO [train.py:901] (3/4) Epoch 28, batch 2700, loss[loss=0.1784, simple_loss=0.2728, pruned_loss=0.04195, over 8325.00 frames. ], tot_loss[loss=0.2, simple_loss=0.284, pruned_loss=0.05798, over 1621892.33 frames. ], batch size: 25, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:17:50,477 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220962.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,046 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=220987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:09,541 INFO [train.py:901] (3/4) Epoch 28, batch 2750, loss[loss=0.1913, simple_loss=0.2791, pruned_loss=0.05177, over 8473.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2833, pruned_loss=0.05776, over 1623938.00 frames. 
], batch size: 27, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:11,779 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=220991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:29,675 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221013.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:31,036 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.419e+02 2.908e+02 3.517e+02 7.342e+02, threshold=5.816e+02, percent-clipped=5.0 +2023-02-09 00:18:31,972 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221016.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:43,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221032.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:18:47,246 INFO [train.py:901] (3/4) Epoch 28, batch 2800, loss[loss=0.1789, simple_loss=0.2755, pruned_loss=0.04113, over 8364.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2819, pruned_loss=0.05667, over 1622143.50 frames. ], batch size: 24, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:18:57,815 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5552, 1.4594, 1.8655, 1.2495, 1.2168, 1.8163, 0.2108, 1.2230], + device='cuda:3'), covar=tensor([0.1387, 0.1220, 0.0380, 0.0822, 0.2293, 0.0440, 0.1846, 0.1104], + device='cuda:3'), in_proj_covar=tensor([0.0201, 0.0205, 0.0136, 0.0224, 0.0277, 0.0146, 0.0174, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 00:19:22,678 INFO [train.py:901] (3/4) Epoch 28, batch 2850, loss[loss=0.1811, simple_loss=0.2735, pruned_loss=0.04434, over 7978.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2825, pruned_loss=0.05713, over 1620327.51 frames. ], batch size: 21, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:19:43,235 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.366e+02 2.856e+02 3.627e+02 6.501e+02, threshold=5.713e+02, percent-clipped=2.0 +2023-02-09 00:19:53,963 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221128.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:00,671 INFO [train.py:901] (3/4) Epoch 28, batch 2900, loss[loss=0.2189, simple_loss=0.2813, pruned_loss=0.07819, over 7417.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2822, pruned_loss=0.05685, over 1620943.56 frames. ], batch size: 17, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:04,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4768, 1.9991, 3.1033, 1.3516, 2.4359, 1.9285, 1.6891, 2.4035], + device='cuda:3'), covar=tensor([0.2244, 0.2868, 0.0987, 0.5290, 0.2021, 0.3747, 0.2709, 0.2433], + device='cuda:3'), in_proj_covar=tensor([0.0541, 0.0639, 0.0566, 0.0673, 0.0663, 0.0612, 0.0566, 0.0647], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:20:32,266 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 00:20:33,724 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221184.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:20:36,494 INFO [train.py:901] (3/4) Epoch 28, batch 2950, loss[loss=0.1859, simple_loss=0.2632, pruned_loss=0.05426, over 7266.00 frames. 
], tot_loss[loss=0.1983, simple_loss=0.2824, pruned_loss=0.05706, over 1621034.03 frames. ], batch size: 16, lr: 2.70e-03, grad_scale: 8.0 +2023-02-09 00:20:43,340 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8636, 2.3075, 3.7593, 1.7019, 3.0641, 2.3021, 1.9494, 2.8389], + device='cuda:3'), covar=tensor([0.1953, 0.2769, 0.0967, 0.4618, 0.1754, 0.3269, 0.2430, 0.2459], + device='cuda:3'), in_proj_covar=tensor([0.0541, 0.0640, 0.0567, 0.0674, 0.0663, 0.0612, 0.0567, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:20:55,452 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 2.299e+02 2.993e+02 3.879e+02 1.208e+03, threshold=5.985e+02, percent-clipped=10.0 +2023-02-09 00:21:01,648 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.00 vs. limit=5.0 +2023-02-09 00:21:13,543 INFO [train.py:901] (3/4) Epoch 28, batch 3000, loss[loss=0.2014, simple_loss=0.2826, pruned_loss=0.06011, over 8562.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2809, pruned_loss=0.05648, over 1613237.08 frames. ], batch size: 39, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:21:13,543 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 00:21:31,972 INFO [train.py:935] (3/4) Epoch 28, validation: loss=0.1712, simple_loss=0.2708, pruned_loss=0.03578, over 944034.00 frames. +2023-02-09 00:21:31,974 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6430MB +2023-02-09 00:21:47,610 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6528, 1.6635, 2.1370, 1.3915, 1.3306, 2.1017, 0.2785, 1.3729], + device='cuda:3'), covar=tensor([0.1583, 0.1142, 0.0394, 0.1002, 0.2206, 0.0452, 0.1815, 0.1187], + device='cuda:3'), in_proj_covar=tensor([0.0201, 0.0205, 0.0137, 0.0225, 0.0278, 0.0147, 0.0174, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 00:21:54,464 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:21:55,527 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-09 00:22:03,533 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.12 vs. limit=5.0 +2023-02-09 00:22:10,152 INFO [train.py:901] (3/4) Epoch 28, batch 3050, loss[loss=0.1824, simple_loss=0.2557, pruned_loss=0.0545, over 7432.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05687, over 1613906.86 frames. 
], batch size: 17, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:22:10,388 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221288.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:18,084 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221299.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:28,220 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221313.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:22:29,365 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 2.361e+02 2.830e+02 3.600e+02 1.199e+03, threshold=5.660e+02, percent-clipped=4.0 +2023-02-09 00:22:37,155 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5432, 2.0794, 3.3102, 1.6186, 1.7285, 3.2335, 0.8256, 2.0389], + device='cuda:3'), covar=tensor([0.1368, 0.1155, 0.0246, 0.1544, 0.2263, 0.0349, 0.2026, 0.1235], + device='cuda:3'), in_proj_covar=tensor([0.0201, 0.0205, 0.0137, 0.0224, 0.0277, 0.0146, 0.0174, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 00:22:45,352 INFO [train.py:901] (3/4) Epoch 28, batch 3100, loss[loss=0.2037, simple_loss=0.2997, pruned_loss=0.05385, over 8368.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2813, pruned_loss=0.05689, over 1615725.00 frames. ], batch size: 24, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:18,458 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221381.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:20,619 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221384.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:23,254 INFO [train.py:901] (3/4) Epoch 28, batch 3150, loss[loss=0.2251, simple_loss=0.3056, pruned_loss=0.07227, over 8408.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2813, pruned_loss=0.05695, over 1613946.52 frames. ], batch size: 49, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:23:31,492 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9683, 1.5706, 3.1421, 1.5728, 2.2448, 3.3285, 3.4833, 2.8628], + device='cuda:3'), covar=tensor([0.1174, 0.1785, 0.0357, 0.2044, 0.1111, 0.0279, 0.0563, 0.0567], + device='cuda:3'), in_proj_covar=tensor([0.0308, 0.0329, 0.0294, 0.0323, 0.0326, 0.0278, 0.0443, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 00:23:34,616 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6453, 2.4648, 1.7415, 2.2605, 2.0845, 1.6182, 2.0474, 2.2555], + device='cuda:3'), covar=tensor([0.1683, 0.0523, 0.1529, 0.0752, 0.0983, 0.1789, 0.1185, 0.1233], + device='cuda:3'), in_proj_covar=tensor([0.0362, 0.0246, 0.0344, 0.0315, 0.0305, 0.0350, 0.0351, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 00:23:38,998 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221409.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:23:43,026 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.344e+02 3.031e+02 3.872e+02 9.124e+02, threshold=6.062e+02, percent-clipped=5.0 +2023-02-09 00:24:00,285 INFO [train.py:901] (3/4) Epoch 28, batch 3200, loss[loss=0.2359, simple_loss=0.3065, pruned_loss=0.08267, over 8635.00 frames. 
], tot_loss[loss=0.1978, simple_loss=0.2813, pruned_loss=0.05715, over 1609566.04 frames. ], batch size: 39, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:21,664 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8966, 1.4772, 1.6092, 1.4492, 1.1713, 1.5182, 1.8197, 1.7242], + device='cuda:3'), covar=tensor([0.0750, 0.1346, 0.1802, 0.1534, 0.0739, 0.1554, 0.0855, 0.0587], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0154, 0.0191, 0.0163, 0.0102, 0.0164, 0.0114, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 00:24:35,508 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6876, 1.4911, 2.8586, 1.3867, 2.3019, 3.0488, 3.2317, 2.6284], + device='cuda:3'), covar=tensor([0.1284, 0.1728, 0.0382, 0.2255, 0.0937, 0.0313, 0.0595, 0.0585], + device='cuda:3'), in_proj_covar=tensor([0.0307, 0.0328, 0.0294, 0.0323, 0.0325, 0.0278, 0.0441, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 00:24:36,746 INFO [train.py:901] (3/4) Epoch 28, batch 3250, loss[loss=0.1924, simple_loss=0.2733, pruned_loss=0.05578, over 7656.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2808, pruned_loss=0.05665, over 1609824.14 frames. ], batch size: 19, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:24:56,686 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.800e+02 3.771e+02 8.910e+02, threshold=5.600e+02, percent-clipped=3.0 +2023-02-09 00:25:04,064 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:12,969 INFO [train.py:901] (3/4) Epoch 28, batch 3300, loss[loss=0.1719, simple_loss=0.2631, pruned_loss=0.04039, over 7925.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2809, pruned_loss=0.05687, over 1606973.43 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:25:25,041 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221555.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:42,974 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:25:48,463 INFO [train.py:901] (3/4) Epoch 28, batch 3350, loss[loss=0.2098, simple_loss=0.2945, pruned_loss=0.06257, over 8670.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2814, pruned_loss=0.0572, over 1610122.86 frames. ], batch size: 34, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:09,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.531e+02 3.062e+02 3.663e+02 8.444e+02, threshold=6.124e+02, percent-clipped=3.0 +2023-02-09 00:26:26,152 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=221637.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:26:26,641 INFO [train.py:901] (3/4) Epoch 28, batch 3400, loss[loss=0.1791, simple_loss=0.268, pruned_loss=0.04504, over 8134.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2817, pruned_loss=0.05746, over 1607791.35 frames. 
], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:26:43,903 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=221662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:27:02,351 INFO [train.py:901] (3/4) Epoch 28, batch 3450, loss[loss=0.2402, simple_loss=0.3228, pruned_loss=0.07879, over 8369.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.05769, over 1610139.82 frames. ], batch size: 24, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:21,425 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.306e+02 2.763e+02 3.583e+02 8.756e+02, threshold=5.526e+02, percent-clipped=3.0 +2023-02-09 00:27:39,502 INFO [train.py:901] (3/4) Epoch 28, batch 3500, loss[loss=0.241, simple_loss=0.2996, pruned_loss=0.0912, over 7206.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2838, pruned_loss=0.05874, over 1610103.49 frames. ], batch size: 16, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:27:56,077 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0 +2023-02-09 00:28:03,549 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 00:28:07,264 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:28:15,813 INFO [train.py:901] (3/4) Epoch 28, batch 3550, loss[loss=0.2063, simple_loss=0.2917, pruned_loss=0.06038, over 8125.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2825, pruned_loss=0.05785, over 1606290.22 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:28:35,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 00:28:35,267 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.405e+02 2.949e+02 3.672e+02 8.337e+02, threshold=5.897e+02, percent-clipped=3.0 +2023-02-09 00:28:52,590 INFO [train.py:901] (3/4) Epoch 28, batch 3600, loss[loss=0.1613, simple_loss=0.2419, pruned_loss=0.04042, over 7225.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2814, pruned_loss=0.05689, over 1611463.75 frames. ], batch size: 16, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:08,872 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 00:29:09,925 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2560, 3.1582, 2.9212, 1.7723, 2.8836, 2.8975, 2.8365, 2.8051], + device='cuda:3'), covar=tensor([0.1065, 0.0750, 0.1154, 0.4123, 0.1043, 0.1302, 0.1515, 0.1020], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0457, 0.0448, 0.0560, 0.0444, 0.0465, 0.0441, 0.0406], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:29:15,451 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=221869.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:28,471 INFO [train.py:901] (3/4) Epoch 28, batch 3650, loss[loss=0.2425, simple_loss=0.3132, pruned_loss=0.08592, over 7027.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2831, pruned_loss=0.05796, over 1610307.24 frames. 
], batch size: 71, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:29:42,346 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=221908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:29:47,070 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.635e+02 2.399e+02 3.022e+02 3.885e+02 8.966e+02, threshold=6.044e+02, percent-clipped=2.0 +2023-02-09 00:30:02,955 INFO [train.py:901] (3/4) Epoch 28, batch 3700, loss[loss=0.2572, simple_loss=0.3169, pruned_loss=0.09879, over 6493.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2832, pruned_loss=0.05854, over 1603722.03 frames. ], batch size: 72, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:05,061 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875 +2023-02-09 00:30:38,683 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=221984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:30:41,399 INFO [train.py:901] (3/4) Epoch 28, batch 3750, loss[loss=0.1991, simple_loss=0.2917, pruned_loss=0.05325, over 8465.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2842, pruned_loss=0.05827, over 1609798.14 frames. ], batch size: 27, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:30:48,356 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4893, 1.4409, 1.7513, 1.3736, 0.9377, 1.4997, 1.5285, 1.4119], + device='cuda:3'), covar=tensor([0.0605, 0.1267, 0.1617, 0.1482, 0.0575, 0.1458, 0.0697, 0.0670], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0163, 0.0112, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 00:31:01,413 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.662e+02 3.142e+02 4.083e+02 1.270e+03, threshold=6.284e+02, percent-clipped=8.0 +2023-02-09 00:31:10,199 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222027.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:31:17,963 INFO [train.py:901] (3/4) Epoch 28, batch 3800, loss[loss=0.2195, simple_loss=0.2978, pruned_loss=0.07057, over 8240.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2829, pruned_loss=0.05787, over 1605959.17 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:31:55,279 INFO [train.py:901] (3/4) Epoch 28, batch 3850, loss[loss=0.2261, simple_loss=0.3147, pruned_loss=0.06875, over 8516.00 frames. ], tot_loss[loss=0.2004, simple_loss=0.2836, pruned_loss=0.0586, over 1610917.02 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:32:11,766 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. 
Duration: 0.836375 +2023-02-09 00:32:13,843 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.208e+02 2.768e+02 3.453e+02 7.901e+02, threshold=5.537e+02, percent-clipped=1.0 +2023-02-09 00:32:15,442 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8801, 6.0035, 5.2115, 2.5589, 5.3085, 5.7276, 5.4786, 5.5338], + device='cuda:3'), covar=tensor([0.0509, 0.0352, 0.0999, 0.4770, 0.0711, 0.0669, 0.1106, 0.0516], + device='cuda:3'), in_proj_covar=tensor([0.0545, 0.0459, 0.0448, 0.0560, 0.0444, 0.0467, 0.0442, 0.0408], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:32:17,532 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222120.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:32:30,077 INFO [train.py:901] (3/4) Epoch 28, batch 3900, loss[loss=0.2019, simple_loss=0.2833, pruned_loss=0.06019, over 6833.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2833, pruned_loss=0.05833, over 1609433.68 frames. ], batch size: 71, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:00,577 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0257, 3.6082, 1.9397, 2.8774, 2.5259, 1.8428, 2.6051, 3.1431], + device='cuda:3'), covar=tensor([0.1837, 0.0464, 0.1566, 0.0866, 0.1068, 0.1936, 0.1284, 0.0842], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0245, 0.0343, 0.0315, 0.0305, 0.0348, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 00:33:06,448 INFO [train.py:901] (3/4) Epoch 28, batch 3950, loss[loss=0.2294, simple_loss=0.2977, pruned_loss=0.08058, over 8106.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2832, pruned_loss=0.05854, over 1611012.96 frames. ], batch size: 23, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:17,904 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:20,797 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2003, 3.6695, 2.3613, 3.0314, 2.7147, 2.1228, 2.8334, 3.0917], + device='cuda:3'), covar=tensor([0.1744, 0.0373, 0.1267, 0.0736, 0.0852, 0.1598, 0.1230, 0.1244], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0245, 0.0342, 0.0314, 0.0304, 0.0347, 0.0350, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 00:33:26,082 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 2.338e+02 2.821e+02 3.606e+02 1.107e+03, threshold=5.643e+02, percent-clipped=4.0 +2023-02-09 00:33:31,801 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222223.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:35,385 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5576, 1.5436, 2.0202, 1.3495, 1.2793, 2.0543, 0.3189, 1.3009], + device='cuda:3'), covar=tensor([0.1749, 0.1572, 0.0502, 0.1228, 0.2542, 0.0429, 0.2122, 0.1552], + device='cuda:3'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0225, 0.0279, 0.0147, 0.0176, 0.0200], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 00:33:37,687 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.22 vs. 
limit=2.0 +2023-02-09 00:33:40,243 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:42,144 INFO [train.py:901] (3/4) Epoch 28, batch 4000, loss[loss=0.1676, simple_loss=0.2505, pruned_loss=0.04234, over 7698.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.0582, over 1612950.18 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:33:43,728 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222240.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:33:51,748 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:01,382 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:34:17,513 INFO [train.py:901] (3/4) Epoch 28, batch 4050, loss[loss=0.2365, simple_loss=0.3085, pruned_loss=0.08224, over 7541.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.2827, pruned_loss=0.0583, over 1612015.30 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:34:38,325 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.744e+02 2.414e+02 3.092e+02 4.009e+02 1.246e+03, threshold=6.184e+02, percent-clipped=7.0 +2023-02-09 00:34:54,252 INFO [train.py:901] (3/4) Epoch 28, batch 4100, loss[loss=0.1629, simple_loss=0.245, pruned_loss=0.04042, over 7721.00 frames. ], tot_loss[loss=0.2, simple_loss=0.283, pruned_loss=0.05855, over 1608353.77 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:14,880 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:17,648 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:35:29,384 INFO [train.py:901] (3/4) Epoch 28, batch 4150, loss[loss=0.1695, simple_loss=0.2495, pruned_loss=0.04477, over 7792.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05765, over 1611463.78 frames. ], batch size: 19, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:35:49,100 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.330e+02 2.692e+02 3.176e+02 6.436e+02, threshold=5.384e+02, percent-clipped=1.0 +2023-02-09 00:36:07,144 INFO [train.py:901] (3/4) Epoch 28, batch 4200, loss[loss=0.1973, simple_loss=0.2922, pruned_loss=0.05122, over 8339.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2823, pruned_loss=0.05761, over 1612244.94 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:14,736 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 00:36:37,817 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375 +2023-02-09 00:36:41,348 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:36:42,589 INFO [train.py:901] (3/4) Epoch 28, batch 4250, loss[loss=0.1855, simple_loss=0.2695, pruned_loss=0.05078, over 7930.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.282, pruned_loss=0.05678, over 1616023.45 frames. 
], batch size: 20, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:36:44,938 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222491.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:36:53,800 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 00:37:00,857 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.734e+02 2.539e+02 3.193e+02 4.198e+02 8.289e+02, threshold=6.386e+02, percent-clipped=5.0 +2023-02-09 00:37:01,771 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222516.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:18,018 INFO [train.py:901] (3/4) Epoch 28, batch 4300, loss[loss=0.2251, simple_loss=0.2998, pruned_loss=0.07516, over 8732.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2828, pruned_loss=0.05698, over 1621161.52 frames. ], batch size: 30, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:20,195 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:25,033 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222547.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:39,471 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222567.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:37:54,078 INFO [train.py:901] (3/4) Epoch 28, batch 4350, loss[loss=0.2102, simple_loss=0.2873, pruned_loss=0.06656, over 8516.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2832, pruned_loss=0.05742, over 1621529.51 frames. ], batch size: 26, lr: 2.69e-03, grad_scale: 8.0 +2023-02-09 00:37:54,218 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222588.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:11,687 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 00:38:13,105 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.579e+02 2.501e+02 2.979e+02 3.614e+02 7.360e+02, threshold=5.959e+02, percent-clipped=2.0 +2023-02-09 00:38:18,806 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:29,001 INFO [train.py:901] (3/4) Epoch 28, batch 4400, loss[loss=0.1705, simple_loss=0.2591, pruned_loss=0.04098, over 7802.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2829, pruned_loss=0.05732, over 1622270.96 frames. ], batch size: 20, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:38:37,330 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:40,089 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8849, 1.9115, 3.1352, 2.3275, 2.6487, 1.9373, 1.6926, 1.6980], + device='cuda:3'), covar=tensor([0.8009, 0.6950, 0.2388, 0.4939, 0.4111, 0.5156, 0.3275, 0.6569], + device='cuda:3'), in_proj_covar=tensor([0.0966, 0.1028, 0.0832, 0.0995, 0.1022, 0.0934, 0.0771, 0.0850], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 00:38:47,080 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:38:54,356 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. 
Duration: 33.038875 +2023-02-09 00:39:01,973 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=222682.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:04,786 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.9794, 1.8711, 2.1186, 2.0606, 1.2605, 1.9358, 2.4216, 2.2847], + device='cuda:3'), covar=tensor([0.0445, 0.1076, 0.1495, 0.1235, 0.0516, 0.1286, 0.0558, 0.0553], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0162, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 00:39:05,968 INFO [train.py:901] (3/4) Epoch 28, batch 4450, loss[loss=0.1919, simple_loss=0.2674, pruned_loss=0.05825, over 7702.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2832, pruned_loss=0.05769, over 1621071.65 frames. ], batch size: 18, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:24,968 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.632e+02 2.353e+02 2.798e+02 3.446e+02 6.111e+02, threshold=5.597e+02, percent-clipped=1.0 +2023-02-09 00:39:38,908 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.99 vs. limit=5.0 +2023-02-09 00:39:41,219 INFO [train.py:901] (3/4) Epoch 28, batch 4500, loss[loss=0.201, simple_loss=0.2865, pruned_loss=0.05773, over 8436.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2825, pruned_loss=0.0577, over 1618438.03 frames. ], batch size: 27, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:39:44,227 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:39:45,360 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 00:39:58,786 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.62 vs. limit=2.0 +2023-02-09 00:40:02,733 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222767.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:40:18,346 INFO [train.py:901] (3/4) Epoch 28, batch 4550, loss[loss=0.1741, simple_loss=0.2685, pruned_loss=0.03986, over 8023.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2832, pruned_loss=0.05827, over 1617468.33 frames. ], batch size: 22, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:40:26,135 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6728, 1.7611, 1.5852, 2.2026, 1.0480, 1.5260, 1.7022, 1.7673], + device='cuda:3'), covar=tensor([0.0849, 0.0808, 0.0960, 0.0490, 0.1166, 0.1264, 0.0754, 0.0747], + device='cuda:3'), in_proj_covar=tensor([0.0232, 0.0195, 0.0245, 0.0214, 0.0205, 0.0248, 0.0251, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 00:40:37,186 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.362e+02 2.324e+02 2.721e+02 3.677e+02 6.861e+02, threshold=5.442e+02, percent-clipped=4.0 +2023-02-09 00:40:53,694 INFO [train.py:901] (3/4) Epoch 28, batch 4600, loss[loss=0.2174, simple_loss=0.2996, pruned_loss=0.06758, over 8496.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.05752, over 1620232.45 frames. 
], batch size: 26, lr: 2.69e-03, grad_scale: 16.0 +2023-02-09 00:41:11,248 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6123, 2.0556, 3.2506, 1.5177, 2.4475, 2.1312, 1.7679, 2.5833], + device='cuda:3'), covar=tensor([0.2057, 0.2850, 0.0941, 0.4776, 0.1973, 0.3165, 0.2577, 0.2353], + device='cuda:3'), in_proj_covar=tensor([0.0540, 0.0640, 0.0567, 0.0674, 0.0664, 0.0612, 0.0566, 0.0647], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:41:27,927 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222885.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:41:30,003 INFO [train.py:901] (3/4) Epoch 28, batch 4650, loss[loss=0.1864, simple_loss=0.2619, pruned_loss=0.05545, over 7657.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2821, pruned_loss=0.05737, over 1614596.34 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 16.0 +2023-02-09 00:41:50,684 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.622e+02 2.423e+02 3.099e+02 3.500e+02 7.849e+02, threshold=6.198e+02, percent-clipped=6.0 +2023-02-09 00:41:53,068 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222918.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:02,698 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=222932.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:06,747 INFO [train.py:901] (3/4) Epoch 28, batch 4700, loss[loss=0.217, simple_loss=0.3069, pruned_loss=0.06356, over 8461.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2825, pruned_loss=0.05756, over 1617570.34 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:06,969 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=222938.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:10,454 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222943.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:20,652 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222958.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:24,266 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=222963.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:40,841 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=222987.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:41,326 INFO [train.py:901] (3/4) Epoch 28, batch 4750, loss[loss=0.2025, simple_loss=0.2876, pruned_loss=0.05871, over 8314.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2821, pruned_loss=0.05792, over 1615654.18 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:42:50,003 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223000.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:52,052 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:42:54,706 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 00:42:58,160 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-09 00:43:02,872 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.412e+02 2.807e+02 3.833e+02 7.869e+02, threshold=5.613e+02, percent-clipped=5.0 +2023-02-09 00:43:18,663 INFO [train.py:901] (3/4) Epoch 28, batch 4800, loss[loss=0.1797, simple_loss=0.2638, pruned_loss=0.04782, over 7276.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2818, pruned_loss=0.0576, over 1611484.39 frames. ], batch size: 16, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:43:25,201 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223047.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:36,315 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6473, 1.5013, 1.7866, 1.4252, 0.9287, 1.5695, 1.5624, 1.5397], + device='cuda:3'), covar=tensor([0.0566, 0.1173, 0.1560, 0.1411, 0.0549, 0.1366, 0.0674, 0.0623], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0190, 0.0161, 0.0101, 0.0163, 0.0113, 0.0146], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 00:43:48,808 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 00:43:49,675 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223082.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:43:50,676 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. limit=5.0 +2023-02-09 00:43:53,650 INFO [train.py:901] (3/4) Epoch 28, batch 4850, loss[loss=0.1983, simple_loss=0.2893, pruned_loss=0.05371, over 8352.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2827, pruned_loss=0.05816, over 1612046.21 frames. ], batch size: 24, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:13,745 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.508e+02 3.332e+02 4.408e+02 9.671e+02, threshold=6.663e+02, percent-clipped=7.0 +2023-02-09 00:44:31,161 INFO [train.py:901] (3/4) Epoch 28, batch 4900, loss[loss=0.1877, simple_loss=0.2614, pruned_loss=0.05705, over 8076.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2821, pruned_loss=0.05768, over 1612339.43 frames. ], batch size: 21, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:44:56,993 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-09 00:45:07,007 INFO [train.py:901] (3/4) Epoch 28, batch 4950, loss[loss=0.1827, simple_loss=0.2766, pruned_loss=0.04438, over 8253.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2825, pruned_loss=0.05761, over 1616423.90 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:09,276 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223191.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:45:26,419 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.334e+02 2.712e+02 3.560e+02 9.309e+02, threshold=5.424e+02, percent-clipped=3.0 +2023-02-09 00:45:42,295 INFO [train.py:901] (3/4) Epoch 28, batch 5000, loss[loss=0.1864, simple_loss=0.2785, pruned_loss=0.0471, over 8134.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2833, pruned_loss=0.05814, over 1615630.29 frames. 
], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:45:56,166 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:14,344 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223281.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:18,934 INFO [train.py:901] (3/4) Epoch 28, batch 5050, loss[loss=0.1686, simple_loss=0.2647, pruned_loss=0.03626, over 8193.00 frames. ], tot_loss[loss=0.1998, simple_loss=0.2831, pruned_loss=0.05822, over 1610545.26 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:28,803 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223302.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:29,639 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223303.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:32,883 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 00:46:38,387 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.274e+02 2.931e+02 3.573e+02 6.090e+02, threshold=5.862e+02, percent-clipped=1.0 +2023-02-09 00:46:46,952 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223328.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:48,963 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223331.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:46:53,555 INFO [train.py:901] (3/4) Epoch 28, batch 5100, loss[loss=0.1911, simple_loss=0.2751, pruned_loss=0.05357, over 7810.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2822, pruned_loss=0.05773, over 1606437.29 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:46:59,470 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223346.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:31,192 INFO [train.py:901] (3/4) Epoch 28, batch 5150, loss[loss=0.1588, simple_loss=0.2522, pruned_loss=0.03275, over 8042.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2814, pruned_loss=0.05738, over 1604413.90 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:47:50,495 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.755e+02 2.349e+02 2.964e+02 3.516e+02 1.122e+03, threshold=5.928e+02, percent-clipped=3.0 +2023-02-09 00:47:51,407 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223417.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:47:57,709 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:05,968 INFO [train.py:901] (3/4) Epoch 28, batch 5200, loss[loss=0.185, simple_loss=0.273, pruned_loss=0.04854, over 8105.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2815, pruned_loss=0.0576, over 1607095.93 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:48:11,625 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223446.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:22,800 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223461.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:48:31,223 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. 
Duration: 27.25 +2023-02-09 00:48:44,075 INFO [train.py:901] (3/4) Epoch 28, batch 5250, loss[loss=0.2355, simple_loss=0.3085, pruned_loss=0.08122, over 7649.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2822, pruned_loss=0.05779, over 1605204.47 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:49:03,772 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.237e+02 2.837e+02 3.561e+02 7.405e+02, threshold=5.674e+02, percent-clipped=6.0 +2023-02-09 00:49:17,137 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223535.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:49:19,096 INFO [train.py:901] (3/4) Epoch 28, batch 5300, loss[loss=0.2079, simple_loss=0.2944, pruned_loss=0.06075, over 8808.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2807, pruned_loss=0.05705, over 1607978.40 frames. ], batch size: 40, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:49:21,387 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223541.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:49:22,624 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9290, 1.7014, 2.5662, 1.5817, 2.2078, 2.8653, 2.8779, 2.4548], + device='cuda:3'), covar=tensor([0.0983, 0.1504, 0.0634, 0.1820, 0.1618, 0.0335, 0.0807, 0.0611], + device='cuda:3'), in_proj_covar=tensor([0.0306, 0.0326, 0.0293, 0.0320, 0.0324, 0.0276, 0.0440, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 00:49:55,535 INFO [train.py:901] (3/4) Epoch 28, batch 5350, loss[loss=0.2133, simple_loss=0.2962, pruned_loss=0.0652, over 8241.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2808, pruned_loss=0.05701, over 1606787.11 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:50:01,804 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=223596.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:50:15,668 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.359e+02 2.840e+02 3.657e+02 7.209e+02, threshold=5.681e+02, percent-clipped=3.0 +2023-02-09 00:50:30,884 INFO [train.py:901] (3/4) Epoch 28, batch 5400, loss[loss=0.1839, simple_loss=0.2702, pruned_loss=0.04882, over 8191.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2807, pruned_loss=0.05653, over 1609178.84 frames. ], batch size: 23, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:50:39,708 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=223650.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 00:50:53,017 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1973, 1.3999, 4.3606, 1.7218, 3.9476, 3.6172, 3.9664, 3.8667], + device='cuda:3'), covar=tensor([0.0614, 0.4649, 0.0519, 0.4039, 0.0953, 0.0941, 0.0587, 0.0612], + device='cuda:3'), in_proj_covar=tensor([0.0683, 0.0665, 0.0738, 0.0659, 0.0741, 0.0634, 0.0644, 0.0715], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 00:50:55,915 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223673.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:06,364 INFO [train.py:901] (3/4) Epoch 28, batch 5450, loss[loss=0.1552, simple_loss=0.2349, pruned_loss=0.03772, over 7702.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2803, pruned_loss=0.0565, over 1610974.74 frames. 
], batch size: 18, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:51:13,799 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223698.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:16,760 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:20,202 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7718, 1.4877, 1.6794, 1.3926, 1.0362, 1.4596, 1.5990, 1.3313], + device='cuda:3'), covar=tensor([0.0608, 0.1305, 0.1698, 0.1558, 0.0586, 0.1515, 0.0738, 0.0742], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0162, 0.0102, 0.0164, 0.0113, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 00:51:28,421 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.718e+02 2.405e+02 2.886e+02 3.694e+02 6.837e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 00:51:28,657 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223717.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:31,408 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 00:51:36,518 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223727.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:51:44,559 INFO [train.py:901] (3/4) Epoch 28, batch 5500, loss[loss=0.1985, simple_loss=0.2887, pruned_loss=0.05418, over 8602.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.28, pruned_loss=0.05632, over 1611712.74 frames. ], batch size: 34, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:51:47,336 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223742.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:01,377 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.46 vs. limit=2.0 +2023-02-09 00:52:18,977 INFO [train.py:901] (3/4) Epoch 28, batch 5550, loss[loss=0.172, simple_loss=0.2691, pruned_loss=0.03743, over 7972.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2796, pruned_loss=0.05622, over 1611421.55 frames. ], batch size: 21, lr: 2.68e-03, grad_scale: 4.0 +2023-02-09 00:52:25,550 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223797.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:39,666 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.770e+02 2.462e+02 3.010e+02 3.574e+02 1.274e+03, threshold=6.020e+02, percent-clipped=3.0 +2023-02-09 00:52:43,457 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223822.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:52:55,583 INFO [train.py:901] (3/4) Epoch 28, batch 5600, loss[loss=0.1898, simple_loss=0.2792, pruned_loss=0.05019, over 8243.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2792, pruned_loss=0.05619, over 1611747.92 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:53:31,690 INFO [train.py:901] (3/4) Epoch 28, batch 5650, loss[loss=0.2322, simple_loss=0.3083, pruned_loss=0.078, over 7201.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.281, pruned_loss=0.05724, over 1613078.32 frames. 
], batch size: 71, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:53:31,873 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0288, 1.5090, 3.4412, 1.5972, 2.4482, 3.7847, 3.9026, 3.2698], + device='cuda:3'), covar=tensor([0.1122, 0.1812, 0.0314, 0.1980, 0.0990, 0.0208, 0.0510, 0.0469], + device='cuda:3'), in_proj_covar=tensor([0.0307, 0.0327, 0.0294, 0.0322, 0.0325, 0.0277, 0.0441, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 00:53:41,207 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 00:53:44,200 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=223906.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:53:51,298 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 2.313e+02 2.789e+02 3.752e+02 1.102e+03, threshold=5.578e+02, percent-clipped=3.0 +2023-02-09 00:54:01,593 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=223931.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 00:54:06,144 INFO [train.py:901] (3/4) Epoch 28, batch 5700, loss[loss=0.2235, simple_loss=0.3043, pruned_loss=0.07132, over 8662.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2815, pruned_loss=0.05725, over 1617246.84 frames. ], batch size: 34, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:54:07,521 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=223940.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:54:43,225 INFO [train.py:901] (3/4) Epoch 28, batch 5750, loss[loss=0.2308, simple_loss=0.3089, pruned_loss=0.07635, over 8450.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2802, pruned_loss=0.05676, over 1612390.89 frames. ], batch size: 27, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:54:48,696 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 00:55:04,141 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 2.263e+02 2.713e+02 3.241e+02 8.661e+02, threshold=5.425e+02, percent-clipped=3.0 +2023-02-09 00:55:18,763 INFO [train.py:901] (3/4) Epoch 28, batch 5800, loss[loss=0.1758, simple_loss=0.2621, pruned_loss=0.04472, over 8130.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2811, pruned_loss=0.05731, over 1613336.13 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:55:30,597 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224055.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:55:55,787 INFO [train.py:901] (3/4) Epoch 28, batch 5850, loss[loss=0.1885, simple_loss=0.2678, pruned_loss=0.05454, over 5111.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2821, pruned_loss=0.05769, over 1608033.69 frames. 
], batch size: 11, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:56:04,943 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7099, 1.9548, 2.1050, 1.3985, 2.2289, 1.5524, 0.6866, 1.9316], + device='cuda:3'), covar=tensor([0.0693, 0.0436, 0.0361, 0.0689, 0.0445, 0.0930, 0.0962, 0.0415], + device='cuda:3'), in_proj_covar=tensor([0.0472, 0.0411, 0.0364, 0.0460, 0.0395, 0.0552, 0.0402, 0.0442], + device='cuda:3'), out_proj_covar=tensor([1.2492e-04, 1.0636e-04, 9.4696e-05, 1.2019e-04, 1.0338e-04, 1.5402e-04, + 1.0734e-04, 1.1575e-04], device='cuda:3') +2023-02-09 00:56:14,224 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.38 vs. limit=2.0 +2023-02-09 00:56:15,668 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.538e+02 3.148e+02 4.118e+02 7.183e+02, threshold=6.296e+02, percent-clipped=12.0 +2023-02-09 00:56:30,206 INFO [train.py:901] (3/4) Epoch 28, batch 5900, loss[loss=0.2277, simple_loss=0.3009, pruned_loss=0.07728, over 8495.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.0579, over 1610932.11 frames. ], batch size: 26, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:56:41,648 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.81 vs. limit=5.0 +2023-02-09 00:57:06,187 INFO [train.py:901] (3/4) Epoch 28, batch 5950, loss[loss=0.1911, simple_loss=0.2848, pruned_loss=0.04869, over 8473.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2827, pruned_loss=0.05779, over 1613492.54 frames. ], batch size: 25, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:28,301 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.818e+02 2.486e+02 3.110e+02 3.888e+02 7.674e+02, threshold=6.220e+02, percent-clipped=4.0 +2023-02-09 00:57:37,732 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224230.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:57:43,111 INFO [train.py:901] (3/4) Epoch 28, batch 6000, loss[loss=0.2105, simple_loss=0.2875, pruned_loss=0.06674, over 8463.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05772, over 1616149.27 frames. ], batch size: 49, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:57:43,111 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 00:57:56,798 INFO [train.py:935] (3/4) Epoch 28, validation: loss=0.1714, simple_loss=0.2708, pruned_loss=0.03603, over 944034.00 frames. +2023-02-09 00:57:56,800 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6473MB +2023-02-09 00:58:33,308 INFO [train.py:901] (3/4) Epoch 28, batch 6050, loss[loss=0.1746, simple_loss=0.2525, pruned_loss=0.04831, over 7436.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.05735, over 1612830.82 frames. ], batch size: 17, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:58:46,566 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.58 vs. 
limit=2.0 +2023-02-09 00:58:49,938 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224311.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:58:54,040 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.623e+02 2.462e+02 3.109e+02 3.867e+02 1.260e+03, threshold=6.217e+02, percent-clipped=5.0 +2023-02-09 00:59:06,133 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4678, 2.3665, 3.1498, 2.4474, 2.8798, 2.4862, 2.3388, 1.8115], + device='cuda:3'), covar=tensor([0.5818, 0.5434, 0.2091, 0.4334, 0.3036, 0.3251, 0.1987, 0.5826], + device='cuda:3'), in_proj_covar=tensor([0.0974, 0.1037, 0.0842, 0.1008, 0.1031, 0.0941, 0.0779, 0.0860], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 00:59:08,854 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224336.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 00:59:10,065 INFO [train.py:901] (3/4) Epoch 28, batch 6100, loss[loss=0.2058, simple_loss=0.3046, pruned_loss=0.05353, over 8024.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2826, pruned_loss=0.05793, over 1613709.60 frames. ], batch size: 22, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 00:59:14,108 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0 +2023-02-09 00:59:26,298 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 00:59:46,736 INFO [train.py:901] (3/4) Epoch 28, batch 6150, loss[loss=0.2095, simple_loss=0.295, pruned_loss=0.06197, over 8442.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2808, pruned_loss=0.05695, over 1614410.16 frames. ], batch size: 49, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:00:06,959 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.535e+02 2.362e+02 2.823e+02 3.455e+02 8.158e+02, threshold=5.645e+02, percent-clipped=2.0 +2023-02-09 01:00:21,346 INFO [train.py:901] (3/4) Epoch 28, batch 6200, loss[loss=0.2179, simple_loss=0.3007, pruned_loss=0.06753, over 8494.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2812, pruned_loss=0.05716, over 1618999.99 frames. ], batch size: 28, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:00:58,236 INFO [train.py:901] (3/4) Epoch 28, batch 6250, loss[loss=0.153, simple_loss=0.2339, pruned_loss=0.0361, over 7812.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2806, pruned_loss=0.05675, over 1616271.38 frames. ], batch size: 19, lr: 2.68e-03, grad_scale: 8.0 +2023-02-09 01:01:18,588 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 2.593e+02 3.043e+02 4.250e+02 9.084e+02, threshold=6.087e+02, percent-clipped=11.0 +2023-02-09 01:01:33,265 INFO [train.py:901] (3/4) Epoch 28, batch 6300, loss[loss=0.1737, simple_loss=0.2709, pruned_loss=0.03824, over 8469.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2803, pruned_loss=0.05693, over 1608620.49 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:02:00,127 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224574.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:02:04,367 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:02:10,459 INFO [train.py:901] (3/4) Epoch 28, batch 6350, loss[loss=0.2139, simple_loss=0.301, pruned_loss=0.06337, over 8552.00 frames. 
], tot_loss[loss=0.1977, simple_loss=0.2808, pruned_loss=0.05729, over 1607505.01 frames. ], batch size: 31, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:02:17,589 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2724, 3.1776, 3.0200, 1.7064, 2.9494, 2.9517, 2.8724, 2.8410], + device='cuda:3'), covar=tensor([0.1191, 0.0784, 0.1210, 0.4276, 0.1079, 0.1277, 0.1575, 0.1129], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0456, 0.0449, 0.0557, 0.0443, 0.0465, 0.0441, 0.0408], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:02:30,928 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.559e+02 2.315e+02 2.720e+02 3.259e+02 6.733e+02, threshold=5.440e+02, percent-clipped=2.0 +2023-02-09 01:02:38,891 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6451, 2.5342, 1.9209, 2.2346, 2.1874, 1.5552, 2.0746, 2.1573], + device='cuda:3'), covar=tensor([0.1565, 0.0446, 0.1267, 0.0725, 0.0781, 0.1672, 0.1060, 0.0957], + device='cuda:3'), in_proj_covar=tensor([0.0360, 0.0245, 0.0342, 0.0316, 0.0303, 0.0348, 0.0352, 0.0325], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 01:02:45,885 INFO [train.py:901] (3/4) Epoch 28, batch 6400, loss[loss=0.2624, simple_loss=0.3483, pruned_loss=0.08828, over 8333.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2805, pruned_loss=0.0571, over 1610525.66 frames. ], batch size: 26, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:03:06,888 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5304, 1.6777, 2.1162, 1.4015, 1.5305, 1.7827, 1.5549, 1.5514], + device='cuda:3'), covar=tensor([0.2175, 0.2884, 0.1059, 0.5120, 0.2177, 0.3704, 0.2949, 0.2283], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0642, 0.0569, 0.0677, 0.0668, 0.0617, 0.0569, 0.0652], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:03:21,510 INFO [train.py:901] (3/4) Epoch 28, batch 6450, loss[loss=0.1866, simple_loss=0.281, pruned_loss=0.04611, over 8465.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2809, pruned_loss=0.0574, over 1611785.06 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:03:22,398 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=224689.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:03:43,002 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.303e+02 2.784e+02 3.485e+02 7.082e+02, threshold=5.567e+02, percent-clipped=7.0 +2023-02-09 01:03:57,597 INFO [train.py:901] (3/4) Epoch 28, batch 6500, loss[loss=0.1733, simple_loss=0.2717, pruned_loss=0.03746, over 8469.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2803, pruned_loss=0.05694, over 1609083.58 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:04:02,105 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:04:11,984 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=224758.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:04:32,261 INFO [train.py:901] (3/4) Epoch 28, batch 6550, loss[loss=0.2205, simple_loss=0.3065, pruned_loss=0.06726, over 8501.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2804, pruned_loss=0.05639, over 1612832.73 frames. 
], batch size: 26, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:04:43,338 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6815, 1.9183, 1.9342, 1.3405, 1.9841, 1.5177, 0.4405, 1.9809], + device='cuda:3'), covar=tensor([0.0490, 0.0358, 0.0331, 0.0509, 0.0403, 0.0902, 0.0943, 0.0236], + device='cuda:3'), in_proj_covar=tensor([0.0475, 0.0412, 0.0366, 0.0462, 0.0397, 0.0555, 0.0407, 0.0444], + device='cuda:3'), out_proj_covar=tensor([1.2584e-04, 1.0681e-04, 9.5229e-05, 1.2065e-04, 1.0362e-04, 1.5488e-04, + 1.0854e-04, 1.1618e-04], device='cuda:3') +2023-02-09 01:04:47,800 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 01:04:53,996 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.489e+02 3.184e+02 3.768e+02 7.222e+02, threshold=6.368e+02, percent-clipped=1.0 +2023-02-09 01:05:08,042 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 01:05:09,343 INFO [train.py:901] (3/4) Epoch 28, batch 6600, loss[loss=0.2234, simple_loss=0.3067, pruned_loss=0.07005, over 8197.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2816, pruned_loss=0.05729, over 1614711.91 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:05:14,990 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8392, 1.6833, 2.1469, 1.7198, 1.1495, 1.8777, 2.5948, 2.2407], + device='cuda:3'), covar=tensor([0.0454, 0.1254, 0.1490, 0.1404, 0.0559, 0.1399, 0.0560, 0.0583], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0101, 0.0163, 0.0113, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 01:05:19,997 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 01:05:44,396 INFO [train.py:901] (3/4) Epoch 28, batch 6650, loss[loss=0.2344, simple_loss=0.3168, pruned_loss=0.07594, over 8358.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2811, pruned_loss=0.05719, over 1613302.69 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:04,774 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.705e+02 2.463e+02 2.971e+02 3.895e+02 9.422e+02, threshold=5.941e+02, percent-clipped=4.0 +2023-02-09 01:06:10,966 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=224924.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:21,166 INFO [train.py:901] (3/4) Epoch 28, batch 6700, loss[loss=0.1785, simple_loss=0.2635, pruned_loss=0.04669, over 8455.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2813, pruned_loss=0.05731, over 1615126.87 frames. 
], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:06:22,724 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5158, 1.3860, 1.8294, 1.1731, 1.1329, 1.8134, 0.2832, 1.1430], + device='cuda:3'), covar=tensor([0.1532, 0.1188, 0.0361, 0.0850, 0.2136, 0.0406, 0.1747, 0.1144], + device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0208, 0.0138, 0.0225, 0.0279, 0.0149, 0.0176, 0.0201], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 01:06:26,241 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=224945.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:40,863 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8344, 1.8549, 2.5479, 1.5049, 1.2384, 2.5644, 0.5586, 1.4957], + device='cuda:3'), covar=tensor([0.1672, 0.1008, 0.0360, 0.1223, 0.2413, 0.0313, 0.1919, 0.1240], + device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0208, 0.0138, 0.0225, 0.0279, 0.0148, 0.0176, 0.0201], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 01:06:43,079 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1209, 1.9641, 2.5086, 2.1213, 2.3735, 2.2475, 2.1066, 1.3552], + device='cuda:3'), covar=tensor([0.5935, 0.5038, 0.2067, 0.4039, 0.2818, 0.3454, 0.2016, 0.5481], + device='cuda:3'), in_proj_covar=tensor([0.0970, 0.1035, 0.0839, 0.1001, 0.1028, 0.0939, 0.0776, 0.0855], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:06:44,443 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=224970.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:06:57,021 INFO [train.py:901] (3/4) Epoch 28, batch 6750, loss[loss=0.2433, simple_loss=0.3292, pruned_loss=0.0787, over 8448.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05751, over 1613819.79 frames. ], batch size: 27, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:07:17,011 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.343e+02 2.979e+02 3.883e+02 6.136e+02, threshold=5.958e+02, percent-clipped=2.0 +2023-02-09 01:07:28,015 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. Duration: 26.438875 +2023-02-09 01:07:32,092 INFO [train.py:901] (3/4) Epoch 28, batch 6800, loss[loss=0.2596, simple_loss=0.3311, pruned_loss=0.09402, over 6986.00 frames. ], tot_loss[loss=0.1983, simple_loss=0.2821, pruned_loss=0.05724, over 1610629.21 frames. ], batch size: 71, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:07:32,972 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225039.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:08,381 INFO [train.py:901] (3/4) Epoch 28, batch 6850, loss[loss=0.1969, simple_loss=0.285, pruned_loss=0.05445, over 8535.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2823, pruned_loss=0.0575, over 1613129.74 frames. 
], batch size: 34, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:08:08,460 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225088.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:18,037 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225102.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:18,710 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 01:08:28,498 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.283e+02 2.996e+02 3.907e+02 8.918e+02, threshold=5.992e+02, percent-clipped=3.0 +2023-02-09 01:08:31,367 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225121.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:08:32,732 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.6284, 1.4062, 3.9215, 1.4958, 3.2373, 3.0999, 3.6073, 3.5555], + device='cuda:3'), covar=tensor([0.1248, 0.6181, 0.1136, 0.5337, 0.2117, 0.1861, 0.0968, 0.1036], + device='cuda:3'), in_proj_covar=tensor([0.0686, 0.0667, 0.0738, 0.0662, 0.0746, 0.0635, 0.0645, 0.0722], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:08:42,880 INFO [train.py:901] (3/4) Epoch 28, batch 6900, loss[loss=0.2545, simple_loss=0.3227, pruned_loss=0.09315, over 6876.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2829, pruned_loss=0.05804, over 1608315.06 frames. ], batch size: 71, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:08:58,671 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225160.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:18,981 INFO [train.py:901] (3/4) Epoch 28, batch 6950, loss[loss=0.1765, simple_loss=0.2581, pruned_loss=0.04744, over 8085.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2833, pruned_loss=0.05823, over 1610132.81 frames. ], batch size: 21, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:09:30,339 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225203.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:30,868 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 01:09:40,021 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 2.456e+02 2.946e+02 3.977e+02 8.721e+02, threshold=5.892e+02, percent-clipped=6.0 +2023-02-09 01:09:40,234 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225217.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:09:54,451 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.54 vs. limit=2.0 +2023-02-09 01:09:54,776 INFO [train.py:901] (3/4) Epoch 28, batch 7000, loss[loss=0.2166, simple_loss=0.2845, pruned_loss=0.07431, over 7780.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2822, pruned_loss=0.05733, over 1611616.22 frames. ], batch size: 19, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:10:31,226 INFO [train.py:901] (3/4) Epoch 28, batch 7050, loss[loss=0.2068, simple_loss=0.2713, pruned_loss=0.07114, over 7819.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2812, pruned_loss=0.05688, over 1610760.81 frames. 
], batch size: 20, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:10:36,349 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225295.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:10:52,626 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.402e+02 2.844e+02 3.449e+02 6.425e+02, threshold=5.688e+02, percent-clipped=2.0 +2023-02-09 01:10:55,666 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225320.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:11:08,193 INFO [train.py:901] (3/4) Epoch 28, batch 7100, loss[loss=0.1782, simple_loss=0.2462, pruned_loss=0.05512, over 7252.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.28, pruned_loss=0.05612, over 1608729.12 frames. ], batch size: 16, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:11:43,041 INFO [train.py:901] (3/4) Epoch 28, batch 7150, loss[loss=0.1828, simple_loss=0.2739, pruned_loss=0.04581, over 8472.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2795, pruned_loss=0.05585, over 1609379.06 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:11:54,513 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.5798, 1.5734, 5.7089, 2.2861, 5.1844, 4.7798, 5.2434, 5.1505], + device='cuda:3'), covar=tensor([0.0502, 0.4955, 0.0437, 0.4003, 0.0946, 0.0847, 0.0530, 0.0528], + device='cuda:3'), in_proj_covar=tensor([0.0684, 0.0667, 0.0736, 0.0662, 0.0747, 0.0636, 0.0646, 0.0721], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:12:05,408 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.377e+02 2.906e+02 3.542e+02 6.036e+02, threshold=5.811e+02, percent-clipped=2.0 +2023-02-09 01:12:21,602 INFO [train.py:901] (3/4) Epoch 28, batch 7200, loss[loss=0.1825, simple_loss=0.2693, pruned_loss=0.04785, over 8518.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2805, pruned_loss=0.05684, over 1611028.58 frames. 
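The `attn_weights_entropy` tensors are a per-head health diagnostic: a value near zero (such as the 0.28 to 0.60 entries in some rows above) means that head attends almost one-hot, while values near log(key_len) mean it is close to uniform. A hedged sketch of how such a statistic can be computed from softmax-normalized attention weights; the exact reduction used in `zipformer.py` may differ:

```python
import torch

def attn_weights_entropy(attn: torch.Tensor) -> torch.Tensor:
    """Mean entropy of the attention distributions, one value per head.

    attn: (num_heads, query_len, key_len), rows already softmax-normalized.
    """
    eps = 1.0e-20
    entropy = -(attn * (attn + eps).log()).sum(dim=-1)  # (heads, query_len)
    return entropy.mean(dim=-1)                          # (heads,)

# Toy check: a uniform head has entropy log(key_len); a peaky head is near 0.
heads, q_len, k_len = 8, 10, 16
logits = torch.randn(heads, q_len, k_len)
logits[0] *= 20.0   # make head 0 nearly one-hot, so its entropy collapses
attn = logits.softmax(dim=-1)
print(attn_weights_entropy(attn))
```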
], batch size: 26, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:12:35,183 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225457.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:36,589 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225459.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:40,499 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225465.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:45,517 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6849, 4.6901, 4.1991, 2.0615, 4.0667, 4.2950, 4.1654, 4.1283], + device='cuda:3'), covar=tensor([0.0598, 0.0445, 0.0883, 0.4423, 0.0886, 0.0802, 0.1126, 0.0815], + device='cuda:3'), in_proj_covar=tensor([0.0544, 0.0459, 0.0452, 0.0557, 0.0444, 0.0466, 0.0444, 0.0411], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:12:46,209 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225473.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:51,110 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225480.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:53,936 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:12:56,554 INFO [train.py:901] (3/4) Epoch 28, batch 7250, loss[loss=0.172, simple_loss=0.2657, pruned_loss=0.0391, over 8228.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2795, pruned_loss=0.0563, over 1608501.82 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:13:03,606 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225498.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:07,607 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225504.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:13,959 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4189, 2.3085, 3.0288, 2.4803, 2.9599, 2.5470, 2.3416, 1.9032], + device='cuda:3'), covar=tensor([0.5964, 0.5413, 0.2292, 0.4463, 0.3014, 0.3259, 0.1918, 0.5979], + device='cuda:3'), in_proj_covar=tensor([0.0970, 0.1034, 0.0842, 0.1001, 0.1029, 0.0938, 0.0776, 0.0856], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:13:16,362 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.440e+02 2.486e+02 3.022e+02 3.617e+02 8.325e+02, threshold=6.044e+02, percent-clipped=6.0 +2023-02-09 01:13:21,986 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225523.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:13:29,593 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.79 vs. limit=5.0 +2023-02-09 01:13:32,776 INFO [train.py:901] (3/4) Epoch 28, batch 7300, loss[loss=0.2047, simple_loss=0.2926, pruned_loss=0.05838, over 8526.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2807, pruned_loss=0.05693, over 1612851.94 frames. 
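The `zipformer.py:1185` lines track, per encoder stack, a warmup window in batches (`warmup_begin`/`warmup_end`) together with stochastic layer skipping: most batches drop nothing, but occasionally `num_to_drop=1, layers_to_drop={0}` appears even this late in training (`batch_count` is far past `warmup_end` here). The sketch below shows one plausible way such a draw could be made, with entirely hypothetical probabilities; it illustrates the mechanism, not zipformer's actual schedule:

```python
import random

def pick_layers_to_drop(num_layers: int,
                        batch_count: float,
                        warmup_begin: float,
                        warmup_end: float,
                        warmup_p: float = 0.5,   # hypothetical mid-warmup drop prob
                        final_p: float = 0.05    # hypothetical residual drop prob
                        ) -> set:
    """Stochastically choose encoder layers to skip for this batch."""
    if batch_count < warmup_begin:
        p = warmup_p
    elif batch_count < warmup_end:
        # Anneal the drop probability linearly across the warmup window.
        frac = (batch_count - warmup_begin) / (warmup_end - warmup_begin)
        p = warmup_p + frac * (final_p - warmup_p)
    else:
        p = final_p  # a little layer dropout survives past warmup
    return {i for i in range(num_layers) if random.random() < p}

layers_to_drop = pick_layers_to_drop(
    num_layers=12, batch_count=225088.0,
    warmup_begin=666.7, warmup_end=1333.3)
print(f"num_to_drop={len(layers_to_drop)}, layers_to_drop={layers_to_drop}")
```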
], batch size: 28, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:13:34,179 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225540.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:00,402 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225577.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:14:02,411 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225580.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:07,681 INFO [train.py:901] (3/4) Epoch 28, batch 7350, loss[loss=0.1815, simple_loss=0.2779, pruned_loss=0.04253, over 8294.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2804, pruned_loss=0.05695, over 1612369.29 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 8.0 +2023-02-09 01:14:24,556 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 01:14:27,897 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.502e+02 2.380e+02 2.753e+02 3.463e+02 7.224e+02, threshold=5.506e+02, percent-clipped=3.0 +2023-02-09 01:14:29,530 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225619.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:14:42,948 INFO [train.py:901] (3/4) Epoch 28, batch 7400, loss[loss=0.1796, simple_loss=0.2644, pruned_loss=0.0474, over 7922.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2808, pruned_loss=0.05712, over 1613569.94 frames. ], batch size: 20, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:14:42,964 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 01:14:57,030 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6498, 1.8696, 2.6223, 1.5318, 2.1561, 1.8919, 1.7198, 2.0704], + device='cuda:3'), covar=tensor([0.1507, 0.2213, 0.0701, 0.3651, 0.1453, 0.2675, 0.1941, 0.2104], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0638, 0.0562, 0.0673, 0.0664, 0.0614, 0.0564, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:15:19,326 INFO [train.py:901] (3/4) Epoch 28, batch 7450, loss[loss=0.208, simple_loss=0.2898, pruned_loss=0.06312, over 8355.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2814, pruned_loss=0.05746, over 1608405.78 frames. ], batch size: 24, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:15:25,008 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 01:15:40,012 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.685e+02 2.396e+02 3.007e+02 3.866e+02 7.466e+02, threshold=6.014e+02, percent-clipped=6.0 +2023-02-09 01:15:54,482 INFO [train.py:901] (3/4) Epoch 28, batch 7500, loss[loss=0.1804, simple_loss=0.2733, pruned_loss=0.04381, over 8320.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2819, pruned_loss=0.05789, over 1604934.14 frames. ], batch size: 25, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:16:32,496 INFO [train.py:901] (3/4) Epoch 28, batch 7550, loss[loss=0.1853, simple_loss=0.2548, pruned_loss=0.05794, over 7711.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2811, pruned_loss=0.05766, over 1603911.87 frames. 
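In every `train.py:901` line, `loss[...]` covers only the current batch (a few thousand frames), while `tot_loss[...]` is a frame-weighted running average over roughly 1.6 M frames, which is why it moves so slowly from batch to batch. A minimal sketch of one way to keep such a statistic, assuming exponential decay per batch (icefall's own metrics tracker may window and weight the sums differently):

```python
class RunningLoss:
    """Frame-weighted running average of the training loss (a sketch)."""

    def __init__(self, decay: float = 0.995):  # decay rate is hypothetical
        self.decay = decay
        self.loss_sum = 0.0  # decayed sum of (batch_loss * num_frames)
        self.frames = 0.0    # decayed sum of num_frames

    def update(self, batch_loss: float, num_frames: float) -> float:
        self.loss_sum = self.decay * self.loss_sum + batch_loss * num_frames
        self.frames = self.decay * self.frames + num_frames
        return self.loss_sum / self.frames  # the reported tot_loss

tracker = RunningLoss()
for batch_loss, frames in [(0.2433, 8448.0), (0.2596, 6986.0), (0.1969, 8535.0)]:
    tot = tracker.update(batch_loss, frames)
print(f"tot_loss={tot:.4f}, over {tracker.frames:.2f} frames")
```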
], batch size: 18, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:16:41,873 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225801.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:16:52,716 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.729e+02 2.388e+02 3.127e+02 4.485e+02 1.321e+03, threshold=6.254e+02, percent-clipped=11.0 +2023-02-09 01:16:57,642 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225824.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:05,959 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225836.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:07,134 INFO [train.py:901] (3/4) Epoch 28, batch 7600, loss[loss=0.1703, simple_loss=0.2586, pruned_loss=0.04101, over 8249.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05711, over 1602397.61 frames. ], batch size: 22, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:17:17,014 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1068, 3.4378, 2.2309, 2.9044, 2.8719, 2.0388, 2.8748, 3.2032], + device='cuda:3'), covar=tensor([0.1855, 0.0425, 0.1279, 0.0827, 0.0760, 0.1628, 0.1062, 0.1065], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0246, 0.0342, 0.0314, 0.0302, 0.0347, 0.0349, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 01:17:23,290 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225861.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:24,625 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0250, 1.6514, 1.5064, 1.5346, 1.3386, 1.3954, 1.3136, 1.2879], + device='cuda:3'), covar=tensor([0.1200, 0.0501, 0.1311, 0.0614, 0.0847, 0.1528, 0.0930, 0.0826], + device='cuda:3'), in_proj_covar=tensor([0.0358, 0.0246, 0.0342, 0.0314, 0.0302, 0.0347, 0.0349, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 01:17:27,276 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225867.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:34,160 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=225875.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:35,175 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.46 vs. limit=2.0 +2023-02-09 01:17:37,638 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225880.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:40,905 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225884.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:17:43,473 INFO [train.py:901] (3/4) Epoch 28, batch 7650, loss[loss=0.2061, simple_loss=0.3134, pruned_loss=0.04943, over 8520.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2806, pruned_loss=0.05709, over 1603257.22 frames. 
], batch size: 28, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:17:51,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=225900.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:01,182 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=225913.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:03,359 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225916.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:03,830 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.518e+02 2.353e+02 2.789e+02 3.444e+02 7.654e+02, threshold=5.579e+02, percent-clipped=1.0 +2023-02-09 01:18:06,735 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=225921.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:18:18,652 INFO [train.py:901] (3/4) Epoch 28, batch 7700, loss[loss=0.1972, simple_loss=0.2811, pruned_loss=0.05664, over 7702.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2807, pruned_loss=0.05746, over 1597323.86 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:18:19,540 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225939.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:44,242 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 01:18:49,252 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225982.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:18:53,155 INFO [train.py:901] (3/4) Epoch 28, batch 7750, loss[loss=0.1816, simple_loss=0.263, pruned_loss=0.05008, over 7518.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2802, pruned_loss=0.05672, over 1601286.02 frames. ], batch size: 18, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:19:01,896 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=225999.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:19:15,720 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.439e+02 2.815e+02 3.514e+02 7.333e+02, threshold=5.630e+02, percent-clipped=1.0 +2023-02-09 01:19:29,680 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226036.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:19:30,737 INFO [train.py:901] (3/4) Epoch 28, batch 7800, loss[loss=0.2559, simple_loss=0.3301, pruned_loss=0.09089, over 8666.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2817, pruned_loss=0.05776, over 1603203.27 frames. ], batch size: 34, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:19:45,520 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226059.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:20:01,006 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2476, 2.0663, 2.6397, 2.2530, 2.6588, 2.3387, 2.1954, 1.5463], + device='cuda:3'), covar=tensor([0.5915, 0.5314, 0.2249, 0.4081, 0.2506, 0.3289, 0.1953, 0.5534], + device='cuda:3'), in_proj_covar=tensor([0.0967, 0.1033, 0.0839, 0.1001, 0.1028, 0.0938, 0.0775, 0.0856], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:20:05,562 INFO [train.py:901] (3/4) Epoch 28, batch 7850, loss[loss=0.2099, simple_loss=0.2968, pruned_loss=0.06144, over 8587.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05714, over 1605844.33 frames. 
], batch size: 31, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:20:25,277 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.578e+02 2.405e+02 3.030e+02 3.828e+02 1.208e+03, threshold=6.060e+02, percent-clipped=4.0 +2023-02-09 01:20:39,671 INFO [train.py:901] (3/4) Epoch 28, batch 7900, loss[loss=0.2396, simple_loss=0.3155, pruned_loss=0.08185, over 8602.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2822, pruned_loss=0.05734, over 1608888.16 frames. ], batch size: 31, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:21:02,884 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226172.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:13,381 INFO [train.py:901] (3/4) Epoch 28, batch 7950, loss[loss=0.2279, simple_loss=0.303, pruned_loss=0.07644, over 8292.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2813, pruned_loss=0.0569, over 1608347.19 frames. ], batch size: 23, lr: 2.67e-03, grad_scale: 16.0 +2023-02-09 01:21:18,310 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226195.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:19,708 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226197.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:33,035 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.474e+02 2.920e+02 3.612e+02 7.690e+02, threshold=5.839e+02, percent-clipped=4.0 +2023-02-09 01:21:35,322 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226220.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:37,938 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226224.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:47,691 INFO [train.py:901] (3/4) Epoch 28, batch 8000, loss[loss=0.1662, simple_loss=0.2437, pruned_loss=0.04433, over 7232.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2816, pruned_loss=0.05681, over 1612402.02 frames. ], batch size: 16, lr: 2.66e-03, grad_scale: 16.0 +2023-02-09 01:21:47,896 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226238.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:57,011 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=226251.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:21:59,709 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226255.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:00,950 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226257.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:05,095 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226263.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:17,272 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:22:22,556 INFO [train.py:901] (3/4) Epoch 28, batch 8050, loss[loss=0.1461, simple_loss=0.2276, pruned_loss=0.03232, over 7298.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2793, pruned_loss=0.05632, over 1598361.91 frames. 
], batch size: 16, lr: 2.66e-03, grad_scale: 16.0 +2023-02-09 01:22:25,399 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226292.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:22:42,674 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.612e+02 2.434e+02 3.095e+02 3.696e+02 6.520e+02, threshold=6.190e+02, percent-clipped=3.0 +2023-02-09 01:22:42,879 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226317.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:22:57,839 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-09 01:23:01,698 INFO [train.py:901] (3/4) Epoch 29, batch 0, loss[loss=0.1951, simple_loss=0.2822, pruned_loss=0.05399, over 8024.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2822, pruned_loss=0.05399, over 8024.00 frames. ], batch size: 22, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:23:01,698 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 01:23:13,264 INFO [train.py:935] (3/4) Epoch 29, validation: loss=0.1705, simple_loss=0.2705, pruned_loss=0.03528, over 944034.00 frames. +2023-02-09 01:23:13,265 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6473MB +2023-02-09 01:23:26,224 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226339.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:23:29,645 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-09 01:23:39,592 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.90 vs. limit=5.0 +2023-02-09 01:23:49,917 INFO [train.py:901] (3/4) Epoch 29, batch 50, loss[loss=0.2171, simple_loss=0.295, pruned_loss=0.0696, over 8445.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2841, pruned_loss=0.05795, over 363541.11 frames. ], batch size: 27, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:23:50,828 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226372.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:23:53,661 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0888, 1.2399, 1.1828, 0.8399, 1.2110, 1.0172, 0.1414, 1.2262], + device='cuda:3'), covar=tensor([0.0474, 0.0431, 0.0411, 0.0576, 0.0486, 0.1056, 0.0953, 0.0351], + device='cuda:3'), in_proj_covar=tensor([0.0478, 0.0414, 0.0368, 0.0462, 0.0398, 0.0555, 0.0406, 0.0443], + device='cuda:3'), out_proj_covar=tensor([1.2657e-04, 1.0724e-04, 9.5925e-05, 1.2073e-04, 1.0402e-04, 1.5462e-04, + 1.0826e-04, 1.1606e-04], device='cuda:3') +2023-02-09 01:24:06,018 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-09 01:24:12,516 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226403.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:24:22,929 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.399e+02 2.293e+02 2.936e+02 3.721e+02 6.222e+02, threshold=5.872e+02, percent-clipped=1.0 +2023-02-09 01:24:25,776 INFO [train.py:901] (3/4) Epoch 29, batch 100, loss[loss=0.1612, simple_loss=0.2382, pruned_loss=0.04215, over 8036.00 frames. ], tot_loss[loss=0.201, simple_loss=0.2852, pruned_loss=0.05841, over 646663.15 frames. ], batch size: 22, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:24:30,616 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. 
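At the epoch boundary above (`Epoch 29, batch 0`), training pauses to compute a validation loss over a fixed held-out set (about 944 k frames here) and prints the peak CUDA memory allocated so far. A self-contained sketch of that step with a toy model; the batch layout and loss function are stand-ins, not the recipe's:

```python
import torch
import torch.nn as nn

def compute_validation_loss(model: nn.Module, valid_loader, device: str = "cpu"):
    """Frame-weighted average loss over the validation set (cf. train.py:926/935)."""
    was_training = model.training
    model.eval()
    tot_loss, tot_frames = 0.0, 0.0
    with torch.no_grad():
        for feats, targets in valid_loader:  # hypothetical (B, T, F) batches
            feats, targets = feats.to(device), targets.to(device)
            num_frames = feats.shape[0] * feats.shape[1]
            loss = nn.functional.mse_loss(model(feats), targets)
            tot_loss += loss.item() * num_frames
            tot_frames += num_frames
    if was_training:
        model.train()
    return tot_loss / tot_frames, tot_frames

# Toy run with a stand-in model and two fake batches.
model = nn.Linear(40, 40)
loader = [(torch.randn(4, 100, 40), torch.randn(4, 100, 40)) for _ in range(2)]
val_loss, frames = compute_validation_loss(model, loader)
print(f"validation: loss={val_loss:.4f}, over {frames:.2f} frames")
if torch.cuda.is_available():
    mb = torch.cuda.max_memory_allocated() // (1024 * 1024)
    print(f"Maximum memory allocated so far is {mb}MB")
```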
Duration: 0.97725 +2023-02-09 01:24:33,810 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3404, 2.0482, 2.5304, 2.1533, 2.4782, 2.3456, 2.2185, 1.4171], + device='cuda:3'), covar=tensor([0.5481, 0.4832, 0.2208, 0.4135, 0.2525, 0.3131, 0.1898, 0.5395], + device='cuda:3'), in_proj_covar=tensor([0.0969, 0.1034, 0.0840, 0.1003, 0.1030, 0.0940, 0.0776, 0.0858], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:25:02,686 INFO [train.py:901] (3/4) Epoch 29, batch 150, loss[loss=0.1757, simple_loss=0.2646, pruned_loss=0.04338, over 8256.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.283, pruned_loss=0.05701, over 861909.54 frames. ], batch size: 24, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:25:34,578 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.592e+02 2.438e+02 2.916e+02 4.111e+02 7.524e+02, threshold=5.832e+02, percent-clipped=2.0 +2023-02-09 01:25:35,534 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226518.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:25:37,497 INFO [train.py:901] (3/4) Epoch 29, batch 200, loss[loss=0.2152, simple_loss=0.2947, pruned_loss=0.06783, over 8444.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2835, pruned_loss=0.05765, over 1033869.09 frames. ], batch size: 29, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:12,528 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5851, 1.6624, 2.0940, 1.6425, 0.9810, 1.6717, 2.1241, 2.2030], + device='cuda:3'), covar=tensor([0.0487, 0.1176, 0.1516, 0.1380, 0.0602, 0.1396, 0.0650, 0.0581], + device='cuda:3'), in_proj_covar=tensor([0.0099, 0.0153, 0.0189, 0.0161, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 01:26:12,614 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4069, 2.3460, 3.0723, 2.4468, 2.9403, 2.5693, 2.4642, 2.0684], + device='cuda:3'), covar=tensor([0.5876, 0.5170, 0.2132, 0.4836, 0.3128, 0.3160, 0.1856, 0.5815], + device='cuda:3'), in_proj_covar=tensor([0.0965, 0.1030, 0.0837, 0.0999, 0.1025, 0.0935, 0.0773, 0.0852], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:26:13,673 INFO [train.py:901] (3/4) Epoch 29, batch 250, loss[loss=0.2481, simple_loss=0.3333, pruned_loss=0.08142, over 8506.00 frames. ], tot_loss[loss=0.2005, simple_loss=0.2847, pruned_loss=0.05819, over 1169184.76 frames. ], batch size: 26, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:26,060 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-09 01:26:31,160 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=226595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:31,310 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:33,827 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. 
Duration: 25.45 +2023-02-09 01:26:46,401 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.514e+02 3.003e+02 3.645e+02 8.891e+02, threshold=6.006e+02, percent-clipped=9.0 +2023-02-09 01:26:48,784 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226620.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:26:49,255 INFO [train.py:901] (3/4) Epoch 29, batch 300, loss[loss=0.2028, simple_loss=0.2794, pruned_loss=0.06311, over 7926.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.284, pruned_loss=0.05806, over 1269378.88 frames. ], batch size: 20, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:26:54,305 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226628.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:12,772 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226653.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:25,799 INFO [train.py:901] (3/4) Epoch 29, batch 350, loss[loss=0.19, simple_loss=0.286, pruned_loss=0.04699, over 8467.00 frames. ], tot_loss[loss=0.2006, simple_loss=0.2846, pruned_loss=0.05829, over 1347240.17 frames. ], batch size: 25, lr: 2.62e-03, grad_scale: 16.0 +2023-02-09 01:27:45,005 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2254, 2.6196, 2.7870, 1.5549, 3.2960, 1.8669, 1.5887, 2.2208], + device='cuda:3'), covar=tensor([0.0921, 0.0477, 0.0385, 0.1027, 0.0514, 0.1071, 0.1055, 0.0738], + device='cuda:3'), in_proj_covar=tensor([0.0483, 0.0418, 0.0372, 0.0466, 0.0401, 0.0559, 0.0408, 0.0447], + device='cuda:3'), out_proj_covar=tensor([1.2785e-04, 1.0831e-04, 9.7018e-05, 1.2160e-04, 1.0475e-04, 1.5584e-04, + 1.0894e-04, 1.1705e-04], device='cuda:3') +2023-02-09 01:27:46,744 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4178, 1.6234, 2.0476, 1.3625, 1.4244, 1.6989, 1.4793, 1.4819], + device='cuda:3'), covar=tensor([0.2012, 0.2677, 0.1151, 0.4678, 0.2211, 0.3539, 0.2624, 0.2318], + device='cuda:3'), in_proj_covar=tensor([0.0540, 0.0637, 0.0562, 0.0671, 0.0664, 0.0614, 0.0564, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:27:54,192 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=226710.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:27:58,935 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.666e+02 2.400e+02 2.862e+02 3.557e+02 6.632e+02, threshold=5.725e+02, percent-clipped=2.0 +2023-02-09 01:28:01,716 INFO [train.py:901] (3/4) Epoch 29, batch 400, loss[loss=0.1489, simple_loss=0.2251, pruned_loss=0.03636, over 7203.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05659, over 1405216.33 frames. ], batch size: 16, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:28:37,562 INFO [train.py:901] (3/4) Epoch 29, batch 450, loss[loss=0.2331, simple_loss=0.3137, pruned_loss=0.0763, over 8499.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2818, pruned_loss=0.05688, over 1453364.50 frames. ], batch size: 26, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:28:38,565 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.19 vs. 
limit=5.0 +2023-02-09 01:28:39,871 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226774.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:28:53,563 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6752, 2.1627, 3.5492, 1.8182, 1.7358, 3.5356, 0.5984, 2.2267], + device='cuda:3'), covar=tensor([0.1203, 0.1227, 0.0204, 0.1380, 0.2313, 0.0353, 0.2008, 0.1134], + device='cuda:3'), in_proj_covar=tensor([0.0202, 0.0208, 0.0138, 0.0224, 0.0280, 0.0148, 0.0174, 0.0200], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 01:28:56,591 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0 +2023-02-09 01:28:58,207 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226799.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:29:11,123 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.449e+02 2.964e+02 3.856e+02 9.700e+02, threshold=5.929e+02, percent-clipped=9.0 +2023-02-09 01:29:13,785 INFO [train.py:901] (3/4) Epoch 29, batch 500, loss[loss=0.2069, simple_loss=0.3035, pruned_loss=0.05512, over 8096.00 frames. ], tot_loss[loss=0.2003, simple_loss=0.2839, pruned_loss=0.05829, over 1493689.32 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:29:48,207 INFO [train.py:901] (3/4) Epoch 29, batch 550, loss[loss=0.1894, simple_loss=0.276, pruned_loss=0.05142, over 8461.00 frames. ], tot_loss[loss=0.2012, simple_loss=0.2845, pruned_loss=0.05901, over 1521808.05 frames. ], batch size: 27, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:29:51,494 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.25 vs. limit=2.0 +2023-02-09 01:30:21,915 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.374e+02 2.486e+02 3.156e+02 4.092e+02 1.034e+03, threshold=6.313e+02, percent-clipped=6.0 +2023-02-09 01:30:24,629 INFO [train.py:901] (3/4) Epoch 29, batch 600, loss[loss=0.1813, simple_loss=0.2622, pruned_loss=0.05027, over 7202.00 frames. ], tot_loss[loss=0.2009, simple_loss=0.284, pruned_loss=0.05887, over 1538431.20 frames. ], batch size: 16, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:30:28,544 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.16 vs. limit=5.0 +2023-02-09 01:30:35,115 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2652, 1.5441, 3.4314, 1.5177, 2.4533, 3.7300, 3.8785, 3.2660], + device='cuda:3'), covar=tensor([0.1025, 0.1875, 0.0365, 0.2214, 0.1173, 0.0242, 0.0534, 0.0488], + device='cuda:3'), in_proj_covar=tensor([0.0310, 0.0327, 0.0295, 0.0325, 0.0327, 0.0278, 0.0444, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 01:30:38,689 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3377, 2.2167, 2.7488, 2.4044, 2.7458, 2.3821, 2.2299, 1.7625], + device='cuda:3'), covar=tensor([0.5493, 0.5182, 0.2149, 0.3754, 0.2536, 0.3249, 0.1917, 0.5201], + device='cuda:3'), in_proj_covar=tensor([0.0965, 0.1033, 0.0838, 0.0999, 0.1026, 0.0936, 0.0772, 0.0852], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:30:43,053 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. 
Duration: 29.816625 +2023-02-09 01:30:56,508 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=226966.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:30:59,759 INFO [train.py:901] (3/4) Epoch 29, batch 650, loss[loss=0.1866, simple_loss=0.2604, pruned_loss=0.05641, over 7667.00 frames. ], tot_loss[loss=0.2, simple_loss=0.283, pruned_loss=0.05853, over 1557331.46 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:31:13,841 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=226991.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:31:25,429 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227007.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:31:32,762 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.693e+02 2.425e+02 2.942e+02 3.859e+02 6.314e+02, threshold=5.885e+02, percent-clipped=1.0 +2023-02-09 01:31:36,256 INFO [train.py:901] (3/4) Epoch 29, batch 700, loss[loss=0.1828, simple_loss=0.269, pruned_loss=0.04833, over 8132.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2818, pruned_loss=0.05755, over 1566709.54 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:31:56,846 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9297, 2.1983, 1.8084, 3.0759, 2.0406, 1.9062, 2.2135, 2.3798], + device='cuda:3'), covar=tensor([0.1250, 0.1099, 0.1527, 0.0431, 0.1025, 0.1542, 0.0859, 0.0888], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0196, 0.0245, 0.0214, 0.0204, 0.0249, 0.0251, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 01:32:12,699 INFO [train.py:901] (3/4) Epoch 29, batch 750, loss[loss=0.2001, simple_loss=0.293, pruned_loss=0.05363, over 8311.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2809, pruned_loss=0.05737, over 1575170.98 frames. ], batch size: 25, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:32:31,397 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. Duration: 30.1555625 +2023-02-09 01:32:40,262 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775 +2023-02-09 01:32:44,348 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.547e+02 3.055e+02 4.023e+02 1.198e+03, threshold=6.109e+02, percent-clipped=3.0 +2023-02-09 01:32:47,755 INFO [train.py:901] (3/4) Epoch 29, batch 800, loss[loss=0.169, simple_loss=0.2503, pruned_loss=0.04387, over 7920.00 frames. ], tot_loss[loss=0.1989, simple_loss=0.2819, pruned_loss=0.05795, over 1582414.66 frames. ], batch size: 20, lr: 2.61e-03, grad_scale: 16.0 +2023-02-09 01:33:24,592 INFO [train.py:901] (3/4) Epoch 29, batch 850, loss[loss=0.1708, simple_loss=0.2419, pruned_loss=0.04991, over 7552.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2813, pruned_loss=0.0576, over 1587660.51 frames. ], batch size: 18, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:33:57,024 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.463e+02 2.886e+02 3.949e+02 8.845e+02, threshold=5.773e+02, percent-clipped=3.0 +2023-02-09 01:33:59,146 INFO [train.py:901] (3/4) Epoch 29, batch 900, loss[loss=0.2075, simple_loss=0.2856, pruned_loss=0.06474, over 8470.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2803, pruned_loss=0.05721, over 1591229.17 frames. 
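The `scaling.py:679` `Whitening` entries compare a measured covariance-anisotropy metric against a limit (2.0 for the grouped 96- and 192-channel cases above, 5.0 for the full 256-channel one); staying under the limit means the activations remain close to white. The log does not show zipformer's exact formula, so the sketch below uses one standard scale-free choice as an assumption: for covariance eigenvalues lambda_i, compute d * sum(lambda_i^2) / (sum(lambda_i))^2, which equals 1.0 when all eigenvalues are equal and grows as energy concentrates in a few directions:

```python
import torch

def whitening_metric(x: torch.Tensor, num_groups: int) -> float:
    """Anisotropy of the feature covariance; 1.0 means perfectly white.

    x: (num_frames, num_channels); channels are split into num_groups groups
    and the metric is averaged over groups.
    """
    num_frames, num_channels = x.shape
    d = num_channels // num_groups
    x = x.reshape(num_frames, num_groups, d).transpose(0, 1)  # (G, N, d)
    x = x - x.mean(dim=1, keepdim=True)
    cov = x.transpose(1, 2) @ x / num_frames                  # (G, d, d)
    eigs = torch.linalg.eigvalsh(cov)                         # (G, d)
    metric = d * (eigs ** 2).sum(dim=-1) / eigs.sum(dim=-1) ** 2
    return metric.mean().item()

x = torch.randn(1000, 192)  # white Gaussian features stay near metric = 1
m = whitening_metric(x, num_groups=8)
print(f"Whitening: num_groups=8, num_channels=192, metric={m:.2f} vs. limit=2.0")
```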
], batch size: 25, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:34:35,609 INFO [train.py:901] (3/4) Epoch 29, batch 950, loss[loss=0.2124, simple_loss=0.2916, pruned_loss=0.06658, over 8625.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.282, pruned_loss=0.0582, over 1602537.10 frames. ], batch size: 31, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:04,851 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125 +2023-02-09 01:35:08,996 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.642e+02 2.581e+02 3.033e+02 3.905e+02 1.035e+03, threshold=6.066e+02, percent-clipped=4.0 +2023-02-09 01:35:11,166 INFO [train.py:901] (3/4) Epoch 29, batch 1000, loss[loss=0.2059, simple_loss=0.2843, pruned_loss=0.06378, over 7969.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2823, pruned_loss=0.05803, over 1606033.10 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:32,312 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:35:39,814 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285 +2023-02-09 01:35:47,242 INFO [train.py:901] (3/4) Epoch 29, batch 1050, loss[loss=0.2824, simple_loss=0.3549, pruned_loss=0.105, over 8598.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2824, pruned_loss=0.05796, over 1608084.84 frames. ], batch size: 34, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:35:52,703 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735 +2023-02-09 01:36:22,043 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.565e+02 2.373e+02 3.020e+02 3.651e+02 1.051e+03, threshold=6.040e+02, percent-clipped=1.0 +2023-02-09 01:36:23,013 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4810, 1.9876, 3.0609, 1.7259, 1.6753, 3.0068, 0.9627, 2.1739], + device='cuda:3'), covar=tensor([0.1298, 0.1212, 0.0245, 0.1177, 0.2153, 0.0314, 0.1762, 0.1132], + device='cuda:3'), in_proj_covar=tensor([0.0202, 0.0207, 0.0137, 0.0224, 0.0278, 0.0147, 0.0173, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 01:36:24,309 INFO [train.py:901] (3/4) Epoch 29, batch 1100, loss[loss=0.183, simple_loss=0.2707, pruned_loss=0.04765, over 8462.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2813, pruned_loss=0.05725, over 1606817.24 frames. 
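The printed learning rate decays very slowly through this stretch (2.67e-03 late in epoch 28, 2.62e-03 at the start of epoch 29, 2.61e-03 a few hundred batches later), consistent with a schedule that is smooth in both the batch index and the fractional epoch. The sketch below uses the Eden-style functional form from icefall's `optim.py` with illustrative constants; this run's actual `base_lr`, `lr_batches`, and `lr_epochs` are not recorded in the log:

```python
def eden_lr(base_lr: float, step: int, epoch: float,
            lr_batches: float = 5000.0, lr_epochs: float = 3.5) -> float:
    """Eden-style learning rate: joint power-law decay in steps and epochs.

    The constants here are illustrative defaults, not this run's settings.
    """
    step_factor = ((step ** 2 + lr_batches ** 2) / lr_batches ** 2) ** -0.25
    epoch_factor = ((epoch ** 2 + lr_epochs ** 2) / lr_epochs ** 2) ** -0.25
    return base_lr * step_factor * epoch_factor

# This deep into training, hundreds of batches move the printed lr only in
# its third significant digit, as with 2.67e-03 -> 2.66e-03 above.
for step, epoch in [(226000, 28.9), (226300, 28.9), (228400, 29.1)]:
    print(f"step={step} epoch={epoch} lr: {eden_lr(0.05, step, epoch):.2e}")
```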
], batch size: 25, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:36:30,382 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227429.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:56,362 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227466.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:36:59,078 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7459, 1.4991, 1.7733, 1.3948, 0.9592, 1.5256, 1.6187, 1.5775], + device='cuda:3'), covar=tensor([0.0554, 0.1206, 0.1637, 0.1457, 0.0582, 0.1450, 0.0718, 0.0667], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0163, 0.0102, 0.0164, 0.0114, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 01:36:59,552 INFO [train.py:901] (3/4) Epoch 29, batch 1150, loss[loss=0.1973, simple_loss=0.2664, pruned_loss=0.06406, over 7434.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2795, pruned_loss=0.05664, over 1605842.14 frames. ], batch size: 17, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:06,449 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875 +2023-02-09 01:37:34,273 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.484e+02 2.320e+02 2.698e+02 3.625e+02 7.425e+02, threshold=5.396e+02, percent-clipped=3.0 +2023-02-09 01:37:36,426 INFO [train.py:901] (3/4) Epoch 29, batch 1200, loss[loss=0.1361, simple_loss=0.2188, pruned_loss=0.02668, over 7421.00 frames. ], tot_loss[loss=0.1948, simple_loss=0.2783, pruned_loss=0.05572, over 1608458.91 frames. ], batch size: 17, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:37:53,821 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9843, 2.0883, 1.7566, 2.7312, 1.2514, 1.6200, 1.9197, 2.1688], + device='cuda:3'), covar=tensor([0.0719, 0.0786, 0.0928, 0.0353, 0.1139, 0.1337, 0.0852, 0.0795], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0195, 0.0244, 0.0213, 0.0203, 0.0247, 0.0250, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 01:38:09,059 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0715, 1.4765, 1.7312, 1.3919, 1.0189, 1.5103, 1.8689, 1.7231], + device='cuda:3'), covar=tensor([0.0541, 0.1285, 0.1680, 0.1511, 0.0620, 0.1508, 0.0702, 0.0634], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0154, 0.0190, 0.0162, 0.0102, 0.0163, 0.0113, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 01:38:11,539 INFO [train.py:901] (3/4) Epoch 29, batch 1250, loss[loss=0.1985, simple_loss=0.2856, pruned_loss=0.0557, over 8103.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2788, pruned_loss=0.05587, over 1613091.10 frames. 
], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:38:13,783 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4977, 2.3458, 1.8142, 2.1604, 1.9762, 1.5795, 1.9447, 1.9519], + device='cuda:3'), covar=tensor([0.1425, 0.0493, 0.1171, 0.0617, 0.0782, 0.1547, 0.1000, 0.0980], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0246, 0.0344, 0.0314, 0.0303, 0.0349, 0.0351, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 01:38:19,537 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6009, 2.0620, 3.1442, 1.5097, 2.3864, 2.0395, 1.7558, 2.3702], + device='cuda:3'), covar=tensor([0.1985, 0.2675, 0.0886, 0.4868, 0.2074, 0.3551, 0.2590, 0.2408], + device='cuda:3'), in_proj_covar=tensor([0.0539, 0.0639, 0.0564, 0.0672, 0.0667, 0.0617, 0.0565, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:38:46,454 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 2.398e+02 2.828e+02 3.393e+02 7.704e+02, threshold=5.657e+02, percent-clipped=4.0 +2023-02-09 01:38:48,670 INFO [train.py:901] (3/4) Epoch 29, batch 1300, loss[loss=0.2033, simple_loss=0.2821, pruned_loss=0.06222, over 8238.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2789, pruned_loss=0.05565, over 1608240.56 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:39:24,555 INFO [train.py:901] (3/4) Epoch 29, batch 1350, loss[loss=0.1712, simple_loss=0.249, pruned_loss=0.0467, over 7435.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2781, pruned_loss=0.05519, over 1605185.38 frames. ], batch size: 17, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:39:52,810 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227711.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:39:58,202 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.335e+02 2.785e+02 3.650e+02 1.055e+03, threshold=5.570e+02, percent-clipped=4.0 +2023-02-09 01:40:00,379 INFO [train.py:901] (3/4) Epoch 29, batch 1400, loss[loss=0.2634, simple_loss=0.3418, pruned_loss=0.09253, over 8360.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2792, pruned_loss=0.05581, over 1610138.09 frames. ], batch size: 24, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:40:01,311 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=227722.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:20,607 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=227747.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:38,114 INFO [train.py:901] (3/4) Epoch 29, batch 1450, loss[loss=0.2238, simple_loss=0.3128, pruned_loss=0.06742, over 8470.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2795, pruned_loss=0.0554, over 1613677.53 frames. ], batch size: 25, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:40:39,494 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=227773.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:40:47,996 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. 
Duration: 25.85 +2023-02-09 01:41:11,911 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.751e+02 2.369e+02 2.795e+02 3.434e+02 1.018e+03, threshold=5.589e+02, percent-clipped=3.0 +2023-02-09 01:41:14,106 INFO [train.py:901] (3/4) Epoch 29, batch 1500, loss[loss=0.1793, simple_loss=0.2824, pruned_loss=0.03809, over 8324.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2797, pruned_loss=0.05546, over 1616061.13 frames. ], batch size: 25, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:41:32,536 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=227845.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:41:40,143 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.8341, 6.0642, 5.0794, 2.5640, 5.2300, 5.7161, 5.5229, 5.5039], + device='cuda:3'), covar=tensor([0.0491, 0.0335, 0.0931, 0.4437, 0.0780, 0.0622, 0.0956, 0.0641], + device='cuda:3'), in_proj_covar=tensor([0.0545, 0.0458, 0.0456, 0.0563, 0.0446, 0.0470, 0.0442, 0.0414], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:41:51,482 INFO [train.py:901] (3/4) Epoch 29, batch 1550, loss[loss=0.1892, simple_loss=0.2767, pruned_loss=0.05088, over 8354.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2794, pruned_loss=0.05555, over 1617643.84 frames. ], batch size: 24, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:42:04,416 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=227888.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:42:25,539 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 2.452e+02 3.275e+02 4.864e+02 1.208e+03, threshold=6.551e+02, percent-clipped=17.0 +2023-02-09 01:42:27,688 INFO [train.py:901] (3/4) Epoch 29, batch 1600, loss[loss=0.1751, simple_loss=0.2562, pruned_loss=0.04699, over 7770.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2797, pruned_loss=0.05551, over 1615827.29 frames. ], batch size: 19, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:42:27,878 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4076, 1.3941, 1.3762, 1.8373, 0.6174, 1.2617, 1.3238, 1.4971], + device='cuda:3'), covar=tensor([0.1000, 0.0835, 0.1205, 0.0531, 0.1215, 0.1479, 0.0733, 0.0789], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0213, 0.0204, 0.0247, 0.0251, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 01:43:04,965 INFO [train.py:901] (3/4) Epoch 29, batch 1650, loss[loss=0.2033, simple_loss=0.291, pruned_loss=0.05776, over 8038.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2791, pruned_loss=0.05553, over 1610620.91 frames. ], batch size: 22, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:39,886 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.526e+02 2.401e+02 2.687e+02 3.396e+02 6.045e+02, threshold=5.374e+02, percent-clipped=0.0 +2023-02-09 01:43:42,091 INFO [train.py:901] (3/4) Epoch 29, batch 1700, loss[loss=0.1778, simple_loss=0.2579, pruned_loss=0.04885, over 7232.00 frames. ], tot_loss[loss=0.1938, simple_loss=0.278, pruned_loss=0.05484, over 1606840.55 frames. 
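`grad_scale` in the batch lines is the mixed-precision loss scale: it doubled from 8.0 to 16.0 around epoch 28, batch 7400, and has backed off to 8.0 by this point in epoch 29, the signature of dynamic loss scaling that grows after a run of finite gradients and halves on overflow. A sketch with PyTorch's stock `GradScaler`; the growth interval is hypothetical, and the recipe may wrap the scaler in its own logic:

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
scaler = torch.cuda.amp.GradScaler(
    init_scale=8.0,         # matches the grad_scale seen early in this log
    growth_factor=2.0,      # 8.0 -> 16.0 after enough overflow-free steps
    backoff_factor=0.5,     # halved when a step produces inf/nan gradients
    growth_interval=2000,   # hypothetical: stable steps required to grow
    enabled=(device == "cuda"),  # disabled (scale stays 1.0) on CPU-only
)

model = torch.nn.Linear(10, 10).to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
x = torch.randn(4, 10, device=device)

for _ in range(3):
    opt.zero_grad()
    loss = model(x).pow(2).mean()
    scaler.scale(loss).backward()  # scaled loss -> scaled gradients
    scaler.step(opt)               # unscales; skips the step on overflow
    scaler.update()                # grows or backs off the scale
    print(f"grad_scale: {scaler.get_scale()}")
```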
], batch size: 16, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:43:42,959 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4175, 4.3993, 3.9918, 2.0595, 3.9061, 4.0311, 3.9189, 3.8397], + device='cuda:3'), covar=tensor([0.0697, 0.0503, 0.0947, 0.4812, 0.0870, 0.0940, 0.1238, 0.0817], + device='cuda:3'), in_proj_covar=tensor([0.0548, 0.0461, 0.0457, 0.0567, 0.0447, 0.0472, 0.0445, 0.0415], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:43:44,503 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9568, 1.4315, 4.1786, 1.8199, 2.5600, 4.6852, 4.8966, 4.1334], + device='cuda:3'), covar=tensor([0.1406, 0.2127, 0.0306, 0.2173, 0.1172, 0.0221, 0.0564, 0.0546], + device='cuda:3'), in_proj_covar=tensor([0.0312, 0.0329, 0.0298, 0.0327, 0.0329, 0.0280, 0.0450, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 01:43:45,212 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228025.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:06,284 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228055.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:18,101 INFO [train.py:901] (3/4) Epoch 29, batch 1750, loss[loss=0.19, simple_loss=0.2754, pruned_loss=0.05229, over 8079.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2796, pruned_loss=0.0557, over 1610423.76 frames. ], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:44:28,146 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0430, 1.8765, 2.2583, 1.9739, 2.2971, 2.1625, 2.0349, 1.1759], + device='cuda:3'), covar=tensor([0.6227, 0.5114, 0.2262, 0.3921, 0.2645, 0.3478, 0.2095, 0.5633], + device='cuda:3'), in_proj_covar=tensor([0.0974, 0.1038, 0.0842, 0.1005, 0.1030, 0.0942, 0.0776, 0.0857], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 01:44:38,679 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:44:52,744 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 2.425e+02 2.925e+02 3.463e+02 6.679e+02, threshold=5.849e+02, percent-clipped=2.0 +2023-02-09 01:44:55,485 INFO [train.py:901] (3/4) Epoch 29, batch 1800, loss[loss=0.2006, simple_loss=0.2813, pruned_loss=0.05989, over 8301.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2796, pruned_loss=0.05596, over 1606725.84 frames. ], batch size: 23, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:45:11,255 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:12,124 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-09 01:45:28,618 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228169.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:29,267 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228170.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:45:29,728 INFO [train.py:901] (3/4) Epoch 29, batch 1850, loss[loss=0.2119, simple_loss=0.2964, pruned_loss=0.06366, over 8507.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2806, pruned_loss=0.0564, over 1611486.22 frames. 
], batch size: 39, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:45:42,841 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228189.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:46:03,753 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.509e+02 2.932e+02 3.424e+02 5.958e+02, threshold=5.864e+02, percent-clipped=1.0 +2023-02-09 01:46:05,869 INFO [train.py:901] (3/4) Epoch 29, batch 1900, loss[loss=0.1814, simple_loss=0.2672, pruned_loss=0.0478, over 8670.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2809, pruned_loss=0.05628, over 1613564.50 frames. ], batch size: 34, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:46:29,137 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.92 vs. limit=5.0 +2023-02-09 01:46:32,893 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 01:46:38,538 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125 +2023-02-09 01:46:41,414 INFO [train.py:901] (3/4) Epoch 29, batch 1950, loss[loss=0.2092, simple_loss=0.2971, pruned_loss=0.06063, over 8358.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2804, pruned_loss=0.0564, over 1611577.03 frames. ], batch size: 24, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:46:50,972 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92 +2023-02-09 01:47:05,322 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228304.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:10,130 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375 +2023-02-09 01:47:16,244 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.886e+02 2.422e+02 3.056e+02 3.762e+02 8.552e+02, threshold=6.111e+02, percent-clipped=4.0 +2023-02-09 01:47:18,246 INFO [train.py:901] (3/4) Epoch 29, batch 2000, loss[loss=0.1861, simple_loss=0.278, pruned_loss=0.04711, over 7820.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2799, pruned_loss=0.05609, over 1612381.39 frames. ], batch size: 20, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:47:36,284 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228347.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:39,108 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228351.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:44,567 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8999, 2.0782, 2.1519, 1.5069, 2.3263, 1.5658, 0.7695, 2.0364], + device='cuda:3'), covar=tensor([0.0792, 0.0413, 0.0399, 0.0724, 0.0570, 0.1089, 0.1133, 0.0403], + device='cuda:3'), in_proj_covar=tensor([0.0476, 0.0414, 0.0369, 0.0460, 0.0397, 0.0552, 0.0403, 0.0443], + device='cuda:3'), out_proj_covar=tensor([1.2611e-04, 1.0730e-04, 9.6024e-05, 1.2017e-04, 1.0384e-04, 1.5379e-04, + 1.0767e-04, 1.1589e-04], device='cuda:3') +2023-02-09 01:47:52,203 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228369.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:47:53,547 INFO [train.py:901] (3/4) Epoch 29, batch 2050, loss[loss=0.1833, simple_loss=0.2698, pruned_loss=0.0484, over 8088.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2799, pruned_loss=0.05586, over 1616032.15 frames. 
], batch size: 21, lr: 2.61e-03, grad_scale: 8.0 +2023-02-09 01:48:25,963 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.725e+02 2.333e+02 2.866e+02 3.600e+02 5.490e+02, threshold=5.733e+02, percent-clipped=0.0 +2023-02-09 01:48:28,128 INFO [train.py:901] (3/4) Epoch 29, batch 2100, loss[loss=0.174, simple_loss=0.2552, pruned_loss=0.04645, over 7250.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2803, pruned_loss=0.05618, over 1616587.79 frames. ], batch size: 16, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:48:32,344 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228426.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:44,591 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228442.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:48,010 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228447.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:48:50,740 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228451.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:48:59,117 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8044, 2.2010, 3.4005, 1.6958, 2.6640, 2.2863, 1.8939, 2.7264], + device='cuda:3'), covar=tensor([0.1913, 0.2627, 0.0838, 0.4629, 0.1760, 0.3197, 0.2453, 0.2092], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0642, 0.0568, 0.0675, 0.0667, 0.0617, 0.0566, 0.0650], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:49:04,308 INFO [train.py:901] (3/4) Epoch 29, batch 2150, loss[loss=0.2264, simple_loss=0.3039, pruned_loss=0.0744, over 8539.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.05629, over 1614733.87 frames. ], batch size: 28, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:49:05,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.7699, 1.5545, 3.9477, 1.5444, 3.4760, 3.3249, 3.5637, 3.4711], + device='cuda:3'), covar=tensor([0.0764, 0.4517, 0.0771, 0.4459, 0.1280, 0.1074, 0.0744, 0.0860], + device='cuda:3'), in_proj_covar=tensor([0.0686, 0.0669, 0.0744, 0.0664, 0.0750, 0.0639, 0.0647, 0.0721], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:49:14,153 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228484.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:49:37,700 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.659e+02 3.270e+02 3.951e+02 1.171e+03, threshold=6.540e+02, percent-clipped=10.0 +2023-02-09 01:49:39,906 INFO [train.py:901] (3/4) Epoch 29, batch 2200, loss[loss=0.196, simple_loss=0.2892, pruned_loss=0.05139, over 8597.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.05637, over 1617269.82 frames. 
], batch size: 34, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:49:47,014 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2532, 2.0045, 4.4438, 2.0736, 3.9882, 3.7435, 4.0374, 3.9465], + device='cuda:3'), covar=tensor([0.0698, 0.3991, 0.0627, 0.3967, 0.0966, 0.0929, 0.0607, 0.0691], + device='cuda:3'), in_proj_covar=tensor([0.0684, 0.0666, 0.0742, 0.0662, 0.0747, 0.0637, 0.0644, 0.0719], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:50:06,275 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228557.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:08,439 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228560.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:16,510 INFO [train.py:901] (3/4) Epoch 29, batch 2250, loss[loss=0.2047, simple_loss=0.2925, pruned_loss=0.05843, over 8244.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2806, pruned_loss=0.05614, over 1618913.40 frames. ], batch size: 24, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:50:26,598 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228585.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:50:44,246 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8354, 1.7157, 2.7769, 1.3249, 2.4069, 3.0984, 3.2999, 2.3687], + device='cuda:3'), covar=tensor([0.1465, 0.1979, 0.0714, 0.2646, 0.1580, 0.0432, 0.0745, 0.0969], + device='cuda:3'), in_proj_covar=tensor([0.0311, 0.0327, 0.0297, 0.0325, 0.0329, 0.0279, 0.0447, 0.0312], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 01:50:50,353 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.529e+02 2.350e+02 2.877e+02 3.583e+02 6.549e+02, threshold=5.755e+02, percent-clipped=1.0 +2023-02-09 01:50:52,580 INFO [train.py:901] (3/4) Epoch 29, batch 2300, loss[loss=0.2347, simple_loss=0.3115, pruned_loss=0.07897, over 8685.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2796, pruned_loss=0.05524, over 1613873.57 frames. ], batch size: 39, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:50:53,467 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8489, 1.7560, 2.4881, 1.6876, 1.3702, 2.4255, 0.5901, 1.5323], + device='cuda:3'), covar=tensor([0.1365, 0.1280, 0.0296, 0.1030, 0.2354, 0.0384, 0.1856, 0.1158], + device='cuda:3'), in_proj_covar=tensor([0.0202, 0.0206, 0.0138, 0.0223, 0.0278, 0.0149, 0.0173, 0.0199], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 01:51:06,297 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.68 vs. limit=2.0 +2023-02-09 01:51:22,808 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8563, 1.5627, 4.0109, 1.4350, 3.5961, 3.3373, 3.6141, 3.4980], + device='cuda:3'), covar=tensor([0.0644, 0.4471, 0.0639, 0.4247, 0.1158, 0.1009, 0.0647, 0.0786], + device='cuda:3'), in_proj_covar=tensor([0.0686, 0.0669, 0.0746, 0.0666, 0.0751, 0.0640, 0.0647, 0.0722], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:51:28,782 INFO [train.py:901] (3/4) Epoch 29, batch 2350, loss[loss=0.2202, simple_loss=0.2986, pruned_loss=0.07094, over 8583.00 frames. 
], tot_loss[loss=0.1958, simple_loss=0.2803, pruned_loss=0.05559, over 1616296.28 frames. ], batch size: 49, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:51:43,130 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228691.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:51:45,886 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228695.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:02,331 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.434e+02 3.194e+02 3.873e+02 7.294e+02, threshold=6.388e+02, percent-clipped=4.0 +2023-02-09 01:52:04,282 INFO [train.py:901] (3/4) Epoch 29, batch 2400, loss[loss=0.2204, simple_loss=0.3117, pruned_loss=0.06453, over 8199.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2808, pruned_loss=0.05622, over 1615623.44 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:52:17,433 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228740.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:34,957 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228765.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:52:39,615 INFO [train.py:901] (3/4) Epoch 29, batch 2450, loss[loss=0.1739, simple_loss=0.2546, pruned_loss=0.04658, over 7961.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2811, pruned_loss=0.05655, over 1615666.64 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:52:55,045 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=228791.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:53:05,517 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228806.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:08,252 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228810.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:10,296 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=228813.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:13,508 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.470e+02 2.278e+02 2.630e+02 3.527e+02 5.802e+02, threshold=5.259e+02, percent-clipped=0.0 +2023-02-09 01:53:16,199 INFO [train.py:901] (3/4) Epoch 29, batch 2500, loss[loss=0.2011, simple_loss=0.2768, pruned_loss=0.06271, over 6807.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2815, pruned_loss=0.05648, over 1615032.10 frames. ], batch size: 15, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:53:28,181 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=228838.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:53:31,516 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5363, 2.4528, 1.8300, 2.2911, 2.1480, 1.5360, 2.0604, 2.1358], + device='cuda:3'), covar=tensor([0.1513, 0.0492, 0.1269, 0.0598, 0.0706, 0.1634, 0.0965, 0.1020], + device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0244, 0.0343, 0.0315, 0.0302, 0.0349, 0.0350, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 01:53:51,014 INFO [train.py:901] (3/4) Epoch 29, batch 2550, loss[loss=0.2023, simple_loss=0.2876, pruned_loss=0.05848, over 8459.00 frames. ], tot_loss[loss=0.198, simple_loss=0.282, pruned_loss=0.05696, over 1614060.80 frames. 
], batch size: 25, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:54:05,689 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.07 vs. limit=5.0 +2023-02-09 01:54:17,018 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=228906.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:54:25,705 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 2.320e+02 2.761e+02 3.420e+02 6.403e+02, threshold=5.523e+02, percent-clipped=2.0 +2023-02-09 01:54:27,841 INFO [train.py:901] (3/4) Epoch 29, batch 2600, loss[loss=0.2336, simple_loss=0.3136, pruned_loss=0.07679, over 8687.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2811, pruned_loss=0.05657, over 1615783.28 frames. ], batch size: 34, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:54:51,311 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=228953.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:55:03,876 INFO [train.py:901] (3/4) Epoch 29, batch 2650, loss[loss=0.1795, simple_loss=0.2524, pruned_loss=0.05332, over 7782.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.281, pruned_loss=0.05679, over 1615105.72 frames. ], batch size: 19, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:55:12,101 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.75 vs. limit=2.0 +2023-02-09 01:55:37,355 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.671e+02 2.477e+02 2.922e+02 3.574e+02 8.790e+02, threshold=5.845e+02, percent-clipped=2.0 +2023-02-09 01:55:39,297 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.3266, 1.2804, 3.4071, 0.9785, 3.0514, 2.8142, 3.1116, 3.0241], + device='cuda:3'), covar=tensor([0.0693, 0.4460, 0.0792, 0.4461, 0.1221, 0.1147, 0.0690, 0.0779], + device='cuda:3'), in_proj_covar=tensor([0.0682, 0.0663, 0.0742, 0.0662, 0.0744, 0.0636, 0.0641, 0.0716], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:55:39,870 INFO [train.py:901] (3/4) Epoch 29, batch 2700, loss[loss=0.1928, simple_loss=0.2798, pruned_loss=0.05287, over 7814.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2798, pruned_loss=0.05634, over 1612190.97 frames. ], batch size: 20, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:09,693 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.2323, 2.0329, 5.4105, 2.5623, 4.9177, 4.5744, 4.9485, 4.8750], + device='cuda:3'), covar=tensor([0.0522, 0.4397, 0.0448, 0.3801, 0.0953, 0.0878, 0.0512, 0.0553], + device='cuda:3'), in_proj_covar=tensor([0.0680, 0.0662, 0.0740, 0.0661, 0.0743, 0.0635, 0.0640, 0.0715], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 01:56:11,148 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229062.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:14,634 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229066.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:17,775 INFO [train.py:901] (3/4) Epoch 29, batch 2750, loss[loss=0.1941, simple_loss=0.2722, pruned_loss=0.058, over 8089.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2791, pruned_loss=0.05587, over 1613000.86 frames. 
], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:29,445 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229087.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:32,358 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229091.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:56:51,299 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.676e+02 2.406e+02 2.857e+02 3.824e+02 7.570e+02, threshold=5.715e+02, percent-clipped=1.0 +2023-02-09 01:56:53,448 INFO [train.py:901] (3/4) Epoch 29, batch 2800, loss[loss=0.1607, simple_loss=0.244, pruned_loss=0.03871, over 8095.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2796, pruned_loss=0.05617, over 1605576.43 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 01:56:57,317 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.63 vs. limit=2.0 +2023-02-09 01:57:24,974 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229162.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 01:57:31,032 INFO [train.py:901] (3/4) Epoch 29, batch 2850, loss[loss=0.1825, simple_loss=0.2685, pruned_loss=0.04825, over 8093.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2803, pruned_loss=0.05633, over 1608699.98 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:57:43,240 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229187.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 01:58:05,224 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.341e+02 2.443e+02 3.095e+02 3.828e+02 9.615e+02, threshold=6.189e+02, percent-clipped=4.0 +2023-02-09 01:58:07,410 INFO [train.py:901] (3/4) Epoch 29, batch 2900, loss[loss=0.2254, simple_loss=0.2996, pruned_loss=0.07562, over 8440.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.28, pruned_loss=0.05625, over 1607562.12 frames. ], batch size: 27, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:58:15,665 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-09 01:58:44,220 INFO [train.py:901] (3/4) Epoch 29, batch 2950, loss[loss=0.1642, simple_loss=0.2384, pruned_loss=0.04503, over 7208.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05655, over 1609947.25 frames. ], batch size: 16, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:58:47,090 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625 +2023-02-09 01:59:02,255 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229297.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:59:17,278 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.610e+02 2.481e+02 3.005e+02 3.741e+02 9.617e+02, threshold=6.010e+02, percent-clipped=3.0 +2023-02-09 01:59:17,409 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229318.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 01:59:19,282 INFO [train.py:901] (3/4) Epoch 29, batch 3000, loss[loss=0.1983, simple_loss=0.2958, pruned_loss=0.05035, over 8198.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2807, pruned_loss=0.05633, over 1610017.50 frames. 
], batch size: 23, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 01:59:19,282 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 01:59:34,610 INFO [train.py:935] (3/4) Epoch 29, validation: loss=0.17, simple_loss=0.2699, pruned_loss=0.03504, over 944034.00 frames. +2023-02-09 01:59:34,611 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6473MB +2023-02-09 02:00:09,406 INFO [train.py:901] (3/4) Epoch 29, batch 3050, loss[loss=0.2106, simple_loss=0.2902, pruned_loss=0.06555, over 8347.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2801, pruned_loss=0.05603, over 1610431.45 frames. ], batch size: 26, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:00:24,138 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3325, 3.5833, 2.5138, 3.1379, 3.0727, 2.0210, 2.9668, 3.2414], + device='cuda:3'), covar=tensor([0.1685, 0.0436, 0.1206, 0.0746, 0.0732, 0.1682, 0.1022, 0.1076], + device='cuda:3'), in_proj_covar=tensor([0.0354, 0.0243, 0.0342, 0.0313, 0.0300, 0.0348, 0.0349, 0.0318], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 02:00:40,613 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229412.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:00:44,471 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.402e+02 2.505e+02 2.815e+02 3.570e+02 7.212e+02, threshold=5.630e+02, percent-clipped=4.0 +2023-02-09 02:00:46,509 INFO [train.py:901] (3/4) Epoch 29, batch 3100, loss[loss=0.2184, simple_loss=0.3056, pruned_loss=0.06558, over 8506.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2805, pruned_loss=0.05651, over 1609162.67 frames. ], batch size: 28, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:01:15,193 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229461.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:01:21,908 INFO [train.py:901] (3/4) Epoch 29, batch 3150, loss[loss=0.1918, simple_loss=0.2852, pruned_loss=0.0492, over 8474.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2812, pruned_loss=0.05695, over 1608961.18 frames. ], batch size: 25, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:01:24,683 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-09 02:01:39,335 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:01:56,480 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.645e+02 2.412e+02 3.144e+02 3.923e+02 1.015e+03, threshold=6.289e+02, percent-clipped=11.0 +2023-02-09 02:01:58,593 INFO [train.py:901] (3/4) Epoch 29, batch 3200, loss[loss=0.1792, simple_loss=0.2899, pruned_loss=0.03424, over 8234.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05623, over 1611447.37 frames. ], batch size: 24, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:02:25,102 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 02:02:35,456 INFO [train.py:901] (3/4) Epoch 29, batch 3250, loss[loss=0.1564, simple_loss=0.2363, pruned_loss=0.03824, over 7424.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2814, pruned_loss=0.05718, over 1610389.19 frames. 
], batch size: 17, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:02:52,634 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229595.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:09,578 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.589e+02 2.257e+02 2.746e+02 3.374e+02 9.131e+02, threshold=5.492e+02, percent-clipped=1.0 +2023-02-09 02:03:11,691 INFO [train.py:901] (3/4) Epoch 29, batch 3300, loss[loss=0.2143, simple_loss=0.3106, pruned_loss=0.05899, over 8487.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2807, pruned_loss=0.05645, over 1610663.06 frames. ], batch size: 26, lr: 2.60e-03, grad_scale: 16.0 +2023-02-09 02:03:41,141 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229662.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:45,541 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=229668.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:03:47,460 INFO [train.py:901] (3/4) Epoch 29, batch 3350, loss[loss=0.1882, simple_loss=0.2797, pruned_loss=0.0484, over 8544.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2809, pruned_loss=0.05673, over 1611806.59 frames. ], batch size: 49, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:03:52,169 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229677.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:04:03,324 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=229693.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:04:20,882 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.561e+02 2.994e+02 3.779e+02 7.703e+02, threshold=5.989e+02, percent-clipped=7.0 +2023-02-09 02:04:22,314 INFO [train.py:901] (3/4) Epoch 29, batch 3400, loss[loss=0.2196, simple_loss=0.2897, pruned_loss=0.07473, over 8443.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2819, pruned_loss=0.05766, over 1612854.54 frames. ], batch size: 27, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:04:59,076 INFO [train.py:901] (3/4) Epoch 29, batch 3450, loss[loss=0.1807, simple_loss=0.2682, pruned_loss=0.04665, over 8031.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2811, pruned_loss=0.05762, over 1604894.72 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:05:03,519 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229777.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:23,717 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229805.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:25,167 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0712, 2.0917, 1.7993, 2.7638, 1.3180, 1.6991, 1.9189, 2.1374], + device='cuda:3'), covar=tensor([0.0635, 0.0749, 0.0836, 0.0334, 0.1071, 0.1196, 0.0848, 0.0716], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0195, 0.0244, 0.0212, 0.0202, 0.0245, 0.0250, 0.0204], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 02:05:33,259 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.314e+02 2.735e+02 3.470e+02 1.051e+03, threshold=5.470e+02, percent-clipped=3.0 +2023-02-09 02:05:34,613 INFO [train.py:901] (3/4) Epoch 29, batch 3500, loss[loss=0.1907, simple_loss=0.27, pruned_loss=0.05571, over 8095.00 frames. 
], tot_loss[loss=0.1981, simple_loss=0.2812, pruned_loss=0.05753, over 1605372.52 frames. ], batch size: 21, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:05:34,872 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1266, 1.9681, 2.4617, 2.1138, 2.4687, 2.2356, 2.0980, 1.3966], + device='cuda:3'), covar=tensor([0.6161, 0.5278, 0.2175, 0.4054, 0.2743, 0.3332, 0.1953, 0.5451], + device='cuda:3'), in_proj_covar=tensor([0.0970, 0.1034, 0.0841, 0.1005, 0.1029, 0.0942, 0.0777, 0.0858], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 02:05:46,951 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229839.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:05:58,496 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125 +2023-02-09 02:06:12,024 INFO [train.py:901] (3/4) Epoch 29, batch 3550, loss[loss=0.1865, simple_loss=0.2651, pruned_loss=0.05391, over 7434.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2798, pruned_loss=0.05633, over 1608511.37 frames. ], batch size: 17, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:06:41,728 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=229912.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:06:46,512 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.708e+02 2.466e+02 3.015e+02 3.666e+02 8.686e+02, threshold=6.030e+02, percent-clipped=2.0 +2023-02-09 02:06:47,438 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229920.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:06:47,948 INFO [train.py:901] (3/4) Epoch 29, batch 3600, loss[loss=0.1874, simple_loss=0.2706, pruned_loss=0.05211, over 8284.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2809, pruned_loss=0.05713, over 1609442.75 frames. ], batch size: 23, lr: 2.60e-03, grad_scale: 8.0 +2023-02-09 02:07:00,989 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=229939.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:11,758 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=229954.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:07:24,207 INFO [train.py:901] (3/4) Epoch 29, batch 3650, loss[loss=0.2456, simple_loss=0.3152, pruned_loss=0.08798, over 8610.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2791, pruned_loss=0.05624, over 1607757.44 frames. ], batch size: 34, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:07:50,385 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230005.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:00,958 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 2.398e+02 2.862e+02 3.372e+02 7.881e+02, threshold=5.724e+02, percent-clipped=3.0 +2023-02-09 02:08:01,709 INFO [train.py:901] (3/4) Epoch 29, batch 3700, loss[loss=0.169, simple_loss=0.2581, pruned_loss=0.03997, over 8244.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2777, pruned_loss=0.05524, over 1606943.44 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:08:02,485 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230021.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 02:08:06,536 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. 
Duration: 0.9681875 +2023-02-09 02:08:10,859 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230033.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:25,153 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230054.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:28,458 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230058.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:08:37,283 INFO [train.py:901] (3/4) Epoch 29, batch 3750, loss[loss=0.1863, simple_loss=0.2617, pruned_loss=0.05548, over 7657.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2776, pruned_loss=0.05564, over 1601777.38 frames. ], batch size: 19, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:13,034 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.469e+02 3.110e+02 3.966e+02 1.066e+03, threshold=6.219e+02, percent-clipped=4.0 +2023-02-09 02:09:13,774 INFO [train.py:901] (3/4) Epoch 29, batch 3800, loss[loss=0.1976, simple_loss=0.281, pruned_loss=0.05706, over 8246.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2783, pruned_loss=0.05593, over 1605963.22 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:24,875 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230136.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:09:29,249 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5869, 2.0660, 2.9040, 1.4102, 2.1468, 1.8591, 1.8128, 2.1704], + device='cuda:3'), covar=tensor([0.2248, 0.2943, 0.1288, 0.5485, 0.2493, 0.4067, 0.2813, 0.2937], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0641, 0.0567, 0.0675, 0.0668, 0.0616, 0.0567, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:09:40,095 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.15 vs. limit=2.0 +2023-02-09 02:09:47,386 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230168.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:09:49,383 INFO [train.py:901] (3/4) Epoch 29, batch 3850, loss[loss=0.1713, simple_loss=0.2616, pruned_loss=0.04052, over 8016.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2793, pruned_loss=0.0563, over 1608035.83 frames. ], batch size: 22, lr: 2.60e-03, grad_scale: 4.0 +2023-02-09 02:09:53,096 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230176.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:10,990 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230201.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:12,892 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375 +2023-02-09 02:10:17,565 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230210.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:24,394 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.322e+02 2.511e+02 3.297e+02 3.973e+02 1.066e+03, threshold=6.594e+02, percent-clipped=6.0 +2023-02-09 02:10:25,128 INFO [train.py:901] (3/4) Epoch 29, batch 3900, loss[loss=0.1921, simple_loss=0.2635, pruned_loss=0.06037, over 7791.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.28, pruned_loss=0.05631, over 1611232.48 frames. 
], batch size: 19, lr: 2.59e-03, grad_scale: 4.0 +2023-02-09 02:10:36,234 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230235.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:51,873 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230256.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:10:54,165 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230259.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:02,680 INFO [train.py:901] (3/4) Epoch 29, batch 3950, loss[loss=0.1587, simple_loss=0.2436, pruned_loss=0.03692, over 8248.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2802, pruned_loss=0.05622, over 1611644.92 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 4.0 +2023-02-09 02:11:15,909 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7005, 2.5371, 1.8461, 2.2974, 2.2375, 1.5740, 2.1600, 2.2911], + device='cuda:3'), covar=tensor([0.1530, 0.0509, 0.1382, 0.0687, 0.0755, 0.1689, 0.1034, 0.0947], + device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0246, 0.0347, 0.0317, 0.0305, 0.0351, 0.0354, 0.0323], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 02:11:30,500 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230310.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:37,967 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.749e+02 2.564e+02 3.192e+02 4.107e+02 9.729e+02, threshold=6.384e+02, percent-clipped=2.0 +2023-02-09 02:11:38,727 INFO [train.py:901] (3/4) Epoch 29, batch 4000, loss[loss=0.1711, simple_loss=0.2503, pruned_loss=0.046, over 7967.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2792, pruned_loss=0.05603, over 1606652.72 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:11:43,478 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5234, 2.2756, 2.7777, 2.3831, 2.7847, 2.4213, 2.3908, 2.0531], + device='cuda:3'), covar=tensor([0.4125, 0.4218, 0.1847, 0.3354, 0.2129, 0.2797, 0.1633, 0.4146], + device='cuda:3'), in_proj_covar=tensor([0.0963, 0.1032, 0.0837, 0.1002, 0.1027, 0.0937, 0.0774, 0.0854], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 02:11:50,444 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230335.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:11:59,739 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230349.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:12:16,076 INFO [train.py:901] (3/4) Epoch 29, batch 4050, loss[loss=0.1939, simple_loss=0.275, pruned_loss=0.05643, over 7968.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2796, pruned_loss=0.05605, over 1610772.24 frames. 
], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:12:16,260 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230371.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:12:31,107 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230392.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 02:12:48,557 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230417.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 02:12:50,317 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 2.354e+02 2.823e+02 3.644e+02 9.834e+02, threshold=5.645e+02, percent-clipped=2.0 +2023-02-09 02:12:51,008 INFO [train.py:901] (3/4) Epoch 29, batch 4100, loss[loss=0.189, simple_loss=0.2749, pruned_loss=0.05159, over 8138.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2798, pruned_loss=0.05632, over 1614167.90 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:13:22,439 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230464.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:13:26,983 INFO [train.py:901] (3/4) Epoch 29, batch 4150, loss[loss=0.2095, simple_loss=0.2971, pruned_loss=0.06097, over 8480.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2815, pruned_loss=0.0572, over 1614574.93 frames. ], batch size: 29, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:13:38,474 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230486.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:13:56,275 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230512.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:01,809 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.471e+02 3.120e+02 4.398e+02 1.014e+03, threshold=6.241e+02, percent-clipped=11.0 +2023-02-09 02:14:02,509 INFO [train.py:901] (3/4) Epoch 29, batch 4200, loss[loss=0.1872, simple_loss=0.276, pruned_loss=0.04923, over 8194.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2807, pruned_loss=0.05663, over 1612680.03 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:14:16,934 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035 +2023-02-09 02:14:17,778 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230543.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:24,144 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230551.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:14:28,484 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8239, 1.4330, 4.0129, 1.4423, 3.6030, 3.3801, 3.6704, 3.5493], + device='cuda:3'), covar=tensor([0.0648, 0.4590, 0.0648, 0.4300, 0.1100, 0.0966, 0.0609, 0.0731], + device='cuda:3'), in_proj_covar=tensor([0.0682, 0.0664, 0.0744, 0.0659, 0.0746, 0.0638, 0.0642, 0.0718], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:14:38,390 INFO [train.py:901] (3/4) Epoch 29, batch 4250, loss[loss=0.1939, simple_loss=0.2745, pruned_loss=0.05666, over 8701.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2819, pruned_loss=0.05754, over 1616130.18 frames. ], batch size: 39, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:14:41,222 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. 
Duration: 25.2444375 +2023-02-09 02:15:01,588 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230603.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:09,498 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.38 vs. limit=5.0 +2023-02-09 02:15:13,975 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.550e+02 2.532e+02 3.108e+02 3.832e+02 7.900e+02, threshold=6.217e+02, percent-clipped=4.0 +2023-02-09 02:15:14,728 INFO [train.py:901] (3/4) Epoch 29, batch 4300, loss[loss=0.1962, simple_loss=0.2784, pruned_loss=0.05703, over 8125.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.282, pruned_loss=0.0576, over 1615099.64 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:15:19,490 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230627.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:19,532 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230627.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:37,544 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230652.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:15:51,724 INFO [train.py:901] (3/4) Epoch 29, batch 4350, loss[loss=0.1941, simple_loss=0.2755, pruned_loss=0.05635, over 7814.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2816, pruned_loss=0.05721, over 1617173.55 frames. ], batch size: 20, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:15:51,991 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.2067, 2.0376, 2.6077, 2.1819, 2.6904, 2.2996, 2.1256, 1.5410], + device='cuda:3'), covar=tensor([0.5976, 0.5372, 0.2294, 0.4078, 0.2615, 0.3422, 0.1993, 0.5641], + device='cuda:3'), in_proj_covar=tensor([0.0966, 0.1036, 0.0839, 0.1005, 0.1028, 0.0939, 0.0777, 0.0856], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 02:16:17,533 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875 +2023-02-09 02:16:22,179 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0663, 1.2270, 1.2317, 0.8642, 1.2243, 1.0939, 0.0685, 1.1927], + device='cuda:3'), covar=tensor([0.0525, 0.0482, 0.0409, 0.0632, 0.0505, 0.1063, 0.1030, 0.0385], + device='cuda:3'), in_proj_covar=tensor([0.0480, 0.0417, 0.0372, 0.0466, 0.0401, 0.0557, 0.0408, 0.0447], + device='cuda:3'), out_proj_covar=tensor([1.2716e-04, 1.0794e-04, 9.6898e-05, 1.2189e-04, 1.0498e-04, 1.5509e-04, + 1.0882e-04, 1.1685e-04], device='cuda:3') +2023-02-09 02:16:27,810 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230718.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:29,089 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.602e+02 2.462e+02 3.042e+02 3.743e+02 1.027e+03, threshold=6.085e+02, percent-clipped=1.0 +2023-02-09 02:16:29,340 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230720.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:29,848 INFO [train.py:901] (3/4) Epoch 29, batch 4400, loss[loss=0.1742, simple_loss=0.2627, pruned_loss=0.04283, over 8133.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2819, pruned_loss=0.05776, over 1611815.71 frames. 
], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:16:31,537 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7118, 2.4337, 1.8222, 2.3516, 2.2576, 1.6503, 2.2386, 2.2081], + device='cuda:3'), covar=tensor([0.1374, 0.0427, 0.1224, 0.0594, 0.0656, 0.1460, 0.0843, 0.0859], + device='cuda:3'), in_proj_covar=tensor([0.0355, 0.0244, 0.0344, 0.0314, 0.0301, 0.0347, 0.0351, 0.0320], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 02:16:47,627 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230745.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:16:57,328 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875 +2023-02-09 02:17:06,600 INFO [train.py:901] (3/4) Epoch 29, batch 4450, loss[loss=0.1607, simple_loss=0.254, pruned_loss=0.03372, over 7971.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2823, pruned_loss=0.05785, over 1612376.64 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:17:36,254 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230810.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:17:43,245 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.628e+02 2.373e+02 2.913e+02 3.542e+02 8.795e+02, threshold=5.826e+02, percent-clipped=1.0 +2023-02-09 02:17:43,976 INFO [train.py:901] (3/4) Epoch 29, batch 4500, loss[loss=0.2299, simple_loss=0.3075, pruned_loss=0.07615, over 8466.00 frames. ], tot_loss[loss=0.2008, simple_loss=0.2837, pruned_loss=0.05894, over 1614455.11 frames. ], batch size: 25, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:17:51,397 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230830.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:17:54,893 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83 +2023-02-09 02:18:19,708 INFO [train.py:901] (3/4) Epoch 29, batch 4550, loss[loss=0.205, simple_loss=0.2876, pruned_loss=0.06123, over 8184.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2835, pruned_loss=0.05843, over 1616579.98 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:18:28,417 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:31,626 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230887.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:37,214 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=230895.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:46,428 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=230908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:46,487 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230908.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:18:55,058 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.340e+02 2.935e+02 3.730e+02 9.176e+02, threshold=5.869e+02, percent-clipped=5.0 +2023-02-09 02:18:55,763 INFO [train.py:901] (3/4) Epoch 29, batch 4600, loss[loss=0.2027, simple_loss=0.2799, pruned_loss=0.06279, over 6853.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.283, pruned_loss=0.05793, over 1612488.83 frames. 
], batch size: 71, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:19:04,456 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.2474, 1.8430, 5.4242, 2.5654, 4.9338, 4.6043, 5.0118, 4.8925], + device='cuda:3'), covar=tensor([0.0523, 0.4541, 0.0405, 0.3555, 0.0900, 0.0844, 0.0505, 0.0551], + device='cuda:3'), in_proj_covar=tensor([0.0680, 0.0660, 0.0742, 0.0654, 0.0742, 0.0634, 0.0639, 0.0717], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:19:13,588 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=230945.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:31,776 INFO [train.py:901] (3/4) Epoch 29, batch 4650, loss[loss=0.2496, simple_loss=0.3167, pruned_loss=0.0913, over 7518.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2826, pruned_loss=0.05802, over 1608355.50 frames. ], batch size: 71, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:19:33,927 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=230974.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:51,179 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=230999.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:53,938 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231002.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:19:59,423 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231010.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:20:06,387 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.812e+02 2.522e+02 3.157e+02 3.845e+02 7.559e+02, threshold=6.314e+02, percent-clipped=7.0 +2023-02-09 02:20:07,122 INFO [train.py:901] (3/4) Epoch 29, batch 4700, loss[loss=0.164, simple_loss=0.2414, pruned_loss=0.04329, over 7803.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2817, pruned_loss=0.05787, over 1608864.32 frames. ], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:20:43,871 INFO [train.py:901] (3/4) Epoch 29, batch 4750, loss[loss=0.1931, simple_loss=0.2787, pruned_loss=0.05373, over 8104.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2804, pruned_loss=0.05685, over 1608452.41 frames. ], batch size: 23, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:20:56,678 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5360, 1.6363, 4.7599, 1.8081, 4.2183, 3.9965, 4.3155, 4.1760], + device='cuda:3'), covar=tensor([0.0586, 0.4522, 0.0506, 0.4234, 0.1081, 0.0912, 0.0537, 0.0675], + device='cuda:3'), in_proj_covar=tensor([0.0678, 0.0656, 0.0739, 0.0652, 0.0738, 0.0632, 0.0636, 0.0715], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:21:00,626 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375 +2023-02-09 02:21:02,802 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. 
Duration: 25.775 +2023-02-09 02:21:11,158 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7952, 1.6682, 2.5220, 1.5319, 1.3950, 2.4511, 0.5183, 1.4908], + device='cuda:3'), covar=tensor([0.1669, 0.1262, 0.0341, 0.1188, 0.2452, 0.0362, 0.1902, 0.1335], + device='cuda:3'), in_proj_covar=tensor([0.0204, 0.0207, 0.0139, 0.0225, 0.0281, 0.0150, 0.0174, 0.0202], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 02:21:18,721 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.727e+02 2.440e+02 2.924e+02 3.615e+02 6.392e+02, threshold=5.847e+02, percent-clipped=1.0 +2023-02-09 02:21:19,433 INFO [train.py:901] (3/4) Epoch 29, batch 4800, loss[loss=0.1852, simple_loss=0.2714, pruned_loss=0.04948, over 8043.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2801, pruned_loss=0.05667, over 1611096.63 frames. ], batch size: 22, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:43,702 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231154.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:21:55,944 INFO [train.py:901] (3/4) Epoch 29, batch 4850, loss[loss=0.1625, simple_loss=0.2459, pruned_loss=0.03951, over 7432.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2805, pruned_loss=0.05692, over 1606210.37 frames. ], batch size: 17, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:21:55,953 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205 +2023-02-09 02:22:17,919 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231201.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:31,026 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 2.490e+02 3.133e+02 4.186e+02 8.287e+02, threshold=6.266e+02, percent-clipped=6.0 +2023-02-09 02:22:31,772 INFO [train.py:901] (3/4) Epoch 29, batch 4900, loss[loss=0.1479, simple_loss=0.2369, pruned_loss=0.02941, over 7546.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2809, pruned_loss=0.05736, over 1607586.65 frames. 
], batch size: 18, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:22:35,614 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231226.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:40,495 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6794, 2.4514, 3.2269, 2.6831, 3.3150, 2.7209, 2.6000, 2.0839], + device='cuda:3'), covar=tensor([0.5624, 0.5471, 0.2138, 0.4188, 0.2568, 0.3186, 0.1918, 0.5903], + device='cuda:3'), in_proj_covar=tensor([0.0964, 0.1030, 0.0838, 0.1002, 0.1024, 0.0935, 0.0775, 0.0854], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 02:22:54,383 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231252.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:22:59,245 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231258.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:04,603 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231266.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:06,615 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231269.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:06,654 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231269.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:07,919 INFO [train.py:901] (3/4) Epoch 29, batch 4950, loss[loss=0.1972, simple_loss=0.2909, pruned_loss=0.05179, over 8682.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.2813, pruned_loss=0.05777, over 1609380.44 frames. ], batch size: 34, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:17,259 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231283.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:19,709 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-09 02:23:22,763 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231291.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:23:43,160 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.636e+02 2.476e+02 2.910e+02 3.581e+02 7.956e+02, threshold=5.820e+02, percent-clipped=2.0 +2023-02-09 02:23:43,854 INFO [train.py:901] (3/4) Epoch 29, batch 5000, loss[loss=0.2374, simple_loss=0.3186, pruned_loss=0.07814, over 8520.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2818, pruned_loss=0.05816, over 1609368.64 frames. ], batch size: 28, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:23:51,804 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8169, 1.6311, 2.3788, 1.4658, 1.4257, 2.3269, 0.7320, 1.5556], + device='cuda:3'), covar=tensor([0.1536, 0.1201, 0.0359, 0.1158, 0.2270, 0.0439, 0.1832, 0.1317], + device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0206, 0.0138, 0.0224, 0.0281, 0.0149, 0.0174, 0.0201], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 02:24:17,060 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231367.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:24:19,688 INFO [train.py:901] (3/4) Epoch 29, batch 5050, loss[loss=0.2246, simple_loss=0.3022, pruned_loss=0.07356, over 7255.00 frames. ], tot_loss[loss=0.1996, simple_loss=0.283, pruned_loss=0.05814, over 1611175.00 frames. 
], batch size: 71, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:24:30,653 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.81 vs. limit=5.0 +2023-02-09 02:24:37,786 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 02:24:55,716 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.315e+02 2.860e+02 3.491e+02 8.708e+02, threshold=5.721e+02, percent-clipped=8.0 +2023-02-09 02:24:56,376 INFO [train.py:901] (3/4) Epoch 29, batch 5100, loss[loss=0.229, simple_loss=0.3145, pruned_loss=0.07178, over 8353.00 frames. ], tot_loss[loss=0.1994, simple_loss=0.2825, pruned_loss=0.05811, over 1608761.46 frames. ], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:25:32,301 INFO [train.py:901] (3/4) Epoch 29, batch 5150, loss[loss=0.2454, simple_loss=0.3257, pruned_loss=0.08257, over 6715.00 frames. ], tot_loss[loss=0.1992, simple_loss=0.2822, pruned_loss=0.05811, over 1603223.06 frames. ], batch size: 72, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:26:08,162 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.585e+02 2.313e+02 2.816e+02 3.592e+02 7.666e+02, threshold=5.632e+02, percent-clipped=6.0 +2023-02-09 02:26:08,942 INFO [train.py:901] (3/4) Epoch 29, batch 5200, loss[loss=0.2207, simple_loss=0.3055, pruned_loss=0.06795, over 8281.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2819, pruned_loss=0.05773, over 1603236.88 frames. ], batch size: 49, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:26:12,091 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231525.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:18,580 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0 +2023-02-09 02:26:30,212 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231550.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:26:39,123 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 02:26:44,753 INFO [train.py:901] (3/4) Epoch 29, batch 5250, loss[loss=0.2123, simple_loss=0.3028, pruned_loss=0.06096, over 8355.00 frames. ], tot_loss[loss=0.199, simple_loss=0.2825, pruned_loss=0.05778, over 1608187.53 frames. ], batch size: 24, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:00,050 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.44 vs. limit=2.0 +2023-02-09 02:27:15,800 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=231613.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:20,505 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.520e+02 2.485e+02 2.947e+02 3.559e+02 7.815e+02, threshold=5.893e+02, percent-clipped=2.0 +2023-02-09 02:27:21,231 INFO [train.py:901] (3/4) Epoch 29, batch 5300, loss[loss=0.2046, simple_loss=0.2824, pruned_loss=0.06344, over 7781.00 frames. ], tot_loss[loss=0.1986, simple_loss=0.2825, pruned_loss=0.05738, over 1605616.03 frames. 
], batch size: 19, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:27:22,864 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231623.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:41,036 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=231648.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:53,040 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=231665.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:27:57,010 INFO [train.py:901] (3/4) Epoch 29, batch 5350, loss[loss=0.2417, simple_loss=0.3295, pruned_loss=0.07691, over 8563.00 frames. ], tot_loss[loss=0.1993, simple_loss=0.2828, pruned_loss=0.05789, over 1604638.78 frames. ], batch size: 34, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:32,684 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.493e+02 2.535e+02 3.138e+02 3.956e+02 6.651e+02, threshold=6.276e+02, percent-clipped=5.0 +2023-02-09 02:28:33,441 INFO [train.py:901] (3/4) Epoch 29, batch 5400, loss[loss=0.1961, simple_loss=0.2788, pruned_loss=0.05663, over 8631.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2831, pruned_loss=0.05833, over 1606596.50 frames. ], batch size: 39, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:28:38,398 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=231728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:28:52,251 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7432, 4.7099, 4.3029, 2.4628, 4.2663, 4.3415, 4.2791, 4.1785], + device='cuda:3'), covar=tensor([0.0597, 0.0428, 0.0888, 0.4034, 0.0790, 0.1009, 0.1196, 0.0739], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0458, 0.0449, 0.0561, 0.0440, 0.0468, 0.0440, 0.0411], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:29:09,665 INFO [train.py:901] (3/4) Epoch 29, batch 5450, loss[loss=0.1965, simple_loss=0.2791, pruned_loss=0.05691, over 8081.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.2821, pruned_loss=0.05742, over 1607365.20 frames. ], batch size: 21, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:29:29,511 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 02:29:44,870 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.390e+02 2.416e+02 2.826e+02 3.521e+02 6.915e+02, threshold=5.653e+02, percent-clipped=1.0 +2023-02-09 02:29:45,620 INFO [train.py:901] (3/4) Epoch 29, batch 5500, loss[loss=0.1739, simple_loss=0.2518, pruned_loss=0.04799, over 7563.00 frames. ], tot_loss[loss=0.198, simple_loss=0.282, pruned_loss=0.05704, over 1611838.16 frames. ], batch size: 18, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:30:18,916 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6014, 1.9430, 3.1443, 1.4399, 2.5588, 2.0610, 1.6195, 2.6114], + device='cuda:3'), covar=tensor([0.2094, 0.3007, 0.0943, 0.5169, 0.1964, 0.3558, 0.2668, 0.2355], + device='cuda:3'), in_proj_covar=tensor([0.0542, 0.0641, 0.0566, 0.0674, 0.0664, 0.0611, 0.0568, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:30:21,385 INFO [train.py:901] (3/4) Epoch 29, batch 5550, loss[loss=0.2097, simple_loss=0.2993, pruned_loss=0.06008, over 8328.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2817, pruned_loss=0.05699, over 1612461.44 frames. 
], batch size: 25, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:30:56,568 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.336e+02 2.917e+02 3.506e+02 1.057e+03, threshold=5.834e+02, percent-clipped=5.0 +2023-02-09 02:30:57,329 INFO [train.py:901] (3/4) Epoch 29, batch 5600, loss[loss=0.229, simple_loss=0.2963, pruned_loss=0.08081, over 7218.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2808, pruned_loss=0.05657, over 1607179.83 frames. ], batch size: 71, lr: 2.59e-03, grad_scale: 8.0 +2023-02-09 02:31:20,492 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5674, 1.6322, 2.3717, 1.2776, 1.0546, 2.3397, 0.3603, 1.3991], + device='cuda:3'), covar=tensor([0.1834, 0.1204, 0.0383, 0.1476, 0.2745, 0.0404, 0.2057, 0.1285], + device='cuda:3'), in_proj_covar=tensor([0.0202, 0.0205, 0.0137, 0.0225, 0.0279, 0.0149, 0.0173, 0.0200], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 02:31:34,510 INFO [train.py:901] (3/4) Epoch 29, batch 5650, loss[loss=0.2013, simple_loss=0.2872, pruned_loss=0.05767, over 8334.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2815, pruned_loss=0.05645, over 1613155.03 frames. ], batch size: 25, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:31:38,815 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 02:31:43,823 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=231984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:31:58,720 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5566, 1.4559, 1.6596, 1.3952, 0.8242, 1.4352, 1.4754, 1.3584], + device='cuda:3'), covar=tensor([0.0610, 0.1226, 0.1686, 0.1502, 0.0618, 0.1440, 0.0737, 0.0678], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0154, 0.0189, 0.0162, 0.0102, 0.0163, 0.0114, 0.0147], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0009, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 02:32:02,833 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:02,964 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232009.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:32:10,319 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 2.351e+02 2.719e+02 3.536e+02 6.635e+02, threshold=5.438e+02, percent-clipped=1.0 +2023-02-09 02:32:11,030 INFO [train.py:901] (3/4) Epoch 29, batch 5700, loss[loss=0.1865, simple_loss=0.2641, pruned_loss=0.05447, over 8091.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2825, pruned_loss=0.05676, over 1617636.24 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:31,552 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8246, 3.7874, 3.4238, 1.9515, 3.3603, 3.5017, 3.3485, 3.3800], + device='cuda:3'), covar=tensor([0.0855, 0.0609, 0.1100, 0.4498, 0.1005, 0.1202, 0.1419, 0.0972], + device='cuda:3'), in_proj_covar=tensor([0.0544, 0.0457, 0.0447, 0.0560, 0.0441, 0.0466, 0.0440, 0.0410], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:32:44,169 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.82 vs. 
limit=2.0 +2023-02-09 02:32:45,198 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 02:32:47,263 INFO [train.py:901] (3/4) Epoch 29, batch 5750, loss[loss=0.2615, simple_loss=0.3369, pruned_loss=0.09309, over 8256.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2812, pruned_loss=0.05644, over 1616308.97 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:32:54,477 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7015, 1.5318, 3.1938, 1.4850, 2.4485, 3.5058, 3.7394, 2.6410], + device='cuda:3'), covar=tensor([0.1530, 0.1998, 0.0402, 0.2370, 0.1000, 0.0352, 0.0522, 0.0878], + device='cuda:3'), in_proj_covar=tensor([0.0310, 0.0328, 0.0296, 0.0325, 0.0326, 0.0278, 0.0446, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 02:33:02,540 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-09 02:33:23,816 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 2.417e+02 3.013e+02 3.730e+02 1.097e+03, threshold=6.026e+02, percent-clipped=6.0 +2023-02-09 02:33:24,556 INFO [train.py:901] (3/4) Epoch 29, batch 5800, loss[loss=0.2071, simple_loss=0.2899, pruned_loss=0.06213, over 8557.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2802, pruned_loss=0.05612, over 1610751.17 frames. ], batch size: 48, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:33:26,860 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232124.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:33:34,978 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.5989, 5.7265, 5.0393, 2.5870, 4.9748, 5.3380, 5.2578, 5.1361], + device='cuda:3'), covar=tensor([0.0502, 0.0333, 0.0786, 0.3953, 0.0749, 0.0757, 0.0934, 0.0616], + device='cuda:3'), in_proj_covar=tensor([0.0544, 0.0457, 0.0448, 0.0560, 0.0441, 0.0467, 0.0440, 0.0411], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:33:59,561 INFO [train.py:901] (3/4) Epoch 29, batch 5850, loss[loss=0.1768, simple_loss=0.269, pruned_loss=0.04223, over 8367.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2803, pruned_loss=0.05579, over 1612919.91 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:34:34,659 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.378e+02 2.421e+02 2.869e+02 3.503e+02 7.290e+02, threshold=5.737e+02, percent-clipped=3.0 +2023-02-09 02:34:35,358 INFO [train.py:901] (3/4) Epoch 29, batch 5900, loss[loss=0.1782, simple_loss=0.2668, pruned_loss=0.04483, over 8352.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.281, pruned_loss=0.05604, over 1616797.51 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:06,618 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232265.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:35:10,675 INFO [train.py:901] (3/4) Epoch 29, batch 5950, loss[loss=0.2766, simple_loss=0.3489, pruned_loss=0.1022, over 6978.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2814, pruned_loss=0.05628, over 1619803.28 frames. 
], batch size: 71, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:46,608 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.609e+02 2.503e+02 2.837e+02 3.700e+02 9.228e+02, threshold=5.675e+02, percent-clipped=4.0 +2023-02-09 02:35:47,358 INFO [train.py:901] (3/4) Epoch 29, batch 6000, loss[loss=0.2634, simple_loss=0.3421, pruned_loss=0.09232, over 8318.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2804, pruned_loss=0.05582, over 1614279.02 frames. ], batch size: 25, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:35:47,358 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 02:36:01,201 INFO [train.py:935] (3/4) Epoch 29, validation: loss=0.1708, simple_loss=0.2701, pruned_loss=0.03577, over 944034.00 frames. +2023-02-09 02:36:01,202 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6473MB +2023-02-09 02:36:37,558 INFO [train.py:901] (3/4) Epoch 29, batch 6050, loss[loss=0.1931, simple_loss=0.2918, pruned_loss=0.04721, over 8510.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2802, pruned_loss=0.05577, over 1614637.50 frames. ], batch size: 26, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:36:44,009 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=232380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:36:57,841 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232399.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:37:02,393 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=232405.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:37:12,725 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.752e+02 2.608e+02 3.073e+02 4.031e+02 7.869e+02, threshold=6.145e+02, percent-clipped=3.0 +2023-02-09 02:37:13,453 INFO [train.py:901] (3/4) Epoch 29, batch 6100, loss[loss=0.1767, simple_loss=0.282, pruned_loss=0.03565, over 8696.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2808, pruned_loss=0.05611, over 1615342.90 frames. ], batch size: 39, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:37:27,129 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 02:37:49,995 INFO [train.py:901] (3/4) Epoch 29, batch 6150, loss[loss=0.1847, simple_loss=0.2675, pruned_loss=0.05096, over 8241.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2803, pruned_loss=0.05613, over 1614584.55 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:38:06,464 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232494.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:38:25,294 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.548e+02 2.901e+02 3.636e+02 6.365e+02, threshold=5.801e+02, percent-clipped=1.0 +2023-02-09 02:38:25,882 INFO [train.py:901] (3/4) Epoch 29, batch 6200, loss[loss=0.2143, simple_loss=0.3027, pruned_loss=0.06296, over 8102.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2812, pruned_loss=0.05693, over 1611772.82 frames. 
], batch size: 23, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:38:36,019 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8524, 1.4296, 2.9099, 1.4910, 2.2338, 3.1211, 3.2632, 2.6680], + device='cuda:3'), covar=tensor([0.1121, 0.1717, 0.0344, 0.2045, 0.0872, 0.0278, 0.0595, 0.0555], + device='cuda:3'), in_proj_covar=tensor([0.0309, 0.0326, 0.0295, 0.0325, 0.0324, 0.0278, 0.0444, 0.0309], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 02:38:36,692 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=232536.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:39:02,790 INFO [train.py:901] (3/4) Epoch 29, batch 6250, loss[loss=0.2022, simple_loss=0.2923, pruned_loss=0.05601, over 8495.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.281, pruned_loss=0.05695, over 1612859.85 frames. ], batch size: 39, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:39:06,499 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5958, 1.9931, 3.1058, 1.4705, 2.4554, 2.0883, 1.6997, 2.5706], + device='cuda:3'), covar=tensor([0.2068, 0.2857, 0.0834, 0.4966, 0.1931, 0.3461, 0.2613, 0.2130], + device='cuda:3'), in_proj_covar=tensor([0.0544, 0.0642, 0.0565, 0.0676, 0.0667, 0.0617, 0.0570, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:39:30,166 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232609.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:39:36,610 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7400, 1.9345, 2.0221, 1.4440, 2.2109, 1.4469, 0.7763, 1.9724], + device='cuda:3'), covar=tensor([0.0764, 0.0474, 0.0397, 0.0718, 0.0500, 0.1224, 0.1071, 0.0387], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0419, 0.0373, 0.0465, 0.0400, 0.0560, 0.0409, 0.0444], + device='cuda:3'), out_proj_covar=tensor([1.2637e-04, 1.0845e-04, 9.7002e-05, 1.2166e-04, 1.0481e-04, 1.5611e-04, + 1.0894e-04, 1.1617e-04], device='cuda:3') +2023-02-09 02:39:37,821 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.584e+02 2.332e+02 2.762e+02 3.491e+02 8.673e+02, threshold=5.523e+02, percent-clipped=4.0 +2023-02-09 02:39:39,185 INFO [train.py:901] (3/4) Epoch 29, batch 6300, loss[loss=0.2046, simple_loss=0.2743, pruned_loss=0.06744, over 7566.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2803, pruned_loss=0.05678, over 1609004.60 frames. ], batch size: 18, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:39:43,048 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.78 vs. limit=2.0 +2023-02-09 02:40:14,943 INFO [train.py:901] (3/4) Epoch 29, batch 6350, loss[loss=0.1947, simple_loss=0.2707, pruned_loss=0.05929, over 8090.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05664, over 1610245.13 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:40:50,669 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 2.465e+02 3.018e+02 3.778e+02 1.284e+03, threshold=6.036e+02, percent-clipped=3.0 +2023-02-09 02:40:51,316 INFO [train.py:901] (3/4) Epoch 29, batch 6400, loss[loss=0.1946, simple_loss=0.2799, pruned_loss=0.05463, over 7909.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.28, pruned_loss=0.05612, over 1610204.62 frames. 
], batch size: 20, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:40:53,593 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232724.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:41:08,031 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232743.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:41:28,283 INFO [train.py:901] (3/4) Epoch 29, batch 6450, loss[loss=0.1984, simple_loss=0.2934, pruned_loss=0.05166, over 8497.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2804, pruned_loss=0.05654, over 1614395.85 frames. ], batch size: 28, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:05,009 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.603e+02 2.358e+02 3.084e+02 4.266e+02 9.590e+02, threshold=6.168e+02, percent-clipped=5.0 +2023-02-09 02:42:05,756 INFO [train.py:901] (3/4) Epoch 29, batch 6500, loss[loss=0.2039, simple_loss=0.2951, pruned_loss=0.05632, over 8247.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2805, pruned_loss=0.05694, over 1613095.60 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:07,055 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.37 vs. limit=2.0 +2023-02-09 02:42:17,442 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232838.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:23,947 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-09 02:42:31,263 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232858.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:40,782 INFO [train.py:901] (3/4) Epoch 29, batch 6550, loss[loss=0.2341, simple_loss=0.3272, pruned_loss=0.07049, over 8355.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2811, pruned_loss=0.05727, over 1610584.67 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:42:47,140 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=232880.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:42:47,794 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. Duration: 0.92 +2023-02-09 02:42:48,128 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. limit=2.0 +2023-02-09 02:43:08,163 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 02:43:16,617 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.606e+02 2.398e+02 2.846e+02 3.696e+02 7.042e+02, threshold=5.692e+02, percent-clipped=2.0 +2023-02-09 02:43:17,342 INFO [train.py:901] (3/4) Epoch 29, batch 6600, loss[loss=0.1948, simple_loss=0.2849, pruned_loss=0.0524, over 8247.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2798, pruned_loss=0.05635, over 1609339.20 frames. ], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:43:32,636 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.17 vs. limit=2.0 +2023-02-09 02:43:40,631 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232953.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:43:52,849 INFO [train.py:901] (3/4) Epoch 29, batch 6650, loss[loss=0.2316, simple_loss=0.3208, pruned_loss=0.07118, over 8255.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2806, pruned_loss=0.05648, over 1611993.81 frames. 
], batch size: 24, lr: 2.58e-03, grad_scale: 16.0 +2023-02-09 02:43:59,979 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=232980.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:10,262 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=232995.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:17,494 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233005.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:19,564 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233008.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:28,482 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.750e+02 2.388e+02 2.820e+02 3.481e+02 6.998e+02, threshold=5.640e+02, percent-clipped=2.0 +2023-02-09 02:44:28,502 INFO [train.py:901] (3/4) Epoch 29, batch 6700, loss[loss=0.1948, simple_loss=0.2881, pruned_loss=0.05074, over 8239.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2798, pruned_loss=0.05592, over 1613859.74 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:44:40,621 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233036.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:44:42,172 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6393, 2.3393, 3.9603, 1.5748, 2.7196, 2.1887, 1.8507, 2.8999], + device='cuda:3'), covar=tensor([0.2012, 0.2701, 0.0973, 0.4895, 0.2160, 0.3555, 0.2583, 0.2505], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0640, 0.0564, 0.0674, 0.0664, 0.0613, 0.0567, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:45:06,278 INFO [train.py:901] (3/4) Epoch 29, batch 6750, loss[loss=0.2162, simple_loss=0.2925, pruned_loss=0.06997, over 8286.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2789, pruned_loss=0.05569, over 1608599.71 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:45:30,754 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-09 02:45:38,426 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233114.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:45:41,383 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6438, 2.1650, 3.2041, 1.5075, 2.5736, 1.9950, 1.8273, 2.5908], + device='cuda:3'), covar=tensor([0.2068, 0.2833, 0.0965, 0.5096, 0.1964, 0.3749, 0.2597, 0.2393], + device='cuda:3'), in_proj_covar=tensor([0.0543, 0.0640, 0.0565, 0.0675, 0.0663, 0.0615, 0.0567, 0.0646], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:45:42,082 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0161, 2.4170, 3.7002, 1.8991, 2.0800, 3.6282, 0.6331, 2.2286], + device='cuda:3'), covar=tensor([0.1165, 0.1126, 0.0209, 0.1630, 0.2120, 0.0284, 0.2140, 0.1282], + device='cuda:3'), in_proj_covar=tensor([0.0204, 0.0206, 0.0138, 0.0226, 0.0280, 0.0149, 0.0173, 0.0201], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 02:45:43,343 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.753e+02 2.536e+02 3.042e+02 3.988e+02 8.675e+02, threshold=6.084e+02, percent-clipped=8.0 +2023-02-09 02:45:43,363 INFO [train.py:901] (3/4) Epoch 29, batch 6800, loss[loss=0.199, simple_loss=0.2811, pruned_loss=0.0584, over 8289.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2789, pruned_loss=0.05527, over 1613366.02 frames. ], batch size: 23, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:45:56,023 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:46:19,947 INFO [train.py:901] (3/4) Epoch 29, batch 6850, loss[loss=0.1587, simple_loss=0.2486, pruned_loss=0.03442, over 7540.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2797, pruned_loss=0.05533, over 1613452.80 frames. ], batch size: 18, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:46:22,005 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 02:46:46,505 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233209.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:46:55,109 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.759e+02 2.470e+02 3.068e+02 3.817e+02 7.038e+02, threshold=6.136e+02, percent-clipped=5.0 +2023-02-09 02:46:55,130 INFO [train.py:901] (3/4) Epoch 29, batch 6900, loss[loss=0.1771, simple_loss=0.2652, pruned_loss=0.04453, over 7653.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.281, pruned_loss=0.05624, over 1614088.57 frames. ], batch size: 19, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:47:04,474 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233234.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:16,262 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233251.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:31,598 INFO [train.py:901] (3/4) Epoch 29, batch 6950, loss[loss=0.1902, simple_loss=0.271, pruned_loss=0.05472, over 7810.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2798, pruned_loss=0.05572, over 1614873.46 frames. ], batch size: 20, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:47:33,687 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. 
Duration: 26.62775 +2023-02-09 02:47:35,275 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233276.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:47:35,910 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233277.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:48:03,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4529, 2.8480, 2.2696, 3.8562, 1.8496, 2.2501, 2.5788, 2.9013], + device='cuda:3'), covar=tensor([0.0685, 0.0782, 0.0789, 0.0238, 0.1100, 0.1226, 0.0873, 0.0752], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0214, 0.0202, 0.0246, 0.0249, 0.0205], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 02:48:07,353 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.388e+02 2.860e+02 3.725e+02 6.106e+02, threshold=5.720e+02, percent-clipped=0.0 +2023-02-09 02:48:07,373 INFO [train.py:901] (3/4) Epoch 29, batch 7000, loss[loss=0.1695, simple_loss=0.2658, pruned_loss=0.0366, over 8134.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.28, pruned_loss=0.05586, over 1615161.22 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:48:30,364 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233352.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:48:44,309 INFO [train.py:901] (3/4) Epoch 29, batch 7050, loss[loss=0.1681, simple_loss=0.2514, pruned_loss=0.04246, over 7972.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.279, pruned_loss=0.05565, over 1612217.77 frames. ], batch size: 21, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:48:51,050 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:49:22,023 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.696e+02 2.400e+02 3.088e+02 3.796e+02 6.683e+02, threshold=6.176e+02, percent-clipped=2.0 +2023-02-09 02:49:22,043 INFO [train.py:901] (3/4) Epoch 29, batch 7100, loss[loss=0.2031, simple_loss=0.2814, pruned_loss=0.06239, over 7908.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2793, pruned_loss=0.05561, over 1613836.09 frames. ], batch size: 20, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:49:30,716 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5252, 1.4172, 1.8282, 1.1942, 1.1774, 1.7985, 0.2826, 1.2131], + device='cuda:3'), covar=tensor([0.1451, 0.1113, 0.0395, 0.0919, 0.2255, 0.0463, 0.1766, 0.1121], + device='cuda:3'), in_proj_covar=tensor([0.0202, 0.0205, 0.0137, 0.0225, 0.0279, 0.0148, 0.0172, 0.0200], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 02:49:48,018 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.29 vs. limit=2.0 +2023-02-09 02:49:49,428 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.80 vs. limit=2.0 +2023-02-09 02:49:54,759 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233467.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:49:57,421 INFO [train.py:901] (3/4) Epoch 29, batch 7150, loss[loss=0.1666, simple_loss=0.2437, pruned_loss=0.04473, over 7715.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2798, pruned_loss=0.05562, over 1615878.44 frames. 
], batch size: 18, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:50:14,748 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233495.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:50:17,637 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0816, 2.2334, 1.8553, 2.8200, 1.3744, 1.6729, 2.1004, 2.2091], + device='cuda:3'), covar=tensor([0.0670, 0.0749, 0.0853, 0.0340, 0.0957, 0.1173, 0.0662, 0.0760], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0195, 0.0244, 0.0214, 0.0202, 0.0246, 0.0249, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 02:50:34,192 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.385e+02 2.900e+02 3.377e+02 5.605e+02, threshold=5.800e+02, percent-clipped=0.0 +2023-02-09 02:50:34,212 INFO [train.py:901] (3/4) Epoch 29, batch 7200, loss[loss=0.222, simple_loss=0.3009, pruned_loss=0.07156, over 8651.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2797, pruned_loss=0.05555, over 1615001.86 frames. ], batch size: 34, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:10,563 INFO [train.py:901] (3/4) Epoch 29, batch 7250, loss[loss=0.1833, simple_loss=0.2679, pruned_loss=0.04938, over 8025.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2803, pruned_loss=0.05586, over 1615776.06 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:15,245 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7435, 1.6301, 2.4443, 1.5053, 1.3668, 2.3241, 0.5819, 1.4428], + device='cuda:3'), covar=tensor([0.1565, 0.1194, 0.0292, 0.1169, 0.2241, 0.0413, 0.1790, 0.1276], + device='cuda:3'), in_proj_covar=tensor([0.0204, 0.0207, 0.0139, 0.0228, 0.0281, 0.0150, 0.0174, 0.0202], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 02:51:46,282 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.384e+02 2.959e+02 3.398e+02 1.041e+03, threshold=5.918e+02, percent-clipped=4.0 +2023-02-09 02:51:46,302 INFO [train.py:901] (3/4) Epoch 29, batch 7300, loss[loss=0.1867, simple_loss=0.2792, pruned_loss=0.04706, over 8349.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2785, pruned_loss=0.0552, over 1611492.12 frames. ], batch size: 26, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:51:46,383 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=233621.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:52:13,023 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233657.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:52:22,373 INFO [train.py:901] (3/4) Epoch 29, batch 7350, loss[loss=0.2009, simple_loss=0.299, pruned_loss=0.0514, over 8458.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2794, pruned_loss=0.05566, over 1612410.21 frames. ], batch size: 27, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:52:28,050 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. 
Duration: 25.061125 +2023-02-09 02:52:43,007 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5811, 1.9922, 1.9818, 1.2976, 2.1158, 1.3900, 0.7574, 1.7902], + device='cuda:3'), covar=tensor([0.1124, 0.0476, 0.0503, 0.0908, 0.0630, 0.1345, 0.1271, 0.0535], + device='cuda:3'), in_proj_covar=tensor([0.0478, 0.0417, 0.0374, 0.0464, 0.0400, 0.0558, 0.0409, 0.0447], + device='cuda:3'), out_proj_covar=tensor([1.2646e-04, 1.0781e-04, 9.7219e-05, 1.2119e-04, 1.0461e-04, 1.5514e-04, + 1.0893e-04, 1.1695e-04], device='cuda:3') +2023-02-09 02:52:47,223 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.79 vs. limit=2.0 +2023-02-09 02:52:48,258 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 02:52:58,116 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.683e+02 2.663e+02 3.074e+02 4.100e+02 9.512e+02, threshold=6.147e+02, percent-clipped=7.0 +2023-02-09 02:52:58,137 INFO [train.py:901] (3/4) Epoch 29, batch 7400, loss[loss=0.1787, simple_loss=0.2613, pruned_loss=0.04799, over 8032.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2804, pruned_loss=0.05588, over 1614905.22 frames. ], batch size: 22, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:52:59,679 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233723.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:03,228 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=233728.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:09,649 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=233736.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:18,945 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233748.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:21,098 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233751.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:26,159 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3416, 2.7108, 2.1259, 3.5732, 1.9185, 2.0312, 2.4629, 2.6295], + device='cuda:3'), covar=tensor([0.0737, 0.0737, 0.0830, 0.0368, 0.0937, 0.1172, 0.0875, 0.0771], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0195, 0.0245, 0.0214, 0.0202, 0.0246, 0.0250, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 02:53:32,427 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. Duration: 26.6166875 +2023-02-09 02:53:36,088 INFO [train.py:901] (3/4) Epoch 29, batch 7450, loss[loss=0.1666, simple_loss=0.2451, pruned_loss=0.04405, over 7705.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2795, pruned_loss=0.0553, over 1613620.00 frames. ], batch size: 18, lr: 2.58e-03, grad_scale: 8.0 +2023-02-09 02:53:39,821 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=233776.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:53:45,832 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.20 vs. 
limit=2.0 +2023-02-09 02:54:07,541 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8398, 1.4916, 1.6973, 1.3612, 0.9661, 1.4785, 1.6417, 1.4794], + device='cuda:3'), covar=tensor([0.0622, 0.1284, 0.1759, 0.1565, 0.0641, 0.1517, 0.0753, 0.0697], + device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0191, 0.0163, 0.0102, 0.0165, 0.0115, 0.0148], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:3') +2023-02-09 02:54:12,670 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.228e+02 2.637e+02 3.402e+02 7.399e+02, threshold=5.273e+02, percent-clipped=2.0 +2023-02-09 02:54:12,692 INFO [train.py:901] (3/4) Epoch 29, batch 7500, loss[loss=0.1907, simple_loss=0.2836, pruned_loss=0.04893, over 8479.00 frames. ], tot_loss[loss=0.1953, simple_loss=0.2796, pruned_loss=0.05557, over 1614163.36 frames. ], batch size: 25, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:54:12,803 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4021, 4.3997, 3.9516, 1.8834, 3.8596, 4.0445, 3.9426, 3.8268], + device='cuda:3'), covar=tensor([0.0705, 0.0512, 0.0958, 0.4913, 0.0968, 0.0942, 0.1168, 0.0801], + device='cuda:3'), in_proj_covar=tensor([0.0544, 0.0458, 0.0449, 0.0560, 0.0444, 0.0465, 0.0443, 0.0411], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:54:27,537 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7351, 4.7482, 4.2065, 2.0971, 4.1456, 4.3899, 4.2908, 4.2072], + device='cuda:3'), covar=tensor([0.0596, 0.0474, 0.0933, 0.4467, 0.0872, 0.0876, 0.1128, 0.0823], + device='cuda:3'), in_proj_covar=tensor([0.0544, 0.0458, 0.0450, 0.0560, 0.0445, 0.0465, 0.0443, 0.0411], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:54:48,749 INFO [train.py:901] (3/4) Epoch 29, batch 7550, loss[loss=0.1925, simple_loss=0.2828, pruned_loss=0.0511, over 8507.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2803, pruned_loss=0.05618, over 1615407.49 frames. ], batch size: 26, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:55:24,355 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.668e+02 2.424e+02 2.935e+02 3.534e+02 7.288e+02, threshold=5.870e+02, percent-clipped=3.0 +2023-02-09 02:55:24,376 INFO [train.py:901] (3/4) Epoch 29, batch 7600, loss[loss=0.2227, simple_loss=0.3072, pruned_loss=0.06906, over 8247.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.05639, over 1613992.24 frames. ], batch size: 24, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:55:47,835 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.64 vs. limit=5.0 +2023-02-09 02:56:01,026 INFO [train.py:901] (3/4) Epoch 29, batch 7650, loss[loss=0.1948, simple_loss=0.2933, pruned_loss=0.04816, over 8339.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2806, pruned_loss=0.05584, over 1611723.02 frames. 
], batch size: 26, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:56:16,595 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=233992.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:23,685 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234001.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:35,529 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234017.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:56:38,045 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.742e+02 2.208e+02 2.731e+02 3.331e+02 6.993e+02, threshold=5.462e+02, percent-clipped=2.0 +2023-02-09 02:56:38,066 INFO [train.py:901] (3/4) Epoch 29, batch 7700, loss[loss=0.1704, simple_loss=0.2508, pruned_loss=0.04502, over 7811.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2803, pruned_loss=0.05549, over 1612733.83 frames. ], batch size: 19, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:56:49,503 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 02:56:53,160 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234043.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:12,874 INFO [train.py:901] (3/4) Epoch 29, batch 7750, loss[loss=0.1827, simple_loss=0.2666, pruned_loss=0.04943, over 7810.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2811, pruned_loss=0.05579, over 1618503.09 frames. ], batch size: 20, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:57:13,648 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234072.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:45,238 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234116.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:57:48,461 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.355e+02 2.809e+02 3.505e+02 7.382e+02, threshold=5.617e+02, percent-clipped=2.0 +2023-02-09 02:57:48,481 INFO [train.py:901] (3/4) Epoch 29, batch 7800, loss[loss=0.1839, simple_loss=0.2808, pruned_loss=0.04351, over 8464.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2802, pruned_loss=0.05544, over 1620578.92 frames. ], batch size: 25, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:58:21,909 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0 +2023-02-09 02:58:24,429 INFO [train.py:901] (3/4) Epoch 29, batch 7850, loss[loss=0.1916, simple_loss=0.2717, pruned_loss=0.05574, over 7928.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2792, pruned_loss=0.05484, over 1619207.99 frames. ], batch size: 20, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:58:36,022 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234187.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:58:58,948 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234220.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:58:59,503 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.576e+02 2.264e+02 2.745e+02 3.500e+02 8.048e+02, threshold=5.490e+02, percent-clipped=4.0 +2023-02-09 02:58:59,523 INFO [train.py:901] (3/4) Epoch 29, batch 7900, loss[loss=0.2169, simple_loss=0.3037, pruned_loss=0.06511, over 8294.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2786, pruned_loss=0.05485, over 1616104.42 frames. 
], batch size: 23, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:59:19,163 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234249.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 02:59:34,070 INFO [train.py:901] (3/4) Epoch 29, batch 7950, loss[loss=0.1715, simple_loss=0.26, pruned_loss=0.04153, over 8474.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2789, pruned_loss=0.05543, over 1613963.24 frames. ], batch size: 27, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 02:59:38,565 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7767, 1.5279, 2.6395, 1.4083, 2.2269, 2.8627, 2.9570, 2.5348], + device='cuda:3'), covar=tensor([0.1203, 0.1709, 0.0452, 0.2163, 0.1254, 0.0311, 0.0769, 0.0509], + device='cuda:3'), in_proj_covar=tensor([0.0312, 0.0331, 0.0298, 0.0328, 0.0330, 0.0282, 0.0452, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 02:59:40,745 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.4526, 4.4436, 4.1083, 2.1945, 3.9976, 4.1443, 4.0671, 3.8626], + device='cuda:3'), covar=tensor([0.0772, 0.0496, 0.0939, 0.4172, 0.0908, 0.0802, 0.1239, 0.0685], + device='cuda:3'), in_proj_covar=tensor([0.0548, 0.0459, 0.0450, 0.0559, 0.0443, 0.0466, 0.0446, 0.0413], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 02:59:51,754 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7790, 1.9058, 1.6346, 2.3431, 0.9813, 1.4560, 1.6645, 1.8655], + device='cuda:3'), covar=tensor([0.0742, 0.0720, 0.0943, 0.0370, 0.1107, 0.1321, 0.0790, 0.0767], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0196, 0.0246, 0.0214, 0.0203, 0.0248, 0.0251, 0.0206], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 03:00:10,405 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 2.366e+02 2.676e+02 3.633e+02 8.832e+02, threshold=5.352e+02, percent-clipped=6.0 +2023-02-09 03:00:10,425 INFO [train.py:901] (3/4) Epoch 29, batch 8000, loss[loss=0.1954, simple_loss=0.2806, pruned_loss=0.05506, over 8294.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2791, pruned_loss=0.05553, over 1617753.55 frames. ], batch size: 23, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 03:00:34,732 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0011, 1.5571, 3.4802, 1.5158, 2.4294, 3.7644, 3.9333, 3.2355], + device='cuda:3'), covar=tensor([0.1166, 0.1840, 0.0285, 0.2018, 0.0973, 0.0240, 0.0543, 0.0510], + device='cuda:3'), in_proj_covar=tensor([0.0312, 0.0331, 0.0298, 0.0329, 0.0330, 0.0282, 0.0452, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 03:00:41,455 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8160, 1.5046, 1.7630, 1.4425, 1.1144, 1.5152, 1.8005, 1.4448], + device='cuda:3'), covar=tensor([0.0585, 0.1272, 0.1673, 0.1513, 0.0590, 0.1518, 0.0678, 0.0732], + device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0154, 0.0191, 0.0162, 0.0102, 0.0165, 0.0114, 0.0148], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 03:00:44,626 INFO [train.py:901] (3/4) Epoch 29, batch 8050, loss[loss=0.1822, simple_loss=0.2557, pruned_loss=0.05438, over 7817.00 frames. 
], tot_loss[loss=0.1938, simple_loss=0.2767, pruned_loss=0.05542, over 1594933.64 frames. ], batch size: 20, lr: 2.57e-03, grad_scale: 8.0 +2023-02-09 03:00:45,524 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234372.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:00:55,754 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234387.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:01:02,566 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234397.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:01:19,887 WARNING [train.py:1067] (3/4) Exclude cut with ID 3488-85273-0017-111273_sp0.9 from training. Duration: 27.47775 +2023-02-09 03:01:23,757 INFO [train.py:901] (3/4) Epoch 30, batch 0, loss[loss=0.177, simple_loss=0.2614, pruned_loss=0.04627, over 8237.00 frames. ], tot_loss[loss=0.177, simple_loss=0.2614, pruned_loss=0.04627, over 8237.00 frames. ], batch size: 22, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:01:23,757 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 03:01:35,938 INFO [train.py:935] (3/4) Epoch 30, validation: loss=0.1704, simple_loss=0.27, pruned_loss=0.03537, over 944034.00 frames. +2023-02-09 03:01:35,940 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6543MB +2023-02-09 03:01:47,832 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 2.332e+02 2.743e+02 3.464e+02 7.498e+02, threshold=5.486e+02, percent-clipped=3.0 +2023-02-09 03:01:52,036 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590_sp0.9 from training. Duration: 28.72225 +2023-02-09 03:02:04,481 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234443.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:12,492 INFO [train.py:901] (3/4) Epoch 30, batch 50, loss[loss=0.2163, simple_loss=0.3061, pruned_loss=0.06324, over 8612.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2815, pruned_loss=0.05704, over 366781.02 frames. ], batch size: 49, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:02:23,086 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234468.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:28,104 WARNING [train.py:1067] (3/4) Exclude cut with ID 6709-74022-0004-57021_sp1.1 from training. Duration: 0.9409375 +2023-02-09 03:02:49,596 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234502.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:02:50,885 INFO [train.py:901] (3/4) Epoch 30, batch 100, loss[loss=0.1751, simple_loss=0.2532, pruned_loss=0.04853, over 7970.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2827, pruned_loss=0.05616, over 648997.20 frames. ], batch size: 21, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:02:54,576 WARNING [train.py:1067] (3/4) Exclude cut with ID 497-129325-0061-9566_sp1.1 from training. Duration: 0.97725 +2023-02-09 03:03:03,338 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.539e+02 2.294e+02 2.794e+02 3.449e+02 7.855e+02, threshold=5.588e+02, percent-clipped=7.0 +2023-02-09 03:03:28,113 INFO [train.py:901] (3/4) Epoch 30, batch 150, loss[loss=0.2075, simple_loss=0.2748, pruned_loss=0.07016, over 7814.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2825, pruned_loss=0.05697, over 861691.10 frames. 
], batch size: 20, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:03:35,359 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234564.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:03:56,932 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=234593.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:04:04,598 INFO [train.py:901] (3/4) Epoch 30, batch 200, loss[loss=0.2565, simple_loss=0.3212, pruned_loss=0.09589, over 8548.00 frames. ], tot_loss[loss=0.2001, simple_loss=0.2838, pruned_loss=0.05824, over 1027488.03 frames. ], batch size: 31, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:04:13,774 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.1999, 2.1306, 2.6419, 2.2975, 2.7978, 2.3176, 2.1441, 1.7058], + device='cuda:3'), covar=tensor([0.6176, 0.5334, 0.2308, 0.4212, 0.2646, 0.3584, 0.2113, 0.5527], + device='cuda:3'), in_proj_covar=tensor([0.0973, 0.1039, 0.0847, 0.1012, 0.1034, 0.0945, 0.0781, 0.0861], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 03:04:16,943 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 2.329e+02 2.797e+02 3.759e+02 1.341e+03, threshold=5.593e+02, percent-clipped=8.0 +2023-02-09 03:04:40,210 INFO [train.py:901] (3/4) Epoch 30, batch 250, loss[loss=0.2108, simple_loss=0.2931, pruned_loss=0.06421, over 7805.00 frames. ], tot_loss[loss=0.2015, simple_loss=0.2847, pruned_loss=0.05912, over 1160082.22 frames. ], batch size: 19, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:04:48,451 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149_sp0.9 from training. Duration: 28.0944375 +2023-02-09 03:04:57,629 WARNING [train.py:1067] (3/4) Exclude cut with ID 4278-13270-0009-62705_sp0.9 from training. Duration: 25.45 +2023-02-09 03:04:58,550 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234679.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:04,658 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234687.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:16,612 INFO [train.py:901] (3/4) Epoch 30, batch 300, loss[loss=0.1893, simple_loss=0.278, pruned_loss=0.05031, over 8656.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2819, pruned_loss=0.05699, over 1262947.99 frames. 
], batch size: 39, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:05:19,745 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=234708.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:29,385 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.381e+02 2.888e+02 3.640e+02 7.253e+02, threshold=5.776e+02, percent-clipped=4.0 +2023-02-09 03:05:45,727 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:05:46,513 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6946, 1.9407, 1.9410, 1.3636, 2.1002, 1.5418, 0.5552, 1.9201], + device='cuda:3'), covar=tensor([0.0652, 0.0445, 0.0403, 0.0672, 0.0485, 0.0994, 0.1019, 0.0310], + device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0416, 0.0372, 0.0465, 0.0400, 0.0555, 0.0405, 0.0443], + device='cuda:3'), out_proj_covar=tensor([1.2618e-04, 1.0768e-04, 9.6869e-05, 1.2151e-04, 1.0482e-04, 1.5420e-04, + 1.0793e-04, 1.1575e-04], device='cuda:3') +2023-02-09 03:05:52,651 INFO [train.py:901] (3/4) Epoch 30, batch 350, loss[loss=0.206, simple_loss=0.296, pruned_loss=0.05797, over 8677.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2809, pruned_loss=0.05667, over 1337525.99 frames. ], batch size: 34, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:05:55,593 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234758.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:14,216 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234783.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:27,361 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234800.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:06:30,080 INFO [train.py:901] (3/4) Epoch 30, batch 400, loss[loss=0.1764, simple_loss=0.2548, pruned_loss=0.049, over 7795.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05659, over 1401330.12 frames. ], batch size: 19, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:06:42,234 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.545e+02 2.513e+02 2.926e+02 3.697e+02 1.204e+03, threshold=5.852e+02, percent-clipped=7.0 +2023-02-09 03:07:06,517 INFO [train.py:901] (3/4) Epoch 30, batch 450, loss[loss=0.2128, simple_loss=0.2826, pruned_loss=0.07149, over 8043.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2807, pruned_loss=0.05617, over 1449740.27 frames. ], batch size: 22, lr: 2.53e-03, grad_scale: 8.0 +2023-02-09 03:07:38,962 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234900.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:07:42,330 INFO [train.py:901] (3/4) Epoch 30, batch 500, loss[loss=0.2031, simple_loss=0.2755, pruned_loss=0.06537, over 7677.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2802, pruned_loss=0.05545, over 1484993.77 frames. ], batch size: 19, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:07:54,768 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.531e+02 2.393e+02 2.948e+02 3.833e+02 6.284e+02, threshold=5.896e+02, percent-clipped=1.0 +2023-02-09 03:08:04,864 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234935.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:18,303 INFO [train.py:901] (3/4) Epoch 30, batch 550, loss[loss=0.1937, simple_loss=0.2735, pruned_loss=0.05697, over 7808.00 frames. 
], tot_loss[loss=0.1971, simple_loss=0.2816, pruned_loss=0.05627, over 1519630.17 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0 +2023-02-09 03:08:22,869 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234960.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:26,398 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=234964.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:34,799 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234976.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:36,257 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=234978.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:44,127 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=234989.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:08:54,548 INFO [train.py:901] (3/4) Epoch 30, batch 600, loss[loss=0.225, simple_loss=0.3102, pruned_loss=0.06994, over 8513.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2804, pruned_loss=0.05545, over 1544180.71 frames. ], batch size: 28, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:09:05,997 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 2.485e+02 2.961e+02 3.544e+02 6.861e+02, threshold=5.922e+02, percent-clipped=1.0 +2023-02-09 03:09:10,829 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465_sp0.9 from training. Duration: 29.816625 +2023-02-09 03:09:13,652 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235031.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:09:29,975 INFO [train.py:901] (3/4) Epoch 30, batch 650, loss[loss=0.2099, simple_loss=0.3, pruned_loss=0.05993, over 8500.00 frames. ], tot_loss[loss=0.1942, simple_loss=0.2789, pruned_loss=0.05476, over 1557865.69 frames. ], batch size: 28, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:09:54,569 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235088.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:05,671 INFO [train.py:901] (3/4) Epoch 30, batch 700, loss[loss=0.1994, simple_loss=0.2875, pruned_loss=0.05565, over 8509.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2789, pruned_loss=0.05509, over 1565758.31 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:10:12,449 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.51 vs. limit=2.0 +2023-02-09 03:10:17,690 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 2.477e+02 3.055e+02 3.959e+02 7.285e+02, threshold=6.109e+02, percent-clipped=6.0 +2023-02-09 03:10:33,426 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235144.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:34,913 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235146.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 03:10:41,067 INFO [train.py:901] (3/4) Epoch 30, batch 750, loss[loss=0.2065, simple_loss=0.2992, pruned_loss=0.05688, over 8584.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2794, pruned_loss=0.05532, over 1580201.40 frames. ], batch size: 34, lr: 2.52e-03, grad_scale: 16.0 +2023-02-09 03:10:59,028 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994_sp0.9 from training. 
Duration: 30.1555625
+2023-02-09 03:10:59,128 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2573, 3.1797, 2.9842, 1.5663, 2.8835, 3.0004, 2.8311, 2.9046],
+ device='cuda:3'), covar=tensor([0.1224, 0.0772, 0.1234, 0.4820, 0.1199, 0.1163, 0.1591, 0.1023],
+ device='cuda:3'), in_proj_covar=tensor([0.0551, 0.0461, 0.0451, 0.0562, 0.0445, 0.0470, 0.0446, 0.0414],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:11:08,043 WARNING [train.py:1067] (3/4) Exclude cut with ID 8631-249866-0030-64025_sp0.9 from training. Duration: 26.32775
+2023-02-09 03:11:17,084 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235203.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:11:17,654 INFO [train.py:901] (3/4) Epoch 30, batch 800, loss[loss=0.2071, simple_loss=0.2929, pruned_loss=0.06061, over 8336.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2786, pruned_loss=0.05532, over 1584321.56 frames. ], batch size: 25, lr: 2.52e-03, grad_scale: 16.0
+2023-02-09 03:11:30,474 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.417e+02 2.836e+02 3.328e+02 8.160e+02, threshold=5.671e+02, percent-clipped=2.0
+2023-02-09 03:11:46,749 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235244.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:11:53,489 INFO [train.py:901] (3/4) Epoch 30, batch 850, loss[loss=0.2042, simple_loss=0.3056, pruned_loss=0.05143, over 8260.00 frames. ], tot_loss[loss=0.1941, simple_loss=0.2784, pruned_loss=0.05488, over 1593098.21 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 16.0
+2023-02-09 03:11:57,057 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235259.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:12:07,384 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235273.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:12:20,619 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.6217, 4.6951, 4.1889, 2.1792, 4.0927, 4.3678, 4.1782, 4.1542],
+ device='cuda:3'), covar=tensor([0.0722, 0.0496, 0.1018, 0.4535, 0.0928, 0.0941, 0.1125, 0.0772],
+ device='cuda:3'), in_proj_covar=tensor([0.0547, 0.0458, 0.0448, 0.0559, 0.0443, 0.0467, 0.0443, 0.0411],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:12:25,087 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.50 vs. limit=2.0
+2023-02-09 03:12:29,572 INFO [train.py:901] (3/4) Epoch 30, batch 900, loss[loss=0.2095, simple_loss=0.2934, pruned_loss=0.06284, over 8605.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.279, pruned_loss=0.05495, over 1599810.32 frames. ], batch size: 34, lr: 2.52e-03, grad_scale: 16.0
+2023-02-09 03:12:41,044 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235320.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:12:41,602 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.438e+02 3.006e+02 3.865e+02 6.238e+02, threshold=6.012e+02, percent-clipped=6.0
+2023-02-09 03:12:43,011 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235322.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:13:05,257 INFO [train.py:901] (3/4) Epoch 30, batch 950, loss[loss=0.2024, simple_loss=0.2921, pruned_loss=0.05636, over 8729.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2802, pruned_loss=0.0557, over 1607640.80 frames. ], batch size: 40, lr: 2.52e-03, grad_scale: 16.0
+2023-02-09 03:13:08,761 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235359.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:13:21,827 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235378.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:13:32,472 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3185, 3.5899, 2.5820, 3.1642, 2.9561, 2.1585, 3.0597, 3.1603],
+ device='cuda:3'), covar=tensor([0.1554, 0.0371, 0.1036, 0.0593, 0.0732, 0.1469, 0.0934, 0.1047],
+ device='cuda:3'), in_proj_covar=tensor([0.0357, 0.0243, 0.0344, 0.0314, 0.0302, 0.0349, 0.0350, 0.0321],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-09 03:13:32,983 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp1.1 from training. Duration: 25.3818125
+2023-02-09 03:13:39,417 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235402.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:13:40,160 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3908, 2.1979, 2.6647, 2.3917, 2.6719, 2.3892, 2.3184, 1.9747],
+ device='cuda:3'), covar=tensor([0.4786, 0.4597, 0.1988, 0.3380, 0.2225, 0.3017, 0.1792, 0.4576],
+ device='cuda:3'), in_proj_covar=tensor([0.0978, 0.1045, 0.0853, 0.1015, 0.1038, 0.0949, 0.0782, 0.0865],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-09 03:13:40,616 INFO [train.py:901] (3/4) Epoch 30, batch 1000, loss[loss=0.2096, simple_loss=0.2893, pruned_loss=0.06499, over 7489.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2811, pruned_loss=0.05594, over 1615674.18 frames. ], batch size: 73, lr: 2.52e-03, grad_scale: 16.0
+2023-02-09 03:13:52,268 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.479e+02 3.055e+02 4.205e+02 7.814e+02, threshold=6.110e+02, percent-clipped=3.0
+2023-02-09 03:13:56,464 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235427.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:14:02,533 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235435.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:14:03,948 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:14:08,461 WARNING [train.py:1067] (3/4) Exclude cut with ID 6951-79737-0043-83149 from training. Duration: 25.285
+2023-02-09 03:14:15,938 INFO [train.py:901] (3/4) Epoch 30, batch 1050, loss[loss=0.2468, simple_loss=0.3322, pruned_loss=0.08067, over 8107.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2808, pruned_loss=0.05606, over 1613846.04 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 16.0
+2023-02-09 03:14:19,402 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235459.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:14:21,156 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403 from training. Duration: 29.735
+2023-02-09 03:14:36,667 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235484.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:14:41,817 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.61 vs. limit=2.0
+2023-02-09 03:14:50,595 INFO [train.py:901] (3/4) Epoch 30, batch 1100, loss[loss=0.1945, simple_loss=0.2897, pruned_loss=0.04962, over 8498.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2805, pruned_loss=0.05632, over 1610272.25 frames. ], batch size: 28, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:14:59,100 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235515.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:15:03,868 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.639e+02 2.444e+02 3.135e+02 3.900e+02 6.752e+02, threshold=6.270e+02, percent-clipped=2.0
+2023-02-09 03:15:04,825 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9035, 2.1833, 2.2803, 1.5292, 2.3401, 1.7221, 0.8092, 2.1728],
+ device='cuda:3'), covar=tensor([0.0719, 0.0387, 0.0322, 0.0685, 0.0519, 0.1001, 0.1023, 0.0350],
+ device='cuda:3'), in_proj_covar=tensor([0.0483, 0.0421, 0.0375, 0.0470, 0.0404, 0.0561, 0.0410, 0.0448],
+ device='cuda:3'), out_proj_covar=tensor([1.2799e-04, 1.0897e-04, 9.7569e-05, 1.2288e-04, 1.0573e-04, 1.5611e-04,
+ 1.0930e-04, 1.1726e-04], device='cuda:3')
+2023-02-09 03:15:09,971 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.72 vs. limit=5.0
+2023-02-09 03:15:12,924 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.86 vs. limit=5.0
+2023-02-09 03:15:17,338 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235540.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:15:26,829 INFO [train.py:901] (3/4) Epoch 30, batch 1150, loss[loss=0.1968, simple_loss=0.2758, pruned_loss=0.05892, over 7936.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2811, pruned_loss=0.05674, over 1614730.09 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:15:35,845 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467_sp0.9 from training. Duration: 27.8166875
+2023-02-09 03:15:39,057 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.52 vs. limit=2.0
+2023-02-09 03:16:03,363 INFO [train.py:901] (3/4) Epoch 30, batch 1200, loss[loss=0.1753, simple_loss=0.2504, pruned_loss=0.05011, over 7235.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.28, pruned_loss=0.05591, over 1617291.54 frames. ], batch size: 16, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:16:11,493 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235615.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:16:12,764 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235617.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:16:16,061 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.378e+02 2.945e+02 3.640e+02 8.540e+02, threshold=5.890e+02, percent-clipped=4.0
+2023-02-09 03:16:30,042 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235640.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:16:32,796 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4432, 2.4030, 3.1310, 2.6229, 3.0669, 2.5080, 2.4292, 2.0790],
+ device='cuda:3'), covar=tensor([0.6027, 0.5297, 0.2105, 0.3925, 0.2608, 0.3255, 0.1938, 0.5490],
+ device='cuda:3'), in_proj_covar=tensor([0.0970, 0.1037, 0.0846, 0.1008, 0.1030, 0.0943, 0.0777, 0.0860],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-09 03:16:39,500 INFO [train.py:901] (3/4) Epoch 30, batch 1250, loss[loss=0.2007, simple_loss=0.2978, pruned_loss=0.05182, over 8575.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2791, pruned_loss=0.05545, over 1612625.47 frames. ], batch size: 31, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:17:06,190 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235691.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:17:07,623 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235693.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:17:10,371 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235697.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:17:15,249 INFO [train.py:901] (3/4) Epoch 30, batch 1300, loss[loss=0.2289, simple_loss=0.3065, pruned_loss=0.07562, over 8127.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2804, pruned_loss=0.0563, over 1616985.43 frames. ], batch size: 22, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:17:23,824 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235716.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:17:25,221 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=235718.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:17:27,375 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.3746, 2.3095, 2.9742, 2.4613, 2.8567, 2.4450, 2.3448, 1.9016],
+ device='cuda:3'), covar=tensor([0.6076, 0.5472, 0.2283, 0.4455, 0.3097, 0.3326, 0.1869, 0.5824],
+ device='cuda:3'), in_proj_covar=tensor([0.0964, 0.1030, 0.0841, 0.1002, 0.1025, 0.0939, 0.0773, 0.0855],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-09 03:17:27,741 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.629e+02 2.444e+02 2.774e+02 3.314e+02 6.214e+02, threshold=5.548e+02, percent-clipped=2.0
+2023-02-09 03:17:27,830 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=235722.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:17:34,608 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235732.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:17:35,434 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.76 vs. limit=5.0
+2023-02-09 03:17:50,125 INFO [train.py:901] (3/4) Epoch 30, batch 1350, loss[loss=0.2339, simple_loss=0.31, pruned_loss=0.07885, over 8449.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2809, pruned_loss=0.05671, over 1614901.54 frames. ], batch size: 29, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:18:26,587 INFO [train.py:901] (3/4) Epoch 30, batch 1400, loss[loss=0.1932, simple_loss=0.2731, pruned_loss=0.05666, over 8556.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2802, pruned_loss=0.05646, over 1615352.04 frames. ], batch size: 49, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:18:30,243 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=235809.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:18:39,099 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.499e+02 2.338e+02 2.741e+02 3.583e+02 7.907e+02, threshold=5.482e+02, percent-clipped=6.0
+2023-02-09 03:18:49,552 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=235837.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:19:00,849 INFO [train.py:901] (3/4) Epoch 30, batch 1450, loss[loss=0.1689, simple_loss=0.251, pruned_loss=0.0434, over 7431.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2807, pruned_loss=0.0565, over 1618089.77 frames. ], batch size: 17, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:19:08,256 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0007-9590 from training. Duration: 25.85
+2023-02-09 03:19:22,538 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.57 vs. limit=2.0
+2023-02-09 03:19:38,201 INFO [train.py:901] (3/4) Epoch 30, batch 1500, loss[loss=0.181, simple_loss=0.2758, pruned_loss=0.04313, over 8203.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2809, pruned_loss=0.05591, over 1619559.66 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:19:42,081 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-09 03:19:51,219 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.723e+02 2.306e+02 2.900e+02 3.560e+02 8.272e+02, threshold=5.801e+02, percent-clipped=7.0
+2023-02-09 03:19:56,730 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-09 03:20:14,187 INFO [train.py:901] (3/4) Epoch 30, batch 1550, loss[loss=0.1752, simple_loss=0.2647, pruned_loss=0.04287, over 7819.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2803, pruned_loss=0.05565, over 1617597.77 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:20:17,113 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8606, 1.2785, 3.4979, 1.5438, 2.5920, 3.8906, 4.0503, 3.3836],
+ device='cuda:3'), covar=tensor([0.1213, 0.2113, 0.0290, 0.2080, 0.0912, 0.0213, 0.0535, 0.0542],
+ device='cuda:3'), in_proj_covar=tensor([0.0309, 0.0329, 0.0296, 0.0329, 0.0327, 0.0282, 0.0447, 0.0310],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-09 03:20:22,056 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0
+2023-02-09 03:20:27,557 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3063, 1.2842, 1.5443, 1.0294, 1.0326, 1.5487, 0.2603, 1.0790],
+ device='cuda:3'), covar=tensor([0.1112, 0.0887, 0.0323, 0.0659, 0.1864, 0.0392, 0.1467, 0.1058],
+ device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0208, 0.0139, 0.0226, 0.0280, 0.0149, 0.0175, 0.0202],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-09 03:20:38,890 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=235988.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:20:51,283 INFO [train.py:901] (3/4) Epoch 30, batch 1600, loss[loss=0.2051, simple_loss=0.2936, pruned_loss=0.05828, over 8521.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2801, pruned_loss=0.05581, over 1613555.40 frames. ], batch size: 28, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:20:57,831 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236013.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:21:00,085 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2902, 2.1319, 1.7227, 1.9954, 1.8181, 1.5505, 1.7248, 1.7408],
+ device='cuda:3'), covar=tensor([0.1283, 0.0418, 0.1141, 0.0504, 0.0702, 0.1457, 0.0920, 0.0836],
+ device='cuda:3'), in_proj_covar=tensor([0.0360, 0.0246, 0.0349, 0.0317, 0.0304, 0.0352, 0.0354, 0.0325],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-09 03:21:04,931 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.669e+02 2.693e+02 3.134e+02 4.092e+02 8.333e+02, threshold=6.267e+02, percent-clipped=7.0
+2023-02-09 03:21:15,458 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236036.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:21:19,101 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236041.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:21:28,385 INFO [train.py:901] (3/4) Epoch 30, batch 1650, loss[loss=0.21, simple_loss=0.2935, pruned_loss=0.0633, over 8106.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.281, pruned_loss=0.05598, over 1618438.42 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:21:56,790 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236093.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:22:04,816 INFO [train.py:901] (3/4) Epoch 30, batch 1700, loss[loss=0.1843, simple_loss=0.2748, pruned_loss=0.04686, over 8109.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05665, over 1615550.08 frames. ], batch size: 23, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:22:15,382 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236118.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:22:17,731 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.648e+02 2.438e+02 2.804e+02 3.459e+02 5.840e+02, threshold=5.608e+02, percent-clipped=0.0
+2023-02-09 03:22:34,514 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.31 vs. limit=5.0
+2023-02-09 03:22:40,514 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236153.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:22:41,155 INFO [train.py:901] (3/4) Epoch 30, batch 1750, loss[loss=0.18, simple_loss=0.2755, pruned_loss=0.04224, over 8243.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2807, pruned_loss=0.05646, over 1615241.55 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:22:42,663 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236156.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:23:07,927 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0424, 1.8915, 2.2858, 1.9947, 2.2603, 2.0638, 2.0045, 1.6026],
+ device='cuda:3'), covar=tensor([0.4324, 0.4071, 0.1875, 0.3251, 0.2104, 0.2791, 0.1587, 0.4168],
+ device='cuda:3'), in_proj_covar=tensor([0.0972, 0.1037, 0.0848, 0.1008, 0.1032, 0.0946, 0.0778, 0.0860],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-09 03:23:16,215 INFO [train.py:901] (3/4) Epoch 30, batch 1800, loss[loss=0.1682, simple_loss=0.2568, pruned_loss=0.03982, over 7709.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.28, pruned_loss=0.05592, over 1613154.61 frames. ], batch size: 18, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:23:29,389 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.503e+02 3.165e+02 3.852e+02 7.294e+02, threshold=6.329e+02, percent-clipped=5.0
+2023-02-09 03:23:52,514 INFO [train.py:901] (3/4) Epoch 30, batch 1850, loss[loss=0.3074, simple_loss=0.385, pruned_loss=0.1149, over 8572.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2804, pruned_loss=0.05605, over 1609491.91 frames. ], batch size: 39, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:24:03,965 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236268.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:24:28,804 INFO [train.py:901] (3/4) Epoch 30, batch 1900, loss[loss=0.2564, simple_loss=0.3195, pruned_loss=0.0967, over 7065.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2795, pruned_loss=0.05569, over 1607829.58 frames. ], batch size: 72, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:24:41,167 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.65 vs. limit=2.0
+2023-02-09 03:24:41,427 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 2.325e+02 3.004e+02 3.832e+02 8.674e+02, threshold=6.008e+02, percent-clipped=3.0
+2023-02-09 03:25:01,728 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0021-44397_sp0.9 from training. Duration: 27.511125
+2023-02-09 03:25:05,198 INFO [train.py:901] (3/4) Epoch 30, batch 1950, loss[loss=0.1908, simple_loss=0.2824, pruned_loss=0.04957, over 8350.00 frames. ], tot_loss[loss=0.1945, simple_loss=0.2787, pruned_loss=0.05515, over 1610727.10 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:25:13,610 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390 from training. Duration: 27.92
+2023-02-09 03:25:24,201 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=236380.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:25:25,043 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4670, 2.7327, 3.0191, 1.8797, 3.1745, 1.9923, 1.7120, 2.3853],
+ device='cuda:3'), covar=tensor([0.0773, 0.0416, 0.0329, 0.0810, 0.0542, 0.0835, 0.1046, 0.0579],
+ device='cuda:3'), in_proj_covar=tensor([0.0477, 0.0418, 0.0373, 0.0465, 0.0400, 0.0555, 0.0405, 0.0442],
+ device='cuda:3'), out_proj_covar=tensor([1.2627e-04, 1.0814e-04, 9.7134e-05, 1.2130e-04, 1.0478e-04, 1.5435e-04,
+ 1.0794e-04, 1.1561e-04], device='cuda:3')
+2023-02-09 03:25:25,065 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8308, 2.3670, 4.0771, 1.6923, 3.0264, 2.4743, 1.8610, 3.0156],
+ device='cuda:3'), covar=tensor([0.1881, 0.2781, 0.0749, 0.4826, 0.1895, 0.3229, 0.2633, 0.2459],
+ device='cuda:3'), in_proj_covar=tensor([0.0546, 0.0646, 0.0569, 0.0681, 0.0670, 0.0622, 0.0573, 0.0651],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:25:33,317 WARNING [train.py:1067] (3/4) Exclude cut with ID 4964-30587-0040-138716_sp0.9 from training. Duration: 25.0944375
+2023-02-09 03:25:41,056 INFO [train.py:901] (3/4) Epoch 30, batch 2000, loss[loss=0.2087, simple_loss=0.3011, pruned_loss=0.05816, over 8451.00 frames. ], tot_loss[loss=0.1959, simple_loss=0.2801, pruned_loss=0.05581, over 1616316.87 frames. ], batch size: 39, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:25:43,381 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9382, 1.7217, 2.0409, 1.8608, 2.0481, 2.0105, 1.8869, 0.9202],
+ device='cuda:3'), covar=tensor([0.6179, 0.4873, 0.2324, 0.3639, 0.2515, 0.3272, 0.2014, 0.5044],
+ device='cuda:3'), in_proj_covar=tensor([0.0970, 0.1036, 0.0846, 0.1007, 0.1032, 0.0947, 0.0778, 0.0857],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002],
+ device='cuda:3')
+2023-02-09 03:25:46,882 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236412.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:25:53,568 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.624e+02 2.406e+02 2.956e+02 3.756e+02 9.982e+02, threshold=5.913e+02, percent-clipped=8.0
+2023-02-09 03:26:04,549 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236437.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:26:16,287 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6675, 1.7882, 1.5317, 2.2689, 1.0002, 1.3946, 1.6685, 1.8466],
+ device='cuda:3'), covar=tensor([0.0765, 0.0696, 0.0985, 0.0388, 0.0975, 0.1304, 0.0701, 0.0723],
+ device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0194, 0.0244, 0.0215, 0.0201, 0.0247, 0.0251, 0.0206],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-09 03:26:16,759 INFO [train.py:901] (3/4) Epoch 30, batch 2050, loss[loss=0.2484, simple_loss=0.3204, pruned_loss=0.08822, over 8521.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2805, pruned_loss=0.05603, over 1618286.81 frames. ], batch size: 26, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:26:28,377 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3273, 2.1348, 1.7249, 1.9965, 1.8477, 1.5312, 1.7736, 1.6834],
+ device='cuda:3'), covar=tensor([0.1300, 0.0481, 0.1293, 0.0586, 0.0788, 0.1637, 0.0934, 0.0930],
+ device='cuda:3'), in_proj_covar=tensor([0.0359, 0.0245, 0.0348, 0.0316, 0.0304, 0.0351, 0.0354, 0.0323],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-09 03:26:47,588 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=236495.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:26:53,532 INFO [train.py:901] (3/4) Epoch 30, batch 2100, loss[loss=0.2061, simple_loss=0.2905, pruned_loss=0.06087, over 8248.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2805, pruned_loss=0.05676, over 1614542.96 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:27:06,244 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.478e+02 2.477e+02 3.089e+02 3.892e+02 8.089e+02, threshold=6.178e+02, percent-clipped=3.0
+2023-02-09 03:27:07,830 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236524.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:27:16,745 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4163, 1.2575, 2.3765, 1.4654, 2.2089, 2.5338, 2.7344, 2.1546],
+ device='cuda:3'), covar=tensor([0.1186, 0.1576, 0.0426, 0.2045, 0.0800, 0.0395, 0.0652, 0.0659],
+ device='cuda:3'), in_proj_covar=tensor([0.0310, 0.0330, 0.0298, 0.0329, 0.0329, 0.0282, 0.0449, 0.0312],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-09 03:27:25,243 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236549.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:27:28,493 INFO [train.py:901] (3/4) Epoch 30, batch 2150, loss[loss=0.1848, simple_loss=0.2696, pruned_loss=0.04996, over 7816.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2802, pruned_loss=0.05631, over 1617684.80 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:27:59,355 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.75 vs. limit=5.0
+2023-02-09 03:28:04,582 INFO [train.py:901] (3/4) Epoch 30, batch 2200, loss[loss=0.2296, simple_loss=0.3067, pruned_loss=0.07629, over 8247.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2805, pruned_loss=0.05616, over 1619329.29 frames. ], batch size: 24, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:28:06,919 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3894, 4.3695, 4.0041, 1.9793, 3.8845, 4.0432, 3.8827, 3.8512],
+ device='cuda:3'), covar=tensor([0.0811, 0.0522, 0.0965, 0.4690, 0.0927, 0.0884, 0.1243, 0.0840],
+ device='cuda:3'), in_proj_covar=tensor([0.0552, 0.0462, 0.0450, 0.0565, 0.0447, 0.0474, 0.0450, 0.0415],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:28:18,768 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.663e+02 2.435e+02 2.816e+02 3.564e+02 9.413e+02, threshold=5.632e+02, percent-clipped=3.0
+2023-02-09 03:28:40,981 INFO [train.py:901] (3/4) Epoch 30, batch 2250, loss[loss=0.1909, simple_loss=0.2745, pruned_loss=0.05369, over 7921.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.281, pruned_loss=0.05705, over 1616615.39 frames. ], batch size: 20, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:29:04,191 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.58 vs. limit=2.0
+2023-02-09 03:29:16,922 INFO [train.py:901] (3/4) Epoch 30, batch 2300, loss[loss=0.1957, simple_loss=0.2776, pruned_loss=0.05689, over 7623.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2807, pruned_loss=0.05678, over 1614636.32 frames. ], batch size: 17, lr: 2.52e-03, grad_scale: 8.0
+2023-02-09 03:29:29,252 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.489e+02 3.036e+02 4.215e+02 7.962e+02, threshold=6.071e+02, percent-clipped=6.0
+2023-02-09 03:29:50,611 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=236751.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:29:52,586 INFO [train.py:901] (3/4) Epoch 30, batch 2350, loss[loss=0.1824, simple_loss=0.2686, pruned_loss=0.04808, over 8332.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2795, pruned_loss=0.05635, over 1609530.31 frames. ], batch size: 26, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:30:08,613 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=236776.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:30:29,061 INFO [train.py:901] (3/4) Epoch 30, batch 2400, loss[loss=0.1678, simple_loss=0.2674, pruned_loss=0.03413, over 8127.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.281, pruned_loss=0.05674, over 1609803.39 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:30:42,226 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.757e+02 2.281e+02 2.685e+02 3.729e+02 8.099e+02, threshold=5.371e+02, percent-clipped=9.0
+2023-02-09 03:31:05,196 INFO [train.py:901] (3/4) Epoch 30, batch 2450, loss[loss=0.1914, simple_loss=0.2831, pruned_loss=0.04981, over 8241.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2808, pruned_loss=0.05681, over 1610683.25 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:31:39,706 INFO [train.py:901] (3/4) Epoch 30, batch 2500, loss[loss=0.1695, simple_loss=0.2491, pruned_loss=0.04497, over 7242.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2814, pruned_loss=0.05713, over 1613911.94 frames. ], batch size: 16, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:31:43,939 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=236910.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:31:52,883 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.501e+02 2.373e+02 3.086e+02 3.767e+02 7.222e+02, threshold=6.171e+02, percent-clipped=6.0
+2023-02-09 03:32:13,799 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4078, 3.8271, 2.6439, 3.2047, 2.9957, 2.2074, 3.1309, 3.2861],
+ device='cuda:3'), covar=tensor([0.1648, 0.0355, 0.1126, 0.0676, 0.0838, 0.1505, 0.1103, 0.1064],
+ device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0242, 0.0345, 0.0314, 0.0302, 0.0350, 0.0351, 0.0320],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-09 03:32:16,575 INFO [train.py:901] (3/4) Epoch 30, batch 2550, loss[loss=0.1791, simple_loss=0.2594, pruned_loss=0.04943, over 7669.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2806, pruned_loss=0.05683, over 1612238.18 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:32:54,178 INFO [train.py:901] (3/4) Epoch 30, batch 2600, loss[loss=0.1887, simple_loss=0.2812, pruned_loss=0.0481, over 8203.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2806, pruned_loss=0.05672, over 1611598.60 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:33:06,912 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 2.458e+02 3.021e+02 3.974e+02 8.394e+02, threshold=6.042e+02, percent-clipped=5.0
+2023-02-09 03:33:12,328 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.61 vs. limit=5.0
+2023-02-09 03:33:30,300 INFO [train.py:901] (3/4) Epoch 30, batch 2650, loss[loss=0.1987, simple_loss=0.292, pruned_loss=0.05275, over 8318.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2817, pruned_loss=0.057, over 1613770.72 frames. ], batch size: 26, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:34:06,434 INFO [train.py:901] (3/4) Epoch 30, batch 2700, loss[loss=0.2586, simple_loss=0.3407, pruned_loss=0.08829, over 8468.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.281, pruned_loss=0.05639, over 1609543.06 frames. ], batch size: 27, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:34:07,585 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-09 03:34:18,973 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.464e+02 3.015e+02 4.068e+02 7.247e+02, threshold=6.030e+02, percent-clipped=1.0
+2023-02-09 03:34:41,474 INFO [train.py:901] (3/4) Epoch 30, batch 2750, loss[loss=0.1855, simple_loss=0.2693, pruned_loss=0.05091, over 7794.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2804, pruned_loss=0.05612, over 1606025.25 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:35:18,239 INFO [train.py:901] (3/4) Epoch 30, batch 2800, loss[loss=0.1779, simple_loss=0.2673, pruned_loss=0.04424, over 7928.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2806, pruned_loss=0.05668, over 1604167.53 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:35:20,497 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9612, 1.4497, 1.6878, 1.3582, 1.0075, 1.3980, 1.7048, 1.5585],
+ device='cuda:3'), covar=tensor([0.0563, 0.1289, 0.1720, 0.1540, 0.0633, 0.1602, 0.0724, 0.0684],
+ device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149],
+ device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008],
+ device='cuda:3')
+2023-02-09 03:35:31,345 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.593e+02 2.300e+02 2.824e+02 3.573e+02 8.919e+02, threshold=5.648e+02, percent-clipped=3.0
+2023-02-09 03:35:45,300 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.8405, 1.3972, 4.0049, 1.5864, 3.5485, 3.3807, 3.6386, 3.5690],
+ device='cuda:3'), covar=tensor([0.0713, 0.4573, 0.0614, 0.4190, 0.1113, 0.0965, 0.0723, 0.0697],
+ device='cuda:3'), in_proj_covar=tensor([0.0694, 0.0671, 0.0752, 0.0670, 0.0756, 0.0645, 0.0655, 0.0729],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:35:48,279 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0
+2023-02-09 03:35:53,018 INFO [train.py:901] (3/4) Epoch 30, batch 2850, loss[loss=0.2225, simple_loss=0.2994, pruned_loss=0.07277, over 8360.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2808, pruned_loss=0.05689, over 1606538.97 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:35:53,088 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237254.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:36:18,914 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.84 vs. limit=2.0
+2023-02-09 03:36:27,596 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.60 vs. limit=2.0
+2023-02-09 03:36:29,227 INFO [train.py:901] (3/4) Epoch 30, batch 2900, loss[loss=0.2433, simple_loss=0.3173, pruned_loss=0.08464, over 7444.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2813, pruned_loss=0.05722, over 1611423.89 frames. ], batch size: 71, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:36:42,593 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.704e+02 2.592e+02 3.021e+02 4.387e+02 8.419e+02, threshold=6.042e+02, percent-clipped=5.0
+2023-02-09 03:37:04,018 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp1.1 from training. Duration: 0.7545625
+2023-02-09 03:37:05,362 INFO [train.py:901] (3/4) Epoch 30, batch 2950, loss[loss=0.2158, simple_loss=0.3094, pruned_loss=0.06105, over 8353.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2809, pruned_loss=0.05658, over 1613641.59 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:37:15,789 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237369.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:37:36,850 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2815, 3.1483, 2.9730, 1.4347, 2.8913, 2.9292, 2.8328, 2.8276],
+ device='cuda:3'), covar=tensor([0.1148, 0.0754, 0.1252, 0.4599, 0.1111, 0.1186, 0.1481, 0.0980],
+ device='cuda:3'), in_proj_covar=tensor([0.0550, 0.0461, 0.0450, 0.0562, 0.0445, 0.0473, 0.0447, 0.0415],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:37:40,341 INFO [train.py:901] (3/4) Epoch 30, batch 3000, loss[loss=0.1815, simple_loss=0.2577, pruned_loss=0.05271, over 7767.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2814, pruned_loss=0.05737, over 1610430.00 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:37:40,341 INFO [train.py:926] (3/4) Computing validation loss
+2023-02-09 03:37:54,058 INFO [train.py:935] (3/4) Epoch 30, validation: loss=0.1704, simple_loss=0.2697, pruned_loss=0.0356, over 944034.00 frames.
+2023-02-09 03:37:54,059 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6543MB
+2023-02-09 03:38:07,361 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.369e+02 2.918e+02 3.560e+02 6.316e+02, threshold=5.836e+02, percent-clipped=1.0
+2023-02-09 03:38:31,181 INFO [train.py:901] (3/4) Epoch 30, batch 3050, loss[loss=0.2387, simple_loss=0.3141, pruned_loss=0.08162, over 8324.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2808, pruned_loss=0.05728, over 1606288.78 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 8.0
+2023-02-09 03:39:01,958 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8957, 1.9773, 1.9677, 1.5736, 2.0758, 1.7234, 1.2600, 1.9573],
+ device='cuda:3'), covar=tensor([0.0549, 0.0386, 0.0280, 0.0564, 0.0370, 0.0704, 0.0786, 0.0362],
+ device='cuda:3'), in_proj_covar=tensor([0.0483, 0.0422, 0.0376, 0.0467, 0.0403, 0.0562, 0.0409, 0.0446],
+ device='cuda:3'), out_proj_covar=tensor([1.2778e-04, 1.0912e-04, 9.7950e-05, 1.2182e-04, 1.0531e-04, 1.5642e-04,
+ 1.0891e-04, 1.1676e-04], device='cuda:3')
+2023-02-09 03:39:07,127 INFO [train.py:901] (3/4) Epoch 30, batch 3100, loss[loss=0.1873, simple_loss=0.2709, pruned_loss=0.05187, over 7445.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.28, pruned_loss=0.0572, over 1606049.79 frames. ], batch size: 17, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:39:10,844 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237509.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:39:14,042 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.34 vs. limit=2.0
+2023-02-09 03:39:19,648 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.408e+02 2.429e+02 3.016e+02 3.485e+02 6.483e+02, threshold=6.032e+02, percent-clipped=4.0
+2023-02-09 03:39:20,784 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. limit=2.0
+2023-02-09 03:39:43,980 INFO [train.py:901] (3/4) Epoch 30, batch 3150, loss[loss=0.1971, simple_loss=0.2935, pruned_loss=0.05032, over 8483.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2796, pruned_loss=0.05677, over 1611375.67 frames. ], batch size: 28, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:40:03,668 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237581.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:40:21,025 INFO [train.py:901] (3/4) Epoch 30, batch 3200, loss[loss=0.1431, simple_loss=0.2207, pruned_loss=0.03279, over 7936.00 frames. ], tot_loss[loss=0.1962, simple_loss=0.2791, pruned_loss=0.05661, over 1613666.68 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:40:33,294 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 2.320e+02 2.861e+02 3.592e+02 8.186e+02, threshold=5.722e+02, percent-clipped=5.0
+2023-02-09 03:40:35,499 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=237625.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:40:53,394 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=237650.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:40:56,034 INFO [train.py:901] (3/4) Epoch 30, batch 3250, loss[loss=0.2033, simple_loss=0.2845, pruned_loss=0.06108, over 6781.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2811, pruned_loss=0.05712, over 1615734.53 frames. ], batch size: 15, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:41:32,173 INFO [train.py:901] (3/4) Epoch 30, batch 3300, loss[loss=0.1475, simple_loss=0.2326, pruned_loss=0.0312, over 7428.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2816, pruned_loss=0.05723, over 1616689.00 frames. ], batch size: 17, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:41:44,069 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.38 vs. limit=5.0
+2023-02-09 03:41:45,789 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.739e+02 2.392e+02 2.907e+02 3.818e+02 6.093e+02, threshold=5.813e+02, percent-clipped=2.0
+2023-02-09 03:42:07,977 INFO [train.py:901] (3/4) Epoch 30, batch 3350, loss[loss=0.2024, simple_loss=0.2888, pruned_loss=0.05795, over 8572.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.2814, pruned_loss=0.0574, over 1616838.98 frames. ], batch size: 31, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:42:44,179 INFO [train.py:901] (3/4) Epoch 30, batch 3400, loss[loss=0.1671, simple_loss=0.2539, pruned_loss=0.04015, over 8087.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2807, pruned_loss=0.05692, over 1618574.38 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:42:47,135 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237808.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:42:57,411 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.758e+02 2.484e+02 3.245e+02 4.483e+02 9.283e+02, threshold=6.490e+02, percent-clipped=12.0
+2023-02-09 03:43:00,493 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4091, 1.5899, 1.5848, 1.1771, 1.6510, 1.2304, 0.3458, 1.6357],
+ device='cuda:3'), covar=tensor([0.0615, 0.0460, 0.0388, 0.0600, 0.0549, 0.1177, 0.1103, 0.0321],
+ device='cuda:3'), in_proj_covar=tensor([0.0486, 0.0425, 0.0380, 0.0470, 0.0406, 0.0566, 0.0410, 0.0449],
+ device='cuda:3'), out_proj_covar=tensor([1.2850e-04, 1.0992e-04, 9.8809e-05, 1.2263e-04, 1.0614e-04, 1.5748e-04,
+ 1.0928e-04, 1.1749e-04], device='cuda:3')
+2023-02-09 03:43:19,447 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237853.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:43:20,121 INFO [train.py:901] (3/4) Epoch 30, batch 3450, loss[loss=0.155, simple_loss=0.2469, pruned_loss=0.03161, over 8238.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2805, pruned_loss=0.05699, over 1612923.52 frames. ], batch size: 22, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:43:56,083 INFO [train.py:901] (3/4) Epoch 30, batch 3500, loss[loss=0.1973, simple_loss=0.2764, pruned_loss=0.05907, over 7547.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2813, pruned_loss=0.05729, over 1614486.15 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:44:08,738 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.505e+02 3.010e+02 3.725e+02 8.965e+02, threshold=6.019e+02, percent-clipped=4.0
+2023-02-09 03:44:11,005 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=237925.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:44:11,721 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9200, 1.4879, 3.3178, 1.7276, 2.4183, 3.6290, 3.7385, 3.0864],
+ device='cuda:3'), covar=tensor([0.1180, 0.1851, 0.0286, 0.1820, 0.1014, 0.0234, 0.0522, 0.0471],
+ device='cuda:3'), in_proj_covar=tensor([0.0308, 0.0327, 0.0296, 0.0326, 0.0328, 0.0281, 0.0447, 0.0308],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-09 03:44:14,960 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=237930.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:44:22,371 WARNING [train.py:1067] (3/4) Exclude cut with ID 453-131332-0000-131866_sp0.9 from training. Duration: 25.3333125
+2023-02-09 03:44:32,809 INFO [train.py:901] (3/4) Epoch 30, batch 3550, loss[loss=0.2041, simple_loss=0.2754, pruned_loss=0.0664, over 8084.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2807, pruned_loss=0.05651, over 1615663.23 frames. ], batch size: 21, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:44:43,166 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=237968.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:44:49,855 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0
+2023-02-09 03:45:09,425 INFO [train.py:901] (3/4) Epoch 30, batch 3600, loss[loss=0.2144, simple_loss=0.3072, pruned_loss=0.06083, over 8241.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2816, pruned_loss=0.05666, over 1615663.78 frames. ], batch size: 48, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:45:22,458 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.286e+02 2.832e+02 3.360e+02 7.556e+02, threshold=5.664e+02, percent-clipped=2.0
+2023-02-09 03:45:35,411 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238040.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:45:40,890 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5664, 4.5644, 4.1420, 2.0995, 4.0357, 4.2320, 4.0847, 4.1033],
+ device='cuda:3'), covar=tensor([0.0692, 0.0497, 0.0882, 0.4622, 0.0836, 0.1036, 0.1327, 0.0912],
+ device='cuda:3'), in_proj_covar=tensor([0.0551, 0.0463, 0.0452, 0.0563, 0.0445, 0.0475, 0.0451, 0.0416],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:45:44,900 INFO [train.py:901] (3/4) Epoch 30, batch 3650, loss[loss=0.1907, simple_loss=0.2668, pruned_loss=0.05734, over 7909.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2815, pruned_loss=0.05669, over 1617948.86 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:46:05,849 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238082.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:46:12,776 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238092.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:46:20,737 INFO [train.py:901] (3/4) Epoch 30, batch 3700, loss[loss=0.2354, simple_loss=0.3268, pruned_loss=0.07203, over 8540.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.0567, over 1613229.84 frames. ], batch size: 39, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:46:29,083 WARNING [train.py:1067] (3/4) Exclude cut with ID 2411-132532-0017-25057_sp1.1 from training. Duration: 0.9681875
+2023-02-09 03:46:33,263 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.392e+02 2.352e+02 3.001e+02 3.686e+02 7.575e+02, threshold=6.003e+02, percent-clipped=3.0
+2023-02-09 03:46:50,315 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238144.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:46:56,063 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238152.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:46:57,363 INFO [train.py:901] (3/4) Epoch 30, batch 3750, loss[loss=0.1875, simple_loss=0.2727, pruned_loss=0.05119, over 8097.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2815, pruned_loss=0.05701, over 1614748.30 frames. ], batch size: 23, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:46:59,943 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.16 vs. limit=2.0
+2023-02-09 03:47:18,051 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.30 vs. limit=5.0
+2023-02-09 03:47:33,674 INFO [train.py:901] (3/4) Epoch 30, batch 3800, loss[loss=0.1975, simple_loss=0.2689, pruned_loss=0.06307, over 7646.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.05699, over 1614532.54 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:47:46,040 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.427e+02 2.911e+02 3.474e+02 7.215e+02, threshold=5.821e+02, percent-clipped=2.0
+2023-02-09 03:47:47,588 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238224.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:48:05,484 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238249.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:48:09,469 INFO [train.py:901] (3/4) Epoch 30, batch 3850, loss[loss=0.1777, simple_loss=0.2556, pruned_loss=0.04995, over 7926.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2814, pruned_loss=0.05666, over 1618307.78 frames. ], batch size: 20, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:48:17,656 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.14 vs. limit=2.0
+2023-02-09 03:48:18,889 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238267.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:48:23,698 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238274.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:48:37,703 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585_sp1.1 from training. Duration: 0.836375
+2023-02-09 03:48:39,921 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238296.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:48:45,174 INFO [train.py:901] (3/4) Epoch 30, batch 3900, loss[loss=0.2006, simple_loss=0.2949, pruned_loss=0.05309, over 8462.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2811, pruned_loss=0.05636, over 1616995.13 frames. ], batch size: 25, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:48:57,808 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238321.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:48:58,292 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 2.361e+02 2.887e+02 3.538e+02 6.169e+02, threshold=5.773e+02, percent-clipped=2.0
+2023-02-09 03:49:20,476 INFO [train.py:901] (3/4) Epoch 30, batch 3950, loss[loss=0.1714, simple_loss=0.2442, pruned_loss=0.04927, over 7543.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2815, pruned_loss=0.05665, over 1618094.91 frames. ], batch size: 18, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:49:46,427 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238389.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:49:56,966 INFO [train.py:901] (3/4) Epoch 30, batch 4000, loss[loss=0.1498, simple_loss=0.2297, pruned_loss=0.03493, over 7667.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.281, pruned_loss=0.05632, over 1614658.99 frames. ], batch size: 19, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:50:01,963 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0221, 1.4734, 1.7144, 1.3473, 1.1054, 1.4340, 1.8276, 1.7446],
+ device='cuda:3'), covar=tensor([0.0539, 0.1323, 0.1706, 0.1514, 0.0620, 0.1545, 0.0746, 0.0631],
+ device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0162, 0.0102, 0.0165, 0.0114, 0.0148],
+ device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008],
+ device='cuda:3')
+2023-02-09 03:50:09,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 2.354e+02 2.920e+02 3.674e+02 8.815e+02, threshold=5.839e+02, percent-clipped=5.0
+2023-02-09 03:50:12,702 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238426.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:50:20,360 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238436.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:50:21,793 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238438.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:50:28,636 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.0933, 2.4742, 3.8120, 2.0411, 2.1761, 3.7848, 0.9660, 2.4483],
+ device='cuda:3'), covar=tensor([0.1388, 0.1077, 0.0249, 0.1606, 0.2150, 0.0286, 0.1884, 0.1208],
+ device='cuda:3'), in_proj_covar=tensor([0.0204, 0.0206, 0.0139, 0.0225, 0.0279, 0.0149, 0.0175, 0.0201],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-09 03:50:32,682 INFO [train.py:901] (3/4) Epoch 30, batch 4050, loss[loss=0.1997, simple_loss=0.288, pruned_loss=0.05567, over 8594.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2803, pruned_loss=0.05583, over 1615852.50 frames. ], batch size: 34, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:50:38,563 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0723, 2.2001, 1.8788, 2.5490, 1.4731, 1.7389, 2.0658, 2.1594],
+ device='cuda:3'), covar=tensor([0.0606, 0.0629, 0.0746, 0.0416, 0.0941, 0.1106, 0.0699, 0.0696],
+ device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0193, 0.0243, 0.0214, 0.0201, 0.0245, 0.0248, 0.0203],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-09 03:50:57,403 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238488.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:51:09,333 INFO [train.py:901] (3/4) Epoch 30, batch 4100, loss[loss=0.1947, simple_loss=0.2872, pruned_loss=0.05108, over 8252.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2804, pruned_loss=0.05549, over 1614386.33 frames. ], batch size: 24, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:51:21,789 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.287e+02 2.925e+02 3.934e+02 1.031e+03, threshold=5.850e+02, percent-clipped=7.0
+2023-02-09 03:51:22,761 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238523.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:51:36,004 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238541.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:51:41,367 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238548.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:51:43,289 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238551.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:51:45,010 INFO [train.py:901] (3/4) Epoch 30, batch 4150, loss[loss=0.2131, simple_loss=0.2943, pruned_loss=0.06593, over 8401.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2809, pruned_loss=0.05606, over 1616338.14 frames. ], batch size: 49, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:51:57,586 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6628, 2.0041, 3.0467, 1.5212, 2.2881, 2.1317, 1.7125, 2.4540],
+ device='cuda:3'), covar=tensor([0.1953, 0.2805, 0.1012, 0.4897, 0.2122, 0.3437, 0.2597, 0.2479],
+ device='cuda:3'), in_proj_covar=tensor([0.0548, 0.0646, 0.0571, 0.0682, 0.0675, 0.0622, 0.0574, 0.0653],
+ device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:52:11,579 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.35 vs. limit=2.0
+2023-02-09 03:52:19,916 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238603.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:52:20,448 INFO [train.py:901] (3/4) Epoch 30, batch 4200, loss[loss=0.2207, simple_loss=0.2963, pruned_loss=0.07259, over 6900.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2811, pruned_loss=0.05671, over 1611502.93 frames. ], batch size: 71, lr: 2.51e-03, grad_scale: 16.0
+2023-02-09 03:52:30,425 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7735, 1.3470, 2.8961, 1.5033, 2.2293, 3.1029, 3.2584, 2.6711],
+ device='cuda:3'), covar=tensor([0.1080, 0.1683, 0.0285, 0.1951, 0.0832, 0.0271, 0.0560, 0.0500],
+ device='cuda:3'), in_proj_covar=tensor([0.0309, 0.0329, 0.0296, 0.0328, 0.0329, 0.0282, 0.0449, 0.0309],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002],
+ device='cuda:3')
+2023-02-09 03:52:33,714 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.657e+02 2.494e+02 3.256e+02 4.447e+02 1.288e+03, threshold=6.511e+02, percent-clipped=8.0
+2023-02-09 03:52:41,527 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0005-9467 from training. Duration: 25.035
+2023-02-09 03:52:50,648 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238645.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:52:56,592 INFO [train.py:901] (3/4) Epoch 30, batch 4250, loss[loss=0.1757, simple_loss=0.2575, pruned_loss=0.04696, over 7923.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2818, pruned_loss=0.05701, over 1612170.31 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:53:02,698 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.8194, 1.6717, 2.3427, 1.5118, 1.4102, 2.3418, 0.4169, 1.4686],
+ device='cuda:3'), covar=tensor([0.1401, 0.1110, 0.0321, 0.1055, 0.2054, 0.0335, 0.1744, 0.1057],
+ device='cuda:3'), in_proj_covar=tensor([0.0204, 0.0206, 0.0139, 0.0224, 0.0279, 0.0149, 0.0176, 0.0201],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-09 03:53:05,080 WARNING [train.py:1067] (3/4) Exclude cut with ID 1914-133440-0024-53073_sp0.9 from training. Duration: 25.2444375
+2023-02-09 03:53:07,994 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238670.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:53:27,925 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238699.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:53:31,133 INFO [train.py:901] (3/4) Epoch 30, batch 4300, loss[loss=0.2145, simple_loss=0.2895, pruned_loss=0.06973, over 7927.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05713, over 1608924.40 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:53:44,805 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.604e+02 2.303e+02 2.743e+02 3.342e+02 6.438e+02, threshold=5.486e+02, percent-clipped=0.0
+2023-02-09 03:54:04,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.3201, 2.1266, 1.7642, 2.0517, 1.8095, 1.5080, 1.7275, 1.7307],
+ device='cuda:3'), covar=tensor([0.1240, 0.0432, 0.1255, 0.0512, 0.0697, 0.1632, 0.0907, 0.0843],
+ device='cuda:3'), in_proj_covar=tensor([0.0356, 0.0243, 0.0345, 0.0314, 0.0301, 0.0348, 0.0349, 0.0318],
+ device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003],
+ device='cuda:3')
+2023-02-09 03:54:06,886 INFO [train.py:901] (3/4) Epoch 30, batch 4350, loss[loss=0.2235, simple_loss=0.2952, pruned_loss=0.07594, over 8734.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2812, pruned_loss=0.05689, over 1612799.61 frames. ], batch size: 39, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:54:27,361 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=238782.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:54:36,397 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425_sp0.9 from training. Duration: 28.638875
+2023-02-09 03:54:37,982 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238797.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:54:42,561 INFO [train.py:901] (3/4) Epoch 30, batch 4400, loss[loss=0.2057, simple_loss=0.2958, pruned_loss=0.05774, over 8099.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2814, pruned_loss=0.05691, over 1607557.57 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:54:44,441 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=4.22 vs. limit=5.0
+2023-02-09 03:54:44,937 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238807.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:54:53,640 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.72 vs. limit=2.0
+2023-02-09 03:54:55,878 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 2.573e+02 3.023e+02 3.983e+02 6.680e+02, threshold=6.046e+02, percent-clipped=2.0
+2023-02-09 03:54:56,114 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238822.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:55:03,783 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238832.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:55:15,274 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp0.9 from training. Duration: 33.038875
+2023-02-09 03:55:18,608 INFO [train.py:901] (3/4) Epoch 30, batch 4450, loss[loss=0.1672, simple_loss=0.2429, pruned_loss=0.04578, over 7422.00 frames. ], tot_loss[loss=0.1984, simple_loss=0.282, pruned_loss=0.0574, over 1606355.92 frames. ], batch size: 17, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:55:22,533 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=238859.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:55:40,874 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=238884.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:55:50,474 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=238897.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:55:55,016 INFO [train.py:901] (3/4) Epoch 30, batch 4500, loss[loss=0.213, simple_loss=0.2892, pruned_loss=0.06839, over 8530.00 frames. ], tot_loss[loss=0.1985, simple_loss=0.282, pruned_loss=0.05747, over 1608243.53 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:56:07,437 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.647e+02 2.335e+02 2.828e+02 3.474e+02 8.376e+02, threshold=5.656e+02, percent-clipped=3.0
+2023-02-09 03:56:08,191 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983 from training. Duration: 0.83
+2023-02-09 03:56:12,490 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6819, 1.7116, 2.0929, 1.8296, 1.1391, 1.8037, 2.3113, 2.0655],
+ device='cuda:3'), covar=tensor([0.0481, 0.1151, 0.1557, 0.1323, 0.0585, 0.1364, 0.0599, 0.0607],
+ device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0148],
+ device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008],
+ device='cuda:3')
+2023-02-09 03:56:29,264 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.47 vs. limit=2.0
+2023-02-09 03:56:30,975 INFO [train.py:901] (3/4) Epoch 30, batch 4550, loss[loss=0.1819, simple_loss=0.2587, pruned_loss=0.05252, over 7711.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2805, pruned_loss=0.05644, over 1605780.77 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:56:31,138 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=238954.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 03:57:06,048 INFO [train.py:901] (3/4) Epoch 30, batch 4600, loss[loss=0.2648, simple_loss=0.3297, pruned_loss=0.09996, over 8563.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.281, pruned_loss=0.05696, over 1607475.60 frames. ], batch size: 31, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:57:19,157 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.347e+02 2.832e+02 3.443e+02 5.144e+02, threshold=5.665e+02, percent-clipped=0.0
+2023-02-09 03:57:26,213 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.2899, 3.2121, 2.9970, 1.6065, 2.9188, 2.9501, 2.8796, 2.8342],
+ device='cuda:3'), covar=tensor([0.1071, 0.0779, 0.1208, 0.4463, 0.1113, 0.1281, 0.1634, 0.1097],
+ device='cuda:3'), in_proj_covar=tensor([0.0556, 0.0466, 0.0458, 0.0571, 0.0451, 0.0481, 0.0455, 0.0420],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 03:57:34,217 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239043.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:57:40,145 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.73 vs. limit=5.0
+2023-02-09 03:57:41,730 INFO [train.py:901] (3/4) Epoch 30, batch 4650, loss[loss=0.1869, simple_loss=0.2687, pruned_loss=0.05248, over 8468.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2817, pruned_loss=0.05736, over 1609986.84 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:58:17,764 INFO [train.py:901] (3/4) Epoch 30, batch 4700, loss[loss=0.2333, simple_loss=0.3169, pruned_loss=0.07485, over 8555.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2805, pruned_loss=0.05668, over 1610034.41 frames. ], batch size: 39, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:58:25,197 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.0500, 1.2840, 1.1920, 0.6780, 1.2302, 1.0912, 0.0786, 1.2291],
+ device='cuda:3'), covar=tensor([0.0522, 0.0429, 0.0396, 0.0670, 0.0464, 0.0978, 0.0957, 0.0359],
+ device='cuda:3'), in_proj_covar=tensor([0.0481, 0.0420, 0.0378, 0.0467, 0.0402, 0.0560, 0.0407, 0.0449],
+ device='cuda:3'), out_proj_covar=tensor([1.2733e-04, 1.0839e-04, 9.8362e-05, 1.2200e-04, 1.0492e-04, 1.5570e-04,
+ 1.0844e-04, 1.1746e-04], device='cuda:3')
+2023-02-09 03:58:30,952 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.697e+02 2.353e+02 2.866e+02 3.941e+02 8.957e+02, threshold=5.733e+02, percent-clipped=8.0
+2023-02-09 03:58:52,482 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239153.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:58:52,925 INFO [train.py:901] (3/4) Epoch 30, batch 4750, loss[loss=0.2127, simple_loss=0.2901, pruned_loss=0.06768, over 8300.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2809, pruned_loss=0.05695, over 1609275.87 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:58:55,953 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239158.0, num_to_drop=0, layers_to_drop=set()
+2023-02-09 03:59:05,580 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.32 vs. limit=5.0
+2023-02-09 03:59:10,889 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239178.0, num_to_drop=1, layers_to_drop={0}
+2023-02-09 03:59:12,056 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0003-9465 from training. Duration: 26.8349375
+2023-02-09 03:59:14,172 WARNING [train.py:1067] (3/4) Exclude cut with ID 5622-44585-0006-50425 from training. Duration: 25.775
+2023-02-09 03:59:21,387 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.55 vs. limit=5.0
+2023-02-09 03:59:28,648 INFO [train.py:901] (3/4) Epoch 30, batch 4800, loss[loss=0.2319, simple_loss=0.3098, pruned_loss=0.07704, over 8554.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.281, pruned_loss=0.05688, over 1609459.33 frames. ], batch size: 31, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 03:59:35,684 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5276, 2.8648, 2.3265, 3.9606, 1.5508, 2.0238, 2.6873, 2.8244],
+ device='cuda:3'), covar=tensor([0.0690, 0.0696, 0.0767, 0.0227, 0.1095, 0.1221, 0.0822, 0.0740],
+ device='cuda:3'), in_proj_covar=tensor([0.0233, 0.0195, 0.0245, 0.0215, 0.0203, 0.0247, 0.0250, 0.0205],
+ device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005],
+ device='cuda:3')
+2023-02-09 03:59:41,696 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.637e+02 2.393e+02 3.010e+02 3.751e+02 7.640e+02, threshold=6.020e+02, percent-clipped=2.0
+2023-02-09 03:59:46,063 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.8409, 2.1829, 3.6148, 2.0244, 1.9040, 3.5666, 0.8121, 2.2567],
+ device='cuda:3'), covar=tensor([0.1097, 0.1211, 0.0227, 0.1439, 0.2103, 0.0243, 0.1834, 0.1079],
+ device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0205, 0.0138, 0.0223, 0.0276, 0.0148, 0.0174, 0.0200],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003],
+ device='cuda:3')
+2023-02-09 04:00:04,582 INFO [train.py:901] (3/4) Epoch 30, batch 4850, loss[loss=0.1966, simple_loss=0.2821, pruned_loss=0.05551, over 8244.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2805, pruned_loss=0.05655, over 1610562.66 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 04:00:06,726 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914 from training. Duration: 26.205
+2023-02-09 04:00:36,498 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239298.0, num_to_drop=1, layers_to_drop={1}
+2023-02-09 04:00:40,437 INFO [train.py:901] (3/4) Epoch 30, batch 4900, loss[loss=0.1498, simple_loss=0.2333, pruned_loss=0.03318, over 7440.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2793, pruned_loss=0.05583, over 1611117.55 frames. ], batch size: 17, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 04:00:53,056 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 2.412e+02 2.818e+02 3.519e+02 1.028e+03, threshold=5.635e+02, percent-clipped=4.0
+2023-02-09 04:01:15,931 INFO [train.py:901] (3/4) Epoch 30, batch 4950, loss[loss=0.2064, simple_loss=0.2897, pruned_loss=0.06157, over 7979.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2787, pruned_loss=0.05571, over 1608818.77 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 16.0
+2023-02-09 04:01:43,683 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([3.9099, 3.8412, 3.6045, 1.7836, 3.4760, 3.5349, 3.4910, 3.4156],
+ device='cuda:3'), covar=tensor([0.0825, 0.0569, 0.0919, 0.4236, 0.1004, 0.1064, 0.1311, 0.0789],
+ device='cuda:3'), in_proj_covar=tensor([0.0556, 0.0465, 0.0455, 0.0569, 0.0451, 0.0480, 0.0453, 0.0418],
+ device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002],
+ device='cuda:3')
+2023-02-09 04:01:51,711 INFO [train.py:901] (3/4) Epoch 30, batch 5000, loss[loss=0.1895, simple_loss=0.2587, pruned_loss=0.06013, over 7291.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2801, pruned_loss=0.05628, over 1610577.08 frames. 
], batch size: 16, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:01:58,093 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=239413.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:01:58,914 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239414.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:05,004 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.597e+02 2.513e+02 3.095e+02 3.810e+02 1.179e+03, threshold=6.190e+02, percent-clipped=9.0 +2023-02-09 04:02:17,683 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239439.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:02:29,040 INFO [train.py:901] (3/4) Epoch 30, batch 5050, loss[loss=0.1778, simple_loss=0.2616, pruned_loss=0.04701, over 7974.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2791, pruned_loss=0.05562, over 1612589.28 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:02:52,720 WARNING [train.py:1067] (3/4) Exclude cut with ID 5239-32139-0047-92994 from training. Duration: 27.14 +2023-02-09 04:03:05,849 INFO [train.py:901] (3/4) Epoch 30, batch 5100, loss[loss=0.2182, simple_loss=0.3119, pruned_loss=0.06222, over 8687.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2787, pruned_loss=0.05499, over 1613317.09 frames. ], batch size: 39, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:08,880 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2337, 1.1078, 1.3271, 0.9971, 1.0071, 1.3086, 0.0820, 0.9196], + device='cuda:3'), covar=tensor([0.1273, 0.1195, 0.0450, 0.0643, 0.2266, 0.0535, 0.1791, 0.1068], + device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0207, 0.0138, 0.0225, 0.0279, 0.0149, 0.0175, 0.0201], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 04:03:20,035 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.557e+02 2.525e+02 3.230e+02 3.994e+02 1.175e+03, threshold=6.461e+02, percent-clipped=6.0 +2023-02-09 04:03:42,220 INFO [train.py:901] (3/4) Epoch 30, batch 5150, loss[loss=0.1954, simple_loss=0.2762, pruned_loss=0.05732, over 8229.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2798, pruned_loss=0.05554, over 1616218.22 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:03:56,229 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([5.9479, 1.6936, 6.0840, 2.2315, 5.5206, 5.1575, 5.5916, 5.5159], + device='cuda:3'), covar=tensor([0.0446, 0.4994, 0.0371, 0.4046, 0.0966, 0.0916, 0.0489, 0.0505], + device='cuda:3'), in_proj_covar=tensor([0.0699, 0.0671, 0.0755, 0.0671, 0.0757, 0.0648, 0.0660, 0.0733], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:04:18,741 INFO [train.py:901] (3/4) Epoch 30, batch 5200, loss[loss=0.1873, simple_loss=0.2701, pruned_loss=0.05223, over 8234.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2791, pruned_loss=0.05486, over 1610889.76 frames. ], batch size: 22, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:30,968 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.24 vs. 
limit=2.0 +2023-02-09 04:04:31,919 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.498e+02 2.360e+02 2.794e+02 3.430e+02 1.458e+03, threshold=5.587e+02, percent-clipped=2.0 +2023-02-09 04:04:37,738 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7348, 1.4720, 4.9185, 1.8418, 4.4007, 4.0556, 4.3919, 4.3264], + device='cuda:3'), covar=tensor([0.0505, 0.5171, 0.0416, 0.4441, 0.0959, 0.0951, 0.0560, 0.0612], + device='cuda:3'), in_proj_covar=tensor([0.0699, 0.0671, 0.0756, 0.0671, 0.0756, 0.0646, 0.0660, 0.0733], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:04:44,695 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=239640.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:04:49,567 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7668, 1.7173, 2.3495, 1.4304, 1.4590, 2.2566, 0.4702, 1.4235], + device='cuda:3'), covar=tensor([0.1544, 0.1171, 0.0325, 0.1033, 0.2045, 0.0448, 0.1719, 0.1168], + device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0207, 0.0138, 0.0224, 0.0278, 0.0148, 0.0174, 0.0201], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 04:04:54,716 INFO [train.py:901] (3/4) Epoch 30, batch 5250, loss[loss=0.1813, simple_loss=0.2767, pruned_loss=0.04295, over 8452.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2792, pruned_loss=0.05502, over 1614293.71 frames. ], batch size: 25, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:04:56,150 WARNING [train.py:1067] (3/4) Exclude cut with ID 7859-102521-0017-21930_sp0.9 from training. Duration: 27.25 +2023-02-09 04:05:05,224 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=239669.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:05:05,366 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.89 vs. limit=5.0 +2023-02-09 04:05:23,283 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=239694.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:05:29,842 INFO [train.py:901] (3/4) Epoch 30, batch 5300, loss[loss=0.224, simple_loss=0.327, pruned_loss=0.06049, over 8488.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2781, pruned_loss=0.05486, over 1606032.99 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:05:43,734 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 2.433e+02 2.937e+02 3.850e+02 7.663e+02, threshold=5.875e+02, percent-clipped=5.0 +2023-02-09 04:06:04,873 INFO [train.py:901] (3/4) Epoch 30, batch 5350, loss[loss=0.1898, simple_loss=0.2659, pruned_loss=0.0568, over 7702.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.279, pruned_loss=0.05545, over 1608677.97 frames. 
], batch size: 18, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:06,436 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.0999, 1.4409, 1.7798, 1.3752, 1.0442, 1.5453, 1.9711, 1.6329], + device='cuda:3'), covar=tensor([0.0583, 0.1242, 0.1677, 0.1526, 0.0643, 0.1486, 0.0678, 0.0660], + device='cuda:3'), in_proj_covar=tensor([0.0101, 0.0155, 0.0192, 0.0163, 0.0102, 0.0165, 0.0114, 0.0149], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0008, 0.0008], + device='cuda:3') +2023-02-09 04:06:41,542 INFO [train.py:901] (3/4) Epoch 30, batch 5400, loss[loss=0.2004, simple_loss=0.2917, pruned_loss=0.05458, over 8293.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.28, pruned_loss=0.0557, over 1611625.92 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:06:55,042 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.816e+02 2.371e+02 2.881e+02 3.522e+02 8.420e+02, threshold=5.763e+02, percent-clipped=7.0 +2023-02-09 04:06:57,402 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5800, 1.7331, 4.8354, 1.9383, 4.3347, 4.0799, 4.3247, 4.2801], + device='cuda:3'), covar=tensor([0.0620, 0.4092, 0.0405, 0.3867, 0.0872, 0.0829, 0.0515, 0.0575], + device='cuda:3'), in_proj_covar=tensor([0.0701, 0.0674, 0.0759, 0.0675, 0.0757, 0.0649, 0.0663, 0.0734], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:07:07,694 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.26 vs. limit=2.0 +2023-02-09 04:07:17,655 INFO [train.py:901] (3/4) Epoch 30, batch 5450, loss[loss=0.1778, simple_loss=0.2586, pruned_loss=0.04854, over 7807.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.2805, pruned_loss=0.05631, over 1610895.67 frames. ], batch size: 20, lr: 2.50e-03, grad_scale: 16.0 +2023-02-09 04:07:49,874 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0014-44390_sp0.9 from training. Duration: 31.02225 +2023-02-09 04:07:53,971 INFO [train.py:901] (3/4) Epoch 30, batch 5500, loss[loss=0.2012, simple_loss=0.2955, pruned_loss=0.05344, over 8534.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2802, pruned_loss=0.0564, over 1611243.74 frames. ], batch size: 49, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:08,783 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.536e+02 2.378e+02 3.012e+02 4.037e+02 9.246e+02, threshold=6.023e+02, percent-clipped=5.0 +2023-02-09 04:08:30,323 INFO [train.py:901] (3/4) Epoch 30, batch 5550, loss[loss=0.1748, simple_loss=0.2641, pruned_loss=0.04269, over 8589.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2802, pruned_loss=0.05595, over 1613991.51 frames. ], batch size: 34, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:08:50,822 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=239984.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:09:07,149 INFO [train.py:901] (3/4) Epoch 30, batch 5600, loss[loss=0.1984, simple_loss=0.2867, pruned_loss=0.05508, over 8041.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2813, pruned_loss=0.05656, over 1615989.03 frames. 
], batch size: 22, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:21,043 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.553e+02 2.479e+02 2.939e+02 3.472e+02 8.474e+02, threshold=5.878e+02, percent-clipped=2.0 +2023-02-09 04:09:42,584 INFO [train.py:901] (3/4) Epoch 30, batch 5650, loss[loss=0.1981, simple_loss=0.2802, pruned_loss=0.05804, over 8293.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2813, pruned_loss=0.05663, over 1613754.90 frames. ], batch size: 23, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:09:59,637 WARNING [train.py:1067] (3/4) Exclude cut with ID 6758-72288-0033-148662_sp0.9 from training. Duration: 25.988875 +2023-02-09 04:10:14,057 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240099.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:10:16,491 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=3.91 vs. limit=5.0 +2023-02-09 04:10:17,324 INFO [train.py:901] (3/4) Epoch 30, batch 5700, loss[loss=0.2316, simple_loss=0.3121, pruned_loss=0.07555, over 8360.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2806, pruned_loss=0.0564, over 1614751.84 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:10:31,996 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.343e+02 2.506e+02 3.162e+02 4.194e+02 1.225e+03, threshold=6.325e+02, percent-clipped=8.0 +2023-02-09 04:10:53,032 INFO [train.py:901] (3/4) Epoch 30, batch 5750, loss[loss=0.2165, simple_loss=0.2919, pruned_loss=0.07057, over 8081.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2799, pruned_loss=0.05642, over 1609641.98 frames. ], batch size: 21, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:03,865 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.87 vs. limit=2.0 +2023-02-09 04:11:04,186 WARNING [train.py:1067] (3/4) Exclude cut with ID 3972-170212-0014-103914_sp0.9 from training. Duration: 29.1166875 +2023-02-09 04:11:28,581 INFO [train.py:901] (3/4) Epoch 30, batch 5800, loss[loss=0.1553, simple_loss=0.2411, pruned_loss=0.03479, over 6763.00 frames. ], tot_loss[loss=0.1966, simple_loss=0.28, pruned_loss=0.0566, over 1607322.08 frames. ], batch size: 15, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:11:31,036 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.19 vs. limit=2.0 +2023-02-09 04:11:42,443 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 2.400e+02 2.667e+02 3.487e+02 8.848e+02, threshold=5.334e+02, percent-clipped=2.0 +2023-02-09 04:12:04,243 INFO [train.py:901] (3/4) Epoch 30, batch 5850, loss[loss=0.1821, simple_loss=0.2662, pruned_loss=0.04894, over 8341.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2807, pruned_loss=0.05686, over 1612267.36 frames. ], batch size: 49, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:39,817 INFO [train.py:901] (3/4) Epoch 30, batch 5900, loss[loss=0.2209, simple_loss=0.3087, pruned_loss=0.06661, over 8511.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2812, pruned_loss=0.0567, over 1615486.11 frames. 
], batch size: 49, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:12:53,707 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 2.330e+02 2.970e+02 3.920e+02 1.059e+03, threshold=5.939e+02, percent-clipped=6.0 +2023-02-09 04:13:08,088 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.4218, 2.9517, 2.3318, 4.1085, 1.7891, 2.2180, 2.7008, 2.8950], + device='cuda:3'), covar=tensor([0.0722, 0.0804, 0.0759, 0.0261, 0.1065, 0.1141, 0.0858, 0.0715], + device='cuda:3'), in_proj_covar=tensor([0.0230, 0.0193, 0.0244, 0.0213, 0.0201, 0.0245, 0.0247, 0.0203], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0005, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 04:13:15,497 INFO [train.py:901] (3/4) Epoch 30, batch 5950, loss[loss=0.2093, simple_loss=0.3073, pruned_loss=0.05558, over 8238.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2809, pruned_loss=0.05633, over 1617643.16 frames. ], batch size: 24, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:16,425 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=240355.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:18,464 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240358.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:33,802 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=240380.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:13:50,584 INFO [train.py:901] (3/4) Epoch 30, batch 6000, loss[loss=0.2028, simple_loss=0.2881, pruned_loss=0.05872, over 8340.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2813, pruned_loss=0.05641, over 1618324.72 frames. ], batch size: 26, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:13:50,585 INFO [train.py:926] (3/4) Computing validation loss +2023-02-09 04:14:04,295 INFO [train.py:935] (3/4) Epoch 30, validation: loss=0.1701, simple_loss=0.2695, pruned_loss=0.03536, over 944034.00 frames. +2023-02-09 04:14:04,296 INFO [train.py:936] (3/4) Maximum memory allocated so far is 6543MB +2023-02-09 04:14:17,955 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.377e+02 3.122e+02 3.554e+02 6.850e+02, threshold=6.243e+02, percent-clipped=2.0 +2023-02-09 04:14:39,390 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6097, 1.3187, 2.4081, 1.4172, 2.2309, 2.5580, 2.7340, 2.1984], + device='cuda:3'), covar=tensor([0.1001, 0.1446, 0.0371, 0.1986, 0.0707, 0.0357, 0.0589, 0.0601], + device='cuda:3'), in_proj_covar=tensor([0.0310, 0.0331, 0.0297, 0.0330, 0.0331, 0.0285, 0.0452, 0.0310], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 04:14:39,922 INFO [train.py:901] (3/4) Epoch 30, batch 6050, loss[loss=0.1867, simple_loss=0.273, pruned_loss=0.05024, over 7976.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.2816, pruned_loss=0.05647, over 1622314.49 frames. 
], batch size: 21, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:14:55,006 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.5194, 1.8435, 4.7097, 1.9647, 4.2193, 4.0085, 4.2543, 4.1698], + device='cuda:3'), covar=tensor([0.0611, 0.4403, 0.0501, 0.4432, 0.1002, 0.0896, 0.0656, 0.0669], + device='cuda:3'), in_proj_covar=tensor([0.0704, 0.0674, 0.0759, 0.0677, 0.0761, 0.0650, 0.0662, 0.0738], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:15:16,415 INFO [train.py:901] (3/4) Epoch 30, batch 6100, loss[loss=0.1723, simple_loss=0.2456, pruned_loss=0.04946, over 7692.00 frames. ], tot_loss[loss=0.1976, simple_loss=0.2818, pruned_loss=0.0567, over 1623240.42 frames. ], batch size: 18, lr: 2.50e-03, grad_scale: 8.0 +2023-02-09 04:15:30,263 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.423e+02 2.992e+02 3.767e+02 7.583e+02, threshold=5.983e+02, percent-clipped=4.0 +2023-02-09 04:15:36,682 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=96, metric=1.18 vs. limit=2.0 +2023-02-09 04:15:40,593 WARNING [train.py:1067] (3/4) Exclude cut with ID 3033-130750-0096-107983_sp0.9 from training. Duration: 0.92225 +2023-02-09 04:15:51,549 INFO [train.py:901] (3/4) Epoch 30, batch 6150, loss[loss=0.1648, simple_loss=0.2393, pruned_loss=0.04516, over 7789.00 frames. ], tot_loss[loss=0.1974, simple_loss=0.2812, pruned_loss=0.05675, over 1616824.69 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:22,933 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5693, 1.7953, 1.8534, 1.2473, 1.8908, 1.4908, 0.4553, 1.7912], + device='cuda:3'), covar=tensor([0.0703, 0.0497, 0.0392, 0.0681, 0.0609, 0.1140, 0.1130, 0.0390], + device='cuda:3'), in_proj_covar=tensor([0.0482, 0.0417, 0.0376, 0.0467, 0.0402, 0.0558, 0.0408, 0.0446], + device='cuda:3'), out_proj_covar=tensor([1.2749e-04, 1.0757e-04, 9.7819e-05, 1.2187e-04, 1.0515e-04, 1.5508e-04, + 1.0873e-04, 1.1661e-04], device='cuda:3') +2023-02-09 04:16:28,466 INFO [train.py:901] (3/4) Epoch 30, batch 6200, loss[loss=0.1938, simple_loss=0.2876, pruned_loss=0.04993, over 8254.00 frames. ], tot_loss[loss=0.1968, simple_loss=0.2808, pruned_loss=0.05641, over 1617963.26 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:16:44,278 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 2.533e+02 2.968e+02 3.901e+02 6.917e+02, threshold=5.935e+02, percent-clipped=4.0 +2023-02-09 04:17:05,855 INFO [train.py:901] (3/4) Epoch 30, batch 6250, loss[loss=0.2238, simple_loss=0.3011, pruned_loss=0.07324, over 8367.00 frames. ], tot_loss[loss=0.1969, simple_loss=0.2806, pruned_loss=0.05657, over 1613208.34 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:39,922 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=240702.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:17:41,233 INFO [train.py:901] (3/4) Epoch 30, batch 6300, loss[loss=0.2051, simple_loss=0.2825, pruned_loss=0.06382, over 8545.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2817, pruned_loss=0.05711, over 1613008.76 frames. 
], batch size: 49, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:17:54,932 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.569e+02 3.101e+02 4.376e+02 1.063e+03, threshold=6.203e+02, percent-clipped=9.0 +2023-02-09 04:18:10,185 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.3830, 1.5996, 4.5997, 1.6649, 4.1217, 3.8609, 4.1487, 4.0609], + device='cuda:3'), covar=tensor([0.0579, 0.4465, 0.0518, 0.4657, 0.1040, 0.0949, 0.0550, 0.0649], + device='cuda:3'), in_proj_covar=tensor([0.0700, 0.0669, 0.0754, 0.0673, 0.0755, 0.0645, 0.0657, 0.0732], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:18:17,093 INFO [train.py:901] (3/4) Epoch 30, batch 6350, loss[loss=0.1629, simple_loss=0.2487, pruned_loss=0.03852, over 7700.00 frames. ], tot_loss[loss=0.198, simple_loss=0.2819, pruned_loss=0.05707, over 1617349.98 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:18:53,327 INFO [train.py:901] (3/4) Epoch 30, batch 6400, loss[loss=0.1855, simple_loss=0.2659, pruned_loss=0.05251, over 7442.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2809, pruned_loss=0.05654, over 1616043.94 frames. ], batch size: 17, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:19:02,608 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=240817.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:19:07,074 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 2.422e+02 2.800e+02 3.642e+02 5.918e+02, threshold=5.600e+02, percent-clipped=0.0 +2023-02-09 04:19:28,698 INFO [train.py:901] (3/4) Epoch 30, batch 6450, loss[loss=0.193, simple_loss=0.2832, pruned_loss=0.05137, over 8470.00 frames. ], tot_loss[loss=0.1979, simple_loss=0.2815, pruned_loss=0.05719, over 1616119.38 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:20:03,785 INFO [train.py:901] (3/4) Epoch 30, batch 6500, loss[loss=0.1879, simple_loss=0.269, pruned_loss=0.0534, over 8470.00 frames. ], tot_loss[loss=0.1973, simple_loss=0.281, pruned_loss=0.0568, over 1618683.43 frames. ], batch size: 29, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:20:05,406 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.7019, 1.6841, 4.8978, 1.8006, 4.3765, 4.0826, 4.4088, 4.3250], + device='cuda:3'), covar=tensor([0.0527, 0.4684, 0.0497, 0.4605, 0.0989, 0.0980, 0.0544, 0.0608], + device='cuda:3'), in_proj_covar=tensor([0.0704, 0.0673, 0.0759, 0.0678, 0.0760, 0.0650, 0.0660, 0.0737], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:20:17,923 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 2.582e+02 3.161e+02 3.840e+02 1.025e+03, threshold=6.322e+02, percent-clipped=7.0 +2023-02-09 04:20:36,286 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=240950.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:20:38,870 INFO [train.py:901] (3/4) Epoch 30, batch 6550, loss[loss=0.1788, simple_loss=0.2629, pruned_loss=0.04735, over 7970.00 frames. ], tot_loss[loss=0.1963, simple_loss=0.2799, pruned_loss=0.05634, over 1615316.61 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:00,574 WARNING [train.py:1067] (3/4) Exclude cut with ID 3557-8342-0013-71585 from training. 
Duration: 0.92 +2023-02-09 04:21:15,887 INFO [train.py:901] (3/4) Epoch 30, batch 6600, loss[loss=0.1981, simple_loss=0.2916, pruned_loss=0.05235, over 8789.00 frames. ], tot_loss[loss=0.1975, simple_loss=0.2813, pruned_loss=0.05684, over 1614215.48 frames. ], batch size: 30, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:21:16,033 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241004.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:21:20,066 WARNING [train.py:1067] (3/4) Exclude cut with ID 4133-6541-0027-26893_sp1.1 from training. Duration: 0.9681875 +2023-02-09 04:21:29,839 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.473e+02 2.233e+02 3.026e+02 3.930e+02 1.368e+03, threshold=6.053e+02, percent-clipped=4.0 +2023-02-09 04:21:51,499 INFO [train.py:901] (3/4) Epoch 30, batch 6650, loss[loss=0.1892, simple_loss=0.2668, pruned_loss=0.05576, over 7932.00 frames. ], tot_loss[loss=0.1964, simple_loss=0.2804, pruned_loss=0.05615, over 1614123.54 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:22:04,820 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241073.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:23,586 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:22:27,625 INFO [train.py:901] (3/4) Epoch 30, batch 6700, loss[loss=0.2091, simple_loss=0.3011, pruned_loss=0.05851, over 8348.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2795, pruned_loss=0.05545, over 1616632.31 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:22:42,112 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 2.246e+02 2.834e+02 3.422e+02 9.903e+02, threshold=5.667e+02, percent-clipped=4.0 +2023-02-09 04:22:43,744 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7842, 2.5981, 1.8695, 2.4797, 2.3116, 1.6641, 2.1217, 2.2750], + device='cuda:3'), covar=tensor([0.1681, 0.0461, 0.1365, 0.0704, 0.0848, 0.1703, 0.1208, 0.1223], + device='cuda:3'), in_proj_covar=tensor([0.0363, 0.0246, 0.0347, 0.0317, 0.0304, 0.0350, 0.0351, 0.0322], + device='cuda:3'), out_proj_covar=tensor([0.0004, 0.0003, 0.0004, 0.0003, 0.0003, 0.0004, 0.0004, 0.0003], + device='cuda:3') +2023-02-09 04:23:04,036 INFO [train.py:901] (3/4) Epoch 30, batch 6750, loss[loss=0.181, simple_loss=0.2741, pruned_loss=0.04398, over 8337.00 frames. ], tot_loss[loss=0.1944, simple_loss=0.2788, pruned_loss=0.05499, over 1614621.61 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:29,825 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.5993, 2.4260, 3.1619, 2.6200, 3.1483, 2.6284, 2.5003, 1.9693], + device='cuda:3'), covar=tensor([0.5617, 0.5520, 0.2086, 0.4077, 0.2887, 0.3477, 0.1980, 0.5929], + device='cuda:3'), in_proj_covar=tensor([0.0967, 0.1038, 0.0852, 0.1011, 0.1031, 0.0948, 0.0778, 0.0859], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 04:23:39,179 INFO [train.py:901] (3/4) Epoch 30, batch 6800, loss[loss=0.1951, simple_loss=0.2864, pruned_loss=0.05192, over 8481.00 frames. ], tot_loss[loss=0.1946, simple_loss=0.2788, pruned_loss=0.05525, over 1613868.42 frames. ], batch size: 27, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:23:42,683 WARNING [train.py:1067] (3/4) Exclude cut with ID 8291-282929-0024-9607_sp0.9 from training. 
Duration: 26.438875 +2023-02-09 04:23:53,776 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 2.178e+02 2.682e+02 3.526e+02 7.087e+02, threshold=5.364e+02, percent-clipped=2.0 +2023-02-09 04:24:15,461 INFO [train.py:901] (3/4) Epoch 30, batch 6850, loss[loss=0.2081, simple_loss=0.2963, pruned_loss=0.05991, over 8363.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2797, pruned_loss=0.05535, over 1611120.61 frames. ], batch size: 48, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:24:34,764 WARNING [train.py:1067] (3/4) Exclude cut with ID 2929-85685-0079-61403_sp1.1 from training. Duration: 27.0318125 +2023-02-09 04:24:43,877 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241294.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:24:50,576 INFO [train.py:901] (3/4) Epoch 30, batch 6900, loss[loss=0.1959, simple_loss=0.2739, pruned_loss=0.05897, over 8134.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.28, pruned_loss=0.05536, over 1612849.45 frames. ], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:24:54,881 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9795, 2.1970, 1.8093, 2.7644, 1.2235, 1.6334, 1.9916, 2.1330], + device='cuda:3'), covar=tensor([0.0745, 0.0712, 0.0924, 0.0346, 0.1051, 0.1303, 0.0820, 0.0785], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0194, 0.0245, 0.0214, 0.0202, 0.0246, 0.0248, 0.0204], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 04:25:05,718 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.527e+02 3.094e+02 3.969e+02 8.004e+02, threshold=6.188e+02, percent-clipped=9.0 +2023-02-09 04:25:12,695 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7929, 1.9086, 1.6915, 2.3521, 0.9728, 1.5207, 1.7446, 1.8932], + device='cuda:3'), covar=tensor([0.0781, 0.0778, 0.0978, 0.0396, 0.1133, 0.1416, 0.0781, 0.0749], + device='cuda:3'), in_proj_covar=tensor([0.0231, 0.0193, 0.0244, 0.0213, 0.0202, 0.0246, 0.0247, 0.0203], + device='cuda:3'), out_proj_covar=tensor([0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0006, 0.0006, 0.0005], + device='cuda:3') +2023-02-09 04:25:22,110 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241348.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:25:26,923 INFO [train.py:901] (3/4) Epoch 30, batch 6950, loss[loss=0.1946, simple_loss=0.2856, pruned_loss=0.05183, over 8486.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2802, pruned_loss=0.0557, over 1614462.05 frames. ], batch size: 28, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:25:46,733 WARNING [train.py:1067] (3/4) Exclude cut with ID 7255-291500-0009-9471_sp0.9 from training. Duration: 26.62775 +2023-02-09 04:25:59,228 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.5338, 1.7780, 2.5738, 1.4354, 1.9703, 1.8582, 1.5494, 1.9953], + device='cuda:3'), covar=tensor([0.2179, 0.2900, 0.1069, 0.4997, 0.2171, 0.3591, 0.2768, 0.2420], + device='cuda:3'), in_proj_covar=tensor([0.0547, 0.0649, 0.0567, 0.0677, 0.0670, 0.0621, 0.0574, 0.0650], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:26:04,048 INFO [train.py:901] (3/4) Epoch 30, batch 7000, loss[loss=0.2576, simple_loss=0.3209, pruned_loss=0.09716, over 7280.00 frames. ], tot_loss[loss=0.1967, simple_loss=0.2809, pruned_loss=0.05623, over 1619112.14 frames. 
], batch size: 72, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:07,753 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241409.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:26:17,959 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.728e+02 2.441e+02 2.932e+02 3.651e+02 7.920e+02, threshold=5.865e+02, percent-clipped=3.0 +2023-02-09 04:26:18,115 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241424.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:26:40,277 INFO [train.py:901] (3/4) Epoch 30, batch 7050, loss[loss=0.1984, simple_loss=0.2855, pruned_loss=0.05569, over 8359.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2802, pruned_loss=0.05601, over 1617923.21 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:26:46,795 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241463.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:27:16,741 INFO [train.py:901] (3/4) Epoch 30, batch 7100, loss[loss=0.1723, simple_loss=0.2633, pruned_loss=0.04067, over 8468.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2798, pruned_loss=0.05558, over 1616549.83 frames. ], batch size: 25, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:27:30,715 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 2.381e+02 2.857e+02 3.660e+02 8.579e+02, threshold=5.714e+02, percent-clipped=3.0 +2023-02-09 04:27:51,587 INFO [train.py:901] (3/4) Epoch 30, batch 7150, loss[loss=0.1888, simple_loss=0.266, pruned_loss=0.05575, over 7697.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2796, pruned_loss=0.05523, over 1615767.13 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:28,712 INFO [train.py:901] (3/4) Epoch 30, batch 7200, loss[loss=0.2021, simple_loss=0.2883, pruned_loss=0.05796, over 8336.00 frames. ], tot_loss[loss=0.1954, simple_loss=0.2798, pruned_loss=0.0555, over 1616613.19 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:28:43,479 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.677e+02 2.259e+02 2.765e+02 3.853e+02 1.030e+03, threshold=5.530e+02, percent-clipped=3.0 +2023-02-09 04:28:53,986 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241639.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:03,954 INFO [train.py:901] (3/4) Epoch 30, batch 7250, loss[loss=0.2116, simple_loss=0.2891, pruned_loss=0.06708, over 7922.00 frames. ], tot_loss[loss=0.1951, simple_loss=0.2793, pruned_loss=0.05545, over 1615490.98 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:11,981 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241665.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:29:30,390 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241690.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:29:40,026 INFO [train.py:901] (3/4) Epoch 30, batch 7300, loss[loss=0.2152, simple_loss=0.3036, pruned_loss=0.06339, over 8500.00 frames. ], tot_loss[loss=0.1955, simple_loss=0.2795, pruned_loss=0.05575, over 1616377.60 frames. 
], batch size: 28, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:29:50,469 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=241719.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:29:53,729 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.652e+02 2.349e+02 2.997e+02 3.899e+02 6.597e+02, threshold=5.994e+02, percent-clipped=5.0 +2023-02-09 04:29:59,160 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.9419, 1.4699, 1.8230, 1.4510, 0.9509, 1.5836, 1.7312, 1.6752], + device='cuda:3'), covar=tensor([0.0573, 0.1211, 0.1617, 0.1432, 0.0594, 0.1400, 0.0694, 0.0658], + device='cuda:3'), in_proj_covar=tensor([0.0100, 0.0154, 0.0191, 0.0162, 0.0102, 0.0163, 0.0113, 0.0148], + device='cuda:3'), out_proj_covar=tensor([0.0007, 0.0009, 0.0010, 0.0010, 0.0006, 0.0010, 0.0007, 0.0008], + device='cuda:3') +2023-02-09 04:30:08,890 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=241744.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:15,563 INFO [train.py:901] (3/4) Epoch 30, batch 7350, loss[loss=0.1908, simple_loss=0.2868, pruned_loss=0.0474, over 8497.00 frames. ], tot_loss[loss=0.1958, simple_loss=0.2798, pruned_loss=0.05595, over 1611553.46 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:25,405 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241768.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:30:39,735 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0026-44402_sp0.9 from training. Duration: 25.061125 +2023-02-09 04:30:51,553 INFO [train.py:901] (3/4) Epoch 30, batch 7400, loss[loss=0.1923, simple_loss=0.2726, pruned_loss=0.05601, over 8345.00 frames. ], tot_loss[loss=0.1957, simple_loss=0.2795, pruned_loss=0.05598, over 1610774.10 frames. ], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:30:59,775 WARNING [train.py:1067] (3/4) Exclude cut with ID 774-127930-0014-48411_sp1.1 from training. Duration: 0.95 +2023-02-09 04:31:00,900 INFO [scaling.py:679] (3/4) Whitening: num_groups=1, num_channels=256, metric=2.70 vs. limit=5.0 +2023-02-09 04:31:04,083 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241821.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:31:05,934 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.776e+02 2.497e+02 3.037e+02 3.880e+02 5.984e+02, threshold=6.074e+02, percent-clipped=0.0 +2023-02-09 04:31:24,332 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=241849.0, num_to_drop=1, layers_to_drop={1} +2023-02-09 04:31:27,625 INFO [train.py:901] (3/4) Epoch 30, batch 7450, loss[loss=0.1857, simple_loss=0.2856, pruned_loss=0.04294, over 8547.00 frames. ], tot_loss[loss=0.1952, simple_loss=0.2792, pruned_loss=0.05555, over 1607679.47 frames. ], batch size: 31, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:31:28,705 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([2.6416, 2.4641, 3.1727, 2.6315, 3.2009, 2.7010, 2.6026, 2.1064], + device='cuda:3'), covar=tensor([0.5553, 0.5307, 0.2126, 0.4183, 0.2608, 0.3124, 0.1816, 0.5604], + device='cuda:3'), in_proj_covar=tensor([0.0967, 0.1033, 0.0850, 0.1009, 0.1030, 0.0944, 0.0778, 0.0858], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0001, 0.0002], + device='cuda:3') +2023-02-09 04:31:40,211 WARNING [train.py:1067] (3/4) Exclude cut with ID 7699-105389-0094-102071_sp0.9 from training. 
Duration: 26.6166875 +2023-02-09 04:31:48,184 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=241883.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:32:02,786 INFO [train.py:901] (3/4) Epoch 30, batch 7500, loss[loss=0.1625, simple_loss=0.2515, pruned_loss=0.03672, over 8252.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2787, pruned_loss=0.05562, over 1605900.63 frames. ], batch size: 24, lr: 2.49e-03, grad_scale: 16.0 +2023-02-09 04:32:18,960 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 2.491e+02 2.852e+02 3.531e+02 9.058e+02, threshold=5.704e+02, percent-clipped=2.0 +2023-02-09 04:32:37,298 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.1446, 1.4802, 4.3294, 1.7249, 3.8563, 3.6588, 3.9403, 3.8450], + device='cuda:3'), covar=tensor([0.0708, 0.4659, 0.0598, 0.4390, 0.1076, 0.0950, 0.0656, 0.0727], + device='cuda:3'), in_proj_covar=tensor([0.0700, 0.0673, 0.0759, 0.0677, 0.0761, 0.0648, 0.0660, 0.0736], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:32:39,981 INFO [train.py:901] (3/4) Epoch 30, batch 7550, loss[loss=0.2197, simple_loss=0.3018, pruned_loss=0.06878, over 6819.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.2801, pruned_loss=0.05649, over 1606327.55 frames. ], batch size: 15, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:32:43,672 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([4.2123, 4.1826, 3.8310, 1.9511, 3.7412, 3.8329, 3.6303, 3.7378], + device='cuda:3'), covar=tensor([0.0757, 0.0587, 0.1094, 0.4385, 0.0939, 0.1092, 0.1383, 0.0746], + device='cuda:3'), in_proj_covar=tensor([0.0552, 0.0460, 0.0453, 0.0563, 0.0449, 0.0477, 0.0450, 0.0416], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:33:01,519 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=241983.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:33:17,233 INFO [train.py:901] (3/4) Epoch 30, batch 7600, loss[loss=0.2123, simple_loss=0.2965, pruned_loss=0.06403, over 8197.00 frames. ], tot_loss[loss=0.1961, simple_loss=0.2798, pruned_loss=0.05622, over 1605545.95 frames. ], batch size: 23, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:33:32,889 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.614e+02 2.414e+02 3.070e+02 3.745e+02 6.631e+02, threshold=6.140e+02, percent-clipped=3.0 +2023-02-09 04:33:45,823 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.7400, 2.0279, 2.8272, 1.5646, 2.2445, 2.0503, 1.8047, 2.1702], + device='cuda:3'), covar=tensor([0.1925, 0.2627, 0.0953, 0.4581, 0.2026, 0.3278, 0.2556, 0.2517], + device='cuda:3'), in_proj_covar=tensor([0.0546, 0.0649, 0.0566, 0.0678, 0.0670, 0.0620, 0.0573, 0.0648], + device='cuda:3'), out_proj_covar=tensor([0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0003, 0.0002, 0.0002], + device='cuda:3') +2023-02-09 04:33:54,462 INFO [train.py:901] (3/4) Epoch 30, batch 7650, loss[loss=0.2373, simple_loss=0.3154, pruned_loss=0.07961, over 8495.00 frames. ], tot_loss[loss=0.196, simple_loss=0.2795, pruned_loss=0.05625, over 1607544.06 frames. 
], batch size: 26, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:25,726 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242098.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:34:29,809 INFO [train.py:901] (3/4) Epoch 30, batch 7700, loss[loss=0.167, simple_loss=0.2596, pruned_loss=0.03722, over 7980.00 frames. ], tot_loss[loss=0.1971, simple_loss=0.2803, pruned_loss=0.05701, over 1610601.01 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:34:44,312 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.431e+02 3.028e+02 3.722e+02 6.918e+02, threshold=6.057e+02, percent-clipped=1.0 +2023-02-09 04:34:54,343 WARNING [train.py:1067] (3/4) Exclude cut with ID 7357-94126-0009-44385_sp0.9 from training. Duration: 27.02225 +2023-02-09 04:34:55,277 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242139.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:05,988 INFO [train.py:901] (3/4) Epoch 30, batch 7750, loss[loss=0.2255, simple_loss=0.3071, pruned_loss=0.07194, over 8533.00 frames. ], tot_loss[loss=0.1965, simple_loss=0.28, pruned_loss=0.05647, over 1613582.10 frames. ], batch size: 49, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:13,195 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242164.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:13,204 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.4295, 1.3946, 1.7398, 1.1181, 1.0679, 1.6957, 0.2922, 1.1704], + device='cuda:3'), covar=tensor([0.1343, 0.1040, 0.0400, 0.0851, 0.2380, 0.0502, 0.1676, 0.1192], + device='cuda:3'), in_proj_covar=tensor([0.0203, 0.0206, 0.0138, 0.0223, 0.0277, 0.0149, 0.0175, 0.0200], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 04:35:13,774 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242165.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:18,596 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242171.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:35:20,781 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6687, 1.2632, 3.1282, 1.4335, 2.3241, 3.3579, 3.5807, 2.8970], + device='cuda:3'), covar=tensor([0.1380, 0.2105, 0.0349, 0.2338, 0.1023, 0.0280, 0.0516, 0.0537], + device='cuda:3'), in_proj_covar=tensor([0.0311, 0.0331, 0.0298, 0.0330, 0.0332, 0.0287, 0.0455, 0.0311], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0003, 0.0003, 0.0003, 0.0002, 0.0004, 0.0002], + device='cuda:3') +2023-02-09 04:35:34,906 INFO [zipformer.py:1185] (3/4) warmup_begin=666.7, warmup_end=1333.3, batch_count=242193.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:35:42,648 INFO [train.py:901] (3/4) Epoch 30, batch 7800, loss[loss=0.1769, simple_loss=0.2656, pruned_loss=0.04412, over 7790.00 frames. ], tot_loss[loss=0.1977, simple_loss=0.2815, pruned_loss=0.0569, over 1617293.46 frames. 
], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:35:57,866 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.396e+02 3.014e+02 3.960e+02 8.063e+02, threshold=6.029e+02, percent-clipped=4.0 +2023-02-09 04:36:11,177 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242244.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:18,001 INFO [train.py:901] (3/4) Epoch 30, batch 7850, loss[loss=0.1688, simple_loss=0.2436, pruned_loss=0.04695, over 7803.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2807, pruned_loss=0.05683, over 1615410.31 frames. ], batch size: 19, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:36,021 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242280.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:36:52,298 INFO [train.py:901] (3/4) Epoch 30, batch 7900, loss[loss=0.1495, simple_loss=0.2413, pruned_loss=0.02886, over 7809.00 frames. ], tot_loss[loss=0.1972, simple_loss=0.2808, pruned_loss=0.05677, over 1619948.01 frames. ], batch size: 20, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:36:54,985 INFO [zipformer.py:1185] (3/4) warmup_begin=2000.0, warmup_end=2666.7, batch_count=242308.0, num_to_drop=1, layers_to_drop={0} +2023-02-09 04:37:06,486 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.670e+02 2.359e+02 2.894e+02 3.889e+02 1.272e+03, threshold=5.788e+02, percent-clipped=10.0 +2023-02-09 04:37:08,044 INFO [zipformer.py:1185] (3/4) warmup_begin=1333.3, warmup_end=2000.0, batch_count=242327.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:26,030 INFO [scaling.py:679] (3/4) Whitening: num_groups=8, num_channels=192, metric=1.69 vs. limit=2.0 +2023-02-09 04:37:26,303 INFO [train.py:901] (3/4) Epoch 30, batch 7950, loss[loss=0.1834, simple_loss=0.2704, pruned_loss=0.04819, over 7966.00 frames. ], tot_loss[loss=0.197, simple_loss=0.2807, pruned_loss=0.05667, over 1616912.76 frames. ], batch size: 21, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:37:26,516 INFO [zipformer.py:1185] (3/4) warmup_begin=3333.3, warmup_end=4000.0, batch_count=242354.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:43,577 INFO [zipformer.py:1185] (3/4) warmup_begin=2666.7, warmup_end=3333.3, batch_count=242379.0, num_to_drop=0, layers_to_drop=set() +2023-02-09 04:37:53,732 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.6552, 1.8466, 1.8871, 1.3756, 1.9712, 1.4749, 0.4562, 1.8274], + device='cuda:3'), covar=tensor([0.0534, 0.0378, 0.0325, 0.0565, 0.0477, 0.0981, 0.0994, 0.0301], + device='cuda:3'), in_proj_covar=tensor([0.0484, 0.0422, 0.0378, 0.0471, 0.0407, 0.0564, 0.0409, 0.0449], + device='cuda:3'), out_proj_covar=tensor([1.2803e-04, 1.0876e-04, 9.8259e-05, 1.2281e-04, 1.0642e-04, 1.5677e-04, + 1.0904e-04, 1.1734e-04], device='cuda:3') +2023-02-09 04:38:00,610 INFO [train.py:901] (3/4) Epoch 30, batch 8000, loss[loss=0.1622, simple_loss=0.2527, pruned_loss=0.03586, over 8136.00 frames. ], tot_loss[loss=0.1956, simple_loss=0.2795, pruned_loss=0.05588, over 1613207.57 frames. ], batch size: 22, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:38:14,884 INFO [optim.py:369] (3/4) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.478e+02 2.968e+02 3.707e+02 1.083e+03, threshold=5.936e+02, percent-clipped=5.0 +2023-02-09 04:38:35,242 INFO [train.py:901] (3/4) Epoch 30, batch 8050, loss[loss=0.1759, simple_loss=0.2512, pruned_loss=0.05032, over 7538.00 frames. 
], tot_loss[loss=0.196, simple_loss=0.2795, pruned_loss=0.05624, over 1602405.21 frames. ], batch size: 18, lr: 2.49e-03, grad_scale: 8.0 +2023-02-09 04:38:53,666 INFO [zipformer.py:2431] (3/4) attn_weights_entropy = tensor([1.2339, 1.0619, 1.3155, 1.0268, 1.0167, 1.3212, 0.1414, 0.9189], + device='cuda:3'), covar=tensor([0.1365, 0.1272, 0.0495, 0.0674, 0.2424, 0.0570, 0.1781, 0.1134], + device='cuda:3'), in_proj_covar=tensor([0.0204, 0.0207, 0.0139, 0.0224, 0.0278, 0.0150, 0.0175, 0.0200], + device='cuda:3'), out_proj_covar=tensor([0.0003, 0.0003, 0.0002, 0.0003, 0.0004, 0.0002, 0.0003, 0.0003], + device='cuda:3') +2023-02-09 04:38:58,180 INFO [train.py:1165] (3/4) Done! diff --git a/log/modified_beam_search/errs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt b/log/modified_beam_search/errs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..f4b7e49ba36a375dfab5e9a15da5011ef05a24c2 --- /dev/null +++ b/log/modified_beam_search/errs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt @@ -0,0 +1,12973 @@ +%WER = 3.88 +Errors: 255 insertions, 160 deletions, 1623 substitutions, over 52576 reference words (50793 correct) +Search below for sections starting with PER-UTT DETAILS:, SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS: + +PER-UTT DETAILS: corr or (ref->hyp) +1089-134686-0000-1733: HE HOPED THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED (FLOUR FATTENED->FLOWER FAT AND) SAUCE +1089-134686-0001-1734: STUFF IT INTO YOU HIS BELLY COUNSELLED HIM +1089-134686-0002-1735: AFTER EARLY NIGHTFALL THE YELLOW LAMPS WOULD LIGHT UP HERE AND THERE THE SQUALID QUARTER OF THE (BROTHELS->BRAFFLELS) +1089-134686-0003-1736: (HELLO->HALLO) BERTIE ANY GOOD IN YOUR MIND +1089-134686-0004-1737: NUMBER TEN FRESH (NELLY IS->NELLIERS) WAITING ON YOU GOOD NIGHT HUSBAND +1089-134686-0005-1738: THE MUSIC CAME NEARER AND HE RECALLED THE WORDS THE WORDS OF SHELLEY'S FRAGMENT UPON THE MOON WANDERING COMPANIONLESS PALE FOR WEARINESS +1089-134686-0006-1739: THE DULL LIGHT FELL MORE FAINTLY UPON THE PAGE WHEREON ANOTHER EQUATION BEGAN TO UNFOLD ITSELF SLOWLY AND TO SPREAD ABROAD ITS WIDENING TAIL +1089-134686-0007-1740: A COLD LUCID INDIFFERENCE REIGNED IN HIS SOUL +1089-134686-0008-1741: THE CHAOS IN WHICH HIS ARDOUR EXTINGUISHED ITSELF WAS A COLD INDIFFERENT KNOWLEDGE OF HIMSELF +1089-134686-0009-1742: AT MOST BY AN ALMS GIVEN TO A BEGGAR WHOSE BLESSING HE FLED FROM HE MIGHT HOPE WEARILY TO WIN FOR HIMSELF SOME MEASURE OF ACTUAL GRACE +1089-134686-0010-1743: WELL NOW ENNIS I DECLARE YOU HAVE A HEAD AND SO HAS MY STICK +1089-134686-0011-1744: ON SATURDAY MORNINGS WHEN THE SODALITY MET IN THE CHAPEL TO RECITE THE LITTLE OFFICE HIS PLACE WAS A CUSHIONED KNEELING DESK AT THE RIGHT OF THE ALTAR FROM WHICH HE LED HIS WING OF BOYS THROUGH THE RESPONSES +1089-134686-0012-1745: HER EYES SEEMED TO REGARD HIM WITH MILD PITY HER HOLINESS A STRANGE LIGHT GLOWING FAINTLY UPON HER FRAIL FLESH DID NOT HUMILIATE THE SINNER WHO APPROACHED HER +1089-134686-0013-1746: IF EVER HE WAS IMPELLED TO CAST SIN FROM HIM AND TO REPENT THE IMPULSE THAT MOVED HIM WAS THE WISH TO BE HER KNIGHT +1089-134686-0014-1747: HE TRIED TO THINK HOW IT COULD BE +1089-134686-0015-1748: BUT THE DUSK DEEPENING 
IN THE SCHOOLROOM COVERED OVER HIS THOUGHTS THE BELL RANG +1089-134686-0016-1749: THEN YOU CAN ASK HIM QUESTIONS ON THE CATECHISM DEDALUS +1089-134686-0017-1750: STEPHEN LEANING BACK AND DRAWING IDLY ON HIS SCRIBBLER LISTENED TO THE TALK ABOUT HIM WHICH HERON CHECKED FROM TIME TO TIME BY SAYING +1089-134686-0018-1751: IT WAS STRANGE TOO THAT HE FOUND AN ARID PLEASURE IN FOLLOWING UP TO THE END THE RIGID LINES OF THE DOCTRINES OF THE CHURCH AND PENETRATING INTO OBSCURE SILENCES ONLY TO HEAR AND FEEL THE MORE DEEPLY HIS OWN CONDEMNATION +1089-134686-0019-1752: THE SENTENCE OF SAINT JAMES WHICH SAYS THAT HE WHO OFFENDS AGAINST ONE COMMANDMENT BECOMES GUILTY OF ALL HAD SEEMED TO HIM FIRST A SWOLLEN PHRASE UNTIL HE HAD BEGUN TO GROPE IN THE DARKNESS OF HIS OWN STATE +1089-134686-0020-1753: IF A MAN HAD STOLEN A POUND IN HIS YOUTH AND HAD USED THAT POUND TO AMASS A HUGE FORTUNE HOW MUCH WAS HE OBLIGED TO GIVE BACK THE POUND HE HAD STOLEN ONLY (OR->WERE) THE POUND TOGETHER WITH THE COMPOUND INTEREST ACCRUING UPON IT OR ALL HIS HUGE FORTUNE +1089-134686-0021-1754: IF A LAYMAN IN GIVING BAPTISM POUR THE WATER BEFORE SAYING THE WORDS IS THE CHILD BAPTIZED +1089-134686-0022-1755: HOW COMES IT THAT WHILE THE FIRST (BEATITUDE->BE ATTITUDE) PROMISES THE KINGDOM OF HEAVEN TO THE POOR OF HEART THE SECOND (BEATITUDE->BE ATTITUDE) PROMISES ALSO TO THE MEEK THAT THEY SHALL POSSESS THE LAND +1089-134686-0023-1756: WHY WAS THE SACRAMENT OF THE EUCHARIST INSTITUTED UNDER THE TWO SPECIES OF BREAD AND WINE IF JESUS CHRIST BE PRESENT BODY AND BLOOD SOUL AND DIVINITY IN THE BREAD ALONE AND IN THE WINE ALONE +1089-134686-0024-1757: IF THE WINE CHANGE INTO VINEGAR AND THE HOST CRUMBLE INTO CORRUPTION AFTER THEY HAVE BEEN CONSECRATED IS JESUS CHRIST STILL PRESENT UNDER THEIR SPECIES AS GOD AND AS MAN +1089-134686-0025-1758: A GENTLE KICK FROM THE TALL BOY IN THE BENCH BEHIND URGED STEPHEN TO ASK A DIFFICULT QUESTION +1089-134686-0026-1759: THE RECTOR DID NOT ASK FOR A CATECHISM TO HEAR THE LESSON FROM +1089-134686-0027-1760: HE CLASPED HIS HANDS ON THE DESK AND SAID +1089-134686-0028-1761: THE RETREAT WILL BEGIN ON WEDNESDAY AFTERNOON IN (HONOUR->HONOR) OF SAINT FRANCIS (XAVIER->SAVIER) WHOSE FEAST DAY IS SATURDAY +1089-134686-0029-1762: ON FRIDAY CONFESSION WILL BE HEARD ALL THE AFTERNOON AFTER BEADS +1089-134686-0030-1763: BEWARE OF MAKING THAT MISTAKE +1089-134686-0031-1764: STEPHEN'S HEART BEGAN SLOWLY TO FOLD AND FADE WITH FEAR LIKE A WITHERING FLOWER +1089-134686-0032-1765: HE IS CALLED AS YOU KNOW THE APOSTLE OF THE INDIES +1089-134686-0033-1766: A GREAT SAINT SAINT FRANCIS (XAVIER->ZEVIR) +1089-134686-0034-1767: THE RECTOR PAUSED AND THEN SHAKING HIS CLASPED HANDS BEFORE HIM WENT ON +1089-134686-0035-1768: HE HAD THE FAITH IN HIM THAT MOVES MOUNTAINS +1089-134686-0036-1769: A GREAT SAINT SAINT FRANCIS (XAVIER->ZEVIER) +1089-134686-0037-1770: IN THE SILENCE THEIR DARK FIRE KINDLED THE DUSK INTO A TAWNY GLOW +1089-134691-0000-1707: HE COULD WAIT NO LONGER +1089-134691-0001-1708: FOR A FULL HOUR HE HAD PACED UP AND DOWN WAITING BUT HE COULD WAIT NO LONGER +1089-134691-0002-1709: HE SET OFF ABRUPTLY FOR THE BULL WALKING RAPIDLY LEST HIS FATHER'S SHRILL WHISTLE MIGHT CALL HIM BACK AND IN A FEW MOMENTS HE HAD ROUNDED THE CURVE AT THE POLICE BARRACK AND WAS SAFE +1089-134691-0003-1710: THE UNIVERSITY +1089-134691-0004-1711: PRIDE AFTER SATISFACTION UPLIFTED HIM LIKE LONG SLOW WAVES +1089-134691-0005-1712: WHOSE FEET ARE AS THE FEET OF (HARTS->HEARTS) AND UNDERNEATH THE EVERLASTING ARMS +1089-134691-0006-1713: THE 
PRIDE OF THAT DIM IMAGE BROUGHT BACK TO HIS MIND THE DIGNITY OF THE OFFICE HE HAD REFUSED +1089-134691-0007-1714: SOON THE WHOLE BRIDGE WAS TREMBLING AND RESOUNDING +1089-134691-0008-1715: THE UNCOUTH FACES PASSED HIM TWO BY TWO STAINED YELLOW OR RED OR LIVID BY THE SEA AND AS HE STROVE TO LOOK AT THEM WITH EASE AND INDIFFERENCE A FAINT STAIN OF PERSONAL SHAME AND COMMISERATION ROSE TO HIS OWN FACE +1089-134691-0009-1716: ANGRY WITH HIMSELF HE TRIED TO HIDE HIS FACE FROM THEIR EYES BY GAZING DOWN SIDEWAYS INTO THE SHALLOW SWIRLING WATER UNDER THE BRIDGE BUT HE STILL SAW A REFLECTION THEREIN OF THEIR TOP HEAVY SILK HATS AND HUMBLE TAPE LIKE COLLARS AND LOOSELY HANGING CLERICAL CLOTHES BROTHER HICKEY +1089-134691-0010-1717: BROTHER (MAC ARDLE->MC CARDLE) BROTHER (KEOGH->KIOPH) +1089-134691-0011-1718: THEIR PIETY WOULD BE LIKE THEIR NAMES LIKE THEIR FACES LIKE THEIR CLOTHES AND (IT->*) WAS IDLE FOR HIM TO TELL HIMSELF THAT THEIR HUMBLE AND CONTRITE HEARTS IT MIGHT BE PAID A FAR RICHER TRIBUTE OF DEVOTION THAN HIS HAD EVER BEEN A GIFT TENFOLD MORE ACCEPTABLE THAN HIS ELABORATE ADORATION +1089-134691-0012-1719: IT WAS IDLE FOR HIM TO MOVE HIMSELF TO BE GENEROUS TOWARDS THEM TO TELL HIMSELF THAT IF HE EVER CAME TO THEIR GATES STRIPPED OF HIS PRIDE BEATEN AND IN (BEGGAR'S->BEGGARS) WEEDS THAT THEY WOULD BE GENEROUS TOWARDS HIM LOVING HIM AS THEMSELVES +1089-134691-0013-1720: IDLE AND EMBITTERING FINALLY TO ARGUE AGAINST HIS OWN DISPASSIONATE CERTITUDE THAT THE COMMANDMENT OF LOVE BADE US NOT TO LOVE OUR NEIGHBOUR AS OURSELVES WITH THE SAME AMOUNT AND INTENSITY OF LOVE BUT TO LOVE HIM AS OURSELVES WITH THE SAME KIND OF LOVE +1089-134691-0014-1721: THE PHRASE AND THE DAY AND THE SCENE HARMONIZED IN (A CHORD->ACCORD) +1089-134691-0015-1722: WORDS WAS IT THEIR (COLOURS->COLORS) +1089-134691-0016-1723: THEY WERE VOYAGING ACROSS THE DESERTS OF THE SKY A HOST OF NOMADS ON THE MARCH VOYAGING HIGH OVER IRELAND WESTWARD BOUND +1089-134691-0017-1724: THE EUROPE THEY HAD COME FROM LAY OUT THERE BEYOND THE IRISH SEA EUROPE OF STRANGE TONGUES AND (VALLEYED->VALID) AND (WOODBEGIRT->WOULD BE GIRT) AND (CITADELLED->CITADELED) AND OF ENTRENCHED AND (MARSHALLED->MARSHALED) RACES +1089-134691-0018-1725: AGAIN AGAIN +1089-134691-0019-1726: A VOICE FROM BEYOND THE WORLD WAS CALLING +1089-134691-0020-1727: HELLO (STEPHANOS->STUFFANOS) HERE COMES THE (DEDALUS->DAEDALUS) +1089-134691-0021-1728: THEIR DIVING STONE POISED ON ITS RUDE SUPPORTS AND ROCKING UNDER THEIR PLUNGES AND THE ROUGH HEWN STONES OF THE SLOPING (BREAKWATER->BRAKE WATER) OVER WHICH THEY SCRAMBLED IN THEIR (HORSEPLAY->HORSE PLAY) GLEAMED WITH COLD WET LUSTRE +1089-134691-0022-1729: HE STOOD STILL IN DEFERENCE TO THEIR CALLS AND PARRIED THEIR BANTER WITH EASY WORDS +1089-134691-0023-1730: IT WAS A PAIN TO SEE THEM AND A SWORD LIKE PAIN TO SEE THE SIGNS OF ADOLESCENCE THAT MADE REPELLENT THEIR PITIABLE NAKEDNESS +1089-134691-0024-1731: (STEPHANOS DEDALOS->STEPHANO'S DEAD LOSS) +1089-134691-0025-1732: A MOMENT BEFORE THE GHOST OF THE ANCIENT KINGDOM OF THE DANES HAD LOOKED FORTH THROUGH THE VESTURE OF THE (HAZEWRAPPED->HAYES WRAPPED) CITY +1188-133604-0000-1771: YOU WILL FIND ME CONTINUALLY SPEAKING OF FOUR MEN TITIAN (HOLBEIN->HOLBINE) TURNER AND (TINTORET->TINTARETTE) IN ALMOST THE SAME TERMS +1188-133604-0001-1772: (THEY->THE) UNITE EVERY QUALITY AND SOMETIMES YOU WILL FIND ME REFERRING TO THEM AS COLORISTS SOMETIMES AS (CHIAROSCURISTS->KIARASCURISTS) +1188-133604-0002-1773: BY BEING STUDIOUS OF COLOR THEY ARE STUDIOUS OF DIVISION AND WHILE THE 
(CHIAROSCURIST->CURE SCURUS) DEVOTES HIMSELF TO THE REPRESENTATION OF DEGREES OF FORCE IN ONE THING (UNSEPARATED->ON SEPARATED) LIGHT THE COLORISTS HAVE FOR THEIR FUNCTION THE ATTAINMENT OF BEAUTY BY ARRANGEMENT OF THE DIVISIONS OF LIGHT +1188-133604-0003-1774: MY FIRST AND PRINCIPAL REASON WAS THAT THEY ENFORCED BEYOND ALL RESISTANCE ON ANY STUDENT WHO MIGHT ATTEMPT TO COPY THEM THIS METHOD OF LAYING PORTIONS OF DISTINCT HUE SIDE BY SIDE +1188-133604-0004-1775: SOME OF THE TOUCHES INDEED WHEN THE TINT HAS BEEN MIXED WITH MUCH WATER HAVE BEEN LAID IN LITTLE DROPS OR PONDS SO THAT THE PIGMENT MIGHT CRYSTALLIZE HARD AT THE EDGE +1188-133604-0005-1776: IT IS THE HEAD OF A PARROT WITH A LITTLE FLOWER IN HIS BEAK FROM A PICTURE OF (CARPACCIO'S->CARPATCHIO'S) ONE OF HIS SERIES OF THE LIFE OF SAINT GEORGE +1188-133604-0006-1777: THEN HE COMES TO THE BEAK OF IT +1188-133604-0007-1778: THE BROWN GROUND BENEATH IS LEFT FOR THE MOST PART ONE TOUCH OF BLACK IS PUT FOR THE HOLLOW (TWO->TOO) DELICATE LINES OF DARK (GRAY DEFINE->GREY TO FIND) THE OUTER CURVE AND ONE LITTLE QUIVERING TOUCH OF WHITE DRAWS THE INNER EDGE OF THE MANDIBLE +1188-133604-0008-1779: FOR BELIEVE ME THE FINAL PHILOSOPHY OF ART CAN ONLY RATIFY THEIR OPINION THAT THE BEAUTY OF A COCK ROBIN IS TO BE (RED->READ) AND OF A GRASS PLOT TO BE GREEN AND THE BEST SKILL OF ART IS (IN->AN) INSTANTLY SEIZING ON THE MANIFOLD DELICIOUSNESS OF LIGHT WHICH YOU CAN ONLY SEIZE BY PRECISION OF INSTANTANEOUS TOUCH +1188-133604-0009-1780: NOW YOU WILL SEE IN THESE STUDIES THAT THE MOMENT THE (WHITE->WIGHT) IS (INCLOSED->ENCLOSED) PROPERLY AND (HARMONIZED->HARMONIZE) WITH THE OTHER HUES IT BECOMES SOMEHOW MORE PRECIOUS AND PEARLY THAN THE WHITE PAPER AND THAT I AM NOT AFRAID TO LEAVE A WHOLE FIELD OF UNTREATED WHITE PAPER ALL ROUND IT BEING SURE THAT EVEN THE LITTLE DIAMONDS IN THE ROUND WINDOW WILL TELL AS JEWELS IF THEY ARE GRADATED JUSTLY +1188-133604-0010-1781: BUT IN THIS (VIGNETTE->VINEY) COPIED FROM TURNER YOU HAVE THE TWO PRINCIPLES BROUGHT OUT PERFECTLY +1188-133604-0011-1782: THEY ARE BEYOND ALL OTHER WORKS (THAT->THAN) I KNOW EXISTING DEPENDENT FOR THEIR EFFECT ON LOW SUBDUED TONES THEIR FAVORITE CHOICE IN TIME OF DAY BEING EITHER DAWN OR TWILIGHT AND EVEN THEIR BRIGHTEST SUNSETS PRODUCED CHIEFLY OUT OF GRAY PAPER +1188-133604-0012-1783: IT MAY BE THAT A GREAT (COLORIST->COLOR LIST) WILL USE HIS UTMOST FORCE OF COLOR AS A SINGER HIS FULL POWER OF VOICE BUT LOUD OR LOW THE VIRTUE IS IN BOTH CASES ALWAYS IN REFINEMENT NEVER IN LOUDNESS +1188-133604-0013-1784: IT MUST REMEMBER BE ONE OR THE OTHER +1188-133604-0014-1785: DO NOT THEREFORE THINK THAT THE GOTHIC (SCHOOL IS->SCHOOLS) AN EASY ONE +1188-133604-0015-1786: THE LAW OF THAT SCHOOL IS THAT EVERYTHING SHALL BE SEEN CLEARLY OR AT LEAST ONLY IN SUCH MIST OR FAINTNESS AS SHALL BE DELIGHTFUL AND I HAVE NO DOUBT THAT THE BEST INTRODUCTION TO IT WOULD BE THE ELEMENTARY PRACTICE OF PAINTING EVERY STUDY ON A GOLDEN GROUND +1188-133604-0016-1787: THIS AT ONCE COMPELS YOU TO UNDERSTAND THAT THE WORK IS TO BE IMAGINATIVE AND DECORATIVE THAT IT REPRESENTS BEAUTIFUL THINGS IN THE CLEAREST WAY BUT NOT UNDER EXISTING CONDITIONS AND THAT IN FACT YOU ARE PRODUCING (JEWELER'S->JEWELLER'S) WORK RATHER THAN PICTURES +1188-133604-0017-1788: THAT A STYLE IS RESTRAINED OR SEVERE DOES NOT MEAN THAT IT IS ALSO ERRONEOUS +1188-133604-0018-1789: IN ALL EARLY GOTHIC ART INDEED YOU WILL FIND FAILURE OF THIS KIND ESPECIALLY DISTORTION AND RIGIDITY WHICH ARE IN MANY RESPECTS PAINFULLY TO BE COMPARED WITH THE SPLENDID REPOSE 
OF CLASSIC ART +1188-133604-0019-1790: THE LARGE LETTER CONTAINS INDEED ENTIRELY FEEBLE AND ILL DRAWN FIGURES THAT IS MERELY CHILDISH AND FAILING WORK OF AN INFERIOR HAND IT IS NOT CHARACTERISTIC OF GOTHIC OR ANY OTHER SCHOOL +1188-133604-0020-1791: BUT OBSERVE YOU CAN ONLY DO THIS ON ONE CONDITION THAT OF STRIVING ALSO TO CREATE IN REALITY THE BEAUTY WHICH YOU SEEK (IN->AND) IMAGINATION +1188-133604-0021-1792: IT WILL BE WHOLLY IMPOSSIBLE FOR YOU TO RETAIN THE TRANQUILLITY OF TEMPER AND FELICITY OF FAITH NECESSARY FOR NOBLE (PURIST->PUREST) PAINTING UNLESS YOU ARE ACTIVELY ENGAGED IN PROMOTING THE FELICITY AND PEACE OF PRACTICAL LIFE +1188-133604-0022-1793: YOU MUST LOOK AT HIM IN THE FACE FIGHT HIM CONQUER HIM WITH WHAT (SCATHE->SCATH) YOU MAY YOU NEED NOT THINK TO KEEP OUT OF THE WAY OF HIM +1188-133604-0023-1794: THE (COLORIST->CHOLERAIST) SAYS FIRST OF ALL AS MY DELICIOUS (PAROQUET->PERICE) WAS RUBY SO THIS NASTY VIPER SHALL BE BLACK AND THEN IS THE QUESTION CAN I ROUND HIM OFF EVEN THOUGH HE IS BLACK AND MAKE HIM SLIMY AND YET SPRINGY AND CLOSE DOWN CLOTTED LIKE A POOL OF BLACK BLOOD ON THE EARTH ALL THE SAME +1188-133604-0024-1795: NOTHING WILL BE MORE PRECIOUS TO YOU I THINK IN THE PRACTICAL STUDY OF ART THAN THE CONVICTION WHICH WILL FORCE ITSELF ON YOU MORE AND MORE EVERY HOUR OF THE WAY ALL THINGS ARE BOUND TOGETHER LITTLE AND GREAT IN SPIRIT AND IN MATTER +1188-133604-0025-1796: YOU KNOW (I HAVE->I'VE) JUST BEEN TELLING YOU HOW THIS SCHOOL OF MATERIALISM (AND->IN) CLAY INVOLVED ITSELF AT LAST IN CLOUD AND FIRE +1188-133604-0026-1797: HERE IS AN EQUALLY TYPICAL GREEK SCHOOL LANDSCAPE BY WILSON LOST WHOLLY IN GOLDEN MIST THE TREES SO SLIGHTLY DRAWN THAT YOU DON'T KNOW IF THEY ARE TREES OR TOWERS AND NO CARE FOR COLOR (WHATEVER->WHATSOEVER) PERFECTLY DECEPTIVE AND (MARVELOUS->MARVELLOUS) EFFECT OF SUNSHINE THROUGH THE MIST APOLLO (AND->IN) THE PYTHON +1188-133604-0027-1798: NOW HERE IS RAPHAEL EXACTLY BETWEEN THE TWO TREES STILL DRAWN LEAF BY LEAF (WHOLLY->HOLY) FORMAL BUT BEAUTIFUL MIST COMING GRADUALLY INTO THE DISTANCE +1188-133604-0028-1799: WELL THEN LAST HERE IS (TURNER'S->TURNERS) GREEK SCHOOL OF THE HIGHEST CLASS AND YOU DEFINE HIS ART ABSOLUTELY AS FIRST THE DISPLAYING INTENSELY AND WITH THE STERNEST INTELLECT OF NATURAL FORM AS IT IS AND THEN THE ENVELOPMENT OF IT WITH CLOUD AND FIRE +1188-133604-0029-1800: ONLY THERE ARE TWO SORTS OF CLOUD (AND->IN) FIRE +1188-133604-0030-1801: HE KNOWS THEM BOTH +1188-133604-0031-1802: THERE'S ONE AND THERE'S ANOTHER THE DUDLEY AND THE FLINT +1188-133604-0032-1803: IT IS ONLY A PENCIL OUTLINE BY EDWARD (BURNE->BYRNE) JONES IN ILLUSTRATION OF THE STORY OF PSYCHE IT IS THE INTRODUCTION OF PSYCHE AFTER ALL HER TROUBLES (INTO->AND TO) HEAVEN +1188-133604-0033-1804: EVERY PLANT IN THE GRASS IS SET FORMALLY GROWS PERFECTLY AND MAY BE REALIZED COMPLETELY +1188-133604-0034-1805: EXQUISITE ORDER AND UNIVERSAL WITH ETERNAL LIFE AND LIGHT THIS IS THE FAITH AND EFFORT OF THE SCHOOLS OF (CRYSTAL->CRISTEL) AND YOU MAY DESCRIBE AND COMPLETE THEIR WORK QUITE LITERALLY BY TAKING ANY VERSES OF CHAUCER IN HIS TENDER MOOD (AND->IN) OBSERVING HOW HE INSISTS ON THE CLEARNESS AND BRIGHTNESS FIRST AND THEN ON THE ORDER +1188-133604-0035-1806: THUS IN CHAUCER'S DREAM +1188-133604-0036-1807: IN BOTH THESE HIGH MYTHICAL SUBJECTS THE SURROUNDING NATURE THOUGH SUFFERING IS STILL DIGNIFIED AND BEAUTIFUL +1188-133604-0037-1808: EVERY LINE IN WHICH THE MASTER TRACES IT EVEN WHERE SEEMINGLY NEGLIGENT IS LOVELY AND SET DOWN WITH A MEDITATIVE CALMNESS WHICH MAKES THESE 
TWO ETCHINGS CAPABLE OF BEING PLACED BESIDE THE MOST TRANQUIL WORK OF (HOLBEIN->HOLBINE) OR (DUERER->DURE) +1188-133604-0038-1809: BUT NOW HERE IS A SUBJECT OF WHICH YOU WILL WONDER AT FIRST WHY TURNER DREW IT AT ALL +1188-133604-0039-1810: IT HAS NO BEAUTY WHATSOEVER NO SPECIALTY OF PICTURESQUENESS (AND->IN) ALL ITS LINES ARE CRAMPED AND POOR +1188-133604-0040-1811: THE CRAMPNESS (AND->IN) THE POVERTY ARE ALL INTENDED +1188-133604-0041-1812: IT IS A GLEANER BRINGING DOWN HER ONE SHEAF OF CORN TO AN OLD (WATERMILL->WATER MILL) ITSELF MOSSY AND RENT SCARCELY ABLE TO GET ITS STONES TO TURN +1188-133604-0042-1813: THE SCENE IS ABSOLUTELY ARCADIAN +1188-133604-0043-1814: SEE THAT YOUR (LIVES->LIES) BE IN NOTHING WORSE THAN A BOY'S CLIMBING FOR HIS ENTANGLED KITE +1188-133604-0044-1815: IT WILL BE WELL FOR YOU IF YOU JOIN NOT WITH THOSE WHO INSTEAD OF KITES FLY FALCONS WHO INSTEAD OF OBEYING THE LAST WORDS OF THE GREAT CLOUD SHEPHERD TO FEED HIS SHEEP LIVE THE LIVES HOW MUCH LESS THAN VANITY OF THE WAR WOLF AND THE (GIER->GEAR) EAGLE +121-121726-0000-2558: ALSO A POPULAR CONTRIVANCE WHEREBY LOVE MAKING MAY BE SUSPENDED BUT NOT STOPPED DURING THE PICNIC SEASON +121-121726-0001-2559: (HARANGUE->HARANG) THE TIRESOME PRODUCT OF A TIRELESS TONGUE +121-121726-0002-2560: (ANGOR PAIN->ANGORE HAYNE) PAINFUL TO HEAR +121-121726-0003-2561: HAY FEVER A HEART TROUBLE CAUSED BY FALLING IN LOVE WITH A GRASS WIDOW +121-121726-0004-2562: HEAVEN A GOOD PLACE TO BE RAISED TO +121-121726-0005-2563: HEDGE (A FENCE->OFFENCE) +121-121726-0006-2564: HEREDITY THE CAUSE OF ALL OUR FAULTS +121-121726-0007-2565: HORSE SENSE A DEGREE OF WISDOM THAT KEEPS ONE FROM BETTING ON THE RACES +121-121726-0008-2566: HOSE MAN'S EXCUSE FOR WETTING THE WALK +121-121726-0009-2567: HOTEL A PLACE WHERE A GUEST OFTEN GIVES UP GOOD DOLLARS FOR POOR QUARTERS +121-121726-0010-2568: (HOUSECLEANING->HOUSE CLEANING) A DOMESTIC UPHEAVAL THAT MAKES IT EASY FOR THE GOVERNMENT TO ENLIST ALL THE SOLDIERS IT NEEDS +121-121726-0011-2569: HUSBAND THE NEXT THING TO A WIFE +121-121726-0012-2570: HUSSY WOMAN AND BOND TIE +121-121726-0013-2571: TIED TO A WOMAN +121-121726-0014-2572: HYPOCRITE A HORSE DEALER +121-123852-0000-2615: THOSE PRETTY WRONGS THAT LIBERTY COMMITS WHEN I AM (SOMETIME->SOME TIME) ABSENT FROM THY HEART THY BEAUTY AND THY YEARS (FULL->FALL) WELL BEFITS FOR STILL TEMPTATION FOLLOWS WHERE THOU ART +121-123852-0001-2616: (AY->I) ME +121-123852-0002-2617: NO MATTER THEN ALTHOUGH MY FOOT DID STAND UPON THE FARTHEST EARTH (REMOV'D->REMOVED) FROM THEE FOR NIMBLE THOUGHT CAN JUMP BOTH SEA AND LAND AS SOON AS THINK THE PLACE WHERE HE WOULD BE BUT AH +121-123852-0003-2618: THOUGHT KILLS ME THAT I AM NOT (THOUGHT->BOUGHT) TO LEAP LARGE LENGTHS OF MILES WHEN THOU ART GONE BUT THAT SO MUCH OF EARTH AND WATER WROUGHT I MUST ATTEND TIME'S LEISURE WITH MY MOAN RECEIVING (NOUGHT->NOT) BY ELEMENTS SO SLOW BUT HEAVY TEARS BADGES OF EITHER'S WOE +121-123852-0004-2619: MY HEART DOTH PLEAD THAT THOU IN HIM DOST LIE A CLOSET NEVER (PIERC'D->PIERCED) WITH CRYSTAL EYES BUT THE DEFENDANT DOTH THAT PLEA DENY AND SAYS IN HIM THY FAIR APPEARANCE LIES +121-123859-0000-2573: YOU ARE MY ALL THE WORLD AND I MUST STRIVE TO KNOW MY SHAMES AND PRAISES FROM YOUR TONGUE NONE ELSE TO ME NOR I TO NONE ALIVE THAT MY (STEEL'D->STEELED) SENSE OR CHANGES RIGHT OR WRONG +121-123859-0001-2574: (O->OH) TIS THE FIRST TIS FLATTERY IN MY SEEING AND MY GREAT MIND MOST KINGLY DRINKS IT UP MINE EYE WELL KNOWS WHAT WITH HIS GUST IS (GREEING->GREEN) AND TO HIS PALATE DOTH PREPARE THE CUP IF 
IT BE (POISON'D->POISONED) TIS THE LESSER SIN THAT MINE EYE LOVES IT AND DOTH FIRST BEGIN +121-123859-0002-2575: BUT RECKONING TIME WHOSE (MILLION'D->MILLIONED) ACCIDENTS CREEP IN TWIXT VOWS AND CHANGE DECREES OF KINGS TAN SACRED BEAUTY BLUNT THE (SHARP'ST INTENTS->SHARPEST INTENSE) DIVERT STRONG MINDS TO THE COURSE OF ALTERING THINGS ALAS WHY FEARING OF TIME'S TYRANNY MIGHT I NOT THEN SAY NOW I LOVE YOU BEST WHEN I WAS CERTAIN (O'ER INCERTAINTY->OR IN CERTAINTY) CROWNING THE PRESENT DOUBTING OF THE REST +121-123859-0003-2576: LOVE IS A BABE THEN MIGHT I NOT SAY SO TO GIVE FULL GROWTH TO THAT WHICH STILL DOTH GROW +121-123859-0004-2577: SO I RETURN (REBUK'D->REBUKED) TO MY CONTENT AND GAIN BY ILL THRICE MORE THAN I HAVE SPENT +121-127105-0000-2578: IT WAS THIS OBSERVATION THAT DREW FROM DOUGLAS NOT IMMEDIATELY BUT LATER IN THE EVENING A REPLY THAT HAD THE INTERESTING CONSEQUENCE TO WHICH I CALL ATTENTION +121-127105-0001-2579: (SOMEONE->SOME ONE) ELSE TOLD A STORY NOT PARTICULARLY EFFECTIVE WHICH I SAW HE WAS NOT FOLLOWING +121-127105-0002-2580: CRIED ONE OF THE WOMEN HE TOOK NO NOTICE OF HER HE LOOKED AT ME BUT AS IF INSTEAD OF ME HE SAW WHAT HE SPOKE OF +121-127105-0003-2581: THERE WAS A UNANIMOUS GROAN AT THIS AND MUCH REPROACH AFTER WHICH IN HIS PREOCCUPIED WAY HE EXPLAINED +121-127105-0004-2582: THE (STORY'S->STORIES) WRITTEN +121-127105-0005-2583: (I COULD WRITE->THY GOOD RIGHT) TO MY MAN AND ENCLOSE THE KEY HE COULD SEND DOWN THE PACKET AS HE FINDS IT +121-127105-0006-2584: THE OTHERS RESENTED POSTPONEMENT BUT IT WAS JUST HIS SCRUPLES THAT CHARMED ME +121-127105-0007-2585: TO THIS HIS ANSWER WAS PROMPT OH THANK GOD NO AND IS THE RECORD YOURS +121-127105-0008-2586: HE HUNG FIRE AGAIN A WOMAN'S +121-127105-0009-2587: SHE HAS BEEN DEAD THESE TWENTY YEARS +121-127105-0010-2588: SHE SENT ME THE PAGES IN QUESTION BEFORE SHE DIED +121-127105-0011-2589: SHE WAS THE MOST AGREEABLE WOMAN I'VE EVER KNOWN IN HER POSITION SHE WOULD HAVE BEEN WORTHY OF ANY WHATEVER +121-127105-0012-2590: (IT WASN'T->TWASN'T) SIMPLY THAT SHE SAID SO BUT THAT I KNEW SHE HADN'T I WAS SURE I COULD SEE +121-127105-0013-2591: YOU'LL EASILY JUDGE WHY WHEN YOU HEAR BECAUSE THE THING HAD BEEN SUCH A SCARE HE CONTINUED TO FIX ME +121-127105-0014-2592: YOU ARE ACUTE +121-127105-0015-2593: HE QUITTED THE FIRE AND DROPPED BACK INTO HIS CHAIR +121-127105-0016-2594: PROBABLY NOT TILL THE SECOND POST +121-127105-0017-2595: IT WAS ALMOST THE TONE OF HOPE EVERYBODY WILL STAY +121-127105-0018-2596: CRIED THE LADIES WHOSE DEPARTURE HAD BEEN FIXED +121-127105-0019-2597: MISSUS GRIFFIN HOWEVER EXPRESSED THE NEED FOR (A->*) LITTLE MORE LIGHT +121-127105-0020-2598: WHO WAS IT SHE WAS IN LOVE WITH THE STORY WILL TELL I TOOK UPON MYSELF TO REPLY OH I CAN'T WAIT FOR THE STORY THE STORY WON'T TELL SAID DOUGLAS NOT IN ANY LITERAL VULGAR WAY MORE'S THE PITY THEN +121-127105-0021-2599: WON'T YOU TELL DOUGLAS +121-127105-0022-2600: (WELL->FOR) IF I DON'T KNOW WHO SHE WAS IN LOVE WITH I KNOW WHO HE WAS +121-127105-0023-2601: LET ME SAY HERE DISTINCTLY TO HAVE DONE WITH IT THAT THIS NARRATIVE FROM AN EXACT TRANSCRIPT OF MY OWN MADE MUCH LATER IS WHAT I SHALL PRESENTLY GIVE +121-127105-0024-2602: POOR DOUGLAS BEFORE HIS DEATH WHEN IT WAS IN SIGHT COMMITTED TO ME THE MANUSCRIPT THAT REACHED HIM ON THE THIRD OF THESE DAYS AND THAT ON THE SAME SPOT WITH IMMENSE EFFECT HE BEGAN TO READ TO OUR HUSHED LITTLE CIRCLE ON THE NIGHT OF THE FOURTH +121-127105-0025-2603: THE DEPARTING LADIES WHO HAD SAID THEY WOULD STAY DIDN'T OF COURSE THANK HEAVEN STAY THEY 
DEPARTED IN CONSEQUENCE OF ARRANGEMENTS MADE IN A RAGE OF CURIOSITY AS THEY PROFESSED PRODUCED BY THE TOUCHES WITH WHICH HE HAD ALREADY WORKED US UP +121-127105-0026-2604: THE FIRST OF THESE TOUCHES CONVEYED THAT THE WRITTEN STATEMENT TOOK UP THE TALE AT A POINT AFTER IT HAD IN A MANNER BEGUN +121-127105-0027-2605: HE HAD FOR HIS OWN TOWN RESIDENCE A BIG HOUSE FILLED WITH THE SPOILS OF TRAVEL AND THE TROPHIES OF THE CHASE BUT IT WAS TO HIS COUNTRY HOME AN OLD FAMILY PLACE IN ESSEX THAT HE WISHED HER IMMEDIATELY TO PROCEED +121-127105-0028-2606: THE AWKWARD THING WAS THAT THEY HAD PRACTICALLY NO OTHER RELATIONS AND THAT HIS OWN AFFAIRS TOOK UP ALL HIS TIME +121-127105-0029-2607: THERE WERE PLENTY OF PEOPLE TO HELP BUT OF COURSE THE YOUNG LADY WHO SHOULD GO DOWN AS GOVERNESS WOULD BE IN SUPREME AUTHORITY +121-127105-0030-2608: I DON'T ANTICIPATE +121-127105-0031-2609: SHE WAS YOUNG UNTRIED NERVOUS IT WAS A VISION OF SERIOUS DUTIES (AND->IN) LITTLE COMPANY OF REALLY GREAT LONELINESS +121-127105-0032-2610: YES BUT THAT'S JUST THE BEAUTY OF HER PASSION +121-127105-0033-2611: IT WAS THE BEAUTY OF IT +121-127105-0034-2612: IT SOUNDED DULL (IT->BUT) SOUNDED STRANGE AND ALL THE MORE SO BECAUSE OF HIS MAIN CONDITION WHICH WAS +121-127105-0035-2613: SHE PROMISED TO DO THIS AND SHE MENTIONED TO ME THAT WHEN FOR A MOMENT DISBURDENED DELIGHTED HE HELD HER HAND THANKING HER FOR THE SACRIFICE SHE ALREADY FELT REWARDED +121-127105-0036-2614: BUT WAS THAT ALL HER REWARD ONE OF THE LADIES ASKED +1221-135766-0000-1305: HOW STRANGE IT SEEMED TO THE SAD WOMAN AS SHE WATCHED THE GROWTH AND THE BEAUTY THAT BECAME EVERY DAY MORE BRILLIANT AND THE INTELLIGENCE THAT THREW ITS QUIVERING SUNSHINE OVER THE TINY FEATURES OF THIS CHILD +1221-135766-0001-1306: GOD AS A DIRECT CONSEQUENCE OF THE SIN WHICH MAN THUS PUNISHED HAD GIVEN HER A LOVELY CHILD WHOSE PLACE WAS ON THAT SAME (DISHONOURED->DISHONORED) BOSOM TO CONNECT HER PARENT FOR EVER WITH THE RACE AND DESCENT OF MORTALS AND TO BE FINALLY A BLESSED SOUL IN HEAVEN +1221-135766-0002-1307: YET THESE THOUGHTS AFFECTED HESTER PRYNNE LESS WITH HOPE THAN APPREHENSION +1221-135766-0003-1308: THE CHILD HAD A NATIVE GRACE WHICH DOES NOT INVARIABLY (CO EXIST->COEXIST) WITH FAULTLESS BEAUTY ITS ATTIRE HOWEVER SIMPLE ALWAYS (IMPRESSED->IMPRESS) THE BEHOLDER AS IF IT WERE THE VERY GARB THAT PRECISELY BECAME IT BEST +1221-135766-0004-1309: THIS OUTWARD MUTABILITY INDICATED AND DID NOT MORE THAN FAIRLY EXPRESS THE VARIOUS PROPERTIES OF HER INNER LIFE +1221-135766-0005-1310: HESTER COULD ONLY ACCOUNT FOR THE CHILD'S CHARACTER AND EVEN THEN MOST VAGUELY AND IMPERFECTLY BY RECALLING WHAT SHE HERSELF HAD BEEN DURING THAT MOMENTOUS PERIOD WHILE PEARL WAS IMBIBING HER SOUL FROM THE SPIRITUAL WORLD AND HER BODILY FRAME FROM ITS MATERIAL OF EARTH +1221-135766-0006-1311: THEY WERE NOW ILLUMINATED BY THE MORNING RADIANCE OF A YOUNG CHILD'S DISPOSITION BUT LATER IN THE DAY OF EARTHLY EXISTENCE MIGHT BE PROLIFIC OF THE STORM AND WHIRLWIND +1221-135766-0007-1312: HESTER PRYNNE NEVERTHELESS THE LOVING MOTHER OF THIS ONE CHILD RAN LITTLE RISK OF ERRING ON THE SIDE OF UNDUE SEVERITY +1221-135766-0008-1313: MINDFUL HOWEVER OF HER OWN ERRORS AND MISFORTUNES SHE EARLY SOUGHT TO IMPOSE A TENDER BUT STRICT CONTROL OVER THE INFANT IMMORTALITY THAT WAS COMMITTED TO HER CHARGE +1221-135766-0009-1314: AS TO ANY OTHER KIND OF DISCIPLINE WHETHER ADDRESSED TO HER MIND OR HEART LITTLE PEARL MIGHT OR MIGHT NOT BE WITHIN ITS REACH IN ACCORDANCE WITH THE CAPRICE THAT RULED THE MOMENT +1221-135766-0010-1315: IT 
WAS A LOOK SO INTELLIGENT YET INEXPLICABLE PERVERSE SOMETIMES SO MALICIOUS BUT GENERALLY ACCOMPANIED BY A WILD FLOW OF SPIRITS THAT HESTER COULD NOT HELP QUESTIONING AT SUCH MOMENTS WHETHER PEARL WAS A HUMAN CHILD +1221-135766-0011-1316: BEHOLDING IT HESTER WAS CONSTRAINED TO RUSH TOWARDS THE CHILD TO PURSUE THE LITTLE ELF IN THE FLIGHT WHICH SHE INVARIABLY BEGAN TO SNATCH HER TO HER BOSOM WITH A CLOSE PRESSURE AND EARNEST KISSES NOT SO MUCH FROM OVERFLOWING LOVE AS TO ASSURE HERSELF THAT PEARL WAS FLESH AND BLOOD AND NOT UTTERLY DELUSIVE +1221-135766-0012-1317: BROODING OVER ALL THESE MATTERS THE MOTHER FELT LIKE ONE WHO HAS EVOKED A SPIRIT BUT BY SOME IRREGULARITY IN THE PROCESS OF CONJURATION HAS FAILED TO WIN THE MASTER WORD THAT SHOULD CONTROL THIS NEW AND INCOMPREHENSIBLE INTELLIGENCE +1221-135766-0013-1318: PEARL WAS A BORN OUTCAST OF THE INFANTILE WORLD +1221-135766-0014-1319: PEARL (SAW->SAUL) AND GAZED INTENTLY BUT NEVER SOUGHT TO MAKE ACQUAINTANCE +1221-135766-0015-1320: IF SPOKEN TO SHE WOULD NOT SPEAK AGAIN +1221-135767-0000-1280: HESTER PRYNNE WENT ONE DAY TO THE MANSION OF GOVERNOR BELLINGHAM WITH A PAIR OF GLOVES WHICH SHE HAD FRINGED AND EMBROIDERED TO HIS ORDER AND WHICH WERE TO BE WORN ON SOME GREAT OCCASION OF STATE FOR THOUGH THE CHANCES OF A POPULAR ELECTION HAD CAUSED THIS FORMER RULER TO DESCEND A STEP OR TWO FROM THE HIGHEST RANK HE STILL HELD AN HONOURABLE AND INFLUENTIAL PLACE AMONG THE COLONIAL MAGISTRACY +1221-135767-0001-1281: ANOTHER AND FAR MORE IMPORTANT REASON THAN THE DELIVERY OF A PAIR OF EMBROIDERED GLOVES IMPELLED HESTER AT THIS TIME TO SEEK AN INTERVIEW WITH A PERSONAGE OF SO MUCH POWER AND ACTIVITY IN THE AFFAIRS OF THE SETTLEMENT +1221-135767-0002-1282: AT THAT EPOCH OF PRISTINE SIMPLICITY HOWEVER MATTERS OF EVEN SLIGHTER PUBLIC INTEREST AND OF FAR LESS INTRINSIC WEIGHT THAN THE WELFARE OF HESTER AND HER CHILD WERE STRANGELY MIXED UP WITH THE DELIBERATIONS OF LEGISLATORS AND ACTS OF STATE +1221-135767-0003-1283: THE PERIOD WAS HARDLY IF AT ALL EARLIER THAN THAT OF OUR STORY WHEN A DISPUTE CONCERNING THE RIGHT OF PROPERTY IN A PIG NOT ONLY CAUSED A FIERCE AND BITTER CONTEST IN THE LEGISLATIVE BODY OF THE COLONY BUT RESULTED IN AN IMPORTANT MODIFICATION OF THE FRAMEWORK ITSELF OF THE LEGISLATURE +1221-135767-0004-1284: WE HAVE SPOKEN OF (PEARL'S->PEARLS) RICH AND LUXURIANT BEAUTY A BEAUTY THAT SHONE WITH DEEP AND VIVID TINTS A BRIGHT COMPLEXION EYES POSSESSING INTENSITY BOTH OF DEPTH AND GLOW AND HAIR ALREADY OF A DEEP GLOSSY BROWN AND WHICH IN AFTER YEARS WOULD BE NEARLY AKIN TO BLACK +1221-135767-0005-1285: IT WAS THE SCARLET LETTER IN ANOTHER FORM THE SCARLET LETTER ENDOWED WITH LIFE +1221-135767-0006-1286: THE MOTHER HERSELF AS IF THE RED IGNOMINY WERE SO DEEPLY SCORCHED INTO HER BRAIN THAT ALL HER CONCEPTIONS ASSUMED ITS FORM HAD CAREFULLY WROUGHT OUT THE SIMILITUDE LAVISHING MANY HOURS OF MORBID INGENUITY TO CREATE AN ANALOGY BETWEEN THE OBJECT OF HER AFFECTION AND THE EMBLEM OF HER GUILT AND TORTURE +1221-135767-0007-1287: BUT IN TRUTH PEARL WAS THE ONE AS WELL AS THE OTHER AND ONLY IN CONSEQUENCE OF THAT IDENTITY HAD HESTER CONTRIVED SO PERFECTLY TO REPRESENT THE SCARLET LETTER IN HER APPEARANCE +1221-135767-0008-1288: COME THEREFORE AND LET US FLING MUD AT THEM +1221-135767-0009-1289: BUT PEARL WHO WAS A DAUNTLESS CHILD AFTER FROWNING STAMPING HER FOOT AND SHAKING HER LITTLE HAND WITH A VARIETY OF THREATENING GESTURES SUDDENLY MADE A RUSH AT THE KNOT OF HER ENEMIES AND PUT THEM ALL TO FLIGHT +1221-135767-0010-1290: SHE SCREAMED AND SHOUTED 
TOO WITH A TERRIFIC VOLUME OF SOUND WHICH DOUBTLESS CAUSED THE HEARTS OF THE FUGITIVES TO QUAKE WITHIN THEM +1221-135767-0011-1291: IT WAS FURTHER DECORATED WITH STRANGE AND SEEMINGLY CABALISTIC FIGURES AND DIAGRAMS SUITABLE TO THE QUAINT TASTE OF THE AGE WHICH HAD BEEN DRAWN IN THE STUCCO WHEN NEWLY LAID ON AND HAD NOW GROWN HARD AND DURABLE FOR THE ADMIRATION OF AFTER TIMES +1221-135767-0012-1292: THEY APPROACHED THE DOOR WHICH WAS OF AN ARCHED FORM AND FLANKED ON EACH SIDE BY A NARROW TOWER OR PROJECTION OF THE EDIFICE IN BOTH OF WHICH WERE LATTICE WINDOWS THE WOODEN SHUTTERS TO CLOSE OVER THEM AT NEED +1221-135767-0013-1293: LIFTING THE IRON HAMMER THAT HUNG AT THE PORTAL HESTER PRYNNE GAVE A SUMMONS WHICH WAS ANSWERED BY ONE OF THE GOVERNOR'S BOND (SERVANT->SERVANTS) A FREE BORN ENGLISHMAN BUT NOW A SEVEN YEARS SLAVE +1221-135767-0014-1294: YEA HIS HONOURABLE WORSHIP IS WITHIN BUT HE HATH A GODLY MINISTER OR TWO WITH HIM AND LIKEWISE A LEECH +1221-135767-0015-1295: (YE->YEA) MAY NOT SEE HIS WORSHIP NOW +1221-135767-0016-1296: WITH MANY VARIATIONS SUGGESTED BY THE NATURE OF HIS BUILDING MATERIALS DIVERSITY OF CLIMATE AND A DIFFERENT MODE OF SOCIAL LIFE GOVERNOR BELLINGHAM HAD PLANNED HIS NEW HABITATION AFTER THE RESIDENCES OF GENTLEMEN OF (FAIR ESTATE->FAIREST STATE) IN HIS NATIVE LAND +1221-135767-0017-1297: ON THE TABLE IN TOKEN THAT THE SENTIMENT OF OLD ENGLISH HOSPITALITY HAD NOT BEEN LEFT BEHIND STOOD A LARGE PEWTER TANKARD AT THE BOTTOM OF WHICH HAD HESTER OR PEARL PEEPED INTO IT THEY MIGHT HAVE SEEN THE FROTHY REMNANT OF A RECENT DRAUGHT OF ALE +1221-135767-0018-1298: LITTLE PEARL WHO WAS AS GREATLY PLEASED WITH THE GLEAMING (ARMOUR->ARMOR) AS SHE HAD BEEN WITH THE GLITTERING (FRONTISPIECE->FRONTESPIECE) OF THE HOUSE SPENT SOME TIME LOOKING INTO THE POLISHED MIRROR OF THE BREASTPLATE +1221-135767-0019-1299: MOTHER CRIED SHE I SEE YOU HERE LOOK LOOK +1221-135767-0020-1300: IN TRUTH SHE SEEMED ABSOLUTELY HIDDEN BEHIND IT +1221-135767-0021-1301: PEARL ACCORDINGLY RAN TO THE BOW WINDOW AT THE FURTHER END OF THE HALL AND LOOKED ALONG THE VISTA OF A GARDEN WALK CARPETED WITH CLOSELY SHAVEN GRASS AND BORDERED WITH SOME RUDE AND (IMMATURE->IMMATOR) ATTEMPT AT SHRUBBERY +1221-135767-0022-1302: BUT THE PROPRIETOR APPEARED ALREADY TO HAVE RELINQUISHED AS HOPELESS THE EFFORT TO PERPETUATE ON THIS SIDE OF THE ATLANTIC IN A HARD SOIL AND AMID THE CLOSE STRUGGLE FOR SUBSISTENCE THE NATIVE ENGLISH TASTE FOR ORNAMENTAL GARDENING +1221-135767-0023-1303: THERE WERE A FEW ROSE BUSHES HOWEVER AND A NUMBER OF APPLE TREES PROBABLY THE DESCENDANTS OF THOSE PLANTED BY THE REVEREND MISTER BLACKSTONE THE FIRST SETTLER OF THE PENINSULA THAT HALF MYTHOLOGICAL PERSONAGE WHO RIDES THROUGH OUR EARLY ANNALS SEATED ON THE BACK OF A BULL +1221-135767-0024-1304: PEARL SEEING THE ROSE BUSHES BEGAN TO CRY FOR A RED ROSE AND WOULD NOT BE PACIFIED +1284-1180-0000-829: HE WORE BLUE SILK STOCKINGS BLUE (KNEE PANTS->KNEEP HANDS) WITH GOLD BUCKLES A BLUE RUFFLED WAIST AND A JACKET OF BRIGHT BLUE BRAIDED WITH GOLD +1284-1180-0001-830: HIS HAT HAD A PEAKED CROWN (AND->IN) A FLAT BRIM AND AROUND THE BRIM WAS A ROW OF TINY GOLDEN BELLS THAT TINKLED WHEN HE MOVED +1284-1180-0002-831: INSTEAD OF SHOES THE OLD (MAN->MEN) WORE BOOTS WITH (TURNOVER->TURN OVER) TOPS AND HIS BLUE COAT HAD WIDE CUFFS OF GOLD BRAID +1284-1180-0003-832: FOR A LONG TIME HE HAD WISHED TO EXPLORE THE BEAUTIFUL LAND OF OZ IN WHICH THEY LIVED +1284-1180-0004-833: WHEN THEY WERE OUTSIDE (UNC->UN) SIMPLY LATCHED THE DOOR AND STARTED UP THE PATH 
+1284-1180-0005-834: NO ONE WOULD DISTURB THEIR LITTLE HOUSE EVEN IF (ANYONE->ANY ONE) CAME SO FAR INTO THE THICK FOREST WHILE THEY WERE GONE +1284-1180-0006-835: AT THE FOOT OF THE MOUNTAIN THAT SEPARATED THE COUNTRY OF THE MUNCHKINS FROM THE COUNTRY OF THE (GILLIKINS->GYLICANS) THE PATH DIVIDED +1284-1180-0007-836: HE KNEW IT WOULD TAKE THEM TO THE HOUSE OF THE CROOKED MAGICIAN WHOM HE HAD NEVER SEEN BUT WHO WAS (THEIR->THERE) NEAREST (NEIGHBOR->NEIGHBOUR) +1284-1180-0008-837: ALL THE MORNING THEY TRUDGED UP THE MOUNTAIN PATH AND AT NOON (UNC AND->UNCAN) OJO SAT ON A FALLEN TREE TRUNK AND ATE THE LAST OF THE BREAD WHICH THE OLD MUNCHKIN HAD PLACED IN HIS POCKET +1284-1180-0009-838: THEN THEY STARTED ON AGAIN AND TWO HOURS LATER CAME IN SIGHT OF THE HOUSE OF DOCTOR PIPT +1284-1180-0010-839: (UNC KNOCKED AT->UNCONOCTED) THE DOOR OF THE HOUSE (AND A->INTO) CHUBBY PLEASANT FACED WOMAN DRESSED ALL IN BLUE OPENED IT AND GREETED THE VISITORS WITH A SMILE +1284-1180-0011-840: I AM MY DEAR AND ALL STRANGERS ARE WELCOME TO MY HOME +1284-1180-0012-841: WE HAVE COME FROM (A FAR->AFAR) LONELIER PLACE THAN THIS A LONELIER PLACE +1284-1180-0013-842: AND YOU MUST BE OJO THE UNLUCKY SHE ADDED +1284-1180-0014-843: OJO HAD NEVER EATEN SUCH A FINE MEAL IN ALL HIS LIFE +1284-1180-0015-844: WE ARE (TRAVELING->TRAVELLING) REPLIED OJO AND WE STOPPED AT YOUR HOUSE JUST (TO->A) REST AND REFRESH OURSELVES +1284-1180-0016-845: THE WOMAN SEEMED THOUGHTFUL +1284-1180-0017-846: AT ONE END STOOD A GREAT FIREPLACE IN WHICH A BLUE LOG WAS BLAZING WITH A BLUE FLAME AND OVER THE FIRE HUNG FOUR KETTLES IN A ROW ALL BUBBLING AND STEAMING AT A GREAT RATE +1284-1180-0018-847: IT TAKES ME SEVERAL YEARS TO MAKE THIS MAGIC POWDER BUT AT THIS MOMENT I AM PLEASED TO SAY IT IS NEARLY DONE YOU SEE I AM MAKING IT FOR MY GOOD WIFE MARGOLOTTE WHO WANTS TO USE SOME OF IT FOR A PURPOSE OF HER OWN +1284-1180-0019-848: YOU MUST KNOW SAID MARGOLOTTE WHEN THEY WERE ALL SEATED TOGETHER ON THE BROAD WINDOW SEAT THAT MY HUSBAND FOOLISHLY GAVE AWAY ALL THE POWDER OF LIFE HE FIRST MADE TO OLD (MOMBI->MOMBY) THE WITCH WHO USED TO LIVE IN THE COUNTRY OF THE (GILLIKINS->GYLICANS) TO THE NORTH OF HERE +1284-1180-0020-849: THE FIRST LOT WE TESTED ON OUR GLASS CAT WHICH NOT ONLY BEGAN TO LIVE BUT HAS LIVED EVER SINCE +1284-1180-0021-850: I THINK THE NEXT GLASS CAT THE MAGICIAN MAKES WILL HAVE NEITHER BRAINS NOR HEART FOR THEN IT WILL NOT OBJECT TO CATCHING MICE AND (MAY->THEY) PROVE OF SOME USE TO US +1284-1180-0022-851: (I'M->I AM) AFRAID I DON'T KNOW MUCH ABOUT THE LAND OF OZ +1284-1180-0023-852: YOU SEE (I'VE->I HAVE) LIVED ALL MY LIFE WITH UNC NUNKIE THE SILENT ONE AND THERE WAS NO ONE TO TELL ME ANYTHING +1284-1180-0024-853: THAT IS ONE REASON YOU ARE OJO THE UNLUCKY SAID THE WOMAN IN (A->*) SYMPATHETIC TONE +1284-1180-0025-854: I THINK I MUST SHOW YOU MY PATCHWORK GIRL SAID MARGOLOTTE LAUGHING AT THE BOY'S ASTONISHMENT FOR SHE IS RATHER DIFFICULT TO EXPLAIN +1284-1180-0026-855: BUT FIRST I WILL TELL YOU THAT (FOR->FROM) MANY YEARS I HAVE LONGED FOR A SERVANT TO HELP ME WITH THE HOUSEWORK AND TO (COOK->COPE) THE MEALS AND WASH THE DISHES +1284-1180-0027-856: YET THAT TASK WAS NOT SO EASY AS YOU MAY SUPPOSE +1284-1180-0028-857: A BED QUILT MADE OF PATCHES OF DIFFERENT KINDS AND (COLORS->COLLARS) OF CLOTH ALL NEATLY SEWED TOGETHER +1284-1180-0029-858: SOMETIMES IT IS CALLED A CRAZY QUILT BECAUSE THE PATCHES AND COLORS ARE SO MIXED UP +1284-1180-0030-859: WHEN I FOUND IT I SAID TO MYSELF THAT IT WOULD DO NICELY FOR MY SERVANT GIRL FOR WHEN SHE WAS 
BROUGHT TO LIFE SHE WOULD NOT BE PROUD NOR HAUGHTY AS THE GLASS CAT IS FOR SUCH A DREADFUL MIXTURE OF (COLORS->COLOURS) WOULD DISCOURAGE HER FROM TRYING TO BE AS DIGNIFIED AS THE BLUE MUNCHKINS ARE +1284-1180-0031-860: AT THE EMERALD CITY WHERE OUR PRINCESS (OZMA->OSMO) LIVES GREEN IS THE POPULAR (COLOR->COLOUR) +1284-1180-0032-861: I WILL SHOW YOU WHAT A GOOD JOB I DID AND SHE WENT TO A TALL CUPBOARD AND THREW OPEN THE DOORS +1284-1181-0000-807: OJO EXAMINED THIS CURIOUS CONTRIVANCE WITH WONDER +1284-1181-0001-808: (MARGOLOTTE->MARGOLOTT) HAD FIRST MADE THE GIRL'S FORM FROM THE PATCHWORK QUILT AND THEN SHE HAD DRESSED IT WITH A PATCHWORK SKIRT AND AN APRON WITH POCKETS IN IT USING THE SAME (GAY->GAME) MATERIAL THROUGHOUT +1284-1181-0002-809: THE HEAD OF THE PATCHWORK GIRL WAS THE MOST CURIOUS PART OF HER +1284-1181-0003-810: THE HAIR WAS OF BROWN YARN AND HUNG DOWN ON HER NECK (IN->AND) SEVERAL NEAT BRAIDS +1284-1181-0004-811: GOLD IS THE MOST COMMON (METAL->MEDAL) IN THE LAND OF OZ AND IS USED FOR MANY PURPOSES BECAUSE IT IS SOFT AND PLIABLE +1284-1181-0005-812: NO I FORGOT ALL ABOUT THE BRAINS EXCLAIMED THE WOMAN +1284-1181-0006-813: WELL THAT MAY BE TRUE AGREED MARGOLOTTE BUT ON THE CONTRARY A SERVANT WITH TOO MUCH BRAINS IS SURE TO BECOME INDEPENDENT AND HIGH AND MIGHTY AND FEEL ABOVE HER WORK +1284-1181-0007-814: SHE POURED INTO THE DISH A QUANTITY FROM EACH OF THESE BOTTLES +1284-1181-0008-815: I THINK THAT WILL DO SHE CONTINUED FOR THE OTHER QUALITIES ARE NOT NEEDED IN A SERVANT +1284-1181-0009-816: SHE RAN TO HER HUSBAND'S SIDE AT ONCE AND HELPED HIM LIFT THE FOUR KETTLES FROM THE FIRE +1284-1181-0010-817: THEIR CONTENTS HAD ALL BOILED AWAY LEAVING IN THE BOTTOM OF EACH KETTLE A FEW GRAINS OF FINE WHITE POWDER +1284-1181-0011-818: VERY CAREFULLY THE MAGICIAN REMOVED THIS POWDER PLACING IT (ALL TOGETHER->ALTOGETHER) IN A GOLDEN DISH WHERE HE MIXED IT WITH A GOLDEN SPOON +1284-1181-0012-819: NO ONE SAW HIM DO THIS FOR ALL WERE LOOKING AT THE POWDER OF LIFE BUT SOON THE WOMAN REMEMBERED WHAT SHE HAD BEEN DOING AND CAME BACK TO THE CUPBOARD +1284-1181-0013-820: OJO BECAME A BIT UNEASY AT THIS FOR HE HAD ALREADY PUT QUITE A LOT OF THE CLEVERNESS POWDER IN THE DISH BUT HE DARED NOT INTERFERE AND SO HE COMFORTED HIMSELF WITH THE THOUGHT THAT ONE CANNOT HAVE TOO MUCH CLEVERNESS +1284-1181-0014-821: HE SELECTED A SMALL GOLD BOTTLE WITH A PEPPER BOX TOP SO THAT THE POWDER MIGHT BE SPRINKLED ON ANY OBJECT THROUGH THE SMALL HOLES +1284-1181-0015-822: MOST PEOPLE TALK TOO MUCH SO IT IS A RELIEF TO FIND ONE WHO TALKS TOO LITTLE +1284-1181-0016-823: I AM NOT ALLOWED TO PERFORM MAGIC EXCEPT FOR MY OWN AMUSEMENT HE TOLD HIS VISITORS AS HE LIGHTED A PIPE WITH A CROOKED STEM AND BEGAN TO SMOKE +1284-1181-0017-824: THE WIZARD OF OZ WHO USED TO BE A HUMBUG AND KNEW NO MAGIC AT ALL HAS BEEN TAKING LESSONS OF GLINDA AND I'M TOLD HE IS GETTING TO BE A PRETTY GOOD WIZARD BUT HE IS MERELY THE ASSISTANT OF THE GREAT SORCERESS +1284-1181-0018-825: IT TRULY IS ASSERTED THE MAGICIAN +1284-1181-0019-826: I NOW USE THEM AS ORNAMENTAL STATUARY IN MY GARDEN +1284-1181-0020-827: DEAR ME WHAT A (CHATTERBOX YOU'RE->CHATTER BOX ARE) GETTING TO BE (UNC->YUNK) REMARKED THE MAGICIAN WHO WAS PLEASED WITH THE COMPLIMENT +1284-1181-0021-828: ASKED THE VOICE IN SCORNFUL ACCENTS +1284-134647-0000-862: THE GRATEFUL APPLAUSE OF THE CLERGY HAS CONSECRATED THE MEMORY OF A PRINCE WHO INDULGED THEIR PASSIONS AND PROMOTED THEIR INTEREST +1284-134647-0001-863: THE EDICT OF MILAN THE GREAT CHARTER OF TOLERATION HAD CONFIRMED TO EACH 
INDIVIDUAL OF THE ROMAN WORLD THE PRIVILEGE OF CHOOSING AND PROFESSING HIS OWN RELIGION +1284-134647-0002-864: BUT THIS INESTIMABLE PRIVILEGE WAS SOON VIOLATED WITH THE KNOWLEDGE OF TRUTH THE EMPEROR (IMBIBED->IBED) THE MAXIMS OF PERSECUTION AND THE SECTS WHICH DISSENTED FROM THE CATHOLIC CHURCH WERE AFFLICTED AND OPPRESSED BY THE TRIUMPH OF CHRISTIANITY +1284-134647-0003-865: CONSTANTINE EASILY BELIEVED THAT THE HERETICS WHO PRESUMED TO DISPUTE HIS OPINIONS OR TO OPPOSE HIS COMMANDS WERE GUILTY OF THE MOST ABSURD AND CRIMINAL OBSTINACY AND THAT A SEASONABLE APPLICATION OF MODERATE SEVERITIES MIGHT SAVE THOSE UNHAPPY MEN FROM THE DANGER OF AN EVERLASTING CONDEMNATION +1284-134647-0004-866: SOME OF THE PENAL REGULATIONS WERE COPIED FROM THE EDICTS OF DIOCLETIAN AND THIS METHOD OF CONVERSION WAS APPLAUDED BY THE SAME BISHOPS WHO HAD (FELT->FELLED) THE HAND OF OPPRESSION AND PLEADED FOR THE RIGHTS OF HUMANITY +1284-134647-0005-867: THEY ASSERTED WITH CONFIDENCE AND ALMOST WITH EXULTATION THAT THE APOSTOLICAL SUCCESSION WAS INTERRUPTED THAT ALL THE BISHOPS OF EUROPE AND ASIA WERE INFECTED BY THE CONTAGION OF GUILT AND SCHISM AND THAT THE PREROGATIVES OF THE CATHOLIC CHURCH WERE CONFINED TO THE CHOSEN PORTION OF THE AFRICAN BELIEVERS WHO ALONE HAD PRESERVED INVIOLATE THE INTEGRITY OF THEIR FAITH AND DISCIPLINE +1284-134647-0006-868: BISHOPS VIRGINS AND EVEN SPOTLESS INFANTS WERE SUBJECTED TO THE DISGRACE OF A PUBLIC PENANCE BEFORE THEY COULD BE ADMITTED TO THE COMMUNION OF THE DONATISTS +1284-134647-0007-869: (PROSCRIBED->PRESCRIBED) BY THE CIVIL AND ECCLESIASTICAL POWERS OF THE EMPIRE THE (DONATISTS->DONATIST) STILL MAINTAINED IN SOME PROVINCES PARTICULARLY IN (NUMIDIA->MEDIA) THEIR SUPERIOR NUMBERS AND FOUR HUNDRED BISHOPS ACKNOWLEDGED THE JURISDICTION OF THEIR PRIMATE +1320-122612-0000-120: SINCE THE PERIOD OF OUR TALE THE ACTIVE SPIRIT OF THE COUNTRY HAS SURROUNDED IT WITH A BELT OF RICH AND THRIVING SETTLEMENTS THOUGH NONE BUT THE HUNTER OR THE SAVAGE IS EVER KNOWN EVEN NOW TO PENETRATE ITS WILD RECESSES +1320-122612-0001-121: THE DEWS WERE SUFFERED TO EXHALE AND THE SUN HAD DISPERSED THE MISTS AND WAS SHEDDING A STRONG AND CLEAR LIGHT IN THE FOREST WHEN THE (TRAVELERS->TRAVELLERS) RESUMED THEIR JOURNEY +1320-122612-0002-122: AFTER PROCEEDING A FEW MILES THE PROGRESS OF HAWKEYE WHO LED THE ADVANCE BECAME MORE DELIBERATE AND WATCHFUL +1320-122612-0003-123: HE OFTEN STOPPED TO EXAMINE THE TREES NOR DID HE CROSS A RIVULET WITHOUT ATTENTIVELY CONSIDERING THE QUANTITY THE VELOCITY AND THE (COLOR->COLOUR) OF ITS WATERS +1320-122612-0004-124: DISTRUSTING HIS OWN JUDGMENT HIS APPEALS TO THE OPINION OF CHINGACHGOOK WERE FREQUENT AND EARNEST +1320-122612-0005-125: YET HERE ARE WE (WITHIN->WITH AN) A SHORT RANGE OF THE (SCAROONS->SCARONS) AND NOT A SIGN OF A TRAIL HAVE WE CROSSED +1320-122612-0006-126: LET US RETRACE OUR STEPS AND EXAMINE AS WE GO WITH KEENER EYES +1320-122612-0007-127: (CHINGACHGOOK->INGACHGOOK) HAD CAUGHT THE LOOK AND MOTIONING WITH HIS HAND HE BADE HIM SPEAK +1320-122612-0008-128: THE EYES OF THE WHOLE PARTY FOLLOWED THE UNEXPECTED MOVEMENT AND READ THEIR SUCCESS IN THE AIR OF TRIUMPH THAT THE YOUTH ASSUMED +1320-122612-0009-129: IT WOULD HAVE BEEN MORE WONDERFUL HAD HE SPOKEN WITHOUT A BIDDING +1320-122612-0010-130: SEE SAID UNCAS POINTING NORTH AND SOUTH AT THE EVIDENT MARKS OF THE BROAD TRAIL ON EITHER SIDE OF HIM THE DARK HAIR HAS GONE TOWARD THE FOREST +1320-122612-0011-131: IF A ROCK OR A RIVULET OR A BIT OF EARTH HARDER THAN COMMON SEVERED THE LINKS OF THE (CLEW->CLUE) THEY 
FOLLOWED THE TRUE EYE OF THE SCOUT RECOVERED THEM AT A DISTANCE AND SELDOM RENDERED THE DELAY OF A SINGLE MOMENT NECESSARY +1320-122612-0012-132: EXTINGUISHED BRANDS WERE LYING AROUND A SPRING THE OFFALS OF A DEER WERE SCATTERED ABOUT THE PLACE AND THE TREES BORE EVIDENT MARKS OF HAVING BEEN BROWSED BY THE HORSES +1320-122612-0013-133: A CIRCLE OF A FEW HUNDRED FEET IN CIRCUMFERENCE WAS DRAWN AND EACH OF THE PARTY TOOK A SEGMENT FOR HIS PORTION +1320-122612-0014-134: THE EXAMINATION HOWEVER RESULTED IN NO DISCOVERY +1320-122612-0015-135: THE WHOLE PARTY CROWDED TO THE SPOT WHERE UNCAS POINTED OUT THE IMPRESSION OF A MOCCASIN IN THE MOIST (ALLUVION->ALLUVIAN) +1320-122612-0016-136: RUN BACK UNCAS AND BRING ME THE SIZE OF THE SINGER'S FOOT +1320-122617-0000-78: NOTWITHSTANDING THE HIGH RESOLUTION OF HAWKEYE HE FULLY COMPREHENDED ALL THE DIFFICULTIES AND DANGER HE WAS ABOUT TO INCUR +1320-122617-0001-79: IN HIS RETURN TO THE CAMP HIS ACUTE AND PRACTISED INTELLECTS WERE INTENTLY ENGAGED IN DEVISING MEANS TO COUNTERACT A WATCHFULNESS AND SUSPICION ON THE PART OF HIS ENEMIES THAT HE KNEW WERE IN NO DEGREE INFERIOR TO HIS OWN +1320-122617-0002-80: IN OTHER WORDS WHILE HE HAD IMPLICIT FAITH IN THE ABILITY OF (BALAAM'S->BALEM'S) ASS TO SPEAK HE WAS SOMEWHAT (SKEPTICAL->SCEPTICAL) ON THE SUBJECT OF A BEAR'S SINGING AND YET HE HAD BEEN ASSURED OF THE LATTER ON THE TESTIMONY OF HIS OWN EXQUISITE ORGANS +1320-122617-0003-81: THERE WAS SOMETHING IN HIS AIR AND MANNER THAT BETRAYED TO THE SCOUT THE UTTER CONFUSION OF THE STATE OF HIS MIND +1320-122617-0004-82: THE INGENIOUS HAWKEYE WHO RECALLED THE HASTY MANNER IN WHICH THE OTHER HAD ABANDONED HIS POST AT THE BEDSIDE OF THE SICK WOMAN WAS NOT WITHOUT HIS SUSPICIONS CONCERNING THE SUBJECT OF SO MUCH SOLEMN DELIBERATION +1320-122617-0005-83: THE BEAR SHOOK HIS SHAGGY SIDES AND THEN A WELL KNOWN VOICE REPLIED +1320-122617-0006-84: CAN THESE THINGS BE RETURNED DAVID BREATHING MORE FREELY AS THE TRUTH BEGAN TO DAWN UPON HIM +1320-122617-0007-85: COME COME RETURNED HAWKEYE UNCASING HIS HONEST COUNTENANCE THE BETTER TO ASSURE THE WAVERING CONFIDENCE OF HIS COMPANION YOU MAY SEE A SKIN WHICH IF IT BE NOT AS WHITE AS ONE OF THE GENTLE ONES HAS NO TINGE OF RED TO IT THAT THE WINDS OF THE HEAVEN AND THE SUN HAVE NOT BESTOWED NOW LET US TO BUSINESS +1320-122617-0008-86: THE YOUNG MAN IS IN BONDAGE AND MUCH I FEAR HIS DEATH IS DECREED +1320-122617-0009-87: I GREATLY (MOURN->MOURNED) THAT ONE SO WELL DISPOSED SHOULD DIE IN HIS IGNORANCE AND I HAVE SOUGHT A GOODLY HYMN CAN YOU LEAD ME TO HIM +1320-122617-0010-88: THE TASK WILL NOT BE DIFFICULT RETURNED DAVID HESITATING THOUGH I GREATLY FEAR YOUR PRESENCE WOULD RATHER INCREASE THAN MITIGATE HIS UNHAPPY FORTUNES +1320-122617-0011-89: THE LODGE IN WHICH UNCAS WAS CONFINED WAS IN THE VERY (CENTER->CENTRE) OF THE VILLAGE AND IN A SITUATION PERHAPS MORE DIFFICULT THAN ANY OTHER TO APPROACH OR LEAVE WITHOUT OBSERVATION +1320-122617-0012-90: FOUR OR FIVE OF THE LATTER ONLY LINGERED ABOUT THE DOOR OF THE PRISON OF UNCAS WARY BUT CLOSE OBSERVERS OF THE MANNER OF THEIR CAPTIVE +1320-122617-0013-91: DELIVERED IN A STRONG TONE OF ASSENT ANNOUNCED THE GRATIFICATION THE SAVAGE WOULD RECEIVE (IN->AND) WITNESSING SUCH AN EXHIBITION OF WEAKNESS (IN->AND) AN ENEMY SO LONG HATED AND SO MUCH FEARED +1320-122617-0014-92: THEY DREW BACK A LITTLE FROM THE ENTRANCE AND MOTIONED TO THE SUPPOSED (CONJURER->CONJUROR) TO ENTER +1320-122617-0015-93: BUT THE BEAR INSTEAD OF OBEYING MAINTAINED THE SEAT IT HAD TAKEN AND GROWLED +1320-122617-0016-94: 
THE CUNNING MAN IS AFRAID THAT HIS BREATH WILL BLOW UPON HIS BROTHERS AND TAKE AWAY THEIR COURAGE TOO CONTINUED DAVID IMPROVING THE HINT HE RECEIVED THEY MUST STAND FURTHER OFF +1320-122617-0017-95: THEN AS IF SATISFIED OF THEIR SAFETY THE SCOUT LEFT HIS POSITION AND SLOWLY ENTERED THE PLACE +1320-122617-0018-96: IT WAS SILENT AND GLOOMY BEING TENANTED SOLELY BY THE CAPTIVE AND LIGHTED BY THE DYING EMBERS OF A FIRE WHICH HAD BEEN USED FOR THE (PURPOSED->PURPOSE) OF COOKERY +1320-122617-0019-97: UNCAS OCCUPIED A DISTANT CORNER IN A RECLINING ATTITUDE BEING RIGIDLY BOUND BOTH HANDS AND FEET BY STRONG AND PAINFUL (WITHES->WIDTHS) +1320-122617-0020-98: THE SCOUT WHO HAD LEFT DAVID AT THE DOOR TO ASCERTAIN THEY WERE NOT OBSERVED THOUGHT IT PRUDENT TO PRESERVE HIS DISGUISE UNTIL ASSURED OF THEIR PRIVACY +1320-122617-0021-99: WHAT SHALL WE DO WITH THE MINGOES AT THE DOOR THEY COUNT SIX AND (THIS->THE) SINGER IS AS GOOD AS NOTHING +1320-122617-0022-100: THE DELAWARES ARE CHILDREN OF THE TORTOISE AND (THEY OUTSTRIP->THE OUTSTRIPPED) THE DEER +1320-122617-0023-101: UNCAS WHO HAD ALREADY APPROACHED THE DOOR IN READINESS TO LEAD THE WAY NOW RECOILED AND PLACED HIMSELF ONCE MORE IN THE BOTTOM OF THE LODGE +1320-122617-0024-102: BUT HAWKEYE WHO WAS TOO MUCH OCCUPIED WITH HIS OWN THOUGHTS TO NOTE THE MOVEMENT CONTINUED SPEAKING MORE TO HIMSELF THAN TO HIS COMPANION +1320-122617-0025-103: SO UNCAS YOU HAD BETTER TAKE THE LEAD WHILE I WILL PUT ON THE SKIN AGAIN AND TRUST TO CUNNING FOR WANT OF SPEED +1320-122617-0026-104: WELL WHAT CAN'T BE DONE BY MAIN COURAGE (IN->AND) WAR MUST BE DONE BY CIRCUMVENTION +1320-122617-0027-105: AS SOON AS THESE DISPOSITIONS WERE MADE THE SCOUT TURNED TO DAVID AND GAVE HIM HIS PARTING INSTRUCTIONS +1320-122617-0028-106: MY PURSUITS ARE PEACEFUL AND MY TEMPER I HUMBLY TRUST IS GREATLY GIVEN TO MERCY AND LOVE RETURNED DAVID A LITTLE NETTLED AT SO DIRECT AN ATTACK ON HIS MANHOOD BUT THERE ARE NONE WHO CAN SAY THAT I HAVE EVER FORGOTTEN MY FAITH IN THE LORD EVEN IN THE GREATEST STRAITS +1320-122617-0029-107: IF YOU ARE NOT THEN KNOCKED ON THE HEAD YOUR BEING A (NON COMPOSSER->NONCOMPOSTER) WILL PROTECT YOU AND YOU'LL THEN HAVE A GOOD REASON TO EXPECT TO DIE IN YOUR BED +1320-122617-0030-108: (SO->SIR) CHOOSE FOR YOURSELF TO MAKE A RUSH OR TARRY HERE +1320-122617-0031-109: BRAVELY AND GENEROUSLY HAS HE BATTLED IN MY BEHALF AND THIS AND MORE WILL I DARE IN HIS SERVICE +1320-122617-0032-110: KEEP SILENT AS LONG AS MAY BE AND IT WOULD BE WISE WHEN YOU DO SPEAK TO BREAK OUT SUDDENLY IN ONE OF YOUR SHOUTINGS WHICH WILL SERVE TO REMIND THE INDIANS THAT YOU ARE NOT ALTOGETHER AS RESPONSIBLE AS MEN SHOULD BE +1320-122617-0033-111: IF HOWEVER THEY TAKE YOUR SCALP AS I TRUST AND BELIEVE THEY WILL NOT DEPEND ON IT UNCAS AND I WILL NOT FORGET THE DEED BUT REVENGE IT AS BECOMES TRUE WARRIORS AND TRUSTY FRIENDS +1320-122617-0034-112: HOLD SAID DAVID PERCEIVING THAT WITH THIS ASSURANCE THEY WERE ABOUT TO LEAVE HIM I AM AN UNWORTHY AND HUMBLE FOLLOWER OF ONE WHO TAUGHT NOT THE DAMNABLE PRINCIPLE OF REVENGE +1320-122617-0035-113: THEN HEAVING A HEAVY SIGH PROBABLY AMONG THE LAST HE EVER DREW IN PINING FOR A CONDITION HE HAD SO LONG ABANDONED HE ADDED IT IS WHAT I WOULD WISH TO PRACTISE MYSELF AS ONE WITHOUT A CROSS OF BLOOD THOUGH IT IS NOT ALWAYS EASY TO DEAL WITH AN INDIAN AS YOU WOULD WITH A FELLOW CHRISTIAN +1320-122617-0036-114: GOD BLESS YOU FRIEND I DO BELIEVE YOUR (SCENT IS->SIN HAS) NOT GREATLY WRONG WHEN THE MATTER IS DULY CONSIDERED AND KEEPING ETERNITY BEFORE THE EYES THOUGH MUCH 
DEPENDS ON THE NATURAL GIFTS (AND->IN) THE FORCE OF TEMPTATION +1320-122617-0037-115: THE DELAWARE DOG HE SAID LEANING FORWARD AND PEERING THROUGH THE DIM LIGHT TO CATCH THE EXPRESSION OF THE OTHER'S FEATURES IS HE AFRAID +1320-122617-0038-116: WILL THE HURONS HEAR HIS GROANS +1320-122617-0039-117: THE (MOHICAN->MOHICANS) STARTED ON HIS FEET AND SHOOK HIS SHAGGY COVERING AS THOUGH THE ANIMAL HE COUNTERFEITED WAS ABOUT TO MAKE SOME DESPERATE EFFORT +1320-122617-0040-118: HE HAD NO OCCASION TO DELAY FOR AT THE NEXT INSTANT A BURST OF CRIES FILLED THE OUTER AIR AND RAN ALONG THE WHOLE EXTENT OF THE VILLAGE +1320-122617-0041-119: UNCAS CAST HIS SKIN AND STEPPED FORTH IN HIS OWN BEAUTIFUL PROPORTIONS +1580-141083-0000-1949: I WILL ENDEAVOUR IN MY STATEMENT TO AVOID SUCH TERMS AS WOULD SERVE TO LIMIT THE EVENTS TO ANY PARTICULAR PLACE OR GIVE A CLUE AS TO THE PEOPLE CONCERNED +1580-141083-0001-1950: I HAD ALWAYS KNOWN HIM TO BE RESTLESS IN HIS MANNER BUT ON THIS PARTICULAR OCCASION HE WAS IN SUCH A STATE OF UNCONTROLLABLE AGITATION THAT IT WAS CLEAR SOMETHING VERY UNUSUAL HAD OCCURRED +1580-141083-0002-1951: MY FRIEND'S TEMPER HAD NOT IMPROVED SINCE HE HAD BEEN DEPRIVED OF THE CONGENIAL SURROUNDINGS OF BAKER STREET +1580-141083-0003-1952: WITHOUT HIS (SCRAPBOOKS->SCRAP BOOKS) HIS CHEMICALS AND HIS HOMELY UNTIDINESS HE WAS AN UNCOMFORTABLE MAN +1580-141083-0004-1953: I HAD TO READ IT OVER CAREFULLY AS THE TEXT MUST BE ABSOLUTELY CORRECT +1580-141083-0005-1954: I WAS ABSENT RATHER MORE THAN AN HOUR +1580-141083-0006-1955: THE ONLY DUPLICATE WHICH EXISTED SO FAR AS I KNEW WAS THAT WHICH BELONGED TO MY SERVANT (BANNISTER->BANISTER) A MAN WHO HAS LOOKED AFTER MY ROOM FOR TEN YEARS AND WHOSE HONESTY IS ABSOLUTELY ABOVE SUSPICION +1580-141083-0007-1956: THE MOMENT I LOOKED AT MY TABLE I WAS AWARE THAT (SOMEONE->SOME ONE) HAD RUMMAGED AMONG MY PAPERS +1580-141083-0008-1957: THE PROOF WAS IN THREE LONG SLIPS I HAD LEFT THEM ALL TOGETHER +1580-141083-0009-1958: (THE ALTERNATIVE->THEY ALL TURNED OF) WAS THAT (SOMEONE->SOME ONE) PASSING HAD OBSERVED THE KEY IN THE DOOR HAD KNOWN THAT I WAS OUT AND HAD ENTERED TO LOOK AT THE PAPERS +1580-141083-0010-1959: I GAVE HIM A LITTLE BRANDY AND LEFT HIM COLLAPSED IN A CHAIR WHILE I MADE A MOST CAREFUL EXAMINATION OF THE ROOM +1580-141083-0011-1960: A BROKEN TIP OF LEAD WAS LYING THERE ALSO +1580-141083-0012-1961: NOT ONLY THIS BUT ON THE TABLE I FOUND A SMALL BALL OF BLACK DOUGH OR CLAY WITH SPECKS OF SOMETHING WHICH LOOKS LIKE SAWDUST IN IT +1580-141083-0013-1962: ABOVE ALL THINGS I DESIRE TO SETTLE THE MATTER QUIETLY AND DISCREETLY +1580-141083-0014-1963: TO THE BEST OF MY BELIEF THEY WERE ROLLED UP +1580-141083-0015-1964: DID (ANYONE->ANY ONE) KNOW THAT THESE PROOFS WOULD BE THERE NO ONE SAVE THE PRINTER +1580-141083-0016-1965: I WAS IN SUCH A HURRY TO COME TO YOU YOU LEFT YOUR DOOR OPEN +1580-141083-0017-1966: SO IT SEEMS TO ME +1580-141083-0018-1967: NOW MISTER (SOAMES->SOLMES) AT YOUR DISPOSAL +1580-141083-0019-1968: ABOVE WERE THREE STUDENTS ONE ON EACH STORY +1580-141083-0020-1969: THEN HE APPROACHED IT AND STANDING ON TIPTOE WITH HIS NECK CRANED HE LOOKED INTO THE ROOM +1580-141083-0021-1970: THERE IS NO OPENING EXCEPT THE ONE (PANE->PAIN) SAID OUR LEARNED GUIDE +1580-141083-0022-1971: I AM AFRAID THERE ARE NO SIGNS HERE SAID HE +1580-141083-0023-1972: ONE COULD HARDLY HOPE FOR ANY UPON SO DRY A DAY +1580-141083-0024-1973: YOU LEFT HIM IN A CHAIR YOU SAY WHICH CHAIR BY THE WINDOW THERE +1580-141083-0025-1974: THE (MAN->MEN) ENTERED AND TOOK THE PAPERS SHEET 
BY SHEET FROM THE CENTRAL TABLE +1580-141083-0026-1975: AS A MATTER OF FACT HE COULD NOT SAID (SOAMES->SOLMES) FOR I ENTERED BY THE SIDE DOOR +1580-141083-0027-1976: HOW LONG WOULD IT TAKE HIM TO DO THAT USING EVERY POSSIBLE CONTRACTION A QUARTER OF AN HOUR NOT LESS +1580-141083-0028-1977: THEN HE TOSSED IT DOWN AND SEIZED THE NEXT +1580-141083-0029-1978: HE WAS IN THE MIDST OF THAT WHEN YOUR RETURN CAUSED HIM TO MAKE A VERY HURRIED RETREAT VERY HURRIED SINCE HE HAD NOT TIME TO REPLACE THE PAPERS WHICH WOULD TELL YOU THAT HE HAD BEEN THERE +1580-141083-0030-1979: MISTER (SOAMES->PSALMS) WAS SOMEWHAT OVERWHELMED BY THIS FLOOD OF INFORMATION +1580-141083-0031-1980: HOLMES HELD OUT A SMALL CHIP WITH THE LETTERS N N AND A SPACE OF CLEAR WOOD AFTER THEM YOU SEE +1580-141083-0032-1981: WATSON I HAVE ALWAYS DONE YOU (AN->AND) INJUSTICE THERE ARE OTHERS +1580-141083-0033-1982: I WAS HOPING THAT IF THE PAPER ON WHICH HE WROTE WAS THIN SOME TRACE OF IT MIGHT COME THROUGH UPON THIS POLISHED SURFACE NO I SEE NOTHING +1580-141083-0034-1983: AS HOLMES DREW THE CURTAIN I WAS AWARE FROM SOME LITTLE RIGIDITY AND (*->AN) ALERTNESS OF HIS ATTITUDE THAT HE WAS PREPARED FOR AN EMERGENCY +1580-141083-0035-1984: HOLMES TURNED AWAY AND STOOPED SUDDENLY TO THE FLOOR (HALLOA WHAT'S->HULLO WHAT IS) THIS +1580-141083-0036-1985: HOLMES HELD IT OUT ON HIS OPEN PALM IN THE GLARE OF THE ELECTRIC LIGHT +1580-141083-0037-1986: WHAT COULD HE DO HE CAUGHT UP EVERYTHING WHICH WOULD BETRAY HIM AND HE RUSHED INTO YOUR BEDROOM TO CONCEAL HIMSELF +1580-141083-0038-1987: I UNDERSTAND YOU TO SAY THAT THERE ARE THREE STUDENTS WHO USE THIS (STAIR->STARE) AND ARE IN THE HABIT OF PASSING YOUR DOOR YES THERE ARE +1580-141083-0039-1988: AND THEY ARE ALL IN FOR (THIS->THE) EXAMINATION YES +1580-141083-0040-1989: ONE HARDLY LIKES TO THROW SUSPICION WHERE THERE ARE NO PROOFS +1580-141083-0041-1990: LET US (HEAR->SEE) THE SUSPICIONS I WILL LOOK AFTER THE PROOFS +1580-141083-0042-1991: MY SCHOLAR HAS BEEN LEFT VERY POOR BUT HE IS HARD WORKING AND INDUSTRIOUS HE WILL DO WELL +1580-141083-0043-1992: THE TOP FLOOR BELONGS TO (MILES->MYLES) MC LAREN +1580-141083-0044-1993: I DARE NOT GO SO FAR AS THAT BUT OF THE THREE HE IS PERHAPS THE LEAST UNLIKELY +1580-141083-0045-1994: HE WAS STILL SUFFERING FROM THIS SUDDEN DISTURBANCE OF THE QUIET ROUTINE OF HIS LIFE +1580-141083-0046-1995: BUT I HAVE OCCASIONALLY DONE THE SAME THING AT OTHER TIMES +1580-141083-0047-1996: DID YOU LOOK AT THESE PAPERS ON THE TABLE +1580-141083-0048-1997: HOW CAME YOU TO LEAVE THE KEY IN THE DOOR +1580-141083-0049-1998: (ANYONE->ANY ONE) IN THE ROOM COULD GET OUT YES SIR +1580-141083-0050-1999: I (*->HAVE) REALLY DON'T THINK HE KNEW MUCH ABOUT IT MISTER HOLMES +1580-141083-0051-2000: ONLY FOR A MINUTE OR SO +1580-141083-0052-2001: OH I WOULD NOT VENTURE TO SAY SIR +1580-141083-0053-2002: YOU HAVEN'T SEEN ANY OF THEM NO SIR +1580-141084-0000-2003: IT WAS THE INDIAN WHOSE DARK SILHOUETTE APPEARED SUDDENLY UPON HIS BLIND +1580-141084-0001-2004: HE WAS PACING SWIFTLY UP AND DOWN HIS ROOM +1580-141084-0002-2005: (THIS->THE) SET OF ROOMS IS QUITE THE OLDEST IN THE COLLEGE AND IT IS NOT UNUSUAL FOR VISITORS TO GO OVER THEM +1580-141084-0003-2006: NO NAMES PLEASE SAID HOLMES AS WE KNOCKED AT (GILCHRIST'S->GILCRE'S) DOOR +1580-141084-0004-2007: OF COURSE HE DID NOT REALIZE THAT IT WAS I WHO WAS KNOCKING BUT NONE THE LESS HIS CONDUCT WAS VERY UNCOURTEOUS AND INDEED UNDER THE CIRCUMSTANCES RATHER SUSPICIOUS +1580-141084-0005-2008: THAT IS VERY IMPORTANT SAID HOLMES 
+1580-141084-0006-2009: YOU DON'T SEEM TO REALIZE THE POSITION +1580-141084-0007-2010: TO MORROW IS THE EXAMINATION +1580-141084-0008-2011: I CANNOT ALLOW THE EXAMINATION TO BE HELD IF ONE OF THE PAPERS HAS BEEN TAMPERED WITH THE SITUATION MUST BE FACED +1580-141084-0009-2012: IT IS POSSIBLE THAT I MAY BE IN A POSITION THEN TO INDICATE SOME COURSE OF ACTION +1580-141084-0010-2013: I WILL TAKE THE BLACK CLAY WITH ME ALSO THE PENCIL CUTTINGS GOOD BYE +1580-141084-0011-2014: WHEN WE WERE OUT IN THE DARKNESS OF THE QUADRANGLE WE AGAIN LOOKED UP AT THE WINDOWS +1580-141084-0012-2015: THE FOUL MOUTHED FELLOW AT THE TOP +1580-141084-0013-2016: HE IS THE ONE WITH THE WORST RECORD +1580-141084-0014-2017: WHY BANNISTER THE SERVANT WHAT'S HIS GAME IN THE MATTER +1580-141084-0015-2018: HE IMPRESSED ME AS BEING A PERFECTLY HONEST MAN +1580-141084-0016-2019: MY FRIEND DID NOT APPEAR TO BE DEPRESSED BY HIS FAILURE BUT SHRUGGED HIS SHOULDERS IN HALF HUMOROUS RESIGNATION +1580-141084-0017-2020: NO GOOD MY DEAR WATSON +1580-141084-0018-2021: I THINK SO YOU HAVE FORMED A CONCLUSION +1580-141084-0019-2022: YES MY DEAR WATSON I HAVE SOLVED THE MYSTERY +1580-141084-0020-2023: LOOK AT THAT HE HELD OUT HIS HAND +1580-141084-0021-2024: ON THE PALM WERE THREE LITTLE PYRAMIDS OF BLACK DOUGHY CLAY +1580-141084-0022-2025: AND ONE MORE THIS MORNING +1580-141084-0023-2026: IN A FEW HOURS THE EXAMINATION WOULD COMMENCE AND HE WAS STILL IN THE DILEMMA BETWEEN MAKING THE FACTS PUBLIC AND ALLOWING THE CULPRIT TO COMPETE FOR THE VALUABLE SCHOLARSHIP +1580-141084-0024-2027: HE COULD HARDLY STAND STILL SO GREAT WAS HIS MENTAL AGITATION AND HE RAN TOWARDS (HOLMES->HOMES) WITH TWO EAGER HANDS OUTSTRETCHED THANK HEAVEN THAT YOU HAVE COME +1580-141084-0025-2028: YOU KNOW HIM I THINK SO +1580-141084-0026-2029: IF THIS MATTER IS NOT TO BECOME PUBLIC WE MUST GIVE OURSELVES CERTAIN POWERS AND RESOLVE OURSELVES INTO A SMALL PRIVATE COURT MARTIAL +1580-141084-0027-2030: NO SIR CERTAINLY NOT +1580-141084-0028-2031: THERE WAS NO MAN SIR +1580-141084-0029-2032: HIS TROUBLED BLUE EYES GLANCED AT EACH OF US AND FINALLY RESTED WITH AN EXPRESSION OF BLANK DISMAY UPON (BANNISTER->BANISTER) IN THE FARTHER CORNER +1580-141084-0030-2033: JUST CLOSE THE DOOR SAID HOLMES +1580-141084-0031-2034: WE WANT TO KNOW MISTER (GILCHRIST->GOCRIST) HOW YOU AN HONOURABLE MAN EVER CAME TO COMMIT SUCH AN ACTION AS THAT OF YESTERDAY +1580-141084-0032-2035: FOR A MOMENT (GILCHRIST->GILCRIS) WITH UPRAISED HAND TRIED TO CONTROL HIS WRITHING FEATURES +1580-141084-0033-2036: COME COME SAID HOLMES KINDLY IT IS HUMAN TO ERR AND AT LEAST NO ONE CAN ACCUSE YOU OF BEING A CALLOUS CRIMINAL +1580-141084-0034-2037: WELL WELL DON'T TROUBLE TO ANSWER LISTEN AND SEE THAT I DO YOU (NO->KNOW) INJUSTICE +1580-141084-0035-2038: HE COULD EXAMINE THE PAPERS IN HIS OWN OFFICE +1580-141084-0036-2039: THE INDIAN I ALSO THOUGHT NOTHING OF +1580-141084-0037-2040: WHEN I APPROACHED YOUR ROOM I EXAMINED THE WINDOW +1580-141084-0038-2041: NO ONE LESS THAN THAT WOULD HAVE A CHANCE +1580-141084-0039-2042: I ENTERED AND I TOOK YOU INTO MY CONFIDENCE AS TO THE SUGGESTIONS OF THE SIDE TABLE +1580-141084-0040-2043: HE RETURNED CARRYING HIS JUMPING SHOES WHICH ARE PROVIDED AS YOU ARE (AWARE->WEAR) WITH SEVERAL SHARP SPIKES +1580-141084-0041-2044: NO HARM WOULD HAVE BEEN DONE HAD IT NOT BEEN THAT AS HE PASSED YOUR DOOR HE PERCEIVED THE KEY WHICH HAD BEEN LEFT BY THE CARELESSNESS OF YOUR SERVANT +1580-141084-0042-2045: A SUDDEN IMPULSE CAME OVER HIM TO ENTER AND SEE IF THEY WERE INDEED THE PROOFS 
+1580-141084-0043-2046: HE PUT HIS SHOES ON THE TABLE +1580-141084-0044-2047: GLOVES SAID THE YOUNG MAN +1580-141084-0045-2048: SUDDENLY HE HEARD HIM AT THE VERY DOOR THERE WAS NO POSSIBLE ESCAPE +1580-141084-0046-2049: HAVE I TOLD THE TRUTH MISTER (GILCHRIST->GORIST) +1580-141084-0047-2050: I HAVE A LETTER HERE MISTER (SOAMES->SOLMES) WHICH I WROTE TO YOU EARLY THIS MORNING IN THE MIDDLE OF A RESTLESS NIGHT +1580-141084-0048-2051: IT (WILL->WOULD) BE CLEAR TO YOU FROM WHAT I HAVE SAID THAT ONLY YOU COULD HAVE LET THIS YOUNG MAN OUT SINCE YOU WERE LEFT IN THE ROOM AND MUST HAVE LOCKED THE DOOR WHEN YOU WENT OUT +1580-141084-0049-2052: IT WAS SIMPLE ENOUGH SIR IF YOU ONLY HAD KNOWN BUT WITH ALL YOUR CLEVERNESS IT WAS IMPOSSIBLE THAT YOU COULD KNOW +1580-141084-0050-2053: IF MISTER (SOAMES->SOLMES) SAW THEM THE GAME WAS UP +1995-1826-0000-750: IN THE DEBATE BETWEEN THE SENIOR SOCIETIES HER DEFENCE OF THE FIFTEENTH AMENDMENT HAD BEEN NOT ONLY A NOTABLE BIT OF REASONING BUT DELIVERED WITH REAL ENTHUSIASM +1995-1826-0001-751: THE SOUTH SHE HAD NOT THOUGHT OF SERIOUSLY AND YET KNOWING OF ITS DELIGHTFUL HOSPITALITY AND MILD CLIMATE SHE WAS NOT AVERSE TO CHARLESTON OR NEW ORLEANS +1995-1826-0002-752: JOHN TAYLOR WHO HAD SUPPORTED HER THROUGH COLLEGE WAS INTERESTED IN COTTON +1995-1826-0003-753: BETTER GO HE HAD (COUNSELLED->COUNSELS) SENTENTIOUSLY +1995-1826-0004-754: MIGHT LEARN SOMETHING USEFUL DOWN THERE +1995-1826-0005-755: BUT JOHN THERE'S NO SOCIETY JUST ELEMENTARY WORK +1995-1826-0006-756: BEEN LOOKING UP (TOOMS->TOMB'S) COUNTY +1995-1826-0007-757: (FIND SOME CRESSWELLS->FIVE CROSS WHIRLS) THERE BIG PLANTATIONS RATED AT TWO HUNDRED AND FIFTY THOUSAND DOLLARS +1995-1826-0008-758: SOME OTHERS TOO BIG COTTON COUNTY +1995-1826-0009-759: YOU OUGHT TO KNOW JOHN IF I TEACH NEGROES I'LL SCARCELY SEE MUCH OF PEOPLE IN MY OWN CLASS +1995-1826-0010-760: AT ANY RATE I SAY GO +1995-1826-0011-761: HERE SHE WAS TEACHING DIRTY CHILDREN AND THE SMELL OF CONFUSED ODORS AND BODILY PERSPIRATION WAS TO HER AT TIMES UNBEARABLE +1995-1826-0012-762: SHE WANTED A GLANCE OF THE NEW BOOKS (AND->IN) PERIODICALS AND TALK OF (GREAT->GRATE) PHILANTHROPIES AND REFORMS +1995-1826-0013-763: SO FOR THE HUNDREDTH TIME SHE WAS THINKING (TODAY->TO DAY) AS SHE WALKED ALONE UP THE LANE BACK OF THE BARN AND THEN SLOWLY DOWN THROUGH THE BOTTOMS +1995-1826-0014-764: COTTON SHE PAUSED +1995-1826-0015-765: SHE HAD ALMOST FORGOTTEN THAT IT WAS HERE WITHIN TOUCH (AND->IN) SIGHT +1995-1826-0016-766: THE GLIMMERING SEA OF DELICATE LEAVES WHISPERED AND MURMURED BEFORE HER STRETCHING AWAY TO THE NORTHWARD +1995-1826-0017-767: THERE MIGHT BE A BIT OF POETRY HERE AND THERE BUT MOST OF THIS PLACE WAS SUCH DESPERATE PROSE +1995-1826-0018-768: HER REGARD SHIFTED TO THE GREEN STALKS AND LEAVES AGAIN AND SHE STARTED TO MOVE AWAY +1995-1826-0019-769: COTTON IS A WONDERFUL THING IS IT NOT BOYS SHE SAID RATHER PRIMLY +1995-1826-0020-770: MISS TAYLOR DID NOT KNOW MUCH ABOUT COTTON BUT AT LEAST ONE MORE (REMARK->REMARKED) SEEMED CALLED FOR +1995-1826-0021-771: DON'T KNOW WELL OF ALL THINGS INWARDLY COMMENTED MISS TAYLOR (LITERALLY->THAT A) BORN IN COTTON AND OH WELL AS MUCH AS TO ASK WHAT'S THE USE SHE TURNED AGAIN TO GO +1995-1826-0022-772: I SUPPOSE THOUGH IT'S TOO EARLY FOR THEM THEN CAME THE EXPLOSION +1995-1826-0023-773: (GOOBERS->GOULD WAS) DON'T GROW ON THE TOPS (OF VINES->EVENS) BUT (UNDERGROUND->UNDER GROUND) ON THE (ROOTS->WOODS) LIKE YAMS IS THAT SO +1995-1826-0024-774: THE GOLDEN FLEECE IT'S THE SILVER FLEECE HE (HARKENED->HEARKENED) 
+1995-1826-0025-775: (SOME TIME YOU'LL TELL ME->SOMETIME YOU DAMNLY) PLEASE WON'T YOU +1995-1826-0026-776: (NOW->THOU) FOR ONE LITTLE HALF HOUR SHE HAD BEEN A WOMAN TALKING TO A BOY NO NOT EVEN THAT SHE HAD BEEN TALKING JUST TALKING THERE WERE NO PERSONS IN THE CONVERSATION JUST THINGS ONE THING COTTON +1995-1836-0000-735: THE (HON->HONOURABLE) CHARLES SMITH MISS SARAH'S BROTHER WAS WALKING SWIFTLY UPTOWN FROM MISTER EASTERLY'S WALL STREET OFFICE AND HIS FACE WAS PALE +1995-1836-0001-736: AT LAST THE COTTON COMBINE WAS TO ALL APPEARANCES (AN->AND) ASSURED FACT AND HE WAS SLATED FOR THE SENATE +1995-1836-0002-737: WHY SHOULD HE NOT BE AS OTHER MEN +1995-1836-0003-738: SHE WAS NOT HERSELF (A NOTABLY->UNNOTABLY) INTELLIGENT WOMAN SHE GREATLY ADMIRED INTELLIGENCE OR WHATEVER LOOKED TO HER LIKE INTELLIGENCE IN OTHERS +1995-1836-0004-739: AS SHE AWAITED HER (GUESTS->GUESS) SHE SURVEYED THE TABLE WITH BOTH SATISFACTION AND (DISQUIETUDE->AS QUIETUDE) FOR HER SOCIAL FUNCTIONS WERE FEW (TONIGHT->TO NIGHT) THERE WERE SHE CHECKED THEM OFF ON HER FINGERS SIR JAMES (CREIGHTON->CRIGHTON) THE RICH ENGLISH MANUFACTURER AND LADY (CREIGHTON->KREITON) MISTER AND MISSUS (VANDERPOOL->VAN DERBOOLE) MISTER HARRY (CRESSWELL->CRESWELL) AND HIS SISTER JOHN TAYLOR AND HIS SISTER AND MISTER CHARLES SMITH WHOM THE EVENING PAPERS MENTIONED AS LIKELY TO BE UNITED STATES SENATOR FROM NEW JERSEY A SELECTION OF GUESTS THAT HAD BEEN DETERMINED UNKNOWN TO THE HOSTESS BY THE MEETING OF COTTON INTERESTS EARLIER IN THE DAY +1995-1836-0005-740: MISSUS (GREY->GRAY) HAD MET SOUTHERNERS BEFORE BUT NOT INTIMATELY AND SHE ALWAYS HAD IN MIND VIVIDLY THEIR CRUELTY TO POOR NEGROES A SUBJECT SHE MADE A POINT OF INTRODUCING FORTHWITH +1995-1836-0006-741: SHE WAS THEREFORE MOST AGREEABLY SURPRISED TO HEAR MISTER (CRESSWELL->CRESWELL) EXPRESS HIMSELF SO CORDIALLY AS APPROVING OF NEGRO EDUCATION +1995-1836-0007-742: (BUT YOU->DO) BELIEVE IN SOME EDUCATION ASKED MARY TAYLOR +1995-1836-0008-743: I BELIEVE IN THE TRAINING OF PEOPLE TO (THEIR HIGHEST->THE HOUSE) CAPACITY THE ENGLISHMAN HERE HEARTILY SECONDED HIM +1995-1836-0009-744: BUT (CRESSWELL->CRESWELL) ADDED SIGNIFICANTLY CAPACITY DIFFERS ENORMOUSLY BETWEEN RACES +1995-1836-0010-745: THE (VANDERPOOLS->VANDER POOLS) WERE SURE (OF->*) THIS AND THE ENGLISHMAN INSTANCING INDIA BECAME QUITE ELOQUENT MISSUS (GREY->GRAY) WAS MYSTIFIED BUT HARDLY DARED ADMIT IT THE GENERAL TREND OF THE CONVERSATION SEEMED TO BE THAT MOST INDIVIDUALS NEEDED TO BE SUBMITTED TO THE SHARPEST SCRUTINY BEFORE BEING ALLOWED MUCH EDUCATION AND AS FOR THE LOWER RACES IT WAS SIMPLY CRIMINAL TO OPEN SUCH USELESS OPPORTUNITIES TO THEM +1995-1836-0011-746: (POSITIVELY->WAS ACTIVELY) HEROIC ADDED (CRESSWELL->CHRISWELL) AVOIDING HIS SISTER'S EYES +1995-1836-0012-747: BUT (WE'RE->WE ARE) NOT (ER->A) EXACTLY (WELCOMED->WELCOME) +1995-1836-0013-748: (MARY->MERRY) TAYLOR HOWEVER RELATED THE TALE OF (ZORA->ZORAH) TO MISSUS (GREY'S->GRAY'S) PRIVATE EAR LATER +1995-1836-0014-749: FORTUNATELY SAID MISTER (VANDERPOOL->VAN DERPOOL) NORTHERNERS (AND->IN) SOUTHERNERS (ARE ARRIVING->ALL RIVING) AT A BETTER MUTUAL UNDERSTANDING ON MOST OF THESE MATTERS +1995-1837-0000-777: HE KNEW THE SILVER FLEECE HIS AND (ZORA'S->ZORAS) MUST BE RUINED +1995-1837-0001-778: IT WAS THE FIRST GREAT SORROW OF HIS LIFE IT WAS NOT SO MUCH THE LOSS OF THE (COTTON->CONTIN) ITSELF BUT THE FANTASY THE HOPES THE DREAMS BUILT AROUND IT +1995-1837-0002-779: AH THE SWAMP THE CRUEL SWAMP +1995-1837-0003-780: (THE->WHO) REVELATION OF HIS LOVE LIGHTED AND BRIGHTENED SLOWLY 
TILL IT FLAMED LIKE A SUNRISE OVER HIM AND LEFT HIM IN BURNING WONDER +1995-1837-0004-781: HE PANTED TO KNOW IF SHE TOO KNEW OR (KNEW->NEW) AND CARED NOT OR CARED AND KNEW NOT +1995-1837-0005-782: SHE WAS SO STRANGE (AND->IN) HUMAN A CREATURE +1995-1837-0006-783: THE WORLD WAS WATER VEILED IN MISTS +1995-1837-0007-784: THEN OF A SUDDEN AT MIDDAY THE SUN SHOT OUT HOT AND STILL NO BREATH OF AIR STIRRED THE SKY WAS LIKE BLUE STEEL THE EARTH STEAMED +1995-1837-0008-785: WHERE WAS THE USE OF IMAGINING +1995-1837-0009-786: THE LAGOON HAD BEEN LEVEL WITH THE (DYKES->DIKES) A WEEK AGO AND NOW +1995-1837-0010-787: PERHAPS SHE TOO MIGHT BE THERE WAITING WEEPING +1995-1837-0011-788: HE STARTED AT THE THOUGHT HE HURRIED FORTH SADLY +1995-1837-0012-789: HE SPLASHED AND STAMPED ALONG FARTHER AND FARTHER ONWARD UNTIL HE NEARED THE RAMPART OF THE CLEARING AND PUT FOOT UPON THE TREE BRIDGE +1995-1837-0013-790: THEN HE LOOKED DOWN THE LAGOON WAS DRY +1995-1837-0014-791: HE STOOD A MOMENT BEWILDERED THEN TURNED AND RUSHED UPON THE ISLAND A GREAT SHEET OF DAZZLING SUNLIGHT SWEPT THE PLACE AND BENEATH LAY A MIGHTY MASS OF OLIVE GREEN THICK TALL WET AND WILLOWY +1995-1837-0015-792: THE SQUARES OF COTTON SHARP EDGED HEAVY WERE JUST ABOUT TO BURST TO (BOLLS->BOWLS) +1995-1837-0016-793: FOR ONE LONG MOMENT HE PAUSED STUPID AGAPE WITH UTTER AMAZEMENT THEN LEANED DIZZILY AGAINST A TREE +1995-1837-0017-794: HE GAZED ABOUT PERPLEXED ASTONISHED +1995-1837-0018-795: HERE LAY THE READING OF THE RIDDLE WITH INFINITE WORK AND PAIN SOME ONE HAD DUG A CANAL FROM THE LAGOON TO THE CREEK INTO WHICH THE FORMER HAD DRAINED BY A LONG AND CROOKED WAY THUS ALLOWING IT TO EMPTY DIRECTLY +1995-1837-0019-796: HE SAT DOWN WEAK BEWILDERED AND ONE THOUGHT WAS UPPERMOST (ZORA->SORA) +1995-1837-0020-797: THE YEARS OF THE DAYS OF HER DYING WERE TEN +1995-1837-0021-798: THE HOPE AND DREAM OF HARVEST WAS UPON THE LAND +1995-1837-0022-799: UP IN THE SICK ROOM ZORA LAY ON THE LITTLE WHITE BED +1995-1837-0023-800: THE (NET->NED) AND WEB OF ENDLESS THINGS HAD BEEN CRAWLING AND CREEPING AROUND HER SHE HAD STRUGGLED IN DUMB SPEECHLESS TERROR AGAINST SOME MIGHTY GRASPING THAT STROVE FOR HER LIFE WITH GNARLED AND CREEPING FINGERS BUT NOW AT LAST (WEAKLY->WEEKLY) SHE OPENED HER EYES AND QUESTIONED +1995-1837-0024-801: FOR A WHILE SHE LAY IN HER CHAIR IN HAPPY DREAMY PLEASURE (AT->ITS) SUN AND BIRD AND TREE +1995-1837-0025-802: SHE ROSE WITH A FLEETING GLANCE GATHERED THE SHAWL (ROUND->AROUND) HER THEN GLIDING FORWARD WAVERING TREMULOUS SLIPPED ACROSS THE ROAD AND INTO THE SWAMP +1995-1837-0026-803: SHE HAD BEEN BORN WITHIN ITS BORDERS WITHIN (ITS->HIS) BORDERS SHE HAD LIVED AND GROWN AND WITHIN ITS (BORDERS->BORDER) SHE HAD MET HER LOVE +1995-1837-0027-804: ON SHE HURRIED UNTIL SWEEPING DOWN TO THE LAGOON AND THE ISLAND LO THE COTTON LAY BEFORE HER +1995-1837-0028-805: THE CHAIR WAS EMPTY BUT HE KNEW +1995-1837-0029-806: HE DARTED THROUGH THE TREES AND PAUSED A TALL MAN STRONGLY BUT SLIMLY MADE +2094-142345-0000-308: IT IS A VERY FINE OLD PLACE OF RED BRICK SOFTENED BY A PALE POWDERY LICHEN WHICH HAS DISPERSED ITSELF WITH HAPPY IRREGULARITY SO AS TO BRING THE RED BRICK INTO TERMS OF FRIENDLY COMPANIONSHIP WITH (THE->A) LIMESTONE ORNAMENTS SURROUNDING THE THREE GABLES THE WINDOWS AND THE DOOR PLACE +2094-142345-0001-309: BUT THE WINDOWS ARE PATCHED WITH WOODEN PANES AND THE DOOR I THINK IS LIKE THE GATE IT IS NEVER OPENED +2094-142345-0002-310: FOR IT IS A SOLID HEAVY HANDSOME DOOR AND MUST ONCE HAVE BEEN IN THE HABIT OF (SHUTTING->SHEDDING) WITH A 
SONOROUS BANG BEHIND (A LIVERIED->THE LIVERYED) LACKEY WHO HAD JUST SEEN HIS MASTER AND MISTRESS OFF THE GROUNDS IN A CARRIAGE AND PAIR +2094-142345-0003-311: A LARGE OPEN FIREPLACE WITH RUSTY DOGS IN IT AND A BARE BOARDED FLOOR AT THE FAR END FLEECES OF WOOL STACKED UP IN THE MIDDLE OF THE FLOOR SOME EMPTY CORN BAGS +2094-142345-0004-312: AND WHAT THROUGH THE LEFT HAND WINDOW +2094-142345-0005-313: SEVERAL CLOTHES HORSES (A PILLION->APILLION) A SPINNING WHEEL AND AN OLD BOX WIDE OPEN AND STUFFED FULL OF COLOURED RAGS +2094-142345-0006-314: AT THE EDGE OF THIS BOX THERE LIES A GREAT WOODEN DOLL WHICH SO FAR AS MUTILATION IS CONCERNED BEARS A STRONG RESEMBLANCE TO THE FINEST GREEK SCULPTURE AND ESPECIALLY IN THE TOTAL LOSS OF ITS NOSE +2094-142345-0007-315: THE HISTORY OF THE HOUSE IS PLAIN NOW +2094-142345-0008-316: BUT THERE IS ALWAYS (A->AS) STRONGER SENSE OF LIFE WHEN THE SUN IS BRILLIANT AFTER RAIN AND NOW HE IS POURING DOWN HIS BEAMS AND MAKING SPARKLES AMONG THE WET STRAW AND LIGHTING UP EVERY PATCH OF VIVID GREEN MOSS ON THE RED TILES OF THE (COW SHED->COWSHED) AND TURNING EVEN THE MUDDY WATER THAT IS HURRYING ALONG THE CHANNEL TO THE DRAIN INTO A MIRROR FOR THE YELLOW (BILLED->BUILD) DUCKS WHO ARE SEIZING THE OPPORTUNITY OF GETTING A DRINK WITH AS MUCH BODY IN IT AS POSSIBLE +2094-142345-0009-317: FOR THE GREAT BARN DOORS ARE THROWN WIDE OPEN AND MEN ARE BUSY THERE MENDING THE HARNESS UNDER THE SUPERINTENDENCE OF MISTER GOBY THE (WHITTAW->WIDOW) OTHERWISE SADDLER WHO ENTERTAINS THEM WITH THE LATEST (TREDDLESTON->TREADLESTON) GOSSIP +2094-142345-0010-318: (HETTY SORREL->HETTY'S SURREL) OFTEN TOOK THE OPPORTUNITY WHEN HER AUNT'S BACK WAS TURNED OF LOOKING AT THE PLEASING REFLECTION OF HERSELF IN THOSE POLISHED (SURFACES->SERVICES) FOR THE OAK TABLE WAS USUALLY TURNED UP LIKE A SCREEN AND WAS MORE FOR ORNAMENT THAN FOR USE AND SHE COULD SEE HERSELF SOMETIMES IN THE GREAT ROUND PEWTER DISHES THAT WERE RANGED ON THE SHELVES ABOVE THE LONG DEAL DINNER TABLE OR IN THE HOBS OF THE GRATE WHICH ALWAYS SHONE LIKE JASPER +2094-142345-0011-319: DO NOT SUPPOSE HOWEVER THAT MISSUS POYSER WAS ELDERLY OR SHREWISH IN HER APPEARANCE SHE WAS A GOOD LOOKING WOMAN NOT MORE THAN EIGHT AND THIRTY (OF->A) FAIR COMPLEXION AND SANDY HAIR (WELL SHAPEN LIGHT FOOTED->WHILE SHAKEN LIGHTFOOTED) +2094-142345-0012-320: THE FAMILY LIKENESS BETWEEN HER AND HER NIECE DINAH MORRIS WITH (THE->A) CONTRAST BETWEEN HER KEENNESS AND DINAH'S SERAPHIC GENTLENESS OF EXPRESSION MIGHT HAVE SERVED A PAINTER AS AN EXCELLENT SUGGESTION FOR (A->*) MARTHA AND MARY +2094-142345-0013-321: HER TONGUE WAS NOT LESS KEEN THAN HER EYE AND WHENEVER A DAMSEL CAME WITHIN (EARSHOT->EAR SHOT) SEEMED TO TAKE UP AN UNFINISHED LECTURE AS A BARREL ORGAN TAKES UP A TUNE PRECISELY AT THE POINT WHERE IT HAD LEFT OFF +2094-142345-0014-322: THE FACT THAT IT WAS CHURNING DAY WAS ANOTHER REASON WHY IT WAS INCONVENIENT TO HAVE THE (WHITTAWS->WIDOWS) AND WHY CONSEQUENTLY MISSUS POYSER SHOULD SCOLD MOLLY THE HOUSEMAID WITH UNUSUAL SEVERITY +2094-142345-0015-323: TO ALL APPEARANCE MOLLY HAD GOT THROUGH HER AFTER DINNER WORK IN AN EXEMPLARY MANNER HAD CLEANED HERSELF WITH GREAT DISPATCH AND NOW CAME TO ASK SUBMISSIVELY IF SHE SHOULD SIT DOWN TO HER SPINNING TILL MILKING TIME +2094-142345-0016-324: SPINNING INDEED +2094-142345-0017-325: I NEVER KNEW YOUR EQUALS FOR GALLOWSNESS +2094-142345-0018-326: WHO TAUGHT YOU TO SCRUB A FLOOR I SHOULD LIKE TO KNOW +2094-142345-0019-327: COMB THE WOOL FOR THE (WHITTAWS->WIDOWS) INDEED +2094-142345-0020-328: THAT'S WHAT YOU'D 
LIKE TO BE DOING IS IT +2094-142345-0021-329: THAT'S THE WAY WITH YOU THAT'S THE ROAD YOU'D ALL LIKE TO GO HEADLONGS TO RUIN +2094-142345-0022-330: MISTER (OTTLEY'S->OUTLEY'S) INDEED +2094-142345-0023-331: (YOU'RE->YOU ARE) A RARE (UN FOR SITTING->AND PROCEEDING) DOWN TO YOUR WORK A LITTLE WHILE AFTER (IT'S->ITS) TIME TO PUT BY +2094-142345-0024-332: (MUNNY->MONEY) MY (IRON'S TWITE->IRON STRIKE) TOLD PEASE PUT IT DOWN TO WARM +2094-142345-0025-333: COLD IS IT MY DARLING BLESS YOUR SWEET FACE +2094-142345-0026-334: SHE'S GOING TO PUT THE IRONING THINGS AWAY +2094-142345-0027-335: (MUNNY->MONEY) I (TOULD IKE->DID LIKE) TO DO INTO (DE->THE) BARN TO TOMMY TO SEE (DE WHITTAWD->THE WID ODD) +2094-142345-0028-336: NO NO NO TOTTY (UD->HAD) GET HER FEET WET SAID MISSUS POYSER CARRYING AWAY HER IRON +2094-142345-0029-337: DID EVER ANYBODY SEE THE LIKE SCREAMED MISSUS POYSER RUNNING TOWARDS THE TABLE WHEN HER EYE HAD FALLEN ON THE BLUE STREAM +2094-142345-0030-338: TOTTY HOWEVER HAD DESCENDED FROM HER CHAIR WITH GREAT SWIFTNESS AND WAS ALREADY IN RETREAT TOWARDS THE DAIRY WITH A SORT OF WADDLING RUN AND AN AMOUNT OF FAT ON THE NAPE OF HER NECK WHICH MADE HER LOOK LIKE THE METAMORPHOSIS OF A WHITE SUCKLING PIG +2094-142345-0031-339: AND SHE WAS VERY FOND OF YOU TOO AUNT RACHEL +2094-142345-0032-340: I OFTEN HEARD HER TALK OF YOU IN THE SAME SORT OF WAY +2094-142345-0033-341: WHEN SHE HAD THAT (BAD->BAN) ILLNESS AND I WAS ONLY ELEVEN YEARS OLD SHE USED TO SAY YOU'LL HAVE A FRIEND ON EARTH IN YOUR AUNT RACHEL IF I'M TAKEN FROM YOU FOR SHE HAS A KIND HEART AND I'M SURE I'VE FOUND IT SO +2094-142345-0034-342: AND THERE'S LINEN IN THE HOUSE AS I COULD WELL SPARE YOU FOR (I'VE->I) GOT LOTS (O->OF) SHEETING AND TABLE CLOTHING AND (TOWELLING AS->TOWELINGS) ISN'T MADE UP +2094-142345-0035-343: BUT NOT MORE THAN WHAT'S IN THE BIBLE (AUNT->AND) SAID DINAH +2094-142345-0036-344: NAY DEAR AUNT YOU NEVER HEARD ME SAY THAT ALL PEOPLE ARE CALLED TO FORSAKE THEIR WORK AND THEIR FAMILIES +2094-142345-0037-345: WE CAN ALL BE SERVANTS OF GOD WHEREVER OUR LOT IS CAST BUT HE GIVES US DIFFERENT SORTS OF WORK ACCORDING AS HE FITS US FOR IT AND CALLS US TO IT +2094-142345-0038-346: I CAN NO MORE HELP SPENDING MY LIFE IN TRYING TO DO WHAT I CAN FOR THE SOULS OF OTHERS (THAN->THEN) YOU COULD HELP RUNNING IF YOU HEARD LITTLE TOTTY CRYING AT THE OTHER END OF THE HOUSE THE VOICE WOULD GO TO YOUR HEART YOU WOULD THINK THE DEAR CHILD WAS IN TROUBLE OR IN DANGER AND YOU COULDN'T REST WITHOUT RUNNING TO HELP HER AND COMFORT HER +2094-142345-0039-347: I'VE STRONG ASSURANCE THAT NO EVIL WILL HAPPEN TO YOU AND MY UNCLE AND THE CHILDREN FROM ANYTHING (I'VE->I HAVE) DONE +2094-142345-0040-348: I DIDN'T PREACH WITHOUT DIRECTION +2094-142345-0041-349: DIRECTION +2094-142345-0042-350: I (HANNA->HAD A) COMMON PATIENCE WITH YOU +2094-142345-0043-351: BY THIS TIME THE TWO GENTLEMEN HAD REACHED THE PALINGS AND HAD GOT DOWN FROM THEIR HORSES IT WAS PLAIN THEY MEANT TO COME IN +2094-142345-0044-352: SAID MISTER IRWINE WITH HIS STATELY CORDIALITY +2094-142345-0045-353: OH SIR DON'T MENTION IT SAID MISSUS POYSER +2094-142345-0046-354: I DELIGHT IN YOUR KITCHEN +2094-142345-0047-355: POYSER IS NOT AT HOME IS HE +2094-142345-0048-356: SAID CAPTAIN DONNITHORNE (SEATING->SITTING) HIMSELF WHERE HE COULD SEE ALONG THE SHORT PASSAGE TO THE OPEN DAIRY DOOR +2094-142345-0049-357: NO SIR HE ISN'T HE'S GONE TO (ROSSETER->ROSSOTER) TO SEE MISTER WEST THE FACTOR ABOUT THE WOOL +2094-142345-0050-358: BUT THERE'S FATHER (THE->IN) BARN SIR IF HE'D BE OF ANY USE 
+2094-142345-0051-359: NO THANK YOU I'LL JUST LOOK AT THE (WHELPS->WHELMS) AND LEAVE A MESSAGE ABOUT THEM WITH YOUR SHEPHERD +2094-142345-0052-360: I MUST COME ANOTHER DAY AND SEE YOUR HUSBAND I WANT TO HAVE A CONSULTATION WITH HIM ABOUT HORSES +2094-142345-0053-361: FOR IF HE'S ANYWHERE ON THE FARM WE CAN SEND FOR HIM IN A MINUTE +2094-142345-0054-362: OH SIR SAID MISSUS POYSER RATHER ALARMED YOU WOULDN'T LIKE IT AT ALL +2094-142345-0055-363: BUT YOU KNOW MORE ABOUT THAT THAN I DO SIR +2094-142345-0056-364: I THINK I SHOULD BE DOING YOU A SERVICE TO TURN YOU OUT OF SUCH A PLACE +2094-142345-0057-365: I (KNOW HIS->KNOWS) FARM IS IN BETTER ORDER THAN ANY OTHER WITHIN TEN MILES OF US AND AS FOR THE KITCHEN HE ADDED SMILING I DON'T BELIEVE THERE'S ONE IN THE KINGDOM TO BEAT IT +2094-142345-0058-366: BY THE (BY I'VE->BYE I HAVE) NEVER SEEN YOUR DAIRY I MUST SEE YOUR (DAIRY->DEARIE) MISSUS POYSER +2094-142345-0059-367: THIS MISSUS POYSER SAID BLUSHING AND BELIEVING THAT THE CAPTAIN WAS REALLY INTERESTED IN HER MILK PANS AND WOULD ADJUST HIS OPINION OF HER TO THE APPEARANCE OF HER DAIRY +2094-142345-0060-368: OH I'VE NO DOUBT IT'S IN CAPITAL ORDER +2300-131720-0000-1816: THE PARIS PLANT LIKE THAT AT THE CRYSTAL PALACE WAS A TEMPORARY EXHIBIT +2300-131720-0001-1817: THE LONDON PLANT WAS LESS TEMPORARY BUT NOT PERMANENT SUPPLYING BEFORE IT WAS TORN OUT NO FEWER THAN THREE THOUSAND LAMPS IN HOTELS CHURCHES STORES AND DWELLINGS IN THE VICINITY OF HOLBORN (VIADUCT->VIADUC) +2300-131720-0002-1818: THERE MESSRS JOHNSON AND HAMMER PUT INTO PRACTICE MANY OF THE IDEAS NOW STANDARD IN THE ART AND SECURED MUCH USEFUL DATA FOR THE WORK IN NEW YORK OF WHICH THE STORY HAS JUST BEEN TOLD +2300-131720-0003-1819: THE DYNAMO ELECTRIC MACHINE THOUGH SMALL WAS ROBUST FOR UNDER ALL THE VARYING SPEEDS OF WATER POWER AND THE VICISSITUDES OF THE PLANT TO WHICH IT BELONGED IT CONTINUED IN ACTIVE USE UNTIL EIGHTEEN NINETY NINE SEVENTEEN YEARS +2300-131720-0004-1820: OWING TO HIS INSISTENCE ON LOW PRESSURE DIRECT CURRENT FOR USE IN DENSELY POPULATED DISTRICTS AS THE ONLY SAFE AND TRULY UNIVERSAL PROFITABLE WAY OF DELIVERING ELECTRICAL ENERGY TO THE CONSUMERS EDISON HAS BEEN FREQUENTLY SPOKEN OF AS AN OPPONENT OF THE ALTERNATING CURRENT +2300-131720-0005-1821: WHY IF WE ERECT A STATION AT THE FALLS IT IS A GREAT ECONOMY TO GET IT UP TO THE CITY +2300-131720-0006-1822: THERE SEEMS NO GOOD REASON FOR BELIEVING THAT IT WILL CHANGE +2300-131720-0007-1823: BROAD AS THE PRAIRIES AND FREE IN THOUGHT AS THE WINDS THAT (SWEEP->SWEPT) THEM HE IS (IDIOSYNCRATICALLY->IDIO SENCRATICALLY) OPPOSED TO LOOSE AND WASTEFUL METHODS TO PLANS OF EMPIRE THAT NEGLECT THE POOR AT THE GATE +2300-131720-0008-1824: EVERYTHING HE HAS DONE HAS BEEN AIMED AT THE CONSERVATION OF ENERGY THE CONTRACTION OF SPACE THE INTENSIFICATION OF CULTURE +2300-131720-0009-1825: FOR SOME YEARS IT WAS NOT FOUND FEASIBLE TO OPERATE MOTORS ON ALTERNATING CURRENT CIRCUITS AND THAT REASON WAS OFTEN URGED AGAINST IT SERIOUSLY +2300-131720-0010-1826: IT COULD NOT BE USED FOR ELECTROPLATING OR DEPOSITION NOR COULD IT CHARGE STORAGE BATTERIES ALL OF WHICH ARE EASILY WITHIN THE ABILITY OF THE DIRECT CURRENT +2300-131720-0011-1827: BUT WHEN IT CAME TO BE A QUESTION OF LIGHTING A SCATTERED SUBURB A GROUP OF DWELLINGS ON THE OUTSKIRTS A REMOTE COUNTRY RESIDENCE OR A FARM HOUSE THE ALTERNATING CURRENT IN ALL ELEMENTS SAVE ITS DANGER WAS AND IS IDEAL +2300-131720-0012-1828: EDISON WAS INTOLERANT OF SHAM AND (SHODDY->SHOTTY) AND NOTHING WOULD SATISFY HIM THAT COULD NOT STAND CROSS 
EXAMINATION BY MICROSCOPE TEST TUBE AND GALVANOMETER +2300-131720-0013-1829: UNLESS HE COULD SECURE AN ENGINE OF SMOOTHER RUNNING AND MORE EXACTLY (GOVERNED->GOVERN) AND REGULATED THAN THOSE AVAILABLE FOR HIS DYNAMO AND LAMP EDISON REALIZED THAT HE WOULD FIND IT ALMOST IMPOSSIBLE TO GIVE A STEADY LIGHT +2300-131720-0014-1830: MISTER EDISON WAS A LEADER FAR AHEAD OF THE TIME +2300-131720-0015-1831: HE OBTAINED THE DESIRED SPEED AND LOAD WITH A FRICTION (BRAKE->BREAK) ALSO REGULATOR OF SPEED BUT WAITED FOR AN INDICATOR TO VERIFY IT +2300-131720-0016-1832: THEN AGAIN THERE WAS NO KNOWN WAY TO (LUBRICATE->LUBRICADE) AN ENGINE FOR CONTINUOUS RUNNING AND MISTER EDISON INFORMED ME THAT AS A MARINE ENGINE STARTED BEFORE THE SHIP LEFT NEW YORK AND CONTINUED RUNNING UNTIL IT REACHED ITS HOME PORT SO AN ENGINE FOR HIS PURPOSES MUST PRODUCE LIGHT AT ALL TIMES +2300-131720-0017-1833: EDISON HAD INSTALLED HIS HISTORIC FIRST GREAT CENTRAL STATION SYSTEM IN NEW YORK ON THE MULTIPLE ARC SYSTEM COVERED BY HIS FEEDER AND MAIN INVENTION WHICH RESULTED IN A NOTABLE SAVING IN THE COST OF CONDUCTORS AS AGAINST A (STRAIGHT->STRAIT) TWO WIRE SYSTEM THROUGHOUT OF THE TREE KIND +2300-131720-0018-1834: HE SOON FORESAW THAT STILL GREATER ECONOMY WOULD BE NECESSARY FOR COMMERCIAL SUCCESS NOT ALONE FOR THE LARGER TERRITORY OPENING BUT FOR THE COMPACT (DISTRICTS->DISTRICT) OF LARGE CITIES +2300-131720-0019-1835: THE STRONG POSITION HELD BY THE EDISON SYSTEM UNDER THE STRENUOUS COMPETITION (THAT->IT) WAS ALREADY SPRINGING UP WAS ENORMOUSLY IMPROVED BY THE INTRODUCTION OF THE THREE WIRE SYSTEM AND (IT->HE) GAVE AN IMMEDIATE IMPETUS TO (INCANDESCENT->INCONDESCENT) LIGHTING +2300-131720-0020-1836: IT WAS SPECIALLY SUITED FOR A TRIAL PLANT ALSO IN THE EARLY DAYS WHEN A YIELD OF SIX OR EIGHT LAMPS TO THE HORSE (POWER->BOWER) WAS CONSIDERED SUBJECT FOR CONGRATULATION +2300-131720-0021-1837: THE STREET CONDUCTORS WERE OF THE OVERHEAD POLE LINE CONSTRUCTION AND WERE INSTALLED BY THE CONSTRUCTION COMPANY THAT HAD BEEN ORGANIZED BY EDISON TO BUILD (AND->AN) EQUIP CENTRAL STATIONS +2300-131720-0022-1838: MEANWHILE HE HAD CALLED UPON ME TO MAKE A REPORT OF THE THREE WIRE SYSTEM KNOWN IN ENGLAND AS THE HOPKINSON BOTH DOCTOR JOHN HOPKINSON AND MISTER EDISON BEING INDEPENDENT (INVENTORS->IN VENORS) AT PRACTICALLY THE SAME TIME +2300-131720-0023-1839: I THINK HE WAS PERHAPS MORE APPRECIATIVE (THAN->THAT) I WAS OF THE DISCIPLINE OF THE EDISON CONSTRUCTION DEPARTMENT AND THOUGHT IT WOULD BE WELL FOR US TO WAIT UNTIL THE MORNING OF THE FOURTH BEFORE WE STARTED UP +2300-131720-0024-1840: BUT THE PLANT RAN AND IT WAS THE FIRST THREE WIRE STATION IN THIS COUNTRY +2300-131720-0025-1841: THEY WERE LATER USED AS (RESERVE->RESERVED) MACHINES AND FINALLY WITH THE ENGINE RETIRED FROM SERVICE AS PART OF THE COLLECTION OF EDISONIA BUT THEY REMAIN IN PRACTICALLY AS GOOD CONDITION AS WHEN INSTALLED IN EIGHTEEN EIGHTY THREE +2300-131720-0026-1842: THE ARC LAMP INSTALLED OUTSIDE A CUSTOMER'S PREMISES OR IN A CIRCUIT FOR PUBLIC STREET LIGHTING BURNED SO MANY HOURS NIGHTLY SO MANY NIGHTS IN THE MONTH AND WAS PAID FOR AT THAT RATE SUBJECT TO REBATE FOR HOURS WHEN THE LAMP MIGHT BE OUT THROUGH ACCIDENT +2300-131720-0027-1843: EDISON HELD THAT THE ELECTRICITY SOLD MUST BE MEASURED JUST LIKE GAS OR WATER AND HE PROCEEDED TO DEVELOP A METER +2300-131720-0028-1844: THERE WAS INFINITE SCEPTICISM AROUND HIM ON THE SUBJECT AND WHILE OTHER INVENTORS WERE ALSO GIVING THE SUBJECT THEIR THOUGHT THE PUBLIC TOOK IT FOR GRANTED THAT ANYTHING SO UTTERLY INTANGIBLE AS 
ELECTRICITY THAT COULD NOT BE SEEN OR WEIGHED AND ONLY GAVE SECONDARY EVIDENCE OF ITSELF AT THE EXACT POINT OF USE COULD NOT BE BROUGHT TO ACCURATE REGISTRATION +2300-131720-0029-1845: HENCE THE EDISON ELECTROLYTIC METER IS NO LONGER USED DESPITE ITS EXCELLENT QUALITIES +2300-131720-0030-1846: THE PRINCIPLE EMPLOYED IN THE EDISON ELECTROLYTIC METER IS THAT WHICH EXEMPLIFIES THE POWER OF ELECTRICITY TO DECOMPOSE A CHEMICAL SUBSTANCE +2300-131720-0031-1847: ASSOCIATED WITH THIS SIMPLE FORM OF APPARATUS WERE VARIOUS INGENIOUS DETAILS AND REFINEMENTS TO SECURE REGULARITY OF OPERATION FREEDOM FROM INACCURACY AND IMMUNITY FROM SUCH TAMPERING AS WOULD PERMIT THEFT OF CURRENT OR DAMAGE +2300-131720-0032-1848: THE STANDARD EDISON METER PRACTICE WAS TO REMOVE THE CELLS ONCE A MONTH TO THE (METER->METEOR) ROOM OF THE CENTRAL STATION COMPANY FOR EXAMINATION ANOTHER SET BEING SUBSTITUTED +2300-131720-0033-1849: IN DECEMBER EIGHTEEN EIGHTY EIGHT MISTER W J JENKS READ AN INTERESTING PAPER BEFORE THE AMERICAN INSTITUTE OF ELECTRICAL ENGINEERS ON THE SIX YEARS OF PRACTICAL EXPERIENCE HAD UP TO THAT TIME WITH THE (METER->METRE) THEN MORE GENERALLY IN USE THAN ANY OTHER +2300-131720-0034-1850: THE OTHERS HAVING BEEN IN OPERATION TOO SHORT A TIME TO SHOW DEFINITE RESULTS ALTHOUGH THEY ALSO WENT QUICKLY TO A DIVIDEND BASIS +2300-131720-0035-1851: IN THIS CONNECTION IT SHOULD BE MENTIONED THAT THE ASSOCIATION OF EDISON ILLUMINATING COMPANIES IN THE SAME YEAR ADOPTED RESOLUTIONS UNANIMOUSLY TO THE EFFECT THAT THE EDISON METER WAS ACCURATE AND THAT ITS USE WAS NOT EXPENSIVE FOR STATIONS ABOVE ONE THOUSAND LIGHTS AND THAT THE BEST FINANCIAL RESULTS WERE INVARIABLY SECURED IN A STATION SELLING CURRENT BY (METER->METRE) +2300-131720-0036-1852: THE (METER->METRE) CONTINUED IN GENERAL SERVICE DURING EIGHTEEN NINETY NINE AND PROBABLY UP TO THE CLOSE OF THE CENTURY +2300-131720-0037-1853: HE WEIGHED AND (REWEIGHED->REWAYED) THE (METER PLATES->METERPLATES) AND PURSUED EVERY LINE OF INVESTIGATION IMAGINABLE BUT ALL IN VAIN +2300-131720-0038-1854: HE FELT HE WAS UP AGAINST IT AND THAT PERHAPS ANOTHER KIND OF A JOB WOULD SUIT HIM BETTER +2300-131720-0039-1855: THE PROBLEM WAS SOLVED +2300-131720-0040-1856: WE WERE MORE INTERESTED IN THE TECHNICAL CONDITION OF THE STATION THAN IN THE COMMERCIAL PART +2300-131720-0041-1857: WE HAD (METERS->METRES) IN WHICH THERE WERE TWO BOTTLES OF LIQUID +237-126133-0000-2407: HERE SHE WOULD STAY COMFORTED AND (SOOTHED->SOOTHE) AMONG THE LOVELY PLANTS AND RICH EXOTICS REJOICING THE HEART OF OLD TURNER THE GARDENER WHO SINCE POLLY'S FIRST RAPTUROUS ENTRANCE HAD TAKEN HER INTO HIS GOOD GRACES FOR ALL TIME +237-126133-0001-2408: EVERY CHANCE SHE COULD STEAL AFTER PRACTICE HOURS WERE OVER AND AFTER THE CLAMOROUS DEMANDS OF THE BOYS UPON HER TIME WERE FULLY SATISFIED WAS SEIZED TO FLY ON THE WINGS OF THE WIND TO THE FLOWERS +237-126133-0002-2409: THEN DEAR SAID MISSUS WHITNEY YOU MUST BE KINDER TO HER THAN EVER THINK WHAT IT WOULD BE FOR ONE OF YOU TO BE AWAY FROM HOME EVEN AMONG FRIENDS +237-126133-0003-2410: SOMEHOW OF ALL THE DAYS WHEN THE HOME FEELING WAS THE STRONGEST THIS DAY IT SEEMED AS IF SHE COULD BEAR IT NO LONGER +237-126133-0004-2411: IF SHE COULD ONLY SEE PHRONSIE FOR JUST ONE MOMENT +237-126133-0005-2412: OH SHE'S ALWAYS AT THE PIANO SAID VAN SHE MUST BE THERE NOW SOMEWHERE AND THEN SOMEBODY LAUGHED +237-126133-0006-2413: AT THIS THE BUNDLE OPENED SUDDENLY AND OUT POPPED PHRONSIE +237-126133-0007-2414: BUT POLLY COULDN'T SPEAK AND IF JASPER HADN'T CAUGHT HER JUST IN TIME SHE WOULD 
HAVE TUMBLED OVER BACKWARD FROM THE STOOL PHRONSIE AND ALL +237-126133-0008-2415: ASKED PHRONSIE WITH HER LITTLE FACE CLOSE TO POLLY'S OWN +237-126133-0009-2416: NOW YOU'LL STAY CRIED VAN SAY POLLY WON'T YOU +237-126133-0010-2417: OH YOU ARE THE DEAREST AND BEST MISTER KING I EVER SAW BUT HOW DID YOU MAKE MAMMY LET HER COME +237-126133-0011-2418: ISN'T HE SPLENDID CRIED JASPER IN INTENSE PRIDE SWELLING UP FATHER KNEW HOW TO DO IT +237-126133-0012-2419: THERE THERE HE SAID SOOTHINGLY PATTING HER BROWN FUZZY HEAD +237-126133-0013-2420: I KNOW GASPED POLLY CONTROLLING HER SOBS I WON'T ONLY I CAN'T THANK YOU +237-126133-0014-2421: ASKED PHRONSIE IN INTENSE INTEREST SLIPPING DOWN OUT OF POLLY'S ARMS AND CROWDING UP CLOSE TO JASPER'S SIDE +237-126133-0015-2422: YES ALL ALONE BY HIMSELF ASSERTED JASPER VEHEMENTLY AND WINKING FURIOUSLY TO THE OTHERS TO STOP THEIR LAUGHING HE DID NOW TRULY PHRONSIE +237-126133-0016-2423: OH NO (JASPER->JAPS HER) I MUST GO BY MY VERY OWN SELF +237-126133-0017-2424: THERE JAP YOU'VE CAUGHT IT LAUGHED PERCY WHILE THE OTHERS SCREAMED AT THE SIGHT OF JASPER'S FACE +237-126133-0018-2425: DON'T MIND IT POLLY WHISPERED JASPER TWASN'T HER FAULT +237-126133-0019-2426: DEAR ME EJACULATED THE OLD GENTLEMAN IN THE UTMOST AMAZEMENT AND SUCH A TIME AS I'VE HAD TO GET HER HERE TOO +237-126133-0020-2427: HOW DID HER MOTHER EVER LET HER GO +237-126133-0021-2428: SHE ASKED IMPULSIVELY I DIDN'T BELIEVE YOU COULD PERSUADE HER FATHER +237-126133-0022-2429: I DIDN'T HAVE ANY FEARS IF I WORKED IT RIGHTLY SAID THE OLD GENTLEMAN COMPLACENTLY +237-126133-0023-2430: HE CRIED (IN->AND) HIGH DUDGEON JUST AS IF HE OWNED THE WHOLE OF THE PEPPERS AND COULD DISPOSE OF THEM ALL TO SUIT HIS FANCY +237-126133-0024-2431: AND THE OLD GENTLEMAN WAS SO DELIGHTED WITH HIS SUCCESS THAT HE HAD TO BURST OUT INTO A SERIES OF SHORT HAPPY BITS OF LAUGHTER THAT OCCUPIED QUITE A SPACE OF TIME +237-126133-0025-2432: AT LAST HE CAME OUT OF THEM AND WIPED HIS FACE VIGOROUSLY +237-134493-0000-2388: IT IS SIXTEEN YEARS SINCE JOHN (BERGSON->BERKS AND) DIED +237-134493-0001-2389: HIS WIFE NOW LIES BESIDE HIM AND THE WHITE SHAFT THAT MARKS THEIR GRAVES GLEAMS ACROSS THE WHEAT FIELDS +237-134493-0002-2390: FROM THE NORWEGIAN GRAVEYARD ONE LOOKS OUT OVER A VAST CHECKER BOARD MARKED OFF IN SQUARES OF WHEAT AND CORN LIGHT AND DARK (DARK->*) AND LIGHT +237-134493-0003-2391: FROM THE GRAVEYARD GATE ONE CAN COUNT A DOZEN (GAYLY->GAILY) PAINTED (FARMHOUSES->FARM HOUSES) THE GILDED WEATHER (VANES->VEINS) ON THE BIG RED BARNS WINK AT EACH OTHER ACROSS THE GREEN AND BROWN AND YELLOW FIELDS +237-134493-0004-2392: THE AIR AND THE EARTH ARE CURIOUSLY MATED AND INTERMINGLED AS IF THE ONE WERE THE BREATH OF THE OTHER +237-134493-0005-2393: HE WAS A SPLENDID FIGURE OF A BOY TALL AND STRAIGHT AS A YOUNG PINE TREE WITH A HANDSOME HEAD AND STORMY GRAY EYES DEEPLY SET UNDER A SERIOUS BROW +237-134493-0006-2394: THAT'S NOT MUCH OF A JOB FOR AN ATHLETE HERE I'VE BEEN TO TOWN AND BACK +237-134493-0007-2395: (ALEXANDRA LETS->ALEXANDER THAT'S) YOU SLEEP LATE +237-134493-0008-2396: SHE GATHERED UP HER REINS +237-134493-0009-2397: PLEASE WAIT FOR ME MARIE (EMIL->AMYL) COAXED +237-134493-0010-2398: I NEVER SEE (LOU'S SCYTHE->LOOSE SIGH) OVER HERE +237-134493-0011-2399: HOW BROWN YOU'VE GOT SINCE YOU CAME HOME I WISH I HAD AN ATHLETE TO MOW MY ORCHARD +237-134493-0012-2400: I GET WET TO MY KNEES WHEN I GO DOWN TO (PICK CHERRIES->PICTURES) +237-134493-0013-2401: INDEED HE HAD LOOKED AWAY WITH (THE->A) PURPOSE OF NOT SEEING IT +237-134493-0014-2402: 
THEY THINK (YOU'RE->YOU ARE) PROUD BECAUSE YOU'VE BEEN AWAY TO SCHOOL OR SOMETHING +237-134493-0015-2403: THERE WAS SOMETHING INDIVIDUAL ABOUT THE GREAT FARM A MOST UNUSUAL TRIMNESS AND CARE FOR DETAIL +237-134493-0016-2404: ON EITHER SIDE OF THE ROAD FOR A MILE BEFORE YOU REACHED THE FOOT OF THE HILL STOOD TALL (OSAGE->O SAGE) ORANGE HEDGES THEIR GLOSSY GREEN MARKING OFF THE YELLOW FIELDS +237-134493-0017-2405: ANY ONE THEREABOUTS WOULD HAVE TOLD YOU THAT THIS WAS ONE OF THE RICHEST FARMS ON THE DIVIDE AND THAT THE FARMER WAS A WOMAN ALEXANDRA BERGSON +237-134493-0018-2406: THERE IS EVEN A WHITE ROW OF BEEHIVES IN THE ORCHARD UNDER THE WALNUT TREES +237-134500-0000-2345: FRANK READ ENGLISH SLOWLY AND THE MORE HE READ ABOUT THIS DIVORCE CASE THE ANGRIER HE GREW +237-134500-0001-2346: MARIE SIGHED +237-134500-0002-2347: A (BRISK WIND->BRAY SQUINT) HAD COME UP AND WAS DRIVING PUFFY WHITE CLOUDS ACROSS THE SKY +237-134500-0003-2348: THE ORCHARD WAS SPARKLING AND RIPPLING IN THE SUN +237-134500-0004-2349: THAT INVITATION DECIDED HER +237-134500-0005-2350: OH BUT I'M GLAD TO GET THIS PLACE MOWED +237-134500-0006-2351: JUST SMELL THE WILD ROSES THEY ARE ALWAYS SO SPICY AFTER A RAIN +237-134500-0007-2352: WE NEVER HAD SO MANY OF THEM IN HERE BEFORE +237-134500-0008-2353: I SUPPOSE IT'S THE WET SEASON WILL YOU HAVE TO CUT THEM TOO +237-134500-0009-2354: I SUPPOSE THAT'S THE (WET->WHITE) SEASON TOO THEN +237-134500-0010-2355: IT'S EXCITING TO SEE EVERYTHING GROWING SO FAST AND TO GET THE GRASS CUT +237-134500-0011-2356: AREN'T YOU SPLASHED LOOK AT THE SPIDER (WEBS->WHIPS) ALL OVER THE GRASS +237-134500-0012-2357: IN A FEW MOMENTS HE HEARD THE CHERRIES DROPPING SMARTLY INTO THE PAIL AND HE BEGAN TO SWING HIS SCYTHE WITH THAT LONG EVEN STROKE THAT FEW AMERICAN BOYS EVER LEARN +237-134500-0013-2358: MARIE PICKED (*->THE) CHERRIES AND SANG SOFTLY TO HERSELF STRIPPING ONE GLITTERING (BRANCH->RANCH) AFTER ANOTHER SHIVERING WHEN SHE CAUGHT A SHOWER OF RAINDROPS ON HER NECK AND HAIR +237-134500-0014-2359: AND (EMIL->AMIEL) MOWED HIS WAY SLOWLY DOWN TOWARD THE CHERRY TREES +237-134500-0015-2360: THAT SUMMER THE RAINS HAD BEEN SO MANY AND OPPORTUNE THAT IT WAS ALMOST MORE THAN (SHABATA->CHEBATA) AND HIS MAN COULD DO TO KEEP UP WITH THE CORN THE ORCHARD WAS A NEGLECTED WILDERNESS +237-134500-0016-2361: I DON'T KNOW ALL OF THEM BUT I KNOW LINDENS ARE +237-134500-0017-2362: IF I FEEL THAT WAY I FEEL THAT WAY +237-134500-0018-2363: HE REACHED UP AMONG THE BRANCHES AND BEGAN TO PICK THE SWEET INSIPID FRUIT LONG IVORY COLORED BERRIES TIPPED WITH FAINT PINK LIKE WHITE CORAL THAT FALL TO THE GROUND UNHEEDED ALL SUMMER THROUGH +237-134500-0019-2364: HE DROPPED A HANDFUL INTO HER LAP +237-134500-0020-2365: YES DON'T YOU +237-134500-0021-2366: OH EVER SO MUCH ONLY HE SEEMS KIND OF STAID AND SCHOOL TEACHERY +237-134500-0022-2367: WHEN SHE USED TO TELL ME ABOUT HIM I ALWAYS WONDERED WHETHER SHE WASN'T A LITTLE IN LOVE WITH HIM +237-134500-0023-2368: IT WOULD SERVE YOU ALL RIGHT IF SHE WALKED OFF WITH (CARL->KARL) +237-134500-0024-2369: I LIKE TO TALK TO (CARL->KARL) ABOUT NEW YORK AND WHAT A FELLOW CAN DO THERE +237-134500-0025-2370: OH (EMIL->AMIEL) +237-134500-0026-2371: SURELY YOU ARE NOT THINKING OF GOING OFF THERE +237-134500-0027-2372: MARIE'S FACE FELL UNDER HIS BROODING GAZE +237-134500-0028-2373: (I'M->I AM) SURE (ALEXANDRA HOPES->ALEXANDER HELPS) YOU WILL STAY ON HERE SHE MURMURED +237-134500-0029-2374: I DON'T WANT TO STAND AROUND AND LOOK ON +237-134500-0030-2375: I WANT TO BE DOING SOMETHING ON MY OWN 
ACCOUNT +237-134500-0031-2376: SOMETIMES I DON'T WANT TO DO ANYTHING AT ALL AND SOMETIMES I WANT TO PULL THE FOUR CORNERS OF THE DIVIDE TOGETHER HE THREW OUT HIS ARM AND BROUGHT IT BACK WITH A JERK SO LIKE A (TABLE CLOTH->TABLECLOTH) +237-134500-0032-2377: I GET TIRED OF SEEING (MEN->MAN) AND HORSES GOING UP AND DOWN UP AND DOWN +237-134500-0033-2378: I WISH YOU WEREN'T SO RESTLESS AND DIDN'T GET SO WORKED UP OVER THINGS SHE SAID SADLY +237-134500-0034-2379: THANK YOU HE RETURNED SHORTLY +237-134500-0035-2380: AND (YOU->WHO) NEVER USED TO BE CROSS TO ME +237-134500-0036-2381: I CAN'T PLAY WITH YOU LIKE A LITTLE BOY ANY MORE HE SAID SLOWLY THAT'S WHAT YOU MISS MARIE +237-134500-0037-2382: BUT (EMIL->AMIEL) IF I UNDERSTAND (THEN->IN) ALL OUR GOOD TIMES ARE OVER WE CAN NEVER DO NICE THINGS TOGETHER ANY MORE +237-134500-0038-2383: AND ANYHOW THERE'S NOTHING (TO->TOO) UNDERSTAND +237-134500-0039-2384: THAT WON'T LAST IT WILL GO AWAY AND THINGS WILL BE JUST AS THEY USED TO +237-134500-0040-2385: I PRAY FOR YOU BUT THAT'S NOT THE SAME AS IF YOU PRAYED YOURSELF +237-134500-0041-2386: I CAN'T PRAY TO HAVE THE THINGS I WANT HE SAID SLOWLY AND I WON'T PRAY NOT TO HAVE THEM NOT IF I'M DAMNED FOR IT +237-134500-0042-2387: THEN ALL OUR GOOD TIMES ARE OVER +260-123286-0000-200: SATURDAY AUGUST FIFTEENTH THE SEA UNBROKEN ALL ROUND NO LAND IN SIGHT +260-123286-0001-201: THE HORIZON SEEMS EXTREMELY DISTANT +260-123286-0002-202: ALL MY DANGER AND SUFFERINGS WERE NEEDED TO STRIKE A SPARK OF HUMAN FEELING OUT OF HIM BUT NOW THAT I AM WELL HIS NATURE HAS RESUMED ITS SWAY +260-123286-0003-203: YOU SEEM ANXIOUS MY UNCLE I SAID SEEING HIM CONTINUALLY WITH HIS GLASS TO HIS EYE ANXIOUS +260-123286-0004-204: ONE MIGHT BE WITH LESS REASON THAN NOW +260-123286-0005-205: I AM NOT COMPLAINING THAT THE RATE IS SLOW BUT THAT THE (SEA->SEAT) IS SO WIDE +260-123286-0006-206: WE ARE LOSING TIME AND THE FACT IS I HAVE NOT COME ALL THIS WAY TO TAKE A LITTLE SAIL UPON A POND ON A RAFT +260-123286-0007-207: HE CALLED (THIS->THE) SEA (A POND->UPON) AND OUR LONG VOYAGE TAKING A LITTLE SAIL +260-123286-0008-208: THEREFORE DON'T TALK TO ME ABOUT VIEWS AND PROSPECTS +260-123286-0009-209: I TAKE THIS AS MY ANSWER AND I LEAVE THE PROFESSOR TO BITE HIS LIPS WITH IMPATIENCE +260-123286-0010-210: SUNDAY AUGUST SIXTEENTH +260-123286-0011-211: NOTHING NEW (WEATHER->WHETHER) UNCHANGED THE WIND FRESHENS +260-123286-0012-212: BUT THERE SEEMED NO REASON (TO->OF) FEAR +260-123286-0013-213: THE SHADOW OF THE RAFT WAS CLEARLY OUTLINED UPON THE SURFACE OF THE WAVES +260-123286-0014-214: TRULY (THIS->THE) SEA IS OF INFINITE WIDTH +260-123286-0015-215: IT MUST BE AS WIDE AS THE MEDITERRANEAN OR THE ATLANTIC AND WHY NOT +260-123286-0016-216: THESE THOUGHTS AGITATED ME ALL DAY AND MY IMAGINATION SCARCELY CALMED DOWN AFTER SEVERAL HOURS (SLEEP->SLEEVE) +260-123286-0017-217: I SHUDDER AS I RECALL THESE MONSTERS TO MY REMEMBRANCE +260-123286-0018-218: I SAW AT THE HAMBURG MUSEUM THE SKELETON OF ONE OF THESE CREATURES THIRTY FEET IN LENGTH +260-123286-0019-219: I SUPPOSE PROFESSOR LIEDENBROCK WAS OF MY OPINION TOO AND EVEN SHARED MY FEARS FOR AFTER HAVING EXAMINED THE PICK HIS EYES TRAVERSED THE OCEAN FROM SIDE TO SIDE +260-123286-0020-220: TUESDAY AUGUST EIGHTEENTH +260-123286-0021-221: DURING HIS WATCH I SLEPT +260-123286-0022-222: TWO HOURS AFTERWARDS A TERRIBLE SHOCK AWOKE ME +260-123286-0023-223: THE RAFT WAS HEAVED UP ON A WATERY MOUNTAIN AND PITCHED DOWN AGAIN AT A DISTANCE OF TWENTY FATHOMS +260-123286-0024-224: THERE'S A (WHALE->WAIL) A 
(WHALE->WELL) CRIED THE PROFESSOR +260-123286-0025-225: (FLIGHT->FIGHT) WAS OUT OF THE QUESTION NOW THE REPTILES ROSE THEY WHEELED AROUND OUR LITTLE RAFT WITH A RAPIDITY GREATER THAN THAT OF EXPRESS TRAINS +260-123286-0026-226: TWO (MONSTERS->MASTERS) ONLY WERE CREATING ALL THIS COMMOTION AND BEFORE MY EYES ARE (TWO->TOO) REPTILES OF THE PRIMITIVE WORLD +260-123286-0027-227: I CAN DISTINGUISH THE EYE OF THE (ICHTHYOSAURUS->ICT THEASURUS) GLOWING LIKE A RED HOT (COAL->CO) AND AS LARGE AS A MAN'S HEAD +260-123286-0028-228: ITS JAW IS ENORMOUS AND ACCORDING TO NATURALISTS IT IS ARMED WITH NO LESS THAN ONE HUNDRED AND EIGHTY TWO TEETH +260-123286-0029-229: THOSE HUGE CREATURES ATTACKED EACH OTHER WITH THE GREATEST ANIMOSITY +260-123286-0030-230: SUDDENLY THE (ICHTHYOSAURUS->ICTUSORIS) AND THE (PLESIOSAURUS->PLUSIASURUS) DISAPPEAR BELOW LEAVING A (WHIRLPOOL->WAR POOL) EDDYING IN THE WATER +260-123286-0031-231: AS FOR THE (ICHTHYOSAURUS->ITTHIASORIS) HAS HE RETURNED (TO HIS->WHOSE) SUBMARINE CAVERN +260-123288-0000-232: THE ROARINGS BECOME LOST IN THE DISTANCE +260-123288-0001-233: THE WEATHER IF WE MAY USE (THAT->THE) TERM WILL CHANGE BEFORE (LONG->LAWN) +260-123288-0002-234: THE ATMOSPHERE IS CHARGED WITH (VAPOURS->VAPORS) PERVADED WITH THE ELECTRICITY GENERATED BY THE EVAPORATION OF (SALINE->SAILING) WATERS +260-123288-0003-235: THE ELECTRIC LIGHT CAN SCARCELY PENETRATE THROUGH THE DENSE CURTAIN WHICH (HAS->IS) DROPPED OVER THE THEATRE ON WHICH THE BATTLE OF THE ELEMENTS IS ABOUT TO BE WAGED +260-123288-0004-236: THE AIR IS HEAVY THE SEA IS CALM +260-123288-0005-237: FROM TIME TO TIME A FLEECY TUFT OF (MIST->MISTS) WITH YET SOME GLEAMING LIGHT LEFT UPON IT DROPS DOWN UPON THE DENSE FLOOR OF GREY AND LOSES ITSELF IN THE (OPAQUE->OPE) AND IMPENETRABLE MASS +260-123288-0006-238: THE ATMOSPHERE (IS->AS) EVIDENTLY CHARGED (AND->IN) SURCHARGED WITH ELECTRICITY +260-123288-0007-239: THE WIND NEVER LULLS BUT TO ACQUIRE INCREASED STRENGTH THE VAST BANK OF HEAVY CLOUDS IS A HUGE RESERVOIR OF FEARFUL WINDY GUSTS AND RUSHING STORMS +260-123288-0008-240: THERE'S A HEAVY STORM COMING ON I CRIED POINTING TOWARDS THE HORIZON +260-123288-0009-241: THOSE CLOUDS SEEM AS IF THEY WERE GOING TO CRUSH THE SEA +260-123288-0010-242: ON THE MAST ALREADY I SEE THE LIGHT PLAY OF A (LAMBENT->LAMENT) SAINT (ELMO'S->AIRABLE'S) FIRE THE OUTSTRETCHED (SAIL->SILL) CATCHES NOT A BREATH OF WIND AND HANGS LIKE A SHEET OF LEAD +260-123288-0011-243: BUT IF WE HAVE NOW CEASED TO ADVANCE WHY DO WE YET LEAVE THAT (SAIL->SALE) LOOSE WHICH AT THE FIRST SHOCK OF (THE->A) TEMPEST MAY CAPSIZE US IN A MOMENT +260-123288-0012-244: THAT WILL BE (*->THE) SAFEST NO NO NEVER +260-123288-0013-245: THE PILED UP (VAPOURS CONDENSE->VAPORS CONTENTS) INTO WATER AND THE AIR PUT INTO VIOLENT ACTION TO SUPPLY THE VACUUM LEFT BY THE CONDENSATION OF THE (MISTS->MIST) ROUSES ITSELF INTO A WHIRLWIND +260-123288-0014-246: HANS STIRS NOT +260-123288-0015-247: FROM THE UNDER SURFACE OF THE CLOUDS THERE ARE CONTINUAL (EMISSIONS->MISSIONS) OF LURID LIGHT ELECTRIC MATTER IS IN CONTINUAL EVOLUTION FROM THEIR COMPONENT MOLECULES THE GASEOUS ELEMENTS OF THE AIR NEED TO BE SLAKED WITH MOISTURE FOR INNUMERABLE COLUMNS OF WATER RUSH UPWARDS INTO THE AIR AND FALL BACK AGAIN IN WHITE FOAM +260-123288-0016-248: I REFER TO THE THERMOMETER IT INDICATES THE FIGURE IS OBLITERATED +260-123288-0017-249: IS THE ATMOSPHERIC CONDITION HAVING ONCE REACHED (THIS DENSITY->OSTENSITY) TO BECOME FINAL +260-123288-0018-250: THE RAFT BEARS ON STILL TO THE SOUTH EAST +260-123288-0019-251: 
AT NOON THE VIOLENCE OF THE STORM REDOUBLES +260-123288-0020-252: EACH OF US IS LASHED (TO->IN) SOME PART OF THE RAFT +260-123288-0021-253: THE WAVES RISE ABOVE OUR HEADS +260-123288-0022-254: THEY (SEEM->SEEMED) TO BE WE ARE LOST BUT I AM NOT SURE +260-123288-0023-255: HE (NODS->GNAWEDS) HIS CONSENT +260-123288-0024-256: THE (FIREBALL->FIRE BALL) HALF OF IT WHITE HALF AZURE BLUE AND THE SIZE OF A TEN INCH (SHELL->CHILL) MOVED SLOWLY ABOUT THE RAFT BUT REVOLVING ON ITS OWN AXIS WITH ASTONISHING VELOCITY AS IF WHIPPED ROUND BY THE FORCE OF THE WHIRLWIND +260-123288-0025-257: HERE IT COMES THERE IT GLIDES NOW IT IS UP THE RAGGED STUMP OF THE MAST THENCE IT LIGHTLY LEAPS ON THE PROVISION BAG DESCENDS WITH A LIGHT BOUND AND JUST SKIMS THE POWDER MAGAZINE HORRIBLE +260-123288-0026-258: WE SHALL BE BLOWN UP BUT NO THE DAZZLING DISK OF MYSTERIOUS LIGHT NIMBLY LEAPS ASIDE IT APPROACHES HANS WHO FIXES HIS BLUE EYE UPON IT STEADILY IT THREATENS THE HEAD OF MY UNCLE WHO FALLS UPON HIS KNEES WITH HIS HEAD DOWN TO AVOID IT +260-123288-0027-259: A SUFFOCATING SMELL OF NITROGEN FILLS THE AIR IT ENTERS THE THROAT IT FILLS THE LUNGS +260-123288-0028-260: WE SUFFER STIFLING PAINS +260-123440-0000-179: AND HOW ODD THE DIRECTIONS WILL LOOK +260-123440-0001-180: POOR ALICE +260-123440-0002-181: IT WAS THE WHITE RABBIT RETURNING SPLENDIDLY DRESSED WITH A PAIR OF WHITE KID GLOVES IN ONE HAND AND A LARGE FAN IN THE OTHER HE CAME TROTTING ALONG IN A GREAT HURRY MUTTERING TO HIMSELF AS HE CAME OH THE DUCHESS THE DUCHESS +260-123440-0003-182: OH WON'T SHE BE SAVAGE IF I'VE KEPT HER WAITING +260-123440-0004-183: ALICE TOOK UP THE FAN AND GLOVES AND AS THE HALL WAS VERY HOT SHE KEPT FANNING HERSELF ALL THE TIME SHE WENT ON TALKING DEAR DEAR HOW QUEER EVERYTHING IS TO DAY +260-123440-0005-184: AND YESTERDAY (THINGS->THANKS) WENT ON JUST AS USUAL +260-123440-0006-185: I WONDER IF I'VE BEEN CHANGED IN THE NIGHT +260-123440-0007-186: I ALMOST THINK I CAN REMEMBER FEELING (A->*) LITTLE DIFFERENT +260-123440-0008-187: I'LL TRY IF I KNOW ALL THE THINGS I USED TO KNOW +260-123440-0009-188: I SHALL NEVER GET TO TWENTY AT THAT RATE +260-123440-0010-189: HOW CHEERFULLY HE SEEMS TO GRIN HOW NEATLY SPREAD HIS CLAWS AND WELCOME LITTLE FISHES IN WITH GENTLY SMILING JAWS +260-123440-0011-190: NO I'VE MADE UP MY MIND ABOUT IT IF (I'M MABEL->I MAYBEL) I'LL STAY DOWN HERE +260-123440-0012-191: IT'LL BE NO USE (THEIR->THEY'RE) PUTTING THEIR HEADS DOWN AND SAYING COME UP AGAIN DEAR +260-123440-0013-192: I AM SO VERY TIRED OF BEING ALL ALONE HERE +260-123440-0014-193: AND I DECLARE IT'S TOO BAD THAT IT IS +260-123440-0015-194: I WISH I HADN'T CRIED SO MUCH SAID ALICE AS SHE SWAM ABOUT TRYING TO FIND HER WAY OUT +260-123440-0016-195: I SHALL BE PUNISHED FOR IT NOW I SUPPOSE BY BEING DROWNED IN MY OWN TEARS +260-123440-0017-196: THAT WILL BE A QUEER THING TO BE SURE +260-123440-0018-197: I AM VERY TIRED OF SWIMMING ABOUT HERE O MOUSE +260-123440-0019-198: CRIED ALICE AGAIN FOR THIS TIME THE MOUSE WAS BRISTLING ALL OVER AND SHE FELT CERTAIN IT MUST BE REALLY OFFENDED +260-123440-0020-199: WE WON'T TALK ABOUT HER ANY MORE IF YOU'D RATHER NOT WE INDEED +2830-3979-0000-1120: WE WANT YOU TO HELP US PUBLISH SOME LEADING WORK OF (LUTHER'S->LUTHERS) FOR THE GENERAL AMERICAN MARKET WILL YOU DO IT +2830-3979-0001-1121: THE CONDITION IS THAT I WILL BE PERMITTED TO MAKE LUTHER TALK AMERICAN (STREAMLINE->STREAM LINE) HIM SO TO SPEAK BECAUSE YOU WILL NEVER GET PEOPLE WHETHER IN OR OUTSIDE THE LUTHERAN CHURCH ACTUALLY TO READ LUTHER UNLESS WE MAKE HIM TALK 
AS HE WOULD TALK (TODAY->TO DAY) TO AMERICANS +2830-3979-0002-1122: LET US BEGIN WITH THAT HIS COMMENTARY (ON GALATIANS->ONGOLATIONS) +2830-3979-0003-1123: THE UNDERTAKING WHICH SEEMED SO ATTRACTIVE WHEN VIEWED AS A LITERARY TASK PROVED A MOST DIFFICULT ONE AND AT TIMES BECAME OPPRESSIVE +2830-3979-0004-1124: IT WAS WRITTEN IN LATIN +2830-3979-0005-1125: THE WORK HAD TO BE CONDENSED +2830-3979-0006-1126: A WORD SHOULD NOW BE SAID ABOUT THE ORIGIN OF LUTHER'S COMMENTARY (ON GALATIANS->ANGULATIONS) +2830-3979-0007-1127: MUCH LATER WHEN A FRIEND OF HIS WAS PREPARING AN (EDITION->ADDITION) OF ALL HIS LATIN WORKS HE REMARKED TO HIS HOME CIRCLE IF I HAD MY WAY ABOUT IT THEY WOULD REPUBLISH ONLY THOSE OF MY BOOKS WHICH HAVE DOCTRINE (MY GALATIANS->MIGALLATIONS) FOR INSTANCE +2830-3979-0008-1128: IN OTHER WORDS THESE THREE MEN TOOK DOWN THE LECTURES WHICH LUTHER ADDRESSED TO HIS STUDENTS IN THE COURSE OF GALATIANS AND (ROERER->ROAR) PREPARED THE MANUSCRIPT FOR THE PRINTER +2830-3979-0009-1129: IT PRESENTS LIKE NO OTHER OF LUTHER'S WRITINGS THE CENTRAL THOUGHT OF CHRISTIANITY THE JUSTIFICATION OF THE SINNER FOR THE SAKE OF CHRIST'S MERITS ALONE +2830-3979-0010-1130: BUT THE ESSENCE OF LUTHER'S LECTURES IS THERE +2830-3979-0011-1131: THE LORD WHO HAS GIVEN US POWER TO TEACH AND TO HEAR LET HIM ALSO GIVE US THE POWER TO SERVE AND TO DO LUKE (TWO->TOO) +2830-3979-0012-1132: THE WORD OF OUR GOD SHALL STAND (FOREVER->FOR EVER) +2830-3980-0000-1043: IN EVERY WAY THEY SOUGHT TO UNDERMINE THE AUTHORITY OF SAINT PAUL +2830-3980-0001-1044: THEY SAID TO THE GALATIANS YOU HAVE NO RIGHT TO THINK HIGHLY OF PAUL +2830-3980-0002-1045: HE WAS THE LAST TO TURN TO CHRIST +2830-3980-0003-1046: PAUL CAME LATER (AND IS->IN HIS) BENEATH US +2830-3980-0004-1047: INDEED HE PERSECUTED THE CHURCH OF CHRIST FOR A LONG TIME +2830-3980-0005-1048: DO YOU SUPPOSE THAT GOD FOR THE SAKE OF A FEW LUTHERAN HERETICS WOULD DISOWN HIS ENTIRE CHURCH +2830-3980-0006-1049: AGAINST THESE BOASTING FALSE APOSTLES PAUL BOLDLY DEFENDS HIS APOSTOLIC AUTHORITY AND MINISTRY +2830-3980-0007-1050: AS THE AMBASSADOR OF A GOVERNMENT IS HONORED FOR HIS OFFICE AND NOT FOR HIS PRIVATE PERSON SO THE MINISTER OF CHRIST SHOULD EXALT HIS OFFICE IN ORDER TO GAIN AUTHORITY AMONG MEN +2830-3980-0008-1051: PAUL TAKES PRIDE IN HIS MINISTRY NOT TO HIS OWN (PRAISE->PHRASE) BUT TO THE PRAISE OF GOD +2830-3980-0009-1052: PAUL AN APOSTLE NOT OF MEN ET CETERA +2830-3980-0010-1053: EITHER HE CALLS MINISTERS THROUGH THE AGENCY OF MEN OR HE CALLS THEM DIRECTLY AS HE CALLED THE PROPHETS AND APOSTLES +2830-3980-0011-1054: PAUL DECLARES THAT THE FALSE APOSTLES (WERE CALLED OR SENT->RECALL THEIR SCENT) NEITHER BY MEN NOR BY MAN +2830-3980-0012-1055: THE MOST THEY COULD CLAIM IS THAT THEY WERE SENT BY OTHERS +2830-3980-0013-1056: HE MENTIONS THE APOSTLES FIRST BECAUSE THEY WERE APPOINTED DIRECTLY BY GOD +2830-3980-0014-1057: THE CALL IS NOT TO BE TAKEN LIGHTLY +2830-3980-0015-1058: FOR A PERSON TO POSSESS KNOWLEDGE IS NOT ENOUGH +2830-3980-0016-1059: IT SPOILS ONE'S BEST WORK +2830-3980-0017-1060: WHEN I WAS A YOUNG MAN I THOUGHT PAUL WAS MAKING TOO MUCH OF HIS CALL +2830-3980-0018-1061: I DID NOT THEN REALIZE THE IMPORTANCE OF THE MINISTRY +2830-3980-0019-1062: I KNEW NOTHING OF THE DOCTRINE OF FAITH BECAUSE WE WERE TAUGHT SOPHISTRY INSTEAD OF CERTAINTY AND NOBODY UNDERSTOOD SPIRITUAL BOASTING +2830-3980-0020-1063: THIS IS NO SINFUL PRIDE IT IS (HOLY->WHOLLY) PRIDE +2830-3980-0021-1064: AND GOD THE FATHER WHO RAISED HIM FROM THE DEAD +2830-3980-0022-1065: THE (CLAUSE SEEMS 
SUPERFLUOUS->CLAWS SEEMED SUPERVOUS) ON FIRST SIGHT +2830-3980-0023-1066: THESE PERVERTERS OF THE RIGHTEOUSNESS OF CHRIST RESIST THE FATHER AND THE SON AND THE WORKS OF THEM BOTH +2830-3980-0024-1067: IN THIS WHOLE EPISTLE PAUL TREATS OF THE RESURRECTION OF CHRIST +2830-3980-0025-1068: BY HIS RESURRECTION CHRIST WON THE VICTORY OVER LAW SIN FLESH WORLD DEVIL DEATH HELL AND EVERY EVIL +2830-3980-0026-1069: (VERSE->FIRST) TWO +2830-3980-0027-1070: AND ALL THE BRETHREN WHICH ARE WITH ME +2830-3980-0028-1071: THIS SHOULD GO FAR IN SHUTTING THE MOUTHS OF THE FALSE APOSTLES +2830-3980-0029-1072: ALTHOUGH THE BRETHREN WITH ME ARE NOT APOSTLES LIKE MYSELF YET THEY ARE ALL OF ONE MIND WITH ME THINK WRITE AND TEACH AS I DO +2830-3980-0030-1073: THEY DO NOT GO WHERE THE ENEMIES OF THE GOSPEL PREDOMINATE THEY GO (WHERE->WITH) THE CHRISTIANS ARE +2830-3980-0031-1074: WHY DO THEY NOT INVADE THE CATHOLIC PROVINCES AND PREACH THEIR DOCTRINE TO GODLESS PRINCES BISHOPS AND DOCTORS AS WE HAVE DONE BY THE HELP OF GOD +2830-3980-0032-1075: WE LOOK FOR THAT REWARD WHICH (EYE->I) HATH NOT SEEN NOR EAR HEARD NEITHER HATH ENTERED INTO THE HEART OF MAN +2830-3980-0033-1076: NOT ALL THE GALATIANS HAD BECOME PERVERTED +2830-3980-0034-1077: THESE MEANS CANNOT BE CONTAMINATED +2830-3980-0035-1078: THEY (REMAIN->REMAINED) DIVINE REGARDLESS OF MEN'S OPINION +2830-3980-0036-1079: WHEREVER THE MEANS OF GRACE ARE FOUND THERE IS THE HOLY CHURCH EVEN THOUGH ANTICHRIST REIGNS THERE +2830-3980-0037-1080: SO MUCH FOR THE TITLE OF THE EPISTLE NOW FOLLOWS THE GREETING OF THE APOSTLE VERSE THREE +2830-3980-0038-1081: GRACE BE TO YOU (AND->IN) PEACE FROM GOD THE FATHER AND FROM OUR LORD JESUS CHRIST +2830-3980-0039-1082: THE TERMS OF GRACE AND PEACE ARE COMMON TERMS WITH PAUL AND ARE NOW PRETTY WELL UNDERSTOOD +2830-3980-0040-1083: THE GREETING OF THE APOSTLE IS REFRESHING +2830-3980-0041-1084: GRACE INVOLVES THE REMISSION OF SINS PEACE AND A HAPPY CONSCIENCE +2830-3980-0042-1085: THE WORLD (BRANDS->BRAINS) THIS A PERNICIOUS DOCTRINE +2830-3980-0043-1086: EXPERIENCE PROVES THIS +2830-3980-0044-1087: HOWEVER THE GRACE AND PEACE OF GOD WILL +2830-3980-0045-1088: MEN SHOULD NOT SPECULATE ABOUT THE NATURE OF GOD +2830-3980-0046-1089: WAS IT NOT ENOUGH TO SAY FROM GOD THE FATHER +2830-3980-0047-1090: TO DO SO IS TO LOSE GOD ALTOGETHER BECAUSE GOD BECOMES INTOLERABLE WHEN WE SEEK TO MEASURE AND TO COMPREHEND HIS INFINITE MAJESTY +2830-3980-0048-1091: HE CAME DOWN TO EARTH LIVED AMONG MEN SUFFERED WAS CRUCIFIED AND THEN HE DIED STANDING CLEARLY BEFORE US SO THAT OUR HEARTS AND EYES MAY FASTEN UPON HIM +2830-3980-0049-1092: EMBRACE HIM AND FORGET ABOUT THE NATURE OF GOD +2830-3980-0050-1093: DID NOT CHRIST HIMSELF SAY I AM THE WAY AND THE TRUTH AND THE LIFE NO MAN COMETH UNTO THE FATHER BUT BY ME +2830-3980-0051-1094: WHEN YOU ARGUE ABOUT THE NATURE OF GOD APART FROM THE QUESTION OF JUSTIFICATION YOU MAY BE AS PROFOUND AS YOU LIKE +2830-3980-0052-1095: WE ARE TO HEAR CHRIST WHO HAS BEEN APPOINTED BY THE FATHER AS OUR DIVINE TEACHER +2830-3980-0053-1096: AT THE SAME TIME PAUL CONFIRMS OUR CREED THAT CHRIST IS VERY GOD +2830-3980-0054-1097: THAT CHRIST IS VERY GOD IS APPARENT IN THAT PAUL ASCRIBES TO HIM DIVINE POWERS EQUALLY WITH THE FATHER AS FOR INSTANCE THE POWER (TO DISPENSE->DOES SPENCE) GRACE AND PEACE +2830-3980-0055-1098: TO BESTOW PEACE AND GRACE LIES IN THE PROVINCE OF GOD WHO ALONE CAN CREATE THESE BLESSINGS THE ANGELS CANNOT +2830-3980-0056-1099: OTHERWISE PAUL SHOULD HAVE WRITTEN GRACE FROM GOD THE FATHER (AND->IN) PEACE FROM 
OUR LORD JESUS CHRIST +2830-3980-0057-1100: THE ARIANS TOOK CHRIST FOR A NOBLE AND PERFECT CREATURE SUPERIOR EVEN TO THE ANGELS BECAUSE BY HIM GOD CREATED HEAVEN AND EARTH +2830-3980-0058-1101: MOHAMMED ALSO SPEAKS HIGHLY OF CHRIST +2830-3980-0059-1102: PAUL STICKS TO HIS THEME +2830-3980-0060-1103: HE NEVER LOSES SIGHT OF THE PURPOSE OF HIS EPISTLE +2830-3980-0061-1104: NOT GOLD OR SILVER OR (PASCHAL->PASSIONAL) LAMBS OR AN ANGEL BUT HIMSELF WHAT FOR +2830-3980-0062-1105: NOT FOR A CROWN OR A KINGDOM OR (OUR->A) GOODNESS (BUT FOR->BEFORE) OUR SINS +2830-3980-0063-1106: UNDERSCORE THESE WORDS FOR THEY ARE FULL OF COMFORT FOR SORE CONSCIENCES +2830-3980-0064-1107: HOW MAY WE OBTAIN REMISSION OF OUR SINS +2830-3980-0065-1108: PAUL ANSWERS THE MAN WHO IS NAMED JESUS CHRIST AND THE SON OF GOD GAVE HIMSELF FOR OUR SINS +2830-3980-0066-1109: SINCE CHRIST WAS GIVEN FOR OUR SINS IT STANDS TO REASON THAT THEY CANNOT BE PUT AWAY BY OUR OWN EFFORTS +2830-3980-0067-1110: THIS SENTENCE ALSO DEFINES OUR SINS AS GREAT SO GREAT IN FACT THAT THE WHOLE WORLD COULD NOT MAKE AMENDS FOR A SINGLE SIN +2830-3980-0068-1111: THE GREATNESS OF THE RANSOM CHRIST THE SON OF GOD INDICATES THIS +2830-3980-0069-1112: THE VICIOUS CHARACTER OF SIN IS BROUGHT OUT BY THE WORDS WHO GAVE HIMSELF FOR OUR SINS +2830-3980-0070-1113: BUT WE ARE CARELESS WE MAKE LIGHT OF SIN +2830-3980-0071-1114: WE THINK THAT BY SOME LITTLE WORK OR MERIT WE CAN DISMISS (SIN->IN) +2830-3980-0072-1115: THIS PASSAGE THEN BEARS OUT THE FACT THAT ALL MEN ARE SOLD UNDER SIN +2830-3980-0073-1116: THIS ATTITUDE SPRINGS FROM A FALSE CONCEPTION OF SIN THE CONCEPTION THAT SIN IS A SMALL MATTER EASILY TAKEN CARE OF BY GOOD WORKS THAT WE MUST PRESENT OURSELVES (UNTO->INTO) GOD WITH A GOOD CONSCIENCE THAT WE MUST FEEL NO SIN BEFORE WE MAY FEEL THAT CHRIST WAS GIVEN FOR OUR SINS +2830-3980-0074-1117: THIS ATTITUDE IS UNIVERSAL AND PARTICULARLY DEVELOPED IN THOSE WHO CONSIDER THEMSELVES BETTER THAN OTHERS +2830-3980-0075-1118: BUT THE REAL SIGNIFICANCE AND COMFORT OF THE WORDS FOR OUR SINS IS LOST UPON THEM +2830-3980-0076-1119: ON THE OTHER HAND WE ARE NOT TO REGARD THEM AS SO TERRIBLE THAT WE MUST DESPAIR +2961-960-0000-497: HE PASSES ABRUPTLY FROM PERSONS TO IDEAS AND NUMBERS AND FROM IDEAS AND NUMBERS TO PERSONS FROM THE HEAVENS TO (MAN->MEN) FROM ASTRONOMY TO PHYSIOLOGY HE CONFUSES OR RATHER DOES NOT DISTINGUISH SUBJECT AND OBJECT FIRST AND FINAL CAUSES AND IS DREAMING OF GEOMETRICAL FIGURES LOST IN A FLUX OF SENSE +2961-960-0001-498: THE INFLUENCE (WITH->WHICH) THE (TIMAEUS->TIMAS) HAS EXERCISED UPON POSTERITY IS DUE PARTLY TO A MISUNDERSTANDING +2961-960-0002-499: IN THE SUPPOSED DEPTHS OF THIS DIALOGUE THE NEO (PLATONISTS->PLATINISTS) FOUND HIDDEN MEANINGS (AND->IN) CONNECTIONS WITH THE JEWISH AND CHRISTIAN SCRIPTURES AND OUT OF THEM THEY ELICITED DOCTRINES QUITE AT VARIANCE WITH THE SPIRIT OF PLATO +2961-960-0003-500: THEY WERE ABSORBED IN HIS THEOLOGY AND WERE UNDER THE DOMINION OF HIS NAME WHILE THAT WHICH WAS TRULY GREAT AND TRULY (CHARACTERISTIC->CORRECTURISTIC) IN HIM HIS EFFORT TO REALIZE AND CONNECT ABSTRACTIONS WAS NOT UNDERSTOOD BY THEM AT ALL +2961-960-0004-501: THERE IS NO DANGER OF THE MODERN COMMENTATORS ON THE (TIMAEUS->TIMEUS) FALLING INTO THE ABSURDITIES OF THE (NEO PLATONISTS->NEOPLATANISTS) +2961-960-0005-502: IN THE PRESENT DAY WE ARE WELL AWARE THAT AN ANCIENT PHILOSOPHER IS TO BE INTERPRETED FROM HIMSELF AND BY THE CONTEMPORARY HISTORY OF THOUGHT +2961-960-0006-503: THE FANCIES OF THE (NEO PLATONISTS->NEW PLATINISTS) ARE ONLY 
INTERESTING TO US BECAUSE THEY EXHIBIT A PHASE OF THE HUMAN MIND WHICH (PREVAILED->PREVAIL) WIDELY IN THE FIRST CENTURIES OF THE CHRISTIAN ERA AND IS NOT WHOLLY EXTINCT IN OUR OWN DAY +2961-960-0007-504: BUT THEY HAVE NOTHING TO DO WITH THE INTERPRETATION OF PLATO AND IN SPIRIT THEY ARE OPPOSED TO HIM +2961-960-0008-505: WE DO NOT KNOW HOW PLATO WOULD HAVE ARRANGED HIS OWN (DIALOGUES->DIALECTS) OR WHETHER THE THOUGHT OF ARRANGING ANY OF THEM BESIDES THE (TWO TRILOGIES->TUTRILOGIES) WHICH HE HAS EXPRESSLY CONNECTED WAS EVER PRESENT TO HIS MIND +2961-960-0009-506: THE DIALOGUE IS PRIMARILY CONCERNED WITH THE ANIMAL CREATION INCLUDING UNDER THIS TERM THE HEAVENLY BODIES AND WITH MAN ONLY AS ONE AMONG THE ANIMALS +2961-960-0010-507: BUT HE HAS NOT AS YET (DEFINED->THE FIND) THIS INTERMEDIATE TERRITORY WHICH LIES SOMEWHERE BETWEEN MEDICINE AND MATHEMATICS AND HE WOULD HAVE FELT THAT THERE WAS AS GREAT AN IMPIETY IN RANKING THEORIES OF PHYSICS FIRST IN THE ORDER OF KNOWLEDGE AS IN PLACING THE BODY BEFORE THE SOUL +2961-960-0011-508: WITH (HERACLEITUS->HERACLITUS) HE ACKNOWLEDGES THE PERPETUAL FLUX LIKE (ANAXAGORAS->AN EXAGGARIST) HE ASSERTS THE PREDOMINANCE OF MIND ALTHOUGH ADMITTING AN ELEMENT OF NECESSITY WHICH REASON IS INCAPABLE OF SUBDUING LIKE THE (PYTHAGOREANS->PITHAGORIANS) HE SUPPOSES THE MYSTERY OF THE WORLD TO BE CONTAINED IN NUMBER +2961-960-0012-509: MANY IF NOT ALL THE ELEMENTS OF THE (PRE SOCRATIC->PRESOCRATIC) PHILOSOPHY ARE INCLUDED IN THE (TIMAEUS->TIMIUS) +2961-960-0013-510: IT IS PROBABLE THAT THE RELATION OF THE IDEAS TO GOD OR OF GOD TO THE WORLD WAS DIFFERENTLY CONCEIVED BY HIM AT DIFFERENT TIMES OF HIS LIFE +2961-960-0014-511: THE IDEAS ALSO REMAIN BUT THEY HAVE BECOME TYPES IN NATURE FORMS OF MEN ANIMALS BIRDS FISHES +2961-960-0015-512: THE STYLE AND PLAN OF THE (TIMAEUS->TIMIRS) DIFFER GREATLY FROM THAT OF ANY OTHER OF THE PLATONIC DIALOGUES +2961-960-0016-513: BUT PLATO HAS NOT THE SAME (MASTERY->MYSTERY) OVER HIS INSTRUMENT WHICH HE EXHIBITS IN THE (PHAEDRUS->FEEDRESS) OR (SYMPOSIUM->SUPPOSIUM) +2961-960-0017-514: NOTHING CAN EXCEED THE BEAUTY OR ART OF (THE->*) INTRODUCTION IN WHICH (HE IS->HIS) USING WORDS AFTER HIS ACCUSTOMED MANNER +2961-960-0018-515: BUT IN THE REST OF THE WORK THE POWER OF LANGUAGE SEEMS TO FAIL HIM AND THE DRAMATIC FORM IS WHOLLY GIVEN UP +2961-960-0019-516: HE COULD WRITE IN ONE STYLE BUT NOT IN ANOTHER AND THE GREEK LANGUAGE HAD NOT AS YET BEEN FASHIONED BY ANY POET OR PHILOSOPHER TO DESCRIBE PHYSICAL PHENOMENA +2961-960-0020-517: AND HENCE WE FIND THE SAME SORT OF CLUMSINESS IN THE (TIMAEUS->TIMAS) OF PLATO WHICH CHARACTERIZES THE PHILOSOPHICAL POEM OF LUCRETIUS +2961-960-0021-518: THERE IS A WANT OF FLOW AND OFTEN A DEFECT OF RHYTHM THE MEANING IS SOMETIMES OBSCURE AND THERE IS A GREATER USE OF APPOSITION (AND->IN) MORE OF REPETITION THAN OCCURS IN PLATO'S EARLIER WRITINGS +2961-960-0022-519: PLATO HAD NOT THE COMMAND OF HIS MATERIALS WHICH WOULD HAVE ENABLED HIM TO PRODUCE A PERFECT WORK OF ART +2961-961-0000-520: SOCRATES BEGINS (THE TIMAEUS->TO TIMAS) WITH A SUMMARY OF THE REPUBLIC +2961-961-0001-521: AND NOW HE DESIRES TO SEE THE IDEAL STATE SET IN MOTION HE WOULD LIKE TO KNOW HOW SHE BEHAVED IN SOME GREAT STRUGGLE +2961-961-0002-522: AND THEREFORE TO YOU I TURN (TIMAEUS->TO ME AS) CITIZEN OF LOCRIS WHO ARE AT ONCE A PHILOSOPHER (AND->IN) A STATESMAN AND TO YOU (CRITIAS->CRITIUS) WHOM ALL ATHENIANS KNOW TO BE SIMILARLY ACCOMPLISHED AND TO HERMOCRATES (WHO IS->WHOSE) ALSO FITTED BY NATURE AND EDUCATION TO SHARE IN OUR DISCOURSE 
+2961-961-0003-523: I WILL IF (TIMAEUS->TO ME AS) APPROVES I APPROVE +2961-961-0004-524: LISTEN THEN SOCRATES TO A TALE OF (SOLON'S->SILENCE) WHO BEING THE FRIEND OF (DROPIDAS MY->DROPIDUS BY) GREAT GRANDFATHER TOLD IT TO MY GRANDFATHER (CRITIAS->CRITIUS) AND HE TOLD ME +2961-961-0005-525: SOME POEMS OF (SOLON->SOLID) WERE RECITED BY THE BOYS +2961-961-0006-526: AND WHAT WAS THE SUBJECT OF THE POEM SAID THE PERSON WHO MADE THE REMARK +2961-961-0007-527: THE SUBJECT WAS A VERY NOBLE ONE HE DESCRIBED THE MOST FAMOUS ACTION IN WHICH THE ATHENIAN PEOPLE WERE EVER ENGAGED +2961-961-0008-528: BUT THE MEMORY OF THEIR EXPLOITS (HAS->HAD) PASSED AWAY OWING TO THE LAPSE OF TIME AND THE EXTINCTION OF THE ACTORS +2961-961-0009-529: TELL US SAID THE OTHER THE WHOLE STORY AND (WHERE SOLON->WEAR SOLEMN) HEARD (THE->THIS) STORY +2961-961-0010-530: BUT IN EGYPT THE TRADITIONS OF OUR OWN AND OTHER LANDS ARE BY US REGISTERED (FOR EVER->FOREVER) IN OUR TEMPLES +2961-961-0011-531: THE GENEALOGIES WHICH YOU HAVE RECITED TO US OUT OF YOUR OWN (ANNALS SOLON->ANNAL SOLEMN) ARE A MERE CHILDREN'S STORY +2961-961-0012-532: FOR IN THE TIMES BEFORE THE GREAT FLOOD ATHENS WAS THE GREATEST AND BEST OF CITIES AND (DID->DEAD) THE NOBLEST DEEDS AND HAD THE BEST CONSTITUTION OF ANY UNDER THE FACE OF HEAVEN +2961-961-0013-533: (SOLON->SULLEN) MARVELLED AND DESIRED TO BE INFORMED OF THE PARTICULARS +2961-961-0014-534: NINE THOUSAND YEARS HAVE ELAPSED SINCE (SHE FOUNDED->YOU FOUND IT) YOURS AND EIGHT THOUSAND SINCE (SHE FOUNDED->YOU FOUND IT) OURS AS OUR ANNALS RECORD +2961-961-0015-535: MANY LAWS EXIST AMONG US WHICH ARE THE COUNTERPART OF YOURS AS THEY WERE IN THE OLDEN TIME +2961-961-0016-536: I WILL BRIEFLY DESCRIBE THEM TO YOU AND YOU SHALL READ THE ACCOUNT OF THEM AT YOUR LEISURE IN THE SACRED REGISTERS +2961-961-0017-537: OBSERVE AGAIN WHAT CARE THE LAW TOOK IN THE PURSUIT OF WISDOM SEARCHING OUT THE DEEP THINGS OF THE WORLD AND APPLYING THEM TO THE USE OF (MAN->MEN) +2961-961-0018-538: THE MOST (FAMOUS->FAME AS) OF THEM ALL WAS THE OVERTHROW OF THE ISLAND OF ATLANTIS +2961-961-0019-539: FOR AT THE PERIL OF HER OWN EXISTENCE AND WHEN THE (OTHER->OTTER) HELLENES HAD DESERTED HER SHE REPELLED (THE->*) INVADER AND OF HER OWN ACCORD GAVE LIBERTY TO ALL THE NATIONS WITHIN THE PILLARS +2961-961-0020-540: THIS IS THE EXPLANATION OF THE SHALLOWS WHICH ARE FOUND IN THAT PART OF THE ATLANTIC OCEAN +2961-961-0021-541: BUT I WOULD NOT SPEAK AT THE TIME BECAUSE I WANTED TO REFRESH MY MEMORY +2961-961-0022-542: THEN (NOW->THOU) LET ME EXPLAIN TO YOU THE ORDER OF OUR ENTERTAINMENT FIRST (TIMAEUS->TIMAS) WHO IS A NATURAL PHILOSOPHER WILL SPEAK OF THE ORIGIN OF THE WORLD GOING DOWN TO THE CREATION OF (MAN->MEN) AND THEN I SHALL RECEIVE THE MEN WHOM HE HAS CREATED AND SOME OF WHOM WILL HAVE BEEN EDUCATED BY YOU AND INTRODUCE THEM TO YOU AS THE LOST ATHENIAN CITIZENS OF WHOM THE EGYPTIAN (RECORD->RECORDS) SPOKE +3570-5694-0000-2433: (BUT->BETTER) ALREADY AT A POINT IN ECONOMIC EVOLUTION FAR (ANTEDATING->ANTIDATING) THE EMERGENCE OF THE LADY (SPECIALISED->SPECIALIZED) CONSUMPTION OF GOODS AS AN EVIDENCE OF PECUNIARY STRENGTH HAD BEGUN TO WORK OUT IN A MORE OR LESS (ELABORATE->CELEBRATE) SYSTEM +3570-5694-0001-2434: (THE UTILITY->THEATILITY) OF CONSUMPTION AS AN EVIDENCE OF WEALTH IS TO BE CLASSED AS A DERIVATIVE GROWTH +3570-5694-0002-2435: SUCH CONSUMPTION AS FALLS (TO->THROUGH) THE WOMEN IS MERELY INCIDENTAL TO THEIR WORK IT IS A MEANS TO THEIR CONTINUED (LABOUR->LABOR) AND NOT (A->TO) CONSUMPTION DIRECTED TO THEIR OWN COMFORT AND 
(FULNESS->FULLNESS) OF LIFE +3570-5694-0003-2436: WITH A FURTHER ADVANCE IN CULTURE THIS (TABU->TABOO) MAY (CHANGE->CHANGED) INTO SIMPLE CUSTOM OF A MORE OR LESS RIGOROUS CHARACTER BUT WHATEVER BE THE THEORETICAL BASIS OF THE DISTINCTION WHICH IS MAINTAINED WHETHER IT BE (*->AT) A (TABU->BOO) OR A LARGER CONVENTIONALITY THE FEATURES OF THE CONVENTIONAL SCHEME OF CONSUMPTION DO NOT CHANGE EASILY +3570-5694-0004-2437: IN THE NATURE OF THINGS LUXURIES AND THE COMFORTS OF LIFE BELONG TO THE LEISURE CLASS +3570-5694-0005-2438: UNDER THE (TABU->TABOO) CERTAIN VICTUALS AND MORE PARTICULARLY CERTAIN BEVERAGES ARE STRICTLY RESERVED FOR THE USE OF THE SUPERIOR CLASS +3570-5694-0006-2439: (DRUNKENNESS->DRINKENNESS) AND THE OTHER PATHOLOGICAL CONSEQUENCES OF THE FREE USE OF STIMULANTS THEREFORE TEND IN THEIR TURN TO BECOME (HONORIFIC->UNERRIFIC) AS BEING A MARK AT THE SECOND REMOVE OF THE SUPERIOR STATUS OF THOSE WHO ARE ABLE TO AFFORD THE INDULGENCE +3570-5694-0007-2440: IT HAS EVEN HAPPENED THAT THE NAME FOR CERTAIN DISEASED CONDITIONS OF THE BODY ARISING FROM SUCH AN ORIGIN HAS PASSED INTO EVERYDAY SPEECH AS A SYNONYM FOR NOBLE OR GENTLE +3570-5694-0008-2441: THE CONSUMPTION OF LUXURIES IN THE TRUE SENSE IS A CONSUMPTION DIRECTED TO THE COMFORT OF THE CONSUMER HIMSELF AND IS THEREFORE A MARK OF THE MASTER +3570-5694-0009-2442: WITH MANY QUALIFICATIONS WITH MORE QUALIFICATIONS AS THE PATRIARCHAL TRADITION HAS GRADUALLY WEAKENED THE GENERAL RULE IS FELT TO BE RIGHT AND BINDING THAT WOMEN SHOULD CONSUME ONLY FOR THE BENEFIT OF THEIR MASTERS +3570-5694-0010-2443: THE OBJECTION OF COURSE PRESENTS ITSELF THAT EXPENDITURE ON WOMEN'S DRESS AND HOUSEHOLD PARAPHERNALIA IS AN OBVIOUS EXCEPTION TO THIS RULE BUT IT WILL APPEAR IN THE SEQUEL THAT THIS EXCEPTION IS MUCH MORE OBVIOUS THAN SUBSTANTIAL +3570-5694-0011-2444: THE CUSTOM OF FESTIVE GATHERINGS PROBABLY ORIGINATED IN MOTIVES OF CONVIVIALITY AND RELIGION THESE MOTIVES ARE ALSO PRESENT IN THE LATER DEVELOPMENT BUT THEY DO NOT CONTINUE TO BE THE SOLE MOTIVES +3570-5694-0012-2445: THERE IS A MORE OR LESS ELABORATE SYSTEM OF RANK AND (GRADES->GRATES) +3570-5694-0013-2446: THIS DIFFERENTIATION IS FURTHERED BY THE INHERITANCE OF WEALTH AND THE CONSEQUENT INHERITANCE OF GENTILITY +3570-5694-0014-2447: MANY OF THESE (AFFILIATED->HAVE FILLIOTTED) GENTLEMEN OF LEISURE ARE AT THE SAME TIME (LESSER MEN->LESS AMEN) OF SUBSTANCE IN THEIR OWN RIGHT SO THAT SOME OF THEM ARE SCARCELY AT ALL OTHERS ONLY PARTIALLY TO BE RATED AS VICARIOUS CONSUMERS +3570-5694-0015-2448: SO MANY OF THEM HOWEVER AS MAKE UP THE RETAINER AND HANGERS ON OF THE PATRON MAY BE CLASSED AS VICARIOUS CONSUMER WITHOUT QUALIFICATION +3570-5694-0016-2449: MANY OF THESE AGAIN AND ALSO MANY OF THE OTHER ARISTOCRACY OF LESS DEGREE HAVE IN TURN ATTACHED TO THEIR PERSONS A MORE OR LESS COMPREHENSIVE GROUP OF VICARIOUS CONSUMER IN THE PERSONS OF THEIR WIVES AND CHILDREN THEIR SERVANTS RETAINERS ET CETERA +3570-5694-0017-2450: THE WEARING OF UNIFORMS (OR->A) LIVERIES IMPLIES A CONSIDERABLE DEGREE OF DEPENDENCE AND MAY EVEN BE SAID TO BE A MARK OF SERVITUDE REAL OR OSTENSIBLE +3570-5694-0018-2451: THE WEARERS OF UNIFORMS AND LIVERIES MAY BE ROUGHLY DIVIDED INTO TWO CLASSES THE FREE AND THE SERVILE OR THE NOBLE AND THE IGNOBLE +3570-5694-0019-2452: BUT THE GENERAL DISTINCTION IS NOT ON THAT ACCOUNT TO BE OVERLOOKED +3570-5694-0020-2453: SO THOSE (OFFICES->OFFICERS) WHICH ARE BY RIGHT THE PROPER EMPLOYMENT OF THE LEISURE CLASS ARE NOBLE SUCH AS GOVERNMENT FIGHTING HUNTING THE CARE OF ARMS AND ACCOUTREMENTS AND THE 
LIKE IN SHORT THOSE WHICH MAY BE CLASSED AS OSTENSIBLY PREDATORY EMPLOYMENTS +3570-5694-0021-2454: WHENEVER AS IN THESE CASES THE MENIAL SERVICE IN QUESTION HAS TO DO DIRECTLY WITH (THE->A) PRIMARY LEISURE EMPLOYMENTS OF FIGHTING AND HUNTING IT EASILY ACQUIRES A REFLECTED HONORIFIC CHARACTER +3570-5694-0022-2455: THE LIVERY BECOMES OBNOXIOUS TO NEARLY ALL WHO ARE REQUIRED TO WEAR IT +3570-5695-0000-2456: (IN->AND) A GENERAL WAY THOUGH NOT WHOLLY NOR CONSISTENTLY THESE TWO GROUPS COINCIDE +3570-5695-0001-2457: THE DEPENDENT WHO WAS FIRST DELEGATED FOR THESE DUTIES WAS THE WIFE OR THE CHIEF WIFE AND AS WOULD BE EXPECTED IN THE LATER DEVELOPMENT OF THE INSTITUTION WHEN THE NUMBER OF PERSONS BY WHOM THESE DUTIES ARE (CUSTOMARILY->CUSTOMARY) PERFORMED (GRADUALLY->GRADUAL AND) NARROWS THE WIFE REMAINS THE LAST +3570-5695-0002-2458: BUT AS WE DESCEND THE SOCIAL SCALE THE POINT IS PRESENTLY REACHED WHERE THE DUTIES OF (VICARIOUS->YCARIOUS) LEISURE AND CONSUMPTION DEVOLVE UPON THE WIFE ALONE +3570-5695-0003-2459: IN THE COMMUNITIES OF THE WESTERN CULTURE THIS POINT IS AT PRESENT FOUND AMONG THE LOWER MIDDLE CLASS +3570-5695-0004-2460: IF BEAUTY OR COMFORT IS ACHIEVED AND IT IS A MORE OR LESS FORTUITOUS CIRCUMSTANCE IF THEY ARE THEY MUST BE ACHIEVED BY MEANS AND METHODS THAT COMMEND THEMSELVES TO THE GREAT ECONOMIC LAW OF WASTED EFFORT +3570-5695-0005-2461: THE MAN OF THE HOUSEHOLD ALSO CAN DO SOMETHING IN THIS DIRECTION AND INDEED HE COMMONLY DOES BUT WITH A STILL LOWER (DESCENT->DISSENT) INTO THE LEVELS OF INDIGENCE ALONG THE MARGIN OF THE SLUMS THE MAN AND PRESENTLY ALSO THE CHILDREN VIRTUALLY (CEASE->SEIZED) TO CONSUME VALUABLE GOODS FOR APPEARANCES AND THE WOMAN REMAINS VIRTUALLY THE SOLE EXPONENT OF THE HOUSEHOLD'S PECUNIARY DECENCY +3570-5695-0006-2462: VERY MUCH OF SQUALOR AND DISCOMFORT WILL BE ENDURED BEFORE THE LAST TRINKET OR THE LAST (PRETENSE->PRETENCE) OF PECUNIARY (DECENCY IS->DECENCIES) PUT AWAY +3570-5695-0007-2463: THERE IS NO CLASS AND NO COUNTRY THAT HAS YIELDED SO (ABJECTLY->OBJECTLY) BEFORE THE PRESSURE OF PHYSICAL WANT AS TO DENY THEMSELVES ALL GRATIFICATION OF THIS HIGHER OR SPIRITUAL NEED +3570-5695-0008-2464: THE QUESTION IS WHICH OF THE TWO METHODS WILL MOST EFFECTIVELY REACH THE PERSONS WHOSE CONVICTIONS IT IS DESIRED TO (AFFECT->EFFECT) +3570-5695-0009-2465: EACH WILL THEREFORE SERVE ABOUT EQUALLY WELL DURING THE EARLIER STAGES OF SOCIAL GROWTH +3570-5695-0010-2466: THE MODERN ORGANIZATION OF INDUSTRY WORKS IN THE SAME DIRECTION ALSO BY ANOTHER LINE +3570-5695-0011-2467: IT IS EVIDENT THEREFORE THAT THE PRESENT TREND OF THE DEVELOPMENT IS IN THE DIRECTION OF HEIGHTENING THE UTILITY OF CONSPICUOUS CONSUMPTION AS COMPARED WITH LEISURE +3570-5695-0012-2468: IT IS ALSO NOTICEABLE THAT THE (SERVICEABILITY->SURFABILITY) OF CONSUMPTION AS A MEANS OF REPUTE AS WELL AS THE INSISTENCE ON IT AS AN ELEMENT OF DECENCY IS AT ITS BEST IN THOSE PORTIONS OF THE COMMUNITY WHERE THE HUMAN CONTACT OF THE INDIVIDUAL IS WIDEST AND THE MOBILITY OF THE POPULATION IS GREATEST +3570-5695-0013-2469: CONSUMPTION BECOMES A LARGER ELEMENT IN THE STANDARD OF LIVING IN THE CITY THAN IN THE COUNTRY +3570-5695-0014-2470: AMONG THE COUNTRY POPULATION ITS (PLACE IS->PLACES) TO SOME EXTENT TAKEN BY SAVINGS AND HOME COMFORTS KNOWN THROUGH THE MEDIUM OF (NEIGHBORHOOD GOSSIP->NEIGHBOURHOOD GOSSIPS) SUFFICIENTLY TO SERVE THE LIKE GENERAL PURPOSE OF PECUNIARY REPUTE +3570-5695-0015-2471: THE RESULT IS A GREAT MOBILITY OF THE LABOR EMPLOYED IN PRINTING PERHAPS GREATER THAN IN ANY OTHER EQUALLY WELL DEFINED AND 
CONSIDERABLE BODY OF WORKMEN +3570-5696-0000-2472: UNDER THE SIMPLE TEST OF EFFECTIVENESS FOR ADVERTISING WE SHOULD EXPECT TO FIND LEISURE AND THE CONSPICUOUS CONSUMPTION OF GOODS DIVIDING THE FIELD OF PECUNIARY EMULATION PRETTY EVENLY BETWEEN THEM AT THE OUTSET +3570-5696-0001-2473: BUT THE ACTUAL COURSE OF DEVELOPMENT HAS BEEN SOMEWHAT DIFFERENT FROM THIS IDEAL SCHEME LEISURE HELD THE FIRST PLACE AT THE START AND CAME TO (HOLD A->ALL THE) RANK (VERY MUCH->VEREMENT) ABOVE WASTEFUL CONSUMPTION OF GOODS BOTH AS A DIRECT EXPONENT OF WEALTH AND AS AN ELEMENT IN THE STANDARD OF DECENCY DURING THE (QUASI->COURSE I) PEACEABLE CULTURE +3570-5696-0002-2474: (OTHER->ARE THE) CIRCUMSTANCES PERMITTING THAT INSTINCT DISPOSES MEN TO LOOK WITH (FAVOR->FAVOUR) UPON PRODUCTIVE EFFICIENCY AND ON WHATEVER IS OF HUMAN USE +3570-5696-0003-2475: (A->I) RECONCILIATION BETWEEN THE TWO CONFLICTING REQUIREMENTS IS (EFFECTED->AFFECTED) BY (A->*) RESORT TO MAKE BELIEVE (MANY AND->MEN IN) INTRICATE POLITE OBSERVANCES AND SOCIAL DUTIES OF A CEREMONIAL NATURE ARE DEVELOPED MANY ORGANIZATIONS ARE FOUNDED WITH SOME SPECIOUS OBJECT OF AMELIORATION EMBODIED IN THEIR OFFICIAL (STYLE AND->STYLANT) TITLE THERE IS MUCH COMING AND GOING AND A DEAL OF TALK TO THE END THAT THE (TALKERS->TALK IS) MAY NOT HAVE OCCASION TO REFLECT ON WHAT IS THE EFFECTUAL ECONOMIC VALUE OF THEIR TRAFFIC +3570-5696-0004-2476: THE (SALIENT->SAILOR AND) FEATURES OF THIS DEVELOPMENT OF DOMESTIC SERVICE HAVE ALREADY BEEN INDICATED +3570-5696-0005-2477: THROUGHOUT THE ENTIRE (EVOLUTION->REVOLUTION) OF CONSPICUOUS EXPENDITURE WHETHER OF GOODS OR OF SERVICES OR HUMAN LIFE RUNS THE OBVIOUS IMPLICATION THAT IN ORDER TO EFFECTUALLY MEND THE (CONSUMER'S->CONSUMERS) GOOD FAME IT MUST BE AN EXPENDITURE OF SUPERFLUITIES +3570-5696-0006-2478: AS USED IN THE SPEECH OF (EVERYDAY->EVERY DAY) LIFE THE WORD CARRIES AN UNDERTONE OF DEPRECATION +3570-5696-0007-2479: THE USE OF THE WORD WASTE AS A TECHNICAL TERM THEREFORE IMPLIES NO DEPRECATION OF THE MOTIVES OR OF THE ENDS SOUGHT BY THE CONSUMER UNDER THIS CANON OF CONSPICUOUS WASTE +3570-5696-0008-2480: BUT IT IS (ON OTHER->ANOTHER) GROUNDS WORTH NOTING THAT THE TERM WASTE IN THE LANGUAGE OF (EVERYDAY->EVERY DAY) LIFE IMPLIES DEPRECATION OF WHAT IS CHARACTERIZED AS WASTEFUL +3570-5696-0009-2481: IN STRICT ACCURACY NOTHING SHOULD BE INCLUDED UNDER THE HEAD OF CONSPICUOUS WASTE BUT SUCH EXPENDITURE AS IS INCURRED ON THE GROUND OF AN INVIDIOUS PECUNIARY COMPARISON +3570-5696-0010-2482: AN ARTICLE MAY BE USEFUL AND WASTEFUL BOTH AND ITS UTILITY TO THE CONSUMER MAY BE MADE UP OF USE AND WASTE IN THE MOST VARYING PROPORTIONS +3575-170457-0000-369: AND OFTEN HAS MY MOTHER SAID WHILE ON HER LAP I LAID MY HEAD SHE FEARED FOR TIME I WAS NOT MADE BUT FOR ETERNITY +3575-170457-0001-370: WHY ARE WE TO BE DENIED EACH OTHER'S SOCIETY +3575-170457-0002-371: WHY ARE WE TO BE DIVIDED +3575-170457-0003-372: SURELY IT MUST BE BECAUSE WE ARE IN DANGER OF LOVING EACH OTHER TOO WELL OF LOSING SIGHT OF THE CREATOR (IN->AND) IDOLATRY OF THE CREATURE +3575-170457-0004-373: WE USED TO DISPUTE ABOUT POLITICS AND RELIGION +3575-170457-0005-374: SHE (A TORY AND->ATTORIAN) CLERGYMAN'S DAUGHTER WAS ALWAYS IN A MINORITY OF ONE IN OUR HOUSE (OF->A) VIOLENT (DISSENT->DESCENT) AND RADICALISM +3575-170457-0006-375: HER FEEBLE HEALTH GAVE HER HER YIELDING MANNER FOR SHE COULD NEVER OPPOSE ANY ONE WITHOUT GATHERING UP ALL HER STRENGTH FOR THE STRUGGLE +3575-170457-0007-376: HE SPOKE FRENCH PERFECTLY I HAVE BEEN TOLD WHEN NEED WAS BUT DELIGHTED USUALLY IN 
TALKING THE BROADEST YORKSHIRE +3575-170457-0008-377: AND SO LIFE AND DEATH HAVE DISPERSED THE CIRCLE OF VIOLENT RADICALS AND DISSENTERS INTO WHICH TWENTY YEARS AGO THE LITTLE QUIET RESOLUTE CLERGYMAN'S DAUGHTER WAS RECEIVED AND BY WHOM SHE WAS TRULY LOVED AND (HONOURED->HONORED) +3575-170457-0009-378: JANUARY AND FEBRUARY OF EIGHTEEN THIRTY SEVEN HAD PASSED AWAY AND STILL THERE WAS NO REPLY FROM (SOUTHEY->SALVIE) +3575-170457-0010-379: I AM NOT DEPRECIATING IT WHEN I SAY THAT IN THESE TIMES IT IS NOT RARE +3575-170457-0011-380: BUT IT IS NOT WITH A VIEW TO DISTINCTION THAT YOU SHOULD CULTIVATE THIS TALENT IF YOU CONSULT YOUR OWN HAPPINESS +3575-170457-0012-381: YOU WILL SAY THAT A WOMAN HAS NO NEED OF SUCH A CAUTION THERE CAN BE NO PERIL IN IT FOR HER +3575-170457-0013-382: THE MORE SHE IS ENGAGED IN HER PROPER DUTIES THE LESS LEISURE WILL SHE HAVE FOR IT EVEN AS AN ACCOMPLISHMENT AND A RECREATION +3575-170457-0014-383: TO THOSE DUTIES YOU HAVE NOT YET BEEN CALLED AND WHEN YOU ARE YOU WILL BE LESS EAGER FOR CELEBRITY +3575-170457-0015-384: BUT DO NOT SUPPOSE THAT I DISPARAGE THE GIFT WHICH YOU POSSESS NOR THAT I WOULD DISCOURAGE YOU FROM EXERCISING IT I ONLY EXHORT YOU SO TO THINK OF IT AND SO TO USE IT AS TO RENDER IT CONDUCIVE TO YOUR OWN PERMANENT GOOD +3575-170457-0016-385: FAREWELL (MADAM->MADAME) +3575-170457-0017-386: THOUGH I MAY BE BUT AN UNGRACIOUS ADVISER YOU WILL ALLOW ME THEREFORE TO SUBSCRIBE MYSELF WITH THE BEST WISHES FOR YOUR HAPPINESS HERE AND HEREAFTER YOUR TRUE FRIEND ROBERT (SOUTHEY->SELVIE) +3575-170457-0018-387: SIR MARCH SIXTEENTH +3575-170457-0019-388: I (HAD->HAVE) NOT VENTURED TO HOPE FOR SUCH A REPLY SO CONSIDERATE IN ITS TONE SO NOBLE IN ITS SPIRIT +3575-170457-0020-389: I KNOW THE FIRST LETTER I WROTE TO YOU WAS ALL SENSELESS TRASH FROM BEGINNING TO END BUT I AM NOT ALTOGETHER THE IDLE DREAMING BEING IT WOULD SEEM TO DENOTE +3575-170457-0021-390: I THOUGHT IT THEREFORE MY DUTY WHEN I LEFT SCHOOL TO BECOME A GOVERNESS +3575-170457-0022-391: IN THE EVENINGS I (CONFESS->CONFESSED) I DO THINK BUT I NEVER TROUBLE (ANY ONE->ANYONE) ELSE WITH MY THOUGHTS +3575-170457-0023-392: I CAREFULLY AVOID ANY APPEARANCE OF PREOCCUPATION AND (ECCENTRICITY->EXCENTRICITY) WHICH MIGHT LEAD THOSE I LIVE AMONGST TO SUSPECT THE NATURE OF MY PURSUITS +3575-170457-0024-393: I DON'T ALWAYS SUCCEED FOR SOMETIMES WHEN I'M TEACHING OR SEWING I WOULD RATHER BE READING (OR->A) WRITING BUT I (TRY->TRIED) TO DENY MYSELF AND MY FATHER'S APPROBATION AMPLY REWARDED ME FOR THE PRIVATION +3575-170457-0025-394: AGAIN I THANK YOU THIS INCIDENT I SUPPOSE WILL BE RENEWED NO MORE IF I LIVE TO BE AN OLD WOMAN I SHALL REMEMBER IT THIRTY YEARS HENCE AS A BRIGHT DREAM +3575-170457-0026-395: P S PRAY SIR EXCUSE ME FOR WRITING TO YOU A SECOND TIME I COULD NOT HELP WRITING PARTLY TO TELL YOU HOW THANKFUL I AM FOR YOUR KINDNESS AND PARTLY TO LET YOU KNOW THAT YOUR ADVICE SHALL NOT BE WASTED HOWEVER SORROWFULLY AND RELUCTANTLY IT MAY BE AT FIRST FOLLOWED C B +3575-170457-0027-396: I CANNOT DENY MYSELF THE GRATIFICATION OF INSERTING (SOUTHEY'S->SO THESE) REPLY +3575-170457-0028-397: (KESWICK->KEZWICK) MARCH TWENTY SECOND EIGHTEEN THIRTY SEVEN DEAR (MADAM->MADAME) +3575-170457-0029-398: YOUR LETTER HAS GIVEN ME GREAT PLEASURE AND I SHOULD NOT FORGIVE MYSELF IF I DID NOT TELL YOU SO +3575-170457-0030-399: OF THIS SECOND LETTER ALSO SHE SPOKE AND TOLD ME THAT IT CONTAINED AN INVITATION FOR HER TO GO AND SEE THE POET IF EVER SHE VISITED THE LAKES +3575-170457-0031-400: ON AUGUST TWENTY SEVENTH EIGHTEEN THIRTY SEVEN 
SHE WRITES +3575-170457-0032-401: COME COME (I AM->I'M) GETTING REALLY TIRED OF YOUR ABSENCE +3575-170457-0033-402: SATURDAY AFTER SATURDAY COMES (ROUND->AROUND) AND I CAN HAVE NO HOPE OF HEARING YOUR KNOCK AT THE DOOR AND THEN BEING TOLD THAT (MISS E->MISSY) IS COME OH DEAR +3575-170457-0034-403: IN THIS MONOTONOUS LIFE OF (MINE->MIND) THAT WAS A PLEASANT EVENT +3575-170457-0035-404: I WISH (IT WOULD->YOU WERE) RECUR AGAIN BUT IT WILL TAKE TWO OR THREE INTERVIEWS BEFORE THE STIFFNESS THE ESTRANGEMENT OF THIS LONG SEPARATION WILL WEAR AWAY +3575-170457-0036-405: MY EYES (FILL WITH->FILLED) TEARS WHEN I CONTRAST THE BLISS OF SUCH A STATE BRIGHTENED BY HOPES OF THE FUTURE WITH THE MELANCHOLY STATE I NOW LIVE IN UNCERTAIN THAT I EVER FELT TRUE CONTRITION (WANDERING->WONDERING) IN THOUGHT (AND DEED->INDEED) LONGING FOR HOLINESS WHICH I SHALL NEVER NEVER OBTAIN SMITTEN AT TIMES TO THE HEART WITH THE CONVICTION THAT GHASTLY CALVINISTIC DOCTRINES ARE TRUE DARKENED IN SHORT BY THE VERY SHADOWS OF SPIRITUAL DEATH +3575-170457-0037-406: IF CHRISTIAN PERFECTION BE NECESSARY TO SALVATION I SHALL NEVER BE SAVED MY HEART IS A VERY (HOTBED->HOT BED) FOR SINFUL THOUGHTS AND WHEN I DECIDE ON AN ACTION I SCARCELY REMEMBER TO LOOK TO MY REDEEMER FOR (*->A) DIRECTION +3575-170457-0038-407: AND MEANTIME I KNOW THE GREATNESS OF JEHOVAH I ACKNOWLEDGE THE PERFECTION OF HIS WORD I ADORE THE PURITY OF THE CHRISTIAN FAITH MY THEORY IS RIGHT MY PRACTICE HORRIBLY WRONG +3575-170457-0039-408: THE CHRISTMAS HOLIDAYS CAME AND SHE AND ANNE RETURNED TO THE PARSONAGE AND TO THAT HAPPY HOME CIRCLE IN WHICH ALONE THEIR NATURES EXPANDED AMONGST ALL OTHER PEOPLE THEY SHRIVELLED UP MORE OR LESS +3575-170457-0040-409: INDEED THERE WERE ONLY ONE OR TWO STRANGERS WHO COULD BE ADMITTED AMONG THE SISTERS WITHOUT PRODUCING THE SAME RESULT +3575-170457-0041-410: SHE WAS GONE OUT INTO THE VILLAGE ON SOME ERRAND WHEN AS SHE WAS DESCENDING THE STEEP STREET HER FOOT SLIPPED ON THE ICE AND SHE FELL (IT->HE) WAS DARK AND NO ONE SAW HER MISCHANCE TILL AFTER A TIME HER GROANS ATTRACTED THE ATTENTION OF A PASSER BY +3575-170457-0042-411: UNFORTUNATELY THE FRACTURE COULD NOT BE SET TILL SIX O'CLOCK THE NEXT MORNING AS NO SURGEON WAS TO BE HAD BEFORE THAT TIME AND SHE NOW LIES AT (OUR->HER) HOUSE IN A VERY DOUBTFUL AND DANGEROUS STATE +3575-170457-0043-412: HOWEVER REMEMBERING WHAT YOU TOLD ME NAMELY THAT YOU HAD COMMENDED THE MATTER TO A HIGHER DECISION THAN OURS AND THAT YOU WERE RESOLVED TO SUBMIT WITH RESIGNATION TO THAT DECISION WHATEVER IT MIGHT BE I HOLD IT MY DUTY TO YIELD ALSO AND TO BE SILENT (IT->AND) MAY BE ALL FOR THE BEST +3575-170457-0044-413: AFTER THIS DISAPPOINTMENT I NEVER DARE RECKON WITH CERTAINTY ON THE ENJOYMENT OF A PLEASURE AGAIN IT SEEMS AS IF SOME FATALITY STOOD BETWEEN YOU AND ME +3575-170457-0045-414: I AM NOT GOOD ENOUGH FOR YOU AND YOU MUST BE KEPT FROM THE CONTAMINATION OF (TOO->TWO) INTIMATE SOCIETY +3575-170457-0046-415: A GOOD (NEIGHBOUR->NEIGHBOR) OF THE BRONTES A CLEVER INTELLIGENT YORKSHIRE WOMAN WHO KEEPS A (DRUGGIST'S->DRUGGIST) SHOP IN HAWORTH (AND->*) FROM HER OCCUPATION HER EXPERIENCE AND EXCELLENT SENSE HOLDS THE POSITION OF VILLAGE (DOCTRESS->DOCTRIS) AND NURSE AND AS SUCH HAS BEEN A FRIEND IN MANY A TIME OF TRIAL AND SICKNESS AND DEATH IN THE HOUSEHOLDS ROUND TOLD ME A CHARACTERISTIC LITTLE INCIDENT CONNECTED WITH TABBY'S FRACTURED LEG +3575-170457-0047-416: TABBY HAD LIVED WITH THEM FOR TEN OR TWELVE YEARS AND WAS AS CHARLOTTE EXPRESSED IT ONE OF THE FAMILY +3575-170457-0048-417: HE (REFUSED->REFUSE) 
AT FIRST TO LISTEN TO THE CAREFUL ADVICE IT WAS REPUGNANT TO HIS LIBERAL NATURE +3575-170457-0049-418: THIS DECISION WAS COMMUNICATED TO THE GIRLS +3575-170457-0050-419: TABBY HAD TENDED THEM IN THEIR CHILDHOOD THEY AND NONE OTHER SHOULD TEND HER IN HER INFIRMITY AND AGE +3575-170457-0051-420: AT TEA TIME THEY WERE SAD AND SILENT AND THE MEAL WENT AWAY UNTOUCHED BY ANY OF THE THREE +3575-170457-0052-421: SHE HAD ANOTHER WEIGHT ON HER MIND THIS CHRISTMAS +3575-170457-0053-422: BUT ANNE HAD BEGUN TO SUFFER JUST BEFORE THE HOLIDAYS AND CHARLOTTE WATCHED OVER HER YOUNGER SISTERS WITH (THE->A) JEALOUS VIGILANCE OF SOME WILD CREATURE THAT CHANGES HER VERY NATURE IF DANGER THREATENS HER YOUNG +3575-170457-0054-423: STUNG BY ANXIETY FOR THIS LITTLE SISTER SHE UPBRAIDED MISS W FOR HER FANCIED INDIFFERENCE TO (ANNE'S->AN) STATE OF HEALTH +3575-170457-0055-424: STILL HER HEART HAD RECEIVED A SHOCK IN THE PERCEPTION OF ANNE'S DELICACY AND ALL THESE HOLIDAYS SHE WATCHED OVER HER WITH THE LONGING FOND ANXIETY WHICH IS SO FULL OF SUDDEN PANGS OF FEAR +3575-170457-0056-425: I DOUBT WHETHER (BRANWELL->BROWNWELL) WAS MAINTAINING HIMSELF AT THIS TIME +3729-6852-0000-1660: TO CELEBRATE THE ARRIVAL OF HER SON (SILVIA->SYLVIA) GAVE A SPLENDID SUPPER TO WHICH SHE HAD INVITED ALL HER RELATIVES AND IT WAS A GOOD OPPORTUNITY FOR ME TO MAKE THEIR ACQUAINTANCE +3729-6852-0001-1661: WITHOUT SAYING IT POSITIVELY SHE MADE ME UNDERSTAND THAT BEING HERSELF AN ILLUSTRIOUS MEMBER OF THE REPUBLIC OF LETTERS SHE WAS WELL AWARE THAT SHE WAS SPEAKING TO AN INSECT +3729-6852-0002-1662: IN ORDER TO PLEASE HER I SPOKE TO HER OF THE (ABBE->ABBEY) CONTI AND I HAD OCCASION TO QUOTE TWO LINES OF THAT PROFOUND WRITER +3729-6852-0003-1663: (MADAM->MADAME) CORRECTED ME WITH A PATRONIZING AIR FOR MY PRONUNCIATION OF THE WORD (SCEVRA->SKRA) WHICH MEANS DIVIDED SAYING THAT IT OUGHT TO BE PRONOUNCED (SCEURA->SKURA) AND SHE ADDED THAT I OUGHT TO BE VERY GLAD TO HAVE LEARNED SO MUCH ON THE FIRST DAY OF MY ARRIVAL IN PARIS TELLING ME THAT IT WOULD BE AN IMPORTANT DAY IN MY LIFE +3729-6852-0004-1664: HER FACE WAS AN ENIGMA FOR IT INSPIRED (EVERYONE->EVERY ONE) WITH THE WARMEST SYMPATHY AND YET IF YOU EXAMINED IT ATTENTIVELY THERE WAS NOT ONE BEAUTIFUL FEATURE SHE COULD NOT BE CALLED HANDSOME BUT NO ONE COULD HAVE THOUGHT HER UGLY +3729-6852-0005-1665: (SILVIA->SYLVIA) WAS THE ADORATION OF FRANCE AND HER TALENT WAS THE REAL SUPPORT OF ALL THE COMEDIES WHICH THE GREATEST AUTHORS WROTE FOR HER ESPECIALLY OF THE PLAYS OF MARIVAUX FOR WITHOUT HER HIS COMEDIES WOULD NEVER HAVE GONE TO (POSTERITY->PROSTERITY) +3729-6852-0006-1666: (SILVIA->SYLVIA) DID NOT THINK THAT HER GOOD CONDUCT WAS A MERIT FOR SHE KNEW THAT SHE WAS VIRTUOUS ONLY BECAUSE HER SELF LOVE COMPELLED HER TO BE SO AND SHE NEVER EXHIBITED ANY PRIDE OR ASSUMED ANY SUPERIORITY TOWARDS HER THEATRICAL SISTERS ALTHOUGH SATISFIED TO SHINE BY THEIR TALENT OR THEIR BEAUTY THEY CARED LITTLE ABOUT RENDERING THEMSELVES CONSPICUOUS BY THEIR VIRTUE +3729-6852-0007-1667: TWO YEARS BEFORE HER DEATH I SAW HER PERFORM THE CHARACTER OF MARIANNE IN THE COMEDY OF (MARIVAUX->MARAVO) AND IN SPITE OF HER AGE AND DECLINING HEALTH THE ILLUSION WAS COMPLETE +3729-6852-0008-1668: SHE WAS HONOURABLY BURIED IN THE CHURCH OF SAINT (SAUVEUR->SAVERE) WITHOUT THE SLIGHTEST OPPOSITION FROM THE VENERABLE PRIEST WHO FAR FROM SHARING THE ANTI (CHRISTAIN->CHRISTIAN) INTOLERANCY OF THE CLERGY IN GENERAL SAID THAT HER PROFESSION AS AN ACTRESS HAD NOT HINDERED HER FROM BEING A GOOD CHRISTIAN AND THAT THE EARTH WAS (THE->A) COMMON 
MOTHER OF ALL HUMAN BEINGS AS JESUS CHRIST HAD BEEN THE SAVIOUR OF ALL MANKIND +3729-6852-0009-1669: YOU WILL FORGIVE ME DEAR READER IF I HAVE MADE YOU ATTEND THE FUNERAL OF (SILVIA->SYLVIA) TEN YEARS BEFORE HER DEATH BELIEVE ME I HAVE NO INTENTION OF PERFORMING A MIRACLE YOU MAY CONSOLE YOURSELF WITH THE IDEA THAT I SHALL SPARE YOU THAT UNPLEASANT TASK WHEN POOR (SILVIA->SYLVIA) DIES +3729-6852-0010-1670: I NEVER HAD ANY FAMILY +3729-6852-0011-1671: I HAD A NAME I BELIEVE IN MY YOUNG DAYS BUT I HAVE FORGOTTEN IT SINCE I HAVE BEEN IN SERVICE +3729-6852-0012-1672: I SHALL CALL YOU (ESPRIT->A SPREE) +3729-6852-0013-1673: YOU DO ME A GREAT HONOUR +3729-6852-0014-1674: HERE GO AND GET ME CHANGE FOR A LOUIS I HAVE IT SIR +3729-6852-0015-1675: AT YOUR SERVICE SIR +3729-6852-0016-1676: MADAME (QUINSON->QUINCENT) BESIDES CAN ANSWER YOUR (ENQUIRIES->INQUIRIES) +3729-6852-0017-1677: I SEE A QUANTITY OF CHAIRS FOR HIRE AT THE RATE OF ONE (SOU->SOUS) MEN READING THE NEWSPAPER UNDER THE SHADE OF THE TREES GIRLS AND MEN BREAKFASTING EITHER ALONE OR IN COMPANY WAITERS WHO WERE RAPIDLY GOING UP AND DOWN A NARROW STAIRCASE HIDDEN UNDER THE FOLIAGE +3729-6852-0018-1678: I SIT DOWN AT A SMALL TABLE A WAITER COMES IMMEDIATELY TO (ENQUIRE->INQUIRE) MY WISHES +3729-6852-0019-1679: I TELL HIM TO GIVE ME SOME COFFEE IF IT IS GOOD +3729-6852-0020-1680: THEN TURNING TOWARDS ME HE SAYS THAT I LOOK LIKE A FOREIGNER AND WHEN I SAY THAT I AM AN ITALIAN HE BEGINS TO SPEAK TO ME OF THE (COURT OF->CORPS) THE CITY OF THE THEATRES AND AT LAST HE OFFERS TO ACCOMPANY ME EVERYWHERE +3729-6852-0021-1681: I THANK HIM AND TAKE MY LEAVE +3729-6852-0022-1682: I ADDRESS HIM IN ITALIAN AND HE ANSWERS VERY WITTILY BUT HIS WAY OF SPEAKING MAKES ME SMILE AND I TELL HIM WHY +3729-6852-0023-1683: MY REMARK PLEASES HIM BUT I SOON PROVE TO HIM THAT IT IS NOT THE RIGHT WAY TO SPEAK HOWEVER PERFECT MAY HAVE BEEN THE LANGUAGE OF THAT ANCIENT WRITER +3729-6852-0024-1684: I SEE A CROWD IN ONE CORNER OF THE GARDEN EVERYBODY STANDING STILL AND LOOKING UP +3729-6852-0025-1685: IS THERE NOT A MERIDIAN EVERYWHERE +3729-6852-0026-1686: YES BUT THE MERIDIAN OF THE PALAIS ROYAL IS THE MOST EXACT +3729-6852-0027-1687: THAT IS TRUE (BADAUDERIE->BAD DEALT GREE) +3729-6852-0028-1688: ALL THESE HONEST PERSONS ARE WAITING THEIR TURN TO GET THEIR SNUFF BOXES FILLED +3729-6852-0029-1689: IT IS SOLD EVERYWHERE BUT FOR THE LAST THREE WEEKS NOBODY WILL USE ANY SNUFF BUT (THAT->THAT'S) SOLD AT THE (CIVET->SAVE) CAT +3729-6852-0030-1690: IS IT BETTER THAN ANYWHERE ELSE +3729-6852-0031-1691: BUT HOW DID SHE MANAGE TO RENDER IT SO FASHIONABLE +3729-6852-0032-1692: SIMPLY BY STOPPING HER CARRIAGE TWO OR THREE TIMES BEFORE THE SHOP TO HAVE HER SNUFF BOX FILLED AND BY SAYING ALOUD TO THE YOUNG GIRL WHO HANDED BACK THE BOX THAT HER SNUFF WAS THE VERY BEST IN PARIS +3729-6852-0033-1693: YOU ARE NOW IN THE ONLY COUNTRY IN THE WORLD WHERE WIT CAN MAKE A FORTUNE BY SELLING EITHER A GENUINE OR A FALSE ARTICLE IN THE FIRST CASE IT RECEIVES THE WELCOME OF INTELLIGENT AND TALENTED PEOPLE AND IN THE SECOND FOOLS ARE ALWAYS READY TO REWARD IT FOR SILLINESS IS TRULY A CHARACTERISTIC OF THE PEOPLE HERE AND HOWEVER WONDERFUL IT MAY APPEAR SILLINESS IS THE DAUGHTER OF WIT +3729-6852-0034-1694: LET A MAN RUN AND EVERYBODY WILL RUN AFTER HIM THE CROWD WILL NOT STOP UNLESS THE MAN IS PROVED TO BE MAD BUT TO PROVE IT IS INDEED A DIFFICULT TASK BECAUSE WE HAVE A CROWD OF MEN WHO MAD FROM THEIR BIRTH ARE STILL CONSIDERED WISE +3729-6852-0035-1695: IT SEEMS TO ME I REPLIED THAT SUCH APPROVAL 
SUCH RATIFICATION OF THE OPINION EXPRESSED BY THE KING THE PRINCES OF THE BLOOD ET CETERA IS RATHER A PROOF OF THE AFFECTION FELT FOR THEM BY THE NATION FOR THE FRENCH CARRY THAT AFFECTION TO SUCH AN EXTENT THAT THEY (BELIEVE->BELIEVED) THEM INFALLIBLE +3729-6852-0036-1696: WHEN THE KING COMES TO PARIS EVERYBODY CALLS OUT VIVE LE (ROI->ROY) +3729-6852-0037-1697: SHE INTRODUCED ME TO ALL HER GUESTS AND GAVE ME SOME PARTICULARS RESPECTING EVERY ONE OF THEM +3729-6852-0038-1698: WHAT SIR I SAID TO HIM AM I FORTUNATE ENOUGH TO SEE YOU +3729-6852-0039-1699: HE HIMSELF RECITED THE SAME PASSAGE IN FRENCH AND POLITELY POINTED OUT THE PARTS IN WHICH HE THOUGHT THAT I HAD IMPROVED ON THE ORIGINAL +3729-6852-0040-1700: FOR THE FIRST DAY SIR I THINK THAT WHAT YOU HAVE DONE GIVES GREAT HOPES OF YOU AND WITHOUT ANY DOUBT YOU WILL MAKE RAPID PROGRESS +3729-6852-0041-1701: I BELIEVE IT SIR AND THAT IS WHAT I FEAR THEREFORE THE PRINCIPAL OBJECT OF MY VISIT HERE IS TO DEVOTE MYSELF ENTIRELY TO THE STUDY OF THE FRENCH LANGUAGE +3729-6852-0042-1702: I AM A VERY UNPLEASANT PUPIL ALWAYS ASKING QUESTIONS CURIOUS TROUBLESOME INSATIABLE AND EVEN SUPPOSING THAT I COULD MEET WITH THE TEACHER I REQUIRE I AM AFRAID I AM NOT RICH ENOUGH TO PAY HIM +3729-6852-0043-1703: I RESIDE IN THE (MARAIS RUE->MARAE GRUE) DE (DOUZE PORTES->DUSPORT) +3729-6852-0044-1704: I WILL MAKE YOU TRANSLATE THEM INTO FRENCH AND YOU NEED NOT BE AFRAID OF MY FINDING YOU INSATIABLE +3729-6852-0045-1705: HE HAD A GOOD APPETITE (COULD TELL->COTEL) A GOOD STORY WITHOUT LAUGHING WAS CELEBRATED FOR HIS WITTY REPARTEES AND HIS SOCIABLE MANNERS BUT HE SPENT HIS LIFE AT HOME SELDOM GOING OUT AND SEEING HARDLY (ANYONE->ANY ONE) BECAUSE HE ALWAYS HAD A PIPE IN HIS MOUTH AND WAS SURROUNDED BY AT LEAST TWENTY CATS WITH WHICH HE WOULD AMUSE HIMSELF ALL DAY +3729-6852-0046-1706: HIS HOUSEKEEPER HAD THE MANAGEMENT OF EVERYTHING SHE NEVER ALLOWED HIM TO BE IN NEED OF ANYTHING AND SHE GAVE NO ACCOUNT OF HIS MONEY WHICH SHE KEPT ALTOGETHER BECAUSE HE NEVER ASKED HER TO RENDER ANY ACCOUNTS +4077-13751-0000-1258: ON THE SIXTH OF APRIL EIGHTEEN THIRTY THE CHURCH OF JESUS CHRIST OF LATTER DAY SAINTS WAS (FORMALLY->FORMERLY) ORGANIZED AND THUS TOOK ON A LEGAL EXISTENCE +4077-13751-0001-1259: ITS ORIGIN WAS SMALL A GERM AN INSIGNIFICANT SEED HARDLY TO BE THOUGHT OF AS LIKELY TO AROUSE OPPOSITION +4077-13751-0002-1260: INSTEAD OF BUT SIX REGULARLY AFFILIATED MEMBERS AND AT MOST TWO SCORE OF ADHERENTS THE ORGANIZATION NUMBERS (TODAY->TO DAY) MANY HUNDRED THOUSAND SOULS +4077-13751-0003-1261: IN PLACE OF A SINGLE HAMLET IN THE SMALLEST CORNER OF WHICH THE MEMBERS COULD HAVE CONGREGATED THERE NOW ARE ABOUT SEVENTY STAKES OF ZION AND ABOUT SEVEN HUNDRED ORGANIZED WARDS EACH WARD AND STAKE WITH ITS FULL COMPLEMENT OF OFFICERS AND PRIESTHOOD ORGANIZATIONS +4077-13751-0004-1262: THE (PRACTISE->PRACTICE) OF GATHERING ITS PROSELYTES INTO ONE PLACE PREVENTS THE (BUILDING->BILLING) UP AND STRENGTHENING OF FOREIGN BRANCHES AND INASMUCH AS EXTENSIVE AND STRONG ORGANIZATIONS ARE SELDOM MET WITH ABROAD VERY ERRONEOUS IDEAS EXIST CONCERNING THE STRENGTH OF THE CHURCH +4077-13751-0005-1263: NEVERTHELESS THE MUSTARD SEED AMONG THE SMALLEST OF ALL SEEDS (HAS ATTAINED->HESITATED) THE PROPORTIONS OF A TREE AND THE BIRDS OF THE AIR ARE NESTING IN ITS BRANCHES THE ACORN IS NOW (AN->IN) OAK OFFERING PROTECTION AND THE SWEETS OF SATISFACTION TO EVERY EARNEST PILGRIM JOURNEYING ITS WAY FOR TRUTH +4077-13751-0006-1264: THEIR EYES WERE FROM THE FIRST TURNED IN ANTICIPATION TOWARD THE EVENING SUN 
NOT MERELY THAT THE WORK OF (PROSELYTING->PROSELLING) SHOULD BE CARRIED ON IN THE WEST BUT THAT THE HEADQUARTERS OF THE CHURCH SHOULD BE (THERE->THEIR) ESTABLISHED +4077-13751-0007-1265: THE BOOK (OF->OR) MORMON HAD TAUGHT (THE->THAT) PEOPLE THE TRUE ORIGIN AND DESTINY OF THE AMERICAN INDIANS AND TOWARD THIS DARK SKINNED REMNANT OF A ONCE MIGHTY PEOPLE THE MISSIONARIES OF MORMONISM EARLY TURNED THEIR EYES AND WITH THEIR EYES WENT THEIR HEARTS AND THEIR HOPES +4077-13751-0008-1266: IT IS NOTABLE THAT THE INDIAN TRIBES HAVE GENERALLY REGARDED (THE->THEIR) RELIGION OF THE LATTER DAY SAINTS WITH FAVOR SEEING IN THE BOOK (OF->A) MORMON STRIKING AGREEMENT WITH THEIR OWN TRADITIONS +4077-13751-0009-1267: THE FIRST WELL ESTABLISHED SEAT OF THE CHURCH WAS IN THE PRETTY LITTLE TOWN OF (KIRTLAND->CURTLEND) OHIO ALMOST WITHIN SIGHT OF LAKE ERIE AND HERE SOON ROSE THE FIRST TEMPLE OF MODERN TIMES +4077-13751-0010-1268: TO THE FERVENT LATTER DAY SAINT A TEMPLE IS NOT SIMPLY A CHURCH BUILDING A HOUSE FOR RELIGIOUS ASSEMBLY +4077-13751-0011-1269: SOON THOUSANDS OF CONVERTS HAD RENTED OR PURCHASED HOMES IN MISSOURI INDEPENDENCE JACKSON COUNTY BEING THEIR CENTER BUT FROM THE FIRST THEY WERE UNPOPULAR AMONG THE MISSOURIANS +4077-13751-0012-1270: THE LIEUTENANT GOVERNOR (LILBURN->LITTLE BURN) W (BOGGS->BOX) AFTERWARD GOVERNOR WAS A PRONOUNCED MORMON (HATER->HAYTER) AND THROUGHOUT THE PERIOD OF THE TROUBLES HE (MANIFESTED->MANIFEST HIS) SYMPATHY WITH THE PERSECUTORS +4077-13751-0013-1271: THEIR SUFFERINGS HAVE NEVER YET BEEN FITLY CHRONICLED BY HUMAN SCRIBE +4077-13751-0014-1272: MAKING THEIR WAY ACROSS THE RIVER MOST OF THE REFUGEES FOUND SHELTER AMONG THE MORE HOSPITABLE PEOPLE OF CLAY COUNTY AND AFTERWARD ESTABLISHED THEMSELVES IN (CALDWELL->COLDWELL) COUNTY (THEREIN->THEY WERE IN) FOUNDING THE CITY OF FAR WEST +4077-13751-0015-1273: A SMALL SETTLEMENT HAD BEEN FOUNDED BY MORMON FAMILIES ON SHOAL CREEK AND HERE ON THE THIRTIETH OF OCTOBER EIGHTEEN THIRTY EIGHT A COMPANY OF TWO HUNDRED AND FORTY FELL UPON THE HAPLESS SETTLERS AND (BUTCHERED A->BUTCHER TO) SCORE +4077-13751-0016-1274: BE IT SAID TO THE HONOR OF SOME OF THE OFFICERS ENTRUSTED WITH (THE->A) TERRIBLE COMMISSION THAT WHEN THEY LEARNED ITS TRUE SIGNIFICANCE THEY RESIGNED THEIR AUTHORITY RATHER THAN HAVE ANYTHING TO DO WITH WHAT THEY DESIGNATED A COLD BLOODED BUTCHERY +4077-13751-0017-1275: OH WHAT A RECORD TO READ WHAT A PICTURE TO GAZE UPON HOW AWFUL THE FACT +4077-13751-0018-1276: AMERICAN (SCHOOL BOYS->SCHOOLBOYS) READ WITH EMOTIONS OF HORROR OF THE (ALBIGENSES->ALBIGENZAS) DRIVEN BEATEN AND KILLED WITH A (PAPAL->PEPPEL) LEGATE DIRECTING THE BUTCHERY AND OF THE (VAUDOIS->FAUDOIR) HUNTED AND HOUNDED LIKE BEASTS AS THE EFFECT OF A ROYAL DECREE AND THEY YET SHALL READ IN THE HISTORY OF THEIR OWN COUNTRY OF SCENES AS TERRIBLE AS THESE IN THE EXHIBITION OF INJUSTICE AND INHUMAN HATE +4077-13751-0019-1277: WHO BEGAN THE QUARREL WAS IT THE MORMONS +4077-13751-0020-1278: AS (A->THE) SAMPLE OF THE PRESS (COMMENTS->COMETS) AGAINST THE BRUTALITY OF THE (MISSOURIANS->MISERIES) I QUOTE A PARAGRAPH FROM THE QUINCY ARGUS MARCH SIXTEENTH EIGHTEEN THIRTY NINE +4077-13751-0021-1279: IT WILL BE OBSERVED THAT AN ORGANIZED MOB AIDED BY MANY OF THE CIVIL AND MILITARY OFFICERS OF MISSOURI WITH GOVERNOR (BOGGS->BOX) AT THEIR HEAD HAVE BEEN THE PROMINENT ACTORS IN THIS BUSINESS INCITED TOO IT APPEARS AGAINST THE MORMONS BY POLITICAL HATRED AND BY THE ADDITIONAL MOTIVES OF PLUNDER AND REVENGE +4077-13754-0000-1241: THE ARMY FOUND THE PEOPLE IN POVERTY AND LEFT THEM IN 
COMPARATIVE WEALTH +4077-13754-0001-1242: BUT A WORD FURTHER CONCERNING THE EXPEDITION IN GENERAL +4077-13754-0002-1243: IT WAS THROUGH FLOYD'S ADVICE THAT (BUCHANAN ORDERED->BUCATED ORDER) THE MILITARY EXPEDITION TO UTAH OSTENSIBLY TO INSTALL CERTAIN FEDERAL OFFICIALS AND TO REPRESS AN ALLEGED INFANTILE REBELLION WHICH IN FACT HAD NEVER COME INTO EXISTENCE BUT IN REALITY TO FURTHER THE (INTERESTS->INTRICTS) OF THE SECESSIONISTS +4077-13754-0003-1244: MOREOVER HAD THE PEOPLE BEEN INCLINED TO REBELLION WHAT GREATER OPPORTUNITY COULD THEY HAVE WISHED +4077-13754-0004-1245: ALREADY A NORTH AND (A->THE) SOUTH WERE TALKED OF WHY NOT SET UP ALSO A WEST +4077-13754-0005-1246: THEY KNEW NO NORTH (NO->NOR) SOUTH (NO->NOR) EAST NO WEST THEY STOOD POSITIVELY BY THE CONSTITUTION AND WOULD HAVE NOTHING TO DO IN THE BLOODY STRIFE BETWEEN BROTHERS UNLESS INDEED THEY WERE SUMMONED BY THE AUTHORITY TO WHICH THEY HAD ALREADY ONCE LOYALLY RESPONDED TO FURNISH MEN (AND->IN) ARMS FOR THEIR COUNTRY'S NEED +4077-13754-0006-1247: WHAT THE LATTER DAY SAINTS CALL CELESTIAL MARRIAGE IS CHARACTERISTIC OF THE CHURCH AND IS IN VERY GENERAL (PRACTISE->PRACTICE) BUT OF CELESTIAL MARRIAGE PLURALITY OF WIVES WAS AN INCIDENT NEVER AN ESSENTIAL +4077-13754-0007-1248: WE BELIEVE IN A LITERAL RESURRECTION AND AN ACTUAL HEREAFTER IN WHICH FUTURE (STATE->STATES) SHALL BE RECOGNIZED EVERY SANCTIFIED AND AUTHORIZED RELATIONSHIP EXISTING HERE ON EARTH OF PARENT AND CHILD BROTHER AND SISTER HUSBAND AND WIFE +4077-13754-0008-1249: IT HAS BEEN MY PRIVILEGE TO TREAD THE SOIL OF MANY LANDS TO OBSERVE THE CUSTOMS AND STUDY THE HABITS OF MORE NATIONS THAN ONE AND I HAVE YET TO FIND THE PLACE AND MEET THE PEOPLE WHERE AND WITH WHOM THE PURITY OF MAN AND WOMAN IS HELD MORE PRECIOUS THAN AMONG THE MALIGNED MORMONS IN THE MOUNTAIN VALLEYS OF THE WEST +4077-13754-0009-1250: AT THE INCEPTION OF (PLURAL->PEARL) MARRIAGE AMONG THE LATTER DAY SAINTS THERE WAS NO LAW NATIONAL OR STATE AGAINST ITS (PRACTISE->PRACTICE) +4077-13754-0010-1251: IN EIGHTEEN SIXTY TWO A LAW WAS ENACTED WITH (THE->A) PURPOSE OF SUPPRESSING PLURAL MARRIAGE AND AS HAD BEEN PREDICTED IN THE NATIONAL SENATE (PRIOR->PRAYER) TO ITS PASSAGE IT LAY FOR MANY YEARS A DEAD LETTER +4077-13754-0011-1252: FEDERAL JUDGES AND UNITED STATES ATTORNEYS IN (UTAH->UTA) WHO WERE NOT MORMONS NOR LOVERS OF (MORMONISM->WOMENISM) REFUSED TO ENTERTAIN COMPLAINTS OR PROSECUTE CASES UNDER THE LAW BECAUSE OF ITS MANIFEST INJUSTICE AND INADEQUACY +4077-13754-0012-1253: THIS MEANT THAT FOR AN ALLEGED MISDEMEANOR FOR WHICH CONGRESS PRESCRIBED A MAXIMUM PENALTY OF SIX MONTHS IMPRISONMENT AND A FINE OF THREE HUNDRED DOLLARS A MAN MIGHT BE IMPRISONED FOR LIFE (AYE->I) FOR MANY TERMS OF A MAN'S NATURAL LIFE DID THE (COURT'S->COURTS) POWER TO ENFORCE ITS SENTENCES EXTEND SO FAR AND MIGHT BE FINED MILLIONS OF DOLLARS +4077-13754-0013-1254: BEFORE THIS TRAVESTY ON THE ADMINISTRATION OF LAW COULD BE BROUGHT BEFORE THE COURT OF LAST RESORT AND THERE (MEET->MET) WITH THE REVERSAL AND REBUKE IT DESERVED MEN WERE IMPRISONED UNDER (SENTENCES->SENTENCE) OF MANY YEARS DURATION +4077-13754-0014-1255: THE PEOPLE CONTESTED THESE MEASURES ONE BY ONE IN THE COURTS PRESENTING IN CASE AFTER CASE THE DIFFERENT PHASES OF THE SUBJECT AND URGING THE UNCONSTITUTIONALITY OF THE MEASURE +4077-13754-0015-1256: THEN THE CHURCH WAS DISINCORPORATED AND ITS PROPERTY BOTH REAL AND PERSONAL CONFISCATED AND (ESCHEATED->ISIATED) TO THE GOVERNMENT OF THE UNITED STATES AND ALTHOUGH THE PERSONAL PROPERTY WAS SOON RESTORED REAL ESTATE OF GREAT 
VALUE LONG LAY IN THE HANDS OF THE (COURT'S->COURTS) RECEIVER AND THE MORMON CHURCH HAD TO PAY THE NATIONAL GOVERNMENT (HIGH->HIGHER) RENTAL ON ITS OWN PROPERTY +4077-13754-0016-1257: AND SO THE STORY OF MORMONISM RUNS ON ITS FINALE HAS NOT YET BEEN WRITTEN THE CURRENT PRESS PRESENTS CONTINUOUSLY NEW STAGES OF ITS PROGRESS NEW DEVELOPMENTS OF ITS PLAN +4446-2271-0000-1133: (MAINHALL->MAIN HALL) LIKED ALEXANDER BECAUSE HE WAS AN ENGINEER +4446-2271-0001-1134: (HE->WE) HAD (PRECONCEIVED->FREQUENCY) IDEAS ABOUT EVERYTHING AND HIS IDEA ABOUT AMERICANS WAS THAT THEY SHOULD BE ENGINEERS OR MECHANICS +4446-2271-0002-1135: (IT'S->ITS) TREMENDOUSLY WELL PUT ON TOO +4446-2271-0003-1136: IT'S BEEN ON ONLY TWO WEEKS AND I'VE BEEN HALF A DOZEN TIMES ALREADY +4446-2271-0004-1137: DO YOU KNOW ALEXANDER (MAINHALL->MAIN HALL) LOOKED WITH PERPLEXITY UP INTO THE TOP OF THE HANSOM AND RUBBED HIS PINK CHEEK WITH HIS GLOVED FINGER DO YOU KNOW I SOMETIMES THINK OF TAKING TO CRITICISM SERIOUSLY MYSELF +4446-2271-0005-1138: SHE SAVES HER HAND TOO SHE'S AT HER BEST IN THE SECOND ACT +4446-2271-0006-1139: HE'S BEEN WANTING TO MARRY (HILDA->HILDER) THESE THREE YEARS AND MORE +4446-2271-0007-1140: SHE DOESN'T TAKE UP WITH ANYBODY YOU KNOW +4446-2271-0008-1141: IRENE (BURGOYNE->WERE GOING) ONE OF HER FAMILY TOLD ME IN CONFIDENCE THAT THERE WAS A ROMANCE SOMEWHERE BACK IN THE BEGINNING +4446-2271-0009-1142: (MAINHALL->MAIN HOLE) VOUCHED FOR HER CONSTANCY WITH A LOFTINESS THAT MADE ALEXANDER SMILE EVEN WHILE A KIND OF RAPID EXCITEMENT WAS TINGLING THROUGH HIM +4446-2271-0010-1143: HE'S ANOTHER WHO'S AWFULLY KEEN ABOUT HER LET ME INTRODUCE YOU +4446-2271-0011-1144: SIR (HARRY TOWNE->HARRYTOWN) MISTER BARTLEY ALEXANDER THE AMERICAN ENGINEER +4446-2271-0012-1145: I SAY SIR HARRY THE LITTLE GIRL'S GOING FAMOUSLY TO NIGHT ISN'T SHE +4446-2271-0013-1146: (DO->*) YOU KNOW I THOUGHT THE DANCE A BIT CONSCIOUS TO NIGHT FOR THE FIRST TIME +4446-2271-0014-1147: (WESTMERE->WESTMER) AND I WERE BACK AFTER THE FIRST ACT AND WE THOUGHT SHE SEEMED QUITE UNCERTAIN OF HERSELF +4446-2271-0015-1148: A LITTLE ATTACK OF NERVES POSSIBLY +4446-2271-0016-1149: (HE->IT) WAS BEGINNING TO FEEL (A->THE) KEEN INTEREST IN THE SLENDER BAREFOOT DONKEY GIRL WHO SLIPPED IN AND OUT OF THE PLAY SINGING LIKE SOME ONE WINDING THROUGH A HILLY FIELD +4446-2271-0017-1150: ONE NIGHT WHEN HE AND WINIFRED WERE SITTING TOGETHER ON THE BRIDGE HE TOLD HER THAT THINGS HAD HAPPENED WHILE HE WAS STUDYING ABROAD THAT HE WAS SORRY FOR ONE THING IN PARTICULAR AND HE ASKED HER WHETHER SHE THOUGHT SHE OUGHT TO KNOW ABOUT THEM +4446-2271-0018-1151: SHE CONSIDERED (*->FOR) A MOMENT AND THEN SAID NO I THINK NOT (THOUGH->THE WAY) I AM GLAD YOU ASK ME +4446-2271-0019-1152: AFTER THAT IT WAS EASY TO FORGET ACTUALLY TO FORGET +4446-2271-0020-1153: OF COURSE HE REFLECTED SHE ALWAYS HAD THAT COMBINATION OF SOMETHING HOMELY AND SENSIBLE AND SOMETHING UTTERLY WILD AND DAFT +4446-2271-0021-1154: SHE MUST CARE ABOUT THE THEATRE A GREAT DEAL MORE THAN SHE USED TO +4446-2271-0022-1155: I'M GLAD SHE'S HELD HER OWN SINCE +4446-2271-0023-1156: AFTER ALL WE WERE AWFULLY YOUNG +4446-2271-0024-1157: I SHOULDN'T WONDER IF SHE COULD LAUGH ABOUT IT WITH ME NOW +4446-2273-0000-1158: HILDA WAS VERY NICE TO HIM AND HE SAT ON THE EDGE OF HIS CHAIR FLUSHED WITH HIS CONVERSATIONAL EFFORTS AND MOVING HIS CHIN ABOUT NERVOUSLY OVER HIS HIGH COLLAR +4446-2273-0001-1159: THEY ASKED HIM TO COME TO SEE THEM IN CHELSEA AND THEY SPOKE VERY TENDERLY OF HILDA +4446-2273-0002-1160: LAMB WOULDN'T CARE A GREAT DEAL ABOUT 
MANY OF THEM I FANCY +4446-2273-0003-1161: WHEN BARTLEY ARRIVED AT BEDFORD SQUARE ON SUNDAY EVENING MARIE THE PRETTY LITTLE FRENCH GIRL MET HIM AT THE DOOR AND CONDUCTED HIM UPSTAIRS +4446-2273-0004-1162: I SHOULD NEVER HAVE ASKED YOU IF MOLLY HAD BEEN HERE FOR I REMEMBER YOU DON'T LIKE ENGLISH COOKERY +4446-2273-0005-1163: I HAVEN'T HAD A CHANCE YET TO TELL YOU WHAT A JOLLY LITTLE PLACE I THINK THIS IS +4446-2273-0006-1164: THEY ARE ALL SKETCHES MADE ABOUT THE VILLA (D'ESTE->DESTA) YOU SEE +4446-2273-0007-1165: THOSE FELLOWS ARE ALL VERY LOYAL EVEN (MAINHALL->MAIN HALL) +4446-2273-0008-1166: I'VE MANAGED TO SAVE SOMETHING EVERY YEAR AND THAT WITH HELPING MY THREE SISTERS NOW AND THEN AND TIDING POOR COUSIN (MIKE->MICHAEL) OVER BAD SEASONS +4446-2273-0009-1167: IT'S NOT PARTICULARLY RARE SHE SAID BUT SOME OF IT WAS MY MOTHER'S +4446-2273-0010-1168: THERE WAS WATERCRESS SOUP AND SOLE AND A DELIGHTFUL (OMELETTE->OMELET) STUFFED WITH MUSHROOMS AND TRUFFLES AND TWO SMALL RARE DUCKLINGS AND (ARTICHOKES->ART OF CHOKES) AND A DRY YELLOW (RHONE->ROAN) WINE OF WHICH BARTLEY HAD ALWAYS BEEN VERY FOND +4446-2273-0011-1169: THERE IS NOTHING ELSE THAT LOOKS SO JOLLY +4446-2273-0012-1170: THANK YOU BUT I DON'T LIKE IT SO WELL AS THIS +4446-2273-0013-1171: HAVE YOU BEEN IN PARIS MUCH THESE LATE YEARS +4446-2273-0014-1172: THERE ARE (*->A) FEW CHANGES IN THE OLD QUARTER +4446-2273-0015-1173: DON'T I THOUGH I'M SO SORRY TO HEAR IT HOW DID HER SON TURN OUT +4446-2273-0016-1174: HER HAIR IS STILL LIKE FLAX AND HER BLUE EYES ARE JUST LIKE A BABY'S AND SHE HAS THE SAME THREE FRECKLES ON HER LITTLE NOSE AND TALKS ABOUT GOING BACK TO HER (BAINS DE MER->BANDA MARE) +4446-2273-0017-1175: HOW JOLLY IT WAS BEING YOUNG HILDA +4446-2273-0018-1176: DO YOU REMEMBER THAT FIRST WALK WE TOOK TOGETHER IN PARIS +4446-2273-0019-1177: COME WE'LL HAVE OUR COFFEE IN THE OTHER ROOM AND YOU CAN SMOKE +4446-2273-0020-1178: I THINK WE DID SHE ANSWERED DEMURELY +4446-2273-0021-1179: WHAT SHE WANTED FROM US WAS NEITHER OUR FLOWERS NOR OUR (FRANCS->FRANKS) BUT JUST OUR YOUTH +4446-2273-0022-1180: THEY WERE BOTH REMEMBERING WHAT THE WOMAN HAD SAID WHEN SHE TOOK THE MONEY GOD GIVE YOU A HAPPY LOVE +4446-2273-0023-1181: THE STRANGE WOMAN AND HER PASSIONATE SENTENCE THAT RANG OUT SO SHARPLY HAD FRIGHTENED THEM BOTH +4446-2273-0024-1182: BARTLEY STARTED WHEN HILDA RANG THE LITTLE BELL BESIDE HER DEAR ME WHY DID YOU DO THAT +4446-2273-0025-1183: IT WAS VERY JOLLY HE MURMURED LAZILY AS MARIE CAME IN TO TAKE AWAY THE COFFEE +4446-2273-0026-1184: HAVE I TOLD YOU ABOUT MY NEW PLAY +4446-2273-0027-1185: WHEN SHE FINISHED ALEXANDER SHOOK HIMSELF OUT OF A REVERIE +4446-2273-0028-1186: NONSENSE OF COURSE I CAN'T REALLY SING EXCEPT THE WAY MY MOTHER AND GRANDMOTHER DID BEFORE ME +4446-2273-0029-1187: IT'S REALLY TOO WARM IN THIS ROOM TO SING DON'T YOU FEEL IT +4446-2273-0030-1188: ALEXANDER WENT OVER AND OPENED THE WINDOW FOR HER +4446-2273-0031-1189: THERE JUST IN FRONT +4446-2273-0032-1190: HE STOOD A LITTLE BEHIND HER AND TRIED TO STEADY HIMSELF AS HE SAID IT'S SOFT AND MISTY SEE HOW WHITE THE STARS ARE +4446-2273-0033-1191: FOR A LONG TIME NEITHER HILDA NOR BARTLEY SPOKE +4446-2273-0034-1192: HE FELT A TREMOR RUN THROUGH THE SLENDER YELLOW FIGURE IN FRONT OF HIM +4446-2273-0035-1193: BARTLEY LEANED OVER HER SHOULDER WITHOUT TOUCHING HER AND WHISPERED IN HER EAR YOU ARE GIVING ME A CHANCE YES +4446-2273-0036-1194: ALEXANDER (UNCLENCHED->CLENCHED) THE TWO HANDS AT HIS SIDES +4446-2275-0000-1195: THE STOP AT QUEENSTOWN THE TEDIOUS PASSAGE UP THE 
(MERSEY->MERCY) WERE THINGS THAT HE NOTED DIMLY THROUGH HIS GROWING IMPATIENCE +4446-2275-0001-1196: SHE BLUSHED AND SMILED AND FUMBLED HIS CARD IN HER CONFUSION BEFORE SHE RAN UPSTAIRS +4446-2275-0002-1197: ALEXANDER PACED UP AND DOWN THE HALLWAY BUTTONING AND UNBUTTONING HIS OVERCOAT UNTIL SHE RETURNED AND TOOK HIM UP TO HILDA'S LIVING ROOM +4446-2275-0003-1198: THE ROOM WAS EMPTY WHEN HE ENTERED +4446-2275-0004-1199: ALEXANDER DID NOT SIT DOWN +4446-2275-0005-1200: I FELT IT IN MY BONES WHEN I WOKE THIS MORNING THAT SOMETHING SPLENDID WAS GOING TO TURN UP +4446-2275-0006-1201: I THOUGHT IT MIGHT BE SISTER KATE OR COUSIN MIKE WOULD BE HAPPENING ALONG +4446-2275-0007-1202: SHE PUSHED HIM TOWARD THE BIG CHAIR BY THE FIRE AND SAT DOWN ON A STOOL AT THE OPPOSITE SIDE OF THE HEARTH HER KNEES DRAWN UP TO HER CHIN LAUGHING LIKE A HAPPY LITTLE GIRL +4446-2275-0008-1203: WHEN DID YOU COME BARTLEY AND HOW DID IT HAPPEN YOU HAVEN'T SPOKEN A WORD +4446-2275-0009-1204: I GOT IN ABOUT TEN MINUTES AGO +4446-2275-0010-1205: ALEXANDER LEANED FORWARD AND WARMED HIS HANDS BEFORE THE BLAZE +4446-2275-0011-1206: BARTLEY BENT (LOWER->LOWERED) OVER THE FIRE +4446-2275-0012-1207: SHE LOOKED AT HIS HEAVY SHOULDERS (AND->IN) BIG DETERMINED HEAD THRUST FORWARD LIKE A CATAPULT IN LEASH +4446-2275-0013-1208: I'LL DO ANYTHING YOU WISH ME TO BARTLEY SHE SAID TREMULOUSLY +4446-2275-0014-1209: I CAN'T STAND SEEING YOU MISERABLE +4446-2275-0015-1210: HE PULLED UP A WINDOW AS IF THE AIR WERE HEAVY +4446-2275-0016-1211: HILDA WATCHED HIM FROM (HER->THE) CORNER TREMBLING AND SCARCELY BREATHING DARK SHADOWS GROWING ABOUT HER EYES (IT->*) +4446-2275-0017-1212: BUT IT'S WORSE NOW IT'S UNBEARABLE +4446-2275-0018-1213: I GET NOTHING BUT MISERY OUT OF EITHER +4446-2275-0019-1214: THE WORLD IS ALL THERE JUST AS IT USED TO BE BUT I CAN'T GET AT IT ANY MORE +4446-2275-0020-1215: IT WAS MYSELF I WAS DEFYING (HILDA->HELDA) +4446-2275-0021-1216: (HILDA'S->HELDA'S) FACE QUIVERED BUT SHE WHISPERED YES I THINK IT MUST HAVE BEEN +4446-2275-0022-1217: BUT WHY DIDN'T YOU TELL ME WHEN YOU WERE HERE IN THE SUMMER +4446-2275-0023-1218: ALEXANDER GROANED I MEANT TO BUT SOMEHOW I COULDN'T +4446-2275-0024-1219: SHE PRESSED HIS HAND GENTLY IN GRATITUDE +4446-2275-0025-1220: WEREN'T YOU HAPPY THEN AT ALL +4446-2275-0026-1221: SHE CLOSED HER EYES AND TOOK A DEEP BREATH AS IF TO DRAW IN AGAIN THE FRAGRANCE OF THOSE DAYS +4446-2275-0027-1222: HE MOVED UNEASILY AND HIS CHAIR CREAKED +4446-2275-0028-1223: YES YES SHE HURRIED PULLING HER HAND GENTLY AWAY FROM HIM +4446-2275-0029-1224: PLEASE TELL ME ONE THING BARTLEY AT LEAST TELL ME THAT YOU BELIEVE I THOUGHT I WAS MAKING YOU HAPPY +4446-2275-0030-1225: YES (HILDA->HELDA) I KNOW THAT HE SAID SIMPLY +4446-2275-0031-1226: I UNDERSTAND BARTLEY I WAS WRONG +4446-2275-0032-1227: BUT I DIDN'T KNOW YOU'VE ONLY TO TELL ME NOW +4446-2275-0033-1228: WHAT I MEAN IS THAT I WANT YOU TO PROMISE NEVER TO SEE ME AGAIN NO MATTER HOW OFTEN I COME NO MATTER HOW HARD I BEG +4446-2275-0034-1229: KEEP AWAY IF YOU WISH WHEN HAVE I EVER FOLLOWED YOU +4446-2275-0035-1230: ALEXANDER ROSE AND SHOOK HIMSELF ANGRILY YES I KNOW I'M COWARDLY +4446-2275-0036-1231: HE TOOK (HER->A) ROUGHLY IN HIS ARMS DO YOU KNOW WHAT I MEAN +4446-2275-0037-1232: OH BARTLEY WHAT AM I TO DO +4446-2275-0038-1233: I WILL ASK THE LEAST IMAGINABLE BUT I MUST HAVE SOMETHING +4446-2275-0039-1234: I MUST KNOW ABOUT YOU +4446-2275-0040-1235: THE SIGHT OF YOU BARTLEY TO SEE YOU LIVING AND HAPPY AND SUCCESSFUL CAN I NEVER MAKE YOU UNDERSTAND WHAT THAT MEANS TO ME 
+4446-2275-0041-1236: YOU SEE LOVING SOME ONE AS I LOVE YOU MAKES THE WHOLE WORLD DIFFERENT +4446-2275-0042-1237: AND THEN YOU CAME BACK NOT CARING VERY MUCH BUT IT MADE NO DIFFERENCE +4446-2275-0043-1238: BARTLEY BENT OVER AND TOOK HER IN HIS ARMS KISSING HER MOUTH AND HER WET TIRED EYES +4446-2275-0044-1239: (DON'T->A TALL) CRY DON'T CRY HE WHISPERED +4446-2275-0045-1240: (WE'VE->WITH) TORTURED EACH OTHER ENOUGH FOR (TONIGHT->TO NIGHT) +4507-16021-0000-1469: CHAPTER ONE ORIGIN +4507-16021-0001-1470: IT ENGENDERS A WHOLE WORLD (LA PEGRE->LAPE) FOR WHICH (READ->RED) THEFT AND A HELL LA (PEGRENNE->PAGRIN) FOR WHICH (READ->RED) HUNGER +4507-16021-0002-1471: THUS IDLENESS IS THE MOTHER +4507-16021-0003-1472: SHE HAS A SON THEFT AND A DAUGHTER HUNGER +4507-16021-0004-1473: WHAT IS SLANG +4507-16021-0005-1474: WE HAVE NEVER UNDERSTOOD THIS SORT OF OBJECTIONS +4507-16021-0006-1475: SLANG IS ODIOUS +4507-16021-0007-1476: SLANG MAKES ONE SHUDDER +4507-16021-0008-1477: WHO DENIES THAT OF COURSE IT DOES +4507-16021-0009-1478: WHEN IT IS A QUESTION OF PROBING A WOUND A GULF A SOCIETY SINCE (WHEN->ONE) HAS IT BEEN CONSIDERED WRONG TO GO TOO FAR TO GO TO THE BOTTOM +4507-16021-0010-1479: WE HAVE ALWAYS THOUGHT THAT IT WAS SOMETIMES A COURAGEOUS ACT AND AT LEAST A SIMPLE AND USEFUL DEED WORTHY OF THE SYMPATHETIC ATTENTION WHICH DUTY ACCEPTED AND FULFILLED MERITS +4507-16021-0011-1480: WHY SHOULD ONE NOT EXPLORE EVERYTHING AND STUDY EVERYTHING +4507-16021-0012-1481: WHY SHOULD ONE HALT ON THE WAY +4507-16021-0013-1482: NOTHING IS MORE LUGUBRIOUS THAN THE CONTEMPLATION THUS IN ITS NUDITY IN THE BROAD LIGHT OF THOUGHT OF THE HORRIBLE SWARMING OF SLANG +4507-16021-0014-1483: (NOW->NO) WHEN HAS HORROR EVER EXCLUDED STUDY +4507-16021-0015-1484: SINCE WHEN HAS MALADY BANISHED MEDICINE +4507-16021-0016-1485: CAN ONE IMAGINE A NATURALIST REFUSING TO STUDY THE VIPER THE BAT THE SCORPION THE CENTIPEDE THE (TARANTULA->TURANSULA) AND ONE WHO WOULD CAST THEM BACK INTO THEIR DARKNESS SAYING (OH->O) HOW UGLY THAT IS +4507-16021-0017-1486: HE WOULD BE LIKE A PHILOLOGIST REFUSING TO EXAMINE A FACT IN LANGUAGE A PHILOSOPHER HESITATING TO SCRUTINIZE A FACT IN HUMANITY +4507-16021-0018-1487: WHAT IS SLANG PROPERLY SPEAKING +4507-16021-0019-1488: IT IS THE LANGUAGE OF WRETCHEDNESS +4507-16021-0020-1489: WE MAY BE STOPPED THE FACT MAY BE PUT TO US IN GENERAL TERMS WHICH IS ONE WAY OF ATTENUATING IT WE MAY BE TOLD THAT ALL TRADES PROFESSIONS IT MAY BE ADDED ALL THE ACCIDENTS OF THE SOCIAL HIERARCHY AND ALL FORMS OF INTELLIGENCE HAVE THEIR OWN SLANG +4507-16021-0021-1490: THE PAINTER WHO SAYS MY GRINDER THE NOTARY WHO SAYS MY SKIP THE GUTTER THE (HAIRDRESSER->HAIR DRESSER) WHO SAYS MY (MEALYBACK->MEALEY BACK) THE COBBLER WHO SAYS MY CUB TALKS (SLANG->SLING) +4507-16021-0022-1491: THERE IS THE (SLANG->SLAYING) OF THE AFFECTED LADY AS WELL AS OF THE (PRECIEUSES->PURSUS) +4507-16021-0023-1492: THE SUGAR MANUFACTURER WHO SAYS LOAF CLARIFIED LUMPS BASTARD COMMON BURNT THIS HONEST MANUFACTURER TALKS SLANG +4507-16021-0024-1493: ALGEBRA MEDICINE (BOTANY->BARTANY) HAVE EACH THEIR SLANG +4507-16021-0025-1494: TO MEET THE NEEDS OF THIS CONFLICT WRETCHEDNESS HAS INVENTED A LANGUAGE OF COMBAT WHICH IS SLANG +4507-16021-0026-1495: TO KEEP AFLOAT AND TO RESCUE FROM OBLIVION TO HOLD ABOVE THE GULF (WERE->WHERE) IT BUT A FRAGMENT OF SOME LANGUAGE WHICH MAN HAS SPOKEN AND WHICH WOULD OTHERWISE BE LOST THAT IS TO SAY ONE OF THE ELEMENTS GOOD OR BAD OF WHICH CIVILIZATION IS COMPOSED OR BY WHICH IT IS COMPLICATED TO EXTEND THE RECORDS OF SOCIAL 
OBSERVATION IS TO SERVE CIVILIZATION ITSELF +4507-16021-0027-1496: PHOENICIAN VERY GOOD +4507-16021-0028-1497: EVEN DIALECT LET THAT PASS +4507-16021-0029-1498: TO THIS WE REPLY IN ONE WORD ONLY +4507-16021-0030-1499: ASSUREDLY IF THE TONGUE WHICH A NATION OR A PROVINCE HAS SPOKEN IS WORTHY OF INTEREST THE LANGUAGE WHICH HAS BEEN SPOKEN BY A MISERY IS STILL MORE WORTHY OF ATTENTION AND STUDY +4507-16021-0031-1500: AND THEN WE INSIST UPON IT THE STUDY OF SOCIAL DEFORMITIES AND INFIRMITIES AND THE TASK OF POINTING THEM OUT WITH (A->THE) VIEW TO REMEDY IS NOT A BUSINESS IN WHICH (CHOICE IS->CHOICES) PERMITTED +4507-16021-0032-1501: HE MUST DESCEND WITH HIS HEART FULL OF CHARITY AND SEVERITY AT THE SAME TIME AS A BROTHER AND AS A JUDGE TO THOSE IMPENETRABLE CASEMATES (WHERE->WERE) CRAWL PELL MELL THOSE WHO BLEED AND THOSE WHO DEAL THE BLOW THOSE WHO WEEP (AND->IN) THOSE WHO CURSE THOSE WHO FAST (AND->IN) THOSE WHO DEVOUR THOSE WHO ENDURE EVIL AND THOSE WHO INFLICT IT +4507-16021-0033-1502: DO WE REALLY KNOW THE MOUNTAIN WELL WHEN WE ARE NOT ACQUAINTED WITH THE CAVERN +4507-16021-0034-1503: THEY CONSTITUTE TWO DIFFERENT ORDERS OF FACTS WHICH CORRESPOND TO EACH OTHER WHICH ARE ALWAYS INTERLACED AND WHICH OFTEN BRING FORTH RESULTS +4507-16021-0035-1504: TRUE HISTORY BEING A MIXTURE OF ALL THINGS THE TRUE HISTORIAN MINGLES IN EVERYTHING +4507-16021-0036-1505: FACTS FORM ONE OF THESE AND IDEAS THE OTHER +4507-16021-0037-1506: THERE IT CLOTHES ITSELF IN WORD MASKS IN METAPHOR RAGS +4507-16021-0038-1507: IN THIS (GUISE->SKIES) IT BECOMES HORRIBLE +4507-16021-0039-1508: ONE PERCEIVES WITHOUT UNDERSTANDING IT A HIDEOUS MURMUR SOUNDING ALMOST LIKE HUMAN ACCENTS BUT MORE NEARLY RESEMBLING A HOWL THAN AN ARTICULATE WORD +4507-16021-0040-1509: ONE THINKS ONE HEARS HYDRAS TALKING +4507-16021-0041-1510: IT IS UNINTELLIGIBLE IN THE DARK +4507-16021-0042-1511: IT IS BLACK (IN->AND) MISFORTUNE IT IS BLACKER STILL (IN->AND) CRIME THESE TWO BLACKNESSES AMALGAMATED (COMPOSE SLANG->COMPOSED SLING) +4507-16021-0043-1512: THE EARTH IS NOT DEVOID OF RESEMBLANCE TO A JAIL +4507-16021-0044-1513: LOOK CLOSELY AT LIFE +4507-16021-0045-1514: IT IS SO MADE THAT EVERYWHERE WE FEEL THE SENSE OF PUNISHMENT +4507-16021-0046-1515: EACH DAY HAS ITS OWN GREAT GRIEF (OR->FOR) ITS LITTLE CARE +4507-16021-0047-1516: YESTERDAY YOU WERE TREMBLING FOR A HEALTH THAT IS DEAR TO YOU TO DAY YOU FEAR FOR YOUR OWN TO MORROW IT WILL BE ANXIETY ABOUT MONEY THE DAY AFTER TO MORROW THE (DIATRIBE->DIETRIBE) OF A SLANDERER THE DAY AFTER THAT THE MISFORTUNE OF SOME FRIEND THEN THE PREVAILING WEATHER THEN SOMETHING THAT HAS BEEN BROKEN OR LOST THEN A PLEASURE WITH WHICH YOUR CONSCIENCE AND YOUR VERTEBRAL COLUMN REPROACH YOU AGAIN THE COURSE OF PUBLIC AFFAIRS +4507-16021-0048-1517: THIS WITHOUT RECKONING IN THE PAINS OF THE HEART AND SO (IT->TO) GOES ON +4507-16021-0049-1518: THERE IS HARDLY ONE DAY OUT OF A HUNDRED WHICH IS WHOLLY JOYOUS AND SUNNY +4507-16021-0050-1519: AND YOU BELONG TO THAT SMALL CLASS WHO ARE HAPPY +4507-16021-0051-1520: IN THIS (WORLD->WORLD'S) EVIDENTLY THE VESTIBULE OF ANOTHER THERE ARE NO FORTUNATE +4507-16021-0052-1521: THE REAL HUMAN DIVISION IS THIS THE LUMINOUS AND THE SHADY +4507-16021-0053-1522: TO DIMINISH THE NUMBER OF THE SHADY TO AUGMENT THE NUMBER OF THE LUMINOUS THAT IS THE OBJECT +4507-16021-0054-1523: THAT IS WHY WE CRY EDUCATION SCIENCE +4507-16021-0055-1524: TO TEACH READING MEANS TO LIGHT THE FIRE EVERY SYLLABLE (SPELLED->SPELL'D) OUT SPARKLES +4507-16021-0056-1525: HOWEVER HE WHO SAYS LIGHT DOES NOT 
NECESSARILY SAY JOY +4507-16021-0057-1526: PEOPLE SUFFER IN THE LIGHT EXCESS BURNS +4507-16021-0058-1527: THE FLAME IS THE ENEMY OF THE WING +4507-16021-0059-1528: TO BURN WITHOUT CEASING TO FLY THEREIN LIES THE MARVEL OF GENIUS +4970-29093-0000-2093: YOU'LL NEVER DIG IT OUT OF THE (ASTOR->ASTER) LIBRARY +4970-29093-0001-2094: TO THE YOUNG AMERICAN HERE OR ELSEWHERE THE PATHS TO FORTUNE ARE INNUMERABLE AND ALL OPEN THERE IS INVITATION IN THE AIR AND SUCCESS IN ALL HIS WIDE HORIZON +4970-29093-0002-2095: HE HAS NO TRADITIONS TO BIND HIM OR GUIDE HIM AND HIS IMPULSE IS TO BREAK AWAY FROM THE OCCUPATION HIS FATHER HAS FOLLOWED AND MAKE A NEW WAY FOR HIMSELF +4970-29093-0003-2096: THE MODEST FELLOW WOULD HAVE LIKED FAME THRUST UPON HIM FOR SOME WORTHY ACHIEVEMENT IT MIGHT BE FOR A BOOK OR FOR THE (SKILLFUL->SKILFUL) MANAGEMENT OF SOME GREAT NEWSPAPER OR FOR SOME DARING EXPEDITION LIKE THAT OF LIEUTENANT (STRAIN->STRAYNE) OR DOCTOR KANE +4970-29093-0004-2097: HE WAS UNABLE TO DECIDE EXACTLY WHAT IT SHOULD BE +4970-29093-0005-2098: SOMETIMES HE THOUGHT HE WOULD LIKE TO STAND IN A CONSPICUOUS PULPIT AND HUMBLY PREACH THE GOSPEL OF REPENTANCE AND IT EVEN CROSSED HIS MIND THAT IT WOULD BE NOBLE TO GIVE HIMSELF TO A MISSIONARY LIFE TO SOME BENIGHTED REGION WHERE THE DATE PALM (GROWS->GROVES) AND THE NIGHTINGALE'S VOICE IS IN TUNE AND THE (BUL BUL->BULL BOWL) SINGS ON THE (OFF->OPT) NIGHTS +4970-29093-0006-2099: LAW SEEMED TO HIM WELL ENOUGH AS A SCIENCE BUT HE NEVER COULD DISCOVER A PRACTICAL CASE WHERE IT APPEARED TO HIM WORTH WHILE TO GO TO LAW AND ALL THE CLIENTS WHO STOPPED WITH THIS NEW CLERK IN THE ANTE ROOM OF THE LAW OFFICE WHERE HE WAS WRITING PHILIP INVARIABLY ADVISED TO SETTLE NO MATTER HOW BUT SETTLE GREATLY TO THE DISGUST OF HIS EMPLOYER WHO KNEW THAT JUSTICE BETWEEN MAN AND MAN COULD ONLY BE ATTAINED BY THE RECOGNIZED PROCESSES WITH THE ATTENDANT FEES +4970-29093-0007-2100: IT IS SUCH A NOBLE AMBITION THAT IT IS A PITY IT HAS USUALLY SUCH A SHALLOW FOUNDATION +4970-29093-0008-2101: HE WANTED TO BEGIN AT THE TOP OF THE LADDER +4970-29093-0009-2102: PHILIP THEREFORE READ DILIGENTLY IN THE (ASTOR->ASTER) LIBRARY PLANNED LITERARY WORKS THAT SHOULD COMPEL ATTENTION AND NURSED HIS GENIUS +4970-29093-0010-2103: HE HAD NO FRIEND WISE ENOUGH TO TELL HIM TO STEP INTO THE DORKING CONVENTION (THEN->THAN) IN SESSION MAKE A SKETCH OF THE MEN AND WOMEN ON THE PLATFORM AND TAKE IT TO THE EDITOR OF THE DAILY (GRAPEVINE->GRAPE VINE) AND SEE WHAT HE COULD GET A LINE FOR IT +4970-29093-0011-2104: (O->OH) VERY WELL SAID (GRINGO->GREENOW) TURNING AWAY WITH A SHADE OF CONTEMPT YOU'LL FIND IF YOU ARE GOING INTO LITERATURE AND NEWSPAPER WORK THAT YOU CAN'T AFFORD A CONSCIENCE LIKE THAT +4970-29093-0012-2105: BUT PHILIP DID AFFORD IT AND HE WROTE THANKING HIS FRIENDS AND DECLINING BECAUSE HE SAID THE POLITICAL SCHEME WOULD FAIL AND OUGHT TO FAIL +4970-29093-0013-2106: AND HE WENT BACK TO HIS BOOKS AND TO HIS WAITING FOR AN OPENING LARGE ENOUGH FOR HIS DIGNIFIED ENTRANCE INTO THE LITERARY WORLD +4970-29093-0014-2107: WELL I'M GOING AS AN ENGINEER YOU (CAN->COULD) GO AS ONE +4970-29093-0015-2108: YOU CAN BEGIN BY CARRYING A ROD AND PUTTING DOWN THE FIGURES +4970-29093-0016-2109: NO (ITS NOT->IT'S OUGHT) TOO SOON +4970-29093-0017-2110: I'VE BEEN READY TO GO ANYWHERE FOR SIX MONTHS +4970-29093-0018-2111: THE TWO YOUNG MEN WHO WERE BY THIS TIME FULL OF THE ADVENTURE WENT DOWN TO THE WALL STREET OFFICE OF HENRY'S UNCLE AND HAD A TALK WITH THAT WILY OPERATOR +4970-29093-0019-2112: THE NIGHT WAS SPENT IN PACKING UP AND 
WRITING LETTERS FOR PHILIP WOULD NOT TAKE SUCH AN IMPORTANT STEP WITHOUT INFORMING HIS FRIENDS +4970-29093-0020-2113: WHY IT'S (IN->A) MISSOURI SOMEWHERE ON THE FRONTIER I THINK WE'LL GET A MAP +4970-29093-0021-2114: I WAS AFRAID IT WAS NEARER HOME +4970-29093-0022-2115: HE KNEW HIS UNCLE WOULD BE GLAD TO HEAR THAT HE HAD AT LAST TURNED HIS THOUGHTS TO A PRACTICAL MATTER +4970-29093-0023-2116: HE WELL KNEW THE PERILS OF THE FRONTIER THE SAVAGE STATE OF SOCIETY THE LURKING INDIANS AND THE DANGERS OF FEVER +4970-29095-0000-2054: SHE WAS TIRED OF OTHER THINGS +4970-29095-0001-2055: SHE TRIED THIS MORNING AN AIR OR TWO UPON THE PIANO (SANG->SAYING) A SIMPLE SONG (IN->AND) A SWEET BUT SLIGHTLY METALLIC VOICE AND THEN SEATING HERSELF BY THE OPEN WINDOW READ PHILIP'S LETTER +4970-29095-0002-2056: WELL MOTHER SAID THE YOUNG STUDENT LOOKING UP WITH A SHADE OF IMPATIENCE +4970-29095-0003-2057: I HOPE THEE TOLD THE ELDERS THAT FATHER AND I ARE RESPONSIBLE FOR THE PIANO AND THAT MUCH AS THEE LOVES MUSIC THEE IS NEVER IN THE ROOM WHEN IT IS PLAYED +4970-29095-0004-2058: I HEARD FATHER TELL COUSIN ABNER THAT HE WAS WHIPPED SO OFTEN FOR WHISTLING WHEN HE WAS A BOY THAT HE WAS DETERMINED TO HAVE WHAT COMPENSATION HE COULD GET NOW +4970-29095-0005-2059: THY WAYS GREATLY TRY ME RUTH AND ALL THY RELATIONS +4970-29095-0006-2060: IS THY FATHER WILLING THEE SHOULD GO AWAY TO A SCHOOL OF THE WORLD'S PEOPLE +4970-29095-0007-2061: I HAVE NOT ASKED HIM RUTH REPLIED WITH A LOOK THAT MIGHT IMPLY THAT SHE WAS ONE OF THOSE DETERMINED LITTLE BODIES WHO FIRST MADE UP HER OWN MIND AND THEN COMPELLED OTHERS TO MAKE UP THEIRS IN ACCORDANCE WITH HERS +4970-29095-0008-2062: MOTHER (I'M->I AM) GOING TO STUDY MEDICINE +4970-29095-0009-2063: MARGARET BOLTON ALMOST LOST FOR A MOMENT HER HABITUAL PLACIDITY +4970-29095-0010-2064: (THEE->THE) STUDY MEDICINE +4970-29095-0011-2065: DOES THEE THINK THEE COULD STAND (IT->AT) SIX MONTHS +4970-29095-0012-2066: AND BESIDES SUPPOSE THEE DOES LEARN MEDICINE +4970-29095-0013-2067: I WILL (PRACTICE->PRACTISE) IT +4970-29095-0014-2068: (WHERE->WHERE'S) THEE AND THY FAMILY ARE KNOWN +4970-29095-0015-2069: IF I CAN GET (PATIENTS->PATIENCE) +4970-29095-0016-2070: RUTH SAT QUITE STILL FOR A TIME WITH FACE INTENT AND FLUSHED IT WAS OUT NOW +4970-29095-0017-2071: THE (SIGHT SEERS->SIGHTSEERS) RETURNED IN HIGH SPIRITS FROM THE CITY +4970-29095-0018-2072: RUTH ASKED THE ENTHUSIASTS IF THEY WOULD LIKE TO LIVE IN SUCH A SOUNDING (MAUSOLEUM->MUZOLEUM) WITH ITS GREAT HALLS AND ECHOING ROOMS AND NO COMFORTABLE PLACE IN IT FOR THE ACCOMMODATION OF ANY BODY +4970-29095-0019-2073: AND THEN THERE WAS BROAD STREET +4970-29095-0020-2074: THERE (*->IS) CERTAINLY WAS NO END TO IT AND EVEN RUTH WAS PHILADELPHIAN ENOUGH TO BELIEVE THAT A STREET OUGHT NOT TO HAVE ANY END OR ARCHITECTURAL (POINT->BLINT) UPON WHICH THE WEARY EYE COULD REST +4970-29095-0021-2075: BUT NEITHER SAINT (GIRARD->GERARD) NOR BROAD STREET NEITHER WONDERS OF THE (MINT->MENT) NOR THE GLORIES OF THE HALL WHERE THE GHOSTS OF OUR FATHERS SIT ALWAYS SIGNING THE DECLARATION (IMPRESSED->IMPRESS) THE VISITORS SO MUCH AS THE SPLENDORS OF THE CHESTNUT STREET WINDOWS AND THE BARGAINS ON EIGHTH STREET +4970-29095-0022-2076: IS THEE GOING TO THE YEARLY MEETING RUTH ASKED ONE OF THE GIRLS +4970-29095-0023-2077: I HAVE NOTHING TO WEAR REPLIED THAT DEMURE PERSON +4970-29095-0024-2078: IT HAS OCCUPIED MOTHER A LONG TIME TO FIND (AT->THAT) THE SHOPS THE EXACT SHADE FOR HER NEW BONNET +4970-29095-0025-2079: AND THEE WON'T GO WHY SHOULD I +4970-29095-0026-2080: IF I 
GO TO MEETING AT ALL I LIKE BEST TO SIT IN THE QUIET OLD HOUSE IN GERMANTOWN WHERE THE WINDOWS ARE ALL OPEN AND I CAN SEE THE TREES AND (HEAR->HERE) THE STIR OF THE LEAVES +4970-29095-0027-2081: IT'S SUCH A CRUSH AT THE YEARLY MEETING AT ARCH STREET AND THEN THERE'S THE ROW OF SLEEK LOOKING YOUNG MEN WHO (LINE->LIE IN) THE CURBSTONE AND STARE AT US AS WE COME OUT +4970-29095-0028-2082: HE DOESN'T SAY BUT IT'S ON THE FRONTIER AND ON THE MAP EVERYTHING BEYOND IT IS MARKED INDIANS AND DESERT AND LOOKS AS DESOLATE AS A (WEDNESDAY->WINDSAY) MEETING (HUMPH->*) IT WAS TIME FOR HIM TO DO SOMETHING +4970-29095-0029-2083: IS HE GOING TO START A DAILY NEWSPAPER AMONG THE (KICK A POOS->KICKAPOOS) +4970-29095-0030-2084: FATHER (THEE'S UNJUST TO PHILIP->THESE UNJUSTIFILL UP) HE'S GOING INTO BUSINESS +4970-29095-0031-2085: HE DOESN'T SAY EXACTLY WHAT IT IS SAID RUTH A LITTLE DUBIOUSLY BUT IT'S SOMETHING ABOUT LAND AND RAILROADS AND (THEE KNOWS->THE NOSE) FATHER THAT FORTUNES ARE MADE NOBODY KNOWS EXACTLY HOW IN A NEW COUNTRY +4970-29095-0032-2086: BUT PHILIP IS HONEST AND HE HAS TALENT ENOUGH IF HE WILL STOP SCRIBBLING TO MAKE HIS WAY +4970-29095-0033-2087: WHAT A (BOX WOMEN->BOXWOMEN) ARE PUT INTO MEASURED FOR IT AND PUT IN YOUNG IF WE GO ANYWHERE IT'S IN A BOX VEILED AND PINIONED AND SHUT IN BY DISABILITIES +4970-29095-0034-2088: WHY SHOULD I RUST AND BE STUPID AND SIT IN (INACTION->AN ACTION) BECAUSE I AM A GIRL +4970-29095-0035-2089: AND IF I HAD A FORTUNE WOULD THEE WANT ME TO LEAD A USELESS LIFE +4970-29095-0036-2090: HAS (THEE->THE) CONSULTED THY MOTHER ABOUT A CAREER I SUPPOSE IT IS A CAREER (*->OF) THEE WANTS +4970-29095-0037-2091: BUT THAT WISE AND PLACID WOMAN UNDERSTOOD THE SWEET REBEL A GREAT DEAL BETTER THAN RUTH UNDERSTOOD HERSELF +4970-29095-0038-2092: RUTH WAS GLAD TO HEAR THAT PHILIP HAD MADE A PUSH INTO THE WORLD AND SHE WAS SURE THAT HIS TALENT AND COURAGE WOULD MAKE (A WAY->AWAY) FOR HIM +4992-23283-0000-2140: BUT THE MORE FORGETFULNESS HAD THEN PREVAILED THE MORE POWERFUL WAS THE FORCE OF REMEMBRANCE WHEN SHE AWOKE +4992-23283-0001-2141: MISS MILNER'S HEALTH IS NOT GOOD +4992-23283-0002-2142: SAID MISSUS (HORTON->WHARTON) A FEW MINUTES AFTER +4992-23283-0003-2143: SO THERE IS TO ME ADDED SANDFORD WITH A SARCASTIC SNEER +4992-23283-0004-2144: AND YET YOU MUST OWN HER (BEHAVIOUR->BEHAVIOR) HAS WARRANTED THEM HAS IT NOT BEEN IN THIS PARTICULAR INCOHERENT AND UNACCOUNTABLE +4992-23283-0005-2145: NOT THAT I KNOW OF NOT ONE MORE THAT I KNOW OF HE REPLIED WITH ASTONISHMENT AT WHAT SHE HAD INSINUATED AND YET WITH A PERFECT ASSURANCE THAT SHE WAS IN THE WRONG +4992-23283-0006-2146: PERHAPS I AM MISTAKEN ANSWERED SHE +4992-23283-0007-2147: TO ASK ANY MORE QUESTIONS OF YOU I BELIEVE WOULD BE UNFAIR +4992-23283-0008-2148: HE SEEMED TO WAIT FOR HER REPLY BUT AS SHE MADE NONE HE PROCEEDED +4992-23283-0009-2149: OH MY LORD CRIED MISS WOODLEY WITH A MOST FORCIBLE ACCENT YOU ARE THE LAST PERSON ON EARTH SHE WOULD PARDON ME FOR (ENTRUSTING->INTRUSTING) +4992-23283-0010-2150: BUT IN SUCH A CASE MISS MILNER'S ELECTION OF A HUSBAND SHALL NOT DIRECT MINE +4992-23283-0011-2151: IF SHE DOES NOT KNOW HOW TO ESTIMATE HER OWN VALUE I DO +4992-23283-0012-2152: INDEPENDENT OF HER FORTUNE SHE HAS BEAUTY TO CAPTIVATE THE HEART OF ANY MAN AND WITH ALL HER FOLLIES SHE HAS A FRANKNESS IN HER MANNER AN UNAFFECTED WISDOM IN HER THOUGHTS (*->OF) A VIVACITY IN HER CONVERSATION AND WITHAL A SOFTNESS IN HER DEMEANOUR THAT MIGHT ALONE ENGAGE THE AFFECTIONS OF A MAN OF THE NICEST SENTIMENTS AND THE STRONGEST UNDERSTANDING 
+4992-23283-0013-2153: MY LORD MISS MILNER'S TASTE IS NOT A DEPRAVED ONE IT IS BUT TOO REFINED +4992-23283-0014-2154: WHAT CAN YOU MEAN BY THAT MISS WOODLEY YOU TALK MYSTERIOUSLY +4992-23283-0015-2155: IS SHE NOT AFRAID THAT I WILL THWART HER INCLINATIONS +4992-23283-0016-2156: AGAIN HE SEARCHED HIS OWN THOUGHTS NOR INEFFECTUALLY AS BEFORE +4992-23283-0017-2157: MISS WOODLEY WAS TOO LITTLE VERSED IN THE SUBJECT TO KNOW THIS WOULD HAVE BEEN NOT TO LOVE AT ALL AT LEAST NOT TO THE EXTENT OF BREAKING THROUGH ENGAGEMENTS AND ALL THE VARIOUS OBSTACLES THAT STILL (MILITATED->MITIGATED) AGAINST THEIR UNION +4992-23283-0018-2158: TO RELIEVE HER FROM BOTH HE LAID HIS HAND WITH FORCE UPON HIS HEART AND SAID DO YOU BELIEVE ME +4992-23283-0019-2159: I WILL MAKE NO UNJUST USE OF WHAT I KNOW HE REPLIED WITH FIRMNESS I BELIEVE YOU MY LORD +4992-23283-0020-2160: I HAVE NEVER YET HOWEVER BEEN VANQUISHED BY THEM AND EVEN UPON THIS OCCASION MY REASON SHALL COMBAT THEM TO THE LAST AND MY REASON SHALL FAIL ME BEFORE I DO WRONG +4992-41797-0000-2117: YES DEAD THESE FOUR YEARS (AN->AND) A GOOD JOB FOR HER TOO +4992-41797-0001-2118: WELL AS I SAY IT'S AN AWFUL QUEER WORLD THEY CLAP ALL THE BURGLARS (INTO JAIL->*) AND (*->DOWN) THE MURDERERS AND THE (WIFE->WHITE) BEATERS (I'VE->I) ALLERS THOUGHT A GENTLE REPROOF WOULD BE ENOUGH PUNISHMENT FOR A WIFE BEATER CAUSE HE PROBABLY HAS A LOT (O->OF) PROVOCATION THAT NOBODY KNOWS AND THE (FIREBUGS->FIRE BUGS) CAN'T THINK (O->OF) THE RIGHT NAME (SOMETHING->SOMETHIN) LIKE (CENDENARIES AN->SENDIARIES AND) THE BREAKERS (O->OF) THE PEACE (AN->AND) WHAT NOT (AN->AND) YET THE LAW HAS (NOTHIN->NOTHING) TO SAY TO A MAN LIKE (HEN LORD->HANDLED) +4992-41797-0002-2119: GRANDFATHER WAS ALEXANDER CAREY L (L->*) D DOCTOR OF LAWS THAT IS +4992-41797-0003-2120: MISTER POPHAM LAID DOWN HIS BRUSH +4992-41797-0004-2121: I (SWAN TO MAN->SWAY INTO MEN) HE EJACULATED IF YOU DON'T WORK HARD YOU CAN'T KEEP UP WITH THE TIMES DOCTOR OF LAWS +4992-41797-0005-2122: DONE HE AIN'T DONE A THING (HE'D OUGHTER SENCE->HE ORDERED SINCE) HE WAS BORN +4992-41797-0006-2123: HE KEEPS THE THOU SHALT NOT COMMANDMENTS FIRST RATE (HEN LORD->HENLOORD) DOES +4992-41797-0007-2124: HE (GIVE->GAVE) UP HIS POSITION AND SHUT THE FAMILY UP IN THAT TOMB OF A HOUSE (SO T->SODIN) HE (COULD->COULDN'T) STUDY HIS BOOKS +4992-41797-0008-2125: MISTER POPHAM EXAGGERATED NOTHING BUT ON THE CONTRARY LEFT MUCH UNSAID IN HIS NARRATIVE OF THE FAMILY AT THE HOUSE OF LORDS +4992-41797-0009-2126: HENRY LORD WITH THE DEGREE OF PH (D->*) TO HIS CREDIT HAD BEEN PROFESSOR OF ZOOLOGY AT A NEW ENGLAND COLLEGE BUT HAD RESIGNED HIS POST IN ORDER TO WRITE A SERIES OF SCIENTIFIC TEXT BOOKS +4992-41797-0010-2127: ALWAYS IRRITABLE COLD INDIFFERENT HE HAD GROWN RAPIDLY MORE SO AS YEARS WENT ON +4992-41797-0011-2128: WHATEVER APPEALED TO HER SENSE OF BEAUTY WAS STRAIGHTWAY TRANSFERRED TO PAPER OR (CANVAS->GAMBUS) +4992-41797-0012-2129: SHE IS WILD TO KNOW HOW TO DO THINGS +4992-41797-0013-2130: SHE MAKES EFFORT AFTER EFFORT TREMBLING WITH EAGERNESS (AND->THAN) WHEN SHE FAILS TO REPRODUCE WHAT SHE SEES SHE WORKS HERSELF INTO A FRENZY OF GRIEF AND DISAPPOINTMENT +4992-41797-0014-2131: WHEN SHE COULD NOT MAKE A RABBIT OR A BIRD LOOK REAL ON PAPER SHE SEARCHED IN HER FATHER'S BOOKS FOR PICTURES OF ITS BONES +4992-41797-0015-2132: CYRIL THERE MUST BE SOME BETTER WAY OF DOING I JUST DRAW THE OUTLINE OF AN ANIMAL AND THEN I PUT HAIRS OR FEATHERS ON IT THEY HAVE NO BODIES +4992-41797-0016-2133: THEY COULDN'T RUN (NOR->OR) MOVE THEY'RE JUST PASTEBOARD 
+4992-41797-0017-2134: HE WOULDN'T SEARCH SO DON'T WORRY REPLIED CYRIL QUIETLY AND THE TWO LOOKED AT EACH OTHER AND KNEW THAT IT WAS SO +4992-41797-0018-2135: THERE IN THE CEDAR (HOLLOW->HOLLOWED) THEN LIVED OLIVE LORD AN ANGRY RESENTFUL LITTLE CREATURE WEIGHED DOWN BY A FIERCE SENSE OF INJURY +4992-41797-0019-2136: (OLIVE'S->ALL OF THIS) MOURNFUL BLACK EYES MET NANCY'S SPARKLING BROWN ONES +4992-41797-0020-2137: NANCY'S CURLY CHESTNUT CROP SHONE IN THE SUN AND OLIVE'S THICK BLACK (PLAITS->PLATES) LOOKED BLACKER BY CONTRAST +4992-41797-0021-2138: (SHE'S->SHE IS) WONDERFUL MORE WONDERFUL (THAN->IN) ANYBODY WE'VE EVER SEEN ANYWHERE AND SHE DRAWS BETTER THAN THE TEACHER IN CHARLESTOWN +4992-41797-0022-2139: SHE'S OLDER THAN I AM BUT SO TINY AND SAD AND SHY THAT SHE SEEMS LIKE A CHILD +4992-41806-0000-2161: NATTY HARMON TRIED THE KITCHEN PUMP SECRETLY SEVERAL TIMES DURING THE EVENING FOR THE WATER HAD TO RUN UP HILL ALL THE WAY FROM THE WELL TO THE KITCHEN SINK AND HE BELIEVED THIS TO BE (A->*) CONTINUAL MIRACLE THAT MIGHT GIVE OUT AT ANY MOMENT +4992-41806-0001-2162: TO NIGHT THERE WAS NO NEED OF EXTRA HEAT AND THERE WERE GREAT CEREMONIES TO BE OBSERVED IN LIGHTING THE FIRES ON THE HEARTHSTONES +4992-41806-0002-2163: THEY BEGAN WITH THE ONE IN THE FAMILY SITTING ROOM COLONEL WHEELER RALPH THURSTON MISTER AND MISSUS BILL HARMON WITH NATTY AND (RUFUS->RUFFUS) MISTER AND MISSUS (POPHAM->POPPUM) WITH DIGBY AND (LALLIE->LALLY) JOY ALL STANDING IN ADMIRING GROUPS AND THRILLING WITH DELIGHT AT THE ORDER OF EVENTS +4992-41806-0003-2164: (KATHLEEN->CATHERINE) WAVED THE TORCH TO AND FRO AS SHE RECITED SOME BEAUTIFUL LINES WRITTEN FOR SOME SUCH PURPOSE AS THAT WHICH CALLED THEM TOGETHER TO NIGHT +4992-41806-0004-2165: (BURN->BURNE) FIRE BURN FLICKER FLICKER FLAME +4992-41806-0005-2166: NEXT CAME (OLIVE'S->OLIVES) TURN TO HELP IN THE CEREMONIES +4992-41806-0006-2167: RALPH THURSTON HAD FOUND A LINE OF LATIN FOR THEM IN HIS BELOVED (HORACE TIBI SPLENDET->HORRANCE TIBEE SPLENDID) FOCUS FOR YOU THE HEARTH FIRE SHINES +4992-41806-0007-2168: OLIVE HAD PAINTED THE MOTTO ON A LONG NARROW PANEL OF CANVAS AND GIVING IT TO MISTER POPHAM STOOD BY THE FIRESIDE WHILE HE DEFTLY FITTED IT INTO THE PLACE PREPARED FOR IT +4992-41806-0008-2169: OLIVE HAS ANOTHER LOVELY GIFT FOR THE YELLOW HOUSE SAID MOTHER CAREY RISING AND TO CARRY OUT THE NEXT PART OF THE PROGRAMME WE SHALL HAVE TO GO IN PROCESSION UPSTAIRS TO MY BEDROOM +4992-41806-0009-2170: EXCLAIMED BILL HARMON TO HIS WIFE AS THEY WENT THROUGH THE LIGHTED HALL +4992-41806-0010-2171: AIN'T THEY THE GREATEST +4992-41806-0011-2172: MOTHER CAREY POURED COFFEE NANCY CHOCOLATE AND THE OTHERS (HELPED SERVE->HELP SERVED) THE SANDWICHES AND CAKE DOUGHNUTS AND TARTS +4992-41806-0012-2173: AT THAT MOMENT THE GENTLEMAN ENTERED BEARING A HUGE OBJECT CONCEALED BY A PIECE OF GREEN (FELT->FILT) +4992-41806-0013-2174: APPROACHING THE DINING TABLE HE CAREFULLY PLACED THE ARTICLE IN THE CENTRE AND REMOVED THE CLOTH +4992-41806-0014-2175: (THINKS I->THINK SAD) TO MYSELF I NEVER SEEN ANYTHING (OSH POPHAM COULDN'T MEND->I WAS POPLED GOODN'T MEN) IF HE TOOK TIME ENOUGH AND GLUE ENOUGH SO I CARRIED THIS LITTLE FELLER HOME IN A BUSHEL BASKET ONE NIGHT LAST MONTH (AN->AND) I'VE SPENT ELEVEN (EVENIN'S PUTTIN->EVENINGS PUTTING) HIM TOGETHER +4992-41806-0015-2176: MISSUS HARMON THOUGHT HE SANG TOO MUCH AND TOLD HER HUSBAND PRIVATELY THAT IF HE WAS A CANARY BIRD SHE SHOULD WANT TO KEEP A TABLE COVER OVER HIS HEAD MOST OF THE TIME BUT HE WAS IMMENSELY POPULAR WITH THE REST OF HIS AUDIENCE 
+4992-41806-0016-2177: THE FACE OF THE MAHOGANY SHONE WITH DELIGHT AND WHY NOT WHEN IT WAS DOING EVERYTHING ALMOST EVERYTHING WITHIN THE SCOPE OF A PIANO AND YET THE FAMILY HAD ENJOYED WEEKS OF GOOD NOURISHING MEALS ON WHAT HAD BEEN SAVED BY ITS EXERTIONS +4992-41806-0017-2178: WE SHUT OUR EYES THE FLOWERS BLOOM ON WE MURMUR BUT THE (CORN EARS->CORNEERS) FILL WE CHOOSE THE SHADOW BUT THE SUN THAT (CASTS->CAST) IT SHINES BEHIND US STILL +5105-28233-0000-1649: LENGTH OF SERVICE FOURTEEN YEARS THREE MONTHS AND FIVE DAYS +5105-28233-0001-1650: HE SEEMED BORN TO PLEASE WITHOUT BEING CONSCIOUS OF THE POWER HE POSSESSED +5105-28233-0002-1651: IT MUST BE OWNED AND NO ONE WAS MORE READY TO CONFESS IT THAN HIMSELF THAT HIS LITERARY ATTAINMENTS WERE BY NO MEANS OF A HIGH ORDER +5105-28233-0003-1652: WE DON'T (SPIN->SPEND) TOPS IS A FAVORITE SAYING AMONGST ARTILLERY OFFICERS INDICATING THAT THEY DO NOT SHIRK THEIR DUTY BY FRIVOLOUS PURSUITS BUT IT MUST BE CONFESSED THAT SERVADAC BEING NATURALLY IDLE WAS VERY MUCH GIVEN TO SPINNING TOPS +5105-28233-0004-1653: ONCE IN ACTION HE WAS LEADING A DETACHMENT OF INFANTRY THROUGH AN (INTRENCHMENT->ENTRENCHMENT) +5105-28233-0005-1654: SOMETIMES HE WOULD WANDER ON FOOT UPON THE SANDY SHORE AND SOMETIMES HE WOULD ENJOY A RIDE ALONG THE SUMMIT OF THE CLIFF ALTOGETHER BEING IN NO HURRY AT ALL TO BRING HIS TASK TO AN END +5105-28233-0006-1655: NO CATHEDRAL NOT EVEN BURGOS ITSELF COULD VIE WITH THE CHURCH AT (MONTMARTRE->MONT MARTRA) +5105-28233-0007-1656: BEN ZOOF'S MOST AMBITIOUS DESIRE WAS TO INDUCE THE CAPTAIN TO GO WITH HIM AND END HIS DAYS IN HIS MUCH LOVED HOME AND SO INCESSANTLY WERE SERVADAC'S EARS BESIEGED WITH DESCRIPTIONS OF THE UNPARALLELED BEAUTIES AND ADVANTAGES OF THIS EIGHTEENTH (ARRONDISSEMENT->ARE ON DE SAINT) OF PARIS THAT HE COULD SCARCELY HEAR THE NAME OF (MONTMARTRE->MONTMARTRA) WITHOUT A CONSCIOUS THRILL OF AVERSION +5105-28233-0008-1657: WHEN A PRIVATE (IN->AND) THE EIGHTH CAVALRY HE HAD BEEN ON THE POINT OF QUITTING THE ARMY AT TWENTY EIGHT YEARS OF AGE BUT UNEXPECTEDLY HE HAD BEEN APPOINTED ORDERLY TO CAPTAIN SERVADAC +5105-28233-0009-1658: THE BOND OF UNION THUS EFFECTED COULD NEVER BE SEVERED AND ALTHOUGH BEN (ZOOF'S->ZOV'S) ACHIEVEMENTS HAD FAIRLY EARNED HIM THE RIGHT OF RETIREMENT HE FIRMLY DECLINED ALL HONORS OR ANY PENSION THAT MIGHT PART HIM FROM HIS SUPERIOR OFFICER +5105-28233-0010-1659: (UNLIKE->I MAKE) HIS MASTER HE MADE NO PRETENSION TO ANY GIFT OF POETIC POWER BUT HIS INEXHAUSTIBLE MEMORY MADE HIM A LIVING ENCYCLOPAEDIA AND FOR HIS STOCK OF ANECDOTES AND TROOPER'S TALES HE WAS MATCHLESS +5105-28240-0000-1624: FAST AS HIS LEGS COULD CARRY HIM SERVADAC HAD MADE HIS WAY TO THE TOP OF THE CLIFF +5105-28240-0001-1625: IT WAS QUITE TRUE THAT A VESSEL WAS IN SIGHT HARDLY MORE THAN SIX MILES FROM THE SHORE BUT OWING TO THE INCREASE IN THE EARTH'S CONVEXITY AND THE CONSEQUENT LIMITATION OF THE RANGE OF VISION THE RIGGING OF THE TOPMASTS ALONE WAS VISIBLE ABOVE THE WATER +5105-28240-0002-1626: EXCLAIMED SERVADAC KEEPING HIS EYE UNMOVED AT HIS TELESCOPE +5105-28240-0003-1627: SHE IS UNDER (SAIL->SALE) BUT SHE IS COUNT TIMASCHEFF'S YACHT HE WAS RIGHT +5105-28240-0004-1628: IF THE COUNT WERE ON BOARD A STRANGE FATALITY WAS BRINGING HIM TO THE PRESENCE OF HIS RIVAL +5105-28240-0005-1629: HE RECKONED THEREFORE NOT ONLY UPON ASCERTAINING THE EXTENT OF THE LATE CATASTROPHE BUT UPON LEARNING ITS CAUSE +5105-28240-0006-1630: THE WIND BEING ADVERSE THE (DOBRYNA->DOBRINA) DID NOT MAKE VERY RAPID PROGRESS BUT AS THE WEATHER IN SPITE OF A FEW CLOUDS 
REMAINED CALM AND THE SEA WAS QUITE SMOOTH SHE WAS ENABLED TO HOLD A STEADY COURSE +5105-28240-0007-1631: SERVADAC TOOK IT FOR GRANTED THAT THE (DOBRYNA->DOBRINA) WAS ENDEAVORING TO PUT IN +5105-28240-0008-1632: A NARROW CHANNEL FORMED A PASSAGE THROUGH THE RIDGE OF ROCKS THAT PROTECTED IT FROM THE OPEN SEA AND WHICH EVEN IN THE ROUGHEST WEATHER WOULD (ENSURE->INSURE) THE CALMNESS OF ITS WATERS +5105-28240-0009-1633: SLIGHTLY CHANGING HER COURSE SHE FIRST STRUCK HER MAINSAIL AND IN ORDER TO FACILITATE THE MOVEMENTS OF HER HELMSMAN SOON CARRIED NOTHING BUT HER TWO TOPSAILS BRIGANTINE AND JIB +5105-28240-0010-1634: CAPTAIN SERVADAC HASTENED (TOWARDS->TOWARD) HIM +5105-28240-0011-1635: I LEFT YOU ON A CONTINENT AND HERE I HAVE THE HONOR OF FINDING YOU ON AN ISLAND +5105-28240-0012-1636: NEVER MIND NOW INTERPOSED THE CAPTAIN WE WILL TALK OF THAT BY AND BY +5105-28240-0013-1637: NOTHING MORE THAN YOU KNOW YOURSELF +5105-28240-0014-1638: ARE YOU CERTAIN THAT THIS IS THE MEDITERRANEAN +5105-28240-0015-1639: FOR SOME MOMENTS HE SEEMED PERFECTLY STUPEFIED (*->AND) THEN RECOVERING HIMSELF HE BEGAN TO OVERWHELM THE COUNT WITH A TORRENT OF QUESTIONS +5105-28240-0016-1640: TO ALL THESE INQUIRIES THE COUNT RESPONDED IN THE AFFIRMATIVE +5105-28240-0017-1641: SOME MYSTERIOUS FORCE SEEMED TO HAVE BROUGHT ABOUT A CONVULSION OF THE ELEMENTS +5105-28240-0018-1642: YOU WILL TAKE ME ON BOARD COUNT WILL YOU NOT +5105-28240-0019-1643: MY YACHT IS AT YOUR SERVICE SIR EVEN SHOULD YOU REQUIRE TO MAKE A TOUR (ROUND->AROUND) THE WORLD +5105-28240-0020-1644: THE COUNT SHOOK HIS HEAD +5105-28240-0021-1645: BEFORE STARTING IT WAS INDISPENSABLE THAT THE ENGINE OF THE (DOBRYNA->DOBRINA) SHOULD BE REPAIRED TO SAIL UNDER CANVAS ONLY WOULD IN CONTRARY WINDS AND ROUGH SEAS BE BOTH TEDIOUS AND DIFFICULT +5105-28240-0022-1646: IT WAS ON THE LAST DAY OF JANUARY THAT THE REPAIRS OF THE SCHOONER WERE COMPLETED +5105-28240-0023-1647: A SLIGHT DIMINUTION IN THE EXCESSIVELY HIGH TEMPERATURE WHICH HAD PREVAILED FOR THE LAST FEW WEEKS WAS THE ONLY APPARENT CHANGE IN THE GENERAL ORDER OF THINGS BUT WHETHER THIS WAS TO BE ATTRIBUTED TO ANY ALTERATION IN THE EARTH'S ORBIT WAS A QUESTION WHICH WOULD STILL REQUIRE SEVERAL DAYS TO DECIDE +5105-28240-0024-1648: DOUBTS NOW AROSE AND SOME DISCUSSION FOLLOWED WHETHER OR NOT IT WAS DESIRABLE FOR BEN ZOOF TO ACCOMPANY HIS MASTER +5105-28241-0000-1604: HER SEA GOING QUALITIES WERE EXCELLENT AND WOULD HAVE AMPLY SUFFICED FOR A CIRCUMNAVIGATION OF THE GLOBE +5105-28241-0001-1605: AFTER AN APPRENTICESHIP ON A MERCHANT SHIP HE HAD ENTERED THE IMPERIAL NAVY AND HAD ALREADY REACHED THE RANK OF LIEUTENANT WHEN THE COUNT APPOINTED HIM TO THE CHARGE OF HIS OWN PRIVATE YACHT IN WHICH HE WAS ACCUSTOMED TO SPEND BY FAR THE GREATER PART OF HIS TIME THROUGHOUT THE WINTER GENERALLY CRUISING IN THE MEDITERRANEAN WHILST IN THE SUMMER HE VISITED MORE NORTHERN WATERS +5105-28241-0002-1606: THE LATE ASTOUNDING EVENTS HOWEVER HAD RENDERED PROCOPE MANIFESTLY UNEASY AND NOT THE LESS SO FROM HIS CONSCIOUSNESS THAT THE COUNT SECRETLY PARTOOK OF HIS OWN ANXIETY +5105-28241-0003-1607: STEAM UP AND CANVAS SPREAD THE SCHOONER STARTED EASTWARDS +5105-28241-0004-1608: ALTHOUGH ONLY A MODERATE BREEZE WAS BLOWING THE SEA WAS ROUGH A CIRCUMSTANCE TO BE ACCOUNTED FOR ONLY BY THE DIMINUTION IN THE FORCE OF THE EARTH'S ATTRACTION RENDERING THE LIQUID (PARTICLES->PARTICLE) SO BUOYANT THAT BY THE MERE EFFECT OF OSCILLATION THEY WERE CARRIED TO A HEIGHT THAT WAS QUITE UNPRECEDENTED +5105-28241-0005-1609: FOR A FEW MILES SHE FOLLOWED THE 
LINE HITHERTO PRESUMABLY OCCUPIED BY THE COAST OF ALGERIA BUT NO LAND APPEARED TO THE SOUTH +5105-28241-0006-1610: THE LOG AND THE COMPASS THEREFORE WERE ABLE TO BE CALLED UPON TO DO THE WORK OF THE SEXTANT WHICH HAD BECOME UTTERLY USELESS +5105-28241-0007-1611: (THERE IS->THERE'S) NO FEAR OF THAT SIR +5105-28241-0008-1612: (*->AT) THE EARTH HAS UNDOUBTEDLY ENTERED UPON A NEW ORBIT BUT SHE IS NOT INCURRING ANY PROBABLE RISK OF BEING PRECIPITATED (ONTO->ON TO) THE SUN +5105-28241-0009-1613: AND WHAT DEMONSTRATION DO YOU OFFER ASKED SERVADAC EAGERLY THAT IT WILL NOT HAPPEN +5105-28241-0010-1614: OCEAN (REIGNED->RAINED) SUPREME +5105-28241-0011-1615: ALL THE IMAGES OF HIS PAST LIFE FLOATED UPON HIS MEMORY HIS THOUGHTS SPED AWAY TO HIS NATIVE FRANCE ONLY TO RETURN AGAIN TO WONDER WHETHER THE DEPTHS OF OCEAN WOULD REVEAL ANY TRACES OF THE ALGERIAN METROPOLIS +5105-28241-0012-1616: IS IT NOT IMPOSSIBLE HE MURMURED ALOUD THAT ANY CITY SHOULD DISAPPEAR SO COMPLETELY +5105-28241-0013-1617: WOULD NOT THE LOFTIEST EMINENCES OF THE CITY AT LEAST BE VISIBLE +5105-28241-0014-1618: ANOTHER CIRCUMSTANCE WAS MOST REMARKABLE +5105-28241-0015-1619: TO THE SURPRISE OF ALL AND ESPECIALLY OF LIEUTENANT PROCOPE THE LINE INDICATED A BOTTOM AT A NEARLY UNIFORM DEPTH OF FROM FOUR TO FIVE FATHOMS AND ALTHOUGH THE SOUNDING WAS PERSEVERED WITH CONTINUOUSLY FOR MORE THAN TWO HOURS OVER A CONSIDERABLE AREA THE DIFFERENCES OF LEVEL WERE INSIGNIFICANT NOT CORRESPONDING IN ANY DEGREE TO WHAT WOULD BE EXPECTED OVER THE SITE OF A CITY THAT HAD BEEN TERRACED LIKE THE SEATS OF AN (AMPHITHEATER->AMPHITHEATRE) +5105-28241-0016-1620: YOU MUST SEE LIEUTENANT I SHOULD THINK THAT WE ARE NOT SO NEAR THE COAST OF ALGERIA AS YOU IMAGINED +5105-28241-0017-1621: AFTER PONDERING (AWHILE->A WHILE) HE SAID IF WE WERE FARTHER AWAY I SHOULD EXPECT TO FIND A DEPTH OF TWO OR THREE HUNDRED FATHOMS INSTEAD OF FIVE FATHOMS FIVE FATHOMS +5105-28241-0018-1622: ITS DEPTH REMAINED INVARIABLE STILL FOUR OR AT MOST FIVE FATHOMS AND ALTHOUGH ITS BOTTOM WAS ASSIDUOUSLY DREDGED IT WAS ONLY TO PROVE IT BARREN OF MARINE PRODUCTION OF ANY TYPE +5105-28241-0019-1623: NOTHING WAS TO BE DONE BUT TO PUT ABOUT AND RETURN (IN->AND) DISAPPOINTMENT (TOWARDS->TOWARD) THE NORTH +5142-33396-0000-898: AT ANOTHER TIME (HARALD->HAROLD) ASKED +5142-33396-0001-899: WHAT IS YOUR COUNTRY OLAF HAVE YOU ALWAYS BEEN A THRALL THE THRALL'S EYES FLASHED +5142-33396-0002-900: TWO HUNDRED WARRIORS FEASTED IN HIS HALL AND FOLLOWED HIM TO BATTLE +5142-33396-0003-901: THE REST OF YOU OFF A VIKING HE HAD THREE SHIPS +5142-33396-0004-902: THESE HE GAVE TO THREE OF MY BROTHERS +5142-33396-0005-903: BUT I STAYED THAT SPRING AND BUILT ME A BOAT +5142-33396-0006-904: I MADE HER (FOR->*) ONLY TWENTY (OARS->WARS) BECAUSE I THOUGHT FEW MEN WOULD FOLLOW ME FOR I WAS YOUNG FIFTEEN YEARS OLD +5142-33396-0007-905: AT THE PROW I CARVED THE HEAD WITH OPEN MOUTH AND FORKED TONGUE THRUST OUT +5142-33396-0008-906: I PAINTED THE EYES RED FOR ANGER +5142-33396-0009-907: THERE STAND SO I SAID AND GLARE AND HISS AT MY FOES +5142-33396-0010-908: IN (THE->A) STERN I (CURVED->CARVED) THE TAIL UP ALMOST AS HIGH AS THE HEAD +5142-33396-0011-909: THERE SHE SAT ON THE ROLLERS AS FAIR A SHIP AS I EVER SAW +5142-33396-0012-910: THEN I WILL GET ME A FARM AND WILL (WINTER->WIN HER) IN THAT LAND NOW WHO WILL FOLLOW ME +5142-33396-0013-911: HE IS BUT A BOY THE (MEN->MAN) SAID +5142-33396-0014-912: THIRTY MEN ONE AFTER ANOTHER RAISED THEIR HORNS AND SAID +5142-33396-0015-913: AS OUR BOAT FLASHED DOWN THE ROLLERS INTO THE 
WATER I MADE THIS SONG AND SANG IT +5142-33396-0016-914: SO WE (HARRIED->HURRIED) THE COAST OF NORWAY +5142-33396-0017-915: WE ATE (AT->IT) MANY MEN'S TABLES UNINVITED +5142-33396-0018-916: (MY->I) DRAGON'S BELLY IS NEVER FULL AND ON BOARD WENT THE GOLD +5142-33396-0019-917: OH IT IS BETTER TO LIVE ON THE SEA AND LET OTHER MEN RAISE YOUR CROPS AND COOK YOUR MEALS +5142-33396-0020-918: A HOUSE SMELLS OF SMOKE A (SHIP SMELLS->SHIP'S MILLS) OF FROLIC +5142-33396-0021-919: UP AND DOWN THE WATER WE WENT TO GET MUCH WEALTH AND MUCH FROLIC +5142-33396-0022-920: WHAT OF THE FARM (OLAF->ALL OFF) NOT YET I ANSWERED VIKING IS BETTER FOR SUMMER +5142-33396-0023-921: IT WAS SO DARK THAT I COULD SEE NOTHING BUT A FEW SPARKS ON THE HEARTH +5142-33396-0024-922: I STOOD WITH MY BACK TO THE WALL FOR I WANTED NO SWORD REACHING OUT OF THE DARK FOR ME +5142-33396-0025-923: COME COME I CALLED WHEN NO ONE OBEYED A FIRE +5142-33396-0026-924: MY MEN LAUGHED YES A STINGY HOST +5142-33396-0027-925: HE ACTS AS THOUGH HE (HAD->IS) NOT EXPECTED US +5142-33396-0028-926: ON A BENCH IN A FAR CORNER WERE A DOZEN PEOPLE HUDDLED TOGETHER +5142-33396-0029-927: BRING IN THE TABLE WE ARE HUNGRY +5142-33396-0030-928: THE THRALLS WERE (BRINGING->RINGING) IN A GREAT POT OF MEAT +5142-33396-0031-929: THEY SET UP A CRANE OVER THE FIRE AND HUNG THE POT UPON IT AND WE SAT AND WATCHED IT BOIL WHILE WE JOKED AT LAST THE SUPPER BEGAN +5142-33396-0032-930: THE FARMER SAT GLOOMILY ON THE BENCH AND WOULD NOT EAT AND YOU CANNOT WONDER FOR HE SAW US PUTTING POTFULS OF HIS GOOD BEEF AND (BASKET->BASKEY) LOADS OF BREAD (INTO->AND) OUR BIG MOUTHS +5142-33396-0033-931: YOU WOULD NOT EAT WITH US YOU CANNOT SAY NO TO HALF OF MY ALE I DRINK THIS TO YOUR HEALTH +5142-33396-0034-932: THEN I DRANK HALF OF THE HORNFUL AND (SENT->SET) THE REST ACROSS THE FIRE TO THE FARMER HE TOOK IT AND SMILED SAYING +5142-33396-0035-933: DID YOU EVER HAVE SUCH A LORDLY GUEST BEFORE I WENT ON +5142-33396-0036-934: SO I WILL GIVE OUT THIS LAW THAT MY MEN SHALL NEVER LEAVE YOU ALONE +5142-33396-0037-935: (HAKON->HAWKIN) THERE SHALL BE YOUR CONSTANT COMPANION FRIEND FARMER +5142-33396-0038-936: HE SHALL NOT LEAVE YOU DAY OR NIGHT WHETHER YOU ARE WORKING OR PLAYING OR SLEEPING +5142-33396-0039-937: I (NAMED->NAME) NINE OTHERS AND SAID +5142-33396-0040-938: AND THESE SHALL FOLLOW YOUR THRALLS IN THE SAME WAY +5142-33396-0041-939: SO I SET GUARDS OVER (EVERY ONE->EVERYONE) IN THAT HOUSE +5142-33396-0042-940: SO NO TALES GOT OUT TO THE NEIGHBORS BESIDES IT WAS A LONELY PLACE AND BY GOOD LUCK NO ONE CAME THAT WAY +5142-33396-0043-941: THEIR EYES DANCED BIG (THORLEIF->TORE LEAF) STOOD UP AND STRETCHED HIMSELF +5142-33396-0044-942: (I AM->I'M) STIFF WITH LONG (SITTING->CITY) HE SAID I ITCH FOR A FIGHT I TURNED TO THE FARMER +5142-33396-0045-943: THIS IS OUR LAST FEAST WITH YOU I SAID +5142-33396-0046-944: BY THE BEARD OF ODIN I CRIED YOU HAVE TAKEN OUR JOKE LIKE A MAN +5142-33396-0047-945: MY MEN POUNDED THE TABLE WITH THEIR FISTS +5142-33396-0048-946: BY THE HAMMER (OF THOR->OTHOR) SHOUTED GRIM (HERE->THERE) IS NO STINGY COWARD +5142-33396-0049-947: HERE FRIEND TAKE IT AND HE THRUST (IT->*) INTO THE FARMER'S HAND +5142-33396-0050-948: MAY YOU DRINK (HEART'S->HEARTS) EASE FROM IT FOR MANY YEARS +5142-33396-0051-949: AND WITH IT I LEAVE YOU A NAME (SIF->SIFT) THE FRIENDLY I SHALL HOPE TO DRINK WITH YOU (SOMETIME->SOME TIME) IN VALHALLA +5142-33396-0052-950: HERE IS A RING FOR (SIF->SIFT) THE FRIENDLY AND HERE IS A BRACELET (*->AND) A SWORD WOULD NOT BE ASHAMED TO HANG AT YOUR SIDE 
+5142-33396-0053-951: I TOOK FIVE GREAT BRACELETS OF GOLD FROM OUR TREASURE CHEST AND GAVE THEM TO HIM +5142-33396-0054-952: THAT IS THE BEST WAY TO DECIDE FOR THE SPEAR WILL ALWAYS POINT SOMEWHERE AND ONE THING IS AS GOOD AS ANOTHER +5142-33396-0055-953: THAT TIME IT POINTED US INTO YOUR FATHER'S SHIPS +5142-33396-0056-954: HERE THEY SAID (IS->AS) A RASCAL WHO HAS BEEN HARRYING OUR COASTS +5142-33396-0057-955: WE SUNK HIS SHIP AND MEN BUT HIM WE BROUGHT TO YOU +5142-33396-0058-956: A ROBBER VIKING SAID THE KING AND (*->HE) SCOWLED AT ME +5142-33396-0059-957: YES AND WITH ALL YOUR FINGERS IT TOOK YOU A YEAR TO CATCH ME THE KING FROWNED MORE ANGRILY +5142-33396-0060-958: TAKE HIM OUT (THORKEL->TORCAL) AND LET HIM TASTE YOUR SWORD +5142-33396-0061-959: YOUR MOTHER THE QUEEN WAS STANDING BY +5142-33396-0062-960: NOW SHE PUT HER HAND ON HIS ARM AND SMILED AND SAID +5142-33396-0063-961: AND WOULD HE NOT BE A GOOD GIFT FOR OUR BABY +5142-33396-0064-962: YOUR FATHER THOUGHT A MOMENT (THEN->AND) LOOKED AT YOUR MOTHER AND SMILED +5142-33396-0065-963: SOFT HEART HE SAID GENTLY TO HER THEN TO (THORKEL->TORQUAL) WELL LET HIM GO (THORKEL->TORKO) +5142-33396-0066-964: THEN HE TURNED TO ME AGAIN FROWNING +5142-33396-0067-965: BUT YOUNG SHARP TONGUE NOW THAT (WE HAVE->WE'VE) CAUGHT YOU (WE->*) WILL PUT YOU INTO A TRAP THAT YOU CANNOT GET OUT OF +5142-33396-0068-966: SO I LIVED AND NOW (AM->I'M) YOUR TOOTH THRALL WELL IT IS THE LUCK OF WAR +5142-36377-0000-870: IT WAS ONE OF THE MASTERLY AND CHARMING STORIES OF DUMAS THE ELDER +5142-36377-0001-871: IN FIVE MINUTES I WAS IN A NEW WORLD AND MY MELANCHOLY ROOM WAS FULL OF THE LIVELIEST FRENCH COMPANY +5142-36377-0002-872: THE SOUND OF AN IMPERATIVE AND UNCOMPROMISING BELL RECALLED ME IN DUE TIME TO THE REGIONS OF REALITY +5142-36377-0003-873: AMBROSE MET ME AT THE BOTTOM OF THE STAIRS AND SHOWED ME THE WAY TO THE SUPPER ROOM +5142-36377-0004-874: SHE SIGNED TO ME WITH A GHOSTLY SOLEMNITY TO TAKE THE VACANT PLACE ON THE LEFT OF HER FATHER +5142-36377-0005-875: THE DOOR OPENED AGAIN WHILE I WAS STILL STUDYING THE TWO BROTHERS WITHOUT I HONESTLY CONFESS BEING VERY FAVORABLY IMPRESSED BY EITHER OF THEM +5142-36377-0006-876: A NEW MEMBER OF THE FAMILY CIRCLE WHO INSTANTLY ATTRACTED MY ATTENTION ENTERED THE ROOM +5142-36377-0007-877: A LITTLE CRACKED THAT IN THE POPULAR PHRASE WAS MY IMPRESSION OF THE STRANGER WHO NOW MADE HIS APPEARANCE IN THE SUPPER ROOM +5142-36377-0008-878: MISTER (MEADOWCROFT->MEDICRAFT) THE ELDER HAVING NOT SPOKEN ONE WORD THUS FAR HIMSELF INTRODUCED THE NEWCOMER TO ME WITH A SIDE GLANCE AT HIS SONS WHICH HAD SOMETHING LIKE DEFIANCE IN IT A GLANCE WHICH AS I WAS SORRY TO NOTICE WAS RETURNED WITH THE DEFIANCE ON THEIR SIDE BY THE TWO YOUNG MEN +5142-36377-0009-879: PHILIP (LEFRANK->LE FRANK) THIS IS MY OVERLOOKER MISTER (JAGO->YAGO) SAID THE OLD MAN (FORMALLY->FORMERLY) PRESENTING US +5142-36377-0010-880: HE IS NOT WELL HE HAS COME OVER THE OCEAN FOR REST AND CHANGE (OF SCENE->IS SEEN) +5142-36377-0011-881: (MISTER JAGO->THE TRIAGO) IS AN AMERICAN PHILIP +5142-36377-0012-882: MAKE ACQUAINTANCE WITH (MISTER JAGO SIT->MISCHIAGO SIP) TOGETHER +5142-36377-0013-883: THEY POINTEDLY DREW BACK FROM JOHN JAGO AS HE APPROACHED THE EMPTY CHAIR NEXT TO ME AND MOVED ROUND TO THE OPPOSITE SIDE OF THE TABLE +5142-36377-0014-884: A PRETTY GIRL AND SO FAR AS I COULD JUDGE (BY->MY) APPEARANCES A GOOD GIRL TOO DESCRIBING HER GENERALLY I MAY SAY THAT SHE HAD A SMALL HEAD WELL CARRIED AND WELL SET ON HER SHOULDERS BRIGHT (GRAY->GREY) EYES THAT LOOKED AT YOU 
HONESTLY AND MEANT WHAT THEY LOOKED A TRIM SLIGHT LITTLE FIGURE TOO SLIGHT FOR OUR ENGLISH NOTIONS OF BEAUTY A STRONG AMERICAN ACCENT AND A RARE THING IN AMERICA A PLEASANTLY TONED VOICE WHICH MADE THE ACCENT AGREEABLE TO ENGLISH (EARS->YEARS) +5142-36377-0015-885: OUR FIRST IMPRESSIONS OF PEOPLE ARE IN NINE CASES (OUT OF->AT A) TEN THE RIGHT IMPRESSIONS +5142-36377-0016-886: FOR ONCE IN A WAY I PROVED A TRUE PROPHET +5142-36377-0017-887: THE ONLY CHEERFUL CONVERSATION WAS THE CONVERSATION ACROSS THE TABLE BETWEEN NAOMI AND ME +5142-36377-0018-888: HE LOOKED UP (AT NAOMI->AND NOW AND ME) DOUBTINGLY FROM HIS PLATE AND LOOKED DOWN AGAIN SLOWLY WITH A FROWN +5142-36377-0019-889: WHEN I ADDRESSED HIM HE ANSWERED CONSTRAINEDLY +5142-36377-0020-890: A MORE DREARY AND MORE DISUNITED FAMILY PARTY I NEVER SAT AT THE TABLE WITH +5142-36377-0021-891: ENVY HATRED MALICE AND UNCHARITABLENESS ARE NEVER SO ESSENTIALLY DETESTABLE TO MY MIND AS WHEN THEY ARE ANIMATED BY (A->THE) SENSE OF PROPRIETY AND WORK UNDER THE SURFACE BUT FOR MY INTEREST IN (NAOMI->THEY OWE ME) AND MY OTHER INTEREST IN THE LITTLE LOVE LOOKS WHICH I NOW AND THEN SURPRISED PASSING BETWEEN HER AND AMBROSE I SHOULD NEVER HAVE SAT THROUGH THAT SUPPER +5142-36377-0022-892: I WISH YOU GOOD NIGHT SHE LAID HER BONY HANDS ON THE BACK OF MISTER (MEADOWCROFT'S->METICOFF'S) INVALID CHAIR (CUT->CAUGHT) HIM SHORT IN HIS FAREWELL SALUTATION TO ME AND WHEELED HIM OUT TO HIS BED AS IF SHE WERE WHEELING HIM OUT TO HIS GRAVE +5142-36377-0023-893: YOU WERE QUITE RIGHT TO SAY NO AMBROSE BEGAN NEVER SMOKE WITH JOHN (JAGO->IAGO) HIS CIGARS WILL POISON YOU +5142-36377-0024-894: (NAOMI->THEY ONLY) SHOOK HER FOREFINGER REPROACHFULLY AT THEM AS IF THE TWO STURDY YOUNG FARMERS HAD BEEN TWO CHILDREN +5142-36377-0025-895: SILAS SLUNK AWAY WITHOUT A WORD OF PROTEST AMBROSE STOOD HIS GROUND EVIDENTLY BENT ON MAKING HIS PEACE (WITH->WHEN) NAOMI BEFORE HE LEFT HER SEEING THAT I WAS IN THE WAY I WALKED ASIDE TOWARD A GLASS DOOR AT THE LOWER END OF THE ROOM +5142-36586-0000-967: IT IS MANIFEST THAT MAN IS NOW SUBJECT TO MUCH VARIABILITY +5142-36586-0001-968: SO IT IS WITH THE LOWER ANIMALS +5142-36586-0002-969: THE (VARIABILITY->VERY ABILITY) OF MULTIPLE PARTS +5142-36586-0003-970: BUT THIS SUBJECT WILL BE MORE PROPERLY DISCUSSED WHEN WE TREAT OF THE DIFFERENT RACES OF MANKIND +5142-36586-0004-971: EFFECTS OF THE INCREASED USE AND DISUSE OF PARTS +5142-36600-0000-896: CHAPTER SEVEN ON THE RACES OF MAN +5142-36600-0001-897: (IN->AND) DETERMINING WHETHER TWO OR MORE ALLIED FORMS OUGHT TO BE RANKED (AS->A) SPECIES OR VARIETIES NATURALISTS ARE PRACTICALLY GUIDED BY THE FOLLOWING CONSIDERATIONS NAMELY THE AMOUNT OF DIFFERENCE BETWEEN THEM AND WHETHER SUCH DIFFERENCES RELATE TO FEW OR MANY POINTS OF STRUCTURE AND WHETHER THEY ARE OF PHYSIOLOGICAL IMPORTANCE BUT MORE ESPECIALLY WHETHER THEY ARE CONSTANT +5639-40744-0000-137: ELEVEN O'CLOCK HAD STRUCK IT WAS A FINE CLEAR NIGHT (THEY->THERE) WERE THE ONLY PERSONS ON THE ROAD AND THEY SAUNTERED LEISURELY ALONG TO AVOID PAYING THE PRICE OF FATIGUE FOR THE RECREATION PROVIDED FOR THE TOLEDANS IN (THEIR->THE) VALLEY OR ON THE BANKS OF THEIR RIVER +5639-40744-0001-138: SECURE AS HE THOUGHT IN THE CAREFUL ADMINISTRATION OF JUSTICE IN THAT CITY AND THE CHARACTER OF ITS WELL DISPOSED INHABITANTS THE GOOD (HIDALGO->HADALGO) WAS FAR FROM THINKING THAT ANY DISASTER COULD (BEFAL->BEFALL) HIS FAMILY +5639-40744-0002-139: (RODOLFO->RUDOLPHO) AND HIS COMPANIONS (WITH->WERE) THEIR FACES MUFFLED IN THEIR CLOAKS STARED RUDELY AND INSOLENTLY AT 
THE MOTHER THE DAUGHTER AND THE SERVANT MAID +5639-40744-0003-140: IN A MOMENT HE COMMUNICATED HIS THOUGHTS TO HIS COMPANIONS AND IN THE NEXT MOMENT THEY RESOLVED TO TURN BACK AND CARRY HER OFF TO PLEASE (RODOLFO->RUDOLPHO) FOR THE RICH WHO ARE OPEN HANDED ALWAYS FIND PARASITES READY TO ENCOURAGE THEIR BAD PROPENSITIES AND THUS TO CONCEIVE THIS WICKED DESIGN TO COMMUNICATE IT APPROVE IT RESOLVE ON RAVISHING (LEOCADIA->LOCATIA) AND TO CARRY THAT DESIGN INTO EFFECT WAS THE WORK OF A MOMENT +5639-40744-0004-141: THEY DREW THEIR SWORDS HID THEIR FACES IN THE FLAPS OF THEIR CLOAKS TURNED BACK AND SOON CAME IN FRONT OF THE LITTLE PARTY WHO HAD NOT YET DONE GIVING THANKS TO GOD FOR THEIR ESCAPE FROM THOSE AUDACIOUS MEN +5639-40744-0005-142: FINALLY THE ONE PARTY WENT OFF EXULTING AND THE OTHER WAS LEFT IN DESOLATION AND WOE +5639-40744-0006-143: (RODOLFO->RUDOLPHO) ARRIVED AT HIS OWN HOUSE WITHOUT ANY IMPEDIMENT (AND LEOCADIA'S->ALYOCADIA'S) PARENTS REACHED THEIRS HEART BROKEN AND DESPAIRING +5639-40744-0007-144: MEANWHILE (RODOLFO->RUDOLPHO) HAD (LEOCADIA->LOCALIA) SAFE IN HIS CUSTODY AND IN HIS OWN APARTMENT +5639-40744-0008-145: WHO TOUCHES ME AM I IN BED +5639-40744-0009-146: MOTHER DEAR FATHER DO YOU HEAR ME +5639-40744-0010-147: IT IS THE ONLY AMENDS I ASK OF YOU FOR THE WRONG YOU HAVE DONE ME +5639-40744-0011-148: SHE FOUND THE DOOR BUT IT WAS LOCKED OUTSIDE +5639-40744-0012-149: SHE SUCCEEDED IN OPENING THE WINDOW AND THE MOONLIGHT SHONE IN SO BRIGHTLY THAT SHE COULD DISTINGUISH THE COLOUR OF SOME DAMASK (HANGINGS->HANGING) IN THE ROOM +5639-40744-0013-150: SHE SAW THAT THE BED WAS GILDED AND SO RICH THAT IT SEEMED THAT OF A PRINCE (*->THE) RATHER (THAN->THAT) OF A PRIVATE GENTLEMAN +5639-40744-0014-151: AMONG OTHER THINGS ON WHICH SHE CAST HER EYES WAS A SMALL CRUCIFIX OF SOLID SILVER STANDING ON A CABINET NEAR THE WINDOW +5639-40744-0015-152: THIS PERSON WAS (RODOLFO->RUDOLPU) WHO THOUGH HE HAD GONE TO LOOK FOR HIS FRIENDS HAD CHANGED HIS MIND IN THAT RESPECT NOT THINKING IT ADVISABLE TO ACQUAINT THEM WITH WHAT HAD PASSED BETWEEN HIM AND THE GIRL +5639-40744-0016-153: ON THE CONTRARY HE RESOLVED TO TELL THEM THAT REPENTING OF HIS VIOLENCE AND MOVED BY (HER->A) TEARS HE HAD ONLY CARRIED HER HALF WAY TOWARDS HIS HOUSE AND THEN LET HER GO +5639-40744-0017-154: CHOKING WITH EMOTION (LEOCADI->LOCATIA) MADE A SIGN TO HER PARENTS THAT SHE WISHED TO BE ALONE WITH THEM +5639-40744-0018-155: THAT WOULD BE VERY WELL MY CHILD REPLIED HER FATHER IF YOUR PLAN WERE NOT LIABLE TO BE FRUSTRATED BY ORDINARY CUNNING BUT NO DOUBT THIS IMAGE (HAS->HAD) BEEN ALREADY MISSED BY ITS OWNER AND HE WILL HAVE SET IT DOWN FOR CERTAIN THAT IT WAS TAKEN OUT OF THE ROOM BY THE PERSON HE LOCKED UP THERE +5639-40744-0019-156: WHAT YOU HAD BEST DO MY CHILD IS TO KEEP IT AND PRAY TO IT THAT (SINCE->SINS) IT WAS A WITNESS TO YOUR UNDOING IT WILL DEIGN TO VINDICATE YOUR CAUSE BY ITS RIGHTEOUS JUDGMENT +5639-40744-0020-157: THUS DID (THIS->THE) HUMANE AND RIGHT MINDED FATHER COMFORT HIS UNHAPPY DAUGHTER AND HER MOTHER EMBRACING HER AGAIN DID ALL SHE COULD TO SOOTHE (HER->THE) FEELINGS +5639-40744-0021-158: SHE MEANWHILE (PASSED->PAST) HER LIFE WITH HER PARENTS IN THE STRICTEST RETIREMENT NEVER LETTING HERSELF BE SEEN BUT SHUNNING EVERY EYE LEST IT SHOULD READ HER MISFORTUNE IN HER FACE +5639-40744-0022-159: TIME ROLLED ON THE HOUR OF HER DELIVERY ARRIVED IT TOOK PLACE IN THE UTMOST SECRECY HER MOTHER TAKING (UPON->UP ON) HER THE OFFICE OF MIDWIFE (AND->AS) SHE GAVE BIRTH TO A SON ONE OF THE MOST BEAUTIFUL EVER SEEN 
+5639-40744-0023-160: (WHEN->AND) THE BOY WALKED THROUGH THE STREETS BLESSINGS (WERE->WHERE) SHOWERED (UPON->UP ON) HIM BY ALL WHO SAW HIM (BLESSINGS->BLESSING) UPON HIS BEAUTY UPON THE MOTHER THAT BORE HIM UPON THE FATHER THAT BEGOT HIM UPON THOSE WHO BROUGHT HIM UP SO WELL +5639-40744-0024-161: ONE DAY WHEN THE BOY WAS SENT BY HIS GRANDFATHER WITH A MESSAGE TO A RELATION HE PASSED ALONG A STREET IN WHICH THERE WAS A GREAT CONCOURSE OF HORSEMEN +5639-40744-0025-162: THE BED SHE TOO WELL REMEMBERED WAS THERE AND ABOVE ALL THE CABINET ON WHICH HAD STOOD THE IMAGE SHE HAD TAKEN AWAY WAS STILL ON THE SAME SPOT +5639-40744-0026-163: (LUIS->LOUIS) WAS OUT OF DANGER IN A FORTNIGHT IN A MONTH HE ROSE FROM HIS BED AND (DURING->DREWING) ALL THAT TIME HE WAS VISITED DAILY BY HIS MOTHER AND GRANDMOTHER AND TREATED BY THE MASTER AND MISTRESS OF THE HOUSE AS IF HE WAS THEIR OWN CHILD +5639-40744-0027-164: THUS SAYING AND PRESSING THE CRUCIFIX TO HER BREAST SHE FELL FAINTING INTO THE ARMS OF DONA ESTAFANIA WHO AS A GENTLEWOMAN TO WHOSE SEX PITY IS (AS->A) NATURAL AS CRUELTY (IS->AS) TO MAN INSTANTLY PRESSED HER LIPS TO THOSE OF THE FAINTING GIRL SHEDDING OVER HER SO MANY TEARS THAT THERE NEEDED NO OTHER SPRINKLING OF WATER TO RECOVER (LEOCADIA->LOCATIA) FROM HER SWOON +5639-40744-0028-165: I HAVE GREAT THINGS TO TELL YOU SENOR SAID (DONA ESTAFANIA->DORIS DA FANIA) TO HER HUSBAND THE CREAM AND SUBSTANCE OF WHICH IS THIS THE FAINTING GIRL BEFORE YOU IS YOUR DAUGHTER AND (THAT->THE) BOY IS YOUR GRANDSON +5639-40744-0029-166: THIS TRUTH WHICH I HAVE LEARNED FROM HER LIPS IS CONFIRMED BY HIS FACE IN WHICH WE HAVE BOTH BEHELD THAT OF OUR SON +5639-40744-0030-167: JUST THEN (LEOCADIA->LOCATIA) CAME TO HERSELF AND EMBRACING THE CROSS SEEMED CHANGED INTO A SEA OF TEARS AND THE GENTLEMAN (REMAINED->REMAINING) IN (UTTER BEWILDERMENT->OUT OF A WILDERMENT) UNTIL HIS WIFE HAD REPEATED TO HIM FROM BEGINNING TO END (LEOCADIA'S->LOCATEOUS) WHOLE STORY AND HE BELIEVED IT THROUGH THE BLESSED DISPENSATION OF HEAVEN WHICH HAD CONFIRMED IT BY SO MANY CONVINCING TESTIMONIES +5639-40744-0031-168: SO PERSUASIVE WERE HER ENTREATIES AND SO STRONG HER ASSURANCES THAT NO HARM WHATEVER COULD RESULT TO THEM FROM THE INFORMATION SHE SOUGHT THEY WERE INDUCED TO CONFESS THAT ONE SUMMER'S NIGHT THE SAME SHE HAD MENTIONED THEMSELVES AND ANOTHER FRIEND BEING OUT ON (A->THE) STROLL WITH (RODOLFO->RUDOLPHO) THEY HAD BEEN CONCERNED IN THE ABDUCTION OF A GIRL WHOM (RODOLFO->RUDOLPHO) CARRIED OFF WHILST THE REST OF THEM DETAINED HER FAMILY WHO MADE A GREAT OUTCRY AND WOULD HAVE DEFENDED HER IF THEY COULD +5639-40744-0032-169: FOR GOD'S SAKE MY LADY MOTHER GIVE ME A WIFE WHO (WOULD->WILL) BE AN AGREEABLE COMPANION NOT ONE WHO WILL DISGUST ME SO THAT WE MAY BOTH BEAR EVENLY AND WITH MUTUAL GOOD WILL THE YOKE (IMPOSED->AND POST) ON US BY HEAVEN INSTEAD OF PULLING THIS WAY AND THAT WAY AND FRETTING EACH OTHER TO DEATH +5639-40744-0033-170: HER BEARING WAS GRACEFUL AND ANIMATED SHE LED HER SON BY THE HAND AND BEFORE HER WALKED TWO MAIDS WITH WAX LIGHTS AND SILVER CANDLESTICKS +5639-40744-0034-171: ALL ROSE TO DO HER REVERENCE AS IF SOMETHING FROM HEAVEN HAD MIRACULOUSLY APPEARED BEFORE THEM BUT GAZING ON HER (ENTRANCED->AND TRANCED) WITH ADMIRATION NOT ONE OF THEM WAS ABLE TO ADDRESS A SINGLE WORD TO HER +5639-40744-0035-172: SHE REFLECTED HOW NEAR SHE STOOD TO THE CRISIS WHICH WAS TO DETERMINE WHETHER SHE WAS TO BE BLESSED OR UNHAPPY FOR EVER AND RACKED BY THE INTENSITY OF HER EMOTIONS SHE SUDDENLY CHANGED (COLOUR->COLOR) HER HEAD DROPPED AND SHE 
FELL FORWARD IN A SWOON INTO THE ARMS OF THE DISMAYED (ESTAFANIA->STEFFANIA) +5639-40744-0036-173: HIS MOTHER HAD LEFT HER TO HIM AS BEING HER DESTINED PROTECTOR BUT WHEN SHE SAW THAT HE TOO WAS INSENSIBLE SHE WAS NEAR MAKING A THIRD AND WOULD HAVE DONE SO HAD HE NOT COME TO HIMSELF +5639-40744-0037-174: KNOW THEN SON OF MY HEART THAT THIS FAINTING LADY IS YOUR REAL BRIDE I SAY REAL BECAUSE SHE IS THE ONE WHOM YOUR FATHER AND I HAVE CHOSEN FOR YOU AND (THE->A) PORTRAIT WAS A PRETENCE +5639-40744-0038-175: JUST AT (THE->A) MOMENT WHEN THE TEARS OF THE PITYING BEHOLDERS FLOWED FASTEST AND (THEIR->THERE) EJACULATIONS WERE MOST EXPRESSIVE OF DESPAIR (LEOCADIA->LE OCCADIA) GAVE SIGNS OF RECOVERY AND BROUGHT BACK GLADNESS (TO->THROUGH) THE HEARTS OF ALL +5639-40744-0039-176: WHEN SHE CAME TO HER SENSES AND BLUSHING TO FIND HERSELF IN (RODOLFO'S->RIDOLPH'S) ARMS WOULD HAVE DISENGAGED HERSELF NO SENORA HE SAID THAT MUST NOT BE STRIVE NOT TO WITHDRAW FROM THE ARMS OF HIM WHO HOLDS YOU IN HIS SOUL +5639-40744-0040-177: THIS WAS DONE FOR THE EVENT TOOK PLACE AT A TIME WHEN THE CONSENT OF THE PARTIES WAS SUFFICIENT FOR THE CELEBRATION OF (A->THE) MARRIAGE WITHOUT ANY OF THE PRELIMINARY FORMALITIES WHICH ARE NOW SO PROPERLY REQUIRED +5639-40744-0041-178: NOR WAS (RODOLFO->RUDOLPHAL) LESS SURPRISED THAN THEY AND (THE->A) BETTER TO ASSURE HIMSELF OF SO WONDERFUL A FACT HE BEGGED (LEOCADIA->LOCATIA) TO GIVE HIM SOME TOKEN WHICH SHOULD MAKE PERFECTLY CLEAR TO HIM THAT WHICH INDEED HE DID NOT DOUBT SINCE IT WAS AUTHENTICATED BY HIS PARENTS +5683-32865-0000-2483: YOU KNOW CAPTAIN LAKE +5683-32865-0001-2484: SAID LORD CHELFORD ADDRESSING ME +5683-32865-0002-2485: HE HAD HIS HAND UPON LAKE'S SHOULDER +5683-32865-0003-2486: THEY ARE COUSINS YOU KNOW WE ARE ALL COUSINS +5683-32865-0004-2487: WHATEVER LORD CHELFORD SAID MISS BRANDON RECEIVED IT VERY GRACIOUSLY AND EVEN WITH A MOMENTARY SMILE +5683-32865-0005-2488: BUT HER GREETING TO CAPTAIN (LAKE->LEAK) WAS MORE THAN USUALLY HAUGHTY AND FROZEN AND HER FEATURES I FANCIED PARTICULARLY PROUD AND PALE +5683-32865-0006-2489: AT DINNER LAKE WAS EASY AND AMUSING +5683-32865-0007-2490: (I'M->I AM) GLAD YOU LIKE IT SAYS WYLDER CHUCKLING BENIGNANTLY ON IT OVER HIS SHOULDER +5683-32865-0008-2491: I BELIEVE I HAVE A LITTLE TASTE THAT WAY THOSE ARE ALL REAL YOU KNOW THOSE JEWELS +5683-32865-0009-2492: AND HE PLACED IT IN THAT GENTLEMAN'S FINGERS WHO NOW TOOK HIS TURN AT THE LAMP AND CONTEMPLATED THE LITTLE (PARALLELOGRAM->PARALLELLOGRAM) WITH A GLEAM OF SLY AMUSEMENT +5683-32865-0010-2493: I WAS THINKING IT'S VERY LIKE THE ACE OF HEARTS ANSWERED THE CAPTAIN SOFTLY SMILING ON +5683-32865-0011-2494: WHEREUPON LAKE LAUGHED QUIETLY STILL LOOKING ON THE ACE OF HEARTS WITH HIS SLY EYES +5683-32865-0012-2495: AND (WYLDER LAUGHED->WHILE THEIR LEFT) TOO MORE SUDDENLY AND NOISILY THAN THE (HUMOUR->HUMOR) OF THE JOKE SEEMED QUITE TO CALL FOR AND GLANCED A GRIM LOOK FROM THE CORNERS OF HIS EYES (ON LAKE->UNLIKE) BUT THE GALLANT CAPTAIN DID NOT SEEM TO PERCEIVE IT AND AFTER A FEW SECONDS MORE HE HANDED IT VERY INNOCENTLY BACK TO MISSUS DOROTHY ONLY REMARKING +5683-32865-0013-2496: DO YOU KNOW LAKE OH I REALLY CAN'T TELL BUT HE'LL SOON TIRE OF COUNTRY LIFE +5683-32865-0014-2497: HE'S NOT A MAN FOR COUNTRY QUARTERS +5683-32865-0015-2498: I HAD A HORRID DREAM ABOUT HIM LAST NIGHT THAT +5683-32865-0016-2499: OH I KNOW THAT'S (LORNE->LORN) BRANDON +5683-32865-0017-2500: ALL THE TIME HE WAS TALKING TO ME HIS ANGRY LITTLE EYES WERE FOLLOWING LAKE +5683-32866-0000-2527: MISS LAKE DECLINED THE 
CARRIAGE TO NIGHT +5683-32866-0001-2528: AND HE ADDED (SOMETHING->SOME THINGS) STILL LESS COMPLIMENTARY +5683-32866-0002-2529: BUT DON'T THESE VERY WISE THINGS SOMETIMES TURN OUT VERY FOOLISHLY +5683-32866-0003-2530: IN THE MEANTIME I HAD FORMED A NEW IDEA OF HER +5683-32866-0004-2531: BY THIS TIME LORD CHELFORD AND WYLDER RETURNED AND DISGUSTED RATHER WITH MYSELF I RUMINATED ON MY WANT OF (GENERAL SHIP->GENERALSHIP) +5683-32866-0005-2532: AND HE MADE A LITTLE DIP OF HIS CANE TOWARDS BRANDON HALL OVER HIS SHOULDER +5683-32866-0006-2533: YES SO THEY SAID BUT THAT WOULD I THINK HAVE BEEN WORSE +5683-32866-0007-2534: IF A FELLOW'S BEEN A LITTLE BIT WILD (HE'S BEELZEBUB->HE IS BEALES A BUB) AT ONCE +5683-32866-0008-2535: BRACTON'S A VERY GOOD FELLOW I CAN ASSURE YOU +5683-32866-0009-2536: I DON'T KNOW (AND->ONE) CAN'T SAY HOW YOU (FINE->FIND) GENTLEMEN (DEFINE->TO FIND) WICKEDNESS ONLY AS AN OBSCURE FEMALE I SPEAK ACCORDING TO MY LIGHTS AND HE IS GENERALLY THOUGHT THE WICKEDEST MAN IN THIS COUNTY +5683-32866-0010-2537: WELL YOU KNOW RADIE WOMEN LIKE WICKED FELLOWS IT IS CONTRAST I SUPPOSE BUT THEY DO AND I'M SURE FROM WHAT BRACTON HAS SAID TO ME I KNOW HIM INTIMATELY THAT DORCAS LIKES HIM AND I CAN'T CONCEIVE WHY THEY ARE NOT MARRIED +5683-32866-0011-2538: THEIR WALK CONTINUED SILENT FOR THE GREATER PART NEITHER WAS QUITE SATISFIED WITH THE OTHER BUT RACHEL AT LAST SAID +5683-32866-0012-2539: NOW THAT'S IMPOSSIBLE RADIE FOR I REALLY DON'T THINK I ONCE THOUGHT OF HIM ALL THIS EVENING EXCEPT JUST WHILE WE WERE TALKING +5683-32866-0013-2540: THERE WAS A BRIGHT MOONLIGHT BROKEN BY THE SHADOWS OF OVERHANGING BOUGHS AND WITHERED LEAVES AND THE MOTTLED LIGHTS AND SHADOWS GLIDED ODDLY ACROSS HIS PALE FEATURES +5683-32866-0014-2541: DON'T INSULT ME STANLEY BY TALKING AGAIN AS YOU DID THIS MORNING +5683-32866-0015-2542: WHAT I SAY IS ALTOGETHER ON YOUR OWN ACCOUNT +5683-32866-0016-2543: MARK MY WORDS YOU'LL FIND HIM TOO STRONG FOR YOU (AYE->I) AND TOO DEEP +5683-32866-0017-2544: I AM VERY UNEASY ABOUT IT WHATEVER IT IS I CAN'T HELP IT +5683-32866-0018-2545: TO MY MIND THERE HAS ALWAYS BEEN SOMETHING INEXPRESSIBLY AWFUL IN FAMILY FEUDS +5683-32866-0019-2546: THE MYSTERY OF THEIR ORIGIN THEIR CAPACITY FOR EVOLVING LATENT FACULTIES OF CRIME AND THE (STEADY->STUDY) VITALITY WITH WHICH THEY SURVIVE THE HEARSE AND SPEAK THEIR DEEP (MOUTHED->MOUTH) MALIGNITIES IN EVERY NEW BORN GENERATION HAVE ASSOCIATED THEM SOMEHOW IN MY MIND WITH A SPELL OF LIFE EXCEEDING AND DISTINCT FROM HUMAN AND (A SPECIAL->ESPECIAL) SATANIC ACTION +5683-32866-0020-2547: THE FLOOR MORE THAN ANYTHING ELSE SHOWED THE GREAT AGE OF THE ROOM +5683-32866-0021-2548: MY BED WAS (UNEXCEPTIONABLY->UNEXCEPTIONALLY) COMFORTABLE BUT IN MY THEN MOOD I COULD HAVE WISHED IT A GREAT DEAL MORE MODERN +5683-32866-0022-2549: ITS CURTAINS WERE OF THICK AND FADED TAPESTRY +5683-32866-0023-2550: ALL THE FURNITURE BELONGED TO OTHER TIMES +5683-32866-0024-2551: I SHAN'T TROUBLE YOU ABOUT MY TRAIN OF THOUGHTS OR FANCIES BUT I BEGAN TO FEEL VERY LIKE A GENTLEMAN IN A GHOST STORY WATCHING EXPERIMENTALLY IN A HAUNTED CHAMBER +5683-32866-0025-2552: I DID NOT EVEN TAKE THE PRECAUTION OF SMOKING UP THE CHIMNEY +5683-32866-0026-2553: I BOLDLY LIGHTED MY (CHEROOT->JEROOT) +5683-32866-0027-2554: A COLD BRIGHT MOON WAS SHINING WITH CLEAR SHARP LIGHTS AND SHADOWS +5683-32866-0028-2555: THE SOMBRE OLD TREES LIKE GIGANTIC HEARSE PLUMES BLACK AND AWFUL +5683-32866-0029-2556: SOMEHOW I HAD GROWN NERVOUS +5683-32866-0030-2557: A LITTLE BIT OF PLASTER TUMBLED DOWN THE CHIMNEY 
AND STARTLED ME CONFOUNDEDLY +5683-32879-0000-2501: IT WAS NOT VERY MUCH PAST ELEVEN THAT MORNING WHEN THE PONY CARRIAGE FROM BRANDON DREW UP BEFORE THE LITTLE GARDEN WICKET OF REDMAN'S FARM +5683-32879-0001-2502: (WELL->WHILE) SHE WAS BETTER THOUGH SHE HAD HAD A BAD NIGHT +5683-32879-0002-2503: SO THERE CAME A STEP AND A LITTLE RUSTLING OF FEMININE DRAPERIES THE SMALL DOOR OPENED AND RACHEL ENTERED WITH HER HAND EXTENDED AND A PALE SMILE OF WELCOME +5683-32879-0003-2504: WOMEN CAN HIDE THEIR PAIN BETTER THAN WE MEN AND BEAR IT BETTER TOO EXCEPT WHEN SHAME DROPS FIRE INTO THE DREADFUL CHALICE +5683-32879-0004-2505: BUT POOR RACHEL LAKE HAD MORE THAN THAT STOICAL HYPOCRISY WHICH ENABLES THE TORTURED SPIRITS OF HER SEX TO LIFT A PALE FACE THROUGH THE FLAMES AND SMILE +5683-32879-0005-2506: THIS TRANSIENT SPRING AND LIGHTING UP (ARE->OUR) BEAUTIFUL A GLAMOUR BEGUILING OUR SENSES +5683-32879-0006-2507: THERE WAS SOMETHING OF SWEETNESS AND FONDNESS IN HER TONES AND MANNER WHICH WAS NEW TO RACHEL AND COMFORTING AND SHE RETURNED THE GREETING AS KINDLY AND FELT MORE LIKE HER FORMER SELF +5683-32879-0007-2508: RACHEL'S PALE AND SHARPENED FEATURES AND DILATED EYE STRUCK HER WITH A PAINFUL SURPRISE +5683-32879-0008-2509: YOU HAVE BEEN SO ILL MY POOR RACHEL +5683-32879-0009-2510: ILL AND TROUBLED DEAR TROUBLED IN MIND AND MISERABLY NERVOUS +5683-32879-0010-2511: POOR RACHEL HER NATURE RECOILED FROM DECEIT AND SHE TOLD AT ALL EVENTS AS MUCH OF THE TRUTH AS SHE DARED +5683-32879-0011-2512: SHE SPOKE WITH A SUDDEN ENERGY WHICH PARTOOK (OF->A) FEAR AND PASSION AND FLUSHED HER THIN CHEEK AND MADE HER LANGUID EYES FLASH +5683-32879-0012-2513: THANK YOU RACHEL MY COUSIN RACHEL MY ONLY FRIEND +5683-32879-0013-2514: CHELFORD HAD A NOTE FROM MISTER WYLDER THIS MORNING ANOTHER NOTE HIS COMING DELAYED AND SOMETHING OF HIS HAVING TO SEE SOME PERSON WHO (IS->WAS) ABROAD CONTINUED DORCAS AFTER A LITTLE PAUSE +5683-32879-0014-2515: YES SOMETHING EVERYTHING SAID RACHEL HURRIEDLY LOOKING FROWNINGLY AT A FLOWER WHICH SHE WAS TWIRLING IN HER FINGERS +5683-32879-0015-2516: YES SAID RACHEL +5683-32879-0016-2517: AND THE WAN ORACLE HAVING SPOKEN SHE (SATE->SAT) DOWN IN THE SAME SORT OF ABSTRACTION AGAIN BESIDE DORCAS AND SHE LOOKED FULL IN HER COUSIN'S EYES +5683-32879-0017-2518: OF MARK WYLDER I SAY THIS HIS NAME HAS BEEN FOR YEARS HATEFUL TO ME AND RECENTLY IT HAS BECOME FRIGHTFUL AND YOU WILL PROMISE ME SIMPLY THIS THAT YOU WILL NEVER ASK ME TO SPEAK AGAIN ABOUT HIM +5683-32879-0018-2519: IT IS AN ANTIPATHY AN ANTIPATHY I CANNOT GET OVER DEAR DORCAS YOU MAY THINK IT A MADNESS BUT DON'T BLAME ME +5683-32879-0019-2520: I HAVE VERY FEW TO LOVE ME NOW AND I THOUGHT YOU MIGHT LOVE ME AS I HAVE BEGUN TO LOVE YOU +5683-32879-0020-2521: AND SHE THREW HER ARMS ROUND HER COUSIN'S NECK AND BRAVE RACHEL AT LAST BURST INTO TEARS +5683-32879-0021-2522: DORCAS IN HER STRANGE WAY WAS MOVED +5683-32879-0022-2523: I LIKE YOU STILL RACHEL I'M SURE I'LL ALWAYS LIKE YOU +5683-32879-0023-2524: YOU RESEMBLE ME RACHEL YOU ARE FEARLESS AND INFLEXIBLE AND GENEROUS +5683-32879-0024-2525: YES RACHEL I DO LOVE YOU +5683-32879-0025-2526: THANK YOU DORCAS DEAR +61-70968-0000-2179: HE BEGAN A CONFUSED COMPLAINT AGAINST THE WIZARD WHO HAD VANISHED BEHIND THE CURTAIN ON THE LEFT +61-70968-0001-2180: GIVE NOT SO EARNEST A MIND TO THESE MUMMERIES CHILD +61-70968-0002-2181: A GOLDEN FORTUNE AND A HAPPY LIFE +61-70968-0003-2182: HE WAS LIKE UNTO MY FATHER IN A WAY AND YET WAS NOT MY FATHER +61-70968-0004-2183: ALSO THERE WAS A STRIPLING PAGE WHO TURNED INTO 
A MAID +61-70968-0005-2184: THIS WAS SO SWEET A LADY SIR AND IN SOME MANNER I DO THINK SHE DIED +61-70968-0006-2185: BUT THEN THE PICTURE WAS GONE AS QUICKLY AS IT CAME +61-70968-0007-2186: SISTER NELL DO YOU HEAR THESE MARVELS +61-70968-0008-2187: TAKE YOUR PLACE AND LET US SEE WHAT THE CRYSTAL CAN SHOW TO YOU +61-70968-0009-2188: LIKE AS NOT YOUNG MASTER THOUGH I AM AN OLD MAN +61-70968-0010-2189: FORTHWITH ALL RAN TO THE OPENING OF THE TENT TO SEE WHAT MIGHT BE AMISS BUT MASTER WILL WHO PEEPED OUT FIRST NEEDED NO MORE THAN ONE GLANCE +61-70968-0011-2190: HE GAVE WAY TO THE OTHERS VERY READILY AND RETREATED UNPERCEIVED BY THE SQUIRE AND MISTRESS FITZOOTH TO THE REAR OF THE TENT +61-70968-0012-2191: CRIES OF (A NOTTINGHAM A->UNNOTTINGHAM ARE) NOTTINGHAM +61-70968-0013-2192: BEFORE THEM FLED THE STROLLER AND HIS THREE SONS CAPLESS AND TERRIFIED +61-70968-0014-2193: WHAT IS THE TUMULT AND RIOTING CRIED OUT THE SQUIRE AUTHORITATIVELY AND HE BLEW TWICE ON (A->THE) SILVER WHISTLE WHICH HUNG AT HIS BELT +61-70968-0015-2194: NAY WE REFUSED THEIR REQUEST MOST POLITELY MOST NOBLE SAID THE LITTLE STROLLER +61-70968-0016-2195: AND THEN THEY BECAME VEXED AND WOULD HAVE SNATCHED YOUR PURSE FROM US +61-70968-0017-2196: I COULD NOT SEE MY BOY (INJURED->INJURE) EXCELLENCE FOR BUT DOING HIS DUTY AS ONE OF CUMBERLAND'S SONS +61-70968-0018-2197: SO I DID PUSH THIS FELLOW +61-70968-0019-2198: IT IS ENOUGH SAID GEORGE GAMEWELL SHARPLY (AND->AS) HE TURNED UPON THE CROWD +61-70968-0020-2199: SHAME ON YOU CITIZENS CRIED HE I BLUSH FOR MY FELLOWS OF NOTTINGHAM +61-70968-0021-2200: SURELY WE CAN SUBMIT WITH GOOD GRACE +61-70968-0022-2201: TIS FINE FOR YOU TO TALK OLD MAN ANSWERED THE LEAN SULLEN APPRENTICE +61-70968-0023-2202: BUT I WRESTLED WITH THIS FELLOW AND DO KNOW THAT HE PLAYED UNFAIRLY IN THE SECOND BOUT +61-70968-0024-2203: SPOKE THE SQUIRE LOSING ALL (PATIENCE->PATIENT) AND IT WAS TO YOU THAT I GAVE ANOTHER (PURSE IN->PERSON) CONSOLATION +61-70968-0025-2204: COME TO ME MEN HERE HERE HE RAISED HIS VOICE STILL LOUDER +61-70968-0026-2205: THE STROLLERS TOOK THEIR PART IN IT WITH HEARTY ZEST NOW THAT THEY HAD SOME CHANCE OF BEATING OFF THEIR FOES +61-70968-0027-2206: ROBIN AND THE LITTLE TUMBLER BETWEEN THEM TRIED TO FORCE THE SQUIRE TO STAND BACK AND VERY VALIANTLY DID THESE TWO COMPORT THEMSELVES +61-70968-0028-2207: THE HEAD AND CHIEF OF THE RIOT (THE NOTTINGHAM APPRENTICE->DENOTTINGHAM APPRENTICED) WITH CLENCHED FISTS THREATENED MONTFICHET +61-70968-0029-2208: THE SQUIRE HELPED TO THRUST THEM ALL IN AND ENTERED SWIFTLY HIMSELF +61-70968-0030-2209: NOW BE SILENT ON YOUR LIVES HE BEGAN BUT THE CAPTURED APPRENTICE SET UP AN INSTANT SHOUT +61-70968-0031-2210: SILENCE YOU KNAVE CRIED MONTFICHET +61-70968-0032-2211: HE FELT FOR AND FOUND THE WIZARD'S BLACK CLOTH THE SQUIRE WAS QUITE OUT OF BREATH +61-70968-0033-2212: THRUSTING OPEN THE PROPER ENTRANCE OF THE TENT ROBIN SUDDENLY RUSHED FORTH WITH HIS BURDEN WITH A GREAT SHOUT +61-70968-0034-2213: A MONTFICHET A MONTFICHET GAMEWELL TO THE RESCUE +61-70968-0035-2214: TAKING ADVANTAGE OF THIS THE SQUIRE'S FEW MEN REDOUBLED THEIR EFFORTS AND ENCOURAGED BY (ROBIN'S->ROBINS) AND THE LITTLE STROLLER'S CRIES FOUGHT THEIR WAY TO HIM +61-70968-0036-2215: GEORGE MONTFICHET WILL NEVER FORGET THIS DAY +61-70968-0037-2216: WHAT IS YOUR NAME LORDING ASKED THE LITTLE STROLLER PRESENTLY +61-70968-0038-2217: ROBIN FITZOOTH +61-70968-0039-2218: AND MINE IS WILL STUTELEY SHALL WE BE COMRADES +61-70968-0040-2219: RIGHT WILLINGLY FOR BETWEEN US WE HAVE WON THE BATTLE ANSWERED ROBIN 
+61-70968-0041-2220: I LIKE YOU WILL YOU ARE THE SECOND WILL THAT I HAVE MET AND LIKED WITHIN TWO DAYS IS THERE A SIGN IN THAT +61-70968-0042-2221: MONTFICHET CALLED OUT FOR ROBIN TO GIVE HIM AN ARM +61-70968-0043-2222: FRIENDS SAID MONTFICHET FAINTLY TO THE WRESTLERS BEAR US ESCORT SO FAR AS THE SHERIFF'S HOUSE +61-70968-0044-2223: IT WILL NOT BE SAFE FOR YOU TO STAY HERE NOW +61-70968-0045-2224: PRAY FOLLOW US WITH MINE AND MY LORD SHERIFF'S MEN +61-70968-0046-2225: (NOTTINGHAM->NODDING HIM) CASTLE WAS REACHED AND ADMITTANCE WAS DEMANDED +61-70968-0047-2226: MASTER MONCEUX THE SHERIFF OF NOTTINGHAM WAS MIGHTILY PUT ABOUT WHEN TOLD OF THE RIOTING +61-70968-0048-2227: AND HENRY MIGHT RETURN TO ENGLAND AT ANY MOMENT +61-70968-0049-2228: HAVE YOUR WILL CHILD IF THE BOY ALSO (WILLS->WILDS) IT MONTFICHET ANSWERED FEELING TOO ILL TO OPPOSE ANYTHING VERY STRONGLY JUST THEN +61-70968-0050-2229: HE MADE AN EFFORT TO HIDE HIS CONDITION FROM THEM ALL AND ROBIN FELT HIS FINGERS TIGHTEN UPON HIS ARM +61-70968-0051-2230: (BEG->BEGGED) ME A ROOM OF THE SHERIFF CHILD QUICKLY +61-70968-0052-2231: BUT WHO IS THIS FELLOW PLUCKING (AT YOUR SLEEVE->IT OR STEVE) +61-70968-0053-2232: HE IS MY ESQUIRE EXCELLENCY RETURNED ROBIN WITH DIGNITY +61-70968-0054-2233: MISTRESS FITZOOTH HAD BEEN CARRIED OFF BY THE SHERIFF'S DAUGHTER AND HER MAIDS AS SOON AS THEY HAD ENTERED THE HOUSE SO THAT ROBIN ALONE HAD THE CARE OF (MONTFICHET->MONT VICHET) +61-70968-0055-2234: ROBIN WAS GLAD WHEN AT LENGTH THEY WERE LEFT TO THEIR OWN DEVICES +61-70968-0056-2235: THE WINE DID CERTAINLY BRING BACK THE COLOR TO THE SQUIRE'S CHEEKS +61-70968-0057-2236: THESE ESCAPADES ARE NOT FOR OLD GAMEWELL LAD HIS DAY HAS COME TO TWILIGHT +61-70968-0058-2237: WILL YOU FORGIVE ME NOW +61-70968-0059-2238: IT WILL BE NO DISAPPOINTMENT TO ME +61-70968-0060-2239: NO THANKS I AM GLAD TO GIVE YOU SUCH EASY HAPPINESS +61-70968-0061-2240: YOU ARE A WORTHY LEECH WILL PRESENTLY WHISPERED ROBIN THE WINE HAS WORKED A MARVEL +61-70968-0062-2241: (AY->I) AND SHOW YOU SOME PRETTY TRICKS +61-70970-0000-2242: YOUNG FITZOOTH HAD BEEN COMMANDED TO HIS MOTHER'S CHAMBER SO SOON AS HE HAD COME OUT FROM HIS CONVERSE WITH THE SQUIRE +61-70970-0001-2243: THERE (BEFELL->BEFEL) AN ANXIOUS INTERVIEW MISTRESS FITZOOTH ARGUING (FOR->FOUR) AND AGAINST THE SQUIRE'S PROJECT IN A BREATH +61-70970-0002-2244: MOST OF ALL ROBIN THOUGHT OF HIS FATHER WHAT WOULD HE COUNSEL +61-70970-0003-2245: IF FOR A WHIM YOU BEGGAR YOURSELF I CANNOT STAY YOU +61-70970-0004-2246: BUT TAKE IT WHILST I LIVE AND (WEAR->WHERE) MONTFICHET'S SHIELD IN THE DAYS WHEN MY EYES CAN BE REJOICED BY SO BRAVE A SIGHT FOR YOU WILL (NE'ER->NEVER) DISGRACE OUR (SCUTCHEON->DUCHEN) I WARRANT ME +61-70970-0005-2247: THE LAD HAD CHECKED HIM THEN +61-70970-0006-2248: NEVER THAT SIR HE HAD SAID +61-70970-0007-2249: HE WAS IN DEEP CONVERSE WITH THE CLERK AND ENTERED THE HALL HOLDING HIM BY THE ARM +61-70970-0008-2250: NOW TO BED BOY +61-70970-0009-2251: TIS LATE AND I GO MYSELF WITHIN A SHORT SPACE +61-70970-0010-2252: DISMISS YOUR SQUIRE ROBIN AND BID ME GOOD (E E N->EVEN) +61-70970-0011-2253: AS ANY IN ENGLAND I WOULD SAY SAID GAMEWELL PROUDLY THAT IS IN HIS DAY +61-70970-0012-2254: YET HE WILL TEACH YOU A FEW TRICKS WHEN MORNING IS COME +61-70970-0013-2255: THERE WAS NO CHANCE TO ALTER HIS SLEEPING ROOM TO ONE NEARER TO GAMEWELL'S CHAMBER +61-70970-0014-2256: PRESENTLY HE CROSSED THE FLOOR OF HIS ROOM WITH DECIDED STEP +61-70970-0015-2257: WILL CRIED HE SOFTLY AND STUTELEY WHO HAD CHOSEN HIS COUCH ACROSS THE DOOR OF HIS YOUNG 
MASTER'S CHAMBER SPRANG UP AT ONCE IN ANSWER +61-70970-0016-2258: WE WILL GO OUT TOGETHER TO THE BOWER THERE IS A WAY DOWN TO THE COURT FROM MY WINDOW +61-70970-0017-2259: REST AND BE STILL UNTIL I WARN YOU +61-70970-0018-2260: THE HOURS PASSED WEARILY BY AND MOVEMENT COULD YET BE HEARD ABOUT THE HALL +61-70970-0019-2261: AT LAST ALL WAS QUIET AND BLACK IN THE COURTYARD OF GAMEWELL +61-70970-0020-2262: WILL WHISPERED ROBIN OPENING HIS DOOR AS HE SPOKE ARE YOU READY +61-70970-0021-2263: THEY THEN RENEWED THEIR JOURNEY AND UNDER THE BETTER LIGHT MADE A SAFE CROSSING OF THE STABLE ROOFS +61-70970-0022-2264: ROBIN ENTERED THE HUT DRAGGING THE UNWILLING ESQUIRE AFTER HIM +61-70970-0023-2265: BE NOT SO FOOLISH FRIEND SAID FITZOOTH CROSSLY +61-70970-0024-2266: THEY MOVED THEREAFTER CAUTIOUSLY ABOUT THE HUT GROPING BEFORE AND ABOUT THEM TO FIND SOMETHING TO SHOW THAT (*->THE) WARRENTON HAD FULFILLED HIS MISSION +61-70970-0025-2267: THEY WERE UPON THE VERGE OF AN OPEN TRAP IN THE FAR CORNER OF THE HUT AND STUTELEY HAD TRIPPED OVER THE EDGE OF THE REVERSED FLAP MOUTH OF THIS PIT +61-70970-0026-2268: FITZOOTH'S HAND RESTED AT LAST UPON THE (TOP RUNG->TOPRUNG) OF A LADDER AND SLOWLY THE TRUTH CAME TO HIM +61-70970-0027-2269: ROBIN CAREFULLY DESCENDED THE LADDER AND FOUND HIMSELF SOON UPON FIRM ROCKY GROUND +61-70970-0028-2270: (*->A) STUTELEY WAS BY HIS SIDE IN A FLASH AND THEN THEY BOTH BEGAN FEELING ABOUT THEM TO ASCERTAIN THE SHAPE AND CHARACTER OF THIS VAULT +61-70970-0029-2271: FROM THE BLACKNESS BEHIND THE LIGHT THEY HEARD A VOICE (WARRENTON'S->WARRENTONS) +61-70970-0030-2272: SAVE ME MASTERS BUT YOU STARTLED ME RARELY +61-70970-0031-2273: CRIED HE WAVING THE (LANTHORN->LANTERN) BEFORE HIM TO MAKE SURE THAT THESE WERE NO GHOSTS IN FRONT OF HIM +61-70970-0032-2274: (ENQUIRED->INQUIRED) ROBIN WITH HIS (SUSPICIONS->SUSPICION) STILL UPON HIM +61-70970-0033-2275: TRULY SUCH A HORSE (SHOULD->WOULD) BE WORTH MUCH IN NOTTINGHAM FAIR +61-70970-0034-2276: NAY NAY LORDING ANSWERED WARRENTON WITH A HALF LAUGH +61-70970-0035-2277: WARRENTON SPOKE THUS WITH SIGNIFICANCE TO SHOW ROBIN THAT HE WAS NOT TO THINK (GEOFFREY'S->JEFFREY'S) CLAIMS TO THE ESTATE WOULD BE PASSED BY +61-70970-0036-2278: ROBIN FITZOOTH SAW THAT HIS DOUBTS OF WARRENTON HAD BEEN UNFAIR AND HE BECAME ASHAMED OF HIMSELF FOR (HARBORING->HARBOURING) THEM +61-70970-0037-2279: HIS TONES RANG PLEASANTLY (ON WARRENTON'S->UNWARRANTON'S) EARS AND FORTHWITH (A->THE) GOOD FELLOWSHIP WAS HERALDED BETWEEN THEM +61-70970-0038-2280: THE OLD SERVANT TOLD HIM QUIETLY AS THEY CREPT BACK TO GAMEWELL THAT THIS (PASSAGE WAY->PASSAGEWAY) LED FROM THE HUT IN THE (PLEASANCE->PLEASANTS) TO SHERWOOD AND THAT (GEOFFREY->JEFFREY) FOR THE TIME WAS HIDING WITH THE OUTLAWS IN THE FOREST +61-70970-0039-2281: HE (IMPLORES->IMPLIES) US TO BE DISCREET AS THE GRAVE IN THIS MATTER FOR IN SOOTH HIS LIFE IS IN THE HOLLOW OF OUR HANDS +61-70970-0040-2282: THEY (REGAINED->REGAIN) THEIR APARTMENT APPARENTLY WITHOUT DISTURBING THE HOUSEHOLD OF GAMEWELL +672-122797-0000-1529: OUT IN THE (WOODS->WOOD) STOOD A NICE LITTLE FIR TREE +672-122797-0001-1530: THE PLACE HE HAD WAS A VERY GOOD ONE THE SUN SHONE ON HIM AS TO FRESH AIR THERE WAS ENOUGH OF THAT AND ROUND HIM GREW MANY LARGE SIZED COMRADES PINES AS WELL AS (FIRS->FURS) +672-122797-0002-1531: HE DID NOT THINK OF THE WARM SUN AND OF THE FRESH AIR HE DID NOT CARE FOR THE LITTLE COTTAGE CHILDREN THAT RAN ABOUT (AND->IN) PRATTLED WHEN THEY WERE IN THE WOODS LOOKING FOR WILD STRAWBERRIES +672-122797-0003-1532: BUT THIS WAS WHAT THE TREE 
COULD NOT BEAR TO HEAR +672-122797-0004-1533: IN WINTER WHEN THE SNOW LAY GLITTERING ON THE GROUND A HARE WOULD OFTEN COME LEAPING ALONG AND JUMP RIGHT OVER THE LITTLE TREE +672-122797-0005-1534: OH THAT MADE HIM SO ANGRY +672-122797-0006-1535: TO GROW AND GROW TO GET OLDER AND BE TALL THOUGHT THE TREE THAT AFTER ALL IS THE MOST DELIGHTFUL THING IN THE WORLD +672-122797-0007-1536: IN AUTUMN THE (WOOD CUTTERS->WOODCUTTERS) ALWAYS CAME AND FELLED SOME OF THE LARGEST TREES +672-122797-0008-1537: THIS HAPPENED EVERY YEAR AND THE YOUNG FIR TREE THAT HAD NOW GROWN TO A VERY COMELY (SIZE->SIZED) TREMBLED AT THE SIGHT FOR THE MAGNIFICENT GREAT TREES FELL TO THE EARTH WITH NOISE AND CRACKING THE BRANCHES WERE LOPPED OFF AND THE TREES LOOKED LONG AND BARE THEY WERE HARDLY TO BE (RECOGNISED->RECOGNIZED) AND THEN THEY WERE LAID IN CARTS AND THE HORSES DRAGGED THEM OUT OF THE WOOD +672-122797-0009-1538: HAVE YOU NOT MET (THEM ANYWHERE->THE MANYWHERE) +672-122797-0010-1539: REJOICE IN THY GROWTH SAID THE SUNBEAMS +672-122797-0011-1540: AND THEN WHAT HAPPENS THEN +672-122797-0012-1541: I WOULD FAIN KNOW IF I AM DESTINED FOR SO GLORIOUS A CAREER CRIED THE TREE REJOICING +672-122797-0013-1542: I AM NOW TALL AND MY BRANCHES SPREAD LIKE THE OTHERS THAT WERE CARRIED OFF LAST YEAR OH +672-122797-0014-1543: WERE I BUT ALREADY ON THE CART +672-122797-0015-1544: (WERE->WHERE) I IN THE WARM ROOM WITH ALL (THE SPLENDOR->BUT SPLENDOUR) AND MAGNIFICENCE +672-122797-0016-1545: YES (THEN->AND) SOMETHING BETTER SOMETHING STILL GRANDER WILL SURELY FOLLOW OR WHEREFORE SHOULD THEY THUS ORNAMENT ME +672-122797-0017-1546: SOMETHING BETTER (SOMETHING->OR SOME THING) STILL GRANDER MUST FOLLOW BUT WHAT +672-122797-0018-1547: REJOICE IN OUR PRESENCE SAID THE (AIR AND->HEIR IN) THE SUNLIGHT +672-122797-0019-1548: REJOICE IN THY OWN FRESH YOUTH +672-122797-0020-1549: BUT THE TREE DID NOT REJOICE AT ALL HE GREW AND GREW AND WAS GREEN BOTH WINTER AND SUMMER +672-122797-0021-1550: AND TOWARDS CHRISTMAS HE WAS ONE OF THE FIRST THAT WAS CUT DOWN +672-122797-0022-1551: THE AXE STRUCK DEEP INTO THE VERY PITH THE TREE FELL TO THE EARTH WITH A SIGH HE FELT A PANG IT WAS LIKE A SWOON HE COULD NOT THINK OF HAPPINESS FOR HE WAS SORROWFUL AT BEING SEPARATED FROM HIS HOME FROM THE PLACE WHERE HE HAD SPRUNG UP +672-122797-0023-1552: HE WELL KNEW THAT HE SHOULD NEVER SEE HIS DEAR OLD COMRADES THE LITTLE BUSHES AND FLOWERS AROUND HIM (ANYMORE->ANY MORE) PERHAPS NOT EVEN THE BIRDS +672-122797-0024-1553: THE DEPARTURE WAS NOT AT ALL AGREEABLE +672-122797-0025-1554: THE TREE ONLY CAME TO HIMSELF WHEN HE WAS UNLOADED IN A (COURT YARD->COURTYARD) WITH THE OTHER TREES AND HEARD A MAN SAY THAT ONE IS SPLENDID WE DON'T WANT THE OTHERS +672-122797-0026-1555: THERE TOO WERE LARGE EASY CHAIRS SILKEN SOFAS LARGE TABLES FULL OF PICTURE BOOKS AND FULL OF TOYS WORTH HUNDREDS AND HUNDREDS OF CROWNS AT LEAST THE CHILDREN SAID SO +672-122797-0027-1556: THE SERVANTS AS WELL AS THE YOUNG LADIES DECORATED IT +672-122797-0028-1557: THIS EVENING THEY ALL SAID +672-122797-0029-1558: HOW IT WILL SHINE THIS EVENING +672-122797-0030-1559: PERHAPS THE OTHER TREES FROM THE FOREST WILL COME TO LOOK AT ME +672-122797-0031-1560: IT BLAZED UP FAMOUSLY HELP HELP +672-122797-0032-1561: CRIED THE YOUNG LADIES AND THEY QUICKLY PUT OUT THE FIRE +672-122797-0033-1562: A STORY +672-122797-0034-1563: A STORY CRIED THE CHILDREN DRAWING A LITTLE FAT MAN TOWARDS THE TREE +672-122797-0035-1564: BUT I SHALL TELL ONLY ONE STORY +672-122797-0036-1565: HUMPY (DUMPY->DON'T BE) FELL DOWNSTAIRS AND 
YET HE MARRIED THE PRINCESS +672-122797-0037-1566: THAT'S THE WAY OF THE WORLD +672-122797-0038-1567: THOUGHT THE FIR TREE AND BELIEVED IT ALL BECAUSE THE MAN WHO TOLD THE STORY WAS SO GOOD LOOKING WELL WELL +672-122797-0039-1568: I WON'T TREMBLE TO MORROW THOUGHT THE FIR TREE +672-122797-0040-1569: AND THE WHOLE NIGHT THE TREE STOOD STILL AND IN DEEP THOUGHT +672-122797-0041-1570: IN THE MORNING THE SERVANT AND THE HOUSEMAID CAME IN +672-122797-0042-1571: BUT THEY DRAGGED HIM OUT OF THE ROOM AND UP THE STAIRS INTO THE LOFT AND HERE (IN->IT) A DARK CORNER WHERE NO DAYLIGHT COULD ENTER THEY LEFT HIM +672-122797-0043-1572: WHAT'S THE MEANING OF THIS THOUGHT THE TREE +672-122797-0044-1573: AND HE LEANED AGAINST THE WALL LOST IN REVERIE +672-122797-0045-1574: TIME ENOUGH HAD HE TOO FOR HIS REFLECTIONS FOR DAYS AND NIGHTS PASSED ON AND NOBODY CAME UP AND WHEN AT LAST SOMEBODY DID COME IT WAS ONLY TO PUT SOME GREAT TRUNKS IN A CORNER OUT OF THE WAY +672-122797-0046-1575: TIS NOW WINTER OUT OF DOORS THOUGHT THE TREE +672-122797-0047-1576: HOW KIND MAN IS AFTER ALL +672-122797-0048-1577: IF IT ONLY WERE NOT SO DARK HERE AND SO TERRIBLY LONELY +672-122797-0049-1578: SQUEAK SQUEAK +672-122797-0050-1579: THEY SNUFFED ABOUT THE FIR TREE AND RUSTLED AMONG THE BRANCHES +672-122797-0051-1580: I AM BY NO MEANS OLD SAID THE FIR TREE +672-122797-0052-1581: THERE'S MANY A ONE CONSIDERABLY OLDER THAN I AM +672-122797-0053-1582: THEY WERE SO EXTREMELY CURIOUS +672-122797-0054-1583: I KNOW NO SUCH PLACE SAID THE TREE +672-122797-0055-1584: AND THEN HE TOLD ALL ABOUT HIS YOUTH AND THE LITTLE MICE HAD NEVER HEARD THE LIKE BEFORE AND THEY LISTENED AND SAID +672-122797-0056-1585: SAID THE (FIR->FUR) TREE THINKING OVER WHAT HE HAD HIMSELF RELATED +672-122797-0057-1586: YES IN REALITY THOSE WERE HAPPY TIMES +672-122797-0058-1587: WHO (IS->IT'S) HUMPY (DUMPY->DUMPEY) ASKED THE MICE +672-122797-0059-1588: ONLY THAT ONE ANSWERED THE TREE +672-122797-0060-1589: IT IS A VERY STUPID STORY +672-122797-0061-1590: DON'T YOU KNOW ONE ABOUT BACON AND TALLOW CANDLES CAN'T YOU TELL ANY LARDER STORIES +672-122797-0062-1591: NO SAID THE TREE +672-122797-0063-1592: THEN GOOD BYE SAID THE RATS AND THEY WENT HOME +672-122797-0064-1593: AT LAST THE LITTLE MICE STAYED AWAY ALSO AND THE TREE SIGHED AFTER ALL IT WAS VERY PLEASANT WHEN THE SLEEK LITTLE MICE SAT ROUND ME AND LISTENED TO WHAT I TOLD THEM +672-122797-0065-1594: NOW THAT TOO IS OVER +672-122797-0066-1595: WHY ONE MORNING THERE CAME A QUANTITY OF PEOPLE AND SET TO WORK IN THE LOFT +672-122797-0067-1596: THE TRUNKS WERE MOVED THE TREE WAS PULLED OUT AND THROWN RATHER HARD IT IS TRUE DOWN ON THE FLOOR BUT A MAN DREW HIM TOWARDS THE STAIRS WHERE THE DAYLIGHT SHONE +672-122797-0068-1597: BUT IT WAS NOT THE FIR TREE THAT THEY MEANT +672-122797-0069-1598: IT WAS IN A CORNER THAT HE LAY AMONG WEEDS AND NETTLES +672-122797-0070-1599: THE GOLDEN STAR OF TINSEL WAS STILL ON THE TOP OF THE TREE AND GLITTERED IN THE SUNSHINE +672-122797-0071-1600: IN THE (COURT YARD->COURTYARD) SOME OF THE (MERRY->MARRIED) CHILDREN WERE PLAYING WHO HAD DANCED AT CHRISTMAS ROUND THE FIR TREE AND WERE SO GLAD AT THE SIGHT OF HIM +672-122797-0072-1601: AND THE GARDENER'S BOY CHOPPED THE TREE INTO SMALL PIECES THERE WAS A WHOLE HEAP LYING THERE +672-122797-0073-1602: THE WOOD FLAMED UP SPLENDIDLY UNDER THE LARGE BREWING COPPER AND (IT SIGHED->ITS SIDE) SO DEEPLY +672-122797-0074-1603: HOWEVER THAT WAS OVER NOW THE TREE GONE THE STORY AT AN END +6829-68769-0000-1858: KENNETH AND BETH REFRAINED FROM TELLING THE 
OTHER GIRLS OR UNCLE JOHN OF OLD WILL (ROGERS'S->ROGERS) VISIT BUT THEY GOT MISTER WATSON IN THE LIBRARY AND QUESTIONED HIM CLOSELY ABOUT THE PENALTY FOR FORGING A CHECK +6829-68769-0001-1859: IT WAS A SERIOUS CRIME INDEED MISTER WATSON TOLD THEM AND TOM GATES BADE FAIR TO SERVE A LENGTHY TERM IN (*->THE) STATE'S PRISON AS A CONSEQUENCE OF HIS RASH ACT +6829-68769-0002-1860: I CAN'T SEE IT IN THAT LIGHT SAID THE OLD LAWYER +6829-68769-0003-1861: IT WAS A DELIBERATE THEFT FROM HIS EMPLOYERS TO PROTECT A GIRL HE LOVED +6829-68769-0004-1862: BUT THEY COULD NOT HAVE PROVEN A CASE AGAINST LUCY IF SHE WAS INNOCENT AND ALL THEIR THREATS OF ARRESTING HER WERE PROBABLY (*->A) MERE BLUFF +6829-68769-0005-1863: HE WAS (*->A) SOFT HEARTED AND IMPETUOUS SAID BETH AND BEING IN LOVE HE DIDN'T STOP TO COUNT THE COST +6829-68769-0006-1864: IF THE PROSECUTION WERE WITHDRAWN AND THE CASE SETTLED WITH THE VICTIM OF THE FORGED CHECK THEN THE YOUNG MAN WOULD BE ALLOWED HIS FREEDOM +6829-68769-0007-1865: BUT UNDER THE CIRCUMSTANCES I DOUBT (IF->OF) SUCH AN ARRANGEMENT COULD BE MADE +6829-68769-0008-1866: (FAIRVIEW WAS->FAIR VIEWS) TWELVE MILES AWAY BUT BY TEN O'CLOCK THEY DREW UP AT THE COUNTY (JAIL->TRAIL) +6829-68769-0009-1867: THEY WERE RECEIVED IN THE LITTLE OFFICE BY A MAN NAMED MARKHAM WHO WAS THE JAILER +6829-68769-0010-1868: WE WISH TO TALK WITH HIM ANSWERED KENNETH TALK +6829-68769-0011-1869: I'M RUNNING FOR REPRESENTATIVE ON THE REPUBLICAN TICKET SAID KENNETH QUIETLY +6829-68769-0012-1870: (OH->I'LL) SAY THAT'S DIFFERENT OBSERVED MARKHAM ALTERING HIS DEMEANOR +6829-68769-0013-1871: (MAY WE SEE->MAYBE SEA) GATES AT ONCE ASKED KENNETH +6829-68769-0014-1872: THEY FOLLOWED THE JAILER ALONG A SUCCESSION OF PASSAGES +6829-68769-0015-1873: SOMETIMES (I'M->ON) THAT (YEARNING->YEARNIN) FOR A SMOKE I'M NEARLY CRAZY (AN->AND) I DUNNO WHICH IS (WORST->WORSE) DYIN ONE WAY OR (ANOTHER->THE OTHER) +6829-68769-0016-1874: HE UNLOCKED THE DOOR AND CALLED HERE'S VISITORS TOM +6829-68769-0017-1875: WORSE TOM WORSE (N->THAN) EVER REPLIED THE JAILER GLOOMILY +6829-68769-0018-1876: (MISS DE->MISTER) GRAF SAID KENNETH NOTICING THE BOY'S FACE CRITICALLY AS HE STOOD WHERE THE LIGHT FROM THE PASSAGE FELL UPON IT +6829-68769-0019-1877: SORRY WE HAVEN'T ANY RECEPTION ROOM IN THE JAIL +6829-68769-0020-1878: SIT DOWN PLEASE SAID GATES IN A CHEERFUL AND PLEASANT VOICE THERE'S A BENCH HERE +6829-68769-0021-1879: A FRESH WHOLESOME LOOKING BOY WAS TOM GATES (WITH->WHOSE) STEADY GRAY EYES AN INTELLIGENT FOREHEAD BUT A SENSITIVE RATHER WEAK MOUTH +6829-68769-0022-1880: WE HAVE HEARD SOMETHING OF YOUR STORY SAID KENNETH AND ARE INTERESTED IN IT +6829-68769-0023-1881: I DIDN'T STOP TO THINK WHETHER IT WAS FOOLISH OR NOT I DID IT AND I'M GLAD I DID (*->IT) +6829-68769-0024-1882: OLD WILL IS A FINE FELLOW BUT POOR AND HELPLESS SINCE MISSUS ROGERS HAD HER ACCIDENT +6829-68769-0025-1883: THEN ROGERS WOULDN'T DO ANYTHING BUT LEAD HER AROUND AND WAIT UPON HER AND THE PLACE WENT TO RACK AND RUIN +6829-68769-0026-1884: HE SPOKE SIMPLY BUT PACED UP AND DOWN THE NARROW CELL IN FRONT OF THEM +6829-68769-0027-1885: WHOSE NAME DID YOU SIGN TO THE CHECK ASKED KENNETH +6829-68769-0028-1886: HE IS SUPPOSED TO SIGN ALL THE CHECKS OF THE CONCERN +6829-68769-0029-1887: IT'S A STOCK COMPANY (AND->IN) RICH +6829-68769-0030-1888: I WAS (BOOKKEEPER->BIT KEEPER) SO IT WAS EASY TO GET A BLANK CHECK AND FORGE THE SIGNATURE +6829-68769-0031-1889: AS REGARDS MY ROBBING THE COMPANY I'LL SAY THAT I SAVED (THEM->HIM) A HEAVY (LOSS->LOST) ONE DAY +6829-68769-0032-1890: I 
DISCOVERED AND PUT OUT A FIRE THAT WOULD HAVE DESTROYED THE WHOLE PLANT BUT (MARSHALL->MARTIAL) NEVER EVEN THANKED ME +6829-68769-0033-1891: IT WAS BETTER FOR HIM TO THINK THE GIRL UNFEELING THAN TO KNOW THE TRUTH +6829-68769-0034-1892: I'M GOING TO SEE MISTER (MARSHALL->MARSHAL) SAID KENNETH AND DISCOVER WHAT I CAN DO TO ASSIST YOU THANK YOU SIR +6829-68769-0035-1893: IT WON'T BE MUCH BUT I'M GRATEFUL TO FIND A FRIEND +6829-68769-0036-1894: THEY LEFT HIM THEN FOR THE JAILER ARRIVED TO UNLOCK THE DOOR AND ESCORT THEM TO THE OFFICE +6829-68769-0037-1895: I'VE SEEN LOTS OF THAT KIND IN MY DAY +6829-68769-0038-1896: AND IT RUINS A MAN'S DISPOSITION +6829-68769-0039-1897: HE LOOKED UP RATHER UNGRACIOUSLY BUT MOTIONED THEM TO BE SEATED +6829-68769-0040-1898: SOME GIRL HAS BEEN (*->IN) HERE TWICE TO INTERVIEW MY MEN AND I HAVE REFUSED TO ADMIT HER +6829-68769-0041-1899: I'M NOT ELECTIONEERING JUST NOW +6829-68769-0042-1900: OH WELL SIR WHAT ABOUT (HIM->EM) +6829-68769-0043-1901: AND HE DESERVES A TERM IN STATE'S PRISON +6829-68769-0044-1902: IT HAS COST ME TWICE SIXTY DOLLARS IN ANNOYANCE +6829-68769-0045-1903: I'LL PAY ALL THE (COSTS->COST) BESIDES +6829-68769-0046-1904: YOU'RE FOOLISH WHY SHOULD YOU DO ALL THIS +6829-68769-0047-1905: I HAVE MY OWN REASONS MISTER MARSHALL +6829-68769-0048-1906: GIVE ME A CHECK FOR A HUNDRED AND FIFTY AND I'LL TURN OVER TO YOU THE FORGED CHECK AND (QUASH->CASH) FURTHER PROCEEDINGS +6829-68769-0049-1907: HE DETESTED THE GRASPING DISPOSITION THAT WOULD ENDEAVOR TO TAKE ADVANTAGE OF HIS EVIDENT DESIRE TO HELP YOUNG GATES +6829-68769-0050-1908: BETH UNEASY AT (HIS->A) SILENCE NUDGED HIM +6829-68769-0051-1909: THERE WAS A GRIM SMILE OF AMUSEMENT ON HIS SHREWD FACE +6829-68769-0052-1910: HE MIGHT HAVE HAD THAT FORGED CHECK FOR THE FACE OF IT IF HE'D BEEN SHARP +6829-68769-0053-1911: AND TO THINK WE CAN SAVE ALL THAT MISERY AND DESPAIR BY THE PAYMENT OF A HUNDRED AND FIFTY DOLLARS +6829-68771-0000-1912: SO TO THE SURPRISE OF THE DEMOCRATIC COMMITTEE AND ALL HIS FRIENDS MISTER HOPKINS ANNOUNCED THAT HE WOULD OPPOSE (FORBES'S->FORCE) AGGRESSIVE CAMPAIGN WITH AN EQUAL AGGRESSIVENESS AND SPEND AS MANY DOLLARS IN DOING SO AS MIGHT BE NECESSARY +6829-68771-0001-1913: ONE OF MISTER (HOPKINS'S->HOPKINS) FIRST TASKS AFTER CALLING HIS FAITHFUL (HENCHMEN->HENCHMAN) AROUND HIM WAS TO MAKE A CAREFUL CANVASS OF THE VOTERS OF HIS DISTRICT TO SEE WHAT WAS STILL TO BE ACCOMPLISHED +6829-68771-0002-1914: THE WEAK (KNEED->NEED) CONTINGENCY MUST BE STRENGTHENED AND FORTIFIED AND A COUPLE OF HUNDRED VOTES IN ONE WAY OR (ANOTHER->THE OTHER) SECURED FROM THE OPPOSITION +6829-68771-0003-1915: THE DEMOCRATIC COMMITTEE FIGURED OUT A WAY TO DO THIS +6829-68771-0004-1916: UNDER ORDINARY CONDITIONS REYNOLDS WAS SURE TO BE ELECTED BUT THE COMMITTEE PROPOSED TO SACRIFICE HIM IN ORDER TO ELECT HOPKINS +6829-68771-0005-1917: THE ONLY THING NECESSARY WAS TO FIX SETH REYNOLDS AND THIS HOPKINS ARRANGED PERSONALLY +6829-68771-0006-1918: AND THIS WAS WHY KENNETH AND BETH DISCOVERED HIM CONVERSING WITH THE YOUNG WOMAN IN THE BUGGY +6829-68771-0007-1919: THE DESCRIPTION SHE GAVE OF THE COMING RECEPTION TO THE (WOMAN'S->WOMEN'S) POLITICAL LEAGUE WAS SO HUMOROUS AND DIVERTING THAT THEY WERE BOTH LAUGHING HEARTILY OVER THE THING WHEN THE YOUNG PEOPLE PASSED THEM AND THUS MISTER HOPKINS FAILED TO NOTICE WHO THE (OCCUPANTS->OCCUPANT) OF THE OTHER VEHICLE WERE +6829-68771-0008-1920: THESE WOMEN WERE FLATTERED BY THE ATTENTION OF THE YOUNG LADY AND HAD PROMISED TO ASSIST IN ELECTING MISTER FORBES 
+6829-68771-0009-1921: LOUISE HOPED FOR EXCELLENT RESULTS FROM THIS ORGANIZATION AND WISHED THE ENTERTAINMENT TO BE SO EFFECTIVE IN WINNING THEIR GOOD WILL THAT THEY WOULD WORK EARNESTLY FOR THE CAUSE IN WHICH THEY WERE ENLISTED +6829-68771-0010-1922: THE (FAIRVIEW->FAIR VIEW) BAND WAS ENGAGED TO DISCOURSE AS MUCH HARMONY AS IT COULD PRODUCE AND THE RESOURCES OF THE GREAT HOUSE WERE TAXED TO ENTERTAIN THE GUESTS +6829-68771-0011-1923: TABLES WERE SPREAD ON THE LAWN AND A DAINTY BUT SUBSTANTIAL REPAST WAS TO BE SERVED +6829-68771-0012-1924: THIS WAS THE FIRST OCCASION WITHIN A GENERATION WHEN SUCH AN ENTERTAINMENT HAD BEEN GIVEN AT ELMHURST AND THE ONLY ONE WITHIN THE MEMORY OF MAN (WHERE->WERE) THE NEIGHBORS AND COUNTRY PEOPLE HAD BEEN (*->THE) INVITED GUESTS +6829-68771-0013-1925: THE (ATTENDANCE->ATTENDANTS) WAS UNEXPECTEDLY LARGE AND THE GIRLS WERE DELIGHTED (FORESEEING->FOR SEEING) GREAT SUCCESS FOR THEIR (FETE->FIGHT) +6829-68771-0014-1926: WE OUGHT TO HAVE MORE (ATTENDANTS->ATTENDANCE) BETH SAID LOUISE APPROACHING HER COUSIN +6829-68771-0015-1927: WON'T YOU RUN INTO THE HOUSE AND SEE IF MARTHA CAN'T SPARE ONE OR TWO MORE (MAIDS->MATES) +6829-68771-0016-1928: SHE WAS VERY FOND OF THE YOUNG LADIES WHOM SHE HAD KNOWN WHEN AUNT JANE WAS (THE->THEIR) MISTRESS HERE AND BETH WAS HER ESPECIAL FAVORITE +6829-68771-0017-1929: THE HOUSEKEEPER LED THE WAY (AND->IN) BETH FOLLOWED +6829-68771-0018-1930: FOR A MOMENT BETH STOOD STARING WHILE THE NEW MAID REGARDED HER WITH COMPOSURE AND A SLIGHT SMILE UPON HER BEAUTIFUL FACE +6829-68771-0019-1931: SHE WAS DRESSED IN THE REGULATION COSTUME OF THE MAIDS AT ELMHURST A (PLAIN->PLAYING) BLACK GOWN WITH (*->A) WHITE APRON AND CAP +6829-68771-0020-1932: THEN SHE GAVE A LITTLE LAUGH AND REPLIED NO MISS BETH I'M ELIZABETH (PARSONS->PARSON'S) +6829-68771-0021-1933: BUT IT CAN'T BE PROTESTED THE GIRL +6829-68771-0022-1934: I ATTEND TO THE HOUSEHOLD MENDING YOU KNOW AND CARE FOR THE LINEN +6829-68771-0023-1935: YOU SPEAK LIKE AN EDUCATED PERSON SAID BETH WONDERINGLY WHERE IS YOUR HOME +6829-68771-0024-1936: FOR THE FIRST TIME THE MAID SEEMED A LITTLE CONFUSED AND HER GAZE WANDERED FROM THE FACE OF HER VISITOR +6829-68771-0025-1937: SHE SAT DOWN IN A ROCKING CHAIR AND CLASPING HER HANDS IN HER LAP (ROCKED->ROCK) SLOWLY BACK AND FORTH I'M SORRY SAID BETH +6829-68771-0026-1938: ELIZA (PARSONS->PARSON) SHOOK HER HEAD +6829-68771-0027-1939: (THEY->FATE) THEY EXCITE ME IN SOME WAY AND I I CAN'T BEAR THEM YOU MUST EXCUSE ME +6829-68771-0028-1940: SHE EVEN SEEMED MILDLY AMUSED AT THE ATTENTION SHE ATTRACTED +6829-68771-0029-1941: BETH WAS A BEAUTIFUL GIRL THE HANDSOMEST OF THE THREE COUSINS BY FAR YET ELIZA SURPASSED HER (IN->A) NATURAL CHARM AND SEEMED WELL AWARE OF THE FACT +6829-68771-0030-1942: HER MANNER WAS NEITHER INDEPENDENT NOR ASSERTIVE BUT RATHER ONE OF WELL BRED COMPOSURE AND CALM RELIANCE +6829-68771-0031-1943: HER EYES WANDERED TO THE MAID'S HANDS +6829-68771-0032-1944: HOWEVER HER FEATURES (AND FORM->INFORM) MIGHT REPRESS ANY EVIDENCE OF NERVOUSNESS THESE HANDS TOLD A DIFFERENT STORY +6829-68771-0033-1945: SHE ROSE QUICKLY TO HER FEET WITH AN IMPETUOUS GESTURE THAT MADE HER VISITOR CATCH HER BREATH +6829-68771-0034-1946: I WISH I KNEW MYSELF SHE CRIED FIERCELY +6829-68771-0035-1947: WILL YOU LEAVE ME ALONE IN MY OWN ROOM OR MUST I GO AWAY TO ESCAPE YOU +6829-68771-0036-1948: ELIZA CLOSED THE DOOR BEHIND HER WITH A DECIDED SLAM AND A KEY CLICKED IN THE LOCK +6930-75918-0000-0: CONCORD RETURNED TO ITS PLACE AMIDST THE TENTS +6930-75918-0001-1: THE ENGLISH 
(FORWARDED->FOOTED) TO THE FRENCH BASKETS OF FLOWERS OF WHICH THEY HAD MADE A PLENTIFUL PROVISION TO GREET THE ARRIVAL OF THE YOUNG PRINCESS THE FRENCH IN RETURN INVITED THE ENGLISH TO A SUPPER WHICH WAS TO BE GIVEN THE NEXT DAY +6930-75918-0002-2: CONGRATULATIONS WERE POURED IN UPON THE PRINCESS EVERYWHERE DURING HER JOURNEY +6930-75918-0003-3: FROM THE RESPECT PAID HER ON ALL SIDES SHE SEEMED LIKE A QUEEN AND FROM THE ADORATION WITH WHICH SHE WAS TREATED BY TWO OR THREE SHE APPEARED AN OBJECT OF WORSHIP THE QUEEN MOTHER GAVE THE FRENCH THE MOST AFFECTIONATE RECEPTION FRANCE WAS HER NATIVE COUNTRY AND SHE HAD SUFFERED TOO MUCH UNHAPPINESS IN ENGLAND FOR ENGLAND TO HAVE MADE HER FORGET FRANCE +6930-75918-0004-4: SHE TAUGHT HER DAUGHTER THEN BY HER OWN AFFECTION FOR IT THAT LOVE FOR A COUNTRY WHERE THEY HAD BOTH BEEN HOSPITABLY RECEIVED AND WHERE A BRILLIANT FUTURE OPENED (BEFORE->FOR) THEM +6930-75918-0005-5: THE COUNT HAD THROWN HIMSELF BACK ON HIS SEAT LEANING HIS SHOULDERS AGAINST THE PARTITION OF THE TENT AND REMAINED THUS HIS FACE BURIED IN HIS HANDS WITH HEAVING CHEST AND RESTLESS LIMBS +6930-75918-0006-6: THIS HAS INDEED BEEN A HARASSING DAY CONTINUED THE YOUNG MAN HIS EYES FIXED UPON HIS FRIEND +6930-75918-0007-7: YOU WILL BE FRANK WITH ME I ALWAYS AM +6930-75918-0008-8: CAN YOU IMAGINE (WHY->MY) BUCKINGHAM HAS BEEN SO VIOLENT I SUSPECT +6930-75918-0009-9: IT IS YOU WHO ARE MISTAKEN RAOUL I HAVE READ HIS DISTRESS IN HIS EYES IN HIS EVERY GESTURE AND ACTION THE WHOLE DAY +6930-75918-0010-10: I CAN PERCEIVE LOVE CLEARLY ENOUGH +6930-75918-0011-11: I AM CONVINCED OF WHAT I SAY SAID THE COUNT +6930-75918-0012-12: IT IS ANNOYANCE THEN +6930-75918-0013-13: IN THOSE VERY TERMS I EVEN ADDED MORE +6930-75918-0014-14: BUT CONTINUED RAOUL NOT INTERRUPTED BY THIS MOVEMENT OF HIS FRIEND HEAVEN BE PRAISED THE FRENCH WHO ARE PRONOUNCED TO BE THOUGHTLESS AND INDISCREET RECKLESS EVEN ARE CAPABLE OF BRINGING A CALM AND SOUND JUDGMENT TO (BEAR ON->BARON) MATTERS OF SUCH HIGH IMPORTANCE +6930-75918-0015-15: THUS IT IS THAT THE HONOR OF THREE IS SAVED OUR (COUNTRY'S->COUNTRY) OUR (MASTER'S->MASTERS) AND OUR OWN +6930-75918-0016-16: YES I NEED REPOSE MANY THINGS HAVE AGITATED ME TO DAY BOTH IN MIND AND BODY WHEN YOU RETURN TO MORROW I SHALL NO LONGER BE THE SAME MAN +6930-75918-0017-17: BUT IN THIS FRIENDLY PRESSURE RAOUL COULD DETECT THE NERVOUS AGITATION OF A GREAT INTERNAL CONFLICT +6930-75918-0018-18: THE NIGHT WAS CLEAR STARLIT AND SPLENDID THE TEMPEST HAD PASSED AWAY AND THE SWEET INFLUENCES OF THE EVENING HAD RESTORED LIFE PEACE AND SECURITY EVERYWHERE +6930-75918-0019-19: UPON THE LARGE SQUARE IN FRONT OF THE HOTEL THE SHADOWS OF THE TENTS INTERSECTED BY THE GOLDEN MOONBEAMS FORMED AS IT WERE A HUGE MOSAIC OF JET AND YELLOW FLAGSTONES +6930-75918-0020-20: (BRAGELONNE->BRAGGLIN) WATCHED FOR SOME TIME THE CONDUCT OF THE TWO LOVERS LISTENED TO THE LOUD AND UNCIVIL SLUMBERS OF MANICAMP WHO SNORED AS IMPERIOUSLY AS THOUGH HE WAS WEARING HIS BLUE AND GOLD INSTEAD OF HIS VIOLET SUIT +6930-76324-0000-21: GOLIATH MAKES ANOTHER DISCOVERY +6930-76324-0001-22: (THEY->THERE) WERE CERTAINLY NO (NEARER->NEAR) THE SOLUTION OF THEIR PROBLEM +6930-76324-0002-23: THE POOR LITTLE THINGS CRIED CYNTHIA THINK OF THEM HAVING BEEN TURNED TO THE WALL ALL THESE YEARS +6930-76324-0003-24: NOW WHAT (WAS->IS) THE SENSE OF IT (TWO->TOO) INNOCENT BABIES LIKE THAT +6930-76324-0004-25: BUT JOYCE HAD NOT BEEN LISTENING ALL AT ONCE SHE PUT DOWN HER CANDLE ON THE TABLE AND FACED HER COMPANION +6930-76324-0005-26: THE TWIN BROTHER 
DID SOMETHING SHE DIDN'T LIKE AND SHE TURNED HIS PICTURE TO THE WALL +6930-76324-0006-27: HERS HAPPENED TO BE (IN->ON) THE SAME FRAME TOO BUT SHE EVIDENTLY DIDN'T CARE ABOUT (THAT->IT) +6930-76324-0007-28: NOW WHAT HAVE YOU TO SAY (CYNTHIA SPRAGUE->CYNTHIA'S BROGG) +6930-76324-0008-29: I THOUGHT WE WERE STUMPED AGAIN WHEN I FIRST SAW THAT PICTURE BUT (IT'S BEEN->IT SPIN) OF SOME USE AFTER ALL +6930-76324-0009-30: DO YOU SUPPOSE THE MINIATURE WAS A COPY OF THE SAME THING +6930-76324-0010-31: (WHAT->ONE) IN THE WORLD IS (THAT->IT) QUERIED JOYCE +6930-76324-0011-32: (THEY->MAY) WORRY ME TERRIBLY (AND BESIDES->EMBICIDES) I'D LIKE TO SEE WHAT THIS LOVELY FURNITURE LOOKS LIKE WITHOUT SUCH QUANTITIES OF DUST ALL OVER IT GOOD SCHEME (CYN->SIN) +6930-76324-0012-33: (WE'LL->OR) COME IN HERE THIS AFTERNOON WITH OLD CLOTHES ON AND (HAVE->HALF) A REGULAR HOUSE CLEANING +6930-76324-0013-34: (IT->YOU) CAN'T HURT ANYTHING I'M SURE FOR WE WON'T DISTURB THINGS AT ALL +6930-76324-0014-35: THIS THOUGHT HOWEVER DID NOT ENTER THE HEADS OF THE ENTHUSIASTIC PAIR +6930-76324-0015-36: SMUGGLING (*->IN) THE HOUSE CLEANING (PARAPHERNALIA->PAIR OF ANALIA) INTO THE CELLAR WINDOW UNOBSERVED THAT AFTERNOON PROVED NO EASY TASK FOR CYNTHIA HAD ADDED A WHISK BROOM AND DUST PAN TO THE OUTFIT +6930-76324-0016-37: THE LURE PROVED TOO MUCH FOR HIM AND HE CAME SPORTING AFTER IT AS (FRISKILY->FRISKLY) AS A YOUNG KITTEN MUCH TO CYNTHIA'S DELIGHT WHEN SHE CAUGHT SIGHT OF HIM +6930-76324-0017-38: OH LET HIM COME ALONG SHE URGED I DO LOVE TO SEE HIM ABOUT THAT OLD HOUSE +6930-76324-0018-39: HE MAKES IT SORT OF (COZIER->COSIER) +6930-76324-0019-40: NOW (LET'S->ITS) DUST THE FURNITURE AND PICTURES +6930-76324-0020-41: YET LITTLE AS IT WAS IT HAD ALREADY MADE A VAST DIFFERENCE IN THE ASPECT OF THE ROOM +6930-76324-0021-42: SURFACE (DUST->DUS) AT LEAST HAD BEEN REMOVED AND THE FINE OLD FURNITURE GAVE A HINT OF ITS REAL ELEGANCE AND POLISH +6930-76324-0022-43: THEN SHE SUDDENLY REMARKED +6930-76324-0023-44: AND MY POCKET MONEY IS GETTING LOW AGAIN AND YOU HAVEN'T ANY LEFT AS USUAL +6930-76324-0024-45: THEY SAY ILLUMINATION BY CANDLE LIGHT IS THE PRETTIEST IN THE WORLD +6930-76324-0025-46: WHY (IT'S->IT) GOLIATH AS USUAL THEY BOTH CRIED PEERING IN +6930-76324-0026-47: ISN'T HE THE GREATEST FOR GETTING INTO ODD CORNERS +6930-76324-0027-48: FORGETTING ALL THEIR WEARINESS THEY SEIZED THEIR CANDLES AND SCURRIED THROUGH THE HOUSE FINDING (AN->ON) OCCASIONAL PAPER TUCKED AWAY IN SOME ODD CORNER +6930-76324-0028-49: WELL I'M CONVINCED THAT THE BOARDED UP HOUSE MYSTERY HAPPENED NOT EARLIER THAN APRIL SIXTEENTH EIGHTEEN SIXTY ONE AND PROBABLY NOT MUCH LATER +6930-81414-0000-50: NO WORDS WERE SPOKEN NO LANGUAGE WAS UTTERED SAVE THAT OF WAILING AND HISSING AND THAT SOMEHOW WAS INDISTINCT AS IF IT EXISTED IN FANCY AND NOT IN REALITY +6930-81414-0001-51: I HEARD A NOISE BEHIND I TURNED AND SAW (KAFFAR->KAFFIR) HIS BLACK EYES SHINING WHILE IN HIS HAND HE HELD A GLEAMING KNIFE HE LIFTED IT ABOVE HIS HEAD AS IF TO STRIKE BUT I HAD THE STRENGTH OF TEN MEN AND I HURLED HIM FROM ME +6930-81414-0002-52: ONWARD SAID A DISTANT VOICE +6930-81414-0003-53: NO SOUND BROKE THE STILLNESS OF THE NIGHT +6930-81414-0004-54: THE STORY OF ITS EVIL INFLUENCE CAME BACK TO ME AND IN MY BEWILDERED CONDITION I WONDERED WHETHER THERE WAS NOT SOME TRUTH IN WHAT HAD BEEN SAID +6930-81414-0005-55: WHAT WAS THAT +6930-81414-0006-56: WHAT THEN A HUMAN HAND LARGE AND (SHAPELY->SHABBLY) APPEARED DISTINCTLY ON THE SURFACE OF THE POND +6930-81414-0007-57: NOTHING MORE NOT EVEN THE WRIST 
TO WHICH IT MIGHT BE ATTACHED +6930-81414-0008-58: IT DID NOT BECKON OR INDEED MOVE AT ALL IT WAS AS STILL AS THE HAND OF DEATH +6930-81414-0009-59: I AWOKE TO CONSCIOUSNESS FIGHTING AT FIRST IT SEEMED AS IF I WAS FIGHTING WITH (A->THE) PHANTOM BUT GRADUALLY MY OPPONENT BECAME MORE REAL TO ME IT WAS (KAFFAR->KAFFIR) +6930-81414-0010-60: A SOUND OF VOICES A FLASH OF LIGHT +6930-81414-0011-61: A FEELING OF FREEDOM AND I WAS AWAKE WHERE +6930-81414-0012-62: SAID ANOTHER VOICE WHICH I RECOGNIZED AS VOLTAIRE'S (KAFFAR->KAFFIR) +6930-81414-0013-63: I HAD SCARCELY KNOWN (WHAT->WHEN) I HAD BEEN SAYING OR DOING UP TO THIS TIME BUT AS HE SPOKE I LOOKED AT MY HAND +6930-81414-0014-64: IN THE LIGHT OF THE MOON I SAW A KNIFE RED WITH BLOOD AND MY HAND TOO WAS ALSO (DISCOLOURED->DISCOLORED) +6930-81414-0015-65: I DO NOT KNOW I AM DAZED BEWILDERED +6930-81414-0016-66: BUT THAT IS (KAFFAR'S->KAFFIR'S) KNIFE +6930-81414-0017-67: I KNOW HE HAD IT THIS VERY EVENING +6930-81414-0018-68: I (REMEMBER->REMEMBERED) SAYING HAVE WE BEEN TOGETHER +6930-81414-0019-69: (VOLTAIRE->OLD CHAIR) PICKED UP SOMETHING FROM THE GROUND AND LOOKED AT IT +6930-81414-0020-70: I SAY YOU DO KNOW WHAT THIS MEANS AND YOU MUST TELL US +6930-81414-0021-71: A TERRIBLE THOUGHT FLASHED INTO MY MIND +6930-81414-0022-72: I HAD AGAIN BEEN ACTING UNDER THE INFLUENCE OF THIS MAN'S POWER +6930-81414-0023-73: PERCHANCE TOO (KAFFAR'S->KAFFIR'S) DEATH MIGHT SERVE HIM IN GOOD STEAD +6930-81414-0024-74: MY TONGUE REFUSED TO ARTICULATE MY POWER OF SPEECH (LEFT->LAUGHED) ME +6930-81414-0025-75: MY POSITION WAS TOO TERRIBLE +6930-81414-0026-76: MY OVERWROUGHT NERVES YIELDED AT LAST +6930-81414-0027-77: FOR SOME TIME AFTER THAT I REMEMBERED NOTHING DISTINCTLY +7021-79730-0000-1399: THE THREE MODES OF MANAGEMENT +7021-79730-0001-1400: TO SUPPOSE THAT THE OBJECT OF THIS WORK IS TO AID IN EFFECTING SUCH A SUBSTITUTION AS THAT IS ENTIRELY TO MISTAKE ITS NATURE AND DESIGN +7021-79730-0002-1401: BY REASON AND AFFECTION +7021-79730-0003-1402: AS THE (CHAISE->CHASE) DRIVES AWAY MARY STANDS BEWILDERED AND PERPLEXED ON THE (DOOR STEP->DOORSTEP) HER MIND IN A TUMULT OF EXCITEMENT IN WHICH HATRED OF THE DOCTOR DISTRUST AND SUSPICION OF HER MOTHER DISAPPOINTMENT VEXATION AND ILL (HUMOR->HUMOUR) SURGE AND SWELL AMONG THOSE DELICATE ORGANIZATIONS ON WHICH THE STRUCTURE AND DEVELOPMENT OF THE SOUL SO CLOSELY DEPEND DOING PERHAPS AN IRREPARABLE INJURY +7021-79730-0004-1403: THE MOTHER AS SOON AS THE (CHAISE->CHASE) IS SO FAR TURNED THAT MARY CAN NO LONGER WATCH THE EXPRESSION OF HER COUNTENANCE GOES AWAY FROM THE DOOR WITH A SMILE OF COMPLACENCY AND SATISFACTION (UPON->ON) HER FACE AT THE INGENUITY AND SUCCESS OF HER LITTLE ARTIFICE +7021-79730-0005-1404: SO YOU WILL BE A GOOD GIRL I KNOW AND NOT MAKE ANY TROUBLE BUT WILL STAY AT HOME CONTENTEDLY WON'T YOU +7021-79730-0006-1405: THE MOTHER IN MANAGING THE CASE IN THIS WAY (RELIES->REALIZE) PARTLY ON CONVINCING THE REASON OF THE CHILD AND PARTLY ON AN APPEAL TO HER AFFECTION +7021-79730-0007-1406: IF YOU SHOULD NOT BE A GOOD GIRL BUT SHOULD SHOW SIGNS OF MAKING US ANY TROUBLE I SHALL HAVE TO SEND YOU OUT SOMEWHERE TO THE BACK PART OF THE HOUSE UNTIL WE ARE GONE +7021-79730-0008-1407: BUT THIS LAST SUPPOSITION IS ALMOST ALWAYS UNNECESSARY FOR IF MARY HAS BEEN HABITUALLY MANAGED ON THIS PRINCIPLE SHE WILL NOT MAKE ANY TROUBLE +7021-79730-0009-1408: IT IS INDEED TRUE THAT THE IMPORTANCE OF TACT AND SKILL IN THE TRAINING OF THE YOUNG AND OF CULTIVATING THEIR REASON AND SECURING THEIR AFFECTION (CAN NOT->CANNOT) BE OVERRATED 
+7021-79740-0000-1384: TO SUCH PERSONS THESE INDIRECT MODES OF TRAINING CHILDREN IN HABITS OF SUBORDINATION TO THEIR WILL OR RATHER OF YIELDING TO THEIR INFLUENCE ARE SPECIALLY USEFUL +7021-79740-0001-1385: DELLA HAD A YOUNG SISTER NAMED MARIA AND A COUSIN WHOSE NAME WAS JANE +7021-79740-0002-1386: NOW (DELIA->GELIA) CONTRIVED TO OBTAIN A GREAT INFLUENCE AND (ASCENDENCY->ASCENDANCY) OVER THE MINDS OF THE CHILDREN BY MEANS OF THESE DOLLS +7021-79740-0003-1387: TO GIVE AN IDEA OF THESE CONVERSATIONS I WILL REPORT ONE OF THEM IN FULL +7021-79740-0004-1388: YOU HAVE COME (ANDELLA ANDELLA->AND DELA AND DELLA) WAS THE NAME OF (JANE'S DOLL->JANE STALL) TO MAKE ROSALIE A VISIT +7021-79740-0005-1389: I AM VERY GLAD +7021-79740-0006-1390: I EXPECT YOU HAVE BEEN A VERY GOOD GIRL ANDELLA SINCE YOU WERE HERE LAST +7021-79740-0007-1391: THEN TURNING TO JANE SHE ASKED IN A SOMEWHAT ALTERED TONE HAS SHE BEEN A GOOD GIRL JANE +7021-79740-0008-1392: FOR INSTANCE ONE DAY THE CHILDREN HAD BEEN PLAYING UPON THE PIAZZA WITH BLOCKS AND OTHER PLAYTHINGS AND FINALLY HAD GONE INTO THE HOUSE LEAVING ALL THE THINGS ON THE FLOOR OF THE PIAZZA INSTEAD OF PUTTING THEM AWAY IN THEIR PLACES AS THEY OUGHT TO HAVE DONE +7021-79740-0009-1393: THEY WERE NOW PLAYING WITH THEIR DOLLS IN THE (PARLOR->PARLOUR) +7021-79740-0010-1394: (DELIA->DELHIA) CAME TO THE (PARLOR->PARLOUR) AND WITH AN AIR OF GREAT MYSTERY BECKONED THE CHILDREN ASIDE AND SAID TO THEM IN A WHISPER LEAVE (ANDELLA->AND DELLA) AND ROSALIE HERE AND DON'T SAY A WORD TO THEM +7021-79740-0011-1395: SO SAYING SHE LED THE WAY ON TIPTOE FOLLOWED BY THE CHILDREN OUT OF THE ROOM AND ROUND BY A CIRCUITOUS ROUTE TO THE PIAZZA THERE +7021-79740-0012-1396: SAID SHE POINTING TO THE PLAYTHINGS SEE +7021-79740-0013-1397: PUT THESE PLAYTHINGS ALL AWAY QUICK AND CAREFULLY AND WE WILL NOT LET THEM KNOW (ANY THING->ANYTHING) ABOUT YOUR LEAVING THEM OUT +7021-79740-0014-1398: AND THIS METHOD OF TREATING THE CASE WAS MUCH MORE EFFECTUAL IN MAKING THEM DISPOSED TO AVOID COMMITTING A SIMILAR FAULT ANOTHER TIME THAN ANY DIRECT REBUKES OR EXPRESSIONS OF DISPLEASURE ADDRESSED PERSONALLY TO THEM WOULD HAVE BEEN +7021-79759-0000-1378: NATURE OF THE EFFECT PRODUCED BY EARLY IMPRESSIONS +7021-79759-0001-1379: THAT IS COMPARATIVELY NOTHING +7021-79759-0002-1380: THEY ARE CHIEFLY FORMED FROM COMBINATIONS OF THE IMPRESSIONS MADE IN CHILDHOOD +7021-79759-0003-1381: VAST IMPORTANCE AND INFLUENCE OF THIS MENTAL FURNISHING +7021-79759-0004-1382: WITHOUT GOING TO ANY SUCH EXTREME AS THIS WE CAN EASILY SEE ON REFLECTION HOW VAST AN INFLUENCE ON THE IDEAS AND CONCEPTIONS AS WELL AS ON THE PRINCIPLES OF ACTION (IN->AND) MATURE YEARS MUST BE EXERTED BY THE NATURE AND CHARACTER OF THE IMAGES WHICH THE PERIOD OF INFANCY AND CHILDHOOD (IMPRESSES->IMPRESS) UPON THE MIND +7021-79759-0005-1383: THE PAIN PRODUCED BY AN ACT OF HASTY AND ANGRY VIOLENCE TO WHICH A FATHER SUBJECTS HIS SON MAY SOON PASS AWAY BUT THE MEMORY OF IT DOES NOT PASS AWAY WITH THE PAIN +7021-85628-0000-1409: BUT (ANDERS->ANDREWS) CARED NOTHING ABOUT THAT +7021-85628-0001-1410: HE MADE A BOW SO DEEP THAT HIS BACK CAME NEAR BREAKING AND HE WAS DUMBFOUNDED I CAN TELL YOU WHEN HE SAW IT WAS NOBODY BUT (ANDERS->ANDREW'S) +7021-85628-0002-1411: HE WAS SUCH A BIG BOY THAT HE WORE HIGH BOOTS AND CARRIED A JACK KNIFE +7021-85628-0003-1412: NOW THIS KNIFE WAS A SPLENDID ONE THOUGH HALF THE BLADE WAS GONE AND THE HANDLE WAS A LITTLE CRACKED AND (ANDERS->ANDREWS) KNEW THAT ONE IS ALMOST A MAN AS SOON AS ONE HAS A (JACK KNIFE->JACKKNIFE) 
+7021-85628-0004-1413: YES WHY NOT THOUGHT ANDERS +7021-85628-0005-1414: SEEING THAT I AM SO FINE I MAY AS WELL GO AND VISIT THE KING +7021-85628-0006-1415: I AM GOING TO THE COURT BALL ANSWERED (ANDERS->ANDREWS) +7021-85628-0007-1416: AND SHE TOOK (ANDERS->ANDER'S) HAND AND WALKED WITH HIM UP THE BROAD MARBLE STAIRS WHERE SOLDIERS WERE POSTED AT EVERY THIRD STEP AND THROUGH THE MAGNIFICENT HALLS WHERE COURTIERS IN SILK AND VELVET STOOD BOWING WHEREVER HE WENT +7021-85628-0008-1417: FOR LIKE AS NOT THEY MUST HAVE THOUGHT HIM A PRINCE WHEN THEY SAW HIS FINE CAP +7021-85628-0009-1418: AT THE FARTHER END OF THE LARGEST HALL A TABLE WAS SET WITH GOLDEN CUPS AND GOLDEN PLATES IN LONG ROWS +7021-85628-0010-1419: ON HUGE SILVER PLATTERS WERE PYRAMIDS OF TARTS AND CAKES AND RED WINE SPARKLED IN GLITTERING DECANTERS +7021-85628-0011-1420: THE PRINCESS SAT DOWN UNDER A BLUE CANOPY WITH BOUQUETS OF ROSES AND SHE LET (ANDERS->ANDREW) SIT IN A GOLDEN CHAIR BY HER SIDE +7021-85628-0012-1421: BUT YOU MUST NOT EAT WITH YOUR CAP ON YOUR HEAD SHE SAID AND WAS GOING TO TAKE IT OFF +7021-85628-0013-1422: THE PRINCESS CERTAINLY WAS BEAUTIFUL AND HE WOULD HAVE DEARLY LIKED TO BE KISSED BY HER BUT THE CAP WHICH HIS MOTHER HAD MADE HE WOULD NOT GIVE UP ON ANY CONDITION +7021-85628-0014-1423: HE ONLY SHOOK HIS HEAD +7021-85628-0015-1424: WELL BUT NOW SAID THE PRINCESS AND SHE FILLED HIS POCKETS WITH CAKES AND PUT HER OWN HEAVY GOLD CHAIN AROUND HIS NECK AND BENT DOWN AND KISSED HIM +7021-85628-0016-1425: THAT IS A VERY FINE CAP YOU HAVE HE SAID +7021-85628-0017-1426: SO IT IS SAID (ANDERS->ANDREWS) +7021-85628-0018-1427: AND IT IS MADE OF MOTHER'S BEST YARN AND SHE KNITTED IT HERSELF AND EVERYBODY WANTS TO GET IT AWAY FROM ME +7021-85628-0019-1428: WITH ONE JUMP (ANDERS->ANDREWS) GOT OUT OF HIS CHAIR +7021-85628-0020-1429: HE DARTED LIKE AN ARROW THROUGH ALL THE HALLS DOWN ALL THE STAIRS AND ACROSS THE YARD +7021-85628-0021-1430: HE STILL HELD ON TO IT WITH BOTH HANDS AS HE RUSHED INTO HIS MOTHER'S COTTAGE +7021-85628-0022-1431: AND ALL HIS BROTHERS AND SISTERS STOOD ROUND AND LISTENED WITH THEIR MOUTHS OPEN +7021-85628-0023-1432: BUT WHEN HIS BIG BROTHER HEARD THAT HE HAD REFUSED TO GIVE HIS CAP FOR A KING'S GOLDEN CROWN HE SAID THAT ANDERS WAS A STUPID +7021-85628-0024-1433: (ANDERS->ANDREW'S) FACE GREW RED +7021-85628-0025-1434: BUT HIS MOTHER HUGGED HIM CLOSE +7021-85628-0026-1435: NO MY LITTLE (SON->FUN) SHE SAID +7021-85628-0027-1436: IF YOU DRESSED IN SILK AND GOLD FROM TOP TO TOE YOU COULD NOT LOOK ANY NICER THAN IN YOUR LITTLE RED CAP +7127-75946-0000-467: AT THE CONCLUSION OF THE BANQUET WHICH WAS SERVED AT FIVE O'CLOCK THE KING ENTERED HIS CABINET WHERE HIS TAILORS WERE AWAITING HIM FOR THE PURPOSE OF TRYING ON THE CELEBRATED COSTUME REPRESENTING SPRING WHICH WAS THE RESULT OF SO MUCH IMAGINATION AND HAD COST SO MANY EFFORTS OF THOUGHT TO THE DESIGNERS AND ORNAMENT WORKERS OF THE COURT +7127-75946-0001-468: AH VERY WELL +7127-75946-0002-469: LET HIM COME IN THEN SAID THE KING AND AS IF COLBERT HAD BEEN LISTENING AT THE DOOR FOR THE PURPOSE OF KEEPING HIMSELF (AU COURANT->ACCURANT) WITH THE CONVERSATION HE ENTERED AS SOON AS THE KING HAD PRONOUNCED HIS NAME TO THE TWO COURTIERS +7127-75946-0003-470: GENTLEMEN TO YOUR POSTS WHEREUPON SAINT (AIGNAN->ENG YON) AND (VILLEROY->VILLAIRY) TOOK THEIR LEAVE +7127-75946-0004-471: CERTAINLY SIRE BUT I MUST HAVE MONEY TO DO THAT WHAT +7127-75946-0005-472: WHAT DO YOU MEAN INQUIRED (LOUIS->LOUISE) +7127-75946-0006-473: HE HAS GIVEN THEM WITH TOO MUCH GRACE NOT TO HAVE 
OTHERS STILL TO GIVE IF THEY ARE REQUIRED WHICH IS THE CASE AT THE PRESENT MOMENT +7127-75946-0007-474: IT IS NECESSARY THEREFORE THAT HE SHOULD COMPLY THE KING FROWNED +7127-75946-0008-475: DOES YOUR MAJESTY THEN NO LONGER BELIEVE THE DISLOYAL ATTEMPT +7127-75946-0009-476: NOT AT ALL YOU ARE ON THE CONTRARY MOST AGREEABLE TO ME +7127-75946-0010-477: YOUR MAJESTY'S PLAN THEN IN THIS AFFAIR IS +7127-75946-0011-478: YOU WILL TAKE THEM FROM MY PRIVATE TREASURE +7127-75946-0012-479: THE NEWS CIRCULATED WITH THE RAPIDITY OF LIGHTNING DURING ITS PROGRESS IT KINDLED EVERY VARIETY OF COQUETRY DESIRE AND WILD AMBITION +7127-75946-0013-480: THE KING HAD COMPLETED HIS (TOILETTE->TOILET) BY NINE O'CLOCK HE APPEARED IN AN OPEN CARRIAGE DECORATED WITH BRANCHES OF TREES AND FLOWERS +7127-75946-0014-481: THE QUEENS HAD TAKEN THEIR SEATS UPON A MAGNIFICENT (DIAS->DAIS) OR PLATFORM ERECTED UPON THE BORDERS OF THE LAKE IN A (THEATER->THEATRE) OF WONDERFUL ELEGANCE OF CONSTRUCTION +7127-75946-0015-482: SUDDENLY FOR THE PURPOSE OF RESTORING PEACE AND ORDER (SPRING->SPRANG) ACCOMPANIED BY HIS WHOLE COURT MADE HIS APPEARANCE +7127-75946-0016-483: THE SEASONS ALLIES OF SPRING FOLLOWED HIM CLOSELY TO FORM A QUADRILLE WHICH AFTER MANY WORDS OF MORE OR LESS FLATTERING IMPORT WAS THE COMMENCEMENT OF THE DANCE +7127-75946-0017-484: HIS LEGS THE BEST SHAPED AT COURT WERE DISPLAYED TO GREAT ADVANTAGE IN FLESH COLORED SILKEN HOSE (OF->A) SILK SO FINE AND SO TRANSPARENT THAT IT SEEMED ALMOST LIKE FLESH ITSELF +7127-75946-0018-485: THERE WAS SOMETHING IN HIS CARRIAGE WHICH RESEMBLED THE BUOYANT MOVEMENTS OF AN IMMORTAL AND HE DID NOT DANCE SO MUCH AS (SEEM->SEEMED) TO SOAR ALONG +7127-75946-0019-486: YES IT IS SUPPRESSED +7127-75946-0020-487: FAR FROM IT SIRE YOUR MAJESTY (HAVING->HEAVEN) GIVEN NO DIRECTIONS ABOUT IT THE MUSICIANS HAVE RETAINED IT +7127-75946-0021-488: YES SIRE AND READY DRESSED FOR THE BALLET +7127-75946-0022-489: SIRE HE SAID YOUR MAJESTY'S MOST DEVOTED SERVANT APPROACHES TO PERFORM A SERVICE ON THIS OCCASION WITH SIMILAR ZEAL THAT HE HAS ALREADY SHOWN ON THE FIELD OF BATTLE +7127-75946-0023-490: THE KING SEEMED ONLY PLEASED WITH EVERY ONE PRESENT +7127-75946-0024-491: MONSIEUR WAS THE ONLY ONE WHO DID NOT UNDERSTAND ANYTHING ABOUT THE MATTER +7127-75946-0025-492: THE BALLET BEGAN THE EFFECT WAS MORE THAN BEAUTIFUL +7127-75946-0026-493: WHEN THE MUSIC BY ITS BURSTS OF MELODY CARRIED AWAY THESE ILLUSTRIOUS DANCERS WHEN (THE->THIS) SIMPLE UNTUTORED PANTOMIME OF THAT PERIOD ONLY THE MORE NATURAL ON ACCOUNT OF THE VERY INDIFFERENT ACTING OF THE AUGUST ACTORS HAD REACHED ITS CULMINATING POINT OF TRIUMPH THE (THEATER->THEATRE) SHOOK WITH TUMULTUOUS APPLAUSE +7127-75946-0027-494: DISDAINFUL OF A SUCCESS OF WHICH MADAME SHOWED NO (ACKNOWLEDGEMENT->ACKNOWLEDGMENT) HE THOUGHT OF NOTHING BUT BOLDLY REGAINING THE (MARKED->MARKET) PREFERENCE OF THE PRINCESS +7127-75946-0028-495: BY DEGREES ALL HIS HAPPINESS ALL HIS BRILLIANCY SUBSIDED INTO REGRET AND UNEASINESS SO THAT HIS LIMBS LOST THEIR POWER HIS ARMS HUNG HEAVILY BY HIS SIDES AND HIS HEAD DROOPED AS THOUGH HE WAS STUPEFIED +7127-75946-0029-496: THE KING WHO HAD FROM THIS MOMENT BECOME IN REALITY THE PRINCIPAL DANCER IN THE QUADRILLE CAST A LOOK UPON HIS VANQUISHED RIVAL +7127-75947-0000-426: EVERY ONE COULD OBSERVE HIS AGITATION AND PROSTRATION A PROSTRATION WHICH WAS INDEED THE MORE REMARKABLE SINCE PEOPLE WERE NOT ACCUSTOMED TO SEE HIM WITH HIS ARMS HANGING LISTLESSLY BY HIS SIDE HIS HEAD BEWILDERED AND HIS EYES WITH ALL THEIR BRIGHT INTELLIGENCE (BEDIMMED->BE 
DIMMED) +7127-75947-0001-427: UPON THIS MADAME DEIGNED TO TURN HER EYES LANGUISHINGLY TOWARDS THE COMTE OBSERVING +7127-75947-0002-428: DO YOU THINK SO SHE REPLIED WITH INDIFFERENCE +7127-75947-0003-429: YES THE CHARACTER WHICH (YOUR->YOU ARE) ROYAL HIGHNESS ASSUMED IS IN PERFECT HARMONY WITH YOUR OWN +7127-75947-0004-430: EXPLAIN YOURSELF +7127-75947-0005-431: I ALLUDE TO THE GODDESS +7127-75947-0006-432: THE PRINCESS INQUIRED NO +7127-75947-0007-433: SHE THEN ROSE HUMMING THE AIR TO WHICH SHE WAS PRESENTLY GOING TO DANCE +7127-75947-0008-434: THE ARROW PIERCED HIS HEART AND WOUNDED HIM MORTALLY +7127-75947-0009-435: A QUARTER OF AN HOUR AFTERWARDS HE RETURNED TO THE (THEATER->THEATRE) BUT IT WILL BE READILY BELIEVED THAT IT WAS ONLY A POWERFUL EFFORT OF REASON OVER HIS GREAT EXCITEMENT THAT ENABLED HIM TO GO BACK OR PERHAPS FOR LOVE IS THUS STRANGELY CONSTITUTED HE FOUND IT IMPOSSIBLE EVEN TO REMAIN MUCH LONGER SEPARATED FROM (THE->THEIR) PRESENCE OF ONE WHO HAD BROKEN HIS HEART +7127-75947-0010-436: WHEN SHE PERCEIVED THE YOUNG MAN SHE ROSE LIKE A WOMAN SURPRISED IN THE MIDST OF IDEAS SHE WAS DESIROUS OF CONCEALING FROM HERSELF +7127-75947-0011-437: REMAIN I IMPLORE YOU THE EVENING IS MOST LOVELY +7127-75947-0012-438: INDEED AH +7127-75947-0013-439: I REMEMBER NOW AND I CONGRATULATE MYSELF DO YOU LOVE ANY ONE +7127-75947-0014-440: FORGIVE ME I HARDLY KNOW WHAT I AM SAYING A THOUSAND TIMES FORGIVE ME MADAME WAS RIGHT QUITE RIGHT THIS BRUTAL EXILE HAS COMPLETELY TURNED MY BRAIN +7127-75947-0015-441: THERE CANNOT BE A DOUBT HE RECEIVED YOU KINDLY FOR IN FACT YOU RETURNED WITHOUT HIS PERMISSION +7127-75947-0016-442: OH MADEMOISELLE WHY HAVE I NOT A DEVOTED SISTER OR A TRUE FRIEND SUCH AS YOURSELF +7127-75947-0017-443: WHAT ALREADY HERE THEY SAID TO HER +7127-75947-0018-444: I HAVE BEEN HERE THIS QUARTER OF AN HOUR REPLIED LA (VALLIERE->VALLIERS) +7127-75947-0019-445: DID NOT THE DANCING AMUSE YOU NO +7127-75947-0020-446: NO MORE THAN THE DANCING +7127-75947-0021-447: LA (VALLIERE->VALLIERS) IS QUITE A (POETESS->POETES) SAID (TONNAY CHARENTE->TONY SCHERANT) +7127-75947-0022-448: I AM A WOMAN AND THERE ARE FEW LIKE ME WHOEVER LOVES ME FLATTERS ME WHOEVER FLATTERS ME PLEASES ME AND WHOEVER PLEASES WELL SAID MONTALAIS YOU DO NOT FINISH +7127-75947-0023-449: IT IS TOO DIFFICULT REPLIED MADEMOISELLE (DE TONNAY CHARENTE->DENISCHALANT) LAUGHING LOUDLY +7127-75947-0024-450: LOOK YONDER DO YOU NOT SEE THE MOON SLOWLY RISING SILVERING THE TOPMOST BRANCHES OF THE CHESTNUTS AND THE (OAKS->YOLKS) +7127-75947-0025-451: EXQUISITE SOFT TURF OF THE WOODS THE HAPPINESS WHICH YOUR FRIENDSHIP CONFERS UPON ME +7127-75947-0026-452: WELL SAID MADEMOISELLE (DE TONNAY CHARENTE->DETERNATION) I ALSO THINK A GOOD DEAL BUT I TAKE CARE +7127-75947-0027-453: TO SAY NOTHING SAID MONTALAIS SO THAT WHEN MADEMOISELLE DE (TONNAY CHARENTE->TOURNISHER AUNT) THINKS (ATHENAIS->ETHNE) IS THE ONLY ONE WHO KNOWS IT +7127-75947-0028-454: QUICK QUICK THEN AMONG THE HIGH REED GRASS SAID MONTALAIS STOOP (ATHENAIS->ETHINE) YOU ARE SO TALL +7127-75947-0029-455: THE YOUNG GIRLS HAD INDEED MADE THEMSELVES SMALL INDEED INVISIBLE +7127-75947-0030-456: SHE WAS HERE JUST NOW SAID THE COUNT +7127-75947-0031-457: YOU ARE POSITIVE THEN +7127-75947-0032-458: YES BUT PERHAPS I FRIGHTENED HER (IN->AND) WHAT WAY +7127-75947-0033-459: HOW IS IT LA (VALLIERE->VALLIERS) SAID MADEMOISELLE (DE TONNAY CHARENTE->DETENNACHELANT) THAT THE VICOMTE DE (BRAGELONNE->BREG ALONE) SPOKE OF YOU AS LOUISE +7127-75947-0034-460: IT SEEMS THE KING WILL NOT CONSENT TO IT 
+7127-75947-0035-461: GOOD GRACIOUS HAS THE KING ANY RIGHT TO INTERFERE IN MATTERS OF THAT KIND +7127-75947-0036-462: I GIVE MY CONSENT +7127-75947-0037-463: OH I AM SPEAKING SERIOUSLY REPLIED MONTALAIS AND MY OPINION IN THIS CASE IS QUITE AS GOOD AS THE (KING'S->KING AS) I SUPPOSE IS IT NOT LOUISE +7127-75947-0038-464: LET US RUN THEN SAID ALL THREE AND GRACEFULLY LIFTING UP THE LONG SKIRTS OF THEIR SILK DRESSES THEY LIGHTLY RAN ACROSS THE OPEN SPACE BETWEEN THE LAKE AND THE THICKEST COVERT OF THE PARK +7127-75947-0039-465: IN FACT THE SOUND OF MADAME'S AND THE QUEEN'S CARRIAGES COULD BE HEARD IN THE DISTANCE UPON THE HARD DRY GROUND OF THE ROADS FOLLOWED BY THE (MOUNTED->MOUNTAIN) CAVALIERS +7127-75947-0040-466: IN THIS WAY THE FETE OF THE WHOLE COURT WAS A FETE ALSO FOR THE MYSTERIOUS INHABITANTS OF THE FOREST FOR CERTAINLY THE DEER IN THE BRAKE THE PHEASANT ON THE BRANCH THE FOX IN ITS HOLE WERE ALL LISTENING +7176-88083-0000-707: ALL ABOUT HIM WAS A TUMULT OF BRIGHT AND BROKEN COLOR SCATTERED (IN->AND) BROAD SPLASHES +7176-88083-0001-708: THE (MERGANSER->MERGANCER) HAD A (CRESTED->CRUSTED) HEAD OF IRIDESCENT GREEN BLACK A BROAD COLLAR OF LUSTROUS WHITE BLACK BACK BLACK AND WHITE WINGS WHITE BELLY SIDES FINELY PENCILLED IN BLACK AND WHITE AND A BREAST OF RICH CHESTNUT RED STREAKED WITH BLACK +7176-88083-0002-709: HIS FEET WERE RED HIS LONG NARROW BEAK WITH ITS (SAW->SALL) TOOTHED EDGES AND SHARP HOOKED TIP WAS BRIGHT RED +7176-88083-0003-710: BUT HERE HE WAS AT A TERRIBLE DISADVANTAGE AS COMPARED WITH THE OWLS HAWKS AND EAGLES HE HAD NO RENDING CLAWS +7176-88083-0004-711: BUT SUDDENLY STRAIGHT AND SWIFT AS A DIVING (CORMORANT->COMRADE) HE SHOT DOWN INTO THE TORRENT AND DISAPPEARED BENEATH THE SURFACE +7176-88083-0005-712: ONCE FAIRLY A WING HOWEVER HE WHEELED AND MADE BACK HURRIEDLY FOR HIS PERCH +7176-88083-0006-713: (IT->AND) MIGHT HAVE SEEMED THAT A TROUT OF THIS SIZE WAS A FAIRLY SUBSTANTIAL MEAL +7176-88083-0007-714: BUT SUCH WAS HIS KEENNESS THAT EVEN WHILE THE WIDE FLUKES OF HIS ENGORGED VICTIM WERE STILL STICKING OUT AT THE CORNERS OF HIS BEAK HIS FIERCE RED EYES WERE ONCE MORE PEERING DOWNWARD INTO THE TORRENT IN SEARCH OF FRESH PREY +7176-88083-0008-715: IN DESPAIR HE HURLED HIMSELF DOWNWARD TOO SOON +7176-88083-0009-716: THE GREAT HAWK (FOLLOWED->FOWLED) HURRIEDLY TO RETRIEVE HIS PREY FROM THE GROUND +7176-88083-0010-717: THE CAT GROWLED SOFTLY PICKED UP THE PRIZE IN HER JAWS AND TROTTED INTO THE BUSHES TO DEVOUR IT +7176-88083-0011-718: IN FACT HE HAD JUST FINISHED IT THE LAST OF THE TROUT'S TAIL HAD JUST VANISHED WITH A SPASM DOWN HIS STRAINED GULLET WHEN THE BAFFLED HAWK CAUGHT SIGHT OF HIM AND SWOOPED +7176-88083-0012-719: THE HAWK ALIGHTED ON THE DEAD BRANCH AND SAT UPRIGHT MOTIONLESS AS IF SURPRISED +7176-88083-0013-720: LIKE HIS UNFORTUNATE LITTLE COUSIN THE TEAL HE TOO HAD FELT THE FEAR OF DEATH SMITTEN INTO HIS HEART AND WAS HEADING DESPERATELY FOR THE REFUGE OF SOME DARK OVERHANGING BANK DEEP FRINGED WITH WEEDS WHERE THE DREADFUL EYE OF THE HAWK SHOULD NOT DISCERN HIM +7176-88083-0014-721: THE HAWK SAT UPON THE BRANCH AND WATCHED HIS QUARRY SWIMMING BENEATH THE SURFACE +7176-88083-0015-722: ALMOST INSTANTLY HE WAS FORCED TO THE TOP +7176-88083-0016-723: STRAIGHTWAY (*->IN) THE HAWK GLIDED FROM HIS PERCH AND DARTED AFTER HIM +7176-88083-0017-724: BUT AT THIS POINT IN THE RAPIDS IT WAS IMPOSSIBLE FOR HIM TO STAY DOWN +7176-88083-0018-725: BUT THIS FREQUENTER OF THE HEIGHTS OF AIR FOR ALL HIS SAVAGE (VALOR->VALOUR) WAS TROUBLED AT THE LEAPING WAVES AND THE TOSSING 
FOAM OF THESE MAD RAPIDS HE DID NOT UNDERSTAND THEM +7176-88083-0019-726: AS HE FLEW HIS (DOWN REACHING->DOWNREACHING) CLUTCHING TALONS WERE NOT HALF A YARD ABOVE THE FUGITIVE'S HEAD +7176-88083-0020-727: WHERE THE (WAVES->WAVE IS) FOR AN INSTANT SANK THEY CAME CLOSER BUT NOT QUITE WITHIN GRASPING REACH +7176-88083-0021-728: BUT AS BEFORE THE LEAPING WAVES OF THE RAPIDS WERE TOO MUCH FOR HIS PURSUER AND HE WAS ABLE TO FLAP HIS WAY ONWARD IN A CLOUD OF FOAM WHILE DOOM HUNG LOW ABOVE HIS HEAD YET HESITATED TO STRIKE +7176-88083-0022-729: THE HAWK EMBITTERED BY THE LOSS OF HIS FIRST QUARRY HAD BECOME AS DOGGED IN PURSUIT AS A WEASEL NOT TO BE SHAKEN OFF OR EVADED OR DECEIVED +7176-88083-0023-730: HE HAD A LOT OF LINE OUT AND THE PLACE WAS NONE TOO FREE FOR A LONG CAST BUT HE WAS IMPATIENT TO DROP HIS FLIES AGAIN ON THE SPOT WHERE THE BIG FISH WAS FEEDING +7176-88083-0024-731: THE LAST DROP FLY AS LUCK WOULD HAVE IT CAUGHT JUST IN THE CORNER OF THE HAWK'S ANGRILY OPEN BEAK HOOKING ITSELF FIRMLY +7176-88083-0025-732: AT THE SUDDEN SHARP STING OF IT THE GREAT BIRD TURNED HIS HEAD AND NOTICED FOR THE FIRST TIME THE FISHERMAN STANDING ON THE BANK +7176-88083-0026-733: THE DRAG UPON HIS BEAK AND THE LIGHT CHECK UPON HIS WINGS WERE INEXPLICABLE TO HIM AND APPALLING +7176-88083-0027-734: (THEN->THAN) THE LEADER PARTED FROM THE LINE +7176-92135-0000-661: HE IS A WELCOME FIGURE AT THE GARDEN PARTIES OF THE ELECT WHO ARE ALWAYS READY TO ENCOURAGE HIM BY ACCEPTING FREE SEATS FOR HIS PLAY ACTOR MANAGERS NOD TO HIM EDITORS ALLOW HIM TO CONTRIBUTE WITHOUT CHARGE TO A (SYMPOSIUM->SUPPOSIUM) ON THE PRICE OF GOLF BALLS +7176-92135-0001-662: IN SHORT HE BECOMES A PROMINENT FIGURE IN LONDON SOCIETY AND IF HE IS NOT CAREFUL SOMEBODY WILL SAY SO +7176-92135-0002-663: BUT EVEN THE UNSUCCESSFUL DRAMATIST HAS HIS MOMENTS +7176-92135-0003-664: (YOUR->YOU ARE) PLAY MUST BE NOT MERELY A GOOD PLAY BUT A SUCCESSFUL ONE +7176-92135-0004-665: FRANKLY I CANNOT ALWAYS SAY +7176-92135-0005-666: BUT SUPPOSE YOU SAID I'M FOND OF WRITING MY PEOPLE ALWAYS SAY MY LETTERS HOME ARE GOOD ENOUGH FOR PUNCH +7176-92135-0006-667: I'VE GOT A LITTLE IDEA FOR A PLAY ABOUT A MAN AND A WOMAN AND ANOTHER WOMAN AND BUT PERHAPS (I'D->I) BETTER KEEP THE PLOT A SECRET FOR THE MOMENT +7176-92135-0007-668: ANYHOW IT'S (*->A) JOLLY EXCITING AND I CAN DO THE DIALOGUE ALL RIGHT +7176-92135-0008-669: LEND ME YOUR EAR FOR TEN MINUTES AND YOU SHALL LEARN JUST WHAT (STAGECRAFT->STAGE CRAFT) IS +7176-92135-0009-670: AND I SHOULD BEGIN WITH A SHORT HOMILY ON SOLILOQUY +7176-92135-0010-671: (HAM->HIM) TO BE OR NOT TO BE +7176-92135-0011-672: NOW THE OBJECT OF THIS (SOLILOQUY->SOLOQUY) IS PLAIN +7176-92135-0012-673: INDEED IRRESOLUTION (BEING->MEAN) THE (KEYNOTE->KEEN OUT) OF HAMLET'S SOLILOQUY A CLEVER PLAYER COULD TO SOME EXTENT INDICATE THE WHOLE THIRTY LINES BY A (SILENT->SILAGE) WORKING OF THE (JAW->JOB) BUT AT THE SAME TIME IT WOULD BE IDLE TO DENY THAT HE WOULD MISS THE FINER SHADES OF THE DRAMATIST'S MEANING +7176-92135-0013-674: WE MODERNS HOWEVER SEE THE ABSURDITY OF IT +7176-92135-0014-675: IF IT BE GRANTED FIRST THAT THE THOUGHTS OF A CERTAIN CHARACTER SHOULD BE KNOWN TO THE AUDIENCE AND SECONDLY THAT SOLILOQUY OR THE HABIT OF THINKING ALOUD IS IN OPPOSITION TO MODERN STAGE (TECHNIQUE HOW->TYPE HALL) SHALL A SOLILOQUY BE AVOIDED WITHOUT DAMAGE TO THE PLAY +7176-92135-0015-676: AND SO ON TILL YOU GET (TO->*) THE END (WHEN OPHELIA->ONE OF VILLIA) MIGHT SAY AH YES OR SOMETHING NON COMMITTAL OF THAT SORT +7176-92135-0016-677: THIS WOULD BE AN EASY WAY 
OF DOING IT BUT IT WOULD NOT BE THE BEST WAY FOR THE REASON THAT IT IS TOO EASY TO CALL ATTENTION TO ITSELF +7176-92135-0017-678: IN THE OLD BADLY MADE PLAY IT WAS FREQUENTLY NECESSARY FOR ONE OF THE CHARACTERS TO TAKE THE AUDIENCE INTO HIS CONFIDENCE +7176-92135-0018-679: IN THE MODERN WELL CONSTRUCTED PLAY HE SIMPLY RINGS UP AN IMAGINARY CONFEDERATE AND TELLS HIM WHAT HE IS GOING TO DO COULD ANYTHING BE MORE NATURAL +7176-92135-0019-680: I WANT DOUBLE NINE (HAL LO->HELLO) +7176-92135-0020-681: (DOUBLE->DOUBLED) NINE TWO THREE (ELSINORE->ELZINORE) DOUBLE (NINE->NOT) YES (HALLO->HELLO) IS THAT YOU HORATIO HAMLET SPEAKING +7176-92135-0021-682: I SAY I'VE BEEN (WONDERING->WANDERING) ABOUT THIS BUSINESS +7176-92135-0022-683: TO BE OR NOT TO BE THAT IS THE QUESTION WHETHER TIS NOBLER IN THE MIND TO SUFFER THE SLINGS AND ARROWS WHAT NO HAMLET SPEAKING +7176-92135-0023-684: YOU GAVE ME DOUBLE FIVE I WANT DOUBLE NINE (HALLO->HELLO) IS THAT YOU HORATIO HAMLET SPEAKING +7176-92135-0024-685: TO BE OR NOT TO BE THAT IS THE QUESTION WHETHER TIS NOBLER +7176-92135-0025-686: IT IS TO LET HAMLET IF THAT (HAPPEN->HAPPENED) TO BE THE NAME OF YOUR CHARACTER (ENTER WITH->INTO) A SMALL DOG PET FALCON MONGOOSE TAME BEAR (OR WHATEVER->ORDER) ANIMAL IS MOST IN KEEPING WITH THE PART AND CONFIDE IN THIS ANIMAL SUCH SORROWS HOPES OR SECRET HISTORY AS THE AUDIENCE HAS GOT TO KNOW +7176-92135-0026-687: ENTER HAMLET WITH HIS FAVOURITE (BOAR HOUND->BOARHOUND) +7176-92135-0027-688: LADY (LARKSPUR STARTS->LARKSBURG START) SUDDENLY AND (TURNS TOWARDS->TURNED TOWARD) HIM +7176-92135-0028-689: (LARKSPUR BIT->LARKS WERE BID) ME AGAIN THIS MORNING FOR THE THIRD TIME +7176-92135-0029-690: I WANT TO GET AWAY FROM IT ALL (SWOONS->SWOON) +7176-92135-0030-691: ENTER LORD ARTHUR (FLUFFINOSE->FLUFFINO'S) +7176-92135-0031-692: AND THERE YOU ARE YOU WILL OF COURSE APPRECIATE THAT THE (UNFINISHED SENTENCES->UNFINISHANCES) NOT ONLY SAVE TIME BUT ALSO MAKE THE MANOEUVRING VERY MUCH MORE NATURAL +7176-92135-0032-693: HOW YOU MAY BE WONDERING ARE (YOU->YE) TO BEGIN YOUR MASTERPIECE +7176-92135-0033-694: RELAPSES INTO SILENCE FOR THE REST OF THE EVENING +7176-92135-0034-695: THE DUCHESS OF SOUTHBRIDGE (TO->TWO) LORD REGGIE OH REGGIE WHAT DID YOU SAY +7176-92135-0035-696: THEN LORD (TUPPENY->TOPPENNY) WELL WHAT ABOUT AUCTION +7176-92135-0036-697: THE CROWD DRIFTS OFF (LEAVING->LEAPING) THE HERO AND HEROINE ALONE IN THE MIDDLE OF THE STAGE AND THEN YOU CAN BEGIN +7176-92135-0037-698: THEN IS THE TIME TO INTRODUCE A MEAL ON THE STAGE +7176-92135-0038-699: A (STAGE->SAGE) MEAL IS POPULAR BECAUSE IT (PROVES->PROVED) TO THE AUDIENCE THAT THE ACTORS EVEN WHEN CALLED CHARLES (HAWTREY->HOLTREE) OR (OWEN NARES->OWENAIRS) ARE REAL PEOPLE JUST LIKE YOU AND ME +7176-92135-0039-700: (TEA->T) PLEASE MATTHEWS BUTLER IMPASSIVELY +7176-92135-0040-701: HOSTESS REPLACES LUMP AND INCLINES EMPTY TEAPOT (OVER TRAY->OVERTRAY) FOR A MOMENT THEN (HANDS HIM->HANDSOME) A CUP PAINTED BROWN INSIDE (THUS->LUST) DECEIVING THE GENTLEMAN WITH THE TELESCOPE IN THE UPPER CIRCLE +7176-92135-0041-702: RE ENTER BUTLER AND THREE FOOTMEN WHO (REMOVE->MOVED) THE TEA THINGS HOSTESS (TO GUEST->TWO GUESTS) +7176-92135-0042-703: (IN->AND) NOVELS THE HERO HAS OFTEN PUSHED HIS MEALS AWAY UNTASTED BUT NO (STAGE->STEED) HERO WOULD DO ANYTHING SO UNNATURAL AS THIS +7176-92135-0043-704: TWO (BITES->WHITES) ARE MADE AND THE BREAD IS CRUMBLED WITH AN AIR OF GREAT EAGERNESS INDEED ONE FEELS THAT IN REAL LIFE THE GUEST WOULD CLUTCH HOLD OF THE FOOTMAN AND SAY HALF A (MO OLD->MOLD) CHAP I HAVEN'T 
NEARLY FINISHED BUT THE ACTOR IS BETTER SCHOOLED THAN THIS
+7176-92135-0044-705: BUT IT IS (THE->A) CIGARETTE WHICH CHIEFLY HAS BROUGHT THE MODERN DRAMA TO ITS PRESENT STATE OF PERFECTION
+7176-92135-0045-706: LORD JOHN TAKING OUT GOLD (CIGARETTE->SICK RED) CASE FROM HIS LEFT HAND UPPER WAISTCOAT POCKET
+7729-102255-0000-261: THE BOGUS LEGISLATURE NUMBERED THIRTY SIX MEMBERS
+7729-102255-0001-262: THIS WAS AT THE MARCH ELECTION EIGHTEEN FIFTY FIVE
+7729-102255-0002-263: THAT SUMMER'S EMIGRATION HOWEVER BEING MAINLY FROM THE FREE STATES GREATLY CHANGED THE RELATIVE STRENGTH OF THE TWO PARTIES
+7729-102255-0003-264: FOR GENERAL SERVICE THEREFORE REQUIRING NO SPECIAL EFFORT THE NUMERICAL STRENGTH OF THE FACTIONS WAS ABOUT EQUAL WHILE ON EXTRAORDINARY OCCASIONS THE TWO THOUSAND BORDER RUFFIAN (RESERVE->RESERVED) LYING A LITTLE FARTHER BACK FROM THE STATE LINE COULD AT ANY TIME EASILY TURN THE SCALE
+7729-102255-0004-265: THE FREE STATE MEN HAD ONLY THEIR CONVICTIONS THEIR INTELLIGENCE THEIR COURAGE AND THE MORAL SUPPORT OF THE NORTH THE CONSPIRACY HAD ITS SECRET COMBINATION THE TERRITORIAL OFFICIALS THE LEGISLATURE THE BOGUS LAWS THE COURTS THE MILITIA OFFICERS THE PRESIDENT AND THE ARMY
+7729-102255-0005-266: THIS WAS A FORMIDABLE ARRAY OF ADVANTAGES SLAVERY WAS PLAYING WITH LOADED DICE
+7729-102255-0006-267: COMING BY WAY OF THE MISSOURI RIVER TOWNS HE FELL FIRST AMONG BORDER RUFFIAN COMPANIONSHIP AND INFLUENCES AND PERHAPS HAVING HIS INCLINATIONS ALREADY (MOLDED->MOULDED) BY HIS WASHINGTON INSTRUCTIONS HIS EARLY IMPRESSIONS WERE DECIDEDLY ADVERSE TO THE FREE STATE CAUSE
+7729-102255-0007-268: HIS RECEPTION SPEECH AT WESTPORT IN WHICH HE MAINTAINED THE LEGALITY OF THE LEGISLATURE AND HIS DETERMINATION TO ENFORCE THEIR LAWS DELIGHTED HIS PRO SLAVERY AUDITORS
+7729-102255-0008-269: ALL THE TERRITORIAL DIGNITARIES WERE PRESENT GOVERNOR SHANNON PRESIDED JOHN CALHOUN THE SURVEYOR GENERAL MADE THE PRINCIPAL SPEECH A DENUNCIATION OF THE (ABOLITIONISTS->ABOLITIONIST) SUPPORTING THE (TOPEKA->TOPICA) MOVEMENT CHIEF JUSTICE (LECOMPTE->LECOMTE) DIGNIFIED THE OCCASION WITH APPROVING REMARKS
+7729-102255-0009-270: ALL (DISSENT->DESCENT) ALL NON COMPLIANCE ALL HESITATION ALL MERE SILENCE EVEN WERE IN THEIR STRONGHOLD TOWNS LIKE (LEAVENWORTH->LEVIN WORTH) BRANDED AS ABOLITIONISM DECLARED TO BE HOSTILITY TO THE PUBLIC WELFARE AND PUNISHED WITH PROSCRIPTION PERSONAL VIOLENCE EXPULSION AND FREQUENTLY DEATH
+7729-102255-0010-271: OF THE (LYNCHINGS->LUNCHINGS) THE MOBS AND THE MURDERS IT WOULD BE IMPOSSIBLE EXCEPT IN A VERY EXTENDED WORK TO NOTE THE FREQUENT AND ATROCIOUS DETAILS
+7729-102255-0011-272: THE PRESENT CHAPTERS CAN ONLY TOUCH UPON THE MORE SALIENT MOVEMENTS OF THE CIVIL WAR IN KANSAS WHICH HAPPILY (WERE->ARE) NOT SANGUINARY IF HOWEVER THE INDIVIDUAL AND MORE ISOLATED CASES OF BLOODSHED COULD BE DESCRIBED THEY WOULD SHOW A STARTLING AGGREGATE OF BARBARITY AND (*->A) LOSS OF LIFE FOR OPINION'S SAKE
+7729-102255-0012-273: SEVERAL HUNDRED FREE STATE MEN PROMPTLY RESPONDED TO THE SUMMONS
+7729-102255-0013-274: IT WAS IN FACT THE BEST WEAPON OF ITS DAY
+7729-102255-0014-275: THE LEADERS OF THE CONSPIRACY BECAME DISTRUSTFUL OF THEIR POWER TO CRUSH THE TOWN
+7729-102255-0015-276: ONE OF HIS MILITIA GENERALS SUGGESTED THAT THE GOVERNOR SHOULD REQUIRE THE OUTLAWS AT LAWRENCE AND ELSEWHERE TO SURRENDER THE (SHARPS->SHARP'S) RIFLES ANOTHER WROTE ASKING HIM TO CALL OUT THE GOVERNMENT TROOPS AT FORT (LEAVENWORTH->LEVINWORTH)
+7729-102255-0016-277: THE GOVERNOR ON HIS PART BECOMING DOUBTFUL OF THE LEGALITY OF EMPLOYING MISSOURI MILITIA TO ENFORCE KANSAS LAWS WAS ALSO EAGER TO SECURE THE HELP OF FEDERAL TROOPS
+7729-102255-0017-278: SHERIFF JONES HAD HIS POCKETS ALWAYS FULL OF WRITS ISSUED IN THE SPIRIT OF PERSECUTION BUT WAS OFTEN BAFFLED BY THE SHARP WITS AND READY RESOURCES OF THE FREE STATE PEOPLE AND SOMETIMES DEFIED OUTRIGHT
+7729-102255-0018-279: LITTLE BY LITTLE HOWEVER THE LATTER BECAME HEMMED AND BOUND IN THE MESHES OF THE VARIOUS DEVICES AND PROCEEDINGS WHICH THE TERRITORIAL OFFICIALS EVOLVED FROM THE (BOGUS->VOGUS) LAWS
+7729-102255-0019-280: TO EMBARRASS THIS DAMAGING EXPOSURE JUDGE (LECOMPTE->LECOMTE) ISSUED A WRIT AGAINST THE EX GOVERNOR ON A FRIVOLOUS CHARGE OF CONTEMPT
+7729-102255-0020-281: THE INCIDENT WAS NOT VIOLENT NOR EVEN DRAMATIC NO POSSE WAS (SUMMONED->SUMMON) NO FURTHER EFFORT MADE AND (REEDER->READER) FEARING PERSONAL VIOLENCE SOON FLED IN DISGUISE
+7729-102255-0021-282: BUT THE AFFAIR WAS MAGNIFIED AS A CROWNING PROOF THAT THE FREE STATE MEN WERE INSURRECTIONISTS AND OUTLAWS
+7729-102255-0022-283: FROM THESE AGAIN SPRANG BARRICADED AND FORTIFIED DWELLINGS CAMPS AND (SCOUTING->SCOUT) PARTIES FINALLY CULMINATING IN ROVING GUERRILLA (BANDS->VANS) HALF PARTISAN HALF PREDATORY
+7729-102255-0023-284: THEIR DISTINCTIVE CHARACTERS HOWEVER DISPLAY ONE BROAD AND UNFAILING DIFFERENCE
+7729-102255-0024-285: THE FREE STATE MEN CLUNG TO THEIR PRAIRIE TOWNS AND (PRAIRIE->PRAIRI) RAVINES WITH ALL THE OBSTINACY AND COURAGE OF TRUE DEFENDERS OF THEIR HOMES AND FIRESIDES
+7729-102255-0025-286: (THEIR->THERE) ASSUMED CHARACTER CHANGED WITH THEIR CHANGING OPPORTUNITIES OR NECESSITIES
+7729-102255-0026-287: IN THE SHOOTING OF (SHERIFF->SHERIFF'S) JONES IN LAWRENCE AND IN THE REFUSAL OF EX GOVERNOR (BEEDER->READER) TO ALLOW THE DEPUTY MARSHAL TO ARREST HIM THEY DISCOVERED GRAVE (OFFENSES->OFFENCES) AGAINST THE TERRITORIAL AND (*->THE) UNITED STATES LAWS
+7729-102255-0027-288: FOOTNOTE SUMNER TO SHANNON MAY TWELFTH EIGHTEEN FIFTY SIX
+7729-102255-0028-289: PRIVATE PERSONS WHO HAD (LEASED->LEAST) THE FREE STATE HOTEL VAINLY BESOUGHT THE VARIOUS AUTHORITIES TO (PREVENT->PRESENT) THE DESTRUCTION OF THEIR PROPERTY
+7729-102255-0029-290: TEN DAYS WERE CONSUMED IN THESE NEGOTIATIONS BUT THE SPIRIT OF VENGEANCE REFUSED TO YIELD
+7729-102255-0030-291: HE SUMMONED HALF A DOZEN CITIZENS TO JOIN HIS POSSE WHO FOLLOWED OBEYED AND ASSISTED HIM
+7729-102255-0031-292: HE CONTINUED HIS PRETENDED SEARCH AND TO GIVE COLOR TO HIS ERRAND MADE (TWO ARRESTS->TO ARREST)
+7729-102255-0032-293: THE FREE STATE HOTEL A STONE BUILDING IN DIMENSIONS FIFTY BY SEVENTY FEET THREE STORIES HIGH AND HANDSOMELY FURNISHED PREVIOUSLY OCCUPIED ONLY FOR LODGING ROOMS ON THAT DAY FOR THE FIRST TIME OPENED ITS TABLE ACCOMMODATIONS TO THE PUBLIC AND PROVIDED A FREE DINNER IN HONOR OF THE OCCASION
+7729-102255-0033-294: AS HE HAD PROMISED TO PROTECT THE HOTEL THE REASSURED CITIZENS BEGAN TO LAUGH AT THEIR OWN FEARS
+7729-102255-0034-295: TO THEIR SORROW THEY WERE SOON UNDECEIVED
+7729-102255-0035-296: THE MILITARY FORCE PARTLY RABBLE PARTLY ORGANIZED HAD MEANWHILE MOVED INTO THE TOWN
+7729-102255-0036-297: HE PLANTED (A COMPANY->ACCOMPANIED) BEFORE THE HOTEL AND DEMANDED A SURRENDER OF THE ARMS BELONGING TO THE FREE STATE MILITARY COMPANIES
+7729-102255-0037-298: HALF AN HOUR LATER TURNING A DEAF EAR TO ALL REMONSTRANCE HE GAVE THE PROPRIETORS UNTIL FIVE O'CLOCK TO REMOVE THEIR FAMILIES AND PERSONAL PROPERTY FROM THE FREE STATE HOTEL
+7729-102255-0038-299: (ATCHISON->ATTITSON) WHO HAD BEEN HARANGUING THE MOB PLANTED HIS TWO GUNS BEFORE THE BUILDING AND TRAINED THEM UPON IT
+7729-102255-0039-300: THE INMATES BEING REMOVED AT THE APPOINTED HOUR A FEW CANNON BALLS WERE FIRED THROUGH THE STONE WALLS
+7729-102255-0040-301: IN THIS INCIDENT CONTRASTING THE CREATIVE AND THE DESTRUCTIVE SPIRIT OF THE FACTIONS THE (EMIGRANT AID->IMMIGRANT AIDS) SOCIETY OF MASSACHUSETTS FINDS ITS MOST HONORABLE AND TRIUMPHANT VINDICATION
+7729-102255-0041-302: THE WHOLE PROCEEDING WAS SO CHILDISH THE MISERABLE PLOT SO TRANSPARENT THE (OUTRAGE->OUTRAGED) SO GROSS AS TO BRING DISGUST TO THE BETTER CLASS OF BORDER RUFFIANS WHO WERE WITNESSES AND ACCESSORIES
+7729-102255-0042-303: (RELOCATED->RE LOCATED) FOOTNOTE GOVERNOR ROBINSON BEING ON HIS WAY EAST THE STEAMBOAT ON WHICH HE WAS (TRAVELING->TRAVELLING) STOPPED AT LEXINGTON MISSOURI
+7729-102255-0043-304: IN A FEW DAYS AN OFFICER CAME WITH A REQUISITION FROM GOVERNOR SHANNON AND TOOK THE PRISONER BY (LAND TO->LANDA) WESTPORT AND AFTERWARDS FROM THERE TO KANSAS CITY (AND LEAVENWORTH->IN LEVINWORTH)
+7729-102255-0044-305: (HERE HE->HARRY) WAS PLACED IN THE CUSTODY OF CAPTAIN MARTIN OF THE KICKAPOO RANGERS WHO PROVED A KIND JAILER AND MATERIALLY ASSISTED IN PROTECTING HIM FROM THE DANGEROUS INTENTIONS OF THE MOB WHICH AT THAT TIME HELD (LEAVENWORTH->LEVIN WORTH) UNDER (A->THE) REIGN OF TERROR
+7729-102255-0045-306: CAPTAIN MARTIN SAID I SHALL GIVE YOU A PISTOL TO HELP PROTECT YOURSELF IF WORSE COMES TO WORST
+7729-102255-0046-307: IN THE EARLY MORNING OF THE NEXT DAY MAY TWENTY NINTH A COMPANY OF DRAGOONS WITH ONE EMPTY SADDLE CAME DOWN FROM THE FORT AND WHILE THE PRO SLAVERY MEN STILL SLEPT THE PRISONER AND HIS ESCORT WERE ON THEIR WAY ACROSS THE PRAIRIES TO LECOMPTON IN THE CHARGE OF OFFICERS OF THE UNITED STATES ARMY
+8224-274381-0000-1451: THOUGH THROWN INTO PRISON FOR THIS ENTERPRISE AND DETAINED SOME TIME HE WAS NOT DISCOURAGED BUT STILL CONTINUED BY HIS COUNTENANCE AND PROTECTION TO INFUSE SPIRIT INTO THE DISTRESSED ROYALISTS
+8224-274381-0001-1452: AMONG OTHER PERSONS OF DISTINCTION WHO UNITED THEMSELVES TO HIM WAS LORD NAPIER OF (MERCHISTON->MURCHISTON) SON OF THE FAMOUS INVENTOR OF THE LOGARITHMS THE PERSON TO WHOM THE TITLE OF A GREAT MAN IS MORE JUSTLY DUE THAN TO ANY OTHER WHOM HIS COUNTRY EVER PRODUCED
+8224-274381-0002-1453: WHILE THE FORMER FORETOLD THAT THE SCOTTISH COVENANTERS WERE SECRETLY FORMING A UNION WITH THE ENGLISH PARLIAMENT AND (INCULCATED->INCALCATED) THE NECESSITY OF PREVENTING THEM BY SOME VIGOROUS UNDERTAKING THE LATTER STILL INSISTED THAT EVERY SUCH ATTEMPT WOULD PRECIPITATE THEM INTO MEASURES TO WHICH OTHERWISE THEY WERE NOT PERHAPS INCLINED
+8224-274381-0003-1454: THE KING'S EARS WERE NOW OPEN TO MONTROSE'S (COUNSELS->COUNCILS) WHO PROPOSED NONE BUT THE BOLDEST AND MOST DARING AGREEABLY TO THE DESPERATE STATE OF THE ROYAL CAUSE IN SCOTLAND
+8224-274381-0004-1455: FIVE HUNDRED MEN MORE WHO HAD BEEN LEVIED BY THE COVENANTERS WERE PERSUADED TO EMBRACE THE ROYAL CAUSE AND WITH THIS COMBINED FORCE HE HASTENED TO ATTACK LORD (ELCHO->ELKOE) WHO LAY AT PERTH WITH AN ARMY OF SIX THOUSAND MEN ASSEMBLED UPON THE FIRST NEWS OF THE IRISH INVASION
+8224-274381-0005-1456: DREADING THE SUPERIOR POWER OF ARGYLE WHO HAVING JOINED HIS VASSALS TO A FORCE LEVIED BY THE PUBLIC WAS APPROACHING WITH A CONSIDERABLE ARMY MONTROSE HASTENED (NORTHWARDS->NORTHWARD) IN ORDER TO ROUSE AGAIN THE MARQUIS OF (HUNTLEY->HUNTLY) AND THE GORDONS WHO HAVING BEFORE HASTILY TAKEN ARMS HAD BEEN INSTANTLY SUPPRESSED BY THE COVENANTERS
+8224-274381-0006-1457: THIS NOBLEMAN'S CHARACTER THOUGH CELEBRATED FOR POLITICAL COURAGE AND CONDUCT WAS VERY LOW FOR MILITARY PROWESS AND AFTER SOME SKIRMISHES IN WHICH HE WAS WORSTED HE HERE ALLOWED MONTROSE TO ESCAPE HIM
+8224-274381-0007-1458: BY QUICK MARCHES THROUGH THESE INACCESSIBLE MOUNTAINS THAT GENERAL FREED HIMSELF FROM THE SUPERIOR FORCES OF THE COVENANTERS
+8224-274381-0008-1459: WITH THESE AND SOME (REENFORCEMENTS->REINFORCEMENTS) OF THE (ATHOLEMEN->ETHEL MEN) AND (MACDONALDS->MAC DONALDS) WHOM HE HAD RECALLED MONTROSE FELL SUDDENLY UPON ARGYLE'S COUNTRY AND LET LOOSE UPON IT ALL THE RAGE OF WAR CARRYING OFF THE CATTLE BURNING THE HOUSES AND PUTTING THE INHABITANTS TO THE SWORD
+8224-274381-0009-1460: THIS SEVERITY BY WHICH MONTROSE SULLIED HIS VICTORIES WAS THE RESULT OF PRIVATE ANIMOSITY AGAINST THE CHIEFTAIN AS MUCH AS OF ZEAL FOR THE PUBLIC CAUSE ARGYLE COLLECTING THREE THOUSAND MEN MARCHED IN QUEST OF THE ENEMY WHO HAD RETIRED WITH THEIR PLUNDER AND HE LAY AT (INNERLOCHY->INERLOCKY) SUPPOSING HIMSELF STILL AT A CONSIDERABLE DISTANCE FROM THEM
+8224-274381-0010-1461: BY A QUICK AND UNEXPECTED MARCH MONTROSE HASTENED TO (INNERLOCHY->IN A LOCKY) AND PRESENTED HIMSELF IN ORDER OF BATTLE BEFORE THE SURPRISED BUT NOT (AFFRIGHTENED->A FRIGHTENED) COVENANTERS
+8224-274381-0011-1462: HIS CONDUCT AND PRESENCE OF MIND IN THIS EMERGENCE APPEARED CONSPICUOUS
+8224-274381-0012-1463: MONTROSE WEAK IN CAVALRY HERE LINED HIS TROOPS OF HORSE WITH INFANTRY AND AFTER PUTTING THE ENEMY'S HORSE TO ROUT FELL WITH UNITED FORCE UPON THEIR FOOT WHO WERE ENTIRELY CUT IN PIECES THOUGH WITH THE LOSS OF THE GALLANT LORD GORDON ON THE PART OF THE ROYALISTS
+8224-274381-0013-1464: FROM THE SAME MEN NEW REGIMENTS AND NEW COMPANIES WERE FORMED DIFFERENT OFFICERS APPOINTED AND THE WHOLE MILITARY FORCE PUT INTO SUCH HANDS AS THE INDEPENDENTS COULD RELY ON
+8224-274381-0014-1465: BESIDES MEMBERS OF PARLIAMENT WHO WERE EXCLUDED MANY OFFICERS UNWILLING TO SERVE UNDER THE NEW GENERALS THREW UP THEIR COMMISSIONS AND (UNWARILY->THEN WARILY) FACILITATED THE PROJECT OF PUTTING THE ARMY ENTIRELY INTO THE HANDS OF THAT FACTION
+8224-274381-0015-1466: THOUGH THE DISCIPLINE OF THE FORMER PARLIAMENTARY ARMY WAS NOT CONTEMPTIBLE A MORE EXACT PLAN WAS INTRODUCED AND RIGOROUSLY EXECUTED BY THESE NEW COMMANDERS
+8224-274381-0016-1467: VALOR INDEED WAS VERY GENERALLY DIFFUSED OVER THE ONE PARTY AS WELL AS THE OTHER DURING THIS PERIOD DISCIPLINE ALSO WAS ATTAINED BY THE FORCES OF THE PARLIAMENT BUT THE PERFECTION OF THE MILITARY ART IN CONCERTING THE GENERAL PLANS OF ACTION AND THE OPERATIONS OF THE FIELD SEEMS STILL ON BOTH SIDES TO HAVE BEEN IN A GREAT MEASURE WANTING
+8224-274381-0017-1468: HISTORIANS AT LEAST PERHAPS FROM THEIR OWN IGNORANCE AND INEXPERIENCE HAVE NOT REMARKED ANY THING BUT A HEADLONG IMPETUOUS CONDUCT EACH PARTY HURRYING TO A BATTLE WHERE VALOR AND FORTUNE CHIEFLY (DETERMINED->DETERMINE) THE SUCCESS
+8224-274384-0000-1437: HE PASSED THROUGH HENLEY SAINT (ALBANS->ALBAN'S) AND CAME SO NEAR TO LONDON AS HARROW ON THE HILL
+8224-274384-0001-1438: THE SCOTTISH GENERALS AND COMMISSIONERS AFFECTED GREAT SURPRISE ON THE APPEARANCE OF THE KING AND THOUGH THEY PAID HIM ALL THE EXTERIOR RESPECT DUE TO HIS DIGNITY THEY INSTANTLY SET A GUARD UPON HIM UNDER COLOR OF PROTECTION AND MADE HIM IN REALITY A PRISONER
+8224-274384-0002-1439: THEY INFORMED THE ENGLISH PARLIAMENT OF THIS UNEXPECTED INCIDENT AND ASSURED THEM THAT THEY HAD ENTERED INTO NO PRIVATE TREATY WITH THE KING
+8224-274384-0003-1440: OR HATH HE GIVEN US ANY GIFT
+8224-274384-0004-1441: AND THE MEN OF ISRAEL ANSWERED THE MEN OF JUDAH AND SAID WE HAVE TEN PARTS IN THE KING AND WE HAVE ALSO MORE RIGHT IN DAVID THAN YE WHY THEN DID YE DESPISE US THAT OUR ADVICE SHOULD NOT BE FIRST HAD IN BRINGING BACK OUR KING
+8224-274384-0005-1442: ANOTHER PREACHER AFTER REPROACHING HIM TO HIS FACE WITH HIS MISGOVERNMENT ORDERED THIS (PSALM->SUM) TO BE SUNG
+8224-274384-0006-1443: THE KING STOOD UP AND CALLED FOR THAT PSALM WHICH BEGINS WITH THESE WORDS
+8224-274384-0007-1444: HAVE MERCY LORD ON ME I PRAY FOR MEN WOULD ME DEVOUR
+8224-274384-0008-1445: THE GOOD NATURED AUDIENCE IN PITY TO (FALLEN->FALL AND) MAJESTY SHOWED FOR ONCE GREATER DEFERENCE TO THE KING THAN TO THE MINISTER AND SUNG THE PSALM WHICH THE FORMER HAD CALLED FOR
+8224-274384-0009-1446: THE PARLIAMENT AND THE SCOTS LAID THEIR PROPOSALS BEFORE THE KING
+8224-274384-0010-1447: BEFORE THE SETTLEMENT OF TERMS THE ADMINISTRATION MUST BE POSSESSED ENTIRELY BY THE PARLIAMENTS OF BOTH KINGDOMS AND HOW INCOMPATIBLE THAT SCHEME WITH THE LIBERTY OF THE KING IS EASILY IMAGINED
+8224-274384-0011-1448: THE ENGLISH IT IS EVIDENT HAD THEY NOT BEEN PREVIOUSLY ASSURED OF RECEIVING THE KING WOULD NEVER HAVE PARTED WITH SO CONSIDERABLE A SUM AND WHILE THEY WEAKENED THEMSELVES BY THE SAME MEASURE HAVE STRENGTHENED A PEOPLE WITH WHOM THEY MUST AFTERWARDS HAVE SO MATERIAL AN INTEREST TO DISCUSS
+8224-274384-0012-1449: IF ANY STILL RETAINED (RANCOR->RANK OR) AGAINST HIM IN HIS PRESENT CONDITION THEY PASSED IN SILENCE WHILE HIS WELL WISHERS MORE GENEROUS THAN PRUDENT ACCOMPANIED HIS MARCH WITH TEARS WITH ACCLAMATIONS AND WITH PRAYERS FOR HIS SAFETY
+8224-274384-0013-1450: HIS DEATH IN THIS CONJUNCTURE WAS A PUBLIC MISFORTUNE
+8230-279154-0000-617: THE ANALYSIS OF KNOWLEDGE WILL OCCUPY US UNTIL THE END OF THE THIRTEENTH LECTURE AND IS THE MOST DIFFICULT PART OF OUR WHOLE ENTERPRISE
+8230-279154-0001-618: WHAT IS CALLED PERCEPTION DIFFERS FROM SENSATION BY THE FACT THAT THE SENSATIONAL INGREDIENTS BRING UP HABITUAL ASSOCIATES IMAGES AND EXPECTATIONS OF THEIR USUAL (CORRELATES->COROLLETS) ALL OF WHICH ARE SUBJECTIVELY INDISTINGUISHABLE FROM THE SENSATION
+8230-279154-0002-619: WHETHER OR NOT THIS PRINCIPLE IS LIABLE TO EXCEPTIONS (EVERYONE->EVERY ONE) WOULD AGREE THAT (IS->IT) HAS A BROAD MEASURE OF TRUTH THOUGH THE WORD EXACTLY MIGHT SEEM AN OVERSTATEMENT AND IT MIGHT SEEM MORE CORRECT TO SAY THAT IDEAS APPROXIMATELY REPRESENT IMPRESSIONS
+8230-279154-0003-620: AND WHAT SORT OF EVIDENCE IS LOGICALLY POSSIBLE
+8230-279154-0004-621: THERE IS NO LOGICAL IMPOSSIBILITY IN THE HYPOTHESIS THAT THE WORLD SPRANG INTO BEING FIVE MINUTES AGO EXACTLY AS IT THEN WAS WITH A POPULATION THAT REMEMBERED A WHOLLY UNREAL PAST
+8230-279154-0005-622: ALL THAT I AM DOING IS TO USE ITS LOGICAL TENABILITY AS A HELP IN THE ANALYSIS OF WHAT OCCURS WHEN WE REMEMBER
+8230-279154-0006-623: THE BEHAVIOURIST WHO ATTEMPTS TO MAKE PSYCHOLOGY A RECORD OF (BEHAVIOUR->BEHAVIOR) HAS TO TRUST HIS MEMORY IN MAKING THE RECORD
+8230-279154-0007-624: HABIT IS A CONCEPT INVOLVING THE OCCURRENCE OF SIMILAR EVENTS AT DIFFERENT TIMES IF THE BEHAVIOURIST (FEELS->FILLS) CONFIDENT THAT THERE IS SUCH A PHENOMENON AS HABIT THAT CAN ONLY BE BECAUSE HE TRUSTS HIS MEMORY WHEN IT ASSURES HIM THAT THERE HAVE BEEN OTHER TIMES
+8230-279154-0008-625: BUT I DO NOT THINK SUCH AN (INFERENCE->EFFERENCE) IS WARRANTED
+8230-279154-0009-626: OUR CONFIDENCE OR LACK OF CONFIDENCE IN THE ACCURACY OF A MEMORY IMAGE MUST IN FUNDAMENTAL CASES BE BASED UPON A CHARACTERISTIC OF THE IMAGE ITSELF SINCE WE CANNOT EVOKE THE PAST BODILY AND COMPARE IT WITH THE PRESENT IMAGE
+8230-279154-0010-627: WE SOMETIMES HAVE IMAGES THAT ARE BY NO MEANS PECULIARLY VAGUE WHICH YET WE DO NOT TRUST FOR EXAMPLE UNDER THE INFLUENCE OF FATIGUE WE MAY SEE A FRIEND'S FACE VIVIDLY AND CLEARLY BUT HORRIBLY DISTORTED
+8230-279154-0011-628: SOME IMAGES LIKE SOME SENSATIONS FEEL VERY FAMILIAR WHILE OTHERS FEEL STRANGE
+8230-279154-0012-629: FAMILIARITY IS A (FEELING->FILLING) CAPABLE OF DEGREES
+8230-279154-0013-630: IN AN IMAGE OF A WELL KNOWN FACE FOR EXAMPLE SOME PARTS MAY FEEL MORE FAMILIAR THAN OTHERS WHEN THIS HAPPENS WE HAVE MORE BELIEF IN THE ACCURACY OF THE FAMILIAR PARTS THAN IN THAT OF THE UNFAMILIAR PARTS
+8230-279154-0014-631: I COME NOW TO THE OTHER CHARACTERISTIC WHICH MEMORY IMAGES MUST HAVE IN ORDER TO ACCOUNT FOR OUR KNOWLEDGE OF THE PAST
+8230-279154-0015-632: THEY MUST HAVE SOME CHARACTERISTIC WHICH MAKES US REGARD THEM AS REFERRING TO MORE OR LESS REMOTE PORTIONS OF THE PAST
+8230-279154-0016-633: IN ACTUAL FACT THERE ARE DOUBTLESS VARIOUS FACTORS THAT CONCUR IN GIVING US THE FEELING OF GREATER OR LESS REMOTENESS IN SOME REMEMBERED EVENT
+8230-279154-0017-634: THERE MAY BE A SPECIFIC FEELING WHICH COULD BE CALLED THE (FEELING->FILLING) OF PASTNESS ESPECIALLY WHERE IMMEDIATE MEMORY IS CONCERNED
+8230-279154-0018-635: THERE IS OF COURSE A DIFFERENCE BETWEEN KNOWING THE TEMPORAL RELATION OF A REMEMBERED EVENT TO THE PRESENT AND KNOWING THE TIME ORDER OF TWO REMEMBERED EVENTS
+8230-279154-0019-636: IT WOULD SEEM THAT ONLY RATHER RECENT EVENTS CAN BE PLACED AT ALL ACCURATELY BY MEANS OF FEELINGS GIVING THEIR TEMPORAL RELATION TO THE PRESENT BUT IT IS CLEAR THAT SUCH FEELINGS MUST PLAY AN ESSENTIAL PART IN THE PROCESS OF DATING REMEMBERED EVENTS
+8230-279154-0020-637: IF WE HAD RETAINED THE SUBJECT OR ACT IN KNOWLEDGE THE WHOLE PROBLEM OF MEMORY WOULD HAVE BEEN COMPARATIVELY SIMPLE
+8230-279154-0021-638: REMEMBERING HAS TO BE A PRESENT OCCURRENCE IN SOME WAY RESEMBLING OR RELATED TO WHAT IS REMEMBERED
+8230-279154-0022-639: SOME POINTS MAY BE TAKEN AS FIXED AND SUCH AS ANY THEORY OF MEMORY MUST ARRIVE AT
+8230-279154-0023-640: IN THIS CASE AS IN MOST OTHERS WHAT MAY BE TAKEN AS CERTAIN IN ADVANCE IS RATHER VAGUE
+8230-279154-0024-641: THE FIRST OF OUR VAGUE BUT INDUBITABLE DATA IS THAT THERE IS KNOWLEDGE OF THE PAST
+8230-279154-0025-642: WE MIGHT PROVISIONALLY THOUGH PERHAPS NOT QUITE CORRECTLY DEFINE MEMORY AS THAT WAY OF KNOWING ABOUT THE PAST WHICH HAS NO ANALOGUE IN OUR KNOWLEDGE OF THE FUTURE SUCH A DEFINITION WOULD AT LEAST SERVE TO MARK THE PROBLEM WITH WHICH WE ARE CONCERNED THOUGH SOME EXPECTATIONS MAY DESERVE TO RANK WITH MEMORY AS REGARDS IMMEDIACY
+8230-279154-0026-643: THIS DISTINCTION IS VITAL TO THE UNDERSTANDING OF MEMORY BUT IT IS NOT SO EASY TO CARRY OUT IN PRACTICE AS IT IS TO DRAW IN THEORY
+8230-279154-0027-644: A GRAMOPHONE BY THE HELP OF SUITABLE RECORDS MIGHT RELATE TO US THE INCIDENTS OF ITS PAST AND PEOPLE ARE NOT SO DIFFERENT FROM GRAMOPHONES AS THEY LIKE TO BELIEVE
+8230-279154-0028-645: I CAN SET TO WORK NOW TO REMEMBER THINGS I NEVER REMEMBERED BEFORE SUCH AS WHAT I HAD TO EAT FOR BREAKFAST THIS MORNING AND IT CAN HARDLY BE WHOLLY HABIT THAT ENABLES ME TO DO THIS
+8230-279154-0029-646: THE FACT THAT A MAN CAN RECITE A POEM DOES NOT SHOW THAT HE REMEMBERS ANY PREVIOUS OCCASION ON WHICH HE HAS RECITED OR READ IT
+8230-279154-0030-647: (SEMON'S->SIMMONS) TWO BOOKS MENTIONED IN AN EARLIER LECTURE DO NOT TOUCH KNOWLEDGE MEMORY AT ALL CLOSELY
+8230-279154-0031-648: THEY GIVE LAWS ACCORDING TO WHICH IMAGES OF PAST OCCURRENCES COME INTO OUR MINDS BUT DO NOT DISCUSS OUR BELIEF THAT THESE IMAGES REFER TO PAST OCCURRENCES WHICH IS WHAT CONSTITUTES KNOWLEDGE MEMORY
+8230-279154-0032-649: IT IS THIS THAT IS OF INTEREST TO THEORY OF KNOWLEDGE
+8230-279154-0033-650: IT IS BY NO MEANS ALWAYS RELIABLE ALMOST EVERYBODY HAS AT SOME TIME EXPERIENCED THE WELL KNOWN ILLUSION THAT ALL THAT IS HAPPENING NOW HAPPENED BEFORE AT SOME TIME
+8230-279154-0034-651: WHENEVER THE SENSE OF FAMILIARITY OCCURS WITHOUT A DEFINITE OBJECT IT LEADS US TO SEARCH THE ENVIRONMENT UNTIL WE ARE SATISFIED THAT WE HAVE FOUND THE APPROPRIATE OBJECT WHICH LEADS US TO THE JUDGMENT THIS IS FAMILIAR
+8230-279154-0035-652: THUS NO KNOWLEDGE AS TO THE PAST IS TO BE DERIVED FROM THE FEELING OF FAMILIARITY ALONE
+8230-279154-0036-653: A FURTHER STAGE IS RECOGNITION
+8230-279154-0037-654: RECOGNITION IN THIS SENSE DOES NOT NECESSARILY INVOLVE MORE THAN A HABIT OF ASSOCIATION THE KIND OF OBJECT WE ARE SEEING AT THE MOMENT IS ASSOCIATED WITH THE WORD CAT OR WITH AN AUDITORY IMAGE OF PURRING OR WHATEVER OTHER CHARACTERISTIC WE MAY HAPPEN TO RECOGNIZE IN THE CAT OF THE MOMENT
+8230-279154-0038-655: WE ARE OF COURSE IN FACT ABLE TO JUDGE WHEN WE RECOGNIZE AN OBJECT THAT WE HAVE SEEN IT BEFORE BUT THIS JUDGMENT IS SOMETHING OVER AND ABOVE RECOGNITION IN THIS FIRST SENSE AND MAY VERY PROBABLY BE IMPOSSIBLE TO ANIMALS THAT NEVERTHELESS HAVE THE EXPERIENCE OF RECOGNITION IN THIS FIRST SENSE OF THE WORD
+8230-279154-0039-656: THIS KNOWLEDGE IS MEMORY IN ONE SENSE THOUGH IN ANOTHER IT IS NOT
+8230-279154-0040-657: THERE ARE HOWEVER SEVERAL POINTS IN WHICH SUCH AN ACCOUNT OF RECOGNITION IS INADEQUATE TO BEGIN WITH IT MIGHT SEEM AT FIRST SIGHT MORE CORRECT TO DEFINE RECOGNITION AS I HAVE SEEN THIS BEFORE THAN AS THIS HAS EXISTED BEFORE
+8230-279154-0041-658: THE DEFINITION OF MY EXPERIENCE IS DIFFICULT BROADLY SPEAKING IT IS EVERYTHING THAT IS CONNECTED WITH WHAT I AM EXPERIENCING NOW BY CERTAIN LINKS OF WHICH THE VARIOUS FORMS OF MEMORY ARE AMONG THE MOST IMPORTANT
+8230-279154-0042-659: THUS IF I RECOGNIZE A THING THE OCCASION OF ITS PREVIOUS EXISTENCE IN VIRTUE OF WHICH I RECOGNIZE IT FORMS PART OF MY EXPERIENCE BY DEFINITION RECOGNITION WILL BE ONE OF THE MARKS BY WHICH MY EXPERIENCE IS SINGLED OUT FROM THE REST OF THE WORLD
+8230-279154-0043-660: OF COURSE THE WORDS THIS HAS EXISTED BEFORE ARE (A->OF) VERY INADEQUATE TRANSLATION OF WHAT ACTUALLY HAPPENS WHEN WE FORM A JUDGMENT OF RECOGNITION BUT THAT IS UNAVOIDABLE WORDS ARE FRAMED TO EXPRESS A LEVEL OF THOUGHT WHICH IS BY NO MEANS PRIMITIVE AND ARE QUITE INCAPABLE OF EXPRESSING SUCH AN ELEMENTARY OCCURRENCE AS RECOGNITION
+8455-210777-0000-972: I REMAINED THERE ALONE FOR MANY HOURS BUT I MUST ACKNOWLEDGE THAT BEFORE I LEFT THE CHAMBERS I HAD GRADUALLY BROUGHT MYSELF TO LOOK AT THE MATTER IN ANOTHER LIGHT
+8455-210777-0001-973: HAD (EVA CRASWELLER->EITHER CRUSHWELLER) NOT BEEN GOOD LOOKING HAD JACK BEEN STILL AT COLLEGE HAD SIR KENNINGTON OVAL REMAINED IN ENGLAND HAD MISTER (BUNNIT->BUNNOT) AND THE BAR KEEPER NOT SUCCEEDED IN STOPPING MY CARRIAGE ON THE HILL SHOULD I HAVE SUCCEEDED IN (ARRANGING->A RAGING) FOR THE FINAL DEPARTURE OF MY OLD FRIEND
+8455-210777-0002-974: ON ARRIVING AT HOME AT MY OWN RESIDENCE I FOUND THAT OUR SALON WAS FILLED WITH A BRILLIANT COMPANY
+8455-210777-0003-975: AS I SPOKE I MADE HIM A GRACIOUS BOW AND I THINK I SHOWED HIM BY MY MODE OF ADDRESS THAT I DID NOT BEAR ANY GRUDGE AS TO MY INDIVIDUAL SELF
+8455-210777-0004-976: I HAVE COME TO YOUR SHORES MISTER PRESIDENT WITH THE PURPOSE OF SEEING HOW THINGS ARE PROGRESSING IN THIS DISTANT QUARTER OF THE WORLD
+8455-210777-0005-977: WE HAVE OUR LITTLE STRUGGLES HERE AS ELSEWHERE AND ALL THINGS CANNOT BE DONE BY ROSE WATER
+8455-210777-0006-978: WE ARE QUITE SATISFIED NOW CAPTAIN (BATTLEAX->BATTLE AXE) SAID MY WIFE
+8455-210777-0007-979: QUITE SATISFIED SAID EVA
+8455-210777-0008-980: THE LADIES IN COMPLIANCE WITH THAT SOFTNESS OF HEART WHICH IS THEIR CHARACTERISTIC ARE ON ONE SIDE AND THE MEN BY WHOM THE WORLD HAS TO BE MANAGED (ARE->OR) ON THE OTHER
+8455-210777-0009-981: NO DOUBT IN PROCESS OF TIME THE LADIES WILL FOLLOW
+8455-210777-0010-982: THEIR (MASTERS->MASTER) SAID MISSUS NEVERBEND
+8455-210777-0011-983: I DID NOT MEAN SAID CAPTAIN (BATTLEAX->BATTLE AXE) TO TOUCH UPON PUBLIC SUBJECTS AT SUCH A MOMENT AS THIS
+8455-210777-0012-984: MISSUS NEVERBEND YOU MUST INDEED BE PROUD OF YOUR SON
+8455-210777-0013-985: JACK HAD BEEN STANDING IN THE FAR CORNER OF THE ROOM TALKING TO EVA AND WAS NOW REDUCED TO SILENCE BY HIS PRAISES
+8455-210777-0014-986: SIR KENNINGTON OVAL IS A VERY FINE PLAYER SAID MY WIFE
+8455-210777-0015-987: I (AND->AM) MY WIFE AND SON AND THE TWO (CRASWELLERS->CRESTWELLERS) AND THREE OR FOUR OTHERS AGREED TO DINE ON BOARD THE SHIP ON THE NEXT
+8455-210777-0016-988: THIS I FELT WAS PAID TO ME AS BEING PRESIDENT OF THE REPUBLIC AND I ENDEAVOURED TO BEHAVE MYSELF WITH SUCH MINGLED HUMILITY AND DIGNITY AS MIGHT (BEFIT->BE FIT) THE OCCASION BUT I COULD NOT BUT FEEL THAT SOMETHING WAS WANTING TO THE SIMPLICITY OF MY ORDINARY LIFE
+8455-210777-0017-989: MY WIFE ON THE SPUR OF THE MOMENT MANAGED TO GIVE THE (GENTLEMEN->GENTLEMAN) A VERY GOOD DINNER
+8455-210777-0018-990: THIS SHE SAID WAS TRUE HOSPITALITY AND I AM NOT SURE THAT I DID NOT AGREE WITH (HER->THERE)
+8455-210777-0019-991: THEN THERE WERE THREE OR FOUR LEADING MEN OF THE COMMUNITY WITH THEIR WIVES WHO WERE FOR THE MOST PART THE FATHERS AND MOTHERS OF THE YOUNG LADIES
+8455-210777-0020-992: OH YES SAID JACK AND I'M NOWHERE
+8455-210777-0021-993: BUT I MEAN TO HAVE MY INNINGS BEFORE LONG
+8455-210777-0022-994: OF WHAT MISSUS NEVERBEND HAD GONE THROUGH IN PROVIDING BIRDS BEASTS AND FISHES NOT TO TALK OF TARTS AND JELLIES FOR THE DINNER OF THAT DAY NO ONE BUT MYSELF CAN HAVE ANY IDEA BUT IT MUST BE ADMITTED THAT SHE ACCOMPLISHED HER TASK WITH THOROUGH SUCCESS
+8455-210777-0023-995: WE SAT WITH THE (OFFICERS->OFFICER) SOME LITTLE TIME AFTER DINNER AND THEN WENT ASHORE
+8455-210777-0024-996: HOW MUCH OF EVIL OF REAL ACCOMPLISHED EVIL HAD THERE NOT OCCURRED TO ME DURING THE LAST FEW DAYS
+8455-210777-0025-997: WHAT COULD I DO NOW BUT JUST LAY MYSELF DOWN AND DIE
+8455-210777-0026-998: AND THE DEATH OF WHICH I DREAMT COULD NOT ALAS
+8455-210777-0027-999: WHEN THIS CAPTAIN SHOULD HAVE TAKEN HIMSELF AND HIS VESSEL BACK TO ENGLAND I WOULD RETIRE TO A SMALL FARM WHICH I POSSESSED AT THE (FARTHEST->FURTHEST) SIDE OF THE ISLAND AND THERE IN SECLUSION (WOULD->WHAT) I END MY DAYS
+8455-210777-0028-1000: JACK WOULD BECOME EVA'S HAPPY HUSBAND AND WOULD REMAIN AMIDST THE HURRIED DUTIES OF THE EAGER WORLD
+8455-210777-0029-1001: THINKING OF ALL THIS I WENT TO SLEEP
+8455-210777-0030-1002: MISTER NEVERBEND BEGAN THE CAPTAIN AND I (OBSERVED->OBSERVE) THAT UP TO THAT MOMENT HE HAD GENERALLY ADDRESSED ME AS PRESIDENT IT CANNOT BE DENIED THAT WE HAVE COME HERE ON AN UNPLEASANT MISSION
+8455-210777-0031-1003: YOU HAVE RECEIVED US WITH ALL THAT COURTESY AND HOSPITALITY FOR WHICH YOUR CHARACTER (*->AND) IN ENGLAND (STANDS->STAND) SO HIGH
+8455-210777-0032-1004: IT IS A DUTY SAID I
+8455-210777-0033-1005: BUT YOUR POWER IS SO SUPERIOR TO ANY THAT I CAN ADVANCE AS TO MAKE US HERE FEEL THAT THERE IS NO DISGRACE IN YIELDING TO IT
+8455-210777-0034-1006: NOT A DOUBT BUT HAD YOUR FORCE BEEN ONLY DOUBLE OR (TREBLE->TROUBLE) OUR OWN I SHOULD HAVE FOUND IT MY DUTY TO STRUGGLE WITH YOU
+8455-210777-0035-1007: THAT IS ALL QUITE TRUE MISTER NEVERBEND SAID SIR FERDINANDO BROWN
+8455-210777-0036-1008: I CAN AFFORD TO SMILE BECAUSE I AM ABSOLUTELY POWERLESS BEFORE YOU BUT I DO NOT THE LESS FEEL THAT IN A MATTER (IN->OF) WHICH THE PROGRESS OF THE WORLD IS CONCERNED I OR RATHER WE HAVE BEEN PUT DOWN BY BRUTE FORCE
+8455-210777-0037-1009: YOU HAVE COME TO US THREATENING US WITH ABSOLUTE DESTRUCTION
+8455-210777-0038-1010: THEREFORE I FEEL MYSELF QUITE ABLE AS PRESIDENT OF THIS REPUBLIC TO RECEIVE YOU WITH A COURTESY DUE TO THE SERVANTS OF A FRIENDLY ALLY
+8455-210777-0039-1011: I CAN ASSURE YOU HE HAS NOT EVEN ALLOWED ME TO SEE THE TRIGGER SINCE I HAVE BEEN ON BOARD
+8455-210777-0040-1012: THEN SAID SIR FERDINANDO THERE IS NOTHING FOR IT BUT THAT (HE->WE) MUST TAKE YOU WITH HIM
+8455-210777-0041-1013: THERE CAME UPON ME A SUDDEN SHOCK WHEN I HEARD THESE WORDS WHICH EXCEEDED ANYTHING WHICH I HAD YET FELT
+8455-210777-0042-1014: YOU HEAR WHAT SIR FERDINANDO BROWN HAS SAID REPLIED CAPTAIN (BATTLEAX->BATTLEX)
+8455-210777-0043-1015: BUT WHAT IS THE DELICATE MISSION I ASKED
+8455-210777-0044-1016: I WAS TO BE TAKEN AWAY AND CARRIED TO ENGLAND OR ELSEWHERE OR DROWNED UPON THE VOYAGE IT MATTERED NOT WHICH
+8455-210777-0045-1017: THEN THE REPUBLIC OF (BRITANNULA->BRITAIN YULA) WAS TO BE DECLARED AS NON EXISTENT AND THE BRITISH FLAG WAS TO BE EXALTED AND A BRITISH GOVERNOR INSTALLED IN THE EXECUTIVE CHAMBERS
+8455-210777-0046-1018: YOU MAY BE QUITE SURE (IT'S->TO) THERE SAID CAPTAIN (BATTLEAX->BATTLE AXE) AND THAT I CAN SO USE IT AS TO HALF OBLITERATE YOUR TOWN WITHIN TWO MINUTES OF MY RETURN ON BOARD
+8455-210777-0047-1019: YOU PROPOSE TO KIDNAP ME I SAID
+8455-210777-0048-1020: WHAT (WOULD->WILL) BECOME OF YOUR GUN WERE I TO KIDNAP YOU
+8455-210777-0049-1021: LIEUTENANT (CROSSTREES->CROSS TREES) IS A VERY GALLANT OFFICER
+8455-210777-0050-1022: ONE OF US ALWAYS REMAINS ON BOARD WHILE THE OTHER IS ON SHORE
+8455-210777-0051-1023: WHAT WORLD WIDE INIQUITY SUCH A SPEECH AS THAT DISCLOSES SAID I STILL TURNING MYSELF TO THE CAPTAIN FOR THOUGH I WOULD HAVE CRUSHED THEM BOTH BY MY WORDS HAD IT BEEN POSSIBLE MY DISLIKE (CENTRED->SENATE) ITSELF ON SIR FERDINANDO
+8455-210777-0052-1024: YOU WILL ALLOW ME TO SUGGEST SAID HE THAT THAT IS A MATTER OF OPINION
+8455-210777-0053-1025: WERE I TO COMPLY WITH YOUR ORDERS WITHOUT EXPRESSING MY OWN OPINION I SHOULD SEEM TO HAVE DONE SO WILLINGLY HEREAFTER
+8455-210777-0054-1026: THE LETTER RAN AS FOLLOWS
+8455-210777-0055-1027: SIR I HAVE IT IN COMMAND TO INFORM YOUR EXCELLENCY THAT YOU HAVE BEEN APPOINTED GOVERNOR OF THE CROWN COLONY WHICH IS CALLED (BRITANNULA->BRITAIN ULLA)
+8455-210777-0056-1028: THE PECULIAR CIRCUMSTANCES OF THE COLONY ARE WITHIN YOUR EXCELLENCY'S KNOWLEDGE
+8455-210777-0057-1029: BUT IN THEIR SELECTION OF A CONSTITUTION THE (BRITANNULISTS->BRITAIN UILISTS) HAVE UNFORTUNATELY ALLOWED THEMSELVES BUT ONE (DELIBERATIVE->DELIBERATE) ASSEMBLY AND HENCE (HAVE->HAS) SPRUNG THEIR PRESENT DIFFICULTIES
+8455-210777-0058-1030: IT IS FOUNDED ON THE ACKNOWLEDGED WEAKNESS OF THOSE WHO SURVIVE THAT PERIOD OF LIFE AT WHICH MEN CEASE TO WORK
+8455-210777-0059-1031: BUT IT IS SURMISED THAT YOU WILL FIND DIFFICULTIES IN THE WAY OF YOUR ENTERING AT ONCE UPON YOUR (GOVERNMENT->GOVERNOR)
+8455-210777-0060-1032: THE JOHN BRIGHT (IS ARMED->HIS ARM) WITH A WEAPON OF GREAT POWER AGAINST WHICH IT IS IMPOSSIBLE THAT THE PEOPLE OF (BRITANNULA->BRITAIN EULO) SHOULD PREVAIL
+8455-210777-0061-1033: YOU WILL CARRY OUT WITH YOU ONE HUNDRED MEN OF THE NORTH NORTH WEST BIRMINGHAM REGIMENT WHICH WILL PROBABLY SUFFICE FOR YOUR OWN SECURITY AS IT IS THOUGHT THAT IF MISTER NEVERBEND BE WITHDRAWN THE PEOPLE WILL REVERT EASILY TO THEIR OLD HABITS OF OBEDIENCE
+8455-210777-0062-1034: WHEN DO YOU INTEND THAT (THE->THAT) JOHN BRIGHT SHALL START
+8455-210777-0063-1035: TO DAY I SHOUTED
+8455-210777-0064-1036: AND I HAVE NO ONE READY TO WHOM I CAN GIVE UP THE ARCHIVES OF THE GOVERNMENT
+8455-210777-0065-1037: I SHALL BE HAPPY TO TAKE CHARGE OF THEM SAID SIR FERDINANDO
+8455-210777-0066-1038: THEY OF COURSE MUST ALL BE ALTERED
+8455-210777-0067-1039: OR OF THE HABITS OF OUR PEOPLE IT IS QUITE IMPOSSIBLE
+8455-210777-0068-1040: YOUR POWER IS SUFFICIENT I SAID
+8455-210777-0069-1041: IF YOU WILL GIVE US YOUR PROMISE TO MEET CAPTAIN (BATTLEAX->ADELAX) HERE AT THIS TIME TO MORROW WE WILL STRETCH A POINT AND DELAY THE DEPARTURE OF THE JOHN BRIGHT FOR TWENTY FOUR HOURS
+8455-210777-0070-1042: AND THIS PLAN WAS ADOPTED TOO IN ORDER TO EXTRACT FROM ME A PROMISE THAT I WOULD DEPART IN PEACE
+8463-287645-0000-543: THIS WAS WHAT DID THE MISCHIEF SO FAR AS THE RUNNING AWAY WAS CONCERNED
+8463-287645-0001-544: IT IS HARDLY NECESSARY TO SAY MORE OF THEM HERE
+8463-287645-0002-545: FROM THE MANNER IN WHICH HE EXPRESSED HIMSELF WITH REGARD TO ROBERT (HOLLAN->HOLLAND) NO MAN IN THE WHOLE RANGE OF HIS RECOLLECTIONS WILL BE LONGER REMEMBERED THAN HE HIS (ENTHRALMENT->ENTHRALLMENT) WHILE UNDER (HOLLAN->HOLLAND) WILL HARDLY EVER BE FORGOTTEN
+8463-287645-0003-546: OF THIS PARTY EDWARD A BOY OF SEVENTEEN CALLED FORTH MUCH SYMPATHY HE TOO WAS CLAIMED BY (HOLLAN->HOLLAND)
+8463-287645-0004-547: JOHN WESLEY (COMBASH->COMBATCH) JACOB TAYLOR AND THOMAS EDWARD SKINNER
+8463-287645-0005-548: (A FEW->IF YOU) YEARS BACK ONE OF THEIR SLAVES A COACHMAN WAS KEPT ON THE COACH BOX ONE (COLD->CALLED) NIGHT WHEN THEY WERE OUT AT A BALL UNTIL HE BECAME ALMOST FROZEN TO DEATH IN FACT HE DID DIE IN THE INFIRMARY FROM THE EFFECTS OF THE FROST ABOUT ONE WEEK AFTERWARDS
+8463-287645-0006-549: THE DOCTOR WHO ATTENDED THE (INJURED->ANCIENT) CREATURE IN THIS CASE WAS SIMPLY TOLD THAT SHE SLIPPED AND FELL DOWN (*->THE) STAIRS AS SHE WAS COMING DOWN
+8463-287645-0007-550: ANOTHER CASE SAID JOHN (WESLEY->WESTLEY) WAS A LITTLE GIRL HALF GROWN WHO WAS WASHING WINDOWS (UP STAIRS->UPSTAIRS) ONE DAY AND UNLUCKILY FELL ASLEEP IN THE WINDOW AND IN THIS POSITION WAS FOUND BY HER MISTRESS IN A RAGE THE MISTRESS (HIT->HID) HER A HEAVY SLAP KNOCKED HER OUT OF THE WINDOW AND SHE FELL TO THE PAVEMENT AND DIED IN A FEW HOURS FROM THE EFFECTS THEREOF
+8463-287645-0008-551: AS USUAL NOTHING WAS DONE IN THE WAY OF PUNISHMENT
+8463-287645-0009-552: I NEVER KNEW OF BUT ONE MAN WHO COULD EVER PLEASE HIM
+8463-287645-0010-553: HE WORKED ME VERY HARD HE WANTED TO BE BEATING ME ALL THE TIME
+8463-287645-0011-554: SHE WAS A LARGE HOMELY WOMAN THEY WERE COMMON WHITE PEOPLE WITH NO REPUTATION IN THE COMMUNITY
+8463-287645-0012-555: SUBSTANTIALLY THIS WAS JACOB'S UNVARNISHED DESCRIPTION OF HIS MASTER AND MISTRESS
+8463-287645-0013-556: AS TO HIS AGE AND ALSO THE NAME OF HIS MASTER JACOB'S STATEMENT VARIED SOMEWHAT FROM THE ADVERTISEMENT
+8463-287645-0014-557: OF STARTING I DIDN'T KNOW THE WAY TO COME
+8463-294825-0000-558: IT'S ALMOST BEYOND CONJECTURE
+8463-294825-0001-559: THIS REALITY BEGINS TO EXPLAIN THE DARK POWER AND (OTHERWORLDLY->OTHER WORLDLY) FASCINATION OF TWENTY THOUSAND LEAGUES UNDER THE SEAS
+8463-294825-0002-560: FIRST AS A PARIS (STOCKBROKER->DOCKBROKER) LATER AS A CELEBRATED AUTHOR AND YACHTSMAN HE WENT ON FREQUENT VOYAGES TO BRITAIN AMERICA THE MEDITERRANEAN
+8463-294825-0003-561: NEMO BUILDS A FABULOUS (FUTURISTIC->FUTURESTIC) SUBMARINE THE NAUTILUS THEN CONDUCTS AN UNDERWATER CAMPAIGN OF VENGEANCE AGAINST HIS IMPERIALIST OPPRESSOR
+8463-294825-0004-562: IN ALL THE NOVEL (HAD->HEAD) A DIFFICULT (GESTATION->JUST STATION)
+8463-294825-0005-563: OTHER SUBTLETIES OCCUR INSIDE EACH EPISODE THE TEXTURES SPARKLING WITH WIT INFORMATION AND INSIGHT
+8463-294825-0006-564: HIS SPECIFICATIONS FOR AN OPEN SEA SUBMARINE AND A SELF (CONTAINED->CONTAINING) DIVING SUIT WERE DECADES BEFORE THEIR TIME YET MODERN TECHNOLOGY BEARS THEM OUT TRIUMPHANTLY
+8463-294825-0007-565: EVEN THE SUPPORTING CAST IS SHREWDLY DRAWN PROFESSOR ARONNAX THE CAREER SCIENTIST CAUGHT IN AN ETHICAL CONFLICT CONSEIL THE COMPULSIVE CLASSIFIER WHO SUPPLIES HUMOROUS TAG LINES FOR (VERNE'S->VERNS) FAST FACTS THE HARPOONER NED LAND A CREATURE OF CONSTANT APPETITES MAN AS HEROIC ANIMAL
+8463-294825-0008-566: BUT MUCH OF THE (NOVEL'S->NOVELS) BROODING POWER COMES FROM CAPTAIN NEMO
+8463-294825-0009-567: THIS COMPULSION LEADS NEMO INTO UGLY CONTRADICTIONS (HE'S->HE IS) A FIGHTER FOR FREEDOM YET ALL WHO BOARD HIS SHIP ARE IMPRISONED THERE FOR GOOD HE WORKS TO SAVE LIVES BOTH HUMAN AND ANIMAL YET HE HIMSELF CREATES A (HOLOCAUST->HOHLAST) HE DETESTS IMPERIALISM YET HE LAYS PERSONAL CLAIM TO THE SOUTH POLE
+8463-294825-0010-568: AND IN THIS LAST ACTION HE FALLS INTO THE CLASSIC SIN OF PRIDE
+8463-294825-0011-569: (HE'S->HIS) SWIFTLY PUNISHED
+8463-294825-0012-570: THE NAUTILUS NEARLY PERISHES IN THE ANTARCTIC AND NEMO SINKS INTO A GROWING DEPRESSION
+8463-294825-0013-571: FOR MANY THEN THIS BOOK HAS BEEN A SOURCE OF FASCINATION SURELY ONE OF THE MOST INFLUENTIAL NOVELS EVER WRITTEN (AN->AND) INSPIRATION FOR SUCH SCIENTISTS AND DISCOVERERS AS (ENGINEER->ENGINEERS) SIMON LAKE OCEANOGRAPHER WILLIAM (BEEBE POLAR TRAVELER SIR ERNEST->B POLLAR TRAVELLERS ARE EARNEST) SHACKLETON
+8463-294825-0014-572: FATHOM SIX FEET
+8463-294825-0015-573: (GRAM->GRAHAM) ROUGHLY (ONE->WON) TWENTY EIGHTH OF AN OUNCE
+8463-294825-0016-574: (MILLIGRAM->MILAGRAM) ROUGHLY (ONE->WON) TWENTY EIGHT (THOUSAND->THOUSANDTH) OF AN OUNCE
+8463-294825-0017-575: (LITER->LEADER) ROUGHLY (ONE QUART->WON COURT)
+8463-294825-0018-576: METER ROUGHLY ONE YARD THREE INCHES
+8463-294825-0019-577: (MILLIMETER->MILLAMETER) ROUGHLY (ONE->WON) TWENTY FIFTH OF AN INCH
+8463-294828-0000-578: CHAPTER THREE AS MASTER WISHES
+8463-294828-0001-579: THREE SECONDS BEFORE THE ARRIVAL OF J B HOBSON'S LETTER I (NO->KNOW) MORE DREAMED OF CHASING THE UNICORN THAN OF TRYING FOR THE (NORTHWEST->NORTH WEST) PASSAGE
+8463-294828-0002-580: EVEN SO I HAD JUST RETURNED FROM AN ARDUOUS JOURNEY EXHAUSTED AND BADLY NEEDING (A REST->ARREST)
+8463-294828-0003-581: I WANTED NOTHING MORE THAN TO SEE MY COUNTRY AGAIN MY FRIENDS MY MODEST QUARTERS BY THE BOTANICAL GARDENS MY DEARLY BELOVED COLLECTIONS
+8463-294828-0004-582: BUT NOW NOTHING COULD HOLD ME BACK
+8463-294828-0005-583: CONSEIL WAS MY (MANSERVANT->MAN'S SERVANT)
+8463-294828-0006-584: FROM RUBBING SHOULDERS WITH SCIENTISTS IN OUR LITTLE UNIVERSE BY THE BOTANICAL GARDENS THE BOY HAD COME TO KNOW A THING OR TWO
+8463-294828-0007-585: CLASSIFYING WAS EVERYTHING TO HIM SO HE KNEW NOTHING ELSE (WELL->WILL) VERSED IN (THE->A) THEORY OF CLASSIFICATION HE WAS POORLY VERSED IN ITS PRACTICAL APPLICATION AND I DOUBT THAT HE COULD TELL A SPERM WHALE FROM A (BALEEN->BALINE) WHALE
+8463-294828-0008-586: AND YET WHAT A FINE GALLANT LAD
+8463-294828-0009-587: NOT ONCE DID HE COMMENT ON THE LENGTH OR THE HARDSHIPS OF (A->THE) JOURNEY
+8463-294828-0010-588: NEVER DID HE OBJECT TO BUCKLING UP HIS (SUITCASE->SUIT CASE) FOR ANY COUNTRY WHATEVER CHINA OR THE CONGO NO MATTER HOW FAR OFF IT WAS
+8463-294828-0011-589: HE WENT HERE THERE AND EVERYWHERE IN PERFECT CONTENTMENT
+8463-294828-0012-590: PLEASE FORGIVE ME FOR THIS UNDERHANDED WAY OF ADMITTING (*->THAT) I HAD TURNED FORTY
+8463-294828-0013-591: HE WAS A FANATIC ON FORMALITY AND HE ONLY ADDRESSED ME IN THE THIRD PERSON TO THE POINT WHERE IT GOT (TIRESOME->TO HYAHSOME)
+8463-294828-0014-592: THERE WAS GOOD REASON TO STOP AND THINK EVEN FOR THE WORLD'S MOST EMOTIONLESS MAN
+8463-294828-0015-593: CONSEIL I CALLED A THIRD TIME CONSEIL APPEARED
+8463-294828-0016-594: (DID->DEAD) MASTER SUMMON ME HE SAID ENTERING
+8463-294828-0017-595: PACK AS MUCH INTO MY TRUNK AS YOU CAN MY (TRAVELING->TRAVELLING) KIT MY SUITS SHIRTS AND SOCKS DON'T BOTHER COUNTING JUST SQUEEZE IT ALL IN AND HURRY
+8463-294828-0018-596: WE'LL DEAL WITH THEM LATER WHAT
+8463-294828-0019-597: ANYHOW WE'LL (LEAVE->LIVE) INSTRUCTIONS TO SHIP THE WHOLE MENAGERIE TO FRANCE
+8463-294828-0020-598: YES WE ARE CERTAINLY I REPLIED EVASIVELY BUT AFTER WE MAKE A DETOUR
+8463-294828-0021-599: A (ROUTE->ROUT) SLIGHTLY LESS DIRECT THAT'S ALL
+8463-294828-0022-600: (WE'RE->WERE) LEAVING ON THE ABRAHAM LINCOLN
+8463-294828-0023-601: YOU SEE MY FRIEND IT'S AN ISSUE OF THE MONSTER THE NOTORIOUS NARWHALE
+8463-294828-0024-602: WE DON'T KNOW WHERE IT WILL TAKE US
+8463-294828-0025-603: BUT WE'RE GOING JUST THE SAME
+8463-294828-0026-604: WE HAVE A COMMANDER (WHO'S->WHOSE) GAME FOR ANYTHING
+8463-294828-0027-605: I LEFT INSTRUCTIONS FOR SHIPPING MY CONTAINERS OF STUFFED ANIMALS AND DRIED PLANTS TO PARIS FRANCE
+8463-294828-0028-606: I OPENED A LINE OF CREDIT SUFFICIENT TO COVER THE (BABIRUSA->BARBAROUSA) AND CONSEIL AT MY HEELS I JUMPED INTO A CARRIAGE
+8463-294828-0029-607: OUR BAGGAGE WAS IMMEDIATELY CARRIED TO THE DECK OF THE FRIGATE I RUSHED ABOARD
+8463-294828-0030-608: I ASKED FOR COMMANDER FARRAGUT
+8463-294828-0031-609: ONE OF THE SAILORS LED ME TO THE (AFTERDECK->AFTER DECK) WHERE I STOOD IN THE PRESENCE OF A SMART LOOKING OFFICER WHO EXTENDED HIS HAND TO ME
+8463-294828-0032-610: IN PERSON WELCOME ABOARD PROFESSOR YOUR CABIN IS WAITING FOR YOU
+8463-294828-0033-611: I WAS WELL SATISFIED WITH MY CABIN WHICH WAS LOCATED IN THE STERN AND OPENED INTO THE (OFFICERS->OFFICER'S) MESS
+8463-294828-0034-612: (WE'LL->WILL) BE QUITE COMFORTABLE HERE I TOLD CONSEIL
+8463-294828-0035-613: AND SO IF (I'D->I HAD) BEEN DELAYED BY A QUARTER OF AN HOUR OR EVEN LESS THE FRIGATE WOULD HAVE GONE WITHOUT ME AND I WOULD HAVE MISSED OUT ON THIS UNEARTHLY EXTRAORDINARY AND INCONCEIVABLE EXPEDITION WHOSE TRUE STORY MIGHT WELL MEET WITH SOME SKEPTICISM
+8463-294828-0036-614: THE WHARVES OF BROOKLYN AND EVERY PART OF NEW YORK BORDERING THE EAST RIVER WERE CROWDED WITH CURIOSITY SEEKERS
+8463-294828-0037-615: DEPARTING FROM FIVE HUNDRED THOUSAND THROATS THREE CHEERS BURST FORTH IN SUCCESSION
+8463-294828-0038-616: THOUSANDS OF HANDKERCHIEFS WERE WAVING ABOVE THESE TIGHTLY PACKED MASSES HAILING THE ABRAHAM LINCOLN UNTIL IT REACHED THE WATERS OF THE HUDSON RIVER AT THE TIP OF THE LONG (PENINSULA->PRONUNCILA) THAT FORMS NEW YORK CITY
+8555-284447-0000-2299: THEN HE RUSHED (DOWN STAIRS->DOWNSTAIRS) INTO THE COURTYARD SHOUTING LOUDLY FOR HIS SOLDIERS AND THREATENING TO PATCH EVERYBODY IN HIS DOMINIONS (IF->AT) THE SAILORMAN WAS NOT RECAPTURED
+8555-284447-0001-2300: HOLD HIM FAST (*->TO) MY MEN AND AS SOON AS I'VE HAD MY COFFEE (AND->AN) OATMEAL (I'LL->I WILL) TAKE HIM TO THE ROOM OF THE GREAT KNIFE AND PATCH HIM
+8555-284447-0002-2301: I WOULDN'T MIND A CUP (O->OF) COFFEE MYSELF SAID CAP'N BILL (I'VE->I HAVE) HAD (CONSID'BLE->CONSIDERABLE) EXERCISE THIS (MORNIN->MORNING) AND I'M (ALL READY->ALREADY) FOR (BREAKFAS->BREAKFAST)
+8555-284447-0003-2302: BUT CAP'N BILL MADE NO SUCH ATTEMPT KNOWING IT WOULD BE USELESS
+8555-284447-0004-2303: AS SOON AS THEY ENTERED THE ROOM OF THE GREAT KNIFE THE BOOLOOROO GAVE A YELL OF DISAPPOINTMENT
+8555-284447-0005-2304: THE ROOM OF THE GREAT KNIFE WAS HIGH AND BIG AND AROUND IT RAN ROWS OF BENCHES FOR THE SPECTATORS TO SIT UPON
+8555-284447-0006-2305: IN ONE PLACE AT THE HEAD OF THE ROOM WAS A RAISED PLATFORM FOR THE ROYAL FAMILY WITH ELEGANT THRONE CHAIRS FOR THE KING AND QUEEN AND SIX SMALLER BUT RICHLY UPHOLSTERED CHAIRS FOR THE SNUBNOSED PRINCESSES
+8555-284447-0007-2306: THEREFORE HER MAJESTY PAID NO ATTENTION TO (ANYONE->ANY ONE) AND NO ONE PAID ANY ATTENTION TO HER
+8555-284447-0008-2307: RICH JEWELS OF BLUE STONES GLITTERED UPON THEIR PERSONS AND THE ROYAL LADIES WERE FULLY AS GORGEOUS AS THEY WERE (HAUGHTY->HALTING) AND OVERBEARING
+8555-284447-0009-2308: (MORNIN->MORNING) GIRLS (HOPE YE FEEL->OPIEVILLE) AS WELL AS YE LOOK
+8555-284447-0010-2309: CONTROL YOURSELVES MY DEARS REPLIED THE BOOLOOROO THE WORST PUNISHMENT I KNOW HOW TO INFLICT ON (ANYONE->ANY ONE) THIS PRISONER IS ABOUT TO SUFFER (YOU'LL->YOU WILL) SEE A VERY PRETTY PATCHING MY ROYAL DAUGHTERS
+8555-284447-0011-2310: SUPPOSE IT'S (A FRIEND->OF BRAND)
+8555-284447-0012-2311: THE CAPTAIN SHOOK HIS HEAD
+8555-284447-0013-2312: WHY YOU (SAID->SIT) TO FETCH THE FIRST LIVING CREATURE WE MET AND THAT WAS (THIS BILLYGOAT->THE SPILLY GOAT) REPLIED THE CAPTAIN PANTING HARD AS HE HELD FAST TO ONE OF THE GOAT'S HORNS
+8555-284447-0014-2313: THE IDEA OF PATCHING CAP'N BILL TO A GOAT WAS VASTLY AMUSING TO HIM AND THE MORE HE THOUGHT OF IT THE MORE HE ROARED WITH LAUGHTER
+8555-284447-0015-2314: THEY LOOK SOMETHING ALIKE YOU KNOW SUGGESTED THE CAPTAIN OF THE GUARDS LOOKING FROM ONE TO THE OTHER DOUBTFULLY AND THEY'RE NEARLY THE SAME SIZE IF (YOU->HE) STAND (THE GOAT->A BOAT) ON HIS HIND LEGS THEY'VE BOTH GOT THE SAME STYLE OF WHISKERS AND THEY'RE BOTH OF (EM->THEM) OBSTINATE AND DANGEROUS SO THEY OUGHT TO MAKE A GOOD PATCH SPLENDID
+8555-284447-0016-2315: FINE GLORIOUS
+8555-284447-0017-2316: WHEN THIS HAD BEEN ACCOMPLISHED THE BOOLOOROO LEANED OVER TO TRY TO DISCOVER WHY THE FRAME ROLLED AWAY SEEMINGLY OF ITS OWN ACCORD AND HE WAS THE MORE PUZZLED BECAUSE IT HAD NEVER DONE SUCH A THING BEFORE
+8555-284447-0018-2317: AT ONCE THE GOAT GAVE A LEAP (ESCAPED->ESCAPE) FROM THE SOLDIERS AND WITH BOWED HEAD RUSHED UPON THE BOOLOOROO
+8555-284447-0019-2318: BEFORE ANY COULD STOP HIM HE BUTTED HIS MAJESTY SO FURIOUSLY THAT THE KING SOARED FAR INTO THE AIR AND TUMBLED IN A HEAP AMONG THE BENCHES WHERE HE LAY MOANING AND GROANING
+8555-284447-0020-2319: THE (GOAT'S WARLIKE->GOATS WORE LIKE) SPIRIT WAS ROUSED BY THIS SUCCESSFUL ATTACK
+8555-284447-0021-2320: THEN THEY SPED IN GREAT HASTE FOR THE DOOR AND THE GOAT GAVE A FINAL (BUTT->BUT) THAT SENT (THE->A) ROW OF ROYAL LADIES ALL DIVING INTO THE CORRIDOR IN ANOTHER TANGLE WHEREUPON THEY SHRIEKED IN A MANNER THAT TERRIFIED EVERYONE WITHIN SOUND OF THEIR VOICES
+8555-284447-0022-2321: I HAD A NOTION IT WAS YOU (MATE AS SAVED->MADE TO SEE) ME FROM THE KNIFE
+8555-284447-0023-2322: I (COULDN'T->COULDN') SHIVER MUCH (BEIN->BEING) BOUND SO TIGHT BUT WHEN I'M LOOSE I MEAN TO HAVE (JUS ONE->JUST SWUNG) GOOD SHIVER TO RELIEVE MY (FEELIN'S->FEELINS)
+8555-284447-0024-2323: COME AND GET THE BOOLOOROO SHE SAID GOING TOWARD THE BENCHES
+8555-284449-0000-2324: SO THEY WERE QUITE WILLING TO OBEY THE ORDERS OF THEIR GIRL QUEEN AND IN A SHORT TIME THE (BLASTS->BLAST) OF TRUMPETS AND ROLL OF DRUMS AND CLASHING OF CYMBALS TOLD TROT AND CAP'N BILL THAT THE BLUE BANDS HAD (ASSEMBLED->A SIMPLED) BEFORE THE PALACE
+8555-284449-0001-2325: THEN THEY ALL MARCHED OUT A LITTLE WAY INTO THE FIELDS AND FOUND THAT THE ARMY OF PINKIES HAD ALREADY FORMED AND WAS ADVANCING STEADILY TOWARD THEM
+8555-284449-0002-2326: AT THE HEAD OF THE PINKIES WERE GHIP GHISIZZLE AND BUTTON BRIGHT WHO HAD THE PARROT ON HIS SHOULDER AND THEY WERE SUPPORTED BY CAPTAIN (CORALIE->CORLEY) AND CAPTAIN (TINTINT->TINTANT) AND ROSALIE THE WITCH
+8555-284449-0003-2327: WHEN THE (BLUESKINS->BLUESKIN) SAW GHIP GHISIZZLE THEY RAISED ANOTHER GREAT SHOUT FOR HE WAS THE (FAVORITE->FAVOURITE) OF THE SOLDIERS AND VERY POPULAR WITH ALL THE PEOPLE
+8555-284449-0004-2328: SINCE LAST THURSDAY I (GHIP->*) GHISIZZLE HAVE BEEN THE LAWFUL BOOLOOROO OF THE BLUE COUNTRY BUT NOW THAT YOU ARE CONQUERED BY QUEEN TROT I SUPPOSE I AM CONQUERED TOO AND YOU HAVE NO BOOLOOROO AT ALL
+8555-284449-0005-2329: WHEN HE FINISHED SHE SAID CHEERFULLY
+8555-284449-0006-2330: DON'T WORRY SIZZLE DEAR (IT'LL->IT) ALL COME RIGHT PRETTY SOON
+8555-284449-0007-2331: NOW THEN LET'S ENTER THE CITY (AN->AND) ENJOY THE (GRAND->GREAT) FEAST (THAT'S->ITS) BEING COOKED I'M NEARLY STARVED MYSELF FOR THIS (CONQUERIN KINGDOMS->CONQUERING KINGDOM'S) IS HARD WORK
+8555-284449-0008-2332: THEN SHE GAVE ROSALIE BACK HER MAGIC RING THANKING THE KIND (WITCH->WHICH) FOR ALL SHE HAD DONE FOR THEM
+8555-284449-0009-2333: YOU ARE MATE REPLIED THE SAILOR
+8555-284449-0010-2334: IT WILL BE SUCH A SATISFACTION
+8555-284449-0011-2335: THE GUARDS HAD A TERRIBLE STRUGGLE WITH THE GOAT WHICH WAS LOOSE IN THE ROOM AND STILL WANTED TO FIGHT BUT FINALLY THEY SUBDUED THE ANIMAL AND THEN THEY TOOK THE BOOLOOROO OUT OF THE FRAME (HE WAS->WHOSE) TIED IN AND BROUGHT BOTH HIM AND THE GOAT BEFORE QUEEN TROT WHO AWAITED THEM IN THE THRONE ROOM OF THE PALACE
+8555-284449-0012-2336: (I'LL->I WILL) GLADLY DO THAT PROMISED THE NEW BOOLOOROO AND I'LL FEED THE (HONORABLE GOAT->HON GO TO) ALL THE SHAVINGS AND LEATHER AND TIN CANS HE CAN EAT BESIDES THE GRASS
+8555-284449-0013-2337: (SCUSE->EXCUSE) ME SAID TROT I NEGLECTED TO TELL YOU THAT YOU'RE NOT THE BOOLOOROO ANY MORE
+8555-284449-0014-2338: THE FORMER BOOLOOROO GROANED
+8555-284449-0015-2339: (I'LL NOT->HOW NOW) BE WICKED ANY MORE SIGHED THE OLD BOOLOOROO I'LL REFORM
+8555-284449-0016-2340: AS A PRIVATE CITIZEN I SHALL BE A MODEL OF DEPORTMENT BECAUSE IT WOULD BE DANGEROUS TO BE OTHERWISE
+8555-284449-0017-2341: WHEN FIRST THEY ENTERED THE THRONE ROOM THEY TRIED TO BE AS HAUGHTY AND SCORNFUL AS EVER BUT THE BLUES WHO WERE ASSEMBLED THERE ALL LAUGHED AT THEM AND JEERED THEM FOR THERE WAS NOT A SINGLE PERSON IN ALL THE BLUE COUNTRY WHO LOVED THE PRINCESSES THE LEAST LITTLE BIT
+8555-284449-0018-2342: SO GHIP GHISIZZLE ORDERED THE CAPTAIN TO TAKE A FILE OF SOLDIERS AND ESCORT THE RAVING BEAUTIES TO THEIR NEW HOME
+8555-284449-0019-2343: THAT EVENING TROT GAVE A GRAND BALL IN THE PALACE TO WHICH THE MOST IMPORTANT OF THE PINKIES (AND->IN) THE BLUESKINS WERE INVITED
+8555-284449-0020-2344: THE COMBINED BANDS OF BOTH THE COUNTRIES PLAYED THE MUSIC AND A FINE SUPPER WAS SERVED
+8555-292519-0000-2283: BRIGHTER THAN EARLY (DAWN'S->DAWNS) MOST BRILLIANT DYE ARE BLOWN CLEAR BANDS OF (COLOR->COLOUR) THROUGH THE SKY THAT SWIRL AND SWEEP AND MEET TO BREAK AND FOAM LIKE RAINBOW VEILS UPON A BUBBLE'S DOME
+8555-292519-0001-2284: GUIDED BY YOU HOW WE MIGHT STROLL TOWARDS DEATH OUR ONLY MUSIC ONE ANOTHER'S BREATH THROUGH GARDENS INTIMATE WITH HOLLYHOCKS WHERE SILENT POPPIES (BURN->BURNED) BETWEEN THE ROCKS BY POOLS WHERE BIRCHES BEND TO CONFIDANTS ABOVE GREEN WATERS (SCUMMED->SCUMBED) WITH (*->A) LILY PLANTS
+8555-292519-0002-2285: VENICE
+8555-292519-0003-2286: IN A SUNSET GLOWING OF CRIMSON AND GOLD SHE LIES THE GLORY OF THE WORLD A (BEACHED->BEECHED) KING'S GALLEY (WHOSE->WHO) SAILS ARE FURLED WHO IS HUNG WITH TAPESTRIES RICH AND OLD
+8555-292519-0004-2287: THE PITY THAT WE MUST COME AND GO
+8555-292519-0005-2288: WHILE THE OLD GOLD AND THE MARBLE STAYS (FOREVER->FOR EVER) GLEAMING ITS SOFT STRONG BLAZE CALM IN THE EARLY EVENING GLOW
+8555-292519-0006-2289: THE PLEASANT GRAVEYARD OF MY SOUL WITH SENTIMENTAL CYPRESS TREES AND FLOWERS IS FILLED THAT I MAY STROLL IN MEDITATION AT MY EASE
+8555-292519-0007-2290: IT IS MY HEART HUNG IN THE SKY AND NO CLOUDS EVER FLOAT BETWEEN THE (GRAVE->GRAY) FLOWERS AND MY HEART ON HIGH
+8555-292519-0008-2291: OVER THE TRACK LINED CITY STREET THE YOUNG (MEN->MAN) THE GRINNING MEN PASS
+8555-292519-0009-2292: (HO->HOME) YE SAILS THAT SEEM TO (WANDER IN->WONDER AND) DREAM FILLED MEADOWS SAY IS THE SHORE WHERE I STAND THE ONLY FIELD OF STRUGGLE OR ARE YE HIT AND BATTERED OUT THERE BY WAVES AND WIND GUSTS AS YE TACK OVER A CLASHING SEA OF WATERY ECHOES
+8555-292519-0010-2293: OLD DANCES ARE SIMPLIFIED OF THEIR YEARNING BLEACHED BY TIME
+8555-292519-0011-2294: HE HAD GOT INTO HER COURTYARD
+8555-292519-0012-2295: THROUGH THE BLACK NIGHT RAIN HE SANG TO HER WINDOW BARS
+8555-292519-0013-2296: THAT WAS BUT RUSTLING OF (DRIPPING->TRIPPING) PLANTS IN THE DARK
+8555-292519-0014-2297: SHE WAS ALONE THAT NIGHT
+8555-292519-0015-2298: HE HAD BROKEN INTO HER COURTYARD
+908-157963-0000-1321: TO FADE AWAY LIKE MORNING BEAUTY FROM HER MORTAL DAY DOWN BY THE RIVER OF (ADONA->ADONNA) HER SOFT (VOICE IS->VOICES) HEARD AND THUS HER GENTLE LAMENTATION FALLS LIKE MORNING DEW
+908-157963-0001-1322: O LIFE OF THIS OUR SPRING
+908-157963-0002-1323: WHY FADES THE LOTUS OF THE WATER
+908-157963-0003-1324: WHY FADE THESE CHILDREN OF THE SPRING
+908-157963-0004-1325: (THEL->FELL) IS LIKE A (WATRY->WATERY) BOW AND LIKE A PARTING CLOUD LIKE A REFLECTION IN A GLASS LIKE SHADOWS IN THE WATER LIKE DREAMS OF INFANTS LIKE A SMILE UPON AN (INFANTS->INFANT'S) FACE
+908-157963-0005-1326: LIKE THE DOVES (VOICE->BOYS) LIKE TRANSIENT DAY LIKE MUSIC IN THE AIR AH
+908-157963-0006-1327: AND GENTLE SLEEP THE SLEEP OF DEATH AND GENTLY HEAR THE VOICE OF HIM THAT WALKETH IN THE GARDEN IN THE EVENING TIME
+908-157963-0007-1328: THE (LILLY->LILY) OF THE VALLEY BREATHING IN THE HUMBLE GRASS (ANSWERD->ANSWERED) THE LOVELY (MAID AND->MAIDEN) SAID I AM A (WATRY->WATCHERY) WEED AND I AM VERY SMALL AND LOVE TO DWELL IN LOWLY VALES SO WEAK THE GILDED BUTTERFLY SCARCE (PERCHES->PURCHASE) ON MY HEAD YET I AM VISITED FROM HEAVEN AND HE THAT SMILES ON ALL WALKS IN THE VALLEY AND EACH MORN OVER ME SPREADS HIS HAND SAYING REJOICE THOU HUMBLE GRASS THOU (NEW BORN->NEWBORN) LILY FLOWER
+908-157963-0008-1329: THOU GENTLE MAID OF SILENT VALLEYS AND OF MODEST BROOKS FOR THOU (SHALL->SHALT) BE CLOTHED IN LIGHT AND FED WITH MORNING (MANNA->MANA) TILL (SUMMERS->SUMMER'S) HEAT MELTS THEE BESIDE THE FOUNTAINS AND THE SPRINGS TO FLOURISH IN ETERNAL VALES THEY WHY SHOULD (THEL->THOU) COMPLAIN
+908-157963-0009-1330: WHY SHOULD THE MISTRESS OF THE (VALES->VEILS) OF HAR UTTER A SIGH
+908-157963-0010-1331: SHE (CEASD->CEASED) AND (SMILD->SMILED) IN TEARS THEN SAT DOWN IN HER SILVER SHRINE
+908-157963-0011-1332: WHICH THOU DOST SCATTER ON EVERY LITTLE BLADE OF GRASS THAT SPRINGS REVIVES THE MILKED COW AND TAMES THE FIRE BREATHING STEED
+908-157963-0012-1333: BUT (THEL->THOU) IS LIKE A FAINT CLOUD KINDLED AT THE RISING SUN I VANISH FROM MY PEARLY THRONE AND WHO SHALL FIND MY PLACE
+908-157963-0013-1334: AND (WHY IT->WYAT) SCATTERS ITS BRIGHT BEAUTY (THRO->THROUGH) THE (HUMID->HUMAN) AIR
+908-157963-0014-1335: DESCEND O (*->A) LITTLE CLOUD AND HOVER BEFORE THE EYES OF (THEL->FELL)
+908-157963-0015-1336: O LITTLE CLOUD THE VIRGIN SAID I CHARGE THEE TO TELL ME WHY THOU COMPLAINEST NOW WHEN IN ONE HOUR THOU FADE AWAY THEN WE SHALL SEEK THEE BUT NOT FIND AH (THEL->FELL) IS LIKE TO THEE
+908-157963-0016-1337: I PASS AWAY YET I COMPLAIN AND NO ONE HEARS MY VOICE
+908-157963-0017-1338: THE CLOUD THEN (SHEWD->SHOWED) HIS GOLDEN HEAD AND HIS BRIGHT FORM (EMERG'D->EMERGED)
+908-157963-0018-1339: AND (FEAREST->FEAR'ST) THOU BECAUSE I VANISH AND AM SEEN NO MORE
+908-157963-0019-1340: IT IS TO TENFOLD LIFE TO LOVE TO PEACE AND RAPTURES (HOLY->WHOLLY) UNSEEN DESCENDING WEIGH MY LIGHT WINGS UPON BALMY FLOWERS AND COURT THE FAIR EYED DEW TO TAKE ME TO HER SHINING TENT THE WEEPING VIRGIN TREMBLING KNEELS BEFORE THE RISEN SUN
+908-157963-0020-1341: TILL WE ARISE (LINK'D->LINKED) IN A GOLDEN BAND AND NEVER PART BUT WALK UNITED BEARING FOOD TO ALL OUR TENDER FLOWERS
+908-157963-0021-1342: LIVES NOT ALONE NOR (OR->OF) ITSELF FEAR NOT AND I WILL CALL THE WEAK WORM FROM ITS LOWLY BED AND THOU SHALT HEAR ITS VOICE
+908-157963-0022-1343: COME FORTH WORM AND THE SILENT VALLEY TO THY PENSIVE QUEEN
+908-157963-0023-1344: THE HELPLESS WORM AROSE AND SAT UPON THE (LILLYS->LILY'S) LEAF AND THE BRIGHT CLOUD (SAILD->SAILED) ON TO FIND HIS PARTNER IN THE VALE
+908-157963-0024-1345: IMAGE OF WEAKNESS ART THOU BUT A WORM
+908-157963-0025-1346: I SEE THEY LAY HELPLESS AND NAKED WEEPING AND NONE TO ANSWER NONE TO CHERISH THEE WITH (MOTHERS->MOTHER'S) SMILES
+908-157963-0026-1347: AND SAYS THOU MOTHER OF MY CHILDREN I HAVE LOVED THEE AND I HAVE GIVEN THEE A CROWN THAT NONE CAN TAKE AWAY
+908-157963-0027-1348: AND LAY ME DOWN IN THY COLD BED AND LEAVE MY SHINING LOT
+908-157963-0028-1349: OR AN EYE OF GIFTS AND GRACES (SHOWRING->SHOWERING) FRUITS AND COINED GOLD
+908-157963-0029-1350: WHY A TONGUE (IMPRESS'D->IMPRESSED) WITH HONEY FROM EVERY WIND
+908-157963-0030-1351: WHY AN EAR A WHIRLPOOL FIERCE TO DRAW CREATIONS IN
+908-31957-0000-1352: ALL IS SAID WITHOUT A WORD
+908-31957-0001-1353: I SIT BENEATH THY LOOKS AS CHILDREN DO IN THE NOON SUN WITH SOULS THAT TREMBLE THROUGH THEIR HAPPY EYELIDS FROM AN UNAVERRED YET (PRODIGAL->CHRONICAL) INWARD JOY
+908-31957-0002-1354: I DID NOT WRONG MYSELF SO BUT I PLACED A WRONG ON THEE
+908-31957-0003-1355: WHEN CALLED BEFORE I TOLD HOW HASTILY I DROPPED MY FLOWERS OR (BRAKE->BREAK) OFF FROM A GAME
+908-31957-0004-1356: SHALL I NEVER MISS HOME TALK AND BLESSING AND THE COMMON KISS THAT COMES TO EACH IN TURN NOR COUNT IT STRANGE WHEN I LOOK UP TO DROP ON A NEW RANGE OF WALLS AND FLOORS ANOTHER HOME THAN THIS
+908-31957-0005-1357: ALAS I HAVE GRIEVED SO I AM HARD TO LOVE
+908-31957-0006-1358: OPEN THY HEART WIDE AND FOLD WITHIN THE WET WINGS OF THY DOVE
+908-31957-0007-1359: COULD IT MEAN TO LAST A LOVE SET PENDULOUS BETWEEN SORROW AND SORROW
+908-31957-0008-1360: NAY I RATHER THRILLED DISTRUSTING EVERY LIGHT THAT SEEMED TO GILD THE ONWARD PATH (AND FEARED->IN FEAR) TO (OVERLEAN->OVERLENE) A FINGER EVEN
+908-31957-0009-1361: AND THOUGH I HAVE GROWN SERENE AND STRONG SINCE THEN I THINK THAT GOD HAS WILLED A STILL RENEWABLE FEAR
+908-31957-0010-1362: O LOVE O TROTH
+908-31957-0011-1363: AND LOVE BE FALSE
+908-31957-0012-1364: IF HE TO KEEP ONE OATH MUST LOSE ONE JOY BY HIS LIFE'S STAR FORETOLD
+908-31957-0013-1365: SLOW TO WORLD GREETINGS QUICK WITH ITS O LIST WHEN THE (ANGELS->ANGEL) SPEAK
+908-31957-0014-1366: A RING OF AMETHYST I COULD NOT WEAR HERE PLAINER TO MY SIGHT THAN THAT FIRST KISS
+908-31957-0015-1367: THAT WAS THE CHRISM OF LOVE WHICH (LOVE'S->LOVES) OWN CROWN WITH SANCTIFYING SWEETNESS DID (PRECEDE->PROCEED) THE THIRD UPON MY LIPS WAS FOLDED DOWN (IN PERFECT->IMPERFECT) PURPLE STATE SINCE WHEN INDEED I HAVE BEEN PROUD AND SAID MY LOVE MY OWN
+908-31957-0016-1368: DEAREST TEACH ME SO TO POUR OUT GRATITUDE AS THOU DOST GOOD
+908-31957-0017-1369: MUSSULMANS AND (GIAOURS->GEYORS) THROW KERCHIEFS AT A SMILE AND HAVE NO RUTH FOR ANY WEEPING
+908-31957-0018-1370: BUT THOU ART NOT SUCH A LOVER MY BELOVED
+908-31957-0019-1371: THOU CANST WAIT THROUGH SORROW AND SICKNESS TO BRING SOULS TO TOUCH AND THINK IT SOON WHEN OTHERS CRY TOO LATE
+908-31957-0020-1372: I (THANK->THINK) ALL WHO HAVE LOVED ME IN THEIR HEARTS WITH THANKS AND LOVE FROM MINE
+908-31957-0021-1373: OH TO SHOOT MY SOUL'S FULL MEANING INTO FUTURE YEARS THAT THEY SHOULD LEND IT UTTERANCE AND SALUTE LOVE THAT ENDURES FROM LIFE THAT DISAPPEARS
+908-31957-0022-1374: THEN I LONG TRIED BY NATURAL ILLS RECEIVED THE COMFORT FAST WHILE BUDDING AT THY SIGHT MY PILGRIM'S STAFF GAVE OUT GREEN LEAVES WITH MORNING DEWS (IMPEARLED->IMPERILLED)
+908-31957-0023-1375: I LOVE THEE FREELY AS MEN STRIVE FOR RIGHT I LOVE THEE PURELY AS THEY TURN FROM (PRAISE->PREISE)
+908-31957-0024-1376: I LOVE THEE WITH THE PASSION PUT TO USE IN MY OLD (GRIEFS->GREEDS) AND WITH MY CHILDHOOD'S FAITH
+908-31957-0025-1377: I LOVE THEE WITH A LOVE I SEEMED TO LOSE WITH MY LOST SAINTS I LOVE THEE WITH THE BREATH SMILES TEARS OF ALL MY LIFE AND IF GOD CHOOSE I SHALL BUT LOVE THEE BETTER AFTER DEATH
+
+SUBSTITUTIONS: count ref -> hyp
+32 AND -> IN
+19 IN -> AND
+17 THE -> A
+14 A -> THE
+10 AN -> AND
+7 THIS -> THE
+6 RODOLFO -> RUDOLPHO
+6 OF -> A
+6 MAN -> MEN
+6 I'VE -> I
+6 ANYONE -> ANY
+5 SILVIA -> SYLVIA
+5 O -> OF
+5 I'M -> I
+5 ANDERS -> ANDREWS
+4 TWO -> TOO
+4 TIMAEUS -> TIMAS
+4 SOAMES -> SOLMES
+4 ONE -> WON
+4 MAINHALL -> MAIN
+4 LEOCADIA -> LOCATIA
+3 WERE -> WHERE
+3 VALLIERE -> VALLIERS
+3 TRAVELING -> TRAVELLING
+3 TOWARDS -> TOWARD
+3 TODAY -> TO
+3 THEL -> FELL
+3 THEIR -> THERE
+3 THEE -> THE
+3 THEATER -> THEATRE
+3 THE -> THEIR
+3 THAT -> IT
+3 SOMEONE -> SOME
+3 ROUND -> AROUND
+3 PRACTISE -> PRACTICE
+3 METER -> METRE
+3 MEN -> MAN
+3 MADAM -> MADAME
+3 KAFFAR -> KAFFIR
+3 IS -> AS
+3 HOLLAN -> HOLLAND
+3 EMIL -> AMIEL
+3 DOBRYNA -> DOBRINA
+3 CRESSWELL -> CRESWELL
+3 COLOR -> COLOUR
+3 BRITANNULA -> BRITAIN
+3 BATTLEAX -> BATTLE
+2 YOUR -> YOU
+2 YOU'RE -> YOU
+2 WOULD -> WILL
+2 WHITTAWS -> WIDOWS
+2 WHERE -> WERE
+2 WHEN -> ONE
+2 WELL -> WHILE
+2 VAPOURS -> VAPORS
+2 VANDERPOOL -> VAN
+2 UPON -> UP
+2 TONIGHT -> TO
+2 TO -> TWO
+2 TO -> THROUGH
+2 TIMAEUS -> TO
+2 THEY -> THERE
+2 THEY -> THE
+2 THEN -> THAN
+2 THEN -> AND
+2 THEL -> THOU
+2 THEIR -> THE
+2 THE -> THIS
+2 THE -> THAT
+2 THAT -> THE
+2 THAN -> THAT
+2 TABU -> TABOO
+2 SYMPOSIUM -> SUPPOSIUM
+2 SOMETIME -> SOME
+2 SOLON -> SOLEMN
+2 SLANG -> SLING
+2 SIF -> SIFT
+2 SHE -> YOU
+2 SEEM -> SEEMED
+2 SAIL -> SALE
+2 RESERVE -> RESERVED
+2 READ -> RED
+2 PLATONISTS -> PLATINISTS
+2 PARLOR -> PARLOUR
+2 OR -> A
+2 O -> OH
+2 NOW -> THOU
+2 NO -> NOR
+2 NO -> KNOW
+2 NAOMI -> THEY
+2 MUNNY -> MONEY
+2 MORNIN -> MORNING
+2 LECOMPTE -> LECOMTE
+2 LEAVENWORTH -> LEVINWORTH
+2 LEAVENWORTH -> LEVIN
+2 KAFFAR'S -> KAFFIR'S
+2 IT'S -> ITS
+2 IT'S -> IT
+2 IT -> YOU
+2 IT -> HE
+2 IT -> AND
+2 IS -> HIS
+2 INTO -> AND
+2 IN -> A
+2 IMPRESSED -> IMPRESS
+2 I'LL -> I
+2 I'D -> I
+2 I -> I'M
+2 HOLY -> WHOLLY
+2 HOLBEIN -> HOLBINE
+2 HILDA -> HELDA
+2 HER -> THE
+2 HER -> A
+2 HE'S -> HE
+2 HE -> WE
+2 HAS -> HAD
+2 HALLO -> HELLO
+2 GREY -> GRAY
+2 GRAY -> GREY
+2 GILLIKINS -> GYLICANS
+2 FOUNDED -> FOUND
+2 FORMALLY -> FORMERLY
+2 FOREVER -> FOR
+2 FEELING -> FILLING
+2 FAIRVIEW -> FAIR
+2 EVERYONE -> EVERY
+2 EVERYDAY -> EVERY
+2 DISSENT -> DESCENT
+2 DID -> DEAD
+2 DEFINE -> TO
+2 DE -> THE
+2 CRITIAS -> CRITIUS
+2 COURT'S -> COURTS
+2 COURT -> COURTYARD
+2 CHAISE -> CHASE
+2 CARL -> KARL
+2 BRAKE -> BREAK
+2 BOGGS -> BOX
+2 BEHAVIOUR -> BEHAVIOR
+2 BEATITUDE -> BE
+2 BANNISTER -> BANISTER
+2 AYE -> I
+2 AY -> I
+2 AT -> IT
+2 ASTOR -> ASTER
+2 AS -> A
+2 ANOTHER -> THE
+2 ANDERS -> ANDREW'S
+2 ANDELLA -> AND
+2 AND -> AS
+2 AND -> AN
+2 ALEXANDRA -> ALEXANDER
+2 A -> TO
+2 A -> OF
+1 ZORA'S -> ZORAS
+1 ZORA -> ZORAH
+1 ZORA -> SORA
+1 ZOOF'S -> ZOV'S
+1 YOUR -> OR
+1 YOU'RE -> BOX
+1 YOU'LL -> YOU
+1 YOU'LL -> DAMNLY
+1 YOU -> YE
+1 YOU -> WHO
+1 YOU -> HE
+1 YEARNING -> YEARNIN
+1 YE -> YEA
+1 XAVIER -> ZEVIR
+1 XAVIER -> ZEVIER
+1 XAVIER -> SAVIER
+1 WYLDER -> WHILE
+1 WRITE -> RIGHT
+1 WOULD -> WHAT
+1 WOULD -> WERE
+1 WORST -> WORSE
+1 WORLD -> WORLD'S
+1 WOODS -> WOOD
+1 WOODBEGIRT -> WOULD
+1 WOOD -> WOODCUTTERS
+1 WONDERING -> WANDERING
+1 WOMAN'S -> WOMEN'S
+1 WITHIN -> WITH
+1 WITHES -> WIDTHS
+1 WITH -> WHOSE
+1 WITH -> WHICH
+1 WITH -> WHEN
+1 WITH -> WERE
+1 WITCH -> WHICH
+1 WINTER -> WIN
+1 WIND -> SQUINT
+1 WILLS -> WILDS
+1 WILL -> WOULD
+1 WIFE -> WHITE
+1 WHY -> WYAT
+1 WHY -> MY
+1 WHOSE -> WHO
+1 WHOLLY -> HOLY
+1 WHO'S -> WHOSE
+1 WHO -> WHOSE
+1 WHITTAWD -> WID
+1 WHITTAW -> WIDOW
+1 WHITE -> WIGHT
+1 WHIRLPOOL -> WAR
+1 WHERE -> WITH
+1 WHERE -> WHERE'S
+1 WHERE -> WEAR
+1 WHEN -> AND
+1 WHELPS -> WHELMS
+1 WHATEVER -> WHATSOEVER
+1 WHAT'S -> WHAT
+1 WHAT -> WHEN
+1 WHAT -> ONE
+1 WHALE -> WELL
+1 WHALE -> WAIL
+1 WET -> WHITE
+1 WESTMERE -> WESTMER
+1 WESLEY -> WESTLEY
+1 WERE -> RECALL
+1 WERE -> ARE
+1 WELL -> WILL
+1 WELL -> FOR
+1 WELCOMED -> WELCOME
+1 WEDNESDAY -> WINDSAY
+1 WEBS -> WHIPS
+1 WEATHER -> WHETHER
+1 WEAR -> WHERE
+1 WEAKLY -> WEEKLY
+1 WE'VE -> WITH
+1 WE'RE -> WERE
+1 WE'RE -> WE
+1 WE'LL -> WILL
+1 WE'LL -> OR
+1 WE -> WE'VE
+1 WE -> SEA
+1 WAVES -> WAVE
+1 WATRY -> WATERY
+1 WATRY -> WATCHERY
+1 WATERMILL -> WATER
+1 WAS -> VIEWS
+1 WAS -> IS
+1 WARRENTON'S -> WARRENTONS
+1 WARLIKE -> WORE
+1 WANDERING -> WONDERING
+1 WANDER -> WONDER
+1 VOLTAIRE -> OLD
+1 VOICE -> VOICES
+1 VOICE -> BOYS
+1 VILLEROY -> VILLAIRY
+1 VIGNETTE -> VINEY
+1 VICARIOUS -> YCARIOUS
+1 VIADUCT -> VIADUC
+1 VERY -> VEREMENT
+1 VERSE -> FIRST
+1 VERNE'S -> VERNS
+1 VAUDOIS -> FAUDOIR
+1 VARIABILITY -> VERY
+1 VANES -> VEINS
+1 VANDERPOOLS -> VANDER
+1 VALOR -> VALOUR
+1 VALLEYED -> VALID
+1 VALES -> VEILS
+1 UTTER -> OUT
+1 UTAH -> UTA
+1 UPON -> ON
+1 UP -> UPSTAIRS
+1 UNWARILY -> THEN
+1 UNTO -> INTO
+1 UNSEPARATED -> ON
+1 UNLIKE -> I
+1 UNJUST -> UNJUSTIFILL
+1 UNFINISHED -> UNFINISHANCES
+1 UNEXCEPTIONABLY -> UNEXCEPTIONALLY
+1 UNDERGROUND -> UNDER
+1 UNCLENCHED -> CLENCHED
+1 UNC -> YUNK
+1 UNC -> UNCONOCTED
+1 UNC -> UNCAN
+1 UNC -> UN
+1 UN -> AND
+1 UD -> HAD
+1 TWO -> TUTRILOGIES
+1 TWO -> TO
+1 TWITE -> STRIKE
+1 TURNS -> TURNED
+1 TURNOVER -> TURN
+1 TURNER'S -> TURNERS
+1 TUPPENY -> TOPPENNY
+1 TRY -> TRIED
+1 TREDDLESTON -> TREADLESTON
+1 TREBLE -> TROUBLE
+1 TRAVELERS -> TRAVELLERS
+1 TRAVELER -> TRAVELLERS
+1 TOWELLING -> TOWELINGS
+1 TOULD -> DID
+1 TOPEKA -> TOPICA
+1 TOP -> TOPRUNG
+1 TOOMS -> TOMB'S
+1 TOO -> TWO
+1 TONNAY -> TOURNISHER
+1 TONNAY -> TONY
+1 TOILETTE -> TOILET
+1 TO -> WHOSE
+1 TO -> UP
+1 TO -> TOO
+1 TO -> OF
+1 TO -> INTO
+1 TO -> IN
+1 TO -> DOES
+1 TO -> A
+1 TIRESOME -> TO
+1 TINTORET -> TINTARETTE
+1 TINTINT -> TINTANT
+1 TIME -> YOU
+1 TIMAEUS -> TIMIUS
+1 TIMAEUS -> TIMIRS
+1 TIMAEUS -> TIMEUS
+1 TIBI -> TIBEE
+1 THUS -> LUST
+1 THRO -> THROUGH
+1 THOUSAND -> THOUSANDTH
+1 THOUGHT -> BOUGHT
+1 THOUGH -> THE
+1 THORLEIF -> TORE
+1 THORKEL -> TORQUAL
+1 THORKEL -> TORKO
+1 THORKEL -> TORCAL
+1 THIS -> OSTENSITY
+1 THINKS -> THINK
+1 THINGS -> THANKS
+1 THEY -> MAY
+1 THEY -> FATE
+1 THEREIN -> THEY
+1 THERE -> THERE'S
+1 THERE -> THEIR
+1 THEN -> IN
+1 THEM -> THE
+1 THEM -> HIM
+1 THEIR -> THEY'RE
+1 THEE'S -> THESE
+1 THE -> WHO
+1 THE -> TO
+1 THE -> THEY
+1 THE -> THEATILITY
+1 THE -> IN
+1 THE -> DENOTTINGHAM
+1 THE -> BUT
+1 THAT'S -> ITS
+1 THAT -> THAT'S
+1 THAT -> THAN
+1 THANK -> THINK
+1 THAN -> THEN
+1 THAN -> IN
+1 TECHNIQUE -> TYPE
+1 TEA -> T
+1 TARANTULA -> TURANSULA
+1 TALKERS -> TALK
+1 TABU -> BOO
+1 TABLE -> TABLECLOTH
+1 SWOONS -> SWOON
+1 SWEEP -> SWEPT
+1 SWAN -> SWAY
+1 SUSPICIONS -> SUSPICION
+1 SURFACES -> SERVICES
+1 SUPERFLUOUS -> SUPERVOUS
+1 SUMMONED -> SUMMON
+1 SUMMERS -> SUMMER'S
+1 SUITCASE -> SUIT
+1 STYLE -> STYLANT
+1 STREAMLINE -> STREAM
+1 STRAIN -> STRAYNE
+1 STRAIGHT -> STRAIT
+1 STORY'S -> STORIES
+1 STOCKBROKER -> DOCKBROKER
+1 STEPHANOS -> STUFFANOS
+1 STEPHANOS -> STEPHANO'S
+1 STEEL'D -> STEELED
+1 STEADY -> STUDY
+1 STATE -> STATES
+1 STARTS -> START
+1 STANDS -> STAND
+1 STAIR -> STARE
+1 STAGECRAFT -> STAGE
+1 STAGE -> STEED
+1 STAGE -> SAGE
+1 SPRING -> SPRANG
+1 SPRAGUE -> BROGG
+1 SPLENDOR -> SPLENDOUR
+1 SPLENDET -> SPLENDID
+1 SPIN -> SPEND
+1 SPELLED -> SPELL'D
+1 SPECIALISED -> SPECIALIZED
+1 SOUTHEY'S -> SO
+1 SOUTHEY -> SELVIE
+1 SOUTHEY -> SALVIE
+1 SOU -> SOUS
+1 SORREL -> SURREL
+1 SOOTHED -> SOOTHE
+1 SON -> FUN
+1 SOMETHING -> SOMETHIN
+1 SOMETHING -> SOME
+1 SOMETHING -> OR
+1 SOME -> SOMETIME
+1 SOME -> CROSS
+1 SOLON'S -> SILENCE
+1 SOLON -> SULLEN
+1 SOLON -> SOLID
+1 SOLILOQUY -> SOLOQUY
+1 SOAMES -> PSALMS
+1 SO -> SODIN
+1 SO -> SIR
+1 SMILD -> SMILED
+1 SMELLS -> MILLS
+1 SLEEVE -> STEVE
+1 SLEEP -> SLEEVE
+1 SLANG -> SLAYING
+1 SKILLFUL -> SKILFUL
+1 SKEPTICAL -> SCEPTICAL
+1 SIZE -> SIZED
+1 SITTING -> CITY
+1 SIR -> ARE
+1 SINCE -> SINS
+1 SIN -> IN
+1 SILENT -> SILAGE
+1 SIGHT -> SIGHTSEERS
+1 SIGHED -> SIDE
+1 SHUTTING -> SHEDDING
+1 SHOWRING -> SHOWERING
+1 SHOULD -> WOULD
+1 SHODDY -> SHOTTY
+1 SHIP -> SHIP'S
+1 SHEWD -> SHOWED
+1 SHERIFF -> SHERIFF'S
+1 SHELL -> CHILL
+1 SHE'S -> SHE
+1 SHARPS -> SHARP'S
+1 SHARP'ST -> SHARPEST
+1 SHAPEN -> SHAKEN
+1 SHAPELY -> SHABBLY
+1 SHALL -> SHALT
+1 SHABATA -> CHEBATA
+1 SERVICEABILITY -> SURFABILITY
+1 SERVE -> SERVED
+1 SERVANT -> SERVANTS
+1 SENTENCES -> SENTENCE
+1 SENT -> SET
+1 SENCE -> SINCE
+1 SEMON'S -> SIMMONS
+1 SEEMS -> SEEMED
+1 SEATING -> SITTING
+1 SEA -> SEAT
+1 SCYTHE -> SIGH
+1 SCUTCHEON -> DUCHEN
+1 SCUSE -> EXCUSE
+1 SCUMMED -> SCUMBED
+1 SCRAPBOOKS -> SCRAP
+1 SCOUTING -> SCOUT
+1 SCHOOL -> SCHOOLS
+1 SCHOOL -> SCHOOLBOYS
+1 SCEVRA -> SKRA
+1 SCEURA -> SKURA
+1 SCENT -> SIN
+1 SCENE -> SEEN
+1 SCATHE -> SCATH
+1 SCAROONS -> SCARONS
+1 SAW -> SAUL
+1 SAW -> SALL
+1 SAVED -> SEE
+1 SAUVEUR -> SAVERE
+1 SATE -> SAT
+1 SANG -> SAYING
+1 SALINE -> SAILING
+1 SALIENT -> SAILOR
+1 SAILD -> SAILED
+1 SAIL -> SILL
+1 SAID -> SIT
+1 RUFUS -> RUFFUS
+1 RUE -> GRUE
+1 ROUTE -> ROUT
+1 ROSSETER -> ROSSOTER
+1 ROOTS -> WOODS
+1 ROI -> ROY
+1 ROGERS'S -> ROGERS
+1 ROERER -> ROAR
+1 RODOLFO'S -> RIDOLPH'S
+1 RODOLFO -> RUDOLPU
+1 RODOLFO -> RUDOLPHAL
+1 ROCKED -> ROCK
+1 ROBIN'S -> ROBINS
+1 RHONE -> ROAN
+1 REWEIGHED -> REWAYED
+1 REMOVE -> MOVED
+1 REMOV'D -> REMOVED
+1 REMEMBER -> REMEMBERED
+1 REMARK -> REMARKED
+1 REMAINED -> REMAINING
+1 REMAIN -> REMAINED
+1 RELOCATED -> RE
+1 RELIES -> REALIZE
+1 REIGNED -> RAINED
+1 REGAINED -> REGAIN
+1 REFUSED -> REFUSE
+1 REENFORCEMENTS -> REINFORCEMENTS
+1 REEDER -> READER
+1 RED -> READ
+1 RECORD -> RECORDS
+1 RECOGNISED -> RECOGNIZED
+1 REBUK'D -> REBUKED
+1 RANCOR -> RANK
+1 QUINSON -> QUINCENT
+1 QUASI -> COURSE
+1 QUASH -> CASH
+1 QUART -> COURT
+1 PYTHAGOREANS -> PITHAGORIANS
+1 PUTTIN -> PUTTING
+1 PURSE -> PERSON
+1 PURPOSED -> PURPOSE
+1 PURIST -> PUREST
+1 PSALM -> SUM
+1 PROVES -> PROVED
+1 PROSELYTING -> PROSELLING
+1 PROSCRIBED -> PRESCRIBED
+1 PRODIGAL -> CHRONICAL
+1 PRIOR -> PRAYER
+1 PREVENT -> PRESENT
+1 PREVAILED -> PREVAIL
+1 PRETENSE -> PRETENCE
+1 PRECONCEIVED -> FREQUENCY
+1 PRECIEUSES -> PURSUS
+1 PRECEDE -> PROCEED
+1 PRE -> PRESOCRATIC
+1 PRAISE -> PREISE
+1 PRAISE -> PHRASE
+1 PRAIRIE -> PRAIRI
+1 PRACTICE -> PRACTISE
+1 POWER -> BOWER
+1 POSTERITY -> PROSTERITY
+1 POSITIVELY -> WAS
+1 POPHAM -> WAS
+1 POPHAM -> POPPUM
+1 POLAR -> POLLAR
+1 POISON'D -> POISONED
+1 POINT -> BLINT
+1 POETESS -> POETES
+1 PLURAL -> PEARL
+1 PLESIOSAURUS -> PLUSIASURUS
+1 PLEASANCE -> PLEASANTS
+1 PLAITS -> PLATES
+1 PLAIN -> PLAYING
+1 PLACE -> PLACES
+1 PIERC'D -> PIERCED
+1 PICK -> PICTURES
+1 PHAEDRUS -> FEEDRESS
+1 PERCHES -> PURCHASE
+1 PENINSULA -> PRONUNCILA
+1 PEGRENNE -> PAGRIN
+1 PEARL'S -> PEARLS
+1 PATIENTS -> PATIENCE
+1 PATIENCE -> PATIENT
+1 PASSED -> PAST
+1 PASSAGE -> PASSAGEWAY
+1 PASCHAL -> PASSIONAL
+1 PARTICLES -> PARTICLE
+1 PARSONS -> PARSON'S
+1 PARSONS -> PARSON
+1 PAROQUET -> PERICE
+1 PARAPHERNALIA -> PAIR
+1 PARALLELOGRAM -> PARALLELLOGRAM
+1 PAPAL -> PEPPEL
+1 PANTS -> HANDS
+1 PANE -> PAIN
+1 PAIN -> HAYNE
+1 OZMA -> OSMO
+1 OWEN -> OWENAIRS
+1 OVERLEAN -> OVERLENE
+1 OVER -> OVERTRAY
+1 OUTSTRIP -> OUTSTRIPPED
+1 OUTRAGE -> OUTRAGED
+1 OUT -> AT
+1 OUR -> HER
+1 OUR -> A
+1 OUGHTER -> ORDERED
+1 OTTLEY'S -> OUTLEY'S
+1 OTHERWORLDLY -> OTHER
+1 OTHER -> OTTER
+1 OTHER -> ARE
+1 OSH -> I
+1 OSAGE -> O
+1 ORDERED -> ORDER
+1 OR -> WERE
+1 OR -> SCENT
+1 OR -> ORDER
+1 OR -> OF
+1 OR -> FOR
+1 OPHELIA -> OF
+1 OPAQUE -> OPE
+1 ONTO -> ON
+1 ONE -> SWUNG
+1 ON -> UNWARRANTON'S
+1 ON -> UNLIKE
+1 ON -> ONGOLATIONS
+1 ON -> ANOTHER
+1 ON -> ANGULATIONS
+1 OMELETTE -> OMELET
+1 OLIVE'S -> OLIVES
+1 OLIVE'S -> ALL
+1 OLAF -> ALL
+1 OH -> O
+1 OH -> I'LL
+1 OFFICES -> OFFICERS
+1 OFFICERS -> OFFICER'S
+1 OFFICERS -> OFFICER
+1 OFFENSES ->
OFFENCES +1 OFF -> OPT +1 OF -> OTHOR +1 OF -> OR +1 OF -> IS +1 OF -> EVENS +1 OCCUPANTS -> OCCUPANT +1 OBSERVED -> OBSERVE +1 OARS -> WARS +1 OAKS -> YOLKS +1 O'ER -> OR +1 NUMIDIA -> MEDIA +1 NOW -> NO +1 NOVEL'S -> NOVELS +1 NOUGHT -> NOT +1 NOTTINGHAM -> NODDING +1 NOTTINGHAM -> ARE +1 NOTTINGHAM -> APPRENTICED +1 NOTHIN -> NOTHING +1 NOT -> OUGHT +1 NOT -> NOW +1 NORTHWEST -> NORTH +1 NORTHWARDS -> NORTHWARD +1 NOR -> OR +1 NON -> NONCOMPOSTER +1 NODS -> GNAWEDS +1 NINE -> NOT +1 NEW -> NEWBORN +1 NET -> NED +1 NEO -> NEW +1 NEO -> NEOPLATANISTS +1 NELLY -> NELLIERS +1 NEIGHBOUR -> NEIGHBOR +1 NEIGHBORHOOD -> NEIGHBOURHOOD +1 NEIGHBOR -> NEIGHBOUR +1 NEARER -> NEAR +1 NE'ER -> NEVER +1 NAOMI -> NOW +1 NAMED -> NAME +1 N -> THAN +1 MY -> MIGALLATIONS +1 MY -> I +1 MY -> BY +1 MOUTHED -> MOUTH +1 MOURN -> MOURNED +1 MOUNTED -> MOUNTAIN +1 MOTHERS -> MOTHER'S +1 MORMONISM -> WOMENISM +1 MONTMARTRE -> MONTMARTRA +1 MONTMARTRE -> MONT +1 MONTFICHET -> MONT +1 MONSTERS -> MASTERS +1 MOMBI -> MOMBY +1 MOLDED -> MOULDED +1 MOHICAN -> MOHICANS +1 MO -> MOLD +1 MISTS -> MIST +1 MISTER -> THE +1 MISTER -> MISCHIAGO +1 MIST -> MISTS +1 MISSOURIANS -> MISERIES +1 MISS -> MISTER +1 MISS -> MISSY +1 MINT -> MENT +1 MINE -> MIND +1 MILLION'D -> MILLIONED +1 MILLIMETER -> MILLAMETER +1 MILLIGRAM -> MILAGRAM +1 MILITATED -> MITIGATED +1 MILES -> MYLES +1 MIKE -> MICHAEL +1 METERS -> METRES +1 METER -> METERPLATES +1 METER -> METEOR +1 METAL -> MEDAL +1 MERSEY -> MERCY +1 MERRY -> MARRIED +1 MERGANSER -> MERGANCER +1 MERCHISTON -> MURCHISTON +1 MEND -> GOODN'T +1 MEN -> AMEN +1 MEET -> MET +1 MEALYBACK -> MEALEY +1 MEADOWCROFT'S -> METICOFF'S +1 MEADOWCROFT -> MEDICRAFT +1 MAY -> THEY +1 MAY -> MAYBE +1 MAUSOLEUM -> MUZOLEUM +1 MATE -> MADE +1 MASTERY -> MYSTERY +1 MASTERS -> MASTER +1 MASTER'S -> MASTERS +1 MARY -> MERRY +1 MARVELOUS -> MARVELLOUS +1 MARSHALLED -> MARSHALED +1 MARSHALL -> MARTIAL +1 MARSHALL -> MARSHAL +1 MARKED -> MARKET +1 MARIVAUX -> MARAVO +1 MARGOLOTTE -> MARGOLOTT +1 MARAIS -> MARAE +1 MANY -> MEN +1 MANSERVANT -> MAN'S +1 MANNA -> MANA +1 MANIFESTED -> MANIFEST +1 MAIDS -> MATES +1 MAID -> MAIDEN +1 MACDONALDS -> MAC +1 MAC -> MC +1 MABEL -> MAYBEL +1 LYNCHINGS -> LUNCHINGS +1 LUTHER'S -> LUTHERS +1 LUIS -> LOUIS +1 LUBRICATE -> LUBRICADE +1 LOWER -> LOWERED +1 LOVE'S -> LOVES +1 LOUIS -> LOUISE +1 LOU'S -> LOOSE +1 LOSS -> LOST +1 LORNE -> LORN +1 LONG -> LAWN +1 LIVES -> LIES +1 LIVERIED -> LIVERYED +1 LITERALLY -> THAT +1 LITER -> LEADER +1 LINK'D -> LINKED +1 LINE -> LIE +1 LILLYS -> LILY'S +1 LILLY -> LILY +1 LILBURN -> LITTLE +1 LIGHT -> LIGHTFOOTED +1 LETS -> THAT'S +1 LET'S -> ITS +1 LESSER -> LESS +1 LEOCADIA'S -> LOCATEOUS +1 LEOCADIA -> LOCALIA +1 LEOCADIA -> LE +1 LEOCADI -> LOCATIA +1 LEFT -> LAUGHED +1 LEFRANK -> LE +1 LEAVING -> LEAPING +1 LEAVE -> LIVE +1 LEASED -> LEAST +1 LAUGHED -> THEIR +1 LARKSPUR -> LARKSBURG +1 LARKSPUR -> LARKS +1 LANTHORN -> LANTERN +1 LAND -> LANDA +1 LAMBENT -> LAMENT +1 LALLIE -> LALLY +1 LAKE -> LEAK +1 LABOUR -> LABOR +1 LA -> LAPE +1 KNOWS -> NOSE +1 KNOW -> KNOWS +1 KNEW -> NEW +1 KNEED -> NEED +1 KNEE -> KNEEP +1 KIRTLAND -> CURTLEND +1 KINGDOMS -> KINGDOM'S +1 KING'S -> KING +1 KICK -> KICKAPOOS +1 KEYNOTE -> KEEN +1 KESWICK -> KEZWICK +1 KEOGH -> KIOPH +1 KATHLEEN -> CATHERINE +1 JUS -> JUST +1 JEWELER'S -> JEWELLER'S +1 JAW -> JOB +1 JASPER -> JAPS +1 JANE'S -> JANE +1 JAIL -> TRAIL +1 JAGO -> YAGO +1 JAGO -> TRIAGO +1 JAGO -> SIP +1 JAGO -> IAGO +1 JACK -> JACKKNIFE +1 ITS -> IT'S +1 ITS -> HIS +1 IT'S -> TO +1 IT'LL -> IT 
+1 IT -> TWASN'T +1 IT -> TO +1 IT -> ITS +1 IT -> BUT +1 IT -> AT +1 IS -> WAS +1 IS -> IT'S +1 IS -> IT +1 IS -> HAS +1 IRON'S -> IRON +1 INVENTORS -> IN +1 INTRENCHMENT -> ENTRENCHMENT +1 INTERESTS -> INTRICTS +1 INTENTS -> INTENSE +1 INNERLOCHY -> INERLOCKY +1 INNERLOCHY -> IN +1 INJURED -> INJURE +1 INJURED -> ANCIENT +1 INFERENCE -> EFFERENCE +1 INFANTS -> INFANT'S +1 INCULCATED -> INCALCATED +1 INCLOSED -> ENCLOSED +1 INCERTAINTY -> IN +1 INCANDESCENT -> INCONDESCENT +1 INACTION -> AN +1 IN -> ON +1 IN -> OF +1 IN -> IT +1 IN -> IMPERFECT +1 IN -> AN +1 IMPRESSES -> IMPRESS +1 IMPRESS'D -> IMPRESSED +1 IMPOSED -> AND +1 IMPLORES -> IMPLIES +1 IMPEARLED -> IMPERILLED +1 IMMATURE -> IMMATOR +1 IMBIBED -> IBED +1 IKE -> LIKE +1 IF -> OF +1 IF -> AT +1 IDIOSYNCRATICALLY -> IDIO +1 ICHTHYOSAURUS -> ITTHIASORIS +1 ICHTHYOSAURUS -> ICTUSORIS +1 ICHTHYOSAURUS -> ICT +1 I'M -> ON +1 I'LL -> HOW +1 I -> THY +1 I -> SAD +1 I -> I'VE +1 HUNTLEY -> HUNTLY +1 HUMOUR -> HUMOR +1 HUMOR -> HUMOUR +1 HUMID -> HUMAN +1 HOW -> HALL +1 HOUSECLEANING -> HOUSE +1 HOTBED -> HOT +1 HORTON -> WHARTON +1 HORSEPLAY -> HORSE +1 HORACE -> HORRANCE +1 HOPKINS'S -> HOPKINS +1 HOPES -> HELPS +1 HOPE -> OPIEVILLE +1 HONOURED -> HONORED +1 HONOUR -> HONOR +1 HONORIFIC -> UNERRIFIC +1 HONORABLE -> HON +1 HON -> HONOURABLE +1 HOLOCAUST -> HOHLAST +1 HOLMES -> HOMES +1 HOLLOW -> HOLLOWED +1 HOLD -> ALL +1 HO -> HOME +1 HIT -> HID +1 HIS -> A +1 HIM -> EM +1 HILDA'S -> HELDA'S +1 HILDA -> HILDER +1 HIGHEST -> HOUSE +1 HIGH -> HIGHER +1 HIDALGO -> HADALGO +1 HETTY -> HETTY'S +1 HERE -> THERE +1 HERE -> HARRY +1 HERACLEITUS -> HERACLITUS +1 HER -> THERE +1 HENCHMEN -> HENCHMAN +1 HEN -> HENLOORD +1 HEN -> HANDLED +1 HELPED -> HELP +1 HELLO -> HALLO +1 HEART'S -> HEARTS +1 HEAR -> SEE +1 HEAR -> HERE +1 HE'S -> HIS +1 HE'D -> HE +1 HE -> WHOSE +1 HE -> IT +1 HE -> HIS +1 HAZEWRAPPED -> HAYES +1 HAWTREY -> HOLTREE +1 HAVING -> HEAVEN +1 HAVE -> HAS +1 HAVE -> HALF +1 HAUGHTY -> HALTING +1 HATER -> HAYTER +1 HAS -> IS +1 HAS -> HESITATED +1 HARTS -> HEARTS +1 HARRY -> HARRYTOWN +1 HARRIED -> HURRIED +1 HARMONIZED -> HARMONIZE +1 HARKENED -> HEARKENED +1 HARBORING -> HARBOURING +1 HARANGUE -> HARANG +1 HARALD -> HAROLD +1 HAPPEN -> HAPPENED +1 HANNA -> HAD +1 HANGINGS -> HANGING +1 HANDS -> HANDSOME +1 HAM -> HIM +1 HALLOA -> HULLO +1 HAL -> HELLO +1 HAKON -> HAWKIN +1 HAIRDRESSER -> HAIR +1 HAD -> IS +1 HAD -> HEAD +1 HAD -> HAVE +1 GUISE -> SKIES +1 GUESTS -> GUESS +1 GUEST -> GUESTS +1 GROWS -> GROVES +1 GRINGO -> GREENOW +1 GRIEFS -> GREEDS +1 GREY'S -> GRAY'S +1 GREEING -> GREEN +1 GREAT -> GRATE +1 GRAVE -> GRAY +1 GRAPEVINE -> GRAPE +1 GRAND -> GREAT +1 GRAM -> GRAHAM +1 GRADUALLY -> GRADUAL +1 GRADES -> GRATES +1 GOVERNMENT -> GOVERNOR +1 GOVERNED -> GOVERN +1 GOSSIP -> GOSSIPS +1 GOOBERS -> GOULD +1 GOAT'S -> GOATS +1 GOAT -> GO +1 GOAT -> BOAT +1 GIVE -> GAVE +1 GIRARD -> GERARD +1 GILCHRIST'S -> GILCRE'S +1 GILCHRIST -> GORIST +1 GILCHRIST -> GOCRIST +1 GILCHRIST -> GILCRIS +1 GIER -> GEAR +1 GIAOURS -> GEYORS +1 GESTATION -> JUST +1 GEOFFREY'S -> JEFFREY'S +1 GEOFFREY -> JEFFREY +1 GENTLEMEN -> GENTLEMAN +1 GENERAL -> GENERALSHIP +1 GAYLY -> GAILY +1 GAY -> GAME +1 FUTURISTIC -> FUTURESTIC +1 FULNESS -> FULLNESS +1 FULL -> FALL +1 FRONTISPIECE -> FRONTESPIECE +1 FRISKILY -> FRISKLY +1 FRIEND -> BRAND +1 FRANCS -> FRANKS +1 FORWARDED -> FOOTED +1 FORESEEING -> FOR +1 FORBES'S -> FORCE +1 FOR -> PROCEEDING +1 FOR -> FROM +1 FOR -> FOUR +1 FOR -> FOREVER +1 FOLLOWED -> FOWLED +1 FLUFFINOSE -> FLUFFINO'S +1 FLOUR -> 
FLOWER +1 FLIGHT -> FIGHT +1 FIRS -> FURS +1 FIREBUGS -> FIRE +1 FIREBALL -> FIRE +1 FIR -> FUR +1 FINE -> FIND +1 FIND -> FIVE +1 FILL -> FILLED +1 FEW -> YOU +1 FETE -> FIGHT +1 FELT -> FILT +1 FELT -> FELLED +1 FEELS -> FILLS +1 FEELIN'S -> FEELINS +1 FEAREST -> FEAR'ST +1 FEARED -> FEAR +1 FAVORITE -> FAVOURITE +1 FAVOR -> FAVOUR +1 FATTENED -> FAT +1 FARTHEST -> FURTHEST +1 FARMHOUSES -> FARM +1 FAMOUS -> FAME +1 FALLEN -> FALL +1 FAIR -> FAIREST +1 EYE -> I +1 EVOLUTION -> REVOLUTION +1 EVERY -> EVERYONE +1 EVENIN'S -> EVENINGS +1 EVA -> EITHER +1 ESTATE -> STATE +1 ESTAFANIA -> STEFFANIA +1 ESTAFANIA -> DA +1 ESPRIT -> A +1 ESCHEATED -> ISIATED +1 ESCAPED -> ESCAPE +1 ERNEST -> EARNEST +1 ER -> A +1 ENTRUSTING -> INTRUSTING +1 ENTRANCED -> AND +1 ENTHRALMENT -> ENTHRALLMENT +1 ENTER -> INTO +1 ENSURE -> INSURE +1 ENQUIRIES -> INQUIRIES +1 ENQUIRED -> INQUIRED +1 ENQUIRE -> INQUIRE +1 ENGINEER -> ENGINEERS +1 EMISSIONS -> MISSIONS +1 EMIL -> AMYL +1 EMIGRANT -> IMMIGRANT +1 EMERG'D -> EMERGED +1 EM -> THEM +1 ELSINORE -> ELZINORE +1 ELMO'S -> AIRABLE'S +1 ELCHO -> ELKOE +1 ELABORATE -> CELEBRATE +1 EFFECTED -> AFFECTED +1 EDITION -> ADDITION +1 ECCENTRICITY -> EXCENTRICITY +1 EARSHOT -> EAR +1 EARS -> YEARS +1 E -> EVEN +1 DYKES -> DIKES +1 DUST -> DUS +1 DURING -> DREWING +1 DUMPY -> DUMPEY +1 DUMPY -> DON'T +1 DUERER -> DURE +1 DRUNKENNESS -> DRINKENNESS +1 DRUGGIST'S -> DRUGGIST +1 DROPIDAS -> DROPIDUS +1 DRIPPING -> TRIPPING +1 DOWN -> DOWNSTAIRS +1 DOWN -> DOWNREACHING +1 DOUZE -> DUSPORT +1 DOUBLE -> DOUBLED +1 DOOR -> DOORSTEP +1 DONATISTS -> DONATIST +1 DONA -> DORIS +1 DON'T -> A +1 DOLL -> STALL +1 DOCTRESS -> DOCTRIS +1 DISTRICTS -> DISTRICT +1 DISQUIETUDE -> AS +1 DISPENSE -> SPENCE +1 DISHONOURED -> DISHONORED +1 DISCOLOURED -> DISCOLORED +1 DIATRIBE -> DIETRIBE +1 DIAS -> DAIS +1 DIALOGUES -> DIALECTS +1 DETERMINED -> DETERMINE +1 DESCENT -> DISSENT +1 DELIBERATIVE -> DELIBERATE +1 DELIA -> GELIA +1 DELIA -> DELHIA +1 DEFINED -> THE +1 DEDALUS -> DAEDALUS +1 DEDALOS -> DEAD +1 DECENCY -> DECENCIES +1 DE -> MARE +1 DE -> DETERNATION +1 DE -> DETENNACHELANT +1 DE -> DENISCHALANT +1 DAWN'S -> DAWNS +1 DAIRY -> DEARIE +1 D'ESTE -> DESTA +1 CYNTHIA -> CYNTHIA'S +1 CYN -> SIN +1 CUT -> CAUGHT +1 CUSTOMARILY -> CUSTOMARY +1 CURVED -> CARVED +1 CRYSTAL -> CRISTEL +1 CROSSTREES -> CROSS +1 CRESTED -> CRUSTED +1 CRESSWELLS -> WHIRLS +1 CRESSWELL -> CHRISWELL +1 CREIGHTON -> KREITON +1 CREIGHTON -> CRIGHTON +1 CRASWELLERS -> CRESTWELLERS +1 CRASWELLER -> CRUSHWELLER +1 COZIER -> COSIER +1 COW -> COWSHED +1 COURT -> CORPS +1 COUNTRY'S -> COUNTRY +1 COUNSELS -> COUNCILS +1 COUNSELLED -> COUNSELS +1 COULDN'T -> POPLED +1 COULDN'T -> COULDN' +1 COULD -> GOOD +1 COULD -> COULDN'T +1 COULD -> COTEL +1 COTTON -> CONTIN +1 COSTS -> COST +1 CORRELATES -> COROLLETS +1 CORN -> CORNEERS +1 CORMORANT -> COMRADE +1 CORALIE -> CORLEY +1 COOK -> COPE +1 CONTAINED -> CONTAINING +1 CONSUMER'S -> CONSUMERS +1 CONSID'BLE -> CONSIDERABLE +1 CONQUERIN -> CONQUERING +1 CONJURER -> CONJUROR +1 CONFESS -> CONFESSED +1 CONDENSE -> CONTENTS +1 COMPOSE -> COMPOSED +1 COMMENTS -> COMETS +1 COMBASH -> COMBATCH +1 COLOURS -> COLORS +1 COLOUR -> COLOR +1 COLORS -> COLOURS +1 COLORS -> COLLARS +1 COLORIST -> COLOR +1 COLORIST -> CHOLERAIST +1 COLD -> CALLED +1 COAL -> CO +1 CO -> COEXIST +1 CLEW -> CLUE +1 CLAUSE -> CLAWS +1 CIVET -> SAVE +1 CITADELLED -> CITADELED +1 CIGARETTE -> SICK +1 CHRISTAIN -> CHRISTIAN +1 CHOICE -> CHOICES +1 CHINGACHGOOK -> INGACHGOOK +1 CHIAROSCURISTS -> KIARASCURISTS +1 CHIAROSCURIST 
-> CURE +1 CHEROOT -> JEROOT +1 CHATTERBOX -> CHATTER +1 CHARENTE -> SCHERANT +1 CHARENTE -> AUNT +1 CHARACTERISTIC -> CORRECTURISTIC +1 CHANGE -> CHANGED +1 CENTRED -> SENATE +1 CENTER -> CENTRE +1 CENDENARIES -> SENDIARIES +1 CEASE -> SEIZED +1 CEASD -> CEASED +1 CASTS -> CAST +1 CARPACCIO'S -> CARPATCHIO'S +1 CANVAS -> GAMBUS +1 CAN -> COULD +1 CAN -> CANNOT +1 CALLED -> THEIR +1 CALDWELL -> COLDWELL +1 BY -> MY +1 BY -> BYE +1 BUTT -> BUT +1 BUTCHERED -> BUTCHER +1 BUT -> DO +1 BUT -> BETTER +1 BUT -> BEFORE +1 BURNE -> BYRNE +1 BURN -> BURNED +1 BURN -> BURNE +1 BURGOYNE -> WERE +1 BUNNIT -> BUNNOT +1 BUL -> BULL +1 BUL -> BOWL +1 BUILDING -> BILLING +1 BUCHANAN -> BUCATED +1 BROTHELS -> BRAFFLELS +1 BRITANNULISTS -> BRITAIN +1 BRISK -> BRAY +1 BRINGING -> RINGING +1 BREAKWATER -> BRAKE +1 BREAKFAS -> BREAKFAST +1 BRANWELL -> BROWNWELL +1 BRANDS -> BRAINS +1 BRANCH -> RANCH +1 BRAGELONNE -> BREG +1 BRAGELONNE -> BRAGGLIN +1 BOX -> BOXWOMEN +1 BOTANY -> BARTANY +1 BORDERS -> BORDER +1 BOOKKEEPER -> BIT +1 BOLLS -> BOWLS +1 BOGUS -> VOGUS +1 BOAR -> BOARHOUND +1 BLUESKINS -> BLUESKIN +1 BLESSINGS -> BLESSING +1 BLASTS -> BLAST +1 BITES -> WHITES +1 BIT -> WERE +1 BILLYGOAT -> SPILLY +1 BILLED -> BUILD +1 BEWILDERMENT -> OF +1 BERGSON -> BERKS +1 BELIEVE -> BELIEVED +1 BEING -> MEAN +1 BEIN -> BEING +1 BEGGAR'S -> BEGGARS +1 BEG -> BEGGED +1 BEFORE -> FOR +1 BEFIT -> BE +1 BEFELL -> BEFEL +1 BEFAL -> BEFALL +1 BEEN -> SPIN +1 BEELZEBUB -> IS +1 BEEDER -> READER +1 BEEBE -> B +1 BEDIMMED -> BE +1 BEAR -> BARON +1 BEACHED -> BEECHED +1 BATTLEAX -> BATTLEX +1 BATTLEAX -> ADELAX +1 BASKET -> BASKEY +1 BANDS -> VANS +1 BALEEN -> BALINE +1 BALAAM'S -> BALEM'S +1 BAINS -> BANDA +1 BADAUDERIE -> BAD +1 BAD -> BAN +1 BABIRUSA -> BARBAROUSA +1 AWHILE -> A +1 AWARE -> WEAR +1 AUNT -> AND +1 AU -> ACCURANT +1 ATTENDANTS -> ATTENDANCE +1 ATTENDANCE -> ATTENDANTS +1 ATHOLEMEN -> ETHEL +1 ATHENAIS -> ETHNE +1 ATHENAIS -> ETHINE +1 ATCHISON -> ATTITSON +1 AT -> THAT +1 AT -> ITS +1 AT -> AND +1 ASSEMBLED -> A +1 ASCENDENCY -> ASCENDANCY +1 AS -> TO +1 ARTICHOKES -> ART +1 ARRONDISSEMENT -> ARE +1 ARRIVING -> RIVING +1 ARRESTS -> ARREST +1 ARRANGING -> A +1 ARMOUR -> ARMOR +1 ARMED -> ARM +1 ARE -> OUR +1 ARE -> OR +1 ARE -> ALL +1 ARDLE -> CARDLE +1 ANYWHERE -> MANYWHERE +1 ANYMORE -> ANY +1 ANY -> ANYTHING +1 ANY -> ANYONE +1 ANTEDATING -> ANTIDATING +1 ANSWERD -> ANSWERED +1 ANNE'S -> AN +1 ANNALS -> ANNAL +1 ANGOR -> ANGORE +1 ANGELS -> ANGEL +1 ANDERS -> ANDREW +1 ANDERS -> ANDER'S +1 ANDELLA -> DELA +1 AND -> THAN +1 AND -> ONE +1 AND -> INTO +1 AND -> INFORM +1 AND -> INDEED +1 AND -> EMBICIDES +1 AND -> AM +1 AND -> ALYOCADIA'S +1 ANAXAGORAS -> AN +1 AN -> ON +1 AN -> IN +1 AMPHITHEATER -> AMPHITHEATRE +1 AM -> I'M +1 ALTERNATIVE -> ALL +1 ALLUVION -> ALLUVIAN +1 ALL -> ALTOGETHER +1 ALL -> ALREADY +1 ALBIGENSES -> ALBIGENZAS +1 ALBANS -> ALBAN'S +1 AIR -> HEIR +1 AIGNAN -> ENG +1 AID -> AIDS +1 AFTERDECK -> AFTER +1 AFFRIGHTENED -> A +1 AFFILIATED -> HAVE +1 AFFECT -> EFFECT +1 ADONA -> ADONNA +1 ACKNOWLEDGEMENT -> ACKNOWLEDGMENT +1 ABOLITIONISTS -> ABOLITIONIST +1 ABJECTLY -> OBJECTLY +1 ABBE -> ABBEY +1 A -> UPON +1 A -> UNNOTTINGHAM +1 A -> UNNOTABLY +1 A -> OFFENCE +1 A -> IF +1 A -> I +1 A -> ESPECIAL +1 A -> AWAY +1 A -> ATTORIAN +1 A -> AS +1 A -> ARREST +1 A -> APILLION +1 A -> AFAR +1 A -> ACCORD +1 A -> ACCOMPANIED
+
+DELETIONS: count ref
+9 IS +9 A +5 AND +4 IT +3 TONNAY +3 GALATIANS +3 CHARENTE +2 YARD +2 WITH +2 WAY +2 TO +2 THE +2 TELL +2 STAIRS +2 ONE +2 OF +2 LORD +2 HIS +2 HAVE
+2 FOR +2 E +2 AM +1 YOU +1 YE +1 WOMEN +1 WHATEVER +1 WE +1 WASN'T +1 WAS +1 WARRENTON'S +1 VINES +1 UTILITY +1 TRILOGIES +1 TRAY +1 TOWNE +1 TORY +1 TOGETHER +1 THOR +1 THING +1 T +1 STEP +1 SPECIAL +1 SOCRATIC +1 SITTING +1 SIT +1 SHIP +1 SHED +1 SENTENCES +1 SENT +1 SEERS +1 SEE +1 RUNG +1 REST +1 READY +1 REACHING +1 PORTES +1 POOS +1 POND +1 PLATONISTS +1 PLATES +1 PILLION +1 PHILIP +1 PERFECT +1 PEGRE +1 OTHER +1 ON +1 OLD +1 NOTABLY +1 NOT +1 NARES +1 N +1 MUCH +1 MER +1 ME +1 LO +1 LEOCADIA'S +1 LAKE +1 L +1 KNOCKED +1 KNIFE +1 JAIL +1 INTO +1 IN +1 HUMPH +1 HOUND +1 HIM +1 HE +1 GHIP +1 FORM +1 FOOTED +1 FENCE +1 FEEL +1 FAR +1 EXIST +1 EVER +1 EARS +1 DO +1 DENSITY +1 DEED +1 DE +1 DARK +1 D +1 CUTTERS +1 COURANT +1 COMPOSSER +1 COMPANY +1 CLOTH +1 CHORD +1 CHERRIES +1 BOYS +1 BORN +1 BESIDES +1 ATTAINED +1 AT +1 AS +1 APPRENTICE
+
+INSERTIONS: count hyp
+15 A +11 ONE +10 AND +9 THE +6 OF +6 IS +6 ARE +5 IN +5 HAVE +5 DAY +4 TO +4 ME +4 AS +4 AM +3 WILL +3 ON +3 IT +3 HALL +3 FIND +3 AXE +2 WORTH +2 TIME +2 OTHER +2 NIGHT +2 MEN +2 HER +2 EVER +2 DELLA +2 BE +2 ATTITUDE +2 AT +2 AN +1 YULA +1 YON +1 WRAPPED +1 WORLDLY +1 WILDERMENT +1 WHILE +1 WEST +1 WERE +1 WAY +1 WATER +1 WAS +1 WARILY +1 VINE +1 VILLIA +1 VIEW +1 VICHET +1 VENORS +1 ULLA +1 UILISTS +1 TURNED +1 TREES +1 TRANCED +1 THIS +1 THINGS +1 THING +1 THESE +1 THEASURUS +1 THAT +1 TALL +1 STATION +1 SPREE +1 SOME +1 SIMPLED +1 SHOT +1 SERVANT +1 SEPARATED +1 SENCRATICALLY +1 SEEING +1 SCURUS +1 SAINT +1 SAGE +1 RED +1 RAGING +1 QUIETUDE +1 POST +1 POOLS +1 POOL +1 PLAY +1 OWE +1 OVER +1 OUT +1 OR +1 ONLY +1 OFF +1 ODD +1 OCCADIA +1 MORE +1 MILL +1 MARTRA +1 MAKE +1 LOSS +1 LOCKY +1 LOCATED +1 LIST +1 LINE +1 LIKE +1 LEFT +1 LEAF +1 KEEPER +1 I +1 HYAHSOME +1 HOUSES +1 HOLE +1 HIS +1 HIM +1 HE +1 HAD +1 GROUND +1 GREE +1 GOING +1 GOAT +1 GIRT +1 FRIGHTENED +1 FRANK +1 FOR +1 FIT +1 FILLIOTTED +1 FANIA +1 EXAGGARIST +1 EULO +1 DRESSER +1 DOWN +1 DONALDS +1 DIMMED +1 DERPOOL +1 DERBOOLE +1 DECK +1 DEALT +1 DE +1 CRAFT +1 CLEANING +1 CHOKES +1 CHAIR +1 CERTAINTY +1 CASE +1 BURN +1 BUGS +1 BUB +1 BOOKS +1 BID +1 BED +1 BEALES +1 BALL +1 BACK +1 ANALIA +1 ALONE +1 ACTIVELY +1 ACTION +1 ABILITY
+
+PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp
+AND 1738 102 1787 1791 +A 1124 98 1166 1180 +THE 3428 82 3461 3477 +IN 878 73 905 924 +TO 1326 35 1340 1347 +OF 1787 31 1799 1806 +I 706 30 711 731 +IT 543 29 558 557 +IS 450 29 468 461 +ONE 184 22 191 199 +AN 153 20 165 161 +YOU 414 15 418 425 +OR 169 15 176 177 +ON 273 15 279 282 +AS 379 15 383 390 +WERE 181 14 186 190 +THAT 603 14 610 610 +ARE 179 14 182 190 +MEN 58 13 62 67 +HE 520 13 526 527 +FOR 414 13 420 421 +THEIR 167 12 173 173 +THIS 255 11 263 258 +THEY 203 11 209 208 +HAVE 211 11 215 218 +AT 278 11 284 283 +WITH 418 9 424 421 +WHERE 44 9 49 48 +TWO 65 9 71 68 +TIMAEUS 0 9 9 0 +THERE 135 9 137 142 +THAN 84 9 88 89 +SOME 85 9 87 92 +O 7 9 14 9 +MAN 61 9 67 64 +I'M 27 9 33 30 +HIS 470 9 473 476 +ANY 83 9 85 90 +ANDERS 2 9 11 2 +WILL 142 8 143 149 +RODOLFO 0 8 8 0 +ITS 80 8 82 86 +HER 319 8 324 322 +HAD 318 8 321 323 +DE 3 8 10 4 +AM 56 8 59 61 +WOULD 137 7 141 140 +WAS 576 7 579 580 +THEN 120 7 125 122 +IT'S 24 7 29 26 +INTO 101 7 104 105 +I'VE 17 7 23 18 +ANYONE 0 7 6 1 +ALL 223 7 225 228 +WHOSE 13 6 14 18 +WE 149 6 152 152 +TOO 60 6 61 65 +RUDOLPHO 0 6 0 6 +OTHER 62 6 65 65 +LEOCADIA 0 6 6 0 +HAS 104 6 108 106 +BUT 341 6 344 344 +BE 314 6 314 320 +WHEN 130 5 133 132 +WELL 71 5 75 72 +TONNAY 0 5 5 0 +THEL 0 5 5 0 +SYLVIA 0 5 0 5 +SOAMES 0 5 5 0 +SILVIA 0 5 5 0
+NOW 91 5 94 93 +NOT 335 5 338 337 +NO 165 5 169 166 +MY 222 5 225 224 +METER 6 5 11 6 +ME 183 5 184 187 +LOCATIA 0 5 0 5 +HIM 213 5 215 216 +GRAY 3 5 5 6 +FIND 21 5 22 25 +EVERY 30 5 31 34 +DAY 50 5 50 55 +COLOR 7 5 10 9 +CHARENTE 0 5 5 0 +BATTLEAX 0 5 5 0 +ANDREWS 0 5 0 5 +WON 2 4 2 6 +WHO 153 4 154 156 +WHILE 34 4 34 38 +WHAT 111 4 113 113 +UPON 91 4 94 92 +UP 109 4 110 112 +UNC 1 4 5 1 +TIMAS 0 4 0 4 +THOU 18 4 18 22 +SOLON 0 4 4 0 +SOLMES 0 4 0 4 +RED 18 4 19 21 +PRACTISE 1 4 4 2 +PRACTICE 6 4 7 9 +OH 31 4 33 33 +MAINHALL 0 4 4 0 +MAIN 3 4 3 7 +LEAVENWORTH 0 4 4 0 +JAGO 1 4 5 1 +I'LL 11 4 14 12 +HELLO 1 4 2 4 +HALL 9 4 9 13 +GREY 1 4 3 3 +EMIL 0 4 4 0 +CRESSWELL 0 4 4 0 +COURT 9 4 12 10 +COULD 92 4 95 93 +COLOUR 1 4 2 4 +BRITAIN 1 4 1 5 +BOX 7 4 8 10 +YOUR 106 3 109 106 +YOU'RE 2 3 5 2 +YE 6 3 8 7 +XAVIER 0 3 3 0 +WHOLLY 8 3 9 10 +WHITE 22 3 23 24 +WEAR 4 3 5 6 +WAY 71 3 73 72 +VALLIERS 0 3 0 3 +VALLIERE 0 3 3 0 +TRAVELLING 0 3 0 3 +TRAVELING 0 3 3 0 +TOWARDS 16 3 19 16 +TOWARD 8 3 8 11 +TODAY 0 3 3 0 +TIME 86 3 87 88 +THROUGH 42 3 42 45 +THORKEL 0 3 3 0 +THEM 120 3 122 121 +THEE 27 3 30 27 +THEATRE 2 3 2 5 +THEATER 0 3 3 0 +THAT'S 13 3 14 15 +TABU 0 3 3 0 +STAGE 4 3 6 5 +SOMETIME 0 3 2 1 +SOMETHING 34 3 37 34 +SOMEONE 0 3 3 0 +SO 195 3 197 196 +SLANG 9 3 12 9 +SITTING 2 3 4 3 +SIN 12 3 13 14 +SHE 279 3 281 280 +SEEMED 30 3 30 33 +SEE 64 3 65 66 +SAIL 3 3 6 3 +ROUND 14 3 17 14 +READ 16 3 18 17 +PLATONISTS 0 3 3 0 +OUT 99 3 100 101 +OUR 79 3 81 80 +OFFICERS 8 3 10 9 +NOTTINGHAM 4 3 7 4 +NOR 20 3 21 22 +NEW 34 3 35 36 +NAOMI 2 3 5 2 +MISTER 46 3 48 47 +METRE 0 3 0 3 +MAY 54 3 56 55 +MASTERS 2 3 3 4 +MADAME 4 3 4 7 +MADAM 0 3 3 0 +KNOW 75 3 76 77 +KAFFIR 0 3 0 3 +KAFFAR 0 3 3 0 +IMPRESSED 2 3 4 3 +IMPRESS 0 3 0 3 +IF 129 3 131 130 +ICHTHYOSAURUS 0 3 3 0 +HOLY 1 3 3 2 +HOLLAND 0 3 0 3 +HOLLAN 0 3 3 0 +HILDA 6 3 9 6 +HERE 68 3 70 69 +HE'S 6 3 9 6 +HALLO 0 3 2 1 +GOAT 5 3 7 6 +GILCHRIST 0 3 3 0 +GALATIANS 3 3 6 3 +FOREVER 0 3 2 1 +FELL 16 3 16 19 +FAIR 6 3 7 8 +EVERYONE 1 3 3 2 +EVER 34 3 35 36 +E 0 3 3 0 +DOWN 71 3 73 72 +DOBRYNA 0 3 3 0 +DOBRINA 0 3 0 3 +DISSENT 0 3 2 1 +DID 65 3 67 66 +DESCENT 1 3 2 3 +DEAD 5 3 5 8 +CRESWELL 0 3 0 3 +COULDN'T 4 3 6 5 +COLORS 1 3 3 2 +BY 246 3 248 247 +BURN 2 3 4 3 +BRITANNULA 0 3 3 0 +BRAKE 1 3 3 2 +BATTLE 6 3 6 9 +AXE 1 3 1 4 +AROUND 12 3 12 15 +ANOTHER 34 3 36 35 +ANDELLA 1 3 4 1 +AMIEL 0 3 0 3 +ZORA 1 2 3 1 +YOU'LL 7 2 9 7 +YARD 3 2 5 3 +WORTH 4 2 4 6 +WOODS 2 2 3 3 +WOOD 3 2 4 4 +WONDERING 1 2 2 2 +WIDOWS 0 2 0 2 +WHY 42 2 44 42 +WHITTAWS 0 2 2 0 +WHICH 216 2 216 218 +WHATEVER 11 2 13 11 +WHALE 2 2 4 2 +WE'VE 1 2 2 2 +WE'RE 1 2 3 1 +WE'LL 4 2 6 4 +WATRY 0 2 2 0 +WATER 19 2 19 21 +WARRENTON'S 0 2 2 0 +WANDERING 1 2 2 2 +VOICE 16 2 18 16 +VERY 82 2 83 83 +VAPOURS 0 2 2 0 +VAPORS 0 2 0 2 +VANDERPOOL 0 2 2 0 +VAN 2 2 2 4 +UNLIKE 0 2 1 1 +UN 0 2 1 1 +TURNED 21 2 21 23 +TRAVELLERS 0 2 0 2 +TONIGHT 0 2 2 0 +THINK 52 2 52 54 +THINGS 33 2 34 34 +THING 21 2 22 22 +THESE 68 2 68 70 +TELL 32 2 34 32 +TABOO 0 2 0 2 +T 0 2 1 1 +SYMPOSIUM 0 2 2 0 +SUPPOSIUM 0 2 0 2 +STEPHANOS 0 2 2 0 +STATE 26 2 27 27 +STAIRS 6 2 8 6 +SPIN 0 2 1 1 +SOUTHEY 0 2 2 0 +SOLEMN 1 2 1 3 +SLING 0 2 0 2 +SLEEVE 0 2 1 1 +SIT 10 2 11 11 +SIR 35 2 36 36 +SINCE 24 2 25 25 +SIFT 0 2 0 2 +SIF 0 2 2 0 +SHIP 7 2 9 7 +SERVANT 11 2 12 12 +SENTENCES 1 2 3 1 +SENT 4 2 6 4 +SEEM 11 2 13 11 +SEA 17 2 18 18 +SCHOOL 9 2 11 9 +SCENT 0 2 1 1 +SAW 21 2 23 21 +SALE 0 2 0 2 +SAGE 0 2 0 2 +RESERVED 1 2 1 3 +RESERVE 0 2 2 0 +REMAINED 5 2 6 6 +READER 1 2 1 3 +PRAISE 1 2 3 1 +POPHAM 3 2 5 3 +PLATINISTS 0 2 0 2 
+PLATES 1 2 2 2 +PATIENCE 1 2 2 2 +PARSONS 0 2 2 0 +PARLOUR 0 2 0 2 +PARLOR 0 2 2 0 +PAIN 6 2 7 7 +OVER 58 2 59 59 +ORDERED 2 2 3 3 +ORDER 22 2 22 24 +OLIVE'S 1 2 3 1 +OLD 39 2 40 40 +OFF 24 2 25 25 +NIGHT 24 2 24 26 +NEO 1 2 3 1 +NEIGHBOUR 1 2 2 2 +NEIGHBOR 0 2 1 1 +N 2 2 4 2 +MUNNY 0 2 2 0 +MORNING 21 2 21 23 +MORNIN 0 2 2 0 +MONTMARTRE 0 2 2 0 +MONT 0 2 0 2 +MONEY 5 2 5 7 +MISTS 2 2 3 3 +MIST 4 2 5 5 +MISS 16 2 18 16 +MERRY 0 2 1 1 +MARSHALL 1 2 3 1 +MAC 0 2 1 1 +LOUIS 1 2 2 2 +LOSS 5 2 6 6 +LORD 21 2 23 21 +LINE 12 2 13 13 +LIKE 105 2 105 107 +LEVINWORTH 0 2 0 2 +LEVIN 0 2 0 2 +LEOCADIA'S 0 2 2 0 +LEFT 33 2 34 34 +LECOMTE 0 2 0 2 +LECOMPTE 0 2 2 0 +LE 1 2 1 3 +LAUGHED 5 2 6 6 +LARKSPUR 0 2 2 0 +LAKE 11 2 13 11 +KNOWS 5 2 6 6 +KARL 0 2 0 2 +KAFFIR'S 0 2 0 2 +KAFFAR'S 0 2 2 0 +JUST 42 2 42 44 +JAIL 2 2 4 2 +INNERLOCHY 0 2 2 0 +INJURED 0 2 2 0 +I'D 1 2 3 1 +HUMOUR 0 2 1 1 +HUMOR 0 2 1 1 +HOW 49 2 50 50 +HOUSE 34 2 34 36 +HON 0 2 1 1 +HOLBINE 0 2 0 2 +HOLBEIN 0 2 2 0 +HEN 0 2 2 0 +HELDA 0 2 0 2 +HEARTS 8 2 8 10 +HEAR 18 2 20 18 +HARRY 2 2 3 3 +HANDS 16 2 17 17 +GYLICANS 0 2 0 2 +GUESTS 4 2 5 5 +GREAT 73 2 74 74 +GILLIKINS 0 2 2 0 +FOUNDED 3 2 5 3 +FOUND 21 2 21 23 +FORMERLY 0 2 0 2 +FORMALLY 1 2 3 1 +FOOTED 0 2 1 1 +FIRE 22 2 22 24 +FILLING 0 2 0 2 +FIGHT 3 2 3 5 +FELT 17 2 19 17 +FEELING 9 2 11 9 +FALL 2 2 2 4 +FAIRVIEW 0 2 2 0 +EVERYDAY 1 2 3 1 +ESTAFANIA 1 2 3 1 +EM 0 2 1 1 +EARS 3 2 5 3 +DUMPY 0 2 2 0 +DON'T 37 2 38 38 +DO 93 2 94 94 +DELLA 1 2 1 3 +DELIA 0 2 2 0 +DEFINE 3 2 5 3 +CROSS 5 2 5 7 +CRITIUS 0 2 0 2 +CRITIAS 0 2 2 0 +CREIGHTON 0 2 2 0 +COURTYARD 4 2 4 6 +COURTS 2 2 2 4 +COURT'S 0 2 2 0 +COUNSELS 0 2 1 1 +COLOURS 0 2 1 1 +COLORIST 0 2 2 0 +CO 0 2 1 1 +CHASE 1 2 1 3 +CHAISE 0 2 2 0 +CARL 0 2 2 0 +CAN 64 2 66 64 +CALLED 23 2 24 24 +BURNE 0 2 1 1 +BUL 0 2 2 0 +BREAK 3 2 3 5 +BRAGELONNE 0 2 2 0 +BOYS 5 2 6 6 +BOGGS 0 2 2 0 +BIT 8 2 9 9 +BEING 39 2 40 40 +BEHAVIOUR 0 2 2 0 +BEHAVIOR 0 2 0 2 +BEFORE 74 2 75 75 +BEATITUDE 0 2 2 0 +BANNISTER 1 2 3 1 +BANISTER 0 2 0 2 +BAD 5 2 6 6 +AYE 0 2 2 0 +AY 0 2 2 0 +AUNT 4 2 5 5 +ATTITUDE 4 2 4 6 +ATTENDANTS 0 2 1 1 +ATTENDANCE 0 2 1 1 +ATHENAIS 0 2 2 0 +ASTOR 0 2 2 0 +ASTER 0 2 0 2 +ARREST 1 2 1 3 +ANDREW'S 0 2 0 2 +ALEXANDRA 1 2 3 1 +ALEXANDER 13 2 13 15 +ZOV'S 0 1 0 1 +ZORAS 0 1 0 1 +ZORAH 0 1 0 1 +ZORA'S 0 1 1 0 +ZOOF'S 1 1 2 1 +ZEVIR 0 1 0 1 +ZEVIER 0 1 0 1 +YUNK 0 1 0 1 +YULA 0 1 0 1 +YON 0 1 0 1 +YOLKS 0 1 0 1 +YEARS 34 1 34 35 +YEARNING 1 1 2 1 +YEARNIN 0 1 0 1 +YEA 1 1 1 2 +YCARIOUS 0 1 0 1 +YAGO 0 1 0 1 +WYLDER 4 1 5 4 +WYAT 0 1 0 1 +WRITE 3 1 4 3 +WRAPPED 0 1 0 1 +WORST 3 1 4 3 +WORSE 6 1 6 7 +WORLDLY 0 1 0 1 +WORLD'S 2 1 2 3 +WORLD 36 1 37 36 +WORE 3 1 3 4 +WOODCUTTERS 0 1 0 1 +WOODBEGIRT 0 1 1 0 +WONDER 7 1 7 8 +WOMENISM 0 1 0 1 +WOMEN'S 1 1 1 2 +WOMEN 7 1 8 7 +WOMAN'S 1 1 2 1 +WITHIN 22 1 23 22 +WITHES 0 1 1 0 +WITCH 2 1 3 2 +WINTER 4 1 5 4 +WINDSAY 0 1 0 1 +WIND 7 1 8 7 +WIN 2 1 2 3 +WILLS 0 1 1 0 +WILDS 0 1 0 1 +WILDERMENT 0 1 0 1 +WIGHT 0 1 0 1 +WIFE 16 1 17 16 +WIDTHS 0 1 0 1 +WIDOW 1 1 1 2 +WID 0 1 0 1 +WHO'S 1 1 2 1 +WHITTAWD 0 1 1 0 +WHITTAW 0 1 1 0 +WHITES 0 1 0 1 +WHIRLS 0 1 0 1 +WHIRLPOOL 1 1 2 1 +WHIPS 0 1 0 1 +WHETHER 23 1 23 24 +WHERE'S 0 1 0 1 +WHELPS 0 1 1 0 +WHELMS 0 1 0 1 +WHATSOEVER 1 1 1 2 +WHAT'S 4 1 5 4 +WHARTON 0 1 0 1 +WET 8 1 9 8 +WESTMERE 0 1 1 0 +WESTMER 0 1 0 1 +WESTLEY 0 1 0 1 +WEST 7 1 7 8 +WESLEY 1 1 2 1 +WELCOMED 0 1 1 0 +WELCOME 6 1 6 7 +WEEKLY 0 1 0 1 +WEDNESDAY 1 1 2 1 +WEBS 0 1 1 0 +WEATHER 5 1 6 5 +WEAKLY 0 1 1 0 +WAVES 6 1 7 6 +WAVE 0 1 0 1 +WATERY 2 1 2 3 +WATERMILL 0 1 1 0 +WATCHERY 0 1 0 1 
+WASN'T 1 1 2 1 +WARS 0 1 0 1 +WARRENTONS 0 1 0 1 +WARLIKE 0 1 1 0 +WARILY 0 1 0 1 +WAR 5 1 5 6 +WANDER 1 1 2 1 +WAIL 0 1 0 1 +VOLTAIRE 0 1 1 0 +VOICES 2 1 2 3 +VOGUS 0 1 0 1 +VINEY 0 1 0 1 +VINES 0 1 1 0 +VINE 0 1 0 1 +VILLIA 0 1 0 1 +VILLEROY 0 1 1 0 +VILLAIRY 0 1 0 1 +VIGNETTE 0 1 1 0 +VIEWS 1 1 1 2 +VIEW 2 1 2 3 +VICHET 0 1 0 1 +VICARIOUS 3 1 4 3 +VIADUCT 0 1 1 0 +VIADUC 0 1 0 1 +VERSE 1 1 2 1 +VERNS 0 1 0 1 +VERNE'S 0 1 1 0 +VEREMENT 0 1 0 1 +VENORS 0 1 0 1 +VEINS 0 1 0 1 +VEILS 1 1 1 2 +VAUDOIS 0 1 1 0 +VARIABILITY 1 1 2 1 +VANS 0 1 0 1 +VANES 0 1 1 0 +VANDERPOOLS 0 1 1 0 +VANDER 0 1 0 1 +VALOUR 0 1 0 1 +VALOR 2 1 3 2 +VALLEYED 0 1 1 0 +VALID 0 1 0 1 +VALES 2 1 3 2 +UTTER 3 1 4 3 +UTILITY 2 1 3 2 +UTAH 1 1 2 1 +UTA 0 1 0 1 +UPSTAIRS 3 1 3 4 +UNWARRANTON'S 0 1 0 1 +UNWARILY 0 1 1 0 +UNTO 2 1 3 2 +UNSEPARATED 0 1 1 0 +UNNOTTINGHAM 0 1 0 1 +UNNOTABLY 0 1 0 1 +UNJUSTIFILL 0 1 0 1 +UNJUST 1 1 2 1 +UNFINISHED 1 1 2 1 +UNFINISHANCES 0 1 0 1 +UNEXCEPTIONALLY 0 1 0 1 +UNEXCEPTIONABLY 0 1 1 0 +UNERRIFIC 0 1 0 1 +UNDERGROUND 0 1 1 0 +UNDER 40 1 40 41 +UNCONOCTED 0 1 0 1 +UNCLENCHED 0 1 1 0 +UNCAN 0 1 0 1 +ULLA 0 1 0 1 +UILISTS 0 1 0 1 +UD 0 1 1 0 +TYPE 1 1 1 2 +TWITE 0 1 1 0 +TWASN'T 1 1 1 2 +TUTRILOGIES 0 1 0 1 +TURNS 0 1 1 0 +TURNOVER 0 1 1 0 +TURNERS 0 1 0 1 +TURNER'S 0 1 1 0 +TURN 18 1 18 19 +TURANSULA 0 1 0 1 +TUPPENY 0 1 1 0 +TRY 3 1 4 3 +TROUBLE 8 1 8 9 +TRIPPING 0 1 0 1 +TRILOGIES 0 1 1 0 +TRIED 9 1 9 10 +TRIAGO 0 1 0 1 +TREES 19 1 19 20 +TREDDLESTON 0 1 1 0 +TREBLE 0 1 1 0 +TREADLESTON 0 1 0 1 +TRAY 0 1 1 0 +TRAVELERS 0 1 1 0 +TRAVELER 0 1 1 0 +TRANCED 0 1 0 1 +TRAIL 2 1 2 3 +TOWNE 0 1 1 0 +TOWELLING 0 1 1 0 +TOWELINGS 0 1 0 1 +TOURNISHER 0 1 0 1 +TOULD 0 1 1 0 +TORY 0 1 1 0 +TORQUAL 0 1 0 1 +TORKO 0 1 0 1 +TORE 0 1 0 1 +TORCAL 0 1 0 1 +TOPRUNG 0 1 0 1 +TOPPENNY 0 1 0 1 +TOPICA 0 1 0 1 +TOPEKA 0 1 1 0 +TOP 10 1 11 10 +TOOMS 0 1 1 0 +TONY 0 1 0 1 +TOMB'S 0 1 0 1 +TOILETTE 0 1 1 0 +TOILET 0 1 0 1 +TOGETHER 15 1 16 15 +TIRESOME 1 1 2 1 +TINTORET 0 1 1 0 +TINTINT 0 1 1 0 +TINTARETTE 0 1 0 1 +TINTANT 0 1 0 1 +TIMIUS 0 1 0 1 +TIMIRS 0 1 0 1 +TIMEUS 0 1 0 1 +TIBI 0 1 1 0 +TIBEE 0 1 0 1 +THY 17 1 17 18 +THUS 20 1 21 20 +THRO 0 1 1 0 +THOUSANDTH 0 1 0 1 +THOUSAND 12 1 13 12 +THOUGHT 53 1 54 53 +THOUGH 32 1 33 32 +THORLEIF 0 1 1 0 +THOR 0 1 1 0 +THINKS 2 1 3 2 +THEY'RE 3 1 3 4 +THEREIN 2 1 3 2 +THERE'S 12 1 12 13 +THEE'S 0 1 1 0 +THEATILITY 0 1 0 1 +THEASURUS 0 1 0 1 +THANKS 3 1 3 4 +THANK 12 1 13 12 +TECHNIQUE 0 1 1 0 +TEA 2 1 3 2 +TARANTULA 0 1 1 0 +TALL 9 1 9 10 +TALKERS 0 1 1 0 +TALK 19 1 19 20 +TABLECLOTH 0 1 0 1 +TABLE 23 1 24 23 +SWUNG 0 1 0 1 +SWOONS 0 1 1 0 +SWOON 3 1 3 4 +SWEPT 1 1 1 2 +SWEEP 1 1 2 1 +SWAY 1 1 1 2 +SWAN 0 1 1 0 +SUSPICIONS 2 1 3 2 +SUSPICION 4 1 4 5 +SURREL 0 1 0 1 +SURFACES 0 1 1 0 +SURFABILITY 0 1 0 1 +SUPERVOUS 0 1 0 1 +SUPERFLUOUS 0 1 1 0 +SUMMONED 2 1 3 2 +SUMMON 1 1 1 2 +SUMMERS 0 1 1 0 +SUMMER'S 2 1 2 3 +SUM 1 1 1 2 +SULLEN 1 1 1 2 +SUITCASE 0 1 1 0 +SUIT 4 1 4 5 +STYLE 4 1 5 4 +STYLANT 0 1 0 1 +STUFFANOS 0 1 0 1 +STUDY 12 1 12 13 +STRIKE 3 1 3 4 +STREAMLINE 0 1 1 0 +STREAM 1 1 1 2 +STRAYNE 0 1 0 1 +STRAIT 0 1 0 1 +STRAIN 0 1 1 0 +STRAIGHT 2 1 3 2 +STORY'S 0 1 1 0 +STORIES 3 1 3 4 +STOCKBROKER 0 1 1 0 +STEVE 0 1 0 1 +STEPHANO'S 0 1 0 1 +STEP 6 1 7 6 +STEFFANIA 0 1 0 1 +STEELED 0 1 0 1 +STEEL'D 0 1 1 0 +STEED 1 1 1 2 +STEADY 4 1 5 4 +STATION 6 1 6 7 +STATES 6 1 6 7 +STARTS 0 1 1 0 +START 3 1 3 4 +STARE 1 1 1 2 +STANDS 2 1 3 2 +STAND 13 1 13 14 +STALL 0 1 0 1 +STAIR 0 1 1 0 +STAGECRAFT 0 1 1 0 +SQUINT 0 1 0 1 +SPRING 7 1 8 7 +SPREE 0 1 0 1 +SPRANG 3 1 3 4 +SPRAGUE 
0 1 1 0 +SPLENDOUR 0 1 0 1 +SPLENDOR 0 1 1 0 +SPLENDID 9 1 9 10 +SPLENDET 0 1 1 0 +SPILLY 0 1 0 1 +SPEND 2 1 2 3 +SPENCE 0 1 0 1 +SPELLED 0 1 1 0 +SPELL'D 0 1 0 1 +SPECIALIZED 0 1 0 1 +SPECIALISED 0 1 1 0 +SPECIAL 1 1 2 1 +SOUTHEY'S 0 1 1 0 +SOUS 0 1 0 1 +SOU 0 1 1 0 +SORREL 0 1 1 0 +SORA 0 1 0 1 +SOOTHED 0 1 1 0 +SOOTHE 1 1 1 2 +SON 14 1 15 14 +SOMETHIN 0 1 0 1 +SOLOQUY 0 1 0 1 +SOLON'S 0 1 1 0 +SOLILOQUY 4 1 5 4 +SOLID 2 1 2 3 +SODIN 0 1 0 1 +SOCRATIC 0 1 1 0 +SMILED 4 1 4 5 +SMILD 0 1 1 0 +SMELLS 1 1 2 1 +SLEEP 4 1 5 4 +SLAYING 0 1 0 1 +SKURA 0 1 0 1 +SKRA 0 1 0 1 +SKILLFUL 0 1 1 0 +SKILFUL 0 1 0 1 +SKIES 0 1 0 1 +SKEPTICAL 0 1 1 0 +SIZED 1 1 1 2 +SIZE 4 1 5 4 +SIP 0 1 0 1 +SINS 9 1 9 10 +SIMPLED 0 1 0 1 +SIMMONS 0 1 0 1 +SILL 0 1 0 1 +SILENT 10 1 11 10 +SILENCE 7 1 7 8 +SILAGE 0 1 0 1 +SIGHTSEERS 0 1 0 1 +SIGHT 19 1 20 19 +SIGHED 3 1 4 3 +SIGH 3 1 3 4 +SIDE 23 1 23 24 +SICK 2 1 2 3 +SHUTTING 1 1 2 1 +SHOWRING 0 1 1 0 +SHOWERING 0 1 0 1 +SHOWED 5 1 5 6 +SHOULD 60 1 61 60 +SHOTTY 0 1 0 1 +SHOT 2 1 2 3 +SHODDY 0 1 1 0 +SHIP'S 0 1 0 1 +SHEWD 0 1 1 0 +SHERIFF'S 3 1 3 4 +SHERIFF 3 1 4 3 +SHELL 0 1 1 0 +SHEDDING 2 1 2 3 +SHED 0 1 1 0 +SHE'S 5 1 6 5 +SHARPS 0 1 1 0 +SHARPEST 1 1 1 2 +SHARP'ST 0 1 1 0 +SHARP'S 0 1 0 1 +SHAPEN 0 1 1 0 +SHAPELY 0 1 1 0 +SHALT 2 1 2 3 +SHALL 43 1 44 43 +SHAKEN 1 1 1 2 +SHABBLY 0 1 0 1 +SHABATA 0 1 1 0 +SET 19 1 19 20 +SERVICES 1 1 1 2 +SERVICEABILITY 0 1 1 0 +SERVED 4 1 4 5 +SERVE 11 1 12 11 +SERVANTS 4 1 4 5 +SEPARATED 3 1 3 4 +SENTENCE 3 1 3 4 +SENDIARIES 0 1 0 1 +SENCRATICALLY 0 1 0 1 +SENCE 0 1 1 0 +SENATE 2 1 2 3 +SEMON'S 0 1 1 0 +SELVIE 0 1 0 1 +SEIZED 3 1 3 4 +SEERS 0 1 1 0 +SEEN 16 1 16 17 +SEEMS 11 1 12 11 +SEEING 12 1 12 13 +SEATING 1 1 2 1 +SEAT 4 1 4 5 +SCYTHE 1 1 2 1 +SCUTCHEON 0 1 1 0 +SCUSE 0 1 1 0 +SCURUS 0 1 0 1 +SCUMMED 0 1 1 0 +SCUMBED 0 1 0 1 +SCRAPBOOKS 0 1 1 0 +SCRAP 0 1 0 1 +SCOUTING 0 1 1 0 +SCOUT 5 1 5 6 +SCHOOLS 1 1 1 2 +SCHOOLBOYS 0 1 0 1 +SCHERANT 0 1 0 1 +SCEVRA 0 1 1 0 +SCEURA 0 1 1 0 +SCEPTICAL 0 1 0 1 +SCENE 2 1 3 2 +SCATHE 0 1 1 0 +SCATH 0 1 0 1 +SCAROONS 0 1 1 0 +SCARONS 0 1 0 1 +SAYING 15 1 15 16 +SAVIER 0 1 0 1 +SAVERE 0 1 0 1 +SAVED 4 1 5 4 +SAVE 9 1 9 10 +SAUVEUR 0 1 1 0 +SAUL 0 1 0 1 +SATE 0 1 1 0 +SAT 18 1 18 19 +SANG 4 1 5 4 +SALVIE 0 1 0 1 +SALL 0 1 0 1 +SALINE 0 1 1 0 +SALIENT 1 1 2 1 +SAINT 14 1 14 15 +SAILOR 1 1 1 2 +SAILING 0 1 0 1 +SAILED 0 1 0 1 +SAILD 0 1 1 0 +SAID 160 1 161 160 +SAD 3 1 3 4 +RUNG 0 1 1 0 +RUFUS 0 1 1 0 +RUFFUS 0 1 0 1 +RUE 0 1 1 0 +RUDOLPU 0 1 0 1 +RUDOLPHAL 0 1 0 1 +ROY 0 1 0 1 +ROUTE 1 1 2 1 +ROUT 1 1 1 2 +ROSSOTER 0 1 0 1 +ROSSETER 0 1 1 0 +ROOTS 0 1 1 0 +ROI 0 1 1 0 +ROGERS'S 0 1 1 0 +ROGERS 2 1 2 3 +ROERER 0 1 1 0 +RODOLFO'S 0 1 1 0 +ROCKED 0 1 1 0 +ROCK 1 1 1 2 +ROBINS 0 1 0 1 +ROBIN'S 0 1 1 0 +ROAR 0 1 0 1 +ROAN 0 1 0 1 +RIVING 0 1 0 1 +RINGING 0 1 0 1 +RIGHT 25 1 25 26 +RIDOLPH'S 0 1 0 1 +RHONE 0 1 1 0 +REWEIGHED 0 1 1 0 +REWAYED 0 1 0 1 +REVOLUTION 0 1 0 1 +REST 13 1 14 13 +REMOVED 4 1 4 5 +REMOVE 3 1 4 3 +REMOV'D 0 1 1 0 +REMEMBERED 11 1 11 12 +REMEMBER 9 1 10 9 +REMARKED 4 1 4 5 +REMARK 2 1 3 2 +REMAINING 0 1 0 1 +REMAIN 5 1 6 5 +RELOCATED 0 1 1 0 +RELIES 0 1 1 0 +REINFORCEMENTS 0 1 0 1 +REIGNED 1 1 2 1 +REGAINED 0 1 1 0 +REGAIN 0 1 0 1 +REFUSED 7 1 8 7 +REFUSE 0 1 0 1 +REENFORCEMENTS 0 1 1 0 +REEDER 0 1 1 0 +RECORDS 2 1 2 3 +RECORD 6 1 7 6 +RECOGNIZED 3 1 3 4 +RECOGNISED 0 1 1 0 +RECALL 1 1 1 2 +REBUKED 0 1 0 1 +REBUK'D 0 1 1 0 +REALIZE 4 1 4 5 +READY 9 1 10 9 +REACHING 1 1 2 1 +RE 1 1 1 2 +RANK 5 1 5 6 +RANCOR 0 1 1 0 +RANCH 0 1 0 1 +RAINED 0 1 0 1 +RAGING 0 1 0 1 +QUINSON 0 1 1 0 
+QUINCENT 0 1 0 1 +QUIETUDE 0 1 0 1 +QUASI 0 1 1 0 +QUASH 0 1 1 0 +QUART 0 1 1 0 +PYTHAGOREANS 0 1 1 0 +PUTTING 7 1 7 8 +PUTTIN 0 1 1 0 +PURSUS 0 1 0 1 +PURSE 1 1 2 1 +PURPOSED 0 1 1 0 +PURPOSE 10 1 10 11 +PURIST 0 1 1 0 +PUREST 0 1 0 1 +PURCHASE 0 1 0 1 +PSALMS 0 1 0 1 +PSALM 2 1 3 2 +PROVES 1 1 2 1 +PROVED 6 1 6 7 +PROSTERITY 0 1 0 1 +PROSELYTING 0 1 1 0 +PROSELLING 0 1 0 1 +PROSCRIBED 0 1 1 0 +PRONUNCILA 0 1 0 1 +PRODIGAL 0 1 1 0 +PROCEEDING 2 1 2 3 +PROCEED 1 1 1 2 +PRIOR 0 1 1 0 +PREVENT 0 1 1 0 +PREVAILED 2 1 3 2 +PREVAIL 1 1 1 2 +PRETENSE 0 1 1 0 +PRETENCE 1 1 1 2 +PRESOCRATIC 0 1 0 1 +PRESENT 20 1 20 21 +PRESCRIBED 1 1 1 2 +PREISE 0 1 0 1 +PRECONCEIVED 0 1 1 0 +PRECIEUSES 0 1 1 0 +PRECEDE 0 1 1 0 +PRE 0 1 1 0 +PRAYER 0 1 0 1 +PRAIRIE 1 1 2 1 +PRAIRI 0 1 0 1 +POWER 21 1 22 21 +POSTERITY 1 1 2 1 +POST 3 1 3 4 +POSITIVELY 2 1 3 2 +PORTES 0 1 1 0 +POPPUM 0 1 0 1 +POPLED 0 1 0 1 +POOS 0 1 1 0 +POOLS 1 1 1 2 +POOL 1 1 1 2 +POND 2 1 3 2 +POLLAR 0 1 0 1 +POLAR 0 1 1 0 +POISONED 0 1 0 1 +POISON'D 0 1 1 0 +POINT 13 1 14 13 +POETESS 0 1 1 0 +POETES 0 1 0 1 +PLUSIASURUS 0 1 0 1 +PLURAL 1 1 2 1 +PLESIOSAURUS 0 1 1 0 +PLEASANTS 0 1 0 1 +PLEASANCE 0 1 1 0 +PLAYING 5 1 5 6 +PLAY 12 1 12 13 +PLAITS 0 1 1 0 +PLAIN 3 1 4 3 +PLACES 1 1 1 2 +PLACE 38 1 39 38 +PITHAGORIANS 0 1 0 1 +PILLION 0 1 1 0 +PIERCED 1 1 1 2 +PIERC'D 0 1 1 0 +PICTURES 3 1 3 4 +PICK 2 1 3 2 +PHRASE 3 1 3 4 +PHILIP 8 1 9 8 +PHAEDRUS 0 1 1 0 +PERSON 13 1 13 14 +PERICE 0 1 0 1 +PERFECT 6 1 7 6 +PERCHES 0 1 1 0 +PEPPEL 0 1 0 1 +PENINSULA 1 1 2 1 +PEGRENNE 0 1 1 0 +PEGRE 0 1 1 0 +PEARLS 0 1 0 1 +PEARL'S 0 1 1 0 +PEARL 12 1 12 13 +PATIENTS 0 1 1 0 +PATIENT 0 1 0 1 +PAST 12 1 12 13 +PASSIONAL 0 1 0 1 +PASSED 14 1 15 14 +PASSAGEWAY 0 1 0 1 +PASSAGE 8 1 9 8 +PASCHAL 0 1 1 0 +PARTICLES 0 1 1 0 +PARTICLE 0 1 0 1 +PARSON'S 0 1 0 1 +PARSON 0 1 0 1 +PAROQUET 0 1 1 0 +PARAPHERNALIA 1 1 2 1 +PARALLELOGRAM 0 1 1 0 +PARALLELLOGRAM 0 1 0 1 +PAPAL 0 1 1 0 +PANTS 0 1 1 0 +PANE 0 1 1 0 +PAIR 5 1 5 6 +PAGRIN 0 1 0 1 +OZMA 0 1 1 0 +OWENAIRS 0 1 0 1 +OWEN 0 1 1 0 +OWE 0 1 0 1 +OVERTRAY 0 1 0 1 +OVERLENE 0 1 0 1 +OVERLEAN 0 1 1 0 +OUTSTRIPPED 0 1 0 1 +OUTSTRIP 0 1 1 0 +OUTRAGED 0 1 0 1 +OUTRAGE 0 1 1 0 +OUTLEY'S 0 1 0 1 +OUGHTER 0 1 1 0 +OUGHT 10 1 10 11 +OTTLEY'S 0 1 1 0 +OTTER 0 1 0 1 +OTHOR 0 1 0 1 +OTHERWORLDLY 0 1 1 0 +OSTENSITY 0 1 0 1 +OSMO 0 1 0 1 +OSH 0 1 1 0 +OSAGE 0 1 1 0 +OPT 0 1 0 1 +OPIEVILLE 0 1 0 1 +OPHELIA 0 1 1 0 +OPE 0 1 0 1 +OPAQUE 0 1 1 0 +ONTO 0 1 1 0 +ONLY 77 1 77 78 +ONGOLATIONS 0 1 0 1 +OMELETTE 0 1 1 0 +OMELET 0 1 0 1 +OLIVES 0 1 0 1 +OLAF 1 1 2 1 +OFFICES 0 1 1 0 +OFFICER'S 0 1 0 1 +OFFICER 4 1 4 5 +OFFENSES 0 1 1 0 +OFFENCES 0 1 0 1 +OFFENCE 0 1 0 1 +ODD 3 1 3 4 +OCCUPANTS 0 1 1 0 +OCCUPANT 0 1 0 1 +OCCADIA 0 1 0 1 +OBSERVED 5 1 6 5 +OBSERVE 4 1 4 5 +OBJECTLY 0 1 0 1 +OARS 0 1 1 0 +OAKS 0 1 1 0 +O'ER 0 1 1 0 +NUMIDIA 0 1 1 0 +NOVELS 2 1 2 3 +NOVEL'S 0 1 1 0 +NOUGHT 0 1 1 0 +NOTHING 33 1 33 34 +NOTHIN 0 1 1 0 +NOTABLY 0 1 1 0 +NOSE 2 1 2 3 +NORTHWEST 0 1 1 0 +NORTHWARDS 0 1 1 0 +NORTHWARD 1 1 1 2 +NORTH 8 1 8 9 +NONCOMPOSTER 0 1 0 1 +NON 3 1 4 3 +NODS 0 1 1 0 +NODDING 0 1 0 1 +NINE 10 1 11 10 +NEWBORN 0 1 0 1 +NEVER 63 1 63 64 +NET 0 1 1 0 +NEOPLATANISTS 0 1 0 1 +NELLY 0 1 1 0 +NELLIERS 0 1 0 1 +NEIGHBOURHOOD 0 1 0 1 +NEIGHBORHOOD 0 1 1 0 +NEED 12 1 12 13 +NED 1 1 1 2 +NEARER 3 1 4 3 +NEAR 6 1 6 7 +NE'ER 0 1 1 0 +NARES 0 1 1 0 +NAMED 3 1 4 3 +NAME 14 1 14 15 +MYSTERY 5 1 5 6 +MYLES 0 1 0 1 +MUZOLEUM 0 1 0 1 +MURCHISTON 0 1 0 1 +MUCH 68 1 69 68 +MOVED 10 1 10 11 +MOUTHED 1 1 2 1 +MOUTH 5 1 5 6 +MOURNED 0 1 0 1 +MOURN 0 1 1 0 
+MOUNTED 0 1 1 0 +MOUNTAIN 5 1 5 6 +MOULDED 0 1 0 1 +MOTHERS 1 1 2 1 +MOTHER'S 4 1 4 5 +MORMONISM 2 1 3 2 +MORE 119 1 119 120 +MONTMARTRA 0 1 0 1 +MONTFICHET 8 1 9 8 +MONSTERS 1 1 2 1 +MOMBY 0 1 0 1 +MOMBI 0 1 1 0 +MOLDED 0 1 1 0 +MOLD 0 1 0 1 +MOHICANS 0 1 0 1 +MOHICAN 0 1 1 0 +MO 0 1 1 0 +MITIGATED 0 1 0 1 +MISSY 0 1 0 1 +MISSOURIANS 1 1 2 1 +MISSIONS 0 1 0 1 +MISERIES 0 1 0 1 +MISCHIAGO 0 1 0 1 +MINT 0 1 1 0 +MINE 6 1 7 6 +MIND 29 1 29 30 +MILLS 0 1 0 1 +MILLIONED 0 1 0 1 +MILLION'D 0 1 1 0 +MILLIMETER 0 1 1 0 +MILLIGRAM 0 1 1 0 +MILLAMETER 0 1 0 1 +MILL 0 1 0 1 +MILITATED 0 1 1 0 +MILES 6 1 7 6 +MILAGRAM 0 1 0 1 +MIKE 1 1 2 1 +MIGALLATIONS 0 1 0 1 +MICHAEL 0 1 0 1 +METRES 0 1 0 1 +METICOFF'S 0 1 0 1 +METERS 0 1 1 0 +METERPLATES 0 1 0 1 +METEOR 0 1 0 1 +METAL 0 1 1 0 +MET 10 1 10 11 +MERSEY 0 1 1 0 +MERGANSER 0 1 1 0 +MERGANCER 0 1 0 1 +MERCY 2 1 2 3 +MERCHISTON 0 1 1 0 +MER 0 1 1 0 +MENT 0 1 0 1 +MEND 1 1 2 1 +MEET 6 1 7 6 +MEDICRAFT 0 1 0 1 +MEDIA 0 1 0 1 +MEDAL 0 1 0 1 +MEAN 9 1 9 10 +MEALYBACK 0 1 1 0 +MEALEY 0 1 0 1 +MEADOWCROFT'S 0 1 1 0 +MEADOWCROFT 0 1 1 0 +MC 1 1 1 2 +MAYBEL 0 1 0 1 +MAYBE 0 1 0 1 +MAUSOLEUM 0 1 1 0 +MATES 0 1 0 1 +MATE 1 1 2 1 +MASTERY 0 1 1 0 +MASTER'S 1 1 2 1 +MASTER 14 1 14 15 +MARY 5 1 6 5 +MARVELOUS 0 1 1 0 +MARVELLOUS 0 1 0 1 +MARTRA 0 1 0 1 +MARTIAL 1 1 1 2 +MARSHALLED 0 1 1 0 +MARSHALED 0 1 0 1 +MARSHAL 1 1 1 2 +MARRIED 2 1 2 3 +MARKET 1 1 1 2 +MARKED 2 1 3 2 +MARIVAUX 1 1 2 1 +MARGOLOTTE 4 1 5 4 +MARGOLOTT 0 1 0 1 +MARE 0 1 0 1 +MARAVO 0 1 0 1 +MARAIS 0 1 1 0 +MARAE 0 1 0 1 +MANYWHERE 0 1 0 1 +MANY 40 1 41 40 +MANSERVANT 0 1 1 0 +MANNA 0 1 1 0 +MANIFESTED 0 1 1 0 +MANIFEST 2 1 2 3 +MANA 0 1 0 1 +MAN'S 5 1 5 6 +MAKE 40 1 40 41 +MAIDS 3 1 4 3 +MAIDEN 0 1 0 1 +MAID 5 1 6 5 +MADE 61 1 61 62 +MACDONALDS 0 1 1 0 +MABEL 0 1 1 0 +LYNCHINGS 0 1 1 0 +LUTHERS 0 1 0 1 +LUTHER'S 3 1 4 3 +LUST 0 1 0 1 +LUNCHINGS 0 1 0 1 +LUIS 0 1 1 0 +LUBRICATE 0 1 1 0 +LUBRICADE 0 1 0 1 +LOWERED 0 1 0 1 +LOWER 5 1 6 5 +LOVES 3 1 3 4 +LOVE'S 0 1 1 0 +LOUISE 4 1 4 5 +LOU'S 0 1 1 0 +LOST 12 1 12 13 +LORNE 0 1 1 0 +LORN 0 1 0 1 +LOOSE 5 1 5 6 +LONG 28 1 29 28 +LOCKY 0 1 0 1 +LOCATEOUS 0 1 0 1 +LOCATED 1 1 1 2 +LOCALIA 0 1 0 1 +LO 1 1 2 1 +LIVES 5 1 6 5 +LIVERYED 0 1 0 1 +LIVERIED 0 1 1 0 +LIVE 9 1 9 10 +LITTLE 101 1 101 102 +LITERALLY 1 1 2 1 +LITER 0 1 1 0 +LIST 1 1 1 2 +LINKED 0 1 0 1 +LINK'D 0 1 1 0 +LILY'S 0 1 0 1 +LILY 2 1 2 3 +LILLYS 0 1 1 0 +LILLY 0 1 1 0 +LILBURN 0 1 1 0 +LIGHTFOOTED 0 1 0 1 +LIGHT 38 1 39 38 +LIES 8 1 8 9 +LIE 1 1 1 2 +LETS 0 1 1 0 +LET'S 1 1 2 1 +LESSER 1 1 2 1 +LESS 28 1 28 29 +LEOCADI 0 1 1 0 +LEFRANK 0 1 1 0 +LEAVING 5 1 6 5 +LEAVE 15 1 16 15 +LEAST 15 1 15 16 +LEASED 0 1 1 0 +LEAPING 3 1 3 4 +LEAK 0 1 0 1 +LEAF 3 1 3 4 +LEADER 2 1 2 3 +LAWN 1 1 1 2 +LARKSBURG 0 1 0 1 +LARKS 0 1 0 1 +LAPE 0 1 0 1 +LANTHORN 0 1 1 0 +LANTERN 0 1 0 1 +LANDA 0 1 0 1 +LAND 12 1 13 12 +LAMENT 0 1 0 1 +LAMBENT 0 1 1 0 +LALLY 0 1 0 1 +LALLIE 0 1 1 0 +LABOUR 0 1 1 0 +LABOR 1 1 1 2 +LA 4 1 5 4 +L 1 1 2 1 +KREITON 0 1 0 1 +KNOCKED 3 1 4 3 +KNIFE 9 1 10 9 +KNEW 24 1 25 24 +KNEEP 0 1 0 1 +KNEED 0 1 1 0 +KNEE 0 1 1 0 +KIRTLAND 0 1 1 0 +KIOPH 0 1 0 1 +KINGDOMS 1 1 2 1 +KINGDOM'S 0 1 0 1 +KING'S 3 1 4 3 +KING 26 1 26 27 +KICKAPOOS 0 1 0 1 +KICK 1 1 2 1 +KIARASCURISTS 0 1 0 1 +KEZWICK 0 1 0 1 +KEYNOTE 0 1 1 0 +KESWICK 0 1 1 0 +KEOGH 0 1 1 0 +KEEPER 1 1 1 2 +KEEN 3 1 3 4 +KATHLEEN 0 1 1 0 +JUS 0 1 1 0 +JOB 4 1 4 5 +JEWELLER'S 0 1 0 1 +JEWELER'S 0 1 1 0 +JEROOT 0 1 0 1 +JEFFREY'S 0 1 0 1 +JEFFREY 0 1 0 1 +JAW 1 1 2 1 +JASPER 5 1 6 5 +JAPS 0 1 0 1 +JANE'S 0 1 1 0 +JANE 4 1 4 5 +JACKKNIFE 0 1 
0 1 +JACK 5 1 6 5 +ITTHIASORIS 0 1 0 1 +IT'LL 1 1 2 1 +ISIATED 0 1 0 1 +IRON'S 0 1 1 0 +IRON 2 1 2 3 +INVENTORS 1 1 2 1 +INTRUSTING 0 1 0 1 +INTRICTS 0 1 0 1 +INTRENCHMENT 0 1 1 0 +INTERESTS 1 1 2 1 +INTENTS 0 1 1 0 +INTENSE 2 1 2 3 +INSURE 0 1 0 1 +INQUIRIES 1 1 1 2 +INQUIRED 2 1 2 3 +INQUIRE 0 1 0 1 +INJURE 0 1 0 1 +INGACHGOOK 0 1 0 1 +INFORM 1 1 1 2 +INFERENCE 0 1 1 0 +INFANTS 2 1 3 2 +INFANT'S 0 1 0 1 +INERLOCKY 0 1 0 1 +INDEED 29 1 29 30 +INCULCATED 0 1 1 0 +INCONDESCENT 0 1 0 1 +INCLOSED 0 1 1 0 +INCERTAINTY 0 1 1 0 +INCANDESCENT 0 1 1 0 +INCALCATED 0 1 0 1 +INACTION 0 1 1 0 +IMPRESSES 0 1 1 0 +IMPRESS'D 0 1 1 0 +IMPOSED 0 1 1 0 +IMPLORES 0 1 1 0 +IMPLIES 3 1 3 4 +IMPERILLED 0 1 0 1 +IMPERFECT 0 1 0 1 +IMPEARLED 0 1 1 0 +IMMIGRANT 0 1 0 1 +IMMATURE 0 1 1 0 +IMMATOR 0 1 0 1 +IMBIBED 0 1 1 0 +IKE 0 1 1 0 +IDIOSYNCRATICALLY 0 1 1 0 +IDIO 0 1 0 1 +ICTUSORIS 0 1 0 1 +ICT 0 1 0 1 +IBED 0 1 0 1 +IAGO 0 1 0 1 +HYAHSOME 0 1 0 1 +HURRIED 6 1 6 7 +HUNTLY 0 1 0 1 +HUNTLEY 0 1 1 0 +HUMPH 0 1 1 0 +HUMID 0 1 1 0 +HUMAN 15 1 15 16 +HULLO 0 1 0 1 +HOUSES 1 1 1 2 +HOUSECLEANING 0 1 1 0 +HOUND 0 1 1 0 +HOTBED 0 1 1 0 +HOT 3 1 3 4 +HORTON 0 1 1 0 +HORSEPLAY 0 1 1 0 +HORSE 6 1 6 7 +HORRANCE 0 1 0 1 +HORACE 0 1 1 0 +HOPKINS'S 0 1 1 0 +HOPKINS 4 1 4 5 +HOPES 5 1 6 5 +HOPE 9 1 10 9 +HONOURED 0 1 1 0 +HONOURABLE 3 1 3 4 +HONOUR 1 1 2 1 +HONORIFIC 1 1 2 1 +HONORED 1 1 1 2 +HONORABLE 1 1 2 1 +HONOR 4 1 4 5 +HOMES 2 1 2 3 +HOME 23 1 23 24 +HOLTREE 0 1 0 1 +HOLOCAUST 0 1 1 0 +HOLMES 9 1 10 9 +HOLLOWED 0 1 0 1 +HOLLOW 2 1 3 2 +HOLE 1 1 1 2 +HOLD 7 1 8 7 +HOHLAST 0 1 0 1 +HO 0 1 1 0 +HIT 1 1 2 1 +HILDER 0 1 0 1 +HILDA'S 1 1 2 1 +HIGHEST 2 1 3 2 +HIGHER 2 1 2 3 +HIGH 17 1 18 17 +HIDALGO 0 1 1 0 +HID 1 1 1 2 +HETTY'S 0 1 0 1 +HETTY 0 1 1 0 +HESITATED 1 1 1 2 +HERACLITUS 0 1 0 1 +HERACLEITUS 0 1 1 0 +HENLOORD 0 1 0 1 +HENCHMEN 0 1 1 0 +HENCHMAN 0 1 0 1 +HELPS 0 1 0 1 +HELPED 2 1 3 2 +HELP 18 1 18 19 +HELDA'S 0 1 0 1 +HEIR 0 1 0 1 +HEAVEN 14 1 14 15 +HEART'S 0 1 1 0 +HEARKENED 0 1 0 1 +HEAD 36 1 36 37 +HE'D 2 1 3 2 +HAZEWRAPPED 0 1 1 0 +HAYTER 0 1 0 1 +HAYNE 0 1 0 1 +HAYES 0 1 0 1 +HAWTREY 0 1 1 0 +HAWKIN 0 1 0 1 +HAVING 11 1 12 11 +HAUGHTY 3 1 4 3 +HATER 0 1 1 0 +HARTS 0 1 1 0 +HARRYTOWN 0 1 0 1 +HARRIED 0 1 1 0 +HAROLD 0 1 0 1 +HARMONIZED 1 1 2 1 +HARMONIZE 0 1 0 1 +HARKENED 0 1 1 0 +HARBOURING 0 1 0 1 +HARBORING 0 1 1 0 +HARANGUE 0 1 1 0 +HARANG 0 1 0 1 +HARALD 0 1 1 0 +HAPPENED 6 1 6 7 +HAPPEN 4 1 5 4 +HANNA 0 1 1 0 +HANGINGS 0 1 1 0 +HANGING 2 1 2 3 +HANDSOME 3 1 3 4 +HANDLED 0 1 0 1 +HAM 0 1 1 0 +HALTING 0 1 0 1 +HALLOA 0 1 1 0 +HALF 19 1 19 20 +HAL 0 1 1 0 +HAKON 0 1 1 0 +HAIRDRESSER 0 1 1 0 +HAIR 6 1 6 7 +HADALGO 0 1 0 1 +GUISE 0 1 1 0 +GUEST 3 1 4 3 +GUESS 0 1 0 1 +GRUE 0 1 0 1 +GROWS 1 1 2 1 +GROVES 0 1 0 1 +GROUND 10 1 10 11 +GRINGO 0 1 1 0 +GRIEFS 0 1 1 0 +GREY'S 0 1 1 0 +GREENOW 0 1 0 1 +GREEN 12 1 12 13 +GREEING 0 1 1 0 +GREEDS 0 1 0 1 +GREE 0 1 0 1 +GRAY'S 0 1 0 1 +GRAVE 3 1 4 3 +GRATES 0 1 0 1 +GRATE 1 1 1 2 +GRAPEVINE 0 1 1 0 +GRAPE 0 1 0 1 +GRAND 1 1 2 1 +GRAM 0 1 1 0 +GRAHAM 0 1 0 1 +GRADUALLY 4 1 5 4 +GRADUAL 0 1 0 1 +GRADES 0 1 1 0 +GOVERNOR 14 1 14 15 +GOVERNMENT 7 1 8 7 +GOVERNED 0 1 1 0 +GOVERN 0 1 0 1 +GOULD 0 1 0 1 +GOSSIPS 0 1 0 1 +GOSSIP 1 1 2 1 +GORIST 0 1 0 1 +GOODN'T 0 1 0 1 +GOOD 69 1 69 70 +GOOBERS 0 1 1 0 +GOING 26 1 26 27 +GOCRIST 0 1 0 1 +GOATS 0 1 0 1 +GOAT'S 1 1 2 1 +GO 37 1 37 38 +GNAWEDS 0 1 0 1 +GIVE 29 1 30 29 +GIRT 0 1 0 1 +GIRARD 0 1 1 0 +GILCRIS 0 1 0 1 +GILCRE'S 0 1 0 1 +GILCHRIST'S 0 1 1 0 +GIER 0 1 1 0 +GIAOURS 0 1 1 0 +GHIP 3 1 4 3 +GEYORS 0 1 0 1 +GESTATION 0 1 1 0 
+GERARD 0 1 0 1 +GEOFFREY'S 0 1 1 0 +GEOFFREY 0 1 1 0 +GENTLEMEN 5 1 6 5 +GENTLEMAN 8 1 8 9 +GENERALSHIP 0 1 0 1 +GENERAL 16 1 17 16 +GELIA 0 1 0 1 +GEAR 0 1 0 1 +GAYLY 0 1 1 0 +GAY 0 1 1 0 +GAVE 31 1 31 32 +GAME 4 1 4 5 +GAMBUS 0 1 0 1 +GAILY 0 1 0 1 +FUTURISTIC 0 1 1 0 +FUTURESTIC 0 1 0 1 +FURTHEST 0 1 0 1 +FURS 0 1 0 1 +FUR 0 1 0 1 +FUN 0 1 0 1 +FULNESS 0 1 1 0 +FULLNESS 0 1 0 1 +FULL 17 1 18 17 +FRONTISPIECE 0 1 1 0 +FRONTESPIECE 0 1 0 1 +FROM 187 1 187 188 +FRISKLY 0 1 0 1 +FRISKILY 0 1 1 0 +FRIGHTENED 2 1 2 3 +FRIEND 20 1 21 20 +FREQUENCY 0 1 0 1 +FRANKS 0 1 0 1 +FRANK 2 1 2 3 +FRANCS 0 1 1 0 +FOWLED 0 1 0 1 +FOUR 12 1 12 13 +FORWARDED 0 1 1 0 +FORM 11 1 12 11 +FORESEEING 0 1 1 0 +FORCE 17 1 17 18 +FORBES'S 0 1 1 0 +FOLLOWED 14 1 15 14 +FLUFFINOSE 0 1 1 0 +FLUFFINO'S 0 1 0 1 +FLOWER 4 1 4 5 +FLOUR 0 1 1 0 +FLIGHT 2 1 3 2 +FIVE 15 1 15 16 +FIT 0 1 0 1 +FIRST 67 1 67 68 +FIRS 0 1 1 0 +FIREBUGS 0 1 1 0 +FIREBALL 0 1 1 0 +FIR 8 1 9 8 +FINE 16 1 17 16 +FILT 0 1 0 1 +FILLS 2 1 2 3 +FILLIOTTED 0 1 0 1 +FILLED 8 1 8 9 +FILL 1 1 2 1 +FEW 27 1 28 27 +FETE 2 1 3 2 +FENCE 0 1 1 0 +FELLED 1 1 1 2 +FEELS 1 1 2 1 +FEELINS 0 1 0 1 +FEELIN'S 0 1 1 0 +FEEL 17 1 18 17 +FEEDRESS 0 1 0 1 +FEAREST 0 1 1 0 +FEARED 2 1 3 2 +FEAR'ST 0 1 0 1 +FEAR 12 1 12 13 +FAVOURITE 1 1 1 2 +FAVOUR 0 1 0 1 +FAVORITE 3 1 4 3 +FAVOR 1 1 2 1 +FAUDOIR 0 1 0 1 +FATTENED 0 1 1 0 +FATE 0 1 0 1 +FAT 3 1 3 4 +FARTHEST 1 1 2 1 +FARMHOUSES 0 1 1 0 +FARM 8 1 8 9 +FAR 29 1 30 29 +FANIA 0 1 0 1 +FAMOUS 2 1 3 2 +FAME 2 1 2 3 +FALLEN 2 1 3 2 +FAIREST 0 1 0 1 +EYE 14 1 15 14 +EXIST 2 1 3 2 +EXCUSE 3 1 3 4 +EXCENTRICITY 0 1 0 1 +EXAGGARIST 0 1 0 1 +EVOLUTION 2 1 3 2 +EVENS 0 1 0 1 +EVENINGS 1 1 1 2 +EVENIN'S 0 1 1 0 +EVEN 51 1 51 52 +EVA 2 1 3 2 +EULO 0 1 0 1 +ETHNE 0 1 0 1 +ETHINE 0 1 0 1 +ETHEL 0 1 0 1 +ESTATE 2 1 3 2 +ESPRIT 0 1 1 0 +ESPECIAL 1 1 1 2 +ESCHEATED 0 1 1 0 +ESCAPED 0 1 1 0 +ESCAPE 4 1 4 5 +ERNEST 0 1 1 0 +ER 0 1 1 0 +ENTRUSTING 0 1 1 0 +ENTRENCHMENT 0 1 0 1 +ENTRANCED 0 1 1 0 +ENTHRALMENT 0 1 1 0 +ENTHRALLMENT 0 1 0 1 +ENTER 8 1 9 8 +ENSURE 0 1 1 0 +ENQUIRIES 0 1 1 0 +ENQUIRED 0 1 1 0 +ENQUIRE 0 1 1 0 +ENGINEERS 2 1 2 3 +ENGINEER 3 1 4 3 +ENG 0 1 0 1 +ENCLOSED 0 1 0 1 +EMISSIONS 0 1 1 0 +EMIGRANT 0 1 1 0 +EMERGED 0 1 0 1 +EMERG'D 0 1 1 0 +EMBICIDES 0 1 0 1 +ELZINORE 0 1 0 1 +ELSINORE 0 1 1 0 +ELMO'S 0 1 1 0 +ELKOE 0 1 0 1 +ELCHO 0 1 1 0 +ELABORATE 2 1 3 2 +EITHER 8 1 8 9 +EFFERENCE 0 1 0 1 +EFFECTED 1 1 2 1 +EFFECT 9 1 9 10 +EDITION 0 1 1 0 +ECCENTRICITY 0 1 1 0 +EARSHOT 0 1 1 0 +EARNEST 4 1 4 5 +EAR 6 1 6 7 +DYKES 0 1 1 0 +DUST 3 1 4 3 +DUSPORT 0 1 0 1 +DUS 0 1 0 1 +DURING 11 1 12 11 +DURE 0 1 0 1 +DUMPEY 0 1 0 1 +DUERER 0 1 1 0 +DUCHEN 0 1 0 1 +DRUNKENNESS 0 1 1 0 +DRUGGIST'S 0 1 1 0 +DRUGGIST 0 1 0 1 +DROPIDUS 0 1 0 1 +DROPIDAS 0 1 1 0 +DRIPPING 0 1 1 0 +DRINKENNESS 0 1 0 1 +DREWING 0 1 0 1 +DRESSER 0 1 0 1 +DOWNSTAIRS 1 1 1 2 +DOWNREACHING 0 1 0 1 +DOUZE 0 1 1 0 +DOUBLED 0 1 0 1 +DOUBLE 5 1 6 5 +DORIS 0 1 0 1 +DOORSTEP 0 1 0 1 +DOOR 35 1 36 35 +DONATISTS 1 1 2 1 +DONATIST 0 1 0 1 +DONALDS 0 1 0 1 +DONA 1 1 2 1 +DOLL 1 1 2 1 +DOES 14 1 14 15 +DOCTRIS 0 1 0 1 +DOCTRESS 0 1 1 0 +DOCKBROKER 0 1 0 1 +DISTRICTS 1 1 2 1 +DISTRICT 1 1 1 2 +DISQUIETUDE 0 1 1 0 +DISPENSE 0 1 1 0 +DISHONOURED 0 1 1 0 +DISHONORED 0 1 0 1 +DISCOLOURED 0 1 1 0 +DISCOLORED 0 1 0 1 +DIMMED 0 1 0 1 +DIKES 0 1 0 1 +DIETRIBE 0 1 0 1 +DIATRIBE 0 1 1 0 +DIAS 0 1 1 0 +DIALOGUES 1 1 2 1 +DIALECTS 0 1 0 1 +DETERNATION 0 1 0 1 +DETERMINED 4 1 5 4 +DETERMINE 1 1 1 2 +DETENNACHELANT 0 1 0 1 +DESTA 0 1 0 1 +DERPOOL 0 1 0 1 +DERBOOLE 0 1 0 1 +DENSITY 0 1 1 0 
+DENOTTINGHAM 0 1 0 1 +DENISCHALANT 0 1 0 1 +DELIBERATIVE 0 1 1 0 +DELIBERATE 2 1 2 3 +DELHIA 0 1 0 1 +DELA 0 1 0 1 +DEFINED 1 1 2 1 +DEED 2 1 3 2 +DEDALUS 1 1 2 1 +DEDALOS 0 1 1 0 +DECK 1 1 1 2 +DECENCY 3 1 4 3 +DECENCIES 0 1 0 1 +DEARIE 0 1 0 1 +DEALT 0 1 0 1 +DAWNS 0 1 0 1 +DAWN'S 0 1 1 0 +DARK 16 1 17 16 +DAMNLY 0 1 0 1 +DAIS 0 1 0 1 +DAIRY 4 1 5 4 +DAEDALUS 0 1 0 1 +DA 0 1 0 1 +D'ESTE 0 1 1 0 +D 1 1 2 1 +CYNTHIA'S 1 1 1 2 +CYNTHIA 2 1 3 2 +CYN 0 1 1 0 +CUTTERS 0 1 1 0 +CUT 4 1 5 4 +CUSTOMARY 0 1 0 1 +CUSTOMARILY 0 1 1 0 +CURVED 0 1 1 0 +CURTLEND 0 1 0 1 +CURE 0 1 0 1 +CRYSTAL 3 1 4 3 +CRUSTED 0 1 0 1 +CRUSHWELLER 0 1 0 1 +CROSSTREES 0 1 1 0 +CRISTEL 0 1 0 1 +CRIGHTON 0 1 0 1 +CRESTWELLERS 0 1 0 1 +CRESTED 0 1 1 0 +CRESSWELLS 0 1 1 0 +CRASWELLERS 0 1 1 0 +CRASWELLER 0 1 1 0 +CRAFT 0 1 0 1 +COZIER 0 1 1 0 +COWSHED 0 1 0 1 +COW 1 1 2 1 +COURSE 19 1 19 20 +COURANT 0 1 1 0 +COUNTRY'S 1 1 2 1 +COUNTRY 25 1 25 26 +COUNSELLED 1 1 2 1 +COUNCILS 0 1 0 1 +COULDN' 0 1 0 1 +COTTON 11 1 12 11 +COTEL 0 1 0 1 +COSTS 0 1 1 0 +COST 4 1 4 5 +COSIER 0 1 0 1 +CORRELATES 0 1 1 0 +CORRECTURISTIC 0 1 0 1 +CORPS 0 1 0 1 +COROLLETS 0 1 0 1 +CORNEERS 0 1 0 1 +CORN 4 1 5 4 +CORMORANT 0 1 1 0 +CORLEY 0 1 0 1 +CORALIE 0 1 1 0 +COPE 0 1 0 1 +COOK 1 1 2 1 +CONTIN 0 1 0 1 +CONTENTS 1 1 1 2 +CONTAINING 0 1 0 1 +CONTAINED 2 1 3 2 +CONSUMERS 2 1 2 3 +CONSUMER'S 0 1 1 0 +CONSIDERABLE 6 1 6 7 +CONSID'BLE 0 1 1 0 +CONQUERING 0 1 0 1 +CONQUERIN 0 1 1 0 +CONJUROR 0 1 0 1 +CONJURER 0 1 1 0 +CONFESSED 1 1 1 2 +CONFESS 3 1 4 3 +CONDENSE 0 1 1 0 +COMRADE 0 1 0 1 +COMPOSSER 0 1 1 0 +COMPOSED 1 1 1 2 +COMPOSE 0 1 1 0 +COMPANY 10 1 11 10 +COMMENTS 0 1 1 0 +COMETS 0 1 0 1 +COMBATCH 0 1 0 1 +COMBASH 0 1 1 0 +COLLARS 1 1 1 2 +COLDWELL 0 1 0 1 +COLD 8 1 9 8 +COEXIST 0 1 0 1 +COAL 0 1 1 0 +CLUE 1 1 1 2 +CLOTH 3 1 4 3 +CLEW 0 1 1 0 +CLENCHED 1 1 1 2 +CLEANING 2 1 2 3 +CLAWS 2 1 2 3 +CLAUSE 0 1 1 0 +CIVET 0 1 1 0 +CITY 15 1 15 16 +CITADELLED 0 1 1 0 +CITADELED 0 1 0 1 +CIGARETTE 1 1 2 1 +CHRONICAL 0 1 0 1 +CHRISWELL 0 1 0 1 +CHRISTIAN 6 1 6 7 +CHRISTAIN 0 1 1 0 +CHORD 0 1 1 0 +CHOLERAIST 0 1 0 1 +CHOKES 0 1 0 1 +CHOICES 0 1 0 1 +CHOICE 1 1 2 1 +CHINGACHGOOK 1 1 2 1 +CHILL 0 1 0 1 +CHIAROSCURISTS 0 1 1 0 +CHIAROSCURIST 0 1 1 0 +CHERRIES 2 1 3 2 +CHEROOT 0 1 1 0 +CHEBATA 0 1 0 1 +CHATTERBOX 0 1 1 0 +CHATTER 0 1 0 1 +CHARACTERISTIC 9 1 10 9 +CHANGED 6 1 6 7 +CHANGE 8 1 9 8 +CHAIR 15 1 15 16 +CERTAINTY 2 1 2 3 +CENTRED 0 1 1 0 +CENTRE 1 1 1 2 +CENTER 1 1 2 1 +CENDENARIES 0 1 1 0 +CELEBRATE 1 1 1 2 +CEASED 1 1 1 2 +CEASE 1 1 2 1 +CEASD 0 1 1 0 +CAUGHT 10 1 10 11 +CATHERINE 0 1 0 1 +CASTS 0 1 1 0 +CAST 8 1 8 9 +CASH 0 1 0 1 +CASE 16 1 16 17 +CARVED 1 1 1 2 +CARPATCHIO'S 0 1 0 1 +CARPACCIO'S 0 1 1 0 +CARDLE 0 1 0 1 +CANVAS 3 1 4 3 +CANNOT 16 1 16 17 +CALDWELL 0 1 1 0 +BYRNE 0 1 0 1 +BYE 2 1 2 3 +BUTT 0 1 1 0 +BUTCHERED 0 1 1 0 +BUTCHER 0 1 0 1 +BURNED 1 1 1 2 +BURGOYNE 0 1 1 0 +BUNNOT 0 1 0 1 +BUNNIT 0 1 1 0 +BULL 2 1 2 3 +BUILDING 4 1 5 4 +BUILD 1 1 1 2 +BUGS 0 1 0 1 +BUCHANAN 0 1 1 0 +BUCATED 0 1 0 1 +BUB 0 1 0 1 +BROWNWELL 0 1 0 1 +BROTHELS 0 1 1 0 +BROGG 0 1 0 1 +BRITANNULISTS 0 1 1 0 +BRISK 0 1 1 0 +BRINGING 4 1 5 4 +BREG 0 1 0 1 +BREAKWATER 0 1 1 0 +BREAKFAST 1 1 1 2 +BREAKFAS 0 1 1 0 +BRAY 0 1 0 1 +BRANWELL 0 1 1 0 +BRANDS 1 1 2 1 +BRAND 0 1 0 1 +BRANCH 3 1 4 3 +BRAINS 3 1 3 4 +BRAGGLIN 0 1 0 1 +BRAFFLELS 0 1 0 1 +BOXWOMEN 0 1 0 1 +BOWLS 0 1 0 1 +BOWL 0 1 0 1 +BOWER 1 1 1 2 +BOUGHT 0 1 0 1 +BOTANY 0 1 1 0 +BORN 7 1 8 7 +BORDERS 3 1 4 3 +BORDER 3 1 3 4 +BOOKS 8 1 8 9 +BOOKKEEPER 0 1 1 0 +BOO 0 1 0 1 +BOLLS 0 1 1 0 +BOGUS 2 1 3 2 +BOAT 2 1 
2 3 +BOARHOUND 0 1 0 1 +BOAR 0 1 1 0 +BLUESKINS 1 1 2 1 +BLUESKIN 0 1 0 1 +BLINT 0 1 0 1 +BLESSINGS 2 1 3 2 +BLESSING 2 1 2 3 +BLASTS 0 1 1 0 +BLAST 0 1 0 1 +BITES 0 1 1 0 +BILLYGOAT 0 1 1 0 +BILLING 0 1 0 1 +BILLED 0 1 1 0 +BID 1 1 1 2 +BEWILDERMENT 0 1 1 0 +BETTER 25 1 25 26 +BESIDES 7 1 8 7 +BERKS 0 1 0 1 +BERGSON 1 1 2 1 +BELIEVED 5 1 5 6 +BELIEVE 20 1 21 20 +BEIN 0 1 1 0 +BEGGED 1 1 1 2 +BEGGARS 0 1 0 1 +BEGGAR'S 0 1 1 0 +BEG 1 1 2 1 +BEFIT 0 1 1 0 +BEFELL 0 1 1 0 +BEFEL 0 1 0 1 +BEFALL 0 1 0 1 +BEFAL 0 1 1 0 +BEEN 136 1 137 136 +BEELZEBUB 0 1 1 0 +BEEDER 0 1 1 0 +BEECHED 0 1 0 1 +BEEBE 0 1 1 0 +BEDIMMED 0 1 1 0 +BED 12 1 12 13 +BEAR 10 1 11 10 +BEALES 0 1 0 1 +BEACHED 0 1 1 0 +BATTLEX 0 1 0 1 +BASKEY 0 1 0 1 +BASKET 1 1 2 1 +BARTANY 0 1 0 1 +BARON 0 1 0 1 +BARBAROUSA 0 1 0 1 +BANDS 3 1 4 3 +BANDA 0 1 0 1 +BAN 0 1 0 1 +BALL 4 1 4 5 +BALINE 0 1 0 1 +BALEM'S 0 1 0 1 +BALEEN 0 1 1 0 +BALAAM'S 0 1 1 0 +BAINS 0 1 1 0 +BADAUDERIE 0 1 1 0 +BACK 45 1 45 46 +BABIRUSA 0 1 1 0 +B 2 1 2 3 +AWHILE 0 1 1 0 +AWAY 50 1 50 51 +AWARE 5 1 6 5 +AU 0 1 1 0 +ATTORIAN 0 1 0 1 +ATTITSON 0 1 0 1 +ATTAINED 2 1 3 2 +ATHOLEMEN 0 1 1 0 +ATCHISON 0 1 1 0 +ASSEMBLED 2 1 3 2 +ASCENDENCY 0 1 1 0 +ASCENDANCY 0 1 0 1 +ARTICHOKES 0 1 1 0 +ART 14 1 14 15 +ARRONDISSEMENT 0 1 1 0 +ARRIVING 1 1 2 1 +ARRESTS 0 1 1 0 +ARRANGING 1 1 2 1 +ARMOUR 0 1 1 0 +ARMOR 0 1 0 1 +ARMED 1 1 2 1 +ARM 5 1 5 6 +ARDLE 0 1 1 0 +APPRENTICED 0 1 0 1 +APPRENTICE 2 1 3 2 +APILLION 0 1 0 1 +ANYWHERE 5 1 6 5 +ANYTHING 17 1 17 18 +ANYMORE 0 1 1 0 +ANTIDATING 0 1 0 1 +ANTEDATING 0 1 1 0 +ANSWERED 14 1 14 15 +ANSWERD 0 1 1 0 +ANNE'S 1 1 2 1 +ANNALS 2 1 3 2 +ANNAL 0 1 0 1 +ANGULATIONS 0 1 0 1 +ANGORE 0 1 0 1 +ANGOR 0 1 1 0 +ANGELS 2 1 3 2 +ANGEL 1 1 1 2 +ANDREW 0 1 0 1 +ANDER'S 0 1 0 1 +ANCIENT 3 1 3 4 +ANAXAGORAS 0 1 1 0 +ANALIA 0 1 0 1 +AMYL 0 1 0 1 +AMPHITHEATRE 0 1 0 1 +AMPHITHEATER 0 1 1 0 +AMEN 0 1 0 1 +ALYOCADIA'S 0 1 0 1 +ALTOGETHER 6 1 6 7 +ALTERNATIVE 0 1 1 0 +ALREADY 22 1 22 23 +ALONE 23 1 23 24 +ALLUVION 0 1 1 0 +ALLUVIAN 0 1 0 1 +ALBIGENZAS 0 1 0 1 +ALBIGENSES 0 1 1 0 +ALBANS 0 1 1 0 +ALBAN'S 0 1 0 1 +AIRABLE'S 0 1 0 1 +AIR 24 1 25 24 +AIGNAN 0 1 1 0 +AIDS 0 1 0 1 +AID 1 1 2 1 +AFTERDECK 0 1 1 0 +AFTER 58 1 58 59 +AFFRIGHTENED 0 1 1 0 +AFFILIATED 1 1 2 1 +AFFECTED 3 1 3 4 +AFFECT 0 1 1 0 +AFAR 0 1 0 1 +ADONNA 0 1 0 1 +ADONA 0 1 1 0 +ADELAX 0 1 0 1 +ADDITION 0 1 0 1 +ACTIVELY 1 1 1 2 +ACTION 11 1 11 12 +ACKNOWLEDGMENT 0 1 0 1 +ACKNOWLEDGEMENT 0 1 1 0 +ACCURANT 0 1 0 1 +ACCORD 2 1 2 3 +ACCOMPANIED 3 1 3 4 +ABOLITIONISTS 0 1 1 0 +ABOLITIONIST 0 1 0 1 +ABJECTLY 0 1 1 0 +ABILITY 2 1 2 3 +ABBEY 0 1 0 1 +ABBE 0 1 1 0 +ZOOLOGY 1 0 1 1 +ZOOF 1 0 1 1 +ZION 1 0 1 1 +ZEST 1 0 1 1 +ZEAL 2 0 2 2 +YOUTH 5 0 5 5 +YOURSELVES 1 0 1 1 +YOURSELF 8 0 8 8 +YOURS 3 0 3 3 +YOUNGER 1 0 1 1 +YOUNG 43 0 43 43 +YOU'VE 4 0 4 4 +YOU'D 3 0 3 3 +YORKSHIRE 2 0 2 2 +YORK 6 0 6 6 +YONDER 1 0 1 1 +YOKE 1 0 1 1 +YIELDING 3 0 3 3 +YIELDED 2 0 2 2 +YIELD 3 0 3 3 +YET 43 0 43 43 +YESTERDAY 3 0 3 3 +YES 33 0 33 33 +YELLOW 9 0 9 9 +YELL 1 0 1 1 +YEARLY 2 0 2 2 +YEAR 5 0 5 5 +YARN 2 0 2 2 +YAMS 1 0 1 1 +YACHTSMAN 1 0 1 1 +YACHT 3 0 3 3 +WROUGHT 2 0 2 2 +WROTE 6 0 6 6 +WRONGS 1 0 1 1 +WRONG 10 0 10 10 +WRITTEN 7 0 7 7 +WRITS 1 0 1 1 +WRITINGS 2 0 2 2 +WRITING 6 0 6 6 +WRITHING 1 0 1 1 +WRITES 1 0 1 1 +WRITER 2 0 2 2 +WRIT 1 0 1 1 +WRIST 1 0 1 1 +WRETCHEDNESS 2 0 2 2 +WRESTLERS 1 0 1 1 +WRESTLED 1 0 1 1 +WOUNDED 1 0 1 1 +WOUND 1 0 1 1 +WOULDN'T 5 0 5 5 +WORTHY 6 0 6 6 +WORSTED 1 0 1 1 +WORSHIP 3 0 3 3 +WORRY 3 0 3 3 +WORN 1 0 1 1 +WORM 4 0 4 4 +WORKS 8 0 8 8 +WORKMEN 1 0 1 1 +WORKING 3 
0 3 3 +WORKERS 1 0 1 1 +WORKED 5 0 5 5 +WORK 34 0 34 34 +WORDS 20 0 20 20 +WORD 20 0 20 20 +WOOL 3 0 3 3 +WOODLEY 3 0 3 3 +WOODEN 3 0 3 3 +WONDERS 1 0 1 1 +WONDERINGLY 1 0 1 1 +WONDERFUL 7 0 7 7 +WONDERED 2 0 2 2 +WON'T 15 0 15 15 +WOMAN 28 0 28 28 +WOLF 1 0 1 1 +WOKE 1 0 1 1 +WOE 2 0 2 2 +WIZARD'S 1 0 1 1 +WIZARD 3 0 3 3 +WIVES 3 0 3 3 +WITTY 1 0 1 1 +WITTILY 1 0 1 1 +WITS 1 0 1 1 +WITNESSING 1 0 1 1 +WITNESSES 1 0 1 1 +WITNESS 1 0 1 1 +WITHOUT 37 0 37 37 +WITHERING 1 0 1 1 +WITHERED 1 0 1 1 +WITHDRAWN 2 0 2 2 +WITHDRAW 1 0 1 1 +WITHAL 1 0 1 1 +WIT 3 0 3 3 +WISHES 3 0 3 3 +WISHERS 1 0 1 1 +WISHED 6 0 6 6 +WISH 11 0 11 11 +WISE 5 0 5 5 +WISDOM 3 0 3 3 +WIRE 4 0 4 4 +WIPED 1 0 1 1 +WINNING 1 0 1 1 +WINKING 1 0 1 1 +WINK 1 0 1 1 +WINIFRED 1 0 1 1 +WINGS 5 0 5 5 +WING 3 0 3 3 +WINE 7 0 7 7 +WINDY 1 0 1 1 +WINDS 3 0 3 3 +WINDOWS 7 0 7 7 +WINDOW 16 0 16 16 +WINDING 1 0 1 1 +WILY 1 0 1 1 +WILSON 1 0 1 1 +WILLOWY 1 0 1 1 +WILLINGLY 2 0 2 2 +WILLING 2 0 2 2 +WILLIAM 1 0 1 1 +WILLED 1 0 1 1 +WILDERNESS 1 0 1 1 +WILD 9 0 9 9 +WIDTH 1 0 1 1 +WIDEST 1 0 1 1 +WIDENING 1 0 1 1 +WIDELY 1 0 1 1 +WIDE 9 0 9 9 +WICKET 1 0 1 1 +WICKEDNESS 1 0 1 1 +WICKEDEST 1 0 1 1 +WICKED 3 0 3 3 +WHOM 18 0 18 18 +WHOLESOME 1 0 1 1 +WHOLE 25 0 25 25 +WHOEVER 3 0 3 3 +WHITNEY 1 0 1 1 +WHISTLING 1 0 1 1 +WHISTLE 2 0 2 2 +WHISPERED 7 0 7 7 +WHISPER 1 0 1 1 +WHISKERS 1 0 1 1 +WHISK 1 0 1 1 +WHIRLWIND 3 0 3 3 +WHIPPED 2 0 2 2 +WHIM 1 0 1 1 +WHILST 3 0 3 3 +WHEREVER 3 0 3 3 +WHEREUPON 3 0 3 3 +WHEREON 1 0 1 1 +WHEREFORE 1 0 1 1 +WHEREBY 1 0 1 1 +WHENEVER 3 0 3 3 +WHEELING 1 0 1 1 +WHEELER 1 0 1 1 +WHEELED 3 0 3 3 +WHEEL 1 0 1 1 +WHEAT 2 0 2 2 +WHARVES 1 0 1 1 +WETTING 1 0 1 1 +WESTWARD 1 0 1 1 +WESTPORT 2 0 2 2 +WESTERN 1 0 1 1 +WEREN'T 2 0 2 2 +WENT 25 0 25 25 +WELFARE 2 0 2 2 +WEIGHT 2 0 2 2 +WEIGHED 3 0 3 3 +WEIGH 1 0 1 1 +WEEPING 4 0 4 4 +WEEP 1 0 1 1 +WEEKS 4 0 4 4 +WEEK 2 0 2 2 +WEEDS 3 0 3 3 +WEED 1 0 1 1 +WEB 1 0 1 1 +WEASEL 1 0 1 1 +WEARY 1 0 1 1 +WEARING 2 0 2 2 +WEARINESS 2 0 2 2 +WEARILY 2 0 2 2 +WEARERS 1 0 1 1 +WEAPON 2 0 2 2 +WEALTH 5 0 5 5 +WEAKNESS 3 0 3 3 +WEAKENED 2 0 2 2 +WEAK 6 0 6 6 +WAYS 1 0 1 1 +WAX 1 0 1 1 +WAVING 2 0 2 2 +WAVERING 2 0 2 2 +WAVED 1 0 1 1 +WATSON 5 0 5 5 +WATERS 6 0 6 6 +WATERCRESS 1 0 1 1 +WATCHING 1 0 1 1 +WATCHFULNESS 1 0 1 1 +WATCHFUL 1 0 1 1 +WATCHED 7 0 7 7 +WATCH 2 0 2 2 +WASTEFUL 4 0 4 4 +WASTED 2 0 2 2 +WASTE 5 0 5 5 +WASHINGTON 1 0 1 1 +WASHING 1 0 1 1 +WASH 1 0 1 1 +WARY 1 0 1 1 +WARRIORS 2 0 2 2 +WARRENTON 4 0 4 4 +WARRANTED 2 0 2 2 +WARRANT 1 0 1 1 +WARN 1 0 1 1 +WARMEST 1 0 1 1 +WARMED 1 0 1 1 +WARM 4 0 4 4 +WARDS 1 0 1 1 +WARD 1 0 1 1 +WANTS 3 0 3 3 +WANTING 3 0 3 3 +WANTED 8 0 8 8 +WANT 19 0 19 19 +WANDERED 2 0 2 2 +WAN 1 0 1 1 +WALNUT 1 0 1 1 +WALLS 2 0 2 2 +WALL 6 0 6 6 +WALKS 1 0 1 1 +WALKING 2 0 2 2 +WALKETH 1 0 1 1 +WALKED 6 0 6 6 +WALK 5 0 5 5 +WAITING 7 0 7 7 +WAITERS 1 0 1 1 +WAITER 1 0 1 1 +WAITED 1 0 1 1 +WAIT 8 0 8 8 +WAISTCOAT 1 0 1 1 +WAIST 1 0 1 1 +WAILING 1 0 1 1 +WAGED 1 0 1 1 +WADDLING 1 0 1 1 +W 3 0 3 3 +VULGAR 1 0 1 1 +VOYAGING 2 0 2 2 +VOYAGES 1 0 1 1 +VOYAGE 2 0 2 2 +VOWS 1 0 1 1 +VOUCHED 1 0 1 1 +VOTES 1 0 1 1 +VOTERS 1 0 1 1 +VOLUME 1 0 1 1 +VOLTAIRE'S 1 0 1 1 +VIVIDLY 2 0 2 2 +VIVID 2 0 2 2 +VIVE 1 0 1 1 +VIVACITY 1 0 1 1 +VITALITY 1 0 1 1 +VITAL 1 0 1 1 +VISTA 1 0 1 1 +VISITORS 5 0 5 5 +VISITOR 2 0 2 2 +VISITED 4 0 4 4 +VISIT 4 0 4 4 +VISION 2 0 2 2 +VISIBLE 2 0 2 2 +VIRTUOUS 1 0 1 1 +VIRTUE 3 0 3 3 +VIRTUALLY 2 0 2 2 +VIRGINS 1 0 1 1 +VIRGIN 2 0 2 2 +VIPER 2 0 2 2 +VIOLET 1 0 1 1 +VIOLENT 5 0 5 5 +VIOLENCE 5 0 5 5 +VIOLATED 1 0 1 1 +VINEGAR 1 0 1 1 
+VINDICATION 1 0 1 1 +VINDICATE 1 0 1 1 +VILLAGE 4 0 4 4 +VILLA 1 0 1 1 +VIKING 3 0 3 3 +VIGOROUSLY 1 0 1 1 +VIGOROUS 1 0 1 1 +VIGILANCE 1 0 1 1 +VIEWED 1 0 1 1 +VIE 1 0 1 1 +VICTUALS 1 0 1 1 +VICTORY 1 0 1 1 +VICTORIES 1 0 1 1 +VICTIM 2 0 2 2 +VICOMTE 1 0 1 1 +VICISSITUDES 1 0 1 1 +VICIOUS 1 0 1 1 +VICINITY 1 0 1 1 +VEXED 1 0 1 1 +VEXATION 1 0 1 1 +VESTURE 1 0 1 1 +VESTIBULE 1 0 1 1 +VESSEL 2 0 2 2 +VERTEBRAL 1 0 1 1 +VERSES 1 0 1 1 +VERSED 3 0 3 3 +VERIFY 1 0 1 1 +VERGE 1 0 1 1 +VENTURED 1 0 1 1 +VENTURE 1 0 1 1 +VENICE 1 0 1 1 +VENGEANCE 2 0 2 2 +VENERABLE 1 0 1 1 +VELVET 1 0 1 1 +VELOCITY 2 0 2 2 +VEILED 2 0 2 2 +VEHICLE 1 0 1 1 +VEHEMENTLY 1 0 1 1 +VAULT 1 0 1 1 +VASTLY 1 0 1 1 +VAST 5 0 5 5 +VASSALS 1 0 1 1 +VARYING 2 0 2 2 +VARIOUS 7 0 7 7 +VARIETY 2 0 2 2 +VARIETIES 1 0 1 1 +VARIED 1 0 1 1 +VARIATIONS 1 0 1 1 +VARIANCE 1 0 1 1 +VANQUISHED 2 0 2 2 +VANITY 1 0 1 1 +VANISHED 2 0 2 2 +VANISH 2 0 2 2 +VALUE 3 0 3 3 +VALUABLE 2 0 2 2 +VALLEYS 2 0 2 2 +VALLEY 4 0 4 4 +VALIANTLY 1 0 1 1 +VALHALLA 1 0 1 1 +VALE 1 0 1 1 +VAINLY 1 0 1 1 +VAIN 1 0 1 1 +VAGUELY 1 0 1 1 +VAGUE 3 0 3 3 +VACUUM 1 0 1 1 +VACANT 1 0 1 1 +UTTERLY 4 0 4 4 +UTTERED 1 0 1 1 +UTTERANCE 1 0 1 1 +UTMOST 3 0 3 3 +USUALLY 4 0 4 4 +USUAL 5 0 5 5 +USING 3 0 3 3 +USELESS 4 0 4 4 +USEFUL 5 0 5 5 +USED 17 0 17 17 +USE 31 0 31 31 +US 60 0 60 60 +URGING 1 0 1 1 +URGED 3 0 3 3 +UPWARDS 1 0 1 1 +UPTOWN 1 0 1 1 +UPRIGHT 1 0 1 1 +UPRAISED 1 0 1 1 +UPPERMOST 1 0 1 1 +UPPER 2 0 2 2 +UPLIFTED 1 0 1 1 +UPHOLSTERED 1 0 1 1 +UPHEAVAL 1 0 1 1 +UPBRAIDED 1 0 1 1 +UNWORTHY 1 0 1 1 +UNWILLING 2 0 2 2 +UNVARNISHED 1 0 1 1 +UNUSUAL 4 0 4 4 +UNTUTORED 1 0 1 1 +UNTRIED 1 0 1 1 +UNTREATED 1 0 1 1 +UNTOUCHED 1 0 1 1 +UNTIL 16 0 16 16 +UNTIDINESS 1 0 1 1 +UNTASTED 1 0 1 1 +UNSUCCESSFUL 1 0 1 1 +UNSEEN 1 0 1 1 +UNSAID 1 0 1 1 +UNREAL 1 0 1 1 +UNPRECEDENTED 1 0 1 1 +UNPOPULAR 1 0 1 1 +UNPLEASANT 3 0 3 3 +UNPERCEIVED 1 0 1 1 +UNPARALLELED 1 0 1 1 +UNOBSERVED 1 0 1 1 +UNNECESSARY 1 0 1 1 +UNNATURAL 1 0 1 1 +UNMOVED 1 0 1 1 +UNLUCKY 2 0 2 2 +UNLUCKILY 1 0 1 1 +UNLOCKED 1 0 1 1 +UNLOCK 1 0 1 1 +UNLOADED 1 0 1 1 +UNLIKELY 1 0 1 1 +UNLESS 5 0 5 5 +UNKNOWN 1 0 1 1 +UNIVERSITY 1 0 1 1 +UNIVERSE 1 0 1 1 +UNIVERSAL 3 0 3 3 +UNITED 8 0 8 8 +UNITE 1 0 1 1 +UNION 3 0 3 3 +UNINVITED 1 0 1 1 +UNINTELLIGIBLE 1 0 1 1 +UNIFORMS 2 0 2 2 +UNIFORM 1 0 1 1 +UNICORN 1 0 1 1 +UNHEEDED 1 0 1 1 +UNHAPPY 4 0 4 4 +UNHAPPINESS 1 0 1 1 +UNGRACIOUSLY 1 0 1 1 +UNGRACIOUS 1 0 1 1 +UNFORTUNATELY 2 0 2 2 +UNFORTUNATE 1 0 1 1 +UNFOLD 1 0 1 1 +UNFEELING 1 0 1 1 +UNFAMILIAR 1 0 1 1 +UNFAIRLY 1 0 1 1 +UNFAIR 2 0 2 2 +UNFAILING 1 0 1 1 +UNEXPECTEDLY 2 0 2 2 +UNEXPECTED 3 0 3 3 +UNEASY 4 0 4 4 +UNEASINESS 1 0 1 1 +UNEASILY 1 0 1 1 +UNEARTHLY 1 0 1 1 +UNDUE 1 0 1 1 +UNDOUBTEDLY 1 0 1 1 +UNDOING 1 0 1 1 +UNDERWATER 1 0 1 1 +UNDERTONE 1 0 1 1 +UNDERTAKING 2 0 2 2 +UNDERSTOOD 6 0 6 6 +UNDERSTANDING 4 0 4 4 +UNDERSTAND 9 0 9 9 +UNDERSCORE 1 0 1 1 +UNDERNEATH 1 0 1 1 +UNDERMINE 1 0 1 1 +UNDERHANDED 1 0 1 1 +UNDECEIVED 1 0 1 1 +UNCOUTH 1 0 1 1 +UNCOURTEOUS 1 0 1 1 +UNCONTROLLABLE 1 0 1 1 +UNCONSTITUTIONALITY 1 0 1 1 +UNCOMPROMISING 1 0 1 1 +UNCOMFORTABLE 1 0 1 1 +UNCLE 6 0 6 6 +UNCIVIL 1 0 1 1 +UNCHARITABLENESS 1 0 1 1 +UNCHANGED 1 0 1 1 +UNCERTAIN 2 0 2 2 +UNCASING 1 0 1 1 +UNCAS 10 0 10 10 +UNBUTTONING 1 0 1 1 +UNBROKEN 1 0 1 1 +UNBEARABLE 2 0 2 2 +UNAVOIDABLE 1 0 1 1 +UNAVERRED 1 0 1 1 +UNANIMOUSLY 1 0 1 1 +UNANIMOUS 1 0 1 1 +UNAFFECTED 1 0 1 1 +UNACCOUNTABLE 1 0 1 1 +UNABLE 1 0 1 1 +UGLY 3 0 3 3 +TYRANNY 1 0 1 1 +TYPICAL 1 0 1 1 +TYPES 1 0 1 1 +TWIXT 1 0 1 1 +TWIRLING 1 0 1 1 +TWIN 1 0 1 1 
+TWILIGHT 2 0 2 2 +TWICE 3 0 3 3 +TWENTY 15 0 15 15 +TWELVE 2 0 2 2 +TWELFTH 1 0 1 1 +TURNIPS 1 0 1 1 +TURNING 6 0 6 6 +TURNER 4 0 4 4 +TURF 1 0 1 1 +TUNE 2 0 2 2 +TUMULTUOUS 1 0 1 1 +TUMULT 3 0 3 3 +TUMBLER 1 0 1 1 +TUMBLED 3 0 3 3 +TUFT 1 0 1 1 +TUESDAY 1 0 1 1 +TUCKED 1 0 1 1 +TUBE 1 0 1 1 +TRYING 5 0 5 5 +TRUTH 13 0 13 13 +TRUSTY 1 0 1 1 +TRUSTS 1 0 1 1 +TRUST 5 0 5 5 +TRUNKS 2 0 2 2 +TRUNK 2 0 2 2 +TRUMPETS 1 0 1 1 +TRULY 9 0 9 9 +TRUFFLES 1 0 1 1 +TRUE 21 0 21 21 +TRUDGED 1 0 1 1 +TROUT'S 1 0 1 1 +TROUT 1 0 1 1 +TROUBLESOME 1 0 1 1 +TROUBLES 2 0 2 2 +TROUBLED 4 0 4 4 +TROTTING 1 0 1 1 +TROTTED 1 0 1 1 +TROTH 1 0 1 1 +TROT 5 0 5 5 +TROPHIES 1 0 1 1 +TROOPS 3 0 3 3 +TROOPER'S 1 0 1 1 +TRIUMPHANTLY 1 0 1 1 +TRIUMPHANT 1 0 1 1 +TRIUMPH 3 0 3 3 +TRIPPED 1 0 1 1 +TRINKET 1 0 1 1 +TRIMNESS 1 0 1 1 +TRIM 1 0 1 1 +TRIGGER 1 0 1 1 +TRICKS 2 0 2 2 +TRIBUTE 1 0 1 1 +TRIBES 1 0 1 1 +TRIAL 2 0 2 2 +TREND 2 0 2 2 +TREMULOUSLY 1 0 1 1 +TREMULOUS 1 0 1 1 +TREMOR 1 0 1 1 +TREMENDOUSLY 1 0 1 1 +TREMBLING 5 0 5 5 +TREMBLED 1 0 1 1 +TREMBLE 2 0 2 2 +TREE 35 0 35 35 +TREATY 1 0 1 1 +TREATS 1 0 1 1 +TREATING 1 0 1 1 +TREATED 2 0 2 2 +TREAT 1 0 1 1 +TREASURE 2 0 2 2 +TREAD 1 0 1 1 +TRAVESTY 1 0 1 1 +TRAVERSED 1 0 1 1 +TRAVEL 1 0 1 1 +TRASH 1 0 1 1 +TRAP 2 0 2 2 +TRANSPARENT 2 0 2 2 +TRANSLATION 1 0 1 1 +TRANSLATE 1 0 1 1 +TRANSIENT 2 0 2 2 +TRANSFERRED 1 0 1 1 +TRANSCRIPT 1 0 1 1 +TRANQUILLITY 1 0 1 1 +TRANQUIL 1 0 1 1 +TRAINS 1 0 1 1 +TRAINING 3 0 3 3 +TRAINED 1 0 1 1 +TRAIN 1 0 1 1 +TRAFFIC 1 0 1 1 +TRADITIONS 3 0 3 3 +TRADITION 1 0 1 1 +TRADES 1 0 1 1 +TRACK 1 0 1 1 +TRACES 2 0 2 2 +TRACE 1 0 1 1 +TOYS 1 0 1 1 +TOWNS 3 0 3 3 +TOWN 6 0 6 6 +TOWERS 1 0 1 1 +TOWER 1 0 1 1 +TOUR 1 0 1 1 +TOUCHING 1 0 1 1 +TOUCHES 4 0 4 4 +TOUCH 8 0 8 8 +TOTTY 3 0 3 3 +TOTAL 1 0 1 1 +TOSSING 1 0 1 1 +TOSSED 1 0 1 1 +TORTURED 2 0 2 2 +TORTURE 1 0 1 1 +TORTOISE 1 0 1 1 +TORRENT 3 0 3 3 +TORN 1 0 1 1 +TORCH 1 0 1 1 +TOPSAILS 1 0 1 1 +TOPS 4 0 4 4 +TOPMOST 1 0 1 1 +TOPMASTS 1 0 1 1 +TOOTHED 1 0 1 1 +TOOTH 1 0 1 1 +TOOK 33 0 33 33 +TONGUES 1 0 1 1 +TONGUE 8 0 8 8 +TONES 3 0 3 3 +TONED 1 0 1 1 +TONE 5 0 5 5 +TOMMY 1 0 1 1 +TOMB 1 0 1 1 +TOM 4 0 4 4 +TOLERATION 1 0 1 1 +TOLEDANS 1 0 1 1 +TOLD 32 0 32 32 +TOKEN 2 0 2 2 +TOE 1 0 1 1 +TITLE 3 0 3 3 +TITIAN 1 0 1 1 +TIS 8 0 8 8 +TIRELESS 1 0 1 1 +TIRED 6 0 6 6 +TIRE 1 0 1 1 +TIPTOE 2 0 2 2 +TIPPED 1 0 1 1 +TIP 3 0 3 3 +TINY 3 0 3 3 +TINTS 1 0 1 1 +TINT 1 0 1 1 +TINSEL 1 0 1 1 +TINKLED 1 0 1 1 +TINGLING 1 0 1 1 +TINGE 1 0 1 1 +TIN 1 0 1 1 +TIMES 21 0 21 21 +TIME'S 2 0 2 2 +TIMASCHEFF'S 1 0 1 1 +TILL 8 0 8 8 +TILES 1 0 1 1 +TIGHTLY 1 0 1 1 +TIGHTEN 1 0 1 1 +TIGHT 1 0 1 1 +TIED 2 0 2 2 +TIE 1 0 1 1 +TIDING 1 0 1 1 +TICKET 1 0 1 1 +THWART 1 0 1 1 +THURSTON 2 0 2 2 +THURSDAY 1 0 1 1 +THRUSTING 1 0 1 1 +THRUST 5 0 5 5 +THROWN 4 0 4 4 +THROW 2 0 2 2 +THROUGHOUT 5 0 5 5 +THRONE 4 0 4 4 +THROATS 1 0 1 1 +THROAT 1 0 1 1 +THRIVING 1 0 1 1 +THRILLING 1 0 1 1 +THRILLED 1 0 1 1 +THRILL 1 0 1 1 +THRICE 1 0 1 1 +THREW 5 0 5 5 +THREE 41 0 41 41 +THREATS 1 0 1 1 +THREATENS 2 0 2 2 +THREATENING 3 0 3 3 +THREATENED 1 0 1 1 +THRALLS 2 0 2 2 +THRALL'S 1 0 1 1 +THRALL 2 0 2 2 +THOUSANDS 2 0 2 2 +THOUGHTS 13 0 13 13 +THOUGHTLESS 1 0 1 1 +THOUGHTFUL 1 0 1 1 +THOSE 37 0 37 37 +THOROUGH 1 0 1 1 +THOMAS 1 0 1 1 +THIRTY 12 0 12 12 +THIRTIETH 1 0 1 1 +THIRTEENTH 1 0 1 1 +THIRD 7 0 7 7 +THINKING 8 0 8 8 +THIN 2 0 2 2 +THICKEST 1 0 1 1 +THICK 5 0 5 5 +THEY'VE 1 0 1 1 +THERMOMETER 1 0 1 1 +THEREOF 1 0 1 1 +THEREFORE 20 0 20 20 +THEREAFTER 1 0 1 1 +THEREABOUTS 1 0 1 1 +THEORY 5 0 5 5 +THEORIES 1 0 1 1 +THEORETICAL 1 0 1 
1 +THEOLOGY 1 0 1 1 +THENCE 1 0 1 1 +THEMSELVES 12 0 12 12 +THEME 1 0 1 1 +THEIRS 2 0 2 2 +THEFT 4 0 4 4 +THEATRICAL 1 0 1 1 +THEATRES 1 0 1 1 +THANKING 3 0 3 3 +THANKFUL 1 0 1 1 +THANKED 1 0 1 1 +TEXTURES 1 0 1 1 +TEXT 2 0 2 2 +TESTIMONY 1 0 1 1 +TESTIMONIES 1 0 1 1 +TESTED 1 0 1 1 +TEST 2 0 2 2 +TERROR 2 0 2 2 +TERRITORY 2 0 2 2 +TERRITORIAL 4 0 4 4 +TERRIFIED 2 0 2 2 +TERRIFIC 1 0 1 1 +TERRIBLY 2 0 2 2 +TERRIBLE 8 0 8 8 +TERRACED 1 0 1 1 +TERMS 9 0 9 9 +TERM 6 0 6 6 +TENTS 2 0 2 2 +TENT 5 0 5 5 +TENFOLD 2 0 2 2 +TENDERLY 1 0 1 1 +TENDER 3 0 3 3 +TENDED 1 0 1 1 +TEND 2 0 2 2 +TENANTED 1 0 1 1 +TENABILITY 1 0 1 1 +TEN 14 0 14 14 +TEMPTATION 2 0 2 2 +TEMPORARY 2 0 2 2 +TEMPORAL 2 0 2 2 +TEMPLES 1 0 1 1 +TEMPLE 2 0 2 2 +TEMPEST 2 0 2 2 +TEMPERATURE 1 0 1 1 +TEMPER 3 0 3 3 +TELLS 1 0 1 1 +TELLING 3 0 3 3 +TELESCOPE 2 0 2 2 +TEETH 1 0 1 1 +TEDIOUS 2 0 2 2 +TECHNOLOGY 1 0 1 1 +TECHNICAL 2 0 2 2 +TEARS 11 0 11 11 +TEAPOT 1 0 1 1 +TEAL 1 0 1 1 +TEACHING 2 0 2 2 +TEACHERY 1 0 1 1 +TEACHER 3 0 3 3 +TEACH 6 0 6 6 +TAYLOR 7 0 7 7 +TAXED 1 0 1 1 +TAWNY 1 0 1 1 +TAUGHT 5 0 5 5 +TASTE 5 0 5 5 +TASKS 1 0 1 1 +TASK 9 0 9 9 +TARTS 3 0 3 3 +TARRY 1 0 1 1 +TAPESTRY 1 0 1 1 +TAPESTRIES 1 0 1 1 +TAPE 1 0 1 1 +TANKARD 1 0 1 1 +TANGLE 1 0 1 1 +TAN 1 0 1 1 +TAMPERING 1 0 1 1 +TAMPERED 1 0 1 1 +TAMES 1 0 1 1 +TAME 1 0 1 1 +TALONS 1 0 1 1 +TALLOW 1 0 1 1 +TALKS 4 0 4 4 +TALKING 10 0 10 10 +TALKED 1 0 1 1 +TALES 2 0 2 2 +TALENTED 1 0 1 1 +TALENT 5 0 5 5 +TALE 4 0 4 4 +TAKING 7 0 7 7 +TAKES 3 0 3 3 +TAKEN 15 0 15 15 +TAKE 34 0 34 34 +TAILORS 1 0 1 1 +TAIL 3 0 3 3 +TAG 1 0 1 1 +TACT 1 0 1 1 +TACK 1 0 1 1 +TABLES 3 0 3 3 +TABBY'S 1 0 1 1 +TABBY 2 0 2 2 +SYSTEM 8 0 8 8 +SYNONYM 1 0 1 1 +SYMPATHY 3 0 3 3 +SYMPATHETIC 2 0 2 2 +SYLLABLE 1 0 1 1 +SWORDS 1 0 1 1 +SWORD 5 0 5 5 +SWOOPED 1 0 1 1 +SWOLLEN 1 0 1 1 +SWIRLING 1 0 1 1 +SWIRL 1 0 1 1 +SWING 1 0 1 1 +SWIMMING 2 0 2 2 +SWIFTNESS 1 0 1 1 +SWIFTLY 4 0 4 4 +SWIFT 1 0 1 1 +SWELLING 1 0 1 1 +SWELL 1 0 1 1 +SWEETS 1 0 1 1 +SWEETNESS 2 0 2 2 +SWEET 6 0 6 6 +SWEEPING 1 0 1 1 +SWARMING 1 0 1 1 +SWAMP 3 0 3 3 +SWAM 1 0 1 1 +SUSPICIOUS 1 0 1 1 +SUSPENDED 1 0 1 1 +SUSPECT 2 0 2 2 +SURVIVE 2 0 2 2 +SURVEYOR 1 0 1 1 +SURVEYED 1 0 1 1 +SURROUNDINGS 1 0 1 1 +SURROUNDING 2 0 2 2 +SURROUNDED 2 0 2 2 +SURRENDER 2 0 2 2 +SURPRISED 6 0 6 6 +SURPRISE 4 0 4 4 +SURPASSED 1 0 1 1 +SURMISED 1 0 1 1 +SURGEON 1 0 1 1 +SURGE 1 0 1 1 +SURFACE 8 0 8 8 +SURELY 5 0 5 5 +SURE 16 0 16 16 +SURCHARGED 1 0 1 1 +SUPREME 2 0 2 2 +SUPPRESSING 1 0 1 1 +SUPPRESSED 2 0 2 2 +SUPPOSITION 1 0 1 1 +SUPPOSING 2 0 2 2 +SUPPOSES 1 0 1 1 +SUPPOSED 3 0 3 3 +SUPPOSE 19 0 19 19 +SUPPORTS 1 0 1 1 +SUPPORTING 2 0 2 2 +SUPPORTED 2 0 2 2 +SUPPORT 2 0 2 2 +SUPPLYING 1 0 1 1 +SUPPLY 1 0 1 1 +SUPPLIES 1 0 1 1 +SUPPER 7 0 7 7 +SUPERIORITY 1 0 1 1 +SUPERIOR 8 0 8 8 +SUPERINTENDENCE 1 0 1 1 +SUPERFLUITIES 1 0 1 1 +SUNSHINE 3 0 3 3 +SUNSETS 1 0 1 1 +SUNSET 1 0 1 1 +SUNRISE 1 0 1 1 +SUNNY 1 0 1 1 +SUNLIGHT 2 0 2 2 +SUNK 1 0 1 1 +SUNG 2 0 2 2 +SUNDAY 2 0 2 2 +SUNBEAMS 1 0 1 1 +SUN 15 0 15 15 +SUMNER 1 0 1 1 +SUMMONS 2 0 2 2 +SUMMIT 1 0 1 1 +SUMMER 6 0 6 6 +SUMMARY 1 0 1 1 +SULLIED 1 0 1 1 +SUITS 1 0 1 1 +SUITED 1 0 1 1 +SUITABLE 2 0 2 2 +SUGGESTIONS 1 0 1 1 +SUGGESTION 1 0 1 1 +SUGGESTED 3 0 3 3 +SUGGEST 1 0 1 1 +SUGAR 1 0 1 1 +SUFFOCATING 1 0 1 1 +SUFFICIENTLY 1 0 1 1 +SUFFICIENT 3 0 3 3 +SUFFICED 1 0 1 1 +SUFFICE 1 0 1 1 +SUFFERINGS 2 0 2 2 +SUFFERING 2 0 2 2 +SUFFERED 3 0 3 3 +SUFFER 5 0 5 5 +SUDDENLY 15 0 15 15 +SUDDEN 7 0 7 7 +SUCKLING 1 0 1 1 +SUCH 67 0 67 67 +SUCCESSION 3 0 3 3 +SUCCESSFUL 3 0 3 3 +SUCCESS 9 0 9 9 +SUCCEEDED 3 
0 3 3 +SUCCEED 1 0 1 1 +SUBURB 1 0 1 1 +SUBTLETIES 1 0 1 1 +SUBSTITUTION 1 0 1 1 +SUBSTITUTED 1 0 1 1 +SUBSTANTIALLY 1 0 1 1 +SUBSTANTIAL 3 0 3 3 +SUBSTANCE 3 0 3 3 +SUBSISTENCE 1 0 1 1 +SUBSIDED 1 0 1 1 +SUBSCRIBE 1 0 1 1 +SUBORDINATION 1 0 1 1 +SUBMITTED 1 0 1 1 +SUBMIT 2 0 2 2 +SUBMISSIVELY 1 0 1 1 +SUBMARINE 3 0 3 3 +SUBJECTS 3 0 3 3 +SUBJECTIVELY 1 0 1 1 +SUBJECTED 1 0 1 1 +SUBJECT 16 0 16 16 +SUBDUING 1 0 1 1 +SUBDUED 2 0 2 2 +STUTELEY 4 0 4 4 +STURDY 1 0 1 1 +STUPID 4 0 4 4 +STUPEFIED 2 0 2 2 +STUNG 1 0 1 1 +STUMPED 1 0 1 1 +STUMP 1 0 1 1 +STUFFED 3 0 3 3 +STUFF 1 0 1 1 +STUDYING 2 0 2 2 +STUDIOUS 2 0 2 2 +STUDIES 1 0 1 1 +STUDENTS 3 0 3 3 +STUDENT 2 0 2 2 +STUCCO 1 0 1 1 +STRUGGLES 1 0 1 1 +STRUGGLED 1 0 1 1 +STRUGGLE 6 0 6 6 +STRUCTURE 2 0 2 2 +STRUCK 4 0 4 4 +STROVE 2 0 2 2 +STRONGLY 2 0 2 2 +STRONGHOLD 1 0 1 1 +STRONGEST 2 0 2 2 +STRONGER 1 0 1 1 +STRONG 13 0 13 13 +STROLLERS 1 0 1 1 +STROLLER'S 1 0 1 1 +STROLLER 3 0 3 3 +STROLL 3 0 3 3 +STROKE 1 0 1 1 +STRIVING 1 0 1 1 +STRIVE 3 0 3 3 +STRIPPING 1 0 1 1 +STRIPPED 1 0 1 1 +STRIPLING 1 0 1 1 +STRIKING 1 0 1 1 +STRIFE 1 0 1 1 +STRICTLY 1 0 1 1 +STRICTEST 1 0 1 1 +STRICT 2 0 2 2 +STRETCHING 1 0 1 1 +STRETCHED 1 0 1 1 +STRETCH 1 0 1 1 +STRENUOUS 1 0 1 1 +STRENGTHENING 1 0 1 1 +STRENGTHENED 2 0 2 2 +STRENGTH 7 0 7 7 +STREETS 1 0 1 1 +STREET 14 0 14 14 +STREAKED 1 0 1 1 +STRAWBERRIES 1 0 1 1 +STRAW 1 0 1 1 +STRANGERS 2 0 2 2 +STRANGER 1 0 1 1 +STRANGELY 2 0 2 2 +STRANGE 12 0 12 12 +STRAITS 1 0 1 1 +STRAINED 1 0 1 1 +STRAIGHTWAY 2 0 2 2 +STORY 25 0 25 25 +STORMY 1 0 1 1 +STORMS 1 0 1 1 +STORM 3 0 3 3 +STORES 1 0 1 1 +STORAGE 1 0 1 1 +STOPPING 2 0 2 2 +STOPPED 6 0 6 6 +STOP 8 0 8 8 +STOOPED 1 0 1 1 +STOOP 1 0 1 1 +STOOL 2 0 2 2 +STOOD 22 0 22 22 +STONES 3 0 3 3 +STONE 3 0 3 3 +STOLEN 2 0 2 2 +STOICAL 1 0 1 1 +STOCKINGS 1 0 1 1 +STOCK 2 0 2 2 +STIRS 1 0 1 1 +STIRRED 1 0 1 1 +STIR 1 0 1 1 +STINGY 2 0 2 2 +STING 1 0 1 1 +STIMULANTS 1 0 1 1 +STILLNESS 1 0 1 1 +STILL 55 0 55 55 +STIFLING 1 0 1 1 +STIFFNESS 1 0 1 1 +STIFF 1 0 1 1 +STICKS 1 0 1 1 +STICKING 1 0 1 1 +STICK 1 0 1 1 +STEW 1 0 1 1 +STERNEST 1 0 1 1 +STERN 2 0 2 2 +STEPS 1 0 1 1 +STEPPED 1 0 1 1 +STEPHEN'S 1 0 1 1 +STEPHEN 2 0 2 2 +STEM 1 0 1 1 +STEEP 1 0 1 1 +STEEL 1 0 1 1 +STEAMING 1 0 1 1 +STEAMED 1 0 1 1 +STEAMBOAT 1 0 1 1 +STEAM 1 0 1 1 +STEAL 1 0 1 1 +STEADILY 2 0 2 2 +STEAD 1 0 1 1 +STAYS 1 0 1 1 +STAYED 2 0 2 2 +STAY 11 0 11 11 +STATUS 1 0 1 1 +STATUARY 1 0 1 1 +STATIONS 2 0 2 2 +STATESMAN 1 0 1 1 +STATEMENT 3 0 3 3 +STATELY 1 0 1 1 +STATE'S 2 0 2 2 +STARVED 1 0 1 1 +STARTLING 1 0 1 1 +STARTLED 2 0 2 2 +STARTING 2 0 2 2 +STARTED 9 0 9 9 +STARS 1 0 1 1 +STARLIT 1 0 1 1 +STARING 1 0 1 1 +STARED 1 0 1 1 +STAR 2 0 2 2 +STANLEY 1 0 1 1 +STANDING 8 0 8 8 +STANDARD 4 0 4 4 +STAMPING 1 0 1 1 +STAMPED 1 0 1 1 +STALKS 1 0 1 1 +STAKES 1 0 1 1 +STAKE 1 0 1 1 +STAIRCASE 1 0 1 1 +STAINED 1 0 1 1 +STAIN 1 0 1 1 +STAID 1 0 1 1 +STAGES 2 0 2 2 +STAFF 1 0 1 1 +STACKED 1 0 1 1 +STABLE 1 0 1 1 +SQUIRE'S 3 0 3 3 +SQUIRE 8 0 8 8 +SQUEEZE 1 0 1 1 +SQUEAK 2 0 2 2 +SQUARES 2 0 2 2 +SQUARE 2 0 2 2 +SQUALOR 1 0 1 1 +SQUALID 1 0 1 1 +SPUR 1 0 1 1 +SPRUNG 2 0 2 2 +SPRINKLING 1 0 1 1 +SPRINKLED 1 0 1 1 +SPRINGY 1 0 1 1 +SPRINGS 3 0 3 3 +SPRINGING 1 0 1 1 +SPREADS 1 0 1 1 +SPREAD 5 0 5 5 +SPOTLESS 1 0 1 1 +SPOT 4 0 4 4 +SPORTING 1 0 1 1 +SPOON 1 0 1 1 +SPOKEN 11 0 11 11 +SPOKE 15 0 15 15 +SPOILS 2 0 2 2 +SPLENDORS 1 0 1 1 +SPLENDIDLY 2 0 2 2 +SPLASHES 1 0 1 1 +SPLASHED 2 0 2 2 +SPITE 2 0 2 2 +SPIRITUAL 4 0 4 4 +SPIRITS 3 0 3 3 +SPIRIT 11 0 11 11 +SPINNING 4 0 4 4 +SPIKES 1 0 1 1 +SPIDER 1 0 1 1 +SPICY 1 0 
1 1 +SPERM 1 0 1 1 +SPENT 5 0 5 5 +SPENDING 1 0 1 1 +SPELL 1 0 1 1 +SPEEDS 1 0 1 1 +SPEED 3 0 3 3 +SPEECHLESS 1 0 1 1 +SPEECH 6 0 6 6 +SPED 2 0 2 2 +SPECULATE 1 0 1 1 +SPECTATORS 1 0 1 1 +SPECKS 1 0 1 1 +SPECIOUS 1 0 1 1 +SPECIFICATIONS 1 0 1 1 +SPECIFIC 1 0 1 1 +SPECIES 3 0 3 3 +SPECIALTY 1 0 1 1 +SPECIALLY 2 0 2 2 +SPEAR 1 0 1 1 +SPEAKS 1 0 1 1 +SPEAKING 10 0 10 10 +SPEAK 15 0 15 15 +SPASM 1 0 1 1 +SPARKS 1 0 1 1 +SPARKLING 3 0 3 3 +SPARKLES 2 0 2 2 +SPARKLED 1 0 1 1 +SPARK 1 0 1 1 +SPARE 3 0 3 3 +SPACE 5 0 5 5 +SOUTHERNERS 2 0 2 2 +SOUTHBRIDGE 1 0 1 1 +SOUTH 7 0 7 7 +SOURCE 1 0 1 1 +SOUP 1 0 1 1 +SOUNDING 3 0 3 3 +SOUNDED 2 0 2 2 +SOUND 7 0 7 7 +SOULS 4 0 4 4 +SOUL'S 1 0 1 1 +SOUL 8 0 8 8 +SOUGHT 6 0 6 6 +SORTS 2 0 2 2 +SORT 8 0 8 8 +SORRY 5 0 5 5 +SORROWS 1 0 1 1 +SORROWFULLY 1 0 1 1 +SORROWFUL 1 0 1 1 +SORROW 5 0 5 5 +SORE 1 0 1 1 +SORCERESS 1 0 1 1 +SOPHISTRY 1 0 1 1 +SOOTHINGLY 1 0 1 1 +SOOTH 1 0 1 1 +SOON 28 0 28 28 +SONS 3 0 3 3 +SONOROUS 1 0 1 1 +SONG 2 0 2 2 +SOMEWHERE 6 0 6 6 +SOMEWHAT 5 0 5 5 +SOMETIMES 18 0 18 18 +SOMEHOW 6 0 6 6 +SOMEBODY 3 0 3 3 +SOMBRE 1 0 1 1 +SOLVED 2 0 2 2 +SOLUTION 1 0 1 1 +SOLEMNITY 1 0 1 1 +SOLELY 1 0 1 1 +SOLE 3 0 3 3 +SOLDIERS 6 0 6 6 +SOLD 4 0 4 4 +SOIL 2 0 2 2 +SOFTNESS 2 0 2 2 +SOFTLY 4 0 4 4 +SOFTENED 1 0 1 1 +SOFT 7 0 7 7 +SOFAS 1 0 1 1 +SODALITY 1 0 1 1 +SOCRATES 2 0 2 2 +SOCKS 1 0 1 1 +SOCIETY 7 0 7 7 +SOCIETIES 1 0 1 1 +SOCIAL 8 0 8 8 +SOCIABLE 1 0 1 1 +SOBS 1 0 1 1 +SOARED 1 0 1 1 +SOAR 1 0 1 1 +SNUFFED 1 0 1 1 +SNUFF 4 0 4 4 +SNUBNOSED 1 0 1 1 +SNOW 1 0 1 1 +SNORED 1 0 1 1 +SNEER 1 0 1 1 +SNATCHED 1 0 1 1 +SNATCH 1 0 1 1 +SMUGGLING 1 0 1 1 +SMOOTHER 1 0 1 1 +SMOOTH 1 0 1 1 +SMOKING 1 0 1 1 +SMOKE 5 0 5 5 +SMITTEN 2 0 2 2 +SMITH 2 0 2 2 +SMILING 3 0 3 3 +SMILES 3 0 3 3 +SMILE 12 0 12 12 +SMELL 3 0 3 3 +SMARTLY 1 0 1 1 +SMART 1 0 1 1 +SMALLEST 2 0 2 2 +SMALLER 1 0 1 1 +SMALL 20 0 20 20 +SLY 2 0 2 2 +SLUNK 1 0 1 1 +SLUMS 1 0 1 1 +SLUMBERS 1 0 1 1 +SLOWLY 14 0 14 14 +SLOW 4 0 4 4 +SLOPING 1 0 1 1 +SLIPS 1 0 1 1 +SLIPPING 1 0 1 1 +SLIPPED 4 0 4 4 +SLINGS 1 0 1 1 +SLIMY 1 0 1 1 +SLIMLY 1 0 1 1 +SLIGHTLY 4 0 4 4 +SLIGHTEST 1 0 1 1 +SLIGHTER 1 0 1 1 +SLIGHT 4 0 4 4 +SLEPT 2 0 2 2 +SLENDER 2 0 2 2 +SLEEPING 2 0 2 2 +SLEEK 2 0 2 2 +SLAVES 1 0 1 1 +SLAVERY 3 0 3 3 +SLAVE 1 0 1 1 +SLATED 1 0 1 1 +SLAP 1 0 1 1 +SLANDERER 1 0 1 1 +SLAM 1 0 1 1 +SLAKED 1 0 1 1 +SKY 5 0 5 5 +SKIRTS 1 0 1 1 +SKIRT 1 0 1 1 +SKIRMISHES 1 0 1 1 +SKIP 1 0 1 1 +SKINNER 1 0 1 1 +SKINNED 1 0 1 1 +SKIN 3 0 3 3 +SKIMS 1 0 1 1 +SKILL 2 0 2 2 +SKETCHES 1 0 1 1 +SKETCH 1 0 1 1 +SKEPTICISM 1 0 1 1 +SKELETON 1 0 1 1 +SIZZLE 1 0 1 1 +SIXTY 3 0 3 3 +SIXTH 1 0 1 1 +SIXTEENTH 4 0 4 4 +SIXTEEN 1 0 1 1 +SIX 14 0 14 14 +SITUATION 2 0 2 2 +SITE 1 0 1 1 +SISTERS 5 0 5 5 +SISTER'S 1 0 1 1 +SISTER 8 0 8 8 +SIRE 4 0 4 4 +SINNER 2 0 2 2 +SINKS 1 0 1 1 +SINK 1 0 1 1 +SINGS 1 0 1 1 +SINGLED 1 0 1 1 +SINGLE 5 0 5 5 +SINGING 2 0 2 2 +SINGER'S 1 0 1 1 +SINGER 2 0 2 2 +SING 2 0 2 2 +SINFUL 2 0 2 2 +SIMPLY 10 0 10 10 +SIMPLIFIED 1 0 1 1 +SIMPLICITY 2 0 2 2 +SIMPLE 9 0 9 9 +SIMON 1 0 1 1 +SIMILITUDE 1 0 1 1 +SIMILARLY 1 0 1 1 +SIMILAR 3 0 3 3 +SILVERING 1 0 1 1 +SILVER 8 0 8 8 +SILLINESS 2 0 2 2 +SILKEN 2 0 2 2 +SILK 6 0 6 6 +SILHOUETTE 1 0 1 1 +SILENCES 1 0 1 1 +SILAS 1 0 1 1 +SIGNS 4 0 4 4 +SIGNING 1 0 1 1 +SIGNIFICANTLY 1 0 1 1 +SIGNIFICANCE 3 0 3 3 +SIGNED 1 0 1 1 +SIGNATURE 1 0 1 1 +SIGN 5 0 5 5 +SIDEWAYS 1 0 1 1 +SIDES 6 0 6 6 +SICKNESS 2 0 2 2 +SHY 1 0 1 1 +SHUTTERS 1 0 1 1 +SHUT 3 0 3 3 +SHUNNING 1 0 1 1 +SHUDDER 2 0 2 2 +SHRUGGED 1 0 1 1 +SHRUBBERY 1 0 1 1 +SHRIVELLED 1 0 1 1 +SHRINE 1 0 1 1 
+SHRILL 1 0 1 1 +SHRIEKED 1 0 1 1 +SHREWISH 1 0 1 1 +SHREWDLY 1 0 1 1 +SHREWD 1 0 1 1 +SHOWN 1 0 1 1 +SHOWERED 1 0 1 1 +SHOWER 1 0 1 1 +SHOW 10 0 10 10 +SHOUTINGS 1 0 1 1 +SHOUTING 1 0 1 1 +SHOUTED 3 0 3 3 +SHOUT 3 0 3 3 +SHOULDN'T 1 0 1 1 +SHOULDERS 5 0 5 5 +SHOULDER 5 0 5 5 +SHORTLY 1 0 1 1 +SHORT 11 0 11 11 +SHORES 1 0 1 1 +SHORE 4 0 4 4 +SHOPS 1 0 1 1 +SHOP 2 0 2 2 +SHOOTING 1 0 1 1 +SHOOT 1 0 1 1 +SHOOK 10 0 10 10 +SHONE 7 0 7 7 +SHOES 3 0 3 3 +SHOCK 4 0 4 4 +SHOAL 1 0 1 1 +SHIVERING 1 0 1 1 +SHIVER 2 0 2 2 +SHIRTS 1 0 1 1 +SHIRK 1 0 1 1 +SHIPS 2 0 2 2 +SHIPPING 1 0 1 1 +SHINING 4 0 4 4 +SHINES 2 0 2 2 +SHINE 2 0 2 2 +SHIFTED 1 0 1 1 +SHIELD 1 0 1 1 +SHERWOOD 1 0 1 1 +SHEPHERD 2 0 2 2 +SHELVES 1 0 1 1 +SHELTER 1 0 1 1 +SHELLEY'S 1 0 1 1 +SHEETING 1 0 1 1 +SHEET 4 0 4 4 +SHEEP 1 0 1 1 +SHEAF 1 0 1 1 +SHAWL 1 0 1 1 +SHAVINGS 1 0 1 1 +SHAVEN 1 0 1 1 +SHARPLY 2 0 2 2 +SHARPENED 1 0 1 1 +SHARP 8 0 8 8 +SHARING 1 0 1 1 +SHARED 1 0 1 1 +SHARE 1 0 1 1 +SHAPED 1 0 1 1 +SHAPE 1 0 1 1 +SHANNON 3 0 3 3 +SHAN'T 1 0 1 1 +SHAMES 1 0 1 1 +SHAME 3 0 3 3 +SHAM 1 0 1 1 +SHALLOWS 1 0 1 1 +SHALLOW 2 0 2 2 +SHAKING 2 0 2 2 +SHAGGY 2 0 2 2 +SHAFT 1 0 1 1 +SHADY 2 0 2 2 +SHADOWS 7 0 7 7 +SHADOW 2 0 2 2 +SHADES 1 0 1 1 +SHADE 4 0 4 4 +SHACKLETON 1 0 1 1 +SEXTANT 1 0 1 1 +SEX 2 0 2 2 +SEWING 1 0 1 1 +SEWED 1 0 1 1 +SEVERITY 4 0 4 4 +SEVERITIES 1 0 1 1 +SEVERED 2 0 2 2 +SEVERE 1 0 1 1 +SEVERAL 9 0 9 9 +SEVENTY 2 0 2 2 +SEVENTH 1 0 1 1 +SEVENTEEN 2 0 2 2 +SEVEN 6 0 6 6 +SETTLERS 1 0 1 1 +SETTLER 1 0 1 1 +SETTLEMENTS 1 0 1 1 +SETTLEMENT 3 0 3 3 +SETTLED 1 0 1 1 +SETTLE 3 0 3 3 +SETH 1 0 1 1 +SESSION 1 0 1 1 +SERVITUDE 1 0 1 1 +SERVILE 1 0 1 1 +SERVICE 12 0 12 12 +SERVADAC'S 1 0 1 1 +SERVADAC 7 0 7 7 +SERIOUSLY 4 0 4 4 +SERIOUS 3 0 3 3 +SERIES 3 0 3 3 +SERENE 1 0 1 1 +SERAPHIC 1 0 1 1 +SEQUEL 1 0 1 1 +SEPARATION 1 0 1 1 +SENTIMENTS 1 0 1 1 +SENTIMENTAL 1 0 1 1 +SENTIMENT 1 0 1 1 +SENTENTIOUSLY 1 0 1 1 +SENSITIVE 1 0 1 1 +SENSIBLE 1 0 1 1 +SENSES 2 0 2 2 +SENSELESS 1 0 1 1 +SENSE 16 0 16 16 +SENSATIONS 1 0 1 1 +SENSATIONAL 1 0 1 1 +SENSATION 2 0 2 2 +SENORA 1 0 1 1 +SENOR 1 0 1 1 +SENIOR 1 0 1 1 +SEND 3 0 3 3 +SENATOR 1 0 1 1 +SELLING 2 0 2 2 +SELF 5 0 5 5 +SELECTION 2 0 2 2 +SELECTED 1 0 1 1 +SELDOM 3 0 3 3 +SEIZING 2 0 2 2 +SEIZE 1 0 1 1 +SEGMENT 1 0 1 1 +SEES 1 0 1 1 +SEEMINGLY 3 0 3 3 +SEEKERS 1 0 1 1 +SEEK 4 0 4 4 +SEEDS 1 0 1 1 +SEED 2 0 2 2 +SECURITY 2 0 2 2 +SECURING 1 0 1 1 +SECURED 3 0 3 3 +SECURE 4 0 4 4 +SECTS 1 0 1 1 +SECRETLY 3 0 3 3 +SECRET 3 0 3 3 +SECRECY 1 0 1 1 +SECONDS 2 0 2 2 +SECONDLY 1 0 1 1 +SECONDED 1 0 1 1 +SECONDARY 1 0 1 1 +SECOND 10 0 10 10 +SECLUSION 1 0 1 1 +SECESSIONISTS 1 0 1 1 +SEATS 3 0 3 3 +SEATED 3 0 3 3 +SEASONS 2 0 2 2 +SEASONABLE 1 0 1 1 +SEASON 3 0 3 3 +SEAS 2 0 2 2 +SEARCHING 1 0 1 1 +SEARCHED 2 0 2 2 +SEARCH 4 0 4 4 +SCURRIED 1 0 1 1 +SCULPTURE 1 0 1 1 +SCRUTINY 1 0 1 1 +SCRUTINIZE 1 0 1 1 +SCRUPLES 1 0 1 1 +SCRUB 1 0 1 1 +SCRIPTURES 1 0 1 1 +SCRIBE 1 0 1 1 +SCRIBBLING 1 0 1 1 +SCRIBBLER 1 0 1 1 +SCREEN 1 0 1 1 +SCREAMED 3 0 3 3 +SCRAMBLED 1 0 1 1 +SCOWLED 1 0 1 1 +SCOTTISH 2 0 2 2 +SCOTS 1 0 1 1 +SCOTLAND 1 0 1 1 +SCORPION 1 0 1 1 +SCORNFUL 2 0 2 2 +SCORE 2 0 2 2 +SCORCHED 1 0 1 1 +SCOPE 1 0 1 1 +SCOLD 1 0 1 1 +SCIENTISTS 2 0 2 2 +SCIENTIST 1 0 1 1 +SCIENTIFIC 1 0 1 1 +SCIENCE 2 0 2 2 +SCHOONER 2 0 2 2 +SCHOOLROOM 1 0 1 1 +SCHOOLED 1 0 1 1 +SCHOLARSHIP 1 0 1 1 +SCHOLAR 1 0 1 1 +SCHISM 1 0 1 1 +SCHEME 5 0 5 5 +SCEPTICISM 1 0 1 1 +SCENES 1 0 1 1 +SCATTERS 1 0 1 1 +SCATTERED 3 0 3 3 +SCATTER 1 0 1 1 +SCARLET 3 0 3 3 +SCARE 1 0 1 1 +SCARCELY 9 0 9 9 +SCARCE 1 0 1 1 +SCALP 1 0 
1 1 +SCALE 2 0 2 2 +SAYS 12 0 12 12 +SAY 51 0 51 51 +SAWDUST 1 0 1 1 +SAVIOUR 1 0 1 1 +SAVINGS 1 0 1 1 +SAVING 1 0 1 1 +SAVES 1 0 1 1 +SAVAGE 5 0 5 5 +SAUNTERED 1 0 1 1 +SAUCE 1 0 1 1 +SATURDAY 5 0 5 5 +SATISFY 1 0 1 1 +SATISFIED 8 0 8 8 +SATISFACTION 5 0 5 5 +SATANIC 1 0 1 1 +SARCASTIC 1 0 1 1 +SARAH'S 1 0 1 1 +SANK 1 0 1 1 +SANGUINARY 1 0 1 1 +SANDY 2 0 2 2 +SANDWICHES 1 0 1 1 +SANDFORD 1 0 1 1 +SANCTIFYING 1 0 1 1 +SANCTIFIED 1 0 1 1 +SAMPLE 1 0 1 1 +SAME 35 0 35 35 +SALVATION 1 0 1 1 +SALUTE 1 0 1 1 +SALUTATION 1 0 1 1 +SALON 1 0 1 1 +SAKE 4 0 4 4 +SAINTS 5 0 5 5 +SAILS 2 0 2 2 +SAILORS 1 0 1 1 +SAILORMAN 1 0 1 1 +SAFETY 2 0 2 2 +SAFEST 1 0 1 1 +SAFE 5 0 5 5 +SADLY 2 0 2 2 +SADDLER 1 0 1 1 +SADDLE 1 0 1 1 +SACRIFICE 2 0 2 2 +SACRED 2 0 2 2 +SACRAMENT 1 0 1 1 +S 1 0 1 1 +RUTH 10 0 10 10 +RUSTY 1 0 1 1 +RUSTLING 2 0 2 2 +RUSTLED 1 0 1 1 +RUST 1 0 1 1 +RUSHING 1 0 1 1 +RUSHED 7 0 7 7 +RUSH 4 0 4 4 +RUNS 2 0 2 2 +RUNNING 8 0 8 8 +RUN 9 0 9 9 +RUMMAGED 1 0 1 1 +RUMINATED 1 0 1 1 +RULER 1 0 1 1 +RULED 1 0 1 1 +RULE 2 0 2 2 +RUINS 1 0 1 1 +RUINED 1 0 1 1 +RUIN 2 0 2 2 +RUFFLED 1 0 1 1 +RUFFIANS 1 0 1 1 +RUFFIAN 2 0 2 2 +RUDELY 1 0 1 1 +RUDE 2 0 2 2 +RUBY 1 0 1 1 +RUBBING 1 0 1 1 +RUBBED 1 0 1 1 +ROYALISTS 2 0 2 2 +ROYAL 9 0 9 9 +ROWS 2 0 2 2 +ROW 5 0 5 5 +ROVING 1 0 1 1 +ROUTINE 1 0 1 1 +ROUSES 1 0 1 1 +ROUSED 1 0 1 1 +ROUSE 1 0 1 1 +ROUNDED 1 0 1 1 +ROUGHLY 7 0 7 7 +ROUGHEST 1 0 1 1 +ROUGH 3 0 3 3 +ROSES 2 0 2 2 +ROSE 14 0 14 14 +ROSALIE 4 0 4 4 +ROOMS 3 0 3 3 +ROOM 41 0 41 41 +ROOFS 1 0 1 1 +ROMANCE 1 0 1 1 +ROMAN 1 0 1 1 +ROLLERS 2 0 2 2 +ROLLED 3 0 3 3 +ROLL 1 0 1 1 +ROD 1 0 1 1 +ROCKY 1 0 1 1 +ROCKS 2 0 2 2 +ROCKING 2 0 2 2 +ROBUST 1 0 1 1 +ROBINSON 1 0 1 1 +ROBIN 19 0 19 19 +ROBERT 2 0 2 2 +ROBBING 1 0 1 1 +ROBBER 1 0 1 1 +ROARINGS 1 0 1 1 +ROARED 1 0 1 1 +ROADS 1 0 1 1 +ROAD 4 0 4 4 +RIVULET 2 0 2 2 +RIVER 6 0 6 6 +RIVAL 2 0 2 2 +RISK 2 0 2 2 +RISING 3 0 3 3 +RISEN 1 0 1 1 +RISE 1 0 1 1 +RIPPLING 1 0 1 1 +RIOTING 2 0 2 2 +RIOT 1 0 1 1 +RINGS 1 0 1 1 +RING 3 0 3 3 +RIGOROUSLY 1 0 1 1 +RIGOROUS 1 0 1 1 +RIGIDLY 1 0 1 1 +RIGIDITY 2 0 2 2 +RIGID 1 0 1 1 +RIGHTS 1 0 1 1 +RIGHTLY 1 0 1 1 +RIGHTEOUSNESS 1 0 1 1 +RIGHTEOUS 1 0 1 1 +RIGGING 1 0 1 1 +RIFLES 1 0 1 1 +RIDGE 1 0 1 1 +RIDES 1 0 1 1 +RIDE 1 0 1 1 +RIDDLE 1 0 1 1 +RICHLY 1 0 1 1 +RICHEST 1 0 1 1 +RICHER 1 0 1 1 +RICH 11 0 11 11 +RHYTHM 1 0 1 1 +REYNOLDS 2 0 2 2 +REWARDED 2 0 2 2 +REWARD 3 0 3 3 +REVOLVING 1 0 1 1 +REVIVES 1 0 1 1 +REVERT 1 0 1 1 +REVERSED 1 0 1 1 +REVERSAL 1 0 1 1 +REVERIE 2 0 2 2 +REVEREND 1 0 1 1 +REVERENCE 1 0 1 1 +REVENGE 3 0 3 3 +REVELATION 1 0 1 1 +REVEAL 1 0 1 1 +RETURNING 1 0 1 1 +RETURNED 18 0 18 18 +RETURN 9 0 9 9 +RETRIEVE 1 0 1 1 +RETREATED 1 0 1 1 +RETREAT 3 0 3 3 +RETRACE 1 0 1 1 +RETIREMENT 2 0 2 2 +RETIRED 2 0 2 2 +RETIRE 1 0 1 1 +RETAINERS 1 0 1 1 +RETAINER 1 0 1 1 +RETAINED 3 0 3 3 +RETAIN 1 0 1 1 +RESURRECTION 3 0 3 3 +RESUMED 2 0 2 2 +RESULTS 4 0 4 4 +RESULTED 3 0 3 3 +RESULT 5 0 5 5 +RESTRAINED 1 0 1 1 +RESTORING 1 0 1 1 +RESTORED 2 0 2 2 +RESTLESS 4 0 4 4 +RESTED 2 0 2 2 +RESPONSIBLE 2 0 2 2 +RESPONSES 1 0 1 1 +RESPONDED 3 0 3 3 +RESPECTS 1 0 1 1 +RESPECTING 1 0 1 1 +RESPECT 3 0 3 3 +RESOURCES 2 0 2 2 +RESOUNDING 1 0 1 1 +RESORT 2 0 2 2 +RESOLVED 3 0 3 3 +RESOLVE 2 0 2 2 +RESOLUTIONS 1 0 1 1 +RESOLUTION 1 0 1 1 +RESOLUTE 1 0 1 1 +RESISTANCE 1 0 1 1 +RESIST 1 0 1 1 +RESIGNED 2 0 2 2 +RESIGNATION 2 0 2 2 +RESIDENCES 1 0 1 1 +RESIDENCE 3 0 3 3 +RESIDE 1 0 1 1 +RESERVOIR 1 0 1 1 +RESENTFUL 1 0 1 1 +RESENTED 1 0 1 1 +RESEMBLING 2 0 2 2 +RESEMBLED 1 0 1 1 +RESEMBLE 1 0 1 1 +RESEMBLANCE 2 0 2 2 +RESCUE 
2 0 2 2 +REQUISITION 1 0 1 1 +REQUIRING 1 0 1 1 +REQUIREMENTS 1 0 1 1 +REQUIRED 3 0 3 3 +REQUIRE 4 0 4 4 +REQUEST 1 0 1 1 +REPUTE 2 0 2 2 +REPUTATION 1 0 1 1 +REPUGNANT 1 0 1 1 +REPUBLISH 1 0 1 1 +REPUBLICAN 1 0 1 1 +REPUBLIC 5 0 5 5 +REPTILES 2 0 2 2 +REPROOF 1 0 1 1 +REPRODUCE 1 0 1 1 +REPROACHING 1 0 1 1 +REPROACHFULLY 1 0 1 1 +REPROACH 2 0 2 2 +REPRESS 2 0 2 2 +REPRESENTS 1 0 1 1 +REPRESENTING 1 0 1 1 +REPRESENTATIVE 1 0 1 1 +REPRESENTATION 1 0 1 1 +REPRESENT 2 0 2 2 +REPOSE 2 0 2 2 +REPORT 2 0 2 2 +REPLY 7 0 7 7 +REPLIED 20 0 20 20 +REPLACES 1 0 1 1 +REPLACE 1 0 1 1 +REPETITION 1 0 1 1 +REPENTING 1 0 1 1 +REPENTANCE 1 0 1 1 +REPENT 1 0 1 1 +REPELLENT 1 0 1 1 +REPELLED 1 0 1 1 +REPEATED 1 0 1 1 +REPAST 1 0 1 1 +REPARTEES 1 0 1 1 +REPAIRS 1 0 1 1 +REPAIRED 1 0 1 1 +RENTED 1 0 1 1 +RENTAL 1 0 1 1 +RENT 1 0 1 1 +RENEWED 2 0 2 2 +RENEWABLE 1 0 1 1 +RENDING 1 0 1 1 +RENDERING 2 0 2 2 +RENDERED 2 0 2 2 +RENDER 3 0 3 3 +REMOTENESS 1 0 1 1 +REMOTE 2 0 2 2 +REMONSTRANCE 1 0 1 1 +REMNANT 2 0 2 2 +REMISSION 2 0 2 2 +REMIND 1 0 1 1 +REMEMBRANCE 2 0 2 2 +REMEMBERS 1 0 1 1 +REMEMBERING 3 0 3 3 +REMEDY 1 0 1 1 +REMARKS 1 0 1 1 +REMARKING 1 0 1 1 +REMARKABLE 2 0 2 2 +REMAINS 3 0 3 3 +RELY 1 0 1 1 +RELUCTANTLY 1 0 1 1 +RELINQUISHED 1 0 1 1 +RELIGIOUS 1 0 1 1 +RELIGION 4 0 4 4 +RELIEVE 2 0 2 2 +RELIEF 1 0 1 1 +RELIANCE 1 0 1 1 +RELIABLE 1 0 1 1 +RELATIVES 1 0 1 1 +RELATIVE 1 0 1 1 +RELATIONSHIP 1 0 1 1 +RELATIONS 2 0 2 2 +RELATION 4 0 4 4 +RELATED 3 0 3 3 +RELATE 2 0 2 2 +RELAPSES 1 0 1 1 +REJOICING 2 0 2 2 +REJOICED 1 0 1 1 +REJOICE 5 0 5 5 +REINS 1 0 1 1 +REIGNS 1 0 1 1 +REIGN 1 0 1 1 +REGULATOR 1 0 1 1 +REGULATIONS 1 0 1 1 +REGULATION 1 0 1 1 +REGULATED 1 0 1 1 +REGULARLY 1 0 1 1 +REGULARITY 1 0 1 1 +REGULAR 1 0 1 1 +REGRET 1 0 1 1 +REGISTRATION 1 0 1 1 +REGISTERS 1 0 1 1 +REGISTERED 1 0 1 1 +REGIONS 1 0 1 1 +REGION 1 0 1 1 +REGIMENTS 1 0 1 1 +REGIMENT 1 0 1 1 +REGGIE 2 0 2 2 +REGARDS 2 0 2 2 +REGARDLESS 1 0 1 1 +REGARDED 2 0 2 2 +REGARD 5 0 5 5 +REGAINING 1 0 1 1 +REFUSING 2 0 2 2 +REFUSAL 1 0 1 1 +REFUGEES 1 0 1 1 +REFUGE 1 0 1 1 +REFRESHING 1 0 1 1 +REFRESH 2 0 2 2 +REFRAINED 1 0 1 1 +REFORMS 1 0 1 1 +REFORM 1 0 1 1 +REFLECTIONS 1 0 1 1 +REFLECTION 4 0 4 4 +REFLECTED 3 0 3 3 +REFLECT 1 0 1 1 +REFINEMENTS 1 0 1 1 +REFINEMENT 1 0 1 1 +REFINED 1 0 1 1 +REFERRING 2 0 2 2 +REFER 2 0 2 2 +REED 1 0 1 1 +REDUCED 1 0 1 1 +REDOUBLES 1 0 1 1 +REDOUBLED 1 0 1 1 +REDMAN'S 1 0 1 1 +REDEEMER 1 0 1 1 +RECUR 1 0 1 1 +RECTOR 2 0 2 2 +RECREATION 2 0 2 2 +RECOVERY 1 0 1 1 +RECOVERING 1 0 1 1 +RECOVERED 1 0 1 1 +RECOVER 1 0 1 1 +RECONCILIATION 1 0 1 1 +RECOLLECTIONS 1 0 1 1 +RECOILED 2 0 2 2 +RECOGNIZE 4 0 4 4 +RECOGNITION 9 0 9 9 +RECLINING 1 0 1 1 +RECKONING 2 0 2 2 +RECKONED 1 0 1 1 +RECKON 1 0 1 1 +RECKLESS 1 0 1 1 +RECITED 5 0 5 5 +RECITE 2 0 2 2 +RECESSES 1 0 1 1 +RECEPTION 4 0 4 4 +RECENTLY 1 0 1 1 +RECENT 2 0 2 2 +RECEIVING 2 0 2 2 +RECEIVES 1 0 1 1 +RECEIVER 1 0 1 1 +RECEIVED 9 0 9 9 +RECEIVE 3 0 3 3 +RECAPTURED 1 0 1 1 +RECALLING 1 0 1 1 +RECALLED 4 0 4 4 +REBUKES 1 0 1 1 +REBUKE 1 0 1 1 +REBELLION 2 0 2 2 +REBEL 1 0 1 1 +REBATE 1 0 1 1 +REASSURED 1 0 1 1 +REASONS 1 0 1 1 +REASONING 1 0 1 1 +REASON 19 0 19 19 +REAR 1 0 1 1 +REALLY 10 0 10 10 +REALIZED 2 0 2 2 +REALITY 8 0 8 8 +REAL 16 0 16 16 +READING 4 0 4 4 +READINESS 1 0 1 1 +READILY 2 0 2 2 +REACHED 12 0 12 12 +REACH 3 0 3 3 +RAVISHING 1 0 1 1 +RAVING 1 0 1 1 +RAVINES 1 0 1 1 +RATS 1 0 1 1 +RATIFY 1 0 1 1 +RATIFICATION 1 0 1 1 +RATHER 23 0 23 23 +RATED 2 0 2 2 +RATE 7 0 7 7 +RASH 1 0 1 1 +RASCAL 1 0 1 1 +RARELY 1 0 1 1 +RARE 5 0 5 5 +RAPTUROUS 1 0 1 1 
+RAPTURES 1 0 1 1 +RAPIDS 3 0 3 3 +RAPIDLY 3 0 3 3 +RAPIDITY 2 0 2 2 +RAPID 3 0 3 3 +RAPHAEL 1 0 1 1 +RAOUL 3 0 3 3 +RANSOM 1 0 1 1 +RANKING 1 0 1 1 +RANKED 1 0 1 1 +RANGERS 1 0 1 1 +RANGED 1 0 1 1 +RANGE 4 0 4 4 +RANG 4 0 4 4 +RAN 12 0 12 12 +RAMPART 1 0 1 1 +RALPH 2 0 2 2 +RAISED 6 0 6 6 +RAISE 1 0 1 1 +RAINS 1 0 1 1 +RAINDROPS 1 0 1 1 +RAINBOW 1 0 1 1 +RAIN 3 0 3 3 +RAILROADS 1 0 1 1 +RAGS 2 0 2 2 +RAGGED 1 0 1 1 +RAGE 3 0 3 3 +RAFT 7 0 7 7 +RADIE 2 0 2 2 +RADICALS 1 0 1 1 +RADICALISM 1 0 1 1 +RADIANCE 1 0 1 1 +RACKED 1 0 1 1 +RACK 1 0 1 1 +RACHEL'S 1 0 1 1 +RACHEL 16 0 16 16 +RACES 6 0 6 6 +RACE 1 0 1 1 +RABBLE 1 0 1 1 +RABBIT 2 0 2 2 +QUOTE 2 0 2 2 +QUIVERING 2 0 2 2 +QUIVERED 1 0 1 1 +QUITTING 1 0 1 1 +QUITTED 1 0 1 1 +QUITE 29 0 29 29 +QUINCY 1 0 1 1 +QUILT 3 0 3 3 +QUIETLY 5 0 5 5 +QUIET 4 0 4 4 +QUICKLY 5 0 5 5 +QUICK 6 0 6 6 +QUESTIONS 4 0 4 4 +QUESTIONING 1 0 1 1 +QUESTIONED 2 0 2 2 +QUESTION 12 0 12 12 +QUEST 1 0 1 1 +QUERIED 1 0 1 1 +QUEER 3 0 3 3 +QUEENSTOWN 1 0 1 1 +QUEENS 1 0 1 1 +QUEEN'S 1 0 1 1 +QUEEN 8 0 8 8 +QUARTERS 3 0 3 3 +QUARTER 7 0 7 7 +QUARRY 2 0 2 2 +QUARREL 1 0 1 1 +QUANTITY 4 0 4 4 +QUANTITIES 1 0 1 1 +QUALITY 1 0 1 1 +QUALITIES 3 0 3 3 +QUALIFICATIONS 2 0 2 2 +QUALIFICATION 1 0 1 1 +QUAKE 1 0 1 1 +QUAINT 1 0 1 1 +QUADRILLE 2 0 2 2 +QUADRANGLE 1 0 1 1 +PYTHON 1 0 1 1 +PYRAMIDS 2 0 2 2 +PUZZLED 1 0 1 1 +PUT 32 0 32 32 +PUSHED 2 0 2 2 +PUSH 2 0 2 2 +PURSUITS 3 0 3 3 +PURSUIT 2 0 2 2 +PURSUER 1 0 1 1 +PURSUED 1 0 1 1 +PURSUE 1 0 1 1 +PURRING 1 0 1 1 +PURPOSES 2 0 2 2 +PURPLE 1 0 1 1 +PURITY 2 0 2 2 +PURELY 1 0 1 1 +PURCHASED 1 0 1 1 +PUPIL 1 0 1 1 +PUNISHMENT 4 0 4 4 +PUNISHED 4 0 4 4 +PUNCH 1 0 1 1 +PUMP 1 0 1 1 +PULPIT 1 0 1 1 +PULLING 2 0 2 2 +PULLED 2 0 2 2 +PULL 1 0 1 1 +PUFFY 1 0 1 1 +PUBLISH 1 0 1 1 +PUBLIC 13 0 13 13 +PSYCHOLOGY 1 0 1 1 +PSYCHE 2 0 2 2 +PRYNNE 4 0 4 4 +PRUDENT 2 0 2 2 +PROWESS 1 0 1 1 +PROW 1 0 1 1 +PROVOCATION 1 0 1 1 +PROVISIONALLY 1 0 1 1 +PROVISION 2 0 2 2 +PROVINCES 2 0 2 2 +PROVINCE 2 0 2 2 +PROVIDING 1 0 1 1 +PROVIDED 3 0 3 3 +PROVEN 1 0 1 1 +PROVE 4 0 4 4 +PROUDLY 1 0 1 1 +PROUD 5 0 5 5 +PROTESTED 1 0 1 1 +PROTEST 1 0 1 1 +PROTECTOR 1 0 1 1 +PROTECTION 3 0 3 3 +PROTECTING 1 0 1 1 +PROTECTED 1 0 1 1 +PROTECT 4 0 4 4 +PROSTRATION 2 0 2 2 +PROSPECTS 1 0 1 1 +PROSELYTES 1 0 1 1 +PROSECUTION 1 0 1 1 +PROSECUTE 1 0 1 1 +PROSE 1 0 1 1 +PROSCRIPTION 1 0 1 1 +PROPRIETY 1 0 1 1 +PROPRIETORS 1 0 1 1 +PROPRIETOR 1 0 1 1 +PROPOSED 2 0 2 2 +PROPOSE 1 0 1 1 +PROPOSALS 1 0 1 1 +PROPORTIONS 3 0 3 3 +PROPHETS 1 0 1 1 +PROPHET 1 0 1 1 +PROPERTY 6 0 6 6 +PROPERTIES 1 0 1 1 +PROPERLY 4 0 4 4 +PROPER 3 0 3 3 +PROPENSITIES 1 0 1 1 +PROOFS 4 0 4 4 +PROOF 3 0 3 3 +PRONUNCIATION 1 0 1 1 +PRONOUNCED 4 0 4 4 +PROMPTLY 1 0 1 1 +PROMPT 1 0 1 1 +PROMOTING 1 0 1 1 +PROMOTED 1 0 1 1 +PROMISES 2 0 2 2 +PROMISED 4 0 4 4 +PROMISE 4 0 4 4 +PROMINENT 2 0 2 2 +PROLIFIC 1 0 1 1 +PROJECTION 1 0 1 1 +PROJECT 2 0 2 2 +PROGRESSING 1 0 1 1 +PROGRESS 6 0 6 6 +PROGRAMME 1 0 1 1 +PROFOUND 2 0 2 2 +PROFITABLE 1 0 1 1 +PROFESSOR 6 0 6 6 +PROFESSIONS 1 0 1 1 +PROFESSION 1 0 1 1 +PROFESSING 1 0 1 1 +PROFESSED 1 0 1 1 +PRODUCTIVE 1 0 1 1 +PRODUCTION 1 0 1 1 +PRODUCT 1 0 1 1 +PRODUCING 2 0 2 2 +PRODUCED 5 0 5 5 +PRODUCE 3 0 3 3 +PROCOPE 2 0 2 2 +PROCESSION 1 0 1 1 +PROCESSES 1 0 1 1 +PROCESS 3 0 3 3 +PROCEEDINGS 2 0 2 2 +PROCEEDED 2 0 2 2 +PROBLEM 4 0 4 4 +PROBING 1 0 1 1 +PROBABLY 10 0 10 10 +PROBABLE 2 0 2 2 +PRO 2 0 2 2 +PRIZE 1 0 1 1 +PRIVILEGE 3 0 3 3 +PRIVATION 1 0 1 1 +PRIVATELY 1 0 1 1 +PRIVATE 11 0 11 11 +PRIVACY 1 0 1 1 +PRISTINE 1 0 1 1 +PRISONER 4 0 4 4 +PRISON 4 0 4 4 
+PRINTING 1 0 1 1 +PRINTER 2 0 2 2 +PRINCIPLES 2 0 2 2 +PRINCIPLE 4 0 4 4 +PRINCIPAL 4 0 4 4 +PRINCESSES 2 0 2 2 +PRINCESS 9 0 9 9 +PRINCES 2 0 2 2 +PRINCE 3 0 3 3 +PRIMLY 1 0 1 1 +PRIMITIVE 2 0 2 2 +PRIMATE 1 0 1 1 +PRIMARY 1 0 1 1 +PRIMARILY 1 0 1 1 +PRIESTHOOD 1 0 1 1 +PRIEST 1 0 1 1 +PRIDE 9 0 9 9 +PRICE 2 0 2 2 +PREY 2 0 2 2 +PREVIOUSLY 2 0 2 2 +PREVIOUS 2 0 2 2 +PREVENTS 1 0 1 1 +PREVENTING 1 0 1 1 +PREVAILING 1 0 1 1 +PRETTY 10 0 10 10 +PRETTIEST 1 0 1 1 +PRETENSION 1 0 1 1 +PRETENDED 1 0 1 1 +PRESUMED 1 0 1 1 +PRESUMABLY 1 0 1 1 +PRESSURE 4 0 4 4 +PRESSING 1 0 1 1 +PRESSED 2 0 2 2 +PRESS 2 0 2 2 +PRESIDENT 5 0 5 5 +PRESIDED 1 0 1 1 +PRESERVED 1 0 1 1 +PRESERVE 1 0 1 1 +PRESENTS 3 0 3 3 +PRESENTLY 7 0 7 7 +PRESENTING 2 0 2 2 +PRESENTED 1 0 1 1 +PRESENCE 6 0 6 6 +PREROGATIVES 1 0 1 1 +PREPARING 1 0 1 1 +PREPARED 3 0 3 3 +PREPARE 1 0 1 1 +PREOCCUPIED 1 0 1 1 +PREOCCUPATION 1 0 1 1 +PREMISES 1 0 1 1 +PRELIMINARY 1 0 1 1 +PREFERENCE 1 0 1 1 +PREDOMINATE 1 0 1 1 +PREDOMINANCE 1 0 1 1 +PREDICTED 1 0 1 1 +PREDATORY 2 0 2 2 +PRECISION 1 0 1 1 +PRECISELY 2 0 2 2 +PRECIPITATED 1 0 1 1 +PRECIPITATE 1 0 1 1 +PRECIOUS 3 0 3 3 +PRECAUTION 1 0 1 1 +PREACHER 1 0 1 1 +PREACH 3 0 3 3 +PRAYERS 1 0 1 1 +PRAYED 1 0 1 1 +PRAY 7 0 7 7 +PRATTLED 1 0 1 1 +PRAISES 2 0 2 2 +PRAISED 1 0 1 1 +PRAIRIES 2 0 2 2 +PRACTISED 1 0 1 1 +PRACTICALLY 4 0 4 4 +PRACTICAL 6 0 6 6 +POYSER 9 0 9 9 +POWERS 3 0 3 3 +POWERLESS 1 0 1 1 +POWERFUL 2 0 2 2 +POWDERY 1 0 1 1 +POWDER 8 0 8 8 +POVERTY 2 0 2 2 +POURING 1 0 1 1 +POURED 3 0 3 3 +POUR 2 0 2 2 +POUNDED 1 0 1 1 +POUND 4 0 4 4 +POTFULS 1 0 1 1 +POTATOES 1 0 1 1 +POT 2 0 2 2 +POSTS 1 0 1 1 +POSTPONEMENT 1 0 1 1 +POSTED 1 0 1 1 +POSSIBLY 1 0 1 1 +POSSIBLE 6 0 6 6 +POSSESSING 1 0 1 1 +POSSESSED 3 0 3 3 +POSSESS 3 0 3 3 +POSSE 2 0 2 2 +POSITIVE 1 0 1 1 +POSITION 9 0 9 9 +PORTRAIT 1 0 1 1 +PORTIONS 3 0 3 3 +PORTION 2 0 2 2 +PORTAL 1 0 1 1 +PORT 1 0 1 1 +POPULATION 3 0 3 3 +POPULATED 1 0 1 1 +POPULAR 7 0 7 7 +POPPIES 1 0 1 1 +POPPED 1 0 1 1 +POORLY 1 0 1 1 +POOR 15 0 15 15 +PONY 1 0 1 1 +PONDS 1 0 1 1 +PONDERING 1 0 1 1 +POLLY'S 3 0 3 3 +POLLY 4 0 4 4 +POLITICS 1 0 1 1 +POLITICAL 4 0 4 4 +POLITELY 2 0 2 2 +POLITE 1 0 1 1 +POLISHED 3 0 3 3 +POLISH 1 0 1 1 +POLICE 1 0 1 1 +POLE 2 0 2 2 +POISON 1 0 1 1 +POISED 1 0 1 1 +POINTS 3 0 3 3 +POINTING 4 0 4 4 +POINTEDLY 1 0 1 1 +POINTED 3 0 3 3 +POETRY 1 0 1 1 +POETIC 1 0 1 1 +POET 2 0 2 2 +POEMS 1 0 1 1 +POEM 3 0 3 3 +POCKETS 3 0 3 3 +POCKET 3 0 3 3 +PLURALITY 1 0 1 1 +PLUNGES 1 0 1 1 +PLUNDER 2 0 2 2 +PLUMES 1 0 1 1 +PLUCKING 1 0 1 1 +PLOT 3 0 3 3 +PLIABLE 1 0 1 1 +PLENTY 1 0 1 1 +PLENTIFUL 1 0 1 1 +PLEASURE 5 0 5 5 +PLEASING 1 0 1 1 +PLEASES 3 0 3 3 +PLEASED 4 0 4 4 +PLEASE 11 0 11 11 +PLEASANTLY 2 0 2 2 +PLEASANT 5 0 5 5 +PLEADED 1 0 1 1 +PLEAD 1 0 1 1 +PLEA 1 0 1 1 +PLAYTHINGS 3 0 3 3 +PLAYS 1 0 1 1 +PLAYER 2 0 2 2 +PLAYED 3 0 3 3 +PLATTERS 1 0 1 1 +PLATONIC 1 0 1 1 +PLATO'S 1 0 1 1 +PLATO 6 0 6 6 +PLATFORM 3 0 3 3 +PLATE 1 0 1 1 +PLASTER 1 0 1 1 +PLANTS 4 0 4 4 +PLANTED 3 0 3 3 +PLANTATIONS 1 0 1 1 +PLANT 7 0 7 7 +PLANS 2 0 2 2 +PLANNED 2 0 2 2 +PLAN 6 0 6 6 +PLAINER 1 0 1 1 +PLACING 2 0 2 2 +PLACIDITY 1 0 1 1 +PLACID 1 0 1 1 +PLACED 8 0 8 8 +PITYING 1 0 1 1 +PITY 6 0 6 6 +PITIABLE 1 0 1 1 +PITH 1 0 1 1 +PITCHED 1 0 1 1 +PIT 1 0 1 1 +PISTOL 1 0 1 1 +PIPT 1 0 1 1 +PIPE 2 0 2 2 +PINKIES 3 0 3 3 +PINK 2 0 2 2 +PINIONED 1 0 1 1 +PINING 1 0 1 1 +PINES 1 0 1 1 +PINE 1 0 1 1 +PILLARS 1 0 1 1 +PILGRIM'S 1 0 1 1 +PILGRIM 1 0 1 1 +PILED 1 0 1 1 +PIGMENT 1 0 1 1 +PIG 2 0 2 2 +PIETY 1 0 1 1 +PIECES 3 0 3 3 +PIECE 1 0 1 1 +PICTURESQUENESS 1 0 1 1 
+PICTURE 6 0 6 6 +PICNIC 1 0 1 1 +PICKED 3 0 3 3 +PIAZZA 3 0 3 3 +PIANO 4 0 4 4 +PHYSIOLOGY 1 0 1 1 +PHYSIOLOGICAL 1 0 1 1 +PHYSICS 1 0 1 1 +PHYSICAL 2 0 2 2 +PHRONSIE 6 0 6 6 +PHOENICIAN 1 0 1 1 +PHILOSOPHY 2 0 2 2 +PHILOSOPHICAL 1 0 1 1 +PHILOSOPHER 5 0 5 5 +PHILOLOGIST 1 0 1 1 +PHILIP'S 1 0 1 1 +PHILANTHROPIES 1 0 1 1 +PHILADELPHIAN 1 0 1 1 +PHENOMENON 1 0 1 1 +PHENOMENA 1 0 1 1 +PHEASANT 1 0 1 1 +PHASES 1 0 1 1 +PHASE 1 0 1 1 +PHANTOM 1 0 1 1 +PH 1 0 1 1 +PEWTER 2 0 2 2 +PET 1 0 1 1 +PERVERTERS 1 0 1 1 +PERVERTED 1 0 1 1 +PERVERSE 1 0 1 1 +PERVADED 1 0 1 1 +PERTH 1 0 1 1 +PERSUASIVE 1 0 1 1 +PERSUADED 1 0 1 1 +PERSUADE 1 0 1 1 +PERSPIRATION 1 0 1 1 +PERSONS 13 0 13 13 +PERSONALLY 2 0 2 2 +PERSONAL 7 0 7 7 +PERSONAGE 2 0 2 2 +PERSEVERED 1 0 1 1 +PERSECUTORS 1 0 1 1 +PERSECUTION 2 0 2 2 +PERSECUTED 1 0 1 1 +PERPLEXITY 1 0 1 1 +PERPLEXED 2 0 2 2 +PERPETUATE 1 0 1 1 +PERPETUAL 1 0 1 1 +PERNICIOUS 1 0 1 1 +PERMITTING 1 0 1 1 +PERMITTED 2 0 2 2 +PERMIT 1 0 1 1 +PERMISSION 1 0 1 1 +PERMANENT 2 0 2 2 +PERISHES 1 0 1 1 +PERIODICALS 1 0 1 1 +PERIOD 8 0 8 8 +PERILS 1 0 1 1 +PERIL 2 0 2 2 +PERHAPS 17 0 17 17 +PERFORMING 1 0 1 1 +PERFORMED 1 0 1 1 +PERFORM 3 0 3 3 +PERFECTLY 8 0 8 8 +PERFECTION 4 0 4 4 +PERCY 1 0 1 1 +PERCHANCE 1 0 1 1 +PERCH 2 0 2 2 +PERCEPTION 2 0 2 2 +PERCEIVING 1 0 1 1 +PERCEIVES 1 0 1 1 +PERCEIVED 2 0 2 2 +PERCEIVE 2 0 2 2 +PEPPERS 1 0 1 1 +PEPPERED 1 0 1 1 +PEPPER 1 0 1 1 +PEOPLE 36 0 36 36 +PENSIVE 1 0 1 1 +PENSION 1 0 1 1 +PENETRATING 1 0 1 1 +PENETRATE 2 0 2 2 +PENDULOUS 1 0 1 1 +PENCILLED 1 0 1 1 +PENCIL 2 0 2 2 +PENANCE 1 0 1 1 +PENALTY 2 0 2 2 +PENAL 1 0 1 1 +PELL 1 0 1 1 +PEERING 3 0 3 3 +PEEPED 2 0 2 2 +PECUNIARY 6 0 6 6 +PECULIARLY 1 0 1 1 +PECULIAR 1 0 1 1 +PEASE 1 0 1 1 +PEARLY 2 0 2 2 +PEAKED 1 0 1 1 +PEACEFUL 1 0 1 1 +PEACEABLE 1 0 1 1 +PEACE 14 0 14 14 +PAYMENT 1 0 1 1 +PAYING 1 0 1 1 +PAY 3 0 3 3 +PAVEMENT 1 0 1 1 +PAUSED 4 0 4 4 +PAUSE 1 0 1 1 +PAUL 15 0 15 15 +PATTING 1 0 1 1 +PATRONIZING 1 0 1 1 +PATRON 1 0 1 1 +PATRIARCHAL 1 0 1 1 +PATHS 1 0 1 1 +PATHOLOGICAL 1 0 1 1 +PATH 4 0 4 4 +PATCHWORK 4 0 4 4 +PATCHING 2 0 2 2 +PATCHES 2 0 2 2 +PATCHED 1 0 1 1 +PATCH 4 0 4 4 +PASTNESS 1 0 1 1 +PASTEBOARD 1 0 1 1 +PASSIONS 1 0 1 1 +PASSIONATE 1 0 1 1 +PASSION 3 0 3 3 +PASSING 3 0 3 3 +PASSES 1 0 1 1 +PASSER 1 0 1 1 +PASSAGES 1 0 1 1 +PASS 5 0 5 5 +PARTY 9 0 9 9 +PARTS 7 0 7 7 +PARTOOK 2 0 2 2 +PARTNER 1 0 1 1 +PARTLY 7 0 7 7 +PARTITION 1 0 1 1 +PARTISAN 1 0 1 1 +PARTING 2 0 2 2 +PARTIES 4 0 4 4 +PARTICULARS 2 0 2 2 +PARTICULARLY 6 0 6 6 +PARTICULAR 4 0 4 4 +PARTIALLY 1 0 1 1 +PARTED 2 0 2 2 +PART 22 0 22 22 +PARSONAGE 1 0 1 1 +PARROT 2 0 2 2 +PARRIED 1 0 1 1 +PARLIAMENTS 1 0 1 1 +PARLIAMENTARY 1 0 1 1 +PARLIAMENT 5 0 5 5 +PARK 1 0 1 1 +PARIS 9 0 9 9 +PARENTS 4 0 4 4 +PARENT 2 0 2 2 +PARDON 1 0 1 1 +PARASITES 1 0 1 1 +PARAGRAPH 1 0 1 1 +PAPERS 8 0 8 8 +PAPER 8 0 8 8 +PANTOMIME 1 0 1 1 +PANTING 1 0 1 1 +PANTED 1 0 1 1 +PANS 1 0 1 1 +PANGS 1 0 1 1 +PANG 1 0 1 1 +PANES 1 0 1 1 +PANEL 1 0 1 1 +PAN 1 0 1 1 +PALM 3 0 3 3 +PALINGS 1 0 1 1 +PALE 8 0 8 8 +PALATE 1 0 1 1 +PALAIS 1 0 1 1 +PALACE 4 0 4 4 +PAINTING 2 0 2 2 +PAINTER 2 0 2 2 +PAINTED 4 0 4 4 +PAINS 2 0 2 2 +PAINFULLY 1 0 1 1 +PAINFUL 3 0 3 3 +PAIL 1 0 1 1 +PAID 7 0 7 7 +PAGES 1 0 1 1 +PAGE 2 0 2 2 +PACKING 1 0 1 1 +PACKET 1 0 1 1 +PACKED 1 0 1 1 +PACK 1 0 1 1 +PACING 1 0 1 1 +PACIFIED 1 0 1 1 +PACED 3 0 3 3 +P 1 0 1 1 +OZ 4 0 4 4 +OWNER 1 0 1 1 +OWNED 2 0 2 2 +OWN 69 0 69 69 +OWLS 1 0 1 1 +OWING 3 0 3 3 +OVERWROUGHT 1 0 1 1 +OVERWHELMED 1 0 1 1 +OVERWHELM 1 0 1 1 +OVERTHROW 1 0 1 1 +OVERSTATEMENT 1 0 1 1 +OVERRATED 1 
0 1 1 +OVERLOOKER 1 0 1 1 +OVERLOOKED 1 0 1 1 +OVERHEAD 1 0 1 1 +OVERHANGING 2 0 2 2 +OVERFLOWING 1 0 1 1 +OVERCOAT 1 0 1 1 +OVERBEARING 1 0 1 1 +OVAL 2 0 2 2 +OUTWARD 1 0 1 1 +OUTSTRETCHED 2 0 2 2 +OUTSKIRTS 1 0 1 1 +OUTSIDE 4 0 4 4 +OUTSET 1 0 1 1 +OUTRIGHT 1 0 1 1 +OUTLINED 1 0 1 1 +OUTLINE 2 0 2 2 +OUTLAWS 3 0 3 3 +OUTFIT 1 0 1 1 +OUTER 2 0 2 2 +OUTCRY 1 0 1 1 +OUTCAST 1 0 1 1 +OURSELVES 6 0 6 6 +OURS 2 0 2 2 +OUNCE 2 0 2 2 +OTHERWISE 5 0 5 5 +OTHERS 23 0 23 23 +OTHER'S 2 0 2 2 +OSTENSIBLY 2 0 2 2 +OSTENSIBLE 1 0 1 1 +OSCILLATION 1 0 1 1 +ORNAMENTS 1 0 1 1 +ORNAMENTAL 2 0 2 2 +ORNAMENT 3 0 3 3 +ORLEANS 1 0 1 1 +ORIGINATED 1 0 1 1 +ORIGINAL 1 0 1 1 +ORIGIN 7 0 7 7 +ORGANS 1 0 1 1 +ORGANIZED 5 0 5 5 +ORGANIZATIONS 4 0 4 4 +ORGANIZATION 3 0 3 3 +ORGAN 1 0 1 1 +ORDINARY 3 0 3 3 +ORDERS 3 0 3 3 +ORDERLY 1 0 1 1 +ORCHARD 4 0 4 4 +ORBIT 2 0 2 2 +ORANGE 1 0 1 1 +ORACLE 1 0 1 1 +OPPRESSOR 1 0 1 1 +OPPRESSIVE 1 0 1 1 +OPPRESSION 1 0 1 1 +OPPRESSED 1 0 1 1 +OPPOSITION 4 0 4 4 +OPPOSITE 2 0 2 2 +OPPOSED 2 0 2 2 +OPPOSE 4 0 4 4 +OPPORTUNITY 4 0 4 4 +OPPORTUNITIES 2 0 2 2 +OPPORTUNE 1 0 1 1 +OPPONENT 2 0 2 2 +OPINIONS 1 0 1 1 +OPINION'S 1 0 1 1 +OPINION 9 0 9 9 +OPERATOR 1 0 1 1 +OPERATIONS 1 0 1 1 +OPERATION 2 0 2 2 +OPERATE 1 0 1 1 +OPENING 6 0 6 6 +OPENED 11 0 11 11 +OPEN 23 0 23 23 +ONWARD 4 0 4 4 +ONES 2 0 2 2 +ONE'S 1 0 1 1 +ONCE 22 0 22 22 +OLIVE 4 0 4 4 +OLDEST 1 0 1 1 +OLDER 3 0 3 3 +OLDEN 1 0 1 1 +OJO 7 0 7 7 +OHIO 1 0 1 1 +OFTEN 13 0 13 13 +OFFICIALS 3 0 3 3 +OFFICIAL 1 0 1 1 +OFFICE 11 0 11 11 +OFFERS 1 0 1 1 +OFFERING 1 0 1 1 +OFFER 1 0 1 1 +OFFENDS 1 0 1 1 +OFFENDED 1 0 1 1 +OFFALS 1 0 1 1 +ODORS 1 0 1 1 +ODIOUS 1 0 1 1 +ODIN 1 0 1 1 +ODDLY 1 0 1 1 +OCTOBER 1 0 1 1 +OCEANOGRAPHER 1 0 1 1 +OCEAN 5 0 5 5 +OCCURS 3 0 3 3 +OCCURRENCES 2 0 2 2 +OCCURRENCE 3 0 3 3 +OCCURRED 2 0 2 2 +OCCUR 1 0 1 1 +OCCUPY 1 0 1 1 +OCCUPIED 6 0 6 6 +OCCUPATION 2 0 2 2 +OCCASIONS 1 0 1 1 +OCCASIONALLY 1 0 1 1 +OCCASIONAL 1 0 1 1 +OCCASION 13 0 13 13 +OBVIOUS 3 0 3 3 +OBTAINED 1 0 1 1 +OBTAIN 3 0 3 3 +OBSTINATE 1 0 1 1 +OBSTINACY 2 0 2 2 +OBSTACLES 1 0 1 1 +OBSERVING 2 0 2 2 +OBSERVERS 1 0 1 1 +OBSERVATION 3 0 3 3 +OBSERVANCES 1 0 1 1 +OBSCURE 3 0 3 3 +OBNOXIOUS 1 0 1 1 +OBLIVION 1 0 1 1 +OBLITERATED 1 0 1 1 +OBLITERATE 1 0 1 1 +OBLIGED 1 0 1 1 +OBJECTIONS 1 0 1 1 +OBJECTION 1 0 1 1 +OBJECT 16 0 16 16 +OBEYING 2 0 2 2 +OBEYED 2 0 2 2 +OBEY 1 0 1 1 +OBEDIENCE 1 0 1 1 +OATMEAL 1 0 1 1 +OATH 1 0 1 1 +OAK 2 0 2 2 +O'CLOCK 6 0 6 6 +NURSED 1 0 1 1 +NURSE 1 0 1 1 +NUNKIE 1 0 1 1 +NUMERICAL 1 0 1 1 +NUMBERS 4 0 4 4 +NUMBERED 1 0 1 1 +NUMBER 6 0 6 6 +NUDITY 1 0 1 1 +NUDGED 1 0 1 1 +NOWHERE 1 0 1 1 +NOVEL 1 0 1 1 +NOURISHING 1 0 1 1 +NOTWITHSTANDING 1 0 1 1 +NOTORIOUS 1 0 1 1 +NOTIONS 1 0 1 1 +NOTION 1 0 1 1 +NOTING 1 0 1 1 +NOTICING 1 0 1 1 +NOTICED 1 0 1 1 +NOTICEABLE 1 0 1 1 +NOTICE 3 0 3 3 +NOTED 1 0 1 1 +NOTE 4 0 4 4 +NOTARY 1 0 1 1 +NOTABLE 3 0 3 3 +NORWEGIAN 1 0 1 1 +NORWAY 1 0 1 1 +NORTHERNERS 1 0 1 1 +NORTHERN 1 0 1 1 +NOON 3 0 3 3 +NONSENSE 1 0 1 1 +NONE 12 0 12 12 +NOMADS 1 0 1 1 +NOISILY 1 0 1 1 +NOISE 2 0 2 2 +NOD 1 0 1 1 +NOBODY 6 0 6 6 +NOBLEST 1 0 1 1 +NOBLER 2 0 2 2 +NOBLEMAN'S 1 0 1 1 +NOBLE 10 0 10 10 +NITROGEN 1 0 1 1 +NINTH 1 0 1 1 +NINETY 2 0 2 2 +NIMBLY 1 0 1 1 +NIMBLE 1 0 1 1 +NIGHTS 3 0 3 3 +NIGHTLY 1 0 1 1 +NIGHTINGALE'S 1 0 1 1 +NIGHTFALL 1 0 1 1 +NIECE 1 0 1 1 +NICEST 1 0 1 1 +NICER 1 0 1 1 +NICELY 1 0 1 1 +NICE 3 0 3 3 +NEXT 12 0 12 12 +NEWSPAPER 4 0 4 4 +NEWS 2 0 2 2 +NEWLY 1 0 1 1 +NEWCOMER 1 0 1 1 +NEVERTHELESS 3 0 3 3 +NEVERBEND 6 0 6 6 +NETTLES 1 0 1 1 +NETTLED 1 0 1 1 +NESTING 1 0 1 1 
+NERVOUSNESS 1 0 1 1 +NERVOUSLY 1 0 1 1 +NERVOUS 4 0 4 4 +NERVES 2 0 2 2 +NEMO 4 0 4 4 +NELL 1 0 1 1 +NEITHER 9 0 9 9 +NEIGHBORS 2 0 2 2 +NEGROES 2 0 2 2 +NEGRO 1 0 1 1 +NEGOTIATIONS 1 0 1 1 +NEGLIGENT 1 0 1 1 +NEGLECTED 2 0 2 2 +NEGLECT 1 0 1 1 +NEEDS 2 0 2 2 +NEEDING 1 0 1 1 +NEEDED 5 0 5 5 +NECK 6 0 6 6 +NECESSITY 2 0 2 2 +NECESSITIES 1 0 1 1 +NECESSARY 9 0 9 9 +NECESSARILY 2 0 2 2 +NEATLY 2 0 2 2 +NEAT 1 0 1 1 +NEARLY 10 0 10 10 +NEAREST 1 0 1 1 +NEARED 1 0 1 1 +NAY 5 0 5 5 +NAVY 1 0 1 1 +NAUTILUS 2 0 2 2 +NATURES 1 0 1 1 +NATURED 1 0 1 1 +NATURE 17 0 17 17 +NATURALLY 1 0 1 1 +NATURALISTS 2 0 2 2 +NATURALIST 1 0 1 1 +NATURAL 10 0 10 10 +NATTY 2 0 2 2 +NATIVE 5 0 5 5 +NATIONS 2 0 2 2 +NATIONAL 3 0 3 3 +NATION 2 0 2 2 +NASTY 1 0 1 1 +NARWHALE 1 0 1 1 +NARROWS 1 0 1 1 +NARROW 6 0 6 6 +NARRATIVE 2 0 2 2 +NAPIER 1 0 1 1 +NAPE 1 0 1 1 +NANCY'S 2 0 2 2 +NANCY 1 0 1 1 +NAMES 2 0 2 2 +NAMELY 2 0 2 2 +NAKEDNESS 1 0 1 1 +NAKED 1 0 1 1 +MYTHOLOGICAL 1 0 1 1 +MYTHICAL 1 0 1 1 +MYSTIFIED 1 0 1 1 +MYSTERIOUSLY 1 0 1 1 +MYSTERIOUS 3 0 3 3 +MYSELF 25 0 25 25 +MUTUAL 2 0 2 2 +MUTTON 1 0 1 1 +MUTTERING 1 0 1 1 +MUTILATION 1 0 1 1 +MUTABILITY 1 0 1 1 +MUSTARD 1 0 1 1 +MUST 66 0 66 66 +MUSSULMANS 1 0 1 1 +MUSICIANS 1 0 1 1 +MUSIC 6 0 6 6 +MUSHROOMS 1 0 1 1 +MUSEUM 1 0 1 1 +MURMURED 4 0 4 4 +MURMUR 2 0 2 2 +MURDERS 1 0 1 1 +MURDERERS 1 0 1 1 +MUNCHKINS 2 0 2 2 +MUNCHKIN 1 0 1 1 +MUMMERIES 1 0 1 1 +MULTIPLE 2 0 2 2 +MUFFLED 1 0 1 1 +MUDDY 1 0 1 1 +MUD 1 0 1 1 +MOWED 2 0 2 2 +MOW 1 0 1 1 +MOVING 1 0 1 1 +MOVES 1 0 1 1 +MOVEMENTS 3 0 3 3 +MOVEMENT 5 0 5 5 +MOVE 4 0 4 4 +MOUTHS 3 0 3 3 +MOUSE 2 0 2 2 +MOURNFUL 1 0 1 1 +MOUNTAINS 2 0 2 2 +MOTTO 1 0 1 1 +MOTTLED 1 0 1 1 +MOTORS 1 0 1 1 +MOTIVES 5 0 5 5 +MOTIONLESS 1 0 1 1 +MOTIONING 1 0 1 1 +MOTIONED 2 0 2 2 +MOTION 1 0 1 1 +MOTHER 32 0 32 32 +MOST 51 0 51 51 +MOSSY 1 0 1 1 +MOSS 1 0 1 1 +MOSAIC 1 0 1 1 +MORTALS 1 0 1 1 +MORTALLY 1 0 1 1 +MORTAL 1 0 1 1 +MORROW 6 0 6 6 +MORRIS 1 0 1 1 +MORNINGS 1 0 1 1 +MORN 1 0 1 1 +MORMONS 4 0 4 4 +MORMON 5 0 5 5 +MOREOVER 1 0 1 1 +MORE'S 1 0 1 1 +MORBID 1 0 1 1 +MORAL 1 0 1 1 +MOONLIGHT 2 0 2 2 +MOONBEAMS 1 0 1 1 +MOON 4 0 4 4 +MOOD 2 0 2 2 +MONTROSE'S 1 0 1 1 +MONTROSE 6 0 6 6 +MONTHS 4 0 4 4 +MONTH 4 0 4 4 +MONTFICHET'S 1 0 1 1 +MONTALAIS 4 0 4 4 +MONSTER 1 0 1 1 +MONSIEUR 1 0 1 1 +MONOTONOUS 1 0 1 1 +MONGOOSE 1 0 1 1 +MONCEUX 1 0 1 1 +MOMENTS 5 0 5 5 +MOMENTOUS 1 0 1 1 +MOMENTARY 1 0 1 1 +MOMENT 32 0 32 32 +MOLLY 3 0 3 3 +MOLECULES 1 0 1 1 +MOISTURE 1 0 1 1 +MOIST 1 0 1 1 +MOHAMMED 1 0 1 1 +MODIFICATION 1 0 1 1 +MODEST 3 0 3 3 +MODES 2 0 2 2 +MODERNS 1 0 1 1 +MODERN 8 0 8 8 +MODERATE 2 0 2 2 +MODEL 1 0 1 1 +MODE 2 0 2 2 +MOCCASIN 1 0 1 1 +MOBS 1 0 1 1 +MOBILITY 2 0 2 2 +MOB 3 0 3 3 +MOANING 1 0 1 1 +MOAN 1 0 1 1 +MIXTURE 2 0 2 2 +MIXED 4 0 4 4 +MITIGATE 1 0 1 1 +MISUNDERSTANDING 1 0 1 1 +MISTY 1 0 1 1 +MISTRESS 10 0 10 10 +MISTAKEN 2 0 2 2 +MISTAKE 2 0 2 2 +MISSUS 23 0 23 23 +MISSOURI 6 0 6 6 +MISSIONARY 1 0 1 1 +MISSIONARIES 1 0 1 1 +MISSION 3 0 3 3 +MISSED 2 0 2 2 +MISGOVERNMENT 1 0 1 1 +MISFORTUNES 1 0 1 1 +MISFORTUNE 4 0 4 4 +MISERY 3 0 3 3 +MISERABLY 1 0 1 1 +MISERABLE 2 0 2 2 +MISDEMEANOR 1 0 1 1 +MISCHIEF 1 0 1 1 +MISCHANCE 1 0 1 1 +MIRROR 2 0 2 2 +MIRACULOUSLY 1 0 1 1 +MIRACLE 2 0 2 2 +MINUTES 6 0 6 6 +MINUTE 2 0 2 2 +MINORITY 1 0 1 1 +MINISTRY 3 0 3 3 +MINISTERS 1 0 1 1 +MINISTER 3 0 3 3 +MINIATURE 1 0 1 1 +MINGOES 1 0 1 1 +MINGLES 1 0 1 1 +MINGLED 1 0 1 1 +MINDS 3 0 3 3 +MINDFUL 1 0 1 1 +MINDED 1 0 1 1 +MILNER'S 3 0 3 3 +MILLIONS 1 0 1 1 +MILKING 1 0 1 1 +MILKED 1 0 1 1 +MILK 1 0 1 1 +MILITIA 3 0 3 3 +MILITARY 7 0 
7 7 +MILE 1 0 1 1 +MILDLY 1 0 1 1 +MILD 2 0 2 2 +MILAN 1 0 1 1 +MIGHTY 4 0 4 4 +MIGHTILY 1 0 1 1 +MIGHT 48 0 48 48 +MIDWIFE 1 0 1 1 +MIDST 2 0 2 2 +MIDDLE 4 0 4 4 +MIDDAY 1 0 1 1 +MICROSCOPE 1 0 1 1 +MICE 5 0 5 5 +METROPOLIS 1 0 1 1 +METHODS 3 0 3 3 +METHOD 3 0 3 3 +METAPHOR 1 0 1 1 +METAMORPHOSIS 1 0 1 1 +METALLIC 1 0 1 1 +MESSRS 1 0 1 1 +MESSAGE 2 0 2 2 +MESS 1 0 1 1 +MESHES 1 0 1 1 +MERITS 2 0 2 2 +MERIT 2 0 2 2 +MERIDIAN 2 0 2 2 +MERELY 5 0 5 5 +MERE 4 0 4 4 +MERCHANT 1 0 1 1 +MENTIONS 1 0 1 1 +MENTIONED 5 0 5 5 +MENTION 1 0 1 1 +MENTAL 2 0 2 2 +MENIAL 1 0 1 1 +MENDING 2 0 2 2 +MENAGERIE 1 0 1 1 +MEN'S 2 0 2 2 +MEMORY 21 0 21 21 +MEMBERS 4 0 4 4 +MEMBER 2 0 2 2 +MELTS 1 0 1 1 +MELODY 1 0 1 1 +MELL 1 0 1 1 +MELANCHOLY 2 0 2 2 +MEETING 5 0 5 5 +MEEK 1 0 1 1 +MEDIUM 1 0 1 1 +MEDITERRANEAN 4 0 4 4 +MEDITATIVE 1 0 1 1 +MEDITATION 1 0 1 1 +MEDICINE 6 0 6 6 +MECHANICS 1 0 1 1 +MEAT 1 0 1 1 +MEASURES 2 0 2 2 +MEASURED 2 0 2 2 +MEASURE 6 0 6 6 +MEANWHILE 4 0 4 4 +MEANTIME 2 0 2 2 +MEANT 5 0 5 5 +MEANS 17 0 17 17 +MEANINGS 1 0 1 1 +MEANING 4 0 4 4 +MEALS 4 0 4 4 +MEAL 5 0 5 5 +MEADOWS 1 0 1 1 +MAXIMUM 1 0 1 1 +MAXIMS 1 0 1 1 +MATURE 1 0 1 1 +MATTHEWS 1 0 1 1 +MATTERS 5 0 5 5 +MATTERED 1 0 1 1 +MATTER 20 0 20 20 +MATHEMATICS 1 0 1 1 +MATERIALS 2 0 2 2 +MATERIALLY 1 0 1 1 +MATERIALISM 1 0 1 1 +MATERIAL 3 0 3 3 +MATED 1 0 1 1 +MATCHLESS 1 0 1 1 +MASTERPIECE 1 0 1 1 +MASTERLY 1 0 1 1 +MAST 2 0 2 2 +MASSES 1 0 1 1 +MASSACHUSETTS 1 0 1 1 +MASS 2 0 2 2 +MASKS 1 0 1 1 +MARVELS 1 0 1 1 +MARVELLED 1 0 1 1 +MARVEL 2 0 2 2 +MARTIN 2 0 2 2 +MARTHA 2 0 2 2 +MARRY 1 0 1 1 +MARRIAGE 5 0 5 5 +MARQUIS 1 0 1 1 +MARKS 4 0 4 4 +MARKING 1 0 1 1 +MARKHAM 2 0 2 2 +MARK 6 0 6 6 +MARINE 2 0 2 2 +MARIE'S 1 0 1 1 +MARIE 6 0 6 6 +MARIANNE 1 0 1 1 +MARIA 1 0 1 1 +MARGIN 1 0 1 1 +MARGARET 1 0 1 1 +MARCHES 1 0 1 1 +MARCHED 2 0 2 2 +MARCH 7 0 7 7 +MARBLE 2 0 2 2 +MAP 2 0 2 2 +MANUSCRIPT 2 0 2 2 +MANUFACTURER 3 0 3 3 +MANSION 1 0 1 1 +MANOEUVRING 1 0 1 1 +MANNERS 1 0 1 1 +MANNER 14 0 14 14 +MANKIND 2 0 2 2 +MANIFOLD 1 0 1 1 +MANIFESTLY 1 0 1 1 +MANICAMP 1 0 1 1 +MANHOOD 1 0 1 1 +MANDIBLE 1 0 1 1 +MANAGING 1 0 1 1 +MANAGERS 1 0 1 1 +MANAGEMENT 3 0 3 3 +MANAGED 4 0 4 4 +MANAGE 1 0 1 1 +MAMMY 1 0 1 1 +MALIGNITIES 1 0 1 1 +MALIGNED 1 0 1 1 +MALICIOUS 1 0 1 1 +MALICE 1 0 1 1 +MALADY 1 0 1 1 +MAKING 13 0 13 13 +MAKES 10 0 10 10 +MAJESTY'S 2 0 2 2 +MAJESTY 6 0 6 6 +MAINTAINING 1 0 1 1 +MAINTAINED 4 0 4 4 +MAINSAIL 1 0 1 1 +MAINLY 1 0 1 1 +MAID'S 1 0 1 1 +MAHOGANY 1 0 1 1 +MAGNIFIED 1 0 1 1 +MAGNIFICENT 3 0 3 3 +MAGNIFICENCE 1 0 1 1 +MAGISTRACY 1 0 1 1 +MAGICIAN 5 0 5 5 +MAGIC 4 0 4 4 +MAGAZINE 1 0 1 1 +MADNESS 1 0 1 1 +MADEMOISELLE 5 0 5 5 +MADAME'S 1 0 1 1 +MAD 3 0 3 3 +MACHINES 1 0 1 1 +MACHINE 1 0 1 1 +LYING 4 0 4 4 +LUXURIES 2 0 2 2 +LUXURIANT 1 0 1 1 +LUTHERAN 2 0 2 2 +LUTHER 3 0 3 3 +LUSTROUS 1 0 1 1 +LUSTRE 1 0 1 1 +LURKING 1 0 1 1 +LURID 1 0 1 1 +LURE 1 0 1 1 +LUNGS 1 0 1 1 +LUMPS 1 0 1 1 +LUMP 1 0 1 1 +LUMINOUS 2 0 2 2 +LULLS 1 0 1 1 +LUKE 1 0 1 1 +LUGUBRIOUS 1 0 1 1 +LUCY 1 0 1 1 +LUCRETIUS 1 0 1 1 +LUCK 3 0 3 3 +LUCID 1 0 1 1 +LOYALLY 1 0 1 1 +LOYAL 1 0 1 1 +LOWLY 2 0 2 2 +LOW 6 0 6 6 +LOVING 4 0 4 4 +LOVERS 2 0 2 2 +LOVER 1 0 1 1 +LOVELY 7 0 7 7 +LOVED 6 0 6 6 +LOVE 48 0 48 48 +LOUDNESS 1 0 1 1 +LOUDLY 2 0 2 2 +LOUDER 1 0 1 1 +LOUD 2 0 2 2 +LOTUS 1 0 1 1 +LOTS 2 0 2 2 +LOT 6 0 6 6 +LOSING 3 0 3 3 +LOSES 2 0 2 2 +LOSE 3 0 3 3 +LORDS 1 0 1 1 +LORDLY 1 0 1 1 +LORDING 2 0 2 2 +LOPPED 1 0 1 1 +LOOSELY 1 0 1 1 +LOOKS 7 0 7 7 +LOOKING 16 0 16 16 +LOOKED 24 0 24 24 +LOOK 32 0 32 32 +LONGING 2 0 2 2 +LONGER 9 0 9 9 +LONGED 1 0 1 1 
+LONELY 2 0 2 2 +LONELINESS 1 0 1 1 +LONELIER 2 0 2 2 +LONDON 3 0 3 3 +LOGICALLY 1 0 1 1 +LOGICAL 2 0 2 2 +LOGARITHMS 1 0 1 1 +LOG 2 0 2 2 +LOFTINESS 1 0 1 1 +LOFTIEST 1 0 1 1 +LOFT 2 0 2 2 +LODGING 1 0 1 1 +LODGE 2 0 2 2 +LOCRIS 1 0 1 1 +LOCKED 3 0 3 3 +LOCK 1 0 1 1 +LOAF 1 0 1 1 +LOADS 1 0 1 1 +LOADED 1 0 1 1 +LOAD 1 0 1 1 +LIVING 5 0 5 5 +LIVID 1 0 1 1 +LIVERY 1 0 1 1 +LIVERIES 2 0 2 2 +LIVELIEST 1 0 1 1 +LIVED 8 0 8 8 +LITERATURE 1 0 1 1 +LITERARY 4 0 4 4 +LITERAL 2 0 2 2 +LISTLESSLY 1 0 1 1 +LISTENING 3 0 3 3 +LISTENED 5 0 5 5 +LISTEN 3 0 3 3 +LIQUID 2 0 2 2 +LIPS 4 0 4 4 +LINKS 2 0 2 2 +LINGERED 1 0 1 1 +LINES 7 0 7 7 +LINEN 2 0 2 2 +LINED 2 0 2 2 +LINDENS 1 0 1 1 +LINCOLN 2 0 2 2 +LIMITATION 1 0 1 1 +LIMIT 1 0 1 1 +LIMESTONE 1 0 1 1 +LIMBS 2 0 2 2 +LIKEWISE 1 0 1 1 +LIKES 2 0 2 2 +LIKENESS 1 0 1 1 +LIKELY 2 0 2 2 +LIKED 4 0 4 4 +LIGHTS 5 0 5 5 +LIGHTNING 1 0 1 1 +LIGHTLY 3 0 3 3 +LIGHTING 6 0 6 6 +LIGHTED 5 0 5 5 +LIFTING 2 0 2 2 +LIFTED 1 0 1 1 +LIFT 2 0 2 2 +LIFE'S 1 0 1 1 +LIFE 47 0 47 47 +LIEUTENANT 6 0 6 6 +LIEDENBROCK 1 0 1 1 +LICHEN 1 0 1 1 +LIBRARY 3 0 3 3 +LIBERTY 3 0 3 3 +LIBERAL 1 0 1 1 +LIABLE 2 0 2 2 +LEXINGTON 1 0 1 1 +LEVIED 2 0 2 2 +LEVELS 1 0 1 1 +LEVEL 3 0 3 3 +LETTING 1 0 1 1 +LETTERS 4 0 4 4 +LETTER 12 0 12 12 +LET 27 0 27 27 +LEST 2 0 2 2 +LESSONS 1 0 1 1 +LESSON 1 0 1 1 +LENGTHY 1 0 1 1 +LENGTHS 1 0 1 1 +LENGTH 4 0 4 4 +LEND 2 0 2 2 +LEISURELY 1 0 1 1 +LEISURE 11 0 11 11 +LEGS 3 0 3 3 +LEGISLATURE 4 0 4 4 +LEGISLATORS 1 0 1 1 +LEGISLATIVE 1 0 1 1 +LEGATE 1 0 1 1 +LEGALITY 2 0 2 2 +LEGAL 1 0 1 1 +LEG 1 0 1 1 +LEECH 2 0 2 2 +LED 7 0 7 7 +LECTURES 2 0 2 2 +LECTURE 3 0 3 3 +LECOMPTON 1 0 1 1 +LEAVES 5 0 5 5 +LEATHER 1 0 1 1 +LEASH 1 0 1 1 +LEARNING 1 0 1 1 +LEARNED 4 0 4 4 +LEARN 4 0 4 4 +LEAPS 2 0 2 2 +LEAP 2 0 2 2 +LEANING 3 0 3 3 +LEANED 5 0 5 5 +LEAN 1 0 1 1 +LEAGUES 1 0 1 1 +LEAGUE 1 0 1 1 +LEADS 3 0 3 3 +LEADING 3 0 3 3 +LEADERS 1 0 1 1 +LEAD 8 0 8 8 +LAZILY 1 0 1 1 +LAYS 1 0 1 1 +LAYMAN 1 0 1 1 +LAYING 1 0 1 1 +LAY 16 0 16 16 +LAWYER 1 0 1 1 +LAWS 9 0 9 9 +LAWRENCE 2 0 2 2 +LAWFUL 1 0 1 1 +LAW 13 0 13 13 +LAVISHING 1 0 1 1 +LAUGHTER 2 0 2 2 +LAUGHING 6 0 6 6 +LAUGH 4 0 4 4 +LATTICE 1 0 1 1 +LATTER 9 0 9 9 +LATIN 3 0 3 3 +LATEST 1 0 1 1 +LATER 14 0 14 14 +LATENT 1 0 1 1 +LATE 6 0 6 6 +LATCHED 1 0 1 1 +LAST 41 0 41 41 +LASHED 1 0 1 1 +LARGEST 2 0 2 2 +LARGER 3 0 3 3 +LARGE 16 0 16 16 +LAREN 1 0 1 1 +LARDER 1 0 1 1 +LAPSE 1 0 1 1 +LAP 3 0 3 3 +LANGUISHINGLY 1 0 1 1 +LANGUID 1 0 1 1 +LANGUAGE 11 0 11 11 +LANE 1 0 1 1 +LANDSCAPE 1 0 1 1 +LANDS 2 0 2 2 +LAMPS 3 0 3 3 +LAMP 4 0 4 4 +LAMENTATION 1 0 1 1 +LAMBS 1 0 1 1 +LAMB 1 0 1 1 +LAKES 1 0 1 1 +LAKE'S 1 0 1 1 +LAID 8 0 8 8 +LAGOON 4 0 4 4 +LADY 9 0 9 9 +LADLED 1 0 1 1 +LADIES 11 0 11 11 +LADDER 3 0 3 3 +LAD 3 0 3 3 +LACKEY 1 0 1 1 +LACK 1 0 1 1 +KNOWN 15 0 15 15 +KNOWLEDGE 15 0 15 15 +KNOWING 5 0 5 5 +KNOT 1 0 1 1 +KNOCKING 1 0 1 1 +KNOCK 1 0 1 1 +KNITTED 1 0 1 1 +KNIGHT 1 0 1 1 +KNEES 3 0 3 3 +KNEELS 1 0 1 1 +KNEELING 1 0 1 1 +KNAVE 1 0 1 1 +KITTEN 1 0 1 1 +KITES 1 0 1 1 +KITE 1 0 1 1 +KITCHEN 4 0 4 4 +KIT 1 0 1 1 +KISSING 1 0 1 1 +KISSES 1 0 1 1 +KISSED 2 0 2 2 +KISS 2 0 2 2 +KINGS 1 0 1 1 +KINGLY 1 0 1 1 +KINGDOM 4 0 4 4 +KINDS 1 0 1 1 +KINDNESS 1 0 1 1 +KINDLY 3 0 3 3 +KINDLED 3 0 3 3 +KINDER 1 0 1 1 +KIND 14 0 14 14 +KILLS 1 0 1 1 +KILLED 1 0 1 1 +KIDNAP 2 0 2 2 +KID 1 0 1 1 +KICKAPOO 1 0 1 1 +KEY 5 0 5 5 +KETTLES 2 0 2 2 +KETTLE 1 0 1 1 +KERCHIEFS 1 0 1 1 +KEPT 5 0 5 5 +KENNINGTON 2 0 2 2 +KENNETH 9 0 9 9 +KEEPS 3 0 3 3 +KEEPING 4 0 4 4 +KEEP 10 0 10 10 +KEENNESS 2 0 2 2 +KEENER 1 0 1 1 +KATE 1 0 1 1 
+KANSAS 3 0 3 3 +KANE 1 0 1 1 +JUSTLY 2 0 2 2 +JUSTIFICATION 2 0 2 2 +JUSTICE 3 0 3 3 +JURISDICTION 1 0 1 1 +JUMPING 1 0 1 1 +JUMPED 1 0 1 1 +JUMP 3 0 3 3 +JUDGMENT 6 0 6 6 +JUDGES 1 0 1 1 +JUDGE 5 0 5 5 +JUDAH 1 0 1 1 +JOYOUS 1 0 1 1 +JOYCE 2 0 2 2 +JOY 4 0 4 4 +JOURNEYING 1 0 1 1 +JOURNEY 5 0 5 5 +JONES 3 0 3 3 +JOLLY 5 0 5 5 +JOKED 1 0 1 1 +JOKE 2 0 2 2 +JOINED 1 0 1 1 +JOIN 2 0 2 2 +JOHNSON 1 0 1 1 +JOHN 16 0 16 16 +JIB 1 0 1 1 +JEWISH 1 0 1 1 +JEWELS 3 0 3 3 +JET 1 0 1 1 +JESUS 7 0 7 7 +JERSEY 1 0 1 1 +JERK 1 0 1 1 +JENKS 1 0 1 1 +JELLIES 1 0 1 1 +JEHOVAH 1 0 1 1 +JEERED 1 0 1 1 +JEALOUS 1 0 1 1 +JAWS 2 0 2 2 +JASPER'S 2 0 2 2 +JAP 1 0 1 1 +JANUARY 2 0 2 2 +JAMES 2 0 2 2 +JAILER 5 0 5 5 +JACOB'S 2 0 2 2 +JACOB 1 0 1 1 +JACKSON 1 0 1 1 +JACKET 1 0 1 1 +J 2 0 2 2 +IVORY 1 0 1 1 +ITSELF 21 0 21 21 +ITCH 1 0 1 1 +ITALIAN 2 0 2 2 +ISSUED 2 0 2 2 +ISSUE 1 0 1 1 +ISRAEL 1 0 1 1 +ISOLATED 1 0 1 1 +ISN'T 5 0 5 5 +ISLAND 5 0 5 5 +IRWINE 1 0 1 1 +IRRITABLE 1 0 1 1 +IRRESOLUTION 1 0 1 1 +IRREPARABLE 1 0 1 1 +IRREGULARITY 2 0 2 2 +IRONING 1 0 1 1 +IRISH 2 0 2 2 +IRIDESCENT 1 0 1 1 +IRENE 1 0 1 1 +IRELAND 1 0 1 1 +INWARDLY 1 0 1 1 +INWARD 1 0 1 1 +INVOLVING 1 0 1 1 +INVOLVES 1 0 1 1 +INVOLVED 1 0 1 1 +INVOLVE 1 0 1 1 +INVITED 4 0 4 4 +INVITATION 3 0 3 3 +INVISIBLE 1 0 1 1 +INVIOLATE 1 0 1 1 +INVIDIOUS 1 0 1 1 +INVESTIGATION 1 0 1 1 +INVENTOR 1 0 1 1 +INVENTION 1 0 1 1 +INVENTED 1 0 1 1 +INVASION 1 0 1 1 +INVARIABLY 4 0 4 4 +INVARIABLE 1 0 1 1 +INVALID 1 0 1 1 +INVADER 1 0 1 1 +INVADE 1 0 1 1 +INTRODUCTION 4 0 4 4 +INTRODUCING 1 0 1 1 +INTRODUCED 3 0 3 3 +INTRODUCE 3 0 3 3 +INTRINSIC 1 0 1 1 +INTRICATE 1 0 1 1 +INTOLERANT 1 0 1 1 +INTOLERANCY 1 0 1 1 +INTOLERABLE 1 0 1 1 +INTIMATELY 2 0 2 2 +INTIMATE 2 0 2 2 +INTERVIEWS 1 0 1 1 +INTERVIEW 3 0 3 3 +INTERSECTED 1 0 1 1 +INTERRUPTED 2 0 2 2 +INTERPRETED 1 0 1 1 +INTERPRETATION 1 0 1 1 +INTERPOSED 1 0 1 1 +INTERNAL 1 0 1 1 +INTERMINGLED 1 0 1 1 +INTERMEDIATE 1 0 1 1 +INTERLACED 1 0 1 1 +INTERFERE 2 0 2 2 +INTERESTING 3 0 3 3 +INTERESTED 4 0 4 4 +INTEREST 10 0 10 10 +INTENTLY 2 0 2 2 +INTENTIONS 1 0 1 1 +INTENTION 1 0 1 1 +INTENT 1 0 1 1 +INTENSITY 3 0 3 3 +INTENSIFICATION 1 0 1 1 +INTENSELY 1 0 1 1 +INTENDED 1 0 1 1 +INTEND 1 0 1 1 +INTELLIGENT 5 0 5 5 +INTELLIGENCE 7 0 7 7 +INTELLECTS 1 0 1 1 +INTELLECT 1 0 1 1 +INTEGRITY 1 0 1 1 +INTANGIBLE 1 0 1 1 +INSURRECTIONISTS 1 0 1 1 +INSULT 1 0 1 1 +INSTRUMENT 1 0 1 1 +INSTRUCTIONS 4 0 4 4 +INSTITUTION 1 0 1 1 +INSTITUTED 1 0 1 1 +INSTITUTE 1 0 1 1 +INSTINCT 1 0 1 1 +INSTEAD 11 0 11 11 +INSTANTLY 6 0 6 6 +INSTANTANEOUS 1 0 1 1 +INSTANT 3 0 3 3 +INSTANCING 1 0 1 1 +INSTANCE 3 0 3 3 +INSTALLED 5 0 5 5 +INSTALL 1 0 1 1 +INSPIRED 1 0 1 1 +INSPIRATION 1 0 1 1 +INSOLENTLY 1 0 1 1 +INSISTS 1 0 1 1 +INSISTENCE 2 0 2 2 +INSISTED 1 0 1 1 +INSIST 1 0 1 1 +INSIPID 1 0 1 1 +INSINUATED 1 0 1 1 +INSIGNIFICANT 2 0 2 2 +INSIGHT 1 0 1 1 +INSIDE 2 0 2 2 +INSERTING 1 0 1 1 +INSENSIBLE 1 0 1 1 +INSECT 1 0 1 1 +INSATIABLE 2 0 2 2 +INNUMERABLE 2 0 2 2 +INNOCENTLY 1 0 1 1 +INNOCENT 2 0 2 2 +INNINGS 1 0 1 1 +INNER 2 0 2 2 +INMATES 1 0 1 1 +INJUSTICE 4 0 4 4 +INJURY 2 0 2 2 +INIQUITY 1 0 1 1 +INHUMAN 1 0 1 1 +INHERITANCE 2 0 2 2 +INHABITANTS 3 0 3 3 +INGREDIENTS 1 0 1 1 +INGENUITY 2 0 2 2 +INGENIOUS 2 0 2 2 +INFUSE 1 0 1 1 +INFORMING 1 0 1 1 +INFORMED 3 0 3 3 +INFORMATION 3 0 3 3 +INFLUENTIAL 2 0 2 2 +INFLUENCES 2 0 2 2 +INFLUENCE 8 0 8 8 +INFLICT 2 0 2 2 +INFLEXIBLE 1 0 1 1 +INFIRMITY 1 0 1 1 +INFIRMITIES 1 0 1 1 +INFIRMARY 1 0 1 1 +INFINITE 4 0 4 4 +INFERIOR 2 0 2 2 +INFECTED 1 0 1 1 +INFANTRY 2 0 2 2 +INFANTILE 2 0 2 2 +INFANT 1 0 1 
1 +INFANCY 1 0 1 1 +INFALLIBLE 1 0 1 1 +INEXPRESSIBLY 1 0 1 1 +INEXPLICABLE 2 0 2 2 +INEXPERIENCE 1 0 1 1 +INEXHAUSTIBLE 1 0 1 1 +INESTIMABLE 1 0 1 1 +INEFFECTUALLY 1 0 1 1 +INDUSTRY 1 0 1 1 +INDUSTRIOUS 1 0 1 1 +INDULGENCE 1 0 1 1 +INDULGED 1 0 1 1 +INDUCED 1 0 1 1 +INDUCE 1 0 1 1 +INDUBITABLE 1 0 1 1 +INDIVIDUALS 1 0 1 1 +INDIVIDUAL 5 0 5 5 +INDISTINGUISHABLE 1 0 1 1 +INDISTINCT 1 0 1 1 +INDISPENSABLE 1 0 1 1 +INDISCREET 1 0 1 1 +INDIRECT 1 0 1 1 +INDIGENCE 1 0 1 1 +INDIFFERENT 3 0 3 3 +INDIFFERENCE 4 0 4 4 +INDIES 1 0 1 1 +INDICATOR 1 0 1 1 +INDICATING 1 0 1 1 +INDICATES 2 0 2 2 +INDICATED 3 0 3 3 +INDICATE 2 0 2 2 +INDIANS 4 0 4 4 +INDIAN 4 0 4 4 +INDIA 1 0 1 1 +INDEPENDENTS 1 0 1 1 +INDEPENDENT 4 0 4 4 +INDEPENDENCE 1 0 1 1 +INCURRING 1 0 1 1 +INCURRED 1 0 1 1 +INCUR 1 0 1 1 +INCREASED 2 0 2 2 +INCREASE 2 0 2 2 +INCONVENIENT 1 0 1 1 +INCONCEIVABLE 1 0 1 1 +INCOMPREHENSIBLE 1 0 1 1 +INCOMPATIBLE 1 0 1 1 +INCOHERENT 1 0 1 1 +INCLUDING 1 0 1 1 +INCLUDED 2 0 2 2 +INCLINES 1 0 1 1 +INCLINED 2 0 2 2 +INCLINATIONS 2 0 2 2 +INCITED 1 0 1 1 +INCIDENTS 1 0 1 1 +INCIDENTAL 1 0 1 1 +INCIDENT 6 0 6 6 +INCHES 1 0 1 1 +INCH 2 0 2 2 +INCESSANTLY 1 0 1 1 +INCEPTION 1 0 1 1 +INCAPABLE 2 0 2 2 +INASMUCH 1 0 1 1 +INADEQUATE 2 0 2 2 +INADEQUACY 1 0 1 1 +INACCURACY 1 0 1 1 +INACCESSIBLE 1 0 1 1 +IMPULSIVELY 1 0 1 1 +IMPULSE 3 0 3 3 +IMPROVING 1 0 1 1 +IMPROVED 3 0 3 3 +IMPRISONMENT 1 0 1 1 +IMPRISONED 3 0 3 3 +IMPRESSIONS 6 0 6 6 +IMPRESSION 2 0 2 2 +IMPOSSIBLE 11 0 11 11 +IMPOSSIBILITY 1 0 1 1 +IMPOSE 1 0 1 1 +IMPORTANT 7 0 7 7 +IMPORTANCE 5 0 5 5 +IMPORT 1 0 1 1 +IMPLY 1 0 1 1 +IMPLORE 1 0 1 1 +IMPLICIT 1 0 1 1 +IMPLICATION 1 0 1 1 +IMPIETY 1 0 1 1 +IMPETUS 1 0 1 1 +IMPETUOUS 3 0 3 3 +IMPERIOUSLY 1 0 1 1 +IMPERIALIST 1 0 1 1 +IMPERIALISM 1 0 1 1 +IMPERIAL 1 0 1 1 +IMPERFECTLY 1 0 1 1 +IMPERATIVE 1 0 1 1 +IMPENETRABLE 2 0 2 2 +IMPELLED 2 0 2 2 +IMPEDIMENT 1 0 1 1 +IMPATIENT 1 0 1 1 +IMPATIENCE 3 0 3 3 +IMPASSIVELY 1 0 1 1 +IMMUNITY 1 0 1 1 +IMMORTALITY 1 0 1 1 +IMMORTAL 1 0 1 1 +IMMENSELY 1 0 1 1 +IMMENSE 1 0 1 1 +IMMEDIATELY 4 0 4 4 +IMMEDIATE 2 0 2 2 +IMMEDIACY 1 0 1 1 +IMBIBING 1 0 1 1 +IMAGINING 1 0 1 1 +IMAGINED 2 0 2 2 +IMAGINE 2 0 2 2 +IMAGINATIVE 1 0 1 1 +IMAGINATION 3 0 3 3 +IMAGINARY 1 0 1 1 +IMAGINABLE 2 0 2 2 +IMAGES 8 0 8 8 +IMAGE 9 0 9 9 +ILLUSTRIOUS 2 0 2 2 +ILLUSTRATION 1 0 1 1 +ILLUSION 2 0 2 2 +ILLUMINATION 1 0 1 1 +ILLUMINATING 1 0 1 1 +ILLUMINATED 1 0 1 1 +ILLS 1 0 1 1 +ILLNESS 1 0 1 1 +ILL 6 0 6 6 +IGNORANCE 2 0 2 2 +IGNOMINY 1 0 1 1 +IGNOBLE 1 0 1 1 +IDOLATRY 1 0 1 1 +IDLY 1 0 1 1 +IDLENESS 1 0 1 1 +IDLE 6 0 6 6 +IDENTITY 1 0 1 1 +IDEAS 11 0 11 11 +IDEAL 3 0 3 3 +IDEA 7 0 7 7 +ICE 1 0 1 1 +HYPOTHESIS 1 0 1 1 +HYPOCRITE 1 0 1 1 +HYPOCRISY 1 0 1 1 +HYMN 1 0 1 1 +HYDRAS 1 0 1 1 +HUT 4 0 4 4 +HUSSY 1 0 1 1 +HUSHED 1 0 1 1 +HUSBAND'S 1 0 1 1 +HUSBAND 9 0 9 9 +HURT 1 0 1 1 +HURRYING 2 0 2 2 +HURRY 4 0 4 4 +HURRIEDLY 3 0 3 3 +HURONS 1 0 1 1 +HURLED 2 0 2 2 +HUNTING 2 0 2 2 +HUNTER 1 0 1 1 +HUNTED 1 0 1 1 +HUNGRY 1 0 1 1 +HUNGER 2 0 2 2 +HUNG 10 0 10 10 +HUNDREDTH 1 0 1 1 +HUNDREDS 2 0 2 2 +HUNDRED 18 0 18 18 +HUMPY 2 0 2 2 +HUMOROUS 3 0 3 3 +HUMMING 1 0 1 1 +HUMILITY 1 0 1 1 +HUMILIATE 1 0 1 1 +HUMBUG 1 0 1 1 +HUMBLY 2 0 2 2 +HUMBLE 5 0 5 5 +HUMANITY 2 0 2 2 +HUMANE 1 0 1 1 +HUGGED 1 0 1 1 +HUGE 7 0 7 7 +HUES 1 0 1 1 +HUE 1 0 1 1 +HUDSON 1 0 1 1 +HUDDLED 1 0 1 1 +HOWL 1 0 1 1 +HOWEVER 29 0 29 29 +HOVER 1 0 1 1 +HOUSEWORK 1 0 1 1 +HOUSEMAID 2 0 2 2 +HOUSEKEEPER 2 0 2 2 +HOUSEHOLDS 1 0 1 1 +HOUSEHOLD'S 1 0 1 1 +HOUSEHOLD 4 0 4 4 +HOURS 13 0 13 13 +HOUR 12 0 12 12 +HOUNDED 1 0 1 1 +HOTELS 1 0 
1 1 +HOTEL 7 0 7 7 +HOSTILITY 1 0 1 1 +HOSTESS 3 0 3 3 +HOST 3 0 3 3 +HOSPITALITY 4 0 4 4 +HOSPITABLY 1 0 1 1 +HOSPITABLE 1 0 1 1 +HOSE 2 0 2 2 +HORSES 6 0 6 6 +HORSEMEN 1 0 1 1 +HORROR 2 0 2 2 +HORRID 1 0 1 1 +HORRIBLY 2 0 2 2 +HORRIBLE 3 0 3 3 +HORNS 2 0 2 2 +HORNFUL 1 0 1 1 +HORIZON 3 0 3 3 +HORATIO 2 0 2 2 +HOPKINSON 2 0 2 2 +HOPING 1 0 1 1 +HOPELESS 1 0 1 1 +HOPED 2 0 2 2 +HOOKING 1 0 1 1 +HOOKED 1 0 1 1 +HONOURABLY 1 0 1 1 +HONORS 1 0 1 1 +HONEY 1 0 1 1 +HONESTY 1 0 1 1 +HONESTLY 2 0 2 2 +HONEST 5 0 5 5 +HOMILY 1 0 1 1 +HOMELY 3 0 3 3 +HOLLYHOCKS 1 0 1 1 +HOLINESS 2 0 2 2 +HOLIDAYS 3 0 3 3 +HOLES 1 0 1 1 +HOLDS 2 0 2 2 +HOLDING 1 0 1 1 +HOLBORN 1 0 1 1 +HOBSON'S 1 0 1 1 +HOBS 1 0 1 1 +HITHERTO 1 0 1 1 +HISTORY 5 0 5 5 +HISTORIC 1 0 1 1 +HISTORIANS 1 0 1 1 +HISTORIAN 1 0 1 1 +HISSING 1 0 1 1 +HISS 1 0 1 1 +HIRE 1 0 1 1 +HINT 2 0 2 2 +HINDERED 1 0 1 1 +HIND 1 0 1 1 +HIMSELF 49 0 49 49 +HILLY 1 0 1 1 +HILL 4 0 4 4 +HIGHNESS 1 0 1 1 +HIGHLY 2 0 2 2 +HIERARCHY 1 0 1 1 +HIDING 1 0 1 1 +HIDEOUS 1 0 1 1 +HIDE 3 0 3 3 +HIDDEN 3 0 3 3 +HICKEY 1 0 1 1 +HEWN 1 0 1 1 +HESTER 11 0 11 11 +HESITATION 1 0 1 1 +HESITATING 2 0 2 2 +HERSELF 20 0 20 20 +HERS 2 0 2 2 +HERON 1 0 1 1 +HEROINE 1 0 1 1 +HEROIC 2 0 2 2 +HERO 3 0 3 3 +HERMOCRATES 1 0 1 1 +HERETICS 2 0 2 2 +HEREDITY 1 0 1 1 +HEREAFTER 3 0 3 3 +HERE'S 1 0 1 1 +HERALDED 1 0 1 1 +HENRY'S 1 0 1 1 +HENRY 2 0 2 2 +HENLEY 1 0 1 1 +HENCE 4 0 4 4 +HEMMED 1 0 1 1 +HELPLESS 3 0 3 3 +HELPING 1 0 1 1 +HELMSMAN 1 0 1 1 +HELLENES 1 0 1 1 +HELL 2 0 2 2 +HELD 15 0 15 15 +HEIGHTS 1 0 1 1 +HEIGHTENING 1 0 1 1 +HEIGHT 1 0 1 1 +HEELS 1 0 1 1 +HEDGES 1 0 1 1 +HEDGE 1 0 1 1 +HEAVY 13 0 13 13 +HEAVING 2 0 2 2 +HEAVILY 1 0 1 1 +HEAVENS 1 0 1 1 +HEAVENLY 1 0 1 1 +HEAVED 1 0 1 1 +HEAT 2 0 2 2 +HEARTY 1 0 1 1 +HEARTILY 2 0 2 2 +HEARTHSTONES 1 0 1 1 +HEARTH 3 0 3 3 +HEARTED 1 0 1 1 +HEART 28 0 28 28 +HEARSE 2 0 2 2 +HEARS 2 0 2 2 +HEARING 1 0 1 1 +HEARD 19 0 19 19 +HEAP 2 0 2 2 +HEALTH 6 0 6 6 +HEADS 3 0 3 3 +HEADQUARTERS 1 0 1 1 +HEADLONGS 1 0 1 1 +HEADLONG 1 0 1 1 +HEADING 1 0 1 1 +HE'LL 1 0 1 1 +HAY 1 0 1 1 +HAWORTH 1 0 1 1 +HAWKS 1 0 1 1 +HAWKEYE 5 0 5 5 +HAWK'S 1 0 1 1 +HAWK 7 0 7 7 +HAVEN'T 6 0 6 6 +HAUNTED 1 0 1 1 +HATS 1 0 1 1 +HATRED 3 0 3 3 +HATH 4 0 4 4 +HATEFUL 1 0 1 1 +HATED 1 0 1 1 +HATE 1 0 1 1 +HAT 1 0 1 1 +HASTY 2 0 2 2 +HASTILY 2 0 2 2 +HASTENED 4 0 4 4 +HASTE 1 0 1 1 +HARVEST 1 0 1 1 +HARRYING 1 0 1 1 +HARROW 1 0 1 1 +HARPOONER 1 0 1 1 +HARNESS 1 0 1 1 +HARMONY 2 0 2 2 +HARMON 4 0 4 4 +HARM 2 0 2 2 +HARE 1 0 1 1 +HARDSHIPS 1 0 1 1 +HARDLY 14 0 14 14 +HARDER 1 0 1 1 +HARD 12 0 12 12 +HARASSING 1 0 1 1 +HARANGUING 1 0 1 1 +HAR 1 0 1 1 +HAPPY 16 0 16 16 +HAPPINESS 6 0 6 6 +HAPPILY 1 0 1 1 +HAPPENS 3 0 3 3 +HAPPENING 2 0 2 2 +HAPLESS 1 0 1 1 +HANSOM 1 0 1 1 +HANS 2 0 2 2 +HANGS 1 0 1 1 +HANGERS 1 0 1 1 +HANG 1 0 1 1 +HANDSOMEST 1 0 1 1 +HANDSOMELY 1 0 1 1 +HANDLE 1 0 1 1 +HANDKERCHIEFS 1 0 1 1 +HANDFUL 1 0 1 1 +HANDED 3 0 3 3 +HAND 29 0 29 29 +HAMMER 3 0 3 3 +HAMLET'S 1 0 1 1 +HAMLET 6 0 6 6 +HAMBURG 1 0 1 1 +HALT 1 0 1 1 +HALLWAY 1 0 1 1 +HALLS 3 0 3 3 +HAIRS 1 0 1 1 +HAILING 1 0 1 1 +HADN'T 3 0 3 3 +HABITUALLY 1 0 1 1 +HABITUAL 2 0 2 2 +HABITS 4 0 4 4 +HABITATION 1 0 1 1 +HABIT 7 0 7 7 +GUTTER 1 0 1 1 +GUSTS 2 0 2 2 +GUST 1 0 1 1 +GUNS 1 0 1 1 +GUN 1 0 1 1 +GULLET 1 0 1 1 +GULF 2 0 2 2 +GUILTY 2 0 2 2 +GUILT 2 0 2 2 +GUIDED 2 0 2 2 +GUIDE 2 0 2 2 +GUERRILLA 1 0 1 1 +GUARDS 3 0 3 3 +GUARD 1 0 1 1 +GRUDGE 1 0 1 1 +GROWTH 5 0 5 5 +GROWN 7 0 7 7 +GROWLED 2 0 2 2 +GROWING 4 0 4 4 +GROW 4 0 4 4 +GROUPS 2 0 2 2 +GROUP 2 0 2 2 +GROUNDS 2 0 2 2 +GROSS 1 0 1 1 
+GROPING 1 0 1 1 +GROPE 1 0 1 1 +GROANS 2 0 2 2 +GROANING 1 0 1 1 +GROANED 2 0 2 2 +GROAN 1 0 1 1 +GRINNING 1 0 1 1 +GRINDER 1 0 1 1 +GRIN 1 0 1 1 +GRIM 3 0 3 3 +GRIFFIN 1 0 1 1 +GRIEVED 1 0 1 1 +GRIEF 2 0 2 2 +GREW 5 0 5 5 +GREETINGS 1 0 1 1 +GREETING 4 0 4 4 +GREETED 1 0 1 1 +GREET 1 0 1 1 +GREEK 4 0 4 4 +GREATNESS 2 0 2 2 +GREATLY 10 0 10 10 +GREATEST 7 0 7 7 +GREATER 9 0 9 9 +GRAVEYARD 3 0 3 3 +GRAVES 1 0 1 1 +GRATITUDE 2 0 2 2 +GRATIFICATION 3 0 3 3 +GRATEFUL 2 0 2 2 +GRASS 11 0 11 11 +GRASPING 3 0 3 3 +GRANTED 3 0 3 3 +GRANDSON 1 0 1 1 +GRANDMOTHER 2 0 2 2 +GRANDFATHER 4 0 4 4 +GRANDER 2 0 2 2 +GRAMOPHONES 1 0 1 1 +GRAMOPHONE 1 0 1 1 +GRAINS 1 0 1 1 +GRAF 1 0 1 1 +GRADATED 1 0 1 1 +GRACIOUSLY 1 0 1 1 +GRACIOUS 2 0 2 2 +GRACES 2 0 2 2 +GRACEFULLY 1 0 1 1 +GRACEFUL 1 0 1 1 +GRACE 12 0 12 12 +GOWN 1 0 1 1 +GOVERNOR'S 1 0 1 1 +GOVERNESS 2 0 2 2 +GOTHIC 3 0 3 3 +GOT 13 0 13 13 +GOSPEL 2 0 2 2 +GORGEOUS 1 0 1 1 +GORDONS 1 0 1 1 +GORDON 1 0 1 1 +GOODS 5 0 5 5 +GOODNESS 1 0 1 1 +GOODLY 1 0 1 1 +GONE 14 0 14 14 +GOLIATH 2 0 2 2 +GOLF 1 0 1 1 +GOLDEN 15 0 15 15 +GOLD 15 0 15 15 +GOES 2 0 2 2 +GODLY 1 0 1 1 +GODLESS 1 0 1 1 +GODDESS 1 0 1 1 +GOD'S 1 0 1 1 +GOD 33 0 33 33 +GOBY 1 0 1 1 +GNARLED 1 0 1 1 +GLUE 1 0 1 1 +GLOWING 3 0 3 3 +GLOW 3 0 3 3 +GLOVES 5 0 5 5 +GLOVED 1 0 1 1 +GLOSSY 2 0 2 2 +GLORY 1 0 1 1 +GLORIOUS 2 0 2 2 +GLORIES 1 0 1 1 +GLOOMY 1 0 1 1 +GLOOMILY 2 0 2 2 +GLOBE 1 0 1 1 +GLITTERING 4 0 4 4 +GLITTERED 2 0 2 2 +GLINDA 1 0 1 1 +GLIMMERING 1 0 1 1 +GLIDING 1 0 1 1 +GLIDES 1 0 1 1 +GLIDED 2 0 2 2 +GLEANER 1 0 1 1 +GLEAMS 1 0 1 1 +GLEAMING 4 0 4 4 +GLEAMED 1 0 1 1 +GLEAM 1 0 1 1 +GLASS 6 0 6 6 +GLARE 2 0 2 2 +GLANCED 2 0 2 2 +GLANCE 5 0 5 5 +GLAMOUR 1 0 1 1 +GLADNESS 1 0 1 1 +GLADLY 1 0 1 1 +GLAD 12 0 12 12 +GIVING 7 0 7 7 +GIVES 3 0 3 3 +GIVEN 15 0 15 15 +GIRLS 7 0 7 7 +GIRL'S 2 0 2 2 +GIRL 25 0 25 25 +GILDED 3 0 3 3 +GILD 1 0 1 1 +GIGANTIC 1 0 1 1 +GIFTS 2 0 2 2 +GIFT 6 0 6 6 +GHOSTS 2 0 2 2 +GHOSTLY 1 0 1 1 +GHOST 2 0 2 2 +GHISIZZLE 4 0 4 4 +GHASTLY 1 0 1 1 +GETTING 6 0 6 6 +GET 30 0 30 30 +GESTURES 1 0 1 1 +GESTURE 2 0 2 2 +GERMANTOWN 1 0 1 1 +GERM 1 0 1 1 +GEORGE 3 0 3 3 +GEOMETRICAL 1 0 1 1 +GENUINE 1 0 1 1 +GENTLY 5 0 5 5 +GENTLEWOMAN 1 0 1 1 +GENTLENESS 1 0 1 1 +GENTLEMAN'S 1 0 1 1 +GENTLE 7 0 7 7 +GENTILITY 1 0 1 1 +GENIUS 2 0 2 2 +GENEROUSLY 1 0 1 1 +GENEROUS 4 0 4 4 +GENERATION 2 0 2 2 +GENERATED 1 0 1 1 +GENERALS 3 0 3 3 +GENERALLY 8 0 8 8 +GENEALOGIES 1 0 1 1 +GAZING 2 0 2 2 +GAZED 2 0 2 2 +GAZE 3 0 3 3 +GATHERINGS 1 0 1 1 +GATHERING 2 0 2 2 +GATHERED 2 0 2 2 +GATES 6 0 6 6 +GATE 3 0 3 3 +GASPED 1 0 1 1 +GASEOUS 1 0 1 1 +GAS 1 0 1 1 +GARDENS 3 0 3 3 +GARDENING 1 0 1 1 +GARDENER'S 1 0 1 1 +GARDENER 1 0 1 1 +GARDEN 6 0 6 6 +GARB 1 0 1 1 +GAMEWELL'S 1 0 1 1 +GAMEWELL 7 0 7 7 +GALVANOMETER 1 0 1 1 +GALLOWSNESS 1 0 1 1 +GALLEY 1 0 1 1 +GALLANT 4 0 4 4 +GAIN 2 0 2 2 +GABLES 1 0 1 1 +FUZZY 1 0 1 1 +FUTURE 5 0 5 5 +FURTHERED 1 0 1 1 +FURTHER 9 0 9 9 +FURNITURE 4 0 4 4 +FURNISHING 1 0 1 1 +FURNISHED 1 0 1 1 +FURNISH 1 0 1 1 +FURLED 1 0 1 1 +FURIOUSLY 2 0 2 2 +FUNERAL 1 0 1 1 +FUNDAMENTAL 1 0 1 1 +FUNCTIONS 1 0 1 1 +FUNCTION 1 0 1 1 +FUMBLED 1 0 1 1 +FULLY 3 0 3 3 +FULFILLED 2 0 2 2 +FUGITIVES 1 0 1 1 +FUGITIVE'S 1 0 1 1 +FRUSTRATED 1 0 1 1 +FRUITS 1 0 1 1 +FRUIT 1 0 1 1 +FROZEN 2 0 2 2 +FROWNINGLY 1 0 1 1 +FROWNING 2 0 2 2 +FROWNED 2 0 2 2 +FROWN 1 0 1 1 +FROTHY 1 0 1 1 +FROST 1 0 1 1 +FRONTIER 3 0 3 3 +FRONT 6 0 6 6 +FROLIC 2 0 2 2 +FRO 1 0 1 1 +FRIVOLOUS 2 0 2 2 +FRINGED 2 0 2 2 +FRIGHTFUL 1 0 1 1 +FRIGATE 2 0 2 2 +FRIENDSHIP 1 0 1 1 +FRIENDS 8 0 8 8 +FRIENDLY 5 0 5 5 
+FRIEND'S 2 0 2 2 +FRIDAY 1 0 1 1 +FRICTION 1 0 1 1 +FRETTING 1 0 1 1 +FRESHENS 1 0 1 1 +FRESH 6 0 6 6 +FREQUENTLY 3 0 3 3 +FREQUENTER 1 0 1 1 +FREQUENT 3 0 3 3 +FRENZY 1 0 1 1 +FRENCH 11 0 11 11 +FREELY 2 0 2 2 +FREEDOM 4 0 4 4 +FREED 1 0 1 1 +FREE 18 0 18 18 +FRECKLES 1 0 1 1 +FRANKNESS 1 0 1 1 +FRANKLY 1 0 1 1 +FRANCIS 3 0 3 3 +FRANCE 6 0 6 6 +FRAMEWORK 1 0 1 1 +FRAMED 1 0 1 1 +FRAME 4 0 4 4 +FRAIL 1 0 1 1 +FRAGRANCE 1 0 1 1 +FRAGMENT 2 0 2 2 +FRACTURED 1 0 1 1 +FRACTURE 1 0 1 1 +FOX 1 0 1 1 +FOURTH 2 0 2 2 +FOURTEEN 1 0 1 1 +FOUNTAINS 1 0 1 1 +FOUNDING 1 0 1 1 +FOUNDATION 1 0 1 1 +FOUL 1 0 1 1 +FOUGHT 1 0 1 1 +FORWARD 5 0 5 5 +FORTY 2 0 2 2 +FORTUNES 2 0 2 2 +FORTUNE 8 0 8 8 +FORTUNATELY 1 0 1 1 +FORTUNATE 2 0 2 2 +FORTUITOUS 1 0 1 1 +FORTNIGHT 1 0 1 1 +FORTIFIED 2 0 2 2 +FORTHWITH 3 0 3 3 +FORTH 9 0 9 9 +FORT 2 0 2 2 +FORSAKE 1 0 1 1 +FORMS 6 0 6 6 +FORMING 1 0 1 1 +FORMIDABLE 1 0 1 1 +FORMER 7 0 7 7 +FORMED 7 0 7 7 +FORMALITY 1 0 1 1 +FORMALITIES 1 0 1 1 +FORMAL 1 0 1 1 +FORKED 1 0 1 1 +FORGOTTEN 4 0 4 4 +FORGOT 1 0 1 1 +FORGIVE 6 0 6 6 +FORGING 1 0 1 1 +FORGETTING 1 0 1 1 +FORGETFULNESS 1 0 1 1 +FORGET 6 0 6 6 +FORGED 3 0 3 3 +FORGE 1 0 1 1 +FORETOLD 2 0 2 2 +FOREST 6 0 6 6 +FORESAW 1 0 1 1 +FOREIGNER 1 0 1 1 +FOREIGN 1 0 1 1 +FOREHEAD 1 0 1 1 +FOREFINGER 1 0 1 1 +FORCIBLE 1 0 1 1 +FORCES 2 0 2 2 +FORCED 1 0 1 1 +FORBES 1 0 1 1 +FOOTNOTE 2 0 2 2 +FOOTMEN 1 0 1 1 +FOOTMAN 1 0 1 1 +FOOT 9 0 9 9 +FOOLS 1 0 1 1 +FOOLISHLY 2 0 2 2 +FOOLISH 3 0 3 3 +FOOD 1 0 1 1 +FONDNESS 1 0 1 1 +FOND 5 0 5 5 +FOLLOWS 3 0 3 3 +FOLLOWING 4 0 4 4 +FOLLOWER 1 0 1 1 +FOLLOW 7 0 7 7 +FOLLIES 1 0 1 1 +FOLIAGE 1 0 1 1 +FOLDED 1 0 1 1 +FOLD 2 0 2 2 +FOES 2 0 2 2 +FOCUS 1 0 1 1 +FOAM 4 0 4 4 +FLY 4 0 4 4 +FLUX 2 0 2 2 +FLUSHED 3 0 3 3 +FLUKES 1 0 1 1 +FLOYD'S 1 0 1 1 +FLOWERS 11 0 11 11 +FLOWED 1 0 1 1 +FLOW 2 0 2 2 +FLOURISH 1 0 1 1 +FLOORS 1 0 1 1 +FLOOR 10 0 10 10 +FLOOD 2 0 2 2 +FLOATED 1 0 1 1 +FLOAT 1 0 1 1 +FLINT 1 0 1 1 +FLING 1 0 1 1 +FLIES 1 0 1 1 +FLICKER 2 0 2 2 +FLEW 1 0 1 1 +FLESH 5 0 5 5 +FLEETING 1 0 1 1 +FLEECY 1 0 1 1 +FLEECES 1 0 1 1 +FLEECE 3 0 3 3 +FLED 3 0 3 3 +FLAX 1 0 1 1 +FLATTERY 1 0 1 1 +FLATTERS 2 0 2 2 +FLATTERING 1 0 1 1 +FLATTERED 1 0 1 1 +FLAT 1 0 1 1 +FLASHED 3 0 3 3 +FLASH 3 0 3 3 +FLAPS 1 0 1 1 +FLAP 2 0 2 2 +FLANKED 1 0 1 1 +FLAMES 1 0 1 1 +FLAMED 2 0 2 2 +FLAME 3 0 3 3 +FLAGSTONES 1 0 1 1 +FLAG 1 0 1 1 +FIXES 1 0 1 1 +FIXED 3 0 3 3 +FIX 2 0 2 2 +FITZOOTH'S 1 0 1 1 +FITZOOTH 7 0 7 7 +FITTED 2 0 2 2 +FITS 1 0 1 1 +FITLY 1 0 1 1 +FISTS 2 0 2 2 +FISHES 3 0 3 3 +FISHERMAN 1 0 1 1 +FISH 1 0 1 1 +FIRMNESS 1 0 1 1 +FIRMLY 2 0 2 2 +FIRM 1 0 1 1 +FIRESIDES 1 0 1 1 +FIRESIDE 1 0 1 1 +FIRES 1 0 1 1 +FIREPLACE 2 0 2 2 +FIRED 1 0 1 1 +FINISHED 4 0 4 4 +FINISH 1 0 1 1 +FINGERS 6 0 6 6 +FINGER 2 0 2 2 +FINEST 1 0 1 1 +FINER 1 0 1 1 +FINELY 1 0 1 1 +FINED 1 0 1 1 +FINDS 2 0 2 2 +FINDING 3 0 3 3 +FINANCIAL 1 0 1 1 +FINALLY 8 0 8 8 +FINALE 1 0 1 1 +FINAL 5 0 5 5 +FILE 1 0 1 1 +FIGURES 4 0 4 4 +FIGURED 1 0 1 1 +FIGURE 6 0 6 6 +FIGHTING 4 0 4 4 +FIGHTER 1 0 1 1 +FIFTY 6 0 6 6 +FIFTH 1 0 1 1 +FIFTEENTH 2 0 2 2 +FIFTEEN 1 0 1 1 +FIERCELY 1 0 1 1 +FIERCE 4 0 4 4 +FIELDS 4 0 4 4 +FIELD 6 0 6 6 +FEWER 1 0 1 1 +FEVER 2 0 2 2 +FEUDS 1 0 1 1 +FETCH 1 0 1 1 +FESTIVE 1 0 1 1 +FERVENT 1 0 1 1 +FERDINANDO 5 0 5 5 +FEMININE 1 0 1 1 +FEMALE 1 0 1 1 +FELLOWSHIP 1 0 1 1 +FELLOWS 3 0 3 3 +FELLOW'S 1 0 1 1 +FELLOW 9 0 9 9 +FELLER 1 0 1 1 +FELICITY 2 0 2 2 +FEET 11 0 11 11 +FEES 1 0 1 1 +FEELINGS 3 0 3 3 +FEEDING 1 0 1 1 +FEEDER 1 0 1 1 +FEED 2 0 2 2 +FEEBLE 2 0 2 2 +FEDERAL 3 0 3 3 +FED 1 0 1 1 +FEBRUARY 1 0 1 1 
+FEATURES 9 0 9 9 +FEATURE 1 0 1 1 +FEATHERS 1 0 1 1 +FEASTED 1 0 1 1 +FEAST 3 0 3 3 +FEASIBLE 1 0 1 1 +FEARS 3 0 3 3 +FEARLESS 1 0 1 1 +FEARING 2 0 2 2 +FEARFUL 1 0 1 1 +FAVORABLY 1 0 1 1 +FAULTS 1 0 1 1 +FAULTLESS 1 0 1 1 +FAULT 2 0 2 2 +FATIGUE 2 0 2 2 +FATHOMS 6 0 6 6 +FATHOM 1 0 1 1 +FATHERS 2 0 2 2 +FATHER'S 4 0 4 4 +FATHER 28 0 28 28 +FATALITY 2 0 2 2 +FASTEST 1 0 1 1 +FASTEN 1 0 1 1 +FAST 7 0 7 7 +FASHIONED 1 0 1 1 +FASHIONABLE 1 0 1 1 +FASCINATION 2 0 2 2 +FARTHER 6 0 6 6 +FARRAGUT 1 0 1 1 +FARMS 1 0 1 1 +FARMERS 1 0 1 1 +FARMER'S 1 0 1 1 +FARMER 5 0 5 5 +FAREWELL 2 0 2 2 +FANTASY 1 0 1 1 +FANNING 1 0 1 1 +FANCY 3 0 3 3 +FANCIES 2 0 2 2 +FANCIED 2 0 2 2 +FANATIC 1 0 1 1 +FAN 2 0 2 2 +FAMOUSLY 2 0 2 2 +FAMILY 16 0 16 16 +FAMILIES 3 0 3 3 +FAMILIARITY 3 0 3 3 +FAMILIAR 4 0 4 4 +FALSE 6 0 6 6 +FALLS 5 0 5 5 +FALLING 2 0 2 2 +FALCONS 1 0 1 1 +FALCON 1 0 1 1 +FAITHFUL 1 0 1 1 +FAITH 9 0 9 9 +FAIRLY 4 0 4 4 +FAINTNESS 1 0 1 1 +FAINTLY 3 0 3 3 +FAINTING 4 0 4 4 +FAINT 3 0 3 3 +FAIN 1 0 1 1 +FAILURE 2 0 2 2 +FAILS 1 0 1 1 +FAILING 1 0 1 1 +FAILED 2 0 2 2 +FAIL 4 0 4 4 +FADES 1 0 1 1 +FADED 1 0 1 1 +FADE 4 0 4 4 +FACULTIES 1 0 1 1 +FACTS 4 0 4 4 +FACTORS 1 0 1 1 +FACTOR 1 0 1 1 +FACTIONS 2 0 2 2 +FACTION 1 0 1 1 +FACT 23 0 23 23 +FACILITATED 1 0 1 1 +FACILITATE 1 0 1 1 +FACES 4 0 4 4 +FACED 3 0 3 3 +FACE 29 0 29 29 +FABULOUS 1 0 1 1 +EYES 44 0 44 44 +EYELIDS 1 0 1 1 +EYED 1 0 1 1 +EXULTING 1 0 1 1 +EXULTATION 1 0 1 1 +EXTREMELY 2 0 2 2 +EXTREME 1 0 1 1 +EXTRAORDINARY 2 0 2 2 +EXTRACT 1 0 1 1 +EXTRA 1 0 1 1 +EXTINGUISHED 2 0 2 2 +EXTINCTION 1 0 1 1 +EXTINCT 1 0 1 1 +EXTERIOR 1 0 1 1 +EXTENT 6 0 6 6 +EXTENSIVE 1 0 1 1 +EXTENDED 3 0 3 3 +EXTEND 2 0 2 2 +EXQUISITE 3 0 3 3 +EXPULSION 1 0 1 1 +EXPRESSLY 1 0 1 1 +EXPRESSIVE 1 0 1 1 +EXPRESSIONS 1 0 1 1 +EXPRESSION 4 0 4 4 +EXPRESSING 2 0 2 2 +EXPRESSED 4 0 4 4 +EXPRESS 4 0 4 4 +EXPOSURE 1 0 1 1 +EXPONENT 2 0 2 2 +EXPLOSION 1 0 1 1 +EXPLORE 2 0 2 2 +EXPLOITS 1 0 1 1 +EXPLANATION 1 0 1 1 +EXPLAINED 1 0 1 1 +EXPLAIN 4 0 4 4 +EXPERIMENTALLY 1 0 1 1 +EXPERIENCING 1 0 1 1 +EXPERIENCED 1 0 1 1 +EXPERIENCE 7 0 7 7 +EXPENSIVE 1 0 1 1 +EXPENDITURE 4 0 4 4 +EXPEDITION 4 0 4 4 +EXPECTED 3 0 3 3 +EXPECTATIONS 2 0 2 2 +EXPECT 4 0 4 4 +EXPANDED 1 0 1 1 +EXOTICS 1 0 1 1 +EXISTING 3 0 3 3 +EXISTENT 1 0 1 1 +EXISTENCE 5 0 5 5 +EXISTED 4 0 4 4 +EXILE 1 0 1 1 +EXHORT 1 0 1 1 +EXHIBITS 1 0 1 1 +EXHIBITION 2 0 2 2 +EXHIBITED 1 0 1 1 +EXHIBIT 2 0 2 2 +EXHAUSTED 1 0 1 1 +EXHALE 1 0 1 1 +EXERTIONS 1 0 1 1 +EXERTED 1 0 1 1 +EXERCISING 1 0 1 1 +EXERCISED 1 0 1 1 +EXERCISE 1 0 1 1 +EXEMPLIFIES 1 0 1 1 +EXEMPLARY 1 0 1 1 +EXECUTIVE 1 0 1 1 +EXECUTED 1 0 1 1 +EXCLUDED 2 0 2 2 +EXCLAIMED 3 0 3 3 +EXCITING 2 0 2 2 +EXCITEMENT 3 0 3 3 +EXCITE 1 0 1 1 +EXCESSIVELY 1 0 1 1 +EXCESS 1 0 1 1 +EXCEPTIONS 1 0 1 1 +EXCEPTION 2 0 2 2 +EXCEPT 6 0 6 6 +EXCELLENT 5 0 5 5 +EXCELLENCY'S 1 0 1 1 +EXCELLENCY 2 0 2 2 +EXCELLENCE 1 0 1 1 +EXCEEDING 1 0 1 1 +EXCEEDED 1 0 1 1 +EXCEED 1 0 1 1 +EXAMPLE 2 0 2 2 +EXAMINED 4 0 4 4 +EXAMINE 4 0 4 4 +EXAMINATION 8 0 8 8 +EXALTED 1 0 1 1 +EXALT 1 0 1 1 +EXAGGERATED 1 0 1 1 +EXACTLY 8 0 8 8 +EXACT 5 0 5 5 +EX 2 0 2 2 +EVOLVING 1 0 1 1 +EVOLVED 1 0 1 1 +EVOKED 1 0 1 1 +EVOKE 1 0 1 1 +EVIL 6 0 6 6 +EVIDENTLY 4 0 4 4 +EVIDENT 5 0 5 5 +EVIDENCE 5 0 5 5 +EVERYWHERE 7 0 7 7 +EVERYTHING 16 0 16 16 +EVERYBODY 7 0 7 7 +EVERLASTING 2 0 2 2 +EVENTS 8 0 8 8 +EVENT 4 0 4 4 +EVENLY 2 0 2 2 +EVENING 15 0 15 15 +EVASIVELY 1 0 1 1 +EVAPORATION 1 0 1 1 +EVADED 1 0 1 1 +EVA'S 1 0 1 1 +EUROPE 3 0 3 3 +EUCHARIST 1 0 1 1 +ETHICAL 1 0 1 1 +ETERNITY 2 0 2 2 +ETERNAL 2 0 2 2 
+ETCHINGS 1 0 1 1 +ET 3 0 3 3 +ESTRANGEMENT 1 0 1 1 +ESTIMATE 1 0 1 1 +ESTABLISHED 3 0 3 3 +ESSEX 1 0 1 1 +ESSENTIALLY 1 0 1 1 +ESSENTIAL 2 0 2 2 +ESSENCE 1 0 1 1 +ESQUIRE 2 0 2 2 +ESPECIALLY 6 0 6 6 +ESCORT 4 0 4 4 +ESCAPADES 1 0 1 1 +ERRORS 1 0 1 1 +ERRONEOUS 2 0 2 2 +ERRING 1 0 1 1 +ERRAND 2 0 2 2 +ERR 1 0 1 1 +ERIE 1 0 1 1 +ERECTED 1 0 1 1 +ERECT 1 0 1 1 +ERA 1 0 1 1 +EQUIP 1 0 1 1 +EQUATION 1 0 1 1 +EQUALS 1 0 1 1 +EQUALLY 4 0 4 4 +EQUAL 2 0 2 2 +EPOCH 1 0 1 1 +EPISTLE 3 0 3 3 +EPISODE 1 0 1 1 +ENVY 1 0 1 1 +ENVIRONMENT 1 0 1 1 +ENVELOPMENT 1 0 1 1 +ENTRUSTED 1 0 1 1 +ENTRENCHED 1 0 1 1 +ENTREATIES 1 0 1 1 +ENTRANCE 4 0 4 4 +ENTIRELY 6 0 6 6 +ENTIRE 2 0 2 2 +ENTHUSIASTS 1 0 1 1 +ENTHUSIASTIC 1 0 1 1 +ENTHUSIASM 1 0 1 1 +ENTERTAINS 1 0 1 1 +ENTERTAINMENT 3 0 3 3 +ENTERTAIN 2 0 2 2 +ENTERS 1 0 1 1 +ENTERPRISE 2 0 2 2 +ENTERING 2 0 2 2 +ENTERED 21 0 21 21 +ENTANGLED 1 0 1 1 +ENOUGH 20 0 20 20 +ENORMOUSLY 2 0 2 2 +ENORMOUS 1 0 1 1 +ENNIS 1 0 1 1 +ENLISTED 1 0 1 1 +ENLIST 1 0 1 1 +ENJOYMENT 1 0 1 1 +ENJOYED 1 0 1 1 +ENJOY 2 0 2 2 +ENIGMA 1 0 1 1 +ENGORGED 1 0 1 1 +ENGLISHMAN 3 0 3 3 +ENGLISH 12 0 12 12 +ENGLAND 10 0 10 10 +ENGINE 6 0 6 6 +ENGENDERS 1 0 1 1 +ENGAGEMENTS 1 0 1 1 +ENGAGED 5 0 5 5 +ENGAGE 1 0 1 1 +ENFORCED 1 0 1 1 +ENFORCE 3 0 3 3 +ENERGY 3 0 3 3 +ENEMY'S 1 0 1 1 +ENEMY 3 0 3 3 +ENEMIES 3 0 3 3 +ENDURES 1 0 1 1 +ENDURED 1 0 1 1 +ENDURE 1 0 1 1 +ENDS 1 0 1 1 +ENDOWED 1 0 1 1 +ENDLESS 1 0 1 1 +ENDEAVOURED 1 0 1 1 +ENDEAVOUR 1 0 1 1 +ENDEAVORING 1 0 1 1 +ENDEAVOR 1 0 1 1 +END 18 0 18 18 +ENCYCLOPAEDIA 1 0 1 1 +ENCOURAGED 1 0 1 1 +ENCOURAGE 2 0 2 2 +ENCLOSE 1 0 1 1 +ENACTED 1 0 1 1 +ENABLES 2 0 2 2 +ENABLED 3 0 3 3 +EMULATION 1 0 1 1 +EMPTY 7 0 7 7 +EMPLOYMENTS 2 0 2 2 +EMPLOYMENT 1 0 1 1 +EMPLOYING 1 0 1 1 +EMPLOYERS 1 0 1 1 +EMPLOYER 1 0 1 1 +EMPLOYED 2 0 2 2 +EMPIRE 2 0 2 2 +EMPEROR 1 0 1 1 +EMOTIONS 2 0 2 2 +EMOTIONLESS 1 0 1 1 +EMOTION 1 0 1 1 +EMINENCES 1 0 1 1 +EMIGRATION 1 0 1 1 +EMERGENCY 1 0 1 1 +EMERGENCE 2 0 2 2 +EMERALD 1 0 1 1 +EMBROIDERED 2 0 2 2 +EMBRACING 2 0 2 2 +EMBRACE 2 0 2 2 +EMBODIED 1 0 1 1 +EMBLEM 1 0 1 1 +EMBITTERING 1 0 1 1 +EMBITTERED 1 0 1 1 +EMBERS 1 0 1 1 +EMBARRASS 1 0 1 1 +ELSEWHERE 4 0 4 4 +ELSE 7 0 7 7 +ELOQUENT 1 0 1 1 +ELMHURST 2 0 2 2 +ELIZABETH 1 0 1 1 +ELIZA 3 0 3 3 +ELICITED 1 0 1 1 +ELF 1 0 1 1 +ELEVEN 4 0 4 4 +ELEMENTS 7 0 7 7 +ELEMENTARY 3 0 3 3 +ELEMENT 4 0 4 4 +ELEGANT 1 0 1 1 +ELEGANCE 2 0 2 2 +ELECTROPLATING 1 0 1 1 +ELECTROLYTIC 2 0 2 2 +ELECTRICITY 5 0 5 5 +ELECTRICAL 2 0 2 2 +ELECTRIC 4 0 4 4 +ELECTIONEERING 1 0 1 1 +ELECTION 3 0 3 3 +ELECTING 1 0 1 1 +ELECTED 1 0 1 1 +ELECT 2 0 2 2 +ELDERS 1 0 1 1 +ELDERLY 1 0 1 1 +ELDER 2 0 2 2 +ELAPSED 1 0 1 1 +EJACULATIONS 1 0 1 1 +EJACULATED 2 0 2 2 +EITHER'S 1 0 1 1 +EIGHTY 3 0 3 3 +EIGHTH 3 0 3 3 +EIGHTEENTH 2 0 2 2 +EIGHTEEN 14 0 14 14 +EIGHT 7 0 7 7 +EGYPTIAN 1 0 1 1 +EGYPT 1 0 1 1 +EFFORTS 4 0 4 4 +EFFORT 11 0 11 11 +EFFICIENCY 1 0 1 1 +EFFECTUALLY 1 0 1 1 +EFFECTUAL 2 0 2 2 +EFFECTS 3 0 3 3 +EFFECTIVENESS 1 0 1 1 +EFFECTIVELY 1 0 1 1 +EFFECTIVE 2 0 2 2 +EFFECTING 1 0 1 1 +EDWARD 3 0 3 3 +EDUCATION 5 0 5 5 +EDUCATED 2 0 2 2 +EDITORS 1 0 1 1 +EDITOR 1 0 1 1 +EDISONIA 1 0 1 1 +EDISON 16 0 16 16 +EDIFICE 1 0 1 1 +EDICTS 1 0 1 1 +EDICT 1 0 1 1 +EDGES 1 0 1 1 +EDGED 1 0 1 1 +EDGE 5 0 5 5 +EDDYING 1 0 1 1 +ECONOMY 2 0 2 2 +ECONOMIC 3 0 3 3 +ECHOING 1 0 1 1 +ECHOES 1 0 1 1 +ECCLESIASTICAL 1 0 1 1 +EATEN 1 0 1 1 +EAT 5 0 5 5 +EASY 14 0 14 14 +EASTWARDS 1 0 1 1 +EASTERLY'S 1 0 1 1 +EAST 4 0 4 4 +EASILY 10 0 10 10 +EASE 3 0 3 3 +EARTHLY 1 0 1 1 +EARTH'S 3 0 3 3 +EARTH 17 0 17 17 +EARNESTLY 1 0 1 1 
+EARNED 1 0 1 1 +EARLY 13 0 13 13 +EARLIER 6 0 6 6 +EAGLES 1 0 1 1 +EAGLE 1 0 1 1 +EAGERNESS 2 0 2 2 +EAGERLY 1 0 1 1 +EAGER 4 0 4 4 +EACH 24 0 24 24 +DYNAMO 2 0 2 2 +DYING 2 0 2 2 +DYIN 1 0 1 1 +DYE 1 0 1 1 +DWELLINGS 3 0 3 3 +DWELL 1 0 1 1 +DUTY 7 0 7 7 +DUTIES 8 0 8 8 +DUSK 2 0 2 2 +DURATION 1 0 1 1 +DURABLE 1 0 1 1 +DUPLICATE 1 0 1 1 +DUNNO 1 0 1 1 +DUMBFOUNDED 1 0 1 1 +DUMB 1 0 1 1 +DUMAS 1 0 1 1 +DULY 1 0 1 1 +DULL 2 0 2 2 +DUG 1 0 1 1 +DUE 5 0 5 5 +DUDLEY 1 0 1 1 +DUDGEON 1 0 1 1 +DUCKS 1 0 1 1 +DUCKLINGS 1 0 1 1 +DUCHESS 3 0 3 3 +DUBIOUSLY 1 0 1 1 +DRY 4 0 4 4 +DRUMS 1 0 1 1 +DROWNED 2 0 2 2 +DROPS 3 0 3 3 +DROPPING 1 0 1 1 +DROPPED 5 0 5 5 +DROP 3 0 3 3 +DROOPED 1 0 1 1 +DRIVING 1 0 1 1 +DRIVES 1 0 1 1 +DRIVEN 1 0 1 1 +DRINKS 1 0 1 1 +DRINK 4 0 4 4 +DRIFTS 1 0 1 1 +DRIED 1 0 1 1 +DREW 10 0 10 10 +DRESSES 1 0 1 1 +DRESSED 6 0 6 6 +DRESS 1 0 1 1 +DREDGED 1 0 1 1 +DREARY 1 0 1 1 +DREAMY 1 0 1 1 +DREAMT 1 0 1 1 +DREAMS 2 0 2 2 +DREAMING 2 0 2 2 +DREAMED 1 0 1 1 +DREAM 5 0 5 5 +DREADING 1 0 1 1 +DREADFUL 3 0 3 3 +DRAWS 2 0 2 2 +DRAWN 7 0 7 7 +DRAWING 2 0 2 2 +DRAW 4 0 4 4 +DRAUGHT 1 0 1 1 +DRAPERIES 1 0 1 1 +DRANK 1 0 1 1 +DRAMATIST'S 1 0 1 1 +DRAMATIST 1 0 1 1 +DRAMATIC 2 0 2 2 +DRAMA 1 0 1 1 +DRAINED 1 0 1 1 +DRAIN 1 0 1 1 +DRAGOONS 1 0 1 1 +DRAGON'S 1 0 1 1 +DRAGGING 1 0 1 1 +DRAGGED 2 0 2 2 +DRAG 1 0 1 1 +DOZEN 4 0 4 4 +DOWNWARD 2 0 2 2 +DOVES 1 0 1 1 +DOVE 1 0 1 1 +DOUGLAS 4 0 4 4 +DOUGHY 1 0 1 1 +DOUGHNUTS 1 0 1 1 +DOUGH 1 0 1 1 +DOUBTS 2 0 2 2 +DOUBTLESS 2 0 2 2 +DOUBTINGLY 1 0 1 1 +DOUBTING 1 0 1 1 +DOUBTFULLY 1 0 1 1 +DOUBTFUL 2 0 2 2 +DOUBT 11 0 11 11 +DOTH 5 0 5 5 +DOST 3 0 3 3 +DOROTHY 1 0 1 1 +DORKING 1 0 1 1 +DORCAS 6 0 6 6 +DOORS 3 0 3 3 +DOOM 1 0 1 1 +DONNITHORNE 1 0 1 1 +DONKEY 1 0 1 1 +DONE 24 0 24 24 +DOMINIONS 1 0 1 1 +DOMINION 1 0 1 1 +DOMESTIC 2 0 2 2 +DOME 1 0 1 1 +DOLLS 2 0 2 2 +DOLLARS 7 0 7 7 +DOING 12 0 12 12 +DOGS 1 0 1 1 +DOGGED 1 0 1 1 +DOG 2 0 2 2 +DOESN'T 3 0 3 3 +DOCTRINES 3 0 3 3 +DOCTRINE 4 0 4 4 +DOCTORS 1 0 1 1 +DOCTOR 7 0 7 7 +DIZZILY 1 0 1 1 +DIVORCE 1 0 1 1 +DIVISIONS 1 0 1 1 +DIVISION 2 0 2 2 +DIVINITY 1 0 1 1 +DIVING 4 0 4 4 +DIVINE 3 0 3 3 +DIVIDING 1 0 1 1 +DIVIDEND 1 0 1 1 +DIVIDED 4 0 4 4 +DIVIDE 2 0 2 2 +DIVERTING 1 0 1 1 +DIVERT 1 0 1 1 +DIVERSITY 1 0 1 1 +DISUSE 1 0 1 1 +DISUNITED 1 0 1 1 +DISTURBING 1 0 1 1 +DISTURBANCE 1 0 1 1 +DISTURB 2 0 2 2 +DISTRUSTING 2 0 2 2 +DISTRUSTFUL 1 0 1 1 +DISTRUST 1 0 1 1 +DISTRESSED 1 0 1 1 +DISTRESS 1 0 1 1 +DISTORTION 1 0 1 1 +DISTORTED 1 0 1 1 +DISTINGUISH 3 0 3 3 +DISTINCTLY 3 0 3 3 +DISTINCTIVE 1 0 1 1 +DISTINCTION 5 0 5 5 +DISTINCT 2 0 2 2 +DISTANT 4 0 4 4 +DISTANCE 6 0 6 6 +DISSENTERS 1 0 1 1 +DISSENTED 1 0 1 1 +DISPUTE 3 0 3 3 +DISPOSITIONS 1 0 1 1 +DISPOSITION 3 0 3 3 +DISPOSES 1 0 1 1 +DISPOSED 3 0 3 3 +DISPOSE 1 0 1 1 +DISPOSAL 1 0 1 1 +DISPLEASURE 1 0 1 1 +DISPLAYING 1 0 1 1 +DISPLAYED 1 0 1 1 +DISPLAY 1 0 1 1 +DISPERSED 3 0 3 3 +DISPENSATION 1 0 1 1 +DISPATCH 1 0 1 1 +DISPASSIONATE 1 0 1 1 +DISPARAGE 1 0 1 1 +DISOWN 1 0 1 1 +DISMISS 2 0 2 2 +DISMAYED 1 0 1 1 +DISMAY 1 0 1 1 +DISLOYAL 1 0 1 1 +DISLIKE 1 0 1 1 +DISK 1 0 1 1 +DISINCORPORATED 1 0 1 1 +DISHES 2 0 2 2 +DISH 3 0 3 3 +DISGUSTED 1 0 1 1 +DISGUST 3 0 3 3 +DISGUISE 2 0 2 2 +DISGRACE 3 0 3 3 +DISENGAGED 1 0 1 1 +DISEASED 1 0 1 1 +DISDAINFUL 1 0 1 1 +DISCUSSION 1 0 1 1 +DISCUSSED 1 0 1 1 +DISCUSS 2 0 2 2 +DISCREETLY 1 0 1 1 +DISCREET 1 0 1 1 +DISCOVERY 2 0 2 2 +DISCOVERERS 1 0 1 1 +DISCOVERED 3 0 3 3 +DISCOVER 3 0 3 3 +DISCOURSE 2 0 2 2 +DISCOURAGED 1 0 1 1 +DISCOURAGE 2 0 2 2 +DISCOMFORT 1 0 1 1 +DISCLOSES 1 0 1 1 +DISCIPLINE 5 0 5 5 
+DISCERN 1 0 1 1 +DISBURDENED 1 0 1 1 +DISASTER 1 0 1 1 +DISAPPOINTMENT 6 0 6 6 +DISAPPEARS 1 0 1 1 +DISAPPEARED 1 0 1 1 +DISAPPEAR 2 0 2 2 +DISADVANTAGE 1 0 1 1 +DISABILITIES 1 0 1 1 +DIRTY 1 0 1 1 +DIRECTLY 4 0 4 4 +DIRECTIONS 2 0 2 2 +DIRECTION 6 0 6 6 +DIRECTING 1 0 1 1 +DIRECTED 2 0 2 2 +DIRECT 8 0 8 8 +DIP 1 0 1 1 +DIOCLETIAN 1 0 1 1 +DINNER 8 0 8 8 +DINING 1 0 1 1 +DINE 1 0 1 1 +DINAH'S 1 0 1 1 +DINAH 2 0 2 2 +DIMLY 1 0 1 1 +DIMINUTION 2 0 2 2 +DIMINISH 1 0 1 1 +DIMENSIONS 1 0 1 1 +DIM 2 0 2 2 +DILIGENTLY 1 0 1 1 +DILEMMA 1 0 1 1 +DILATED 1 0 1 1 +DIGNITY 4 0 4 4 +DIGNITARIES 1 0 1 1 +DIGNIFIED 4 0 4 4 +DIGBY 1 0 1 1 +DIG 1 0 1 1 +DIFFUSED 1 0 1 1 +DIFFICULTIES 3 0 3 3 +DIFFICULT 11 0 11 11 +DIFFERS 2 0 2 2 +DIFFERENTLY 1 0 1 1 +DIFFERENTIATION 1 0 1 1 +DIFFERENT 15 0 15 15 +DIFFERENCES 2 0 2 2 +DIFFERENCE 5 0 5 5 +DIFFER 1 0 1 1 +DIES 1 0 1 1 +DIED 5 0 5 5 +DIE 4 0 4 4 +DIDN'T 12 0 12 12 +DICE 1 0 1 1 +DIAMONDS 1 0 1 1 +DIALOGUE 3 0 3 3 +DIALECT 1 0 1 1 +DIAGRAMS 1 0 1 1 +DEWS 2 0 2 2 +DEW 2 0 2 2 +DEVOUR 3 0 3 3 +DEVOTION 1 0 1 1 +DEVOTES 1 0 1 1 +DEVOTED 2 0 2 2 +DEVOTE 1 0 1 1 +DEVOLVE 1 0 1 1 +DEVOID 1 0 1 1 +DEVISING 1 0 1 1 +DEVIL 1 0 1 1 +DEVICES 2 0 2 2 +DEVELOPMENTS 1 0 1 1 +DEVELOPMENT 6 0 6 6 +DEVELOPED 2 0 2 2 +DEVELOP 1 0 1 1 +DETOUR 1 0 1 1 +DETESTS 1 0 1 1 +DETESTED 1 0 1 1 +DETESTABLE 1 0 1 1 +DETERMINING 1 0 1 1 +DETERMINATION 1 0 1 1 +DETECT 1 0 1 1 +DETAINED 2 0 2 2 +DETAILS 2 0 2 2 +DETAIL 1 0 1 1 +DETACHMENT 1 0 1 1 +DESTRUCTIVE 1 0 1 1 +DESTRUCTION 2 0 2 2 +DESTROYED 1 0 1 1 +DESTINY 1 0 1 1 +DESTINED 2 0 2 2 +DESPITE 1 0 1 1 +DESPISE 1 0 1 1 +DESPERATELY 1 0 1 1 +DESPERATE 3 0 3 3 +DESPAIRING 1 0 1 1 +DESPAIR 4 0 4 4 +DESOLATION 1 0 1 1 +DESOLATE 1 0 1 1 +DESK 2 0 2 2 +DESIROUS 1 0 1 1 +DESIRES 1 0 1 1 +DESIRED 3 0 3 3 +DESIRE 4 0 4 4 +DESIRABLE 1 0 1 1 +DESIGNERS 1 0 1 1 +DESIGNATED 1 0 1 1 +DESIGN 3 0 3 3 +DESERVES 1 0 1 1 +DESERVED 1 0 1 1 +DESERVE 1 0 1 1 +DESERTS 1 0 1 1 +DESERTED 1 0 1 1 +DESERT 1 0 1 1 +DESCRIPTIONS 1 0 1 1 +DESCRIPTION 2 0 2 2 +DESCRIBING 1 0 1 1 +DESCRIBED 2 0 2 2 +DESCRIBE 3 0 3 3 +DESCENDS 1 0 1 1 +DESCENDING 2 0 2 2 +DESCENDED 2 0 2 2 +DESCENDANTS 1 0 1 1 +DESCEND 4 0 4 4 +DERIVED 1 0 1 1 +DERIVATIVE 1 0 1 1 +DEPUTY 1 0 1 1 +DEPTHS 2 0 2 2 +DEPTH 4 0 4 4 +DEPRIVED 1 0 1 1 +DEPRESSION 1 0 1 1 +DEPRESSED 1 0 1 1 +DEPRECIATING 1 0 1 1 +DEPRECATION 3 0 3 3 +DEPRAVED 1 0 1 1 +DEPOSITION 1 0 1 1 +DEPORTMENT 1 0 1 1 +DEPENDS 1 0 1 1 +DEPENDENT 2 0 2 2 +DEPENDENCE 1 0 1 1 +DEPEND 2 0 2 2 +DEPARTURE 4 0 4 4 +DEPARTMENT 1 0 1 1 +DEPARTING 2 0 2 2 +DEPARTED 1 0 1 1 +DEPART 1 0 1 1 +DENY 5 0 5 5 +DENUNCIATION 1 0 1 1 +DENSELY 1 0 1 1 +DENSE 2 0 2 2 +DENOTE 1 0 1 1 +DENIES 1 0 1 1 +DENIED 2 0 2 2 +DEMURELY 1 0 1 1 +DEMURE 1 0 1 1 +DEMONSTRATION 1 0 1 1 +DEMOCRATIC 2 0 2 2 +DEMEANOUR 1 0 1 1 +DEMEANOR 1 0 1 1 +DEMANDS 1 0 1 1 +DEMANDED 2 0 2 2 +DELUSIVE 1 0 1 1 +DELIVERY 2 0 2 2 +DELIVERING 1 0 1 1 +DELIVERED 2 0 2 2 +DELIGHTFUL 4 0 4 4 +DELIGHTED 5 0 5 5 +DELIGHT 4 0 4 4 +DELICIOUSNESS 1 0 1 1 +DELICIOUS 1 0 1 1 +DELICATE 4 0 4 4 +DELICACY 1 0 1 1 +DELIBERATIONS 1 0 1 1 +DELIBERATION 1 0 1 1 +DELEGATED 1 0 1 1 +DELAYED 2 0 2 2 +DELAY 3 0 3 3 +DELAWARES 1 0 1 1 +DELAWARE 1 0 1 1 +DEIGNED 1 0 1 1 +DEIGN 1 0 1 1 +DEGREES 3 0 3 3 +DEGREE 6 0 6 6 +DEFYING 1 0 1 1 +DEFTLY 1 0 1 1 +DEFORMITIES 1 0 1 1 +DEFINITION 3 0 3 3 +DEFINITE 2 0 2 2 +DEFINES 1 0 1 1 +DEFIED 1 0 1 1 +DEFIANCE 2 0 2 2 +DEFERENCE 2 0 2 2 +DEFENDS 1 0 1 1 +DEFENDERS 1 0 1 1 +DEFENDED 1 0 1 1 +DEFENDANT 1 0 1 1 +DEFENCE 1 0 1 1 +DEFECT 1 0 1 1 +DEER 3 0 3 3 +DEEPLY 4 0 4 4 
+DEEPENING 1 0 1 1 +DEEP 11 0 11 11 +DEEDS 1 0 1 1 +DECREES 1 0 1 1 +DECREED 1 0 1 1 +DECREE 1 0 1 1 +DECORATIVE 1 0 1 1 +DECORATED 3 0 3 3 +DECOMPOSE 1 0 1 1 +DECLINING 2 0 2 2 +DECLINED 2 0 2 2 +DECLARES 1 0 1 1 +DECLARED 2 0 2 2 +DECLARE 2 0 2 2 +DECLARATION 1 0 1 1 +DECISION 3 0 3 3 +DECIDEDLY 1 0 1 1 +DECIDED 3 0 3 3 +DECIDE 4 0 4 4 +DECEPTIVE 1 0 1 1 +DECEMBER 1 0 1 1 +DECEIVING 1 0 1 1 +DECEIVED 1 0 1 1 +DECEIT 1 0 1 1 +DECANTERS 1 0 1 1 +DECADES 1 0 1 1 +DEBATE 1 0 1 1 +DEATH 19 0 19 19 +DEARS 1 0 1 1 +DEARLY 2 0 2 2 +DEAREST 2 0 2 2 +DEAR 22 0 22 22 +DEALER 1 0 1 1 +DEAL 10 0 10 10 +DEAF 1 0 1 1 +DAZZLING 2 0 2 2 +DAZED 1 0 1 1 +DAYS 16 0 16 16 +DAYLIGHT 2 0 2 2 +DAWN 2 0 2 2 +DAVID 8 0 8 8 +DAUNTLESS 1 0 1 1 +DAUGHTERS 1 0 1 1 +DAUGHTER 9 0 9 9 +DATING 1 0 1 1 +DATE 1 0 1 1 +DATA 2 0 2 2 +DARTED 3 0 3 3 +DARLING 1 0 1 1 +DARKNESS 3 0 3 3 +DARKENED 1 0 1 1 +DARING 2 0 2 2 +DARED 3 0 3 3 +DARE 3 0 3 3 +DANGERS 1 0 1 1 +DANGEROUS 4 0 4 4 +DANGER 9 0 9 9 +DANES 1 0 1 1 +DANCING 2 0 2 2 +DANCES 1 0 1 1 +DANCERS 1 0 1 1 +DANCER 1 0 1 1 +DANCED 2 0 2 2 +DANCE 4 0 4 4 +DAMSEL 1 0 1 1 +DAMNED 1 0 1 1 +DAMNABLE 1 0 1 1 +DAMASK 1 0 1 1 +DAMAGING 1 0 1 1 +DAMAGE 2 0 2 2 +DAINTY 1 0 1 1 +DAILY 3 0 3 3 +DAFT 1 0 1 1 +CYRIL 2 0 2 2 +CYPRESS 1 0 1 1 +CYMBALS 1 0 1 1 +CUTTINGS 1 0 1 1 +CUSTOMS 1 0 1 1 +CUSTOMER'S 1 0 1 1 +CUSTOM 2 0 2 2 +CUSTODY 2 0 2 2 +CUSHIONED 1 0 1 1 +CURVE 2 0 2 2 +CURTAINS 1 0 1 1 +CURTAIN 3 0 3 3 +CURSE 1 0 1 1 +CURRENT 8 0 8 8 +CURLY 1 0 1 1 +CURIOUSLY 1 0 1 1 +CURIOUS 4 0 4 4 +CURIOSITY 2 0 2 2 +CURBSTONE 1 0 1 1 +CUPS 1 0 1 1 +CUPBOARD 2 0 2 2 +CUP 3 0 3 3 +CUNNING 3 0 3 3 +CUMBERLAND'S 1 0 1 1 +CULTURE 4 0 4 4 +CULTIVATING 1 0 1 1 +CULTIVATE 1 0 1 1 +CULPRIT 1 0 1 1 +CULMINATING 2 0 2 2 +CUFFS 1 0 1 1 +CUB 1 0 1 1 +CRYSTALLIZE 1 0 1 1 +CRYING 1 0 1 1 +CRY 5 0 5 5 +CRUSHED 1 0 1 1 +CRUSH 3 0 3 3 +CRUMBLED 1 0 1 1 +CRUMBLE 1 0 1 1 +CRUISING 1 0 1 1 +CRUELTY 2 0 2 2 +CRUEL 1 0 1 1 +CRUCIFIX 2 0 2 2 +CRUCIFIED 1 0 1 1 +CROWNS 1 0 1 1 +CROWNING 2 0 2 2 +CROWN 6 0 6 6 +CROWDING 1 0 1 1 +CROWDED 2 0 2 2 +CROWD 5 0 5 5 +CROSSLY 1 0 1 1 +CROSSING 1 0 1 1 +CROSSED 3 0 3 3 +CROPS 1 0 1 1 +CROP 1 0 1 1 +CROOKED 3 0 3 3 +CRITICISM 1 0 1 1 +CRITICALLY 1 0 1 1 +CRISIS 1 0 1 1 +CRIMSON 1 0 1 1 +CRIMINAL 3 0 3 3 +CRIME 3 0 3 3 +CRIES 3 0 3 3 +CRIED 23 0 23 23 +CREPT 1 0 1 1 +CREEPING 2 0 2 2 +CREEP 1 0 1 1 +CREEK 2 0 2 2 +CREED 1 0 1 1 +CREDIT 2 0 2 2 +CREATURES 2 0 2 2 +CREATURE 8 0 8 8 +CREATOR 1 0 1 1 +CREATIVE 1 0 1 1 +CREATIONS 1 0 1 1 +CREATION 2 0 2 2 +CREATING 1 0 1 1 +CREATES 1 0 1 1 +CREATED 2 0 2 2 +CREATE 3 0 3 3 +CREAM 1 0 1 1 +CREAKED 1 0 1 1 +CRAZY 2 0 2 2 +CRAWLING 1 0 1 1 +CRAWL 1 0 1 1 +CRANED 1 0 1 1 +CRANE 1 0 1 1 +CRAMPNESS 1 0 1 1 +CRAMPED 1 0 1 1 +CRACKING 1 0 1 1 +CRACKED 2 0 2 2 +COWARDLY 1 0 1 1 +COWARD 1 0 1 1 +COVERT 1 0 1 1 +COVERING 1 0 1 1 +COVERED 2 0 2 2 +COVER 2 0 2 2 +COVENANTERS 5 0 5 5 +COUSINS 3 0 3 3 +COUSIN'S 2 0 2 2 +COUSIN 7 0 7 7 +COURTIERS 2 0 2 2 +COURTESY 2 0 2 2 +COURAGEOUS 1 0 1 1 +COURAGE 6 0 6 6 +COUPLE 1 0 1 1 +COUNTY 7 0 7 7 +COUNTRIES 1 0 1 1 +COUNTING 1 0 1 1 +COUNTERPART 1 0 1 1 +COUNTERFEITED 1 0 1 1 +COUNTERACT 1 0 1 1 +COUNTENANCE 3 0 3 3 +COUNT 15 0 15 15 +COUNSEL 1 0 1 1 +COUCH 1 0 1 1 +COTTAGE 2 0 2 2 +COSTUME 2 0 2 2 +CORRUPTION 1 0 1 1 +CORRIDOR 1 0 1 1 +CORRESPONDING 1 0 1 1 +CORRESPOND 1 0 1 1 +CORRECTLY 1 0 1 1 +CORRECTED 1 0 1 1 +CORRECT 3 0 3 3 +CORNERS 4 0 4 4 +CORNER 13 0 13 13 +CORDIALLY 1 0 1 1 +CORDIALITY 1 0 1 1 +CORAL 1 0 1 1 +COQUETRY 1 0 1 1 +COPY 2 0 2 2 +COPPER 1 0 1 1 +COPIED 2 0 2 2 +COOKERY 2 0 2 2 
+COOKED 1 0 1 1 +CONVULSION 1 0 1 1 +CONVIVIALITY 1 0 1 1 +CONVINCING 2 0 2 2 +CONVINCED 2 0 2 2 +CONVICTIONS 2 0 2 2 +CONVICTION 2 0 2 2 +CONVEYED 1 0 1 1 +CONVEXITY 1 0 1 1 +CONVERTS 1 0 1 1 +CONVERSION 1 0 1 1 +CONVERSING 1 0 1 1 +CONVERSE 2 0 2 2 +CONVERSATIONS 1 0 1 1 +CONVERSATIONAL 1 0 1 1 +CONVERSATION 6 0 6 6 +CONVENTIONALITY 1 0 1 1 +CONVENTIONAL 1 0 1 1 +CONVENTION 1 0 1 1 +CONTROLLING 1 0 1 1 +CONTROL 4 0 4 4 +CONTRIVED 2 0 2 2 +CONTRIVANCE 2 0 2 2 +CONTRITION 1 0 1 1 +CONTRITE 1 0 1 1 +CONTRIBUTE 1 0 1 1 +CONTRASTING 1 0 1 1 +CONTRAST 4 0 4 4 +CONTRARY 5 0 5 5 +CONTRADICTIONS 1 0 1 1 +CONTRACTION 2 0 2 2 +CONTINUOUSLY 2 0 2 2 +CONTINUOUS 1 0 1 1 +CONTINUED 14 0 14 14 +CONTINUE 1 0 1 1 +CONTINUALLY 2 0 2 2 +CONTINUAL 3 0 3 3 +CONTINGENCY 1 0 1 1 +CONTINENT 1 0 1 1 +CONTI 1 0 1 1 +CONTESTED 1 0 1 1 +CONTEST 1 0 1 1 +CONTENTMENT 1 0 1 1 +CONTENTEDLY 1 0 1 1 +CONTENT 1 0 1 1 +CONTEMPTIBLE 1 0 1 1 +CONTEMPT 2 0 2 2 +CONTEMPORARY 1 0 1 1 +CONTEMPLATION 1 0 1 1 +CONTEMPLATED 1 0 1 1 +CONTAMINATION 1 0 1 1 +CONTAMINATED 1 0 1 1 +CONTAINS 1 0 1 1 +CONTAINERS 1 0 1 1 +CONTAGION 1 0 1 1 +CONTACT 1 0 1 1 +CONSUMPTION 13 0 13 13 +CONSUMER 5 0 5 5 +CONSUMED 1 0 1 1 +CONSUME 2 0 2 2 +CONSULTED 1 0 1 1 +CONSULTATION 1 0 1 1 +CONSULT 1 0 1 1 +CONSTRUCTION 4 0 4 4 +CONSTRUCTED 1 0 1 1 +CONSTRAINEDLY 1 0 1 1 +CONSTRAINED 1 0 1 1 +CONSTITUTION 3 0 3 3 +CONSTITUTES 1 0 1 1 +CONSTITUTED 1 0 1 1 +CONSTITUTE 1 0 1 1 +CONSTANTINE 1 0 1 1 +CONSTANT 3 0 3 3 +CONSTANCY 1 0 1 1 +CONSPIRACY 2 0 2 2 +CONSPICUOUS 8 0 8 8 +CONSOLE 1 0 1 1 +CONSOLATION 1 0 1 1 +CONSISTENTLY 1 0 1 1 +CONSIDERING 1 0 1 1 +CONSIDERED 5 0 5 5 +CONSIDERATIONS 1 0 1 1 +CONSIDERATE 1 0 1 1 +CONSIDERABLY 1 0 1 1 +CONSIDER 1 0 1 1 +CONSERVATION 1 0 1 1 +CONSEQUENTLY 1 0 1 1 +CONSEQUENT 2 0 2 2 +CONSEQUENCES 1 0 1 1 +CONSEQUENCE 5 0 5 5 +CONSENT 4 0 4 4 +CONSEIL 6 0 6 6 +CONSECRATED 2 0 2 2 +CONSCIOUSNESS 2 0 2 2 +CONSCIOUS 3 0 3 3 +CONSCIENCES 1 0 1 1 +CONSCIENCE 4 0 4 4 +CONQUERED 2 0 2 2 +CONQUER 1 0 1 1 +CONNECTIONS 1 0 1 1 +CONNECTION 1 0 1 1 +CONNECTED 3 0 3 3 +CONNECT 2 0 2 2 +CONJURATION 1 0 1 1 +CONJUNCTURE 1 0 1 1 +CONJECTURE 1 0 1 1 +CONGRESS 1 0 1 1 +CONGREGATED 1 0 1 1 +CONGRATULATIONS 1 0 1 1 +CONGRATULATION 1 0 1 1 +CONGRATULATE 1 0 1 1 +CONGO 1 0 1 1 +CONGENIAL 1 0 1 1 +CONFUSION 2 0 2 2 +CONFUSES 1 0 1 1 +CONFUSED 3 0 3 3 +CONFOUNDEDLY 1 0 1 1 +CONFLICTING 1 0 1 1 +CONFLICT 3 0 3 3 +CONFISCATED 1 0 1 1 +CONFIRMS 1 0 1 1 +CONFIRMED 3 0 3 3 +CONFINED 2 0 2 2 +CONFIDENT 1 0 1 1 +CONFIDENCE 7 0 7 7 +CONFIDE 1 0 1 1 +CONFIDANTS 1 0 1 1 +CONFESSION 1 0 1 1 +CONFERS 1 0 1 1 +CONFEDERATE 1 0 1 1 +CONDUCTS 1 0 1 1 +CONDUCTORS 2 0 2 2 +CONDUCTED 1 0 1 1 +CONDUCT 6 0 6 6 +CONDUCIVE 1 0 1 1 +CONDITIONS 3 0 3 3 +CONDITION 11 0 11 11 +CONDENSED 1 0 1 1 +CONDENSATION 1 0 1 1 +CONDEMNATION 2 0 2 2 +CONCUR 1 0 1 1 +CONCOURSE 1 0 1 1 +CONCORD 1 0 1 1 +CONCLUSION 2 0 2 2 +CONCERTING 1 0 1 1 +CONCERNING 4 0 4 4 +CONCERNED 8 0 8 8 +CONCERN 1 0 1 1 +CONCEPTIONS 2 0 2 2 +CONCEPTION 2 0 2 2 +CONCEPT 1 0 1 1 +CONCEIVED 1 0 1 1 +CONCEIVE 2 0 2 2 +CONCEALING 1 0 1 1 +CONCEALED 1 0 1 1 +CONCEAL 1 0 1 1 +COMTE 1 0 1 1 +COMRADES 3 0 3 3 +COMPULSIVE 1 0 1 1 +COMPULSION 1 0 1 1 +COMPREHENSIVE 1 0 1 1 +COMPREHENDED 1 0 1 1 +COMPREHEND 1 0 1 1 +COMPOUND 1 0 1 1 +COMPOSURE 2 0 2 2 +COMPORT 1 0 1 1 +COMPONENT 1 0 1 1 +COMPLY 2 0 2 2 +COMPLIMENTARY 1 0 1 1 +COMPLIMENT 1 0 1 1 +COMPLICATED 1 0 1 1 +COMPLIANCE 2 0 2 2 +COMPLEXION 2 0 2 2 +COMPLETELY 3 0 3 3 +COMPLETED 2 0 2 2 +COMPLETE 2 0 2 2 +COMPLEMENT 1 0 1 1 +COMPLAINTS 1 0 1 1 +COMPLAINT 1 0 1 1 
+COMPLAINING 1 0 1 1 +COMPLAINEST 1 0 1 1 +COMPLAIN 2 0 2 2 +COMPLACENTLY 1 0 1 1 +COMPLACENCY 1 0 1 1 +COMPETITION 1 0 1 1 +COMPETE 1 0 1 1 +COMPENSATION 1 0 1 1 +COMPELS 1 0 1 1 +COMPELLED 2 0 2 2 +COMPEL 1 0 1 1 +COMPASS 1 0 1 1 +COMPARISON 1 0 1 1 +COMPARED 3 0 3 3 +COMPARE 1 0 1 1 +COMPARATIVELY 2 0 2 2 +COMPARATIVE 1 0 1 1 +COMPANIONSHIP 2 0 2 2 +COMPANIONS 2 0 2 2 +COMPANIONLESS 1 0 1 1 +COMPANION 5 0 5 5 +COMPANIES 3 0 3 3 +COMPACT 1 0 1 1 +COMMUNITY 3 0 3 3 +COMMUNITIES 1 0 1 1 +COMMUNION 1 0 1 1 +COMMUNICATED 2 0 2 2 +COMMUNICATE 1 0 1 1 +COMMOTION 1 0 1 1 +COMMONLY 1 0 1 1 +COMMON 8 0 8 8 +COMMITTING 1 0 1 1 +COMMITTEE 3 0 3 3 +COMMITTED 2 0 2 2 +COMMITTAL 1 0 1 1 +COMMITS 1 0 1 1 +COMMIT 1 0 1 1 +COMMISSIONS 1 0 1 1 +COMMISSIONERS 1 0 1 1 +COMMISSION 1 0 1 1 +COMMISERATION 1 0 1 1 +COMMERCIAL 2 0 2 2 +COMMENTED 1 0 1 1 +COMMENTATORS 1 0 1 1 +COMMENTARY 2 0 2 2 +COMMENT 1 0 1 1 +COMMENDED 1 0 1 1 +COMMEND 1 0 1 1 +COMMENCEMENT 1 0 1 1 +COMMENCE 1 0 1 1 +COMMANDS 1 0 1 1 +COMMANDMENTS 1 0 1 1 +COMMANDMENT 2 0 2 2 +COMMANDERS 1 0 1 1 +COMMANDER 2 0 2 2 +COMMANDED 1 0 1 1 +COMMAND 2 0 2 2 +COMING 7 0 7 7 +COMFORTS 2 0 2 2 +COMFORTING 1 0 1 1 +COMFORTED 2 0 2 2 +COMFORTABLE 3 0 3 3 +COMFORT 8 0 8 8 +COMETH 1 0 1 1 +COMES 10 0 10 10 +COMELY 1 0 1 1 +COMEDY 1 0 1 1 +COMEDIES 2 0 2 2 +COME 51 0 51 51 +COMBINED 2 0 2 2 +COMBINE 1 0 1 1 +COMBINATIONS 1 0 1 1 +COMBINATION 2 0 2 2 +COMBAT 2 0 2 2 +COMB 1 0 1 1 +COLUMNS 1 0 1 1 +COLUMN 1 0 1 1 +COLOURED 1 0 1 1 +COLORISTS 2 0 2 2 +COLORED 2 0 2 2 +COLONY 3 0 3 3 +COLONIAL 1 0 1 1 +COLONEL 1 0 1 1 +COLLEGE 4 0 4 4 +COLLECTIONS 1 0 1 1 +COLLECTION 1 0 1 1 +COLLECTING 1 0 1 1 +COLLAR 2 0 2 2 +COLLAPSED 1 0 1 1 +COLBERT 1 0 1 1 +COINED 1 0 1 1 +COINCIDE 1 0 1 1 +COFFEE 6 0 6 6 +COCK 1 0 1 1 +COBBLER 1 0 1 1 +COAXED 1 0 1 1 +COAT 1 0 1 1 +COASTS 1 0 1 1 +COAST 3 0 3 3 +COACHMAN 1 0 1 1 +COACH 1 0 1 1 +CLUTCHING 1 0 1 1 +CLUTCH 1 0 1 1 +CLUNG 1 0 1 1 +CLUMSINESS 1 0 1 1 +CLOUDS 6 0 6 6 +CLOUD 11 0 11 11 +CLOTTED 1 0 1 1 +CLOTHING 1 0 1 1 +CLOTHES 5 0 5 5 +CLOTHED 1 0 1 1 +CLOSET 1 0 1 1 +CLOSER 1 0 1 1 +CLOSELY 6 0 6 6 +CLOSED 2 0 2 2 +CLOSE 10 0 10 10 +CLOAKS 2 0 2 2 +CLIMBING 1 0 1 1 +CLIMATE 2 0 2 2 +CLIFF 2 0 2 2 +CLIENTS 1 0 1 1 +CLICKED 1 0 1 1 +CLEVERNESS 3 0 3 3 +CLEVER 2 0 2 2 +CLERK 2 0 2 2 +CLERICAL 1 0 1 1 +CLERGYMAN'S 2 0 2 2 +CLERGY 2 0 2 2 +CLEARNESS 1 0 1 1 +CLEARLY 5 0 5 5 +CLEARING 1 0 1 1 +CLEAREST 1 0 1 1 +CLEAR 10 0 10 10 +CLEANED 1 0 1 1 +CLAY 5 0 5 5 +CLASSIFYING 1 0 1 1 +CLASSIFIER 1 0 1 1 +CLASSIFICATION 1 0 1 1 +CLASSIC 2 0 2 2 +CLASSES 1 0 1 1 +CLASSED 3 0 3 3 +CLASS 9 0 9 9 +CLASPING 1 0 1 1 +CLASPED 2 0 2 2 +CLASHING 2 0 2 2 +CLARIFIED 1 0 1 1 +CLAP 1 0 1 1 +CLAMOROUS 1 0 1 1 +CLAIMS 1 0 1 1 +CLAIMED 1 0 1 1 +CLAIM 2 0 2 2 +CIVILIZATION 2 0 2 2 +CIVIL 3 0 3 3 +CITIZENS 4 0 4 4 +CITIZEN 2 0 2 2 +CITIES 2 0 2 2 +CIRCUMVENTION 1 0 1 1 +CIRCUMSTANCES 4 0 4 4 +CIRCUMSTANCE 3 0 3 3 +CIRCUMNAVIGATION 1 0 1 1 +CIRCUMFERENCE 1 0 1 1 +CIRCULATED 1 0 1 1 +CIRCUITS 1 0 1 1 +CIRCUITOUS 1 0 1 1 +CIRCUIT 1 0 1 1 +CIRCLE 7 0 7 7 +CIGARS 1 0 1 1 +CHURNING 1 0 1 1 +CHURCHES 1 0 1 1 +CHURCH 17 0 17 17 +CHUCKLING 1 0 1 1 +CHUBBY 1 0 1 1 +CHRONICLED 1 0 1 1 +CHRISTMAS 4 0 4 4 +CHRISTIANS 1 0 1 1 +CHRISTIANITY 2 0 2 2 +CHRIST'S 1 0 1 1 +CHRIST 22 0 22 22 +CHRISM 1 0 1 1 +CHOSEN 3 0 3 3 +CHOPPED 1 0 1 1 +CHOOSING 1 0 1 1 +CHOOSE 3 0 3 3 +CHOKING 1 0 1 1 +CHOCOLATE 1 0 1 1 +CHIP 1 0 1 1 +CHINA 1 0 1 1 +CHIN 2 0 2 2 +CHIMNEY 2 0 2 2 +CHILDREN'S 1 0 1 1 +CHILDREN 18 0 18 18 +CHILDISH 2 0 2 2 +CHILDHOOD'S 1 0 1 1 +CHILDHOOD 3 0 3 3 +CHILD'S 2 0 
2 2 +CHILD 19 0 19 19 +CHIEFTAIN 1 0 1 1 +CHIEFLY 4 0 4 4 +CHIEF 3 0 3 3 +CHESTNUTS 1 0 1 1 +CHESTNUT 3 0 3 3 +CHEST 2 0 2 2 +CHERRY 1 0 1 1 +CHERISH 1 0 1 1 +CHEMICALS 1 0 1 1 +CHEMICAL 1 0 1 1 +CHELSEA 1 0 1 1 +CHELFORD 4 0 4 4 +CHEERS 1 0 1 1 +CHEERFULLY 2 0 2 2 +CHEERFUL 2 0 2 2 +CHEEKS 1 0 1 1 +CHEEK 2 0 2 2 +CHECKS 1 0 1 1 +CHECKER 1 0 1 1 +CHECKED 3 0 3 3 +CHECK 8 0 8 8 +CHAUCER'S 1 0 1 1 +CHAUCER 1 0 1 1 +CHASING 1 0 1 1 +CHARTER 1 0 1 1 +CHARMING 1 0 1 1 +CHARMED 1 0 1 1 +CHARM 1 0 1 1 +CHARLOTTE 2 0 2 2 +CHARLESTOWN 1 0 1 1 +CHARLESTON 1 0 1 1 +CHARLES 3 0 3 3 +CHARITY 1 0 1 1 +CHARGED 2 0 2 2 +CHARGE 8 0 8 8 +CHARACTERS 2 0 2 2 +CHARACTERIZES 1 0 1 1 +CHARACTERIZED 1 0 1 1 +CHARACTER 14 0 14 14 +CHAPTERS 1 0 1 1 +CHAPTER 3 0 3 3 +CHAPEL 1 0 1 1 +CHAP 1 0 1 1 +CHAOS 1 0 1 1 +CHANNEL 2 0 2 2 +CHANGING 2 0 2 2 +CHANGES 3 0 3 3 +CHANCES 1 0 1 1 +CHANCE 6 0 6 6 +CHAMBERS 2 0 2 2 +CHAMBER 4 0 4 4 +CHALICE 1 0 1 1 +CHAIRS 4 0 4 4 +CHAIN 1 0 1 1 +CETERA 3 0 3 3 +CERTITUDE 1 0 1 1 +CERTAINLY 8 0 8 8 +CERTAIN 12 0 12 12 +CEREMONIES 2 0 2 2 +CEREMONIAL 1 0 1 1 +CENTURY 1 0 1 1 +CENTURIES 1 0 1 1 +CENTRAL 5 0 5 5 +CENTIPEDE 1 0 1 1 +CELLS 1 0 1 1 +CELLAR 1 0 1 1 +CELL 1 0 1 1 +CELESTIAL 2 0 2 2 +CELEBRITY 1 0 1 1 +CELEBRATION 1 0 1 1 +CELEBRATED 4 0 4 4 +CEDAR 1 0 1 1 +CEASING 1 0 1 1 +CAVERN 2 0 2 2 +CAVALRY 2 0 2 2 +CAVALIERS 1 0 1 1 +CAUTIOUSLY 1 0 1 1 +CAUTION 1 0 1 1 +CAUSES 1 0 1 1 +CAUSED 5 0 5 5 +CAUSE 9 0 9 9 +CATTLE 1 0 1 1 +CATS 1 0 1 1 +CATHOLIC 3 0 3 3 +CATHEDRAL 1 0 1 1 +CATECHISM 2 0 2 2 +CATCHING 1 0 1 1 +CATCHES 1 0 1 1 +CATCH 3 0 3 3 +CATASTROPHE 1 0 1 1 +CATAPULT 1 0 1 1 +CAT 7 0 7 7 +CASTLE 1 0 1 1 +CASES 6 0 6 6 +CASEMATES 1 0 1 1 +CARTS 1 0 1 1 +CART 1 0 1 1 +CARRYING 4 0 4 4 +CARRY 7 0 7 7 +CARROTS 1 0 1 1 +CARRIES 1 0 1 1 +CARRIED 13 0 13 13 +CARRIAGES 1 0 1 1 +CARRIAGE 8 0 8 8 +CARPETED 1 0 1 1 +CARING 1 0 1 1 +CAREY 3 0 3 3 +CARELESSNESS 1 0 1 1 +CARELESS 1 0 1 1 +CAREFULLY 7 0 7 7 +CAREFUL 5 0 5 5 +CAREER 4 0 4 4 +CARED 4 0 4 4 +CARE 13 0 13 13 +CARD 1 0 1 1 +CAPTURED 1 0 1 1 +CAPTIVE 2 0 2 2 +CAPTIVATE 1 0 1 1 +CAPTAIN 27 0 27 27 +CAPSIZE 1 0 1 1 +CAPRICE 1 0 1 1 +CAPLESS 1 0 1 1 +CAPITAL 1 0 1 1 +CAPACITY 3 0 3 3 +CAPABLE 3 0 3 3 +CAP'N 4 0 4 4 +CAP 7 0 7 7 +CANVASS 1 0 1 1 +CANST 1 0 1 1 +CANS 1 0 1 1 +CANOPY 1 0 1 1 +CANON 1 0 1 1 +CANNON 1 0 1 1 +CANE 1 0 1 1 +CANDLESTICKS 1 0 1 1 +CANDLES 2 0 2 2 +CANDLE 2 0 2 2 +CANARY 1 0 1 1 +CANAL 1 0 1 1 +CAN'T 21 0 21 21 +CAMPS 1 0 1 1 +CAMPAIGN 2 0 2 2 +CAMP 1 0 1 1 +CAME 44 0 44 44 +CALVINISTIC 1 0 1 1 +CALMNESS 2 0 2 2 +CALMED 1 0 1 1 +CALM 5 0 5 5 +CALLS 5 0 5 5 +CALLOUS 1 0 1 1 +CALLING 2 0 2 2 +CALL 10 0 10 10 +CALHOUN 1 0 1 1 +CAKES 2 0 2 2 +CAKE 1 0 1 1 +CABINET 3 0 3 3 +CABIN 2 0 2 2 +CABALISTIC 1 0 1 1 +C 1 0 1 1 +BUTTONING 1 0 1 1 +BUTTON 1 0 1 1 +BUTTERFLY 1 0 1 1 +BUTTED 1 0 1 1 +BUTLER 2 0 2 2 +BUTCHERY 2 0 2 2 +BUSY 1 0 1 1 +BUSINESS 5 0 5 5 +BUSHES 4 0 4 4 +BUSHEL 1 0 1 1 +BURSTS 1 0 1 1 +BURST 5 0 5 5 +BURNT 1 0 1 1 +BURNS 1 0 1 1 +BURNING 2 0 2 2 +BURIED 2 0 2 2 +BURGOS 1 0 1 1 +BURGLARS 1 0 1 1 +BURDEN 1 0 1 1 +BUOYANT 2 0 2 2 +BUNDLE 1 0 1 1 +BUILT 2 0 2 2 +BUILDS 1 0 1 1 +BUGGY 1 0 1 1 +BUDDING 1 0 1 1 +BUCKLING 1 0 1 1 +BUCKLES 1 0 1 1 +BUCKINGHAM 1 0 1 1 +BUBBLING 1 0 1 1 +BUBBLE'S 1 0 1 1 +BRUTE 1 0 1 1 +BRUTALITY 1 0 1 1 +BRUTAL 1 0 1 1 +BRUSH 1 0 1 1 +BRUISED 1 0 1 1 +BROWSED 1 0 1 1 +BROWN 10 0 10 10 +BROW 1 0 1 1 +BROUGHT 14 0 14 14 +BROTHERS 5 0 5 5 +BROTHER 8 0 8 8 +BROOM 1 0 1 1 +BROOKS 1 0 1 1 +BROOKLYN 1 0 1 1 +BROODING 3 0 3 3 +BRONTES 1 0 1 1 +BROKEN 7 0 7 7 +BROKE 1 0 1 1 +BROADLY 1 
0 1 1 +BROADEST 1 0 1 1 +BROAD 11 0 11 11 +BRITISH 2 0 2 2 +BRISTLING 1 0 1 1 +BRING 9 0 9 9 +BRIM 2 0 2 2 +BRILLIANT 5 0 5 5 +BRILLIANCY 1 0 1 1 +BRIGHTNESS 1 0 1 1 +BRIGHTLY 1 0 1 1 +BRIGHTEST 1 0 1 1 +BRIGHTER 1 0 1 1 +BRIGHTENED 2 0 2 2 +BRIGHT 16 0 16 16 +BRIGANTINE 1 0 1 1 +BRIEFLY 1 0 1 1 +BRIDGE 4 0 4 4 +BRIDE 1 0 1 1 +BRICK 2 0 2 2 +BREWING 1 0 1 1 +BRETHREN 2 0 2 2 +BREEZE 1 0 1 1 +BRED 1 0 1 1 +BREATHING 4 0 4 4 +BREATH 10 0 10 10 +BREASTPLATE 1 0 1 1 +BREAST 2 0 2 2 +BREAKING 2 0 2 2 +BREAKFASTING 1 0 1 1 +BREAKERS 1 0 1 1 +BREAD 5 0 5 5 +BRAVELY 1 0 1 1 +BRAVE 2 0 2 2 +BRANDY 1 0 1 1 +BRANDON 4 0 4 4 +BRANDED 1 0 1 1 +BRANCHES 8 0 8 8 +BRAIN 2 0 2 2 +BRAIDS 1 0 1 1 +BRAIDED 1 0 1 1 +BRAID 1 0 1 1 +BRACTON'S 1 0 1 1 +BRACTON 1 0 1 1 +BRACELETS 1 0 1 1 +BRACELET 1 0 1 1 +BOY'S 3 0 3 3 +BOY 17 0 17 17 +BOXES 1 0 1 1 +BOWING 1 0 1 1 +BOWED 1 0 1 1 +BOW 4 0 4 4 +BOUT 1 0 1 1 +BOUQUETS 1 0 1 1 +BOUND 6 0 6 6 +BOUGHS 1 0 1 1 +BOTTOMS 1 0 1 1 +BOTTOM 7 0 7 7 +BOTTLES 2 0 2 2 +BOTTLE 1 0 1 1 +BOTHER 1 0 1 1 +BOTH 34 0 34 34 +BOTANICAL 2 0 2 2 +BOSOM 2 0 2 2 +BORE 2 0 2 2 +BORDERING 1 0 1 1 +BORDERED 1 0 1 1 +BOOTS 2 0 2 2 +BOOLOOROO 12 0 12 12 +BOOK 4 0 4 4 +BONY 1 0 1 1 +BONNET 1 0 1 1 +BONES 2 0 2 2 +BONDAGE 1 0 1 1 +BOND 3 0 3 3 +BOLTON 1 0 1 1 +BOLDLY 3 0 3 3 +BOLDEST 1 0 1 1 +BOILED 1 0 1 1 +BOIL 1 0 1 1 +BODY 8 0 8 8 +BODILY 3 0 3 3 +BODIES 3 0 3 3 +BOASTING 2 0 2 2 +BOARDED 2 0 2 2 +BOARD 9 0 9 9 +BLUSHING 2 0 2 2 +BLUSHED 1 0 1 1 +BLUSH 1 0 1 1 +BLUNT 1 0 1 1 +BLUFF 1 0 1 1 +BLUES 1 0 1 1 +BLUE 21 0 21 21 +BLOWN 2 0 2 2 +BLOWING 1 0 1 1 +BLOW 2 0 2 2 +BLOOM 1 0 1 1 +BLOODY 1 0 1 1 +BLOODSHED 1 0 1 1 +BLOODED 1 0 1 1 +BLOOD 6 0 6 6 +BLOCKS 1 0 1 1 +BLISS 1 0 1 1 +BLIND 1 0 1 1 +BLEW 1 0 1 1 +BLESSED 3 0 3 3 +BLESS 2 0 2 2 +BLEED 1 0 1 1 +BLEACHED 1 0 1 1 +BLAZING 1 0 1 1 +BLAZED 1 0 1 1 +BLAZE 2 0 2 2 +BLANK 2 0 2 2 +BLAME 1 0 1 1 +BLADE 2 0 2 2 +BLACKSTONE 1 0 1 1 +BLACKNESSES 1 0 1 1 +BLACKNESS 1 0 1 1 +BLACKER 2 0 2 2 +BLACK 22 0 22 22 +BITTER 1 0 1 1 +BITS 1 0 1 1 +BITE 1 0 1 1 +BISHOPS 5 0 5 5 +BIRTH 2 0 2 2 +BIRMINGHAM 1 0 1 1 +BIRDS 4 0 4 4 +BIRD 4 0 4 4 +BIRCHES 1 0 1 1 +BINDING 1 0 1 1 +BIND 1 0 1 1 +BILL 6 0 6 6 +BIG 12 0 12 12 +BIDDING 1 0 1 1 +BIBLE 1 0 1 1 +BEYOND 6 0 6 6 +BEWILDERED 6 0 6 6 +BEWARE 1 0 1 1 +BEVERAGES 1 0 1 1 +BETWEEN 25 0 25 25 +BETTING 1 0 1 1 +BETRAYED 1 0 1 1 +BETRAY 1 0 1 1 +BETH 12 0 12 12 +BESTOWED 1 0 1 1 +BESTOW 1 0 1 1 +BEST 22 0 22 22 +BESOUGHT 1 0 1 1 +BESIEGED 1 0 1 1 +BESIDE 5 0 5 5 +BERTIE 1 0 1 1 +BERRIES 1 0 1 1 +BENT 4 0 4 4 +BENIGNANTLY 1 0 1 1 +BENIGHTED 1 0 1 1 +BENEFIT 1 0 1 1 +BENEATH 6 0 6 6 +BEND 1 0 1 1 +BENCHES 3 0 3 3 +BENCH 4 0 4 4 +BEN 3 0 3 3 +BELT 2 0 2 2 +BELOW 1 0 1 1 +BELOVED 3 0 3 3 +BELONGS 1 0 1 1 +BELONGING 1 0 1 1 +BELONGED 3 0 3 3 +BELONG 2 0 2 2 +BELLY 3 0 3 3 +BELLS 1 0 1 1 +BELLINGHAM 2 0 2 2 +BELL 3 0 3 3 +BELIEVING 2 0 2 2 +BELIEVERS 1 0 1 1 +BELIEF 3 0 3 3 +BEINGS 1 0 1 1 +BEHOLDING 1 0 1 1 +BEHOLDERS 1 0 1 1 +BEHOLDER 1 0 1 1 +BEHIND 10 0 10 10 +BEHELD 1 0 1 1 +BEHAVIOURIST 2 0 2 2 +BEHAVED 1 0 1 1 +BEHAVE 1 0 1 1 +BEHALF 1 0 1 1 +BEGUN 5 0 5 5 +BEGUILING 1 0 1 1 +BEGOT 1 0 1 1 +BEGINS 4 0 4 4 +BEGINNING 4 0 4 4 +BEGIN 9 0 9 9 +BEGGAR 2 0 2 2 +BEGAN 22 0 22 22 +BEFITS 1 0 1 1 +BEEHIVES 1 0 1 1 +BEEF 1 0 1 1 +BEDSIDE 1 0 1 1 +BEDROOM 2 0 2 2 +BEDFORD 1 0 1 1 +BECOMING 1 0 1 1 +BECOMES 8 0 8 8 +BECOME 14 0 14 14 +BECKONED 1 0 1 1 +BECKON 1 0 1 1 +BECAUSE 30 0 30 30 +BECAME 12 0 12 12 +BEAUTY 21 0 21 21 +BEAUTIFUL 13 0 13 13 +BEAUTIES 2 0 2 2 +BEATING 2 0 2 2 +BEATERS 1 0 1 1 +BEATER 1 0 1 1 +BEATEN 2 0 2 
2 +BEAT 1 0 1 1 +BEASTS 2 0 2 2 +BEARS 4 0 4 4 +BEARING 3 0 3 3 +BEARD 1 0 1 1 +BEAR'S 1 0 1 1 +BEAMS 1 0 1 1 +BEAK 6 0 6 6 +BEADS 1 0 1 1 +BATTLED 1 0 1 1 +BATTERIES 1 0 1 1 +BATTERED 1 0 1 1 +BAT 1 0 1 1 +BASTARD 1 0 1 1 +BASKETS 1 0 1 1 +BASIS 2 0 2 2 +BASED 1 0 1 1 +BARTLEY 14 0 14 14 +BARS 1 0 1 1 +BARRICADED 1 0 1 1 +BARREN 1 0 1 1 +BARREL 1 0 1 1 +BARRACK 1 0 1 1 +BARNS 1 0 1 1 +BARN 4 0 4 4 +BARGAINS 1 0 1 1 +BAREFOOT 1 0 1 1 +BARE 2 0 2 2 +BARBARITY 1 0 1 1 +BAR 1 0 1 1 +BAPTIZED 1 0 1 1 +BAPTISM 1 0 1 1 +BANTER 1 0 1 1 +BANQUET 1 0 1 1 +BANKS 1 0 1 1 +BANK 3 0 3 3 +BANISHED 1 0 1 1 +BANG 1 0 1 1 +BAND 2 0 2 2 +BALMY 1 0 1 1 +BALLS 2 0 2 2 +BALLET 2 0 2 2 +BAKER 1 0 1 1 +BAGS 1 0 1 1 +BAGGAGE 1 0 1 1 +BAG 1 0 1 1 +BAFFLED 2 0 2 2 +BADLY 2 0 2 2 +BADGES 1 0 1 1 +BADE 3 0 3 3 +BACON 1 0 1 1 +BACKWARD 1 0 1 1 +BABY'S 1 0 1 1 +BABY 1 0 1 1 +BABIES 1 0 1 1 +BABE 1 0 1 1 +AZURE 1 0 1 1 +AXIS 1 0 1 1 +AWOKE 3 0 3 3 +AWKWARD 1 0 1 1 +AWFULLY 2 0 2 2 +AWFUL 4 0 4 4 +AWAKE 1 0 1 1 +AWAITING 1 0 1 1 +AWAITED 2 0 2 2 +AVOIDING 1 0 1 1 +AVOIDED 1 0 1 1 +AVOID 5 0 5 5 +AVERSION 1 0 1 1 +AVERSE 1 0 1 1 +AVAILABLE 1 0 1 1 +AUTUMN 1 0 1 1 +AUTHORS 1 0 1 1 +AUTHORIZED 1 0 1 1 +AUTHORITY 6 0 6 6 +AUTHORITIES 1 0 1 1 +AUTHORITATIVELY 1 0 1 1 +AUTHOR 1 0 1 1 +AUTHENTICATED 1 0 1 1 +AUNT'S 1 0 1 1 +AUGUST 5 0 5 5 +AUGMENT 1 0 1 1 +AUDITORY 1 0 1 1 +AUDITORS 1 0 1 1 +AUDIENCE 6 0 6 6 +AUDACIOUS 1 0 1 1 +AUCTION 1 0 1 1 +ATTRIBUTED 1 0 1 1 +ATTRACTIVE 1 0 1 1 +ATTRACTION 1 0 1 1 +ATTRACTED 3 0 3 3 +ATTORNEYS 1 0 1 1 +ATTIRE 1 0 1 1 +ATTENUATING 1 0 1 1 +ATTENTIVELY 2 0 2 2 +ATTENTION 11 0 11 11 +ATTENDED 1 0 1 1 +ATTENDANT 1 0 1 1 +ATTEND 3 0 3 3 +ATTEMPTS 1 0 1 1 +ATTEMPT 5 0 5 5 +ATTAINMENTS 1 0 1 1 +ATTAINMENT 1 0 1 1 +ATTACKED 1 0 1 1 +ATTACK 4 0 4 4 +ATTACHED 2 0 2 2 +ATROCIOUS 1 0 1 1 +ATMOSPHERIC 1 0 1 1 +ATMOSPHERE 2 0 2 2 +ATLANTIS 1 0 1 1 +ATLANTIC 3 0 3 3 +ATHLETE 2 0 2 2 +ATHENS 1 0 1 1 +ATHENIANS 1 0 1 1 +ATHENIAN 2 0 2 2 +ATE 2 0 2 2 +ASTRONOMY 1 0 1 1 +ASTOUNDING 1 0 1 1 +ASTONISHMENT 2 0 2 2 +ASTONISHING 1 0 1 1 +ASTONISHED 1 0 1 1 +ASSURES 1 0 1 1 +ASSUREDLY 1 0 1 1 +ASSURED 5 0 5 5 +ASSURE 5 0 5 5 +ASSURANCES 1 0 1 1 +ASSURANCE 3 0 3 3 +ASSUMED 5 0 5 5 +ASSOCIATION 2 0 2 2 +ASSOCIATES 1 0 1 1 +ASSOCIATED 3 0 3 3 +ASSISTED 2 0 2 2 +ASSISTANT 1 0 1 1 +ASSIST 2 0 2 2 +ASSIDUOUSLY 1 0 1 1 +ASSERTS 1 0 1 1 +ASSERTIVE 1 0 1 1 +ASSERTED 3 0 3 3 +ASSENT 1 0 1 1 +ASSEMBLY 2 0 2 2 +ASS 1 0 1 1 +ASPECT 1 0 1 1 +ASLEEP 1 0 1 1 +ASKING 2 0 2 2 +ASKED 22 0 22 22 +ASK 10 0 10 10 +ASIDE 3 0 3 3 +ASIA 1 0 1 1 +ASHORE 1 0 1 1 +ASHAMED 2 0 2 2 +ASCRIBES 1 0 1 1 +ASCERTAINING 1 0 1 1 +ASCERTAIN 2 0 2 2 +ARTILLERY 1 0 1 1 +ARTIFICE 1 0 1 1 +ARTICULATE 2 0 2 2 +ARTICLE 3 0 3 3 +ARTHUR 1 0 1 1 +ARROWS 1 0 1 1 +ARROW 2 0 2 2 +ARRIVED 4 0 4 4 +ARRIVE 1 0 1 1 +ARRIVAL 4 0 4 4 +ARRESTING 1 0 1 1 +ARRAY 1 0 1 1 +ARRANGEMENTS 1 0 1 1 +ARRANGEMENT 2 0 2 2 +ARRANGED 2 0 2 2 +AROUSE 1 0 1 1 +AROSE 2 0 2 2 +ARONNAX 1 0 1 1 +ARMY 9 0 9 9 +ARMS 15 0 15 15 +ARISTOCRACY 1 0 1 1 +ARISING 1 0 1 1 +ARISE 1 0 1 1 +ARID 1 0 1 1 +ARIANS 1 0 1 1 +ARGYLE'S 1 0 1 1 +ARGYLE 2 0 2 2 +ARGUS 1 0 1 1 +ARGUING 1 0 1 1 +ARGUE 2 0 2 2 +AREN'T 1 0 1 1 +AREA 1 0 1 1 +ARDUOUS 1 0 1 1 +ARDOUR 1 0 1 1 +ARCHIVES 1 0 1 1 +ARCHITECTURAL 1 0 1 1 +ARCHED 1 0 1 1 +ARCH 1 0 1 1 +ARCADIAN 1 0 1 1 +ARC 2 0 2 2 +APRON 2 0 2 2 +APRIL 2 0 2 2 +APPROXIMATELY 1 0 1 1 +APPROVING 2 0 2 2 +APPROVES 1 0 1 1 +APPROVE 2 0 2 2 +APPROVAL 1 0 1 1 +APPROPRIATE 1 0 1 1 +APPROBATION 1 0 1 1 +APPROACHING 3 0 3 3 +APPROACHES 2 0 2 2 +APPROACHED 6 0 6 6 +APPROACH 1 0 1 1 
+APPRENTICESHIP 1 0 1 1 +APPREHENSION 1 0 1 1 +APPRECIATIVE 1 0 1 1 +APPRECIATE 1 0 1 1 +APPOSITION 1 0 1 1 +APPOINTED 7 0 7 7 +APPLYING 1 0 1 1 +APPLICATION 2 0 2 2 +APPLE 1 0 1 1 +APPLAUSE 2 0 2 2 +APPLAUDED 1 0 1 1 +APPETITES 1 0 1 1 +APPETITE 1 0 1 1 +APPEARS 1 0 1 1 +APPEARED 10 0 10 10 +APPEARANCES 3 0 3 3 +APPEARANCE 9 0 9 9 +APPEAR 3 0 3 3 +APPEALS 1 0 1 1 +APPEALED 1 0 1 1 +APPEAL 1 0 1 1 +APPARENTLY 1 0 1 1 +APPARENT 2 0 2 2 +APPARATUS 1 0 1 1 +APPALLING 1 0 1 1 +APOSTOLICAL 1 0 1 1 +APOSTOLIC 1 0 1 1 +APOSTLES 6 0 6 6 +APOSTLE 4 0 4 4 +APOLLO 1 0 1 1 +APARTMENT 2 0 2 2 +APART 1 0 1 1 +ANYHOW 3 0 3 3 +ANYBODY 3 0 3 3 +ANXIOUS 3 0 3 3 +ANXIETY 4 0 4 4 +ANTIPATHY 2 0 2 2 +ANTICIPATION 1 0 1 1 +ANTICIPATE 1 0 1 1 +ANTICHRIST 1 0 1 1 +ANTI 1 0 1 1 +ANTE 1 0 1 1 +ANTARCTIC 1 0 1 1 +ANSWERS 2 0 2 2 +ANSWER 6 0 6 6 +ANOTHER'S 1 0 1 1 +ANNOYANCE 2 0 2 2 +ANNOUNCED 2 0 2 2 +ANNE 2 0 2 2 +ANIMOSITY 2 0 2 2 +ANIMATED 2 0 2 2 +ANIMALS 5 0 5 5 +ANIMAL 8 0 8 8 +ANGRY 5 0 5 5 +ANGRILY 3 0 3 3 +ANGRIER 1 0 1 1 +ANGER 1 0 1 1 +ANECDOTES 1 0 1 1 +ANALYSIS 2 0 2 2 +ANALOGY 1 0 1 1 +ANALOGUE 1 0 1 1 +AMUSING 2 0 2 2 +AMUSEMENT 3 0 3 3 +AMUSED 1 0 1 1 +AMUSE 2 0 2 2 +AMPLY 2 0 2 2 +AMOUNT 3 0 3 3 +AMONGST 3 0 3 3 +AMONG 29 0 29 29 +AMISS 1 0 1 1 +AMIDST 2 0 2 2 +AMID 1 0 1 1 +AMETHYST 1 0 1 1 +AMERICANS 2 0 2 2 +AMERICAN 10 0 10 10 +AMERICA 2 0 2 2 +AMENDS 2 0 2 2 +AMENDMENT 1 0 1 1 +AMELIORATION 1 0 1 1 +AMBROSE 4 0 4 4 +AMBITIOUS 1 0 1 1 +AMBITION 2 0 2 2 +AMBASSADOR 1 0 1 1 +AMAZEMENT 2 0 2 2 +AMASS 1 0 1 1 +AMALGAMATED 1 0 1 1 +ALWAYS 36 0 36 36 +ALTHOUGH 10 0 10 10 +ALTERNATING 3 0 3 3 +ALTERING 2 0 2 2 +ALTERED 2 0 2 2 +ALTERATION 1 0 1 1 +ALTER 1 0 1 1 +ALTAR 1 0 1 1 +ALSO 36 0 36 36 +ALOUD 3 0 3 3 +ALONG 15 0 15 15 +ALMS 1 0 1 1 +ALMOST 19 0 19 19 +ALLY 1 0 1 1 +ALLUDE 1 0 1 1 +ALLOWING 2 0 2 2 +ALLOWED 7 0 7 7 +ALLOW 5 0 5 5 +ALLIES 1 0 1 1 +ALLIED 1 0 1 1 +ALLERS 1 0 1 1 +ALLEGED 2 0 2 2 +ALIVE 1 0 1 1 +ALIKE 1 0 1 1 +ALIGHTED 1 0 1 1 +ALICE 4 0 4 4 +ALGERIAN 1 0 1 1 +ALGERIA 2 0 2 2 +ALGEBRA 1 0 1 1 +ALERTNESS 1 0 1 1 +ALE 2 0 2 2 +ALAS 3 0 3 3 +ALARMED 1 0 1 1 +AKIN 1 0 1 1 +AIN'T 2 0 2 2 +AIMED 1 0 1 1 +AIDED 1 0 1 1 +AHEAD 1 0 1 1 +AH 7 0 7 7 +AGREEMENT 1 0 1 1 +AGREED 2 0 2 2 +AGREEABLY 2 0 2 2 +AGREEABLE 5 0 5 5 +AGREE 2 0 2 2 +AGO 4 0 4 4 +AGITATION 4 0 4 4 +AGITATED 2 0 2 2 +AGGRESSIVENESS 1 0 1 1 +AGGRESSIVE 1 0 1 1 +AGGREGATE 1 0 1 1 +AGENCY 1 0 1 1 +AGE 6 0 6 6 +AGAPE 1 0 1 1 +AGAINST 23 0 23 23 +AGAIN 39 0 39 39 +AFTERWARDS 5 0 5 5 +AFTERWARD 2 0 2 2 +AFTERNOON 4 0 4 4 +AFRICAN 1 0 1 1 +AFRAID 9 0 9 9 +AFLOAT 1 0 1 1 +AFFORD 4 0 4 4 +AFFLICTED 1 0 1 1 +AFFIRMATIVE 1 0 1 1 +AFFECTIONS 1 0 1 1 +AFFECTIONATE 1 0 1 1 +AFFECTION 7 0 7 7 +AFFAIRS 3 0 3 3 +AFFAIR 2 0 2 2 +ADVISER 1 0 1 1 +ADVISED 1 0 1 1 +ADVISABLE 1 0 1 1 +ADVICE 4 0 4 4 +ADVERTISING 1 0 1 1 +ADVERTISEMENT 1 0 1 1 +ADVERSE 2 0 2 2 +ADVENTURE 1 0 1 1 +ADVANTAGES 2 0 2 2 +ADVANTAGE 3 0 3 3 +ADVANCING 1 0 1 1 +ADVANCE 5 0 5 5 +ADORE 1 0 1 1 +ADORATION 3 0 3 3 +ADOPTED 2 0 2 2 +ADOLESCENCE 1 0 1 1 +ADMITTING 2 0 2 2 +ADMITTED 3 0 3 3 +ADMITTANCE 1 0 1 1 +ADMIT 2 0 2 2 +ADMIRING 1 0 1 1 +ADMIRED 1 0 1 1 +ADMIRATION 2 0 2 2 +ADMINISTRATION 3 0 3 3 +ADJUST 1 0 1 1 +ADHERENTS 1 0 1 1 +ADDRESSING 1 0 1 1 +ADDRESSED 6 0 6 6 +ADDRESS 3 0 3 3 +ADDITIONAL 1 0 1 1 +ADDED 11 0 11 11 +ACUTE 2 0 2 2 +ACTUALLY 3 0 3 3 +ACTUAL 4 0 4 4 +ACTS 2 0 2 2 +ACTRESS 1 0 1 1 +ACTORS 4 0 4 4 +ACTOR 2 0 2 2 +ACTIVITY 1 0 1 1 +ACTIVE 2 0 2 2 +ACTING 2 0 2 2 +ACT 6 0 6 6 +ACROSS 13 0 13 13 +ACQUIRES 1 0 1 1 +ACQUIRE 1 0 1 1 +ACQUAINTED 1 0 1 1 
+ACQUAINTANCE 3 0 3 3 +ACQUAINT 1 0 1 1 +ACORN 1 0 1 1 +ACKNOWLEDGES 1 0 1 1 +ACKNOWLEDGED 2 0 2 2 +ACKNOWLEDGE 2 0 2 2 +ACHIEVEMENTS 1 0 1 1 +ACHIEVEMENT 1 0 1 1 +ACHIEVED 2 0 2 2 +ACE 2 0 2 2 +ACCUSTOMED 3 0 3 3 +ACCUSE 1 0 1 1 +ACCURATELY 1 0 1 1 +ACCURATE 2 0 2 2 +ACCURACY 3 0 3 3 +ACCRUING 1 0 1 1 +ACCOUTREMENTS 1 0 1 1 +ACCOUNTS 1 0 1 1 +ACCOUNTED 1 0 1 1 +ACCOUNT 9 0 9 9 +ACCORDINGLY 1 0 1 1 +ACCORDING 4 0 4 4 +ACCORDANCE 2 0 2 2 +ACCOMPLISHMENT 1 0 1 1 +ACCOMPLISHED 5 0 5 5 +ACCOMPANY 2 0 2 2 +ACCOMMODATIONS 1 0 1 1 +ACCOMMODATION 1 0 1 1 +ACCLAMATIONS 1 0 1 1 +ACCIDENTS 2 0 2 2 +ACCIDENT 2 0 2 2 +ACCESSORIES 1 0 1 1 +ACCEPTING 1 0 1 1 +ACCEPTED 1 0 1 1 +ACCEPTABLE 1 0 1 1 +ACCENTS 2 0 2 2 +ACCENT 3 0 3 3 +ABSURDITY 1 0 1 1 +ABSURDITIES 1 0 1 1 +ABSURD 1 0 1 1 +ABSTRACTIONS 1 0 1 1 +ABSTRACTION 1 0 1 1 +ABSORBED 1 0 1 1 +ABSOLUTELY 6 0 6 6 +ABSOLUTE 1 0 1 1 +ABSENT 2 0 2 2 +ABSENCE 1 0 1 1 +ABRUPTLY 2 0 2 2 +ABROAD 4 0 4 4 +ABRAHAM 2 0 2 2 +ABOVE 17 0 17 17 +ABOUT 85 0 85 85 +ABOLITIONISM 1 0 1 1 +ABOARD 2 0 2 2 +ABNER 1 0 1 1 +ABLE 7 0 7 7 +ABDUCTION 1 0 1 1 +ABANDONED 2 0 2 2 diff --git a/log/modified_beam_search/errs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt b/log/modified_beam_search/errs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..f4044e24fc4d5fc9e7ff855edd4d8e81aaa83017 --- /dev/null +++ b/log/modified_beam_search/errs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt @@ -0,0 +1,15669 @@ +%WER = 9.53 +Errors: 533 insertions, 455 deletions, 4000 substitutions, over 52343 reference words (47888 correct) +Search below for sections starting with PER-UTT DETAILS:, SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS: + +PER-UTT DETAILS: corr or (ref->hyp) +1688-142285-0000-1948: THERE'S IRON THEY SAY IN ALL OUR BLOOD AND A GRAIN OR TWO PERHAPS IS GOOD BUT HIS HE MAKES ME HARSHLY FEEL HAS GOT A LITTLE TOO MUCH OF STEEL ANON +1688-142285-0001-1949: MARGARET SAID MISTER HALE AS HE RETURNED FROM SHOWING HIS GUEST DOWNSTAIRS I COULD NOT HELP WATCHING YOUR FACE WITH SOME ANXIETY WHEN MISTER THORNTON MADE HIS CONFESSION OF HAVING BEEN A SHOP BOY +1688-142285-0002-1950: YOU DON'T MEAN THAT YOU THOUGHT ME SO SILLY +1688-142285-0003-1951: I REALLY (LIKED->LIKE) THAT ACCOUNT OF HIMSELF BETTER THAN ANYTHING ELSE HE SAID +1688-142285-0004-1952: HIS STATEMENT OF HAVING BEEN A SHOP BOY WAS THE THING I (LIKED->LIKE) BEST OF ALL +1688-142285-0005-1953: YOU WHO WERE ALWAYS ACCUSING PEOPLE OF BEING SHOPPY AT HELSTONE +1688-142285-0006-1954: I DON'T THINK MISTER HALE YOU HAVE DONE QUITE RIGHT (IN->HE) INTRODUCING SUCH A PERSON TO US WITHOUT TELLING US WHAT HE HAD BEEN +1688-142285-0007-1955: I REALLY WAS VERY MUCH AFRAID OF SHOWING HIM HOW MUCH SHOCKED I WAS AT SOME (PARTS->PART) OF WHAT HE SAID +1688-142285-0008-1956: HIS FATHER DYING IN MISERABLE CIRCUMSTANCES +1688-142285-0009-1957: WHY IT MIGHT HAVE BEEN IN THE WORKHOUSE +1688-142285-0010-1958: HIS FATHER SPECULATED WILDLY FAILED AND THEN KILLED HIMSELF BECAUSE HE COULD NOT BEAR THE DISGRACE +1688-142285-0011-1959: ALL HIS FORMER FRIENDS SHRUNK FROM THE DISCLOSURES THAT HAD TO BE MADE OF HIS DISHONEST GAMBLING WILD HOPELESS STRUGGLES MADE WITH OTHER PEOPLE'S MONEY TO REGAIN HIS OWN MODERATE PORTION OF WEALTH +1688-142285-0012-1960: NO ONE CAME FORWARDS TO HELP 
THE MOTHER AND THIS BOY +1688-142285-0013-1961: AT LEAST NO FRIEND CAME FORWARDS IMMEDIATELY AND (MISSUS->MISTER) THORNTON IS NOT ONE I FANCY TO WAIT TILL (TARDY->TIDY) KINDNESS COMES TO FIND HER OUT +1688-142285-0014-1962: SO THEY LEFT MILTON +1688-142285-0015-1963: HOW TAINTED ASKED HER FATHER +1688-142285-0016-1964: OH PAPA BY THAT TESTING EVERYTHING BY THE STANDARD OF WEALTH +1688-142285-0017-1965: WHEN HE SPOKE OF THE MECHANICAL POWERS HE EVIDENTLY LOOKED UPON THEM ONLY AS NEW WAYS OF EXTENDING TRADE AND MAKING MONEY +1688-142285-0018-1966: AND THE POOR MEN AROUND HIM THEY WERE POOR BECAUSE THEY WERE VICIOUS OUT OF THE PALE OF HIS SYMPATHIES BECAUSE THEY HAD NOT HIS IRON NATURE AND THE CAPABILITIES THAT IT GIVES HIM FOR BEING RICH +1688-142285-0019-1967: NOT VICIOUS HE NEVER SAID THAT +1688-142285-0020-1968: IMPROVIDENT AND SELF INDULGENT WERE HIS WORDS +1688-142285-0021-1969: MARGARET WAS COLLECTING HER MOTHER'S WORKING MATERIALS AND PREPARING TO GO TO BED +1688-142285-0022-1970: JUST AS SHE WAS LEAVING THE ROOM SHE HESITATED SHE WAS INCLINED TO MAKE AN ACKNOWLEDGMENT WHICH SHE THOUGHT WOULD PLEASE HER FATHER BUT WHICH TO BE FULL AND TRUE MUST INCLUDE A LITTLE ANNOYANCE +1688-142285-0023-1971: HOWEVER OUT IT CAME +1688-142285-0024-1972: PAPA I DO THINK MISTER THORNTON A VERY REMARKABLE MAN BUT PERSONALLY I DON'T LIKE HIM AT ALL +1688-142285-0025-1973: AND I DO SAID HER FATHER LAUGHING +1688-142285-0026-1974: PERSONALLY AS YOU CALL IT AND ALL +1688-142285-0027-1975: I DON'T SET HIM UP FOR A HERO OR ANYTHING OF THAT KIND +1688-142285-0028-1976: BUT GOOD NIGHT CHILD +1688-142285-0029-1977: THERE WERE SEVERAL OTHER SIGNS OF SOMETHING WRONG ABOUT MISSUS HALE +1688-142285-0030-1978: SHE AND DIXON HELD MYSTERIOUS CONSULTATIONS IN HER BEDROOM FROM WHICH DIXON WOULD COME OUT CRYING AND CROSS AS WAS (HER CUSTOM->ACCUSTOM) WHEN ANY DISTRESS OF HER MISTRESS CALLED UPON HER SYMPATHY +1688-142285-0031-1979: ONCE MARGARET HAD GONE INTO THE CHAMBER SOON AFTER DIXON (LEFT IT->LIFTED) AND FOUND HER MOTHER ON HER KNEES AND AS MARGARET STOLE OUT SHE CAUGHT A FEW WORDS WHICH WERE EVIDENTLY A PRAYER FOR STRENGTH AND PATIENCE TO (ENDURE->INDUCE) SEVERE BODILY SUFFERING +1688-142285-0032-1980: BUT THOUGH SHE RECEIVED CARESSES AND FOND WORDS BACK AGAIN IN SUCH PROFUSION AS WOULD HAVE GLADDENED HER FORMERLY YET SHE FELT THAT THERE WAS A SECRET WITHHELD FROM HER AND SHE BELIEVED IT BORE SERIOUS REFERENCE TO HER MOTHER'S HEALTH +1688-142285-0033-1981: SHE LAY AWAKE VERY LONG THIS NIGHT PLANNING HOW TO (LESSEN->LISTEN) THE EVIL INFLUENCE OF THEIR MILTON LIFE ON HER MOTHER +1688-142285-0034-1982: A SERVANT TO GIVE DIXON PERMANENT ASSISTANCE SHOULD BE GOT IF SHE GAVE UP (HER->THE) WHOLE TIME TO THE SEARCH AND THEN AT ANY RATE HER MOTHER MIGHT HAVE ALL THE PERSONAL (ATTENTION->ATTENTIONS) SHE REQUIRED AND HAD BEEN ACCUSTOMED TO HER WHOLE LIFE +1688-142285-0035-1983: VISITING REGISTER (OFFICES->OFFICERS) SEEING ALL MANNER OF UNLIKELY PEOPLE AND VERY FEW IN THE LEAST LIKELY ABSORBED MARGARET'S TIME AND THOUGHTS FOR SEVERAL DAYS +1688-142285-0036-1984: ONE AFTERNOON SHE MET BESSY HIGGINS IN THE STREET AND STOPPED TO SPEAK TO HER +1688-142285-0037-1985: WELL (BESSY->BUSY) HOW ARE YOU +1688-142285-0038-1986: BETTER AND NOT BETTER IF (YO->YOU) KNOW WHAT THAT MEANS +1688-142285-0039-1987: NOT EXACTLY REPLIED MARGARET SMILING +1688-142285-0040-1988: I'M BETTER IN NOT BEING (TORN->TAUGHT) TO PIECES BY COUGHING (O'NIGHTS->OR NIGHTS) BUT I'M WEARY AND TIRED (O->OF) MILTON AND LONGING TO GET AWAY TO THE LAND (O BEULAH->OF 
BOOLA) AND WHEN I THINK I'M FARTHER AND FARTHER OFF MY HEART SINKS AND I'M NO BETTER I'M WORSE +1688-142285-0041-1989: MARGARET TURNED (ROUND->AROUND) TO WALK (ALONGSIDE->LONG SIDE) OF THE GIRL IN HER FEEBLE PROGRESS HOMEWARD +1688-142285-0042-1990: BUT FOR A MINUTE OR TWO SHE DID NOT SPEAK +1688-142285-0043-1991: AT LAST SHE SAID IN A LOW VOICE +1688-142285-0044-1992: BESSY DO YOU WISH TO DIE +1688-142285-0045-1993: BESSY WAS SILENT IN HER TURN FOR A MINUTE OR TWO THEN SHE REPLIED +1688-142285-0046-1994: (NOUGHT->NOT) WORSE THAN MANY OTHERS I RECKON +1688-142285-0047-1995: BUT WHAT WAS IT +1688-142285-0048-1996: YOU KNOW I'M A STRANGER HERE SO PERHAPS I'M NOT SO QUICK AT UNDERSTANDING WHAT YOU MEAN AS IF I'D LIVED ALL MY LIFE (AT->IN) MILTON +1688-142285-0049-1997: I HAD FORGOTTEN WHAT I SAID FOR THE TIME CONTINUED MARGARET QUIETLY +1688-142285-0050-1998: I SHOULD HAVE THOUGHT OF IT AGAIN WHEN I WAS LESS BUSY MAY I GO WITH YOU NOW +1688-142285-0051-1999: THE SHARPNESS IN HER EYE TURNED TO A WISTFUL LONGING AS SHE MET (MARGARET'S->MARGARET) SOFT AND FRIENDLY GAZE +1688-142285-0052-2000: AS THEY TURNED UP INTO A SMALL COURT OPENING OUT (OF->INTO) A SQUALID STREET BESSY SAID +1688-142285-0053-2001: (YO'LL->YOU WILL) NOT BE DAUNTED IF FATHER'S AT HOME AND SPEAKS A BIT GRUFFISH AT FIRST +1688-142285-0054-2002: BUT NICHOLAS WAS NOT AT HOME WHEN THEY ENTERED +1688-142285-0055-2003: GASPED BESSY AT LAST +1688-142285-0056-2004: BESSY TOOK A LONG AND FEVERISH DRAUGHT AND THEN FELL BACK AND SHUT HER EYES +1688-142285-0057-2005: MARGARET BENT OVER AND SAID BESSY DON'T BE IMPATIENT WITH YOUR LIFE WHATEVER IT IS OR MAY HAVE BEEN +1688-142285-0058-2006: REMEMBER WHO GAVE IT (*->TO) YOU AND MADE IT WHAT IT IS +1688-142285-0059-2007: NOW I'LL NOT HAVE MY WENCH (PREACHED->PREACH) TO +1688-142285-0060-2008: BUT SURELY SAID MARGARET FACING ROUND YOU BELIEVE IN WHAT I SAID THAT GOD GAVE HER LIFE AND ORDERED WHAT KIND OF LIFE IT WAS TO BE +1688-142285-0061-2009: I BELIEVE WHAT I SEE AND NO MORE +1688-142285-0062-2010: THAT'S WHAT I BELIEVE YOUNG WOMAN +1688-142285-0063-2011: I DON'T BELIEVE ALL I HEAR NO NOT BY A BIG DEAL +1688-142285-0064-2012: BUT (HOO'S->WHOSE) COME AT LAST AND (HOO'S->WHO'S) WELCOME AS LONG AS (HOO'LL->HE'LL) KEEP FROM PREACHING ON WHAT (HOO->WHO) KNOWS (NOUGHT->NOT) ABOUT +1688-142285-0065-2013: IT'S SIMPLE AND NOT FAR TO FETCH NOR HARD TO WORK +1688-142285-0066-2014: BUT THE GIRL ONLY PLEADED THE MORE WITH MARGARET +1688-142285-0067-2015: DON'T THINK HARDLY ON HIM HE'S A GOOD MAN HE IS +1688-142285-0068-2016: I SOMETIMES THINK I SHALL BE (MOPED WI->MILKED WITH) SORROW EVEN IN THE CITY OF GOD IF (FATHER->EITHER) IS NOT THERE +1688-142285-0069-2017: THE FEVERISH COLOUR CAME INTO (HER CHEEK->A CHEEKS) AND THE FEVERISH FLAME INTO HER EYE +1688-142285-0070-2018: BUT (YOU WILL->YOU'LL) BE (THERE->THEIR) FATHER YOU SHALL OH MY HEART +1688-142285-0071-2019: SHE PUT HER HAND TO IT AND BECAME GHASTLY PALE +1688-142285-0072-2020: MARGARET HELD HER IN HER ARMS AND PUT THE WEARY HEAD TO REST UPON HER BOSOM +1688-142285-0073-2021: PRESENTLY THE SPASM THAT FORESHADOWED DEATH HAD PASSED AWAY AND (BESSY->BUSY) ROUSED HERSELF AND SAID +1688-142285-0074-2022: I'LL GO TO BED IT'S BEST PLACE BUT CATCHING (AT->THAT) MARGARET'S GOWN (YO'LL->YOU'LL) COME AGAIN I KNOW (YO->YOU) WILL BUT JUST SAY IT +1688-142285-0075-2023: (I WILL->OH) COME TO MORROW SAID MARGARET +1688-142285-0076-2024: MARGARET WENT AWAY VERY SAD AND THOUGHTFUL +1688-142285-0077-2025: SHE WAS LATE FOR TEA AT HOME +1688-142285-0078-2026: HAVE 
YOU MET WITH A SERVANT DEAR +1688-142285-0079-2027: NO MAMMA THAT ANNE BUCKLEY WOULD NEVER HAVE DONE +1688-142285-0080-2028: (SUPPOSE->S'POSE) I TRY SAID MISTER HALE +1688-142285-0081-2029: EVERYBODY ELSE HAS HAD (THEIR->THEY) TURN (AT->UP) THIS GREAT DIFFICULTY NOW LET ME TRY +1688-142285-0082-2030: I MAY BE THE (CINDERELLA->CINRILLA) TO PUT ON THE SLIPPER AFTER ALL +1688-142285-0083-2031: WHAT WOULD YOU DO PAPA HOW WOULD YOU SET ABOUT IT +1688-142285-0084-2032: WHY I WOULD APPLY (*->IT) TO SOME GOOD HOUSE MOTHER TO RECOMMEND ME ONE KNOWN TO HERSELF OR HER SERVANTS +1688-142285-0085-2033: VERY GOOD BUT WE MUST FIRST CATCH OUR HOUSE MOTHER +1688-142285-0086-2034: THE MOTHER OF WHOM HE SPOKE TO US SAID MARGARET +1688-142285-0087-2035: (MISSUS->MISTER) THORNTON THE ONLY MOTHER HE HAS I BELIEVE SAID MISTER HALE QUIETLY +1688-142285-0088-2036: I SHALL LIKE TO SEE HER SHE MUST BE AN UNCOMMON PERSON HER MOTHER ADDED +1688-142285-0089-2037: PERHAPS SHE MAY HAVE A RELATION WHO MIGHT SUIT US AND BE GLAD OF OUR PLACE +1688-142285-0090-2038: SHE SOUNDED TO BE SUCH A CAREFUL ECONOMICAL PERSON THAT I SHOULD LIKE ANY ONE OUT OF THE SAME FAMILY +1688-142285-0091-2039: MY DEAR SAID MISTER HALE ALARMED PRAY DON'T GO OFF ON THAT IDEA +1688-142285-0092-2040: I AM SURE AT ANY RATE SHE WOULD NOT LIKE STRANGERS TO KNOW ANYTHING ABOUT IT +1688-142285-0093-2041: TAKE NOTICE THAT (*->THIS) IS NOT MY KIND OF (HAUGHTINESS->FORTNESS) PAPA IF I HAVE ANY AT ALL WHICH I DON'T AGREE TO THOUGH (YOU'RE->YOU) ALWAYS ACCUSING ME OF IT +1688-142285-0094-2042: I DON'T KNOW POSITIVELY THAT IT IS HERS EITHER BUT FROM LITTLE THINGS I HAVE GATHERED FROM HIM I FANCY SO +1688-142285-0095-2043: THEY CARED TOO LITTLE TO ASK IN WHAT MANNER HER SON HAD SPOKEN ABOUT HER +1998-15444-0000-2204: IF CALLED TO A CASE SUPPOSED (OR->OF) SUSPECTED TO BE ONE OF POISONING THE MEDICAL MAN HAS TWO DUTIES TO PERFORM TO SAVE THE PATIENT'S LIFE AND TO PLACE HIMSELF IN A POSITION TO GIVE EVIDENCE (IF->OF) CALLED (ON TO DO->UNTO) SO +1998-15444-0001-2205: HE SHOULD MAKE INQUIRIES AS TO SYMPTOMS AND TIME AT WHICH FOOD OR MEDICINE WAS LAST TAKEN +1998-15444-0002-2206: HE SHOULD NOTICE THE POSITION AND TEMPERATURE OF THE BODY THE CONDITION OF (RIGOR MORTIS->RIGA MORTARS) MARKS OF (VIOLENCE->IDENTS) APPEARANCE OF LIPS AND MOUTH +1998-15444-0003-2207: IN MAKING A POST (MORTEM EXAMINATION->MODER MAXIMMUNITION) THE (ALIMENTARY->ELEMENTARY) CANAL SHOULD BE REMOVED AND PRESERVED FOR FURTHER INVESTIGATION +1998-15444-0004-2208: THE GUT AND THE (GULLET->COLLEGE) BEING CUT ACROSS BETWEEN THESE LIGATURES THE STOMACH MAY BE REMOVED (ENTIRE->AND TIRED) WITHOUT (SPILLING->SPINNING) ITS CONTENTS +1998-15444-0005-2209: IF THE (MEDICAL PRACTITIONER IS IN DOUBT->MEDICA PRACTITIONERS ENDOWED) ON ANY POINT HE SHOULD OBTAIN TECHNICAL ASSISTANCE FROM (SOMEONE->SOME ONE) WHO HAS PAID ATTENTION TO THE SUBJECT +1998-15444-0006-2210: IN A CASE OF ATTEMPTED SUICIDE BY POISONING IS IT THE DUTY OF THE DOCTOR TO INFORM THE POLICE +1998-15444-0007-2211: THE BEST (EMETIC->AMATIC) IS THAT WHICH IS AT HAND +1998-15444-0008-2212: THE DOSE FOR (AN ADULT->NO DOUBT) IS TEN MINIMS +1998-15444-0009-2213: (APOMORPHINE->EPIMORPHONE) IS NOT (ALLIED->ALIT) IN PHYSIOLOGICAL ACTION TO MORPHINE AND MAY BE GIVEN IN CASES OF (NARCOTIC->NAUCOTIC) POISONING +1998-15444-0010-2214: TICKLING THE (FAUCES->FORCES) WITH (A->THE) FEATHER MAY EXCITE (VOMITING->RHOMETTING) +1998-15444-0011-2215: IN USING THE ELASTIC STOMACH TUBE SOME FLUID SHOULD BE INTRODUCED INTO THE STOMACH BEFORE ATTEMPTING TO EMPTY IT OR A 
PORTION OF THE MUCOUS (MEMBRANE->MEMORANE) MAY BE (SUCKED->SACKED) INTO THE APERTURE +1998-15444-0012-2216: THE TUBE SHOULD BE EXAMINED TO SEE THAT IT IS NOT BROKEN OR CRACKED AS ACCIDENTS HAVE HAPPENED FROM NEGLECTING THIS PRECAUTION +1998-15444-0013-2217: (ANTIDOTES ARE->AND HE DOES A) USUALLY GIVEN HYPODERMICALLY OR IF (BY->THE) MOUTH (IN->AND) THE FORM OF TABLETS +1998-15444-0014-2218: IN THE ABSENCE OF (A->THE) HYPODERMIC SYRINGE THE REMEDY MAY BE GIVEN BY THE RECTUM +1998-15444-0015-2219: NOTICE THE (SMELL->SMILE) COLOUR AND GENERAL APPEARANCE OF THE MATTER SUBMITTED FOR EXAMINATION +1998-15444-0016-2220: FOR THE SEPARATION OF AN ALKALOID THE FOLLOWING IS THE PROCESS OF (STAS OTTO->STATU) +1998-15444-0017-2221: THIS PROCESS IS BASED UPON THE PRINCIPLE THAT THE (SALTS->SOULS) OF THE (ALKALOIDS->ACOLITES) ARE SOLUBLE IN (ALCOHOL AND->ACCULENT) WATER AND INSOLUBLE IN ETHER +1998-15444-0018-2222: THE PURE (ALKALOIDS WITH->AKALOIDS WERE) THE EXCEPTION OF MORPHINE IN ITS CRYSTALLINE FORM (ARE->A) SOLUBLE (IN ETHER->BENEATH THEM) +1998-15444-0019-2223: TWO (COOL->UR) THE MIXTURE AND FILTER WASH THE RESIDUE WITH STRONG ALCOHOL AND MIX THE (FILTRATES->FIR TRADES) +1998-15444-0020-2224: THE (RESIDUE->READY YOU) MAY BE (SET->SAID) ASIDE FOR THE DETECTION OF THE METALLIC POISONS (IF->OF) SUSPECTED EXPEL THE (ALCOHOL->ACCOHOL) BY CAREFUL EVAPORATION +1998-15444-0021-2225: ON THE EVAPORATION OF THE ALCOHOL THE (RESINOUS->VEZENOUS) AND (FATTY MATTERS->FATIGMATIS) SEPARATE +1998-15444-0022-2226: EVAPORATE THE (FILTRATE->FEDERATE) TO A (SYRUP AND->CYRUP AN) EXTRACT WITH SUCCESSIVE PORTIONS OF ABSOLUTE ALCOHOL +1998-15444-0023-2227: SEPARATE THE ETHEREAL SOLUTION AND EVAPORATE +1998-15444-0024-2228: FIVE A PART OF THIS ETHEREAL SOLUTION IS (POURED->PUT) INTO A WATCH GLASS AND (ALLOWED->ALLOW) TO EVAPORATE +1998-15444-0025-2229: TO PURIFY IT (ADD->EDISM) A (SMALL->*) QUANTITY OF (DILUTE SULPHURIC->DELUDE SUFFER) ACID AND AFTER EVAPORATING TO THREE QUARTERS OF ITS (BULK->BARK) ADD A (SATURATED->SITUATED) SOLUTION OF CARBONATE OF POTASH OR SODA +1998-15444-0026-2230: (BOIL->BOY) THE (FINELY DIVIDED SUBSTANCE->FINALLY DIVIDEST ABSTANCE) WITH ABOUT ONE (EIGHTH->EIGHTHS) ITS (BULK->BARK) OF PURE (HYDROCHLORIC->HYDROCLOIC) ACID ADD FROM TIME TO TIME POTASSIC (CHLORATE->LOW RAGE) UNTIL THE SOLIDS ARE REDUCED TO A STRAW YELLOW FLUID +1998-15444-0027-2231: THE RESIDUE OF THE MATERIAL AFTER DIGESTION (WITH HYDROCHLORIC->WAS HYDROGLOIC) ACID AND (POTASSIUM CHLORATE->PROTESTING CHLORODE) MAY HAVE TO BE EXAMINED FOR SILVER LEAD AND BARIUM +1998-29454-0000-2157: A THOUSAND BLESSINGS FROM A GRATEFUL HEART +1998-29454-0001-2158: PERUSAL SAID THE (PAWNBROKER->PAN BROKER) THAT'S THE WAY TO (PERNOUNCE->PRONOUNCE) IT +1998-29454-0002-2159: HIS BOOKS TOLD HIM (THAT TREASURE IS->THE TREASURES) BEST HIDDEN UNDER LOOSE BOARDS (UNLESS->AND AS) OF COURSE YOUR HOUSE (HAS->HAD) A SECRET (PANEL->PANNER) WHICH HIS HAD NOT +1998-29454-0003-2160: HE GOT IT UP AND PUSHED HIS TREASURES AS FAR IN AS HE COULD ALONG THE ROUGH (CRUMBLY->CRAMBLY) SURFACE OF THE (LATH->LAST) AND PLASTER +1998-29454-0004-2161: WHEN DICKIE CAME DOWN HIS AUNT (SLIGHTLY SLAPPED->SAT HE SLEPT) HIM AND HE TOOK THE HALFPENNY AND LIMPED OFF OBEDIENTLY +1998-29454-0005-2162: HE HAD NEVER SEEN ONE BEFORE AND IT INTERESTED HIM EXTREMELY +1998-29454-0006-2163: HE LOOKED ABOUT HIM AND KNEW THAT HE DID NOT AT ALL KNOW WHERE HE WAS +1998-29454-0007-2164: WHAT'S (UP MATEY->THAT MAY TEA) LOST YOUR WAY DICKIE EXPLAINED +1998-29454-0008-2165: WHEN HE SAID (AVE->HAVE) I 
(BIN->BEEN) ASLEEP +1998-29454-0009-2166: HERE WE ARE SAID THE MAN +1998-29454-0010-2167: NOT (EXACKLY->EXACTLY) SAID THE MAN BUT IT'S ALL RIGHT +1998-29454-0011-2168: WHEN IT WAS OVER THE MAN ASKED DICKIE IF HE COULD WALK A LITTLE WAY AND WHEN DICKIE SAID HE COULD THEY SET OUT IN THE MOST FRIENDLY WAY SIDE BY SIDE +1998-29454-0012-2169: AND THE (TEA->TINEL) AND (ALL AN->*) THE EGG +1998-29454-0013-2170: AND THIS IS THE PRETTIEST PLACE EVER I SEE +1998-29454-0014-2171: I (SHALL->SHOULD) CATCH IT (A FAIR->IF HER) TREAT AS IT IS +1998-29454-0015-2172: SHE WAS (WAITIN->WAITING) FOR THE WOOD TO BOIL THE (KETTLE->CATTLE) WHEN (I->TO) COME OUT MOTHER +1998-29454-0016-2173: (AIN'T->AND) BAD WHEN SHE'S IN A GOOD TEMPER +1998-29454-0017-2174: THAT (AIN'T WHAT SHE'LL BE IN->ANNE BUT HER BEING) WHEN YOU GETS BACK +1998-29454-0018-2175: I GOT (TO->A) STICK IT SAID (DICKIE->DICKY) SADLY I'D BEST BE GETTING HOME +1998-29454-0019-2176: I WOULDN'T GO (OME->HOME) NOT (IF I WAS->A FAIR US) YOU SAID THE MAN +1998-29454-0020-2177: NO SAID DICKIE OH NO NO I NEVER +1998-29454-0021-2178: I (AIN'T IT YER->ENTER) HAVE I LIKE WHAT (YER AUNT DO->YOU AREN'TO) +1998-29454-0022-2179: WELL (THAT'LL->THAT) SHOW YOU (THE->A) SORT OF (MAN->MEN) I AM +1998-29454-0023-2180: THE MAN'S MANNER WAS SO KIND AND HEARTY THE WHOLE (ADVENTURE->ADVENTUR) WAS SO WONDERFUL AND NEW IS IT COUNTRY WHERE YOU GOING +1998-29454-0024-2181: THE SUN (SHOT->HAD) LONG GOLDEN BEAMS THROUGH THE GAPS (IN->AND) THE HEDGE +1998-29454-0025-2182: A BIRD (PAUSED->PASSED) IN ITS FLIGHT ON (A->*) BRANCH QUITE CLOSE AND CLUNG (THERE SWAYING->THEIR SWAIN) +1998-29454-0026-2183: HE TOOK OUT OF HIS POCKET (A NEW->AND YOUR) ENVELOPE (A NEW SHEET->AND YOU SEED) OF PAPER AND A NEW PENCIL READY SHARPENED BY MACHINERY +1998-29454-0027-2184: (AN->AND) I (ASKS->ASK) YOU LET ME COME (ALONGER->ALONG OF) YOU GOT THAT +1998-29454-0028-2185: (GET IT->GENISH) WROTE DOWN THEN DONE +1998-29454-0029-2186: THEN HE FOLDED IT AND PUT IT IN HIS POCKET +1998-29454-0030-2187: NOW (WE'RE SQUARE->HE IS QUEER) HE SAID +1998-29454-0031-2188: THEY COULD PUT A (MAN->MEN) AWAY FOR LESS THAN THAT +1998-29454-0032-2189: I SEE THAT (THERE IN->THEN) A BOOK SAID (DICKIE->DICK HAD) CHARMED +1998-29454-0033-2190: HE REWARD THE WAKE THE LAST OF THE ENGLISH AND I (WUNNERED->WANTED) WHAT IT STOOD FOR +1998-29454-0034-2191: WILD ONES (AIN'T ALF THE SIZE->AND A HALF SIGHS) I LAY +1998-29454-0035-2192: ADVENTURES I SHOULD THINK SO +1998-29454-0036-2193: AH SAID (DICKIE->DICKY) AND A (FULL->FOOT) SILENCE FELL BETWEEN THEM +1998-29454-0037-2194: THAT WAS CHARMING BUT IT WAS PLEASANT TOO TO WASH THE (MUD OFF->MATVE) ON THE WET GRASS +1998-29454-0038-2195: (DICKIE->DICKY) ALWAYS REMEMBERED THAT MOMENT +1998-29454-0039-2196: SO YOU SHALL SAID MISTER BEALE A (REG'LER->REG'LAR) WASH ALL OVER THIS VERY NIGHT I ALWAYS LIKE A WASH (MESELF->MYSELF) +1998-29454-0040-2197: SOME (BLOKES->LOOSE) THINK IT PAYS TO BE DIRTY BUT IT DON'T +1998-29454-0041-2198: IF (YOU'RE->YOU) CLEAN THEY SAY HONEST POVERTY (AN->AND) IF YOU'RE DIRTY THEY SAY SERVE YOU RIGHT +1998-29454-0042-2199: YOU ARE GOOD SAID DICKIE I DO LIKE YOU +1998-29454-0043-2200: I KNOW YOU WILL SAID DICKIE WITH ENTHUSIASM I KNOW (OW->HOW) GOOD YOU ARE +1998-29454-0044-2201: BLESS ME SAID MISTER BEALE UNCOMFORTABLY WELL (THERE->THEN) +1998-29454-0045-2202: (STEP OUT SONNY->SABATANI) OR WE'LL NEVER GET THERE THIS (SIDE->OUT OF) CHRISTMAS +1998-29454-0046-2203: WELL (YOU'LL->YOU) KNOW ALL ABOUT IT PRESENTLY +1998-29455-0000-2232: THE SINGING AND LAUGHING WENT ON 
LONG AFTER HE HAD FALLEN ASLEEP AND IF (LATER->LATE) IN THE EVENING (THERE->THEY) WERE LOUD (VOICED->VOICE) ARGUMENTS OR QUARRELS EVEN (DICKIE->DICKY) DID NOT HEAR THEM +1998-29455-0001-2233: WHAT'S (ALL->ON) THAT THERE DICKIE ASKED POINTING TO THE (ODD KNOBBLY->OTT KNOBLY) BUNDLES OF ALL SORTS AND SHAPES TIED ON TO THE PERAMBULATOR'S FRONT +1998-29455-0002-2234: TELL (YER->YOU) WHAT MATE LOOKS TO ME AS IF (I'D->I) TOOK A FANCY TO YOU +1998-29455-0003-2235: (SWELP->SWAP) ME HE SAID HELPLESSLY +1998-29455-0004-2236: (OH->O) LOOK SAID (DICKIE->DICKY) THE FLOWERS +1998-29455-0005-2237: (THEY'RE->THEY ARE) ONLY (WEEDS->REEDS) SAID BEALE +1998-29455-0006-2238: BUT I SHALL HAVE THEM (WHILE THEY'RE->WHERE THEY ARE) ALIVE SAID (DICKIE->DICKY) AS HE HAD SAID TO THE PAWNBROKER (ABOUT->BY) THE MOONFLOWERS +1998-29455-0007-2239: (HI->AY) THERE GOES A RABBIT +1998-29455-0008-2240: (SEE IM CROST THE ROAD->SEEM QUEST ABOUT) THERE SEE (HIM->EM) +1998-29455-0009-2241: HOW BEAUTIFUL SAID (DICKIE->DICKY) WRIGGLING (WITH->WIS) DELIGHT +1998-29455-0010-2242: THIS LIFE OF THE RABBIT AS DESCRIBED BY MISTER BEALE WAS THE CHILD'S FIRST GLIMPSE OF FREEDOM I'D LIKE TO BE A RABBIT +1998-29455-0011-2243: (OW'M I TO WHEEL->ALL MY TOWER) THE (BLOOMIN->ROOM AND) PRAM IF YOU (GOES ON->GO SON) LIKE (AS->US) IF YOU WAS A (BAG->BICK) OF (EELS->FIELDS) +1998-29455-0012-2244: I LIKE YOU (NEXTER->NEXT TO) MY OWN (DADDY->DIRTY) AND MISTER (BAXTER->BEXT THE) NEXT DOOR +1998-29455-0013-2245: THAT'S ALL RIGHT SAID MISTER BEALE AWKWARDLY +1998-29455-0014-2246: DICKIE (QUICK TO->QUICKLY) IMITATE TOUCHED HIS +1998-29455-0015-2247: POOR LITTLE MAN SAID THE LADY YOU MISS YOUR MOTHER DON'T YOU +1998-29455-0016-2248: OH WELL DONE LITTLE (UN->ONE) SAID MISTER (BEALE->BEE) TO HIMSELF +1998-29455-0017-2249: THE TWO TRAVELLERS WERE LEFT FACING EACH OTHER THE RICHER BY A PENNY AND (OH->O) WONDERFUL GOOD FORTUNE A WHOLE HALF CROWN +1998-29455-0018-2250: NO I NEVER SAID DICKIE (ERE'S->YES) THE (STEEVER->STEVEN) +1998-29455-0019-2251: YOU STICK TO THAT SAID (BEALE->BEER) RADIANT WITH DELIGHT (YOU'RE->YOU ARE) A FAIR MASTERPIECE YOU ARE YOU EARNED IT HONEST IF EVER (A->*) KID DONE +1998-29455-0020-2252: THEY WENT ON UP THE HILL AS HAPPY AS ANY ONE NEED WISH TO BE +1998-29455-0021-2253: PLEASE (DO NOT->DON'T) BE TOO SHOCKED +1998-29455-0022-2254: REMEMBER THAT NEITHER OF THEM KNEW ANY BETTER +1998-29455-0023-2255: TO THE (ELDER->OTHER) TRAMP LIES AND (BEGGING WERE->PEGGING WHEN) NATURAL MEANS OF LIVELIHOOD +1998-29455-0024-2256: BUT YOU SAID THE BED (WITH->WAS) THE GREEN CURTAINS URGED DICKIE +1998-29455-0025-2257: WHICH THIS (AIN'T->END) NOT BY NO MEANS +1998-29455-0026-2258: THE NIGHT IS FULL OF INTERESTING LITTLE SOUNDS THAT WILL NOT AT FIRST LET YOU SLEEP THE RUSTLE OF LITTLE (WILD->WHITE) THINGS IN THE (HEDGES->HATCHES) THE BARKING OF DOGS (IN->AND) DISTANT FARMS THE CHIRP OF CRICKETS AND THE CROAKING OF FROGS +1998-29455-0027-2259: THE NEW GAME OF BEGGING AND INVENTING STORIES TO INTEREST THE PEOPLE FROM WHOM IT WAS (WORTH WHILE->WORSE WIDE) TO BEG WENT ON GAILY DAY BY DAY AND WEEK BY WEEK AND DICKIE BY CONSTANT PRACTICE GREW SO CLEVER (AT->*) TAKING HIS PART IN THE ACTING THAT MISTER (BEALE->BEA) WAS QUITE DAZED WITH ADMIRATION +1998-29455-0028-2260: BLESSED (IF I->FOR) EVER SEE SUCH A (NIPPER->NIBBER) HE SAID OVER AND OVER AGAIN +1998-29455-0029-2261: CLEVER AS A (TRAINDAWG E->TRAIN DOG) IS (AN ALL OUTER IS OWN EAD->IN OR OUTER'S OWNETTE) +1998-29455-0030-2262: I (AIN'T->AM) SURE AS I (ADN'T->HADN'T) BETTER STICK TO THE ROAD AND KEEP AWAY 
FROM OLD (ANDS->ENDS) LIKE (YOU JIM->EUGEUM) +1998-29455-0031-2263: I (OPE E'S CLEVER->OPEUS LOVE) ENOUGH TO DO (WOT E'S TOLD KEEP IS MUG SHUT->WHAT HE STOWED HE WAS MUCH AT) THAT'S ALL +1998-29455-0032-2264: IF (E'S->HE) STRAIGHT (E'LL->YOU'LL) DO FOR ME AND IF HE (AIN'T->AND) I'LL DO FOR (IM->HIM) SEE +1998-29455-0033-2265: SEE THAT (BLOKE JUST->LOG DOES) NOW SAID MISTER BEALE (YUSS->YES) SAID DICKIE +1998-29455-0034-2266: WELL YOU NEVER SEE (IM->EM) +1998-29455-0035-2267: IF ANY ONE (ARSTS->ASKS) YOU IF YOU EVER SEE (IM->HIM) YOU NEVER (SET->SAID) EYES ON (IM->HIM) IN ALL (YOUR->YOU'RE) BORN NOT TO REMEMBER (IM->HIM) +1998-29455-0036-2268: (DICKIE->DICKY) WAS FULL OF QUESTIONS BUT MISTER (BEALE->BEE) HAD NO ANSWERS (FOR->WERE) THEM +1998-29455-0037-2269: NOR WAS IT SUNDAY ON WHICH THEY TOOK A REST AND WASHED THEIR SHIRTS ACCORDING TO MISTER BEALE'S RULE OF LIFE +1998-29455-0038-2270: THEY DID NOT STAY THERE BUT WALKED OUT ACROSS THE DOWNS (WHERE->WITH) THE (SKYLARKS WERE->SKYLACKS WAS) SINGING AND ON A DIP OF THE DOWNS CAME UPON GREAT STONE (WALLS->WARDS) AND TOWERS VERY STRONG AND GRAY +1998-29455-0039-2271: WHAT'S THAT THERE SAID (DICKIE->DICKY) +2033-164914-0000-661: REPLIED HE OF A TRUTH I HEARD HIM NOT AND I WOT HIM NOT AND FOLKS ARE ALL SLEEPING +2033-164914-0001-662: BUT SHE SAID WHOMSOEVER THOU SEEST AWAKE HE IS THE (RECITER->RESIDER) +2033-164914-0002-663: THEN SAID THE EUNUCH ART THOU HE WHO REPEATED POETRY BUT NOW AND MY LADY HEARD HIM +2033-164914-0003-664: REJOINED THE EUNUCH WHO THEN WAS THE RECITER POINT HIM OUT TO ME +2033-164914-0004-665: BY ALLAH REPLIED THE FIREMAN I TELL THEE THE TRUTH +2033-164914-0005-666: TELL ME WHAT HAPPENED QUOTH (ZAU AL->OWL) MAKAN +2033-164914-0006-667: WHAT (AILS->ELSE) THEE THEN THAT THOU MUST NEEDS (RECITE->RESIDE) VERSES SEEING THAT WE ARE TIRED OUT WITH WALKING AND WATCHING AND ALL THE FOLK ARE ASLEEP FOR THEY REQUIRE SLEEP TO REST THEM OF THEIR FATIGUE +2033-164914-0007-668: AND HE ALSO (IMPROVISED->PROVISED) THE TWO FOLLOWING DISTICHS +2033-164914-0008-669: WHEN (NUZHAT->UZHAT) AL ZAMAN HEARD THE FIRST IMPROVISATION SHE CALLED TO (MIND->MINE) HER FATHER AND HER MOTHER AND HER BROTHER AND THEIR (WHILOME->WILL ON) HOME THEN SHE WEPT AND CRIED (AT->TO) THE EUNUCH AND SAID TO HIM WOE TO THEE +2033-164914-0009-670: HE WHO RECITED THE FIRST TIME (HATH->HAD) RECITED A SECOND TIME AND I HEARD HIM HARD BY +2033-164914-0010-671: BY ALLAH AN THOU FETCH HIM NOT TO ME I WILL ASSUREDLY ROUSE THE CHAMBERLAIN ON THEE AND HE SHALL BEAT THEE AND CAST THEE OUT +2033-164914-0011-672: BUT TAKE THESE HUNDRED (DINERS->DINNERS) AND GIVE THEM TO THE SINGER AND BRING HIM TO ME GENTLY AND DO HIM NO HURT +2033-164914-0012-673: RETURN QUICKLY AND LINGER NOT +2033-164914-0013-674: WHEN IT WAS THE SEVENTY THIRD NIGHT +2033-164914-0014-675: BUT THE EUNUCH SAID I WILL NOT LEAVE THEE TILL THOU SHOW ME WHO IT WAS THAT RECITED THE VERSES FOR I DREAD RETURNING TO MY LADY WITHOUT HIM +2033-164914-0015-676: NOW WHEN THE FIREMAN HEARD THESE WORDS HE FEARED FOR (ZAU AL->ZOUAL) MAKAN AND WEPT WITH EXCEEDING WEEPING AND SAID TO THE EUNUCH BY ALLAH IT WAS NOT I AND I KNOW HIM NOT +2033-164914-0016-677: SO GO THOU TO THY STATION AND IF THOU AGAIN MEET ANY ONE AFTER THIS HOUR RECITING AUGHT OF POETRY WHETHER HE BE NEAR OR FAR IT WILL BE I OR SOME ONE I KNOW AND THOU SHALT NOT LEARN OF HIM BUT BY ME +2033-164914-0017-678: THEN HE KISSED THE EUNUCH'S HEAD AND SPAKE HIM FAIR TILL HE WENT AWAY BUT THE (CASTRATO->GASTRATO) FETCHED (A->THE) ROUND AND RETURNING SECRETLY CAME AND STOOD 
BEHIND THE FIREMAN FEARING TO GO BACK TO HIS MISTRESS WITHOUT TIDINGS +2033-164914-0018-679: I SAY WHAT MADE MY (IGNOMY WHATE'ER->IGNOMINY WHATEVER) THE (BITTER CUP->BEACHER CARP) I DRAIN FAR BE (FRO->FROM) ME (THAT->THY) LAND TO FLEE NOR WILL I BOW TO THOSE WHO BLAME AND FOR SUCH LOVE WOULD DEAL ME SHAME +2033-164914-0019-680: THEN SAID THE EUNUCH TO (ZAU AL->ZAWAL) MAKAN PEACE BE WITH THEE O MY LORD +2033-164914-0020-681: O MY LORD CONTINUED THE EUNUCH AND SHAHRAZAD PERCEIVED (*->THAT) THE DAWN OF DAY AND CEASED TO SAY HER PERMITTED SAY +2033-164914-0021-682: WE WILL DO THEE NO UPRIGHT O MY SON NOR WRONG THEE IN AUGHT BUT OUR OBJECT IS THAT THOU BEND THY GRACIOUS STEPS WITH ME TO MY MISTRESS TO RECEIVE HER ANSWER AND (RETURN IN WEAL->RETURNING WHEEL) AND SAFETY AND THOU SHALT HAVE A HANDSOME PRESENT AS ONE WHO BRINGETH GOOD NEWS +2033-164914-0022-683: THEN THE EUNUCH WENT OUT TO (ZAU AL->ZAO) MAKAN AND SAID TO HIM RECITE WHAT (VERSES->VERSE IS) THOU KNOWEST FOR MY (LADY IS HERE->LADY'S HEAR) HARD BY LISTENING TO THEE AND AFTER I WILL ASK THEE OF THY NAME AND (THY->THINE) NATIVE COUNTRY AND THY CONDITION +2033-164915-0000-643: AND ALSO THESE +2033-164915-0001-644: THEN SHE THREW HERSELF UPON HIM AND HE GATHERED HER TO HIS BOSOM AND (THE TWAIN->ITWAIN) FELL DOWN IN A FAINTING FIT +2033-164915-0002-645: WHEN THE (EUNUCH->EUNUCHS) SAW (THIS->THESE) CASE HE WONDERED AT THEM AND THROWING OVER THEM SOMEWHAT TO COVER THEM WAITED TILL THEY SHOULD RECOVER +2033-164915-0003-646: AFTER A WHILE THEY CAME TO THEMSELVES AND (NUZHAT->USHART) AL ZAMAN REJOICED WITH EXCEEDING JOY OPPRESSION AND DEPRESSION (LEFT HER->LAUGHTER) AND GLADNESS TOOK THE MASTERY OF HER AND SHE REPEATED THESE VERSES +2033-164915-0004-647: ACCORDINGLY SHE TOLD HIM ALL THAT HAD COME TO HER SINCE THEIR SEPARATION AT THE KHAN AND WHAT HAD HAPPENED TO HER WITH THE BADAWI HOW THE MERCHANT HAD BOUGHT HER OF HIM AND HAD TAKEN HER TO HER BROTHER (SHARRKAN->SHARKAN) AND HAD SOLD HER TO HIM HOW HE HAD FREED HER AT THE TIME OF BUYING HOW HE HAD MADE (A->HER) MARRIAGE CONTRACT WITH HER AND HAD GONE IN TO HER AND HOW THE KING THEIR SIRE HAD SENT AND ASKED FOR HER FROM (SHARRKAN->SHARKAN) +2033-164915-0005-648: BUT NOW GO TO THY MASTER AND BRING HIM QUICKLY TO ME +2033-164915-0006-649: THE CHAMBERLAIN CALLED THE CASTRATO AND CHARGED HIM TO DO ACCORDINGLY SO HE REPLIED I HEAR AND I OBEY AND HE TOOK HIS PAGES WITH HIM AND WENT OUT IN SEARCH OF THE (STOKER->STOCKER) TILL HE FOUND HIM IN THE REAR OF THE CARAVAN (GIRTHING->GIRDING) HIS ASS AND PREPARING FOR FLIGHT +2033-164915-0007-650: SHE SAID IT HATH REACHED ME O AUSPICIOUS KING THAT WHEN THE (STOKER GIRTHED->STOCKER GIRDED) HIS (ASS->EYES) FOR FLIGHT AND BESPAKE HIMSELF SAYING OH WOULD I KNEW WHAT IS BECOME OF HIM +2033-164915-0008-651: I BELIEVE HE HATH DENOUNCED ME TO THE EUNUCH HENCE THESE PAGES (ET->AT) ABOUT ME AND HE HATH MADE ME AN ACCOMPLICE IN HIS CRIME +2033-164915-0009-652: WHY DIDST THOU SAY I NEVER REPEATED THESE COUPLETS NOR DO I KNOW WHO REPEATED THEM WHEN IT WAS THY COMPANION +2033-164915-0010-653: BUT NOW I WILL NOT LEAVE THEE BETWEEN THIS PLACE AND (BAGHDAD->BAGDAD) AND WHAT BETIDETH THY COMRADE SHALL BETIDE THEE +2033-164915-0011-654: (TWAS->TOWARDS) AS I FEARED THE (COMING ILLS->CARMINALS) DISCERNING BUT (UNTO->ON TO) ALLAH WE ARE ALL RETURNING +2033-164915-0012-655: THEN THE EUNUCH CRIED UPON (*->IN) THE PAGES SAYING TAKE HIM OFF THE ASS +2033-164915-0013-656: AND HE ANSWERED I AM THE CHAMBERLAIN OF THE EMIR OF DAMASCUS KING (SHARRKAN SON OF OMAR BIN->SHARKAN SUNG OVER 
MARBIN) AL (NU'UMAN->NUMA) LORD OF (BAGHDAD->WABDAD) AND OF THE LAND OF KHORASAN AND I BRING TRIBUTE AND PRESENTS FROM HIM TO HIS FATHER IN BAGHDAD +2033-164915-0014-657: (SO FARE YE->SOPHIA) FORWARDS NO HARM SHALL (BEFAL->BEFALL) YOU TILL YOU JOIN HIS GRAND WAZIR (DANDAN->THAN DAN) +2033-164915-0015-658: THEN HE BADE HIM BE SEATED AND QUESTIONED HIM AND HE REPLIED THAT HE WAS (CHAMBERLAIN->TREMBLING) TO THE EMIR OF DAMASCUS AND WAS BOUND TO KING OMAR WITH PRESENTS AND THE TRIBUTE OF SYRIA +2033-164915-0016-659: SO IT WAS AGREED THAT WE GO TO DAMASCUS AND FETCH THENCE THE KING'S SON (SHARRKAN->SHARKAN) AND (MAKE HIM->MAY CAME) SULTAN OVER HIS FATHER'S REALM +2033-164915-0017-660: AND AMONGST THEM WERE SOME WHO WOULD HAVE CHOSEN THE CADET (ZAU AL->THOUA) MAKAN FOR QUOTH THEY HIS NAME BE LIGHT OF THE PLACE AND HE HATH A SISTER NUZHAT AL ZAMAN (HIGHS->HIES) THE DELIGHT OF THE TIME BUT THEY SET OUT FIVE YEARS AGO FOR AL (HIJAZ->KI JAS) AND NONE WOTTETH WHAT IS BECOME OF THEM +2033-164916-0000-684: SO HE TURNED TO THE WAZIR DANDAN AND SAID TO HIM VERILY YOUR TALE IS A (WONDER->WANDER) OF WONDERS +2033-164916-0001-685: (KNOW->NO) O CHIEF WAZIR THAT HERE WHERE YOU HAVE ENCOUNTERED ME ALLAH HATH GIVEN YOU REST FROM FATIGUE AND BRINGETH YOU YOUR DESIRE AFTER THE EASIEST OF FASHIONS FOR (THAT->LET) HIS ALMIGHTY WILL (RESTORETH->RESTORE IT) TO YOU (ZAU AL->THOU A) MAKAN AND HIS SISTER NUZHAT AL ZAMAN WHEREBY WE WILL SETTLE THE MATTER AS WE EASILY CAN +2033-164916-0002-686: WHEN THE (MINISTER->MEANS SIR) HEARD THESE WORDS HE REJOICED WITH (GREAT->GRAY) JOY AND SAID O CHAMBERLAIN TELL ME THE TALE OF THE TWAIN AND WHAT (BEFEL->BEFELL) THEM AND THE CAUSE OF THEIR LONG ABSENCE +2033-164916-0003-687: (ZAU AL->ZAO) MAKAN BOWED HIS HEAD (AWHILE->A WHILE) AND THEN SAID I ACCEPT (THIS->THE) POSITION FOR INDEED THERE WAS NO REFUSING AND HE WAS CERTIFIED THAT THE CHAMBERLAIN HAD COUNSELLED HIM WELL AND WISELY AND (SET->SAT) HIM ON THE RIGHT WAY +2033-164916-0004-688: THEN HE ADDED O MY UNCLE HOW SHALL I DO WITH MY BROTHER (SHARRKAN->SHARKAN) +2033-164916-0005-689: AFTER (AWHILE->A WHILE) THE DUST DISPERSED (*->THEM) AND THERE APPEARED UNDER IT THE ARMY OF BAGHDAD AND KHORASAN A CONQUERING HOST LIKE THE (FULL->POOL) TIDE SEA AND SHAHRAZAD PERCEIVED (*->THAT) THE DAWN OF DAY AND CEASED TO SAY HER PERMITTED SAY +2033-164916-0006-690: WHEN IT WAS THE SEVENTY EIGHTH NIGHT +2033-164916-0007-691: (AND IN IT ALL->ANY NEAT OR) REJOICED AT THE ACCESSION OF THE LIGHT OF THE PLACE +2033-164916-0008-692: LASTLY THE MINISTER WENT IN AND KISSED THE GROUND BEFORE (ZAU AL->ZAO) MAKAN WHO ROSE TO MEET HIM SAYING WELCOME O WAZIR AND (SIRE SANS PEER->SIRES SONSPIER) +2033-164916-0009-693: MOREOVER THE SULTAN COMMANDED HIS WAZIR (DANDAN->DAN) CALL (A->AT) TEN DAYS HALT OF THE ARMY THAT HE MIGHT BE PRIVATE WITH HIM AND LEARN FROM HIM HOW AND WHEREFORE HIS FATHER HAD BEEN SLAIN +2033-164916-0010-694: HE THEN REPAIRED TO THE (HEART->HEARTS) OF THE ENCAMPMENT AND ORDERED (*->THAT) THE HOST TO HALT TEN DAYS +2414-128291-0000-2689: WHAT HATH HAPPENED (UNTO->TO) ME +2414-128291-0001-2690: HE ASKED HIMSELF SOMETHING WARM AND LIVING (QUICKENETH->QUICKENED) ME IT MUST BE IN THE NEIGHBOURHOOD +2414-128291-0002-2691: WHEN HOWEVER (ZARATHUSTRA->THE TWO STRAW) WAS QUITE NIGH (UNTO->AND TO) THEM THEN DID HE HEAR PLAINLY (THAT A->WITH) HUMAN VOICE (SPAKE->PIKE) IN THE MIDST OF THE (KINE->KIND) AND (APPARENTLY->THE FRIENDLY) ALL OF THEM HAD TURNED THEIR HEADS TOWARDS THE SPEAKER +2414-128291-0003-2692: (WHAT->FOR) DO I HERE SEEK 
+2414-128291-0004-2693: ANSWERED HE THE SAME THAT THOU SEEKEST THOU MISCHIEF MAKER THAT IS TO SAY HAPPINESS UPON EARTH +2414-128291-0005-2694: FOR I TELL THEE THAT I HAVE (ALREADY->ALL WE) TALKED HALF A MORNING UNTO THEM AND JUST NOW (WERE THEY->WITH) ABOUT TO GIVE ME (THEIR->THE) ANSWER +2414-128291-0006-2695: HE WOULD NOT BE RID OF HIS (AFFLICTION->AFFLICATION) +2414-128291-0007-2696: WHO (HATH->HAD) NOT AT PRESENT HIS HEART HIS MOUTH AND HIS EYES FULL OF DISGUST +2414-128291-0008-2697: THOU ALSO THOU ALSO +2414-128291-0009-2698: BUT BEHOLD (THESE KINE->HIS KIND) +2414-128291-0010-2699: (THE KINE->DECLINE) HOWEVER GAZED AT IT ALL AND WONDERED +2414-128291-0011-2700: (WANTON AVIDITY->WARRENTON ALD DUTY) BILIOUS ENVY CAREWORN REVENGE (POPULACE->POPULOUS) PRIDE ALL (THESE->THIS) STRUCK (MINE->MIGHT) EYE +2414-128291-0012-2701: IT IS NO LONGER TRUE (THAT THE->NEITHER) POOR ARE BLESSED +2414-128291-0013-2702: THE KINGDOM OF HEAVEN HOWEVER IS WITH (THE KINE->A KIND) AND WHY IS IT NOT WITH (THE->A) RICH +2414-128291-0014-2703: WHY (DOST->THOSE) THOU TEMPT ME +2414-128291-0015-2704: ANSWERED (THE OTHER->HER) +2414-128291-0016-2705: THOU KNOWEST IT THYSELF BETTER EVEN THAN I +2414-128291-0017-2706: (THUS SPAKE->DOES SPEAK) THE (PEACEFUL->BEATHFUL) ONE AND PUFFED HIMSELF AND (PERSPIRED->POSPIRED) WITH HIS WORDS (SO THAT->FOR IN) THE (KINE WONDERED ANEW->KIND WOUNDED I KNEW) +2414-128291-0018-2707: THOU DOEST VIOLENCE TO THYSELF THOU PREACHER ON THE MOUNT (WHEN->AND) THOU USEST SUCH (SEVERE->SAVIER) WORDS +2414-128291-0019-2708: THEY ALSO (ABSTAIN->ABSTAINED) FROM ALL HEAVY (THOUGHTS->TORCH) WHICH INFLATE THE HEART +2414-128291-0020-2709: WELL +2414-128291-0021-2710: (SAID ZARATHUSTRA->SAYS THE ACCUSTRA) THOU (SHOULDST->SHOULDEST) ALSO SEE (MINE->MY) ANIMALS (MINE->MIGHT) EAGLE AND MY SERPENT THEIR LIKE DO NOT AT PRESENT EXIST ON EARTH +2414-128291-0022-2711: AND (TALK->TALKED) TO MINE ANIMALS OF THE HAPPINESS OF ANIMALS +2414-128291-0023-2712: NOW HOWEVER TAKE LEAVE AT ONCE OF (THY KINE->THEIR KIND) THOU STRANGE ONE +2414-128291-0024-2713: THOU (AMIABLE->ADMIABLE) ONE +2414-128291-0025-2714: FOR THEY ARE THY WARMEST FRIENDS AND (PRECEPTORS->PERCEPTIVES) +2414-128291-0026-2715: THOU (EVIL FLATTERER->EVE IS SLATTERER) +2414-128292-0000-2618: WHITHER (HATH->HAD) MY (LONESOMENESS GONE->LONESOME DISCOUR) SPAKE HE +2414-128292-0001-2619: MY SHADOW CALLETH ME +2414-128292-0002-2620: WHAT MATTER ABOUT MY SHADOW +2414-128292-0003-2621: LET IT RUN AFTER ME I (RUN->RAN) AWAY FROM IT +2414-128292-0004-2622: (THUS SPAKE ZARATHUSTRA->THE SPEAKER TOO STRIKE) TO HIS HEART AND RAN AWAY +2414-128292-0005-2623: VERILY MY FOLLY HATH GROWN BIG IN THE MOUNTAINS +2414-128292-0006-2624: NOW DO I HEAR SIX OLD (FOOLS->FOOTS) LEGS RATTLING BEHIND ONE ANOTHER +2414-128292-0007-2625: (BUT DOTH ZARATHUSTRA->BY DIRTS ARTISTRA) NEED TO BE FRIGHTENED BY (HIS->A) SHADOW +2414-128292-0008-2626: ALSO (METHINKETH->METHINK IT) THAT AFTER ALL IT (HATH->HAD) LONGER (LEGS->LESS) THAN MINE +2414-128292-0009-2627: FOR WHEN (ZARATHUSTRA SCRUTINISED->THEIR TWO STREETS CRIED) HIM WITH HIS GLANCE HE WAS FRIGHTENED AS BY (A SUDDEN->ASSERTED) APPARITION SO SLENDER SWARTHY HOLLOW AND WORN OUT (DID THIS->WITH HIS) FOLLOWER (APPEAR->APPEARED) +2414-128292-0010-2628: (ASKED ZARATHUSTRA VEHEMENTLY->I OBJECT TO ESTRAVA IMAGEDLY) WHAT (DOEST->DO WEST) THOU HERE +2414-128292-0011-2629: AND WHY CALLEST THOU THYSELF MY SHADOW +2414-128292-0012-2630: THOU ART NOT PLEASING (UNTO->IN TO) ME +2414-128292-0013-2631: MUST I EVER BE ON THE WAY 
+2414-128292-0014-2632: O (EARTH->ART) THOU HAST BECOME (TOO->TO) ROUND FOR ME +2414-128292-0015-2633: (WHEN THE->WITH A) DEVIL (CASTETH->CAST AT) HIS SKIN DOTH NOT HIS NAME ALSO FALL AWAY IT IS ALSO (SKIN->SKINNED) +2414-128292-0016-2634: THE DEVIL HIMSELF IS PERHAPS (SKIN->KIN) +2414-128292-0017-2635: SOMETIMES I MEANT TO LIE AND BEHOLD +2414-128292-0018-2636: THEN (ONLY->ALLEY) DID I (HIT->HATE) THE TRUTH +2414-128292-0019-2637: HOW HAVE I STILL (INCLINATION->INCLINATIONS) +2414-128292-0020-2638: (HAVE->ERE) I STILL A (GOAL->GOLD) +2414-128292-0021-2639: A (HAVEN TOWARDS->HEROD DOORS) WHICH MY (SAIL IS SET->SAILORS SAID) +2414-128292-0022-2640: FOR IT (DO->TOO) I ASK AND SEEK AND (HAVE SOUGHT BUT->HATH THOUGHT IT) HAVE NOT FOUND IT +2414-128292-0023-2641: (O->OR) ETERNAL EVERYWHERE (O ETERNAL->WHO HAD TURNED OUT) NOWHERE (O ETERNAL->WHO HAD TURNED) IN VAIN +2414-128292-0024-2642: THOU ART MY SHADOW +2414-128292-0025-2643: SAID HE AT LAST SADLY +2414-128292-0026-2644: THY DANGER IS NOT SMALL THOU FREE SPIRIT AND (WANDERER->WONDER) +2414-128292-0027-2645: (THEY->THE) SLEEP QUIETLY THEY (ENJOY->ENJOYED) THEIR NEW SECURITY +2414-128292-0028-2646: (BEWARE LEST->BE REALIZED) IN THE (END->AID) A NARROW (FAITH CAPTURE THEE->FATE CAPTURED THE) A HARD (RIGOROUS DELUSION->RECKLESS VOLUTION) +2414-128292-0029-2647: FOR NOW EVERYTHING THAT IS NARROW AND FIXED (SEDUCETH->SEDUCE IT) AND (TEMPTETH->TEMPT IT) THEE +2414-128292-0030-2648: THOU HAST LOST (THY GOAL->DANGLE) +2414-128292-0031-2649: (THOU->THOUGH) POOR ROVER AND RAMBLER (THOU->NOW) TIRED (BUTTERFLY->BUT TO FLY) +2414-128292-0032-2650: WILT THOU HAVE (A REST->ARREST) AND A HOME THIS EVENING +2414-159411-0000-2653: ONCE UPON (A->HER) TIME A BRAHMAN WHO WAS WALKING ALONG THE ROAD CAME UPON AN IRON CAGE IN WHICH A GREAT TIGER (HAD BEEN SHUT->ADMIRED) UP BY THE (VILLAGERS->VILLAGES) WHO CAUGHT HIM +2414-159411-0001-2654: THE (BRAHMAN->BRAMIN) ANSWERED NO I WILL NOT FOR IF I LET YOU OUT OF THE CAGE YOU WILL EAT ME +2414-159411-0002-2655: (OH->ALL) FATHER OF MERCY ANSWERED THE TIGER IN TRUTH THAT I WILL NOT +2414-159411-0003-2656: I WILL NEVER BE SO UNGRATEFUL ONLY LET ME OUT THAT I MAY DRINK SOME WATER AND RETURN +2414-159411-0004-2657: (THEN->AND IN) THE (BRAHMAN->BRAMMING) TOOK PITY ON HIM AND OPENED THE CAGE DOOR BUT NO SOONER HAD HE (DONE->TURNED) SO THAN THE TIGER JUMPING OUT SAID NOW I WILL EAT YOU FIRST AND DRINK THE WATER AFTERWARDS +2414-159411-0005-2658: SO THE (BRAHMAN->BRAMID) AND THE TIGER WALKED ON TILL THEY CAME TO A (BANYAN->BENDONED) TREE AND THE (BRAHMAN->BRAMMEN) SAID TO IT (BANYAN->BANNY) TREE (BANYAN TREE HEAR->BANDREE HERE) AND GIVE (JUDGMENT->GERMAN) +2414-159411-0006-2659: ON WHAT MUST I GIVE JUDGMENT ASKED THE (BANYAN->BEN) TREE +2414-159411-0007-2660: (THIS TIGER->THE STAGER) SAID THE (BRAHMAN BEGGED->BRAMIN BEG) ME TO LET HIM OUT OF HIS CAGE TO DRINK A LITTLE WATER AND HE PROMISED NOT TO HURT ME IF I DID SO BUT NOW THAT I HAVE (LET->LEFT) HIM OUT HE WISHES TO EAT ME +2414-159411-0008-2661: (IS IT JUST->IT'S A JEALOUS) THAT HE SHOULD DO SO OR NO +2414-159411-0009-2662: (LET->LAID) THE TIGER EAT THE MAN FOR MEN ARE AN UNGRATEFUL RACE +2414-159411-0010-2663: SIR (CAMEL->CAMELO) SIR (CAMEL->CAMEO) CRIED THE BRAHMAN (HEAR->HERE) AND GIVE (JUDGMENT->JAGIMENT) +2414-159411-0011-2664: AT A LITTLE DISTANCE THEY FOUND A BULLOCK LYING BY THE (ROADSIDE->ROAD'S HEAD) +2414-159411-0012-2665: IS IT FAIR THAT HE SHOULD DO SO OR NOT +2414-159411-0013-2666: LET THE TIGER EAT THE MAN FOR MEN HAVE NO PITY +2414-159411-0014-2667: THREE OUT 
OF THE SIX (HAD->AND) GIVEN JUDGMENT AGAINST THE BRAHMAN (BUT->WAS) STILL HE DID NOT LOSE ALL HOPE AND (DETERMINED->TO TURN MIND) TO ASK THE OTHER THREE +2414-159411-0015-2668: ON WHAT MUST I GIVE (JUDGMENT->JULIET) ASKED THE EAGLE +2414-159411-0016-2669: THE BRAHMAN (STATED->SUITED) THE CASE AND THE EAGLE ANSWERED WHENEVER MEN SEE ME THEY TRY TO SHOOT ME (THEY CLIMB->DECLINED) THE ROCKS AND (STEAL->STEED) AWAY MY LITTLE ONES +2414-159411-0017-2670: THEN THE TIGER BEGAN TO ROAR AND SAID (THE->*) JUDGMENT OF ALL IS AGAINST YOU O (BRAHMAN->BRAHMIN) +2414-159411-0018-2671: AFTER THIS THEY SAW AN ALLIGATOR AND THE (BRAHMAN->BRAMA) RELATED THE MATTER TO HIM HOPING FOR A MORE (FAVORABLE->FAVOURABLE) VERDICT +2414-159411-0019-2672: (BUT->WITH) THE (ALLIGATOR SAID->ADDER TO SIT) WHENEVER I PUT MY NOSE OUT OF THE WATER (MEN TORMENT->MEANTIME AND) ME AND (TRY->TRIED) TO KILL ME +2414-159411-0020-2673: (THE BRAHMAN->NO BROWN MEN) GAVE HIMSELF UP AS LOST BUT AGAIN HE PRAYED THE TIGER TO HAVE PATIENCE AND LET HIM ASK THE OPINION OF THE (SIXTH->SIX) JUDGE +2414-159411-0021-2674: (NOW->ON) THE SIXTH WAS A JACKAL +2414-159411-0022-2675: THE (BRAHMAN->GRANDMOTHER) TOLD HIS STORY AND SAID TO HIM UNCLE JACKAL (UNCLE->AND WILL) JACKAL SAY WHAT IS YOUR JUDGMENT +2414-159411-0023-2676: SHOW ME THE (PLACE->PACE) +2414-159411-0024-2677: (WHEN THEY GOT THERE->AND THE COURT DEER) THE JACKAL SAID (NOW BRAHMAN->NABRAMIN) SHOW ME EXACTLY WHERE YOU STOOD +2414-159411-0025-2678: EXACTLY THERE WAS IT ASKED THE (JACKAL->JACK WHO) +2414-159411-0026-2679: EXACTLY HERE REPLIED THE (BRAHMAN->PROMIN) +2414-159411-0027-2680: (WHERE->THERE) WAS THE TIGER THEN +2414-159411-0028-2681: WHY I STOOD SO SAID THE TIGER JUMPING INTO THE CAGE AND MY HEAD WAS ON THIS SIDE +2414-159411-0029-2682: VERY GOOD SAID (THE JACKAL->TO JACK WHO) BUT I CANNOT JUDGE WITHOUT UNDERSTANDING THE WHOLE MATTER EXACTLY +2414-159411-0030-2683: SHUT AND BOLTED SAID (THE BRAHMAN->DE BRAMIN) +2414-159411-0031-2684: THEN (SHUT->SHET) AND (BOLT IT->BOLTED) SAID (THE->TO) JACKAL +2414-159411-0032-2685: WHEN THE BRAHMAN HAD (DONE->TURNED) THIS THE JACKAL SAID OH YOU WICKED AND UNGRATEFUL (TIGER->TIRE) +2414-159411-0033-2686: (WHEN THE->WITH A) GOOD (BRAHMAN->BRAMIN) OPENED (YOUR CAGE DOOR->YOU CARED TO HER) IS TO EAT HIM THE ONLY RETURN YOU WOULD MAKE +2414-159411-0034-2687: PROCEED ON YOUR JOURNEY FRIEND (BRAHMAN->DRAMIN) +2414-159411-0035-2688: (YOUR ROAD LIES->HE RULED LIVES) THAT WAY (AND MINE->IN MIND) THIS +2414-165385-0000-2651: THUS ACCOMPLISHED HE EXCITED (THE->*) ADMIRATION OF EVERY SILLY (COQUETTE->COCKET) AND THE ENVY OF EVERY (FLUTTERING COXCOMB->FACTIVE ACCOUNT) BUT BY ALL YOUNG GENTLEMEN AND LADIES OF UNDERSTANDING HE WAS HEARTILY DESPISED AS A MERE CIVILIZED MONKEY +2414-165385-0001-2652: (*->AND) THAT HIS SOUL MIGHT AFTERWARDS OCCUPY SUCH A STATION AS WOULD BE MOST SUITABLE TO HIS CHARACTER IT WAS (SENTENCED->INTENSE) TO INHABIT (THE->A) BODY OF THAT (FINICAL->FINNICAL) GRINNING AND MISCHIEVOUS LITTLE (MIMICK->MIMIC) WITH (FOUR->FULL) LEGS WHICH YOU NOW BEHOLD BEFORE YOU +2609-156975-0000-2367: THEN MOSES WAS AFRAID AND SAID SURELY THE THING IS KNOWN +2609-156975-0001-2368: (HOLD ON HOLD->ERON HER) FAST (HOLD OUT->HOTEL) PATIENCE IS GENIUS +2609-156975-0002-2369: LET US HAVE FAITH THAT RIGHT (MAKES->MATRON) MIGHT AND IN THAT FAITH (LET US DARE->THAT STARED) TO DO OUR DUTY AS WE UNDERSTAND IT LINCOLN +2609-156975-0003-2370: THE EGYPTIAN BACKGROUND OF THE BONDAGE +2609-156975-0004-2371: EVERY ONE (WHO IS TURBULENT->WHOSE TURBOT) HAS BEEN FOUND BY 
KING (MERNEPTAH THE TESTIMONY OF->MARNETTE PATH DETACHEMONY AS) THE OLDEST (BIBLICAL NARRATIVES->BAPLICO NARRATIVE) REGARDING THE SOJOURN OF THE HEBREWS IN EGYPT IS ALSO IN PERFECT ACCORD WITH THE PICTURE WHICH (THE->A) CONTEMPORARY EGYPTIAN (INSCRIPTIONS->SCRIPTIONS) GIVE (OF->*) THE PERIOD +2609-156975-0005-2372: THE ABSENCE OF (*->THE) DETAILED (REFERENCE TO->REFERENCES) THE HEBREWS IS THEREFORE PERFECTLY NATURAL +2609-156975-0006-2373: IT SEEMS PROBABLE THAT NOT ALL BUT ONLY PART (OF->IN) THE TRIBES WHICH (ULTIMATELY COALESCED->ULTIMATE COVETTES) INTO THE HEBREW NATION FOUND THEIR WAY TO EGYPT +2609-156975-0007-2374: THE STORIES REGARDING JOSEPH (THE->THEIR) TRADITIONAL (FATHER OF EPHRAIM->FOUND THEIR ATRONE) AND (MANASSEH IMPLY->MANOT SAY INCLINE) THAT THESE STRONG CENTRAL TRIBES POSSIBLY TOGETHER WITH THE SOUTHERN (TRIBES->TRINES) OF (BENJAMIN->BINTAMEN) AND JUDAH (WERE->WHERE) THE CHIEF ACTORS (IN THIS OPENING->WHO THAT SOMETHING) SCENE IN ISRAEL'S HISTORY +2609-156975-0008-2375: THE (BIBLICAL->BEVOCO) NARRATIVES APPARENTLY (DISAGREE REGARDING->DISAGRATING GUARDING) THE DURATION OF THE (SOJOURN->SAJOURN) IN EGYPT +2609-156975-0009-2376: THE LATER (TRADITIONS TEND TO EXTEND->JUDICINES INTERESTING) THE PERIOD +2609-156975-0010-2377: (HERE->HE) WERE FOUND (SEVERAL INSCRIPTIONS->CHEVARIN SCRIPTIONS) BEARING THE EGYPTIAN NAME OF THE CITY (P ATUM HOUSE OF->PATUM OUTSIDE) THE GOD (ATUM->ATOM) +2609-156975-0011-2378: A CONTEMPORARY INSCRIPTION (ALSO STATES->ONCE ESTATES) THAT HE (FOUNDED->FOUND A) NEAR (PITHUM->PITTHAM) THE HOUSE OF (RAMSES->RAMESES) A CITY WITH (A->THE) ROYAL RESIDENCE AND (TEMPLES->SIMPLES) +2609-156975-0012-2379: THAT THE HEBREWS WERE (RESTIVE->RENTS OF) UNDER THIS (TYRANNY->SOON) WAS (NATURAL->NATURALLY) INEVITABLE +2609-156975-0013-2380: WAS ANY OTHER PROCEDURE TO BE (EXPECTED->SPECTRE) FROM (A DESPOTIC RULER->IT THAT SPONNET ROAR) OF THAT LAND AND DAY +2609-156975-0014-2381: THE MAKING OF (A->THE) LOYAL PATRIOT +2609-156975-0015-2382: THE STORY OF MOSES (BIRTH AND->BERTH AN) EARLY CHILDHOOD IS ONE OF THE MOST INTERESTING CHAPTERS IN (BIBLICAL->BEPPOCO) HISTORY +2609-156975-0016-2383: (WAS MOSES JUSTIFIED IN RESISTING->WITH MOVES IT'S JUST FUN AND RESISTS IN) THE EGYPTIAN (TASKMASTER->TAX MASTER) +2609-156975-0017-2384: (IS PEONAGE->HIS PINIONS) ALWAYS (DISASTROUS->DISASTRATES) NOT (ONLY->OWING) TO ITS VICTIMS BUT ALSO TO THE GOVERNMENT IMPOSING IT +2609-156975-0018-2385: NATURALLY HE WENT TO THE LAND OF (MIDIAN->MEDIAN) +2609-156975-0019-2386: THE WILDERNESS TO THE EAST OF EGYPT (HAD->AND) FOR CENTURIES BEEN THE (PLACE->PLATES) OF (REFUGE FOR->REFUGERY) EGYPTIAN (FUGITIVES->FUGITIVE) +2609-156975-0020-2387: FROM (ABOUT->A BOUT) TWO THOUSAND (B->*) C +2609-156975-0021-2388: ON THE BORDERS OF THE (WILDERNESS->WIDERNESS) HE FOUND CERTAIN (BEDOUIN->BEDOING) HERDSMEN WHO RECEIVED HIM (HOSPITABLY->HOW SPECTABLY) +2609-156975-0022-2389: THESE (SAND WANDERERS->SEND WONDERERS) SENT HIM ON FROM (TRIBE->TIME) TO (TRIBE UNTIL HE REACHED->TIME INTO A REACH) THE LAND OF (KEDEM EAST->KIDAM EACH) OF THE DEAD SEA WHERE HE REMAINED FOR A YEAR AND A HALF +2609-156975-0023-2390: LATER HE FOUND HIS WAY TO THE COURT OF ONE OF THE LOCAL KINGS (IN->AND) CENTRAL (PALESTINE->PALASTEIN) WHERE HE MARRIED AND BECAME IN (*->THE) TIME A PROSPEROUS LOCAL PRINCE +2609-156975-0024-2391: THE SCHOOL (OF->AND) THE (WILDERNESS->WEARINESS) +2609-156975-0025-2392: THE STORY (OF->*) MOSES IS IN MANY WAYS CLOSELY PARALLEL TO THAT (OF SINUHIT->AS SOON WIT) +2609-156975-0026-2393: THE PRIEST OF THE (SUB 
TRIBE->SUBTERRAB) OF THE (KENITES->KANITE) RECEIVED HIM INTO HIS HOME AND GAVE HIM HIS DAUGHTER IN MARRIAGE +2609-156975-0027-2394: NOTE THE (CHARACTERISTIC->CARE OF A RIDICT) ORIENTAL (IDEA OF MARRIAGE->AND GIVE MARY'S) +2609-156975-0028-2395: HERE MOSES LEARNED (THE->THAT) LESSONS THAT WERE ESSENTIAL FOR HIS TRAINING AS (THE->A) LEADER AND DELIVERER OF HIS PEOPLE +2609-156975-0029-2396: (AFTER->ANSWERED) THE CAPTURE OF JERICHO CERTAIN OF THEM WENT UP WITH (THE SOUTHERN TRIBES TO CONQUER->A SUDDEN TRIUMPHS WHO CONCUR) SOUTHERN PALESTINE +2609-156975-0030-2397: MANY MODERN SCHOLARS (DRAW->DRAWING) THE CONCLUSION FROM THE (BIBLICAL->BIBBICAL) NARRATIVE THAT IT WAS FROM THE (KENITES THAT->KENITE SNAT) MOSES FIRST LEARNED OF (YAHWEH->YANAWAY) OR AS THE DISTINCTIVE NAME OF (ISRAEL'S GOD->ISRAEL GUN) WAS (TRANSLATED->TRANSGRATED) BY LATER (JEWISH SCRIBES->TO ITS GRIMES) JEHOVAH +2609-156975-0031-2398: DO THE (EARLIEST HEBREW TRADITIONS->AREIAT SEA BERTRADIZANCE) IMPLY THAT THE (ANCESTORS->INCES) OF THE (ISRAELITES->ISRAITS) WERE (WORSHIPPERS->WORSE SUPPOSED) OF JEHOVAH +2609-156975-0032-2399: THE (TITLE->TANA) OF HIS (FATHER IN LAW->FUNDEMENT) IMPLIES (THAT->AT) THIS PRIEST MINISTERED AT SOME (WILDERNESS->LITERN) SANCTUARY +2609-156975-0033-2400: MOSES IN THE HOME OF THE (MIDIAN PRIEST->MENDIAN PRIESTS) WAS BROUGHT INTO DIRECT AND CONSTANT CONTACT WITH THE JEHOVAH WORSHIP +2609-156975-0034-2401: THE CRUEL FATE OF (HIS->THIS) PEOPLE (AND->IN) THE PAINFUL EXPERIENCE IN EGYPT THAT HAD DRIVEN HIM INTO THE WILDERNESS PREPARED HIS MIND TO RECEIVE THIS TRAINING +2609-156975-0035-2402: HIS (QUEST->FRENCH) WAS FOR A (JUST->JETS) AND STRONG GOD ABLE TO (DELIVER->DRIVER) THE OPPRESSED +2609-156975-0036-2403: THE (WILDERNESS->WIDERNESS) WITH ITS LURKING FOES AND THE EVER PRESENT DREAD OF HUNGER AND THIRST (DEEPENED HIS SENSE->DEEP IN DESCENTS) OF NEED AND OF DEPENDENCE UPON (A->THE) POWER ABLE TO (GUIDE->GOD) THE (DESTINIES->DEBT'S NEEDS) OF MEN +2609-156975-0037-2404: THE PEASANTS OF THE (VAST ANTOLIAN PLAIN->VATS IN TOWING) IN (*->PLAIN OF) CENTRAL (ASIA->AS A) MINOR (STILL->SO WILL) CALL EVERY LIFE (GIVING->GIVEN) SPRING GOD HATH GIVEN +2609-156975-0038-2405: (THE CONSTANT->THEY CAN'T SENTIN) NECESSITY (OF->A) MEETING THE DANGERS OF THE (WILDERNESS->WIDERNESS) AND (OF->THE) DEFENDING THE (FLOCKS ENTRUSTED TO MOSES->FLAUNT AND TRITES OF JAMIES ITS) CARE DEVELOPED HIS COURAGE AND POWER OF (LEADERSHIP->LEGERSHIP) AND ACTION +2609-157645-0000-2352: EVIDENTLY THE INTENTION (WAS TO MAKE->WHICH MADE) THINGS (PLEASANT->PRESENT) FOR THE ROYAL (FOE OF->FOLK A) TOBACCO DURING HIS VISIT +2609-157645-0001-2353: THE (PROHIBITION IN->PROBITS AND) THE (REGULATION->REGULATING) QUOTED (OF->HER) SMOKING (IN->AND) SAINT MARY'S CHURCH REFERRED IT MAY BE NOTED TO THE ACT WHICH WAS HELD THEREIN +2609-157645-0002-2354: SOMETIMES TOBACCO (WAS->IS) USED IN CHURCH FOR (DISINFECTING OR DEODORIZING->DISINFACTANT ORDEALIZING) PURPOSES +2609-157645-0003-2355: BLACKBURN ARCHBISHOP OF YORK WAS A GREAT SMOKER +2609-157645-0004-2356: ON ONE OCCASION HE WAS AT SAINT MARY'S CHURCH (NOTTINGHAM->NOT IN HAM) FOR A (CONFIRMATION->CONFIRMATON) +2609-157645-0005-2357: ANOTHER EIGHTEENTH CENTURY CLERICAL WORTHY THE FAMOUS (DOCTOR PARR->DOCTROPOS) AN INVETERATE SMOKER WAS ACCUSTOMED TO DO (WHAT MISTER DISNEY->AT MIDSRSANY) PREVENTED (ARCHBISHOP->ARCHBISH AT) BLACKBURN FROM DOING HE SMOKED IN HIS (VESTRY->VETCHERY) AT HATTON +2609-157645-0006-2358: (PARR->POOR) WAS SUCH A (CONTINUAL->CONTINUOUS) SMOKER THAT (ANYONE->ANY ONE) WHO CAME INTO HIS 
COMPANY (IF HE->FIT) HAD NEVER SMOKED BEFORE (HAD->AND) TO (LEARN->LEARNED) THE USE OF A PIPE AS A MEANS OF SELF DEFENCE +2609-157645-0007-2359: ONE SUNDAY SAYS MISTER (DITCHFIELD->DIXFIELD) HE HAD (AN EXTRA->IN NATURE) PIPE AND (JOSHUA->JANSHIRE) THE CLERK TOLD HIM THAT THE PEOPLE WERE GETTING (*->THEM) IMPATIENT +2609-157645-0008-2360: (LET->THEM TO) THEM SING (ANOTHER PSALM SAID->AND NEITHER PSALMS SAY THAT) THE CURATE +2609-157645-0009-2361: THEY HAVE SIR REPLIED THE CLERK +2609-157645-0010-2362: THEN LET THEM SING THE HUNDRED AND NINETEENTH REPLIED THE CURATE +2609-157645-0011-2363: SIX ARMS THE (NEAREST->NURSE) WITHIN REACH PRESENTED WITH AN OBEDIENT START (*->AND) AS MANY TOBACCO (POUCHES->PIUCES) TO THE MAN OF OFFICE +2609-157645-0012-2364: DAVID (DEANS HOWEVER->DEAN SAMURED) DID NOT AT ALL (APPROVE->IMPROVE) THIS IRREVERENCE +2609-157645-0013-2365: (GOING TO->GO INTO) CHURCH (AT HAYES IN THOSE DAYS MUST->AUNT HAZE AND THUS THE DAY'S MISS) HAVE BEEN (QUITE->ACQUAINT) AN (EXCITING EXPERIENCE->THESE SIGNING INSPIRANTS) +2609-157645-0014-2366: WHEN THESE MEN (IN->AIMED) THE COURSE OF MY REMONSTRANCE FOUND (*->OUT) THAT (I->*) WAS NOT GOING TO CONTINUE THE CUSTOM THEY NO LONGER CARED TO BE COMMUNICANTS +2609-169640-0000-2406: (PROAS->PRATS) IN THAT QUARTER WERE USUALLY (DISTRUSTED->DISTRUDGED) BY (SHIPS IT->THE STIPS AT) IS TRUE BUT THE SEA IS (FULL->FOUR) OF THEM (AND FAR->FOR) MORE (ARE->OR) INNOCENT THAN (ARE->OUR) GUILTY OF ANY (ACTS->ACT) OF (VIOLENCE->ONLENETS) +2609-169640-0001-2407: (AN HOUR->NOW I) AFTER THE SUN HAD SET THE WIND FELL TO (A->AN) LIGHT AIR (THAT JUST->BUT JEST) KEPT STEERAGE WAY ON THE SHIP +2609-169640-0002-2408: FORTUNATELY THE JOHN WAS NOT ONLY FAST BUT (SHE->SEA) MINDED HER (HELM->HAIL) AS (A LIGHT FOOTED->THE LIGHTFOOTED) GIRL (TURNS->TURNED) IN A (LIVELY->LOVELY) DANCE +2609-169640-0003-2409: (I->AND) NEVER WAS IN A BETTER (STEERING->STERN) SHIP (MOST ESPECIALLY IN->POESY SPENTRY AND) MODERATE WEATHER +2609-169640-0004-2410: MISTER MARBLE HE (I DO->OUGHT TO) BELIEVE WAS (FAIRLY SNOOZING->FAIRLY'S NEWSING) ON THE (HEN COOPS->HINCOUX) BEING LIKE THE (SAILS->SAILORS) AS ONE MIGHT SAY (BARELY ASLEEP->VARIOUS SLEEP) +2609-169640-0005-2411: AT THAT MOMENT I (HEARD->HAD) A NOISE (ONE->WHEN) FAMILIAR TO (SEAMEN->SEE MEN) THAT OF AN OAR (FALLING->FOLLOWING) IN (A->THE) BOAT +2609-169640-0006-2412: (*->AS) I (SANG OUT->YET) SAIL HO AND CLOSE (ABOARD->ABROAD) +2609-169640-0007-2413: HE WAS (TOO MUCH->CHIMNETS) OF A SEAMAN TO REQUIRE A SECOND LOOK IN ORDER TO (ASCERTAIN WHAT->ASSERT BUT) WAS TO BE DONE +2609-169640-0008-2414: (ALTHOUGH->ON THOSE) THEY WENT THREE FEET TO OUR TWO THIS GAVE (US A->UP SOME) MOMENT OF (BREATHING->BREASING) TIME +2609-169640-0009-2415: AS OUR (SHEETS->SEATS) WERE (ALL FLYING->OFF LYING) FORWARD AND REMAINED SO FOR A FEW MINUTES IT GAVE ME (*->A) LEISURE TO LOOK ABOUT +2609-169640-0010-2416: I SOON SAW BOTH (PROAS AND GLAD ENOUGH->PROTS IN GRINDING UP) WAS I TO PERCEIVE THAT THEY HAD NOT APPROACHED MATERIALLY (NEARER->NEAR) +2609-169640-0011-2417: MISTER KITE OBSERVED (THIS->IT) ALSO (AND REMARKED->IN REMARK) THAT OUR MOVEMENTS HAD BEEN SO PROMPT AS TO TAKE THE (RASCALS->RATS WAS) ABACK +2609-169640-0012-2418: A (BREATHLESS->BRENT WITCH) STILLNESS SUCCEEDED +2609-169640-0013-2419: THE (PROAS->POETS) DID NOT (ALTER THEIR->ENTER THE) COURSE BUT NEARED (US->ITS) FAST +2609-169640-0014-2420: I HEARD THE (RATTLING->RIDING) OF THE BOARDING (PIKES->PINES) TOO AS THEY WERE CUT ADRIFT FROM THE SPANKER BOOM AND FELL UPON THE DECKS 
+2609-169640-0015-2421: (KITE WENT AFT->TIGHTLY ACT) AND RETURNED WITH THREE OR FOUR MUSKETS AND AS MANY PIKES +2609-169640-0016-2422: THE STILLNESS (THAT REIGNED->DOWN RINGS) ON BOTH SIDES WAS LIKE THAT OF DEATH +2609-169640-0017-2423: THE JOHN BEHAVED BEAUTIFULLY (AND->HE) CAME ROUND LIKE A TOP +2609-169640-0018-2424: THE QUESTION WAS NOW WHETHER WE COULD PASS THEM OR NOT BEFORE THEY GOT (NEAR ENOUGH->NEARING UP) TO (GRAPPLE->GRANTPLE) +2609-169640-0019-2425: THE CAPTAIN BEHAVED PERFECTLY (WELL IN THIS->AWAY ON ITS) CRITICAL INSTANT COMMANDING A DEAD SILENCE (AND->IN) THE (CLOSEST ATTENTION->CLOSETS INTENTION) TO HIS ORDERS +2609-169640-0020-2426: (NOT A SOUL->NOW I'M SO) ON BOARD THE JOHN WAS (HURT->SHARP) +2609-169640-0021-2427: (ON->WHEN) OUR (SIDE->SON) WE GAVE THE (GENTLEMEN->GENTLEMAN) THE FOUR (SIXES TWO AT->SAXES TO AUNT) THE (NEAREST->NURSE) AND TWO AT THE (STERN MOST PROA->STERNMOST PRO) WHICH WAS STILL NEAR A CABLE'S LENGTH (DISTANT->OF ITS) +2609-169640-0022-2428: THEY WERE (LIKE->NIGHT) THE (YELLS->YEARS) OF (FIENDS->FIEND) IN (ANGUISH->ENGLISH) +2609-169640-0023-2429: (I DOUBT->AND OUT) IF WE TOUCHED A MAN IN THE (NEAREST PROA->NEAR ITS PRAYER) +2609-169640-0024-2430: (IN THIS->AND THAT) STATE THE SHIP PASSED AHEAD (ALL->OF) HER CANVAS (BEING FULL->BEEN FOR) LEAVING THE (PROA MOTIONLESS->PROTINENT) IN HER WAKE +3005-163389-0000-1108: THEY SWARMED UP IN FRONT (OF SHERBURN'S->A SHERBOURNE'S) PALINGS AS THICK AS THEY COULD (JAM->JAMMED) TOGETHER AND YOU COULDN'T HEAR YOURSELF THINK FOR THE NOISE +3005-163389-0001-1109: SOME SUNG OUT TEAR DOWN THE FENCE TEAR DOWN THE FENCE +3005-163389-0002-1110: THE STILLNESS WAS AWFUL CREEPY AND UNCOMFORTABLE +3005-163389-0003-1111: (SHERBURN->SHERBIN) RUN HIS EYE SLOW ALONG THE CROWD AND WHEREVER IT STRUCK THE PEOPLE TRIED A LITTLE TO (OUT GAZE->OUTGAZE) HIM BUT THEY COULDN'T THEY DROPPED THEIR EYES AND LOOKED SNEAKY +3005-163389-0004-1112: THE AVERAGE MAN'S A COWARD +3005-163389-0005-1113: BECAUSE THEY'RE AFRAID THE MAN'S FRIENDS WILL SHOOT THEM IN THE BACK IN THE (DARKAND IT'S->DARK AND IS) JUST WHAT THEY WOULD DO +3005-163389-0006-1114: SO THEY ALWAYS ACQUIT AND THEN A MAN GOES IN THE NIGHT WITH A HUNDRED (MASKED->MASSED) COWARDS AT HIS BACK AND LYNCHES THE RASCAL +3005-163389-0007-1115: YOU DIDN'T WANT TO COME +3005-163389-0008-1116: BUT A MOB WITHOUT ANY MAN AT THE HEAD OF IT IS BENEATH PITIFULNESS +3005-163389-0009-1117: NOW (LEAVE->LEE) AND TAKE YOUR HALF A MAN WITH YOU (TOSSING HIS->TAUSEN HE HAS) GUN UP ACROSS HIS LEFT ARM AND COCKING IT WHEN HE SAYS THIS +3005-163389-0010-1118: THE CROWD WASHED BACK SUDDEN AND THEN BROKE ALL APART AND WENT TEARING OFF EVERY (WHICH->WITCH) WAY AND BUCK HARKNESS HE (HEELED->HEALED) IT AFTER THEM (LOOKING TOLERABLE CHEAP->LOOK INTOLERABLE CHEEK) +3005-163389-0011-1119: (YOU->HE) CAN'T BE TOO CAREFUL +3005-163389-0012-1120: THEY ARGUED AND TRIED TO KEEP HIM OUT BUT HE WOULDN'T LISTEN AND (THE->A) WHOLE SHOW COME TO A (STANDSTILL->FANSTILL) +3005-163389-0013-1121: AND ONE OR TWO WOMEN (BEGUN->BEGAN) TO SCREAM +3005-163389-0014-1122: SO THEN (THE RINGMASTER->A RING MASTER) HE MADE A LITTLE SPEECH AND SAID HE HOPED THERE WOULDN'T BE NO DISTURBANCE AND IF THE MAN WOULD PROMISE HE WOULDN'T MAKE NO MORE TROUBLE HE WOULD LET HIM RIDE IF HE THOUGHT HE COULD STAY ON THE HORSE +3005-163389-0015-1123: IT WARN'T FUNNY TO ME THOUGH I WAS ALL OF A TREMBLE TO SEE HIS DANGER +3005-163389-0016-1124: AND (THE->A) HORSE A GOING LIKE A HOUSE (AFIRE->AFAR) TOO +3005-163389-0017-1125: HE (SHED->SHARED) THEM SO THICK 
(THEY->THAT) KIND OF CLOGGED UP THE AIR AND ALTOGETHER HE SHED SEVENTEEN SUITS +3005-163389-0018-1126: WHY IT WAS ONE OF HIS OWN MEN +3005-163390-0000-1185: (ANDBUT->AND BUT) NEVER MIND THE REST OF HIS OUTFIT IT WAS JUST WILD BUT IT WAS AWFUL FUNNY +3005-163390-0001-1186: THE PEOPLE MOST KILLED THEMSELVES LAUGHING AND WHEN THE KING GOT DONE CAPERING AND CAPERED OFF BEHIND THE SCENES THEY ROARED AND CLAPPED AND STORMED AND HAW HAWED TILL HE COME BACK AND DONE IT OVER AGAIN AND AFTER THAT THEY MADE HIM DO IT ANOTHER TIME +3005-163390-0002-1187: TWENTY PEOPLE (SINGS->SANGS) OUT +3005-163390-0003-1188: THE DUKE SAYS YES +3005-163390-0004-1189: EVERYBODY SINGS OUT SOLD +3005-163390-0005-1190: BUT A BIG FINE LOOKING MAN JUMPS UP ON A BENCH (AND->AN) SHOUTS HOLD ON +3005-163390-0006-1191: JUST A WORD GENTLEMEN THEY STOPPED TO LISTEN +3005-163390-0007-1192: WHAT WE WANT IS TO GO OUT OF HERE QUIET AND TALK (THIS->TO) SHOW UP AND SELL THE REST (OF->O) THE TOWN +3005-163390-0008-1193: YOU BET IT IS THE (JEDGE->JUDGE) IS RIGHT EVERYBODY SINGS OUT +3005-163390-0009-1194: WE STRUCK THE RAFT AT THE SAME TIME AND IN LESS THAN TWO SECONDS WE WAS GLIDING DOWN STREAM ALL DARK AND STILL AND EDGING TOWARDS THE MIDDLE OF THE RIVER NOBODY SAYING A WORD +3005-163390-0010-1195: WE NEVER SHOWED A LIGHT TILL WE WAS ABOUT TEN MILE BELOW THE VILLAGE +3005-163390-0011-1196: GREENHORNS (FLATHEADS->FLAT HEADS) +3005-163390-0012-1197: NO I (SAYS->SAY IS) IT DON'T +3005-163390-0013-1198: WELL IT DON'T BECAUSE IT'S IN (THE BREED->TO BREATHE) I RECKON THEY'RE ALL ALIKE +3005-163390-0014-1199: WELL THAT'S WHAT I'M A SAYING ALL KINGS IS MOSTLY (RAPSCALLIONS->RASCALIONS) AS FUR AS I (CAN->KIN) MAKE OUT IS DAT SO +3005-163390-0015-1200: AND LOOK AT CHARLES SECOND AND LOUIS FOURTEEN AND LOUIS FIFTEEN AND JAMES SECOND AND EDWARD SECOND AND RICHARD THIRD AND FORTY MORE BESIDES ALL THEM SAXON HEPTARCHIES THAT USED TO RIP AROUND SO (IN->WHEN) OLD TIMES AND RAISE (CAIN->GAME) +3005-163390-0016-1201: MY YOU OUGHT TO (SEEN->SEE AN) OLD HENRY THE EIGHT WHEN HE WAS IN BLOOM HE WAS A BLOSSOM +3005-163390-0017-1202: RING UP FAIR (ROSAMUN->ROSAMOND) +3005-163390-0018-1203: WELL HENRY HE TAKES A NOTION HE WANTS TO GET UP SOME TROUBLE WITH THIS COUNTRY +3005-163390-0019-1204: S'POSE HE OPENED HIS (MOUTHWHAT->MOUTH WHAT) THEN +3005-163390-0020-1205: ALL I SAY IS KINGS (IS->AS) KINGS (AND YOU->AN YE) GOT TO MAKE ALLOWANCES +3005-163390-0021-1206: TAKE THEM ALL AROUND THEY'RE A MIGHTY ORNERY LOT IT'S THE WAY THEY'RE RAISED +3005-163390-0022-1207: WELL THEY ALL DO JIM +3005-163390-0023-1208: NOW (DE DUKE->TO DO) HE'S A (TOLERBLE LIKELY->TOLERABLE LIKE THE) MAN IN SOME WAYS +3005-163390-0024-1209: THIS ONE'S A (MIDDLING->MIDDLIN) HARD LOT FOR A DUKE +3005-163390-0025-1210: WHEN I WAKED UP (JUST->JIST) AT DAYBREAK HE WAS SITTING THERE WITH HIS HEAD DOWN BETWIXT HIS KNEES MOANING AND MOURNING TO HIMSELF +3005-163390-0026-1211: IT DON'T SEEM NATURAL BUT I RECKON IT'S SO +3005-163390-0027-1212: HE WAS OFTEN MOANING AND MOURNING (*->IN) THAT WAY NIGHTS WHEN HE JUDGED I WAS ASLEEP AND SAYING (PO->POE) LITTLE (LIZABETH->ELIZABETH) +3005-163390-0028-1213: (DOAN->DON'T) YOU HEAR ME (SHET DE DO->SHUT DEAD DOUGH) +3005-163390-0029-1214: I LAY I MAKE YOU MINE +3005-163390-0030-1215: (JIS AS->IT IS) LOUD AS I COULD YELL +3005-163391-0000-1127: WHICH WAS SOUND ENOUGH JUDGMENT BUT YOU TAKE THE AVERAGE MAN AND HE WOULDN'T WAIT FOR HIM TO (HOWL->HOWE) +3005-163391-0001-1128: THE KING'S (DUDS->DEADS) WAS ALL BLACK AND HE DID LOOK REAL (SWELL AND->SWELLIN) STARCHY 
+3005-163391-0002-1129: WHY BEFORE HE LOOKED LIKE THE (ORNERIEST->ORNEIST) OLD RIP THAT EVER WAS BUT NOW WHEN HE'D TAKE OFF HIS NEW WHITE BEAVER AND MAKE A BOW AND DO A SMILE HE LOOKED THAT GRAND AND GOOD AND PIOUS THAT YOU'D SAY HE HAD WALKED RIGHT OUT OF THE ARK AND MAYBE WAS OLD (LEVITICUS->LEVIKUS) HIMSELF +3005-163391-0003-1130: JIM CLEANED UP THE CANOE AND I GOT MY PADDLE READY +3005-163391-0004-1131: (WHER->WERE) YOU BOUND FOR YOUNG MAN +3005-163391-0005-1132: (GIT->GET) ABOARD SAYS THE KING +3005-163391-0006-1133: I DONE SO AND (THEN->THEY) WE ALL THREE STARTED ON AGAIN +3005-163391-0007-1134: THE YOUNG CHAP WAS MIGHTY THANKFUL SAID (IT->HE) WAS TOUGH WORK (TOTING->TOATING) HIS BAGGAGE SUCH WEATHER +3005-163391-0008-1135: (HE ASKED->PIERRE) THE KING WHERE HE WAS GOING AND THE KING TOLD HIM HE'D COME DOWN (THE->A) RIVER AND (LANDED->LAND IT) AT THE OTHER VILLAGE THIS MORNING AND NOW HE WAS GOING UP A FEW (MILE->MILES) TO SEE AN OLD FRIEND ON A FARM UP THERE THE YOUNG FELLOW SAYS +3005-163391-0009-1136: BUT THEN I SAYS AGAIN NO I RECKON IT AIN'T HIM OR ELSE HE WOULDN'T BE (PADDLING->PADDLIN) UP THE RIVER YOU AIN'T HIM ARE YOU +3005-163391-0010-1137: NO MY NAME'S (BLODGETT ELEXANDER BLODGETT->BLADGE IT ALEXANDER BLADGET) REVEREND (ELEXANDER BLODGETT->ALEXANDER BLOTCHETT) I S'POSE I MUST SAY AS I'M ONE (O->OF) THE (LORD'S->LARGE) POOR SERVANTS +3005-163391-0011-1138: YOU SEE HE WAS PRETTY OLD (AND GEORGE'S G'YIRLS->AN GEORGE IS GOOD EARL'S) WAS TOO YOUNG TO BE MUCH COMPANY FOR HIM EXCEPT MARY JANE THE RED HEADED ONE AND SO HE WAS (KINDER->KIND OR) LONESOME AFTER GEORGE AND HIS WIFE DIED AND DIDN'T SEEM TO CARE MUCH TO LIVE +3005-163391-0012-1139: (TOO->DO) BAD TOO BAD HE COULDN'T (A->HAVE) LIVED TO SEE HIS (BROTHERS->BROTHER'S) POOR SOUL +3005-163391-0013-1140: I'M (GOING->GOIN) IN A SHIP NEXT WEDNESDAY (FOR RYO JANEERO->FER RYEO GENERO) WHERE MY UNCLE (LIVES->IS) +3005-163391-0014-1141: BUT IT'LL BE LOVELY (WISHT->WISHED) I WAS A (GOING->GOIN) +3005-163391-0015-1142: MARY JANE'S NINETEEN SUSAN'S FIFTEEN AND JOANNA'S ABOUT (FOURTEENTHAT'S->FOURTEEN THAT'S) THE ONE THAT GIVES HERSELF TO GOOD WORKS AND HAS A (HARE->HAIR) LIP POOR THINGS +3005-163391-0016-1143: WELL THEY COULD BE WORSE OFF +3005-163391-0017-1144: (OLD->O) PETER HAD FRIENDS AND THEY AIN'T (GOING->GOIN) TO LET THEM COME TO NO HARM +3005-163391-0018-1145: BLAMED IF HE DIDN'T (INQUIRE->ACQUIRE) ABOUT EVERYBODY AND EVERYTHING (IN->AND) THAT BLESSED TOWN AND ALL ABOUT THE (WILKSES->WILKS) AND ABOUT PETER'S (BUSINESSWHICH->BUSINESS WHICH) WAS A TANNER AND ABOUT (GEORGE'SWHICH->GEORGE'S WHICH) WAS A CARPENTER AND ABOUT (HARVEY'SWHICH->HARVEST WHICH) WAS A DISSENTERING MINISTER AND SO ON AND SO ON THEN HE SAYS +3005-163391-0019-1146: WHEN (THEY'RE->HER) DEEP THEY WON'T STOP FOR A HAIL +3005-163391-0020-1147: WAS PETER (WILKS->WILKES) WELL OFF +3005-163391-0021-1148: WHEN (WE STRUCK->WASTER UP) THE BOAT SHE WAS ABOUT DONE LOADING AND PRETTY SOON SHE GOT OFF +3005-163391-0022-1149: NOW HUSTLE BACK RIGHT OFF AND FETCH THE DUKE UP HERE AND THE NEW CARPET BAGS +3005-163391-0023-1150: SO THEN THEY WAITED FOR A STEAMBOAT +3005-163391-0024-1151: (BUT->THAT) THE KING WAS (CA'M->CALM) HE SAYS +3005-163391-0025-1152: THEY GIVE A GLANCE AT ONE ANOTHER AND NODDED THEIR HEADS AS MUCH AS TO SAY (WHAT D I->WOULD DATA) TELL YOU +3005-163391-0026-1153: THEN ONE OF THEM SAYS KIND (OF->O) SOFT AND GENTLE +3005-163399-0000-1154: PHELPS (WAS->IS) ONE OF THESE LITTLE ONE HORSE COTTON PLANTATIONS AND THEY ALL LOOK ALIKE +3005-163399-0001-1155: I WENT 
AROUND AND (CLUMB->CLIMBED) OVER THE BACK STILE BY THE (ASH HOPPER->ASHHOPPER) AND STARTED FOR THE KITCHEN +3005-163399-0002-1156: (I->AH) OUT (WITH A->WI THE) YES'M (BEFORE->FOUR) I THOUGHT +3005-163399-0003-1157: SO THEN SHE STARTED FOR THE HOUSE LEADING ME BY THE HAND AND THE CHILDREN TAGGING AFTER +3005-163399-0004-1158: WHEN WE GOT THERE SHE SET ME DOWN IN A SPLIT (BOTTOMED->BOTTOM) CHAIR AND SET HERSELF DOWN ON A LITTLE LOW STOOL IN FRONT OF ME HOLDING BOTH OF MY HANDS AND SAYS +3005-163399-0005-1159: WELL IT'S LUCKY BECAUSE SOMETIMES PEOPLE DO GET HURT +3005-163399-0006-1160: AND I THINK HE DIED AFTERWARDS HE WAS A BAPTIST +3005-163399-0007-1161: YES IT WAS (MORTIFICATIONTHAT->MORTIFICATION THAT) WAS IT +3005-163399-0008-1162: YOUR UNCLE'S BEEN UP TO THE TOWN EVERY DAY TO FETCH YOU +3005-163399-0009-1163: YOU MUST A MET HIM ON THE ROAD DIDN'T YOU OLDISH MAN (WITH->WIDTH) A +3005-163399-0010-1164: WHY CHILD (IT LL BE STOLE->IT'LL BESTOW) +3005-163399-0011-1165: IT WAS KINDER THIN ICE BUT I SAYS +3005-163399-0012-1166: I HAD MY MIND ON THE CHILDREN ALL THE TIME I WANTED TO (GET->GIT) THEM OUT TO ONE SIDE AND PUMP THEM A LITTLE AND FIND OUT WHO I WAS +3005-163399-0013-1167: (PRETTY->BERTIE) SOON SHE MADE THE COLD (CHILLS->CHILL) STREAK ALL DOWN MY BACK BECAUSE SHE SAYS +3005-163399-0014-1168: I SEE IT WARN'T A BIT OF USE TO TRY TO GO AHEAD I'D GOT TO THROW UP MY HAND +3005-163399-0015-1169: SO I SAYS TO MYSELF (HERE'S->HERE IS) ANOTHER PLACE WHERE I GOT TO (RESK->REST) THE TRUTH +3005-163399-0016-1170: I OPENED MY MOUTH TO BEGIN BUT SHE GRABBED ME AND HUSTLED ME IN BEHIND THE BED AND SAYS HERE HE COMES +3005-163399-0017-1171: CHILDREN DON'T YOU SAY A WORD +3005-163399-0018-1172: I SEE I WAS IN A FIX NOW +3005-163399-0019-1173: MISSUS (PHELPS->PHILP) SHE JUMPS FOR HIM AND SAYS +3005-163399-0020-1174: (HAS->AS) HE COME NO SAYS HER HUSBAND +3005-163399-0021-1175: I CAN'T IMAGINE SAYS THE OLD GENTLEMAN AND I MUST SAY IT MAKES ME DREADFUL UNEASY +3005-163399-0022-1176: UNEASY SHE SAYS I'M READY TO GO DISTRACTED +3005-163399-0023-1177: HE MUST (A->HAVE) COME AND YOU'VE MISSED HIM ALONG THE ROAD +3005-163399-0024-1178: OH DON'T DISTRESS ME ANY (MORE'N->MORE) I'M ALREADY DISTRESSED +3005-163399-0025-1179: WHY SILAS LOOK YONDER UP THE ROAD AIN'T THAT SOMEBODY (COMING->COMIN) +3005-163399-0026-1180: THE OLD GENTLEMAN STARED AND SAYS +3005-163399-0027-1181: I HAIN'T NO IDEA WHO IS IT +3005-163399-0028-1182: (IT'S->IS) TOM SAWYER +3005-163399-0029-1183: BEING TOM SAWYER WAS EASY AND COMFORTABLE AND (IT STAYED EASY->ITS STEESEY) AND COMFORTABLE TILL BY AND BY I HEAR A STEAMBOAT (COUGHING->COFFIN) ALONG DOWN THE RIVER +3005-163399-0030-1184: THEN I SAYS TO MYSELF S'POSE TOM SAWYER COMES DOWN ON THAT BOAT +3080-5032-0000-312: BUT I AM HUGELY PLEASED THAT YOU HAVE SEEN MY LADY +3080-5032-0001-313: I KNEW YOU COULD NOT CHOOSE BUT LIKE HER BUT YET LET ME TELL YOU YOU HAVE SEEN BUT THE WORST OF HER +3080-5032-0002-314: HER CONVERSATION HAS MORE CHARMS (THAN->AND) CAN BE IN MERE BEAUTY AND (HER->A) HUMOUR AND DISPOSITION WOULD MAKE A DEFORMED PERSON APPEAR LOVELY +3080-5032-0003-315: WHY DID YOU NOT SEND ME THAT NEWS AND A GARLAND +3080-5032-0004-316: (WELL->WHY) THE BEST (ON'T->ON IT) IS (*->THAT) I HAVE A SQUIRE NOW THAT IS AS GOOD AS A KNIGHT +3080-5032-0005-317: IN EARNEST WE HAVE HAD SUCH A SKIRMISH AND UPON SO FOOLISH AN OCCASION AS I CANNOT TELL WHICH (IS STRANGEST->YOUR STRANGER'S) +3080-5032-0006-318: ALL THE PEOPLE THAT I HAD EVER IN MY LIFE REFUSED WERE BROUGHT AGAIN UPON THE STAGE LIKE RICHARD 
THE (THREE S->THIRD'S) GHOSTS TO REPROACH ME (WITHAL AND->WITH A IN) ALL THE KINDNESS HIS DISCOVERIES COULD MAKE I HAD FOR YOU WAS LAID TO MY CHARGE +3080-5032-0007-319: MY BEST QUALITIES IF I HAVE ANY THAT ARE GOOD SERVED BUT FOR AGGRAVATIONS OF MY FAULT AND I WAS ALLOWED TO HAVE WIT AND UNDERSTANDING AND DISCRETION IN OTHER THINGS THAT IT MIGHT APPEAR I HAD NONE IN THIS +3080-5032-0008-320: TIS A STRANGE CHANGE AND I AM VERY SORRY FOR IT BUT I'LL SWEAR I KNOW NOT HOW TO HELP IT +3080-5032-0009-321: MISTER FISH IS (THE->A) SQUIRE OF DAMES AND HAS SO MANY MISTRESSES THAT ANYBODY MAY PRETEND (A->TO) SHARE IN HIM AND BE BELIEVED (BUT->THAT) THOUGH I HAVE THE HONOUR TO BE HIS NEAR NEIGHBOUR TO SPEAK FREELY I CANNOT BRAG MUCH THAT HE MAKES ANY COURT TO ME AND I KNOW NO YOUNG WOMAN IN THE COUNTRY THAT HE DOES NOT VISIT OFTEN +3080-5032-0010-322: I THINK MY YOUNGEST BROTHER COMES DOWN WITH HIM +3080-5032-0011-323: I CAN NO SOONER GIVE YOU SOME LITTLE HINTS (WHEREABOUTS->WHEREABOUT) THEY LIVE BUT YOU KNOW THEM PRESENTLY AND I MEANT YOU SHOULD BE BEHOLDING TO ME FOR YOUR ACQUAINTANCE +3080-5032-0012-324: BUT IT SEEMS THIS GENTLEMAN IS NOT SO EASY (ACCESS->AXIS) BUT YOU MAY ACKNOWLEDGE SOMETHING DUE TO ME IF I INCLINE HIM TO LOOK GRACIOUSLY UPON YOU AND THEREFORE THERE IS NOT MUCH HARM DONE +3080-5032-0013-325: I HAVE MISSED FOUR FITS AND (*->HAVE) HAD BUT FIVE AND HAVE RECOVERED SO MUCH STRENGTH AS MADE ME VENTURE TO MEET YOUR LETTER ON WEDNESDAY A MILE FROM HOME +3080-5032-0014-326: BUT BESIDES I CAN GIVE YOU OTHERS +3080-5032-0015-327: I AM HERE MUCH MORE OUT OF PEOPLE'S WAY THAN IN TOWN WHERE MY (AUNT->AUNTS) AND SUCH (AS->HAS) PRETEND (AN->AND) INTEREST IN ME AND A POWER OVER ME DO SO PERSECUTE ME WITH (THEIR->DEAR) GOOD NATURE AND TAKE IT SO ILL THAT THEY ARE NOT ACCEPTED AS I WOULD LIVE IN A HOLLOW TREE TO AVOID THEM +3080-5032-0016-328: YOU WILL THINK HIM ALTERED AND IF IT BE POSSIBLE MORE MELANCHOLY THAN HE WAS +3080-5032-0017-329: IF MARRIAGE AGREES NO BETTER (WITH OTHER->WHETHER) PEOPLE THAN IT DOES WITH HIM I SHALL PRAY THAT ALL MY FRIENDS MAY (SCAPE->ESCAPE) IT +3080-5032-0018-330: WELL IN (EARNEST->HONEST) IF I WERE A PRINCE THAT LADY SHOULD BE MY MISTRESS BUT I CAN GIVE NO RULE TO ANY ONE ELSE AND PERHAPS THOSE THAT ARE IN NO DANGER OF LOSING THEIR HEARTS TO HER MAY BE INFINITELY TAKEN WITH ONE I SHOULD NOT VALUE AT ALL FOR SO SAYS THE JUSTINIAN WISE PROVIDENCE HAS ORDAINED IT THAT BY THEIR DIFFERENT HUMOURS EVERYBODY MIGHT FIND SOMETHING TO PLEASE THEMSELVES WITHAL WITHOUT ENVYING THEIR (NEIGHBOURS->NEIGHBORS) +3080-5032-0019-331: THE MATTER IS NOT GREAT FOR I CONFESS I DO NATURALLY HATE THE NOISE AND TALK OF THE WORLD AND SHOULD BE BEST PLEASED NEVER TO BE KNOWN (IN'T->IN IT) UPON ANY OCCASION WHATSOEVER YET SINCE IT CAN NEVER BE WHOLLY AVOIDED ONE MUST SATISFY ONESELF BY DOING NOTHING THAT ONE NEED CARE WHO KNOWS +3080-5032-0020-332: IF I HAD A PICTURE THAT WERE FIT FOR YOU YOU SHOULD HAVE IT +3080-5032-0021-333: HOW CAN YOU TALK OF DEFYING FORTUNE NOBODY LIVES WITHOUT IT AND THEREFORE WHY SHOULD YOU IMAGINE YOU COULD +3080-5032-0022-334: I KNOW NOT HOW MY BROTHER COMES TO BE SO WELL INFORMED AS YOU SAY BUT I AM CERTAIN HE KNOWS (THE->*) UTMOST OF THE INJURIES YOU HAVE RECEIVED FROM HER +3080-5032-0023-335: WE HAVE HAD ANOTHER DEBATE BUT MUCH MORE CALMLY +3080-5032-0024-336: (AND->THEN) BESIDES THERE WAS A TIME WHEN WE OURSELVES WERE INDIFFERENT TO ONE ANOTHER DID I DO SO THEN OR HAVE I LEARNED IT SINCE +3080-5032-0025-337: I HAVE BEEN STUDYING HOW TOM (CHEEKE->CHEEK) MIGHT COME 
BY HIS INTELLIGENCE AND I (VERILY->VERY) BELIEVE HE HAS IT FROM MY COUSIN PETERS +3080-5032-0026-338: HOW KINDLY DO I TAKE (THESE->THE) CIVILITIES OF YOUR (FATHER'S->FATHERS) IN EARNEST YOU CANNOT IMAGINE HOW HIS LETTER PLEASED ME +3080-5040-0000-278: WOULD IT WOULD LEAVE ME AND THEN I COULD BELIEVE I SHALL NOT ALWAYS HAVE OCCASION FOR IT +3080-5040-0001-279: MY POOR LADY (VAVASOUR->VAVASOR) IS (CARRIED TO THE->CHARACTER) TOWER AND HER GREAT BELLY COULD NOT EXCUSE HER BECAUSE SHE WAS ACQUAINTED BY SOMEBODY THAT THERE WAS A PLOT AGAINST THE PROTECTOR AND DID NOT DISCOVER IT +3080-5040-0002-280: SHE HAS TOLD NOW ALL THAT WAS TOLD HER BUT VOWS SHE WILL NEVER SAY FROM WHENCE SHE HAD IT WE SHALL SEE WHETHER HER RESOLUTIONS ARE AS UNALTERABLE AS THOSE OF MY LADY (TALMASH->THOMAS) +3080-5040-0003-281: I WONDER HOW SHE BEHAVED HERSELF WHEN SHE WAS MARRIED +3080-5040-0004-282: I NEVER SAW ANY ONE YET THAT DID NOT LOOK SIMPLY AND OUT OF COUNTENANCE (NOR EVER->WHATEVER) KNEW A WEDDING WELL DESIGNED BUT ONE AND THAT WAS OF TWO PERSONS WHO HAD TIME ENOUGH I CONFESS TO CONTRIVE IT AND NOBODY TO PLEASE (IN'T->IN) BUT THEMSELVES +3080-5040-0005-283: THE TRUTH IS I COULD NOT ENDURE TO BE MISSUS BRIDE IN A PUBLIC WEDDING TO BE MADE THE HAPPIEST PERSON ON EARTH +3080-5040-0006-284: DO NOT TAKE IT ILL FOR I WOULD ENDURE IT IF I COULD RATHER THAN FAIL BUT IN EARNEST I DO NOT THINK IT WERE POSSIBLE FOR ME +3080-5040-0007-285: YET IN EARNEST YOUR FATHER WILL NOT FIND MY BROTHER PEYTON WANTING IN CIVILITY THOUGH HE IS NOT A MAN OF MUCH COMPLIMENT UNLESS IT BE IN HIS LETTERS TO ME NOR AN UNREASONABLE PERSON IN ANYTHING SO HE WILL ALLOW HIM OUT OF HIS KINDNESS TO HIS WIFE TO SET A HIGHER VALUE UPON HER SISTER THAN SHE DESERVES +3080-5040-0008-286: MY AUNT TOLD ME NO LONGER (AGONE THAN->A GONDEN) YESTERDAY THAT I WAS THE MOST WILFUL WOMAN THAT EVER SHE KNEW AND HAD AN OBSTINACY OF SPIRIT NOTHING COULD OVERCOME TAKE HEED +3080-5040-0009-287: YOU SEE I GIVE YOU FAIR WARNING +3080-5040-0010-288: BY THE NEXT I SHALL BE GONE INTO KENT AND MY OTHER JOURNEY IS LAID ASIDE WHICH I AM NOT DISPLEASED AT BECAUSE IT WOULD HAVE BROKEN OUR INTERCOURSE VERY MUCH +3080-5040-0011-289: HERE ARE SOME VERSES OF (COWLEY'S->COLLEASE) TELL ME HOW YOU LIKE THEM +3080-5040-0012-290: I TOLD YOU IN MY LAST THAT MY (SUFFOLK->SUFFOLD) JOURNEY WAS LAID ASIDE AND THAT INTO KENT HASTENED +3080-5040-0013-291: IF I DROWN BY THE WAY THIS WILL BE MY LAST LETTER AND LIKE A WILL I BEQUEATH ALL MY KINDNESS TO YOU IN IT WITH A CHARGE NEVER TO BESTOW IT ALL UPON ANOTHER MISTRESS LEST MY GHOST RISE AGAIN AND HAUNT YOU +3080-5040-0014-292: INDEED I LIKE HIM EXTREMELY AND HE IS COMMENDED TO ME BY PEOPLE THAT KNOW HIM VERY WELL AND ARE ABLE TO JUDGE FOR A MOST EXCELLENT SERVANT AND FAITHFUL AS POSSIBLE +3080-5040-0015-293: BECAUSE YOU FIND FAULT WITH MY OTHER LETTERS THIS IS LIKE TO BE SHORTER THAN THEY I DID NOT INTEND IT SO THOUGH I CAN ASSURE YOU +3080-5040-0016-294: I DO NOT FIND IT THOUGH I AM TOLD I WAS SO EXTREMELY WHEN I BELIEVED YOU (LOVED->LOVE) ME +3080-5040-0017-295: BUT I AM CALLED UPON +3080-5040-0018-296: DIRECTED FOR YOUR MASTER +3080-5040-0019-297: I SEE YOU CAN (CHIDE->CHID) WHEN YOU PLEASE AND WITH AUTHORITY BUT I DESERVE IT I CONFESS AND ALL I CAN SAY FOR MYSELF IS THAT MY FAULT PROCEEDED FROM A VERY GOOD PRINCIPLE IN ME +3080-5040-0020-298: WE DARE NOT LET OUR TONGUES LIE MORE ON ONE SIDE OF OUR (MOUTHS->MOTHS) THAN (T'OTHER->THE OTHER) FOR FEAR OF OVERTURNING IT +3080-5040-0021-299: YOU ARE SATISFIED I HOPE (ERE->AT) THIS THAT I 
(SCAPED->ESCAPED) DROWNING +3080-5040-0022-300: BUT I AM TROUBLED MUCH YOU SHOULD MAKE SO ILL A JOURNEY TO (SO->SAW) LITTLE PURPOSE INDEED I (WRIT->WRITE) BY THE FIRST POST AFTER MY ARRIVAL HERE AND CANNOT IMAGINE HOW YOU CAME TO MISS OF MY LETTERS +3080-5040-0023-301: (HOW->OH) WELCOME YOU WILL BE BUT ALAS +3080-5040-0024-302: FOR MY LIFE I CANNOT BEAT INTO THEIR HEADS A PASSION THAT MUST BE SUBJECT TO NO DECAY (AN->AND) EVEN PERFECT KINDNESS THAT MUST LAST PERPETUALLY WITHOUT THE LEAST INTERMISSION +3080-5040-0025-303: THEY LAUGH TO HEAR ME SAY THAT ONE UNKIND WORD WOULD DESTROY ALL THE SATISFACTION OF MY LIFE AND THAT I SHOULD EXPECT OUR KINDNESS SHOULD INCREASE EVERY DAY IF IT WERE POSSIBLE BUT NEVER LESSEN +3080-5040-0026-304: WE GO ABROAD ALL DAY AND PLAY ALL NIGHT AND (SAY->SEE) OUR (PRAYERS->PRAY AS) WHEN WE HAVE TIME +3080-5040-0027-305: (WELL->WHILE) IN SOBER EARNEST NOW I WOULD NOT LIVE THUS (A->AT) TWELVEMONTH TO GAIN ALL THAT (THE->*) KING HAS LOST UNLESS IT WERE TO GIVE IT HIM AGAIN +3080-5040-0028-306: WILL YOU BE SO GOOD NATURED +3080-5040-0029-307: HE HAS ONE SON AND TIS THE FINEST BOY THAT E'ER YOU SAW AND HAS A NOBLE SPIRIT BUT YET STANDS IN THAT AWE OF HIS FATHER THAT ONE WORD FROM HIM IS AS MUCH AS TWENTY WHIPPINGS +3080-5040-0030-308: YOU MUST GIVE ME LEAVE TO ENTERTAIN (YOU THUS->YOURSELVES) WITH DISCOURSES OF THE FAMILY FOR I CAN TELL YOU NOTHING ELSE FROM HENCE +3080-5040-0031-309: NOT TO KNOW WHEN YOU (WOULD->HAD) COME HOME I CAN ASSURE YOU (NOR->NO) FOR ANY OTHER (OCCASION->CASION) OF MY OWN BUT WITH A COUSIN OF MINE THAT HAD LONG (DESIGNED->DESIGN) TO MAKE HERSELF SPORT WITH HIM AND DID NOT MISS OF HER AIM +3080-5040-0032-310: IN MY LIFE I NEVER HEARD SO RIDICULOUS A DISCOURSE AS HE MADE US AND NO OLD WOMAN WHO (PASSES->PAUSES) FOR A WITCH COULD HAVE BEEN MORE PUZZLED TO SEEK WHAT TO SAY TO REASONABLE PEOPLE THAN HE WAS +3080-5040-0033-311: EVER SINCE THIS ADVENTURE I HAVE HAD SO GREAT A BELIEF IN ALL THINGS (OF->FOR) THIS NATURE THAT I COULD NOT FORBEAR LAYING A (PEAS COD->PEA'S CART) WITH NINE PEAS (IN'T->INTO) UNDER MY DOOR YESTERDAY AND WAS INFORMED BY IT THAT MY HUSBAND'S NAME SHOULD BE THOMAS HOW DO YOU LIKE THAT +3331-159605-0000-695: SHE PULLED HER HAIR DOWN TURNED (HER SKIRT->AS GOOD) BACK PUT HER FEET ON THE FENDER AND TOOK (PUTTEL->POTTER) INTO HER LAP ALL OF WHICH ARRANGEMENTS SIGNIFIED THAT SOMETHING VERY IMPORTANT HAD GOT TO BE THOUGHT OVER AND SETTLED +3331-159605-0001-696: THE MORE PROPOSALS THE MORE (CREDIT->CREDITED) +3331-159605-0002-697: (I VE->I'VE) TRIED IT AND LIKED IT AND MAYBE THIS IS THE CONSEQUENCE OF THAT NIGHT'S FUN +3331-159605-0003-698: JUST SUPPOSE IT IS TRUE THAT HE DOES ASK ME AND I SAY YES +3331-159605-0004-699: WHAT A SPITEFUL THING I AM +3331-159605-0005-700: I COULD DO SO MUCH FOR ALL AT HOME HOW I SHOULD ENJOY THAT +3331-159605-0006-701: (LET ME SEE->THAT MISS C) HOW CAN I BEGIN +3331-159605-0007-702: HE HAS KNOWN HER ALL HER LIFE AND HAS A GOOD INFLUENCE OVER HER +3331-159605-0008-703: NOW AS POLLY WAS BY NO MEANS A PERFECT CREATURE I AM FREE TO CONFESS THAT THE OLD TEMPTATION ASSAILED (HER->HIM) MORE THAN ONCE (THAT->THE) WEEK FOR WHEN THE FIRST EXCITEMENT OF THE DODGING REFORM HAD SUBSIDED SHE MISSED THE PLEASANT LITTLE INTERVIEWS THAT USED TO PUT A CERTAIN (FLAVOR->FLAVOUR) OF (ROMANCE->ROMANS) INTO HER DULL HARD WORKING DAYS +3331-159605-0009-704: I DON'T THINK IT WAS HIS WEALTH ACCOMPLISHMENTS (OR POSITION->OPPOSITION) THAT MOST ATTRACTED POLLY THOUGH THESE DOUBTLESS POSSESSED A GREATER INFLUENCE THAN SHE SUSPECTED 
+3331-159605-0010-705: IT WAS THAT INDESCRIBABLE SOMETHING WHICH WOMEN ARE QUICK TO SEE AND FEEL IN MEN WHO HAVE BEEN BLESSED (WITH->*) WISE AND GOOD MOTHERS +3331-159605-0011-706: THIS HAD AN ESPECIAL CHARM TO POLLY FOR SHE SOON FOUND THAT THIS SIDE OF HIS CHARACTER WAS NOT SHOWN TO EVERY ONE +3331-159605-0012-707: (LATELY->PLATELY) THIS HAD CHANGED ESPECIALLY TOWARDS POLLY AND IT (FLATTERED->FLUTTERED) HER MORE THAN SHE WOULD CONFESS EVEN TO HERSELF +3331-159605-0013-708: AT FIRST SHE TRIED TO THINK SHE COULD BUT UNFORTUNATELY HEARTS ARE SO CONTRARY THAT THEY WON'T BE OBEDIENT TO REASON WILL OR EVEN (GRATITUDE->CREDITUDE) +3331-159605-0014-709: POLLY FELT A VERY CORDIAL FRIENDSHIP FOR MISTER SYDNEY BUT NOT ONE PARTICLE OF THE (LOVE WHICH IS->LAW PITCHES) THE ONLY (COIN->KIND) IN WHICH LOVE CAN BE TRULY PAID +3331-159605-0015-710: THIS FINISHED POLLY'S INDECISION AND AFTER THAT NIGHT SHE NEVER ALLOWED HERSELF TO DWELL UPON THE PLEASANT TEMPTATION WHICH CAME IN A GUISE PARTICULARLY ATTRACTIVE TO A YOUNG GIRL WITH A (SPICE->SPIES) OF THE OLD EVE IN HER COMPOSITION +3331-159605-0016-711: WHEN (SATURDAY->SEDATE) CAME POLLY STARTED AS USUAL FOR A VISIT TO (BECKY->BACKY) AND BESS BUT (COULD N'T->COULDN'T) RESIST STOPPING AT THE (SHAWS->SHORES) TO LEAVE A LITTLE PARCEL FOR FAN (THOUGH IT->THAT) WAS CALLING TIME +3331-159605-0017-712: A FOOLISH LITTLE SPEECH TO MAKE TO A (DOG->DARK) BUT YOU SEE POLLY WAS ONLY A TENDER HEARTED GIRL TRYING TO (DO->*) HER DUTY +3331-159605-0018-713: TAKE HOLD OF (MASTER CHARLEY'S->MASSA CHARLIE'S) HAND MISS (MAMIE->MAMMY) AND WALK PRETTY LIKE (WILLY->BILLY) AND (FLOSSY->FLOSSIE) SAID THE MAID +3331-159605-0019-714: AT A STREET CORNER A BLACK EYED (SCHOOL BOY->SCHOOLBOY) WAS PARTING FROM A ROSY FACED SCHOOL GIRL WHOSE MUSIC ROLL HE WAS RELUCTANTLY SURRENDERING +3331-159605-0020-715: HOW HE GOT THERE WAS NEVER VERY CLEAR TO POLLY BUT THERE HE WAS FLUSHED AND A LITTLE OUT OF BREATH BUT LOOKING SO GLAD TO SEE HER (THAT->TILL) SHE HAD (N'T->NOT) THE HEART TO BE STIFF AND COOL AS SHE HAD FULLY INTENDED TO BE WHEN THEY MET +3331-159605-0021-716: SHE REALLY COULD (N'T->NOT) HELP IT IT WAS SO PLEASANT TO SEE HIM AGAIN JUST WHEN SHE WAS FEELING SO LONELY +3331-159605-0022-717: THAT IS THE WAY I GET TO THE (ROTHS->ROSS) ANSWERED POLLY +3331-159605-0023-718: SHE DID NOT MEAN TO TELL BUT HIS FRANKNESS WAS SO AGREEABLE SHE FORGOT HERSELF +3331-159605-0024-719: BUT I KNOW HER BETTER AND I ASSURE YOU THAT SHE (DOES IMPROVE->DOESN'T PROVE) SHE TRIES TO (MEND HER->MENTAL) FAULTS THOUGH SHE WON'T OWN IT AND (WILL->WAS) SURPRISE YOU SOME DAY BY THE AMOUNT OF HEART AND SENSE AND GOODNESS SHE HAS GOT +3331-159605-0025-720: THANK YOU NO +3331-159605-0026-721: HOW LOVELY THE PARK LOOKS SHE SAID IN GREAT CONFUSION +3331-159605-0027-722: ASKED THE ARTFUL YOUNG MAN LAYING A TRAP INTO WHICH POLLY IMMEDIATELY FELL +3331-159605-0028-723: HE WAS QUICKER TO TAKE A HINT THAN SHE HAD EXPECTED AND BEING BOTH PROUD AND GENEROUS (RESOLVED->WE SOFT) TO SETTLE THE MATTER AT ONCE FOR POLLY'S SAKE AS WELL AS HIS OWN +3331-159605-0029-724: SO WHEN SHE MADE HER LAST (BRILLIANT->POINT) REMARK HE SAID QUIETLY WATCHING HER FACE KEENLY ALL THE WHILE I THOUGHT SO WELL (I M->I'M) GOING OUT OF TOWN ON BUSINESS FOR SEVERAL WEEKS SO YOU CAN ENJOY (YOUR->YOU) LITTLE BIT OF COUNTRY WITHOUT BEING ANNOYED BY ME (ANNOYED->ANNOY IT) +3331-159605-0030-725: SHE THOUGHT SHE HAD A GOOD DEAL OF THE COQUETTE IN HER AND (I VE->I'VE) NO DOUBT THAT WITH TIME AND TRAINING SHE WOULD HAVE BECOME A VERY DANGEROUS LITTLE PERSON BUT NOW 
SHE WAS FAR (TOO->TO) TRANSPARENT AND STRAIGHTFORWARD BY NATURE EVEN TO TELL A (WHITE LIE CLEVERLY->WIDE LIKE LEVELLY) +3331-159605-0031-726: HE WAS GONE BEFORE SHE COULD DO ANYTHING BUT LOOK UP AT HIM WITH A REMORSEFUL FACE AND SHE WALKED ON FEELING THAT THE FIRST AND PERHAPS THE ONLY LOVER SHE WOULD EVER HAVE HAD READ HIS ANSWER AND ACCEPTED (IT->*) IN SILENCE +3331-159605-0032-727: (POLLY->PARLEY) DID NOT RETURN TO HER (FAVORITE->FAVOURITE) WALK TILL SHE LEARNED (FROM->FOR) MINNIE THAT UNCLE HAD REALLY LEFT TOWN AND THEN SHE FOUND THAT HIS FRIENDLY COMPANY AND CONVERSATION WAS WHAT HAD MADE THE WAY SO PLEASANT AFTER ALL +3331-159605-0033-728: (WAGGING->WORKING) TO AND FRO AS USUAL WHAT'S THE NEWS WITH YOU +3331-159605-0034-729: PERHAPS (SHE LL JILT->SHE'LL CHILLED) HIM +3331-159605-0035-730: UTTERLY DONE WITH AND LAID UPON THE SHELF +3331-159605-0036-731: MINNIE SAID THE OTHER DAY SHE WISHED SHE WAS A (PIGEON->PITCHEN) SO SHE COULD PADDLE IN THE (PUDDLES->BOTTLES) AND (NOT->NUT) FUSS ABOUT RUBBERS +3331-159605-0037-732: NOW DON'T BE AFFECTED POLLY BUT JUST TELL ME LIKE A DEAR HAS (N'T->NOT) HE PROPOSED +3331-159605-0038-733: DON'T YOU THINK HE MEANS TO +3331-159605-0039-734: TRULY (TRULY->JULIE) FAN +3331-159605-0040-735: I DON'T MEAN TO BE PRYING BUT I REALLY THOUGHT HE DID +3331-159605-0041-736: WELL I ALWAYS MEANT TO TRY IT IF I GOT A CHANCE AND I HAVE +3331-159605-0042-737: I JUST GAVE HIM A HINT AND HE TOOK IT +3331-159605-0043-738: HE MEANT TO GO AWAY BEFORE THAT SO DON'T THINK HIS HEART IS BROKEN (OR->O) MIND WHAT (SILLY TATTLERS->SIDY TEDLERS) SAY +3331-159605-0044-739: HE UNDERSTOOD AND BEING A GENTLEMAN MADE NO FUSS +3331-159605-0045-740: BUT POLLY IT WOULD HAVE BEEN A GRAND THING FOR YOU +3331-159605-0046-741: I (M ODD->AM AUGHT) YOU KNOW AND PREFER TO BE AN INDEPENDENT SPINSTER AND TEACH MUSIC ALL MY DAYS +3331-159609-0000-742: NEVER MIND WHAT THE BUSINESS WAS (IT SUFFICES->ITS SURFACES) TO SAY THAT IT WAS A GOOD BEGINNING FOR A YOUNG MAN LIKE TOM WHO HAVING BEEN BORN AND BRED IN THE MOST CONSERVATIVE (CLASS->GLASS) OF THE MOST CONCEITED CITY IN NEW ENGLAND NEEDED JUST THE HEALTHY HEARTY SOCIAL INFLUENCES OF THE WEST TO WIDEN HIS VIEWS AND MAKE A MAN OF HIM +3331-159609-0001-743: FORTUNATELY EVERY ONE WAS SO BUSY WITH THE NECESSARY PREPARATIONS THAT THERE WAS NO TIME FOR (ROMANCE->ROMANS) OF ANY SORT AND THE FOUR YOUNG PEOPLE WORKED TOGETHER AS SOBERLY AND SENSIBLY AS IF ALL SORTS OF EMOTIONS WERE NOT (BOTTLED->BOTHERED) UP IN THEIR RESPECTIVE HEARTS +3331-159609-0002-744: PITY THAT THE END SHOULD COME SO SOON BUT THE HOUR DID ITS WORK AND (WENT->WHEN) ITS WAY LEAVING A CLEARER ATMOSPHERE BEHIND (THOUGH->THAT) THE YOUNG FOLKS DID NOT SEE IT THEN FOR THEIR EYES WERE DIM BECAUSE OF THE PARTINGS THAT MUST BE +3331-159609-0003-745: IF IT HAD NOT BEEN FOR TWO THINGS I FEAR SHE NEVER WOULD HAVE STOOD A SUMMER IN TOWN BUT SYDNEY OFTEN CALLED (TILL->TO) HIS VACATION CAME AND A VOLUMINOUS CORRESPONDENCE WITH POLLY BEGUILED THE LONG DAYS +3331-159609-0004-746: (TOM WROTE ONCE->TUMULT ONES) A WEEK TO HIS MOTHER BUT (THE LETTERS->THEY LET US) WERE SHORT AND NOT VERY SATISFACTORY FOR MEN NEVER DO (TELL->SO) THE INTERESTING LITTLE THINGS THAT WOMEN BEST LIKE TO HEAR +3331-159609-0005-747: NO I (M->AM) ONLY TIRED HAD A GOOD DEAL TO DO LATELY AND THE DULL WEATHER MAKES ME (JUST A TRIFLE->CHOSE TO TRAVEL) BLUE +3331-159609-0006-748: FORGIVE ME POLLY BUT I CAN'T HELP SAYING IT FOR IT IS THERE AND I WANT TO BE AS TRUE TO YOU AS YOU WERE TO ME IF I CAN +3331-159609-0007-749: I (TRY->TRIED) NOT 
(TO->A) DECEIVE MYSELF BUT IT DOES SEEM AS IF THERE WAS A CHANCE OF HAPPINESS FOR ME +3331-159609-0008-750: THANK HEAVEN FOR THAT +3331-159609-0009-751: CRIED POLLY WITH THE (HEARTIEST->HARDIEST) SATISFACTION IN HER VOICE +3331-159609-0010-752: POOR POLLY WAS SO TAKEN BY SURPRISE THAT SHE HAD NOT A WORD TO SAY +3331-159609-0011-753: NONE WERE NEEDED (*->HOTELED) HER (TELLTALE->*) FACE ANSWERED FOR HER AS WELL AS THE IMPULSE WHICH MADE HER HIDE HER HEAD IN THE SOFA CUSHION LIKE A FOOLISH OSTRICH (WHEN->AND) THE (HUNTERS->HANDLES) ARE AFTER IT +3331-159609-0012-754: ONCE OR TWICE BUT SORT OF (JOKINGLY->CHOKINGLY) AND I THOUGHT IT WAS ONLY SOME LITTLE FLIRTATION +3331-159609-0013-755: IT WAS SO STUPID OF ME NOT TO GUESS BEFORE +3331-159609-0014-756: IT WAS (SO->HER) TENDER EARNEST AND DEFIANT THAT FANNY FORGOT THE DEFENCE OF HER OWN (LOVER->LOVE) IN ADMIRATION OF POLLY'S LOYALTY TO HERS FOR THIS FAITHFUL ALL ABSORBING LOVE WAS A NEW REVELATION TO FANNY WHO WAS USED TO HEARING HER FRIENDS BOAST OF TWO OR THREE LOVERS A YEAR AND CALCULATE THEIR RESPECTIVE VALUES WITH ALMOST AS MUCH COOLNESS AS THE YOUNG MEN DISCUSSED THE FORTUNES OF THE GIRLS THEY WISHED FOR BUT COULD NOT AFFORD TO MARRY +3331-159609-0015-757: I HOPE MARIA (BAILEY IS ALL HE THINKS HER->BAILEY'S ONLY THINK SIR) SHE ADDED SOFTLY FOR I COULD (N'T->NOT) BEAR TO HAVE HIM DISAPPOINTED AGAIN +3331-159609-0016-758: SAID FANNY TURNING HOPEFUL ALL AT ONCE +3331-159609-0017-759: SUPPOSE I SAY A WORD TO TOM JUST INQUIRE AFTER HIS HEART IN A GENERAL WAY YOU KNOW AND GIVE HIM A CHANCE TO TELL ME IF (THERE IS->THERE'S) ANYTHING TO TELL +3331-159609-0018-760: BEAR IT PEOPLE ALWAYS DO (BEAR->BARE) THINGS SOMEHOW ANSWERED POLLY LOOKING AS IF SENTENCE HAD BEEN PASSED UPON HER +3331-159609-0019-761: IT WAS A VERY DIFFERENT (WINTER->WINDOW) FROM THE LAST (FOR BOTH->ABOVE) THE GIRLS +3331-159609-0020-762: IF (FANNY->ANY) WANTED TO SHOW HIM WHAT SHE COULD DO TOWARD MAKING A PLEASANT HOME SHE CERTAINLY SUCCEEDED (BETTER THAN->BY THEN) SHE SUSPECTED FOR IN SPITE OF MANY FAILURES AND DISCOURAGEMENTS BEHIND THE SCENES THE LITTLE HOUSE BECAME A MOST ATTRACTIVE PLACE TO MISTER (SYDNEY->SIDNEY) AT LEAST FOR HE WAS MORE THE HOUSE FRIEND THAN EVER AND SEEMED DETERMINED TO PROVE THAT CHANGE OF FORTUNE MADE NO DIFFERENCE TO HIM +3331-159609-0021-763: SHE KEPT MUCH AT HOME WHEN THE DAY'S WORK WAS DONE FINDING IT PLEASANTER TO SIT DREAMING (OVER->OF A) BOOK OR SEWING ALONE THAN TO EXERT HERSELF EVEN TO GO TO THE (SHAWS->SHORES) +3331-159609-0022-764: POLLY WAS NOT AT ALL LIKE HERSELF THAT (WINTER->WINDOW) AND THOSE NEAREST TO HER SAW AND (WONDERED->WANDERED) AT IT MOST +3331-159609-0023-765: FOR NED WAS SO ABSORBED IN BUSINESS THAT HE (IGNORED->NURED) THE WHOLE (BAILEY->BAILIQUE) QUESTION AND LEFT THEM IN (UTTER->OTHER) DARKNESS +3331-159609-0024-766: (FANNY->THEN HE) CAME WALKING IN UPON HER ONE DAY LOOKING AS IF SHE (BROUGHT TIDINGS->POURED HIDINGS) OF SUCH GREAT JOY THAT SHE HARDLY KNEW HOW TO TELL THEM +3331-159609-0025-767: BUT IF (WORK->WORD) BASKETS WERE GIFTED WITH POWERS OF SPEECH THEY COULD TELL STORIES MORE TRUE AND TENDER THAN ANY (WE READ->REED) +3528-168656-0000-864: SHE HAD EVEN BEEN IN SOCIETY BEFORE THE REVOLUTION +3528-168656-0001-865: IT WAS HER PLEASURE AND HER VANITY TO DRAG IN THESE NAMES ON EVERY PRETEXT +3528-168656-0002-866: EVERY YEAR SHE SOLEMNLY RENEWED HER VOWS AND AT THE MOMENT OF TAKING THE OATH SHE SAID TO THE PRIEST MONSEIGNEUR SAINT FRANCOIS GAVE IT TO MONSEIGNEUR (SAINT JULIEN->SAY JULIAN) MONSEIGNEUR SAINT (JULIEN->JULIAN) GAVE 
IT TO MONSEIGNEUR SAINT (EUSEBIUS MONSEIGNEUR->EUSCIBIUS MONSIEUR) SAINT (EUSEBIUS->USUBIUS) GAVE IT TO MONSEIGNEUR SAINT (PROCOPIUS->PROCOPIAS) ET CETERA ET CETERA +3528-168656-0003-867: AND THE (SCHOOL GIRLS->SCHOOLGIRLS) WOULD BEGIN TO LAUGH NOT IN THEIR SLEEVES BUT UNDER (THEIR->THE) VEILS CHARMING LITTLE STIFLED LAUGHS WHICH MADE THE VOCAL MOTHERS FROWN +3528-168656-0004-868: IT WAS A CENTURY WHICH SPOKE THROUGH HER BUT IT WAS THE EIGHTEENTH CENTURY +3528-168656-0005-869: THE RULE OF (FONTEVRAULT->FONTREVAL) DID NOT FORBID THIS +3528-168656-0006-870: SHE WOULD NOT SHOW (THIS OBJECT->THE SUBJECT) TO (ANYONE->ANY ONE) +3528-168656-0007-871: THUS IT FURNISHED A SUBJECT OF COMMENT FOR ALL THOSE WHO WERE UNOCCUPIED OR BORED IN THE CONVENT +3528-168656-0008-872: SOME UNIQUE CHAPLET SOME AUTHENTIC RELIC +3528-168656-0009-873: THEY LOST THEMSELVES IN CONJECTURES +3528-168656-0010-874: WHEN THE POOR OLD WOMAN DIED THEY RUSHED TO HER CUPBOARD MORE HASTILY THAN WAS FITTING PERHAPS AND OPENED IT +3528-168656-0011-875: HE IS RESISTING FLUTTERING HIS TINY WINGS AND STILL MAKING AN EFFORT TO FLY BUT THE (DANCER IS->DANCERS) LAUGHING WITH (A->US) SATANICAL AIR +3528-168656-0012-876: MORAL LOVE CONQUERED BY THE COLIC +3528-168669-0000-877: THE (PRIORESS->PRIORS) RETURNED AND SEATED HERSELF ONCE MORE ON HER CHAIR +3528-168669-0001-878: WE WILL PRESENT A (STENOGRAPHIC->SYNOGRAPHIC) REPORT OF THE DIALOGUE WHICH THEN ENSUED TO THE BEST OF OUR ABILITY +3528-168669-0002-879: FATHER (FAUVENT->FERVENT) +3528-168669-0003-880: REVEREND MOTHER DO YOU KNOW THE CHAPEL +3528-168669-0004-881: AND YOU HAVE BEEN IN THE CHOIR IN PURSUANCE OF YOUR DUTIES TWO OR THREE TIMES +3528-168669-0005-882: THERE IS A STONE TO BE RAISED HEAVY +3528-168669-0006-883: THE SLAB OF THE PAVEMENT WHICH IS AT THE SIDE OF THE ALTAR +3528-168669-0007-884: THE (SLAB->FLAP) WHICH CLOSES THE VAULT YES +3528-168669-0008-885: IT WOULD BE A GOOD THING TO HAVE TWO MEN FOR IT +3528-168669-0009-886: A WOMAN IS NEVER A MAN +3528-168669-0010-887: BECAUSE (DOM MABILLON->DON MARBYLON) GIVES FOUR HUNDRED AND SEVENTEEN EPISTLES OF SAINT BERNARD WHILE (MERLONUS HORSTIUS->MERLUNUS HORSES) ONLY GIVES THREE HUNDRED AND SIXTY SEVEN I DO NOT DESPISE (MERLONUS HORSTIUS->MERLINUS HORSES) NEITHER DO I +3528-168669-0011-888: (MERIT->MARRIAGE) CONSISTS IN WORKING ACCORDING TO ONE'S STRENGTH A CLOISTER IS NOT A (DOCK YARD->DOCKYARD) +3528-168669-0012-889: (AND->ADD) A WOMAN IS NOT A MAN BUT MY BROTHER IS THE STRONG ONE THOUGH +3528-168669-0013-890: AND CAN YOU GET A (LEVER->LOVER) +3528-168669-0014-891: THERE IS A RING IN THE STONE +3528-168669-0015-892: I WILL PUT THE (LEVER->LOVER) THROUGH IT +3528-168669-0016-893: THAT IS GOOD REVEREND MOTHER I WILL OPEN THE VAULT +3528-168669-0017-894: WILL THAT BE ALL NO +3528-168669-0018-895: GIVE ME YOUR ORDERS VERY REVEREND MOTHER +3528-168669-0019-896: (FAUVENT->FOR THAT) WE HAVE CONFIDENCE IN YOU +3528-168669-0020-897: I AM HERE TO DO ANYTHING YOU WISH +3528-168669-0021-898: AND TO HOLD YOUR PEACE ABOUT EVERYTHING YES (REVEREND->ROBIN) MOTHER +3528-168669-0022-899: WHEN THE (VAULT->VOLT) IS OPEN I WILL CLOSE IT AGAIN +3528-168669-0023-900: BUT BEFORE THAT WHAT REVEREND MOTHER +3528-168669-0024-901: FATHER (FAUVENT->FERVENT) REVEREND MOTHER +3528-168669-0025-902: YOU KNOW THAT A MOTHER DIED THIS MORNING +3528-168669-0026-903: NO DID YOU NOT HEAR THE BELL +3528-168669-0027-904: NOTHING CAN BE HEARD AT THE BOTTOM OF THE GARDEN REALLY +3528-168669-0028-905: AND THEN THE WIND IS NOT BLOWING IN MY DIRECTION THIS MORNING 
+3528-168669-0029-906: IT WAS MOTHER CRUCIFIXION +3528-168669-0030-907: THREE YEARS AGO MADAME DE (BETHUNE->BESOON) A (JANSENIST->JENSONIST) TURNED ORTHODOX MERELY FROM HAVING SEEN MOTHER CRUCIFIXION AT PRAYER AH +3528-168669-0031-908: THE MOTHERS HAVE TAKEN HER TO THE DEAD ROOM WHICH OPENS ON THE CHURCH I KNOW +3528-168669-0032-909: A FINE SIGHT IT WOULD BE TO SEE A MAN ENTER THE DEAD ROOM MORE OFTEN +3528-168669-0033-910: HEY MORE OFTEN +3528-168669-0034-911: WHAT DO YOU SAY +3528-168669-0035-912: I SAY MORE OFTEN MORE OFTEN THAN WHAT +3528-168669-0036-913: REVEREND MOTHER I DID NOT SAY MORE OFTEN THAN WHAT I SAID MORE OFTEN +3528-168669-0037-914: BUT I DID NOT SAY MORE OFTEN +3528-168669-0038-915: AT THAT MOMENT NINE O'CLOCK STRUCK +3528-168669-0039-916: AT NINE O'CLOCK IN THE MORNING AND AT ALL HOURS PRAISED AND (ADORED->ENDURED) BE THE MOST HOLY SACRAMENT OF THE ALTAR SAID THE (PRIORESS->PROGRESS) +3528-168669-0040-917: IT CUT MORE OFTEN SHORT +3528-168669-0041-918: FAUCHELEVENT MOPPED HIS FOREHEAD +3528-168669-0042-919: IN HER LIFETIME MOTHER CRUCIFIXION MADE CONVERTS AFTER HER DEATH SHE WILL PERFORM MIRACLES SHE WILL +3528-168669-0043-920: FATHER (FAUVENT->FUVENT) THE COMMUNITY HAS BEEN BLESSED IN MOTHER (CRUCIFIXION->CURSE FICTION) +3528-168669-0044-921: SHE RETAINED HER CONSCIOUSNESS TO THE VERY LAST MOMENT +3528-168669-0045-922: SHE GAVE US HER LAST COMMANDS +3528-168669-0046-923: IF YOU HAD A LITTLE MORE FAITH AND IF YOU COULD HAVE BEEN IN (HER CELL->HERSELF) SHE WOULD HAVE CURED YOUR LEG MERELY BY TOUCHING IT SHE SMILED +3528-168669-0047-924: THERE WAS SOMETHING OF PARADISE IN THAT DEATH +3528-168669-0048-925: FAUCHELEVENT THOUGHT THAT IT WAS AN ORISON WHICH SHE WAS FINISHING +3528-168669-0049-926: (FAUCHELEVENT->FORCHELEVENT) HELD HIS PEACE SHE WENT ON +3528-168669-0050-927: I HAVE CONSULTED UPON THIS POINT MANY ECCLESIASTICS (LABORING->LABOURING) IN OUR LORD WHO OCCUPY THEMSELVES IN THE EXERCISES OF THE CLERICAL LIFE AND WHO BEAR WONDERFUL FRUIT +3528-168669-0051-928: FORTUNATELY THE (PRIORESS->PRIESTS) COMPLETELY ABSORBED IN HER OWN THOUGHTS DID NOT HEAR IT +3528-168669-0052-929: SHE CONTINUED (FATHER FAUVENT->FURTHER PREVENT) +3528-168669-0053-930: YES REVEREND MOTHER +3528-168669-0054-931: SAINT (TERENTIUS->TORRENTIUS) BISHOP OF PORT (WHERE->WEAR) THE MOUTH OF THE (TIBER->TYBER) EMPTIES INTO THE SEA REQUESTED THAT ON HIS TOMB MIGHT BE ENGRAVED THE SIGN WHICH WAS PLACED ON THE GRAVES OF (PARRICIDES->PARASITES) IN THE HOPE THAT PASSERS BY WOULD SPIT ON HIS TOMB THIS WAS DONE +3528-168669-0055-932: THE DEAD MUST BE OBEYED SO BE IT +3528-168669-0056-933: FOR THAT MATTER NO REVEREND MOTHER +3528-168669-0057-934: FATHER (FAUVENT->PREVENT) MOTHER CRUCIFIXION WILL BE INTERRED IN THE COFFIN IN WHICH SHE HAS SLEPT FOR THE LAST TWENTY YEARS THAT IS JUST +3528-168669-0058-935: IT IS A CONTINUATION OF HER SLUMBER +3528-168669-0059-936: SO I SHALL HAVE TO NAIL UP THAT COFFIN YES +3528-168669-0060-937: I AM AT THE ORDERS OF THE VERY REVEREND COMMUNITY +3528-168669-0061-938: (THE FOUR->BEFORE) MOTHER (PRECENTORS->PRESENTERS) WILL ASSIST YOU +3528-168669-0062-939: NO IN (LOWERING->LORING) THE COFFIN +3528-168669-0063-940: WHERE INTO THE VAULT +3528-168669-0064-941: FAUCHELEVENT STARTED THE VAULT UNDER THE ALTAR +3528-168669-0065-942: UNDER THE ALTAR BUT +3528-168669-0066-943: YOU WILL HAVE AN IRON BAR YES BUT +3528-168669-0067-944: YOU WILL RAISE THE STONE WITH THE BAR BY MEANS OF THE RING BUT +3528-168669-0068-945: THE DEAD MUST BE OBEYED TO BE BURIED IN THE VAULT UNDER THE ALTAR OF THE 
CHAPEL NOT TO GO TO PROFANE EARTH TO REMAIN THERE IN DEATH WHERE SHE PRAYED WHILE LIVING SUCH WAS THE LAST WISH OF MOTHER CRUCIFIXION +3528-168669-0069-946: SHE ASKED IT OF US THAT IS TO SAY COMMANDED US +3528-168669-0070-947: BUT IT IS FORBIDDEN +3528-168669-0071-948: OH I AM A STONE IN YOUR WALLS +3528-168669-0072-949: THINK FATHER (FAUVENT->FERVENT) IF SHE WERE TO WORK MIRACLES HERE +3528-168669-0073-950: WHAT A GLORY OF GOD FOR THE COMMUNITY AND MIRACLES ISSUE FROM TOMBS +3528-168669-0074-951: BUT REVEREND MOTHER IF THE (AGENT OF->AGENTIVE) THE SANITARY COMMISSION +3528-168669-0075-952: BUT THE COMMISSARY OF POLICE +3528-168669-0076-953: (CHONODEMAIRE->SHADOW DE MAR) ONE OF THE SEVEN GERMAN KINGS WHO ENTERED AMONG THE GAULS UNDER THE EMPIRE OF CONSTANTIUS EXPRESSLY RECOGNIZED THE RIGHT OF NUNS TO BE BURIED IN RELIGION THAT IS TO SAY BENEATH THE ALTAR +3528-168669-0077-954: THE WORLD IS NOTHING IN THE PRESENCE OF THE CROSS +3528-168669-0078-955: (MARTIN->MERTON) THE ELEVENTH GENERAL OF THE CARTHUSIANS GAVE TO HIS ORDER THIS DEVICE STAT (CRUX DUM VOLVITUR->CREW DOOMFUL) ORBIS +3528-168669-0079-956: THE (PRIORESS->PIRRUS) WHO WAS USUALLY SUBJECTED TO THE BARRIER OF SILENCE AND WHOSE RESERVOIR WAS (OVERFULL->OVER FULL) ROSE AND EXCLAIMED WITH THE LOQUACITY OF A DAM WHICH HAS BROKEN AWAY +3528-168669-0080-957: I HAVE ON MY RIGHT (BENOIT->BENOIS) AND (ON->ALL) MY LEFT BERNARD WHO WAS BERNARD +3528-168669-0081-958: THE FIRST ABBOT OF (CLAIRVAUX->CLERVAL) +3528-168669-0082-959: HIS ORDER HAS PRODUCED FORTY POPES TWO HUNDRED CARDINALS FIFTY PATRIARCHS SIXTEEN HUNDRED ARCHBISHOPS FOUR THOUSAND SIX HUNDRED BISHOPS FOUR EMPERORS TWELVE EMPRESSES FORTY SIX KINGS FORTY ONE QUEENS THREE THOUSAND SIX HUNDRED (CANONIZED->CANNONIZED) SAINTS AND HAS BEEN IN EXISTENCE FOR FOURTEEN HUNDRED YEARS +3528-168669-0083-960: ON ONE SIDE SAINT BERNARD ON THE OTHER THE AGENT OF THE (SANITARY->SENATORY) DEPARTMENT +3528-168669-0084-961: GOD SUBORDINATED TO THE COMMISSARY OF POLICE SUCH (IS->AS) THE AGE SILENCE (FAUVENT->FOR VAUGHAN) +3528-168669-0085-962: NO ONE DOUBTS THE RIGHT OF THE MONASTERY TO SEPULTURE +3528-168669-0086-963: ONLY FANATICS AND THOSE IN ERROR DENY IT +3528-168669-0087-964: WE LIVE IN TIMES OF TERRIBLE CONFUSION +3528-168669-0088-965: WE ARE IGNORANT AND IMPIOUS +3528-168669-0089-966: AND THEN RELIGION IS ATTACKED WHY +3528-168669-0090-967: BECAUSE THERE HAVE BEEN BAD PRIESTS BECAUSE (SAGITTAIRE->SAGOTARA) BISHOP OF GAP WAS (THE->A) BROTHER OF (SALONE->SALON) BISHOP OF (EMBRUN->AMBRON) AND BECAUSE BOTH OF THEM FOLLOWED (MOMMOL->MAMMA) +3528-168669-0091-968: THEY PERSECUTE THE SAINTS +3528-168669-0092-969: THEY SHUT THEIR EYES TO THE TRUTH DARKNESS IS THE RULE +3528-168669-0093-970: THE MOST FEROCIOUS BEASTS ARE BEASTS WHICH ARE BLIND +3528-168669-0094-971: OH HOW WICKED PEOPLE ARE +3528-168669-0095-972: BY ORDER OF THE KING SIGNIFIES TO DAY BY ORDER OF THE REVOLUTION +3528-168669-0096-973: ONE NO LONGER KNOWS WHAT IS DUE TO THE LIVING OR TO THE DEAD A HOLY DEATH IS PROHIBITED +3528-168669-0097-974: (GAUTHIER->GATHIERRE) BISHOP OF (CHALONS->CHALON) HELD HIS OWN IN THIS MATTER AGAINST OTHO DUKE OF BURGUNDY +3528-168669-0098-975: THE (PRIORESS->PRIESTS) TOOK BREATH THEN TURNED TO FAUCHELEVENT +3528-168669-0099-976: YOU WILL CLOSE THE COFFIN THE SISTERS WILL CARRY IT TO THE CHAPEL +3528-168669-0100-977: THE OFFICE FOR THE DEAD WILL THEN BE (SAID->SET) +3528-168669-0101-978: BUT SHE WILL HEAR SHE WILL NOT LISTEN +3528-168669-0102-979: BESIDES WHAT THE (CLOISTER->CLOSER) KNOWS THE WORLD LEARNS NOT 
+3528-168669-0103-980: A PAUSE (ENSUED->IN SUIT) +3528-168669-0104-981: YOU WILL REMOVE YOUR (BELL->BELT) +3528-168669-0105-982: HAS THE DOCTOR FOR THE DEAD PAID HIS VISIT +3528-168669-0106-983: HE WILL PAY IT AT FOUR O'CLOCK TO DAY +3528-168669-0107-984: THE PEAL WHICH ORDERS THE DOCTOR FOR THE (DEAD->DEBT) TO BE SUMMONED HAS ALREADY BEEN (RUNG->RUN) +3528-168669-0108-985: BUT YOU DO NOT UNDERSTAND ANY OF THE PEALS +3528-168669-0109-986: THAT IS WELL FATHER (FAUVENT->VENT) +3528-168669-0110-987: WHERE WILL YOU OBTAIN IT +3528-168669-0111-988: I HAVE MY HEAP OF OLD IRON AT THE BOTTOM OF THE GARDEN +3528-168669-0112-989: (REVEREND->REVERE) MOTHER WHAT +3528-168669-0113-990: IF YOU WERE EVER TO HAVE ANY OTHER JOBS OF THIS SORT MY BROTHER IS THE STRONG MAN FOR YOU A PERFECT TURK +3528-168669-0114-991: YOU WILL DO IT AS SPEEDILY AS POSSIBLE +3528-168669-0115-992: I CANNOT WORK VERY FAST I AM INFIRM THAT IS WHY I REQUIRE AN ASSISTANT I LIMP +3528-168669-0116-993: EVERYTHING MUST HAVE BEEN COMPLETED A GOOD QUARTER OF AN HOUR BEFORE THAT +3528-168669-0117-994: I WILL DO ANYTHING TO PROVE MY ZEAL TOWARDS THE COMMUNITY THESE ARE MY ORDERS I AM TO NAIL UP THE COFFIN +3528-168669-0118-995: AT ELEVEN O'CLOCK EXACTLY I AM TO BE IN THE CHAPEL +3528-168669-0119-996: MOTHER ASCENSION WILL BE THERE TWO MEN WOULD BE BETTER +3528-168669-0120-997: HOWEVER NEVER MIND I SHALL HAVE MY (LEVER->LOVER) +3528-168669-0121-998: AFTER WHICH THERE WILL BE NO TRACE OF ANYTHING +3528-168669-0122-999: THE GOVERNMENT WILL HAVE NO SUSPICION +3528-168669-0123-1000: THE EMPTY COFFIN REMAINS THIS PRODUCED A (PAUSE->PULSE) +3528-168669-0124-1001: WHAT IS TO BE DONE WITH THAT COFFIN FATHER (FAUVENT->PREVENT) +3528-168669-0125-1002: IT WILL BE GIVEN TO THE EARTH EMPTY +3528-168669-0126-1003: AH (THE DE->LIDA) EXCLAIMED FAUCHELEVENT +3528-168669-0127-1004: THE (VIL->VILLE) STUCK FAST IN HIS THROAT +3528-168669-0128-1005: HE MADE HASTE TO IMPROVISE AN EXPEDIENT TO MAKE HER FORGET THE OATH +3528-168669-0129-1006: I WILL PUT EARTH IN THE COFFIN (REVEREND->REVERED) MOTHER THAT WILL PRODUCE THE EFFECT OF A CORPSE +3528-168669-0130-1007: I WILL MAKE THAT MY SPECIAL BUSINESS +3538-142836-0000-1567: (GENERAL->JOE) OBSERVATIONS ON PRESERVES CONFECTIONARY ICES AND DESSERT DISHES +3538-142836-0001-1568: THE EXPENSE OF PRESERVING THEM WITH SUGAR IS A SERIOUS OBJECTION FOR EXCEPT (THE->A) SUGAR IS USED IN CONSIDERABLE (QUANTITIES->QUALITIES) THE SUCCESS IS VERY UNCERTAIN +3538-142836-0002-1569: FRUIT GATHERED IN WET OR FOGGY WEATHER WILL SOON BE (MILDEWED->MILDED) AND BE OF NO SERVICE FOR PRESERVES +3538-142836-0003-1570: BUT TO DISTINGUISH (THESE PROPERLY->HIS PROPER) REQUIRES VERY GREAT ATTENTION AND CONSIDERABLE EXPERIENCE +3538-142836-0004-1571: IF YOU DIP THE FINGER INTO THE (SYRUP->SERF) AND APPLY IT TO THE THUMB THE TENACITY OF THE (SYRUP->SERF) WILL ON SEPARATING THE FINGER AND THUMB AFFORD A THREAD WHICH SHORTLY BREAKS THIS IS THE LITTLE THREAD +3538-142836-0005-1572: LET IT BOIL UP AGAIN THEN TAKE IT OFF AND REMOVE CAREFULLY THE SCUM THAT HAS RISEN +3538-142836-0006-1573: IT IS CONSIDERED TO BE SUFFICIENTLY BOILED WHEN SOME TAKEN UP IN A SPOON POURS OUT LIKE OIL +3538-142836-0007-1574: BEFORE SUGAR WAS IN USE HONEY WAS EMPLOYED TO PRESERVE MANY VEGETABLE PRODUCTIONS THOUGH THIS SUBSTANCE (HAS->IS) NOW GIVEN WAY TO THE JUICE OF THE SUGAR CANE +3538-142836-0008-1575: FOURTEEN NINETY NINE +3538-142836-0009-1576: BOIL THEM UP THREE DAYS SUCCESSIVELY SKIMMING EACH TIME AND THEY WILL THEN BE FINISHED AND IN A STATE FIT TO BE PUT INTO POTS 
FOR USE +3538-142836-0010-1577: THE REASON WHY THE FRUIT IS EMPTIED OUT OF THE PRESERVING PAN INTO AN EARTHEN PAN IS THAT THE ACID OF THE FRUIT ACTS UPON THE COPPER OF WHICH THE PRESERVING PANS ARE USUALLY MADE +3538-142836-0011-1578: FROM THIS EXAMPLE THE PROCESS OF PRESERVING FRUITS BY SYRUP (WILL->WOULD) BE EASILY COMPREHENDED +3538-142836-0012-1579: (THEY->THIS) SHOULD BE DRIED IN THE STOVE OR OVEN ON A (SIEVE->SEA) AND TURNED EVERY SIX OR EIGHT HOURS FRESH (POWDERED->PADDED) SUGAR BEING SIFTED OVER THEM EVERY TIME THEY ARE TURNED +3538-142836-0013-1580: IN THIS WAY IT IS ALSO THAT ORANGE AND (LEMON CHIPS->LINENSHIPS) ARE PRESERVED +3538-142836-0014-1581: MARMALADES JAMS AND FRUIT (PASTES->PACE) ARE OF THE SAME NATURE AND ARE NOW IN VERY GENERAL (REQUEST->QUEST) +3538-142836-0015-1582: (MARMALADES->MARVELL EATS) AND (JAMS->JAMES) DIFFER LITTLE FROM EACH OTHER (THEY ARE->THEIR) PRESERVES OF (A->*) HALF LIQUID CONSISTENCY MADE BY BOILING THE PULP OF FRUITS AND SOMETIMES PART OF THE (RINDS->RHINES) WITH SUGAR +3538-142836-0016-1583: THAT THEY MAY KEEP IT IS NECESSARY NOT TO BE SPARING OF SUGAR FIFTEEN O THREE +3538-142836-0017-1584: IN ALL THE OPERATIONS FOR PRESERVE MAKING WHEN THE PRESERVING PAN IS USED IT SHOULD NOT BE PLACED ON THE FIRE BUT ON A (TRIVET->TRIBUT) UNLESS THE (JAM->JAME) IS MADE ON A HOT PLATE WHEN THIS IS NOT NECESSARY +3538-142836-0018-1585: CONFECTIONARY FIFTEEN O EIGHT +3538-142836-0019-1586: IN SPEAKING OF CONFECTIONARY (IT->*) SHOULD BE REMARKED THAT ALL THE VARIOUS PREPARATIONS ABOVE NAMED COME STRICTLY SPEAKING UNDER THAT HEAD FOR THE VARIOUS FRUITS FLOWERS HERBS (ROOTS AND JUICES->OR SAUCES) WHICH (WHEN->ONE) BOILED WITH SUGAR WERE FORMERLY EMPLOYED IN PHARMACY AS WELL AS FOR SWEETMEATS WERE CALLED CONFECTIONS FROM THE LATIN WORD (CONFICERE->CONFUSE) TO MAKE UP BUT THE TERM CONFECTIONARY EMBRACES A VERY LARGE CLASS INDEED OF SWEET FOOD MANY KINDS OF WHICH SHOULD NOT BE ATTEMPTED IN THE ORDINARY (CUISINE->COSEINE) +3538-142836-0020-1587: (THE->A) THOUSAND AND ONE ORNAMENTAL DISHES THAT ADORN THE TABLES OF THE WEALTHY SHOULD BE PURCHASED FROM THE CONFECTIONER THEY CANNOT PROFITABLY BE MADE AT HOME +3538-142836-0021-1588: HOWEVER AS LATE AS THE (REIGNS->REIGN) OF OUR TWO LAST GEORGES FABULOUS SUMS WERE OFTEN EXPENDED UPON FANCIFUL (DESSERTS->DESERTS) +3538-142836-0022-1589: THE SHAPE OF THE (DISHES->DISH IS) VARIES AT DIFFERENT PERIODS THE PREVAILING FASHION AT PRESENT BEING OVAL AND CIRCULAR DISHES ON STEMS +3538-142836-0023-1590: (ICES->ISIS) +3538-142836-0024-1591: (AT DESSERTS->I DESERTS) OR AT SOME EVENING PARTIES ICES ARE SCARCELY (TO->DID) BE DISPENSED WITH +3538-142836-0025-1592: THE (SPADDLE->SPATTLE) IS GENERALLY MADE OF COPPER KEPT BRIGHT AND CLEAN +3538-142836-0026-1593: THEY SHOULD BE TAKEN IMMEDIATELY AFTER THE REPAST OR SOME HOURS AFTER BECAUSE THE TAKING (*->OF) THESE SUBSTANCES DURING THE PROCESS OF DIGESTION IS APT TO PROVOKE INDISPOSITION +3538-163619-0000-1500: THERE WAS ONCE ON (A->THE) TIME A WIDOWER WHO HAD A (SON->SUDDEN) AND A DAUGHTER BY HIS FIRST (WIFE->WIF) +3538-163619-0001-1501: FROM THE VERY DAY THAT THE NEW WIFE CAME INTO THE HOUSE THERE WAS NO PEACE FOR THE MAN'S CHILDREN AND NOT A CORNER TO BE FOUND WHERE THEY COULD GET ANY REST SO THE BOY THOUGHT THAT THE BEST THING HE COULD DO WAS TO GO OUT INTO THE WORLD AND TRY TO EARN HIS OWN BREAD +3538-163619-0002-1502: BUT HIS SISTER WHO WAS STILL AT HOME FARED WORSE AND WORSE +3538-163619-0003-1503: KISS ME (GIRL->GO) SAID THE HEAD +3538-163619-0004-1504: WHEN THE KING ENTERED AND (SAW 
IT->SOUGHT) HE STOOD STILL AS IF HE WERE IN FETTERS AND COULD NOT STIR FROM THE SPOT FOR THE PICTURE SEEMED TO HIM SO BEAUTIFUL +3538-163619-0005-1505: (THE YOUTH->THESE) PROMISED TO MAKE ALL THE HASTE HE COULD AND SET FORTH FROM THE KING'S PALACE +3538-163619-0006-1506: AT LAST THEY CAME IN SIGHT OF LAND +3538-163619-0007-1507: WELL (IF->OF) MY BROTHER SAYS SO I MUST DO IT SAID THE MAN'S DAUGHTER AND SHE FLUNG HER CASKET INTO THE SEA +3538-163619-0008-1508: WHAT IS MY BROTHER SAYING ASKED HIS SISTER AGAIN +3538-163619-0009-1509: ON THE FIRST THURSDAY NIGHT AFTER THIS A BEAUTIFUL MAIDEN CAME INTO THE KITCHEN OF THE PALACE AND BEGGED THE KITCHEN MAID WHO SLEPT THERE TO LEND HER A BRUSH +3538-163619-0010-1510: SHE BEGGED VERY PRETTILY AND GOT IT AND THEN SHE BRUSHED HER HAIR AND THE GOLD DROPPED FROM IT +3538-163619-0011-1511: OUT ON (THEE->ME) UGLY BUSHY BRIDE SLEEPING SO SOFT BY THE YOUNG KING'S SIDE ON SAND AND STONES MY BED I MAKE AND MY BROTHER SLEEPS WITH THE COLD SNAKE UNPITIED AND UNWEPT +3538-163619-0012-1512: I SHALL COME TWICE MORE AND THEN NEVER AGAIN SAID SHE +3538-163619-0013-1513: THIS TIME ALSO AS BEFORE SHE BORROWED A BRUSH AND BRUSHED HER HAIR WITH IT AND THE GOLD DROPPED DOWN AS SHE DID IT AND AGAIN SHE SENT THE DOG OUT THREE TIMES AND WHEN DAY DAWNED SHE DEPARTED BUT AS SHE WAS GOING SHE SAID AS SHE HAD SAID BEFORE I SHALL COME ONCE MORE AND THEN NEVER AGAIN +3538-163619-0014-1514: NO ONE CAN TELL HOW DELIGHTED THE KING WAS TO GET RID OF THAT HIDEOUS BUSHY BRIDE AND GET A QUEEN WHO WAS BRIGHT AND BEAUTIFUL AS DAY ITSELF +3538-163622-0000-1515: WILT THOU SERVE ME AND WATCH MY SEVEN (FOALS->FOOLS) ASKED THE KING +3538-163622-0001-1516: THE YOUTH THOUGHT (THAT->*) IT WAS VERY EASY WORK TO WATCH THE FOALS AND THAT HE COULD DO IT WELL ENOUGH +3538-163622-0002-1517: HAST THOU WATCHED FAITHFULLY AND WELL (THE WHOLE DAY->BEHOLDAY) LONG SAID THE KING WHEN THE LAD CAME INTO HIS PRESENCE IN THE EVENING +3538-163622-0003-1518: YES THAT I HAVE SAID THE YOUTH +3538-163622-0004-1519: HE HAD GONE (OUT->AT) ONCE TO SEEK A PLACE HE SAID BUT NEVER WOULD HE DO SUCH A THING AGAIN +3538-163622-0005-1520: THEN THE KING PROMISED HIM THE SAME PUNISHMENT AND THE SAME REWARD THAT HE HAD PROMISED HIS BROTHER +3538-163622-0006-1521: WHEN HE HAD RUN AFTER THE (FOALS->FALLS) FOR A LONG LONG TIME AND WAS HOT AND TIRED HE PASSED BY (A CLEFT->CLEF) IN THE ROCK WHERE AN OLD WOMAN WAS SITTING SPINNING WITH A DISTAFF AND SHE CALLED TO HIM +3538-163622-0007-1522: (COME HITHER->COMMANDER) COME HITHER MY HANDSOME SON AND LET ME (COMB->CALM) YOUR HAIR +3538-163622-0008-1523: THE YOUTH LIKED THE THOUGHT OF THIS LET THE (FOALS RUN->FOLDS RUM) WHERE THEY CHOSE AND SEATED HIMSELF IN THE CLEFT OF THE ROCK BY THE SIDE OF THE OLD HAG +3538-163622-0009-1524: SO THERE HE SAT WITH HIS HEAD ON HER LAP TAKING HIS EASE THE (LIVELONG->LIVE LONG) DAY +3538-163622-0010-1525: ON THE THIRD DAY (CINDERLAD->SAID THE LAD) WANTED TO SET OUT +3538-163622-0011-1526: THE TWO BROTHERS LAUGHED AT HIM AND HIS FATHER AND MOTHER BEGGED HIM NOT TO GO BUT ALL TO NO PURPOSE AND (CINDERLAD->SINDERLAD) SET OUT ON HIS WAY +3538-163622-0012-1527: I AM WALKING ABOUT IN SEARCH OF A PLACE SAID CINDERLAD +3538-163622-0013-1528: I WOULD MUCH RATHER HAVE THE PRINCESS SAID (CINDERLAD->CINDER LAD) +3538-163622-0014-1529: AND THUS THEY JOURNEYED ONWARDS A LONG LONG WAY +3538-163622-0015-1530: WHEN THEY HAD GONE THUS FOR A LONG LONG WAY THE (FOAL->FULL) AGAIN ASKED DOST THOU SEE ANYTHING NOW +3538-163622-0016-1531: (*->OH) YES NOW I SEE SOMETHING THAT IS 
WHITE SAID CINDERLAD +3538-163622-0017-1532: IT LOOKS LIKE THE TRUNK OF A GREAT THICK BIRCH TREE +3538-163622-0018-1533: (CINDERLAD->CINDER LAD) TRIED BUT COULD NOT DO IT SO HE HAD TO TAKE A DRAUGHT FROM THE PITCHER AND THEN ONE MORE AND AFTER THAT STILL ANOTHER AND THEN HE WAS ABLE TO WIELD THE SWORD WITH PERFECT EASE +3538-163622-0019-1534: FOR WE ARE BROTHERS OF THE PRINCESS WHOM THOU ART TO HAVE WHEN THOU CANST TELL THE KING WHAT WE EAT AND DRINK BUT THERE IS A MIGHTY TROLL WHO HAS CAST A SPELL OVER US +3538-163622-0020-1535: WHEN THEY HAD TRAVELLED A LONG LONG WAY THE (FOAL->FALL) SAID DOST THOU SEE ANYTHING +3538-163622-0021-1536: AND NOW INQUIRED THE (FOAL SEEST THOU->WHOLE CEASE DONE) NOTHING NOW +3538-163622-0022-1537: NOW THEN SAID THE (FOAL->FOOL) DOST THOU NOT SEE ANYTHING NOW +3538-163622-0023-1538: THAT IS A RIVER SAID THE FOAL AND WE HAVE TO CROSS IT +3538-163622-0024-1539: I HAVE DONE MY BEST REPLIED (CINDERLAD->SIR LAD) +3538-163624-0000-1540: ONCE UPON A TIME THERE WAS A KING IN THE NORTH WHO HAD WON MANY WARS BUT NOW HE WAS OLD +3538-163624-0001-1541: THE OLD KING WENT OUT AND (FOUGHT->THOUGHT) BRAVELY BUT AT LAST HIS SWORD BROKE AND HE WAS WOUNDED AND HIS MEN FLED +3538-163624-0002-1542: BUT IN THE NIGHT WHEN THE BATTLE (WAS->IS) OVER HIS YOUNG WIFE CAME OUT (AND->IN) SEARCHED FOR HIM AMONG THE SLAIN AND AT LAST SHE FOUND HIM AND ASKED WHETHER HE MIGHT BE HEALED +3538-163624-0003-1543: SO (HE ASKED->YES) THE QUEEN HOW DO YOU KNOW IN THE DARK OF NIGHT WHETHER THE HOURS ARE WEARING TO THE MORNING AND SHE SAID +3538-163624-0004-1544: THEN THE OLD MAN SAID DRIVE ALL THE HORSES INTO THE RIVER AND CHOOSE THE ONE THAT SWIMS ACROSS +3538-163624-0005-1545: HE (IS->HAS) NO BIGGER THAN OTHER DRAGONS SAID THE TUTOR AND IF (YOU WERE AS->YOURS) BRAVE AS YOUR FATHER YOU WOULD NOT FEAR HIM +3538-163624-0006-1546: THEN THE PERSON WHO HAD KILLED OTTER WENT DOWN AND CAUGHT THE DWARF WHO OWNED ALL THE TREASURE AND TOOK IT FROM HIM +3538-163624-0007-1547: ONLY ONE RING WAS LEFT WHICH THE DWARF WORE AND EVEN THAT WAS TAKEN FROM HIM +3538-163624-0008-1548: SO (REGIN->RIGAN) MADE A SWORD AND (SIGURD->CIGAR) TRIED IT WITH (A->THE) BLOW ON A LUMP OF IRON AND THE SWORD BROKE +3538-163624-0009-1549: THEN (SIGURD->CIGARET) WENT TO HIS MOTHER AND ASKED FOR THE BROKEN PIECES OF HIS FATHER'S BLADE AND GAVE THEM TO (REGIN->RIGAN) +3538-163624-0010-1550: SO (SIGURD->CIGARET) SAID THAT SWORD WOULD DO +3538-163624-0011-1551: THEN HE SAW THE TRACK WHICH THE DRAGON (*->HAD) MADE WHEN HE WENT TO A CLIFF TO DRINK AND THE TRACK WAS AS IF A GREAT RIVER HAD ROLLED ALONG AND LEFT A DEEP VALLEY +3538-163624-0012-1552: BUT (SIGURD->CIGARET) WAITED TILL HALF OF HIM HAD CRAWLED OVER THE PIT AND THEN HE THRUST THE SWORD (GRAM->GRAHAM) RIGHT INTO HIS VERY HEART +3538-163624-0013-1553: (SIGURD->CIGARET) SAID I WOULD TOUCH NONE OF IT IF BY LOSING IT I SHOULD NEVER DIE +3538-163624-0014-1554: BUT ALL MEN DIE AND NO BRAVE MAN (LETS->LET'S) DEATH FRIGHTEN HIM FROM HIS DESIRE +3538-163624-0015-1555: (DIE->GUY) THOU (FAFNIR->FAFNER) AND THEN (FAFNIR->STAFFNER) DIED +3538-163624-0016-1556: THEN SIGURD RODE BACK AND MET (REGIN->RIGAN) AND (REGIN->RIGAN) ASKED HIM TO ROAST (FAFNIR'S->FAFNER'S) HEART AND LET HIM TASTE OF IT +3538-163624-0017-1557: SO (SIGURD->SIR GOOD) PUT THE HEART OF (FAFNIR->FAFFNER) ON A STAKE AND ROASTED IT +3538-163624-0018-1558: (THERE IS SIGURD->THERE'S CIGARET) ROASTING (FAFNIR'S->FAFTENER'S) HEART FOR ANOTHER WHEN HE SHOULD TASTE OF IT HIMSELF AND LEARN ALL WISDOM +3538-163624-0019-1559: THAT LET HIM DO 
(AND->*) THEN RIDE OVER (HINDFELL->HINFELD) TO THE PLACE WHERE (BRYNHILD->BRINEHILL) SLEEPS +3538-163624-0020-1560: THERE MUST SHE SLEEP TILL THOU (COMEST->COMES) FOR HER WAKING RISE UP AND RIDE FOR NOW SURE SHE WILL SWEAR THE VOW FEARLESS OF BREAKING +3538-163624-0021-1561: THEN HE TOOK THE HELMET OFF THE HEAD OF THE SLEEPER AND BEHOLD SHE WAS A MOST BEAUTIFUL LADY +3538-163624-0022-1562: THEN (SIGURD->CIGARET) RODE AWAY AND HE CAME TO THE HOUSE OF A KING WHO HAD A FAIR DAUGHTER +3538-163624-0023-1563: (THEN BRYNHILD'S->WHEN BURNE HAD) FATHER TOLD (GUNNAR->GUNNER) THAT SHE WOULD MARRY NONE BUT HIM WHO COULD RIDE THE FLAME IN FRONT OF HER ENCHANTED TOWER AND (THITHER->THAT AS) THEY RODE AND (GUNNAR->GUTTER) SET HIS HORSE AT THE FLAME BUT HE WOULD NOT FACE IT +3538-163624-0024-1564: FOR ONE DAY WHEN (BRYNHILD->BURNEHILD) AND (GUDRUN->GUNDRON) WERE BATHING (BRYNHILD WADED->BURNE HELD WAITED) FARTHEST OUT INTO THE RIVER AND SAID SHE DID THAT TO SHOW SHE WAS (GUIRUN'S->GUNDER AND) SUPERIOR +3538-163624-0025-1565: FOR HER HUSBAND SHE SAID HAD RIDDEN THROUGH THE FLAME WHEN NO OTHER MAN DARED FACE IT +3538-163624-0026-1566: NOT LONG TO WAIT HE SAID TILL THE BITTER SWORD STANDS FAST IN MY HEART AND THOU (WILL->WILT) NOT LIVE LONG WHEN I AM DEAD +367-130732-0000-1466: LOBSTERS AND LOBSTERS +367-130732-0001-1467: WHEN (IS->AS) A LOBSTER NOT A LOBSTER WHEN IT IS A CRAYFISH +367-130732-0002-1468: THIS QUESTION (AND->IN) ANSWER MIGHT WELL GO INTO THE (PRIMER->PRIMARY) OF INFORMATION FOR (THOSE WHO COME TO SAN->LUCIKAM THE SENT) FRANCISCO FROM THE EAST FOR WHAT IS CALLED A (LOBSTER IN->LOBSTERN) SAN FRANCISCO IS NOT A (LOBSTER->LOBSURD) AT ALL BUT A CRAYFISH +367-130732-0003-1469: THE PACIFIC (CRAYFISH HOWEVER->CRATER SHOWER) SERVES EVERY PURPOSE AND WHILE MANY CONTEND THAT ITS MEAT IS NOT SO DELICATE (IN FLAVOR->AND FLARE) AS THAT OF ITS EASTERN COUSIN THE CALIFORNIAN (WILL AS->WALLA) STRENUOUSLY (INSIST THAT->INSISTS AND) IT IS BETTER BUT OF COURSE SOMETHING MUST ALWAYS BE ALLOWED FOR THE PATRIOTISM OF THE CALIFORNIAN +367-130732-0004-1470: A BOOK COULD BE WRITTEN ABOUT THIS RESTAURANT AND THEN ALL WOULD NOT BE TOLD FOR ALL ITS SECRETS CAN NEVER BE KNOWN +367-130732-0005-1471: IT WAS HERE THAT MOST MAGNIFICENT DINNERS WERE ARRANGED IT WAS HERE THAT EXTRAORDINARY DISHES WERE (*->CALLED) CONCOCTED BY (CHEFS->CHEFTS) OF (WORLD WIDE->WOOLWRIGHT) FAME IT WAS HERE THAT LOBSTER (A LA NEWBERG->ALENUBERG) REACHED ITS HIGHEST PERFECTION AND THIS IS THE RECIPE THAT WAS (FOLLOWED->FOLLOW) WHEN IT WAS PREPARED IN THE (DELMONICO->DEMONICO) +367-130732-0006-1472: LOBSTER (A LA->OLLA) NEWBERG +367-130732-0007-1473: ONE POUND OF (LOBSTER->LOBS TO) MEAT ONE TEASPOONFUL OF BUTTER ONE HALF PINT OF CREAM YOLKS OF FOUR EGGS ONE WINE GLASS OF SHERRY LOBSTER FAT +367-130732-0008-1474: PUT THIS IN A DOUBLE (BOILER->WHIRLER) AND LET COOK UNTIL THICK STIRRING CONSTANTLY +367-130732-0009-1475: SERVE IN A CHAFING DISH WITH (THIN->FLITTON) SLICES OF DRY TOAST +367-130732-0010-1476: KING OF (SHELL FISH->SHELLFISH) +367-130732-0011-1477: ONE HAS TO COME TO (SAN->SENT) FRANCISCO TO PARTAKE OF THE KING OF (SHELL FISH->SHELLFISH) THE MAMMOTH PACIFIC CRAB +367-130732-0012-1478: I SAY COME TO SAN FRANCISCO ADVISEDLY FOR WHILE THE CRAB IS FOUND ALL ALONG THE COAST IT IS PREPARED NOWHERE SO DELICIOUSLY AS IN SAN FRANCISCO +367-130732-0013-1479: (GOBEY'S PASSED->GOBY'S PASS) WITH THE FIRE AND THE LITTLE RESTAURANT BEARING HIS NAME AND IN CHARGE OF HIS WIDOW IN UNION SQUARE AVENUE HAS NOT ATTAINED THE FAME OF THE OLD PLACE 
+367-130732-0014-1480: IT IS POSSIBLE THAT SHE KNOWS THE SECRET OF PREPARING CRAB AS IT WAS PREPARED IN THE (GOBEY'S->GOBIES) OF BEFORE THE FIRE BUT HIS PRESTIGE (DID->HAD) NOT DESCEND TO HER +367-130732-0015-1481: (GOBEY'S CRAB STEW->GOBIAS CRABS DO) +367-130732-0016-1482: TAKE THE MEAT OF ONE LARGE CRAB SCRAPING OUT ALL (OF->*) THE (FAT->BAT) FROM THE SHELL +367-130732-0017-1483: SOAK THE CRAB (MEAT->ME) IN THE SHERRY TWO HOURS BEFORE COOKING +367-130732-0018-1484: CHOP FINE THE ONION (SWEET->SWEEP) PEPPER (AND TOMATO->INTOMATO) WITH THE ROSEMARY +367-130732-0019-1485: (HEAT THIS->HEATLESS) IN A (STEWPAN->STEWPENT) AND WHEN SIMMERING ADD THE SHERRY AND CRAB (MEAT->ME) AND LET ALL COOK TOGETHER WITH (A->THE) SLOW FIRE FOR EIGHT MINUTES +367-130732-0020-1486: SERVE IN (A->THE) CHAFING DISH WITH TOASTED CRACKERS OR THIN SLICES OF TOASTED BREAD +367-130732-0021-1487: LOBSTER IN MINIATURE +367-130732-0022-1488: SO FAR IT HAS BEEN USED MOSTLY FOR GARNISHMENT OF OTHER DISHES AND IT IS ONLY RECENTLY THAT THE (HOF BRAU->WHOLE BROW) HAS BEEN MAKING (A SPECIALTY->ESPECIALTY) OF THEM +367-130732-0023-1489: ALL (OF->*) THE BETTER CLASS RESTAURANTS HOWEVER WILL SERVE THEM IF YOU ORDER THEM +367-130732-0024-1490: THIS IS THE RECIPE FOR EIGHT PEOPLE AND IT IS WELL (*->IT) WORTH TRYING IF YOU ARE GIVING A DINNER OF IMPORTANCE +367-130732-0025-1491: (BISQUE->DISK) OF CRAWFISH +367-130732-0026-1492: TAKE THIRTY CRAWFISH FROM WHICH REMOVE THE GUT CONTAINING THE GALL IN THE FOLLOWING MANNER TAKE FIRM HOLD OF THE CRAWFISH WITH THE LEFT HAND SO AS TO AVOID BEING PINCHED BY ITS (CLAWS->CLOTHS) WITH THE THUMB AND FOREFINGER OF THE RIGHT HAND PINCH THE EXTREME END OF THE CENTRAL FIN OF THE TAIL AND WITH A SUDDEN JERK THE GUT WILL BE WITHDRAWN +367-130732-0027-1493: (MINCE OR->MINSER) CUT INTO SMALL DICE A CARROT (AN->AND) ONION ONE HEAD OF CELERY AND A FEW PARSLEY ROOTS AND TO THESE (ADD->AT) A BAY LEAF A SPRIG OF THYME A LITTLE (MINIONETTE->MINOR NUT) PEPPER AND TWO (OUNCES->OUNCE) OF BUTTER +367-130732-0028-1494: PUT THESE INGREDIENTS INTO A STEWPAN AND FRY THEM TEN MINUTES THEN THROW IN THE (CRAWFISH->CROPPISH) AND POUR ON THEM HALF A BOTTLE OF FRENCH WHITE WINE +367-130732-0029-1495: ALLOW (THIS->US) TO BOIL AND THEN ADD A QUART OF STRONG (CONSOMME->CONSUM) AND LET ALL CONTINUE BOILING FOR HALF AN HOUR +367-130732-0030-1496: PICK OUT THE CRAWFISH AND STRAIN THE BROTH THROUGH A NAPKIN BY PRESSURE INTO A BASIN IN ORDER TO EXTRACT ALL THE ESSENCE FROM THE VEGETABLES +367-130732-0031-1497: PICK THE SHELLS (OFF->OF) TWENTY FIVE OF THE CRAWFISH TAILS TRIM THEM NEATLY AND SET THEM ASIDE UNTIL (WANTED->WANTON) +367-130732-0032-1498: RESERVE SOME OF THE SPAWN ALSO (HALF OF->HAPPENED) THE BODY SHELLS WITH WHICH TO MAKE THE (CRAWFISH->COFFISH) BUTTER TO FINISH THE SOUP +367-130732-0033-1499: THIS BUTTER IS MADE AS FOLLOWS PLACE THE SHELLS (ON->IN) A BAKING SHEET IN THE OVEN TO DRY LET THE SHELLS COOL AND THEN POUND THEM IN A MORTAR WITH A LITTLE LOBSTER (CORAL->COAL) AND FOUR OUNCES OF FRESH BUTTER THOROUGHLY BRUISING THE WHOLE TOGETHER SO AS TO MAKE A FINE PASTE +367-293981-0000-1445: I SWEAR (IT->*) ANSWERED SANCHO +367-293981-0001-1446: I SAY SO CONTINUED DON QUIXOTE BECAUSE I HATE TAKING AWAY (ANYONE'S->ANY ONE'S) GOOD NAME +367-293981-0002-1447: I SAY REPLIED SANCHO THAT I SWEAR TO HOLD MY TONGUE ABOUT IT TILL THE END OF YOUR (WORSHIP'S DAYS->WORSHIP STAYS) AND (GOD->GONE) GRANT I MAY BE ABLE TO LET IT OUT (TOMORROW->TO MORROW) +367-293981-0003-1448: THOUGH YOUR WORSHIP WAS NOT SO BADLY OFF HAVING IN YOUR ARMS 
(THAT INCOMPARABLE->THE INN COMPARABLE) BEAUTY YOU SPOKE OF BUT I WHAT DID I HAVE EXCEPT THE HEAVIEST (WHACKS->WAX THAT) I THINK I HAD IN ALL MY LIFE +367-293981-0004-1449: UNLUCKY ME AND THE MOTHER THAT BORE ME +367-293981-0005-1450: DIDN'T I SAY SO WORSE LUCK TO MY LINE SAID SANCHO +367-293981-0006-1451: IT CANNOT BE THE (MOOR->MORE) ANSWERED DON QUIXOTE FOR THOSE UNDER ENCHANTMENT DO NOT LET THEMSELVES BE SEEN BY ANYONE +367-293981-0007-1452: IF THEY (DON'T->DO NOT) LET THEMSELVES BE SEEN THEY LET THEMSELVES BE FELT SAID SANCHO IF NOT LET MY (SHOULDERS->SHOULDER) SPEAK TO THE POINT +367-293981-0008-1453: (MINE COULD->MIKE HAD) SPEAK TOO SAID DON QUIXOTE BUT THAT IS NOT A (SUFFICIENT->SUSPICIENT) REASON FOR BELIEVING THAT WHAT WE SEE IS THE ENCHANTED MOOR +367-293981-0009-1454: THE (OFFICER->OFFICERS) TURNED (TO HIM->ROOM) AND SAID WELL HOW GOES (IT->A) GOOD MAN +367-293981-0010-1455: SANCHO GOT UP WITH PAIN ENOUGH IN HIS BONES AND WENT (AFTER->OUT TO) THE INNKEEPER IN THE DARK (AND->IN) MEETING THE OFFICER WHO WAS LOOKING TO SEE WHAT HAD BECOME OF HIS ENEMY HE SAID TO HIM (SENOR->SIGNOR) WHOEVER YOU ARE DO US THE FAVOUR AND KINDNESS TO GIVE US A LITTLE ROSEMARY OIL SALT AND (WINE->WHITE) FOR IT IS (WANTED->WATER) TO CURE ONE OF (THE->OUR) BEST KNIGHTS ERRANT ON EARTH WHO LIES ON YONDER BED WOUNDED BY THE HANDS OF THE ENCHANTED MOOR THAT IS IN THIS INN +367-293981-0011-1456: TO BE BRIEF HE TOOK THE (MATERIALS->MATERIORS) OF WHICH HE MADE A COMPOUND MIXING THEM ALL (AND->*) BOILING THEM A GOOD WHILE (*->IT) UNTIL IT SEEMED TO HIM THEY HAD COME TO PERFECTION +367-293981-0012-1457: SANCHO PANZA WHO ALSO REGARDED THE AMENDMENT OF HIS MASTER AS MIRACULOUS BEGGED HIM TO GIVE HIM WHAT WAS LEFT IN THE (PIGSKIN->PICTION) WHICH WAS NO SMALL QUANTITY +367-293981-0013-1458: DON QUIXOTE CONSENTED AND HE TAKING IT WITH BOTH HANDS IN GOOD FAITH AND WITH A BETTER WILL GULPED (*->IT) DOWN AND DRAINED OFF VERY LITTLE LESS THAN HIS MASTER +367-293981-0014-1459: IF YOUR WORSHIP KNEW THAT RETURNED SANCHO (WOE->WE'LL) BETIDE ME (AND->IN) ALL MY KINDRED WHY DID YOU LET ME TASTE (IT->HIM) +367-293981-0015-1460: SEARCH YOUR MEMORY AND IF YOU FIND ANYTHING OF THIS KIND YOU NEED ONLY TELL ME OF IT AND I PROMISE YOU BY THE ORDER OF KNIGHTHOOD WHICH I HAVE RECEIVED TO PROCURE YOU SATISFACTION (AND->IN) REPARATION TO THE UTMOST OF YOUR DESIRE +367-293981-0016-1461: THEN THIS IS AN (INN->IN) SAID DON QUIXOTE +367-293981-0017-1462: (AND->IN) A VERY RESPECTABLE ONE SAID THE INNKEEPER +367-293981-0018-1463: THE CRIES OF THE POOR (BLANKETED->BLANKET) WRETCH WERE SO LOUD THAT THEY REACHED THE EARS OF HIS MASTER WHO HALTING TO LISTEN ATTENTIVELY WAS PERSUADED THAT SOME NEW ADVENTURE WAS COMING UNTIL HE CLEARLY PERCEIVED THAT IT WAS HIS SQUIRE WHO UTTERED THEM +367-293981-0019-1464: HE SAW HIM RISING AND FALLING IN THE AIR WITH SUCH GRACE AND NIMBLENESS THAT HAD HIS RAGE ALLOWED HIM IT IS MY BELIEF HE WOULD HAVE LAUGHED +367-293981-0020-1465: SANCHO TOOK IT AND AS HE WAS RAISING IT TO HIS MOUTH HE WAS STOPPED BY THE CRIES OF HIS MASTER EXCLAIMING SANCHO MY SON DRINK NOT WATER DRINK IT (NOT->OUT) MY SON FOR IT WILL KILL THEE SEE HERE I (HAVE->HAD) THE BLESSED BALSAM AND HE HELD UP THE FLASK OF LIQUOR AND WITH DRINKING TWO DROPS (OF IT->WHAT) THOU WILT CERTAINLY BE RESTORED +3764-168670-0000-1666: THE STRIDES OF A LAME MAN (ARE->*) LIKE THE OGLING GLANCES OF A ONE EYED MAN THEY DO NOT REACH THEIR GOAL VERY PROMPTLY +3764-168670-0001-1667: COSETTE HAD WAKED UP +3764-168670-0002-1668: JEAN VALJEAN HAD PLACED HER NEAR THE FIRE 
+3764-168670-0003-1669: YOU WILL WAIT FOR ME AT A LADY'S HOUSE I SHALL COME TO FETCH YOU +3764-168670-0004-1670: EVERYTHING IS ARRANGED AND NOTHING IS SAID FAUCHELEVENT +3764-168670-0005-1671: I HAVE PERMISSION TO BRING YOU IN BUT BEFORE BRINGING YOU IN YOU MUST BE GOT OUT +3764-168670-0006-1672: THAT'S WHERE THE DIFFICULTY LIES +3764-168670-0007-1673: IT IS EASY ENOUGH WITH THE CHILD YOU WILL CARRY HER OUT +3764-168670-0008-1674: AND SHE WILL HOLD HER TONGUE I ANSWER FOR THAT +3764-168670-0009-1675: (FAUCHELEVENT->FOR SCHLEVENT) GRUMBLED MORE TO HIMSELF THAN TO JEAN VALJEAN +3764-168670-0010-1676: YOU UNDERSTAND FATHER MADELEINE THE GOVERNMENT WILL NOTICE IT +3764-168670-0011-1677: JEAN VALJEAN STARED HIM STRAIGHT IN THE EYE AND THOUGHT THAT HE WAS RAVING +3764-168670-0012-1678: (FAUCHELEVENT->FOUCHELEVENT) WENT ON +3764-168670-0013-1679: IT IS TO MORROW THAT I AM TO BRING YOU IN THE (PRIORESS->PRIORS) EXPECTS YOU +3764-168670-0014-1680: THEN HE EXPLAINED TO JEAN VALJEAN THAT THIS WAS HIS RECOMPENSE FOR A SERVICE WHICH HE (FAUCHELEVENT->FOUCHELEVENT) WAS (TO RENDER->SURRENDER) TO THE COMMUNITY +3764-168670-0015-1681: THAT THE NUN WHO HAD DIED THAT MORNING HAD REQUESTED TO BE BURIED IN THE COFFIN WHICH HAD SERVED HER FOR A BED AND INTERRED IN THE VAULT UNDER THE ALTAR OF THE CHAPEL +3764-168670-0016-1682: THAT THE (PRIORESS->PRIORS) AND THE VOCAL MOTHERS INTENDED TO FULFIL THE WISH OF THE DECEASED +3764-168670-0017-1683: THAT HE (FAUCHELEVENT->FOR SCHLEVENT) WAS TO NAIL UP THE COFFIN IN THE CELL RAISE THE STONE IN THE CHAPEL AND (LOWER->BLOW) THE CORPSE INTO THE VAULT +3764-168670-0018-1684: AND THEN THAT THERE WAS ANOTHER THE EMPTY COFFIN +3764-168670-0019-1685: WHAT IS THAT EMPTY COFFIN +3764-168670-0020-1686: ASKED JEAN VALJEAN FAUCHELEVENT REPLIED +3764-168670-0021-1687: WHAT COFFIN WHAT ADMINISTRATION +3764-168670-0022-1688: (FAUCHELEVENT->SO SLAVENT) WHO WAS SEATED SPRANG UP AS THOUGH A BOMB HAD BURST UNDER HIS CHAIR YOU +3764-168670-0023-1689: YOU KNOW (FAUCHELEVENT->FOURCHELEVENT) WHAT YOU HAVE SAID MOTHER CRUCIFIXION IS DEAD +3764-168670-0024-1690: AND I ADD AND FATHER MADELEINE IS BURIED AH +3764-168670-0025-1691: YOU ARE NOT LIKE OTHER MEN FATHER MADELEINE +3764-168670-0026-1692: THIS OFFERS THE MEANS BUT GIVE ME SOME INFORMATION IN THE FIRST PLACE +3764-168670-0027-1693: HOW LONG IS THE COFFIN SIX FEET +3764-168670-0028-1694: IT IS A CHAMBER ON THE GROUND FLOOR WHICH HAS A GRATED WINDOW OPENING ON THE GARDEN WHICH IS CLOSED ON THE OUTSIDE BY A SHUTTER AND TWO DOORS ONE LEADS INTO THE CONVENT THE OTHER INTO THE CHURCH (WHAT CHURCH->A WATCH) +3764-168670-0029-1695: THE CHURCH IN THE STREET (*->AT) THE CHURCH WHICH ANY ONE CAN ENTER +3764-168670-0030-1696: HAVE YOU THE KEYS TO THOSE TWO DOORS +3764-168670-0031-1697: (*->AND) NO I HAVE THE KEY TO THE DOOR WHICH COMMUNICATES WITH THE CONVENT THE PORTER HAS THE KEY TO THE DOOR WHICH COMMUNICATES WITH THE CHURCH +3764-168670-0032-1698: ONLY TO ALLOW THE UNDERTAKER'S MEN TO ENTER WHEN THEY COME TO GET THE COFFIN +3764-168670-0033-1699: WHO NAILS UP THE COFFIN I DO +3764-168670-0034-1700: WHO SPREADS THE (PALL->POOL) OVER IT +3764-168670-0035-1701: NOT ANOTHER MAN EXCEPT THE POLICE DOCTOR CAN ENTER THE (DEAD ROOM->DEDUREUM) THAT IS EVEN WRITTEN ON THE WALL +3764-168670-0036-1702: COULD YOU HIDE ME IN THAT ROOM TO NIGHT WHEN EVERY ONE IS ASLEEP +3764-168670-0037-1703: ABOUT THREE O'CLOCK IN THE AFTERNOON +3764-168670-0038-1704: I SHALL BE HUNGRY I WILL BRING YOU SOMETHING +3764-168670-0039-1705: YOU CAN COME AND NAIL ME UP IN THE COFFIN 
AT TWO O'CLOCK +3764-168670-0040-1706: (FAUCHELEVENT->FUCHELEVENT) RECOILED AND CRACKED HIS FINGER JOINTS BUT THAT IS IMPOSSIBLE +3764-168670-0041-1707: BAH IMPOSSIBLE TO TAKE A HAMMER AND DRIVE SOME NAILS IN A PLANK +3764-168670-0042-1708: JEAN VALJEAN HAD BEEN IN WORSE STRAITS THAN THIS +3764-168670-0043-1709: ANY MAN WHO HAS BEEN A PRISONER UNDERSTANDS HOW TO CONTRACT HIMSELF TO FIT THE DIAMETER OF THE ESCAPE +3764-168670-0044-1710: WHAT DOES NOT A MAN UNDERGO FOR THE SAKE OF A CURE +3764-168670-0045-1711: TO HAVE HIMSELF NAILED UP IN A CASE AND CARRIED OFF LIKE A (BALE->BAIL) OF GOODS TO LIVE FOR A LONG TIME IN A BOX TO FIND AIR WHERE THERE IS NONE TO ECONOMIZE HIS BREATH FOR HOURS TO KNOW HOW TO STIFLE WITHOUT DYING THIS WAS ONE OF JEAN VALJEAN'S GLOOMY TALENTS +3764-168670-0046-1712: YOU SURELY MUST HAVE A GIMLET YOU WILL MAKE A FEW HOLES HERE AND THERE AROUND MY MOUTH AND YOU WILL NAIL THE TOP PLANK ON LOOSELY GOOD AND WHAT IF YOU SHOULD HAPPEN TO COUGH OR TO SNEEZE +3764-168670-0047-1713: A MAN WHO IS MAKING HIS ESCAPE DOES NOT COUGH OR SNEEZE +3764-168670-0048-1714: WHO IS THERE WHO HAS NOT SAID TO A CAT DO COME IN +3764-168670-0049-1715: THE OVER PRUDENT (CATS->COUNTS) AS THEY ARE AND BECAUSE THEY ARE CATS SOMETIMES INCUR MORE DANGER THAN THE AUDACIOUS +3764-168670-0050-1716: BUT JEAN VALJEAN'S COOLNESS PREVAILED OVER HIM IN SPITE OF HIMSELF HE GRUMBLED +3764-168670-0051-1717: IF YOU ARE SURE OF COMING OUT OF THE COFFIN ALL RIGHT I AM SURE OF GETTING (YOU->*) OUT OF THE GRAVE +3764-168670-0052-1718: AN OLD FELLOW OF THE OLD SCHOOL THE GRAVE DIGGER PUTS THE CORPSES IN THE GRAVE AND I PUT THE GRAVE DIGGER IN MY POCKET +3764-168670-0053-1719: I SHALL FOLLOW THAT IS MY BUSINESS +3764-168670-0054-1720: THE (HEARSE->HOUSE) HALTS THE (UNDERTAKER'S->UNDERTAKERS) MEN (KNOT->NOT) A ROPE AROUND YOUR COFFIN AND LOWER YOU DOWN +3764-168670-0055-1721: THE (PRIEST SAYS->PRIESTS AS) THE PRAYERS MAKES THE SIGN OF THE CROSS SPRINKLES THE HOLY WATER AND TAKES HIS DEPARTURE +3764-168670-0056-1722: ONE OF TWO THINGS WILL HAPPEN HE WILL EITHER BE SOBER OR HE WILL NOT BE SOBER +3764-168670-0057-1723: THAT IS SETTLED FATHER FAUCHELEVENT ALL WILL GO WELL +3764-168671-0000-1724: ON THE FOLLOWING DAY AS THE SUN WAS DECLINING THE VERY RARE (PASSERS->PASSES) BY ON THE BOULEVARD (DU MAINE->DUMEN) PULLED OFF THEIR HATS TO AN OLD FASHIONED HEARSE ORNAMENTED WITH SKULLS CROSS BONES AND TEARS +3764-168671-0001-1725: THIS HEARSE CONTAINED A COFFIN COVERED WITH A WHITE CLOTH OVER WHICH SPREAD A LARGE BLACK CROSS LIKE A HUGE CORPSE WITH DROOPING ARMS +3764-168671-0002-1726: (A MOURNING->THE MORNING) COACH IN WHICH COULD BE SEEN A PRIEST IN HIS SURPLICE AND A CHOIR BOY IN HIS RED CAP FOLLOWED +3764-168671-0003-1727: BEHIND IT CAME AN OLD MAN IN THE GARMENTS OF A LABORER WHO LIMPED ALONG +3764-168671-0004-1728: THE GRAVE DIGGERS BEING THUS BOUND TO SERVICE IN THE EVENING IN SUMMER AND AT NIGHT IN WINTER IN THIS CEMETERY THEY WERE SUBJECTED TO A SPECIAL DISCIPLINE +3764-168671-0005-1729: THESE GATES THEREFORE SWUNG INEXORABLY ON THEIR HINGES AT THE INSTANT WHEN THE SUN DISAPPEARED BEHIND THE DOME OF THE (INVALIDES->INVALID) +3764-168671-0006-1730: DAMPNESS WAS INVADING IT THE FLOWERS WERE DESERTING IT +3764-168671-0007-1731: THE BOURGEOIS DID NOT CARE MUCH ABOUT BEING BURIED IN THE (VAUGIRARD->VIGORE) IT HINTED AT POVERTY (PERE LACHAISE->BAT LACHES) IF YOU PLEASE +3764-168671-0008-1732: TO BE BURIED IN (PERE LACHAISE->PEGLASHES) IS EQUIVALENT TO HAVING FURNITURE OF MAHOGANY IT IS RECOGNIZED AS ELEGANT 
+3764-168671-0009-1733: THE INTERMENT OF MOTHER CRUCIFIXION IN THE VAULT UNDER THE ALTAR THE EXIT OF COSETTE THE INTRODUCTION OF JEAN VALJEAN (TO->INTO) THE DEAD ROOM ALL HAD BEEN EXECUTED WITHOUT DIFFICULTY AND THERE HAD BEEN NO HITCH LET US REMARK IN PASSING THAT THE BURIAL OF MOTHER CRUCIFIXION UNDER THE ALTAR OF THE CONVENT IS A PERFECTLY VENIAL OFFENCE IN OUR SIGHT +3764-168671-0010-1734: IT IS ONE OF THE FAULTS WHICH RESEMBLE A DUTY +3764-168671-0011-1735: THE NUNS HAD COMMITTED IT NOT ONLY WITHOUT DIFFICULTY BUT EVEN WITH THE APPLAUSE OF THEIR OWN CONSCIENCES +3764-168671-0012-1736: IN THE CLOISTER WHAT IS CALLED THE GOVERNMENT IS ONLY AN INTERMEDDLING WITH AUTHORITY AN INTERFERENCE WHICH IS ALWAYS QUESTIONABLE +3764-168671-0013-1737: MAKE AS MANY LAWS AS YOU PLEASE MEN BUT KEEP THEM FOR YOURSELVES +3764-168671-0014-1738: A PRINCE IS NOTHING IN THE PRESENCE OF A PRINCIPLE +3764-168671-0015-1739: (FAUCHELEVENT->FLUCHELEVENT) LIMPED ALONG BEHIND THE HEARSE IN A VERY CONTENTED FRAME OF MIND +3764-168671-0016-1740: JEAN VALJEAN'S COMPOSURE WAS ONE OF THOSE POWERFUL TRANQUILLITIES WHICH ARE CONTAGIOUS +3764-168671-0017-1741: WHAT REMAINED TO BE DONE WAS A MERE NOTHING +3764-168671-0018-1742: HE PLAYED WITH FATHER (MESTIENNE->MESTINE) +3764-168671-0019-1743: HE DID WHAT HE LIKED WITH HIM HE MADE HIM DANCE ACCORDING TO HIS WHIM +3764-168671-0020-1744: THE PERMISSION FOR INTERMENT MUST BE EXHIBITED +3764-168671-0021-1745: HE WAS A SORT OF LABORING MAN WHO WORE A WAISTCOAT WITH LARGE POCKETS AND CARRIED A MATTOCK UNDER HIS ARM +3764-168671-0022-1746: THE MAN REPLIED THE GRAVE DIGGER +3764-168671-0023-1747: THE GRAVE DIGGER YES +3764-168671-0024-1748: YOU I +3764-168671-0025-1749: FATHER (MESTIENNE->MISCHIENNE) IS THE GRAVE DIGGER HE WAS +3764-168671-0026-1750: (FAUCHELEVENT->FUSSION OF WHAT) HAD EXPECTED ANYTHING BUT THIS THAT A GRAVE DIGGER COULD DIE +3764-168671-0027-1751: IT IS TRUE NEVERTHELESS THAT GRAVE DIGGERS DO DIE THEMSELVES +3764-168671-0028-1752: HE HAD HARDLY THE STRENGTH TO STAMMER +3764-168671-0029-1753: BUT HE PERSISTED FEEBLY FATHER (MESTIENNE->MISSED HERE) IS THE GRAVE DIGGER +3764-168671-0030-1754: DO YOU KNOW WHO LITTLE (FATHER LENOIR IS->FATHERLAND WARRITZ) HE IS A JUG OF RED WINE +3764-168671-0031-1755: BUT (YOU ARE->YOU'RE) A JOLLY FELLOW TOO +3764-168671-0032-1756: ARE YOU NOT COMRADE WE'LL GO AND HAVE A DRINK TOGETHER PRESENTLY +3764-168671-0033-1757: THE MAN REPLIED +3764-168671-0034-1758: HE LIMPED MORE OUT OF ANXIETY THAN FROM INFIRMITY +3764-168671-0035-1759: THE GRAVE DIGGER WALKED ON IN FRONT OF HIM +3764-168671-0036-1760: FAUCHELEVENT PASSED THE UNEXPECTED (GRIBIER->CLAVIER) ONCE MORE IN REVIEW +3764-168671-0037-1761: (FAUCHELEVENT->FASHIONEVENT) WHO WAS ILLITERATE BUT VERY SHARP UNDERSTOOD THAT HE HAD TO DEAL WITH A FORMIDABLE SPECIES OF MAN WITH A FINE TALKER HE MUTTERED +3764-168671-0038-1762: (SO->MISS OH) FATHER (MESTIENNE->MESTINE) IS DEAD +3764-168671-0039-1763: THE MAN REPLIED COMPLETELY +3764-168671-0040-1764: THE GOOD GOD CONSULTED HIS NOTE BOOK WHICH (SHOWS->SHARES) WHEN THE TIME IS UP IT WAS FATHER (MESTIENNE'S->MESTINE'S) TURN FATHER (MESTIENNE->MISS HE HAD) DIED +3764-168671-0041-1765: STAMMERED FAUCHELEVENT IT IS MADE +3764-168671-0042-1766: YOU ARE A PEASANT I AM A PARISIAN +3764-168671-0043-1767: (FAUCHELEVENT->FOR CHAUVELT) THOUGHT I AM LOST +3764-168671-0044-1768: THEY WERE ONLY A FEW TURNS OF THE WHEEL DISTANT FROM THE SMALL ALLEY LEADING TO THE (NUNS->NUN'S) CORNER +3764-168671-0045-1769: AND HE ADDED WITH THE SATISFACTION OF A SERIOUS MAN 
WHO IS TURNING A PHRASE WELL +3764-168671-0046-1770: FORTUNATELY THE SOIL WHICH WAS LIGHT AND WET WITH THE WINTER RAINS CLOGGED THE WHEELS AND RETARDED ITS SPEED +3764-168671-0047-1771: MY FATHER WAS A PORTER AT THE (PRYTANEUM->BRITTANNIUM) TOWN HALL +3764-168671-0048-1772: BUT HE HAD REVERSES HE HAD LOSSES (ON CHANGE->UNCHANGED) I WAS OBLIGED TO RENOUNCE THE PROFESSION OF AUTHOR BUT I AM STILL A PUBLIC WRITER +3764-168671-0049-1773: (SO->BUT SIR) YOU ARE NOT A GRAVE DIGGER THEN +3764-168671-0050-1774: RETURNED FAUCHELEVENT CLUTCHING AT THIS BRANCH FEEBLE AS IT WAS +3764-168671-0051-1775: HERE A REMARK BECOMES NECESSARY +3764-168671-0052-1776: (*->A) FAUCHELEVENT WHATEVER HIS ANGUISH OFFERED A DRINK BUT HE DID NOT EXPLAIN HIMSELF ON ONE POINT WHO WAS TO PAY +3764-168671-0053-1777: THE GRAVE DIGGER WENT ON WITH (A->THE) SUPERIOR SMILE +3764-168671-0054-1778: ONE MUST EAT +3997-180294-0000-1800: THE DUKE COMES EVERY MORNING THEY WILL TELL HIM WHEN HE COMES THAT I AM ASLEEP AND PERHAPS HE WILL WAIT UNTIL I (WAKE->AWAKE) +3997-180294-0001-1801: YES BUT IF I SHOULD ALREADY ASK FOR SOMETHING WHAT +3997-180294-0002-1802: WELL DO IT FOR ME FOR I SWEAR TO YOU (THAT I->THY) DON'T LOVE YOU AS THE OTHERS HAVE LOVED YOU +3997-180294-0003-1803: THERE ARE BOLTS (ON->IN) THE DOOR WRETCH +3997-180294-0004-1804: I DON'T KNOW HOW IT IS BUT IT SEEMS TO ME AS IF I DO +3997-180294-0005-1805: (NOW->THOU) GO I CAN'T KEEP MY EYES OPEN +3997-180294-0006-1806: IT (SEEMED->SEEMS) TO ME AS IF THIS SLEEPING CITY (BELONGED->BELONGS) TO ME I SEARCHED MY MEMORY FOR THE NAMES OF THOSE WHOSE HAPPINESS I HAD ONCE ENVIED AND I COULD NOT RECALL ONE WITHOUT FINDING MYSELF THE HAPPIER +3997-180294-0007-1807: EDUCATION FAMILY FEELING THE SENSE OF DUTY THE FAMILY ARE STRONG SENTINELS BUT THERE ARE NO SENTINELS SO VIGILANT AS NOT TO BE DECEIVED BY A GIRL OF SIXTEEN TO WHOM NATURE BY THE VOICE OF THE MAN SHE LOVES GIVES THE FIRST (COUNSELS->COUNCIL) OF LOVE ALL THE MORE (ARDENT->ARDENTS) BECAUSE THEY SEEM SO PURE +3997-180294-0008-1808: THE MORE (A->*) GIRL BELIEVES IN GOODNESS THE MORE (EASILY->IS WE) WILL SHE GIVE WAY IF NOT TO HER LOVER AT LEAST TO LOVE FOR BEING WITHOUT MISTRUST SHE IS WITHOUT FORCE AND TO WIN HER LOVE (IS->AS) A TRIUMPH THAT CAN BE GAINED BY ANY YOUNG MAN OF FIVE AND TWENTY SEE HOW YOUNG GIRLS ARE WATCHED AND GUARDED +3997-180294-0009-1809: THEN HOW SURELY MUST THEY DESIRE THE WORLD WHICH IS HIDDEN FROM THEM (HOW->HAS) SURELY MUST THEY FIND IT TEMPTING HOW SURELY MUST THEY LISTEN TO THE FIRST VOICE WHICH COMES TO TELL ITS SECRETS THROUGH THEIR BARS AND BLESS THE HAND WHICH IS THE FIRST TO RAISE A CORNER OF THE (MYSTERIOUS->MYSTERY) VEIL +3997-180294-0010-1810: WITH THEM THE BODY HAS WORN OUT THE SOUL THE SENSES (HAVE->HALF) BURNED UP THE HEART DISSIPATION HAS BLUNTED THE FEELINGS +3997-180294-0011-1811: THEY (LOVE->LOVED) BY PROFESSION AND NOT BY INSTINCT +3997-180294-0012-1812: WHEN A CREATURE WHO HAS ALL HER PAST TO REPROACH HERSELF WITH IS TAKEN ALL AT ONCE BY A PROFOUND SINCERE IRRESISTIBLE LOVE OF WHICH SHE HAD NEVER FELT HERSELF CAPABLE WHEN SHE HAS CONFESSED HER LOVE HOW ABSOLUTELY THE MAN WHOM SHE LOVES DOMINATES HER +3997-180294-0013-1813: THEY KNOW NOT WHAT PROOF TO GIVE +3997-180294-0014-1814: IN ORDER TO DISTURB THE (LABOURERS->LABORERS) IN THE (FIELD->FIELDS) WAS ONE DAY DEVOURED BY A WOLF BECAUSE THOSE WHOM HE HAD SO OFTEN DECEIVED NO LONGER BELIEVED IN HIS CRIES FOR HELP +3997-180294-0015-1815: (IT->THIS) IS THE SAME WITH THESE UNHAPPY WOMEN WHEN THEY LOVE SERIOUSLY +3997-180294-0016-1816: BUT 
WHEN THE MAN WHO INSPIRES THIS REDEEMING LOVE IS GREAT ENOUGH IN SOUL TO RECEIVE IT WITHOUT REMEMBERING THE PAST WHEN HE GIVES HIMSELF UP TO IT WHEN IN SHORT HE LOVES AS HE IS LOVED THIS MAN (DRAINS->DREAMS) AT ONE (DRAUGHT->DROUGHT) ALL EARTHLY EMOTIONS AND AFTER SUCH A LOVE HIS HEART WILL BE CLOSED TO EVERY OTHER +3997-180294-0017-1817: BUT TO RETURN TO THE FIRST DAY OF MY (LIAISON->LEAR SONG) +3997-180294-0018-1818: WHEN I REACHED HOME I WAS IN A STATE OF MAD GAIETY +3997-180294-0019-1819: THE WOMAN BECOMES THE MAN'S MISTRESS AND LOVES HIM +3997-180294-0020-1820: HOW WHY +3997-180294-0021-1821: MY WHOLE BEING WAS EXALTED INTO JOY AT THE MEMORY OF THE WORDS WE HAD EXCHANGED DURING THAT FIRST NIGHT +3997-180294-0022-1822: HERE ARE MY ORDERS TO NIGHT AT (THE->A) VAUDEVILLE +3997-180294-0023-1823: (COME->CALM) DURING THE THIRD (ENTR'ACTE->AND TRACT) +3997-180294-0024-1824: THE BOXES FILLED ONE AFTER ANOTHER +3997-180294-0025-1825: ONLY ONE (REMAINED->REMAINS) EMPTY THE STAGE BOX +3997-180294-0026-1826: AT THE BEGINNING OF THE THIRD ACT I HEARD THE DOOR OF THE BOX ON WHICH MY EYES HAD BEEN ALMOST CONSTANTLY FIXED OPEN AND MARGUERITE APPEARED +3997-180294-0027-1827: (DID->THAT) SHE (LOVE->LOVED) ME ENOUGH TO BELIEVE THAT THE MORE BEAUTIFUL SHE (LOOKED->LOOKS) THE HAPPIER I SHOULD BE +3997-180294-0028-1828: WHAT IS THE MATTER WITH YOU TO NIGHT SAID MARGUERITE RISING AND COMING TO THE BACK OF THE BOX AND KISSING ME ON THE FOREHEAD +3997-180294-0029-1829: YOU SHOULD GO TO BED SHE REPLIED WITH THAT (IRONICAL->IRONIC) AIR WHICH WENT SO WELL WITH HER DELICATE AND WITTY FACE +3997-180294-0030-1830: WHERE AT HOME +3997-180294-0031-1831: YOU STILL LOVE ME CAN YOU ASK +3997-180294-0032-1832: BECAUSE YOU DON'T LIKE SEEING HIM +3997-180294-0033-1833: (NONETHELESS->NONE THE LESS) I WAS VERY UNHAPPY ALL THE REST OF THE EVENING AND WENT AWAY VERY SADLY AFTER HAVING SEEN PRUDENCE THE COUNT AND MARGUERITE (GET INTO->GAINED TO) THE CARRIAGE WHICH WAS (WAITING->WINNING) FOR THEM AT THE DOOR +3997-180297-0000-1834: I HAVE NOT COME TO HINDER YOU FROM LEAVING PARIS +3997-180297-0001-1835: YOU IN THE WAY MARGUERITE BUT HOW +3997-180297-0002-1836: WELL YOU MIGHT HAVE HAD A WOMAN HERE SAID PRUDENCE AND IT WOULD HARDLY HAVE BEEN AMUSING FOR HER TO SEE TWO MORE ARRIVE +3997-180297-0003-1837: DURING THIS REMARK MARGUERITE LOOKED AT ME ATTENTIVELY +3997-180297-0004-1838: MY DEAR PRUDENCE I ANSWERED YOU DO NOT KNOW WHAT YOU ARE SAYING +3997-180297-0005-1839: YES BUT BESIDES NOT WISHING TO PUT YOU OUT I WAS SURE THAT IF YOU CAME AS FAR AS MY DOOR YOU WOULD WANT TO COME UP AND AS I COULD NOT LET YOU I DID NOT WISH TO LET YOU GO AWAY BLAMING ME FOR SAYING NO +3997-180297-0006-1840: BECAUSE I AM WATCHED AND THE LEAST SUSPICION MIGHT (DO->TO) ME THE GREATEST HARM +3997-180297-0007-1841: IS THAT REALLY THE ONLY REASON +3997-180297-0008-1842: IF THERE WERE ANY OTHER I WOULD TELL YOU FOR WE ARE NOT TO HAVE ANY SECRETS FROM ONE ANOTHER NOW +3997-180297-0009-1843: (HONESTLY->ON THE STREET) DO YOU CARE FOR ME A LITTLE A GREAT DEAL +3997-180297-0010-1844: I FANCIED FOR A MOMENT THAT (I->IT) MIGHT GIVE MYSELF THAT HAPPINESS FOR SIX MONTHS YOU (WOULD->WILL) NOT HAVE IT YOU INSISTED ON KNOWING THE MEANS +3997-180297-0011-1845: WELL GOOD HEAVENS THE MEANS WERE EASY ENOUGH TO GUESS +3997-180297-0012-1846: I LISTENED AND I GAZED AT MARGUERITE WITH ADMIRATION +3997-180297-0013-1847: WHEN (I->THEY) THOUGHT THAT THIS MARVELLOUS CREATURE WHOSE FEET I HAD ONCE LONGED TO KISS WAS WILLING TO LET ME TAKE MY PLACE IN HER THOUGHTS (MY PART->BY 
PARTS) IN HER LIFE AND THAT I WAS NOT YET CONTENT WITH WHAT SHE GAVE ME I ASKED IF (MAN'S->MEN'S) DESIRE (HAS->HAD) INDEED LIMITS WHEN SATISFIED AS PROMPTLY AS MINE HAD BEEN IT REACHED AFTER SOMETHING FURTHER +3997-180297-0014-1848: TRULY SHE CONTINUED WE POOR CREATURES OF CHANCE HAVE FANTASTIC (DESIRES->DESIRE) AND INCONCEIVABLE LOVES +3997-180297-0015-1849: WE ARE NOT ALLOWED TO HAVE HEARTS UNDER PENALTY OF BEING HOOTED DOWN AND OF RUINING OUR CREDIT +3997-180297-0016-1850: WE NO LONGER BELONG TO OURSELVES +3997-180297-0017-1851: WE STAND FIRST IN THEIR SELF ESTEEM LAST IN THEIR ESTEEM +3997-180297-0018-1852: NEVER (DO THEY->DID HE) GIVE YOU ADVICE WHICH IS NOT (LUCRATIVE->LOOK ATTENTIVE) +3997-180297-0019-1853: IT MEANS LITTLE ENOUGH TO THEM THAT WE SHOULD HAVE TEN LOVERS EXTRA AS LONG AS THEY GET DRESSES OR A BRACELET OUT OF THEM AND THAT THEY CAN DRIVE (IN OUR->AND ARE) CARRIAGE FROM TIME TO TIME OR COME TO OUR BOX AT THE (THEATRE->FUTURE) +3997-180297-0020-1854: SUCH A MAN I FOUND IN THE DUKE BUT THE DUKE IS OLD AND (*->THE) OLD AGE NEITHER PROTECTS NOR CONSOLES +3997-180297-0021-1855: I THOUGHT I COULD ACCEPT THE LIFE WHICH HE OFFERED ME (BUT->OR) WHAT WOULD YOU HAVE +3997-180297-0022-1856: WHAT I LOVED IN YOU WAS NOT THE MAN WHO WAS BUT THE MAN WHO WAS GOING TO BE +3997-180297-0023-1857: MARGUERITE TIRED OUT WITH THIS LONG CONFESSION THREW HERSELF BACK ON THE SOFA AND TO STIFLE A SLIGHT COUGH PUT UP HER HANDKERCHIEF TO HER LIPS AND FROM THAT TO HER EYES +3997-180297-0024-1858: MARGUERITE DO WITH ME AS YOU WILL I AM YOUR SLAVE YOUR DOG BUT IN THE NAME OF HEAVEN TEAR UP THE LETTER WHICH I WROTE TO YOU AND DO NOT MAKE ME LEAVE YOU TO MORROW IT WOULD KILL ME +3997-180297-0025-1859: MARGUERITE DREW THE LETTER FROM HER BOSOM AND HANDING IT TO ME WITH A SMILE OF INFINITE SWEETNESS SAID +3997-180297-0026-1860: HERE IT IS I HAVE BROUGHT IT BACK +3997-180297-0027-1861: I TORE THE LETTER INTO FRAGMENTS AND KISSED (*->IT) WITH TEARS THE HAND THAT (*->I) GAVE IT TO ME +3997-180297-0028-1862: LOOK HERE PRUDENCE DO YOU KNOW WHAT HE WANTS SAID MARGUERITE +3997-180297-0029-1863: HE WANTS YOU TO FORGIVE HIM +3997-180297-0030-1864: ONE HAS (TO->TWO) BUT HE (WANTS->ONCE) MORE THAN THAT WHAT THEN +3997-180297-0031-1865: I EMBRACED MARGUERITE UNTIL SHE WAS ALMOST STIFLED +3997-182399-0000-1779: (OL MISTAH->ALL MISTER) BUZZARD GRINNED +3997-182399-0001-1780: THIS SOUNDED LIKE ANOTHER STORY +3997-182399-0002-1781: HE WAS CURIOUS ABOUT THAT BLACK HEADED COUSIN OF (OL MISTAH->ALL MISTER) BUZZARD VERY CURIOUS INDEED +3997-182399-0003-1782: ANYWAY HE WOULD FIND OUT +3997-182399-0004-1783: PLEASE MISTER BUZZARD PLEASE TELL US THE STORY HE BEGGED +3997-182399-0005-1784: NOW (OL MISTAH->ALL MISTER) BUZZARD IS NATURALLY GOOD NATURED AND ACCOMMODATING AND WHEN PETER (BEGGED->BAGS) SO HARD HE JUST COULDN'T FIND IT IN HIS HEART TO REFUSE +3997-182399-0006-1785: WAY BACK IN THE DAYS WHEN (GRANDPAP BUZZARD->GRANDPAPAZZARD) HAD HIS (LIL->LITTLE) FALLING OUT WITH (OL->OLD) KING EAGLE AND (DONE FLY->DON FLIES) SO HIGH HE (SCO'TCH->SCORCHED) THE FEATHERS (OFFEN->OFTEN) HIS (HAID->HEAD) HE HAD A COUSIN DID GRANDPAP BUZZARD AND THIS COUSIN WAS (JES->JUST) NATURALLY LAZY AND NO COUNT +3997-182399-0007-1786: LIKE MOST NO COUNT PEOPLE HE USED TO MAKE A REGULAR NUISANCE OF (HISSELF->HIMSELF) POKING HIS NOSE INTO (EV'YBODY'S->EVERYBODY'S) BUSINESS AND NEVER TENDING TO HIS OWN +3997-182399-0008-1787: WASN'T ANYTHING GOING ON THAT THIS TRIFLING MEMBER OF THE BUZZARD (FAM'LY->FAMILY) DIDN'T FIND OUT ABOUT (AND->A) MEDDLE IN HE 
COULD ASK (MO->MORE) QUESTIONS THAN PETER RABBIT (CAN AN->KENN AND) ANYBODY THAT CAN DO THAT HAS GOT TO ASK A LOT +3997-182399-0009-1788: EVERYBODY LOOKED AT PETER AND LAUGHED +3997-182399-0010-1789: SO WE UNS (SIT->SET) ON THE CHIMNEY TOPS WHENEVER (OL->OLD) JACK FROST GETS TO (STRAYING->STRAIN) DOWN WHERE HE HAVE NO BUSINESS +3997-182399-0011-1790: ONE DAY (THIS->THERE'S) NO COUNT TRIFLING COUSIN OF GRANDPAP BUZZARD GET COLD IN HIS FEET +3997-182399-0012-1791: IT WAS ON (A LIL OL->THE LITTLE OLD) HOUSE A (LIL OL->LITTLE OLD) TUMBLE DOWN HOUSE +3997-182399-0013-1792: WHY HE (JES->JUST) STRETCH HIS (FOOL HAID->FULL HEAD) AS FAR DOWN (THAT->THE) CHIMNEY AS HE CAN (AN->AND) LISTEN (AN->AND) LISTEN +3997-182399-0014-1793: BUT HE DON'T MIND THAT +3997-182399-0015-1794: (WILL YO' ALLS->OH YOU ALL) PLEASE SPEAK A (LIL->LOW) LOUDER HE (HOLLER->HOLLERED) DOWN THE CHIMNEY (JES->JUST) LIKE THAT +3997-182399-0016-1795: YES SAH SHE (SHO'LY->SURELY YOU) WAS (PLUMB->PLUM) SCARED +3997-182399-0017-1796: (THEY->THEY'D) LIKE TO CHOKE THAT NO (COUNT BUZZARD->COMPOSER) TO DEATH +3997-182399-0018-1797: WHEN HE GET HOME (HE->HE'D) TRY (AN TRY->AND TRIES) TO BRUSH (THAT->US) SOOT OFF BUT IT DONE GET INTO THE SKIN (AN IT->AND IT'S) STAY THERE +3997-182399-0019-1798: A LITTLE SIGH OF SATISFACTION WENT (AROUND->ROUND) THE CIRCLE OF LISTENERS +3997-182399-0020-1799: IT WAS JUST AS GOOD AS ONE OF GRANDFATHER (FROG'S->FROGS) +4198-12259-0000-203: DRAW REACH FILL MIX GIVE IT ME WITHOUT WATER +4198-12259-0001-204: SO MY FRIEND SO WHIP ME OFF THIS GLASS NEATLY BRING ME HITHER SOME (CLARET->CLARE IT) A FULL WEEPING GLASS TILL IT RUN OVER +4198-12259-0002-205: A CESSATION AND TRUCE WITH THIRST +4198-12259-0003-206: YOU HAVE (CATCHED->CAST) A COLD GAMMER YEA FORSOOTH SIR +4198-12259-0004-207: BY THE BELLY OF (SANCT->SAINT) BUFF LET US TALK OF OUR DRINK I NEVER DRINK BUT AT MY HOURS LIKE THE POPE'S MULE +4198-12259-0005-208: WHICH WAS FIRST (THIRST OR->THUS TO) DRINKING +4198-12259-0006-209: WHAT IT SEEMS I DO NOT DRINK BUT (BY->BUY) AN ATTORNEY +4198-12259-0007-210: DRINK ALWAYS AND YOU SHALL NEVER DIE +4198-12259-0008-211: IF I DRINK NOT I AM A GROUND DRY GRAVELLED AND SPENT I AM STARK DEAD WITHOUT DRINK AND MY SOUL READY TO FLY INTO SOME (MARSH AMONGST->MARS A MONTH'S) FROGS THE SOUL NEVER DWELLS IN A DRY PLACE (DROUTH KILLS->DROUGHT KILL) IT +4198-12259-0009-212: HE (DRINKS IN->DRINK SO) VAIN THAT (FEELS->FILLS) NOT THE PLEASURE OF IT +4198-12259-0010-213: IT IS ENOUGH TO BREAK BOTH (GIRDS->GORGE) AND (PETREL->PETAL) +4198-12259-0011-214: WHAT DIFFERENCE IS THERE BETWEEN A BOTTLE AND A FLAGON +4198-12259-0012-215: BRAVELY AND WELL PLAYED UPON THE WORDS +4198-12259-0013-216: OUR FATHERS DRANK LUSTILY AND EMPTIED THEIR (CANS->CANES) +4198-12259-0014-217: WELL (CACKED->CAGLE) WELL SUNG +4198-12259-0015-218: COME LET US DRINK WILL YOU SEND NOTHING TO THE RIVER +4198-12259-0016-219: I DRINK NO MORE THAN (A SPONGE->HIS SPINES) +4198-12259-0017-220: I DRINK LIKE A TEMPLAR (KNIGHT->NIGHT) +4198-12259-0018-221: AND I (TANQUAM SPONSUS->TANK QUON SPONSES) +4198-12259-0019-222: AND I SICUT (TERRA SINE->TERRACE IN) AQUA +4198-12259-0020-223: GIVE ME A (SYNONYMON->SYNONYM) FOR A (GAMMON->GAMIN) OF BACON +4198-12259-0021-224: IT IS THE COMPULSORY OF DRINKERS IT IS A PULLEY +4198-12259-0022-225: A LITTLE RAIN (ALLAYS->IT LAYS) A GREAT DEAL OF WIND LONG TIPPLING BREAKS THE THUNDER +4198-12259-0023-226: BUT IF THERE CAME SUCH LIQUOR (FROM->FOR) MY BALLOCK (WOULD->WERE) YOU NOT WILLINGLY THEREAFTER SUCK THE (UDDER->UTTER) WHENCE IT ISSUED 
+4198-12259-0024-227: HERE PAGE FILL +4198-12259-0025-228: I APPEAL FROM THIRST AND DISCLAIM ITS JURISDICTION +4198-12259-0026-229: I WAS WONT (HERETOFORE->HERE) TO (*->FOR TO) DRINK OUT ALL BUT NOW I LEAVE NOTHING +4198-12259-0027-230: (HEYDAY->HAY THEE) HERE (ARE TRIPES->A TRIPE'S) FIT FOR OUR SPORT AND IN EARNEST EXCELLENT (GODEBILLIOS->GO TO BE YOURS) OF THE DUN (OX->AX) YOU KNOW WITH THE BLACK (STREAK->STREET) +4198-12259-0028-231: (O->OH) FOR GOD'S SAKE LET US (LASH->LAST) THEM SOUNDLY YET THRIFTILY +4198-12259-0029-232: SPARROWS (WILL NOT->WHEN I) EAT UNLESS YOU BOB THEM ON THE TAIL NOR CAN I DRINK IF I BE NOT FAIRLY SPOKE TO +4198-12259-0030-233: (HO->OH) THIS (WILL BANG IT SOUNDLY->WAS BEING IT'S ONLY) +4198-12259-0031-234: BUT THIS SHALL BANISH (IT->THEE) UTTERLY +4198-12259-0032-235: LET US WIND OUR HORNS BY THE SOUND OF FLAGONS AND BOTTLES AND CRY ALOUD (THAT->THERE) WHOEVER HATH LOST HIS THIRST COME (NOT->NIGH) HITHER TO SEEK IT +4198-12259-0033-236: THE GREAT GOD MADE THE PLANETS AND WE MAKE THE PLATTERS NEAT +4198-12259-0034-237: APPETITE (COMES->COUNT) WITH EATING SAYS (ANGESTON->ANGISTON) BUT THE THIRST GOES AWAY WITH DRINKING +4198-12259-0035-238: I HAVE A REMEDY AGAINST THIRST QUITE CONTRARY TO THAT WHICH IS GOOD AGAINST (THE BITING->ABIDING) OF A (MAD DOG->MAN DOLE) +4198-12259-0036-239: (WHITE WINE->WHY) HERE (WINE->WHY) BOYS +4198-12259-0037-240: O (LACHRYMA->LACK REMO) CHRISTI IT IS OF THE BEST GRAPE +4198-12259-0038-241: (I'FAITH->I FAITH) PURE GREEK GREEK O THE FINE WHITE WINE +4198-12259-0039-242: THERE IS NO ENCHANTMENT NOR CHARM THERE EVERY ONE OF YOU HATH SEEN IT +4198-12259-0040-243: MY (PRENTICESHIP->PREDICUP) IS OUT (I AM->I'M) A FREE MAN AT THIS TRADE +4198-12259-0041-244: (I SHOULD SAY->AS YOU SEE) MASTER (PAST->PASS) +4198-12259-0042-245: (O->OH) THE DRINKERS THOSE THAT ARE (A->*) DRY O (POOR->PORT) THIRSTY SOULS +4198-12259-0043-246: CLEAR OFF (NEAT->MEAT) SUPERNACULUM +4198-12281-0000-187: ALTHOUGH THE PLAGUE WAS THERE IN THE MOST PART OF ALL THE HOUSES THEY NEVERTHELESS ENTERED EVERYWHERE THEN PLUNDERED AND CARRIED AWAY ALL THAT WAS WITHIN AND YET FOR ALL THIS NOT ONE OF THEM TOOK ANY HURT WHICH IS A MOST WONDERFUL CASE +4198-12281-0001-188: I BESEECH YOU THINK UPON IT +4198-12281-0002-189: NEVERTHELESS AT ALL (ADVENTURES->VENTURES) THEY RANG THE BELLS (AD->AT) CAPITULUM (CAPITULANTES->CAPITULAT DAYS) +4198-12281-0003-190: BY THE VIRTUE OF GOD WHY DO NOT YOU SING PANNIERS FAREWELL (VINTAGE->VENTAGE) IS (DONE->NONE) +4198-12281-0004-191: BY THE BELLY OF (SANCT->SAINT) JAMES (WHAT->WHICH) SHALL WE POOR DEVILS DRINK THE WHILE +4198-12281-0005-192: LORD GOD (DA MIHI POTUM->DALMY HE POT EM) +4198-12281-0006-193: LET HIM BE CARRIED (TO->THE) PRISON FOR TROUBLING THE DIVINE SERVICE +4198-12281-0007-194: WHEREFORE IS IT THAT OUR DEVOTIONS WERE INSTITUTED TO BE SHORT IN THE TIME OF HARVEST AND VINTAGE AND LONG IN (THE->*) ADVENT (AND->IN) ALL THE WINTER +4198-12281-0008-195: (HARK->AREN'T) YOU MY MASTERS YOU THAT LOVE (THE WINE->THEM WHY) COP'S BODY FOLLOW ME FOR (SANCT->SAINT) ANTHONY BURN ME AS FREELY AS A FAGGOT (IF->*) THEY GET LEAVE TO TASTE ONE DROP OF THE LIQUOR THAT (WILL->WOULD) NOT NOW COME AND FIGHT FOR RELIEF OF THE VINE +4198-12281-0009-196: TO OTHERS AGAIN HE UNJOINTED THE (SPONDYLES->SPIND MULES) OR KNUCKLES OF THE NECK (DISFIGURED->THIS FIGURED) THEIR CHAPS (GASHED->GASH) THEIR FACES MADE THEIR CHEEKS HANG FLAPPING ON THEIR CHIN AND SO SWINGED AND (BALAMMED->BLAMMED) THEM THAT THEY FELL DOWN BEFORE HIM LIKE HAY BEFORE (A MOWER->HIM OVER) 
+4198-12281-0010-197: TO SOME (WITH A->WOULD THEY) SMART (SOUSE->SOUS) ON (THE->THEIR) EPIGASTER HE WOULD MAKE THEIR (MIDRIFF SWAG->MIDRIFTS WAG) THEN REDOUBLING THE BLOW GAVE THEM SUCH A (HOMEPUSH->HOME PUSH) ON THE NAVEL THAT HE MADE THEIR PUDDINGS TO GUSH OUT +4198-12281-0011-198: BELIEVE (THAT->THEN) IT WAS THE MOST HORRIBLE SPECTACLE THAT EVER (ONE->WON) SAW +4198-12281-0012-199: (O->OH) THE HOLY LADY (NYTOUCH->KNIGHTSAGE) SAID ONE THE GOOD (SANCTESS->SANCTUS) O OUR LADY (OF SUCCOURS->OFURUS) SAID ANOTHER HELP HELP +4198-12281-0013-200: SOME DIED WITHOUT SPEAKING OTHERS SPOKE WITHOUT DYING SOME DIED IN SPEAKING OTHERS SPOKE (IN->AND) DYING +4198-12281-0014-201: CAN YOU TELL WITH WHAT INSTRUMENTS THEY DID IT +4198-12281-0015-202: IN THE MEANTIME (FRIAR->FRY) JOHN WITH HIS FORMIDABLE BATON OF THE CROSS GOT TO THE BREACH WHICH THE ENEMIES HAD MADE AND THERE STOOD TO SNATCH UP THOSE THAT ENDEAVOURED TO ESCAPE +4198-61336-0000-247: IT IS SIGNIFICANT TO NOTE IN THIS CONNECTION THAT THE NEW KING WAS AN UNSWERVING ADHERENT OF THE CULT OF (ASHUR->ASHER) BY THE (ADHERENTS->ADHERENCE) OF WHICH HE WAS PROBABLY STRONGLY SUPPORTED +4198-61336-0001-248: AT THE BEGINNING OF HIS REIGN THERE WAS MUCH SOCIAL DISCONTENT AND SUFFERING +4198-61336-0002-249: WELL MIGHT (SHARDURIS->YOUR DEARUS) EXCLAIM IN THE WORDS OF THE PROPHET WHERE IS THE KING OF (ARPAD->ARPET) +4198-61336-0003-250: (TIGLATH PILESER->TIGG LAST BELIEVER) HOWEVER CROSSED THE (EUPHRATES->EUPHADIS) AND MOVING NORTHWARD DELIVERED AN UNEXPECTED ATTACK ON THE (URARTIAN->RACIAN) ARMY (IN QUMMUKH->AND KUMAK) +4198-61336-0004-251: A FIERCE BATTLE ENSUED AND ONE OF (ITS->HIS) DRAMATIC INCIDENTS WAS A SINGLE COMBAT BETWEEN THE RIVAL KINGS +4198-61336-0005-252: AN ATTEMPT WAS MADE TO CAPTURE KING (SHARDURIS->CHAUDURUS) WHO LEAPT FROM HIS CHARIOT AND MADE HASTY ESCAPE ON HORSEBACK HOTLY PURSUED (IN->AND) THE GATHERING DARKNESS BY AN ASSYRIAN (CONTINGENT->CONTENDENT) OF CAVALRY +4198-61336-0006-253: DESPITE THE BLOW DEALT AGAINST (URARTU->YOU ARE TO) ASSYRIA DID NOT IMMEDIATELY REGAIN POSSESSION OF NORTH SYRIA +4198-61336-0007-254: THE SHIFTY (MATI->MANTIL) ILU EITHER CHERISHED THE HOPE THAT (SHARDURIS->SHALL DORIS) WOULD RECOVER STRENGTH AND AGAIN INVADE NORTH SYRIA OR THAT HE MIGHT HIMSELF ESTABLISH AN EMPIRE IN THAT REGION +4198-61336-0008-255: (TIGLATH PILESER->TIG GLASS BELIEVER) HAD THEREFORE TO MARCH WESTWARD AGAIN +4198-61336-0009-256: FOR THREE YEARS HE CONDUCTED VIGOROUS CAMPAIGNS IN THE WESTERN LAND WHERE HE MET WITH VIGOROUS RESISTANCE +4198-61336-0010-257: (ARPAD->OUR PAD) WAS CAPTURED AND (MATI ILU->MEANT TO ILL YOU) DEPOSED AND PROBABLY PUT TO DEATH +4198-61336-0011-258: ONCE AGAIN THE HEBREWS CAME INTO CONTACT WITH (ASSYRIA->THE ZERIA) +4198-61336-0012-259: (ITS FALL MAY->IT'S FOR ME) NOT HAVE BEEN UNCONNECTED WITH THE TREND OF EVENTS IN (ASSYRIA->A SYRIA) DURING THE CLOSING YEARS OF THE MIDDLE EMPIRE +4198-61336-0013-260: (JEHOASH->JOESH) THE GRANDSON OF JEHU HAD ACHIEVED SUCCESSES IN CONFLICT WITH DAMASCUS +4198-61336-0014-261: SIX MONTHS (AFTERWARDS->AFTERWARD) HE WAS ASSASSINATED BY (SHALLUM->CHARLEM) +4198-61336-0015-262: THIS USURPER (HELD SWAY->HELDS WEIGH) AT SAMARIA FOR ONLY A MONTH +4198-61336-0016-263: NO RESISTANCE WAS POSSIBLE ON THE PART OF (MENAHEM->MANY HIM) THE USURPER WHO WAS PROBABLY READY TO WELCOME THE ASSYRIAN CONQUEROR SO THAT BY ARRANGING AN ALLIANCE HE MIGHT SECURE HIS OWN POSITION +4198-61336-0017-264: (TIGLATH PILESER->TAKE THAT PLEASURE) NEXT OPERATED AGAINST THE MEDIAN AND OTHER HILL TRIBES IN THE (NORTH 
EAST->NORTHEAST) +4198-61336-0018-265: HE OVERTHREW BUILDINGS DESTROYED ORCHARDS AND TRANSPORTED TO NINEVEH THOSE OF THE INHABITANTS HE HAD NOT PUT TO (THE SWORD->THIS WOOD) WITH ALL THE LIVE STOCK HE COULD LAY HANDS ON +4198-61336-0019-266: THUS WAS (URARTU->HERE TO) CRIPPLED AND HUMILIATED IT NEVER REGAINED ITS FORMER PRESTIGE AMONG THE NORTHERN STATES +4198-61336-0020-267: IN THE FOLLOWING YEAR (TIGLATH PILESER->TIG LAS BELIEVER) RETURNED TO SYRIA +4198-61336-0021-268: (MENAHEM->MANY HIM) KING OF ISRAEL HAD DIED AND WAS SUCCEEDED BY HIS SON (PEKAHIAH->PEKAHIA) +4198-61336-0022-269: (JUDAH->JULIA) HAD TAKEN ADVANTAGE OF THE DISTURBED CONDITIONS IN ISRAEL TO ASSERT ITS INDEPENDENCE +4198-61336-0023-270: HE CONDEMNED ISRAEL FOR ITS IDOLATRIES AND CRIED +4198-61336-0024-271: FOR (THUS->THIS) SAITH THE LORD UNTO THE HOUSE OF ISRAEL SEEK YE ME (AND YE SHALL->TO) LIVE HAVE YE OFFERED UNTO ME SACRIFICES AND OFFERINGS IN THE WILDERNESS FORTY YEARS O HOUSE OF ISRAEL +4198-61336-0025-272: THE REMNANT OF THE PHILISTINES SHALL PERISH +4198-61336-0026-273: ISRAEL WAS ALSO DEALT WITH +4198-61336-0027-274: HE SWEPT THROUGH ISRAEL LIKE A HURRICANE +4198-61336-0028-275: THE (PHILISTINES->FAIRLY STEAMS) AND THE ARABIANS OF THE DESERT WERE ALSO SUBDUED +4198-61336-0029-276: HE INVADED BABYLONIA +4198-61336-0030-277: (UKINZER->A KINDRED) TOOK REFUGE IN HIS CAPITAL SHAPIA WHICH HELD OUT SUCCESSFULLY ALTHOUGH THE SURROUNDING COUNTRY WAS RAVAGED AND DESPOILED +4294-14317-0000-1866: AS I THOUGHT THAT THIS WAS DUE TO SOME FAULT IN THE EARTH I WANTED TO MAKE THESE FIRST EXPERIMENTS BEFORE I UNDERTOOK MY PERSEUS +4294-14317-0001-1867: WHEN I SAW (THAT->*) THIS (BUST->BEST) CAME OUT SHARP AND CLEAN I (SET->SAID) AT ONCE TO CONSTRUCT A LITTLE FURNACE IN THE WORKSHOP ERECTED FOR ME BY THE DUKE AFTER MY OWN PLANS AND DESIGN IN THE HOUSE WHICH THE DUKE HAD GIVEN ME +4294-14317-0002-1868: IT WAS AN EXTREMELY DIFFICULT TASK AND I WAS ANXIOUS TO OBSERVE ALL THE NICETIES OF ART WHICH I HAD LEARNED SO AS NOT TO LAPSE INTO SOME ERROR +4294-14317-0003-1869: I IN MY TURN FEEL THE SAME DESIRE AND HOPE TO PLAY MY PART LIKE THEM THEREFORE MY LORD GIVE ME THE LEAVE TO GO +4294-14317-0004-1870: BUT BEWARE OF LETTING (BANDINELLO->BEND NELLO) QUIT YOU RATHER BESTOW UPON HIM ALWAYS MORE THAN HE DEMANDS FOR IF HE GOES INTO FOREIGN PARTS HIS IGNORANCE IS SO PRESUMPTUOUS THAT HE IS JUST THE MAN TO DISGRACE OUR MOST ILLUSTRIOUS SCHOOL +4294-14317-0005-1871: I (ASK->ASKED) NO FURTHER REWARD FOR MY LABOURS UP TO THIS TIME THAN THE GRACIOUS FAVOUR OF YOUR MOST ILLUSTRIOUS EXCELLENCY +4294-14317-0006-1872: THEN I THANKED HIM AND SAID I HAD NO GREATER DESIRE THAN TO SHOW THOSE ENVIOUS FOLK THAT I HAD IT IN ME TO EXECUTE THE PROMISED WORK +4294-14317-0007-1873: I HAD BETTER LOOK TO MY CONDUCT FOR IT (HAD->HAS) COME TO HIS EARS THAT I RELIED UPON HIS FAVOUR TO TAKE IN FIRST ONE MAN AND THEN ANOTHER +4294-14317-0008-1874: I BEGGED HIS MOST ILLUSTRIOUS EXCELLENCY TO NAME A SINGLE PERSON (WHOM I->WHY) HAD EVER TAKEN IN +4294-14317-0009-1875: I SAID MY LORD I THANK YOU AND BEG YOU TO CONDESCEND SO FAR AS TO LISTEN TO FOUR WORDS IT IS TRUE THAT HE LENT ME A PAIR OF OLD SCALES TWO ANVILS AND THREE LITTLE HAMMERS WHICH ARTICLES I BEGGED HIS (WORKMAN GIORGIO DA CORTONA->WORKMEN GEORGIO DESCORTONA) FIFTEEN DAYS AGO TO FETCH BACK +4294-14317-0010-1876: (GIORGIO->GEORGO) CAME FOR THEM (HIMSELF->HIS HEALTH) +4294-14317-0011-1877: I HOPE TO PROVE ON WHAT ACCOUNT THAT SCOUNDREL TRIES TO BRING ME INTO DISGRACE +4294-14317-0012-1878: WHEN HE HAD HEARD THIS 
SPEECH THE DUKE ROSE UP IN ANGER AND SENT FOR BERNARDONE WHO WAS FORCED TO TAKE FLIGHT AS FAR AS VENICE HE AND (ANTONIO LANDI->ANTONIA LANDY) WITH HIM +4294-14317-0013-1879: YOU HAD BETTER PUT THIS TO THE PROOF AND I WILL GO AT ONCE TO THE (BARGELLO->BARGIENLO) +4294-14317-0014-1880: I AM WILLING TO ENTER INTO COMPETITION WITH THE ANCIENTS AND FEEL ABLE TO SURPASS THEM FOR SINCE THOSE EARLY DAYS IN WHICH I MADE THE (MEDALS->METALS) OF POPE CLEMENT I HAVE LEARNED SO MUCH THAT I CAN NOW PRODUCE FAR BETTER PIECES OF THE KIND I THINK I CAN ALSO OUTDO THE COINS I STRUCK FOR DUKE (ALESSANDRO->ALISANDRO) WHICH (ARE->IS) STILL HELD IN HIGH ESTEEM IN LIKE MANNER I COULD MAKE FOR YOU LARGE PIECES OF GOLD AND SILVER PLATE AS I DID SO OFTEN FOR THAT NOBLE MONARCH KING FRANCIS OF FRANCE THANKS TO THE GREAT CONVENIENCES HE ALLOWED ME WITHOUT EVER LOSING TIME FOR THE EXECUTION OF COLOSSAL STATUES OR OTHER WORKS OF THE (SCULPTORS->SCULPTOR'S) CRAFT +4294-14317-0015-1881: AFTER SEVERAL MONTHS WERE WASTED AND PIERO WOULD NEITHER WORK NOR PUT MEN TO WORK UPON THE PIECE I MADE HIM GIVE IT BACK +4294-14317-0016-1882: AMONG ARTISTS CERTAIN (ENRAGED->ENRAGE) SCULPTORS LAUGHED AT ME AND CALLED ME THE NEW SCULPTOR +4294-14317-0017-1883: NOW I HOPE TO SHOW THEM THAT I AM AN OLD SCULPTOR IF GOD SHALL GRANT ME THE BOON OF FINISHING MY PERSEUS FOR THAT NOBLE PIAZZA OF HIS MOST ILLUSTRIOUS EXCELLENCY +4294-14317-0018-1884: HAVING THIS EXCELLENT RESOLVE IN HEART I REACHED MY HOME +4294-32859-0000-1942: WYLDER WAS RATHER SURLY AFTER THE LADIES HAD FLOATED AWAY FROM THE SCENE AND HE DRANK HIS LIQUOR DOGGEDLY +4294-32859-0001-1943: IT WAS HIS FANCY I SUPPOSE TO REVIVE CERTAIN SENTIMENTAL RELATIONS WHICH HAD IT MAY BE ONCE EXISTED BETWEEN HIM AND MISS LAKE AND HE WAS A PERSON OF THAT COMBATIVE TEMPERAMENT THAT MAGNIFIES AN OBJECT IN PROPORTION AS ITS PURSUIT IS THWARTED +4294-32859-0002-1944: THE STORY OF FRIDOLIN AND (RETZCH'S->WRETCH IS) PRETTY OUTLINES +4294-32859-0003-1945: SIT DOWN BESIDE ME AND I'LL TELL YOU THE STORY +4294-32859-0004-1946: HE ASSISTED AT IT BUT TOOK NO PART AND IN FACT WAS LISTENING TO THAT OTHER CONVERSATION WHICH SOUNDED WITH ITS PLEASANT GABBLE AND LAUGHTER LIKE A LITTLE MUSICAL TINKLE OF BELLS IN THE DISTANCE +4294-32859-0005-1947: BUT HONEST MARK FORGOT THAT YOUNG LADIES DO NOT ALWAYS COME OUT QUITE ALONE AND JUMP UNASSISTED INTO THEIR VEHICLES +4294-35475-0000-1885: BUT THE MIDDLE (SON->SUN) WAS LITTLE AND LORN HE WAS NEITHER DARK NOR FAIR HE WAS NEITHER HANDSOME NOR STRONG +4294-35475-0001-1886: (THROWING->ROWING) HIMSELF ON HIS KNEES BEFORE THE KING HE CRIED (OH->O) ROYAL SIRE BESTOW UPON ME ALSO A SWORD AND A STEED THAT I MAY UP AND (AWAY->WAIT) TO FOLLOW MY BRETHREN +4294-35475-0002-1887: BUT THE KING LAUGHED HIM TO SCORN THOU A SWORD HE QUOTH +4294-35475-0003-1888: IN SOOTH THOU SHALT HAVE ONE BUT IT SHALL BE ONE BEFITTING THY MAIDEN (SIZE->SIGHS) AND COURAGE IF SO SMALL A WEAPON CAN BE FOUND IN ALL MY KINGDOM +4294-35475-0004-1889: FORTHWITH THE GRINNING (JESTER->GESTURE) BEGAN SHRIEKING WITH LAUGHTER SO THAT THE BELLS UPON HIS MOTLEY CAP WERE ALL SET A JANGLING +4294-35475-0005-1890: I DID BUT LAUGH TO THINK THE SWORD OF (ETHELRIED->EFFLARIDE) HAD BEEN SO QUICKLY FOUND RESPONDED THE JESTER AND HE POINTED TO THE SCISSORS HANGING FROM THE TAILOR'S GIRDLE +4294-35475-0006-1891: ONE NIGHT AS HE LAY IN A DEEP FOREST (TOO->TWO) UNHAPPY TO SLEEP HE HEARD A NOISE NEAR AT HAND IN THE BUSHES +4294-35475-0007-1892: THOU SHALT HAVE THY LIBERTY HE CRIED EVEN THOUGH THOU SHOULDST (REND->RUN) ME IN 
PIECES THE MOMENT THOU ART FREE +4294-35475-0008-1893: (IT->HE) HAD (*->HITHER) SUDDENLY DISAPPEARED AND IN ITS PLACE STOOD A BEAUTIFUL FAIRY WITH FILMY WINGS WHICH SHONE LIKE RAINBOWS IN THE MOONLIGHT +4294-35475-0009-1894: AT THIS MOMENT THERE WAS A DISTANT RUMBLING AS OF THUNDER TIS THE OGRE CRIED THE FAIRY WE MUST HASTEN +4294-35475-0010-1895: SCISSORS GROW A GIANT'S HEIGHT AND SAVE US FROM THE (OGRE'S->OGRES) MIGHT +4294-35475-0011-1896: HE COULD SEE THE OGRE STANDING POWERLESS TO HURT HIM ON THE OTHER SIDE OF THE CHASM AND GNASHING HIS TEETH EACH ONE OF WHICH WAS AS BIG AS A (MILLSTON->MILLSTONE) +4294-35475-0012-1897: THE SIGHT WAS SO TERRIBLE THAT HE TURNED ON HIS HEEL AND FLED AWAY AS FAST AS HIS FEET COULD CARRY HIM +4294-35475-0013-1898: THOU SHALT NOT BE LEFT A PRISONER IN THIS DISMAL SPOT WHILE I HAVE THE POWER TO HELP THEE +4294-35475-0014-1899: HE LIFTED THE SCISSORS AND WITH ONE STROKE DESTROYED THE WEB AND GAVE THE FLY (ITS FREEDOM->TO READ THEM) +4294-35475-0015-1900: A FAINT GLIMMER OF LIGHT ON THE OPPOSITE WALL SHOWS ME THE KEYHOLE +4294-35475-0016-1901: THE PRINCE SPENT ALL THE FOLLOWING TIME UNTIL MIDNIGHT TRYING TO THINK OF A SUITABLE VERSE TO SAY TO THE SCISSORS +4294-35475-0017-1902: AS HE UTTERED THE WORDS THE SCISSORS LEAPED OUT OF HIS HAND AND BEGAN TO CUT THROUGH THE WOODEN SHUTTERS AS EASILY AS THROUGH (A->ITS) CHEESE +4294-35475-0018-1903: IN (A->THE) VERY SHORT TIME THE PRINCE HAD CRAWLED THROUGH THE OPENING +4294-35475-0019-1904: WHILE HE STOOD LOOKING AROUND HIM IN BEWILDERMENT A FIREFLY ALIGHTED ON HIS (ARM->HEART) FLASHING ITS LITTLE LANTERN IN THE PRINCE'S FACE IT CRIED THIS WAY MY FRIEND THE FLY SENT ME TO GUIDE YOU TO A PLACE OF SAFETY +4294-35475-0020-1905: WHAT IS TO BECOME OF ME CRIED THE POOR PEASANT +4294-35475-0021-1906: MY GRAIN MUST FALL (AND->IN) ROT IN THE FIELD FROM (OVERRIPENESS->OVER RIPENESS) BECAUSE I HAVE NOT THE STRENGTH TO RISE (AND->IN) HARVEST IT THEN INDEED MUST WE ALL STARVE +4294-35475-0022-1907: THE (GRANDAME->GRAND DAME) WHOM HE SUPPLIED WITH FAGOTS THE MERCHANT WHOM HE RESCUED FROM ROBBERS THE KING'S (COUNCILLOR->COUNSELLOR) TO WHOM HE GAVE AID ALL BECAME HIS FRIENDS UP AND DOWN THE LAND TO BEGGAR (OR->O) LORD HOMELESS WANDERER (OR->*) HIGH BORN DAME HE GLADLY GAVE UNSELFISH SERVICE ALL UNSOUGHT AND SUCH AS HE HELPED STRAIGHTWAY BECAME HIS FRIENDS +4294-35475-0023-1908: TO HIM WHO COULD BRING HER BACK TO HER FATHER'S CASTLE SHOULD BE GIVEN THE THRONE AND KINGDOM AS WELL AS THE PRINCESS HERSELF SO FROM FAR AND NEAR INDEED FROM ALMOST EVERY COUNTRY UNDER THE SUN CAME (KNIGHTS->NIGHTS) AND PRINCES TO FIGHT THE OGRE +4294-35475-0024-1909: AMONG THOSE WHO DREW BACK (WERE->WHERE) ETHELRIED'S BROTHERS THE THREE THAT WERE DARK AND THE THREE THAT WERE FAIR +4294-35475-0025-1910: BUT (ETHELRIED HEEDED->ETHEL READ HE DID) NOT THEIR TAUNTS +4294-35475-0026-1911: SO THEY ALL CRIED OUT LONG AND LOUD LONG LIVE THE PRINCE (PRINCE CISEAUX->PRINCESO) +4294-9934-0000-1912: HE FELT (WHAT->WITH) THE EARTH MAY POSSIBLY FEEL AT THE MOMENT WHEN IT IS TORN OPEN WITH THE IRON IN ORDER THAT GRAIN MAY BE DEPOSITED WITHIN IT IT FEELS ONLY THE WOUND THE QUIVER OF THE GERM (AND->*) THE JOY OF THE FRUIT ONLY (ARRIVE->ARRIVED) LATER +4294-9934-0001-1913: (HE HAD->HE'D) BUT JUST ACQUIRED A FAITH MUST HE THEN (REJECT IT->REJECTED) ALREADY +4294-9934-0002-1914: HE AFFIRMED TO HIMSELF THAT HE WOULD NOT HE DECLARED TO HIMSELF THAT HE WOULD NOT DOUBT AND HE BEGAN TO DOUBT IN SPITE OF HIMSELF +4294-9934-0003-1915: TO STAND BETWEEN TWO RELIGIONS FROM ONE OF WHICH 
YOU HAVE NOT AS YET EMERGED (AND->IN) ANOTHER INTO WHICH YOU HAVE NOT YET ENTERED IS INTOLERABLE AND TWILIGHT IS PLEASING ONLY TO BAT LIKE SOULS +4294-9934-0004-1916: MARIUS WAS CLEAR EYED AND HE REQUIRED THE TRUE LIGHT +4294-9934-0005-1917: THE HALF LIGHTS OF DOUBT PAINED HIM +4294-9934-0006-1918: WHATEVER MAY HAVE BEEN HIS DESIRE TO REMAIN WHERE HE WAS HE COULD NOT HALT THERE HE WAS IRRESISTIBLY CONSTRAINED TO CONTINUE TO ADVANCE TO EXAMINE TO THINK TO MARCH FURTHER +4294-9934-0007-1919: HE FEARED AFTER HAVING TAKEN SO MANY STEPS WHICH HAD BROUGHT HIM NEARER TO HIS FATHER TO NOW TAKE A STEP WHICH SHOULD ESTRANGE HIM FROM THAT FATHER +4294-9934-0008-1920: HIS DISCOMFORT WAS AUGMENTED BY ALL THE REFLECTIONS WHICH OCCURRED TO HIM +4294-9934-0009-1921: IN THE TROUBLED STATE OF HIS CONSCIENCE HE NO LONGER THOUGHT OF CERTAIN SERIOUS SIDES OF EXISTENCE +4294-9934-0010-1922: THEY SOON ELBOWED HIM ABRUPTLY +4294-9934-0011-1923: REQUEST (COURFEYRAC->HER FOR ACT) TO COME AND TALK WITH ME SAID MARIUS +4294-9934-0012-1924: WHAT IS TO BECOME OF YOU SAID (COURFEYRAC->CURFYRAC) +4294-9934-0013-1925: WHAT ARE YOU GOING TO DO I DO NOT KNOW +4294-9934-0014-1926: SILVER GOLD HERE IT IS +4294-9934-0015-1927: YOU WILL THEN HAVE ONLY A PAIR OF TROUSERS A WAISTCOAT A HAT AND A COAT AND MY BOOTS +4294-9934-0016-1928: THAT WILL BE ENOUGH +4294-9934-0017-1929: NO IT IS NOT GOOD WHAT WILL YOU DO AFTER THAT +4294-9934-0018-1930: DO YOU KNOW GERMAN NO +4294-9934-0019-1931: IT IS BADLY PAID WORK BUT ONE CAN LIVE BY IT +4294-9934-0020-1932: THE CLOTHES DEALER WAS SENT FOR +4294-9934-0021-1933: HE PAID TWENTY FRANCS FOR THE CAST OFF GARMENTS THEY WENT TO THE (WATCHMAKER'S->WATCHMAKERS) +4294-9934-0022-1934: HE BOUGHT THE WATCH FOR FORTY FIVE FRANCS +4294-9934-0023-1935: (HELLO->HALLO) I HAD FORGOTTEN THAT SAID MARIUS +4294-9934-0024-1936: THE LANDLORD PRESENTED HIS BILL WHICH HAD TO BE PAID ON THE SPOT +4294-9934-0025-1937: I HAVE TEN FRANCS LEFT SAID MARIUS +4294-9934-0026-1938: THAT WILL BE SWALLOWING A TONGUE VERY FAST OR A HUNDRED SOUS VERY SLOWLY +4294-9934-0027-1939: ONE MORNING ON HIS RETURN FROM THE (LAW->LAST) SCHOOL MARIUS FOUND A LETTER FROM HIS AUNT AND THE SIXTY (PISTOLES->PISTOL) THAT IS TO SAY SIX HUNDRED FRANCS IN GOLD (IN->AND) A SEALED BOX +4294-9934-0028-1940: MARIUS SENT BACK THE THIRTY LOUIS TO HIS AUNT WITH (A->THE) RESPECTFUL LETTER IN WHICH (HE->SHE) STATED THAT HE HAD (SUFFICIENT->SUSPICION) MEANS OF SUBSISTENCE AND THAT HE SHOULD BE ABLE THENCEFORTH TO SUPPLY ALL HIS NEEDS +4294-9934-0029-1941: AT THAT MOMENT HE HAD THREE FRANCS LEFT +4350-10919-0000-2716: HE PERCEIVED THAT IT WAS NO GOOD TALKING TO THE OLD MAN AND THAT THE PRINCIPAL PERSON IN THE HOUSE WAS THE MOTHER +4350-10919-0001-2717: BEFORE HER HE DECIDED TO SCATTER HIS PEARLS +4350-10919-0002-2718: THE PRINCESS WAS DISTRACTED AND DID NOT KNOW WHAT TO DO SHE FELT SHE HAD (SINNED->SENT) AGAINST KITTY +4350-10919-0003-2719: WELL DOCTOR DECIDE OUR FATE SAID THE PRINCESS TELL ME EVERYTHING +4350-10919-0004-2720: IS THERE HOPE SHE MEANT TO SAY BUT HER LIPS QUIVERED AND SHE COULD NOT UTTER THE QUESTION WELL DOCTOR +4350-10919-0005-2721: AS YOU PLEASE THE PRINCESS WENT OUT WITH A SIGH +4350-10919-0006-2722: THE FAMILY DOCTOR RESPECTFULLY CEASED IN THE MIDDLE OF HIS OBSERVATIONS +4350-10919-0007-2723: AND THERE ARE INDICATIONS (MALNUTRITION->MALLETRICIAN) NERVOUS EXCITABILITY AND SO ON +4350-10919-0008-2724: THE QUESTION (STANDS->SENDS) THUS IN PRESENCE OF INDICATIONS OF (TUBERCULOUS->TIBERICAN'S) PROCESS WHAT IS TO BE DONE TO MAINTAIN NUTRITION 
+4350-10919-0009-2725: YES (THAT'S AN->I CAN) UNDERSTOOD THING RESPONDED THE CELEBRATED PHYSICIAN AGAIN GLANCING AT HIS WATCH +4350-10919-0010-2726: BEG PARDON IS (THE YAUSKY->THEOSKEY) BRIDGE (DONE->DON) YET OR SHALL I HAVE TO DRIVE (AROUND->HER ON) +4350-10919-0011-2727: HE ASKED AH IT IS +4350-10919-0012-2728: OH WELL THEN I CAN DO IT IN TWENTY MINUTES +4350-10919-0013-2729: AND HOW ABOUT (A TOUR ABROAD->IT WERE BROAD) ASKED THE (FAMILY->FELLOW) DOCTOR +4350-10919-0014-2730: WHAT IS WANTED IS (*->THE) MEANS OF IMPROVING (NUTRITION->UTRITION) AND NOT FOR LOWERING IT +4350-10919-0015-2731: THE FAMILY DOCTOR LISTENED ATTENTIVELY AND RESPECTFULLY +4350-10919-0016-2732: BUT IN (FAVOR->FAVOUR) OF FOREIGN TRAVEL I WOULD URGE THE CHANGE OF HABITS THE REMOVAL FROM CONDITIONS CALLING UP REMINISCENCES +4350-10919-0017-2733: AND THEN THE MOTHER WISHES IT HE ADDED +4350-10919-0018-2734: AH WELL IN THAT CASE TO BE SURE LET THEM GO ONLY THOSE GERMAN QUACKS ARE MISCHIEVOUS +4350-10919-0019-2735: OH (TIME'S->TIMES) UP ALREADY AND HE WENT TO THE DOOR +4350-10919-0020-2736: THE CELEBRATED DOCTOR ANNOUNCED TO THE PRINCESS A FEELING OF WHAT WAS DUE FROM HIM (DICTATED->DECLATED) HIS DOING SO THAT HE OUGHT TO SEE THE PATIENT ONCE MORE +4350-10919-0021-2737: OH NO ONLY A FEW DETAILS PRINCESS COME THIS WAY +4350-10919-0022-2738: AND THE MOTHER ACCOMPANIED BY THE DOCTOR WENT INTO THE DRAWING ROOM TO KITTY +4350-10919-0023-2739: WHEN THE DOCTOR CAME IN SHE FLUSHED CRIMSON AND HER EYES FILLED WITH TEARS +4350-10919-0024-2740: SHE ANSWERED HIM AND ALL AT ONCE GOT UP FURIOUS +4350-10919-0025-2741: EXCUSE ME DOCTOR BUT THERE IS REALLY NO OBJECT IN THIS +4350-10919-0026-2742: THIS IS THE THIRD TIME YOU'VE ASKED ME THE SAME THING +4350-10919-0027-2743: THE CELEBRATED DOCTOR DID NOT TAKE (OFFENSE->OFFENCE) +4350-10919-0028-2744: NERVOUS IRRITABILITY HE SAID TO THE PRINCESS WHEN KITTY HAD LEFT THE ROOM HOWEVER I HAD FINISHED +4350-10919-0029-2745: AND THE DOCTOR BEGAN SCIENTIFICALLY EXPLAINING TO THE PRINCESS AS AN EXCEPTIONALLY INTELLIGENT WOMAN THE CONDITION OF THE YOUNG PRINCESS AND CONCLUDED BY INSISTING ON THE DRINKING OF THE WATERS WHICH WERE CERTAINLY HARMLESS +4350-10919-0030-2746: (AT->BUT) THE QUESTION SHOULD THEY GO ABROAD THE DOCTOR PLUNGED INTO DEEP MEDITATION AS THOUGH RESOLVING A WEIGHTY PROBLEM +4350-10919-0031-2747: FINALLY HIS DECISION WAS PRONOUNCED THEY WERE TO GO ABROAD BUT TO PUT NO FAITH IN FOREIGN QUACKS AND TO APPLY TO HIM IN ANY NEED +4350-10919-0032-2748: IT SEEMED AS THOUGH SOME PIECE OF GOOD FORTUNE HAD COME TO PASS AFTER THE DOCTOR HAD GONE +4350-10919-0033-2749: THE MOTHER WAS MUCH MORE CHEERFUL WHEN SHE WENT BACK TO HER DAUGHTER AND KITTY PRETENDED TO BE MORE CHEERFUL +4350-9170-0000-2750: EDUCATED PEOPLE OF THE UPPER CLASSES ARE TRYING TO STIFLE THE (EVER GROWING->EVERGREWING) SENSE OF THE NECESSITY OF TRANSFORMING THE EXISTING SOCIAL ORDER +4350-9170-0001-2751: (THIS IS->MISSUS) ABSOLUTELY (INCORRECT->AND CORRECT) +4350-9170-0002-2752: IN THE SOCIAL CONCEPTION OF LIFE IT IS SUPPOSED THAT SINCE THE AIM OF LIFE IS FOUND IN GROUPS OF INDIVIDUALS INDIVIDUALS (WILL->WHO) VOLUNTARILY SACRIFICE THEIR OWN (INTERESTS->INTEREST) FOR THE (INTERESTS->INTEREST) OF THE GROUP +4350-9170-0003-2753: THE CHAMPIONS OF THE SOCIAL CONCEPTION OF LIFE USUALLY TRY TO CONNECT THE IDEA OF AUTHORITY THAT IS OF VIOLENCE WITH THE IDEA OF MORAL INFLUENCE BUT THIS CONNECTION IS QUITE IMPOSSIBLE +4350-9170-0004-2754: THE MAN WHO (IS->HAS) CONTROLLED BY MORAL INFLUENCE ACTS IN ACCORDANCE WITH HIS OWN DESIRES 
+4350-9170-0005-2755: THE BASIS OF AUTHORITY IS BODILY VIOLENCE +4350-9170-0006-2756: THE POSSIBILITY OF APPLYING BODILY VIOLENCE (TO->THE) PEOPLE IS PROVIDED ABOVE ALL BY AN ORGANIZATION OF ARMED MEN TRAINED TO ACT IN UNISON (IN->AND) SUBMISSION TO ONE WILL +4350-9170-0007-2757: THESE BANDS OF ARMED MEN SUBMISSIVE TO A SINGLE WILL ARE WHAT CONSTITUTE THE ARMY +4350-9170-0008-2758: THE ARMY HAS ALWAYS BEEN AND STILL IS THE BASIS OF POWER +4350-9170-0009-2759: POWER IS ALWAYS IN THE HANDS OF THOSE WHO CONTROL THE ARMY AND ALL MEN IN POWER FROM THE ROMAN CAESARS TO THE RUSSIAN AND GERMAN EMPERORS TAKE MORE INTEREST IN THEIR ARMY THAN IN ANYTHING (AND->IN) COURT POPULARITY IN THE ARMY KNOWING THAT IF THAT IS ON THEIR SIDE THEIR POWER IS SECURE +4350-9170-0010-2760: INDEED IT COULD NOT BE OTHERWISE +4350-9170-0011-2761: ONLY UNDER THOSE CONDITIONS COULD THE SOCIAL ORGANIZATION BE JUSTIFIED +4350-9170-0012-2762: BUT SINCE THIS IS NOT THE CASE AND ON THE CONTRARY MEN (IN->AND) POWER ARE ALWAYS FAR FROM BEING SAINTS THROUGH THE VERY FACT OF THEIR POSSESSION OF POWER THE SOCIAL ORGANIZATION BASED ON POWER HAS NO JUSTIFICATION +4350-9170-0013-2763: EVEN IF THERE WAS ONCE A TIME WHEN OWING TO THE LOW (STANDARD->STANDARDS) OF MORALS (AND->ON) THE DISPOSITION OF MEN TO VIOLENCE THE EXISTENCE OF AN AUTHORITY TO RESTRAIN SUCH VIOLENCE WAS AN ADVANTAGE BECAUSE THE VIOLENCE OF (*->THE) GOVERNMENT WAS LESS THAN THE VIOLENCE OF INDIVIDUALS ONE CANNOT BUT SEE THAT THIS ADVANTAGE COULD NOT BE LASTING +4350-9170-0014-2764: BETWEEN THE MEMBERS OF ONE STATE SUBJECT TO A SINGLE AUTHORITY THE (STRIFE->STRIPE) BETWEEN (*->THE) INDIVIDUALS SEEMS STILL LESS AND (THE->A) LIFE OF THE STATE SEEMS EVEN MORE SECURE +4350-9170-0015-2765: IT WAS PRODUCED ON ONE HAND BY THE NATURAL GROWTH OF POPULATION AND ON THE OTHER BY STRUGGLE AND CONQUEST +4350-9170-0016-2766: AFTER CONQUEST THE POWER OF THE EMPEROR PUTS AN END TO INTERNAL DISSENSIONS AND SO THE STATE CONCEPTION OF LIFE JUSTIFIES ITSELF +4350-9170-0017-2767: BUT THIS JUSTIFICATION IS NEVER MORE THAN TEMPORARY +4350-9170-0018-2768: (INTERNAL->AND HERALD) DISSENSIONS DISAPPEAR ONLY IN PROPORTION TO THE DEGREE OF OPPRESSION EXERTED BY THE AUTHORITY OVER THE (DISSENTIENT->DYSINTHIAN) INDIVIDUALS +4350-9170-0019-2769: (GOVERNMENT->GOVERN) AUTHORITY EVEN IF IT DOES SUPPRESS PRIVATE VIOLENCE ALWAYS INTRODUCES INTO THE LIFE OF MEN FRESH FORMS OF VIOLENCE WHICH TEND TO BECOME GREATER AND GREATER IN PROPORTION TO THE DURATION AND STRENGTH OF THE GOVERNMENT +4350-9170-0020-2770: AND THEREFORE THE OPPRESSION OF THE OPPRESSED ALWAYS GOES ON GROWING UP TO THE FURTHEST LIMIT BEYOND WHICH IT CANNOT GO WITHOUT KILLING THE GOOSE WITH THE (GOLDEN EGGS->GOLD NICE) +4350-9170-0021-2771: THE MOST CONVINCING EXAMPLE OF THIS IS TO BE FOUND IN THE CONDITION OF THE WORKING CLASSES OF OUR EPOCH WHO ARE IN REALITY NO BETTER THAN THE SLAVES OF ANCIENT TIMES SUBDUED BY CONQUEST +4350-9170-0022-2772: SO IT (HAS->IS) ALWAYS (BEEN->THEN) +4350-9170-0023-2773: FOOTNOTE THE FACT THAT IN AMERICA THE ABUSES OF AUTHORITY EXIST IN SPITE OF THE SMALL NUMBER OF THEIR TROOPS NOT ONLY FAILS TO DISPROVE THIS POSITION BUT POSITIVELY CONFIRMS IT +4350-9170-0024-2774: THE UPPER CLASSES KNOW THAT AN ARMY OF FIFTY THOUSAND WILL SOON BE INSUFFICIENT AND NO LONGER RELYING ON PINKERTON'S MEN THEY FEEL THAT (THE->*) SECURITY OF THEIR POSITION DEPENDS ON THE INCREASED STRENGTH OF THE ARMY +4350-9170-0025-2775: THE REASON TO WHICH HE GAVE EXPRESSION IS ESSENTIALLY THE SAME AS THAT WHICH MADE THE FRENCH KINGS AND THE POPES 
ENGAGE SWISS AND SCOTCH GUARDS AND MAKES THE RUSSIAN AUTHORITIES OF TO DAY SO CAREFULLY DISTRIBUTE THE RECRUITS SO THAT THE REGIMENTS FROM THE (FRONTIERS ARE->FRONTIER THEIR) STATIONED IN CENTRAL DISTRICTS AND THE REGIMENTS FROM THE (CENTER->CENTRE) ARE STATIONED ON THE FRONTIERS +4350-9170-0026-2776: THE MEANING OF (CAPRIVI'S->THE PREVIOUS) SPEECH PUT INTO PLAIN LANGUAGE IS THAT FUNDS ARE NEEDED NOT TO RESIST FOREIGN FOES BUT TO BUY UNDER OFFICERS TO BE READY TO ACT AGAINST THE ENSLAVED TOILING MASSES +4350-9170-0027-2777: AND THIS ABNORMAL ORDER OF (THINGS->THANKS) IS MAINTAINED BY THE ARMY +4350-9170-0028-2778: BUT THERE IS NOT ONLY ONE GOVERNMENT THERE ARE OTHER GOVERNMENTS (EXPLOITING->EXPLODING) THEIR SUBJECTS BY VIOLENCE IN THE SAME WAY AND (*->ARE) ALWAYS READY TO POUNCE DOWN ON ANY OTHER GOVERNMENT AND CARRY OFF THE FRUITS OF THE TOIL OF ITS (ENSLAVED->ENSLAVE) SUBJECTS +4350-9170-0029-2779: AND SO EVERY GOVERNMENT NEEDS AN ARMY ALSO TO PROTECT ITS BOOTY FROM ITS NEIGHBOR BRIGANDS +4350-9170-0030-2780: THIS (INCREASE IS->INCREASES) CONTAGIOUS AS MONTESQUIEU POINTED OUT (ONE->A) HUNDRED (*->AND) FIFTY YEARS AGO +4350-9170-0031-2781: EVERY INCREASE IN THE ARMY OF ONE STATE WITH THE AIM OF SELF (DEFENSE->DEFENCE) AGAINST ITS SUBJECTS BECOMES A (SOURCE->SORT) OF DANGER FOR NEIGHBORING STATES AND CALLS FOR A SIMILAR INCREASE IN THEIR ARMIES +4350-9170-0032-2782: THE DESPOTISM OF (A->THE) GOVERNMENT ALWAYS INCREASES WITH THE STRENGTH OF THE ARMY AND ITS EXTERNAL SUCCESSES AND THE AGGRESSIVENESS OF A GOVERNMENT INCREASES WITH ITS INTERNAL DESPOTISM +4350-9170-0033-2783: THE RIVALRY OF THE EUROPEAN STATES (IN->AND) CONSTANTLY INCREASING THEIR FORCES HAS REDUCED THEM TO THE NECESSITY OF HAVING RECOURSE TO UNIVERSAL MILITARY SERVICE SINCE BY THAT MEANS THE GREATEST POSSIBLE NUMBER OF SOLDIERS IS OBTAINED AT THE LEAST POSSIBLE EXPENSE +4350-9170-0034-2784: AND BY THIS MEANS ALL CITIZENS ARE UNDER ARMS TO SUPPORT THE INIQUITIES (PRACTICED->PRACTISED) UPON THEM ALL CITIZENS HAVE BECOME THEIR OWN (OPPRESSORS->IMPRESSORS) +4350-9170-0035-2785: THIS INCONSISTENCY HAS BECOME OBVIOUS (IN->AND) UNIVERSAL MILITARY SERVICE +4350-9170-0036-2786: IN FACT THE WHOLE SIGNIFICANCE OF THE SOCIAL CONCEPTION OF LIFE CONSISTS IN MAN'S RECOGNITION OF THE BARBARITY OF STRIFE BETWEEN INDIVIDUALS AND THE TRANSITORINESS OF PERSONAL LIFE ITSELF AND THE TRANSFERENCE OF THE AIM OF LIFE (TO->THE) GROUPS OF PERSONS +4350-9170-0037-2787: BUT WITH UNIVERSAL MILITARY SERVICE IT COMES TO PASS THAT MEN AFTER MAKING EVERY SACRIFICE TO GET RID OF THE CRUELTY OF STRIFE AND THE INSECURITY OF EXISTENCE ARE CALLED UPON TO FACE ALL THE PERILS THEY HAD MEANT TO AVOID +4350-9170-0038-2788: BUT INSTEAD OF DOING THAT THEY (EXPOSE->EXPOSED) THE INDIVIDUALS TO THE SAME NECESSITY OF STRIFE SUBSTITUTING STRIFE WITH INDIVIDUALS OF OTHER STATES FOR STRIFE WITH NEIGHBORS +4350-9170-0039-2789: THE TAXES RAISED FROM THE PEOPLE FOR WAR PREPARATIONS ABSORB THE GREATER PART OF THE PRODUCE OF LABOR WHICH THE ARMY OUGHT TO DEFEND +4350-9170-0040-2790: THE DANGER OF WAR EVER READY TO BREAK OUT RENDERS ALL REFORMS OF LIFE SOCIAL LIFE VAIN AND FRUITLESS +4350-9170-0041-2791: BUT THE (FATAL->FIELD) SIGNIFICANCE OF UNIVERSAL MILITARY SERVICE AS THE MANIFESTATION OF THE CONTRADICTION INHERENT IN THE SOCIAL CONCEPTION OF LIFE IS NOT ONLY APPARENT IN THAT +4350-9170-0042-2792: GOVERNMENTS ASSERT THAT ARMIES ARE NEEDED ABOVE ALL FOR EXTERNAL (DEFENSE->DEFENCE) BUT THAT IS NOT TRUE +4350-9170-0043-2793: (THEY ARE->THERE) NEEDED PRINCIPALLY AGAINST THEIR 
SUBJECTS AND EVERY MAN UNDER UNIVERSAL MILITARY SERVICE BECOMES AN ACCOMPLICE IN ALL (THE->THAT) ACTS OF VIOLENCE OF THE GOVERNMENT AGAINST THE CITIZENS WITHOUT ANY CHOICE OF HIS OWN +4350-9170-0044-2794: AND FOR THE SAKE OF WHAT AM I MAKING THEM +4350-9170-0045-2795: I AM EXPECTED FOR THE SAKE OF THE STATE TO MAKE THESE SACRIFICES TO RENOUNCE EVERYTHING THAT CAN BE PRECIOUS TO MAN PEACE FAMILY SECURITY AND HUMAN DIGNITY +4350-9170-0046-2796: EXCEPT FOR THE STATE THEY SAY WE SHOULD BE EXPOSED TO THE ATTACKS OF EVIL DISPOSED PERSONS IN OUR OWN COUNTRY +4350-9170-0047-2797: WE (KNOW->*) NOW (*->KNOW) THAT THREATS AND PUNISHMENTS CANNOT DIMINISH THEIR NUMBER THAT THAT CAN ONLY BE DONE BY CHANGE OF ENVIRONMENT AND MORAL INFLUENCE +4350-9170-0048-2798: SO THAT (THE->THIS) JUSTIFICATION OF STATE VIOLENCE ON THE GROUND OF THE PROTECTION IT GIVES US FROM EVIL (DISPOSED->DISPOSE) PERSONS EVEN IF IT HAD SOME FOUNDATION THREE OR FOUR CENTURIES AGO HAS NONE WHATEVER NOW +4350-9170-0049-2799: EXCEPT FOR THE STATE THEY TELL US WE SHOULD NOT HAVE ANY RELIGION EDUCATION CULTURE MEANS OF COMMUNICATION AND SO ON +4350-9170-0050-2800: WITHOUT THE STATE MEN WOULD NOT HAVE BEEN ABLE TO FORM THE SOCIAL INSTITUTIONS NEEDED FOR DOING (ANY THING->ANYTHING) +4350-9170-0051-2801: THIS ARGUMENT TOO WAS WELL FOUNDED ONLY SOME CENTURIES AGO +4350-9170-0052-2802: THE GREAT EXTENSION OF MEANS OF COMMUNICATION AND INTERCHANGE OF IDEAS HAS MADE MEN COMPLETELY ABLE TO DISPENSE WITH STATE AID IN FORMING SOCIETIES ASSOCIATIONS CORPORATIONS AND CONGRESSES FOR SCIENTIFIC (ECONOMIC->AGONIC) AND POLITICAL OBJECTS +4350-9170-0053-2803: WITHOUT (GOVERNMENTS->GOVERNMENT'S) NATIONS WOULD BE ENSLAVED BY THEIR NEIGHBORS +4350-9170-0054-2804: THE GOVERNMENT THEY TELL US WITH ITS ARMY IS NECESSARY TO DEFEND US FROM NEIGHBORING STATES WHO MIGHT ENSLAVE US +4350-9170-0055-2805: AND IF (DEFENSE->DEFENCE) AGAINST BARBAROUS NATIONS IS MEANT ONE THOUSANDTH PART OF THE TROOPS NOW UNDER ARMS WOULD BE AMPLY SUFFICIENT FOR THAT PURPOSE +4350-9170-0056-2806: THE POWER OF THE STATE FAR FROM BEING A SECURITY AGAINST THE ATTACKS OF OUR NEIGHBORS EXPOSES US ON THE CONTRARY TO MUCH GREATER DANGER OF SUCH ATTACKS +4350-9170-0057-2807: EVEN LOOKING AT IT PRACTICALLY WEIGHING THAT IS TO SAY ALL THE (BURDENS->BIRDS) LAID ON HIM BY THE (STATE->STATES) NO MAN CAN FAIL TO SEE THAT FOR HIM PERSONALLY TO COMPLY WITH (*->THE) STATE DEMANDS AND SERVE IN THE ARMY WOULD IN THE MAJORITY OF CASES BE MORE DISADVANTAGEOUS THAN TO REFUSE TO DO SO +4350-9170-0058-2808: TO RESIST WOULD NEED INDEPENDENT THOUGHT AND EFFORT OF WHICH EVERY MAN IS NOT CAPABLE +4350-9170-0059-2809: SO MUCH FOR THE ADVANTAGES AND DISADVANTAGES OF BOTH LINES OF CONDUCT FOR A MAN OF THE WEALTHY (CLASSES->CLASS) AN OPPRESSOR +4350-9170-0060-2810: FOR A MAN OF THE POOR WORKING CLASS THE ADVANTAGES AND DISADVANTAGES WILL BE THE SAME BUT WITH A GREAT INCREASE OF DISADVANTAGES +4852-28311-0000-2098: SAY YOU KNOW (SUMTHIN->SOMETHING) +4852-28311-0001-2099: CHRIS LOOKED FROM A NICKEL PLATED (FLASHLIGHT->FLASH LIKE) TO A CAR JACK AND SPARK PLUG +4852-28311-0002-2100: (KNOW WHO->NO ONE) NEEDS A JOB (BAD->BAND) THAT'S (JAKEY->JI) HARRIS +4852-28311-0003-2101: O K HE SAID +4852-28311-0004-2102: ONLY WHY DIDN'T YOU ASK HIM YOURSELF +4852-28311-0005-2103: MIKE BECAME UNEASY AND FISHED (AN ELASTIC->AND MOLASTIC) BAND OUT OF HIS POCKET MADE A FLICK OF PAPER AND SENT IT SOARING OUT (INTO M->IN EM) STREET +4852-28311-0006-2104: WELL HE ADMITTED I DID +4852-28311-0007-2105: CHRIS ASKED (AND->HIM) FOR THE FIRST TIME 
THAT DAY (THE->THAT) HEAVY WEIGHT HE CARRIED WITHIN HIM LIFTED AND LIGHTENED A LITTLE +4852-28311-0008-2106: (THINK HE->THEY CAME) REALLY NEEDS IT HE PURSUED +4852-28311-0009-2107: HE WOULD HAVE LIKED TO GET THE JOB FOR (JAKEY->JAKIE) WHO NEEDED IT BUT SOMEHOW THE TASK OF FACING MISTER WICKER ESPECIALLY NOW THAT THE LIGHT WAS GOING AND DUSK (EDGING->EDGED) INTO THE STREETS WAS NOT WHAT (CHRIS HAD INTENDED->CHRISTEN TENDED) FOR ENDING THE AFTERNOON +4852-28311-0010-2108: MIKE'S EXPRESSION CHANGED AT (*->WHAT) ONCE TO ONE OF TRIUMPH BUT CHRIS WAS ONLY (PARTLY->PARSLY) ENCOURAGED +4852-28311-0011-2109: (BETCHA AREN'T->PITCHER AND) GOIN AFTER ALL (CHRIS->THIS) TURNED ON HIM +4852-28311-0012-2110: MIKE WAS STANDING ON THE CORNER +4852-28311-0013-2111: (AW SHUCKS->AH SHOCKS) +4852-28311-0014-2112: CHRIS STARTED OFF ONCE MORE PASSING (THE->A) BLEAK LITTLE VICTORIAN CHURCH PERCHED ON THE HILL ABOVE MISTER WICKER'S HOUSE +4852-28311-0015-2113: AN EMPTY LOT CUT (*->IN) INTO BY CHURCH LANE GAVE A LOOK OF ISOLATION TO THE (L->ELE) SHAPED BRICK BUILDING THAT SERVED MISTER (WICKER AS->WICKER'S) BOTH HOUSE AND PLACE OF BUSINESS +4852-28311-0016-2114: (THE->NO) LONGER (WING->WINGED) TOWARD THE BACK (HAD A->GOT IT) BACK DOOR THAT OPENED (ONTO->ON A) WATER STREET THE SPACE BETWEEN THE HOUSE AND WISCONSIN (AVENUE->ATTIGUE) HAD BEEN MADE INTO A NEAT OBLONG FLOWER GARDEN FENCED OFF FROM THE SIDEWALK BY BOX (SHRUBS->SHRUGS) AND A WHITE PICKET FENCE +4852-28311-0017-2115: A LIVID YELLOW STAINED THE HORIZON BEYOND THE FACTORIES AND (GRAY->GLAY) CLOUDS LOWERED AND TUMBLED ABOVE +4852-28311-0018-2116: THE AIR WAS GROWING CHILL AND CHRIS DECIDED TO FINISH (HIS->THE) JOB +4852-28311-0019-2117: ALL AT ONCE (HE->YOU) WONDERED HOW HIS MOTHER WAS AND EVERYTHING IN HIM PINCHED AND TIGHTENED ITSELF +4852-28311-0020-2118: AT THE FOOT OF THE HILL HE REACHED THE HOUSE +4852-28311-0021-2119: THERE WERE THREE THINGS THAT ALWAYS CAUGHT HIS EYE AMID THE LITTER OF DUSTY PIECES +4852-28311-0022-2120: ON THE LEFT THE COIL OF ROPE IN THE (CENTER->CENTRE OF) THE MODEL OF A SAILING SHIP IN A GREEN GLASS BOTTLE AND ON THE RIGHT THE WOODEN STATUE OF A NEGRO BOY IN BAGGY TROUSERS TURKISH JACKET AND WHITE TURBAN +4852-28311-0023-2121: BUT THE NAME STILL SHOWED AT THE PROW AND MANY A TIME CHRIS SAFE AT HOME IN BED HAD SAILED IMAGINARY VOYAGES IN THE MIRABELLE +4852-28311-0024-2122: (HE HAD->HE'D) NEVER SEEN ANYONE GO INTO MISTER WICKER'S SHOP NOW HE THOUGHT OF IT +4852-28311-0025-2123: HOW THEN DID HE LIVE AND WHAT DID HE EVER SELL +4852-28311-0026-2124: A SUDDEN CAR HORN (WOKE HIM->WALKING) FROM (HIS DREAM->THE STREAM) +4852-28312-0000-2125: OF THE MANY TIMES (HE HAD->EITHER) EXAMINED MISTER WICKER'S WINDOW AND (PORED->POURED) OVER THE ROPE (THE->TO) SHIP AND THE NUBIAN BOY HE HAD NEVER GONE INTO MISTER WICKER'S SHOP +4852-28312-0001-2126: SO NOW ALONE UNTIL (SOMEONE->SOME ONE) SHOULD ANSWER THE BELL (HE->THEY) LOOKED EAGERLY IF UNEASILY AROUND HIM +4852-28312-0002-2127: WHAT WITH THE ONE WINDOW AND THE LOWERING DAY OUTSIDE THE LONG NARROW SHOP WAS SOMBER +4852-28312-0003-2128: HEAVY HAND (HEWN->YOU AND) BEAMS CROSSED IT FROM ONE SIDE TO THE OTHER +4852-28312-0004-2129: (MISTER WICKER'S->MISS JOKERS) BACK BEING TOWARD THE SOURCE OF LIGHT CHRIS COULD NOT SEE HIS FACE +4852-28312-0005-2130: THE DOUBLE FANS (OF->A) MINUTE WRINKLES BREAKING FROM EYE CORNER TO TEMPLE AND JOINING WITH THOSE OVER THE (CHEEKBONES->CHEAP BONES) WERE DRAWN INTO THE HORIZONTAL LINES ACROSS THE DOMED FOREHEAD +4852-28312-0006-2131: LITTLE TUFTS OF WHITE (FUZZ->FUZ) 
ABOVE THE EARS WERE ALL THAT REMAINED OF THE ANTIQUARIAN'S HAIR BUT WHAT DREW AND HELD CHRIS'S GAZE (WERE->WITH) THE OLD MAN'S EYES +4852-28312-0007-2132: (CHRIS BLINKED->CRISP BINKED) AND LOOKED AGAIN YES THEY WERE STILL THERE +4852-28312-0008-2133: (CHRIS SWALLOWED->GRIS SWALLOW) AND HIS VOICE CAME BACK TO HIM +4852-28312-0009-2134: YES SIR HE SAID +4852-28312-0010-2135: I SAW YOUR SIGN AND I KNOW A BOY WHO NEEDS THE JOB +4852-28312-0011-2136: HE'S A SCHOOLMATE OF MINE +4852-28312-0012-2137: (JAKEY HARRIS HIS->JAGGY HEIRESS'S) NAME (IS AND->ISN'T) HE REALLY NEEDS THE JOB +4852-28312-0013-2138: I I JUST (WONDERED->WANTED) IF THE PLACE WAS STILL OPEN +4852-28312-0014-2139: WHAT HE SAW WAS A FRESH CHEEKED LAD TALL FOR THIRTEEN STURDY WITH SINCERITY AND GOOD (HUMOR->HUMOUR) IN HIS FACE AND SOMETHING (SENSITIVE AND->SCENTED IN) APPEALING ABOUT HIS EYES +4852-28312-0015-2140: HE GUESSED THERE MUST BE A LIVELY FIRE IN THAT ROOM BEYOND +4852-28312-0016-2141: WOULD THAT (INTERFERE->INFERE) WITH (JAKEY'S->JAKI GIGGS) GETTING THE JOB SIR +4852-28312-0017-2142: BUT EVEN AS HE SLOWLY TURNED THE THOUGHT PIERCED HIS MIND WHY (HAD->*) HE NOT SEEN THE REFLECTION OF THE HEADLIGHTS OF THE CARS MOVING UP AROUND THE (CORNER->CORRIER) OF (WATER->WALL UNDER) STREET (AND UP->NOT) THE HILL TOWARD THE (TRAFFIC->LIFE) SIGNALS +4852-28312-0018-2143: THE ROOM SEEMED OVERLY STILL +4852-28312-0019-2144: THEN IN THAT SECOND HE TURNED AND FACED ABOUT +4852-28312-0020-2145: THE WIDE BOW (WINDOW->WIND THAT) WAS THERE BEFORE HIM THE THREE OBJECTS HE LIKED BEST SHOWING FROSTY IN THE MOONLIGHT THAT POURED IN FROM ACROSS THE WATER +4852-28312-0021-2146: ACROSS THE WATER WHERE (WAS->IS) THE (FREEWAY->FREE WAY) +4852-28312-0022-2147: IT WAS NO LONGER THERE NOR WERE THE HIGH WALLS AND SMOKESTACKS OF FACTORIES TO BE SEEN +4852-28312-0023-2148: THE WAREHOUSES WERE STILL THERE +4852-28312-0024-2149: (FLABBERGASTED->FLAVAGASTED) AND BREATHLESS CHRIS WAS UNAWARE THAT HE HAD MOVED CLOSER TO PEER OUT THE WINDOW IN EVERY DIRECTION +4852-28312-0025-2150: NO ELECTRIC SIGNS NO (LAMPLIT->LAMPLET) STREETS +4852-28312-0026-2151: WHERE THE PEOPLE'S (DRUGSTORE HAD->DRUG STORE IT) STOOD BUT (A->*) HALF (*->AN) HOUR BEFORE ROSE THE ROOFS OF WHAT WAS EVIDENTLY AN INN +4852-28312-0027-2152: A COURTYARD WAS (SPARSELY LIT->FIRSTLY LED) BY A FLARING (TORCH OR TWO->TORTURE TO) SHOWING A SWINGING SIGN HUNG ON (A->THE) POST +4852-28312-0028-2153: THE (POST WAS PLANTED->POSTS BLOODED) AT THE EDGE OF (WHAT->IT) WAS NOW A BROAD AND (MUDDY->MONEY) ROAD +4852-28312-0029-2154: A COACH (WITH ITS TOP->WERE THEN STOPPED) PILED HIGH WITH LUGGAGE STAMPED (TO A->OR) HALT BESIDE THE FLAGGED COURTYARD +4852-28312-0030-2155: THEY MOVED INTO THE INN THE COACH RATTLED OFF TO THE STABLE +4852-28312-0031-2156: MY WINDOW (HAS->AS) A POWER FOR THOSE FEW WHO ARE TO SEE +4852-28319-0000-2070: THE LEARNING OF MAGIC WAS BY NO MEANS EASY +4852-28319-0001-2071: HE (HAD->*) TOLD HIS MASTER AT ONCE (ABOUT->HE GOT) SIMON (GOSLER->GOSPIR) HIS (HORDE->HOARD) OF MONEY (AND->IN) HIS HIDING PLACES FOR IT +4852-28319-0002-2072: CHRIS THEREFORE THREW HIMSELF (INTO->AND) ALL THE (PRELIMINARIES->PROLIMINARIES) OF HIS TASK +4852-28319-0003-2073: ONE AFTERNOON WHEN HE (*->HAD) RETURNED AFTER A REST TO MISTER WICKER'S STUDY HE SAW THAT THERE WAS SOMETHING NEW IN THE ROOM A (BOWL->BULL) WITH A (GOLDFISH->GOLD FISH) IN IT STOOD ON THE TABLE BUT MISTER WICKER WAS NOT TO BE SEEN +4852-28319-0004-2074: WHAT (SHALL->SHOULD) I DO FIRST +4852-28319-0005-2075: HOW YOU (HAVE IMPROVED->OFTEN PROVED) MY BOY 
(HE->IT) EXCLAIMED (IT->*) IS NOW TIME FOR YOU TO TRY (AND THIS IS AS GOOD->MISSUS GOT) A CHANGE AS ANY +4852-28319-0006-2076: SUPPOSE I CHANGE AND (CAN'T CHANGE->CATCH ITS) BACK +4852-28319-0007-2077: MISTER WICKER WAITED PATIENTLY BESIDE HIM FOR A FEW MOMENTS FOR CHRIS TO GET UP HIS COURAGE +4852-28319-0008-2078: (THEN AS->THAT IS) NOTHING HAPPENED WITH A VOICE LIKE A WHIP MISTER WICKER SAID (START AT->STARTED) ONCE +4852-28319-0009-2079: THE SENSATION SPREAD FASTER AND FASTER +4852-28319-0010-2080: HIS HEAD SWAM AND HE FELT FAINT (AND->IN) A LITTLE SICK BUT HE PERSISTED THROUGH THE FINAL WORDS +4852-28319-0011-2081: HE THOUGHT (NOT->NOW) WITHOUT A FEELING OF PRIDE AND COMMENCED (*->THE) EXPERIMENTING WITH HIS TAIL AND FINS WITH SUCH ENTHUSIASM AND DELIGHT THAT SOME LITTLE TIME ELAPSED BEFORE MISTER WICKER'S VOICE (BOOMED CLOSE->BOOM BUT WAS) BY +4852-28319-0012-2082: SEVENTY FOUR BOOK ONE THE RETURN +4852-28319-0013-2083: THE FIGURE'S SHOES CARVED IN SOME EASTERN STYLE HAD CURVED UP POINTING TOES +4852-28319-0014-2084: THEN ALL AT ONCE THE IDEA CAME TO CHRIS +4852-28319-0015-2085: IF HE WAS TO BE A MAGICIAN COULD HE MAKE THIS BOY COME TO LIFE +4852-28319-0016-2086: (HE->IT) SQUATTED ON HIS HAUNCHES (EXAMINING->EXAMINED) THE CARVED WOODEN FIGURE ATTENTIVELY AND FELT CONVINCED THAT ONCE ALIVE THE BOY WOULD BE AN IDEAL AND HAPPY COMPANION +4852-28319-0017-2087: BUT HOW DID ONE (*->A) CHANGE INANIMATE TO (ANIMATE->ENEMY) +4852-28319-0018-2088: CHRIS GOT UP AND STOLE BACK TO MISTER WICKER'S DOOR +4852-28319-0019-2089: HE HEARD (THE->THAT) MAGICIAN GOING UP THE SPIRAL STAIRCASE TO HIS ROOM ABOVE AND AFTER CHANGING HIMSELF TO A MOUSE TO SLIP UNDER THE DOOR AND SEE THAT THE ROOM WAS (REALLY->REELING) EMPTY (CHRIS RESUMED HIS->MISTER JAMES'S) PROPER SHAPE AND OPENED THE DOORS OF THE CUPBOARD AT THE FAR END OF THE ROOM +4852-28319-0020-2090: THE AFTERNOON (RAINY->RAINING) BEFORE INCREASED IN STORM +4852-28319-0021-2091: (DUSK CAME->THUS GAINED) TWO HOURS BEFORE ITS TIME THUNDER (SNARLED->SNARLS) IN THE SKY +4852-28319-0022-2092: CERTAIN ELEMENTS WERE TO BE MIXED AND POURED AT THE PROPER TIME +4852-28319-0023-2093: MISTER WICKER BEGAN MOVING ABOUT UPSTAIRS THE (FLOORBOARDS->FLOOR BOARDS) CREAKED (*->CREEK) AND STILL CHRIS COULD NOT LEAVE UNTIL THE (POTION->FOCIN) FUMED AND GLOWED +4852-28319-0024-2094: WITH INFINITE CAUTION CHRIS CLOSED THE (DOOR->DOORS) SILENTLY BEHIND HIM AND RUNNING (LIGHTLY FORWARD->LATE BEFOREWARD) REACHED THE FIGURE (OF->AT) THE NEGRO BOY +4852-28319-0025-2095: IT WAS AS IF THE STIFFNESS MELTED +4852-28319-0026-2096: UNDER HIS EYES (THE WOODEN->WIDEN) FOLDS OF CLOTH BECAME RICH SILK EMBROIDERY GLEAMED IN ITS REALITY UPON THE COAT AND OH THE FACE +4852-28319-0027-2097: THE WOODEN GRIN LOOSENED THE LARGE EYES TURNED THE HAND HOLDING THE HARD BOUQUET OF CARVED FLOWERS MOVED (AND LET->*) THE BOUQUET FALL +4852-28330-0000-2044: THEY WENT DOWN TO THEIR QUARTERS FIRST +4852-28330-0001-2045: GUESS MISTER FINNEY WENT TO HIS QUARTERS I DON'T REMEMBER SEEING HIM CROSS THE DECK OR COME OVER THAT WAY AT ALL +4852-28330-0002-2046: NEXT NED CILLEY WAS RELIEVED (AT->TO) THE (HELM->HOME) BY (ELBERT->HILBER) JONES WHO TOOK OVER NED WENT ON DOWN +4852-28330-0003-2047: IT LOOKS TO ME AS IF IT COULD (HAVE->BE) BEEN ONE OF SEVERAL PEOPLE AND I'LL BE SWITCHED IF I KNOW WHO (I'LL KEEP->LOOK GIVE) MY EYES (OPEN->UP AND) +4852-28330-0004-2048: THE MIRABELLE WAS NEARING (TAHITI->TEDI) +4852-28330-0005-2049: (WE'VE->WE) WATER AND FRESH (STORES->STALLS) TO TAKE ON THERE +4852-28330-0006-2050: CHRIS LOST 
NO TIME AS SOON AS HE COULD DO IT WITHOUT BEING NOTICED (IN->AND) HURRYING DOWN TO HIS CABIN +4852-28330-0007-2051: CERTAINLY MY BOY BOOMED OUT THE CAPTAIN (HIS->AS) BLUE EYES ABRUPTLY KEEN AND PENETRATING +4852-28330-0008-2052: MISTER (FINNEY->FINNELL) WILL BE SOME TIME ON DECK WE CANNOT BE (OVERHEARD->OUR OWN HEARD) IN HERE +4852-28330-0009-2053: HIS FACE (FROZE->ROSE) WITH NERVOUSNESS THAT THIS MIGHT (NOT->DO IT) DO AS AN ANSWER AND HE STOOD STIFF AND STILL BEFORE CAPTAIN BLIZZARD +4852-28330-0010-2054: THE CAPTAIN SAT FORWARD IN HIS CHAIR LOOKING AT HIM FOR A LONG MOMENT CONSIDERING +4852-28330-0011-2055: THEN HE SAID WELL I DO NOT CARE FOR IT I CANNOT SAY (I->THAT) DO +4852-28330-0012-2056: (THIS->THE) SHIP IS MORE TO ME THAN (WIFE OR MOTHER->MY FOREMOTHER) OR FAMILY +4852-28330-0013-2057: (HE->AND) PAUSED FINGERING HIS LOWER LIP AND LOOKING SIDEWAYS (IN A->INTO) REFLECTIVE FASHION AT CHRIS STANDING BEFORE HIM +4852-28330-0014-2058: WE SHALL SAY NO MORE BUT I TRUST YOU UNDERSTAND THE RESPONSIBILITY YOU HAVE +4852-28330-0015-2059: (THIS->THE) SHIP ITS CARGO (AND->IN) ITS MEN WILL BE IN YOUR HANDS +4852-28330-0016-2060: YES SIR I THINK I CAN DO IT SAFELY OR I SHOULD NOT TRY SIR +4852-28330-0017-2061: CAPTAIN BLIZZARD'S ROUND PINK (FACE->FACED) CREASED IN (HIS->ITS) WINNING SMILE +4852-28330-0018-2062: HE THEN WENT ON TO DESCRIBE WHAT ELSE WAS TO FOLLOW THE COVERING OF THE SHIP WITH LEAVES TO MAKE IT BLEND WITH ITS SURROUNDINGS +4852-28330-0019-2063: (CAMOUFLAGE->THE CAMERA FLASH) WAS NOT A WORD THE CAPTAIN OR ANYONE ELSE OF HIS TIME (YET->HE HAD) UNDERSTOOD +4852-28330-0020-2064: WHAT CAN BE SAID DURING THAT TIME SIR CHRIS THOUGHT TO ASK +4852-28330-0021-2065: I AM SOMEWHAT SKILLED (IN->AT) MEDICAMENTS I HAVE TO BE AS (*->A) CAPTAIN OF (A->*) SHIP AND THE CREW KNOW IT +4852-28330-0022-2066: I SHALL SAY THAT YOU ARE IN MY OWN CABIN SO THAT I CAN CARE FOR YOU +4852-28330-0023-2067: NOT SINCE HE HAD LEFT MISTER WICKER (HAD->AND) CHRIS FELT SUCH CONFIDENCE AS HE DID IN THE WORDS AND ACTIONS OF CAPTAIN BLIZZARD +4852-28330-0024-2068: HE KNEW NOW THAT HIS ABSENCE FOR AS LONG AS HE HAD (*->HAD) TO BE AWAY WOULD BE COVERED UP AND SATISFACTORILY ACCOUNTED FOR +4852-28330-0025-2069: THEIR CONVERSATION HAD TAKEN SOME (LITTLE->OF THE) WHILE +533-1066-0000-796: WHEN CHURCHYARDS YAWN +533-1066-0001-797: I KNEW WELL ENOUGH THAT HE MIGHT BE CARRIED (THOUSANDS->THOUSAND) OF MILES IN THE BOX CAR LOCKED IN PERHAPS WITHOUT WATER OR (FOOD->FULL) +533-1066-0002-798: I AM SURE I (KISSED LIDDY->GUESS LIVY) AND (I HAVE->I'VE) HAD (TERRIBLE->SEVERAL) MOMENTS SINCE WHEN I (SEEM->SEEMED) TO REMEMBER KISSING MISTER JAMIESON TOO (IN->WITH) THE EXCITEMENT +533-1066-0003-799: FORTUNATELY WARNER AND THE (DETECTIVES->DETECTS) WERE KEEPING BACHELOR HALL IN (THE->*) LODGE +533-1066-0004-800: OUT OF (DEFERENCE TO LIDDY->THEIR FIRST LIVY) THEY WASHED (THEIR->HER) DISHES ONCE (A->TO) DAY AND THEY (CONCOCTED->CONCLUDED) QUEER (MESSES->MASSES) ACCORDING TO THEIR SEVERAL ABILITIES +533-1066-0005-801: MISS (INNES->EANS) HE SAID STOPPING ME AS I WAS ABOUT TO GO TO MY ROOM UP STAIRS HOW ARE YOUR NERVES (TONIGHT->TO NIGHT) +533-1066-0006-802: I HAVE NONE I SAID HAPPILY +533-1066-0007-803: I MEAN HE PERSISTED DO YOU FEEL AS THOUGH YOU COULD GO THROUGH WITH SOMETHING RATHER UNUSUAL +533-1066-0008-804: THE MOST UNUSUAL THING I CAN THINK OF WOULD BE A PEACEFUL NIGHT +533-1066-0009-805: SOMETHING IS GOING TO OCCUR HE SAID +533-1066-0010-806: PUT ON HEAVY SHOES AND SOME (OLD->ALL) DARK CLOTHES AND MAKE UP YOUR MIND NOT TO BE SURPRISED AT 
ANYTHING +533-1066-0011-807: (LIDDY->LADY) WAS SLEEPING (THE->*) SLEEP OF THE JUST WHEN I WENT (UP STAIRS->UPSTAIRS) AND I HUNTED OUT MY THINGS CAUTIOUSLY +533-1066-0012-808: (THEY WERE->DO YOU) TALKING (CONFIDENTIALLY->TO FILIALLY) TOGETHER BUT WHEN I CAME DOWN THEY CEASED +533-1066-0013-809: (THERE->THEY) WERE A FEW PREPARATIONS TO BE MADE (THE LOCKS->LOGS) TO BE GONE OVER (WINTERS TO BE INSTRUCTED->WINTERSPIN INSTRUCTIVE) AS TO RENEWED (VIGILANCE->VISIONS) AND THEN AFTER EXTINGUISHING THE (HALL->WHOLE) LIGHT WE CREPT IN THE DARKNESS THROUGH THE FRONT DOOR AND INTO THE NIGHT +533-1066-0014-810: I ASKED NO QUESTIONS +533-1066-0015-811: (ONCE->WAS) ONLY SOMEBODY SPOKE AND THEN IT WAS AN EMPHATIC (BIT->WID) OF PROFANITY FROM DOCTOR STEWART WHEN HE RAN INTO A WIRE FENCE +533-1066-0016-812: I (HARDLY->ARE TO) KNOW WHAT I EXPECTED +533-1066-0017-813: THE DOCTOR WAS PUFFING SOMEWHAT WHEN WE FINALLY CAME TO A HALT +533-1066-0018-814: I CONFESS THAT JUST AT THAT MINUTE EVEN (SUNNYSIDE->SUNNICIDE) SEEMED A CHEERFUL SPOT +533-1066-0019-815: IN SPITE OF MYSELF I DREW MY BREATH IN SHARPLY +533-1066-0020-816: IT WAS ALEX ARMED WITH TWO LONG HANDLED SPADES +533-1066-0021-817: (THE->*) DOCTOR KEPT A (KEEN LOOKOUT->KIN LOOK OUT) BUT NO ONE APPEARED +533-1066-0022-818: THERE'S ONE THING SURE I'LL NOT BE SUSPECTED OF COMPLICITY +533-1066-0023-819: A DOCTOR IS GENERALLY SUPPOSED TO BE (*->A) HANDIER AT (BURYING->BEARING) FOLKS THAN (AT DIGGING->A TIGGING) THEM UP +533-1066-0024-820: I HELD ON TO HIM FRANTICALLY AND SOMEHOW I GOT THERE AND LOOKED DOWN +533-131556-0000-821: BUT HOW AM I TO (GET->HER) OVER THE TEN OR TWELVE DAYS THAT MUST YET ELAPSE BEFORE THEY GO +533-131556-0001-822: FOR NONE COULD (INJURE->ENDURE) ME AS HE HAS DONE OH +533-131556-0002-823: THE (WORD STARES->WORDS TEARS) ME IN THE FACE LIKE A GUILTY CONFESSION BUT IT IS TRUE I HATE HIM I HATE HIM +533-131556-0003-824: I SOMETIMES THINK I OUGHT TO GIVE HIM CREDIT FOR THE GOOD FEELING (HE SIMULATES->SIMILATE) SO WELL AND THEN AGAIN I THINK IT IS MY DUTY TO SUSPECT HIM UNDER THE PECULIAR CIRCUMSTANCES IN WHICH I AM PLACED +533-131556-0004-825: I HAVE DONE WELL TO RECORD (THEM SO MINUTELY->HIM SOMINUTELY) +533-131556-0005-826: THEY (*->HAVE) HAD (BETAKEN->TAKEN) THEMSELVES TO THEIR WORK I LESS (TO->*) DIVERT MY MIND THAN TO DEPRECATE CONVERSATION (HAD->I) PROVIDED MYSELF (WITH->FIT) A BOOK +533-131556-0006-827: I AM TOO WELL ACQUAINTED WITH (YOUR->THEIR) CHARACTER AND CONDUCT TO FEEL ANY REAL FRIENDSHIP FOR YOU AND AS I AM WITHOUT YOUR TALENT FOR DISSIMULATION I CANNOT ASSUME THE APPEARANCE OF IT +533-131556-0007-828: (UPON->UP AND) PERUSING THIS SHE TURNED SCARLET AND BIT HER LIP +533-131556-0008-829: YOU MAY GO (MILICENT->MILLICENT) AND SHE'LL (FOLLOW IN A WHILE MILICENT->FOLLOWING AWHILE MELLICENT) WENT +533-131556-0009-830: (WILL YOU OBLIGE->OLY OBLIGED) ME (HELEN->ALAN) CONTINUED SHE +533-131556-0010-831: (AH->HA) YOU ARE SUSPICIOUS +533-131556-0011-832: IF I WERE SUSPICIOUS I REPLIED I SHOULD HAVE DISCOVERED YOUR (INFAMY LONG->INFAMYLON) BEFORE +533-131556-0012-833: I ENJOY A (MOONLIGHT->MONTH) RAMBLE AS WELL AS YOU I ANSWERED STEADILY FIXING MY EYES (UPON HER->UP) AND (THE SHRUBBERY->EARTH AND SHRABBERY) HAPPENS TO BE ONE OF MY (FAVOURITE->FAVORITE) RESORTS +533-131556-0013-834: SHE COLOURED (AGAIN->BEGAN) EXCESSIVELY AND REMAINED SILENT (PRESSING->RAISING) HER FINGER AGAINST HER (TEETH->CHEEKS) AND GAZING INTO THE FIRE +533-131556-0014-835: I (WATCHED HER->WAS FOR) A FEW MOMENTS (WITH A->TO THE) FEELING OF MALEVOLENT GRATIFICATION THEN 
MOVING TOWARDS THE DOOR I CALMLY ASKED IF SHE HAD ANYTHING MORE TO SAY +533-131556-0015-836: YES YES +533-131556-0016-837: SUPPOSE I DO +533-131556-0017-838: SHE PAUSED IN EVIDENT DISCONCERTION AND PERPLEXITY MINGLED WITH ANGER SHE DARED NOT SHOW +533-131556-0018-839: I CANNOT RENOUNCE WHAT IS DEARER THAN LIFE SHE MUTTERED IN A LOW HURRIED TONE +533-131556-0019-840: IF YOU ARE (GENEROUS->GENERALS) HERE IS A (FITTING->FEELING) OPPORTUNITY FOR THE EXERCISE OF YOUR MAGNANIMITY IF YOU ARE PROUD (HERE->HEAR) AM I YOUR RIVAL (READY->RETIC) TO (ACKNOWLEDGE->ANNOUNCE) MYSELF YOUR (DEBTOR->DAUGHTER) FOR (AN->*) ACT OF (THE->*) MOST NOBLE FORBEARANCE +533-131556-0020-841: I SHALL NOT TELL HIM +533-131556-0021-842: GIVE ME NO THANKS IT IS NOT FOR YOUR SAKE THAT I REFRAIN +533-131556-0022-843: AND (MILICENT->MILLISON) WILL (YOU->IT) TELL HER +533-131556-0023-844: I (WOULD->WILL) NOT FOR MUCH THAT (SHE->YOU) SHOULD (KNOW THE INFAMY AND->NOT EVEN IN) DISGRACE OF HER RELATION +533-131556-0024-845: YOU USE (HARD->OUR) WORDS MISSUS HUNTINGDON BUT I CAN PARDON YOU +533-131556-0025-846: HOW DARE YOU MENTION HIS NAME TO ME +533-131562-0000-847: IT SEEMS VERY INTERESTING LOVE SAID HE LIFTING HIS HEAD AND (TURNING->SHIRTING) TO (WHERE I STOOD->HER EYES TOO) WRINGING MY (HANDS->HAND) IN SILENT (RAGE->RATES) AND ANGUISH BUT IT'S RATHER LONG (I'LL->I) LOOK AT IT SOME OTHER TIME AND MEANWHILE I'LL TROUBLE YOU FOR YOUR (KEYS->CASE) MY DEAR WHAT (KEYS->CASE) +533-131562-0001-848: (THE KEYS->IT A KISS) OF YOUR CABINET DESK (DRAWERS->DRAWER) AND WHATEVER ELSE YOU POSSESS SAID HE RISING AND HOLDING OUT HIS HAND +533-131562-0002-849: THE KEY OF MY (DESK->VESK) IN FACT WAS AT THAT MOMENT IN (THE LOCK->LOVE) AND THE OTHERS WERE ATTACHED TO IT +533-131562-0003-850: NOW THEN SNEERED HE WE MUST HAVE A CONFISCATION OF PROPERTY +533-131562-0004-851: AND (PUTTING->PUT IN) THE KEYS INTO HIS POCKET HE WALKED INTO THE LIBRARY +533-131562-0005-852: THAT AND ALL REPLIED THE (MASTER->MERCER) AND THE THINGS WERE CLEARED AWAY +533-131562-0006-853: MISTER HUNTINGDON THEN WENT (UP STAIRS->UPSTAIRS) +533-131562-0007-854: MUTTERED HE STARTING BACK SHE'S (THE->*) VERY DEVIL FOR SPITE +533-131562-0008-855: I (DIDN'T->THEN) SAY (I'D->I'VE) BROKEN IT DID I RETURNED HE +533-131562-0009-856: I SHALL PUT YOU (UPON->UP IN) A SMALL (MONTHLY ALLOWANCE->MOUTHLY ALLOW US) IN FUTURE FOR YOUR OWN PRIVATE EXPENSES AND YOU NEEDN'T TROUBLE YOURSELF ANY MORE ABOUT MY CONCERNS I SHALL LOOK OUT FOR A STEWARD MY DEAR I WON'T EXPOSE YOU TO THE TEMPTATION +533-131562-0010-857: AND AS FOR THE (HOUSEHOLD->HOUSE OF) MATTERS MISSUS (GREAVES->GREEBS) MUST BE VERY PARTICULAR IN KEEPING HER ACCOUNTS WE MUST GO (UPON->UP IN) AN (ENTIRELY->ENCHANTING) NEW PLAN +533-131562-0011-858: WHAT GREAT DISCOVERY HAVE YOU MADE NOW MISTER (HUNTINGDON->HONEYMAN) +533-131562-0012-859: (HAVE I ATTEMPTED->EVER ATTENDED) TO DEFRAUD YOU +533-131562-0013-860: NOT IN MONEY MATTERS EXACTLY IT SEEMS BUT IT'S BEST TO KEEP OUT OF THE WAY OF TEMPTATION +533-131562-0014-861: HERE (BENSON->BESSON) ENTERED (WITH->*) THE CANDLES AND THERE (FOLLOWED A->FELL THE) BRIEF INTERVAL OF SILENCE I SITTING (STILL IN->STEALING) MY CHAIR AND HE STANDING WITH HIS BACK TO THE FIRE SILENTLY TRIUMPHING IN MY DESPAIR +533-131562-0015-862: I KNOW THAT DAY AFTER DAY SUCH FEELINGS (WILL->TO) RETURN (UPON->UP ON) ME +533-131562-0016-863: I (TRY->TRIED) TO LOOK TO HIM AND RAISE MY HEART TO HEAVEN BUT IT WILL (CLEAVE->CLIFF) TO THE DUST +533-131564-0000-768: VAIN HOPE I FEAR +533-131564-0001-769: MISTER AND MISSUS 
(HATTERSLEY->HAUGHTERSLEY) HAVE BEEN (STAYING AT THE GROVE A FORTNIGHT->SEEING IT TO GROW BEFORE NIGHT) AND AS (MISTER->MISSUS) HARGRAVE IS STILL ABSENT AND THE WEATHER WAS REMARKABLY FINE (I NEVER PASSED->AND REPAST) A DAY WITHOUT SEEING MY TWO FRIENDS (MILICENT->MIELSON) AND ESTHER EITHER THERE OR HERE +533-131564-0002-770: NO UNLESS YOU CAN TELL ME WHEN TO EXPECT HIM HOME +533-131564-0003-771: I CAN'T (YOU DON'T WANT->EVEN ONE WANTS) HIM DO YOU +533-131564-0004-772: IT IS A RESOLUTION YOU (OUGHT TO HAVE FORMED->ARE REFORMED) LONG AGO +533-131564-0005-773: WE ALL HAVE A BIT OF A LIKING FOR HIM AT THE BOTTOM OF OUR (HEARTS->HEART) THOUGH WE CAN'T RESPECT HIM +533-131564-0006-774: NO I'D RATHER BE LIKE MYSELF (BAD AS->WHETHER) I AM +533-131564-0007-775: NEVER MIND MY PLAIN SPEAKING SAID I IT IS FROM THE BEST OF MOTIVES +533-131564-0008-776: BUT TELL ME SHOULD YOU WISH (*->TO) YOUR SONS TO BE LIKE MISTER HUNTINGDON OR EVEN LIKE YOURSELF +533-131564-0009-777: OH NO I COULDN'T STAND THAT +533-131564-0010-778: (FIRE AND->FOREIGN) FURY +533-131564-0011-779: NOW DON'T (BURST->FORCE) INTO A TEMPEST AT THAT +533-131564-0012-780: BUT HANG IT THAT'S NOT MY FAULT +533-131564-0013-781: NOT (YEARS FOR SHE'S->EARS FORCES) ONLY FIVE AND TWENTY +533-131564-0014-782: WHAT (WOULD->DID) YOU MAKE OF ME AND THE CHILDREN TO BE SURE THAT (WORRY HER TO->WERE HE HURT) DEATH BETWEEN THEM +533-131564-0015-783: I KNOW THEY ARE BLESS THEM +533-131564-0016-784: (HE FOLLOWED->IF ALL OF) ME INTO THE LIBRARY +533-131564-0017-785: I (SOUGHT->SET) OUT AND PUT INTO HIS HANDS TWO OF (MILICENT'S->MILLSON'S) LETTERS ONE (DATED->DID IT) FROM LONDON AND WRITTEN DURING ONE OF HIS (WILDEST->WALLACE) SEASONS OF RECKLESS DISSIPATION THE OTHER IN THE COUNTRY DURING A LUCID INTERVAL +533-131564-0018-786: THE FORMER WAS FULL OF TROUBLE AND ANGUISH NOT ACCUSING HIM BUT DEEPLY REGRETTING HIS CONNECTION WITH HIS PROFLIGATE COMPANIONS ABUSING MISTER GRIMSBY AND OTHERS INSINUATING BITTER THINGS AGAINST MISTER (HUNTINGDON->HUNTON) AND MOST (INGENIOUSLY->INGENUOUSLY) THROWING THE BLAME OF HER HUSBAND'S MISCONDUCT ON (TO->THE) OTHER (MEN'S->MAN'S) SHOULDERS +533-131564-0019-787: I'VE BEEN A CURSED RASCAL GOD KNOWS SAID HE AS HE GAVE IT (A HEARTY->AN EARTHLY) SQUEEZE BUT YOU SEE IF I DON'T MAKE AMENDS FOR IT (D N->THEN) ME IF I DON'T +533-131564-0020-788: IF YOU INTEND TO REFORM INVOKE GOD'S BLESSING (HIS->IS) MERCY (AND HIS AID->IN THIS APE) NOT (HIS CURSE->DISCOURSE) +533-131564-0021-789: GOD HELP ME THEN FOR (I'M->I AM) SURE I NEED IT +533-131564-0022-790: WHERE'S (MILICENT->MILLISON) +533-131564-0023-791: NAY NOT I SAID HE TURNING (HER->*) ROUND AND PUSHING (HER->*) TOWARDS ME +533-131564-0024-792: (MILICENT FLEW->MILLISON FLUD) TO THANK ME (OVERFLOWING WITH->OVERWHELMING ITS) GRATITUDE +533-131564-0025-793: CRIED SHE I COULDN'T HAVE (INFLUENCED->EVILISED) HIM I'M SURE BY ANYTHING THAT I COULD HAVE SAID +533-131564-0026-794: YOU NEVER TRIED ME (MILLY->MERELY) SAID HE +533-131564-0027-795: AFTER THAT THEY WILL REPAIR TO THEIR COUNTRY HOME +5442-32873-0000-1365: CAPTAIN LAKE DID NOT LOOK AT (ALL->ON) LIKE A LONDON DANDY NOW +5442-32873-0001-1366: THERE WAS A VERY NATURAL SAVAGERY AND DEJECTION (THERE->THEN) AND A WILD (LEER IN HIS->URINA'S) YELLOW EYES RACHEL SAT DOWN +5442-32873-0002-1367: A SLAVE ONLY THINK A SLAVE +5442-32873-0003-1368: OH FRIGHTFUL FRIGHTFUL IS IT A DREAM +5442-32873-0004-1369: (OH->ALL) FRIGHTFUL (FRIGHTFUL->CRIED FAWN) +5442-32873-0005-1370: STANLEY STANLEY IT WOULD BE MERCY TO KILL ME SHE BROKE (OUT->HER) AGAIN 
+5442-32873-0006-1371: BRIGHT AND NATTY (WERE THE CHINTZ->WITH A CHIN) CURTAINS AND THE LITTLE TOILET SET OUT NOT (INELEGANTLY->IN ELEGANTLY) AND HER (PET->BED) PIPING GOLDFINCH ASLEEP ON HIS PERCH WITH HIS BIT OF SUGAR BETWEEN THE (WIRES->WISE) OF HIS CAGE HER PILLOW SO WHITE AND UNPRESSED WITH ITS LITTLE EDGING OF LACE +5442-32873-0007-1372: WHEN HE CAME BACK TO THE DRAWING ROOM (A TOILET BOTTLE->I TOLD IT WHAT HE) OF (EAU DE COLOGNE->O'ER THE CLOON) IN HIS HAND WITH HER LACE HANDKERCHIEF HE BATHED HER (TEMPLES->TEMPLE) AND FOREHEAD +5442-32873-0008-1373: THERE WAS NOTHING VERY BROTHERLY IN HIS LOOK AS HE PEERED INTO (HER->A) PALE SHARP FEATURES DURING THE PROCESS +5442-32873-0009-1374: THERE DON'T MIND ME SHE SAID SHARPLY AND GETTING UP SHE LOOKED DOWN AT HER DRESS AND THIN SHOES AND SEEMING TO RECOLLECT HERSELF SHE TOOK THE CANDLE HE HAD JUST (SET->SAT) DOWN AND (WENT->WHEN) SWIFTLY TO HER ROOM +5442-32873-0010-1375: AND SHE THREW BACK HER (VEIL->VEAL) AND GOING HURRIEDLY TO THE TOILET MECHANICALLY SURVEYED HERSELF (IN->FROM) THE (GLASS->GLANCE) +5442-32873-0011-1376: (RACHEL LAKE RACHEL->ORIGINALLY LATER) LAKE WHAT ARE YOU NOW +5442-32873-0012-1377: I'LL STAY HERE THAT IS IN THE DRAWING ROOM SHE ANSWERED AND THE FACE WAS WITHDRAWN +5442-32873-0013-1378: (HE SLACKENED->HIS CLACKENED) HIS (PACE->FACE) AND (TAPPED->TAP) SHARPLY AT THE LITTLE WINDOW OF (THAT->THE) MODEST POST OFFICE AT WHICH THE YOUNG LADIES IN THE PONY CARRIAGE HAD PULLED UP THE DAY BEFORE AND WITHIN WHICH LUKE (WAGGOT->RAGGED) WAS WONT TO SLEEP IN A SORT OF WOODEN BOX THAT FOLDED UP AND APPEARED TO BE A CHEST OF DRAWERS ALL DAY +5442-32873-0014-1379: (LUKE TOOK->LOOK TO) CARE OF MISTER (LARKIN'S->LARKINS) DOGS AND GROOMED MISTER (WYLDER'S->WILDER'S) HORSE AND CLEANED UP HIS (DOG->DOOR) CART FOR MARK BEING CLOSE ABOUT MONEY AND FINDING THAT THE THING WAS TO BE DONE MORE CHEAPLY THAT WAY PUT UP HIS HORSE AND DOG CART IN THE POST (OFFICE->OF HIS) PREMISES AND SO EVADED THE LIVERY CHARGES OF THE BRANDON ARMS +5442-32873-0015-1380: (BUT->THE) LUKE WAS (NOT->KNOWN) THERE AND CAPTAIN LAKE RECOLLECTING HIS HABITS AND HIS HAUNT HURRIED ON TO THE SILVER LION WHICH HAS ITS (GABLE->CABLE) TOWARDS (THE->A) COMMON ONLY ABOUT A HUNDRED STEPS AWAY FOR DISTANCES ARE NOT GREAT IN GYLINGDEN +5442-32873-0016-1381: HERE WERE THE FLOW OF SOUL AND OF STOUT LONG PIPES LONG YARNS AND TOLERABLY LONG CREDITS AND THE HUMBLE (SCAPEGRACES->SKIPPED BRACES) OF THE TOWN RESORTED THITHER FOR THE PLEASURES OF A CLUB LIFE AND OFTEN REVELLED DEEP INTO THE SMALL HOURS OF THE MORNING +5442-32873-0017-1382: LOSE NO TIME (AND->WHEN) I'LL GIVE YOU HALF A CROWN +5442-32873-0018-1383: LUKE STUCK ON HIS GREASY (WIDEAWAKE->WIDE AWAKE) AND IN A FEW MINUTES MORE THE (DOG->DOOR) CART WAS (TRUNDLED->TUMBLED) OUT INTO THE LANE AND THE HORSE HARNESSED WENT BETWEEN THE SHAFTS WITH THAT WONDERFUL CHEERFULNESS WITH WHICH THEY (BEAR->BEARED) TO BE CALLED UP (UNDER->AND THE) STARTLING CIRCUMSTANCES (AT->THAT) UNSEASONABLE HOURS +5442-32873-0019-1384: IF I THOUGHT YOU'D (FAIL->FILL) ME NOW (TAMAR->TO MORROW) I SHOULD NEVER COME BACK GOOD NIGHT (TAMAR->TO MORROW) +5442-41168-0000-1385: THE ACT SAID THAT IN CASE OF DIFFERENCE OF OPINION THERE MUST BE A BALLOT +5442-41168-0001-1386: HE WENT UP TO THE TABLE AND STRIKING IT WITH HIS (FINGER RING->FINGERING) HE SHOUTED LOUDLY A BALLOT +5442-41168-0002-1387: HE WAS SHOUTING FOR THE VERY (COURSE SERGEY->COARSE SURGY) IVANOVITCH HAD PROPOSED BUT IT WAS EVIDENT THAT HE HATED HIM AND ALL HIS PARTY AND THIS FEELING OF HATRED SPREAD 
THROUGH THE WHOLE PARTY AND ROUSED IN (OPPOSITION->OUR POSITION) TO IT THE SAME VINDICTIVENESS THOUGH IN A MORE SEEMLY FORM ON THE OTHER SIDE +5442-41168-0003-1388: SHOUTS WERE RAISED AND FOR A MOMENT ALL WAS CONFUSION SO THAT THE MARSHAL OF THE PROVINCE HAD TO CALL FOR (ORDER->OTTO) A BALLOT +5442-41168-0004-1389: WE (SHED->SHUT) OUR BLOOD FOR OUR COUNTRY +5442-41168-0005-1390: THE CONFIDENCE OF THE MONARCH (*->BUT) NO (CHECKING->COOKING) THE ACCOUNTS OF THE (MARSHAL HE'S->MARTIAN IS) NOT A CASHIER BUT THAT'S NOT THE POINT +5442-41168-0006-1391: VOTES PLEASE (BEASTLY->PEASY) +5442-41168-0007-1392: THEY EXPRESSED THE MOST IMPLACABLE HATRED +5442-41168-0008-1393: LEVIN DID NOT IN THE LEAST UNDERSTAND WHAT WAS THE MATTER AND HE (MARVELED->MARVELLED) AT THE PASSION WITH WHICH IT WAS DISPUTED WHETHER OR NOT THE DECISION ABOUT (FLEROV->FLIROFF) SHOULD BE PUT TO THE VOTE +5442-41168-0009-1394: HE FORGOT AS (SERGEY IVANOVITCH->SO GEVINOVITCH) EXPLAINED TO HIM AFTERWARDS THIS (SYLLOGISM->SILLIGIOUS EM) THAT IT WAS NECESSARY FOR THE PUBLIC GOOD TO GET RID OF THE MARSHAL OF THE PROVINCE THAT TO GET (RID OF->HER TO) THE MARSHAL IT WAS NECESSARY TO HAVE A MAJORITY OF VOTES THAT TO GET A MAJORITY OF VOTES IT WAS NECESSARY TO SECURE (FLEROV'S->FLIROV'S) RIGHT TO VOTE THAT TO (SECURE->SECURED) THE RECOGNITION OF (FLEROV'S->FLIROV'S) RIGHT TO VOTE THEY MUST DECIDE ON THE INTERPRETATION TO BE PUT ON THE ACT +5442-41168-0010-1395: BUT LEVIN FORGOT ALL THAT AND IT WAS PAINFUL TO HIM TO SEE ALL THESE EXCELLENT PERSONS FOR WHOM HE HAD A RESPECT IN SUCH AN UNPLEASANT AND VICIOUS STATE OF EXCITEMENT +5442-41168-0011-1396: TO (ESCAPE->US GIVE) FROM THIS PAINFUL FEELING HE WENT AWAY INTO THE OTHER ROOM WHERE THERE WAS NOBODY EXCEPT THE WAITERS AT THE (REFRESHMENT->FRESHMENT) BAR +5442-41168-0012-1397: HE PARTICULARLY LIKED THE WAY ONE (GRAY WHISKERED->GREY WAS GOOD) WAITER WHO SHOWED (HIS SCORN->US GONE) FOR THE OTHER YOUNGER ONES AND WAS (JEERED->JOURED) AT BY THEM WAS TEACHING THEM HOW TO FOLD UP NAPKINS PROPERLY +5442-41168-0013-1398: LEVIN ADVANCED BUT UTTERLY FORGETTING WHAT HE WAS TO DO AND MUCH EMBARRASSED HE TURNED TO SERGEY IVANOVITCH WITH THE QUESTION WHERE AM I TO PUT IT +5442-41168-0014-1399: (SERGEY->SO AS YE) IVANOVITCH (FROWNED->GROUND) +5442-41168-0015-1400: THAT IS A MATTER FOR EACH MAN'S OWN DECISION HE SAID SEVERELY +5442-41168-0016-1401: HAVING PUT IT IN HE RECOLLECTED THAT HE OUGHT (TO->*) HAVE THRUST HIS LEFT HAND TOO AND SO HE THRUST IT (IN->*) THOUGH TOO LATE AND STILL MORE OVERCOME WITH CONFUSION HE BEAT A HASTY RETREAT INTO THE BACKGROUND +5442-41168-0017-1402: A HUNDRED AND TWENTY SIX FOR ADMISSION NINETY EIGHT AGAINST +5442-41168-0018-1403: SANG (OUT->ALL) THE VOICE OF THE SECRETARY WHO COULD NOT PRONOUNCE (THE->A) LETTER R +5442-41168-0019-1404: THEN THERE WAS A LAUGH (A BUTTON->AT BOTTOM) AND TWO (NUTS->KNOTS) WERE FOUND IN THE BOX +5442-41168-0020-1405: BUT THE OLD PARTY DID NOT CONSIDER THEMSELVES CONQUERED +5442-41168-0021-1406: IN (REPLY SNETKOV->THE PLACE NED GOFF) SPOKE OF THE TRUST (THE->AND) NOBLEMEN OF THE PROVINCE HAD PLACED IN HIM THE (AFFECTION->AFFECTANT) THEY HAD SHOWN HIM WHICH HE DID NOT DESERVE AS HIS ONLY MERIT HAD BEEN HIS ATTACHMENT TO THE NOBILITY TO WHOM HE HAD DEVOTED TWELVE YEARS OF SERVICE +5442-41168-0022-1407: THIS EXPRESSION IN THE MARSHAL'S FACE WAS PARTICULARLY TOUCHING TO LEVIN BECAUSE ONLY THE DAY BEFORE HE HAD BEEN AT HIS HOUSE ABOUT HIS (TRUSTEE->TRUSTY) BUSINESS AND HAD SEEN HIM IN ALL HIS GRANDEUR A KIND HEARTED FATHERLY MAN +5442-41168-0023-1408: 
IF THERE ARE MEN YOUNGER AND MORE DESERVING THAN I LET (THEM SERVE->THEMSELVE) +5442-41168-0024-1409: AND THE MARSHAL DISAPPEARED THROUGH A SIDE DOOR +5442-41168-0025-1410: (THEY->THERE) WERE TO PROCEED IMMEDIATELY TO THE ELECTION +5442-41168-0026-1411: (TWO->DO) NOBLE GENTLEMEN WHO HAD A WEAKNESS FOR STRONG DRINK HAD BEEN MADE DRUNK BY THE PARTISANS OF (SNETKOV->SNATCOVE) AND (A->THE) THIRD HAD BEEN ROBBED OF HIS UNIFORM +5442-41168-0027-1412: ON LEARNING THIS THE NEW PARTY HAD MADE HASTE DURING THE (DISPUTE ABOUT FLEROV->DISPUTABLE FLIROFF) TO SEND SOME OF THEIR MEN IN A SLEDGE TO CLOTHE THE STRIPPED (GENTLEMAN->GENTLEMEN) AND TO BRING ALONG ONE OF THE INTOXICATED TO THE MEETING +5442-41169-0000-1413: LEVIN DID NOT CARE TO EAT AND HE WAS NOT SMOKING HE DID NOT WANT TO JOIN HIS OWN FRIENDS THAT IS (SERGEY->SOJI) IVANOVITCH STEPAN ARKADYEVITCH SVIAZHSKY AND THE REST BECAUSE VRONSKY IN (HIS EQUERRY'S->AN EQUITY'S) UNIFORM WAS STANDING WITH THEM IN EAGER CONVERSATION +5442-41169-0001-1414: HE WENT TO THE WINDOW AND SAT DOWN SCANNING THE GROUPS AND LISTENING TO WHAT WAS BEING SAID AROUND HIM +5442-41169-0002-1415: (HE'S->IS) SUCH A (BLACKGUARD->BLANKARD) +5442-41169-0003-1416: I HAVE TOLD HIM SO BUT IT MAKES NO DIFFERENCE ONLY THINK OF IT +5442-41169-0004-1417: THESE PERSONS WERE UNMISTAKABLY SEEKING A PLACE WHERE THEY COULD TALK WITHOUT BEING OVERHEARD +5442-41169-0005-1418: SHALL WE GO ON YOUR EXCELLENCY FINE CHAMPAGNE +5442-41169-0006-1419: (LAST YEAR AT OUR->LOST YOUR OTHER) DISTRICT (MARSHAL NIKOLAY->MARTIAL NIKOLA) IVANOVITCH'S +5442-41169-0007-1420: OH STILL JUST THE SAME ALWAYS AT A LOSS THE LANDOWNER ANSWERED WITH A RESIGNED SMILE BUT WITH AN EXPRESSION OF SERENITY AND CONVICTION THAT SO IT MUST BE +5442-41169-0008-1421: WHY WHAT IS (THERE->THAT) TO UNDERSTAND +5442-41169-0009-1422: (THERE'S->THERE IS) NO MEANING IN IT AT ALL +5442-41169-0010-1423: THEN TOO ONE MUST KEEP UP CONNECTIONS +5442-41169-0011-1424: IT'S A (MORAL->MORTAL) OBLIGATION OF A SORT +5442-41169-0012-1425: AND THEN TO TELL THE TRUTH THERE'S ONE'S OWN (INTERESTS->INTEREST) +5442-41169-0013-1426: (THEY'RE->THEIR) PROPRIETORS OF A SORT BUT (WE'RE->WE ARE) THE LANDOWNERS +5442-41169-0014-1427: THAT IT MAY BE BUT STILL IT OUGHT TO BE TREATED A LITTLE MORE RESPECTFULLY +5442-41169-0015-1428: IF (WE'RE->WE ARE) LAYING OUT A GARDEN PLANNING ONE BEFORE THE HOUSE YOU KNOW AND THERE (YOU'VE->YOU HAVE) A TREE (THAT'S->THAT) STOOD (FOR->IN) CENTURIES IN THE VERY SPOT OLD AND (GNARLED->GNOLD) IT MAY BE AND YET YOU DON'T CUT DOWN THE OLD FELLOW TO MAKE ROOM FOR THE (FLOWERBEDS->FLOWER BEDS) BUT LAY OUT YOUR BEDS SO AS TO TAKE ADVANTAGE OF THE TREE +5442-41169-0016-1429: WELL AND HOW IS YOUR LAND DOING +5442-41169-0017-1430: BUT ONE'S WORK IS THROWN IN FOR NOTHING +5442-41169-0018-1431: OH WELL ONE DOES IT WHAT WOULD YOU HAVE +5442-41169-0019-1432: AND (WHAT'S->ONCE) MORE THE LANDOWNER WENT ON LEANING HIS ELBOWS ON THE WINDOW AND CHATTING ON MY SON I MUST TELL YOU HAS NO TASTE FOR IT +5442-41169-0020-1433: SO (THERE'LL->THERE WILL) BE NO ONE TO KEEP IT UP AND YET ONE DOES IT +5442-41169-0021-1434: WE WALKED ABOUT THE FIELDS (AND->ON) THE GARDEN NO SAID HE (STEPAN VASSILIEVITCH->STEP ON MISS LEVITCH) EVERYTHING'S WELL LOOKED AFTER BUT YOUR (GARDEN'S->GARDENS) NEGLECTED +5442-41169-0022-1435: TO MY THINKING (I'D->I'VE) CUT DOWN (THAT LIME TREE->THE LIMETERY) +5442-41169-0023-1436: HERE (YOU'VE->YOUR) THOUSANDS OF LIMES AND EACH WOULD MAKE (TWO->TOO) GOOD BUNDLES OF (BARK->BALK) +5442-41169-0024-1437: YOU'RE MARRIED (I'VE->I) HEARD 
SAID THE LANDOWNER +5442-41169-0025-1438: YES IT'S (RATHER->ALL THE) STRANGE HE WENT ON +5442-41169-0026-1439: THE LANDOWNER CHUCKLED UNDER HIS WHITE (MUSTACHES->MOUSTACHES) +5442-41169-0027-1440: WHY DON'T WE (CUT->GOT) DOWN OUR (PARKS->BOX) FOR (TIMBER->TIMBOO) +5442-41169-0028-1441: SAID LEVIN RETURNING TO A THOUGHT THAT HAD STRUCK HIM +5442-41169-0029-1442: THERE'S A CLASS INSTINCT TOO OF WHAT ONE OUGHT AND (OUGHTN'T->OUGHT NOT) TO DO +5442-41169-0030-1443: THERE'S THE PEASANTS TOO I WONDER AT THEM SOMETIMES ANY GOOD PEASANT TRIES TO TAKE ALL THE LAND HE CAN +5442-41169-0031-1444: WITHOUT A RETURN (TOO AT->TO ADD) A SIMPLE (LOSS->LAWS) +5484-24317-0000-571: WHEN HE CAME FROM THE BATH (PROCLUS->PROCKLESS) VISITED HIM AGAIN +5484-24317-0001-572: BUT (HERMON->HERMAN) WAS NOT IN THE MOOD TO SHARE A JOYOUS REVEL AND HE FRANKLY SAID SO ALTHOUGH IMMEDIATELY AFTER HIS RETURN HE HAD ACCEPTED THE INVITATION TO THE FESTIVAL WHICH THE WHOLE FELLOWSHIP OF ARTISTS WOULD GIVE THE FOLLOWING DAY (IN->AND) HONOUR OF THE (SEVENTIETH->SEVENTEENTH) BIRTHDAY OF THE OLD SCULPTOR (EUPHRANOR->EUPHRANER) +5484-24317-0002-573: SHE WOULD APPEAR HERSELF (AT->A) DESSERT AND THE BANQUET MUST THEREFORE BEGIN AT AN UNUSUALLY EARLY HOUR +5484-24317-0003-574: SO THE ARTIST FOUND HIMSELF OBLIGED TO RELINQUISH HIS OPPOSITION +5484-24317-0004-575: THE BANQUET WAS TO BEGIN IN A FEW HOURS YET HE COULD NOT LET THE DAY PASS WITHOUT SEEING DAPHNE AND TELLING HER THE WORDS OF THE ORACLE +5484-24317-0005-576: HE LONGED WITH ARDENT YEARNING FOR THE SOUND OF HER VOICE AND STILL MORE TO UNBURDEN HIS SORELY TROUBLED SOUL TO HER +5484-24317-0006-577: SINCE HIS RETURN FROM THE ORACLE THE FEAR THAT THE (RESCUED->RESCUE) DEMETER MIGHT YET BE THE WORK OF (MYRTILUS->MERTULIST) HAD AGAIN MASTERED HIM +5484-24317-0007-578: THE APPROVAL AS WELL AS (THE DOUBTS->A DOUBT) WHICH (IT AROUSED->HAD ARISED) IN OTHERS STRENGTHENED HIS OPINION ALTHOUGH EVEN NOW HE COULD NOT SUCCEED IN BRINGING IT INTO HARMONY WITH THE FACTS +5484-24317-0008-579: THEN HE WENT DIRECTLY TO THE (NEIGHBOURING->NEIGHBORING) PALACE THE QUEEN MIGHT HAVE APPEARED ALREADY AND IT WOULD NOT DO TO KEEP HER WAITING +5484-24317-0009-580: HITHERTO THE MERCHANT HAD BEEN INDUCED IT IS TRUE TO ADVANCE LARGE SUMS OF MONEY TO THE QUEEN BUT THE LOYAL DEVOTION WHICH HE SHOWED TO HER ROYAL HUSBAND HAD RENDERED (IT->AN) IMPOSSIBLE TO GIVE HIM EVEN A HINT OF THE CONSPIRACY +5484-24317-0010-581: WHEN (HERMON->HERMANN) ENTERED THE RESIDENCE OF THE (GRAMMATEUS->GRAMMATIUS) IN THE PALACE (THE->THEY) GUESTS HAD ALREADY ASSEMBLED +5484-24317-0011-582: (THE PLACE->THEY PLACED) BY (HERMON'S->HARMONT'S) SIDE WHICH (ALTHEA->ALTHIE) HAD CHOSEN FOR HERSELF WOULD THEN BE GIVEN UP TO (ARSINOE->ARSENO) +5484-24317-0012-583: TRUE AN INTERESTING CONVERSATION STILL HAD POWER TO CHARM HIM BUT OFTEN DURING ITS CONTINUANCE THE FULL CONSCIOUSNESS OF HIS MISFORTUNE FORCED ITSELF UPON HIS MIND FOR THE MAJORITY OF THE SUBJECTS DISCUSSED BY THE ARTISTS CAME TO THEM THROUGH THE MEDIUM OF SIGHT AND REFERRED TO NEW CREATIONS OF ARCHITECTURE SCULPTURE AND PAINTING FROM WHOSE ENJOYMENT HIS BLINDNESS DEBARRED HIM +5484-24317-0013-584: A STRANGER OUT OF HIS OWN SPHERE HE (FELT->FELL) CHILLED AMONG THESE CLOSELY UNITED MEN AND WOMEN TO WHOM NO TIE BOUND HIM SAVE THE PRESENCE OF THE SAME HOST +5484-24317-0014-585: (CRATES->CREATES) HAD REALLY BEEN INVITED IN ORDER TO WIN HIM OVER TO THE QUEEN'S CAUSE BUT CHARMING FAIR HAIRED (NICO->NACO) HAD BEEN COMMISSIONED BY THE CONSPIRATORS TO PERSUADE HIM TO SING (ARSINOE'S->ARSENO'S) 
PRAISES AMONG HIS PROFESSIONAL ASSOCIATES +5484-24317-0015-586: HIS SON HAD BEEN (THIS->THE) ROYAL (DAME'S->JAMES'S) FIRST HUSBAND AND SHE HAD DESERTED HIM TO MARRY (LYSIMACHUS->LISIMACUS) THE AGED KING OF THRACE +5484-24317-0016-587: THE KING'S SISTER THE OBJECT OF HIS LOVE CRIED (HERMON->HARMON) INCREDULOUSLY +5484-24317-0017-588: WE WOMEN ARE (ONLY AS->EARLIEST) OLD AS WE LOOK AND THE (LEECHES AND TIRING WOMEN->LEECH HAS ENTIRE WOMAN) OF THIS BEAUTY OF FORTY PRACTISE ARTS WHICH GIVE HER THE APPEARANCE OF TWENTY FIVE YET PERHAPS THE KING VALUES HER INTELLECT MORE THAN HER PERSON AND THE WISDOM OF A HUNDRED SERPENTS IS CERTAINLY UNITED IN THIS WOMAN'S HEAD +5484-24317-0018-589: THE THREE MOST TRUSTWORTHY ONES (ARE HERE AMYNTAS->I HEAR I MEANTUS) THE (LEECH->LIEGE) CHRYSIPPUS (AND->IN) THE ADMIRABLE (PROCLUS->PROCLISS) +5484-24317-0019-590: LET US HOPE THAT YOU WILL MAKE THIS THREE LEAVED CLOVER THE LUCK PROMISING (FOUR LEAVED->FALL LEAVE TO) ONE +5484-24317-0020-591: YOUR UNCLE TOO HAS OFTEN WITH (PRAISEWORTHY->PRAISE WORTHY) GENEROSITY HELPED (ARSINOE->ALSO) IN MANY AN EMBARRASSMENT +5484-24317-0021-592: HOW LONG HE KEPT YOU WAITING (FOR->FROM) THE FIRST WORD CONCERNING A WORK WHICH JUSTLY TRANSPORTED THE WHOLE CITY WITH DELIGHT +5484-24317-0022-593: WHEN HE DID FINALLY SUMMON YOU HE SAID THINGS WHICH MUST HAVE WOUNDED YOU +5484-24317-0023-594: THAT IS GOING TOO FAR REPLIED (HERMON->HERMANN) +5484-24317-0024-595: HE (WINKED AT->WAITED) HER AND MADE A SIGNIFICANT GESTURE AS HE SPOKE AND THEN INFORMED THE BLIND ARTIST HOW GRACIOUSLY (ARSINOE->ARSENO) HAD REMEMBERED HIM WHEN SHE HEARD OF THE REMEDY BY WHOSE AID MANY A WONDERFUL CURE OF BLIND (EYES->EYE) HAD BEEN MADE IN (RHODES->ROADS) +5484-24317-0025-596: THE ROYAL LADY HAD INQUIRED ABOUT HIM AND HIS SUFFERINGS WITH ALMOST SISTERLY INTEREST AND (ALTHEA->ALTHIA) EAGERLY CONFIRMED THE STATEMENT +5484-24317-0026-597: (HERMON->HERMA) LISTENED TO THE (PAIR IN->PARENT) SILENCE +5484-24317-0027-598: THE (RHODIAN->RADIAN) WAS JUST BEGINNING TO PRAISE (ARSINOE->ARSENO) ALSO AS A SPECIAL FRIEND AND CONNOISSEUR OF THE (SCULPTOR'S->SCULPTURES) ART WHEN CRATES (HERMON'S->HERMANN'S) FELLOW STUDENT ASKED THE BLIND ARTIST IN BEHALF OF HIS BEAUTIFUL COMPANION WHY HIS DEMETER WAS PLACED UPON A PEDESTAL (WHICH->WITCH) TO OTHERS AS WELL AS HIMSELF SEEMED TOO HIGH FOR THE SIZE OF THE STATUE +5484-24317-0028-599: YET WHAT MATTERED IT EVEN IF THESE MISERABLE PEOPLE CONSIDERED THEMSELVES DECEIVED AND POINTED THE FINGER OF SCORN AT HIM +5484-24317-0029-600: A WOMAN WHO YEARNS FOR THE REGARD OF ALL MEN AND MAKES LOVE A TOY EASILY LESSENS THE DEMANDS SHE IMPOSES UPON INDIVIDUALS +5484-24317-0030-601: ONLY EVEN THOUGH LOVE HAS WHOLLY DISAPPEARED SHE STILL CLAIMS CONSIDERATION AND ALTHEA DID NOT WISH TO LOSE (HERMON'S->HARMON'S) REGARD +5484-24317-0031-602: HOW INDIFFERENT YOU LOOK BUT I TELL YOU HER DEEP BLUE EYES FLASHED AS SHE SPOKE THAT SO LONG AS YOU (WERE->WAS) STILL A GENUINE CREATING ARTIST THE CASE WAS DIFFERENT +5484-24317-0032-603: THOUGH SO LOUD A DENIAL IS WRITTEN ON YOUR FACE I PERSIST IN MY CONVICTION AND THAT NO IDLE DELUSION (ENSNARES->AND SNAS) ME I CAN PROVE +5484-24317-0033-604: IT WAS NAY IT COULD HAVE BEEN NOTHING ELSE THAT VERY SPIDER +5484-24318-0000-605: NOT A SOUND IF YOU VALUE YOUR LIVES +5484-24318-0001-606: TO OFFER RESISTANCE WOULD HAVE BEEN MADNESS FOR EVEN HERMON PERCEIVED BY THE LOUD CLANKING OF WEAPONS AROUND THEM (THE->THEY) GREATLY SUPERIOR POWER OF THE ENEMY AND THEY WERE ACTING BY THE ORDERS OF THE KING TO THE PRISON NEAR THE 
PLACE OF EXECUTION +5484-24318-0002-607: WAS HE TO BE LED TO THE EXECUTIONER'S BLOCK +5484-24318-0003-608: WHAT PLEASURE HAD LIFE TO OFFER HIM THE BLIND MAN WHO WAS ALREADY DEAD TO HIS ART +5484-24318-0004-609: OUGHT HE NOT TO GREET (THIS->HIS) SUDDEN END AS (A BOON->THE BOOM) FROM THE IMMORTALS +5484-24318-0005-610: DID IT NOT SPARE HIM A HUMILIATION AS GREAT AND PAINFUL AS COULD BE IMAGINED +5484-24318-0006-611: WHATEVER MIGHT AWAIT HIM HE DESIRED NO BETTER FATE +5484-24318-0007-612: IF HE HAD PASSED INTO ANNIHILATION HE (HERMON->HERMAN) WISHED TO FOLLOW HIM THITHER AND ANNIHILATION CERTAINLY MEANT REDEMPTION FROM PAIN AND MISERY +5484-24318-0008-613: BUT IF HE WERE DESTINED TO MEET HIS (MYRTILUS->BERTULAS) AND HIS MOTHER IN THE WORLD BEYOND THE GRAVE WHAT HAD HE NOT TO TELL THEM HOW SURE HE WAS (OF->A) FINDING A JOYFUL RECEPTION THERE FROM BOTH +5484-24318-0009-614: THE POWER WHICH DELIVERED HIM OVER TO DEATH JUST AT THAT MOMENT WAS NOT NEMESIS NO IT WAS A KINDLY DEITY +5484-24318-0010-615: YET IT WAS NO ILLUSION THAT DECEIVED HIM +5484-24318-0011-616: AGAIN HE HEARD THE BELOVED VOICE AND THIS TIME IT ADDRESSED NOT ONLY HIM BUT WITH THE UTMOST HASTE THE COMMANDER OF THE SOLDIERS +5484-24318-0012-617: SOMETIMES WITH (*->THE) TOUCHING ENTREATY SOMETIMES WITH IMPERIOUS COMMAND SHE PROTESTED AFTER GIVING HIM HER NAME THAT THIS MATTER COULD BE NOTHING BUT AN UNFORTUNATE MISTAKE +5484-24318-0013-618: LASTLY WITH EARNEST WARMTH SHE BESOUGHT HIM BEFORE TAKING THE PRISONERS AWAY TO PERMIT HER TO SPEAK TO THE COMMANDING GENERAL PHILIPPUS HER FATHER'S GUEST WHO SHE WAS CERTAIN WAS IN THE PALACE +5484-24318-0014-619: CRIED (HERMON->HERMAND) IN GRATEFUL AGITATION BUT SHE WOULD NOT LISTEN TO HIM AND (FOLLOWED->FOLLOW) THE SOLDIER WHOM THE CAPTAIN DETAILED TO GUIDE HER INTO THE PALACE +5484-24318-0015-620: TO MORROW YOU SHALL CONFESS TO ME WHO TREACHEROUSLY DIRECTED YOU TO THIS DANGEROUS PATH +5484-24318-0016-621: DAPHNE AGAIN PLEADED FOR THE LIBERATION OF THE PRISONERS BUT (PHILIPPUS SILENCED HER->PHILIP'S SILENCE CHARRED) WITH (THE->A) GRAVE EXCLAMATION THE ORDER OF THE KING +5484-24318-0017-622: AS SOON AS THE CAPTIVE ARTIST WAS ALONE WITH (THE->A) WOMAN HE LOVED HE CLASPED HER HAND POURING FORTH INCOHERENT WORDS OF THE MOST ARDENT GRATITUDE AND WHEN HE FELT HER WARMLY RETURN THE PRESSURE HE COULD NOT RESTRAIN THE DESIRE TO CLASP HER TO HIS HEART +5484-24318-0018-623: IN SPITE OF HIS DEEP MENTAL DISTRESS HE COULD HAVE SHOUTED ALOUD IN HIS DELIGHT AND GRATITUDE +5484-24318-0019-624: HE MIGHT NOW HAVE BEEN PERMITTED TO (BIND->FIND) FOREVER TO HIS LIFE THE WOMAN WHO HAD JUST RESCUED HIM FROM THE GREATEST DANGER BUT THE CONFESSION HE MUST MAKE TO HIS FELLOW ARTISTS IN THE (PALAESTRA->PELUSTER) THE FOLLOWING MORNING STILL SEALED HIS LIPS YET IN THIS HOUR HE FELT THAT HE WAS UNITED TO HER AND OUGHT NOT TO CONCEAL WHAT AWAITED HIM SO OBEYING A STRONG IMPULSE HE EXCLAIMED YOU KNOW THAT I LOVE YOU +5484-24318-0020-625: I LOVE YOU AND HAVE LOVED YOU ALWAYS +5484-24318-0021-626: (DAPHNE->TAPNEY) EXCLAIMED TENDERLY WHAT MORE IS NEEDED +5484-24318-0022-627: BUT (HERMON->HERMAN) WITH DROOPING HEAD MURMURED TO MORROW I SHALL NO LONGER BE WHAT I AM NOW +5484-24318-0023-628: THEN (DAPHNE->JAPLIN) RAISED HER FACE TO HIS ASKING SO THE DEMETER IS THE WORK OF (MYRTILUS->MERCILESS) +5484-24318-0024-629: WHAT A TERRIBLE ORDEAL AGAIN AWAITS YOU +5484-24318-0025-630: AND I FOOL BLINDED (ALSO->ALL SO) IN MIND COULD BE VEXED WITH YOU FOR IT +5484-24318-0026-631: BRING THIS BEFORE YOUR MIND AND EVERYTHING ELSE THAT YOU MUST 
ACCEPT WITH IT IF YOU CONSENT (WHEN->WITH) THE TIME ARRIVES TO BECOME MINE CONCEAL (AND PALLIATE->IMPALIATE) NOTHING +5484-24318-0027-632: (SO ARCHIAS->SOROCHIS) INTENDED TO LEAVE THE CITY ON ONE OF HIS OWN SHIPS THAT VERY DAY +5484-24318-0028-633: (HE->SHE) HIMSELF ON THE WAY TO EXPOSE HIMSELF TO THE MALICE AND MOCKERY OF THE WHOLE CITY +5484-24318-0029-634: HIS HEART CONTRACTED PAINFULLY AND HIS SOLICITUDE ABOUT HIS UNCLE'S FATE INCREASED WHEN (PHILIPPUS->PHILIPUS) INFORMED HIM THAT THE CONSPIRATORS HAD BEEN ARRESTED AT THE BANQUET AND HEADED BY (AMYNTAS->A MEANTES) THE (RHODIAN->HERODIAN) CHRYSIPPUS AND (PROCLUS->PROCLIS) HAD PERISHED BY THE EXECUTIONER'S SWORD AT SUNRISE +5484-24318-0030-635: BESIDES HE KNEW THAT THE OBJECT OF HIS LOVE WOULD NOT PART FROM HIM WITHOUT GRANTING HIM ONE LAST WORD +5484-24318-0031-636: ON THE WAY HIS (HEART THROBBED->HARD THROPPED) ALMOST TO BURSTING +5484-24318-0032-637: EVEN (DAPHNE'S->AFTER THESE) IMAGE AND WHAT THREATENED HER FATHER AND HER WITH HIM (RECEDED->WAS SEATED) FAR INTO THE BACKGROUND +5484-24318-0033-638: HE WAS APPEARING BEFORE HIS COMPANIONS ONLY TO GIVE TRUTH ITS JUST DUE +5484-24318-0034-639: THE EGYPTIAN (OBEYED->OBEY) AND HIS MASTER CROSSED THE WIDE SPACE STREWN WITH SAND AND APPROACHED THE STAGE WHICH HAD BEEN ERECTED FOR THE (FESTAL->FEAST HELL) PERFORMANCES EVEN HAD HIS EYES RETAINED THE POWER OF SIGHT HIS BLOOD WAS (COURSING->COARSING) SO (WILDLY->WIDELY) THROUGH HIS VEINS THAT HE MIGHT PERHAPS HAVE BEEN UNABLE TO DISTINGUISH THE STATUES AROUND HIM AND THE THOUSANDS OF SPECTATORS WHO CROWDED CLOSELY TOGETHER RICHLY GARLANDED THEIR (CHEEKS->CHIEFS) GLOWING WITH ENTHUSIASM SURROUNDED THE ARENA (HERMON->HERMANN) +5484-24318-0035-640: SHOUTED HIS FRIEND (SOTELES IN->SORTILESS AND) JOYFUL SURPRISE IN THE MIDST OF (THIS->HIS) PAINFUL WALK (HERMON->HAREMON) +5484-24318-0036-641: EVEN WHILE HE BELIEVED HIMSELF TO BE THE CREATOR OF THE DEMETER HE HAD BEEN SERIOUSLY TROUBLED BY THE PRAISE OF SO MANY CRITICS BECAUSE IT HAD EXPOSED HIM TO THE SUSPICION OF HAVING BECOME FAITHLESS TO HIS ART AND HIS NATURE +5484-24318-0037-642: HONOUR TO (MYRTILUS->MYRTULAS) AND HIS ART BUT HE TRUSTED (THIS NOBLE FESTAL->THE SNOWBLE FEAST ELL) ASSEMBLAGE WOULD PARDON THE UNINTENTIONAL DECEPTION AND AID HIS PRAYER FOR RECOVERY +5764-299665-0000-405: AFTERWARD IT WAS SUPPOSED THAT HE WAS SATISFIED WITH THE BLOOD OF OXEN (LAMBS->LAMPS) AND DOVES AND THAT IN EXCHANGE FOR OR (ON->IN) ACCOUNT OF THESE SACRIFICES (THIS->THESE) GOD GAVE (RAIN->REIGN) SUNSHINE AND HARVEST +5764-299665-0001-406: WHETHER HE WAS THE CREATOR OF YOURSELF AND MYSELF +5764-299665-0002-407: (WHETHER ANY->WEATHER AND A) PRAYER WAS EVER ANSWERED +5764-299665-0003-408: WHY DID HE CREATE THE (INTELLECTUALLY->INTELLECTUAL) INFERIOR +5764-299665-0004-409: WHY DID HE CREATE THE DEFORMED AND HELPLESS WHY DID HE CREATE THE CRIMINAL THE IDIOTIC THE INSANE +5764-299665-0005-410: ARE THE FAILURES (UNDER->AND THE) OBLIGATION TO THEIR CREATOR +5764-299665-0006-411: (IS HE RESPONSIBLE->HIS IRRESPONSIBLE) FOR ALL THE (WARS->WALLS) THAT HAVE BEEN WAGED FOR ALL THE INNOCENT BLOOD THAT HAS BEEN SHED +5764-299665-0007-412: (IS HE->IF YOU) RESPONSIBLE FOR THE CENTURIES OF SLAVERY FOR THE BACKS THAT HAVE BEEN SCARRED WITH (THE->A) LASH FOR THE (BABES->BABE) THAT HAVE BEEN SOLD FROM THE BREASTS OF MOTHERS FOR THE FAMILIES THAT HAVE BEEN SEPARATED AND DESTROYED +5764-299665-0008-413: IS (THIS GOD->THESE GOT) RESPONSIBLE FOR RELIGIOUS PERSECUTION FOR THE INQUISITION FOR THE (THUMB SCREW->TENTH'S CREW) AND RACK AND FOR 
ALL THE INSTRUMENTS OF TORTURE +5764-299665-0009-414: (DID THIS GOD ALLOW->THESE GOT THE LOAD) THE CRUEL AND VILE TO DESTROY THE BRAVE AND VIRTUOUS +5764-299665-0010-415: DID HE (ALLOW->ALONE) TYRANTS TO SHED (THE->A) BLOOD OF PATRIOTS +5764-299665-0011-416: CAN WE CONCEIVE OF A DEVIL BASE ENOUGH TO PREFER HIS ENEMIES TO HIS FRIENDS +5764-299665-0012-417: HOW CAN WE ACCOUNT FOR THE WILD BEASTS THAT (DEVOUR->THE FOUR) HUMAN BEINGS FOR THE (FANGED->FACT) SERPENTS WHOSE BITE (IS->ITS) DEATH +5764-299665-0013-418: HOW CAN WE ACCOUNT FOR A WORLD (WHERE LIFE FEEDS->WILL LIE FEATS) ON LIFE +5764-299665-0014-419: DID INFINITE WISDOM INTENTIONALLY (PRODUCE->PRODUCED) THE MICROSCOPIC BEASTS THAT FEED UPON THE OPTIC (NERVE->NURSE) THINK OF BLINDING A MAN TO SATISFY THE APPETITE OF A MICROBE +5764-299665-0015-420: FEAR (BUILDS->BIDS) THE ALTAR AND OFFERS THE SACRIFICE +5764-299665-0016-421: FEAR ERECTS THE (CATHEDRAL->CATEURAL) AND BOWS THE HEAD OF MAN IN WORSHIP +5764-299665-0017-422: (LIPS->LITZ) RELIGIOUS AND FEARFUL TREMBLINGLY REPEAT THIS PASSAGE THOUGH HE SLAY ME YET (WILL I->WE LIKE) TRUST HIM +5764-299665-0018-423: CAN WE SAY THAT HE CARED FOR THE CHILDREN OF MEN +5764-299665-0019-424: CAN WE SAY THAT HIS MERCY (ENDURETH FOREVER->AND DURE FOR EVER) +5764-299665-0020-425: (DO WE PROVE->THE REPROVE) HIS GOODNESS BY SHOWING THAT HE HAS OPENED THE EARTH AND SWALLOWED (THOUSANDS->THOUSAND) OF HIS HELPLESS CHILDREN (OR->ALL) THAT (WITH->WE) THE VOLCANOES HE HAS OVERWHELMED THEM WITH RIVERS OF FIRE +5764-299665-0021-426: WAS THERE GOODNESS WAS THERE WISDOM IN THIS +5764-299665-0022-427: (OUGHT THE SUPERIOR RACES TO->ALL DISAPPEAR RAYS TWO) THANK (GOD->GOT) THAT THEY ARE NOT THE INFERIOR +5764-299665-0023-428: MOST PEOPLE CLING TO THE SUPERNATURAL +5764-299665-0024-429: IF THEY GIVE UP ONE GOD THEY IMAGINE ANOTHER +5764-299665-0025-430: WHAT IS THIS POWER +5764-299665-0026-431: MAN ADVANCES AND NECESSARILY ADVANCES (THROUGH->TO) EXPERIENCE +5764-299665-0027-432: A MAN WISHING TO GO TO A CERTAIN PLACE (COMES->COME) TO WHERE THE (ROAD->REAL) DIVIDES +5764-299665-0028-433: HE HAS TRIED THAT ROAD AND KNOWS THAT IT IS THE WRONG ROAD +5764-299665-0029-434: A CHILD (CHARMED->SHONE) BY THE BEAUTY OF THE FLAME (GRASPS->GRASPED) IT WITH (ITS->HIS) DIMPLED HAND +5764-299665-0030-435: THE POWER (*->WITH) THAT (WORKS->WORK) FOR RIGHTEOUSNESS (HAS->HAD) TAUGHT THE CHILD A LESSON +5764-299665-0031-436: IT IS A RESULT +5764-299665-0032-437: IT IS INSISTED BY THESE THEOLOGIANS AND BY MANY OF THE (SO->SOUL) CALLED PHILOSOPHERS THAT THIS MORAL SENSE THIS SENSE OF DUTY OF OBLIGATION WAS IMPORTED AND THAT CONSCIENCE IS AN EXOTIC +5764-299665-0033-438: (WE LIVE->REALLY) TOGETHER IN FAMILIES TRIBES AND NATIONS +5764-299665-0034-439: THEY ARE PRAISED ADMIRED AND RESPECTED +5764-299665-0035-440: THEY ARE REGARDED AS GOOD THAT IS TO SAY AS MORAL +5764-299665-0036-441: THE MEMBERS WHO ADD TO THE MISERY OF THE FAMILY THE TRIBE (OR->OF) THE NATION ARE CONSIDERED BAD MEMBERS +5764-299665-0037-442: THE GREATEST OF HUMAN BEINGS (HAS->HAD) SAID CONSCIENCE IS BORN OF LOVE +5764-299665-0038-443: AS PEOPLE ADVANCE THE REMOTE CONSEQUENCES ARE PERCEIVED +5764-299665-0039-444: THE IMAGINATION IS CULTIVATED +5764-299665-0040-445: A MAN (PUTS->BUT) HIMSELF IN THE PLACE OF ANOTHER +5764-299665-0041-446: THE SENSE OF DUTY BECOMES STRONGER MORE IMPERATIVE +5764-299665-0042-447: MAN JUDGES HIMSELF +5764-299665-0043-448: IN ALL THIS THERE IS NOTHING SUPERNATURAL +5764-299665-0044-449: MAN HAS DECEIVED HIMSELF +5764-299665-0045-450: (HAS 
CHRISTIANITY->HISTORY STUNNITY) DONE GOOD +5764-299665-0046-451: WHEN THE CHURCH HAD CONTROL WERE MEN MADE BETTER AND HAPPIER +5764-299665-0047-452: WHAT HAS RELIGION DONE FOR HUNGARY (OR->O) AUSTRIA +5764-299665-0048-453: (COULD->GOOD) THESE COUNTRIES HAVE BEEN WORSE WITHOUT RELIGION +5764-299665-0049-454: COULD THEY HAVE BEEN WORSE HAD THEY HAD ANY OTHER RELIGION THAN CHRISTIANITY +5764-299665-0050-455: WHAT DID CHRISTIANITY DO (FOR->FAULT) THEM +5764-299665-0051-456: THEY HATED PLEASURE +5764-299665-0052-457: THEY MUFFLED ALL THE BELLS OF GLADNESS +5764-299665-0053-458: (THE->DURING) RELIGION OF THE PURITAN WAS AN (UNADULTERATED->AN ADULTERATED) CURSE +5764-299665-0054-459: THE PURITAN (BELIEVED->BELIEF) THE BIBLE TO BE THE (WORD->WORLD) OF GOD AND THIS BELIEF HAS ALWAYS MADE THOSE WHO HELD IT CRUEL AND WRETCHED +5764-299665-0055-460: LET ME REFER TO JUST ONE FACT SHOWING THE INFLUENCE OF A BELIEF IN THE BIBLE ON HUMAN BEINGS +5764-299665-0056-461: THE QUEEN RECEIVED THE BIBLE KISSED IT AND PLEDGED HERSELF TO DILIGENTLY READ THEREIN +5764-299665-0057-462: IN OTHER WORDS IT WAS JUST AS FIENDISH JUST AS (INFAMOUS->IN FAMOUS) AS THE CATHOLIC SPIRIT +5764-299665-0058-463: HAS THE (BIBLE->VARIABLE) MADE THE PEOPLE OF (GEORGIA->GEORGE A) KIND AND MERCIFUL +5764-299665-0059-464: (RELIGION HAS->WHO LEGION HAVE) BEEN TRIED AND IN ALL COUNTRIES IN ALL TIMES (HAS->BEST) FAILED +5764-299665-0060-465: RELIGION (HAS->HATH) ALWAYS BEEN THE ENEMY OF SCIENCE OF INVESTIGATION AND THOUGHT +5764-299665-0061-466: (RELIGION HAS->RELIGIONISTS) NEVER MADE (MAN->MEN) FREE +5764-299665-0062-467: (IT HAS->HE JUST) NEVER MADE MAN MORAL TEMPERATE INDUSTRIOUS AND HONEST +5764-299665-0063-468: (ARE CHRISTIANS MORE->AH CHRISTIAN SMALL) TEMPERATE NEARER VIRTUOUS NEARER HONEST THAN SAVAGES +5764-299665-0064-469: CAN WE CURE DISEASE BY SUPPLICATION +5764-299665-0065-470: CAN WE RECEIVE VIRTUE OR (HONOR->HANNER) AS (ALMS->ARMS) +5764-299665-0066-471: RELIGION RESTS ON THE IDEA THAT NATURE HAS A MASTER AND THAT THIS MASTER WILL LISTEN TO PRAYER THAT (THIS->HIS) MASTER PUNISHES AND REWARDS THAT HE LOVES PRAISE AND FLATTERY AND HATES THE BRAVE AND FREE +5764-299665-0067-472: WE MUST HAVE (CORNER->CORN THE) STONES +5764-299665-0068-473: THE STRUCTURE MUST HAVE (A BASEMENT->ABASEMENT) +5764-299665-0069-474: IF WE BUILD WE MUST BEGIN AT THE BOTTOM +5764-299665-0070-475: I HAVE A THEORY AND I HAVE FOUR (CORNER STONES->CORNESTONES) +5764-299665-0071-476: THE FIRST STONE (IS THAT MATTER->EAST AT MATHER) SUBSTANCE CANNOT BE DESTROYED CANNOT BE ANNIHILATED +5764-299665-0072-477: IF THESE (CORNER->SCORN THE) STONES ARE FACTS IT FOLLOWS AS A NECESSITY THAT MATTER AND FORCE ARE FROM (AND->END) TO ETERNITY THAT THEY CAN NEITHER BE INCREASED NOR DIMINISHED +5764-299665-0073-478: IT FOLLOWS THAT NOTHING (HAS->HATH) BEEN OR CAN BE CREATED THAT THERE NEVER HAS BEEN OR CAN BE A CREATOR +5764-299665-0074-479: IT (FOLLOWS->FOLLOWED) THAT THERE COULD NOT HAVE BEEN ANY INTELLIGENCE (ANY->AND A) DESIGN BACK OF MATTER AND FORCE +5764-299665-0075-480: I SAY WHAT I THINK +5764-299665-0076-481: EVERY EVENT HAS PARENTS +5764-299665-0077-482: THAT WHICH (HAS->HATH) NOT HAPPENED COULD NOT +5764-299665-0078-483: IN THE INFINITE (CHAIN THERE IS->CHANGE WRITHS) AND THERE CAN BE NO BROKEN NO MISSING LINK +5764-299665-0079-484: WE NOW KNOW THAT OUR FIRST PARENTS WERE NOT FOREIGNERS +5764-299665-0080-485: WE NOW KNOW IF WE KNOW ANYTHING THAT THE UNIVERSE IS NATURAL AND THAT (MEN->MAN) AND WOMEN HAVE BEEN NATURALLY PRODUCED +5764-299665-0081-486: WE KNOW THE 
PATHS THAT LIFE HAS (TRAVELED->TRAVELLED) +5764-299665-0082-487: WE KNOW THE FOOTSTEPS OF ADVANCE THEY HAVE BEEN TRACED +5764-299665-0083-488: (FOR->FOUR) THOUSANDS OF YEARS MEN AND WOMEN HAVE BEEN TRYING TO REFORM THE WORLD +5764-299665-0084-489: WHY HAVE (THE->*) REFORMERS (FAILED->FAME) +5764-299665-0085-490: THEY DEPEND ON THE (LORD ON LUCK->LOT UNLUCK) AND CHARITY +5764-299665-0086-491: THEY (LIVE BY->LEAVE THY) FRAUD AND VIOLENCE AND BEQUEATH THEIR VICES TO THEIR CHILDREN +5764-299665-0087-492: FAILURE SEEMS TO BE THE (TRADEMARK->TRADE MARK) OF NATURE WHY +5764-299665-0088-493: NATURE (PRODUCES->PROVED YOUTH IT) WITHOUT PURPOSE SUSTAINS WITHOUT INTENTION AND DESTROYS WITHOUT THOUGHT +5764-299665-0089-494: (MUST THE->MISTER) WORLD (FOREVER REMAIN THE->FOR EVER REMAINED A) VICTIM OF IGNORANT PASSION +5764-299665-0090-495: WHY SHOULD MEN AND WOMEN HAVE CHILDREN THAT THEY CANNOT TAKE CARE OF CHILDREN THAT ARE (BURDENS->BURGLAR) AND CURSES WHY +5764-299665-0091-496: PASSION IS AND ALWAYS HAS BEEN DEAF +5764-299665-0092-497: LAW CAN PUNISH BUT IT CAN NEITHER REFORM CRIMINALS NOR PREVENT CRIME +5764-299665-0093-498: (THIS->THESE) CANNOT BE DONE BY TALK OR EXAMPLE +5764-299665-0094-499: THIS IS THE SOLUTION OF THE WHOLE QUESTION +5764-299665-0095-500: THIS (FREES WOMAN->FREEZE WOMEN) +5764-299665-0096-501: POVERTY AND CRIME WILL BE CHILDLESS +5764-299665-0097-502: IT IS FAR BETTER TO BE FREE TO LEAVE THE (FORTS->FAULTS) AND BARRICADES OF FEAR TO STAND ERECT AND (FACE->FAITH) THE FUTURE (WITH A->WE TO) SMILE +6070-63485-0000-2599: (THEY'RE DONE FOR->THERE DUNFAR) SAID THE SCHOOLMASTER IN A LOW KEY TO THE (CHOUETTE->SWEAT) OUT WITH (YOUR->OUR) VITRIOL AND MIND YOUR EYE +6070-63485-0001-2600: THE TWO MONSTERS TOOK OFF THEIR SHOES AND MOVED STEALTHILY ALONG KEEPING IN THE SHADOWS OF THE HOUSES +6070-63485-0002-2601: BY MEANS OF THIS STRATAGEM THEY FOLLOWED SO CLOSELY THAT ALTHOUGH WITHIN A FEW STEPS OF (SARAH AND->SEREN) TOM THEY DID NOT HEAR THEM +6070-63485-0003-2602: SARAH AND HER BROTHER HAVING AGAIN PASSED BY THE (TAPIS FRANC->TAPPY FROG) ARRIVED CLOSE TO THE DILAPIDATED HOUSE WHICH WAS PARTLY IN RUINS AND ITS (OPENED->OPEN) CELLARS FORMED A KIND OF GULF ALONG WHICH THE STREET RAN IN THAT DIRECTION +6070-63485-0004-2603: IN AN INSTANT THE SCHOOLMASTER WITH A LEAP RESEMBLING IN STRENGTH AND AGILITY THE SPRING OF A TIGER SEIZED (SEYTON->SEATING) WITH ONE HAND BY THE THROAT AND EXCLAIMED YOUR MONEY OR I WILL FLING YOU INTO THIS (HOLE->HALL) +6070-63485-0005-2604: NO SAID THE OLD BRUTE (GRUMBLINGLY->TREMBLINGLY) NO NOT ONE RING WHAT A SHAME +6070-63485-0006-2605: TOM SEYTON DID NOT LOSE HIS PRESENCE OF MIND DURING THIS SCENE RAPIDLY AND UNEXPECTEDLY AS IT HAD OCCURRED +6070-63485-0007-2606: (OH->U) AH TO LAY A TRAP TO CATCH US REPLIED THE THIEF +6070-63485-0008-2607: THEN ADDRESSING THOMAS (SEYTON->SETTON) YOU KNOW THE (PLAIN->PLANE) OF SAINT DENIS +6070-63485-0009-2608: DID YOU SEE IN THE CABARET WE (HAVE->HAD) JUST LEFT FOR I KNOW YOU AGAIN THE MAN WHOM THE CHARCOAL MAN CAME TO SEEK +6070-63485-0010-2609: CRIED THE SCHOOLMASTER A THOUSAND FRANCS AND I'LL KILL HIM +6070-63485-0011-2610: (WRETCH->THATCH) I DO NOT (SEEK->SEE) HIS LIFE REPLIED SARAH TO THE SCHOOLMASTER +6070-63485-0012-2611: LET'S GO AND MEET HIM +6070-63485-0013-2612: OLD BOY IT WILL PAY FOR LOOKING AFTER +6070-63485-0014-2613: WELL MY WIFE SHALL BE THERE SAID THE SCHOOLMASTER YOU WILL TELL HER WHAT YOU WANT AND I SHALL SEE +6070-63485-0015-2614: IN THE (PLAIN->PLANE) OF SAINT (DENIS->DENY) +6070-63485-0016-2615: BETWEEN SAINT 
(OUEN->LOUIS) AND THE ROAD OF LA (REVOLTE->REVOLT) AT THE END OF THE ROAD AGREED +6070-63485-0017-2616: HE HAD FORGOTTEN THE ADDRESS OF THE SELF STYLED (FAN->PAMP) PAINTER +6070-63485-0018-2617: THE (FIACRE->FIACUS) STARTED +6070-86744-0000-2569: (FRANZ->FRANCE) WHO SEEMED ATTRACTED BY SOME INVISIBLE INFLUENCE (TOWARDS->TO WHICH) THE COUNT IN WHICH TERROR WAS STRANGELY MINGLED FELT AN EXTREME RELUCTANCE TO PERMIT HIS FRIEND TO BE EXPOSED ALONE TO THE SINGULAR FASCINATION THAT THIS MYSTERIOUS PERSONAGE SEEMED TO EXERCISE OVER HIM AND THEREFORE MADE NO OBJECTION TO ALBERT'S REQUEST BUT AT ONCE ACCOMPANIED HIM TO THE DESIRED SPOT AND AFTER A SHORT DELAY THE COUNT JOINED THEM IN THE SALON +6070-86744-0001-2570: MY VERY GOOD FRIEND AND EXCELLENT NEIGHBOR REPLIED THE (COUNT->COUCH) WITH A SMILE YOU REALLY EXAGGERATE MY TRIFLING EXERTIONS +6070-86744-0002-2571: MY FATHER THE COMTE DE MORCERF ALTHOUGH (OF->A) SPANISH ORIGIN POSSESSES CONSIDERABLE INFLUENCE BOTH AT THE COURT OF FRANCE AND MADRID AND I UNHESITATINGLY (PLACE->PLACED) THE BEST SERVICES OF MYSELF AND ALL TO WHOM MY LIFE IS DEAR AT YOUR DISPOSAL +6070-86744-0003-2572: I CAN SCARCELY CREDIT IT +6070-86744-0004-2573: THEN IT IS SETTLED SAID THE COUNT AND I GIVE YOU MY SOLEMN ASSURANCE THAT I ONLY WAITED (AN OPPORTUNITY->IN A PARTICULARITY) LIKE THE PRESENT TO REALIZE PLANS THAT I HAVE LONG MEDITATED +6070-86744-0005-2574: (SHALL->SHOW) WE MAKE A POSITIVE APPOINTMENT FOR A PARTICULAR DAY AND HOUR INQUIRED THE COUNT ONLY LET ME WARN YOU THAT I AM PROVERBIAL FOR MY PUNCTILIOUS EXACTITUDE IN KEEPING MY ENGAGEMENTS DAY FOR DAY HOUR FOR HOUR SAID ALBERT THAT WILL SUIT ME TO A DOT +6070-86744-0006-2575: SO BE IT THEN REPLIED THE COUNT AND EXTENDING HIS HAND TOWARDS (A CALENDAR->THE CALENDER) SUSPENDED NEAR THE CHIMNEY PIECE HE SAID TO DAY IS THE TWENTY FIRST OF FEBRUARY AND DRAWING OUT HIS WATCH ADDED IT IS EXACTLY HALF PAST TEN O'CLOCK NOW PROMISE ME TO REMEMBER THIS AND EXPECT ME THE TWENTY FIRST OF MAY AT THE SAME HOUR IN THE FORENOON +6070-86744-0007-2576: I RESIDE IN MY FATHER'S HOUSE BUT OCCUPY A PAVILION AT THE FARTHER SIDE OF THE (COURT YARD ENTIRELY->COURTYARD AND TIRELESS) SEPARATED FROM THE MAIN BUILDING +6070-86744-0008-2577: NOW THEN SAID THE COUNT RETURNING HIS TABLETS TO HIS POCKET MAKE YOURSELF PERFECTLY EASY THE HAND OF YOUR TIME (PIECE->PEACE) WILL NOT BE MORE ACCURATE IN MARKING THE TIME THAN MYSELF +6070-86744-0009-2578: THAT DEPENDS WHEN (DO YOU->D'YE) LEAVE +6070-86744-0010-2579: FOR FRANCE NO FOR VENICE I SHALL REMAIN IN ITALY FOR ANOTHER YEAR OR TWO +6070-86744-0011-2580: THEN WE SHALL NOT MEET IN PARIS +6070-86744-0012-2581: I FEAR I SHALL NOT HAVE THAT (HONOR->HONOUR) +6070-86744-0013-2582: WELL SINCE WE MUST PART SAID THE COUNT HOLDING OUT A HAND TO EACH OF THE YOUNG MEN ALLOW ME TO WISH YOU BOTH (A->AS) SAFE AND PLEASANT JOURNEY +6070-86744-0014-2583: WHAT IS THE MATTER ASKED ALBERT OF FRANZ WHEN THEY HAD RETURNED TO THEIR OWN APARTMENTS YOU (SEEM->SEE) MORE THAN COMMONLY THOUGHTFUL +6070-86744-0015-2584: I WILL (CONFESS->CONSIST) TO YOU ALBERT REPLIED FRANZ THE COUNT IS A VERY SINGULAR PERSON AND THE APPOINTMENT YOU HAVE MADE TO MEET HIM IN PARIS FILLS ME WITH A THOUSAND APPREHENSIONS +6070-86744-0016-2585: DID YOU EVER MEET HIM PREVIOUSLY TO COMING HITHER +6070-86744-0017-2586: UPON MY (HONOR->HONOUR) THEN LISTEN TO ME +6070-86744-0018-2587: HE DWELT WITH CONSIDERABLE FORCE AND ENERGY ON THE ALMOST MAGICAL HOSPITALITY HE HAD RECEIVED FROM THE COUNT AND THE MAGNIFICENCE OF HIS ENTERTAINMENT IN THE (GROTTO->DRATO) OF 
THE THOUSAND AND ONE NIGHTS HE RECOUNTED WITH CIRCUMSTANTIAL EXACTITUDE ALL THE PARTICULARS OF THE SUPPER THE HASHISH THE STATUES THE DREAM AND HOW AT HIS AWAKENING THERE REMAINED NO PROOF (OR->OF) TRACE OF ALL THESE EVENTS SAVE THE SMALL YACHT SEEN IN THE DISTANT HORIZON DRIVING UNDER FULL SAIL TOWARD PORTO VECCHIO +6070-86744-0019-2588: THEN HE DETAILED THE CONVERSATION OVERHEARD BY HIM AT THE (COLOSSEUM->COLISEUM) BETWEEN THE COUNT AND VAMPA IN WHICH THE COUNT HAD PROMISED TO OBTAIN THE RELEASE OF THE BANDIT PEPPINO (AN->AND) ENGAGEMENT WHICH AS OUR READERS ARE AWARE HE MOST FAITHFULLY FULFILLED +6070-86744-0020-2589: BUT SAID FRANZ THE (CORSICAN->CORSICIAN) BANDITS THAT WERE AMONG THE CREW OF HIS VESSEL +6070-86744-0021-2590: WHY REALLY THE THING SEEMS TO ME SIMPLE ENOUGH +6070-86744-0022-2591: TALKING OF COUNTRIES REPLIED FRANZ OF WHAT (COUNTRY IS->COUNTRIES) THE COUNT WHAT IS HIS NATIVE (TONGUE->DONG) WHENCE DOES HE DERIVE HIS IMMENSE FORTUNE AND WHAT WERE THOSE EVENTS OF HIS EARLY LIFE A LIFE AS MARVELLOUS AS UNKNOWN THAT HAVE (TINCTURED->TINTED) HIS SUCCEEDING YEARS WITH (SO->SORE) DARK AND (GLOOMY A->BLOOMY AND) MISANTHROPY +6070-86744-0023-2592: CERTAINLY THESE ARE QUESTIONS THAT IN YOUR PLACE I SHOULD LIKE TO HAVE ANSWERED +6070-86744-0024-2593: MY DEAR (FRANZ->FRANCE) REPLIED ALBERT WHEN UPON RECEIPT OF MY LETTER YOU FOUND THE NECESSITY OF ASKING THE COUNT'S ASSISTANCE YOU PROMPTLY WENT TO HIM SAYING MY FRIEND ALBERT DE MORCERF IS IN DANGER (HELP->HELPED) ME TO DELIVER HIM +6070-86744-0025-2594: WHAT ARE HIS MEANS OF EXISTENCE WHAT IS HIS (BIRTHPLACE->BOTH PLEASE) OF WHAT (COUNTRY IS->COUNTRIES) HE A NATIVE +6070-86744-0026-2595: I CONFESS HE ASKED ME NONE NO HE MERELY CAME AND FREED ME FROM THE HANDS OF (SIGNOR->SENOR) VAMPA WHERE I CAN ASSURE YOU IN SPITE OF ALL MY OUTWARD APPEARANCE OF EASE AND UNCONCERN I DID NOT VERY PARTICULARLY CARE TO REMAIN +6070-86744-0027-2596: AND THIS TIME IT MUST BE CONFESSED THAT CONTRARY TO THE USUAL STATE OF AFFAIRS IN DISCUSSIONS BETWEEN THE YOUNG MEN THE EFFECTIVE ARGUMENTS WERE ALL ON ALBERT'S SIDE +6070-86744-0028-2597: WELL SAID FRANZ WITH A SIGH DO AS YOU PLEASE MY DEAR VISCOUNT FOR YOUR ARGUMENTS ARE BEYOND MY POWERS OF REFUTATION +6070-86744-0029-2598: AND NOW MY DEAR FRANZ LET US TALK OF SOMETHING ELSE +6070-86745-0000-2549: THEN SHOULD ANYTHING APPEAR TO MERIT A MORE MINUTE EXAMINATION ALBERT (DE->THE) MORCERF COULD FOLLOW UP HIS RESEARCHES BY MEANS OF A SMALL GATE SIMILAR TO THAT CLOSE TO THE CONCIERGE'S DOOR AND WHICH MERITS (A->OF) PARTICULAR DESCRIPTION +6070-86745-0001-2550: SHRUBS AND CREEPING PLANTS COVERED THE WINDOWS AND HID FROM THE GARDEN AND COURT THESE TWO APARTMENTS THE ONLY ROOMS INTO WHICH AS THEY WERE ON THE GROUND FLOOR THE PRYING EYES OF THE CURIOUS COULD PENETRATE +6070-86745-0002-2551: AT A QUARTER TO TEN (A VALET->THE VALLED) ENTERED HE COMPOSED WITH A LITTLE (GROOM->ROOM) NAMED JOHN AND WHO ONLY SPOKE ENGLISH ALL (ALBERT'S->ALBERTS) ESTABLISHMENT ALTHOUGH THE COOK OF THE HOTEL WAS ALWAYS AT HIS SERVICE AND ON GREAT OCCASIONS THE COUNT'S CHASSEUR ALSO +6070-86745-0003-2552: WAIT THEN DURING THE DAY TELL ROSA THAT WHEN I LEAVE THE OPERA I WILL SUP WITH HER AS SHE WISHES +6070-86745-0004-2553: VERY WELL AT HALF PAST TEN +6070-86745-0005-2554: IS THE COUNTESS UP YET +6070-86745-0006-2555: THE VALET LEFT THE ROOM +6070-86745-0007-2556: GOOD MORNING (LUCIEN->MISS YOUNG) GOOD MORNING SAID ALBERT YOUR PUNCTUALITY REALLY ALARMS ME +6070-86745-0008-2557: YOU WHOM I EXPECTED LAST YOU ARRIVE AT FIVE MINUTES TO TEN WHEN THE 
TIME FIXED WAS HALF PAST +6070-86745-0009-2558: NO NO MY DEAR FELLOW DO NOT CONFOUND OUR PLANS +6070-86745-0010-2559: YES HE HAS NOT MUCH TO COMPLAIN OF (BOURGES->BOURGE) IS THE CAPITAL OF CHARLES (SEVEN->THE SEVENTH) +6070-86745-0011-2560: IT IS FOR THAT REASON YOU SEE ME SO EARLY +6070-86745-0012-2561: I RETURNED HOME AT DAYBREAK AND STROVE TO SLEEP BUT MY HEAD ACHED AND I GOT UP TO HAVE A RIDE FOR AN HOUR +6070-86745-0013-2562: (PESTE->PESTS) I WILL DO NOTHING OF THE KIND THE MOMENT THEY COME FROM GOVERNMENT YOU WOULD FIND THEM EXECRABLE +6070-86745-0014-2563: BESIDES THAT DOES NOT CONCERN THE HOME BUT THE FINANCIAL DEPARTMENT +6070-86745-0015-2564: ABOUT WHAT ABOUT THE PAPERS +6070-86745-0016-2565: IN THE ENTIRE POLITICAL WORLD OF WHICH YOU ARE ONE OF THE LEADERS +6070-86745-0017-2566: THEY SAY THAT IT IS QUITE FAIR AND THAT SOWING SO MUCH RED YOU OUGHT TO REAP A LITTLE BLUE +6070-86745-0018-2567: COME COME THAT IS NOT BAD SAID (LUCIEN->LUCIAN) +6070-86745-0019-2568: WITH (YOUR TALENTS YOU->THE OTALONS HE) WOULD MAKE YOUR FORTUNE IN THREE OR FOUR YEARS +6128-63240-0000-503: THE GENTLEMAN HAD NOT EVEN NEEDED TO SIT DOWN TO BECOME INTERESTED APPARENTLY HE HAD TAKEN UP THE VOLUME FROM A TABLE AS SOON AS HE CAME IN AND STANDING THERE AFTER A SINGLE GLANCE ROUND THE APARTMENT HAD LOST HIMSELF IN (ITS->HIS) PAGES +6128-63240-0001-504: THAT HAS AN UNFLATTERING SOUND FOR ME SAID THE YOUNG MAN +6128-63240-0002-505: SHE IS WILLING TO RISK THAT +6128-63240-0003-506: JUST AS I AM THE VISITOR INQUIRED PRESENTING HIMSELF WITH RATHER A (WORK A DAY->WORKADAY) ASPECT +6128-63240-0004-507: HE WAS TALL AND LEAN AND DRESSED THROUGHOUT IN BLACK HIS SHIRT COLLAR WAS LOW AND WIDE AND THE TRIANGLE OF LINEN A LITTLE (CRUMPLED->CRAMPLED) EXHIBITED BY THE OPENING OF HIS WAISTCOAT WAS ADORNED BY A PIN CONTAINING A SMALL RED STONE +6128-63240-0005-508: IN SPITE OF THIS DECORATION THE YOUNG MAN LOOKED POOR AS (POOR->FAR) AS A YOUNG MAN COULD LOOK WHO HAD SUCH A FINE HEAD AND SUCH MAGNIFICENT EYES +6128-63240-0006-509: THOSE OF BASIL RANSOM (WERE->WENT) DARK DEEP AND GLOWING HIS HEAD HAD A CHARACTER OF ELEVATION WHICH FAIRLY ADDED TO HIS (STATURE->STATUE) IT WAS A HEAD TO BE SEEN ABOVE THE LEVEL OF A CROWD ON SOME JUDICIAL BENCH OR POLITICAL PLATFORM OR EVEN ON A BRONZE MEDAL +6128-63240-0007-510: THESE THINGS THE EYES ESPECIALLY WITH THEIR SMOULDERING FIRE MIGHT HAVE INDICATED THAT HE WAS TO BE (A->*) GREAT AMERICAN STATESMAN OR ON THE OTHER HAND (THEY->THERE) MIGHT SIMPLY HAVE PROVED THAT HE CAME FROM CAROLINA OR (ALABAMA->ALADAMA) +6128-63240-0008-511: AND YET THE READER WHO LIKES A COMPLETE IMAGE WHO DESIRES TO READ WITH THE SENSES AS WELL AS WITH THE REASON IS ENTREATED NOT TO FORGET THAT HE PROLONGED HIS (CONSONANTS->COUNTENANCE) AND SWALLOWED HIS VOWELS THAT HE WAS GUILTY OF (ELISIONS->ELYGIANCE) AND INTERPOLATIONS WHICH WERE EQUALLY UNEXPECTED AND THAT HIS DISCOURSE WAS PERVADED BY SOMETHING SULTRY AND VAST SOMETHING ALMOST AFRICAN IN ITS RICH BASKING TONE SOMETHING THAT SUGGESTED THE TEEMING EXPANSE OF THE COTTON FIELD +6128-63240-0009-512: AND HE TOOK UP HIS HAT VAGUELY A SOFT BLACK HAT WITH A LOW CROWN AND AN IMMENSE STRAIGHT BRIM +6128-63240-0010-513: WELL SO IT IS (THEY->THERE) ARE ALL WITCHES AND WIZARDS MEDIUMS AND SPIRIT (RAPPERS->WRAPPERS) AND (ROARING->ROWING) RADICALS +6128-63240-0011-514: IF YOU ARE GOING TO DINE WITH HER YOU HAD BETTER KNOW IT OH MURDER +6128-63240-0012-515: HE (LOOKED AT->LIFTED) MISSUS (LUNA->LEWINA) WITH INTELLIGENT INCREDULITY +6128-63240-0013-516: SHE WAS ATTRACTIVE AND 
IMPERTINENT ESPECIALLY THE LATTER +6128-63240-0014-517: HAVE YOU BEEN IN EUROPE +6128-63240-0015-518: NO I HAVEN'T BEEN ANYWHERE +6128-63240-0016-519: SHE HATES IT SHE WOULD LIKE TO ABOLISH IT +6128-63240-0017-520: THIS LAST REMARK HE MADE (AT A VENTURE->THAT ADVENTURE) FOR HE HAD NATURALLY NOT DEVOTED ANY SUPPOSITION WHATEVER TO MISSUS (LUNA->LENA) +6128-63240-0018-521: ARE YOU VERY AMBITIOUS YOU LOOK AS IF YOU WERE +6128-63240-0019-522: AND MISSUS (LUNA->LENA) ADDED THAT NOW SHE WAS BACK SHE DIDN'T KNOW WHAT SHE SHOULD DO +6128-63240-0020-523: ONE DIDN'T EVEN (KNOW->THERE) WHAT ONE HAD COME BACK FOR +6128-63240-0021-524: BESIDES OLIVE DIDN'T WANT HER IN BOSTON AND DIDN'T GO THROUGH THE FORM OF SAYING SO +6128-63240-0022-525: THAT WAS ONE COMFORT WITH (OLIVE->ALIVE) SHE NEVER WENT THROUGH ANY FORMS +6128-63240-0023-526: SHE STOOD THERE LOOKING CONSCIOUSLY AND RATHER SERIOUSLY (AT->AND) MISTER RANSOM A SMILE OF EXCEEDING FAINTNESS PLAYED ABOUT HER LIPS IT WAS JUST PERCEPTIBLE ENOUGH TO LIGHT UP THE NATIVE GRAVITY OF HER FACE +6128-63240-0024-527: HER VOICE WAS LOW AND AGREEABLE A CULTIVATED VOICE AND SHE EXTENDED A SLENDER WHITE HAND TO HER VISITOR (WHO->HER) REMARKED WITH SOME SOLEMNITY HE FELT A CERTAIN GUILT OF PARTICIPATION IN MISSUS LUNA'S INDISCRETION THAT HE WAS INTENSELY HAPPY TO MAKE HER ACQUAINTANCE +6128-63240-0025-528: HE OBSERVED THAT MISS CHANCELLOR'S HAND WAS AT ONCE (COLD AND->CALLED IN) LIMP SHE MERELY PLACED IT IN HIS WITHOUT EXERTING THE SMALLEST PRESSURE +6128-63240-0026-529: I SHALL BE BACK VERY LATE (WE ARE GOING TO A THEATRE->WILL DON'T YOU THE) PARTY THAT'S WHY WE DINE SO EARLY +6128-63240-0027-530: MISSUS (LUNA'S->LUNE'S) FAMILIARITY EXTENDED EVEN TO HER SISTER SHE REMARKED TO MISS CHANCELLOR THAT SHE LOOKED AS IF SHE WERE GOT UP FOR A SEA VOYAGE +6128-63241-0000-557: POOR (RANSOM->RAMSON) ANNOUNCED THIS (FACT->THAT) TO HIMSELF AS IF HE HAD MADE A GREAT DISCOVERY BUT IN REALITY HE HAD NEVER BEEN SO (BOEOTIAN->BE OCHIAN) AS AT THAT MOMENT +6128-63241-0001-558: THE WOMEN HE HAD HITHERTO KNOWN HAD BEEN MAINLY OF HIS OWN SOFT (CLIME->CLIMB) AND IT WAS NOT OFTEN THEY EXHIBITED THE TENDENCY HE DETECTED AND CURSORILY DEPLORED IN MISSUS LUNA'S SISTER +6128-63241-0002-559: RANSOM WAS PLEASED WITH THE VISION OF THAT REMEDY IT MUST BE REPEATED THAT HE WAS VERY PROVINCIAL +6128-63241-0003-560: HE WAS SORRY FOR HER BUT (HE SAW->HIS SORROW) IN A FLASH THAT NO ONE COULD HELP HER THAT WAS WHAT MADE HER TRAGIC +6128-63241-0004-561: SHE COULD NOT DEFEND HERSELF AGAINST A RICH ADMIRATION A KIND OF TENDERNESS OF ENVY OF ANY ONE WHO HAD BEEN SO HAPPY AS TO HAVE THAT OPPORTUNITY +6128-63241-0005-562: HIS FAMILY WAS RUINED THEY HAD LOST THEIR SLAVES THEIR PROPERTY (THEIR->THE) FRIENDS AND RELATIONS (THEIR->THE) HOME HAD TASTED OF ALL THE CRUELTY OF DEFEAT +6128-63241-0006-563: THE STATE OF MISSISSIPPI (SEEMED->SEEM) TO HIM THE STATE OF DESPAIR SO HE SURRENDERED THE REMNANTS OF HIS PATRIMONY TO HIS MOTHER AND SISTERS AND AT NEARLY THIRTY YEARS OF AGE (ALIGHTED->DELIGHTED) FOR THE FIRST TIME IN NEW YORK IN THE COSTUME OF HIS PROVINCE WITH FIFTY DOLLARS IN HIS POCKET AND A GNAWING HUNGER IN HIS HEART +6128-63241-0007-564: IT WAS IN THE FEMALE LINE AS (BASIL->BALES HAD) RANSOM HAD WRITTEN IN ANSWERING HER LETTER WITH A GOOD DEAL OF FORM AND FLOURISH HE SPOKE AS IF THEY HAD BEEN ROYAL HOUSES +6128-63241-0008-565: IF IT HAD BEEN POSSIBLE TO SEND MISSUS RANSOM MONEY OR EVEN CLOTHES SHE WOULD HAVE LIKED THAT BUT SHE HAD NO MEANS OF ASCERTAINING (HOW->HER) SUCH AN OFFERING WOULD BE TAKEN 
+6128-63241-0009-566: OLIVE HAD A FEAR OF EVERYTHING BUT HER GREATEST FEAR WAS OF BEING AFRAID +6128-63241-0010-567: SHE HAD ERECTED IT INTO A SORT OF RULE OF CONDUCT THAT WHENEVER SHE SAW A RISK SHE WAS TO TAKE IT AND SHE HAD FREQUENT HUMILIATIONS AT FINDING HERSELF (SAFE->SAVED) AFTER ALL +6128-63241-0011-568: SHE WAS PERFECTLY SAFE AFTER WRITING TO BASIL RANSOM AND INDEED IT WAS DIFFICULT TO SEE WHAT HE COULD HAVE DONE TO HER EXCEPT THANK HER HE WAS ONLY EXCEPTIONALLY (SUPERLATIVE->SUPERNATIVE) FOR HER LETTER AND ASSURE HER THAT HE WOULD COME AND SEE HER THE FIRST TIME HIS BUSINESS HE WAS BEGINNING TO GET A LITTLE SHOULD TAKE HIM TO BOSTON +6128-63241-0012-569: HE WAS TOO SIMPLE TOO MISSISSIPPIAN FOR THAT SHE WAS ALMOST DISAPPOINTED +6128-63241-0013-570: OF ALL THINGS IN THE WORLD CONTENTION WAS MOST SWEET TO HER THOUGH WHY IT IS HARD TO IMAGINE FOR IT ALWAYS COST HER TEARS HEADACHES A DAY OR TWO IN BED (ACUTE EMOTION->ACUTORATION) AND IT WAS VERY POSSIBLE (BASIL->BASER) RANSOM WOULD NOT CARE TO (CONTEND->COMPEND) +6128-63244-0000-531: MISS CHANCELLOR HERSELF HAD THOUGHT SO MUCH ON THE VITAL SUBJECT WOULD NOT SHE MAKE A FEW REMARKS AND GIVE THEM SOME OF HER EXPERIENCES +6128-63244-0001-532: HOW DID THE LADIES (ON->AND) BEACON STREET FEEL ABOUT THE (BALLOT->BULLET) +6128-63244-0002-533: (PERHAPS->THERE) SHE COULD SPEAK FOR THEM MORE THAN FOR SOME OTHERS +6128-63244-0003-534: WITH HER (IMMENSE->MOST) SYMPATHY FOR REFORM SHE FOUND HERSELF SO OFTEN WISHING THAT (REFORMERS->WE FOOLING AS) WERE A LITTLE DIFFERENT +6128-63244-0004-535: (OLIVE->I HAVE) HATED (TO HEAR->DEER) THAT FINE AVENUE TALKED ABOUT AS IF IT WERE SUCH A REMARKABLE PLACE AND TO LIVE THERE (WERE->WHERE) A PROOF OF WORLDLY GLORY +6128-63244-0005-536: ALL SORTS (OF INFERIOR->HAVE CONTRAY YOUR) PEOPLE (LIVED->IF) THERE AND SO BRILLIANT A WOMAN AS MISSUS (FARRINDER->FARRENDER) WHO LIVED AT ROXBURY OUGHT NOT TO MIX THINGS UP +6128-63244-0006-537: SHE KNEW HER PLACE IN THE BOSTON (HIERARCHY->HILLRY KEY) AND IT WAS NOT WHAT MISSUS (FARRINDER->BARRENDERS) SUPPOSED (SO THAT->SELL HIM) THERE WAS A WANT OF PERSPECTIVE IN TALKING TO HER AS IF SHE HAD BEEN (A->I) REPRESENTATIVE OF THE ARISTOCRACY +6128-63244-0007-538: SHE WISHED TO WORK IN ANOTHER FIELD SHE HAD LONG BEEN PREOCCUPIED WITH THE ROMANCE OF THE PEOPLE +6128-63244-0008-539: THIS MIGHT SEEM ONE OF THE MOST ACCESSIBLE OF PLEASURES BUT IN POINT OF FACT SHE HAD NOT FOUND IT SO +6128-63244-0009-540: CHARLIE WAS A YOUNG MAN IN A (WHITE->WIDE) OVERCOAT AND A PAPER COLLAR IT WAS (FOR HIM->BOUHAIR) IN THE LAST (ANALYSIS->OF NICES) THAT (THEY->THE) CARED MUCH THE MOST +6128-63244-0010-541: OLIVE CHANCELLOR WONDERED HOW MISSUS (FARRINDER->KYNDER) WOULD TREAT (THAT->THEIR) BRANCH (OF->AT) THE QUESTION +6128-63244-0011-542: (IF->*) IT (BE->HAD BEEN) NECESSARY WE ARE PREPARED TO TAKE CERTAIN STEPS TO CONCILIATE THE SHRINKING +6128-63244-0012-543: (OUR->I'LL) MOVEMENT IS (FOR ALL->FULL) IT APPEALS TO THE MOST DELICATE LADIES +6128-63244-0013-544: (RAISE->THAT IS) THE STANDARD AMONG THEM AND BRING ME (A->YOUR) THOUSAND NAMES +6128-63244-0014-545: I LOOK AFTER THE DETAILS AS WELL AS THE BIG (CURRENTS->CURRANTS) MISSUS (FARRINDER->FERRINDER) ADDED IN A TONE AS EXPLANATORY AS COULD BE EXPECTED OF SUCH A WOMAN AND WITH A SMILE OF WHICH THE SWEETNESS WAS THRILLING TO HER LISTENER +6128-63244-0015-546: SAID (OLIVE->OLD) CHANCELLOR WITH A FACE WHICH SEEMED TO PLEAD FOR A (REMISSION OF->REMISSIONER'S) RESPONSIBILITY +6128-63244-0016-547: (I WANT->HOW WARNED) TO BE NEAR TO THEM TO HELP THEM 
+6128-63244-0017-548: IT WAS ONE THING TO CHOOSE (FOR->TO) HERSELF BUT NOW THE GREAT REPRESENTATIVE OF THE (ENFRANCHISEMENT->ENCRONTISEMENT) OF THEIR SEX FROM EVERY FORM OF (BONDAGE->BANDAGE) HAD CHOSEN FOR HER +6128-63244-0018-549: THE UNHAPPINESS OF WOMEN +6128-63244-0019-550: THEY WERE HER SISTERS (THEY->THERE) WERE HER OWN AND THE DAY OF THEIR DELIVERY HAD DAWNED +6128-63244-0020-551: THIS WAS THE ONLY SACRED CAUSE THIS WAS THE GREAT THE (JUST REVOLUTION->DESTRULICIAN) IT (MUST->WAS) TRIUMPH IT (MUST->WAS) SWEEP EVERYTHING BEFORE IT IT MUST EXACT FROM THE OTHER THE BRUTAL BLOOD STAINED RAVENING RACE THE (LAST->LOST) PARTICLE OF (EXPIATION->EXPLANATION) +6128-63244-0021-552: (THEY WOULD BE->THERE HAD BEEN) NAMES OF WOMEN WEAK INSULTED PERSECUTED BUT DEVOTED IN EVERY (PULSE->PART) OF THEIR BEING TO THE CAUSE AND ASKING NO BETTER FATE THAN TO DIE FOR IT +6128-63244-0022-553: IT (WAS->WILL) NOT CLEAR TO THIS INTERESTING GIRL IN WHAT MANNER SUCH A SACRIFICE AS THIS LAST WOULD BE REQUIRED OF HER BUT SHE (SAW THE->SOLD A) MATTER THROUGH A KIND OF SUNRISE MIST OF (EMOTION->THE NATION) WHICH MADE DANGER AS ROSY (AS->IS) SUCCESS +6128-63244-0023-554: WHEN MISS (BIRDSEYE->BIRD'S EYED) APPROACHED IT TRANSFIGURED HER FAMILIAR HER COMICAL SHAPE AND MADE THE POOR LITTLE (HUMANITARY->HUMANITY) HACK (SEEM->SIMPLE) ALREADY A MARTYR +6128-63244-0024-555: (OLIVE->I LEAVE) CHANCELLOR LOOKED AT HER WITH LOVE REMEMBERED THAT SHE HAD NEVER IN HER LONG (UNREWARDED->AND REWARDED) WEARY LIFE HAD A THOUGHT (OR->OF) AN IMPULSE FOR HERSELF +6128-63244-0025-556: SHE HAD BEEN CONSUMED BY THE PASSION OF SYMPATHY IT HAD (CRUMPLED->CRUMBLED) HER INTO AS MANY CREASES AS AN OLD GLAZED DISTENDED GLOVE +6432-63722-0000-2431: (BUT SCUSE->PECUSE) ME (DIDN'T YO FIGGER ON DOIN->THEN YOU'LL FOR GONE DOING) SOME (DETECTIN AN GIVE->DETECTIVE AND GIVEN) UP (FISHIN->FISHIN') +6432-63722-0001-2432: AND SHAG WITH THE FREEDOM OF AN OLD SERVANT STOOD LOOKING AT HIS (MASTER->MASTERY) AS IF NOT QUITE UNDERSTANDING THE NEW TWIST THE AFFAIRS HAD TAKEN +6432-63722-0002-2433: I'M GOING (OFF FISHING->OUR FISHIN) I MAY NOT CATCH ANYTHING (I->AND) MAY NOT WANT TO AFTER I GET THERE +6432-63722-0003-2434: GET READY SHAG YES (SAH COLONEL->I CAN) +6432-63722-0004-2435: AND HAVING PUT HIMSELF IN A FAIR WAY AS HE HOPED TO SOLVE SOME OF THE PROBLEMS CONNECTED WITH THE DARCY CASE COLONEL (ASHLEY->HASHY) WENT DOWN TO POLICE HEADQUARTERS TO LEARN MORE FACTS IN (*->THE) CONNECTION WITH THE MURDER OF THE EAST INDIAN +6432-63722-0005-2436: (PINKUS->PINKIS) AND DONOVAN HAVEN'T THEY (CARROLL YEP->CAROL HE EP) +6432-63722-0006-2437: (CARROLL->GAL) WAS TOO MUCH ENGAGED IN WATCHING THE BLUE SMOKE CURL LAZILY UPWARD FROM HIS CIGAR JUST THEN TO SAY MORE +6432-63722-0007-2438: ARE YOU GOING TO WORK ON THAT CASE COLONEL +6432-63722-0008-2439: BUT HE HADN'T ANY MORE TO DO WITH IT COLONEL THAN THAT CAT +6432-63722-0009-2440: PERHAPS NOT ADMITTED COLONEL ASHLEY +6432-63722-0010-2441: WE'VE GOT OUR MAN AND THAT'S ALL WE WANT +6432-63722-0011-2442: YOU'RE ON THE DARCY CASE THEY TELL ME IN A WAY YES +6432-63722-0012-2443: I'M WORKING IN THE (INTERESTS->INTEREST) OF THE YOUNG MAN +6432-63722-0013-2444: IT'S JUST ONE OF THEM COINCIDENCES LIKE +6432-63722-0014-2445: BUSTED HIS HEAD IN WITH A HEAVY CANDLESTICK ONE OF A PAIR +6432-63722-0015-2446: GAD (EXCLAIMED->EXPLAINED) THE COLONEL +6432-63722-0016-2447: THE VERY PAIR I WAS GOING TO BUY +6432-63722-0017-2448: LOOK HERE COLONEL DO YOU KNOW ANYTHING ABOUT THIS +6432-63722-0018-2449: AND THE DETECTIVE'S PROFESSIONAL INSTINCTS 
GOT THE UPPER HAND OF HIS FRIENDLINESS NOT THE LEAST IN THE WORLD NOT AS MUCH AS YOU DO WAS THE COOL ANSWER +6432-63722-0019-2450: I HAPPENED TO SEE THOSE CANDLESTICKS IN THE WINDOW OF SINGA PHUT'S SHOP THE OTHER DAY AND I MADE UP MY MIND TO BUY THEM WHEN I HAD A CHANCE +6432-63722-0020-2451: NOW I'M AFRAID I WON'T BUT HOW DID IT HAPPEN +6432-63722-0021-2452: (PHUT->FAT) I DON'T KNOW WHETHER THAT'S HIS FIRST OR HIS LAST NAME ANYHOW HE HAD A PARTNER NAMED (SHERE->SHEAR) ALI +6432-63722-0022-2453: ANYHOW HE (AND PHUT->INFORT) DIDN'T GET ALONG VERY WELL IT SEEMS +6432-63722-0023-2454: (NEIGHBORS->LABORS) OFTEN HEARD (EM SCRAPPIN->HIM SCRAP IN) A LOT AND THIS AFTERNOON THEY WENT AT IT AGAIN (*->AT) HOT AND HEAVY +6432-63722-0024-2455: (TOWARD->TO OUR) DARK A MAN WENT IN TO BUY A LAMP +6432-63722-0025-2456: HE FOUND THE PLACE WITHOUT A LIGHT IN IT STUMBLED OVER SOMETHING ON THE FLOOR AND THERE WAS ALI'S BODY WITH THE HEAD BUSTED IN AND THIS HEAVY CANDLESTICK NEAR IT +6432-63722-0026-2457: SURE HELD SO TIGHT WE COULD HARDLY GET IT OUT +6432-63722-0027-2458: MAYBE THE FIGHT WAS ABOUT WHO (OWNED->ON) THE WATCH FOR THE (DAGOS->DAGGERS) TALKED IN THEIR FOREIGN LINGO AND NONE OF THE (NEIGHBORS->NEIGHBOURS) COULD TELL WHAT THEY WERE (SAYIN->SAYING) I SEE +6432-63722-0028-2459: AND THE WATCH HAVE YOU IT YES IT'S HERE +6432-63722-0029-2460: THAT'S THE WATCH ANNOUNCED THE HEADQUARTERS DETECTIVE REACHING IN FOR IT GOING (YET->IN) SEE +6432-63722-0030-2461: YOU'RE NOT (AS SQUEAMISH->A SCREAMISH) AS ALL THAT ARE YOU JUST BECAUSE IT WAS IN A DEAD MAN'S (HAND->HANDS) AND (IN->*) A WOMAN'S +6432-63722-0031-2462: AND (DONOVAN'S->DOLOMAN'S) VOICE WAS PLAINLY (SKEPTICAL->SCEPTICAL) +6432-63722-0032-2463: YES IT MAY HAVE SOME ROUGH EDGES ON IT +6432-63722-0033-2464: AND I'VE READ ENOUGH ABOUT GERMS TO KNOW THE DANGER I'D ADVISE YOU TO BE CAREFUL +6432-63722-0034-2465: IF YOU DON'T MIND I SHOULD LIKE TO EXAMINE THIS A BIT +6432-63722-0035-2466: BEFORE THE BIG WIND IN IRELAND SUGGESTED THONG WITH A NOD (AT->OF) HIS IRISH COMPATRIOT (SLIGHTLY LAUGHED->SLIGHTLY'LL HAVE) THE COLONEL +6432-63722-0036-2467: THAT'S RIGHT AGREED THE COLONEL AS HE CONTINUED TO MOVE HIS MAGNIFYING GLASS OVER THE SURFACE OF THE STILL TICKING WATCH +6432-63722-0037-2468: (AND A->IN THE) CLOSE OBSERVER MIGHT HAVE OBSERVED THAT HE DID NOT TOUCH HIS BARE FINGERS TO THE TIMEPIECE BUT POKED IT ABOUT AND TOUCHED IT HERE AND THERE WITH THE END OF A (LEADPENCIL->LEAD PENCIL) +6432-63722-0038-2469: AND (DONOVAN->DONALD) TAKE (A->HER) FRIEND'S ADVICE AND DON'T BE TOO FREE WITH THAT WATCH TOO FREE WITH IT +6432-63722-0039-2470: (ASKED->AS) THE (SURPRISED->SURPRISE) DETECTIVE YES +6432-63722-0040-2471: DON'T SCRATCH YOURSELF ON IT WHATEVER YOU DO WHY NOT +6432-63722-0041-2472: SIMPLY BECAUSE THIS WATCH +6432-63722-0042-2473: SOME ONE OUT (HERE->HER) TO SEE YOU +6432-63722-0043-2474: ALL RIGHT BE THERE IN A SECOND +6432-63722-0044-2475: (SINGA PHUT->SHANGHAT) WAS THE PANTING ANSWER +6432-63722-0045-2476: I WANT TO TALK OVER DARCY'S CASE WITH YOU THE COLONEL HAD SAID AND THE TWO HAD TALKED HAD THOUGHT HAD TALKED AGAIN AND NOW WERE SILENT FOR A TIME +6432-63722-0046-2477: WHAT ARE THE CHANCES OF GETTING HIM OFF LEGALLY IF WE GO AT IT FROM A NEGATIVE STANDPOINT ASKED THE COLONEL +6432-63722-0047-2478: RATHER A HYPOTHETICAL QUESTION COLONEL BUT I SHOULD SAY IT MIGHT BE A FIFTY FIFTY PROPOSITION +6432-63722-0048-2479: AT BEST HE WOULD GET OFF WITH A SCOTCH VERDICT OF NOT (PROVEN->PROVING) BUT HE DOESN'T WANT THAT NOR DO I +6432-63722-0049-2480: AND YOU I DON'T 
WANT IT EITHER +6432-63722-0050-2481: BUT I WANT TO KNOW JUST WHERE WE STAND NOW I KNOW +6432-63722-0051-2482: BUT I NEED TO DO A LITTLE MORE SMOKING OUT FIRST NOW I WANT TO THINK +6432-63722-0052-2483: IF YOU'LL EXCUSE ME I'LL PRETEND I'M FISHING AND I MAY CATCH SOMETHING +6432-63722-0053-2484: IN FACT I HAVE A FEELING THAT (I'LL->I) LAND MY FISH +6432-63722-0054-2485: (I'D->I) RECOMMEND HIM TO YOU INSTEAD OF BLACKSTONE THANKS (LAUGHED->LAP) KENNETH +6432-63722-0055-2486: WHAT IS IT PERHAPS I CAN HELP YOU +6432-63722-0056-2487: THE OLD ADAGE OF TWO HEADS YOU KNOW +6432-63722-0057-2488: YES IT STILL HOLDS GOOD +6432-63722-0058-2489: NO ALIMONY (REPEATED->REPLIED) THE COLONEL PUZZLED YES JUST THAT +6432-63722-0059-2490: AND THERE'S NO REASON YOU SHOULDN'T KNOW +6432-63723-0000-2491: CHUCKLED THE COLONEL AS HE SKILFULLY PLAYED THE LUCKLESS TROUT NOW STRUGGLING TO GET LOOSE FROM THE HOOK +6432-63723-0001-2492: AND WHEN THE FISH WAS LANDED PANTING ON THE GRASS AND SHAG HAD BEEN ROUSED FROM HIS SLUMBER TO SLIP (THE->A) NOW LIMP FISH INTO THE (CREEL->CREO) COLONEL ASHLEY GAVE A SIGH OF RELIEF AND REMARKED I THINK I SEE IT NOW +6432-63723-0002-2493: THE REASON SHE ASKED NO ALIMONY INQUIRED KENNETH +6432-63723-0003-2494: NO I WASN'T THINKING OF THAT +6432-63723-0004-2495: HOWEVER DON'T THINK I'M NOT INTERESTED IN YOUR CASE I'VE (FISHED->FINISHED) ENOUGH FOR TO DAY +6432-63723-0005-2496: WELL I DON'T KNOW THAT YOU CAN +6432-63723-0006-2497: IT (ISN'T->IS IN) GENERALLY KNOWN WENT ON THE LAWYER THAT THE HOTEL KEEPER'S WIFE HAS LEFT HIM +6432-63723-0007-2498: IT WAS ONE OF WHAT AT FIRST MIGHT BE CALLED REFINED CRUELTY ON HER HUSBAND'S PART DEGENERATING GRADUALLY INTO THAT OF (THE->A) BASER SORT +6432-63723-0008-2499: YOU DON'T MEAN THAT (LARCH->LARGE) STRUCK HER THAT THERE WAS PHYSICAL ABUSE DO YOU ASKED THE COLONEL THAT'S WHAT HE DID +6432-63723-0009-2500: THE COLONEL DID NOT DISCLOSE THE FACT THAT IT WAS NO NEWS TO HIM +6432-63723-0010-2501: AARON GRAFTON'S STATEMENT WAS BEING UNEXPECTEDLY CONFIRMED +6432-63723-0011-2502: HE REMEMBERED THAT CYNTHIA AND GRAFTON HAD ONCE BEEN IN LOVE WITH EACH OTHER +6432-63723-0012-2503: SHE SAID HE HAD STRUCK HER MORE THAN ONCE AND SHE COULD STAND IT NO LONGER +6432-63723-0013-2504: BECAUSE (LARCH->LARGE) MADE NO (DEFENSE->DEFENCE) +6432-63723-0014-2505: (LARCH->LARGE) BY REFUSING TO APPEAR PRACTICALLY ADMITTED THE CHARGES AGAINST HIM AND DID NOT OPPOSE THE SEPARATION +6432-63723-0015-2506: SO I HAD TO LET HER HAVE HER WAY AND WE DID NOT ASK THE COURT FOR MONEY THOUGH I HAD NO SUCH SQUEAMISH FEELINGS WHEN IT CAME TO MY (COUNSEL FEE->COUNCIL FEET) +6432-63723-0016-2507: NO BUT HE WILL OR (I'LL->ELSE) SUE (HIM->EM) AND GET JUDGMENT OH HE'LL PAY ALL RIGHT +6432-63723-0017-2508: AND IT TAKES ALL SORTS OF PERSONS TO MAKE IT UP +6432-63723-0018-2509: STILL I WOULD LIKE TO KNOW +6432-63723-0019-2510: THE (MURDER->MURDERER) OF MISSUS DARCY HAD SOME TIME AGO BEEN SHIFTED OFF THE FRONT PAGE THOUGH IT WOULD GET BACK THERE WHEN THE YOUNG (JEWELER->JEWELLER) WAS TRIED +6432-63723-0020-2511: IT HAD A DOUBLE REPUTATION SO TO SPEAK +6432-63723-0021-2512: GRAVE AND EVEN REVEREND (*->THE) CONVENTIONS ASSEMBLED IN ITS (BALLROOM AND->BALL ROOM IN) POLITICIANS OF THE UPPER IF NOT BETTER CLASS WERE FREQUENTLY SEEN IN ITS DINING ROOM OR CAFE +6432-63723-0022-2513: (LARCH->LARGE) HIMSELF WAS A PECULIAR CHARACTER +6432-63723-0023-2514: IN A SMALLER PLACE HE WOULD HAVE BEEN CALLED A SALOON KEEPER +6432-63723-0024-2515: AND IT WAS THIS MAN RICH (IT WAS->OVER) SAID HANDSOME CERTAINLY THAT 
(CYNTHIA RATCHFORD->CENTIA RETFORD) HAD MARRIED +6432-63723-0025-2516: TO THIS WAS THE ANSWER WHISPERED MONEY +6432-63723-0026-2517: AND IN A WAY IT WAS TRUE +6432-63723-0027-2518: SHE ALSO SAW AN OPPORTUNITY OF PAYING OLD DEBTS AND REAPING SOME REVENGES +6432-63723-0028-2519: AFTER THE MARRIAGE WHICH WAS A BRILLIANT AND GAY ONE IF NOT HAPPY THE (LARCH->LARGE) HOTEL IT COULD HARDLY BE CALLED (A->*) HOME BECAME THE SCENE OF MANY (FESTIVE OCCASIONS->FESTIVATIONS) +6432-63723-0029-2520: THEN IT WAS SAID OF (LARCH->LARGE) THAT SOON AFTER THE ECHOES OF THE WEDDING CHIMES HAD DIED AWAY HE HAD BEGUN TO TREAT HIS WIFE (WITH->FOR THE) REFINED CRUELTY THAT HIDDEN AWAY FROM THE PUBLIC UNDERNEATH HIS HABITUAL MANNER THERE WAS THE RAWNESS OF THE BRUTE +6432-63723-0030-2521: BUT IT WAS NOTICED THAT THE OLDER AND MORE CONSERVATIVE FAMILIES WERE LESS OFTEN REPRESENTED AND WHEN THEY WERE IT WAS BY SOME OF THE YOUNGER MEMBERS WHOSE REPUTATIONS WERE ALREADY (SMIRCHED->SMARCHED) OR WHO HAD NOT YET ACQUIRED ANY AND WERE WILLING TO TAKE A CHANCE +6432-63723-0031-2522: IT WOULDN'T DO YOU KNOW AFTER THAT STORY CAME OUT FOR ME (AND->IN) THE VICE CHANCELLOR WHO SAT IN (THE->A) CASE AS WELL AS OTHER JUDGES AND MEMBERS OF THE BAR TO BE SEEN THERE KENNETH EXPLAINED TO THE COLONEL +6432-63723-0032-2523: MEANWHILE COLONEL ASHLEY WAS A VERY BUSY MAN AND TO NO ONE DID HE TELL VERY MUCH ABOUT HIS ACTIVITIES HE SAW DARCY FREQUENTLY AT THE JAIL AND TO THAT YOUNG MAN'S PLEADINGS THAT SOMETHING (*->TO) BE DONE ALWAYS RETURNED THE ANSWER +6432-63723-0033-2524: (DON'T->DONE) WORRY IT WILL COME OUT ALL RIGHT +6432-63723-0034-2525: I'M GOING (TO RECTIFY->DIRECTIFY) THEM BUT IT WILL TAKE TIME +6432-63723-0035-2526: IT'S HARD FOR MISS MASON TOO ALTHOUGH SHE'S BEARING UP LIKE A MAJOR +6432-63723-0036-2527: SO KING (GOT->GOD) BAIL WHO PUT IT UP +6432-63723-0037-2528: IT WAS (HIGH->TIME) LARCH +6432-63723-0038-2529: THEY TOOK HARRY AWAY A WHILE AGO +6432-63723-0039-2530: BUT HIS ARE PRETTY UNCERTAIN SHOES TO BE IN JUST THE SAME +6432-63723-0040-2531: ONLY THAT I DARCY HESITATED AND GREW RED +6432-63723-0041-2532: GOOD EVENING COLONEL HE CALLED GENIALLY (WILL->WHERE) YOU JOIN ME IN A (WELSH->WELL) RABBIT +6432-63723-0042-2533: THANK YOU NO +6432-63723-0043-2534: I'M AFRAID MY (DIGESTION->DIADE) ISN'T QUITE UP TO THAT AS I'VE HAD TO CUT OUT MY FISHING OF LATE +6432-63723-0044-2535: NOW AS TO CERTAIN MATTERS IN THE STORE ON THE MORNING OF THE MURDER +6432-63723-0045-2536: (THE->THEY) STOPPED CLOCKS FOR INSTANCE HAVE YOU ANY THEORY +6432-63723-0046-2537: THERE WERE THREE OF THEM THE (CENTER->CENTRE) FIGURE BEING THAT OF HARRY KING AND HE WAS VERY MUCH INTOXICATED +6432-63723-0047-2538: THAT IS NOT ALWAYS BUT SOMETIMES IT HAPPENED TO BE SO NOW +6432-63723-0048-2539: I BEG YOUR PARDON HE SAID IN THE CULTURED TONES HE KNEW SO WELL HOW TO USE YET OF WHICH HE MADE SO LITTLE USE OF LATE +6432-63723-0049-2540: I SAID WHERE HAVE YOU BEEN REMARKED THE OTHER WE'VE MISSED YOU +6432-63723-0050-2541: I SAID I WAS GOLFING HE WENT ON EXCEEDINGLY DISTINCTLY THOUGH WITH AN EFFORT +6432-63723-0051-2542: WHY POLONIUS SOME ONE ASKED +6432-63723-0052-2543: BECAUSE DEAR FRIEND REPLIED KING SOFTLY HE SOMEWHAT RESEMBLES A CERTAIN PERSON HERE WHO TALKS TOO MUCH BUT WHO IS NOT SO WISE AS HE THINKS +6432-63723-0053-2544: THERE WAS A RATTLE OF (COINS ON->COIN DOWN) THE MAHOGANY (BAR AS->BARS) KING SOUGHT TO DISENTANGLE A SINGLE BILL FROM THE (WADDED->WATERED) UP CURRENCY IN HIS POCKET +6432-63723-0054-2545: IT'S IT'S AN ODD COIN AN OLD ROMAN ONE THAT MISSUS DARCY HAD 
IN HER PRIVATE COLLECTION KEPT IN THE JEWELRY STORE SAFE WAS THE WHISPERED ANSWER +6432-63723-0055-2546: I WENT OVER THEM (*->NEAR) THE (OTHER->*) DAY AND NOTICED SOME WERE MISSING THOUGH I SAW THEM ALL WHEN I PAID A VISIT TO HER JUST A SHORT TIME BEFORE SHE WAS KILLED +6432-63723-0056-2547: THAT WAS HERS WENT ON THE (JEWELER->JUROR) +6432-63723-0057-2548: NOW HARRY KING HAS IT EXCLAIMED COLONEL ASHLEY +6938-70848-0000-1216: EVEN THE SUN CAME OUT PALE AND WATERY AT NOON +6938-70848-0001-1217: THE (COLDS->GOLDS) AND RHEUMATISM OF THE (RAINY->REINY) MONTHS VANISHED +6938-70848-0002-1218: (ASKED A->AS TO) WORKER LAST SUNDAY YOU DID IT WHEN THE YUNKERS +6938-70848-0003-1219: WELL DIDN'T (THEY SHOOT->ISSUED) US ONE MAN EXHIBITED HIS ARM IN A SLING +6938-70848-0004-1220: HAVEN'T I (GOT->GUARD) SOMETHING TO REMEMBER THEM BY THE DEVILS +6938-70848-0005-1221: WHO ARE YOU TO DESTROY THE LEGAL GOVERNMENT (WHO IS LENIN->WITH LINEN) A GERMAN +6938-70848-0006-1222: WHO ARE YOU A COUNTER REVOLUTIONIST A PROVOCATOR THEY (BELLOWED->BELOVED) AT HIM +6938-70848-0007-1223: YOU CALL YOURSELVES THE PEOPLE OF (RUSSIA->RACHEL) BUT (YOU'RE->YOU ARE) NOT THE PEOPLE OF (RUSSIA->RATIA) +6938-70848-0008-1224: (THE PEASANTS ARE THE->TO PIECE AND OTHER) PEOPLE OF (RUSSIA->RATIA) WAIT UNTIL THE PEASANTS +6938-70848-0009-1225: WE KNOW WHAT THE PEASANTS WILL SAY AREN'T THEY (WORKINGMEN->WORKING MAN) LIKE OURSELVES +6938-70848-0010-1226: (THESE MEN ESPECIALLY->THIS MAN HAS SPECIALLY) WELCOMED (THE->TO) CALL TO A CONGRESS OF PEASANTS +6938-70848-0011-1227: (THESE->THIS) LAST (WERE->WED) THE YOUNG GENERATION WHO HAD BEEN SERVING IN THE ARMY +6938-70848-0012-1228: WHEREUPON THE OLD (EXECUTIVE->EXECUTED) COMMITTEE LEFT THE HALL +6938-70848-0013-1229: DOWN WITH HIM THEY SHRIEKED +6938-70848-0014-1230: FEARFUL TUMULT (CRIES->QUITE) DOWN WITH THE (BOLSHEVIKI->BALL CHEVIKI) +6938-70848-0015-1231: UPON MY RETURN I VISITED (SMOLNY->MOLLY) NO SUCH ACCUSATION WAS MADE AGAINST ME THERE AFTER A BRIEF CONVERSATION I LEFT AND (THAT'S ALL LET ANY ONE->THAT SOUL LATINUE IN) PRESENT MAKE SUCH AN ACCUSATION +6938-70848-0016-1232: MEANWHILE THE QUESTION OF THE (STATUS->STRATUS) OF THE (EXECUTIVE->EXECUTORY) COMMITTEE WAS AGITATING ALL MINDS +6938-70848-0017-1233: BY DECLARING (THE->THEIR) ASSEMBLY (EXTRAORDINARY->EXTRAORDINARILY) CONFERENCE IT HAD BEEN PLANNED TO (BLOCK THE REELECTION->PLUCK THIRD TREE LECTION) OF THE (EXECUTIVE->EXECUTED) COMMITTEE +6938-70848-0018-1234: BUT THIS (WORKED->WORK) BOTH WAYS THE (LEFT SOCIALIST REVOLUTIONISTS->LAD SOCIALLY REVOLUTIONIST) DECIDED THAT IF THE CONGRESS HAD NO POWER OVER THE (EXECUTIVE->EXUDY) COMMITTEE (THEN THE EXECUTIVE->TEN TO EXECUTE) COMMITTEE HAD NO POWER OVER THE CONGRESS +6938-70848-0019-1235: ON THE TWENTY SEVENTH OCCURRED THE DEBATE ON THE LAND QUESTION WHICH (REVEALED THE->REVIL TO) DIFFERENCES BETWEEN THE (AGRARIAN PROGRAMME->INGREDIAN PROGRAM) OF THE BOLSHEVIKI AND THE LEFT SOCIALIST REVOLUTIONARIES +6938-70848-0020-1236: THE CONSTITUENT ASSEMBLY WILL NOT DARE TO BREAK WITH THE WILL OF THE PEOPLE +6938-70848-0021-1237: FOLLOWED HIM LENIN LISTENED TO NOW WITH ABSORBING INTENSITY +6938-70848-0022-1238: THE FIRST (STAGE->AGE) WAS (THE->A) CRUSHING OF AUTOCRACY AND THE (CRUSHING->CRASHING) OF THE POWER OF THE INDUSTRIAL (CAPITALISTS->CAPITALIST) AND (LAND OWNERS->THE LANDOWNERS) WHOSE INTERESTS ARE (CLOSELY->CLOTHING) RELATED +6938-70848-0023-1239: (THE DUMAS AND ZEMSTVOS->DID YOU ME SEND THEM STOOLS) WERE DROPPED +6938-70848-0024-1240: HE KNEW THAT AN AGREEMENT WITH THE BOLSHEVIKI WAS BEING 
DISCUSSED BUT HE DID NOT KNOW THAT IT HAD BEEN CONCLUDED +6938-70848-0025-1241: HE SPOKE TO THE (RUMP->RUM) CONVENTION +6938-70848-0026-1242: THE (VILLAGES->RELIGIOUS) WILL SAVE US IN THE END +6938-70848-0027-1243: BUT THE PRESENT (MOVEMENT->MOMENT) IS INTERNATIONAL AND THAT IS WHY IT IS INVINCIBLE +6938-70848-0028-1244: THE (WILL->WHEEL) OF MILLIONS OF WORKERS IS (NOW->SOME) CONCENTRATED IN (THIS->THE) HALL +6938-70848-0029-1245: A NEW HUMANITY WILL BE BORN OF THIS WAR +6938-70848-0030-1246: I GREET YOU WITH THE (CHRISTENING->CHRISTIANING) OF A NEW RUSSIAN LIFE AND FREEDOM +7018-75788-0000-135: THEN I TOOK UP A GREAT STONE FROM AMONG THE TREES AND COMING UP TO HIM SMOTE HIM THEREWITH ON THE HEAD WITH ALL MY MIGHT AND CRUSHED IN HIS SKULL AS HE LAY DEAD DRUNK +7018-75788-0001-136: BEHOLD A SHIP WAS MAKING FOR THE ISLAND THROUGH THE DASHING SEA AND CLASHING WAVES +7018-75788-0002-137: HEARING THIS I WAS SORE TROUBLED REMEMBERING WHAT I HAD BEFORE SUFFERED FROM THE APE KIND +7018-75788-0003-138: UPON THIS HE BROUGHT ME A COTTON BAG AND (GIVING->GIVEN) IT TO (ME->HIM HE) SAID TAKE THIS BAG AND FILL IT WITH PEBBLES FROM THE BEACH AND GO FORTH WITH A COMPANY OF THE TOWNSFOLK TO WHOM I WILL GIVE A CHARGE RESPECTING THEE +7018-75788-0004-139: DO AS THEY DO AND (BELIKE->BE LIKE) THOU SHALT GAIN WHAT MAY FURTHER THY RETURN VOYAGE TO THY NATIVE LAND +7018-75788-0005-140: THEN HE CARRIED ME TO THE BEACH WHERE I FILLED MY BAG (*->AND) WITH PEBBLES LARGE AND SMALL AND PRESENTLY WE SAW A COMPANY OF FOLK (ISSUE->ISSUED) FROM THE TOWN EACH BEARING A BAG LIKE MINE FILLED WITH PEBBLES +7018-75788-0006-141: TO THESE HE COMMITTED ME COMMENDING ME TO THEIR CARE AND SAYING THIS MAN IS A STRANGER SO TAKE HIM WITH YOU AND TEACH HIM HOW TO GATHER THAT HE MAY GET HIS DAILY BREAD AND YOU WILL EARN YOUR REWARD AND RECOMPENSE IN HEAVEN +7018-75788-0007-142: NOW SLEEPING UNDER THESE TREES WERE MANY (APES->IPES) WHICH WHEN THEY SAW US ROSE AND FLED FROM US AND SWARMED UP AMONG THE BRANCHES WHEREUPON MY COMPANIONS BEGAN TO PELT THEM WITH WHAT THEY HAD IN THEIR BAGS AND THE APES FELL TO PLUCKING OF THE FRUIT OF THE TREES AND CASTING THEM AT THE FOLK +7018-75788-0008-143: WE WEIGHED ANCHOR AND SHAHRAZAD PERCEIVED THE DAWN OF DAY AND CEASED SAYING HER PERMITTED SAY +7018-75788-0009-144: WHEN IT WAS THE FIVE HUNDRED AND FIFTY NINTH NIGHT +7018-75788-0010-145: AND CEASED NOT SAILING TILL WE ARRIVED SAFELY AT (BASSORAH->PESSORAR) +7018-75788-0011-146: THERE I ABODE A LITTLE AND THEN WENT ON TO (BAGHDAD->BAGDAD) WHERE I ENTERED MY QUARTER AND FOUND MY HOUSE AND (FOREGATHERED->FORGATHERED) WITH MY FAMILY AND SALUTED MY FRIENDS WHO GAVE ME JOY OF MY SAFE RETURN AND I LAID UP ALL MY GOODS AND VALUABLES IN MY STOREHOUSES +7018-75788-0012-147: AFTER WHICH I RETURNED TO MY OLD MERRY WAY OF LIFE AND FORGOT ALL I HAD SUFFERED IN THE GREAT PROFIT AND GAIN I HAD MADE +7018-75788-0013-148: NEXT MORNING AS SOON AS IT WAS LIGHT HE PRAYED THE DAWN PRAYER AND AFTER BLESSING MOHAMMED THE CREAM OF ALL CREATURES BETOOK HIMSELF TO THE HOUSE OF (SINDBAD->SINBAD) THE SEAMAN AND WISHED HIM A GOOD DAY +7018-75788-0014-149: HERE I FOUND A GREAT SHIP READY FOR SEA AND FULL OF MERCHANTS AND NOTABLES WHO HAD WITH THEM GOODS OF PRICE SO I EMBARKED MY BALES THEREIN +7018-75788-0015-150: (HAPLY->HAPPILY) AMONGST YOU IS ONE RIGHTEOUS WHOSE PRAYERS THE LORD WILL ACCEPT +7018-75788-0016-151: PRESENTLY THE SHIP STRUCK THE MOUNTAIN AND BROKE UP AND ALL (AND->THEN) EVERYTHING ON BOARD OF HER WERE PLUNGED INTO THE SEA +7018-75788-0017-152: BUT (IT->AT) BURNETH IN 
THEIR BELLIES SO THEY CAST IT UP AGAIN AND IT (CONGEALETH->CONCEALETH) ON THE SURFACE OF THE WATER WHEREBY ITS COLOR AND QUANTITIES ARE CHANGED AND AT LAST THE WAVES CAST IT ASHORE AND THE TRAVELLERS AND MERCHANTS WHO KNOW IT (COLLECT IT->COLLECTED) AND SELL IT +7018-75788-0018-153: EACH THAT DIED WE WASHED AND SHROUDED IN SOME OF THE CLOTHES AND LINEN CAST ASHORE BY THE TIDES AND AFTER (A->*) LITTLE THE REST OF MY FELLOWS PERISHED ONE BY ONE TILL I HAD BURIED THE LAST OF THE PARTY AND ABODE ALONE ON THE ISLAND WITH BUT A LITTLE PROVISION LEFT I WHO WAS WONT TO HAVE SO MUCH +7018-75788-0019-154: BUT THERE IS MAJESTY AND THERE IS NO MIGHT SAVE IN ALLAH THE GLORIOUS THE GREAT +7018-75789-0000-155: WHEN IT WAS THE FIVE HUNDRED AND SIXTY FIRST NIGHT +7018-75789-0001-156: THEN (SIGHING->SIGNED) FOR MYSELF I SET TO WORK COLLECTING A NUMBER OF PIECES OF CHINESE AND (COMORIN->CORMOR AND) ALOES WOOD AND I BOUND THEM TOGETHER WITH ROPES FROM THE WRECKAGE THEN I CHOSE OUT FROM THE BROKEN UP (SHIPS->SHIP) STRAIGHT PLANKS OF EVEN SIZE AND FIXED THEM FIRMLY UPON THE (ALOES->ALLIES) WOOD MAKING ME A BOAT RAFT A LITTLE NARROWER THAN THE CHANNEL OF THE STREAM AND I TIED IT TIGHTLY AND FIRMLY AS THOUGH IT WERE NAILED +7018-75789-0002-157: LAND AFTER LAND SHALT THOU (SEEK AND FIND->SEE CONFINED) BUT NO OTHER LIFE ON THY WISH SHALL WAIT FRET NOT THY SOUL IN THY THOUGHTS (O NIGHT->A KNIGHT) ALL (WOES->THOSE) SHALL END OR SOONER OR LATE +7018-75789-0003-158: I (ROWED->RIDE) MY CONVEYANCE INTO THE PLACE WHICH WAS INTENSELY DARK AND THE CURRENT CARRIED (*->ME) THE RAFT WITH IT DOWN THE UNDERGROUND CHANNEL +7018-75789-0004-159: AND I THREW MYSELF DOWN UPON MY FACE ON THE RAFT BY REASON OF THE NARROWNESS OF THE CHANNEL WHILST THE STREAM CEASED NOT TO CARRY ME ALONG KNOWING NOT NIGHT FROM DAY FOR THE EXCESS OF THE GLOOM WHICH ENCOMPASSED ME ABOUT (AND->IN) MY TERROR AND CONCERN FOR MYSELF LEST I SHOULD PERISH +7018-75789-0005-160: WHEN I AWOKE AT LAST I FOUND MYSELF IN THE LIGHT OF HEAVEN AND OPENING MY EYES I SAW MYSELF IN A BROAD STREAM AND THE RAFT MOORED TO AN ISLAND IN THE MIDST OF A NUMBER OF INDIANS AND ABYSSINIANS +7018-75789-0006-161: BUT I WAS DELIGHTED AT MY ESCAPE FROM THE RIVER +7018-75789-0007-162: WHEN THEY SAW I UNDERSTOOD THEM (NOT->NIGHT) AND MADE THEM NO ANSWER ONE OF THEM CAME FORWARD AND SAID TO ME IN ARABIC PEACE BE WITH THEE O MY BROTHER +7018-75789-0008-163: O MY BROTHER ANSWERED HE WE ARE HUSBANDMEN AND (TILLERS->TELLERS) OF THE SOIL WHO CAME OUT TO WATER OUR FIELDS (AND->IN) PLANTATIONS AND FINDING THEE ASLEEP ON THIS RAFT LAID HOLD OF IT AND MADE IT FAST BY US AGAINST THOU SHOULDST AWAKE AT THY LEISURE +7018-75789-0009-164: I ANSWERED FOR ALLAH'S SAKE (O->AND) MY LORD ERE I SPEAK GIVE ME SOMEWHAT TO EAT FOR I AM STARVING AND AFTER ASK ME WHAT THOU WILT +7018-75789-0010-165: WHEN IT WAS THE FIVE HUNDRED AND SIXTY SECOND NIGHT +7018-75789-0011-166: SHE SAID IT HATH (REACHED->RAGED) ME O AUSPICIOUS KING THAT SINDBAD THE SEAMAN CONTINUED WHEN I LANDED AND FOUND MYSELF AMONGST THE INDIANS AND ABYSSINIANS AND HAD TAKEN SOME REST THEY CONSULTED AMONG THEMSELVES AND SAID TO ONE ANOTHER THERE IS NO HELP FOR IT BUT WE CARRY HIM WITH US AND PRESENT HIM TO OUR KING THAT HE MAY ACQUAINT HIM WITH HIS ADVENTURES +7018-75789-0012-167: SO I CONSORTED WITH THE CHIEF OF THE ISLANDERS AND THEY PAID ME THE UTMOST RESPECT +7018-75789-0013-168: SO I ROSE WITHOUT STAY OR DELAY AND KISSED THE KING'S HAND AND ACQUAINTED HIM WITH MY LONGING TO SET OUT WITH THE MERCHANTS FOR THAT I PINED AFTER MY PEOPLE AND MINE OWN 
LAND +7018-75789-0014-169: QUOTH HE THOU ART THINE OWN MASTER YET IF IT BE THY WILL TO ABIDE WITH US ON OUR HEAD AND EYES BE IT FOR THOU (GLADDENEST->GLADNESSED) US WITH THY COMPANY +7018-75789-0015-170: BY ALLAH (O->ARE) MY LORD ANSWERED I THOU HAST INDEED OVERWHELMED ME WITH THY FAVOURS AND WELL DOINGS BUT I WEARY FOR A SIGHT OF MY FRIENDS AND FAMILY AND NATIVE COUNTRY +7018-75789-0016-171: THEN I TOOK LEAVE OF HIM AND OF ALL MY INTIMATES AND ACQUAINTANCES IN THE ISLAND AND EMBARKED WITH THE MERCHANTS (AFORESAID->AFOR SAID) +7018-75789-0017-172: HE ASKED ME WHENCE THEY CAME AND I SAID TO HIM BY ALLAH O COMMANDER OF THE FAITHFUL I KNOW NOT THE NAME OF THE CITY NOR THE WAY THITHER +7018-75789-0018-173: FOR STATE PROCESSIONS (A->ARE) THRONE IS (SET->SAID) FOR HIM UPON A HUGE ELEPHANT ELEVEN CUBITS HIGH AND UPON THIS HE SITTETH HAVING HIS GREAT LORDS AND OFFICERS AND GUESTS STANDING IN TWO RANKS ON HIS RIGHT HAND AND ON HIS LEFT +7018-75789-0019-174: HIS LETTER HATH SHOWN ME THIS AND AS FOR THE MIGHTINESS OF HIS DOMINION THOU HAST TOLD US WHAT THOU HAST (EYE->DIE) WITNESSED +7018-75789-0020-175: PRESENTLY MY FRIENDS CAME TO ME AND I DISTRIBUTED PRESENTS AMONG MY FAMILY AND GAVE (ALMS AND->ARMS IN) LARGESSE AFTER WHICH I YIELDED MYSELF TO JOYANCE AND ENJOYMENT MIRTH AND (MERRY MAKING->MERRYMAKING) AND FORGOT ALL THAT I HAD SUFFERED +7018-75789-0021-176: SUCH THEN (O->ARE) MY BROTHERS IS THE HISTORY OF WHAT (BEFEL->BEFELL) ME IN MY SIXTH VOYAGE AND TO MORROW INSHALLAH +7018-75789-0022-177: I WILL TELL YOU THE STORY OF MY SEVENTH AND LAST VOYAGE WHICH IS STILL MORE WONDROUS AND MARVELLOUS THAN THAT OF THE FIRST SIX +7018-75789-0023-178: WHEN IT WAS THE FIVE HUNDRED AND SIXTY THIRD NIGHT +7018-75789-0024-179: SHE SAID IT HATH REACHED ME O AUSPICIOUS KING THAT WHEN SINDBAD THE (SEAMAN->SEAMEN) HAD (RELATED->RELIGHTED) THE HISTORY OF WHAT (BEFEL->BEFELL) HIM IN HIS SIXTH VOYAGE AND ALL THE COMPANY HAD DISPERSED SINDBAD THE LANDSMAN WENT HOME AND SLEPT AS OF (WONT->WANT) +7018-75789-0025-180: THE SEVENTH VOYAGE OF (SINDBAD->SINBAD) THE SEAMAN +7018-75789-0026-181: (KNOW->NO) O COMPANY THAT AFTER MY RETURN FROM MY SIXTH VOYAGE WHICH BROUGHT ME ABUNDANT (PROFIT->PROPHET) I RESUMED MY FORMER LIFE (IN->AND) ALL POSSIBLE JOYANCE AND ENJOYMENT AND MIRTH AND MAKING MERRY DAY AND NIGHT AND I TARRIED SOME TIME IN THIS SOLACE AND SATISFACTION TILL MY SOUL BEGAN ONCE MORE TO LONG TO SAIL THE SEAS AND SEE FOREIGN COUNTRIES AND COMPANY WITH MERCHANTS AND (HEAR->HERE) NEW THINGS +7018-75789-0027-182: SO HAVING MADE UP MY MIND I PACKED UP IN BALES A QUANTITY OF PRECIOUS STUFFS SUITED FOR SEA TRADE AND REPAIRED WITH THEM FROM (BAGHDAD->BAGDAD) CITY TO (BASSORAH->BASSORA) TOWN WHERE I FOUND A SHIP READY FOR SEA AND IN HER (A->OUR) COMPANY OF CONSIDERABLE MERCHANTS +7018-75789-0028-183: BUT THE CAPTAIN AROSE AND TIGHTENING HIS GIRDLE TUCKED UP HIS SKIRTS AND AFTER TAKING REFUGE WITH ALLAH FROM SATAN THE (STONED CLOMB->STONE CLIMBED) TO THE MAST HEAD WHENCE HE LOOKED OUT RIGHT AND LEFT AND GAZING AT THE PASSENGERS AND CREW FELL TO (BUFFETING->BUFFET IN) HIS FACE AND PLUCKING OUT HIS BEARD +7018-75789-0029-184: THIS HE (SET->SAID) IN A SAUCER WETTED WITH A LITTLE WATER AND AFTER WAITING A SHORT TIME SMELT AND TASTED IT AND THEN HE TOOK OUT OF THE CHEST A BOOKLET WHEREIN HE READ AWHILE AND SAID WEEPING (KNOW O->NO ARE) YE PASSENGERS THAT IN THIS BOOK IS A MARVELLOUS MATTER DENOTING THAT WHOSO (COMETH HITHER->COME THITHER) SHALL SURELY DIE WITHOUT HOPE OF ESCAPE FOR THAT THIS OCEAN IS CALLED THE SEA OF THE CLIME OF THE 
KING WHEREIN IS (THE->A) SEPULCHRE OF OUR LORD SOLOMON SON OF DAVID ON BOTH BE PEACE +7018-75789-0030-185: A SECOND FISH MADE ITS APPEARANCE (THAN->AND) WHICH WE HAD SEEN (NAUGHT->NOUGHT) MORE MONSTROUS +7018-75789-0031-186: WHEN SUDDENLY A VIOLENT SQUALL OF WIND AROSE AND SMOTE THE SHIP WHICH ROSE OUT OF THE WATER AND SETTLED UPON A GREAT REEF THE HAUNT OF SEA MONSTERS WHERE IT BROKE UP AND FELL ASUNDER INTO PLANKS AND ALL AND EVERYTHING ON BOARD WERE PLUNGED INTO THE SEA +7105-2330-0000-2310: UNFORTUNATELY THERE COULD BE NO DOUBT OR MISCONCEPTION AS (TO PLATTERBAFF'S->THE PLATTERBUFF'S) GUILT +7105-2330-0001-2311: HE HAD NOT ONLY PLEADED GUILTY BUT HAD EXPRESSED HIS INTENTION OF REPEATING HIS ESCAPADE IN OTHER DIRECTIONS AS SOON AS CIRCUMSTANCES PERMITTED THROUGHOUT THE TRIAL HE WAS BUSY EXAMINING A SMALL MODEL OF THE FREE TRADE HALL IN MANCHESTER +7105-2330-0002-2312: (THE JURY->VIRTUARY) COULD NOT POSSIBLY FIND THAT THE PRISONER HAD NOT DELIBERATELY AND INTENTIONALLY BLOWN UP (THE->WE) ALBERT HALL THE QUESTION WAS COULD THEY FIND ANY EXTENUATING CIRCUMSTANCES WHICH WOULD PERMIT OF AN ACQUITTAL +7105-2330-0003-2313: OF COURSE ANY SENTENCE WHICH THE LAW MIGHT (FEEL->FILL) COMPELLED TO INFLICT WOULD BE FOLLOWED BY AN IMMEDIATE PARDON BUT IT WAS HIGHLY DESIRABLE FROM THE (GOVERNMENT'S POINT->GOVERNMENT SPITE) OF VIEW THAT THE NECESSITY FOR SUCH AN EXERCISE OF CLEMENCY SHOULD NOT ARISE +7105-2330-0004-2314: (A HEADLONG->I HAD LONG) PARDON (ON->AND) THE EVE OF A (BYE->BI) ELECTION WITH THREATS OF A HEAVY VOTING DEFECTION IF IT WERE WITHHELD OR EVEN DELAYED WOULD NOT NECESSARILY BE A SURRENDER BUT IT WOULD LOOK LIKE ONE +7105-2330-0005-2315: HENCE THE ANXIETY IN THE CROWDED COURT AND IN THE LITTLE GROUPS GATHERED ROUND THE TAPE MACHINES IN WHITEHALL AND (DOWNING->DAWNING) STREET AND OTHER AFFECTED CENTRES +7105-2330-0006-2316: THE JURY (RETURNED->TURN) FROM CONSIDERING THEIR VERDICT THERE WAS A FLUTTER AN EXCITED MURMUR A DEATHLIKE HUSH +7105-2330-0007-2317: (THE FOREMAN->THEREFORE MAN) DELIVERED HIS MESSAGE +7105-2330-0008-2318: THE JURY FIND THE PRISONER GUILTY OF BLOWING UP THE ALBERT HALL +7105-2330-0009-2319: THE JURY WISH TO ADD A (RIDER->WRITER) DRAWING ATTENTION TO THE FACT THAT A BY ELECTION IS (PENDING->SPENDING) IN THE PARLIAMENTARY DIVISION OF NEMESIS ON HAND +7105-2330-0010-2320: AND (MAY->MADE) THE (LORD->LARD) HAVE MERCY ON THE (POLL->POLE) A JUNIOR (COUNSEL->COUNCIL) EXCLAIMED IRREVERENTLY +7105-2330-0011-2321: FIFTEEN HUNDRED SAID (THE->A) PRIME MINISTER WITH A SHUDDER IT'S TOO HORRIBLE TO THINK OF +7105-2330-0012-2322: OUR MAJORITY LAST TIME WAS ONLY A THOUSAND AND SEVEN +7105-2330-0013-2323: SEVEN THIRTY AMENDED THE PRIME MINISTER WE MUST AVOID ANY APPEARANCE OF PRECIPITANCY +7105-2330-0014-2324: NOT LATER (THAN->THEN) SEVEN THIRTY THEN SAID THE CHIEF (ORGANISER->ORGANIZER) I HAVE PROMISED THE AGENT DOWN THERE THAT HE SHALL BE ABLE TO DISPLAY POSTERS ANNOUNCING (PLATTERBAFF->PLATTER BAFF) IS OUT BEFORE THE (POLL->POLE) OPENS +7105-2330-0015-2325: HE SAID IT WAS (OUR->HER) ONLY CHANCE OF GETTING A TELEGRAM (RADPROP IS->RED RAPPA'S) IN TO NIGHT +7105-2330-0016-2326: (DESPITE->THIS SPITE) THE EARLINESS OF THE HOUR A SMALL CROWD HAD GATHERED IN THE STREET OUTSIDE AND THE HORRIBLE MENACING (TRELAWNEY->TREE LONGER) REFRAIN OF THE FIFTEEN HUNDRED VOTING MEN CAME IN A STEADY MONOTONOUS CHANT +7105-2330-0017-2327: HE EXCLAIMED WON'T GO +7105-2330-0018-2328: HE SAYS HE NEVER HAS LEFT PRISON WITHOUT A (BRASS BAND->BREASTPAND) TO PLAY HIM OUT AND (HE'S NOT->HE SNUG) GOING TO GO WITHOUT 
ONE NOW +7105-2330-0019-2329: SAID (THE->A) PRIME MINISTER WE CAN HARDLY BE SUPPOSED TO SUPPLY A (RELEASED->RELEASE) PRISONER WITH A BRASS BAND HOW ON EARTH COULD WE DEFEND IT ON (THE->*) ESTIMATES +7105-2330-0020-2330: (ANYWAY->AND AWAY) HE WON'T GO UNLESS HE HAS A BAND +7105-2330-0021-2331: (POLL->PAUL) OPENS IN FIVE MINUTES +7105-2330-0022-2332: IS (PLATTERBAFF->FLATHER BATH) OUT YET +7105-2330-0023-2333: IN HEAVEN'S NAME WHY +7105-2330-0024-2334: THE CHIEF (ORGANISER->ORGANIZER) RANG OFF +7105-2330-0025-2335: THIS IS NOT A MOMENT FOR STANDING ON DIGNITY HE OBSERVED BLUNTLY MUSICIANS MUST BE SUPPLIED AT ONCE +7105-2330-0026-2336: CAN'T YOU GET (A->THE) STRIKE PERMIT ASKED THE (ORGANISER->ORGANIZER) +7105-2330-0027-2337: I'LL TRY SAID THE HOME SECRETARY AND WENT TO THE TELEPHONE +7105-2330-0028-2338: EIGHT O'CLOCK STRUCK THE CROWD OUTSIDE CHANTED WITH AN INCREASING VOLUME OF SOUND (WILL VOTE->WITHOUT) THE OTHER WAY +7105-2330-0029-2339: (A->I) TELEGRAM WAS BROUGHT IN +7105-2330-0030-2340: IT WAS FROM THE CENTRAL (COMMITTEE->COME INTO) ROOMS AT NEMESIS +7105-2330-0031-2341: WITHOUT A BAND HE WOULD NOT GO AND THEY HAD NO BAND +7105-2330-0032-2342: (A QUARTER->ACQUIRED THEIR) PAST TEN HALF PAST +7105-2330-0033-2343: HAVE YOU ANY BAND INSTRUMENTS OF AN EASY NATURE TO PLAY +7105-2330-0034-2344: DEMANDED THE CHIEF (ORGANISER->ORGANIZER) OF THE PRISON GOVERNOR DRUMS (CYMBALS->SYMBOLS) THOSE SORT OF THINGS +7105-2330-0035-2345: (THE WARDERS->THOUGH OURS) HAVE A PRIVATE BAND OF THEIR OWN SAID THE GOVERNOR BUT OF COURSE I COULDN'T ALLOW (THE MEN->THEM IN) THEMSELVES +7105-2330-0036-2346: LEND US THE INSTRUMENTS SAID THE CHIEF (ORGANISER->ORGANIZER) +7105-2330-0037-2347: (THE->THEIR) POPULAR SONG OF THE MOMENT REPLIED THE AGITATOR AFTER A MOMENT'S REFLECTION +7105-2330-0038-2348: IT WAS A TUNE THEY HAD ALL HEARD HUNDREDS OF TIMES SO THERE WAS NO DIFFICULTY IN TURNING OUT A PASSABLE IMITATION OF IT TO THE IMPROVISED (STRAINS->TRAINS) OF I (DIDN'T->DON'T) WANT TO DO IT THE (PRISONER STRODE->PRISONERS STROLLED) FORTH TO FREEDOM +7105-2330-0039-2349: THE WORD OF THE SONG HAD REFERENCE IT WAS UNDERSTOOD (TO->THAT) THE INCARCERATING GOVERNMENT AND NOT TO THE DESTROYER OF THE ALBERT HALL +7105-2330-0040-2350: (THE->THIS) SEAT WAS LOST AFTER ALL BY A NARROW (MAJORITY->MATURITY) +7105-2330-0041-2351: THE LOCAL TRADE UNIONISTS TOOK OFFENCE AT THE FACT OF CABINET MINISTERS HAVING PERSONALLY ACTED AS STRIKE BREAKERS AND EVEN THE RELEASE OF (PLATTERBAFF->PLATTERBUFF) FAILED TO PACIFY THEM +7105-2340-0000-2272: WITH THAT NOTORIOUS FAILING OF HIS HE WAS NOT (THE->A) SORT OF PERSON ONE WANTED IN ONE'S HOUSE +7105-2340-0001-2273: WELL THE FAILING STILL EXISTS (DOESN'T IT->DOESNATE) SAID (HER->THE) HUSBAND (OR->ORA) DO YOU SUPPOSE A REFORM OF CHARACTER IS ENTAILED ALONG WITH THE ESTATE +7105-2340-0002-2274: BESIDES CYNICISM APART (HIS BEING->IS VERY) RICH (WILL->WE) MAKE A DIFFERENCE IN THE WAY PEOPLE WILL LOOK AT HIS (FAILING->FEELING) +7105-2340-0003-2275: WHEN A MAN IS ABSOLUTELY WEALTHY NOT MERELY WELL TO DO ALL SUSPICION OF SORDID MOTIVE (NATURALLY->NATURAL) DISAPPEARS THE THING BECOMES MERELY A (TIRESOME->PERSON) MALADY +7105-2340-0004-2276: (WILFRID PIGEONCOTE->WILFRIED DIGEON CODE) HAD SUDDENLY BECOME HEIR TO HIS UNCLE SIR (WILFRID PIGEONCOTE ON->WILL FIDD PIGEON COAT UNDER) THE DEATH OF HIS COUSIN MAJOR WILFRID PIGEONCOTE WHO HAD SUCCUMBED (TO->*) THE AFTER EFFECTS OF (A POLO->APOLLO) ACCIDENT +7105-2340-0005-2277: (A WILFRID PIGEONCOTE->ALFRED FEAJANCOTT) HAD COVERED HIMSELF WITH HONOURS IN THE COURSE OF 
MARLBOROUGH'S CAMPAIGNS AND THE NAME WILFRID HAD BEEN A (BAPTISMAL->BABYSMAL) WEAKNESS IN THE FAMILY EVER SINCE THE NEW HEIR TO THE FAMILY DIGNITY AND ESTATES WAS A YOUNG MAN OF ABOUT FIVE AND TWENTY WHO WAS KNOWN MORE BY (REPUTATION->REPETITION) THAN BY PERSON TO (A WIDE->AVOID) CIRCLE OF COUSINS AND KINSFOLK +7105-2340-0006-2278: AND THE REPUTATION WAS AN UNPLEASANT ONE +7105-2340-0007-2279: FROM HIS LATE (SCHOOLDAYS->SCHOOL DAYS) ONWARD HE HAD BEEN POSSESSED BY AN ACUTE AND OBSTINATE FORM OF (KLEPTOMANIA->CLUBTOMANIA) HE HAD THE ACQUISITIVE INSTINCT OF THE COLLECTOR WITHOUT ANY OF THE COLLECTOR'S DISCRIMINATION +7105-2340-0008-2280: (THE->THIS) SEARCH USUALLY PRODUCED A LARGE AND VARIED YIELD THIS IS FUNNY SAID PETER (PIGEONCOTE->PIGEON BULLET) TO HIS WIFE (SOME->I'M) HALF (HOUR->OUR) AFTER THEIR CONVERSATION HERE'S A TELEGRAM FROM WILFRID SAYING HE'S PASSING THROUGH HERE IN HIS (MOTOR->MOTAR) AND WOULD LIKE TO STOP AND PAY US HIS RESPECTS +7105-2340-0009-2281: (SIGNED WILFRID PIGEONCOTE->SIGN WILFRED PIGEON COAT) +7105-2340-0010-2282: I SUPPOSE (HE'S->IS) BRINGING US A PRESENT (FOR THE->FURTHER) SILVER WEDDING GOOD GRACIOUS +7105-2340-0011-2283: THE TALK FLITTED NERVOUSLY AND HURRIEDLY FROM ONE IMPERSONAL TOPIC TO ANOTHER +7105-2340-0012-2284: IN THE DRAWING ROOM AFTER DINNER THEIR NERVOUSNESS AND AWKWARDNESS INCREASED +7105-2340-0013-2285: OH WE HAVEN'T SHOWN YOU THE SILVER WEDDING PRESENTS SAID MISSUS PETER SUDDENLY AS (THOUGH->THOSE) STRUCK BY A BRILLIANT IDEA (FOR->OF HER) ENTERTAINING THE GUEST HERE THEY ALL ARE +7105-2340-0014-2286: SUCH NICE (USEFUL GIFTS->YEARS FORGIVES) A FEW (DUPLICATES->DEPLICATES) OF COURSE +7105-2340-0015-2287: SEVEN CREAM JUGS PUT IN PETER +7105-2340-0016-2288: WE FEEL THAT WE MUST LIVE (ON CREAM->UNCLEAN) FOR THE REST OF OUR LIVES +7105-2340-0017-2289: OF COURSE SOME OF THEM CAN BE CHANGED +7105-2340-0018-2290: I PUT IT DOWN BY THE (CLARET->CLARA) JUG SAID WILFRID BUSY WITH ANOTHER OBJECT +7105-2340-0019-2291: (VIGILANCE->VICHILLENZ) WAS NOT COMPLETELY CROWNED WITH A SENSE OF VICTORY +7105-2340-0020-2292: AFTER THEY HAD SAID GOOD NIGHT TO THEIR VISITOR MISSUS PETER EXPRESSED HER CONVICTION THAT HE HAD TAKEN SOMETHING +7105-2340-0021-2293: HOW ON EARTH ARE WE TO KNOW SAID PETER THE MEAN PIG HASN'T BROUGHT US A PRESENT AND I'M HANGED IF HE SHALL CARRY ONE OFF +7105-2340-0022-2294: (IT'S->IS) THE ONLY THING TO DO +7105-2340-0023-2295: (WILFRID->WILFRED) WAS (LATE->LAID) IN COMING DOWN TO BREAKFAST AND HIS (MANNER->MANNERS) SHOWED PLAINLY THAT SOMETHING WAS AMISS +7105-2340-0024-2296: (IT'S->YES AND) AN UNPLEASANT THING TO HAVE TO SAY HE BLURTED OUT PRESENTLY BUT I'M AFRAID YOU MUST HAVE A THIEF AMONG YOUR SERVANTS SOMETHING'S BEEN TAKEN OUT OF MY (PORTMANTEAU->APARTMENTAL) +7105-2340-0025-2297: IT WAS A LITTLE PRESENT (FROM->FOR) MY MOTHER AND MYSELF FOR YOUR SILVER WEDDING +7105-2340-0026-2298: I SHOULD HAVE GIVEN IT TO YOU LAST NIGHT AFTER DINNER (ONLY->ON) IT HAPPENED TO BE A (CREAM->QUEEN) JUG AND YOU SEEMED ANNOYED AT HAVING SO MANY DUPLICATES SO I FELT RATHER AWKWARD (ABOUT->OF A) GIVING YOU ANOTHER +7105-2340-0027-2299: (THE SNATCHER->THIS NATURE) HAD BEEN AN ORPHAN (THESE->THIS) MANY YEARS +7105-2340-0028-2300: LADY (ERNESTINE PIGEONCOTE->ERNESTON BEECH AND COLD) HIS MOTHER MOVED IN CIRCLES WHICH WERE ENTIRELY BEYOND THEIR COMPASS OR AMBITIONS AND THE SON WOULD PROBABLY ONE DAY BE AN AMBASSADOR +7105-2340-0029-2301: HUSBAND AND WIFE LOOKED BLANKLY AND DESPERATELY AT ONE ANOTHER +7105-2340-0030-2302: IT WAS MISSUS PETER WHO ARRIVED FIRST AT AN 
INSPIRATION HOW DREADFUL (TO THINK->THE THING) THERE ARE THIEVES IN THE HOUSE WE (KEEP->GIVE) THE DRAWING ROOM LOCKED UP AT NIGHT OF COURSE BUT ANYTHING MIGHT BE CARRIED OFF WHILE WE (ARE->WERE) AT BREAKFAST +7105-2340-0031-2303: SHE ROSE AND WENT OUT HURRIEDLY AS THOUGH TO ASSURE HERSELF THAT THE DRAWING ROOM WAS NOT BEING STRIPPED OF ITS (SILVERWARE->SILVER WARE) AND RETURNED A MOMENT LATER BEARING A CREAM (JUG->CHUG) IN HER HANDS +7105-2340-0032-2304: THE (PIGEONCOTES->PIGEON COATS) HAD TURNED PALER THAN EVER MISSUS PETER HAD A FINAL INSPIRATION +7105-2340-0033-2305: PETER DASHED OUT OF THE ROOM WITH GLAD RELIEF HE HAD LIVED SO LONG DURING THE LAST FEW MINUTES THAT A GOLDEN WEDDING SEEMED WITHIN MEASURABLE DISTANCE +7105-2340-0034-2306: MISSUS PETER TURNED TO HER GUEST WITH CONFIDENTIAL (COYNESS->KINDNESS) +7105-2340-0035-2307: PETER'S LITTLE WEAKNESS (IT RUNS IN THE->A TRANSIENT) FAMILY GOOD LORD +7105-2340-0036-2308: DO YOU MEAN TO SAY HE'S A (KLEPTOMANIAC->CLEPTOMANIA) LIKE COUSIN SNATCHER +7105-2340-0037-2309: BRAVE LITTLE WOMAN SAID PETER WITH A GASP OF RELIEF I COULD NEVER HAVE DONE IT +7902-96591-0000-0: (I AM->I'M) FROM THE CUTTER LYING OFF THE COAST +7902-96591-0001-1: DON'T CRY HE SAID I WAS OBLIGED TO COME +7902-96591-0002-2: AND AND YOU HAVE NOT FOUND OUT ANYTHING CAME IN QUICK FRIGHTENED TONES +7902-96591-0003-3: I WISH YOU WOULD BELIEVE ME THAT I AM IN AS GREAT TROUBLE ABOUT IT AS YOU ARE +7902-96591-0004-4: THAT MY FATHER SIR RISDON (GRAEME HAS->GRAHAM) SMUGGLED GOODS HERE +7902-96591-0005-5: HE COULD NOT HELP IT HE (HATES THE SMUGGLERS->HATE THIS MOTHERS) YOU SHALL NOT TELL +7902-96591-0006-6: PRAY PRAY SAY YOU WILL NOT (ARCHY->ARCHIE) WAS SILENT +7902-96591-0007-7: THEN AS (ARCHY->ARCHIE) STOOD IN THE DARK LITERALLY AGHAST WITH ASTONISHMENT HE HEARD THE FAINT RUSTLING ONCE MORE AND AGAIN ALL WAS SILENT +7902-96591-0008-8: HE LAUGHED BUT IT WAS A CURIOUS KIND OF LAUGH FULL OF VEXATION INJURED (AMOUR PROPRE->AMOPRA) AS THE FRENCH (CALL OUR->CALLER) LOVE OF (OUR->HER) OWN DIGNITY OF WHICH (ARCHIBALD RAYSTOKE->ARQUEBALD RAYSTROKE) IN THE FULL FLUSH OF HIS YOUNG BELIEF IN HIS IMPORTANCE AS A BRITISH OFFICER HAD A PRETTY GOOD STOCK +7902-96591-0009-9: (IT->AND) ALL COMES OF DRESSING UP IN (THIS->THE) STUPID WAY LIKE A ROUGH FISHER LAD +7902-96591-0010-10: COLD WATER CAME ON THIS IDEA DIRECTLY AS HE RECALLED THE FACT THAT THE DARKNESS WAS INTENSE AND CELIA COULD NOT HAVE SEEN HIM +7902-96591-0011-11: I'LL SOON SHOW THEM THAT I AM NOT GOING TO BE PLAYED WITH +7902-96591-0012-12: FOR IT SUDDENLY OCCURRED TO HIM THAT HE WAS NOT ONLY A PRISONER BUT A PRISONER IN THE POWER OF A VERY RECKLESS SET OF PEOPLE (WHO->AND) WOULD STOP AT NOTHING +7902-96591-0013-13: NO HE THOUGHT TO HIMSELF I DON'T BELIEVE THEY WOULD KILL ME BUT THEY WOULD KNOCK ME ABOUT +7902-96591-0014-14: THE (KICK HE->KICKY) HAD RECEIVED WAS A FORETASTE OF WHAT HE MIGHT EXPECT AND AFTER A LITTLE CONSIDERATION HE CAME TO THE CONCLUSION THAT HIS DUTY WAS TO ESCAPE AND GET BACK TO THE CUTTER AS QUICKLY AS HE COULD +7902-96591-0015-15: TO DO THIS HE MUST SCHEME LIE HID TILL MORNING (THEN->THAN) MAKE FOR THE NEAREST POINT (AND->A) SIGNAL FOR HELP UNLESS A BOAT'S CREW WERE ALREADY (SEARCHING->SURGING) FOR HIM HOW TO ESCAPE +7902-96591-0016-16: THE WINDOW WAS BARRED BUT HE WENT TO IT AND TRIED THE BARS ONE BY ONE TO FIND THEM ALL SOLIDLY FITTED INTO THE STONE SILL +7902-96591-0017-17: NEXT MOMENT AS HE FELT HIS WAY ABOUT HIS HAND TOUCHED AN OLD FASHIONED MARBLE MANTELPIECE FIREPLACE CHIMNEY +7902-96591-0018-18: YES IF (OTHER 
WAYS->OTHERWAYS) FAILED HE COULD ESCAPE UP THE CHIMNEY +7902-96591-0019-19: NO THAT WAS TOO BAD HE COULD NOT DO THAT +7902-96591-0020-20: SYMPATHY AND PITY FOR THE DWELLERS IN THE (HOZE->HOSE) WERE COMPLETELY GONE NOW AND HE SET HIS TEETH FAST AND MENTALLY CALLED HIMSELF A (WEAK->WEEK) IDIOT FOR EVER THINKING ABOUT SUCH PEOPLE +7902-96591-0021-21: A NARROW TABLE AGAINST THE WALL IN TWO PLACES +7902-96591-0022-22: HE WENT AND TRIED TO FORCE HIS HEAD THROUGH RECALLING AS HE DID THAT WHERE A PERSON'S HEAD WOULD GO THE REST OF THE BODY WOULD PASS +7902-96591-0023-23: BUT THERE WAS NO CHANCE FOR HIS BODY THERE THE HEAD WOULD NOT GO FIRST +7902-96591-0024-24: A FELLOW WHO WAS SHUT UP IN (PRISON->PRISONED) FOR LIFE MIGHT DO IT HE SAID BUT NOT IN A CASE LIKE THIS +7902-96592-0000-25: SURE (YOU'VE LOOKED->YOU LOOK) ROUND EVERYWHERE BOY YES FATHER QUITE +7902-96592-0001-26: I'M GOING HOME TO BREAKFAST +7902-96592-0002-27: SHALL I COME (TOO->TO) FATHER NO +7902-96592-0003-28: STOP HERE TILL SIR RISDON COMES DOWN AND TELL HIM I'M VERY SORRY THAT WE SHOULD HAVE CLEARED OUT LAST NIGHT ONLY A BORN FOOL SAW JERRY (NANDY'S LOBSTER BOAT->ANDY'S LOBSTERBOAT) COMING INTO THE COVE AND CAME RUNNING TO SAY IT WAS A PARTY FROM THE CUTTER YES FATHER +7902-96592-0004-29: TELL HIM NOT TO BE UNEASY TIS ALL RIGHT AND I'LL HAVE EVERYTHING CLEAR AWAY TO NIGHT +7902-96592-0005-30: THE DULL SOUND OF DEPARTING STEPS AND A LOW WHISTLING SOUND COMING DOWN THROUGH THE SKYLIGHT WINDOW INTO THE CABIN WHERE (ARCHY RAYSTOKE->ARCHIE RAYSTROKE) LAY WITH HIS HEAVY EYELIDS PRESSED DOWN BY SLEEP +7902-96592-0006-31: WHAT A QUEER DREAM HE THOUGHT TO HIMSELF +7902-96592-0007-32: BUT HOW QUEER FOR MISTER (GURR->GORE) TO BE TALKING LIKE THAT (TO ANDREW TEAL->DANGER TEALE) THE BOY WHO (HELPED->HELPS) THE COOK +7902-96592-0008-33: AND WHY DID ANDY CALL MISTER (GURR->GORE) FATHER +7902-96592-0009-34: THERE WAS AN INTERVAL OF THINKING OVER THIS (KNOTTY->NAUGHTY) QUESTION DURING WHICH THE LOW WHISTLING WENT ON +7902-96592-0010-35: AND I'M HUNGRY TOO (TIME->TOM) I WAS UP I SUPPOSE +7902-96592-0011-36: NO HE WAS NOT DREAMING FOR HE WAS LOOKING OUT ON THE SEA OVER WHICH A FAINT MIST HUNG LIKE WREATHS OF SMOKE +7902-96592-0012-37: WHAT DID THEY SAY FALSE ALARM TELL (SIR RISDON->SERVANTS AND) THEY WOULD CLEAR ALL AWAY TO NIGHT SEE IF ANYTHING HAD BEEN LEFT ABOUT LOBSTER (BOAT->WROTE) +7902-96592-0013-38: ONCE OUT OF THAT ROOM HE COULD RAN AND BY DAYLIGHT THE (SMUGGLERS DARE->SMOGG WAS DARED) NOT HUNT HIM DOWN +7902-96592-0014-39: OH THOSE BARS HE MENTALLY EXCLAIMED AND HE WAS ADVANCING (TOWARD->TOWARDS) THEM WHEN JUST AS HE DREW NEAR THERE WAS A RUSTLING NOISE UNDER THE WINDOW A COUPLE OF HANDS SEIZED THE BARS THERE WAS A SCRATCHING OF BOOT TOES AGAINST STONE WORK AND RAM'S FACE APPEARED TO GAZE INTO THE ROOM BY INTENTION BUT INTO THE ASTONISHED COUNTENANCE OF THE YOUNG MIDSHIPMAN INSTEAD +7902-96592-0015-40: (RAM->ROOM) WAS THE FIRST TO RECOVER FROM HIS SURPRISE +7902-96592-0016-41: HULLO HE SAID WHO ARE YOU +7902-96592-0017-42: GO ROUND AND OPEN THE DOOR I WAS SHUT IN LAST NIGHT BY MISTAKE +7902-96592-0018-43: I SAW YOU LAST NIGHT AND WONDERED WHOSE BOY YOU WAS +7902-96592-0019-44: IT WAS (YOU->YOUR) FATHER KICKED FOR SHIRKING AND MY WELL I HARDLY KNOWED YOU +7902-96592-0020-45: NONSENSE +7902-96592-0021-46: WON'T DO SAID RAM (GRINNING->GRINNIE) +7902-96592-0022-47: THINK I DON'T KNOW YOU MISTER ORFICER +7902-96592-0023-48: WON'T DO SAID (RAM->RUM) QUICKLY I KNOW YOU +7902-96592-0024-49: (BEEN->THEN) PLAYING THE SPY THAT'S WHAT YOU'VE BEEN DOING 
WHO LOCKED YOU IN +7902-96592-0025-50: (ARCHY->ARCHIE) STEPPED BACK TO THE DOOR LISTENING BUT THERE WAS NOT A SOUND +7902-96592-0026-51: HE HAS GONE TO GIVE THE ALARM THOUGHT THE PRISONER AND HE LOOKED EXCITEDLY ROUND FOR A WAY OF ESCAPE +7902-96592-0027-52: NOTHING BUT THE CHIMNEY PRESENTED ITSELF +7902-96592-0028-53: A HAPPY INSPIRATION HAD COME AND PLACING ONE HAND UPON HIS (BREAST->CHEST) HE THRUST IN THE OTHER GAVE A TUG AND DREW OUT HIS LITTLE CURVED DIRK GLANCED AT THE EDGE RAN TO THE WINDOW AND BEGAN TO CUT AT ONE OF THE BARS (LABOUR->LABOR) IN VAIN +7902-96592-0029-54: HE DIVIDED THE (PAINT->PAIN) AND PRODUCED A FEW SQUEAKS (AND->IN) GRATING SOUNDS AS HE (REALISED->REALIZED) THAT THE ATTEMPT WAS MADNESS +7902-96592-0030-55: THE RESULT WAS NOT VERY SATISFACTORY BUT SUFFICIENTLY SO TO MAKE HIM ESSAY THE BAR OF THE WINDOW ONCE MORE PRODUCING A GRATING (EAR ASSAILING->IRRESCELLING) SOUND AS HE FOUND THAT NOW HE DID MAKE A LITTLE IMPRESSION SO LITTLE THOUGH THAT THE PROBABILITY WAS IF HE KEPT ON WORKING WELL FOR TWENTY FOUR HOURS HE WOULD NOT GET THROUGH +7902-96592-0031-56: BUT AT THE END OF FIVE MINUTES HE STOPPED AND THRUST BACK THE (DIRK->DARK) INTO ITS SHEATH +7902-96592-0032-57: NO I CAN'T PART WITH THAT HA HA (HA->*) LAUGHED THE BOY JEERINGLY +7902-96592-0033-58: (BUT I'LL->BLOW) YES I'LL GIVE YOU A GUINEA IF YOU WILL LET ME OUT +7902-96592-0034-59: (GUINEA SAID->GUINEAS OF) THE BOY THINK (I'D->I'LL) DO IT FOR A GUINEA WELL THEN (TWO->TOO) +7902-96592-0035-60: BE QUICK THERE'S A GOOD FELLOW I WANT TO GET AWAY AT ONCE +7902-96592-0036-61: NOT YOU ONLY A SHAM +7902-96592-0037-62: WHY (YOUR->YOU'RE) CLOTHES DON'T FIT YOU AND YOUR (CAP'S->CAPS) PUT ON ALL (SKEW REW->SKEERO) +7902-96592-0038-63: NEVER MIND ABOUT THAT LET ME OUT OF THIS PLACE +7902-96592-0039-64: I TOLD YOU A FISHER BOY CRIED (ARCHY->ARCHIE) IMPATIENTLY BUT TRYING NOT TO OFFEND HIS VISITOR WHO POSSESSED THE POWER OF CONFERRING FREEDOM BY SPEAKING SHARPLY +7902-96592-0040-65: NOT YOU LOOK LIKE A WILD BEAST IN A CAGE LIKE A MONKEY YOU INSOLENT +7902-96592-0041-66: (ARCHY->ARCHIE) CHECKED HIMSELF (AND->IN) THE BOY LAUGHED +7902-96592-0042-67: IT WAS YOUR TURN YESTERDAY IT'S MINE TO DAY WHAT A GAME +7902-96592-0043-68: YOU LAUGHED AND FLEERED AT ME WHEN I WAS ON THE CUTTER'S DECK +7902-96592-0044-69: I SAY YOU DO LOOK (*->LIKE) A (RUM UN->ROMAN) JUST LIKE A BIG MONKEY IN A SHOW +7902-96592-0045-70: RAM SHOWED HIS WHITE TEETH AS HE BURST OUT WITH A LONG LOW FIT OF LAUGHTER +7902-96592-0046-71: (YOU ROPE'S END->EURE HOPES AND) ME HE SAID +7902-96592-0047-72: WHY I COULD TIE YOU UP IN A KNOT AND HEAVE YOU OFF THE CLIFF ANY DAY WHAT A GAME +7902-96592-0048-73: BIT OF (A MIDDY->AMIDDY) FED ON (*->A) SALT TACK AND (WEEVILLY->WEEBLY) BISCUIT TALK OF GIVING ME (ROPE'S END->ROPES AND) +7902-96592-0049-74: ONCE MORE WILL YOU COME AND LET ME OUT NO +7902-96592-0050-75: TO HIS ASTONISHMENT THE BOY DID NOT FLINCH BUT THRUST HIS OWN ARMS THROUGH (PLACING->REPLACING) THEM ABOUT THE MIDDY'S WAIST CLENCHING HIS (HANDS->HAND) BEHIND AND UTTERING A SHARP WHISTLE +7902-96594-0000-76: (SEEMED IN GOOD SPIRITS->SEEMING EXPERIENCE) LAST NIGHT MISTER GURR (EH->HEY) +7902-96594-0001-77: YES SIR BUT HE MAY TURN (UP ON->UPON) THE CLIFF AT ANY MOMENT +7902-96594-0002-78: YES MEN QUITE READY YES SIR +7902-96594-0003-79: (THAT'S RIGHT->IT'S WRITTEN) OF COURSE WELL ARMED +7902-96594-0004-80: SOON AS THE SIGNAL COMES WE SHALL PUSH OFF +7902-96594-0005-81: (AWKWARD BIT O->OF HER BITTER) COUNTRY SIR SIX MILES ROW (BEFORE->FOR) YOU CAN FIND A PLACE TO LAND 
+7902-96594-0006-82: SO SHALL WE YET SIR +7902-96594-0007-83: YOU DON'T THINK MISTER (GURR->GIRL) THAT THEY WOULD DARE TO INJURE HIM IF HE WAS SO UNLUCKY AS TO BE CAUGHT +7902-96594-0008-84: WELL SIR SAID THE MASTER HESITATING SMUGGLERS (ARE->OR) SMUGGLERS +7902-96594-0009-85: CERTAINLY SIR SMUGGLERS ARE SMUGGLERS INDEED +7902-96594-0010-86: (BEG->THEY) PARDON SIR DIDN'T MEAN ANY HARM +7902-96594-0011-87: (I'M->AND) GETTING VERY ANXIOUS ABOUT MISTER (RAYSTOKE->RAYSTROKE) START AT ONCE SIR +7902-96594-0012-88: NO (WAIT->WHERE) ANOTHER (*->AND) HALF HOUR +7902-96594-0013-89: VERY ILL (ADVISED->ADVICE) THING TO DO +7902-96594-0014-90: (THEN->THAT) I MUST REQUEST THAT YOU WILL NOT MAKE IT AGAIN VERY TRUE +7902-96594-0015-91: (AWK WARD->AWKWARD) MISTER (GURR->GARR) AWKWARD +7902-96594-0016-92: YES SIR OF COURSE +7902-96594-0017-93: SAY (AWK WARD->AWKWARD) IN (*->THE) FUTURE NOT (AWK'ARD->UPWARD) +7902-96594-0018-94: I MEAN ALL ALONE BY MYSELF SIR +7902-96594-0019-95: WHAT FOR THERE AREN'T A PUBLIC HOUSE FOR TEN MILES DIDN'T MEAN THAT +7902-96594-0020-96: THEN WHAT DID (YOU MEAN->JULIA) SPEAK OUT AND DON'T DO THE DOUBLE SHUFFLE ALL OVER MY CLEAN DECK NO SIR +7902-96594-0021-97: (HOPPING->HAVING) ABOUT (LIKE A CAT->THE GUQUET) ON HOT BRICKS +7902-96594-0022-98: NOW THEN WHY DO YOU WANT TO GO ASHORE +7902-96594-0023-99: (BEG->THEY) PARDON DIDN'T MEAN (NOWT->THAT) SIR SAID THE SAILOR TOUCHING HIS FORELOCK +7902-96594-0024-100: YES SIR SAID THE (MAN HUMBLY->MADAMELY) SHALL I GO AT ONCE SIR +7902-96594-0025-101: NO WAIT +7902-96594-0026-102: (KEEP A->HE WAS) SHARP LOOK OUT ON THE CLIFF (TO SEE IF->AS EVEN) MISTER (RAYSTOKE->RAE STROKE) IS MAKING SIGNALS FOR A BOAT +7902-96594-0027-103: HE SWUNG ROUND WALKED AFT AND BEGAN SWEEPING (THE SHORE->ASHORE) AGAIN WITH HIS GLASS WHILE THE MASTER AND DICK EXCHANGED GLANCES WHICH MEANT A GREAT DEAL +7902-96594-0028-104: AT LAST THE LITTLE (LIEUTENANT->TANNIC) COULD BEAR THE ANXIETY NO LONGER +7902-96594-0029-105: (PIPE AWAY THE MEN TO->PAPER WEAR THEM INTO) THAT BOAT THERE HE SAID AND AS THE CREW SPRANG IN +7902-96594-0030-106: NOW MISTER (GURR->GURG) HE SAID I'M ONLY GOING TO SAY ONE THING TO YOU IN THE WAY OF INSTRUCTIONS YES SIR +7902-96594-0031-107: BEG PARDON SIR SAID THE MASTER DEPRECATINGLY +7902-96594-0032-108: STEADY MY (LADS->LAD) STEADY CRIED THE MASTER KEEP STROKE AND THEN HE BEGAN TO MAKE PLANS AS TO HIS FIRST PROCEEDINGS (ON->I'M) GETTING ASHORE +7902-96595-0000-109: SAY (MESTER GURR->MISTER GIRK) SAID DICK AFTER ONE OF THESE SEARCHES HE WOULDN'T RUN AWAY WHAT +7902-96595-0001-110: MISTER RAYSTOKE SIR DON'T BE A FOOL +7902-96595-0002-111: WHAT (CHUCKED HIM OFF->TECHTAMORPH) YONDER +7902-96595-0003-112: (GURR->GER) GLANCED ROUND TO SEE IF THE MEN WERE LOOKING AND THEN SAID (RATHER->WHETHER) HUSKILY (BUT->BE) KINDLY +7902-96595-0004-113: AH EJACULATED DICK SADLY +7902-96595-0005-114: SAY (MESTER GURR SIR->MISTER GURSER) WHICH THANKFUL I AM (TO->FOR) YOU FOR SPEAKING SO BUT YOU DON'T REALLY THINK AS HE HAS COME TO HARM +7902-96595-0006-115: I HOPE NOT DICK I HOPE NOT BUT (SMUGGLERS->SMOKE WAS) DON'T STAND AT ANYTHING SOMETIMES +7902-96595-0007-116: I DO ASSURE YOU THERE'S NOTHING HERE BUT WHAT YOU MAY SEE +7902-96595-0008-117: IF (YOU'D->YOU) LET ME FINISH YOU'D KNOW SAID (GURR GRUFFLY->GURG ROUGHLY) ONE OF OUR BOYS IS MISSING SEEN (HIM->EM) UP HERE +7902-96595-0009-118: BOY (BOUT->ABOUT) SEVENTEEN WITH A RED CAP NO SIR INDEED (I'VE NOT->OF NONE) +7902-96595-0010-119: DON'T KNOW AS HE HAS BEEN SEEN ABOUT HERE DO YOU SAID (GURR->GIRL) LOOKING AT HER 
SEARCHINGLY NO SIR +7902-96595-0011-120: IF SHE KNEW EVIL HAD COME TO THE POOR LAD HER FACE WOULD TELL TALES LIKE PRINT +7902-96595-0012-121: I SAID A LAD BOUT SEVENTEEN (IN->AND) A RED (CAP LIKE->CATHOLIC) YOURS SAID (GURR->GREW) VERY SHORTLY +7902-96595-0013-122: THE MAN SHOOK HIS HEAD AND STARED AS IF HE DIDN'T HALF UNDERSTAND THE DRIFT OF (WHAT WAS->ALL THIS) SAID +7902-96595-0014-123: HERE (MY LAD->MILAD) WHERE'S YOUR MASTER +7902-96595-0015-124: (EH->THEY) I SAY WHERE'S YOUR MASTER +7902-96595-0016-125: (GURR->GERT) TURNED AWAY IMPATIENTLY AGAIN AND SIGNING TO HIS MEN TO FOLLOW THEY ALL BEGAN TO TRAMP UP THE STEEP TRACK LEADING TOWARD THE (HOZE->HOSE) WITH THE (RABBITS->RABBIT) SCUTTLING AWAY AMONG THE (FURZE->FIRS) AND SHOWING THEIR WHITE COTTONY TAILS FOR A MOMENT AS THEY DARTED DOWN INTO THEIR HOLES +7902-96595-0017-126: I DUNNO MUTTERED DICK AND A (MAN->MEN) CAN'T BE SURE +7902-96595-0018-127: (GURR->DUR) SALUTED (AND STATED->INSTEAD OF) HIS BUSINESS WHILE THE BARONET WHO HAD TURNED (SALLOWER AND->SALARY) MORE (CAREWORN->CARE MORE) THAN HIS LOT DREW A BREATH (*->OF) FULL OF RELIEF ONE OF YOUR (SHIP BOYS->VOYS) HE SAID +7902-96595-0019-128: A LAD LOOKING LIKE A COMMON SAILOR AND WEARING A RED CAP NO SAID SIR RISDON +7902-96595-0020-129: I HAVE SEEN NO ONE ANSWERING TO THE DESCRIPTION HERE +7902-96595-0021-130: (BEG PARDON SIR BUT CAN->BIG PARTICER BECAUSE) YOU AS (A GENTLEMAN->GENTLEMEN) ASSURE ME THAT HE IS NOT HERE CERTAINLY SAID SIR RISDON +7902-96595-0022-131: SURELY CRIED SIR RISDON EXCITEDLY +7902-96595-0023-132: SIR (RISDON->RICHARD) WAS SILENT +7902-96595-0024-133: LADY (GRAEME->GRAHAM) LOOKED GHASTLY +7902-96595-0025-134: YOU DO NOT KNOW NO +7975-280057-0000-1008: THESE HATREDS WERE SOON TO MAKE TROUBLE FOR ME OF WHICH I HAD NEVER DREAMED +7975-280057-0001-1009: HENRY WASHINGTON YOUNGER MY FATHER REPRESENTED JACKSON COUNTY THREE TIMES IN THE LEGISLATURE AND WAS ALSO (*->A) JUDGE OF THE COUNTY COURT +7975-280057-0002-1010: MY MOTHER WHO WAS (BURSHEBA FRISTOE->PERCEIVER FOR STOVE) OF INDEPENDENCE WAS (THE->A) DAUGHTER OF RICHARD (FRISTOE->CRISTO) WHO FOUGHT UNDER GENERAL ANDREW JACKSON (AT->THAT) NEW ORLEANS JACKSON COUNTY HAVING BEEN SO NAMED (AT->IN) MY GRANDFATHER (FRISTOE'S->FRISTOWS) INSISTENCE +7975-280057-0003-1011: I CANNOT REMEMBER WHEN I DID NOT KNOW HOW TO SHOOT +7975-280057-0004-1012: MY BROTHER JAMES WAS BORN JANUARY FIFTEENTH EIGHTEEN FORTY EIGHT JOHN IN EIGHTEEN FIFTY ONE AND ROBERT IN DECEMBER EIGHTEEN FIFTY THREE +7975-280057-0005-1013: MY ELDEST BROTHER RICHARD DIED IN EIGHTEEN SIXTY +7975-280057-0006-1014: MY FATHER WAS IN THE EMPLOY OF THE UNITED STATES GOVERNMENT AND HAD THE (MAIL->MALE) CONTRACT FOR FIVE HUNDRED MILES +7975-280057-0007-1015: HE HAD STARTED BACK TO HARRISONVILLE IN A BUGGY BUT WAS WAYLAID ONE MILE SOUTH OF (WESTPORT->WESTBURT) A SUBURB OF (KANSAS->KANSA) CITY AND BRUTALLY MURDERED FALLING OUT OF HIS BUGGY INTO THE ROAD WITH THREE MORTAL BULLET WOUNDS +7975-280057-0008-1016: (MISSUS->MISS) WASHINGTON (WELLS->WALES) AND HER SON SAMUEL ON THE ROAD HOME FROM KANSAS CITY TO LEE'S SUMMIT RECOGNIZED THE BODY AS THAT OF MY FATHER +7975-280057-0009-1017: (MISSUS WELLS->MISS WELL) STAYED TO GUARD THE REMAINS WHILE HER (SON->SOON) CARRIED THE NEWS OF THE MURDER TO COLONEL PEABODY OF THE FEDERAL COMMAND WHO WAS THEN (IN CAMP->ENCAMP) AT (KANSAS->KANS OF) CITY +7975-280057-0010-1018: (MISSUS->MISS) MC (CORKLE->CORKEL) JUMPED FROM THE WINDOW OF THE HOUSE AND ESCAPED +7975-280057-0011-1019: AS THE RAIDERS (LEFT->LIVED) ONE OF THEM SHOUTED 
+7975-280057-0012-1020: NOW (OLD->*) LADY CALL ON YOUR PROTECTORS WHY DON'T YOU CALL ON (COLE->CO) YOUNGER NOW +7975-280057-0013-1021: EVERY KNOT REPRESENTED A HUMAN LIFE +7975-280057-0014-1022: BUT SHE FAILED TO FIND THE COMFORT SHE SOUGHT FOR ANNOYANCES CONTINUED IN A MORE AGGRAVATED FORM +7975-280057-0015-1023: TWO MONTHS AFTER THIS INCIDENT THE SAME PERSECUTORS AGAIN ENTERED OUR HOME IN THE (DEAD->DAY) OF THE NIGHT AND AT THE POINT OF A PISTOL TRIED TO FORCE MY MOTHER TO SET FIRE TO HER OWN HOME +7975-280057-0016-1024: I HAVE ALWAYS FELT THAT THE EXPOSURE TO WHICH SHE WAS SUBJECTED ON THIS CRUEL JOURNEY TOO HARD EVEN FOR A MAN TO TAKE WAS (THE->A) DIRECT CAUSE OF HER DEATH +7975-280057-0017-1025: FROM (HARRISONVILLE->HARRISON BILL) SHE WENT TO (WAVERLY->WAVERLEY) WHERE SHE WAS (HOUNDED->HOUNDY) CONTINUALLY +7975-280057-0018-1026: ONE OF THE CONDITIONS UPON WHICH HER LIFE WAS SPARED WAS THAT SHE WOULD REPORT AT (LEXINGTON WEEKLY->LESSINGTON WEAKLY) +7975-280057-0019-1027: ONE OF MY OLD SCHOOL TEACHERS WHOM I HAVE NEVER SEEN SINCE THE SPRING (OR->OF) SUMMER OF EIGHTEEN SIXTY TWO IS STEPHEN B ELKINS SENATOR FROM WEST VIRGINIA +7975-280057-0020-1028: WHEN I WAS TAKEN PRISONER I EXPECTED TO BE SHOT WITHOUT CEREMONY +7975-280063-0000-1058: WE TOOK THE OATH PERHAPS THREE HUNDRED OF US DOWN ON LUTHER MASON'S FARM A FEW MILES FROM WHERE I NOW (WRITE->RIGHT) WHERE COLONEL (HAYS->HAYES) HAD ENCAMPED AFTER INDEPENDENCE +7975-280063-0001-1059: (BOONE MUIR->WHOM YOU'RE) AND MYSELF (MET COFFEE->MAKE COUGHING) AND THE REST BELOW ROSE HILL ON GRAND RIVER +7975-280063-0002-1060: ACCORDINGLY I WAS SHORTLY AWAKENED TO ACCOMPANY HIM (TO LONE->THE LONG) JACK WHERE HE WOULD PERSONALLY MAKE KNOWN THE SITUATION TO THE OTHER COLONELS +7975-280063-0003-1061: FOSTER HAD NEARLY ONE THOUSAND (CAVALRYMEN->CAVERNMENT) AND TWO PIECES OF (RABB'S->RABS) INDIANA BATTERY THAT HAD ALREADY MADE FOR ITSELF A NAME FOR HARD FIGHTING +7975-280063-0004-1062: COME IN COLONEL (HAYS->HAYES) EXCLAIMED COLONEL (COCKRELL->COCKROL) +7975-280063-0005-1063: I THINK HE'LL BE RATHER (TOUGH MEAT->TO HAVE ME) FOR BREAKFAST I REPLIED HE MIGHT BE ALL (RIGHT FOR->RIPER) DINNER +7975-280063-0006-1064: (JACKMAN->JACKMEN) WITH A PARTY OF THIRTY SEASONED MEN CHARGED THE INDIANA GUNS AND CAPTURED THEM BUT MAJOR (FOSTER LED->FOXTER LIT) A GALLANT CHARGE AGAINST THE INVADERS AND RECAPTURED THE PIECES +7975-280063-0007-1065: WE WERE OUT OF AMMUNITION AND WERE HELPLESS HAD THE FIGHT BEEN PRESSED +7975-280063-0008-1066: THEY DID MARK MY CLOTHES IN ONE OR TWO PLACES HOWEVER +7975-280063-0009-1067: (MAJOR->MEASURE) FOSTER IN A LETTER TO (JUDGE GEORGE M BENNETT->JOE GEORGIUM BENNET) OF MINNEAPOLIS SAID +7975-280063-0010-1068: I WAS TOLD BY SOME OF OUR MEN FROM THE WESTERN BORDER OF THE STATE THAT THEY RECOGNIZED (THE->A) DARING (YOUNG RIDER AS COLE->OWN WRITER'S COAL) YOUNGER +7975-280063-0011-1069: ABOUT NINE THIRTY A M I WAS SHOT DOWN +7975-280063-0012-1070: THE WOUNDED OF BOTH FORCES WERE GATHERED UP AND WERE PLACED IN HOUSES +7975-280076-0000-1029: ALTHOUGH EVERY BOOK (PURPORTING->REPORTING) TO (NARRATE THE->THEIR EIGHTH) LIVES OF THE YOUNGER BROTHERS HAS TOLD (OF->THAT) THE LIBERTY ROBBERY AND IMPLIED THAT WE HAD A PART IN IT THE YOUNGERS WERE NOT SUSPECTED AT THAT TIME NOR (FOR A LONG->PROLONG) TIME AFTERWARD +7975-280076-0001-1030: IT WAS CLAIMED BY PEOPLE OF LIBERTY THAT THEY (POSITIVELY->POSIT TILL WE) RECOGNIZED AMONG THE ROBBERS (OLL SHEPHERD->ALL SHEPARD) RED (MONKERS->MOCKERS) AND BUD (PENCE->PANTS) WHO HAD SEEN SERVICE WITH 
(QUANTRELL->QUANTRAIL) +7975-280076-0002-1031: THIS (RAID->RAY) WAS ACCOMPANIED BY (BLOODSHED->BLOCHHEAD) JUDGE MC (LAIN->LANE) THE BANKER BEING SHOT THOUGH NOT FATALLY +7975-280076-0003-1032: (NO->THOUGH) WARRANT WAS ISSUED FOR THE YOUNGERS BUT SUBSEQUENT HISTORIANS HAVE INFERENTIALLY AT LEAST ACCUSED US OF TAKING PART BUT AS I SAID BEFORE THERE IS NO TRUTH IN THE ACCUSATION +7975-280076-0004-1033: JUNE THIRD EIGHTEEN SEVENTY ONE (OBOCOCK BROTHERS->OBEK BROTHER'S) BANK AT (CORYDON->CROYD AND) IOWA WAS ROBBED OF FORTY THOUSAND DOLLARS BY SEVEN MEN IN BROAD DAYLIGHT +7975-280076-0005-1034: IT WAS (CHARGED->CHARGE) THAT (ARTHUR MC->AWFUL MAC) COY OR A (C MC COY AND->SEMICA) MYSELF HAD BEEN PARTICIPANTS IN THE GAD'S HILL AFFAIR AND THE TWO STAGE ROBBERIES +7975-280076-0006-1035: THE PARTS OF THIS LETTER NOW (RELEVANT->ELEVANT) ARE AS FOLLOWS +7975-280076-0007-1036: YOU MAY USE THIS LETTER IN YOUR OWN WAY +7975-280076-0008-1037: I WILL GIVE YOU THIS OUTLINE AND SKETCH OF MY WHEREABOUTS AND ACTIONS AT THE TIME OF CERTAIN (ROBBERIES->ROBBERS) WITH WHICH I AM CHARGED +7975-280076-0009-1038: (AT->IT'S) THE TIME OF THE (GALLATIN->YELLED AND) BANK ROBBERY I WAS GATHERING CATTLE (IN->AND) ELLIS COUNTY TEXAS (CATTLE THAT I BOUGHT->KETTLET ABOUT) FROM (PLEAS TAYLOR->PLAYERS TAILOR) AND RECTOR +7975-280076-0010-1039: THIS CAN BE PROVED BY BOTH OF THEM ALSO BY (SHERIFF BARKLEY->SHARE PARKLEY) AND FIFTY OTHER RESPECTABLE MEN OF THAT COUNTY +7975-280076-0011-1040: (I BROUGHT->ABRUPT) THE CATTLE (TO->THE) KANSAS (THAT->SET) FALL AND REMAINED IN SAINT CLAIR COUNTY UNTIL FEBRUARY +7975-280076-0012-1041: (I->AND) THEN WENT TO (ARKANSAS AND->OUR CONCERN) RETURNED TO SAINT CLAIR COUNTY ABOUT THE FIRST OF MAY +7975-280076-0013-1042: (I->AND) WENT TO KANSAS WHERE (OUR CATTLE->A KETTLE) WERE IN (WOODSON->WOODS AND) COUNTY AT COLONEL (RIDGE'S->RIDGES) +7975-280076-0014-1043: DURING THE SUMMER I WAS EITHER IN SAINT CLAIR (JACKSON->*) OR (*->JACK'S UNDER) KANSAS BUT AS THERE WAS NO ROBBERY COMMITTED THAT SUMMER IT MAKES NO DIFFERENCE WHERE I WAS +7975-280076-0015-1044: (I->AND) WENT THROUGH INDEPENDENCE AND FROM THERE TO ACE (WEBB'S->WHIPS) +7975-280076-0016-1045: THERE I TOOK DINNER AND THEN WENT TO DOCTOR L W (TWYMAN'S->TWINS) +7975-280076-0017-1046: OUR BUSINESS THERE WAS TO SEE E (P->*) WEST HE WAS NOT AT HOME BUT THE FAMILY WILL REMEMBER THAT WE WERE THERE +7975-280076-0018-1047: WE CROSSED ON THE BRIDGE (STAYED IN->STATING) THE CITY ALL NIGHT AND THE NEXT MORNING WE RODE UP (THROUGH->TO) THE CITY +7975-280076-0019-1048: (I MET->AMID) SEVERAL OF MY FRIENDS AMONG THEM WAS BOB (HUDSPETH->HUSBITH) +7975-280076-0020-1049: WE WERE NOT ON (GOOD->THE) TERMS AT THE TIME NOR HAVE WE BEEN FOR SEVERAL YEARS +7975-280076-0021-1050: POOR JOHN HE HAS BEEN HUNTED DOWN AND SHOT LIKE A WILD BEAST AND NEVER WAS A BOY MORE INNOCENT +7975-280076-0022-1051: DOCTOR L (LEWIS->LOOSE) WAS HIS PHYSICIAN +7975-280076-0023-1052: THERE WERE FIFTY OR A HUNDRED PERSONS THERE WHO WILL TESTIFY IN ANY COURT THAT JOHN AND I WERE THERE +7975-280076-0024-1053: (HELVIN->HELD AND) FICKLE AND WIFE OF GREENTON VALLEY WERE ATTENDING THE SPRINGS AT THAT TIME AND EITHER OF THEM WILL TESTIFY TO THE ABOVE FOR JOHN AND I (SAT->SET) IN FRONT OF MISTER SMITH WHILE HE WAS PREACHING AND WAS IN HIS COMPANY FOR A FEW MOMENTS TOGETHER WITH HIS WIFE AND MISTER AND (MISSUS->MISS) FICKLE AFTER (*->THE) SERVICE +7975-280076-0025-1054: ABOUT THE LAST OF DECEMBER EIGHTEEN SEVENTY THREE I ARRIVED IN (CARROLL PARISH->CAROL PARRISH) LOUISIANA +7975-280076-0026-1055: I STAYED 
THERE UNTIL THE EIGHTH OF FEBRUARY EIGHTEEN SEVENTY FOUR +7975-280076-0027-1056: I HAD NOT HEARD OF THAT WHEN I WROTE THE LETTER OF EIGHTEEN SEVENTY FOUR AND TO CORRECT ANY MISAPPREHENSION THAT MIGHT BE CREATED BY OMITTING IT I WILL SAY THAT AT (THAT->THE) TIME I WAS AT (NEOSHO->NEOSHIL OF) KANSAS WITH A DROVE OF CATTLE WHICH I SOLD TO MAJOR RAY +7975-280076-0028-1057: IT WAS IMMEDIATELY FOLLOWING THE ROCK ISLAND ROBBERY AT (ADAIR->EIGHT AIR) IOWA THAT (THERE->THEIR) FIRST APPEARED A DELIBERATE ENLISTMENT OF SOME LOCAL PAPERS IN MISSOURI TO CONNECT US WITH THIS ROBBERY +7975-280084-0000-1090: I URGED ON THE BOYS (THAT->AT) WHATEVER (HAPPENED->HAPPEN) WE SHOULD NOT SHOOT ANY ONE +7975-280084-0001-1091: WHEN MILLER AND I CROSSED THE BRIDGE THE THREE WERE ON SOME DRY (GOODS->GOOD) BOXES AT THE CORNER NEAR THE BANK AND AS SOON AS THEY SAW US WENT RIGHT INTO THE BANK INSTEAD OF WAITING FOR US TO GET THERE +7975-280084-0002-1092: WHEN WE CAME UP I (TOLD->TELL) MILLER TO SHUT THE BANK DOOR WHICH THEY HAD LEFT OPEN IN THEIR HURRY +7975-280084-0003-1093: J S ALLEN WHOSE (HARDWARE->HARDWORTH) STORE WAS NEAR TRIED TO GO INTO THE BANK BUT MILLER ORDERED HIM AWAY AND HE RAN (AROUND->ROUND) THE CORNER SHOUTING +7975-280084-0004-1094: GET YOUR GUNS BOYS THEY'RE ROBBING THE BANK +7975-280084-0005-1095: AND I CALLED TO HIM TO GET INSIDE AT THE SAME TIME FIRING A PISTOL (SHOT->SHOUT) IN THE AIR AS A SIGNAL TO THE THREE BOYS AT THE BRIDGE THAT WE HAD BEEN DISCOVERED +7975-280084-0006-1096: ALMOST AT THIS INSTANT I HEARD A PISTOL SHOT IN THE BANK +7975-280084-0007-1097: (CHADWELL->SAID WELL) WOODS AND JIM RODE UP AND JOINED US SHOUTING TO (*->THE) PEOPLE IN THE STREET TO GET INSIDE AND FIRING THEIR PISTOLS TO EMPHASIZE THEIR COMMANDS +7975-280084-0008-1098: IF ANY OF OUR PARTY SHOT HIM IT MUST HAVE BEEN WOODS +7975-280084-0009-1099: MEANTIME THE STREET WAS GETTING UNCOMFORTABLY HOT +7975-280084-0010-1100: EVERY TIME I SAW ANY ONE WITH A BEAD ON ME I WOULD DROP OFF MY HORSE AND (TRY->TROUT) TO DRIVE THE (SHOOTER->SHEETTER) INSIDE BUT I COULD NOT SEE IN EVERY DIRECTION +7975-280084-0011-1101: DOCTOR (WHEELER->WHALER) WHO HAD GONE UPSTAIRS IN THE HOTEL SHOT MILLER AND HE LAY DYING IN THE STREET +7975-280084-0012-1102: CHANGING HIS PISTOL TO HIS LEFT HAND BOB RAN OUT AND MOUNTED MILLER'S (MARE->MAYOR) +7975-280084-0013-1103: (WHAT->BUT) KEPT YOU SO LONG (I ASKED->AS) PITTS +7975-280084-0014-1104: AS TO THE REST OF THE AFFAIR INSIDE THE BANK I TAKE THE ACCOUNT OF A (NORTHFIELD->NORTH FIELD) NARRATOR +7975-280084-0015-1105: WHERE'S THE MONEY OUTSIDE (THE->TO) SAFE BOB ASKED +7975-280084-0016-1106: THE (SHUTTERS->SHOWERS) WERE CLOSED AND THIS CAUSED BUNKER AN (INSTANT'S->INSTANCE) DELAY THAT WAS ALMOST FATAL (PITTS->FITZ) CHASED HIM WITH A BULLET +7975-280084-0017-1107: THE FIRST ONE MISSED HIM BUT THE SECOND WENT THROUGH HIS RIGHT SHOULDER +7975-280085-0000-1071: THAT NIGHT IT STARTED TO RAIN AND WE WORE OUT OUR HORSES +7975-280085-0001-1072: FRIDAY WE MOVED TOWARD WATERVILLE AND FRIDAY NIGHT WE (CAMPED->CAN'T) BETWEEN ELYSIAN AND GERMAN LAKE +7975-280085-0002-1073: (BOB'S SHATTERED ELBOW WAS->BOB SHUTTERED ELBOWS) REQUIRING FREQUENT ATTENTION AND THAT NIGHT WE MADE ONLY NINE MILES AND MONDAY MONDAY NIGHT (AND->IN) TUESDAY WE SPENT IN A DESERTED FARM HOUSE CLOSE TO (MANKATO->MAIN CATO) +7975-280085-0003-1074: THAT (DAY->THEY) A MAN NAMED DUNNING DISCOVERED US AND WE TOOK HIM PRISONER +7975-280085-0004-1075: FINALLY WE ADMINISTERED TO HIM AN OATH NOT TO BETRAY OUR WHEREABOUTS UNTIL WE HAD TIME TO MAKE OUR ESCAPE AND 
HE AGREED NOT TO +7975-280085-0005-1076: NO SOONER HOWEVER WAS HE RELEASED THAN HE MADE (POSTHASTE->POST HASTE) INTO (MANKATO->MANCATEO) TO ANNOUNCE OUR PRESENCE AND IN A FEW MINUTES ANOTHER POSSE WAS LOOKING FOR US +7975-280085-0006-1077: THE WHISTLE ON THE (OIL MILL BLEW->OARMEIL BLUE) AND WE FEARED THAT IT WAS A SIGNAL THAT HAD BEEN AGREED UPON TO ALARM THE TOWN IN CASE WE WERE OBSERVED BUT WE WERE NOT MOLESTED +7975-280085-0007-1078: HE HAD TO SLEEP WITH IT PILLOWED ON MY BREAST JIM BEING ALSO CRIPPLED WITH A WOUND IN HIS SHOULDER AND WE COULD NOT GET MUCH SLEEP +7975-280085-0008-1079: BUT THEY SOON AFTER GOT CLOSE ENOUGH SO THAT ONE OF THEM BROKE MY WALKING STICK WITH A SHOT +7975-280085-0009-1080: WE WERE (IN SIGHT->INSIDE) OF OUR LONG (SOUGHT->SALT) HORSES WHEN THEY CUT US OFF FROM THE ANIMALS AND OUR LAST HOPE WAS GONE +7975-280085-0010-1081: SIX (STEPPED TO->STEPS OF) THE FRONT SHERIFF (GLISPIN->LISPIN) COLONEL T L (VOUGHT->VAULT) B M RICE G A BRADFORD C A (POMEROY AND->POMERALIE IN) S (J SEVERSON->VERSON) +7975-280085-0011-1082: FORMING (IN->A) LINE FOUR PACES APART HE ORDERED THEM TO ADVANCE RAPIDLY AND CONCENTRATE THE FIRE OF THE WHOLE LINE THE INSTANT THE ROBBERS WERE DISCOVERED +7975-280085-0012-1083: MAKE FOR THE HORSES I SAID EVERY MAN FOR HIMSELF +7975-280085-0013-1084: (THERE IS->THERE'S) NO USE STOPPING TO PICK UP A COMRADE HERE FOR WE CAN'T GET HIM THROUGH THE LINE JUST (CHARGE->SHORES) THEM AND MAKE IT IF WE CAN +7975-280085-0014-1085: I GOT UP AS (THE->A) SIGNAL FOR THE CHARGE AND WE FIRED ONE VOLLEY +7975-280085-0015-1086: ONE OF THE FELLOWS IN THE OUTER LINE NOT BRAVE ENOUGH HIMSELF TO JOIN THE VOLUNTEERS WHO HAD COME IN TO (BEAT US OUT->BE DISOUT) WAS NOT DISPOSED TO BELIEVE IN THE SURRENDER AND HAD HIS GUN LEVELLED ON BOB IN SPITE OF THE HANDKERCHIEF WHICH WAS WAVING AS A FLAG OF TRUCE +7975-280085-0016-1087: SHERIFF (GLISPIN->GLISBON) OF (WATONWAN->WATERWIN) COUNTY WHO WAS TAKING BOB'S PISTOL FROM HIM WAS ALSO SHOUTING TO THE FELLOW +7975-280085-0017-1088: INCLUDING THOSE RECEIVED IN AND ON THE WAY FROM (NORTHFIELD->NORTH FIELD) I HAD ELEVEN (WOUNDS->WINDS) +7975-280085-0018-1089: AND (SHERIFF GLISPIN'S->SHARE OF GLISBON'S) ORDER NOT TO SHOOT WAS THE BEGINNING OF THE (PROTECTORATE->PROTECTOR) THAT MINNESOTA PEOPLE ESTABLISHED OVER US +8131-117016-0000-1303: CAPTAIN (MURDOCH->MURDOCK) +8131-117016-0001-1304: BUT MARSPORT HAD FLOURISHED ENOUGH TO KILL IT OFF +8131-117016-0002-1305: SOME OF MARS LAWS DATED FROM THE TIME WHEN (LAW ENFORCEMENT->LAWN FORCEMENT) HAD BEEN HAMPERED BY LACK OF MEN RATHER THAN BY THE TYPE OF MEN +8131-117016-0003-1306: THE (STONEWALL->STONE WALL) GANG NUMBERED PERHAPS FIVE HUNDRED +8131-117016-0004-1307: EVEN (DERELICTS->DEAR ALEXE) AND FAILURES HAD TO EAT THERE WERE (STORES->STORIES) AND SHOPS THROUGHOUT THE DISTRICT WHICH EKED OUT SOME KIND OF A MARGINAL LIVING +8131-117016-0005-1308: THEY WERE SAFE FROM PROTECTION (RACKETEERS->RACKETERS) THERE NONE BOTHERED TO COME SO FAR OUT +8131-117016-0006-1309: THE SHOPKEEPERS AND SOME OF THE LESS UNFORTUNATE PEOPLE THERE HAD PROTESTED LOUD ENOUGH TO REACH CLEAR BACK TO EARTH +8131-117016-0007-1310: CAPTAIN (MURDOCH->MURDOCK) WAS AN UNKNOWN FACTOR AND NOW WAS ASKING FOR MORE MEN +8131-117016-0008-1311: THE PRESSURE WAS ENOUGH TO GET THEM FOR HIM +8131-117016-0009-1312: GORDON REPORTED FOR WORK WITH A SENSE OF THE BOTTOM FALLING OUT MIXED WITH A VAGUE RELIEF +8131-117016-0010-1313: I'VE GOT A FREE HAND AND WE'RE GOING TO RUN THIS THE WAY WE WOULD ON EARTH +8131-117016-0011-1314: YOUR JOB IS TO 
PROTECT THE CITIZENS HERE AND THAT MEANS (EVERYONE->EVERY ONE) NOT BREAKING THE LAWS WHETHER YOU FEEL LIKE IT OR NOT NO GRAFT +8131-117016-0012-1315: THE FIRST MAN MAKING A (SHAKEDOWN->SHAKE DOWN) WILL GET THE SAME TREATMENT WE'RE GOING TO USE ON THE (STONEWALL->STONE WALL) BOYS YOU'LL GET DOUBLE PAY HERE AND YOU CAN LIVE ON IT +8131-117016-0013-1316: HE PICKED OUT FIVE OF THE MEN INCLUDING GORDON YOU FIVE WILL COME WITH ME +8131-117016-0014-1317: THE REST OF YOU CAN TEAM UP ANY WAY YOU WANT (TONIGHT->TO NIGHT) PICK ANY (ROUTE THAT'S->ROW OF THIS) OPEN (OKAY MEN->O CAME IN) LET'S GO +8131-117016-0015-1318: (BRUCE->BRUSH) GORDON GRINNED SLOWLY AS HE SWUNG THE STICK AND (MURDOCH'S->MARDOCK'S) EYES FELL ON HIM (EARTH COP->EARTHCOP) +8131-117016-0016-1319: TWO YEARS GORDON ADMITTED +8131-117016-0017-1320: FOR A SECOND GORDON CURSED HIMSELF +8131-117016-0018-1321: HE BEGAN WONDERING ABOUT SECURITY THEN +8131-117016-0019-1322: NOBODY HAD TRIED TO GET IN TOUCH WITH HIM +8131-117016-0020-1323: THERE WAS A CRUDE LIGHTING SYSTEM HERE PUT UP BY THE CITIZENS AT THE FRONT OF EACH BUILDING A DIM (PHOSPHOR BULB->PHOSPHO BOB) GLOWED WHEN DARKNESS FELL THEY WOULD HAVE NOTHING ELSE TO SEE BY +8131-117016-0021-1324: MOVING IN TWO GROUPS OF THREES (AT->IT) OPPOSITE SIDES OF THE STREET THEY BEGAN THEIR BEAT +8131-117016-0022-1325: THERE WAS NO CHANCE TO SAVE THE CITIZEN WHO WAS DYING FROM LACK OF AIR +8131-117016-0023-1326: GORDON FELT THE SOLID PLEASURE OF THE FINELY TURNED CLUB IN HIS HANDS +8131-117016-0024-1327: GORDON'S EYES POPPED AT THAT +8131-117016-0025-1328: HE SWALLOWED THE SENTIMENT HIS OWN CLUB WAS MOVING NOW +8131-117016-0026-1329: THE OTHER FOUR COPS HAD COME IN RELUCTANTLY +8131-117016-0027-1330: HE BROUGHT HIM TO THE GROUND WITH A SINGLE BLOW ACROSS THE KIDNEYS +8131-117016-0028-1331: THEY ROUNDED UP THE MEN OF THE GANG AND ONE OF THE (COPS->CUPS) STARTED OFF +8131-117016-0029-1332: TO FIND A PHONE AND CALL THE WAGON +8131-117016-0030-1333: (WE'RE->WERE) NOT USING WAGONS (MURDOCH->MURDOCK) TOLD HIM LINE THEM UP +8131-117016-0031-1334: IF THEY TRIED TO RUN THEY WERE HIT FROM BEHIND IF THEY STOOD STILL THEY WERE CLUBBED CAREFULLY +8131-117016-0032-1335: (MURDOCH->MURDOCK) INDICATED ONE WHO STOOD WITH HIS (SHOULDERS->SHOULDER) SHAKING AND TEARS RUNNING DOWN HIS CHEEKS +8131-117016-0033-1336: THE CAPTAIN'S FACE WAS AS SICK AS (GORDON->GORDON'S) FELT +8131-117016-0034-1337: I WANT THE NAME OF EVERY MAN IN THE GANG YOU CAN REMEMBER HE TOLD THE MAN +8131-117016-0035-1338: COLONEL THEY'D KILL ME I DON'T KNOW +8131-117016-0036-1339: (MURDOCH->MURDOCK) TOOK HIS NOD AS EVIDENCE ENOUGH AND TURNED TO THE WRETCHED (TOUGHS->TUFTS) +8131-117016-0037-1340: IF HE SHOULD TURN UP DEAD I'LL KNOW YOU BOYS ARE RESPONSIBLE AND I'LL FIND YOU +8131-117016-0038-1341: TROUBLE BEGAN BREWING SHORTLY AFTER THOUGH +8131-117016-0039-1342: (MURDOCH->MARDOCK) SENT ONE OF THE MEN TO PICK UP A SECOND SQUAD OF SIX AND THEN A THIRD +8131-117016-0040-1343: (IN->AND) THE THIRD ONE (BRUCE->BRUSH) GORDON SPOTTED ONE OF THE MEN (WHO'D->WHO HAD) BEEN BEATEN BEFORE +8131-117016-0041-1344: GET A STRETCHER AND TAKE HIM WHEREVER HE BELONGS HE ORDERED +8131-117016-0042-1345: BUT THE CAPTAIN STIRRED FINALLY SIGHING +8131-117016-0043-1346: NO THE COPS (THEY'RE->ARE) GIVING ME WE'RE COVERED GORDON +8131-117016-0044-1347: BUT THE (STONEWALL->STERNWALL) GANG IS BACKING (WAYNE->WANE) +8131-117016-0045-1348: BUT IT'S GOING TO BE TOUGH ON THEM +8131-117016-0046-1349: BRUCE GORDON GRIMACED I'VE GOT A YELLOW TICKET FROM SECURITY +8131-117016-0047-1350: 
(MURDOCH->MARDOCK) BLINKED HE DROPPED HIS EYES SLOWLY +8131-117016-0048-1351: WHAT MAKES YOU THINK (WAYNE->WAIN) WILL BE (RE ELECTED->REELECTED) +8131-117016-0049-1352: NOBODY WANTS HIM EXCEPT A GANG OF CROOKS AND THOSE IN POWER +8131-117016-0050-1353: EVER SEE A MARTIAN ELECTION +8131-117016-0051-1354: NO YOU'RE A (FIRSTER->FIRST TER) HE CAN'T LOSE +8131-117016-0052-1355: AND THEN HELL IS GOING TO POP (AND->IN) THIS WHOLE PLANET MAY BE BLOWN WIDE OPEN +8131-117016-0053-1356: IT FITTED WITH THE DIRE PREDICTIONS OF SECURITY AND WITH THE SPYING GORDON WAS GOING TO DO ACCORDING TO THEM +8131-117016-0054-1357: HE WAS GETTING EVEN FATTER NOW THAT HE WAS EATING BETTER FOOD FROM THE FAIR RESTAURANT AROUND THE CORNER +8131-117016-0055-1358: (COST EM->COSTUME) MORE BUT THEY'D BE RESPECTABLE +8131-117016-0056-1359: BECAUSE (IZZY->IZZIE) IS ALWAYS HONEST ACCORDING TO HOW HE SEES IT +8131-117016-0057-1360: BUT YOU GOT EARTH IDEAS OF THE STUFF LIKE I HAD ONCE +8131-117016-0058-1361: THE GROUPS GREW MORE EXPERIENCED AND (MURDOCH->MURDOCK) WAS TRAINING A NEW SQUAD EVERY NIGHT +8131-117016-0059-1362: IT (WASN'T->WAS AN) EXACTLY LEGAL BUT NOTHING WAS HERE +8131-117016-0060-1363: THIS COULD LEAD TO ABUSES AS HE'D SEEN ON EARTH +8131-117016-0061-1364: BUT THERE PROBABLY WOULDN'T BE TIME FOR IT IF MAYOR (WAYNE->WAIN) WAS RE ELECTED +8131-117017-0000-1270: IT WAS NIGHT OUTSIDE AND THE (PHOSPHOR BULBS->PHOSPHO BOBS) AT THE CORNERS GLOWED DIMLY GIVING HIM BARELY ENOUGH LIGHT BY WHICH TO LOCATE THE WAY TO THE (EXTEMPORIZED->EXTEMPORISED) PRECINCT HOUSE +8131-117017-0001-1271: IT HAD PROBABLY BEEN YEARS SINCE ANY HAD DARED RISK IT AFTER THE SUN WENT DOWN +8131-117017-0002-1272: AND THE SLOW DOUBTFUL RESPECT ON THE FACES OF THE CITIZENS AS THEY NODDED TO HIM WAS EVEN MORE PROOF THAT (HALEY'S->HAYE'S) SYSTEM WAS WORKING +8131-117017-0003-1273: GORDON HIT THE SIGNAL SWITCH AND THE (MARSPEAKER LET->MARKEE LED) OUT A SHRILL WHISTLE +8131-117017-0004-1274: (GUNS->GUN) SUDDENLY SEEMED TO BE FLOURISHING EVERYWHERE +8131-117017-0005-1275: YOU CAN'T DO IT TO ME +8131-117017-0006-1276: I'M REFORMED I'M GOING STRAIGHT +8131-117017-0007-1277: YOU DAMNED (COPS->COPSE) CAN'T (O'NEILL->ON NEIL) WAS BLUBBERING +8131-117017-0008-1278: ONE LOOK WAS ENOUGH THE WORK PAPERS HAD THE (TELLTALE->TELL TALE) OVER THICKENING OF THE SIGNATURE (THAT->THEY) HAD SHOWED UP ON OTHER PAPERS OBVIOUSLY FORGERIES +8131-117017-0009-1279: SOME TURNED AWAY AS GORDON AND THE OTHER COP WENT TO WORK BUT MOST OF THEM WEREN'T SQUEAMISH +8131-117017-0010-1280: WHEN IT WAS OVER THE TWO PICKED UP THEIR WHIMPERING CAPTIVE +8131-117017-0011-1281: JENKINS THE OTHER COP HAD BEEN HOLDING THE WALLET +8131-117017-0012-1282: MUST (OF->HAVE) BEEN MAKING A BIG CONTACT IN SOMETHING FIFTY FIFTY +8131-117017-0013-1283: THERE MUST HAVE BEEN OVER TWO THOUSAND CREDITS IN THE WALLET +8131-117017-0014-1284: WHEN GORDON AND JENKINS CAME BACK (MURDOCH->MARDOCK) TOSSED THE MONEY TO THEM SPLIT IT +8131-117017-0015-1285: WHATEVER COMES TO HAND (GOV'NOR->GOVERNOR) +8131-117017-0016-1286: LIKE THIS SOCIAL CALL GORDON ASKED HIM +8131-117017-0017-1287: THE LITTLE MAN SHOOK HIS HEAD HIS ANCIENT EIGHTEEN YEAR OLD FACE TURNING SOBER (NOPE->NOTE) +8131-117017-0018-1288: YOU OWE ME SOME BILLS (GOV'NOR->GUV'NER) +8131-117017-0019-1289: ELEVEN HUNDRED FIFTY CREDITS +8131-117017-0020-1290: YOU DIDN'T PAY UP YOUR PLEDGE TO THE (CAMPAIGN->CAPTAIN) FUND SO I (HADDA->HAD A) FILL IN +8131-117017-0021-1291: A THOUSAND (INTEREST->INTERESTS) AT TEN PER CENT A WEEK STANDARD RIGHT +8131-117017-0022-1292: 
GORDON HAD HEARD OF THE FRIENDLY INTEREST CHARGED ON THE SIDE HERE BUT HE SHOOK HIS HEAD WRONG (IZZY->IS HE) +8131-117017-0023-1293: (HUH IZZY->HER AS HE) TURNED IT OVER AND SHOOK HIS HEAD +8131-117017-0024-1294: NOW SHOW ME WHERE I SIGNED ANY AGREEMENT SAYING I'D PAY YOU BACK +8131-117017-0025-1295: FOR A SECOND (IZZY'S->IS HIS) FACE WENT BLANK THEN HE CHUCKLED +8131-117017-0026-1296: HE (PULLED->POURED) OUT THE BILLS AND HANDED THEM OVER +8131-117017-0027-1297: THANKS (IZZY->IS HE) THANKS YOURSELF +8131-117017-0028-1298: THE KID POCKETED THE MONEY CHEERFULLY NODDING +8131-117017-0029-1299: THE LITTLE GUY KNEW MARS AS FEW OTHERS DID APPARENTLY FROM ALL SIDES +8131-117017-0030-1300: AND IF ANY OF THE OTHER COPS HAD PRIVATE RACKETS OF THEIR OWN (IZZY->IS HE) WAS UNDOUBTEDLY THE MAN TO FIND IT OUT AND (USE->USED) THE INFORMATION WITH A BEAT SUCH AS THAT EVEN GOING HALVES AND WITH ALL THE GRAFT (TO->OF) THE UPPER BRACKETS HE'D STILL BE ABLE TO MAKE HIS PILE IN A MATTER OF MONTHS +8131-117017-0031-1301: THE CAPTAIN LOOKED COMPLETELY BEATEN AS HE CAME INTO THE ROOM AND DROPPED (ONTO->INTO) THE BENCH +8131-117017-0032-1302: GO ON (ACCEPT DAMN IT->EXCEPT DEMON) +8131-117029-0000-1247: THERE WAS A MAN COMING FROM EARTH ON A SECOND SHIP WHO WOULD SEE HIM +8131-117029-0001-1248: THE LITTLE PUBLISHER WAS BACK AT THE CRUSADER AGAIN +8131-117029-0002-1249: ONLY GORDON AND SHEILA WERE LEFT +8131-117029-0003-1250: CREDIT HAD BEEN ESTABLISHED AGAIN AND THE BUSINESSES WERE OPEN +8131-117029-0004-1251: GORDON CAME TO A ROW OF TEMPORARY BUBBLES INDIVIDUAL DWELLINGS BUILT LIKE THE DOME BUT OPAQUE FOR PRIVACY +8131-117029-0005-1252: THEY HAD BEEN LUCKY +8131-117029-0006-1253: (SCHULBERG'S->SILBERG'S) VOLUNTEERS WERE OFFICIAL NOW +8131-117029-0007-1254: (FATS->FAT) PLACE WAS STILL OPEN THOUGH THE CROOKED TABLES HAD BEEN REMOVED GORDON DROPPED TO A STOOL SLIPPING OFF HIS HELMET +8131-117029-0008-1255: HE REACHED AUTOMATICALLY FOR THE GLASS OF ETHER NEEDLED BEER +8131-117029-0009-1256: THOUGHT YOU'D BE IN THE CHIPS +8131-117029-0010-1257: THAT'S MARS GORDON (ECHOED THE OTHER'S->ACCORD OTHERS) COMMENT WHY DON'T YOU PULL OFF THE PLANET FATS YOU COULD GO BACK TO EARTH I'D GUESS THE OTHER NODDED +8131-117029-0011-1258: GUESS A MAN GETS USED TO ANYTHING HELL MAYBE I CAN HIRE SOME BUMS TO SIT AROUND AND WHOOP IT UP WHEN THE SHIPS COME IN AND (BILL->BUILD) THIS AS A REAL OLD MARTIAN DEN OF SIN +8131-117029-0012-1259: THERE WAS A GRIN ON THE OTHER'S FACE +8131-117029-0013-1260: FINALLY GOT OUR ORDERS FOR YOU IT'S MERCURY +8131-117029-0014-1261: WE SENT TWENTY OTHERS THE SAME WAY AND THEY FAILED +8131-117029-0015-1262: LET'S (SAY YOU'VE->SAVE) SHIFTED SOME OF THE MISERY AROUND A BIT AND GIVEN THEM A CHANCE TO DO BETTER +8131-117029-0016-1263: YOU CAN'T STAY HERE +8131-117029-0017-1264: THERE'S A ROCKET WAITING TO (TRANSSHIP->TRANSHIP) YOU TO THE MOON ON THE WAY TO MERCURY RIGHT NOW GORDON SIGHED +8131-117029-0018-1265: AND (I'VE->I) PAID HER THE PAY WE OWE YOU FROM THE TIME YOU (BEGAN->BEGIN) USING YOUR BADGE SHE'S OUT SHOPPING +8131-117029-0019-1266: BUT HIS OLD EYES WERE GLINTING +8131-117029-0020-1267: DID YOU THINK WE'D LET YOU GO WITHOUT SEEING YOU OFF (COBBER->COPPER) HE ASKED +8131-117029-0021-1268: I I OH (DRAT->DREAD) IT I'M GETTING OLD (IZZY->IS HE) YOU TELL HIM +8131-117029-0022-1269: HE GRABBED GORDON'S HAND AND WADDLED DOWN THE LANDING PLANK (IZZY->IS HE) SHOOK HIS HEAD +8188-269288-0000-2881: (ANNIE->ANY) COLCHESTER HAD BEGUN TO MAKE FRIENDS WITH (LESLIE->LISLEY) +8188-269288-0001-2882: LESLIE DETERMINED TO 
(TRY FOR->TRIFLE) HONORS IN ENGLISH LANGUAGE AND LITERATURE +8188-269288-0002-2883: HER TASTES ALL LAY IN THIS DIRECTION HER IDEA BEING BY AND BY TO FOLLOW HER MOTHER'S (PROFESSION->PROFICIENT) OF JOURNALISM FOR WHICH SHE (ALREADY->ALWAYS) SHOWED CONSIDERABLE APTITUDE +8188-269288-0003-2884: SHE HAD NO IDEA OF ALLOWING HERSELF TO BREAK DOWN +8188-269288-0004-2885: WHAT DO YOU MEAN REPLIED (LESLIE->LISLEY) +8188-269288-0005-2886: WHY YOU WILL BE PARTING FROM ME YOU KNOW +8188-269288-0006-2887: I WON'T BE THE CONSTANT WORRY AND PLAGUE OF YOUR LIFE +8188-269288-0007-2888: (IT->THIS) IS THIS IF BY ANY CHANCE YOU DON'T LEAVE SAINT (WODE'S->WORDS) ANNIE I HOPE YOU WILL ALLOW ME TO BE YOUR (ROOMFELLOW->ROOM FELLOW) AGAIN NEXT TERM +8188-269288-0008-2889: SAID ANNIE A FLASH OF LIGHT COMING INTO HER EYES AND THEN LEAVING THEM +8188-269288-0009-2890: BUT SHE ADDED ABRUPTLY YOU SPEAK OF SOMETHING WHICH MUST NOT TAKE PLACE +8188-269288-0010-2891: I MUST PASS (IN HONORS->AN HONOUR) IF I DON'T I SHALL DIE +8188-269288-0011-2892: A FEW MOMENTS LATER (THERE->DICK) CAME A TAP AT THE DOOR +8188-269288-0012-2893: LESLIE OPENED THE DOOR +8188-269288-0013-2894: JANE (HERIOT->HARRIET) STOOD WITHOUT +8188-269288-0014-2895: THESE LETTERS HAVE JUST COME FOR YOU (AND ANNIE->IN ANY) COLCHESTER SHE SAID AND AS I WAS COMING UPSTAIRS I THOUGHT I WOULD LEAVE THEM WITH YOU +8188-269288-0015-2896: (LESLIE->LISLEY) THANKED HER AND EAGERLY GRASPED THE LITTLE PARCEL +8188-269288-0016-2897: HER EYES SHONE WITH PLEASURE AT THE ANTICIPATION OF THE DELIGHTFUL TIME SHE WOULD HAVE (REVELING->RIVELING) IN THE HOME NEWS THE OTHER LETTER WAS DIRECTED TO (ANNIE->ANY) COLCHESTER +8188-269288-0017-2898: HERE IS A LETTER FOR YOU (ANNIE->ANY) CRIED (LESLIE->LIZZILY) +8188-269288-0018-2899: HER FACE GREW SUDDENLY WHITE AS DEATH WHAT IS IT DEAR +8188-269288-0019-2900: I HAVE BEEN (STARVING->STARLING) OR RATHER I HAVE BEEN THIRSTING +8188-269288-0020-2901: WELL READ IT IN PEACE SAID (LESLIE->LIDNESLEY) I WON'T DISTURB YOU +8188-269288-0021-2902: I AM TRULY GLAD IT HAS COME +8188-269288-0022-2903: LESLIE SEATED HERSELF WITH HER BACK TO HER COMPANION AND OPENED HER (OWN->ON) LETTERS +8188-269288-0023-2904: DON'T NOTICE ME REPLIED (ANNIE->ANY) +8188-269288-0024-2905: I MUST GO INTO THE GROUNDS THE AIR IS STIFLING +8188-269288-0025-2906: BUT THEY (ARE->HAD) JUST SHUTTING UP +8188-269288-0026-2907: I SHALL GO I KNOW A WAY +8188-269288-0027-2908: JUST AFTER MIDNIGHT SHE ROSE WITH A SIGH TO PREPARE FOR BED +8188-269288-0028-2909: SHE LOOKED ROUND THE ROOM +8188-269288-0029-2910: NOW I REMEMBER SHE GOT A LETTER WHICH UPSET HER VERY MUCH AND WENT OUT +8188-269288-0030-2911: (LESLIE->LISLEY) WENT TO THE WINDOW AND FLUNG IT OPEN SHE PUT HER HEAD OUT AND TRIED TO PEER INTO THE DARKNESS BUT THE MOON HAD ALREADY SET AND SHE COULD NOT SEE MORE THAN A COUPLE OF YARDS IN FRONT OF HER +8188-269288-0031-2912: SHE IS A VERY QUEER (ERRATIC->THE RATTIC) CREATURE AND THAT LETTER THERE WAS BAD NEWS IN THAT LETTER +8188-269288-0032-2913: WHAT (CAN SHE BE->CAN'T YOU) DOING OUT BY HERSELF +8188-269288-0033-2914: (LESLIE->THIS LILY) LEFT THE ROOM BUT SHE HAD SCARCELY GONE A DOZEN (PACES->PLACES) DOWN THE CORRIDOR BEFORE SHE MET (ANNIE->ANY) RETURNING +8188-269288-0034-2915: (ANNIE'S EYES->ANY THOUGHT) WERE VERY BRIGHT HER CHEEKS WERE NO LONGER PALE AND THERE WAS A BRILLIANT (COLOR->COLOUR) IN THEM +8188-269288-0035-2916: SHE DID NOT TAKE THE LEAST NOTICE OF (LESLIE->PLEASING) BUT GOING INTO THE ROOM SHUT THE DOOR +8188-269288-0036-2917: DON'T BEGIN SAID ANNIE 
+8188-269288-0037-2918: DON'T BEGIN WHAT DO YOU MEAN +8188-269288-0038-2919: I MEAN THAT I DON'T WANT YOU TO BEGIN TO ASK QUESTIONS +8188-269288-0039-2920: I WALKED UP AND DOWN AS FAST AS EVER I COULD OUTSIDE IN ORDER TO MAKE MYSELF SLEEPY +8188-269288-0040-2921: (DON'T->THEY'RE) TALK TO ME (LESLIE->LISLEY) DON'T SAY A SINGLE WORD +8188-269288-0041-2922: I SHALL GO OFF TO SLEEP THAT IS ALL I CARE FOR +8188-269288-0042-2923: DON'T SAID ANNIE +8188-269288-0043-2924: NOW DRINK THIS AT ONCE SHE SAID IN A VOICE OF AUTHORITY IF YOU REALLY WISH TO SLEEP +8188-269288-0044-2925: (ANNIE STARED->ANY STEERED) VACANTLY AT THE (COCOA THEN->CUCKOO DID) SHE UTTERED A LAUGH +8188-269288-0045-2926: DRINK THAT SHE SAID +8188-269288-0046-2927: DO YOU WANT TO KILL ME DON'T TALK ANY MORE +8188-269288-0047-2928: I AM SLEEPY I SHALL SLEEP +8188-269288-0048-2929: SHE GOT INTO BED AS SHE SPOKE AND WRAPPED THE CLOTHES TIGHTLY ROUND HER +8188-269288-0049-2930: (CAN'T->CAN) YOU MANAGE WITH A CANDLE JUST FOR ONCE +8188-269288-0050-2931: CERTAINLY SAID (LESLIE->IT EASILY) +8188-269288-0051-2932: SHE TURNED OFF THE LIGHT AND (LIT A->LET HER) CANDLE (WHICH->WOULD) SHE PUT BEHIND HER SCREEN THEN PREPARED TO GET INTO BED +8188-269288-0052-2933: (ANNIE'S->ANY'S) MANNER WAS VERY MYSTERIOUS +8188-269288-0053-2934: (ANNIE->AND HE) DID NOT MEAN TO (CONFIDE->CONFINE) IN (ANYONE->ANY ONE) THAT NIGHT AND THE KINDEST THING WAS TO LEAVE HER ALONE +8188-269288-0054-2935: (TIRED->TIE IT) OUT LESLIE HERSELF DROPPED ASLEEP +8188-269288-0055-2936: (ANNIE->ANY) IS THAT YOU SHE CALLED OUT +8188-269288-0056-2937: THERE WAS NO REPLY BUT THE SOUND OF HURRYING STEPS CAME QUICKER AND QUICKER NOW AND THEN (THEY WERE->THEIR) INTERRUPTED BY A GROAN +8188-269288-0057-2938: OH THIS WILL KILL ME MY HEART WILL BREAK THIS WILL KILL ME +8188-269290-0000-2823: THE GUILD OF SAINT ELIZABETH +8188-269290-0001-2824: IMMEDIATELY AFTER DINNER THAT EVENING LESLIE RAN UP TO HER ROOM TO MAKE PREPARATIONS FOR HER VISIT TO EAST HALL +8188-269290-0002-2825: I'M NOT COMING SAID ANNIE +8188-269290-0003-2826: EVERY STUDENT IS TO BE (IN->AN) EAST HALL AT HALF PAST EIGHT +8188-269290-0004-2827: IT (DOESN'T->DOES) MATTER REPLIED ANNIE (WHETHER->WHITHER) IT IS AN ORDER OR NOT (I'M->I AM) NOT COMING SAY NOTHING ABOUT ME PLEASE +8188-269290-0005-2828: IT BURNED AS IF WITH FEVER +8188-269290-0006-2829: YOU DON'T KNOW WHAT A TRIAL IT IS FOR ME TO HAVE YOU HERE +8188-269290-0007-2830: I WANT TO BE ALONE GO +8188-269290-0008-2831: I KNOW YOU DON'T QUITE MEAN WHAT YOU SAY SAID (LESLIE->LIZZIE) BUT OF COURSE IF YOU REALLY WISH ME +8188-269290-0009-2832: YOU FRET ME BEYOND ENDURANCE +8188-269290-0010-2833: WRAPPING A PRETTY BLUE SHAWL (ROUND HER HEAD AND->AROUND A HIDDEN) SHOULDERS SHE TURNED TO ANNIE +8188-269290-0011-2834: LESLIE WAS JUST CLOSING THE DOOR BEHIND HER WHEN (ANNIE->ANY) CALLED AFTER HER +8188-269290-0012-2835: I TOOK IT OUT SAID (LESLIE->LISLEY) TOOK IT OUT +8188-269290-0013-2836: HAVE THE GOODNESS TO FIND IT AND PUT IT BACK +8188-269290-0014-2837: BUT DON'T LOCK ME OUT PLEASE (ANNIE->ANY) +8188-269290-0015-2838: OH I WON'T LOCK YOU (OUT->ABOUT) SHE SAID BUT I MUST HAVE THE KEY +8188-269290-0016-2839: JANE (HERIOT'S->HEARET'S) VOICE WAS HEARD IN THE PASSAGE +8188-269290-0017-2840: AS SHE WALKED DOWN THE CORRIDOR SHE HEARD IT BEING TURNED (IN->TO) THE LOCK +8188-269290-0018-2841: WHAT CAN THIS MEAN SHE SAID TO HERSELF +8188-269290-0019-2842: OH I WON'T PRESS YOU REPLIED JANE +8188-269290-0020-2843: OH I SHALL NEVER DO THAT REPLIED (LESLIE->LISLEY) 
+8188-269290-0021-2844: YOU SEE ALL THE GIRLS EXCEPT (EILEEN->AILEEN) AND MARJORIE LAUGH AT HER AND THAT SEEMS TO ME TO MAKE HER WORSE +8188-269290-0022-2845: SOME DAY JANE YOU MUST SEE HER +8188-269290-0023-2846: IF YOU (ARE->*) IN LONDON DURING THE SUMMER YOU MUST COME (AND PAY US A->IN PAIR FOR) VISIT WILL YOU +8188-269290-0024-2847: THAT IS IF YOU CARE TO CONFIDE IN ME +8188-269290-0025-2848: I BELIEVE POOR ANNIE IS DREADFULLY UNHAPPY +8188-269290-0026-2849: THAT'S JUST (IT JANE->A CHAIN) THAT IS WHAT FRIGHTENS ME SHE REFUSES TO COME +8188-269290-0027-2850: REFUSES TO COME SHE CRIED +8188-269290-0028-2851: (SHE WILL->SHE'LL) GET (INTO AN->IN HER) AWFUL SCRAPE +8188-269290-0029-2852: I AM SURE SHE IS ILL SHE WORKS TOO HARD AND SHE BUT THERE I DON'T KNOW THAT I OUGHT TO SAY ANY MORE +8188-269290-0030-2853: I'LL WAIT FOR YOU HERE SAID (LESLIE->LISLEY) +8188-269290-0031-2854: DO COME (ANNIE->ANY) DO +8188-269290-0032-2855: SCARCELY LIKELY REPLIED LESLIE SHE TOLD ME SHE WAS DETERMINED NOT TO COME TO THE MEETING +8188-269290-0033-2856: BUT MARJORIE AND (EILEEN->IDLEEN) HAD ALREADY DEPARTED AND (LESLIE->LISLEY) AND JANE FOUND THEMSELVES AMONG THE LAST STUDENTS TO ARRIVE AT THE GREAT EAST HALL +8188-269290-0034-2857: MISS (LAUDERDALE->LORDAIL) WAS STANDING WITH THE OTHER TUTORS AND (PRINCIPALS->PRINCIPLES) OF THE DIFFERENT HALLS (ON->ARE) A RAISED PLATFORM +8188-269290-0035-2858: THEN A (ROLL CALL->ROCKLE) WAS GONE THROUGH BY ONE OF THE TUTORS THE ONLY (ABSENTEE->EBSENTEE) WAS (ANNIE->ANY) COLCHESTER +8188-269290-0036-2859: THE PHYSICAL PART OF (YOUR TRAINING->THE ORTRAINING) AND ALSO THE MENTAL PART ARE ABUNDANTLY SUPPLIED IN THIS GREAT HOUSE OF LEARNING SHE CONTINUED BUT THE SPIRITUAL PART IT SEEMS TO ME OUGHT NOW TO BE STRENGTHENED +8188-269290-0037-2860: HEAR (HEAR->HERE) AND ONCE AGAIN (HEAR->HAIR) +8188-269290-0038-2861: SHE UTTERED (HER STRANGE->A STREAM) REMARK STANDING UP +8188-269290-0039-2862: MARJORIE AND (EILEEN->ILINE) WERE CLOSE TO HER +8188-269290-0040-2863: I WILL TALK WITH YOU (BELLE ACHESON->BELL ARTISON) PRESENTLY SHE SAID +8188-269290-0041-2864: THE NAMES OF (*->THE) PROPOSED MEMBERS ARE TO BE SUBMITTED TO ME BEFORE THIS DAY WEEK +8188-269290-0042-2865: AM I MY BROTHER'S KEEPER +8188-269290-0043-2866: YOU ASK SHE CONTINUED +8188-269290-0044-2867: GOD (ANSWERS TO->AUTHEST) EACH OF YOU YOU ARE +8188-269290-0045-2868: THE WORLD (SAYS->TASTE) NO I AM NOT BUT GOD (SAYS->SAKES) YES YOU ARE +8188-269290-0046-2869: ALL MEN ARE (YOUR->*) BROTHERS +8188-269290-0047-2870: FOR ALL WHO SIN ALL WHO SUFFER YOU ARE TO (A CERTAIN->EXERT AN) EXTENT RESPONSIBLE +8188-269290-0048-2871: AFTER THE ADDRESS THE GIRLS THEMSELVES WERE ENCOURAGED TO SPEAK AND A VERY ANIMATED DISCUSSION FOLLOWED +8188-269290-0049-2872: IT WAS PAST TEN O'CLOCK WHEN SHE LEFT THE HALL +8188-269290-0050-2873: JUST AS SHE WAS DOING SO (MISS->WAS) FRERE CAME UP +8188-269290-0051-2874: (ANNIE COLCHESTER IS->ANY COLCHISED AS) YOUR (ROOMFELLOW->ROOM FELLOW) IS SHE NOT SHE SAID +8188-269290-0052-2875: I SEE BY YOUR FACE (MISS GILROY->MY SCALE ROY) THAT YOU ARE DISTRESSED ABOUT SOMETHING ARE (YOU->*) KEEPING ANYTHING BACK +8188-269290-0053-2876: I AM AFRAID I AM REPLIED LESLIE (DISTRESS->DISTRESSED) NOW IN HER TONE +8188-269290-0054-2877: I MUST SEE HER MYSELF EARLY IN THE MORNING AND I AM QUITE SURE THAT NOTHING WILL SATISFY MISS (LAUDERDALE->LAURDALE) EXCEPT A VERY AMPLE APOLOGY AND A FULL EXPLANATION OF THE REASON WHY SHE ABSENTED HERSELF +8188-269290-0055-2878: EXCUSES MAKE NO DIFFERENCE +8188-269290-0056-2879: THE GIRL WHO 
BREAKS THE RULES (HAS->HAVE) TO BE PUNISHED +8188-269290-0057-2880: I WILL TELL HER +8188-274364-0000-2811: THE COMMONS ALSO VOTED THAT THE NEW CREATED PEERS OUGHT TO HAVE NO VOICE IN THIS TRIAL BECAUSE THE ACCUSATION BEING AGREED TO WHILE THEY WERE COMMONERS THEIR CONSENT TO IT WAS IMPLIED WITH THAT OF ALL THE COMMONS OF ENGLAND +8188-274364-0001-2812: IN THE GOVERNMENT OF IRELAND HIS ADMINISTRATION HAD BEEN EQUALLY (PROMOTIVE->PROMOTED) OF HIS MASTER'S INTEREST AND THAT OF THE SUBJECTS COMMITTED TO HIS CARE +8188-274364-0002-2813: THE CASE OF LORD (MOUNTNORRIS->MONTORIS) OF ALL THOSE WHICH WERE COLLECTED WITH SO (MUCH->ACT) INDUSTRY IS THE MOST FLAGRANT AND THE LEAST EXCUSABLE +8188-274364-0003-2814: THE COURT WHICH CONSISTED OF THE (CHIEF OFFICERS->CHEAP OFFICIALS) OF THE ARMY FOUND THE (CRIME->CROWN) TO BE CAPITAL AND CONDEMNED THAT NOBLEMAN TO LOSE HIS HEAD +8188-274364-0004-2815: (WHERE THE->WITH A) TOKEN BY WHICH I (SHOULD->SHALL) DISCOVER IT +8188-274364-0005-2816: IT IS NOW (*->A) FULL TWO HUNDRED AND FORTY YEARS SINCE TREASONS WERE DEFINED AND SO LONG HAS IT BEEN SINCE ANY MAN WAS TOUCHED TO THIS EXTENT UPON THIS CRIME (BEFORE->FOR) MYSELF +8188-274364-0006-2817: LET US NOT TO (OUR->HER) OWN DESTRUCTION AWAKE THOSE (SLEEPING->KEEPING) LIONS BY RATTLING UP A COMPANY OF (OLD RECORDS->ALL RICARDS) WHICH HAVE LAIN FOR SO MANY AGES BY THE (WALL->WAR) FORGOTTEN AND NEGLECTED +8188-274364-0007-2818: (HOWEVER->HERBID) THESE GENTLEMEN AT THE BAR (SAY->SO) THEY SPEAK FOR THE (COMMONWEALTH->CORNWEALTH) AND THEY BELIEVE SO YET UNDER (FAVOR->FAVOUR) IT IS I WHO IN THIS PARTICULAR SPEAK FOR THE (COMMONWEALTH->CORNWEALTH) +8188-274364-0008-2819: MY LORDS I HAVE NOW TROUBLED YOUR LORDSHIPS A GREAT DEAL LONGER THAN I SHOULD HAVE DONE +8188-274364-0009-2820: YOUNG VANE FALLING UPON THIS PAPER OF NOTES DEEMED THE MATTER OF THE UTMOST IMPORTANCE AND IMMEDIATELY COMMUNICATED IT TO (PYM->POEM) WHO NOW PRODUCED THE PAPER BEFORE THE HOUSE OF COMMONS +8188-274364-0010-2821: THE KING PROPOSES THIS DIFFICULTY BUT HOW CAN I UNDERTAKE OFFENSIVE WAR IF I HAVE NO MORE MONEY +8188-274364-0011-2822: YOUR MAJESTY HAVING TRIED THE AFFECTIONS OF YOUR PEOPLE YOU ARE ABSOLVED AND LOOSE FROM ALL RULES OF GOVERNMENT AND MAY DO WHAT POWER WILL ADMIT +8280-266249-0000-339: OLD MISTER DINSMORE HAD ACCEPTED (A PRESSING->OPPRESSING) INVITATION FROM HIS GRANDDAUGHTER AND HER HUSBAND TO JOIN THE PARTY AND WITH THE ADDITION OF SERVANTS IT WAS A LARGE ONE +8280-266249-0001-340: AS THEY WERE IN NO HASTE AND THE CONFINEMENT OF A RAILROAD CAR (WOULD->WILL) BE VERY IRKSOME TO THE YOUNGER CHILDREN IT HAD BEEN DECIDED TO MAKE THE JOURNEY BY WATER +8280-266249-0002-341: THERE WERE NO SAD LEAVE TAKINGS TO MAR THEIR PLEASURE THE CHILDREN WERE IN WILD SPIRITS AND ALL SEEMED CHEERFUL AND HAPPY AS THEY SAT OR STOOD UPON THE DECK WATCHING THE RECEDING SHORE AS THE VESSEL STEAMED OUT OF THE (HARBOR->HARBOUR) +8280-266249-0003-342: AT LENGTH THE LAND HAD QUITE DISAPPEARED NOTHING COULD BE SEEN BUT THE SKY OVERHEAD AND A VAST EXPANSE OF WATER ALL (AROUND->ROUND) AND THE PASSENGERS FOUND LEISURE TO TURN THEIR ATTENTION UPON EACH OTHER +8280-266249-0004-343: THERE ARE SOME NICE LOOKING PEOPLE ON BOARD REMARKED MISTER TRAVILLA IN AN UNDERTONE TO HIS WIFE +8280-266249-0005-344: (BESIDE->BESIDES) OURSELVES ADDED COUSIN (RONALD->RANALD) LAUGHING +8280-266249-0006-345: YES SHE ANSWERED THAT LITTLE GROUP YONDER A YOUNG MINISTER AND HIS WIFE AND CHILD I SUPPOSE +8280-266249-0007-346: AND (WHAT->WHEN) A DEAR LITTLE FELLOW HE IS JUST ABOUT THE AGE OF OUR 
(HAROLD->HERALD) I SHOULD JUDGE +8280-266249-0008-347: DO YOU SON WAS THE SMILING REJOINDER +8280-266249-0009-348: HE CERTAINLY LOOKS LIKE A VERY NICE LITTLE BOY +8280-266249-0010-349: SUPPOSE YOU AND HE SHAKE HANDS FRANK +8280-266249-0011-350: I DO INDEED (THOUGH->THE) PROBABLY COMPARATIVELY FEW ARE AWARE THAT TOBACCO IS THE CAUSE OF THEIR AILMENTS +8280-266249-0012-351: DOUBTLESS THAT IS THE CASE REMARKED MISTER DINSMORE +8280-266249-0013-352: WITH ALL MY HEART IF YOU WILL STEP INTO THE (GENTLEMEN'S->GENTLEMAN'S) CABIN WHERE THERE'S A LIGHT +8280-266249-0014-353: HE LED THE WAY THE OTHERS ALL FOLLOWING AND TAKING OUT A SLIP OF PAPER READ FROM IT IN A DISTINCT TONE LOUD ENOUGH TO BE HEARD BY THOSE (*->ALL) ABOUT HIM WITHOUT DISTURBING THE OTHER PASSENGERS +8280-266249-0015-354: ONE DROP OF NICOTINE (EXTRACT OF->EXTRACTED) TOBACCO PLACED ON THE TONGUE OF (A->THE) DOG WILL KILL HIM IN A MINUTE THE HUNDREDTH PART OF A GRAIN (PICKED->PRICKED) UNDER THE SKIN OF A MAN'S ARM WILL PRODUCE NAUSEA AND FAINTING +8280-266249-0016-355: THE HALF DOZEN CIGARS WHICH MOST SMOKERS (USE->USED) A DAY CONTAIN SIX OR SEVEN GRAINS ENOUGH IF CONCENTRATED AND ABSORBED TO KILL THREE MEN AND A POUND (OF->OR) TOBACCO ACCORDING TO ITS QUALITY CONTAINS FROM ONE QUARTER TO ONE AND A QUARTER OUNCES +8280-266249-0017-356: IS IT STRANGE THEN THAT SMOKERS AND (CHEWERS->SHOERS) HAVE A THOUSAND AILMENTS +8280-266249-0018-357: THAT THE FRENCH (POLYTECHNIC->POLYTECHNICA) INSTITUTE HAD TO PROHIBIT ITS USE ON ACCOUNT OF ITS EFFECTS (ON->UPON) THE MIND +8280-266249-0019-358: NOTICE THE MULTITUDE OF SUDDEN DEATHS AND SEE HOW MANY (ARE->OUR) SMOKERS AND CHEWERS +8280-266249-0020-359: IN A SMALL COUNTRY TOWN SEVEN OF THESE MYSTERIOUS PROVIDENCES OCCURRED WITHIN THE CIRCUIT OF A MILE ALL DIRECTLY TRACEABLE TO TOBACCO AND ANY PHYSICIAN ON A FEW MOMENTS REFLECTION CAN MATCH THIS FACT BY HIS OWN OBSERVATION +8280-266249-0021-360: AND THEN SUCH POWERFUL ACIDS PRODUCE INTENSE IRRITATION AND THIRST THIRST WHICH WATER DOES NOT QUENCH +8280-266249-0022-361: HENCE A RESORT TO CIDER AND BEER +8280-266249-0023-362: NO SIR WHAT KNOW YE NOT THAT YOUR BODY IS THE TEMPLE OF THE HOLY GHOST WHICH IS IN YOU WHICH YE HAVE OF GOD AND YE ARE NOT YOUR OWN +8280-266249-0024-363: FOR (YE->YOU) ARE BOUGHT WITH A PRICE THEREFORE GLORIFY GOD IN YOUR BODY AND IN YOUR SPIRIT WHICH ARE (GOD'S->GODS) +8280-266249-0025-364: WE CERTAINLY HAVE NO RIGHT TO INJURE OUR BODIES EITHER BY NEGLECT OR SELF INDULGENCE +8280-266249-0026-365: AND AGAIN I BESEECH YOU THEREFORE BRETHREN BY THE MERCIES OF GOD THAT YE PRESENT YOUR BODIES A LIVING SACRIFICE HOLY ACCEPTABLE UNTO GOD WHICH IS YOUR REASONABLE SERVICE +8280-266249-0027-366: IT MUST REQUIRE A GOOD DEAL OF RESOLUTION FOR ONE WHO HAS BECOME FOND OF THE INDULGENCE TO GIVE IT UP REMARKED MISTER DALY +8280-266249-0028-367: NO DOUBT NO DOUBT RETURNED MISTER (LILBURN->LILBOURNE) BUT IF THY RIGHT (EYE OFFEND THEE->I OFFENDLY) PLUCK IT (OUT->UP) AND CAST IT FROM (THEE->ME) FOR IT IS PROFITABLE FOR THEE THAT ONE OF THY MEMBERS SHOULD PERISH AND NOT THAT THY WHOLE BODY SHOULD BE CAST INTO HELL +8280-266249-0029-368: THERE WAS A PAUSE BROKEN BY YOUNG HORACE WHO HAD BEEN WATCHING A GROUP OF MEN GATHERED ABOUT A TABLE AT THE FURTHER END OF THE ROOM +8280-266249-0030-369: THEY ARE GAMBLING YONDER AND I'M AFRAID THAT YOUNG FELLOW IS BEING BADLY FLEECED BY (THAT->THE) MIDDLE AGED MAN OPPOSITE +8280-266249-0031-370: THE EYES OF THE WHOLE PARTY WERE AT ONCE TURNED IN THAT DIRECTION +8280-266249-0032-371: NO SIR HE IS NOT HERE 
+8280-266249-0033-372: (AND->AT) THE DOOR WAS SLAMMED VIOLENTLY (TO->TOO) +8280-266249-0034-373: NOW THE VOICE CAME FROM THE SKYLIGHT OVERHEAD APPARENTLY AND WITH A FIERCE IMPRECATION THE IRATE GAMESTER RUSHED UPON DECK AND RAN HITHER AND THITHER IN SEARCH OF HIS TORMENTOR +8280-266249-0035-374: HIS VICTIM WHO HAD BEEN LOOKING ON DURING THE LITTLE SCENE AND LISTENING TO THE MYSTERIOUS VOICE (IN->AND) SILENT WIDE EYED WONDER AND FEAR NOW ROSE HASTILY HIS FACE DEATHLY PALE WITH TREMBLING HANDS GATHERED UP THE MONEY HE HAD STAKED AND HURRYING (INTO->TO) HIS (STATE ROOM->STATEROOM) LOCKED HIMSELF IN +8280-266249-0036-375: WHAT DOES IT MEAN CRIED ONE +8280-266249-0037-376: A (VENTRILOQUIST ABOARD->VENTILOQUE QUESTERED BOARD) OF COURSE RETURNED ANOTHER LET'S FOLLOW AND SEE THE FUN +8280-266249-0038-377: I WONDER WHICH OF US IT IS REMARKED THE FIRST LOOKING HARD AT OUR PARTY I DON'T KNOW BUT COME ON +8280-266249-0039-378: THAT FELLOW NICK WARD IS A NOTED (BLACKLEG->BLACK LAG) AND RUFFIAN HAD HIS NOSE BROKEN IN A FIGHT AND IS SENSITIVE ON THE SUBJECT WAS CHEATING OF COURSE +8280-266249-0040-379: WHO ASKED THE MATE I'VE SEEN (NONE UP->NO NAP) HERE THOUGH THERE ARE SOME IN THE STEERAGE +8280-266249-0041-380: THEY HEARD HIM IN SILENCE (WITH A->WHERE THE) COOL PHLEGMATIC INDIFFERENCE MOST EXASPERATING TO ONE IN HIS PRESENT MOOD +8280-266249-0042-381: A MAN OF GIANT SIZE AND HERCULEAN STRENGTH HAD LAID ASIDE HIS PIPE AND SLOWLY RISING TO HIS FEET SEIZED THE SCOUNDREL IN HIS POWERFUL GRASP +8280-266249-0043-382: LET ME GO YELLED WARD MAKING A DESPERATE EFFORT TO FREE HIS ARMS +8280-266249-0044-383: I (DINKS->THINK) NO I (DINKS->THINK) I (DEACH->DID) YOU VON LESSON RETURNED HIS CAPTOR NOT RELAXING HIS GRASP IN THE LEAST +8280-266249-0045-384: THE GERMAN RELEASED HIS PRISONER AND THE LATTER (SLUNK->SUNK) AWAY WITH MUTTERED THREATS AND IMPRECATIONS UPON THE HEAD OF HIS TORMENTOR +8280-266249-0046-385: MISTER (LILBURN->LILLBURN) AND MISTER DALY EACH (AT->HAD) A DIFFERENT TIME SOUGHT OUT THE YOUNG MAN (WARD'S->WORDS) INTENDED VICTIM AND TRIED TO INFLUENCE HIM FOR GOOD +8280-266249-0047-386: YET THERE WAS GAMBLING AGAIN THE SECOND NIGHT BETWEEN WARD AND SEVERAL OTHERS OF HIS (PROFESSION->PROFESSIONS) +8280-266249-0048-387: THEY KEPT IT UP TILL AFTER MIDNIGHT +8280-266249-0049-388: THEN MISTER (LILBURN->LOWBORNE) WAKING FROM HIS FIRST SLEEP IN A (STATEROOM->STATE ROOM) NEAR BY THOUGHT HE WOULD BREAK IT UP ONCE MORE +8280-266249-0050-389: AN INTENSE VOICELESS EXCITEMENT POSSESSED THE PLAYERS FOR THE GAME WAS A CLOSE ONE AND (THE STAKES->MISTAKES) WERE VERY HEAVY +8280-266249-0051-390: (THEY BENT->THEY'VE BEEN) EAGERLY OVER THE BOARD EACH WATCHING WITH FEVERISH ANXIETY HIS COMPANION'S MOVEMENTS EACH CASTING NOW AND AGAIN A GLOATING EYE UPON THE HEAP OF GOLD AND GREENBACKS THAT LAY BETWEEN THEM AND AT TIMES HALF STRETCHING OUT HIS HAND TO CLUTCH IT +8280-266249-0052-391: A DEEP GROAN STARTLED THEM AND THEY SPRANG TO THEIR FEET PALE AND TREMBLING WITH SUDDEN TERROR EACH HOLDING HIS BREATH AND STRAINING HIS EAR TO CATCH A REPETITION OF THE DREAD SOUND +8280-266249-0053-392: BUT (ALL WAS->ALWAYS) SILENT AND AFTER A MOMENT OF ANXIOUS WAITING THEY SAT DOWN TO THEIR GAME AGAIN TRYING TO CONCEAL AND SHAKE OFF THEIR FEARS (WITH A->TO THE) FORCED UNNATURAL LAUGH +8280-266249-0054-393: IT CAME FROM UNDER THE TABLE GASPED (WARD->HORN) LOOK WHAT'S THERE LOOK (*->TO) YOURSELF +8280-266249-0055-394: WHAT CAN IT HAVE BEEN THEY ASKED EACH OTHER +8280-266249-0056-395: OH NONSENSE WHAT FOOLS WE ARE +8280-266249-0057-396: IT WAS 
THE LAST GAME OF CARDS FOR THAT TRIP +8280-266249-0058-397: THE CAPTAIN COMING IN SHORTLY AFTER THE SUDDEN FLIGHT OF THE GAMBLERS TOOK CHARGE OF THE MONEY AND THE NEXT DAY RESTORED IT TO THE OWNERS +8280-266249-0059-398: TO ELSIE'S OBSERVANT EYES IT PRESENTLY BECAME EVIDENT THAT THE (DALYS WERE IN->DAILIES RAN) VERY (STRAITENED->STRAIGHT IN) CIRCUMSTANCES +8280-266249-0060-399: OH HOW KIND HOW VERY KIND MISSUS (DALY->DALEY) SAID WITH TEARS OF JOY AND GRATITUDE WE HAVE HARDLY KNOWN HOW WE SHOULD MEET THE MOST NECESSARY EXPENSES OF THIS TRIP BUT HAVE BEEN TRYING TO CAST OUR CARE UPON THE LORD ASKING HIM TO PROVIDE +8280-266249-0061-400: AND HOW WONDERFULLY HE HAS ANSWERED OUR PETITIONS +8280-266249-0062-401: ELSIE ANSWERED PRESSING HER HAND AFFECTIONATELY (ART->ARE) WE NOT SISTERS IN CHRIST +8280-266249-0063-402: YE ARE ALL THE CHILDREN OF GOD BY FAITH IN CHRIST JESUS +8280-266249-0064-403: (YE ARE->YEAR) ALL ONE (IN->AND) CHRIST JESUS +8280-266249-0065-404: WE (FEEL->SEE ON) MY HUSBAND AND I THAT WE ARE ONLY THE STEWARDS OF HIS BOUNTY AND (THAT->*) BECAUSE HE HAS SAID INASMUCH AS YE HAVE DONE IT UNTO ONE OF THE LEAST OF THESE MY BRETHREN YE HAVE DONE IT UNTO ME IT IS THE GREATEST PRIVILEGE AND DELIGHT TO DO ANYTHING FOR HIS PEOPLE +8461-258277-0000-1649: WHEN IT WAS THE SEVEN (HUNDRED->HUNDREDTH) AND EIGHTEENTH NIGHT +8461-258277-0001-1650: BUT HE ANSWERED NEEDS (MUST I HAVE ZAYNAB ALSO->MICE THY HALVES THINE APPLES SAY) NOW (SUDDENLY->CERTAINLY) THERE CAME A RAP AT THE DOOR AND THE MAID SAID WHO IS AT THE DOOR +8461-258277-0002-1651: THE KNOCKER REPLIED (KAMAR->COME ALL) DAUGHTER (OF AZARIAH->VASSARIAH) THE JEW SAY ME IS ALI OF CAIRO WITH YOU +8461-258277-0003-1652: REPLIED THE BROKER'S DAUGHTER O THOU DAUGHTER OF A DOG +8461-258277-0004-1653: (AND->ON) HAVING THUS (ISLAMISED->ISLAMIZED) SHE ASKED HIM (DO->TWO) MEN IN THE FAITH OF (AL ISLAM GIVE->ALI SLAM GAVE) MARRIAGE PORTIONS TO WOMEN OR (DO->TWO) WOMEN (DOWER->TO OUR) MEN +8461-258277-0005-1654: AND SHE THREW DOWN THE JEW'S HEAD BEFORE HIM +8461-258277-0006-1655: NOW THE CAUSE OF HER SLAYING HER SIRE WAS AS FOLLOWS +8461-258277-0007-1656: THEN HE (SET->SAT) OUT REJOICING TO RETURN TO THE BARRACK OF THE (FORTY->FORTE) +8461-258277-0008-1657: SO HE ATE AND FELL DOWN SENSELESS FOR THE SWEETMEATS WERE DRUGGED WITH (BHANG->BANG) WHEREUPON THE KAZI BUNDLED HIM INTO THE SACK AND MADE OFF WITH HIM CHARGER AND CHEST AND ALL TO THE BARRACK OF THE (FORTY->FORTE) +8461-258277-0009-1658: PRESENTLY (HASAN SHUMAN->HER SON SCHUMANN) CAME OUT OF A (CLOSET->CLOTH) AND SAID TO HIM HAST THOU GOTTEN (THE GEAR->AGAIN) O ALI +8461-258277-0010-1659: SO HE TOLD HIM WHAT HAD BEFALLEN HIM AND ADDED IF I KNOW (WHITHER->WHETHER) THE RASCAL IS GONE AND WHERE TO FIND THE KNAVE I WOULD PAY HIM OUT +8461-258277-0011-1660: KNOWEST THOU WHITHER HE WENT +8461-258277-0012-1661: ANSWERED HASAN I KNOW WHERE HE IS AND OPENING THE DOOR OF THE CLOSET SHOWED HIM THE (SWEETMEAT SELLER->SWEETMEAT'S CELLAR) WITHIN DRUGGED AND SENSELESS +8461-258277-0013-1662: SO I WENT ROUND ABOUT THE HIGHWAYS OF THE CITY TILL I MET A SWEETMEAT (SELLER->CELLAR) AND BUYING HIS CLOTHES AND STOCK IN TRADE AND GEAR FOR TEN DINARS DID WHAT WAS DONE +8461-258277-0014-1663: QUOTH (AL RASHID->A RASCHID) WHOSE HEAD IS THIS +8461-258277-0015-1664: SO (ALI->I) RELATED TO HIM ALL THAT (HAD->*) PASSED FROM FIRST (TO->*) LAST AND THE CALIPH SAID I (HAD->HATE) NOT THOUGHT THOU WOULDST KILL HIM FOR THAT HE WAS A SORCERER +8461-258277-0016-1665: HE REPLIED I HAVE FORTY LADS BUT THEY ARE IN CAIRO 
+8461-278226-0000-1633: AND LAURA HAD HER OWN PET PLANS +8461-278226-0001-1634: SHE MEANT TO BE SCRUPULOUSLY CONSCIENTIOUS IN THE ADMINISTRATION OF (HER->A) TALENTS AND SOMETIMES AT CHURCH ON A SUNDAY WHEN THE (SERMON->SIMON) WAS PARTICULARLY AWAKENING SHE MENTALLY DEBATED (THE->A) SERIOUS QUESTION AS TO (WHETHER->WHERE THE) NEW (BONNETS->BONNET) AND A PAIR OF (JOUVIN'S->JUBAUN'S) GLOVES DAILY WERE NOT SINFUL BUT I THINK SHE DECIDED THAT THE NEW BONNETS AND GLOVES WERE ON THE WHOLE A (PARDONABLE->PIONABLE) WEAKNESS AS BEING GOOD FOR TRADE +8461-278226-0002-1635: ONE MORNING LAURA TOLD HER HUSBAND WITH A GAY LAUGH THAT SHE WAS GOING TO VICTIMIZE HIM BUT HE WAS TO PROMISE TO BE PATIENT AND BEAR WITH HER FOR ONCE IN A WAY +8461-278226-0003-1636: I WANT TO SEE ALL THE PICTURES THE MODERN PICTURES ESPECIALLY +8461-278226-0004-1637: I REMEMBER ALL THE (RUBENSES AT->RUBEN SAYS THAT) THE LOUVRE FOR I SAW THEM (THREE->FOR) YEARS AGO WHEN I WAS STAYING IN PARIS WITH GRANDPAPA +8461-278226-0005-1638: SHE RETURNED IN A LITTLE MORE THAN TEN MINUTES IN THE FRESHEST TOILETTE ALL PALE SHIMMERING BLUE LIKE THE SPRING SKY WITH (PEARL GREY->PEAR GRAY) GLOVES AND BOOTS AND PARASOL AND A BONNET THAT SEEMED MADE OF (AZURE->USURE) BUTTERFLIES +8461-278226-0006-1639: (IT->HE) WAS DRAWING TOWARDS THE CLOSE OF THIS DELIGHTFUL HONEYMOON TOUR AND IT WAS A BRIGHT SUNSHINY MORNING EARLY IN FEBRUARY BUT FEBRUARY IN PARIS IS SOMETIMES BETTER THAN APRIL IN LONDON +8461-278226-0007-1640: BUT SHE FIXED UPON A PICTURE WHICH SHE SAID SHE PREFERRED TO ANYTHING SHE HAD SEEN IN THE GALLERY +8461-278226-0008-1641: PHILIP (JOCELYN->JOSCELYN) WAS EXAMINING SOME PICTURES ON THE OTHER SIDE OF THE ROOM WHEN HIS WIFE MADE (THIS->THE) DISCOVERY +8461-278226-0009-1642: HOW I WISH YOU COULD GET ME A COPY OF THAT PICTURE (PHILIP->FILLIP) LAURA SAID ENTREATINGLY +8461-278226-0010-1643: I SHOULD SO LIKE ONE TO HANG IN MY MORNING ROOM (AT JOCELYN'S ROCK->A JOSCELYN STRUCK) +8461-278226-0011-1644: SHE TURNED TO THE FRENCH (ARTIST->ARD THIS) PRESENTLY AND ASKED HIM WHERE THE ELDER MISTER (KERSTALL->KIRSTALL) LIVED AND IF THERE WAS ANY POSSIBILITY OF SEEING HIM +8461-278226-0012-1645: THEY HAVE SAID THAT HE IS EVEN A LITTLE IMBECILE THAT HE DOES NOT REMEMBER HIMSELF OF THE MOST COMMON EVENTS OF HIS LIFE +8461-278226-0013-1646: BUT THERE ARE SOME OTHERS WHO SAY THAT HIS MEMORY HAS NOT ALTOGETHER FAILED AND THAT HE (IS->*) STILL ENOUGH HARSHLY CRITICAL TOWARDS THE WORKS OF OTHERS +8461-278226-0014-1647: I DON'T THINK YOU WILL HAVE ANY DIFFICULTY IN FINDING THE HOUSE +8461-278226-0015-1648: YOU WILL BE (DOING->BETWEEN) ME SUCH A FAVOUR (PHILIP->FELLOW) IF (YOU'LL->YOU) SAY YES +8461-281231-0000-1594: HIS FOLLOWERS (RUSHED->RUSH) FORWARD (TO->*) WHERE HE LAY AND THEIR UNITED FORCE COMPELLING THE BLACK (KNIGHT->NIGHT) TO PAUSE THEY DRAGGED (THEIR->THE) WOUNDED LEADER WITHIN THE WALLS +8461-281231-0001-1595: IT WAS ON THEIR JOURNEY TO THAT TOWN THAT THEY WERE OVERTAKEN ON THE ROAD BY (CEDRIC->SADRIC) AND HIS PARTY IN WHOSE COMPANY THEY WERE AFTERWARDS CARRIED CAPTIVE TO THE (CASTLE->COUNCIL) OF (TORQUILSTONE->TORCLESTONE) +8461-281231-0002-1596: (AS HE->I SEE) LAY UPON HIS BED (RACKED->RAT) WITH PAIN AND (MENTAL->MANTLE) AGONY AND FILLED WITH THE FEAR OF RAPIDLY APPROACHING DEATH HE HEARD A VOICE ADDRESS HIM +8461-281231-0003-1597: WHAT ART THOU HE EXCLAIMED IN TERROR +8461-281231-0004-1598: LEAVE ME AND SEEK THE SAXON (WITCH ULRICA->WHICH OVERREKA) WHO WAS MY TEMPTRESS LET HER AS WELL AS I TASTE THE TORTURES WHICH ANTICIPATE HELL 
+8461-281231-0005-1599: EXCLAIMED THE NORMAN (HO->OH) +8461-281231-0006-1600: (REMEMBEREST->REMEMBER AS) THOU THE MAGAZINE OF FUEL THAT IS (STORED->STOLE) BENEATH THESE APARTMENTS WOMAN +8461-281231-0007-1601: THEY ARE FAST RISING AT LEAST SAID (ULRICA->A RIKA) AND A SIGNAL SHALL SOON WAVE (TO WARN->TOWARD) THE BESIEGERS TO PRESS HARD UPON THOSE WHO WOULD EXTINGUISH THEM +8461-281231-0008-1602: MEANWHILE THE BLACK KNIGHT HAD LED HIS FORCES AGAIN TO THE ATTACK AND SO VIGOROUS WAS THEIR ASSAULT THAT BEFORE LONG THE GATE OF THE CASTLE ALONE SEPARATED THEM FROM THOSE WITHIN +8461-281231-0009-1603: THE DEFENDERS (FINDING->FIND IN) THE CASTLE TO BE ON FIRE NOW DETERMINED TO SELL THEIR LIVES AS (DEARLY->DAILY) AS THEY COULD AND HEADED BY (DE BRACY->THE BRAZY) THEY THREW OPEN THE GATE AND WERE AT ONCE INVOLVED IN A TERRIFIC CONFLICT WITH THOSE OUTSIDE +8461-281231-0010-1604: THE BLACK (KNIGHT->NIGHT) WITH (PORTENTOUS->POTENTAL) STRENGTH (FORCED HIS WAY INWARD->FORCES AWAY IN WOOD) IN DESPITE OF (DE->THE) BRACY AND HIS FOLLOWERS +8461-281231-0011-1605: TWO OF THE FOREMOST INSTANTLY FELL AND THE REST GAVE WAY NOTWITHSTANDING ALL (THEIR LEADERS->THE LEADER'S) EFFORTS TO STOP THEM +8461-281231-0012-1606: THE BLACK (KNIGHT->NIGHT) WAS SOON ENGAGED IN DESPERATE COMBAT WITH THE NORMAN CHIEF AND (THE VAULTED->DEVOTED) ROOF OF THE HALL RUNG WITH (THEIR->A) FURIOUS BLOWS +8461-281231-0013-1607: AT LENGTH (DE->THE) BRACY FELL +8461-281231-0014-1608: TELL ME THY NAME OR WORK THY PLEASURE ON ME +8461-281231-0015-1609: YET FIRST LET ME SAY SAID (DE BRACY->DEBRACY) WHAT (IT->DID) IMPORTS THEE TO KNOW +8461-281231-0016-1610: EXCLAIMED THE BLACK KNIGHT PRISONER AND PERISH +8461-281231-0017-1611: THE LIFE OF EVERY MAN IN THE CASTLE SHALL ANSWER IT IF A HAIR OF HIS HEAD BE SINGED SHOW ME HIS CHAMBER +8461-281231-0018-1612: RAISING THE WOUNDED MAN WITH (EASE->THESE) THE BLACK KNIGHT RUSHED WITH (HIM->THEM) TO THE (POSTERN->PASTING) GATE AND HAVING THERE DELIVERED HIS BURDEN TO THE CARE OF TWO (YEOMEN->YEOMAN) HE AGAIN ENTERED THE CASTLE TO ASSIST IN THE RESCUE OF (THE OTHER->THAT A) PRISONERS +8461-281231-0019-1613: BUT IN OTHER PARTS THE BESIEGERS PURSUED THE DEFENDERS OF THE CASTLE FROM CHAMBER TO CHAMBER AND SATIATED IN (THEIR->THE) BLOOD THE VENGEANCE WHICH HAD LONG ANIMATED THEM AGAINST THE SOLDIERS OF THE TYRANT FRONT DE BOEUF +8461-281231-0020-1614: AS THE FIRE (COMMENCED->COMMANDS) TO SPREAD RAPIDLY THROUGH ALL PARTS OF THE CASTLE (ULRICA->OR RICA) APPEARED ON ONE OF THE TURRETS +8461-281231-0021-1615: BEFORE LONG THE TOWERING FLAMES HAD SURMOUNTED EVERY OBSTRUCTION AND ROSE TO THE EVENING SKIES (ONE->WHEN) HUGE AND BURNING BEACON (SEEN->SEEMED) FAR AND WIDE THROUGH THE ADJACENT COUNTRY (TOWER->TOWERED) AFTER TOWER CRASHED DOWN WITH BLAZING ROOF AND RAFTER +8461-281231-0022-1616: AT LENGTH WITH A TERRIFIC CRASH THE WHOLE (TURRET->TORROR) GAVE WAY AND SHE PERISHED IN (THE->*) FLAMES WHICH (HAD->I) CONSUMED HER TYRANT +8461-281231-0023-1617: WHEN THE OUTLAWS HAD DIVIDED THE SPOILS WHICH THEY HAD TAKEN FROM THE CASTLE OF (TORQUILSTONE->TORKILSTONE) CEDRIC PREPARED TO TAKE HIS DEPARTURE +8461-281231-0024-1618: HE LEFT THE GALLANT BAND OF FORESTERS SORROWING DEEPLY FOR HIS LOST FRIEND THE LORD OF (CONINGSBURGH->CUNNINGSBURG) AND HE AND HIS FOLLOWERS HAD SCARCE DEPARTED WHEN A PROCESSION MOVED SLOWLY FROM UNDER THE GREENWOOD BRANCHES IN THE DIRECTION WHICH HE HAD TAKEN IN THE CENTRE OF WHICH WAS THE CAR IN WHICH THE BODY OF (ATHELSTANE->ADDSTEIN) WAS LAID +8461-281231-0025-1619: (DE BRACY->DEBRACY) BOWED LOW AND IN 
SILENCE THREW HIMSELF UPON A HORSE AND GALLOPED OFF THROUGH THE (WOOD->WOODS) +8461-281231-0026-1620: HERE IS A BUGLE WHICH AN ENGLISH YEOMAN HAS ONCE WORN I PRAY YOU TO KEEP IT AS A MEMORIAL OF YOUR GALLANT BEARING +8461-281231-0027-1621: SO SAYING HE MOUNTED HIS STRONG WAR HORSE AND RODE OFF THROUGH THE FOREST +8461-281231-0028-1622: DURING ALL THIS TIME ISAAC OF YORK SAT MOURNFULLY APART GRIEVING FOR THE LOSS OF HIS (DEARLY->STEELY) LOVED DAUGHTER REBECCA +8461-281231-0029-1623: AND WITH THIS EPISTLE (THE UNHAPPY->THEN HAPPY) OLD MAN SET OUT TO PROCURE HIS DAUGHTER'S LIBERATION +8461-281231-0030-1624: THE TEMPLAR IS FLED SAID (DE BRACY->THE BRACEY) IN ANSWER TO THE PRINCE'S EAGER QUESTIONS (FRONT DE BOEUF->FROM THE BIRTH) YOU WILL NEVER SEE MORE AND HE ADDED IN A LOW AND EMPHATIC TONE RICHARD IS IN ENGLAND I HAVE SEEN HIM AND SPOKEN WITH HIM +8461-281231-0031-1625: HE APPEALED TO (DE BRACY->THE BRACELE) TO ASSIST HIM IN (THIS->HIS) PROJECT AND BECAME AT ONCE DEEPLY SUSPICIOUS OF THE (KNIGHT'S->NIGHT'S) LOYALTY TOWARDS HIM WHEN HE DECLINED TO LIFT HAND AGAINST THE MAN WHO HAD SPARED HIS OWN LIFE +8461-281231-0032-1626: BEFORE REACHING (HIS->ITS) DESTINATION HE WAS TOLD THAT LUCAS (DE BEAUMANOIR->THE BOURMANOIR) THE GRAND MASTER OF THE ORDER OF THE TEMPLARS WAS THEN ON VISIT TO (THE->THEIR) PRECEPTORY +8461-281231-0033-1627: HE HAD NOT UNTIL THEN BEEN INFORMED (OF->TO) THE PRESENCE OF THE JEWISH MAIDEN IN THE ABODE OF THE TEMPLARS AND GREAT WAS HIS FURY AND INDIGNATION ON LEARNING THAT SHE WAS AMONGST THEM +8461-281231-0034-1628: POOR ISAAC WAS HURRIED OFF ACCORDINGLY AND EXPELLED FROM THE PRECEPTORY ALL HIS ENTREATIES AND EVEN HIS OFFERS UNHEARD AND DISREGARDED +8461-281231-0035-1629: THE ASSURANCE THAT SHE POSSESSED SOME FRIEND IN (THIS->HIS) AWFUL ASSEMBLY GAVE (HER->A) COURAGE TO LOOK (AROUND->ROUND) AND TO MARK INTO WHOSE PRESENCE SHE HAD BEEN CONDUCTED +8461-281231-0036-1630: SHE GAZED ACCORDINGLY UPON A SCENE WHICH MIGHT WELL HAVE STRUCK TERROR INTO A BOLDER HEART THAN HERS +8461-281231-0037-1631: AT HIS FEET WAS PLACED (A->THE) TABLE OCCUPIED BY TWO SCRIBES WHOSE DUTY (IT->*) WAS TO RECORD THE PROCEEDINGS OF THE DAY +8461-281231-0038-1632: THE PRECEPTORS OF WHOM (THERE->THEY) WERE FOUR PRESENT OCCUPIED SEATS BEHIND (THEIR->THE) SUPERIORS AND BEHIND THEM STOOD THE ESQUIRES OF THE ORDER (ROBED->ROPED) IN WHITE + +SUBSTITUTIONS: count ref -> hyp +43 THE -> A +35 AND -> IN +32 A -> THE +29 IN -> AND +13 ANNIE -> ANY +13 AN -> AND +10 THIS -> THE +10 THE -> TO +9 TO -> THE +9 THEIR -> THE +9 LESLIE -> LISLEY +9 DICKIE -> DICKY +9 DE -> THE +8 THAT -> THE +7 I -> AND +7 HER -> A +6 THIS -> HIS +6 THEY -> THERE +6 THE -> THIS +6 THE -> THEY +6 OF -> A +6 MURDOCH -> MURDOCK +6 LARCH -> LARGE +6 ARCHY -> ARCHIE +5 UPON -> UP +5 THE -> THEIR +5 THE -> THAT +5 SIGURD -> CIGARET +5 SHARRKAN -> SHARKAN +5 SET -> SAID +5 ORGANISER -> ORGANIZER +5 OR -> OF +5 KINE -> KIND +5 IZZY -> IS +5 IS -> AS +5 AND -> AN +5 A -> TO +4 YOU'RE -> YOU +4 WAS -> IS +4 TOO -> TO +4 THIS -> THESE +4 THESE -> THIS +4 REGIN -> RIGAN +4 OL -> OLD +4 N'T -> NOT +4 MISSUS -> MISS +4 MAN -> MEN +4 KNOW -> NO +4 KNIGHT -> NIGHT +4 IT -> HE +4 IT -> A +4 INTERESTS -> INTEREST +4 IM -> HIM +4 HATH -> HAD +4 HAS -> HAD +4 HAD -> AND +4 DEFENSE -> DEFENCE +4 BRAHMAN -> BRAMIN +4 AT -> THAT +4 AROUND -> ROUND +4 A -> I +4 A -> HER +3 ZAU -> ZAO +3 WOULD -> WILL +3 WILDERNESS -> WIDERNESS +3 WHEN -> WITH +3 WHEN -> AND +3 WHAT -> BUT +3 WERE -> WITH +3 WERE -> WHERE +3 TRY -> TRIED +3 THEY -> THE +3 THERE -> THEY +3 THERE -> 
THERE'S +3 THERE -> THEN +3 THERE -> THEIR +3 THE -> THEM +3 THAT -> AT +3 SET -> SAT +3 SANCT -> SAINT +3 RAYSTOKE -> RAYSTROKE +3 PRIORESS -> PRIORS +3 OUR -> HER +3 OR -> O +3 ON -> IN +3 OL -> ALL +3 OH -> O +3 O -> OH +3 O -> OF +3 O -> ARE +3 MURDOCH -> MARDOCK +3 MISTAH -> MISTER +3 MILICENT -> MILLISON +3 LIL -> LITTLE +3 LEVER -> LOVER +3 JES -> JUST +3 ITS -> HIS +3 IF -> OF +3 I -> I'VE +3 I -> I'M +3 HIM -> EM +3 HERMON -> HERMANN +3 HERMON -> HERMAN +3 HEAR -> HERE +3 HE'S -> IS +3 HE -> HE'D +3 HAS -> HATH +3 GOING -> GOIN +3 FROM -> FOR +3 FAUVENT -> PREVENT +3 FAUVENT -> FERVENT +3 FAUCHELEVENT -> FOR +3 CENTER -> CENTRE +3 BEFEL -> BEFELL +3 BAGHDAD -> BAGDAD +3 AT -> A +3 AS -> IS +3 ARSINOE -> ARSENO +3 ANYONE -> ANY +3 AND -> ON +3 AND -> A +3 AN -> IN +3 AIN'T -> AND +3 A -> IT +3 A -> AT +3 A -> AND +2 ZARATHUSTRA -> THE +2 YOUR -> YOU'RE +2 YOUR -> YOU +2 YOUR -> THE +2 YOU'VE -> YOU +2 YOU'LL -> YOU +2 YOU -> HE +2 YO -> YOU +2 YER -> YOU +2 WOULD -> HAD +2 WITH -> WERE +2 WITH -> WE +2 WITH -> WAS +2 WITH -> TO +2 WINTER -> WINDOW +2 WINE -> WHY +2 WILL -> WOULD +2 WILL -> WE +2 WILL -> WAS +2 WILFRID -> WILFRED +2 WHITE -> WIDE +2 WHICH -> WITCH +2 WHERE -> WITH +2 WERE -> WAS +2 WENT -> WHEN +2 WE'RE -> WE +2 WAYNE -> WAIN +2 USE -> USED +2 UP -> UPSTAIRS +2 UNDER -> AND +2 TWO -> TOO +2 TRIBE -> TIME +2 TONIGHT -> TO +2 TO -> OF +2 TO -> INTO +2 TO -> A +2 TIGLATH -> TIG +2 THROUGH -> TO +2 THOUSANDS -> THOUSAND +2 THOUGH -> THAT +2 THIS -> THAT +2 THIS -> GOT +2 THEY'RE -> THEY +2 THEY -> THEIR +2 THESE -> HIS +2 THEN -> THAT +2 THEE -> ME +2 THE -> NO +2 THAT'S -> THAT +2 THAT -> THY +2 THAN -> THEN +2 THAN -> AND +2 TAMAR -> TO +2 SYRUP -> SERF +2 STONEWALL -> STONE +2 STOKER -> STOCKER +2 SOMEONE -> SOME +2 SIZE -> SIGHS +2 SINDBAD -> SINBAD +2 SHOULDERS -> SHOULDER +2 SHERIFF -> SHARE +2 SHELL -> SHELLFISH +2 SHE -> YOU +2 SHE -> SHE'LL +2 SHAWS -> SHORES +2 SHALL -> SHOULD +2 SERGEY -> SO +2 SELLER -> CELLAR +2 SEEK -> SEE +2 SAY -> SEE +2 RUSSIA -> RATIA +2 ROUND -> AROUND +2 ROOMFELLOW -> ROOM +2 ROMANCE -> ROMANS +2 PRIORESS -> PRIESTS +2 PRIEST -> PRIESTS +2 POLL -> POLE +2 PLAIN -> PLANE +2 PLACE -> PLACED +2 PIGEONCOTE -> PIGEON +2 PHOSPHOR -> PHOSPHO +2 OR -> TO +2 ONE -> WHEN +2 ON -> AND +2 OLIVE -> I +2 OLD -> ALL +2 OH -> ALL +2 OF -> TO +2 OF -> O +2 OF -> HAVE +2 OF -> AT +2 OF -> AS +2 O -> WHO +2 NOUGHT -> NOT +2 NOT -> NOW +2 NORTHFIELD -> NORTH +2 NEAREST -> NURSE +2 MUST -> WAS +2 MISSUS -> MISTER +2 MINE -> MIGHT +2 MESTIENNE -> MESTINE +2 MESTER -> MISTER +2 MENAHEM -> MANY +2 MEN -> MAN +2 MEAT -> ME +2 M -> AM +2 LUNA -> LENA +2 LOVE -> LOVED +2 LIKED -> LIKE +2 LIDDY -> LIVY +2 LET -> THAT +2 LAST -> LOST +2 KEYS -> CASE +2 KEEP -> HE +2 KEEP -> GIVE +2 JULIEN -> JULIAN +2 JACKAL -> JACK +2 IT'S -> IS +2 IT -> THIS +2 IT -> ITS +2 IT -> IT'S +2 IS -> HIS +2 IS -> HAS +2 INTO -> TO +2 INTO -> IN +2 INSCRIPTIONS -> SCRIPTIONS +2 IN'T -> IN +2 IN -> AN +2 I'VE -> I +2 I'M -> I +2 I'LL -> I +2 I'D -> I'VE +2 I'D -> I +2 I -> AS +2 HOZE -> HOSE +2 HORSTIUS -> HORSES +2 HONOR -> HONOUR +2 HO -> OH +2 HIS -> THIS +2 HIS -> THE +2 HIS -> ITS +2 HIS -> IS +2 HERE -> HEAR +2 HER -> THE +2 HE -> YOU +2 HE -> SHE +2 HE -> IT +2 HE -> HIS +2 HAYS -> HAYES +2 HAVE -> HAD +2 HAS -> IS +2 HAS -> AS +2 HANDS -> HAND +2 HAID -> HEAD +2 HAD -> I +2 GURR -> GURG +2 GURR -> GORE +2 GURR -> GIRL +2 GRAEME -> GRAHAM +2 GOD -> GOT +2 GIVING -> GIVEN +2 FRANZ -> FRANCE +2 FORTY -> FORTE +2 FOLLOWED -> FOLLOW +2 FLEROV'S -> FLIROV'S +2 FESTAL -> FEAST +2 
FAVOR -> FAVOUR +2 FAUVENT -> FOR +2 FAUCHELEVENT -> FOUCHELEVENT +2 EXECUTIVE -> EXECUTED +2 ETERNAL -> HAD +2 ENOUGH -> UP +2 END -> AND +2 E'S -> HE +2 DONE -> TURNED +2 DONE -> DON +2 DOG -> DOOR +2 DO -> TWO +2 DO -> TO +2 DINKS -> THINK +2 DIDN'T -> THEN +2 DESSERTS -> DESERTS +2 DE -> DEBRACY +2 COUNTRY -> COUNTRIES +2 COUNSEL -> COUNCIL +2 COMMONWEALTH -> CORNWEALTH +2 CINDERLAD -> CINDER +2 CARROLL -> CAROL +2 BUT -> THAT +2 BULK -> BARK +2 BRUCE -> BRUSH +2 BROTHERS -> BROTHER'S +2 BESSY -> BUSY +2 BEG -> THEY +2 BEFORE -> FOR +2 BEEN -> THEN +2 BEALE -> BEE +2 AWK -> AWKWARD +2 AWHILE -> A +2 AT -> TO +2 AT -> IT +2 AT -> IN +2 AT -> AUNT +2 ASKED -> AS +2 ARE -> OUR +2 ARE -> OR +2 ARE -> A +2 ANY -> AND +2 AND -> THEN +2 ALMS -> ARMS +2 ALL -> ON +2 AL -> A +2 A -> HAVE +2 A -> AN +1 ZEMSTVOS -> SEND +1 ZAYNAB -> THINE +1 ZAU -> ZOUAL +1 ZAU -> ZAWAL +1 ZAU -> THOUA +1 ZAU -> THOU +1 ZAU -> OWL +1 ZARATHUSTRA -> TOO +1 ZARATHUSTRA -> THEIR +1 ZARATHUSTRA -> OBJECT +1 ZARATHUSTRA -> ARTISTRA +1 YUSS -> YES +1 YOUR -> THEIR +1 YOUR -> OUR +1 YOUR -> HE +1 YOUNG -> OWN +1 YOU'VE -> YOUR +1 YOU'D -> YOU +1 YOU -> YOURSELVES +1 YOU -> YOURS +1 YOU -> YOUR +1 YOU -> YOU'RE +1 YOU -> YOU'LL +1 YOU -> YE +1 YOU -> OBLIGED +1 YOU -> JULIA +1 YOU -> IT +1 YOU -> EVEN +1 YOU -> EURE +1 YOU -> EUGEUM +1 YO'LL -> YOU'LL +1 YO'LL -> YOU +1 YO' -> YOU +1 YO -> YOU'LL +1 YET -> IN +1 YET -> HE +1 YEP -> HE +1 YEOMEN -> YEOMAN +1 YELLS -> YEARS +1 YEARS -> EARS +1 YEAR -> YOUR +1 YE -> YOU +1 YE -> YEAR +1 YARD -> AND +1 YAHWEH -> YANAWAY +1 WYLDER'S -> WILDER'S +1 WUNNERED -> WANTED +1 WROTE -> ONES +1 WRITE -> RIGHT +1 WRIT -> WRITE +1 WRETCH -> THATCH +1 WOUNDS -> WINDS +1 WOULD -> WERE +1 WOULD -> DID +1 WOT -> WHAT +1 WORTH -> WORSE +1 WORSHIPPERS -> WORSE +1 WORSHIP'S -> WORSHIP +1 WORRY -> WERE +1 WORLD -> WOOLWRIGHT +1 WORKS -> WORK +1 WORKMAN -> WORKMEN +1 WORKINGMEN -> WORKING +1 WORKED -> WORK +1 WORK -> WORKADAY +1 WORK -> WORD +1 WORD -> WORLD +1 WORD -> WORDS +1 WOODSON -> WOODS +1 WOOD -> WOODS +1 WONT -> WANT +1 WONDERED -> WOUNDED +1 WONDERED -> WANTED +1 WONDERED -> WANDERED +1 WONDER -> WANDER +1 WOMEN -> WOMAN +1 WOMAN -> WOMEN +1 WOKE -> WALKING +1 WOES -> THOSE +1 WOE -> WE'LL +1 WODE'S -> WORDS +1 WITHAL -> WITH +1 WITH -> WOULD +1 WITH -> WIS +1 WITH -> WIDTH +1 WITH -> WI +1 WITH -> WHETHER +1 WITH -> WHERE +1 WITH -> ITS +1 WITH -> FOR +1 WITH -> FIT +1 WITCH -> WHICH +1 WISHT -> WISHED +1 WIRES -> WISE +1 WINTERS -> WINTERSPIN +1 WINKED -> WAITED +1 WING -> WINGED +1 WINE -> WHITE +1 WINDOW -> WIND +1 WILLY -> BILLY +1 WILL -> WITHOUT +1 WILL -> WILT +1 WILL -> WHO +1 WILL -> WHERE +1 WILL -> WHEN +1 WILL -> WHEEL +1 WILL -> WALLA +1 WILL -> TO +1 WILL -> OLY +1 WILL -> OH +1 WILKSES -> WILKS +1 WILKS -> WILKES +1 WILFRID -> WILL +1 WILFRID -> WILFRIED +1 WILFRID -> FEAJANCOTT +1 WILDLY -> WIDELY +1 WILDEST -> WALLACE +1 WILDERNESS -> WEARINESS +1 WILDERNESS -> LITERN +1 WILD -> WHITE +1 WIFE -> WIF +1 WIFE -> MY +1 WIDEAWAKE -> WIDE +1 WICKER'S -> JOKERS +1 WICKER -> WICKER'S +1 WI -> WITH +1 WHOM -> WHY +1 WHO'D -> WHO +1 WHO -> WITH +1 WHO -> WHOSE +1 WHO -> THE +1 WHO -> ONE +1 WHO -> HER +1 WHO -> AND +1 WHITHER -> WHETHER +1 WHITE -> WHY +1 WHISKERED -> WAS +1 WHILOME -> WILL +1 WHILE -> WIDE +1 WHILE -> WHERE +1 WHICH -> WOULD +1 WHICH -> PITCHES +1 WHETHER -> WHITHER +1 WHETHER -> WHERE +1 WHETHER -> WEATHER +1 WHEREABOUTS -> WHEREABOUT +1 WHERE -> WILL +1 WHERE -> WEAR +1 WHERE -> THERE +1 WHERE -> HER +1 WHER -> WERE +1 WHEN -> ONE +1 WHEELER -> WHALER +1 
WHATE'ER -> WHATEVER +1 WHAT'S -> ONCE +1 WHAT -> WOULD +1 WHAT -> WITH +1 WHAT -> WHICH +1 WHAT -> WHEN +1 WHAT -> IT +1 WHAT -> FOR +1 WHAT -> AT +1 WHAT -> ALL +1 WHAT -> A +1 WHACKS -> WAX +1 WESTPORT -> WESTBURT +1 WERE -> YOU +1 WERE -> WHEN +1 WERE -> WENT +1 WERE -> WED +1 WERE -> RAN +1 WENT -> ACT +1 WELSH -> WELL +1 WELLS -> WELL +1 WELLS -> WALES +1 WELL -> WHY +1 WELL -> WHILE +1 WELL -> AWAY +1 WEEVILLY -> WEEBLY +1 WEEKLY -> WEAKLY +1 WEEDS -> REEDS +1 WEBB'S -> WHIPS +1 WEAK -> WEEK +1 WE'VE -> WE +1 WE'RE -> WERE +1 WE'RE -> HE +1 WE -> WILL +1 WE -> WASTER +1 WE -> REPROVE +1 WE -> REED +1 WE -> REALLY +1 WAYNE -> WANE +1 WAY -> IN +1 WAVERLY -> WAVERLEY +1 WATONWAN -> WATERWIN +1 WATER -> WALL +1 WATCHMAKER'S -> WATCHMAKERS +1 WATCHED -> WAS +1 WASN'T -> WAS +1 WAS -> WITH +1 WAS -> WILL +1 WAS -> WHICH +1 WAS -> US +1 WAS -> THIS +1 WAS -> BLOODED +1 WARS -> WALLS +1 WARDERS -> OURS +1 WARD'S -> WORDS +1 WARD -> HORN +1 WANTS -> ONCE +1 WANTON -> WARRENTON +1 WANTED -> WATER +1 WANTED -> WANTON +1 WANT -> WARNED +1 WANT -> WANTS +1 WANDERERS -> WONDERERS +1 WANDERER -> WONDER +1 WALLS -> WARDS +1 WALL -> WAR +1 WAKE -> AWAKE +1 WAITING -> WINNING +1 WAITIN -> WAITING +1 WAIT -> WHERE +1 WAGGOT -> RAGGED +1 WAGGING -> WORKING +1 WADED -> HELD +1 WADDED -> WATERED +1 VOUGHT -> VAULT +1 VOMITING -> RHOMETTING +1 VOICED -> VOICE +1 VIOLENCE -> ONLENETS +1 VIOLENCE -> IDENTS +1 VINTAGE -> VENTAGE +1 VILLAGES -> RELIGIOUS +1 VILLAGERS -> VILLAGES +1 VIL -> VILLE +1 VIGILANCE -> VISIONS +1 VIGILANCE -> VICHILLENZ +1 VESTRY -> VETCHERY +1 VERSES -> VERSE +1 VERILY -> VERY +1 VENTRILOQUIST -> VENTILOQUE +1 VEIL -> VEAL +1 VEHEMENTLY -> TO +1 VAVASOUR -> VAVASOR +1 VAULT -> VOLT +1 VAUGIRARD -> VIGORE +1 VAST -> VATS +1 VASSILIEVITCH -> ON +1 VALET -> VALLED +1 UTTER -> OTHER +1 USEFUL -> YEARS +1 US -> UP +1 US -> STARED +1 US -> ITS +1 US -> FOR +1 US -> DISOUT +1 URARTU -> YOU +1 URARTU -> HERE +1 URARTIAN -> RACIAN +1 UP -> UPON +1 UP -> THAT +1 UP -> NAP +1 UNTO -> TO +1 UNTO -> ON +1 UNTO -> IN +1 UNTO -> AND +1 UNTIL -> INTO +1 UNREWARDED -> AND +1 UNLESS -> AND +1 UNHAPPY -> HAPPY +1 UNDERTAKER'S -> UNDERTAKERS +1 UNCLE -> AND +1 UNADULTERATED -> AN +1 UN -> ONE +1 ULTIMATELY -> ULTIMATE +1 ULRICA -> OVERREKA +1 ULRICA -> OR +1 ULRICA -> A +1 UKINZER -> A +1 UDDER -> UTTER +1 TYRANNY -> SOON +1 TWYMAN'S -> TWINS +1 TWO -> TO +1 TWO -> DO +1 TWAS -> TOWARDS +1 TURRET -> TORROR +1 TURNS -> TURNED +1 TURNING -> SHIRTING +1 TUBERCULOUS -> TIBERICAN'S +1 TRY -> TROUT +1 TRY -> TRIFLE +1 TRY -> TRIES +1 TRUSTEE -> TRUSTY +1 TRUNDLED -> TUMBLED +1 TRULY -> JULIE +1 TRIVET -> TRIBUT +1 TRIPES -> TRIPE'S +1 TRIFLE -> TRAVEL +1 TRIBES -> TRIUMPHS +1 TRIBES -> TRINES +1 TRELAWNEY -> TREE +1 TREE -> HERE +1 TREASURE -> TREASURES +1 TRAVELED -> TRAVELLED +1 TRANSSHIP -> TRANSHIP +1 TRANSLATED -> TRANSGRATED +1 TRAINING -> ORTRAINING +1 TRAINDAWG -> TRAIN +1 TRAFFIC -> LIFE +1 TRADITIONS -> JUDICINES +1 TRADITIONS -> BERTRADIZANCE +1 TRADEMARK -> TRADE +1 TOWER -> TOWERED +1 TOWARDS -> TO +1 TOWARDS -> DOORS +1 TOWARD -> TOWARDS +1 TOWARD -> TO +1 TOUR -> WERE +1 TOUGHS -> TUFTS +1 TOUGH -> TO +1 TOTING -> TOATING +1 TOSSING -> TAUSEN +1 TORQUILSTONE -> TORKILSTONE +1 TORQUILSTONE -> TORCLESTONE +1 TORN -> TAUGHT +1 TORMENT -> AND +1 TORCH -> TORTURE +1 TOP -> STOPPED +1 TOOK -> TO +1 TOO -> TWO +1 TOO -> DO +1 TOO -> CHIMNETS +1 TONGUE -> DONG +1 TOMORROW -> TO +1 TOM -> TUMULT +1 TOLERBLE -> TOLERABLE +1 TOLERABLE -> INTOLERABLE +1 TOLD -> TELL +1 TOLD -> STOWED +1 TOILET -> TOLD +1 
TO -> WHO +1 TO -> TWO +1 TO -> TRITES +1 TO -> TOWER +1 TO -> TOWARD +1 TO -> TOO +1 TO -> THAT +1 TO -> SURRENDER +1 TO -> ROOM +1 TO -> REFORMED +1 TO -> OR +1 TO -> MADE +1 TO -> INSTRUCTIVE +1 TO -> HURT +1 TO -> FOR +1 TO -> FIRST +1 TO -> DIRECTIFY +1 TO -> DID +1 TO -> DEER +1 TO -> DANGER +1 TO -> AS +1 TITLE -> TANA +1 TIRING -> ENTIRE +1 TIRESOME -> PERSON +1 TIRED -> TIE +1 TINCTURED -> TINTED +1 TIME'S -> TIMES +1 TIME -> TOM +1 TIMBER -> TIMBOO +1 TILLERS -> TELLERS +1 TILL -> TO +1 TIGLATH -> TIGG +1 TIGLATH -> TAKE +1 TIGER -> TIRE +1 TIGER -> STAGER +1 TIDINGS -> HIDINGS +1 TIBER -> TYBER +1 THY -> THINE +1 THY -> THEIR +1 THY -> DANGLE +1 THUS -> THIS +1 THUS -> THE +1 THUS -> DOES +1 THUMB -> TENTH'S +1 THROWING -> ROWING +1 THROBBED -> THROPPED +1 THREE -> THIRD'S +1 THREE -> FOR +1 THOUGHTS -> TORCH +1 THOUGH -> THOSE +1 THOUGH -> THE +1 THOU -> THOUGH +1 THOU -> NOW +1 THOU -> DONE +1 THOSE -> THUS +1 THOSE -> LUCIKAM +1 THITHER -> THAT +1 THIS -> US +1 THIS -> TO +1 THIS -> THERE'S +1 THIS -> MISSUS +1 THIS -> ITS +1 THIS -> IT +1 THIRST -> THUS +1 THINK -> THING +1 THINK -> THEY +1 THINGS -> THANKS +1 THIN -> FLITTON +1 THEY'RE -> THERE +1 THEY'RE -> THEIR +1 THEY'RE -> HER +1 THEY'RE -> ARE +1 THEY -> THIS +1 THEY -> THEY'VE +1 THEY -> THEY'D +1 THEY -> THAT +1 THEY -> ISSUED +1 THEY -> HE +1 THEY -> DO +1 THEY -> DECLINED +1 THESE -> THE +1 THERE'S -> THERE +1 THERE'LL -> THERE +1 THERE -> WRITHS +1 THERE -> THAT +1 THERE -> DICK +1 THERE -> DEER +1 THEN -> WHEN +1 THEN -> THEY +1 THEN -> THAN +1 THEN -> TEN +1 THEN -> DID +1 THEN -> AND +1 THEM -> THEMSELVE +1 THEM -> HIM +1 THEIR -> THEY +1 THEIR -> HER +1 THEIR -> DEAR +1 THEIR -> A +1 THEE -> THE +1 THEATRE -> FUTURE +1 THE -> WIDEN +1 THE -> WE +1 THE -> VIRTUARY +1 THE -> THOUGH +1 THE -> THIRD +1 THE -> THESE +1 THE -> THEREFORE +1 THE -> THEOSKEY +1 THE -> THEN +1 THE -> REMAINED +1 THE -> PATH +1 THE -> OUR +1 THE -> OTHERS +1 THE -> OTHER +1 THE -> MISTAKES +1 THE -> LOVE +1 THE -> LOGS +1 THE -> LIDA +1 THE -> ITWAIN +1 THE -> IT +1 THE -> HER +1 THE -> HALF +1 THE -> EVEN +1 THE -> EIGHTH +1 THE -> EARTH +1 THE -> DURING +1 THE -> DISAPPEAR +1 THE -> DID +1 THE -> DEVOTED +1 THE -> DECLINE +1 THE -> DE +1 THE -> BEHOLDAY +1 THE -> BEFORE +1 THE -> ASHORE +1 THE -> AND +1 THE -> AGAIN +1 THE -> ABIDING +1 THAT'S -> OF +1 THAT'S -> IT'S +1 THAT'S -> I +1 THAT'LL -> THAT +1 THAT -> WITH +1 THAT -> US +1 THAT -> TILL +1 THAT -> THEY +1 THAT -> THERE +1 THAT -> THEN +1 THAT -> THEIR +1 THAT -> SNAT +1 THAT -> SET +1 THAT -> NEITHER +1 THAT -> LET +1 THAT -> IN +1 THAT -> HIM +1 THAT -> DOWN +1 THAT -> BUT +1 THAT -> AND +1 THAT -> ABOUT +1 THAN -> GONDEN +1 TESTIMONY -> DETACHEMONY +1 TERRIBLE -> SEVERAL +1 TERRA -> TERRACE +1 TERENTIUS -> TORRENTIUS +1 TEND -> INTERESTING +1 TEMPTETH -> TEMPT +1 TEMPLES -> TEMPLE +1 TEMPLES -> SIMPLES +1 TELLTALE -> TELL +1 TELL -> SO +1 TEETH -> CHEEKS +1 TEA -> TINEL +1 TAYLOR -> TAILOR +1 TATTLERS -> TEDLERS +1 TASKMASTER -> TAX +1 TARDY -> TIDY +1 TAPPED -> TAP +1 TAPIS -> TAPPY +1 TANQUAM -> TANK +1 TALMASH -> THOMAS +1 TALK -> TALKED +1 TALENTS -> OTALONS +1 TAHITI -> TEDI +1 T'OTHER -> THE +1 SYRUP -> CYRUP +1 SYNONYMON -> SYNONYM +1 SYLLOGISM -> SILLIGIOUS +1 SYDNEY -> SIDNEY +1 SWORD -> WOOD +1 SWELP -> SWAP +1 SWELL -> SWELLIN +1 SWEETMEAT -> SWEETMEAT'S +1 SWEET -> SWEEP +1 SWAYING -> SWAIN +1 SWAY -> WEIGH +1 SWALLOWED -> SWALLOW +1 SWAG -> WAG +1 SURPRISED -> SURPRISE +1 SUPPOSE -> S'POSE +1 SUPERLATIVE -> SUPERNATIVE +1 SUPERIOR -> RAYS +1 SUNNYSIDE -> 
SUNNICIDE +1 SUMTHIN -> SOMETHING +1 SULPHURIC -> SUFFER +1 SUFFOLK -> SUFFOLD +1 SUFFICIENT -> SUSPICION +1 SUFFICIENT -> SUSPICIENT +1 SUFFICES -> SURFACES +1 SUDDENLY -> CERTAINLY +1 SUCKED -> SACKED +1 SUBSTANCE -> ABSTANCE +1 SUB -> SUBTERRAB +1 STRUCK -> UP +1 STRODE -> STROLLED +1 STRIFE -> STRIPE +1 STREAK -> STREET +1 STRAYING -> STRAIN +1 STRANGEST -> STRANGER'S +1 STRANGE -> STREAM +1 STRAITENED -> STRAIGHT +1 STRAINS -> TRAINS +1 STORES -> STORIES +1 STORES -> STALLS +1 STORED -> STOLE +1 STOOD -> TOO +1 STONEWALL -> STERNWALL +1 STONED -> STONE +1 STILL -> STEALING +1 STILL -> SO +1 STEWPAN -> STEWPENT +1 STEW -> DO +1 STERN -> STERNMOST +1 STEPPED -> STEPS +1 STEPAN -> STEP +1 STEP -> SABATANI +1 STENOGRAPHIC -> SYNOGRAPHIC +1 STEEVER -> STEVEN +1 STEERING -> STERN +1 STEAL -> STEED +1 STAYING -> SEEING +1 STAYED -> STEESEY +1 STAYED -> STATING +1 STATUS -> STRATUS +1 STATURE -> STATUE +1 STATES -> ESTATES +1 STATEROOM -> STATE +1 STATED -> SUITED +1 STATED -> OF +1 STATE -> STATES +1 STATE -> STATEROOM +1 STAS -> STATU +1 STARVING -> STARLING +1 START -> STARTED +1 STARES -> TEARS +1 STARED -> STEERED +1 STANDSTILL -> FANSTILL +1 STANDS -> SENDS +1 STANDARD -> STANDARDS +1 STAGE -> AGE +1 SQUEAMISH -> SCREAMISH +1 SQUARE -> IS +1 SPONSUS -> QUON +1 SPONGE -> SPINES +1 SPONDYLES -> SPIND +1 SPILLING -> SPINNING +1 SPICE -> SPIES +1 SPARSELY -> FIRSTLY +1 SPAKE -> SPEAKER +1 SPAKE -> SPEAK +1 SPAKE -> PIKE +1 SPADDLE -> SPATTLE +1 SOUTHERN -> SUDDEN +1 SOUSE -> SOUS +1 SOURCE -> SORT +1 SOUNDLY -> ONLY +1 SOUL -> SO +1 SOUGHT -> THOUGHT +1 SOUGHT -> SET +1 SOUGHT -> SALT +1 SOTELES -> SORTILESS +1 SON -> SUNG +1 SON -> SUN +1 SON -> SUDDEN +1 SON -> SOON +1 SOME -> I'M +1 SOJOURN -> SAJOURN +1 SOCIALIST -> SOCIALLY +1 SO -> SOUL +1 SO -> SOROCHIS +1 SO -> SORE +1 SO -> SOPHIA +1 SO -> SOMINUTELY +1 SO -> SELL +1 SO -> SAW +1 SO -> MISS +1 SO -> HER +1 SO -> FOR +1 SO -> BUT +1 SNOOZING -> NEWSING +1 SNETKOV -> SNATCOVE +1 SNETKOV -> PLACE +1 SNATCHER -> NATURE +1 SNARLED -> SNARLS +1 SMUGGLERS -> SMOKE +1 SMUGGLERS -> SMOGG +1 SMUGGLERS -> MOTHERS +1 SMOLNY -> MOLLY +1 SMIRCHED -> SMARCHED +1 SMELL -> SMILE +1 SLUNK -> SUNK +1 SLIGHTLY -> SLIGHTLY'LL +1 SLIGHTLY -> SAT +1 SLEEPING -> KEEPING +1 SLAPPED -> HE +1 SLACKENED -> CLACKENED +1 SLAB -> FLAP +1 SKYLARKS -> SKYLACKS +1 SKIRT -> GOOD +1 SKIN -> SKINNED +1 SKIN -> KIN +1 SKEW -> SKEERO +1 SKEPTICAL -> SCEPTICAL +1 SIXTH -> SIX +1 SIXES -> SAXES +1 SIT -> SET +1 SIRE -> SIRES +1 SIR -> SERVANTS +1 SIR -> BECAUSE +1 SINUHIT -> SOON +1 SINNED -> SENT +1 SINGS -> SANGS +1 SINGA -> SHANGHAT +1 SINE -> IN +1 SILVERWARE -> SILVER +1 SILLY -> SIDY +1 SILENCED -> SILENCE +1 SIGURD -> SIR +1 SIGURD -> CIGAR +1 SIGNOR -> SENOR +1 SIGNED -> SIGN +1 SIGHING -> SIGNED +1 SIEVE -> SEA +1 SIDE -> SON +1 SIDE -> OUT +1 SHUTTERS -> SHOWERS +1 SHUT -> SHET +1 SHUT -> AT +1 SHUMAN -> SON +1 SHUCKS -> SHOCKS +1 SHRUBS -> SHRUGS +1 SHRUBBERY -> AND +1 SHOWS -> SHARES +1 SHOULDST -> SHOULDEST +1 SHOULD -> YOU +1 SHOULD -> SHALL +1 SHOT -> SHOUT +1 SHOT -> HAD +1 SHOOTER -> SHEETTER +1 SHO'LY -> SURELY +1 SHIPS -> THE +1 SHIPS -> SHIP +1 SHIP -> VOYS +1 SHET -> SHUT +1 SHERE -> SHEAR +1 SHERBURN'S -> SHERBOURNE'S +1 SHERBURN -> SHERBIN +1 SHEPHERD -> SHEPARD +1 SHEETS -> SEATS +1 SHEET -> SEED +1 SHED -> SHUT +1 SHED -> SHARED +1 SHE'LL -> HER +1 SHE -> SEA +1 SHATTERED -> SHUTTERED +1 SHARDURIS -> YOUR +1 SHARDURIS -> SHALL +1 SHARDURIS -> CHAUDURUS +1 SHALLUM -> CHARLEM +1 SHALL -> SHOW +1 SHAKEDOWN -> SHAKE +1 SEYTON -> SETTON +1 SEYTON -> 
SEATING +1 SEVERE -> SAVIER +1 SEVERAL -> CHEVARIN +1 SEVENTIETH -> SEVENTEENTH +1 SEVEN -> THE +1 SERMON -> SIMON +1 SERGEY -> SURGY +1 SERGEY -> SOJI +1 SENTENCED -> INTENSE +1 SENSITIVE -> SCENTED +1 SENSE -> DESCENTS +1 SENOR -> SIGNOR +1 SEEST -> CEASE +1 SEEN -> SEEMED +1 SEEN -> SEE +1 SEEMED -> SEEMS +1 SEEMED -> SEEMING +1 SEEMED -> SEEM +1 SEEM -> SIMPLE +1 SEEM -> SEEMED +1 SEEM -> SEE +1 SEE -> SEEM +1 SEE -> EVEN +1 SEE -> C +1 SEDUCETH -> SEDUCE +1 SECURE -> SECURED +1 SEARCHING -> SURGING +1 SEAMEN -> SEE +1 SEAMAN -> SEAMEN +1 SCULPTORS -> SCULPTOR'S +1 SCULPTOR'S -> SCULPTURES +1 SCRUTINISED -> TWO +1 SCRIBES -> ITS +1 SCREW -> CREW +1 SCRAPPIN -> SCRAP +1 SCORN -> GONE +1 SCO'TCH -> SCORCHED +1 SCHULBERG'S -> SILBERG'S +1 SCHOOLDAYS -> SCHOOL +1 SCHOOL -> SCHOOLGIRLS +1 SCHOOL -> SCHOOLBOY +1 SCAPEGRACES -> SKIPPED +1 SCAPED -> ESCAPED +1 SCAPE -> ESCAPE +1 SAYS -> TASTE +1 SAYS -> SAY +1 SAYS -> SAKES +1 SAYS -> AS +1 SAYIN -> SAYING +1 SAY -> SO +1 SAY -> SAVE +1 SAW -> SOUGHT +1 SAW -> SORROW +1 SAW -> SOLD +1 SATURDAY -> SEDATE +1 SATURATED -> SITUATED +1 SAT -> SET +1 SARAH -> SEREN +1 SANS -> SONSPIER +1 SANITARY -> SENATORY +1 SANG -> YET +1 SAND -> SEND +1 SANCTESS -> SANCTUS +1 SAN -> SENT +1 SALTS -> SOULS +1 SALONE -> SALON +1 SALLOWER -> SALARY +1 SAINT -> SAY +1 SAILS -> SAILORS +1 SAIL -> SAILORS +1 SAID -> TO +1 SAID -> SET +1 SAID -> SAYS +1 SAID -> PSALMS +1 SAID -> OF +1 SAH -> I +1 SAGITTAIRE -> SAGOTARA +1 SAFE -> SAVED +1 RYO -> RYEO +1 RUSSIA -> RACHEL +1 RUSHED -> RUSH +1 RUNS -> TRANSIENT +1 RUNG -> RUN +1 RUN -> RUM +1 RUN -> RAN +1 RUMP -> RUM +1 RUM -> ROMAN +1 RULER -> SPONNET +1 RUBENSES -> RUBEN +1 ROWED -> RIDE +1 ROUTE -> ROW +1 ROTHS -> ROSS +1 ROSAMUN -> ROSAMOND +1 ROPE'S -> ROPES +1 ROPE'S -> HOPES +1 ROOTS -> OR +1 RONALD -> RANALD +1 ROLL -> ROCKLE +1 ROCK -> STRUCK +1 ROBED -> ROPED +1 ROBBERIES -> ROBBERS +1 ROARING -> ROWING +1 ROADSIDE -> ROAD'S +1 ROAD -> RULED +1 ROAD -> REAL +1 RISDON -> RICHARD +1 RISDON -> AND +1 RINGMASTER -> RING +1 RINDS -> RHINES +1 RIGOROUS -> RECKLESS +1 RIGOR -> RIGA +1 RIGHT -> WRITTEN +1 RIGHT -> RIPER +1 RIDGE'S -> RIDGES +1 RIDER -> WRITER'S +1 RIDER -> WRITER +1 RID -> HER +1 RHODIAN -> RADIAN +1 RHODIAN -> HERODIAN +1 RHODES -> ROADS +1 REVOLUTIONISTS -> REVOLUTIONIST +1 REVOLTE -> REVOLT +1 REVEREND -> ROBIN +1 REVEREND -> REVERED +1 REVEREND -> REVERE +1 REVELING -> RIVELING +1 REVEALED -> REVIL +1 RETZCH'S -> WRETCH +1 RETURNED -> TURN +1 RETURN -> RETURNING +1 RESUMED -> JAMES'S +1 RESTORETH -> RESTORE +1 RESTIVE -> RENTS +1 RESOLVED -> WE +1 RESK -> REST +1 RESISTING -> FUN +1 RESINOUS -> VEZENOUS +1 RESIDUE -> READY +1 RESCUED -> RESCUE +1 REQUEST -> QUEST +1 REPUTATION -> REPETITION +1 REPLY -> THE +1 REPEATED -> REPLIED +1 REND -> RUN +1 REMISSION -> REMISSIONER'S +1 REMEMBEREST -> REMEMBER +1 REMARKED -> REMARK +1 REMAINED -> REMAINS +1 REMAIN -> EVER +1 RELIGION -> WHO +1 RELIGION -> RELIGIONISTS +1 RELEVANT -> ELEVANT +1 RELEASED -> RELEASE +1 RELATED -> RELIGHTED +1 REJECT -> REJECTED +1 REIGNS -> REIGN +1 REIGNED -> RINGS +1 REGULATION -> REGULATING +1 REGARDING -> GUARDING +1 REG'LER -> REG'LAR +1 REFUGE -> REFUGERY +1 REFRESHMENT -> FRESHMENT +1 REFORMERS -> WE +1 REFERENCE -> REFERENCES +1 REELECTION -> TREE +1 RECORDS -> RICARDS +1 RECITER -> RESIDER +1 RECITE -> RESIDE +1 RECEDED -> WAS +1 REALLY -> REELING +1 REALISED -> REALIZED +1 READY -> RETIC +1 REACHED -> REACH +1 REACHED -> RAGED +1 RE -> REELECTED +1 RAYSTOKE -> RAE +1 RATTLING -> RIDING +1 RATHER -> WHETHER +1 RATHER 
-> ALL +1 RATCHFORD -> RETFORD +1 RASHID -> RASCHID +1 RASCALS -> RATS +1 RAPSCALLIONS -> RASCALIONS +1 RAPPERS -> WRAPPERS +1 RANSOM -> RAMSON +1 RAMSES -> RAMESES +1 RAM -> RUM +1 RAM -> ROOM +1 RAISE -> THAT +1 RAINY -> REINY +1 RAINY -> RAINING +1 RAIN -> REIGN +1 RAID -> RAY +1 RAGE -> RATES +1 RADPROP -> RED +1 RACKETEERS -> RACKETERS +1 RACKED -> RAT +1 RACHEL -> ORIGINALLY +1 RACES -> TWO +1 RABBITS -> RABBIT +1 RABB'S -> RABS +1 QUMMUKH -> KUMAK +1 QUITE -> ACQUAINT +1 QUICKENETH -> QUICKENED +1 QUICK -> QUICKLY +1 QUEST -> FRENCH +1 QUARTER -> THEIR +1 QUANTRELL -> QUANTRAIL +1 QUANTITIES -> QUALITIES +1 PYM -> POEM +1 PUTTING -> PUT +1 PUTTEL -> POTTER +1 PUTS -> BUT +1 PURPORTING -> REPORTING +1 PULSE -> PART +1 PULLED -> POURED +1 PUDDLES -> BOTTLES +1 PSALM -> NEITHER +1 PRYTANEUM -> BRITTANNIUM +1 PROVEN -> PROVING +1 PROTECTORATE -> PROTECTOR +1 PROPERLY -> PROPER +1 PROMOTIVE -> PROMOTED +1 PROHIBITION -> PROBITS +1 PROGRAMME -> PROGRAM +1 PROFIT -> PROPHET +1 PROFESSION -> PROFICIENT +1 PROFESSION -> PROFESSIONS +1 PRODUCES -> PROVED +1 PRODUCE -> PRODUCED +1 PROCOPIUS -> PROCOPIAS +1 PROCLUS -> PROCLISS +1 PROCLUS -> PROCLIS +1 PROCLUS -> PROCKLESS +1 PROAS -> PROTS +1 PROAS -> PRATS +1 PROAS -> POETS +1 PROA -> PROTINENT +1 PROA -> ITS +1 PRISONER -> PRISONERS +1 PRISON -> PRISONED +1 PRIORESS -> PROGRESS +1 PRIORESS -> PIRRUS +1 PRINCIPALS -> PRINCIPLES +1 PRINCE -> PRINCESO +1 PRIMER -> PRIMARY +1 PRETTY -> BERTIE +1 PRESSING -> RAISING +1 PRENTICESHIP -> PREDICUP +1 PRELIMINARIES -> PROLIMINARIES +1 PRECEPTORS -> PERCEPTIVES +1 PRECENTORS -> PRESENTERS +1 PREACHED -> PREACH +1 PRAYERS -> PRAY +1 PRAISEWORTHY -> PRAISE +1 PRACTITIONER -> PRACTITIONERS +1 PRACTICED -> PRACTISED +1 POWDERED -> PADDED +1 POURED -> PUT +1 POUCHES -> PIUCES +1 POTUM -> POT +1 POTION -> FOCIN +1 POTASSIUM -> PROTESTING +1 POSTHASTE -> POST +1 POSTERN -> PASTING +1 POST -> POSTS +1 POSITIVELY -> POSIT +1 PORTMANTEAU -> APARTMENTAL +1 PORTENTOUS -> POTENTAL +1 PORED -> POURED +1 POPULACE -> POPULOUS +1 POOR -> PORT +1 POOR -> FAR +1 POMEROY -> POMERALIE +1 POLYTECHNIC -> POLYTECHNICA +1 POLLY -> PARLEY +1 POLL -> PAUL +1 POINT -> SPITE +1 PO -> POE +1 PLUMB -> PLUM +1 PLEASANT -> PRESENT +1 PLEAS -> PLAYERS +1 PLATTERBAFF'S -> PLATTERBUFF'S +1 PLATTERBAFF -> PLATTERBUFF +1 PLATTERBAFF -> PLATTER +1 PLATTERBAFF -> FLATHER +1 PLAIN -> TOWING +1 PLACING -> REPLACING +1 PLACE -> PLATES +1 PLACE -> PACE +1 PITTS -> FITZ +1 PITHUM -> PITTHAM +1 PISTOLES -> PISTOL +1 PIPE -> PAPER +1 PINKUS -> PINKIS +1 PILESER -> THAT +1 PILESER -> LAST +1 PILESER -> LAS +1 PILESER -> GLASS +1 PIKES -> PINES +1 PIGSKIN -> PICTION +1 PIGEONCOTES -> PIGEON +1 PIGEONCOTE -> FIDD +1 PIGEONCOTE -> DIGEON +1 PIGEONCOTE -> BEECH +1 PIGEON -> PITCHEN +1 PIECE -> PEACE +1 PICKED -> PRICKED +1 PHUT -> FAT +1 PHILISTINES -> FAIRLY +1 PHILIPPUS -> PHILIPUS +1 PHILIPPUS -> PHILIP'S +1 PHILIP -> FILLIP +1 PHILIP -> FELLOW +1 PHELPS -> PHILP +1 PETREL -> PETAL +1 PET -> BED +1 PESTE -> PESTS +1 PERSPIRED -> POSPIRED +1 PERNOUNCE -> PRONOUNCE +1 PERHAPS -> THERE +1 PERE -> PEGLASHES +1 PERE -> BAT +1 PEONAGE -> PINIONS +1 PENDING -> SPENDING +1 PENCE -> PANTS +1 PEKAHIAH -> PEKAHIA +1 PEASANTS -> PIECE +1 PEAS -> PEA'S +1 PEARL -> PEAR +1 PEACEFUL -> BEATHFUL +1 PAY -> PAIR +1 PAWNBROKER -> PAN +1 PAUSED -> PASSED +1 PAUSE -> PULSE +1 PASTES -> PACE +1 PAST -> PASS +1 PASSES -> PAUSES +1 PASSERS -> PASSES +1 PASSED -> PASS +1 PARTS -> PART +1 PARTLY -> PARSLY +1 PART -> PARTS +1 PARRICIDES -> PARASITES +1 PARR -> POOR +1 PARKS -> 
BOX +1 PARISH -> PARRISH +1 PARDONABLE -> PIONABLE +1 PARDON -> PARTICER +1 PANEL -> PANNER +1 PALL -> POOL +1 PALESTINE -> PALASTEIN +1 PALAESTRA -> PELUSTER +1 PAIR -> PARENT +1 PAINT -> PAIN +1 PADDLING -> PADDLIN +1 PACES -> PLACES +1 PACE -> FACE +1 P -> PATUM +1 OX -> AX +1 OWNERS -> LANDOWNERS +1 OWNED -> ON +1 OWN -> ON +1 OW'M -> ALL +1 OW -> HOW +1 OVERRIPENESS -> OVER +1 OVERHEARD -> OUR +1 OVERFULL -> OVER +1 OVERFLOWING -> OVERWHELMING +1 OVER -> OF +1 OUTER -> OUTER'S +1 OUT -> UP +1 OUT -> OUTGAZE +1 OUT -> HER +1 OUT -> AT +1 OUT -> ALL +1 OUT -> ABOUT +1 OUR -> I'LL +1 OUR -> ARE +1 OUR -> A +1 OUNCES -> OUNCE +1 OUGHTN'T -> OUGHT +1 OUGHT -> ARE +1 OUGHT -> ALL +1 OUEN -> LOUIS +1 OTHER -> OTHERWAYS +1 OTHER -> A +1 ORNERIEST -> ORNEIST +1 ORDER -> OTTO +1 OR -> ORDEALIZING +1 OR -> ORA +1 OR -> OPPOSITION +1 OR -> FOREMOTHER +1 OR -> ALL +1 OPPRESSORS -> IMPRESSORS +1 OPPOSITION -> OUR +1 OPPORTUNITY -> A +1 OPENING -> SOMETHING +1 OPENED -> OPEN +1 OPEN -> UP +1 OPE -> OPEUS +1 ONTO -> ON +1 ONTO -> INTO +1 ONLY -> OWING +1 ONLY -> ON +1 ONLY -> EARLIEST +1 ONLY -> ALLEY +1 ONE -> WON +1 ONE -> A +1 ONCE -> WAS +1 ON'T -> ON +1 ON -> WHEN +1 ON -> UPON +1 ON -> UNTO +1 ON -> UNLUCK +1 ON -> UNCLEAN +1 ON -> UNCHANGED +1 ON -> SON +1 ON -> PIGEON +1 ON -> I'M +1 ON -> HER +1 ON -> GONE +1 ON -> DOWN +1 ON -> ARE +1 ON -> ALL +1 OME -> HOME +1 OMAR -> MARBIN +1 OLL -> ALL +1 OLIVE -> OLD +1 OLIVE -> ALIVE +1 OLD -> O +1 OKAY -> O +1 OIL -> OARMEIL +1 OH -> U +1 OGRE'S -> OGRES +1 OFFICES -> OFFICERS +1 OFFICERS -> OFFICIALS +1 OFFICER -> OFFICERS +1 OFFICE -> OF +1 OFFENSE -> OFFENCE +1 OFFEND -> OFFENDLY +1 OFFEN -> OFTEN +1 OFF -> OUR +1 OFF -> OF +1 OF -> WHAT +1 OF -> VASSARIAH +1 OF -> THEIR +1 OF -> THE +1 OF -> THAT +1 OF -> OVER +1 OF -> OR +1 OF -> OFURUS +1 OF -> INTO +1 OF -> IN +1 OF -> HER +1 OF -> GIVE +1 OF -> FOR +1 OF -> AND +1 ODD -> OTT +1 ODD -> AUGHT +1 OCCASION -> CASION +1 OBOCOCK -> OBEK +1 OBJECT -> SUBJECT +1 OBEYED -> OBEY +1 O'NIGHTS -> OR +1 O'NEILL -> ON +1 O -> OR +1 O -> BITTER +1 O -> AND +1 O -> A +1 NYTOUCH -> KNIGHTSAGE +1 NUZHAT -> UZHAT +1 NUZHAT -> USHART +1 NUTS -> KNOTS +1 NUTRITION -> UTRITION +1 NUNS -> NUN'S +1 NU'UMAN -> NUMA +1 NOWT -> THAT +1 NOW -> THOU +1 NOW -> SOME +1 NOW -> ON +1 NOW -> NABRAMIN +1 NOTTINGHAM -> NOT +1 NOT -> SNUG +1 NOT -> OUT +1 NOT -> NUT +1 NOT -> NONE +1 NOT -> NIGHT +1 NOT -> NIGH +1 NOT -> KNOWN +1 NOT -> I +1 NOT -> DO +1 NORTH -> NORTHEAST +1 NOR -> WHATEVER +1 NOR -> NO +1 NOPE -> NOTE +1 NONETHELESS -> NONE +1 NONE -> NO +1 NOBLE -> SNOWBLE +1 NO -> THOUGH +1 NIPPER -> NIBBER +1 NIKOLAY -> NIKOLA +1 NIGHT -> KNIGHT +1 NICO -> NACO +1 NEXTER -> NEXT +1 NEW -> YOUR +1 NEW -> YOU +1 NEVER -> REPAST +1 NERVE -> NURSE +1 NEOSHO -> NEOSHIL +1 NEIGHBOURS -> NEIGHBORS +1 NEIGHBOURING -> NEIGHBORING +1 NEIGHBORS -> NEIGHBOURS +1 NEIGHBORS -> LABORS +1 NEAT -> MEAT +1 NEAREST -> NEAR +1 NEARER -> NEAR +1 NEAR -> NEARING +1 NAUGHT -> NOUGHT +1 NATURALLY -> NATURAL +1 NATURAL -> NATURALLY +1 NARRATIVES -> NARRATIVE +1 NARRATE -> THEIR +1 NARCOTIC -> NAUCOTIC +1 NANDY'S -> ANDY'S +1 MYSTERIOUS -> MYSTERY +1 MYRTILUS -> MYRTULAS +1 MYRTILUS -> MERTULIST +1 MYRTILUS -> MERCILESS +1 MYRTILUS -> BERTULAS +1 MY -> MILAD +1 MY -> BY +1 MUSTACHES -> MOUSTACHES +1 MUST -> MISTER +1 MUST -> MICE +1 MUST -> DAY'S +1 MURDOCH'S -> MARDOCK'S +1 MURDER -> MURDERER +1 MUIR -> YOU'RE +1 MUG -> MUCH +1 MUDDY -> MONEY +1 MUD -> MATVE +1 MUCH -> ACT +1 MOWER -> OVER +1 MOVEMENT -> MOMENT +1 MOUTHWHAT -> MOUTH +1 MOUTHS -> 
MOTHS +1 MOURNING -> MORNING +1 MOUNTNORRIS -> MONTORIS +1 MOTOR -> MOTAR +1 MOST -> PRO +1 MOST -> POESY +1 MOSES -> OF +1 MOSES -> MOVES +1 MORTIS -> MORTARS +1 MORTIFICATIONTHAT -> MORTIFICATION +1 MORTEM -> MODER +1 MORE'N -> MORE +1 MORE -> SMALL +1 MORAL -> MORTAL +1 MOPED -> MILKED +1 MOOR -> MORE +1 MOONLIGHT -> MONTH +1 MONTHLY -> MOUTHLY +1 MONSEIGNEUR -> MONSIEUR +1 MONKERS -> MOCKERS +1 MOMMOL -> MAMMA +1 MO -> MORE +1 MISTER -> MISSUS +1 MISTER -> MISS +1 MISTER -> MIDSRSANY +1 MISS -> WAS +1 MISS -> MY +1 MINISTER -> MEANS +1 MINIONETTE -> MINOR +1 MINE -> MY +1 MINE -> MIND +1 MINE -> MIKE +1 MIND -> MINE +1 MINCE -> MINSER +1 MIMICK -> MIMIC +1 MILLY -> MERELY +1 MILLSTON -> MILLSTONE +1 MILL -> BLUE +1 MILICENT'S -> MILLSON'S +1 MILICENT -> MILLICENT +1 MILICENT -> MIELSON +1 MILE -> MILES +1 MILDEWED -> MILDED +1 MIHI -> HE +1 MIDRIFF -> MIDRIFTS +1 MIDIAN -> MENDIAN +1 MIDIAN -> MEDIAN +1 MIDDLING -> MIDDLIN +1 METHINKETH -> METHINK +1 MET -> MAKE +1 MESTIENNE'S -> MESTINE'S +1 MESTIENNE -> MISSED +1 MESTIENNE -> MISS +1 MESTIENNE -> MISCHIENNE +1 MESSES -> MASSES +1 MESELF -> MYSELF +1 MERRY -> MERRYMAKING +1 MERNEPTAH -> MARNETTE +1 MERLONUS -> MERLUNUS +1 MERLONUS -> MERLINUS +1 MERIT -> MARRIAGE +1 MENTAL -> MANTLE +1 MEND -> MENTAL +1 MEN'S -> MAN'S +1 MEN -> MEANTIME +1 MEN -> INTO +1 MEN -> IN +1 MEN -> CAME +1 MEMBRANE -> MEMORANE +1 MEDICAL -> MEDICA +1 MEDALS -> METALS +1 MEAT -> HAVE +1 ME -> MISS +1 ME -> HIM +1 MC -> MAC +1 MAY -> ME +1 MAY -> MADE +1 MATTER -> MATHER +1 MATI -> MEANT +1 MATI -> MANTIL +1 MATEY -> MAY +1 MATERIALS -> MATERIORS +1 MASTER -> MERCER +1 MASTER -> MASTERY +1 MASTER -> MASSA +1 MASKED -> MASSED +1 MARVELED -> MARVELLED +1 MARTIN -> MERTON +1 MARSPEAKER -> MARKEE +1 MARSHAL -> MARTIAN +1 MARSHAL -> MARTIAL +1 MARSH -> MARS +1 MARRIAGE -> MARY'S +1 MARMALADES -> MARVELL +1 MARGARET'S -> MARGARET +1 MARE -> MAYOR +1 MANNER -> MANNERS +1 MANKATO -> MANCATEO +1 MANKATO -> MAIN +1 MANASSEH -> MANOT +1 MAN'S -> MEN'S +1 MAN -> MADAMELY +1 MAMIE -> MAMMY +1 MALNUTRITION -> MALLETRICIAN +1 MAKES -> MATRON +1 MAKE -> MAY +1 MAJORITY -> MATURITY +1 MAJOR -> MEASURE +1 MAIL -> MALE +1 MAD -> MAN +1 MABILLON -> MARBYLON +1 M -> EM +1 M -> BENNET +1 LYSIMACHUS -> LISIMACUS +1 LUNA'S -> LUNE'S +1 LUNA -> LEWINA +1 LUKE -> LOOK +1 LUCRATIVE -> LOOK +1 LUCIEN -> MISS +1 LUCIEN -> LUCIAN +1 LOWERING -> LORING +1 LOWER -> BLOW +1 LOVER -> LOVE +1 LOVED -> LOVE +1 LOVE -> LAW +1 LOSS -> LAWS +1 LORD'S -> LARGE +1 LORD -> LOT +1 LORD -> LARD +1 LOOKOUT -> LOOK +1 LOOKING -> LOOK +1 LOOKED -> LOOKS +1 LOOKED -> LOOK +1 LOOKED -> LIFTED +1 LONESOMENESS -> LONESOME +1 LONE -> LONG +1 LOBSTER -> LOBSURD +1 LOBSTER -> LOBSTERN +1 LOBSTER -> LOBSTERBOAT +1 LOBSTER -> LOBS +1 LL -> CHILLED +1 LL -> BESTOW +1 LIZABETH -> ELIZABETH +1 LIVES -> IS +1 LIVELY -> LOVELY +1 LIVELONG -> LIVE +1 LIVED -> IF +1 LIVE -> LEAVE +1 LITTLE -> OF +1 LIT -> LET +1 LIT -> LED +1 LIPS -> LITZ +1 LIME -> LIMETERY +1 LILBURN -> LOWBORNE +1 LILBURN -> LILLBURN +1 LILBURN -> LILBOURNE +1 LIL -> LOW +1 LIKELY -> LIKE +1 LIKE -> THE +1 LIKE -> NIGHT +1 LIGHTLY -> LATE +1 LIGHT -> LIGHTFOOTED +1 LIFE -> LIE +1 LIEUTENANT -> TANNIC +1 LIES -> LIVES +1 LIE -> LIKE +1 LIDDY -> LADY +1 LIAISON -> LEAR +1 LEXINGTON -> LESSINGTON +1 LEWIS -> LOOSE +1 LEVITICUS -> LEVIKUS +1 LETTERS -> LET +1 LETS -> LET'S +1 LET -> THEM +1 LET -> LEFT +1 LET -> LED +1 LET -> LATINUE +1 LET -> LAID +1 LEST -> REALIZED +1 LESSEN -> LISTEN +1 LESLIE -> THIS +1 LESLIE -> PLEASING +1 LESLIE -> LIZZILY +1 LESLIE 
-> LIZZIE +1 LESLIE -> LIDNESLEY +1 LESLIE -> IT +1 LENOIR -> WARRITZ +1 LEMON -> LINENSHIPS +1 LEGS -> LESS +1 LEFT -> LIVED +1 LEFT -> LIFTED +1 LEFT -> LAUGHTER +1 LEFT -> LAD +1 LEER -> URINA'S +1 LEECHES -> LEECH +1 LEECH -> LIEGE +1 LED -> LIT +1 LEAVED -> LEAVE +1 LEAVE -> LEE +1 LEARN -> LEARNED +1 LEADPENCIL -> LEAD +1 LEADERSHIP -> LEGERSHIP +1 LEADERS -> LEADER'S +1 LAW -> LAWN +1 LAW -> LAST +1 LAUGHED -> LAP +1 LAUGHED -> HAVE +1 LAUDERDALE -> LORDAIL +1 LAUDERDALE -> LAURDALE +1 LATH -> LAST +1 LATER -> LATE +1 LATELY -> PLATELY +1 LATE -> LAID +1 LASH -> LAST +1 LARKIN'S -> LARKINS +1 LANDI -> LANDY +1 LANDED -> LAND +1 LAND -> THE +1 LAMPLIT -> LAMPLET +1 LAMBS -> LAMPS +1 LAKE -> LATER +1 LAIN -> LANE +1 LADY -> LADY'S +1 LADS -> LAD +1 LACHRYMA -> LACK +1 LACHAISE -> LACHES +1 LABOURERS -> LABORERS +1 LABOUR -> LABOR +1 LABORING -> LABOURING +1 L -> ELE +1 KNOW -> THERE +1 KNOW -> NOT +1 KNOTTY -> NAUGHTY +1 KNOT -> NOT +1 KNOBBLY -> KNOBLY +1 KNIGHTS -> NIGHTS +1 KNIGHT'S -> NIGHT'S +1 KLEPTOMANIAC -> CLEPTOMANIA +1 KLEPTOMANIA -> CLUBTOMANIA +1 KITE -> TIGHTLY +1 KISSED -> GUESS +1 KINDER -> KIND +1 KILLS -> KILL +1 KICK -> KICKY +1 KEYS -> A +1 KETTLE -> CATTLE +1 KERSTALL -> KIRSTALL +1 KENITES -> KENITE +1 KENITES -> KANITE +1 KEEN -> KIN +1 KEDEM -> KIDAM +1 KANSAS -> KANSA +1 KANSAS -> KANS +1 KAMAR -> COME +1 JUSTIFIED -> IT'S +1 JUST -> JIST +1 JUST -> JETS +1 JUST -> JEST +1 JUST -> JEALOUS +1 JUST -> DOES +1 JUST -> DESTRULICIAN +1 JUST -> CHOSE +1 JUG -> CHUG +1 JUDGMENT -> JULIET +1 JUDGMENT -> JAGIMENT +1 JUDGMENT -> GERMAN +1 JUDGE -> JOE +1 JUDAH -> JULIA +1 JOUVIN'S -> JUBAUN'S +1 JOSHUA -> JANSHIRE +1 JOKINGLY -> CHOKINGLY +1 JOCELYN'S -> JOSCELYN +1 JOCELYN -> JOSCELYN +1 JIS -> IT +1 JEWISH -> TO +1 JEWELER -> JUROR +1 JEWELER -> JEWELLER +1 JESTER -> GESTURE +1 JEHOASH -> JOESH +1 JEERED -> JOURED +1 JEDGE -> JUDGE +1 JANSENIST -> JENSONIST +1 JANEERO -> GENERO +1 JANE -> CHAIN +1 JAMS -> JAMES +1 JAM -> JAMMED +1 JAM -> JAME +1 JAKEY'S -> JAKI +1 JAKEY -> JI +1 JAKEY -> JAKIE +1 JAKEY -> JAGGY +1 JACKMAN -> JACKMEN +1 J -> VERSON +1 IZZY'S -> IS +1 IZZY -> IZZIE +1 IZZY -> AS +1 IVANOVITCH -> GEVINOVITCH +1 ITS -> TO +1 ITS -> THEN +1 ITS -> IT'S +1 IT'S -> YES +1 IT'S -> AND +1 IT -> THEE +1 IT -> STIPS +1 IT -> OVER +1 IT -> OR +1 IT -> IT'LL +1 IT -> HIM +1 IT -> HAD +1 IT -> DID +1 IT -> AT +1 IT -> AND +1 IT -> AN +1 ISSUE -> ISSUED +1 ISRAELITES -> ISRAITS +1 ISRAEL'S -> ISRAEL +1 ISN'T -> IS +1 ISLAMISED -> ISLAMIZED +1 ISLAM -> SLAM +1 IS -> YOUR +1 IS -> WAS +1 IS -> TURBOT +1 IS -> SAID +1 IS -> RAPPA'S +1 IS -> OWNETTE +1 IS -> ONLY +1 IS -> LINEN +1 IS -> ITS +1 IS -> IT'S +1 IS -> ISN'T +1 IS -> IF +1 IS -> HEAR +1 IS -> ENDOWED +1 IS -> EAST +1 IS -> CIGARET +1 IRONICAL -> IRONIC +1 INWARD -> WOOD +1 INVALIDES -> INVALID +1 INTO -> AND +1 INTERNAL -> AND +1 INTERFERE -> INFERE +1 INTEREST -> INTERESTS +1 INTELLECTUALLY -> INTELLECTUAL +1 INSTANT'S -> INSTANCE +1 INSIST -> INSISTS +1 INQUIRE -> ACQUIRE +1 INNES -> EANS +1 INN -> IN +1 INJURE -> ENDURE +1 INGENIOUSLY -> INGENUOUSLY +1 INFLUENCED -> EVILISED +1 INFERIOR -> CONTRAY +1 INFAMY -> INFAMYLON +1 INFAMY -> IN +1 INFAMOUS -> IN +1 INELEGANTLY -> IN +1 INCREASE -> INCREASES +1 INCORRECT -> AND +1 INCOMPARABLE -> INN +1 INCLINATION -> INCLINATIONS +1 IN'T -> INTO +1 IN -> WITH +1 IN -> WHO +1 IN -> WHEN +1 IN -> WHEEL +1 IN -> TO +1 IN -> SO +1 IN -> ON +1 IN -> NEAT +1 IN -> JUST +1 IN -> INTO +1 IN -> INSIDE +1 IN -> HE +1 IN -> FROM +1 IN -> EXPERIENCE +1 IN -> ENCAMP +1 IN -> 
BENEATH +1 IN -> AWHILE +1 IN -> AT +1 IN -> AIMED +1 IN -> A +1 IMPROVISED -> PROVISED +1 IMPROVED -> PROVED +1 IMPROVE -> PROVE +1 IMPLY -> SAY +1 IMMENSE -> MOST +1 IM -> QUEST +1 IM -> EM +1 ILU -> TO +1 IGNORED -> NURED +1 IGNOMY -> IGNOMINY +1 IF -> FOR +1 IF -> FIT +1 IF -> A +1 IDEA -> AND +1 ICES -> ISIS +1 I'VE -> OF +1 I'M -> AND +1 I'LL -> LOOK +1 I'LL -> ELSE +1 I'FAITH -> I +1 I'D -> I'LL +1 I -> TO +1 I -> THY +1 I -> THEY +1 I -> THAT +1 I -> OUGHT +1 I -> OH +1 I -> MY +1 I -> LIKE +1 I -> IT +1 I -> HOW +1 I -> FAIR +1 I -> EYES +1 I -> ATTENDED +1 I -> AMID +1 I -> AH +1 I -> ABRUPT +1 HYDROCHLORIC -> HYDROGLOIC +1 HYDROCHLORIC -> HYDROCLOIC +1 HURT -> SHARP +1 HUNTINGDON -> HUNTON +1 HUNTINGDON -> HONEYMAN +1 HUNTERS -> HANDLES +1 HUNDRED -> HUNDREDTH +1 HUMOR -> HUMOUR +1 HUMANITARY -> HUMANITY +1 HUH -> HER +1 HUDSPETH -> HUSBITH +1 HOWL -> HOWE +1 HOWEVER -> SHOWER +1 HOWEVER -> SAMURED +1 HOWEVER -> HERBID +1 HOW -> OH +1 HOW -> HER +1 HOW -> HAS +1 HOUSEHOLD -> HOUSE +1 HOUR -> OUR +1 HOUR -> I +1 HOUNDED -> HOUNDY +1 HOSPITABLY -> HOW +1 HORDE -> HOARD +1 HOPPING -> HAVING +1 HOO'S -> WHOSE +1 HOO'S -> WHO'S +1 HOO'LL -> HE'LL +1 HOO -> WHO +1 HONORS -> HONOUR +1 HONOR -> HANNER +1 HONESTLY -> ON +1 HOMEPUSH -> HOME +1 HOLLER -> HOLLERED +1 HOLE -> HALL +1 HOLD -> HOTEL +1 HOLD -> ERON +1 HOF -> WHOLE +1 HITHER -> THITHER +1 HIT -> HATE +1 HISSELF -> HIMSELF +1 HIS -> US +1 HIS -> IN +1 HIS -> HE +1 HIS -> DISCOURSE +1 HIS -> AWAY +1 HIS -> AS +1 HIS -> AN +1 HIS -> A +1 HINDFELL -> HINFELD +1 HIMSELF -> HIS +1 HIM -> THEM +1 HIM -> CAME +1 HIJAZ -> KI +1 HIGHS -> HIES +1 HIGH -> TIME +1 HIERARCHY -> HILLRY +1 HI -> AY +1 HEYDAY -> HAY +1 HEWN -> YOU +1 HERMON'S -> HERMANN'S +1 HERMON'S -> HARMONT'S +1 HERMON'S -> HARMON'S +1 HERMON -> HERMAND +1 HERMON -> HERMA +1 HERMON -> HARMON +1 HERMON -> HAREMON +1 HERIOT'S -> HEARET'S +1 HERIOT -> HARRIET +1 HERETOFORE -> HERE +1 HERE'S -> HERE +1 HERE -> HER +1 HERE -> HE +1 HER -> HIM +1 HER -> HERSELF +1 HER -> HE +1 HER -> FOR +1 HER -> CHARRED +1 HER -> AS +1 HER -> ACCUSTOM +1 HEN -> HINCOUX +1 HELVIN -> HELD +1 HELPED -> HELPS +1 HELP -> HELPED +1 HELM -> HOME +1 HELM -> HAIL +1 HELLO -> HALLO +1 HELEN -> ALAN +1 HELD -> HELDS +1 HEELED -> HEALED +1 HEEDED -> READ +1 HEDGES -> HATCHES +1 HEBREW -> SEA +1 HEAT -> HEATLESS +1 HEARTY -> EARTHLY +1 HEARTS -> HEART +1 HEARTIEST -> HARDIEST +1 HEART -> HEARTS +1 HEART -> HARD +1 HEARSE -> HOUSE +1 HEARD -> HAD +1 HEAR -> HAIR +1 HEADLONG -> HAD +1 HEAD -> HIDDEN +1 HE'S -> HE +1 HE -> YES +1 HE -> THEY +1 HE -> SIR +1 HE -> SIMILATE +1 HE -> SEE +1 HE -> PIERRE +1 HE -> IRRESPONSIBLE +1 HE -> IF +1 HE -> EITHER +1 HE -> CAME +1 HE -> AND +1 HE -> A +1 HAYES -> HAZE +1 HAVEN -> HEROD +1 HAVE -> OFTEN +1 HAVE -> HATH +1 HAVE -> HALVES +1 HAVE -> HALF +1 HAVE -> EVER +1 HAVE -> ERE +1 HAVE -> BE +1 HAUGHTINESS -> FORTNESS +1 HATTERSLEY -> HAUGHTERSLEY +1 HATES -> HATE +1 HASAN -> HER +1 HAS -> LEGION +1 HAS -> JUST +1 HAS -> HISTORY +1 HAS -> HAVE +1 HAS -> BEST +1 HARVEY'SWHICH -> HARVEST +1 HARRISONVILLE -> HARRISON +1 HARRIS -> HEIRESS'S +1 HAROLD -> HERALD +1 HARK -> AREN'T +1 HARE -> HAIR +1 HARDWARE -> HARDWORTH +1 HARDLY -> ARE +1 HARD -> OUR +1 HARBOR -> HARBOUR +1 HAPPENED -> HAPPEN +1 HAPLY -> HAPPILY +1 HAND -> HANDS +1 HALL -> WHOLE +1 HALF -> HAPPENED +1 HALEY'S -> HAYE'S +1 HADDA -> HAD +1 HAD -> TENDED +1 HAD -> STORE +1 HAD -> HATE +1 HAD -> HAS +1 HAD -> GOT +1 HAD -> ADMIRED +1 GURR -> GURSER +1 GURR -> GREW +1 GURR -> GIRK +1 GURR -> GERT +1 GURR -> GER +1 
GURR -> GARR +1 GURR -> DUR +1 GUNS -> GUN +1 GUNNAR -> GUTTER +1 GUNNAR -> GUNNER +1 GULLET -> COLLEGE +1 GUIRUN'S -> GUNDER +1 GUINEA -> GUINEAS +1 GUIDE -> GOD +1 GUDRUN -> GUNDRON +1 GRUMBLINGLY -> TREMBLINGLY +1 GRUFFLY -> ROUGHLY +1 GROVE -> GROW +1 GROTTO -> DRATO +1 GROOM -> ROOM +1 GRINNING -> GRINNIE +1 GRIBIER -> CLAVIER +1 GREY -> GRAY +1 GREAVES -> GREEBS +1 GREAT -> GRAY +1 GRAY -> GREY +1 GRAY -> GLAY +1 GRATITUDE -> CREDITUDE +1 GRASPS -> GRASPED +1 GRAPPLE -> GRANTPLE +1 GRANDPAP -> GRANDPAPAZZARD +1 GRANDAME -> GRAND +1 GRAMMATEUS -> GRAMMATIUS +1 GRAM -> GRAHAM +1 GOVERNMENTS -> GOVERNMENT'S +1 GOVERNMENT'S -> GOVERNMENT +1 GOVERNMENT -> GOVERN +1 GOV'NOR -> GUV'NER +1 GOV'NOR -> GOVERNOR +1 GOT -> GUARD +1 GOT -> GOD +1 GOT -> COURT +1 GOSLER -> GOSPIR +1 GORDON -> GORDON'S +1 GOODS -> GOOD +1 GOOD -> THE +1 GONE -> DISCOUR +1 GOLDFISH -> GOLD +1 GOLDEN -> GOLD +1 GOING -> YOU +1 GOING -> GO +1 GOES -> GO +1 GODEBILLIOS -> GO +1 GOD'S -> GODS +1 GOD -> THE +1 GOD -> GUN +1 GOD -> GONE +1 GOBEY'S -> GOBY'S +1 GOBEY'S -> GOBIES +1 GOBEY'S -> GOBIAS +1 GOAL -> GOLD +1 GNARLED -> GNOLD +1 GLOOMY -> BLOOMY +1 GLISPIN'S -> OF +1 GLISPIN -> LISPIN +1 GLISPIN -> GLISBON +1 GLASS -> GLANCE +1 GLADDENEST -> GLADNESSED +1 GLAD -> GRINDING +1 GIVE -> GIVEN +1 GIVE -> GAVE +1 GIT -> GET +1 GIRTHING -> GIRDING +1 GIRTHED -> GIRDED +1 GIRL -> GO +1 GIRDS -> GORGE +1 GIORGIO -> GEORGO +1 GIORGIO -> GEORGIO +1 GILROY -> SCALE +1 GIFTS -> FORGIVES +1 GET -> HER +1 GET -> GIT +1 GET -> GENISH +1 GET -> GAINED +1 GEORGIA -> GEORGE +1 GEORGE'SWHICH -> GEORGE'S +1 GEORGE'S -> GEORGE +1 GEORGE -> GEORGIUM +1 GENTLEMEN'S -> GENTLEMAN'S +1 GENTLEMEN -> GENTLEMAN +1 GENTLEMAN -> GENTLEMEN +1 GENEROUS -> GENERALS +1 GENERAL -> JOE +1 GAUTHIER -> GATHIERRE +1 GASHED -> GASH +1 GARDEN'S -> GARDENS +1 GAMMON -> GAMIN +1 GALLATIN -> YELLED +1 GABLE -> CABLE +1 G'YIRLS -> IS +1 FUZZ -> FUZ +1 FURZE -> FIRS +1 FULL -> POOL +1 FULL -> FOUR +1 FULL -> FOR +1 FULL -> FOOT +1 FUGITIVES -> FUGITIVE +1 FROZE -> ROSE +1 FROWNED -> GROUND +1 FRONTIERS -> FRONTIER +1 FRONT -> FROM +1 FROG'S -> FROGS +1 FRO -> FROM +1 FRISTOE'S -> FRISTOWS +1 FRISTOE -> FOR +1 FRISTOE -> CRISTO +1 FRIGHTFUL -> CRIED +1 FRIAR -> FRY +1 FREEWAY -> FREE +1 FREES -> FREEZE +1 FREEDOM -> READ +1 FRANC -> FROG +1 FOURTEENTHAT'S -> FOURTEEN +1 FOUR -> FULL +1 FOUR -> FALL +1 FOUNDED -> FOUND +1 FOUGHT -> THOUGHT +1 FOSTER -> FOXTER +1 FORWARD -> BEFOREWARD +1 FORTS -> FAULTS +1 FORTNIGHT -> NIGHT +1 FOREVER -> FOR +1 FOREVER -> DURE +1 FOREMAN -> MAN +1 FOREGATHERED -> FORGATHERED +1 FORCED -> FORCES +1 FOR -> WERE +1 FOR -> TO +1 FOR -> PROLONG +1 FOR -> OF +1 FOR -> IN +1 FOR -> FURTHER +1 FOR -> FULL +1 FOR -> FROM +1 FOR -> FOUR +1 FOR -> FORCES +1 FOR -> FER +1 FOR -> FAULT +1 FOR -> BOUHAIR +1 FOR -> ABOVE +1 FOOLS -> FOOTS +1 FOOL -> FULL +1 FOOD -> FULL +1 FONTEVRAULT -> FONTREVAL +1 FOLLOWS -> FOLLOWED +1 FOLLOWED -> FELL +1 FOLLOWED -> ALL +1 FOLLOW -> FOLLOWING +1 FOE -> FOLK +1 FOALS -> FOOLS +1 FOALS -> FOLDS +1 FOALS -> FALLS +1 FOAL -> WHOLE +1 FOAL -> FULL +1 FOAL -> FOOL +1 FOAL -> FALL +1 FLYING -> LYING +1 FLY -> FLIES +1 FLUTTERING -> FACTIVE +1 FLOWERBEDS -> FLOWER +1 FLOSSY -> FLOSSIE +1 FLOORBOARDS -> FLOOR +1 FLOCKS -> FLAUNT +1 FLEW -> FLUD +1 FLEROV -> FLIROFF +1 FLAVOR -> FLAVOUR +1 FLAVOR -> FLARE +1 FLATTERER -> IS +1 FLATTERED -> FLUTTERED +1 FLATHEADS -> FLAT +1 FLASHLIGHT -> FLASH +1 FLABBERGASTED -> FLAVAGASTED +1 FITTING -> FEELING +1 FISHING -> FISHIN +1 FISHIN -> FISHIN' +1 FISHED -> FINISHED +1 
FIRSTER -> FIRST
+1 FIRE -> FOREIGN
[... remainder of the SUBSTITUTIONS section: one "count ref -> hyp" entry per added diff line, all with count 1 at this point in the descending-count list, ending at "1 A -> ABASEMENT" ...]
+
+DELETIONS: count ref
+20 THE
+17 A
+16 IT
+14 TO
[... remaining DELETIONS entries, one "count ref" per added diff line in descending count order, down to count 1 ...]
+
+INSERTIONS: count hyp
+28 THE
+20 A
+19 IT
+19 AND
[... remaining INSERTIONS entries, one "count hyp" per added diff line in descending count order, down to count 1 ...]
+
+PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp
+THE 2997 278 3134 3138
+A 1037 223 1145 1152
+AND 1706 208 1788 1832
[... remaining PER-WORD STATS entries, one word per added diff line in descending tot_errs order; the section continues beyond this excerpt ...]
1 0 +INSIDE 4 1 4 5 +INQUIRE 1 1 2 1 +INNES 0 1 1 0 +INJURE 2 1 3 2 +INGREDIAN 0 1 0 1 +INGENUOUSLY 0 1 0 1 +INGENIOUSLY 0 1 1 0 +INFORT 0 1 0 1 +INFLUENCED 0 1 1 0 +INFERIOR 2 1 3 2 +INFERE 0 1 0 1 +INFAMYLON 0 1 0 1 +INFAMOUS 0 1 1 0 +INELEGANTLY 0 1 1 0 +INDUCE 0 1 0 1 +INCREASES 2 1 2 3 +INCREASE 4 1 5 4 +INCORRECT 0 1 1 0 +INCOMPARABLE 0 1 1 0 +INCLINE 1 1 1 2 +INCLINATIONS 0 1 0 1 +INCLINATION 0 1 1 0 +INCES 0 1 0 1 +IMPROVISED 1 1 2 1 +IMPROVED 0 1 1 0 +IMPRESSORS 0 1 0 1 +IMPLY 1 1 2 1 +IMPALIATE 0 1 0 1 +IMMENSE 2 1 3 2 +IMAGEDLY 0 1 0 1 +ILU 1 1 2 1 +ILLS 0 1 1 0 +ILL 5 1 5 6 +ILINE 0 1 0 1 +IGNORED 0 1 1 0 +IGNOMY 0 1 1 0 +IGNOMINY 0 1 0 1 +IDLEEN 0 1 0 1 +IDENTS 0 1 0 1 +IDEA 10 1 11 10 +ICES 2 1 3 2 +I'FAITH 0 1 1 0 +HYDROGLOIC 0 1 0 1 +HYDROCLOIC 0 1 0 1 +HUSBITH 0 1 0 1 +HUNTON 0 1 0 1 +HUNTERS 0 1 1 0 +HUNDREDTH 1 1 1 2 +HUNDRED 28 1 29 28 +HUMOUR 1 1 1 2 +HUMOR 0 1 1 0 +HUMBLY 0 1 1 0 +HUMANITY 1 1 1 2 +HUMANITARY 0 1 1 0 +HUH 0 1 1 0 +HUDSPETH 0 1 1 0 +HOWL 0 1 1 0 +HOWE 0 1 0 1 +HOUSEHOLD 0 1 1 0 +HOUNDY 0 1 0 1 +HOUNDED 0 1 1 0 +HOTELED 0 1 0 1 +HOTEL 4 1 4 5 +HOSPITABLY 0 1 1 0 +HORN 1 1 1 2 +HORDE 0 1 1 0 +HOPPING 0 1 1 0 +HOPPER 0 1 1 0 +HOPES 0 1 0 1 +HOO'LL 0 1 1 0 +HOO 0 1 1 0 +HONORS 1 1 2 1 +HONEYMAN 0 1 0 1 +HONESTLY 0 1 1 0 +HONEST 6 1 6 7 +HOMEPUSH 0 1 1 0 +HOLLERED 0 1 0 1 +HOLLER 0 1 1 0 +HOLE 0 1 1 0 +HOF 0 1 1 0 +HOARD 0 1 0 1 +HIT 2 1 3 2 +HISTORY 4 1 4 5 +HISSELF 0 1 1 0 +HINFELD 0 1 0 1 +HINDFELL 0 1 1 0 +HINCOUX 0 1 0 1 +HILLRY 0 1 0 1 +HILBER 0 1 0 1 +HIJAZ 0 1 1 0 +HIGHS 0 1 1 0 +HIGH 7 1 8 7 +HIES 0 1 0 1 +HIERARCHY 0 1 1 0 +HIDINGS 0 1 0 1 +HIDDEN 3 1 3 4 +HI 0 1 1 0 +HEYDAY 0 1 1 0 +HEY 1 1 1 2 +HEWN 0 1 1 0 +HERSELF 35 1 35 36 +HERODIAN 0 1 0 1 +HEROD 0 1 0 1 +HERMANN'S 0 1 0 1 +HERMAND 0 1 0 1 +HERMA 0 1 0 1 +HERIOT'S 0 1 1 0 +HERIOT 0 1 1 0 +HERETOFORE 0 1 1 0 +HERE'S 1 1 2 1 +HERBID 0 1 0 1 +HEN 0 1 1 0 +HELVIN 0 1 1 0 +HELPS 0 1 0 1 +HELP 16 1 17 16 +HELLO 0 1 1 0 +HELL 4 1 4 5 +HELEN 0 1 1 0 +HELDS 0 1 0 1 +HEIRESS'S 0 1 0 1 +HEELED 0 1 1 0 +HEEDED 0 1 1 0 +HEDGES 0 1 1 0 +HEBREW 1 1 2 1 +HEATLESS 0 1 0 1 +HEAT 0 1 1 0 +HEARTY 2 1 3 2 +HEARTIEST 0 1 1 0 +HEARSE 3 1 4 3 +HEARET'S 0 1 0 1 +HEALTH 1 1 1 2 +HEALED 1 1 1 2 +HEADS 4 1 4 5 +HEADLONG 0 1 1 0 +HE'LL 2 1 2 3 +HAZE 0 1 0 1 +HAYE'S 0 1 0 1 +HAY 1 1 1 2 +HAVING 22 1 22 23 +HAVEN 0 1 1 0 +HAUGHTINESS 0 1 1 0 +HAUGHTERSLEY 0 1 0 1 +HATTERSLEY 0 1 1 0 +HATES 2 1 3 2 +HATCHES 0 1 0 1 +HASTE 5 1 5 6 +HASHY 0 1 0 1 +HASAN 1 1 2 1 +HARVEY'SWHICH 0 1 1 0 +HARVEST 3 1 3 4 +HARRISONVILLE 1 1 2 1 +HARRISON 0 1 0 1 +HARRIS 1 1 2 1 +HARRIET 0 1 0 1 +HAROLD 0 1 1 0 +HARMONT'S 0 1 0 1 +HARMON'S 0 1 0 1 +HARMON 0 1 0 1 +HARK 0 1 1 0 +HAREMON 0 1 0 1 +HARE 0 1 1 0 +HARDWORTH 0 1 0 1 +HARDWARE 0 1 1 0 +HARDLY 9 1 10 9 +HARDIEST 0 1 0 1 +HARBOUR 0 1 0 1 +HARBOR 0 1 1 0 +HAPPY 7 1 7 8 +HAPPILY 1 1 1 2 +HAPPEN 3 1 3 4 +HAPLY 0 1 1 0 +HANNER 0 1 0 1 +HANDLES 0 1 0 1 +HAM 0 1 0 1 +HALVES 1 1 1 2 +HALLO 0 1 0 1 +HALEY'S 0 1 1 0 +HAIL 1 1 1 2 +HADN'T 1 1 1 2 +HADDA 0 1 1 0 +GUY 1 1 1 2 +GUV'NER 0 1 0 1 +GUTTER 0 1 0 1 +GURSER 0 1 0 1 +GUQUET 0 1 0 1 +GUNS 2 1 3 2 +GUNNER 0 1 0 1 +GUNDRON 0 1 0 1 +GUNDER 0 1 0 1 +GULLET 0 1 1 0 +GUIRUN'S 0 1 1 0 +GUINEAS 0 1 0 1 +GUINEA 2 1 3 2 +GUIDE 2 1 3 2 +GUESS 5 1 5 6 +GUDRUN 0 1 1 0 +GUARDING 0 1 0 1 +GUARD 1 1 1 2 +GRUMBLINGLY 0 1 1 0 +GRUFFLY 0 1 1 0 +GROWING 2 1 3 2 +GROW 1 1 1 2 +GROVE 0 1 1 0 +GROUND 6 1 6 7 +GROTTO 0 1 1 0 +GROOM 0 1 1 0 +GRIS 0 1 0 1 +GRINNING 2 1 3 2 +GRINNIE 0 1 0 1 +GRINDING 0 1 0 1 +GRIMES 0 1 0 1 +GRIBIER 0 1 1 0 +GREW 4 1 4 5 +GREEBS 0 1 0 1 
+GREAVES 0 1 1 0 +GREAT 39 1 40 39 +GRATITUDE 4 1 5 4 +GRASPS 0 1 1 0 +GRASPED 1 1 1 2 +GRAPPLE 0 1 1 0 +GRANTPLE 0 1 0 1 +GRANDPAPAZZARD 0 1 0 1 +GRANDPAP 2 1 3 2 +GRANDMOTHER 0 1 0 1 +GRANDAME 0 1 1 0 +GRAND 5 1 5 6 +GRAMMATIUS 0 1 0 1 +GRAMMATEUS 0 1 1 0 +GRAM 0 1 1 0 +GOVERNOR 2 1 2 3 +GOVERNMENTS 2 1 3 2 +GOVERN 0 1 0 1 +GOSPIR 0 1 0 1 +GOSLER 0 1 1 0 +GORGE 0 1 0 1 +GORDON'S 2 1 2 3 +GORDON 20 1 21 20 +GOODS 4 1 5 4 +GONDEN 0 1 0 1 +GOLDS 0 1 0 1 +GOLDFISH 0 1 1 0 +GOLDEN 2 1 3 2 +GOFF 0 1 0 1 +GOES 6 1 7 6 +GODS 0 1 0 1 +GODEBILLIOS 0 1 1 0 +GOD'S 2 1 3 2 +GOBY'S 0 1 0 1 +GOBIES 0 1 0 1 +GOBIAS 0 1 0 1 +GNOLD 0 1 0 1 +GNARLED 0 1 1 0 +GLOOMY 1 1 2 1 +GLISPIN'S 0 1 1 0 +GLISBON'S 0 1 0 1 +GLISBON 0 1 0 1 +GLAY 0 1 0 1 +GLANCE 3 1 3 4 +GLADNESSED 0 1 0 1 +GLADDENEST 0 1 1 0 +GLAD 4 1 5 4 +GIRTHING 0 1 1 0 +GIRTHED 0 1 1 0 +GIRLS 5 1 6 5 +GIRK 0 1 0 1 +GIRDS 0 1 1 0 +GIRDING 0 1 0 1 +GIRDED 0 1 0 1 +GILROY 0 1 1 0 +GIGGS 0 1 0 1 +GIFTS 0 1 1 0 +GEVINOVITCH 0 1 0 1 +GESTURE 1 1 1 2 +GERT 0 1 0 1 +GERMAN 7 1 7 8 +GER 0 1 0 1 +GEORGO 0 1 0 1 +GEORGIUM 0 1 0 1 +GEORGIO 0 1 0 1 +GEORGIA 0 1 1 0 +GEORGE'SWHICH 0 1 1 0 +GENTLEMEN'S 0 1 1 0 +GENTLEMAN'S 0 1 0 1 +GENISH 0 1 0 1 +GENEROUS 1 1 2 1 +GENERO 0 1 0 1 +GENERALS 0 1 0 1 +GENERAL 6 1 7 6 +GEAR 1 1 2 1 +GAZE 3 1 4 3 +GAVE 32 1 32 33 +GAUTHIER 0 1 1 0 +GATHIERRE 0 1 0 1 +GASTRATO 0 1 0 1 +GASHED 0 1 1 0 +GASH 0 1 0 1 +GARR 0 1 0 1 +GARDENS 0 1 0 1 +GARDEN'S 0 1 1 0 +GAMMON 0 1 1 0 +GAMIN 0 1 0 1 +GAME 6 1 6 7 +GALLATIN 0 1 1 0 +GAL 0 1 0 1 +GABLE 0 1 1 0 +G'YIRLS 0 1 1 0 +FUZZ 0 1 1 0 +FUZ 0 1 0 1 +FUVENT 0 1 0 1 +FUTURE 3 1 3 4 +FUSSION 0 1 0 1 +FURZE 0 1 1 0 +FUNDEMENT 0 1 0 1 +FUN 2 1 2 3 +FUGITIVES 0 1 1 0 +FUGITIVE 0 1 0 1 +FUCHELEVENT 0 1 0 1 +FRY 1 1 1 2 +FROZE 0 1 1 0 +FROWNED 0 1 1 0 +FRONTIERS 1 1 2 1 +FRONTIER 0 1 0 1 +FRONT 12 1 13 12 +FROGS 2 1 2 3 +FROG'S 0 1 1 0 +FROG 0 1 0 1 +FRO 1 1 2 1 +FRISTOWS 0 1 0 1 +FRISTOE'S 0 1 1 0 +FRIGHTFUL 3 1 4 3 +FRIENDLY 4 1 4 5 +FRIAR 0 1 1 0 +FRESHMENT 0 1 0 1 +FRENCH 5 1 5 6 +FREEZE 0 1 0 1 +FREEWAY 0 1 1 0 +FREES 0 1 1 0 +FREEDOM 5 1 6 5 +FREE 12 1 12 13 +FRANC 0 1 1 0 +FOXTER 0 1 0 1 +FOURTEENTHAT'S 0 1 1 0 +FOURTEEN 3 1 3 4 +FOURCHELEVENT 0 1 0 1 +FOUNDED 1 1 2 1 +FOUGHT 1 1 2 1 +FOSTER 2 1 3 2 +FORWARD 4 1 5 4 +FORTS 0 1 1 0 +FORTNIGHT 0 1 1 0 +FORTNESS 0 1 0 1 +FORMED 1 1 2 1 +FORGIVES 0 1 0 1 +FORGATHERED 0 1 0 1 +FOREMOTHER 0 1 0 1 +FOREMAN 0 1 1 0 +FOREIGN 6 1 6 7 +FOREGATHERED 0 1 1 0 +FORCHELEVENT 0 1 0 1 +FORCEMENT 0 1 0 1 +FORCED 3 1 4 3 +FORCE 7 1 7 8 +FOOTS 0 1 0 1 +FOOTED 0 1 1 0 +FOOT 1 1 1 2 +FOOLING 0 1 0 1 +FOOD 3 1 4 3 +FONTREVAL 0 1 0 1 +FONTEVRAULT 0 1 1 0 +FOLLOWS 5 1 6 5 +FOLK 4 1 4 5 +FOLDS 1 1 1 2 +FOE 0 1 1 0 +FOCIN 0 1 0 1 +FLYING 0 1 1 0 +FLUTTERING 1 1 2 1 +FLUTTERED 0 1 0 1 +FLUD 0 1 0 1 +FLUCHELEVENT 0 1 0 1 +FLOWERBEDS 0 1 1 0 +FLOWER 1 1 1 2 +FLOSSY 0 1 1 0 +FLOSSIE 0 1 0 1 +FLOORBOARDS 0 1 1 0 +FLOOR 3 1 3 4 +FLOCKS 0 1 1 0 +FLITTON 0 1 0 1 +FLIES 0 1 0 1 +FLEW 0 1 1 0 +FLAVOUR 0 1 0 1 +FLAVAGASTED 0 1 0 1 +FLAUNT 0 1 0 1 +FLATTERER 0 1 1 0 +FLATTERED 0 1 1 0 +FLATHER 0 1 0 1 +FLATHEADS 0 1 1 0 +FLAT 0 1 0 1 +FLASHLIGHT 0 1 1 0 +FLARE 0 1 0 1 +FLAP 0 1 0 1 +FLABBERGASTED 0 1 1 0 +FITZ 0 1 0 1 +FITTING 1 1 2 1 +FISHING 2 1 3 2 +FISHIN' 0 1 0 1 +FISHED 1 1 2 1 +FIRSTLY 0 1 0 1 +FIRSTER 0 1 1 0 +FIRS 0 1 0 1 +FIRE 14 1 15 14 +FIR 0 1 0 1 +FINNICAL 0 1 0 1 +FINNEY 1 1 2 1 +FINNELL 0 1 0 1 +FINISHED 3 1 3 4 +FINICAL 0 1 1 0 +FINGERING 1 1 1 2 +FINGER 5 1 6 5 +FINELY 1 1 2 1 +FINDING 7 1 8 7 +FINALLY 6 1 6 7 +FILTRATES 0 1 1 0 +FILTRATE 0 1 1 0 +FILLS 1 1 1 2 
+FILLIP 0 1 0 1 +FILIALLY 0 1 0 1 +FIGURED 0 1 0 1 +FIGGER 0 1 1 0 +FIENDS 0 1 1 0 +FIEND 0 1 0 1 +FIDD 0 1 0 1 +FICTION 0 1 0 1 +FIACUS 0 1 0 1 +FIACRE 0 1 1 0 +FESTIVE 0 1 1 0 +FESTIVATIONS 0 1 0 1 +FERRINDER 0 1 0 1 +FER 0 1 0 1 +FELT 18 1 19 18 +FEET 9 1 9 10 +FEELS 1 1 2 1 +FEEDS 0 1 1 0 +FEE 0 1 1 0 +FEDERATE 0 1 0 1 +FEATS 0 1 0 1 +FEAJANCOTT 0 1 0 1 +FAWN 0 1 0 1 +FAVOURABLE 0 1 0 1 +FAVORABLE 0 1 1 0 +FAULTS 2 1 2 3 +FAULT 5 1 5 6 +FAUCES 0 1 1 0 +FATTY 0 1 1 0 +FATS 1 1 2 1 +FATIGMATIS 0 1 0 1 +FATHERS 1 1 1 2 +FATHERLAND 0 1 0 1 +FATHER'S 6 1 7 6 +FATE 5 1 5 6 +FATAL 1 1 2 1 +FASHIONEVENT 0 1 0 1 +FARRENDER 0 1 0 1 +FARE 0 1 1 0 +FANSTILL 0 1 0 1 +FANGED 0 1 1 0 +FAN 2 1 3 2 +FAMOUS 1 1 1 2 +FAME 2 1 2 3 +FAM'LY 0 1 1 0 +FALLS 0 1 0 1 +FALLING 5 1 6 5 +FAIRLY'S 0 1 0 1 +FAILING 2 1 3 2 +FAILED 7 1 8 7 +FAIL 2 1 3 2 +FAFTENER'S 0 1 0 1 +FAFNER'S 0 1 0 1 +FAFNER 0 1 0 1 +FAFFNER 0 1 0 1 +FACTIVE 0 1 0 1 +FACED 2 1 2 3 +EYED 4 1 4 5 +EXUDY 0 1 0 1 +EXTRAORDINARY 1 1 2 1 +EXTRAORDINARILY 0 1 0 1 +EXTRACTED 0 1 0 1 +EXTRACT 2 1 3 2 +EXTRA 1 1 2 1 +EXTEND 0 1 1 0 +EXTEMPORIZED 0 1 1 0 +EXTEMPORISED 0 1 0 1 +EXPOSED 3 1 3 4 +EXPOSE 2 1 3 2 +EXPLOITING 0 1 1 0 +EXPLODING 0 1 0 1 +EXPLANATION 1 1 1 2 +EXPLAINED 4 1 4 5 +EXPIATION 0 1 1 0 +EXPECTED 7 1 8 7 +EXERT 1 1 1 2 +EXECUTORY 0 1 0 1 +EXECUTE 1 1 1 2 +EXCLAIMED 14 1 15 14 +EXCITING 0 1 1 0 +EXCEPT 11 1 11 12 +EXAMINING 2 1 3 2 +EXAMINED 3 1 3 4 +EXAMINATION 2 1 3 2 +EXACTLY 9 1 9 10 +EXACKLY 0 1 1 0 +EVILISED 0 1 0 1 +EVIL 4 1 5 4 +EVERYONE 0 1 1 0 +EVERYBODY'S 0 1 0 1 +EVERY 38 1 38 39 +EVERGREWING 0 1 0 1 +EVE 2 1 2 3 +EV'YBODY'S 0 1 1 0 +EUSCIBIUS 0 1 0 1 +EURE 0 1 0 1 +EUPHRATES 0 1 1 0 +EUPHRANOR 0 1 1 0 +EUPHRANER 0 1 0 1 +EUPHADIS 0 1 0 1 +EUNUCHS 0 1 0 1 +EUNUCH 10 1 11 10 +EUGEUM 0 1 0 1 +ETHER 2 1 3 2 +ETHEL 0 1 0 1 +ET 2 1 3 2 +ESTRAVA 0 1 0 1 +ESTATES 1 1 1 2 +ESPECIALTY 0 1 0 1 +ESCAPED 1 1 1 2 +ERRATIC 0 1 1 0 +ERON 0 1 0 1 +ERNESTON 0 1 0 1 +ERNESTINE 0 1 1 0 +ERE'S 0 1 1 0 +EQUITY'S 0 1 0 1 +EQUERRY'S 0 1 1 0 +EPIMORPHONE 0 1 0 1 +EPHRAIM 0 1 1 0 +EP 0 1 0 1 +ENTRUSTED 0 1 1 0 +ENTR'ACTE 0 1 1 0 +ENSUED 2 1 3 2 +ENSNARES 0 1 1 0 +ENSLAVED 2 1 3 2 +ENSLAVE 1 1 1 2 +ENRAGED 0 1 1 0 +ENRAGE 0 1 0 1 +ENJOYED 0 1 0 1 +ENJOY 3 1 4 3 +ENGLISH 4 1 4 5 +ENFRANCHISEMENT 0 1 1 0 +ENFORCEMENT 0 1 1 0 +ENEMY 3 1 3 4 +ENDURETH 0 1 1 0 +ENDURED 0 1 0 1 +ENDS 0 1 0 1 +ENDOWED 0 1 0 1 +ENCRONTISEMENT 0 1 0 1 +ENCHANTING 0 1 0 1 +ENCAMP 0 1 0 1 +EMETIC 0 1 1 0 +EMBRUN 0 1 1 0 +ELYGIANCE 0 1 0 1 +ELL 0 1 0 1 +ELIZABETH 1 1 1 2 +ELISIONS 0 1 1 0 +ELEVANT 0 1 0 1 +ELEMENTARY 0 1 0 1 +ELEGANTLY 0 1 0 1 +ELECTED 1 1 2 1 +ELE 0 1 0 1 +ELDER 1 1 2 1 +ELBOWS 1 1 1 2 +ELBOW 0 1 1 0 +ELBERT 0 1 1 0 +ELASTIC 1 1 2 1 +EIGHTHS 0 1 0 1 +EIGHT 9 1 9 10 +EGGS 1 1 2 1 +EFFLARIDE 0 1 0 1 +EELS 0 1 1 0 +EDISM 0 1 0 1 +EDGING 2 1 3 2 +EDGED 0 1 0 1 +ECONOMIC 0 1 1 0 +ECHOED 0 1 1 0 +EBSENTEE 0 1 0 1 +EAU 0 1 1 0 +EATS 0 1 0 1 +EASY 8 1 9 8 +EASE 3 1 4 3 +EARTHLY 1 1 1 2 +EARTHCOP 0 1 0 1 +EARS 3 1 3 4 +EARNEST 8 1 9 8 +EARL'S 0 1 0 1 +EAR 1 1 2 1 +EANS 0 1 0 1 +EAD 0 1 1 0 +EACH 18 1 18 19 +E'LL 0 1 1 0 +E 1 1 2 1 +DYSINTHIAN 0 1 0 1 +DUTY 10 1 10 11 +DUSK 1 1 2 1 +DURING 20 1 20 21 +DURE 0 1 0 1 +DUR 0 1 0 1 +DUPLICATES 1 1 2 1 +DUNFAR 0 1 0 1 +DUMEN 0 1 0 1 +DUMAS 0 1 1 0 +DUM 0 1 1 0 +DUKE 11 1 12 11 +DUDS 0 1 1 0 +DU 0 1 1 0 +DRUGSTORE 0 1 1 0 +DRUG 0 1 0 1 +DROUTH 0 1 1 0 +DRIVER 0 1 0 1 +DRINKS 0 1 1 0 +DRINK 24 1 24 25 +DREAMS 0 1 0 1 +DREAM 3 1 4 3 +DREAD 3 1 3 4 +DRAWING 9 1 9 10 +DRAWERS 1 1 2 1 +DRAWER 0 1 0 1 +DRAW 1 1 2 1 +DRAUGHT 2 1 3 2 
+DRATO 0 1 0 1 +DRAT 0 1 1 0 +DRAMIN 0 1 0 1 +DRAINS 0 1 1 0 +DOWNING 0 1 1 0 +DOWER 0 1 1 0 +DOUGH 0 1 0 1 +DOUBTS 1 1 2 1 +DOTH 1 1 2 1 +DOST 3 1 4 3 +DORIS 0 1 0 1 +DOOMFUL 0 1 0 1 +DONOVAN'S 0 1 1 0 +DONOVAN 1 1 2 1 +DONG 0 1 0 1 +DONALD 0 1 0 1 +DOM 0 1 1 0 +DOLOMAN'S 0 1 0 1 +DOLE 0 1 0 1 +DOIN 0 1 1 0 +DOEST 1 1 2 1 +DOESNATE 0 1 0 1 +DOCTROPOS 0 1 0 1 +DOCTOR 24 1 25 24 +DOCKYARD 0 1 0 1 +DOCK 0 1 1 0 +DOAN 0 1 1 0 +DIXFIELD 0 1 0 1 +DIVIDEST 0 1 0 1 +DIVIDED 2 1 3 2 +DITCHFIELD 0 1 1 0 +DISTRUSTED 0 1 1 0 +DISTRUDGED 0 1 0 1 +DISTRESSED 2 1 2 3 +DISTRESS 3 1 4 3 +DISTANT 4 1 5 4 +DISSENTIENT 0 1 1 0 +DISPUTE 0 1 1 0 +DISPUTABLE 0 1 0 1 +DISPOSED 2 1 3 2 +DISPOSE 0 1 0 1 +DISOUT 0 1 0 1 +DISNEY 0 1 1 0 +DISK 0 1 0 1 +DISINFECTING 0 1 1 0 +DISINFACTANT 0 1 0 1 +DISHES 6 1 7 6 +DISH 2 1 2 3 +DISFIGURED 0 1 1 0 +DISCOURSE 2 1 2 3 +DISCOUR 0 1 0 1 +DISASTROUS 0 1 1 0 +DISASTRATES 0 1 0 1 +DISAPPEAR 1 1 1 2 +DISAGREE 0 1 1 0 +DISAGRATING 0 1 0 1 +DIRTY 2 1 2 3 +DIRTS 0 1 0 1 +DIRK 1 1 2 1 +DIRECTIFY 0 1 0 1 +DINNERS 1 1 1 2 +DINERS 0 1 1 0 +DILUTE 0 1 1 0 +DIGGING 0 1 1 0 +DIGESTION 2 1 3 2 +DIGEON 0 1 0 1 +DICTATED 0 1 1 0 +DIADE 0 1 0 1 +DEVOUR 0 1 1 0 +DEVOTED 3 1 3 4 +DETERMINED 4 1 5 4 +DETECTS 0 1 0 1 +DETECTIVES 0 1 1 0 +DETECTIVE 2 1 2 3 +DETECTIN 0 1 1 0 +DETACHEMONY 0 1 0 1 +DESTRULICIAN 0 1 0 1 +DESTINIES 0 1 1 0 +DESPOTIC 0 1 1 0 +DESPITE 2 1 3 2 +DESK 1 1 2 1 +DESIRES 2 1 3 2 +DESIRE 9 1 9 10 +DESIGNED 1 1 2 1 +DESIGN 2 1 2 3 +DESCORTONA 0 1 0 1 +DESCENTS 0 1 0 1 +DERELICTS 0 1 1 0 +DEPLICATES 0 1 0 1 +DEODORIZING 0 1 1 0 +DENY 1 1 1 2 +DENIS 1 1 2 1 +DEMONICO 0 1 0 1 +DEMON 0 1 0 1 +DELUSION 1 1 2 1 +DELUDE 0 1 0 1 +DELMONICO 0 1 1 0 +DELIVER 1 1 2 1 +DELIGHTED 2 1 2 3 +DEFERENCE 0 1 1 0 +DEEPENED 0 1 1 0 +DEEP 9 1 9 10 +DEDUREUM 0 1 0 1 +DECLINED 1 1 1 2 +DECLINE 0 1 0 1 +DECLATED 0 1 0 1 +DEBTOR 0 1 1 0 +DEBT'S 0 1 0 1 +DEBT 0 1 0 1 +DEARUS 0 1 0 1 +DEANS 0 1 1 0 +DEAN 0 1 0 1 +DEADS 0 1 0 1 +DEACH 0 1 1 0 +DAY'S 1 1 1 2 +DAWNING 0 1 0 1 +DAUGHTER 10 1 10 11 +DATED 1 1 2 1 +DATA 0 1 0 1 +DARKAND 0 1 1 0 +DARED 3 1 3 4 +DAPHNE'S 0 1 1 0 +DANGLE 0 1 0 1 +DANGER 11 1 11 12 +DANCERS 0 1 0 1 +DANCER 0 1 1 0 +DAMN 0 1 1 0 +DAME'S 0 1 1 0 +DAME 1 1 1 2 +DALYS 0 1 1 0 +DALY 2 1 3 2 +DALMY 0 1 0 1 +DALEY 0 1 0 1 +DAILY 2 1 2 3 +DAILIES 0 1 0 1 +DAGOS 0 1 1 0 +DAGGERS 0 1 0 1 +DADDY 0 1 1 0 +D'YE 0 1 0 1 +CYRUP 0 1 0 1 +CYNTHIA 1 1 2 1 +CYMBALS 0 1 1 0 +CUT 11 1 12 11 +CUSTOM 1 1 2 1 +CURRENTS 0 1 1 0 +CURRANTS 0 1 0 1 +CURFYRAC 0 1 0 1 +CUPS 0 1 0 1 +CUP 0 1 1 0 +CUNNINGSBURG 0 1 0 1 +CUISINE 0 1 1 0 +CUCKOO 0 1 0 1 +CRUX 0 1 1 0 +CRUSHING 1 1 2 1 +CRUMBLY 0 1 1 0 +CRUMBLED 0 1 0 1 +CRUCIFIXION 8 1 9 8 +CROYD 0 1 0 1 +CROWN 3 1 3 4 +CROST 0 1 1 0 +CROPPISH 0 1 0 1 +CRISTO 0 1 0 1 +CRISP 0 1 0 1 +CRIME 4 1 5 4 +CRIES 3 1 4 3 +CREO 0 1 0 1 +CREEL 0 1 1 0 +CREEK 0 1 0 1 +CREDITUDE 0 1 0 1 +CREDITED 0 1 0 1 +CREDIT 4 1 5 4 +CREATES 0 1 0 1 +CRAYFISH 2 1 3 2 +CRATES 1 1 2 1 +CRATER 0 1 0 1 +CRASHING 0 1 0 1 +CRAMPLED 0 1 0 1 +CRAMBLY 0 1 0 1 +CRABS 0 1 0 1 +CRAB 6 1 7 6 +COYNESS 0 1 1 0 +COY 1 1 2 1 +COXCOMB 0 1 1 0 +COWLEY'S 0 1 1 0 +COVETTES 0 1 0 1 +COURTYARD 2 1 2 3 +COURSING 0 1 1 0 +COURSE 15 1 16 15 +COUNTS 0 1 0 1 +COUNTENANCE 2 1 2 3 +COUNSELS 0 1 1 0 +COUNSELLOR 0 1 0 1 +COUNCILLOR 0 1 1 0 +COULDN'T 7 1 7 8 +COUCH 0 1 0 1 +COSTUME 1 1 1 2 +COST 1 1 2 1 +COSEINE 0 1 0 1 +CORYDON 0 1 1 0 +CORTONA 0 1 1 0 +CORSICIAN 0 1 0 1 +CORSICAN 0 1 1 0 +CORRIER 0 1 0 1 +CORRECT 1 1 1 2 +CORNESTONES 0 1 0 1 +CORN 0 1 0 1 +CORMOR 0 1 0 1 +CORKLE 0 1 1 0 +CORKEL 0 1 0 1 +CORAL 0 1 1 0 
+COQUETTE 1 1 2 1 +COPSE 0 1 0 1 +COPPER 2 1 2 3 +COP 2 1 3 2 +COOPS 0 1 1 0 +COOL 4 1 5 4 +COOKING 1 1 1 2 +CONTRAY 0 1 0 1 +CONTINUOUS 0 1 0 1 +CONTINUAL 0 1 1 0 +CONTINGENT 0 1 1 0 +CONTENDENT 0 1 0 1 +CONTEND 1 1 2 1 +CONSUM 0 1 0 1 +CONSTANT 3 1 4 3 +CONSONANTS 0 1 1 0 +CONSOMME 0 1 1 0 +CONSIST 0 1 0 1 +CONQUER 0 1 1 0 +CONINGSBURGH 0 1 1 0 +CONGEALETH 0 1 1 0 +CONFUSE 0 1 0 1 +CONFIRMATON 0 1 0 1 +CONFIRMATION 0 1 1 0 +CONFINED 0 1 0 1 +CONFINE 0 1 0 1 +CONFIDENTIALLY 0 1 1 0 +CONFIDE 1 1 2 1 +CONFICERE 0 1 1 0 +CONFESS 8 1 9 8 +CONCUR 0 1 0 1 +CONCOCTED 1 1 2 1 +CONCLUDED 2 1 2 3 +CONCERN 2 1 2 3 +CONCEALETH 0 1 0 1 +COMPOSER 0 1 0 1 +COMPEND 0 1 0 1 +COMPARABLE 0 1 0 1 +COMORIN 0 1 1 0 +COMMITTEE 5 1 6 5 +COMMENCED 1 1 2 1 +COMMANDS 2 1 2 3 +COMMANDER 2 1 2 3 +COMIN 0 1 0 1 +COMETH 0 1 1 0 +COMEST 0 1 1 0 +COMB 0 1 1 0 +COLOUR 2 1 2 3 +COLOSSEUM 0 1 1 0 +COLOR 1 1 2 1 +COLONEL 27 1 28 27 +COLOGNE 0 1 1 0 +COLLEGE 0 1 0 1 +COLLECTED 1 1 1 2 +COLLECT 0 1 1 0 +COLLEASE 0 1 0 1 +COLISEUM 0 1 0 1 +COLDS 0 1 1 0 +COLCHISED 0 1 0 1 +COLCHESTER 4 1 5 4 +COINS 1 1 2 1 +COFFISH 0 1 0 1 +COFFIN 20 1 20 21 +COFFEE 0 1 1 0 +CODE 0 1 0 1 +COD 0 1 1 0 +COCOA 0 1 1 0 +COCKROL 0 1 0 1 +COCKRELL 0 1 1 0 +COCKET 0 1 0 1 +COBBER 0 1 1 0 +COATS 0 1 0 1 +COARSING 0 1 0 1 +COARSE 0 1 0 1 +COALESCED 0 1 1 0 +CO 0 1 0 1 +CLUMB 0 1 1 0 +CLUBTOMANIA 0 1 0 1 +CLOTHS 0 1 0 1 +CLOTHING 0 1 0 1 +CLOTH 2 1 2 3 +CLOSETS 0 1 0 1 +CLOSET 1 1 2 1 +CLOSEST 0 1 1 0 +CLOSER 1 1 1 2 +CLOSELY 4 1 5 4 +CLOSE 13 1 14 13 +CLOON 0 1 0 1 +CLOMB 0 1 1 0 +CLOISTER 2 1 3 2 +CLIME 1 1 2 1 +CLIFF 4 1 4 5 +CLEVERLY 0 1 1 0 +CLEVER 2 1 3 2 +CLERVAL 0 1 0 1 +CLEPTOMANIA 0 1 0 1 +CLEFT 1 1 2 1 +CLEF 0 1 0 1 +CLEAVE 0 1 1 0 +CLAWS 0 1 1 0 +CLAVIER 0 1 0 1 +CLASSES 3 1 4 3 +CLARE 0 1 0 1 +CLARA 0 1 0 1 +CLAIRVAUX 0 1 1 0 +CLACKENED 0 1 0 1 +CISEAUX 0 1 1 0 +CINRILLA 0 1 0 1 +CINDERELLA 0 1 1 0 +CIGAR 1 1 1 2 +CHURCH 13 1 14 13 +CHUG 0 1 0 1 +CHUCKED 0 1 1 0 +CHRISTIANS 0 1 1 0 +CHRISTIANITY 2 1 3 2 +CHRISTIANING 0 1 0 1 +CHRISTIAN 0 1 0 1 +CHRISTENING 0 1 1 0 +CHRISTEN 0 1 0 1 +CHOUETTE 0 1 1 0 +CHOSE 2 1 2 3 +CHONODEMAIRE 0 1 1 0 +CHOKINGLY 0 1 0 1 +CHLORODE 0 1 0 1 +CHIPS 1 1 2 1 +CHINTZ 0 1 1 0 +CHIN 1 1 1 2 +CHIMNETS 0 1 0 1 +CHILLS 0 1 1 0 +CHILLED 1 1 1 2 +CHILL 1 1 1 2 +CHIEFS 0 1 0 1 +CHIEF 8 1 9 8 +CHIDE 0 1 1 0 +CHID 0 1 0 1 +CHEWERS 1 1 2 1 +CHEVIKI 0 1 0 1 +CHEVARIN 0 1 0 1 +CHEST 3 1 3 4 +CHEFTS 0 1 0 1 +CHEFS 0 1 1 0 +CHEEKE 0 1 1 0 +CHEEKBONES 0 1 1 0 +CHECKING 0 1 1 0 +CHAUVELT 0 1 0 1 +CHAUDURUS 0 1 0 1 +CHARRED 0 1 0 1 +CHARMED 1 1 2 1 +CHARLIE'S 0 1 0 1 +CHARLEY'S 0 1 1 0 +CHARLEM 0 1 0 1 +CHARGED 4 1 5 4 +CHARACTERISTIC 0 1 1 0 +CHARACTER 6 1 6 7 +CHAMBERLAIN 5 1 6 5 +CHALONS 0 1 1 0 +CHALON 0 1 0 1 +CHADWELL 0 1 1 0 +CERTAINLY 13 1 13 14 +CERTAIN 15 1 16 15 +CENTIA 0 1 0 1 +CELL 1 1 2 1 +CEDRIC 1 1 2 1 +CEASE 0 1 0 1 +CAVERNMENT 0 1 0 1 +CAVALRYMEN 0 1 1 0 +CATS 1 1 2 1 +CATO 0 1 0 1 +CATHOLIC 1 1 1 2 +CATHEDRAL 0 1 1 0 +CATEURAL 0 1 0 1 +CATCHED 0 1 1 0 +CATCH 6 1 6 7 +CAT 2 1 3 2 +CASTRATO 1 1 2 1 +CASTLE 8 1 9 8 +CASTETH 0 1 1 0 +CASION 0 1 0 1 +CART 3 1 3 4 +CARRIED 11 1 12 11 +CARP 0 1 0 1 +CARMINALS 0 1 0 1 +CAREWORN 1 1 2 1 +CARED 4 1 4 5 +CAPTURED 2 1 2 3 +CAPTURE 2 1 3 2 +CAPTAIN 17 1 17 18 +CAPS 0 1 0 1 +CAPRIVI'S 0 1 1 0 +CAPITULAT 0 1 0 1 +CAPITULANTES 0 1 1 0 +CAPITALISTS 0 1 1 0 +CAPITALIST 0 1 0 1 +CAP'S 0 1 1 0 +CAP 4 1 5 4 +CANS 0 1 1 0 +CANONIZED 0 1 1 0 +CANNONIZED 0 1 0 1 +CANES 0 1 0 1 +CAMPED 0 1 1 0 +CAMPAIGN 0 1 1 0 +CAMP 0 1 1 0 +CAMOUFLAGE 0 1 1 0 +CAMERA 0 1 0 1 +CAMEO 0 1 0 1 +CAMELO 0 1 
0 1 +CALLER 0 1 0 1 +CALENDER 0 1 0 1 +CALENDAR 0 1 1 0 +CAIN 0 1 1 0 +CAGLE 0 1 0 1 +CAGE 7 1 8 7 +CACKED 0 1 1 0 +CABLE 0 1 0 1 +CA'M 0 1 1 0 +BYE 0 1 1 0 +BUY 4 1 4 5 +BUTTON 0 1 1 0 +BUTTERFLY 0 1 1 0 +BUST 0 1 1 0 +BUSINESSWHICH 0 1 1 0 +BUSINESS 12 1 12 13 +BURYING 0 1 1 0 +BURST 2 1 3 2 +BURSHEBA 0 1 1 0 +BURNEHILD 0 1 0 1 +BURGLAR 0 1 0 1 +BULL 0 1 0 1 +BULBS 0 1 1 0 +BULB 0 1 1 0 +BUILDS 0 1 1 0 +BUILD 1 1 1 2 +BUFFETING 0 1 1 0 +BUFFET 0 1 0 1 +BRYNHILD'S 0 1 1 0 +BROWN 0 1 0 1 +BROW 0 1 0 1 +BROKER 0 1 0 1 +BROAD 3 1 3 4 +BRITTANNIUM 0 1 0 1 +BRINEHILL 0 1 0 1 +BRILLIANT 4 1 5 4 +BRENT 0 1 0 1 +BREED 0 1 1 0 +BREATHLESS 1 1 2 1 +BREATHING 0 1 1 0 +BREATHE 0 1 0 1 +BREASTPAND 0 1 0 1 +BREAST 1 1 2 1 +BREASING 0 1 0 1 +BRAZY 0 1 0 1 +BRAU 0 1 1 0 +BRASS 1 1 2 1 +BRAMMING 0 1 0 1 +BRAMMEN 0 1 0 1 +BRAMID 0 1 0 1 +BRAMA 0 1 0 1 +BRAHMIN 0 1 0 1 +BRACEY 0 1 0 1 +BRACES 0 1 0 1 +BRACELE 0 1 0 1 +BOYS 7 1 8 7 +BOX 10 1 10 11 +BOWL 0 1 1 0 +BOURMANOIR 0 1 0 1 +BOURGES 0 1 1 0 +BOURGE 0 1 0 1 +BOUHAIR 0 1 0 1 +BOUGHT 3 1 4 3 +BOTTOMED 0 1 1 0 +BOTTLES 1 1 1 2 +BOTTLED 0 1 1 0 +BOTTLE 3 1 4 3 +BOTHERED 1 1 1 2 +BOONE 0 1 1 0 +BOON 1 1 2 1 +BOOMED 1 1 2 1 +BOOLA 0 1 0 1 +BONNETS 1 1 2 1 +BONNET 1 1 1 2 +BONES 2 1 2 3 +BONDAGE 1 1 2 1 +BOLTED 1 1 1 2 +BOLT 0 1 1 0 +BOLSHEVIKI 2 1 3 2 +BOILER 0 1 1 0 +BOIL 4 1 5 4 +BOEUF 1 1 2 1 +BOEOTIAN 0 1 1 0 +BOBS 0 1 0 1 +BOB'S 1 1 2 1 +BOARDS 1 1 1 2 +BOARD 5 1 5 6 +BLUE 7 1 7 8 +BLOTCHETT 0 1 0 1 +BLOOMY 0 1 0 1 +BLOOMIN 0 1 1 0 +BLOODSHED 0 1 1 0 +BLOODED 0 1 0 1 +BLOKES 0 1 1 0 +BLOKE 0 1 1 0 +BLOCK 1 1 2 1 +BLOCHHEAD 0 1 0 1 +BLINKED 1 1 2 1 +BLEW 0 1 1 0 +BLANKETED 0 1 1 0 +BLANKET 0 1 0 1 +BLANKARD 0 1 0 1 +BLAMMED 0 1 0 1 +BLADGET 0 1 0 1 +BLADGE 0 1 0 1 +BLACKLEG 0 1 1 0 +BLACKGUARD 0 1 1 0 +BLACK 13 1 13 14 +BITING 0 1 1 0 +BISQUE 0 1 1 0 +BIRTHPLACE 0 1 1 0 +BIRDSEYE 0 1 1 0 +BIRDS 0 1 0 1 +BIRD'S 0 1 0 1 +BINTAMEN 0 1 0 1 +BINKED 0 1 0 1 +BIND 0 1 1 0 +BILLY 0 1 0 1 +BIG 8 1 8 9 +BIDS 0 1 0 1 +BICK 0 1 0 1 +BIBLE 3 1 4 3 +BIBBICAL 0 1 0 1 +BI 0 1 0 1 +BHANG 0 1 1 0 +BEXT 0 1 0 1 +BEWARE 1 1 2 1 +BEVOCO 0 1 0 1 +BEULAH 0 1 1 0 +BETWEEN 21 1 21 22 +BETTER 28 1 29 28 +BETHUNE 0 1 1 0 +BETCHA 0 1 1 0 +BETAKEN 0 1 1 0 +BESTOW 3 1 3 4 +BESSON 0 1 0 1 +BESOON 0 1 0 1 +BESIDES 9 1 9 10 +BESIDE 3 1 4 3 +BERTULAS 0 1 0 1 +BERTRADIZANCE 0 1 0 1 +BERTIE 0 1 0 1 +BERTH 0 1 0 1 +BEPPOCO 0 1 0 1 +BENT 1 1 2 1 +BENSON 0 1 1 0 +BENOIT 0 1 1 0 +BENOIS 0 1 0 1 +BENNETT 0 1 1 0 +BENNET 0 1 0 1 +BENJAMIN 0 1 1 0 +BENEATH 3 1 3 4 +BENDONED 0 1 0 1 +BEND 1 1 1 2 +BEN 0 1 0 1 +BELT 0 1 0 1 +BELOVED 1 1 1 2 +BELONGS 1 1 1 2 +BELONGED 0 1 1 0 +BELLOWED 0 1 1 0 +BELLE 0 1 1 0 +BELIKE 0 1 1 0 +BELIEVED 5 1 6 5 +BELIEF 5 1 5 6 +BEHOLDAY 0 1 0 1 +BEGUN 2 1 3 2 +BEGIN 9 1 9 10 +BEGGING 1 1 2 1 +BEFOREWARD 0 1 0 1 +BEFALL 0 1 0 1 +BEFAL 0 1 1 0 +BEER 2 1 2 3 +BEECH 0 1 0 1 +BEDS 1 1 1 2 +BEDOUIN 0 1 1 0 +BEDOING 0 1 0 1 +BED 14 1 14 15 +BECKY 0 1 1 0 +BECAUSE 34 1 34 35 +BEAUMANOIR 0 1 1 0 +BEATHFUL 0 1 0 1 +BEAT 5 1 6 5 +BEASTLY 0 1 1 0 +BEARING 6 1 6 7 +BEARED 0 1 0 1 +BEACHER 0 1 0 1 +BEA 0 1 0 1 +BAXTER 0 1 1 0 +BATH 1 1 1 2 +BASSORA 0 1 0 1 +BASER 1 1 1 2 +BASEMENT 0 1 1 0 +BARS 5 1 5 6 +BARRENDERS 0 1 0 1 +BARKLEY 0 1 1 0 +BARGIENLO 0 1 0 1 +BARGELLO 0 1 1 0 +BARELY 1 1 2 1 +BARE 1 1 1 2 +BAR 6 1 7 6 +BAPTISMAL 0 1 1 0 +BAPLICO 0 1 0 1 +BANNY 0 1 0 1 +BANDREE 0 1 0 1 +BANDINELLO 0 1 1 0 +BANDAGE 0 1 0 1 +BALLROOM 0 1 1 0 +BALLOT 3 1 4 3 +BALK 0 1 0 1 +BALES 2 1 2 3 +BALE 0 1 1 0 +BALAMMED 0 1 1 0 +BAILIQUE 0 1 0 1 +BAILEY'S 0 1 0 1 +BAIL 1 1 1 2 +BAGS 2 1 2 3 +BAG 4 
1 5 4 +BAFF 0 1 0 1 +BACKY 0 1 0 1 +BABYSMAL 0 1 0 1 +BABES 0 1 1 0 +BABE 0 1 0 1 +B 2 1 3 2 +AZURE 0 1 1 0 +AZARIAH 0 1 1 0 +AY 0 1 0 1 +AXIS 0 1 0 1 +AX 0 1 0 1 +AWK'ARD 0 1 1 0 +AWFUL 4 1 4 5 +AW 0 1 1 0 +AVOID 4 1 4 5 +AVIDITY 0 1 1 0 +AVENUE 2 1 3 2 +AVE 0 1 1 0 +AUTHEST 0 1 0 1 +AUNTS 0 1 0 1 +AUGHT 2 1 2 3 +ATTIGUE 0 1 0 1 +ATTENTIVE 0 1 0 1 +ATTENTIONS 0 1 0 1 +ATTENDED 0 1 0 1 +ATTEMPTED 2 1 3 2 +ATRONE 0 1 0 1 +ATOM 0 1 0 1 +ATHELSTANE 0 1 1 0 +ASSERTED 0 1 0 1 +ASSERT 2 1 2 3 +ASSAILING 0 1 1 0 +ASS 2 1 3 2 +ASLEEP 9 1 10 9 +ASIA 0 1 1 0 +ASHUR 0 1 1 0 +ASHORE 4 1 4 5 +ASHLEY 4 1 5 4 +ASHHOPPER 0 1 0 1 +ASHER 0 1 0 1 +ASH 0 1 1 0 +ASCERTAIN 0 1 1 0 +ARTISTRA 0 1 0 1 +ARTIST 5 1 6 5 +ARTISON 0 1 0 1 +ARTHUR 0 1 1 0 +ARSTS 0 1 1 0 +ARSINOE'S 0 1 1 0 +ARSENO'S 0 1 0 1 +ARRIVED 4 1 4 5 +ARRIVE 3 1 4 3 +ARREST 0 1 0 1 +ARQUEBALD 0 1 0 1 +ARPET 0 1 0 1 +AROUSED 0 1 1 0 +ARM 4 1 5 4 +ARKANSAS 0 1 1 0 +ARISED 0 1 0 1 +AREN'TO 0 1 0 1 +AREIAT 0 1 0 1 +ARDENTS 0 1 0 1 +ARDENT 2 1 3 2 +ARD 0 1 0 1 +ARCHIBALD 0 1 1 0 +ARCHIAS 0 1 1 0 +ARCHBISHOP 1 1 2 1 +ARCHBISH 0 1 0 1 +APPROVE 0 1 1 0 +APPLES 0 1 0 1 +APPEARED 8 1 8 9 +APPEAR 5 1 6 5 +APPARENTLY 4 1 5 4 +APOMORPHINE 0 1 1 0 +APOLLO 0 1 0 1 +APES 1 1 2 1 +APE 1 1 1 2 +APARTMENTAL 0 1 0 1 +ANYWAY 1 1 2 1 +ANYTHING 31 1 31 32 +ANYONE'S 0 1 1 0 +ANY'S 0 1 0 1 +ANTONIO 0 1 1 0 +ANTONIA 0 1 0 1 +ANTOLIAN 0 1 1 0 +ANTIDOTES 0 1 1 0 +ANSWERS 1 1 2 1 +ANSWERED 26 1 26 27 +ANOTHER 30 1 31 30 +ANNOYED 2 1 3 2 +ANNOY 0 1 0 1 +ANNOUNCE 1 1 1 2 +ANNE 1 1 1 2 +ANIMATE 0 1 1 0 +ANGUISH 3 1 4 3 +ANGISTON 0 1 0 1 +ANGESTON 0 1 1 0 +ANEW 0 1 1 0 +ANDY'S 0 1 0 1 +ANDS 0 1 1 0 +ANDREW 1 1 2 1 +ANDBUT 0 1 1 0 +ANCESTORS 0 1 1 0 +ANALYSIS 0 1 1 0 +AMOUR 0 1 1 0 +AMOPRA 0 1 0 1 +AMONGST 4 1 5 4 +AMIDDY 0 1 0 1 +AMID 1 1 1 2 +AMIABLE 0 1 1 0 +AMBRON 0 1 0 1 +AMATIC 0 1 0 1 +ALTHOUGH 9 1 10 9 +ALTHIE 0 1 0 1 +ALTHIA 0 1 0 1 +ALTER 0 1 1 0 +ALONGSIDE 0 1 1 0 +ALONGER 0 1 1 0 +ALONG 15 1 15 16 +ALONE 10 1 10 11 +ALOES 1 1 2 1 +ALLS 0 1 1 0 +ALLOWED 6 1 7 6 +ALLOWANCE 0 1 1 0 +ALLIGATOR 1 1 2 1 +ALLIES 0 1 0 1 +ALLIED 0 1 1 0 +ALLEY 1 1 1 2 +ALLAYS 0 1 1 0 +ALIVE 2 1 2 3 +ALIT 0 1 0 1 +ALISANDRO 0 1 0 1 +ALIMENTARY 0 1 1 0 +ALIGHTED 1 1 2 1 +ALFRED 0 1 0 1 +ALF 0 1 1 0 +ALEXE 0 1 0 1 +ALESSANDRO 0 1 1 0 +ALENUBERG 0 1 0 1 +ALD 0 1 0 1 +ALBERTS 0 1 0 1 +ALBERT'S 2 1 3 2 +ALAN 0 1 0 1 +ALADAMA 0 1 0 1 +ALABAMA 0 1 1 0 +AKALOIDS 0 1 0 1 +AIR 10 1 10 11 +AIMED 0 1 0 1 +AILS 0 1 1 0 +AILEEN 0 1 0 1 +AGRARIAN 0 1 1 0 +AGONIC 0 1 0 1 +AGONE 0 1 1 0 +AGENTIVE 0 1 0 1 +AGENT 2 1 3 2 +AGE 4 1 4 5 +AFTERWARDS 5 1 6 5 +AFTERWARD 2 1 2 3 +AFT 1 1 2 1 +AFORESAID 0 1 1 0 +AFOR 0 1 0 1 +AFIRE 0 1 1 0 +AFFLICTION 0 1 1 0 +AFFLICATION 0 1 0 1 +AFFECTION 0 1 1 0 +AFFECTANT 0 1 0 1 +AFAR 0 1 0 1 +ADVISED 0 1 1 0 +ADVICE 2 1 2 3 +ADVENTURES 2 1 3 2 +ADVENTUR 0 1 0 1 +ADULTERATED 0 1 0 1 +ADULT 0 1 1 0 +ADORED 0 1 1 0 +ADN'T 0 1 1 0 +ADMIRED 1 1 1 2 +ADMIABLE 0 1 0 1 +ADHERENTS 0 1 1 0 +ADHERENCE 0 1 0 1 +ADDSTEIN 0 1 0 1 +ADDER 0 1 0 1 +ADAIR 0 1 1 0 +AD 0 1 1 0 +ACUTORATION 0 1 0 1 +ACUTE 1 1 2 1 +ACTS 3 1 4 3 +ACQUIRED 2 1 2 3 +ACQUIRE 0 1 0 1 +ACQUAINT 1 1 1 2 +ACOLITES 0 1 0 1 +ACKNOWLEDGE 1 1 2 1 +ACHESON 0 1 1 0 +ACCUSTRA 0 1 0 1 +ACCUSTOM 0 1 0 1 +ACCULENT 0 1 0 1 +ACCOUNT 7 1 7 8 +ACCORD 1 1 1 2 +ACCOHOL 0 1 0 1 +ACCESS 0 1 1 0 +ACCEPT 4 1 5 4 +ABSTANCE 0 1 0 1 +ABSTAINED 0 1 0 1 +ABSTAIN 0 1 1 0 +ABSENTEE 0 1 1 0 +ABRUPT 0 1 0 1 +ABOVE 9 1 9 10 +ABIDING 0 1 0 1 +ABASEMENT 0 1 0 1 +ZEAL 1 0 1 1 +ZAMAN 4 0 4 4 +YUNKERS 1 0 1 1 +YOURSELF 9 0 9 9 +YOUNGEST 1 0 1 1 +YOUNGERS 2 0 2 2 
+YOUNGER 8 0 8 8 +YORK 3 0 3 3 +YONDER 5 0 5 5 +YOLKS 1 0 1 1 +YIELDED 1 0 1 1 +YIELD 1 0 1 1 +YESTERDAY 3 0 3 3 +YES'M 1 0 1 1 +YELLOW 4 0 4 4 +YELL 1 0 1 1 +YEARNS 1 0 1 1 +YEARNING 1 0 1 1 +YEA 1 0 1 1 +YAWN 1 0 1 1 +YARNS 1 0 1 1 +YARDS 1 0 1 1 +YACHT 1 0 1 1 +WYLDER 1 0 1 1 +WRONG 4 0 4 4 +WRITING 1 0 1 1 +WRINKLES 1 0 1 1 +WRINGING 1 0 1 1 +WRIGGLING 1 0 1 1 +WRETCHED 2 0 2 2 +WRECKAGE 1 0 1 1 +WREATHS 1 0 1 1 +WRAPPING 1 0 1 1 +WRAPPED 1 0 1 1 +WOUND 2 0 2 2 +WOULDST 1 0 1 1 +WOULDN'T 9 0 9 9 +WOTTETH 1 0 1 1 +WORST 1 0 1 1 +WORN 3 0 3 3 +WORLDLY 1 0 1 1 +WORKSHOP 1 0 1 1 +WORKHOUSE 1 0 1 1 +WORKERS 1 0 1 1 +WORKER 1 0 1 1 +WORE 3 0 3 3 +WONDROUS 1 0 1 1 +WONDERS 1 0 1 1 +WONDERING 1 0 1 1 +WONDERFULLY 1 0 1 1 +WONDERFUL 6 0 6 6 +WON'T 13 0 13 13 +WOMAN'S 2 0 2 2 +WOLF 1 0 1 1 +WIZARDS 1 0 1 1 +WITTY 1 0 1 1 +WITNESSED 1 0 1 1 +WITHIN 11 0 11 11 +WITHHELD 2 0 2 2 +WITHDRAWN 2 0 2 2 +WITCHES 1 0 1 1 +WISTFUL 1 0 1 1 +WISHING 3 0 3 3 +WISHES 3 0 3 3 +WISH 15 0 15 15 +WISELY 1 0 1 1 +WISDOM 4 0 4 4 +WISCONSIN 1 0 1 1 +WIRE 1 0 1 1 +WINGS 2 0 2 2 +WINDOWS 1 0 1 1 +WIN 2 0 2 2 +WILLINGLY 1 0 1 1 +WILLING 4 0 4 4 +WILFUL 1 0 1 1 +WIELD 1 0 1 1 +WIDOWER 1 0 1 1 +WIDOW 1 0 1 1 +WICKED 2 0 2 2 +WHOSO 1 0 1 1 +WHOOP 1 0 1 1 +WHOMSOEVER 1 0 1 1 +WHOLLY 2 0 2 2 +WHOEVER 2 0 2 2 +WHITEHALL 1 0 1 1 +WHISTLING 2 0 2 2 +WHISTLE 3 0 3 3 +WHISPERED 2 0 2 2 +WHIPPINGS 1 0 1 1 +WHIP 2 0 2 2 +WHIMPERING 1 0 1 1 +WHIM 1 0 1 1 +WHILST 1 0 1 1 +WHEREVER 2 0 2 2 +WHEREUPON 3 0 3 3 +WHEREIN 2 0 2 2 +WHEREFORE 2 0 2 2 +WHEREBY 2 0 2 2 +WHERE'S 4 0 4 4 +WHENEVER 4 0 4 4 +WHENCE 5 0 5 5 +WHEELS 1 0 1 1 +WHATSOEVER 1 0 1 1 +WETTED 1 0 1 1 +WET 3 0 3 3 +WESTWARD 1 0 1 1 +WESTERN 2 0 2 2 +WEREN'T 1 0 1 1 +WEPT 2 0 2 2 +WENCH 1 0 1 1 +WELCOMED 1 0 1 1 +WELCOME 4 0 4 4 +WEIGHTY 1 0 1 1 +WEIGHT 1 0 1 1 +WEIGHING 1 0 1 1 +WEIGHED 1 0 1 1 +WEEPING 3 0 3 3 +WEEKS 1 0 1 1 +WEDNESDAY 2 0 2 2 +WEDDING 7 0 7 7 +WEB 1 0 1 1 +WEARY 4 0 4 4 +WEARING 2 0 2 2 +WEAPONS 1 0 1 1 +WEAPON 1 0 1 1 +WEALTHY 3 0 3 3 +WEALTH 3 0 3 3 +WEAKNESS 4 0 4 4 +WE'D 1 0 1 1 +WAZIR 5 0 5 5 +WAYLAID 1 0 1 1 +WAVING 1 0 1 1 +WAVES 2 0 2 2 +WAVE 1 0 1 1 +WATERY 1 0 1 1 +WATERVILLE 1 0 1 1 +WATERS 1 0 1 1 +WATCHING 7 0 7 7 +WASTED 1 0 1 1 +WASHINGTON 2 0 2 2 +WASHED 4 0 4 4 +WASH 4 0 4 4 +WARRANT 1 0 1 1 +WARNING 1 0 1 1 +WARNER 1 0 1 1 +WARN'T 2 0 2 2 +WARMTH 1 0 1 1 +WARMLY 1 0 1 1 +WARMEST 1 0 1 1 +WARM 1 0 1 1 +WAREHOUSES 1 0 1 1 +WANTING 1 0 1 1 +WALLET 2 0 2 2 +WALKED 10 0 10 10 +WALK 5 0 5 5 +WAKING 2 0 2 2 +WAKED 2 0 2 2 +WAITERS 1 0 1 1 +WAITER 1 0 1 1 +WAISTCOAT 3 0 3 3 +WAIST 1 0 1 1 +WAGONS 1 0 1 1 +WAGON 1 0 1 1 +WAGED 1 0 1 1 +WADDLED 1 0 1 1 +W 1 0 1 1 +VRONSKY 1 0 1 1 +VOYAGES 1 0 1 1 +VOYAGE 7 0 7 7 +VOWS 2 0 2 2 +VOWELS 1 0 1 1 +VOW 1 0 1 1 +VOTING 2 0 2 2 +VOTES 3 0 3 3 +VOTED 1 0 1 1 +VON 1 0 1 1 +VOLUNTEERS 2 0 2 2 +VOLUNTARILY 1 0 1 1 +VOLUMINOUS 1 0 1 1 +VOLUME 2 0 2 2 +VOLLEY 1 0 1 1 +VOLCANOES 1 0 1 1 +VOICELESS 1 0 1 1 +VOCAL 2 0 2 2 +VITRIOL 1 0 1 1 +VITAL 1 0 1 1 +VISITOR 4 0 4 4 +VISITING 1 0 1 1 +VISITED 2 0 2 2 +VISIT 8 0 8 8 +VISION 1 0 1 1 +VISCOUNT 1 0 1 1 +VIRTUOUS 2 0 2 2 +VIRTUE 2 0 2 2 +VIRGINIA 1 0 1 1 +VIOLENTLY 1 0 1 1 +VIOLENT 1 0 1 1 +VINE 1 0 1 1 +VINDICTIVENESS 1 0 1 1 +VILLAGE 2 0 2 2 +VILE 1 0 1 1 +VIGOROUS 3 0 3 3 +VIGILANT 1 0 1 1 +VIEWS 1 0 1 1 +VIEW 1 0 1 1 +VICTORY 1 0 1 1 +VICTORIAN 1 0 1 1 +VICTIMS 1 0 1 1 +VICTIMIZE 1 0 1 1 +VICTIM 3 0 3 3 +VICIOUS 3 0 3 3 +VICES 1 0 1 1 +VICE 1 0 1 1 +VEXED 1 0 1 1 +VEXATION 1 0 1 1 +VESSEL 2 0 2 2 +VERDICT 3 0 3 3 +VENICE 2 0 2 2 +VENIAL 1 0 1 1 +VENGEANCE 1 0 1 
1 +VEINS 1 0 1 1 +VEILS 1 0 1 1 +VEHICLES 1 0 1 1 +VEGETABLES 1 0 1 1 +VEGETABLE 1 0 1 1 +VECCHIO 1 0 1 1 +VAUDEVILLE 1 0 1 1 +VARIES 1 0 1 1 +VARIED 1 0 1 1 +VANITY 1 0 1 1 +VANISHED 1 0 1 1 +VANE 1 0 1 1 +VAMPA 2 0 2 2 +VALUES 2 0 2 2 +VALUE 3 0 3 3 +VALUABLES 1 0 1 1 +VALLEY 2 0 2 2 +VALJEAN'S 3 0 3 3 +VALJEAN 7 0 7 7 +VAIN 5 0 5 5 +VAGUELY 1 0 1 1 +VAGUE 1 0 1 1 +VACATION 1 0 1 1 +VACANTLY 1 0 1 1 +UTTERLY 3 0 3 3 +UTTERING 1 0 1 1 +UTTERED 4 0 4 4 +UTMOST 5 0 5 5 +USURPER 2 0 2 2 +USUALLY 6 0 6 6 +USUAL 3 0 3 3 +USING 3 0 3 3 +USEST 1 0 1 1 +URGED 2 0 2 2 +URGE 1 0 1 1 +UPSET 1 0 1 1 +UPRIGHT 1 0 1 1 +UPPER 5 0 5 5 +UNWEPT 1 0 1 1 +UNUSUALLY 1 0 1 1 +UNUSUAL 2 0 2 2 +UNSWERVING 1 0 1 1 +UNSOUGHT 1 0 1 1 +UNSELFISH 1 0 1 1 +UNSEASONABLE 1 0 1 1 +UNS 1 0 1 1 +UNREASONABLE 1 0 1 1 +UNPRESSED 1 0 1 1 +UNPLEASANT 3 0 3 3 +UNPITIED 1 0 1 1 +UNOCCUPIED 1 0 1 1 +UNNATURAL 1 0 1 1 +UNMISTAKABLY 1 0 1 1 +UNLUCKY 2 0 2 2 +UNLIKELY 1 0 1 1 +UNKNOWN 2 0 2 2 +UNKIND 1 0 1 1 +UNJOINTED 1 0 1 1 +UNIVERSE 1 0 1 1 +UNIVERSAL 5 0 5 5 +UNITED 5 0 5 5 +UNISON 1 0 1 1 +UNIQUE 1 0 1 1 +UNIONISTS 1 0 1 1 +UNION 1 0 1 1 +UNINTENTIONAL 1 0 1 1 +UNIFORM 2 0 2 2 +UNHESITATINGLY 1 0 1 1 +UNHEARD 1 0 1 1 +UNHAPPINESS 1 0 1 1 +UNGRATEFUL 3 0 3 3 +UNFORTUNATELY 2 0 2 2 +UNFORTUNATE 2 0 2 2 +UNFLATTERING 1 0 1 1 +UNEXPECTEDLY 2 0 2 2 +UNEXPECTED 3 0 3 3 +UNEASY 4 0 4 4 +UNEASILY 1 0 1 1 +UNDOUBTEDLY 1 0 1 1 +UNDERTOOK 1 0 1 1 +UNDERTONE 1 0 1 1 +UNDERTAKE 1 0 1 1 +UNDERSTOOD 6 0 6 6 +UNDERSTANDS 1 0 1 1 +UNDERSTANDING 5 0 5 5 +UNDERSTAND 7 0 7 7 +UNDERNEATH 1 0 1 1 +UNDERGROUND 1 0 1 1 +UNDERGO 1 0 1 1 +UNCONNECTED 1 0 1 1 +UNCONCERN 1 0 1 1 +UNCOMMON 1 0 1 1 +UNCOMFORTABLY 2 0 2 2 +UNCOMFORTABLE 1 0 1 1 +UNCLE'S 2 0 2 2 +UNCERTAIN 2 0 2 2 +UNBURDEN 1 0 1 1 +UNAWARE 1 0 1 1 +UNASSISTED 1 0 1 1 +UNALTERABLE 1 0 1 1 +UNABLE 1 0 1 1 +UGLY 1 0 1 1 +TYRANTS 1 0 1 1 +TYRANT 2 0 2 2 +TYPE 1 0 1 1 +TWIST 1 0 1 1 +TWILIGHT 1 0 1 1 +TWICE 2 0 2 2 +TWENTY 16 0 16 16 +TWELVEMONTH 1 0 1 1 +TWELVE 3 0 3 3 +TUTORS 2 0 2 2 +TUTOR 1 0 1 1 +TURRETS 1 0 1 1 +TURKISH 1 0 1 1 +TURK 1 0 1 1 +TURBAN 1 0 1 1 +TUNE 1 0 1 1 +TUMBLE 1 0 1 1 +TUG 1 0 1 1 +TUESDAY 1 0 1 1 +TUCKED 1 0 1 1 +TUBE 2 0 2 2 +TRYING 8 0 8 8 +TRUTH 10 0 10 10 +TRUSTWORTHY 1 0 1 1 +TRUSTED 1 0 1 1 +TRUST 3 0 3 3 +TRUNK 1 0 1 1 +TRUE 15 0 15 15 +TRUCE 2 0 2 2 +TROUSERS 2 0 2 2 +TROUBLING 1 0 1 1 +TROUBLED 6 0 6 6 +TROUBLE 8 0 8 8 +TROOPS 2 0 2 2 +TROLL 1 0 1 1 +TRIUMPHING 1 0 1 1 +TRIUMPH 3 0 3 3 +TRIP 2 0 2 2 +TRIM 1 0 1 1 +TRIFLING 3 0 3 3 +TRIBUTE 2 0 2 2 +TRIANGLE 1 0 1 1 +TRIAL 3 0 3 3 +TREND 1 0 1 1 +TREMBLE 1 0 1 1 +TREES 3 0 3 3 +TREATMENT 1 0 1 1 +TREATED 1 0 1 1 +TREAT 3 0 3 3 +TREASONS 1 0 1 1 +TREACHEROUSLY 1 0 1 1 +TRAVILLA 1 0 1 1 +TRAVELLERS 2 0 2 2 +TRAP 2 0 2 2 +TRANSPORTED 2 0 2 2 +TRANSPARENT 1 0 1 1 +TRANSITORINESS 1 0 1 1 +TRANSFORMING 1 0 1 1 +TRANSFIGURED 1 0 1 1 +TRANSFERENCE 1 0 1 1 +TRANQUILLITIES 1 0 1 1 +TRAMP 2 0 2 2 +TRAINED 1 0 1 1 +TRAGIC 1 0 1 1 +TRADITIONAL 1 0 1 1 +TRACK 3 0 3 3 +TRACED 1 0 1 1 +TRACEABLE 1 0 1 1 +TRACE 2 0 2 2 +TOY 1 0 1 1 +TOWNSFOLK 1 0 1 1 +TOWN 14 0 14 14 +TOWERS 1 0 1 1 +TOWERING 1 0 1 1 +TOUCHING 4 0 4 4 +TOUCHED 5 0 5 5 +TOUCH 3 0 3 3 +TOSSED 1 0 1 1 +TORTURES 1 0 1 1 +TORMENTOR 2 0 2 2 +TORE 1 0 1 1 +TOPS 1 0 1 1 +TOPIC 1 0 1 1 +TONGUES 1 0 1 1 +TONES 2 0 2 2 +TONE 6 0 6 6 +TOMBS 1 0 1 1 +TOMB 2 0 2 2 +TOLERABLY 1 0 1 1 +TOKEN 1 0 1 1 +TOILING 1 0 1 1 +TOILETTE 1 0 1 1 +TOIL 1 0 1 1 +TOGETHER 11 0 11 11 +TOES 2 0 2 2 +TOBACCO 7 0 7 7 +TOASTED 2 0 2 2 +TOAST 1 0 1 1 +TIS 4 0 4 4 +TIPPLING 1 0 1 1 +TINY 1 0 1 1 
+TINKLE 1 0 1 1 +TIMEPIECE 1 0 1 1 +TIGHTENING 1 0 1 1 +TIGHTENED 1 0 1 1 +TIGHT 1 0 1 1 +TIED 2 0 2 2 +TIDES 1 0 1 1 +TIDE 1 0 1 1 +TICKLING 1 0 1 1 +TICKING 1 0 1 1 +TICKET 1 0 1 1 +THYSELF 3 0 3 3 +THYME 1 0 1 1 +THWARTED 1 0 1 1 +THURSDAY 1 0 1 1 +THUNDER 3 0 3 3 +THRUST 6 0 6 6 +THROWN 1 0 1 1 +THROW 2 0 2 2 +THROUGHOUT 3 0 3 3 +THRONE 2 0 2 2 +THROAT 2 0 2 2 +THRILLING 1 0 1 1 +THRIFTILY 1 0 1 1 +THREW 8 0 8 8 +THREES 1 0 1 1 +THREATS 3 0 3 3 +THREATENED 1 0 1 1 +THREAD 2 0 2 2 +THRACE 1 0 1 1 +THOUSANDTH 1 0 1 1 +THOUGHTFUL 2 0 2 2 +THOROUGHLY 1 0 1 1 +THORNTON 4 0 4 4 +THONG 1 0 1 1 +THIRTY 7 0 7 7 +THIRTEEN 1 0 1 1 +THIRSTY 1 0 1 1 +THIRSTING 1 0 1 1 +THINKING 4 0 4 4 +THIEVES 1 0 1 1 +THIEF 2 0 2 2 +THICKENING 1 0 1 1 +THICK 4 0 4 4 +THEREWITH 1 0 1 1 +THEREIN 3 0 3 3 +THEREAFTER 1 0 1 1 +THEORY 2 0 2 2 +THEOLOGIANS 1 0 1 1 +THENCEFORTH 1 0 1 1 +THENCE 1 0 1 1 +THEMSELVES 17 0 17 17 +THANKFUL 2 0 2 2 +THANKED 2 0 2 2 +THANK 7 0 7 7 +TEXAS 1 0 1 1 +TESTING 1 0 1 1 +TESTIFY 2 0 2 2 +TERROR 5 0 5 5 +TERRIFIC 2 0 2 2 +TERMS 1 0 1 1 +TERM 2 0 2 2 +TENDING 1 0 1 1 +TENDERNESS 1 0 1 1 +TENDERLY 1 0 1 1 +TENDER 3 0 3 3 +TENDENCY 1 0 1 1 +TENACITY 1 0 1 1 +TEMPTRESS 1 0 1 1 +TEMPTING 1 0 1 1 +TEMPTATION 4 0 4 4 +TEMPORARY 2 0 2 2 +TEMPLARS 2 0 2 2 +TEMPLAR 2 0 2 2 +TEMPEST 1 0 1 1 +TEMPERATURE 1 0 1 1 +TEMPERATE 2 0 2 2 +TEMPERAMENT 1 0 1 1 +TEMPER 1 0 1 1 +TELLING 2 0 2 2 +TELEPHONE 1 0 1 1 +TELEGRAM 3 0 3 3 +TEEMING 1 0 1 1 +TECHNICAL 1 0 1 1 +TEASPOONFUL 1 0 1 1 +TEARING 1 0 1 1 +TEAR 3 0 3 3 +TEAM 1 0 1 1 +TEACHING 1 0 1 1 +TEACHERS 1 0 1 1 +TEACH 2 0 2 2 +TAXES 1 0 1 1 +TAUNTS 1 0 1 1 +TASTES 1 0 1 1 +TASTED 2 0 2 2 +TASK 3 0 3 3 +TARRIED 1 0 1 1 +TAPE 1 0 1 1 +TANNER 1 0 1 1 +TALL 2 0 2 2 +TALKS 1 0 1 1 +TALKING 5 0 5 5 +TALKER 1 0 1 1 +TALES 1 0 1 1 +TALENT 1 0 1 1 +TAKINGS 1 0 1 1 +TAKING 11 0 11 11 +TAKES 3 0 3 3 +TAINTED 1 0 1 1 +TAILS 2 0 2 2 +TAILOR'S 1 0 1 1 +TAIL 3 0 3 3 +TAGGING 1 0 1 1 +TACK 1 0 1 1 +TABLETS 2 0 2 2 +TABLES 2 0 2 2 +TABLE 7 0 7 7 +T 1 0 1 1 +SYSTEM 2 0 2 2 +SYRINGE 1 0 1 1 +SYMPTOMS 1 0 1 1 +SYMPATHY 4 0 4 4 +SYMPATHIES 1 0 1 1 +SWUNG 3 0 3 3 +SWITCHED 1 0 1 1 +SWITCH 1 0 1 1 +SWISS 1 0 1 1 +SWINGING 1 0 1 1 +SWINGED 1 0 1 1 +SWIMS 1 0 1 1 +SWIFTLY 1 0 1 1 +SWEPT 1 0 1 1 +SWEETNESS 2 0 2 2 +SWEETMEATS 2 0 2 2 +SWEEPING 1 0 1 1 +SWEAR 5 0 5 5 +SWARTHY 1 0 1 1 +SWARMED 2 0 2 2 +SWAM 1 0 1 1 +SWALLOWING 1 0 1 1 +SVIAZHSKY 1 0 1 1 +SUSTAINS 1 0 1 1 +SUSPICIOUS 3 0 3 3 +SUSPENDED 1 0 1 1 +SUSPECTED 6 0 6 6 +SUSPECT 1 0 1 1 +SUSAN'S 1 0 1 1 +SURVEYED 1 0 1 1 +SURROUNDINGS 1 0 1 1 +SURROUNDING 1 0 1 1 +SURROUNDED 1 0 1 1 +SURRENDERING 1 0 1 1 +SURRENDERED 1 0 1 1 +SURPLICE 1 0 1 1 +SURPASS 1 0 1 1 +SURMOUNTED 1 0 1 1 +SURLY 1 0 1 1 +SURFACE 3 0 3 3 +SURE 18 0 18 18 +SUPPRESS 1 0 1 1 +SUPPOSITION 1 0 1 1 +SUPPORTED 1 0 1 1 +SUPPORT 1 0 1 1 +SUPPLY 2 0 2 2 +SUPPLIED 3 0 3 3 +SUPPLICATION 1 0 1 1 +SUPPER 1 0 1 1 +SUPERNATURAL 2 0 2 2 +SUPERNACULUM 1 0 1 1 +SUPERIORS 1 0 1 1 +SUP 1 0 1 1 +SUNSHINY 1 0 1 1 +SUNSHINE 1 0 1 1 +SUNRISE 2 0 2 2 +SUNDAY 4 0 4 4 +SUMS 2 0 2 2 +SUMMONED 1 0 1 1 +SUMMON 1 0 1 1 +SUMMIT 1 0 1 1 +SUMMER 6 0 6 6 +SULTRY 1 0 1 1 +SULTAN 2 0 2 2 +SUITS 1 0 1 1 +SUITABLE 2 0 2 2 +SUICIDE 1 0 1 1 +SUGGESTED 2 0 2 2 +SUGAR 9 0 9 9 +SUFFICIENTLY 2 0 2 2 +SUFFERINGS 1 0 1 1 +SUFFERING 2 0 2 2 +SUFFERED 3 0 3 3 +SUE 1 0 1 1 +SUCK 1 0 1 1 +SUCH 44 0 44 44 +SUCCUMBED 1 0 1 1 +SUCCESSIVELY 1 0 1 1 +SUCCESSIVE 1 0 1 1 +SUCCESSFULLY 1 0 1 1 +SUCCESSES 2 0 2 2 +SUCCESS 2 0 2 2 +SUCCEEDING 1 0 1 1 +SUCCEEDED 3 0 3 3 +SUCCEED 1 0 1 1 +SUBURB 1 0 1 1 
+SUBSTITUTING 1 0 1 1 +SUBSTANCES 1 0 1 1 +SUBSISTENCE 1 0 1 1 +SUBSIDED 1 0 1 1 +SUBSEQUENT 1 0 1 1 +SUBORDINATED 1 0 1 1 +SUBMITTED 2 0 2 2 +SUBMISSIVE 1 0 1 1 +SUBMISSION 1 0 1 1 +SUBJECTS 6 0 6 6 +SUBJECTED 3 0 3 3 +SUBDUED 2 0 2 2 +STYLED 1 0 1 1 +STYLE 1 0 1 1 +STURDY 1 0 1 1 +STUPID 2 0 2 2 +STUMBLED 1 0 1 1 +STUFFS 1 0 1 1 +STUFF 1 0 1 1 +STUDYING 1 0 1 1 +STUDY 1 0 1 1 +STUDENTS 1 0 1 1 +STUDENT 2 0 2 2 +STUCK 2 0 2 2 +STRUGGLING 1 0 1 1 +STRUGGLES 1 0 1 1 +STRUGGLE 1 0 1 1 +STRUCTURE 1 0 1 1 +STROVE 1 0 1 1 +STRONGLY 1 0 1 1 +STRONGER 1 0 1 1 +STRONG 12 0 12 12 +STRIPPED 2 0 2 2 +STRIKING 1 0 1 1 +STRIDES 1 0 1 1 +STRICTLY 1 0 1 1 +STREWN 1 0 1 1 +STRETCHING 1 0 1 1 +STRETCHER 1 0 1 1 +STRETCH 1 0 1 1 +STRENUOUSLY 1 0 1 1 +STRENGTHENED 2 0 2 2 +STRENGTH 12 0 12 12 +STRATAGEM 1 0 1 1 +STRANGERS 1 0 1 1 +STRANGER 3 0 3 3 +STRANGELY 1 0 1 1 +STRAITS 1 0 1 1 +STRAINING 1 0 1 1 +STRAIGHTWAY 1 0 1 1 +STRAIGHTFORWARD 1 0 1 1 +STOUT 1 0 1 1 +STORY 9 0 9 9 +STORMED 1 0 1 1 +STORM 1 0 1 1 +STOREHOUSES 1 0 1 1 +STOPPING 3 0 3 3 +STOP 5 0 5 5 +STOOL 2 0 2 2 +STOMACH 3 0 3 3 +STOCK 3 0 3 3 +STIRRING 1 0 1 1 +STIRRED 1 0 1 1 +STIR 1 0 1 1 +STILLNESS 3 0 3 3 +STILE 1 0 1 1 +STIFLING 1 0 1 1 +STIFLED 2 0 2 2 +STIFLE 3 0 3 3 +STIFFNESS 1 0 1 1 +STIFF 2 0 2 2 +STICK 5 0 5 5 +STEWART 1 0 1 1 +STEWARDS 1 0 1 1 +STEWARD 1 0 1 1 +STEPHEN 1 0 1 1 +STEMS 1 0 1 1 +STEERAGE 2 0 2 2 +STEEP 1 0 1 1 +STEEL 1 0 1 1 +STEAMED 1 0 1 1 +STEAMBOAT 2 0 2 2 +STEALTHILY 1 0 1 1 +STEADY 3 0 3 3 +STEADILY 1 0 1 1 +STAY 6 0 6 6 +STATUES 3 0 3 3 +STATIONED 2 0 2 2 +STATION 2 0 2 2 +STATESMAN 1 0 1 1 +STATEMENT 3 0 3 3 +STAT 1 0 1 1 +STARVE 1 0 1 1 +STARTLING 1 0 1 1 +STARTLED 1 0 1 1 +STARTING 1 0 1 1 +STARK 1 0 1 1 +STARCHY 1 0 1 1 +STANLEY 2 0 2 2 +STANDPOINT 1 0 1 1 +STANDING 10 0 10 10 +STAND 7 0 7 7 +STAMPED 1 0 1 1 +STAMMERED 1 0 1 1 +STAMMER 1 0 1 1 +STAKED 1 0 1 1 +STAKE 1 0 1 1 +STAIRCASE 1 0 1 1 +STAINED 2 0 2 2 +STABLE 1 0 1 1 +SQUIRE 3 0 3 3 +SQUEEZE 1 0 1 1 +SQUEAKS 1 0 1 1 +SQUATTED 1 0 1 1 +SQUALL 1 0 1 1 +SQUALID 1 0 1 1 +SQUAD 2 0 2 2 +SPYING 1 0 1 1 +SPY 1 0 1 1 +SPRINKLES 1 0 1 1 +SPRINGS 1 0 1 1 +SPRING 4 0 4 4 +SPRIG 1 0 1 1 +SPREADS 1 0 1 1 +SPREAD 4 0 4 4 +SPRANG 3 0 3 3 +SPOTTED 1 0 1 1 +SPOT 6 0 6 6 +SPORT 2 0 2 2 +SPOON 1 0 1 1 +SPOKEN 2 0 2 2 +SPOKE 15 0 15 15 +SPOILS 1 0 1 1 +SPLIT 2 0 2 2 +SPITEFUL 1 0 1 1 +SPIT 1 0 1 1 +SPIRITUAL 1 0 1 1 +SPIRIT 6 0 6 6 +SPIRAL 1 0 1 1 +SPINSTER 1 0 1 1 +SPIDER 1 0 1 1 +SPHERE 1 0 1 1 +SPENT 3 0 3 3 +SPELL 1 0 1 1 +SPEEDILY 1 0 1 1 +SPEED 1 0 1 1 +SPEECH 5 0 5 5 +SPECULATED 1 0 1 1 +SPECTATORS 1 0 1 1 +SPECTACLE 1 0 1 1 +SPECIES 1 0 1 1 +SPECIAL 3 0 3 3 +SPEAKS 1 0 1 1 +SPEAKING 7 0 7 7 +SPAWN 1 0 1 1 +SPASM 1 0 1 1 +SPARROWS 1 0 1 1 +SPARK 1 0 1 1 +SPARING 1 0 1 1 +SPARED 2 0 2 2 +SPARE 1 0 1 1 +SPANKER 1 0 1 1 +SPANISH 1 0 1 1 +SPADES 1 0 1 1 +SPACE 2 0 2 2 +SOWING 1 0 1 1 +SOUTH 1 0 1 1 +SOUP 1 0 1 1 +SOUNDS 2 0 2 2 +SOUNDED 3 0 3 3 +SOUND 12 0 12 12 +SORTS 4 0 4 4 +SORRY 3 0 3 3 +SORROWING 1 0 1 1 +SORELY 1 0 1 1 +SORDID 1 0 1 1 +SORCERER 1 0 1 1 +SOOTH 1 0 1 1 +SOOT 1 0 1 1 +SOONER 4 0 4 4 +SONS 1 0 1 1 +SOMEWHAT 5 0 5 5 +SOMETIMES 14 0 14 14 +SOMETHING'S 1 0 1 1 +SOMEHOW 3 0 3 3 +SOMEBODY 3 0 3 3 +SOMBER 1 0 1 1 +SOLVE 1 0 1 1 +SOLUTION 4 0 4 4 +SOLUBLE 2 0 2 2 +SOLOMON 1 0 1 1 +SOLIDS 1 0 1 1 +SOLIDLY 1 0 1 1 +SOLID 1 0 1 1 +SOLICITUDE 1 0 1 1 +SOLEMNLY 1 0 1 1 +SOLEMNITY 1 0 1 1 +SOLEMN 1 0 1 1 +SOLDIERS 3 0 3 3 +SOLDIER 1 0 1 1 +SOLACE 1 0 1 1 +SOIL 2 0 2 2 +SOFTLY 2 0 2 2 +SOFA 2 0 2 2 +SODA 1 0 1 1 +SOCIETY 1 0 1 1 +SOCIETIES 1 0 1 1 +SOCIAL 12 0 
12 12 +SOBERLY 1 0 1 1 +SOBER 4 0 4 4 +SOARING 1 0 1 1 +SOAK 1 0 1 1 +SNEEZE 2 0 2 2 +SNEERED 1 0 1 1 +SNEAKY 1 0 1 1 +SNATCH 1 0 1 1 +SNAKE 1 0 1 1 +SMUGGLED 1 0 1 1 +SMOULDERING 1 0 1 1 +SMOTE 2 0 2 2 +SMOKING 3 0 3 3 +SMOKESTACKS 1 0 1 1 +SMOKERS 3 0 3 3 +SMOKER 3 0 3 3 +SMOKED 2 0 2 2 +SMITH 1 0 1 1 +SMILING 2 0 2 2 +SMILED 1 0 1 1 +SMELT 1 0 1 1 +SMART 1 0 1 1 +SMALLEST 1 0 1 1 +SMALLER 1 0 1 1 +SLUMBER 2 0 2 2 +SLOWLY 6 0 6 6 +SLOW 3 0 3 3 +SLIPPING 1 0 1 1 +SLIPPER 1 0 1 1 +SLIP 3 0 3 3 +SLING 1 0 1 1 +SLIGHT 1 0 1 1 +SLICES 2 0 2 2 +SLENDER 2 0 2 2 +SLEEVES 1 0 1 1 +SLEEPY 2 0 2 2 +SLEEPS 2 0 2 2 +SLEEPER 1 0 1 1 +SLEDGE 1 0 1 1 +SLAYING 1 0 1 1 +SLAY 1 0 1 1 +SLAVES 2 0 2 2 +SLAVERY 1 0 1 1 +SLAVE 3 0 3 3 +SLAMMED 1 0 1 1 +SLAIN 2 0 2 2 +SKYLIGHT 2 0 2 2 +SKY 3 0 3 3 +SKULLS 1 0 1 1 +SKULL 1 0 1 1 +SKIRTS 1 0 1 1 +SKIRMISH 1 0 1 1 +SKIMMING 1 0 1 1 +SKILLED 1 0 1 1 +SKILFULLY 1 0 1 1 +SKIES 1 0 1 1 +SKETCH 1 0 1 1 +SIXTY 7 0 7 7 +SIXTEEN 2 0 2 2 +SITUATION 1 0 1 1 +SITTING 3 0 3 3 +SITTETH 1 0 1 1 +SISTERS 4 0 4 4 +SISTERLY 1 0 1 1 +SISTER 8 0 8 8 +SINKS 1 0 1 1 +SINGULAR 2 0 2 2 +SINGLE 8 0 8 8 +SINGING 2 0 2 2 +SINGER 1 0 1 1 +SINGED 1 0 1 1 +SING 4 0 4 4 +SINFUL 1 0 1 1 +SINCERITY 1 0 1 1 +SINCERE 1 0 1 1 +SINCE 17 0 17 17 +SIN 2 0 2 2 +SIMPLY 3 0 3 3 +SIMMERING 1 0 1 1 +SIMILAR 2 0 2 2 +SILL 1 0 1 1 +SILK 1 0 1 1 +SILENTLY 2 0 2 2 +SILENT 9 0 9 9 +SILAS 1 0 1 1 +SIGNS 2 0 2 2 +SIGNIFIES 1 0 1 1 +SIGNIFIED 1 0 1 1 +SIGNIFICANT 2 0 2 2 +SIGNIFICANCE 2 0 2 2 +SIGNATURE 1 0 1 1 +SIGNALS 2 0 2 2 +SIGNAL 7 0 7 7 +SIGHED 1 0 1 1 +SIGH 5 0 5 5 +SIFTED 1 0 1 1 +SIDEWAYS 1 0 1 1 +SIDEWALK 1 0 1 1 +SIDES 4 0 4 4 +SICUT 1 0 1 1 +SICK 2 0 2 2 +SHUTTING 1 0 1 1 +SHUTTER 1 0 1 1 +SHUFFLE 1 0 1 1 +SHUDDER 1 0 1 1 +SHRUNK 1 0 1 1 +SHROUDED 1 0 1 1 +SHRINKING 1 0 1 1 +SHRILL 1 0 1 1 +SHRIEKING 1 0 1 1 +SHRIEKED 1 0 1 1 +SHOWN 4 0 4 4 +SHOWING 7 0 7 7 +SHOWED 9 0 9 9 +SHOUTS 2 0 2 2 +SHOUTING 4 0 4 4 +SHOUTED 4 0 4 4 +SHOULDN'T 1 0 1 1 +SHORTLY 5 0 5 5 +SHORTER 1 0 1 1 +SHORT 8 0 8 8 +SHOPS 1 0 1 1 +SHOPPY 1 0 1 1 +SHOPPING 1 0 1 1 +SHOPKEEPERS 1 0 1 1 +SHOP 6 0 6 6 +SHOOK 5 0 5 5 +SHOES 5 0 5 5 +SHOCKED 2 0 2 2 +SHIRTS 1 0 1 1 +SHIRT 1 0 1 1 +SHIRKING 1 0 1 1 +SHIMMERING 1 0 1 1 +SHIFTY 1 0 1 1 +SHIFTED 2 0 2 2 +SHERRY 3 0 3 3 +SHELLS 4 0 4 4 +SHELF 1 0 1 1 +SHEILA 1 0 1 1 +SHEATH 1 0 1 1 +SHAWL 1 0 1 1 +SHARPNESS 1 0 1 1 +SHARPLY 4 0 4 4 +SHARPENED 1 0 1 1 +SHAPIA 1 0 1 1 +SHAPES 1 0 1 1 +SHAPED 1 0 1 1 +SHAPE 3 0 3 3 +SHAME 2 0 2 2 +SHAM 1 0 1 1 +SHALT 7 0 7 7 +SHAKING 1 0 1 1 +SHAHRAZAD 3 0 3 3 +SHAG 3 0 3 3 +SHAFTS 1 0 1 1 +SHADOWS 1 0 1 1 +SEX 1 0 1 1 +SEWING 1 0 1 1 +SEVERELY 1 0 1 1 +SEVENTY 7 0 7 7 +SEVENTEEN 4 0 4 4 +SETTLED 4 0 4 4 +SETTLE 2 0 2 2 +SERVING 1 0 1 1 +SERVICES 1 0 1 1 +SERVICE 15 0 15 15 +SERVES 1 0 1 1 +SERVED 3 0 3 3 +SERVANT 4 0 4 4 +SERPENTS 2 0 2 2 +SERPENT 1 0 1 1 +SERIOUSLY 3 0 3 3 +SERIOUS 5 0 5 5 +SERENITY 1 0 1 1 +SEPULTURE 1 0 1 1 +SEPULCHRE 1 0 1 1 +SEPARATION 3 0 3 3 +SEPARATING 1 0 1 1 +SEPARATED 3 0 3 3 +SEPARATE 2 0 2 2 +SENTINELS 2 0 2 2 +SENTIMENTAL 1 0 1 1 +SENTIMENT 1 0 1 1 +SENTENCE 2 0 2 2 +SENSIBLY 1 0 1 1 +SENSES 2 0 2 2 +SENSELESS 2 0 2 2 +SENSATION 1 0 1 1 +SENATOR 1 0 1 1 +SELF 6 0 6 6 +SEIZED 3 0 3 3 +SEES 1 0 1 1 +SEEMLY 1 0 1 1 +SEEKING 1 0 1 1 +SEEKEST 1 0 1 1 +SECURITY 7 0 7 7 +SECRETS 3 0 3 3 +SECRETLY 1 0 1 1 +SECRETARY 2 0 2 2 +SECRET 3 0 3 3 +SECONDS 1 0 1 1 +SECOND 15 0 15 15 +SEAT 1 0 1 1 +SEASONS 1 0 1 1 +SEASONED 1 0 1 1 +SEAS 1 0 1 1 +SEARCHINGLY 1 0 1 1 +SEARCHES 1 0 1 1 +SEARCHED 2 0 2 2 +SEARCH 6 0 6 6 +SEALED 2 0 2 2 
+SCUTTLING 1 0 1 1 +SCUM 1 0 1 1 +SCULPTURE 1 0 1 1 +SCULPTOR 3 0 3 3 +SCRUPULOUSLY 1 0 1 1 +SCREEN 1 0 1 1 +SCREAM 1 0 1 1 +SCRATCHING 1 0 1 1 +SCRATCH 1 0 1 1 +SCRAPING 1 0 1 1 +SCRAPE 1 0 1 1 +SCOUNDREL 2 0 2 2 +SCOTCH 2 0 2 2 +SCISSORS 5 0 5 5 +SCIENTIFICALLY 1 0 1 1 +SCIENTIFIC 1 0 1 1 +SCIENCE 1 0 1 1 +SCHOOLMATE 1 0 1 1 +SCHOOLMASTER 5 0 5 5 +SCHOLARS 1 0 1 1 +SCHEME 1 0 1 1 +SCENES 2 0 2 2 +SCENE 6 0 6 6 +SCATTER 1 0 1 1 +SCARRED 1 0 1 1 +SCARLET 1 0 1 1 +SCARED 1 0 1 1 +SCARCELY 4 0 4 4 +SCARCE 1 0 1 1 +SCANNING 1 0 1 1 +SCALES 1 0 1 1 +SAXON 2 0 2 2 +SAWYER 3 0 3 3 +SAVAGES 1 0 1 1 +SAVAGERY 1 0 1 1 +SAUCER 1 0 1 1 +SATISFY 3 0 3 3 +SATISFIED 3 0 3 3 +SATISFACTORY 2 0 2 2 +SATISFACTORILY 1 0 1 1 +SATISFACTION 6 0 6 6 +SATIATED 1 0 1 1 +SATANICAL 1 0 1 1 +SATAN 1 0 1 1 +SANCTUARY 1 0 1 1 +SANCHO 9 0 9 9 +SAMUEL 1 0 1 1 +SAME 22 0 22 22 +SAMARIA 1 0 1 1 +SALUTED 2 0 2 2 +SALOON 1 0 1 1 +SAKE 7 0 7 7 +SAITH 1 0 1 1 +SAINTS 3 0 3 3 +SAILOR 2 0 2 2 +SAILING 2 0 2 2 +SAILED 1 0 1 1 +SAFETY 2 0 2 2 +SAFELY 2 0 2 2 +SADLY 4 0 4 4 +SAD 2 0 2 2 +SACRIFICES 3 0 3 3 +SACRIFICE 5 0 5 5 +SACRED 1 0 1 1 +SACRAMENT 1 0 1 1 +SACK 1 0 1 1 +RUSTLING 2 0 2 2 +RUSTLE 1 0 1 1 +RUSSIAN 3 0 3 3 +RUNNING 3 0 3 3 +RUMBLING 1 0 1 1 +RULES 2 0 2 2 +RULE 5 0 5 5 +RUINS 1 0 1 1 +RUINING 1 0 1 1 +RUINED 1 0 1 1 +RUFFIAN 1 0 1 1 +RUBBERS 1 0 1 1 +ROYAL 7 0 7 7 +ROXBURY 1 0 1 1 +ROVER 1 0 1 1 +ROUSED 3 0 3 3 +ROUSE 1 0 1 1 +ROUNDED 1 0 1 1 +ROUGH 3 0 3 3 +ROT 1 0 1 1 +ROSY 2 0 2 2 +ROSEMARY 2 0 2 2 +ROSA 1 0 1 1 +ROPE 3 0 3 3 +ROOMS 2 0 2 2 +ROOFS 1 0 1 1 +ROOF 2 0 2 2 +ROLLED 1 0 1 1 +RODE 6 0 6 6 +ROCKS 1 0 1 1 +ROCKET 1 0 1 1 +ROBERT 1 0 1 1 +ROBBING 1 0 1 1 +ROBBERY 5 0 5 5 +ROBBED 2 0 2 2 +ROASTING 1 0 1 1 +ROASTED 1 0 1 1 +ROAST 1 0 1 1 +ROARED 1 0 1 1 +RIVERS 1 0 1 1 +RIVER 11 0 11 11 +RIVALRY 1 0 1 1 +RIVAL 2 0 2 2 +RISK 3 0 3 3 +RISING 5 0 5 5 +RISEN 1 0 1 1 +RISE 3 0 3 3 +RIP 2 0 2 2 +RIGHTEOUSNESS 1 0 1 1 +RIGHTEOUS 1 0 1 1 +RIDICULOUS 1 0 1 1 +RIDDEN 1 0 1 1 +RICHLY 1 0 1 1 +RICHER 1 0 1 1 +RICH 7 0 7 7 +RICE 1 0 1 1 +RHEUMATISM 1 0 1 1 +REWARDS 1 0 1 1 +REWARD 4 0 4 4 +REVOLUTIONARIES 1 0 1 1 +REVIVE 1 0 1 1 +REVIEW 1 0 1 1 +REVERSES 1 0 1 1 +REVENGES 1 0 1 1 +REVENGE 1 0 1 1 +REVELLED 1 0 1 1 +REVELATION 1 0 1 1 +REVEL 1 0 1 1 +RETREAT 1 0 1 1 +RETARDED 1 0 1 1 +RETAINED 2 0 2 2 +RESULT 2 0 2 2 +RESTS 1 0 1 1 +RESTRAIN 2 0 2 2 +RESTORED 2 0 2 2 +RESTAURANTS 1 0 1 1 +RESTAURANT 3 0 3 3 +RESPONSIBILITY 2 0 2 2 +RESPONDED 2 0 2 2 +RESPECTS 1 0 1 1 +RESPECTIVE 2 0 2 2 +RESPECTING 1 0 1 1 +RESPECTFULLY 3 0 3 3 +RESPECTFUL 1 0 1 1 +RESPECTED 1 0 1 1 +RESPECTABLE 3 0 3 3 +RESPECT 4 0 4 4 +RESORTS 1 0 1 1 +RESORTED 1 0 1 1 +RESORT 1 0 1 1 +RESOLVING 1 0 1 1 +RESOLVE 1 0 1 1 +RESOLUTIONS 1 0 1 1 +RESOLUTION 2 0 2 2 +RESISTANCE 3 0 3 3 +RESIST 3 0 3 3 +RESIGNED 1 0 1 1 +RESIDENCE 2 0 2 2 +RESERVOIR 1 0 1 1 +RESERVE 1 0 1 1 +RESEMBLING 1 0 1 1 +RESEMBLES 1 0 1 1 +RESEMBLE 1 0 1 1 +RESEARCHES 1 0 1 1 +REQUIRING 1 0 1 1 +REQUIRES 1 0 1 1 +REQUIRED 3 0 3 3 +REQUIRE 4 0 4 4 +REQUESTED 2 0 2 2 +REPUTATIONS 1 0 1 1 +REPROACH 2 0 2 2 +REPRESENTED 3 0 3 3 +REPRESENTATIVE 2 0 2 2 +REPORTED 1 0 1 1 +REPORT 2 0 2 2 +REPEATING 1 0 1 1 +REPEAT 1 0 1 1 +REPARATION 1 0 1 1 +REPAIRED 2 0 2 2 +REPAIR 1 0 1 1 +RENOUNCE 3 0 3 3 +RENEWED 2 0 2 2 +RENDERS 1 0 1 1 +RENDERED 1 0 1 1 +REMOVED 3 0 3 3 +REMOVE 3 0 3 3 +REMOVAL 1 0 1 1 +REMOTE 1 0 1 1 +REMORSEFUL 1 0 1 1 +REMONSTRANCE 1 0 1 1 +REMNANTS 1 0 1 1 +REMNANT 1 0 1 1 +REMINISCENCES 1 0 1 1 +REMEMBERING 2 0 2 2 +REMEMBERED 4 0 4 4 +REMEDY 4 0 4 4 +REMARKS 1 0 1 1 +REMARKABLY 1 
0 1 1 +REMARKABLE 2 0 2 2 +RELYING 1 0 1 1 +RELUCTANTLY 2 0 2 2 +RELUCTANCE 1 0 1 1 +RELINQUISH 1 0 1 1 +RELIGIONS 1 0 1 1 +RELIEVED 1 0 1 1 +RELIEF 6 0 6 6 +RELIED 1 0 1 1 +RELIC 1 0 1 1 +RELAXING 1 0 1 1 +RELATIONS 2 0 2 2 +RELATION 2 0 2 2 +REJOINED 1 0 1 1 +REJOINDER 1 0 1 1 +REJOICING 1 0 1 1 +REJOICED 3 0 3 3 +REGULAR 1 0 1 1 +REGRETTING 1 0 1 1 +REGISTER 1 0 1 1 +REGION 1 0 1 1 +REGIMENTS 2 0 2 2 +REGARDED 2 0 2 2 +REGARD 2 0 2 2 +REGAINED 1 0 1 1 +REGAIN 2 0 2 2 +REFUTATION 1 0 1 1 +REFUSING 2 0 2 2 +REFUSES 2 0 2 2 +REFUSED 1 0 1 1 +REFUSE 2 0 2 2 +REFRAIN 2 0 2 2 +REFORMS 1 0 1 1 +REFORM 6 0 6 6 +REFLECTIVE 1 0 1 1 +REFLECTIONS 1 0 1 1 +REFLECTION 3 0 3 3 +REFINED 2 0 2 2 +REFERRED 2 0 2 2 +REFER 1 0 1 1 +REEF 1 0 1 1 +REDUCED 2 0 2 2 +REDOUBLING 1 0 1 1 +REDEMPTION 1 0 1 1 +REDEEMING 1 0 1 1 +RECTUM 1 0 1 1 +RECTOR 1 0 1 1 +RECRUITS 1 0 1 1 +RECOVERY 1 0 1 1 +RECOVERED 1 0 1 1 +RECOVER 3 0 3 3 +RECOURSE 1 0 1 1 +RECOUNTED 1 0 1 1 +RECORD 2 0 2 2 +RECOMPENSE 2 0 2 2 +RECOMMEND 2 0 2 2 +RECOLLECTING 1 0 1 1 +RECOLLECTED 1 0 1 1 +RECOLLECT 1 0 1 1 +RECOILED 1 0 1 1 +RECOGNIZED 5 0 5 5 +RECOGNITION 2 0 2 2 +RECKON 4 0 4 4 +RECITING 1 0 1 1 +RECITED 3 0 3 3 +RECIPE 2 0 2 2 +RECEPTION 1 0 1 1 +RECENTLY 1 0 1 1 +RECEIVED 9 0 9 9 +RECEIVE 4 0 4 4 +RECEIPT 1 0 1 1 +RECEDING 1 0 1 1 +RECAPTURED 1 0 1 1 +RECALLING 1 0 1 1 +RECALLED 1 0 1 1 +RECALL 1 0 1 1 +REBECCA 1 0 1 1 +REASONABLE 2 0 2 2 +REASON 11 0 11 11 +REAR 1 0 1 1 +REAPING 1 0 1 1 +REAP 1 0 1 1 +REALM 1 0 1 1 +REALIZE 1 0 1 1 +REALITY 3 0 3 3 +READERS 1 0 1 1 +READER 1 0 1 1 +REACHING 2 0 2 2 +RAWNESS 1 0 1 1 +RAVING 1 0 1 1 +RAVENING 1 0 1 1 +RAVAGED 1 0 1 1 +RATTLED 1 0 1 1 +RATTLE 1 0 1 1 +RATE 2 0 2 2 +RASCAL 3 0 3 3 +RARE 1 0 1 1 +RAPIDLY 4 0 4 4 +RAP 1 0 1 1 +RANKS 1 0 1 1 +RANG 2 0 2 2 +RAMBLER 1 0 1 1 +RAMBLE 1 0 1 1 +RAM'S 1 0 1 1 +RAISED 6 0 6 6 +RAINS 1 0 1 1 +RAINBOWS 1 0 1 1 +RAILROAD 1 0 1 1 +RAIDERS 1 0 1 1 +RAFTER 1 0 1 1 +RAFT 6 0 6 6 +RADICALS 1 0 1 1 +RADIANT 1 0 1 1 +RACKETS 1 0 1 1 +RACK 1 0 1 1 +RACE 2 0 2 2 +R 1 0 1 1 +QUOTH 5 0 5 5 +QUOTED 1 0 1 1 +QUIXOTE 5 0 5 5 +QUIVERED 1 0 1 1 +QUIVER 1 0 1 1 +QUIT 1 0 1 1 +QUIETLY 4 0 4 4 +QUIET 1 0 1 1 +QUICKER 3 0 3 3 +QUESTIONS 6 0 6 6 +QUESTIONED 1 0 1 1 +QUESTIONABLE 1 0 1 1 +QUESTION 15 0 15 15 +QUENCH 1 0 1 1 +QUEENS 1 0 1 1 +QUEEN'S 1 0 1 1 +QUARTERS 3 0 3 3 +QUART 1 0 1 1 +QUARRELS 1 0 1 1 +QUANTITY 3 0 3 3 +QUALITY 1 0 1 1 +QUACKS 2 0 2 2 +PUZZLED 2 0 2 2 +PUSHING 1 0 1 1 +PUSHED 1 0 1 1 +PURSUIT 1 0 1 1 +PURSUED 3 0 3 3 +PURSUANCE 1 0 1 1 +PURPOSES 1 0 1 1 +PURPOSE 5 0 5 5 +PURITAN 2 0 2 2 +PURIFY 1 0 1 1 +PURE 4 0 4 4 +PURCHASED 1 0 1 1 +PUNISHMENTS 1 0 1 1 +PUNISHMENT 1 0 1 1 +PUNISHES 1 0 1 1 +PUNISHED 1 0 1 1 +PUNISH 1 0 1 1 +PUNCTUALITY 1 0 1 1 +PUNCTILIOUS 1 0 1 1 +PUMP 1 0 1 1 +PULP 1 0 1 1 +PULLEY 1 0 1 1 +PULL 1 0 1 1 +PUFFING 1 0 1 1 +PUFFED 1 0 1 1 +PUDDINGS 1 0 1 1 +PUBLISHER 1 0 1 1 +PUBLIC 5 0 5 5 +PRYING 2 0 2 2 +PRUDENT 1 0 1 1 +PRUDENCE 4 0 4 4 +PROW 1 0 1 1 +PROVOKE 1 0 1 1 +PROVOCATOR 1 0 1 1 +PROVISION 1 0 1 1 +PROVINCIAL 1 0 1 1 +PROVINCE 4 0 4 4 +PROVIDENCES 1 0 1 1 +PROVIDENCE 1 0 1 1 +PROVIDED 2 0 2 2 +PROVIDE 1 0 1 1 +PROVERBIAL 1 0 1 1 +PROUD 2 0 2 2 +PROTESTED 2 0 2 2 +PROTECTS 1 0 1 1 +PROTECTORS 1 0 1 1 +PROTECTION 2 0 2 2 +PROTECT 2 0 2 2 +PROSPEROUS 1 0 1 1 +PROPRIETORS 1 0 1 1 +PROPOSITION 1 0 1 1 +PROPOSES 1 0 1 1 +PROPOSED 3 0 3 3 +PROPOSALS 1 0 1 1 +PROPORTION 3 0 3 3 +PROPERTY 2 0 2 2 +PROOF 5 0 5 5 +PRONOUNCED 1 0 1 1 +PROMPTLY 3 0 3 3 +PROMPT 1 0 1 1 +PROMISING 1 0 1 1 +PROMISED 7 0 7 7 +PROMISE 4 0 4 4 +PROLONGED 1 0 
1 1 +PROJECT 1 0 1 1 +PROHIBITED 1 0 1 1 +PROHIBIT 1 0 1 1 +PROFUSION 1 0 1 1 +PROFOUND 1 0 1 1 +PROFLIGATE 1 0 1 1 +PROFITABLY 1 0 1 1 +PROFITABLE 1 0 1 1 +PROFESSIONAL 2 0 2 2 +PROFANITY 1 0 1 1 +PROFANE 1 0 1 1 +PRODUCTIONS 1 0 1 1 +PRODUCING 1 0 1 1 +PROCURE 2 0 2 2 +PROCESSIONS 1 0 1 1 +PROCESSION 1 0 1 1 +PROCESS 6 0 6 6 +PROCEEDINGS 2 0 2 2 +PROCEEDED 1 0 1 1 +PROCEED 2 0 2 2 +PROCEDURE 1 0 1 1 +PROBLEMS 1 0 1 1 +PROBLEM 1 0 1 1 +PROBABLY 7 0 7 7 +PROBABLE 1 0 1 1 +PROBABILITY 1 0 1 1 +PRIVILEGE 1 0 1 1 +PRIVATE 6 0 6 6 +PRIVACY 1 0 1 1 +PRINT 1 0 1 1 +PRINCIPLE 3 0 3 3 +PRINCIPALLY 1 0 1 1 +PRINCIPAL 1 0 1 1 +PRINCESS 11 0 11 11 +PRINCES 1 0 1 1 +PRINCE'S 2 0 2 2 +PRIME 3 0 3 3 +PRIDE 2 0 2 2 +PRICE 2 0 2 2 +PREVIOUSLY 1 0 1 1 +PREVENTED 1 0 1 1 +PREVAILING 1 0 1 1 +PREVAILED 1 0 1 1 +PRETTILY 1 0 1 1 +PRETTIEST 1 0 1 1 +PRETEXT 1 0 1 1 +PRETENDED 1 0 1 1 +PRETEND 3 0 3 3 +PRESUMPTUOUS 1 0 1 1 +PRESTIGE 2 0 2 2 +PRESSURE 4 0 4 4 +PRESSED 2 0 2 2 +PRESS 2 0 2 2 +PRESERVING 5 0 5 5 +PRESERVES 3 0 3 3 +PRESERVED 2 0 2 2 +PRESERVE 2 0 2 2 +PRESENTS 4 0 4 4 +PRESENTLY 12 0 12 12 +PRESENTING 1 0 1 1 +PRESENTED 3 0 3 3 +PRESENCE 9 0 9 9 +PREPARING 3 0 3 3 +PREPARED 7 0 7 7 +PREPARE 1 0 1 1 +PREPARATIONS 5 0 5 5 +PREOCCUPIED 1 0 1 1 +PREMISES 1 0 1 1 +PREFERRED 1 0 1 1 +PREFER 2 0 2 2 +PREDICTIONS 1 0 1 1 +PRECIPITANCY 1 0 1 1 +PRECIOUS 2 0 2 2 +PRECINCT 1 0 1 1 +PRECEPTORY 2 0 2 2 +PRECAUTION 1 0 1 1 +PREACHING 2 0 2 2 +PREACHER 1 0 1 1 +PRAYED 3 0 3 3 +PRAM 1 0 1 1 +PRAISES 1 0 1 1 +PRAISED 2 0 2 2 +PRACTISE 1 0 1 1 +PRACTICE 1 0 1 1 +PRACTICALLY 2 0 2 2 +POWERS 3 0 3 3 +POWERLESS 1 0 1 1 +POWERFUL 3 0 3 3 +POWER 27 0 27 27 +POVERTY 3 0 3 3 +POURS 1 0 1 1 +POURING 1 0 1 1 +POUR 1 0 1 1 +POUND 3 0 3 3 +POUNCE 1 0 1 1 +POTS 1 0 1 1 +POTASSIC 1 0 1 1 +POTASH 1 0 1 1 +POSTERS 1 0 1 1 +POSSIBLY 3 0 3 3 +POSSIBLE 12 0 12 12 +POSSIBILITY 2 0 2 2 +POSSESSION 2 0 2 2 +POSSESSES 1 0 1 1 +POSSESSED 5 0 5 5 +POSSESS 1 0 1 1 +POSSE 1 0 1 1 +POSITIVE 1 0 1 1 +PORTO 1 0 1 1 +PORTIONS 2 0 2 2 +PORTION 2 0 2 2 +PORTER 2 0 2 2 +POPULATION 1 0 1 1 +POPULARITY 1 0 1 1 +POPULAR 1 0 1 1 +POPPED 1 0 1 1 +POPES 2 0 2 2 +POPE'S 1 0 1 1 +POPE 1 0 1 1 +POP 1 0 1 1 +PONY 1 0 1 1 +POLONIUS 1 0 1 1 +POLLY'S 3 0 3 3 +POLITICIANS 1 0 1 1 +POLITICAL 3 0 3 3 +POLICE 5 0 5 5 +POKING 1 0 1 1 +POKED 1 0 1 1 +POISONS 1 0 1 1 +POISONING 3 0 3 3 +POINTING 2 0 2 2 +POINTED 3 0 3 3 +POETRY 2 0 2 2 +POCKETS 1 0 1 1 +POCKETED 1 0 1 1 +POCKET 8 0 8 8 +PLUNGED 3 0 3 3 +PLUNDERED 1 0 1 1 +PLUG 1 0 1 1 +PLUCKING 2 0 2 2 +PLOT 1 0 1 1 +PLEDGED 1 0 1 1 +PLEDGE 1 0 1 1 +PLEASURES 2 0 2 2 +PLEASED 4 0 4 4 +PLEASANTER 1 0 1 1 +PLEADINGS 1 0 1 1 +PLEADED 3 0 3 3 +PLEAD 1 0 1 1 +PLAYING 1 0 1 1 +PLAYED 5 0 5 5 +PLAY 4 0 4 4 +PLATTERS 1 0 1 1 +PLATFORM 2 0 2 2 +PLATED 1 0 1 1 +PLATE 2 0 2 2 +PLASTER 1 0 1 1 +PLANTS 1 0 1 1 +PLANTATIONS 2 0 2 2 +PLANS 5 0 5 5 +PLANNING 2 0 2 2 +PLANNED 1 0 1 1 +PLANKS 2 0 2 2 +PLANK 3 0 3 3 +PLANETS 1 0 1 1 +PLANET 2 0 2 2 +PLAN 1 0 1 1 +PLAINLY 3 0 3 3 +PLAGUE 2 0 2 2 +PITY 4 0 4 4 +PITIFULNESS 1 0 1 1 +PIT 1 0 1 1 +PISTOLS 1 0 1 1 +PIPING 1 0 1 1 +PIPES 1 0 1 1 +PIOUS 1 0 1 1 +PINT 1 0 1 1 +PINKERTON'S 1 0 1 1 +PINK 1 0 1 1 +PINED 1 0 1 1 +PINCHED 2 0 2 2 +PINCH 1 0 1 1 +PIN 1 0 1 1 +PILLOWED 1 0 1 1 +PILLOW 1 0 1 1 +PILED 1 0 1 1 +PILE 1 0 1 1 +PIG 1 0 1 1 +PIERO 1 0 1 1 +PIERCED 1 0 1 1 +PIECES 9 0 9 9 +PICTURES 3 0 3 3 +PICTURE 5 0 5 5 +PICKET 1 0 1 1 +PICK 5 0 5 5 +PIAZZA 1 0 1 1 +PHYSIOLOGICAL 1 0 1 1 +PHYSICIAN 3 0 3 3 +PHYSICAL 2 0 2 2 +PHUT'S 1 0 1 1 +PHRASE 1 0 1 1 +PHONE 1 0 1 1 +PHLEGMATIC 1 0 1 1 
+PHILOSOPHERS 1 0 1 1 +PHARMACY 1 0 1 1 +PEYTON 1 0 1 1 +PETITIONS 1 0 1 1 +PETERS 1 0 1 1 +PETER'S 2 0 2 2 +PETER 15 0 15 15 +PERVADED 1 0 1 1 +PERUSING 1 0 1 1 +PERUSAL 1 0 1 1 +PERSUADED 1 0 1 1 +PERSUADE 1 0 1 1 +PERSPECTIVE 1 0 1 1 +PERSONS 8 0 8 8 +PERSONALLY 5 0 5 5 +PERSONAL 2 0 2 2 +PERSONAGE 1 0 1 1 +PERSON'S 1 0 1 1 +PERSISTED 3 0 3 3 +PERSIST 1 0 1 1 +PERSEUS 2 0 2 2 +PERSECUTORS 1 0 1 1 +PERSECUTION 1 0 1 1 +PERSECUTED 1 0 1 1 +PERSECUTE 2 0 2 2 +PERPLEXITY 1 0 1 1 +PERPETUALLY 1 0 1 1 +PERMITTED 5 0 5 5 +PERMIT 4 0 4 4 +PERMISSION 2 0 2 2 +PERMANENT 1 0 1 1 +PERISHED 3 0 3 3 +PERISH 4 0 4 4 +PERIODS 1 0 1 1 +PERIOD 2 0 2 2 +PERILS 1 0 1 1 +PERFORMANCES 1 0 1 1 +PERFORM 2 0 2 2 +PERFECTLY 5 0 5 5 +PERFECTION 2 0 2 2 +PERFECT 5 0 5 5 +PERCHED 1 0 1 1 +PERCH 1 0 1 1 +PERCEPTIBLE 1 0 1 1 +PERCEIVED 7 0 7 7 +PERCEIVE 1 0 1 1 +PERAMBULATOR'S 1 0 1 1 +PER 1 0 1 1 +PEPPINO 1 0 1 1 +PEPPER 2 0 2 2 +PEOPLE'S 3 0 3 3 +PEOPLE 44 0 44 44 +PENNY 1 0 1 1 +PENETRATING 1 0 1 1 +PENETRATE 1 0 1 1 +PENALTY 1 0 1 1 +PELT 1 0 1 1 +PEERS 1 0 1 1 +PEERED 1 0 1 1 +PEDESTAL 1 0 1 1 +PECULIAR 2 0 2 2 +PEBBLES 3 0 3 3 +PEASANT 3 0 3 3 +PEARLS 1 0 1 1 +PEALS 1 0 1 1 +PEAL 1 0 1 1 +PEABODY 1 0 1 1 +PAYS 1 0 1 1 +PAYING 1 0 1 1 +PAVILION 1 0 1 1 +PAVEMENT 1 0 1 1 +PATRIOTS 1 0 1 1 +PATRIOTISM 1 0 1 1 +PATRIOT 1 0 1 1 +PATRIMONY 1 0 1 1 +PATRIARCHS 1 0 1 1 +PATIENTLY 1 0 1 1 +PATIENT'S 1 0 1 1 +PATIENT 2 0 2 2 +PATIENCE 3 0 3 3 +PATHS 1 0 1 1 +PASTE 1 0 1 1 +PASSION 5 0 5 5 +PASSING 3 0 3 3 +PASSENGERS 4 0 4 4 +PASSAGE 2 0 2 2 +PASSABLE 1 0 1 1 +PARTY 13 0 13 13 +PARTNER 1 0 1 1 +PARTISANS 1 0 1 1 +PARTINGS 1 0 1 1 +PARTING 2 0 2 2 +PARTIES 1 0 1 1 +PARTICULARS 1 0 1 1 +PARTICULARLY 5 0 5 5 +PARTICULAR 4 0 4 4 +PARTICLE 2 0 2 2 +PARTICIPATION 1 0 1 1 +PARTICIPANTS 1 0 1 1 +PARTAKE 1 0 1 1 +PARSLEY 1 0 1 1 +PARLIAMENTARY 1 0 1 1 +PARK 1 0 1 1 +PARISIAN 1 0 1 1 +PARIS 5 0 5 5 +PARENTS 2 0 2 2 +PARCEL 2 0 2 2 +PARASOL 1 0 1 1 +PARALLEL 1 0 1 1 +PARADISE 1 0 1 1 +PAPERS 4 0 4 4 +PAPA 4 0 4 4 +PANZA 1 0 1 1 +PANTING 2 0 2 2 +PANS 1 0 1 1 +PANNIERS 1 0 1 1 +PALINGS 1 0 1 1 +PALER 1 0 1 1 +PALE 8 0 8 8 +PALACE 6 0 6 6 +PAINTING 1 0 1 1 +PAINTER 1 0 1 1 +PAINFULLY 1 0 1 1 +PAINFUL 5 0 5 5 +PAINED 1 0 1 1 +PAID 9 0 9 9 +PAGES 4 0 4 4 +PAGE 2 0 2 2 +PADDLE 2 0 2 2 +PACKED 1 0 1 1 +PACIFY 1 0 1 1 +PACIFIC 2 0 2 2 +OXEN 1 0 1 1 +OWE 2 0 2 2 +OVERWHELMED 2 0 2 2 +OVERTURNING 1 0 1 1 +OVERTHREW 1 0 1 1 +OVERTAKEN 1 0 1 1 +OVERLY 1 0 1 1 +OVERHEAD 2 0 2 2 +OVERCOME 2 0 2 2 +OVERCOAT 1 0 1 1 +OVEN 2 0 2 2 +OVAL 1 0 1 1 +OUTWARD 1 0 1 1 +OUTLINES 1 0 1 1 +OUTLINE 1 0 1 1 +OUTLAWS 1 0 1 1 +OUTFIT 1 0 1 1 +OUTDO 1 0 1 1 +OURSELVES 4 0 4 4 +OTTER 1 0 1 1 +OTHO 1 0 1 1 +OTHERWISE 1 0 1 1 +OSTRICH 1 0 1 1 +ORTHODOX 1 0 1 1 +ORPHAN 1 0 1 1 +ORNERY 1 0 1 1 +ORNAMENTED 1 0 1 1 +ORNAMENTAL 1 0 1 1 +ORLEANS 1 0 1 1 +ORISON 1 0 1 1 +ORIGIN 1 0 1 1 +ORIENTAL 1 0 1 1 +ORGANIZATION 3 0 3 3 +ORFICER 1 0 1 1 +ORDINARY 1 0 1 1 +ORDERS 8 0 8 8 +ORDERED 5 0 5 5 +ORDEAL 1 0 1 1 +ORDAINED 1 0 1 1 +ORCHARDS 1 0 1 1 +ORBIS 1 0 1 1 +ORANGE 1 0 1 1 +ORACLE 2 0 2 2 +OPTIC 1 0 1 1 +OPPRESSOR 1 0 1 1 +OPPRESSION 3 0 3 3 +OPPRESSED 2 0 2 2 +OPPOSITE 3 0 3 3 +OPPOSE 1 0 1 1 +OPINION 3 0 3 3 +OPERATIONS 1 0 1 1 +OPERATED 1 0 1 1 +OPERA 1 0 1 1 +OPENS 3 0 3 3 +OPAQUE 1 0 1 1 +ONWARDS 1 0 1 1 +ONWARD 1 0 1 1 +ONION 2 0 2 2 +ONESELF 1 0 1 1 +OMITTING 1 0 1 1 +OLDISH 1 0 1 1 +OLDEST 1 0 1 1 +OLDER 1 0 1 1 +OGRE 3 0 3 3 +OGLING 1 0 1 1 +OFFICIAL 1 0 1 1 +OFFERS 3 0 3 3 +OFFERINGS 1 0 1 1 +OFFERING 1 0 1 1 +OFFERED 3 0 3 3 +OFFER 2 0 2 2 +OFFENSIVE 1 0 1 1 
+OCEAN 1 0 1 1 +OCCURRED 5 0 5 5 +OCCUR 1 0 1 1 +OCCUPY 3 0 3 3 +OCCUPIED 2 0 2 2 +OBVIOUSLY 1 0 1 1 +OBVIOUS 1 0 1 1 +OBTAINED 1 0 1 1 +OBTAIN 3 0 3 3 +OBSTRUCTION 1 0 1 1 +OBSTINATE 1 0 1 1 +OBSTINACY 1 0 1 1 +OBSERVER 1 0 1 1 +OBSERVED 5 0 5 5 +OBSERVE 1 0 1 1 +OBSERVATIONS 2 0 2 2 +OBSERVATION 1 0 1 1 +OBSERVANT 1 0 1 1 +OBLONG 1 0 1 1 +OBLIGATION 3 0 3 3 +OBJECTS 2 0 2 2 +OBJECTION 2 0 2 2 +OBEYING 1 0 1 1 +OBEDIENTLY 1 0 1 1 +OBEDIENT 2 0 2 2 +OATH 4 0 4 4 +OAR 1 0 1 1 +O'CLOCK 9 0 9 9 +NUN 1 0 1 1 +NUMBERED 1 0 1 1 +NUMBER 5 0 5 5 +NUISANCE 1 0 1 1 +NUBIAN 1 0 1 1 +NOWHERE 2 0 2 2 +NOTWITHSTANDING 1 0 1 1 +NOTORIOUS 1 0 1 1 +NOTION 1 0 1 1 +NOTICED 3 0 3 3 +NOTICE 7 0 7 7 +NOTHING 28 0 28 28 +NOTES 1 0 1 1 +NOTED 2 0 2 2 +NOTABLES 1 0 1 1 +NOSE 3 0 3 3 +NORTHWARD 1 0 1 1 +NORTHERN 1 0 1 1 +NORMAN 2 0 2 2 +NOON 1 0 1 1 +NONSENSE 2 0 2 2 +NOISE 5 0 5 5 +NODDING 1 0 1 1 +NODDED 3 0 3 3 +NOD 2 0 2 2 +NOBODY 6 0 6 6 +NOBLEMEN 1 0 1 1 +NOBLEMAN 1 0 1 1 +NOBILITY 1 0 1 1 +NINTH 1 0 1 1 +NINEVEH 1 0 1 1 +NINETY 2 0 2 2 +NINETEENTH 1 0 1 1 +NINETEEN 1 0 1 1 +NINE 6 0 6 6 +NIMBLENESS 1 0 1 1 +NICOTINE 1 0 1 1 +NICKEL 1 0 1 1 +NICK 1 0 1 1 +NICHOLAS 1 0 1 1 +NICETIES 1 0 1 1 +NEWS 7 0 7 7 +NEVERTHELESS 3 0 3 3 +NERVOUSNESS 2 0 2 2 +NERVOUSLY 1 0 1 1 +NERVOUS 2 0 2 2 +NERVES 1 0 1 1 +NEMESIS 3 0 3 3 +NEIGHBOURHOOD 1 0 1 1 +NEIGHBOUR 1 0 1 1 +NEIGHBOR 2 0 2 2 +NEGRO 2 0 2 2 +NEGLECTING 1 0 1 1 +NEGLECTED 2 0 2 2 +NEGLECT 1 0 1 1 +NEGATIVE 1 0 1 1 +NEEDN'T 1 0 1 1 +NEEDLED 1 0 1 1 +NEEDED 9 0 9 9 +NEED 9 0 9 9 +NECK 1 0 1 1 +NECESSITY 7 0 7 7 +NECESSARY 10 0 10 10 +NECESSARILY 2 0 2 2 +NEATLY 2 0 2 2 +NEARLY 2 0 2 2 +NEARED 1 0 1 1 +NAY 2 0 2 2 +NAVEL 1 0 1 1 +NAUSEA 1 0 1 1 +NATURED 2 0 2 2 +NATTY 1 0 1 1 +NATIVE 6 0 6 6 +NATIONS 3 0 3 3 +NARROWNESS 1 0 1 1 +NARROWER 1 0 1 1 +NARROW 5 0 5 5 +NARRATOR 1 0 1 1 +NAPKINS 1 0 1 1 +NAPKIN 1 0 1 1 +NAMES 5 0 5 5 +NAMED 5 0 5 5 +NAME'S 1 0 1 1 +NAME 21 0 21 21 +NAILS 2 0 2 2 +NAILED 2 0 2 2 +NAIL 5 0 5 5 +MUTTERED 5 0 5 5 +MUSKETS 1 0 1 1 +MUSICIANS 1 0 1 1 +MUSICAL 1 0 1 1 +MUSIC 2 0 2 2 +MURMURED 1 0 1 1 +MURMUR 1 0 1 1 +MURDERED 1 0 1 1 +MULTITUDE 1 0 1 1 +MULE 1 0 1 1 +MUFFLED 1 0 1 1 +MUCOUS 1 0 1 1 +MOVING 6 0 6 6 +MOVEMENTS 2 0 2 2 +MOVED 7 0 7 7 +MOVE 1 0 1 1 +MOUSE 1 0 1 1 +MOURNFULLY 1 0 1 1 +MOUNTED 2 0 2 2 +MOUNTAINS 1 0 1 1 +MOUNTAIN 1 0 1 1 +MOUNT 1 0 1 1 +MOTLEY 1 0 1 1 +MOTIVES 1 0 1 1 +MOTIVE 1 0 1 1 +MOTHER'S 3 0 3 3 +MOSTLY 2 0 2 2 +MORTAR 1 0 1 1 +MORPHINE 2 0 2 2 +MOREOVER 1 0 1 1 +MORCERF 3 0 3 3 +MORALS 1 0 1 1 +MOPPED 1 0 1 1 +MOORED 1 0 1 1 +MOONFLOWERS 1 0 1 1 +MOON 2 0 2 2 +MOOD 2 0 2 2 +MONTHS 6 0 6 6 +MONTESQUIEU 1 0 1 1 +MONSTROUS 1 0 1 1 +MONSTERS 2 0 2 2 +MONOTONOUS 1 0 1 1 +MONKEY 3 0 3 3 +MONDAY 2 0 2 2 +MONASTERY 1 0 1 1 +MONARCH 2 0 2 2 +MOMENTS 6 0 6 6 +MOMENT'S 1 0 1 1 +MOLESTED 1 0 1 1 +MOHAMMED 1 0 1 1 +MODEST 1 0 1 1 +MODERN 2 0 2 2 +MODERATE 2 0 2 2 +MODEL 2 0 2 2 +MOCKERY 1 0 1 1 +MOB 1 0 1 1 +MOANING 2 0 2 2 +MIXTURE 1 0 1 1 +MIXING 1 0 1 1 +MIXED 2 0 2 2 +MIX 3 0 3 3 +MISTRUST 1 0 1 1 +MISTRESSES 1 0 1 1 +MISTRESS 6 0 6 6 +MISTAKE 2 0 2 2 +MIST 2 0 2 2 +MISSOURI 1 0 1 1 +MISSISSIPPIAN 1 0 1 1 +MISSISSIPPI 1 0 1 1 +MISSING 3 0 3 3 +MISFORTUNE 1 0 1 1 +MISERY 3 0 3 3 +MISERABLE 2 0 2 2 +MISCONDUCT 1 0 1 1 +MISCONCEPTION 1 0 1 1 +MISCHIEVOUS 2 0 2 2 +MISCHIEF 1 0 1 1 +MISAPPREHENSION 1 0 1 1 +MISANTHROPY 1 0 1 1 +MIRTH 2 0 2 2 +MIRACULOUS 1 0 1 1 +MIRACLES 3 0 3 3 +MIRABELLE 2 0 2 2 +MINUTES 11 0 11 11 +MINUTE 6 0 6 6 +MINNIE 2 0 2 2 +MINNESOTA 1 0 1 1 +MINNEAPOLIS 1 0 1 1 +MINISTERS 1 0 1 1 +MINISTERED 1 0 1 1 
+MINIMS 1 0 1 1 +MINIATURE 1 0 1 1 +MINGLED 2 0 2 2 +MINDS 1 0 1 1 +MINDED 1 0 1 1 +MILTON 4 0 4 4 +MILLIONS 1 0 1 1 +MILLER'S 1 0 1 1 +MILLER 4 0 4 4 +MILITARY 5 0 5 5 +MIKE'S 1 0 1 1 +MIGHTY 3 0 3 3 +MIGHTINESS 1 0 1 1 +MIDST 3 0 3 3 +MIDSHIPMAN 1 0 1 1 +MIDNIGHT 3 0 3 3 +MIDDY'S 1 0 1 1 +MIDDLE 5 0 5 5 +MICROSCOPIC 1 0 1 1 +MICROBE 1 0 1 1 +METALLIC 1 0 1 1 +MESSAGE 1 0 1 1 +MERITS 1 0 1 1 +MERE 3 0 3 3 +MERCY 5 0 5 5 +MERCURY 2 0 2 2 +MERCIFUL 1 0 1 1 +MERCIES 1 0 1 1 +MERCHANTS 6 0 6 6 +MERCHANT 3 0 3 3 +MENTION 1 0 1 1 +MENTALLY 3 0 3 3 +MENACING 1 0 1 1 +MEMORY 4 0 4 4 +MEMORIAL 1 0 1 1 +MEMBERS 7 0 7 7 +MEMBER 1 0 1 1 +MELTED 1 0 1 1 +MELANCHOLY 1 0 1 1 +MEETING 4 0 4 4 +MEET 9 0 9 9 +MEDIUMS 1 0 1 1 +MEDIUM 1 0 1 1 +MEDITATION 1 0 1 1 +MEDITATED 1 0 1 1 +MEDICINE 1 0 1 1 +MEDICAMENTS 1 0 1 1 +MEDDLE 1 0 1 1 +MEDAL 1 0 1 1 +MECHANICALLY 1 0 1 1 +MECHANICAL 1 0 1 1 +MEASURABLE 1 0 1 1 +MEANWHILE 4 0 4 4 +MEANING 2 0 2 2 +MAYBE 4 0 4 4 +MATTOCK 1 0 1 1 +MATTERED 1 0 1 1 +MATERIALLY 1 0 1 1 +MATERIAL 1 0 1 1 +MATE 2 0 2 2 +MATCH 1 0 1 1 +MASTERS 1 0 1 1 +MASTERPIECE 1 0 1 1 +MASTERED 1 0 1 1 +MASTER'S 1 0 1 1 +MAST 1 0 1 1 +MASON'S 1 0 1 1 +MASON 1 0 1 1 +MARY 2 0 2 2 +MARVELLOUS 4 0 4 4 +MARTYR 1 0 1 1 +MARSPORT 1 0 1 1 +MARSHAL'S 1 0 1 1 +MARRY 3 0 3 3 +MARRIED 4 0 4 4 +MARLBOROUGH'S 1 0 1 1 +MARKS 1 0 1 1 +MARKING 1 0 1 1 +MARJORIE 3 0 3 3 +MARIUS 6 0 6 6 +MARIA 1 0 1 1 +MARGUERITE 11 0 11 11 +MARGINAL 1 0 1 1 +MARCH 2 0 2 2 +MARBLE 2 0 2 2 +MANTELPIECE 1 0 1 1 +MANIFESTATION 1 0 1 1 +MANCHESTER 1 0 1 1 +MANAGE 1 0 1 1 +MAMMOTH 1 0 1 1 +MALICE 1 0 1 1 +MALEVOLENT 1 0 1 1 +MALADY 1 0 1 1 +MAKER 1 0 1 1 +MAKAN 8 0 8 8 +MAJESTY 2 0 2 2 +MAINTAINED 1 0 1 1 +MAINTAIN 1 0 1 1 +MAINLY 1 0 1 1 +MAIDEN 3 0 3 3 +MAID 3 0 3 3 +MAHOGANY 2 0 2 2 +MAGNIFYING 1 0 1 1 +MAGNIFIES 1 0 1 1 +MAGNIFICENT 2 0 2 2 +MAGNIFICENCE 1 0 1 1 +MAGNANIMITY 1 0 1 1 +MAGICIAN 2 0 2 2 +MAGICAL 1 0 1 1 +MAGIC 1 0 1 1 +MAGAZINE 1 0 1 1 +MADRID 1 0 1 1 +MADNESS 2 0 2 2 +MADELEINE 3 0 3 3 +MADAME 1 0 1 1 +MACHINES 1 0 1 1 +MACHINERY 1 0 1 1 +LYNCHES 1 0 1 1 +LUTHER 1 0 1 1 +LUSTILY 1 0 1 1 +LURKING 1 0 1 1 +LUMP 1 0 1 1 +LUGGAGE 1 0 1 1 +LUCKY 2 0 2 2 +LUCKLESS 1 0 1 1 +LUCID 1 0 1 1 +LUCAS 1 0 1 1 +LOYALTY 2 0 2 2 +LOYAL 2 0 2 2 +LOWERED 1 0 1 1 +LOVES 6 0 6 6 +LOVERS 2 0 2 2 +LOUVRE 1 0 1 1 +LOUISIANA 1 0 1 1 +LOUDLY 1 0 1 1 +LOUDER 1 0 1 1 +LOUD 8 0 8 8 +LOSSES 1 0 1 1 +LOSING 3 0 3 3 +LOSE 6 0 6 6 +LORN 1 0 1 1 +LORDSHIPS 1 0 1 1 +LORDS 2 0 2 2 +LOQUACITY 1 0 1 1 +LOOSENED 1 0 1 1 +LOOSELY 1 0 1 1 +LONGING 3 0 3 3 +LONGED 2 0 2 2 +LONELY 1 0 1 1 +LONDON 4 0 4 4 +LODGE 1 0 1 1 +LOCKED 4 0 4 4 +LOCATE 1 0 1 1 +LOCAL 4 0 4 4 +LOBSTERS 2 0 2 2 +LOADING 1 0 1 1 +LIVING 5 0 5 5 +LIVID 1 0 1 1 +LIVERY 1 0 1 1 +LIVELIHOOD 1 0 1 1 +LITTER 1 0 1 1 +LITERATURE 1 0 1 1 +LITERALLY 1 0 1 1 +LISTENING 5 0 5 5 +LISTENERS 1 0 1 1 +LISTENER 1 0 1 1 +LISTENED 4 0 4 4 +LIQUOR 4 0 4 4 +LIQUID 1 0 1 1 +LIP 3 0 3 3 +LIONS 1 0 1 1 +LION 1 0 1 1 +LINK 1 0 1 1 +LINGO 1 0 1 1 +LINGER 1 0 1 1 +LINES 2 0 2 2 +LINE 7 0 7 7 +LINCOLN 1 0 1 1 +LIMPED 4 0 4 4 +LIMP 3 0 3 3 +LIMITS 1 0 1 1 +LIMIT 1 0 1 1 +LIMES 1 0 1 1 +LIKING 1 0 1 1 +LIKES 1 0 1 1 +LIGHTS 1 0 1 1 +LIGHTING 1 0 1 1 +LIGHTENED 1 0 1 1 +LIGATURES 1 0 1 1 +LIFTING 1 0 1 1 +LIFT 1 0 1 1 +LIFETIME 1 0 1 1 +LIBRARY 2 0 2 2 +LIBERTY 3 0 3 3 +LIBERATION 2 0 2 2 +LEVIN 6 0 6 6 +LEVELLED 1 0 1 1 +LEVEL 1 0 1 1 +LETTING 1 0 1 1 +LETTER 22 0 22 22 +LESSONS 1 0 1 1 +LESSON 2 0 2 2 +LESSENS 1 0 1 1 +LENT 1 0 1 1 +LENGTH 4 0 4 4 +LEND 2 0 2 2 +LEISURE 3 0 3 3 +LEGISLATURE 1 0 1 1 +LEGALLY 1 0 1 1 
+LEGAL 2 0 2 2 +LEG 1 0 1 1 +LEE'S 1 0 1 1 +LEAVING 5 0 5 5 +LEAVES 1 0 1 1 +LEAST 15 0 15 15 +LEARNS 1 0 1 1 +LEARNING 4 0 4 4 +LEAPT 1 0 1 1 +LEAPED 1 0 1 1 +LEAP 1 0 1 1 +LEANING 1 0 1 1 +LEAN 1 0 1 1 +LEAF 1 0 1 1 +LEADS 1 0 1 1 +LEADING 3 0 3 3 +LEADER 2 0 2 2 +LAZY 1 0 1 1 +LAZILY 1 0 1 1 +LAYING 3 0 3 3 +LAY 14 0 14 14 +LAWYER 1 0 1 1 +LAURA 3 0 3 3 +LAUGHS 1 0 1 1 +LAUGHING 5 0 5 5 +LAUGH 9 0 9 9 +LATTER 2 0 2 2 +LATIN 1 0 1 1 +LASTLY 2 0 2 2 +LASTING 1 0 1 1 +LARGESSE 1 0 1 1 +LAPSE 1 0 1 1 +LANTERN 1 0 1 1 +LANGUAGE 2 0 2 2 +LANDSMAN 1 0 1 1 +LANDOWNER 4 0 4 4 +LANDLORD 1 0 1 1 +LANDING 1 0 1 1 +LAMP 1 0 1 1 +LAME 1 0 1 1 +LADIES 6 0 6 6 +LACE 2 0 2 2 +LABOURS 1 0 1 1 +LABORER 1 0 1 1 +KNUCKLES 1 0 1 1 +KNOWS 8 0 8 8 +KNOWING 3 0 3 3 +KNOWEST 3 0 3 3 +KNOWED 1 0 1 1 +KNOCKER 1 0 1 1 +KNOCK 1 0 1 1 +KNIGHTHOOD 1 0 1 1 +KNEES 3 0 3 3 +KNAVE 1 0 1 1 +KITTY 4 0 4 4 +KITCHEN 3 0 3 3 +KISSING 2 0 2 2 +KINSFOLK 1 0 1 1 +KINGS 8 0 8 8 +KINGDOM 3 0 3 3 +KING'S 7 0 7 7 +KING 45 0 45 45 +KINDS 1 0 1 1 +KINDLY 3 0 3 3 +KINDEST 1 0 1 1 +KILLING 1 0 1 1 +KILLED 4 0 4 4 +KIDNEYS 1 0 1 1 +KID 2 0 2 2 +KICKED 1 0 1 1 +KHORASAN 2 0 2 2 +KHAN 1 0 1 1 +KEYHOLE 1 0 1 1 +KEPT 9 0 9 9 +KENT 2 0 2 2 +KENNETH 3 0 3 3 +KEEPER'S 1 0 1 1 +KEEPER 2 0 2 2 +KEENLY 1 0 1 1 +KAZI 1 0 1 1 +K 1 0 1 1 +JUSTLY 1 0 1 1 +JUSTINIAN 1 0 1 1 +JUSTIFIES 1 0 1 1 +JUSTIFICATION 3 0 3 3 +JURISDICTION 1 0 1 1 +JUNIOR 1 0 1 1 +JUNE 1 0 1 1 +JUMPS 2 0 2 2 +JUMPING 2 0 2 2 +JUMPED 1 0 1 1 +JUMP 1 0 1 1 +JUICE 1 0 1 1 +JUGS 1 0 1 1 +JUDICIAL 1 0 1 1 +JUDGES 2 0 2 2 +JUDGED 1 0 1 1 +JOYOUS 1 0 1 1 +JOYFUL 2 0 2 2 +JOYANCE 2 0 2 2 +JOY 7 0 7 7 +JOURNEYED 1 0 1 1 +JOURNEY 8 0 8 8 +JOURNALISM 1 0 1 1 +JOSEPH 1 0 1 1 +JONES 1 0 1 1 +JOLLY 1 0 1 1 +JOINTS 1 0 1 1 +JOINING 1 0 1 1 +JOINED 2 0 2 2 +JOIN 5 0 5 5 +JOHN 9 0 9 9 +JOBS 1 0 1 1 +JOB 7 0 7 7 +JOANNA'S 1 0 1 1 +JEWELRY 1 0 1 1 +JEW'S 1 0 1 1 +JEW 1 0 1 1 +JESUS 2 0 2 2 +JERRY 1 0 1 1 +JERK 1 0 1 1 +JERICHO 1 0 1 1 +JENKINS 2 0 2 2 +JEHU 1 0 1 1 +JEHOVAH 3 0 3 3 +JEERINGLY 1 0 1 1 +JEAN 10 0 10 10 +JANUARY 1 0 1 1 +JANGLING 1 0 1 1 +JANE'S 1 0 1 1 +JAMIESON 1 0 1 1 +JAIL 1 0 1 1 +JACKET 1 0 1 1 +IVANOVITCH'S 1 0 1 1 +ITSELF 7 0 7 7 +ITALY 1 0 1 1 +ISOLATION 1 0 1 1 +ISLANDERS 1 0 1 1 +ISLAND 5 0 5 5 +ISAAC 2 0 2 2 +IRRITATION 1 0 1 1 +IRRITABILITY 1 0 1 1 +IRREVERENTLY 1 0 1 1 +IRREVERENCE 1 0 1 1 +IRRESISTIBLY 1 0 1 1 +IRRESISTIBLE 1 0 1 1 +IRON 7 0 7 7 +IRKSOME 1 0 1 1 +IRISH 1 0 1 1 +IRELAND 2 0 2 2 +IRATE 1 0 1 1 +IOWA 2 0 2 2 +INVOLVED 1 0 1 1 +INVOKE 1 0 1 1 +INVITED 1 0 1 1 +INVITATION 2 0 2 2 +INVISIBLE 1 0 1 1 +INVINCIBLE 1 0 1 1 +INVETERATE 1 0 1 1 +INVESTIGATION 2 0 2 2 +INVENTING 1 0 1 1 +INVADING 1 0 1 1 +INVADERS 1 0 1 1 +INVADED 1 0 1 1 +INVADE 1 0 1 1 +INTRODUCTION 1 0 1 1 +INTRODUCING 1 0 1 1 +INTRODUCES 1 0 1 1 +INTRODUCED 1 0 1 1 +INTOXICATED 2 0 2 2 +INTIMATES 1 0 1 1 +INTERVIEWS 1 0 1 1 +INTERVAL 3 0 3 3 +INTERRUPTED 1 0 1 1 +INTERRED 2 0 2 2 +INTERPRETATION 1 0 1 1 +INTERPOLATIONS 1 0 1 1 +INTERNATIONAL 1 0 1 1 +INTERMISSION 1 0 1 1 +INTERMENT 2 0 2 2 +INTERMEDDLING 1 0 1 1 +INTERFERENCE 1 0 1 1 +INTERESTED 3 0 3 3 +INTERCOURSE 1 0 1 1 +INTERCHANGE 1 0 1 1 +INTENTIONALLY 2 0 2 2 +INTENSITY 1 0 1 1 +INTENSELY 2 0 2 2 +INTEND 2 0 2 2 +INTELLIGENT 2 0 2 2 +INTELLIGENCE 2 0 2 2 +INTELLECT 1 0 1 1 +INSULTED 1 0 1 1 +INSUFFICIENT 1 0 1 1 +INSTRUMENTS 4 0 4 4 +INSTRUCTIONS 1 0 1 1 +INSTITUTIONS 1 0 1 1 +INSTITUTED 1 0 1 1 +INSTITUTE 1 0 1 1 +INSTINCTS 1 0 1 1 +INSTINCT 3 0 3 3 +INSTANTLY 1 0 1 1 +INSTANT 5 0 5 5 +INSPIRES 1 0 1 1 +INSPIRATION 3 0 3 3 +INSOLUBLE 1 0 
1 1 +INSOLENT 1 0 1 1 +INSISTING 1 0 1 1 +INSISTENCE 1 0 1 1 +INSISTED 2 0 2 2 +INSINUATING 1 0 1 1 +INSHALLAH 1 0 1 1 +INSECURITY 1 0 1 1 +INSCRIPTION 1 0 1 1 +INSANE 1 0 1 1 +INQUISITION 1 0 1 1 +INQUIRIES 1 0 1 1 +INQUIRED 5 0 5 5 +INNOCENT 3 0 3 3 +INNKEEPER 2 0 2 2 +INJURIES 1 0 1 1 +INJURED 1 0 1 1 +INIQUITIES 1 0 1 1 +INHERENT 1 0 1 1 +INHABITANTS 1 0 1 1 +INHABIT 1 0 1 1 +INGREDIENTS 1 0 1 1 +INFORMED 5 0 5 5 +INFORMATION 3 0 3 3 +INFORM 1 0 1 1 +INFLUENCES 1 0 1 1 +INFLUENCE 10 0 10 10 +INFLICT 1 0 1 1 +INFLATE 1 0 1 1 +INFIRMITY 1 0 1 1 +INFIRM 1 0 1 1 +INFINITELY 1 0 1 1 +INFINITE 4 0 4 4 +INFERENTIALLY 1 0 1 1 +INEXORABLY 1 0 1 1 +INEVITABLE 1 0 1 1 +INDUSTRY 1 0 1 1 +INDUSTRIOUS 1 0 1 1 +INDUSTRIAL 1 0 1 1 +INDULGENT 1 0 1 1 +INDULGENCE 2 0 2 2 +INDUCED 1 0 1 1 +INDIVIDUALS 9 0 9 9 +INDIVIDUAL 1 0 1 1 +INDISPOSITION 1 0 1 1 +INDISCRETION 1 0 1 1 +INDIGNATION 1 0 1 1 +INDIFFERENT 2 0 2 2 +INDIFFERENCE 1 0 1 1 +INDICATIONS 2 0 2 2 +INDICATED 2 0 2 2 +INDIANS 2 0 2 2 +INDIANA 2 0 2 2 +INDIAN 1 0 1 1 +INDESCRIBABLE 1 0 1 1 +INDEPENDENT 2 0 2 2 +INDEPENDENCE 4 0 4 4 +INDEED 14 0 14 14 +INDECISION 1 0 1 1 +INCUR 1 0 1 1 +INCREDULOUSLY 1 0 1 1 +INCREDULITY 1 0 1 1 +INCREASING 2 0 2 2 +INCREASED 5 0 5 5 +INCONSISTENCY 1 0 1 1 +INCONCEIVABLE 1 0 1 1 +INCOHERENT 1 0 1 1 +INCLUDING 2 0 2 2 +INCLUDE 1 0 1 1 +INCLINED 1 0 1 1 +INCIDENTS 1 0 1 1 +INCIDENT 1 0 1 1 +INCARCERATING 1 0 1 1 +INASMUCH 1 0 1 1 +INANIMATE 1 0 1 1 +IMPULSE 3 0 3 3 +IMPROVISE 1 0 1 1 +IMPROVISATION 1 0 1 1 +IMPROVING 1 0 1 1 +IMPROVIDENT 1 0 1 1 +IMPRESSION 1 0 1 1 +IMPRECATIONS 1 0 1 1 +IMPRECATION 1 0 1 1 +IMPOSSIBLE 4 0 4 4 +IMPOSING 1 0 1 1 +IMPOSES 1 0 1 1 +IMPORTS 1 0 1 1 +IMPORTED 1 0 1 1 +IMPORTANT 1 0 1 1 +IMPORTANCE 3 0 3 3 +IMPLIES 1 0 1 1 +IMPLIED 2 0 2 2 +IMPLACABLE 1 0 1 1 +IMPIOUS 1 0 1 1 +IMPERTINENT 1 0 1 1 +IMPERSONAL 1 0 1 1 +IMPERIOUS 1 0 1 1 +IMPERATIVE 1 0 1 1 +IMPATIENTLY 2 0 2 2 +IMPATIENT 2 0 2 2 +IMMORTALS 1 0 1 1 +IMMEDIATELY 9 0 9 9 +IMMEDIATE 1 0 1 1 +IMITATION 1 0 1 1 +IMITATE 1 0 1 1 +IMBECILE 1 0 1 1 +IMAGINED 1 0 1 1 +IMAGINE 6 0 6 6 +IMAGINATION 1 0 1 1 +IMAGINARY 1 0 1 1 +IMAGE 2 0 2 2 +ILLUSTRIOUS 4 0 4 4 +ILLUSION 1 0 1 1 +ILLITERATE 1 0 1 1 +IGNORANT 2 0 2 2 +IGNORANCE 1 0 1 1 +IDOLATRIES 1 0 1 1 +IDLE 1 0 1 1 +IDIOTIC 1 0 1 1 +IDIOT 1 0 1 1 +IDEAS 2 0 2 2 +IDEAL 1 0 1 1 +ICE 1 0 1 1 +HYPOTHETICAL 1 0 1 1 +HYPODERMICALLY 1 0 1 1 +HYPODERMIC 1 0 1 1 +HUSTLED 1 0 1 1 +HUSTLE 1 0 1 1 +HUSKILY 1 0 1 1 +HUSH 1 0 1 1 +HUSBANDMEN 1 0 1 1 +HUSBAND'S 3 0 3 3 +HUSBAND 9 0 9 9 +HURRYING 3 0 3 3 +HURRY 1 0 1 1 +HURRIEDLY 3 0 3 3 +HURRIED 3 0 3 3 +HURRICANE 1 0 1 1 +HUNTED 2 0 2 2 +HUNT 1 0 1 1 +HUNGRY 2 0 2 2 +HUNGER 2 0 2 2 +HUNGARY 1 0 1 1 +HUNG 2 0 2 2 +HUNDREDS 1 0 1 1 +HUMOURS 1 0 1 1 +HUMILIATIONS 1 0 1 1 +HUMILIATION 1 0 1 1 +HUMILIATED 1 0 1 1 +HUMBLE 1 0 1 1 +HUMAN 6 0 6 6 +HULLO 1 0 1 1 +HUGELY 1 0 1 1 +HUGE 3 0 3 3 +HOUSES 4 0 4 4 +HOURS 12 0 12 12 +HOTLY 1 0 1 1 +HOT 5 0 5 5 +HOST 3 0 3 3 +HOSPITALITY 1 0 1 1 +HORSEBACK 1 0 1 1 +HORSE 10 0 10 10 +HORRIBLE 3 0 3 3 +HORNS 1 0 1 1 +HORIZONTAL 1 0 1 1 +HORIZON 2 0 2 2 +HORACE 1 0 1 1 +HOPING 1 0 1 1 +HOPELESS 1 0 1 1 +HOPEFUL 1 0 1 1 +HOPED 2 0 2 2 +HOPE 16 0 16 16 +HOOTED 1 0 1 1 +HOOK 1 0 1 1 +HONOURS 1 0 1 1 +HONEYMOON 1 0 1 1 +HONEY 1 0 1 1 +HOMEWARD 1 0 1 1 +HOMELESS 1 0 1 1 +HOLY 6 0 6 6 +HOLLOW 2 0 2 2 +HOLES 2 0 2 2 +HOLDS 1 0 1 1 +HOLDING 6 0 6 6 +HITHERTO 2 0 2 2 +HITCH 1 0 1 1 +HISTORIANS 1 0 1 1 +HIRE 1 0 1 1 +HINTS 1 0 1 1 +HINTED 1 0 1 1 +HINT 3 0 3 3 +HINGES 1 0 1 1 +HINDER 1 0 1 1 +HILL 7 0 7 7 +HIGHWAYS 1 0 1 1 
+HIGHLY 1 0 1 1 +HIGHEST 1 0 1 1 +HIGHER 1 0 1 1 +HIGGINS 1 0 1 1 +HIDING 1 0 1 1 +HIDEOUS 1 0 1 1 +HIDE 2 0 2 2 +HID 2 0 2 2 +HESITATING 1 0 1 1 +HESITATED 2 0 2 2 +HERS 4 0 4 4 +HERO 1 0 1 1 +HERDSMEN 1 0 1 1 +HERCULEAN 1 0 1 1 +HERBS 1 0 1 1 +HEPTARCHIES 1 0 1 1 +HENRY 3 0 3 3 +HENCE 4 0 4 4 +HELSTONE 1 0 1 1 +HELPLESSLY 1 0 1 1 +HELPLESS 3 0 3 3 +HELMET 2 0 2 2 +HEIR 2 0 2 2 +HEIGHT 1 0 1 1 +HEEL 1 0 1 1 +HEED 1 0 1 1 +HEDGE 1 0 1 1 +HEBREWS 4 0 4 4 +HEAVY 11 0 11 11 +HEAVIEST 1 0 1 1 +HEAVENS 1 0 1 1 +HEAVEN'S 1 0 1 1 +HEAVEN 6 0 6 6 +HEAVE 1 0 1 1 +HEARTILY 1 0 1 1 +HEARTED 2 0 2 2 +HEARING 2 0 2 2 +HEAP 2 0 2 2 +HEALTHY 1 0 1 1 +HEADQUARTERS 2 0 2 2 +HEADLIGHTS 1 0 1 1 +HEADED 4 0 4 4 +HEADACHES 1 0 1 1 +HAWED 1 0 1 1 +HAW 1 0 1 1 +HAVEN'T 4 0 4 4 +HAUNT 3 0 3 3 +HAUNCHES 1 0 1 1 +HATTON 1 0 1 1 +HATS 1 0 1 1 +HATREDS 1 0 1 1 +HATRED 2 0 2 2 +HATED 3 0 3 3 +HAT 3 0 3 3 +HASTY 2 0 2 2 +HASTILY 2 0 2 2 +HASTENED 1 0 1 1 +HASTEN 1 0 1 1 +HAST 7 0 7 7 +HASN'T 1 0 1 1 +HASHISH 1 0 1 1 +HARSHLY 2 0 2 2 +HARRY 3 0 3 3 +HARNESSED 1 0 1 1 +HARMONY 1 0 1 1 +HARMLESS 1 0 1 1 +HARM 6 0 6 6 +HARKNESS 1 0 1 1 +HARGRAVE 1 0 1 1 +HAPPINESS 5 0 5 5 +HAPPIEST 1 0 1 1 +HAPPIER 3 0 3 3 +HAPPENS 1 0 1 1 +HANGING 1 0 1 1 +HANGED 1 0 1 1 +HANG 3 0 3 3 +HANDSOME 4 0 4 4 +HANDLED 1 0 1 1 +HANDKERCHIEF 3 0 3 3 +HANDING 1 0 1 1 +HANDIER 1 0 1 1 +HANDED 1 0 1 1 +HAMPERED 1 0 1 1 +HAMMERS 1 0 1 1 +HAMMER 1 0 1 1 +HALTS 1 0 1 1 +HALTING 1 0 1 1 +HALT 5 0 5 5 +HALLS 1 0 1 1 +HALFPENNY 1 0 1 1 +HALE 6 0 6 6 +HAIRED 1 0 1 1 +HAIN'T 1 0 1 1 +HAG 1 0 1 1 +HACK 1 0 1 1 +HABITUAL 1 0 1 1 +HABITS 2 0 2 2 +GYLINGDEN 1 0 1 1 +GUT 3 0 3 3 +GUSH 1 0 1 1 +GULPED 1 0 1 1 +GULF 1 0 1 1 +GUISE 1 0 1 1 +GUILTY 5 0 5 5 +GUILT 2 0 2 2 +GUILD 1 0 1 1 +GUESTS 2 0 2 2 +GUEST 4 0 4 4 +GUESSED 1 0 1 1 +GUARDS 1 0 1 1 +GUARDED 1 0 1 1 +GRUMBLED 2 0 2 2 +GRUFFISH 1 0 1 1 +GROWTH 1 0 1 1 +GROWN 1 0 1 1 +GROUPS 6 0 6 6 +GROUP 3 0 3 3 +GROUNDS 1 0 1 1 +GROOMED 1 0 1 1 +GROAN 2 0 2 2 +GRINNED 2 0 2 2 +GRIN 2 0 2 2 +GRIMSBY 1 0 1 1 +GRIMACED 1 0 1 1 +GRIEVING 1 0 1 1 +GREET 2 0 2 2 +GREENWOOD 1 0 1 1 +GREENTON 1 0 1 1 +GREENHORNS 1 0 1 1 +GREENBACKS 1 0 1 1 +GREEN 2 0 2 2 +GREEK 2 0 2 2 +GREATLY 1 0 1 1 +GREATEST 6 0 6 6 +GREATER 6 0 6 6 +GREASY 1 0 1 1 +GRAVITY 1 0 1 1 +GRAVES 1 0 1 1 +GRAVELLED 1 0 1 1 +GRAVE 17 0 17 17 +GRATING 2 0 2 2 +GRATIFICATION 1 0 1 1 +GRATEFUL 2 0 2 2 +GRATED 1 0 1 1 +GRASS 2 0 2 2 +GRASP 2 0 2 2 +GRAPE 1 0 1 1 +GRANTING 1 0 1 1 +GRANT 2 0 2 2 +GRANDSON 1 0 1 1 +GRANDPAPA 1 0 1 1 +GRANDFATHER 2 0 2 2 +GRANDEUR 1 0 1 1 +GRANDDAUGHTER 1 0 1 1 +GRAINS 1 0 1 1 +GRAIN 4 0 4 4 +GRAFTON'S 1 0 1 1 +GRAFTON 1 0 1 1 +GRAFT 2 0 2 2 +GRADUALLY 1 0 1 1 +GRACIOUSLY 2 0 2 2 +GRACIOUS 3 0 3 3 +GRACE 1 0 1 1 +GRABBED 2 0 2 2 +GOWN 1 0 1 1 +GOTTEN 1 0 1 1 +GOOSE 1 0 1 1 +GOODNESS 5 0 5 5 +GOLFING 1 0 1 1 +GOLDFINCH 1 0 1 1 +GNAWING 1 0 1 1 +GNASHING 1 0 1 1 +GLOWING 2 0 2 2 +GLOWED 3 0 3 3 +GLOVES 3 0 3 3 +GLOVE 1 0 1 1 +GLORY 2 0 2 2 +GLORIOUS 1 0 1 1 +GLORIFY 1 0 1 1 +GLOOM 1 0 1 1 +GLOATING 1 0 1 1 +GLINTING 1 0 1 1 +GLIMPSE 1 0 1 1 +GLIMMER 1 0 1 1 +GLIDING 1 0 1 1 +GLEAMED 1 0 1 1 +GLAZED 1 0 1 1 +GLANCING 1 0 1 1 +GLANCES 2 0 2 2 +GLANCED 2 0 2 2 +GLADNESS 2 0 2 2 +GLADLY 1 0 1 1 +GLADDENED 1 0 1 1 +GIVES 7 0 7 7 +GIRDLE 2 0 2 2 +GIMLET 1 0 1 1 +GIFTED 1 0 1 1 +GIANT'S 1 0 1 1 +GIANT 1 0 1 1 +GHOSTS 1 0 1 1 +GHOST 2 0 2 2 +GHASTLY 2 0 2 2 +GETTING 12 0 12 12 +GETS 3 0 3 3 +GERMS 1 0 1 1 +GERM 1 0 1 1 +GEORGES 1 0 1 1 +GENUINE 1 0 1 1 +GENTLY 1 0 1 1 +GENTLE 1 0 1 1 +GENIUS 1 0 1 1 +GENIALLY 1 0 1 1 +GENEROSITY 1 0 1 1 
+GENERATION 1 0 1 1 +GENERALLY 3 0 3 3 +GAZING 2 0 2 2 +GAZED 3 0 3 3 +GAY 2 0 2 2 +GAULS 1 0 1 1 +GATHERING 2 0 2 2 +GATHERED 8 0 8 8 +GATHER 1 0 1 1 +GATES 1 0 1 1 +GATE 4 0 4 4 +GASPED 2 0 2 2 +GASP 1 0 1 1 +GARNISHMENT 1 0 1 1 +GARMENTS 2 0 2 2 +GARLANDED 1 0 1 1 +GARLAND 1 0 1 1 +GARDEN 7 0 7 7 +GAPS 1 0 1 1 +GAP 1 0 1 1 +GANG 5 0 5 5 +GAMMER 1 0 1 1 +GAMESTER 1 0 1 1 +GAMBLING 3 0 3 3 +GAMBLERS 1 0 1 1 +GALLOPED 1 0 1 1 +GALLERY 1 0 1 1 +GALLANT 3 0 3 3 +GALL 1 0 1 1 +GAIN 3 0 3 3 +GAILY 1 0 1 1 +GAIETY 1 0 1 1 +GAD'S 1 0 1 1 +GAD 1 0 1 1 +GABBLE 1 0 1 1 +G 1 0 1 1 +FUSS 2 0 2 2 +FURY 2 0 2 2 +FURTHEST 1 0 1 1 +FURNITURE 1 0 1 1 +FURNISHED 1 0 1 1 +FURNACE 1 0 1 1 +FURIOUS 2 0 2 2 +FUR 1 0 1 1 +FUNNY 3 0 3 3 +FUNDS 1 0 1 1 +FUND 1 0 1 1 +FUMED 1 0 1 1 +FULLY 1 0 1 1 +FULFILLED 1 0 1 1 +FULFIL 1 0 1 1 +FUEL 1 0 1 1 +FRUITS 4 0 4 4 +FRUITLESS 1 0 1 1 +FRUIT 7 0 7 7 +FROWN 1 0 1 1 +FROSTY 1 0 1 1 +FROST 1 0 1 1 +FRIGHTENS 1 0 1 1 +FRIGHTENED 3 0 3 3 +FRIGHTEN 1 0 1 1 +FRIENDSHIP 2 0 2 2 +FRIENDS 17 0 17 17 +FRIENDLINESS 1 0 1 1 +FRIEND'S 1 0 1 1 +FRIEND 14 0 14 14 +FRIDOLIN 1 0 1 1 +FRIDAY 2 0 2 2 +FRET 2 0 2 2 +FRESHEST 1 0 1 1 +FRESH 5 0 5 5 +FRERE 1 0 1 1 +FREQUENTLY 2 0 2 2 +FREQUENT 2 0 2 2 +FREELY 2 0 2 2 +FREED 2 0 2 2 +FRAUD 1 0 1 1 +FRANTICALLY 1 0 1 1 +FRANKNESS 1 0 1 1 +FRANKLY 1 0 1 1 +FRANK 1 0 1 1 +FRANCS 6 0 6 6 +FRANCOIS 1 0 1 1 +FRANCISCO 5 0 5 5 +FRANCIS 1 0 1 1 +FRAME 1 0 1 1 +FRAGMENTS 1 0 1 1 +FOUNDATION 1 0 1 1 +FORWARDS 3 0 3 3 +FORTUNES 1 0 1 1 +FORTUNE 6 0 6 6 +FORTUNATELY 5 0 5 5 +FORTHWITH 1 0 1 1 +FORTH 4 0 4 4 +FORSOOTH 1 0 1 1 +FORMS 2 0 2 2 +FORMING 2 0 2 2 +FORMIDABLE 2 0 2 2 +FORMERLY 2 0 2 2 +FORMER 4 0 4 4 +FORM 9 0 9 9 +FORGOTTEN 4 0 4 4 +FORGOT 7 0 7 7 +FORGIVE 2 0 2 2 +FORGETTING 1 0 1 1 +FORGET 2 0 2 2 +FORGERIES 1 0 1 1 +FORETASTE 1 0 1 1 +FORESTERS 1 0 1 1 +FOREST 2 0 2 2 +FORESHADOWED 1 0 1 1 +FORENOON 1 0 1 1 +FOREMOST 1 0 1 1 +FORELOCK 1 0 1 1 +FOREIGNERS 1 0 1 1 +FOREHEAD 4 0 4 4 +FOREFINGER 1 0 1 1 +FORBIDDEN 1 0 1 1 +FORBID 1 0 1 1 +FORBEARANCE 1 0 1 1 +FORBEAR 1 0 1 1 +FOOTSTEPS 1 0 1 1 +FOOTNOTE 1 0 1 1 +FOOLISH 3 0 3 3 +FOND 2 0 2 2 +FOLLY 1 0 1 1 +FOLLOWERS 3 0 3 3 +FOLLOWER 1 0 1 1 +FOLKS 3 0 3 3 +FOLDED 2 0 2 2 +FOLD 1 0 1 1 +FOGGY 1 0 1 1 +FOES 2 0 2 2 +FLUTTER 1 0 1 1 +FLUSHED 2 0 2 2 +FLUSH 1 0 1 1 +FLUNG 2 0 2 2 +FLUID 2 0 2 2 +FLOWERS 4 0 4 4 +FLOW 1 0 1 1 +FLOURISHING 1 0 1 1 +FLOURISHED 1 0 1 1 +FLOURISH 1 0 1 1 +FLOATED 1 0 1 1 +FLITTED 1 0 1 1 +FLIRTATION 1 0 1 1 +FLING 1 0 1 1 +FLINCH 1 0 1 1 +FLIGHT 5 0 5 5 +FLICK 1 0 1 1 +FLEERED 1 0 1 1 +FLEECED 1 0 1 1 +FLEE 1 0 1 1 +FLED 4 0 4 4 +FLATTERY 1 0 1 1 +FLASK 1 0 1 1 +FLASHING 1 0 1 1 +FLASHED 1 0 1 1 +FLARING 1 0 1 1 +FLAPPING 1 0 1 1 +FLAMES 2 0 2 2 +FLAME 5 0 5 5 +FLAGRANT 1 0 1 1 +FLAGONS 1 0 1 1 +FLAGON 1 0 1 1 +FLAGGED 1 0 1 1 +FLAG 1 0 1 1 +FIXING 1 0 1 1 +FIXED 5 0 5 5 +FIX 1 0 1 1 +FIVE 20 0 20 20 +FITTED 2 0 2 2 +FITS 1 0 1 1 +FISHER 2 0 2 2 +FIRMLY 2 0 2 2 +FIRM 1 0 1 1 +FIRING 2 0 2 2 +FIREPLACE 1 0 1 1 +FIREMAN 3 0 3 3 +FIREFLY 1 0 1 1 +FIRED 1 0 1 1 +FINS 1 0 1 1 +FINISHING 2 0 2 2 +FINISH 3 0 3 3 +FINGERS 1 0 1 1 +FINEST 1 0 1 1 +FINE 10 0 10 10 +FINANCIAL 1 0 1 1 +FINAL 2 0 2 2 +FIN 1 0 1 1 +FILTER 1 0 1 1 +FILMY 1 0 1 1 +FILLED 5 0 5 5 +FIGURE'S 1 0 1 1 +FIGURE 3 0 3 3 +FIGHTING 1 0 1 1 +FIGHT 5 0 5 5 +FIFTY 14 0 14 14 +FIFTEENTH 1 0 1 1 +FIFTEEN 7 0 7 7 +FIERCE 2 0 2 2 +FIENDISH 1 0 1 1 +FICKLE 2 0 2 2 +FEW 26 0 26 26 +FEVERISH 4 0 4 4 +FEVER 1 0 1 1 +FETTERS 1 0 1 1 +FETCHED 1 0 1 1 +FETCH 7 0 7 7 +FESTIVAL 1 0 1 1 +FEROCIOUS 1 0 1 1 +FENDER 1 0 1 1 +FENCED 
1 0 1 1 +FENCE 4 0 4 4 +FEMALE 1 0 1 1 +FELLOWSHIP 1 0 1 1 +FELLOWS 2 0 2 2 +FEELINGS 3 0 3 3 +FEED 1 0 1 1 +FEEBLY 1 0 1 1 +FEEBLE 2 0 2 2 +FEDERAL 1 0 1 1 +FED 1 0 1 1 +FEBRUARY 5 0 5 5 +FEATURES 1 0 1 1 +FEATHERS 1 0 1 1 +FEATHER 1 0 1 1 +FEARS 1 0 1 1 +FEARLESS 1 0 1 1 +FEARING 1 0 1 1 +FEARFUL 2 0 2 2 +FEARED 4 0 4 4 +FEAR 13 0 13 13 +FAVOURS 1 0 1 1 +FATTER 1 0 1 1 +FATIGUE 2 0 2 2 +FATHERLY 1 0 1 1 +FATALLY 1 0 1 1 +FASTER 2 0 2 2 +FAST 12 0 12 12 +FASHIONS 1 0 1 1 +FASHIONED 2 0 2 2 +FASHION 2 0 2 2 +FASCINATION 1 0 1 1 +FARTHEST 1 0 1 1 +FARTHER 3 0 3 3 +FARMS 1 0 1 1 +FARM 3 0 3 3 +FAREWELL 1 0 1 1 +FARED 1 0 1 1 +FANTASTIC 1 0 1 1 +FANS 1 0 1 1 +FANCY 4 0 4 4 +FANCIFUL 1 0 1 1 +FANCIED 1 0 1 1 +FANATICS 1 0 1 1 +FAMILIES 3 0 3 3 +FAMILIARITY 1 0 1 1 +FAMILIAR 2 0 2 2 +FALSE 1 0 1 1 +FALLEN 1 0 1 1 +FAITHLESS 1 0 1 1 +FAITHFULLY 2 0 2 2 +FAITHFUL 3 0 3 3 +FAIRY 2 0 2 2 +FAINTNESS 1 0 1 1 +FAINTING 2 0 2 2 +FAINT 4 0 4 4 +FAILURES 3 0 3 3 +FAILURE 1 0 1 1 +FAILS 1 0 1 1 +FAGOTS 1 0 1 1 +FAGGOT 1 0 1 1 +FACTS 3 0 3 3 +FACTORIES 2 0 2 2 +FACTOR 1 0 1 1 +FACING 3 0 3 3 +FACES 2 0 2 2 +FABULOUS 1 0 1 1 +EYELIDS 1 0 1 1 +EXTREMELY 4 0 4 4 +EXTREME 2 0 2 2 +EXTINGUISHING 1 0 1 1 +EXTINGUISH 1 0 1 1 +EXTERNAL 2 0 2 2 +EXTENUATING 1 0 1 1 +EXTENT 2 0 2 2 +EXTENSION 1 0 1 1 +EXTENDING 2 0 2 2 +EXTENDED 2 0 2 2 +EXPRESSLY 1 0 1 1 +EXPRESSION 4 0 4 4 +EXPRESSED 3 0 3 3 +EXPOSURE 1 0 1 1 +EXPOSES 1 0 1 1 +EXPLANATORY 1 0 1 1 +EXPLAINING 1 0 1 1 +EXPLAIN 1 0 1 1 +EXPERIMENTS 1 0 1 1 +EXPERIMENTING 1 0 1 1 +EXPERIENCES 1 0 1 1 +EXPERIENCED 1 0 1 1 +EXPENSES 2 0 2 2 +EXPENSE 2 0 2 2 +EXPENDED 1 0 1 1 +EXPELLED 1 0 1 1 +EXPEL 1 0 1 1 +EXPEDIENT 1 0 1 1 +EXPECTS 1 0 1 1 +EXPECT 4 0 4 4 +EXPANSE 2 0 2 2 +EXOTIC 1 0 1 1 +EXIT 1 0 1 1 +EXISTS 1 0 1 1 +EXISTING 1 0 1 1 +EXISTENCE 5 0 5 5 +EXISTED 1 0 1 1 +EXIST 2 0 2 2 +EXHIBITED 4 0 4 4 +EXERTIONS 1 0 1 1 +EXERTING 1 0 1 1 +EXERTED 1 0 1 1 +EXERCISES 1 0 1 1 +EXERCISE 3 0 3 3 +EXECUTIONER'S 2 0 2 2 +EXECUTION 2 0 2 2 +EXECRABLE 1 0 1 1 +EXCUSES 1 0 1 1 +EXCUSE 3 0 3 3 +EXCUSABLE 1 0 1 1 +EXCLAMATION 1 0 1 1 +EXCLAIMING 1 0 1 1 +EXCLAIM 1 0 1 1 +EXCITEMENT 4 0 4 4 +EXCITEDLY 2 0 2 2 +EXCITED 2 0 2 2 +EXCITE 1 0 1 1 +EXCITABILITY 1 0 1 1 +EXCHANGED 2 0 2 2 +EXCHANGE 1 0 1 1 +EXCESSIVELY 1 0 1 1 +EXCESS 1 0 1 1 +EXCEPTIONALLY 2 0 2 2 +EXCEPTION 1 0 1 1 +EXCELLENT 5 0 5 5 +EXCELLENCY 4 0 4 4 +EXCEEDINGLY 1 0 1 1 +EXCEEDING 3 0 3 3 +EXASPERATING 1 0 1 1 +EXAMPLE 3 0 3 3 +EXAMINE 2 0 2 2 +EXALTED 1 0 1 1 +EXAGGERATE 1 0 1 1 +EXACTITUDE 2 0 2 2 +EXACT 1 0 1 1 +EVIDENTLY 4 0 4 4 +EVIDENT 3 0 3 3 +EVIDENCE 2 0 2 2 +EVERYWHERE 4 0 4 4 +EVERYTHING'S 1 0 1 1 +EVERYTHING 15 0 15 15 +EVERYBODY 6 0 6 6 +EVENTS 4 0 4 4 +EVENT 1 0 1 1 +EVENING 9 0 9 9 +EVAPORATION 2 0 2 2 +EVAPORATING 1 0 1 1 +EVAPORATE 3 0 3 3 +EVADED 1 0 1 1 +EUROPEAN 1 0 1 1 +EUROPE 1 0 1 1 +EUNUCH'S 1 0 1 1 +ETHEREAL 2 0 2 2 +ETHELRIED'S 1 0 1 1 +ETERNITY 1 0 1 1 +ESTRANGE 1 0 1 1 +ESTIMATES 1 0 1 1 +ESTHER 1 0 1 1 +ESTEEM 3 0 3 3 +ESTATE 1 0 1 1 +ESTABLISHMENT 1 0 1 1 +ESTABLISHED 2 0 2 2 +ESTABLISH 1 0 1 1 +ESSENTIALLY 1 0 1 1 +ESSENTIAL 1 0 1 1 +ESSENCE 1 0 1 1 +ESSAY 1 0 1 1 +ESQUIRES 1 0 1 1 +ESPECIAL 1 0 1 1 +ESCAPADE 1 0 1 1 +ERROR 2 0 2 2 +ERRANT 1 0 1 1 +ERECTS 1 0 1 1 +ERECTED 3 0 3 3 +ERECT 1 0 1 1 +EQUIVALENT 1 0 1 1 +EQUALLY 2 0 2 2 +EPOCH 1 0 1 1 +EPISTLES 1 0 1 1 +EPISTLE 1 0 1 1 +EPIGASTER 1 0 1 1 +ENVYING 1 0 1 1 +ENVY 3 0 3 3 +ENVIRONMENT 1 0 1 1 +ENVIOUS 1 0 1 1 +ENVIED 1 0 1 1 +ENVELOPE 1 0 1 1 +ENTREATY 1 0 1 1 +ENTREATINGLY 1 0 1 1 +ENTREATIES 1 0 1 1 +ENTREATED 1 0 1 1 
+ENTHUSIASM 3 0 3 3 +ENTERTAINMENT 1 0 1 1 +ENTERTAINING 1 0 1 1 +ENTERTAIN 1 0 1 1 +ENTERED 11 0 11 11 +ENTAILED 1 0 1 1 +ENLISTMENT 1 0 1 1 +ENJOYMENT 3 0 3 3 +ENGRAVED 1 0 1 1 +ENGLAND 3 0 3 3 +ENGAGEMENTS 1 0 1 1 +ENGAGEMENT 1 0 1 1 +ENGAGED 2 0 2 2 +ENGAGE 1 0 1 1 +ENERGY 1 0 1 1 +ENEMIES 2 0 2 2 +ENDURANCE 1 0 1 1 +ENDING 1 0 1 1 +ENDEAVOURED 1 0 1 1 +ENCOURAGED 2 0 2 2 +ENCOUNTERED 1 0 1 1 +ENCOMPASSED 1 0 1 1 +ENCHANTMENT 2 0 2 2 +ENCHANTED 3 0 3 3 +ENCAMPMENT 1 0 1 1 +ENCAMPED 1 0 1 1 +EMPTY 8 0 8 8 +EMPTIES 1 0 1 1 +EMPTIED 2 0 2 2 +EMPRESSES 1 0 1 1 +EMPLOYED 2 0 2 2 +EMPLOY 1 0 1 1 +EMPIRE 3 0 3 3 +EMPHATIC 2 0 2 2 +EMPHASIZE 1 0 1 1 +EMPERORS 2 0 2 2 +EMPEROR 1 0 1 1 +EMOTIONS 2 0 2 2 +EMIR 2 0 2 2 +EMERGED 1 0 1 1 +EMBROIDERY 1 0 1 1 +EMBRACES 1 0 1 1 +EMBRACED 1 0 1 1 +EMBARRASSMENT 1 0 1 1 +EMBARRASSED 1 0 1 1 +EMBARKED 2 0 2 2 +ELYSIAN 1 0 1 1 +ELSIE'S 1 0 1 1 +ELSIE 1 0 1 1 +ELLIS 1 0 1 1 +ELKINS 1 0 1 1 +ELEVENTH 1 0 1 1 +ELEVEN 4 0 4 4 +ELEVATION 1 0 1 1 +ELEPHANT 1 0 1 1 +ELEMENTS 1 0 1 1 +ELEGANT 1 0 1 1 +ELECTRIC 1 0 1 1 +ELECTION 4 0 4 4 +ELDEST 1 0 1 1 +ELBOWED 1 0 1 1 +ELAPSED 1 0 1 1 +ELAPSE 1 0 1 1 +EKED 1 0 1 1 +EJACULATED 1 0 1 1 +EIGHTEENTH 3 0 3 3 +EIGHTEEN 10 0 10 10 +EGYPTIAN 6 0 6 6 +EGYPT 5 0 5 5 +EGG 1 0 1 1 +EFFORTS 1 0 1 1 +EFFORT 4 0 4 4 +EFFECTS 2 0 2 2 +EFFECTIVE 1 0 1 1 +EFFECT 1 0 1 1 +EDWARD 1 0 1 1 +EDUCATION 2 0 2 2 +EDUCATED 1 0 1 1 +EDGES 1 0 1 1 +EDGE 2 0 2 2 +ECONOMIZE 1 0 1 1 +ECONOMICAL 1 0 1 1 +ECHOES 1 0 1 1 +ECCLESIASTICS 1 0 1 1 +EATING 2 0 2 2 +EAT 12 0 12 12 +EASTERN 2 0 2 2 +EASIEST 1 0 1 1 +EARTHEN 1 0 1 1 +EARNED 1 0 1 1 +EARN 2 0 2 2 +EARLY 8 0 8 8 +EARLINESS 1 0 1 1 +EAGLE 4 0 4 4 +EAGERLY 4 0 4 4 +EAGER 2 0 2 2 +E'ER 1 0 1 1 +DYING 6 0 6 6 +DWELT 1 0 1 1 +DWELLS 1 0 1 1 +DWELLINGS 1 0 1 1 +DWELLERS 1 0 1 1 +DWELL 1 0 1 1 +DWARF 2 0 2 2 +DUTIES 2 0 2 2 +DUSTY 1 0 1 1 +DUST 2 0 2 2 +DURATION 2 0 2 2 +DUNNO 1 0 1 1 +DUNNING 1 0 1 1 +DUN 1 0 1 1 +DULL 3 0 3 3 +DUE 5 0 5 5 +DRY 6 0 6 6 +DRUNK 2 0 2 2 +DRUMS 1 0 1 1 +DRUGGED 2 0 2 2 +DROWNING 1 0 1 1 +DROWN 1 0 1 1 +DROVE 1 0 1 1 +DROPS 1 0 1 1 +DROPPED 8 0 8 8 +DROP 3 0 3 3 +DROOPING 2 0 2 2 +DRIVING 1 0 1 1 +DRIVEN 1 0 1 1 +DRIVE 5 0 5 5 +DRINKING 4 0 4 4 +DRINKERS 2 0 2 2 +DRIFT 1 0 1 1 +DRIED 1 0 1 1 +DREW 7 0 7 7 +DRESSING 1 0 1 1 +DRESSES 1 0 1 1 +DRESSED 1 0 1 1 +DRESS 1 0 1 1 +DREAMING 2 0 2 2 +DREAMED 1 0 1 1 +DREADFULLY 1 0 1 1 +DREADFUL 2 0 2 2 +DRAWN 1 0 1 1 +DRANK 2 0 2 2 +DRAMATIC 1 0 1 1 +DRAINED 1 0 1 1 +DRAIN 1 0 1 1 +DRAGONS 1 0 1 1 +DRAGON 1 0 1 1 +DRAGGED 1 0 1 1 +DRAG 1 0 1 1 +DOZEN 2 0 2 2 +DOWNSTAIRS 1 0 1 1 +DOWNS 2 0 2 2 +DOVES 1 0 1 1 +DOUBTLESS 2 0 2 2 +DOUBTFUL 1 0 1 1 +DOUBLE 5 0 5 5 +DOT 1 0 1 1 +DOSE 1 0 1 1 +DOMINION 1 0 1 1 +DOMINATES 1 0 1 1 +DOMED 1 0 1 1 +DOME 2 0 2 2 +DOLLARS 2 0 2 2 +DOINGS 1 0 1 1 +DOGS 2 0 2 2 +DOGGEDLY 1 0 1 1 +DODGING 1 0 1 1 +DIXON 4 0 4 4 +DIVISION 1 0 1 1 +DIVINE 1 0 1 1 +DIVIDES 1 0 1 1 +DIVERT 1 0 1 1 +DISTURBING 1 0 1 1 +DISTURBED 1 0 1 1 +DISTURBANCE 1 0 1 1 +DISTURB 2 0 2 2 +DISTRICTS 1 0 1 1 +DISTRICT 2 0 2 2 +DISTRIBUTED 1 0 1 1 +DISTRIBUTE 1 0 1 1 +DISTRACTED 2 0 2 2 +DISTINGUISH 2 0 2 2 +DISTINCTLY 1 0 1 1 +DISTINCTIVE 1 0 1 1 +DISTINCT 1 0 1 1 +DISTICHS 1 0 1 1 +DISTENDED 1 0 1 1 +DISTANCES 1 0 1 1 +DISTANCE 3 0 3 3 +DISTAFF 1 0 1 1 +DISSIPATION 2 0 2 2 +DISSIMULATION 1 0 1 1 +DISSENTERING 1 0 1 1 +DISSENSIONS 2 0 2 2 +DISREGARDED 1 0 1 1 +DISPUTED 1 0 1 1 +DISPROVE 1 0 1 1 +DISPOSITION 2 0 2 2 +DISPOSAL 1 0 1 1 +DISPLEASED 1 0 1 1 +DISPLAY 1 0 1 1 +DISPERSED 2 0 2 2 +DISPENSED 1 0 1 1 +DISPENSE 1 0 1 1 +DISMAL 
1 0 1 1 +DISHONEST 1 0 1 1 +DISGUST 1 0 1 1 +DISGRACE 4 0 4 4 +DISENTANGLE 1 0 1 1 +DISEASE 1 0 1 1 +DISCUSSIONS 1 0 1 1 +DISCUSSION 1 0 1 1 +DISCUSSED 3 0 3 3 +DISCRIMINATION 1 0 1 1 +DISCRETION 1 0 1 1 +DISCOVERY 3 0 3 3 +DISCOVERIES 1 0 1 1 +DISCOVERED 4 0 4 4 +DISCOVER 2 0 2 2 +DISCOURSES 1 0 1 1 +DISCOURAGEMENTS 1 0 1 1 +DISCONTENT 1 0 1 1 +DISCONCERTION 1 0 1 1 +DISCOMFORT 1 0 1 1 +DISCLOSURES 1 0 1 1 +DISCLOSE 1 0 1 1 +DISCLAIM 1 0 1 1 +DISCIPLINE 1 0 1 1 +DISCERNING 1 0 1 1 +DISAPPOINTED 2 0 2 2 +DISAPPEARS 1 0 1 1 +DISAPPEARED 5 0 5 5 +DISADVANTAGES 3 0 3 3 +DISADVANTAGEOUS 1 0 1 1 +DIRECTLY 3 0 3 3 +DIRECTIONS 1 0 1 1 +DIRECTION 7 0 7 7 +DIRECTED 3 0 3 3 +DIRECT 2 0 2 2 +DIRE 1 0 1 1 +DIP 2 0 2 2 +DINSMORE 2 0 2 2 +DINNER 6 0 6 6 +DINING 1 0 1 1 +DINE 2 0 2 2 +DINARS 1 0 1 1 +DIMPLED 1 0 1 1 +DIMLY 1 0 1 1 +DIMINISHED 1 0 1 1 +DIMINISH 1 0 1 1 +DIM 2 0 2 2 +DILIGENTLY 1 0 1 1 +DILAPIDATED 1 0 1 1 +DIGNITY 4 0 4 4 +DIGGERS 2 0 2 2 +DIGGER 10 0 10 10 +DIFFICULTY 7 0 7 7 +DIFFICULT 2 0 2 2 +DIFFERENT 7 0 7 7 +DIFFERENCES 1 0 1 1 +DIFFERENCE 7 0 7 7 +DIFFER 1 0 1 1 +DIED 13 0 13 13 +DIDST 1 0 1 1 +DICE 1 0 1 1 +DIAMETER 1 0 1 1 +DIALOGUE 1 0 1 1 +DEVOURED 1 0 1 1 +DEVOTIONS 1 0 1 1 +DEVOTION 1 0 1 1 +DEVILS 2 0 2 2 +DEVIL 4 0 4 4 +DEVICE 1 0 1 1 +DEVELOPED 1 0 1 1 +DETECTIVE'S 1 0 1 1 +DETECTION 1 0 1 1 +DETECTED 1 0 1 1 +DETAILS 2 0 2 2 +DETAILED 3 0 3 3 +DESTRUCTION 1 0 1 1 +DESTROYS 1 0 1 1 +DESTROYER 1 0 1 1 +DESTROYED 4 0 4 4 +DESTROY 3 0 3 3 +DESTINED 1 0 1 1 +DESTINATION 1 0 1 1 +DESSERT 2 0 2 2 +DESPOTISM 2 0 2 2 +DESPOILED 1 0 1 1 +DESPISED 1 0 1 1 +DESPISE 1 0 1 1 +DESPERATELY 1 0 1 1 +DESPERATE 2 0 2 2 +DESPAIR 2 0 2 2 +DESIRED 2 0 2 2 +DESIRABLE 1 0 1 1 +DESERVING 1 0 1 1 +DESERVES 1 0 1 1 +DESERVE 2 0 2 2 +DESERTING 1 0 1 1 +DESERTED 2 0 2 2 +DESERT 1 0 1 1 +DESCRIPTION 2 0 2 2 +DESCRIBED 1 0 1 1 +DESCRIBE 1 0 1 1 +DESCEND 1 0 1 1 +DERIVE 1 0 1 1 +DEPRESSION 1 0 1 1 +DEPRECATINGLY 1 0 1 1 +DEPRECATE 1 0 1 1 +DEPOSITED 1 0 1 1 +DEPOSED 1 0 1 1 +DEPLORED 1 0 1 1 +DEPENDS 2 0 2 2 +DEPENDENCE 1 0 1 1 +DEPEND 1 0 1 1 +DEPARTURE 2 0 2 2 +DEPARTMENT 2 0 2 2 +DEPARTING 1 0 1 1 +DEPARTED 3 0 3 3 +DENOUNCED 1 0 1 1 +DENOTING 1 0 1 1 +DENIAL 1 0 1 1 +DEN 1 0 1 1 +DEMETER 4 0 4 4 +DEMANDS 3 0 3 3 +DEMANDED 1 0 1 1 +DELIVERY 1 0 1 1 +DELIVERER 1 0 1 1 +DELIVERED 4 0 4 4 +DELIGHTFUL 2 0 2 2 +DELIGHT 7 0 7 7 +DELICIOUSLY 1 0 1 1 +DELICATE 3 0 3 3 +DELIBERATELY 1 0 1 1 +DELIBERATE 1 0 1 1 +DELAYED 1 0 1 1 +DELAY 3 0 3 3 +DEJECTION 1 0 1 1 +DEITY 1 0 1 1 +DEGREE 1 0 1 1 +DEGENERATING 1 0 1 1 +DEFYING 1 0 1 1 +DEFRAUD 1 0 1 1 +DEFORMED 2 0 2 2 +DEFINED 1 0 1 1 +DEFIANT 1 0 1 1 +DEFENDING 1 0 1 1 +DEFENDERS 2 0 2 2 +DEFEND 4 0 4 4 +DEFECTION 1 0 1 1 +DEFEAT 1 0 1 1 +DEEPLY 3 0 3 3 +DEEMED 1 0 1 1 +DECORATION 1 0 1 1 +DECLINING 1 0 1 1 +DECLARING 1 0 1 1 +DECLARED 1 0 1 1 +DECKS 1 0 1 1 +DECK 6 0 6 6 +DECISION 3 0 3 3 +DECIDED 5 0 5 5 +DECIDE 2 0 2 2 +DECEPTION 1 0 1 1 +DECEMBER 2 0 2 2 +DECEIVED 5 0 5 5 +DECEIVE 1 0 1 1 +DECEASED 1 0 1 1 +DECAY 1 0 1 1 +DEBTS 1 0 1 1 +DEBATED 1 0 1 1 +DEBATE 2 0 2 2 +DEBARRED 1 0 1 1 +DEATHS 1 0 1 1 +DEATHLY 1 0 1 1 +DEATHLIKE 1 0 1 1 +DEATH 16 0 16 16 +DEARER 1 0 1 1 +DEALT 2 0 2 2 +DEALER 1 0 1 1 +DEAL 11 0 11 11 +DEAF 1 0 1 1 +DAZED 1 0 1 1 +DAYLIGHT 2 0 2 2 +DAYBREAK 2 0 2 2 +DAWNED 2 0 2 2 +DAWN 4 0 4 4 +DAVID 2 0 2 2 +DAUNTED 1 0 1 1 +DAUGHTER'S 1 0 1 1 +DAT 1 0 1 1 +DASHING 1 0 1 1 +DASHED 1 0 1 1 +DARTED 1 0 1 1 +DARKNESS 7 0 7 7 +DARING 1 0 1 1 +DARCY'S 1 0 1 1 +DARCY 6 0 6 6 +DANGERS 1 0 1 1 +DANGEROUS 2 0 2 2 +DANDY 1 0 1 1 +DANCE 2 0 2 2 
+DAMPNESS 1 0 1 1 +DAMNED 1 0 1 1 +DAMES 1 0 1 1 +DAMASCUS 4 0 4 4 +DAM 1 0 1 1 +CYNICISM 1 0 1 1 +CUTTER'S 1 0 1 1 +CUTTER 3 0 3 3 +CUSHION 1 0 1 1 +CURVED 2 0 2 2 +CURTAINS 2 0 2 2 +CURSORILY 1 0 1 1 +CURSES 1 0 1 1 +CURSED 2 0 2 2 +CURRENT 1 0 1 1 +CURRENCY 1 0 1 1 +CURL 1 0 1 1 +CURIOUS 4 0 4 4 +CURED 1 0 1 1 +CURE 4 0 4 4 +CURATE 2 0 2 2 +CUPBOARD 2 0 2 2 +CULTURED 1 0 1 1 +CULTURE 1 0 1 1 +CULTIVATED 2 0 2 2 +CULT 1 0 1 1 +CUBITS 1 0 1 1 +CRYSTALLINE 1 0 1 1 +CRYING 1 0 1 1 +CRY 2 0 2 2 +CRUSHED 1 0 1 1 +CRUSADER 1 0 1 1 +CRUELTY 4 0 4 4 +CRUEL 4 0 4 4 +CRUDE 1 0 1 1 +CROWNED 1 0 1 1 +CROWDED 2 0 2 2 +CROWD 5 0 5 5 +CROSSED 5 0 5 5 +CROSS 8 0 8 8 +CROOKS 1 0 1 1 +CROOKED 1 0 1 1 +CROAKING 1 0 1 1 +CRITICS 1 0 1 1 +CRITICAL 2 0 2 2 +CRIPPLED 2 0 2 2 +CRIMSON 1 0 1 1 +CRIMINALS 1 0 1 1 +CRIMINAL 1 0 1 1 +CRICKETS 1 0 1 1 +CREPT 1 0 1 1 +CREEPY 1 0 1 1 +CREEPING 1 0 1 1 +CREDITS 3 0 3 3 +CREATURES 2 0 2 2 +CREATURE 4 0 4 4 +CREATOR 4 0 4 4 +CREATIONS 1 0 1 1 +CREATING 1 0 1 1 +CREATED 3 0 3 3 +CREATE 3 0 3 3 +CREASES 1 0 1 1 +CREASED 1 0 1 1 +CREAKED 1 0 1 1 +CRAWLED 2 0 2 2 +CRASHED 1 0 1 1 +CRASH 1 0 1 1 +CRAFT 1 0 1 1 +CRACKERS 1 0 1 1 +CRACKED 2 0 2 2 +COWARDS 1 0 1 1 +COWARD 1 0 1 1 +COVERING 1 0 1 1 +COVERED 5 0 5 5 +COVER 1 0 1 1 +COVE 1 0 1 1 +COUSINS 1 0 1 1 +COUSIN 10 0 10 10 +COURAGE 4 0 4 4 +COUPLETS 1 0 1 1 +COUPLE 2 0 2 2 +COUNTY 9 0 9 9 +COUNTESS 1 0 1 1 +COUNTER 1 0 1 1 +COUNT'S 2 0 2 2 +COUNSELLED 1 0 1 1 +COUGH 3 0 3 3 +COTTONY 1 0 1 1 +COTTON 3 0 3 3 +COSETTE 2 0 2 2 +CORRIDOR 2 0 2 2 +CORRESPONDENCE 1 0 1 1 +CORPSES 1 0 1 1 +CORPSE 3 0 3 3 +CORPORATIONS 1 0 1 1 +CORNERS 1 0 1 1 +CORDIAL 1 0 1 1 +COPY 1 0 1 1 +COP'S 1 0 1 1 +COOLNESS 2 0 2 2 +COOK 4 0 4 4 +CONVINCING 1 0 1 1 +CONVINCED 1 0 1 1 +CONVICTION 3 0 3 3 +CONVEYANCE 1 0 1 1 +CONVERTS 1 0 1 1 +CONVERSATION 10 0 10 10 +CONVENTIONS 1 0 1 1 +CONVENTION 1 0 1 1 +CONVENT 4 0 4 4 +CONVENIENCES 1 0 1 1 +CONTROLLED 1 0 1 1 +CONTROL 2 0 2 2 +CONTRIVE 1 0 1 1 +CONTRARY 5 0 5 5 +CONTRADICTION 1 0 1 1 +CONTRACTED 1 0 1 1 +CONTRACT 3 0 3 3 +CONTINUED 11 0 11 11 +CONTINUE 3 0 3 3 +CONTINUATION 1 0 1 1 +CONTINUANCE 1 0 1 1 +CONTINUALLY 1 0 1 1 +CONTENTS 1 0 1 1 +CONTENTION 1 0 1 1 +CONTENTED 1 0 1 1 +CONTENT 1 0 1 1 +CONTEMPORARY 2 0 2 2 +CONTAINS 1 0 1 1 +CONTAINING 2 0 2 2 +CONTAINED 1 0 1 1 +CONTAIN 1 0 1 1 +CONTAGIOUS 2 0 2 2 +CONTACT 3 0 3 3 +CONSUMED 2 0 2 2 +CONSULTED 3 0 3 3 +CONSULTATIONS 1 0 1 1 +CONSTRUCT 1 0 1 1 +CONSTRAINED 1 0 1 1 +CONSTITUTE 1 0 1 1 +CONSTITUENT 1 0 1 1 +CONSTANTLY 3 0 3 3 +CONSTANTIUS 1 0 1 1 +CONSPIRATORS 2 0 2 2 +CONSPIRACY 1 0 1 1 +CONSORTED 1 0 1 1 +CONSOLES 1 0 1 1 +CONSISTS 2 0 2 2 +CONSISTENCY 1 0 1 1 +CONSISTED 1 0 1 1 +CONSIDERING 2 0 2 2 +CONSIDERED 3 0 3 3 +CONSIDERATION 2 0 2 2 +CONSIDERABLE 6 0 6 6 +CONSIDER 1 0 1 1 +CONSERVATIVE 2 0 2 2 +CONSEQUENCES 1 0 1 1 +CONSEQUENCE 1 0 1 1 +CONSENTED 1 0 1 1 +CONSENT 2 0 2 2 +CONSCIOUSNESS 2 0 2 2 +CONSCIOUSLY 1 0 1 1 +CONSCIENTIOUS 1 0 1 1 +CONSCIENCES 1 0 1 1 +CONSCIENCE 3 0 3 3 +CONQUEST 3 0 3 3 +CONQUEROR 1 0 1 1 +CONQUERING 1 0 1 1 +CONQUERED 2 0 2 2 +CONNOISSEUR 1 0 1 1 +CONNECTIONS 1 0 1 1 +CONNECTION 4 0 4 4 +CONNECTED 1 0 1 1 +CONNECT 2 0 2 2 +CONJECTURES 1 0 1 1 +CONGRESSES 1 0 1 1 +CONGRESS 3 0 3 3 +CONFUSION 4 0 4 4 +CONFOUND 1 0 1 1 +CONFLICT 2 0 2 2 +CONFISCATION 1 0 1 1 +CONFIRMS 1 0 1 1 +CONFIRMED 2 0 2 2 +CONFINEMENT 1 0 1 1 +CONFIDENTIAL 1 0 1 1 +CONFIDENCE 3 0 3 3 +CONFESSION 4 0 4 4 +CONFESSED 2 0 2 2 +CONFERRING 1 0 1 1 +CONFERENCE 1 0 1 1 +CONFECTIONS 1 0 1 1 +CONFECTIONER 1 0 1 1 +CONFECTIONARY 4 0 4 4 +CONDUCTED 2 0 
2 2 +CONDUCT 4 0 4 4 +CONDITIONS 4 0 4 4 +CONDITION 4 0 4 4 +CONDESCEND 1 0 1 1 +CONDEMNED 2 0 2 2 +CONCLUSION 2 0 2 2 +CONCILIATE 1 0 1 1 +CONCIERGE'S 1 0 1 1 +CONCERNS 1 0 1 1 +CONCERNING 1 0 1 1 +CONCEPTION 5 0 5 5 +CONCENTRATED 2 0 2 2 +CONCENTRATE 1 0 1 1 +CONCEIVE 1 0 1 1 +CONCEITED 1 0 1 1 +CONCEAL 3 0 3 3 +COMTE 1 0 1 1 +COMRADE 3 0 3 3 +COMPULSORY 1 0 1 1 +COMPREHENDED 1 0 1 1 +COMPOUND 1 0 1 1 +COMPOSURE 1 0 1 1 +COMPOSITION 1 0 1 1 +COMPOSED 1 0 1 1 +COMPLY 1 0 1 1 +COMPLIMENT 1 0 1 1 +COMPLICITY 1 0 1 1 +COMPLETELY 6 0 6 6 +COMPLETED 1 0 1 1 +COMPLETE 1 0 1 1 +COMPLAIN 1 0 1 1 +COMPETITION 1 0 1 1 +COMPELLING 1 0 1 1 +COMPELLED 1 0 1 1 +COMPATRIOT 1 0 1 1 +COMPASS 1 0 1 1 +COMPARATIVELY 1 0 1 1 +COMPANY 13 0 13 13 +COMPANIONS 3 0 3 3 +COMPANION'S 1 0 1 1 +COMPANION 4 0 4 4 +COMMUNITY 5 0 5 5 +COMMUNICATION 2 0 2 2 +COMMUNICATES 2 0 2 2 +COMMUNICATED 1 0 1 1 +COMMUNICANTS 1 0 1 1 +COMMONS 3 0 3 3 +COMMONLY 1 0 1 1 +COMMONERS 1 0 1 1 +COMMON 3 0 3 3 +COMMITTED 4 0 4 4 +COMMISSIONED 1 0 1 1 +COMMISSION 1 0 1 1 +COMMISSARY 2 0 2 2 +COMMENT 2 0 2 2 +COMMENDING 1 0 1 1 +COMMENDED 1 0 1 1 +COMMANDING 2 0 2 2 +COMMANDED 2 0 2 2 +COMMAND 2 0 2 2 +COMICAL 1 0 1 1 +COMFORTABLE 2 0 2 2 +COMFORT 2 0 2 2 +COMBATIVE 1 0 1 1 +COMBAT 2 0 2 2 +COLOURED 1 0 1 1 +COLOSSAL 1 0 1 1 +COLONELS 1 0 1 1 +COLLECTOR'S 1 0 1 1 +COLLECTOR 1 0 1 1 +COLLECTION 1 0 1 1 +COLLECTING 2 0 2 2 +COLLAR 2 0 2 2 +COLIC 1 0 1 1 +COINCIDENCES 1 0 1 1 +COIL 1 0 1 1 +COCKING 1 0 1 1 +COAST 2 0 2 2 +COACH 3 0 3 3 +CLUTCHING 1 0 1 1 +CLUTCH 1 0 1 1 +CLUNG 1 0 1 1 +CLUBBED 1 0 1 1 +CLUB 3 0 3 3 +CLOVER 1 0 1 1 +CLOUDS 1 0 1 1 +CLOTHES 8 0 8 8 +CLOTHE 1 0 1 1 +CLOSING 2 0 2 2 +CLOSES 1 0 1 1 +CLOSED 4 0 4 4 +CLOGGED 2 0 2 2 +CLOCKS 1 0 1 1 +CLING 1 0 1 1 +CLERK 2 0 2 2 +CLERICAL 2 0 2 2 +CLENCHING 1 0 1 1 +CLEMENT 1 0 1 1 +CLEMENCY 1 0 1 1 +CLEARLY 1 0 1 1 +CLEARER 1 0 1 1 +CLEARED 2 0 2 2 +CLEAR 7 0 7 7 +CLEANED 2 0 2 2 +CLEAN 4 0 4 4 +CLASPED 1 0 1 1 +CLASP 1 0 1 1 +CLASHING 1 0 1 1 +CLAPPED 1 0 1 1 +CLANKING 1 0 1 1 +CLAIR 3 0 3 3 +CLAIMS 1 0 1 1 +CLAIMED 1 0 1 1 +CIVILIZED 1 0 1 1 +CIVILITY 1 0 1 1 +CIVILITIES 1 0 1 1 +CITY 16 0 16 16 +CITIZENS 6 0 6 6 +CITIZEN 1 0 1 1 +CIRCUMSTANTIAL 1 0 1 1 +CIRCUMSTANCES 6 0 6 6 +CIRCULAR 1 0 1 1 +CIRCUIT 1 0 1 1 +CIRCLES 1 0 1 1 +CIRCLE 2 0 2 2 +CILLEY 1 0 1 1 +CIGARS 1 0 1 1 +CIDER 1 0 1 1 +CHURCHYARDS 1 0 1 1 +CHUCKLED 3 0 3 3 +CHRYSIPPUS 2 0 2 2 +CHRISTMAS 1 0 1 1 +CHRISTI 1 0 1 1 +CHRIST 3 0 3 3 +CHRIS'S 1 0 1 1 +CHOSEN 3 0 3 3 +CHOP 1 0 1 1 +CHOOSE 3 0 3 3 +CHOKE 1 0 1 1 +CHOIR 2 0 2 2 +CHOICE 1 0 1 1 +CHIRP 1 0 1 1 +CHINESE 1 0 1 1 +CHIMNEY 7 0 7 7 +CHIMES 1 0 1 1 +CHILDREN 13 0 13 13 +CHILDLESS 1 0 1 1 +CHILDHOOD 1 0 1 1 +CHILD'S 1 0 1 1 +CHILD 6 0 6 6 +CHERISHED 1 0 1 1 +CHEESE 1 0 1 1 +CHEERFULNESS 1 0 1 1 +CHEERFULLY 1 0 1 1 +CHEERFUL 4 0 4 4 +CHEEKED 1 0 1 1 +CHECKED 1 0 1 1 +CHEATING 1 0 1 1 +CHEAPLY 1 0 1 1 +CHATTING 1 0 1 1 +CHASSEUR 1 0 1 1 +CHASM 1 0 1 1 +CHASED 1 0 1 1 +CHARMS 1 0 1 1 +CHARMING 3 0 3 3 +CHARM 3 0 3 3 +CHARLIE 1 0 1 1 +CHARLES 2 0 2 2 +CHARITY 1 0 1 1 +CHARIOT 1 0 1 1 +CHARGES 2 0 2 2 +CHARGER 1 0 1 1 +CHARCOAL 1 0 1 1 +CHAPTERS 1 0 1 1 +CHAPS 1 0 1 1 +CHAPLET 1 0 1 1 +CHAPEL 6 0 6 6 +CHAP 1 0 1 1 +CHANTED 1 0 1 1 +CHANT 1 0 1 1 +CHANNEL 3 0 3 3 +CHANGING 2 0 2 2 +CHANGED 4 0 4 4 +CHANCES 1 0 1 1 +CHANCELLOR'S 1 0 1 1 +CHANCELLOR 6 0 6 6 +CHANCE 11 0 11 11 +CHAMPIONS 1 0 1 1 +CHAMPAGNE 1 0 1 1 +CHAMBER 5 0 5 5 +CHAIR 5 0 5 5 +CHAFING 2 0 2 2 +CETERA 2 0 2 2 +CESSATION 1 0 1 1 +CERTIFIED 1 0 1 1 +CEREMONY 1 0 1 1 +CENTURY 3 0 3 3 +CENTURIES 5 0 5 5 
+CENTRES 1 0 1 1 +CENTRAL 6 0 6 6 +CENT 1 0 1 1 +CEMETERY 1 0 1 1 +CELLARS 1 0 1 1 +CELIA 1 0 1 1 +CELERY 1 0 1 1 +CELEBRATED 3 0 3 3 +CEASED 7 0 7 7 +CAVALRY 1 0 1 1 +CAUTIOUSLY 1 0 1 1 +CAUTION 1 0 1 1 +CAUSED 1 0 1 1 +CAUSE 7 0 7 7 +CAUGHT 5 0 5 5 +CATCHING 1 0 1 1 +CASTING 2 0 2 2 +CASKET 1 0 1 1 +CASHIER 1 0 1 1 +CASES 2 0 2 2 +CARVED 3 0 3 3 +CARTHUSIANS 1 0 1 1 +CARS 1 0 1 1 +CARRY 7 0 7 7 +CARROT 1 0 1 1 +CARRIAGE 3 0 3 3 +CARPET 1 0 1 1 +CARPENTER 1 0 1 1 +CAROLINA 1 0 1 1 +CARGO 1 0 1 1 +CARESSES 1 0 1 1 +CAREFULLY 3 0 3 3 +CAREFUL 4 0 4 4 +CARDS 1 0 1 1 +CARDINALS 1 0 1 1 +CARBONATE 1 0 1 1 +CARAVAN 1 0 1 1 +CAR 5 0 5 5 +CAPTOR 1 0 1 1 +CAPTIVE 3 0 3 3 +CAPTAIN'S 1 0 1 1 +CAPITULUM 1 0 1 1 +CAPITAL 3 0 3 3 +CAPERING 1 0 1 1 +CAPERED 1 0 1 1 +CAPABLE 2 0 2 2 +CAPABILITIES 1 0 1 1 +CANVAS 1 0 1 1 +CANST 1 0 1 1 +CANOE 1 0 1 1 +CANNOT 21 0 21 21 +CANE 1 0 1 1 +CANDLESTICKS 1 0 1 1 +CANDLESTICK 2 0 2 2 +CANDLES 1 0 1 1 +CANDLE 3 0 3 3 +CANAL 1 0 1 1 +CAMPAIGNS 2 0 2 2 +CALMLY 2 0 2 2 +CALLS 1 0 1 1 +CALLING 2 0 2 2 +CALLETH 1 0 1 1 +CALLEST 1 0 1 1 +CALIPH 1 0 1 1 +CALIFORNIAN 2 0 2 2 +CALCULATE 1 0 1 1 +CAIRO 2 0 2 2 +CAFE 1 0 1 1 +CAESARS 1 0 1 1 +CADET 1 0 1 1 +CABLE'S 1 0 1 1 +CABINET 2 0 2 2 +CABIN 4 0 4 4 +CABARET 1 0 1 1 +BUYING 2 0 2 2 +BUTTERFLIES 1 0 1 1 +BUTTER 5 0 5 5 +BUSTED 2 0 2 2 +BUSINESSES 1 0 1 1 +BUSHY 2 0 2 2 +BUSHES 1 0 1 1 +BURSTING 1 0 1 1 +BURNING 1 0 1 1 +BURNETH 1 0 1 1 +BURNED 2 0 2 2 +BURN 1 0 1 1 +BURIED 7 0 7 7 +BURIAL 1 0 1 1 +BURGUNDY 1 0 1 1 +BURDEN 1 0 1 1 +BUNKER 1 0 1 1 +BUNDLES 2 0 2 2 +BUNDLED 1 0 1 1 +BUMS 1 0 1 1 +BULLOCK 1 0 1 1 +BUILT 1 0 1 1 +BUILDINGS 1 0 1 1 +BUILDING 3 0 3 3 +BUGLE 1 0 1 1 +BUGGY 2 0 2 2 +BUFF 1 0 1 1 +BUD 1 0 1 1 +BUCKLEY 1 0 1 1 +BUCK 1 0 1 1 +BUBBLES 1 0 1 1 +BRUTE 2 0 2 2 +BRUTALLY 1 0 1 1 +BRUTAL 1 0 1 1 +BRUSHED 2 0 2 2 +BRUISING 1 0 1 1 +BROTHERLY 1 0 1 1 +BROTHER 18 0 18 18 +BROTH 1 0 1 1 +BRONZE 1 0 1 1 +BROKER'S 1 0 1 1 +BROKEN 10 0 10 10 +BROKE 7 0 7 7 +BRITISH 1 0 1 1 +BRINGING 3 0 3 3 +BRINGETH 2 0 2 2 +BRING 12 0 12 12 +BRIM 1 0 1 1 +BRIGHT 5 0 5 5 +BRIGANDS 1 0 1 1 +BRIEF 3 0 3 3 +BRIDGE 4 0 4 4 +BRIDE 3 0 3 3 +BRICKS 1 0 1 1 +BRICK 1 0 1 1 +BREWING 1 0 1 1 +BRETHREN 3 0 3 3 +BRED 1 0 1 1 +BREATH 6 0 6 6 +BREASTS 1 0 1 1 +BREAKS 3 0 3 3 +BREAKING 3 0 3 3 +BREAKFAST 4 0 4 4 +BREAKERS 1 0 1 1 +BREAK 6 0 6 6 +BREAD 3 0 3 3 +BREACH 1 0 1 1 +BRAVELY 2 0 2 2 +BRAVE 6 0 6 6 +BRANDON 1 0 1 1 +BRANCHES 2 0 2 2 +BRANCH 3 0 3 3 +BRAG 1 0 1 1 +BRADFORD 1 0 1 1 +BRACKETS 1 0 1 1 +BRACELET 1 0 1 1 +BOXES 2 0 2 2 +BOWS 1 0 1 1 +BOWED 2 0 2 2 +BOW 3 0 3 3 +BOURGEOIS 1 0 1 1 +BOUQUET 2 0 2 2 +BOUNTY 1 0 1 1 +BOUND 5 0 5 5 +BOULEVARD 1 0 1 1 +BOSTON 3 0 3 3 +BOSOM 3 0 3 3 +BORROWED 1 0 1 1 +BORN 7 0 7 7 +BORED 1 0 1 1 +BORE 2 0 2 2 +BORDERS 1 0 1 1 +BORDER 1 0 1 1 +BOOTY 1 0 1 1 +BOOTS 2 0 2 2 +BOOT 1 0 1 1 +BOOKS 1 0 1 1 +BOOKLET 1 0 1 1 +BOOK 8 0 8 8 +BOMB 1 0 1 1 +BOLTS 1 0 1 1 +BOLDER 1 0 1 1 +BOILING 3 0 3 3 +BOILED 2 0 2 2 +BODY 13 0 13 13 +BODILY 3 0 3 3 +BODIES 2 0 2 2 +BOAT'S 1 0 1 1 +BOAST 1 0 1 1 +BOARDING 1 0 1 1 +BLURTED 1 0 1 1 +BLUNTLY 1 0 1 1 +BLUNTED 1 0 1 1 +BLUBBERING 1 0 1 1 +BLOWS 1 0 1 1 +BLOWN 2 0 2 2 +BLOWING 2 0 2 2 +BLOSSOM 1 0 1 1 +BLOOM 1 0 1 1 +BLOOD 8 0 8 8 +BLIZZARD'S 1 0 1 1 +BLIZZARD 2 0 2 2 +BLINDNESS 1 0 1 1 +BLINDING 1 0 1 1 +BLINDED 1 0 1 1 +BLIND 5 0 5 5 +BLESSINGS 1 0 1 1 +BLESSING 2 0 2 2 +BLESSED 6 0 6 6 +BLESS 3 0 3 3 +BLEND 1 0 1 1 +BLEAK 1 0 1 1 +BLAZING 1 0 1 1 +BLANKLY 1 0 1 1 +BLANK 1 0 1 1 +BLAMING 1 0 1 1 +BLAMED 1 0 1 1 +BLAME 2 0 2 2 +BLADE 1 0 1 1 +BLACKSTONE 1 0 1 1 
+BLACKBURN 2 0 2 2 +BITE 1 0 1 1 +BISHOPS 1 0 1 1 +BISHOP 4 0 4 4 +BISCUIT 1 0 1 1 +BIRTHDAY 1 0 1 1 +BIRD 1 0 1 1 +BIRCH 1 0 1 1 +BILLS 2 0 2 2 +BILIOUS 1 0 1 1 +BIGGER 1 0 1 1 +BEYOND 7 0 7 7 +BEWILDERMENT 1 0 1 1 +BETWIXT 1 0 1 1 +BETRAY 1 0 1 1 +BETOOK 1 0 1 1 +BETIDETH 1 0 1 1 +BETIDE 2 0 2 2 +BET 1 0 1 1 +BESS 1 0 1 1 +BESPAKE 1 0 1 1 +BESOUGHT 1 0 1 1 +BESIEGERS 2 0 2 2 +BESEECH 2 0 2 2 +BERNARDONE 1 0 1 1 +BERNARD 4 0 4 4 +BEQUEATH 2 0 2 2 +BENCH 3 0 3 3 +BELOW 2 0 2 2 +BELONG 1 0 1 1 +BELLY 3 0 3 3 +BELLS 4 0 4 4 +BELLIES 1 0 1 1 +BELIEVING 1 0 1 1 +BELIEVES 1 0 1 1 +BELIEVE 16 0 16 16 +BEINGS 3 0 3 3 +BEHOLDING 1 0 1 1 +BEHOLD 5 0 5 5 +BEHIND 16 0 16 16 +BEHAVED 3 0 3 3 +BEHALF 1 0 1 1 +BEGUILED 1 0 1 1 +BEGINNING 6 0 6 6 +BEGGAR 1 0 1 1 +BEFITTING 1 0 1 1 +BEFALLEN 1 0 1 1 +BEDROOM 1 0 1 1 +BECOMES 6 0 6 6 +BECOME 15 0 15 15 +BECAME 10 0 10 10 +BEAVER 1 0 1 1 +BEAUTY 4 0 4 4 +BEAUTIFULLY 1 0 1 1 +BEAUTIFUL 8 0 8 8 +BEATEN 2 0 2 2 +BEASTS 4 0 4 4 +BEAST 2 0 2 2 +BEARD 1 0 1 1 +BEAMS 2 0 2 2 +BEALE'S 1 0 1 1 +BEAD 1 0 1 1 +BEACON 2 0 2 2 +BEACH 2 0 2 2 +BAY 1 0 1 1 +BATTLE 2 0 2 2 +BATTERY 1 0 1 1 +BATON 1 0 1 1 +BATHING 1 0 1 1 +BATHED 1 0 1 1 +BASKING 1 0 1 1 +BASKETS 1 0 1 1 +BASIS 2 0 2 2 +BASIN 1 0 1 1 +BASED 2 0 2 2 +BASE 1 0 1 1 +BARRIER 1 0 1 1 +BARRICADES 1 0 1 1 +BARRED 1 0 1 1 +BARRACK 2 0 2 2 +BARONET 1 0 1 1 +BARKING 1 0 1 1 +BARIUM 1 0 1 1 +BARBAROUS 1 0 1 1 +BARBARITY 1 0 1 1 +BAPTIST 1 0 1 1 +BANQUET 3 0 3 3 +BANKER 1 0 1 1 +BANK 9 0 9 9 +BANISH 1 0 1 1 +BANDS 1 0 1 1 +BANDITS 1 0 1 1 +BANDIT 1 0 1 1 +BALSAM 1 0 1 1 +BALLOCK 1 0 1 1 +BAKING 1 0 1 1 +BAH 1 0 1 1 +BAGGY 1 0 1 1 +BAGGAGE 1 0 1 1 +BADLY 3 0 3 3 +BADGE 1 0 1 1 +BADE 1 0 1 1 +BADAWI 1 0 1 1 +BACON 1 0 1 1 +BACKS 1 0 1 1 +BACKING 1 0 1 1 +BACKGROUND 3 0 3 3 +BACK 51 0 51 51 +BACHELOR 1 0 1 1 +BABYLONIA 1 0 1 1 +AWOKE 1 0 1 1 +AWKWARDNESS 1 0 1 1 +AWKWARDLY 1 0 1 1 +AWE 1 0 1 1 +AWARE 2 0 2 2 +AWAKENING 2 0 2 2 +AWAKENED 1 0 1 1 +AWAITS 1 0 1 1 +AWAITED 1 0 1 1 +AWAIT 1 0 1 1 +AVOIDED 1 0 1 1 +AVERAGE 2 0 2 2 +AUTOMATICALLY 1 0 1 1 +AUTOCRACY 1 0 1 1 +AUTHORITY 10 0 10 10 +AUTHORITIES 1 0 1 1 +AUTHOR 1 0 1 1 +AUTHENTIC 1 0 1 1 +AUSTRIA 1 0 1 1 +AUSPICIOUS 3 0 3 3 +AUGMENTED 1 0 1 1 +AUDACIOUS 1 0 1 1 +ATTRACTIVE 3 0 3 3 +ATTRACTED 2 0 2 2 +ATTORNEY 1 0 1 1 +ATTENTIVELY 4 0 4 4 +ATTENDING 1 0 1 1 +ATTEMPTING 1 0 1 1 +ATTEMPT 2 0 2 2 +ATTAINED 1 0 1 1 +ATTACKS 3 0 3 3 +ATTACKED 1 0 1 1 +ATTACK 2 0 2 2 +ATTACHMENT 1 0 1 1 +ATTACHED 1 0 1 1 +ATMOSPHERE 1 0 1 1 +ATE 1 0 1 1 +ASUNDER 1 0 1 1 +ASTONISHMENT 2 0 2 2 +ASTONISHED 1 0 1 1 +ASSYRIAN 2 0 2 2 +ASSUREDLY 1 0 1 1 +ASSURE 8 0 8 8 +ASSURANCE 2 0 2 2 +ASSUME 1 0 1 1 +ASSOCIATIONS 1 0 1 1 +ASSOCIATES 1 0 1 1 +ASSISTED 1 0 1 1 +ASSISTANT 1 0 1 1 +ASSISTANCE 3 0 3 3 +ASSIST 3 0 3 3 +ASSEMBLY 3 0 3 3 +ASSEMBLED 2 0 2 2 +ASSEMBLAGE 1 0 1 1 +ASSAULT 1 0 1 1 +ASSASSINATED 1 0 1 1 +ASSAILED 1 0 1 1 +ASPECT 1 0 1 1 +ASKING 5 0 5 5 +ASIDE 5 0 5 5 +ASCERTAINING 1 0 1 1 +ASCENSION 1 0 1 1 +ARTS 1 0 1 1 +ARTISTS 4 0 4 4 +ARTICLES 1 0 1 1 +ARTFUL 1 0 1 1 +ARRIVES 1 0 1 1 +ARRIVAL 1 0 1 1 +ARRESTED 1 0 1 1 +ARRANGING 1 0 1 1 +ARRANGEMENTS 1 0 1 1 +ARRANGED 2 0 2 2 +AROSE 2 0 2 2 +ARMY 19 0 19 19 +ARMIES 2 0 2 2 +ARMED 4 0 4 4 +ARKADYEVITCH 1 0 1 1 +ARK 1 0 1 1 +ARISTOCRACY 1 0 1 1 +ARISE 1 0 1 1 +ARGUMENTS 3 0 3 3 +ARGUMENT 1 0 1 1 +ARGUED 1 0 1 1 +ARENA 1 0 1 1 +ARCHITECTURE 1 0 1 1 +ARCHBISHOPS 1 0 1 1 +ARABIC 1 0 1 1 +ARABIANS 1 0 1 1 +AQUA 1 0 1 1 +APTITUDE 1 0 1 1 +APT 1 0 1 1 +APRIL 1 0 1 1 +APPROVAL 1 0 1 1 +APPROACHING 1 0 1 1 +APPROACHED 3 0 3 3 +APPREHENSIONS 1 0 
1 1 +APPOINTMENT 2 0 2 2 +APPLYING 1 0 1 1 +APPLY 3 0 3 3 +APPLAUSE 1 0 1 1 +APPETITE 2 0 2 2 +APPEARING 1 0 1 1 +APPEARANCE 7 0 7 7 +APPEALS 1 0 1 1 +APPEALING 1 0 1 1 +APPEALED 1 0 1 1 +APPEAL 1 0 1 1 +APPARITION 1 0 1 1 +APPARENT 1 0 1 1 +APOLOGY 1 0 1 1 +APERTURE 1 0 1 1 +APARTMENTS 3 0 3 3 +APARTMENT 1 0 1 1 +APART 4 0 4 4 +ANYWHERE 1 0 1 1 +ANYHOW 2 0 2 2 +ANYBODY 2 0 2 2 +ANXIOUS 3 0 3 3 +ANXIETY 5 0 5 5 +ANVILS 1 0 1 1 +ANTIQUARIAN'S 1 0 1 1 +ANTICIPATION 1 0 1 1 +ANTICIPATE 1 0 1 1 +ANTHONY 1 0 1 1 +ANSWERING 2 0 2 2 +ANSWER 15 0 15 15 +ANON 1 0 1 1 +ANNOYANCES 1 0 1 1 +ANNOYANCE 1 0 1 1 +ANNOUNCING 1 0 1 1 +ANNOUNCED 3 0 3 3 +ANNIHILATION 2 0 2 2 +ANNIHILATED 1 0 1 1 +ANIMATED 2 0 2 2 +ANIMALS 4 0 4 4 +ANGER 2 0 2 2 +ANDY 1 0 1 1 +ANCIENTS 1 0 1 1 +ANCIENT 2 0 2 2 +ANCHOR 1 0 1 1 +AMUSING 1 0 1 1 +AMPLY 1 0 1 1 +AMPLE 1 0 1 1 +AMOUNT 1 0 1 1 +AMONG 18 0 18 18 +AMMUNITION 1 0 1 1 +AMISS 1 0 1 1 +AMERICAN 1 0 1 1 +AMERICA 1 0 1 1 +AMENDS 1 0 1 1 +AMENDMENT 1 0 1 1 +AMENDED 1 0 1 1 +AMBITIOUS 1 0 1 1 +AMBITIONS 1 0 1 1 +AMBASSADOR 1 0 1 1 +ALTOGETHER 2 0 2 2 +ALTERED 1 0 1 1 +ALTAR 10 0 10 10 +ALOUD 2 0 2 2 +ALMOST 11 0 11 11 +ALMIGHTY 1 0 1 1 +ALLOWING 1 0 1 1 +ALLOWANCES 1 0 1 1 +ALLIANCE 1 0 1 1 +ALLEN 1 0 1 1 +ALLAH'S 1 0 1 1 +ALLAH 9 0 9 9 +ALKALOID 1 0 1 1 +ALIMONY 2 0 2 2 +ALIKE 2 0 2 2 +ALI'S 1 0 1 1 +ALEX 1 0 1 1 +ALBERT 10 0 10 10 +ALAS 1 0 1 1 +ALARMS 1 0 1 1 +ALARMED 1 0 1 1 +ALARM 3 0 3 3 +AIM 4 0 4 4 +AILMENTS 2 0 2 2 +AHEAD 2 0 2 2 +AGREES 1 0 1 1 +AGREEMENT 2 0 2 2 +AGREED 6 0 6 6 +AGREEABLE 2 0 2 2 +AGREE 1 0 1 1 +AGONY 1 0 1 1 +AGO 10 0 10 10 +AGITATOR 1 0 1 1 +AGITATION 1 0 1 1 +AGITATING 1 0 1 1 +AGILITY 1 0 1 1 +AGHAST 1 0 1 1 +AGGRESSIVENESS 1 0 1 1 +AGGRAVATIONS 1 0 1 1 +AGGRAVATED 1 0 1 1 +AGES 1 0 1 1 +AGED 2 0 2 2 +AGAINST 27 0 27 27 +AFTERNOON 6 0 6 6 +AFRICAN 1 0 1 1 +AFRAID 9 0 9 9 +AFFORD 2 0 2 2 +AFFIRMED 1 0 1 1 +AFFECTIONS 1 0 1 1 +AFFECTIONATELY 1 0 1 1 +AFFECTED 2 0 2 2 +AFFAIRS 2 0 2 2 +AFFAIR 2 0 2 2 +ADVISEDLY 1 0 1 1 +ADVISE 1 0 1 1 +ADVENT 1 0 1 1 +ADVANTAGES 2 0 2 2 +ADVANTAGE 4 0 4 4 +ADVANCING 1 0 1 1 +ADVANCES 2 0 2 2 +ADVANCED 1 0 1 1 +ADVANCE 5 0 5 5 +ADRIFT 1 0 1 1 +ADORNED 1 0 1 1 +ADORN 1 0 1 1 +ADMITTED 4 0 4 4 +ADMIT 1 0 1 1 +ADMISSION 1 0 1 1 +ADMIRATION 5 0 5 5 +ADMIRABLE 1 0 1 1 +ADMINISTRATION 3 0 3 3 +ADMINISTERED 1 0 1 1 +ADJACENT 1 0 1 1 +ADHERENT 1 0 1 1 +ADDRESSING 1 0 1 1 +ADDRESSED 1 0 1 1 +ADDRESS 3 0 3 3 +ADDITION 1 0 1 1 +ADDED 13 0 13 13 +ADAGE 1 0 1 1 +ACTORS 1 0 1 1 +ACTIVITIES 1 0 1 1 +ACTIONS 2 0 2 2 +ACTION 2 0 2 2 +ACTING 2 0 2 2 +ACTED 1 0 1 1 +ACROSS 8 0 8 8 +ACQUITTAL 1 0 1 1 +ACQUIT 1 0 1 1 +ACQUISITIVE 1 0 1 1 +ACQUAINTED 3 0 3 3 +ACQUAINTANCES 1 0 1 1 +ACQUAINTANCE 2 0 2 2 +ACKNOWLEDGMENT 1 0 1 1 +ACIDS 1 0 1 1 +ACID 4 0 4 4 +ACHIEVED 1 0 1 1 +ACHED 1 0 1 1 +ACE 1 0 1 1 +ACCUSTOMED 2 0 2 2 +ACCUSING 3 0 3 3 +ACCUSED 1 0 1 1 +ACCUSATION 4 0 4 4 +ACCURATE 1 0 1 1 +ACCOUNTS 2 0 2 2 +ACCOUNTED 1 0 1 1 +ACCORDINGLY 5 0 5 5 +ACCORDING 7 0 7 7 +ACCORDANCE 1 0 1 1 +ACCOMPLISHMENTS 1 0 1 1 +ACCOMPLISHED 1 0 1 1 +ACCOMPLICE 2 0 2 2 +ACCOMPANY 1 0 1 1 +ACCOMPANIED 3 0 3 3 +ACCOMMODATING 1 0 1 1 +ACCIDENTS 1 0 1 1 +ACCIDENT 1 0 1 1 +ACCESSION 1 0 1 1 +ACCESSIBLE 1 0 1 1 +ACCEPTED 4 0 4 4 +ACCEPTABLE 1 0 1 1 +ABYSSINIANS 2 0 2 2 +ABUSING 1 0 1 1 +ABUSES 2 0 2 2 +ABUSE 1 0 1 1 +ABUNDANTLY 1 0 1 1 +ABUNDANT 1 0 1 1 +ABSORBING 2 0 2 2 +ABSORBED 4 0 4 4 +ABSORB 1 0 1 1 +ABSOLVED 1 0 1 1 +ABSOLUTELY 3 0 3 3 +ABSOLUTE 1 0 1 1 +ABSENTED 1 0 1 1 +ABSENT 1 0 1 1 +ABSENCE 4 0 4 4 +ABRUPTLY 3 0 3 3 +ABOLISH 1 0 1 1 +ABODE 3 0 3 3 
+ABNORMAL 1 0 1 1 +ABLE 11 0 11 11 +ABILITY 1 0 1 1 +ABILITIES 1 0 1 1 +ABIDE 1 0 1 1 +ABBOT 1 0 1 1 +ABACK 1 0 1 1 +AARON 1 0 1 1 diff --git a/log/modified_beam_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model-2023-02-12-09-10-17 b/log/modified_beam_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model-2023-02-12-09-10-17 new file mode 100644 index 0000000000000000000000000000000000000000..0bd9dba4c1524e6ed23a2e66e6a3b659cb7ac717 --- /dev/null +++ b/log/modified_beam_search/log-decode-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model-2023-02-12-09-10-17 @@ -0,0 +1,45 @@ +2023-02-12 09:10:17,598 INFO [decode.py:655] Decoding started +2023-02-12 09:10:17,598 INFO [decode.py:661] Device: cuda:0 +2023-02-12 09:10:17,601 INFO [decode.py:671] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.3', 'k2-build-type': 'Debug', 'k2-with-cuda': True, 'k2-git-sha1': '3b81ac9686aee539d447bb2085b2cdfc131c7c91', 'k2-git-date': 'Thu Jan 26 20:40:25 2023', 'lhotse-version': '1.9.0.dev+git.97bf4b0.dirty', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'surt', 'icefall-git-sha1': 'f8acb25-dirty', 'icefall-git-date': 'Thu Feb 9 12:58:59 2023', 'icefall-path': '/exp/draj/mini_scale_2022/icefall', 'k2-path': '/exp/draj/mini_scale_2022/k2/k2/python/k2/__init__.py', 'lhotse-path': '/exp/draj/mini_scale_2022/lhotse/lhotse/__init__.py', 'hostname': 'r7n03', 'IP address': '10.1.7.3'}, 'epoch': 30, 'iter': 0, 'avg': 9, 'use_averaged_model': True, 'exp_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 20.0, 'ngram_lm_scale': 0.01, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'num_paths': 200, 'nbest_scale': 0.5, 'num_encoder_layers': '2,2,2,2,2', 'feedforward_dims': '768,768,768,768,768', 'nhead': '8,8,8,8,8', 'encoder_dims': '256,256,256,256,256', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '192,192,192,192,192', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'short_chunk_size': 50, 'num_left_chunks': 4, 'decode_chunk_len': 32, 'full_libri': True, 'manifest_dir': PosixPath('data/manifests'), 'max_duration': 500, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('pruned_transducer_stateless7_streaming/exp/v1/modified_beam_search'), 'suffix': 'epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500} +2023-02-12 09:10:17,601 INFO [decode.py:673] About to create model +2023-02-12 09:10:17,862 INFO [zipformer.py:402] At encoder stack 4, which has downsampling_factor=2, we 
will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8. +2023-02-12 09:10:17,870 INFO [decode.py:744] Calculating the averaged model over epoch range from 21 (excluded) to 30 +2023-02-12 09:10:23,393 INFO [decode.py:778] Number of model parameters: 20697573 +2023-02-12 09:10:23,394 INFO [asr_datamodule.py:444] About to get test-clean cuts +2023-02-12 09:10:23,532 INFO [asr_datamodule.py:451] About to get test-other cuts +2023-02-12 09:10:30,409 INFO [decode.py:560] batch 0/?, cuts processed until now is 36 +2023-02-12 09:12:23,706 INFO [decode.py:560] batch 20/?, cuts processed until now is 1038 +2023-02-12 09:13:00,677 INFO [zipformer.py:2431] attn_weights_entropy = tensor([3.7696, 3.6940, 3.4648, 2.1037, 3.2757, 3.5310, 3.4728, 3.3882], + device='cuda:0'), covar=tensor([0.0950, 0.0609, 0.0885, 0.5375, 0.1094, 0.0782, 0.1186, 0.0733], + device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0443, 0.0434, 0.0544, 0.0429, 0.0451, 0.0427, 0.0392], + device='cuda:0'), out_proj_covar=tensor([0.0003, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-12 09:14:04,911 INFO [decode.py:560] batch 40/?, cuts processed until now is 2296 +2023-02-12 09:14:38,473 INFO [decode.py:576] The transcripts are stored in pruned_transducer_stateless7_streaming/exp/v1/modified_beam_search/recogs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt +2023-02-12 09:14:38,536 INFO [utils.py:538] [test-clean-beam_size_4] %WER 3.88% [2038 / 52576, 255 ins, 160 del, 1623 sub ] +2023-02-12 09:14:38,761 INFO [decode.py:589] Wrote detailed error stats to pruned_transducer_stateless7_streaming/exp/v1/modified_beam_search/errs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt +2023-02-12 09:14:38,762 INFO [decode.py:605] +For test-clean, WER of different settings are: +beam_size_4 3.88 best for test-clean + +2023-02-12 09:14:44,597 INFO [decode.py:560] batch 0/?, cuts processed until now is 43 +2023-02-12 09:15:16,588 INFO [zipformer.py:2431] attn_weights_entropy = tensor([1.5588, 1.8418, 2.6260, 1.4591, 1.9960, 1.9089, 1.6740, 2.0659], + device='cuda:0'), covar=tensor([0.1906, 0.2949, 0.0991, 0.5130, 0.2067, 0.3422, 0.2614, 0.2238], + device='cuda:0'), in_proj_covar=tensor([0.0529, 0.0621, 0.0552, 0.0656, 0.0651, 0.0599, 0.0549, 0.0635], + device='cuda:0'), out_proj_covar=tensor([0.0002, 0.0002, 0.0002, 0.0003, 0.0002, 0.0002, 0.0002, 0.0002], + device='cuda:0') +2023-02-12 09:15:27,052 INFO [zipformer.py:2431] attn_weights_entropy = tensor([1.3968, 1.5670, 1.5791, 1.1139, 1.6266, 1.3491, 0.3394, 1.5611], + device='cuda:0'), covar=tensor([0.0519, 0.0407, 0.0368, 0.0564, 0.0482, 0.1088, 0.1001, 0.0316], + device='cuda:0'), in_proj_covar=tensor([0.0459, 0.0397, 0.0350, 0.0449, 0.0382, 0.0540, 0.0392, 0.0425], + device='cuda:0'), out_proj_covar=tensor([1.2185e-04, 1.0291e-04, 9.1160e-05, 1.1731e-04, 9.9927e-05, 1.5083e-04, + 1.0519e-04, 1.1140e-04], device='cuda:0') +2023-02-12 09:16:30,704 INFO [decode.py:560] batch 20/?, cuts processed until now is 1198 +2023-02-12 09:18:11,673 INFO [decode.py:560] batch 40/?, cuts processed until now is 2642 +2023-02-12 09:18:40,706 INFO [decode.py:576] The transcripts are stored in pruned_transducer_stateless7_streaming/exp/v1/modified_beam_search/recogs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt +2023-02-12 09:18:40,785 INFO 
[utils.py:538] [test-other-beam_size_4] %WER 9.53% [4988 / 52343, 533 ins, 455 del, 4000 sub ] +2023-02-12 09:18:40,939 INFO [decode.py:589] Wrote detailed error stats to pruned_transducer_stateless7_streaming/exp/v1/modified_beam_search/errs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt +2023-02-12 09:18:40,940 INFO [decode.py:605] +For test-other, WER of different settings are: +beam_size_4 9.53 best for test-other + +2023-02-12 09:18:40,940 INFO [decode.py:809] Done! diff --git a/log/modified_beam_search/recogs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt b/log/modified_beam_search/recogs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..f85b1d91b0216d6789ee4254ad586c075371e32e --- /dev/null +++ b/log/modified_beam_search/recogs-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt @@ -0,0 +1,5240 @@ +1089-134686-0000-1733: ref=['HE', 'HOPED', 'THERE', 'WOULD', 'BE', 'STEW', 'FOR', 'DINNER', 'TURNIPS', 'AND', 'CARROTS', 'AND', 'BRUISED', 'POTATOES', 'AND', 'FAT', 'MUTTON', 'PIECES', 'TO', 'BE', 'LADLED', 'OUT', 'IN', 'THICK', 'PEPPERED', 'FLOUR', 'FATTENED', 'SAUCE'] +1089-134686-0000-1733: hyp=['HE', 'HOPED', 'THERE', 'WOULD', 'BE', 'STEW', 'FOR', 'DINNER', 'TURNIPS', 'AND', 'CARROTS', 'AND', 'BRUISED', 'POTATOES', 'AND', 'FAT', 'MUTTON', 'PIECES', 'TO', 'BE', 'LADLED', 'OUT', 'IN', 'THICK', 'PEPPERED', 'FLOWER', 'FAT', 'AND', 'SAUCE'] +1089-134686-0001-1734: ref=['STUFF', 'IT', 'INTO', 'YOU', 'HIS', 'BELLY', 'COUNSELLED', 'HIM'] +1089-134686-0001-1734: hyp=['STUFF', 'IT', 'INTO', 'YOU', 'HIS', 'BELLY', 'COUNSELLED', 'HIM'] +1089-134686-0002-1735: ref=['AFTER', 'EARLY', 'NIGHTFALL', 'THE', 'YELLOW', 'LAMPS', 'WOULD', 'LIGHT', 'UP', 'HERE', 'AND', 'THERE', 'THE', 'SQUALID', 'QUARTER', 'OF', 'THE', 'BROTHELS'] +1089-134686-0002-1735: hyp=['AFTER', 'EARLY', 'NIGHTFALL', 'THE', 'YELLOW', 'LAMPS', 'WOULD', 'LIGHT', 'UP', 'HERE', 'AND', 'THERE', 'THE', 'SQUALID', 'QUARTER', 'OF', 'THE', 'BRAFFLELS'] +1089-134686-0003-1736: ref=['HELLO', 'BERTIE', 'ANY', 'GOOD', 'IN', 'YOUR', 'MIND'] +1089-134686-0003-1736: hyp=['HALLO', 'BERTIE', 'ANY', 'GOOD', 'IN', 'YOUR', 'MIND'] +1089-134686-0004-1737: ref=['NUMBER', 'TEN', 'FRESH', 'NELLY', 'IS', 'WAITING', 'ON', 'YOU', 'GOOD', 'NIGHT', 'HUSBAND'] +1089-134686-0004-1737: hyp=['NUMBER', 'TEN', 'FRESH', 'NELLIERS', 'WAITING', 'ON', 'YOU', 'GOOD', 'NIGHT', 'HUSBAND'] +1089-134686-0005-1738: ref=['THE', 'MUSIC', 'CAME', 'NEARER', 'AND', 'HE', 'RECALLED', 'THE', 'WORDS', 'THE', 'WORDS', 'OF', "SHELLEY'S", 'FRAGMENT', 'UPON', 'THE', 'MOON', 'WANDERING', 'COMPANIONLESS', 'PALE', 'FOR', 'WEARINESS'] +1089-134686-0005-1738: hyp=['THE', 'MUSIC', 'CAME', 'NEARER', 'AND', 'HE', 'RECALLED', 'THE', 'WORDS', 'THE', 'WORDS', 'OF', "SHELLEY'S", 'FRAGMENT', 'UPON', 'THE', 'MOON', 'WANDERING', 'COMPANIONLESS', 'PALE', 'FOR', 'WEARINESS'] +1089-134686-0006-1739: ref=['THE', 'DULL', 'LIGHT', 'FELL', 'MORE', 'FAINTLY', 'UPON', 'THE', 'PAGE', 'WHEREON', 'ANOTHER', 'EQUATION', 'BEGAN', 'TO', 'UNFOLD', 'ITSELF', 'SLOWLY', 'AND', 'TO', 'SPREAD', 'ABROAD', 'ITS', 'WIDENING', 'TAIL'] +1089-134686-0006-1739: hyp=['THE', 'DULL', 'LIGHT', 'FELL', 'MORE', 'FAINTLY', 'UPON', 'THE', 'PAGE', 'WHEREON', 'ANOTHER', 'EQUATION', 'BEGAN', 'TO', 
'UNFOLD', 'ITSELF', 'SLOWLY', 'AND', 'TO', 'SPREAD', 'ABROAD', 'ITS', 'WIDENING', 'TAIL'] +1089-134686-0007-1740: ref=['A', 'COLD', 'LUCID', 'INDIFFERENCE', 'REIGNED', 'IN', 'HIS', 'SOUL'] +1089-134686-0007-1740: hyp=['A', 'COLD', 'LUCID', 'INDIFFERENCE', 'REIGNED', 'IN', 'HIS', 'SOUL'] +1089-134686-0008-1741: ref=['THE', 'CHAOS', 'IN', 'WHICH', 'HIS', 'ARDOUR', 'EXTINGUISHED', 'ITSELF', 'WAS', 'A', 'COLD', 'INDIFFERENT', 'KNOWLEDGE', 'OF', 'HIMSELF'] +1089-134686-0008-1741: hyp=['THE', 'CHAOS', 'IN', 'WHICH', 'HIS', 'ARDOUR', 'EXTINGUISHED', 'ITSELF', 'WAS', 'A', 'COLD', 'INDIFFERENT', 'KNOWLEDGE', 'OF', 'HIMSELF'] +1089-134686-0009-1742: ref=['AT', 'MOST', 'BY', 'AN', 'ALMS', 'GIVEN', 'TO', 'A', 'BEGGAR', 'WHOSE', 'BLESSING', 'HE', 'FLED', 'FROM', 'HE', 'MIGHT', 'HOPE', 'WEARILY', 'TO', 'WIN', 'FOR', 'HIMSELF', 'SOME', 'MEASURE', 'OF', 'ACTUAL', 'GRACE'] +1089-134686-0009-1742: hyp=['AT', 'MOST', 'BY', 'AN', 'ALMS', 'GIVEN', 'TO', 'A', 'BEGGAR', 'WHOSE', 'BLESSING', 'HE', 'FLED', 'FROM', 'HE', 'MIGHT', 'HOPE', 'WEARILY', 'TO', 'WIN', 'FOR', 'HIMSELF', 'SOME', 'MEASURE', 'OF', 'ACTUAL', 'GRACE'] +1089-134686-0010-1743: ref=['WELL', 'NOW', 'ENNIS', 'I', 'DECLARE', 'YOU', 'HAVE', 'A', 'HEAD', 'AND', 'SO', 'HAS', 'MY', 'STICK'] +1089-134686-0010-1743: hyp=['WELL', 'NOW', 'ENNIS', 'I', 'DECLARE', 'YOU', 'HAVE', 'A', 'HEAD', 'AND', 'SO', 'HAS', 'MY', 'STICK'] +1089-134686-0011-1744: ref=['ON', 'SATURDAY', 'MORNINGS', 'WHEN', 'THE', 'SODALITY', 'MET', 'IN', 'THE', 'CHAPEL', 'TO', 'RECITE', 'THE', 'LITTLE', 'OFFICE', 'HIS', 'PLACE', 'WAS', 'A', 'CUSHIONED', 'KNEELING', 'DESK', 'AT', 'THE', 'RIGHT', 'OF', 'THE', 'ALTAR', 'FROM', 'WHICH', 'HE', 'LED', 'HIS', 'WING', 'OF', 'BOYS', 'THROUGH', 'THE', 'RESPONSES'] +1089-134686-0011-1744: hyp=['ON', 'SATURDAY', 'MORNINGS', 'WHEN', 'THE', 'SODALITY', 'MET', 'IN', 'THE', 'CHAPEL', 'TO', 'RECITE', 'THE', 'LITTLE', 'OFFICE', 'HIS', 'PLACE', 'WAS', 'A', 'CUSHIONED', 'KNEELING', 'DESK', 'AT', 'THE', 'RIGHT', 'OF', 'THE', 'ALTAR', 'FROM', 'WHICH', 'HE', 'LED', 'HIS', 'WING', 'OF', 'BOYS', 'THROUGH', 'THE', 'RESPONSES'] +1089-134686-0012-1745: ref=['HER', 'EYES', 'SEEMED', 'TO', 'REGARD', 'HIM', 'WITH', 'MILD', 'PITY', 'HER', 'HOLINESS', 'A', 'STRANGE', 'LIGHT', 'GLOWING', 'FAINTLY', 'UPON', 'HER', 'FRAIL', 'FLESH', 'DID', 'NOT', 'HUMILIATE', 'THE', 'SINNER', 'WHO', 'APPROACHED', 'HER'] +1089-134686-0012-1745: hyp=['HER', 'EYES', 'SEEMED', 'TO', 'REGARD', 'HIM', 'WITH', 'MILD', 'PITY', 'HER', 'HOLINESS', 'A', 'STRANGE', 'LIGHT', 'GLOWING', 'FAINTLY', 'UPON', 'HER', 'FRAIL', 'FLESH', 'DID', 'NOT', 'HUMILIATE', 'THE', 'SINNER', 'WHO', 'APPROACHED', 'HER'] +1089-134686-0013-1746: ref=['IF', 'EVER', 'HE', 'WAS', 'IMPELLED', 'TO', 'CAST', 'SIN', 'FROM', 'HIM', 'AND', 'TO', 'REPENT', 'THE', 'IMPULSE', 'THAT', 'MOVED', 'HIM', 'WAS', 'THE', 'WISH', 'TO', 'BE', 'HER', 'KNIGHT'] +1089-134686-0013-1746: hyp=['IF', 'EVER', 'HE', 'WAS', 'IMPELLED', 'TO', 'CAST', 'SIN', 'FROM', 'HIM', 'AND', 'TO', 'REPENT', 'THE', 'IMPULSE', 'THAT', 'MOVED', 'HIM', 'WAS', 'THE', 'WISH', 'TO', 'BE', 'HER', 'KNIGHT'] +1089-134686-0014-1747: ref=['HE', 'TRIED', 'TO', 'THINK', 'HOW', 'IT', 'COULD', 'BE'] +1089-134686-0014-1747: hyp=['HE', 'TRIED', 'TO', 'THINK', 'HOW', 'IT', 'COULD', 'BE'] +1089-134686-0015-1748: ref=['BUT', 'THE', 'DUSK', 'DEEPENING', 'IN', 'THE', 'SCHOOLROOM', 'COVERED', 'OVER', 'HIS', 'THOUGHTS', 'THE', 'BELL', 'RANG'] +1089-134686-0015-1748: hyp=['BUT', 'THE', 'DUSK', 'DEEPENING', 'IN', 'THE', 'SCHOOLROOM', 'COVERED', 'OVER', 'HIS', 'THOUGHTS', 'THE', 'BELL', 'RANG'] 
+1089-134686-0016-1749: ref=['THEN', 'YOU', 'CAN', 'ASK', 'HIM', 'QUESTIONS', 'ON', 'THE', 'CATECHISM', 'DEDALUS'] +1089-134686-0016-1749: hyp=['THEN', 'YOU', 'CAN', 'ASK', 'HIM', 'QUESTIONS', 'ON', 'THE', 'CATECHISM', 'DEDALUS'] +1089-134686-0017-1750: ref=['STEPHEN', 'LEANING', 'BACK', 'AND', 'DRAWING', 'IDLY', 'ON', 'HIS', 'SCRIBBLER', 'LISTENED', 'TO', 'THE', 'TALK', 'ABOUT', 'HIM', 'WHICH', 'HERON', 'CHECKED', 'FROM', 'TIME', 'TO', 'TIME', 'BY', 'SAYING'] +1089-134686-0017-1750: hyp=['STEPHEN', 'LEANING', 'BACK', 'AND', 'DRAWING', 'IDLY', 'ON', 'HIS', 'SCRIBBLER', 'LISTENED', 'TO', 'THE', 'TALK', 'ABOUT', 'HIM', 'WHICH', 'HERON', 'CHECKED', 'FROM', 'TIME', 'TO', 'TIME', 'BY', 'SAYING'] +1089-134686-0018-1751: ref=['IT', 'WAS', 'STRANGE', 'TOO', 'THAT', 'HE', 'FOUND', 'AN', 'ARID', 'PLEASURE', 'IN', 'FOLLOWING', 'UP', 'TO', 'THE', 'END', 'THE', 'RIGID', 'LINES', 'OF', 'THE', 'DOCTRINES', 'OF', 'THE', 'CHURCH', 'AND', 'PENETRATING', 'INTO', 'OBSCURE', 'SILENCES', 'ONLY', 'TO', 'HEAR', 'AND', 'FEEL', 'THE', 'MORE', 'DEEPLY', 'HIS', 'OWN', 'CONDEMNATION'] +1089-134686-0018-1751: hyp=['IT', 'WAS', 'STRANGE', 'TOO', 'THAT', 'HE', 'FOUND', 'AN', 'ARID', 'PLEASURE', 'IN', 'FOLLOWING', 'UP', 'TO', 'THE', 'END', 'THE', 'RIGID', 'LINES', 'OF', 'THE', 'DOCTRINES', 'OF', 'THE', 'CHURCH', 'AND', 'PENETRATING', 'INTO', 'OBSCURE', 'SILENCES', 'ONLY', 'TO', 'HEAR', 'AND', 'FEEL', 'THE', 'MORE', 'DEEPLY', 'HIS', 'OWN', 'CONDEMNATION'] +1089-134686-0019-1752: ref=['THE', 'SENTENCE', 'OF', 'SAINT', 'JAMES', 'WHICH', 'SAYS', 'THAT', 'HE', 'WHO', 'OFFENDS', 'AGAINST', 'ONE', 'COMMANDMENT', 'BECOMES', 'GUILTY', 'OF', 'ALL', 'HAD', 'SEEMED', 'TO', 'HIM', 'FIRST', 'A', 'SWOLLEN', 'PHRASE', 'UNTIL', 'HE', 'HAD', 'BEGUN', 'TO', 'GROPE', 'IN', 'THE', 'DARKNESS', 'OF', 'HIS', 'OWN', 'STATE'] +1089-134686-0019-1752: hyp=['THE', 'SENTENCE', 'OF', 'SAINT', 'JAMES', 'WHICH', 'SAYS', 'THAT', 'HE', 'WHO', 'OFFENDS', 'AGAINST', 'ONE', 'COMMANDMENT', 'BECOMES', 'GUILTY', 'OF', 'ALL', 'HAD', 'SEEMED', 'TO', 'HIM', 'FIRST', 'A', 'SWOLLEN', 'PHRASE', 'UNTIL', 'HE', 'HAD', 'BEGUN', 'TO', 'GROPE', 'IN', 'THE', 'DARKNESS', 'OF', 'HIS', 'OWN', 'STATE'] +1089-134686-0020-1753: ref=['IF', 'A', 'MAN', 'HAD', 'STOLEN', 'A', 'POUND', 'IN', 'HIS', 'YOUTH', 'AND', 'HAD', 'USED', 'THAT', 'POUND', 'TO', 'AMASS', 'A', 'HUGE', 'FORTUNE', 'HOW', 'MUCH', 'WAS', 'HE', 'OBLIGED', 'TO', 'GIVE', 'BACK', 'THE', 'POUND', 'HE', 'HAD', 'STOLEN', 'ONLY', 'OR', 'THE', 'POUND', 'TOGETHER', 'WITH', 'THE', 'COMPOUND', 'INTEREST', 'ACCRUING', 'UPON', 'IT', 'OR', 'ALL', 'HIS', 'HUGE', 'FORTUNE'] +1089-134686-0020-1753: hyp=['IF', 'A', 'MAN', 'HAD', 'STOLEN', 'A', 'POUND', 'IN', 'HIS', 'YOUTH', 'AND', 'HAD', 'USED', 'THAT', 'POUND', 'TO', 'AMASS', 'A', 'HUGE', 'FORTUNE', 'HOW', 'MUCH', 'WAS', 'HE', 'OBLIGED', 'TO', 'GIVE', 'BACK', 'THE', 'POUND', 'HE', 'HAD', 'STOLEN', 'ONLY', 'WERE', 'THE', 'POUND', 'TOGETHER', 'WITH', 'THE', 'COMPOUND', 'INTEREST', 'ACCRUING', 'UPON', 'IT', 'OR', 'ALL', 'HIS', 'HUGE', 'FORTUNE'] +1089-134686-0021-1754: ref=['IF', 'A', 'LAYMAN', 'IN', 'GIVING', 'BAPTISM', 'POUR', 'THE', 'WATER', 'BEFORE', 'SAYING', 'THE', 'WORDS', 'IS', 'THE', 'CHILD', 'BAPTIZED'] +1089-134686-0021-1754: hyp=['IF', 'A', 'LAYMAN', 'IN', 'GIVING', 'BAPTISM', 'POUR', 'THE', 'WATER', 'BEFORE', 'SAYING', 'THE', 'WORDS', 'IS', 'THE', 'CHILD', 'BAPTIZED'] +1089-134686-0022-1755: ref=['HOW', 'COMES', 'IT', 'THAT', 'WHILE', 'THE', 'FIRST', 'BEATITUDE', 'PROMISES', 'THE', 'KINGDOM', 'OF', 'HEAVEN', 'TO', 'THE', 'POOR', 'OF', 'HEART', 'THE', 'SECOND', 'BEATITUDE', 
'PROMISES', 'ALSO', 'TO', 'THE', 'MEEK', 'THAT', 'THEY', 'SHALL', 'POSSESS', 'THE', 'LAND'] +1089-134686-0022-1755: hyp=['HOW', 'COMES', 'IT', 'THAT', 'WHILE', 'THE', 'FIRST', 'BE', 'ATTITUDE', 'PROMISES', 'THE', 'KINGDOM', 'OF', 'HEAVEN', 'TO', 'THE', 'POOR', 'OF', 'HEART', 'THE', 'SECOND', 'BE', 'ATTITUDE', 'PROMISES', 'ALSO', 'TO', 'THE', 'MEEK', 'THAT', 'THEY', 'SHALL', 'POSSESS', 'THE', 'LAND'] +1089-134686-0023-1756: ref=['WHY', 'WAS', 'THE', 'SACRAMENT', 'OF', 'THE', 'EUCHARIST', 'INSTITUTED', 'UNDER', 'THE', 'TWO', 'SPECIES', 'OF', 'BREAD', 'AND', 'WINE', 'IF', 'JESUS', 'CHRIST', 'BE', 'PRESENT', 'BODY', 'AND', 'BLOOD', 'SOUL', 'AND', 'DIVINITY', 'IN', 'THE', 'BREAD', 'ALONE', 'AND', 'IN', 'THE', 'WINE', 'ALONE'] +1089-134686-0023-1756: hyp=['WHY', 'WAS', 'THE', 'SACRAMENT', 'OF', 'THE', 'EUCHARIST', 'INSTITUTED', 'UNDER', 'THE', 'TWO', 'SPECIES', 'OF', 'BREAD', 'AND', 'WINE', 'IF', 'JESUS', 'CHRIST', 'BE', 'PRESENT', 'BODY', 'AND', 'BLOOD', 'SOUL', 'AND', 'DIVINITY', 'IN', 'THE', 'BREAD', 'ALONE', 'AND', 'IN', 'THE', 'WINE', 'ALONE'] +1089-134686-0024-1757: ref=['IF', 'THE', 'WINE', 'CHANGE', 'INTO', 'VINEGAR', 'AND', 'THE', 'HOST', 'CRUMBLE', 'INTO', 'CORRUPTION', 'AFTER', 'THEY', 'HAVE', 'BEEN', 'CONSECRATED', 'IS', 'JESUS', 'CHRIST', 'STILL', 'PRESENT', 'UNDER', 'THEIR', 'SPECIES', 'AS', 'GOD', 'AND', 'AS', 'MAN'] +1089-134686-0024-1757: hyp=['IF', 'THE', 'WINE', 'CHANGE', 'INTO', 'VINEGAR', 'AND', 'THE', 'HOST', 'CRUMBLE', 'INTO', 'CORRUPTION', 'AFTER', 'THEY', 'HAVE', 'BEEN', 'CONSECRATED', 'IS', 'JESUS', 'CHRIST', 'STILL', 'PRESENT', 'UNDER', 'THEIR', 'SPECIES', 'AS', 'GOD', 'AND', 'AS', 'MAN'] +1089-134686-0025-1758: ref=['A', 'GENTLE', 'KICK', 'FROM', 'THE', 'TALL', 'BOY', 'IN', 'THE', 'BENCH', 'BEHIND', 'URGED', 'STEPHEN', 'TO', 'ASK', 'A', 'DIFFICULT', 'QUESTION'] +1089-134686-0025-1758: hyp=['A', 'GENTLE', 'KICK', 'FROM', 'THE', 'TALL', 'BOY', 'IN', 'THE', 'BENCH', 'BEHIND', 'URGED', 'STEPHEN', 'TO', 'ASK', 'A', 'DIFFICULT', 'QUESTION'] +1089-134686-0026-1759: ref=['THE', 'RECTOR', 'DID', 'NOT', 'ASK', 'FOR', 'A', 'CATECHISM', 'TO', 'HEAR', 'THE', 'LESSON', 'FROM'] +1089-134686-0026-1759: hyp=['THE', 'RECTOR', 'DID', 'NOT', 'ASK', 'FOR', 'A', 'CATECHISM', 'TO', 'HEAR', 'THE', 'LESSON', 'FROM'] +1089-134686-0027-1760: ref=['HE', 'CLASPED', 'HIS', 'HANDS', 'ON', 'THE', 'DESK', 'AND', 'SAID'] +1089-134686-0027-1760: hyp=['HE', 'CLASPED', 'HIS', 'HANDS', 'ON', 'THE', 'DESK', 'AND', 'SAID'] +1089-134686-0028-1761: ref=['THE', 'RETREAT', 'WILL', 'BEGIN', 'ON', 'WEDNESDAY', 'AFTERNOON', 'IN', 'HONOUR', 'OF', 'SAINT', 'FRANCIS', 'XAVIER', 'WHOSE', 'FEAST', 'DAY', 'IS', 'SATURDAY'] +1089-134686-0028-1761: hyp=['THE', 'RETREAT', 'WILL', 'BEGIN', 'ON', 'WEDNESDAY', 'AFTERNOON', 'IN', 'HONOR', 'OF', 'SAINT', 'FRANCIS', 'SAVIER', 'WHOSE', 'FEAST', 'DAY', 'IS', 'SATURDAY'] +1089-134686-0029-1762: ref=['ON', 'FRIDAY', 'CONFESSION', 'WILL', 'BE', 'HEARD', 'ALL', 'THE', 'AFTERNOON', 'AFTER', 'BEADS'] +1089-134686-0029-1762: hyp=['ON', 'FRIDAY', 'CONFESSION', 'WILL', 'BE', 'HEARD', 'ALL', 'THE', 'AFTERNOON', 'AFTER', 'BEADS'] +1089-134686-0030-1763: ref=['BEWARE', 'OF', 'MAKING', 'THAT', 'MISTAKE'] +1089-134686-0030-1763: hyp=['BEWARE', 'OF', 'MAKING', 'THAT', 'MISTAKE'] +1089-134686-0031-1764: ref=["STEPHEN'S", 'HEART', 'BEGAN', 'SLOWLY', 'TO', 'FOLD', 'AND', 'FADE', 'WITH', 'FEAR', 'LIKE', 'A', 'WITHERING', 'FLOWER'] +1089-134686-0031-1764: hyp=["STEPHEN'S", 'HEART', 'BEGAN', 'SLOWLY', 'TO', 'FOLD', 'AND', 'FADE', 'WITH', 'FEAR', 'LIKE', 'A', 'WITHERING', 'FLOWER'] 
+1089-134686-0032-1765: ref=['HE', 'IS', 'CALLED', 'AS', 'YOU', 'KNOW', 'THE', 'APOSTLE', 'OF', 'THE', 'INDIES'] +1089-134686-0032-1765: hyp=['HE', 'IS', 'CALLED', 'AS', 'YOU', 'KNOW', 'THE', 'APOSTLE', 'OF', 'THE', 'INDIES'] +1089-134686-0033-1766: ref=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'XAVIER'] +1089-134686-0033-1766: hyp=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'ZEVIR'] +1089-134686-0034-1767: ref=['THE', 'RECTOR', 'PAUSED', 'AND', 'THEN', 'SHAKING', 'HIS', 'CLASPED', 'HANDS', 'BEFORE', 'HIM', 'WENT', 'ON'] +1089-134686-0034-1767: hyp=['THE', 'RECTOR', 'PAUSED', 'AND', 'THEN', 'SHAKING', 'HIS', 'CLASPED', 'HANDS', 'BEFORE', 'HIM', 'WENT', 'ON'] +1089-134686-0035-1768: ref=['HE', 'HAD', 'THE', 'FAITH', 'IN', 'HIM', 'THAT', 'MOVES', 'MOUNTAINS'] +1089-134686-0035-1768: hyp=['HE', 'HAD', 'THE', 'FAITH', 'IN', 'HIM', 'THAT', 'MOVES', 'MOUNTAINS'] +1089-134686-0036-1769: ref=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'XAVIER'] +1089-134686-0036-1769: hyp=['A', 'GREAT', 'SAINT', 'SAINT', 'FRANCIS', 'ZEVIER'] +1089-134686-0037-1770: ref=['IN', 'THE', 'SILENCE', 'THEIR', 'DARK', 'FIRE', 'KINDLED', 'THE', 'DUSK', 'INTO', 'A', 'TAWNY', 'GLOW'] +1089-134686-0037-1770: hyp=['IN', 'THE', 'SILENCE', 'THEIR', 'DARK', 'FIRE', 'KINDLED', 'THE', 'DUSK', 'INTO', 'A', 'TAWNY', 'GLOW'] +1089-134691-0000-1707: ref=['HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0000-1707: hyp=['HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0001-1708: ref=['FOR', 'A', 'FULL', 'HOUR', 'HE', 'HAD', 'PACED', 'UP', 'AND', 'DOWN', 'WAITING', 'BUT', 'HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0001-1708: hyp=['FOR', 'A', 'FULL', 'HOUR', 'HE', 'HAD', 'PACED', 'UP', 'AND', 'DOWN', 'WAITING', 'BUT', 'HE', 'COULD', 'WAIT', 'NO', 'LONGER'] +1089-134691-0002-1709: ref=['HE', 'SET', 'OFF', 'ABRUPTLY', 'FOR', 'THE', 'BULL', 'WALKING', 'RAPIDLY', 'LEST', 'HIS', "FATHER'S", 'SHRILL', 'WHISTLE', 'MIGHT', 'CALL', 'HIM', 'BACK', 'AND', 'IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HAD', 'ROUNDED', 'THE', 'CURVE', 'AT', 'THE', 'POLICE', 'BARRACK', 'AND', 'WAS', 'SAFE'] +1089-134691-0002-1709: hyp=['HE', 'SET', 'OFF', 'ABRUPTLY', 'FOR', 'THE', 'BULL', 'WALKING', 'RAPIDLY', 'LEST', 'HIS', "FATHER'S", 'SHRILL', 'WHISTLE', 'MIGHT', 'CALL', 'HIM', 'BACK', 'AND', 'IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HAD', 'ROUNDED', 'THE', 'CURVE', 'AT', 'THE', 'POLICE', 'BARRACK', 'AND', 'WAS', 'SAFE'] +1089-134691-0003-1710: ref=['THE', 'UNIVERSITY'] +1089-134691-0003-1710: hyp=['THE', 'UNIVERSITY'] +1089-134691-0004-1711: ref=['PRIDE', 'AFTER', 'SATISFACTION', 'UPLIFTED', 'HIM', 'LIKE', 'LONG', 'SLOW', 'WAVES'] +1089-134691-0004-1711: hyp=['PRIDE', 'AFTER', 'SATISFACTION', 'UPLIFTED', 'HIM', 'LIKE', 'LONG', 'SLOW', 'WAVES'] +1089-134691-0005-1712: ref=['WHOSE', 'FEET', 'ARE', 'AS', 'THE', 'FEET', 'OF', 'HARTS', 'AND', 'UNDERNEATH', 'THE', 'EVERLASTING', 'ARMS'] +1089-134691-0005-1712: hyp=['WHOSE', 'FEET', 'ARE', 'AS', 'THE', 'FEET', 'OF', 'HEARTS', 'AND', 'UNDERNEATH', 'THE', 'EVERLASTING', 'ARMS'] +1089-134691-0006-1713: ref=['THE', 'PRIDE', 'OF', 'THAT', 'DIM', 'IMAGE', 'BROUGHT', 'BACK', 'TO', 'HIS', 'MIND', 'THE', 'DIGNITY', 'OF', 'THE', 'OFFICE', 'HE', 'HAD', 'REFUSED'] +1089-134691-0006-1713: hyp=['THE', 'PRIDE', 'OF', 'THAT', 'DIM', 'IMAGE', 'BROUGHT', 'BACK', 'TO', 'HIS', 'MIND', 'THE', 'DIGNITY', 'OF', 'THE', 'OFFICE', 'HE', 'HAD', 'REFUSED'] +1089-134691-0007-1714: ref=['SOON', 'THE', 'WHOLE', 'BRIDGE', 'WAS', 'TREMBLING', 'AND', 'RESOUNDING'] +1089-134691-0007-1714: hyp=['SOON', 'THE', 'WHOLE', 'BRIDGE', 'WAS', 'TREMBLING', 'AND', 
'RESOUNDING'] +1089-134691-0008-1715: ref=['THE', 'UNCOUTH', 'FACES', 'PASSED', 'HIM', 'TWO', 'BY', 'TWO', 'STAINED', 'YELLOW', 'OR', 'RED', 'OR', 'LIVID', 'BY', 'THE', 'SEA', 'AND', 'AS', 'HE', 'STROVE', 'TO', 'LOOK', 'AT', 'THEM', 'WITH', 'EASE', 'AND', 'INDIFFERENCE', 'A', 'FAINT', 'STAIN', 'OF', 'PERSONAL', 'SHAME', 'AND', 'COMMISERATION', 'ROSE', 'TO', 'HIS', 'OWN', 'FACE'] +1089-134691-0008-1715: hyp=['THE', 'UNCOUTH', 'FACES', 'PASSED', 'HIM', 'TWO', 'BY', 'TWO', 'STAINED', 'YELLOW', 'OR', 'RED', 'OR', 'LIVID', 'BY', 'THE', 'SEA', 'AND', 'AS', 'HE', 'STROVE', 'TO', 'LOOK', 'AT', 'THEM', 'WITH', 'EASE', 'AND', 'INDIFFERENCE', 'A', 'FAINT', 'STAIN', 'OF', 'PERSONAL', 'SHAME', 'AND', 'COMMISERATION', 'ROSE', 'TO', 'HIS', 'OWN', 'FACE'] +1089-134691-0009-1716: ref=['ANGRY', 'WITH', 'HIMSELF', 'HE', 'TRIED', 'TO', 'HIDE', 'HIS', 'FACE', 'FROM', 'THEIR', 'EYES', 'BY', 'GAZING', 'DOWN', 'SIDEWAYS', 'INTO', 'THE', 'SHALLOW', 'SWIRLING', 'WATER', 'UNDER', 'THE', 'BRIDGE', 'BUT', 'HE', 'STILL', 'SAW', 'A', 'REFLECTION', 'THEREIN', 'OF', 'THEIR', 'TOP', 'HEAVY', 'SILK', 'HATS', 'AND', 'HUMBLE', 'TAPE', 'LIKE', 'COLLARS', 'AND', 'LOOSELY', 'HANGING', 'CLERICAL', 'CLOTHES', 'BROTHER', 'HICKEY'] +1089-134691-0009-1716: hyp=['ANGRY', 'WITH', 'HIMSELF', 'HE', 'TRIED', 'TO', 'HIDE', 'HIS', 'FACE', 'FROM', 'THEIR', 'EYES', 'BY', 'GAZING', 'DOWN', 'SIDEWAYS', 'INTO', 'THE', 'SHALLOW', 'SWIRLING', 'WATER', 'UNDER', 'THE', 'BRIDGE', 'BUT', 'HE', 'STILL', 'SAW', 'A', 'REFLECTION', 'THEREIN', 'OF', 'THEIR', 'TOP', 'HEAVY', 'SILK', 'HATS', 'AND', 'HUMBLE', 'TAPE', 'LIKE', 'COLLARS', 'AND', 'LOOSELY', 'HANGING', 'CLERICAL', 'CLOTHES', 'BROTHER', 'HICKEY'] +1089-134691-0010-1717: ref=['BROTHER', 'MAC', 'ARDLE', 'BROTHER', 'KEOGH'] +1089-134691-0010-1717: hyp=['BROTHER', 'MC', 'CARDLE', 'BROTHER', 'KIOPH'] +1089-134691-0011-1718: ref=['THEIR', 'PIETY', 'WOULD', 'BE', 'LIKE', 'THEIR', 'NAMES', 'LIKE', 'THEIR', 'FACES', 'LIKE', 'THEIR', 'CLOTHES', 'AND', 'IT', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'THEIR', 'HUMBLE', 'AND', 'CONTRITE', 'HEARTS', 'IT', 'MIGHT', 'BE', 'PAID', 'A', 'FAR', 'RICHER', 'TRIBUTE', 'OF', 'DEVOTION', 'THAN', 'HIS', 'HAD', 'EVER', 'BEEN', 'A', 'GIFT', 'TENFOLD', 'MORE', 'ACCEPTABLE', 'THAN', 'HIS', 'ELABORATE', 'ADORATION'] +1089-134691-0011-1718: hyp=['THEIR', 'PIETY', 'WOULD', 'BE', 'LIKE', 'THEIR', 'NAMES', 'LIKE', 'THEIR', 'FACES', 'LIKE', 'THEIR', 'CLOTHES', 'AND', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'THEIR', 'HUMBLE', 'AND', 'CONTRITE', 'HEARTS', 'IT', 'MIGHT', 'BE', 'PAID', 'A', 'FAR', 'RICHER', 'TRIBUTE', 'OF', 'DEVOTION', 'THAN', 'HIS', 'HAD', 'EVER', 'BEEN', 'A', 'GIFT', 'TENFOLD', 'MORE', 'ACCEPTABLE', 'THAN', 'HIS', 'ELABORATE', 'ADORATION'] +1089-134691-0012-1719: ref=['IT', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'MOVE', 'HIMSELF', 'TO', 'BE', 'GENEROUS', 'TOWARDS', 'THEM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'IF', 'HE', 'EVER', 'CAME', 'TO', 'THEIR', 'GATES', 'STRIPPED', 'OF', 'HIS', 'PRIDE', 'BEATEN', 'AND', 'IN', "BEGGAR'S", 'WEEDS', 'THAT', 'THEY', 'WOULD', 'BE', 'GENEROUS', 'TOWARDS', 'HIM', 'LOVING', 'HIM', 'AS', 'THEMSELVES'] +1089-134691-0012-1719: hyp=['IT', 'WAS', 'IDLE', 'FOR', 'HIM', 'TO', 'MOVE', 'HIMSELF', 'TO', 'BE', 'GENEROUS', 'TOWARDS', 'THEM', 'TO', 'TELL', 'HIMSELF', 'THAT', 'IF', 'HE', 'EVER', 'CAME', 'TO', 'THEIR', 'GATES', 'STRIPPED', 'OF', 'HIS', 'PRIDE', 'BEATEN', 'AND', 'IN', 'BEGGARS', 'WEEDS', 'THAT', 'THEY', 'WOULD', 'BE', 'GENEROUS', 'TOWARDS', 'HIM', 'LOVING', 'HIM', 'AS', 'THEMSELVES'] 
+1089-134691-0013-1720: ref=['IDLE', 'AND', 'EMBITTERING', 'FINALLY', 'TO', 'ARGUE', 'AGAINST', 'HIS', 'OWN', 'DISPASSIONATE', 'CERTITUDE', 'THAT', 'THE', 'COMMANDMENT', 'OF', 'LOVE', 'BADE', 'US', 'NOT', 'TO', 'LOVE', 'OUR', 'NEIGHBOUR', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'AMOUNT', 'AND', 'INTENSITY', 'OF', 'LOVE', 'BUT', 'TO', 'LOVE', 'HIM', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'KIND', 'OF', 'LOVE'] +1089-134691-0013-1720: hyp=['IDLE', 'AND', 'EMBITTERING', 'FINALLY', 'TO', 'ARGUE', 'AGAINST', 'HIS', 'OWN', 'DISPASSIONATE', 'CERTITUDE', 'THAT', 'THE', 'COMMANDMENT', 'OF', 'LOVE', 'BADE', 'US', 'NOT', 'TO', 'LOVE', 'OUR', 'NEIGHBOUR', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'AMOUNT', 'AND', 'INTENSITY', 'OF', 'LOVE', 'BUT', 'TO', 'LOVE', 'HIM', 'AS', 'OURSELVES', 'WITH', 'THE', 'SAME', 'KIND', 'OF', 'LOVE'] +1089-134691-0014-1721: ref=['THE', 'PHRASE', 'AND', 'THE', 'DAY', 'AND', 'THE', 'SCENE', 'HARMONIZED', 'IN', 'A', 'CHORD'] +1089-134691-0014-1721: hyp=['THE', 'PHRASE', 'AND', 'THE', 'DAY', 'AND', 'THE', 'SCENE', 'HARMONIZED', 'IN', 'ACCORD'] +1089-134691-0015-1722: ref=['WORDS', 'WAS', 'IT', 'THEIR', 'COLOURS'] +1089-134691-0015-1722: hyp=['WORDS', 'WAS', 'IT', 'THEIR', 'COLORS'] +1089-134691-0016-1723: ref=['THEY', 'WERE', 'VOYAGING', 'ACROSS', 'THE', 'DESERTS', 'OF', 'THE', 'SKY', 'A', 'HOST', 'OF', 'NOMADS', 'ON', 'THE', 'MARCH', 'VOYAGING', 'HIGH', 'OVER', 'IRELAND', 'WESTWARD', 'BOUND'] +1089-134691-0016-1723: hyp=['THEY', 'WERE', 'VOYAGING', 'ACROSS', 'THE', 'DESERTS', 'OF', 'THE', 'SKY', 'A', 'HOST', 'OF', 'NOMADS', 'ON', 'THE', 'MARCH', 'VOYAGING', 'HIGH', 'OVER', 'IRELAND', 'WESTWARD', 'BOUND'] +1089-134691-0017-1724: ref=['THE', 'EUROPE', 'THEY', 'HAD', 'COME', 'FROM', 'LAY', 'OUT', 'THERE', 'BEYOND', 'THE', 'IRISH', 'SEA', 'EUROPE', 'OF', 'STRANGE', 'TONGUES', 'AND', 'VALLEYED', 'AND', 'WOODBEGIRT', 'AND', 'CITADELLED', 'AND', 'OF', 'ENTRENCHED', 'AND', 'MARSHALLED', 'RACES'] +1089-134691-0017-1724: hyp=['THE', 'EUROPE', 'THEY', 'HAD', 'COME', 'FROM', 'LAY', 'OUT', 'THERE', 'BEYOND', 'THE', 'IRISH', 'SEA', 'EUROPE', 'OF', 'STRANGE', 'TONGUES', 'AND', 'VALID', 'AND', 'WOULD', 'BE', 'GIRT', 'AND', 'CITADELED', 'AND', 'OF', 'ENTRENCHED', 'AND', 'MARSHALED', 'RACES'] +1089-134691-0018-1725: ref=['AGAIN', 'AGAIN'] +1089-134691-0018-1725: hyp=['AGAIN', 'AGAIN'] +1089-134691-0019-1726: ref=['A', 'VOICE', 'FROM', 'BEYOND', 'THE', 'WORLD', 'WAS', 'CALLING'] +1089-134691-0019-1726: hyp=['A', 'VOICE', 'FROM', 'BEYOND', 'THE', 'WORLD', 'WAS', 'CALLING'] +1089-134691-0020-1727: ref=['HELLO', 'STEPHANOS', 'HERE', 'COMES', 'THE', 'DEDALUS'] +1089-134691-0020-1727: hyp=['HELLO', 'STUFFANOS', 'HERE', 'COMES', 'THE', 'DAEDALUS'] +1089-134691-0021-1728: ref=['THEIR', 'DIVING', 'STONE', 'POISED', 'ON', 'ITS', 'RUDE', 'SUPPORTS', 'AND', 'ROCKING', 'UNDER', 'THEIR', 'PLUNGES', 'AND', 'THE', 'ROUGH', 'HEWN', 'STONES', 'OF', 'THE', 'SLOPING', 'BREAKWATER', 'OVER', 'WHICH', 'THEY', 'SCRAMBLED', 'IN', 'THEIR', 'HORSEPLAY', 'GLEAMED', 'WITH', 'COLD', 'WET', 'LUSTRE'] +1089-134691-0021-1728: hyp=['THEIR', 'DIVING', 'STONE', 'POISED', 'ON', 'ITS', 'RUDE', 'SUPPORTS', 'AND', 'ROCKING', 'UNDER', 'THEIR', 'PLUNGES', 'AND', 'THE', 'ROUGH', 'HEWN', 'STONES', 'OF', 'THE', 'SLOPING', 'BRAKE', 'WATER', 'OVER', 'WHICH', 'THEY', 'SCRAMBLED', 'IN', 'THEIR', 'HORSE', 'PLAY', 'GLEAMED', 'WITH', 'COLD', 'WET', 'LUSTRE'] +1089-134691-0022-1729: ref=['HE', 'STOOD', 'STILL', 'IN', 'DEFERENCE', 'TO', 'THEIR', 'CALLS', 'AND', 'PARRIED', 'THEIR', 'BANTER', 'WITH', 'EASY', 'WORDS'] +1089-134691-0022-1729: 
hyp=['HE', 'STOOD', 'STILL', 'IN', 'DEFERENCE', 'TO', 'THEIR', 'CALLS', 'AND', 'PARRIED', 'THEIR', 'BANTER', 'WITH', 'EASY', 'WORDS'] +1089-134691-0023-1730: ref=['IT', 'WAS', 'A', 'PAIN', 'TO', 'SEE', 'THEM', 'AND', 'A', 'SWORD', 'LIKE', 'PAIN', 'TO', 'SEE', 'THE', 'SIGNS', 'OF', 'ADOLESCENCE', 'THAT', 'MADE', 'REPELLENT', 'THEIR', 'PITIABLE', 'NAKEDNESS'] +1089-134691-0023-1730: hyp=['IT', 'WAS', 'A', 'PAIN', 'TO', 'SEE', 'THEM', 'AND', 'A', 'SWORD', 'LIKE', 'PAIN', 'TO', 'SEE', 'THE', 'SIGNS', 'OF', 'ADOLESCENCE', 'THAT', 'MADE', 'REPELLENT', 'THEIR', 'PITIABLE', 'NAKEDNESS'] +1089-134691-0024-1731: ref=['STEPHANOS', 'DEDALOS'] +1089-134691-0024-1731: hyp=["STEPHANO'S", 'DEAD', 'LOSS'] +1089-134691-0025-1732: ref=['A', 'MOMENT', 'BEFORE', 'THE', 'GHOST', 'OF', 'THE', 'ANCIENT', 'KINGDOM', 'OF', 'THE', 'DANES', 'HAD', 'LOOKED', 'FORTH', 'THROUGH', 'THE', 'VESTURE', 'OF', 'THE', 'HAZEWRAPPED', 'CITY'] +1089-134691-0025-1732: hyp=['A', 'MOMENT', 'BEFORE', 'THE', 'GHOST', 'OF', 'THE', 'ANCIENT', 'KINGDOM', 'OF', 'THE', 'DANES', 'HAD', 'LOOKED', 'FORTH', 'THROUGH', 'THE', 'VESTURE', 'OF', 'THE', 'HAYES', 'WRAPPED', 'CITY'] +1188-133604-0000-1771: ref=['YOU', 'WILL', 'FIND', 'ME', 'CONTINUALLY', 'SPEAKING', 'OF', 'FOUR', 'MEN', 'TITIAN', 'HOLBEIN', 'TURNER', 'AND', 'TINTORET', 'IN', 'ALMOST', 'THE', 'SAME', 'TERMS'] +1188-133604-0000-1771: hyp=['YOU', 'WILL', 'FIND', 'ME', 'CONTINUALLY', 'SPEAKING', 'OF', 'FOUR', 'MEN', 'TITIAN', 'HOLBINE', 'TURNER', 'AND', 'TINTARETTE', 'IN', 'ALMOST', 'THE', 'SAME', 'TERMS'] +1188-133604-0001-1772: ref=['THEY', 'UNITE', 'EVERY', 'QUALITY', 'AND', 'SOMETIMES', 'YOU', 'WILL', 'FIND', 'ME', 'REFERRING', 'TO', 'THEM', 'AS', 'COLORISTS', 'SOMETIMES', 'AS', 'CHIAROSCURISTS'] +1188-133604-0001-1772: hyp=['THE', 'UNITE', 'EVERY', 'QUALITY', 'AND', 'SOMETIMES', 'YOU', 'WILL', 'FIND', 'ME', 'REFERRING', 'TO', 'THEM', 'AS', 'COLORISTS', 'SOMETIMES', 'AS', 'KIARASCURISTS'] +1188-133604-0002-1773: ref=['BY', 'BEING', 'STUDIOUS', 'OF', 'COLOR', 'THEY', 'ARE', 'STUDIOUS', 'OF', 'DIVISION', 'AND', 'WHILE', 'THE', 'CHIAROSCURIST', 'DEVOTES', 'HIMSELF', 'TO', 'THE', 'REPRESENTATION', 'OF', 'DEGREES', 'OF', 'FORCE', 'IN', 'ONE', 'THING', 'UNSEPARATED', 'LIGHT', 'THE', 'COLORISTS', 'HAVE', 'FOR', 'THEIR', 'FUNCTION', 'THE', 'ATTAINMENT', 'OF', 'BEAUTY', 'BY', 'ARRANGEMENT', 'OF', 'THE', 'DIVISIONS', 'OF', 'LIGHT'] +1188-133604-0002-1773: hyp=['BY', 'BEING', 'STUDIOUS', 'OF', 'COLOR', 'THEY', 'ARE', 'STUDIOUS', 'OF', 'DIVISION', 'AND', 'WHILE', 'THE', 'CURE', 'SCURUS', 'DEVOTES', 'HIMSELF', 'TO', 'THE', 'REPRESENTATION', 'OF', 'DEGREES', 'OF', 'FORCE', 'IN', 'ONE', 'THING', 'ON', 'SEPARATED', 'LIGHT', 'THE', 'COLORISTS', 'HAVE', 'FOR', 'THEIR', 'FUNCTION', 'THE', 'ATTAINMENT', 'OF', 'BEAUTY', 'BY', 'ARRANGEMENT', 'OF', 'THE', 'DIVISIONS', 'OF', 'LIGHT'] +1188-133604-0003-1774: ref=['MY', 'FIRST', 'AND', 'PRINCIPAL', 'REASON', 'WAS', 'THAT', 'THEY', 'ENFORCED', 'BEYOND', 'ALL', 'RESISTANCE', 'ON', 'ANY', 'STUDENT', 'WHO', 'MIGHT', 'ATTEMPT', 'TO', 'COPY', 'THEM', 'THIS', 'METHOD', 'OF', 'LAYING', 'PORTIONS', 'OF', 'DISTINCT', 'HUE', 'SIDE', 'BY', 'SIDE'] +1188-133604-0003-1774: hyp=['MY', 'FIRST', 'AND', 'PRINCIPAL', 'REASON', 'WAS', 'THAT', 'THEY', 'ENFORCED', 'BEYOND', 'ALL', 'RESISTANCE', 'ON', 'ANY', 'STUDENT', 'WHO', 'MIGHT', 'ATTEMPT', 'TO', 'COPY', 'THEM', 'THIS', 'METHOD', 'OF', 'LAYING', 'PORTIONS', 'OF', 'DISTINCT', 'HUE', 'SIDE', 'BY', 'SIDE'] +1188-133604-0004-1775: ref=['SOME', 'OF', 'THE', 'TOUCHES', 'INDEED', 'WHEN', 'THE', 'TINT', 'HAS', 'BEEN', 'MIXED', 'WITH', 
'MUCH', 'WATER', 'HAVE', 'BEEN', 'LAID', 'IN', 'LITTLE', 'DROPS', 'OR', 'PONDS', 'SO', 'THAT', 'THE', 'PIGMENT', 'MIGHT', 'CRYSTALLIZE', 'HARD', 'AT', 'THE', 'EDGE'] +1188-133604-0004-1775: hyp=['SOME', 'OF', 'THE', 'TOUCHES', 'INDEED', 'WHEN', 'THE', 'TINT', 'HAS', 'BEEN', 'MIXED', 'WITH', 'MUCH', 'WATER', 'HAVE', 'BEEN', 'LAID', 'IN', 'LITTLE', 'DROPS', 'OR', 'PONDS', 'SO', 'THAT', 'THE', 'PIGMENT', 'MIGHT', 'CRYSTALLIZE', 'HARD', 'AT', 'THE', 'EDGE'] +1188-133604-0005-1776: ref=['IT', 'IS', 'THE', 'HEAD', 'OF', 'A', 'PARROT', 'WITH', 'A', 'LITTLE', 'FLOWER', 'IN', 'HIS', 'BEAK', 'FROM', 'A', 'PICTURE', 'OF', "CARPACCIO'S", 'ONE', 'OF', 'HIS', 'SERIES', 'OF', 'THE', 'LIFE', 'OF', 'SAINT', 'GEORGE'] +1188-133604-0005-1776: hyp=['IT', 'IS', 'THE', 'HEAD', 'OF', 'A', 'PARROT', 'WITH', 'A', 'LITTLE', 'FLOWER', 'IN', 'HIS', 'BEAK', 'FROM', 'A', 'PICTURE', 'OF', "CARPATCHIO'S", 'ONE', 'OF', 'HIS', 'SERIES', 'OF', 'THE', 'LIFE', 'OF', 'SAINT', 'GEORGE'] +1188-133604-0006-1777: ref=['THEN', 'HE', 'COMES', 'TO', 'THE', 'BEAK', 'OF', 'IT'] +1188-133604-0006-1777: hyp=['THEN', 'HE', 'COMES', 'TO', 'THE', 'BEAK', 'OF', 'IT'] +1188-133604-0007-1778: ref=['THE', 'BROWN', 'GROUND', 'BENEATH', 'IS', 'LEFT', 'FOR', 'THE', 'MOST', 'PART', 'ONE', 'TOUCH', 'OF', 'BLACK', 'IS', 'PUT', 'FOR', 'THE', 'HOLLOW', 'TWO', 'DELICATE', 'LINES', 'OF', 'DARK', 'GRAY', 'DEFINE', 'THE', 'OUTER', 'CURVE', 'AND', 'ONE', 'LITTLE', 'QUIVERING', 'TOUCH', 'OF', 'WHITE', 'DRAWS', 'THE', 'INNER', 'EDGE', 'OF', 'THE', 'MANDIBLE'] +1188-133604-0007-1778: hyp=['THE', 'BROWN', 'GROUND', 'BENEATH', 'IS', 'LEFT', 'FOR', 'THE', 'MOST', 'PART', 'ONE', 'TOUCH', 'OF', 'BLACK', 'IS', 'PUT', 'FOR', 'THE', 'HOLLOW', 'TOO', 'DELICATE', 'LINES', 'OF', 'DARK', 'GREY', 'TO', 'FIND', 'THE', 'OUTER', 'CURVE', 'AND', 'ONE', 'LITTLE', 'QUIVERING', 'TOUCH', 'OF', 'WHITE', 'DRAWS', 'THE', 'INNER', 'EDGE', 'OF', 'THE', 'MANDIBLE'] +1188-133604-0008-1779: ref=['FOR', 'BELIEVE', 'ME', 'THE', 'FINAL', 'PHILOSOPHY', 'OF', 'ART', 'CAN', 'ONLY', 'RATIFY', 'THEIR', 'OPINION', 'THAT', 'THE', 'BEAUTY', 'OF', 'A', 'COCK', 'ROBIN', 'IS', 'TO', 'BE', 'RED', 'AND', 'OF', 'A', 'GRASS', 'PLOT', 'TO', 'BE', 'GREEN', 'AND', 'THE', 'BEST', 'SKILL', 'OF', 'ART', 'IS', 'IN', 'INSTANTLY', 'SEIZING', 'ON', 'THE', 'MANIFOLD', 'DELICIOUSNESS', 'OF', 'LIGHT', 'WHICH', 'YOU', 'CAN', 'ONLY', 'SEIZE', 'BY', 'PRECISION', 'OF', 'INSTANTANEOUS', 'TOUCH'] +1188-133604-0008-1779: hyp=['FOR', 'BELIEVE', 'ME', 'THE', 'FINAL', 'PHILOSOPHY', 'OF', 'ART', 'CAN', 'ONLY', 'RATIFY', 'THEIR', 'OPINION', 'THAT', 'THE', 'BEAUTY', 'OF', 'A', 'COCK', 'ROBIN', 'IS', 'TO', 'BE', 'READ', 'AND', 'OF', 'A', 'GRASS', 'PLOT', 'TO', 'BE', 'GREEN', 'AND', 'THE', 'BEST', 'SKILL', 'OF', 'ART', 'IS', 'AN', 'INSTANTLY', 'SEIZING', 'ON', 'THE', 'MANIFOLD', 'DELICIOUSNESS', 'OF', 'LIGHT', 'WHICH', 'YOU', 'CAN', 'ONLY', 'SEIZE', 'BY', 'PRECISION', 'OF', 'INSTANTANEOUS', 'TOUCH'] +1188-133604-0009-1780: ref=['NOW', 'YOU', 'WILL', 'SEE', 'IN', 'THESE', 'STUDIES', 'THAT', 'THE', 'MOMENT', 'THE', 'WHITE', 'IS', 'INCLOSED', 'PROPERLY', 'AND', 'HARMONIZED', 'WITH', 'THE', 'OTHER', 'HUES', 'IT', 'BECOMES', 'SOMEHOW', 'MORE', 'PRECIOUS', 'AND', 'PEARLY', 'THAN', 'THE', 'WHITE', 'PAPER', 'AND', 'THAT', 'I', 'AM', 'NOT', 'AFRAID', 'TO', 'LEAVE', 'A', 'WHOLE', 'FIELD', 'OF', 'UNTREATED', 'WHITE', 'PAPER', 'ALL', 'ROUND', 'IT', 'BEING', 'SURE', 'THAT', 'EVEN', 'THE', 'LITTLE', 'DIAMONDS', 'IN', 'THE', 'ROUND', 'WINDOW', 'WILL', 'TELL', 'AS', 'JEWELS', 'IF', 'THEY', 'ARE', 'GRADATED', 'JUSTLY'] +1188-133604-0009-1780: 
hyp=['NOW', 'YOU', 'WILL', 'SEE', 'IN', 'THESE', 'STUDIES', 'THAT', 'THE', 'MOMENT', 'THE', 'WIGHT', 'IS', 'ENCLOSED', 'PROPERLY', 'AND', 'HARMONIZE', 'WITH', 'THE', 'OTHER', 'HUES', 'IT', 'BECOMES', 'SOMEHOW', 'MORE', 'PRECIOUS', 'AND', 'PEARLY', 'THAN', 'THE', 'WHITE', 'PAPER', 'AND', 'THAT', 'I', 'AM', 'NOT', 'AFRAID', 'TO', 'LEAVE', 'A', 'WHOLE', 'FIELD', 'OF', 'UNTREATED', 'WHITE', 'PAPER', 'ALL', 'ROUND', 'IT', 'BEING', 'SURE', 'THAT', 'EVEN', 'THE', 'LITTLE', 'DIAMONDS', 'IN', 'THE', 'ROUND', 'WINDOW', 'WILL', 'TELL', 'AS', 'JEWELS', 'IF', 'THEY', 'ARE', 'GRADATED', 'JUSTLY'] +1188-133604-0010-1781: ref=['BUT', 'IN', 'THIS', 'VIGNETTE', 'COPIED', 'FROM', 'TURNER', 'YOU', 'HAVE', 'THE', 'TWO', 'PRINCIPLES', 'BROUGHT', 'OUT', 'PERFECTLY'] +1188-133604-0010-1781: hyp=['BUT', 'IN', 'THIS', 'VINEY', 'COPIED', 'FROM', 'TURNER', 'YOU', 'HAVE', 'THE', 'TWO', 'PRINCIPLES', 'BROUGHT', 'OUT', 'PERFECTLY'] +1188-133604-0011-1782: ref=['THEY', 'ARE', 'BEYOND', 'ALL', 'OTHER', 'WORKS', 'THAT', 'I', 'KNOW', 'EXISTING', 'DEPENDENT', 'FOR', 'THEIR', 'EFFECT', 'ON', 'LOW', 'SUBDUED', 'TONES', 'THEIR', 'FAVORITE', 'CHOICE', 'IN', 'TIME', 'OF', 'DAY', 'BEING', 'EITHER', 'DAWN', 'OR', 'TWILIGHT', 'AND', 'EVEN', 'THEIR', 'BRIGHTEST', 'SUNSETS', 'PRODUCED', 'CHIEFLY', 'OUT', 'OF', 'GRAY', 'PAPER'] +1188-133604-0011-1782: hyp=['THEY', 'ARE', 'BEYOND', 'ALL', 'OTHER', 'WORKS', 'THAN', 'I', 'KNOW', 'EXISTING', 'DEPENDENT', 'FOR', 'THEIR', 'EFFECT', 'ON', 'LOW', 'SUBDUED', 'TONES', 'THEIR', 'FAVORITE', 'CHOICE', 'IN', 'TIME', 'OF', 'DAY', 'BEING', 'EITHER', 'DAWN', 'OR', 'TWILIGHT', 'AND', 'EVEN', 'THEIR', 'BRIGHTEST', 'SUNSETS', 'PRODUCED', 'CHIEFLY', 'OUT', 'OF', 'GRAY', 'PAPER'] +1188-133604-0012-1783: ref=['IT', 'MAY', 'BE', 'THAT', 'A', 'GREAT', 'COLORIST', 'WILL', 'USE', 'HIS', 'UTMOST', 'FORCE', 'OF', 'COLOR', 'AS', 'A', 'SINGER', 'HIS', 'FULL', 'POWER', 'OF', 'VOICE', 'BUT', 'LOUD', 'OR', 'LOW', 'THE', 'VIRTUE', 'IS', 'IN', 'BOTH', 'CASES', 'ALWAYS', 'IN', 'REFINEMENT', 'NEVER', 'IN', 'LOUDNESS'] +1188-133604-0012-1783: hyp=['IT', 'MAY', 'BE', 'THAT', 'A', 'GREAT', 'COLOR', 'LIST', 'WILL', 'USE', 'HIS', 'UTMOST', 'FORCE', 'OF', 'COLOR', 'AS', 'A', 'SINGER', 'HIS', 'FULL', 'POWER', 'OF', 'VOICE', 'BUT', 'LOUD', 'OR', 'LOW', 'THE', 'VIRTUE', 'IS', 'IN', 'BOTH', 'CASES', 'ALWAYS', 'IN', 'REFINEMENT', 'NEVER', 'IN', 'LOUDNESS'] +1188-133604-0013-1784: ref=['IT', 'MUST', 'REMEMBER', 'BE', 'ONE', 'OR', 'THE', 'OTHER'] +1188-133604-0013-1784: hyp=['IT', 'MUST', 'REMEMBER', 'BE', 'ONE', 'OR', 'THE', 'OTHER'] +1188-133604-0014-1785: ref=['DO', 'NOT', 'THEREFORE', 'THINK', 'THAT', 'THE', 'GOTHIC', 'SCHOOL', 'IS', 'AN', 'EASY', 'ONE'] +1188-133604-0014-1785: hyp=['DO', 'NOT', 'THEREFORE', 'THINK', 'THAT', 'THE', 'GOTHIC', 'SCHOOLS', 'AN', 'EASY', 'ONE'] +1188-133604-0015-1786: ref=['THE', 'LAW', 'OF', 'THAT', 'SCHOOL', 'IS', 'THAT', 'EVERYTHING', 'SHALL', 'BE', 'SEEN', 'CLEARLY', 'OR', 'AT', 'LEAST', 'ONLY', 'IN', 'SUCH', 'MIST', 'OR', 'FAINTNESS', 'AS', 'SHALL', 'BE', 'DELIGHTFUL', 'AND', 'I', 'HAVE', 'NO', 'DOUBT', 'THAT', 'THE', 'BEST', 'INTRODUCTION', 'TO', 'IT', 'WOULD', 'BE', 'THE', 'ELEMENTARY', 'PRACTICE', 'OF', 'PAINTING', 'EVERY', 'STUDY', 'ON', 'A', 'GOLDEN', 'GROUND'] +1188-133604-0015-1786: hyp=['THE', 'LAW', 'OF', 'THAT', 'SCHOOL', 'IS', 'THAT', 'EVERYTHING', 'SHALL', 'BE', 'SEEN', 'CLEARLY', 'OR', 'AT', 'LEAST', 'ONLY', 'IN', 'SUCH', 'MIST', 'OR', 'FAINTNESS', 'AS', 'SHALL', 'BE', 'DELIGHTFUL', 'AND', 'I', 'HAVE', 'NO', 'DOUBT', 'THAT', 'THE', 'BEST', 'INTRODUCTION', 'TO', 'IT', 'WOULD', 'BE', 
'THE', 'ELEMENTARY', 'PRACTICE', 'OF', 'PAINTING', 'EVERY', 'STUDY', 'ON', 'A', 'GOLDEN', 'GROUND'] +1188-133604-0016-1787: ref=['THIS', 'AT', 'ONCE', 'COMPELS', 'YOU', 'TO', 'UNDERSTAND', 'THAT', 'THE', 'WORK', 'IS', 'TO', 'BE', 'IMAGINATIVE', 'AND', 'DECORATIVE', 'THAT', 'IT', 'REPRESENTS', 'BEAUTIFUL', 'THINGS', 'IN', 'THE', 'CLEAREST', 'WAY', 'BUT', 'NOT', 'UNDER', 'EXISTING', 'CONDITIONS', 'AND', 'THAT', 'IN', 'FACT', 'YOU', 'ARE', 'PRODUCING', "JEWELER'S", 'WORK', 'RATHER', 'THAN', 'PICTURES'] +1188-133604-0016-1787: hyp=['THIS', 'AT', 'ONCE', 'COMPELS', 'YOU', 'TO', 'UNDERSTAND', 'THAT', 'THE', 'WORK', 'IS', 'TO', 'BE', 'IMAGINATIVE', 'AND', 'DECORATIVE', 'THAT', 'IT', 'REPRESENTS', 'BEAUTIFUL', 'THINGS', 'IN', 'THE', 'CLEAREST', 'WAY', 'BUT', 'NOT', 'UNDER', 'EXISTING', 'CONDITIONS', 'AND', 'THAT', 'IN', 'FACT', 'YOU', 'ARE', 'PRODUCING', "JEWELLER'S", 'WORK', 'RATHER', 'THAN', 'PICTURES'] +1188-133604-0017-1788: ref=['THAT', 'A', 'STYLE', 'IS', 'RESTRAINED', 'OR', 'SEVERE', 'DOES', 'NOT', 'MEAN', 'THAT', 'IT', 'IS', 'ALSO', 'ERRONEOUS'] +1188-133604-0017-1788: hyp=['THAT', 'A', 'STYLE', 'IS', 'RESTRAINED', 'OR', 'SEVERE', 'DOES', 'NOT', 'MEAN', 'THAT', 'IT', 'IS', 'ALSO', 'ERRONEOUS'] +1188-133604-0018-1789: ref=['IN', 'ALL', 'EARLY', 'GOTHIC', 'ART', 'INDEED', 'YOU', 'WILL', 'FIND', 'FAILURE', 'OF', 'THIS', 'KIND', 'ESPECIALLY', 'DISTORTION', 'AND', 'RIGIDITY', 'WHICH', 'ARE', 'IN', 'MANY', 'RESPECTS', 'PAINFULLY', 'TO', 'BE', 'COMPARED', 'WITH', 'THE', 'SPLENDID', 'REPOSE', 'OF', 'CLASSIC', 'ART'] +1188-133604-0018-1789: hyp=['IN', 'ALL', 'EARLY', 'GOTHIC', 'ART', 'INDEED', 'YOU', 'WILL', 'FIND', 'FAILURE', 'OF', 'THIS', 'KIND', 'ESPECIALLY', 'DISTORTION', 'AND', 'RIGIDITY', 'WHICH', 'ARE', 'IN', 'MANY', 'RESPECTS', 'PAINFULLY', 'TO', 'BE', 'COMPARED', 'WITH', 'THE', 'SPLENDID', 'REPOSE', 'OF', 'CLASSIC', 'ART'] +1188-133604-0019-1790: ref=['THE', 'LARGE', 'LETTER', 'CONTAINS', 'INDEED', 'ENTIRELY', 'FEEBLE', 'AND', 'ILL', 'DRAWN', 'FIGURES', 'THAT', 'IS', 'MERELY', 'CHILDISH', 'AND', 'FAILING', 'WORK', 'OF', 'AN', 'INFERIOR', 'HAND', 'IT', 'IS', 'NOT', 'CHARACTERISTIC', 'OF', 'GOTHIC', 'OR', 'ANY', 'OTHER', 'SCHOOL'] +1188-133604-0019-1790: hyp=['THE', 'LARGE', 'LETTER', 'CONTAINS', 'INDEED', 'ENTIRELY', 'FEEBLE', 'AND', 'ILL', 'DRAWN', 'FIGURES', 'THAT', 'IS', 'MERELY', 'CHILDISH', 'AND', 'FAILING', 'WORK', 'OF', 'AN', 'INFERIOR', 'HAND', 'IT', 'IS', 'NOT', 'CHARACTERISTIC', 'OF', 'GOTHIC', 'OR', 'ANY', 'OTHER', 'SCHOOL'] +1188-133604-0020-1791: ref=['BUT', 'OBSERVE', 'YOU', 'CAN', 'ONLY', 'DO', 'THIS', 'ON', 'ONE', 'CONDITION', 'THAT', 'OF', 'STRIVING', 'ALSO', 'TO', 'CREATE', 'IN', 'REALITY', 'THE', 'BEAUTY', 'WHICH', 'YOU', 'SEEK', 'IN', 'IMAGINATION'] +1188-133604-0020-1791: hyp=['BUT', 'OBSERVE', 'YOU', 'CAN', 'ONLY', 'DO', 'THIS', 'ON', 'ONE', 'CONDITION', 'THAT', 'OF', 'STRIVING', 'ALSO', 'TO', 'CREATE', 'IN', 'REALITY', 'THE', 'BEAUTY', 'WHICH', 'YOU', 'SEEK', 'AND', 'IMAGINATION'] +1188-133604-0021-1792: ref=['IT', 'WILL', 'BE', 'WHOLLY', 'IMPOSSIBLE', 'FOR', 'YOU', 'TO', 'RETAIN', 'THE', 'TRANQUILLITY', 'OF', 'TEMPER', 'AND', 'FELICITY', 'OF', 'FAITH', 'NECESSARY', 'FOR', 'NOBLE', 'PURIST', 'PAINTING', 'UNLESS', 'YOU', 'ARE', 'ACTIVELY', 'ENGAGED', 'IN', 'PROMOTING', 'THE', 'FELICITY', 'AND', 'PEACE', 'OF', 'PRACTICAL', 'LIFE'] +1188-133604-0021-1792: hyp=['IT', 'WILL', 'BE', 'WHOLLY', 'IMPOSSIBLE', 'FOR', 'YOU', 'TO', 'RETAIN', 'THE', 'TRANQUILLITY', 'OF', 'TEMPER', 'AND', 'FELICITY', 'OF', 'FAITH', 'NECESSARY', 'FOR', 'NOBLE', 'PUREST', 'PAINTING', 'UNLESS', 'YOU', 
'ARE', 'ACTIVELY', 'ENGAGED', 'IN', 'PROMOTING', 'THE', 'FELICITY', 'AND', 'PEACE', 'OF', 'PRACTICAL', 'LIFE'] +1188-133604-0022-1793: ref=['YOU', 'MUST', 'LOOK', 'AT', 'HIM', 'IN', 'THE', 'FACE', 'FIGHT', 'HIM', 'CONQUER', 'HIM', 'WITH', 'WHAT', 'SCATHE', 'YOU', 'MAY', 'YOU', 'NEED', 'NOT', 'THINK', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'HIM'] +1188-133604-0022-1793: hyp=['YOU', 'MUST', 'LOOK', 'AT', 'HIM', 'IN', 'THE', 'FACE', 'FIGHT', 'HIM', 'CONQUER', 'HIM', 'WITH', 'WHAT', 'SCATH', 'YOU', 'MAY', 'YOU', 'NEED', 'NOT', 'THINK', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'HIM'] +1188-133604-0023-1794: ref=['THE', 'COLORIST', 'SAYS', 'FIRST', 'OF', 'ALL', 'AS', 'MY', 'DELICIOUS', 'PAROQUET', 'WAS', 'RUBY', 'SO', 'THIS', 'NASTY', 'VIPER', 'SHALL', 'BE', 'BLACK', 'AND', 'THEN', 'IS', 'THE', 'QUESTION', 'CAN', 'I', 'ROUND', 'HIM', 'OFF', 'EVEN', 'THOUGH', 'HE', 'IS', 'BLACK', 'AND', 'MAKE', 'HIM', 'SLIMY', 'AND', 'YET', 'SPRINGY', 'AND', 'CLOSE', 'DOWN', 'CLOTTED', 'LIKE', 'A', 'POOL', 'OF', 'BLACK', 'BLOOD', 'ON', 'THE', 'EARTH', 'ALL', 'THE', 'SAME'] +1188-133604-0023-1794: hyp=['THE', 'CHOLERAIST', 'SAYS', 'FIRST', 'OF', 'ALL', 'AS', 'MY', 'DELICIOUS', 'PERICE', 'WAS', 'RUBY', 'SO', 'THIS', 'NASTY', 'VIPER', 'SHALL', 'BE', 'BLACK', 'AND', 'THEN', 'IS', 'THE', 'QUESTION', 'CAN', 'I', 'ROUND', 'HIM', 'OFF', 'EVEN', 'THOUGH', 'HE', 'IS', 'BLACK', 'AND', 'MAKE', 'HIM', 'SLIMY', 'AND', 'YET', 'SPRINGY', 'AND', 'CLOSE', 'DOWN', 'CLOTTED', 'LIKE', 'A', 'POOL', 'OF', 'BLACK', 'BLOOD', 'ON', 'THE', 'EARTH', 'ALL', 'THE', 'SAME'] +1188-133604-0024-1795: ref=['NOTHING', 'WILL', 'BE', 'MORE', 'PRECIOUS', 'TO', 'YOU', 'I', 'THINK', 'IN', 'THE', 'PRACTICAL', 'STUDY', 'OF', 'ART', 'THAN', 'THE', 'CONVICTION', 'WHICH', 'WILL', 'FORCE', 'ITSELF', 'ON', 'YOU', 'MORE', 'AND', 'MORE', 'EVERY', 'HOUR', 'OF', 'THE', 'WAY', 'ALL', 'THINGS', 'ARE', 'BOUND', 'TOGETHER', 'LITTLE', 'AND', 'GREAT', 'IN', 'SPIRIT', 'AND', 'IN', 'MATTER'] +1188-133604-0024-1795: hyp=['NOTHING', 'WILL', 'BE', 'MORE', 'PRECIOUS', 'TO', 'YOU', 'I', 'THINK', 'IN', 'THE', 'PRACTICAL', 'STUDY', 'OF', 'ART', 'THAN', 'THE', 'CONVICTION', 'WHICH', 'WILL', 'FORCE', 'ITSELF', 'ON', 'YOU', 'MORE', 'AND', 'MORE', 'EVERY', 'HOUR', 'OF', 'THE', 'WAY', 'ALL', 'THINGS', 'ARE', 'BOUND', 'TOGETHER', 'LITTLE', 'AND', 'GREAT', 'IN', 'SPIRIT', 'AND', 'IN', 'MATTER'] +1188-133604-0025-1796: ref=['YOU', 'KNOW', 'I', 'HAVE', 'JUST', 'BEEN', 'TELLING', 'YOU', 'HOW', 'THIS', 'SCHOOL', 'OF', 'MATERIALISM', 'AND', 'CLAY', 'INVOLVED', 'ITSELF', 'AT', 'LAST', 'IN', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0025-1796: hyp=['YOU', 'KNOW', "I'VE", 'JUST', 'BEEN', 'TELLING', 'YOU', 'HOW', 'THIS', 'SCHOOL', 'OF', 'MATERIALISM', 'IN', 'CLAY', 'INVOLVED', 'ITSELF', 'AT', 'LAST', 'IN', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0026-1797: ref=['HERE', 'IS', 'AN', 'EQUALLY', 'TYPICAL', 'GREEK', 'SCHOOL', 'LANDSCAPE', 'BY', 'WILSON', 'LOST', 'WHOLLY', 'IN', 'GOLDEN', 'MIST', 'THE', 'TREES', 'SO', 'SLIGHTLY', 'DRAWN', 'THAT', 'YOU', "DON'T", 'KNOW', 'IF', 'THEY', 'ARE', 'TREES', 'OR', 'TOWERS', 'AND', 'NO', 'CARE', 'FOR', 'COLOR', 'WHATEVER', 'PERFECTLY', 'DECEPTIVE', 'AND', 'MARVELOUS', 'EFFECT', 'OF', 'SUNSHINE', 'THROUGH', 'THE', 'MIST', 'APOLLO', 'AND', 'THE', 'PYTHON'] +1188-133604-0026-1797: hyp=['HERE', 'IS', 'AN', 'EQUALLY', 'TYPICAL', 'GREEK', 'SCHOOL', 'LANDSCAPE', 'BY', 'WILSON', 'LOST', 'WHOLLY', 'IN', 'GOLDEN', 'MIST', 'THE', 'TREES', 'SO', 'SLIGHTLY', 'DRAWN', 'THAT', 'YOU', "DON'T", 'KNOW', 'IF', 'THEY', 'ARE', 'TREES', 'OR', 'TOWERS', 'AND', 'NO', 'CARE', 
'FOR', 'COLOR', 'WHATSOEVER', 'PERFECTLY', 'DECEPTIVE', 'AND', 'MARVELLOUS', 'EFFECT', 'OF', 'SUNSHINE', 'THROUGH', 'THE', 'MIST', 'APOLLO', 'IN', 'THE', 'PYTHON'] +1188-133604-0027-1798: ref=['NOW', 'HERE', 'IS', 'RAPHAEL', 'EXACTLY', 'BETWEEN', 'THE', 'TWO', 'TREES', 'STILL', 'DRAWN', 'LEAF', 'BY', 'LEAF', 'WHOLLY', 'FORMAL', 'BUT', 'BEAUTIFUL', 'MIST', 'COMING', 'GRADUALLY', 'INTO', 'THE', 'DISTANCE'] +1188-133604-0027-1798: hyp=['NOW', 'HERE', 'IS', 'RAPHAEL', 'EXACTLY', 'BETWEEN', 'THE', 'TWO', 'TREES', 'STILL', 'DRAWN', 'LEAF', 'BY', 'LEAF', 'HOLY', 'FORMAL', 'BUT', 'BEAUTIFUL', 'MIST', 'COMING', 'GRADUALLY', 'INTO', 'THE', 'DISTANCE'] +1188-133604-0028-1799: ref=['WELL', 'THEN', 'LAST', 'HERE', 'IS', "TURNER'S", 'GREEK', 'SCHOOL', 'OF', 'THE', 'HIGHEST', 'CLASS', 'AND', 'YOU', 'DEFINE', 'HIS', 'ART', 'ABSOLUTELY', 'AS', 'FIRST', 'THE', 'DISPLAYING', 'INTENSELY', 'AND', 'WITH', 'THE', 'STERNEST', 'INTELLECT', 'OF', 'NATURAL', 'FORM', 'AS', 'IT', 'IS', 'AND', 'THEN', 'THE', 'ENVELOPMENT', 'OF', 'IT', 'WITH', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0028-1799: hyp=['WELL', 'THEN', 'LAST', 'HERE', 'IS', 'TURNERS', 'GREEK', 'SCHOOL', 'OF', 'THE', 'HIGHEST', 'CLASS', 'AND', 'YOU', 'DEFINE', 'HIS', 'ART', 'ABSOLUTELY', 'AS', 'FIRST', 'THE', 'DISPLAYING', 'INTENSELY', 'AND', 'WITH', 'THE', 'STERNEST', 'INTELLECT', 'OF', 'NATURAL', 'FORM', 'AS', 'IT', 'IS', 'AND', 'THEN', 'THE', 'ENVELOPMENT', 'OF', 'IT', 'WITH', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0029-1800: ref=['ONLY', 'THERE', 'ARE', 'TWO', 'SORTS', 'OF', 'CLOUD', 'AND', 'FIRE'] +1188-133604-0029-1800: hyp=['ONLY', 'THERE', 'ARE', 'TWO', 'SORTS', 'OF', 'CLOUD', 'IN', 'FIRE'] +1188-133604-0030-1801: ref=['HE', 'KNOWS', 'THEM', 'BOTH'] +1188-133604-0030-1801: hyp=['HE', 'KNOWS', 'THEM', 'BOTH'] +1188-133604-0031-1802: ref=["THERE'S", 'ONE', 'AND', "THERE'S", 'ANOTHER', 'THE', 'DUDLEY', 'AND', 'THE', 'FLINT'] +1188-133604-0031-1802: hyp=["THERE'S", 'ONE', 'AND', "THERE'S", 'ANOTHER', 'THE', 'DUDLEY', 'AND', 'THE', 'FLINT'] +1188-133604-0032-1803: ref=['IT', 'IS', 'ONLY', 'A', 'PENCIL', 'OUTLINE', 'BY', 'EDWARD', 'BURNE', 'JONES', 'IN', 'ILLUSTRATION', 'OF', 'THE', 'STORY', 'OF', 'PSYCHE', 'IT', 'IS', 'THE', 'INTRODUCTION', 'OF', 'PSYCHE', 'AFTER', 'ALL', 'HER', 'TROUBLES', 'INTO', 'HEAVEN'] +1188-133604-0032-1803: hyp=['IT', 'IS', 'ONLY', 'A', 'PENCIL', 'OUTLINE', 'BY', 'EDWARD', 'BYRNE', 'JONES', 'IN', 'ILLUSTRATION', 'OF', 'THE', 'STORY', 'OF', 'PSYCHE', 'IT', 'IS', 'THE', 'INTRODUCTION', 'OF', 'PSYCHE', 'AFTER', 'ALL', 'HER', 'TROUBLES', 'AND', 'TO', 'HEAVEN'] +1188-133604-0033-1804: ref=['EVERY', 'PLANT', 'IN', 'THE', 'GRASS', 'IS', 'SET', 'FORMALLY', 'GROWS', 'PERFECTLY', 'AND', 'MAY', 'BE', 'REALIZED', 'COMPLETELY'] +1188-133604-0033-1804: hyp=['EVERY', 'PLANT', 'IN', 'THE', 'GRASS', 'IS', 'SET', 'FORMALLY', 'GROWS', 'PERFECTLY', 'AND', 'MAY', 'BE', 'REALIZED', 'COMPLETELY'] +1188-133604-0034-1805: ref=['EXQUISITE', 'ORDER', 'AND', 'UNIVERSAL', 'WITH', 'ETERNAL', 'LIFE', 'AND', 'LIGHT', 'THIS', 'IS', 'THE', 'FAITH', 'AND', 'EFFORT', 'OF', 'THE', 'SCHOOLS', 'OF', 'CRYSTAL', 'AND', 'YOU', 'MAY', 'DESCRIBE', 'AND', 'COMPLETE', 'THEIR', 'WORK', 'QUITE', 'LITERALLY', 'BY', 'TAKING', 'ANY', 'VERSES', 'OF', 'CHAUCER', 'IN', 'HIS', 'TENDER', 'MOOD', 'AND', 'OBSERVING', 'HOW', 'HE', 'INSISTS', 'ON', 'THE', 'CLEARNESS', 'AND', 'BRIGHTNESS', 'FIRST', 'AND', 'THEN', 'ON', 'THE', 'ORDER'] +1188-133604-0034-1805: hyp=['EXQUISITE', 'ORDER', 'AND', 'UNIVERSAL', 'WITH', 'ETERNAL', 'LIFE', 'AND', 'LIGHT', 'THIS', 'IS', 'THE', 'FAITH', 'AND', 'EFFORT', 
'OF', 'THE', 'SCHOOLS', 'OF', 'CRISTEL', 'AND', 'YOU', 'MAY', 'DESCRIBE', 'AND', 'COMPLETE', 'THEIR', 'WORK', 'QUITE', 'LITERALLY', 'BY', 'TAKING', 'ANY', 'VERSES', 'OF', 'CHAUCER', 'IN', 'HIS', 'TENDER', 'MOOD', 'IN', 'OBSERVING', 'HOW', 'HE', 'INSISTS', 'ON', 'THE', 'CLEARNESS', 'AND', 'BRIGHTNESS', 'FIRST', 'AND', 'THEN', 'ON', 'THE', 'ORDER'] +1188-133604-0035-1806: ref=['THUS', 'IN', "CHAUCER'S", 'DREAM'] +1188-133604-0035-1806: hyp=['THUS', 'IN', "CHAUCER'S", 'DREAM'] +1188-133604-0036-1807: ref=['IN', 'BOTH', 'THESE', 'HIGH', 'MYTHICAL', 'SUBJECTS', 'THE', 'SURROUNDING', 'NATURE', 'THOUGH', 'SUFFERING', 'IS', 'STILL', 'DIGNIFIED', 'AND', 'BEAUTIFUL'] +1188-133604-0036-1807: hyp=['IN', 'BOTH', 'THESE', 'HIGH', 'MYTHICAL', 'SUBJECTS', 'THE', 'SURROUNDING', 'NATURE', 'THOUGH', 'SUFFERING', 'IS', 'STILL', 'DIGNIFIED', 'AND', 'BEAUTIFUL'] +1188-133604-0037-1808: ref=['EVERY', 'LINE', 'IN', 'WHICH', 'THE', 'MASTER', 'TRACES', 'IT', 'EVEN', 'WHERE', 'SEEMINGLY', 'NEGLIGENT', 'IS', 'LOVELY', 'AND', 'SET', 'DOWN', 'WITH', 'A', 'MEDITATIVE', 'CALMNESS', 'WHICH', 'MAKES', 'THESE', 'TWO', 'ETCHINGS', 'CAPABLE', 'OF', 'BEING', 'PLACED', 'BESIDE', 'THE', 'MOST', 'TRANQUIL', 'WORK', 'OF', 'HOLBEIN', 'OR', 'DUERER'] +1188-133604-0037-1808: hyp=['EVERY', 'LINE', 'IN', 'WHICH', 'THE', 'MASTER', 'TRACES', 'IT', 'EVEN', 'WHERE', 'SEEMINGLY', 'NEGLIGENT', 'IS', 'LOVELY', 'AND', 'SET', 'DOWN', 'WITH', 'A', 'MEDITATIVE', 'CALMNESS', 'WHICH', 'MAKES', 'THESE', 'TWO', 'ETCHINGS', 'CAPABLE', 'OF', 'BEING', 'PLACED', 'BESIDE', 'THE', 'MOST', 'TRANQUIL', 'WORK', 'OF', 'HOLBINE', 'OR', 'DURE'] +1188-133604-0038-1809: ref=['BUT', 'NOW', 'HERE', 'IS', 'A', 'SUBJECT', 'OF', 'WHICH', 'YOU', 'WILL', 'WONDER', 'AT', 'FIRST', 'WHY', 'TURNER', 'DREW', 'IT', 'AT', 'ALL'] +1188-133604-0038-1809: hyp=['BUT', 'NOW', 'HERE', 'IS', 'A', 'SUBJECT', 'OF', 'WHICH', 'YOU', 'WILL', 'WONDER', 'AT', 'FIRST', 'WHY', 'TURNER', 'DREW', 'IT', 'AT', 'ALL'] +1188-133604-0039-1810: ref=['IT', 'HAS', 'NO', 'BEAUTY', 'WHATSOEVER', 'NO', 'SPECIALTY', 'OF', 'PICTURESQUENESS', 'AND', 'ALL', 'ITS', 'LINES', 'ARE', 'CRAMPED', 'AND', 'POOR'] +1188-133604-0039-1810: hyp=['IT', 'HAS', 'NO', 'BEAUTY', 'WHATSOEVER', 'NO', 'SPECIALTY', 'OF', 'PICTURESQUENESS', 'IN', 'ALL', 'ITS', 'LINES', 'ARE', 'CRAMPED', 'AND', 'POOR'] +1188-133604-0040-1811: ref=['THE', 'CRAMPNESS', 'AND', 'THE', 'POVERTY', 'ARE', 'ALL', 'INTENDED'] +1188-133604-0040-1811: hyp=['THE', 'CRAMPNESS', 'IN', 'THE', 'POVERTY', 'ARE', 'ALL', 'INTENDED'] +1188-133604-0041-1812: ref=['IT', 'IS', 'A', 'GLEANER', 'BRINGING', 'DOWN', 'HER', 'ONE', 'SHEAF', 'OF', 'CORN', 'TO', 'AN', 'OLD', 'WATERMILL', 'ITSELF', 'MOSSY', 'AND', 'RENT', 'SCARCELY', 'ABLE', 'TO', 'GET', 'ITS', 'STONES', 'TO', 'TURN'] +1188-133604-0041-1812: hyp=['IT', 'IS', 'A', 'GLEANER', 'BRINGING', 'DOWN', 'HER', 'ONE', 'SHEAF', 'OF', 'CORN', 'TO', 'AN', 'OLD', 'WATER', 'MILL', 'ITSELF', 'MOSSY', 'AND', 'RENT', 'SCARCELY', 'ABLE', 'TO', 'GET', 'ITS', 'STONES', 'TO', 'TURN'] +1188-133604-0042-1813: ref=['THE', 'SCENE', 'IS', 'ABSOLUTELY', 'ARCADIAN'] +1188-133604-0042-1813: hyp=['THE', 'SCENE', 'IS', 'ABSOLUTELY', 'ARCADIAN'] +1188-133604-0043-1814: ref=['SEE', 'THAT', 'YOUR', 'LIVES', 'BE', 'IN', 'NOTHING', 'WORSE', 'THAN', 'A', "BOY'S", 'CLIMBING', 'FOR', 'HIS', 'ENTANGLED', 'KITE'] +1188-133604-0043-1814: hyp=['SEE', 'THAT', 'YOUR', 'LIES', 'BE', 'IN', 'NOTHING', 'WORSE', 'THAN', 'A', "BOY'S", 'CLIMBING', 'FOR', 'HIS', 'ENTANGLED', 'KITE'] +1188-133604-0044-1815: ref=['IT', 'WILL', 'BE', 'WELL', 'FOR', 'YOU', 'IF', 'YOU', 
'JOIN', 'NOT', 'WITH', 'THOSE', 'WHO', 'INSTEAD', 'OF', 'KITES', 'FLY', 'FALCONS', 'WHO', 'INSTEAD', 'OF', 'OBEYING', 'THE', 'LAST', 'WORDS', 'OF', 'THE', 'GREAT', 'CLOUD', 'SHEPHERD', 'TO', 'FEED', 'HIS', 'SHEEP', 'LIVE', 'THE', 'LIVES', 'HOW', 'MUCH', 'LESS', 'THAN', 'VANITY', 'OF', 'THE', 'WAR', 'WOLF', 'AND', 'THE', 'GIER', 'EAGLE'] +1188-133604-0044-1815: hyp=['IT', 'WILL', 'BE', 'WELL', 'FOR', 'YOU', 'IF', 'YOU', 'JOIN', 'NOT', 'WITH', 'THOSE', 'WHO', 'INSTEAD', 'OF', 'KITES', 'FLY', 'FALCONS', 'WHO', 'INSTEAD', 'OF', 'OBEYING', 'THE', 'LAST', 'WORDS', 'OF', 'THE', 'GREAT', 'CLOUD', 'SHEPHERD', 'TO', 'FEED', 'HIS', 'SHEEP', 'LIVE', 'THE', 'LIVES', 'HOW', 'MUCH', 'LESS', 'THAN', 'VANITY', 'OF', 'THE', 'WAR', 'WOLF', 'AND', 'THE', 'GEAR', 'EAGLE'] +121-121726-0000-2558: ref=['ALSO', 'A', 'POPULAR', 'CONTRIVANCE', 'WHEREBY', 'LOVE', 'MAKING', 'MAY', 'BE', 'SUSPENDED', 'BUT', 'NOT', 'STOPPED', 'DURING', 'THE', 'PICNIC', 'SEASON'] +121-121726-0000-2558: hyp=['ALSO', 'A', 'POPULAR', 'CONTRIVANCE', 'WHEREBY', 'LOVE', 'MAKING', 'MAY', 'BE', 'SUSPENDED', 'BUT', 'NOT', 'STOPPED', 'DURING', 'THE', 'PICNIC', 'SEASON'] +121-121726-0001-2559: ref=['HARANGUE', 'THE', 'TIRESOME', 'PRODUCT', 'OF', 'A', 'TIRELESS', 'TONGUE'] +121-121726-0001-2559: hyp=['HARANG', 'THE', 'TIRESOME', 'PRODUCT', 'OF', 'A', 'TIRELESS', 'TONGUE'] +121-121726-0002-2560: ref=['ANGOR', 'PAIN', 'PAINFUL', 'TO', 'HEAR'] +121-121726-0002-2560: hyp=['ANGORE', 'HAYNE', 'PAINFUL', 'TO', 'HEAR'] +121-121726-0003-2561: ref=['HAY', 'FEVER', 'A', 'HEART', 'TROUBLE', 'CAUSED', 'BY', 'FALLING', 'IN', 'LOVE', 'WITH', 'A', 'GRASS', 'WIDOW'] +121-121726-0003-2561: hyp=['HAY', 'FEVER', 'A', 'HEART', 'TROUBLE', 'CAUSED', 'BY', 'FALLING', 'IN', 'LOVE', 'WITH', 'A', 'GRASS', 'WIDOW'] +121-121726-0004-2562: ref=['HEAVEN', 'A', 'GOOD', 'PLACE', 'TO', 'BE', 'RAISED', 'TO'] +121-121726-0004-2562: hyp=['HEAVEN', 'A', 'GOOD', 'PLACE', 'TO', 'BE', 'RAISED', 'TO'] +121-121726-0005-2563: ref=['HEDGE', 'A', 'FENCE'] +121-121726-0005-2563: hyp=['HEDGE', 'OFFENCE'] +121-121726-0006-2564: ref=['HEREDITY', 'THE', 'CAUSE', 'OF', 'ALL', 'OUR', 'FAULTS'] +121-121726-0006-2564: hyp=['HEREDITY', 'THE', 'CAUSE', 'OF', 'ALL', 'OUR', 'FAULTS'] +121-121726-0007-2565: ref=['HORSE', 'SENSE', 'A', 'DEGREE', 'OF', 'WISDOM', 'THAT', 'KEEPS', 'ONE', 'FROM', 'BETTING', 'ON', 'THE', 'RACES'] +121-121726-0007-2565: hyp=['HORSE', 'SENSE', 'A', 'DEGREE', 'OF', 'WISDOM', 'THAT', 'KEEPS', 'ONE', 'FROM', 'BETTING', 'ON', 'THE', 'RACES'] +121-121726-0008-2566: ref=['HOSE', "MAN'S", 'EXCUSE', 'FOR', 'WETTING', 'THE', 'WALK'] +121-121726-0008-2566: hyp=['HOSE', "MAN'S", 'EXCUSE', 'FOR', 'WETTING', 'THE', 'WALK'] +121-121726-0009-2567: ref=['HOTEL', 'A', 'PLACE', 'WHERE', 'A', 'GUEST', 'OFTEN', 'GIVES', 'UP', 'GOOD', 'DOLLARS', 'FOR', 'POOR', 'QUARTERS'] +121-121726-0009-2567: hyp=['HOTEL', 'A', 'PLACE', 'WHERE', 'A', 'GUEST', 'OFTEN', 'GIVES', 'UP', 'GOOD', 'DOLLARS', 'FOR', 'POOR', 'QUARTERS'] +121-121726-0010-2568: ref=['HOUSECLEANING', 'A', 'DOMESTIC', 'UPHEAVAL', 'THAT', 'MAKES', 'IT', 'EASY', 'FOR', 'THE', 'GOVERNMENT', 'TO', 'ENLIST', 'ALL', 'THE', 'SOLDIERS', 'IT', 'NEEDS'] +121-121726-0010-2568: hyp=['HOUSE', 'CLEANING', 'A', 'DOMESTIC', 'UPHEAVAL', 'THAT', 'MAKES', 'IT', 'EASY', 'FOR', 'THE', 'GOVERNMENT', 'TO', 'ENLIST', 'ALL', 'THE', 'SOLDIERS', 'IT', 'NEEDS'] +121-121726-0011-2569: ref=['HUSBAND', 'THE', 'NEXT', 'THING', 'TO', 'A', 'WIFE'] +121-121726-0011-2569: hyp=['HUSBAND', 'THE', 'NEXT', 'THING', 'TO', 'A', 'WIFE'] +121-121726-0012-2570: ref=['HUSSY', 'WOMAN', 'AND', 
'BOND', 'TIE'] +121-121726-0012-2570: hyp=['HUSSY', 'WOMAN', 'AND', 'BOND', 'TIE'] +121-121726-0013-2571: ref=['TIED', 'TO', 'A', 'WOMAN'] +121-121726-0013-2571: hyp=['TIED', 'TO', 'A', 'WOMAN'] +121-121726-0014-2572: ref=['HYPOCRITE', 'A', 'HORSE', 'DEALER'] +121-121726-0014-2572: hyp=['HYPOCRITE', 'A', 'HORSE', 'DEALER'] +121-123852-0000-2615: ref=['THOSE', 'PRETTY', 'WRONGS', 'THAT', 'LIBERTY', 'COMMITS', 'WHEN', 'I', 'AM', 'SOMETIME', 'ABSENT', 'FROM', 'THY', 'HEART', 'THY', 'BEAUTY', 'AND', 'THY', 'YEARS', 'FULL', 'WELL', 'BEFITS', 'FOR', 'STILL', 'TEMPTATION', 'FOLLOWS', 'WHERE', 'THOU', 'ART'] +121-123852-0000-2615: hyp=['THOSE', 'PRETTY', 'WRONGS', 'THAT', 'LIBERTY', 'COMMITS', 'WHEN', 'I', 'AM', 'SOME', 'TIME', 'ABSENT', 'FROM', 'THY', 'HEART', 'THY', 'BEAUTY', 'AND', 'THY', 'YEARS', 'FALL', 'WELL', 'BEFITS', 'FOR', 'STILL', 'TEMPTATION', 'FOLLOWS', 'WHERE', 'THOU', 'ART'] +121-123852-0001-2616: ref=['AY', 'ME'] +121-123852-0001-2616: hyp=['I', 'ME'] +121-123852-0002-2617: ref=['NO', 'MATTER', 'THEN', 'ALTHOUGH', 'MY', 'FOOT', 'DID', 'STAND', 'UPON', 'THE', 'FARTHEST', 'EARTH', "REMOV'D", 'FROM', 'THEE', 'FOR', 'NIMBLE', 'THOUGHT', 'CAN', 'JUMP', 'BOTH', 'SEA', 'AND', 'LAND', 'AS', 'SOON', 'AS', 'THINK', 'THE', 'PLACE', 'WHERE', 'HE', 'WOULD', 'BE', 'BUT', 'AH'] +121-123852-0002-2617: hyp=['NO', 'MATTER', 'THEN', 'ALTHOUGH', 'MY', 'FOOT', 'DID', 'STAND', 'UPON', 'THE', 'FARTHEST', 'EARTH', 'REMOVED', 'FROM', 'THEE', 'FOR', 'NIMBLE', 'THOUGHT', 'CAN', 'JUMP', 'BOTH', 'SEA', 'AND', 'LAND', 'AS', 'SOON', 'AS', 'THINK', 'THE', 'PLACE', 'WHERE', 'HE', 'WOULD', 'BE', 'BUT', 'AH'] +121-123852-0003-2618: ref=['THOUGHT', 'KILLS', 'ME', 'THAT', 'I', 'AM', 'NOT', 'THOUGHT', 'TO', 'LEAP', 'LARGE', 'LENGTHS', 'OF', 'MILES', 'WHEN', 'THOU', 'ART', 'GONE', 'BUT', 'THAT', 'SO', 'MUCH', 'OF', 'EARTH', 'AND', 'WATER', 'WROUGHT', 'I', 'MUST', 'ATTEND', "TIME'S", 'LEISURE', 'WITH', 'MY', 'MOAN', 'RECEIVING', 'NOUGHT', 'BY', 'ELEMENTS', 'SO', 'SLOW', 'BUT', 'HEAVY', 'TEARS', 'BADGES', 'OF', "EITHER'S", 'WOE'] +121-123852-0003-2618: hyp=['THOUGHT', 'KILLS', 'ME', 'THAT', 'I', 'AM', 'NOT', 'BOUGHT', 'TO', 'LEAP', 'LARGE', 'LENGTHS', 'OF', 'MILES', 'WHEN', 'THOU', 'ART', 'GONE', 'BUT', 'THAT', 'SO', 'MUCH', 'OF', 'EARTH', 'AND', 'WATER', 'WROUGHT', 'I', 'MUST', 'ATTEND', "TIME'S", 'LEISURE', 'WITH', 'MY', 'MOAN', 'RECEIVING', 'NOT', 'BY', 'ELEMENTS', 'SO', 'SLOW', 'BUT', 'HEAVY', 'TEARS', 'BADGES', 'OF', "EITHER'S", 'WOE'] +121-123852-0004-2619: ref=['MY', 'HEART', 'DOTH', 'PLEAD', 'THAT', 'THOU', 'IN', 'HIM', 'DOST', 'LIE', 'A', 'CLOSET', 'NEVER', "PIERC'D", 'WITH', 'CRYSTAL', 'EYES', 'BUT', 'THE', 'DEFENDANT', 'DOTH', 'THAT', 'PLEA', 'DENY', 'AND', 'SAYS', 'IN', 'HIM', 'THY', 'FAIR', 'APPEARANCE', 'LIES'] +121-123852-0004-2619: hyp=['MY', 'HEART', 'DOTH', 'PLEAD', 'THAT', 'THOU', 'IN', 'HIM', 'DOST', 'LIE', 'A', 'CLOSET', 'NEVER', 'PIERCED', 'WITH', 'CRYSTAL', 'EYES', 'BUT', 'THE', 'DEFENDANT', 'DOTH', 'THAT', 'PLEA', 'DENY', 'AND', 'SAYS', 'IN', 'HIM', 'THY', 'FAIR', 'APPEARANCE', 'LIES'] +121-123859-0000-2573: ref=['YOU', 'ARE', 'MY', 'ALL', 'THE', 'WORLD', 'AND', 'I', 'MUST', 'STRIVE', 'TO', 'KNOW', 'MY', 'SHAMES', 'AND', 'PRAISES', 'FROM', 'YOUR', 'TONGUE', 'NONE', 'ELSE', 'TO', 'ME', 'NOR', 'I', 'TO', 'NONE', 'ALIVE', 'THAT', 'MY', "STEEL'D", 'SENSE', 'OR', 'CHANGES', 'RIGHT', 'OR', 'WRONG'] +121-123859-0000-2573: hyp=['YOU', 'ARE', 'MY', 'ALL', 'THE', 'WORLD', 'AND', 'I', 'MUST', 'STRIVE', 'TO', 'KNOW', 'MY', 'SHAMES', 'AND', 'PRAISES', 'FROM', 'YOUR', 'TONGUE', 'NONE', 'ELSE', 'TO', 'ME', 'NOR', 
'I', 'TO', 'NONE', 'ALIVE', 'THAT', 'MY', 'STEELED', 'SENSE', 'OR', 'CHANGES', 'RIGHT', 'OR', 'WRONG'] +121-123859-0001-2574: ref=['O', 'TIS', 'THE', 'FIRST', 'TIS', 'FLATTERY', 'IN', 'MY', 'SEEING', 'AND', 'MY', 'GREAT', 'MIND', 'MOST', 'KINGLY', 'DRINKS', 'IT', 'UP', 'MINE', 'EYE', 'WELL', 'KNOWS', 'WHAT', 'WITH', 'HIS', 'GUST', 'IS', 'GREEING', 'AND', 'TO', 'HIS', 'PALATE', 'DOTH', 'PREPARE', 'THE', 'CUP', 'IF', 'IT', 'BE', "POISON'D", 'TIS', 'THE', 'LESSER', 'SIN', 'THAT', 'MINE', 'EYE', 'LOVES', 'IT', 'AND', 'DOTH', 'FIRST', 'BEGIN'] +121-123859-0001-2574: hyp=['OH', 'TIS', 'THE', 'FIRST', 'TIS', 'FLATTERY', 'IN', 'MY', 'SEEING', 'AND', 'MY', 'GREAT', 'MIND', 'MOST', 'KINGLY', 'DRINKS', 'IT', 'UP', 'MINE', 'EYE', 'WELL', 'KNOWS', 'WHAT', 'WITH', 'HIS', 'GUST', 'IS', 'GREEN', 'AND', 'TO', 'HIS', 'PALATE', 'DOTH', 'PREPARE', 'THE', 'CUP', 'IF', 'IT', 'BE', 'POISONED', 'TIS', 'THE', 'LESSER', 'SIN', 'THAT', 'MINE', 'EYE', 'LOVES', 'IT', 'AND', 'DOTH', 'FIRST', 'BEGIN'] +121-123859-0002-2575: ref=['BUT', 'RECKONING', 'TIME', 'WHOSE', "MILLION'D", 'ACCIDENTS', 'CREEP', 'IN', 'TWIXT', 'VOWS', 'AND', 'CHANGE', 'DECREES', 'OF', 'KINGS', 'TAN', 'SACRED', 'BEAUTY', 'BLUNT', 'THE', "SHARP'ST", 'INTENTS', 'DIVERT', 'STRONG', 'MINDS', 'TO', 'THE', 'COURSE', 'OF', 'ALTERING', 'THINGS', 'ALAS', 'WHY', 'FEARING', 'OF', "TIME'S", 'TYRANNY', 'MIGHT', 'I', 'NOT', 'THEN', 'SAY', 'NOW', 'I', 'LOVE', 'YOU', 'BEST', 'WHEN', 'I', 'WAS', 'CERTAIN', "O'ER", 'INCERTAINTY', 'CROWNING', 'THE', 'PRESENT', 'DOUBTING', 'OF', 'THE', 'REST'] +121-123859-0002-2575: hyp=['BUT', 'RECKONING', 'TIME', 'WHOSE', 'MILLIONED', 'ACCIDENTS', 'CREEP', 'IN', 'TWIXT', 'VOWS', 'AND', 'CHANGE', 'DECREES', 'OF', 'KINGS', 'TAN', 'SACRED', 'BEAUTY', 'BLUNT', 'THE', 'SHARPEST', 'INTENSE', 'DIVERT', 'STRONG', 'MINDS', 'TO', 'THE', 'COURSE', 'OF', 'ALTERING', 'THINGS', 'ALAS', 'WHY', 'FEARING', 'OF', "TIME'S", 'TYRANNY', 'MIGHT', 'I', 'NOT', 'THEN', 'SAY', 'NOW', 'I', 'LOVE', 'YOU', 'BEST', 'WHEN', 'I', 'WAS', 'CERTAIN', 'OR', 'IN', 'CERTAINTY', 'CROWNING', 'THE', 'PRESENT', 'DOUBTING', 'OF', 'THE', 'REST'] +121-123859-0003-2576: ref=['LOVE', 'IS', 'A', 'BABE', 'THEN', 'MIGHT', 'I', 'NOT', 'SAY', 'SO', 'TO', 'GIVE', 'FULL', 'GROWTH', 'TO', 'THAT', 'WHICH', 'STILL', 'DOTH', 'GROW'] +121-123859-0003-2576: hyp=['LOVE', 'IS', 'A', 'BABE', 'THEN', 'MIGHT', 'I', 'NOT', 'SAY', 'SO', 'TO', 'GIVE', 'FULL', 'GROWTH', 'TO', 'THAT', 'WHICH', 'STILL', 'DOTH', 'GROW'] +121-123859-0004-2577: ref=['SO', 'I', 'RETURN', "REBUK'D", 'TO', 'MY', 'CONTENT', 'AND', 'GAIN', 'BY', 'ILL', 'THRICE', 'MORE', 'THAN', 'I', 'HAVE', 'SPENT'] +121-123859-0004-2577: hyp=['SO', 'I', 'RETURN', 'REBUKED', 'TO', 'MY', 'CONTENT', 'AND', 'GAIN', 'BY', 'ILL', 'THRICE', 'MORE', 'THAN', 'I', 'HAVE', 'SPENT'] +121-127105-0000-2578: ref=['IT', 'WAS', 'THIS', 'OBSERVATION', 'THAT', 'DREW', 'FROM', 'DOUGLAS', 'NOT', 'IMMEDIATELY', 'BUT', 'LATER', 'IN', 'THE', 'EVENING', 'A', 'REPLY', 'THAT', 'HAD', 'THE', 'INTERESTING', 'CONSEQUENCE', 'TO', 'WHICH', 'I', 'CALL', 'ATTENTION'] +121-127105-0000-2578: hyp=['IT', 'WAS', 'THIS', 'OBSERVATION', 'THAT', 'DREW', 'FROM', 'DOUGLAS', 'NOT', 'IMMEDIATELY', 'BUT', 'LATER', 'IN', 'THE', 'EVENING', 'A', 'REPLY', 'THAT', 'HAD', 'THE', 'INTERESTING', 'CONSEQUENCE', 'TO', 'WHICH', 'I', 'CALL', 'ATTENTION'] +121-127105-0001-2579: ref=['SOMEONE', 'ELSE', 'TOLD', 'A', 'STORY', 'NOT', 'PARTICULARLY', 'EFFECTIVE', 'WHICH', 'I', 'SAW', 'HE', 'WAS', 'NOT', 'FOLLOWING'] +121-127105-0001-2579: hyp=['SOME', 'ONE', 'ELSE', 'TOLD', 'A', 'STORY', 'NOT', 
'PARTICULARLY', 'EFFECTIVE', 'WHICH', 'I', 'SAW', 'HE', 'WAS', 'NOT', 'FOLLOWING'] +121-127105-0002-2580: ref=['CRIED', 'ONE', 'OF', 'THE', 'WOMEN', 'HE', 'TOOK', 'NO', 'NOTICE', 'OF', 'HER', 'HE', 'LOOKED', 'AT', 'ME', 'BUT', 'AS', 'IF', 'INSTEAD', 'OF', 'ME', 'HE', 'SAW', 'WHAT', 'HE', 'SPOKE', 'OF'] +121-127105-0002-2580: hyp=['CRIED', 'ONE', 'OF', 'THE', 'WOMEN', 'HE', 'TOOK', 'NO', 'NOTICE', 'OF', 'HER', 'HE', 'LOOKED', 'AT', 'ME', 'BUT', 'AS', 'IF', 'INSTEAD', 'OF', 'ME', 'HE', 'SAW', 'WHAT', 'HE', 'SPOKE', 'OF'] +121-127105-0003-2581: ref=['THERE', 'WAS', 'A', 'UNANIMOUS', 'GROAN', 'AT', 'THIS', 'AND', 'MUCH', 'REPROACH', 'AFTER', 'WHICH', 'IN', 'HIS', 'PREOCCUPIED', 'WAY', 'HE', 'EXPLAINED'] +121-127105-0003-2581: hyp=['THERE', 'WAS', 'A', 'UNANIMOUS', 'GROAN', 'AT', 'THIS', 'AND', 'MUCH', 'REPROACH', 'AFTER', 'WHICH', 'IN', 'HIS', 'PREOCCUPIED', 'WAY', 'HE', 'EXPLAINED'] +121-127105-0004-2582: ref=['THE', "STORY'S", 'WRITTEN'] +121-127105-0004-2582: hyp=['THE', 'STORIES', 'WRITTEN'] +121-127105-0005-2583: ref=['I', 'COULD', 'WRITE', 'TO', 'MY', 'MAN', 'AND', 'ENCLOSE', 'THE', 'KEY', 'HE', 'COULD', 'SEND', 'DOWN', 'THE', 'PACKET', 'AS', 'HE', 'FINDS', 'IT'] +121-127105-0005-2583: hyp=['THY', 'GOOD', 'RIGHT', 'TO', 'MY', 'MAN', 'AND', 'ENCLOSE', 'THE', 'KEY', 'HE', 'COULD', 'SEND', 'DOWN', 'THE', 'PACKET', 'AS', 'HE', 'FINDS', 'IT'] +121-127105-0006-2584: ref=['THE', 'OTHERS', 'RESENTED', 'POSTPONEMENT', 'BUT', 'IT', 'WAS', 'JUST', 'HIS', 'SCRUPLES', 'THAT', 'CHARMED', 'ME'] +121-127105-0006-2584: hyp=['THE', 'OTHERS', 'RESENTED', 'POSTPONEMENT', 'BUT', 'IT', 'WAS', 'JUST', 'HIS', 'SCRUPLES', 'THAT', 'CHARMED', 'ME'] +121-127105-0007-2585: ref=['TO', 'THIS', 'HIS', 'ANSWER', 'WAS', 'PROMPT', 'OH', 'THANK', 'GOD', 'NO', 'AND', 'IS', 'THE', 'RECORD', 'YOURS'] +121-127105-0007-2585: hyp=['TO', 'THIS', 'HIS', 'ANSWER', 'WAS', 'PROMPT', 'OH', 'THANK', 'GOD', 'NO', 'AND', 'IS', 'THE', 'RECORD', 'YOURS'] +121-127105-0008-2586: ref=['HE', 'HUNG', 'FIRE', 'AGAIN', 'A', "WOMAN'S"] +121-127105-0008-2586: hyp=['HE', 'HUNG', 'FIRE', 'AGAIN', 'A', "WOMAN'S"] +121-127105-0009-2587: ref=['SHE', 'HAS', 'BEEN', 'DEAD', 'THESE', 'TWENTY', 'YEARS'] +121-127105-0009-2587: hyp=['SHE', 'HAS', 'BEEN', 'DEAD', 'THESE', 'TWENTY', 'YEARS'] +121-127105-0010-2588: ref=['SHE', 'SENT', 'ME', 'THE', 'PAGES', 'IN', 'QUESTION', 'BEFORE', 'SHE', 'DIED'] +121-127105-0010-2588: hyp=['SHE', 'SENT', 'ME', 'THE', 'PAGES', 'IN', 'QUESTION', 'BEFORE', 'SHE', 'DIED'] +121-127105-0011-2589: ref=['SHE', 'WAS', 'THE', 'MOST', 'AGREEABLE', 'WOMAN', "I'VE", 'EVER', 'KNOWN', 'IN', 'HER', 'POSITION', 'SHE', 'WOULD', 'HAVE', 'BEEN', 'WORTHY', 'OF', 'ANY', 'WHATEVER'] +121-127105-0011-2589: hyp=['SHE', 'WAS', 'THE', 'MOST', 'AGREEABLE', 'WOMAN', "I'VE", 'EVER', 'KNOWN', 'IN', 'HER', 'POSITION', 'SHE', 'WOULD', 'HAVE', 'BEEN', 'WORTHY', 'OF', 'ANY', 'WHATEVER'] +121-127105-0012-2590: ref=['IT', "WASN'T", 'SIMPLY', 'THAT', 'SHE', 'SAID', 'SO', 'BUT', 'THAT', 'I', 'KNEW', 'SHE', "HADN'T", 'I', 'WAS', 'SURE', 'I', 'COULD', 'SEE'] +121-127105-0012-2590: hyp=["TWASN'T", 'SIMPLY', 'THAT', 'SHE', 'SAID', 'SO', 'BUT', 'THAT', 'I', 'KNEW', 'SHE', "HADN'T", 'I', 'WAS', 'SURE', 'I', 'COULD', 'SEE'] +121-127105-0013-2591: ref=["YOU'LL", 'EASILY', 'JUDGE', 'WHY', 'WHEN', 'YOU', 'HEAR', 'BECAUSE', 'THE', 'THING', 'HAD', 'BEEN', 'SUCH', 'A', 'SCARE', 'HE', 'CONTINUED', 'TO', 'FIX', 'ME'] +121-127105-0013-2591: hyp=["YOU'LL", 'EASILY', 'JUDGE', 'WHY', 'WHEN', 'YOU', 'HEAR', 'BECAUSE', 'THE', 'THING', 'HAD', 'BEEN', 'SUCH', 'A', 'SCARE', 'HE', 
'CONTINUED', 'TO', 'FIX', 'ME'] +121-127105-0014-2592: ref=['YOU', 'ARE', 'ACUTE'] +121-127105-0014-2592: hyp=['YOU', 'ARE', 'ACUTE'] +121-127105-0015-2593: ref=['HE', 'QUITTED', 'THE', 'FIRE', 'AND', 'DROPPED', 'BACK', 'INTO', 'HIS', 'CHAIR'] +121-127105-0015-2593: hyp=['HE', 'QUITTED', 'THE', 'FIRE', 'AND', 'DROPPED', 'BACK', 'INTO', 'HIS', 'CHAIR'] +121-127105-0016-2594: ref=['PROBABLY', 'NOT', 'TILL', 'THE', 'SECOND', 'POST'] +121-127105-0016-2594: hyp=['PROBABLY', 'NOT', 'TILL', 'THE', 'SECOND', 'POST'] +121-127105-0017-2595: ref=['IT', 'WAS', 'ALMOST', 'THE', 'TONE', 'OF', 'HOPE', 'EVERYBODY', 'WILL', 'STAY'] +121-127105-0017-2595: hyp=['IT', 'WAS', 'ALMOST', 'THE', 'TONE', 'OF', 'HOPE', 'EVERYBODY', 'WILL', 'STAY'] +121-127105-0018-2596: ref=['CRIED', 'THE', 'LADIES', 'WHOSE', 'DEPARTURE', 'HAD', 'BEEN', 'FIXED'] +121-127105-0018-2596: hyp=['CRIED', 'THE', 'LADIES', 'WHOSE', 'DEPARTURE', 'HAD', 'BEEN', 'FIXED'] +121-127105-0019-2597: ref=['MISSUS', 'GRIFFIN', 'HOWEVER', 'EXPRESSED', 'THE', 'NEED', 'FOR', 'A', 'LITTLE', 'MORE', 'LIGHT'] +121-127105-0019-2597: hyp=['MISSUS', 'GRIFFIN', 'HOWEVER', 'EXPRESSED', 'THE', 'NEED', 'FOR', 'LITTLE', 'MORE', 'LIGHT'] +121-127105-0020-2598: ref=['WHO', 'WAS', 'IT', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'THE', 'STORY', 'WILL', 'TELL', 'I', 'TOOK', 'UPON', 'MYSELF', 'TO', 'REPLY', 'OH', 'I', "CAN'T", 'WAIT', 'FOR', 'THE', 'STORY', 'THE', 'STORY', "WON'T", 'TELL', 'SAID', 'DOUGLAS', 'NOT', 'IN', 'ANY', 'LITERAL', 'VULGAR', 'WAY', "MORE'S", 'THE', 'PITY', 'THEN'] +121-127105-0020-2598: hyp=['WHO', 'WAS', 'IT', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'THE', 'STORY', 'WILL', 'TELL', 'I', 'TOOK', 'UPON', 'MYSELF', 'TO', 'REPLY', 'OH', 'I', "CAN'T", 'WAIT', 'FOR', 'THE', 'STORY', 'THE', 'STORY', "WON'T", 'TELL', 'SAID', 'DOUGLAS', 'NOT', 'IN', 'ANY', 'LITERAL', 'VULGAR', 'WAY', "MORE'S", 'THE', 'PITY', 'THEN'] +121-127105-0021-2599: ref=["WON'T", 'YOU', 'TELL', 'DOUGLAS'] +121-127105-0021-2599: hyp=["WON'T", 'YOU', 'TELL', 'DOUGLAS'] +121-127105-0022-2600: ref=['WELL', 'IF', 'I', "DON'T", 'KNOW', 'WHO', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'I', 'KNOW', 'WHO', 'HE', 'WAS'] +121-127105-0022-2600: hyp=['FOR', 'IF', 'I', "DON'T", 'KNOW', 'WHO', 'SHE', 'WAS', 'IN', 'LOVE', 'WITH', 'I', 'KNOW', 'WHO', 'HE', 'WAS'] +121-127105-0023-2601: ref=['LET', 'ME', 'SAY', 'HERE', 'DISTINCTLY', 'TO', 'HAVE', 'DONE', 'WITH', 'IT', 'THAT', 'THIS', 'NARRATIVE', 'FROM', 'AN', 'EXACT', 'TRANSCRIPT', 'OF', 'MY', 'OWN', 'MADE', 'MUCH', 'LATER', 'IS', 'WHAT', 'I', 'SHALL', 'PRESENTLY', 'GIVE'] +121-127105-0023-2601: hyp=['LET', 'ME', 'SAY', 'HERE', 'DISTINCTLY', 'TO', 'HAVE', 'DONE', 'WITH', 'IT', 'THAT', 'THIS', 'NARRATIVE', 'FROM', 'AN', 'EXACT', 'TRANSCRIPT', 'OF', 'MY', 'OWN', 'MADE', 'MUCH', 'LATER', 'IS', 'WHAT', 'I', 'SHALL', 'PRESENTLY', 'GIVE'] +121-127105-0024-2602: ref=['POOR', 'DOUGLAS', 'BEFORE', 'HIS', 'DEATH', 'WHEN', 'IT', 'WAS', 'IN', 'SIGHT', 'COMMITTED', 'TO', 'ME', 'THE', 'MANUSCRIPT', 'THAT', 'REACHED', 'HIM', 'ON', 'THE', 'THIRD', 'OF', 'THESE', 'DAYS', 'AND', 'THAT', 'ON', 'THE', 'SAME', 'SPOT', 'WITH', 'IMMENSE', 'EFFECT', 'HE', 'BEGAN', 'TO', 'READ', 'TO', 'OUR', 'HUSHED', 'LITTLE', 'CIRCLE', 'ON', 'THE', 'NIGHT', 'OF', 'THE', 'FOURTH'] +121-127105-0024-2602: hyp=['POOR', 'DOUGLAS', 'BEFORE', 'HIS', 'DEATH', 'WHEN', 'IT', 'WAS', 'IN', 'SIGHT', 'COMMITTED', 'TO', 'ME', 'THE', 'MANUSCRIPT', 'THAT', 'REACHED', 'HIM', 'ON', 'THE', 'THIRD', 'OF', 'THESE', 'DAYS', 'AND', 'THAT', 'ON', 'THE', 'SAME', 'SPOT', 'WITH', 'IMMENSE', 'EFFECT', 'HE', 'BEGAN', 'TO', 'READ', 
'TO', 'OUR', 'HUSHED', 'LITTLE', 'CIRCLE', 'ON', 'THE', 'NIGHT', 'OF', 'THE', 'FOURTH'] +121-127105-0025-2603: ref=['THE', 'DEPARTING', 'LADIES', 'WHO', 'HAD', 'SAID', 'THEY', 'WOULD', 'STAY', "DIDN'T", 'OF', 'COURSE', 'THANK', 'HEAVEN', 'STAY', 'THEY', 'DEPARTED', 'IN', 'CONSEQUENCE', 'OF', 'ARRANGEMENTS', 'MADE', 'IN', 'A', 'RAGE', 'OF', 'CURIOSITY', 'AS', 'THEY', 'PROFESSED', 'PRODUCED', 'BY', 'THE', 'TOUCHES', 'WITH', 'WHICH', 'HE', 'HAD', 'ALREADY', 'WORKED', 'US', 'UP'] +121-127105-0025-2603: hyp=['THE', 'DEPARTING', 'LADIES', 'WHO', 'HAD', 'SAID', 'THEY', 'WOULD', 'STAY', "DIDN'T", 'OF', 'COURSE', 'THANK', 'HEAVEN', 'STAY', 'THEY', 'DEPARTED', 'IN', 'CONSEQUENCE', 'OF', 'ARRANGEMENTS', 'MADE', 'IN', 'A', 'RAGE', 'OF', 'CURIOSITY', 'AS', 'THEY', 'PROFESSED', 'PRODUCED', 'BY', 'THE', 'TOUCHES', 'WITH', 'WHICH', 'HE', 'HAD', 'ALREADY', 'WORKED', 'US', 'UP'] +121-127105-0026-2604: ref=['THE', 'FIRST', 'OF', 'THESE', 'TOUCHES', 'CONVEYED', 'THAT', 'THE', 'WRITTEN', 'STATEMENT', 'TOOK', 'UP', 'THE', 'TALE', 'AT', 'A', 'POINT', 'AFTER', 'IT', 'HAD', 'IN', 'A', 'MANNER', 'BEGUN'] +121-127105-0026-2604: hyp=['THE', 'FIRST', 'OF', 'THESE', 'TOUCHES', 'CONVEYED', 'THAT', 'THE', 'WRITTEN', 'STATEMENT', 'TOOK', 'UP', 'THE', 'TALE', 'AT', 'A', 'POINT', 'AFTER', 'IT', 'HAD', 'IN', 'A', 'MANNER', 'BEGUN'] +121-127105-0027-2605: ref=['HE', 'HAD', 'FOR', 'HIS', 'OWN', 'TOWN', 'RESIDENCE', 'A', 'BIG', 'HOUSE', 'FILLED', 'WITH', 'THE', 'SPOILS', 'OF', 'TRAVEL', 'AND', 'THE', 'TROPHIES', 'OF', 'THE', 'CHASE', 'BUT', 'IT', 'WAS', 'TO', 'HIS', 'COUNTRY', 'HOME', 'AN', 'OLD', 'FAMILY', 'PLACE', 'IN', 'ESSEX', 'THAT', 'HE', 'WISHED', 'HER', 'IMMEDIATELY', 'TO', 'PROCEED'] +121-127105-0027-2605: hyp=['HE', 'HAD', 'FOR', 'HIS', 'OWN', 'TOWN', 'RESIDENCE', 'A', 'BIG', 'HOUSE', 'FILLED', 'WITH', 'THE', 'SPOILS', 'OF', 'TRAVEL', 'AND', 'THE', 'TROPHIES', 'OF', 'THE', 'CHASE', 'BUT', 'IT', 'WAS', 'TO', 'HIS', 'COUNTRY', 'HOME', 'AN', 'OLD', 'FAMILY', 'PLACE', 'IN', 'ESSEX', 'THAT', 'HE', 'WISHED', 'HER', 'IMMEDIATELY', 'TO', 'PROCEED'] +121-127105-0028-2606: ref=['THE', 'AWKWARD', 'THING', 'WAS', 'THAT', 'THEY', 'HAD', 'PRACTICALLY', 'NO', 'OTHER', 'RELATIONS', 'AND', 'THAT', 'HIS', 'OWN', 'AFFAIRS', 'TOOK', 'UP', 'ALL', 'HIS', 'TIME'] +121-127105-0028-2606: hyp=['THE', 'AWKWARD', 'THING', 'WAS', 'THAT', 'THEY', 'HAD', 'PRACTICALLY', 'NO', 'OTHER', 'RELATIONS', 'AND', 'THAT', 'HIS', 'OWN', 'AFFAIRS', 'TOOK', 'UP', 'ALL', 'HIS', 'TIME'] +121-127105-0029-2607: ref=['THERE', 'WERE', 'PLENTY', 'OF', 'PEOPLE', 'TO', 'HELP', 'BUT', 'OF', 'COURSE', 'THE', 'YOUNG', 'LADY', 'WHO', 'SHOULD', 'GO', 'DOWN', 'AS', 'GOVERNESS', 'WOULD', 'BE', 'IN', 'SUPREME', 'AUTHORITY'] +121-127105-0029-2607: hyp=['THERE', 'WERE', 'PLENTY', 'OF', 'PEOPLE', 'TO', 'HELP', 'BUT', 'OF', 'COURSE', 'THE', 'YOUNG', 'LADY', 'WHO', 'SHOULD', 'GO', 'DOWN', 'AS', 'GOVERNESS', 'WOULD', 'BE', 'IN', 'SUPREME', 'AUTHORITY'] +121-127105-0030-2608: ref=['I', "DON'T", 'ANTICIPATE'] +121-127105-0030-2608: hyp=['I', "DON'T", 'ANTICIPATE'] +121-127105-0031-2609: ref=['SHE', 'WAS', 'YOUNG', 'UNTRIED', 'NERVOUS', 'IT', 'WAS', 'A', 'VISION', 'OF', 'SERIOUS', 'DUTIES', 'AND', 'LITTLE', 'COMPANY', 'OF', 'REALLY', 'GREAT', 'LONELINESS'] +121-127105-0031-2609: hyp=['SHE', 'WAS', 'YOUNG', 'UNTRIED', 'NERVOUS', 'IT', 'WAS', 'A', 'VISION', 'OF', 'SERIOUS', 'DUTIES', 'IN', 'LITTLE', 'COMPANY', 'OF', 'REALLY', 'GREAT', 'LONELINESS'] +121-127105-0032-2610: ref=['YES', 'BUT', "THAT'S", 'JUST', 'THE', 'BEAUTY', 'OF', 'HER', 'PASSION'] +121-127105-0032-2610: hyp=['YES', 'BUT', 
"THAT'S", 'JUST', 'THE', 'BEAUTY', 'OF', 'HER', 'PASSION'] +121-127105-0033-2611: ref=['IT', 'WAS', 'THE', 'BEAUTY', 'OF', 'IT'] +121-127105-0033-2611: hyp=['IT', 'WAS', 'THE', 'BEAUTY', 'OF', 'IT'] +121-127105-0034-2612: ref=['IT', 'SOUNDED', 'DULL', 'IT', 'SOUNDED', 'STRANGE', 'AND', 'ALL', 'THE', 'MORE', 'SO', 'BECAUSE', 'OF', 'HIS', 'MAIN', 'CONDITION', 'WHICH', 'WAS'] +121-127105-0034-2612: hyp=['IT', 'SOUNDED', 'DULL', 'BUT', 'SOUNDED', 'STRANGE', 'AND', 'ALL', 'THE', 'MORE', 'SO', 'BECAUSE', 'OF', 'HIS', 'MAIN', 'CONDITION', 'WHICH', 'WAS'] +121-127105-0035-2613: ref=['SHE', 'PROMISED', 'TO', 'DO', 'THIS', 'AND', 'SHE', 'MENTIONED', 'TO', 'ME', 'THAT', 'WHEN', 'FOR', 'A', 'MOMENT', 'DISBURDENED', 'DELIGHTED', 'HE', 'HELD', 'HER', 'HAND', 'THANKING', 'HER', 'FOR', 'THE', 'SACRIFICE', 'SHE', 'ALREADY', 'FELT', 'REWARDED'] +121-127105-0035-2613: hyp=['SHE', 'PROMISED', 'TO', 'DO', 'THIS', 'AND', 'SHE', 'MENTIONED', 'TO', 'ME', 'THAT', 'WHEN', 'FOR', 'A', 'MOMENT', 'DISBURDENED', 'DELIGHTED', 'HE', 'HELD', 'HER', 'HAND', 'THANKING', 'HER', 'FOR', 'THE', 'SACRIFICE', 'SHE', 'ALREADY', 'FELT', 'REWARDED'] +121-127105-0036-2614: ref=['BUT', 'WAS', 'THAT', 'ALL', 'HER', 'REWARD', 'ONE', 'OF', 'THE', 'LADIES', 'ASKED'] +121-127105-0036-2614: hyp=['BUT', 'WAS', 'THAT', 'ALL', 'HER', 'REWARD', 'ONE', 'OF', 'THE', 'LADIES', 'ASKED'] +1221-135766-0000-1305: ref=['HOW', 'STRANGE', 'IT', 'SEEMED', 'TO', 'THE', 'SAD', 'WOMAN', 'AS', 'SHE', 'WATCHED', 'THE', 'GROWTH', 'AND', 'THE', 'BEAUTY', 'THAT', 'BECAME', 'EVERY', 'DAY', 'MORE', 'BRILLIANT', 'AND', 'THE', 'INTELLIGENCE', 'THAT', 'THREW', 'ITS', 'QUIVERING', 'SUNSHINE', 'OVER', 'THE', 'TINY', 'FEATURES', 'OF', 'THIS', 'CHILD'] +1221-135766-0000-1305: hyp=['HOW', 'STRANGE', 'IT', 'SEEMED', 'TO', 'THE', 'SAD', 'WOMAN', 'AS', 'SHE', 'WATCHED', 'THE', 'GROWTH', 'AND', 'THE', 'BEAUTY', 'THAT', 'BECAME', 'EVERY', 'DAY', 'MORE', 'BRILLIANT', 'AND', 'THE', 'INTELLIGENCE', 'THAT', 'THREW', 'ITS', 'QUIVERING', 'SUNSHINE', 'OVER', 'THE', 'TINY', 'FEATURES', 'OF', 'THIS', 'CHILD'] +1221-135766-0001-1306: ref=['GOD', 'AS', 'A', 'DIRECT', 'CONSEQUENCE', 'OF', 'THE', 'SIN', 'WHICH', 'MAN', 'THUS', 'PUNISHED', 'HAD', 'GIVEN', 'HER', 'A', 'LOVELY', 'CHILD', 'WHOSE', 'PLACE', 'WAS', 'ON', 'THAT', 'SAME', 'DISHONOURED', 'BOSOM', 'TO', 'CONNECT', 'HER', 'PARENT', 'FOR', 'EVER', 'WITH', 'THE', 'RACE', 'AND', 'DESCENT', 'OF', 'MORTALS', 'AND', 'TO', 'BE', 'FINALLY', 'A', 'BLESSED', 'SOUL', 'IN', 'HEAVEN'] +1221-135766-0001-1306: hyp=['GOD', 'AS', 'A', 'DIRECT', 'CONSEQUENCE', 'OF', 'THE', 'SIN', 'WHICH', 'MAN', 'THUS', 'PUNISHED', 'HAD', 'GIVEN', 'HER', 'A', 'LOVELY', 'CHILD', 'WHOSE', 'PLACE', 'WAS', 'ON', 'THAT', 'SAME', 'DISHONORED', 'BOSOM', 'TO', 'CONNECT', 'HER', 'PARENT', 'FOR', 'EVER', 'WITH', 'THE', 'RACE', 'AND', 'DESCENT', 'OF', 'MORTALS', 'AND', 'TO', 'BE', 'FINALLY', 'A', 'BLESSED', 'SOUL', 'IN', 'HEAVEN'] +1221-135766-0002-1307: ref=['YET', 'THESE', 'THOUGHTS', 'AFFECTED', 'HESTER', 'PRYNNE', 'LESS', 'WITH', 'HOPE', 'THAN', 'APPREHENSION'] +1221-135766-0002-1307: hyp=['YET', 'THESE', 'THOUGHTS', 'AFFECTED', 'HESTER', 'PRYNNE', 'LESS', 'WITH', 'HOPE', 'THAN', 'APPREHENSION'] +1221-135766-0003-1308: ref=['THE', 'CHILD', 'HAD', 'A', 'NATIVE', 'GRACE', 'WHICH', 'DOES', 'NOT', 'INVARIABLY', 'CO', 'EXIST', 'WITH', 'FAULTLESS', 'BEAUTY', 'ITS', 'ATTIRE', 'HOWEVER', 'SIMPLE', 'ALWAYS', 'IMPRESSED', 'THE', 'BEHOLDER', 'AS', 'IF', 'IT', 'WERE', 'THE', 'VERY', 'GARB', 'THAT', 'PRECISELY', 'BECAME', 'IT', 'BEST'] +1221-135766-0003-1308: hyp=['THE', 'CHILD', 'HAD', 
'A', 'NATIVE', 'GRACE', 'WHICH', 'DOES', 'NOT', 'INVARIABLY', 'COEXIST', 'WITH', 'FAULTLESS', 'BEAUTY', 'ITS', 'ATTIRE', 'HOWEVER', 'SIMPLE', 'ALWAYS', 'IMPRESS', 'THE', 'BEHOLDER', 'AS', 'IF', 'IT', 'WERE', 'THE', 'VERY', 'GARB', 'THAT', 'PRECISELY', 'BECAME', 'IT', 'BEST'] +1221-135766-0004-1309: ref=['THIS', 'OUTWARD', 'MUTABILITY', 'INDICATED', 'AND', 'DID', 'NOT', 'MORE', 'THAN', 'FAIRLY', 'EXPRESS', 'THE', 'VARIOUS', 'PROPERTIES', 'OF', 'HER', 'INNER', 'LIFE'] +1221-135766-0004-1309: hyp=['THIS', 'OUTWARD', 'MUTABILITY', 'INDICATED', 'AND', 'DID', 'NOT', 'MORE', 'THAN', 'FAIRLY', 'EXPRESS', 'THE', 'VARIOUS', 'PROPERTIES', 'OF', 'HER', 'INNER', 'LIFE'] +1221-135766-0005-1310: ref=['HESTER', 'COULD', 'ONLY', 'ACCOUNT', 'FOR', 'THE', "CHILD'S", 'CHARACTER', 'AND', 'EVEN', 'THEN', 'MOST', 'VAGUELY', 'AND', 'IMPERFECTLY', 'BY', 'RECALLING', 'WHAT', 'SHE', 'HERSELF', 'HAD', 'BEEN', 'DURING', 'THAT', 'MOMENTOUS', 'PERIOD', 'WHILE', 'PEARL', 'WAS', 'IMBIBING', 'HER', 'SOUL', 'FROM', 'THE', 'SPIRITUAL', 'WORLD', 'AND', 'HER', 'BODILY', 'FRAME', 'FROM', 'ITS', 'MATERIAL', 'OF', 'EARTH'] +1221-135766-0005-1310: hyp=['HESTER', 'COULD', 'ONLY', 'ACCOUNT', 'FOR', 'THE', "CHILD'S", 'CHARACTER', 'AND', 'EVEN', 'THEN', 'MOST', 'VAGUELY', 'AND', 'IMPERFECTLY', 'BY', 'RECALLING', 'WHAT', 'SHE', 'HERSELF', 'HAD', 'BEEN', 'DURING', 'THAT', 'MOMENTOUS', 'PERIOD', 'WHILE', 'PEARL', 'WAS', 'IMBIBING', 'HER', 'SOUL', 'FROM', 'THE', 'SPIRITUAL', 'WORLD', 'AND', 'HER', 'BODILY', 'FRAME', 'FROM', 'ITS', 'MATERIAL', 'OF', 'EARTH'] +1221-135766-0006-1311: ref=['THEY', 'WERE', 'NOW', 'ILLUMINATED', 'BY', 'THE', 'MORNING', 'RADIANCE', 'OF', 'A', 'YOUNG', "CHILD'S", 'DISPOSITION', 'BUT', 'LATER', 'IN', 'THE', 'DAY', 'OF', 'EARTHLY', 'EXISTENCE', 'MIGHT', 'BE', 'PROLIFIC', 'OF', 'THE', 'STORM', 'AND', 'WHIRLWIND'] +1221-135766-0006-1311: hyp=['THEY', 'WERE', 'NOW', 'ILLUMINATED', 'BY', 'THE', 'MORNING', 'RADIANCE', 'OF', 'A', 'YOUNG', "CHILD'S", 'DISPOSITION', 'BUT', 'LATER', 'IN', 'THE', 'DAY', 'OF', 'EARTHLY', 'EXISTENCE', 'MIGHT', 'BE', 'PROLIFIC', 'OF', 'THE', 'STORM', 'AND', 'WHIRLWIND'] +1221-135766-0007-1312: ref=['HESTER', 'PRYNNE', 'NEVERTHELESS', 'THE', 'LOVING', 'MOTHER', 'OF', 'THIS', 'ONE', 'CHILD', 'RAN', 'LITTLE', 'RISK', 'OF', 'ERRING', 'ON', 'THE', 'SIDE', 'OF', 'UNDUE', 'SEVERITY'] +1221-135766-0007-1312: hyp=['HESTER', 'PRYNNE', 'NEVERTHELESS', 'THE', 'LOVING', 'MOTHER', 'OF', 'THIS', 'ONE', 'CHILD', 'RAN', 'LITTLE', 'RISK', 'OF', 'ERRING', 'ON', 'THE', 'SIDE', 'OF', 'UNDUE', 'SEVERITY'] +1221-135766-0008-1313: ref=['MINDFUL', 'HOWEVER', 'OF', 'HER', 'OWN', 'ERRORS', 'AND', 'MISFORTUNES', 'SHE', 'EARLY', 'SOUGHT', 'TO', 'IMPOSE', 'A', 'TENDER', 'BUT', 'STRICT', 'CONTROL', 'OVER', 'THE', 'INFANT', 'IMMORTALITY', 'THAT', 'WAS', 'COMMITTED', 'TO', 'HER', 'CHARGE'] +1221-135766-0008-1313: hyp=['MINDFUL', 'HOWEVER', 'OF', 'HER', 'OWN', 'ERRORS', 'AND', 'MISFORTUNES', 'SHE', 'EARLY', 'SOUGHT', 'TO', 'IMPOSE', 'A', 'TENDER', 'BUT', 'STRICT', 'CONTROL', 'OVER', 'THE', 'INFANT', 'IMMORTALITY', 'THAT', 'WAS', 'COMMITTED', 'TO', 'HER', 'CHARGE'] +1221-135766-0009-1314: ref=['AS', 'TO', 'ANY', 'OTHER', 'KIND', 'OF', 'DISCIPLINE', 'WHETHER', 'ADDRESSED', 'TO', 'HER', 'MIND', 'OR', 'HEART', 'LITTLE', 'PEARL', 'MIGHT', 'OR', 'MIGHT', 'NOT', 'BE', 'WITHIN', 'ITS', 'REACH', 'IN', 'ACCORDANCE', 'WITH', 'THE', 'CAPRICE', 'THAT', 'RULED', 'THE', 'MOMENT'] +1221-135766-0009-1314: hyp=['AS', 'TO', 'ANY', 'OTHER', 'KIND', 'OF', 'DISCIPLINE', 'WHETHER', 'ADDRESSED', 'TO', 'HER', 'MIND', 'OR', 'HEART', 'LITTLE', 'PEARL', 
'MIGHT', 'OR', 'MIGHT', 'NOT', 'BE', 'WITHIN', 'ITS', 'REACH', 'IN', 'ACCORDANCE', 'WITH', 'THE', 'CAPRICE', 'THAT', 'RULED', 'THE', 'MOMENT'] +1221-135766-0010-1315: ref=['IT', 'WAS', 'A', 'LOOK', 'SO', 'INTELLIGENT', 'YET', 'INEXPLICABLE', 'PERVERSE', 'SOMETIMES', 'SO', 'MALICIOUS', 'BUT', 'GENERALLY', 'ACCOMPANIED', 'BY', 'A', 'WILD', 'FLOW', 'OF', 'SPIRITS', 'THAT', 'HESTER', 'COULD', 'NOT', 'HELP', 'QUESTIONING', 'AT', 'SUCH', 'MOMENTS', 'WHETHER', 'PEARL', 'WAS', 'A', 'HUMAN', 'CHILD'] +1221-135766-0010-1315: hyp=['IT', 'WAS', 'A', 'LOOK', 'SO', 'INTELLIGENT', 'YET', 'INEXPLICABLE', 'PERVERSE', 'SOMETIMES', 'SO', 'MALICIOUS', 'BUT', 'GENERALLY', 'ACCOMPANIED', 'BY', 'A', 'WILD', 'FLOW', 'OF', 'SPIRITS', 'THAT', 'HESTER', 'COULD', 'NOT', 'HELP', 'QUESTIONING', 'AT', 'SUCH', 'MOMENTS', 'WHETHER', 'PEARL', 'WAS', 'A', 'HUMAN', 'CHILD'] +1221-135766-0011-1316: ref=['BEHOLDING', 'IT', 'HESTER', 'WAS', 'CONSTRAINED', 'TO', 'RUSH', 'TOWARDS', 'THE', 'CHILD', 'TO', 'PURSUE', 'THE', 'LITTLE', 'ELF', 'IN', 'THE', 'FLIGHT', 'WHICH', 'SHE', 'INVARIABLY', 'BEGAN', 'TO', 'SNATCH', 'HER', 'TO', 'HER', 'BOSOM', 'WITH', 'A', 'CLOSE', 'PRESSURE', 'AND', 'EARNEST', 'KISSES', 'NOT', 'SO', 'MUCH', 'FROM', 'OVERFLOWING', 'LOVE', 'AS', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'PEARL', 'WAS', 'FLESH', 'AND', 'BLOOD', 'AND', 'NOT', 'UTTERLY', 'DELUSIVE'] +1221-135766-0011-1316: hyp=['BEHOLDING', 'IT', 'HESTER', 'WAS', 'CONSTRAINED', 'TO', 'RUSH', 'TOWARDS', 'THE', 'CHILD', 'TO', 'PURSUE', 'THE', 'LITTLE', 'ELF', 'IN', 'THE', 'FLIGHT', 'WHICH', 'SHE', 'INVARIABLY', 'BEGAN', 'TO', 'SNATCH', 'HER', 'TO', 'HER', 'BOSOM', 'WITH', 'A', 'CLOSE', 'PRESSURE', 'AND', 'EARNEST', 'KISSES', 'NOT', 'SO', 'MUCH', 'FROM', 'OVERFLOWING', 'LOVE', 'AS', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'PEARL', 'WAS', 'FLESH', 'AND', 'BLOOD', 'AND', 'NOT', 'UTTERLY', 'DELUSIVE'] +1221-135766-0012-1317: ref=['BROODING', 'OVER', 'ALL', 'THESE', 'MATTERS', 'THE', 'MOTHER', 'FELT', 'LIKE', 'ONE', 'WHO', 'HAS', 'EVOKED', 'A', 'SPIRIT', 'BUT', 'BY', 'SOME', 'IRREGULARITY', 'IN', 'THE', 'PROCESS', 'OF', 'CONJURATION', 'HAS', 'FAILED', 'TO', 'WIN', 'THE', 'MASTER', 'WORD', 'THAT', 'SHOULD', 'CONTROL', 'THIS', 'NEW', 'AND', 'INCOMPREHENSIBLE', 'INTELLIGENCE'] +1221-135766-0012-1317: hyp=['BROODING', 'OVER', 'ALL', 'THESE', 'MATTERS', 'THE', 'MOTHER', 'FELT', 'LIKE', 'ONE', 'WHO', 'HAS', 'EVOKED', 'A', 'SPIRIT', 'BUT', 'BY', 'SOME', 'IRREGULARITY', 'IN', 'THE', 'PROCESS', 'OF', 'CONJURATION', 'HAS', 'FAILED', 'TO', 'WIN', 'THE', 'MASTER', 'WORD', 'THAT', 'SHOULD', 'CONTROL', 'THIS', 'NEW', 'AND', 'INCOMPREHENSIBLE', 'INTELLIGENCE'] +1221-135766-0013-1318: ref=['PEARL', 'WAS', 'A', 'BORN', 'OUTCAST', 'OF', 'THE', 'INFANTILE', 'WORLD'] +1221-135766-0013-1318: hyp=['PEARL', 'WAS', 'A', 'BORN', 'OUTCAST', 'OF', 'THE', 'INFANTILE', 'WORLD'] +1221-135766-0014-1319: ref=['PEARL', 'SAW', 'AND', 'GAZED', 'INTENTLY', 'BUT', 'NEVER', 'SOUGHT', 'TO', 'MAKE', 'ACQUAINTANCE'] +1221-135766-0014-1319: hyp=['PEARL', 'SAUL', 'AND', 'GAZED', 'INTENTLY', 'BUT', 'NEVER', 'SOUGHT', 'TO', 'MAKE', 'ACQUAINTANCE'] +1221-135766-0015-1320: ref=['IF', 'SPOKEN', 'TO', 'SHE', 'WOULD', 'NOT', 'SPEAK', 'AGAIN'] +1221-135766-0015-1320: hyp=['IF', 'SPOKEN', 'TO', 'SHE', 'WOULD', 'NOT', 'SPEAK', 'AGAIN'] +1221-135767-0000-1280: ref=['HESTER', 'PRYNNE', 'WENT', 'ONE', 'DAY', 'TO', 'THE', 'MANSION', 'OF', 'GOVERNOR', 'BELLINGHAM', 'WITH', 'A', 'PAIR', 'OF', 'GLOVES', 'WHICH', 'SHE', 'HAD', 'FRINGED', 'AND', 'EMBROIDERED', 'TO', 'HIS', 'ORDER', 'AND', 'WHICH', 'WERE', 'TO', 'BE', 'WORN', 'ON', 
'SOME', 'GREAT', 'OCCASION', 'OF', 'STATE', 'FOR', 'THOUGH', 'THE', 'CHANCES', 'OF', 'A', 'POPULAR', 'ELECTION', 'HAD', 'CAUSED', 'THIS', 'FORMER', 'RULER', 'TO', 'DESCEND', 'A', 'STEP', 'OR', 'TWO', 'FROM', 'THE', 'HIGHEST', 'RANK', 'HE', 'STILL', 'HELD', 'AN', 'HONOURABLE', 'AND', 'INFLUENTIAL', 'PLACE', 'AMONG', 'THE', 'COLONIAL', 'MAGISTRACY'] +1221-135767-0000-1280: hyp=['HESTER', 'PRYNNE', 'WENT', 'ONE', 'DAY', 'TO', 'THE', 'MANSION', 'OF', 'GOVERNOR', 'BELLINGHAM', 'WITH', 'A', 'PAIR', 'OF', 'GLOVES', 'WHICH', 'SHE', 'HAD', 'FRINGED', 'AND', 'EMBROIDERED', 'TO', 'HIS', 'ORDER', 'AND', 'WHICH', 'WERE', 'TO', 'BE', 'WORN', 'ON', 'SOME', 'GREAT', 'OCCASION', 'OF', 'STATE', 'FOR', 'THOUGH', 'THE', 'CHANCES', 'OF', 'A', 'POPULAR', 'ELECTION', 'HAD', 'CAUSED', 'THIS', 'FORMER', 'RULER', 'TO', 'DESCEND', 'A', 'STEP', 'OR', 'TWO', 'FROM', 'THE', 'HIGHEST', 'RANK', 'HE', 'STILL', 'HELD', 'AN', 'HONOURABLE', 'AND', 'INFLUENTIAL', 'PLACE', 'AMONG', 'THE', 'COLONIAL', 'MAGISTRACY'] +1221-135767-0001-1281: ref=['ANOTHER', 'AND', 'FAR', 'MORE', 'IMPORTANT', 'REASON', 'THAN', 'THE', 'DELIVERY', 'OF', 'A', 'PAIR', 'OF', 'EMBROIDERED', 'GLOVES', 'IMPELLED', 'HESTER', 'AT', 'THIS', 'TIME', 'TO', 'SEEK', 'AN', 'INTERVIEW', 'WITH', 'A', 'PERSONAGE', 'OF', 'SO', 'MUCH', 'POWER', 'AND', 'ACTIVITY', 'IN', 'THE', 'AFFAIRS', 'OF', 'THE', 'SETTLEMENT'] +1221-135767-0001-1281: hyp=['ANOTHER', 'AND', 'FAR', 'MORE', 'IMPORTANT', 'REASON', 'THAN', 'THE', 'DELIVERY', 'OF', 'A', 'PAIR', 'OF', 'EMBROIDERED', 'GLOVES', 'IMPELLED', 'HESTER', 'AT', 'THIS', 'TIME', 'TO', 'SEEK', 'AN', 'INTERVIEW', 'WITH', 'A', 'PERSONAGE', 'OF', 'SO', 'MUCH', 'POWER', 'AND', 'ACTIVITY', 'IN', 'THE', 'AFFAIRS', 'OF', 'THE', 'SETTLEMENT'] +1221-135767-0002-1282: ref=['AT', 'THAT', 'EPOCH', 'OF', 'PRISTINE', 'SIMPLICITY', 'HOWEVER', 'MATTERS', 'OF', 'EVEN', 'SLIGHTER', 'PUBLIC', 'INTEREST', 'AND', 'OF', 'FAR', 'LESS', 'INTRINSIC', 'WEIGHT', 'THAN', 'THE', 'WELFARE', 'OF', 'HESTER', 'AND', 'HER', 'CHILD', 'WERE', 'STRANGELY', 'MIXED', 'UP', 'WITH', 'THE', 'DELIBERATIONS', 'OF', 'LEGISLATORS', 'AND', 'ACTS', 'OF', 'STATE'] +1221-135767-0002-1282: hyp=['AT', 'THAT', 'EPOCH', 'OF', 'PRISTINE', 'SIMPLICITY', 'HOWEVER', 'MATTERS', 'OF', 'EVEN', 'SLIGHTER', 'PUBLIC', 'INTEREST', 'AND', 'OF', 'FAR', 'LESS', 'INTRINSIC', 'WEIGHT', 'THAN', 'THE', 'WELFARE', 'OF', 'HESTER', 'AND', 'HER', 'CHILD', 'WERE', 'STRANGELY', 'MIXED', 'UP', 'WITH', 'THE', 'DELIBERATIONS', 'OF', 'LEGISLATORS', 'AND', 'ACTS', 'OF', 'STATE'] +1221-135767-0003-1283: ref=['THE', 'PERIOD', 'WAS', 'HARDLY', 'IF', 'AT', 'ALL', 'EARLIER', 'THAN', 'THAT', 'OF', 'OUR', 'STORY', 'WHEN', 'A', 'DISPUTE', 'CONCERNING', 'THE', 'RIGHT', 'OF', 'PROPERTY', 'IN', 'A', 'PIG', 'NOT', 'ONLY', 'CAUSED', 'A', 'FIERCE', 'AND', 'BITTER', 'CONTEST', 'IN', 'THE', 'LEGISLATIVE', 'BODY', 'OF', 'THE', 'COLONY', 'BUT', 'RESULTED', 'IN', 'AN', 'IMPORTANT', 'MODIFICATION', 'OF', 'THE', 'FRAMEWORK', 'ITSELF', 'OF', 'THE', 'LEGISLATURE'] +1221-135767-0003-1283: hyp=['THE', 'PERIOD', 'WAS', 'HARDLY', 'IF', 'AT', 'ALL', 'EARLIER', 'THAN', 'THAT', 'OF', 'OUR', 'STORY', 'WHEN', 'A', 'DISPUTE', 'CONCERNING', 'THE', 'RIGHT', 'OF', 'PROPERTY', 'IN', 'A', 'PIG', 'NOT', 'ONLY', 'CAUSED', 'A', 'FIERCE', 'AND', 'BITTER', 'CONTEST', 'IN', 'THE', 'LEGISLATIVE', 'BODY', 'OF', 'THE', 'COLONY', 'BUT', 'RESULTED', 'IN', 'AN', 'IMPORTANT', 'MODIFICATION', 'OF', 'THE', 'FRAMEWORK', 'ITSELF', 'OF', 'THE', 'LEGISLATURE'] +1221-135767-0004-1284: ref=['WE', 'HAVE', 'SPOKEN', 'OF', "PEARL'S", 'RICH', 'AND', 'LUXURIANT', 'BEAUTY', 'A', 
'BEAUTY', 'THAT', 'SHONE', 'WITH', 'DEEP', 'AND', 'VIVID', 'TINTS', 'A', 'BRIGHT', 'COMPLEXION', 'EYES', 'POSSESSING', 'INTENSITY', 'BOTH', 'OF', 'DEPTH', 'AND', 'GLOW', 'AND', 'HAIR', 'ALREADY', 'OF', 'A', 'DEEP', 'GLOSSY', 'BROWN', 'AND', 'WHICH', 'IN', 'AFTER', 'YEARS', 'WOULD', 'BE', 'NEARLY', 'AKIN', 'TO', 'BLACK'] +1221-135767-0004-1284: hyp=['WE', 'HAVE', 'SPOKEN', 'OF', 'PEARLS', 'RICH', 'AND', 'LUXURIANT', 'BEAUTY', 'A', 'BEAUTY', 'THAT', 'SHONE', 'WITH', 'DEEP', 'AND', 'VIVID', 'TINTS', 'A', 'BRIGHT', 'COMPLEXION', 'EYES', 'POSSESSING', 'INTENSITY', 'BOTH', 'OF', 'DEPTH', 'AND', 'GLOW', 'AND', 'HAIR', 'ALREADY', 'OF', 'A', 'DEEP', 'GLOSSY', 'BROWN', 'AND', 'WHICH', 'IN', 'AFTER', 'YEARS', 'WOULD', 'BE', 'NEARLY', 'AKIN', 'TO', 'BLACK'] +1221-135767-0005-1285: ref=['IT', 'WAS', 'THE', 'SCARLET', 'LETTER', 'IN', 'ANOTHER', 'FORM', 'THE', 'SCARLET', 'LETTER', 'ENDOWED', 'WITH', 'LIFE'] +1221-135767-0005-1285: hyp=['IT', 'WAS', 'THE', 'SCARLET', 'LETTER', 'IN', 'ANOTHER', 'FORM', 'THE', 'SCARLET', 'LETTER', 'ENDOWED', 'WITH', 'LIFE'] +1221-135767-0006-1286: ref=['THE', 'MOTHER', 'HERSELF', 'AS', 'IF', 'THE', 'RED', 'IGNOMINY', 'WERE', 'SO', 'DEEPLY', 'SCORCHED', 'INTO', 'HER', 'BRAIN', 'THAT', 'ALL', 'HER', 'CONCEPTIONS', 'ASSUMED', 'ITS', 'FORM', 'HAD', 'CAREFULLY', 'WROUGHT', 'OUT', 'THE', 'SIMILITUDE', 'LAVISHING', 'MANY', 'HOURS', 'OF', 'MORBID', 'INGENUITY', 'TO', 'CREATE', 'AN', 'ANALOGY', 'BETWEEN', 'THE', 'OBJECT', 'OF', 'HER', 'AFFECTION', 'AND', 'THE', 'EMBLEM', 'OF', 'HER', 'GUILT', 'AND', 'TORTURE'] +1221-135767-0006-1286: hyp=['THE', 'MOTHER', 'HERSELF', 'AS', 'IF', 'THE', 'RED', 'IGNOMINY', 'WERE', 'SO', 'DEEPLY', 'SCORCHED', 'INTO', 'HER', 'BRAIN', 'THAT', 'ALL', 'HER', 'CONCEPTIONS', 'ASSUMED', 'ITS', 'FORM', 'HAD', 'CAREFULLY', 'WROUGHT', 'OUT', 'THE', 'SIMILITUDE', 'LAVISHING', 'MANY', 'HOURS', 'OF', 'MORBID', 'INGENUITY', 'TO', 'CREATE', 'AN', 'ANALOGY', 'BETWEEN', 'THE', 'OBJECT', 'OF', 'HER', 'AFFECTION', 'AND', 'THE', 'EMBLEM', 'OF', 'HER', 'GUILT', 'AND', 'TORTURE'] +1221-135767-0007-1287: ref=['BUT', 'IN', 'TRUTH', 'PEARL', 'WAS', 'THE', 'ONE', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'AND', 'ONLY', 'IN', 'CONSEQUENCE', 'OF', 'THAT', 'IDENTITY', 'HAD', 'HESTER', 'CONTRIVED', 'SO', 'PERFECTLY', 'TO', 'REPRESENT', 'THE', 'SCARLET', 'LETTER', 'IN', 'HER', 'APPEARANCE'] +1221-135767-0007-1287: hyp=['BUT', 'IN', 'TRUTH', 'PEARL', 'WAS', 'THE', 'ONE', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'AND', 'ONLY', 'IN', 'CONSEQUENCE', 'OF', 'THAT', 'IDENTITY', 'HAD', 'HESTER', 'CONTRIVED', 'SO', 'PERFECTLY', 'TO', 'REPRESENT', 'THE', 'SCARLET', 'LETTER', 'IN', 'HER', 'APPEARANCE'] +1221-135767-0008-1288: ref=['COME', 'THEREFORE', 'AND', 'LET', 'US', 'FLING', 'MUD', 'AT', 'THEM'] +1221-135767-0008-1288: hyp=['COME', 'THEREFORE', 'AND', 'LET', 'US', 'FLING', 'MUD', 'AT', 'THEM'] +1221-135767-0009-1289: ref=['BUT', 'PEARL', 'WHO', 'WAS', 'A', 'DAUNTLESS', 'CHILD', 'AFTER', 'FROWNING', 'STAMPING', 'HER', 'FOOT', 'AND', 'SHAKING', 'HER', 'LITTLE', 'HAND', 'WITH', 'A', 'VARIETY', 'OF', 'THREATENING', 'GESTURES', 'SUDDENLY', 'MADE', 'A', 'RUSH', 'AT', 'THE', 'KNOT', 'OF', 'HER', 'ENEMIES', 'AND', 'PUT', 'THEM', 'ALL', 'TO', 'FLIGHT'] +1221-135767-0009-1289: hyp=['BUT', 'PEARL', 'WHO', 'WAS', 'A', 'DAUNTLESS', 'CHILD', 'AFTER', 'FROWNING', 'STAMPING', 'HER', 'FOOT', 'AND', 'SHAKING', 'HER', 'LITTLE', 'HAND', 'WITH', 'A', 'VARIETY', 'OF', 'THREATENING', 'GESTURES', 'SUDDENLY', 'MADE', 'A', 'RUSH', 'AT', 'THE', 'KNOT', 'OF', 'HER', 'ENEMIES', 'AND', 'PUT', 'THEM', 'ALL', 'TO', 'FLIGHT'] 
+1221-135767-0010-1290: ref=['SHE', 'SCREAMED', 'AND', 'SHOUTED', 'TOO', 'WITH', 'A', 'TERRIFIC', 'VOLUME', 'OF', 'SOUND', 'WHICH', 'DOUBTLESS', 'CAUSED', 'THE', 'HEARTS', 'OF', 'THE', 'FUGITIVES', 'TO', 'QUAKE', 'WITHIN', 'THEM'] +1221-135767-0010-1290: hyp=['SHE', 'SCREAMED', 'AND', 'SHOUTED', 'TOO', 'WITH', 'A', 'TERRIFIC', 'VOLUME', 'OF', 'SOUND', 'WHICH', 'DOUBTLESS', 'CAUSED', 'THE', 'HEARTS', 'OF', 'THE', 'FUGITIVES', 'TO', 'QUAKE', 'WITHIN', 'THEM'] +1221-135767-0011-1291: ref=['IT', 'WAS', 'FURTHER', 'DECORATED', 'WITH', 'STRANGE', 'AND', 'SEEMINGLY', 'CABALISTIC', 'FIGURES', 'AND', 'DIAGRAMS', 'SUITABLE', 'TO', 'THE', 'QUAINT', 'TASTE', 'OF', 'THE', 'AGE', 'WHICH', 'HAD', 'BEEN', 'DRAWN', 'IN', 'THE', 'STUCCO', 'WHEN', 'NEWLY', 'LAID', 'ON', 'AND', 'HAD', 'NOW', 'GROWN', 'HARD', 'AND', 'DURABLE', 'FOR', 'THE', 'ADMIRATION', 'OF', 'AFTER', 'TIMES'] +1221-135767-0011-1291: hyp=['IT', 'WAS', 'FURTHER', 'DECORATED', 'WITH', 'STRANGE', 'AND', 'SEEMINGLY', 'CABALISTIC', 'FIGURES', 'AND', 'DIAGRAMS', 'SUITABLE', 'TO', 'THE', 'QUAINT', 'TASTE', 'OF', 'THE', 'AGE', 'WHICH', 'HAD', 'BEEN', 'DRAWN', 'IN', 'THE', 'STUCCO', 'WHEN', 'NEWLY', 'LAID', 'ON', 'AND', 'HAD', 'NOW', 'GROWN', 'HARD', 'AND', 'DURABLE', 'FOR', 'THE', 'ADMIRATION', 'OF', 'AFTER', 'TIMES'] +1221-135767-0012-1292: ref=['THEY', 'APPROACHED', 'THE', 'DOOR', 'WHICH', 'WAS', 'OF', 'AN', 'ARCHED', 'FORM', 'AND', 'FLANKED', 'ON', 'EACH', 'SIDE', 'BY', 'A', 'NARROW', 'TOWER', 'OR', 'PROJECTION', 'OF', 'THE', 'EDIFICE', 'IN', 'BOTH', 'OF', 'WHICH', 'WERE', 'LATTICE', 'WINDOWS', 'THE', 'WOODEN', 'SHUTTERS', 'TO', 'CLOSE', 'OVER', 'THEM', 'AT', 'NEED'] +1221-135767-0012-1292: hyp=['THEY', 'APPROACHED', 'THE', 'DOOR', 'WHICH', 'WAS', 'OF', 'AN', 'ARCHED', 'FORM', 'AND', 'FLANKED', 'ON', 'EACH', 'SIDE', 'BY', 'A', 'NARROW', 'TOWER', 'OR', 'PROJECTION', 'OF', 'THE', 'EDIFICE', 'IN', 'BOTH', 'OF', 'WHICH', 'WERE', 'LATTICE', 'WINDOWS', 'THE', 'WOODEN', 'SHUTTERS', 'TO', 'CLOSE', 'OVER', 'THEM', 'AT', 'NEED'] +1221-135767-0013-1293: ref=['LIFTING', 'THE', 'IRON', 'HAMMER', 'THAT', 'HUNG', 'AT', 'THE', 'PORTAL', 'HESTER', 'PRYNNE', 'GAVE', 'A', 'SUMMONS', 'WHICH', 'WAS', 'ANSWERED', 'BY', 'ONE', 'OF', 'THE', "GOVERNOR'S", 'BOND', 'SERVANT', 'A', 'FREE', 'BORN', 'ENGLISHMAN', 'BUT', 'NOW', 'A', 'SEVEN', 'YEARS', 'SLAVE'] +1221-135767-0013-1293: hyp=['LIFTING', 'THE', 'IRON', 'HAMMER', 'THAT', 'HUNG', 'AT', 'THE', 'PORTAL', 'HESTER', 'PRYNNE', 'GAVE', 'A', 'SUMMONS', 'WHICH', 'WAS', 'ANSWERED', 'BY', 'ONE', 'OF', 'THE', "GOVERNOR'S", 'BOND', 'SERVANTS', 'A', 'FREE', 'BORN', 'ENGLISHMAN', 'BUT', 'NOW', 'A', 'SEVEN', 'YEARS', 'SLAVE'] +1221-135767-0014-1294: ref=['YEA', 'HIS', 'HONOURABLE', 'WORSHIP', 'IS', 'WITHIN', 'BUT', 'HE', 'HATH', 'A', 'GODLY', 'MINISTER', 'OR', 'TWO', 'WITH', 'HIM', 'AND', 'LIKEWISE', 'A', 'LEECH'] +1221-135767-0014-1294: hyp=['YEA', 'HIS', 'HONOURABLE', 'WORSHIP', 'IS', 'WITHIN', 'BUT', 'HE', 'HATH', 'A', 'GODLY', 'MINISTER', 'OR', 'TWO', 'WITH', 'HIM', 'AND', 'LIKEWISE', 'A', 'LEECH'] +1221-135767-0015-1295: ref=['YE', 'MAY', 'NOT', 'SEE', 'HIS', 'WORSHIP', 'NOW'] +1221-135767-0015-1295: hyp=['YEA', 'MAY', 'NOT', 'SEE', 'HIS', 'WORSHIP', 'NOW'] +1221-135767-0016-1296: ref=['WITH', 'MANY', 'VARIATIONS', 'SUGGESTED', 'BY', 'THE', 'NATURE', 'OF', 'HIS', 'BUILDING', 'MATERIALS', 'DIVERSITY', 'OF', 'CLIMATE', 'AND', 'A', 'DIFFERENT', 'MODE', 'OF', 'SOCIAL', 'LIFE', 'GOVERNOR', 'BELLINGHAM', 'HAD', 'PLANNED', 'HIS', 'NEW', 'HABITATION', 'AFTER', 'THE', 'RESIDENCES', 'OF', 'GENTLEMEN', 'OF', 'FAIR', 'ESTATE', 'IN', 'HIS', 
'NATIVE', 'LAND'] +1221-135767-0016-1296: hyp=['WITH', 'MANY', 'VARIATIONS', 'SUGGESTED', 'BY', 'THE', 'NATURE', 'OF', 'HIS', 'BUILDING', 'MATERIALS', 'DIVERSITY', 'OF', 'CLIMATE', 'AND', 'A', 'DIFFERENT', 'MODE', 'OF', 'SOCIAL', 'LIFE', 'GOVERNOR', 'BELLINGHAM', 'HAD', 'PLANNED', 'HIS', 'NEW', 'HABITATION', 'AFTER', 'THE', 'RESIDENCES', 'OF', 'GENTLEMEN', 'OF', 'FAIREST', 'STATE', 'IN', 'HIS', 'NATIVE', 'LAND'] +1221-135767-0017-1297: ref=['ON', 'THE', 'TABLE', 'IN', 'TOKEN', 'THAT', 'THE', 'SENTIMENT', 'OF', 'OLD', 'ENGLISH', 'HOSPITALITY', 'HAD', 'NOT', 'BEEN', 'LEFT', 'BEHIND', 'STOOD', 'A', 'LARGE', 'PEWTER', 'TANKARD', 'AT', 'THE', 'BOTTOM', 'OF', 'WHICH', 'HAD', 'HESTER', 'OR', 'PEARL', 'PEEPED', 'INTO', 'IT', 'THEY', 'MIGHT', 'HAVE', 'SEEN', 'THE', 'FROTHY', 'REMNANT', 'OF', 'A', 'RECENT', 'DRAUGHT', 'OF', 'ALE'] +1221-135767-0017-1297: hyp=['ON', 'THE', 'TABLE', 'IN', 'TOKEN', 'THAT', 'THE', 'SENTIMENT', 'OF', 'OLD', 'ENGLISH', 'HOSPITALITY', 'HAD', 'NOT', 'BEEN', 'LEFT', 'BEHIND', 'STOOD', 'A', 'LARGE', 'PEWTER', 'TANKARD', 'AT', 'THE', 'BOTTOM', 'OF', 'WHICH', 'HAD', 'HESTER', 'OR', 'PEARL', 'PEEPED', 'INTO', 'IT', 'THEY', 'MIGHT', 'HAVE', 'SEEN', 'THE', 'FROTHY', 'REMNANT', 'OF', 'A', 'RECENT', 'DRAUGHT', 'OF', 'ALE'] +1221-135767-0018-1298: ref=['LITTLE', 'PEARL', 'WHO', 'WAS', 'AS', 'GREATLY', 'PLEASED', 'WITH', 'THE', 'GLEAMING', 'ARMOUR', 'AS', 'SHE', 'HAD', 'BEEN', 'WITH', 'THE', 'GLITTERING', 'FRONTISPIECE', 'OF', 'THE', 'HOUSE', 'SPENT', 'SOME', 'TIME', 'LOOKING', 'INTO', 'THE', 'POLISHED', 'MIRROR', 'OF', 'THE', 'BREASTPLATE'] +1221-135767-0018-1298: hyp=['LITTLE', 'PEARL', 'WHO', 'WAS', 'AS', 'GREATLY', 'PLEASED', 'WITH', 'THE', 'GLEAMING', 'ARMOR', 'AS', 'SHE', 'HAD', 'BEEN', 'WITH', 'THE', 'GLITTERING', 'FRONTESPIECE', 'OF', 'THE', 'HOUSE', 'SPENT', 'SOME', 'TIME', 'LOOKING', 'INTO', 'THE', 'POLISHED', 'MIRROR', 'OF', 'THE', 'BREASTPLATE'] +1221-135767-0019-1299: ref=['MOTHER', 'CRIED', 'SHE', 'I', 'SEE', 'YOU', 'HERE', 'LOOK', 'LOOK'] +1221-135767-0019-1299: hyp=['MOTHER', 'CRIED', 'SHE', 'I', 'SEE', 'YOU', 'HERE', 'LOOK', 'LOOK'] +1221-135767-0020-1300: ref=['IN', 'TRUTH', 'SHE', 'SEEMED', 'ABSOLUTELY', 'HIDDEN', 'BEHIND', 'IT'] +1221-135767-0020-1300: hyp=['IN', 'TRUTH', 'SHE', 'SEEMED', 'ABSOLUTELY', 'HIDDEN', 'BEHIND', 'IT'] +1221-135767-0021-1301: ref=['PEARL', 'ACCORDINGLY', 'RAN', 'TO', 'THE', 'BOW', 'WINDOW', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'HALL', 'AND', 'LOOKED', 'ALONG', 'THE', 'VISTA', 'OF', 'A', 'GARDEN', 'WALK', 'CARPETED', 'WITH', 'CLOSELY', 'SHAVEN', 'GRASS', 'AND', 'BORDERED', 'WITH', 'SOME', 'RUDE', 'AND', 'IMMATURE', 'ATTEMPT', 'AT', 'SHRUBBERY'] +1221-135767-0021-1301: hyp=['PEARL', 'ACCORDINGLY', 'RAN', 'TO', 'THE', 'BOW', 'WINDOW', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'HALL', 'AND', 'LOOKED', 'ALONG', 'THE', 'VISTA', 'OF', 'A', 'GARDEN', 'WALK', 'CARPETED', 'WITH', 'CLOSELY', 'SHAVEN', 'GRASS', 'AND', 'BORDERED', 'WITH', 'SOME', 'RUDE', 'AND', 'IMMATOR', 'ATTEMPT', 'AT', 'SHRUBBERY'] +1221-135767-0022-1302: ref=['BUT', 'THE', 'PROPRIETOR', 'APPEARED', 'ALREADY', 'TO', 'HAVE', 'RELINQUISHED', 'AS', 'HOPELESS', 'THE', 'EFFORT', 'TO', 'PERPETUATE', 'ON', 'THIS', 'SIDE', 'OF', 'THE', 'ATLANTIC', 'IN', 'A', 'HARD', 'SOIL', 'AND', 'AMID', 'THE', 'CLOSE', 'STRUGGLE', 'FOR', 'SUBSISTENCE', 'THE', 'NATIVE', 'ENGLISH', 'TASTE', 'FOR', 'ORNAMENTAL', 'GARDENING'] +1221-135767-0022-1302: hyp=['BUT', 'THE', 'PROPRIETOR', 'APPEARED', 'ALREADY', 'TO', 'HAVE', 'RELINQUISHED', 'AS', 'HOPELESS', 'THE', 'EFFORT', 'TO', 'PERPETUATE', 'ON', 
'THIS', 'SIDE', 'OF', 'THE', 'ATLANTIC', 'IN', 'A', 'HARD', 'SOIL', 'AND', 'AMID', 'THE', 'CLOSE', 'STRUGGLE', 'FOR', 'SUBSISTENCE', 'THE', 'NATIVE', 'ENGLISH', 'TASTE', 'FOR', 'ORNAMENTAL', 'GARDENING'] +1221-135767-0023-1303: ref=['THERE', 'WERE', 'A', 'FEW', 'ROSE', 'BUSHES', 'HOWEVER', 'AND', 'A', 'NUMBER', 'OF', 'APPLE', 'TREES', 'PROBABLY', 'THE', 'DESCENDANTS', 'OF', 'THOSE', 'PLANTED', 'BY', 'THE', 'REVEREND', 'MISTER', 'BLACKSTONE', 'THE', 'FIRST', 'SETTLER', 'OF', 'THE', 'PENINSULA', 'THAT', 'HALF', 'MYTHOLOGICAL', 'PERSONAGE', 'WHO', 'RIDES', 'THROUGH', 'OUR', 'EARLY', 'ANNALS', 'SEATED', 'ON', 'THE', 'BACK', 'OF', 'A', 'BULL'] +1221-135767-0023-1303: hyp=['THERE', 'WERE', 'A', 'FEW', 'ROSE', 'BUSHES', 'HOWEVER', 'AND', 'A', 'NUMBER', 'OF', 'APPLE', 'TREES', 'PROBABLY', 'THE', 'DESCENDANTS', 'OF', 'THOSE', 'PLANTED', 'BY', 'THE', 'REVEREND', 'MISTER', 'BLACKSTONE', 'THE', 'FIRST', 'SETTLER', 'OF', 'THE', 'PENINSULA', 'THAT', 'HALF', 'MYTHOLOGICAL', 'PERSONAGE', 'WHO', 'RIDES', 'THROUGH', 'OUR', 'EARLY', 'ANNALS', 'SEATED', 'ON', 'THE', 'BACK', 'OF', 'A', 'BULL'] +1221-135767-0024-1304: ref=['PEARL', 'SEEING', 'THE', 'ROSE', 'BUSHES', 'BEGAN', 'TO', 'CRY', 'FOR', 'A', 'RED', 'ROSE', 'AND', 'WOULD', 'NOT', 'BE', 'PACIFIED'] +1221-135767-0024-1304: hyp=['PEARL', 'SEEING', 'THE', 'ROSE', 'BUSHES', 'BEGAN', 'TO', 'CRY', 'FOR', 'A', 'RED', 'ROSE', 'AND', 'WOULD', 'NOT', 'BE', 'PACIFIED'] +1284-1180-0000-829: ref=['HE', 'WORE', 'BLUE', 'SILK', 'STOCKINGS', 'BLUE', 'KNEE', 'PANTS', 'WITH', 'GOLD', 'BUCKLES', 'A', 'BLUE', 'RUFFLED', 'WAIST', 'AND', 'A', 'JACKET', 'OF', 'BRIGHT', 'BLUE', 'BRAIDED', 'WITH', 'GOLD'] +1284-1180-0000-829: hyp=['HE', 'WORE', 'BLUE', 'SILK', 'STOCKINGS', 'BLUE', 'KNEEP', 'HANDS', 'WITH', 'GOLD', 'BUCKLES', 'A', 'BLUE', 'RUFFLED', 'WAIST', 'AND', 'A', 'JACKET', 'OF', 'BRIGHT', 'BLUE', 'BRAIDED', 'WITH', 'GOLD'] +1284-1180-0001-830: ref=['HIS', 'HAT', 'HAD', 'A', 'PEAKED', 'CROWN', 'AND', 'A', 'FLAT', 'BRIM', 'AND', 'AROUND', 'THE', 'BRIM', 'WAS', 'A', 'ROW', 'OF', 'TINY', 'GOLDEN', 'BELLS', 'THAT', 'TINKLED', 'WHEN', 'HE', 'MOVED'] +1284-1180-0001-830: hyp=['HIS', 'HAT', 'HAD', 'A', 'PEAKED', 'CROWN', 'IN', 'A', 'FLAT', 'BRIM', 'AND', 'AROUND', 'THE', 'BRIM', 'WAS', 'A', 'ROW', 'OF', 'TINY', 'GOLDEN', 'BELLS', 'THAT', 'TINKLED', 'WHEN', 'HE', 'MOVED'] +1284-1180-0002-831: ref=['INSTEAD', 'OF', 'SHOES', 'THE', 'OLD', 'MAN', 'WORE', 'BOOTS', 'WITH', 'TURNOVER', 'TOPS', 'AND', 'HIS', 'BLUE', 'COAT', 'HAD', 'WIDE', 'CUFFS', 'OF', 'GOLD', 'BRAID'] +1284-1180-0002-831: hyp=['INSTEAD', 'OF', 'SHOES', 'THE', 'OLD', 'MEN', 'WORE', 'BOOTS', 'WITH', 'TURN', 'OVER', 'TOPS', 'AND', 'HIS', 'BLUE', 'COAT', 'HAD', 'WIDE', 'CUFFS', 'OF', 'GOLD', 'BRAID'] +1284-1180-0003-832: ref=['FOR', 'A', 'LONG', 'TIME', 'HE', 'HAD', 'WISHED', 'TO', 'EXPLORE', 'THE', 'BEAUTIFUL', 'LAND', 'OF', 'OZ', 'IN', 'WHICH', 'THEY', 'LIVED'] +1284-1180-0003-832: hyp=['FOR', 'A', 'LONG', 'TIME', 'HE', 'HAD', 'WISHED', 'TO', 'EXPLORE', 'THE', 'BEAUTIFUL', 'LAND', 'OF', 'OZ', 'IN', 'WHICH', 'THEY', 'LIVED'] +1284-1180-0004-833: ref=['WHEN', 'THEY', 'WERE', 'OUTSIDE', 'UNC', 'SIMPLY', 'LATCHED', 'THE', 'DOOR', 'AND', 'STARTED', 'UP', 'THE', 'PATH'] +1284-1180-0004-833: hyp=['WHEN', 'THEY', 'WERE', 'OUTSIDE', 'UN', 'SIMPLY', 'LATCHED', 'THE', 'DOOR', 'AND', 'STARTED', 'UP', 'THE', 'PATH'] +1284-1180-0005-834: ref=['NO', 'ONE', 'WOULD', 'DISTURB', 'THEIR', 'LITTLE', 'HOUSE', 'EVEN', 'IF', 'ANYONE', 'CAME', 'SO', 'FAR', 'INTO', 'THE', 'THICK', 'FOREST', 'WHILE', 'THEY', 'WERE', 'GONE'] +1284-1180-0005-834: 
hyp=['NO', 'ONE', 'WOULD', 'DISTURB', 'THEIR', 'LITTLE', 'HOUSE', 'EVEN', 'IF', 'ANY', 'ONE', 'CAME', 'SO', 'FAR', 'INTO', 'THE', 'THICK', 'FOREST', 'WHILE', 'THEY', 'WERE', 'GONE'] +1284-1180-0006-835: ref=['AT', 'THE', 'FOOT', 'OF', 'THE', 'MOUNTAIN', 'THAT', 'SEPARATED', 'THE', 'COUNTRY', 'OF', 'THE', 'MUNCHKINS', 'FROM', 'THE', 'COUNTRY', 'OF', 'THE', 'GILLIKINS', 'THE', 'PATH', 'DIVIDED'] +1284-1180-0006-835: hyp=['AT', 'THE', 'FOOT', 'OF', 'THE', 'MOUNTAIN', 'THAT', 'SEPARATED', 'THE', 'COUNTRY', 'OF', 'THE', 'MUNCHKINS', 'FROM', 'THE', 'COUNTRY', 'OF', 'THE', 'GYLICANS', 'THE', 'PATH', 'DIVIDED'] +1284-1180-0007-836: ref=['HE', 'KNEW', 'IT', 'WOULD', 'TAKE', 'THEM', 'TO', 'THE', 'HOUSE', 'OF', 'THE', 'CROOKED', 'MAGICIAN', 'WHOM', 'HE', 'HAD', 'NEVER', 'SEEN', 'BUT', 'WHO', 'WAS', 'THEIR', 'NEAREST', 'NEIGHBOR'] +1284-1180-0007-836: hyp=['HE', 'KNEW', 'IT', 'WOULD', 'TAKE', 'THEM', 'TO', 'THE', 'HOUSE', 'OF', 'THE', 'CROOKED', 'MAGICIAN', 'WHOM', 'HE', 'HAD', 'NEVER', 'SEEN', 'BUT', 'WHO', 'WAS', 'THERE', 'NEAREST', 'NEIGHBOUR'] +1284-1180-0008-837: ref=['ALL', 'THE', 'MORNING', 'THEY', 'TRUDGED', 'UP', 'THE', 'MOUNTAIN', 'PATH', 'AND', 'AT', 'NOON', 'UNC', 'AND', 'OJO', 'SAT', 'ON', 'A', 'FALLEN', 'TREE', 'TRUNK', 'AND', 'ATE', 'THE', 'LAST', 'OF', 'THE', 'BREAD', 'WHICH', 'THE', 'OLD', 'MUNCHKIN', 'HAD', 'PLACED', 'IN', 'HIS', 'POCKET'] +1284-1180-0008-837: hyp=['ALL', 'THE', 'MORNING', 'THEY', 'TRUDGED', 'UP', 'THE', 'MOUNTAIN', 'PATH', 'AND', 'AT', 'NOON', 'UNCAN', 'OJO', 'SAT', 'ON', 'A', 'FALLEN', 'TREE', 'TRUNK', 'AND', 'ATE', 'THE', 'LAST', 'OF', 'THE', 'BREAD', 'WHICH', 'THE', 'OLD', 'MUNCHKIN', 'HAD', 'PLACED', 'IN', 'HIS', 'POCKET'] +1284-1180-0009-838: ref=['THEN', 'THEY', 'STARTED', 'ON', 'AGAIN', 'AND', 'TWO', 'HOURS', 'LATER', 'CAME', 'IN', 'SIGHT', 'OF', 'THE', 'HOUSE', 'OF', 'DOCTOR', 'PIPT'] +1284-1180-0009-838: hyp=['THEN', 'THEY', 'STARTED', 'ON', 'AGAIN', 'AND', 'TWO', 'HOURS', 'LATER', 'CAME', 'IN', 'SIGHT', 'OF', 'THE', 'HOUSE', 'OF', 'DOCTOR', 'PIPT'] +1284-1180-0010-839: ref=['UNC', 'KNOCKED', 'AT', 'THE', 'DOOR', 'OF', 'THE', 'HOUSE', 'AND', 'A', 'CHUBBY', 'PLEASANT', 'FACED', 'WOMAN', 'DRESSED', 'ALL', 'IN', 'BLUE', 'OPENED', 'IT', 'AND', 'GREETED', 'THE', 'VISITORS', 'WITH', 'A', 'SMILE'] +1284-1180-0010-839: hyp=['UNCONOCTED', 'THE', 'DOOR', 'OF', 'THE', 'HOUSE', 'INTO', 'CHUBBY', 'PLEASANT', 'FACED', 'WOMAN', 'DRESSED', 'ALL', 'IN', 'BLUE', 'OPENED', 'IT', 'AND', 'GREETED', 'THE', 'VISITORS', 'WITH', 'A', 'SMILE'] +1284-1180-0011-840: ref=['I', 'AM', 'MY', 'DEAR', 'AND', 'ALL', 'STRANGERS', 'ARE', 'WELCOME', 'TO', 'MY', 'HOME'] +1284-1180-0011-840: hyp=['I', 'AM', 'MY', 'DEAR', 'AND', 'ALL', 'STRANGERS', 'ARE', 'WELCOME', 'TO', 'MY', 'HOME'] +1284-1180-0012-841: ref=['WE', 'HAVE', 'COME', 'FROM', 'A', 'FAR', 'LONELIER', 'PLACE', 'THAN', 'THIS', 'A', 'LONELIER', 'PLACE'] +1284-1180-0012-841: hyp=['WE', 'HAVE', 'COME', 'FROM', 'AFAR', 'LONELIER', 'PLACE', 'THAN', 'THIS', 'A', 'LONELIER', 'PLACE'] +1284-1180-0013-842: ref=['AND', 'YOU', 'MUST', 'BE', 'OJO', 'THE', 'UNLUCKY', 'SHE', 'ADDED'] +1284-1180-0013-842: hyp=['AND', 'YOU', 'MUST', 'BE', 'OJO', 'THE', 'UNLUCKY', 'SHE', 'ADDED'] +1284-1180-0014-843: ref=['OJO', 'HAD', 'NEVER', 'EATEN', 'SUCH', 'A', 'FINE', 'MEAL', 'IN', 'ALL', 'HIS', 'LIFE'] +1284-1180-0014-843: hyp=['OJO', 'HAD', 'NEVER', 'EATEN', 'SUCH', 'A', 'FINE', 'MEAL', 'IN', 'ALL', 'HIS', 'LIFE'] +1284-1180-0015-844: ref=['WE', 'ARE', 'TRAVELING', 'REPLIED', 'OJO', 'AND', 'WE', 'STOPPED', 'AT', 'YOUR', 'HOUSE', 'JUST', 'TO', 'REST', 'AND', 
'REFRESH', 'OURSELVES'] +1284-1180-0015-844: hyp=['WE', 'ARE', 'TRAVELLING', 'REPLIED', 'OJO', 'AND', 'WE', 'STOPPED', 'AT', 'YOUR', 'HOUSE', 'JUST', 'A', 'REST', 'AND', 'REFRESH', 'OURSELVES'] +1284-1180-0016-845: ref=['THE', 'WOMAN', 'SEEMED', 'THOUGHTFUL'] +1284-1180-0016-845: hyp=['THE', 'WOMAN', 'SEEMED', 'THOUGHTFUL'] +1284-1180-0017-846: ref=['AT', 'ONE', 'END', 'STOOD', 'A', 'GREAT', 'FIREPLACE', 'IN', 'WHICH', 'A', 'BLUE', 'LOG', 'WAS', 'BLAZING', 'WITH', 'A', 'BLUE', 'FLAME', 'AND', 'OVER', 'THE', 'FIRE', 'HUNG', 'FOUR', 'KETTLES', 'IN', 'A', 'ROW', 'ALL', 'BUBBLING', 'AND', 'STEAMING', 'AT', 'A', 'GREAT', 'RATE'] +1284-1180-0017-846: hyp=['AT', 'ONE', 'END', 'STOOD', 'A', 'GREAT', 'FIREPLACE', 'IN', 'WHICH', 'A', 'BLUE', 'LOG', 'WAS', 'BLAZING', 'WITH', 'A', 'BLUE', 'FLAME', 'AND', 'OVER', 'THE', 'FIRE', 'HUNG', 'FOUR', 'KETTLES', 'IN', 'A', 'ROW', 'ALL', 'BUBBLING', 'AND', 'STEAMING', 'AT', 'A', 'GREAT', 'RATE'] +1284-1180-0018-847: ref=['IT', 'TAKES', 'ME', 'SEVERAL', 'YEARS', 'TO', 'MAKE', 'THIS', 'MAGIC', 'POWDER', 'BUT', 'AT', 'THIS', 'MOMENT', 'I', 'AM', 'PLEASED', 'TO', 'SAY', 'IT', 'IS', 'NEARLY', 'DONE', 'YOU', 'SEE', 'I', 'AM', 'MAKING', 'IT', 'FOR', 'MY', 'GOOD', 'WIFE', 'MARGOLOTTE', 'WHO', 'WANTS', 'TO', 'USE', 'SOME', 'OF', 'IT', 'FOR', 'A', 'PURPOSE', 'OF', 'HER', 'OWN'] +1284-1180-0018-847: hyp=['IT', 'TAKES', 'ME', 'SEVERAL', 'YEARS', 'TO', 'MAKE', 'THIS', 'MAGIC', 'POWDER', 'BUT', 'AT', 'THIS', 'MOMENT', 'I', 'AM', 'PLEASED', 'TO', 'SAY', 'IT', 'IS', 'NEARLY', 'DONE', 'YOU', 'SEE', 'I', 'AM', 'MAKING', 'IT', 'FOR', 'MY', 'GOOD', 'WIFE', 'MARGOLOTTE', 'WHO', 'WANTS', 'TO', 'USE', 'SOME', 'OF', 'IT', 'FOR', 'A', 'PURPOSE', 'OF', 'HER', 'OWN'] +1284-1180-0019-848: ref=['YOU', 'MUST', 'KNOW', 'SAID', 'MARGOLOTTE', 'WHEN', 'THEY', 'WERE', 'ALL', 'SEATED', 'TOGETHER', 'ON', 'THE', 'BROAD', 'WINDOW', 'SEAT', 'THAT', 'MY', 'HUSBAND', 'FOOLISHLY', 'GAVE', 'AWAY', 'ALL', 'THE', 'POWDER', 'OF', 'LIFE', 'HE', 'FIRST', 'MADE', 'TO', 'OLD', 'MOMBI', 'THE', 'WITCH', 'WHO', 'USED', 'TO', 'LIVE', 'IN', 'THE', 'COUNTRY', 'OF', 'THE', 'GILLIKINS', 'TO', 'THE', 'NORTH', 'OF', 'HERE'] +1284-1180-0019-848: hyp=['YOU', 'MUST', 'KNOW', 'SAID', 'MARGOLOTTE', 'WHEN', 'THEY', 'WERE', 'ALL', 'SEATED', 'TOGETHER', 'ON', 'THE', 'BROAD', 'WINDOW', 'SEAT', 'THAT', 'MY', 'HUSBAND', 'FOOLISHLY', 'GAVE', 'AWAY', 'ALL', 'THE', 'POWDER', 'OF', 'LIFE', 'HE', 'FIRST', 'MADE', 'TO', 'OLD', 'MOMBY', 'THE', 'WITCH', 'WHO', 'USED', 'TO', 'LIVE', 'IN', 'THE', 'COUNTRY', 'OF', 'THE', 'GYLICANS', 'TO', 'THE', 'NORTH', 'OF', 'HERE'] +1284-1180-0020-849: ref=['THE', 'FIRST', 'LOT', 'WE', 'TESTED', 'ON', 'OUR', 'GLASS', 'CAT', 'WHICH', 'NOT', 'ONLY', 'BEGAN', 'TO', 'LIVE', 'BUT', 'HAS', 'LIVED', 'EVER', 'SINCE'] +1284-1180-0020-849: hyp=['THE', 'FIRST', 'LOT', 'WE', 'TESTED', 'ON', 'OUR', 'GLASS', 'CAT', 'WHICH', 'NOT', 'ONLY', 'BEGAN', 'TO', 'LIVE', 'BUT', 'HAS', 'LIVED', 'EVER', 'SINCE'] +1284-1180-0021-850: ref=['I', 'THINK', 'THE', 'NEXT', 'GLASS', 'CAT', 'THE', 'MAGICIAN', 'MAKES', 'WILL', 'HAVE', 'NEITHER', 'BRAINS', 'NOR', 'HEART', 'FOR', 'THEN', 'IT', 'WILL', 'NOT', 'OBJECT', 'TO', 'CATCHING', 'MICE', 'AND', 'MAY', 'PROVE', 'OF', 'SOME', 'USE', 'TO', 'US'] +1284-1180-0021-850: hyp=['I', 'THINK', 'THE', 'NEXT', 'GLASS', 'CAT', 'THE', 'MAGICIAN', 'MAKES', 'WILL', 'HAVE', 'NEITHER', 'BRAINS', 'NOR', 'HEART', 'FOR', 'THEN', 'IT', 'WILL', 'NOT', 'OBJECT', 'TO', 'CATCHING', 'MICE', 'AND', 'THEY', 'PROVE', 'OF', 'SOME', 'USE', 'TO', 'US'] +1284-1180-0022-851: ref=["I'M", 'AFRAID', 'I', "DON'T", 'KNOW', 
'MUCH', 'ABOUT', 'THE', 'LAND', 'OF', 'OZ'] +1284-1180-0022-851: hyp=['I', 'AM', 'AFRAID', 'I', "DON'T", 'KNOW', 'MUCH', 'ABOUT', 'THE', 'LAND', 'OF', 'OZ'] +1284-1180-0023-852: ref=['YOU', 'SEE', "I'VE", 'LIVED', 'ALL', 'MY', 'LIFE', 'WITH', 'UNC', 'NUNKIE', 'THE', 'SILENT', 'ONE', 'AND', 'THERE', 'WAS', 'NO', 'ONE', 'TO', 'TELL', 'ME', 'ANYTHING'] +1284-1180-0023-852: hyp=['YOU', 'SEE', 'I', 'HAVE', 'LIVED', 'ALL', 'MY', 'LIFE', 'WITH', 'UNC', 'NUNKIE', 'THE', 'SILENT', 'ONE', 'AND', 'THERE', 'WAS', 'NO', 'ONE', 'TO', 'TELL', 'ME', 'ANYTHING'] +1284-1180-0024-853: ref=['THAT', 'IS', 'ONE', 'REASON', 'YOU', 'ARE', 'OJO', 'THE', 'UNLUCKY', 'SAID', 'THE', 'WOMAN', 'IN', 'A', 'SYMPATHETIC', 'TONE'] +1284-1180-0024-853: hyp=['THAT', 'IS', 'ONE', 'REASON', 'YOU', 'ARE', 'OJO', 'THE', 'UNLUCKY', 'SAID', 'THE', 'WOMAN', 'IN', 'SYMPATHETIC', 'TONE'] +1284-1180-0025-854: ref=['I', 'THINK', 'I', 'MUST', 'SHOW', 'YOU', 'MY', 'PATCHWORK', 'GIRL', 'SAID', 'MARGOLOTTE', 'LAUGHING', 'AT', 'THE', "BOY'S", 'ASTONISHMENT', 'FOR', 'SHE', 'IS', 'RATHER', 'DIFFICULT', 'TO', 'EXPLAIN'] +1284-1180-0025-854: hyp=['I', 'THINK', 'I', 'MUST', 'SHOW', 'YOU', 'MY', 'PATCHWORK', 'GIRL', 'SAID', 'MARGOLOTTE', 'LAUGHING', 'AT', 'THE', "BOY'S", 'ASTONISHMENT', 'FOR', 'SHE', 'IS', 'RATHER', 'DIFFICULT', 'TO', 'EXPLAIN'] +1284-1180-0026-855: ref=['BUT', 'FIRST', 'I', 'WILL', 'TELL', 'YOU', 'THAT', 'FOR', 'MANY', 'YEARS', 'I', 'HAVE', 'LONGED', 'FOR', 'A', 'SERVANT', 'TO', 'HELP', 'ME', 'WITH', 'THE', 'HOUSEWORK', 'AND', 'TO', 'COOK', 'THE', 'MEALS', 'AND', 'WASH', 'THE', 'DISHES'] +1284-1180-0026-855: hyp=['BUT', 'FIRST', 'I', 'WILL', 'TELL', 'YOU', 'THAT', 'FROM', 'MANY', 'YEARS', 'I', 'HAVE', 'LONGED', 'FOR', 'A', 'SERVANT', 'TO', 'HELP', 'ME', 'WITH', 'THE', 'HOUSEWORK', 'AND', 'TO', 'COPE', 'THE', 'MEALS', 'AND', 'WASH', 'THE', 'DISHES'] +1284-1180-0027-856: ref=['YET', 'THAT', 'TASK', 'WAS', 'NOT', 'SO', 'EASY', 'AS', 'YOU', 'MAY', 'SUPPOSE'] +1284-1180-0027-856: hyp=['YET', 'THAT', 'TASK', 'WAS', 'NOT', 'SO', 'EASY', 'AS', 'YOU', 'MAY', 'SUPPOSE'] +1284-1180-0028-857: ref=['A', 'BED', 'QUILT', 'MADE', 'OF', 'PATCHES', 'OF', 'DIFFERENT', 'KINDS', 'AND', 'COLORS', 'OF', 'CLOTH', 'ALL', 'NEATLY', 'SEWED', 'TOGETHER'] +1284-1180-0028-857: hyp=['A', 'BED', 'QUILT', 'MADE', 'OF', 'PATCHES', 'OF', 'DIFFERENT', 'KINDS', 'AND', 'COLLARS', 'OF', 'CLOTH', 'ALL', 'NEATLY', 'SEWED', 'TOGETHER'] +1284-1180-0029-858: ref=['SOMETIMES', 'IT', 'IS', 'CALLED', 'A', 'CRAZY', 'QUILT', 'BECAUSE', 'THE', 'PATCHES', 'AND', 'COLORS', 'ARE', 'SO', 'MIXED', 'UP'] +1284-1180-0029-858: hyp=['SOMETIMES', 'IT', 'IS', 'CALLED', 'A', 'CRAZY', 'QUILT', 'BECAUSE', 'THE', 'PATCHES', 'AND', 'COLORS', 'ARE', 'SO', 'MIXED', 'UP'] +1284-1180-0030-859: ref=['WHEN', 'I', 'FOUND', 'IT', 'I', 'SAID', 'TO', 'MYSELF', 'THAT', 'IT', 'WOULD', 'DO', 'NICELY', 'FOR', 'MY', 'SERVANT', 'GIRL', 'FOR', 'WHEN', 'SHE', 'WAS', 'BROUGHT', 'TO', 'LIFE', 'SHE', 'WOULD', 'NOT', 'BE', 'PROUD', 'NOR', 'HAUGHTY', 'AS', 'THE', 'GLASS', 'CAT', 'IS', 'FOR', 'SUCH', 'A', 'DREADFUL', 'MIXTURE', 'OF', 'COLORS', 'WOULD', 'DISCOURAGE', 'HER', 'FROM', 'TRYING', 'TO', 'BE', 'AS', 'DIGNIFIED', 'AS', 'THE', 'BLUE', 'MUNCHKINS', 'ARE'] +1284-1180-0030-859: hyp=['WHEN', 'I', 'FOUND', 'IT', 'I', 'SAID', 'TO', 'MYSELF', 'THAT', 'IT', 'WOULD', 'DO', 'NICELY', 'FOR', 'MY', 'SERVANT', 'GIRL', 'FOR', 'WHEN', 'SHE', 'WAS', 'BROUGHT', 'TO', 'LIFE', 'SHE', 'WOULD', 'NOT', 'BE', 'PROUD', 'NOR', 'HAUGHTY', 'AS', 'THE', 'GLASS', 'CAT', 'IS', 'FOR', 'SUCH', 'A', 'DREADFUL', 'MIXTURE', 'OF', 'COLOURS', 
'WOULD', 'DISCOURAGE', 'HER', 'FROM', 'TRYING', 'TO', 'BE', 'AS', 'DIGNIFIED', 'AS', 'THE', 'BLUE', 'MUNCHKINS', 'ARE'] +1284-1180-0031-860: ref=['AT', 'THE', 'EMERALD', 'CITY', 'WHERE', 'OUR', 'PRINCESS', 'OZMA', 'LIVES', 'GREEN', 'IS', 'THE', 'POPULAR', 'COLOR'] +1284-1180-0031-860: hyp=['AT', 'THE', 'EMERALD', 'CITY', 'WHERE', 'OUR', 'PRINCESS', 'OSMO', 'LIVES', 'GREEN', 'IS', 'THE', 'POPULAR', 'COLOUR'] +1284-1180-0032-861: ref=['I', 'WILL', 'SHOW', 'YOU', 'WHAT', 'A', 'GOOD', 'JOB', 'I', 'DID', 'AND', 'SHE', 'WENT', 'TO', 'A', 'TALL', 'CUPBOARD', 'AND', 'THREW', 'OPEN', 'THE', 'DOORS'] +1284-1180-0032-861: hyp=['I', 'WILL', 'SHOW', 'YOU', 'WHAT', 'A', 'GOOD', 'JOB', 'I', 'DID', 'AND', 'SHE', 'WENT', 'TO', 'A', 'TALL', 'CUPBOARD', 'AND', 'THREW', 'OPEN', 'THE', 'DOORS'] +1284-1181-0000-807: ref=['OJO', 'EXAMINED', 'THIS', 'CURIOUS', 'CONTRIVANCE', 'WITH', 'WONDER'] +1284-1181-0000-807: hyp=['OJO', 'EXAMINED', 'THIS', 'CURIOUS', 'CONTRIVANCE', 'WITH', 'WONDER'] +1284-1181-0001-808: ref=['MARGOLOTTE', 'HAD', 'FIRST', 'MADE', 'THE', "GIRL'S", 'FORM', 'FROM', 'THE', 'PATCHWORK', 'QUILT', 'AND', 'THEN', 'SHE', 'HAD', 'DRESSED', 'IT', 'WITH', 'A', 'PATCHWORK', 'SKIRT', 'AND', 'AN', 'APRON', 'WITH', 'POCKETS', 'IN', 'IT', 'USING', 'THE', 'SAME', 'GAY', 'MATERIAL', 'THROUGHOUT'] +1284-1181-0001-808: hyp=['MARGOLOTT', 'HAD', 'FIRST', 'MADE', 'THE', "GIRL'S", 'FORM', 'FROM', 'THE', 'PATCHWORK', 'QUILT', 'AND', 'THEN', 'SHE', 'HAD', 'DRESSED', 'IT', 'WITH', 'A', 'PATCHWORK', 'SKIRT', 'AND', 'AN', 'APRON', 'WITH', 'POCKETS', 'IN', 'IT', 'USING', 'THE', 'SAME', 'GAME', 'MATERIAL', 'THROUGHOUT'] +1284-1181-0002-809: ref=['THE', 'HEAD', 'OF', 'THE', 'PATCHWORK', 'GIRL', 'WAS', 'THE', 'MOST', 'CURIOUS', 'PART', 'OF', 'HER'] +1284-1181-0002-809: hyp=['THE', 'HEAD', 'OF', 'THE', 'PATCHWORK', 'GIRL', 'WAS', 'THE', 'MOST', 'CURIOUS', 'PART', 'OF', 'HER'] +1284-1181-0003-810: ref=['THE', 'HAIR', 'WAS', 'OF', 'BROWN', 'YARN', 'AND', 'HUNG', 'DOWN', 'ON', 'HER', 'NECK', 'IN', 'SEVERAL', 'NEAT', 'BRAIDS'] +1284-1181-0003-810: hyp=['THE', 'HAIR', 'WAS', 'OF', 'BROWN', 'YARN', 'AND', 'HUNG', 'DOWN', 'ON', 'HER', 'NECK', 'AND', 'SEVERAL', 'NEAT', 'BRAIDS'] +1284-1181-0004-811: ref=['GOLD', 'IS', 'THE', 'MOST', 'COMMON', 'METAL', 'IN', 'THE', 'LAND', 'OF', 'OZ', 'AND', 'IS', 'USED', 'FOR', 'MANY', 'PURPOSES', 'BECAUSE', 'IT', 'IS', 'SOFT', 'AND', 'PLIABLE'] +1284-1181-0004-811: hyp=['GOLD', 'IS', 'THE', 'MOST', 'COMMON', 'MEDAL', 'IN', 'THE', 'LAND', 'OF', 'OZ', 'AND', 'IS', 'USED', 'FOR', 'MANY', 'PURPOSES', 'BECAUSE', 'IT', 'IS', 'SOFT', 'AND', 'PLIABLE'] +1284-1181-0005-812: ref=['NO', 'I', 'FORGOT', 'ALL', 'ABOUT', 'THE', 'BRAINS', 'EXCLAIMED', 'THE', 'WOMAN'] +1284-1181-0005-812: hyp=['NO', 'I', 'FORGOT', 'ALL', 'ABOUT', 'THE', 'BRAINS', 'EXCLAIMED', 'THE', 'WOMAN'] +1284-1181-0006-813: ref=['WELL', 'THAT', 'MAY', 'BE', 'TRUE', 'AGREED', 'MARGOLOTTE', 'BUT', 'ON', 'THE', 'CONTRARY', 'A', 'SERVANT', 'WITH', 'TOO', 'MUCH', 'BRAINS', 'IS', 'SURE', 'TO', 'BECOME', 'INDEPENDENT', 'AND', 'HIGH', 'AND', 'MIGHTY', 'AND', 'FEEL', 'ABOVE', 'HER', 'WORK'] +1284-1181-0006-813: hyp=['WELL', 'THAT', 'MAY', 'BE', 'TRUE', 'AGREED', 'MARGOLOTTE', 'BUT', 'ON', 'THE', 'CONTRARY', 'A', 'SERVANT', 'WITH', 'TOO', 'MUCH', 'BRAINS', 'IS', 'SURE', 'TO', 'BECOME', 'INDEPENDENT', 'AND', 'HIGH', 'AND', 'MIGHTY', 'AND', 'FEEL', 'ABOVE', 'HER', 'WORK'] +1284-1181-0007-814: ref=['SHE', 'POURED', 'INTO', 'THE', 'DISH', 'A', 'QUANTITY', 'FROM', 'EACH', 'OF', 'THESE', 'BOTTLES'] +1284-1181-0007-814: hyp=['SHE', 'POURED', 'INTO', 'THE', 
'DISH', 'A', 'QUANTITY', 'FROM', 'EACH', 'OF', 'THESE', 'BOTTLES'] +1284-1181-0008-815: ref=['I', 'THINK', 'THAT', 'WILL', 'DO', 'SHE', 'CONTINUED', 'FOR', 'THE', 'OTHER', 'QUALITIES', 'ARE', 'NOT', 'NEEDED', 'IN', 'A', 'SERVANT'] +1284-1181-0008-815: hyp=['I', 'THINK', 'THAT', 'WILL', 'DO', 'SHE', 'CONTINUED', 'FOR', 'THE', 'OTHER', 'QUALITIES', 'ARE', 'NOT', 'NEEDED', 'IN', 'A', 'SERVANT'] +1284-1181-0009-816: ref=['SHE', 'RAN', 'TO', 'HER', "HUSBAND'S", 'SIDE', 'AT', 'ONCE', 'AND', 'HELPED', 'HIM', 'LIFT', 'THE', 'FOUR', 'KETTLES', 'FROM', 'THE', 'FIRE'] +1284-1181-0009-816: hyp=['SHE', 'RAN', 'TO', 'HER', "HUSBAND'S", 'SIDE', 'AT', 'ONCE', 'AND', 'HELPED', 'HIM', 'LIFT', 'THE', 'FOUR', 'KETTLES', 'FROM', 'THE', 'FIRE'] +1284-1181-0010-817: ref=['THEIR', 'CONTENTS', 'HAD', 'ALL', 'BOILED', 'AWAY', 'LEAVING', 'IN', 'THE', 'BOTTOM', 'OF', 'EACH', 'KETTLE', 'A', 'FEW', 'GRAINS', 'OF', 'FINE', 'WHITE', 'POWDER'] +1284-1181-0010-817: hyp=['THEIR', 'CONTENTS', 'HAD', 'ALL', 'BOILED', 'AWAY', 'LEAVING', 'IN', 'THE', 'BOTTOM', 'OF', 'EACH', 'KETTLE', 'A', 'FEW', 'GRAINS', 'OF', 'FINE', 'WHITE', 'POWDER'] +1284-1181-0011-818: ref=['VERY', 'CAREFULLY', 'THE', 'MAGICIAN', 'REMOVED', 'THIS', 'POWDER', 'PLACING', 'IT', 'ALL', 'TOGETHER', 'IN', 'A', 'GOLDEN', 'DISH', 'WHERE', 'HE', 'MIXED', 'IT', 'WITH', 'A', 'GOLDEN', 'SPOON'] +1284-1181-0011-818: hyp=['VERY', 'CAREFULLY', 'THE', 'MAGICIAN', 'REMOVED', 'THIS', 'POWDER', 'PLACING', 'IT', 'ALTOGETHER', 'IN', 'A', 'GOLDEN', 'DISH', 'WHERE', 'HE', 'MIXED', 'IT', 'WITH', 'A', 'GOLDEN', 'SPOON'] +1284-1181-0012-819: ref=['NO', 'ONE', 'SAW', 'HIM', 'DO', 'THIS', 'FOR', 'ALL', 'WERE', 'LOOKING', 'AT', 'THE', 'POWDER', 'OF', 'LIFE', 'BUT', 'SOON', 'THE', 'WOMAN', 'REMEMBERED', 'WHAT', 'SHE', 'HAD', 'BEEN', 'DOING', 'AND', 'CAME', 'BACK', 'TO', 'THE', 'CUPBOARD'] +1284-1181-0012-819: hyp=['NO', 'ONE', 'SAW', 'HIM', 'DO', 'THIS', 'FOR', 'ALL', 'WERE', 'LOOKING', 'AT', 'THE', 'POWDER', 'OF', 'LIFE', 'BUT', 'SOON', 'THE', 'WOMAN', 'REMEMBERED', 'WHAT', 'SHE', 'HAD', 'BEEN', 'DOING', 'AND', 'CAME', 'BACK', 'TO', 'THE', 'CUPBOARD'] +1284-1181-0013-820: ref=['OJO', 'BECAME', 'A', 'BIT', 'UNEASY', 'AT', 'THIS', 'FOR', 'HE', 'HAD', 'ALREADY', 'PUT', 'QUITE', 'A', 'LOT', 'OF', 'THE', 'CLEVERNESS', 'POWDER', 'IN', 'THE', 'DISH', 'BUT', 'HE', 'DARED', 'NOT', 'INTERFERE', 'AND', 'SO', 'HE', 'COMFORTED', 'HIMSELF', 'WITH', 'THE', 'THOUGHT', 'THAT', 'ONE', 'CANNOT', 'HAVE', 'TOO', 'MUCH', 'CLEVERNESS'] +1284-1181-0013-820: hyp=['OJO', 'BECAME', 'A', 'BIT', 'UNEASY', 'AT', 'THIS', 'FOR', 'HE', 'HAD', 'ALREADY', 'PUT', 'QUITE', 'A', 'LOT', 'OF', 'THE', 'CLEVERNESS', 'POWDER', 'IN', 'THE', 'DISH', 'BUT', 'HE', 'DARED', 'NOT', 'INTERFERE', 'AND', 'SO', 'HE', 'COMFORTED', 'HIMSELF', 'WITH', 'THE', 'THOUGHT', 'THAT', 'ONE', 'CANNOT', 'HAVE', 'TOO', 'MUCH', 'CLEVERNESS'] +1284-1181-0014-821: ref=['HE', 'SELECTED', 'A', 'SMALL', 'GOLD', 'BOTTLE', 'WITH', 'A', 'PEPPER', 'BOX', 'TOP', 'SO', 'THAT', 'THE', 'POWDER', 'MIGHT', 'BE', 'SPRINKLED', 'ON', 'ANY', 'OBJECT', 'THROUGH', 'THE', 'SMALL', 'HOLES'] +1284-1181-0014-821: hyp=['HE', 'SELECTED', 'A', 'SMALL', 'GOLD', 'BOTTLE', 'WITH', 'A', 'PEPPER', 'BOX', 'TOP', 'SO', 'THAT', 'THE', 'POWDER', 'MIGHT', 'BE', 'SPRINKLED', 'ON', 'ANY', 'OBJECT', 'THROUGH', 'THE', 'SMALL', 'HOLES'] +1284-1181-0015-822: ref=['MOST', 'PEOPLE', 'TALK', 'TOO', 'MUCH', 'SO', 'IT', 'IS', 'A', 'RELIEF', 'TO', 'FIND', 'ONE', 'WHO', 'TALKS', 'TOO', 'LITTLE'] +1284-1181-0015-822: hyp=['MOST', 'PEOPLE', 'TALK', 'TOO', 'MUCH', 'SO', 'IT', 'IS', 'A', 'RELIEF', 'TO', 
'FIND', 'ONE', 'WHO', 'TALKS', 'TOO', 'LITTLE'] +1284-1181-0016-823: ref=['I', 'AM', 'NOT', 'ALLOWED', 'TO', 'PERFORM', 'MAGIC', 'EXCEPT', 'FOR', 'MY', 'OWN', 'AMUSEMENT', 'HE', 'TOLD', 'HIS', 'VISITORS', 'AS', 'HE', 'LIGHTED', 'A', 'PIPE', 'WITH', 'A', 'CROOKED', 'STEM', 'AND', 'BEGAN', 'TO', 'SMOKE'] +1284-1181-0016-823: hyp=['I', 'AM', 'NOT', 'ALLOWED', 'TO', 'PERFORM', 'MAGIC', 'EXCEPT', 'FOR', 'MY', 'OWN', 'AMUSEMENT', 'HE', 'TOLD', 'HIS', 'VISITORS', 'AS', 'HE', 'LIGHTED', 'A', 'PIPE', 'WITH', 'A', 'CROOKED', 'STEM', 'AND', 'BEGAN', 'TO', 'SMOKE'] +1284-1181-0017-824: ref=['THE', 'WIZARD', 'OF', 'OZ', 'WHO', 'USED', 'TO', 'BE', 'A', 'HUMBUG', 'AND', 'KNEW', 'NO', 'MAGIC', 'AT', 'ALL', 'HAS', 'BEEN', 'TAKING', 'LESSONS', 'OF', 'GLINDA', 'AND', "I'M", 'TOLD', 'HE', 'IS', 'GETTING', 'TO', 'BE', 'A', 'PRETTY', 'GOOD', 'WIZARD', 'BUT', 'HE', 'IS', 'MERELY', 'THE', 'ASSISTANT', 'OF', 'THE', 'GREAT', 'SORCERESS'] +1284-1181-0017-824: hyp=['THE', 'WIZARD', 'OF', 'OZ', 'WHO', 'USED', 'TO', 'BE', 'A', 'HUMBUG', 'AND', 'KNEW', 'NO', 'MAGIC', 'AT', 'ALL', 'HAS', 'BEEN', 'TAKING', 'LESSONS', 'OF', 'GLINDA', 'AND', "I'M", 'TOLD', 'HE', 'IS', 'GETTING', 'TO', 'BE', 'A', 'PRETTY', 'GOOD', 'WIZARD', 'BUT', 'HE', 'IS', 'MERELY', 'THE', 'ASSISTANT', 'OF', 'THE', 'GREAT', 'SORCERESS'] +1284-1181-0018-825: ref=['IT', 'TRULY', 'IS', 'ASSERTED', 'THE', 'MAGICIAN'] +1284-1181-0018-825: hyp=['IT', 'TRULY', 'IS', 'ASSERTED', 'THE', 'MAGICIAN'] +1284-1181-0019-826: ref=['I', 'NOW', 'USE', 'THEM', 'AS', 'ORNAMENTAL', 'STATUARY', 'IN', 'MY', 'GARDEN'] +1284-1181-0019-826: hyp=['I', 'NOW', 'USE', 'THEM', 'AS', 'ORNAMENTAL', 'STATUARY', 'IN', 'MY', 'GARDEN'] +1284-1181-0020-827: ref=['DEAR', 'ME', 'WHAT', 'A', 'CHATTERBOX', "YOU'RE", 'GETTING', 'TO', 'BE', 'UNC', 'REMARKED', 'THE', 'MAGICIAN', 'WHO', 'WAS', 'PLEASED', 'WITH', 'THE', 'COMPLIMENT'] +1284-1181-0020-827: hyp=['DEAR', 'ME', 'WHAT', 'A', 'CHATTER', 'BOX', 'ARE', 'GETTING', 'TO', 'BE', 'YUNK', 'REMARKED', 'THE', 'MAGICIAN', 'WHO', 'WAS', 'PLEASED', 'WITH', 'THE', 'COMPLIMENT'] +1284-1181-0021-828: ref=['ASKED', 'THE', 'VOICE', 'IN', 'SCORNFUL', 'ACCENTS'] +1284-1181-0021-828: hyp=['ASKED', 'THE', 'VOICE', 'IN', 'SCORNFUL', 'ACCENTS'] +1284-134647-0000-862: ref=['THE', 'GRATEFUL', 'APPLAUSE', 'OF', 'THE', 'CLERGY', 'HAS', 'CONSECRATED', 'THE', 'MEMORY', 'OF', 'A', 'PRINCE', 'WHO', 'INDULGED', 'THEIR', 'PASSIONS', 'AND', 'PROMOTED', 'THEIR', 'INTEREST'] +1284-134647-0000-862: hyp=['THE', 'GRATEFUL', 'APPLAUSE', 'OF', 'THE', 'CLERGY', 'HAS', 'CONSECRATED', 'THE', 'MEMORY', 'OF', 'A', 'PRINCE', 'WHO', 'INDULGED', 'THEIR', 'PASSIONS', 'AND', 'PROMOTED', 'THEIR', 'INTEREST'] +1284-134647-0001-863: ref=['THE', 'EDICT', 'OF', 'MILAN', 'THE', 'GREAT', 'CHARTER', 'OF', 'TOLERATION', 'HAD', 'CONFIRMED', 'TO', 'EACH', 'INDIVIDUAL', 'OF', 'THE', 'ROMAN', 'WORLD', 'THE', 'PRIVILEGE', 'OF', 'CHOOSING', 'AND', 'PROFESSING', 'HIS', 'OWN', 'RELIGION'] +1284-134647-0001-863: hyp=['THE', 'EDICT', 'OF', 'MILAN', 'THE', 'GREAT', 'CHARTER', 'OF', 'TOLERATION', 'HAD', 'CONFIRMED', 'TO', 'EACH', 'INDIVIDUAL', 'OF', 'THE', 'ROMAN', 'WORLD', 'THE', 'PRIVILEGE', 'OF', 'CHOOSING', 'AND', 'PROFESSING', 'HIS', 'OWN', 'RELIGION'] +1284-134647-0002-864: ref=['BUT', 'THIS', 'INESTIMABLE', 'PRIVILEGE', 'WAS', 'SOON', 'VIOLATED', 'WITH', 'THE', 'KNOWLEDGE', 'OF', 'TRUTH', 'THE', 'EMPEROR', 'IMBIBED', 'THE', 'MAXIMS', 'OF', 'PERSECUTION', 'AND', 'THE', 'SECTS', 'WHICH', 'DISSENTED', 'FROM', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'AFFLICTED', 'AND', 'OPPRESSED', 'BY', 'THE', 'TRIUMPH', 'OF', 
'CHRISTIANITY'] +1284-134647-0002-864: hyp=['BUT', 'THIS', 'INESTIMABLE', 'PRIVILEGE', 'WAS', 'SOON', 'VIOLATED', 'WITH', 'THE', 'KNOWLEDGE', 'OF', 'TRUTH', 'THE', 'EMPEROR', 'IBED', 'THE', 'MAXIMS', 'OF', 'PERSECUTION', 'AND', 'THE', 'SECTS', 'WHICH', 'DISSENTED', 'FROM', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'AFFLICTED', 'AND', 'OPPRESSED', 'BY', 'THE', 'TRIUMPH', 'OF', 'CHRISTIANITY'] +1284-134647-0003-865: ref=['CONSTANTINE', 'EASILY', 'BELIEVED', 'THAT', 'THE', 'HERETICS', 'WHO', 'PRESUMED', 'TO', 'DISPUTE', 'HIS', 'OPINIONS', 'OR', 'TO', 'OPPOSE', 'HIS', 'COMMANDS', 'WERE', 'GUILTY', 'OF', 'THE', 'MOST', 'ABSURD', 'AND', 'CRIMINAL', 'OBSTINACY', 'AND', 'THAT', 'A', 'SEASONABLE', 'APPLICATION', 'OF', 'MODERATE', 'SEVERITIES', 'MIGHT', 'SAVE', 'THOSE', 'UNHAPPY', 'MEN', 'FROM', 'THE', 'DANGER', 'OF', 'AN', 'EVERLASTING', 'CONDEMNATION'] +1284-134647-0003-865: hyp=['CONSTANTINE', 'EASILY', 'BELIEVED', 'THAT', 'THE', 'HERETICS', 'WHO', 'PRESUMED', 'TO', 'DISPUTE', 'HIS', 'OPINIONS', 'OR', 'TO', 'OPPOSE', 'HIS', 'COMMANDS', 'WERE', 'GUILTY', 'OF', 'THE', 'MOST', 'ABSURD', 'AND', 'CRIMINAL', 'OBSTINACY', 'AND', 'THAT', 'A', 'SEASONABLE', 'APPLICATION', 'OF', 'MODERATE', 'SEVERITIES', 'MIGHT', 'SAVE', 'THOSE', 'UNHAPPY', 'MEN', 'FROM', 'THE', 'DANGER', 'OF', 'AN', 'EVERLASTING', 'CONDEMNATION'] +1284-134647-0004-866: ref=['SOME', 'OF', 'THE', 'PENAL', 'REGULATIONS', 'WERE', 'COPIED', 'FROM', 'THE', 'EDICTS', 'OF', 'DIOCLETIAN', 'AND', 'THIS', 'METHOD', 'OF', 'CONVERSION', 'WAS', 'APPLAUDED', 'BY', 'THE', 'SAME', 'BISHOPS', 'WHO', 'HAD', 'FELT', 'THE', 'HAND', 'OF', 'OPPRESSION', 'AND', 'PLEADED', 'FOR', 'THE', 'RIGHTS', 'OF', 'HUMANITY'] +1284-134647-0004-866: hyp=['SOME', 'OF', 'THE', 'PENAL', 'REGULATIONS', 'WERE', 'COPIED', 'FROM', 'THE', 'EDICTS', 'OF', 'DIOCLETIAN', 'AND', 'THIS', 'METHOD', 'OF', 'CONVERSION', 'WAS', 'APPLAUDED', 'BY', 'THE', 'SAME', 'BISHOPS', 'WHO', 'HAD', 'FELLED', 'THE', 'HAND', 'OF', 'OPPRESSION', 'AND', 'PLEADED', 'FOR', 'THE', 'RIGHTS', 'OF', 'HUMANITY'] +1284-134647-0005-867: ref=['THEY', 'ASSERTED', 'WITH', 'CONFIDENCE', 'AND', 'ALMOST', 'WITH', 'EXULTATION', 'THAT', 'THE', 'APOSTOLICAL', 'SUCCESSION', 'WAS', 'INTERRUPTED', 'THAT', 'ALL', 'THE', 'BISHOPS', 'OF', 'EUROPE', 'AND', 'ASIA', 'WERE', 'INFECTED', 'BY', 'THE', 'CONTAGION', 'OF', 'GUILT', 'AND', 'SCHISM', 'AND', 'THAT', 'THE', 'PREROGATIVES', 'OF', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'CONFINED', 'TO', 'THE', 'CHOSEN', 'PORTION', 'OF', 'THE', 'AFRICAN', 'BELIEVERS', 'WHO', 'ALONE', 'HAD', 'PRESERVED', 'INVIOLATE', 'THE', 'INTEGRITY', 'OF', 'THEIR', 'FAITH', 'AND', 'DISCIPLINE'] +1284-134647-0005-867: hyp=['THEY', 'ASSERTED', 'WITH', 'CONFIDENCE', 'AND', 'ALMOST', 'WITH', 'EXULTATION', 'THAT', 'THE', 'APOSTOLICAL', 'SUCCESSION', 'WAS', 'INTERRUPTED', 'THAT', 'ALL', 'THE', 'BISHOPS', 'OF', 'EUROPE', 'AND', 'ASIA', 'WERE', 'INFECTED', 'BY', 'THE', 'CONTAGION', 'OF', 'GUILT', 'AND', 'SCHISM', 'AND', 'THAT', 'THE', 'PREROGATIVES', 'OF', 'THE', 'CATHOLIC', 'CHURCH', 'WERE', 'CONFINED', 'TO', 'THE', 'CHOSEN', 'PORTION', 'OF', 'THE', 'AFRICAN', 'BELIEVERS', 'WHO', 'ALONE', 'HAD', 'PRESERVED', 'INVIOLATE', 'THE', 'INTEGRITY', 'OF', 'THEIR', 'FAITH', 'AND', 'DISCIPLINE'] +1284-134647-0006-868: ref=['BISHOPS', 'VIRGINS', 'AND', 'EVEN', 'SPOTLESS', 'INFANTS', 'WERE', 'SUBJECTED', 'TO', 'THE', 'DISGRACE', 'OF', 'A', 'PUBLIC', 'PENANCE', 'BEFORE', 'THEY', 'COULD', 'BE', 'ADMITTED', 'TO', 'THE', 'COMMUNION', 'OF', 'THE', 'DONATISTS'] +1284-134647-0006-868: hyp=['BISHOPS', 'VIRGINS', 'AND', 'EVEN', 'SPOTLESS', 
'INFANTS', 'WERE', 'SUBJECTED', 'TO', 'THE', 'DISGRACE', 'OF', 'A', 'PUBLIC', 'PENANCE', 'BEFORE', 'THEY', 'COULD', 'BE', 'ADMITTED', 'TO', 'THE', 'COMMUNION', 'OF', 'THE', 'DONATISTS'] +1284-134647-0007-869: ref=['PROSCRIBED', 'BY', 'THE', 'CIVIL', 'AND', 'ECCLESIASTICAL', 'POWERS', 'OF', 'THE', 'EMPIRE', 'THE', 'DONATISTS', 'STILL', 'MAINTAINED', 'IN', 'SOME', 'PROVINCES', 'PARTICULARLY', 'IN', 'NUMIDIA', 'THEIR', 'SUPERIOR', 'NUMBERS', 'AND', 'FOUR', 'HUNDRED', 'BISHOPS', 'ACKNOWLEDGED', 'THE', 'JURISDICTION', 'OF', 'THEIR', 'PRIMATE'] +1284-134647-0007-869: hyp=['PRESCRIBED', 'BY', 'THE', 'CIVIL', 'AND', 'ECCLESIASTICAL', 'POWERS', 'OF', 'THE', 'EMPIRE', 'THE', 'DONATIST', 'STILL', 'MAINTAINED', 'IN', 'SOME', 'PROVINCES', 'PARTICULARLY', 'IN', 'MEDIA', 'THEIR', 'SUPERIOR', 'NUMBERS', 'AND', 'FOUR', 'HUNDRED', 'BISHOPS', 'ACKNOWLEDGED', 'THE', 'JURISDICTION', 'OF', 'THEIR', 'PRIMATE'] +1320-122612-0000-120: ref=['SINCE', 'THE', 'PERIOD', 'OF', 'OUR', 'TALE', 'THE', 'ACTIVE', 'SPIRIT', 'OF', 'THE', 'COUNTRY', 'HAS', 'SURROUNDED', 'IT', 'WITH', 'A', 'BELT', 'OF', 'RICH', 'AND', 'THRIVING', 'SETTLEMENTS', 'THOUGH', 'NONE', 'BUT', 'THE', 'HUNTER', 'OR', 'THE', 'SAVAGE', 'IS', 'EVER', 'KNOWN', 'EVEN', 'NOW', 'TO', 'PENETRATE', 'ITS', 'WILD', 'RECESSES'] +1320-122612-0000-120: hyp=['SINCE', 'THE', 'PERIOD', 'OF', 'OUR', 'TALE', 'THE', 'ACTIVE', 'SPIRIT', 'OF', 'THE', 'COUNTRY', 'HAS', 'SURROUNDED', 'IT', 'WITH', 'A', 'BELT', 'OF', 'RICH', 'AND', 'THRIVING', 'SETTLEMENTS', 'THOUGH', 'NONE', 'BUT', 'THE', 'HUNTER', 'OR', 'THE', 'SAVAGE', 'IS', 'EVER', 'KNOWN', 'EVEN', 'NOW', 'TO', 'PENETRATE', 'ITS', 'WILD', 'RECESSES'] +1320-122612-0001-121: ref=['THE', 'DEWS', 'WERE', 'SUFFERED', 'TO', 'EXHALE', 'AND', 'THE', 'SUN', 'HAD', 'DISPERSED', 'THE', 'MISTS', 'AND', 'WAS', 'SHEDDING', 'A', 'STRONG', 'AND', 'CLEAR', 'LIGHT', 'IN', 'THE', 'FOREST', 'WHEN', 'THE', 'TRAVELERS', 'RESUMED', 'THEIR', 'JOURNEY'] +1320-122612-0001-121: hyp=['THE', 'DEWS', 'WERE', 'SUFFERED', 'TO', 'EXHALE', 'AND', 'THE', 'SUN', 'HAD', 'DISPERSED', 'THE', 'MISTS', 'AND', 'WAS', 'SHEDDING', 'A', 'STRONG', 'AND', 'CLEAR', 'LIGHT', 'IN', 'THE', 'FOREST', 'WHEN', 'THE', 'TRAVELLERS', 'RESUMED', 'THEIR', 'JOURNEY'] +1320-122612-0002-122: ref=['AFTER', 'PROCEEDING', 'A', 'FEW', 'MILES', 'THE', 'PROGRESS', 'OF', 'HAWKEYE', 'WHO', 'LED', 'THE', 'ADVANCE', 'BECAME', 'MORE', 'DELIBERATE', 'AND', 'WATCHFUL'] +1320-122612-0002-122: hyp=['AFTER', 'PROCEEDING', 'A', 'FEW', 'MILES', 'THE', 'PROGRESS', 'OF', 'HAWKEYE', 'WHO', 'LED', 'THE', 'ADVANCE', 'BECAME', 'MORE', 'DELIBERATE', 'AND', 'WATCHFUL'] +1320-122612-0003-123: ref=['HE', 'OFTEN', 'STOPPED', 'TO', 'EXAMINE', 'THE', 'TREES', 'NOR', 'DID', 'HE', 'CROSS', 'A', 'RIVULET', 'WITHOUT', 'ATTENTIVELY', 'CONSIDERING', 'THE', 'QUANTITY', 'THE', 'VELOCITY', 'AND', 'THE', 'COLOR', 'OF', 'ITS', 'WATERS'] +1320-122612-0003-123: hyp=['HE', 'OFTEN', 'STOPPED', 'TO', 'EXAMINE', 'THE', 'TREES', 'NOR', 'DID', 'HE', 'CROSS', 'A', 'RIVULET', 'WITHOUT', 'ATTENTIVELY', 'CONSIDERING', 'THE', 'QUANTITY', 'THE', 'VELOCITY', 'AND', 'THE', 'COLOUR', 'OF', 'ITS', 'WATERS'] +1320-122612-0004-124: ref=['DISTRUSTING', 'HIS', 'OWN', 'JUDGMENT', 'HIS', 'APPEALS', 'TO', 'THE', 'OPINION', 'OF', 'CHINGACHGOOK', 'WERE', 'FREQUENT', 'AND', 'EARNEST'] +1320-122612-0004-124: hyp=['DISTRUSTING', 'HIS', 'OWN', 'JUDGMENT', 'HIS', 'APPEALS', 'TO', 'THE', 'OPINION', 'OF', 'CHINGACHGOOK', 'WERE', 'FREQUENT', 'AND', 'EARNEST'] +1320-122612-0005-125: ref=['YET', 'HERE', 'ARE', 'WE', 'WITHIN', 'A', 'SHORT', 'RANGE', 'OF', 'THE', 
'SCAROONS', 'AND', 'NOT', 'A', 'SIGN', 'OF', 'A', 'TRAIL', 'HAVE', 'WE', 'CROSSED'] +1320-122612-0005-125: hyp=['YET', 'HERE', 'ARE', 'WE', 'WITH', 'AN', 'A', 'SHORT', 'RANGE', 'OF', 'THE', 'SCARONS', 'AND', 'NOT', 'A', 'SIGN', 'OF', 'A', 'TRAIL', 'HAVE', 'WE', 'CROSSED'] +1320-122612-0006-126: ref=['LET', 'US', 'RETRACE', 'OUR', 'STEPS', 'AND', 'EXAMINE', 'AS', 'WE', 'GO', 'WITH', 'KEENER', 'EYES'] +1320-122612-0006-126: hyp=['LET', 'US', 'RETRACE', 'OUR', 'STEPS', 'AND', 'EXAMINE', 'AS', 'WE', 'GO', 'WITH', 'KEENER', 'EYES'] +1320-122612-0007-127: ref=['CHINGACHGOOK', 'HAD', 'CAUGHT', 'THE', 'LOOK', 'AND', 'MOTIONING', 'WITH', 'HIS', 'HAND', 'HE', 'BADE', 'HIM', 'SPEAK'] +1320-122612-0007-127: hyp=['INGACHGOOK', 'HAD', 'CAUGHT', 'THE', 'LOOK', 'AND', 'MOTIONING', 'WITH', 'HIS', 'HAND', 'HE', 'BADE', 'HIM', 'SPEAK'] +1320-122612-0008-128: ref=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'FOLLOWED', 'THE', 'UNEXPECTED', 'MOVEMENT', 'AND', 'READ', 'THEIR', 'SUCCESS', 'IN', 'THE', 'AIR', 'OF', 'TRIUMPH', 'THAT', 'THE', 'YOUTH', 'ASSUMED'] +1320-122612-0008-128: hyp=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'FOLLOWED', 'THE', 'UNEXPECTED', 'MOVEMENT', 'AND', 'READ', 'THEIR', 'SUCCESS', 'IN', 'THE', 'AIR', 'OF', 'TRIUMPH', 'THAT', 'THE', 'YOUTH', 'ASSUMED'] +1320-122612-0009-129: ref=['IT', 'WOULD', 'HAVE', 'BEEN', 'MORE', 'WONDERFUL', 'HAD', 'HE', 'SPOKEN', 'WITHOUT', 'A', 'BIDDING'] +1320-122612-0009-129: hyp=['IT', 'WOULD', 'HAVE', 'BEEN', 'MORE', 'WONDERFUL', 'HAD', 'HE', 'SPOKEN', 'WITHOUT', 'A', 'BIDDING'] +1320-122612-0010-130: ref=['SEE', 'SAID', 'UNCAS', 'POINTING', 'NORTH', 'AND', 'SOUTH', 'AT', 'THE', 'EVIDENT', 'MARKS', 'OF', 'THE', 'BROAD', 'TRAIL', 'ON', 'EITHER', 'SIDE', 'OF', 'HIM', 'THE', 'DARK', 'HAIR', 'HAS', 'GONE', 'TOWARD', 'THE', 'FOREST'] +1320-122612-0010-130: hyp=['SEE', 'SAID', 'UNCAS', 'POINTING', 'NORTH', 'AND', 'SOUTH', 'AT', 'THE', 'EVIDENT', 'MARKS', 'OF', 'THE', 'BROAD', 'TRAIL', 'ON', 'EITHER', 'SIDE', 'OF', 'HIM', 'THE', 'DARK', 'HAIR', 'HAS', 'GONE', 'TOWARD', 'THE', 'FOREST'] +1320-122612-0011-131: ref=['IF', 'A', 'ROCK', 'OR', 'A', 'RIVULET', 'OR', 'A', 'BIT', 'OF', 'EARTH', 'HARDER', 'THAN', 'COMMON', 'SEVERED', 'THE', 'LINKS', 'OF', 'THE', 'CLEW', 'THEY', 'FOLLOWED', 'THE', 'TRUE', 'EYE', 'OF', 'THE', 'SCOUT', 'RECOVERED', 'THEM', 'AT', 'A', 'DISTANCE', 'AND', 'SELDOM', 'RENDERED', 'THE', 'DELAY', 'OF', 'A', 'SINGLE', 'MOMENT', 'NECESSARY'] +1320-122612-0011-131: hyp=['IF', 'A', 'ROCK', 'OR', 'A', 'RIVULET', 'OR', 'A', 'BIT', 'OF', 'EARTH', 'HARDER', 'THAN', 'COMMON', 'SEVERED', 'THE', 'LINKS', 'OF', 'THE', 'CLUE', 'THEY', 'FOLLOWED', 'THE', 'TRUE', 'EYE', 'OF', 'THE', 'SCOUT', 'RECOVERED', 'THEM', 'AT', 'A', 'DISTANCE', 'AND', 'SELDOM', 'RENDERED', 'THE', 'DELAY', 'OF', 'A', 'SINGLE', 'MOMENT', 'NECESSARY'] +1320-122612-0012-132: ref=['EXTINGUISHED', 'BRANDS', 'WERE', 'LYING', 'AROUND', 'A', 'SPRING', 'THE', 'OFFALS', 'OF', 'A', 'DEER', 'WERE', 'SCATTERED', 'ABOUT', 'THE', 'PLACE', 'AND', 'THE', 'TREES', 'BORE', 'EVIDENT', 'MARKS', 'OF', 'HAVING', 'BEEN', 'BROWSED', 'BY', 'THE', 'HORSES'] +1320-122612-0012-132: hyp=['EXTINGUISHED', 'BRANDS', 'WERE', 'LYING', 'AROUND', 'A', 'SPRING', 'THE', 'OFFALS', 'OF', 'A', 'DEER', 'WERE', 'SCATTERED', 'ABOUT', 'THE', 'PLACE', 'AND', 'THE', 'TREES', 'BORE', 'EVIDENT', 'MARKS', 'OF', 'HAVING', 'BEEN', 'BROWSED', 'BY', 'THE', 'HORSES'] +1320-122612-0013-133: ref=['A', 'CIRCLE', 'OF', 'A', 'FEW', 'HUNDRED', 'FEET', 'IN', 'CIRCUMFERENCE', 'WAS', 'DRAWN', 'AND', 'EACH', 'OF', 'THE', 'PARTY', 'TOOK', 'A', 'SEGMENT', 
'FOR', 'HIS', 'PORTION'] +1320-122612-0013-133: hyp=['A', 'CIRCLE', 'OF', 'A', 'FEW', 'HUNDRED', 'FEET', 'IN', 'CIRCUMFERENCE', 'WAS', 'DRAWN', 'AND', 'EACH', 'OF', 'THE', 'PARTY', 'TOOK', 'A', 'SEGMENT', 'FOR', 'HIS', 'PORTION'] +1320-122612-0014-134: ref=['THE', 'EXAMINATION', 'HOWEVER', 'RESULTED', 'IN', 'NO', 'DISCOVERY'] +1320-122612-0014-134: hyp=['THE', 'EXAMINATION', 'HOWEVER', 'RESULTED', 'IN', 'NO', 'DISCOVERY'] +1320-122612-0015-135: ref=['THE', 'WHOLE', 'PARTY', 'CROWDED', 'TO', 'THE', 'SPOT', 'WHERE', 'UNCAS', 'POINTED', 'OUT', 'THE', 'IMPRESSION', 'OF', 'A', 'MOCCASIN', 'IN', 'THE', 'MOIST', 'ALLUVION'] +1320-122612-0015-135: hyp=['THE', 'WHOLE', 'PARTY', 'CROWDED', 'TO', 'THE', 'SPOT', 'WHERE', 'UNCAS', 'POINTED', 'OUT', 'THE', 'IMPRESSION', 'OF', 'A', 'MOCCASIN', 'IN', 'THE', 'MOIST', 'ALLUVIAN'] +1320-122612-0016-136: ref=['RUN', 'BACK', 'UNCAS', 'AND', 'BRING', 'ME', 'THE', 'SIZE', 'OF', 'THE', "SINGER'S", 'FOOT'] +1320-122612-0016-136: hyp=['RUN', 'BACK', 'UNCAS', 'AND', 'BRING', 'ME', 'THE', 'SIZE', 'OF', 'THE', "SINGER'S", 'FOOT'] +1320-122617-0000-78: ref=['NOTWITHSTANDING', 'THE', 'HIGH', 'RESOLUTION', 'OF', 'HAWKEYE', 'HE', 'FULLY', 'COMPREHENDED', 'ALL', 'THE', 'DIFFICULTIES', 'AND', 'DANGER', 'HE', 'WAS', 'ABOUT', 'TO', 'INCUR'] +1320-122617-0000-78: hyp=['NOTWITHSTANDING', 'THE', 'HIGH', 'RESOLUTION', 'OF', 'HAWKEYE', 'HE', 'FULLY', 'COMPREHENDED', 'ALL', 'THE', 'DIFFICULTIES', 'AND', 'DANGER', 'HE', 'WAS', 'ABOUT', 'TO', 'INCUR'] +1320-122617-0001-79: ref=['IN', 'HIS', 'RETURN', 'TO', 'THE', 'CAMP', 'HIS', 'ACUTE', 'AND', 'PRACTISED', 'INTELLECTS', 'WERE', 'INTENTLY', 'ENGAGED', 'IN', 'DEVISING', 'MEANS', 'TO', 'COUNTERACT', 'A', 'WATCHFULNESS', 'AND', 'SUSPICION', 'ON', 'THE', 'PART', 'OF', 'HIS', 'ENEMIES', 'THAT', 'HE', 'KNEW', 'WERE', 'IN', 'NO', 'DEGREE', 'INFERIOR', 'TO', 'HIS', 'OWN'] +1320-122617-0001-79: hyp=['IN', 'HIS', 'RETURN', 'TO', 'THE', 'CAMP', 'HIS', 'ACUTE', 'AND', 'PRACTISED', 'INTELLECTS', 'WERE', 'INTENTLY', 'ENGAGED', 'IN', 'DEVISING', 'MEANS', 'TO', 'COUNTERACT', 'A', 'WATCHFULNESS', 'AND', 'SUSPICION', 'ON', 'THE', 'PART', 'OF', 'HIS', 'ENEMIES', 'THAT', 'HE', 'KNEW', 'WERE', 'IN', 'NO', 'DEGREE', 'INFERIOR', 'TO', 'HIS', 'OWN'] +1320-122617-0002-80: ref=['IN', 'OTHER', 'WORDS', 'WHILE', 'HE', 'HAD', 'IMPLICIT', 'FAITH', 'IN', 'THE', 'ABILITY', 'OF', "BALAAM'S", 'ASS', 'TO', 'SPEAK', 'HE', 'WAS', 'SOMEWHAT', 'SKEPTICAL', 'ON', 'THE', 'SUBJECT', 'OF', 'A', "BEAR'S", 'SINGING', 'AND', 'YET', 'HE', 'HAD', 'BEEN', 'ASSURED', 'OF', 'THE', 'LATTER', 'ON', 'THE', 'TESTIMONY', 'OF', 'HIS', 'OWN', 'EXQUISITE', 'ORGANS'] +1320-122617-0002-80: hyp=['IN', 'OTHER', 'WORDS', 'WHILE', 'HE', 'HAD', 'IMPLICIT', 'FAITH', 'IN', 'THE', 'ABILITY', 'OF', "BALEM'S", 'ASS', 'TO', 'SPEAK', 'HE', 'WAS', 'SOMEWHAT', 'SCEPTICAL', 'ON', 'THE', 'SUBJECT', 'OF', 'A', "BEAR'S", 'SINGING', 'AND', 'YET', 'HE', 'HAD', 'BEEN', 'ASSURED', 'OF', 'THE', 'LATTER', 'ON', 'THE', 'TESTIMONY', 'OF', 'HIS', 'OWN', 'EXQUISITE', 'ORGANS'] +1320-122617-0003-81: ref=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'AIR', 'AND', 'MANNER', 'THAT', 'BETRAYED', 'TO', 'THE', 'SCOUT', 'THE', 'UTTER', 'CONFUSION', 'OF', 'THE', 'STATE', 'OF', 'HIS', 'MIND'] +1320-122617-0003-81: hyp=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'AIR', 'AND', 'MANNER', 'THAT', 'BETRAYED', 'TO', 'THE', 'SCOUT', 'THE', 'UTTER', 'CONFUSION', 'OF', 'THE', 'STATE', 'OF', 'HIS', 'MIND'] +1320-122617-0004-82: ref=['THE', 'INGENIOUS', 'HAWKEYE', 'WHO', 'RECALLED', 'THE', 'HASTY', 'MANNER', 'IN', 'WHICH', 'THE', 'OTHER', 'HAD', 
'ABANDONED', 'HIS', 'POST', 'AT', 'THE', 'BEDSIDE', 'OF', 'THE', 'SICK', 'WOMAN', 'WAS', 'NOT', 'WITHOUT', 'HIS', 'SUSPICIONS', 'CONCERNING', 'THE', 'SUBJECT', 'OF', 'SO', 'MUCH', 'SOLEMN', 'DELIBERATION'] +1320-122617-0004-82: hyp=['THE', 'INGENIOUS', 'HAWKEYE', 'WHO', 'RECALLED', 'THE', 'HASTY', 'MANNER', 'IN', 'WHICH', 'THE', 'OTHER', 'HAD', 'ABANDONED', 'HIS', 'POST', 'AT', 'THE', 'BEDSIDE', 'OF', 'THE', 'SICK', 'WOMAN', 'WAS', 'NOT', 'WITHOUT', 'HIS', 'SUSPICIONS', 'CONCERNING', 'THE', 'SUBJECT', 'OF', 'SO', 'MUCH', 'SOLEMN', 'DELIBERATION'] +1320-122617-0005-83: ref=['THE', 'BEAR', 'SHOOK', 'HIS', 'SHAGGY', 'SIDES', 'AND', 'THEN', 'A', 'WELL', 'KNOWN', 'VOICE', 'REPLIED'] +1320-122617-0005-83: hyp=['THE', 'BEAR', 'SHOOK', 'HIS', 'SHAGGY', 'SIDES', 'AND', 'THEN', 'A', 'WELL', 'KNOWN', 'VOICE', 'REPLIED'] +1320-122617-0006-84: ref=['CAN', 'THESE', 'THINGS', 'BE', 'RETURNED', 'DAVID', 'BREATHING', 'MORE', 'FREELY', 'AS', 'THE', 'TRUTH', 'BEGAN', 'TO', 'DAWN', 'UPON', 'HIM'] +1320-122617-0006-84: hyp=['CAN', 'THESE', 'THINGS', 'BE', 'RETURNED', 'DAVID', 'BREATHING', 'MORE', 'FREELY', 'AS', 'THE', 'TRUTH', 'BEGAN', 'TO', 'DAWN', 'UPON', 'HIM'] +1320-122617-0007-85: ref=['COME', 'COME', 'RETURNED', 'HAWKEYE', 'UNCASING', 'HIS', 'HONEST', 'COUNTENANCE', 'THE', 'BETTER', 'TO', 'ASSURE', 'THE', 'WAVERING', 'CONFIDENCE', 'OF', 'HIS', 'COMPANION', 'YOU', 'MAY', 'SEE', 'A', 'SKIN', 'WHICH', 'IF', 'IT', 'BE', 'NOT', 'AS', 'WHITE', 'AS', 'ONE', 'OF', 'THE', 'GENTLE', 'ONES', 'HAS', 'NO', 'TINGE', 'OF', 'RED', 'TO', 'IT', 'THAT', 'THE', 'WINDS', 'OF', 'THE', 'HEAVEN', 'AND', 'THE', 'SUN', 'HAVE', 'NOT', 'BESTOWED', 'NOW', 'LET', 'US', 'TO', 'BUSINESS'] +1320-122617-0007-85: hyp=['COME', 'COME', 'RETURNED', 'HAWKEYE', 'UNCASING', 'HIS', 'HONEST', 'COUNTENANCE', 'THE', 'BETTER', 'TO', 'ASSURE', 'THE', 'WAVERING', 'CONFIDENCE', 'OF', 'HIS', 'COMPANION', 'YOU', 'MAY', 'SEE', 'A', 'SKIN', 'WHICH', 'IF', 'IT', 'BE', 'NOT', 'AS', 'WHITE', 'AS', 'ONE', 'OF', 'THE', 'GENTLE', 'ONES', 'HAS', 'NO', 'TINGE', 'OF', 'RED', 'TO', 'IT', 'THAT', 'THE', 'WINDS', 'OF', 'THE', 'HEAVEN', 'AND', 'THE', 'SUN', 'HAVE', 'NOT', 'BESTOWED', 'NOW', 'LET', 'US', 'TO', 'BUSINESS'] +1320-122617-0008-86: ref=['THE', 'YOUNG', 'MAN', 'IS', 'IN', 'BONDAGE', 'AND', 'MUCH', 'I', 'FEAR', 'HIS', 'DEATH', 'IS', 'DECREED'] +1320-122617-0008-86: hyp=['THE', 'YOUNG', 'MAN', 'IS', 'IN', 'BONDAGE', 'AND', 'MUCH', 'I', 'FEAR', 'HIS', 'DEATH', 'IS', 'DECREED'] +1320-122617-0009-87: ref=['I', 'GREATLY', 'MOURN', 'THAT', 'ONE', 'SO', 'WELL', 'DISPOSED', 'SHOULD', 'DIE', 'IN', 'HIS', 'IGNORANCE', 'AND', 'I', 'HAVE', 'SOUGHT', 'A', 'GOODLY', 'HYMN', 'CAN', 'YOU', 'LEAD', 'ME', 'TO', 'HIM'] +1320-122617-0009-87: hyp=['I', 'GREATLY', 'MOURNED', 'THAT', 'ONE', 'SO', 'WELL', 'DISPOSED', 'SHOULD', 'DIE', 'IN', 'HIS', 'IGNORANCE', 'AND', 'I', 'HAVE', 'SOUGHT', 'A', 'GOODLY', 'HYMN', 'CAN', 'YOU', 'LEAD', 'ME', 'TO', 'HIM'] +1320-122617-0010-88: ref=['THE', 'TASK', 'WILL', 'NOT', 'BE', 'DIFFICULT', 'RETURNED', 'DAVID', 'HESITATING', 'THOUGH', 'I', 'GREATLY', 'FEAR', 'YOUR', 'PRESENCE', 'WOULD', 'RATHER', 'INCREASE', 'THAN', 'MITIGATE', 'HIS', 'UNHAPPY', 'FORTUNES'] +1320-122617-0010-88: hyp=['THE', 'TASK', 'WILL', 'NOT', 'BE', 'DIFFICULT', 'RETURNED', 'DAVID', 'HESITATING', 'THOUGH', 'I', 'GREATLY', 'FEAR', 'YOUR', 'PRESENCE', 'WOULD', 'RATHER', 'INCREASE', 'THAN', 'MITIGATE', 'HIS', 'UNHAPPY', 'FORTUNES'] +1320-122617-0011-89: ref=['THE', 'LODGE', 'IN', 'WHICH', 'UNCAS', 'WAS', 'CONFINED', 'WAS', 'IN', 'THE', 'VERY', 'CENTER', 'OF', 'THE', 'VILLAGE', 
'AND', 'IN', 'A', 'SITUATION', 'PERHAPS', 'MORE', 'DIFFICULT', 'THAN', 'ANY', 'OTHER', 'TO', 'APPROACH', 'OR', 'LEAVE', 'WITHOUT', 'OBSERVATION'] +1320-122617-0011-89: hyp=['THE', 'LODGE', 'IN', 'WHICH', 'UNCAS', 'WAS', 'CONFINED', 'WAS', 'IN', 'THE', 'VERY', 'CENTRE', 'OF', 'THE', 'VILLAGE', 'AND', 'IN', 'A', 'SITUATION', 'PERHAPS', 'MORE', 'DIFFICULT', 'THAN', 'ANY', 'OTHER', 'TO', 'APPROACH', 'OR', 'LEAVE', 'WITHOUT', 'OBSERVATION'] +1320-122617-0012-90: ref=['FOUR', 'OR', 'FIVE', 'OF', 'THE', 'LATTER', 'ONLY', 'LINGERED', 'ABOUT', 'THE', 'DOOR', 'OF', 'THE', 'PRISON', 'OF', 'UNCAS', 'WARY', 'BUT', 'CLOSE', 'OBSERVERS', 'OF', 'THE', 'MANNER', 'OF', 'THEIR', 'CAPTIVE'] +1320-122617-0012-90: hyp=['FOUR', 'OR', 'FIVE', 'OF', 'THE', 'LATTER', 'ONLY', 'LINGERED', 'ABOUT', 'THE', 'DOOR', 'OF', 'THE', 'PRISON', 'OF', 'UNCAS', 'WARY', 'BUT', 'CLOSE', 'OBSERVERS', 'OF', 'THE', 'MANNER', 'OF', 'THEIR', 'CAPTIVE'] +1320-122617-0013-91: ref=['DELIVERED', 'IN', 'A', 'STRONG', 'TONE', 'OF', 'ASSENT', 'ANNOUNCED', 'THE', 'GRATIFICATION', 'THE', 'SAVAGE', 'WOULD', 'RECEIVE', 'IN', 'WITNESSING', 'SUCH', 'AN', 'EXHIBITION', 'OF', 'WEAKNESS', 'IN', 'AN', 'ENEMY', 'SO', 'LONG', 'HATED', 'AND', 'SO', 'MUCH', 'FEARED'] +1320-122617-0013-91: hyp=['DELIVERED', 'IN', 'A', 'STRONG', 'TONE', 'OF', 'ASSENT', 'ANNOUNCED', 'THE', 'GRATIFICATION', 'THE', 'SAVAGE', 'WOULD', 'RECEIVE', 'AND', 'WITNESSING', 'SUCH', 'AN', 'EXHIBITION', 'OF', 'WEAKNESS', 'AND', 'AN', 'ENEMY', 'SO', 'LONG', 'HATED', 'AND', 'SO', 'MUCH', 'FEARED'] +1320-122617-0014-92: ref=['THEY', 'DREW', 'BACK', 'A', 'LITTLE', 'FROM', 'THE', 'ENTRANCE', 'AND', 'MOTIONED', 'TO', 'THE', 'SUPPOSED', 'CONJURER', 'TO', 'ENTER'] +1320-122617-0014-92: hyp=['THEY', 'DREW', 'BACK', 'A', 'LITTLE', 'FROM', 'THE', 'ENTRANCE', 'AND', 'MOTIONED', 'TO', 'THE', 'SUPPOSED', 'CONJUROR', 'TO', 'ENTER'] +1320-122617-0015-93: ref=['BUT', 'THE', 'BEAR', 'INSTEAD', 'OF', 'OBEYING', 'MAINTAINED', 'THE', 'SEAT', 'IT', 'HAD', 'TAKEN', 'AND', 'GROWLED'] +1320-122617-0015-93: hyp=['BUT', 'THE', 'BEAR', 'INSTEAD', 'OF', 'OBEYING', 'MAINTAINED', 'THE', 'SEAT', 'IT', 'HAD', 'TAKEN', 'AND', 'GROWLED'] +1320-122617-0016-94: ref=['THE', 'CUNNING', 'MAN', 'IS', 'AFRAID', 'THAT', 'HIS', 'BREATH', 'WILL', 'BLOW', 'UPON', 'HIS', 'BROTHERS', 'AND', 'TAKE', 'AWAY', 'THEIR', 'COURAGE', 'TOO', 'CONTINUED', 'DAVID', 'IMPROVING', 'THE', 'HINT', 'HE', 'RECEIVED', 'THEY', 'MUST', 'STAND', 'FURTHER', 'OFF'] +1320-122617-0016-94: hyp=['THE', 'CUNNING', 'MAN', 'IS', 'AFRAID', 'THAT', 'HIS', 'BREATH', 'WILL', 'BLOW', 'UPON', 'HIS', 'BROTHERS', 'AND', 'TAKE', 'AWAY', 'THEIR', 'COURAGE', 'TOO', 'CONTINUED', 'DAVID', 'IMPROVING', 'THE', 'HINT', 'HE', 'RECEIVED', 'THEY', 'MUST', 'STAND', 'FURTHER', 'OFF'] +1320-122617-0017-95: ref=['THEN', 'AS', 'IF', 'SATISFIED', 'OF', 'THEIR', 'SAFETY', 'THE', 'SCOUT', 'LEFT', 'HIS', 'POSITION', 'AND', 'SLOWLY', 'ENTERED', 'THE', 'PLACE'] +1320-122617-0017-95: hyp=['THEN', 'AS', 'IF', 'SATISFIED', 'OF', 'THEIR', 'SAFETY', 'THE', 'SCOUT', 'LEFT', 'HIS', 'POSITION', 'AND', 'SLOWLY', 'ENTERED', 'THE', 'PLACE'] +1320-122617-0018-96: ref=['IT', 'WAS', 'SILENT', 'AND', 'GLOOMY', 'BEING', 'TENANTED', 'SOLELY', 'BY', 'THE', 'CAPTIVE', 'AND', 'LIGHTED', 'BY', 'THE', 'DYING', 'EMBERS', 'OF', 'A', 'FIRE', 'WHICH', 'HAD', 'BEEN', 'USED', 'FOR', 'THE', 'PURPOSED', 'OF', 'COOKERY'] +1320-122617-0018-96: hyp=['IT', 'WAS', 'SILENT', 'AND', 'GLOOMY', 'BEING', 'TENANTED', 'SOLELY', 'BY', 'THE', 'CAPTIVE', 'AND', 'LIGHTED', 'BY', 'THE', 'DYING', 'EMBERS', 'OF', 'A', 'FIRE', 'WHICH', 'HAD', 
'BEEN', 'USED', 'FOR', 'THE', 'PURPOSE', 'OF', 'COOKERY'] +1320-122617-0019-97: ref=['UNCAS', 'OCCUPIED', 'A', 'DISTANT', 'CORNER', 'IN', 'A', 'RECLINING', 'ATTITUDE', 'BEING', 'RIGIDLY', 'BOUND', 'BOTH', 'HANDS', 'AND', 'FEET', 'BY', 'STRONG', 'AND', 'PAINFUL', 'WITHES'] +1320-122617-0019-97: hyp=['UNCAS', 'OCCUPIED', 'A', 'DISTANT', 'CORNER', 'IN', 'A', 'RECLINING', 'ATTITUDE', 'BEING', 'RIGIDLY', 'BOUND', 'BOTH', 'HANDS', 'AND', 'FEET', 'BY', 'STRONG', 'AND', 'PAINFUL', 'WIDTHS'] +1320-122617-0020-98: ref=['THE', 'SCOUT', 'WHO', 'HAD', 'LEFT', 'DAVID', 'AT', 'THE', 'DOOR', 'TO', 'ASCERTAIN', 'THEY', 'WERE', 'NOT', 'OBSERVED', 'THOUGHT', 'IT', 'PRUDENT', 'TO', 'PRESERVE', 'HIS', 'DISGUISE', 'UNTIL', 'ASSURED', 'OF', 'THEIR', 'PRIVACY'] +1320-122617-0020-98: hyp=['THE', 'SCOUT', 'WHO', 'HAD', 'LEFT', 'DAVID', 'AT', 'THE', 'DOOR', 'TO', 'ASCERTAIN', 'THEY', 'WERE', 'NOT', 'OBSERVED', 'THOUGHT', 'IT', 'PRUDENT', 'TO', 'PRESERVE', 'HIS', 'DISGUISE', 'UNTIL', 'ASSURED', 'OF', 'THEIR', 'PRIVACY'] +1320-122617-0021-99: ref=['WHAT', 'SHALL', 'WE', 'DO', 'WITH', 'THE', 'MINGOES', 'AT', 'THE', 'DOOR', 'THEY', 'COUNT', 'SIX', 'AND', 'THIS', 'SINGER', 'IS', 'AS', 'GOOD', 'AS', 'NOTHING'] +1320-122617-0021-99: hyp=['WHAT', 'SHALL', 'WE', 'DO', 'WITH', 'THE', 'MINGOES', 'AT', 'THE', 'DOOR', 'THEY', 'COUNT', 'SIX', 'AND', 'THE', 'SINGER', 'IS', 'AS', 'GOOD', 'AS', 'NOTHING'] +1320-122617-0022-100: ref=['THE', 'DELAWARES', 'ARE', 'CHILDREN', 'OF', 'THE', 'TORTOISE', 'AND', 'THEY', 'OUTSTRIP', 'THE', 'DEER'] +1320-122617-0022-100: hyp=['THE', 'DELAWARES', 'ARE', 'CHILDREN', 'OF', 'THE', 'TORTOISE', 'AND', 'THE', 'OUTSTRIPPED', 'THE', 'DEER'] +1320-122617-0023-101: ref=['UNCAS', 'WHO', 'HAD', 'ALREADY', 'APPROACHED', 'THE', 'DOOR', 'IN', 'READINESS', 'TO', 'LEAD', 'THE', 'WAY', 'NOW', 'RECOILED', 'AND', 'PLACED', 'HIMSELF', 'ONCE', 'MORE', 'IN', 'THE', 'BOTTOM', 'OF', 'THE', 'LODGE'] +1320-122617-0023-101: hyp=['UNCAS', 'WHO', 'HAD', 'ALREADY', 'APPROACHED', 'THE', 'DOOR', 'IN', 'READINESS', 'TO', 'LEAD', 'THE', 'WAY', 'NOW', 'RECOILED', 'AND', 'PLACED', 'HIMSELF', 'ONCE', 'MORE', 'IN', 'THE', 'BOTTOM', 'OF', 'THE', 'LODGE'] +1320-122617-0024-102: ref=['BUT', 'HAWKEYE', 'WHO', 'WAS', 'TOO', 'MUCH', 'OCCUPIED', 'WITH', 'HIS', 'OWN', 'THOUGHTS', 'TO', 'NOTE', 'THE', 'MOVEMENT', 'CONTINUED', 'SPEAKING', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'HIS', 'COMPANION'] +1320-122617-0024-102: hyp=['BUT', 'HAWKEYE', 'WHO', 'WAS', 'TOO', 'MUCH', 'OCCUPIED', 'WITH', 'HIS', 'OWN', 'THOUGHTS', 'TO', 'NOTE', 'THE', 'MOVEMENT', 'CONTINUED', 'SPEAKING', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'HIS', 'COMPANION'] +1320-122617-0025-103: ref=['SO', 'UNCAS', 'YOU', 'HAD', 'BETTER', 'TAKE', 'THE', 'LEAD', 'WHILE', 'I', 'WILL', 'PUT', 'ON', 'THE', 'SKIN', 'AGAIN', 'AND', 'TRUST', 'TO', 'CUNNING', 'FOR', 'WANT', 'OF', 'SPEED'] +1320-122617-0025-103: hyp=['SO', 'UNCAS', 'YOU', 'HAD', 'BETTER', 'TAKE', 'THE', 'LEAD', 'WHILE', 'I', 'WILL', 'PUT', 'ON', 'THE', 'SKIN', 'AGAIN', 'AND', 'TRUST', 'TO', 'CUNNING', 'FOR', 'WANT', 'OF', 'SPEED'] +1320-122617-0026-104: ref=['WELL', 'WHAT', "CAN'T", 'BE', 'DONE', 'BY', 'MAIN', 'COURAGE', 'IN', 'WAR', 'MUST', 'BE', 'DONE', 'BY', 'CIRCUMVENTION'] +1320-122617-0026-104: hyp=['WELL', 'WHAT', "CAN'T", 'BE', 'DONE', 'BY', 'MAIN', 'COURAGE', 'AND', 'WAR', 'MUST', 'BE', 'DONE', 'BY', 'CIRCUMVENTION'] +1320-122617-0027-105: ref=['AS', 'SOON', 'AS', 'THESE', 'DISPOSITIONS', 'WERE', 'MADE', 'THE', 'SCOUT', 'TURNED', 'TO', 'DAVID', 'AND', 'GAVE', 'HIM', 'HIS', 'PARTING', 'INSTRUCTIONS'] 
+1320-122617-0027-105: hyp=['AS', 'SOON', 'AS', 'THESE', 'DISPOSITIONS', 'WERE', 'MADE', 'THE', 'SCOUT', 'TURNED', 'TO', 'DAVID', 'AND', 'GAVE', 'HIM', 'HIS', 'PARTING', 'INSTRUCTIONS'] +1320-122617-0028-106: ref=['MY', 'PURSUITS', 'ARE', 'PEACEFUL', 'AND', 'MY', 'TEMPER', 'I', 'HUMBLY', 'TRUST', 'IS', 'GREATLY', 'GIVEN', 'TO', 'MERCY', 'AND', 'LOVE', 'RETURNED', 'DAVID', 'A', 'LITTLE', 'NETTLED', 'AT', 'SO', 'DIRECT', 'AN', 'ATTACK', 'ON', 'HIS', 'MANHOOD', 'BUT', 'THERE', 'ARE', 'NONE', 'WHO', 'CAN', 'SAY', 'THAT', 'I', 'HAVE', 'EVER', 'FORGOTTEN', 'MY', 'FAITH', 'IN', 'THE', 'LORD', 'EVEN', 'IN', 'THE', 'GREATEST', 'STRAITS'] +1320-122617-0028-106: hyp=['MY', 'PURSUITS', 'ARE', 'PEACEFUL', 'AND', 'MY', 'TEMPER', 'I', 'HUMBLY', 'TRUST', 'IS', 'GREATLY', 'GIVEN', 'TO', 'MERCY', 'AND', 'LOVE', 'RETURNED', 'DAVID', 'A', 'LITTLE', 'NETTLED', 'AT', 'SO', 'DIRECT', 'AN', 'ATTACK', 'ON', 'HIS', 'MANHOOD', 'BUT', 'THERE', 'ARE', 'NONE', 'WHO', 'CAN', 'SAY', 'THAT', 'I', 'HAVE', 'EVER', 'FORGOTTEN', 'MY', 'FAITH', 'IN', 'THE', 'LORD', 'EVEN', 'IN', 'THE', 'GREATEST', 'STRAITS'] +1320-122617-0029-107: ref=['IF', 'YOU', 'ARE', 'NOT', 'THEN', 'KNOCKED', 'ON', 'THE', 'HEAD', 'YOUR', 'BEING', 'A', 'NON', 'COMPOSSER', 'WILL', 'PROTECT', 'YOU', 'AND', "YOU'LL", 'THEN', 'HAVE', 'A', 'GOOD', 'REASON', 'TO', 'EXPECT', 'TO', 'DIE', 'IN', 'YOUR', 'BED'] +1320-122617-0029-107: hyp=['IF', 'YOU', 'ARE', 'NOT', 'THEN', 'KNOCKED', 'ON', 'THE', 'HEAD', 'YOUR', 'BEING', 'A', 'NONCOMPOSTER', 'WILL', 'PROTECT', 'YOU', 'AND', "YOU'LL", 'THEN', 'HAVE', 'A', 'GOOD', 'REASON', 'TO', 'EXPECT', 'TO', 'DIE', 'IN', 'YOUR', 'BED'] +1320-122617-0030-108: ref=['SO', 'CHOOSE', 'FOR', 'YOURSELF', 'TO', 'MAKE', 'A', 'RUSH', 'OR', 'TARRY', 'HERE'] +1320-122617-0030-108: hyp=['SIR', 'CHOOSE', 'FOR', 'YOURSELF', 'TO', 'MAKE', 'A', 'RUSH', 'OR', 'TARRY', 'HERE'] +1320-122617-0031-109: ref=['BRAVELY', 'AND', 'GENEROUSLY', 'HAS', 'HE', 'BATTLED', 'IN', 'MY', 'BEHALF', 'AND', 'THIS', 'AND', 'MORE', 'WILL', 'I', 'DARE', 'IN', 'HIS', 'SERVICE'] +1320-122617-0031-109: hyp=['BRAVELY', 'AND', 'GENEROUSLY', 'HAS', 'HE', 'BATTLED', 'IN', 'MY', 'BEHALF', 'AND', 'THIS', 'AND', 'MORE', 'WILL', 'I', 'DARE', 'IN', 'HIS', 'SERVICE'] +1320-122617-0032-110: ref=['KEEP', 'SILENT', 'AS', 'LONG', 'AS', 'MAY', 'BE', 'AND', 'IT', 'WOULD', 'BE', 'WISE', 'WHEN', 'YOU', 'DO', 'SPEAK', 'TO', 'BREAK', 'OUT', 'SUDDENLY', 'IN', 'ONE', 'OF', 'YOUR', 'SHOUTINGS', 'WHICH', 'WILL', 'SERVE', 'TO', 'REMIND', 'THE', 'INDIANS', 'THAT', 'YOU', 'ARE', 'NOT', 'ALTOGETHER', 'AS', 'RESPONSIBLE', 'AS', 'MEN', 'SHOULD', 'BE'] +1320-122617-0032-110: hyp=['KEEP', 'SILENT', 'AS', 'LONG', 'AS', 'MAY', 'BE', 'AND', 'IT', 'WOULD', 'BE', 'WISE', 'WHEN', 'YOU', 'DO', 'SPEAK', 'TO', 'BREAK', 'OUT', 'SUDDENLY', 'IN', 'ONE', 'OF', 'YOUR', 'SHOUTINGS', 'WHICH', 'WILL', 'SERVE', 'TO', 'REMIND', 'THE', 'INDIANS', 'THAT', 'YOU', 'ARE', 'NOT', 'ALTOGETHER', 'AS', 'RESPONSIBLE', 'AS', 'MEN', 'SHOULD', 'BE'] +1320-122617-0033-111: ref=['IF', 'HOWEVER', 'THEY', 'TAKE', 'YOUR', 'SCALP', 'AS', 'I', 'TRUST', 'AND', 'BELIEVE', 'THEY', 'WILL', 'NOT', 'DEPEND', 'ON', 'IT', 'UNCAS', 'AND', 'I', 'WILL', 'NOT', 'FORGET', 'THE', 'DEED', 'BUT', 'REVENGE', 'IT', 'AS', 'BECOMES', 'TRUE', 'WARRIORS', 'AND', 'TRUSTY', 'FRIENDS'] +1320-122617-0033-111: hyp=['IF', 'HOWEVER', 'THEY', 'TAKE', 'YOUR', 'SCALP', 'AS', 'I', 'TRUST', 'AND', 'BELIEVE', 'THEY', 'WILL', 'NOT', 'DEPEND', 'ON', 'IT', 'UNCAS', 'AND', 'I', 'WILL', 'NOT', 'FORGET', 'THE', 'DEED', 'BUT', 'REVENGE', 'IT', 'AS', 'BECOMES', 'TRUE', 'WARRIORS', 
'AND', 'TRUSTY', 'FRIENDS'] +1320-122617-0034-112: ref=['HOLD', 'SAID', 'DAVID', 'PERCEIVING', 'THAT', 'WITH', 'THIS', 'ASSURANCE', 'THEY', 'WERE', 'ABOUT', 'TO', 'LEAVE', 'HIM', 'I', 'AM', 'AN', 'UNWORTHY', 'AND', 'HUMBLE', 'FOLLOWER', 'OF', 'ONE', 'WHO', 'TAUGHT', 'NOT', 'THE', 'DAMNABLE', 'PRINCIPLE', 'OF', 'REVENGE'] +1320-122617-0034-112: hyp=['HOLD', 'SAID', 'DAVID', 'PERCEIVING', 'THAT', 'WITH', 'THIS', 'ASSURANCE', 'THEY', 'WERE', 'ABOUT', 'TO', 'LEAVE', 'HIM', 'I', 'AM', 'AN', 'UNWORTHY', 'AND', 'HUMBLE', 'FOLLOWER', 'OF', 'ONE', 'WHO', 'TAUGHT', 'NOT', 'THE', 'DAMNABLE', 'PRINCIPLE', 'OF', 'REVENGE'] +1320-122617-0035-113: ref=['THEN', 'HEAVING', 'A', 'HEAVY', 'SIGH', 'PROBABLY', 'AMONG', 'THE', 'LAST', 'HE', 'EVER', 'DREW', 'IN', 'PINING', 'FOR', 'A', 'CONDITION', 'HE', 'HAD', 'SO', 'LONG', 'ABANDONED', 'HE', 'ADDED', 'IT', 'IS', 'WHAT', 'I', 'WOULD', 'WISH', 'TO', 'PRACTISE', 'MYSELF', 'AS', 'ONE', 'WITHOUT', 'A', 'CROSS', 'OF', 'BLOOD', 'THOUGH', 'IT', 'IS', 'NOT', 'ALWAYS', 'EASY', 'TO', 'DEAL', 'WITH', 'AN', 'INDIAN', 'AS', 'YOU', 'WOULD', 'WITH', 'A', 'FELLOW', 'CHRISTIAN'] +1320-122617-0035-113: hyp=['THEN', 'HEAVING', 'A', 'HEAVY', 'SIGH', 'PROBABLY', 'AMONG', 'THE', 'LAST', 'HE', 'EVER', 'DREW', 'IN', 'PINING', 'FOR', 'A', 'CONDITION', 'HE', 'HAD', 'SO', 'LONG', 'ABANDONED', 'HE', 'ADDED', 'IT', 'IS', 'WHAT', 'I', 'WOULD', 'WISH', 'TO', 'PRACTISE', 'MYSELF', 'AS', 'ONE', 'WITHOUT', 'A', 'CROSS', 'OF', 'BLOOD', 'THOUGH', 'IT', 'IS', 'NOT', 'ALWAYS', 'EASY', 'TO', 'DEAL', 'WITH', 'AN', 'INDIAN', 'AS', 'YOU', 'WOULD', 'WITH', 'A', 'FELLOW', 'CHRISTIAN'] +1320-122617-0036-114: ref=['GOD', 'BLESS', 'YOU', 'FRIEND', 'I', 'DO', 'BELIEVE', 'YOUR', 'SCENT', 'IS', 'NOT', 'GREATLY', 'WRONG', 'WHEN', 'THE', 'MATTER', 'IS', 'DULY', 'CONSIDERED', 'AND', 'KEEPING', 'ETERNITY', 'BEFORE', 'THE', 'EYES', 'THOUGH', 'MUCH', 'DEPENDS', 'ON', 'THE', 'NATURAL', 'GIFTS', 'AND', 'THE', 'FORCE', 'OF', 'TEMPTATION'] +1320-122617-0036-114: hyp=['GOD', 'BLESS', 'YOU', 'FRIEND', 'I', 'DO', 'BELIEVE', 'YOUR', 'SIN', 'HAS', 'NOT', 'GREATLY', 'WRONG', 'WHEN', 'THE', 'MATTER', 'IS', 'DULY', 'CONSIDERED', 'AND', 'KEEPING', 'ETERNITY', 'BEFORE', 'THE', 'EYES', 'THOUGH', 'MUCH', 'DEPENDS', 'ON', 'THE', 'NATURAL', 'GIFTS', 'IN', 'THE', 'FORCE', 'OF', 'TEMPTATION'] +1320-122617-0037-115: ref=['THE', 'DELAWARE', 'DOG', 'HE', 'SAID', 'LEANING', 'FORWARD', 'AND', 'PEERING', 'THROUGH', 'THE', 'DIM', 'LIGHT', 'TO', 'CATCH', 'THE', 'EXPRESSION', 'OF', 'THE', "OTHER'S", 'FEATURES', 'IS', 'HE', 'AFRAID'] +1320-122617-0037-115: hyp=['THE', 'DELAWARE', 'DOG', 'HE', 'SAID', 'LEANING', 'FORWARD', 'AND', 'PEERING', 'THROUGH', 'THE', 'DIM', 'LIGHT', 'TO', 'CATCH', 'THE', 'EXPRESSION', 'OF', 'THE', "OTHER'S", 'FEATURES', 'IS', 'HE', 'AFRAID'] +1320-122617-0038-116: ref=['WILL', 'THE', 'HURONS', 'HEAR', 'HIS', 'GROANS'] +1320-122617-0038-116: hyp=['WILL', 'THE', 'HURONS', 'HEAR', 'HIS', 'GROANS'] +1320-122617-0039-117: ref=['THE', 'MOHICAN', 'STARTED', 'ON', 'HIS', 'FEET', 'AND', 'SHOOK', 'HIS', 'SHAGGY', 'COVERING', 'AS', 'THOUGH', 'THE', 'ANIMAL', 'HE', 'COUNTERFEITED', 'WAS', 'ABOUT', 'TO', 'MAKE', 'SOME', 'DESPERATE', 'EFFORT'] +1320-122617-0039-117: hyp=['THE', 'MOHICANS', 'STARTED', 'ON', 'HIS', 'FEET', 'AND', 'SHOOK', 'HIS', 'SHAGGY', 'COVERING', 'AS', 'THOUGH', 'THE', 'ANIMAL', 'HE', 'COUNTERFEITED', 'WAS', 'ABOUT', 'TO', 'MAKE', 'SOME', 'DESPERATE', 'EFFORT'] +1320-122617-0040-118: ref=['HE', 'HAD', 'NO', 'OCCASION', 'TO', 'DELAY', 'FOR', 'AT', 'THE', 'NEXT', 'INSTANT', 'A', 'BURST', 'OF', 'CRIES', 'FILLED', 'THE', 
'OUTER', 'AIR', 'AND', 'RAN', 'ALONG', 'THE', 'WHOLE', 'EXTENT', 'OF', 'THE', 'VILLAGE'] +1320-122617-0040-118: hyp=['HE', 'HAD', 'NO', 'OCCASION', 'TO', 'DELAY', 'FOR', 'AT', 'THE', 'NEXT', 'INSTANT', 'A', 'BURST', 'OF', 'CRIES', 'FILLED', 'THE', 'OUTER', 'AIR', 'AND', 'RAN', 'ALONG', 'THE', 'WHOLE', 'EXTENT', 'OF', 'THE', 'VILLAGE'] +1320-122617-0041-119: ref=['UNCAS', 'CAST', 'HIS', 'SKIN', 'AND', 'STEPPED', 'FORTH', 'IN', 'HIS', 'OWN', 'BEAUTIFUL', 'PROPORTIONS'] +1320-122617-0041-119: hyp=['UNCAS', 'CAST', 'HIS', 'SKIN', 'AND', 'STEPPED', 'FORTH', 'IN', 'HIS', 'OWN', 'BEAUTIFUL', 'PROPORTIONS'] +1580-141083-0000-1949: ref=['I', 'WILL', 'ENDEAVOUR', 'IN', 'MY', 'STATEMENT', 'TO', 'AVOID', 'SUCH', 'TERMS', 'AS', 'WOULD', 'SERVE', 'TO', 'LIMIT', 'THE', 'EVENTS', 'TO', 'ANY', 'PARTICULAR', 'PLACE', 'OR', 'GIVE', 'A', 'CLUE', 'AS', 'TO', 'THE', 'PEOPLE', 'CONCERNED'] +1580-141083-0000-1949: hyp=['I', 'WILL', 'ENDEAVOUR', 'IN', 'MY', 'STATEMENT', 'TO', 'AVOID', 'SUCH', 'TERMS', 'AS', 'WOULD', 'SERVE', 'TO', 'LIMIT', 'THE', 'EVENTS', 'TO', 'ANY', 'PARTICULAR', 'PLACE', 'OR', 'GIVE', 'A', 'CLUE', 'AS', 'TO', 'THE', 'PEOPLE', 'CONCERNED'] +1580-141083-0001-1950: ref=['I', 'HAD', 'ALWAYS', 'KNOWN', 'HIM', 'TO', 'BE', 'RESTLESS', 'IN', 'HIS', 'MANNER', 'BUT', 'ON', 'THIS', 'PARTICULAR', 'OCCASION', 'HE', 'WAS', 'IN', 'SUCH', 'A', 'STATE', 'OF', 'UNCONTROLLABLE', 'AGITATION', 'THAT', 'IT', 'WAS', 'CLEAR', 'SOMETHING', 'VERY', 'UNUSUAL', 'HAD', 'OCCURRED'] +1580-141083-0001-1950: hyp=['I', 'HAD', 'ALWAYS', 'KNOWN', 'HIM', 'TO', 'BE', 'RESTLESS', 'IN', 'HIS', 'MANNER', 'BUT', 'ON', 'THIS', 'PARTICULAR', 'OCCASION', 'HE', 'WAS', 'IN', 'SUCH', 'A', 'STATE', 'OF', 'UNCONTROLLABLE', 'AGITATION', 'THAT', 'IT', 'WAS', 'CLEAR', 'SOMETHING', 'VERY', 'UNUSUAL', 'HAD', 'OCCURRED'] +1580-141083-0002-1951: ref=['MY', "FRIEND'S", 'TEMPER', 'HAD', 'NOT', 'IMPROVED', 'SINCE', 'HE', 'HAD', 'BEEN', 'DEPRIVED', 'OF', 'THE', 'CONGENIAL', 'SURROUNDINGS', 'OF', 'BAKER', 'STREET'] +1580-141083-0002-1951: hyp=['MY', "FRIEND'S", 'TEMPER', 'HAD', 'NOT', 'IMPROVED', 'SINCE', 'HE', 'HAD', 'BEEN', 'DEPRIVED', 'OF', 'THE', 'CONGENIAL', 'SURROUNDINGS', 'OF', 'BAKER', 'STREET'] +1580-141083-0003-1952: ref=['WITHOUT', 'HIS', 'SCRAPBOOKS', 'HIS', 'CHEMICALS', 'AND', 'HIS', 'HOMELY', 'UNTIDINESS', 'HE', 'WAS', 'AN', 'UNCOMFORTABLE', 'MAN'] +1580-141083-0003-1952: hyp=['WITHOUT', 'HIS', 'SCRAP', 'BOOKS', 'HIS', 'CHEMICALS', 'AND', 'HIS', 'HOMELY', 'UNTIDINESS', 'HE', 'WAS', 'AN', 'UNCOMFORTABLE', 'MAN'] +1580-141083-0004-1953: ref=['I', 'HAD', 'TO', 'READ', 'IT', 'OVER', 'CAREFULLY', 'AS', 'THE', 'TEXT', 'MUST', 'BE', 'ABSOLUTELY', 'CORRECT'] +1580-141083-0004-1953: hyp=['I', 'HAD', 'TO', 'READ', 'IT', 'OVER', 'CAREFULLY', 'AS', 'THE', 'TEXT', 'MUST', 'BE', 'ABSOLUTELY', 'CORRECT'] +1580-141083-0005-1954: ref=['I', 'WAS', 'ABSENT', 'RATHER', 'MORE', 'THAN', 'AN', 'HOUR'] +1580-141083-0005-1954: hyp=['I', 'WAS', 'ABSENT', 'RATHER', 'MORE', 'THAN', 'AN', 'HOUR'] +1580-141083-0006-1955: ref=['THE', 'ONLY', 'DUPLICATE', 'WHICH', 'EXISTED', 'SO', 'FAR', 'AS', 'I', 'KNEW', 'WAS', 'THAT', 'WHICH', 'BELONGED', 'TO', 'MY', 'SERVANT', 'BANNISTER', 'A', 'MAN', 'WHO', 'HAS', 'LOOKED', 'AFTER', 'MY', 'ROOM', 'FOR', 'TEN', 'YEARS', 'AND', 'WHOSE', 'HONESTY', 'IS', 'ABSOLUTELY', 'ABOVE', 'SUSPICION'] +1580-141083-0006-1955: hyp=['THE', 'ONLY', 'DUPLICATE', 'WHICH', 'EXISTED', 'SO', 'FAR', 'AS', 'I', 'KNEW', 'WAS', 'THAT', 'WHICH', 'BELONGED', 'TO', 'MY', 'SERVANT', 'BANISTER', 'A', 'MAN', 'WHO', 'HAS', 'LOOKED', 'AFTER', 'MY', 'ROOM', 'FOR', 
'TEN', 'YEARS', 'AND', 'WHOSE', 'HONESTY', 'IS', 'ABSOLUTELY', 'ABOVE', 'SUSPICION'] +1580-141083-0007-1956: ref=['THE', 'MOMENT', 'I', 'LOOKED', 'AT', 'MY', 'TABLE', 'I', 'WAS', 'AWARE', 'THAT', 'SOMEONE', 'HAD', 'RUMMAGED', 'AMONG', 'MY', 'PAPERS'] +1580-141083-0007-1956: hyp=['THE', 'MOMENT', 'I', 'LOOKED', 'AT', 'MY', 'TABLE', 'I', 'WAS', 'AWARE', 'THAT', 'SOME', 'ONE', 'HAD', 'RUMMAGED', 'AMONG', 'MY', 'PAPERS'] +1580-141083-0008-1957: ref=['THE', 'PROOF', 'WAS', 'IN', 'THREE', 'LONG', 'SLIPS', 'I', 'HAD', 'LEFT', 'THEM', 'ALL', 'TOGETHER'] +1580-141083-0008-1957: hyp=['THE', 'PROOF', 'WAS', 'IN', 'THREE', 'LONG', 'SLIPS', 'I', 'HAD', 'LEFT', 'THEM', 'ALL', 'TOGETHER'] +1580-141083-0009-1958: ref=['THE', 'ALTERNATIVE', 'WAS', 'THAT', 'SOMEONE', 'PASSING', 'HAD', 'OBSERVED', 'THE', 'KEY', 'IN', 'THE', 'DOOR', 'HAD', 'KNOWN', 'THAT', 'I', 'WAS', 'OUT', 'AND', 'HAD', 'ENTERED', 'TO', 'LOOK', 'AT', 'THE', 'PAPERS'] +1580-141083-0009-1958: hyp=['THEY', 'ALL', 'TURNED', 'OF', 'WAS', 'THAT', 'SOME', 'ONE', 'PASSING', 'HAD', 'OBSERVED', 'THE', 'KEY', 'IN', 'THE', 'DOOR', 'HAD', 'KNOWN', 'THAT', 'I', 'WAS', 'OUT', 'AND', 'HAD', 'ENTERED', 'TO', 'LOOK', 'AT', 'THE', 'PAPERS'] +1580-141083-0010-1959: ref=['I', 'GAVE', 'HIM', 'A', 'LITTLE', 'BRANDY', 'AND', 'LEFT', 'HIM', 'COLLAPSED', 'IN', 'A', 'CHAIR', 'WHILE', 'I', 'MADE', 'A', 'MOST', 'CAREFUL', 'EXAMINATION', 'OF', 'THE', 'ROOM'] +1580-141083-0010-1959: hyp=['I', 'GAVE', 'HIM', 'A', 'LITTLE', 'BRANDY', 'AND', 'LEFT', 'HIM', 'COLLAPSED', 'IN', 'A', 'CHAIR', 'WHILE', 'I', 'MADE', 'A', 'MOST', 'CAREFUL', 'EXAMINATION', 'OF', 'THE', 'ROOM'] +1580-141083-0011-1960: ref=['A', 'BROKEN', 'TIP', 'OF', 'LEAD', 'WAS', 'LYING', 'THERE', 'ALSO'] +1580-141083-0011-1960: hyp=['A', 'BROKEN', 'TIP', 'OF', 'LEAD', 'WAS', 'LYING', 'THERE', 'ALSO'] +1580-141083-0012-1961: ref=['NOT', 'ONLY', 'THIS', 'BUT', 'ON', 'THE', 'TABLE', 'I', 'FOUND', 'A', 'SMALL', 'BALL', 'OF', 'BLACK', 'DOUGH', 'OR', 'CLAY', 'WITH', 'SPECKS', 'OF', 'SOMETHING', 'WHICH', 'LOOKS', 'LIKE', 'SAWDUST', 'IN', 'IT'] +1580-141083-0012-1961: hyp=['NOT', 'ONLY', 'THIS', 'BUT', 'ON', 'THE', 'TABLE', 'I', 'FOUND', 'A', 'SMALL', 'BALL', 'OF', 'BLACK', 'DOUGH', 'OR', 'CLAY', 'WITH', 'SPECKS', 'OF', 'SOMETHING', 'WHICH', 'LOOKS', 'LIKE', 'SAWDUST', 'IN', 'IT'] +1580-141083-0013-1962: ref=['ABOVE', 'ALL', 'THINGS', 'I', 'DESIRE', 'TO', 'SETTLE', 'THE', 'MATTER', 'QUIETLY', 'AND', 'DISCREETLY'] +1580-141083-0013-1962: hyp=['ABOVE', 'ALL', 'THINGS', 'I', 'DESIRE', 'TO', 'SETTLE', 'THE', 'MATTER', 'QUIETLY', 'AND', 'DISCREETLY'] +1580-141083-0014-1963: ref=['TO', 'THE', 'BEST', 'OF', 'MY', 'BELIEF', 'THEY', 'WERE', 'ROLLED', 'UP'] +1580-141083-0014-1963: hyp=['TO', 'THE', 'BEST', 'OF', 'MY', 'BELIEF', 'THEY', 'WERE', 'ROLLED', 'UP'] +1580-141083-0015-1964: ref=['DID', 'ANYONE', 'KNOW', 'THAT', 'THESE', 'PROOFS', 'WOULD', 'BE', 'THERE', 'NO', 'ONE', 'SAVE', 'THE', 'PRINTER'] +1580-141083-0015-1964: hyp=['DID', 'ANY', 'ONE', 'KNOW', 'THAT', 'THESE', 'PROOFS', 'WOULD', 'BE', 'THERE', 'NO', 'ONE', 'SAVE', 'THE', 'PRINTER'] +1580-141083-0016-1965: ref=['I', 'WAS', 'IN', 'SUCH', 'A', 'HURRY', 'TO', 'COME', 'TO', 'YOU', 'YOU', 'LEFT', 'YOUR', 'DOOR', 'OPEN'] +1580-141083-0016-1965: hyp=['I', 'WAS', 'IN', 'SUCH', 'A', 'HURRY', 'TO', 'COME', 'TO', 'YOU', 'YOU', 'LEFT', 'YOUR', 'DOOR', 'OPEN'] +1580-141083-0017-1966: ref=['SO', 'IT', 'SEEMS', 'TO', 'ME'] +1580-141083-0017-1966: hyp=['SO', 'IT', 'SEEMS', 'TO', 'ME'] +1580-141083-0018-1967: ref=['NOW', 'MISTER', 'SOAMES', 'AT', 'YOUR', 'DISPOSAL'] 
+1580-141083-0018-1967: hyp=['NOW', 'MISTER', 'SOLMES', 'AT', 'YOUR', 'DISPOSAL'] +1580-141083-0019-1968: ref=['ABOVE', 'WERE', 'THREE', 'STUDENTS', 'ONE', 'ON', 'EACH', 'STORY'] +1580-141083-0019-1968: hyp=['ABOVE', 'WERE', 'THREE', 'STUDENTS', 'ONE', 'ON', 'EACH', 'STORY'] +1580-141083-0020-1969: ref=['THEN', 'HE', 'APPROACHED', 'IT', 'AND', 'STANDING', 'ON', 'TIPTOE', 'WITH', 'HIS', 'NECK', 'CRANED', 'HE', 'LOOKED', 'INTO', 'THE', 'ROOM'] +1580-141083-0020-1969: hyp=['THEN', 'HE', 'APPROACHED', 'IT', 'AND', 'STANDING', 'ON', 'TIPTOE', 'WITH', 'HIS', 'NECK', 'CRANED', 'HE', 'LOOKED', 'INTO', 'THE', 'ROOM'] +1580-141083-0021-1970: ref=['THERE', 'IS', 'NO', 'OPENING', 'EXCEPT', 'THE', 'ONE', 'PANE', 'SAID', 'OUR', 'LEARNED', 'GUIDE'] +1580-141083-0021-1970: hyp=['THERE', 'IS', 'NO', 'OPENING', 'EXCEPT', 'THE', 'ONE', 'PAIN', 'SAID', 'OUR', 'LEARNED', 'GUIDE'] +1580-141083-0022-1971: ref=['I', 'AM', 'AFRAID', 'THERE', 'ARE', 'NO', 'SIGNS', 'HERE', 'SAID', 'HE'] +1580-141083-0022-1971: hyp=['I', 'AM', 'AFRAID', 'THERE', 'ARE', 'NO', 'SIGNS', 'HERE', 'SAID', 'HE'] +1580-141083-0023-1972: ref=['ONE', 'COULD', 'HARDLY', 'HOPE', 'FOR', 'ANY', 'UPON', 'SO', 'DRY', 'A', 'DAY'] +1580-141083-0023-1972: hyp=['ONE', 'COULD', 'HARDLY', 'HOPE', 'FOR', 'ANY', 'UPON', 'SO', 'DRY', 'A', 'DAY'] +1580-141083-0024-1973: ref=['YOU', 'LEFT', 'HIM', 'IN', 'A', 'CHAIR', 'YOU', 'SAY', 'WHICH', 'CHAIR', 'BY', 'THE', 'WINDOW', 'THERE'] +1580-141083-0024-1973: hyp=['YOU', 'LEFT', 'HIM', 'IN', 'A', 'CHAIR', 'YOU', 'SAY', 'WHICH', 'CHAIR', 'BY', 'THE', 'WINDOW', 'THERE'] +1580-141083-0025-1974: ref=['THE', 'MAN', 'ENTERED', 'AND', 'TOOK', 'THE', 'PAPERS', 'SHEET', 'BY', 'SHEET', 'FROM', 'THE', 'CENTRAL', 'TABLE'] +1580-141083-0025-1974: hyp=['THE', 'MEN', 'ENTERED', 'AND', 'TOOK', 'THE', 'PAPERS', 'SHEET', 'BY', 'SHEET', 'FROM', 'THE', 'CENTRAL', 'TABLE'] +1580-141083-0026-1975: ref=['AS', 'A', 'MATTER', 'OF', 'FACT', 'HE', 'COULD', 'NOT', 'SAID', 'SOAMES', 'FOR', 'I', 'ENTERED', 'BY', 'THE', 'SIDE', 'DOOR'] +1580-141083-0026-1975: hyp=['AS', 'A', 'MATTER', 'OF', 'FACT', 'HE', 'COULD', 'NOT', 'SAID', 'SOLMES', 'FOR', 'I', 'ENTERED', 'BY', 'THE', 'SIDE', 'DOOR'] +1580-141083-0027-1976: ref=['HOW', 'LONG', 'WOULD', 'IT', 'TAKE', 'HIM', 'TO', 'DO', 'THAT', 'USING', 'EVERY', 'POSSIBLE', 'CONTRACTION', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'NOT', 'LESS'] +1580-141083-0027-1976: hyp=['HOW', 'LONG', 'WOULD', 'IT', 'TAKE', 'HIM', 'TO', 'DO', 'THAT', 'USING', 'EVERY', 'POSSIBLE', 'CONTRACTION', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'NOT', 'LESS'] +1580-141083-0028-1977: ref=['THEN', 'HE', 'TOSSED', 'IT', 'DOWN', 'AND', 'SEIZED', 'THE', 'NEXT'] +1580-141083-0028-1977: hyp=['THEN', 'HE', 'TOSSED', 'IT', 'DOWN', 'AND', 'SEIZED', 'THE', 'NEXT'] +1580-141083-0029-1978: ref=['HE', 'WAS', 'IN', 'THE', 'MIDST', 'OF', 'THAT', 'WHEN', 'YOUR', 'RETURN', 'CAUSED', 'HIM', 'TO', 'MAKE', 'A', 'VERY', 'HURRIED', 'RETREAT', 'VERY', 'HURRIED', 'SINCE', 'HE', 'HAD', 'NOT', 'TIME', 'TO', 'REPLACE', 'THE', 'PAPERS', 'WHICH', 'WOULD', 'TELL', 'YOU', 'THAT', 'HE', 'HAD', 'BEEN', 'THERE'] +1580-141083-0029-1978: hyp=['HE', 'WAS', 'IN', 'THE', 'MIDST', 'OF', 'THAT', 'WHEN', 'YOUR', 'RETURN', 'CAUSED', 'HIM', 'TO', 'MAKE', 'A', 'VERY', 'HURRIED', 'RETREAT', 'VERY', 'HURRIED', 'SINCE', 'HE', 'HAD', 'NOT', 'TIME', 'TO', 'REPLACE', 'THE', 'PAPERS', 'WHICH', 'WOULD', 'TELL', 'YOU', 'THAT', 'HE', 'HAD', 'BEEN', 'THERE'] +1580-141083-0030-1979: ref=['MISTER', 'SOAMES', 'WAS', 'SOMEWHAT', 'OVERWHELMED', 'BY', 'THIS', 'FLOOD', 'OF', 'INFORMATION'] 
+1580-141083-0030-1979: hyp=['MISTER', 'PSALMS', 'WAS', 'SOMEWHAT', 'OVERWHELMED', 'BY', 'THIS', 'FLOOD', 'OF', 'INFORMATION'] +1580-141083-0031-1980: ref=['HOLMES', 'HELD', 'OUT', 'A', 'SMALL', 'CHIP', 'WITH', 'THE', 'LETTERS', 'N', 'N', 'AND', 'A', 'SPACE', 'OF', 'CLEAR', 'WOOD', 'AFTER', 'THEM', 'YOU', 'SEE'] +1580-141083-0031-1980: hyp=['HOLMES', 'HELD', 'OUT', 'A', 'SMALL', 'CHIP', 'WITH', 'THE', 'LETTERS', 'N', 'N', 'AND', 'A', 'SPACE', 'OF', 'CLEAR', 'WOOD', 'AFTER', 'THEM', 'YOU', 'SEE'] +1580-141083-0032-1981: ref=['WATSON', 'I', 'HAVE', 'ALWAYS', 'DONE', 'YOU', 'AN', 'INJUSTICE', 'THERE', 'ARE', 'OTHERS'] +1580-141083-0032-1981: hyp=['WATSON', 'I', 'HAVE', 'ALWAYS', 'DONE', 'YOU', 'AND', 'INJUSTICE', 'THERE', 'ARE', 'OTHERS'] +1580-141083-0033-1982: ref=['I', 'WAS', 'HOPING', 'THAT', 'IF', 'THE', 'PAPER', 'ON', 'WHICH', 'HE', 'WROTE', 'WAS', 'THIN', 'SOME', 'TRACE', 'OF', 'IT', 'MIGHT', 'COME', 'THROUGH', 'UPON', 'THIS', 'POLISHED', 'SURFACE', 'NO', 'I', 'SEE', 'NOTHING'] +1580-141083-0033-1982: hyp=['I', 'WAS', 'HOPING', 'THAT', 'IF', 'THE', 'PAPER', 'ON', 'WHICH', 'HE', 'WROTE', 'WAS', 'THIN', 'SOME', 'TRACE', 'OF', 'IT', 'MIGHT', 'COME', 'THROUGH', 'UPON', 'THIS', 'POLISHED', 'SURFACE', 'NO', 'I', 'SEE', 'NOTHING'] +1580-141083-0034-1983: ref=['AS', 'HOLMES', 'DREW', 'THE', 'CURTAIN', 'I', 'WAS', 'AWARE', 'FROM', 'SOME', 'LITTLE', 'RIGIDITY', 'AND', 'ALERTNESS', 'OF', 'HIS', 'ATTITUDE', 'THAT', 'HE', 'WAS', 'PREPARED', 'FOR', 'AN', 'EMERGENCY'] +1580-141083-0034-1983: hyp=['AS', 'HOLMES', 'DREW', 'THE', 'CURTAIN', 'I', 'WAS', 'AWARE', 'FROM', 'SOME', 'LITTLE', 'RIGIDITY', 'AND', 'AN', 'ALERTNESS', 'OF', 'HIS', 'ATTITUDE', 'THAT', 'HE', 'WAS', 'PREPARED', 'FOR', 'AN', 'EMERGENCY'] +1580-141083-0035-1984: ref=['HOLMES', 'TURNED', 'AWAY', 'AND', 'STOOPED', 'SUDDENLY', 'TO', 'THE', 'FLOOR', 'HALLOA', "WHAT'S", 'THIS'] +1580-141083-0035-1984: hyp=['HOLMES', 'TURNED', 'AWAY', 'AND', 'STOOPED', 'SUDDENLY', 'TO', 'THE', 'FLOOR', 'HULLO', 'WHAT', 'IS', 'THIS'] +1580-141083-0036-1985: ref=['HOLMES', 'HELD', 'IT', 'OUT', 'ON', 'HIS', 'OPEN', 'PALM', 'IN', 'THE', 'GLARE', 'OF', 'THE', 'ELECTRIC', 'LIGHT'] +1580-141083-0036-1985: hyp=['HOLMES', 'HELD', 'IT', 'OUT', 'ON', 'HIS', 'OPEN', 'PALM', 'IN', 'THE', 'GLARE', 'OF', 'THE', 'ELECTRIC', 'LIGHT'] +1580-141083-0037-1986: ref=['WHAT', 'COULD', 'HE', 'DO', 'HE', 'CAUGHT', 'UP', 'EVERYTHING', 'WHICH', 'WOULD', 'BETRAY', 'HIM', 'AND', 'HE', 'RUSHED', 'INTO', 'YOUR', 'BEDROOM', 'TO', 'CONCEAL', 'HIMSELF'] +1580-141083-0037-1986: hyp=['WHAT', 'COULD', 'HE', 'DO', 'HE', 'CAUGHT', 'UP', 'EVERYTHING', 'WHICH', 'WOULD', 'BETRAY', 'HIM', 'AND', 'HE', 'RUSHED', 'INTO', 'YOUR', 'BEDROOM', 'TO', 'CONCEAL', 'HIMSELF'] +1580-141083-0038-1987: ref=['I', 'UNDERSTAND', 'YOU', 'TO', 'SAY', 'THAT', 'THERE', 'ARE', 'THREE', 'STUDENTS', 'WHO', 'USE', 'THIS', 'STAIR', 'AND', 'ARE', 'IN', 'THE', 'HABIT', 'OF', 'PASSING', 'YOUR', 'DOOR', 'YES', 'THERE', 'ARE'] +1580-141083-0038-1987: hyp=['I', 'UNDERSTAND', 'YOU', 'TO', 'SAY', 'THAT', 'THERE', 'ARE', 'THREE', 'STUDENTS', 'WHO', 'USE', 'THIS', 'STARE', 'AND', 'ARE', 'IN', 'THE', 'HABIT', 'OF', 'PASSING', 'YOUR', 'DOOR', 'YES', 'THERE', 'ARE'] +1580-141083-0039-1988: ref=['AND', 'THEY', 'ARE', 'ALL', 'IN', 'FOR', 'THIS', 'EXAMINATION', 'YES'] +1580-141083-0039-1988: hyp=['AND', 'THEY', 'ARE', 'ALL', 'IN', 'FOR', 'THE', 'EXAMINATION', 'YES'] +1580-141083-0040-1989: ref=['ONE', 'HARDLY', 'LIKES', 'TO', 'THROW', 'SUSPICION', 'WHERE', 'THERE', 'ARE', 'NO', 'PROOFS'] +1580-141083-0040-1989: hyp=['ONE', 'HARDLY', 'LIKES', 
'TO', 'THROW', 'SUSPICION', 'WHERE', 'THERE', 'ARE', 'NO', 'PROOFS'] +1580-141083-0041-1990: ref=['LET', 'US', 'HEAR', 'THE', 'SUSPICIONS', 'I', 'WILL', 'LOOK', 'AFTER', 'THE', 'PROOFS'] +1580-141083-0041-1990: hyp=['LET', 'US', 'SEE', 'THE', 'SUSPICIONS', 'I', 'WILL', 'LOOK', 'AFTER', 'THE', 'PROOFS'] +1580-141083-0042-1991: ref=['MY', 'SCHOLAR', 'HAS', 'BEEN', 'LEFT', 'VERY', 'POOR', 'BUT', 'HE', 'IS', 'HARD', 'WORKING', 'AND', 'INDUSTRIOUS', 'HE', 'WILL', 'DO', 'WELL'] +1580-141083-0042-1991: hyp=['MY', 'SCHOLAR', 'HAS', 'BEEN', 'LEFT', 'VERY', 'POOR', 'BUT', 'HE', 'IS', 'HARD', 'WORKING', 'AND', 'INDUSTRIOUS', 'HE', 'WILL', 'DO', 'WELL'] +1580-141083-0043-1992: ref=['THE', 'TOP', 'FLOOR', 'BELONGS', 'TO', 'MILES', 'MC', 'LAREN'] +1580-141083-0043-1992: hyp=['THE', 'TOP', 'FLOOR', 'BELONGS', 'TO', 'MYLES', 'MC', 'LAREN'] +1580-141083-0044-1993: ref=['I', 'DARE', 'NOT', 'GO', 'SO', 'FAR', 'AS', 'THAT', 'BUT', 'OF', 'THE', 'THREE', 'HE', 'IS', 'PERHAPS', 'THE', 'LEAST', 'UNLIKELY'] +1580-141083-0044-1993: hyp=['I', 'DARE', 'NOT', 'GO', 'SO', 'FAR', 'AS', 'THAT', 'BUT', 'OF', 'THE', 'THREE', 'HE', 'IS', 'PERHAPS', 'THE', 'LEAST', 'UNLIKELY'] +1580-141083-0045-1994: ref=['HE', 'WAS', 'STILL', 'SUFFERING', 'FROM', 'THIS', 'SUDDEN', 'DISTURBANCE', 'OF', 'THE', 'QUIET', 'ROUTINE', 'OF', 'HIS', 'LIFE'] +1580-141083-0045-1994: hyp=['HE', 'WAS', 'STILL', 'SUFFERING', 'FROM', 'THIS', 'SUDDEN', 'DISTURBANCE', 'OF', 'THE', 'QUIET', 'ROUTINE', 'OF', 'HIS', 'LIFE'] +1580-141083-0046-1995: ref=['BUT', 'I', 'HAVE', 'OCCASIONALLY', 'DONE', 'THE', 'SAME', 'THING', 'AT', 'OTHER', 'TIMES'] +1580-141083-0046-1995: hyp=['BUT', 'I', 'HAVE', 'OCCASIONALLY', 'DONE', 'THE', 'SAME', 'THING', 'AT', 'OTHER', 'TIMES'] +1580-141083-0047-1996: ref=['DID', 'YOU', 'LOOK', 'AT', 'THESE', 'PAPERS', 'ON', 'THE', 'TABLE'] +1580-141083-0047-1996: hyp=['DID', 'YOU', 'LOOK', 'AT', 'THESE', 'PAPERS', 'ON', 'THE', 'TABLE'] +1580-141083-0048-1997: ref=['HOW', 'CAME', 'YOU', 'TO', 'LEAVE', 'THE', 'KEY', 'IN', 'THE', 'DOOR'] +1580-141083-0048-1997: hyp=['HOW', 'CAME', 'YOU', 'TO', 'LEAVE', 'THE', 'KEY', 'IN', 'THE', 'DOOR'] +1580-141083-0049-1998: ref=['ANYONE', 'IN', 'THE', 'ROOM', 'COULD', 'GET', 'OUT', 'YES', 'SIR'] +1580-141083-0049-1998: hyp=['ANY', 'ONE', 'IN', 'THE', 'ROOM', 'COULD', 'GET', 'OUT', 'YES', 'SIR'] +1580-141083-0050-1999: ref=['I', 'REALLY', "DON'T", 'THINK', 'HE', 'KNEW', 'MUCH', 'ABOUT', 'IT', 'MISTER', 'HOLMES'] +1580-141083-0050-1999: hyp=['I', 'HAVE', 'REALLY', "DON'T", 'THINK', 'HE', 'KNEW', 'MUCH', 'ABOUT', 'IT', 'MISTER', 'HOLMES'] +1580-141083-0051-2000: ref=['ONLY', 'FOR', 'A', 'MINUTE', 'OR', 'SO'] +1580-141083-0051-2000: hyp=['ONLY', 'FOR', 'A', 'MINUTE', 'OR', 'SO'] +1580-141083-0052-2001: ref=['OH', 'I', 'WOULD', 'NOT', 'VENTURE', 'TO', 'SAY', 'SIR'] +1580-141083-0052-2001: hyp=['OH', 'I', 'WOULD', 'NOT', 'VENTURE', 'TO', 'SAY', 'SIR'] +1580-141083-0053-2002: ref=['YOU', "HAVEN'T", 'SEEN', 'ANY', 'OF', 'THEM', 'NO', 'SIR'] +1580-141083-0053-2002: hyp=['YOU', "HAVEN'T", 'SEEN', 'ANY', 'OF', 'THEM', 'NO', 'SIR'] +1580-141084-0000-2003: ref=['IT', 'WAS', 'THE', 'INDIAN', 'WHOSE', 'DARK', 'SILHOUETTE', 'APPEARED', 'SUDDENLY', 'UPON', 'HIS', 'BLIND'] +1580-141084-0000-2003: hyp=['IT', 'WAS', 'THE', 'INDIAN', 'WHOSE', 'DARK', 'SILHOUETTE', 'APPEARED', 'SUDDENLY', 'UPON', 'HIS', 'BLIND'] +1580-141084-0001-2004: ref=['HE', 'WAS', 'PACING', 'SWIFTLY', 'UP', 'AND', 'DOWN', 'HIS', 'ROOM'] +1580-141084-0001-2004: hyp=['HE', 'WAS', 'PACING', 'SWIFTLY', 'UP', 'AND', 'DOWN', 'HIS', 'ROOM'] +1580-141084-0002-2005: 
ref=['THIS', 'SET', 'OF', 'ROOMS', 'IS', 'QUITE', 'THE', 'OLDEST', 'IN', 'THE', 'COLLEGE', 'AND', 'IT', 'IS', 'NOT', 'UNUSUAL', 'FOR', 'VISITORS', 'TO', 'GO', 'OVER', 'THEM'] +1580-141084-0002-2005: hyp=['THE', 'SET', 'OF', 'ROOMS', 'IS', 'QUITE', 'THE', 'OLDEST', 'IN', 'THE', 'COLLEGE', 'AND', 'IT', 'IS', 'NOT', 'UNUSUAL', 'FOR', 'VISITORS', 'TO', 'GO', 'OVER', 'THEM'] +1580-141084-0003-2006: ref=['NO', 'NAMES', 'PLEASE', 'SAID', 'HOLMES', 'AS', 'WE', 'KNOCKED', 'AT', "GILCHRIST'S", 'DOOR'] +1580-141084-0003-2006: hyp=['NO', 'NAMES', 'PLEASE', 'SAID', 'HOLMES', 'AS', 'WE', 'KNOCKED', 'AT', "GILCRE'S", 'DOOR'] +1580-141084-0004-2007: ref=['OF', 'COURSE', 'HE', 'DID', 'NOT', 'REALIZE', 'THAT', 'IT', 'WAS', 'I', 'WHO', 'WAS', 'KNOCKING', 'BUT', 'NONE', 'THE', 'LESS', 'HIS', 'CONDUCT', 'WAS', 'VERY', 'UNCOURTEOUS', 'AND', 'INDEED', 'UNDER', 'THE', 'CIRCUMSTANCES', 'RATHER', 'SUSPICIOUS'] +1580-141084-0004-2007: hyp=['OF', 'COURSE', 'HE', 'DID', 'NOT', 'REALIZE', 'THAT', 'IT', 'WAS', 'I', 'WHO', 'WAS', 'KNOCKING', 'BUT', 'NONE', 'THE', 'LESS', 'HIS', 'CONDUCT', 'WAS', 'VERY', 'UNCOURTEOUS', 'AND', 'INDEED', 'UNDER', 'THE', 'CIRCUMSTANCES', 'RATHER', 'SUSPICIOUS'] +1580-141084-0005-2008: ref=['THAT', 'IS', 'VERY', 'IMPORTANT', 'SAID', 'HOLMES'] +1580-141084-0005-2008: hyp=['THAT', 'IS', 'VERY', 'IMPORTANT', 'SAID', 'HOLMES'] +1580-141084-0006-2009: ref=['YOU', "DON'T", 'SEEM', 'TO', 'REALIZE', 'THE', 'POSITION'] +1580-141084-0006-2009: hyp=['YOU', "DON'T", 'SEEM', 'TO', 'REALIZE', 'THE', 'POSITION'] +1580-141084-0007-2010: ref=['TO', 'MORROW', 'IS', 'THE', 'EXAMINATION'] +1580-141084-0007-2010: hyp=['TO', 'MORROW', 'IS', 'THE', 'EXAMINATION'] +1580-141084-0008-2011: ref=['I', 'CANNOT', 'ALLOW', 'THE', 'EXAMINATION', 'TO', 'BE', 'HELD', 'IF', 'ONE', 'OF', 'THE', 'PAPERS', 'HAS', 'BEEN', 'TAMPERED', 'WITH', 'THE', 'SITUATION', 'MUST', 'BE', 'FACED'] +1580-141084-0008-2011: hyp=['I', 'CANNOT', 'ALLOW', 'THE', 'EXAMINATION', 'TO', 'BE', 'HELD', 'IF', 'ONE', 'OF', 'THE', 'PAPERS', 'HAS', 'BEEN', 'TAMPERED', 'WITH', 'THE', 'SITUATION', 'MUST', 'BE', 'FACED'] +1580-141084-0009-2012: ref=['IT', 'IS', 'POSSIBLE', 'THAT', 'I', 'MAY', 'BE', 'IN', 'A', 'POSITION', 'THEN', 'TO', 'INDICATE', 'SOME', 'COURSE', 'OF', 'ACTION'] +1580-141084-0009-2012: hyp=['IT', 'IS', 'POSSIBLE', 'THAT', 'I', 'MAY', 'BE', 'IN', 'A', 'POSITION', 'THEN', 'TO', 'INDICATE', 'SOME', 'COURSE', 'OF', 'ACTION'] +1580-141084-0010-2013: ref=['I', 'WILL', 'TAKE', 'THE', 'BLACK', 'CLAY', 'WITH', 'ME', 'ALSO', 'THE', 'PENCIL', 'CUTTINGS', 'GOOD', 'BYE'] +1580-141084-0010-2013: hyp=['I', 'WILL', 'TAKE', 'THE', 'BLACK', 'CLAY', 'WITH', 'ME', 'ALSO', 'THE', 'PENCIL', 'CUTTINGS', 'GOOD', 'BYE'] +1580-141084-0011-2014: ref=['WHEN', 'WE', 'WERE', 'OUT', 'IN', 'THE', 'DARKNESS', 'OF', 'THE', 'QUADRANGLE', 'WE', 'AGAIN', 'LOOKED', 'UP', 'AT', 'THE', 'WINDOWS'] +1580-141084-0011-2014: hyp=['WHEN', 'WE', 'WERE', 'OUT', 'IN', 'THE', 'DARKNESS', 'OF', 'THE', 'QUADRANGLE', 'WE', 'AGAIN', 'LOOKED', 'UP', 'AT', 'THE', 'WINDOWS'] +1580-141084-0012-2015: ref=['THE', 'FOUL', 'MOUTHED', 'FELLOW', 'AT', 'THE', 'TOP'] +1580-141084-0012-2015: hyp=['THE', 'FOUL', 'MOUTHED', 'FELLOW', 'AT', 'THE', 'TOP'] +1580-141084-0013-2016: ref=['HE', 'IS', 'THE', 'ONE', 'WITH', 'THE', 'WORST', 'RECORD'] +1580-141084-0013-2016: hyp=['HE', 'IS', 'THE', 'ONE', 'WITH', 'THE', 'WORST', 'RECORD'] +1580-141084-0014-2017: ref=['WHY', 'BANNISTER', 'THE', 'SERVANT', "WHAT'S", 'HIS', 'GAME', 'IN', 'THE', 'MATTER'] +1580-141084-0014-2017: hyp=['WHY', 'BANNISTER', 'THE', 'SERVANT', 
"WHAT'S", 'HIS', 'GAME', 'IN', 'THE', 'MATTER'] +1580-141084-0015-2018: ref=['HE', 'IMPRESSED', 'ME', 'AS', 'BEING', 'A', 'PERFECTLY', 'HONEST', 'MAN'] +1580-141084-0015-2018: hyp=['HE', 'IMPRESSED', 'ME', 'AS', 'BEING', 'A', 'PERFECTLY', 'HONEST', 'MAN'] +1580-141084-0016-2019: ref=['MY', 'FRIEND', 'DID', 'NOT', 'APPEAR', 'TO', 'BE', 'DEPRESSED', 'BY', 'HIS', 'FAILURE', 'BUT', 'SHRUGGED', 'HIS', 'SHOULDERS', 'IN', 'HALF', 'HUMOROUS', 'RESIGNATION'] +1580-141084-0016-2019: hyp=['MY', 'FRIEND', 'DID', 'NOT', 'APPEAR', 'TO', 'BE', 'DEPRESSED', 'BY', 'HIS', 'FAILURE', 'BUT', 'SHRUGGED', 'HIS', 'SHOULDERS', 'IN', 'HALF', 'HUMOROUS', 'RESIGNATION'] +1580-141084-0017-2020: ref=['NO', 'GOOD', 'MY', 'DEAR', 'WATSON'] +1580-141084-0017-2020: hyp=['NO', 'GOOD', 'MY', 'DEAR', 'WATSON'] +1580-141084-0018-2021: ref=['I', 'THINK', 'SO', 'YOU', 'HAVE', 'FORMED', 'A', 'CONCLUSION'] +1580-141084-0018-2021: hyp=['I', 'THINK', 'SO', 'YOU', 'HAVE', 'FORMED', 'A', 'CONCLUSION'] +1580-141084-0019-2022: ref=['YES', 'MY', 'DEAR', 'WATSON', 'I', 'HAVE', 'SOLVED', 'THE', 'MYSTERY'] +1580-141084-0019-2022: hyp=['YES', 'MY', 'DEAR', 'WATSON', 'I', 'HAVE', 'SOLVED', 'THE', 'MYSTERY'] +1580-141084-0020-2023: ref=['LOOK', 'AT', 'THAT', 'HE', 'HELD', 'OUT', 'HIS', 'HAND'] +1580-141084-0020-2023: hyp=['LOOK', 'AT', 'THAT', 'HE', 'HELD', 'OUT', 'HIS', 'HAND'] +1580-141084-0021-2024: ref=['ON', 'THE', 'PALM', 'WERE', 'THREE', 'LITTLE', 'PYRAMIDS', 'OF', 'BLACK', 'DOUGHY', 'CLAY'] +1580-141084-0021-2024: hyp=['ON', 'THE', 'PALM', 'WERE', 'THREE', 'LITTLE', 'PYRAMIDS', 'OF', 'BLACK', 'DOUGHY', 'CLAY'] +1580-141084-0022-2025: ref=['AND', 'ONE', 'MORE', 'THIS', 'MORNING'] +1580-141084-0022-2025: hyp=['AND', 'ONE', 'MORE', 'THIS', 'MORNING'] +1580-141084-0023-2026: ref=['IN', 'A', 'FEW', 'HOURS', 'THE', 'EXAMINATION', 'WOULD', 'COMMENCE', 'AND', 'HE', 'WAS', 'STILL', 'IN', 'THE', 'DILEMMA', 'BETWEEN', 'MAKING', 'THE', 'FACTS', 'PUBLIC', 'AND', 'ALLOWING', 'THE', 'CULPRIT', 'TO', 'COMPETE', 'FOR', 'THE', 'VALUABLE', 'SCHOLARSHIP'] +1580-141084-0023-2026: hyp=['IN', 'A', 'FEW', 'HOURS', 'THE', 'EXAMINATION', 'WOULD', 'COMMENCE', 'AND', 'HE', 'WAS', 'STILL', 'IN', 'THE', 'DILEMMA', 'BETWEEN', 'MAKING', 'THE', 'FACTS', 'PUBLIC', 'AND', 'ALLOWING', 'THE', 'CULPRIT', 'TO', 'COMPETE', 'FOR', 'THE', 'VALUABLE', 'SCHOLARSHIP'] +1580-141084-0024-2027: ref=['HE', 'COULD', 'HARDLY', 'STAND', 'STILL', 'SO', 'GREAT', 'WAS', 'HIS', 'MENTAL', 'AGITATION', 'AND', 'HE', 'RAN', 'TOWARDS', 'HOLMES', 'WITH', 'TWO', 'EAGER', 'HANDS', 'OUTSTRETCHED', 'THANK', 'HEAVEN', 'THAT', 'YOU', 'HAVE', 'COME'] +1580-141084-0024-2027: hyp=['HE', 'COULD', 'HARDLY', 'STAND', 'STILL', 'SO', 'GREAT', 'WAS', 'HIS', 'MENTAL', 'AGITATION', 'AND', 'HE', 'RAN', 'TOWARDS', 'HOMES', 'WITH', 'TWO', 'EAGER', 'HANDS', 'OUTSTRETCHED', 'THANK', 'HEAVEN', 'THAT', 'YOU', 'HAVE', 'COME'] +1580-141084-0025-2028: ref=['YOU', 'KNOW', 'HIM', 'I', 'THINK', 'SO'] +1580-141084-0025-2028: hyp=['YOU', 'KNOW', 'HIM', 'I', 'THINK', 'SO'] +1580-141084-0026-2029: ref=['IF', 'THIS', 'MATTER', 'IS', 'NOT', 'TO', 'BECOME', 'PUBLIC', 'WE', 'MUST', 'GIVE', 'OURSELVES', 'CERTAIN', 'POWERS', 'AND', 'RESOLVE', 'OURSELVES', 'INTO', 'A', 'SMALL', 'PRIVATE', 'COURT', 'MARTIAL'] +1580-141084-0026-2029: hyp=['IF', 'THIS', 'MATTER', 'IS', 'NOT', 'TO', 'BECOME', 'PUBLIC', 'WE', 'MUST', 'GIVE', 'OURSELVES', 'CERTAIN', 'POWERS', 'AND', 'RESOLVE', 'OURSELVES', 'INTO', 'A', 'SMALL', 'PRIVATE', 'COURT', 'MARTIAL'] +1580-141084-0027-2030: ref=['NO', 'SIR', 'CERTAINLY', 'NOT'] +1580-141084-0027-2030: hyp=['NO', 
'SIR', 'CERTAINLY', 'NOT'] +1580-141084-0028-2031: ref=['THERE', 'WAS', 'NO', 'MAN', 'SIR'] +1580-141084-0028-2031: hyp=['THERE', 'WAS', 'NO', 'MAN', 'SIR'] +1580-141084-0029-2032: ref=['HIS', 'TROUBLED', 'BLUE', 'EYES', 'GLANCED', 'AT', 'EACH', 'OF', 'US', 'AND', 'FINALLY', 'RESTED', 'WITH', 'AN', 'EXPRESSION', 'OF', 'BLANK', 'DISMAY', 'UPON', 'BANNISTER', 'IN', 'THE', 'FARTHER', 'CORNER'] +1580-141084-0029-2032: hyp=['HIS', 'TROUBLED', 'BLUE', 'EYES', 'GLANCED', 'AT', 'EACH', 'OF', 'US', 'AND', 'FINALLY', 'RESTED', 'WITH', 'AN', 'EXPRESSION', 'OF', 'BLANK', 'DISMAY', 'UPON', 'BANISTER', 'IN', 'THE', 'FARTHER', 'CORNER'] +1580-141084-0030-2033: ref=['JUST', 'CLOSE', 'THE', 'DOOR', 'SAID', 'HOLMES'] +1580-141084-0030-2033: hyp=['JUST', 'CLOSE', 'THE', 'DOOR', 'SAID', 'HOLMES'] +1580-141084-0031-2034: ref=['WE', 'WANT', 'TO', 'KNOW', 'MISTER', 'GILCHRIST', 'HOW', 'YOU', 'AN', 'HONOURABLE', 'MAN', 'EVER', 'CAME', 'TO', 'COMMIT', 'SUCH', 'AN', 'ACTION', 'AS', 'THAT', 'OF', 'YESTERDAY'] +1580-141084-0031-2034: hyp=['WE', 'WANT', 'TO', 'KNOW', 'MISTER', 'GOCRIST', 'HOW', 'YOU', 'AN', 'HONOURABLE', 'MAN', 'EVER', 'CAME', 'TO', 'COMMIT', 'SUCH', 'AN', 'ACTION', 'AS', 'THAT', 'OF', 'YESTERDAY'] +1580-141084-0032-2035: ref=['FOR', 'A', 'MOMENT', 'GILCHRIST', 'WITH', 'UPRAISED', 'HAND', 'TRIED', 'TO', 'CONTROL', 'HIS', 'WRITHING', 'FEATURES'] +1580-141084-0032-2035: hyp=['FOR', 'A', 'MOMENT', 'GILCRIS', 'WITH', 'UPRAISED', 'HAND', 'TRIED', 'TO', 'CONTROL', 'HIS', 'WRITHING', 'FEATURES'] +1580-141084-0033-2036: ref=['COME', 'COME', 'SAID', 'HOLMES', 'KINDLY', 'IT', 'IS', 'HUMAN', 'TO', 'ERR', 'AND', 'AT', 'LEAST', 'NO', 'ONE', 'CAN', 'ACCUSE', 'YOU', 'OF', 'BEING', 'A', 'CALLOUS', 'CRIMINAL'] +1580-141084-0033-2036: hyp=['COME', 'COME', 'SAID', 'HOLMES', 'KINDLY', 'IT', 'IS', 'HUMAN', 'TO', 'ERR', 'AND', 'AT', 'LEAST', 'NO', 'ONE', 'CAN', 'ACCUSE', 'YOU', 'OF', 'BEING', 'A', 'CALLOUS', 'CRIMINAL'] +1580-141084-0034-2037: ref=['WELL', 'WELL', "DON'T", 'TROUBLE', 'TO', 'ANSWER', 'LISTEN', 'AND', 'SEE', 'THAT', 'I', 'DO', 'YOU', 'NO', 'INJUSTICE'] +1580-141084-0034-2037: hyp=['WELL', 'WELL', "DON'T", 'TROUBLE', 'TO', 'ANSWER', 'LISTEN', 'AND', 'SEE', 'THAT', 'I', 'DO', 'YOU', 'KNOW', 'INJUSTICE'] +1580-141084-0035-2038: ref=['HE', 'COULD', 'EXAMINE', 'THE', 'PAPERS', 'IN', 'HIS', 'OWN', 'OFFICE'] +1580-141084-0035-2038: hyp=['HE', 'COULD', 'EXAMINE', 'THE', 'PAPERS', 'IN', 'HIS', 'OWN', 'OFFICE'] +1580-141084-0036-2039: ref=['THE', 'INDIAN', 'I', 'ALSO', 'THOUGHT', 'NOTHING', 'OF'] +1580-141084-0036-2039: hyp=['THE', 'INDIAN', 'I', 'ALSO', 'THOUGHT', 'NOTHING', 'OF'] +1580-141084-0037-2040: ref=['WHEN', 'I', 'APPROACHED', 'YOUR', 'ROOM', 'I', 'EXAMINED', 'THE', 'WINDOW'] +1580-141084-0037-2040: hyp=['WHEN', 'I', 'APPROACHED', 'YOUR', 'ROOM', 'I', 'EXAMINED', 'THE', 'WINDOW'] +1580-141084-0038-2041: ref=['NO', 'ONE', 'LESS', 'THAN', 'THAT', 'WOULD', 'HAVE', 'A', 'CHANCE'] +1580-141084-0038-2041: hyp=['NO', 'ONE', 'LESS', 'THAN', 'THAT', 'WOULD', 'HAVE', 'A', 'CHANCE'] +1580-141084-0039-2042: ref=['I', 'ENTERED', 'AND', 'I', 'TOOK', 'YOU', 'INTO', 'MY', 'CONFIDENCE', 'AS', 'TO', 'THE', 'SUGGESTIONS', 'OF', 'THE', 'SIDE', 'TABLE'] +1580-141084-0039-2042: hyp=['I', 'ENTERED', 'AND', 'I', 'TOOK', 'YOU', 'INTO', 'MY', 'CONFIDENCE', 'AS', 'TO', 'THE', 'SUGGESTIONS', 'OF', 'THE', 'SIDE', 'TABLE'] +1580-141084-0040-2043: ref=['HE', 'RETURNED', 'CARRYING', 'HIS', 'JUMPING', 'SHOES', 'WHICH', 'ARE', 'PROVIDED', 'AS', 'YOU', 'ARE', 'AWARE', 'WITH', 'SEVERAL', 'SHARP', 'SPIKES'] +1580-141084-0040-2043: hyp=['HE', 
'RETURNED', 'CARRYING', 'HIS', 'JUMPING', 'SHOES', 'WHICH', 'ARE', 'PROVIDED', 'AS', 'YOU', 'ARE', 'WEAR', 'WITH', 'SEVERAL', 'SHARP', 'SPIKES'] +1580-141084-0041-2044: ref=['NO', 'HARM', 'WOULD', 'HAVE', 'BEEN', 'DONE', 'HAD', 'IT', 'NOT', 'BEEN', 'THAT', 'AS', 'HE', 'PASSED', 'YOUR', 'DOOR', 'HE', 'PERCEIVED', 'THE', 'KEY', 'WHICH', 'HAD', 'BEEN', 'LEFT', 'BY', 'THE', 'CARELESSNESS', 'OF', 'YOUR', 'SERVANT'] +1580-141084-0041-2044: hyp=['NO', 'HARM', 'WOULD', 'HAVE', 'BEEN', 'DONE', 'HAD', 'IT', 'NOT', 'BEEN', 'THAT', 'AS', 'HE', 'PASSED', 'YOUR', 'DOOR', 'HE', 'PERCEIVED', 'THE', 'KEY', 'WHICH', 'HAD', 'BEEN', 'LEFT', 'BY', 'THE', 'CARELESSNESS', 'OF', 'YOUR', 'SERVANT'] +1580-141084-0042-2045: ref=['A', 'SUDDEN', 'IMPULSE', 'CAME', 'OVER', 'HIM', 'TO', 'ENTER', 'AND', 'SEE', 'IF', 'THEY', 'WERE', 'INDEED', 'THE', 'PROOFS'] +1580-141084-0042-2045: hyp=['A', 'SUDDEN', 'IMPULSE', 'CAME', 'OVER', 'HIM', 'TO', 'ENTER', 'AND', 'SEE', 'IF', 'THEY', 'WERE', 'INDEED', 'THE', 'PROOFS'] +1580-141084-0043-2046: ref=['HE', 'PUT', 'HIS', 'SHOES', 'ON', 'THE', 'TABLE'] +1580-141084-0043-2046: hyp=['HE', 'PUT', 'HIS', 'SHOES', 'ON', 'THE', 'TABLE'] +1580-141084-0044-2047: ref=['GLOVES', 'SAID', 'THE', 'YOUNG', 'MAN'] +1580-141084-0044-2047: hyp=['GLOVES', 'SAID', 'THE', 'YOUNG', 'MAN'] +1580-141084-0045-2048: ref=['SUDDENLY', 'HE', 'HEARD', 'HIM', 'AT', 'THE', 'VERY', 'DOOR', 'THERE', 'WAS', 'NO', 'POSSIBLE', 'ESCAPE'] +1580-141084-0045-2048: hyp=['SUDDENLY', 'HE', 'HEARD', 'HIM', 'AT', 'THE', 'VERY', 'DOOR', 'THERE', 'WAS', 'NO', 'POSSIBLE', 'ESCAPE'] +1580-141084-0046-2049: ref=['HAVE', 'I', 'TOLD', 'THE', 'TRUTH', 'MISTER', 'GILCHRIST'] +1580-141084-0046-2049: hyp=['HAVE', 'I', 'TOLD', 'THE', 'TRUTH', 'MISTER', 'GORIST'] +1580-141084-0047-2050: ref=['I', 'HAVE', 'A', 'LETTER', 'HERE', 'MISTER', 'SOAMES', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'EARLY', 'THIS', 'MORNING', 'IN', 'THE', 'MIDDLE', 'OF', 'A', 'RESTLESS', 'NIGHT'] +1580-141084-0047-2050: hyp=['I', 'HAVE', 'A', 'LETTER', 'HERE', 'MISTER', 'SOLMES', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'EARLY', 'THIS', 'MORNING', 'IN', 'THE', 'MIDDLE', 'OF', 'A', 'RESTLESS', 'NIGHT'] +1580-141084-0048-2051: ref=['IT', 'WILL', 'BE', 'CLEAR', 'TO', 'YOU', 'FROM', 'WHAT', 'I', 'HAVE', 'SAID', 'THAT', 'ONLY', 'YOU', 'COULD', 'HAVE', 'LET', 'THIS', 'YOUNG', 'MAN', 'OUT', 'SINCE', 'YOU', 'WERE', 'LEFT', 'IN', 'THE', 'ROOM', 'AND', 'MUST', 'HAVE', 'LOCKED', 'THE', 'DOOR', 'WHEN', 'YOU', 'WENT', 'OUT'] +1580-141084-0048-2051: hyp=['IT', 'WOULD', 'BE', 'CLEAR', 'TO', 'YOU', 'FROM', 'WHAT', 'I', 'HAVE', 'SAID', 'THAT', 'ONLY', 'YOU', 'COULD', 'HAVE', 'LET', 'THIS', 'YOUNG', 'MAN', 'OUT', 'SINCE', 'YOU', 'WERE', 'LEFT', 'IN', 'THE', 'ROOM', 'AND', 'MUST', 'HAVE', 'LOCKED', 'THE', 'DOOR', 'WHEN', 'YOU', 'WENT', 'OUT'] +1580-141084-0049-2052: ref=['IT', 'WAS', 'SIMPLE', 'ENOUGH', 'SIR', 'IF', 'YOU', 'ONLY', 'HAD', 'KNOWN', 'BUT', 'WITH', 'ALL', 'YOUR', 'CLEVERNESS', 'IT', 'WAS', 'IMPOSSIBLE', 'THAT', 'YOU', 'COULD', 'KNOW'] +1580-141084-0049-2052: hyp=['IT', 'WAS', 'SIMPLE', 'ENOUGH', 'SIR', 'IF', 'YOU', 'ONLY', 'HAD', 'KNOWN', 'BUT', 'WITH', 'ALL', 'YOUR', 'CLEVERNESS', 'IT', 'WAS', 'IMPOSSIBLE', 'THAT', 'YOU', 'COULD', 'KNOW'] +1580-141084-0050-2053: ref=['IF', 'MISTER', 'SOAMES', 'SAW', 'THEM', 'THE', 'GAME', 'WAS', 'UP'] +1580-141084-0050-2053: hyp=['IF', 'MISTER', 'SOLMES', 'SAW', 'THEM', 'THE', 'GAME', 'WAS', 'UP'] +1995-1826-0000-750: ref=['IN', 'THE', 'DEBATE', 'BETWEEN', 'THE', 'SENIOR', 'SOCIETIES', 'HER', 'DEFENCE', 'OF', 'THE', 'FIFTEENTH', 'AMENDMENT', 'HAD', 
'BEEN', 'NOT', 'ONLY', 'A', 'NOTABLE', 'BIT', 'OF', 'REASONING', 'BUT', 'DELIVERED', 'WITH', 'REAL', 'ENTHUSIASM'] +1995-1826-0000-750: hyp=['IN', 'THE', 'DEBATE', 'BETWEEN', 'THE', 'SENIOR', 'SOCIETIES', 'HER', 'DEFENCE', 'OF', 'THE', 'FIFTEENTH', 'AMENDMENT', 'HAD', 'BEEN', 'NOT', 'ONLY', 'A', 'NOTABLE', 'BIT', 'OF', 'REASONING', 'BUT', 'DELIVERED', 'WITH', 'REAL', 'ENTHUSIASM'] +1995-1826-0001-751: ref=['THE', 'SOUTH', 'SHE', 'HAD', 'NOT', 'THOUGHT', 'OF', 'SERIOUSLY', 'AND', 'YET', 'KNOWING', 'OF', 'ITS', 'DELIGHTFUL', 'HOSPITALITY', 'AND', 'MILD', 'CLIMATE', 'SHE', 'WAS', 'NOT', 'AVERSE', 'TO', 'CHARLESTON', 'OR', 'NEW', 'ORLEANS'] +1995-1826-0001-751: hyp=['THE', 'SOUTH', 'SHE', 'HAD', 'NOT', 'THOUGHT', 'OF', 'SERIOUSLY', 'AND', 'YET', 'KNOWING', 'OF', 'ITS', 'DELIGHTFUL', 'HOSPITALITY', 'AND', 'MILD', 'CLIMATE', 'SHE', 'WAS', 'NOT', 'AVERSE', 'TO', 'CHARLESTON', 'OR', 'NEW', 'ORLEANS'] +1995-1826-0002-752: ref=['JOHN', 'TAYLOR', 'WHO', 'HAD', 'SUPPORTED', 'HER', 'THROUGH', 'COLLEGE', 'WAS', 'INTERESTED', 'IN', 'COTTON'] +1995-1826-0002-752: hyp=['JOHN', 'TAYLOR', 'WHO', 'HAD', 'SUPPORTED', 'HER', 'THROUGH', 'COLLEGE', 'WAS', 'INTERESTED', 'IN', 'COTTON'] +1995-1826-0003-753: ref=['BETTER', 'GO', 'HE', 'HAD', 'COUNSELLED', 'SENTENTIOUSLY'] +1995-1826-0003-753: hyp=['BETTER', 'GO', 'HE', 'HAD', 'COUNSELS', 'SENTENTIOUSLY'] +1995-1826-0004-754: ref=['MIGHT', 'LEARN', 'SOMETHING', 'USEFUL', 'DOWN', 'THERE'] +1995-1826-0004-754: hyp=['MIGHT', 'LEARN', 'SOMETHING', 'USEFUL', 'DOWN', 'THERE'] +1995-1826-0005-755: ref=['BUT', 'JOHN', "THERE'S", 'NO', 'SOCIETY', 'JUST', 'ELEMENTARY', 'WORK'] +1995-1826-0005-755: hyp=['BUT', 'JOHN', "THERE'S", 'NO', 'SOCIETY', 'JUST', 'ELEMENTARY', 'WORK'] +1995-1826-0006-756: ref=['BEEN', 'LOOKING', 'UP', 'TOOMS', 'COUNTY'] +1995-1826-0006-756: hyp=['BEEN', 'LOOKING', 'UP', "TOMB'S", 'COUNTY'] +1995-1826-0007-757: ref=['FIND', 'SOME', 'CRESSWELLS', 'THERE', 'BIG', 'PLANTATIONS', 'RATED', 'AT', 'TWO', 'HUNDRED', 'AND', 'FIFTY', 'THOUSAND', 'DOLLARS'] +1995-1826-0007-757: hyp=['FIVE', 'CROSS', 'WHIRLS', 'THERE', 'BIG', 'PLANTATIONS', 'RATED', 'AT', 'TWO', 'HUNDRED', 'AND', 'FIFTY', 'THOUSAND', 'DOLLARS'] +1995-1826-0008-758: ref=['SOME', 'OTHERS', 'TOO', 'BIG', 'COTTON', 'COUNTY'] +1995-1826-0008-758: hyp=['SOME', 'OTHERS', 'TOO', 'BIG', 'COTTON', 'COUNTY'] +1995-1826-0009-759: ref=['YOU', 'OUGHT', 'TO', 'KNOW', 'JOHN', 'IF', 'I', 'TEACH', 'NEGROES', "I'LL", 'SCARCELY', 'SEE', 'MUCH', 'OF', 'PEOPLE', 'IN', 'MY', 'OWN', 'CLASS'] +1995-1826-0009-759: hyp=['YOU', 'OUGHT', 'TO', 'KNOW', 'JOHN', 'IF', 'I', 'TEACH', 'NEGROES', "I'LL", 'SCARCELY', 'SEE', 'MUCH', 'OF', 'PEOPLE', 'IN', 'MY', 'OWN', 'CLASS'] +1995-1826-0010-760: ref=['AT', 'ANY', 'RATE', 'I', 'SAY', 'GO'] +1995-1826-0010-760: hyp=['AT', 'ANY', 'RATE', 'I', 'SAY', 'GO'] +1995-1826-0011-761: ref=['HERE', 'SHE', 'WAS', 'TEACHING', 'DIRTY', 'CHILDREN', 'AND', 'THE', 'SMELL', 'OF', 'CONFUSED', 'ODORS', 'AND', 'BODILY', 'PERSPIRATION', 'WAS', 'TO', 'HER', 'AT', 'TIMES', 'UNBEARABLE'] +1995-1826-0011-761: hyp=['HERE', 'SHE', 'WAS', 'TEACHING', 'DIRTY', 'CHILDREN', 'AND', 'THE', 'SMELL', 'OF', 'CONFUSED', 'ODORS', 'AND', 'BODILY', 'PERSPIRATION', 'WAS', 'TO', 'HER', 'AT', 'TIMES', 'UNBEARABLE'] +1995-1826-0012-762: ref=['SHE', 'WANTED', 'A', 'GLANCE', 'OF', 'THE', 'NEW', 'BOOKS', 'AND', 'PERIODICALS', 'AND', 'TALK', 'OF', 'GREAT', 'PHILANTHROPIES', 'AND', 'REFORMS'] +1995-1826-0012-762: hyp=['SHE', 'WANTED', 'A', 'GLANCE', 'OF', 'THE', 'NEW', 'BOOKS', 'IN', 'PERIODICALS', 'AND', 'TALK', 'OF', 'GRATE', 
'PHILANTHROPIES', 'AND', 'REFORMS'] +1995-1826-0013-763: ref=['SO', 'FOR', 'THE', 'HUNDREDTH', 'TIME', 'SHE', 'WAS', 'THINKING', 'TODAY', 'AS', 'SHE', 'WALKED', 'ALONE', 'UP', 'THE', 'LANE', 'BACK', 'OF', 'THE', 'BARN', 'AND', 'THEN', 'SLOWLY', 'DOWN', 'THROUGH', 'THE', 'BOTTOMS'] +1995-1826-0013-763: hyp=['SO', 'FOR', 'THE', 'HUNDREDTH', 'TIME', 'SHE', 'WAS', 'THINKING', 'TO', 'DAY', 'AS', 'SHE', 'WALKED', 'ALONE', 'UP', 'THE', 'LANE', 'BACK', 'OF', 'THE', 'BARN', 'AND', 'THEN', 'SLOWLY', 'DOWN', 'THROUGH', 'THE', 'BOTTOMS'] +1995-1826-0014-764: ref=['COTTON', 'SHE', 'PAUSED'] +1995-1826-0014-764: hyp=['COTTON', 'SHE', 'PAUSED'] +1995-1826-0015-765: ref=['SHE', 'HAD', 'ALMOST', 'FORGOTTEN', 'THAT', 'IT', 'WAS', 'HERE', 'WITHIN', 'TOUCH', 'AND', 'SIGHT'] +1995-1826-0015-765: hyp=['SHE', 'HAD', 'ALMOST', 'FORGOTTEN', 'THAT', 'IT', 'WAS', 'HERE', 'WITHIN', 'TOUCH', 'IN', 'SIGHT'] +1995-1826-0016-766: ref=['THE', 'GLIMMERING', 'SEA', 'OF', 'DELICATE', 'LEAVES', 'WHISPERED', 'AND', 'MURMURED', 'BEFORE', 'HER', 'STRETCHING', 'AWAY', 'TO', 'THE', 'NORTHWARD'] +1995-1826-0016-766: hyp=['THE', 'GLIMMERING', 'SEA', 'OF', 'DELICATE', 'LEAVES', 'WHISPERED', 'AND', 'MURMURED', 'BEFORE', 'HER', 'STRETCHING', 'AWAY', 'TO', 'THE', 'NORTHWARD'] +1995-1826-0017-767: ref=['THERE', 'MIGHT', 'BE', 'A', 'BIT', 'OF', 'POETRY', 'HERE', 'AND', 'THERE', 'BUT', 'MOST', 'OF', 'THIS', 'PLACE', 'WAS', 'SUCH', 'DESPERATE', 'PROSE'] +1995-1826-0017-767: hyp=['THERE', 'MIGHT', 'BE', 'A', 'BIT', 'OF', 'POETRY', 'HERE', 'AND', 'THERE', 'BUT', 'MOST', 'OF', 'THIS', 'PLACE', 'WAS', 'SUCH', 'DESPERATE', 'PROSE'] +1995-1826-0018-768: ref=['HER', 'REGARD', 'SHIFTED', 'TO', 'THE', 'GREEN', 'STALKS', 'AND', 'LEAVES', 'AGAIN', 'AND', 'SHE', 'STARTED', 'TO', 'MOVE', 'AWAY'] +1995-1826-0018-768: hyp=['HER', 'REGARD', 'SHIFTED', 'TO', 'THE', 'GREEN', 'STALKS', 'AND', 'LEAVES', 'AGAIN', 'AND', 'SHE', 'STARTED', 'TO', 'MOVE', 'AWAY'] +1995-1826-0019-769: ref=['COTTON', 'IS', 'A', 'WONDERFUL', 'THING', 'IS', 'IT', 'NOT', 'BOYS', 'SHE', 'SAID', 'RATHER', 'PRIMLY'] +1995-1826-0019-769: hyp=['COTTON', 'IS', 'A', 'WONDERFUL', 'THING', 'IS', 'IT', 'NOT', 'BOYS', 'SHE', 'SAID', 'RATHER', 'PRIMLY'] +1995-1826-0020-770: ref=['MISS', 'TAYLOR', 'DID', 'NOT', 'KNOW', 'MUCH', 'ABOUT', 'COTTON', 'BUT', 'AT', 'LEAST', 'ONE', 'MORE', 'REMARK', 'SEEMED', 'CALLED', 'FOR'] +1995-1826-0020-770: hyp=['MISS', 'TAYLOR', 'DID', 'NOT', 'KNOW', 'MUCH', 'ABOUT', 'COTTON', 'BUT', 'AT', 'LEAST', 'ONE', 'MORE', 'REMARKED', 'SEEMED', 'CALLED', 'FOR'] +1995-1826-0021-771: ref=["DON'T", 'KNOW', 'WELL', 'OF', 'ALL', 'THINGS', 'INWARDLY', 'COMMENTED', 'MISS', 'TAYLOR', 'LITERALLY', 'BORN', 'IN', 'COTTON', 'AND', 'OH', 'WELL', 'AS', 'MUCH', 'AS', 'TO', 'ASK', "WHAT'S", 'THE', 'USE', 'SHE', 'TURNED', 'AGAIN', 'TO', 'GO'] +1995-1826-0021-771: hyp=["DON'T", 'KNOW', 'WELL', 'OF', 'ALL', 'THINGS', 'INWARDLY', 'COMMENTED', 'MISS', 'TAYLOR', 'THAT', 'A', 'BORN', 'IN', 'COTTON', 'AND', 'OH', 'WELL', 'AS', 'MUCH', 'AS', 'TO', 'ASK', "WHAT'S", 'THE', 'USE', 'SHE', 'TURNED', 'AGAIN', 'TO', 'GO'] +1995-1826-0022-772: ref=['I', 'SUPPOSE', 'THOUGH', "IT'S", 'TOO', 'EARLY', 'FOR', 'THEM', 'THEN', 'CAME', 'THE', 'EXPLOSION'] +1995-1826-0022-772: hyp=['I', 'SUPPOSE', 'THOUGH', "IT'S", 'TOO', 'EARLY', 'FOR', 'THEM', 'THEN', 'CAME', 'THE', 'EXPLOSION'] +1995-1826-0023-773: ref=['GOOBERS', "DON'T", 'GROW', 'ON', 'THE', 'TOPS', 'OF', 'VINES', 'BUT', 'UNDERGROUND', 'ON', 'THE', 'ROOTS', 'LIKE', 'YAMS', 'IS', 'THAT', 'SO'] +1995-1826-0023-773: hyp=['GOULD', 'WAS', "DON'T", 'GROW', 'ON', 'THE', 
'TOPS', 'EVENS', 'BUT', 'UNDER', 'GROUND', 'ON', 'THE', 'WOODS', 'LIKE', 'YAMS', 'IS', 'THAT', 'SO'] +1995-1826-0024-774: ref=['THE', 'GOLDEN', 'FLEECE', "IT'S", 'THE', 'SILVER', 'FLEECE', 'HE', 'HARKENED'] +1995-1826-0024-774: hyp=['THE', 'GOLDEN', 'FLEECE', "IT'S", 'THE', 'SILVER', 'FLEECE', 'HE', 'HEARKENED'] +1995-1826-0025-775: ref=['SOME', 'TIME', "YOU'LL", 'TELL', 'ME', 'PLEASE', "WON'T", 'YOU'] +1995-1826-0025-775: hyp=['SOMETIME', 'YOU', 'DAMNLY', 'PLEASE', "WON'T", 'YOU'] +1995-1826-0026-776: ref=['NOW', 'FOR', 'ONE', 'LITTLE', 'HALF', 'HOUR', 'SHE', 'HAD', 'BEEN', 'A', 'WOMAN', 'TALKING', 'TO', 'A', 'BOY', 'NO', 'NOT', 'EVEN', 'THAT', 'SHE', 'HAD', 'BEEN', 'TALKING', 'JUST', 'TALKING', 'THERE', 'WERE', 'NO', 'PERSONS', 'IN', 'THE', 'CONVERSATION', 'JUST', 'THINGS', 'ONE', 'THING', 'COTTON'] +1995-1826-0026-776: hyp=['THOU', 'FOR', 'ONE', 'LITTLE', 'HALF', 'HOUR', 'SHE', 'HAD', 'BEEN', 'A', 'WOMAN', 'TALKING', 'TO', 'A', 'BOY', 'NO', 'NOT', 'EVEN', 'THAT', 'SHE', 'HAD', 'BEEN', 'TALKING', 'JUST', 'TALKING', 'THERE', 'WERE', 'NO', 'PERSONS', 'IN', 'THE', 'CONVERSATION', 'JUST', 'THINGS', 'ONE', 'THING', 'COTTON'] +1995-1836-0000-735: ref=['THE', 'HON', 'CHARLES', 'SMITH', 'MISS', "SARAH'S", 'BROTHER', 'WAS', 'WALKING', 'SWIFTLY', 'UPTOWN', 'FROM', 'MISTER', "EASTERLY'S", 'WALL', 'STREET', 'OFFICE', 'AND', 'HIS', 'FACE', 'WAS', 'PALE'] +1995-1836-0000-735: hyp=['THE', 'HONOURABLE', 'CHARLES', 'SMITH', 'MISS', "SARAH'S", 'BROTHER', 'WAS', 'WALKING', 'SWIFTLY', 'UPTOWN', 'FROM', 'MISTER', "EASTERLY'S", 'WALL', 'STREET', 'OFFICE', 'AND', 'HIS', 'FACE', 'WAS', 'PALE'] +1995-1836-0001-736: ref=['AT', 'LAST', 'THE', 'COTTON', 'COMBINE', 'WAS', 'TO', 'ALL', 'APPEARANCES', 'AN', 'ASSURED', 'FACT', 'AND', 'HE', 'WAS', 'SLATED', 'FOR', 'THE', 'SENATE'] +1995-1836-0001-736: hyp=['AT', 'LAST', 'THE', 'COTTON', 'COMBINE', 'WAS', 'TO', 'ALL', 'APPEARANCES', 'AND', 'ASSURED', 'FACT', 'AND', 'HE', 'WAS', 'SLATED', 'FOR', 'THE', 'SENATE'] +1995-1836-0002-737: ref=['WHY', 'SHOULD', 'HE', 'NOT', 'BE', 'AS', 'OTHER', 'MEN'] +1995-1836-0002-737: hyp=['WHY', 'SHOULD', 'HE', 'NOT', 'BE', 'AS', 'OTHER', 'MEN'] +1995-1836-0003-738: ref=['SHE', 'WAS', 'NOT', 'HERSELF', 'A', 'NOTABLY', 'INTELLIGENT', 'WOMAN', 'SHE', 'GREATLY', 'ADMIRED', 'INTELLIGENCE', 'OR', 'WHATEVER', 'LOOKED', 'TO', 'HER', 'LIKE', 'INTELLIGENCE', 'IN', 'OTHERS'] +1995-1836-0003-738: hyp=['SHE', 'WAS', 'NOT', 'HERSELF', 'UNNOTABLY', 'INTELLIGENT', 'WOMAN', 'SHE', 'GREATLY', 'ADMIRED', 'INTELLIGENCE', 'OR', 'WHATEVER', 'LOOKED', 'TO', 'HER', 'LIKE', 'INTELLIGENCE', 'IN', 'OTHERS'] +1995-1836-0004-739: ref=['AS', 'SHE', 'AWAITED', 'HER', 'GUESTS', 'SHE', 'SURVEYED', 'THE', 'TABLE', 'WITH', 'BOTH', 'SATISFACTION', 'AND', 'DISQUIETUDE', 'FOR', 'HER', 'SOCIAL', 'FUNCTIONS', 'WERE', 'FEW', 'TONIGHT', 'THERE', 'WERE', 'SHE', 'CHECKED', 'THEM', 'OFF', 'ON', 'HER', 'FINGERS', 'SIR', 'JAMES', 'CREIGHTON', 'THE', 'RICH', 'ENGLISH', 'MANUFACTURER', 'AND', 'LADY', 'CREIGHTON', 'MISTER', 'AND', 'MISSUS', 'VANDERPOOL', 'MISTER', 'HARRY', 'CRESSWELL', 'AND', 'HIS', 'SISTER', 'JOHN', 'TAYLOR', 'AND', 'HIS', 'SISTER', 'AND', 'MISTER', 'CHARLES', 'SMITH', 'WHOM', 'THE', 'EVENING', 'PAPERS', 'MENTIONED', 'AS', 'LIKELY', 'TO', 'BE', 'UNITED', 'STATES', 'SENATOR', 'FROM', 'NEW', 'JERSEY', 'A', 'SELECTION', 'OF', 'GUESTS', 'THAT', 'HAD', 'BEEN', 'DETERMINED', 'UNKNOWN', 'TO', 'THE', 'HOSTESS', 'BY', 'THE', 'MEETING', 'OF', 'COTTON', 'INTERESTS', 'EARLIER', 'IN', 'THE', 'DAY'] +1995-1836-0004-739: hyp=['AS', 'SHE', 'AWAITED', 'HER', 'GUESS', 'SHE', 'SURVEYED', 
'THE', 'TABLE', 'WITH', 'BOTH', 'SATISFACTION', 'AND', 'AS', 'QUIETUDE', 'FOR', 'HER', 'SOCIAL', 'FUNCTIONS', 'WERE', 'FEW', 'TO', 'NIGHT', 'THERE', 'WERE', 'SHE', 'CHECKED', 'THEM', 'OFF', 'ON', 'HER', 'FINGERS', 'SIR', 'JAMES', 'CRIGHTON', 'THE', 'RICH', 'ENGLISH', 'MANUFACTURER', 'AND', 'LADY', 'KREITON', 'MISTER', 'AND', 'MISSUS', 'VAN', 'DERBOOLE', 'MISTER', 'HARRY', 'CRESWELL', 'AND', 'HIS', 'SISTER', 'JOHN', 'TAYLOR', 'AND', 'HIS', 'SISTER', 'AND', 'MISTER', 'CHARLES', 'SMITH', 'WHOM', 'THE', 'EVENING', 'PAPERS', 'MENTIONED', 'AS', 'LIKELY', 'TO', 'BE', 'UNITED', 'STATES', 'SENATOR', 'FROM', 'NEW', 'JERSEY', 'A', 'SELECTION', 'OF', 'GUESTS', 'THAT', 'HAD', 'BEEN', 'DETERMINED', 'UNKNOWN', 'TO', 'THE', 'HOSTESS', 'BY', 'THE', 'MEETING', 'OF', 'COTTON', 'INTERESTS', 'EARLIER', 'IN', 'THE', 'DAY'] +1995-1836-0005-740: ref=['MISSUS', 'GREY', 'HAD', 'MET', 'SOUTHERNERS', 'BEFORE', 'BUT', 'NOT', 'INTIMATELY', 'AND', 'SHE', 'ALWAYS', 'HAD', 'IN', 'MIND', 'VIVIDLY', 'THEIR', 'CRUELTY', 'TO', 'POOR', 'NEGROES', 'A', 'SUBJECT', 'SHE', 'MADE', 'A', 'POINT', 'OF', 'INTRODUCING', 'FORTHWITH'] +1995-1836-0005-740: hyp=['MISSUS', 'GRAY', 'HAD', 'MET', 'SOUTHERNERS', 'BEFORE', 'BUT', 'NOT', 'INTIMATELY', 'AND', 'SHE', 'ALWAYS', 'HAD', 'IN', 'MIND', 'VIVIDLY', 'THEIR', 'CRUELTY', 'TO', 'POOR', 'NEGROES', 'A', 'SUBJECT', 'SHE', 'MADE', 'A', 'POINT', 'OF', 'INTRODUCING', 'FORTHWITH'] +1995-1836-0006-741: ref=['SHE', 'WAS', 'THEREFORE', 'MOST', 'AGREEABLY', 'SURPRISED', 'TO', 'HEAR', 'MISTER', 'CRESSWELL', 'EXPRESS', 'HIMSELF', 'SO', 'CORDIALLY', 'AS', 'APPROVING', 'OF', 'NEGRO', 'EDUCATION'] +1995-1836-0006-741: hyp=['SHE', 'WAS', 'THEREFORE', 'MOST', 'AGREEABLY', 'SURPRISED', 'TO', 'HEAR', 'MISTER', 'CRESWELL', 'EXPRESS', 'HIMSELF', 'SO', 'CORDIALLY', 'AS', 'APPROVING', 'OF', 'NEGRO', 'EDUCATION'] +1995-1836-0007-742: ref=['BUT', 'YOU', 'BELIEVE', 'IN', 'SOME', 'EDUCATION', 'ASKED', 'MARY', 'TAYLOR'] +1995-1836-0007-742: hyp=['DO', 'BELIEVE', 'IN', 'SOME', 'EDUCATION', 'ASKED', 'MARY', 'TAYLOR'] +1995-1836-0008-743: ref=['I', 'BELIEVE', 'IN', 'THE', 'TRAINING', 'OF', 'PEOPLE', 'TO', 'THEIR', 'HIGHEST', 'CAPACITY', 'THE', 'ENGLISHMAN', 'HERE', 'HEARTILY', 'SECONDED', 'HIM'] +1995-1836-0008-743: hyp=['I', 'BELIEVE', 'IN', 'THE', 'TRAINING', 'OF', 'PEOPLE', 'TO', 'THE', 'HOUSE', 'CAPACITY', 'THE', 'ENGLISHMAN', 'HERE', 'HEARTILY', 'SECONDED', 'HIM'] +1995-1836-0009-744: ref=['BUT', 'CRESSWELL', 'ADDED', 'SIGNIFICANTLY', 'CAPACITY', 'DIFFERS', 'ENORMOUSLY', 'BETWEEN', 'RACES'] +1995-1836-0009-744: hyp=['BUT', 'CRESWELL', 'ADDED', 'SIGNIFICANTLY', 'CAPACITY', 'DIFFERS', 'ENORMOUSLY', 'BETWEEN', 'RACES'] +1995-1836-0010-745: ref=['THE', 'VANDERPOOLS', 'WERE', 'SURE', 'OF', 'THIS', 'AND', 'THE', 'ENGLISHMAN', 'INSTANCING', 'INDIA', 'BECAME', 'QUITE', 'ELOQUENT', 'MISSUS', 'GREY', 'WAS', 'MYSTIFIED', 'BUT', 'HARDLY', 'DARED', 'ADMIT', 'IT', 'THE', 'GENERAL', 'TREND', 'OF', 'THE', 'CONVERSATION', 'SEEMED', 'TO', 'BE', 'THAT', 'MOST', 'INDIVIDUALS', 'NEEDED', 'TO', 'BE', 'SUBMITTED', 'TO', 'THE', 'SHARPEST', 'SCRUTINY', 'BEFORE', 'BEING', 'ALLOWED', 'MUCH', 'EDUCATION', 'AND', 'AS', 'FOR', 'THE', 'LOWER', 'RACES', 'IT', 'WAS', 'SIMPLY', 'CRIMINAL', 'TO', 'OPEN', 'SUCH', 'USELESS', 'OPPORTUNITIES', 'TO', 'THEM'] +1995-1836-0010-745: hyp=['THE', 'VANDER', 'POOLS', 'WERE', 'SURE', 'THIS', 'AND', 'THE', 'ENGLISHMAN', 'INSTANCING', 'INDIA', 'BECAME', 'QUITE', 'ELOQUENT', 'MISSUS', 'GRAY', 'WAS', 'MYSTIFIED', 'BUT', 'HARDLY', 'DARED', 'ADMIT', 'IT', 'THE', 'GENERAL', 'TREND', 'OF', 'THE', 'CONVERSATION', 'SEEMED', 
'TO', 'BE', 'THAT', 'MOST', 'INDIVIDUALS', 'NEEDED', 'TO', 'BE', 'SUBMITTED', 'TO', 'THE', 'SHARPEST', 'SCRUTINY', 'BEFORE', 'BEING', 'ALLOWED', 'MUCH', 'EDUCATION', 'AND', 'AS', 'FOR', 'THE', 'LOWER', 'RACES', 'IT', 'WAS', 'SIMPLY', 'CRIMINAL', 'TO', 'OPEN', 'SUCH', 'USELESS', 'OPPORTUNITIES', 'TO', 'THEM'] +1995-1836-0011-746: ref=['POSITIVELY', 'HEROIC', 'ADDED', 'CRESSWELL', 'AVOIDING', 'HIS', "SISTER'S", 'EYES'] +1995-1836-0011-746: hyp=['WAS', 'ACTIVELY', 'HEROIC', 'ADDED', 'CHRISWELL', 'AVOIDING', 'HIS', "SISTER'S", 'EYES'] +1995-1836-0012-747: ref=['BUT', "WE'RE", 'NOT', 'ER', 'EXACTLY', 'WELCOMED'] +1995-1836-0012-747: hyp=['BUT', 'WE', 'ARE', 'NOT', 'A', 'EXACTLY', 'WELCOME'] +1995-1836-0013-748: ref=['MARY', 'TAYLOR', 'HOWEVER', 'RELATED', 'THE', 'TALE', 'OF', 'ZORA', 'TO', 'MISSUS', "GREY'S", 'PRIVATE', 'EAR', 'LATER'] +1995-1836-0013-748: hyp=['MERRY', 'TAYLOR', 'HOWEVER', 'RELATED', 'THE', 'TALE', 'OF', 'ZORAH', 'TO', 'MISSUS', "GRAY'S", 'PRIVATE', 'EAR', 'LATER'] +1995-1836-0014-749: ref=['FORTUNATELY', 'SAID', 'MISTER', 'VANDERPOOL', 'NORTHERNERS', 'AND', 'SOUTHERNERS', 'ARE', 'ARRIVING', 'AT', 'A', 'BETTER', 'MUTUAL', 'UNDERSTANDING', 'ON', 'MOST', 'OF', 'THESE', 'MATTERS'] +1995-1836-0014-749: hyp=['FORTUNATELY', 'SAID', 'MISTER', 'VAN', 'DERPOOL', 'NORTHERNERS', 'IN', 'SOUTHERNERS', 'ALL', 'RIVING', 'AT', 'A', 'BETTER', 'MUTUAL', 'UNDERSTANDING', 'ON', 'MOST', 'OF', 'THESE', 'MATTERS'] +1995-1837-0000-777: ref=['HE', 'KNEW', 'THE', 'SILVER', 'FLEECE', 'HIS', 'AND', "ZORA'S", 'MUST', 'BE', 'RUINED'] +1995-1837-0000-777: hyp=['HE', 'KNEW', 'THE', 'SILVER', 'FLEECE', 'HIS', 'AND', 'ZORAS', 'MUST', 'BE', 'RUINED'] +1995-1837-0001-778: ref=['IT', 'WAS', 'THE', 'FIRST', 'GREAT', 'SORROW', 'OF', 'HIS', 'LIFE', 'IT', 'WAS', 'NOT', 'SO', 'MUCH', 'THE', 'LOSS', 'OF', 'THE', 'COTTON', 'ITSELF', 'BUT', 'THE', 'FANTASY', 'THE', 'HOPES', 'THE', 'DREAMS', 'BUILT', 'AROUND', 'IT'] +1995-1837-0001-778: hyp=['IT', 'WAS', 'THE', 'FIRST', 'GREAT', 'SORROW', 'OF', 'HIS', 'LIFE', 'IT', 'WAS', 'NOT', 'SO', 'MUCH', 'THE', 'LOSS', 'OF', 'THE', 'CONTIN', 'ITSELF', 'BUT', 'THE', 'FANTASY', 'THE', 'HOPES', 'THE', 'DREAMS', 'BUILT', 'AROUND', 'IT'] +1995-1837-0002-779: ref=['AH', 'THE', 'SWAMP', 'THE', 'CRUEL', 'SWAMP'] +1995-1837-0002-779: hyp=['AH', 'THE', 'SWAMP', 'THE', 'CRUEL', 'SWAMP'] +1995-1837-0003-780: ref=['THE', 'REVELATION', 'OF', 'HIS', 'LOVE', 'LIGHTED', 'AND', 'BRIGHTENED', 'SLOWLY', 'TILL', 'IT', 'FLAMED', 'LIKE', 'A', 'SUNRISE', 'OVER', 'HIM', 'AND', 'LEFT', 'HIM', 'IN', 'BURNING', 'WONDER'] +1995-1837-0003-780: hyp=['WHO', 'REVELATION', 'OF', 'HIS', 'LOVE', 'LIGHTED', 'AND', 'BRIGHTENED', 'SLOWLY', 'TILL', 'IT', 'FLAMED', 'LIKE', 'A', 'SUNRISE', 'OVER', 'HIM', 'AND', 'LEFT', 'HIM', 'IN', 'BURNING', 'WONDER'] +1995-1837-0004-781: ref=['HE', 'PANTED', 'TO', 'KNOW', 'IF', 'SHE', 'TOO', 'KNEW', 'OR', 'KNEW', 'AND', 'CARED', 'NOT', 'OR', 'CARED', 'AND', 'KNEW', 'NOT'] +1995-1837-0004-781: hyp=['HE', 'PANTED', 'TO', 'KNOW', 'IF', 'SHE', 'TOO', 'KNEW', 'OR', 'NEW', 'AND', 'CARED', 'NOT', 'OR', 'CARED', 'AND', 'KNEW', 'NOT'] +1995-1837-0005-782: ref=['SHE', 'WAS', 'SO', 'STRANGE', 'AND', 'HUMAN', 'A', 'CREATURE'] +1995-1837-0005-782: hyp=['SHE', 'WAS', 'SO', 'STRANGE', 'IN', 'HUMAN', 'A', 'CREATURE'] +1995-1837-0006-783: ref=['THE', 'WORLD', 'WAS', 'WATER', 'VEILED', 'IN', 'MISTS'] +1995-1837-0006-783: hyp=['THE', 'WORLD', 'WAS', 'WATER', 'VEILED', 'IN', 'MISTS'] +1995-1837-0007-784: ref=['THEN', 'OF', 'A', 'SUDDEN', 'AT', 'MIDDAY', 'THE', 'SUN', 'SHOT', 'OUT', 'HOT', 'AND', 'STILL', 
'NO', 'BREATH', 'OF', 'AIR', 'STIRRED', 'THE', 'SKY', 'WAS', 'LIKE', 'BLUE', 'STEEL', 'THE', 'EARTH', 'STEAMED'] +1995-1837-0007-784: hyp=['THEN', 'OF', 'A', 'SUDDEN', 'AT', 'MIDDAY', 'THE', 'SUN', 'SHOT', 'OUT', 'HOT', 'AND', 'STILL', 'NO', 'BREATH', 'OF', 'AIR', 'STIRRED', 'THE', 'SKY', 'WAS', 'LIKE', 'BLUE', 'STEEL', 'THE', 'EARTH', 'STEAMED'] +1995-1837-0008-785: ref=['WHERE', 'WAS', 'THE', 'USE', 'OF', 'IMAGINING'] +1995-1837-0008-785: hyp=['WHERE', 'WAS', 'THE', 'USE', 'OF', 'IMAGINING'] +1995-1837-0009-786: ref=['THE', 'LAGOON', 'HAD', 'BEEN', 'LEVEL', 'WITH', 'THE', 'DYKES', 'A', 'WEEK', 'AGO', 'AND', 'NOW'] +1995-1837-0009-786: hyp=['THE', 'LAGOON', 'HAD', 'BEEN', 'LEVEL', 'WITH', 'THE', 'DIKES', 'A', 'WEEK', 'AGO', 'AND', 'NOW'] +1995-1837-0010-787: ref=['PERHAPS', 'SHE', 'TOO', 'MIGHT', 'BE', 'THERE', 'WAITING', 'WEEPING'] +1995-1837-0010-787: hyp=['PERHAPS', 'SHE', 'TOO', 'MIGHT', 'BE', 'THERE', 'WAITING', 'WEEPING'] +1995-1837-0011-788: ref=['HE', 'STARTED', 'AT', 'THE', 'THOUGHT', 'HE', 'HURRIED', 'FORTH', 'SADLY'] +1995-1837-0011-788: hyp=['HE', 'STARTED', 'AT', 'THE', 'THOUGHT', 'HE', 'HURRIED', 'FORTH', 'SADLY'] +1995-1837-0012-789: ref=['HE', 'SPLASHED', 'AND', 'STAMPED', 'ALONG', 'FARTHER', 'AND', 'FARTHER', 'ONWARD', 'UNTIL', 'HE', 'NEARED', 'THE', 'RAMPART', 'OF', 'THE', 'CLEARING', 'AND', 'PUT', 'FOOT', 'UPON', 'THE', 'TREE', 'BRIDGE'] +1995-1837-0012-789: hyp=['HE', 'SPLASHED', 'AND', 'STAMPED', 'ALONG', 'FARTHER', 'AND', 'FARTHER', 'ONWARD', 'UNTIL', 'HE', 'NEARED', 'THE', 'RAMPART', 'OF', 'THE', 'CLEARING', 'AND', 'PUT', 'FOOT', 'UPON', 'THE', 'TREE', 'BRIDGE'] +1995-1837-0013-790: ref=['THEN', 'HE', 'LOOKED', 'DOWN', 'THE', 'LAGOON', 'WAS', 'DRY'] +1995-1837-0013-790: hyp=['THEN', 'HE', 'LOOKED', 'DOWN', 'THE', 'LAGOON', 'WAS', 'DRY'] +1995-1837-0014-791: ref=['HE', 'STOOD', 'A', 'MOMENT', 'BEWILDERED', 'THEN', 'TURNED', 'AND', 'RUSHED', 'UPON', 'THE', 'ISLAND', 'A', 'GREAT', 'SHEET', 'OF', 'DAZZLING', 'SUNLIGHT', 'SWEPT', 'THE', 'PLACE', 'AND', 'BENEATH', 'LAY', 'A', 'MIGHTY', 'MASS', 'OF', 'OLIVE', 'GREEN', 'THICK', 'TALL', 'WET', 'AND', 'WILLOWY'] +1995-1837-0014-791: hyp=['HE', 'STOOD', 'A', 'MOMENT', 'BEWILDERED', 'THEN', 'TURNED', 'AND', 'RUSHED', 'UPON', 'THE', 'ISLAND', 'A', 'GREAT', 'SHEET', 'OF', 'DAZZLING', 'SUNLIGHT', 'SWEPT', 'THE', 'PLACE', 'AND', 'BENEATH', 'LAY', 'A', 'MIGHTY', 'MASS', 'OF', 'OLIVE', 'GREEN', 'THICK', 'TALL', 'WET', 'AND', 'WILLOWY'] +1995-1837-0015-792: ref=['THE', 'SQUARES', 'OF', 'COTTON', 'SHARP', 'EDGED', 'HEAVY', 'WERE', 'JUST', 'ABOUT', 'TO', 'BURST', 'TO', 'BOLLS'] +1995-1837-0015-792: hyp=['THE', 'SQUARES', 'OF', 'COTTON', 'SHARP', 'EDGED', 'HEAVY', 'WERE', 'JUST', 'ABOUT', 'TO', 'BURST', 'TO', 'BOWLS'] +1995-1837-0016-793: ref=['FOR', 'ONE', 'LONG', 'MOMENT', 'HE', 'PAUSED', 'STUPID', 'AGAPE', 'WITH', 'UTTER', 'AMAZEMENT', 'THEN', 'LEANED', 'DIZZILY', 'AGAINST', 'A', 'TREE'] +1995-1837-0016-793: hyp=['FOR', 'ONE', 'LONG', 'MOMENT', 'HE', 'PAUSED', 'STUPID', 'AGAPE', 'WITH', 'UTTER', 'AMAZEMENT', 'THEN', 'LEANED', 'DIZZILY', 'AGAINST', 'A', 'TREE'] +1995-1837-0017-794: ref=['HE', 'GAZED', 'ABOUT', 'PERPLEXED', 'ASTONISHED'] +1995-1837-0017-794: hyp=['HE', 'GAZED', 'ABOUT', 'PERPLEXED', 'ASTONISHED'] +1995-1837-0018-795: ref=['HERE', 'LAY', 'THE', 'READING', 'OF', 'THE', 'RIDDLE', 'WITH', 'INFINITE', 'WORK', 'AND', 'PAIN', 'SOME', 'ONE', 'HAD', 'DUG', 'A', 'CANAL', 'FROM', 'THE', 'LAGOON', 'TO', 'THE', 'CREEK', 'INTO', 'WHICH', 'THE', 'FORMER', 'HAD', 'DRAINED', 'BY', 'A', 'LONG', 'AND', 'CROOKED', 'WAY', 'THUS', 
'ALLOWING', 'IT', 'TO', 'EMPTY', 'DIRECTLY'] +1995-1837-0018-795: hyp=['HERE', 'LAY', 'THE', 'READING', 'OF', 'THE', 'RIDDLE', 'WITH', 'INFINITE', 'WORK', 'AND', 'PAIN', 'SOME', 'ONE', 'HAD', 'DUG', 'A', 'CANAL', 'FROM', 'THE', 'LAGOON', 'TO', 'THE', 'CREEK', 'INTO', 'WHICH', 'THE', 'FORMER', 'HAD', 'DRAINED', 'BY', 'A', 'LONG', 'AND', 'CROOKED', 'WAY', 'THUS', 'ALLOWING', 'IT', 'TO', 'EMPTY', 'DIRECTLY'] +1995-1837-0019-796: ref=['HE', 'SAT', 'DOWN', 'WEAK', 'BEWILDERED', 'AND', 'ONE', 'THOUGHT', 'WAS', 'UPPERMOST', 'ZORA'] +1995-1837-0019-796: hyp=['HE', 'SAT', 'DOWN', 'WEAK', 'BEWILDERED', 'AND', 'ONE', 'THOUGHT', 'WAS', 'UPPERMOST', 'SORA'] +1995-1837-0020-797: ref=['THE', 'YEARS', 'OF', 'THE', 'DAYS', 'OF', 'HER', 'DYING', 'WERE', 'TEN'] +1995-1837-0020-797: hyp=['THE', 'YEARS', 'OF', 'THE', 'DAYS', 'OF', 'HER', 'DYING', 'WERE', 'TEN'] +1995-1837-0021-798: ref=['THE', 'HOPE', 'AND', 'DREAM', 'OF', 'HARVEST', 'WAS', 'UPON', 'THE', 'LAND'] +1995-1837-0021-798: hyp=['THE', 'HOPE', 'AND', 'DREAM', 'OF', 'HARVEST', 'WAS', 'UPON', 'THE', 'LAND'] +1995-1837-0022-799: ref=['UP', 'IN', 'THE', 'SICK', 'ROOM', 'ZORA', 'LAY', 'ON', 'THE', 'LITTLE', 'WHITE', 'BED'] +1995-1837-0022-799: hyp=['UP', 'IN', 'THE', 'SICK', 'ROOM', 'ZORA', 'LAY', 'ON', 'THE', 'LITTLE', 'WHITE', 'BED'] +1995-1837-0023-800: ref=['THE', 'NET', 'AND', 'WEB', 'OF', 'ENDLESS', 'THINGS', 'HAD', 'BEEN', 'CRAWLING', 'AND', 'CREEPING', 'AROUND', 'HER', 'SHE', 'HAD', 'STRUGGLED', 'IN', 'DUMB', 'SPEECHLESS', 'TERROR', 'AGAINST', 'SOME', 'MIGHTY', 'GRASPING', 'THAT', 'STROVE', 'FOR', 'HER', 'LIFE', 'WITH', 'GNARLED', 'AND', 'CREEPING', 'FINGERS', 'BUT', 'NOW', 'AT', 'LAST', 'WEAKLY', 'SHE', 'OPENED', 'HER', 'EYES', 'AND', 'QUESTIONED'] +1995-1837-0023-800: hyp=['THE', 'NED', 'AND', 'WEB', 'OF', 'ENDLESS', 'THINGS', 'HAD', 'BEEN', 'CRAWLING', 'AND', 'CREEPING', 'AROUND', 'HER', 'SHE', 'HAD', 'STRUGGLED', 'IN', 'DUMB', 'SPEECHLESS', 'TERROR', 'AGAINST', 'SOME', 'MIGHTY', 'GRASPING', 'THAT', 'STROVE', 'FOR', 'HER', 'LIFE', 'WITH', 'GNARLED', 'AND', 'CREEPING', 'FINGERS', 'BUT', 'NOW', 'AT', 'LAST', 'WEEKLY', 'SHE', 'OPENED', 'HER', 'EYES', 'AND', 'QUESTIONED'] +1995-1837-0024-801: ref=['FOR', 'A', 'WHILE', 'SHE', 'LAY', 'IN', 'HER', 'CHAIR', 'IN', 'HAPPY', 'DREAMY', 'PLEASURE', 'AT', 'SUN', 'AND', 'BIRD', 'AND', 'TREE'] +1995-1837-0024-801: hyp=['FOR', 'A', 'WHILE', 'SHE', 'LAY', 'IN', 'HER', 'CHAIR', 'IN', 'HAPPY', 'DREAMY', 'PLEASURE', 'ITS', 'SUN', 'AND', 'BIRD', 'AND', 'TREE'] +1995-1837-0025-802: ref=['SHE', 'ROSE', 'WITH', 'A', 'FLEETING', 'GLANCE', 'GATHERED', 'THE', 'SHAWL', 'ROUND', 'HER', 'THEN', 'GLIDING', 'FORWARD', 'WAVERING', 'TREMULOUS', 'SLIPPED', 'ACROSS', 'THE', 'ROAD', 'AND', 'INTO', 'THE', 'SWAMP'] +1995-1837-0025-802: hyp=['SHE', 'ROSE', 'WITH', 'A', 'FLEETING', 'GLANCE', 'GATHERED', 'THE', 'SHAWL', 'AROUND', 'HER', 'THEN', 'GLIDING', 'FORWARD', 'WAVERING', 'TREMULOUS', 'SLIPPED', 'ACROSS', 'THE', 'ROAD', 'AND', 'INTO', 'THE', 'SWAMP'] +1995-1837-0026-803: ref=['SHE', 'HAD', 'BEEN', 'BORN', 'WITHIN', 'ITS', 'BORDERS', 'WITHIN', 'ITS', 'BORDERS', 'SHE', 'HAD', 'LIVED', 'AND', 'GROWN', 'AND', 'WITHIN', 'ITS', 'BORDERS', 'SHE', 'HAD', 'MET', 'HER', 'LOVE'] +1995-1837-0026-803: hyp=['SHE', 'HAD', 'BEEN', 'BORN', 'WITHIN', 'ITS', 'BORDERS', 'WITHIN', 'HIS', 'BORDERS', 'SHE', 'HAD', 'LIVED', 'AND', 'GROWN', 'AND', 'WITHIN', 'ITS', 'BORDER', 'SHE', 'HAD', 'MET', 'HER', 'LOVE'] +1995-1837-0027-804: ref=['ON', 'SHE', 'HURRIED', 'UNTIL', 'SWEEPING', 'DOWN', 'TO', 'THE', 'LAGOON', 'AND', 'THE', 'ISLAND', 'LO', 'THE', 'COTTON', 
'LAY', 'BEFORE', 'HER'] +1995-1837-0027-804: hyp=['ON', 'SHE', 'HURRIED', 'UNTIL', 'SWEEPING', 'DOWN', 'TO', 'THE', 'LAGOON', 'AND', 'THE', 'ISLAND', 'LO', 'THE', 'COTTON', 'LAY', 'BEFORE', 'HER'] +1995-1837-0028-805: ref=['THE', 'CHAIR', 'WAS', 'EMPTY', 'BUT', 'HE', 'KNEW'] +1995-1837-0028-805: hyp=['THE', 'CHAIR', 'WAS', 'EMPTY', 'BUT', 'HE', 'KNEW'] +1995-1837-0029-806: ref=['HE', 'DARTED', 'THROUGH', 'THE', 'TREES', 'AND', 'PAUSED', 'A', 'TALL', 'MAN', 'STRONGLY', 'BUT', 'SLIMLY', 'MADE'] +1995-1837-0029-806: hyp=['HE', 'DARTED', 'THROUGH', 'THE', 'TREES', 'AND', 'PAUSED', 'A', 'TALL', 'MAN', 'STRONGLY', 'BUT', 'SLIMLY', 'MADE'] +2094-142345-0000-308: ref=['IT', 'IS', 'A', 'VERY', 'FINE', 'OLD', 'PLACE', 'OF', 'RED', 'BRICK', 'SOFTENED', 'BY', 'A', 'PALE', 'POWDERY', 'LICHEN', 'WHICH', 'HAS', 'DISPERSED', 'ITSELF', 'WITH', 'HAPPY', 'IRREGULARITY', 'SO', 'AS', 'TO', 'BRING', 'THE', 'RED', 'BRICK', 'INTO', 'TERMS', 'OF', 'FRIENDLY', 'COMPANIONSHIP', 'WITH', 'THE', 'LIMESTONE', 'ORNAMENTS', 'SURROUNDING', 'THE', 'THREE', 'GABLES', 'THE', 'WINDOWS', 'AND', 'THE', 'DOOR', 'PLACE'] +2094-142345-0000-308: hyp=['IT', 'IS', 'A', 'VERY', 'FINE', 'OLD', 'PLACE', 'OF', 'RED', 'BRICK', 'SOFTENED', 'BY', 'A', 'PALE', 'POWDERY', 'LICHEN', 'WHICH', 'HAS', 'DISPERSED', 'ITSELF', 'WITH', 'HAPPY', 'IRREGULARITY', 'SO', 'AS', 'TO', 'BRING', 'THE', 'RED', 'BRICK', 'INTO', 'TERMS', 'OF', 'FRIENDLY', 'COMPANIONSHIP', 'WITH', 'A', 'LIMESTONE', 'ORNAMENTS', 'SURROUNDING', 'THE', 'THREE', 'GABLES', 'THE', 'WINDOWS', 'AND', 'THE', 'DOOR', 'PLACE'] +2094-142345-0001-309: ref=['BUT', 'THE', 'WINDOWS', 'ARE', 'PATCHED', 'WITH', 'WOODEN', 'PANES', 'AND', 'THE', 'DOOR', 'I', 'THINK', 'IS', 'LIKE', 'THE', 'GATE', 'IT', 'IS', 'NEVER', 'OPENED'] +2094-142345-0001-309: hyp=['BUT', 'THE', 'WINDOWS', 'ARE', 'PATCHED', 'WITH', 'WOODEN', 'PANES', 'AND', 'THE', 'DOOR', 'I', 'THINK', 'IS', 'LIKE', 'THE', 'GATE', 'IT', 'IS', 'NEVER', 'OPENED'] +2094-142345-0002-310: ref=['FOR', 'IT', 'IS', 'A', 'SOLID', 'HEAVY', 'HANDSOME', 'DOOR', 'AND', 'MUST', 'ONCE', 'HAVE', 'BEEN', 'IN', 'THE', 'HABIT', 'OF', 'SHUTTING', 'WITH', 'A', 'SONOROUS', 'BANG', 'BEHIND', 'A', 'LIVERIED', 'LACKEY', 'WHO', 'HAD', 'JUST', 'SEEN', 'HIS', 'MASTER', 'AND', 'MISTRESS', 'OFF', 'THE', 'GROUNDS', 'IN', 'A', 'CARRIAGE', 'AND', 'PAIR'] +2094-142345-0002-310: hyp=['FOR', 'IT', 'IS', 'A', 'SOLID', 'HEAVY', 'HANDSOME', 'DOOR', 'AND', 'MUST', 'ONCE', 'HAVE', 'BEEN', 'IN', 'THE', 'HABIT', 'OF', 'SHEDDING', 'WITH', 'A', 'SONOROUS', 'BANG', 'BEHIND', 'THE', 'LIVERYED', 'LACKEY', 'WHO', 'HAD', 'JUST', 'SEEN', 'HIS', 'MASTER', 'AND', 'MISTRESS', 'OFF', 'THE', 'GROUNDS', 'IN', 'A', 'CARRIAGE', 'AND', 'PAIR'] +2094-142345-0003-311: ref=['A', 'LARGE', 'OPEN', 'FIREPLACE', 'WITH', 'RUSTY', 'DOGS', 'IN', 'IT', 'AND', 'A', 'BARE', 'BOARDED', 'FLOOR', 'AT', 'THE', 'FAR', 'END', 'FLEECES', 'OF', 'WOOL', 'STACKED', 'UP', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'FLOOR', 'SOME', 'EMPTY', 'CORN', 'BAGS'] +2094-142345-0003-311: hyp=['A', 'LARGE', 'OPEN', 'FIREPLACE', 'WITH', 'RUSTY', 'DOGS', 'IN', 'IT', 'AND', 'A', 'BARE', 'BOARDED', 'FLOOR', 'AT', 'THE', 'FAR', 'END', 'FLEECES', 'OF', 'WOOL', 'STACKED', 'UP', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'FLOOR', 'SOME', 'EMPTY', 'CORN', 'BAGS'] +2094-142345-0004-312: ref=['AND', 'WHAT', 'THROUGH', 'THE', 'LEFT', 'HAND', 'WINDOW'] +2094-142345-0004-312: hyp=['AND', 'WHAT', 'THROUGH', 'THE', 'LEFT', 'HAND', 'WINDOW'] +2094-142345-0005-313: ref=['SEVERAL', 'CLOTHES', 'HORSES', 'A', 'PILLION', 'A', 'SPINNING', 'WHEEL', 'AND', 'AN', 'OLD', 'BOX', 
'WIDE', 'OPEN', 'AND', 'STUFFED', 'FULL', 'OF', 'COLOURED', 'RAGS'] +2094-142345-0005-313: hyp=['SEVERAL', 'CLOTHES', 'HORSES', 'APILLION', 'A', 'SPINNING', 'WHEEL', 'AND', 'AN', 'OLD', 'BOX', 'WIDE', 'OPEN', 'AND', 'STUFFED', 'FULL', 'OF', 'COLOURED', 'RAGS'] +2094-142345-0006-314: ref=['AT', 'THE', 'EDGE', 'OF', 'THIS', 'BOX', 'THERE', 'LIES', 'A', 'GREAT', 'WOODEN', 'DOLL', 'WHICH', 'SO', 'FAR', 'AS', 'MUTILATION', 'IS', 'CONCERNED', 'BEARS', 'A', 'STRONG', 'RESEMBLANCE', 'TO', 'THE', 'FINEST', 'GREEK', 'SCULPTURE', 'AND', 'ESPECIALLY', 'IN', 'THE', 'TOTAL', 'LOSS', 'OF', 'ITS', 'NOSE'] +2094-142345-0006-314: hyp=['AT', 'THE', 'EDGE', 'OF', 'THIS', 'BOX', 'THERE', 'LIES', 'A', 'GREAT', 'WOODEN', 'DOLL', 'WHICH', 'SO', 'FAR', 'AS', 'MUTILATION', 'IS', 'CONCERNED', 'BEARS', 'A', 'STRONG', 'RESEMBLANCE', 'TO', 'THE', 'FINEST', 'GREEK', 'SCULPTURE', 'AND', 'ESPECIALLY', 'IN', 'THE', 'TOTAL', 'LOSS', 'OF', 'ITS', 'NOSE'] +2094-142345-0007-315: ref=['THE', 'HISTORY', 'OF', 'THE', 'HOUSE', 'IS', 'PLAIN', 'NOW'] +2094-142345-0007-315: hyp=['THE', 'HISTORY', 'OF', 'THE', 'HOUSE', 'IS', 'PLAIN', 'NOW'] +2094-142345-0008-316: ref=['BUT', 'THERE', 'IS', 'ALWAYS', 'A', 'STRONGER', 'SENSE', 'OF', 'LIFE', 'WHEN', 'THE', 'SUN', 'IS', 'BRILLIANT', 'AFTER', 'RAIN', 'AND', 'NOW', 'HE', 'IS', 'POURING', 'DOWN', 'HIS', 'BEAMS', 'AND', 'MAKING', 'SPARKLES', 'AMONG', 'THE', 'WET', 'STRAW', 'AND', 'LIGHTING', 'UP', 'EVERY', 'PATCH', 'OF', 'VIVID', 'GREEN', 'MOSS', 'ON', 'THE', 'RED', 'TILES', 'OF', 'THE', 'COW', 'SHED', 'AND', 'TURNING', 'EVEN', 'THE', 'MUDDY', 'WATER', 'THAT', 'IS', 'HURRYING', 'ALONG', 'THE', 'CHANNEL', 'TO', 'THE', 'DRAIN', 'INTO', 'A', 'MIRROR', 'FOR', 'THE', 'YELLOW', 'BILLED', 'DUCKS', 'WHO', 'ARE', 'SEIZING', 'THE', 'OPPORTUNITY', 'OF', 'GETTING', 'A', 'DRINK', 'WITH', 'AS', 'MUCH', 'BODY', 'IN', 'IT', 'AS', 'POSSIBLE'] +2094-142345-0008-316: hyp=['BUT', 'THERE', 'IS', 'ALWAYS', 'AS', 'STRONGER', 'SENSE', 'OF', 'LIFE', 'WHEN', 'THE', 'SUN', 'IS', 'BRILLIANT', 'AFTER', 'RAIN', 'AND', 'NOW', 'HE', 'IS', 'POURING', 'DOWN', 'HIS', 'BEAMS', 'AND', 'MAKING', 'SPARKLES', 'AMONG', 'THE', 'WET', 'STRAW', 'AND', 'LIGHTING', 'UP', 'EVERY', 'PATCH', 'OF', 'VIVID', 'GREEN', 'MOSS', 'ON', 'THE', 'RED', 'TILES', 'OF', 'THE', 'COWSHED', 'AND', 'TURNING', 'EVEN', 'THE', 'MUDDY', 'WATER', 'THAT', 'IS', 'HURRYING', 'ALONG', 'THE', 'CHANNEL', 'TO', 'THE', 'DRAIN', 'INTO', 'A', 'MIRROR', 'FOR', 'THE', 'YELLOW', 'BUILD', 'DUCKS', 'WHO', 'ARE', 'SEIZING', 'THE', 'OPPORTUNITY', 'OF', 'GETTING', 'A', 'DRINK', 'WITH', 'AS', 'MUCH', 'BODY', 'IN', 'IT', 'AS', 'POSSIBLE'] +2094-142345-0009-317: ref=['FOR', 'THE', 'GREAT', 'BARN', 'DOORS', 'ARE', 'THROWN', 'WIDE', 'OPEN', 'AND', 'MEN', 'ARE', 'BUSY', 'THERE', 'MENDING', 'THE', 'HARNESS', 'UNDER', 'THE', 'SUPERINTENDENCE', 'OF', 'MISTER', 'GOBY', 'THE', 'WHITTAW', 'OTHERWISE', 'SADDLER', 'WHO', 'ENTERTAINS', 'THEM', 'WITH', 'THE', 'LATEST', 'TREDDLESTON', 'GOSSIP'] +2094-142345-0009-317: hyp=['FOR', 'THE', 'GREAT', 'BARN', 'DOORS', 'ARE', 'THROWN', 'WIDE', 'OPEN', 'AND', 'MEN', 'ARE', 'BUSY', 'THERE', 'MENDING', 'THE', 'HARNESS', 'UNDER', 'THE', 'SUPERINTENDENCE', 'OF', 'MISTER', 'GOBY', 'THE', 'WIDOW', 'OTHERWISE', 'SADDLER', 'WHO', 'ENTERTAINS', 'THEM', 'WITH', 'THE', 'LATEST', 'TREADLESTON', 'GOSSIP'] +2094-142345-0010-318: ref=['HETTY', 'SORREL', 'OFTEN', 'TOOK', 'THE', 'OPPORTUNITY', 'WHEN', 'HER', "AUNT'S", 'BACK', 'WAS', 'TURNED', 'OF', 'LOOKING', 'AT', 'THE', 'PLEASING', 'REFLECTION', 'OF', 'HERSELF', 'IN', 'THOSE', 'POLISHED', 'SURFACES', 'FOR', 'THE', 
'OAK', 'TABLE', 'WAS', 'USUALLY', 'TURNED', 'UP', 'LIKE', 'A', 'SCREEN', 'AND', 'WAS', 'MORE', 'FOR', 'ORNAMENT', 'THAN', 'FOR', 'USE', 'AND', 'SHE', 'COULD', 'SEE', 'HERSELF', 'SOMETIMES', 'IN', 'THE', 'GREAT', 'ROUND', 'PEWTER', 'DISHES', 'THAT', 'WERE', 'RANGED', 'ON', 'THE', 'SHELVES', 'ABOVE', 'THE', 'LONG', 'DEAL', 'DINNER', 'TABLE', 'OR', 'IN', 'THE', 'HOBS', 'OF', 'THE', 'GRATE', 'WHICH', 'ALWAYS', 'SHONE', 'LIKE', 'JASPER'] +2094-142345-0010-318: hyp=["HETTY'S", 'SURREL', 'OFTEN', 'TOOK', 'THE', 'OPPORTUNITY', 'WHEN', 'HER', "AUNT'S", 'BACK', 'WAS', 'TURNED', 'OF', 'LOOKING', 'AT', 'THE', 'PLEASING', 'REFLECTION', 'OF', 'HERSELF', 'IN', 'THOSE', 'POLISHED', 'SERVICES', 'FOR', 'THE', 'OAK', 'TABLE', 'WAS', 'USUALLY', 'TURNED', 'UP', 'LIKE', 'A', 'SCREEN', 'AND', 'WAS', 'MORE', 'FOR', 'ORNAMENT', 'THAN', 'FOR', 'USE', 'AND', 'SHE', 'COULD', 'SEE', 'HERSELF', 'SOMETIMES', 'IN', 'THE', 'GREAT', 'ROUND', 'PEWTER', 'DISHES', 'THAT', 'WERE', 'RANGED', 'ON', 'THE', 'SHELVES', 'ABOVE', 'THE', 'LONG', 'DEAL', 'DINNER', 'TABLE', 'OR', 'IN', 'THE', 'HOBS', 'OF', 'THE', 'GRATE', 'WHICH', 'ALWAYS', 'SHONE', 'LIKE', 'JASPER'] +2094-142345-0011-319: ref=['DO', 'NOT', 'SUPPOSE', 'HOWEVER', 'THAT', 'MISSUS', 'POYSER', 'WAS', 'ELDERLY', 'OR', 'SHREWISH', 'IN', 'HER', 'APPEARANCE', 'SHE', 'WAS', 'A', 'GOOD', 'LOOKING', 'WOMAN', 'NOT', 'MORE', 'THAN', 'EIGHT', 'AND', 'THIRTY', 'OF', 'FAIR', 'COMPLEXION', 'AND', 'SANDY', 'HAIR', 'WELL', 'SHAPEN', 'LIGHT', 'FOOTED'] +2094-142345-0011-319: hyp=['DO', 'NOT', 'SUPPOSE', 'HOWEVER', 'THAT', 'MISSUS', 'POYSER', 'WAS', 'ELDERLY', 'OR', 'SHREWISH', 'IN', 'HER', 'APPEARANCE', 'SHE', 'WAS', 'A', 'GOOD', 'LOOKING', 'WOMAN', 'NOT', 'MORE', 'THAN', 'EIGHT', 'AND', 'THIRTY', 'A', 'FAIR', 'COMPLEXION', 'AND', 'SANDY', 'HAIR', 'WHILE', 'SHAKEN', 'LIGHTFOOTED'] +2094-142345-0012-320: ref=['THE', 'FAMILY', 'LIKENESS', 'BETWEEN', 'HER', 'AND', 'HER', 'NIECE', 'DINAH', 'MORRIS', 'WITH', 'THE', 'CONTRAST', 'BETWEEN', 'HER', 'KEENNESS', 'AND', "DINAH'S", 'SERAPHIC', 'GENTLENESS', 'OF', 'EXPRESSION', 'MIGHT', 'HAVE', 'SERVED', 'A', 'PAINTER', 'AS', 'AN', 'EXCELLENT', 'SUGGESTION', 'FOR', 'A', 'MARTHA', 'AND', 'MARY'] +2094-142345-0012-320: hyp=['THE', 'FAMILY', 'LIKENESS', 'BETWEEN', 'HER', 'AND', 'HER', 'NIECE', 'DINAH', 'MORRIS', 'WITH', 'A', 'CONTRAST', 'BETWEEN', 'HER', 'KEENNESS', 'AND', "DINAH'S", 'SERAPHIC', 'GENTLENESS', 'OF', 'EXPRESSION', 'MIGHT', 'HAVE', 'SERVED', 'A', 'PAINTER', 'AS', 'AN', 'EXCELLENT', 'SUGGESTION', 'FOR', 'MARTHA', 'AND', 'MARY'] +2094-142345-0013-321: ref=['HER', 'TONGUE', 'WAS', 'NOT', 'LESS', 'KEEN', 'THAN', 'HER', 'EYE', 'AND', 'WHENEVER', 'A', 'DAMSEL', 'CAME', 'WITHIN', 'EARSHOT', 'SEEMED', 'TO', 'TAKE', 'UP', 'AN', 'UNFINISHED', 'LECTURE', 'AS', 'A', 'BARREL', 'ORGAN', 'TAKES', 'UP', 'A', 'TUNE', 'PRECISELY', 'AT', 'THE', 'POINT', 'WHERE', 'IT', 'HAD', 'LEFT', 'OFF'] +2094-142345-0013-321: hyp=['HER', 'TONGUE', 'WAS', 'NOT', 'LESS', 'KEEN', 'THAN', 'HER', 'EYE', 'AND', 'WHENEVER', 'A', 'DAMSEL', 'CAME', 'WITHIN', 'EAR', 'SHOT', 'SEEMED', 'TO', 'TAKE', 'UP', 'AN', 'UNFINISHED', 'LECTURE', 'AS', 'A', 'BARREL', 'ORGAN', 'TAKES', 'UP', 'A', 'TUNE', 'PRECISELY', 'AT', 'THE', 'POINT', 'WHERE', 'IT', 'HAD', 'LEFT', 'OFF'] +2094-142345-0014-322: ref=['THE', 'FACT', 'THAT', 'IT', 'WAS', 'CHURNING', 'DAY', 'WAS', 'ANOTHER', 'REASON', 'WHY', 'IT', 'WAS', 'INCONVENIENT', 'TO', 'HAVE', 'THE', 'WHITTAWS', 'AND', 'WHY', 'CONSEQUENTLY', 'MISSUS', 'POYSER', 'SHOULD', 'SCOLD', 'MOLLY', 'THE', 'HOUSEMAID', 'WITH', 'UNUSUAL', 'SEVERITY'] 
+2094-142345-0014-322: hyp=['THE', 'FACT', 'THAT', 'IT', 'WAS', 'CHURNING', 'DAY', 'WAS', 'ANOTHER', 'REASON', 'WHY', 'IT', 'WAS', 'INCONVENIENT', 'TO', 'HAVE', 'THE', 'WIDOWS', 'AND', 'WHY', 'CONSEQUENTLY', 'MISSUS', 'POYSER', 'SHOULD', 'SCOLD', 'MOLLY', 'THE', 'HOUSEMAID', 'WITH', 'UNUSUAL', 'SEVERITY'] +2094-142345-0015-323: ref=['TO', 'ALL', 'APPEARANCE', 'MOLLY', 'HAD', 'GOT', 'THROUGH', 'HER', 'AFTER', 'DINNER', 'WORK', 'IN', 'AN', 'EXEMPLARY', 'MANNER', 'HAD', 'CLEANED', 'HERSELF', 'WITH', 'GREAT', 'DISPATCH', 'AND', 'NOW', 'CAME', 'TO', 'ASK', 'SUBMISSIVELY', 'IF', 'SHE', 'SHOULD', 'SIT', 'DOWN', 'TO', 'HER', 'SPINNING', 'TILL', 'MILKING', 'TIME'] +2094-142345-0015-323: hyp=['TO', 'ALL', 'APPEARANCE', 'MOLLY', 'HAD', 'GOT', 'THROUGH', 'HER', 'AFTER', 'DINNER', 'WORK', 'IN', 'AN', 'EXEMPLARY', 'MANNER', 'HAD', 'CLEANED', 'HERSELF', 'WITH', 'GREAT', 'DISPATCH', 'AND', 'NOW', 'CAME', 'TO', 'ASK', 'SUBMISSIVELY', 'IF', 'SHE', 'SHOULD', 'SIT', 'DOWN', 'TO', 'HER', 'SPINNING', 'TILL', 'MILKING', 'TIME'] +2094-142345-0016-324: ref=['SPINNING', 'INDEED'] +2094-142345-0016-324: hyp=['SPINNING', 'INDEED'] +2094-142345-0017-325: ref=['I', 'NEVER', 'KNEW', 'YOUR', 'EQUALS', 'FOR', 'GALLOWSNESS'] +2094-142345-0017-325: hyp=['I', 'NEVER', 'KNEW', 'YOUR', 'EQUALS', 'FOR', 'GALLOWSNESS'] +2094-142345-0018-326: ref=['WHO', 'TAUGHT', 'YOU', 'TO', 'SCRUB', 'A', 'FLOOR', 'I', 'SHOULD', 'LIKE', 'TO', 'KNOW'] +2094-142345-0018-326: hyp=['WHO', 'TAUGHT', 'YOU', 'TO', 'SCRUB', 'A', 'FLOOR', 'I', 'SHOULD', 'LIKE', 'TO', 'KNOW'] +2094-142345-0019-327: ref=['COMB', 'THE', 'WOOL', 'FOR', 'THE', 'WHITTAWS', 'INDEED'] +2094-142345-0019-327: hyp=['COMB', 'THE', 'WOOL', 'FOR', 'THE', 'WIDOWS', 'INDEED'] +2094-142345-0020-328: ref=["THAT'S", 'WHAT', "YOU'D", 'LIKE', 'TO', 'BE', 'DOING', 'IS', 'IT'] +2094-142345-0020-328: hyp=["THAT'S", 'WHAT', "YOU'D", 'LIKE', 'TO', 'BE', 'DOING', 'IS', 'IT'] +2094-142345-0021-329: ref=["THAT'S", 'THE', 'WAY', 'WITH', 'YOU', "THAT'S", 'THE', 'ROAD', "YOU'D", 'ALL', 'LIKE', 'TO', 'GO', 'HEADLONGS', 'TO', 'RUIN'] +2094-142345-0021-329: hyp=["THAT'S", 'THE', 'WAY', 'WITH', 'YOU', "THAT'S", 'THE', 'ROAD', "YOU'D", 'ALL', 'LIKE', 'TO', 'GO', 'HEADLONGS', 'TO', 'RUIN'] +2094-142345-0022-330: ref=['MISTER', "OTTLEY'S", 'INDEED'] +2094-142345-0022-330: hyp=['MISTER', "OUTLEY'S", 'INDEED'] +2094-142345-0023-331: ref=["YOU'RE", 'A', 'RARE', 'UN', 'FOR', 'SITTING', 'DOWN', 'TO', 'YOUR', 'WORK', 'A', 'LITTLE', 'WHILE', 'AFTER', "IT'S", 'TIME', 'TO', 'PUT', 'BY'] +2094-142345-0023-331: hyp=['YOU', 'ARE', 'A', 'RARE', 'AND', 'PROCEEDING', 'DOWN', 'TO', 'YOUR', 'WORK', 'A', 'LITTLE', 'WHILE', 'AFTER', 'ITS', 'TIME', 'TO', 'PUT', 'BY'] +2094-142345-0024-332: ref=['MUNNY', 'MY', "IRON'S", 'TWITE', 'TOLD', 'PEASE', 'PUT', 'IT', 'DOWN', 'TO', 'WARM'] +2094-142345-0024-332: hyp=['MONEY', 'MY', 'IRON', 'STRIKE', 'TOLD', 'PEASE', 'PUT', 'IT', 'DOWN', 'TO', 'WARM'] +2094-142345-0025-333: ref=['COLD', 'IS', 'IT', 'MY', 'DARLING', 'BLESS', 'YOUR', 'SWEET', 'FACE'] +2094-142345-0025-333: hyp=['COLD', 'IS', 'IT', 'MY', 'DARLING', 'BLESS', 'YOUR', 'SWEET', 'FACE'] +2094-142345-0026-334: ref=["SHE'S", 'GOING', 'TO', 'PUT', 'THE', 'IRONING', 'THINGS', 'AWAY'] +2094-142345-0026-334: hyp=["SHE'S", 'GOING', 'TO', 'PUT', 'THE', 'IRONING', 'THINGS', 'AWAY'] +2094-142345-0027-335: ref=['MUNNY', 'I', 'TOULD', 'IKE', 'TO', 'DO', 'INTO', 'DE', 'BARN', 'TO', 'TOMMY', 'TO', 'SEE', 'DE', 'WHITTAWD'] +2094-142345-0027-335: hyp=['MONEY', 'I', 'DID', 'LIKE', 'TO', 'DO', 'INTO', 'THE', 'BARN', 'TO', 'TOMMY', 'TO', 'SEE', 
'THE', 'WID', 'ODD'] +2094-142345-0028-336: ref=['NO', 'NO', 'NO', 'TOTTY', 'UD', 'GET', 'HER', 'FEET', 'WET', 'SAID', 'MISSUS', 'POYSER', 'CARRYING', 'AWAY', 'HER', 'IRON'] +2094-142345-0028-336: hyp=['NO', 'NO', 'NO', 'TOTTY', 'HAD', 'GET', 'HER', 'FEET', 'WET', 'SAID', 'MISSUS', 'POYSER', 'CARRYING', 'AWAY', 'HER', 'IRON'] +2094-142345-0029-337: ref=['DID', 'EVER', 'ANYBODY', 'SEE', 'THE', 'LIKE', 'SCREAMED', 'MISSUS', 'POYSER', 'RUNNING', 'TOWARDS', 'THE', 'TABLE', 'WHEN', 'HER', 'EYE', 'HAD', 'FALLEN', 'ON', 'THE', 'BLUE', 'STREAM'] +2094-142345-0029-337: hyp=['DID', 'EVER', 'ANYBODY', 'SEE', 'THE', 'LIKE', 'SCREAMED', 'MISSUS', 'POYSER', 'RUNNING', 'TOWARDS', 'THE', 'TABLE', 'WHEN', 'HER', 'EYE', 'HAD', 'FALLEN', 'ON', 'THE', 'BLUE', 'STREAM'] +2094-142345-0030-338: ref=['TOTTY', 'HOWEVER', 'HAD', 'DESCENDED', 'FROM', 'HER', 'CHAIR', 'WITH', 'GREAT', 'SWIFTNESS', 'AND', 'WAS', 'ALREADY', 'IN', 'RETREAT', 'TOWARDS', 'THE', 'DAIRY', 'WITH', 'A', 'SORT', 'OF', 'WADDLING', 'RUN', 'AND', 'AN', 'AMOUNT', 'OF', 'FAT', 'ON', 'THE', 'NAPE', 'OF', 'HER', 'NECK', 'WHICH', 'MADE', 'HER', 'LOOK', 'LIKE', 'THE', 'METAMORPHOSIS', 'OF', 'A', 'WHITE', 'SUCKLING', 'PIG'] +2094-142345-0030-338: hyp=['TOTTY', 'HOWEVER', 'HAD', 'DESCENDED', 'FROM', 'HER', 'CHAIR', 'WITH', 'GREAT', 'SWIFTNESS', 'AND', 'WAS', 'ALREADY', 'IN', 'RETREAT', 'TOWARDS', 'THE', 'DAIRY', 'WITH', 'A', 'SORT', 'OF', 'WADDLING', 'RUN', 'AND', 'AN', 'AMOUNT', 'OF', 'FAT', 'ON', 'THE', 'NAPE', 'OF', 'HER', 'NECK', 'WHICH', 'MADE', 'HER', 'LOOK', 'LIKE', 'THE', 'METAMORPHOSIS', 'OF', 'A', 'WHITE', 'SUCKLING', 'PIG'] +2094-142345-0031-339: ref=['AND', 'SHE', 'WAS', 'VERY', 'FOND', 'OF', 'YOU', 'TOO', 'AUNT', 'RACHEL'] +2094-142345-0031-339: hyp=['AND', 'SHE', 'WAS', 'VERY', 'FOND', 'OF', 'YOU', 'TOO', 'AUNT', 'RACHEL'] +2094-142345-0032-340: ref=['I', 'OFTEN', 'HEARD', 'HER', 'TALK', 'OF', 'YOU', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'WAY'] +2094-142345-0032-340: hyp=['I', 'OFTEN', 'HEARD', 'HER', 'TALK', 'OF', 'YOU', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'WAY'] +2094-142345-0033-341: ref=['WHEN', 'SHE', 'HAD', 'THAT', 'BAD', 'ILLNESS', 'AND', 'I', 'WAS', 'ONLY', 'ELEVEN', 'YEARS', 'OLD', 'SHE', 'USED', 'TO', 'SAY', "YOU'LL", 'HAVE', 'A', 'FRIEND', 'ON', 'EARTH', 'IN', 'YOUR', 'AUNT', 'RACHEL', 'IF', "I'M", 'TAKEN', 'FROM', 'YOU', 'FOR', 'SHE', 'HAS', 'A', 'KIND', 'HEART', 'AND', "I'M", 'SURE', "I'VE", 'FOUND', 'IT', 'SO'] +2094-142345-0033-341: hyp=['WHEN', 'SHE', 'HAD', 'THAT', 'BAN', 'ILLNESS', 'AND', 'I', 'WAS', 'ONLY', 'ELEVEN', 'YEARS', 'OLD', 'SHE', 'USED', 'TO', 'SAY', "YOU'LL", 'HAVE', 'A', 'FRIEND', 'ON', 'EARTH', 'IN', 'YOUR', 'AUNT', 'RACHEL', 'IF', "I'M", 'TAKEN', 'FROM', 'YOU', 'FOR', 'SHE', 'HAS', 'A', 'KIND', 'HEART', 'AND', "I'M", 'SURE', "I'VE", 'FOUND', 'IT', 'SO'] +2094-142345-0034-342: ref=['AND', "THERE'S", 'LINEN', 'IN', 'THE', 'HOUSE', 'AS', 'I', 'COULD', 'WELL', 'SPARE', 'YOU', 'FOR', "I'VE", 'GOT', 'LOTS', 'O', 'SHEETING', 'AND', 'TABLE', 'CLOTHING', 'AND', 'TOWELLING', 'AS', "ISN'T", 'MADE', 'UP'] +2094-142345-0034-342: hyp=['AND', "THERE'S", 'LINEN', 'IN', 'THE', 'HOUSE', 'AS', 'I', 'COULD', 'WELL', 'SPARE', 'YOU', 'FOR', 'I', 'GOT', 'LOTS', 'OF', 'SHEETING', 'AND', 'TABLE', 'CLOTHING', 'AND', 'TOWELINGS', "ISN'T", 'MADE', 'UP'] +2094-142345-0035-343: ref=['BUT', 'NOT', 'MORE', 'THAN', "WHAT'S", 'IN', 'THE', 'BIBLE', 'AUNT', 'SAID', 'DINAH'] +2094-142345-0035-343: hyp=['BUT', 'NOT', 'MORE', 'THAN', "WHAT'S", 'IN', 'THE', 'BIBLE', 'AND', 'SAID', 'DINAH'] +2094-142345-0036-344: ref=['NAY', 'DEAR', 'AUNT', 'YOU', 
'NEVER', 'HEARD', 'ME', 'SAY', 'THAT', 'ALL', 'PEOPLE', 'ARE', 'CALLED', 'TO', 'FORSAKE', 'THEIR', 'WORK', 'AND', 'THEIR', 'FAMILIES'] +2094-142345-0036-344: hyp=['NAY', 'DEAR', 'AUNT', 'YOU', 'NEVER', 'HEARD', 'ME', 'SAY', 'THAT', 'ALL', 'PEOPLE', 'ARE', 'CALLED', 'TO', 'FORSAKE', 'THEIR', 'WORK', 'AND', 'THEIR', 'FAMILIES'] +2094-142345-0037-345: ref=['WE', 'CAN', 'ALL', 'BE', 'SERVANTS', 'OF', 'GOD', 'WHEREVER', 'OUR', 'LOT', 'IS', 'CAST', 'BUT', 'HE', 'GIVES', 'US', 'DIFFERENT', 'SORTS', 'OF', 'WORK', 'ACCORDING', 'AS', 'HE', 'FITS', 'US', 'FOR', 'IT', 'AND', 'CALLS', 'US', 'TO', 'IT'] +2094-142345-0037-345: hyp=['WE', 'CAN', 'ALL', 'BE', 'SERVANTS', 'OF', 'GOD', 'WHEREVER', 'OUR', 'LOT', 'IS', 'CAST', 'BUT', 'HE', 'GIVES', 'US', 'DIFFERENT', 'SORTS', 'OF', 'WORK', 'ACCORDING', 'AS', 'HE', 'FITS', 'US', 'FOR', 'IT', 'AND', 'CALLS', 'US', 'TO', 'IT'] +2094-142345-0038-346: ref=['I', 'CAN', 'NO', 'MORE', 'HELP', 'SPENDING', 'MY', 'LIFE', 'IN', 'TRYING', 'TO', 'DO', 'WHAT', 'I', 'CAN', 'FOR', 'THE', 'SOULS', 'OF', 'OTHERS', 'THAN', 'YOU', 'COULD', 'HELP', 'RUNNING', 'IF', 'YOU', 'HEARD', 'LITTLE', 'TOTTY', 'CRYING', 'AT', 'THE', 'OTHER', 'END', 'OF', 'THE', 'HOUSE', 'THE', 'VOICE', 'WOULD', 'GO', 'TO', 'YOUR', 'HEART', 'YOU', 'WOULD', 'THINK', 'THE', 'DEAR', 'CHILD', 'WAS', 'IN', 'TROUBLE', 'OR', 'IN', 'DANGER', 'AND', 'YOU', "COULDN'T", 'REST', 'WITHOUT', 'RUNNING', 'TO', 'HELP', 'HER', 'AND', 'COMFORT', 'HER'] +2094-142345-0038-346: hyp=['I', 'CAN', 'NO', 'MORE', 'HELP', 'SPENDING', 'MY', 'LIFE', 'IN', 'TRYING', 'TO', 'DO', 'WHAT', 'I', 'CAN', 'FOR', 'THE', 'SOULS', 'OF', 'OTHERS', 'THEN', 'YOU', 'COULD', 'HELP', 'RUNNING', 'IF', 'YOU', 'HEARD', 'LITTLE', 'TOTTY', 'CRYING', 'AT', 'THE', 'OTHER', 'END', 'OF', 'THE', 'HOUSE', 'THE', 'VOICE', 'WOULD', 'GO', 'TO', 'YOUR', 'HEART', 'YOU', 'WOULD', 'THINK', 'THE', 'DEAR', 'CHILD', 'WAS', 'IN', 'TROUBLE', 'OR', 'IN', 'DANGER', 'AND', 'YOU', "COULDN'T", 'REST', 'WITHOUT', 'RUNNING', 'TO', 'HELP', 'HER', 'AND', 'COMFORT', 'HER'] +2094-142345-0039-347: ref=["I'VE", 'STRONG', 'ASSURANCE', 'THAT', 'NO', 'EVIL', 'WILL', 'HAPPEN', 'TO', 'YOU', 'AND', 'MY', 'UNCLE', 'AND', 'THE', 'CHILDREN', 'FROM', 'ANYTHING', "I'VE", 'DONE'] +2094-142345-0039-347: hyp=["I'VE", 'STRONG', 'ASSURANCE', 'THAT', 'NO', 'EVIL', 'WILL', 'HAPPEN', 'TO', 'YOU', 'AND', 'MY', 'UNCLE', 'AND', 'THE', 'CHILDREN', 'FROM', 'ANYTHING', 'I', 'HAVE', 'DONE'] +2094-142345-0040-348: ref=['I', "DIDN'T", 'PREACH', 'WITHOUT', 'DIRECTION'] +2094-142345-0040-348: hyp=['I', "DIDN'T", 'PREACH', 'WITHOUT', 'DIRECTION'] +2094-142345-0041-349: ref=['DIRECTION'] +2094-142345-0041-349: hyp=['DIRECTION'] +2094-142345-0042-350: ref=['I', 'HANNA', 'COMMON', 'PATIENCE', 'WITH', 'YOU'] +2094-142345-0042-350: hyp=['I', 'HAD', 'A', 'COMMON', 'PATIENCE', 'WITH', 'YOU'] +2094-142345-0043-351: ref=['BY', 'THIS', 'TIME', 'THE', 'TWO', 'GENTLEMEN', 'HAD', 'REACHED', 'THE', 'PALINGS', 'AND', 'HAD', 'GOT', 'DOWN', 'FROM', 'THEIR', 'HORSES', 'IT', 'WAS', 'PLAIN', 'THEY', 'MEANT', 'TO', 'COME', 'IN'] +2094-142345-0043-351: hyp=['BY', 'THIS', 'TIME', 'THE', 'TWO', 'GENTLEMEN', 'HAD', 'REACHED', 'THE', 'PALINGS', 'AND', 'HAD', 'GOT', 'DOWN', 'FROM', 'THEIR', 'HORSES', 'IT', 'WAS', 'PLAIN', 'THEY', 'MEANT', 'TO', 'COME', 'IN'] +2094-142345-0044-352: ref=['SAID', 'MISTER', 'IRWINE', 'WITH', 'HIS', 'STATELY', 'CORDIALITY'] +2094-142345-0044-352: hyp=['SAID', 'MISTER', 'IRWINE', 'WITH', 'HIS', 'STATELY', 'CORDIALITY'] +2094-142345-0045-353: ref=['OH', 'SIR', "DON'T", 'MENTION', 'IT', 'SAID', 'MISSUS', 'POYSER'] 
+2094-142345-0045-353: hyp=['OH', 'SIR', "DON'T", 'MENTION', 'IT', 'SAID', 'MISSUS', 'POYSER'] +2094-142345-0046-354: ref=['I', 'DELIGHT', 'IN', 'YOUR', 'KITCHEN'] +2094-142345-0046-354: hyp=['I', 'DELIGHT', 'IN', 'YOUR', 'KITCHEN'] +2094-142345-0047-355: ref=['POYSER', 'IS', 'NOT', 'AT', 'HOME', 'IS', 'HE'] +2094-142345-0047-355: hyp=['POYSER', 'IS', 'NOT', 'AT', 'HOME', 'IS', 'HE'] +2094-142345-0048-356: ref=['SAID', 'CAPTAIN', 'DONNITHORNE', 'SEATING', 'HIMSELF', 'WHERE', 'HE', 'COULD', 'SEE', 'ALONG', 'THE', 'SHORT', 'PASSAGE', 'TO', 'THE', 'OPEN', 'DAIRY', 'DOOR'] +2094-142345-0048-356: hyp=['SAID', 'CAPTAIN', 'DONNITHORNE', 'SITTING', 'HIMSELF', 'WHERE', 'HE', 'COULD', 'SEE', 'ALONG', 'THE', 'SHORT', 'PASSAGE', 'TO', 'THE', 'OPEN', 'DAIRY', 'DOOR'] +2094-142345-0049-357: ref=['NO', 'SIR', 'HE', "ISN'T", "HE'S", 'GONE', 'TO', 'ROSSETER', 'TO', 'SEE', 'MISTER', 'WEST', 'THE', 'FACTOR', 'ABOUT', 'THE', 'WOOL'] +2094-142345-0049-357: hyp=['NO', 'SIR', 'HE', "ISN'T", "HE'S", 'GONE', 'TO', 'ROSSOTER', 'TO', 'SEE', 'MISTER', 'WEST', 'THE', 'FACTOR', 'ABOUT', 'THE', 'WOOL'] +2094-142345-0050-358: ref=['BUT', "THERE'S", 'FATHER', 'THE', 'BARN', 'SIR', 'IF', "HE'D", 'BE', 'OF', 'ANY', 'USE'] +2094-142345-0050-358: hyp=['BUT', "THERE'S", 'FATHER', 'IN', 'BARN', 'SIR', 'IF', "HE'D", 'BE', 'OF', 'ANY', 'USE'] +2094-142345-0051-359: ref=['NO', 'THANK', 'YOU', "I'LL", 'JUST', 'LOOK', 'AT', 'THE', 'WHELPS', 'AND', 'LEAVE', 'A', 'MESSAGE', 'ABOUT', 'THEM', 'WITH', 'YOUR', 'SHEPHERD'] +2094-142345-0051-359: hyp=['NO', 'THANK', 'YOU', "I'LL", 'JUST', 'LOOK', 'AT', 'THE', 'WHELMS', 'AND', 'LEAVE', 'A', 'MESSAGE', 'ABOUT', 'THEM', 'WITH', 'YOUR', 'SHEPHERD'] +2094-142345-0052-360: ref=['I', 'MUST', 'COME', 'ANOTHER', 'DAY', 'AND', 'SEE', 'YOUR', 'HUSBAND', 'I', 'WANT', 'TO', 'HAVE', 'A', 'CONSULTATION', 'WITH', 'HIM', 'ABOUT', 'HORSES'] +2094-142345-0052-360: hyp=['I', 'MUST', 'COME', 'ANOTHER', 'DAY', 'AND', 'SEE', 'YOUR', 'HUSBAND', 'I', 'WANT', 'TO', 'HAVE', 'A', 'CONSULTATION', 'WITH', 'HIM', 'ABOUT', 'HORSES'] +2094-142345-0053-361: ref=['FOR', 'IF', "HE'S", 'ANYWHERE', 'ON', 'THE', 'FARM', 'WE', 'CAN', 'SEND', 'FOR', 'HIM', 'IN', 'A', 'MINUTE'] +2094-142345-0053-361: hyp=['FOR', 'IF', "HE'S", 'ANYWHERE', 'ON', 'THE', 'FARM', 'WE', 'CAN', 'SEND', 'FOR', 'HIM', 'IN', 'A', 'MINUTE'] +2094-142345-0054-362: ref=['OH', 'SIR', 'SAID', 'MISSUS', 'POYSER', 'RATHER', 'ALARMED', 'YOU', "WOULDN'T", 'LIKE', 'IT', 'AT', 'ALL'] +2094-142345-0054-362: hyp=['OH', 'SIR', 'SAID', 'MISSUS', 'POYSER', 'RATHER', 'ALARMED', 'YOU', "WOULDN'T", 'LIKE', 'IT', 'AT', 'ALL'] +2094-142345-0055-363: ref=['BUT', 'YOU', 'KNOW', 'MORE', 'ABOUT', 'THAT', 'THAN', 'I', 'DO', 'SIR'] +2094-142345-0055-363: hyp=['BUT', 'YOU', 'KNOW', 'MORE', 'ABOUT', 'THAT', 'THAN', 'I', 'DO', 'SIR'] +2094-142345-0056-364: ref=['I', 'THINK', 'I', 'SHOULD', 'BE', 'DOING', 'YOU', 'A', 'SERVICE', 'TO', 'TURN', 'YOU', 'OUT', 'OF', 'SUCH', 'A', 'PLACE'] +2094-142345-0056-364: hyp=['I', 'THINK', 'I', 'SHOULD', 'BE', 'DOING', 'YOU', 'A', 'SERVICE', 'TO', 'TURN', 'YOU', 'OUT', 'OF', 'SUCH', 'A', 'PLACE'] +2094-142345-0057-365: ref=['I', 'KNOW', 'HIS', 'FARM', 'IS', 'IN', 'BETTER', 'ORDER', 'THAN', 'ANY', 'OTHER', 'WITHIN', 'TEN', 'MILES', 'OF', 'US', 'AND', 'AS', 'FOR', 'THE', 'KITCHEN', 'HE', 'ADDED', 'SMILING', 'I', "DON'T", 'BELIEVE', "THERE'S", 'ONE', 'IN', 'THE', 'KINGDOM', 'TO', 'BEAT', 'IT'] +2094-142345-0057-365: hyp=['I', 'KNOWS', 'FARM', 'IS', 'IN', 'BETTER', 'ORDER', 'THAN', 'ANY', 'OTHER', 'WITHIN', 'TEN', 'MILES', 'OF', 'US', 'AND', 'AS', 'FOR', 
'THE', 'KITCHEN', 'HE', 'ADDED', 'SMILING', 'I', "DON'T", 'BELIEVE', "THERE'S", 'ONE', 'IN', 'THE', 'KINGDOM', 'TO', 'BEAT', 'IT'] +2094-142345-0058-366: ref=['BY', 'THE', 'BY', "I'VE", 'NEVER', 'SEEN', 'YOUR', 'DAIRY', 'I', 'MUST', 'SEE', 'YOUR', 'DAIRY', 'MISSUS', 'POYSER'] +2094-142345-0058-366: hyp=['BY', 'THE', 'BYE', 'I', 'HAVE', 'NEVER', 'SEEN', 'YOUR', 'DAIRY', 'I', 'MUST', 'SEE', 'YOUR', 'DEARIE', 'MISSUS', 'POYSER'] +2094-142345-0059-367: ref=['THIS', 'MISSUS', 'POYSER', 'SAID', 'BLUSHING', 'AND', 'BELIEVING', 'THAT', 'THE', 'CAPTAIN', 'WAS', 'REALLY', 'INTERESTED', 'IN', 'HER', 'MILK', 'PANS', 'AND', 'WOULD', 'ADJUST', 'HIS', 'OPINION', 'OF', 'HER', 'TO', 'THE', 'APPEARANCE', 'OF', 'HER', 'DAIRY'] +2094-142345-0059-367: hyp=['THIS', 'MISSUS', 'POYSER', 'SAID', 'BLUSHING', 'AND', 'BELIEVING', 'THAT', 'THE', 'CAPTAIN', 'WAS', 'REALLY', 'INTERESTED', 'IN', 'HER', 'MILK', 'PANS', 'AND', 'WOULD', 'ADJUST', 'HIS', 'OPINION', 'OF', 'HER', 'TO', 'THE', 'APPEARANCE', 'OF', 'HER', 'DAIRY'] +2094-142345-0060-368: ref=['OH', "I'VE", 'NO', 'DOUBT', "IT'S", 'IN', 'CAPITAL', 'ORDER'] +2094-142345-0060-368: hyp=['OH', "I'VE", 'NO', 'DOUBT', "IT'S", 'IN', 'CAPITAL', 'ORDER'] +2300-131720-0000-1816: ref=['THE', 'PARIS', 'PLANT', 'LIKE', 'THAT', 'AT', 'THE', 'CRYSTAL', 'PALACE', 'WAS', 'A', 'TEMPORARY', 'EXHIBIT'] +2300-131720-0000-1816: hyp=['THE', 'PARIS', 'PLANT', 'LIKE', 'THAT', 'AT', 'THE', 'CRYSTAL', 'PALACE', 'WAS', 'A', 'TEMPORARY', 'EXHIBIT'] +2300-131720-0001-1817: ref=['THE', 'LONDON', 'PLANT', 'WAS', 'LESS', 'TEMPORARY', 'BUT', 'NOT', 'PERMANENT', 'SUPPLYING', 'BEFORE', 'IT', 'WAS', 'TORN', 'OUT', 'NO', 'FEWER', 'THAN', 'THREE', 'THOUSAND', 'LAMPS', 'IN', 'HOTELS', 'CHURCHES', 'STORES', 'AND', 'DWELLINGS', 'IN', 'THE', 'VICINITY', 'OF', 'HOLBORN', 'VIADUCT'] +2300-131720-0001-1817: hyp=['THE', 'LONDON', 'PLANT', 'WAS', 'LESS', 'TEMPORARY', 'BUT', 'NOT', 'PERMANENT', 'SUPPLYING', 'BEFORE', 'IT', 'WAS', 'TORN', 'OUT', 'NO', 'FEWER', 'THAN', 'THREE', 'THOUSAND', 'LAMPS', 'IN', 'HOTELS', 'CHURCHES', 'STORES', 'AND', 'DWELLINGS', 'IN', 'THE', 'VICINITY', 'OF', 'HOLBORN', 'VIADUC'] +2300-131720-0002-1818: ref=['THERE', 'MESSRS', 'JOHNSON', 'AND', 'HAMMER', 'PUT', 'INTO', 'PRACTICE', 'MANY', 'OF', 'THE', 'IDEAS', 'NOW', 'STANDARD', 'IN', 'THE', 'ART', 'AND', 'SECURED', 'MUCH', 'USEFUL', 'DATA', 'FOR', 'THE', 'WORK', 'IN', 'NEW', 'YORK', 'OF', 'WHICH', 'THE', 'STORY', 'HAS', 'JUST', 'BEEN', 'TOLD'] +2300-131720-0002-1818: hyp=['THERE', 'MESSRS', 'JOHNSON', 'AND', 'HAMMER', 'PUT', 'INTO', 'PRACTICE', 'MANY', 'OF', 'THE', 'IDEAS', 'NOW', 'STANDARD', 'IN', 'THE', 'ART', 'AND', 'SECURED', 'MUCH', 'USEFUL', 'DATA', 'FOR', 'THE', 'WORK', 'IN', 'NEW', 'YORK', 'OF', 'WHICH', 'THE', 'STORY', 'HAS', 'JUST', 'BEEN', 'TOLD'] +2300-131720-0003-1819: ref=['THE', 'DYNAMO', 'ELECTRIC', 'MACHINE', 'THOUGH', 'SMALL', 'WAS', 'ROBUST', 'FOR', 'UNDER', 'ALL', 'THE', 'VARYING', 'SPEEDS', 'OF', 'WATER', 'POWER', 'AND', 'THE', 'VICISSITUDES', 'OF', 'THE', 'PLANT', 'TO', 'WHICH', 'IT', 'BELONGED', 'IT', 'CONTINUED', 'IN', 'ACTIVE', 'USE', 'UNTIL', 'EIGHTEEN', 'NINETY', 'NINE', 'SEVENTEEN', 'YEARS'] +2300-131720-0003-1819: hyp=['THE', 'DYNAMO', 'ELECTRIC', 'MACHINE', 'THOUGH', 'SMALL', 'WAS', 'ROBUST', 'FOR', 'UNDER', 'ALL', 'THE', 'VARYING', 'SPEEDS', 'OF', 'WATER', 'POWER', 'AND', 'THE', 'VICISSITUDES', 'OF', 'THE', 'PLANT', 'TO', 'WHICH', 'IT', 'BELONGED', 'IT', 'CONTINUED', 'IN', 'ACTIVE', 'USE', 'UNTIL', 'EIGHTEEN', 'NINETY', 'NINE', 'SEVENTEEN', 'YEARS'] +2300-131720-0004-1820: ref=['OWING', 'TO', 'HIS', 
'INSISTENCE', 'ON', 'LOW', 'PRESSURE', 'DIRECT', 'CURRENT', 'FOR', 'USE', 'IN', 'DENSELY', 'POPULATED', 'DISTRICTS', 'AS', 'THE', 'ONLY', 'SAFE', 'AND', 'TRULY', 'UNIVERSAL', 'PROFITABLE', 'WAY', 'OF', 'DELIVERING', 'ELECTRICAL', 'ENERGY', 'TO', 'THE', 'CONSUMERS', 'EDISON', 'HAS', 'BEEN', 'FREQUENTLY', 'SPOKEN', 'OF', 'AS', 'AN', 'OPPONENT', 'OF', 'THE', 'ALTERNATING', 'CURRENT'] +2300-131720-0004-1820: hyp=['OWING', 'TO', 'HIS', 'INSISTENCE', 'ON', 'LOW', 'PRESSURE', 'DIRECT', 'CURRENT', 'FOR', 'USE', 'IN', 'DENSELY', 'POPULATED', 'DISTRICTS', 'AS', 'THE', 'ONLY', 'SAFE', 'AND', 'TRULY', 'UNIVERSAL', 'PROFITABLE', 'WAY', 'OF', 'DELIVERING', 'ELECTRICAL', 'ENERGY', 'TO', 'THE', 'CONSUMERS', 'EDISON', 'HAS', 'BEEN', 'FREQUENTLY', 'SPOKEN', 'OF', 'AS', 'AN', 'OPPONENT', 'OF', 'THE', 'ALTERNATING', 'CURRENT'] +2300-131720-0005-1821: ref=['WHY', 'IF', 'WE', 'ERECT', 'A', 'STATION', 'AT', 'THE', 'FALLS', 'IT', 'IS', 'A', 'GREAT', 'ECONOMY', 'TO', 'GET', 'IT', 'UP', 'TO', 'THE', 'CITY'] +2300-131720-0005-1821: hyp=['WHY', 'IF', 'WE', 'ERECT', 'A', 'STATION', 'AT', 'THE', 'FALLS', 'IT', 'IS', 'A', 'GREAT', 'ECONOMY', 'TO', 'GET', 'IT', 'UP', 'TO', 'THE', 'CITY'] +2300-131720-0006-1822: ref=['THERE', 'SEEMS', 'NO', 'GOOD', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'IT', 'WILL', 'CHANGE'] +2300-131720-0006-1822: hyp=['THERE', 'SEEMS', 'NO', 'GOOD', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'IT', 'WILL', 'CHANGE'] +2300-131720-0007-1823: ref=['BROAD', 'AS', 'THE', 'PRAIRIES', 'AND', 'FREE', 'IN', 'THOUGHT', 'AS', 'THE', 'WINDS', 'THAT', 'SWEEP', 'THEM', 'HE', 'IS', 'IDIOSYNCRATICALLY', 'OPPOSED', 'TO', 'LOOSE', 'AND', 'WASTEFUL', 'METHODS', 'TO', 'PLANS', 'OF', 'EMPIRE', 'THAT', 'NEGLECT', 'THE', 'POOR', 'AT', 'THE', 'GATE'] +2300-131720-0007-1823: hyp=['BROAD', 'AS', 'THE', 'PRAIRIES', 'AND', 'FREE', 'IN', 'THOUGHT', 'AS', 'THE', 'WINDS', 'THAT', 'SWEPT', 'THEM', 'HE', 'IS', 'IDIO', 'SENCRATICALLY', 'OPPOSED', 'TO', 'LOOSE', 'AND', 'WASTEFUL', 'METHODS', 'TO', 'PLANS', 'OF', 'EMPIRE', 'THAT', 'NEGLECT', 'THE', 'POOR', 'AT', 'THE', 'GATE'] +2300-131720-0008-1824: ref=['EVERYTHING', 'HE', 'HAS', 'DONE', 'HAS', 'BEEN', 'AIMED', 'AT', 'THE', 'CONSERVATION', 'OF', 'ENERGY', 'THE', 'CONTRACTION', 'OF', 'SPACE', 'THE', 'INTENSIFICATION', 'OF', 'CULTURE'] +2300-131720-0008-1824: hyp=['EVERYTHING', 'HE', 'HAS', 'DONE', 'HAS', 'BEEN', 'AIMED', 'AT', 'THE', 'CONSERVATION', 'OF', 'ENERGY', 'THE', 'CONTRACTION', 'OF', 'SPACE', 'THE', 'INTENSIFICATION', 'OF', 'CULTURE'] +2300-131720-0009-1825: ref=['FOR', 'SOME', 'YEARS', 'IT', 'WAS', 'NOT', 'FOUND', 'FEASIBLE', 'TO', 'OPERATE', 'MOTORS', 'ON', 'ALTERNATING', 'CURRENT', 'CIRCUITS', 'AND', 'THAT', 'REASON', 'WAS', 'OFTEN', 'URGED', 'AGAINST', 'IT', 'SERIOUSLY'] +2300-131720-0009-1825: hyp=['FOR', 'SOME', 'YEARS', 'IT', 'WAS', 'NOT', 'FOUND', 'FEASIBLE', 'TO', 'OPERATE', 'MOTORS', 'ON', 'ALTERNATING', 'CURRENT', 'CIRCUITS', 'AND', 'THAT', 'REASON', 'WAS', 'OFTEN', 'URGED', 'AGAINST', 'IT', 'SERIOUSLY'] +2300-131720-0010-1826: ref=['IT', 'COULD', 'NOT', 'BE', 'USED', 'FOR', 'ELECTROPLATING', 'OR', 'DEPOSITION', 'NOR', 'COULD', 'IT', 'CHARGE', 'STORAGE', 'BATTERIES', 'ALL', 'OF', 'WHICH', 'ARE', 'EASILY', 'WITHIN', 'THE', 'ABILITY', 'OF', 'THE', 'DIRECT', 'CURRENT'] +2300-131720-0010-1826: hyp=['IT', 'COULD', 'NOT', 'BE', 'USED', 'FOR', 'ELECTROPLATING', 'OR', 'DEPOSITION', 'NOR', 'COULD', 'IT', 'CHARGE', 'STORAGE', 'BATTERIES', 'ALL', 'OF', 'WHICH', 'ARE', 'EASILY', 'WITHIN', 'THE', 'ABILITY', 'OF', 'THE', 'DIRECT', 'CURRENT'] +2300-131720-0011-1827: ref=['BUT', 'WHEN', 
'IT', 'CAME', 'TO', 'BE', 'A', 'QUESTION', 'OF', 'LIGHTING', 'A', 'SCATTERED', 'SUBURB', 'A', 'GROUP', 'OF', 'DWELLINGS', 'ON', 'THE', 'OUTSKIRTS', 'A', 'REMOTE', 'COUNTRY', 'RESIDENCE', 'OR', 'A', 'FARM', 'HOUSE', 'THE', 'ALTERNATING', 'CURRENT', 'IN', 'ALL', 'ELEMENTS', 'SAVE', 'ITS', 'DANGER', 'WAS', 'AND', 'IS', 'IDEAL'] +2300-131720-0011-1827: hyp=['BUT', 'WHEN', 'IT', 'CAME', 'TO', 'BE', 'A', 'QUESTION', 'OF', 'LIGHTING', 'A', 'SCATTERED', 'SUBURB', 'A', 'GROUP', 'OF', 'DWELLINGS', 'ON', 'THE', 'OUTSKIRTS', 'A', 'REMOTE', 'COUNTRY', 'RESIDENCE', 'OR', 'A', 'FARM', 'HOUSE', 'THE', 'ALTERNATING', 'CURRENT', 'IN', 'ALL', 'ELEMENTS', 'SAVE', 'ITS', 'DANGER', 'WAS', 'AND', 'IS', 'IDEAL'] +2300-131720-0012-1828: ref=['EDISON', 'WAS', 'INTOLERANT', 'OF', 'SHAM', 'AND', 'SHODDY', 'AND', 'NOTHING', 'WOULD', 'SATISFY', 'HIM', 'THAT', 'COULD', 'NOT', 'STAND', 'CROSS', 'EXAMINATION', 'BY', 'MICROSCOPE', 'TEST', 'TUBE', 'AND', 'GALVANOMETER'] +2300-131720-0012-1828: hyp=['EDISON', 'WAS', 'INTOLERANT', 'OF', 'SHAM', 'AND', 'SHOTTY', 'AND', 'NOTHING', 'WOULD', 'SATISFY', 'HIM', 'THAT', 'COULD', 'NOT', 'STAND', 'CROSS', 'EXAMINATION', 'BY', 'MICROSCOPE', 'TEST', 'TUBE', 'AND', 'GALVANOMETER'] +2300-131720-0013-1829: ref=['UNLESS', 'HE', 'COULD', 'SECURE', 'AN', 'ENGINE', 'OF', 'SMOOTHER', 'RUNNING', 'AND', 'MORE', 'EXACTLY', 'GOVERNED', 'AND', 'REGULATED', 'THAN', 'THOSE', 'AVAILABLE', 'FOR', 'HIS', 'DYNAMO', 'AND', 'LAMP', 'EDISON', 'REALIZED', 'THAT', 'HE', 'WOULD', 'FIND', 'IT', 'ALMOST', 'IMPOSSIBLE', 'TO', 'GIVE', 'A', 'STEADY', 'LIGHT'] +2300-131720-0013-1829: hyp=['UNLESS', 'HE', 'COULD', 'SECURE', 'AN', 'ENGINE', 'OF', 'SMOOTHER', 'RUNNING', 'AND', 'MORE', 'EXACTLY', 'GOVERN', 'AND', 'REGULATED', 'THAN', 'THOSE', 'AVAILABLE', 'FOR', 'HIS', 'DYNAMO', 'AND', 'LAMP', 'EDISON', 'REALIZED', 'THAT', 'HE', 'WOULD', 'FIND', 'IT', 'ALMOST', 'IMPOSSIBLE', 'TO', 'GIVE', 'A', 'STEADY', 'LIGHT'] +2300-131720-0014-1830: ref=['MISTER', 'EDISON', 'WAS', 'A', 'LEADER', 'FAR', 'AHEAD', 'OF', 'THE', 'TIME'] +2300-131720-0014-1830: hyp=['MISTER', 'EDISON', 'WAS', 'A', 'LEADER', 'FAR', 'AHEAD', 'OF', 'THE', 'TIME'] +2300-131720-0015-1831: ref=['HE', 'OBTAINED', 'THE', 'DESIRED', 'SPEED', 'AND', 'LOAD', 'WITH', 'A', 'FRICTION', 'BRAKE', 'ALSO', 'REGULATOR', 'OF', 'SPEED', 'BUT', 'WAITED', 'FOR', 'AN', 'INDICATOR', 'TO', 'VERIFY', 'IT'] +2300-131720-0015-1831: hyp=['HE', 'OBTAINED', 'THE', 'DESIRED', 'SPEED', 'AND', 'LOAD', 'WITH', 'A', 'FRICTION', 'BREAK', 'ALSO', 'REGULATOR', 'OF', 'SPEED', 'BUT', 'WAITED', 'FOR', 'AN', 'INDICATOR', 'TO', 'VERIFY', 'IT'] +2300-131720-0016-1832: ref=['THEN', 'AGAIN', 'THERE', 'WAS', 'NO', 'KNOWN', 'WAY', 'TO', 'LUBRICATE', 'AN', 'ENGINE', 'FOR', 'CONTINUOUS', 'RUNNING', 'AND', 'MISTER', 'EDISON', 'INFORMED', 'ME', 'THAT', 'AS', 'A', 'MARINE', 'ENGINE', 'STARTED', 'BEFORE', 'THE', 'SHIP', 'LEFT', 'NEW', 'YORK', 'AND', 'CONTINUED', 'RUNNING', 'UNTIL', 'IT', 'REACHED', 'ITS', 'HOME', 'PORT', 'SO', 'AN', 'ENGINE', 'FOR', 'HIS', 'PURPOSES', 'MUST', 'PRODUCE', 'LIGHT', 'AT', 'ALL', 'TIMES'] +2300-131720-0016-1832: hyp=['THEN', 'AGAIN', 'THERE', 'WAS', 'NO', 'KNOWN', 'WAY', 'TO', 'LUBRICADE', 'AN', 'ENGINE', 'FOR', 'CONTINUOUS', 'RUNNING', 'AND', 'MISTER', 'EDISON', 'INFORMED', 'ME', 'THAT', 'AS', 'A', 'MARINE', 'ENGINE', 'STARTED', 'BEFORE', 'THE', 'SHIP', 'LEFT', 'NEW', 'YORK', 'AND', 'CONTINUED', 'RUNNING', 'UNTIL', 'IT', 'REACHED', 'ITS', 'HOME', 'PORT', 'SO', 'AN', 'ENGINE', 'FOR', 'HIS', 'PURPOSES', 'MUST', 'PRODUCE', 'LIGHT', 'AT', 'ALL', 'TIMES'] +2300-131720-0017-1833: 
ref=['EDISON', 'HAD', 'INSTALLED', 'HIS', 'HISTORIC', 'FIRST', 'GREAT', 'CENTRAL', 'STATION', 'SYSTEM', 'IN', 'NEW', 'YORK', 'ON', 'THE', 'MULTIPLE', 'ARC', 'SYSTEM', 'COVERED', 'BY', 'HIS', 'FEEDER', 'AND', 'MAIN', 'INVENTION', 'WHICH', 'RESULTED', 'IN', 'A', 'NOTABLE', 'SAVING', 'IN', 'THE', 'COST', 'OF', 'CONDUCTORS', 'AS', 'AGAINST', 'A', 'STRAIGHT', 'TWO', 'WIRE', 'SYSTEM', 'THROUGHOUT', 'OF', 'THE', 'TREE', 'KIND'] +2300-131720-0017-1833: hyp=['EDISON', 'HAD', 'INSTALLED', 'HIS', 'HISTORIC', 'FIRST', 'GREAT', 'CENTRAL', 'STATION', 'SYSTEM', 'IN', 'NEW', 'YORK', 'ON', 'THE', 'MULTIPLE', 'ARC', 'SYSTEM', 'COVERED', 'BY', 'HIS', 'FEEDER', 'AND', 'MAIN', 'INVENTION', 'WHICH', 'RESULTED', 'IN', 'A', 'NOTABLE', 'SAVING', 'IN', 'THE', 'COST', 'OF', 'CONDUCTORS', 'AS', 'AGAINST', 'A', 'STRAIT', 'TWO', 'WIRE', 'SYSTEM', 'THROUGHOUT', 'OF', 'THE', 'TREE', 'KIND'] +2300-131720-0018-1834: ref=['HE', 'SOON', 'FORESAW', 'THAT', 'STILL', 'GREATER', 'ECONOMY', 'WOULD', 'BE', 'NECESSARY', 'FOR', 'COMMERCIAL', 'SUCCESS', 'NOT', 'ALONE', 'FOR', 'THE', 'LARGER', 'TERRITORY', 'OPENING', 'BUT', 'FOR', 'THE', 'COMPACT', 'DISTRICTS', 'OF', 'LARGE', 'CITIES'] +2300-131720-0018-1834: hyp=['HE', 'SOON', 'FORESAW', 'THAT', 'STILL', 'GREATER', 'ECONOMY', 'WOULD', 'BE', 'NECESSARY', 'FOR', 'COMMERCIAL', 'SUCCESS', 'NOT', 'ALONE', 'FOR', 'THE', 'LARGER', 'TERRITORY', 'OPENING', 'BUT', 'FOR', 'THE', 'COMPACT', 'DISTRICT', 'OF', 'LARGE', 'CITIES'] +2300-131720-0019-1835: ref=['THE', 'STRONG', 'POSITION', 'HELD', 'BY', 'THE', 'EDISON', 'SYSTEM', 'UNDER', 'THE', 'STRENUOUS', 'COMPETITION', 'THAT', 'WAS', 'ALREADY', 'SPRINGING', 'UP', 'WAS', 'ENORMOUSLY', 'IMPROVED', 'BY', 'THE', 'INTRODUCTION', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'AND', 'IT', 'GAVE', 'AN', 'IMMEDIATE', 'IMPETUS', 'TO', 'INCANDESCENT', 'LIGHTING'] +2300-131720-0019-1835: hyp=['THE', 'STRONG', 'POSITION', 'HELD', 'BY', 'THE', 'EDISON', 'SYSTEM', 'UNDER', 'THE', 'STRENUOUS', 'COMPETITION', 'IT', 'WAS', 'ALREADY', 'SPRINGING', 'UP', 'WAS', 'ENORMOUSLY', 'IMPROVED', 'BY', 'THE', 'INTRODUCTION', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'AND', 'HE', 'GAVE', 'AN', 'IMMEDIATE', 'IMPETUS', 'TO', 'INCONDESCENT', 'LIGHTING'] +2300-131720-0020-1836: ref=['IT', 'WAS', 'SPECIALLY', 'SUITED', 'FOR', 'A', 'TRIAL', 'PLANT', 'ALSO', 'IN', 'THE', 'EARLY', 'DAYS', 'WHEN', 'A', 'YIELD', 'OF', 'SIX', 'OR', 'EIGHT', 'LAMPS', 'TO', 'THE', 'HORSE', 'POWER', 'WAS', 'CONSIDERED', 'SUBJECT', 'FOR', 'CONGRATULATION'] +2300-131720-0020-1836: hyp=['IT', 'WAS', 'SPECIALLY', 'SUITED', 'FOR', 'A', 'TRIAL', 'PLANT', 'ALSO', 'IN', 'THE', 'EARLY', 'DAYS', 'WHEN', 'A', 'YIELD', 'OF', 'SIX', 'OR', 'EIGHT', 'LAMPS', 'TO', 'THE', 'HORSE', 'BOWER', 'WAS', 'CONSIDERED', 'SUBJECT', 'FOR', 'CONGRATULATION'] +2300-131720-0021-1837: ref=['THE', 'STREET', 'CONDUCTORS', 'WERE', 'OF', 'THE', 'OVERHEAD', 'POLE', 'LINE', 'CONSTRUCTION', 'AND', 'WERE', 'INSTALLED', 'BY', 'THE', 'CONSTRUCTION', 'COMPANY', 'THAT', 'HAD', 'BEEN', 'ORGANIZED', 'BY', 'EDISON', 'TO', 'BUILD', 'AND', 'EQUIP', 'CENTRAL', 'STATIONS'] +2300-131720-0021-1837: hyp=['THE', 'STREET', 'CONDUCTORS', 'WERE', 'OF', 'THE', 'OVERHEAD', 'POLE', 'LINE', 'CONSTRUCTION', 'AND', 'WERE', 'INSTALLED', 'BY', 'THE', 'CONSTRUCTION', 'COMPANY', 'THAT', 'HAD', 'BEEN', 'ORGANIZED', 'BY', 'EDISON', 'TO', 'BUILD', 'AN', 'EQUIP', 'CENTRAL', 'STATIONS'] +2300-131720-0022-1838: ref=['MEANWHILE', 'HE', 'HAD', 'CALLED', 'UPON', 'ME', 'TO', 'MAKE', 'A', 'REPORT', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'KNOWN', 'IN', 'ENGLAND', 'AS', 'THE', 'HOPKINSON', 
'BOTH', 'DOCTOR', 'JOHN', 'HOPKINSON', 'AND', 'MISTER', 'EDISON', 'BEING', 'INDEPENDENT', 'INVENTORS', 'AT', 'PRACTICALLY', 'THE', 'SAME', 'TIME'] +2300-131720-0022-1838: hyp=['MEANWHILE', 'HE', 'HAD', 'CALLED', 'UPON', 'ME', 'TO', 'MAKE', 'A', 'REPORT', 'OF', 'THE', 'THREE', 'WIRE', 'SYSTEM', 'KNOWN', 'IN', 'ENGLAND', 'AS', 'THE', 'HOPKINSON', 'BOTH', 'DOCTOR', 'JOHN', 'HOPKINSON', 'AND', 'MISTER', 'EDISON', 'BEING', 'INDEPENDENT', 'IN', 'VENORS', 'AT', 'PRACTICALLY', 'THE', 'SAME', 'TIME'] +2300-131720-0023-1839: ref=['I', 'THINK', 'HE', 'WAS', 'PERHAPS', 'MORE', 'APPRECIATIVE', 'THAN', 'I', 'WAS', 'OF', 'THE', 'DISCIPLINE', 'OF', 'THE', 'EDISON', 'CONSTRUCTION', 'DEPARTMENT', 'AND', 'THOUGHT', 'IT', 'WOULD', 'BE', 'WELL', 'FOR', 'US', 'TO', 'WAIT', 'UNTIL', 'THE', 'MORNING', 'OF', 'THE', 'FOURTH', 'BEFORE', 'WE', 'STARTED', 'UP'] +2300-131720-0023-1839: hyp=['I', 'THINK', 'HE', 'WAS', 'PERHAPS', 'MORE', 'APPRECIATIVE', 'THAT', 'I', 'WAS', 'OF', 'THE', 'DISCIPLINE', 'OF', 'THE', 'EDISON', 'CONSTRUCTION', 'DEPARTMENT', 'AND', 'THOUGHT', 'IT', 'WOULD', 'BE', 'WELL', 'FOR', 'US', 'TO', 'WAIT', 'UNTIL', 'THE', 'MORNING', 'OF', 'THE', 'FOURTH', 'BEFORE', 'WE', 'STARTED', 'UP'] +2300-131720-0024-1840: ref=['BUT', 'THE', 'PLANT', 'RAN', 'AND', 'IT', 'WAS', 'THE', 'FIRST', 'THREE', 'WIRE', 'STATION', 'IN', 'THIS', 'COUNTRY'] +2300-131720-0024-1840: hyp=['BUT', 'THE', 'PLANT', 'RAN', 'AND', 'IT', 'WAS', 'THE', 'FIRST', 'THREE', 'WIRE', 'STATION', 'IN', 'THIS', 'COUNTRY'] +2300-131720-0025-1841: ref=['THEY', 'WERE', 'LATER', 'USED', 'AS', 'RESERVE', 'MACHINES', 'AND', 'FINALLY', 'WITH', 'THE', 'ENGINE', 'RETIRED', 'FROM', 'SERVICE', 'AS', 'PART', 'OF', 'THE', 'COLLECTION', 'OF', 'EDISONIA', 'BUT', 'THEY', 'REMAIN', 'IN', 'PRACTICALLY', 'AS', 'GOOD', 'CONDITION', 'AS', 'WHEN', 'INSTALLED', 'IN', 'EIGHTEEN', 'EIGHTY', 'THREE'] +2300-131720-0025-1841: hyp=['THEY', 'WERE', 'LATER', 'USED', 'AS', 'RESERVED', 'MACHINES', 'AND', 'FINALLY', 'WITH', 'THE', 'ENGINE', 'RETIRED', 'FROM', 'SERVICE', 'AS', 'PART', 'OF', 'THE', 'COLLECTION', 'OF', 'EDISONIA', 'BUT', 'THEY', 'REMAIN', 'IN', 'PRACTICALLY', 'AS', 'GOOD', 'CONDITION', 'AS', 'WHEN', 'INSTALLED', 'IN', 'EIGHTEEN', 'EIGHTY', 'THREE'] +2300-131720-0026-1842: ref=['THE', 'ARC', 'LAMP', 'INSTALLED', 'OUTSIDE', 'A', "CUSTOMER'S", 'PREMISES', 'OR', 'IN', 'A', 'CIRCUIT', 'FOR', 'PUBLIC', 'STREET', 'LIGHTING', 'BURNED', 'SO', 'MANY', 'HOURS', 'NIGHTLY', 'SO', 'MANY', 'NIGHTS', 'IN', 'THE', 'MONTH', 'AND', 'WAS', 'PAID', 'FOR', 'AT', 'THAT', 'RATE', 'SUBJECT', 'TO', 'REBATE', 'FOR', 'HOURS', 'WHEN', 'THE', 'LAMP', 'MIGHT', 'BE', 'OUT', 'THROUGH', 'ACCIDENT'] +2300-131720-0026-1842: hyp=['THE', 'ARC', 'LAMP', 'INSTALLED', 'OUTSIDE', 'A', "CUSTOMER'S", 'PREMISES', 'OR', 'IN', 'A', 'CIRCUIT', 'FOR', 'PUBLIC', 'STREET', 'LIGHTING', 'BURNED', 'SO', 'MANY', 'HOURS', 'NIGHTLY', 'SO', 'MANY', 'NIGHTS', 'IN', 'THE', 'MONTH', 'AND', 'WAS', 'PAID', 'FOR', 'AT', 'THAT', 'RATE', 'SUBJECT', 'TO', 'REBATE', 'FOR', 'HOURS', 'WHEN', 'THE', 'LAMP', 'MIGHT', 'BE', 'OUT', 'THROUGH', 'ACCIDENT'] +2300-131720-0027-1843: ref=['EDISON', 'HELD', 'THAT', 'THE', 'ELECTRICITY', 'SOLD', 'MUST', 'BE', 'MEASURED', 'JUST', 'LIKE', 'GAS', 'OR', 'WATER', 'AND', 'HE', 'PROCEEDED', 'TO', 'DEVELOP', 'A', 'METER'] +2300-131720-0027-1843: hyp=['EDISON', 'HELD', 'THAT', 'THE', 'ELECTRICITY', 'SOLD', 'MUST', 'BE', 'MEASURED', 'JUST', 'LIKE', 'GAS', 'OR', 'WATER', 'AND', 'HE', 'PROCEEDED', 'TO', 'DEVELOP', 'A', 'METER'] +2300-131720-0028-1844: ref=['THERE', 'WAS', 'INFINITE', 'SCEPTICISM', 'AROUND', 
'HIM', 'ON', 'THE', 'SUBJECT', 'AND', 'WHILE', 'OTHER', 'INVENTORS', 'WERE', 'ALSO', 'GIVING', 'THE', 'SUBJECT', 'THEIR', 'THOUGHT', 'THE', 'PUBLIC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'ANYTHING', 'SO', 'UTTERLY', 'INTANGIBLE', 'AS', 'ELECTRICITY', 'THAT', 'COULD', 'NOT', 'BE', 'SEEN', 'OR', 'WEIGHED', 'AND', 'ONLY', 'GAVE', 'SECONDARY', 'EVIDENCE', 'OF', 'ITSELF', 'AT', 'THE', 'EXACT', 'POINT', 'OF', 'USE', 'COULD', 'NOT', 'BE', 'BROUGHT', 'TO', 'ACCURATE', 'REGISTRATION'] +2300-131720-0028-1844: hyp=['THERE', 'WAS', 'INFINITE', 'SCEPTICISM', 'AROUND', 'HIM', 'ON', 'THE', 'SUBJECT', 'AND', 'WHILE', 'OTHER', 'INVENTORS', 'WERE', 'ALSO', 'GIVING', 'THE', 'SUBJECT', 'THEIR', 'THOUGHT', 'THE', 'PUBLIC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'ANYTHING', 'SO', 'UTTERLY', 'INTANGIBLE', 'AS', 'ELECTRICITY', 'THAT', 'COULD', 'NOT', 'BE', 'SEEN', 'OR', 'WEIGHED', 'AND', 'ONLY', 'GAVE', 'SECONDARY', 'EVIDENCE', 'OF', 'ITSELF', 'AT', 'THE', 'EXACT', 'POINT', 'OF', 'USE', 'COULD', 'NOT', 'BE', 'BROUGHT', 'TO', 'ACCURATE', 'REGISTRATION'] +2300-131720-0029-1845: ref=['HENCE', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'NO', 'LONGER', 'USED', 'DESPITE', 'ITS', 'EXCELLENT', 'QUALITIES'] +2300-131720-0029-1845: hyp=['HENCE', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'NO', 'LONGER', 'USED', 'DESPITE', 'ITS', 'EXCELLENT', 'QUALITIES'] +2300-131720-0030-1846: ref=['THE', 'PRINCIPLE', 'EMPLOYED', 'IN', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'THAT', 'WHICH', 'EXEMPLIFIES', 'THE', 'POWER', 'OF', 'ELECTRICITY', 'TO', 'DECOMPOSE', 'A', 'CHEMICAL', 'SUBSTANCE'] +2300-131720-0030-1846: hyp=['THE', 'PRINCIPLE', 'EMPLOYED', 'IN', 'THE', 'EDISON', 'ELECTROLYTIC', 'METER', 'IS', 'THAT', 'WHICH', 'EXEMPLIFIES', 'THE', 'POWER', 'OF', 'ELECTRICITY', 'TO', 'DECOMPOSE', 'A', 'CHEMICAL', 'SUBSTANCE'] +2300-131720-0031-1847: ref=['ASSOCIATED', 'WITH', 'THIS', 'SIMPLE', 'FORM', 'OF', 'APPARATUS', 'WERE', 'VARIOUS', 'INGENIOUS', 'DETAILS', 'AND', 'REFINEMENTS', 'TO', 'SECURE', 'REGULARITY', 'OF', 'OPERATION', 'FREEDOM', 'FROM', 'INACCURACY', 'AND', 'IMMUNITY', 'FROM', 'SUCH', 'TAMPERING', 'AS', 'WOULD', 'PERMIT', 'THEFT', 'OF', 'CURRENT', 'OR', 'DAMAGE'] +2300-131720-0031-1847: hyp=['ASSOCIATED', 'WITH', 'THIS', 'SIMPLE', 'FORM', 'OF', 'APPARATUS', 'WERE', 'VARIOUS', 'INGENIOUS', 'DETAILS', 'AND', 'REFINEMENTS', 'TO', 'SECURE', 'REGULARITY', 'OF', 'OPERATION', 'FREEDOM', 'FROM', 'INACCURACY', 'AND', 'IMMUNITY', 'FROM', 'SUCH', 'TAMPERING', 'AS', 'WOULD', 'PERMIT', 'THEFT', 'OF', 'CURRENT', 'OR', 'DAMAGE'] +2300-131720-0032-1848: ref=['THE', 'STANDARD', 'EDISON', 'METER', 'PRACTICE', 'WAS', 'TO', 'REMOVE', 'THE', 'CELLS', 'ONCE', 'A', 'MONTH', 'TO', 'THE', 'METER', 'ROOM', 'OF', 'THE', 'CENTRAL', 'STATION', 'COMPANY', 'FOR', 'EXAMINATION', 'ANOTHER', 'SET', 'BEING', 'SUBSTITUTED'] +2300-131720-0032-1848: hyp=['THE', 'STANDARD', 'EDISON', 'METER', 'PRACTICE', 'WAS', 'TO', 'REMOVE', 'THE', 'CELLS', 'ONCE', 'A', 'MONTH', 'TO', 'THE', 'METEOR', 'ROOM', 'OF', 'THE', 'CENTRAL', 'STATION', 'COMPANY', 'FOR', 'EXAMINATION', 'ANOTHER', 'SET', 'BEING', 'SUBSTITUTED'] +2300-131720-0033-1849: ref=['IN', 'DECEMBER', 'EIGHTEEN', 'EIGHTY', 'EIGHT', 'MISTER', 'W', 'J', 'JENKS', 'READ', 'AN', 'INTERESTING', 'PAPER', 'BEFORE', 'THE', 'AMERICAN', 'INSTITUTE', 'OF', 'ELECTRICAL', 'ENGINEERS', 'ON', 'THE', 'SIX', 'YEARS', 'OF', 'PRACTICAL', 'EXPERIENCE', 'HAD', 'UP', 'TO', 'THAT', 'TIME', 'WITH', 'THE', 'METER', 'THEN', 'MORE', 'GENERALLY', 'IN', 'USE', 'THAN', 'ANY', 'OTHER'] +2300-131720-0033-1849: hyp=['IN', 'DECEMBER', 
'EIGHTEEN', 'EIGHTY', 'EIGHT', 'MISTER', 'W', 'J', 'JENKS', 'READ', 'AN', 'INTERESTING', 'PAPER', 'BEFORE', 'THE', 'AMERICAN', 'INSTITUTE', 'OF', 'ELECTRICAL', 'ENGINEERS', 'ON', 'THE', 'SIX', 'YEARS', 'OF', 'PRACTICAL', 'EXPERIENCE', 'HAD', 'UP', 'TO', 'THAT', 'TIME', 'WITH', 'THE', 'METRE', 'THEN', 'MORE', 'GENERALLY', 'IN', 'USE', 'THAN', 'ANY', 'OTHER'] +2300-131720-0034-1850: ref=['THE', 'OTHERS', 'HAVING', 'BEEN', 'IN', 'OPERATION', 'TOO', 'SHORT', 'A', 'TIME', 'TO', 'SHOW', 'DEFINITE', 'RESULTS', 'ALTHOUGH', 'THEY', 'ALSO', 'WENT', 'QUICKLY', 'TO', 'A', 'DIVIDEND', 'BASIS'] +2300-131720-0034-1850: hyp=['THE', 'OTHERS', 'HAVING', 'BEEN', 'IN', 'OPERATION', 'TOO', 'SHORT', 'A', 'TIME', 'TO', 'SHOW', 'DEFINITE', 'RESULTS', 'ALTHOUGH', 'THEY', 'ALSO', 'WENT', 'QUICKLY', 'TO', 'A', 'DIVIDEND', 'BASIS'] +2300-131720-0035-1851: ref=['IN', 'THIS', 'CONNECTION', 'IT', 'SHOULD', 'BE', 'MENTIONED', 'THAT', 'THE', 'ASSOCIATION', 'OF', 'EDISON', 'ILLUMINATING', 'COMPANIES', 'IN', 'THE', 'SAME', 'YEAR', 'ADOPTED', 'RESOLUTIONS', 'UNANIMOUSLY', 'TO', 'THE', 'EFFECT', 'THAT', 'THE', 'EDISON', 'METER', 'WAS', 'ACCURATE', 'AND', 'THAT', 'ITS', 'USE', 'WAS', 'NOT', 'EXPENSIVE', 'FOR', 'STATIONS', 'ABOVE', 'ONE', 'THOUSAND', 'LIGHTS', 'AND', 'THAT', 'THE', 'BEST', 'FINANCIAL', 'RESULTS', 'WERE', 'INVARIABLY', 'SECURED', 'IN', 'A', 'STATION', 'SELLING', 'CURRENT', 'BY', 'METER'] +2300-131720-0035-1851: hyp=['IN', 'THIS', 'CONNECTION', 'IT', 'SHOULD', 'BE', 'MENTIONED', 'THAT', 'THE', 'ASSOCIATION', 'OF', 'EDISON', 'ILLUMINATING', 'COMPANIES', 'IN', 'THE', 'SAME', 'YEAR', 'ADOPTED', 'RESOLUTIONS', 'UNANIMOUSLY', 'TO', 'THE', 'EFFECT', 'THAT', 'THE', 'EDISON', 'METER', 'WAS', 'ACCURATE', 'AND', 'THAT', 'ITS', 'USE', 'WAS', 'NOT', 'EXPENSIVE', 'FOR', 'STATIONS', 'ABOVE', 'ONE', 'THOUSAND', 'LIGHTS', 'AND', 'THAT', 'THE', 'BEST', 'FINANCIAL', 'RESULTS', 'WERE', 'INVARIABLY', 'SECURED', 'IN', 'A', 'STATION', 'SELLING', 'CURRENT', 'BY', 'METRE'] +2300-131720-0036-1852: ref=['THE', 'METER', 'CONTINUED', 'IN', 'GENERAL', 'SERVICE', 'DURING', 'EIGHTEEN', 'NINETY', 'NINE', 'AND', 'PROBABLY', 'UP', 'TO', 'THE', 'CLOSE', 'OF', 'THE', 'CENTURY'] +2300-131720-0036-1852: hyp=['THE', 'METRE', 'CONTINUED', 'IN', 'GENERAL', 'SERVICE', 'DURING', 'EIGHTEEN', 'NINETY', 'NINE', 'AND', 'PROBABLY', 'UP', 'TO', 'THE', 'CLOSE', 'OF', 'THE', 'CENTURY'] +2300-131720-0037-1853: ref=['HE', 'WEIGHED', 'AND', 'REWEIGHED', 'THE', 'METER', 'PLATES', 'AND', 'PURSUED', 'EVERY', 'LINE', 'OF', 'INVESTIGATION', 'IMAGINABLE', 'BUT', 'ALL', 'IN', 'VAIN'] +2300-131720-0037-1853: hyp=['HE', 'WEIGHED', 'AND', 'REWAYED', 'THE', 'METERPLATES', 'AND', 'PURSUED', 'EVERY', 'LINE', 'OF', 'INVESTIGATION', 'IMAGINABLE', 'BUT', 'ALL', 'IN', 'VAIN'] +2300-131720-0038-1854: ref=['HE', 'FELT', 'HE', 'WAS', 'UP', 'AGAINST', 'IT', 'AND', 'THAT', 'PERHAPS', 'ANOTHER', 'KIND', 'OF', 'A', 'JOB', 'WOULD', 'SUIT', 'HIM', 'BETTER'] +2300-131720-0038-1854: hyp=['HE', 'FELT', 'HE', 'WAS', 'UP', 'AGAINST', 'IT', 'AND', 'THAT', 'PERHAPS', 'ANOTHER', 'KIND', 'OF', 'A', 'JOB', 'WOULD', 'SUIT', 'HIM', 'BETTER'] +2300-131720-0039-1855: ref=['THE', 'PROBLEM', 'WAS', 'SOLVED'] +2300-131720-0039-1855: hyp=['THE', 'PROBLEM', 'WAS', 'SOLVED'] +2300-131720-0040-1856: ref=['WE', 'WERE', 'MORE', 'INTERESTED', 'IN', 'THE', 'TECHNICAL', 'CONDITION', 'OF', 'THE', 'STATION', 'THAN', 'IN', 'THE', 'COMMERCIAL', 'PART'] +2300-131720-0040-1856: hyp=['WE', 'WERE', 'MORE', 'INTERESTED', 'IN', 'THE', 'TECHNICAL', 'CONDITION', 'OF', 'THE', 'STATION', 'THAN', 'IN', 'THE', 'COMMERCIAL', 'PART'] 
+2300-131720-0041-1857: ref=['WE', 'HAD', 'METERS', 'IN', 'WHICH', 'THERE', 'WERE', 'TWO', 'BOTTLES', 'OF', 'LIQUID'] +2300-131720-0041-1857: hyp=['WE', 'HAD', 'METRES', 'IN', 'WHICH', 'THERE', 'WERE', 'TWO', 'BOTTLES', 'OF', 'LIQUID'] +237-126133-0000-2407: ref=['HERE', 'SHE', 'WOULD', 'STAY', 'COMFORTED', 'AND', 'SOOTHED', 'AMONG', 'THE', 'LOVELY', 'PLANTS', 'AND', 'RICH', 'EXOTICS', 'REJOICING', 'THE', 'HEART', 'OF', 'OLD', 'TURNER', 'THE', 'GARDENER', 'WHO', 'SINCE', "POLLY'S", 'FIRST', 'RAPTUROUS', 'ENTRANCE', 'HAD', 'TAKEN', 'HER', 'INTO', 'HIS', 'GOOD', 'GRACES', 'FOR', 'ALL', 'TIME'] +237-126133-0000-2407: hyp=['HERE', 'SHE', 'WOULD', 'STAY', 'COMFORTED', 'AND', 'SOOTHE', 'AMONG', 'THE', 'LOVELY', 'PLANTS', 'AND', 'RICH', 'EXOTICS', 'REJOICING', 'THE', 'HEART', 'OF', 'OLD', 'TURNER', 'THE', 'GARDENER', 'WHO', 'SINCE', "POLLY'S", 'FIRST', 'RAPTUROUS', 'ENTRANCE', 'HAD', 'TAKEN', 'HER', 'INTO', 'HIS', 'GOOD', 'GRACES', 'FOR', 'ALL', 'TIME'] +237-126133-0001-2408: ref=['EVERY', 'CHANCE', 'SHE', 'COULD', 'STEAL', 'AFTER', 'PRACTICE', 'HOURS', 'WERE', 'OVER', 'AND', 'AFTER', 'THE', 'CLAMOROUS', 'DEMANDS', 'OF', 'THE', 'BOYS', 'UPON', 'HER', 'TIME', 'WERE', 'FULLY', 'SATISFIED', 'WAS', 'SEIZED', 'TO', 'FLY', 'ON', 'THE', 'WINGS', 'OF', 'THE', 'WIND', 'TO', 'THE', 'FLOWERS'] +237-126133-0001-2408: hyp=['EVERY', 'CHANCE', 'SHE', 'COULD', 'STEAL', 'AFTER', 'PRACTICE', 'HOURS', 'WERE', 'OVER', 'AND', 'AFTER', 'THE', 'CLAMOROUS', 'DEMANDS', 'OF', 'THE', 'BOYS', 'UPON', 'HER', 'TIME', 'WERE', 'FULLY', 'SATISFIED', 'WAS', 'SEIZED', 'TO', 'FLY', 'ON', 'THE', 'WINGS', 'OF', 'THE', 'WIND', 'TO', 'THE', 'FLOWERS'] +237-126133-0002-2409: ref=['THEN', 'DEAR', 'SAID', 'MISSUS', 'WHITNEY', 'YOU', 'MUST', 'BE', 'KINDER', 'TO', 'HER', 'THAN', 'EVER', 'THINK', 'WHAT', 'IT', 'WOULD', 'BE', 'FOR', 'ONE', 'OF', 'YOU', 'TO', 'BE', 'AWAY', 'FROM', 'HOME', 'EVEN', 'AMONG', 'FRIENDS'] +237-126133-0002-2409: hyp=['THEN', 'DEAR', 'SAID', 'MISSUS', 'WHITNEY', 'YOU', 'MUST', 'BE', 'KINDER', 'TO', 'HER', 'THAN', 'EVER', 'THINK', 'WHAT', 'IT', 'WOULD', 'BE', 'FOR', 'ONE', 'OF', 'YOU', 'TO', 'BE', 'AWAY', 'FROM', 'HOME', 'EVEN', 'AMONG', 'FRIENDS'] +237-126133-0003-2410: ref=['SOMEHOW', 'OF', 'ALL', 'THE', 'DAYS', 'WHEN', 'THE', 'HOME', 'FEELING', 'WAS', 'THE', 'STRONGEST', 'THIS', 'DAY', 'IT', 'SEEMED', 'AS', 'IF', 'SHE', 'COULD', 'BEAR', 'IT', 'NO', 'LONGER'] +237-126133-0003-2410: hyp=['SOMEHOW', 'OF', 'ALL', 'THE', 'DAYS', 'WHEN', 'THE', 'HOME', 'FEELING', 'WAS', 'THE', 'STRONGEST', 'THIS', 'DAY', 'IT', 'SEEMED', 'AS', 'IF', 'SHE', 'COULD', 'BEAR', 'IT', 'NO', 'LONGER'] +237-126133-0004-2411: ref=['IF', 'SHE', 'COULD', 'ONLY', 'SEE', 'PHRONSIE', 'FOR', 'JUST', 'ONE', 'MOMENT'] +237-126133-0004-2411: hyp=['IF', 'SHE', 'COULD', 'ONLY', 'SEE', 'PHRONSIE', 'FOR', 'JUST', 'ONE', 'MOMENT'] +237-126133-0005-2412: ref=['OH', "SHE'S", 'ALWAYS', 'AT', 'THE', 'PIANO', 'SAID', 'VAN', 'SHE', 'MUST', 'BE', 'THERE', 'NOW', 'SOMEWHERE', 'AND', 'THEN', 'SOMEBODY', 'LAUGHED'] +237-126133-0005-2412: hyp=['OH', "SHE'S", 'ALWAYS', 'AT', 'THE', 'PIANO', 'SAID', 'VAN', 'SHE', 'MUST', 'BE', 'THERE', 'NOW', 'SOMEWHERE', 'AND', 'THEN', 'SOMEBODY', 'LAUGHED'] +237-126133-0006-2413: ref=['AT', 'THIS', 'THE', 'BUNDLE', 'OPENED', 'SUDDENLY', 'AND', 'OUT', 'POPPED', 'PHRONSIE'] +237-126133-0006-2413: hyp=['AT', 'THIS', 'THE', 'BUNDLE', 'OPENED', 'SUDDENLY', 'AND', 'OUT', 'POPPED', 'PHRONSIE'] +237-126133-0007-2414: ref=['BUT', 'POLLY', "COULDN'T", 'SPEAK', 'AND', 'IF', 'JASPER', "HADN'T", 'CAUGHT', 'HER', 'JUST', 'IN', 'TIME', 'SHE', 'WOULD', 
'HAVE', 'TUMBLED', 'OVER', 'BACKWARD', 'FROM', 'THE', 'STOOL', 'PHRONSIE', 'AND', 'ALL'] +237-126133-0007-2414: hyp=['BUT', 'POLLY', "COULDN'T", 'SPEAK', 'AND', 'IF', 'JASPER', "HADN'T", 'CAUGHT', 'HER', 'JUST', 'IN', 'TIME', 'SHE', 'WOULD', 'HAVE', 'TUMBLED', 'OVER', 'BACKWARD', 'FROM', 'THE', 'STOOL', 'PHRONSIE', 'AND', 'ALL'] +237-126133-0008-2415: ref=['ASKED', 'PHRONSIE', 'WITH', 'HER', 'LITTLE', 'FACE', 'CLOSE', 'TO', "POLLY'S", 'OWN'] +237-126133-0008-2415: hyp=['ASKED', 'PHRONSIE', 'WITH', 'HER', 'LITTLE', 'FACE', 'CLOSE', 'TO', "POLLY'S", 'OWN'] +237-126133-0009-2416: ref=['NOW', "YOU'LL", 'STAY', 'CRIED', 'VAN', 'SAY', 'POLLY', "WON'T", 'YOU'] +237-126133-0009-2416: hyp=['NOW', "YOU'LL", 'STAY', 'CRIED', 'VAN', 'SAY', 'POLLY', "WON'T", 'YOU'] +237-126133-0010-2417: ref=['OH', 'YOU', 'ARE', 'THE', 'DEAREST', 'AND', 'BEST', 'MISTER', 'KING', 'I', 'EVER', 'SAW', 'BUT', 'HOW', 'DID', 'YOU', 'MAKE', 'MAMMY', 'LET', 'HER', 'COME'] +237-126133-0010-2417: hyp=['OH', 'YOU', 'ARE', 'THE', 'DEAREST', 'AND', 'BEST', 'MISTER', 'KING', 'I', 'EVER', 'SAW', 'BUT', 'HOW', 'DID', 'YOU', 'MAKE', 'MAMMY', 'LET', 'HER', 'COME'] +237-126133-0011-2418: ref=["ISN'T", 'HE', 'SPLENDID', 'CRIED', 'JASPER', 'IN', 'INTENSE', 'PRIDE', 'SWELLING', 'UP', 'FATHER', 'KNEW', 'HOW', 'TO', 'DO', 'IT'] +237-126133-0011-2418: hyp=["ISN'T", 'HE', 'SPLENDID', 'CRIED', 'JASPER', 'IN', 'INTENSE', 'PRIDE', 'SWELLING', 'UP', 'FATHER', 'KNEW', 'HOW', 'TO', 'DO', 'IT'] +237-126133-0012-2419: ref=['THERE', 'THERE', 'HE', 'SAID', 'SOOTHINGLY', 'PATTING', 'HER', 'BROWN', 'FUZZY', 'HEAD'] +237-126133-0012-2419: hyp=['THERE', 'THERE', 'HE', 'SAID', 'SOOTHINGLY', 'PATTING', 'HER', 'BROWN', 'FUZZY', 'HEAD'] +237-126133-0013-2420: ref=['I', 'KNOW', 'GASPED', 'POLLY', 'CONTROLLING', 'HER', 'SOBS', 'I', "WON'T", 'ONLY', 'I', "CAN'T", 'THANK', 'YOU'] +237-126133-0013-2420: hyp=['I', 'KNOW', 'GASPED', 'POLLY', 'CONTROLLING', 'HER', 'SOBS', 'I', "WON'T", 'ONLY', 'I', "CAN'T", 'THANK', 'YOU'] +237-126133-0014-2421: ref=['ASKED', 'PHRONSIE', 'IN', 'INTENSE', 'INTEREST', 'SLIPPING', 'DOWN', 'OUT', 'OF', "POLLY'S", 'ARMS', 'AND', 'CROWDING', 'UP', 'CLOSE', 'TO', "JASPER'S", 'SIDE'] +237-126133-0014-2421: hyp=['ASKED', 'PHRONSIE', 'IN', 'INTENSE', 'INTEREST', 'SLIPPING', 'DOWN', 'OUT', 'OF', "POLLY'S", 'ARMS', 'AND', 'CROWDING', 'UP', 'CLOSE', 'TO', "JASPER'S", 'SIDE'] +237-126133-0015-2422: ref=['YES', 'ALL', 'ALONE', 'BY', 'HIMSELF', 'ASSERTED', 'JASPER', 'VEHEMENTLY', 'AND', 'WINKING', 'FURIOUSLY', 'TO', 'THE', 'OTHERS', 'TO', 'STOP', 'THEIR', 'LAUGHING', 'HE', 'DID', 'NOW', 'TRULY', 'PHRONSIE'] +237-126133-0015-2422: hyp=['YES', 'ALL', 'ALONE', 'BY', 'HIMSELF', 'ASSERTED', 'JASPER', 'VEHEMENTLY', 'AND', 'WINKING', 'FURIOUSLY', 'TO', 'THE', 'OTHERS', 'TO', 'STOP', 'THEIR', 'LAUGHING', 'HE', 'DID', 'NOW', 'TRULY', 'PHRONSIE'] +237-126133-0016-2423: ref=['OH', 'NO', 'JASPER', 'I', 'MUST', 'GO', 'BY', 'MY', 'VERY', 'OWN', 'SELF'] +237-126133-0016-2423: hyp=['OH', 'NO', 'JAPS', 'HER', 'I', 'MUST', 'GO', 'BY', 'MY', 'VERY', 'OWN', 'SELF'] +237-126133-0017-2424: ref=['THERE', 'JAP', "YOU'VE", 'CAUGHT', 'IT', 'LAUGHED', 'PERCY', 'WHILE', 'THE', 'OTHERS', 'SCREAMED', 'AT', 'THE', 'SIGHT', 'OF', "JASPER'S", 'FACE'] +237-126133-0017-2424: hyp=['THERE', 'JAP', "YOU'VE", 'CAUGHT', 'IT', 'LAUGHED', 'PERCY', 'WHILE', 'THE', 'OTHERS', 'SCREAMED', 'AT', 'THE', 'SIGHT', 'OF', "JASPER'S", 'FACE'] +237-126133-0018-2425: ref=["DON'T", 'MIND', 'IT', 'POLLY', 'WHISPERED', 'JASPER', "TWASN'T", 'HER', 'FAULT'] +237-126133-0018-2425: hyp=["DON'T", 'MIND', 'IT', 
'POLLY', 'WHISPERED', 'JASPER', "TWASN'T", 'HER', 'FAULT'] +237-126133-0019-2426: ref=['DEAR', 'ME', 'EJACULATED', 'THE', 'OLD', 'GENTLEMAN', 'IN', 'THE', 'UTMOST', 'AMAZEMENT', 'AND', 'SUCH', 'A', 'TIME', 'AS', "I'VE", 'HAD', 'TO', 'GET', 'HER', 'HERE', 'TOO'] +237-126133-0019-2426: hyp=['DEAR', 'ME', 'EJACULATED', 'THE', 'OLD', 'GENTLEMAN', 'IN', 'THE', 'UTMOST', 'AMAZEMENT', 'AND', 'SUCH', 'A', 'TIME', 'AS', "I'VE", 'HAD', 'TO', 'GET', 'HER', 'HERE', 'TOO'] +237-126133-0020-2427: ref=['HOW', 'DID', 'HER', 'MOTHER', 'EVER', 'LET', 'HER', 'GO'] +237-126133-0020-2427: hyp=['HOW', 'DID', 'HER', 'MOTHER', 'EVER', 'LET', 'HER', 'GO'] +237-126133-0021-2428: ref=['SHE', 'ASKED', 'IMPULSIVELY', 'I', "DIDN'T", 'BELIEVE', 'YOU', 'COULD', 'PERSUADE', 'HER', 'FATHER'] +237-126133-0021-2428: hyp=['SHE', 'ASKED', 'IMPULSIVELY', 'I', "DIDN'T", 'BELIEVE', 'YOU', 'COULD', 'PERSUADE', 'HER', 'FATHER'] +237-126133-0022-2429: ref=['I', "DIDN'T", 'HAVE', 'ANY', 'FEARS', 'IF', 'I', 'WORKED', 'IT', 'RIGHTLY', 'SAID', 'THE', 'OLD', 'GENTLEMAN', 'COMPLACENTLY'] +237-126133-0022-2429: hyp=['I', "DIDN'T", 'HAVE', 'ANY', 'FEARS', 'IF', 'I', 'WORKED', 'IT', 'RIGHTLY', 'SAID', 'THE', 'OLD', 'GENTLEMAN', 'COMPLACENTLY'] +237-126133-0023-2430: ref=['HE', 'CRIED', 'IN', 'HIGH', 'DUDGEON', 'JUST', 'AS', 'IF', 'HE', 'OWNED', 'THE', 'WHOLE', 'OF', 'THE', 'PEPPERS', 'AND', 'COULD', 'DISPOSE', 'OF', 'THEM', 'ALL', 'TO', 'SUIT', 'HIS', 'FANCY'] +237-126133-0023-2430: hyp=['HE', 'CRIED', 'AND', 'HIGH', 'DUDGEON', 'JUST', 'AS', 'IF', 'HE', 'OWNED', 'THE', 'WHOLE', 'OF', 'THE', 'PEPPERS', 'AND', 'COULD', 'DISPOSE', 'OF', 'THEM', 'ALL', 'TO', 'SUIT', 'HIS', 'FANCY'] +237-126133-0024-2431: ref=['AND', 'THE', 'OLD', 'GENTLEMAN', 'WAS', 'SO', 'DELIGHTED', 'WITH', 'HIS', 'SUCCESS', 'THAT', 'HE', 'HAD', 'TO', 'BURST', 'OUT', 'INTO', 'A', 'SERIES', 'OF', 'SHORT', 'HAPPY', 'BITS', 'OF', 'LAUGHTER', 'THAT', 'OCCUPIED', 'QUITE', 'A', 'SPACE', 'OF', 'TIME'] +237-126133-0024-2431: hyp=['AND', 'THE', 'OLD', 'GENTLEMAN', 'WAS', 'SO', 'DELIGHTED', 'WITH', 'HIS', 'SUCCESS', 'THAT', 'HE', 'HAD', 'TO', 'BURST', 'OUT', 'INTO', 'A', 'SERIES', 'OF', 'SHORT', 'HAPPY', 'BITS', 'OF', 'LAUGHTER', 'THAT', 'OCCUPIED', 'QUITE', 'A', 'SPACE', 'OF', 'TIME'] +237-126133-0025-2432: ref=['AT', 'LAST', 'HE', 'CAME', 'OUT', 'OF', 'THEM', 'AND', 'WIPED', 'HIS', 'FACE', 'VIGOROUSLY'] +237-126133-0025-2432: hyp=['AT', 'LAST', 'HE', 'CAME', 'OUT', 'OF', 'THEM', 'AND', 'WIPED', 'HIS', 'FACE', 'VIGOROUSLY'] +237-134493-0000-2388: ref=['IT', 'IS', 'SIXTEEN', 'YEARS', 'SINCE', 'JOHN', 'BERGSON', 'DIED'] +237-134493-0000-2388: hyp=['IT', 'IS', 'SIXTEEN', 'YEARS', 'SINCE', 'JOHN', 'BERKS', 'AND', 'DIED'] +237-134493-0001-2389: ref=['HIS', 'WIFE', 'NOW', 'LIES', 'BESIDE', 'HIM', 'AND', 'THE', 'WHITE', 'SHAFT', 'THAT', 'MARKS', 'THEIR', 'GRAVES', 'GLEAMS', 'ACROSS', 'THE', 'WHEAT', 'FIELDS'] +237-134493-0001-2389: hyp=['HIS', 'WIFE', 'NOW', 'LIES', 'BESIDE', 'HIM', 'AND', 'THE', 'WHITE', 'SHAFT', 'THAT', 'MARKS', 'THEIR', 'GRAVES', 'GLEAMS', 'ACROSS', 'THE', 'WHEAT', 'FIELDS'] +237-134493-0002-2390: ref=['FROM', 'THE', 'NORWEGIAN', 'GRAVEYARD', 'ONE', 'LOOKS', 'OUT', 'OVER', 'A', 'VAST', 'CHECKER', 'BOARD', 'MARKED', 'OFF', 'IN', 'SQUARES', 'OF', 'WHEAT', 'AND', 'CORN', 'LIGHT', 'AND', 'DARK', 'DARK', 'AND', 'LIGHT'] +237-134493-0002-2390: hyp=['FROM', 'THE', 'NORWEGIAN', 'GRAVEYARD', 'ONE', 'LOOKS', 'OUT', 'OVER', 'A', 'VAST', 'CHECKER', 'BOARD', 'MARKED', 'OFF', 'IN', 'SQUARES', 'OF', 'WHEAT', 'AND', 'CORN', 'LIGHT', 'AND', 'DARK', 'AND', 'LIGHT'] +237-134493-0003-2391: 
ref=['FROM', 'THE', 'GRAVEYARD', 'GATE', 'ONE', 'CAN', 'COUNT', 'A', 'DOZEN', 'GAYLY', 'PAINTED', 'FARMHOUSES', 'THE', 'GILDED', 'WEATHER', 'VANES', 'ON', 'THE', 'BIG', 'RED', 'BARNS', 'WINK', 'AT', 'EACH', 'OTHER', 'ACROSS', 'THE', 'GREEN', 'AND', 'BROWN', 'AND', 'YELLOW', 'FIELDS'] +237-134493-0003-2391: hyp=['FROM', 'THE', 'GRAVEYARD', 'GATE', 'ONE', 'CAN', 'COUNT', 'A', 'DOZEN', 'GAILY', 'PAINTED', 'FARM', 'HOUSES', 'THE', 'GILDED', 'WEATHER', 'VEINS', 'ON', 'THE', 'BIG', 'RED', 'BARNS', 'WINK', 'AT', 'EACH', 'OTHER', 'ACROSS', 'THE', 'GREEN', 'AND', 'BROWN', 'AND', 'YELLOW', 'FIELDS'] +237-134493-0004-2392: ref=['THE', 'AIR', 'AND', 'THE', 'EARTH', 'ARE', 'CURIOUSLY', 'MATED', 'AND', 'INTERMINGLED', 'AS', 'IF', 'THE', 'ONE', 'WERE', 'THE', 'BREATH', 'OF', 'THE', 'OTHER'] +237-134493-0004-2392: hyp=['THE', 'AIR', 'AND', 'THE', 'EARTH', 'ARE', 'CURIOUSLY', 'MATED', 'AND', 'INTERMINGLED', 'AS', 'IF', 'THE', 'ONE', 'WERE', 'THE', 'BREATH', 'OF', 'THE', 'OTHER'] +237-134493-0005-2393: ref=['HE', 'WAS', 'A', 'SPLENDID', 'FIGURE', 'OF', 'A', 'BOY', 'TALL', 'AND', 'STRAIGHT', 'AS', 'A', 'YOUNG', 'PINE', 'TREE', 'WITH', 'A', 'HANDSOME', 'HEAD', 'AND', 'STORMY', 'GRAY', 'EYES', 'DEEPLY', 'SET', 'UNDER', 'A', 'SERIOUS', 'BROW'] +237-134493-0005-2393: hyp=['HE', 'WAS', 'A', 'SPLENDID', 'FIGURE', 'OF', 'A', 'BOY', 'TALL', 'AND', 'STRAIGHT', 'AS', 'A', 'YOUNG', 'PINE', 'TREE', 'WITH', 'A', 'HANDSOME', 'HEAD', 'AND', 'STORMY', 'GRAY', 'EYES', 'DEEPLY', 'SET', 'UNDER', 'A', 'SERIOUS', 'BROW'] +237-134493-0006-2394: ref=["THAT'S", 'NOT', 'MUCH', 'OF', 'A', 'JOB', 'FOR', 'AN', 'ATHLETE', 'HERE', "I'VE", 'BEEN', 'TO', 'TOWN', 'AND', 'BACK'] +237-134493-0006-2394: hyp=["THAT'S", 'NOT', 'MUCH', 'OF', 'A', 'JOB', 'FOR', 'AN', 'ATHLETE', 'HERE', "I'VE", 'BEEN', 'TO', 'TOWN', 'AND', 'BACK'] +237-134493-0007-2395: ref=['ALEXANDRA', 'LETS', 'YOU', 'SLEEP', 'LATE'] +237-134493-0007-2395: hyp=['ALEXANDER', "THAT'S", 'YOU', 'SLEEP', 'LATE'] +237-134493-0008-2396: ref=['SHE', 'GATHERED', 'UP', 'HER', 'REINS'] +237-134493-0008-2396: hyp=['SHE', 'GATHERED', 'UP', 'HER', 'REINS'] +237-134493-0009-2397: ref=['PLEASE', 'WAIT', 'FOR', 'ME', 'MARIE', 'EMIL', 'COAXED'] +237-134493-0009-2397: hyp=['PLEASE', 'WAIT', 'FOR', 'ME', 'MARIE', 'AMYL', 'COAXED'] +237-134493-0010-2398: ref=['I', 'NEVER', 'SEE', "LOU'S", 'SCYTHE', 'OVER', 'HERE'] +237-134493-0010-2398: hyp=['I', 'NEVER', 'SEE', 'LOOSE', 'SIGH', 'OVER', 'HERE'] +237-134493-0011-2399: ref=['HOW', 'BROWN', "YOU'VE", 'GOT', 'SINCE', 'YOU', 'CAME', 'HOME', 'I', 'WISH', 'I', 'HAD', 'AN', 'ATHLETE', 'TO', 'MOW', 'MY', 'ORCHARD'] +237-134493-0011-2399: hyp=['HOW', 'BROWN', "YOU'VE", 'GOT', 'SINCE', 'YOU', 'CAME', 'HOME', 'I', 'WISH', 'I', 'HAD', 'AN', 'ATHLETE', 'TO', 'MOW', 'MY', 'ORCHARD'] +237-134493-0012-2400: ref=['I', 'GET', 'WET', 'TO', 'MY', 'KNEES', 'WHEN', 'I', 'GO', 'DOWN', 'TO', 'PICK', 'CHERRIES'] +237-134493-0012-2400: hyp=['I', 'GET', 'WET', 'TO', 'MY', 'KNEES', 'WHEN', 'I', 'GO', 'DOWN', 'TO', 'PICTURES'] +237-134493-0013-2401: ref=['INDEED', 'HE', 'HAD', 'LOOKED', 'AWAY', 'WITH', 'THE', 'PURPOSE', 'OF', 'NOT', 'SEEING', 'IT'] +237-134493-0013-2401: hyp=['INDEED', 'HE', 'HAD', 'LOOKED', 'AWAY', 'WITH', 'A', 'PURPOSE', 'OF', 'NOT', 'SEEING', 'IT'] +237-134493-0014-2402: ref=['THEY', 'THINK', "YOU'RE", 'PROUD', 'BECAUSE', "YOU'VE", 'BEEN', 'AWAY', 'TO', 'SCHOOL', 'OR', 'SOMETHING'] +237-134493-0014-2402: hyp=['THEY', 'THINK', 'YOU', 'ARE', 'PROUD', 'BECAUSE', "YOU'VE", 'BEEN', 'AWAY', 'TO', 'SCHOOL', 'OR', 'SOMETHING'] +237-134493-0015-2403: ref=['THERE', 
'WAS', 'SOMETHING', 'INDIVIDUAL', 'ABOUT', 'THE', 'GREAT', 'FARM', 'A', 'MOST', 'UNUSUAL', 'TRIMNESS', 'AND', 'CARE', 'FOR', 'DETAIL'] +237-134493-0015-2403: hyp=['THERE', 'WAS', 'SOMETHING', 'INDIVIDUAL', 'ABOUT', 'THE', 'GREAT', 'FARM', 'A', 'MOST', 'UNUSUAL', 'TRIMNESS', 'AND', 'CARE', 'FOR', 'DETAIL'] +237-134493-0016-2404: ref=['ON', 'EITHER', 'SIDE', 'OF', 'THE', 'ROAD', 'FOR', 'A', 'MILE', 'BEFORE', 'YOU', 'REACHED', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'STOOD', 'TALL', 'OSAGE', 'ORANGE', 'HEDGES', 'THEIR', 'GLOSSY', 'GREEN', 'MARKING', 'OFF', 'THE', 'YELLOW', 'FIELDS'] +237-134493-0016-2404: hyp=['ON', 'EITHER', 'SIDE', 'OF', 'THE', 'ROAD', 'FOR', 'A', 'MILE', 'BEFORE', 'YOU', 'REACHED', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'STOOD', 'TALL', 'O', 'SAGE', 'ORANGE', 'HEDGES', 'THEIR', 'GLOSSY', 'GREEN', 'MARKING', 'OFF', 'THE', 'YELLOW', 'FIELDS'] +237-134493-0017-2405: ref=['ANY', 'ONE', 'THEREABOUTS', 'WOULD', 'HAVE', 'TOLD', 'YOU', 'THAT', 'THIS', 'WAS', 'ONE', 'OF', 'THE', 'RICHEST', 'FARMS', 'ON', 'THE', 'DIVIDE', 'AND', 'THAT', 'THE', 'FARMER', 'WAS', 'A', 'WOMAN', 'ALEXANDRA', 'BERGSON'] +237-134493-0017-2405: hyp=['ANY', 'ONE', 'THEREABOUTS', 'WOULD', 'HAVE', 'TOLD', 'YOU', 'THAT', 'THIS', 'WAS', 'ONE', 'OF', 'THE', 'RICHEST', 'FARMS', 'ON', 'THE', 'DIVIDE', 'AND', 'THAT', 'THE', 'FARMER', 'WAS', 'A', 'WOMAN', 'ALEXANDRA', 'BERGSON'] +237-134493-0018-2406: ref=['THERE', 'IS', 'EVEN', 'A', 'WHITE', 'ROW', 'OF', 'BEEHIVES', 'IN', 'THE', 'ORCHARD', 'UNDER', 'THE', 'WALNUT', 'TREES'] +237-134493-0018-2406: hyp=['THERE', 'IS', 'EVEN', 'A', 'WHITE', 'ROW', 'OF', 'BEEHIVES', 'IN', 'THE', 'ORCHARD', 'UNDER', 'THE', 'WALNUT', 'TREES'] +237-134500-0000-2345: ref=['FRANK', 'READ', 'ENGLISH', 'SLOWLY', 'AND', 'THE', 'MORE', 'HE', 'READ', 'ABOUT', 'THIS', 'DIVORCE', 'CASE', 'THE', 'ANGRIER', 'HE', 'GREW'] +237-134500-0000-2345: hyp=['FRANK', 'READ', 'ENGLISH', 'SLOWLY', 'AND', 'THE', 'MORE', 'HE', 'READ', 'ABOUT', 'THIS', 'DIVORCE', 'CASE', 'THE', 'ANGRIER', 'HE', 'GREW'] +237-134500-0001-2346: ref=['MARIE', 'SIGHED'] +237-134500-0001-2346: hyp=['MARIE', 'SIGHED'] +237-134500-0002-2347: ref=['A', 'BRISK', 'WIND', 'HAD', 'COME', 'UP', 'AND', 'WAS', 'DRIVING', 'PUFFY', 'WHITE', 'CLOUDS', 'ACROSS', 'THE', 'SKY'] +237-134500-0002-2347: hyp=['A', 'BRAY', 'SQUINT', 'HAD', 'COME', 'UP', 'AND', 'WAS', 'DRIVING', 'PUFFY', 'WHITE', 'CLOUDS', 'ACROSS', 'THE', 'SKY'] +237-134500-0003-2348: ref=['THE', 'ORCHARD', 'WAS', 'SPARKLING', 'AND', 'RIPPLING', 'IN', 'THE', 'SUN'] +237-134500-0003-2348: hyp=['THE', 'ORCHARD', 'WAS', 'SPARKLING', 'AND', 'RIPPLING', 'IN', 'THE', 'SUN'] +237-134500-0004-2349: ref=['THAT', 'INVITATION', 'DECIDED', 'HER'] +237-134500-0004-2349: hyp=['THAT', 'INVITATION', 'DECIDED', 'HER'] +237-134500-0005-2350: ref=['OH', 'BUT', "I'M", 'GLAD', 'TO', 'GET', 'THIS', 'PLACE', 'MOWED'] +237-134500-0005-2350: hyp=['OH', 'BUT', "I'M", 'GLAD', 'TO', 'GET', 'THIS', 'PLACE', 'MOWED'] +237-134500-0006-2351: ref=['JUST', 'SMELL', 'THE', 'WILD', 'ROSES', 'THEY', 'ARE', 'ALWAYS', 'SO', 'SPICY', 'AFTER', 'A', 'RAIN'] +237-134500-0006-2351: hyp=['JUST', 'SMELL', 'THE', 'WILD', 'ROSES', 'THEY', 'ARE', 'ALWAYS', 'SO', 'SPICY', 'AFTER', 'A', 'RAIN'] +237-134500-0007-2352: ref=['WE', 'NEVER', 'HAD', 'SO', 'MANY', 'OF', 'THEM', 'IN', 'HERE', 'BEFORE'] +237-134500-0007-2352: hyp=['WE', 'NEVER', 'HAD', 'SO', 'MANY', 'OF', 'THEM', 'IN', 'HERE', 'BEFORE'] +237-134500-0008-2353: ref=['I', 'SUPPOSE', "IT'S", 'THE', 'WET', 'SEASON', 'WILL', 'YOU', 'HAVE', 'TO', 'CUT', 'THEM', 'TOO'] +237-134500-0008-2353: 
hyp=['I', 'SUPPOSE', "IT'S", 'THE', 'WET', 'SEASON', 'WILL', 'YOU', 'HAVE', 'TO', 'CUT', 'THEM', 'TOO'] +237-134500-0009-2354: ref=['I', 'SUPPOSE', "THAT'S", 'THE', 'WET', 'SEASON', 'TOO', 'THEN'] +237-134500-0009-2354: hyp=['I', 'SUPPOSE', "THAT'S", 'THE', 'WHITE', 'SEASON', 'TOO', 'THEN'] +237-134500-0010-2355: ref=["IT'S", 'EXCITING', 'TO', 'SEE', 'EVERYTHING', 'GROWING', 'SO', 'FAST', 'AND', 'TO', 'GET', 'THE', 'GRASS', 'CUT'] +237-134500-0010-2355: hyp=["IT'S", 'EXCITING', 'TO', 'SEE', 'EVERYTHING', 'GROWING', 'SO', 'FAST', 'AND', 'TO', 'GET', 'THE', 'GRASS', 'CUT'] +237-134500-0011-2356: ref=["AREN'T", 'YOU', 'SPLASHED', 'LOOK', 'AT', 'THE', 'SPIDER', 'WEBS', 'ALL', 'OVER', 'THE', 'GRASS'] +237-134500-0011-2356: hyp=["AREN'T", 'YOU', 'SPLASHED', 'LOOK', 'AT', 'THE', 'SPIDER', 'WHIPS', 'ALL', 'OVER', 'THE', 'GRASS'] +237-134500-0012-2357: ref=['IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HEARD', 'THE', 'CHERRIES', 'DROPPING', 'SMARTLY', 'INTO', 'THE', 'PAIL', 'AND', 'HE', 'BEGAN', 'TO', 'SWING', 'HIS', 'SCYTHE', 'WITH', 'THAT', 'LONG', 'EVEN', 'STROKE', 'THAT', 'FEW', 'AMERICAN', 'BOYS', 'EVER', 'LEARN'] +237-134500-0012-2357: hyp=['IN', 'A', 'FEW', 'MOMENTS', 'HE', 'HEARD', 'THE', 'CHERRIES', 'DROPPING', 'SMARTLY', 'INTO', 'THE', 'PAIL', 'AND', 'HE', 'BEGAN', 'TO', 'SWING', 'HIS', 'SCYTHE', 'WITH', 'THAT', 'LONG', 'EVEN', 'STROKE', 'THAT', 'FEW', 'AMERICAN', 'BOYS', 'EVER', 'LEARN'] +237-134500-0013-2358: ref=['MARIE', 'PICKED', 'CHERRIES', 'AND', 'SANG', 'SOFTLY', 'TO', 'HERSELF', 'STRIPPING', 'ONE', 'GLITTERING', 'BRANCH', 'AFTER', 'ANOTHER', 'SHIVERING', 'WHEN', 'SHE', 'CAUGHT', 'A', 'SHOWER', 'OF', 'RAINDROPS', 'ON', 'HER', 'NECK', 'AND', 'HAIR'] +237-134500-0013-2358: hyp=['MARIE', 'PICKED', 'THE', 'CHERRIES', 'AND', 'SANG', 'SOFTLY', 'TO', 'HERSELF', 'STRIPPING', 'ONE', 'GLITTERING', 'RANCH', 'AFTER', 'ANOTHER', 'SHIVERING', 'WHEN', 'SHE', 'CAUGHT', 'A', 'SHOWER', 'OF', 'RAINDROPS', 'ON', 'HER', 'NECK', 'AND', 'HAIR'] +237-134500-0014-2359: ref=['AND', 'EMIL', 'MOWED', 'HIS', 'WAY', 'SLOWLY', 'DOWN', 'TOWARD', 'THE', 'CHERRY', 'TREES'] +237-134500-0014-2359: hyp=['AND', 'AMIEL', 'MOWED', 'HIS', 'WAY', 'SLOWLY', 'DOWN', 'TOWARD', 'THE', 'CHERRY', 'TREES'] +237-134500-0015-2360: ref=['THAT', 'SUMMER', 'THE', 'RAINS', 'HAD', 'BEEN', 'SO', 'MANY', 'AND', 'OPPORTUNE', 'THAT', 'IT', 'WAS', 'ALMOST', 'MORE', 'THAN', 'SHABATA', 'AND', 'HIS', 'MAN', 'COULD', 'DO', 'TO', 'KEEP', 'UP', 'WITH', 'THE', 'CORN', 'THE', 'ORCHARD', 'WAS', 'A', 'NEGLECTED', 'WILDERNESS'] +237-134500-0015-2360: hyp=['THAT', 'SUMMER', 'THE', 'RAINS', 'HAD', 'BEEN', 'SO', 'MANY', 'AND', 'OPPORTUNE', 'THAT', 'IT', 'WAS', 'ALMOST', 'MORE', 'THAN', 'CHEBATA', 'AND', 'HIS', 'MAN', 'COULD', 'DO', 'TO', 'KEEP', 'UP', 'WITH', 'THE', 'CORN', 'THE', 'ORCHARD', 'WAS', 'A', 'NEGLECTED', 'WILDERNESS'] +237-134500-0016-2361: ref=['I', "DON'T", 'KNOW', 'ALL', 'OF', 'THEM', 'BUT', 'I', 'KNOW', 'LINDENS', 'ARE'] +237-134500-0016-2361: hyp=['I', "DON'T", 'KNOW', 'ALL', 'OF', 'THEM', 'BUT', 'I', 'KNOW', 'LINDENS', 'ARE'] +237-134500-0017-2362: ref=['IF', 'I', 'FEEL', 'THAT', 'WAY', 'I', 'FEEL', 'THAT', 'WAY'] +237-134500-0017-2362: hyp=['IF', 'I', 'FEEL', 'THAT', 'WAY', 'I', 'FEEL', 'THAT', 'WAY'] +237-134500-0018-2363: ref=['HE', 'REACHED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'AND', 'BEGAN', 'TO', 'PICK', 'THE', 'SWEET', 'INSIPID', 'FRUIT', 'LONG', 'IVORY', 'COLORED', 'BERRIES', 'TIPPED', 'WITH', 'FAINT', 'PINK', 'LIKE', 'WHITE', 'CORAL', 'THAT', 'FALL', 'TO', 'THE', 'GROUND', 'UNHEEDED', 'ALL', 'SUMMER', 'THROUGH'] +237-134500-0018-2363: 
hyp=['HE', 'REACHED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'AND', 'BEGAN', 'TO', 'PICK', 'THE', 'SWEET', 'INSIPID', 'FRUIT', 'LONG', 'IVORY', 'COLORED', 'BERRIES', 'TIPPED', 'WITH', 'FAINT', 'PINK', 'LIKE', 'WHITE', 'CORAL', 'THAT', 'FALL', 'TO', 'THE', 'GROUND', 'UNHEEDED', 'ALL', 'SUMMER', 'THROUGH'] +237-134500-0019-2364: ref=['HE', 'DROPPED', 'A', 'HANDFUL', 'INTO', 'HER', 'LAP'] +237-134500-0019-2364: hyp=['HE', 'DROPPED', 'A', 'HANDFUL', 'INTO', 'HER', 'LAP'] +237-134500-0020-2365: ref=['YES', "DON'T", 'YOU'] +237-134500-0020-2365: hyp=['YES', "DON'T", 'YOU'] +237-134500-0021-2366: ref=['OH', 'EVER', 'SO', 'MUCH', 'ONLY', 'HE', 'SEEMS', 'KIND', 'OF', 'STAID', 'AND', 'SCHOOL', 'TEACHERY'] +237-134500-0021-2366: hyp=['OH', 'EVER', 'SO', 'MUCH', 'ONLY', 'HE', 'SEEMS', 'KIND', 'OF', 'STAID', 'AND', 'SCHOOL', 'TEACHERY'] +237-134500-0022-2367: ref=['WHEN', 'SHE', 'USED', 'TO', 'TELL', 'ME', 'ABOUT', 'HIM', 'I', 'ALWAYS', 'WONDERED', 'WHETHER', 'SHE', "WASN'T", 'A', 'LITTLE', 'IN', 'LOVE', 'WITH', 'HIM'] +237-134500-0022-2367: hyp=['WHEN', 'SHE', 'USED', 'TO', 'TELL', 'ME', 'ABOUT', 'HIM', 'I', 'ALWAYS', 'WONDERED', 'WHETHER', 'SHE', "WASN'T", 'A', 'LITTLE', 'IN', 'LOVE', 'WITH', 'HIM'] +237-134500-0023-2368: ref=['IT', 'WOULD', 'SERVE', 'YOU', 'ALL', 'RIGHT', 'IF', 'SHE', 'WALKED', 'OFF', 'WITH', 'CARL'] +237-134500-0023-2368: hyp=['IT', 'WOULD', 'SERVE', 'YOU', 'ALL', 'RIGHT', 'IF', 'SHE', 'WALKED', 'OFF', 'WITH', 'KARL'] +237-134500-0024-2369: ref=['I', 'LIKE', 'TO', 'TALK', 'TO', 'CARL', 'ABOUT', 'NEW', 'YORK', 'AND', 'WHAT', 'A', 'FELLOW', 'CAN', 'DO', 'THERE'] +237-134500-0024-2369: hyp=['I', 'LIKE', 'TO', 'TALK', 'TO', 'KARL', 'ABOUT', 'NEW', 'YORK', 'AND', 'WHAT', 'A', 'FELLOW', 'CAN', 'DO', 'THERE'] +237-134500-0025-2370: ref=['OH', 'EMIL'] +237-134500-0025-2370: hyp=['OH', 'AMIEL'] +237-134500-0026-2371: ref=['SURELY', 'YOU', 'ARE', 'NOT', 'THINKING', 'OF', 'GOING', 'OFF', 'THERE'] +237-134500-0026-2371: hyp=['SURELY', 'YOU', 'ARE', 'NOT', 'THINKING', 'OF', 'GOING', 'OFF', 'THERE'] +237-134500-0027-2372: ref=["MARIE'S", 'FACE', 'FELL', 'UNDER', 'HIS', 'BROODING', 'GAZE'] +237-134500-0027-2372: hyp=["MARIE'S", 'FACE', 'FELL', 'UNDER', 'HIS', 'BROODING', 'GAZE'] +237-134500-0028-2373: ref=["I'M", 'SURE', 'ALEXANDRA', 'HOPES', 'YOU', 'WILL', 'STAY', 'ON', 'HERE', 'SHE', 'MURMURED'] +237-134500-0028-2373: hyp=['I', 'AM', 'SURE', 'ALEXANDER', 'HELPS', 'YOU', 'WILL', 'STAY', 'ON', 'HERE', 'SHE', 'MURMURED'] +237-134500-0029-2374: ref=['I', "DON'T", 'WANT', 'TO', 'STAND', 'AROUND', 'AND', 'LOOK', 'ON'] +237-134500-0029-2374: hyp=['I', "DON'T", 'WANT', 'TO', 'STAND', 'AROUND', 'AND', 'LOOK', 'ON'] +237-134500-0030-2375: ref=['I', 'WANT', 'TO', 'BE', 'DOING', 'SOMETHING', 'ON', 'MY', 'OWN', 'ACCOUNT'] +237-134500-0030-2375: hyp=['I', 'WANT', 'TO', 'BE', 'DOING', 'SOMETHING', 'ON', 'MY', 'OWN', 'ACCOUNT'] +237-134500-0031-2376: ref=['SOMETIMES', 'I', "DON'T", 'WANT', 'TO', 'DO', 'ANYTHING', 'AT', 'ALL', 'AND', 'SOMETIMES', 'I', 'WANT', 'TO', 'PULL', 'THE', 'FOUR', 'CORNERS', 'OF', 'THE', 'DIVIDE', 'TOGETHER', 'HE', 'THREW', 'OUT', 'HIS', 'ARM', 'AND', 'BROUGHT', 'IT', 'BACK', 'WITH', 'A', 'JERK', 'SO', 'LIKE', 'A', 'TABLE', 'CLOTH'] +237-134500-0031-2376: hyp=['SOMETIMES', 'I', "DON'T", 'WANT', 'TO', 'DO', 'ANYTHING', 'AT', 'ALL', 'AND', 'SOMETIMES', 'I', 'WANT', 'TO', 'PULL', 'THE', 'FOUR', 'CORNERS', 'OF', 'THE', 'DIVIDE', 'TOGETHER', 'HE', 'THREW', 'OUT', 'HIS', 'ARM', 'AND', 'BROUGHT', 'IT', 'BACK', 'WITH', 'A', 'JERK', 'SO', 'LIKE', 'A', 'TABLECLOTH'] +237-134500-0032-2377: ref=['I', 
'GET', 'TIRED', 'OF', 'SEEING', 'MEN', 'AND', 'HORSES', 'GOING', 'UP', 'AND', 'DOWN', 'UP', 'AND', 'DOWN'] +237-134500-0032-2377: hyp=['I', 'GET', 'TIRED', 'OF', 'SEEING', 'MAN', 'AND', 'HORSES', 'GOING', 'UP', 'AND', 'DOWN', 'UP', 'AND', 'DOWN'] +237-134500-0033-2378: ref=['I', 'WISH', 'YOU', "WEREN'T", 'SO', 'RESTLESS', 'AND', "DIDN'T", 'GET', 'SO', 'WORKED', 'UP', 'OVER', 'THINGS', 'SHE', 'SAID', 'SADLY'] +237-134500-0033-2378: hyp=['I', 'WISH', 'YOU', "WEREN'T", 'SO', 'RESTLESS', 'AND', "DIDN'T", 'GET', 'SO', 'WORKED', 'UP', 'OVER', 'THINGS', 'SHE', 'SAID', 'SADLY'] +237-134500-0034-2379: ref=['THANK', 'YOU', 'HE', 'RETURNED', 'SHORTLY'] +237-134500-0034-2379: hyp=['THANK', 'YOU', 'HE', 'RETURNED', 'SHORTLY'] +237-134500-0035-2380: ref=['AND', 'YOU', 'NEVER', 'USED', 'TO', 'BE', 'CROSS', 'TO', 'ME'] +237-134500-0035-2380: hyp=['AND', 'WHO', 'NEVER', 'USED', 'TO', 'BE', 'CROSS', 'TO', 'ME'] +237-134500-0036-2381: ref=['I', "CAN'T", 'PLAY', 'WITH', 'YOU', 'LIKE', 'A', 'LITTLE', 'BOY', 'ANY', 'MORE', 'HE', 'SAID', 'SLOWLY', "THAT'S", 'WHAT', 'YOU', 'MISS', 'MARIE'] +237-134500-0036-2381: hyp=['I', "CAN'T", 'PLAY', 'WITH', 'YOU', 'LIKE', 'A', 'LITTLE', 'BOY', 'ANY', 'MORE', 'HE', 'SAID', 'SLOWLY', "THAT'S", 'WHAT', 'YOU', 'MISS', 'MARIE'] +237-134500-0037-2382: ref=['BUT', 'EMIL', 'IF', 'I', 'UNDERSTAND', 'THEN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER', 'WE', 'CAN', 'NEVER', 'DO', 'NICE', 'THINGS', 'TOGETHER', 'ANY', 'MORE'] +237-134500-0037-2382: hyp=['BUT', 'AMIEL', 'IF', 'I', 'UNDERSTAND', 'IN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER', 'WE', 'CAN', 'NEVER', 'DO', 'NICE', 'THINGS', 'TOGETHER', 'ANY', 'MORE'] +237-134500-0038-2383: ref=['AND', 'ANYHOW', "THERE'S", 'NOTHING', 'TO', 'UNDERSTAND'] +237-134500-0038-2383: hyp=['AND', 'ANYHOW', "THERE'S", 'NOTHING', 'TOO', 'UNDERSTAND'] +237-134500-0039-2384: ref=['THAT', "WON'T", 'LAST', 'IT', 'WILL', 'GO', 'AWAY', 'AND', 'THINGS', 'WILL', 'BE', 'JUST', 'AS', 'THEY', 'USED', 'TO'] +237-134500-0039-2384: hyp=['THAT', "WON'T", 'LAST', 'IT', 'WILL', 'GO', 'AWAY', 'AND', 'THINGS', 'WILL', 'BE', 'JUST', 'AS', 'THEY', 'USED', 'TO'] +237-134500-0040-2385: ref=['I', 'PRAY', 'FOR', 'YOU', 'BUT', "THAT'S", 'NOT', 'THE', 'SAME', 'AS', 'IF', 'YOU', 'PRAYED', 'YOURSELF'] +237-134500-0040-2385: hyp=['I', 'PRAY', 'FOR', 'YOU', 'BUT', "THAT'S", 'NOT', 'THE', 'SAME', 'AS', 'IF', 'YOU', 'PRAYED', 'YOURSELF'] +237-134500-0041-2386: ref=['I', "CAN'T", 'PRAY', 'TO', 'HAVE', 'THE', 'THINGS', 'I', 'WANT', 'HE', 'SAID', 'SLOWLY', 'AND', 'I', "WON'T", 'PRAY', 'NOT', 'TO', 'HAVE', 'THEM', 'NOT', 'IF', "I'M", 'DAMNED', 'FOR', 'IT'] +237-134500-0041-2386: hyp=['I', "CAN'T", 'PRAY', 'TO', 'HAVE', 'THE', 'THINGS', 'I', 'WANT', 'HE', 'SAID', 'SLOWLY', 'AND', 'I', "WON'T", 'PRAY', 'NOT', 'TO', 'HAVE', 'THEM', 'NOT', 'IF', "I'M", 'DAMNED', 'FOR', 'IT'] +237-134500-0042-2387: ref=['THEN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER'] +237-134500-0042-2387: hyp=['THEN', 'ALL', 'OUR', 'GOOD', 'TIMES', 'ARE', 'OVER'] +260-123286-0000-200: ref=['SATURDAY', 'AUGUST', 'FIFTEENTH', 'THE', 'SEA', 'UNBROKEN', 'ALL', 'ROUND', 'NO', 'LAND', 'IN', 'SIGHT'] +260-123286-0000-200: hyp=['SATURDAY', 'AUGUST', 'FIFTEENTH', 'THE', 'SEA', 'UNBROKEN', 'ALL', 'ROUND', 'NO', 'LAND', 'IN', 'SIGHT'] +260-123286-0001-201: ref=['THE', 'HORIZON', 'SEEMS', 'EXTREMELY', 'DISTANT'] +260-123286-0001-201: hyp=['THE', 'HORIZON', 'SEEMS', 'EXTREMELY', 'DISTANT'] +260-123286-0002-202: ref=['ALL', 'MY', 'DANGER', 'AND', 'SUFFERINGS', 'WERE', 'NEEDED', 'TO', 'STRIKE', 'A', 'SPARK', 'OF', 'HUMAN', 
'FEELING', 'OUT', 'OF', 'HIM', 'BUT', 'NOW', 'THAT', 'I', 'AM', 'WELL', 'HIS', 'NATURE', 'HAS', 'RESUMED', 'ITS', 'SWAY'] +260-123286-0002-202: hyp=['ALL', 'MY', 'DANGER', 'AND', 'SUFFERINGS', 'WERE', 'NEEDED', 'TO', 'STRIKE', 'A', 'SPARK', 'OF', 'HUMAN', 'FEELING', 'OUT', 'OF', 'HIM', 'BUT', 'NOW', 'THAT', 'I', 'AM', 'WELL', 'HIS', 'NATURE', 'HAS', 'RESUMED', 'ITS', 'SWAY'] +260-123286-0003-203: ref=['YOU', 'SEEM', 'ANXIOUS', 'MY', 'UNCLE', 'I', 'SAID', 'SEEING', 'HIM', 'CONTINUALLY', 'WITH', 'HIS', 'GLASS', 'TO', 'HIS', 'EYE', 'ANXIOUS'] +260-123286-0003-203: hyp=['YOU', 'SEEM', 'ANXIOUS', 'MY', 'UNCLE', 'I', 'SAID', 'SEEING', 'HIM', 'CONTINUALLY', 'WITH', 'HIS', 'GLASS', 'TO', 'HIS', 'EYE', 'ANXIOUS'] +260-123286-0004-204: ref=['ONE', 'MIGHT', 'BE', 'WITH', 'LESS', 'REASON', 'THAN', 'NOW'] +260-123286-0004-204: hyp=['ONE', 'MIGHT', 'BE', 'WITH', 'LESS', 'REASON', 'THAN', 'NOW'] +260-123286-0005-205: ref=['I', 'AM', 'NOT', 'COMPLAINING', 'THAT', 'THE', 'RATE', 'IS', 'SLOW', 'BUT', 'THAT', 'THE', 'SEA', 'IS', 'SO', 'WIDE'] +260-123286-0005-205: hyp=['I', 'AM', 'NOT', 'COMPLAINING', 'THAT', 'THE', 'RATE', 'IS', 'SLOW', 'BUT', 'THAT', 'THE', 'SEAT', 'IS', 'SO', 'WIDE'] +260-123286-0006-206: ref=['WE', 'ARE', 'LOSING', 'TIME', 'AND', 'THE', 'FACT', 'IS', 'I', 'HAVE', 'NOT', 'COME', 'ALL', 'THIS', 'WAY', 'TO', 'TAKE', 'A', 'LITTLE', 'SAIL', 'UPON', 'A', 'POND', 'ON', 'A', 'RAFT'] +260-123286-0006-206: hyp=['WE', 'ARE', 'LOSING', 'TIME', 'AND', 'THE', 'FACT', 'IS', 'I', 'HAVE', 'NOT', 'COME', 'ALL', 'THIS', 'WAY', 'TO', 'TAKE', 'A', 'LITTLE', 'SAIL', 'UPON', 'A', 'POND', 'ON', 'A', 'RAFT'] +260-123286-0007-207: ref=['HE', 'CALLED', 'THIS', 'SEA', 'A', 'POND', 'AND', 'OUR', 'LONG', 'VOYAGE', 'TAKING', 'A', 'LITTLE', 'SAIL'] +260-123286-0007-207: hyp=['HE', 'CALLED', 'THE', 'SEA', 'UPON', 'AND', 'OUR', 'LONG', 'VOYAGE', 'TAKING', 'A', 'LITTLE', 'SAIL'] +260-123286-0008-208: ref=['THEREFORE', "DON'T", 'TALK', 'TO', 'ME', 'ABOUT', 'VIEWS', 'AND', 'PROSPECTS'] +260-123286-0008-208: hyp=['THEREFORE', "DON'T", 'TALK', 'TO', 'ME', 'ABOUT', 'VIEWS', 'AND', 'PROSPECTS'] +260-123286-0009-209: ref=['I', 'TAKE', 'THIS', 'AS', 'MY', 'ANSWER', 'AND', 'I', 'LEAVE', 'THE', 'PROFESSOR', 'TO', 'BITE', 'HIS', 'LIPS', 'WITH', 'IMPATIENCE'] +260-123286-0009-209: hyp=['I', 'TAKE', 'THIS', 'AS', 'MY', 'ANSWER', 'AND', 'I', 'LEAVE', 'THE', 'PROFESSOR', 'TO', 'BITE', 'HIS', 'LIPS', 'WITH', 'IMPATIENCE'] +260-123286-0010-210: ref=['SUNDAY', 'AUGUST', 'SIXTEENTH'] +260-123286-0010-210: hyp=['SUNDAY', 'AUGUST', 'SIXTEENTH'] +260-123286-0011-211: ref=['NOTHING', 'NEW', 'WEATHER', 'UNCHANGED', 'THE', 'WIND', 'FRESHENS'] +260-123286-0011-211: hyp=['NOTHING', 'NEW', 'WHETHER', 'UNCHANGED', 'THE', 'WIND', 'FRESHENS'] +260-123286-0012-212: ref=['BUT', 'THERE', 'SEEMED', 'NO', 'REASON', 'TO', 'FEAR'] +260-123286-0012-212: hyp=['BUT', 'THERE', 'SEEMED', 'NO', 'REASON', 'OF', 'FEAR'] +260-123286-0013-213: ref=['THE', 'SHADOW', 'OF', 'THE', 'RAFT', 'WAS', 'CLEARLY', 'OUTLINED', 'UPON', 'THE', 'SURFACE', 'OF', 'THE', 'WAVES'] +260-123286-0013-213: hyp=['THE', 'SHADOW', 'OF', 'THE', 'RAFT', 'WAS', 'CLEARLY', 'OUTLINED', 'UPON', 'THE', 'SURFACE', 'OF', 'THE', 'WAVES'] +260-123286-0014-214: ref=['TRULY', 'THIS', 'SEA', 'IS', 'OF', 'INFINITE', 'WIDTH'] +260-123286-0014-214: hyp=['TRULY', 'THE', 'SEA', 'IS', 'OF', 'INFINITE', 'WIDTH'] +260-123286-0015-215: ref=['IT', 'MUST', 'BE', 'AS', 'WIDE', 'AS', 'THE', 'MEDITERRANEAN', 'OR', 'THE', 'ATLANTIC', 'AND', 'WHY', 'NOT'] +260-123286-0015-215: hyp=['IT', 'MUST', 'BE', 'AS', 'WIDE', 'AS', 
'THE', 'MEDITERRANEAN', 'OR', 'THE', 'ATLANTIC', 'AND', 'WHY', 'NOT'] +260-123286-0016-216: ref=['THESE', 'THOUGHTS', 'AGITATED', 'ME', 'ALL', 'DAY', 'AND', 'MY', 'IMAGINATION', 'SCARCELY', 'CALMED', 'DOWN', 'AFTER', 'SEVERAL', 'HOURS', 'SLEEP'] +260-123286-0016-216: hyp=['THESE', 'THOUGHTS', 'AGITATED', 'ME', 'ALL', 'DAY', 'AND', 'MY', 'IMAGINATION', 'SCARCELY', 'CALMED', 'DOWN', 'AFTER', 'SEVERAL', 'HOURS', 'SLEEVE'] +260-123286-0017-217: ref=['I', 'SHUDDER', 'AS', 'I', 'RECALL', 'THESE', 'MONSTERS', 'TO', 'MY', 'REMEMBRANCE'] +260-123286-0017-217: hyp=['I', 'SHUDDER', 'AS', 'I', 'RECALL', 'THESE', 'MONSTERS', 'TO', 'MY', 'REMEMBRANCE'] +260-123286-0018-218: ref=['I', 'SAW', 'AT', 'THE', 'HAMBURG', 'MUSEUM', 'THE', 'SKELETON', 'OF', 'ONE', 'OF', 'THESE', 'CREATURES', 'THIRTY', 'FEET', 'IN', 'LENGTH'] +260-123286-0018-218: hyp=['I', 'SAW', 'AT', 'THE', 'HAMBURG', 'MUSEUM', 'THE', 'SKELETON', 'OF', 'ONE', 'OF', 'THESE', 'CREATURES', 'THIRTY', 'FEET', 'IN', 'LENGTH'] +260-123286-0019-219: ref=['I', 'SUPPOSE', 'PROFESSOR', 'LIEDENBROCK', 'WAS', 'OF', 'MY', 'OPINION', 'TOO', 'AND', 'EVEN', 'SHARED', 'MY', 'FEARS', 'FOR', 'AFTER', 'HAVING', 'EXAMINED', 'THE', 'PICK', 'HIS', 'EYES', 'TRAVERSED', 'THE', 'OCEAN', 'FROM', 'SIDE', 'TO', 'SIDE'] +260-123286-0019-219: hyp=['I', 'SUPPOSE', 'PROFESSOR', 'LIEDENBROCK', 'WAS', 'OF', 'MY', 'OPINION', 'TOO', 'AND', 'EVEN', 'SHARED', 'MY', 'FEARS', 'FOR', 'AFTER', 'HAVING', 'EXAMINED', 'THE', 'PICK', 'HIS', 'EYES', 'TRAVERSED', 'THE', 'OCEAN', 'FROM', 'SIDE', 'TO', 'SIDE'] +260-123286-0020-220: ref=['TUESDAY', 'AUGUST', 'EIGHTEENTH'] +260-123286-0020-220: hyp=['TUESDAY', 'AUGUST', 'EIGHTEENTH'] +260-123286-0021-221: ref=['DURING', 'HIS', 'WATCH', 'I', 'SLEPT'] +260-123286-0021-221: hyp=['DURING', 'HIS', 'WATCH', 'I', 'SLEPT'] +260-123286-0022-222: ref=['TWO', 'HOURS', 'AFTERWARDS', 'A', 'TERRIBLE', 'SHOCK', 'AWOKE', 'ME'] +260-123286-0022-222: hyp=['TWO', 'HOURS', 'AFTERWARDS', 'A', 'TERRIBLE', 'SHOCK', 'AWOKE', 'ME'] +260-123286-0023-223: ref=['THE', 'RAFT', 'WAS', 'HEAVED', 'UP', 'ON', 'A', 'WATERY', 'MOUNTAIN', 'AND', 'PITCHED', 'DOWN', 'AGAIN', 'AT', 'A', 'DISTANCE', 'OF', 'TWENTY', 'FATHOMS'] +260-123286-0023-223: hyp=['THE', 'RAFT', 'WAS', 'HEAVED', 'UP', 'ON', 'A', 'WATERY', 'MOUNTAIN', 'AND', 'PITCHED', 'DOWN', 'AGAIN', 'AT', 'A', 'DISTANCE', 'OF', 'TWENTY', 'FATHOMS'] +260-123286-0024-224: ref=["THERE'S", 'A', 'WHALE', 'A', 'WHALE', 'CRIED', 'THE', 'PROFESSOR'] +260-123286-0024-224: hyp=["THERE'S", 'A', 'WAIL', 'A', 'WELL', 'CRIED', 'THE', 'PROFESSOR'] +260-123286-0025-225: ref=['FLIGHT', 'WAS', 'OUT', 'OF', 'THE', 'QUESTION', 'NOW', 'THE', 'REPTILES', 'ROSE', 'THEY', 'WHEELED', 'AROUND', 'OUR', 'LITTLE', 'RAFT', 'WITH', 'A', 'RAPIDITY', 'GREATER', 'THAN', 'THAT', 'OF', 'EXPRESS', 'TRAINS'] +260-123286-0025-225: hyp=['FIGHT', 'WAS', 'OUT', 'OF', 'THE', 'QUESTION', 'NOW', 'THE', 'REPTILES', 'ROSE', 'THEY', 'WHEELED', 'AROUND', 'OUR', 'LITTLE', 'RAFT', 'WITH', 'A', 'RAPIDITY', 'GREATER', 'THAN', 'THAT', 'OF', 'EXPRESS', 'TRAINS'] +260-123286-0026-226: ref=['TWO', 'MONSTERS', 'ONLY', 'WERE', 'CREATING', 'ALL', 'THIS', 'COMMOTION', 'AND', 'BEFORE', 'MY', 'EYES', 'ARE', 'TWO', 'REPTILES', 'OF', 'THE', 'PRIMITIVE', 'WORLD'] +260-123286-0026-226: hyp=['TWO', 'MASTERS', 'ONLY', 'WERE', 'CREATING', 'ALL', 'THIS', 'COMMOTION', 'AND', 'BEFORE', 'MY', 'EYES', 'ARE', 'TOO', 'REPTILES', 'OF', 'THE', 'PRIMITIVE', 'WORLD'] +260-123286-0027-227: ref=['I', 'CAN', 'DISTINGUISH', 'THE', 'EYE', 'OF', 'THE', 'ICHTHYOSAURUS', 'GLOWING', 'LIKE', 'A', 'RED', 'HOT', 'COAL', 
'AND', 'AS', 'LARGE', 'AS', 'A', "MAN'S", 'HEAD'] +260-123286-0027-227: hyp=['I', 'CAN', 'DISTINGUISH', 'THE', 'EYE', 'OF', 'THE', 'ICT', 'THEASURUS', 'GLOWING', 'LIKE', 'A', 'RED', 'HOT', 'CO', 'AND', 'AS', 'LARGE', 'AS', 'A', "MAN'S", 'HEAD'] +260-123286-0028-228: ref=['ITS', 'JAW', 'IS', 'ENORMOUS', 'AND', 'ACCORDING', 'TO', 'NATURALISTS', 'IT', 'IS', 'ARMED', 'WITH', 'NO', 'LESS', 'THAN', 'ONE', 'HUNDRED', 'AND', 'EIGHTY', 'TWO', 'TEETH'] +260-123286-0028-228: hyp=['ITS', 'JAW', 'IS', 'ENORMOUS', 'AND', 'ACCORDING', 'TO', 'NATURALISTS', 'IT', 'IS', 'ARMED', 'WITH', 'NO', 'LESS', 'THAN', 'ONE', 'HUNDRED', 'AND', 'EIGHTY', 'TWO', 'TEETH'] +260-123286-0029-229: ref=['THOSE', 'HUGE', 'CREATURES', 'ATTACKED', 'EACH', 'OTHER', 'WITH', 'THE', 'GREATEST', 'ANIMOSITY'] +260-123286-0029-229: hyp=['THOSE', 'HUGE', 'CREATURES', 'ATTACKED', 'EACH', 'OTHER', 'WITH', 'THE', 'GREATEST', 'ANIMOSITY'] +260-123286-0030-230: ref=['SUDDENLY', 'THE', 'ICHTHYOSAURUS', 'AND', 'THE', 'PLESIOSAURUS', 'DISAPPEAR', 'BELOW', 'LEAVING', 'A', 'WHIRLPOOL', 'EDDYING', 'IN', 'THE', 'WATER'] +260-123286-0030-230: hyp=['SUDDENLY', 'THE', 'ICTUSORIS', 'AND', 'THE', 'PLUSIASURUS', 'DISAPPEAR', 'BELOW', 'LEAVING', 'A', 'WAR', 'POOL', 'EDDYING', 'IN', 'THE', 'WATER'] +260-123286-0031-231: ref=['AS', 'FOR', 'THE', 'ICHTHYOSAURUS', 'HAS', 'HE', 'RETURNED', 'TO', 'HIS', 'SUBMARINE', 'CAVERN'] +260-123286-0031-231: hyp=['AS', 'FOR', 'THE', 'ITTHIASORIS', 'HAS', 'HE', 'RETURNED', 'WHOSE', 'SUBMARINE', 'CAVERN'] +260-123288-0000-232: ref=['THE', 'ROARINGS', 'BECOME', 'LOST', 'IN', 'THE', 'DISTANCE'] +260-123288-0000-232: hyp=['THE', 'ROARINGS', 'BECOME', 'LOST', 'IN', 'THE', 'DISTANCE'] +260-123288-0001-233: ref=['THE', 'WEATHER', 'IF', 'WE', 'MAY', 'USE', 'THAT', 'TERM', 'WILL', 'CHANGE', 'BEFORE', 'LONG'] +260-123288-0001-233: hyp=['THE', 'WEATHER', 'IF', 'WE', 'MAY', 'USE', 'THE', 'TERM', 'WILL', 'CHANGE', 'BEFORE', 'LAWN'] +260-123288-0002-234: ref=['THE', 'ATMOSPHERE', 'IS', 'CHARGED', 'WITH', 'VAPOURS', 'PERVADED', 'WITH', 'THE', 'ELECTRICITY', 'GENERATED', 'BY', 'THE', 'EVAPORATION', 'OF', 'SALINE', 'WATERS'] +260-123288-0002-234: hyp=['THE', 'ATMOSPHERE', 'IS', 'CHARGED', 'WITH', 'VAPORS', 'PERVADED', 'WITH', 'THE', 'ELECTRICITY', 'GENERATED', 'BY', 'THE', 'EVAPORATION', 'OF', 'SAILING', 'WATERS'] +260-123288-0003-235: ref=['THE', 'ELECTRIC', 'LIGHT', 'CAN', 'SCARCELY', 'PENETRATE', 'THROUGH', 'THE', 'DENSE', 'CURTAIN', 'WHICH', 'HAS', 'DROPPED', 'OVER', 'THE', 'THEATRE', 'ON', 'WHICH', 'THE', 'BATTLE', 'OF', 'THE', 'ELEMENTS', 'IS', 'ABOUT', 'TO', 'BE', 'WAGED'] +260-123288-0003-235: hyp=['THE', 'ELECTRIC', 'LIGHT', 'CAN', 'SCARCELY', 'PENETRATE', 'THROUGH', 'THE', 'DENSE', 'CURTAIN', 'WHICH', 'IS', 'DROPPED', 'OVER', 'THE', 'THEATRE', 'ON', 'WHICH', 'THE', 'BATTLE', 'OF', 'THE', 'ELEMENTS', 'IS', 'ABOUT', 'TO', 'BE', 'WAGED'] +260-123288-0004-236: ref=['THE', 'AIR', 'IS', 'HEAVY', 'THE', 'SEA', 'IS', 'CALM'] +260-123288-0004-236: hyp=['THE', 'AIR', 'IS', 'HEAVY', 'THE', 'SEA', 'IS', 'CALM'] +260-123288-0005-237: ref=['FROM', 'TIME', 'TO', 'TIME', 'A', 'FLEECY', 'TUFT', 'OF', 'MIST', 'WITH', 'YET', 'SOME', 'GLEAMING', 'LIGHT', 'LEFT', 'UPON', 'IT', 'DROPS', 'DOWN', 'UPON', 'THE', 'DENSE', 'FLOOR', 'OF', 'GREY', 'AND', 'LOSES', 'ITSELF', 'IN', 'THE', 'OPAQUE', 'AND', 'IMPENETRABLE', 'MASS'] +260-123288-0005-237: hyp=['FROM', 'TIME', 'TO', 'TIME', 'A', 'FLEECY', 'TUFT', 'OF', 'MISTS', 'WITH', 'YET', 'SOME', 'GLEAMING', 'LIGHT', 'LEFT', 'UPON', 'IT', 'DROPS', 'DOWN', 'UPON', 'THE', 'DENSE', 'FLOOR', 'OF', 'GREY', 'AND', 
'LOSES', 'ITSELF', 'IN', 'THE', 'OPE', 'AND', 'IMPENETRABLE', 'MASS'] +260-123288-0006-238: ref=['THE', 'ATMOSPHERE', 'IS', 'EVIDENTLY', 'CHARGED', 'AND', 'SURCHARGED', 'WITH', 'ELECTRICITY'] +260-123288-0006-238: hyp=['THE', 'ATMOSPHERE', 'AS', 'EVIDENTLY', 'CHARGED', 'IN', 'SURCHARGED', 'WITH', 'ELECTRICITY'] +260-123288-0007-239: ref=['THE', 'WIND', 'NEVER', 'LULLS', 'BUT', 'TO', 'ACQUIRE', 'INCREASED', 'STRENGTH', 'THE', 'VAST', 'BANK', 'OF', 'HEAVY', 'CLOUDS', 'IS', 'A', 'HUGE', 'RESERVOIR', 'OF', 'FEARFUL', 'WINDY', 'GUSTS', 'AND', 'RUSHING', 'STORMS'] +260-123288-0007-239: hyp=['THE', 'WIND', 'NEVER', 'LULLS', 'BUT', 'TO', 'ACQUIRE', 'INCREASED', 'STRENGTH', 'THE', 'VAST', 'BANK', 'OF', 'HEAVY', 'CLOUDS', 'IS', 'A', 'HUGE', 'RESERVOIR', 'OF', 'FEARFUL', 'WINDY', 'GUSTS', 'AND', 'RUSHING', 'STORMS'] +260-123288-0008-240: ref=["THERE'S", 'A', 'HEAVY', 'STORM', 'COMING', 'ON', 'I', 'CRIED', 'POINTING', 'TOWARDS', 'THE', 'HORIZON'] +260-123288-0008-240: hyp=["THERE'S", 'A', 'HEAVY', 'STORM', 'COMING', 'ON', 'I', 'CRIED', 'POINTING', 'TOWARDS', 'THE', 'HORIZON'] +260-123288-0009-241: ref=['THOSE', 'CLOUDS', 'SEEM', 'AS', 'IF', 'THEY', 'WERE', 'GOING', 'TO', 'CRUSH', 'THE', 'SEA'] +260-123288-0009-241: hyp=['THOSE', 'CLOUDS', 'SEEM', 'AS', 'IF', 'THEY', 'WERE', 'GOING', 'TO', 'CRUSH', 'THE', 'SEA'] +260-123288-0010-242: ref=['ON', 'THE', 'MAST', 'ALREADY', 'I', 'SEE', 'THE', 'LIGHT', 'PLAY', 'OF', 'A', 'LAMBENT', 'SAINT', "ELMO'S", 'FIRE', 'THE', 'OUTSTRETCHED', 'SAIL', 'CATCHES', 'NOT', 'A', 'BREATH', 'OF', 'WIND', 'AND', 'HANGS', 'LIKE', 'A', 'SHEET', 'OF', 'LEAD'] +260-123288-0010-242: hyp=['ON', 'THE', 'MAST', 'ALREADY', 'I', 'SEE', 'THE', 'LIGHT', 'PLAY', 'OF', 'A', 'LAMENT', 'SAINT', "AIRABLE'S", 'FIRE', 'THE', 'OUTSTRETCHED', 'SILL', 'CATCHES', 'NOT', 'A', 'BREATH', 'OF', 'WIND', 'AND', 'HANGS', 'LIKE', 'A', 'SHEET', 'OF', 'LEAD'] +260-123288-0011-243: ref=['BUT', 'IF', 'WE', 'HAVE', 'NOW', 'CEASED', 'TO', 'ADVANCE', 'WHY', 'DO', 'WE', 'YET', 'LEAVE', 'THAT', 'SAIL', 'LOOSE', 'WHICH', 'AT', 'THE', 'FIRST', 'SHOCK', 'OF', 'THE', 'TEMPEST', 'MAY', 'CAPSIZE', 'US', 'IN', 'A', 'MOMENT'] +260-123288-0011-243: hyp=['BUT', 'IF', 'WE', 'HAVE', 'NOW', 'CEASED', 'TO', 'ADVANCE', 'WHY', 'DO', 'WE', 'YET', 'LEAVE', 'THAT', 'SALE', 'LOOSE', 'WHICH', 'AT', 'THE', 'FIRST', 'SHOCK', 'OF', 'A', 'TEMPEST', 'MAY', 'CAPSIZE', 'US', 'IN', 'A', 'MOMENT'] +260-123288-0012-244: ref=['THAT', 'WILL', 'BE', 'SAFEST', 'NO', 'NO', 'NEVER'] +260-123288-0012-244: hyp=['THAT', 'WILL', 'BE', 'THE', 'SAFEST', 'NO', 'NO', 'NEVER'] +260-123288-0013-245: ref=['THE', 'PILED', 'UP', 'VAPOURS', 'CONDENSE', 'INTO', 'WATER', 'AND', 'THE', 'AIR', 'PUT', 'INTO', 'VIOLENT', 'ACTION', 'TO', 'SUPPLY', 'THE', 'VACUUM', 'LEFT', 'BY', 'THE', 'CONDENSATION', 'OF', 'THE', 'MISTS', 'ROUSES', 'ITSELF', 'INTO', 'A', 'WHIRLWIND'] +260-123288-0013-245: hyp=['THE', 'PILED', 'UP', 'VAPORS', 'CONTENTS', 'INTO', 'WATER', 'AND', 'THE', 'AIR', 'PUT', 'INTO', 'VIOLENT', 'ACTION', 'TO', 'SUPPLY', 'THE', 'VACUUM', 'LEFT', 'BY', 'THE', 'CONDENSATION', 'OF', 'THE', 'MIST', 'ROUSES', 'ITSELF', 'INTO', 'A', 'WHIRLWIND'] +260-123288-0014-246: ref=['HANS', 'STIRS', 'NOT'] +260-123288-0014-246: hyp=['HANS', 'STIRS', 'NOT'] +260-123288-0015-247: ref=['FROM', 'THE', 'UNDER', 'SURFACE', 'OF', 'THE', 'CLOUDS', 'THERE', 'ARE', 'CONTINUAL', 'EMISSIONS', 'OF', 'LURID', 'LIGHT', 'ELECTRIC', 'MATTER', 'IS', 'IN', 'CONTINUAL', 'EVOLUTION', 'FROM', 'THEIR', 'COMPONENT', 'MOLECULES', 'THE', 'GASEOUS', 'ELEMENTS', 'OF', 'THE', 'AIR', 'NEED', 'TO', 'BE', 'SLAKED', 
'WITH', 'MOISTURE', 'FOR', 'INNUMERABLE', 'COLUMNS', 'OF', 'WATER', 'RUSH', 'UPWARDS', 'INTO', 'THE', 'AIR', 'AND', 'FALL', 'BACK', 'AGAIN', 'IN', 'WHITE', 'FOAM'] +260-123288-0015-247: hyp=['FROM', 'THE', 'UNDER', 'SURFACE', 'OF', 'THE', 'CLOUDS', 'THERE', 'ARE', 'CONTINUAL', 'MISSIONS', 'OF', 'LURID', 'LIGHT', 'ELECTRIC', 'MATTER', 'IS', 'IN', 'CONTINUAL', 'EVOLUTION', 'FROM', 'THEIR', 'COMPONENT', 'MOLECULES', 'THE', 'GASEOUS', 'ELEMENTS', 'OF', 'THE', 'AIR', 'NEED', 'TO', 'BE', 'SLAKED', 'WITH', 'MOISTURE', 'FOR', 'INNUMERABLE', 'COLUMNS', 'OF', 'WATER', 'RUSH', 'UPWARDS', 'INTO', 'THE', 'AIR', 'AND', 'FALL', 'BACK', 'AGAIN', 'IN', 'WHITE', 'FOAM'] +260-123288-0016-248: ref=['I', 'REFER', 'TO', 'THE', 'THERMOMETER', 'IT', 'INDICATES', 'THE', 'FIGURE', 'IS', 'OBLITERATED'] +260-123288-0016-248: hyp=['I', 'REFER', 'TO', 'THE', 'THERMOMETER', 'IT', 'INDICATES', 'THE', 'FIGURE', 'IS', 'OBLITERATED'] +260-123288-0017-249: ref=['IS', 'THE', 'ATMOSPHERIC', 'CONDITION', 'HAVING', 'ONCE', 'REACHED', 'THIS', 'DENSITY', 'TO', 'BECOME', 'FINAL'] +260-123288-0017-249: hyp=['IS', 'THE', 'ATMOSPHERIC', 'CONDITION', 'HAVING', 'ONCE', 'REACHED', 'OSTENSITY', 'TO', 'BECOME', 'FINAL'] +260-123288-0018-250: ref=['THE', 'RAFT', 'BEARS', 'ON', 'STILL', 'TO', 'THE', 'SOUTH', 'EAST'] +260-123288-0018-250: hyp=['THE', 'RAFT', 'BEARS', 'ON', 'STILL', 'TO', 'THE', 'SOUTH', 'EAST'] +260-123288-0019-251: ref=['AT', 'NOON', 'THE', 'VIOLENCE', 'OF', 'THE', 'STORM', 'REDOUBLES'] +260-123288-0019-251: hyp=['AT', 'NOON', 'THE', 'VIOLENCE', 'OF', 'THE', 'STORM', 'REDOUBLES'] +260-123288-0020-252: ref=['EACH', 'OF', 'US', 'IS', 'LASHED', 'TO', 'SOME', 'PART', 'OF', 'THE', 'RAFT'] +260-123288-0020-252: hyp=['EACH', 'OF', 'US', 'IS', 'LASHED', 'IN', 'SOME', 'PART', 'OF', 'THE', 'RAFT'] +260-123288-0021-253: ref=['THE', 'WAVES', 'RISE', 'ABOVE', 'OUR', 'HEADS'] +260-123288-0021-253: hyp=['THE', 'WAVES', 'RISE', 'ABOVE', 'OUR', 'HEADS'] +260-123288-0022-254: ref=['THEY', 'SEEM', 'TO', 'BE', 'WE', 'ARE', 'LOST', 'BUT', 'I', 'AM', 'NOT', 'SURE'] +260-123288-0022-254: hyp=['THEY', 'SEEMED', 'TO', 'BE', 'WE', 'ARE', 'LOST', 'BUT', 'I', 'AM', 'NOT', 'SURE'] +260-123288-0023-255: ref=['HE', 'NODS', 'HIS', 'CONSENT'] +260-123288-0023-255: hyp=['HE', 'GNAWEDS', 'HIS', 'CONSENT'] +260-123288-0024-256: ref=['THE', 'FIREBALL', 'HALF', 'OF', 'IT', 'WHITE', 'HALF', 'AZURE', 'BLUE', 'AND', 'THE', 'SIZE', 'OF', 'A', 'TEN', 'INCH', 'SHELL', 'MOVED', 'SLOWLY', 'ABOUT', 'THE', 'RAFT', 'BUT', 'REVOLVING', 'ON', 'ITS', 'OWN', 'AXIS', 'WITH', 'ASTONISHING', 'VELOCITY', 'AS', 'IF', 'WHIPPED', 'ROUND', 'BY', 'THE', 'FORCE', 'OF', 'THE', 'WHIRLWIND'] +260-123288-0024-256: hyp=['THE', 'FIRE', 'BALL', 'HALF', 'OF', 'IT', 'WHITE', 'HALF', 'AZURE', 'BLUE', 'AND', 'THE', 'SIZE', 'OF', 'A', 'TEN', 'INCH', 'CHILL', 'MOVED', 'SLOWLY', 'ABOUT', 'THE', 'RAFT', 'BUT', 'REVOLVING', 'ON', 'ITS', 'OWN', 'AXIS', 'WITH', 'ASTONISHING', 'VELOCITY', 'AS', 'IF', 'WHIPPED', 'ROUND', 'BY', 'THE', 'FORCE', 'OF', 'THE', 'WHIRLWIND'] +260-123288-0025-257: ref=['HERE', 'IT', 'COMES', 'THERE', 'IT', 'GLIDES', 'NOW', 'IT', 'IS', 'UP', 'THE', 'RAGGED', 'STUMP', 'OF', 'THE', 'MAST', 'THENCE', 'IT', 'LIGHTLY', 'LEAPS', 'ON', 'THE', 'PROVISION', 'BAG', 'DESCENDS', 'WITH', 'A', 'LIGHT', 'BOUND', 'AND', 'JUST', 'SKIMS', 'THE', 'POWDER', 'MAGAZINE', 'HORRIBLE'] +260-123288-0025-257: hyp=['HERE', 'IT', 'COMES', 'THERE', 'IT', 'GLIDES', 'NOW', 'IT', 'IS', 'UP', 'THE', 'RAGGED', 'STUMP', 'OF', 'THE', 'MAST', 'THENCE', 'IT', 'LIGHTLY', 'LEAPS', 'ON', 'THE', 'PROVISION', 'BAG', 
'DESCENDS', 'WITH', 'A', 'LIGHT', 'BOUND', 'AND', 'JUST', 'SKIMS', 'THE', 'POWDER', 'MAGAZINE', 'HORRIBLE'] +260-123288-0026-258: ref=['WE', 'SHALL', 'BE', 'BLOWN', 'UP', 'BUT', 'NO', 'THE', 'DAZZLING', 'DISK', 'OF', 'MYSTERIOUS', 'LIGHT', 'NIMBLY', 'LEAPS', 'ASIDE', 'IT', 'APPROACHES', 'HANS', 'WHO', 'FIXES', 'HIS', 'BLUE', 'EYE', 'UPON', 'IT', 'STEADILY', 'IT', 'THREATENS', 'THE', 'HEAD', 'OF', 'MY', 'UNCLE', 'WHO', 'FALLS', 'UPON', 'HIS', 'KNEES', 'WITH', 'HIS', 'HEAD', 'DOWN', 'TO', 'AVOID', 'IT'] +260-123288-0026-258: hyp=['WE', 'SHALL', 'BE', 'BLOWN', 'UP', 'BUT', 'NO', 'THE', 'DAZZLING', 'DISK', 'OF', 'MYSTERIOUS', 'LIGHT', 'NIMBLY', 'LEAPS', 'ASIDE', 'IT', 'APPROACHES', 'HANS', 'WHO', 'FIXES', 'HIS', 'BLUE', 'EYE', 'UPON', 'IT', 'STEADILY', 'IT', 'THREATENS', 'THE', 'HEAD', 'OF', 'MY', 'UNCLE', 'WHO', 'FALLS', 'UPON', 'HIS', 'KNEES', 'WITH', 'HIS', 'HEAD', 'DOWN', 'TO', 'AVOID', 'IT'] +260-123288-0027-259: ref=['A', 'SUFFOCATING', 'SMELL', 'OF', 'NITROGEN', 'FILLS', 'THE', 'AIR', 'IT', 'ENTERS', 'THE', 'THROAT', 'IT', 'FILLS', 'THE', 'LUNGS'] +260-123288-0027-259: hyp=['A', 'SUFFOCATING', 'SMELL', 'OF', 'NITROGEN', 'FILLS', 'THE', 'AIR', 'IT', 'ENTERS', 'THE', 'THROAT', 'IT', 'FILLS', 'THE', 'LUNGS'] +260-123288-0028-260: ref=['WE', 'SUFFER', 'STIFLING', 'PAINS'] +260-123288-0028-260: hyp=['WE', 'SUFFER', 'STIFLING', 'PAINS'] +260-123440-0000-179: ref=['AND', 'HOW', 'ODD', 'THE', 'DIRECTIONS', 'WILL', 'LOOK'] +260-123440-0000-179: hyp=['AND', 'HOW', 'ODD', 'THE', 'DIRECTIONS', 'WILL', 'LOOK'] +260-123440-0001-180: ref=['POOR', 'ALICE'] +260-123440-0001-180: hyp=['POOR', 'ALICE'] +260-123440-0002-181: ref=['IT', 'WAS', 'THE', 'WHITE', 'RABBIT', 'RETURNING', 'SPLENDIDLY', 'DRESSED', 'WITH', 'A', 'PAIR', 'OF', 'WHITE', 'KID', 'GLOVES', 'IN', 'ONE', 'HAND', 'AND', 'A', 'LARGE', 'FAN', 'IN', 'THE', 'OTHER', 'HE', 'CAME', 'TROTTING', 'ALONG', 'IN', 'A', 'GREAT', 'HURRY', 'MUTTERING', 'TO', 'HIMSELF', 'AS', 'HE', 'CAME', 'OH', 'THE', 'DUCHESS', 'THE', 'DUCHESS'] +260-123440-0002-181: hyp=['IT', 'WAS', 'THE', 'WHITE', 'RABBIT', 'RETURNING', 'SPLENDIDLY', 'DRESSED', 'WITH', 'A', 'PAIR', 'OF', 'WHITE', 'KID', 'GLOVES', 'IN', 'ONE', 'HAND', 'AND', 'A', 'LARGE', 'FAN', 'IN', 'THE', 'OTHER', 'HE', 'CAME', 'TROTTING', 'ALONG', 'IN', 'A', 'GREAT', 'HURRY', 'MUTTERING', 'TO', 'HIMSELF', 'AS', 'HE', 'CAME', 'OH', 'THE', 'DUCHESS', 'THE', 'DUCHESS'] +260-123440-0003-182: ref=['OH', "WON'T", 'SHE', 'BE', 'SAVAGE', 'IF', "I'VE", 'KEPT', 'HER', 'WAITING'] +260-123440-0003-182: hyp=['OH', "WON'T", 'SHE', 'BE', 'SAVAGE', 'IF', "I'VE", 'KEPT', 'HER', 'WAITING'] +260-123440-0004-183: ref=['ALICE', 'TOOK', 'UP', 'THE', 'FAN', 'AND', 'GLOVES', 'AND', 'AS', 'THE', 'HALL', 'WAS', 'VERY', 'HOT', 'SHE', 'KEPT', 'FANNING', 'HERSELF', 'ALL', 'THE', 'TIME', 'SHE', 'WENT', 'ON', 'TALKING', 'DEAR', 'DEAR', 'HOW', 'QUEER', 'EVERYTHING', 'IS', 'TO', 'DAY'] +260-123440-0004-183: hyp=['ALICE', 'TOOK', 'UP', 'THE', 'FAN', 'AND', 'GLOVES', 'AND', 'AS', 'THE', 'HALL', 'WAS', 'VERY', 'HOT', 'SHE', 'KEPT', 'FANNING', 'HERSELF', 'ALL', 'THE', 'TIME', 'SHE', 'WENT', 'ON', 'TALKING', 'DEAR', 'DEAR', 'HOW', 'QUEER', 'EVERYTHING', 'IS', 'TO', 'DAY'] +260-123440-0005-184: ref=['AND', 'YESTERDAY', 'THINGS', 'WENT', 'ON', 'JUST', 'AS', 'USUAL'] +260-123440-0005-184: hyp=['AND', 'YESTERDAY', 'THANKS', 'WENT', 'ON', 'JUST', 'AS', 'USUAL'] +260-123440-0006-185: ref=['I', 'WONDER', 'IF', "I'VE", 'BEEN', 'CHANGED', 'IN', 'THE', 'NIGHT'] +260-123440-0006-185: hyp=['I', 'WONDER', 'IF', "I'VE", 'BEEN', 'CHANGED', 'IN', 'THE', 'NIGHT'] 
+260-123440-0007-186: ref=['I', 'ALMOST', 'THINK', 'I', 'CAN', 'REMEMBER', 'FEELING', 'A', 'LITTLE', 'DIFFERENT'] +260-123440-0007-186: hyp=['I', 'ALMOST', 'THINK', 'I', 'CAN', 'REMEMBER', 'FEELING', 'LITTLE', 'DIFFERENT'] +260-123440-0008-187: ref=["I'LL", 'TRY', 'IF', 'I', 'KNOW', 'ALL', 'THE', 'THINGS', 'I', 'USED', 'TO', 'KNOW'] +260-123440-0008-187: hyp=["I'LL", 'TRY', 'IF', 'I', 'KNOW', 'ALL', 'THE', 'THINGS', 'I', 'USED', 'TO', 'KNOW'] +260-123440-0009-188: ref=['I', 'SHALL', 'NEVER', 'GET', 'TO', 'TWENTY', 'AT', 'THAT', 'RATE'] +260-123440-0009-188: hyp=['I', 'SHALL', 'NEVER', 'GET', 'TO', 'TWENTY', 'AT', 'THAT', 'RATE'] +260-123440-0010-189: ref=['HOW', 'CHEERFULLY', 'HE', 'SEEMS', 'TO', 'GRIN', 'HOW', 'NEATLY', 'SPREAD', 'HIS', 'CLAWS', 'AND', 'WELCOME', 'LITTLE', 'FISHES', 'IN', 'WITH', 'GENTLY', 'SMILING', 'JAWS'] +260-123440-0010-189: hyp=['HOW', 'CHEERFULLY', 'HE', 'SEEMS', 'TO', 'GRIN', 'HOW', 'NEATLY', 'SPREAD', 'HIS', 'CLAWS', 'AND', 'WELCOME', 'LITTLE', 'FISHES', 'IN', 'WITH', 'GENTLY', 'SMILING', 'JAWS'] +260-123440-0011-190: ref=['NO', "I'VE", 'MADE', 'UP', 'MY', 'MIND', 'ABOUT', 'IT', 'IF', "I'M", 'MABEL', "I'LL", 'STAY', 'DOWN', 'HERE'] +260-123440-0011-190: hyp=['NO', "I'VE", 'MADE', 'UP', 'MY', 'MIND', 'ABOUT', 'IT', 'IF', 'I', 'MAYBEL', "I'LL", 'STAY', 'DOWN', 'HERE'] +260-123440-0012-191: ref=["IT'LL", 'BE', 'NO', 'USE', 'THEIR', 'PUTTING', 'THEIR', 'HEADS', 'DOWN', 'AND', 'SAYING', 'COME', 'UP', 'AGAIN', 'DEAR'] +260-123440-0012-191: hyp=["IT'LL", 'BE', 'NO', 'USE', "THEY'RE", 'PUTTING', 'THEIR', 'HEADS', 'DOWN', 'AND', 'SAYING', 'COME', 'UP', 'AGAIN', 'DEAR'] +260-123440-0013-192: ref=['I', 'AM', 'SO', 'VERY', 'TIRED', 'OF', 'BEING', 'ALL', 'ALONE', 'HERE'] +260-123440-0013-192: hyp=['I', 'AM', 'SO', 'VERY', 'TIRED', 'OF', 'BEING', 'ALL', 'ALONE', 'HERE'] +260-123440-0014-193: ref=['AND', 'I', 'DECLARE', "IT'S", 'TOO', 'BAD', 'THAT', 'IT', 'IS'] +260-123440-0014-193: hyp=['AND', 'I', 'DECLARE', "IT'S", 'TOO', 'BAD', 'THAT', 'IT', 'IS'] +260-123440-0015-194: ref=['I', 'WISH', 'I', "HADN'T", 'CRIED', 'SO', 'MUCH', 'SAID', 'ALICE', 'AS', 'SHE', 'SWAM', 'ABOUT', 'TRYING', 'TO', 'FIND', 'HER', 'WAY', 'OUT'] +260-123440-0015-194: hyp=['I', 'WISH', 'I', "HADN'T", 'CRIED', 'SO', 'MUCH', 'SAID', 'ALICE', 'AS', 'SHE', 'SWAM', 'ABOUT', 'TRYING', 'TO', 'FIND', 'HER', 'WAY', 'OUT'] +260-123440-0016-195: ref=['I', 'SHALL', 'BE', 'PUNISHED', 'FOR', 'IT', 'NOW', 'I', 'SUPPOSE', 'BY', 'BEING', 'DROWNED', 'IN', 'MY', 'OWN', 'TEARS'] +260-123440-0016-195: hyp=['I', 'SHALL', 'BE', 'PUNISHED', 'FOR', 'IT', 'NOW', 'I', 'SUPPOSE', 'BY', 'BEING', 'DROWNED', 'IN', 'MY', 'OWN', 'TEARS'] +260-123440-0017-196: ref=['THAT', 'WILL', 'BE', 'A', 'QUEER', 'THING', 'TO', 'BE', 'SURE'] +260-123440-0017-196: hyp=['THAT', 'WILL', 'BE', 'A', 'QUEER', 'THING', 'TO', 'BE', 'SURE'] +260-123440-0018-197: ref=['I', 'AM', 'VERY', 'TIRED', 'OF', 'SWIMMING', 'ABOUT', 'HERE', 'O', 'MOUSE'] +260-123440-0018-197: hyp=['I', 'AM', 'VERY', 'TIRED', 'OF', 'SWIMMING', 'ABOUT', 'HERE', 'O', 'MOUSE'] +260-123440-0019-198: ref=['CRIED', 'ALICE', 'AGAIN', 'FOR', 'THIS', 'TIME', 'THE', 'MOUSE', 'WAS', 'BRISTLING', 'ALL', 'OVER', 'AND', 'SHE', 'FELT', 'CERTAIN', 'IT', 'MUST', 'BE', 'REALLY', 'OFFENDED'] +260-123440-0019-198: hyp=['CRIED', 'ALICE', 'AGAIN', 'FOR', 'THIS', 'TIME', 'THE', 'MOUSE', 'WAS', 'BRISTLING', 'ALL', 'OVER', 'AND', 'SHE', 'FELT', 'CERTAIN', 'IT', 'MUST', 'BE', 'REALLY', 'OFFENDED'] +260-123440-0020-199: ref=['WE', "WON'T", 'TALK', 'ABOUT', 'HER', 'ANY', 'MORE', 'IF', "YOU'D", 'RATHER', 'NOT', 'WE', 
'INDEED'] +260-123440-0020-199: hyp=['WE', "WON'T", 'TALK', 'ABOUT', 'HER', 'ANY', 'MORE', 'IF', "YOU'D", 'RATHER', 'NOT', 'WE', 'INDEED'] +2830-3979-0000-1120: ref=['WE', 'WANT', 'YOU', 'TO', 'HELP', 'US', 'PUBLISH', 'SOME', 'LEADING', 'WORK', 'OF', "LUTHER'S", 'FOR', 'THE', 'GENERAL', 'AMERICAN', 'MARKET', 'WILL', 'YOU', 'DO', 'IT'] +2830-3979-0000-1120: hyp=['WE', 'WANT', 'YOU', 'TO', 'HELP', 'US', 'PUBLISH', 'SOME', 'LEADING', 'WORK', 'OF', 'LUTHERS', 'FOR', 'THE', 'GENERAL', 'AMERICAN', 'MARKET', 'WILL', 'YOU', 'DO', 'IT'] +2830-3979-0001-1121: ref=['THE', 'CONDITION', 'IS', 'THAT', 'I', 'WILL', 'BE', 'PERMITTED', 'TO', 'MAKE', 'LUTHER', 'TALK', 'AMERICAN', 'STREAMLINE', 'HIM', 'SO', 'TO', 'SPEAK', 'BECAUSE', 'YOU', 'WILL', 'NEVER', 'GET', 'PEOPLE', 'WHETHER', 'IN', 'OR', 'OUTSIDE', 'THE', 'LUTHERAN', 'CHURCH', 'ACTUALLY', 'TO', 'READ', 'LUTHER', 'UNLESS', 'WE', 'MAKE', 'HIM', 'TALK', 'AS', 'HE', 'WOULD', 'TALK', 'TODAY', 'TO', 'AMERICANS'] +2830-3979-0001-1121: hyp=['THE', 'CONDITION', 'IS', 'THAT', 'I', 'WILL', 'BE', 'PERMITTED', 'TO', 'MAKE', 'LUTHER', 'TALK', 'AMERICAN', 'STREAM', 'LINE', 'HIM', 'SO', 'TO', 'SPEAK', 'BECAUSE', 'YOU', 'WILL', 'NEVER', 'GET', 'PEOPLE', 'WHETHER', 'IN', 'OR', 'OUTSIDE', 'THE', 'LUTHERAN', 'CHURCH', 'ACTUALLY', 'TO', 'READ', 'LUTHER', 'UNLESS', 'WE', 'MAKE', 'HIM', 'TALK', 'AS', 'HE', 'WOULD', 'TALK', 'TO', 'DAY', 'TO', 'AMERICANS'] +2830-3979-0002-1122: ref=['LET', 'US', 'BEGIN', 'WITH', 'THAT', 'HIS', 'COMMENTARY', 'ON', 'GALATIANS'] +2830-3979-0002-1122: hyp=['LET', 'US', 'BEGIN', 'WITH', 'THAT', 'HIS', 'COMMENTARY', 'ONGOLATIONS'] +2830-3979-0003-1123: ref=['THE', 'UNDERTAKING', 'WHICH', 'SEEMED', 'SO', 'ATTRACTIVE', 'WHEN', 'VIEWED', 'AS', 'A', 'LITERARY', 'TASK', 'PROVED', 'A', 'MOST', 'DIFFICULT', 'ONE', 'AND', 'AT', 'TIMES', 'BECAME', 'OPPRESSIVE'] +2830-3979-0003-1123: hyp=['THE', 'UNDERTAKING', 'WHICH', 'SEEMED', 'SO', 'ATTRACTIVE', 'WHEN', 'VIEWED', 'AS', 'A', 'LITERARY', 'TASK', 'PROVED', 'A', 'MOST', 'DIFFICULT', 'ONE', 'AND', 'AT', 'TIMES', 'BECAME', 'OPPRESSIVE'] +2830-3979-0004-1124: ref=['IT', 'WAS', 'WRITTEN', 'IN', 'LATIN'] +2830-3979-0004-1124: hyp=['IT', 'WAS', 'WRITTEN', 'IN', 'LATIN'] +2830-3979-0005-1125: ref=['THE', 'WORK', 'HAD', 'TO', 'BE', 'CONDENSED'] +2830-3979-0005-1125: hyp=['THE', 'WORK', 'HAD', 'TO', 'BE', 'CONDENSED'] +2830-3979-0006-1126: ref=['A', 'WORD', 'SHOULD', 'NOW', 'BE', 'SAID', 'ABOUT', 'THE', 'ORIGIN', 'OF', "LUTHER'S", 'COMMENTARY', 'ON', 'GALATIANS'] +2830-3979-0006-1126: hyp=['A', 'WORD', 'SHOULD', 'NOW', 'BE', 'SAID', 'ABOUT', 'THE', 'ORIGIN', 'OF', "LUTHER'S", 'COMMENTARY', 'ANGULATIONS'] +2830-3979-0007-1127: ref=['MUCH', 'LATER', 'WHEN', 'A', 'FRIEND', 'OF', 'HIS', 'WAS', 'PREPARING', 'AN', 'EDITION', 'OF', 'ALL', 'HIS', 'LATIN', 'WORKS', 'HE', 'REMARKED', 'TO', 'HIS', 'HOME', 'CIRCLE', 'IF', 'I', 'HAD', 'MY', 'WAY', 'ABOUT', 'IT', 'THEY', 'WOULD', 'REPUBLISH', 'ONLY', 'THOSE', 'OF', 'MY', 'BOOKS', 'WHICH', 'HAVE', 'DOCTRINE', 'MY', 'GALATIANS', 'FOR', 'INSTANCE'] +2830-3979-0007-1127: hyp=['MUCH', 'LATER', 'WHEN', 'A', 'FRIEND', 'OF', 'HIS', 'WAS', 'PREPARING', 'AN', 'ADDITION', 'OF', 'ALL', 'HIS', 'LATIN', 'WORKS', 'HE', 'REMARKED', 'TO', 'HIS', 'HOME', 'CIRCLE', 'IF', 'I', 'HAD', 'MY', 'WAY', 'ABOUT', 'IT', 'THEY', 'WOULD', 'REPUBLISH', 'ONLY', 'THOSE', 'OF', 'MY', 'BOOKS', 'WHICH', 'HAVE', 'DOCTRINE', 'MIGALLATIONS', 'FOR', 'INSTANCE'] +2830-3979-0008-1128: ref=['IN', 'OTHER', 'WORDS', 'THESE', 'THREE', 'MEN', 'TOOK', 'DOWN', 'THE', 'LECTURES', 'WHICH', 'LUTHER', 'ADDRESSED', 'TO', 'HIS', 
'STUDENTS', 'IN', 'THE', 'COURSE', 'OF', 'GALATIANS', 'AND', 'ROERER', 'PREPARED', 'THE', 'MANUSCRIPT', 'FOR', 'THE', 'PRINTER'] +2830-3979-0008-1128: hyp=['IN', 'OTHER', 'WORDS', 'THESE', 'THREE', 'MEN', 'TOOK', 'DOWN', 'THE', 'LECTURES', 'WHICH', 'LUTHER', 'ADDRESSED', 'TO', 'HIS', 'STUDENTS', 'IN', 'THE', 'COURSE', 'OF', 'GALATIANS', 'AND', 'ROAR', 'PREPARED', 'THE', 'MANUSCRIPT', 'FOR', 'THE', 'PRINTER'] +2830-3979-0009-1129: ref=['IT', 'PRESENTS', 'LIKE', 'NO', 'OTHER', 'OF', "LUTHER'S", 'WRITINGS', 'THE', 'CENTRAL', 'THOUGHT', 'OF', 'CHRISTIANITY', 'THE', 'JUSTIFICATION', 'OF', 'THE', 'SINNER', 'FOR', 'THE', 'SAKE', 'OF', "CHRIST'S", 'MERITS', 'ALONE'] +2830-3979-0009-1129: hyp=['IT', 'PRESENTS', 'LIKE', 'NO', 'OTHER', 'OF', "LUTHER'S", 'WRITINGS', 'THE', 'CENTRAL', 'THOUGHT', 'OF', 'CHRISTIANITY', 'THE', 'JUSTIFICATION', 'OF', 'THE', 'SINNER', 'FOR', 'THE', 'SAKE', 'OF', "CHRIST'S", 'MERITS', 'ALONE'] +2830-3979-0010-1130: ref=['BUT', 'THE', 'ESSENCE', 'OF', "LUTHER'S", 'LECTURES', 'IS', 'THERE'] +2830-3979-0010-1130: hyp=['BUT', 'THE', 'ESSENCE', 'OF', "LUTHER'S", 'LECTURES', 'IS', 'THERE'] +2830-3979-0011-1131: ref=['THE', 'LORD', 'WHO', 'HAS', 'GIVEN', 'US', 'POWER', 'TO', 'TEACH', 'AND', 'TO', 'HEAR', 'LET', 'HIM', 'ALSO', 'GIVE', 'US', 'THE', 'POWER', 'TO', 'SERVE', 'AND', 'TO', 'DO', 'LUKE', 'TWO'] +2830-3979-0011-1131: hyp=['THE', 'LORD', 'WHO', 'HAS', 'GIVEN', 'US', 'POWER', 'TO', 'TEACH', 'AND', 'TO', 'HEAR', 'LET', 'HIM', 'ALSO', 'GIVE', 'US', 'THE', 'POWER', 'TO', 'SERVE', 'AND', 'TO', 'DO', 'LUKE', 'TOO'] +2830-3979-0012-1132: ref=['THE', 'WORD', 'OF', 'OUR', 'GOD', 'SHALL', 'STAND', 'FOREVER'] +2830-3979-0012-1132: hyp=['THE', 'WORD', 'OF', 'OUR', 'GOD', 'SHALL', 'STAND', 'FOR', 'EVER'] +2830-3980-0000-1043: ref=['IN', 'EVERY', 'WAY', 'THEY', 'SOUGHT', 'TO', 'UNDERMINE', 'THE', 'AUTHORITY', 'OF', 'SAINT', 'PAUL'] +2830-3980-0000-1043: hyp=['IN', 'EVERY', 'WAY', 'THEY', 'SOUGHT', 'TO', 'UNDERMINE', 'THE', 'AUTHORITY', 'OF', 'SAINT', 'PAUL'] +2830-3980-0001-1044: ref=['THEY', 'SAID', 'TO', 'THE', 'GALATIANS', 'YOU', 'HAVE', 'NO', 'RIGHT', 'TO', 'THINK', 'HIGHLY', 'OF', 'PAUL'] +2830-3980-0001-1044: hyp=['THEY', 'SAID', 'TO', 'THE', 'GALATIANS', 'YOU', 'HAVE', 'NO', 'RIGHT', 'TO', 'THINK', 'HIGHLY', 'OF', 'PAUL'] +2830-3980-0002-1045: ref=['HE', 'WAS', 'THE', 'LAST', 'TO', 'TURN', 'TO', 'CHRIST'] +2830-3980-0002-1045: hyp=['HE', 'WAS', 'THE', 'LAST', 'TO', 'TURN', 'TO', 'CHRIST'] +2830-3980-0003-1046: ref=['PAUL', 'CAME', 'LATER', 'AND', 'IS', 'BENEATH', 'US'] +2830-3980-0003-1046: hyp=['PAUL', 'CAME', 'LATER', 'IN', 'HIS', 'BENEATH', 'US'] +2830-3980-0004-1047: ref=['INDEED', 'HE', 'PERSECUTED', 'THE', 'CHURCH', 'OF', 'CHRIST', 'FOR', 'A', 'LONG', 'TIME'] +2830-3980-0004-1047: hyp=['INDEED', 'HE', 'PERSECUTED', 'THE', 'CHURCH', 'OF', 'CHRIST', 'FOR', 'A', 'LONG', 'TIME'] +2830-3980-0005-1048: ref=['DO', 'YOU', 'SUPPOSE', 'THAT', 'GOD', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'FEW', 'LUTHERAN', 'HERETICS', 'WOULD', 'DISOWN', 'HIS', 'ENTIRE', 'CHURCH'] +2830-3980-0005-1048: hyp=['DO', 'YOU', 'SUPPOSE', 'THAT', 'GOD', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'FEW', 'LUTHERAN', 'HERETICS', 'WOULD', 'DISOWN', 'HIS', 'ENTIRE', 'CHURCH'] +2830-3980-0006-1049: ref=['AGAINST', 'THESE', 'BOASTING', 'FALSE', 'APOSTLES', 'PAUL', 'BOLDLY', 'DEFENDS', 'HIS', 'APOSTOLIC', 'AUTHORITY', 'AND', 'MINISTRY'] +2830-3980-0006-1049: hyp=['AGAINST', 'THESE', 'BOASTING', 'FALSE', 'APOSTLES', 'PAUL', 'BOLDLY', 'DEFENDS', 'HIS', 'APOSTOLIC', 'AUTHORITY', 'AND', 'MINISTRY'] +2830-3980-0007-1050: ref=['AS', 'THE', 
'AMBASSADOR', 'OF', 'A', 'GOVERNMENT', 'IS', 'HONORED', 'FOR', 'HIS', 'OFFICE', 'AND', 'NOT', 'FOR', 'HIS', 'PRIVATE', 'PERSON', 'SO', 'THE', 'MINISTER', 'OF', 'CHRIST', 'SHOULD', 'EXALT', 'HIS', 'OFFICE', 'IN', 'ORDER', 'TO', 'GAIN', 'AUTHORITY', 'AMONG', 'MEN'] +2830-3980-0007-1050: hyp=['AS', 'THE', 'AMBASSADOR', 'OF', 'A', 'GOVERNMENT', 'IS', 'HONORED', 'FOR', 'HIS', 'OFFICE', 'AND', 'NOT', 'FOR', 'HIS', 'PRIVATE', 'PERSON', 'SO', 'THE', 'MINISTER', 'OF', 'CHRIST', 'SHOULD', 'EXALT', 'HIS', 'OFFICE', 'IN', 'ORDER', 'TO', 'GAIN', 'AUTHORITY', 'AMONG', 'MEN'] +2830-3980-0008-1051: ref=['PAUL', 'TAKES', 'PRIDE', 'IN', 'HIS', 'MINISTRY', 'NOT', 'TO', 'HIS', 'OWN', 'PRAISE', 'BUT', 'TO', 'THE', 'PRAISE', 'OF', 'GOD'] +2830-3980-0008-1051: hyp=['PAUL', 'TAKES', 'PRIDE', 'IN', 'HIS', 'MINISTRY', 'NOT', 'TO', 'HIS', 'OWN', 'PHRASE', 'BUT', 'TO', 'THE', 'PRAISE', 'OF', 'GOD'] +2830-3980-0009-1052: ref=['PAUL', 'AN', 'APOSTLE', 'NOT', 'OF', 'MEN', 'ET', 'CETERA'] +2830-3980-0009-1052: hyp=['PAUL', 'AN', 'APOSTLE', 'NOT', 'OF', 'MEN', 'ET', 'CETERA'] +2830-3980-0010-1053: ref=['EITHER', 'HE', 'CALLS', 'MINISTERS', 'THROUGH', 'THE', 'AGENCY', 'OF', 'MEN', 'OR', 'HE', 'CALLS', 'THEM', 'DIRECTLY', 'AS', 'HE', 'CALLED', 'THE', 'PROPHETS', 'AND', 'APOSTLES'] +2830-3980-0010-1053: hyp=['EITHER', 'HE', 'CALLS', 'MINISTERS', 'THROUGH', 'THE', 'AGENCY', 'OF', 'MEN', 'OR', 'HE', 'CALLS', 'THEM', 'DIRECTLY', 'AS', 'HE', 'CALLED', 'THE', 'PROPHETS', 'AND', 'APOSTLES'] +2830-3980-0011-1054: ref=['PAUL', 'DECLARES', 'THAT', 'THE', 'FALSE', 'APOSTLES', 'WERE', 'CALLED', 'OR', 'SENT', 'NEITHER', 'BY', 'MEN', 'NOR', 'BY', 'MAN'] +2830-3980-0011-1054: hyp=['PAUL', 'DECLARES', 'THAT', 'THE', 'FALSE', 'APOSTLES', 'RECALL', 'THEIR', 'SCENT', 'NEITHER', 'BY', 'MEN', 'NOR', 'BY', 'MAN'] +2830-3980-0012-1055: ref=['THE', 'MOST', 'THEY', 'COULD', 'CLAIM', 'IS', 'THAT', 'THEY', 'WERE', 'SENT', 'BY', 'OTHERS'] +2830-3980-0012-1055: hyp=['THE', 'MOST', 'THEY', 'COULD', 'CLAIM', 'IS', 'THAT', 'THEY', 'WERE', 'SENT', 'BY', 'OTHERS'] +2830-3980-0013-1056: ref=['HE', 'MENTIONS', 'THE', 'APOSTLES', 'FIRST', 'BECAUSE', 'THEY', 'WERE', 'APPOINTED', 'DIRECTLY', 'BY', 'GOD'] +2830-3980-0013-1056: hyp=['HE', 'MENTIONS', 'THE', 'APOSTLES', 'FIRST', 'BECAUSE', 'THEY', 'WERE', 'APPOINTED', 'DIRECTLY', 'BY', 'GOD'] +2830-3980-0014-1057: ref=['THE', 'CALL', 'IS', 'NOT', 'TO', 'BE', 'TAKEN', 'LIGHTLY'] +2830-3980-0014-1057: hyp=['THE', 'CALL', 'IS', 'NOT', 'TO', 'BE', 'TAKEN', 'LIGHTLY'] +2830-3980-0015-1058: ref=['FOR', 'A', 'PERSON', 'TO', 'POSSESS', 'KNOWLEDGE', 'IS', 'NOT', 'ENOUGH'] +2830-3980-0015-1058: hyp=['FOR', 'A', 'PERSON', 'TO', 'POSSESS', 'KNOWLEDGE', 'IS', 'NOT', 'ENOUGH'] +2830-3980-0016-1059: ref=['IT', 'SPOILS', "ONE'S", 'BEST', 'WORK'] +2830-3980-0016-1059: hyp=['IT', 'SPOILS', "ONE'S", 'BEST', 'WORK'] +2830-3980-0017-1060: ref=['WHEN', 'I', 'WAS', 'A', 'YOUNG', 'MAN', 'I', 'THOUGHT', 'PAUL', 'WAS', 'MAKING', 'TOO', 'MUCH', 'OF', 'HIS', 'CALL'] +2830-3980-0017-1060: hyp=['WHEN', 'I', 'WAS', 'A', 'YOUNG', 'MAN', 'I', 'THOUGHT', 'PAUL', 'WAS', 'MAKING', 'TOO', 'MUCH', 'OF', 'HIS', 'CALL'] +2830-3980-0018-1061: ref=['I', 'DID', 'NOT', 'THEN', 'REALIZE', 'THE', 'IMPORTANCE', 'OF', 'THE', 'MINISTRY'] +2830-3980-0018-1061: hyp=['I', 'DID', 'NOT', 'THEN', 'REALIZE', 'THE', 'IMPORTANCE', 'OF', 'THE', 'MINISTRY'] +2830-3980-0019-1062: ref=['I', 'KNEW', 'NOTHING', 'OF', 'THE', 'DOCTRINE', 'OF', 'FAITH', 'BECAUSE', 'WE', 'WERE', 'TAUGHT', 'SOPHISTRY', 'INSTEAD', 'OF', 'CERTAINTY', 'AND', 'NOBODY', 'UNDERSTOOD', 'SPIRITUAL', 
'BOASTING'] +2830-3980-0019-1062: hyp=['I', 'KNEW', 'NOTHING', 'OF', 'THE', 'DOCTRINE', 'OF', 'FAITH', 'BECAUSE', 'WE', 'WERE', 'TAUGHT', 'SOPHISTRY', 'INSTEAD', 'OF', 'CERTAINTY', 'AND', 'NOBODY', 'UNDERSTOOD', 'SPIRITUAL', 'BOASTING'] +2830-3980-0020-1063: ref=['THIS', 'IS', 'NO', 'SINFUL', 'PRIDE', 'IT', 'IS', 'HOLY', 'PRIDE'] +2830-3980-0020-1063: hyp=['THIS', 'IS', 'NO', 'SINFUL', 'PRIDE', 'IT', 'IS', 'WHOLLY', 'PRIDE'] +2830-3980-0021-1064: ref=['AND', 'GOD', 'THE', 'FATHER', 'WHO', 'RAISED', 'HIM', 'FROM', 'THE', 'DEAD'] +2830-3980-0021-1064: hyp=['AND', 'GOD', 'THE', 'FATHER', 'WHO', 'RAISED', 'HIM', 'FROM', 'THE', 'DEAD'] +2830-3980-0022-1065: ref=['THE', 'CLAUSE', 'SEEMS', 'SUPERFLUOUS', 'ON', 'FIRST', 'SIGHT'] +2830-3980-0022-1065: hyp=['THE', 'CLAWS', 'SEEMED', 'SUPERVOUS', 'ON', 'FIRST', 'SIGHT'] +2830-3980-0023-1066: ref=['THESE', 'PERVERTERS', 'OF', 'THE', 'RIGHTEOUSNESS', 'OF', 'CHRIST', 'RESIST', 'THE', 'FATHER', 'AND', 'THE', 'SON', 'AND', 'THE', 'WORKS', 'OF', 'THEM', 'BOTH'] +2830-3980-0023-1066: hyp=['THESE', 'PERVERTERS', 'OF', 'THE', 'RIGHTEOUSNESS', 'OF', 'CHRIST', 'RESIST', 'THE', 'FATHER', 'AND', 'THE', 'SON', 'AND', 'THE', 'WORKS', 'OF', 'THEM', 'BOTH'] +2830-3980-0024-1067: ref=['IN', 'THIS', 'WHOLE', 'EPISTLE', 'PAUL', 'TREATS', 'OF', 'THE', 'RESURRECTION', 'OF', 'CHRIST'] +2830-3980-0024-1067: hyp=['IN', 'THIS', 'WHOLE', 'EPISTLE', 'PAUL', 'TREATS', 'OF', 'THE', 'RESURRECTION', 'OF', 'CHRIST'] +2830-3980-0025-1068: ref=['BY', 'HIS', 'RESURRECTION', 'CHRIST', 'WON', 'THE', 'VICTORY', 'OVER', 'LAW', 'SIN', 'FLESH', 'WORLD', 'DEVIL', 'DEATH', 'HELL', 'AND', 'EVERY', 'EVIL'] +2830-3980-0025-1068: hyp=['BY', 'HIS', 'RESURRECTION', 'CHRIST', 'WON', 'THE', 'VICTORY', 'OVER', 'LAW', 'SIN', 'FLESH', 'WORLD', 'DEVIL', 'DEATH', 'HELL', 'AND', 'EVERY', 'EVIL'] +2830-3980-0026-1069: ref=['VERSE', 'TWO'] +2830-3980-0026-1069: hyp=['FIRST', 'TWO'] +2830-3980-0027-1070: ref=['AND', 'ALL', 'THE', 'BRETHREN', 'WHICH', 'ARE', 'WITH', 'ME'] +2830-3980-0027-1070: hyp=['AND', 'ALL', 'THE', 'BRETHREN', 'WHICH', 'ARE', 'WITH', 'ME'] +2830-3980-0028-1071: ref=['THIS', 'SHOULD', 'GO', 'FAR', 'IN', 'SHUTTING', 'THE', 'MOUTHS', 'OF', 'THE', 'FALSE', 'APOSTLES'] +2830-3980-0028-1071: hyp=['THIS', 'SHOULD', 'GO', 'FAR', 'IN', 'SHUTTING', 'THE', 'MOUTHS', 'OF', 'THE', 'FALSE', 'APOSTLES'] +2830-3980-0029-1072: ref=['ALTHOUGH', 'THE', 'BRETHREN', 'WITH', 'ME', 'ARE', 'NOT', 'APOSTLES', 'LIKE', 'MYSELF', 'YET', 'THEY', 'ARE', 'ALL', 'OF', 'ONE', 'MIND', 'WITH', 'ME', 'THINK', 'WRITE', 'AND', 'TEACH', 'AS', 'I', 'DO'] +2830-3980-0029-1072: hyp=['ALTHOUGH', 'THE', 'BRETHREN', 'WITH', 'ME', 'ARE', 'NOT', 'APOSTLES', 'LIKE', 'MYSELF', 'YET', 'THEY', 'ARE', 'ALL', 'OF', 'ONE', 'MIND', 'WITH', 'ME', 'THINK', 'WRITE', 'AND', 'TEACH', 'AS', 'I', 'DO'] +2830-3980-0030-1073: ref=['THEY', 'DO', 'NOT', 'GO', 'WHERE', 'THE', 'ENEMIES', 'OF', 'THE', 'GOSPEL', 'PREDOMINATE', 'THEY', 'GO', 'WHERE', 'THE', 'CHRISTIANS', 'ARE'] +2830-3980-0030-1073: hyp=['THEY', 'DO', 'NOT', 'GO', 'WHERE', 'THE', 'ENEMIES', 'OF', 'THE', 'GOSPEL', 'PREDOMINATE', 'THEY', 'GO', 'WITH', 'THE', 'CHRISTIANS', 'ARE'] +2830-3980-0031-1074: ref=['WHY', 'DO', 'THEY', 'NOT', 'INVADE', 'THE', 'CATHOLIC', 'PROVINCES', 'AND', 'PREACH', 'THEIR', 'DOCTRINE', 'TO', 'GODLESS', 'PRINCES', 'BISHOPS', 'AND', 'DOCTORS', 'AS', 'WE', 'HAVE', 'DONE', 'BY', 'THE', 'HELP', 'OF', 'GOD'] +2830-3980-0031-1074: hyp=['WHY', 'DO', 'THEY', 'NOT', 'INVADE', 'THE', 'CATHOLIC', 'PROVINCES', 'AND', 'PREACH', 'THEIR', 'DOCTRINE', 'TO', 'GODLESS', 'PRINCES', 
'BISHOPS', 'AND', 'DOCTORS', 'AS', 'WE', 'HAVE', 'DONE', 'BY', 'THE', 'HELP', 'OF', 'GOD'] +2830-3980-0032-1075: ref=['WE', 'LOOK', 'FOR', 'THAT', 'REWARD', 'WHICH', 'EYE', 'HATH', 'NOT', 'SEEN', 'NOR', 'EAR', 'HEARD', 'NEITHER', 'HATH', 'ENTERED', 'INTO', 'THE', 'HEART', 'OF', 'MAN'] +2830-3980-0032-1075: hyp=['WE', 'LOOK', 'FOR', 'THAT', 'REWARD', 'WHICH', 'I', 'HATH', 'NOT', 'SEEN', 'NOR', 'EAR', 'HEARD', 'NEITHER', 'HATH', 'ENTERED', 'INTO', 'THE', 'HEART', 'OF', 'MAN'] +2830-3980-0033-1076: ref=['NOT', 'ALL', 'THE', 'GALATIANS', 'HAD', 'BECOME', 'PERVERTED'] +2830-3980-0033-1076: hyp=['NOT', 'ALL', 'THE', 'GALATIANS', 'HAD', 'BECOME', 'PERVERTED'] +2830-3980-0034-1077: ref=['THESE', 'MEANS', 'CANNOT', 'BE', 'CONTAMINATED'] +2830-3980-0034-1077: hyp=['THESE', 'MEANS', 'CANNOT', 'BE', 'CONTAMINATED'] +2830-3980-0035-1078: ref=['THEY', 'REMAIN', 'DIVINE', 'REGARDLESS', 'OF', "MEN'S", 'OPINION'] +2830-3980-0035-1078: hyp=['THEY', 'REMAINED', 'DIVINE', 'REGARDLESS', 'OF', "MEN'S", 'OPINION'] +2830-3980-0036-1079: ref=['WHEREVER', 'THE', 'MEANS', 'OF', 'GRACE', 'ARE', 'FOUND', 'THERE', 'IS', 'THE', 'HOLY', 'CHURCH', 'EVEN', 'THOUGH', 'ANTICHRIST', 'REIGNS', 'THERE'] +2830-3980-0036-1079: hyp=['WHEREVER', 'THE', 'MEANS', 'OF', 'GRACE', 'ARE', 'FOUND', 'THERE', 'IS', 'THE', 'HOLY', 'CHURCH', 'EVEN', 'THOUGH', 'ANTICHRIST', 'REIGNS', 'THERE'] +2830-3980-0037-1080: ref=['SO', 'MUCH', 'FOR', 'THE', 'TITLE', 'OF', 'THE', 'EPISTLE', 'NOW', 'FOLLOWS', 'THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'VERSE', 'THREE'] +2830-3980-0037-1080: hyp=['SO', 'MUCH', 'FOR', 'THE', 'TITLE', 'OF', 'THE', 'EPISTLE', 'NOW', 'FOLLOWS', 'THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'VERSE', 'THREE'] +2830-3980-0038-1081: ref=['GRACE', 'BE', 'TO', 'YOU', 'AND', 'PEACE', 'FROM', 'GOD', 'THE', 'FATHER', 'AND', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0038-1081: hyp=['GRACE', 'BE', 'TO', 'YOU', 'IN', 'PEACE', 'FROM', 'GOD', 'THE', 'FATHER', 'AND', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0039-1082: ref=['THE', 'TERMS', 'OF', 'GRACE', 'AND', 'PEACE', 'ARE', 'COMMON', 'TERMS', 'WITH', 'PAUL', 'AND', 'ARE', 'NOW', 'PRETTY', 'WELL', 'UNDERSTOOD'] +2830-3980-0039-1082: hyp=['THE', 'TERMS', 'OF', 'GRACE', 'AND', 'PEACE', 'ARE', 'COMMON', 'TERMS', 'WITH', 'PAUL', 'AND', 'ARE', 'NOW', 'PRETTY', 'WELL', 'UNDERSTOOD'] +2830-3980-0040-1083: ref=['THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'IS', 'REFRESHING'] +2830-3980-0040-1083: hyp=['THE', 'GREETING', 'OF', 'THE', 'APOSTLE', 'IS', 'REFRESHING'] +2830-3980-0041-1084: ref=['GRACE', 'INVOLVES', 'THE', 'REMISSION', 'OF', 'SINS', 'PEACE', 'AND', 'A', 'HAPPY', 'CONSCIENCE'] +2830-3980-0041-1084: hyp=['GRACE', 'INVOLVES', 'THE', 'REMISSION', 'OF', 'SINS', 'PEACE', 'AND', 'A', 'HAPPY', 'CONSCIENCE'] +2830-3980-0042-1085: ref=['THE', 'WORLD', 'BRANDS', 'THIS', 'A', 'PERNICIOUS', 'DOCTRINE'] +2830-3980-0042-1085: hyp=['THE', 'WORLD', 'BRAINS', 'THIS', 'A', 'PERNICIOUS', 'DOCTRINE'] +2830-3980-0043-1086: ref=['EXPERIENCE', 'PROVES', 'THIS'] +2830-3980-0043-1086: hyp=['EXPERIENCE', 'PROVES', 'THIS'] +2830-3980-0044-1087: ref=['HOWEVER', 'THE', 'GRACE', 'AND', 'PEACE', 'OF', 'GOD', 'WILL'] +2830-3980-0044-1087: hyp=['HOWEVER', 'THE', 'GRACE', 'AND', 'PEACE', 'OF', 'GOD', 'WILL'] +2830-3980-0045-1088: ref=['MEN', 'SHOULD', 'NOT', 'SPECULATE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0045-1088: hyp=['MEN', 'SHOULD', 'NOT', 'SPECULATE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0046-1089: ref=['WAS', 'IT', 'NOT', 'ENOUGH', 'TO', 'SAY', 'FROM', 'GOD', 'THE', 
'FATHER'] +2830-3980-0046-1089: hyp=['WAS', 'IT', 'NOT', 'ENOUGH', 'TO', 'SAY', 'FROM', 'GOD', 'THE', 'FATHER'] +2830-3980-0047-1090: ref=['TO', 'DO', 'SO', 'IS', 'TO', 'LOSE', 'GOD', 'ALTOGETHER', 'BECAUSE', 'GOD', 'BECOMES', 'INTOLERABLE', 'WHEN', 'WE', 'SEEK', 'TO', 'MEASURE', 'AND', 'TO', 'COMPREHEND', 'HIS', 'INFINITE', 'MAJESTY'] +2830-3980-0047-1090: hyp=['TO', 'DO', 'SO', 'IS', 'TO', 'LOSE', 'GOD', 'ALTOGETHER', 'BECAUSE', 'GOD', 'BECOMES', 'INTOLERABLE', 'WHEN', 'WE', 'SEEK', 'TO', 'MEASURE', 'AND', 'TO', 'COMPREHEND', 'HIS', 'INFINITE', 'MAJESTY'] +2830-3980-0048-1091: ref=['HE', 'CAME', 'DOWN', 'TO', 'EARTH', 'LIVED', 'AMONG', 'MEN', 'SUFFERED', 'WAS', 'CRUCIFIED', 'AND', 'THEN', 'HE', 'DIED', 'STANDING', 'CLEARLY', 'BEFORE', 'US', 'SO', 'THAT', 'OUR', 'HEARTS', 'AND', 'EYES', 'MAY', 'FASTEN', 'UPON', 'HIM'] +2830-3980-0048-1091: hyp=['HE', 'CAME', 'DOWN', 'TO', 'EARTH', 'LIVED', 'AMONG', 'MEN', 'SUFFERED', 'WAS', 'CRUCIFIED', 'AND', 'THEN', 'HE', 'DIED', 'STANDING', 'CLEARLY', 'BEFORE', 'US', 'SO', 'THAT', 'OUR', 'HEARTS', 'AND', 'EYES', 'MAY', 'FASTEN', 'UPON', 'HIM'] +2830-3980-0049-1092: ref=['EMBRACE', 'HIM', 'AND', 'FORGET', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0049-1092: hyp=['EMBRACE', 'HIM', 'AND', 'FORGET', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD'] +2830-3980-0050-1093: ref=['DID', 'NOT', 'CHRIST', 'HIMSELF', 'SAY', 'I', 'AM', 'THE', 'WAY', 'AND', 'THE', 'TRUTH', 'AND', 'THE', 'LIFE', 'NO', 'MAN', 'COMETH', 'UNTO', 'THE', 'FATHER', 'BUT', 'BY', 'ME'] +2830-3980-0050-1093: hyp=['DID', 'NOT', 'CHRIST', 'HIMSELF', 'SAY', 'I', 'AM', 'THE', 'WAY', 'AND', 'THE', 'TRUTH', 'AND', 'THE', 'LIFE', 'NO', 'MAN', 'COMETH', 'UNTO', 'THE', 'FATHER', 'BUT', 'BY', 'ME'] +2830-3980-0051-1094: ref=['WHEN', 'YOU', 'ARGUE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD', 'APART', 'FROM', 'THE', 'QUESTION', 'OF', 'JUSTIFICATION', 'YOU', 'MAY', 'BE', 'AS', 'PROFOUND', 'AS', 'YOU', 'LIKE'] +2830-3980-0051-1094: hyp=['WHEN', 'YOU', 'ARGUE', 'ABOUT', 'THE', 'NATURE', 'OF', 'GOD', 'APART', 'FROM', 'THE', 'QUESTION', 'OF', 'JUSTIFICATION', 'YOU', 'MAY', 'BE', 'AS', 'PROFOUND', 'AS', 'YOU', 'LIKE'] +2830-3980-0052-1095: ref=['WE', 'ARE', 'TO', 'HEAR', 'CHRIST', 'WHO', 'HAS', 'BEEN', 'APPOINTED', 'BY', 'THE', 'FATHER', 'AS', 'OUR', 'DIVINE', 'TEACHER'] +2830-3980-0052-1095: hyp=['WE', 'ARE', 'TO', 'HEAR', 'CHRIST', 'WHO', 'HAS', 'BEEN', 'APPOINTED', 'BY', 'THE', 'FATHER', 'AS', 'OUR', 'DIVINE', 'TEACHER'] +2830-3980-0053-1096: ref=['AT', 'THE', 'SAME', 'TIME', 'PAUL', 'CONFIRMS', 'OUR', 'CREED', 'THAT', 'CHRIST', 'IS', 'VERY', 'GOD'] +2830-3980-0053-1096: hyp=['AT', 'THE', 'SAME', 'TIME', 'PAUL', 'CONFIRMS', 'OUR', 'CREED', 'THAT', 'CHRIST', 'IS', 'VERY', 'GOD'] +2830-3980-0054-1097: ref=['THAT', 'CHRIST', 'IS', 'VERY', 'GOD', 'IS', 'APPARENT', 'IN', 'THAT', 'PAUL', 'ASCRIBES', 'TO', 'HIM', 'DIVINE', 'POWERS', 'EQUALLY', 'WITH', 'THE', 'FATHER', 'AS', 'FOR', 'INSTANCE', 'THE', 'POWER', 'TO', 'DISPENSE', 'GRACE', 'AND', 'PEACE'] +2830-3980-0054-1097: hyp=['THAT', 'CHRIST', 'IS', 'VERY', 'GOD', 'IS', 'APPARENT', 'IN', 'THAT', 'PAUL', 'ASCRIBES', 'TO', 'HIM', 'DIVINE', 'POWERS', 'EQUALLY', 'WITH', 'THE', 'FATHER', 'AS', 'FOR', 'INSTANCE', 'THE', 'POWER', 'DOES', 'SPENCE', 'GRACE', 'AND', 'PEACE'] +2830-3980-0055-1098: ref=['TO', 'BESTOW', 'PEACE', 'AND', 'GRACE', 'LIES', 'IN', 'THE', 'PROVINCE', 'OF', 'GOD', 'WHO', 'ALONE', 'CAN', 'CREATE', 'THESE', 'BLESSINGS', 'THE', 'ANGELS', 'CANNOT'] +2830-3980-0055-1098: hyp=['TO', 'BESTOW', 'PEACE', 'AND', 'GRACE', 'LIES', 'IN', 'THE', 'PROVINCE', 'OF', 'GOD', 
'WHO', 'ALONE', 'CAN', 'CREATE', 'THESE', 'BLESSINGS', 'THE', 'ANGELS', 'CANNOT'] +2830-3980-0056-1099: ref=['OTHERWISE', 'PAUL', 'SHOULD', 'HAVE', 'WRITTEN', 'GRACE', 'FROM', 'GOD', 'THE', 'FATHER', 'AND', 'PEACE', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0056-1099: hyp=['OTHERWISE', 'PAUL', 'SHOULD', 'HAVE', 'WRITTEN', 'GRACE', 'FROM', 'GOD', 'THE', 'FATHER', 'IN', 'PEACE', 'FROM', 'OUR', 'LORD', 'JESUS', 'CHRIST'] +2830-3980-0057-1100: ref=['THE', 'ARIANS', 'TOOK', 'CHRIST', 'FOR', 'A', 'NOBLE', 'AND', 'PERFECT', 'CREATURE', 'SUPERIOR', 'EVEN', 'TO', 'THE', 'ANGELS', 'BECAUSE', 'BY', 'HIM', 'GOD', 'CREATED', 'HEAVEN', 'AND', 'EARTH'] +2830-3980-0057-1100: hyp=['THE', 'ARIANS', 'TOOK', 'CHRIST', 'FOR', 'A', 'NOBLE', 'AND', 'PERFECT', 'CREATURE', 'SUPERIOR', 'EVEN', 'TO', 'THE', 'ANGELS', 'BECAUSE', 'BY', 'HIM', 'GOD', 'CREATED', 'HEAVEN', 'AND', 'EARTH'] +2830-3980-0058-1101: ref=['MOHAMMED', 'ALSO', 'SPEAKS', 'HIGHLY', 'OF', 'CHRIST'] +2830-3980-0058-1101: hyp=['MOHAMMED', 'ALSO', 'SPEAKS', 'HIGHLY', 'OF', 'CHRIST'] +2830-3980-0059-1102: ref=['PAUL', 'STICKS', 'TO', 'HIS', 'THEME'] +2830-3980-0059-1102: hyp=['PAUL', 'STICKS', 'TO', 'HIS', 'THEME'] +2830-3980-0060-1103: ref=['HE', 'NEVER', 'LOSES', 'SIGHT', 'OF', 'THE', 'PURPOSE', 'OF', 'HIS', 'EPISTLE'] +2830-3980-0060-1103: hyp=['HE', 'NEVER', 'LOSES', 'SIGHT', 'OF', 'THE', 'PURPOSE', 'OF', 'HIS', 'EPISTLE'] +2830-3980-0061-1104: ref=['NOT', 'GOLD', 'OR', 'SILVER', 'OR', 'PASCHAL', 'LAMBS', 'OR', 'AN', 'ANGEL', 'BUT', 'HIMSELF', 'WHAT', 'FOR'] +2830-3980-0061-1104: hyp=['NOT', 'GOLD', 'OR', 'SILVER', 'OR', 'PASSIONAL', 'LAMBS', 'OR', 'AN', 'ANGEL', 'BUT', 'HIMSELF', 'WHAT', 'FOR'] +2830-3980-0062-1105: ref=['NOT', 'FOR', 'A', 'CROWN', 'OR', 'A', 'KINGDOM', 'OR', 'OUR', 'GOODNESS', 'BUT', 'FOR', 'OUR', 'SINS'] +2830-3980-0062-1105: hyp=['NOT', 'FOR', 'A', 'CROWN', 'OR', 'A', 'KINGDOM', 'OR', 'A', 'GOODNESS', 'BEFORE', 'OUR', 'SINS'] +2830-3980-0063-1106: ref=['UNDERSCORE', 'THESE', 'WORDS', 'FOR', 'THEY', 'ARE', 'FULL', 'OF', 'COMFORT', 'FOR', 'SORE', 'CONSCIENCES'] +2830-3980-0063-1106: hyp=['UNDERSCORE', 'THESE', 'WORDS', 'FOR', 'THEY', 'ARE', 'FULL', 'OF', 'COMFORT', 'FOR', 'SORE', 'CONSCIENCES'] +2830-3980-0064-1107: ref=['HOW', 'MAY', 'WE', 'OBTAIN', 'REMISSION', 'OF', 'OUR', 'SINS'] +2830-3980-0064-1107: hyp=['HOW', 'MAY', 'WE', 'OBTAIN', 'REMISSION', 'OF', 'OUR', 'SINS'] +2830-3980-0065-1108: ref=['PAUL', 'ANSWERS', 'THE', 'MAN', 'WHO', 'IS', 'NAMED', 'JESUS', 'CHRIST', 'AND', 'THE', 'SON', 'OF', 'GOD', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0065-1108: hyp=['PAUL', 'ANSWERS', 'THE', 'MAN', 'WHO', 'IS', 'NAMED', 'JESUS', 'CHRIST', 'AND', 'THE', 'SON', 'OF', 'GOD', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0066-1109: ref=['SINCE', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS', 'IT', 'STANDS', 'TO', 'REASON', 'THAT', 'THEY', 'CANNOT', 'BE', 'PUT', 'AWAY', 'BY', 'OUR', 'OWN', 'EFFORTS'] +2830-3980-0066-1109: hyp=['SINCE', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS', 'IT', 'STANDS', 'TO', 'REASON', 'THAT', 'THEY', 'CANNOT', 'BE', 'PUT', 'AWAY', 'BY', 'OUR', 'OWN', 'EFFORTS'] +2830-3980-0067-1110: ref=['THIS', 'SENTENCE', 'ALSO', 'DEFINES', 'OUR', 'SINS', 'AS', 'GREAT', 'SO', 'GREAT', 'IN', 'FACT', 'THAT', 'THE', 'WHOLE', 'WORLD', 'COULD', 'NOT', 'MAKE', 'AMENDS', 'FOR', 'A', 'SINGLE', 'SIN'] +2830-3980-0067-1110: hyp=['THIS', 'SENTENCE', 'ALSO', 'DEFINES', 'OUR', 'SINS', 'AS', 'GREAT', 'SO', 'GREAT', 'IN', 'FACT', 'THAT', 'THE', 'WHOLE', 'WORLD', 'COULD', 'NOT', 'MAKE', 'AMENDS', 'FOR', 'A', 
'SINGLE', 'SIN'] +2830-3980-0068-1111: ref=['THE', 'GREATNESS', 'OF', 'THE', 'RANSOM', 'CHRIST', 'THE', 'SON', 'OF', 'GOD', 'INDICATES', 'THIS'] +2830-3980-0068-1111: hyp=['THE', 'GREATNESS', 'OF', 'THE', 'RANSOM', 'CHRIST', 'THE', 'SON', 'OF', 'GOD', 'INDICATES', 'THIS'] +2830-3980-0069-1112: ref=['THE', 'VICIOUS', 'CHARACTER', 'OF', 'SIN', 'IS', 'BROUGHT', 'OUT', 'BY', 'THE', 'WORDS', 'WHO', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0069-1112: hyp=['THE', 'VICIOUS', 'CHARACTER', 'OF', 'SIN', 'IS', 'BROUGHT', 'OUT', 'BY', 'THE', 'WORDS', 'WHO', 'GAVE', 'HIMSELF', 'FOR', 'OUR', 'SINS'] +2830-3980-0070-1113: ref=['BUT', 'WE', 'ARE', 'CARELESS', 'WE', 'MAKE', 'LIGHT', 'OF', 'SIN'] +2830-3980-0070-1113: hyp=['BUT', 'WE', 'ARE', 'CARELESS', 'WE', 'MAKE', 'LIGHT', 'OF', 'SIN'] +2830-3980-0071-1114: ref=['WE', 'THINK', 'THAT', 'BY', 'SOME', 'LITTLE', 'WORK', 'OR', 'MERIT', 'WE', 'CAN', 'DISMISS', 'SIN'] +2830-3980-0071-1114: hyp=['WE', 'THINK', 'THAT', 'BY', 'SOME', 'LITTLE', 'WORK', 'OR', 'MERIT', 'WE', 'CAN', 'DISMISS', 'IN'] +2830-3980-0072-1115: ref=['THIS', 'PASSAGE', 'THEN', 'BEARS', 'OUT', 'THE', 'FACT', 'THAT', 'ALL', 'MEN', 'ARE', 'SOLD', 'UNDER', 'SIN'] +2830-3980-0072-1115: hyp=['THIS', 'PASSAGE', 'THEN', 'BEARS', 'OUT', 'THE', 'FACT', 'THAT', 'ALL', 'MEN', 'ARE', 'SOLD', 'UNDER', 'SIN'] +2830-3980-0073-1116: ref=['THIS', 'ATTITUDE', 'SPRINGS', 'FROM', 'A', 'FALSE', 'CONCEPTION', 'OF', 'SIN', 'THE', 'CONCEPTION', 'THAT', 'SIN', 'IS', 'A', 'SMALL', 'MATTER', 'EASILY', 'TAKEN', 'CARE', 'OF', 'BY', 'GOOD', 'WORKS', 'THAT', 'WE', 'MUST', 'PRESENT', 'OURSELVES', 'UNTO', 'GOD', 'WITH', 'A', 'GOOD', 'CONSCIENCE', 'THAT', 'WE', 'MUST', 'FEEL', 'NO', 'SIN', 'BEFORE', 'WE', 'MAY', 'FEEL', 'THAT', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS'] +2830-3980-0073-1116: hyp=['THIS', 'ATTITUDE', 'SPRINGS', 'FROM', 'A', 'FALSE', 'CONCEPTION', 'OF', 'SIN', 'THE', 'CONCEPTION', 'THAT', 'SIN', 'IS', 'A', 'SMALL', 'MATTER', 'EASILY', 'TAKEN', 'CARE', 'OF', 'BY', 'GOOD', 'WORKS', 'THAT', 'WE', 'MUST', 'PRESENT', 'OURSELVES', 'INTO', 'GOD', 'WITH', 'A', 'GOOD', 'CONSCIENCE', 'THAT', 'WE', 'MUST', 'FEEL', 'NO', 'SIN', 'BEFORE', 'WE', 'MAY', 'FEEL', 'THAT', 'CHRIST', 'WAS', 'GIVEN', 'FOR', 'OUR', 'SINS'] +2830-3980-0074-1117: ref=['THIS', 'ATTITUDE', 'IS', 'UNIVERSAL', 'AND', 'PARTICULARLY', 'DEVELOPED', 'IN', 'THOSE', 'WHO', 'CONSIDER', 'THEMSELVES', 'BETTER', 'THAN', 'OTHERS'] +2830-3980-0074-1117: hyp=['THIS', 'ATTITUDE', 'IS', 'UNIVERSAL', 'AND', 'PARTICULARLY', 'DEVELOPED', 'IN', 'THOSE', 'WHO', 'CONSIDER', 'THEMSELVES', 'BETTER', 'THAN', 'OTHERS'] +2830-3980-0075-1118: ref=['BUT', 'THE', 'REAL', 'SIGNIFICANCE', 'AND', 'COMFORT', 'OF', 'THE', 'WORDS', 'FOR', 'OUR', 'SINS', 'IS', 'LOST', 'UPON', 'THEM'] +2830-3980-0075-1118: hyp=['BUT', 'THE', 'REAL', 'SIGNIFICANCE', 'AND', 'COMFORT', 'OF', 'THE', 'WORDS', 'FOR', 'OUR', 'SINS', 'IS', 'LOST', 'UPON', 'THEM'] +2830-3980-0076-1119: ref=['ON', 'THE', 'OTHER', 'HAND', 'WE', 'ARE', 'NOT', 'TO', 'REGARD', 'THEM', 'AS', 'SO', 'TERRIBLE', 'THAT', 'WE', 'MUST', 'DESPAIR'] +2830-3980-0076-1119: hyp=['ON', 'THE', 'OTHER', 'HAND', 'WE', 'ARE', 'NOT', 'TO', 'REGARD', 'THEM', 'AS', 'SO', 'TERRIBLE', 'THAT', 'WE', 'MUST', 'DESPAIR'] +2961-960-0000-497: ref=['HE', 'PASSES', 'ABRUPTLY', 'FROM', 'PERSONS', 'TO', 'IDEAS', 'AND', 'NUMBERS', 'AND', 'FROM', 'IDEAS', 'AND', 'NUMBERS', 'TO', 'PERSONS', 'FROM', 'THE', 'HEAVENS', 'TO', 'MAN', 'FROM', 'ASTRONOMY', 'TO', 'PHYSIOLOGY', 'HE', 'CONFUSES', 'OR', 'RATHER', 'DOES', 'NOT', 'DISTINGUISH', 'SUBJECT', 'AND', 
'OBJECT', 'FIRST', 'AND', 'FINAL', 'CAUSES', 'AND', 'IS', 'DREAMING', 'OF', 'GEOMETRICAL', 'FIGURES', 'LOST', 'IN', 'A', 'FLUX', 'OF', 'SENSE'] +2961-960-0000-497: hyp=['HE', 'PASSES', 'ABRUPTLY', 'FROM', 'PERSONS', 'TO', 'IDEAS', 'AND', 'NUMBERS', 'AND', 'FROM', 'IDEAS', 'AND', 'NUMBERS', 'TO', 'PERSONS', 'FROM', 'THE', 'HEAVENS', 'TO', 'MEN', 'FROM', 'ASTRONOMY', 'TO', 'PHYSIOLOGY', 'HE', 'CONFUSES', 'OR', 'RATHER', 'DOES', 'NOT', 'DISTINGUISH', 'SUBJECT', 'AND', 'OBJECT', 'FIRST', 'AND', 'FINAL', 'CAUSES', 'AND', 'IS', 'DREAMING', 'OF', 'GEOMETRICAL', 'FIGURES', 'LOST', 'IN', 'A', 'FLUX', 'OF', 'SENSE'] +2961-960-0001-498: ref=['THE', 'INFLUENCE', 'WITH', 'THE', 'TIMAEUS', 'HAS', 'EXERCISED', 'UPON', 'POSTERITY', 'IS', 'DUE', 'PARTLY', 'TO', 'A', 'MISUNDERSTANDING'] +2961-960-0001-498: hyp=['THE', 'INFLUENCE', 'WHICH', 'THE', 'TIMAS', 'HAS', 'EXERCISED', 'UPON', 'POSTERITY', 'IS', 'DUE', 'PARTLY', 'TO', 'A', 'MISUNDERSTANDING'] +2961-960-0002-499: ref=['IN', 'THE', 'SUPPOSED', 'DEPTHS', 'OF', 'THIS', 'DIALOGUE', 'THE', 'NEO', 'PLATONISTS', 'FOUND', 'HIDDEN', 'MEANINGS', 'AND', 'CONNECTIONS', 'WITH', 'THE', 'JEWISH', 'AND', 'CHRISTIAN', 'SCRIPTURES', 'AND', 'OUT', 'OF', 'THEM', 'THEY', 'ELICITED', 'DOCTRINES', 'QUITE', 'AT', 'VARIANCE', 'WITH', 'THE', 'SPIRIT', 'OF', 'PLATO'] +2961-960-0002-499: hyp=['IN', 'THE', 'SUPPOSED', 'DEPTHS', 'OF', 'THIS', 'DIALOGUE', 'THE', 'NEO', 'PLATINISTS', 'FOUND', 'HIDDEN', 'MEANINGS', 'IN', 'CONNECTIONS', 'WITH', 'THE', 'JEWISH', 'AND', 'CHRISTIAN', 'SCRIPTURES', 'AND', 'OUT', 'OF', 'THEM', 'THEY', 'ELICITED', 'DOCTRINES', 'QUITE', 'AT', 'VARIANCE', 'WITH', 'THE', 'SPIRIT', 'OF', 'PLATO'] +2961-960-0003-500: ref=['THEY', 'WERE', 'ABSORBED', 'IN', 'HIS', 'THEOLOGY', 'AND', 'WERE', 'UNDER', 'THE', 'DOMINION', 'OF', 'HIS', 'NAME', 'WHILE', 'THAT', 'WHICH', 'WAS', 'TRULY', 'GREAT', 'AND', 'TRULY', 'CHARACTERISTIC', 'IN', 'HIM', 'HIS', 'EFFORT', 'TO', 'REALIZE', 'AND', 'CONNECT', 'ABSTRACTIONS', 'WAS', 'NOT', 'UNDERSTOOD', 'BY', 'THEM', 'AT', 'ALL'] +2961-960-0003-500: hyp=['THEY', 'WERE', 'ABSORBED', 'IN', 'HIS', 'THEOLOGY', 'AND', 'WERE', 'UNDER', 'THE', 'DOMINION', 'OF', 'HIS', 'NAME', 'WHILE', 'THAT', 'WHICH', 'WAS', 'TRULY', 'GREAT', 'AND', 'TRULY', 'CORRECTURISTIC', 'IN', 'HIM', 'HIS', 'EFFORT', 'TO', 'REALIZE', 'AND', 'CONNECT', 'ABSTRACTIONS', 'WAS', 'NOT', 'UNDERSTOOD', 'BY', 'THEM', 'AT', 'ALL'] +2961-960-0004-501: ref=['THERE', 'IS', 'NO', 'DANGER', 'OF', 'THE', 'MODERN', 'COMMENTATORS', 'ON', 'THE', 'TIMAEUS', 'FALLING', 'INTO', 'THE', 'ABSURDITIES', 'OF', 'THE', 'NEO', 'PLATONISTS'] +2961-960-0004-501: hyp=['THERE', 'IS', 'NO', 'DANGER', 'OF', 'THE', 'MODERN', 'COMMENTATORS', 'ON', 'THE', 'TIMEUS', 'FALLING', 'INTO', 'THE', 'ABSURDITIES', 'OF', 'THE', 'NEOPLATANISTS'] +2961-960-0005-502: ref=['IN', 'THE', 'PRESENT', 'DAY', 'WE', 'ARE', 'WELL', 'AWARE', 'THAT', 'AN', 'ANCIENT', 'PHILOSOPHER', 'IS', 'TO', 'BE', 'INTERPRETED', 'FROM', 'HIMSELF', 'AND', 'BY', 'THE', 'CONTEMPORARY', 'HISTORY', 'OF', 'THOUGHT'] +2961-960-0005-502: hyp=['IN', 'THE', 'PRESENT', 'DAY', 'WE', 'ARE', 'WELL', 'AWARE', 'THAT', 'AN', 'ANCIENT', 'PHILOSOPHER', 'IS', 'TO', 'BE', 'INTERPRETED', 'FROM', 'HIMSELF', 'AND', 'BY', 'THE', 'CONTEMPORARY', 'HISTORY', 'OF', 'THOUGHT'] +2961-960-0006-503: ref=['THE', 'FANCIES', 'OF', 'THE', 'NEO', 'PLATONISTS', 'ARE', 'ONLY', 'INTERESTING', 'TO', 'US', 'BECAUSE', 'THEY', 'EXHIBIT', 'A', 'PHASE', 'OF', 'THE', 'HUMAN', 'MIND', 'WHICH', 'PREVAILED', 'WIDELY', 'IN', 'THE', 'FIRST', 'CENTURIES', 'OF', 'THE', 'CHRISTIAN', 'ERA', 'AND', 'IS', 
'NOT', 'WHOLLY', 'EXTINCT', 'IN', 'OUR', 'OWN', 'DAY'] +2961-960-0006-503: hyp=['THE', 'FANCIES', 'OF', 'THE', 'NEW', 'PLATINISTS', 'ARE', 'ONLY', 'INTERESTING', 'TO', 'US', 'BECAUSE', 'THEY', 'EXHIBIT', 'A', 'PHASE', 'OF', 'THE', 'HUMAN', 'MIND', 'WHICH', 'PREVAIL', 'WIDELY', 'IN', 'THE', 'FIRST', 'CENTURIES', 'OF', 'THE', 'CHRISTIAN', 'ERA', 'AND', 'IS', 'NOT', 'WHOLLY', 'EXTINCT', 'IN', 'OUR', 'OWN', 'DAY'] +2961-960-0007-504: ref=['BUT', 'THEY', 'HAVE', 'NOTHING', 'TO', 'DO', 'WITH', 'THE', 'INTERPRETATION', 'OF', 'PLATO', 'AND', 'IN', 'SPIRIT', 'THEY', 'ARE', 'OPPOSED', 'TO', 'HIM'] +2961-960-0007-504: hyp=['BUT', 'THEY', 'HAVE', 'NOTHING', 'TO', 'DO', 'WITH', 'THE', 'INTERPRETATION', 'OF', 'PLATO', 'AND', 'IN', 'SPIRIT', 'THEY', 'ARE', 'OPPOSED', 'TO', 'HIM'] +2961-960-0008-505: ref=['WE', 'DO', 'NOT', 'KNOW', 'HOW', 'PLATO', 'WOULD', 'HAVE', 'ARRANGED', 'HIS', 'OWN', 'DIALOGUES', 'OR', 'WHETHER', 'THE', 'THOUGHT', 'OF', 'ARRANGING', 'ANY', 'OF', 'THEM', 'BESIDES', 'THE', 'TWO', 'TRILOGIES', 'WHICH', 'HE', 'HAS', 'EXPRESSLY', 'CONNECTED', 'WAS', 'EVER', 'PRESENT', 'TO', 'HIS', 'MIND'] +2961-960-0008-505: hyp=['WE', 'DO', 'NOT', 'KNOW', 'HOW', 'PLATO', 'WOULD', 'HAVE', 'ARRANGED', 'HIS', 'OWN', 'DIALECTS', 'OR', 'WHETHER', 'THE', 'THOUGHT', 'OF', 'ARRANGING', 'ANY', 'OF', 'THEM', 'BESIDES', 'THE', 'TUTRILOGIES', 'WHICH', 'HE', 'HAS', 'EXPRESSLY', 'CONNECTED', 'WAS', 'EVER', 'PRESENT', 'TO', 'HIS', 'MIND'] +2961-960-0009-506: ref=['THE', 'DIALOGUE', 'IS', 'PRIMARILY', 'CONCERNED', 'WITH', 'THE', 'ANIMAL', 'CREATION', 'INCLUDING', 'UNDER', 'THIS', 'TERM', 'THE', 'HEAVENLY', 'BODIES', 'AND', 'WITH', 'MAN', 'ONLY', 'AS', 'ONE', 'AMONG', 'THE', 'ANIMALS'] +2961-960-0009-506: hyp=['THE', 'DIALOGUE', 'IS', 'PRIMARILY', 'CONCERNED', 'WITH', 'THE', 'ANIMAL', 'CREATION', 'INCLUDING', 'UNDER', 'THIS', 'TERM', 'THE', 'HEAVENLY', 'BODIES', 'AND', 'WITH', 'MAN', 'ONLY', 'AS', 'ONE', 'AMONG', 'THE', 'ANIMALS'] +2961-960-0010-507: ref=['BUT', 'HE', 'HAS', 'NOT', 'AS', 'YET', 'DEFINED', 'THIS', 'INTERMEDIATE', 'TERRITORY', 'WHICH', 'LIES', 'SOMEWHERE', 'BETWEEN', 'MEDICINE', 'AND', 'MATHEMATICS', 'AND', 'HE', 'WOULD', 'HAVE', 'FELT', 'THAT', 'THERE', 'WAS', 'AS', 'GREAT', 'AN', 'IMPIETY', 'IN', 'RANKING', 'THEORIES', 'OF', 'PHYSICS', 'FIRST', 'IN', 'THE', 'ORDER', 'OF', 'KNOWLEDGE', 'AS', 'IN', 'PLACING', 'THE', 'BODY', 'BEFORE', 'THE', 'SOUL'] +2961-960-0010-507: hyp=['BUT', 'HE', 'HAS', 'NOT', 'AS', 'YET', 'THE', 'FIND', 'THIS', 'INTERMEDIATE', 'TERRITORY', 'WHICH', 'LIES', 'SOMEWHERE', 'BETWEEN', 'MEDICINE', 'AND', 'MATHEMATICS', 'AND', 'HE', 'WOULD', 'HAVE', 'FELT', 'THAT', 'THERE', 'WAS', 'AS', 'GREAT', 'AN', 'IMPIETY', 'IN', 'RANKING', 'THEORIES', 'OF', 'PHYSICS', 'FIRST', 'IN', 'THE', 'ORDER', 'OF', 'KNOWLEDGE', 'AS', 'IN', 'PLACING', 'THE', 'BODY', 'BEFORE', 'THE', 'SOUL'] +2961-960-0011-508: ref=['WITH', 'HERACLEITUS', 'HE', 'ACKNOWLEDGES', 'THE', 'PERPETUAL', 'FLUX', 'LIKE', 'ANAXAGORAS', 'HE', 'ASSERTS', 'THE', 'PREDOMINANCE', 'OF', 'MIND', 'ALTHOUGH', 'ADMITTING', 'AN', 'ELEMENT', 'OF', 'NECESSITY', 'WHICH', 'REASON', 'IS', 'INCAPABLE', 'OF', 'SUBDUING', 'LIKE', 'THE', 'PYTHAGOREANS', 'HE', 'SUPPOSES', 'THE', 'MYSTERY', 'OF', 'THE', 'WORLD', 'TO', 'BE', 'CONTAINED', 'IN', 'NUMBER'] +2961-960-0011-508: hyp=['WITH', 'HERACLITUS', 'HE', 'ACKNOWLEDGES', 'THE', 'PERPETUAL', 'FLUX', 'LIKE', 'AN', 'EXAGGARIST', 'HE', 'ASSERTS', 'THE', 'PREDOMINANCE', 'OF', 'MIND', 'ALTHOUGH', 'ADMITTING', 'AN', 'ELEMENT', 'OF', 'NECESSITY', 'WHICH', 'REASON', 'IS', 'INCAPABLE', 'OF', 'SUBDUING', 'LIKE', 'THE', 
'PITHAGORIANS', 'HE', 'SUPPOSES', 'THE', 'MYSTERY', 'OF', 'THE', 'WORLD', 'TO', 'BE', 'CONTAINED', 'IN', 'NUMBER'] +2961-960-0012-509: ref=['MANY', 'IF', 'NOT', 'ALL', 'THE', 'ELEMENTS', 'OF', 'THE', 'PRE', 'SOCRATIC', 'PHILOSOPHY', 'ARE', 'INCLUDED', 'IN', 'THE', 'TIMAEUS'] +2961-960-0012-509: hyp=['MANY', 'IF', 'NOT', 'ALL', 'THE', 'ELEMENTS', 'OF', 'THE', 'PRESOCRATIC', 'PHILOSOPHY', 'ARE', 'INCLUDED', 'IN', 'THE', 'TIMIUS'] +2961-960-0013-510: ref=['IT', 'IS', 'PROBABLE', 'THAT', 'THE', 'RELATION', 'OF', 'THE', 'IDEAS', 'TO', 'GOD', 'OR', 'OF', 'GOD', 'TO', 'THE', 'WORLD', 'WAS', 'DIFFERENTLY', 'CONCEIVED', 'BY', 'HIM', 'AT', 'DIFFERENT', 'TIMES', 'OF', 'HIS', 'LIFE'] +2961-960-0013-510: hyp=['IT', 'IS', 'PROBABLE', 'THAT', 'THE', 'RELATION', 'OF', 'THE', 'IDEAS', 'TO', 'GOD', 'OR', 'OF', 'GOD', 'TO', 'THE', 'WORLD', 'WAS', 'DIFFERENTLY', 'CONCEIVED', 'BY', 'HIM', 'AT', 'DIFFERENT', 'TIMES', 'OF', 'HIS', 'LIFE'] +2961-960-0014-511: ref=['THE', 'IDEAS', 'ALSO', 'REMAIN', 'BUT', 'THEY', 'HAVE', 'BECOME', 'TYPES', 'IN', 'NATURE', 'FORMS', 'OF', 'MEN', 'ANIMALS', 'BIRDS', 'FISHES'] +2961-960-0014-511: hyp=['THE', 'IDEAS', 'ALSO', 'REMAIN', 'BUT', 'THEY', 'HAVE', 'BECOME', 'TYPES', 'IN', 'NATURE', 'FORMS', 'OF', 'MEN', 'ANIMALS', 'BIRDS', 'FISHES'] +2961-960-0015-512: ref=['THE', 'STYLE', 'AND', 'PLAN', 'OF', 'THE', 'TIMAEUS', 'DIFFER', 'GREATLY', 'FROM', 'THAT', 'OF', 'ANY', 'OTHER', 'OF', 'THE', 'PLATONIC', 'DIALOGUES'] +2961-960-0015-512: hyp=['THE', 'STYLE', 'AND', 'PLAN', 'OF', 'THE', 'TIMIRS', 'DIFFER', 'GREATLY', 'FROM', 'THAT', 'OF', 'ANY', 'OTHER', 'OF', 'THE', 'PLATONIC', 'DIALOGUES'] +2961-960-0016-513: ref=['BUT', 'PLATO', 'HAS', 'NOT', 'THE', 'SAME', 'MASTERY', 'OVER', 'HIS', 'INSTRUMENT', 'WHICH', 'HE', 'EXHIBITS', 'IN', 'THE', 'PHAEDRUS', 'OR', 'SYMPOSIUM'] +2961-960-0016-513: hyp=['BUT', 'PLATO', 'HAS', 'NOT', 'THE', 'SAME', 'MYSTERY', 'OVER', 'HIS', 'INSTRUMENT', 'WHICH', 'HE', 'EXHIBITS', 'IN', 'THE', 'FEEDRESS', 'OR', 'SUPPOSIUM'] +2961-960-0017-514: ref=['NOTHING', 'CAN', 'EXCEED', 'THE', 'BEAUTY', 'OR', 'ART', 'OF', 'THE', 'INTRODUCTION', 'IN', 'WHICH', 'HE', 'IS', 'USING', 'WORDS', 'AFTER', 'HIS', 'ACCUSTOMED', 'MANNER'] +2961-960-0017-514: hyp=['NOTHING', 'CAN', 'EXCEED', 'THE', 'BEAUTY', 'OR', 'ART', 'OF', 'INTRODUCTION', 'IN', 'WHICH', 'HIS', 'USING', 'WORDS', 'AFTER', 'HIS', 'ACCUSTOMED', 'MANNER'] +2961-960-0018-515: ref=['BUT', 'IN', 'THE', 'REST', 'OF', 'THE', 'WORK', 'THE', 'POWER', 'OF', 'LANGUAGE', 'SEEMS', 'TO', 'FAIL', 'HIM', 'AND', 'THE', 'DRAMATIC', 'FORM', 'IS', 'WHOLLY', 'GIVEN', 'UP'] +2961-960-0018-515: hyp=['BUT', 'IN', 'THE', 'REST', 'OF', 'THE', 'WORK', 'THE', 'POWER', 'OF', 'LANGUAGE', 'SEEMS', 'TO', 'FAIL', 'HIM', 'AND', 'THE', 'DRAMATIC', 'FORM', 'IS', 'WHOLLY', 'GIVEN', 'UP'] +2961-960-0019-516: ref=['HE', 'COULD', 'WRITE', 'IN', 'ONE', 'STYLE', 'BUT', 'NOT', 'IN', 'ANOTHER', 'AND', 'THE', 'GREEK', 'LANGUAGE', 'HAD', 'NOT', 'AS', 'YET', 'BEEN', 'FASHIONED', 'BY', 'ANY', 'POET', 'OR', 'PHILOSOPHER', 'TO', 'DESCRIBE', 'PHYSICAL', 'PHENOMENA'] +2961-960-0019-516: hyp=['HE', 'COULD', 'WRITE', 'IN', 'ONE', 'STYLE', 'BUT', 'NOT', 'IN', 'ANOTHER', 'AND', 'THE', 'GREEK', 'LANGUAGE', 'HAD', 'NOT', 'AS', 'YET', 'BEEN', 'FASHIONED', 'BY', 'ANY', 'POET', 'OR', 'PHILOSOPHER', 'TO', 'DESCRIBE', 'PHYSICAL', 'PHENOMENA'] +2961-960-0020-517: ref=['AND', 'HENCE', 'WE', 'FIND', 'THE', 'SAME', 'SORT', 'OF', 'CLUMSINESS', 'IN', 'THE', 'TIMAEUS', 'OF', 'PLATO', 'WHICH', 'CHARACTERIZES', 'THE', 'PHILOSOPHICAL', 'POEM', 'OF', 'LUCRETIUS'] +2961-960-0020-517: 
hyp=['AND', 'HENCE', 'WE', 'FIND', 'THE', 'SAME', 'SORT', 'OF', 'CLUMSINESS', 'IN', 'THE', 'TIMAS', 'OF', 'PLATO', 'WHICH', 'CHARACTERIZES', 'THE', 'PHILOSOPHICAL', 'POEM', 'OF', 'LUCRETIUS'] +2961-960-0021-518: ref=['THERE', 'IS', 'A', 'WANT', 'OF', 'FLOW', 'AND', 'OFTEN', 'A', 'DEFECT', 'OF', 'RHYTHM', 'THE', 'MEANING', 'IS', 'SOMETIMES', 'OBSCURE', 'AND', 'THERE', 'IS', 'A', 'GREATER', 'USE', 'OF', 'APPOSITION', 'AND', 'MORE', 'OF', 'REPETITION', 'THAN', 'OCCURS', 'IN', "PLATO'S", 'EARLIER', 'WRITINGS'] +2961-960-0021-518: hyp=['THERE', 'IS', 'A', 'WANT', 'OF', 'FLOW', 'AND', 'OFTEN', 'A', 'DEFECT', 'OF', 'RHYTHM', 'THE', 'MEANING', 'IS', 'SOMETIMES', 'OBSCURE', 'AND', 'THERE', 'IS', 'A', 'GREATER', 'USE', 'OF', 'APPOSITION', 'IN', 'MORE', 'OF', 'REPETITION', 'THAN', 'OCCURS', 'IN', "PLATO'S", 'EARLIER', 'WRITINGS'] +2961-960-0022-519: ref=['PLATO', 'HAD', 'NOT', 'THE', 'COMMAND', 'OF', 'HIS', 'MATERIALS', 'WHICH', 'WOULD', 'HAVE', 'ENABLED', 'HIM', 'TO', 'PRODUCE', 'A', 'PERFECT', 'WORK', 'OF', 'ART'] +2961-960-0022-519: hyp=['PLATO', 'HAD', 'NOT', 'THE', 'COMMAND', 'OF', 'HIS', 'MATERIALS', 'WHICH', 'WOULD', 'HAVE', 'ENABLED', 'HIM', 'TO', 'PRODUCE', 'A', 'PERFECT', 'WORK', 'OF', 'ART'] +2961-961-0000-520: ref=['SOCRATES', 'BEGINS', 'THE', 'TIMAEUS', 'WITH', 'A', 'SUMMARY', 'OF', 'THE', 'REPUBLIC'] +2961-961-0000-520: hyp=['SOCRATES', 'BEGINS', 'TO', 'TIMAS', 'WITH', 'A', 'SUMMARY', 'OF', 'THE', 'REPUBLIC'] +2961-961-0001-521: ref=['AND', 'NOW', 'HE', 'DESIRES', 'TO', 'SEE', 'THE', 'IDEAL', 'STATE', 'SET', 'IN', 'MOTION', 'HE', 'WOULD', 'LIKE', 'TO', 'KNOW', 'HOW', 'SHE', 'BEHAVED', 'IN', 'SOME', 'GREAT', 'STRUGGLE'] +2961-961-0001-521: hyp=['AND', 'NOW', 'HE', 'DESIRES', 'TO', 'SEE', 'THE', 'IDEAL', 'STATE', 'SET', 'IN', 'MOTION', 'HE', 'WOULD', 'LIKE', 'TO', 'KNOW', 'HOW', 'SHE', 'BEHAVED', 'IN', 'SOME', 'GREAT', 'STRUGGLE'] +2961-961-0002-522: ref=['AND', 'THEREFORE', 'TO', 'YOU', 'I', 'TURN', 'TIMAEUS', 'CITIZEN', 'OF', 'LOCRIS', 'WHO', 'ARE', 'AT', 'ONCE', 'A', 'PHILOSOPHER', 'AND', 'A', 'STATESMAN', 'AND', 'TO', 'YOU', 'CRITIAS', 'WHOM', 'ALL', 'ATHENIANS', 'KNOW', 'TO', 'BE', 'SIMILARLY', 'ACCOMPLISHED', 'AND', 'TO', 'HERMOCRATES', 'WHO', 'IS', 'ALSO', 'FITTED', 'BY', 'NATURE', 'AND', 'EDUCATION', 'TO', 'SHARE', 'IN', 'OUR', 'DISCOURSE'] +2961-961-0002-522: hyp=['AND', 'THEREFORE', 'TO', 'YOU', 'I', 'TURN', 'TO', 'ME', 'AS', 'CITIZEN', 'OF', 'LOCRIS', 'WHO', 'ARE', 'AT', 'ONCE', 'A', 'PHILOSOPHER', 'IN', 'A', 'STATESMAN', 'AND', 'TO', 'YOU', 'CRITIUS', 'WHOM', 'ALL', 'ATHENIANS', 'KNOW', 'TO', 'BE', 'SIMILARLY', 'ACCOMPLISHED', 'AND', 'TO', 'HERMOCRATES', 'WHOSE', 'ALSO', 'FITTED', 'BY', 'NATURE', 'AND', 'EDUCATION', 'TO', 'SHARE', 'IN', 'OUR', 'DISCOURSE'] +2961-961-0003-523: ref=['I', 'WILL', 'IF', 'TIMAEUS', 'APPROVES', 'I', 'APPROVE'] +2961-961-0003-523: hyp=['I', 'WILL', 'IF', 'TO', 'ME', 'AS', 'APPROVES', 'I', 'APPROVE'] +2961-961-0004-524: ref=['LISTEN', 'THEN', 'SOCRATES', 'TO', 'A', 'TALE', 'OF', "SOLON'S", 'WHO', 'BEING', 'THE', 'FRIEND', 'OF', 'DROPIDAS', 'MY', 'GREAT', 'GRANDFATHER', 'TOLD', 'IT', 'TO', 'MY', 'GRANDFATHER', 'CRITIAS', 'AND', 'HE', 'TOLD', 'ME'] +2961-961-0004-524: hyp=['LISTEN', 'THEN', 'SOCRATES', 'TO', 'A', 'TALE', 'OF', 'SILENCE', 'WHO', 'BEING', 'THE', 'FRIEND', 'OF', 'DROPIDUS', 'BY', 'GREAT', 'GRANDFATHER', 'TOLD', 'IT', 'TO', 'MY', 'GRANDFATHER', 'CRITIUS', 'AND', 'HE', 'TOLD', 'ME'] +2961-961-0005-525: ref=['SOME', 'POEMS', 'OF', 'SOLON', 'WERE', 'RECITED', 'BY', 'THE', 'BOYS'] +2961-961-0005-525: hyp=['SOME', 'POEMS', 'OF', 'SOLID', 'WERE', 
'RECITED', 'BY', 'THE', 'BOYS'] +2961-961-0006-526: ref=['AND', 'WHAT', 'WAS', 'THE', 'SUBJECT', 'OF', 'THE', 'POEM', 'SAID', 'THE', 'PERSON', 'WHO', 'MADE', 'THE', 'REMARK'] +2961-961-0006-526: hyp=['AND', 'WHAT', 'WAS', 'THE', 'SUBJECT', 'OF', 'THE', 'POEM', 'SAID', 'THE', 'PERSON', 'WHO', 'MADE', 'THE', 'REMARK'] +2961-961-0007-527: ref=['THE', 'SUBJECT', 'WAS', 'A', 'VERY', 'NOBLE', 'ONE', 'HE', 'DESCRIBED', 'THE', 'MOST', 'FAMOUS', 'ACTION', 'IN', 'WHICH', 'THE', 'ATHENIAN', 'PEOPLE', 'WERE', 'EVER', 'ENGAGED'] +2961-961-0007-527: hyp=['THE', 'SUBJECT', 'WAS', 'A', 'VERY', 'NOBLE', 'ONE', 'HE', 'DESCRIBED', 'THE', 'MOST', 'FAMOUS', 'ACTION', 'IN', 'WHICH', 'THE', 'ATHENIAN', 'PEOPLE', 'WERE', 'EVER', 'ENGAGED'] +2961-961-0008-528: ref=['BUT', 'THE', 'MEMORY', 'OF', 'THEIR', 'EXPLOITS', 'HAS', 'PASSED', 'AWAY', 'OWING', 'TO', 'THE', 'LAPSE', 'OF', 'TIME', 'AND', 'THE', 'EXTINCTION', 'OF', 'THE', 'ACTORS'] +2961-961-0008-528: hyp=['BUT', 'THE', 'MEMORY', 'OF', 'THEIR', 'EXPLOITS', 'HAD', 'PASSED', 'AWAY', 'OWING', 'TO', 'THE', 'LAPSE', 'OF', 'TIME', 'AND', 'THE', 'EXTINCTION', 'OF', 'THE', 'ACTORS'] +2961-961-0009-529: ref=['TELL', 'US', 'SAID', 'THE', 'OTHER', 'THE', 'WHOLE', 'STORY', 'AND', 'WHERE', 'SOLON', 'HEARD', 'THE', 'STORY'] +2961-961-0009-529: hyp=['TELL', 'US', 'SAID', 'THE', 'OTHER', 'THE', 'WHOLE', 'STORY', 'AND', 'WEAR', 'SOLEMN', 'HEARD', 'THIS', 'STORY'] +2961-961-0010-530: ref=['BUT', 'IN', 'EGYPT', 'THE', 'TRADITIONS', 'OF', 'OUR', 'OWN', 'AND', 'OTHER', 'LANDS', 'ARE', 'BY', 'US', 'REGISTERED', 'FOR', 'EVER', 'IN', 'OUR', 'TEMPLES'] +2961-961-0010-530: hyp=['BUT', 'IN', 'EGYPT', 'THE', 'TRADITIONS', 'OF', 'OUR', 'OWN', 'AND', 'OTHER', 'LANDS', 'ARE', 'BY', 'US', 'REGISTERED', 'FOREVER', 'IN', 'OUR', 'TEMPLES'] +2961-961-0011-531: ref=['THE', 'GENEALOGIES', 'WHICH', 'YOU', 'HAVE', 'RECITED', 'TO', 'US', 'OUT', 'OF', 'YOUR', 'OWN', 'ANNALS', 'SOLON', 'ARE', 'A', 'MERE', "CHILDREN'S", 'STORY'] +2961-961-0011-531: hyp=['THE', 'GENEALOGIES', 'WHICH', 'YOU', 'HAVE', 'RECITED', 'TO', 'US', 'OUT', 'OF', 'YOUR', 'OWN', 'ANNAL', 'SOLEMN', 'ARE', 'A', 'MERE', "CHILDREN'S", 'STORY'] +2961-961-0012-532: ref=['FOR', 'IN', 'THE', 'TIMES', 'BEFORE', 'THE', 'GREAT', 'FLOOD', 'ATHENS', 'WAS', 'THE', 'GREATEST', 'AND', 'BEST', 'OF', 'CITIES', 'AND', 'DID', 'THE', 'NOBLEST', 'DEEDS', 'AND', 'HAD', 'THE', 'BEST', 'CONSTITUTION', 'OF', 'ANY', 'UNDER', 'THE', 'FACE', 'OF', 'HEAVEN'] +2961-961-0012-532: hyp=['FOR', 'IN', 'THE', 'TIMES', 'BEFORE', 'THE', 'GREAT', 'FLOOD', 'ATHENS', 'WAS', 'THE', 'GREATEST', 'AND', 'BEST', 'OF', 'CITIES', 'AND', 'DEAD', 'THE', 'NOBLEST', 'DEEDS', 'AND', 'HAD', 'THE', 'BEST', 'CONSTITUTION', 'OF', 'ANY', 'UNDER', 'THE', 'FACE', 'OF', 'HEAVEN'] +2961-961-0013-533: ref=['SOLON', 'MARVELLED', 'AND', 'DESIRED', 'TO', 'BE', 'INFORMED', 'OF', 'THE', 'PARTICULARS'] +2961-961-0013-533: hyp=['SULLEN', 'MARVELLED', 'AND', 'DESIRED', 'TO', 'BE', 'INFORMED', 'OF', 'THE', 'PARTICULARS'] +2961-961-0014-534: ref=['NINE', 'THOUSAND', 'YEARS', 'HAVE', 'ELAPSED', 'SINCE', 'SHE', 'FOUNDED', 'YOURS', 'AND', 'EIGHT', 'THOUSAND', 'SINCE', 'SHE', 'FOUNDED', 'OURS', 'AS', 'OUR', 'ANNALS', 'RECORD'] +2961-961-0014-534: hyp=['NINE', 'THOUSAND', 'YEARS', 'HAVE', 'ELAPSED', 'SINCE', 'YOU', 'FOUND', 'IT', 'YOURS', 'AND', 'EIGHT', 'THOUSAND', 'SINCE', 'YOU', 'FOUND', 'IT', 'OURS', 'AS', 'OUR', 'ANNALS', 'RECORD'] +2961-961-0015-535: ref=['MANY', 'LAWS', 'EXIST', 'AMONG', 'US', 'WHICH', 'ARE', 'THE', 'COUNTERPART', 'OF', 'YOURS', 'AS', 'THEY', 'WERE', 'IN', 'THE', 'OLDEN', 'TIME'] 
+2961-961-0015-535: hyp=['MANY', 'LAWS', 'EXIST', 'AMONG', 'US', 'WHICH', 'ARE', 'THE', 'COUNTERPART', 'OF', 'YOURS', 'AS', 'THEY', 'WERE', 'IN', 'THE', 'OLDEN', 'TIME'] +2961-961-0016-536: ref=['I', 'WILL', 'BRIEFLY', 'DESCRIBE', 'THEM', 'TO', 'YOU', 'AND', 'YOU', 'SHALL', 'READ', 'THE', 'ACCOUNT', 'OF', 'THEM', 'AT', 'YOUR', 'LEISURE', 'IN', 'THE', 'SACRED', 'REGISTERS'] +2961-961-0016-536: hyp=['I', 'WILL', 'BRIEFLY', 'DESCRIBE', 'THEM', 'TO', 'YOU', 'AND', 'YOU', 'SHALL', 'READ', 'THE', 'ACCOUNT', 'OF', 'THEM', 'AT', 'YOUR', 'LEISURE', 'IN', 'THE', 'SACRED', 'REGISTERS'] +2961-961-0017-537: ref=['OBSERVE', 'AGAIN', 'WHAT', 'CARE', 'THE', 'LAW', 'TOOK', 'IN', 'THE', 'PURSUIT', 'OF', 'WISDOM', 'SEARCHING', 'OUT', 'THE', 'DEEP', 'THINGS', 'OF', 'THE', 'WORLD', 'AND', 'APPLYING', 'THEM', 'TO', 'THE', 'USE', 'OF', 'MAN'] +2961-961-0017-537: hyp=['OBSERVE', 'AGAIN', 'WHAT', 'CARE', 'THE', 'LAW', 'TOOK', 'IN', 'THE', 'PURSUIT', 'OF', 'WISDOM', 'SEARCHING', 'OUT', 'THE', 'DEEP', 'THINGS', 'OF', 'THE', 'WORLD', 'AND', 'APPLYING', 'THEM', 'TO', 'THE', 'USE', 'OF', 'MEN'] +2961-961-0018-538: ref=['THE', 'MOST', 'FAMOUS', 'OF', 'THEM', 'ALL', 'WAS', 'THE', 'OVERTHROW', 'OF', 'THE', 'ISLAND', 'OF', 'ATLANTIS'] +2961-961-0018-538: hyp=['THE', 'MOST', 'FAME', 'AS', 'OF', 'THEM', 'ALL', 'WAS', 'THE', 'OVERTHROW', 'OF', 'THE', 'ISLAND', 'OF', 'ATLANTIS'] +2961-961-0019-539: ref=['FOR', 'AT', 'THE', 'PERIL', 'OF', 'HER', 'OWN', 'EXISTENCE', 'AND', 'WHEN', 'THE', 'OTHER', 'HELLENES', 'HAD', 'DESERTED', 'HER', 'SHE', 'REPELLED', 'THE', 'INVADER', 'AND', 'OF', 'HER', 'OWN', 'ACCORD', 'GAVE', 'LIBERTY', 'TO', 'ALL', 'THE', 'NATIONS', 'WITHIN', 'THE', 'PILLARS'] +2961-961-0019-539: hyp=['FOR', 'AT', 'THE', 'PERIL', 'OF', 'HER', 'OWN', 'EXISTENCE', 'AND', 'WHEN', 'THE', 'OTTER', 'HELLENES', 'HAD', 'DESERTED', 'HER', 'SHE', 'REPELLED', 'INVADER', 'AND', 'OF', 'HER', 'OWN', 'ACCORD', 'GAVE', 'LIBERTY', 'TO', 'ALL', 'THE', 'NATIONS', 'WITHIN', 'THE', 'PILLARS'] +2961-961-0020-540: ref=['THIS', 'IS', 'THE', 'EXPLANATION', 'OF', 'THE', 'SHALLOWS', 'WHICH', 'ARE', 'FOUND', 'IN', 'THAT', 'PART', 'OF', 'THE', 'ATLANTIC', 'OCEAN'] +2961-961-0020-540: hyp=['THIS', 'IS', 'THE', 'EXPLANATION', 'OF', 'THE', 'SHALLOWS', 'WHICH', 'ARE', 'FOUND', 'IN', 'THAT', 'PART', 'OF', 'THE', 'ATLANTIC', 'OCEAN'] +2961-961-0021-541: ref=['BUT', 'I', 'WOULD', 'NOT', 'SPEAK', 'AT', 'THE', 'TIME', 'BECAUSE', 'I', 'WANTED', 'TO', 'REFRESH', 'MY', 'MEMORY'] +2961-961-0021-541: hyp=['BUT', 'I', 'WOULD', 'NOT', 'SPEAK', 'AT', 'THE', 'TIME', 'BECAUSE', 'I', 'WANTED', 'TO', 'REFRESH', 'MY', 'MEMORY'] +2961-961-0022-542: ref=['THEN', 'NOW', 'LET', 'ME', 'EXPLAIN', 'TO', 'YOU', 'THE', 'ORDER', 'OF', 'OUR', 'ENTERTAINMENT', 'FIRST', 'TIMAEUS', 'WHO', 'IS', 'A', 'NATURAL', 'PHILOSOPHER', 'WILL', 'SPEAK', 'OF', 'THE', 'ORIGIN', 'OF', 'THE', 'WORLD', 'GOING', 'DOWN', 'TO', 'THE', 'CREATION', 'OF', 'MAN', 'AND', 'THEN', 'I', 'SHALL', 'RECEIVE', 'THE', 'MEN', 'WHOM', 'HE', 'HAS', 'CREATED', 'AND', 'SOME', 'OF', 'WHOM', 'WILL', 'HAVE', 'BEEN', 'EDUCATED', 'BY', 'YOU', 'AND', 'INTRODUCE', 'THEM', 'TO', 'YOU', 'AS', 'THE', 'LOST', 'ATHENIAN', 'CITIZENS', 'OF', 'WHOM', 'THE', 'EGYPTIAN', 'RECORD', 'SPOKE'] +2961-961-0022-542: hyp=['THEN', 'THOU', 'LET', 'ME', 'EXPLAIN', 'TO', 'YOU', 'THE', 'ORDER', 'OF', 'OUR', 'ENTERTAINMENT', 'FIRST', 'TIMAS', 'WHO', 'IS', 'A', 'NATURAL', 'PHILOSOPHER', 'WILL', 'SPEAK', 'OF', 'THE', 'ORIGIN', 'OF', 'THE', 'WORLD', 'GOING', 'DOWN', 'TO', 'THE', 'CREATION', 'OF', 'MEN', 'AND', 'THEN', 'I', 'SHALL', 'RECEIVE', 'THE', 'MEN', 
'WHOM', 'HE', 'HAS', 'CREATED', 'AND', 'SOME', 'OF', 'WHOM', 'WILL', 'HAVE', 'BEEN', 'EDUCATED', 'BY', 'YOU', 'AND', 'INTRODUCE', 'THEM', 'TO', 'YOU', 'AS', 'THE', 'LOST', 'ATHENIAN', 'CITIZENS', 'OF', 'WHOM', 'THE', 'EGYPTIAN', 'RECORDS', 'SPOKE'] +3570-5694-0000-2433: ref=['BUT', 'ALREADY', 'AT', 'A', 'POINT', 'IN', 'ECONOMIC', 'EVOLUTION', 'FAR', 'ANTEDATING', 'THE', 'EMERGENCE', 'OF', 'THE', 'LADY', 'SPECIALISED', 'CONSUMPTION', 'OF', 'GOODS', 'AS', 'AN', 'EVIDENCE', 'OF', 'PECUNIARY', 'STRENGTH', 'HAD', 'BEGUN', 'TO', 'WORK', 'OUT', 'IN', 'A', 'MORE', 'OR', 'LESS', 'ELABORATE', 'SYSTEM'] +3570-5694-0000-2433: hyp=['BETTER', 'ALREADY', 'AT', 'A', 'POINT', 'IN', 'ECONOMIC', 'EVOLUTION', 'FAR', 'ANTIDATING', 'THE', 'EMERGENCE', 'OF', 'THE', 'LADY', 'SPECIALIZED', 'CONSUMPTION', 'OF', 'GOODS', 'AS', 'AN', 'EVIDENCE', 'OF', 'PECUNIARY', 'STRENGTH', 'HAD', 'BEGUN', 'TO', 'WORK', 'OUT', 'IN', 'A', 'MORE', 'OR', 'LESS', 'CELEBRATE', 'SYSTEM'] +3570-5694-0001-2434: ref=['THE', 'UTILITY', 'OF', 'CONSUMPTION', 'AS', 'AN', 'EVIDENCE', 'OF', 'WEALTH', 'IS', 'TO', 'BE', 'CLASSED', 'AS', 'A', 'DERIVATIVE', 'GROWTH'] +3570-5694-0001-2434: hyp=['THEATILITY', 'OF', 'CONSUMPTION', 'AS', 'AN', 'EVIDENCE', 'OF', 'WEALTH', 'IS', 'TO', 'BE', 'CLASSED', 'AS', 'A', 'DERIVATIVE', 'GROWTH'] +3570-5694-0002-2435: ref=['SUCH', 'CONSUMPTION', 'AS', 'FALLS', 'TO', 'THE', 'WOMEN', 'IS', 'MERELY', 'INCIDENTAL', 'TO', 'THEIR', 'WORK', 'IT', 'IS', 'A', 'MEANS', 'TO', 'THEIR', 'CONTINUED', 'LABOUR', 'AND', 'NOT', 'A', 'CONSUMPTION', 'DIRECTED', 'TO', 'THEIR', 'OWN', 'COMFORT', 'AND', 'FULNESS', 'OF', 'LIFE'] +3570-5694-0002-2435: hyp=['SUCH', 'CONSUMPTION', 'AS', 'FALLS', 'THROUGH', 'THE', 'WOMEN', 'IS', 'MERELY', 'INCIDENTAL', 'TO', 'THEIR', 'WORK', 'IT', 'IS', 'A', 'MEANS', 'TO', 'THEIR', 'CONTINUED', 'LABOR', 'AND', 'NOT', 'TO', 'CONSUMPTION', 'DIRECTED', 'TO', 'THEIR', 'OWN', 'COMFORT', 'AND', 'FULLNESS', 'OF', 'LIFE'] +3570-5694-0003-2436: ref=['WITH', 'A', 'FURTHER', 'ADVANCE', 'IN', 'CULTURE', 'THIS', 'TABU', 'MAY', 'CHANGE', 'INTO', 'SIMPLE', 'CUSTOM', 'OF', 'A', 'MORE', 'OR', 'LESS', 'RIGOROUS', 'CHARACTER', 'BUT', 'WHATEVER', 'BE', 'THE', 'THEORETICAL', 'BASIS', 'OF', 'THE', 'DISTINCTION', 'WHICH', 'IS', 'MAINTAINED', 'WHETHER', 'IT', 'BE', 'A', 'TABU', 'OR', 'A', 'LARGER', 'CONVENTIONALITY', 'THE', 'FEATURES', 'OF', 'THE', 'CONVENTIONAL', 'SCHEME', 'OF', 'CONSUMPTION', 'DO', 'NOT', 'CHANGE', 'EASILY'] +3570-5694-0003-2436: hyp=['WITH', 'A', 'FURTHER', 'ADVANCE', 'IN', 'CULTURE', 'THIS', 'TABOO', 'MAY', 'CHANGED', 'INTO', 'SIMPLE', 'CUSTOM', 'OF', 'A', 'MORE', 'OR', 'LESS', 'RIGOROUS', 'CHARACTER', 'BUT', 'WHATEVER', 'BE', 'THE', 'THEORETICAL', 'BASIS', 'OF', 'THE', 'DISTINCTION', 'WHICH', 'IS', 'MAINTAINED', 'WHETHER', 'IT', 'BE', 'AT', 'A', 'BOO', 'OR', 'A', 'LARGER', 'CONVENTIONALITY', 'THE', 'FEATURES', 'OF', 'THE', 'CONVENTIONAL', 'SCHEME', 'OF', 'CONSUMPTION', 'DO', 'NOT', 'CHANGE', 'EASILY'] +3570-5694-0004-2437: ref=['IN', 'THE', 'NATURE', 'OF', 'THINGS', 'LUXURIES', 'AND', 'THE', 'COMFORTS', 'OF', 'LIFE', 'BELONG', 'TO', 'THE', 'LEISURE', 'CLASS'] +3570-5694-0004-2437: hyp=['IN', 'THE', 'NATURE', 'OF', 'THINGS', 'LUXURIES', 'AND', 'THE', 'COMFORTS', 'OF', 'LIFE', 'BELONG', 'TO', 'THE', 'LEISURE', 'CLASS'] +3570-5694-0005-2438: ref=['UNDER', 'THE', 'TABU', 'CERTAIN', 'VICTUALS', 'AND', 'MORE', 'PARTICULARLY', 'CERTAIN', 'BEVERAGES', 'ARE', 'STRICTLY', 'RESERVED', 'FOR', 'THE', 'USE', 'OF', 'THE', 'SUPERIOR', 'CLASS'] +3570-5694-0005-2438: hyp=['UNDER', 'THE', 'TABOO', 'CERTAIN', 'VICTUALS', 'AND', 
'MORE', 'PARTICULARLY', 'CERTAIN', 'BEVERAGES', 'ARE', 'STRICTLY', 'RESERVED', 'FOR', 'THE', 'USE', 'OF', 'THE', 'SUPERIOR', 'CLASS'] +3570-5694-0006-2439: ref=['DRUNKENNESS', 'AND', 'THE', 'OTHER', 'PATHOLOGICAL', 'CONSEQUENCES', 'OF', 'THE', 'FREE', 'USE', 'OF', 'STIMULANTS', 'THEREFORE', 'TEND', 'IN', 'THEIR', 'TURN', 'TO', 'BECOME', 'HONORIFIC', 'AS', 'BEING', 'A', 'MARK', 'AT', 'THE', 'SECOND', 'REMOVE', 'OF', 'THE', 'SUPERIOR', 'STATUS', 'OF', 'THOSE', 'WHO', 'ARE', 'ABLE', 'TO', 'AFFORD', 'THE', 'INDULGENCE'] +3570-5694-0006-2439: hyp=['DRINKENNESS', 'AND', 'THE', 'OTHER', 'PATHOLOGICAL', 'CONSEQUENCES', 'OF', 'THE', 'FREE', 'USE', 'OF', 'STIMULANTS', 'THEREFORE', 'TEND', 'IN', 'THEIR', 'TURN', 'TO', 'BECOME', 'UNERRIFIC', 'AS', 'BEING', 'A', 'MARK', 'AT', 'THE', 'SECOND', 'REMOVE', 'OF', 'THE', 'SUPERIOR', 'STATUS', 'OF', 'THOSE', 'WHO', 'ARE', 'ABLE', 'TO', 'AFFORD', 'THE', 'INDULGENCE'] +3570-5694-0007-2440: ref=['IT', 'HAS', 'EVEN', 'HAPPENED', 'THAT', 'THE', 'NAME', 'FOR', 'CERTAIN', 'DISEASED', 'CONDITIONS', 'OF', 'THE', 'BODY', 'ARISING', 'FROM', 'SUCH', 'AN', 'ORIGIN', 'HAS', 'PASSED', 'INTO', 'EVERYDAY', 'SPEECH', 'AS', 'A', 'SYNONYM', 'FOR', 'NOBLE', 'OR', 'GENTLE'] +3570-5694-0007-2440: hyp=['IT', 'HAS', 'EVEN', 'HAPPENED', 'THAT', 'THE', 'NAME', 'FOR', 'CERTAIN', 'DISEASED', 'CONDITIONS', 'OF', 'THE', 'BODY', 'ARISING', 'FROM', 'SUCH', 'AN', 'ORIGIN', 'HAS', 'PASSED', 'INTO', 'EVERYDAY', 'SPEECH', 'AS', 'A', 'SYNONYM', 'FOR', 'NOBLE', 'OR', 'GENTLE'] +3570-5694-0008-2441: ref=['THE', 'CONSUMPTION', 'OF', 'LUXURIES', 'IN', 'THE', 'TRUE', 'SENSE', 'IS', 'A', 'CONSUMPTION', 'DIRECTED', 'TO', 'THE', 'COMFORT', 'OF', 'THE', 'CONSUMER', 'HIMSELF', 'AND', 'IS', 'THEREFORE', 'A', 'MARK', 'OF', 'THE', 'MASTER'] +3570-5694-0008-2441: hyp=['THE', 'CONSUMPTION', 'OF', 'LUXURIES', 'IN', 'THE', 'TRUE', 'SENSE', 'IS', 'A', 'CONSUMPTION', 'DIRECTED', 'TO', 'THE', 'COMFORT', 'OF', 'THE', 'CONSUMER', 'HIMSELF', 'AND', 'IS', 'THEREFORE', 'A', 'MARK', 'OF', 'THE', 'MASTER'] +3570-5694-0009-2442: ref=['WITH', 'MANY', 'QUALIFICATIONS', 'WITH', 'MORE', 'QUALIFICATIONS', 'AS', 'THE', 'PATRIARCHAL', 'TRADITION', 'HAS', 'GRADUALLY', 'WEAKENED', 'THE', 'GENERAL', 'RULE', 'IS', 'FELT', 'TO', 'BE', 'RIGHT', 'AND', 'BINDING', 'THAT', 'WOMEN', 'SHOULD', 'CONSUME', 'ONLY', 'FOR', 'THE', 'BENEFIT', 'OF', 'THEIR', 'MASTERS'] +3570-5694-0009-2442: hyp=['WITH', 'MANY', 'QUALIFICATIONS', 'WITH', 'MORE', 'QUALIFICATIONS', 'AS', 'THE', 'PATRIARCHAL', 'TRADITION', 'HAS', 'GRADUALLY', 'WEAKENED', 'THE', 'GENERAL', 'RULE', 'IS', 'FELT', 'TO', 'BE', 'RIGHT', 'AND', 'BINDING', 'THAT', 'WOMEN', 'SHOULD', 'CONSUME', 'ONLY', 'FOR', 'THE', 'BENEFIT', 'OF', 'THEIR', 'MASTERS'] +3570-5694-0010-2443: ref=['THE', 'OBJECTION', 'OF', 'COURSE', 'PRESENTS', 'ITSELF', 'THAT', 'EXPENDITURE', 'ON', "WOMEN'S", 'DRESS', 'AND', 'HOUSEHOLD', 'PARAPHERNALIA', 'IS', 'AN', 'OBVIOUS', 'EXCEPTION', 'TO', 'THIS', 'RULE', 'BUT', 'IT', 'WILL', 'APPEAR', 'IN', 'THE', 'SEQUEL', 'THAT', 'THIS', 'EXCEPTION', 'IS', 'MUCH', 'MORE', 'OBVIOUS', 'THAN', 'SUBSTANTIAL'] +3570-5694-0010-2443: hyp=['THE', 'OBJECTION', 'OF', 'COURSE', 'PRESENTS', 'ITSELF', 'THAT', 'EXPENDITURE', 'ON', "WOMEN'S", 'DRESS', 'AND', 'HOUSEHOLD', 'PARAPHERNALIA', 'IS', 'AN', 'OBVIOUS', 'EXCEPTION', 'TO', 'THIS', 'RULE', 'BUT', 'IT', 'WILL', 'APPEAR', 'IN', 'THE', 'SEQUEL', 'THAT', 'THIS', 'EXCEPTION', 'IS', 'MUCH', 'MORE', 'OBVIOUS', 'THAN', 'SUBSTANTIAL'] +3570-5694-0011-2444: ref=['THE', 'CUSTOM', 'OF', 'FESTIVE', 'GATHERINGS', 'PROBABLY', 'ORIGINATED', 'IN', 'MOTIVES', 
'OF', 'CONVIVIALITY', 'AND', 'RELIGION', 'THESE', 'MOTIVES', 'ARE', 'ALSO', 'PRESENT', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'BUT', 'THEY', 'DO', 'NOT', 'CONTINUE', 'TO', 'BE', 'THE', 'SOLE', 'MOTIVES'] +3570-5694-0011-2444: hyp=['THE', 'CUSTOM', 'OF', 'FESTIVE', 'GATHERINGS', 'PROBABLY', 'ORIGINATED', 'IN', 'MOTIVES', 'OF', 'CONVIVIALITY', 'AND', 'RELIGION', 'THESE', 'MOTIVES', 'ARE', 'ALSO', 'PRESENT', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'BUT', 'THEY', 'DO', 'NOT', 'CONTINUE', 'TO', 'BE', 'THE', 'SOLE', 'MOTIVES'] +3570-5694-0012-2445: ref=['THERE', 'IS', 'A', 'MORE', 'OR', 'LESS', 'ELABORATE', 'SYSTEM', 'OF', 'RANK', 'AND', 'GRADES'] +3570-5694-0012-2445: hyp=['THERE', 'IS', 'A', 'MORE', 'OR', 'LESS', 'ELABORATE', 'SYSTEM', 'OF', 'RANK', 'AND', 'GRATES'] +3570-5694-0013-2446: ref=['THIS', 'DIFFERENTIATION', 'IS', 'FURTHERED', 'BY', 'THE', 'INHERITANCE', 'OF', 'WEALTH', 'AND', 'THE', 'CONSEQUENT', 'INHERITANCE', 'OF', 'GENTILITY'] +3570-5694-0013-2446: hyp=['THIS', 'DIFFERENTIATION', 'IS', 'FURTHERED', 'BY', 'THE', 'INHERITANCE', 'OF', 'WEALTH', 'AND', 'THE', 'CONSEQUENT', 'INHERITANCE', 'OF', 'GENTILITY'] +3570-5694-0014-2447: ref=['MANY', 'OF', 'THESE', 'AFFILIATED', 'GENTLEMEN', 'OF', 'LEISURE', 'ARE', 'AT', 'THE', 'SAME', 'TIME', 'LESSER', 'MEN', 'OF', 'SUBSTANCE', 'IN', 'THEIR', 'OWN', 'RIGHT', 'SO', 'THAT', 'SOME', 'OF', 'THEM', 'ARE', 'SCARCELY', 'AT', 'ALL', 'OTHERS', 'ONLY', 'PARTIALLY', 'TO', 'BE', 'RATED', 'AS', 'VICARIOUS', 'CONSUMERS'] +3570-5694-0014-2447: hyp=['MANY', 'OF', 'THESE', 'HAVE', 'FILLIOTTED', 'GENTLEMEN', 'OF', 'LEISURE', 'ARE', 'AT', 'THE', 'SAME', 'TIME', 'LESS', 'AMEN', 'OF', 'SUBSTANCE', 'IN', 'THEIR', 'OWN', 'RIGHT', 'SO', 'THAT', 'SOME', 'OF', 'THEM', 'ARE', 'SCARCELY', 'AT', 'ALL', 'OTHERS', 'ONLY', 'PARTIALLY', 'TO', 'BE', 'RATED', 'AS', 'VICARIOUS', 'CONSUMERS'] +3570-5694-0015-2448: ref=['SO', 'MANY', 'OF', 'THEM', 'HOWEVER', 'AS', 'MAKE', 'UP', 'THE', 'RETAINER', 'AND', 'HANGERS', 'ON', 'OF', 'THE', 'PATRON', 'MAY', 'BE', 'CLASSED', 'AS', 'VICARIOUS', 'CONSUMER', 'WITHOUT', 'QUALIFICATION'] +3570-5694-0015-2448: hyp=['SO', 'MANY', 'OF', 'THEM', 'HOWEVER', 'AS', 'MAKE', 'UP', 'THE', 'RETAINER', 'AND', 'HANGERS', 'ON', 'OF', 'THE', 'PATRON', 'MAY', 'BE', 'CLASSED', 'AS', 'VICARIOUS', 'CONSUMER', 'WITHOUT', 'QUALIFICATION'] +3570-5694-0016-2449: ref=['MANY', 'OF', 'THESE', 'AGAIN', 'AND', 'ALSO', 'MANY', 'OF', 'THE', 'OTHER', 'ARISTOCRACY', 'OF', 'LESS', 'DEGREE', 'HAVE', 'IN', 'TURN', 'ATTACHED', 'TO', 'THEIR', 'PERSONS', 'A', 'MORE', 'OR', 'LESS', 'COMPREHENSIVE', 'GROUP', 'OF', 'VICARIOUS', 'CONSUMER', 'IN', 'THE', 'PERSONS', 'OF', 'THEIR', 'WIVES', 'AND', 'CHILDREN', 'THEIR', 'SERVANTS', 'RETAINERS', 'ET', 'CETERA'] +3570-5694-0016-2449: hyp=['MANY', 'OF', 'THESE', 'AGAIN', 'AND', 'ALSO', 'MANY', 'OF', 'THE', 'OTHER', 'ARISTOCRACY', 'OF', 'LESS', 'DEGREE', 'HAVE', 'IN', 'TURN', 'ATTACHED', 'TO', 'THEIR', 'PERSONS', 'A', 'MORE', 'OR', 'LESS', 'COMPREHENSIVE', 'GROUP', 'OF', 'VICARIOUS', 'CONSUMER', 'IN', 'THE', 'PERSONS', 'OF', 'THEIR', 'WIVES', 'AND', 'CHILDREN', 'THEIR', 'SERVANTS', 'RETAINERS', 'ET', 'CETERA'] +3570-5694-0017-2450: ref=['THE', 'WEARING', 'OF', 'UNIFORMS', 'OR', 'LIVERIES', 'IMPLIES', 'A', 'CONSIDERABLE', 'DEGREE', 'OF', 'DEPENDENCE', 'AND', 'MAY', 'EVEN', 'BE', 'SAID', 'TO', 'BE', 'A', 'MARK', 'OF', 'SERVITUDE', 'REAL', 'OR', 'OSTENSIBLE'] +3570-5694-0017-2450: hyp=['THE', 'WEARING', 'OF', 'UNIFORMS', 'A', 'LIVERIES', 'IMPLIES', 'A', 'CONSIDERABLE', 'DEGREE', 'OF', 'DEPENDENCE', 'AND', 'MAY', 'EVEN', 'BE', 'SAID', 'TO', 'BE', 'A', 
'MARK', 'OF', 'SERVITUDE', 'REAL', 'OR', 'OSTENSIBLE'] +3570-5694-0018-2451: ref=['THE', 'WEARERS', 'OF', 'UNIFORMS', 'AND', 'LIVERIES', 'MAY', 'BE', 'ROUGHLY', 'DIVIDED', 'INTO', 'TWO', 'CLASSES', 'THE', 'FREE', 'AND', 'THE', 'SERVILE', 'OR', 'THE', 'NOBLE', 'AND', 'THE', 'IGNOBLE'] +3570-5694-0018-2451: hyp=['THE', 'WEARERS', 'OF', 'UNIFORMS', 'AND', 'LIVERIES', 'MAY', 'BE', 'ROUGHLY', 'DIVIDED', 'INTO', 'TWO', 'CLASSES', 'THE', 'FREE', 'AND', 'THE', 'SERVILE', 'OR', 'THE', 'NOBLE', 'AND', 'THE', 'IGNOBLE'] +3570-5694-0019-2452: ref=['BUT', 'THE', 'GENERAL', 'DISTINCTION', 'IS', 'NOT', 'ON', 'THAT', 'ACCOUNT', 'TO', 'BE', 'OVERLOOKED'] +3570-5694-0019-2452: hyp=['BUT', 'THE', 'GENERAL', 'DISTINCTION', 'IS', 'NOT', 'ON', 'THAT', 'ACCOUNT', 'TO', 'BE', 'OVERLOOKED'] +3570-5694-0020-2453: ref=['SO', 'THOSE', 'OFFICES', 'WHICH', 'ARE', 'BY', 'RIGHT', 'THE', 'PROPER', 'EMPLOYMENT', 'OF', 'THE', 'LEISURE', 'CLASS', 'ARE', 'NOBLE', 'SUCH', 'AS', 'GOVERNMENT', 'FIGHTING', 'HUNTING', 'THE', 'CARE', 'OF', 'ARMS', 'AND', 'ACCOUTREMENTS', 'AND', 'THE', 'LIKE', 'IN', 'SHORT', 'THOSE', 'WHICH', 'MAY', 'BE', 'CLASSED', 'AS', 'OSTENSIBLY', 'PREDATORY', 'EMPLOYMENTS'] +3570-5694-0020-2453: hyp=['SO', 'THOSE', 'OFFICERS', 'WHICH', 'ARE', 'BY', 'RIGHT', 'THE', 'PROPER', 'EMPLOYMENT', 'OF', 'THE', 'LEISURE', 'CLASS', 'ARE', 'NOBLE', 'SUCH', 'AS', 'GOVERNMENT', 'FIGHTING', 'HUNTING', 'THE', 'CARE', 'OF', 'ARMS', 'AND', 'ACCOUTREMENTS', 'AND', 'THE', 'LIKE', 'IN', 'SHORT', 'THOSE', 'WHICH', 'MAY', 'BE', 'CLASSED', 'AS', 'OSTENSIBLY', 'PREDATORY', 'EMPLOYMENTS'] +3570-5694-0021-2454: ref=['WHENEVER', 'AS', 'IN', 'THESE', 'CASES', 'THE', 'MENIAL', 'SERVICE', 'IN', 'QUESTION', 'HAS', 'TO', 'DO', 'DIRECTLY', 'WITH', 'THE', 'PRIMARY', 'LEISURE', 'EMPLOYMENTS', 'OF', 'FIGHTING', 'AND', 'HUNTING', 'IT', 'EASILY', 'ACQUIRES', 'A', 'REFLECTED', 'HONORIFIC', 'CHARACTER'] +3570-5694-0021-2454: hyp=['WHENEVER', 'AS', 'IN', 'THESE', 'CASES', 'THE', 'MENIAL', 'SERVICE', 'IN', 'QUESTION', 'HAS', 'TO', 'DO', 'DIRECTLY', 'WITH', 'A', 'PRIMARY', 'LEISURE', 'EMPLOYMENTS', 'OF', 'FIGHTING', 'AND', 'HUNTING', 'IT', 'EASILY', 'ACQUIRES', 'A', 'REFLECTED', 'HONORIFIC', 'CHARACTER'] +3570-5694-0022-2455: ref=['THE', 'LIVERY', 'BECOMES', 'OBNOXIOUS', 'TO', 'NEARLY', 'ALL', 'WHO', 'ARE', 'REQUIRED', 'TO', 'WEAR', 'IT'] +3570-5694-0022-2455: hyp=['THE', 'LIVERY', 'BECOMES', 'OBNOXIOUS', 'TO', 'NEARLY', 'ALL', 'WHO', 'ARE', 'REQUIRED', 'TO', 'WEAR', 'IT'] +3570-5695-0000-2456: ref=['IN', 'A', 'GENERAL', 'WAY', 'THOUGH', 'NOT', 'WHOLLY', 'NOR', 'CONSISTENTLY', 'THESE', 'TWO', 'GROUPS', 'COINCIDE'] +3570-5695-0000-2456: hyp=['AND', 'A', 'GENERAL', 'WAY', 'THOUGH', 'NOT', 'WHOLLY', 'NOR', 'CONSISTENTLY', 'THESE', 'TWO', 'GROUPS', 'COINCIDE'] +3570-5695-0001-2457: ref=['THE', 'DEPENDENT', 'WHO', 'WAS', 'FIRST', 'DELEGATED', 'FOR', 'THESE', 'DUTIES', 'WAS', 'THE', 'WIFE', 'OR', 'THE', 'CHIEF', 'WIFE', 'AND', 'AS', 'WOULD', 'BE', 'EXPECTED', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'OF', 'THE', 'INSTITUTION', 'WHEN', 'THE', 'NUMBER', 'OF', 'PERSONS', 'BY', 'WHOM', 'THESE', 'DUTIES', 'ARE', 'CUSTOMARILY', 'PERFORMED', 'GRADUALLY', 'NARROWS', 'THE', 'WIFE', 'REMAINS', 'THE', 'LAST'] +3570-5695-0001-2457: hyp=['THE', 'DEPENDENT', 'WHO', 'WAS', 'FIRST', 'DELEGATED', 'FOR', 'THESE', 'DUTIES', 'WAS', 'THE', 'WIFE', 'OR', 'THE', 'CHIEF', 'WIFE', 'AND', 'AS', 'WOULD', 'BE', 'EXPECTED', 'IN', 'THE', 'LATER', 'DEVELOPMENT', 'OF', 'THE', 'INSTITUTION', 'WHEN', 'THE', 'NUMBER', 'OF', 'PERSONS', 'BY', 'WHOM', 'THESE', 'DUTIES', 'ARE', 'CUSTOMARY', 
'PERFORMED', 'GRADUAL', 'AND', 'NARROWS', 'THE', 'WIFE', 'REMAINS', 'THE', 'LAST'] +3570-5695-0002-2458: ref=['BUT', 'AS', 'WE', 'DESCEND', 'THE', 'SOCIAL', 'SCALE', 'THE', 'POINT', 'IS', 'PRESENTLY', 'REACHED', 'WHERE', 'THE', 'DUTIES', 'OF', 'VICARIOUS', 'LEISURE', 'AND', 'CONSUMPTION', 'DEVOLVE', 'UPON', 'THE', 'WIFE', 'ALONE'] +3570-5695-0002-2458: hyp=['BUT', 'AS', 'WE', 'DESCEND', 'THE', 'SOCIAL', 'SCALE', 'THE', 'POINT', 'IS', 'PRESENTLY', 'REACHED', 'WHERE', 'THE', 'DUTIES', 'OF', 'YCARIOUS', 'LEISURE', 'AND', 'CONSUMPTION', 'DEVOLVE', 'UPON', 'THE', 'WIFE', 'ALONE'] +3570-5695-0003-2459: ref=['IN', 'THE', 'COMMUNITIES', 'OF', 'THE', 'WESTERN', 'CULTURE', 'THIS', 'POINT', 'IS', 'AT', 'PRESENT', 'FOUND', 'AMONG', 'THE', 'LOWER', 'MIDDLE', 'CLASS'] +3570-5695-0003-2459: hyp=['IN', 'THE', 'COMMUNITIES', 'OF', 'THE', 'WESTERN', 'CULTURE', 'THIS', 'POINT', 'IS', 'AT', 'PRESENT', 'FOUND', 'AMONG', 'THE', 'LOWER', 'MIDDLE', 'CLASS'] +3570-5695-0004-2460: ref=['IF', 'BEAUTY', 'OR', 'COMFORT', 'IS', 'ACHIEVED', 'AND', 'IT', 'IS', 'A', 'MORE', 'OR', 'LESS', 'FORTUITOUS', 'CIRCUMSTANCE', 'IF', 'THEY', 'ARE', 'THEY', 'MUST', 'BE', 'ACHIEVED', 'BY', 'MEANS', 'AND', 'METHODS', 'THAT', 'COMMEND', 'THEMSELVES', 'TO', 'THE', 'GREAT', 'ECONOMIC', 'LAW', 'OF', 'WASTED', 'EFFORT'] +3570-5695-0004-2460: hyp=['IF', 'BEAUTY', 'OR', 'COMFORT', 'IS', 'ACHIEVED', 'AND', 'IT', 'IS', 'A', 'MORE', 'OR', 'LESS', 'FORTUITOUS', 'CIRCUMSTANCE', 'IF', 'THEY', 'ARE', 'THEY', 'MUST', 'BE', 'ACHIEVED', 'BY', 'MEANS', 'AND', 'METHODS', 'THAT', 'COMMEND', 'THEMSELVES', 'TO', 'THE', 'GREAT', 'ECONOMIC', 'LAW', 'OF', 'WASTED', 'EFFORT'] +3570-5695-0005-2461: ref=['THE', 'MAN', 'OF', 'THE', 'HOUSEHOLD', 'ALSO', 'CAN', 'DO', 'SOMETHING', 'IN', 'THIS', 'DIRECTION', 'AND', 'INDEED', 'HE', 'COMMONLY', 'DOES', 'BUT', 'WITH', 'A', 'STILL', 'LOWER', 'DESCENT', 'INTO', 'THE', 'LEVELS', 'OF', 'INDIGENCE', 'ALONG', 'THE', 'MARGIN', 'OF', 'THE', 'SLUMS', 'THE', 'MAN', 'AND', 'PRESENTLY', 'ALSO', 'THE', 'CHILDREN', 'VIRTUALLY', 'CEASE', 'TO', 'CONSUME', 'VALUABLE', 'GOODS', 'FOR', 'APPEARANCES', 'AND', 'THE', 'WOMAN', 'REMAINS', 'VIRTUALLY', 'THE', 'SOLE', 'EXPONENT', 'OF', 'THE', "HOUSEHOLD'S", 'PECUNIARY', 'DECENCY'] +3570-5695-0005-2461: hyp=['THE', 'MAN', 'OF', 'THE', 'HOUSEHOLD', 'ALSO', 'CAN', 'DO', 'SOMETHING', 'IN', 'THIS', 'DIRECTION', 'AND', 'INDEED', 'HE', 'COMMONLY', 'DOES', 'BUT', 'WITH', 'A', 'STILL', 'LOWER', 'DISSENT', 'INTO', 'THE', 'LEVELS', 'OF', 'INDIGENCE', 'ALONG', 'THE', 'MARGIN', 'OF', 'THE', 'SLUMS', 'THE', 'MAN', 'AND', 'PRESENTLY', 'ALSO', 'THE', 'CHILDREN', 'VIRTUALLY', 'SEIZED', 'TO', 'CONSUME', 'VALUABLE', 'GOODS', 'FOR', 'APPEARANCES', 'AND', 'THE', 'WOMAN', 'REMAINS', 'VIRTUALLY', 'THE', 'SOLE', 'EXPONENT', 'OF', 'THE', "HOUSEHOLD'S", 'PECUNIARY', 'DECENCY'] +3570-5695-0006-2462: ref=['VERY', 'MUCH', 'OF', 'SQUALOR', 'AND', 'DISCOMFORT', 'WILL', 'BE', 'ENDURED', 'BEFORE', 'THE', 'LAST', 'TRINKET', 'OR', 'THE', 'LAST', 'PRETENSE', 'OF', 'PECUNIARY', 'DECENCY', 'IS', 'PUT', 'AWAY'] +3570-5695-0006-2462: hyp=['VERY', 'MUCH', 'OF', 'SQUALOR', 'AND', 'DISCOMFORT', 'WILL', 'BE', 'ENDURED', 'BEFORE', 'THE', 'LAST', 'TRINKET', 'OR', 'THE', 'LAST', 'PRETENCE', 'OF', 'PECUNIARY', 'DECENCIES', 'PUT', 'AWAY'] +3570-5695-0007-2463: ref=['THERE', 'IS', 'NO', 'CLASS', 'AND', 'NO', 'COUNTRY', 'THAT', 'HAS', 'YIELDED', 'SO', 'ABJECTLY', 'BEFORE', 'THE', 'PRESSURE', 'OF', 'PHYSICAL', 'WANT', 'AS', 'TO', 'DENY', 'THEMSELVES', 'ALL', 'GRATIFICATION', 'OF', 'THIS', 'HIGHER', 'OR', 'SPIRITUAL', 'NEED'] +3570-5695-0007-2463: 
hyp=['THERE', 'IS', 'NO', 'CLASS', 'AND', 'NO', 'COUNTRY', 'THAT', 'HAS', 'YIELDED', 'SO', 'OBJECTLY', 'BEFORE', 'THE', 'PRESSURE', 'OF', 'PHYSICAL', 'WANT', 'AS', 'TO', 'DENY', 'THEMSELVES', 'ALL', 'GRATIFICATION', 'OF', 'THIS', 'HIGHER', 'OR', 'SPIRITUAL', 'NEED'] +3570-5695-0008-2464: ref=['THE', 'QUESTION', 'IS', 'WHICH', 'OF', 'THE', 'TWO', 'METHODS', 'WILL', 'MOST', 'EFFECTIVELY', 'REACH', 'THE', 'PERSONS', 'WHOSE', 'CONVICTIONS', 'IT', 'IS', 'DESIRED', 'TO', 'AFFECT'] +3570-5695-0008-2464: hyp=['THE', 'QUESTION', 'IS', 'WHICH', 'OF', 'THE', 'TWO', 'METHODS', 'WILL', 'MOST', 'EFFECTIVELY', 'REACH', 'THE', 'PERSONS', 'WHOSE', 'CONVICTIONS', 'IT', 'IS', 'DESIRED', 'TO', 'EFFECT'] +3570-5695-0009-2465: ref=['EACH', 'WILL', 'THEREFORE', 'SERVE', 'ABOUT', 'EQUALLY', 'WELL', 'DURING', 'THE', 'EARLIER', 'STAGES', 'OF', 'SOCIAL', 'GROWTH'] +3570-5695-0009-2465: hyp=['EACH', 'WILL', 'THEREFORE', 'SERVE', 'ABOUT', 'EQUALLY', 'WELL', 'DURING', 'THE', 'EARLIER', 'STAGES', 'OF', 'SOCIAL', 'GROWTH'] +3570-5695-0010-2466: ref=['THE', 'MODERN', 'ORGANIZATION', 'OF', 'INDUSTRY', 'WORKS', 'IN', 'THE', 'SAME', 'DIRECTION', 'ALSO', 'BY', 'ANOTHER', 'LINE'] +3570-5695-0010-2466: hyp=['THE', 'MODERN', 'ORGANIZATION', 'OF', 'INDUSTRY', 'WORKS', 'IN', 'THE', 'SAME', 'DIRECTION', 'ALSO', 'BY', 'ANOTHER', 'LINE'] +3570-5695-0011-2467: ref=['IT', 'IS', 'EVIDENT', 'THEREFORE', 'THAT', 'THE', 'PRESENT', 'TREND', 'OF', 'THE', 'DEVELOPMENT', 'IS', 'IN', 'THE', 'DIRECTION', 'OF', 'HEIGHTENING', 'THE', 'UTILITY', 'OF', 'CONSPICUOUS', 'CONSUMPTION', 'AS', 'COMPARED', 'WITH', 'LEISURE'] +3570-5695-0011-2467: hyp=['IT', 'IS', 'EVIDENT', 'THEREFORE', 'THAT', 'THE', 'PRESENT', 'TREND', 'OF', 'THE', 'DEVELOPMENT', 'IS', 'IN', 'THE', 'DIRECTION', 'OF', 'HEIGHTENING', 'THE', 'UTILITY', 'OF', 'CONSPICUOUS', 'CONSUMPTION', 'AS', 'COMPARED', 'WITH', 'LEISURE'] +3570-5695-0012-2468: ref=['IT', 'IS', 'ALSO', 'NOTICEABLE', 'THAT', 'THE', 'SERVICEABILITY', 'OF', 'CONSUMPTION', 'AS', 'A', 'MEANS', 'OF', 'REPUTE', 'AS', 'WELL', 'AS', 'THE', 'INSISTENCE', 'ON', 'IT', 'AS', 'AN', 'ELEMENT', 'OF', 'DECENCY', 'IS', 'AT', 'ITS', 'BEST', 'IN', 'THOSE', 'PORTIONS', 'OF', 'THE', 'COMMUNITY', 'WHERE', 'THE', 'HUMAN', 'CONTACT', 'OF', 'THE', 'INDIVIDUAL', 'IS', 'WIDEST', 'AND', 'THE', 'MOBILITY', 'OF', 'THE', 'POPULATION', 'IS', 'GREATEST'] +3570-5695-0012-2468: hyp=['IT', 'IS', 'ALSO', 'NOTICEABLE', 'THAT', 'THE', 'SURFABILITY', 'OF', 'CONSUMPTION', 'AS', 'A', 'MEANS', 'OF', 'REPUTE', 'AS', 'WELL', 'AS', 'THE', 'INSISTENCE', 'ON', 'IT', 'AS', 'AN', 'ELEMENT', 'OF', 'DECENCY', 'IS', 'AT', 'ITS', 'BEST', 'IN', 'THOSE', 'PORTIONS', 'OF', 'THE', 'COMMUNITY', 'WHERE', 'THE', 'HUMAN', 'CONTACT', 'OF', 'THE', 'INDIVIDUAL', 'IS', 'WIDEST', 'AND', 'THE', 'MOBILITY', 'OF', 'THE', 'POPULATION', 'IS', 'GREATEST'] +3570-5695-0013-2469: ref=['CONSUMPTION', 'BECOMES', 'A', 'LARGER', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'LIVING', 'IN', 'THE', 'CITY', 'THAN', 'IN', 'THE', 'COUNTRY'] +3570-5695-0013-2469: hyp=['CONSUMPTION', 'BECOMES', 'A', 'LARGER', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'LIVING', 'IN', 'THE', 'CITY', 'THAN', 'IN', 'THE', 'COUNTRY'] +3570-5695-0014-2470: ref=['AMONG', 'THE', 'COUNTRY', 'POPULATION', 'ITS', 'PLACE', 'IS', 'TO', 'SOME', 'EXTENT', 'TAKEN', 'BY', 'SAVINGS', 'AND', 'HOME', 'COMFORTS', 'KNOWN', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'NEIGHBORHOOD', 'GOSSIP', 'SUFFICIENTLY', 'TO', 'SERVE', 'THE', 'LIKE', 'GENERAL', 'PURPOSE', 'OF', 'PECUNIARY', 'REPUTE'] +3570-5695-0014-2470: hyp=['AMONG', 'THE', 'COUNTRY', 'POPULATION', 'ITS', 
'PLACES', 'TO', 'SOME', 'EXTENT', 'TAKEN', 'BY', 'SAVINGS', 'AND', 'HOME', 'COMFORTS', 'KNOWN', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'NEIGHBOURHOOD', 'GOSSIPS', 'SUFFICIENTLY', 'TO', 'SERVE', 'THE', 'LIKE', 'GENERAL', 'PURPOSE', 'OF', 'PECUNIARY', 'REPUTE'] +3570-5695-0015-2471: ref=['THE', 'RESULT', 'IS', 'A', 'GREAT', 'MOBILITY', 'OF', 'THE', 'LABOR', 'EMPLOYED', 'IN', 'PRINTING', 'PERHAPS', 'GREATER', 'THAN', 'IN', 'ANY', 'OTHER', 'EQUALLY', 'WELL', 'DEFINED', 'AND', 'CONSIDERABLE', 'BODY', 'OF', 'WORKMEN'] +3570-5695-0015-2471: hyp=['THE', 'RESULT', 'IS', 'A', 'GREAT', 'MOBILITY', 'OF', 'THE', 'LABOR', 'EMPLOYED', 'IN', 'PRINTING', 'PERHAPS', 'GREATER', 'THAN', 'IN', 'ANY', 'OTHER', 'EQUALLY', 'WELL', 'DEFINED', 'AND', 'CONSIDERABLE', 'BODY', 'OF', 'WORKMEN'] +3570-5696-0000-2472: ref=['UNDER', 'THE', 'SIMPLE', 'TEST', 'OF', 'EFFECTIVENESS', 'FOR', 'ADVERTISING', 'WE', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'LEISURE', 'AND', 'THE', 'CONSPICUOUS', 'CONSUMPTION', 'OF', 'GOODS', 'DIVIDING', 'THE', 'FIELD', 'OF', 'PECUNIARY', 'EMULATION', 'PRETTY', 'EVENLY', 'BETWEEN', 'THEM', 'AT', 'THE', 'OUTSET'] +3570-5696-0000-2472: hyp=['UNDER', 'THE', 'SIMPLE', 'TEST', 'OF', 'EFFECTIVENESS', 'FOR', 'ADVERTISING', 'WE', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'LEISURE', 'AND', 'THE', 'CONSPICUOUS', 'CONSUMPTION', 'OF', 'GOODS', 'DIVIDING', 'THE', 'FIELD', 'OF', 'PECUNIARY', 'EMULATION', 'PRETTY', 'EVENLY', 'BETWEEN', 'THEM', 'AT', 'THE', 'OUTSET'] +3570-5696-0001-2473: ref=['BUT', 'THE', 'ACTUAL', 'COURSE', 'OF', 'DEVELOPMENT', 'HAS', 'BEEN', 'SOMEWHAT', 'DIFFERENT', 'FROM', 'THIS', 'IDEAL', 'SCHEME', 'LEISURE', 'HELD', 'THE', 'FIRST', 'PLACE', 'AT', 'THE', 'START', 'AND', 'CAME', 'TO', 'HOLD', 'A', 'RANK', 'VERY', 'MUCH', 'ABOVE', 'WASTEFUL', 'CONSUMPTION', 'OF', 'GOODS', 'BOTH', 'AS', 'A', 'DIRECT', 'EXPONENT', 'OF', 'WEALTH', 'AND', 'AS', 'AN', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'DECENCY', 'DURING', 'THE', 'QUASI', 'PEACEABLE', 'CULTURE'] +3570-5696-0001-2473: hyp=['BUT', 'THE', 'ACTUAL', 'COURSE', 'OF', 'DEVELOPMENT', 'HAS', 'BEEN', 'SOMEWHAT', 'DIFFERENT', 'FROM', 'THIS', 'IDEAL', 'SCHEME', 'LEISURE', 'HELD', 'THE', 'FIRST', 'PLACE', 'AT', 'THE', 'START', 'AND', 'CAME', 'TO', 'ALL', 'THE', 'RANK', 'VEREMENT', 'ABOVE', 'WASTEFUL', 'CONSUMPTION', 'OF', 'GOODS', 'BOTH', 'AS', 'A', 'DIRECT', 'EXPONENT', 'OF', 'WEALTH', 'AND', 'AS', 'AN', 'ELEMENT', 'IN', 'THE', 'STANDARD', 'OF', 'DECENCY', 'DURING', 'THE', 'COURSE', 'I', 'PEACEABLE', 'CULTURE'] +3570-5696-0002-2474: ref=['OTHER', 'CIRCUMSTANCES', 'PERMITTING', 'THAT', 'INSTINCT', 'DISPOSES', 'MEN', 'TO', 'LOOK', 'WITH', 'FAVOR', 'UPON', 'PRODUCTIVE', 'EFFICIENCY', 'AND', 'ON', 'WHATEVER', 'IS', 'OF', 'HUMAN', 'USE'] +3570-5696-0002-2474: hyp=['ARE', 'THE', 'CIRCUMSTANCES', 'PERMITTING', 'THAT', 'INSTINCT', 'DISPOSES', 'MEN', 'TO', 'LOOK', 'WITH', 'FAVOUR', 'UPON', 'PRODUCTIVE', 'EFFICIENCY', 'AND', 'ON', 'WHATEVER', 'IS', 'OF', 'HUMAN', 'USE'] +3570-5696-0003-2475: ref=['A', 'RECONCILIATION', 'BETWEEN', 'THE', 'TWO', 'CONFLICTING', 'REQUIREMENTS', 'IS', 'EFFECTED', 'BY', 'A', 'RESORT', 'TO', 'MAKE', 'BELIEVE', 'MANY', 'AND', 'INTRICATE', 'POLITE', 'OBSERVANCES', 'AND', 'SOCIAL', 'DUTIES', 'OF', 'A', 'CEREMONIAL', 'NATURE', 'ARE', 'DEVELOPED', 'MANY', 'ORGANIZATIONS', 'ARE', 'FOUNDED', 'WITH', 'SOME', 'SPECIOUS', 'OBJECT', 'OF', 'AMELIORATION', 'EMBODIED', 'IN', 'THEIR', 'OFFICIAL', 'STYLE', 'AND', 'TITLE', 'THERE', 'IS', 'MUCH', 'COMING', 'AND', 'GOING', 'AND', 'A', 'DEAL', 'OF', 'TALK', 'TO', 'THE', 'END', 'THAT', 'THE', 'TALKERS', 'MAY', 'NOT', 'HAVE', 
'OCCASION', 'TO', 'REFLECT', 'ON', 'WHAT', 'IS', 'THE', 'EFFECTUAL', 'ECONOMIC', 'VALUE', 'OF', 'THEIR', 'TRAFFIC'] +3570-5696-0003-2475: hyp=['I', 'RECONCILIATION', 'BETWEEN', 'THE', 'TWO', 'CONFLICTING', 'REQUIREMENTS', 'IS', 'AFFECTED', 'BY', 'RESORT', 'TO', 'MAKE', 'BELIEVE', 'MEN', 'IN', 'INTRICATE', 'POLITE', 'OBSERVANCES', 'AND', 'SOCIAL', 'DUTIES', 'OF', 'A', 'CEREMONIAL', 'NATURE', 'ARE', 'DEVELOPED', 'MANY', 'ORGANIZATIONS', 'ARE', 'FOUNDED', 'WITH', 'SOME', 'SPECIOUS', 'OBJECT', 'OF', 'AMELIORATION', 'EMBODIED', 'IN', 'THEIR', 'OFFICIAL', 'STYLANT', 'TITLE', 'THERE', 'IS', 'MUCH', 'COMING', 'AND', 'GOING', 'AND', 'A', 'DEAL', 'OF', 'TALK', 'TO', 'THE', 'END', 'THAT', 'THE', 'TALK', 'IS', 'MAY', 'NOT', 'HAVE', 'OCCASION', 'TO', 'REFLECT', 'ON', 'WHAT', 'IS', 'THE', 'EFFECTUAL', 'ECONOMIC', 'VALUE', 'OF', 'THEIR', 'TRAFFIC'] +3570-5696-0004-2476: ref=['THE', 'SALIENT', 'FEATURES', 'OF', 'THIS', 'DEVELOPMENT', 'OF', 'DOMESTIC', 'SERVICE', 'HAVE', 'ALREADY', 'BEEN', 'INDICATED'] +3570-5696-0004-2476: hyp=['THE', 'SAILOR', 'AND', 'FEATURES', 'OF', 'THIS', 'DEVELOPMENT', 'OF', 'DOMESTIC', 'SERVICE', 'HAVE', 'ALREADY', 'BEEN', 'INDICATED'] +3570-5696-0005-2477: ref=['THROUGHOUT', 'THE', 'ENTIRE', 'EVOLUTION', 'OF', 'CONSPICUOUS', 'EXPENDITURE', 'WHETHER', 'OF', 'GOODS', 'OR', 'OF', 'SERVICES', 'OR', 'HUMAN', 'LIFE', 'RUNS', 'THE', 'OBVIOUS', 'IMPLICATION', 'THAT', 'IN', 'ORDER', 'TO', 'EFFECTUALLY', 'MEND', 'THE', "CONSUMER'S", 'GOOD', 'FAME', 'IT', 'MUST', 'BE', 'AN', 'EXPENDITURE', 'OF', 'SUPERFLUITIES'] +3570-5696-0005-2477: hyp=['THROUGHOUT', 'THE', 'ENTIRE', 'REVOLUTION', 'OF', 'CONSPICUOUS', 'EXPENDITURE', 'WHETHER', 'OF', 'GOODS', 'OR', 'OF', 'SERVICES', 'OR', 'HUMAN', 'LIFE', 'RUNS', 'THE', 'OBVIOUS', 'IMPLICATION', 'THAT', 'IN', 'ORDER', 'TO', 'EFFECTUALLY', 'MEND', 'THE', 'CONSUMERS', 'GOOD', 'FAME', 'IT', 'MUST', 'BE', 'AN', 'EXPENDITURE', 'OF', 'SUPERFLUITIES'] +3570-5696-0006-2478: ref=['AS', 'USED', 'IN', 'THE', 'SPEECH', 'OF', 'EVERYDAY', 'LIFE', 'THE', 'WORD', 'CARRIES', 'AN', 'UNDERTONE', 'OF', 'DEPRECATION'] +3570-5696-0006-2478: hyp=['AS', 'USED', 'IN', 'THE', 'SPEECH', 'OF', 'EVERY', 'DAY', 'LIFE', 'THE', 'WORD', 'CARRIES', 'AN', 'UNDERTONE', 'OF', 'DEPRECATION'] +3570-5696-0007-2479: ref=['THE', 'USE', 'OF', 'THE', 'WORD', 'WASTE', 'AS', 'A', 'TECHNICAL', 'TERM', 'THEREFORE', 'IMPLIES', 'NO', 'DEPRECATION', 'OF', 'THE', 'MOTIVES', 'OR', 'OF', 'THE', 'ENDS', 'SOUGHT', 'BY', 'THE', 'CONSUMER', 'UNDER', 'THIS', 'CANON', 'OF', 'CONSPICUOUS', 'WASTE'] +3570-5696-0007-2479: hyp=['THE', 'USE', 'OF', 'THE', 'WORD', 'WASTE', 'AS', 'A', 'TECHNICAL', 'TERM', 'THEREFORE', 'IMPLIES', 'NO', 'DEPRECATION', 'OF', 'THE', 'MOTIVES', 'OR', 'OF', 'THE', 'ENDS', 'SOUGHT', 'BY', 'THE', 'CONSUMER', 'UNDER', 'THIS', 'CANON', 'OF', 'CONSPICUOUS', 'WASTE'] +3570-5696-0008-2480: ref=['BUT', 'IT', 'IS', 'ON', 'OTHER', 'GROUNDS', 'WORTH', 'NOTING', 'THAT', 'THE', 'TERM', 'WASTE', 'IN', 'THE', 'LANGUAGE', 'OF', 'EVERYDAY', 'LIFE', 'IMPLIES', 'DEPRECATION', 'OF', 'WHAT', 'IS', 'CHARACTERIZED', 'AS', 'WASTEFUL'] +3570-5696-0008-2480: hyp=['BUT', 'IT', 'IS', 'ANOTHER', 'GROUNDS', 'WORTH', 'NOTING', 'THAT', 'THE', 'TERM', 'WASTE', 'IN', 'THE', 'LANGUAGE', 'OF', 'EVERY', 'DAY', 'LIFE', 'IMPLIES', 'DEPRECATION', 'OF', 'WHAT', 'IS', 'CHARACTERIZED', 'AS', 'WASTEFUL'] +3570-5696-0009-2481: ref=['IN', 'STRICT', 'ACCURACY', 'NOTHING', 'SHOULD', 'BE', 'INCLUDED', 'UNDER', 'THE', 'HEAD', 'OF', 'CONSPICUOUS', 'WASTE', 'BUT', 'SUCH', 'EXPENDITURE', 'AS', 'IS', 'INCURRED', 'ON', 'THE', 'GROUND', 'OF', 'AN', 
'INVIDIOUS', 'PECUNIARY', 'COMPARISON'] +3570-5696-0009-2481: hyp=['IN', 'STRICT', 'ACCURACY', 'NOTHING', 'SHOULD', 'BE', 'INCLUDED', 'UNDER', 'THE', 'HEAD', 'OF', 'CONSPICUOUS', 'WASTE', 'BUT', 'SUCH', 'EXPENDITURE', 'AS', 'IS', 'INCURRED', 'ON', 'THE', 'GROUND', 'OF', 'AN', 'INVIDIOUS', 'PECUNIARY', 'COMPARISON'] +3570-5696-0010-2482: ref=['AN', 'ARTICLE', 'MAY', 'BE', 'USEFUL', 'AND', 'WASTEFUL', 'BOTH', 'AND', 'ITS', 'UTILITY', 'TO', 'THE', 'CONSUMER', 'MAY', 'BE', 'MADE', 'UP', 'OF', 'USE', 'AND', 'WASTE', 'IN', 'THE', 'MOST', 'VARYING', 'PROPORTIONS'] +3570-5696-0010-2482: hyp=['AN', 'ARTICLE', 'MAY', 'BE', 'USEFUL', 'AND', 'WASTEFUL', 'BOTH', 'AND', 'ITS', 'UTILITY', 'TO', 'THE', 'CONSUMER', 'MAY', 'BE', 'MADE', 'UP', 'OF', 'USE', 'AND', 'WASTE', 'IN', 'THE', 'MOST', 'VARYING', 'PROPORTIONS'] +3575-170457-0000-369: ref=['AND', 'OFTEN', 'HAS', 'MY', 'MOTHER', 'SAID', 'WHILE', 'ON', 'HER', 'LAP', 'I', 'LAID', 'MY', 'HEAD', 'SHE', 'FEARED', 'FOR', 'TIME', 'I', 'WAS', 'NOT', 'MADE', 'BUT', 'FOR', 'ETERNITY'] +3575-170457-0000-369: hyp=['AND', 'OFTEN', 'HAS', 'MY', 'MOTHER', 'SAID', 'WHILE', 'ON', 'HER', 'LAP', 'I', 'LAID', 'MY', 'HEAD', 'SHE', 'FEARED', 'FOR', 'TIME', 'I', 'WAS', 'NOT', 'MADE', 'BUT', 'FOR', 'ETERNITY'] +3575-170457-0001-370: ref=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DENIED', 'EACH', "OTHER'S", 'SOCIETY'] +3575-170457-0001-370: hyp=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DENIED', 'EACH', "OTHER'S", 'SOCIETY'] +3575-170457-0002-371: ref=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DIVIDED'] +3575-170457-0002-371: hyp=['WHY', 'ARE', 'WE', 'TO', 'BE', 'DIVIDED'] +3575-170457-0003-372: ref=['SURELY', 'IT', 'MUST', 'BE', 'BECAUSE', 'WE', 'ARE', 'IN', 'DANGER', 'OF', 'LOVING', 'EACH', 'OTHER', 'TOO', 'WELL', 'OF', 'LOSING', 'SIGHT', 'OF', 'THE', 'CREATOR', 'IN', 'IDOLATRY', 'OF', 'THE', 'CREATURE'] +3575-170457-0003-372: hyp=['SURELY', 'IT', 'MUST', 'BE', 'BECAUSE', 'WE', 'ARE', 'IN', 'DANGER', 'OF', 'LOVING', 'EACH', 'OTHER', 'TOO', 'WELL', 'OF', 'LOSING', 'SIGHT', 'OF', 'THE', 'CREATOR', 'AND', 'IDOLATRY', 'OF', 'THE', 'CREATURE'] +3575-170457-0004-373: ref=['WE', 'USED', 'TO', 'DISPUTE', 'ABOUT', 'POLITICS', 'AND', 'RELIGION'] +3575-170457-0004-373: hyp=['WE', 'USED', 'TO', 'DISPUTE', 'ABOUT', 'POLITICS', 'AND', 'RELIGION'] +3575-170457-0005-374: ref=['SHE', 'A', 'TORY', 'AND', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'ALWAYS', 'IN', 'A', 'MINORITY', 'OF', 'ONE', 'IN', 'OUR', 'HOUSE', 'OF', 'VIOLENT', 'DISSENT', 'AND', 'RADICALISM'] +3575-170457-0005-374: hyp=['SHE', 'ATTORIAN', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'ALWAYS', 'IN', 'A', 'MINORITY', 'OF', 'ONE', 'IN', 'OUR', 'HOUSE', 'A', 'VIOLENT', 'DESCENT', 'AND', 'RADICALISM'] +3575-170457-0006-375: ref=['HER', 'FEEBLE', 'HEALTH', 'GAVE', 'HER', 'HER', 'YIELDING', 'MANNER', 'FOR', 'SHE', 'COULD', 'NEVER', 'OPPOSE', 'ANY', 'ONE', 'WITHOUT', 'GATHERING', 'UP', 'ALL', 'HER', 'STRENGTH', 'FOR', 'THE', 'STRUGGLE'] +3575-170457-0006-375: hyp=['HER', 'FEEBLE', 'HEALTH', 'GAVE', 'HER', 'HER', 'YIELDING', 'MANNER', 'FOR', 'SHE', 'COULD', 'NEVER', 'OPPOSE', 'ANY', 'ONE', 'WITHOUT', 'GATHERING', 'UP', 'ALL', 'HER', 'STRENGTH', 'FOR', 'THE', 'STRUGGLE'] +3575-170457-0007-376: ref=['HE', 'SPOKE', 'FRENCH', 'PERFECTLY', 'I', 'HAVE', 'BEEN', 'TOLD', 'WHEN', 'NEED', 'WAS', 'BUT', 'DELIGHTED', 'USUALLY', 'IN', 'TALKING', 'THE', 'BROADEST', 'YORKSHIRE'] +3575-170457-0007-376: hyp=['HE', 'SPOKE', 'FRENCH', 'PERFECTLY', 'I', 'HAVE', 'BEEN', 'TOLD', 'WHEN', 'NEED', 'WAS', 'BUT', 'DELIGHTED', 'USUALLY', 'IN', 'TALKING', 'THE', 'BROADEST', 'YORKSHIRE'] 
+3575-170457-0008-377: ref=['AND', 'SO', 'LIFE', 'AND', 'DEATH', 'HAVE', 'DISPERSED', 'THE', 'CIRCLE', 'OF', 'VIOLENT', 'RADICALS', 'AND', 'DISSENTERS', 'INTO', 'WHICH', 'TWENTY', 'YEARS', 'AGO', 'THE', 'LITTLE', 'QUIET', 'RESOLUTE', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'RECEIVED', 'AND', 'BY', 'WHOM', 'SHE', 'WAS', 'TRULY', 'LOVED', 'AND', 'HONOURED'] +3575-170457-0008-377: hyp=['AND', 'SO', 'LIFE', 'AND', 'DEATH', 'HAVE', 'DISPERSED', 'THE', 'CIRCLE', 'OF', 'VIOLENT', 'RADICALS', 'AND', 'DISSENTERS', 'INTO', 'WHICH', 'TWENTY', 'YEARS', 'AGO', 'THE', 'LITTLE', 'QUIET', 'RESOLUTE', "CLERGYMAN'S", 'DAUGHTER', 'WAS', 'RECEIVED', 'AND', 'BY', 'WHOM', 'SHE', 'WAS', 'TRULY', 'LOVED', 'AND', 'HONORED'] +3575-170457-0009-378: ref=['JANUARY', 'AND', 'FEBRUARY', 'OF', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'HAD', 'PASSED', 'AWAY', 'AND', 'STILL', 'THERE', 'WAS', 'NO', 'REPLY', 'FROM', 'SOUTHEY'] +3575-170457-0009-378: hyp=['JANUARY', 'AND', 'FEBRUARY', 'OF', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'HAD', 'PASSED', 'AWAY', 'AND', 'STILL', 'THERE', 'WAS', 'NO', 'REPLY', 'FROM', 'SALVIE'] +3575-170457-0010-379: ref=['I', 'AM', 'NOT', 'DEPRECIATING', 'IT', 'WHEN', 'I', 'SAY', 'THAT', 'IN', 'THESE', 'TIMES', 'IT', 'IS', 'NOT', 'RARE'] +3575-170457-0010-379: hyp=['I', 'AM', 'NOT', 'DEPRECIATING', 'IT', 'WHEN', 'I', 'SAY', 'THAT', 'IN', 'THESE', 'TIMES', 'IT', 'IS', 'NOT', 'RARE'] +3575-170457-0011-380: ref=['BUT', 'IT', 'IS', 'NOT', 'WITH', 'A', 'VIEW', 'TO', 'DISTINCTION', 'THAT', 'YOU', 'SHOULD', 'CULTIVATE', 'THIS', 'TALENT', 'IF', 'YOU', 'CONSULT', 'YOUR', 'OWN', 'HAPPINESS'] +3575-170457-0011-380: hyp=['BUT', 'IT', 'IS', 'NOT', 'WITH', 'A', 'VIEW', 'TO', 'DISTINCTION', 'THAT', 'YOU', 'SHOULD', 'CULTIVATE', 'THIS', 'TALENT', 'IF', 'YOU', 'CONSULT', 'YOUR', 'OWN', 'HAPPINESS'] +3575-170457-0012-381: ref=['YOU', 'WILL', 'SAY', 'THAT', 'A', 'WOMAN', 'HAS', 'NO', 'NEED', 'OF', 'SUCH', 'A', 'CAUTION', 'THERE', 'CAN', 'BE', 'NO', 'PERIL', 'IN', 'IT', 'FOR', 'HER'] +3575-170457-0012-381: hyp=['YOU', 'WILL', 'SAY', 'THAT', 'A', 'WOMAN', 'HAS', 'NO', 'NEED', 'OF', 'SUCH', 'A', 'CAUTION', 'THERE', 'CAN', 'BE', 'NO', 'PERIL', 'IN', 'IT', 'FOR', 'HER'] +3575-170457-0013-382: ref=['THE', 'MORE', 'SHE', 'IS', 'ENGAGED', 'IN', 'HER', 'PROPER', 'DUTIES', 'THE', 'LESS', 'LEISURE', 'WILL', 'SHE', 'HAVE', 'FOR', 'IT', 'EVEN', 'AS', 'AN', 'ACCOMPLISHMENT', 'AND', 'A', 'RECREATION'] +3575-170457-0013-382: hyp=['THE', 'MORE', 'SHE', 'IS', 'ENGAGED', 'IN', 'HER', 'PROPER', 'DUTIES', 'THE', 'LESS', 'LEISURE', 'WILL', 'SHE', 'HAVE', 'FOR', 'IT', 'EVEN', 'AS', 'AN', 'ACCOMPLISHMENT', 'AND', 'A', 'RECREATION'] +3575-170457-0014-383: ref=['TO', 'THOSE', 'DUTIES', 'YOU', 'HAVE', 'NOT', 'YET', 'BEEN', 'CALLED', 'AND', 'WHEN', 'YOU', 'ARE', 'YOU', 'WILL', 'BE', 'LESS', 'EAGER', 'FOR', 'CELEBRITY'] +3575-170457-0014-383: hyp=['TO', 'THOSE', 'DUTIES', 'YOU', 'HAVE', 'NOT', 'YET', 'BEEN', 'CALLED', 'AND', 'WHEN', 'YOU', 'ARE', 'YOU', 'WILL', 'BE', 'LESS', 'EAGER', 'FOR', 'CELEBRITY'] +3575-170457-0015-384: ref=['BUT', 'DO', 'NOT', 'SUPPOSE', 'THAT', 'I', 'DISPARAGE', 'THE', 'GIFT', 'WHICH', 'YOU', 'POSSESS', 'NOR', 'THAT', 'I', 'WOULD', 'DISCOURAGE', 'YOU', 'FROM', 'EXERCISING', 'IT', 'I', 'ONLY', 'EXHORT', 'YOU', 'SO', 'TO', 'THINK', 'OF', 'IT', 'AND', 'SO', 'TO', 'USE', 'IT', 'AS', 'TO', 'RENDER', 'IT', 'CONDUCIVE', 'TO', 'YOUR', 'OWN', 'PERMANENT', 'GOOD'] +3575-170457-0015-384: hyp=['BUT', 'DO', 'NOT', 'SUPPOSE', 'THAT', 'I', 'DISPARAGE', 'THE', 'GIFT', 'WHICH', 'YOU', 'POSSESS', 'NOR', 'THAT', 'I', 'WOULD', 'DISCOURAGE', 'YOU', 'FROM', 
'EXERCISING', 'IT', 'I', 'ONLY', 'EXHORT', 'YOU', 'SO', 'TO', 'THINK', 'OF', 'IT', 'AND', 'SO', 'TO', 'USE', 'IT', 'AS', 'TO', 'RENDER', 'IT', 'CONDUCIVE', 'TO', 'YOUR', 'OWN', 'PERMANENT', 'GOOD'] +3575-170457-0016-385: ref=['FAREWELL', 'MADAM'] +3575-170457-0016-385: hyp=['FAREWELL', 'MADAME'] +3575-170457-0017-386: ref=['THOUGH', 'I', 'MAY', 'BE', 'BUT', 'AN', 'UNGRACIOUS', 'ADVISER', 'YOU', 'WILL', 'ALLOW', 'ME', 'THEREFORE', 'TO', 'SUBSCRIBE', 'MYSELF', 'WITH', 'THE', 'BEST', 'WISHES', 'FOR', 'YOUR', 'HAPPINESS', 'HERE', 'AND', 'HEREAFTER', 'YOUR', 'TRUE', 'FRIEND', 'ROBERT', 'SOUTHEY'] +3575-170457-0017-386: hyp=['THOUGH', 'I', 'MAY', 'BE', 'BUT', 'AN', 'UNGRACIOUS', 'ADVISER', 'YOU', 'WILL', 'ALLOW', 'ME', 'THEREFORE', 'TO', 'SUBSCRIBE', 'MYSELF', 'WITH', 'THE', 'BEST', 'WISHES', 'FOR', 'YOUR', 'HAPPINESS', 'HERE', 'AND', 'HEREAFTER', 'YOUR', 'TRUE', 'FRIEND', 'ROBERT', 'SELVIE'] +3575-170457-0018-387: ref=['SIR', 'MARCH', 'SIXTEENTH'] +3575-170457-0018-387: hyp=['SIR', 'MARCH', 'SIXTEENTH'] +3575-170457-0019-388: ref=['I', 'HAD', 'NOT', 'VENTURED', 'TO', 'HOPE', 'FOR', 'SUCH', 'A', 'REPLY', 'SO', 'CONSIDERATE', 'IN', 'ITS', 'TONE', 'SO', 'NOBLE', 'IN', 'ITS', 'SPIRIT'] +3575-170457-0019-388: hyp=['I', 'HAVE', 'NOT', 'VENTURED', 'TO', 'HOPE', 'FOR', 'SUCH', 'A', 'REPLY', 'SO', 'CONSIDERATE', 'IN', 'ITS', 'TONE', 'SO', 'NOBLE', 'IN', 'ITS', 'SPIRIT'] +3575-170457-0020-389: ref=['I', 'KNOW', 'THE', 'FIRST', 'LETTER', 'I', 'WROTE', 'TO', 'YOU', 'WAS', 'ALL', 'SENSELESS', 'TRASH', 'FROM', 'BEGINNING', 'TO', 'END', 'BUT', 'I', 'AM', 'NOT', 'ALTOGETHER', 'THE', 'IDLE', 'DREAMING', 'BEING', 'IT', 'WOULD', 'SEEM', 'TO', 'DENOTE'] +3575-170457-0020-389: hyp=['I', 'KNOW', 'THE', 'FIRST', 'LETTER', 'I', 'WROTE', 'TO', 'YOU', 'WAS', 'ALL', 'SENSELESS', 'TRASH', 'FROM', 'BEGINNING', 'TO', 'END', 'BUT', 'I', 'AM', 'NOT', 'ALTOGETHER', 'THE', 'IDLE', 'DREAMING', 'BEING', 'IT', 'WOULD', 'SEEM', 'TO', 'DENOTE'] +3575-170457-0021-390: ref=['I', 'THOUGHT', 'IT', 'THEREFORE', 'MY', 'DUTY', 'WHEN', 'I', 'LEFT', 'SCHOOL', 'TO', 'BECOME', 'A', 'GOVERNESS'] +3575-170457-0021-390: hyp=['I', 'THOUGHT', 'IT', 'THEREFORE', 'MY', 'DUTY', 'WHEN', 'I', 'LEFT', 'SCHOOL', 'TO', 'BECOME', 'A', 'GOVERNESS'] +3575-170457-0022-391: ref=['IN', 'THE', 'EVENINGS', 'I', 'CONFESS', 'I', 'DO', 'THINK', 'BUT', 'I', 'NEVER', 'TROUBLE', 'ANY', 'ONE', 'ELSE', 'WITH', 'MY', 'THOUGHTS'] +3575-170457-0022-391: hyp=['IN', 'THE', 'EVENINGS', 'I', 'CONFESSED', 'I', 'DO', 'THINK', 'BUT', 'I', 'NEVER', 'TROUBLE', 'ANYONE', 'ELSE', 'WITH', 'MY', 'THOUGHTS'] +3575-170457-0023-392: ref=['I', 'CAREFULLY', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PREOCCUPATION', 'AND', 'ECCENTRICITY', 'WHICH', 'MIGHT', 'LEAD', 'THOSE', 'I', 'LIVE', 'AMONGST', 'TO', 'SUSPECT', 'THE', 'NATURE', 'OF', 'MY', 'PURSUITS'] +3575-170457-0023-392: hyp=['I', 'CAREFULLY', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PREOCCUPATION', 'AND', 'EXCENTRICITY', 'WHICH', 'MIGHT', 'LEAD', 'THOSE', 'I', 'LIVE', 'AMONGST', 'TO', 'SUSPECT', 'THE', 'NATURE', 'OF', 'MY', 'PURSUITS'] +3575-170457-0024-393: ref=['I', "DON'T", 'ALWAYS', 'SUCCEED', 'FOR', 'SOMETIMES', 'WHEN', "I'M", 'TEACHING', 'OR', 'SEWING', 'I', 'WOULD', 'RATHER', 'BE', 'READING', 'OR', 'WRITING', 'BUT', 'I', 'TRY', 'TO', 'DENY', 'MYSELF', 'AND', 'MY', "FATHER'S", 'APPROBATION', 'AMPLY', 'REWARDED', 'ME', 'FOR', 'THE', 'PRIVATION'] +3575-170457-0024-393: hyp=['I', "DON'T", 'ALWAYS', 'SUCCEED', 'FOR', 'SOMETIMES', 'WHEN', "I'M", 'TEACHING', 'OR', 'SEWING', 'I', 'WOULD', 'RATHER', 'BE', 'READING', 'A', 'WRITING', 'BUT', 'I', 
'TRIED', 'TO', 'DENY', 'MYSELF', 'AND', 'MY', "FATHER'S", 'APPROBATION', 'AMPLY', 'REWARDED', 'ME', 'FOR', 'THE', 'PRIVATION'] +3575-170457-0025-394: ref=['AGAIN', 'I', 'THANK', 'YOU', 'THIS', 'INCIDENT', 'I', 'SUPPOSE', 'WILL', 'BE', 'RENEWED', 'NO', 'MORE', 'IF', 'I', 'LIVE', 'TO', 'BE', 'AN', 'OLD', 'WOMAN', 'I', 'SHALL', 'REMEMBER', 'IT', 'THIRTY', 'YEARS', 'HENCE', 'AS', 'A', 'BRIGHT', 'DREAM'] +3575-170457-0025-394: hyp=['AGAIN', 'I', 'THANK', 'YOU', 'THIS', 'INCIDENT', 'I', 'SUPPOSE', 'WILL', 'BE', 'RENEWED', 'NO', 'MORE', 'IF', 'I', 'LIVE', 'TO', 'BE', 'AN', 'OLD', 'WOMAN', 'I', 'SHALL', 'REMEMBER', 'IT', 'THIRTY', 'YEARS', 'HENCE', 'AS', 'A', 'BRIGHT', 'DREAM'] +3575-170457-0026-395: ref=['P', 'S', 'PRAY', 'SIR', 'EXCUSE', 'ME', 'FOR', 'WRITING', 'TO', 'YOU', 'A', 'SECOND', 'TIME', 'I', 'COULD', 'NOT', 'HELP', 'WRITING', 'PARTLY', 'TO', 'TELL', 'YOU', 'HOW', 'THANKFUL', 'I', 'AM', 'FOR', 'YOUR', 'KINDNESS', 'AND', 'PARTLY', 'TO', 'LET', 'YOU', 'KNOW', 'THAT', 'YOUR', 'ADVICE', 'SHALL', 'NOT', 'BE', 'WASTED', 'HOWEVER', 'SORROWFULLY', 'AND', 'RELUCTANTLY', 'IT', 'MAY', 'BE', 'AT', 'FIRST', 'FOLLOWED', 'C', 'B'] +3575-170457-0026-395: hyp=['P', 'S', 'PRAY', 'SIR', 'EXCUSE', 'ME', 'FOR', 'WRITING', 'TO', 'YOU', 'A', 'SECOND', 'TIME', 'I', 'COULD', 'NOT', 'HELP', 'WRITING', 'PARTLY', 'TO', 'TELL', 'YOU', 'HOW', 'THANKFUL', 'I', 'AM', 'FOR', 'YOUR', 'KINDNESS', 'AND', 'PARTLY', 'TO', 'LET', 'YOU', 'KNOW', 'THAT', 'YOUR', 'ADVICE', 'SHALL', 'NOT', 'BE', 'WASTED', 'HOWEVER', 'SORROWFULLY', 'AND', 'RELUCTANTLY', 'IT', 'MAY', 'BE', 'AT', 'FIRST', 'FOLLOWED', 'C', 'B'] +3575-170457-0027-396: ref=['I', 'CANNOT', 'DENY', 'MYSELF', 'THE', 'GRATIFICATION', 'OF', 'INSERTING', "SOUTHEY'S", 'REPLY'] +3575-170457-0027-396: hyp=['I', 'CANNOT', 'DENY', 'MYSELF', 'THE', 'GRATIFICATION', 'OF', 'INSERTING', 'SO', 'THESE', 'REPLY'] +3575-170457-0028-397: ref=['KESWICK', 'MARCH', 'TWENTY', 'SECOND', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'DEAR', 'MADAM'] +3575-170457-0028-397: hyp=['KEZWICK', 'MARCH', 'TWENTY', 'SECOND', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'DEAR', 'MADAME'] +3575-170457-0029-398: ref=['YOUR', 'LETTER', 'HAS', 'GIVEN', 'ME', 'GREAT', 'PLEASURE', 'AND', 'I', 'SHOULD', 'NOT', 'FORGIVE', 'MYSELF', 'IF', 'I', 'DID', 'NOT', 'TELL', 'YOU', 'SO'] +3575-170457-0029-398: hyp=['YOUR', 'LETTER', 'HAS', 'GIVEN', 'ME', 'GREAT', 'PLEASURE', 'AND', 'I', 'SHOULD', 'NOT', 'FORGIVE', 'MYSELF', 'IF', 'I', 'DID', 'NOT', 'TELL', 'YOU', 'SO'] +3575-170457-0030-399: ref=['OF', 'THIS', 'SECOND', 'LETTER', 'ALSO', 'SHE', 'SPOKE', 'AND', 'TOLD', 'ME', 'THAT', 'IT', 'CONTAINED', 'AN', 'INVITATION', 'FOR', 'HER', 'TO', 'GO', 'AND', 'SEE', 'THE', 'POET', 'IF', 'EVER', 'SHE', 'VISITED', 'THE', 'LAKES'] +3575-170457-0030-399: hyp=['OF', 'THIS', 'SECOND', 'LETTER', 'ALSO', 'SHE', 'SPOKE', 'AND', 'TOLD', 'ME', 'THAT', 'IT', 'CONTAINED', 'AN', 'INVITATION', 'FOR', 'HER', 'TO', 'GO', 'AND', 'SEE', 'THE', 'POET', 'IF', 'EVER', 'SHE', 'VISITED', 'THE', 'LAKES'] +3575-170457-0031-400: ref=['ON', 'AUGUST', 'TWENTY', 'SEVENTH', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'SHE', 'WRITES'] +3575-170457-0031-400: hyp=['ON', 'AUGUST', 'TWENTY', 'SEVENTH', 'EIGHTEEN', 'THIRTY', 'SEVEN', 'SHE', 'WRITES'] +3575-170457-0032-401: ref=['COME', 'COME', 'I', 'AM', 'GETTING', 'REALLY', 'TIRED', 'OF', 'YOUR', 'ABSENCE'] +3575-170457-0032-401: hyp=['COME', 'COME', "I'M", 'GETTING', 'REALLY', 'TIRED', 'OF', 'YOUR', 'ABSENCE'] +3575-170457-0033-402: ref=['SATURDAY', 'AFTER', 'SATURDAY', 'COMES', 'ROUND', 'AND', 'I', 'CAN', 'HAVE', 'NO', 'HOPE', 'OF', 'HEARING', 'YOUR', 
'KNOCK', 'AT', 'THE', 'DOOR', 'AND', 'THEN', 'BEING', 'TOLD', 'THAT', 'MISS', 'E', 'IS', 'COME', 'OH', 'DEAR'] +3575-170457-0033-402: hyp=['SATURDAY', 'AFTER', 'SATURDAY', 'COMES', 'AROUND', 'AND', 'I', 'CAN', 'HAVE', 'NO', 'HOPE', 'OF', 'HEARING', 'YOUR', 'KNOCK', 'AT', 'THE', 'DOOR', 'AND', 'THEN', 'BEING', 'TOLD', 'THAT', 'MISSY', 'IS', 'COME', 'OH', 'DEAR'] +3575-170457-0034-403: ref=['IN', 'THIS', 'MONOTONOUS', 'LIFE', 'OF', 'MINE', 'THAT', 'WAS', 'A', 'PLEASANT', 'EVENT'] +3575-170457-0034-403: hyp=['IN', 'THIS', 'MONOTONOUS', 'LIFE', 'OF', 'MIND', 'THAT', 'WAS', 'A', 'PLEASANT', 'EVENT'] +3575-170457-0035-404: ref=['I', 'WISH', 'IT', 'WOULD', 'RECUR', 'AGAIN', 'BUT', 'IT', 'WILL', 'TAKE', 'TWO', 'OR', 'THREE', 'INTERVIEWS', 'BEFORE', 'THE', 'STIFFNESS', 'THE', 'ESTRANGEMENT', 'OF', 'THIS', 'LONG', 'SEPARATION', 'WILL', 'WEAR', 'AWAY'] +3575-170457-0035-404: hyp=['I', 'WISH', 'YOU', 'WERE', 'RECUR', 'AGAIN', 'BUT', 'IT', 'WILL', 'TAKE', 'TWO', 'OR', 'THREE', 'INTERVIEWS', 'BEFORE', 'THE', 'STIFFNESS', 'THE', 'ESTRANGEMENT', 'OF', 'THIS', 'LONG', 'SEPARATION', 'WILL', 'WEAR', 'AWAY'] +3575-170457-0036-405: ref=['MY', 'EYES', 'FILL', 'WITH', 'TEARS', 'WHEN', 'I', 'CONTRAST', 'THE', 'BLISS', 'OF', 'SUCH', 'A', 'STATE', 'BRIGHTENED', 'BY', 'HOPES', 'OF', 'THE', 'FUTURE', 'WITH', 'THE', 'MELANCHOLY', 'STATE', 'I', 'NOW', 'LIVE', 'IN', 'UNCERTAIN', 'THAT', 'I', 'EVER', 'FELT', 'TRUE', 'CONTRITION', 'WANDERING', 'IN', 'THOUGHT', 'AND', 'DEED', 'LONGING', 'FOR', 'HOLINESS', 'WHICH', 'I', 'SHALL', 'NEVER', 'NEVER', 'OBTAIN', 'SMITTEN', 'AT', 'TIMES', 'TO', 'THE', 'HEART', 'WITH', 'THE', 'CONVICTION', 'THAT', 'GHASTLY', 'CALVINISTIC', 'DOCTRINES', 'ARE', 'TRUE', 'DARKENED', 'IN', 'SHORT', 'BY', 'THE', 'VERY', 'SHADOWS', 'OF', 'SPIRITUAL', 'DEATH'] +3575-170457-0036-405: hyp=['MY', 'EYES', 'FILLED', 'TEARS', 'WHEN', 'I', 'CONTRAST', 'THE', 'BLISS', 'OF', 'SUCH', 'A', 'STATE', 'BRIGHTENED', 'BY', 'HOPES', 'OF', 'THE', 'FUTURE', 'WITH', 'THE', 'MELANCHOLY', 'STATE', 'I', 'NOW', 'LIVE', 'IN', 'UNCERTAIN', 'THAT', 'I', 'EVER', 'FELT', 'TRUE', 'CONTRITION', 'WONDERING', 'IN', 'THOUGHT', 'INDEED', 'LONGING', 'FOR', 'HOLINESS', 'WHICH', 'I', 'SHALL', 'NEVER', 'NEVER', 'OBTAIN', 'SMITTEN', 'AT', 'TIMES', 'TO', 'THE', 'HEART', 'WITH', 'THE', 'CONVICTION', 'THAT', 'GHASTLY', 'CALVINISTIC', 'DOCTRINES', 'ARE', 'TRUE', 'DARKENED', 'IN', 'SHORT', 'BY', 'THE', 'VERY', 'SHADOWS', 'OF', 'SPIRITUAL', 'DEATH'] +3575-170457-0037-406: ref=['IF', 'CHRISTIAN', 'PERFECTION', 'BE', 'NECESSARY', 'TO', 'SALVATION', 'I', 'SHALL', 'NEVER', 'BE', 'SAVED', 'MY', 'HEART', 'IS', 'A', 'VERY', 'HOTBED', 'FOR', 'SINFUL', 'THOUGHTS', 'AND', 'WHEN', 'I', 'DECIDE', 'ON', 'AN', 'ACTION', 'I', 'SCARCELY', 'REMEMBER', 'TO', 'LOOK', 'TO', 'MY', 'REDEEMER', 'FOR', 'DIRECTION'] +3575-170457-0037-406: hyp=['IF', 'CHRISTIAN', 'PERFECTION', 'BE', 'NECESSARY', 'TO', 'SALVATION', 'I', 'SHALL', 'NEVER', 'BE', 'SAVED', 'MY', 'HEART', 'IS', 'A', 'VERY', 'HOT', 'BED', 'FOR', 'SINFUL', 'THOUGHTS', 'AND', 'WHEN', 'I', 'DECIDE', 'ON', 'AN', 'ACTION', 'I', 'SCARCELY', 'REMEMBER', 'TO', 'LOOK', 'TO', 'MY', 'REDEEMER', 'FOR', 'A', 'DIRECTION'] +3575-170457-0038-407: ref=['AND', 'MEANTIME', 'I', 'KNOW', 'THE', 'GREATNESS', 'OF', 'JEHOVAH', 'I', 'ACKNOWLEDGE', 'THE', 'PERFECTION', 'OF', 'HIS', 'WORD', 'I', 'ADORE', 'THE', 'PURITY', 'OF', 'THE', 'CHRISTIAN', 'FAITH', 'MY', 'THEORY', 'IS', 'RIGHT', 'MY', 'PRACTICE', 'HORRIBLY', 'WRONG'] +3575-170457-0038-407: hyp=['AND', 'MEANTIME', 'I', 'KNOW', 'THE', 'GREATNESS', 'OF', 'JEHOVAH', 'I', 'ACKNOWLEDGE', 
'THE', 'PERFECTION', 'OF', 'HIS', 'WORD', 'I', 'ADORE', 'THE', 'PURITY', 'OF', 'THE', 'CHRISTIAN', 'FAITH', 'MY', 'THEORY', 'IS', 'RIGHT', 'MY', 'PRACTICE', 'HORRIBLY', 'WRONG'] +3575-170457-0039-408: ref=['THE', 'CHRISTMAS', 'HOLIDAYS', 'CAME', 'AND', 'SHE', 'AND', 'ANNE', 'RETURNED', 'TO', 'THE', 'PARSONAGE', 'AND', 'TO', 'THAT', 'HAPPY', 'HOME', 'CIRCLE', 'IN', 'WHICH', 'ALONE', 'THEIR', 'NATURES', 'EXPANDED', 'AMONGST', 'ALL', 'OTHER', 'PEOPLE', 'THEY', 'SHRIVELLED', 'UP', 'MORE', 'OR', 'LESS'] +3575-170457-0039-408: hyp=['THE', 'CHRISTMAS', 'HOLIDAYS', 'CAME', 'AND', 'SHE', 'AND', 'ANNE', 'RETURNED', 'TO', 'THE', 'PARSONAGE', 'AND', 'TO', 'THAT', 'HAPPY', 'HOME', 'CIRCLE', 'IN', 'WHICH', 'ALONE', 'THEIR', 'NATURES', 'EXPANDED', 'AMONGST', 'ALL', 'OTHER', 'PEOPLE', 'THEY', 'SHRIVELLED', 'UP', 'MORE', 'OR', 'LESS'] +3575-170457-0040-409: ref=['INDEED', 'THERE', 'WERE', 'ONLY', 'ONE', 'OR', 'TWO', 'STRANGERS', 'WHO', 'COULD', 'BE', 'ADMITTED', 'AMONG', 'THE', 'SISTERS', 'WITHOUT', 'PRODUCING', 'THE', 'SAME', 'RESULT'] +3575-170457-0040-409: hyp=['INDEED', 'THERE', 'WERE', 'ONLY', 'ONE', 'OR', 'TWO', 'STRANGERS', 'WHO', 'COULD', 'BE', 'ADMITTED', 'AMONG', 'THE', 'SISTERS', 'WITHOUT', 'PRODUCING', 'THE', 'SAME', 'RESULT'] +3575-170457-0041-410: ref=['SHE', 'WAS', 'GONE', 'OUT', 'INTO', 'THE', 'VILLAGE', 'ON', 'SOME', 'ERRAND', 'WHEN', 'AS', 'SHE', 'WAS', 'DESCENDING', 'THE', 'STEEP', 'STREET', 'HER', 'FOOT', 'SLIPPED', 'ON', 'THE', 'ICE', 'AND', 'SHE', 'FELL', 'IT', 'WAS', 'DARK', 'AND', 'NO', 'ONE', 'SAW', 'HER', 'MISCHANCE', 'TILL', 'AFTER', 'A', 'TIME', 'HER', 'GROANS', 'ATTRACTED', 'THE', 'ATTENTION', 'OF', 'A', 'PASSER', 'BY'] +3575-170457-0041-410: hyp=['SHE', 'WAS', 'GONE', 'OUT', 'INTO', 'THE', 'VILLAGE', 'ON', 'SOME', 'ERRAND', 'WHEN', 'AS', 'SHE', 'WAS', 'DESCENDING', 'THE', 'STEEP', 'STREET', 'HER', 'FOOT', 'SLIPPED', 'ON', 'THE', 'ICE', 'AND', 'SHE', 'FELL', 'HE', 'WAS', 'DARK', 'AND', 'NO', 'ONE', 'SAW', 'HER', 'MISCHANCE', 'TILL', 'AFTER', 'A', 'TIME', 'HER', 'GROANS', 'ATTRACTED', 'THE', 'ATTENTION', 'OF', 'A', 'PASSER', 'BY'] +3575-170457-0042-411: ref=['UNFORTUNATELY', 'THE', 'FRACTURE', 'COULD', 'NOT', 'BE', 'SET', 'TILL', 'SIX', "O'CLOCK", 'THE', 'NEXT', 'MORNING', 'AS', 'NO', 'SURGEON', 'WAS', 'TO', 'BE', 'HAD', 'BEFORE', 'THAT', 'TIME', 'AND', 'SHE', 'NOW', 'LIES', 'AT', 'OUR', 'HOUSE', 'IN', 'A', 'VERY', 'DOUBTFUL', 'AND', 'DANGEROUS', 'STATE'] +3575-170457-0042-411: hyp=['UNFORTUNATELY', 'THE', 'FRACTURE', 'COULD', 'NOT', 'BE', 'SET', 'TILL', 'SIX', "O'CLOCK", 'THE', 'NEXT', 'MORNING', 'AS', 'NO', 'SURGEON', 'WAS', 'TO', 'BE', 'HAD', 'BEFORE', 'THAT', 'TIME', 'AND', 'SHE', 'NOW', 'LIES', 'AT', 'HER', 'HOUSE', 'IN', 'A', 'VERY', 'DOUBTFUL', 'AND', 'DANGEROUS', 'STATE'] +3575-170457-0043-412: ref=['HOWEVER', 'REMEMBERING', 'WHAT', 'YOU', 'TOLD', 'ME', 'NAMELY', 'THAT', 'YOU', 'HAD', 'COMMENDED', 'THE', 'MATTER', 'TO', 'A', 'HIGHER', 'DECISION', 'THAN', 'OURS', 'AND', 'THAT', 'YOU', 'WERE', 'RESOLVED', 'TO', 'SUBMIT', 'WITH', 'RESIGNATION', 'TO', 'THAT', 'DECISION', 'WHATEVER', 'IT', 'MIGHT', 'BE', 'I', 'HOLD', 'IT', 'MY', 'DUTY', 'TO', 'YIELD', 'ALSO', 'AND', 'TO', 'BE', 'SILENT', 'IT', 'MAY', 'BE', 'ALL', 'FOR', 'THE', 'BEST'] +3575-170457-0043-412: hyp=['HOWEVER', 'REMEMBERING', 'WHAT', 'YOU', 'TOLD', 'ME', 'NAMELY', 'THAT', 'YOU', 'HAD', 'COMMENDED', 'THE', 'MATTER', 'TO', 'A', 'HIGHER', 'DECISION', 'THAN', 'OURS', 'AND', 'THAT', 'YOU', 'WERE', 'RESOLVED', 'TO', 'SUBMIT', 'WITH', 'RESIGNATION', 'TO', 'THAT', 'DECISION', 'WHATEVER', 'IT', 'MIGHT', 'BE', 'I', 'HOLD', 
'IT', 'MY', 'DUTY', 'TO', 'YIELD', 'ALSO', 'AND', 'TO', 'BE', 'SILENT', 'AND', 'MAY', 'BE', 'ALL', 'FOR', 'THE', 'BEST'] +3575-170457-0044-413: ref=['AFTER', 'THIS', 'DISAPPOINTMENT', 'I', 'NEVER', 'DARE', 'RECKON', 'WITH', 'CERTAINTY', 'ON', 'THE', 'ENJOYMENT', 'OF', 'A', 'PLEASURE', 'AGAIN', 'IT', 'SEEMS', 'AS', 'IF', 'SOME', 'FATALITY', 'STOOD', 'BETWEEN', 'YOU', 'AND', 'ME'] +3575-170457-0044-413: hyp=['AFTER', 'THIS', 'DISAPPOINTMENT', 'I', 'NEVER', 'DARE', 'RECKON', 'WITH', 'CERTAINTY', 'ON', 'THE', 'ENJOYMENT', 'OF', 'A', 'PLEASURE', 'AGAIN', 'IT', 'SEEMS', 'AS', 'IF', 'SOME', 'FATALITY', 'STOOD', 'BETWEEN', 'YOU', 'AND', 'ME'] +3575-170457-0045-414: ref=['I', 'AM', 'NOT', 'GOOD', 'ENOUGH', 'FOR', 'YOU', 'AND', 'YOU', 'MUST', 'BE', 'KEPT', 'FROM', 'THE', 'CONTAMINATION', 'OF', 'TOO', 'INTIMATE', 'SOCIETY'] +3575-170457-0045-414: hyp=['I', 'AM', 'NOT', 'GOOD', 'ENOUGH', 'FOR', 'YOU', 'AND', 'YOU', 'MUST', 'BE', 'KEPT', 'FROM', 'THE', 'CONTAMINATION', 'OF', 'TWO', 'INTIMATE', 'SOCIETY'] +3575-170457-0046-415: ref=['A', 'GOOD', 'NEIGHBOUR', 'OF', 'THE', 'BRONTES', 'A', 'CLEVER', 'INTELLIGENT', 'YORKSHIRE', 'WOMAN', 'WHO', 'KEEPS', 'A', "DRUGGIST'S", 'SHOP', 'IN', 'HAWORTH', 'AND', 'FROM', 'HER', 'OCCUPATION', 'HER', 'EXPERIENCE', 'AND', 'EXCELLENT', 'SENSE', 'HOLDS', 'THE', 'POSITION', 'OF', 'VILLAGE', 'DOCTRESS', 'AND', 'NURSE', 'AND', 'AS', 'SUCH', 'HAS', 'BEEN', 'A', 'FRIEND', 'IN', 'MANY', 'A', 'TIME', 'OF', 'TRIAL', 'AND', 'SICKNESS', 'AND', 'DEATH', 'IN', 'THE', 'HOUSEHOLDS', 'ROUND', 'TOLD', 'ME', 'A', 'CHARACTERISTIC', 'LITTLE', 'INCIDENT', 'CONNECTED', 'WITH', "TABBY'S", 'FRACTURED', 'LEG'] +3575-170457-0046-415: hyp=['A', 'GOOD', 'NEIGHBOR', 'OF', 'THE', 'BRONTES', 'A', 'CLEVER', 'INTELLIGENT', 'YORKSHIRE', 'WOMAN', 'WHO', 'KEEPS', 'A', 'DRUGGIST', 'SHOP', 'IN', 'HAWORTH', 'FROM', 'HER', 'OCCUPATION', 'HER', 'EXPERIENCE', 'AND', 'EXCELLENT', 'SENSE', 'HOLDS', 'THE', 'POSITION', 'OF', 'VILLAGE', 'DOCTRIS', 'AND', 'NURSE', 'AND', 'AS', 'SUCH', 'HAS', 'BEEN', 'A', 'FRIEND', 'IN', 'MANY', 'A', 'TIME', 'OF', 'TRIAL', 'AND', 'SICKNESS', 'AND', 'DEATH', 'IN', 'THE', 'HOUSEHOLDS', 'ROUND', 'TOLD', 'ME', 'A', 'CHARACTERISTIC', 'LITTLE', 'INCIDENT', 'CONNECTED', 'WITH', "TABBY'S", 'FRACTURED', 'LEG'] +3575-170457-0047-416: ref=['TABBY', 'HAD', 'LIVED', 'WITH', 'THEM', 'FOR', 'TEN', 'OR', 'TWELVE', 'YEARS', 'AND', 'WAS', 'AS', 'CHARLOTTE', 'EXPRESSED', 'IT', 'ONE', 'OF', 'THE', 'FAMILY'] +3575-170457-0047-416: hyp=['TABBY', 'HAD', 'LIVED', 'WITH', 'THEM', 'FOR', 'TEN', 'OR', 'TWELVE', 'YEARS', 'AND', 'WAS', 'AS', 'CHARLOTTE', 'EXPRESSED', 'IT', 'ONE', 'OF', 'THE', 'FAMILY'] +3575-170457-0048-417: ref=['HE', 'REFUSED', 'AT', 'FIRST', 'TO', 'LISTEN', 'TO', 'THE', 'CAREFUL', 'ADVICE', 'IT', 'WAS', 'REPUGNANT', 'TO', 'HIS', 'LIBERAL', 'NATURE'] +3575-170457-0048-417: hyp=['HE', 'REFUSE', 'AT', 'FIRST', 'TO', 'LISTEN', 'TO', 'THE', 'CAREFUL', 'ADVICE', 'IT', 'WAS', 'REPUGNANT', 'TO', 'HIS', 'LIBERAL', 'NATURE'] +3575-170457-0049-418: ref=['THIS', 'DECISION', 'WAS', 'COMMUNICATED', 'TO', 'THE', 'GIRLS'] +3575-170457-0049-418: hyp=['THIS', 'DECISION', 'WAS', 'COMMUNICATED', 'TO', 'THE', 'GIRLS'] +3575-170457-0050-419: ref=['TABBY', 'HAD', 'TENDED', 'THEM', 'IN', 'THEIR', 'CHILDHOOD', 'THEY', 'AND', 'NONE', 'OTHER', 'SHOULD', 'TEND', 'HER', 'IN', 'HER', 'INFIRMITY', 'AND', 'AGE'] +3575-170457-0050-419: hyp=['TABBY', 'HAD', 'TENDED', 'THEM', 'IN', 'THEIR', 'CHILDHOOD', 'THEY', 'AND', 'NONE', 'OTHER', 'SHOULD', 'TEND', 'HER', 'IN', 'HER', 'INFIRMITY', 'AND', 'AGE'] +3575-170457-0051-420: 
ref=['AT', 'TEA', 'TIME', 'THEY', 'WERE', 'SAD', 'AND', 'SILENT', 'AND', 'THE', 'MEAL', 'WENT', 'AWAY', 'UNTOUCHED', 'BY', 'ANY', 'OF', 'THE', 'THREE'] +3575-170457-0051-420: hyp=['AT', 'TEA', 'TIME', 'THEY', 'WERE', 'SAD', 'AND', 'SILENT', 'AND', 'THE', 'MEAL', 'WENT', 'AWAY', 'UNTOUCHED', 'BY', 'ANY', 'OF', 'THE', 'THREE'] +3575-170457-0052-421: ref=['SHE', 'HAD', 'ANOTHER', 'WEIGHT', 'ON', 'HER', 'MIND', 'THIS', 'CHRISTMAS'] +3575-170457-0052-421: hyp=['SHE', 'HAD', 'ANOTHER', 'WEIGHT', 'ON', 'HER', 'MIND', 'THIS', 'CHRISTMAS'] +3575-170457-0053-422: ref=['BUT', 'ANNE', 'HAD', 'BEGUN', 'TO', 'SUFFER', 'JUST', 'BEFORE', 'THE', 'HOLIDAYS', 'AND', 'CHARLOTTE', 'WATCHED', 'OVER', 'HER', 'YOUNGER', 'SISTERS', 'WITH', 'THE', 'JEALOUS', 'VIGILANCE', 'OF', 'SOME', 'WILD', 'CREATURE', 'THAT', 'CHANGES', 'HER', 'VERY', 'NATURE', 'IF', 'DANGER', 'THREATENS', 'HER', 'YOUNG'] +3575-170457-0053-422: hyp=['BUT', 'ANNE', 'HAD', 'BEGUN', 'TO', 'SUFFER', 'JUST', 'BEFORE', 'THE', 'HOLIDAYS', 'AND', 'CHARLOTTE', 'WATCHED', 'OVER', 'HER', 'YOUNGER', 'SISTERS', 'WITH', 'A', 'JEALOUS', 'VIGILANCE', 'OF', 'SOME', 'WILD', 'CREATURE', 'THAT', 'CHANGES', 'HER', 'VERY', 'NATURE', 'IF', 'DANGER', 'THREATENS', 'HER', 'YOUNG'] +3575-170457-0054-423: ref=['STUNG', 'BY', 'ANXIETY', 'FOR', 'THIS', 'LITTLE', 'SISTER', 'SHE', 'UPBRAIDED', 'MISS', 'W', 'FOR', 'HER', 'FANCIED', 'INDIFFERENCE', 'TO', "ANNE'S", 'STATE', 'OF', 'HEALTH'] +3575-170457-0054-423: hyp=['STUNG', 'BY', 'ANXIETY', 'FOR', 'THIS', 'LITTLE', 'SISTER', 'SHE', 'UPBRAIDED', 'MISS', 'W', 'FOR', 'HER', 'FANCIED', 'INDIFFERENCE', 'TO', 'AN', 'STATE', 'OF', 'HEALTH'] +3575-170457-0055-424: ref=['STILL', 'HER', 'HEART', 'HAD', 'RECEIVED', 'A', 'SHOCK', 'IN', 'THE', 'PERCEPTION', 'OF', "ANNE'S", 'DELICACY', 'AND', 'ALL', 'THESE', 'HOLIDAYS', 'SHE', 'WATCHED', 'OVER', 'HER', 'WITH', 'THE', 'LONGING', 'FOND', 'ANXIETY', 'WHICH', 'IS', 'SO', 'FULL', 'OF', 'SUDDEN', 'PANGS', 'OF', 'FEAR'] +3575-170457-0055-424: hyp=['STILL', 'HER', 'HEART', 'HAD', 'RECEIVED', 'A', 'SHOCK', 'IN', 'THE', 'PERCEPTION', 'OF', "ANNE'S", 'DELICACY', 'AND', 'ALL', 'THESE', 'HOLIDAYS', 'SHE', 'WATCHED', 'OVER', 'HER', 'WITH', 'THE', 'LONGING', 'FOND', 'ANXIETY', 'WHICH', 'IS', 'SO', 'FULL', 'OF', 'SUDDEN', 'PANGS', 'OF', 'FEAR'] +3575-170457-0056-425: ref=['I', 'DOUBT', 'WHETHER', 'BRANWELL', 'WAS', 'MAINTAINING', 'HIMSELF', 'AT', 'THIS', 'TIME'] +3575-170457-0056-425: hyp=['I', 'DOUBT', 'WHETHER', 'BROWNWELL', 'WAS', 'MAINTAINING', 'HIMSELF', 'AT', 'THIS', 'TIME'] +3729-6852-0000-1660: ref=['TO', 'CELEBRATE', 'THE', 'ARRIVAL', 'OF', 'HER', 'SON', 'SILVIA', 'GAVE', 'A', 'SPLENDID', 'SUPPER', 'TO', 'WHICH', 'SHE', 'HAD', 'INVITED', 'ALL', 'HER', 'RELATIVES', 'AND', 'IT', 'WAS', 'A', 'GOOD', 'OPPORTUNITY', 'FOR', 'ME', 'TO', 'MAKE', 'THEIR', 'ACQUAINTANCE'] +3729-6852-0000-1660: hyp=['TO', 'CELEBRATE', 'THE', 'ARRIVAL', 'OF', 'HER', 'SON', 'SYLVIA', 'GAVE', 'A', 'SPLENDID', 'SUPPER', 'TO', 'WHICH', 'SHE', 'HAD', 'INVITED', 'ALL', 'HER', 'RELATIVES', 'AND', 'IT', 'WAS', 'A', 'GOOD', 'OPPORTUNITY', 'FOR', 'ME', 'TO', 'MAKE', 'THEIR', 'ACQUAINTANCE'] +3729-6852-0001-1661: ref=['WITHOUT', 'SAYING', 'IT', 'POSITIVELY', 'SHE', 'MADE', 'ME', 'UNDERSTAND', 'THAT', 'BEING', 'HERSELF', 'AN', 'ILLUSTRIOUS', 'MEMBER', 'OF', 'THE', 'REPUBLIC', 'OF', 'LETTERS', 'SHE', 'WAS', 'WELL', 'AWARE', 'THAT', 'SHE', 'WAS', 'SPEAKING', 'TO', 'AN', 'INSECT'] +3729-6852-0001-1661: hyp=['WITHOUT', 'SAYING', 'IT', 'POSITIVELY', 'SHE', 'MADE', 'ME', 'UNDERSTAND', 'THAT', 'BEING', 'HERSELF', 'AN', 'ILLUSTRIOUS', 'MEMBER', 
'OF', 'THE', 'REPUBLIC', 'OF', 'LETTERS', 'SHE', 'WAS', 'WELL', 'AWARE', 'THAT', 'SHE', 'WAS', 'SPEAKING', 'TO', 'AN', 'INSECT'] +3729-6852-0002-1662: ref=['IN', 'ORDER', 'TO', 'PLEASE', 'HER', 'I', 'SPOKE', 'TO', 'HER', 'OF', 'THE', 'ABBE', 'CONTI', 'AND', 'I', 'HAD', 'OCCASION', 'TO', 'QUOTE', 'TWO', 'LINES', 'OF', 'THAT', 'PROFOUND', 'WRITER'] +3729-6852-0002-1662: hyp=['IN', 'ORDER', 'TO', 'PLEASE', 'HER', 'I', 'SPOKE', 'TO', 'HER', 'OF', 'THE', 'ABBEY', 'CONTI', 'AND', 'I', 'HAD', 'OCCASION', 'TO', 'QUOTE', 'TWO', 'LINES', 'OF', 'THAT', 'PROFOUND', 'WRITER'] +3729-6852-0003-1663: ref=['MADAM', 'CORRECTED', 'ME', 'WITH', 'A', 'PATRONIZING', 'AIR', 'FOR', 'MY', 'PRONUNCIATION', 'OF', 'THE', 'WORD', 'SCEVRA', 'WHICH', 'MEANS', 'DIVIDED', 'SAYING', 'THAT', 'IT', 'OUGHT', 'TO', 'BE', 'PRONOUNCED', 'SCEURA', 'AND', 'SHE', 'ADDED', 'THAT', 'I', 'OUGHT', 'TO', 'BE', 'VERY', 'GLAD', 'TO', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'ON', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'ARRIVAL', 'IN', 'PARIS', 'TELLING', 'ME', 'THAT', 'IT', 'WOULD', 'BE', 'AN', 'IMPORTANT', 'DAY', 'IN', 'MY', 'LIFE'] +3729-6852-0003-1663: hyp=['MADAME', 'CORRECTED', 'ME', 'WITH', 'A', 'PATRONIZING', 'AIR', 'FOR', 'MY', 'PRONUNCIATION', 'OF', 'THE', 'WORD', 'SKRA', 'WHICH', 'MEANS', 'DIVIDED', 'SAYING', 'THAT', 'IT', 'OUGHT', 'TO', 'BE', 'PRONOUNCED', 'SKURA', 'AND', 'SHE', 'ADDED', 'THAT', 'I', 'OUGHT', 'TO', 'BE', 'VERY', 'GLAD', 'TO', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'ON', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'ARRIVAL', 'IN', 'PARIS', 'TELLING', 'ME', 'THAT', 'IT', 'WOULD', 'BE', 'AN', 'IMPORTANT', 'DAY', 'IN', 'MY', 'LIFE'] +3729-6852-0004-1664: ref=['HER', 'FACE', 'WAS', 'AN', 'ENIGMA', 'FOR', 'IT', 'INSPIRED', 'EVERYONE', 'WITH', 'THE', 'WARMEST', 'SYMPATHY', 'AND', 'YET', 'IF', 'YOU', 'EXAMINED', 'IT', 'ATTENTIVELY', 'THERE', 'WAS', 'NOT', 'ONE', 'BEAUTIFUL', 'FEATURE', 'SHE', 'COULD', 'NOT', 'BE', 'CALLED', 'HANDSOME', 'BUT', 'NO', 'ONE', 'COULD', 'HAVE', 'THOUGHT', 'HER', 'UGLY'] +3729-6852-0004-1664: hyp=['HER', 'FACE', 'WAS', 'AN', 'ENIGMA', 'FOR', 'IT', 'INSPIRED', 'EVERY', 'ONE', 'WITH', 'THE', 'WARMEST', 'SYMPATHY', 'AND', 'YET', 'IF', 'YOU', 'EXAMINED', 'IT', 'ATTENTIVELY', 'THERE', 'WAS', 'NOT', 'ONE', 'BEAUTIFUL', 'FEATURE', 'SHE', 'COULD', 'NOT', 'BE', 'CALLED', 'HANDSOME', 'BUT', 'NO', 'ONE', 'COULD', 'HAVE', 'THOUGHT', 'HER', 'UGLY'] +3729-6852-0005-1665: ref=['SILVIA', 'WAS', 'THE', 'ADORATION', 'OF', 'FRANCE', 'AND', 'HER', 'TALENT', 'WAS', 'THE', 'REAL', 'SUPPORT', 'OF', 'ALL', 'THE', 'COMEDIES', 'WHICH', 'THE', 'GREATEST', 'AUTHORS', 'WROTE', 'FOR', 'HER', 'ESPECIALLY', 'OF', 'THE', 'PLAYS', 'OF', 'MARIVAUX', 'FOR', 'WITHOUT', 'HER', 'HIS', 'COMEDIES', 'WOULD', 'NEVER', 'HAVE', 'GONE', 'TO', 'POSTERITY'] +3729-6852-0005-1665: hyp=['SYLVIA', 'WAS', 'THE', 'ADORATION', 'OF', 'FRANCE', 'AND', 'HER', 'TALENT', 'WAS', 'THE', 'REAL', 'SUPPORT', 'OF', 'ALL', 'THE', 'COMEDIES', 'WHICH', 'THE', 'GREATEST', 'AUTHORS', 'WROTE', 'FOR', 'HER', 'ESPECIALLY', 'OF', 'THE', 'PLAYS', 'OF', 'MARIVAUX', 'FOR', 'WITHOUT', 'HER', 'HIS', 'COMEDIES', 'WOULD', 'NEVER', 'HAVE', 'GONE', 'TO', 'PROSTERITY'] +3729-6852-0006-1666: ref=['SILVIA', 'DID', 'NOT', 'THINK', 'THAT', 'HER', 'GOOD', 'CONDUCT', 'WAS', 'A', 'MERIT', 'FOR', 'SHE', 'KNEW', 'THAT', 'SHE', 'WAS', 'VIRTUOUS', 'ONLY', 'BECAUSE', 'HER', 'SELF', 'LOVE', 'COMPELLED', 'HER', 'TO', 'BE', 'SO', 'AND', 'SHE', 'NEVER', 'EXHIBITED', 'ANY', 'PRIDE', 'OR', 'ASSUMED', 'ANY', 'SUPERIORITY', 'TOWARDS', 'HER', 'THEATRICAL', 'SISTERS', 'ALTHOUGH', 'SATISFIED', 'TO', 'SHINE', 'BY', 'THEIR', 
'TALENT', 'OR', 'THEIR', 'BEAUTY', 'THEY', 'CARED', 'LITTLE', 'ABOUT', 'RENDERING', 'THEMSELVES', 'CONSPICUOUS', 'BY', 'THEIR', 'VIRTUE'] +3729-6852-0006-1666: hyp=['SYLVIA', 'DID', 'NOT', 'THINK', 'THAT', 'HER', 'GOOD', 'CONDUCT', 'WAS', 'A', 'MERIT', 'FOR', 'SHE', 'KNEW', 'THAT', 'SHE', 'WAS', 'VIRTUOUS', 'ONLY', 'BECAUSE', 'HER', 'SELF', 'LOVE', 'COMPELLED', 'HER', 'TO', 'BE', 'SO', 'AND', 'SHE', 'NEVER', 'EXHIBITED', 'ANY', 'PRIDE', 'OR', 'ASSUMED', 'ANY', 'SUPERIORITY', 'TOWARDS', 'HER', 'THEATRICAL', 'SISTERS', 'ALTHOUGH', 'SATISFIED', 'TO', 'SHINE', 'BY', 'THEIR', 'TALENT', 'OR', 'THEIR', 'BEAUTY', 'THEY', 'CARED', 'LITTLE', 'ABOUT', 'RENDERING', 'THEMSELVES', 'CONSPICUOUS', 'BY', 'THEIR', 'VIRTUE'] +3729-6852-0007-1667: ref=['TWO', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'I', 'SAW', 'HER', 'PERFORM', 'THE', 'CHARACTER', 'OF', 'MARIANNE', 'IN', 'THE', 'COMEDY', 'OF', 'MARIVAUX', 'AND', 'IN', 'SPITE', 'OF', 'HER', 'AGE', 'AND', 'DECLINING', 'HEALTH', 'THE', 'ILLUSION', 'WAS', 'COMPLETE'] +3729-6852-0007-1667: hyp=['TWO', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'I', 'SAW', 'HER', 'PERFORM', 'THE', 'CHARACTER', 'OF', 'MARIANNE', 'IN', 'THE', 'COMEDY', 'OF', 'MARAVO', 'AND', 'IN', 'SPITE', 'OF', 'HER', 'AGE', 'AND', 'DECLINING', 'HEALTH', 'THE', 'ILLUSION', 'WAS', 'COMPLETE'] +3729-6852-0008-1668: ref=['SHE', 'WAS', 'HONOURABLY', 'BURIED', 'IN', 'THE', 'CHURCH', 'OF', 'SAINT', 'SAUVEUR', 'WITHOUT', 'THE', 'SLIGHTEST', 'OPPOSITION', 'FROM', 'THE', 'VENERABLE', 'PRIEST', 'WHO', 'FAR', 'FROM', 'SHARING', 'THE', 'ANTI', 'CHRISTAIN', 'INTOLERANCY', 'OF', 'THE', 'CLERGY', 'IN', 'GENERAL', 'SAID', 'THAT', 'HER', 'PROFESSION', 'AS', 'AN', 'ACTRESS', 'HAD', 'NOT', 'HINDERED', 'HER', 'FROM', 'BEING', 'A', 'GOOD', 'CHRISTIAN', 'AND', 'THAT', 'THE', 'EARTH', 'WAS', 'THE', 'COMMON', 'MOTHER', 'OF', 'ALL', 'HUMAN', 'BEINGS', 'AS', 'JESUS', 'CHRIST', 'HAD', 'BEEN', 'THE', 'SAVIOUR', 'OF', 'ALL', 'MANKIND'] +3729-6852-0008-1668: hyp=['SHE', 'WAS', 'HONOURABLY', 'BURIED', 'IN', 'THE', 'CHURCH', 'OF', 'SAINT', 'SAVERE', 'WITHOUT', 'THE', 'SLIGHTEST', 'OPPOSITION', 'FROM', 'THE', 'VENERABLE', 'PRIEST', 'WHO', 'FAR', 'FROM', 'SHARING', 'THE', 'ANTI', 'CHRISTIAN', 'INTOLERANCY', 'OF', 'THE', 'CLERGY', 'IN', 'GENERAL', 'SAID', 'THAT', 'HER', 'PROFESSION', 'AS', 'AN', 'ACTRESS', 'HAD', 'NOT', 'HINDERED', 'HER', 'FROM', 'BEING', 'A', 'GOOD', 'CHRISTIAN', 'AND', 'THAT', 'THE', 'EARTH', 'WAS', 'A', 'COMMON', 'MOTHER', 'OF', 'ALL', 'HUMAN', 'BEINGS', 'AS', 'JESUS', 'CHRIST', 'HAD', 'BEEN', 'THE', 'SAVIOUR', 'OF', 'ALL', 'MANKIND'] +3729-6852-0009-1669: ref=['YOU', 'WILL', 'FORGIVE', 'ME', 'DEAR', 'READER', 'IF', 'I', 'HAVE', 'MADE', 'YOU', 'ATTEND', 'THE', 'FUNERAL', 'OF', 'SILVIA', 'TEN', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'BELIEVE', 'ME', 'I', 'HAVE', 'NO', 'INTENTION', 'OF', 'PERFORMING', 'A', 'MIRACLE', 'YOU', 'MAY', 'CONSOLE', 'YOURSELF', 'WITH', 'THE', 'IDEA', 'THAT', 'I', 'SHALL', 'SPARE', 'YOU', 'THAT', 'UNPLEASANT', 'TASK', 'WHEN', 'POOR', 'SILVIA', 'DIES'] +3729-6852-0009-1669: hyp=['YOU', 'WILL', 'FORGIVE', 'ME', 'DEAR', 'READER', 'IF', 'I', 'HAVE', 'MADE', 'YOU', 'ATTEND', 'THE', 'FUNERAL', 'OF', 'SYLVIA', 'TEN', 'YEARS', 'BEFORE', 'HER', 'DEATH', 'BELIEVE', 'ME', 'I', 'HAVE', 'NO', 'INTENTION', 'OF', 'PERFORMING', 'A', 'MIRACLE', 'YOU', 'MAY', 'CONSOLE', 'YOURSELF', 'WITH', 'THE', 'IDEA', 'THAT', 'I', 'SHALL', 'SPARE', 'YOU', 'THAT', 'UNPLEASANT', 'TASK', 'WHEN', 'POOR', 'SYLVIA', 'DIES'] +3729-6852-0010-1670: ref=['I', 'NEVER', 'HAD', 'ANY', 'FAMILY'] +3729-6852-0010-1670: hyp=['I', 'NEVER', 'HAD', 'ANY', 
'FAMILY'] +3729-6852-0011-1671: ref=['I', 'HAD', 'A', 'NAME', 'I', 'BELIEVE', 'IN', 'MY', 'YOUNG', 'DAYS', 'BUT', 'I', 'HAVE', 'FORGOTTEN', 'IT', 'SINCE', 'I', 'HAVE', 'BEEN', 'IN', 'SERVICE'] +3729-6852-0011-1671: hyp=['I', 'HAD', 'A', 'NAME', 'I', 'BELIEVE', 'IN', 'MY', 'YOUNG', 'DAYS', 'BUT', 'I', 'HAVE', 'FORGOTTEN', 'IT', 'SINCE', 'I', 'HAVE', 'BEEN', 'IN', 'SERVICE'] +3729-6852-0012-1672: ref=['I', 'SHALL', 'CALL', 'YOU', 'ESPRIT'] +3729-6852-0012-1672: hyp=['I', 'SHALL', 'CALL', 'YOU', 'A', 'SPREE'] +3729-6852-0013-1673: ref=['YOU', 'DO', 'ME', 'A', 'GREAT', 'HONOUR'] +3729-6852-0013-1673: hyp=['YOU', 'DO', 'ME', 'A', 'GREAT', 'HONOUR'] +3729-6852-0014-1674: ref=['HERE', 'GO', 'AND', 'GET', 'ME', 'CHANGE', 'FOR', 'A', 'LOUIS', 'I', 'HAVE', 'IT', 'SIR'] +3729-6852-0014-1674: hyp=['HERE', 'GO', 'AND', 'GET', 'ME', 'CHANGE', 'FOR', 'A', 'LOUIS', 'I', 'HAVE', 'IT', 'SIR'] +3729-6852-0015-1675: ref=['AT', 'YOUR', 'SERVICE', 'SIR'] +3729-6852-0015-1675: hyp=['AT', 'YOUR', 'SERVICE', 'SIR'] +3729-6852-0016-1676: ref=['MADAME', 'QUINSON', 'BESIDES', 'CAN', 'ANSWER', 'YOUR', 'ENQUIRIES'] +3729-6852-0016-1676: hyp=['MADAME', 'QUINCENT', 'BESIDES', 'CAN', 'ANSWER', 'YOUR', 'INQUIRIES'] +3729-6852-0017-1677: ref=['I', 'SEE', 'A', 'QUANTITY', 'OF', 'CHAIRS', 'FOR', 'HIRE', 'AT', 'THE', 'RATE', 'OF', 'ONE', 'SOU', 'MEN', 'READING', 'THE', 'NEWSPAPER', 'UNDER', 'THE', 'SHADE', 'OF', 'THE', 'TREES', 'GIRLS', 'AND', 'MEN', 'BREAKFASTING', 'EITHER', 'ALONE', 'OR', 'IN', 'COMPANY', 'WAITERS', 'WHO', 'WERE', 'RAPIDLY', 'GOING', 'UP', 'AND', 'DOWN', 'A', 'NARROW', 'STAIRCASE', 'HIDDEN', 'UNDER', 'THE', 'FOLIAGE'] +3729-6852-0017-1677: hyp=['I', 'SEE', 'A', 'QUANTITY', 'OF', 'CHAIRS', 'FOR', 'HIRE', 'AT', 'THE', 'RATE', 'OF', 'ONE', 'SOUS', 'MEN', 'READING', 'THE', 'NEWSPAPER', 'UNDER', 'THE', 'SHADE', 'OF', 'THE', 'TREES', 'GIRLS', 'AND', 'MEN', 'BREAKFASTING', 'EITHER', 'ALONE', 'OR', 'IN', 'COMPANY', 'WAITERS', 'WHO', 'WERE', 'RAPIDLY', 'GOING', 'UP', 'AND', 'DOWN', 'A', 'NARROW', 'STAIRCASE', 'HIDDEN', 'UNDER', 'THE', 'FOLIAGE'] +3729-6852-0018-1678: ref=['I', 'SIT', 'DOWN', 'AT', 'A', 'SMALL', 'TABLE', 'A', 'WAITER', 'COMES', 'IMMEDIATELY', 'TO', 'ENQUIRE', 'MY', 'WISHES'] +3729-6852-0018-1678: hyp=['I', 'SIT', 'DOWN', 'AT', 'A', 'SMALL', 'TABLE', 'A', 'WAITER', 'COMES', 'IMMEDIATELY', 'TO', 'INQUIRE', 'MY', 'WISHES'] +3729-6852-0019-1679: ref=['I', 'TELL', 'HIM', 'TO', 'GIVE', 'ME', 'SOME', 'COFFEE', 'IF', 'IT', 'IS', 'GOOD'] +3729-6852-0019-1679: hyp=['I', 'TELL', 'HIM', 'TO', 'GIVE', 'ME', 'SOME', 'COFFEE', 'IF', 'IT', 'IS', 'GOOD'] +3729-6852-0020-1680: ref=['THEN', 'TURNING', 'TOWARDS', 'ME', 'HE', 'SAYS', 'THAT', 'I', 'LOOK', 'LIKE', 'A', 'FOREIGNER', 'AND', 'WHEN', 'I', 'SAY', 'THAT', 'I', 'AM', 'AN', 'ITALIAN', 'HE', 'BEGINS', 'TO', 'SPEAK', 'TO', 'ME', 'OF', 'THE', 'COURT', 'OF', 'THE', 'CITY', 'OF', 'THE', 'THEATRES', 'AND', 'AT', 'LAST', 'HE', 'OFFERS', 'TO', 'ACCOMPANY', 'ME', 'EVERYWHERE'] +3729-6852-0020-1680: hyp=['THEN', 'TURNING', 'TOWARDS', 'ME', 'HE', 'SAYS', 'THAT', 'I', 'LOOK', 'LIKE', 'A', 'FOREIGNER', 'AND', 'WHEN', 'I', 'SAY', 'THAT', 'I', 'AM', 'AN', 'ITALIAN', 'HE', 'BEGINS', 'TO', 'SPEAK', 'TO', 'ME', 'OF', 'THE', 'CORPS', 'THE', 'CITY', 'OF', 'THE', 'THEATRES', 'AND', 'AT', 'LAST', 'HE', 'OFFERS', 'TO', 'ACCOMPANY', 'ME', 'EVERYWHERE'] +3729-6852-0021-1681: ref=['I', 'THANK', 'HIM', 'AND', 'TAKE', 'MY', 'LEAVE'] +3729-6852-0021-1681: hyp=['I', 'THANK', 'HIM', 'AND', 'TAKE', 'MY', 'LEAVE'] +3729-6852-0022-1682: ref=['I', 'ADDRESS', 'HIM', 'IN', 'ITALIAN', 'AND', 'HE', 
'ANSWERS', 'VERY', 'WITTILY', 'BUT', 'HIS', 'WAY', 'OF', 'SPEAKING', 'MAKES', 'ME', 'SMILE', 'AND', 'I', 'TELL', 'HIM', 'WHY'] +3729-6852-0022-1682: hyp=['I', 'ADDRESS', 'HIM', 'IN', 'ITALIAN', 'AND', 'HE', 'ANSWERS', 'VERY', 'WITTILY', 'BUT', 'HIS', 'WAY', 'OF', 'SPEAKING', 'MAKES', 'ME', 'SMILE', 'AND', 'I', 'TELL', 'HIM', 'WHY'] +3729-6852-0023-1683: ref=['MY', 'REMARK', 'PLEASES', 'HIM', 'BUT', 'I', 'SOON', 'PROVE', 'TO', 'HIM', 'THAT', 'IT', 'IS', 'NOT', 'THE', 'RIGHT', 'WAY', 'TO', 'SPEAK', 'HOWEVER', 'PERFECT', 'MAY', 'HAVE', 'BEEN', 'THE', 'LANGUAGE', 'OF', 'THAT', 'ANCIENT', 'WRITER'] +3729-6852-0023-1683: hyp=['MY', 'REMARK', 'PLEASES', 'HIM', 'BUT', 'I', 'SOON', 'PROVE', 'TO', 'HIM', 'THAT', 'IT', 'IS', 'NOT', 'THE', 'RIGHT', 'WAY', 'TO', 'SPEAK', 'HOWEVER', 'PERFECT', 'MAY', 'HAVE', 'BEEN', 'THE', 'LANGUAGE', 'OF', 'THAT', 'ANCIENT', 'WRITER'] +3729-6852-0024-1684: ref=['I', 'SEE', 'A', 'CROWD', 'IN', 'ONE', 'CORNER', 'OF', 'THE', 'GARDEN', 'EVERYBODY', 'STANDING', 'STILL', 'AND', 'LOOKING', 'UP'] +3729-6852-0024-1684: hyp=['I', 'SEE', 'A', 'CROWD', 'IN', 'ONE', 'CORNER', 'OF', 'THE', 'GARDEN', 'EVERYBODY', 'STANDING', 'STILL', 'AND', 'LOOKING', 'UP'] +3729-6852-0025-1685: ref=['IS', 'THERE', 'NOT', 'A', 'MERIDIAN', 'EVERYWHERE'] +3729-6852-0025-1685: hyp=['IS', 'THERE', 'NOT', 'A', 'MERIDIAN', 'EVERYWHERE'] +3729-6852-0026-1686: ref=['YES', 'BUT', 'THE', 'MERIDIAN', 'OF', 'THE', 'PALAIS', 'ROYAL', 'IS', 'THE', 'MOST', 'EXACT'] +3729-6852-0026-1686: hyp=['YES', 'BUT', 'THE', 'MERIDIAN', 'OF', 'THE', 'PALAIS', 'ROYAL', 'IS', 'THE', 'MOST', 'EXACT'] +3729-6852-0027-1687: ref=['THAT', 'IS', 'TRUE', 'BADAUDERIE'] +3729-6852-0027-1687: hyp=['THAT', 'IS', 'TRUE', 'BAD', 'DEALT', 'GREE'] +3729-6852-0028-1688: ref=['ALL', 'THESE', 'HONEST', 'PERSONS', 'ARE', 'WAITING', 'THEIR', 'TURN', 'TO', 'GET', 'THEIR', 'SNUFF', 'BOXES', 'FILLED'] +3729-6852-0028-1688: hyp=['ALL', 'THESE', 'HONEST', 'PERSONS', 'ARE', 'WAITING', 'THEIR', 'TURN', 'TO', 'GET', 'THEIR', 'SNUFF', 'BOXES', 'FILLED'] +3729-6852-0029-1689: ref=['IT', 'IS', 'SOLD', 'EVERYWHERE', 'BUT', 'FOR', 'THE', 'LAST', 'THREE', 'WEEKS', 'NOBODY', 'WILL', 'USE', 'ANY', 'SNUFF', 'BUT', 'THAT', 'SOLD', 'AT', 'THE', 'CIVET', 'CAT'] +3729-6852-0029-1689: hyp=['IT', 'IS', 'SOLD', 'EVERYWHERE', 'BUT', 'FOR', 'THE', 'LAST', 'THREE', 'WEEKS', 'NOBODY', 'WILL', 'USE', 'ANY', 'SNUFF', 'BUT', "THAT'S", 'SOLD', 'AT', 'THE', 'SAVE', 'CAT'] +3729-6852-0030-1690: ref=['IS', 'IT', 'BETTER', 'THAN', 'ANYWHERE', 'ELSE'] +3729-6852-0030-1690: hyp=['IS', 'IT', 'BETTER', 'THAN', 'ANYWHERE', 'ELSE'] +3729-6852-0031-1691: ref=['BUT', 'HOW', 'DID', 'SHE', 'MANAGE', 'TO', 'RENDER', 'IT', 'SO', 'FASHIONABLE'] +3729-6852-0031-1691: hyp=['BUT', 'HOW', 'DID', 'SHE', 'MANAGE', 'TO', 'RENDER', 'IT', 'SO', 'FASHIONABLE'] +3729-6852-0032-1692: ref=['SIMPLY', 'BY', 'STOPPING', 'HER', 'CARRIAGE', 'TWO', 'OR', 'THREE', 'TIMES', 'BEFORE', 'THE', 'SHOP', 'TO', 'HAVE', 'HER', 'SNUFF', 'BOX', 'FILLED', 'AND', 'BY', 'SAYING', 'ALOUD', 'TO', 'THE', 'YOUNG', 'GIRL', 'WHO', 'HANDED', 'BACK', 'THE', 'BOX', 'THAT', 'HER', 'SNUFF', 'WAS', 'THE', 'VERY', 'BEST', 'IN', 'PARIS'] +3729-6852-0032-1692: hyp=['SIMPLY', 'BY', 'STOPPING', 'HER', 'CARRIAGE', 'TWO', 'OR', 'THREE', 'TIMES', 'BEFORE', 'THE', 'SHOP', 'TO', 'HAVE', 'HER', 'SNUFF', 'BOX', 'FILLED', 'AND', 'BY', 'SAYING', 'ALOUD', 'TO', 'THE', 'YOUNG', 'GIRL', 'WHO', 'HANDED', 'BACK', 'THE', 'BOX', 'THAT', 'HER', 'SNUFF', 'WAS', 'THE', 'VERY', 'BEST', 'IN', 'PARIS'] +3729-6852-0033-1693: ref=['YOU', 'ARE', 'NOW', 'IN', 'THE', 
'ONLY', 'COUNTRY', 'IN', 'THE', 'WORLD', 'WHERE', 'WIT', 'CAN', 'MAKE', 'A', 'FORTUNE', 'BY', 'SELLING', 'EITHER', 'A', 'GENUINE', 'OR', 'A', 'FALSE', 'ARTICLE', 'IN', 'THE', 'FIRST', 'CASE', 'IT', 'RECEIVES', 'THE', 'WELCOME', 'OF', 'INTELLIGENT', 'AND', 'TALENTED', 'PEOPLE', 'AND', 'IN', 'THE', 'SECOND', 'FOOLS', 'ARE', 'ALWAYS', 'READY', 'TO', 'REWARD', 'IT', 'FOR', 'SILLINESS', 'IS', 'TRULY', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'PEOPLE', 'HERE', 'AND', 'HOWEVER', 'WONDERFUL', 'IT', 'MAY', 'APPEAR', 'SILLINESS', 'IS', 'THE', 'DAUGHTER', 'OF', 'WIT'] +3729-6852-0033-1693: hyp=['YOU', 'ARE', 'NOW', 'IN', 'THE', 'ONLY', 'COUNTRY', 'IN', 'THE', 'WORLD', 'WHERE', 'WIT', 'CAN', 'MAKE', 'A', 'FORTUNE', 'BY', 'SELLING', 'EITHER', 'A', 'GENUINE', 'OR', 'A', 'FALSE', 'ARTICLE', 'IN', 'THE', 'FIRST', 'CASE', 'IT', 'RECEIVES', 'THE', 'WELCOME', 'OF', 'INTELLIGENT', 'AND', 'TALENTED', 'PEOPLE', 'AND', 'IN', 'THE', 'SECOND', 'FOOLS', 'ARE', 'ALWAYS', 'READY', 'TO', 'REWARD', 'IT', 'FOR', 'SILLINESS', 'IS', 'TRULY', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'PEOPLE', 'HERE', 'AND', 'HOWEVER', 'WONDERFUL', 'IT', 'MAY', 'APPEAR', 'SILLINESS', 'IS', 'THE', 'DAUGHTER', 'OF', 'WIT'] +3729-6852-0034-1694: ref=['LET', 'A', 'MAN', 'RUN', 'AND', 'EVERYBODY', 'WILL', 'RUN', 'AFTER', 'HIM', 'THE', 'CROWD', 'WILL', 'NOT', 'STOP', 'UNLESS', 'THE', 'MAN', 'IS', 'PROVED', 'TO', 'BE', 'MAD', 'BUT', 'TO', 'PROVE', 'IT', 'IS', 'INDEED', 'A', 'DIFFICULT', 'TASK', 'BECAUSE', 'WE', 'HAVE', 'A', 'CROWD', 'OF', 'MEN', 'WHO', 'MAD', 'FROM', 'THEIR', 'BIRTH', 'ARE', 'STILL', 'CONSIDERED', 'WISE'] +3729-6852-0034-1694: hyp=['LET', 'A', 'MAN', 'RUN', 'AND', 'EVERYBODY', 'WILL', 'RUN', 'AFTER', 'HIM', 'THE', 'CROWD', 'WILL', 'NOT', 'STOP', 'UNLESS', 'THE', 'MAN', 'IS', 'PROVED', 'TO', 'BE', 'MAD', 'BUT', 'TO', 'PROVE', 'IT', 'IS', 'INDEED', 'A', 'DIFFICULT', 'TASK', 'BECAUSE', 'WE', 'HAVE', 'A', 'CROWD', 'OF', 'MEN', 'WHO', 'MAD', 'FROM', 'THEIR', 'BIRTH', 'ARE', 'STILL', 'CONSIDERED', 'WISE'] +3729-6852-0035-1695: ref=['IT', 'SEEMS', 'TO', 'ME', 'I', 'REPLIED', 'THAT', 'SUCH', 'APPROVAL', 'SUCH', 'RATIFICATION', 'OF', 'THE', 'OPINION', 'EXPRESSED', 'BY', 'THE', 'KING', 'THE', 'PRINCES', 'OF', 'THE', 'BLOOD', 'ET', 'CETERA', 'IS', 'RATHER', 'A', 'PROOF', 'OF', 'THE', 'AFFECTION', 'FELT', 'FOR', 'THEM', 'BY', 'THE', 'NATION', 'FOR', 'THE', 'FRENCH', 'CARRY', 'THAT', 'AFFECTION', 'TO', 'SUCH', 'AN', 'EXTENT', 'THAT', 'THEY', 'BELIEVE', 'THEM', 'INFALLIBLE'] +3729-6852-0035-1695: hyp=['IT', 'SEEMS', 'TO', 'ME', 'I', 'REPLIED', 'THAT', 'SUCH', 'APPROVAL', 'SUCH', 'RATIFICATION', 'OF', 'THE', 'OPINION', 'EXPRESSED', 'BY', 'THE', 'KING', 'THE', 'PRINCES', 'OF', 'THE', 'BLOOD', 'ET', 'CETERA', 'IS', 'RATHER', 'A', 'PROOF', 'OF', 'THE', 'AFFECTION', 'FELT', 'FOR', 'THEM', 'BY', 'THE', 'NATION', 'FOR', 'THE', 'FRENCH', 'CARRY', 'THAT', 'AFFECTION', 'TO', 'SUCH', 'AN', 'EXTENT', 'THAT', 'THEY', 'BELIEVED', 'THEM', 'INFALLIBLE'] +3729-6852-0036-1696: ref=['WHEN', 'THE', 'KING', 'COMES', 'TO', 'PARIS', 'EVERYBODY', 'CALLS', 'OUT', 'VIVE', 'LE', 'ROI'] +3729-6852-0036-1696: hyp=['WHEN', 'THE', 'KING', 'COMES', 'TO', 'PARIS', 'EVERYBODY', 'CALLS', 'OUT', 'VIVE', 'LE', 'ROY'] +3729-6852-0037-1697: ref=['SHE', 'INTRODUCED', 'ME', 'TO', 'ALL', 'HER', 'GUESTS', 'AND', 'GAVE', 'ME', 'SOME', 'PARTICULARS', 'RESPECTING', 'EVERY', 'ONE', 'OF', 'THEM'] +3729-6852-0037-1697: hyp=['SHE', 'INTRODUCED', 'ME', 'TO', 'ALL', 'HER', 'GUESTS', 'AND', 'GAVE', 'ME', 'SOME', 'PARTICULARS', 'RESPECTING', 'EVERY', 'ONE', 'OF', 'THEM'] +3729-6852-0038-1698: ref=['WHAT', 
'SIR', 'I', 'SAID', 'TO', 'HIM', 'AM', 'I', 'FORTUNATE', 'ENOUGH', 'TO', 'SEE', 'YOU'] +3729-6852-0038-1698: hyp=['WHAT', 'SIR', 'I', 'SAID', 'TO', 'HIM', 'AM', 'I', 'FORTUNATE', 'ENOUGH', 'TO', 'SEE', 'YOU'] +3729-6852-0039-1699: ref=['HE', 'HIMSELF', 'RECITED', 'THE', 'SAME', 'PASSAGE', 'IN', 'FRENCH', 'AND', 'POLITELY', 'POINTED', 'OUT', 'THE', 'PARTS', 'IN', 'WHICH', 'HE', 'THOUGHT', 'THAT', 'I', 'HAD', 'IMPROVED', 'ON', 'THE', 'ORIGINAL'] +3729-6852-0039-1699: hyp=['HE', 'HIMSELF', 'RECITED', 'THE', 'SAME', 'PASSAGE', 'IN', 'FRENCH', 'AND', 'POLITELY', 'POINTED', 'OUT', 'THE', 'PARTS', 'IN', 'WHICH', 'HE', 'THOUGHT', 'THAT', 'I', 'HAD', 'IMPROVED', 'ON', 'THE', 'ORIGINAL'] +3729-6852-0040-1700: ref=['FOR', 'THE', 'FIRST', 'DAY', 'SIR', 'I', 'THINK', 'THAT', 'WHAT', 'YOU', 'HAVE', 'DONE', 'GIVES', 'GREAT', 'HOPES', 'OF', 'YOU', 'AND', 'WITHOUT', 'ANY', 'DOUBT', 'YOU', 'WILL', 'MAKE', 'RAPID', 'PROGRESS'] +3729-6852-0040-1700: hyp=['FOR', 'THE', 'FIRST', 'DAY', 'SIR', 'I', 'THINK', 'THAT', 'WHAT', 'YOU', 'HAVE', 'DONE', 'GIVES', 'GREAT', 'HOPES', 'OF', 'YOU', 'AND', 'WITHOUT', 'ANY', 'DOUBT', 'YOU', 'WILL', 'MAKE', 'RAPID', 'PROGRESS'] +3729-6852-0041-1701: ref=['I', 'BELIEVE', 'IT', 'SIR', 'AND', 'THAT', 'IS', 'WHAT', 'I', 'FEAR', 'THEREFORE', 'THE', 'PRINCIPAL', 'OBJECT', 'OF', 'MY', 'VISIT', 'HERE', 'IS', 'TO', 'DEVOTE', 'MYSELF', 'ENTIRELY', 'TO', 'THE', 'STUDY', 'OF', 'THE', 'FRENCH', 'LANGUAGE'] +3729-6852-0041-1701: hyp=['I', 'BELIEVE', 'IT', 'SIR', 'AND', 'THAT', 'IS', 'WHAT', 'I', 'FEAR', 'THEREFORE', 'THE', 'PRINCIPAL', 'OBJECT', 'OF', 'MY', 'VISIT', 'HERE', 'IS', 'TO', 'DEVOTE', 'MYSELF', 'ENTIRELY', 'TO', 'THE', 'STUDY', 'OF', 'THE', 'FRENCH', 'LANGUAGE'] +3729-6852-0042-1702: ref=['I', 'AM', 'A', 'VERY', 'UNPLEASANT', 'PUPIL', 'ALWAYS', 'ASKING', 'QUESTIONS', 'CURIOUS', 'TROUBLESOME', 'INSATIABLE', 'AND', 'EVEN', 'SUPPOSING', 'THAT', 'I', 'COULD', 'MEET', 'WITH', 'THE', 'TEACHER', 'I', 'REQUIRE', 'I', 'AM', 'AFRAID', 'I', 'AM', 'NOT', 'RICH', 'ENOUGH', 'TO', 'PAY', 'HIM'] +3729-6852-0042-1702: hyp=['I', 'AM', 'A', 'VERY', 'UNPLEASANT', 'PUPIL', 'ALWAYS', 'ASKING', 'QUESTIONS', 'CURIOUS', 'TROUBLESOME', 'INSATIABLE', 'AND', 'EVEN', 'SUPPOSING', 'THAT', 'I', 'COULD', 'MEET', 'WITH', 'THE', 'TEACHER', 'I', 'REQUIRE', 'I', 'AM', 'AFRAID', 'I', 'AM', 'NOT', 'RICH', 'ENOUGH', 'TO', 'PAY', 'HIM'] +3729-6852-0043-1703: ref=['I', 'RESIDE', 'IN', 'THE', 'MARAIS', 'RUE', 'DE', 'DOUZE', 'PORTES'] +3729-6852-0043-1703: hyp=['I', 'RESIDE', 'IN', 'THE', 'MARAE', 'GRUE', 'DE', 'DUSPORT'] +3729-6852-0044-1704: ref=['I', 'WILL', 'MAKE', 'YOU', 'TRANSLATE', 'THEM', 'INTO', 'FRENCH', 'AND', 'YOU', 'NEED', 'NOT', 'BE', 'AFRAID', 'OF', 'MY', 'FINDING', 'YOU', 'INSATIABLE'] +3729-6852-0044-1704: hyp=['I', 'WILL', 'MAKE', 'YOU', 'TRANSLATE', 'THEM', 'INTO', 'FRENCH', 'AND', 'YOU', 'NEED', 'NOT', 'BE', 'AFRAID', 'OF', 'MY', 'FINDING', 'YOU', 'INSATIABLE'] +3729-6852-0045-1705: ref=['HE', 'HAD', 'A', 'GOOD', 'APPETITE', 'COULD', 'TELL', 'A', 'GOOD', 'STORY', 'WITHOUT', 'LAUGHING', 'WAS', 'CELEBRATED', 'FOR', 'HIS', 'WITTY', 'REPARTEES', 'AND', 'HIS', 'SOCIABLE', 'MANNERS', 'BUT', 'HE', 'SPENT', 'HIS', 'LIFE', 'AT', 'HOME', 'SELDOM', 'GOING', 'OUT', 'AND', 'SEEING', 'HARDLY', 'ANYONE', 'BECAUSE', 'HE', 'ALWAYS', 'HAD', 'A', 'PIPE', 'IN', 'HIS', 'MOUTH', 'AND', 'WAS', 'SURROUNDED', 'BY', 'AT', 'LEAST', 'TWENTY', 'CATS', 'WITH', 'WHICH', 'HE', 'WOULD', 'AMUSE', 'HIMSELF', 'ALL', 'DAY'] +3729-6852-0045-1705: hyp=['HE', 'HAD', 'A', 'GOOD', 'APPETITE', 'COTEL', 'A', 'GOOD', 'STORY', 'WITHOUT', 'LAUGHING', 
'WAS', 'CELEBRATED', 'FOR', 'HIS', 'WITTY', 'REPARTEES', 'AND', 'HIS', 'SOCIABLE', 'MANNERS', 'BUT', 'HE', 'SPENT', 'HIS', 'LIFE', 'AT', 'HOME', 'SELDOM', 'GOING', 'OUT', 'AND', 'SEEING', 'HARDLY', 'ANY', 'ONE', 'BECAUSE', 'HE', 'ALWAYS', 'HAD', 'A', 'PIPE', 'IN', 'HIS', 'MOUTH', 'AND', 'WAS', 'SURROUNDED', 'BY', 'AT', 'LEAST', 'TWENTY', 'CATS', 'WITH', 'WHICH', 'HE', 'WOULD', 'AMUSE', 'HIMSELF', 'ALL', 'DAY'] +3729-6852-0046-1706: ref=['HIS', 'HOUSEKEEPER', 'HAD', 'THE', 'MANAGEMENT', 'OF', 'EVERYTHING', 'SHE', 'NEVER', 'ALLOWED', 'HIM', 'TO', 'BE', 'IN', 'NEED', 'OF', 'ANYTHING', 'AND', 'SHE', 'GAVE', 'NO', 'ACCOUNT', 'OF', 'HIS', 'MONEY', 'WHICH', 'SHE', 'KEPT', 'ALTOGETHER', 'BECAUSE', 'HE', 'NEVER', 'ASKED', 'HER', 'TO', 'RENDER', 'ANY', 'ACCOUNTS'] +3729-6852-0046-1706: hyp=['HIS', 'HOUSEKEEPER', 'HAD', 'THE', 'MANAGEMENT', 'OF', 'EVERYTHING', 'SHE', 'NEVER', 'ALLOWED', 'HIM', 'TO', 'BE', 'IN', 'NEED', 'OF', 'ANYTHING', 'AND', 'SHE', 'GAVE', 'NO', 'ACCOUNT', 'OF', 'HIS', 'MONEY', 'WHICH', 'SHE', 'KEPT', 'ALTOGETHER', 'BECAUSE', 'HE', 'NEVER', 'ASKED', 'HER', 'TO', 'RENDER', 'ANY', 'ACCOUNTS'] +4077-13751-0000-1258: ref=['ON', 'THE', 'SIXTH', 'OF', 'APRIL', 'EIGHTEEN', 'THIRTY', 'THE', 'CHURCH', 'OF', 'JESUS', 'CHRIST', 'OF', 'LATTER', 'DAY', 'SAINTS', 'WAS', 'FORMALLY', 'ORGANIZED', 'AND', 'THUS', 'TOOK', 'ON', 'A', 'LEGAL', 'EXISTENCE'] +4077-13751-0000-1258: hyp=['ON', 'THE', 'SIXTH', 'OF', 'APRIL', 'EIGHTEEN', 'THIRTY', 'THE', 'CHURCH', 'OF', 'JESUS', 'CHRIST', 'OF', 'LATTER', 'DAY', 'SAINTS', 'WAS', 'FORMERLY', 'ORGANIZED', 'AND', 'THUS', 'TOOK', 'ON', 'A', 'LEGAL', 'EXISTENCE'] +4077-13751-0001-1259: ref=['ITS', 'ORIGIN', 'WAS', 'SMALL', 'A', 'GERM', 'AN', 'INSIGNIFICANT', 'SEED', 'HARDLY', 'TO', 'BE', 'THOUGHT', 'OF', 'AS', 'LIKELY', 'TO', 'AROUSE', 'OPPOSITION'] +4077-13751-0001-1259: hyp=['ITS', 'ORIGIN', 'WAS', 'SMALL', 'A', 'GERM', 'AN', 'INSIGNIFICANT', 'SEED', 'HARDLY', 'TO', 'BE', 'THOUGHT', 'OF', 'AS', 'LIKELY', 'TO', 'AROUSE', 'OPPOSITION'] +4077-13751-0002-1260: ref=['INSTEAD', 'OF', 'BUT', 'SIX', 'REGULARLY', 'AFFILIATED', 'MEMBERS', 'AND', 'AT', 'MOST', 'TWO', 'SCORE', 'OF', 'ADHERENTS', 'THE', 'ORGANIZATION', 'NUMBERS', 'TODAY', 'MANY', 'HUNDRED', 'THOUSAND', 'SOULS'] +4077-13751-0002-1260: hyp=['INSTEAD', 'OF', 'BUT', 'SIX', 'REGULARLY', 'AFFILIATED', 'MEMBERS', 'AND', 'AT', 'MOST', 'TWO', 'SCORE', 'OF', 'ADHERENTS', 'THE', 'ORGANIZATION', 'NUMBERS', 'TO', 'DAY', 'MANY', 'HUNDRED', 'THOUSAND', 'SOULS'] +4077-13751-0003-1261: ref=['IN', 'PLACE', 'OF', 'A', 'SINGLE', 'HAMLET', 'IN', 'THE', 'SMALLEST', 'CORNER', 'OF', 'WHICH', 'THE', 'MEMBERS', 'COULD', 'HAVE', 'CONGREGATED', 'THERE', 'NOW', 'ARE', 'ABOUT', 'SEVENTY', 'STAKES', 'OF', 'ZION', 'AND', 'ABOUT', 'SEVEN', 'HUNDRED', 'ORGANIZED', 'WARDS', 'EACH', 'WARD', 'AND', 'STAKE', 'WITH', 'ITS', 'FULL', 'COMPLEMENT', 'OF', 'OFFICERS', 'AND', 'PRIESTHOOD', 'ORGANIZATIONS'] +4077-13751-0003-1261: hyp=['IN', 'PLACE', 'OF', 'A', 'SINGLE', 'HAMLET', 'IN', 'THE', 'SMALLEST', 'CORNER', 'OF', 'WHICH', 'THE', 'MEMBERS', 'COULD', 'HAVE', 'CONGREGATED', 'THERE', 'NOW', 'ARE', 'ABOUT', 'SEVENTY', 'STAKES', 'OF', 'ZION', 'AND', 'ABOUT', 'SEVEN', 'HUNDRED', 'ORGANIZED', 'WARDS', 'EACH', 'WARD', 'AND', 'STAKE', 'WITH', 'ITS', 'FULL', 'COMPLEMENT', 'OF', 'OFFICERS', 'AND', 'PRIESTHOOD', 'ORGANIZATIONS'] +4077-13751-0004-1262: ref=['THE', 'PRACTISE', 'OF', 'GATHERING', 'ITS', 'PROSELYTES', 'INTO', 'ONE', 'PLACE', 'PREVENTS', 'THE', 'BUILDING', 'UP', 'AND', 'STRENGTHENING', 'OF', 'FOREIGN', 'BRANCHES', 'AND', 'INASMUCH', 'AS', 
'EXTENSIVE', 'AND', 'STRONG', 'ORGANIZATIONS', 'ARE', 'SELDOM', 'MET', 'WITH', 'ABROAD', 'VERY', 'ERRONEOUS', 'IDEAS', 'EXIST', 'CONCERNING', 'THE', 'STRENGTH', 'OF', 'THE', 'CHURCH'] +4077-13751-0004-1262: hyp=['THE', 'PRACTICE', 'OF', 'GATHERING', 'ITS', 'PROSELYTES', 'INTO', 'ONE', 'PLACE', 'PREVENTS', 'THE', 'BILLING', 'UP', 'AND', 'STRENGTHENING', 'OF', 'FOREIGN', 'BRANCHES', 'AND', 'INASMUCH', 'AS', 'EXTENSIVE', 'AND', 'STRONG', 'ORGANIZATIONS', 'ARE', 'SELDOM', 'MET', 'WITH', 'ABROAD', 'VERY', 'ERRONEOUS', 'IDEAS', 'EXIST', 'CONCERNING', 'THE', 'STRENGTH', 'OF', 'THE', 'CHURCH'] +4077-13751-0005-1263: ref=['NEVERTHELESS', 'THE', 'MUSTARD', 'SEED', 'AMONG', 'THE', 'SMALLEST', 'OF', 'ALL', 'SEEDS', 'HAS', 'ATTAINED', 'THE', 'PROPORTIONS', 'OF', 'A', 'TREE', 'AND', 'THE', 'BIRDS', 'OF', 'THE', 'AIR', 'ARE', 'NESTING', 'IN', 'ITS', 'BRANCHES', 'THE', 'ACORN', 'IS', 'NOW', 'AN', 'OAK', 'OFFERING', 'PROTECTION', 'AND', 'THE', 'SWEETS', 'OF', 'SATISFACTION', 'TO', 'EVERY', 'EARNEST', 'PILGRIM', 'JOURNEYING', 'ITS', 'WAY', 'FOR', 'TRUTH'] +4077-13751-0005-1263: hyp=['NEVERTHELESS', 'THE', 'MUSTARD', 'SEED', 'AMONG', 'THE', 'SMALLEST', 'OF', 'ALL', 'SEEDS', 'HESITATED', 'THE', 'PROPORTIONS', 'OF', 'A', 'TREE', 'AND', 'THE', 'BIRDS', 'OF', 'THE', 'AIR', 'ARE', 'NESTING', 'IN', 'ITS', 'BRANCHES', 'THE', 'ACORN', 'IS', 'NOW', 'IN', 'OAK', 'OFFERING', 'PROTECTION', 'AND', 'THE', 'SWEETS', 'OF', 'SATISFACTION', 'TO', 'EVERY', 'EARNEST', 'PILGRIM', 'JOURNEYING', 'ITS', 'WAY', 'FOR', 'TRUTH'] +4077-13751-0006-1264: ref=['THEIR', 'EYES', 'WERE', 'FROM', 'THE', 'FIRST', 'TURNED', 'IN', 'ANTICIPATION', 'TOWARD', 'THE', 'EVENING', 'SUN', 'NOT', 'MERELY', 'THAT', 'THE', 'WORK', 'OF', 'PROSELYTING', 'SHOULD', 'BE', 'CARRIED', 'ON', 'IN', 'THE', 'WEST', 'BUT', 'THAT', 'THE', 'HEADQUARTERS', 'OF', 'THE', 'CHURCH', 'SHOULD', 'BE', 'THERE', 'ESTABLISHED'] +4077-13751-0006-1264: hyp=['THEIR', 'EYES', 'WERE', 'FROM', 'THE', 'FIRST', 'TURNED', 'IN', 'ANTICIPATION', 'TOWARD', 'THE', 'EVENING', 'SUN', 'NOT', 'MERELY', 'THAT', 'THE', 'WORK', 'OF', 'PROSELLING', 'SHOULD', 'BE', 'CARRIED', 'ON', 'IN', 'THE', 'WEST', 'BUT', 'THAT', 'THE', 'HEADQUARTERS', 'OF', 'THE', 'CHURCH', 'SHOULD', 'BE', 'THEIR', 'ESTABLISHED'] +4077-13751-0007-1265: ref=['THE', 'BOOK', 'OF', 'MORMON', 'HAD', 'TAUGHT', 'THE', 'PEOPLE', 'THE', 'TRUE', 'ORIGIN', 'AND', 'DESTINY', 'OF', 'THE', 'AMERICAN', 'INDIANS', 'AND', 'TOWARD', 'THIS', 'DARK', 'SKINNED', 'REMNANT', 'OF', 'A', 'ONCE', 'MIGHTY', 'PEOPLE', 'THE', 'MISSIONARIES', 'OF', 'MORMONISM', 'EARLY', 'TURNED', 'THEIR', 'EYES', 'AND', 'WITH', 'THEIR', 'EYES', 'WENT', 'THEIR', 'HEARTS', 'AND', 'THEIR', 'HOPES'] +4077-13751-0007-1265: hyp=['THE', 'BOOK', 'OR', 'MORMON', 'HAD', 'TAUGHT', 'THAT', 'PEOPLE', 'THE', 'TRUE', 'ORIGIN', 'AND', 'DESTINY', 'OF', 'THE', 'AMERICAN', 'INDIANS', 'AND', 'TOWARD', 'THIS', 'DARK', 'SKINNED', 'REMNANT', 'OF', 'A', 'ONCE', 'MIGHTY', 'PEOPLE', 'THE', 'MISSIONARIES', 'OF', 'MORMONISM', 'EARLY', 'TURNED', 'THEIR', 'EYES', 'AND', 'WITH', 'THEIR', 'EYES', 'WENT', 'THEIR', 'HEARTS', 'AND', 'THEIR', 'HOPES'] +4077-13751-0008-1266: ref=['IT', 'IS', 'NOTABLE', 'THAT', 'THE', 'INDIAN', 'TRIBES', 'HAVE', 'GENERALLY', 'REGARDED', 'THE', 'RELIGION', 'OF', 'THE', 'LATTER', 'DAY', 'SAINTS', 'WITH', 'FAVOR', 'SEEING', 'IN', 'THE', 'BOOK', 'OF', 'MORMON', 'STRIKING', 'AGREEMENT', 'WITH', 'THEIR', 'OWN', 'TRADITIONS'] +4077-13751-0008-1266: hyp=['IT', 'IS', 'NOTABLE', 'THAT', 'THE', 'INDIAN', 'TRIBES', 'HAVE', 'GENERALLY', 'REGARDED', 'THEIR', 'RELIGION', 'OF', 'THE', 'LATTER', 
'DAY', 'SAINTS', 'WITH', 'FAVOR', 'SEEING', 'IN', 'THE', 'BOOK', 'A', 'MORMON', 'STRIKING', 'AGREEMENT', 'WITH', 'THEIR', 'OWN', 'TRADITIONS'] +4077-13751-0009-1267: ref=['THE', 'FIRST', 'WELL', 'ESTABLISHED', 'SEAT', 'OF', 'THE', 'CHURCH', 'WAS', 'IN', 'THE', 'PRETTY', 'LITTLE', 'TOWN', 'OF', 'KIRTLAND', 'OHIO', 'ALMOST', 'WITHIN', 'SIGHT', 'OF', 'LAKE', 'ERIE', 'AND', 'HERE', 'SOON', 'ROSE', 'THE', 'FIRST', 'TEMPLE', 'OF', 'MODERN', 'TIMES'] +4077-13751-0009-1267: hyp=['THE', 'FIRST', 'WELL', 'ESTABLISHED', 'SEAT', 'OF', 'THE', 'CHURCH', 'WAS', 'IN', 'THE', 'PRETTY', 'LITTLE', 'TOWN', 'OF', 'CURTLEND', 'OHIO', 'ALMOST', 'WITHIN', 'SIGHT', 'OF', 'LAKE', 'ERIE', 'AND', 'HERE', 'SOON', 'ROSE', 'THE', 'FIRST', 'TEMPLE', 'OF', 'MODERN', 'TIMES'] +4077-13751-0010-1268: ref=['TO', 'THE', 'FERVENT', 'LATTER', 'DAY', 'SAINT', 'A', 'TEMPLE', 'IS', 'NOT', 'SIMPLY', 'A', 'CHURCH', 'BUILDING', 'A', 'HOUSE', 'FOR', 'RELIGIOUS', 'ASSEMBLY'] +4077-13751-0010-1268: hyp=['TO', 'THE', 'FERVENT', 'LATTER', 'DAY', 'SAINT', 'A', 'TEMPLE', 'IS', 'NOT', 'SIMPLY', 'A', 'CHURCH', 'BUILDING', 'A', 'HOUSE', 'FOR', 'RELIGIOUS', 'ASSEMBLY'] +4077-13751-0011-1269: ref=['SOON', 'THOUSANDS', 'OF', 'CONVERTS', 'HAD', 'RENTED', 'OR', 'PURCHASED', 'HOMES', 'IN', 'MISSOURI', 'INDEPENDENCE', 'JACKSON', 'COUNTY', 'BEING', 'THEIR', 'CENTER', 'BUT', 'FROM', 'THE', 'FIRST', 'THEY', 'WERE', 'UNPOPULAR', 'AMONG', 'THE', 'MISSOURIANS'] +4077-13751-0011-1269: hyp=['SOON', 'THOUSANDS', 'OF', 'CONVERTS', 'HAD', 'RENTED', 'OR', 'PURCHASED', 'HOMES', 'IN', 'MISSOURI', 'INDEPENDENCE', 'JACKSON', 'COUNTY', 'BEING', 'THEIR', 'CENTER', 'BUT', 'FROM', 'THE', 'FIRST', 'THEY', 'WERE', 'UNPOPULAR', 'AMONG', 'THE', 'MISSOURIANS'] +4077-13751-0012-1270: ref=['THE', 'LIEUTENANT', 'GOVERNOR', 'LILBURN', 'W', 'BOGGS', 'AFTERWARD', 'GOVERNOR', 'WAS', 'A', 'PRONOUNCED', 'MORMON', 'HATER', 'AND', 'THROUGHOUT', 'THE', 'PERIOD', 'OF', 'THE', 'TROUBLES', 'HE', 'MANIFESTED', 'SYMPATHY', 'WITH', 'THE', 'PERSECUTORS'] +4077-13751-0012-1270: hyp=['THE', 'LIEUTENANT', 'GOVERNOR', 'LITTLE', 'BURN', 'W', 'BOX', 'AFTERWARD', 'GOVERNOR', 'WAS', 'A', 'PRONOUNCED', 'MORMON', 'HAYTER', 'AND', 'THROUGHOUT', 'THE', 'PERIOD', 'OF', 'THE', 'TROUBLES', 'HE', 'MANIFEST', 'HIS', 'SYMPATHY', 'WITH', 'THE', 'PERSECUTORS'] +4077-13751-0013-1271: ref=['THEIR', 'SUFFERINGS', 'HAVE', 'NEVER', 'YET', 'BEEN', 'FITLY', 'CHRONICLED', 'BY', 'HUMAN', 'SCRIBE'] +4077-13751-0013-1271: hyp=['THEIR', 'SUFFERINGS', 'HAVE', 'NEVER', 'YET', 'BEEN', 'FITLY', 'CHRONICLED', 'BY', 'HUMAN', 'SCRIBE'] +4077-13751-0014-1272: ref=['MAKING', 'THEIR', 'WAY', 'ACROSS', 'THE', 'RIVER', 'MOST', 'OF', 'THE', 'REFUGEES', 'FOUND', 'SHELTER', 'AMONG', 'THE', 'MORE', 'HOSPITABLE', 'PEOPLE', 'OF', 'CLAY', 'COUNTY', 'AND', 'AFTERWARD', 'ESTABLISHED', 'THEMSELVES', 'IN', 'CALDWELL', 'COUNTY', 'THEREIN', 'FOUNDING', 'THE', 'CITY', 'OF', 'FAR', 'WEST'] +4077-13751-0014-1272: hyp=['MAKING', 'THEIR', 'WAY', 'ACROSS', 'THE', 'RIVER', 'MOST', 'OF', 'THE', 'REFUGEES', 'FOUND', 'SHELTER', 'AMONG', 'THE', 'MORE', 'HOSPITABLE', 'PEOPLE', 'OF', 'CLAY', 'COUNTY', 'AND', 'AFTERWARD', 'ESTABLISHED', 'THEMSELVES', 'IN', 'COLDWELL', 'COUNTY', 'THEY', 'WERE', 'IN', 'FOUNDING', 'THE', 'CITY', 'OF', 'FAR', 'WEST'] +4077-13751-0015-1273: ref=['A', 'SMALL', 'SETTLEMENT', 'HAD', 'BEEN', 'FOUNDED', 'BY', 'MORMON', 'FAMILIES', 'ON', 'SHOAL', 'CREEK', 'AND', 'HERE', 'ON', 'THE', 'THIRTIETH', 'OF', 'OCTOBER', 'EIGHTEEN', 'THIRTY', 'EIGHT', 'A', 'COMPANY', 'OF', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'FELL', 'UPON', 'THE', 'HAPLESS', 
'SETTLERS', 'AND', 'BUTCHERED', 'A', 'SCORE'] +4077-13751-0015-1273: hyp=['A', 'SMALL', 'SETTLEMENT', 'HAD', 'BEEN', 'FOUNDED', 'BY', 'MORMON', 'FAMILIES', 'ON', 'SHOAL', 'CREEK', 'AND', 'HERE', 'ON', 'THE', 'THIRTIETH', 'OF', 'OCTOBER', 'EIGHTEEN', 'THIRTY', 'EIGHT', 'A', 'COMPANY', 'OF', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'FELL', 'UPON', 'THE', 'HAPLESS', 'SETTLERS', 'AND', 'BUTCHER', 'TO', 'SCORE'] +4077-13751-0016-1274: ref=['BE', 'IT', 'SAID', 'TO', 'THE', 'HONOR', 'OF', 'SOME', 'OF', 'THE', 'OFFICERS', 'ENTRUSTED', 'WITH', 'THE', 'TERRIBLE', 'COMMISSION', 'THAT', 'WHEN', 'THEY', 'LEARNED', 'ITS', 'TRUE', 'SIGNIFICANCE', 'THEY', 'RESIGNED', 'THEIR', 'AUTHORITY', 'RATHER', 'THAN', 'HAVE', 'ANYTHING', 'TO', 'DO', 'WITH', 'WHAT', 'THEY', 'DESIGNATED', 'A', 'COLD', 'BLOODED', 'BUTCHERY'] +4077-13751-0016-1274: hyp=['BE', 'IT', 'SAID', 'TO', 'THE', 'HONOR', 'OF', 'SOME', 'OF', 'THE', 'OFFICERS', 'ENTRUSTED', 'WITH', 'A', 'TERRIBLE', 'COMMISSION', 'THAT', 'WHEN', 'THEY', 'LEARNED', 'ITS', 'TRUE', 'SIGNIFICANCE', 'THEY', 'RESIGNED', 'THEIR', 'AUTHORITY', 'RATHER', 'THAN', 'HAVE', 'ANYTHING', 'TO', 'DO', 'WITH', 'WHAT', 'THEY', 'DESIGNATED', 'A', 'COLD', 'BLOODED', 'BUTCHERY'] +4077-13751-0017-1275: ref=['OH', 'WHAT', 'A', 'RECORD', 'TO', 'READ', 'WHAT', 'A', 'PICTURE', 'TO', 'GAZE', 'UPON', 'HOW', 'AWFUL', 'THE', 'FACT'] +4077-13751-0017-1275: hyp=['OH', 'WHAT', 'A', 'RECORD', 'TO', 'READ', 'WHAT', 'A', 'PICTURE', 'TO', 'GAZE', 'UPON', 'HOW', 'AWFUL', 'THE', 'FACT'] +4077-13751-0018-1276: ref=['AMERICAN', 'SCHOOL', 'BOYS', 'READ', 'WITH', 'EMOTIONS', 'OF', 'HORROR', 'OF', 'THE', 'ALBIGENSES', 'DRIVEN', 'BEATEN', 'AND', 'KILLED', 'WITH', 'A', 'PAPAL', 'LEGATE', 'DIRECTING', 'THE', 'BUTCHERY', 'AND', 'OF', 'THE', 'VAUDOIS', 'HUNTED', 'AND', 'HOUNDED', 'LIKE', 'BEASTS', 'AS', 'THE', 'EFFECT', 'OF', 'A', 'ROYAL', 'DECREE', 'AND', 'THEY', 'YET', 'SHALL', 'READ', 'IN', 'THE', 'HISTORY', 'OF', 'THEIR', 'OWN', 'COUNTRY', 'OF', 'SCENES', 'AS', 'TERRIBLE', 'AS', 'THESE', 'IN', 'THE', 'EXHIBITION', 'OF', 'INJUSTICE', 'AND', 'INHUMAN', 'HATE'] +4077-13751-0018-1276: hyp=['AMERICAN', 'SCHOOLBOYS', 'READ', 'WITH', 'EMOTIONS', 'OF', 'HORROR', 'OF', 'THE', 'ALBIGENZAS', 'DRIVEN', 'BEATEN', 'AND', 'KILLED', 'WITH', 'A', 'PEPPEL', 'LEGATE', 'DIRECTING', 'THE', 'BUTCHERY', 'AND', 'OF', 'THE', 'FAUDOIR', 'HUNTED', 'AND', 'HOUNDED', 'LIKE', 'BEASTS', 'AS', 'THE', 'EFFECT', 'OF', 'A', 'ROYAL', 'DECREE', 'AND', 'THEY', 'YET', 'SHALL', 'READ', 'IN', 'THE', 'HISTORY', 'OF', 'THEIR', 'OWN', 'COUNTRY', 'OF', 'SCENES', 'AS', 'TERRIBLE', 'AS', 'THESE', 'IN', 'THE', 'EXHIBITION', 'OF', 'INJUSTICE', 'AND', 'INHUMAN', 'HATE'] +4077-13751-0019-1277: ref=['WHO', 'BEGAN', 'THE', 'QUARREL', 'WAS', 'IT', 'THE', 'MORMONS'] +4077-13751-0019-1277: hyp=['WHO', 'BEGAN', 'THE', 'QUARREL', 'WAS', 'IT', 'THE', 'MORMONS'] +4077-13751-0020-1278: ref=['AS', 'A', 'SAMPLE', 'OF', 'THE', 'PRESS', 'COMMENTS', 'AGAINST', 'THE', 'BRUTALITY', 'OF', 'THE', 'MISSOURIANS', 'I', 'QUOTE', 'A', 'PARAGRAPH', 'FROM', 'THE', 'QUINCY', 'ARGUS', 'MARCH', 'SIXTEENTH', 'EIGHTEEN', 'THIRTY', 'NINE'] +4077-13751-0020-1278: hyp=['AS', 'THE', 'SAMPLE', 'OF', 'THE', 'PRESS', 'COMETS', 'AGAINST', 'THE', 'BRUTALITY', 'OF', 'THE', 'MISERIES', 'I', 'QUOTE', 'A', 'PARAGRAPH', 'FROM', 'THE', 'QUINCY', 'ARGUS', 'MARCH', 'SIXTEENTH', 'EIGHTEEN', 'THIRTY', 'NINE'] +4077-13751-0021-1279: ref=['IT', 'WILL', 'BE', 'OBSERVED', 'THAT', 'AN', 'ORGANIZED', 'MOB', 'AIDED', 'BY', 'MANY', 'OF', 'THE', 'CIVIL', 'AND', 'MILITARY', 'OFFICERS', 'OF', 'MISSOURI', 'WITH', 'GOVERNOR', 
'BOGGS', 'AT', 'THEIR', 'HEAD', 'HAVE', 'BEEN', 'THE', 'PROMINENT', 'ACTORS', 'IN', 'THIS', 'BUSINESS', 'INCITED', 'TOO', 'IT', 'APPEARS', 'AGAINST', 'THE', 'MORMONS', 'BY', 'POLITICAL', 'HATRED', 'AND', 'BY', 'THE', 'ADDITIONAL', 'MOTIVES', 'OF', 'PLUNDER', 'AND', 'REVENGE'] +4077-13751-0021-1279: hyp=['IT', 'WILL', 'BE', 'OBSERVED', 'THAT', 'AN', 'ORGANIZED', 'MOB', 'AIDED', 'BY', 'MANY', 'OF', 'THE', 'CIVIL', 'AND', 'MILITARY', 'OFFICERS', 'OF', 'MISSOURI', 'WITH', 'GOVERNOR', 'BOX', 'AT', 'THEIR', 'HEAD', 'HAVE', 'BEEN', 'THE', 'PROMINENT', 'ACTORS', 'IN', 'THIS', 'BUSINESS', 'INCITED', 'TOO', 'IT', 'APPEARS', 'AGAINST', 'THE', 'MORMONS', 'BY', 'POLITICAL', 'HATRED', 'AND', 'BY', 'THE', 'ADDITIONAL', 'MOTIVES', 'OF', 'PLUNDER', 'AND', 'REVENGE'] +4077-13754-0000-1241: ref=['THE', 'ARMY', 'FOUND', 'THE', 'PEOPLE', 'IN', 'POVERTY', 'AND', 'LEFT', 'THEM', 'IN', 'COMPARATIVE', 'WEALTH'] +4077-13754-0000-1241: hyp=['THE', 'ARMY', 'FOUND', 'THE', 'PEOPLE', 'IN', 'POVERTY', 'AND', 'LEFT', 'THEM', 'IN', 'COMPARATIVE', 'WEALTH'] +4077-13754-0001-1242: ref=['BUT', 'A', 'WORD', 'FURTHER', 'CONCERNING', 'THE', 'EXPEDITION', 'IN', 'GENERAL'] +4077-13754-0001-1242: hyp=['BUT', 'A', 'WORD', 'FURTHER', 'CONCERNING', 'THE', 'EXPEDITION', 'IN', 'GENERAL'] +4077-13754-0002-1243: ref=['IT', 'WAS', 'THROUGH', "FLOYD'S", 'ADVICE', 'THAT', 'BUCHANAN', 'ORDERED', 'THE', 'MILITARY', 'EXPEDITION', 'TO', 'UTAH', 'OSTENSIBLY', 'TO', 'INSTALL', 'CERTAIN', 'FEDERAL', 'OFFICIALS', 'AND', 'TO', 'REPRESS', 'AN', 'ALLEGED', 'INFANTILE', 'REBELLION', 'WHICH', 'IN', 'FACT', 'HAD', 'NEVER', 'COME', 'INTO', 'EXISTENCE', 'BUT', 'IN', 'REALITY', 'TO', 'FURTHER', 'THE', 'INTERESTS', 'OF', 'THE', 'SECESSIONISTS'] +4077-13754-0002-1243: hyp=['IT', 'WAS', 'THROUGH', "FLOYD'S", 'ADVICE', 'THAT', 'BUCATED', 'ORDER', 'THE', 'MILITARY', 'EXPEDITION', 'TO', 'UTAH', 'OSTENSIBLY', 'TO', 'INSTALL', 'CERTAIN', 'FEDERAL', 'OFFICIALS', 'AND', 'TO', 'REPRESS', 'AN', 'ALLEGED', 'INFANTILE', 'REBELLION', 'WHICH', 'IN', 'FACT', 'HAD', 'NEVER', 'COME', 'INTO', 'EXISTENCE', 'BUT', 'IN', 'REALITY', 'TO', 'FURTHER', 'THE', 'INTRICTS', 'OF', 'THE', 'SECESSIONISTS'] +4077-13754-0003-1244: ref=['MOREOVER', 'HAD', 'THE', 'PEOPLE', 'BEEN', 'INCLINED', 'TO', 'REBELLION', 'WHAT', 'GREATER', 'OPPORTUNITY', 'COULD', 'THEY', 'HAVE', 'WISHED'] +4077-13754-0003-1244: hyp=['MOREOVER', 'HAD', 'THE', 'PEOPLE', 'BEEN', 'INCLINED', 'TO', 'REBELLION', 'WHAT', 'GREATER', 'OPPORTUNITY', 'COULD', 'THEY', 'HAVE', 'WISHED'] +4077-13754-0004-1245: ref=['ALREADY', 'A', 'NORTH', 'AND', 'A', 'SOUTH', 'WERE', 'TALKED', 'OF', 'WHY', 'NOT', 'SET', 'UP', 'ALSO', 'A', 'WEST'] +4077-13754-0004-1245: hyp=['ALREADY', 'A', 'NORTH', 'AND', 'THE', 'SOUTH', 'WERE', 'TALKED', 'OF', 'WHY', 'NOT', 'SET', 'UP', 'ALSO', 'A', 'WEST'] +4077-13754-0005-1246: ref=['THEY', 'KNEW', 'NO', 'NORTH', 'NO', 'SOUTH', 'NO', 'EAST', 'NO', 'WEST', 'THEY', 'STOOD', 'POSITIVELY', 'BY', 'THE', 'CONSTITUTION', 'AND', 'WOULD', 'HAVE', 'NOTHING', 'TO', 'DO', 'IN', 'THE', 'BLOODY', 'STRIFE', 'BETWEEN', 'BROTHERS', 'UNLESS', 'INDEED', 'THEY', 'WERE', 'SUMMONED', 'BY', 'THE', 'AUTHORITY', 'TO', 'WHICH', 'THEY', 'HAD', 'ALREADY', 'ONCE', 'LOYALLY', 'RESPONDED', 'TO', 'FURNISH', 'MEN', 'AND', 'ARMS', 'FOR', 'THEIR', "COUNTRY'S", 'NEED'] +4077-13754-0005-1246: hyp=['THEY', 'KNEW', 'NO', 'NORTH', 'NOR', 'SOUTH', 'NOR', 'EAST', 'NO', 'WEST', 'THEY', 'STOOD', 'POSITIVELY', 'BY', 'THE', 'CONSTITUTION', 'AND', 'WOULD', 'HAVE', 'NOTHING', 'TO', 'DO', 'IN', 'THE', 'BLOODY', 'STRIFE', 'BETWEEN', 'BROTHERS', 'UNLESS', 
'INDEED', 'THEY', 'WERE', 'SUMMONED', 'BY', 'THE', 'AUTHORITY', 'TO', 'WHICH', 'THEY', 'HAD', 'ALREADY', 'ONCE', 'LOYALLY', 'RESPONDED', 'TO', 'FURNISH', 'MEN', 'IN', 'ARMS', 'FOR', 'THEIR', "COUNTRY'S", 'NEED'] +4077-13754-0006-1247: ref=['WHAT', 'THE', 'LATTER', 'DAY', 'SAINTS', 'CALL', 'CELESTIAL', 'MARRIAGE', 'IS', 'CHARACTERISTIC', 'OF', 'THE', 'CHURCH', 'AND', 'IS', 'IN', 'VERY', 'GENERAL', 'PRACTISE', 'BUT', 'OF', 'CELESTIAL', 'MARRIAGE', 'PLURALITY', 'OF', 'WIVES', 'WAS', 'AN', 'INCIDENT', 'NEVER', 'AN', 'ESSENTIAL'] +4077-13754-0006-1247: hyp=['WHAT', 'THE', 'LATTER', 'DAY', 'SAINTS', 'CALL', 'CELESTIAL', 'MARRIAGE', 'IS', 'CHARACTERISTIC', 'OF', 'THE', 'CHURCH', 'AND', 'IS', 'IN', 'VERY', 'GENERAL', 'PRACTICE', 'BUT', 'OF', 'CELESTIAL', 'MARRIAGE', 'PLURALITY', 'OF', 'WIVES', 'WAS', 'AN', 'INCIDENT', 'NEVER', 'AN', 'ESSENTIAL'] +4077-13754-0007-1248: ref=['WE', 'BELIEVE', 'IN', 'A', 'LITERAL', 'RESURRECTION', 'AND', 'AN', 'ACTUAL', 'HEREAFTER', 'IN', 'WHICH', 'FUTURE', 'STATE', 'SHALL', 'BE', 'RECOGNIZED', 'EVERY', 'SANCTIFIED', 'AND', 'AUTHORIZED', 'RELATIONSHIP', 'EXISTING', 'HERE', 'ON', 'EARTH', 'OF', 'PARENT', 'AND', 'CHILD', 'BROTHER', 'AND', 'SISTER', 'HUSBAND', 'AND', 'WIFE'] +4077-13754-0007-1248: hyp=['WE', 'BELIEVE', 'IN', 'A', 'LITERAL', 'RESURRECTION', 'AND', 'AN', 'ACTUAL', 'HEREAFTER', 'IN', 'WHICH', 'FUTURE', 'STATES', 'SHALL', 'BE', 'RECOGNIZED', 'EVERY', 'SANCTIFIED', 'AND', 'AUTHORIZED', 'RELATIONSHIP', 'EXISTING', 'HERE', 'ON', 'EARTH', 'OF', 'PARENT', 'AND', 'CHILD', 'BROTHER', 'AND', 'SISTER', 'HUSBAND', 'AND', 'WIFE'] +4077-13754-0008-1249: ref=['IT', 'HAS', 'BEEN', 'MY', 'PRIVILEGE', 'TO', 'TREAD', 'THE', 'SOIL', 'OF', 'MANY', 'LANDS', 'TO', 'OBSERVE', 'THE', 'CUSTOMS', 'AND', 'STUDY', 'THE', 'HABITS', 'OF', 'MORE', 'NATIONS', 'THAN', 'ONE', 'AND', 'I', 'HAVE', 'YET', 'TO', 'FIND', 'THE', 'PLACE', 'AND', 'MEET', 'THE', 'PEOPLE', 'WHERE', 'AND', 'WITH', 'WHOM', 'THE', 'PURITY', 'OF', 'MAN', 'AND', 'WOMAN', 'IS', 'HELD', 'MORE', 'PRECIOUS', 'THAN', 'AMONG', 'THE', 'MALIGNED', 'MORMONS', 'IN', 'THE', 'MOUNTAIN', 'VALLEYS', 'OF', 'THE', 'WEST'] +4077-13754-0008-1249: hyp=['IT', 'HAS', 'BEEN', 'MY', 'PRIVILEGE', 'TO', 'TREAD', 'THE', 'SOIL', 'OF', 'MANY', 'LANDS', 'TO', 'OBSERVE', 'THE', 'CUSTOMS', 'AND', 'STUDY', 'THE', 'HABITS', 'OF', 'MORE', 'NATIONS', 'THAN', 'ONE', 'AND', 'I', 'HAVE', 'YET', 'TO', 'FIND', 'THE', 'PLACE', 'AND', 'MEET', 'THE', 'PEOPLE', 'WHERE', 'AND', 'WITH', 'WHOM', 'THE', 'PURITY', 'OF', 'MAN', 'AND', 'WOMAN', 'IS', 'HELD', 'MORE', 'PRECIOUS', 'THAN', 'AMONG', 'THE', 'MALIGNED', 'MORMONS', 'IN', 'THE', 'MOUNTAIN', 'VALLEYS', 'OF', 'THE', 'WEST'] +4077-13754-0009-1250: ref=['AT', 'THE', 'INCEPTION', 'OF', 'PLURAL', 'MARRIAGE', 'AMONG', 'THE', 'LATTER', 'DAY', 'SAINTS', 'THERE', 'WAS', 'NO', 'LAW', 'NATIONAL', 'OR', 'STATE', 'AGAINST', 'ITS', 'PRACTISE'] +4077-13754-0009-1250: hyp=['AT', 'THE', 'INCEPTION', 'OF', 'PEARL', 'MARRIAGE', 'AMONG', 'THE', 'LATTER', 'DAY', 'SAINTS', 'THERE', 'WAS', 'NO', 'LAW', 'NATIONAL', 'OR', 'STATE', 'AGAINST', 'ITS', 'PRACTICE'] +4077-13754-0010-1251: ref=['IN', 'EIGHTEEN', 'SIXTY', 'TWO', 'A', 'LAW', 'WAS', 'ENACTED', 'WITH', 'THE', 'PURPOSE', 'OF', 'SUPPRESSING', 'PLURAL', 'MARRIAGE', 'AND', 'AS', 'HAD', 'BEEN', 'PREDICTED', 'IN', 'THE', 'NATIONAL', 'SENATE', 'PRIOR', 'TO', 'ITS', 'PASSAGE', 'IT', 'LAY', 'FOR', 'MANY', 'YEARS', 'A', 'DEAD', 'LETTER'] +4077-13754-0010-1251: hyp=['IN', 'EIGHTEEN', 'SIXTY', 'TWO', 'A', 'LAW', 'WAS', 'ENACTED', 'WITH', 'A', 'PURPOSE', 'OF', 'SUPPRESSING', 'PLURAL', 'MARRIAGE', 
'AND', 'AS', 'HAD', 'BEEN', 'PREDICTED', 'IN', 'THE', 'NATIONAL', 'SENATE', 'PRAYER', 'TO', 'ITS', 'PASSAGE', 'IT', 'LAY', 'FOR', 'MANY', 'YEARS', 'A', 'DEAD', 'LETTER'] +4077-13754-0011-1252: ref=['FEDERAL', 'JUDGES', 'AND', 'UNITED', 'STATES', 'ATTORNEYS', 'IN', 'UTAH', 'WHO', 'WERE', 'NOT', 'MORMONS', 'NOR', 'LOVERS', 'OF', 'MORMONISM', 'REFUSED', 'TO', 'ENTERTAIN', 'COMPLAINTS', 'OR', 'PROSECUTE', 'CASES', 'UNDER', 'THE', 'LAW', 'BECAUSE', 'OF', 'ITS', 'MANIFEST', 'INJUSTICE', 'AND', 'INADEQUACY'] +4077-13754-0011-1252: hyp=['FEDERAL', 'JUDGES', 'AND', 'UNITED', 'STATES', 'ATTORNEYS', 'IN', 'UTA', 'WHO', 'WERE', 'NOT', 'MORMONS', 'NOR', 'LOVERS', 'OF', 'WOMENISM', 'REFUSED', 'TO', 'ENTERTAIN', 'COMPLAINTS', 'OR', 'PROSECUTE', 'CASES', 'UNDER', 'THE', 'LAW', 'BECAUSE', 'OF', 'ITS', 'MANIFEST', 'INJUSTICE', 'AND', 'INADEQUACY'] +4077-13754-0012-1253: ref=['THIS', 'MEANT', 'THAT', 'FOR', 'AN', 'ALLEGED', 'MISDEMEANOR', 'FOR', 'WHICH', 'CONGRESS', 'PRESCRIBED', 'A', 'MAXIMUM', 'PENALTY', 'OF', 'SIX', 'MONTHS', 'IMPRISONMENT', 'AND', 'A', 'FINE', 'OF', 'THREE', 'HUNDRED', 'DOLLARS', 'A', 'MAN', 'MIGHT', 'BE', 'IMPRISONED', 'FOR', 'LIFE', 'AYE', 'FOR', 'MANY', 'TERMS', 'OF', 'A', "MAN'S", 'NATURAL', 'LIFE', 'DID', 'THE', "COURT'S", 'POWER', 'TO', 'ENFORCE', 'ITS', 'SENTENCES', 'EXTEND', 'SO', 'FAR', 'AND', 'MIGHT', 'BE', 'FINED', 'MILLIONS', 'OF', 'DOLLARS'] +4077-13754-0012-1253: hyp=['THIS', 'MEANT', 'THAT', 'FOR', 'AN', 'ALLEGED', 'MISDEMEANOR', 'FOR', 'WHICH', 'CONGRESS', 'PRESCRIBED', 'A', 'MAXIMUM', 'PENALTY', 'OF', 'SIX', 'MONTHS', 'IMPRISONMENT', 'AND', 'A', 'FINE', 'OF', 'THREE', 'HUNDRED', 'DOLLARS', 'A', 'MAN', 'MIGHT', 'BE', 'IMPRISONED', 'FOR', 'LIFE', 'I', 'FOR', 'MANY', 'TERMS', 'OF', 'A', "MAN'S", 'NATURAL', 'LIFE', 'DID', 'THE', 'COURTS', 'POWER', 'TO', 'ENFORCE', 'ITS', 'SENTENCES', 'EXTEND', 'SO', 'FAR', 'AND', 'MIGHT', 'BE', 'FINED', 'MILLIONS', 'OF', 'DOLLARS'] +4077-13754-0013-1254: ref=['BEFORE', 'THIS', 'TRAVESTY', 'ON', 'THE', 'ADMINISTRATION', 'OF', 'LAW', 'COULD', 'BE', 'BROUGHT', 'BEFORE', 'THE', 'COURT', 'OF', 'LAST', 'RESORT', 'AND', 'THERE', 'MEET', 'WITH', 'THE', 'REVERSAL', 'AND', 'REBUKE', 'IT', 'DESERVED', 'MEN', 'WERE', 'IMPRISONED', 'UNDER', 'SENTENCES', 'OF', 'MANY', 'YEARS', 'DURATION'] +4077-13754-0013-1254: hyp=['BEFORE', 'THIS', 'TRAVESTY', 'ON', 'THE', 'ADMINISTRATION', 'OF', 'LAW', 'COULD', 'BE', 'BROUGHT', 'BEFORE', 'THE', 'COURT', 'OF', 'LAST', 'RESORT', 'AND', 'THERE', 'MET', 'WITH', 'THE', 'REVERSAL', 'AND', 'REBUKE', 'IT', 'DESERVED', 'MEN', 'WERE', 'IMPRISONED', 'UNDER', 'SENTENCE', 'OF', 'MANY', 'YEARS', 'DURATION'] +4077-13754-0014-1255: ref=['THE', 'PEOPLE', 'CONTESTED', 'THESE', 'MEASURES', 'ONE', 'BY', 'ONE', 'IN', 'THE', 'COURTS', 'PRESENTING', 'IN', 'CASE', 'AFTER', 'CASE', 'THE', 'DIFFERENT', 'PHASES', 'OF', 'THE', 'SUBJECT', 'AND', 'URGING', 'THE', 'UNCONSTITUTIONALITY', 'OF', 'THE', 'MEASURE'] +4077-13754-0014-1255: hyp=['THE', 'PEOPLE', 'CONTESTED', 'THESE', 'MEASURES', 'ONE', 'BY', 'ONE', 'IN', 'THE', 'COURTS', 'PRESENTING', 'IN', 'CASE', 'AFTER', 'CASE', 'THE', 'DIFFERENT', 'PHASES', 'OF', 'THE', 'SUBJECT', 'AND', 'URGING', 'THE', 'UNCONSTITUTIONALITY', 'OF', 'THE', 'MEASURE'] +4077-13754-0015-1256: ref=['THEN', 'THE', 'CHURCH', 'WAS', 'DISINCORPORATED', 'AND', 'ITS', 'PROPERTY', 'BOTH', 'REAL', 'AND', 'PERSONAL', 'CONFISCATED', 'AND', 'ESCHEATED', 'TO', 'THE', 'GOVERNMENT', 'OF', 'THE', 'UNITED', 'STATES', 'AND', 'ALTHOUGH', 'THE', 'PERSONAL', 'PROPERTY', 'WAS', 'SOON', 'RESTORED', 'REAL', 'ESTATE', 'OF', 'GREAT', 'VALUE', 
'LONG', 'LAY', 'IN', 'THE', 'HANDS', 'OF', 'THE', "COURT'S", 'RECEIVER', 'AND', 'THE', 'MORMON', 'CHURCH', 'HAD', 'TO', 'PAY', 'THE', 'NATIONAL', 'GOVERNMENT', 'HIGH', 'RENTAL', 'ON', 'ITS', 'OWN', 'PROPERTY'] +4077-13754-0015-1256: hyp=['THEN', 'THE', 'CHURCH', 'WAS', 'DISINCORPORATED', 'AND', 'ITS', 'PROPERTY', 'BOTH', 'REAL', 'AND', 'PERSONAL', 'CONFISCATED', 'AND', 'ISIATED', 'TO', 'THE', 'GOVERNMENT', 'OF', 'THE', 'UNITED', 'STATES', 'AND', 'ALTHOUGH', 'THE', 'PERSONAL', 'PROPERTY', 'WAS', 'SOON', 'RESTORED', 'REAL', 'ESTATE', 'OF', 'GREAT', 'VALUE', 'LONG', 'LAY', 'IN', 'THE', 'HANDS', 'OF', 'THE', 'COURTS', 'RECEIVER', 'AND', 'THE', 'MORMON', 'CHURCH', 'HAD', 'TO', 'PAY', 'THE', 'NATIONAL', 'GOVERNMENT', 'HIGHER', 'RENTAL', 'ON', 'ITS', 'OWN', 'PROPERTY'] +4077-13754-0016-1257: ref=['AND', 'SO', 'THE', 'STORY', 'OF', 'MORMONISM', 'RUNS', 'ON', 'ITS', 'FINALE', 'HAS', 'NOT', 'YET', 'BEEN', 'WRITTEN', 'THE', 'CURRENT', 'PRESS', 'PRESENTS', 'CONTINUOUSLY', 'NEW', 'STAGES', 'OF', 'ITS', 'PROGRESS', 'NEW', 'DEVELOPMENTS', 'OF', 'ITS', 'PLAN'] +4077-13754-0016-1257: hyp=['AND', 'SO', 'THE', 'STORY', 'OF', 'MORMONISM', 'RUNS', 'ON', 'ITS', 'FINALE', 'HAS', 'NOT', 'YET', 'BEEN', 'WRITTEN', 'THE', 'CURRENT', 'PRESS', 'PRESENTS', 'CONTINUOUSLY', 'NEW', 'STAGES', 'OF', 'ITS', 'PROGRESS', 'NEW', 'DEVELOPMENTS', 'OF', 'ITS', 'PLAN'] +4446-2271-0000-1133: ref=['MAINHALL', 'LIKED', 'ALEXANDER', 'BECAUSE', 'HE', 'WAS', 'AN', 'ENGINEER'] +4446-2271-0000-1133: hyp=['MAIN', 'HALL', 'LIKED', 'ALEXANDER', 'BECAUSE', 'HE', 'WAS', 'AN', 'ENGINEER'] +4446-2271-0001-1134: ref=['HE', 'HAD', 'PRECONCEIVED', 'IDEAS', 'ABOUT', 'EVERYTHING', 'AND', 'HIS', 'IDEA', 'ABOUT', 'AMERICANS', 'WAS', 'THAT', 'THEY', 'SHOULD', 'BE', 'ENGINEERS', 'OR', 'MECHANICS'] +4446-2271-0001-1134: hyp=['WE', 'HAD', 'FREQUENCY', 'IDEAS', 'ABOUT', 'EVERYTHING', 'AND', 'HIS', 'IDEA', 'ABOUT', 'AMERICANS', 'WAS', 'THAT', 'THEY', 'SHOULD', 'BE', 'ENGINEERS', 'OR', 'MECHANICS'] +4446-2271-0002-1135: ref=["IT'S", 'TREMENDOUSLY', 'WELL', 'PUT', 'ON', 'TOO'] +4446-2271-0002-1135: hyp=['ITS', 'TREMENDOUSLY', 'WELL', 'PUT', 'ON', 'TOO'] +4446-2271-0003-1136: ref=["IT'S", 'BEEN', 'ON', 'ONLY', 'TWO', 'WEEKS', 'AND', "I'VE", 'BEEN', 'HALF', 'A', 'DOZEN', 'TIMES', 'ALREADY'] +4446-2271-0003-1136: hyp=["IT'S", 'BEEN', 'ON', 'ONLY', 'TWO', 'WEEKS', 'AND', "I'VE", 'BEEN', 'HALF', 'A', 'DOZEN', 'TIMES', 'ALREADY'] +4446-2271-0004-1137: ref=['DO', 'YOU', 'KNOW', 'ALEXANDER', 'MAINHALL', 'LOOKED', 'WITH', 'PERPLEXITY', 'UP', 'INTO', 'THE', 'TOP', 'OF', 'THE', 'HANSOM', 'AND', 'RUBBED', 'HIS', 'PINK', 'CHEEK', 'WITH', 'HIS', 'GLOVED', 'FINGER', 'DO', 'YOU', 'KNOW', 'I', 'SOMETIMES', 'THINK', 'OF', 'TAKING', 'TO', 'CRITICISM', 'SERIOUSLY', 'MYSELF'] +4446-2271-0004-1137: hyp=['DO', 'YOU', 'KNOW', 'ALEXANDER', 'MAIN', 'HALL', 'LOOKED', 'WITH', 'PERPLEXITY', 'UP', 'INTO', 'THE', 'TOP', 'OF', 'THE', 'HANSOM', 'AND', 'RUBBED', 'HIS', 'PINK', 'CHEEK', 'WITH', 'HIS', 'GLOVED', 'FINGER', 'DO', 'YOU', 'KNOW', 'I', 'SOMETIMES', 'THINK', 'OF', 'TAKING', 'TO', 'CRITICISM', 'SERIOUSLY', 'MYSELF'] +4446-2271-0005-1138: ref=['SHE', 'SAVES', 'HER', 'HAND', 'TOO', "SHE'S", 'AT', 'HER', 'BEST', 'IN', 'THE', 'SECOND', 'ACT'] +4446-2271-0005-1138: hyp=['SHE', 'SAVES', 'HER', 'HAND', 'TOO', "SHE'S", 'AT', 'HER', 'BEST', 'IN', 'THE', 'SECOND', 'ACT'] +4446-2271-0006-1139: ref=["HE'S", 'BEEN', 'WANTING', 'TO', 'MARRY', 'HILDA', 'THESE', 'THREE', 'YEARS', 'AND', 'MORE'] +4446-2271-0006-1139: hyp=["HE'S", 'BEEN', 'WANTING', 'TO', 'MARRY', 'HILDER', 'THESE', 'THREE', 'YEARS', 
'AND', 'MORE'] +4446-2271-0007-1140: ref=['SHE', "DOESN'T", 'TAKE', 'UP', 'WITH', 'ANYBODY', 'YOU', 'KNOW'] +4446-2271-0007-1140: hyp=['SHE', "DOESN'T", 'TAKE', 'UP', 'WITH', 'ANYBODY', 'YOU', 'KNOW'] +4446-2271-0008-1141: ref=['IRENE', 'BURGOYNE', 'ONE', 'OF', 'HER', 'FAMILY', 'TOLD', 'ME', 'IN', 'CONFIDENCE', 'THAT', 'THERE', 'WAS', 'A', 'ROMANCE', 'SOMEWHERE', 'BACK', 'IN', 'THE', 'BEGINNING'] +4446-2271-0008-1141: hyp=['IRENE', 'WERE', 'GOING', 'ONE', 'OF', 'HER', 'FAMILY', 'TOLD', 'ME', 'IN', 'CONFIDENCE', 'THAT', 'THERE', 'WAS', 'A', 'ROMANCE', 'SOMEWHERE', 'BACK', 'IN', 'THE', 'BEGINNING'] +4446-2271-0009-1142: ref=['MAINHALL', 'VOUCHED', 'FOR', 'HER', 'CONSTANCY', 'WITH', 'A', 'LOFTINESS', 'THAT', 'MADE', 'ALEXANDER', 'SMILE', 'EVEN', 'WHILE', 'A', 'KIND', 'OF', 'RAPID', 'EXCITEMENT', 'WAS', 'TINGLING', 'THROUGH', 'HIM'] +4446-2271-0009-1142: hyp=['MAIN', 'HOLE', 'VOUCHED', 'FOR', 'HER', 'CONSTANCY', 'WITH', 'A', 'LOFTINESS', 'THAT', 'MADE', 'ALEXANDER', 'SMILE', 'EVEN', 'WHILE', 'A', 'KIND', 'OF', 'RAPID', 'EXCITEMENT', 'WAS', 'TINGLING', 'THROUGH', 'HIM'] +4446-2271-0010-1143: ref=["HE'S", 'ANOTHER', "WHO'S", 'AWFULLY', 'KEEN', 'ABOUT', 'HER', 'LET', 'ME', 'INTRODUCE', 'YOU'] +4446-2271-0010-1143: hyp=["HE'S", 'ANOTHER', "WHO'S", 'AWFULLY', 'KEEN', 'ABOUT', 'HER', 'LET', 'ME', 'INTRODUCE', 'YOU'] +4446-2271-0011-1144: ref=['SIR', 'HARRY', 'TOWNE', 'MISTER', 'BARTLEY', 'ALEXANDER', 'THE', 'AMERICAN', 'ENGINEER'] +4446-2271-0011-1144: hyp=['SIR', 'HARRYTOWN', 'MISTER', 'BARTLEY', 'ALEXANDER', 'THE', 'AMERICAN', 'ENGINEER'] +4446-2271-0012-1145: ref=['I', 'SAY', 'SIR', 'HARRY', 'THE', 'LITTLE', "GIRL'S", 'GOING', 'FAMOUSLY', 'TO', 'NIGHT', "ISN'T", 'SHE'] +4446-2271-0012-1145: hyp=['I', 'SAY', 'SIR', 'HARRY', 'THE', 'LITTLE', "GIRL'S", 'GOING', 'FAMOUSLY', 'TO', 'NIGHT', "ISN'T", 'SHE'] +4446-2271-0013-1146: ref=['DO', 'YOU', 'KNOW', 'I', 'THOUGHT', 'THE', 'DANCE', 'A', 'BIT', 'CONSCIOUS', 'TO', 'NIGHT', 'FOR', 'THE', 'FIRST', 'TIME'] +4446-2271-0013-1146: hyp=['YOU', 'KNOW', 'I', 'THOUGHT', 'THE', 'DANCE', 'A', 'BIT', 'CONSCIOUS', 'TO', 'NIGHT', 'FOR', 'THE', 'FIRST', 'TIME'] +4446-2271-0014-1147: ref=['WESTMERE', 'AND', 'I', 'WERE', 'BACK', 'AFTER', 'THE', 'FIRST', 'ACT', 'AND', 'WE', 'THOUGHT', 'SHE', 'SEEMED', 'QUITE', 'UNCERTAIN', 'OF', 'HERSELF'] +4446-2271-0014-1147: hyp=['WESTMER', 'AND', 'I', 'WERE', 'BACK', 'AFTER', 'THE', 'FIRST', 'ACT', 'AND', 'WE', 'THOUGHT', 'SHE', 'SEEMED', 'QUITE', 'UNCERTAIN', 'OF', 'HERSELF'] +4446-2271-0015-1148: ref=['A', 'LITTLE', 'ATTACK', 'OF', 'NERVES', 'POSSIBLY'] +4446-2271-0015-1148: hyp=['A', 'LITTLE', 'ATTACK', 'OF', 'NERVES', 'POSSIBLY'] +4446-2271-0016-1149: ref=['HE', 'WAS', 'BEGINNING', 'TO', 'FEEL', 'A', 'KEEN', 'INTEREST', 'IN', 'THE', 'SLENDER', 'BAREFOOT', 'DONKEY', 'GIRL', 'WHO', 'SLIPPED', 'IN', 'AND', 'OUT', 'OF', 'THE', 'PLAY', 'SINGING', 'LIKE', 'SOME', 'ONE', 'WINDING', 'THROUGH', 'A', 'HILLY', 'FIELD'] +4446-2271-0016-1149: hyp=['IT', 'WAS', 'BEGINNING', 'TO', 'FEEL', 'THE', 'KEEN', 'INTEREST', 'IN', 'THE', 'SLENDER', 'BAREFOOT', 'DONKEY', 'GIRL', 'WHO', 'SLIPPED', 'IN', 'AND', 'OUT', 'OF', 'THE', 'PLAY', 'SINGING', 'LIKE', 'SOME', 'ONE', 'WINDING', 'THROUGH', 'A', 'HILLY', 'FIELD'] +4446-2271-0017-1150: ref=['ONE', 'NIGHT', 'WHEN', 'HE', 'AND', 'WINIFRED', 'WERE', 'SITTING', 'TOGETHER', 'ON', 'THE', 'BRIDGE', 'HE', 'TOLD', 'HER', 'THAT', 'THINGS', 'HAD', 'HAPPENED', 'WHILE', 'HE', 'WAS', 'STUDYING', 'ABROAD', 'THAT', 'HE', 'WAS', 'SORRY', 'FOR', 'ONE', 'THING', 'IN', 'PARTICULAR', 'AND', 'HE', 'ASKED', 'HER', 'WHETHER', 
'SHE', 'THOUGHT', 'SHE', 'OUGHT', 'TO', 'KNOW', 'ABOUT', 'THEM'] +4446-2271-0017-1150: hyp=['ONE', 'NIGHT', 'WHEN', 'HE', 'AND', 'WINIFRED', 'WERE', 'SITTING', 'TOGETHER', 'ON', 'THE', 'BRIDGE', 'HE', 'TOLD', 'HER', 'THAT', 'THINGS', 'HAD', 'HAPPENED', 'WHILE', 'HE', 'WAS', 'STUDYING', 'ABROAD', 'THAT', 'HE', 'WAS', 'SORRY', 'FOR', 'ONE', 'THING', 'IN', 'PARTICULAR', 'AND', 'HE', 'ASKED', 'HER', 'WHETHER', 'SHE', 'THOUGHT', 'SHE', 'OUGHT', 'TO', 'KNOW', 'ABOUT', 'THEM'] +4446-2271-0018-1151: ref=['SHE', 'CONSIDERED', 'A', 'MOMENT', 'AND', 'THEN', 'SAID', 'NO', 'I', 'THINK', 'NOT', 'THOUGH', 'I', 'AM', 'GLAD', 'YOU', 'ASK', 'ME'] +4446-2271-0018-1151: hyp=['SHE', 'CONSIDERED', 'FOR', 'A', 'MOMENT', 'AND', 'THEN', 'SAID', 'NO', 'I', 'THINK', 'NOT', 'THE', 'WAY', 'I', 'AM', 'GLAD', 'YOU', 'ASK', 'ME'] +4446-2271-0019-1152: ref=['AFTER', 'THAT', 'IT', 'WAS', 'EASY', 'TO', 'FORGET', 'ACTUALLY', 'TO', 'FORGET'] +4446-2271-0019-1152: hyp=['AFTER', 'THAT', 'IT', 'WAS', 'EASY', 'TO', 'FORGET', 'ACTUALLY', 'TO', 'FORGET'] +4446-2271-0020-1153: ref=['OF', 'COURSE', 'HE', 'REFLECTED', 'SHE', 'ALWAYS', 'HAD', 'THAT', 'COMBINATION', 'OF', 'SOMETHING', 'HOMELY', 'AND', 'SENSIBLE', 'AND', 'SOMETHING', 'UTTERLY', 'WILD', 'AND', 'DAFT'] +4446-2271-0020-1153: hyp=['OF', 'COURSE', 'HE', 'REFLECTED', 'SHE', 'ALWAYS', 'HAD', 'THAT', 'COMBINATION', 'OF', 'SOMETHING', 'HOMELY', 'AND', 'SENSIBLE', 'AND', 'SOMETHING', 'UTTERLY', 'WILD', 'AND', 'DAFT'] +4446-2271-0021-1154: ref=['SHE', 'MUST', 'CARE', 'ABOUT', 'THE', 'THEATRE', 'A', 'GREAT', 'DEAL', 'MORE', 'THAN', 'SHE', 'USED', 'TO'] +4446-2271-0021-1154: hyp=['SHE', 'MUST', 'CARE', 'ABOUT', 'THE', 'THEATRE', 'A', 'GREAT', 'DEAL', 'MORE', 'THAN', 'SHE', 'USED', 'TO'] +4446-2271-0022-1155: ref=["I'M", 'GLAD', "SHE'S", 'HELD', 'HER', 'OWN', 'SINCE'] +4446-2271-0022-1155: hyp=["I'M", 'GLAD', "SHE'S", 'HELD', 'HER', 'OWN', 'SINCE'] +4446-2271-0023-1156: ref=['AFTER', 'ALL', 'WE', 'WERE', 'AWFULLY', 'YOUNG'] +4446-2271-0023-1156: hyp=['AFTER', 'ALL', 'WE', 'WERE', 'AWFULLY', 'YOUNG'] +4446-2271-0024-1157: ref=['I', "SHOULDN'T", 'WONDER', 'IF', 'SHE', 'COULD', 'LAUGH', 'ABOUT', 'IT', 'WITH', 'ME', 'NOW'] +4446-2271-0024-1157: hyp=['I', "SHOULDN'T", 'WONDER', 'IF', 'SHE', 'COULD', 'LAUGH', 'ABOUT', 'IT', 'WITH', 'ME', 'NOW'] +4446-2273-0000-1158: ref=['HILDA', 'WAS', 'VERY', 'NICE', 'TO', 'HIM', 'AND', 'HE', 'SAT', 'ON', 'THE', 'EDGE', 'OF', 'HIS', 'CHAIR', 'FLUSHED', 'WITH', 'HIS', 'CONVERSATIONAL', 'EFFORTS', 'AND', 'MOVING', 'HIS', 'CHIN', 'ABOUT', 'NERVOUSLY', 'OVER', 'HIS', 'HIGH', 'COLLAR'] +4446-2273-0000-1158: hyp=['HILDA', 'WAS', 'VERY', 'NICE', 'TO', 'HIM', 'AND', 'HE', 'SAT', 'ON', 'THE', 'EDGE', 'OF', 'HIS', 'CHAIR', 'FLUSHED', 'WITH', 'HIS', 'CONVERSATIONAL', 'EFFORTS', 'AND', 'MOVING', 'HIS', 'CHIN', 'ABOUT', 'NERVOUSLY', 'OVER', 'HIS', 'HIGH', 'COLLAR'] +4446-2273-0001-1159: ref=['THEY', 'ASKED', 'HIM', 'TO', 'COME', 'TO', 'SEE', 'THEM', 'IN', 'CHELSEA', 'AND', 'THEY', 'SPOKE', 'VERY', 'TENDERLY', 'OF', 'HILDA'] +4446-2273-0001-1159: hyp=['THEY', 'ASKED', 'HIM', 'TO', 'COME', 'TO', 'SEE', 'THEM', 'IN', 'CHELSEA', 'AND', 'THEY', 'SPOKE', 'VERY', 'TENDERLY', 'OF', 'HILDA'] +4446-2273-0002-1160: ref=['LAMB', "WOULDN'T", 'CARE', 'A', 'GREAT', 'DEAL', 'ABOUT', 'MANY', 'OF', 'THEM', 'I', 'FANCY'] +4446-2273-0002-1160: hyp=['LAMB', "WOULDN'T", 'CARE', 'A', 'GREAT', 'DEAL', 'ABOUT', 'MANY', 'OF', 'THEM', 'I', 'FANCY'] +4446-2273-0003-1161: ref=['WHEN', 'BARTLEY', 'ARRIVED', 'AT', 'BEDFORD', 'SQUARE', 'ON', 'SUNDAY', 'EVENING', 'MARIE', 'THE', 'PRETTY', 'LITTLE', 
'FRENCH', 'GIRL', 'MET', 'HIM', 'AT', 'THE', 'DOOR', 'AND', 'CONDUCTED', 'HIM', 'UPSTAIRS'] +4446-2273-0003-1161: hyp=['WHEN', 'BARTLEY', 'ARRIVED', 'AT', 'BEDFORD', 'SQUARE', 'ON', 'SUNDAY', 'EVENING', 'MARIE', 'THE', 'PRETTY', 'LITTLE', 'FRENCH', 'GIRL', 'MET', 'HIM', 'AT', 'THE', 'DOOR', 'AND', 'CONDUCTED', 'HIM', 'UPSTAIRS'] +4446-2273-0004-1162: ref=['I', 'SHOULD', 'NEVER', 'HAVE', 'ASKED', 'YOU', 'IF', 'MOLLY', 'HAD', 'BEEN', 'HERE', 'FOR', 'I', 'REMEMBER', 'YOU', "DON'T", 'LIKE', 'ENGLISH', 'COOKERY'] +4446-2273-0004-1162: hyp=['I', 'SHOULD', 'NEVER', 'HAVE', 'ASKED', 'YOU', 'IF', 'MOLLY', 'HAD', 'BEEN', 'HERE', 'FOR', 'I', 'REMEMBER', 'YOU', "DON'T", 'LIKE', 'ENGLISH', 'COOKERY'] +4446-2273-0005-1163: ref=['I', "HAVEN'T", 'HAD', 'A', 'CHANCE', 'YET', 'TO', 'TELL', 'YOU', 'WHAT', 'A', 'JOLLY', 'LITTLE', 'PLACE', 'I', 'THINK', 'THIS', 'IS'] +4446-2273-0005-1163: hyp=['I', "HAVEN'T", 'HAD', 'A', 'CHANCE', 'YET', 'TO', 'TELL', 'YOU', 'WHAT', 'A', 'JOLLY', 'LITTLE', 'PLACE', 'I', 'THINK', 'THIS', 'IS'] +4446-2273-0006-1164: ref=['THEY', 'ARE', 'ALL', 'SKETCHES', 'MADE', 'ABOUT', 'THE', 'VILLA', "D'ESTE", 'YOU', 'SEE'] +4446-2273-0006-1164: hyp=['THEY', 'ARE', 'ALL', 'SKETCHES', 'MADE', 'ABOUT', 'THE', 'VILLA', 'DESTA', 'YOU', 'SEE'] +4446-2273-0007-1165: ref=['THOSE', 'FELLOWS', 'ARE', 'ALL', 'VERY', 'LOYAL', 'EVEN', 'MAINHALL'] +4446-2273-0007-1165: hyp=['THOSE', 'FELLOWS', 'ARE', 'ALL', 'VERY', 'LOYAL', 'EVEN', 'MAIN', 'HALL'] +4446-2273-0008-1166: ref=["I'VE", 'MANAGED', 'TO', 'SAVE', 'SOMETHING', 'EVERY', 'YEAR', 'AND', 'THAT', 'WITH', 'HELPING', 'MY', 'THREE', 'SISTERS', 'NOW', 'AND', 'THEN', 'AND', 'TIDING', 'POOR', 'COUSIN', 'MIKE', 'OVER', 'BAD', 'SEASONS'] +4446-2273-0008-1166: hyp=["I'VE", 'MANAGED', 'TO', 'SAVE', 'SOMETHING', 'EVERY', 'YEAR', 'AND', 'THAT', 'WITH', 'HELPING', 'MY', 'THREE', 'SISTERS', 'NOW', 'AND', 'THEN', 'AND', 'TIDING', 'POOR', 'COUSIN', 'MICHAEL', 'OVER', 'BAD', 'SEASONS'] +4446-2273-0009-1167: ref=["IT'S", 'NOT', 'PARTICULARLY', 'RARE', 'SHE', 'SAID', 'BUT', 'SOME', 'OF', 'IT', 'WAS', 'MY', "MOTHER'S"] +4446-2273-0009-1167: hyp=["IT'S", 'NOT', 'PARTICULARLY', 'RARE', 'SHE', 'SAID', 'BUT', 'SOME', 'OF', 'IT', 'WAS', 'MY', "MOTHER'S"] +4446-2273-0010-1168: ref=['THERE', 'WAS', 'WATERCRESS', 'SOUP', 'AND', 'SOLE', 'AND', 'A', 'DELIGHTFUL', 'OMELETTE', 'STUFFED', 'WITH', 'MUSHROOMS', 'AND', 'TRUFFLES', 'AND', 'TWO', 'SMALL', 'RARE', 'DUCKLINGS', 'AND', 'ARTICHOKES', 'AND', 'A', 'DRY', 'YELLOW', 'RHONE', 'WINE', 'OF', 'WHICH', 'BARTLEY', 'HAD', 'ALWAYS', 'BEEN', 'VERY', 'FOND'] +4446-2273-0010-1168: hyp=['THERE', 'WAS', 'WATERCRESS', 'SOUP', 'AND', 'SOLE', 'AND', 'A', 'DELIGHTFUL', 'OMELET', 'STUFFED', 'WITH', 'MUSHROOMS', 'AND', 'TRUFFLES', 'AND', 'TWO', 'SMALL', 'RARE', 'DUCKLINGS', 'AND', 'ART', 'OF', 'CHOKES', 'AND', 'A', 'DRY', 'YELLOW', 'ROAN', 'WINE', 'OF', 'WHICH', 'BARTLEY', 'HAD', 'ALWAYS', 'BEEN', 'VERY', 'FOND'] +4446-2273-0011-1169: ref=['THERE', 'IS', 'NOTHING', 'ELSE', 'THAT', 'LOOKS', 'SO', 'JOLLY'] +4446-2273-0011-1169: hyp=['THERE', 'IS', 'NOTHING', 'ELSE', 'THAT', 'LOOKS', 'SO', 'JOLLY'] +4446-2273-0012-1170: ref=['THANK', 'YOU', 'BUT', 'I', "DON'T", 'LIKE', 'IT', 'SO', 'WELL', 'AS', 'THIS'] +4446-2273-0012-1170: hyp=['THANK', 'YOU', 'BUT', 'I', "DON'T", 'LIKE', 'IT', 'SO', 'WELL', 'AS', 'THIS'] +4446-2273-0013-1171: ref=['HAVE', 'YOU', 'BEEN', 'IN', 'PARIS', 'MUCH', 'THESE', 'LATE', 'YEARS'] +4446-2273-0013-1171: hyp=['HAVE', 'YOU', 'BEEN', 'IN', 'PARIS', 'MUCH', 'THESE', 'LATE', 'YEARS'] +4446-2273-0014-1172: ref=['THERE', 'ARE', 'FEW', 
'CHANGES', 'IN', 'THE', 'OLD', 'QUARTER'] +4446-2273-0014-1172: hyp=['THERE', 'ARE', 'A', 'FEW', 'CHANGES', 'IN', 'THE', 'OLD', 'QUARTER'] +4446-2273-0015-1173: ref=["DON'T", 'I', 'THOUGH', "I'M", 'SO', 'SORRY', 'TO', 'HEAR', 'IT', 'HOW', 'DID', 'HER', 'SON', 'TURN', 'OUT'] +4446-2273-0015-1173: hyp=["DON'T", 'I', 'THOUGH', "I'M", 'SO', 'SORRY', 'TO', 'HEAR', 'IT', 'HOW', 'DID', 'HER', 'SON', 'TURN', 'OUT'] +4446-2273-0016-1174: ref=['HER', 'HAIR', 'IS', 'STILL', 'LIKE', 'FLAX', 'AND', 'HER', 'BLUE', 'EYES', 'ARE', 'JUST', 'LIKE', 'A', "BABY'S", 'AND', 'SHE', 'HAS', 'THE', 'SAME', 'THREE', 'FRECKLES', 'ON', 'HER', 'LITTLE', 'NOSE', 'AND', 'TALKS', 'ABOUT', 'GOING', 'BACK', 'TO', 'HER', 'BAINS', 'DE', 'MER'] +4446-2273-0016-1174: hyp=['HER', 'HAIR', 'IS', 'STILL', 'LIKE', 'FLAX', 'AND', 'HER', 'BLUE', 'EYES', 'ARE', 'JUST', 'LIKE', 'A', "BABY'S", 'AND', 'SHE', 'HAS', 'THE', 'SAME', 'THREE', 'FRECKLES', 'ON', 'HER', 'LITTLE', 'NOSE', 'AND', 'TALKS', 'ABOUT', 'GOING', 'BACK', 'TO', 'HER', 'BANDA', 'MARE'] +4446-2273-0017-1175: ref=['HOW', 'JOLLY', 'IT', 'WAS', 'BEING', 'YOUNG', 'HILDA'] +4446-2273-0017-1175: hyp=['HOW', 'JOLLY', 'IT', 'WAS', 'BEING', 'YOUNG', 'HILDA'] +4446-2273-0018-1176: ref=['DO', 'YOU', 'REMEMBER', 'THAT', 'FIRST', 'WALK', 'WE', 'TOOK', 'TOGETHER', 'IN', 'PARIS'] +4446-2273-0018-1176: hyp=['DO', 'YOU', 'REMEMBER', 'THAT', 'FIRST', 'WALK', 'WE', 'TOOK', 'TOGETHER', 'IN', 'PARIS'] +4446-2273-0019-1177: ref=['COME', "WE'LL", 'HAVE', 'OUR', 'COFFEE', 'IN', 'THE', 'OTHER', 'ROOM', 'AND', 'YOU', 'CAN', 'SMOKE'] +4446-2273-0019-1177: hyp=['COME', "WE'LL", 'HAVE', 'OUR', 'COFFEE', 'IN', 'THE', 'OTHER', 'ROOM', 'AND', 'YOU', 'CAN', 'SMOKE'] +4446-2273-0020-1178: ref=['I', 'THINK', 'WE', 'DID', 'SHE', 'ANSWERED', 'DEMURELY'] +4446-2273-0020-1178: hyp=['I', 'THINK', 'WE', 'DID', 'SHE', 'ANSWERED', 'DEMURELY'] +4446-2273-0021-1179: ref=['WHAT', 'SHE', 'WANTED', 'FROM', 'US', 'WAS', 'NEITHER', 'OUR', 'FLOWERS', 'NOR', 'OUR', 'FRANCS', 'BUT', 'JUST', 'OUR', 'YOUTH'] +4446-2273-0021-1179: hyp=['WHAT', 'SHE', 'WANTED', 'FROM', 'US', 'WAS', 'NEITHER', 'OUR', 'FLOWERS', 'NOR', 'OUR', 'FRANKS', 'BUT', 'JUST', 'OUR', 'YOUTH'] +4446-2273-0022-1180: ref=['THEY', 'WERE', 'BOTH', 'REMEMBERING', 'WHAT', 'THE', 'WOMAN', 'HAD', 'SAID', 'WHEN', 'SHE', 'TOOK', 'THE', 'MONEY', 'GOD', 'GIVE', 'YOU', 'A', 'HAPPY', 'LOVE'] +4446-2273-0022-1180: hyp=['THEY', 'WERE', 'BOTH', 'REMEMBERING', 'WHAT', 'THE', 'WOMAN', 'HAD', 'SAID', 'WHEN', 'SHE', 'TOOK', 'THE', 'MONEY', 'GOD', 'GIVE', 'YOU', 'A', 'HAPPY', 'LOVE'] +4446-2273-0023-1181: ref=['THE', 'STRANGE', 'WOMAN', 'AND', 'HER', 'PASSIONATE', 'SENTENCE', 'THAT', 'RANG', 'OUT', 'SO', 'SHARPLY', 'HAD', 'FRIGHTENED', 'THEM', 'BOTH'] +4446-2273-0023-1181: hyp=['THE', 'STRANGE', 'WOMAN', 'AND', 'HER', 'PASSIONATE', 'SENTENCE', 'THAT', 'RANG', 'OUT', 'SO', 'SHARPLY', 'HAD', 'FRIGHTENED', 'THEM', 'BOTH'] +4446-2273-0024-1182: ref=['BARTLEY', 'STARTED', 'WHEN', 'HILDA', 'RANG', 'THE', 'LITTLE', 'BELL', 'BESIDE', 'HER', 'DEAR', 'ME', 'WHY', 'DID', 'YOU', 'DO', 'THAT'] +4446-2273-0024-1182: hyp=['BARTLEY', 'STARTED', 'WHEN', 'HILDA', 'RANG', 'THE', 'LITTLE', 'BELL', 'BESIDE', 'HER', 'DEAR', 'ME', 'WHY', 'DID', 'YOU', 'DO', 'THAT'] +4446-2273-0025-1183: ref=['IT', 'WAS', 'VERY', 'JOLLY', 'HE', 'MURMURED', 'LAZILY', 'AS', 'MARIE', 'CAME', 'IN', 'TO', 'TAKE', 'AWAY', 'THE', 'COFFEE'] +4446-2273-0025-1183: hyp=['IT', 'WAS', 'VERY', 'JOLLY', 'HE', 'MURMURED', 'LAZILY', 'AS', 'MARIE', 'CAME', 'IN', 'TO', 'TAKE', 'AWAY', 'THE', 'COFFEE'] +4446-2273-0026-1184: ref=['HAVE', 
'I', 'TOLD', 'YOU', 'ABOUT', 'MY', 'NEW', 'PLAY'] +4446-2273-0026-1184: hyp=['HAVE', 'I', 'TOLD', 'YOU', 'ABOUT', 'MY', 'NEW', 'PLAY'] +4446-2273-0027-1185: ref=['WHEN', 'SHE', 'FINISHED', 'ALEXANDER', 'SHOOK', 'HIMSELF', 'OUT', 'OF', 'A', 'REVERIE'] +4446-2273-0027-1185: hyp=['WHEN', 'SHE', 'FINISHED', 'ALEXANDER', 'SHOOK', 'HIMSELF', 'OUT', 'OF', 'A', 'REVERIE'] +4446-2273-0028-1186: ref=['NONSENSE', 'OF', 'COURSE', 'I', "CAN'T", 'REALLY', 'SING', 'EXCEPT', 'THE', 'WAY', 'MY', 'MOTHER', 'AND', 'GRANDMOTHER', 'DID', 'BEFORE', 'ME'] +4446-2273-0028-1186: hyp=['NONSENSE', 'OF', 'COURSE', 'I', "CAN'T", 'REALLY', 'SING', 'EXCEPT', 'THE', 'WAY', 'MY', 'MOTHER', 'AND', 'GRANDMOTHER', 'DID', 'BEFORE', 'ME'] +4446-2273-0029-1187: ref=["IT'S", 'REALLY', 'TOO', 'WARM', 'IN', 'THIS', 'ROOM', 'TO', 'SING', "DON'T", 'YOU', 'FEEL', 'IT'] +4446-2273-0029-1187: hyp=["IT'S", 'REALLY', 'TOO', 'WARM', 'IN', 'THIS', 'ROOM', 'TO', 'SING', "DON'T", 'YOU', 'FEEL', 'IT'] +4446-2273-0030-1188: ref=['ALEXANDER', 'WENT', 'OVER', 'AND', 'OPENED', 'THE', 'WINDOW', 'FOR', 'HER'] +4446-2273-0030-1188: hyp=['ALEXANDER', 'WENT', 'OVER', 'AND', 'OPENED', 'THE', 'WINDOW', 'FOR', 'HER'] +4446-2273-0031-1189: ref=['THERE', 'JUST', 'IN', 'FRONT'] +4446-2273-0031-1189: hyp=['THERE', 'JUST', 'IN', 'FRONT'] +4446-2273-0032-1190: ref=['HE', 'STOOD', 'A', 'LITTLE', 'BEHIND', 'HER', 'AND', 'TRIED', 'TO', 'STEADY', 'HIMSELF', 'AS', 'HE', 'SAID', "IT'S", 'SOFT', 'AND', 'MISTY', 'SEE', 'HOW', 'WHITE', 'THE', 'STARS', 'ARE'] +4446-2273-0032-1190: hyp=['HE', 'STOOD', 'A', 'LITTLE', 'BEHIND', 'HER', 'AND', 'TRIED', 'TO', 'STEADY', 'HIMSELF', 'AS', 'HE', 'SAID', "IT'S", 'SOFT', 'AND', 'MISTY', 'SEE', 'HOW', 'WHITE', 'THE', 'STARS', 'ARE'] +4446-2273-0033-1191: ref=['FOR', 'A', 'LONG', 'TIME', 'NEITHER', 'HILDA', 'NOR', 'BARTLEY', 'SPOKE'] +4446-2273-0033-1191: hyp=['FOR', 'A', 'LONG', 'TIME', 'NEITHER', 'HILDA', 'NOR', 'BARTLEY', 'SPOKE'] +4446-2273-0034-1192: ref=['HE', 'FELT', 'A', 'TREMOR', 'RUN', 'THROUGH', 'THE', 'SLENDER', 'YELLOW', 'FIGURE', 'IN', 'FRONT', 'OF', 'HIM'] +4446-2273-0034-1192: hyp=['HE', 'FELT', 'A', 'TREMOR', 'RUN', 'THROUGH', 'THE', 'SLENDER', 'YELLOW', 'FIGURE', 'IN', 'FRONT', 'OF', 'HIM'] +4446-2273-0035-1193: ref=['BARTLEY', 'LEANED', 'OVER', 'HER', 'SHOULDER', 'WITHOUT', 'TOUCHING', 'HER', 'AND', 'WHISPERED', 'IN', 'HER', 'EAR', 'YOU', 'ARE', 'GIVING', 'ME', 'A', 'CHANCE', 'YES'] +4446-2273-0035-1193: hyp=['BARTLEY', 'LEANED', 'OVER', 'HER', 'SHOULDER', 'WITHOUT', 'TOUCHING', 'HER', 'AND', 'WHISPERED', 'IN', 'HER', 'EAR', 'YOU', 'ARE', 'GIVING', 'ME', 'A', 'CHANCE', 'YES'] +4446-2273-0036-1194: ref=['ALEXANDER', 'UNCLENCHED', 'THE', 'TWO', 'HANDS', 'AT', 'HIS', 'SIDES'] +4446-2273-0036-1194: hyp=['ALEXANDER', 'CLENCHED', 'THE', 'TWO', 'HANDS', 'AT', 'HIS', 'SIDES'] +4446-2275-0000-1195: ref=['THE', 'STOP', 'AT', 'QUEENSTOWN', 'THE', 'TEDIOUS', 'PASSAGE', 'UP', 'THE', 'MERSEY', 'WERE', 'THINGS', 'THAT', 'HE', 'NOTED', 'DIMLY', 'THROUGH', 'HIS', 'GROWING', 'IMPATIENCE'] +4446-2275-0000-1195: hyp=['THE', 'STOP', 'AT', 'QUEENSTOWN', 'THE', 'TEDIOUS', 'PASSAGE', 'UP', 'THE', 'MERCY', 'WERE', 'THINGS', 'THAT', 'HE', 'NOTED', 'DIMLY', 'THROUGH', 'HIS', 'GROWING', 'IMPATIENCE'] +4446-2275-0001-1196: ref=['SHE', 'BLUSHED', 'AND', 'SMILED', 'AND', 'FUMBLED', 'HIS', 'CARD', 'IN', 'HER', 'CONFUSION', 'BEFORE', 'SHE', 'RAN', 'UPSTAIRS'] +4446-2275-0001-1196: hyp=['SHE', 'BLUSHED', 'AND', 'SMILED', 'AND', 'FUMBLED', 'HIS', 'CARD', 'IN', 'HER', 'CONFUSION', 'BEFORE', 'SHE', 'RAN', 'UPSTAIRS'] +4446-2275-0002-1197: 
ref=['ALEXANDER', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'HALLWAY', 'BUTTONING', 'AND', 'UNBUTTONING', 'HIS', 'OVERCOAT', 'UNTIL', 'SHE', 'RETURNED', 'AND', 'TOOK', 'HIM', 'UP', 'TO', "HILDA'S", 'LIVING', 'ROOM'] +4446-2275-0002-1197: hyp=['ALEXANDER', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'HALLWAY', 'BUTTONING', 'AND', 'UNBUTTONING', 'HIS', 'OVERCOAT', 'UNTIL', 'SHE', 'RETURNED', 'AND', 'TOOK', 'HIM', 'UP', 'TO', "HILDA'S", 'LIVING', 'ROOM'] +4446-2275-0003-1198: ref=['THE', 'ROOM', 'WAS', 'EMPTY', 'WHEN', 'HE', 'ENTERED'] +4446-2275-0003-1198: hyp=['THE', 'ROOM', 'WAS', 'EMPTY', 'WHEN', 'HE', 'ENTERED'] +4446-2275-0004-1199: ref=['ALEXANDER', 'DID', 'NOT', 'SIT', 'DOWN'] +4446-2275-0004-1199: hyp=['ALEXANDER', 'DID', 'NOT', 'SIT', 'DOWN'] +4446-2275-0005-1200: ref=['I', 'FELT', 'IT', 'IN', 'MY', 'BONES', 'WHEN', 'I', 'WOKE', 'THIS', 'MORNING', 'THAT', 'SOMETHING', 'SPLENDID', 'WAS', 'GOING', 'TO', 'TURN', 'UP'] +4446-2275-0005-1200: hyp=['I', 'FELT', 'IT', 'IN', 'MY', 'BONES', 'WHEN', 'I', 'WOKE', 'THIS', 'MORNING', 'THAT', 'SOMETHING', 'SPLENDID', 'WAS', 'GOING', 'TO', 'TURN', 'UP'] +4446-2275-0006-1201: ref=['I', 'THOUGHT', 'IT', 'MIGHT', 'BE', 'SISTER', 'KATE', 'OR', 'COUSIN', 'MIKE', 'WOULD', 'BE', 'HAPPENING', 'ALONG'] +4446-2275-0006-1201: hyp=['I', 'THOUGHT', 'IT', 'MIGHT', 'BE', 'SISTER', 'KATE', 'OR', 'COUSIN', 'MIKE', 'WOULD', 'BE', 'HAPPENING', 'ALONG'] +4446-2275-0007-1202: ref=['SHE', 'PUSHED', 'HIM', 'TOWARD', 'THE', 'BIG', 'CHAIR', 'BY', 'THE', 'FIRE', 'AND', 'SAT', 'DOWN', 'ON', 'A', 'STOOL', 'AT', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'HEARTH', 'HER', 'KNEES', 'DRAWN', 'UP', 'TO', 'HER', 'CHIN', 'LAUGHING', 'LIKE', 'A', 'HAPPY', 'LITTLE', 'GIRL'] +4446-2275-0007-1202: hyp=['SHE', 'PUSHED', 'HIM', 'TOWARD', 'THE', 'BIG', 'CHAIR', 'BY', 'THE', 'FIRE', 'AND', 'SAT', 'DOWN', 'ON', 'A', 'STOOL', 'AT', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'HEARTH', 'HER', 'KNEES', 'DRAWN', 'UP', 'TO', 'HER', 'CHIN', 'LAUGHING', 'LIKE', 'A', 'HAPPY', 'LITTLE', 'GIRL'] +4446-2275-0008-1203: ref=['WHEN', 'DID', 'YOU', 'COME', 'BARTLEY', 'AND', 'HOW', 'DID', 'IT', 'HAPPEN', 'YOU', "HAVEN'T", 'SPOKEN', 'A', 'WORD'] +4446-2275-0008-1203: hyp=['WHEN', 'DID', 'YOU', 'COME', 'BARTLEY', 'AND', 'HOW', 'DID', 'IT', 'HAPPEN', 'YOU', "HAVEN'T", 'SPOKEN', 'A', 'WORD'] +4446-2275-0009-1204: ref=['I', 'GOT', 'IN', 'ABOUT', 'TEN', 'MINUTES', 'AGO'] +4446-2275-0009-1204: hyp=['I', 'GOT', 'IN', 'ABOUT', 'TEN', 'MINUTES', 'AGO'] +4446-2275-0010-1205: ref=['ALEXANDER', 'LEANED', 'FORWARD', 'AND', 'WARMED', 'HIS', 'HANDS', 'BEFORE', 'THE', 'BLAZE'] +4446-2275-0010-1205: hyp=['ALEXANDER', 'LEANED', 'FORWARD', 'AND', 'WARMED', 'HIS', 'HANDS', 'BEFORE', 'THE', 'BLAZE'] +4446-2275-0011-1206: ref=['BARTLEY', 'BENT', 'LOWER', 'OVER', 'THE', 'FIRE'] +4446-2275-0011-1206: hyp=['BARTLEY', 'BENT', 'LOWERED', 'OVER', 'THE', 'FIRE'] +4446-2275-0012-1207: ref=['SHE', 'LOOKED', 'AT', 'HIS', 'HEAVY', 'SHOULDERS', 'AND', 'BIG', 'DETERMINED', 'HEAD', 'THRUST', 'FORWARD', 'LIKE', 'A', 'CATAPULT', 'IN', 'LEASH'] +4446-2275-0012-1207: hyp=['SHE', 'LOOKED', 'AT', 'HIS', 'HEAVY', 'SHOULDERS', 'IN', 'BIG', 'DETERMINED', 'HEAD', 'THRUST', 'FORWARD', 'LIKE', 'A', 'CATAPULT', 'IN', 'LEASH'] +4446-2275-0013-1208: ref=["I'LL", 'DO', 'ANYTHING', 'YOU', 'WISH', 'ME', 'TO', 'BARTLEY', 'SHE', 'SAID', 'TREMULOUSLY'] +4446-2275-0013-1208: hyp=["I'LL", 'DO', 'ANYTHING', 'YOU', 'WISH', 'ME', 'TO', 'BARTLEY', 'SHE', 'SAID', 'TREMULOUSLY'] +4446-2275-0014-1209: ref=['I', "CAN'T", 'STAND', 'SEEING', 'YOU', 'MISERABLE'] +4446-2275-0014-1209: hyp=['I', 
"CAN'T", 'STAND', 'SEEING', 'YOU', 'MISERABLE'] +4446-2275-0015-1210: ref=['HE', 'PULLED', 'UP', 'A', 'WINDOW', 'AS', 'IF', 'THE', 'AIR', 'WERE', 'HEAVY'] +4446-2275-0015-1210: hyp=['HE', 'PULLED', 'UP', 'A', 'WINDOW', 'AS', 'IF', 'THE', 'AIR', 'WERE', 'HEAVY'] +4446-2275-0016-1211: ref=['HILDA', 'WATCHED', 'HIM', 'FROM', 'HER', 'CORNER', 'TREMBLING', 'AND', 'SCARCELY', 'BREATHING', 'DARK', 'SHADOWS', 'GROWING', 'ABOUT', 'HER', 'EYES', 'IT'] +4446-2275-0016-1211: hyp=['HILDA', 'WATCHED', 'HIM', 'FROM', 'THE', 'CORNER', 'TREMBLING', 'AND', 'SCARCELY', 'BREATHING', 'DARK', 'SHADOWS', 'GROWING', 'ABOUT', 'HER', 'EYES'] +4446-2275-0017-1212: ref=['BUT', "IT'S", 'WORSE', 'NOW', "IT'S", 'UNBEARABLE'] +4446-2275-0017-1212: hyp=['BUT', "IT'S", 'WORSE', 'NOW', "IT'S", 'UNBEARABLE'] +4446-2275-0018-1213: ref=['I', 'GET', 'NOTHING', 'BUT', 'MISERY', 'OUT', 'OF', 'EITHER'] +4446-2275-0018-1213: hyp=['I', 'GET', 'NOTHING', 'BUT', 'MISERY', 'OUT', 'OF', 'EITHER'] +4446-2275-0019-1214: ref=['THE', 'WORLD', 'IS', 'ALL', 'THERE', 'JUST', 'AS', 'IT', 'USED', 'TO', 'BE', 'BUT', 'I', "CAN'T", 'GET', 'AT', 'IT', 'ANY', 'MORE'] +4446-2275-0019-1214: hyp=['THE', 'WORLD', 'IS', 'ALL', 'THERE', 'JUST', 'AS', 'IT', 'USED', 'TO', 'BE', 'BUT', 'I', "CAN'T", 'GET', 'AT', 'IT', 'ANY', 'MORE'] +4446-2275-0020-1215: ref=['IT', 'WAS', 'MYSELF', 'I', 'WAS', 'DEFYING', 'HILDA'] +4446-2275-0020-1215: hyp=['IT', 'WAS', 'MYSELF', 'I', 'WAS', 'DEFYING', 'HELDA'] +4446-2275-0021-1216: ref=["HILDA'S", 'FACE', 'QUIVERED', 'BUT', 'SHE', 'WHISPERED', 'YES', 'I', 'THINK', 'IT', 'MUST', 'HAVE', 'BEEN'] +4446-2275-0021-1216: hyp=["HELDA'S", 'FACE', 'QUIVERED', 'BUT', 'SHE', 'WHISPERED', 'YES', 'I', 'THINK', 'IT', 'MUST', 'HAVE', 'BEEN'] +4446-2275-0022-1217: ref=['BUT', 'WHY', "DIDN'T", 'YOU', 'TELL', 'ME', 'WHEN', 'YOU', 'WERE', 'HERE', 'IN', 'THE', 'SUMMER'] +4446-2275-0022-1217: hyp=['BUT', 'WHY', "DIDN'T", 'YOU', 'TELL', 'ME', 'WHEN', 'YOU', 'WERE', 'HERE', 'IN', 'THE', 'SUMMER'] +4446-2275-0023-1218: ref=['ALEXANDER', 'GROANED', 'I', 'MEANT', 'TO', 'BUT', 'SOMEHOW', 'I', "COULDN'T"] +4446-2275-0023-1218: hyp=['ALEXANDER', 'GROANED', 'I', 'MEANT', 'TO', 'BUT', 'SOMEHOW', 'I', "COULDN'T"] +4446-2275-0024-1219: ref=['SHE', 'PRESSED', 'HIS', 'HAND', 'GENTLY', 'IN', 'GRATITUDE'] +4446-2275-0024-1219: hyp=['SHE', 'PRESSED', 'HIS', 'HAND', 'GENTLY', 'IN', 'GRATITUDE'] +4446-2275-0025-1220: ref=["WEREN'T", 'YOU', 'HAPPY', 'THEN', 'AT', 'ALL'] +4446-2275-0025-1220: hyp=["WEREN'T", 'YOU', 'HAPPY', 'THEN', 'AT', 'ALL'] +4446-2275-0026-1221: ref=['SHE', 'CLOSED', 'HER', 'EYES', 'AND', 'TOOK', 'A', 'DEEP', 'BREATH', 'AS', 'IF', 'TO', 'DRAW', 'IN', 'AGAIN', 'THE', 'FRAGRANCE', 'OF', 'THOSE', 'DAYS'] +4446-2275-0026-1221: hyp=['SHE', 'CLOSED', 'HER', 'EYES', 'AND', 'TOOK', 'A', 'DEEP', 'BREATH', 'AS', 'IF', 'TO', 'DRAW', 'IN', 'AGAIN', 'THE', 'FRAGRANCE', 'OF', 'THOSE', 'DAYS'] +4446-2275-0027-1222: ref=['HE', 'MOVED', 'UNEASILY', 'AND', 'HIS', 'CHAIR', 'CREAKED'] +4446-2275-0027-1222: hyp=['HE', 'MOVED', 'UNEASILY', 'AND', 'HIS', 'CHAIR', 'CREAKED'] +4446-2275-0028-1223: ref=['YES', 'YES', 'SHE', 'HURRIED', 'PULLING', 'HER', 'HAND', 'GENTLY', 'AWAY', 'FROM', 'HIM'] +4446-2275-0028-1223: hyp=['YES', 'YES', 'SHE', 'HURRIED', 'PULLING', 'HER', 'HAND', 'GENTLY', 'AWAY', 'FROM', 'HIM'] +4446-2275-0029-1224: ref=['PLEASE', 'TELL', 'ME', 'ONE', 'THING', 'BARTLEY', 'AT', 'LEAST', 'TELL', 'ME', 'THAT', 'YOU', 'BELIEVE', 'I', 'THOUGHT', 'I', 'WAS', 'MAKING', 'YOU', 'HAPPY'] +4446-2275-0029-1224: hyp=['PLEASE', 'TELL', 'ME', 'ONE', 'THING', 'BARTLEY', 
'AT', 'LEAST', 'TELL', 'ME', 'THAT', 'YOU', 'BELIEVE', 'I', 'THOUGHT', 'I', 'WAS', 'MAKING', 'YOU', 'HAPPY'] +4446-2275-0030-1225: ref=['YES', 'HILDA', 'I', 'KNOW', 'THAT', 'HE', 'SAID', 'SIMPLY'] +4446-2275-0030-1225: hyp=['YES', 'HELDA', 'I', 'KNOW', 'THAT', 'HE', 'SAID', 'SIMPLY'] +4446-2275-0031-1226: ref=['I', 'UNDERSTAND', 'BARTLEY', 'I', 'WAS', 'WRONG'] +4446-2275-0031-1226: hyp=['I', 'UNDERSTAND', 'BARTLEY', 'I', 'WAS', 'WRONG'] +4446-2275-0032-1227: ref=['BUT', 'I', "DIDN'T", 'KNOW', "YOU'VE", 'ONLY', 'TO', 'TELL', 'ME', 'NOW'] +4446-2275-0032-1227: hyp=['BUT', 'I', "DIDN'T", 'KNOW', "YOU'VE", 'ONLY', 'TO', 'TELL', 'ME', 'NOW'] +4446-2275-0033-1228: ref=['WHAT', 'I', 'MEAN', 'IS', 'THAT', 'I', 'WANT', 'YOU', 'TO', 'PROMISE', 'NEVER', 'TO', 'SEE', 'ME', 'AGAIN', 'NO', 'MATTER', 'HOW', 'OFTEN', 'I', 'COME', 'NO', 'MATTER', 'HOW', 'HARD', 'I', 'BEG'] +4446-2275-0033-1228: hyp=['WHAT', 'I', 'MEAN', 'IS', 'THAT', 'I', 'WANT', 'YOU', 'TO', 'PROMISE', 'NEVER', 'TO', 'SEE', 'ME', 'AGAIN', 'NO', 'MATTER', 'HOW', 'OFTEN', 'I', 'COME', 'NO', 'MATTER', 'HOW', 'HARD', 'I', 'BEG'] +4446-2275-0034-1229: ref=['KEEP', 'AWAY', 'IF', 'YOU', 'WISH', 'WHEN', 'HAVE', 'I', 'EVER', 'FOLLOWED', 'YOU'] +4446-2275-0034-1229: hyp=['KEEP', 'AWAY', 'IF', 'YOU', 'WISH', 'WHEN', 'HAVE', 'I', 'EVER', 'FOLLOWED', 'YOU'] +4446-2275-0035-1230: ref=['ALEXANDER', 'ROSE', 'AND', 'SHOOK', 'HIMSELF', 'ANGRILY', 'YES', 'I', 'KNOW', "I'M", 'COWARDLY'] +4446-2275-0035-1230: hyp=['ALEXANDER', 'ROSE', 'AND', 'SHOOK', 'HIMSELF', 'ANGRILY', 'YES', 'I', 'KNOW', "I'M", 'COWARDLY'] +4446-2275-0036-1231: ref=['HE', 'TOOK', 'HER', 'ROUGHLY', 'IN', 'HIS', 'ARMS', 'DO', 'YOU', 'KNOW', 'WHAT', 'I', 'MEAN'] +4446-2275-0036-1231: hyp=['HE', 'TOOK', 'A', 'ROUGHLY', 'IN', 'HIS', 'ARMS', 'DO', 'YOU', 'KNOW', 'WHAT', 'I', 'MEAN'] +4446-2275-0037-1232: ref=['OH', 'BARTLEY', 'WHAT', 'AM', 'I', 'TO', 'DO'] +4446-2275-0037-1232: hyp=['OH', 'BARTLEY', 'WHAT', 'AM', 'I', 'TO', 'DO'] +4446-2275-0038-1233: ref=['I', 'WILL', 'ASK', 'THE', 'LEAST', 'IMAGINABLE', 'BUT', 'I', 'MUST', 'HAVE', 'SOMETHING'] +4446-2275-0038-1233: hyp=['I', 'WILL', 'ASK', 'THE', 'LEAST', 'IMAGINABLE', 'BUT', 'I', 'MUST', 'HAVE', 'SOMETHING'] +4446-2275-0039-1234: ref=['I', 'MUST', 'KNOW', 'ABOUT', 'YOU'] +4446-2275-0039-1234: hyp=['I', 'MUST', 'KNOW', 'ABOUT', 'YOU'] +4446-2275-0040-1235: ref=['THE', 'SIGHT', 'OF', 'YOU', 'BARTLEY', 'TO', 'SEE', 'YOU', 'LIVING', 'AND', 'HAPPY', 'AND', 'SUCCESSFUL', 'CAN', 'I', 'NEVER', 'MAKE', 'YOU', 'UNDERSTAND', 'WHAT', 'THAT', 'MEANS', 'TO', 'ME'] +4446-2275-0040-1235: hyp=['THE', 'SIGHT', 'OF', 'YOU', 'BARTLEY', 'TO', 'SEE', 'YOU', 'LIVING', 'AND', 'HAPPY', 'AND', 'SUCCESSFUL', 'CAN', 'I', 'NEVER', 'MAKE', 'YOU', 'UNDERSTAND', 'WHAT', 'THAT', 'MEANS', 'TO', 'ME'] +4446-2275-0041-1236: ref=['YOU', 'SEE', 'LOVING', 'SOME', 'ONE', 'AS', 'I', 'LOVE', 'YOU', 'MAKES', 'THE', 'WHOLE', 'WORLD', 'DIFFERENT'] +4446-2275-0041-1236: hyp=['YOU', 'SEE', 'LOVING', 'SOME', 'ONE', 'AS', 'I', 'LOVE', 'YOU', 'MAKES', 'THE', 'WHOLE', 'WORLD', 'DIFFERENT'] +4446-2275-0042-1237: ref=['AND', 'THEN', 'YOU', 'CAME', 'BACK', 'NOT', 'CARING', 'VERY', 'MUCH', 'BUT', 'IT', 'MADE', 'NO', 'DIFFERENCE'] +4446-2275-0042-1237: hyp=['AND', 'THEN', 'YOU', 'CAME', 'BACK', 'NOT', 'CARING', 'VERY', 'MUCH', 'BUT', 'IT', 'MADE', 'NO', 'DIFFERENCE'] +4446-2275-0043-1238: ref=['BARTLEY', 'BENT', 'OVER', 'AND', 'TOOK', 'HER', 'IN', 'HIS', 'ARMS', 'KISSING', 'HER', 'MOUTH', 'AND', 'HER', 'WET', 'TIRED', 'EYES'] +4446-2275-0043-1238: hyp=['BARTLEY', 'BENT', 'OVER', 'AND', 'TOOK', 
'HER', 'IN', 'HIS', 'ARMS', 'KISSING', 'HER', 'MOUTH', 'AND', 'HER', 'WET', 'TIRED', 'EYES'] +4446-2275-0044-1239: ref=["DON'T", 'CRY', "DON'T", 'CRY', 'HE', 'WHISPERED'] +4446-2275-0044-1239: hyp=['A', 'TALL', 'CRY', "DON'T", 'CRY', 'HE', 'WHISPERED'] +4446-2275-0045-1240: ref=["WE'VE", 'TORTURED', 'EACH', 'OTHER', 'ENOUGH', 'FOR', 'TONIGHT'] +4446-2275-0045-1240: hyp=['WITH', 'TORTURED', 'EACH', 'OTHER', 'ENOUGH', 'FOR', 'TO', 'NIGHT'] +4507-16021-0000-1469: ref=['CHAPTER', 'ONE', 'ORIGIN'] +4507-16021-0000-1469: hyp=['CHAPTER', 'ONE', 'ORIGIN'] +4507-16021-0001-1470: ref=['IT', 'ENGENDERS', 'A', 'WHOLE', 'WORLD', 'LA', 'PEGRE', 'FOR', 'WHICH', 'READ', 'THEFT', 'AND', 'A', 'HELL', 'LA', 'PEGRENNE', 'FOR', 'WHICH', 'READ', 'HUNGER'] +4507-16021-0001-1470: hyp=['IT', 'ENGENDERS', 'A', 'WHOLE', 'WORLD', 'LAPE', 'FOR', 'WHICH', 'RED', 'THEFT', 'AND', 'A', 'HELL', 'LA', 'PAGRIN', 'FOR', 'WHICH', 'RED', 'HUNGER'] +4507-16021-0002-1471: ref=['THUS', 'IDLENESS', 'IS', 'THE', 'MOTHER'] +4507-16021-0002-1471: hyp=['THUS', 'IDLENESS', 'IS', 'THE', 'MOTHER'] +4507-16021-0003-1472: ref=['SHE', 'HAS', 'A', 'SON', 'THEFT', 'AND', 'A', 'DAUGHTER', 'HUNGER'] +4507-16021-0003-1472: hyp=['SHE', 'HAS', 'A', 'SON', 'THEFT', 'AND', 'A', 'DAUGHTER', 'HUNGER'] +4507-16021-0004-1473: ref=['WHAT', 'IS', 'SLANG'] +4507-16021-0004-1473: hyp=['WHAT', 'IS', 'SLANG'] +4507-16021-0005-1474: ref=['WE', 'HAVE', 'NEVER', 'UNDERSTOOD', 'THIS', 'SORT', 'OF', 'OBJECTIONS'] +4507-16021-0005-1474: hyp=['WE', 'HAVE', 'NEVER', 'UNDERSTOOD', 'THIS', 'SORT', 'OF', 'OBJECTIONS'] +4507-16021-0006-1475: ref=['SLANG', 'IS', 'ODIOUS'] +4507-16021-0006-1475: hyp=['SLANG', 'IS', 'ODIOUS'] +4507-16021-0007-1476: ref=['SLANG', 'MAKES', 'ONE', 'SHUDDER'] +4507-16021-0007-1476: hyp=['SLANG', 'MAKES', 'ONE', 'SHUDDER'] +4507-16021-0008-1477: ref=['WHO', 'DENIES', 'THAT', 'OF', 'COURSE', 'IT', 'DOES'] +4507-16021-0008-1477: hyp=['WHO', 'DENIES', 'THAT', 'OF', 'COURSE', 'IT', 'DOES'] +4507-16021-0009-1478: ref=['WHEN', 'IT', 'IS', 'A', 'QUESTION', 'OF', 'PROBING', 'A', 'WOUND', 'A', 'GULF', 'A', 'SOCIETY', 'SINCE', 'WHEN', 'HAS', 'IT', 'BEEN', 'CONSIDERED', 'WRONG', 'TO', 'GO', 'TOO', 'FAR', 'TO', 'GO', 'TO', 'THE', 'BOTTOM'] +4507-16021-0009-1478: hyp=['WHEN', 'IT', 'IS', 'A', 'QUESTION', 'OF', 'PROBING', 'A', 'WOUND', 'A', 'GULF', 'A', 'SOCIETY', 'SINCE', 'ONE', 'HAS', 'IT', 'BEEN', 'CONSIDERED', 'WRONG', 'TO', 'GO', 'TOO', 'FAR', 'TO', 'GO', 'TO', 'THE', 'BOTTOM'] +4507-16021-0010-1479: ref=['WE', 'HAVE', 'ALWAYS', 'THOUGHT', 'THAT', 'IT', 'WAS', 'SOMETIMES', 'A', 'COURAGEOUS', 'ACT', 'AND', 'AT', 'LEAST', 'A', 'SIMPLE', 'AND', 'USEFUL', 'DEED', 'WORTHY', 'OF', 'THE', 'SYMPATHETIC', 'ATTENTION', 'WHICH', 'DUTY', 'ACCEPTED', 'AND', 'FULFILLED', 'MERITS'] +4507-16021-0010-1479: hyp=['WE', 'HAVE', 'ALWAYS', 'THOUGHT', 'THAT', 'IT', 'WAS', 'SOMETIMES', 'A', 'COURAGEOUS', 'ACT', 'AND', 'AT', 'LEAST', 'A', 'SIMPLE', 'AND', 'USEFUL', 'DEED', 'WORTHY', 'OF', 'THE', 'SYMPATHETIC', 'ATTENTION', 'WHICH', 'DUTY', 'ACCEPTED', 'AND', 'FULFILLED', 'MERITS'] +4507-16021-0011-1480: ref=['WHY', 'SHOULD', 'ONE', 'NOT', 'EXPLORE', 'EVERYTHING', 'AND', 'STUDY', 'EVERYTHING'] +4507-16021-0011-1480: hyp=['WHY', 'SHOULD', 'ONE', 'NOT', 'EXPLORE', 'EVERYTHING', 'AND', 'STUDY', 'EVERYTHING'] +4507-16021-0012-1481: ref=['WHY', 'SHOULD', 'ONE', 'HALT', 'ON', 'THE', 'WAY'] +4507-16021-0012-1481: hyp=['WHY', 'SHOULD', 'ONE', 'HALT', 'ON', 'THE', 'WAY'] +4507-16021-0013-1482: ref=['NOTHING', 'IS', 'MORE', 'LUGUBRIOUS', 'THAN', 'THE', 'CONTEMPLATION', 'THUS', 'IN', 'ITS', 
'NUDITY', 'IN', 'THE', 'BROAD', 'LIGHT', 'OF', 'THOUGHT', 'OF', 'THE', 'HORRIBLE', 'SWARMING', 'OF', 'SLANG'] +4507-16021-0013-1482: hyp=['NOTHING', 'IS', 'MORE', 'LUGUBRIOUS', 'THAN', 'THE', 'CONTEMPLATION', 'THUS', 'IN', 'ITS', 'NUDITY', 'IN', 'THE', 'BROAD', 'LIGHT', 'OF', 'THOUGHT', 'OF', 'THE', 'HORRIBLE', 'SWARMING', 'OF', 'SLANG'] +4507-16021-0014-1483: ref=['NOW', 'WHEN', 'HAS', 'HORROR', 'EVER', 'EXCLUDED', 'STUDY'] +4507-16021-0014-1483: hyp=['NO', 'WHEN', 'HAS', 'HORROR', 'EVER', 'EXCLUDED', 'STUDY'] +4507-16021-0015-1484: ref=['SINCE', 'WHEN', 'HAS', 'MALADY', 'BANISHED', 'MEDICINE'] +4507-16021-0015-1484: hyp=['SINCE', 'WHEN', 'HAS', 'MALADY', 'BANISHED', 'MEDICINE'] +4507-16021-0016-1485: ref=['CAN', 'ONE', 'IMAGINE', 'A', 'NATURALIST', 'REFUSING', 'TO', 'STUDY', 'THE', 'VIPER', 'THE', 'BAT', 'THE', 'SCORPION', 'THE', 'CENTIPEDE', 'THE', 'TARANTULA', 'AND', 'ONE', 'WHO', 'WOULD', 'CAST', 'THEM', 'BACK', 'INTO', 'THEIR', 'DARKNESS', 'SAYING', 'OH', 'HOW', 'UGLY', 'THAT', 'IS'] +4507-16021-0016-1485: hyp=['CAN', 'ONE', 'IMAGINE', 'A', 'NATURALIST', 'REFUSING', 'TO', 'STUDY', 'THE', 'VIPER', 'THE', 'BAT', 'THE', 'SCORPION', 'THE', 'CENTIPEDE', 'THE', 'TURANSULA', 'AND', 'ONE', 'WHO', 'WOULD', 'CAST', 'THEM', 'BACK', 'INTO', 'THEIR', 'DARKNESS', 'SAYING', 'O', 'HOW', 'UGLY', 'THAT', 'IS'] +4507-16021-0017-1486: ref=['HE', 'WOULD', 'BE', 'LIKE', 'A', 'PHILOLOGIST', 'REFUSING', 'TO', 'EXAMINE', 'A', 'FACT', 'IN', 'LANGUAGE', 'A', 'PHILOSOPHER', 'HESITATING', 'TO', 'SCRUTINIZE', 'A', 'FACT', 'IN', 'HUMANITY'] +4507-16021-0017-1486: hyp=['HE', 'WOULD', 'BE', 'LIKE', 'A', 'PHILOLOGIST', 'REFUSING', 'TO', 'EXAMINE', 'A', 'FACT', 'IN', 'LANGUAGE', 'A', 'PHILOSOPHER', 'HESITATING', 'TO', 'SCRUTINIZE', 'A', 'FACT', 'IN', 'HUMANITY'] +4507-16021-0018-1487: ref=['WHAT', 'IS', 'SLANG', 'PROPERLY', 'SPEAKING'] +4507-16021-0018-1487: hyp=['WHAT', 'IS', 'SLANG', 'PROPERLY', 'SPEAKING'] +4507-16021-0019-1488: ref=['IT', 'IS', 'THE', 'LANGUAGE', 'OF', 'WRETCHEDNESS'] +4507-16021-0019-1488: hyp=['IT', 'IS', 'THE', 'LANGUAGE', 'OF', 'WRETCHEDNESS'] +4507-16021-0020-1489: ref=['WE', 'MAY', 'BE', 'STOPPED', 'THE', 'FACT', 'MAY', 'BE', 'PUT', 'TO', 'US', 'IN', 'GENERAL', 'TERMS', 'WHICH', 'IS', 'ONE', 'WAY', 'OF', 'ATTENUATING', 'IT', 'WE', 'MAY', 'BE', 'TOLD', 'THAT', 'ALL', 'TRADES', 'PROFESSIONS', 'IT', 'MAY', 'BE', 'ADDED', 'ALL', 'THE', 'ACCIDENTS', 'OF', 'THE', 'SOCIAL', 'HIERARCHY', 'AND', 'ALL', 'FORMS', 'OF', 'INTELLIGENCE', 'HAVE', 'THEIR', 'OWN', 'SLANG'] +4507-16021-0020-1489: hyp=['WE', 'MAY', 'BE', 'STOPPED', 'THE', 'FACT', 'MAY', 'BE', 'PUT', 'TO', 'US', 'IN', 'GENERAL', 'TERMS', 'WHICH', 'IS', 'ONE', 'WAY', 'OF', 'ATTENUATING', 'IT', 'WE', 'MAY', 'BE', 'TOLD', 'THAT', 'ALL', 'TRADES', 'PROFESSIONS', 'IT', 'MAY', 'BE', 'ADDED', 'ALL', 'THE', 'ACCIDENTS', 'OF', 'THE', 'SOCIAL', 'HIERARCHY', 'AND', 'ALL', 'FORMS', 'OF', 'INTELLIGENCE', 'HAVE', 'THEIR', 'OWN', 'SLANG'] +4507-16021-0021-1490: ref=['THE', 'PAINTER', 'WHO', 'SAYS', 'MY', 'GRINDER', 'THE', 'NOTARY', 'WHO', 'SAYS', 'MY', 'SKIP', 'THE', 'GUTTER', 'THE', 'HAIRDRESSER', 'WHO', 'SAYS', 'MY', 'MEALYBACK', 'THE', 'COBBLER', 'WHO', 'SAYS', 'MY', 'CUB', 'TALKS', 'SLANG'] +4507-16021-0021-1490: hyp=['THE', 'PAINTER', 'WHO', 'SAYS', 'MY', 'GRINDER', 'THE', 'NOTARY', 'WHO', 'SAYS', 'MY', 'SKIP', 'THE', 'GUTTER', 'THE', 'HAIR', 'DRESSER', 'WHO', 'SAYS', 'MY', 'MEALEY', 'BACK', 'THE', 'COBBLER', 'WHO', 'SAYS', 'MY', 'CUB', 'TALKS', 'SLING'] +4507-16021-0022-1491: ref=['THERE', 'IS', 'THE', 'SLANG', 'OF', 'THE', 'AFFECTED', 'LADY', 'AS', 
'WELL', 'AS', 'OF', 'THE', 'PRECIEUSES'] +4507-16021-0022-1491: hyp=['THERE', 'IS', 'THE', 'SLAYING', 'OF', 'THE', 'AFFECTED', 'LADY', 'AS', 'WELL', 'AS', 'OF', 'THE', 'PURSUS'] +4507-16021-0023-1492: ref=['THE', 'SUGAR', 'MANUFACTURER', 'WHO', 'SAYS', 'LOAF', 'CLARIFIED', 'LUMPS', 'BASTARD', 'COMMON', 'BURNT', 'THIS', 'HONEST', 'MANUFACTURER', 'TALKS', 'SLANG'] +4507-16021-0023-1492: hyp=['THE', 'SUGAR', 'MANUFACTURER', 'WHO', 'SAYS', 'LOAF', 'CLARIFIED', 'LUMPS', 'BASTARD', 'COMMON', 'BURNT', 'THIS', 'HONEST', 'MANUFACTURER', 'TALKS', 'SLANG'] +4507-16021-0024-1493: ref=['ALGEBRA', 'MEDICINE', 'BOTANY', 'HAVE', 'EACH', 'THEIR', 'SLANG'] +4507-16021-0024-1493: hyp=['ALGEBRA', 'MEDICINE', 'BARTANY', 'HAVE', 'EACH', 'THEIR', 'SLANG'] +4507-16021-0025-1494: ref=['TO', 'MEET', 'THE', 'NEEDS', 'OF', 'THIS', 'CONFLICT', 'WRETCHEDNESS', 'HAS', 'INVENTED', 'A', 'LANGUAGE', 'OF', 'COMBAT', 'WHICH', 'IS', 'SLANG'] +4507-16021-0025-1494: hyp=['TO', 'MEET', 'THE', 'NEEDS', 'OF', 'THIS', 'CONFLICT', 'WRETCHEDNESS', 'HAS', 'INVENTED', 'A', 'LANGUAGE', 'OF', 'COMBAT', 'WHICH', 'IS', 'SLANG'] +4507-16021-0026-1495: ref=['TO', 'KEEP', 'AFLOAT', 'AND', 'TO', 'RESCUE', 'FROM', 'OBLIVION', 'TO', 'HOLD', 'ABOVE', 'THE', 'GULF', 'WERE', 'IT', 'BUT', 'A', 'FRAGMENT', 'OF', 'SOME', 'LANGUAGE', 'WHICH', 'MAN', 'HAS', 'SPOKEN', 'AND', 'WHICH', 'WOULD', 'OTHERWISE', 'BE', 'LOST', 'THAT', 'IS', 'TO', 'SAY', 'ONE', 'OF', 'THE', 'ELEMENTS', 'GOOD', 'OR', 'BAD', 'OF', 'WHICH', 'CIVILIZATION', 'IS', 'COMPOSED', 'OR', 'BY', 'WHICH', 'IT', 'IS', 'COMPLICATED', 'TO', 'EXTEND', 'THE', 'RECORDS', 'OF', 'SOCIAL', 'OBSERVATION', 'IS', 'TO', 'SERVE', 'CIVILIZATION', 'ITSELF'] +4507-16021-0026-1495: hyp=['TO', 'KEEP', 'AFLOAT', 'AND', 'TO', 'RESCUE', 'FROM', 'OBLIVION', 'TO', 'HOLD', 'ABOVE', 'THE', 'GULF', 'WHERE', 'IT', 'BUT', 'A', 'FRAGMENT', 'OF', 'SOME', 'LANGUAGE', 'WHICH', 'MAN', 'HAS', 'SPOKEN', 'AND', 'WHICH', 'WOULD', 'OTHERWISE', 'BE', 'LOST', 'THAT', 'IS', 'TO', 'SAY', 'ONE', 'OF', 'THE', 'ELEMENTS', 'GOOD', 'OR', 'BAD', 'OF', 'WHICH', 'CIVILIZATION', 'IS', 'COMPOSED', 'OR', 'BY', 'WHICH', 'IT', 'IS', 'COMPLICATED', 'TO', 'EXTEND', 'THE', 'RECORDS', 'OF', 'SOCIAL', 'OBSERVATION', 'IS', 'TO', 'SERVE', 'CIVILIZATION', 'ITSELF'] +4507-16021-0027-1496: ref=['PHOENICIAN', 'VERY', 'GOOD'] +4507-16021-0027-1496: hyp=['PHOENICIAN', 'VERY', 'GOOD'] +4507-16021-0028-1497: ref=['EVEN', 'DIALECT', 'LET', 'THAT', 'PASS'] +4507-16021-0028-1497: hyp=['EVEN', 'DIALECT', 'LET', 'THAT', 'PASS'] +4507-16021-0029-1498: ref=['TO', 'THIS', 'WE', 'REPLY', 'IN', 'ONE', 'WORD', 'ONLY'] +4507-16021-0029-1498: hyp=['TO', 'THIS', 'WE', 'REPLY', 'IN', 'ONE', 'WORD', 'ONLY'] +4507-16021-0030-1499: ref=['ASSUREDLY', 'IF', 'THE', 'TONGUE', 'WHICH', 'A', 'NATION', 'OR', 'A', 'PROVINCE', 'HAS', 'SPOKEN', 'IS', 'WORTHY', 'OF', 'INTEREST', 'THE', 'LANGUAGE', 'WHICH', 'HAS', 'BEEN', 'SPOKEN', 'BY', 'A', 'MISERY', 'IS', 'STILL', 'MORE', 'WORTHY', 'OF', 'ATTENTION', 'AND', 'STUDY'] +4507-16021-0030-1499: hyp=['ASSUREDLY', 'IF', 'THE', 'TONGUE', 'WHICH', 'A', 'NATION', 'OR', 'A', 'PROVINCE', 'HAS', 'SPOKEN', 'IS', 'WORTHY', 'OF', 'INTEREST', 'THE', 'LANGUAGE', 'WHICH', 'HAS', 'BEEN', 'SPOKEN', 'BY', 'A', 'MISERY', 'IS', 'STILL', 'MORE', 'WORTHY', 'OF', 'ATTENTION', 'AND', 'STUDY'] +4507-16021-0031-1500: ref=['AND', 'THEN', 'WE', 'INSIST', 'UPON', 'IT', 'THE', 'STUDY', 'OF', 'SOCIAL', 'DEFORMITIES', 'AND', 'INFIRMITIES', 'AND', 'THE', 'TASK', 'OF', 'POINTING', 'THEM', 'OUT', 'WITH', 'A', 'VIEW', 'TO', 'REMEDY', 'IS', 'NOT', 'A', 'BUSINESS', 'IN', 'WHICH', 
'CHOICE', 'IS', 'PERMITTED'] +4507-16021-0031-1500: hyp=['AND', 'THEN', 'WE', 'INSIST', 'UPON', 'IT', 'THE', 'STUDY', 'OF', 'SOCIAL', 'DEFORMITIES', 'AND', 'INFIRMITIES', 'AND', 'THE', 'TASK', 'OF', 'POINTING', 'THEM', 'OUT', 'WITH', 'THE', 'VIEW', 'TO', 'REMEDY', 'IS', 'NOT', 'A', 'BUSINESS', 'IN', 'WHICH', 'CHOICES', 'PERMITTED'] +4507-16021-0032-1501: ref=['HE', 'MUST', 'DESCEND', 'WITH', 'HIS', 'HEART', 'FULL', 'OF', 'CHARITY', 'AND', 'SEVERITY', 'AT', 'THE', 'SAME', 'TIME', 'AS', 'A', 'BROTHER', 'AND', 'AS', 'A', 'JUDGE', 'TO', 'THOSE', 'IMPENETRABLE', 'CASEMATES', 'WHERE', 'CRAWL', 'PELL', 'MELL', 'THOSE', 'WHO', 'BLEED', 'AND', 'THOSE', 'WHO', 'DEAL', 'THE', 'BLOW', 'THOSE', 'WHO', 'WEEP', 'AND', 'THOSE', 'WHO', 'CURSE', 'THOSE', 'WHO', 'FAST', 'AND', 'THOSE', 'WHO', 'DEVOUR', 'THOSE', 'WHO', 'ENDURE', 'EVIL', 'AND', 'THOSE', 'WHO', 'INFLICT', 'IT'] +4507-16021-0032-1501: hyp=['HE', 'MUST', 'DESCEND', 'WITH', 'HIS', 'HEART', 'FULL', 'OF', 'CHARITY', 'AND', 'SEVERITY', 'AT', 'THE', 'SAME', 'TIME', 'AS', 'A', 'BROTHER', 'AND', 'AS', 'A', 'JUDGE', 'TO', 'THOSE', 'IMPENETRABLE', 'CASEMATES', 'WERE', 'CRAWL', 'PELL', 'MELL', 'THOSE', 'WHO', 'BLEED', 'AND', 'THOSE', 'WHO', 'DEAL', 'THE', 'BLOW', 'THOSE', 'WHO', 'WEEP', 'IN', 'THOSE', 'WHO', 'CURSE', 'THOSE', 'WHO', 'FAST', 'IN', 'THOSE', 'WHO', 'DEVOUR', 'THOSE', 'WHO', 'ENDURE', 'EVIL', 'AND', 'THOSE', 'WHO', 'INFLICT', 'IT'] +4507-16021-0033-1502: ref=['DO', 'WE', 'REALLY', 'KNOW', 'THE', 'MOUNTAIN', 'WELL', 'WHEN', 'WE', 'ARE', 'NOT', 'ACQUAINTED', 'WITH', 'THE', 'CAVERN'] +4507-16021-0033-1502: hyp=['DO', 'WE', 'REALLY', 'KNOW', 'THE', 'MOUNTAIN', 'WELL', 'WHEN', 'WE', 'ARE', 'NOT', 'ACQUAINTED', 'WITH', 'THE', 'CAVERN'] +4507-16021-0034-1503: ref=['THEY', 'CONSTITUTE', 'TWO', 'DIFFERENT', 'ORDERS', 'OF', 'FACTS', 'WHICH', 'CORRESPOND', 'TO', 'EACH', 'OTHER', 'WHICH', 'ARE', 'ALWAYS', 'INTERLACED', 'AND', 'WHICH', 'OFTEN', 'BRING', 'FORTH', 'RESULTS'] +4507-16021-0034-1503: hyp=['THEY', 'CONSTITUTE', 'TWO', 'DIFFERENT', 'ORDERS', 'OF', 'FACTS', 'WHICH', 'CORRESPOND', 'TO', 'EACH', 'OTHER', 'WHICH', 'ARE', 'ALWAYS', 'INTERLACED', 'AND', 'WHICH', 'OFTEN', 'BRING', 'FORTH', 'RESULTS'] +4507-16021-0035-1504: ref=['TRUE', 'HISTORY', 'BEING', 'A', 'MIXTURE', 'OF', 'ALL', 'THINGS', 'THE', 'TRUE', 'HISTORIAN', 'MINGLES', 'IN', 'EVERYTHING'] +4507-16021-0035-1504: hyp=['TRUE', 'HISTORY', 'BEING', 'A', 'MIXTURE', 'OF', 'ALL', 'THINGS', 'THE', 'TRUE', 'HISTORIAN', 'MINGLES', 'IN', 'EVERYTHING'] +4507-16021-0036-1505: ref=['FACTS', 'FORM', 'ONE', 'OF', 'THESE', 'AND', 'IDEAS', 'THE', 'OTHER'] +4507-16021-0036-1505: hyp=['FACTS', 'FORM', 'ONE', 'OF', 'THESE', 'AND', 'IDEAS', 'THE', 'OTHER'] +4507-16021-0037-1506: ref=['THERE', 'IT', 'CLOTHES', 'ITSELF', 'IN', 'WORD', 'MASKS', 'IN', 'METAPHOR', 'RAGS'] +4507-16021-0037-1506: hyp=['THERE', 'IT', 'CLOTHES', 'ITSELF', 'IN', 'WORD', 'MASKS', 'IN', 'METAPHOR', 'RAGS'] +4507-16021-0038-1507: ref=['IN', 'THIS', 'GUISE', 'IT', 'BECOMES', 'HORRIBLE'] +4507-16021-0038-1507: hyp=['IN', 'THIS', 'SKIES', 'IT', 'BECOMES', 'HORRIBLE'] +4507-16021-0039-1508: ref=['ONE', 'PERCEIVES', 'WITHOUT', 'UNDERSTANDING', 'IT', 'A', 'HIDEOUS', 'MURMUR', 'SOUNDING', 'ALMOST', 'LIKE', 'HUMAN', 'ACCENTS', 'BUT', 'MORE', 'NEARLY', 'RESEMBLING', 'A', 'HOWL', 'THAN', 'AN', 'ARTICULATE', 'WORD'] +4507-16021-0039-1508: hyp=['ONE', 'PERCEIVES', 'WITHOUT', 'UNDERSTANDING', 'IT', 'A', 'HIDEOUS', 'MURMUR', 'SOUNDING', 'ALMOST', 'LIKE', 'HUMAN', 'ACCENTS', 'BUT', 'MORE', 'NEARLY', 'RESEMBLING', 'A', 'HOWL', 'THAN', 'AN', 'ARTICULATE', 
'WORD'] +4507-16021-0040-1509: ref=['ONE', 'THINKS', 'ONE', 'HEARS', 'HYDRAS', 'TALKING'] +4507-16021-0040-1509: hyp=['ONE', 'THINKS', 'ONE', 'HEARS', 'HYDRAS', 'TALKING'] +4507-16021-0041-1510: ref=['IT', 'IS', 'UNINTELLIGIBLE', 'IN', 'THE', 'DARK'] +4507-16021-0041-1510: hyp=['IT', 'IS', 'UNINTELLIGIBLE', 'IN', 'THE', 'DARK'] +4507-16021-0042-1511: ref=['IT', 'IS', 'BLACK', 'IN', 'MISFORTUNE', 'IT', 'IS', 'BLACKER', 'STILL', 'IN', 'CRIME', 'THESE', 'TWO', 'BLACKNESSES', 'AMALGAMATED', 'COMPOSE', 'SLANG'] +4507-16021-0042-1511: hyp=['IT', 'IS', 'BLACK', 'AND', 'MISFORTUNE', 'IT', 'IS', 'BLACKER', 'STILL', 'AND', 'CRIME', 'THESE', 'TWO', 'BLACKNESSES', 'AMALGAMATED', 'COMPOSED', 'SLING'] +4507-16021-0043-1512: ref=['THE', 'EARTH', 'IS', 'NOT', 'DEVOID', 'OF', 'RESEMBLANCE', 'TO', 'A', 'JAIL'] +4507-16021-0043-1512: hyp=['THE', 'EARTH', 'IS', 'NOT', 'DEVOID', 'OF', 'RESEMBLANCE', 'TO', 'A', 'JAIL'] +4507-16021-0044-1513: ref=['LOOK', 'CLOSELY', 'AT', 'LIFE'] +4507-16021-0044-1513: hyp=['LOOK', 'CLOSELY', 'AT', 'LIFE'] +4507-16021-0045-1514: ref=['IT', 'IS', 'SO', 'MADE', 'THAT', 'EVERYWHERE', 'WE', 'FEEL', 'THE', 'SENSE', 'OF', 'PUNISHMENT'] +4507-16021-0045-1514: hyp=['IT', 'IS', 'SO', 'MADE', 'THAT', 'EVERYWHERE', 'WE', 'FEEL', 'THE', 'SENSE', 'OF', 'PUNISHMENT'] +4507-16021-0046-1515: ref=['EACH', 'DAY', 'HAS', 'ITS', 'OWN', 'GREAT', 'GRIEF', 'OR', 'ITS', 'LITTLE', 'CARE'] +4507-16021-0046-1515: hyp=['EACH', 'DAY', 'HAS', 'ITS', 'OWN', 'GREAT', 'GRIEF', 'FOR', 'ITS', 'LITTLE', 'CARE'] +4507-16021-0047-1516: ref=['YESTERDAY', 'YOU', 'WERE', 'TREMBLING', 'FOR', 'A', 'HEALTH', 'THAT', 'IS', 'DEAR', 'TO', 'YOU', 'TO', 'DAY', 'YOU', 'FEAR', 'FOR', 'YOUR', 'OWN', 'TO', 'MORROW', 'IT', 'WILL', 'BE', 'ANXIETY', 'ABOUT', 'MONEY', 'THE', 'DAY', 'AFTER', 'TO', 'MORROW', 'THE', 'DIATRIBE', 'OF', 'A', 'SLANDERER', 'THE', 'DAY', 'AFTER', 'THAT', 'THE', 'MISFORTUNE', 'OF', 'SOME', 'FRIEND', 'THEN', 'THE', 'PREVAILING', 'WEATHER', 'THEN', 'SOMETHING', 'THAT', 'HAS', 'BEEN', 'BROKEN', 'OR', 'LOST', 'THEN', 'A', 'PLEASURE', 'WITH', 'WHICH', 'YOUR', 'CONSCIENCE', 'AND', 'YOUR', 'VERTEBRAL', 'COLUMN', 'REPROACH', 'YOU', 'AGAIN', 'THE', 'COURSE', 'OF', 'PUBLIC', 'AFFAIRS'] +4507-16021-0047-1516: hyp=['YESTERDAY', 'YOU', 'WERE', 'TREMBLING', 'FOR', 'A', 'HEALTH', 'THAT', 'IS', 'DEAR', 'TO', 'YOU', 'TO', 'DAY', 'YOU', 'FEAR', 'FOR', 'YOUR', 'OWN', 'TO', 'MORROW', 'IT', 'WILL', 'BE', 'ANXIETY', 'ABOUT', 'MONEY', 'THE', 'DAY', 'AFTER', 'TO', 'MORROW', 'THE', 'DIETRIBE', 'OF', 'A', 'SLANDERER', 'THE', 'DAY', 'AFTER', 'THAT', 'THE', 'MISFORTUNE', 'OF', 'SOME', 'FRIEND', 'THEN', 'THE', 'PREVAILING', 'WEATHER', 'THEN', 'SOMETHING', 'THAT', 'HAS', 'BEEN', 'BROKEN', 'OR', 'LOST', 'THEN', 'A', 'PLEASURE', 'WITH', 'WHICH', 'YOUR', 'CONSCIENCE', 'AND', 'YOUR', 'VERTEBRAL', 'COLUMN', 'REPROACH', 'YOU', 'AGAIN', 'THE', 'COURSE', 'OF', 'PUBLIC', 'AFFAIRS'] +4507-16021-0048-1517: ref=['THIS', 'WITHOUT', 'RECKONING', 'IN', 'THE', 'PAINS', 'OF', 'THE', 'HEART', 'AND', 'SO', 'IT', 'GOES', 'ON'] +4507-16021-0048-1517: hyp=['THIS', 'WITHOUT', 'RECKONING', 'IN', 'THE', 'PAINS', 'OF', 'THE', 'HEART', 'AND', 'SO', 'TO', 'GOES', 'ON'] +4507-16021-0049-1518: ref=['THERE', 'IS', 'HARDLY', 'ONE', 'DAY', 'OUT', 'OF', 'A', 'HUNDRED', 'WHICH', 'IS', 'WHOLLY', 'JOYOUS', 'AND', 'SUNNY'] +4507-16021-0049-1518: hyp=['THERE', 'IS', 'HARDLY', 'ONE', 'DAY', 'OUT', 'OF', 'A', 'HUNDRED', 'WHICH', 'IS', 'WHOLLY', 'JOYOUS', 'AND', 'SUNNY'] +4507-16021-0050-1519: ref=['AND', 'YOU', 'BELONG', 'TO', 'THAT', 'SMALL', 'CLASS', 'WHO', 'ARE', 'HAPPY'] 
+4507-16021-0050-1519: hyp=['AND', 'YOU', 'BELONG', 'TO', 'THAT', 'SMALL', 'CLASS', 'WHO', 'ARE', 'HAPPY'] +4507-16021-0051-1520: ref=['IN', 'THIS', 'WORLD', 'EVIDENTLY', 'THE', 'VESTIBULE', 'OF', 'ANOTHER', 'THERE', 'ARE', 'NO', 'FORTUNATE'] +4507-16021-0051-1520: hyp=['IN', 'THIS', "WORLD'S", 'EVIDENTLY', 'THE', 'VESTIBULE', 'OF', 'ANOTHER', 'THERE', 'ARE', 'NO', 'FORTUNATE'] +4507-16021-0052-1521: ref=['THE', 'REAL', 'HUMAN', 'DIVISION', 'IS', 'THIS', 'THE', 'LUMINOUS', 'AND', 'THE', 'SHADY'] +4507-16021-0052-1521: hyp=['THE', 'REAL', 'HUMAN', 'DIVISION', 'IS', 'THIS', 'THE', 'LUMINOUS', 'AND', 'THE', 'SHADY'] +4507-16021-0053-1522: ref=['TO', 'DIMINISH', 'THE', 'NUMBER', 'OF', 'THE', 'SHADY', 'TO', 'AUGMENT', 'THE', 'NUMBER', 'OF', 'THE', 'LUMINOUS', 'THAT', 'IS', 'THE', 'OBJECT'] +4507-16021-0053-1522: hyp=['TO', 'DIMINISH', 'THE', 'NUMBER', 'OF', 'THE', 'SHADY', 'TO', 'AUGMENT', 'THE', 'NUMBER', 'OF', 'THE', 'LUMINOUS', 'THAT', 'IS', 'THE', 'OBJECT'] +4507-16021-0054-1523: ref=['THAT', 'IS', 'WHY', 'WE', 'CRY', 'EDUCATION', 'SCIENCE'] +4507-16021-0054-1523: hyp=['THAT', 'IS', 'WHY', 'WE', 'CRY', 'EDUCATION', 'SCIENCE'] +4507-16021-0055-1524: ref=['TO', 'TEACH', 'READING', 'MEANS', 'TO', 'LIGHT', 'THE', 'FIRE', 'EVERY', 'SYLLABLE', 'SPELLED', 'OUT', 'SPARKLES'] +4507-16021-0055-1524: hyp=['TO', 'TEACH', 'READING', 'MEANS', 'TO', 'LIGHT', 'THE', 'FIRE', 'EVERY', 'SYLLABLE', "SPELL'D", 'OUT', 'SPARKLES'] +4507-16021-0056-1525: ref=['HOWEVER', 'HE', 'WHO', 'SAYS', 'LIGHT', 'DOES', 'NOT', 'NECESSARILY', 'SAY', 'JOY'] +4507-16021-0056-1525: hyp=['HOWEVER', 'HE', 'WHO', 'SAYS', 'LIGHT', 'DOES', 'NOT', 'NECESSARILY', 'SAY', 'JOY'] +4507-16021-0057-1526: ref=['PEOPLE', 'SUFFER', 'IN', 'THE', 'LIGHT', 'EXCESS', 'BURNS'] +4507-16021-0057-1526: hyp=['PEOPLE', 'SUFFER', 'IN', 'THE', 'LIGHT', 'EXCESS', 'BURNS'] +4507-16021-0058-1527: ref=['THE', 'FLAME', 'IS', 'THE', 'ENEMY', 'OF', 'THE', 'WING'] +4507-16021-0058-1527: hyp=['THE', 'FLAME', 'IS', 'THE', 'ENEMY', 'OF', 'THE', 'WING'] +4507-16021-0059-1528: ref=['TO', 'BURN', 'WITHOUT', 'CEASING', 'TO', 'FLY', 'THEREIN', 'LIES', 'THE', 'MARVEL', 'OF', 'GENIUS'] +4507-16021-0059-1528: hyp=['TO', 'BURN', 'WITHOUT', 'CEASING', 'TO', 'FLY', 'THEREIN', 'LIES', 'THE', 'MARVEL', 'OF', 'GENIUS'] +4970-29093-0000-2093: ref=["YOU'LL", 'NEVER', 'DIG', 'IT', 'OUT', 'OF', 'THE', 'ASTOR', 'LIBRARY'] +4970-29093-0000-2093: hyp=["YOU'LL", 'NEVER', 'DIG', 'IT', 'OUT', 'OF', 'THE', 'ASTER', 'LIBRARY'] +4970-29093-0001-2094: ref=['TO', 'THE', 'YOUNG', 'AMERICAN', 'HERE', 'OR', 'ELSEWHERE', 'THE', 'PATHS', 'TO', 'FORTUNE', 'ARE', 'INNUMERABLE', 'AND', 'ALL', 'OPEN', 'THERE', 'IS', 'INVITATION', 'IN', 'THE', 'AIR', 'AND', 'SUCCESS', 'IN', 'ALL', 'HIS', 'WIDE', 'HORIZON'] +4970-29093-0001-2094: hyp=['TO', 'THE', 'YOUNG', 'AMERICAN', 'HERE', 'OR', 'ELSEWHERE', 'THE', 'PATHS', 'TO', 'FORTUNE', 'ARE', 'INNUMERABLE', 'AND', 'ALL', 'OPEN', 'THERE', 'IS', 'INVITATION', 'IN', 'THE', 'AIR', 'AND', 'SUCCESS', 'IN', 'ALL', 'HIS', 'WIDE', 'HORIZON'] +4970-29093-0002-2095: ref=['HE', 'HAS', 'NO', 'TRADITIONS', 'TO', 'BIND', 'HIM', 'OR', 'GUIDE', 'HIM', 'AND', 'HIS', 'IMPULSE', 'IS', 'TO', 'BREAK', 'AWAY', 'FROM', 'THE', 'OCCUPATION', 'HIS', 'FATHER', 'HAS', 'FOLLOWED', 'AND', 'MAKE', 'A', 'NEW', 'WAY', 'FOR', 'HIMSELF'] +4970-29093-0002-2095: hyp=['HE', 'HAS', 'NO', 'TRADITIONS', 'TO', 'BIND', 'HIM', 'OR', 'GUIDE', 'HIM', 'AND', 'HIS', 'IMPULSE', 'IS', 'TO', 'BREAK', 'AWAY', 'FROM', 'THE', 'OCCUPATION', 'HIS', 'FATHER', 'HAS', 'FOLLOWED', 'AND', 'MAKE', 'A', 'NEW', 'WAY', 'FOR', 
'HIMSELF'] +4970-29093-0003-2096: ref=['THE', 'MODEST', 'FELLOW', 'WOULD', 'HAVE', 'LIKED', 'FAME', 'THRUST', 'UPON', 'HIM', 'FOR', 'SOME', 'WORTHY', 'ACHIEVEMENT', 'IT', 'MIGHT', 'BE', 'FOR', 'A', 'BOOK', 'OR', 'FOR', 'THE', 'SKILLFUL', 'MANAGEMENT', 'OF', 'SOME', 'GREAT', 'NEWSPAPER', 'OR', 'FOR', 'SOME', 'DARING', 'EXPEDITION', 'LIKE', 'THAT', 'OF', 'LIEUTENANT', 'STRAIN', 'OR', 'DOCTOR', 'KANE'] +4970-29093-0003-2096: hyp=['THE', 'MODEST', 'FELLOW', 'WOULD', 'HAVE', 'LIKED', 'FAME', 'THRUST', 'UPON', 'HIM', 'FOR', 'SOME', 'WORTHY', 'ACHIEVEMENT', 'IT', 'MIGHT', 'BE', 'FOR', 'A', 'BOOK', 'OR', 'FOR', 'THE', 'SKILFUL', 'MANAGEMENT', 'OF', 'SOME', 'GREAT', 'NEWSPAPER', 'OR', 'FOR', 'SOME', 'DARING', 'EXPEDITION', 'LIKE', 'THAT', 'OF', 'LIEUTENANT', 'STRAYNE', 'OR', 'DOCTOR', 'KANE'] +4970-29093-0004-2097: ref=['HE', 'WAS', 'UNABLE', 'TO', 'DECIDE', 'EXACTLY', 'WHAT', 'IT', 'SHOULD', 'BE'] +4970-29093-0004-2097: hyp=['HE', 'WAS', 'UNABLE', 'TO', 'DECIDE', 'EXACTLY', 'WHAT', 'IT', 'SHOULD', 'BE'] +4970-29093-0005-2098: ref=['SOMETIMES', 'HE', 'THOUGHT', 'HE', 'WOULD', 'LIKE', 'TO', 'STAND', 'IN', 'A', 'CONSPICUOUS', 'PULPIT', 'AND', 'HUMBLY', 'PREACH', 'THE', 'GOSPEL', 'OF', 'REPENTANCE', 'AND', 'IT', 'EVEN', 'CROSSED', 'HIS', 'MIND', 'THAT', 'IT', 'WOULD', 'BE', 'NOBLE', 'TO', 'GIVE', 'HIMSELF', 'TO', 'A', 'MISSIONARY', 'LIFE', 'TO', 'SOME', 'BENIGHTED', 'REGION', 'WHERE', 'THE', 'DATE', 'PALM', 'GROWS', 'AND', 'THE', "NIGHTINGALE'S", 'VOICE', 'IS', 'IN', 'TUNE', 'AND', 'THE', 'BUL', 'BUL', 'SINGS', 'ON', 'THE', 'OFF', 'NIGHTS'] +4970-29093-0005-2098: hyp=['SOMETIMES', 'HE', 'THOUGHT', 'HE', 'WOULD', 'LIKE', 'TO', 'STAND', 'IN', 'A', 'CONSPICUOUS', 'PULPIT', 'AND', 'HUMBLY', 'PREACH', 'THE', 'GOSPEL', 'OF', 'REPENTANCE', 'AND', 'IT', 'EVEN', 'CROSSED', 'HIS', 'MIND', 'THAT', 'IT', 'WOULD', 'BE', 'NOBLE', 'TO', 'GIVE', 'HIMSELF', 'TO', 'A', 'MISSIONARY', 'LIFE', 'TO', 'SOME', 'BENIGHTED', 'REGION', 'WHERE', 'THE', 'DATE', 'PALM', 'GROVES', 'AND', 'THE', "NIGHTINGALE'S", 'VOICE', 'IS', 'IN', 'TUNE', 'AND', 'THE', 'BULL', 'BOWL', 'SINGS', 'ON', 'THE', 'OPT', 'NIGHTS'] +4970-29093-0006-2099: ref=['LAW', 'SEEMED', 'TO', 'HIM', 'WELL', 'ENOUGH', 'AS', 'A', 'SCIENCE', 'BUT', 'HE', 'NEVER', 'COULD', 'DISCOVER', 'A', 'PRACTICAL', 'CASE', 'WHERE', 'IT', 'APPEARED', 'TO', 'HIM', 'WORTH', 'WHILE', 'TO', 'GO', 'TO', 'LAW', 'AND', 'ALL', 'THE', 'CLIENTS', 'WHO', 'STOPPED', 'WITH', 'THIS', 'NEW', 'CLERK', 'IN', 'THE', 'ANTE', 'ROOM', 'OF', 'THE', 'LAW', 'OFFICE', 'WHERE', 'HE', 'WAS', 'WRITING', 'PHILIP', 'INVARIABLY', 'ADVISED', 'TO', 'SETTLE', 'NO', 'MATTER', 'HOW', 'BUT', 'SETTLE', 'GREATLY', 'TO', 'THE', 'DISGUST', 'OF', 'HIS', 'EMPLOYER', 'WHO', 'KNEW', 'THAT', 'JUSTICE', 'BETWEEN', 'MAN', 'AND', 'MAN', 'COULD', 'ONLY', 'BE', 'ATTAINED', 'BY', 'THE', 'RECOGNIZED', 'PROCESSES', 'WITH', 'THE', 'ATTENDANT', 'FEES'] +4970-29093-0006-2099: hyp=['LAW', 'SEEMED', 'TO', 'HIM', 'WELL', 'ENOUGH', 'AS', 'A', 'SCIENCE', 'BUT', 'HE', 'NEVER', 'COULD', 'DISCOVER', 'A', 'PRACTICAL', 'CASE', 'WHERE', 'IT', 'APPEARED', 'TO', 'HIM', 'WORTH', 'WHILE', 'TO', 'GO', 'TO', 'LAW', 'AND', 'ALL', 'THE', 'CLIENTS', 'WHO', 'STOPPED', 'WITH', 'THIS', 'NEW', 'CLERK', 'IN', 'THE', 'ANTE', 'ROOM', 'OF', 'THE', 'LAW', 'OFFICE', 'WHERE', 'HE', 'WAS', 'WRITING', 'PHILIP', 'INVARIABLY', 'ADVISED', 'TO', 'SETTLE', 'NO', 'MATTER', 'HOW', 'BUT', 'SETTLE', 'GREATLY', 'TO', 'THE', 'DISGUST', 'OF', 'HIS', 'EMPLOYER', 'WHO', 'KNEW', 'THAT', 'JUSTICE', 'BETWEEN', 'MAN', 'AND', 'MAN', 'COULD', 'ONLY', 'BE', 'ATTAINED', 'BY', 'THE', 
'RECOGNIZED', 'PROCESSES', 'WITH', 'THE', 'ATTENDANT', 'FEES'] +4970-29093-0007-2100: ref=['IT', 'IS', 'SUCH', 'A', 'NOBLE', 'AMBITION', 'THAT', 'IT', 'IS', 'A', 'PITY', 'IT', 'HAS', 'USUALLY', 'SUCH', 'A', 'SHALLOW', 'FOUNDATION'] +4970-29093-0007-2100: hyp=['IT', 'IS', 'SUCH', 'A', 'NOBLE', 'AMBITION', 'THAT', 'IT', 'IS', 'A', 'PITY', 'IT', 'HAS', 'USUALLY', 'SUCH', 'A', 'SHALLOW', 'FOUNDATION'] +4970-29093-0008-2101: ref=['HE', 'WANTED', 'TO', 'BEGIN', 'AT', 'THE', 'TOP', 'OF', 'THE', 'LADDER'] +4970-29093-0008-2101: hyp=['HE', 'WANTED', 'TO', 'BEGIN', 'AT', 'THE', 'TOP', 'OF', 'THE', 'LADDER'] +4970-29093-0009-2102: ref=['PHILIP', 'THEREFORE', 'READ', 'DILIGENTLY', 'IN', 'THE', 'ASTOR', 'LIBRARY', 'PLANNED', 'LITERARY', 'WORKS', 'THAT', 'SHOULD', 'COMPEL', 'ATTENTION', 'AND', 'NURSED', 'HIS', 'GENIUS'] +4970-29093-0009-2102: hyp=['PHILIP', 'THEREFORE', 'READ', 'DILIGENTLY', 'IN', 'THE', 'ASTER', 'LIBRARY', 'PLANNED', 'LITERARY', 'WORKS', 'THAT', 'SHOULD', 'COMPEL', 'ATTENTION', 'AND', 'NURSED', 'HIS', 'GENIUS'] +4970-29093-0010-2103: ref=['HE', 'HAD', 'NO', 'FRIEND', 'WISE', 'ENOUGH', 'TO', 'TELL', 'HIM', 'TO', 'STEP', 'INTO', 'THE', 'DORKING', 'CONVENTION', 'THEN', 'IN', 'SESSION', 'MAKE', 'A', 'SKETCH', 'OF', 'THE', 'MEN', 'AND', 'WOMEN', 'ON', 'THE', 'PLATFORM', 'AND', 'TAKE', 'IT', 'TO', 'THE', 'EDITOR', 'OF', 'THE', 'DAILY', 'GRAPEVINE', 'AND', 'SEE', 'WHAT', 'HE', 'COULD', 'GET', 'A', 'LINE', 'FOR', 'IT'] +4970-29093-0010-2103: hyp=['HE', 'HAD', 'NO', 'FRIEND', 'WISE', 'ENOUGH', 'TO', 'TELL', 'HIM', 'TO', 'STEP', 'INTO', 'THE', 'DORKING', 'CONVENTION', 'THAN', 'IN', 'SESSION', 'MAKE', 'A', 'SKETCH', 'OF', 'THE', 'MEN', 'AND', 'WOMEN', 'ON', 'THE', 'PLATFORM', 'AND', 'TAKE', 'IT', 'TO', 'THE', 'EDITOR', 'OF', 'THE', 'DAILY', 'GRAPE', 'VINE', 'AND', 'SEE', 'WHAT', 'HE', 'COULD', 'GET', 'A', 'LINE', 'FOR', 'IT'] +4970-29093-0011-2104: ref=['O', 'VERY', 'WELL', 'SAID', 'GRINGO', 'TURNING', 'AWAY', 'WITH', 'A', 'SHADE', 'OF', 'CONTEMPT', "YOU'LL", 'FIND', 'IF', 'YOU', 'ARE', 'GOING', 'INTO', 'LITERATURE', 'AND', 'NEWSPAPER', 'WORK', 'THAT', 'YOU', "CAN'T", 'AFFORD', 'A', 'CONSCIENCE', 'LIKE', 'THAT'] +4970-29093-0011-2104: hyp=['OH', 'VERY', 'WELL', 'SAID', 'GREENOW', 'TURNING', 'AWAY', 'WITH', 'A', 'SHADE', 'OF', 'CONTEMPT', "YOU'LL", 'FIND', 'IF', 'YOU', 'ARE', 'GOING', 'INTO', 'LITERATURE', 'AND', 'NEWSPAPER', 'WORK', 'THAT', 'YOU', "CAN'T", 'AFFORD', 'A', 'CONSCIENCE', 'LIKE', 'THAT'] +4970-29093-0012-2105: ref=['BUT', 'PHILIP', 'DID', 'AFFORD', 'IT', 'AND', 'HE', 'WROTE', 'THANKING', 'HIS', 'FRIENDS', 'AND', 'DECLINING', 'BECAUSE', 'HE', 'SAID', 'THE', 'POLITICAL', 'SCHEME', 'WOULD', 'FAIL', 'AND', 'OUGHT', 'TO', 'FAIL'] +4970-29093-0012-2105: hyp=['BUT', 'PHILIP', 'DID', 'AFFORD', 'IT', 'AND', 'HE', 'WROTE', 'THANKING', 'HIS', 'FRIENDS', 'AND', 'DECLINING', 'BECAUSE', 'HE', 'SAID', 'THE', 'POLITICAL', 'SCHEME', 'WOULD', 'FAIL', 'AND', 'OUGHT', 'TO', 'FAIL'] +4970-29093-0013-2106: ref=['AND', 'HE', 'WENT', 'BACK', 'TO', 'HIS', 'BOOKS', 'AND', 'TO', 'HIS', 'WAITING', 'FOR', 'AN', 'OPENING', 'LARGE', 'ENOUGH', 'FOR', 'HIS', 'DIGNIFIED', 'ENTRANCE', 'INTO', 'THE', 'LITERARY', 'WORLD'] +4970-29093-0013-2106: hyp=['AND', 'HE', 'WENT', 'BACK', 'TO', 'HIS', 'BOOKS', 'AND', 'TO', 'HIS', 'WAITING', 'FOR', 'AN', 'OPENING', 'LARGE', 'ENOUGH', 'FOR', 'HIS', 'DIGNIFIED', 'ENTRANCE', 'INTO', 'THE', 'LITERARY', 'WORLD'] +4970-29093-0014-2107: ref=['WELL', "I'M", 'GOING', 'AS', 'AN', 'ENGINEER', 'YOU', 'CAN', 'GO', 'AS', 'ONE'] +4970-29093-0014-2107: hyp=['WELL', "I'M", 'GOING', 'AS', 'AN', 
'ENGINEER', 'YOU', 'COULD', 'GO', 'AS', 'ONE'] +4970-29093-0015-2108: ref=['YOU', 'CAN', 'BEGIN', 'BY', 'CARRYING', 'A', 'ROD', 'AND', 'PUTTING', 'DOWN', 'THE', 'FIGURES'] +4970-29093-0015-2108: hyp=['YOU', 'CAN', 'BEGIN', 'BY', 'CARRYING', 'A', 'ROD', 'AND', 'PUTTING', 'DOWN', 'THE', 'FIGURES'] +4970-29093-0016-2109: ref=['NO', 'ITS', 'NOT', 'TOO', 'SOON'] +4970-29093-0016-2109: hyp=['NO', "IT'S", 'OUGHT', 'TOO', 'SOON'] +4970-29093-0017-2110: ref=["I'VE", 'BEEN', 'READY', 'TO', 'GO', 'ANYWHERE', 'FOR', 'SIX', 'MONTHS'] +4970-29093-0017-2110: hyp=["I'VE", 'BEEN', 'READY', 'TO', 'GO', 'ANYWHERE', 'FOR', 'SIX', 'MONTHS'] +4970-29093-0018-2111: ref=['THE', 'TWO', 'YOUNG', 'MEN', 'WHO', 'WERE', 'BY', 'THIS', 'TIME', 'FULL', 'OF', 'THE', 'ADVENTURE', 'WENT', 'DOWN', 'TO', 'THE', 'WALL', 'STREET', 'OFFICE', 'OF', "HENRY'S", 'UNCLE', 'AND', 'HAD', 'A', 'TALK', 'WITH', 'THAT', 'WILY', 'OPERATOR'] +4970-29093-0018-2111: hyp=['THE', 'TWO', 'YOUNG', 'MEN', 'WHO', 'WERE', 'BY', 'THIS', 'TIME', 'FULL', 'OF', 'THE', 'ADVENTURE', 'WENT', 'DOWN', 'TO', 'THE', 'WALL', 'STREET', 'OFFICE', 'OF', "HENRY'S", 'UNCLE', 'AND', 'HAD', 'A', 'TALK', 'WITH', 'THAT', 'WILY', 'OPERATOR'] +4970-29093-0019-2112: ref=['THE', 'NIGHT', 'WAS', 'SPENT', 'IN', 'PACKING', 'UP', 'AND', 'WRITING', 'LETTERS', 'FOR', 'PHILIP', 'WOULD', 'NOT', 'TAKE', 'SUCH', 'AN', 'IMPORTANT', 'STEP', 'WITHOUT', 'INFORMING', 'HIS', 'FRIENDS'] +4970-29093-0019-2112: hyp=['THE', 'NIGHT', 'WAS', 'SPENT', 'IN', 'PACKING', 'UP', 'AND', 'WRITING', 'LETTERS', 'FOR', 'PHILIP', 'WOULD', 'NOT', 'TAKE', 'SUCH', 'AN', 'IMPORTANT', 'STEP', 'WITHOUT', 'INFORMING', 'HIS', 'FRIENDS'] +4970-29093-0020-2113: ref=['WHY', "IT'S", 'IN', 'MISSOURI', 'SOMEWHERE', 'ON', 'THE', 'FRONTIER', 'I', 'THINK', "WE'LL", 'GET', 'A', 'MAP'] +4970-29093-0020-2113: hyp=['WHY', "IT'S", 'A', 'MISSOURI', 'SOMEWHERE', 'ON', 'THE', 'FRONTIER', 'I', 'THINK', "WE'LL", 'GET', 'A', 'MAP'] +4970-29093-0021-2114: ref=['I', 'WAS', 'AFRAID', 'IT', 'WAS', 'NEARER', 'HOME'] +4970-29093-0021-2114: hyp=['I', 'WAS', 'AFRAID', 'IT', 'WAS', 'NEARER', 'HOME'] +4970-29093-0022-2115: ref=['HE', 'KNEW', 'HIS', 'UNCLE', 'WOULD', 'BE', 'GLAD', 'TO', 'HEAR', 'THAT', 'HE', 'HAD', 'AT', 'LAST', 'TURNED', 'HIS', 'THOUGHTS', 'TO', 'A', 'PRACTICAL', 'MATTER'] +4970-29093-0022-2115: hyp=['HE', 'KNEW', 'HIS', 'UNCLE', 'WOULD', 'BE', 'GLAD', 'TO', 'HEAR', 'THAT', 'HE', 'HAD', 'AT', 'LAST', 'TURNED', 'HIS', 'THOUGHTS', 'TO', 'A', 'PRACTICAL', 'MATTER'] +4970-29093-0023-2116: ref=['HE', 'WELL', 'KNEW', 'THE', 'PERILS', 'OF', 'THE', 'FRONTIER', 'THE', 'SAVAGE', 'STATE', 'OF', 'SOCIETY', 'THE', 'LURKING', 'INDIANS', 'AND', 'THE', 'DANGERS', 'OF', 'FEVER'] +4970-29093-0023-2116: hyp=['HE', 'WELL', 'KNEW', 'THE', 'PERILS', 'OF', 'THE', 'FRONTIER', 'THE', 'SAVAGE', 'STATE', 'OF', 'SOCIETY', 'THE', 'LURKING', 'INDIANS', 'AND', 'THE', 'DANGERS', 'OF', 'FEVER'] +4970-29095-0000-2054: ref=['SHE', 'WAS', 'TIRED', 'OF', 'OTHER', 'THINGS'] +4970-29095-0000-2054: hyp=['SHE', 'WAS', 'TIRED', 'OF', 'OTHER', 'THINGS'] +4970-29095-0001-2055: ref=['SHE', 'TRIED', 'THIS', 'MORNING', 'AN', 'AIR', 'OR', 'TWO', 'UPON', 'THE', 'PIANO', 'SANG', 'A', 'SIMPLE', 'SONG', 'IN', 'A', 'SWEET', 'BUT', 'SLIGHTLY', 'METALLIC', 'VOICE', 'AND', 'THEN', 'SEATING', 'HERSELF', 'BY', 'THE', 'OPEN', 'WINDOW', 'READ', "PHILIP'S", 'LETTER'] +4970-29095-0001-2055: hyp=['SHE', 'TRIED', 'THIS', 'MORNING', 'AN', 'AIR', 'OR', 'TWO', 'UPON', 'THE', 'PIANO', 'SAYING', 'A', 'SIMPLE', 'SONG', 'AND', 'A', 'SWEET', 'BUT', 'SLIGHTLY', 'METALLIC', 'VOICE', 'AND', 'THEN', 
'SEATING', 'HERSELF', 'BY', 'THE', 'OPEN', 'WINDOW', 'READ', "PHILIP'S", 'LETTER'] +4970-29095-0002-2056: ref=['WELL', 'MOTHER', 'SAID', 'THE', 'YOUNG', 'STUDENT', 'LOOKING', 'UP', 'WITH', 'A', 'SHADE', 'OF', 'IMPATIENCE'] +4970-29095-0002-2056: hyp=['WELL', 'MOTHER', 'SAID', 'THE', 'YOUNG', 'STUDENT', 'LOOKING', 'UP', 'WITH', 'A', 'SHADE', 'OF', 'IMPATIENCE'] +4970-29095-0003-2057: ref=['I', 'HOPE', 'THEE', 'TOLD', 'THE', 'ELDERS', 'THAT', 'FATHER', 'AND', 'I', 'ARE', 'RESPONSIBLE', 'FOR', 'THE', 'PIANO', 'AND', 'THAT', 'MUCH', 'AS', 'THEE', 'LOVES', 'MUSIC', 'THEE', 'IS', 'NEVER', 'IN', 'THE', 'ROOM', 'WHEN', 'IT', 'IS', 'PLAYED'] +4970-29095-0003-2057: hyp=['I', 'HOPE', 'THEE', 'TOLD', 'THE', 'ELDERS', 'THAT', 'FATHER', 'AND', 'I', 'ARE', 'RESPONSIBLE', 'FOR', 'THE', 'PIANO', 'AND', 'THAT', 'MUCH', 'AS', 'THEE', 'LOVES', 'MUSIC', 'THEE', 'IS', 'NEVER', 'IN', 'THE', 'ROOM', 'WHEN', 'IT', 'IS', 'PLAYED'] +4970-29095-0004-2058: ref=['I', 'HEARD', 'FATHER', 'TELL', 'COUSIN', 'ABNER', 'THAT', 'HE', 'WAS', 'WHIPPED', 'SO', 'OFTEN', 'FOR', 'WHISTLING', 'WHEN', 'HE', 'WAS', 'A', 'BOY', 'THAT', 'HE', 'WAS', 'DETERMINED', 'TO', 'HAVE', 'WHAT', 'COMPENSATION', 'HE', 'COULD', 'GET', 'NOW'] +4970-29095-0004-2058: hyp=['I', 'HEARD', 'FATHER', 'TELL', 'COUSIN', 'ABNER', 'THAT', 'HE', 'WAS', 'WHIPPED', 'SO', 'OFTEN', 'FOR', 'WHISTLING', 'WHEN', 'HE', 'WAS', 'A', 'BOY', 'THAT', 'HE', 'WAS', 'DETERMINED', 'TO', 'HAVE', 'WHAT', 'COMPENSATION', 'HE', 'COULD', 'GET', 'NOW'] +4970-29095-0005-2059: ref=['THY', 'WAYS', 'GREATLY', 'TRY', 'ME', 'RUTH', 'AND', 'ALL', 'THY', 'RELATIONS'] +4970-29095-0005-2059: hyp=['THY', 'WAYS', 'GREATLY', 'TRY', 'ME', 'RUTH', 'AND', 'ALL', 'THY', 'RELATIONS'] +4970-29095-0006-2060: ref=['IS', 'THY', 'FATHER', 'WILLING', 'THEE', 'SHOULD', 'GO', 'AWAY', 'TO', 'A', 'SCHOOL', 'OF', 'THE', "WORLD'S", 'PEOPLE'] +4970-29095-0006-2060: hyp=['IS', 'THY', 'FATHER', 'WILLING', 'THEE', 'SHOULD', 'GO', 'AWAY', 'TO', 'A', 'SCHOOL', 'OF', 'THE', "WORLD'S", 'PEOPLE'] +4970-29095-0007-2061: ref=['I', 'HAVE', 'NOT', 'ASKED', 'HIM', 'RUTH', 'REPLIED', 'WITH', 'A', 'LOOK', 'THAT', 'MIGHT', 'IMPLY', 'THAT', 'SHE', 'WAS', 'ONE', 'OF', 'THOSE', 'DETERMINED', 'LITTLE', 'BODIES', 'WHO', 'FIRST', 'MADE', 'UP', 'HER', 'OWN', 'MIND', 'AND', 'THEN', 'COMPELLED', 'OTHERS', 'TO', 'MAKE', 'UP', 'THEIRS', 'IN', 'ACCORDANCE', 'WITH', 'HERS'] +4970-29095-0007-2061: hyp=['I', 'HAVE', 'NOT', 'ASKED', 'HIM', 'RUTH', 'REPLIED', 'WITH', 'A', 'LOOK', 'THAT', 'MIGHT', 'IMPLY', 'THAT', 'SHE', 'WAS', 'ONE', 'OF', 'THOSE', 'DETERMINED', 'LITTLE', 'BODIES', 'WHO', 'FIRST', 'MADE', 'UP', 'HER', 'OWN', 'MIND', 'AND', 'THEN', 'COMPELLED', 'OTHERS', 'TO', 'MAKE', 'UP', 'THEIRS', 'IN', 'ACCORDANCE', 'WITH', 'HERS'] +4970-29095-0008-2062: ref=['MOTHER', "I'M", 'GOING', 'TO', 'STUDY', 'MEDICINE'] +4970-29095-0008-2062: hyp=['MOTHER', 'I', 'AM', 'GOING', 'TO', 'STUDY', 'MEDICINE'] +4970-29095-0009-2063: ref=['MARGARET', 'BOLTON', 'ALMOST', 'LOST', 'FOR', 'A', 'MOMENT', 'HER', 'HABITUAL', 'PLACIDITY'] +4970-29095-0009-2063: hyp=['MARGARET', 'BOLTON', 'ALMOST', 'LOST', 'FOR', 'A', 'MOMENT', 'HER', 'HABITUAL', 'PLACIDITY'] +4970-29095-0010-2064: ref=['THEE', 'STUDY', 'MEDICINE'] +4970-29095-0010-2064: hyp=['THE', 'STUDY', 'MEDICINE'] +4970-29095-0011-2065: ref=['DOES', 'THEE', 'THINK', 'THEE', 'COULD', 'STAND', 'IT', 'SIX', 'MONTHS'] +4970-29095-0011-2065: hyp=['DOES', 'THEE', 'THINK', 'THEE', 'COULD', 'STAND', 'AT', 'SIX', 'MONTHS'] +4970-29095-0012-2066: ref=['AND', 'BESIDES', 'SUPPOSE', 'THEE', 'DOES', 'LEARN', 'MEDICINE'] 
+4970-29095-0012-2066: hyp=['AND', 'BESIDES', 'SUPPOSE', 'THEE', 'DOES', 'LEARN', 'MEDICINE'] +4970-29095-0013-2067: ref=['I', 'WILL', 'PRACTICE', 'IT'] +4970-29095-0013-2067: hyp=['I', 'WILL', 'PRACTISE', 'IT'] +4970-29095-0014-2068: ref=['WHERE', 'THEE', 'AND', 'THY', 'FAMILY', 'ARE', 'KNOWN'] +4970-29095-0014-2068: hyp=["WHERE'S", 'THEE', 'AND', 'THY', 'FAMILY', 'ARE', 'KNOWN'] +4970-29095-0015-2069: ref=['IF', 'I', 'CAN', 'GET', 'PATIENTS'] +4970-29095-0015-2069: hyp=['IF', 'I', 'CAN', 'GET', 'PATIENCE'] +4970-29095-0016-2070: ref=['RUTH', 'SAT', 'QUITE', 'STILL', 'FOR', 'A', 'TIME', 'WITH', 'FACE', 'INTENT', 'AND', 'FLUSHED', 'IT', 'WAS', 'OUT', 'NOW'] +4970-29095-0016-2070: hyp=['RUTH', 'SAT', 'QUITE', 'STILL', 'FOR', 'A', 'TIME', 'WITH', 'FACE', 'INTENT', 'AND', 'FLUSHED', 'IT', 'WAS', 'OUT', 'NOW'] +4970-29095-0017-2071: ref=['THE', 'SIGHT', 'SEERS', 'RETURNED', 'IN', 'HIGH', 'SPIRITS', 'FROM', 'THE', 'CITY'] +4970-29095-0017-2071: hyp=['THE', 'SIGHTSEERS', 'RETURNED', 'IN', 'HIGH', 'SPIRITS', 'FROM', 'THE', 'CITY'] +4970-29095-0018-2072: ref=['RUTH', 'ASKED', 'THE', 'ENTHUSIASTS', 'IF', 'THEY', 'WOULD', 'LIKE', 'TO', 'LIVE', 'IN', 'SUCH', 'A', 'SOUNDING', 'MAUSOLEUM', 'WITH', 'ITS', 'GREAT', 'HALLS', 'AND', 'ECHOING', 'ROOMS', 'AND', 'NO', 'COMFORTABLE', 'PLACE', 'IN', 'IT', 'FOR', 'THE', 'ACCOMMODATION', 'OF', 'ANY', 'BODY'] +4970-29095-0018-2072: hyp=['RUTH', 'ASKED', 'THE', 'ENTHUSIASTS', 'IF', 'THEY', 'WOULD', 'LIKE', 'TO', 'LIVE', 'IN', 'SUCH', 'A', 'SOUNDING', 'MUZOLEUM', 'WITH', 'ITS', 'GREAT', 'HALLS', 'AND', 'ECHOING', 'ROOMS', 'AND', 'NO', 'COMFORTABLE', 'PLACE', 'IN', 'IT', 'FOR', 'THE', 'ACCOMMODATION', 'OF', 'ANY', 'BODY'] +4970-29095-0019-2073: ref=['AND', 'THEN', 'THERE', 'WAS', 'BROAD', 'STREET'] +4970-29095-0019-2073: hyp=['AND', 'THEN', 'THERE', 'WAS', 'BROAD', 'STREET'] +4970-29095-0020-2074: ref=['THERE', 'CERTAINLY', 'WAS', 'NO', 'END', 'TO', 'IT', 'AND', 'EVEN', 'RUTH', 'WAS', 'PHILADELPHIAN', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'A', 'STREET', 'OUGHT', 'NOT', 'TO', 'HAVE', 'ANY', 'END', 'OR', 'ARCHITECTURAL', 'POINT', 'UPON', 'WHICH', 'THE', 'WEARY', 'EYE', 'COULD', 'REST'] +4970-29095-0020-2074: hyp=['THERE', 'IS', 'CERTAINLY', 'WAS', 'NO', 'END', 'TO', 'IT', 'AND', 'EVEN', 'RUTH', 'WAS', 'PHILADELPHIAN', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'A', 'STREET', 'OUGHT', 'NOT', 'TO', 'HAVE', 'ANY', 'END', 'OR', 'ARCHITECTURAL', 'BLINT', 'UPON', 'WHICH', 'THE', 'WEARY', 'EYE', 'COULD', 'REST'] +4970-29095-0021-2075: ref=['BUT', 'NEITHER', 'SAINT', 'GIRARD', 'NOR', 'BROAD', 'STREET', 'NEITHER', 'WONDERS', 'OF', 'THE', 'MINT', 'NOR', 'THE', 'GLORIES', 'OF', 'THE', 'HALL', 'WHERE', 'THE', 'GHOSTS', 'OF', 'OUR', 'FATHERS', 'SIT', 'ALWAYS', 'SIGNING', 'THE', 'DECLARATION', 'IMPRESSED', 'THE', 'VISITORS', 'SO', 'MUCH', 'AS', 'THE', 'SPLENDORS', 'OF', 'THE', 'CHESTNUT', 'STREET', 'WINDOWS', 'AND', 'THE', 'BARGAINS', 'ON', 'EIGHTH', 'STREET'] +4970-29095-0021-2075: hyp=['BUT', 'NEITHER', 'SAINT', 'GERARD', 'NOR', 'BROAD', 'STREET', 'NEITHER', 'WONDERS', 'OF', 'THE', 'MENT', 'NOR', 'THE', 'GLORIES', 'OF', 'THE', 'HALL', 'WHERE', 'THE', 'GHOSTS', 'OF', 'OUR', 'FATHERS', 'SIT', 'ALWAYS', 'SIGNING', 'THE', 'DECLARATION', 'IMPRESS', 'THE', 'VISITORS', 'SO', 'MUCH', 'AS', 'THE', 'SPLENDORS', 'OF', 'THE', 'CHESTNUT', 'STREET', 'WINDOWS', 'AND', 'THE', 'BARGAINS', 'ON', 'EIGHTH', 'STREET'] +4970-29095-0022-2076: ref=['IS', 'THEE', 'GOING', 'TO', 'THE', 'YEARLY', 'MEETING', 'RUTH', 'ASKED', 'ONE', 'OF', 'THE', 'GIRLS'] +4970-29095-0022-2076: hyp=['IS', 'THEE', 'GOING', 'TO', 'THE', 
'YEARLY', 'MEETING', 'RUTH', 'ASKED', 'ONE', 'OF', 'THE', 'GIRLS'] +4970-29095-0023-2077: ref=['I', 'HAVE', 'NOTHING', 'TO', 'WEAR', 'REPLIED', 'THAT', 'DEMURE', 'PERSON'] +4970-29095-0023-2077: hyp=['I', 'HAVE', 'NOTHING', 'TO', 'WEAR', 'REPLIED', 'THAT', 'DEMURE', 'PERSON'] +4970-29095-0024-2078: ref=['IT', 'HAS', 'OCCUPIED', 'MOTHER', 'A', 'LONG', 'TIME', 'TO', 'FIND', 'AT', 'THE', 'SHOPS', 'THE', 'EXACT', 'SHADE', 'FOR', 'HER', 'NEW', 'BONNET'] +4970-29095-0024-2078: hyp=['IT', 'HAS', 'OCCUPIED', 'MOTHER', 'A', 'LONG', 'TIME', 'TO', 'FIND', 'THAT', 'THE', 'SHOPS', 'THE', 'EXACT', 'SHADE', 'FOR', 'HER', 'NEW', 'BONNET'] +4970-29095-0025-2079: ref=['AND', 'THEE', "WON'T", 'GO', 'WHY', 'SHOULD', 'I'] +4970-29095-0025-2079: hyp=['AND', 'THEE', "WON'T", 'GO', 'WHY', 'SHOULD', 'I'] +4970-29095-0026-2080: ref=['IF', 'I', 'GO', 'TO', 'MEETING', 'AT', 'ALL', 'I', 'LIKE', 'BEST', 'TO', 'SIT', 'IN', 'THE', 'QUIET', 'OLD', 'HOUSE', 'IN', 'GERMANTOWN', 'WHERE', 'THE', 'WINDOWS', 'ARE', 'ALL', 'OPEN', 'AND', 'I', 'CAN', 'SEE', 'THE', 'TREES', 'AND', 'HEAR', 'THE', 'STIR', 'OF', 'THE', 'LEAVES'] +4970-29095-0026-2080: hyp=['IF', 'I', 'GO', 'TO', 'MEETING', 'AT', 'ALL', 'I', 'LIKE', 'BEST', 'TO', 'SIT', 'IN', 'THE', 'QUIET', 'OLD', 'HOUSE', 'IN', 'GERMANTOWN', 'WHERE', 'THE', 'WINDOWS', 'ARE', 'ALL', 'OPEN', 'AND', 'I', 'CAN', 'SEE', 'THE', 'TREES', 'AND', 'HERE', 'THE', 'STIR', 'OF', 'THE', 'LEAVES'] +4970-29095-0027-2081: ref=["IT'S", 'SUCH', 'A', 'CRUSH', 'AT', 'THE', 'YEARLY', 'MEETING', 'AT', 'ARCH', 'STREET', 'AND', 'THEN', "THERE'S", 'THE', 'ROW', 'OF', 'SLEEK', 'LOOKING', 'YOUNG', 'MEN', 'WHO', 'LINE', 'THE', 'CURBSTONE', 'AND', 'STARE', 'AT', 'US', 'AS', 'WE', 'COME', 'OUT'] +4970-29095-0027-2081: hyp=["IT'S", 'SUCH', 'A', 'CRUSH', 'AT', 'THE', 'YEARLY', 'MEETING', 'AT', 'ARCH', 'STREET', 'AND', 'THEN', "THERE'S", 'THE', 'ROW', 'OF', 'SLEEK', 'LOOKING', 'YOUNG', 'MEN', 'WHO', 'LIE', 'IN', 'THE', 'CURBSTONE', 'AND', 'STARE', 'AT', 'US', 'AS', 'WE', 'COME', 'OUT'] +4970-29095-0028-2082: ref=['HE', "DOESN'T", 'SAY', 'BUT', "IT'S", 'ON', 'THE', 'FRONTIER', 'AND', 'ON', 'THE', 'MAP', 'EVERYTHING', 'BEYOND', 'IT', 'IS', 'MARKED', 'INDIANS', 'AND', 'DESERT', 'AND', 'LOOKS', 'AS', 'DESOLATE', 'AS', 'A', 'WEDNESDAY', 'MEETING', 'HUMPH', 'IT', 'WAS', 'TIME', 'FOR', 'HIM', 'TO', 'DO', 'SOMETHING'] +4970-29095-0028-2082: hyp=['HE', "DOESN'T", 'SAY', 'BUT', "IT'S", 'ON', 'THE', 'FRONTIER', 'AND', 'ON', 'THE', 'MAP', 'EVERYTHING', 'BEYOND', 'IT', 'IS', 'MARKED', 'INDIANS', 'AND', 'DESERT', 'AND', 'LOOKS', 'AS', 'DESOLATE', 'AS', 'A', 'WINDSAY', 'MEETING', 'IT', 'WAS', 'TIME', 'FOR', 'HIM', 'TO', 'DO', 'SOMETHING'] +4970-29095-0029-2083: ref=['IS', 'HE', 'GOING', 'TO', 'START', 'A', 'DAILY', 'NEWSPAPER', 'AMONG', 'THE', 'KICK', 'A', 'POOS'] +4970-29095-0029-2083: hyp=['IS', 'HE', 'GOING', 'TO', 'START', 'A', 'DAILY', 'NEWSPAPER', 'AMONG', 'THE', 'KICKAPOOS'] +4970-29095-0030-2084: ref=['FATHER', "THEE'S", 'UNJUST', 'TO', 'PHILIP', "HE'S", 'GOING', 'INTO', 'BUSINESS'] +4970-29095-0030-2084: hyp=['FATHER', 'THESE', 'UNJUSTIFILL', 'UP', "HE'S", 'GOING', 'INTO', 'BUSINESS'] +4970-29095-0031-2085: ref=['HE', "DOESN'T", 'SAY', 'EXACTLY', 'WHAT', 'IT', 'IS', 'SAID', 'RUTH', 'A', 'LITTLE', 'DUBIOUSLY', 'BUT', "IT'S", 'SOMETHING', 'ABOUT', 'LAND', 'AND', 'RAILROADS', 'AND', 'THEE', 'KNOWS', 'FATHER', 'THAT', 'FORTUNES', 'ARE', 'MADE', 'NOBODY', 'KNOWS', 'EXACTLY', 'HOW', 'IN', 'A', 'NEW', 'COUNTRY'] +4970-29095-0031-2085: hyp=['HE', "DOESN'T", 'SAY', 'EXACTLY', 'WHAT', 'IT', 'IS', 'SAID', 'RUTH', 'A', 'LITTLE', 
'DUBIOUSLY', 'BUT', "IT'S", 'SOMETHING', 'ABOUT', 'LAND', 'AND', 'RAILROADS', 'AND', 'THE', 'NOSE', 'FATHER', 'THAT', 'FORTUNES', 'ARE', 'MADE', 'NOBODY', 'KNOWS', 'EXACTLY', 'HOW', 'IN', 'A', 'NEW', 'COUNTRY'] +4970-29095-0032-2086: ref=['BUT', 'PHILIP', 'IS', 'HONEST', 'AND', 'HE', 'HAS', 'TALENT', 'ENOUGH', 'IF', 'HE', 'WILL', 'STOP', 'SCRIBBLING', 'TO', 'MAKE', 'HIS', 'WAY'] +4970-29095-0032-2086: hyp=['BUT', 'PHILIP', 'IS', 'HONEST', 'AND', 'HE', 'HAS', 'TALENT', 'ENOUGH', 'IF', 'HE', 'WILL', 'STOP', 'SCRIBBLING', 'TO', 'MAKE', 'HIS', 'WAY'] +4970-29095-0033-2087: ref=['WHAT', 'A', 'BOX', 'WOMEN', 'ARE', 'PUT', 'INTO', 'MEASURED', 'FOR', 'IT', 'AND', 'PUT', 'IN', 'YOUNG', 'IF', 'WE', 'GO', 'ANYWHERE', "IT'S", 'IN', 'A', 'BOX', 'VEILED', 'AND', 'PINIONED', 'AND', 'SHUT', 'IN', 'BY', 'DISABILITIES'] +4970-29095-0033-2087: hyp=['WHAT', 'A', 'BOXWOMEN', 'ARE', 'PUT', 'INTO', 'MEASURED', 'FOR', 'IT', 'AND', 'PUT', 'IN', 'YOUNG', 'IF', 'WE', 'GO', 'ANYWHERE', "IT'S", 'IN', 'A', 'BOX', 'VEILED', 'AND', 'PINIONED', 'AND', 'SHUT', 'IN', 'BY', 'DISABILITIES'] +4970-29095-0034-2088: ref=['WHY', 'SHOULD', 'I', 'RUST', 'AND', 'BE', 'STUPID', 'AND', 'SIT', 'IN', 'INACTION', 'BECAUSE', 'I', 'AM', 'A', 'GIRL'] +4970-29095-0034-2088: hyp=['WHY', 'SHOULD', 'I', 'RUST', 'AND', 'BE', 'STUPID', 'AND', 'SIT', 'IN', 'AN', 'ACTION', 'BECAUSE', 'I', 'AM', 'A', 'GIRL'] +4970-29095-0035-2089: ref=['AND', 'IF', 'I', 'HAD', 'A', 'FORTUNE', 'WOULD', 'THEE', 'WANT', 'ME', 'TO', 'LEAD', 'A', 'USELESS', 'LIFE'] +4970-29095-0035-2089: hyp=['AND', 'IF', 'I', 'HAD', 'A', 'FORTUNE', 'WOULD', 'THEE', 'WANT', 'ME', 'TO', 'LEAD', 'A', 'USELESS', 'LIFE'] +4970-29095-0036-2090: ref=['HAS', 'THEE', 'CONSULTED', 'THY', 'MOTHER', 'ABOUT', 'A', 'CAREER', 'I', 'SUPPOSE', 'IT', 'IS', 'A', 'CAREER', 'THEE', 'WANTS'] +4970-29095-0036-2090: hyp=['HAS', 'THE', 'CONSULTED', 'THY', 'MOTHER', 'ABOUT', 'A', 'CAREER', 'I', 'SUPPOSE', 'IT', 'IS', 'A', 'CAREER', 'OF', 'THEE', 'WANTS'] +4970-29095-0037-2091: ref=['BUT', 'THAT', 'WISE', 'AND', 'PLACID', 'WOMAN', 'UNDERSTOOD', 'THE', 'SWEET', 'REBEL', 'A', 'GREAT', 'DEAL', 'BETTER', 'THAN', 'RUTH', 'UNDERSTOOD', 'HERSELF'] +4970-29095-0037-2091: hyp=['BUT', 'THAT', 'WISE', 'AND', 'PLACID', 'WOMAN', 'UNDERSTOOD', 'THE', 'SWEET', 'REBEL', 'A', 'GREAT', 'DEAL', 'BETTER', 'THAN', 'RUTH', 'UNDERSTOOD', 'HERSELF'] +4970-29095-0038-2092: ref=['RUTH', 'WAS', 'GLAD', 'TO', 'HEAR', 'THAT', 'PHILIP', 'HAD', 'MADE', 'A', 'PUSH', 'INTO', 'THE', 'WORLD', 'AND', 'SHE', 'WAS', 'SURE', 'THAT', 'HIS', 'TALENT', 'AND', 'COURAGE', 'WOULD', 'MAKE', 'A', 'WAY', 'FOR', 'HIM'] +4970-29095-0038-2092: hyp=['RUTH', 'WAS', 'GLAD', 'TO', 'HEAR', 'THAT', 'PHILIP', 'HAD', 'MADE', 'A', 'PUSH', 'INTO', 'THE', 'WORLD', 'AND', 'SHE', 'WAS', 'SURE', 'THAT', 'HIS', 'TALENT', 'AND', 'COURAGE', 'WOULD', 'MAKE', 'AWAY', 'FOR', 'HIM'] +4992-23283-0000-2140: ref=['BUT', 'THE', 'MORE', 'FORGETFULNESS', 'HAD', 'THEN', 'PREVAILED', 'THE', 'MORE', 'POWERFUL', 'WAS', 'THE', 'FORCE', 'OF', 'REMEMBRANCE', 'WHEN', 'SHE', 'AWOKE'] +4992-23283-0000-2140: hyp=['BUT', 'THE', 'MORE', 'FORGETFULNESS', 'HAD', 'THEN', 'PREVAILED', 'THE', 'MORE', 'POWERFUL', 'WAS', 'THE', 'FORCE', 'OF', 'REMEMBRANCE', 'WHEN', 'SHE', 'AWOKE'] +4992-23283-0001-2141: ref=['MISS', "MILNER'S", 'HEALTH', 'IS', 'NOT', 'GOOD'] +4992-23283-0001-2141: hyp=['MISS', "MILNER'S", 'HEALTH', 'IS', 'NOT', 'GOOD'] +4992-23283-0002-2142: ref=['SAID', 'MISSUS', 'HORTON', 'A', 'FEW', 'MINUTES', 'AFTER'] +4992-23283-0002-2142: hyp=['SAID', 'MISSUS', 'WHARTON', 'A', 'FEW', 'MINUTES', 'AFTER'] 
+4992-23283-0003-2143: ref=['SO', 'THERE', 'IS', 'TO', 'ME', 'ADDED', 'SANDFORD', 'WITH', 'A', 'SARCASTIC', 'SNEER'] +4992-23283-0003-2143: hyp=['SO', 'THERE', 'IS', 'TO', 'ME', 'ADDED', 'SANDFORD', 'WITH', 'A', 'SARCASTIC', 'SNEER'] +4992-23283-0004-2144: ref=['AND', 'YET', 'YOU', 'MUST', 'OWN', 'HER', 'BEHAVIOUR', 'HAS', 'WARRANTED', 'THEM', 'HAS', 'IT', 'NOT', 'BEEN', 'IN', 'THIS', 'PARTICULAR', 'INCOHERENT', 'AND', 'UNACCOUNTABLE'] +4992-23283-0004-2144: hyp=['AND', 'YET', 'YOU', 'MUST', 'OWN', 'HER', 'BEHAVIOR', 'HAS', 'WARRANTED', 'THEM', 'HAS', 'IT', 'NOT', 'BEEN', 'IN', 'THIS', 'PARTICULAR', 'INCOHERENT', 'AND', 'UNACCOUNTABLE'] +4992-23283-0005-2145: ref=['NOT', 'THAT', 'I', 'KNOW', 'OF', 'NOT', 'ONE', 'MORE', 'THAT', 'I', 'KNOW', 'OF', 'HE', 'REPLIED', 'WITH', 'ASTONISHMENT', 'AT', 'WHAT', 'SHE', 'HAD', 'INSINUATED', 'AND', 'YET', 'WITH', 'A', 'PERFECT', 'ASSURANCE', 'THAT', 'SHE', 'WAS', 'IN', 'THE', 'WRONG'] +4992-23283-0005-2145: hyp=['NOT', 'THAT', 'I', 'KNOW', 'OF', 'NOT', 'ONE', 'MORE', 'THAT', 'I', 'KNOW', 'OF', 'HE', 'REPLIED', 'WITH', 'ASTONISHMENT', 'AT', 'WHAT', 'SHE', 'HAD', 'INSINUATED', 'AND', 'YET', 'WITH', 'A', 'PERFECT', 'ASSURANCE', 'THAT', 'SHE', 'WAS', 'IN', 'THE', 'WRONG'] +4992-23283-0006-2146: ref=['PERHAPS', 'I', 'AM', 'MISTAKEN', 'ANSWERED', 'SHE'] +4992-23283-0006-2146: hyp=['PERHAPS', 'I', 'AM', 'MISTAKEN', 'ANSWERED', 'SHE'] +4992-23283-0007-2147: ref=['TO', 'ASK', 'ANY', 'MORE', 'QUESTIONS', 'OF', 'YOU', 'I', 'BELIEVE', 'WOULD', 'BE', 'UNFAIR'] +4992-23283-0007-2147: hyp=['TO', 'ASK', 'ANY', 'MORE', 'QUESTIONS', 'OF', 'YOU', 'I', 'BELIEVE', 'WOULD', 'BE', 'UNFAIR'] +4992-23283-0008-2148: ref=['HE', 'SEEMED', 'TO', 'WAIT', 'FOR', 'HER', 'REPLY', 'BUT', 'AS', 'SHE', 'MADE', 'NONE', 'HE', 'PROCEEDED'] +4992-23283-0008-2148: hyp=['HE', 'SEEMED', 'TO', 'WAIT', 'FOR', 'HER', 'REPLY', 'BUT', 'AS', 'SHE', 'MADE', 'NONE', 'HE', 'PROCEEDED'] +4992-23283-0009-2149: ref=['OH', 'MY', 'LORD', 'CRIED', 'MISS', 'WOODLEY', 'WITH', 'A', 'MOST', 'FORCIBLE', 'ACCENT', 'YOU', 'ARE', 'THE', 'LAST', 'PERSON', 'ON', 'EARTH', 'SHE', 'WOULD', 'PARDON', 'ME', 'FOR', 'ENTRUSTING'] +4992-23283-0009-2149: hyp=['OH', 'MY', 'LORD', 'CRIED', 'MISS', 'WOODLEY', 'WITH', 'A', 'MOST', 'FORCIBLE', 'ACCENT', 'YOU', 'ARE', 'THE', 'LAST', 'PERSON', 'ON', 'EARTH', 'SHE', 'WOULD', 'PARDON', 'ME', 'FOR', 'INTRUSTING'] +4992-23283-0010-2150: ref=['BUT', 'IN', 'SUCH', 'A', 'CASE', 'MISS', "MILNER'S", 'ELECTION', 'OF', 'A', 'HUSBAND', 'SHALL', 'NOT', 'DIRECT', 'MINE'] +4992-23283-0010-2150: hyp=['BUT', 'IN', 'SUCH', 'A', 'CASE', 'MISS', "MILNER'S", 'ELECTION', 'OF', 'A', 'HUSBAND', 'SHALL', 'NOT', 'DIRECT', 'MINE'] +4992-23283-0011-2151: ref=['IF', 'SHE', 'DOES', 'NOT', 'KNOW', 'HOW', 'TO', 'ESTIMATE', 'HER', 'OWN', 'VALUE', 'I', 'DO'] +4992-23283-0011-2151: hyp=['IF', 'SHE', 'DOES', 'NOT', 'KNOW', 'HOW', 'TO', 'ESTIMATE', 'HER', 'OWN', 'VALUE', 'I', 'DO'] +4992-23283-0012-2152: ref=['INDEPENDENT', 'OF', 'HER', 'FORTUNE', 'SHE', 'HAS', 'BEAUTY', 'TO', 'CAPTIVATE', 'THE', 'HEART', 'OF', 'ANY', 'MAN', 'AND', 'WITH', 'ALL', 'HER', 'FOLLIES', 'SHE', 'HAS', 'A', 'FRANKNESS', 'IN', 'HER', 'MANNER', 'AN', 'UNAFFECTED', 'WISDOM', 'IN', 'HER', 'THOUGHTS', 'A', 'VIVACITY', 'IN', 'HER', 'CONVERSATION', 'AND', 'WITHAL', 'A', 'SOFTNESS', 'IN', 'HER', 'DEMEANOUR', 'THAT', 'MIGHT', 'ALONE', 'ENGAGE', 'THE', 'AFFECTIONS', 'OF', 'A', 'MAN', 'OF', 'THE', 'NICEST', 'SENTIMENTS', 'AND', 'THE', 'STRONGEST', 'UNDERSTANDING'] +4992-23283-0012-2152: hyp=['INDEPENDENT', 'OF', 'HER', 'FORTUNE', 'SHE', 'HAS', 'BEAUTY', 
'TO', 'CAPTIVATE', 'THE', 'HEART', 'OF', 'ANY', 'MAN', 'AND', 'WITH', 'ALL', 'HER', 'FOLLIES', 'SHE', 'HAS', 'A', 'FRANKNESS', 'IN', 'HER', 'MANNER', 'AN', 'UNAFFECTED', 'WISDOM', 'IN', 'HER', 'THOUGHTS', 'OF', 'A', 'VIVACITY', 'IN', 'HER', 'CONVERSATION', 'AND', 'WITHAL', 'A', 'SOFTNESS', 'IN', 'HER', 'DEMEANOUR', 'THAT', 'MIGHT', 'ALONE', 'ENGAGE', 'THE', 'AFFECTIONS', 'OF', 'A', 'MAN', 'OF', 'THE', 'NICEST', 'SENTIMENTS', 'AND', 'THE', 'STRONGEST', 'UNDERSTANDING'] +4992-23283-0013-2153: ref=['MY', 'LORD', 'MISS', "MILNER'S", 'TASTE', 'IS', 'NOT', 'A', 'DEPRAVED', 'ONE', 'IT', 'IS', 'BUT', 'TOO', 'REFINED'] +4992-23283-0013-2153: hyp=['MY', 'LORD', 'MISS', "MILNER'S", 'TASTE', 'IS', 'NOT', 'A', 'DEPRAVED', 'ONE', 'IT', 'IS', 'BUT', 'TOO', 'REFINED'] +4992-23283-0014-2154: ref=['WHAT', 'CAN', 'YOU', 'MEAN', 'BY', 'THAT', 'MISS', 'WOODLEY', 'YOU', 'TALK', 'MYSTERIOUSLY'] +4992-23283-0014-2154: hyp=['WHAT', 'CAN', 'YOU', 'MEAN', 'BY', 'THAT', 'MISS', 'WOODLEY', 'YOU', 'TALK', 'MYSTERIOUSLY'] +4992-23283-0015-2155: ref=['IS', 'SHE', 'NOT', 'AFRAID', 'THAT', 'I', 'WILL', 'THWART', 'HER', 'INCLINATIONS'] +4992-23283-0015-2155: hyp=['IS', 'SHE', 'NOT', 'AFRAID', 'THAT', 'I', 'WILL', 'THWART', 'HER', 'INCLINATIONS'] +4992-23283-0016-2156: ref=['AGAIN', 'HE', 'SEARCHED', 'HIS', 'OWN', 'THOUGHTS', 'NOR', 'INEFFECTUALLY', 'AS', 'BEFORE'] +4992-23283-0016-2156: hyp=['AGAIN', 'HE', 'SEARCHED', 'HIS', 'OWN', 'THOUGHTS', 'NOR', 'INEFFECTUALLY', 'AS', 'BEFORE'] +4992-23283-0017-2157: ref=['MISS', 'WOODLEY', 'WAS', 'TOO', 'LITTLE', 'VERSED', 'IN', 'THE', 'SUBJECT', 'TO', 'KNOW', 'THIS', 'WOULD', 'HAVE', 'BEEN', 'NOT', 'TO', 'LOVE', 'AT', 'ALL', 'AT', 'LEAST', 'NOT', 'TO', 'THE', 'EXTENT', 'OF', 'BREAKING', 'THROUGH', 'ENGAGEMENTS', 'AND', 'ALL', 'THE', 'VARIOUS', 'OBSTACLES', 'THAT', 'STILL', 'MILITATED', 'AGAINST', 'THEIR', 'UNION'] +4992-23283-0017-2157: hyp=['MISS', 'WOODLEY', 'WAS', 'TOO', 'LITTLE', 'VERSED', 'IN', 'THE', 'SUBJECT', 'TO', 'KNOW', 'THIS', 'WOULD', 'HAVE', 'BEEN', 'NOT', 'TO', 'LOVE', 'AT', 'ALL', 'AT', 'LEAST', 'NOT', 'TO', 'THE', 'EXTENT', 'OF', 'BREAKING', 'THROUGH', 'ENGAGEMENTS', 'AND', 'ALL', 'THE', 'VARIOUS', 'OBSTACLES', 'THAT', 'STILL', 'MITIGATED', 'AGAINST', 'THEIR', 'UNION'] +4992-23283-0018-2158: ref=['TO', 'RELIEVE', 'HER', 'FROM', 'BOTH', 'HE', 'LAID', 'HIS', 'HAND', 'WITH', 'FORCE', 'UPON', 'HIS', 'HEART', 'AND', 'SAID', 'DO', 'YOU', 'BELIEVE', 'ME'] +4992-23283-0018-2158: hyp=['TO', 'RELIEVE', 'HER', 'FROM', 'BOTH', 'HE', 'LAID', 'HIS', 'HAND', 'WITH', 'FORCE', 'UPON', 'HIS', 'HEART', 'AND', 'SAID', 'DO', 'YOU', 'BELIEVE', 'ME'] +4992-23283-0019-2159: ref=['I', 'WILL', 'MAKE', 'NO', 'UNJUST', 'USE', 'OF', 'WHAT', 'I', 'KNOW', 'HE', 'REPLIED', 'WITH', 'FIRMNESS', 'I', 'BELIEVE', 'YOU', 'MY', 'LORD'] +4992-23283-0019-2159: hyp=['I', 'WILL', 'MAKE', 'NO', 'UNJUST', 'USE', 'OF', 'WHAT', 'I', 'KNOW', 'HE', 'REPLIED', 'WITH', 'FIRMNESS', 'I', 'BELIEVE', 'YOU', 'MY', 'LORD'] +4992-23283-0020-2160: ref=['I', 'HAVE', 'NEVER', 'YET', 'HOWEVER', 'BEEN', 'VANQUISHED', 'BY', 'THEM', 'AND', 'EVEN', 'UPON', 'THIS', 'OCCASION', 'MY', 'REASON', 'SHALL', 'COMBAT', 'THEM', 'TO', 'THE', 'LAST', 'AND', 'MY', 'REASON', 'SHALL', 'FAIL', 'ME', 'BEFORE', 'I', 'DO', 'WRONG'] +4992-23283-0020-2160: hyp=['I', 'HAVE', 'NEVER', 'YET', 'HOWEVER', 'BEEN', 'VANQUISHED', 'BY', 'THEM', 'AND', 'EVEN', 'UPON', 'THIS', 'OCCASION', 'MY', 'REASON', 'SHALL', 'COMBAT', 'THEM', 'TO', 'THE', 'LAST', 'AND', 'MY', 'REASON', 'SHALL', 'FAIL', 'ME', 'BEFORE', 'I', 'DO', 'WRONG'] +4992-41797-0000-2117: ref=['YES', 
'DEAD', 'THESE', 'FOUR', 'YEARS', 'AN', 'A', 'GOOD', 'JOB', 'FOR', 'HER', 'TOO'] +4992-41797-0000-2117: hyp=['YES', 'DEAD', 'THESE', 'FOUR', 'YEARS', 'AND', 'A', 'GOOD', 'JOB', 'FOR', 'HER', 'TOO'] +4992-41797-0001-2118: ref=['WELL', 'AS', 'I', 'SAY', "IT'S", 'AN', 'AWFUL', 'QUEER', 'WORLD', 'THEY', 'CLAP', 'ALL', 'THE', 'BURGLARS', 'INTO', 'JAIL', 'AND', 'THE', 'MURDERERS', 'AND', 'THE', 'WIFE', 'BEATERS', "I'VE", 'ALLERS', 'THOUGHT', 'A', 'GENTLE', 'REPROOF', 'WOULD', 'BE', 'ENOUGH', 'PUNISHMENT', 'FOR', 'A', 'WIFE', 'BEATER', 'CAUSE', 'HE', 'PROBABLY', 'HAS', 'A', 'LOT', 'O', 'PROVOCATION', 'THAT', 'NOBODY', 'KNOWS', 'AND', 'THE', 'FIREBUGS', "CAN'T", 'THINK', 'O', 'THE', 'RIGHT', 'NAME', 'SOMETHING', 'LIKE', 'CENDENARIES', 'AN', 'THE', 'BREAKERS', 'O', 'THE', 'PEACE', 'AN', 'WHAT', 'NOT', 'AN', 'YET', 'THE', 'LAW', 'HAS', 'NOTHIN', 'TO', 'SAY', 'TO', 'A', 'MAN', 'LIKE', 'HEN', 'LORD'] +4992-41797-0001-2118: hyp=['WELL', 'AS', 'I', 'SAY', "IT'S", 'AN', 'AWFUL', 'QUEER', 'WORLD', 'THEY', 'CLAP', 'ALL', 'THE', 'BURGLARS', 'AND', 'DOWN', 'THE', 'MURDERERS', 'AND', 'THE', 'WHITE', 'BEATERS', 'I', 'ALLERS', 'THOUGHT', 'A', 'GENTLE', 'REPROOF', 'WOULD', 'BE', 'ENOUGH', 'PUNISHMENT', 'FOR', 'A', 'WIFE', 'BEATER', 'CAUSE', 'HE', 'PROBABLY', 'HAS', 'A', 'LOT', 'OF', 'PROVOCATION', 'THAT', 'NOBODY', 'KNOWS', 'AND', 'THE', 'FIRE', 'BUGS', "CAN'T", 'THINK', 'OF', 'THE', 'RIGHT', 'NAME', 'SOMETHIN', 'LIKE', 'SENDIARIES', 'AND', 'THE', 'BREAKERS', 'OF', 'THE', 'PEACE', 'AND', 'WHAT', 'NOT', 'AND', 'YET', 'THE', 'LAW', 'HAS', 'NOTHING', 'TO', 'SAY', 'TO', 'A', 'MAN', 'LIKE', 'HANDLED'] +4992-41797-0002-2119: ref=['GRANDFATHER', 'WAS', 'ALEXANDER', 'CAREY', 'L', 'L', 'D', 'DOCTOR', 'OF', 'LAWS', 'THAT', 'IS'] +4992-41797-0002-2119: hyp=['GRANDFATHER', 'WAS', 'ALEXANDER', 'CAREY', 'L', 'D', 'DOCTOR', 'OF', 'LAWS', 'THAT', 'IS'] +4992-41797-0003-2120: ref=['MISTER', 'POPHAM', 'LAID', 'DOWN', 'HIS', 'BRUSH'] +4992-41797-0003-2120: hyp=['MISTER', 'POPHAM', 'LAID', 'DOWN', 'HIS', 'BRUSH'] +4992-41797-0004-2121: ref=['I', 'SWAN', 'TO', 'MAN', 'HE', 'EJACULATED', 'IF', 'YOU', "DON'T", 'WORK', 'HARD', 'YOU', "CAN'T", 'KEEP', 'UP', 'WITH', 'THE', 'TIMES', 'DOCTOR', 'OF', 'LAWS'] +4992-41797-0004-2121: hyp=['I', 'SWAY', 'INTO', 'MEN', 'HE', 'EJACULATED', 'IF', 'YOU', "DON'T", 'WORK', 'HARD', 'YOU', "CAN'T", 'KEEP', 'UP', 'WITH', 'THE', 'TIMES', 'DOCTOR', 'OF', 'LAWS'] +4992-41797-0005-2122: ref=['DONE', 'HE', "AIN'T", 'DONE', 'A', 'THING', "HE'D", 'OUGHTER', 'SENCE', 'HE', 'WAS', 'BORN'] +4992-41797-0005-2122: hyp=['DONE', 'HE', "AIN'T", 'DONE', 'A', 'THING', 'HE', 'ORDERED', 'SINCE', 'HE', 'WAS', 'BORN'] +4992-41797-0006-2123: ref=['HE', 'KEEPS', 'THE', 'THOU', 'SHALT', 'NOT', 'COMMANDMENTS', 'FIRST', 'RATE', 'HEN', 'LORD', 'DOES'] +4992-41797-0006-2123: hyp=['HE', 'KEEPS', 'THE', 'THOU', 'SHALT', 'NOT', 'COMMANDMENTS', 'FIRST', 'RATE', 'HENLOORD', 'DOES'] +4992-41797-0007-2124: ref=['HE', 'GIVE', 'UP', 'HIS', 'POSITION', 'AND', 'SHUT', 'THE', 'FAMILY', 'UP', 'IN', 'THAT', 'TOMB', 'OF', 'A', 'HOUSE', 'SO', 'T', 'HE', 'COULD', 'STUDY', 'HIS', 'BOOKS'] +4992-41797-0007-2124: hyp=['HE', 'GAVE', 'UP', 'HIS', 'POSITION', 'AND', 'SHUT', 'THE', 'FAMILY', 'UP', 'IN', 'THAT', 'TOMB', 'OF', 'A', 'HOUSE', 'SODIN', 'HE', "COULDN'T", 'STUDY', 'HIS', 'BOOKS'] +4992-41797-0008-2125: ref=['MISTER', 'POPHAM', 'EXAGGERATED', 'NOTHING', 'BUT', 'ON', 'THE', 'CONTRARY', 'LEFT', 'MUCH', 'UNSAID', 'IN', 'HIS', 'NARRATIVE', 'OF', 'THE', 'FAMILY', 'AT', 'THE', 'HOUSE', 'OF', 'LORDS'] +4992-41797-0008-2125: hyp=['MISTER', 'POPHAM', 
'EXAGGERATED', 'NOTHING', 'BUT', 'ON', 'THE', 'CONTRARY', 'LEFT', 'MUCH', 'UNSAID', 'IN', 'HIS', 'NARRATIVE', 'OF', 'THE', 'FAMILY', 'AT', 'THE', 'HOUSE', 'OF', 'LORDS'] +4992-41797-0009-2126: ref=['HENRY', 'LORD', 'WITH', 'THE', 'DEGREE', 'OF', 'PH', 'D', 'TO', 'HIS', 'CREDIT', 'HAD', 'BEEN', 'PROFESSOR', 'OF', 'ZOOLOGY', 'AT', 'A', 'NEW', 'ENGLAND', 'COLLEGE', 'BUT', 'HAD', 'RESIGNED', 'HIS', 'POST', 'IN', 'ORDER', 'TO', 'WRITE', 'A', 'SERIES', 'OF', 'SCIENTIFIC', 'TEXT', 'BOOKS'] +4992-41797-0009-2126: hyp=['HENRY', 'LORD', 'WITH', 'THE', 'DEGREE', 'OF', 'PH', 'TO', 'HIS', 'CREDIT', 'HAD', 'BEEN', 'PROFESSOR', 'OF', 'ZOOLOGY', 'AT', 'A', 'NEW', 'ENGLAND', 'COLLEGE', 'BUT', 'HAD', 'RESIGNED', 'HIS', 'POST', 'IN', 'ORDER', 'TO', 'WRITE', 'A', 'SERIES', 'OF', 'SCIENTIFIC', 'TEXT', 'BOOKS'] +4992-41797-0010-2127: ref=['ALWAYS', 'IRRITABLE', 'COLD', 'INDIFFERENT', 'HE', 'HAD', 'GROWN', 'RAPIDLY', 'MORE', 'SO', 'AS', 'YEARS', 'WENT', 'ON'] +4992-41797-0010-2127: hyp=['ALWAYS', 'IRRITABLE', 'COLD', 'INDIFFERENT', 'HE', 'HAD', 'GROWN', 'RAPIDLY', 'MORE', 'SO', 'AS', 'YEARS', 'WENT', 'ON'] +4992-41797-0011-2128: ref=['WHATEVER', 'APPEALED', 'TO', 'HER', 'SENSE', 'OF', 'BEAUTY', 'WAS', 'STRAIGHTWAY', 'TRANSFERRED', 'TO', 'PAPER', 'OR', 'CANVAS'] +4992-41797-0011-2128: hyp=['WHATEVER', 'APPEALED', 'TO', 'HER', 'SENSE', 'OF', 'BEAUTY', 'WAS', 'STRAIGHTWAY', 'TRANSFERRED', 'TO', 'PAPER', 'OR', 'GAMBUS'] +4992-41797-0012-2129: ref=['SHE', 'IS', 'WILD', 'TO', 'KNOW', 'HOW', 'TO', 'DO', 'THINGS'] +4992-41797-0012-2129: hyp=['SHE', 'IS', 'WILD', 'TO', 'KNOW', 'HOW', 'TO', 'DO', 'THINGS'] +4992-41797-0013-2130: ref=['SHE', 'MAKES', 'EFFORT', 'AFTER', 'EFFORT', 'TREMBLING', 'WITH', 'EAGERNESS', 'AND', 'WHEN', 'SHE', 'FAILS', 'TO', 'REPRODUCE', 'WHAT', 'SHE', 'SEES', 'SHE', 'WORKS', 'HERSELF', 'INTO', 'A', 'FRENZY', 'OF', 'GRIEF', 'AND', 'DISAPPOINTMENT'] +4992-41797-0013-2130: hyp=['SHE', 'MAKES', 'EFFORT', 'AFTER', 'EFFORT', 'TREMBLING', 'WITH', 'EAGERNESS', 'THAN', 'WHEN', 'SHE', 'FAILS', 'TO', 'REPRODUCE', 'WHAT', 'SHE', 'SEES', 'SHE', 'WORKS', 'HERSELF', 'INTO', 'A', 'FRENZY', 'OF', 'GRIEF', 'AND', 'DISAPPOINTMENT'] +4992-41797-0014-2131: ref=['WHEN', 'SHE', 'COULD', 'NOT', 'MAKE', 'A', 'RABBIT', 'OR', 'A', 'BIRD', 'LOOK', 'REAL', 'ON', 'PAPER', 'SHE', 'SEARCHED', 'IN', 'HER', "FATHER'S", 'BOOKS', 'FOR', 'PICTURES', 'OF', 'ITS', 'BONES'] +4992-41797-0014-2131: hyp=['WHEN', 'SHE', 'COULD', 'NOT', 'MAKE', 'A', 'RABBIT', 'OR', 'A', 'BIRD', 'LOOK', 'REAL', 'ON', 'PAPER', 'SHE', 'SEARCHED', 'IN', 'HER', "FATHER'S", 'BOOKS', 'FOR', 'PICTURES', 'OF', 'ITS', 'BONES'] +4992-41797-0015-2132: ref=['CYRIL', 'THERE', 'MUST', 'BE', 'SOME', 'BETTER', 'WAY', 'OF', 'DOING', 'I', 'JUST', 'DRAW', 'THE', 'OUTLINE', 'OF', 'AN', 'ANIMAL', 'AND', 'THEN', 'I', 'PUT', 'HAIRS', 'OR', 'FEATHERS', 'ON', 'IT', 'THEY', 'HAVE', 'NO', 'BODIES'] +4992-41797-0015-2132: hyp=['CYRIL', 'THERE', 'MUST', 'BE', 'SOME', 'BETTER', 'WAY', 'OF', 'DOING', 'I', 'JUST', 'DRAW', 'THE', 'OUTLINE', 'OF', 'AN', 'ANIMAL', 'AND', 'THEN', 'I', 'PUT', 'HAIRS', 'OR', 'FEATHERS', 'ON', 'IT', 'THEY', 'HAVE', 'NO', 'BODIES'] +4992-41797-0016-2133: ref=['THEY', "COULDN'T", 'RUN', 'NOR', 'MOVE', "THEY'RE", 'JUST', 'PASTEBOARD'] +4992-41797-0016-2133: hyp=['THEY', "COULDN'T", 'RUN', 'OR', 'MOVE', "THEY'RE", 'JUST', 'PASTEBOARD'] +4992-41797-0017-2134: ref=['HE', "WOULDN'T", 'SEARCH', 'SO', "DON'T", 'WORRY', 'REPLIED', 'CYRIL', 'QUIETLY', 'AND', 'THE', 'TWO', 'LOOKED', 'AT', 'EACH', 'OTHER', 'AND', 'KNEW', 'THAT', 'IT', 'WAS', 'SO'] +4992-41797-0017-2134: 
hyp=['HE', "WOULDN'T", 'SEARCH', 'SO', "DON'T", 'WORRY', 'REPLIED', 'CYRIL', 'QUIETLY', 'AND', 'THE', 'TWO', 'LOOKED', 'AT', 'EACH', 'OTHER', 'AND', 'KNEW', 'THAT', 'IT', 'WAS', 'SO'] +4992-41797-0018-2135: ref=['THERE', 'IN', 'THE', 'CEDAR', 'HOLLOW', 'THEN', 'LIVED', 'OLIVE', 'LORD', 'AN', 'ANGRY', 'RESENTFUL', 'LITTLE', 'CREATURE', 'WEIGHED', 'DOWN', 'BY', 'A', 'FIERCE', 'SENSE', 'OF', 'INJURY'] +4992-41797-0018-2135: hyp=['THERE', 'IN', 'THE', 'CEDAR', 'HOLLOWED', 'THEN', 'LIVED', 'OLIVE', 'LORD', 'AN', 'ANGRY', 'RESENTFUL', 'LITTLE', 'CREATURE', 'WEIGHED', 'DOWN', 'BY', 'A', 'FIERCE', 'SENSE', 'OF', 'INJURY'] +4992-41797-0019-2136: ref=["OLIVE'S", 'MOURNFUL', 'BLACK', 'EYES', 'MET', "NANCY'S", 'SPARKLING', 'BROWN', 'ONES'] +4992-41797-0019-2136: hyp=['ALL', 'OF', 'THIS', 'MOURNFUL', 'BLACK', 'EYES', 'MET', "NANCY'S", 'SPARKLING', 'BROWN', 'ONES'] +4992-41797-0020-2137: ref=["NANCY'S", 'CURLY', 'CHESTNUT', 'CROP', 'SHONE', 'IN', 'THE', 'SUN', 'AND', "OLIVE'S", 'THICK', 'BLACK', 'PLAITS', 'LOOKED', 'BLACKER', 'BY', 'CONTRAST'] +4992-41797-0020-2137: hyp=["NANCY'S", 'CURLY', 'CHESTNUT', 'CROP', 'SHONE', 'IN', 'THE', 'SUN', 'AND', "OLIVE'S", 'THICK', 'BLACK', 'PLATES', 'LOOKED', 'BLACKER', 'BY', 'CONTRAST'] +4992-41797-0021-2138: ref=["SHE'S", 'WONDERFUL', 'MORE', 'WONDERFUL', 'THAN', 'ANYBODY', "WE'VE", 'EVER', 'SEEN', 'ANYWHERE', 'AND', 'SHE', 'DRAWS', 'BETTER', 'THAN', 'THE', 'TEACHER', 'IN', 'CHARLESTOWN'] +4992-41797-0021-2138: hyp=['SHE', 'IS', 'WONDERFUL', 'MORE', 'WONDERFUL', 'IN', 'ANYBODY', "WE'VE", 'EVER', 'SEEN', 'ANYWHERE', 'AND', 'SHE', 'DRAWS', 'BETTER', 'THAN', 'THE', 'TEACHER', 'IN', 'CHARLESTOWN'] +4992-41797-0022-2139: ref=["SHE'S", 'OLDER', 'THAN', 'I', 'AM', 'BUT', 'SO', 'TINY', 'AND', 'SAD', 'AND', 'SHY', 'THAT', 'SHE', 'SEEMS', 'LIKE', 'A', 'CHILD'] +4992-41797-0022-2139: hyp=["SHE'S", 'OLDER', 'THAN', 'I', 'AM', 'BUT', 'SO', 'TINY', 'AND', 'SAD', 'AND', 'SHY', 'THAT', 'SHE', 'SEEMS', 'LIKE', 'A', 'CHILD'] +4992-41806-0000-2161: ref=['NATTY', 'HARMON', 'TRIED', 'THE', 'KITCHEN', 'PUMP', 'SECRETLY', 'SEVERAL', 'TIMES', 'DURING', 'THE', 'EVENING', 'FOR', 'THE', 'WATER', 'HAD', 'TO', 'RUN', 'UP', 'HILL', 'ALL', 'THE', 'WAY', 'FROM', 'THE', 'WELL', 'TO', 'THE', 'KITCHEN', 'SINK', 'AND', 'HE', 'BELIEVED', 'THIS', 'TO', 'BE', 'A', 'CONTINUAL', 'MIRACLE', 'THAT', 'MIGHT', 'GIVE', 'OUT', 'AT', 'ANY', 'MOMENT'] +4992-41806-0000-2161: hyp=['NATTY', 'HARMON', 'TRIED', 'THE', 'KITCHEN', 'PUMP', 'SECRETLY', 'SEVERAL', 'TIMES', 'DURING', 'THE', 'EVENING', 'FOR', 'THE', 'WATER', 'HAD', 'TO', 'RUN', 'UP', 'HILL', 'ALL', 'THE', 'WAY', 'FROM', 'THE', 'WELL', 'TO', 'THE', 'KITCHEN', 'SINK', 'AND', 'HE', 'BELIEVED', 'THIS', 'TO', 'BE', 'CONTINUAL', 'MIRACLE', 'THAT', 'MIGHT', 'GIVE', 'OUT', 'AT', 'ANY', 'MOMENT'] +4992-41806-0001-2162: ref=['TO', 'NIGHT', 'THERE', 'WAS', 'NO', 'NEED', 'OF', 'EXTRA', 'HEAT', 'AND', 'THERE', 'WERE', 'GREAT', 'CEREMONIES', 'TO', 'BE', 'OBSERVED', 'IN', 'LIGHTING', 'THE', 'FIRES', 'ON', 'THE', 'HEARTHSTONES'] +4992-41806-0001-2162: hyp=['TO', 'NIGHT', 'THERE', 'WAS', 'NO', 'NEED', 'OF', 'EXTRA', 'HEAT', 'AND', 'THERE', 'WERE', 'GREAT', 'CEREMONIES', 'TO', 'BE', 'OBSERVED', 'IN', 'LIGHTING', 'THE', 'FIRES', 'ON', 'THE', 'HEARTHSTONES'] +4992-41806-0002-2163: ref=['THEY', 'BEGAN', 'WITH', 'THE', 'ONE', 'IN', 'THE', 'FAMILY', 'SITTING', 'ROOM', 'COLONEL', 'WHEELER', 'RALPH', 'THURSTON', 'MISTER', 'AND', 'MISSUS', 'BILL', 'HARMON', 'WITH', 'NATTY', 'AND', 'RUFUS', 'MISTER', 'AND', 'MISSUS', 'POPHAM', 'WITH', 'DIGBY', 'AND', 'LALLIE', 'JOY', 'ALL', 'STANDING', 
'IN', 'ADMIRING', 'GROUPS', 'AND', 'THRILLING', 'WITH', 'DELIGHT', 'AT', 'THE', 'ORDER', 'OF', 'EVENTS'] +4992-41806-0002-2163: hyp=['THEY', 'BEGAN', 'WITH', 'THE', 'ONE', 'IN', 'THE', 'FAMILY', 'SITTING', 'ROOM', 'COLONEL', 'WHEELER', 'RALPH', 'THURSTON', 'MISTER', 'AND', 'MISSUS', 'BILL', 'HARMON', 'WITH', 'NATTY', 'AND', 'RUFFUS', 'MISTER', 'AND', 'MISSUS', 'POPPUM', 'WITH', 'DIGBY', 'AND', 'LALLY', 'JOY', 'ALL', 'STANDING', 'IN', 'ADMIRING', 'GROUPS', 'AND', 'THRILLING', 'WITH', 'DELIGHT', 'AT', 'THE', 'ORDER', 'OF', 'EVENTS'] +4992-41806-0003-2164: ref=['KATHLEEN', 'WAVED', 'THE', 'TORCH', 'TO', 'AND', 'FRO', 'AS', 'SHE', 'RECITED', 'SOME', 'BEAUTIFUL', 'LINES', 'WRITTEN', 'FOR', 'SOME', 'SUCH', 'PURPOSE', 'AS', 'THAT', 'WHICH', 'CALLED', 'THEM', 'TOGETHER', 'TO', 'NIGHT'] +4992-41806-0003-2164: hyp=['CATHERINE', 'WAVED', 'THE', 'TORCH', 'TO', 'AND', 'FRO', 'AS', 'SHE', 'RECITED', 'SOME', 'BEAUTIFUL', 'LINES', 'WRITTEN', 'FOR', 'SOME', 'SUCH', 'PURPOSE', 'AS', 'THAT', 'WHICH', 'CALLED', 'THEM', 'TOGETHER', 'TO', 'NIGHT'] +4992-41806-0004-2165: ref=['BURN', 'FIRE', 'BURN', 'FLICKER', 'FLICKER', 'FLAME'] +4992-41806-0004-2165: hyp=['BURNE', 'FIRE', 'BURN', 'FLICKER', 'FLICKER', 'FLAME'] +4992-41806-0005-2166: ref=['NEXT', 'CAME', "OLIVE'S", 'TURN', 'TO', 'HELP', 'IN', 'THE', 'CEREMONIES'] +4992-41806-0005-2166: hyp=['NEXT', 'CAME', 'OLIVES', 'TURN', 'TO', 'HELP', 'IN', 'THE', 'CEREMONIES'] +4992-41806-0006-2167: ref=['RALPH', 'THURSTON', 'HAD', 'FOUND', 'A', 'LINE', 'OF', 'LATIN', 'FOR', 'THEM', 'IN', 'HIS', 'BELOVED', 'HORACE', 'TIBI', 'SPLENDET', 'FOCUS', 'FOR', 'YOU', 'THE', 'HEARTH', 'FIRE', 'SHINES'] +4992-41806-0006-2167: hyp=['RALPH', 'THURSTON', 'HAD', 'FOUND', 'A', 'LINE', 'OF', 'LATIN', 'FOR', 'THEM', 'IN', 'HIS', 'BELOVED', 'HORRANCE', 'TIBEE', 'SPLENDID', 'FOCUS', 'FOR', 'YOU', 'THE', 'HEARTH', 'FIRE', 'SHINES'] +4992-41806-0007-2168: ref=['OLIVE', 'HAD', 'PAINTED', 'THE', 'MOTTO', 'ON', 'A', 'LONG', 'NARROW', 'PANEL', 'OF', 'CANVAS', 'AND', 'GIVING', 'IT', 'TO', 'MISTER', 'POPHAM', 'STOOD', 'BY', 'THE', 'FIRESIDE', 'WHILE', 'HE', 'DEFTLY', 'FITTED', 'IT', 'INTO', 'THE', 'PLACE', 'PREPARED', 'FOR', 'IT'] +4992-41806-0007-2168: hyp=['OLIVE', 'HAD', 'PAINTED', 'THE', 'MOTTO', 'ON', 'A', 'LONG', 'NARROW', 'PANEL', 'OF', 'CANVAS', 'AND', 'GIVING', 'IT', 'TO', 'MISTER', 'POPHAM', 'STOOD', 'BY', 'THE', 'FIRESIDE', 'WHILE', 'HE', 'DEFTLY', 'FITTED', 'IT', 'INTO', 'THE', 'PLACE', 'PREPARED', 'FOR', 'IT'] +4992-41806-0008-2169: ref=['OLIVE', 'HAS', 'ANOTHER', 'LOVELY', 'GIFT', 'FOR', 'THE', 'YELLOW', 'HOUSE', 'SAID', 'MOTHER', 'CAREY', 'RISING', 'AND', 'TO', 'CARRY', 'OUT', 'THE', 'NEXT', 'PART', 'OF', 'THE', 'PROGRAMME', 'WE', 'SHALL', 'HAVE', 'TO', 'GO', 'IN', 'PROCESSION', 'UPSTAIRS', 'TO', 'MY', 'BEDROOM'] +4992-41806-0008-2169: hyp=['OLIVE', 'HAS', 'ANOTHER', 'LOVELY', 'GIFT', 'FOR', 'THE', 'YELLOW', 'HOUSE', 'SAID', 'MOTHER', 'CAREY', 'RISING', 'AND', 'TO', 'CARRY', 'OUT', 'THE', 'NEXT', 'PART', 'OF', 'THE', 'PROGRAMME', 'WE', 'SHALL', 'HAVE', 'TO', 'GO', 'IN', 'PROCESSION', 'UPSTAIRS', 'TO', 'MY', 'BEDROOM'] +4992-41806-0009-2170: ref=['EXCLAIMED', 'BILL', 'HARMON', 'TO', 'HIS', 'WIFE', 'AS', 'THEY', 'WENT', 'THROUGH', 'THE', 'LIGHTED', 'HALL'] +4992-41806-0009-2170: hyp=['EXCLAIMED', 'BILL', 'HARMON', 'TO', 'HIS', 'WIFE', 'AS', 'THEY', 'WENT', 'THROUGH', 'THE', 'LIGHTED', 'HALL'] +4992-41806-0010-2171: ref=["AIN'T", 'THEY', 'THE', 'GREATEST'] +4992-41806-0010-2171: hyp=["AIN'T", 'THEY', 'THE', 'GREATEST'] +4992-41806-0011-2172: ref=['MOTHER', 'CAREY', 'POURED', 'COFFEE', 
'NANCY', 'CHOCOLATE', 'AND', 'THE', 'OTHERS', 'HELPED', 'SERVE', 'THE', 'SANDWICHES', 'AND', 'CAKE', 'DOUGHNUTS', 'AND', 'TARTS'] +4992-41806-0011-2172: hyp=['MOTHER', 'CAREY', 'POURED', 'COFFEE', 'NANCY', 'CHOCOLATE', 'AND', 'THE', 'OTHERS', 'HELP', 'SERVED', 'THE', 'SANDWICHES', 'AND', 'CAKE', 'DOUGHNUTS', 'AND', 'TARTS'] +4992-41806-0012-2173: ref=['AT', 'THAT', 'MOMENT', 'THE', 'GENTLEMAN', 'ENTERED', 'BEARING', 'A', 'HUGE', 'OBJECT', 'CONCEALED', 'BY', 'A', 'PIECE', 'OF', 'GREEN', 'FELT'] +4992-41806-0012-2173: hyp=['AT', 'THAT', 'MOMENT', 'THE', 'GENTLEMAN', 'ENTERED', 'BEARING', 'A', 'HUGE', 'OBJECT', 'CONCEALED', 'BY', 'A', 'PIECE', 'OF', 'GREEN', 'FILT'] +4992-41806-0013-2174: ref=['APPROACHING', 'THE', 'DINING', 'TABLE', 'HE', 'CAREFULLY', 'PLACED', 'THE', 'ARTICLE', 'IN', 'THE', 'CENTRE', 'AND', 'REMOVED', 'THE', 'CLOTH'] +4992-41806-0013-2174: hyp=['APPROACHING', 'THE', 'DINING', 'TABLE', 'HE', 'CAREFULLY', 'PLACED', 'THE', 'ARTICLE', 'IN', 'THE', 'CENTRE', 'AND', 'REMOVED', 'THE', 'CLOTH'] +4992-41806-0014-2175: ref=['THINKS', 'I', 'TO', 'MYSELF', 'I', 'NEVER', 'SEEN', 'ANYTHING', 'OSH', 'POPHAM', "COULDN'T", 'MEND', 'IF', 'HE', 'TOOK', 'TIME', 'ENOUGH', 'AND', 'GLUE', 'ENOUGH', 'SO', 'I', 'CARRIED', 'THIS', 'LITTLE', 'FELLER', 'HOME', 'IN', 'A', 'BUSHEL', 'BASKET', 'ONE', 'NIGHT', 'LAST', 'MONTH', 'AN', "I'VE", 'SPENT', 'ELEVEN', "EVENIN'S", 'PUTTIN', 'HIM', 'TOGETHER'] +4992-41806-0014-2175: hyp=['THINK', 'SAD', 'TO', 'MYSELF', 'I', 'NEVER', 'SEEN', 'ANYTHING', 'I', 'WAS', 'POPLED', "GOODN'T", 'MEN', 'IF', 'HE', 'TOOK', 'TIME', 'ENOUGH', 'AND', 'GLUE', 'ENOUGH', 'SO', 'I', 'CARRIED', 'THIS', 'LITTLE', 'FELLER', 'HOME', 'IN', 'A', 'BUSHEL', 'BASKET', 'ONE', 'NIGHT', 'LAST', 'MONTH', 'AND', "I'VE", 'SPENT', 'ELEVEN', 'EVENINGS', 'PUTTING', 'HIM', 'TOGETHER'] +4992-41806-0015-2176: ref=['MISSUS', 'HARMON', 'THOUGHT', 'HE', 'SANG', 'TOO', 'MUCH', 'AND', 'TOLD', 'HER', 'HUSBAND', 'PRIVATELY', 'THAT', 'IF', 'HE', 'WAS', 'A', 'CANARY', 'BIRD', 'SHE', 'SHOULD', 'WANT', 'TO', 'KEEP', 'A', 'TABLE', 'COVER', 'OVER', 'HIS', 'HEAD', 'MOST', 'OF', 'THE', 'TIME', 'BUT', 'HE', 'WAS', 'IMMENSELY', 'POPULAR', 'WITH', 'THE', 'REST', 'OF', 'HIS', 'AUDIENCE'] +4992-41806-0015-2176: hyp=['MISSUS', 'HARMON', 'THOUGHT', 'HE', 'SANG', 'TOO', 'MUCH', 'AND', 'TOLD', 'HER', 'HUSBAND', 'PRIVATELY', 'THAT', 'IF', 'HE', 'WAS', 'A', 'CANARY', 'BIRD', 'SHE', 'SHOULD', 'WANT', 'TO', 'KEEP', 'A', 'TABLE', 'COVER', 'OVER', 'HIS', 'HEAD', 'MOST', 'OF', 'THE', 'TIME', 'BUT', 'HE', 'WAS', 'IMMENSELY', 'POPULAR', 'WITH', 'THE', 'REST', 'OF', 'HIS', 'AUDIENCE'] +4992-41806-0016-2177: ref=['THE', 'FACE', 'OF', 'THE', 'MAHOGANY', 'SHONE', 'WITH', 'DELIGHT', 'AND', 'WHY', 'NOT', 'WHEN', 'IT', 'WAS', 'DOING', 'EVERYTHING', 'ALMOST', 'EVERYTHING', 'WITHIN', 'THE', 'SCOPE', 'OF', 'A', 'PIANO', 'AND', 'YET', 'THE', 'FAMILY', 'HAD', 'ENJOYED', 'WEEKS', 'OF', 'GOOD', 'NOURISHING', 'MEALS', 'ON', 'WHAT', 'HAD', 'BEEN', 'SAVED', 'BY', 'ITS', 'EXERTIONS'] +4992-41806-0016-2177: hyp=['THE', 'FACE', 'OF', 'THE', 'MAHOGANY', 'SHONE', 'WITH', 'DELIGHT', 'AND', 'WHY', 'NOT', 'WHEN', 'IT', 'WAS', 'DOING', 'EVERYTHING', 'ALMOST', 'EVERYTHING', 'WITHIN', 'THE', 'SCOPE', 'OF', 'A', 'PIANO', 'AND', 'YET', 'THE', 'FAMILY', 'HAD', 'ENJOYED', 'WEEKS', 'OF', 'GOOD', 'NOURISHING', 'MEALS', 'ON', 'WHAT', 'HAD', 'BEEN', 'SAVED', 'BY', 'ITS', 'EXERTIONS'] +4992-41806-0017-2178: ref=['WE', 'SHUT', 'OUR', 'EYES', 'THE', 'FLOWERS', 'BLOOM', 'ON', 'WE', 'MURMUR', 'BUT', 'THE', 'CORN', 'EARS', 'FILL', 'WE', 'CHOOSE', 'THE', 'SHADOW', 'BUT', 'THE', 
'SUN', 'THAT', 'CASTS', 'IT', 'SHINES', 'BEHIND', 'US', 'STILL'] +4992-41806-0017-2178: hyp=['WE', 'SHUT', 'OUR', 'EYES', 'THE', 'FLOWERS', 'BLOOM', 'ON', 'WE', 'MURMUR', 'BUT', 'THE', 'CORNEERS', 'FILL', 'WE', 'CHOOSE', 'THE', 'SHADOW', 'BUT', 'THE', 'SUN', 'THAT', 'CAST', 'IT', 'SHINES', 'BEHIND', 'US', 'STILL'] +5105-28233-0000-1649: ref=['LENGTH', 'OF', 'SERVICE', 'FOURTEEN', 'YEARS', 'THREE', 'MONTHS', 'AND', 'FIVE', 'DAYS'] +5105-28233-0000-1649: hyp=['LENGTH', 'OF', 'SERVICE', 'FOURTEEN', 'YEARS', 'THREE', 'MONTHS', 'AND', 'FIVE', 'DAYS'] +5105-28233-0001-1650: ref=['HE', 'SEEMED', 'BORN', 'TO', 'PLEASE', 'WITHOUT', 'BEING', 'CONSCIOUS', 'OF', 'THE', 'POWER', 'HE', 'POSSESSED'] +5105-28233-0001-1650: hyp=['HE', 'SEEMED', 'BORN', 'TO', 'PLEASE', 'WITHOUT', 'BEING', 'CONSCIOUS', 'OF', 'THE', 'POWER', 'HE', 'POSSESSED'] +5105-28233-0002-1651: ref=['IT', 'MUST', 'BE', 'OWNED', 'AND', 'NO', 'ONE', 'WAS', 'MORE', 'READY', 'TO', 'CONFESS', 'IT', 'THAN', 'HIMSELF', 'THAT', 'HIS', 'LITERARY', 'ATTAINMENTS', 'WERE', 'BY', 'NO', 'MEANS', 'OF', 'A', 'HIGH', 'ORDER'] +5105-28233-0002-1651: hyp=['IT', 'MUST', 'BE', 'OWNED', 'AND', 'NO', 'ONE', 'WAS', 'MORE', 'READY', 'TO', 'CONFESS', 'IT', 'THAN', 'HIMSELF', 'THAT', 'HIS', 'LITERARY', 'ATTAINMENTS', 'WERE', 'BY', 'NO', 'MEANS', 'OF', 'A', 'HIGH', 'ORDER'] +5105-28233-0003-1652: ref=['WE', "DON'T", 'SPIN', 'TOPS', 'IS', 'A', 'FAVORITE', 'SAYING', 'AMONGST', 'ARTILLERY', 'OFFICERS', 'INDICATING', 'THAT', 'THEY', 'DO', 'NOT', 'SHIRK', 'THEIR', 'DUTY', 'BY', 'FRIVOLOUS', 'PURSUITS', 'BUT', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'SERVADAC', 'BEING', 'NATURALLY', 'IDLE', 'WAS', 'VERY', 'MUCH', 'GIVEN', 'TO', 'SPINNING', 'TOPS'] +5105-28233-0003-1652: hyp=['WE', "DON'T", 'SPEND', 'TOPS', 'IS', 'A', 'FAVORITE', 'SAYING', 'AMONGST', 'ARTILLERY', 'OFFICERS', 'INDICATING', 'THAT', 'THEY', 'DO', 'NOT', 'SHIRK', 'THEIR', 'DUTY', 'BY', 'FRIVOLOUS', 'PURSUITS', 'BUT', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'SERVADAC', 'BEING', 'NATURALLY', 'IDLE', 'WAS', 'VERY', 'MUCH', 'GIVEN', 'TO', 'SPINNING', 'TOPS'] +5105-28233-0004-1653: ref=['ONCE', 'IN', 'ACTION', 'HE', 'WAS', 'LEADING', 'A', 'DETACHMENT', 'OF', 'INFANTRY', 'THROUGH', 'AN', 'INTRENCHMENT'] +5105-28233-0004-1653: hyp=['ONCE', 'IN', 'ACTION', 'HE', 'WAS', 'LEADING', 'A', 'DETACHMENT', 'OF', 'INFANTRY', 'THROUGH', 'AN', 'ENTRENCHMENT'] +5105-28233-0005-1654: ref=['SOMETIMES', 'HE', 'WOULD', 'WANDER', 'ON', 'FOOT', 'UPON', 'THE', 'SANDY', 'SHORE', 'AND', 'SOMETIMES', 'HE', 'WOULD', 'ENJOY', 'A', 'RIDE', 'ALONG', 'THE', 'SUMMIT', 'OF', 'THE', 'CLIFF', 'ALTOGETHER', 'BEING', 'IN', 'NO', 'HURRY', 'AT', 'ALL', 'TO', 'BRING', 'HIS', 'TASK', 'TO', 'AN', 'END'] +5105-28233-0005-1654: hyp=['SOMETIMES', 'HE', 'WOULD', 'WANDER', 'ON', 'FOOT', 'UPON', 'THE', 'SANDY', 'SHORE', 'AND', 'SOMETIMES', 'HE', 'WOULD', 'ENJOY', 'A', 'RIDE', 'ALONG', 'THE', 'SUMMIT', 'OF', 'THE', 'CLIFF', 'ALTOGETHER', 'BEING', 'IN', 'NO', 'HURRY', 'AT', 'ALL', 'TO', 'BRING', 'HIS', 'TASK', 'TO', 'AN', 'END'] +5105-28233-0006-1655: ref=['NO', 'CATHEDRAL', 'NOT', 'EVEN', 'BURGOS', 'ITSELF', 'COULD', 'VIE', 'WITH', 'THE', 'CHURCH', 'AT', 'MONTMARTRE'] +5105-28233-0006-1655: hyp=['NO', 'CATHEDRAL', 'NOT', 'EVEN', 'BURGOS', 'ITSELF', 'COULD', 'VIE', 'WITH', 'THE', 'CHURCH', 'AT', 'MONT', 'MARTRA'] +5105-28233-0007-1656: ref=['BEN', "ZOOF'S", 'MOST', 'AMBITIOUS', 'DESIRE', 'WAS', 'TO', 'INDUCE', 'THE', 'CAPTAIN', 'TO', 'GO', 'WITH', 'HIM', 'AND', 'END', 'HIS', 'DAYS', 'IN', 'HIS', 'MUCH', 'LOVED', 'HOME', 'AND', 'SO', 'INCESSANTLY', 'WERE', 
"SERVADAC'S", 'EARS', 'BESIEGED', 'WITH', 'DESCRIPTIONS', 'OF', 'THE', 'UNPARALLELED', 'BEAUTIES', 'AND', 'ADVANTAGES', 'OF', 'THIS', 'EIGHTEENTH', 'ARRONDISSEMENT', 'OF', 'PARIS', 'THAT', 'HE', 'COULD', 'SCARCELY', 'HEAR', 'THE', 'NAME', 'OF', 'MONTMARTRE', 'WITHOUT', 'A', 'CONSCIOUS', 'THRILL', 'OF', 'AVERSION'] +5105-28233-0007-1656: hyp=['BEN', "ZOOF'S", 'MOST', 'AMBITIOUS', 'DESIRE', 'WAS', 'TO', 'INDUCE', 'THE', 'CAPTAIN', 'TO', 'GO', 'WITH', 'HIM', 'AND', 'END', 'HIS', 'DAYS', 'IN', 'HIS', 'MUCH', 'LOVED', 'HOME', 'AND', 'SO', 'INCESSANTLY', 'WERE', "SERVADAC'S", 'EARS', 'BESIEGED', 'WITH', 'DESCRIPTIONS', 'OF', 'THE', 'UNPARALLELED', 'BEAUTIES', 'AND', 'ADVANTAGES', 'OF', 'THIS', 'EIGHTEENTH', 'ARE', 'ON', 'DE', 'SAINT', 'OF', 'PARIS', 'THAT', 'HE', 'COULD', 'SCARCELY', 'HEAR', 'THE', 'NAME', 'OF', 'MONTMARTRA', 'WITHOUT', 'A', 'CONSCIOUS', 'THRILL', 'OF', 'AVERSION'] +5105-28233-0008-1657: ref=['WHEN', 'A', 'PRIVATE', 'IN', 'THE', 'EIGHTH', 'CAVALRY', 'HE', 'HAD', 'BEEN', 'ON', 'THE', 'POINT', 'OF', 'QUITTING', 'THE', 'ARMY', 'AT', 'TWENTY', 'EIGHT', 'YEARS', 'OF', 'AGE', 'BUT', 'UNEXPECTEDLY', 'HE', 'HAD', 'BEEN', 'APPOINTED', 'ORDERLY', 'TO', 'CAPTAIN', 'SERVADAC'] +5105-28233-0008-1657: hyp=['WHEN', 'A', 'PRIVATE', 'AND', 'THE', 'EIGHTH', 'CAVALRY', 'HE', 'HAD', 'BEEN', 'ON', 'THE', 'POINT', 'OF', 'QUITTING', 'THE', 'ARMY', 'AT', 'TWENTY', 'EIGHT', 'YEARS', 'OF', 'AGE', 'BUT', 'UNEXPECTEDLY', 'HE', 'HAD', 'BEEN', 'APPOINTED', 'ORDERLY', 'TO', 'CAPTAIN', 'SERVADAC'] +5105-28233-0009-1658: ref=['THE', 'BOND', 'OF', 'UNION', 'THUS', 'EFFECTED', 'COULD', 'NEVER', 'BE', 'SEVERED', 'AND', 'ALTHOUGH', 'BEN', "ZOOF'S", 'ACHIEVEMENTS', 'HAD', 'FAIRLY', 'EARNED', 'HIM', 'THE', 'RIGHT', 'OF', 'RETIREMENT', 'HE', 'FIRMLY', 'DECLINED', 'ALL', 'HONORS', 'OR', 'ANY', 'PENSION', 'THAT', 'MIGHT', 'PART', 'HIM', 'FROM', 'HIS', 'SUPERIOR', 'OFFICER'] +5105-28233-0009-1658: hyp=['THE', 'BOND', 'OF', 'UNION', 'THUS', 'EFFECTED', 'COULD', 'NEVER', 'BE', 'SEVERED', 'AND', 'ALTHOUGH', 'BEN', "ZOV'S", 'ACHIEVEMENTS', 'HAD', 'FAIRLY', 'EARNED', 'HIM', 'THE', 'RIGHT', 'OF', 'RETIREMENT', 'HE', 'FIRMLY', 'DECLINED', 'ALL', 'HONORS', 'OR', 'ANY', 'PENSION', 'THAT', 'MIGHT', 'PART', 'HIM', 'FROM', 'HIS', 'SUPERIOR', 'OFFICER'] +5105-28233-0010-1659: ref=['UNLIKE', 'HIS', 'MASTER', 'HE', 'MADE', 'NO', 'PRETENSION', 'TO', 'ANY', 'GIFT', 'OF', 'POETIC', 'POWER', 'BUT', 'HIS', 'INEXHAUSTIBLE', 'MEMORY', 'MADE', 'HIM', 'A', 'LIVING', 'ENCYCLOPAEDIA', 'AND', 'FOR', 'HIS', 'STOCK', 'OF', 'ANECDOTES', 'AND', "TROOPER'S", 'TALES', 'HE', 'WAS', 'MATCHLESS'] +5105-28233-0010-1659: hyp=['I', 'MAKE', 'HIS', 'MASTER', 'HE', 'MADE', 'NO', 'PRETENSION', 'TO', 'ANY', 'GIFT', 'OF', 'POETIC', 'POWER', 'BUT', 'HIS', 'INEXHAUSTIBLE', 'MEMORY', 'MADE', 'HIM', 'A', 'LIVING', 'ENCYCLOPAEDIA', 'AND', 'FOR', 'HIS', 'STOCK', 'OF', 'ANECDOTES', 'AND', "TROOPER'S", 'TALES', 'HE', 'WAS', 'MATCHLESS'] +5105-28240-0000-1624: ref=['FAST', 'AS', 'HIS', 'LEGS', 'COULD', 'CARRY', 'HIM', 'SERVADAC', 'HAD', 'MADE', 'HIS', 'WAY', 'TO', 'THE', 'TOP', 'OF', 'THE', 'CLIFF'] +5105-28240-0000-1624: hyp=['FAST', 'AS', 'HIS', 'LEGS', 'COULD', 'CARRY', 'HIM', 'SERVADAC', 'HAD', 'MADE', 'HIS', 'WAY', 'TO', 'THE', 'TOP', 'OF', 'THE', 'CLIFF'] +5105-28240-0001-1625: ref=['IT', 'WAS', 'QUITE', 'TRUE', 'THAT', 'A', 'VESSEL', 'WAS', 'IN', 'SIGHT', 'HARDLY', 'MORE', 'THAN', 'SIX', 'MILES', 'FROM', 'THE', 'SHORE', 'BUT', 'OWING', 'TO', 'THE', 'INCREASE', 'IN', 'THE', "EARTH'S", 'CONVEXITY', 'AND', 'THE', 'CONSEQUENT', 'LIMITATION', 'OF', 'THE', 'RANGE', 'OF', 
'VISION', 'THE', 'RIGGING', 'OF', 'THE', 'TOPMASTS', 'ALONE', 'WAS', 'VISIBLE', 'ABOVE', 'THE', 'WATER'] +5105-28240-0001-1625: hyp=['IT', 'WAS', 'QUITE', 'TRUE', 'THAT', 'A', 'VESSEL', 'WAS', 'IN', 'SIGHT', 'HARDLY', 'MORE', 'THAN', 'SIX', 'MILES', 'FROM', 'THE', 'SHORE', 'BUT', 'OWING', 'TO', 'THE', 'INCREASE', 'IN', 'THE', "EARTH'S", 'CONVEXITY', 'AND', 'THE', 'CONSEQUENT', 'LIMITATION', 'OF', 'THE', 'RANGE', 'OF', 'VISION', 'THE', 'RIGGING', 'OF', 'THE', 'TOPMASTS', 'ALONE', 'WAS', 'VISIBLE', 'ABOVE', 'THE', 'WATER'] +5105-28240-0002-1626: ref=['EXCLAIMED', 'SERVADAC', 'KEEPING', 'HIS', 'EYE', 'UNMOVED', 'AT', 'HIS', 'TELESCOPE'] +5105-28240-0002-1626: hyp=['EXCLAIMED', 'SERVADAC', 'KEEPING', 'HIS', 'EYE', 'UNMOVED', 'AT', 'HIS', 'TELESCOPE'] +5105-28240-0003-1627: ref=['SHE', 'IS', 'UNDER', 'SAIL', 'BUT', 'SHE', 'IS', 'COUNT', "TIMASCHEFF'S", 'YACHT', 'HE', 'WAS', 'RIGHT'] +5105-28240-0003-1627: hyp=['SHE', 'IS', 'UNDER', 'SALE', 'BUT', 'SHE', 'IS', 'COUNT', "TIMASCHEFF'S", 'YACHT', 'HE', 'WAS', 'RIGHT'] +5105-28240-0004-1628: ref=['IF', 'THE', 'COUNT', 'WERE', 'ON', 'BOARD', 'A', 'STRANGE', 'FATALITY', 'WAS', 'BRINGING', 'HIM', 'TO', 'THE', 'PRESENCE', 'OF', 'HIS', 'RIVAL'] +5105-28240-0004-1628: hyp=['IF', 'THE', 'COUNT', 'WERE', 'ON', 'BOARD', 'A', 'STRANGE', 'FATALITY', 'WAS', 'BRINGING', 'HIM', 'TO', 'THE', 'PRESENCE', 'OF', 'HIS', 'RIVAL'] +5105-28240-0005-1629: ref=['HE', 'RECKONED', 'THEREFORE', 'NOT', 'ONLY', 'UPON', 'ASCERTAINING', 'THE', 'EXTENT', 'OF', 'THE', 'LATE', 'CATASTROPHE', 'BUT', 'UPON', 'LEARNING', 'ITS', 'CAUSE'] +5105-28240-0005-1629: hyp=['HE', 'RECKONED', 'THEREFORE', 'NOT', 'ONLY', 'UPON', 'ASCERTAINING', 'THE', 'EXTENT', 'OF', 'THE', 'LATE', 'CATASTROPHE', 'BUT', 'UPON', 'LEARNING', 'ITS', 'CAUSE'] +5105-28240-0006-1630: ref=['THE', 'WIND', 'BEING', 'ADVERSE', 'THE', 'DOBRYNA', 'DID', 'NOT', 'MAKE', 'VERY', 'RAPID', 'PROGRESS', 'BUT', 'AS', 'THE', 'WEATHER', 'IN', 'SPITE', 'OF', 'A', 'FEW', 'CLOUDS', 'REMAINED', 'CALM', 'AND', 'THE', 'SEA', 'WAS', 'QUITE', 'SMOOTH', 'SHE', 'WAS', 'ENABLED', 'TO', 'HOLD', 'A', 'STEADY', 'COURSE'] +5105-28240-0006-1630: hyp=['THE', 'WIND', 'BEING', 'ADVERSE', 'THE', 'DOBRINA', 'DID', 'NOT', 'MAKE', 'VERY', 'RAPID', 'PROGRESS', 'BUT', 'AS', 'THE', 'WEATHER', 'IN', 'SPITE', 'OF', 'A', 'FEW', 'CLOUDS', 'REMAINED', 'CALM', 'AND', 'THE', 'SEA', 'WAS', 'QUITE', 'SMOOTH', 'SHE', 'WAS', 'ENABLED', 'TO', 'HOLD', 'A', 'STEADY', 'COURSE'] +5105-28240-0007-1631: ref=['SERVADAC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'THE', 'DOBRYNA', 'WAS', 'ENDEAVORING', 'TO', 'PUT', 'IN'] +5105-28240-0007-1631: hyp=['SERVADAC', 'TOOK', 'IT', 'FOR', 'GRANTED', 'THAT', 'THE', 'DOBRINA', 'WAS', 'ENDEAVORING', 'TO', 'PUT', 'IN'] +5105-28240-0008-1632: ref=['A', 'NARROW', 'CHANNEL', 'FORMED', 'A', 'PASSAGE', 'THROUGH', 'THE', 'RIDGE', 'OF', 'ROCKS', 'THAT', 'PROTECTED', 'IT', 'FROM', 'THE', 'OPEN', 'SEA', 'AND', 'WHICH', 'EVEN', 'IN', 'THE', 'ROUGHEST', 'WEATHER', 'WOULD', 'ENSURE', 'THE', 'CALMNESS', 'OF', 'ITS', 'WATERS'] +5105-28240-0008-1632: hyp=['A', 'NARROW', 'CHANNEL', 'FORMED', 'A', 'PASSAGE', 'THROUGH', 'THE', 'RIDGE', 'OF', 'ROCKS', 'THAT', 'PROTECTED', 'IT', 'FROM', 'THE', 'OPEN', 'SEA', 'AND', 'WHICH', 'EVEN', 'IN', 'THE', 'ROUGHEST', 'WEATHER', 'WOULD', 'INSURE', 'THE', 'CALMNESS', 'OF', 'ITS', 'WATERS'] +5105-28240-0009-1633: ref=['SLIGHTLY', 'CHANGING', 'HER', 'COURSE', 'SHE', 'FIRST', 'STRUCK', 'HER', 'MAINSAIL', 'AND', 'IN', 'ORDER', 'TO', 'FACILITATE', 'THE', 'MOVEMENTS', 'OF', 'HER', 'HELMSMAN', 'SOON', 'CARRIED', 'NOTHING', 'BUT', 
'HER', 'TWO', 'TOPSAILS', 'BRIGANTINE', 'AND', 'JIB'] +5105-28240-0009-1633: hyp=['SLIGHTLY', 'CHANGING', 'HER', 'COURSE', 'SHE', 'FIRST', 'STRUCK', 'HER', 'MAINSAIL', 'AND', 'IN', 'ORDER', 'TO', 'FACILITATE', 'THE', 'MOVEMENTS', 'OF', 'HER', 'HELMSMAN', 'SOON', 'CARRIED', 'NOTHING', 'BUT', 'HER', 'TWO', 'TOPSAILS', 'BRIGANTINE', 'AND', 'JIB'] +5105-28240-0010-1634: ref=['CAPTAIN', 'SERVADAC', 'HASTENED', 'TOWARDS', 'HIM'] +5105-28240-0010-1634: hyp=['CAPTAIN', 'SERVADAC', 'HASTENED', 'TOWARD', 'HIM'] +5105-28240-0011-1635: ref=['I', 'LEFT', 'YOU', 'ON', 'A', 'CONTINENT', 'AND', 'HERE', 'I', 'HAVE', 'THE', 'HONOR', 'OF', 'FINDING', 'YOU', 'ON', 'AN', 'ISLAND'] +5105-28240-0011-1635: hyp=['I', 'LEFT', 'YOU', 'ON', 'A', 'CONTINENT', 'AND', 'HERE', 'I', 'HAVE', 'THE', 'HONOR', 'OF', 'FINDING', 'YOU', 'ON', 'AN', 'ISLAND'] +5105-28240-0012-1636: ref=['NEVER', 'MIND', 'NOW', 'INTERPOSED', 'THE', 'CAPTAIN', 'WE', 'WILL', 'TALK', 'OF', 'THAT', 'BY', 'AND', 'BY'] +5105-28240-0012-1636: hyp=['NEVER', 'MIND', 'NOW', 'INTERPOSED', 'THE', 'CAPTAIN', 'WE', 'WILL', 'TALK', 'OF', 'THAT', 'BY', 'AND', 'BY'] +5105-28240-0013-1637: ref=['NOTHING', 'MORE', 'THAN', 'YOU', 'KNOW', 'YOURSELF'] +5105-28240-0013-1637: hyp=['NOTHING', 'MORE', 'THAN', 'YOU', 'KNOW', 'YOURSELF'] +5105-28240-0014-1638: ref=['ARE', 'YOU', 'CERTAIN', 'THAT', 'THIS', 'IS', 'THE', 'MEDITERRANEAN'] +5105-28240-0014-1638: hyp=['ARE', 'YOU', 'CERTAIN', 'THAT', 'THIS', 'IS', 'THE', 'MEDITERRANEAN'] +5105-28240-0015-1639: ref=['FOR', 'SOME', 'MOMENTS', 'HE', 'SEEMED', 'PERFECTLY', 'STUPEFIED', 'THEN', 'RECOVERING', 'HIMSELF', 'HE', 'BEGAN', 'TO', 'OVERWHELM', 'THE', 'COUNT', 'WITH', 'A', 'TORRENT', 'OF', 'QUESTIONS'] +5105-28240-0015-1639: hyp=['FOR', 'SOME', 'MOMENTS', 'HE', 'SEEMED', 'PERFECTLY', 'STUPEFIED', 'AND', 'THEN', 'RECOVERING', 'HIMSELF', 'HE', 'BEGAN', 'TO', 'OVERWHELM', 'THE', 'COUNT', 'WITH', 'A', 'TORRENT', 'OF', 'QUESTIONS'] +5105-28240-0016-1640: ref=['TO', 'ALL', 'THESE', 'INQUIRIES', 'THE', 'COUNT', 'RESPONDED', 'IN', 'THE', 'AFFIRMATIVE'] +5105-28240-0016-1640: hyp=['TO', 'ALL', 'THESE', 'INQUIRIES', 'THE', 'COUNT', 'RESPONDED', 'IN', 'THE', 'AFFIRMATIVE'] +5105-28240-0017-1641: ref=['SOME', 'MYSTERIOUS', 'FORCE', 'SEEMED', 'TO', 'HAVE', 'BROUGHT', 'ABOUT', 'A', 'CONVULSION', 'OF', 'THE', 'ELEMENTS'] +5105-28240-0017-1641: hyp=['SOME', 'MYSTERIOUS', 'FORCE', 'SEEMED', 'TO', 'HAVE', 'BROUGHT', 'ABOUT', 'A', 'CONVULSION', 'OF', 'THE', 'ELEMENTS'] +5105-28240-0018-1642: ref=['YOU', 'WILL', 'TAKE', 'ME', 'ON', 'BOARD', 'COUNT', 'WILL', 'YOU', 'NOT'] +5105-28240-0018-1642: hyp=['YOU', 'WILL', 'TAKE', 'ME', 'ON', 'BOARD', 'COUNT', 'WILL', 'YOU', 'NOT'] +5105-28240-0019-1643: ref=['MY', 'YACHT', 'IS', 'AT', 'YOUR', 'SERVICE', 'SIR', 'EVEN', 'SHOULD', 'YOU', 'REQUIRE', 'TO', 'MAKE', 'A', 'TOUR', 'ROUND', 'THE', 'WORLD'] +5105-28240-0019-1643: hyp=['MY', 'YACHT', 'IS', 'AT', 'YOUR', 'SERVICE', 'SIR', 'EVEN', 'SHOULD', 'YOU', 'REQUIRE', 'TO', 'MAKE', 'A', 'TOUR', 'AROUND', 'THE', 'WORLD'] +5105-28240-0020-1644: ref=['THE', 'COUNT', 'SHOOK', 'HIS', 'HEAD'] +5105-28240-0020-1644: hyp=['THE', 'COUNT', 'SHOOK', 'HIS', 'HEAD'] +5105-28240-0021-1645: ref=['BEFORE', 'STARTING', 'IT', 'WAS', 'INDISPENSABLE', 'THAT', 'THE', 'ENGINE', 'OF', 'THE', 'DOBRYNA', 'SHOULD', 'BE', 'REPAIRED', 'TO', 'SAIL', 'UNDER', 'CANVAS', 'ONLY', 'WOULD', 'IN', 'CONTRARY', 'WINDS', 'AND', 'ROUGH', 'SEAS', 'BE', 'BOTH', 'TEDIOUS', 'AND', 'DIFFICULT'] +5105-28240-0021-1645: hyp=['BEFORE', 'STARTING', 'IT', 'WAS', 'INDISPENSABLE', 'THAT', 'THE', 'ENGINE', 'OF', 
'THE', 'DOBRINA', 'SHOULD', 'BE', 'REPAIRED', 'TO', 'SAIL', 'UNDER', 'CANVAS', 'ONLY', 'WOULD', 'IN', 'CONTRARY', 'WINDS', 'AND', 'ROUGH', 'SEAS', 'BE', 'BOTH', 'TEDIOUS', 'AND', 'DIFFICULT'] +5105-28240-0022-1646: ref=['IT', 'WAS', 'ON', 'THE', 'LAST', 'DAY', 'OF', 'JANUARY', 'THAT', 'THE', 'REPAIRS', 'OF', 'THE', 'SCHOONER', 'WERE', 'COMPLETED'] +5105-28240-0022-1646: hyp=['IT', 'WAS', 'ON', 'THE', 'LAST', 'DAY', 'OF', 'JANUARY', 'THAT', 'THE', 'REPAIRS', 'OF', 'THE', 'SCHOONER', 'WERE', 'COMPLETED'] +5105-28240-0023-1647: ref=['A', 'SLIGHT', 'DIMINUTION', 'IN', 'THE', 'EXCESSIVELY', 'HIGH', 'TEMPERATURE', 'WHICH', 'HAD', 'PREVAILED', 'FOR', 'THE', 'LAST', 'FEW', 'WEEKS', 'WAS', 'THE', 'ONLY', 'APPARENT', 'CHANGE', 'IN', 'THE', 'GENERAL', 'ORDER', 'OF', 'THINGS', 'BUT', 'WHETHER', 'THIS', 'WAS', 'TO', 'BE', 'ATTRIBUTED', 'TO', 'ANY', 'ALTERATION', 'IN', 'THE', "EARTH'S", 'ORBIT', 'WAS', 'A', 'QUESTION', 'WHICH', 'WOULD', 'STILL', 'REQUIRE', 'SEVERAL', 'DAYS', 'TO', 'DECIDE'] +5105-28240-0023-1647: hyp=['A', 'SLIGHT', 'DIMINUTION', 'IN', 'THE', 'EXCESSIVELY', 'HIGH', 'TEMPERATURE', 'WHICH', 'HAD', 'PREVAILED', 'FOR', 'THE', 'LAST', 'FEW', 'WEEKS', 'WAS', 'THE', 'ONLY', 'APPARENT', 'CHANGE', 'IN', 'THE', 'GENERAL', 'ORDER', 'OF', 'THINGS', 'BUT', 'WHETHER', 'THIS', 'WAS', 'TO', 'BE', 'ATTRIBUTED', 'TO', 'ANY', 'ALTERATION', 'IN', 'THE', "EARTH'S", 'ORBIT', 'WAS', 'A', 'QUESTION', 'WHICH', 'WOULD', 'STILL', 'REQUIRE', 'SEVERAL', 'DAYS', 'TO', 'DECIDE'] +5105-28240-0024-1648: ref=['DOUBTS', 'NOW', 'AROSE', 'AND', 'SOME', 'DISCUSSION', 'FOLLOWED', 'WHETHER', 'OR', 'NOT', 'IT', 'WAS', 'DESIRABLE', 'FOR', 'BEN', 'ZOOF', 'TO', 'ACCOMPANY', 'HIS', 'MASTER'] +5105-28240-0024-1648: hyp=['DOUBTS', 'NOW', 'AROSE', 'AND', 'SOME', 'DISCUSSION', 'FOLLOWED', 'WHETHER', 'OR', 'NOT', 'IT', 'WAS', 'DESIRABLE', 'FOR', 'BEN', 'ZOOF', 'TO', 'ACCOMPANY', 'HIS', 'MASTER'] +5105-28241-0000-1604: ref=['HER', 'SEA', 'GOING', 'QUALITIES', 'WERE', 'EXCELLENT', 'AND', 'WOULD', 'HAVE', 'AMPLY', 'SUFFICED', 'FOR', 'A', 'CIRCUMNAVIGATION', 'OF', 'THE', 'GLOBE'] +5105-28241-0000-1604: hyp=['HER', 'SEA', 'GOING', 'QUALITIES', 'WERE', 'EXCELLENT', 'AND', 'WOULD', 'HAVE', 'AMPLY', 'SUFFICED', 'FOR', 'A', 'CIRCUMNAVIGATION', 'OF', 'THE', 'GLOBE'] +5105-28241-0001-1605: ref=['AFTER', 'AN', 'APPRENTICESHIP', 'ON', 'A', 'MERCHANT', 'SHIP', 'HE', 'HAD', 'ENTERED', 'THE', 'IMPERIAL', 'NAVY', 'AND', 'HAD', 'ALREADY', 'REACHED', 'THE', 'RANK', 'OF', 'LIEUTENANT', 'WHEN', 'THE', 'COUNT', 'APPOINTED', 'HIM', 'TO', 'THE', 'CHARGE', 'OF', 'HIS', 'OWN', 'PRIVATE', 'YACHT', 'IN', 'WHICH', 'HE', 'WAS', 'ACCUSTOMED', 'TO', 'SPEND', 'BY', 'FAR', 'THE', 'GREATER', 'PART', 'OF', 'HIS', 'TIME', 'THROUGHOUT', 'THE', 'WINTER', 'GENERALLY', 'CRUISING', 'IN', 'THE', 'MEDITERRANEAN', 'WHILST', 'IN', 'THE', 'SUMMER', 'HE', 'VISITED', 'MORE', 'NORTHERN', 'WATERS'] +5105-28241-0001-1605: hyp=['AFTER', 'AN', 'APPRENTICESHIP', 'ON', 'A', 'MERCHANT', 'SHIP', 'HE', 'HAD', 'ENTERED', 'THE', 'IMPERIAL', 'NAVY', 'AND', 'HAD', 'ALREADY', 'REACHED', 'THE', 'RANK', 'OF', 'LIEUTENANT', 'WHEN', 'THE', 'COUNT', 'APPOINTED', 'HIM', 'TO', 'THE', 'CHARGE', 'OF', 'HIS', 'OWN', 'PRIVATE', 'YACHT', 'IN', 'WHICH', 'HE', 'WAS', 'ACCUSTOMED', 'TO', 'SPEND', 'BY', 'FAR', 'THE', 'GREATER', 'PART', 'OF', 'HIS', 'TIME', 'THROUGHOUT', 'THE', 'WINTER', 'GENERALLY', 'CRUISING', 'IN', 'THE', 'MEDITERRANEAN', 'WHILST', 'IN', 'THE', 'SUMMER', 'HE', 'VISITED', 'MORE', 'NORTHERN', 'WATERS'] +5105-28241-0002-1606: ref=['THE', 'LATE', 'ASTOUNDING', 'EVENTS', 'HOWEVER', 'HAD', 'RENDERED', 
'PROCOPE', 'MANIFESTLY', 'UNEASY', 'AND', 'NOT', 'THE', 'LESS', 'SO', 'FROM', 'HIS', 'CONSCIOUSNESS', 'THAT', 'THE', 'COUNT', 'SECRETLY', 'PARTOOK', 'OF', 'HIS', 'OWN', 'ANXIETY'] +5105-28241-0002-1606: hyp=['THE', 'LATE', 'ASTOUNDING', 'EVENTS', 'HOWEVER', 'HAD', 'RENDERED', 'PROCOPE', 'MANIFESTLY', 'UNEASY', 'AND', 'NOT', 'THE', 'LESS', 'SO', 'FROM', 'HIS', 'CONSCIOUSNESS', 'THAT', 'THE', 'COUNT', 'SECRETLY', 'PARTOOK', 'OF', 'HIS', 'OWN', 'ANXIETY'] +5105-28241-0003-1607: ref=['STEAM', 'UP', 'AND', 'CANVAS', 'SPREAD', 'THE', 'SCHOONER', 'STARTED', 'EASTWARDS'] +5105-28241-0003-1607: hyp=['STEAM', 'UP', 'AND', 'CANVAS', 'SPREAD', 'THE', 'SCHOONER', 'STARTED', 'EASTWARDS'] +5105-28241-0004-1608: ref=['ALTHOUGH', 'ONLY', 'A', 'MODERATE', 'BREEZE', 'WAS', 'BLOWING', 'THE', 'SEA', 'WAS', 'ROUGH', 'A', 'CIRCUMSTANCE', 'TO', 'BE', 'ACCOUNTED', 'FOR', 'ONLY', 'BY', 'THE', 'DIMINUTION', 'IN', 'THE', 'FORCE', 'OF', 'THE', "EARTH'S", 'ATTRACTION', 'RENDERING', 'THE', 'LIQUID', 'PARTICLES', 'SO', 'BUOYANT', 'THAT', 'BY', 'THE', 'MERE', 'EFFECT', 'OF', 'OSCILLATION', 'THEY', 'WERE', 'CARRIED', 'TO', 'A', 'HEIGHT', 'THAT', 'WAS', 'QUITE', 'UNPRECEDENTED'] +5105-28241-0004-1608: hyp=['ALTHOUGH', 'ONLY', 'A', 'MODERATE', 'BREEZE', 'WAS', 'BLOWING', 'THE', 'SEA', 'WAS', 'ROUGH', 'A', 'CIRCUMSTANCE', 'TO', 'BE', 'ACCOUNTED', 'FOR', 'ONLY', 'BY', 'THE', 'DIMINUTION', 'IN', 'THE', 'FORCE', 'OF', 'THE', "EARTH'S", 'ATTRACTION', 'RENDERING', 'THE', 'LIQUID', 'PARTICLE', 'SO', 'BUOYANT', 'THAT', 'BY', 'THE', 'MERE', 'EFFECT', 'OF', 'OSCILLATION', 'THEY', 'WERE', 'CARRIED', 'TO', 'A', 'HEIGHT', 'THAT', 'WAS', 'QUITE', 'UNPRECEDENTED'] +5105-28241-0005-1609: ref=['FOR', 'A', 'FEW', 'MILES', 'SHE', 'FOLLOWED', 'THE', 'LINE', 'HITHERTO', 'PRESUMABLY', 'OCCUPIED', 'BY', 'THE', 'COAST', 'OF', 'ALGERIA', 'BUT', 'NO', 'LAND', 'APPEARED', 'TO', 'THE', 'SOUTH'] +5105-28241-0005-1609: hyp=['FOR', 'A', 'FEW', 'MILES', 'SHE', 'FOLLOWED', 'THE', 'LINE', 'HITHERTO', 'PRESUMABLY', 'OCCUPIED', 'BY', 'THE', 'COAST', 'OF', 'ALGERIA', 'BUT', 'NO', 'LAND', 'APPEARED', 'TO', 'THE', 'SOUTH'] +5105-28241-0006-1610: ref=['THE', 'LOG', 'AND', 'THE', 'COMPASS', 'THEREFORE', 'WERE', 'ABLE', 'TO', 'BE', 'CALLED', 'UPON', 'TO', 'DO', 'THE', 'WORK', 'OF', 'THE', 'SEXTANT', 'WHICH', 'HAD', 'BECOME', 'UTTERLY', 'USELESS'] +5105-28241-0006-1610: hyp=['THE', 'LOG', 'AND', 'THE', 'COMPASS', 'THEREFORE', 'WERE', 'ABLE', 'TO', 'BE', 'CALLED', 'UPON', 'TO', 'DO', 'THE', 'WORK', 'OF', 'THE', 'SEXTANT', 'WHICH', 'HAD', 'BECOME', 'UTTERLY', 'USELESS'] +5105-28241-0007-1611: ref=['THERE', 'IS', 'NO', 'FEAR', 'OF', 'THAT', 'SIR'] +5105-28241-0007-1611: hyp=["THERE'S", 'NO', 'FEAR', 'OF', 'THAT', 'SIR'] +5105-28241-0008-1612: ref=['THE', 'EARTH', 'HAS', 'UNDOUBTEDLY', 'ENTERED', 'UPON', 'A', 'NEW', 'ORBIT', 'BUT', 'SHE', 'IS', 'NOT', 'INCURRING', 'ANY', 'PROBABLE', 'RISK', 'OF', 'BEING', 'PRECIPITATED', 'ONTO', 'THE', 'SUN'] +5105-28241-0008-1612: hyp=['AT', 'THE', 'EARTH', 'HAS', 'UNDOUBTEDLY', 'ENTERED', 'UPON', 'A', 'NEW', 'ORBIT', 'BUT', 'SHE', 'IS', 'NOT', 'INCURRING', 'ANY', 'PROBABLE', 'RISK', 'OF', 'BEING', 'PRECIPITATED', 'ON', 'TO', 'THE', 'SUN'] +5105-28241-0009-1613: ref=['AND', 'WHAT', 'DEMONSTRATION', 'DO', 'YOU', 'OFFER', 'ASKED', 'SERVADAC', 'EAGERLY', 'THAT', 'IT', 'WILL', 'NOT', 'HAPPEN'] +5105-28241-0009-1613: hyp=['AND', 'WHAT', 'DEMONSTRATION', 'DO', 'YOU', 'OFFER', 'ASKED', 'SERVADAC', 'EAGERLY', 'THAT', 'IT', 'WILL', 'NOT', 'HAPPEN'] +5105-28241-0010-1614: ref=['OCEAN', 'REIGNED', 'SUPREME'] +5105-28241-0010-1614: hyp=['OCEAN', 
'RAINED', 'SUPREME'] +5105-28241-0011-1615: ref=['ALL', 'THE', 'IMAGES', 'OF', 'HIS', 'PAST', 'LIFE', 'FLOATED', 'UPON', 'HIS', 'MEMORY', 'HIS', 'THOUGHTS', 'SPED', 'AWAY', 'TO', 'HIS', 'NATIVE', 'FRANCE', 'ONLY', 'TO', 'RETURN', 'AGAIN', 'TO', 'WONDER', 'WHETHER', 'THE', 'DEPTHS', 'OF', 'OCEAN', 'WOULD', 'REVEAL', 'ANY', 'TRACES', 'OF', 'THE', 'ALGERIAN', 'METROPOLIS'] +5105-28241-0011-1615: hyp=['ALL', 'THE', 'IMAGES', 'OF', 'HIS', 'PAST', 'LIFE', 'FLOATED', 'UPON', 'HIS', 'MEMORY', 'HIS', 'THOUGHTS', 'SPED', 'AWAY', 'TO', 'HIS', 'NATIVE', 'FRANCE', 'ONLY', 'TO', 'RETURN', 'AGAIN', 'TO', 'WONDER', 'WHETHER', 'THE', 'DEPTHS', 'OF', 'OCEAN', 'WOULD', 'REVEAL', 'ANY', 'TRACES', 'OF', 'THE', 'ALGERIAN', 'METROPOLIS'] +5105-28241-0012-1616: ref=['IS', 'IT', 'NOT', 'IMPOSSIBLE', 'HE', 'MURMURED', 'ALOUD', 'THAT', 'ANY', 'CITY', 'SHOULD', 'DISAPPEAR', 'SO', 'COMPLETELY'] +5105-28241-0012-1616: hyp=['IS', 'IT', 'NOT', 'IMPOSSIBLE', 'HE', 'MURMURED', 'ALOUD', 'THAT', 'ANY', 'CITY', 'SHOULD', 'DISAPPEAR', 'SO', 'COMPLETELY'] +5105-28241-0013-1617: ref=['WOULD', 'NOT', 'THE', 'LOFTIEST', 'EMINENCES', 'OF', 'THE', 'CITY', 'AT', 'LEAST', 'BE', 'VISIBLE'] +5105-28241-0013-1617: hyp=['WOULD', 'NOT', 'THE', 'LOFTIEST', 'EMINENCES', 'OF', 'THE', 'CITY', 'AT', 'LEAST', 'BE', 'VISIBLE'] +5105-28241-0014-1618: ref=['ANOTHER', 'CIRCUMSTANCE', 'WAS', 'MOST', 'REMARKABLE'] +5105-28241-0014-1618: hyp=['ANOTHER', 'CIRCUMSTANCE', 'WAS', 'MOST', 'REMARKABLE'] +5105-28241-0015-1619: ref=['TO', 'THE', 'SURPRISE', 'OF', 'ALL', 'AND', 'ESPECIALLY', 'OF', 'LIEUTENANT', 'PROCOPE', 'THE', 'LINE', 'INDICATED', 'A', 'BOTTOM', 'AT', 'A', 'NEARLY', 'UNIFORM', 'DEPTH', 'OF', 'FROM', 'FOUR', 'TO', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'THE', 'SOUNDING', 'WAS', 'PERSEVERED', 'WITH', 'CONTINUOUSLY', 'FOR', 'MORE', 'THAN', 'TWO', 'HOURS', 'OVER', 'A', 'CONSIDERABLE', 'AREA', 'THE', 'DIFFERENCES', 'OF', 'LEVEL', 'WERE', 'INSIGNIFICANT', 'NOT', 'CORRESPONDING', 'IN', 'ANY', 'DEGREE', 'TO', 'WHAT', 'WOULD', 'BE', 'EXPECTED', 'OVER', 'THE', 'SITE', 'OF', 'A', 'CITY', 'THAT', 'HAD', 'BEEN', 'TERRACED', 'LIKE', 'THE', 'SEATS', 'OF', 'AN', 'AMPHITHEATER'] +5105-28241-0015-1619: hyp=['TO', 'THE', 'SURPRISE', 'OF', 'ALL', 'AND', 'ESPECIALLY', 'OF', 'LIEUTENANT', 'PROCOPE', 'THE', 'LINE', 'INDICATED', 'A', 'BOTTOM', 'AT', 'A', 'NEARLY', 'UNIFORM', 'DEPTH', 'OF', 'FROM', 'FOUR', 'TO', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'THE', 'SOUNDING', 'WAS', 'PERSEVERED', 'WITH', 'CONTINUOUSLY', 'FOR', 'MORE', 'THAN', 'TWO', 'HOURS', 'OVER', 'A', 'CONSIDERABLE', 'AREA', 'THE', 'DIFFERENCES', 'OF', 'LEVEL', 'WERE', 'INSIGNIFICANT', 'NOT', 'CORRESPONDING', 'IN', 'ANY', 'DEGREE', 'TO', 'WHAT', 'WOULD', 'BE', 'EXPECTED', 'OVER', 'THE', 'SITE', 'OF', 'A', 'CITY', 'THAT', 'HAD', 'BEEN', 'TERRACED', 'LIKE', 'THE', 'SEATS', 'OF', 'AN', 'AMPHITHEATRE'] +5105-28241-0016-1620: ref=['YOU', 'MUST', 'SEE', 'LIEUTENANT', 'I', 'SHOULD', 'THINK', 'THAT', 'WE', 'ARE', 'NOT', 'SO', 'NEAR', 'THE', 'COAST', 'OF', 'ALGERIA', 'AS', 'YOU', 'IMAGINED'] +5105-28241-0016-1620: hyp=['YOU', 'MUST', 'SEE', 'LIEUTENANT', 'I', 'SHOULD', 'THINK', 'THAT', 'WE', 'ARE', 'NOT', 'SO', 'NEAR', 'THE', 'COAST', 'OF', 'ALGERIA', 'AS', 'YOU', 'IMAGINED'] +5105-28241-0017-1621: ref=['AFTER', 'PONDERING', 'AWHILE', 'HE', 'SAID', 'IF', 'WE', 'WERE', 'FARTHER', 'AWAY', 'I', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'A', 'DEPTH', 'OF', 'TWO', 'OR', 'THREE', 'HUNDRED', 'FATHOMS', 'INSTEAD', 'OF', 'FIVE', 'FATHOMS', 'FIVE', 'FATHOMS'] +5105-28241-0017-1621: hyp=['AFTER', 'PONDERING', 'A', 'WHILE', 'HE', 
'SAID', 'IF', 'WE', 'WERE', 'FARTHER', 'AWAY', 'I', 'SHOULD', 'EXPECT', 'TO', 'FIND', 'A', 'DEPTH', 'OF', 'TWO', 'OR', 'THREE', 'HUNDRED', 'FATHOMS', 'INSTEAD', 'OF', 'FIVE', 'FATHOMS', 'FIVE', 'FATHOMS'] +5105-28241-0018-1622: ref=['ITS', 'DEPTH', 'REMAINED', 'INVARIABLE', 'STILL', 'FOUR', 'OR', 'AT', 'MOST', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'ITS', 'BOTTOM', 'WAS', 'ASSIDUOUSLY', 'DREDGED', 'IT', 'WAS', 'ONLY', 'TO', 'PROVE', 'IT', 'BARREN', 'OF', 'MARINE', 'PRODUCTION', 'OF', 'ANY', 'TYPE'] +5105-28241-0018-1622: hyp=['ITS', 'DEPTH', 'REMAINED', 'INVARIABLE', 'STILL', 'FOUR', 'OR', 'AT', 'MOST', 'FIVE', 'FATHOMS', 'AND', 'ALTHOUGH', 'ITS', 'BOTTOM', 'WAS', 'ASSIDUOUSLY', 'DREDGED', 'IT', 'WAS', 'ONLY', 'TO', 'PROVE', 'IT', 'BARREN', 'OF', 'MARINE', 'PRODUCTION', 'OF', 'ANY', 'TYPE'] +5105-28241-0019-1623: ref=['NOTHING', 'WAS', 'TO', 'BE', 'DONE', 'BUT', 'TO', 'PUT', 'ABOUT', 'AND', 'RETURN', 'IN', 'DISAPPOINTMENT', 'TOWARDS', 'THE', 'NORTH'] +5105-28241-0019-1623: hyp=['NOTHING', 'WAS', 'TO', 'BE', 'DONE', 'BUT', 'TO', 'PUT', 'ABOUT', 'AND', 'RETURN', 'AND', 'DISAPPOINTMENT', 'TOWARD', 'THE', 'NORTH'] +5142-33396-0000-898: ref=['AT', 'ANOTHER', 'TIME', 'HARALD', 'ASKED'] +5142-33396-0000-898: hyp=['AT', 'ANOTHER', 'TIME', 'HAROLD', 'ASKED'] +5142-33396-0001-899: ref=['WHAT', 'IS', 'YOUR', 'COUNTRY', 'OLAF', 'HAVE', 'YOU', 'ALWAYS', 'BEEN', 'A', 'THRALL', 'THE', "THRALL'S", 'EYES', 'FLASHED'] +5142-33396-0001-899: hyp=['WHAT', 'IS', 'YOUR', 'COUNTRY', 'OLAF', 'HAVE', 'YOU', 'ALWAYS', 'BEEN', 'A', 'THRALL', 'THE', "THRALL'S", 'EYES', 'FLASHED'] +5142-33396-0002-900: ref=['TWO', 'HUNDRED', 'WARRIORS', 'FEASTED', 'IN', 'HIS', 'HALL', 'AND', 'FOLLOWED', 'HIM', 'TO', 'BATTLE'] +5142-33396-0002-900: hyp=['TWO', 'HUNDRED', 'WARRIORS', 'FEASTED', 'IN', 'HIS', 'HALL', 'AND', 'FOLLOWED', 'HIM', 'TO', 'BATTLE'] +5142-33396-0003-901: ref=['THE', 'REST', 'OF', 'YOU', 'OFF', 'A', 'VIKING', 'HE', 'HAD', 'THREE', 'SHIPS'] +5142-33396-0003-901: hyp=['THE', 'REST', 'OF', 'YOU', 'OFF', 'A', 'VIKING', 'HE', 'HAD', 'THREE', 'SHIPS'] +5142-33396-0004-902: ref=['THESE', 'HE', 'GAVE', 'TO', 'THREE', 'OF', 'MY', 'BROTHERS'] +5142-33396-0004-902: hyp=['THESE', 'HE', 'GAVE', 'TO', 'THREE', 'OF', 'MY', 'BROTHERS'] +5142-33396-0005-903: ref=['BUT', 'I', 'STAYED', 'THAT', 'SPRING', 'AND', 'BUILT', 'ME', 'A', 'BOAT'] +5142-33396-0005-903: hyp=['BUT', 'I', 'STAYED', 'THAT', 'SPRING', 'AND', 'BUILT', 'ME', 'A', 'BOAT'] +5142-33396-0006-904: ref=['I', 'MADE', 'HER', 'FOR', 'ONLY', 'TWENTY', 'OARS', 'BECAUSE', 'I', 'THOUGHT', 'FEW', 'MEN', 'WOULD', 'FOLLOW', 'ME', 'FOR', 'I', 'WAS', 'YOUNG', 'FIFTEEN', 'YEARS', 'OLD'] +5142-33396-0006-904: hyp=['I', 'MADE', 'HER', 'ONLY', 'TWENTY', 'WARS', 'BECAUSE', 'I', 'THOUGHT', 'FEW', 'MEN', 'WOULD', 'FOLLOW', 'ME', 'FOR', 'I', 'WAS', 'YOUNG', 'FIFTEEN', 'YEARS', 'OLD'] +5142-33396-0007-905: ref=['AT', 'THE', 'PROW', 'I', 'CARVED', 'THE', 'HEAD', 'WITH', 'OPEN', 'MOUTH', 'AND', 'FORKED', 'TONGUE', 'THRUST', 'OUT'] +5142-33396-0007-905: hyp=['AT', 'THE', 'PROW', 'I', 'CARVED', 'THE', 'HEAD', 'WITH', 'OPEN', 'MOUTH', 'AND', 'FORKED', 'TONGUE', 'THRUST', 'OUT'] +5142-33396-0008-906: ref=['I', 'PAINTED', 'THE', 'EYES', 'RED', 'FOR', 'ANGER'] +5142-33396-0008-906: hyp=['I', 'PAINTED', 'THE', 'EYES', 'RED', 'FOR', 'ANGER'] +5142-33396-0009-907: ref=['THERE', 'STAND', 'SO', 'I', 'SAID', 'AND', 'GLARE', 'AND', 'HISS', 'AT', 'MY', 'FOES'] +5142-33396-0009-907: hyp=['THERE', 'STAND', 'SO', 'I', 'SAID', 'AND', 'GLARE', 'AND', 'HISS', 'AT', 'MY', 'FOES'] +5142-33396-0010-908: ref=['IN', 
'THE', 'STERN', 'I', 'CURVED', 'THE', 'TAIL', 'UP', 'ALMOST', 'AS', 'HIGH', 'AS', 'THE', 'HEAD'] +5142-33396-0010-908: hyp=['IN', 'A', 'STERN', 'I', 'CARVED', 'THE', 'TAIL', 'UP', 'ALMOST', 'AS', 'HIGH', 'AS', 'THE', 'HEAD'] +5142-33396-0011-909: ref=['THERE', 'SHE', 'SAT', 'ON', 'THE', 'ROLLERS', 'AS', 'FAIR', 'A', 'SHIP', 'AS', 'I', 'EVER', 'SAW'] +5142-33396-0011-909: hyp=['THERE', 'SHE', 'SAT', 'ON', 'THE', 'ROLLERS', 'AS', 'FAIR', 'A', 'SHIP', 'AS', 'I', 'EVER', 'SAW'] +5142-33396-0012-910: ref=['THEN', 'I', 'WILL', 'GET', 'ME', 'A', 'FARM', 'AND', 'WILL', 'WINTER', 'IN', 'THAT', 'LAND', 'NOW', 'WHO', 'WILL', 'FOLLOW', 'ME'] +5142-33396-0012-910: hyp=['THEN', 'I', 'WILL', 'GET', 'ME', 'A', 'FARM', 'AND', 'WILL', 'WIN', 'HER', 'IN', 'THAT', 'LAND', 'NOW', 'WHO', 'WILL', 'FOLLOW', 'ME'] +5142-33396-0013-911: ref=['HE', 'IS', 'BUT', 'A', 'BOY', 'THE', 'MEN', 'SAID'] +5142-33396-0013-911: hyp=['HE', 'IS', 'BUT', 'A', 'BOY', 'THE', 'MAN', 'SAID'] +5142-33396-0014-912: ref=['THIRTY', 'MEN', 'ONE', 'AFTER', 'ANOTHER', 'RAISED', 'THEIR', 'HORNS', 'AND', 'SAID'] +5142-33396-0014-912: hyp=['THIRTY', 'MEN', 'ONE', 'AFTER', 'ANOTHER', 'RAISED', 'THEIR', 'HORNS', 'AND', 'SAID'] +5142-33396-0015-913: ref=['AS', 'OUR', 'BOAT', 'FLASHED', 'DOWN', 'THE', 'ROLLERS', 'INTO', 'THE', 'WATER', 'I', 'MADE', 'THIS', 'SONG', 'AND', 'SANG', 'IT'] +5142-33396-0015-913: hyp=['AS', 'OUR', 'BOAT', 'FLASHED', 'DOWN', 'THE', 'ROLLERS', 'INTO', 'THE', 'WATER', 'I', 'MADE', 'THIS', 'SONG', 'AND', 'SANG', 'IT'] +5142-33396-0016-914: ref=['SO', 'WE', 'HARRIED', 'THE', 'COAST', 'OF', 'NORWAY'] +5142-33396-0016-914: hyp=['SO', 'WE', 'HURRIED', 'THE', 'COAST', 'OF', 'NORWAY'] +5142-33396-0017-915: ref=['WE', 'ATE', 'AT', 'MANY', "MEN'S", 'TABLES', 'UNINVITED'] +5142-33396-0017-915: hyp=['WE', 'ATE', 'IT', 'MANY', "MEN'S", 'TABLES', 'UNINVITED'] +5142-33396-0018-916: ref=['MY', "DRAGON'S", 'BELLY', 'IS', 'NEVER', 'FULL', 'AND', 'ON', 'BOARD', 'WENT', 'THE', 'GOLD'] +5142-33396-0018-916: hyp=['I', "DRAGON'S", 'BELLY', 'IS', 'NEVER', 'FULL', 'AND', 'ON', 'BOARD', 'WENT', 'THE', 'GOLD'] +5142-33396-0019-917: ref=['OH', 'IT', 'IS', 'BETTER', 'TO', 'LIVE', 'ON', 'THE', 'SEA', 'AND', 'LET', 'OTHER', 'MEN', 'RAISE', 'YOUR', 'CROPS', 'AND', 'COOK', 'YOUR', 'MEALS'] +5142-33396-0019-917: hyp=['OH', 'IT', 'IS', 'BETTER', 'TO', 'LIVE', 'ON', 'THE', 'SEA', 'AND', 'LET', 'OTHER', 'MEN', 'RAISE', 'YOUR', 'CROPS', 'AND', 'COOK', 'YOUR', 'MEALS'] +5142-33396-0020-918: ref=['A', 'HOUSE', 'SMELLS', 'OF', 'SMOKE', 'A', 'SHIP', 'SMELLS', 'OF', 'FROLIC'] +5142-33396-0020-918: hyp=['A', 'HOUSE', 'SMELLS', 'OF', 'SMOKE', 'A', "SHIP'S", 'MILLS', 'OF', 'FROLIC'] +5142-33396-0021-919: ref=['UP', 'AND', 'DOWN', 'THE', 'WATER', 'WE', 'WENT', 'TO', 'GET', 'MUCH', 'WEALTH', 'AND', 'MUCH', 'FROLIC'] +5142-33396-0021-919: hyp=['UP', 'AND', 'DOWN', 'THE', 'WATER', 'WE', 'WENT', 'TO', 'GET', 'MUCH', 'WEALTH', 'AND', 'MUCH', 'FROLIC'] +5142-33396-0022-920: ref=['WHAT', 'OF', 'THE', 'FARM', 'OLAF', 'NOT', 'YET', 'I', 'ANSWERED', 'VIKING', 'IS', 'BETTER', 'FOR', 'SUMMER'] +5142-33396-0022-920: hyp=['WHAT', 'OF', 'THE', 'FARM', 'ALL', 'OFF', 'NOT', 'YET', 'I', 'ANSWERED', 'VIKING', 'IS', 'BETTER', 'FOR', 'SUMMER'] +5142-33396-0023-921: ref=['IT', 'WAS', 'SO', 'DARK', 'THAT', 'I', 'COULD', 'SEE', 'NOTHING', 'BUT', 'A', 'FEW', 'SPARKS', 'ON', 'THE', 'HEARTH'] +5142-33396-0023-921: hyp=['IT', 'WAS', 'SO', 'DARK', 'THAT', 'I', 'COULD', 'SEE', 'NOTHING', 'BUT', 'A', 'FEW', 'SPARKS', 'ON', 'THE', 'HEARTH'] +5142-33396-0024-922: ref=['I', 'STOOD', 'WITH', 'MY', 
'BACK', 'TO', 'THE', 'WALL', 'FOR', 'I', 'WANTED', 'NO', 'SWORD', 'REACHING', 'OUT', 'OF', 'THE', 'DARK', 'FOR', 'ME'] +5142-33396-0024-922: hyp=['I', 'STOOD', 'WITH', 'MY', 'BACK', 'TO', 'THE', 'WALL', 'FOR', 'I', 'WANTED', 'NO', 'SWORD', 'REACHING', 'OUT', 'OF', 'THE', 'DARK', 'FOR', 'ME'] +5142-33396-0025-923: ref=['COME', 'COME', 'I', 'CALLED', 'WHEN', 'NO', 'ONE', 'OBEYED', 'A', 'FIRE'] +5142-33396-0025-923: hyp=['COME', 'COME', 'I', 'CALLED', 'WHEN', 'NO', 'ONE', 'OBEYED', 'A', 'FIRE'] +5142-33396-0026-924: ref=['MY', 'MEN', 'LAUGHED', 'YES', 'A', 'STINGY', 'HOST'] +5142-33396-0026-924: hyp=['MY', 'MEN', 'LAUGHED', 'YES', 'A', 'STINGY', 'HOST'] +5142-33396-0027-925: ref=['HE', 'ACTS', 'AS', 'THOUGH', 'HE', 'HAD', 'NOT', 'EXPECTED', 'US'] +5142-33396-0027-925: hyp=['HE', 'ACTS', 'AS', 'THOUGH', 'HE', 'IS', 'NOT', 'EXPECTED', 'US'] +5142-33396-0028-926: ref=['ON', 'A', 'BENCH', 'IN', 'A', 'FAR', 'CORNER', 'WERE', 'A', 'DOZEN', 'PEOPLE', 'HUDDLED', 'TOGETHER'] +5142-33396-0028-926: hyp=['ON', 'A', 'BENCH', 'IN', 'A', 'FAR', 'CORNER', 'WERE', 'A', 'DOZEN', 'PEOPLE', 'HUDDLED', 'TOGETHER'] +5142-33396-0029-927: ref=['BRING', 'IN', 'THE', 'TABLE', 'WE', 'ARE', 'HUNGRY'] +5142-33396-0029-927: hyp=['BRING', 'IN', 'THE', 'TABLE', 'WE', 'ARE', 'HUNGRY'] +5142-33396-0030-928: ref=['THE', 'THRALLS', 'WERE', 'BRINGING', 'IN', 'A', 'GREAT', 'POT', 'OF', 'MEAT'] +5142-33396-0030-928: hyp=['THE', 'THRALLS', 'WERE', 'RINGING', 'IN', 'A', 'GREAT', 'POT', 'OF', 'MEAT'] +5142-33396-0031-929: ref=['THEY', 'SET', 'UP', 'A', 'CRANE', 'OVER', 'THE', 'FIRE', 'AND', 'HUNG', 'THE', 'POT', 'UPON', 'IT', 'AND', 'WE', 'SAT', 'AND', 'WATCHED', 'IT', 'BOIL', 'WHILE', 'WE', 'JOKED', 'AT', 'LAST', 'THE', 'SUPPER', 'BEGAN'] +5142-33396-0031-929: hyp=['THEY', 'SET', 'UP', 'A', 'CRANE', 'OVER', 'THE', 'FIRE', 'AND', 'HUNG', 'THE', 'POT', 'UPON', 'IT', 'AND', 'WE', 'SAT', 'AND', 'WATCHED', 'IT', 'BOIL', 'WHILE', 'WE', 'JOKED', 'AT', 'LAST', 'THE', 'SUPPER', 'BEGAN'] +5142-33396-0032-930: ref=['THE', 'FARMER', 'SAT', 'GLOOMILY', 'ON', 'THE', 'BENCH', 'AND', 'WOULD', 'NOT', 'EAT', 'AND', 'YOU', 'CANNOT', 'WONDER', 'FOR', 'HE', 'SAW', 'US', 'PUTTING', 'POTFULS', 'OF', 'HIS', 'GOOD', 'BEEF', 'AND', 'BASKET', 'LOADS', 'OF', 'BREAD', 'INTO', 'OUR', 'BIG', 'MOUTHS'] +5142-33396-0032-930: hyp=['THE', 'FARMER', 'SAT', 'GLOOMILY', 'ON', 'THE', 'BENCH', 'AND', 'WOULD', 'NOT', 'EAT', 'AND', 'YOU', 'CANNOT', 'WONDER', 'FOR', 'HE', 'SAW', 'US', 'PUTTING', 'POTFULS', 'OF', 'HIS', 'GOOD', 'BEEF', 'AND', 'BASKEY', 'LOADS', 'OF', 'BREAD', 'AND', 'OUR', 'BIG', 'MOUTHS'] +5142-33396-0033-931: ref=['YOU', 'WOULD', 'NOT', 'EAT', 'WITH', 'US', 'YOU', 'CANNOT', 'SAY', 'NO', 'TO', 'HALF', 'OF', 'MY', 'ALE', 'I', 'DRINK', 'THIS', 'TO', 'YOUR', 'HEALTH'] +5142-33396-0033-931: hyp=['YOU', 'WOULD', 'NOT', 'EAT', 'WITH', 'US', 'YOU', 'CANNOT', 'SAY', 'NO', 'TO', 'HALF', 'OF', 'MY', 'ALE', 'I', 'DRINK', 'THIS', 'TO', 'YOUR', 'HEALTH'] +5142-33396-0034-932: ref=['THEN', 'I', 'DRANK', 'HALF', 'OF', 'THE', 'HORNFUL', 'AND', 'SENT', 'THE', 'REST', 'ACROSS', 'THE', 'FIRE', 'TO', 'THE', 'FARMER', 'HE', 'TOOK', 'IT', 'AND', 'SMILED', 'SAYING'] +5142-33396-0034-932: hyp=['THEN', 'I', 'DRANK', 'HALF', 'OF', 'THE', 'HORNFUL', 'AND', 'SET', 'THE', 'REST', 'ACROSS', 'THE', 'FIRE', 'TO', 'THE', 'FARMER', 'HE', 'TOOK', 'IT', 'AND', 'SMILED', 'SAYING'] +5142-33396-0035-933: ref=['DID', 'YOU', 'EVER', 'HAVE', 'SUCH', 'A', 'LORDLY', 'GUEST', 'BEFORE', 'I', 'WENT', 'ON'] +5142-33396-0035-933: hyp=['DID', 'YOU', 'EVER', 'HAVE', 'SUCH', 'A', 'LORDLY', 'GUEST', 'BEFORE', 
'I', 'WENT', 'ON'] +5142-33396-0036-934: ref=['SO', 'I', 'WILL', 'GIVE', 'OUT', 'THIS', 'LAW', 'THAT', 'MY', 'MEN', 'SHALL', 'NEVER', 'LEAVE', 'YOU', 'ALONE'] +5142-33396-0036-934: hyp=['SO', 'I', 'WILL', 'GIVE', 'OUT', 'THIS', 'LAW', 'THAT', 'MY', 'MEN', 'SHALL', 'NEVER', 'LEAVE', 'YOU', 'ALONE'] +5142-33396-0037-935: ref=['HAKON', 'THERE', 'SHALL', 'BE', 'YOUR', 'CONSTANT', 'COMPANION', 'FRIEND', 'FARMER'] +5142-33396-0037-935: hyp=['HAWKIN', 'THERE', 'SHALL', 'BE', 'YOUR', 'CONSTANT', 'COMPANION', 'FRIEND', 'FARMER'] +5142-33396-0038-936: ref=['HE', 'SHALL', 'NOT', 'LEAVE', 'YOU', 'DAY', 'OR', 'NIGHT', 'WHETHER', 'YOU', 'ARE', 'WORKING', 'OR', 'PLAYING', 'OR', 'SLEEPING'] +5142-33396-0038-936: hyp=['HE', 'SHALL', 'NOT', 'LEAVE', 'YOU', 'DAY', 'OR', 'NIGHT', 'WHETHER', 'YOU', 'ARE', 'WORKING', 'OR', 'PLAYING', 'OR', 'SLEEPING'] +5142-33396-0039-937: ref=['I', 'NAMED', 'NINE', 'OTHERS', 'AND', 'SAID'] +5142-33396-0039-937: hyp=['I', 'NAME', 'NINE', 'OTHERS', 'AND', 'SAID'] +5142-33396-0040-938: ref=['AND', 'THESE', 'SHALL', 'FOLLOW', 'YOUR', 'THRALLS', 'IN', 'THE', 'SAME', 'WAY'] +5142-33396-0040-938: hyp=['AND', 'THESE', 'SHALL', 'FOLLOW', 'YOUR', 'THRALLS', 'IN', 'THE', 'SAME', 'WAY'] +5142-33396-0041-939: ref=['SO', 'I', 'SET', 'GUARDS', 'OVER', 'EVERY', 'ONE', 'IN', 'THAT', 'HOUSE'] +5142-33396-0041-939: hyp=['SO', 'I', 'SET', 'GUARDS', 'OVER', 'EVERYONE', 'IN', 'THAT', 'HOUSE'] +5142-33396-0042-940: ref=['SO', 'NO', 'TALES', 'GOT', 'OUT', 'TO', 'THE', 'NEIGHBORS', 'BESIDES', 'IT', 'WAS', 'A', 'LONELY', 'PLACE', 'AND', 'BY', 'GOOD', 'LUCK', 'NO', 'ONE', 'CAME', 'THAT', 'WAY'] +5142-33396-0042-940: hyp=['SO', 'NO', 'TALES', 'GOT', 'OUT', 'TO', 'THE', 'NEIGHBORS', 'BESIDES', 'IT', 'WAS', 'A', 'LONELY', 'PLACE', 'AND', 'BY', 'GOOD', 'LUCK', 'NO', 'ONE', 'CAME', 'THAT', 'WAY'] +5142-33396-0043-941: ref=['THEIR', 'EYES', 'DANCED', 'BIG', 'THORLEIF', 'STOOD', 'UP', 'AND', 'STRETCHED', 'HIMSELF'] +5142-33396-0043-941: hyp=['THEIR', 'EYES', 'DANCED', 'BIG', 'TORE', 'LEAF', 'STOOD', 'UP', 'AND', 'STRETCHED', 'HIMSELF'] +5142-33396-0044-942: ref=['I', 'AM', 'STIFF', 'WITH', 'LONG', 'SITTING', 'HE', 'SAID', 'I', 'ITCH', 'FOR', 'A', 'FIGHT', 'I', 'TURNED', 'TO', 'THE', 'FARMER'] +5142-33396-0044-942: hyp=["I'M", 'STIFF', 'WITH', 'LONG', 'CITY', 'HE', 'SAID', 'I', 'ITCH', 'FOR', 'A', 'FIGHT', 'I', 'TURNED', 'TO', 'THE', 'FARMER'] +5142-33396-0045-943: ref=['THIS', 'IS', 'OUR', 'LAST', 'FEAST', 'WITH', 'YOU', 'I', 'SAID'] +5142-33396-0045-943: hyp=['THIS', 'IS', 'OUR', 'LAST', 'FEAST', 'WITH', 'YOU', 'I', 'SAID'] +5142-33396-0046-944: ref=['BY', 'THE', 'BEARD', 'OF', 'ODIN', 'I', 'CRIED', 'YOU', 'HAVE', 'TAKEN', 'OUR', 'JOKE', 'LIKE', 'A', 'MAN'] +5142-33396-0046-944: hyp=['BY', 'THE', 'BEARD', 'OF', 'ODIN', 'I', 'CRIED', 'YOU', 'HAVE', 'TAKEN', 'OUR', 'JOKE', 'LIKE', 'A', 'MAN'] +5142-33396-0047-945: ref=['MY', 'MEN', 'POUNDED', 'THE', 'TABLE', 'WITH', 'THEIR', 'FISTS'] +5142-33396-0047-945: hyp=['MY', 'MEN', 'POUNDED', 'THE', 'TABLE', 'WITH', 'THEIR', 'FISTS'] +5142-33396-0048-946: ref=['BY', 'THE', 'HAMMER', 'OF', 'THOR', 'SHOUTED', 'GRIM', 'HERE', 'IS', 'NO', 'STINGY', 'COWARD'] +5142-33396-0048-946: hyp=['BY', 'THE', 'HAMMER', 'OTHOR', 'SHOUTED', 'GRIM', 'THERE', 'IS', 'NO', 'STINGY', 'COWARD'] +5142-33396-0049-947: ref=['HERE', 'FRIEND', 'TAKE', 'IT', 'AND', 'HE', 'THRUST', 'IT', 'INTO', 'THE', "FARMER'S", 'HAND'] +5142-33396-0049-947: hyp=['HERE', 'FRIEND', 'TAKE', 'IT', 'AND', 'HE', 'THRUST', 'INTO', 'THE', "FARMER'S", 'HAND'] +5142-33396-0050-948: ref=['MAY', 'YOU', 'DRINK', "HEART'S", 
'EASE', 'FROM', 'IT', 'FOR', 'MANY', 'YEARS'] +5142-33396-0050-948: hyp=['MAY', 'YOU', 'DRINK', 'HEARTS', 'EASE', 'FROM', 'IT', 'FOR', 'MANY', 'YEARS'] +5142-33396-0051-949: ref=['AND', 'WITH', 'IT', 'I', 'LEAVE', 'YOU', 'A', 'NAME', 'SIF', 'THE', 'FRIENDLY', 'I', 'SHALL', 'HOPE', 'TO', 'DRINK', 'WITH', 'YOU', 'SOMETIME', 'IN', 'VALHALLA'] +5142-33396-0051-949: hyp=['AND', 'WITH', 'IT', 'I', 'LEAVE', 'YOU', 'A', 'NAME', 'SIFT', 'THE', 'FRIENDLY', 'I', 'SHALL', 'HOPE', 'TO', 'DRINK', 'WITH', 'YOU', 'SOME', 'TIME', 'IN', 'VALHALLA'] +5142-33396-0052-950: ref=['HERE', 'IS', 'A', 'RING', 'FOR', 'SIF', 'THE', 'FRIENDLY', 'AND', 'HERE', 'IS', 'A', 'BRACELET', 'A', 'SWORD', 'WOULD', 'NOT', 'BE', 'ASHAMED', 'TO', 'HANG', 'AT', 'YOUR', 'SIDE'] +5142-33396-0052-950: hyp=['HERE', 'IS', 'A', 'RING', 'FOR', 'SIFT', 'THE', 'FRIENDLY', 'AND', 'HERE', 'IS', 'A', 'BRACELET', 'AND', 'A', 'SWORD', 'WOULD', 'NOT', 'BE', 'ASHAMED', 'TO', 'HANG', 'AT', 'YOUR', 'SIDE'] +5142-33396-0053-951: ref=['I', 'TOOK', 'FIVE', 'GREAT', 'BRACELETS', 'OF', 'GOLD', 'FROM', 'OUR', 'TREASURE', 'CHEST', 'AND', 'GAVE', 'THEM', 'TO', 'HIM'] +5142-33396-0053-951: hyp=['I', 'TOOK', 'FIVE', 'GREAT', 'BRACELETS', 'OF', 'GOLD', 'FROM', 'OUR', 'TREASURE', 'CHEST', 'AND', 'GAVE', 'THEM', 'TO', 'HIM'] +5142-33396-0054-952: ref=['THAT', 'IS', 'THE', 'BEST', 'WAY', 'TO', 'DECIDE', 'FOR', 'THE', 'SPEAR', 'WILL', 'ALWAYS', 'POINT', 'SOMEWHERE', 'AND', 'ONE', 'THING', 'IS', 'AS', 'GOOD', 'AS', 'ANOTHER'] +5142-33396-0054-952: hyp=['THAT', 'IS', 'THE', 'BEST', 'WAY', 'TO', 'DECIDE', 'FOR', 'THE', 'SPEAR', 'WILL', 'ALWAYS', 'POINT', 'SOMEWHERE', 'AND', 'ONE', 'THING', 'IS', 'AS', 'GOOD', 'AS', 'ANOTHER'] +5142-33396-0055-953: ref=['THAT', 'TIME', 'IT', 'POINTED', 'US', 'INTO', 'YOUR', "FATHER'S", 'SHIPS'] +5142-33396-0055-953: hyp=['THAT', 'TIME', 'IT', 'POINTED', 'US', 'INTO', 'YOUR', "FATHER'S", 'SHIPS'] +5142-33396-0056-954: ref=['HERE', 'THEY', 'SAID', 'IS', 'A', 'RASCAL', 'WHO', 'HAS', 'BEEN', 'HARRYING', 'OUR', 'COASTS'] +5142-33396-0056-954: hyp=['HERE', 'THEY', 'SAID', 'AS', 'A', 'RASCAL', 'WHO', 'HAS', 'BEEN', 'HARRYING', 'OUR', 'COASTS'] +5142-33396-0057-955: ref=['WE', 'SUNK', 'HIS', 'SHIP', 'AND', 'MEN', 'BUT', 'HIM', 'WE', 'BROUGHT', 'TO', 'YOU'] +5142-33396-0057-955: hyp=['WE', 'SUNK', 'HIS', 'SHIP', 'AND', 'MEN', 'BUT', 'HIM', 'WE', 'BROUGHT', 'TO', 'YOU'] +5142-33396-0058-956: ref=['A', 'ROBBER', 'VIKING', 'SAID', 'THE', 'KING', 'AND', 'SCOWLED', 'AT', 'ME'] +5142-33396-0058-956: hyp=['A', 'ROBBER', 'VIKING', 'SAID', 'THE', 'KING', 'AND', 'HE', 'SCOWLED', 'AT', 'ME'] +5142-33396-0059-957: ref=['YES', 'AND', 'WITH', 'ALL', 'YOUR', 'FINGERS', 'IT', 'TOOK', 'YOU', 'A', 'YEAR', 'TO', 'CATCH', 'ME', 'THE', 'KING', 'FROWNED', 'MORE', 'ANGRILY'] +5142-33396-0059-957: hyp=['YES', 'AND', 'WITH', 'ALL', 'YOUR', 'FINGERS', 'IT', 'TOOK', 'YOU', 'A', 'YEAR', 'TO', 'CATCH', 'ME', 'THE', 'KING', 'FROWNED', 'MORE', 'ANGRILY'] +5142-33396-0060-958: ref=['TAKE', 'HIM', 'OUT', 'THORKEL', 'AND', 'LET', 'HIM', 'TASTE', 'YOUR', 'SWORD'] +5142-33396-0060-958: hyp=['TAKE', 'HIM', 'OUT', 'TORCAL', 'AND', 'LET', 'HIM', 'TASTE', 'YOUR', 'SWORD'] +5142-33396-0061-959: ref=['YOUR', 'MOTHER', 'THE', 'QUEEN', 'WAS', 'STANDING', 'BY'] +5142-33396-0061-959: hyp=['YOUR', 'MOTHER', 'THE', 'QUEEN', 'WAS', 'STANDING', 'BY'] +5142-33396-0062-960: ref=['NOW', 'SHE', 'PUT', 'HER', 'HAND', 'ON', 'HIS', 'ARM', 'AND', 'SMILED', 'AND', 'SAID'] +5142-33396-0062-960: hyp=['NOW', 'SHE', 'PUT', 'HER', 'HAND', 'ON', 'HIS', 'ARM', 'AND', 'SMILED', 'AND', 'SAID'] 
+5142-33396-0063-961: ref=['AND', 'WOULD', 'HE', 'NOT', 'BE', 'A', 'GOOD', 'GIFT', 'FOR', 'OUR', 'BABY'] +5142-33396-0063-961: hyp=['AND', 'WOULD', 'HE', 'NOT', 'BE', 'A', 'GOOD', 'GIFT', 'FOR', 'OUR', 'BABY'] +5142-33396-0064-962: ref=['YOUR', 'FATHER', 'THOUGHT', 'A', 'MOMENT', 'THEN', 'LOOKED', 'AT', 'YOUR', 'MOTHER', 'AND', 'SMILED'] +5142-33396-0064-962: hyp=['YOUR', 'FATHER', 'THOUGHT', 'A', 'MOMENT', 'AND', 'LOOKED', 'AT', 'YOUR', 'MOTHER', 'AND', 'SMILED'] +5142-33396-0065-963: ref=['SOFT', 'HEART', 'HE', 'SAID', 'GENTLY', 'TO', 'HER', 'THEN', 'TO', 'THORKEL', 'WELL', 'LET', 'HIM', 'GO', 'THORKEL'] +5142-33396-0065-963: hyp=['SOFT', 'HEART', 'HE', 'SAID', 'GENTLY', 'TO', 'HER', 'THEN', 'TO', 'TORQUAL', 'WELL', 'LET', 'HIM', 'GO', 'TORKO'] +5142-33396-0066-964: ref=['THEN', 'HE', 'TURNED', 'TO', 'ME', 'AGAIN', 'FROWNING'] +5142-33396-0066-964: hyp=['THEN', 'HE', 'TURNED', 'TO', 'ME', 'AGAIN', 'FROWNING'] +5142-33396-0067-965: ref=['BUT', 'YOUNG', 'SHARP', 'TONGUE', 'NOW', 'THAT', 'WE', 'HAVE', 'CAUGHT', 'YOU', 'WE', 'WILL', 'PUT', 'YOU', 'INTO', 'A', 'TRAP', 'THAT', 'YOU', 'CANNOT', 'GET', 'OUT', 'OF'] +5142-33396-0067-965: hyp=['BUT', 'YOUNG', 'SHARP', 'TONGUE', 'NOW', 'THAT', "WE'VE", 'CAUGHT', 'YOU', 'WILL', 'PUT', 'YOU', 'INTO', 'A', 'TRAP', 'THAT', 'YOU', 'CANNOT', 'GET', 'OUT', 'OF'] +5142-33396-0068-966: ref=['SO', 'I', 'LIVED', 'AND', 'NOW', 'AM', 'YOUR', 'TOOTH', 'THRALL', 'WELL', 'IT', 'IS', 'THE', 'LUCK', 'OF', 'WAR'] +5142-33396-0068-966: hyp=['SO', 'I', 'LIVED', 'AND', 'NOW', "I'M", 'YOUR', 'TOOTH', 'THRALL', 'WELL', 'IT', 'IS', 'THE', 'LUCK', 'OF', 'WAR'] +5142-36377-0000-870: ref=['IT', 'WAS', 'ONE', 'OF', 'THE', 'MASTERLY', 'AND', 'CHARMING', 'STORIES', 'OF', 'DUMAS', 'THE', 'ELDER'] +5142-36377-0000-870: hyp=['IT', 'WAS', 'ONE', 'OF', 'THE', 'MASTERLY', 'AND', 'CHARMING', 'STORIES', 'OF', 'DUMAS', 'THE', 'ELDER'] +5142-36377-0001-871: ref=['IN', 'FIVE', 'MINUTES', 'I', 'WAS', 'IN', 'A', 'NEW', 'WORLD', 'AND', 'MY', 'MELANCHOLY', 'ROOM', 'WAS', 'FULL', 'OF', 'THE', 'LIVELIEST', 'FRENCH', 'COMPANY'] +5142-36377-0001-871: hyp=['IN', 'FIVE', 'MINUTES', 'I', 'WAS', 'IN', 'A', 'NEW', 'WORLD', 'AND', 'MY', 'MELANCHOLY', 'ROOM', 'WAS', 'FULL', 'OF', 'THE', 'LIVELIEST', 'FRENCH', 'COMPANY'] +5142-36377-0002-872: ref=['THE', 'SOUND', 'OF', 'AN', 'IMPERATIVE', 'AND', 'UNCOMPROMISING', 'BELL', 'RECALLED', 'ME', 'IN', 'DUE', 'TIME', 'TO', 'THE', 'REGIONS', 'OF', 'REALITY'] +5142-36377-0002-872: hyp=['THE', 'SOUND', 'OF', 'AN', 'IMPERATIVE', 'AND', 'UNCOMPROMISING', 'BELL', 'RECALLED', 'ME', 'IN', 'DUE', 'TIME', 'TO', 'THE', 'REGIONS', 'OF', 'REALITY'] +5142-36377-0003-873: ref=['AMBROSE', 'MET', 'ME', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'STAIRS', 'AND', 'SHOWED', 'ME', 'THE', 'WAY', 'TO', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0003-873: hyp=['AMBROSE', 'MET', 'ME', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'STAIRS', 'AND', 'SHOWED', 'ME', 'THE', 'WAY', 'TO', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0004-874: ref=['SHE', 'SIGNED', 'TO', 'ME', 'WITH', 'A', 'GHOSTLY', 'SOLEMNITY', 'TO', 'TAKE', 'THE', 'VACANT', 'PLACE', 'ON', 'THE', 'LEFT', 'OF', 'HER', 'FATHER'] +5142-36377-0004-874: hyp=['SHE', 'SIGNED', 'TO', 'ME', 'WITH', 'A', 'GHOSTLY', 'SOLEMNITY', 'TO', 'TAKE', 'THE', 'VACANT', 'PLACE', 'ON', 'THE', 'LEFT', 'OF', 'HER', 'FATHER'] +5142-36377-0005-875: ref=['THE', 'DOOR', 'OPENED', 'AGAIN', 'WHILE', 'I', 'WAS', 'STILL', 'STUDYING', 'THE', 'TWO', 'BROTHERS', 'WITHOUT', 'I', 'HONESTLY', 'CONFESS', 'BEING', 'VERY', 'FAVORABLY', 'IMPRESSED', 'BY', 'EITHER', 'OF', 'THEM'] 
+5142-36377-0005-875: hyp=['THE', 'DOOR', 'OPENED', 'AGAIN', 'WHILE', 'I', 'WAS', 'STILL', 'STUDYING', 'THE', 'TWO', 'BROTHERS', 'WITHOUT', 'I', 'HONESTLY', 'CONFESS', 'BEING', 'VERY', 'FAVORABLY', 'IMPRESSED', 'BY', 'EITHER', 'OF', 'THEM'] +5142-36377-0006-876: ref=['A', 'NEW', 'MEMBER', 'OF', 'THE', 'FAMILY', 'CIRCLE', 'WHO', 'INSTANTLY', 'ATTRACTED', 'MY', 'ATTENTION', 'ENTERED', 'THE', 'ROOM'] +5142-36377-0006-876: hyp=['A', 'NEW', 'MEMBER', 'OF', 'THE', 'FAMILY', 'CIRCLE', 'WHO', 'INSTANTLY', 'ATTRACTED', 'MY', 'ATTENTION', 'ENTERED', 'THE', 'ROOM'] +5142-36377-0007-877: ref=['A', 'LITTLE', 'CRACKED', 'THAT', 'IN', 'THE', 'POPULAR', 'PHRASE', 'WAS', 'MY', 'IMPRESSION', 'OF', 'THE', 'STRANGER', 'WHO', 'NOW', 'MADE', 'HIS', 'APPEARANCE', 'IN', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0007-877: hyp=['A', 'LITTLE', 'CRACKED', 'THAT', 'IN', 'THE', 'POPULAR', 'PHRASE', 'WAS', 'MY', 'IMPRESSION', 'OF', 'THE', 'STRANGER', 'WHO', 'NOW', 'MADE', 'HIS', 'APPEARANCE', 'IN', 'THE', 'SUPPER', 'ROOM'] +5142-36377-0008-878: ref=['MISTER', 'MEADOWCROFT', 'THE', 'ELDER', 'HAVING', 'NOT', 'SPOKEN', 'ONE', 'WORD', 'THUS', 'FAR', 'HIMSELF', 'INTRODUCED', 'THE', 'NEWCOMER', 'TO', 'ME', 'WITH', 'A', 'SIDE', 'GLANCE', 'AT', 'HIS', 'SONS', 'WHICH', 'HAD', 'SOMETHING', 'LIKE', 'DEFIANCE', 'IN', 'IT', 'A', 'GLANCE', 'WHICH', 'AS', 'I', 'WAS', 'SORRY', 'TO', 'NOTICE', 'WAS', 'RETURNED', 'WITH', 'THE', 'DEFIANCE', 'ON', 'THEIR', 'SIDE', 'BY', 'THE', 'TWO', 'YOUNG', 'MEN'] +5142-36377-0008-878: hyp=['MISTER', 'MEDICRAFT', 'THE', 'ELDER', 'HAVING', 'NOT', 'SPOKEN', 'ONE', 'WORD', 'THUS', 'FAR', 'HIMSELF', 'INTRODUCED', 'THE', 'NEWCOMER', 'TO', 'ME', 'WITH', 'A', 'SIDE', 'GLANCE', 'AT', 'HIS', 'SONS', 'WHICH', 'HAD', 'SOMETHING', 'LIKE', 'DEFIANCE', 'IN', 'IT', 'A', 'GLANCE', 'WHICH', 'AS', 'I', 'WAS', 'SORRY', 'TO', 'NOTICE', 'WAS', 'RETURNED', 'WITH', 'THE', 'DEFIANCE', 'ON', 'THEIR', 'SIDE', 'BY', 'THE', 'TWO', 'YOUNG', 'MEN'] +5142-36377-0009-879: ref=['PHILIP', 'LEFRANK', 'THIS', 'IS', 'MY', 'OVERLOOKER', 'MISTER', 'JAGO', 'SAID', 'THE', 'OLD', 'MAN', 'FORMALLY', 'PRESENTING', 'US'] +5142-36377-0009-879: hyp=['PHILIP', 'LE', 'FRANK', 'THIS', 'IS', 'MY', 'OVERLOOKER', 'MISTER', 'YAGO', 'SAID', 'THE', 'OLD', 'MAN', 'FORMERLY', 'PRESENTING', 'US'] +5142-36377-0010-880: ref=['HE', 'IS', 'NOT', 'WELL', 'HE', 'HAS', 'COME', 'OVER', 'THE', 'OCEAN', 'FOR', 'REST', 'AND', 'CHANGE', 'OF', 'SCENE'] +5142-36377-0010-880: hyp=['HE', 'IS', 'NOT', 'WELL', 'HE', 'HAS', 'COME', 'OVER', 'THE', 'OCEAN', 'FOR', 'REST', 'AND', 'CHANGE', 'IS', 'SEEN'] +5142-36377-0011-881: ref=['MISTER', 'JAGO', 'IS', 'AN', 'AMERICAN', 'PHILIP'] +5142-36377-0011-881: hyp=['THE', 'TRIAGO', 'IS', 'AN', 'AMERICAN', 'PHILIP'] +5142-36377-0012-882: ref=['MAKE', 'ACQUAINTANCE', 'WITH', 'MISTER', 'JAGO', 'SIT', 'TOGETHER'] +5142-36377-0012-882: hyp=['MAKE', 'ACQUAINTANCE', 'WITH', 'MISCHIAGO', 'SIP', 'TOGETHER'] +5142-36377-0013-883: ref=['THEY', 'POINTEDLY', 'DREW', 'BACK', 'FROM', 'JOHN', 'JAGO', 'AS', 'HE', 'APPROACHED', 'THE', 'EMPTY', 'CHAIR', 'NEXT', 'TO', 'ME', 'AND', 'MOVED', 'ROUND', 'TO', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'TABLE'] +5142-36377-0013-883: hyp=['THEY', 'POINTEDLY', 'DREW', 'BACK', 'FROM', 'JOHN', 'JAGO', 'AS', 'HE', 'APPROACHED', 'THE', 'EMPTY', 'CHAIR', 'NEXT', 'TO', 'ME', 'AND', 'MOVED', 'ROUND', 'TO', 'THE', 'OPPOSITE', 'SIDE', 'OF', 'THE', 'TABLE'] +5142-36377-0014-884: ref=['A', 'PRETTY', 'GIRL', 'AND', 'SO', 'FAR', 'AS', 'I', 'COULD', 'JUDGE', 'BY', 'APPEARANCES', 'A', 'GOOD', 'GIRL', 'TOO', 'DESCRIBING', 'HER', 'GENERALLY', 
'I', 'MAY', 'SAY', 'THAT', 'SHE', 'HAD', 'A', 'SMALL', 'HEAD', 'WELL', 'CARRIED', 'AND', 'WELL', 'SET', 'ON', 'HER', 'SHOULDERS', 'BRIGHT', 'GRAY', 'EYES', 'THAT', 'LOOKED', 'AT', 'YOU', 'HONESTLY', 'AND', 'MEANT', 'WHAT', 'THEY', 'LOOKED', 'A', 'TRIM', 'SLIGHT', 'LITTLE', 'FIGURE', 'TOO', 'SLIGHT', 'FOR', 'OUR', 'ENGLISH', 'NOTIONS', 'OF', 'BEAUTY', 'A', 'STRONG', 'AMERICAN', 'ACCENT', 'AND', 'A', 'RARE', 'THING', 'IN', 'AMERICA', 'A', 'PLEASANTLY', 'TONED', 'VOICE', 'WHICH', 'MADE', 'THE', 'ACCENT', 'AGREEABLE', 'TO', 'ENGLISH', 'EARS'] +5142-36377-0014-884: hyp=['A', 'PRETTY', 'GIRL', 'AND', 'SO', 'FAR', 'AS', 'I', 'COULD', 'JUDGE', 'MY', 'APPEARANCES', 'A', 'GOOD', 'GIRL', 'TOO', 'DESCRIBING', 'HER', 'GENERALLY', 'I', 'MAY', 'SAY', 'THAT', 'SHE', 'HAD', 'A', 'SMALL', 'HEAD', 'WELL', 'CARRIED', 'AND', 'WELL', 'SET', 'ON', 'HER', 'SHOULDERS', 'BRIGHT', 'GREY', 'EYES', 'THAT', 'LOOKED', 'AT', 'YOU', 'HONESTLY', 'AND', 'MEANT', 'WHAT', 'THEY', 'LOOKED', 'A', 'TRIM', 'SLIGHT', 'LITTLE', 'FIGURE', 'TOO', 'SLIGHT', 'FOR', 'OUR', 'ENGLISH', 'NOTIONS', 'OF', 'BEAUTY', 'A', 'STRONG', 'AMERICAN', 'ACCENT', 'AND', 'A', 'RARE', 'THING', 'IN', 'AMERICA', 'A', 'PLEASANTLY', 'TONED', 'VOICE', 'WHICH', 'MADE', 'THE', 'ACCENT', 'AGREEABLE', 'TO', 'ENGLISH', 'YEARS'] +5142-36377-0015-885: ref=['OUR', 'FIRST', 'IMPRESSIONS', 'OF', 'PEOPLE', 'ARE', 'IN', 'NINE', 'CASES', 'OUT', 'OF', 'TEN', 'THE', 'RIGHT', 'IMPRESSIONS'] +5142-36377-0015-885: hyp=['OUR', 'FIRST', 'IMPRESSIONS', 'OF', 'PEOPLE', 'ARE', 'IN', 'NINE', 'CASES', 'AT', 'A', 'TEN', 'THE', 'RIGHT', 'IMPRESSIONS'] +5142-36377-0016-886: ref=['FOR', 'ONCE', 'IN', 'A', 'WAY', 'I', 'PROVED', 'A', 'TRUE', 'PROPHET'] +5142-36377-0016-886: hyp=['FOR', 'ONCE', 'IN', 'A', 'WAY', 'I', 'PROVED', 'A', 'TRUE', 'PROPHET'] +5142-36377-0017-887: ref=['THE', 'ONLY', 'CHEERFUL', 'CONVERSATION', 'WAS', 'THE', 'CONVERSATION', 'ACROSS', 'THE', 'TABLE', 'BETWEEN', 'NAOMI', 'AND', 'ME'] +5142-36377-0017-887: hyp=['THE', 'ONLY', 'CHEERFUL', 'CONVERSATION', 'WAS', 'THE', 'CONVERSATION', 'ACROSS', 'THE', 'TABLE', 'BETWEEN', 'NAOMI', 'AND', 'ME'] +5142-36377-0018-888: ref=['HE', 'LOOKED', 'UP', 'AT', 'NAOMI', 'DOUBTINGLY', 'FROM', 'HIS', 'PLATE', 'AND', 'LOOKED', 'DOWN', 'AGAIN', 'SLOWLY', 'WITH', 'A', 'FROWN'] +5142-36377-0018-888: hyp=['HE', 'LOOKED', 'UP', 'AND', 'NOW', 'AND', 'ME', 'DOUBTINGLY', 'FROM', 'HIS', 'PLATE', 'AND', 'LOOKED', 'DOWN', 'AGAIN', 'SLOWLY', 'WITH', 'A', 'FROWN'] +5142-36377-0019-889: ref=['WHEN', 'I', 'ADDRESSED', 'HIM', 'HE', 'ANSWERED', 'CONSTRAINEDLY'] +5142-36377-0019-889: hyp=['WHEN', 'I', 'ADDRESSED', 'HIM', 'HE', 'ANSWERED', 'CONSTRAINEDLY'] +5142-36377-0020-890: ref=['A', 'MORE', 'DREARY', 'AND', 'MORE', 'DISUNITED', 'FAMILY', 'PARTY', 'I', 'NEVER', 'SAT', 'AT', 'THE', 'TABLE', 'WITH'] +5142-36377-0020-890: hyp=['A', 'MORE', 'DREARY', 'AND', 'MORE', 'DISUNITED', 'FAMILY', 'PARTY', 'I', 'NEVER', 'SAT', 'AT', 'THE', 'TABLE', 'WITH'] +5142-36377-0021-891: ref=['ENVY', 'HATRED', 'MALICE', 'AND', 'UNCHARITABLENESS', 'ARE', 'NEVER', 'SO', 'ESSENTIALLY', 'DETESTABLE', 'TO', 'MY', 'MIND', 'AS', 'WHEN', 'THEY', 'ARE', 'ANIMATED', 'BY', 'A', 'SENSE', 'OF', 'PROPRIETY', 'AND', 'WORK', 'UNDER', 'THE', 'SURFACE', 'BUT', 'FOR', 'MY', 'INTEREST', 'IN', 'NAOMI', 'AND', 'MY', 'OTHER', 'INTEREST', 'IN', 'THE', 'LITTLE', 'LOVE', 'LOOKS', 'WHICH', 'I', 'NOW', 'AND', 'THEN', 'SURPRISED', 'PASSING', 'BETWEEN', 'HER', 'AND', 'AMBROSE', 'I', 'SHOULD', 'NEVER', 'HAVE', 'SAT', 'THROUGH', 'THAT', 'SUPPER'] +5142-36377-0021-891: hyp=['ENVY', 'HATRED', 'MALICE', 'AND', 
'UNCHARITABLENESS', 'ARE', 'NEVER', 'SO', 'ESSENTIALLY', 'DETESTABLE', 'TO', 'MY', 'MIND', 'AS', 'WHEN', 'THEY', 'ARE', 'ANIMATED', 'BY', 'THE', 'SENSE', 'OF', 'PROPRIETY', 'AND', 'WORK', 'UNDER', 'THE', 'SURFACE', 'BUT', 'FOR', 'MY', 'INTEREST', 'IN', 'THEY', 'OWE', 'ME', 'AND', 'MY', 'OTHER', 'INTEREST', 'IN', 'THE', 'LITTLE', 'LOVE', 'LOOKS', 'WHICH', 'I', 'NOW', 'AND', 'THEN', 'SURPRISED', 'PASSING', 'BETWEEN', 'HER', 'AND', 'AMBROSE', 'I', 'SHOULD', 'NEVER', 'HAVE', 'SAT', 'THROUGH', 'THAT', 'SUPPER'] +5142-36377-0022-892: ref=['I', 'WISH', 'YOU', 'GOOD', 'NIGHT', 'SHE', 'LAID', 'HER', 'BONY', 'HANDS', 'ON', 'THE', 'BACK', 'OF', 'MISTER', "MEADOWCROFT'S", 'INVALID', 'CHAIR', 'CUT', 'HIM', 'SHORT', 'IN', 'HIS', 'FAREWELL', 'SALUTATION', 'TO', 'ME', 'AND', 'WHEELED', 'HIM', 'OUT', 'TO', 'HIS', 'BED', 'AS', 'IF', 'SHE', 'WERE', 'WHEELING', 'HIM', 'OUT', 'TO', 'HIS', 'GRAVE'] +5142-36377-0022-892: hyp=['I', 'WISH', 'YOU', 'GOOD', 'NIGHT', 'SHE', 'LAID', 'HER', 'BONY', 'HANDS', 'ON', 'THE', 'BACK', 'OF', 'MISTER', "METICOFF'S", 'INVALID', 'CHAIR', 'CAUGHT', 'HIM', 'SHORT', 'IN', 'HIS', 'FAREWELL', 'SALUTATION', 'TO', 'ME', 'AND', 'WHEELED', 'HIM', 'OUT', 'TO', 'HIS', 'BED', 'AS', 'IF', 'SHE', 'WERE', 'WHEELING', 'HIM', 'OUT', 'TO', 'HIS', 'GRAVE'] +5142-36377-0023-893: ref=['YOU', 'WERE', 'QUITE', 'RIGHT', 'TO', 'SAY', 'NO', 'AMBROSE', 'BEGAN', 'NEVER', 'SMOKE', 'WITH', 'JOHN', 'JAGO', 'HIS', 'CIGARS', 'WILL', 'POISON', 'YOU'] +5142-36377-0023-893: hyp=['YOU', 'WERE', 'QUITE', 'RIGHT', 'TO', 'SAY', 'NO', 'AMBROSE', 'BEGAN', 'NEVER', 'SMOKE', 'WITH', 'JOHN', 'IAGO', 'HIS', 'CIGARS', 'WILL', 'POISON', 'YOU'] +5142-36377-0024-894: ref=['NAOMI', 'SHOOK', 'HER', 'FOREFINGER', 'REPROACHFULLY', 'AT', 'THEM', 'AS', 'IF', 'THE', 'TWO', 'STURDY', 'YOUNG', 'FARMERS', 'HAD', 'BEEN', 'TWO', 'CHILDREN'] +5142-36377-0024-894: hyp=['THEY', 'ONLY', 'SHOOK', 'HER', 'FOREFINGER', 'REPROACHFULLY', 'AT', 'THEM', 'AS', 'IF', 'THE', 'TWO', 'STURDY', 'YOUNG', 'FARMERS', 'HAD', 'BEEN', 'TWO', 'CHILDREN'] +5142-36377-0025-895: ref=['SILAS', 'SLUNK', 'AWAY', 'WITHOUT', 'A', 'WORD', 'OF', 'PROTEST', 'AMBROSE', 'STOOD', 'HIS', 'GROUND', 'EVIDENTLY', 'BENT', 'ON', 'MAKING', 'HIS', 'PEACE', 'WITH', 'NAOMI', 'BEFORE', 'HE', 'LEFT', 'HER', 'SEEING', 'THAT', 'I', 'WAS', 'IN', 'THE', 'WAY', 'I', 'WALKED', 'ASIDE', 'TOWARD', 'A', 'GLASS', 'DOOR', 'AT', 'THE', 'LOWER', 'END', 'OF', 'THE', 'ROOM'] +5142-36377-0025-895: hyp=['SILAS', 'SLUNK', 'AWAY', 'WITHOUT', 'A', 'WORD', 'OF', 'PROTEST', 'AMBROSE', 'STOOD', 'HIS', 'GROUND', 'EVIDENTLY', 'BENT', 'ON', 'MAKING', 'HIS', 'PEACE', 'WHEN', 'NAOMI', 'BEFORE', 'HE', 'LEFT', 'HER', 'SEEING', 'THAT', 'I', 'WAS', 'IN', 'THE', 'WAY', 'I', 'WALKED', 'ASIDE', 'TOWARD', 'A', 'GLASS', 'DOOR', 'AT', 'THE', 'LOWER', 'END', 'OF', 'THE', 'ROOM'] +5142-36586-0000-967: ref=['IT', 'IS', 'MANIFEST', 'THAT', 'MAN', 'IS', 'NOW', 'SUBJECT', 'TO', 'MUCH', 'VARIABILITY'] +5142-36586-0000-967: hyp=['IT', 'IS', 'MANIFEST', 'THAT', 'MAN', 'IS', 'NOW', 'SUBJECT', 'TO', 'MUCH', 'VARIABILITY'] +5142-36586-0001-968: ref=['SO', 'IT', 'IS', 'WITH', 'THE', 'LOWER', 'ANIMALS'] +5142-36586-0001-968: hyp=['SO', 'IT', 'IS', 'WITH', 'THE', 'LOWER', 'ANIMALS'] +5142-36586-0002-969: ref=['THE', 'VARIABILITY', 'OF', 'MULTIPLE', 'PARTS'] +5142-36586-0002-969: hyp=['THE', 'VERY', 'ABILITY', 'OF', 'MULTIPLE', 'PARTS'] +5142-36586-0003-970: ref=['BUT', 'THIS', 'SUBJECT', 'WILL', 'BE', 'MORE', 'PROPERLY', 'DISCUSSED', 'WHEN', 'WE', 'TREAT', 'OF', 'THE', 'DIFFERENT', 'RACES', 'OF', 'MANKIND'] +5142-36586-0003-970: hyp=['BUT', 
'THIS', 'SUBJECT', 'WILL', 'BE', 'MORE', 'PROPERLY', 'DISCUSSED', 'WHEN', 'WE', 'TREAT', 'OF', 'THE', 'DIFFERENT', 'RACES', 'OF', 'MANKIND'] +5142-36586-0004-971: ref=['EFFECTS', 'OF', 'THE', 'INCREASED', 'USE', 'AND', 'DISUSE', 'OF', 'PARTS'] +5142-36586-0004-971: hyp=['EFFECTS', 'OF', 'THE', 'INCREASED', 'USE', 'AND', 'DISUSE', 'OF', 'PARTS'] +5142-36600-0000-896: ref=['CHAPTER', 'SEVEN', 'ON', 'THE', 'RACES', 'OF', 'MAN'] +5142-36600-0000-896: hyp=['CHAPTER', 'SEVEN', 'ON', 'THE', 'RACES', 'OF', 'MAN'] +5142-36600-0001-897: ref=['IN', 'DETERMINING', 'WHETHER', 'TWO', 'OR', 'MORE', 'ALLIED', 'FORMS', 'OUGHT', 'TO', 'BE', 'RANKED', 'AS', 'SPECIES', 'OR', 'VARIETIES', 'NATURALISTS', 'ARE', 'PRACTICALLY', 'GUIDED', 'BY', 'THE', 'FOLLOWING', 'CONSIDERATIONS', 'NAMELY', 'THE', 'AMOUNT', 'OF', 'DIFFERENCE', 'BETWEEN', 'THEM', 'AND', 'WHETHER', 'SUCH', 'DIFFERENCES', 'RELATE', 'TO', 'FEW', 'OR', 'MANY', 'POINTS', 'OF', 'STRUCTURE', 'AND', 'WHETHER', 'THEY', 'ARE', 'OF', 'PHYSIOLOGICAL', 'IMPORTANCE', 'BUT', 'MORE', 'ESPECIALLY', 'WHETHER', 'THEY', 'ARE', 'CONSTANT'] +5142-36600-0001-897: hyp=['AND', 'DETERMINING', 'WHETHER', 'TWO', 'OR', 'MORE', 'ALLIED', 'FORMS', 'OUGHT', 'TO', 'BE', 'RANKED', 'A', 'SPECIES', 'OR', 'VARIETIES', 'NATURALISTS', 'ARE', 'PRACTICALLY', 'GUIDED', 'BY', 'THE', 'FOLLOWING', 'CONSIDERATIONS', 'NAMELY', 'THE', 'AMOUNT', 'OF', 'DIFFERENCE', 'BETWEEN', 'THEM', 'AND', 'WHETHER', 'SUCH', 'DIFFERENCES', 'RELATE', 'TO', 'FEW', 'OR', 'MANY', 'POINTS', 'OF', 'STRUCTURE', 'AND', 'WHETHER', 'THEY', 'ARE', 'OF', 'PHYSIOLOGICAL', 'IMPORTANCE', 'BUT', 'MORE', 'ESPECIALLY', 'WHETHER', 'THEY', 'ARE', 'CONSTANT'] +5639-40744-0000-137: ref=['ELEVEN', "O'CLOCK", 'HAD', 'STRUCK', 'IT', 'WAS', 'A', 'FINE', 'CLEAR', 'NIGHT', 'THEY', 'WERE', 'THE', 'ONLY', 'PERSONS', 'ON', 'THE', 'ROAD', 'AND', 'THEY', 'SAUNTERED', 'LEISURELY', 'ALONG', 'TO', 'AVOID', 'PAYING', 'THE', 'PRICE', 'OF', 'FATIGUE', 'FOR', 'THE', 'RECREATION', 'PROVIDED', 'FOR', 'THE', 'TOLEDANS', 'IN', 'THEIR', 'VALLEY', 'OR', 'ON', 'THE', 'BANKS', 'OF', 'THEIR', 'RIVER'] +5639-40744-0000-137: hyp=['ELEVEN', "O'CLOCK", 'HAD', 'STRUCK', 'IT', 'WAS', 'A', 'FINE', 'CLEAR', 'NIGHT', 'THERE', 'WERE', 'THE', 'ONLY', 'PERSONS', 'ON', 'THE', 'ROAD', 'AND', 'THEY', 'SAUNTERED', 'LEISURELY', 'ALONG', 'TO', 'AVOID', 'PAYING', 'THE', 'PRICE', 'OF', 'FATIGUE', 'FOR', 'THE', 'RECREATION', 'PROVIDED', 'FOR', 'THE', 'TOLEDANS', 'IN', 'THE', 'VALLEY', 'OR', 'ON', 'THE', 'BANKS', 'OF', 'THEIR', 'RIVER'] +5639-40744-0001-138: ref=['SECURE', 'AS', 'HE', 'THOUGHT', 'IN', 'THE', 'CAREFUL', 'ADMINISTRATION', 'OF', 'JUSTICE', 'IN', 'THAT', 'CITY', 'AND', 'THE', 'CHARACTER', 'OF', 'ITS', 'WELL', 'DISPOSED', 'INHABITANTS', 'THE', 'GOOD', 'HIDALGO', 'WAS', 'FAR', 'FROM', 'THINKING', 'THAT', 'ANY', 'DISASTER', 'COULD', 'BEFAL', 'HIS', 'FAMILY'] +5639-40744-0001-138: hyp=['SECURE', 'AS', 'HE', 'THOUGHT', 'IN', 'THE', 'CAREFUL', 'ADMINISTRATION', 'OF', 'JUSTICE', 'IN', 'THAT', 'CITY', 'AND', 'THE', 'CHARACTER', 'OF', 'ITS', 'WELL', 'DISPOSED', 'INHABITANTS', 'THE', 'GOOD', 'HADALGO', 'WAS', 'FAR', 'FROM', 'THINKING', 'THAT', 'ANY', 'DISASTER', 'COULD', 'BEFALL', 'HIS', 'FAMILY'] +5639-40744-0002-139: ref=['RODOLFO', 'AND', 'HIS', 'COMPANIONS', 'WITH', 'THEIR', 'FACES', 'MUFFLED', 'IN', 'THEIR', 'CLOAKS', 'STARED', 'RUDELY', 'AND', 'INSOLENTLY', 'AT', 'THE', 'MOTHER', 'THE', 'DAUGHTER', 'AND', 'THE', 'SERVANT', 'MAID'] +5639-40744-0002-139: hyp=['RUDOLPHO', 'AND', 'HIS', 'COMPANIONS', 'WERE', 'THEIR', 'FACES', 'MUFFLED', 'IN', 'THEIR', 'CLOAKS', 'STARED', 
'RUDELY', 'AND', 'INSOLENTLY', 'AT', 'THE', 'MOTHER', 'THE', 'DAUGHTER', 'AND', 'THE', 'SERVANT', 'MAID'] +5639-40744-0003-140: ref=['IN', 'A', 'MOMENT', 'HE', 'COMMUNICATED', 'HIS', 'THOUGHTS', 'TO', 'HIS', 'COMPANIONS', 'AND', 'IN', 'THE', 'NEXT', 'MOMENT', 'THEY', 'RESOLVED', 'TO', 'TURN', 'BACK', 'AND', 'CARRY', 'HER', 'OFF', 'TO', 'PLEASE', 'RODOLFO', 'FOR', 'THE', 'RICH', 'WHO', 'ARE', 'OPEN', 'HANDED', 'ALWAYS', 'FIND', 'PARASITES', 'READY', 'TO', 'ENCOURAGE', 'THEIR', 'BAD', 'PROPENSITIES', 'AND', 'THUS', 'TO', 'CONCEIVE', 'THIS', 'WICKED', 'DESIGN', 'TO', 'COMMUNICATE', 'IT', 'APPROVE', 'IT', 'RESOLVE', 'ON', 'RAVISHING', 'LEOCADIA', 'AND', 'TO', 'CARRY', 'THAT', 'DESIGN', 'INTO', 'EFFECT', 'WAS', 'THE', 'WORK', 'OF', 'A', 'MOMENT'] +5639-40744-0003-140: hyp=['IN', 'A', 'MOMENT', 'HE', 'COMMUNICATED', 'HIS', 'THOUGHTS', 'TO', 'HIS', 'COMPANIONS', 'AND', 'IN', 'THE', 'NEXT', 'MOMENT', 'THEY', 'RESOLVED', 'TO', 'TURN', 'BACK', 'AND', 'CARRY', 'HER', 'OFF', 'TO', 'PLEASE', 'RUDOLPHO', 'FOR', 'THE', 'RICH', 'WHO', 'ARE', 'OPEN', 'HANDED', 'ALWAYS', 'FIND', 'PARASITES', 'READY', 'TO', 'ENCOURAGE', 'THEIR', 'BAD', 'PROPENSITIES', 'AND', 'THUS', 'TO', 'CONCEIVE', 'THIS', 'WICKED', 'DESIGN', 'TO', 'COMMUNICATE', 'IT', 'APPROVE', 'IT', 'RESOLVE', 'ON', 'RAVISHING', 'LOCATIA', 'AND', 'TO', 'CARRY', 'THAT', 'DESIGN', 'INTO', 'EFFECT', 'WAS', 'THE', 'WORK', 'OF', 'A', 'MOMENT'] +5639-40744-0004-141: ref=['THEY', 'DREW', 'THEIR', 'SWORDS', 'HID', 'THEIR', 'FACES', 'IN', 'THE', 'FLAPS', 'OF', 'THEIR', 'CLOAKS', 'TURNED', 'BACK', 'AND', 'SOON', 'CAME', 'IN', 'FRONT', 'OF', 'THE', 'LITTLE', 'PARTY', 'WHO', 'HAD', 'NOT', 'YET', 'DONE', 'GIVING', 'THANKS', 'TO', 'GOD', 'FOR', 'THEIR', 'ESCAPE', 'FROM', 'THOSE', 'AUDACIOUS', 'MEN'] +5639-40744-0004-141: hyp=['THEY', 'DREW', 'THEIR', 'SWORDS', 'HID', 'THEIR', 'FACES', 'IN', 'THE', 'FLAPS', 'OF', 'THEIR', 'CLOAKS', 'TURNED', 'BACK', 'AND', 'SOON', 'CAME', 'IN', 'FRONT', 'OF', 'THE', 'LITTLE', 'PARTY', 'WHO', 'HAD', 'NOT', 'YET', 'DONE', 'GIVING', 'THANKS', 'TO', 'GOD', 'FOR', 'THEIR', 'ESCAPE', 'FROM', 'THOSE', 'AUDACIOUS', 'MEN'] +5639-40744-0005-142: ref=['FINALLY', 'THE', 'ONE', 'PARTY', 'WENT', 'OFF', 'EXULTING', 'AND', 'THE', 'OTHER', 'WAS', 'LEFT', 'IN', 'DESOLATION', 'AND', 'WOE'] +5639-40744-0005-142: hyp=['FINALLY', 'THE', 'ONE', 'PARTY', 'WENT', 'OFF', 'EXULTING', 'AND', 'THE', 'OTHER', 'WAS', 'LEFT', 'IN', 'DESOLATION', 'AND', 'WOE'] +5639-40744-0006-143: ref=['RODOLFO', 'ARRIVED', 'AT', 'HIS', 'OWN', 'HOUSE', 'WITHOUT', 'ANY', 'IMPEDIMENT', 'AND', "LEOCADIA'S", 'PARENTS', 'REACHED', 'THEIRS', 'HEART', 'BROKEN', 'AND', 'DESPAIRING'] +5639-40744-0006-143: hyp=['RUDOLPHO', 'ARRIVED', 'AT', 'HIS', 'OWN', 'HOUSE', 'WITHOUT', 'ANY', 'IMPEDIMENT', "ALYOCADIA'S", 'PARENTS', 'REACHED', 'THEIRS', 'HEART', 'BROKEN', 'AND', 'DESPAIRING'] +5639-40744-0007-144: ref=['MEANWHILE', 'RODOLFO', 'HAD', 'LEOCADIA', 'SAFE', 'IN', 'HIS', 'CUSTODY', 'AND', 'IN', 'HIS', 'OWN', 'APARTMENT'] +5639-40744-0007-144: hyp=['MEANWHILE', 'RUDOLPHO', 'HAD', 'LOCALIA', 'SAFE', 'IN', 'HIS', 'CUSTODY', 'AND', 'IN', 'HIS', 'OWN', 'APARTMENT'] +5639-40744-0008-145: ref=['WHO', 'TOUCHES', 'ME', 'AM', 'I', 'IN', 'BED'] +5639-40744-0008-145: hyp=['WHO', 'TOUCHES', 'ME', 'AM', 'I', 'IN', 'BED'] +5639-40744-0009-146: ref=['MOTHER', 'DEAR', 'FATHER', 'DO', 'YOU', 'HEAR', 'ME'] +5639-40744-0009-146: hyp=['MOTHER', 'DEAR', 'FATHER', 'DO', 'YOU', 'HEAR', 'ME'] +5639-40744-0010-147: ref=['IT', 'IS', 'THE', 'ONLY', 'AMENDS', 'I', 'ASK', 'OF', 'YOU', 'FOR', 'THE', 'WRONG', 'YOU', 'HAVE', 
'DONE', 'ME'] +5639-40744-0010-147: hyp=['IT', 'IS', 'THE', 'ONLY', 'AMENDS', 'I', 'ASK', 'OF', 'YOU', 'FOR', 'THE', 'WRONG', 'YOU', 'HAVE', 'DONE', 'ME'] +5639-40744-0011-148: ref=['SHE', 'FOUND', 'THE', 'DOOR', 'BUT', 'IT', 'WAS', 'LOCKED', 'OUTSIDE'] +5639-40744-0011-148: hyp=['SHE', 'FOUND', 'THE', 'DOOR', 'BUT', 'IT', 'WAS', 'LOCKED', 'OUTSIDE'] +5639-40744-0012-149: ref=['SHE', 'SUCCEEDED', 'IN', 'OPENING', 'THE', 'WINDOW', 'AND', 'THE', 'MOONLIGHT', 'SHONE', 'IN', 'SO', 'BRIGHTLY', 'THAT', 'SHE', 'COULD', 'DISTINGUISH', 'THE', 'COLOUR', 'OF', 'SOME', 'DAMASK', 'HANGINGS', 'IN', 'THE', 'ROOM'] +5639-40744-0012-149: hyp=['SHE', 'SUCCEEDED', 'IN', 'OPENING', 'THE', 'WINDOW', 'AND', 'THE', 'MOONLIGHT', 'SHONE', 'IN', 'SO', 'BRIGHTLY', 'THAT', 'SHE', 'COULD', 'DISTINGUISH', 'THE', 'COLOUR', 'OF', 'SOME', 'DAMASK', 'HANGING', 'IN', 'THE', 'ROOM'] +5639-40744-0013-150: ref=['SHE', 'SAW', 'THAT', 'THE', 'BED', 'WAS', 'GILDED', 'AND', 'SO', 'RICH', 'THAT', 'IT', 'SEEMED', 'THAT', 'OF', 'A', 'PRINCE', 'RATHER', 'THAN', 'OF', 'A', 'PRIVATE', 'GENTLEMAN'] +5639-40744-0013-150: hyp=['SHE', 'SAW', 'THAT', 'THE', 'BED', 'WAS', 'GILDED', 'AND', 'SO', 'RICH', 'THAT', 'IT', 'SEEMED', 'THAT', 'OF', 'A', 'PRINCE', 'THE', 'RATHER', 'THAT', 'OF', 'A', 'PRIVATE', 'GENTLEMAN'] +5639-40744-0014-151: ref=['AMONG', 'OTHER', 'THINGS', 'ON', 'WHICH', 'SHE', 'CAST', 'HER', 'EYES', 'WAS', 'A', 'SMALL', 'CRUCIFIX', 'OF', 'SOLID', 'SILVER', 'STANDING', 'ON', 'A', 'CABINET', 'NEAR', 'THE', 'WINDOW'] +5639-40744-0014-151: hyp=['AMONG', 'OTHER', 'THINGS', 'ON', 'WHICH', 'SHE', 'CAST', 'HER', 'EYES', 'WAS', 'A', 'SMALL', 'CRUCIFIX', 'OF', 'SOLID', 'SILVER', 'STANDING', 'ON', 'A', 'CABINET', 'NEAR', 'THE', 'WINDOW'] +5639-40744-0015-152: ref=['THIS', 'PERSON', 'WAS', 'RODOLFO', 'WHO', 'THOUGH', 'HE', 'HAD', 'GONE', 'TO', 'LOOK', 'FOR', 'HIS', 'FRIENDS', 'HAD', 'CHANGED', 'HIS', 'MIND', 'IN', 'THAT', 'RESPECT', 'NOT', 'THINKING', 'IT', 'ADVISABLE', 'TO', 'ACQUAINT', 'THEM', 'WITH', 'WHAT', 'HAD', 'PASSED', 'BETWEEN', 'HIM', 'AND', 'THE', 'GIRL'] +5639-40744-0015-152: hyp=['THIS', 'PERSON', 'WAS', 'RUDOLPU', 'WHO', 'THOUGH', 'HE', 'HAD', 'GONE', 'TO', 'LOOK', 'FOR', 'HIS', 'FRIENDS', 'HAD', 'CHANGED', 'HIS', 'MIND', 'IN', 'THAT', 'RESPECT', 'NOT', 'THINKING', 'IT', 'ADVISABLE', 'TO', 'ACQUAINT', 'THEM', 'WITH', 'WHAT', 'HAD', 'PASSED', 'BETWEEN', 'HIM', 'AND', 'THE', 'GIRL'] +5639-40744-0016-153: ref=['ON', 'THE', 'CONTRARY', 'HE', 'RESOLVED', 'TO', 'TELL', 'THEM', 'THAT', 'REPENTING', 'OF', 'HIS', 'VIOLENCE', 'AND', 'MOVED', 'BY', 'HER', 'TEARS', 'HE', 'HAD', 'ONLY', 'CARRIED', 'HER', 'HALF', 'WAY', 'TOWARDS', 'HIS', 'HOUSE', 'AND', 'THEN', 'LET', 'HER', 'GO'] +5639-40744-0016-153: hyp=['ON', 'THE', 'CONTRARY', 'HE', 'RESOLVED', 'TO', 'TELL', 'THEM', 'THAT', 'REPENTING', 'OF', 'HIS', 'VIOLENCE', 'AND', 'MOVED', 'BY', 'A', 'TEARS', 'HE', 'HAD', 'ONLY', 'CARRIED', 'HER', 'HALF', 'WAY', 'TOWARDS', 'HIS', 'HOUSE', 'AND', 'THEN', 'LET', 'HER', 'GO'] +5639-40744-0017-154: ref=['CHOKING', 'WITH', 'EMOTION', 'LEOCADI', 'MADE', 'A', 'SIGN', 'TO', 'HER', 'PARENTS', 'THAT', 'SHE', 'WISHED', 'TO', 'BE', 'ALONE', 'WITH', 'THEM'] +5639-40744-0017-154: hyp=['CHOKING', 'WITH', 'EMOTION', 'LOCATIA', 'MADE', 'A', 'SIGN', 'TO', 'HER', 'PARENTS', 'THAT', 'SHE', 'WISHED', 'TO', 'BE', 'ALONE', 'WITH', 'THEM'] +5639-40744-0018-155: ref=['THAT', 'WOULD', 'BE', 'VERY', 'WELL', 'MY', 'CHILD', 'REPLIED', 'HER', 'FATHER', 'IF', 'YOUR', 'PLAN', 'WERE', 'NOT', 'LIABLE', 'TO', 'BE', 'FRUSTRATED', 'BY', 'ORDINARY', 'CUNNING', 'BUT', 'NO', 'DOUBT', 
'THIS', 'IMAGE', 'HAS', 'BEEN', 'ALREADY', 'MISSED', 'BY', 'ITS', 'OWNER', 'AND', 'HE', 'WILL', 'HAVE', 'SET', 'IT', 'DOWN', 'FOR', 'CERTAIN', 'THAT', 'IT', 'WAS', 'TAKEN', 'OUT', 'OF', 'THE', 'ROOM', 'BY', 'THE', 'PERSON', 'HE', 'LOCKED', 'UP', 'THERE'] +5639-40744-0018-155: hyp=['THAT', 'WOULD', 'BE', 'VERY', 'WELL', 'MY', 'CHILD', 'REPLIED', 'HER', 'FATHER', 'IF', 'YOUR', 'PLAN', 'WERE', 'NOT', 'LIABLE', 'TO', 'BE', 'FRUSTRATED', 'BY', 'ORDINARY', 'CUNNING', 'BUT', 'NO', 'DOUBT', 'THIS', 'IMAGE', 'HAD', 'BEEN', 'ALREADY', 'MISSED', 'BY', 'ITS', 'OWNER', 'AND', 'HE', 'WILL', 'HAVE', 'SET', 'IT', 'DOWN', 'FOR', 'CERTAIN', 'THAT', 'IT', 'WAS', 'TAKEN', 'OUT', 'OF', 'THE', 'ROOM', 'BY', 'THE', 'PERSON', 'HE', 'LOCKED', 'UP', 'THERE'] +5639-40744-0019-156: ref=['WHAT', 'YOU', 'HAD', 'BEST', 'DO', 'MY', 'CHILD', 'IS', 'TO', 'KEEP', 'IT', 'AND', 'PRAY', 'TO', 'IT', 'THAT', 'SINCE', 'IT', 'WAS', 'A', 'WITNESS', 'TO', 'YOUR', 'UNDOING', 'IT', 'WILL', 'DEIGN', 'TO', 'VINDICATE', 'YOUR', 'CAUSE', 'BY', 'ITS', 'RIGHTEOUS', 'JUDGMENT'] +5639-40744-0019-156: hyp=['WHAT', 'YOU', 'HAD', 'BEST', 'DO', 'MY', 'CHILD', 'IS', 'TO', 'KEEP', 'IT', 'AND', 'PRAY', 'TO', 'IT', 'THAT', 'SINS', 'IT', 'WAS', 'A', 'WITNESS', 'TO', 'YOUR', 'UNDOING', 'IT', 'WILL', 'DEIGN', 'TO', 'VINDICATE', 'YOUR', 'CAUSE', 'BY', 'ITS', 'RIGHTEOUS', 'JUDGMENT'] +5639-40744-0020-157: ref=['THUS', 'DID', 'THIS', 'HUMANE', 'AND', 'RIGHT', 'MINDED', 'FATHER', 'COMFORT', 'HIS', 'UNHAPPY', 'DAUGHTER', 'AND', 'HER', 'MOTHER', 'EMBRACING', 'HER', 'AGAIN', 'DID', 'ALL', 'SHE', 'COULD', 'TO', 'SOOTHE', 'HER', 'FEELINGS'] +5639-40744-0020-157: hyp=['THUS', 'DID', 'THE', 'HUMANE', 'AND', 'RIGHT', 'MINDED', 'FATHER', 'COMFORT', 'HIS', 'UNHAPPY', 'DAUGHTER', 'AND', 'HER', 'MOTHER', 'EMBRACING', 'HER', 'AGAIN', 'DID', 'ALL', 'SHE', 'COULD', 'TO', 'SOOTHE', 'THE', 'FEELINGS'] +5639-40744-0021-158: ref=['SHE', 'MEANWHILE', 'PASSED', 'HER', 'LIFE', 'WITH', 'HER', 'PARENTS', 'IN', 'THE', 'STRICTEST', 'RETIREMENT', 'NEVER', 'LETTING', 'HERSELF', 'BE', 'SEEN', 'BUT', 'SHUNNING', 'EVERY', 'EYE', 'LEST', 'IT', 'SHOULD', 'READ', 'HER', 'MISFORTUNE', 'IN', 'HER', 'FACE'] +5639-40744-0021-158: hyp=['SHE', 'MEANWHILE', 'PAST', 'HER', 'LIFE', 'WITH', 'HER', 'PARENTS', 'IN', 'THE', 'STRICTEST', 'RETIREMENT', 'NEVER', 'LETTING', 'HERSELF', 'BE', 'SEEN', 'BUT', 'SHUNNING', 'EVERY', 'EYE', 'LEST', 'IT', 'SHOULD', 'READ', 'HER', 'MISFORTUNE', 'IN', 'HER', 'FACE'] +5639-40744-0022-159: ref=['TIME', 'ROLLED', 'ON', 'THE', 'HOUR', 'OF', 'HER', 'DELIVERY', 'ARRIVED', 'IT', 'TOOK', 'PLACE', 'IN', 'THE', 'UTMOST', 'SECRECY', 'HER', 'MOTHER', 'TAKING', 'UPON', 'HER', 'THE', 'OFFICE', 'OF', 'MIDWIFE', 'AND', 'SHE', 'GAVE', 'BIRTH', 'TO', 'A', 'SON', 'ONE', 'OF', 'THE', 'MOST', 'BEAUTIFUL', 'EVER', 'SEEN'] +5639-40744-0022-159: hyp=['TIME', 'ROLLED', 'ON', 'THE', 'HOUR', 'OF', 'HER', 'DELIVERY', 'ARRIVED', 'IT', 'TOOK', 'PLACE', 'IN', 'THE', 'UTMOST', 'SECRECY', 'HER', 'MOTHER', 'TAKING', 'UP', 'ON', 'HER', 'THE', 'OFFICE', 'OF', 'MIDWIFE', 'AS', 'SHE', 'GAVE', 'BIRTH', 'TO', 'A', 'SON', 'ONE', 'OF', 'THE', 'MOST', 'BEAUTIFUL', 'EVER', 'SEEN'] +5639-40744-0023-160: ref=['WHEN', 'THE', 'BOY', 'WALKED', 'THROUGH', 'THE', 'STREETS', 'BLESSINGS', 'WERE', 'SHOWERED', 'UPON', 'HIM', 'BY', 'ALL', 'WHO', 'SAW', 'HIM', 'BLESSINGS', 'UPON', 'HIS', 'BEAUTY', 'UPON', 'THE', 'MOTHER', 'THAT', 'BORE', 'HIM', 'UPON', 'THE', 'FATHER', 'THAT', 'BEGOT', 'HIM', 'UPON', 'THOSE', 'WHO', 'BROUGHT', 'HIM', 'UP', 'SO', 'WELL'] +5639-40744-0023-160: hyp=['AND', 'THE', 'BOY', 'WALKED', 'THROUGH', 
'THE', 'STREETS', 'BLESSINGS', 'WHERE', 'SHOWERED', 'UP', 'ON', 'HIM', 'BY', 'ALL', 'WHO', 'SAW', 'HIM', 'BLESSING', 'UPON', 'HIS', 'BEAUTY', 'UPON', 'THE', 'MOTHER', 'THAT', 'BORE', 'HIM', 'UPON', 'THE', 'FATHER', 'THAT', 'BEGOT', 'HIM', 'UPON', 'THOSE', 'WHO', 'BROUGHT', 'HIM', 'UP', 'SO', 'WELL'] +5639-40744-0024-161: ref=['ONE', 'DAY', 'WHEN', 'THE', 'BOY', 'WAS', 'SENT', 'BY', 'HIS', 'GRANDFATHER', 'WITH', 'A', 'MESSAGE', 'TO', 'A', 'RELATION', 'HE', 'PASSED', 'ALONG', 'A', 'STREET', 'IN', 'WHICH', 'THERE', 'WAS', 'A', 'GREAT', 'CONCOURSE', 'OF', 'HORSEMEN'] +5639-40744-0024-161: hyp=['ONE', 'DAY', 'WHEN', 'THE', 'BOY', 'WAS', 'SENT', 'BY', 'HIS', 'GRANDFATHER', 'WITH', 'A', 'MESSAGE', 'TO', 'A', 'RELATION', 'HE', 'PASSED', 'ALONG', 'A', 'STREET', 'IN', 'WHICH', 'THERE', 'WAS', 'A', 'GREAT', 'CONCOURSE', 'OF', 'HORSEMEN'] +5639-40744-0025-162: ref=['THE', 'BED', 'SHE', 'TOO', 'WELL', 'REMEMBERED', 'WAS', 'THERE', 'AND', 'ABOVE', 'ALL', 'THE', 'CABINET', 'ON', 'WHICH', 'HAD', 'STOOD', 'THE', 'IMAGE', 'SHE', 'HAD', 'TAKEN', 'AWAY', 'WAS', 'STILL', 'ON', 'THE', 'SAME', 'SPOT'] +5639-40744-0025-162: hyp=['THE', 'BED', 'SHE', 'TOO', 'WELL', 'REMEMBERED', 'WAS', 'THERE', 'AND', 'ABOVE', 'ALL', 'THE', 'CABINET', 'ON', 'WHICH', 'HAD', 'STOOD', 'THE', 'IMAGE', 'SHE', 'HAD', 'TAKEN', 'AWAY', 'WAS', 'STILL', 'ON', 'THE', 'SAME', 'SPOT'] +5639-40744-0026-163: ref=['LUIS', 'WAS', 'OUT', 'OF', 'DANGER', 'IN', 'A', 'FORTNIGHT', 'IN', 'A', 'MONTH', 'HE', 'ROSE', 'FROM', 'HIS', 'BED', 'AND', 'DURING', 'ALL', 'THAT', 'TIME', 'HE', 'WAS', 'VISITED', 'DAILY', 'BY', 'HIS', 'MOTHER', 'AND', 'GRANDMOTHER', 'AND', 'TREATED', 'BY', 'THE', 'MASTER', 'AND', 'MISTRESS', 'OF', 'THE', 'HOUSE', 'AS', 'IF', 'HE', 'WAS', 'THEIR', 'OWN', 'CHILD'] +5639-40744-0026-163: hyp=['LOUIS', 'WAS', 'OUT', 'OF', 'DANGER', 'IN', 'A', 'FORTNIGHT', 'IN', 'A', 'MONTH', 'HE', 'ROSE', 'FROM', 'HIS', 'BED', 'AND', 'DREWING', 'ALL', 'THAT', 'TIME', 'HE', 'WAS', 'VISITED', 'DAILY', 'BY', 'HIS', 'MOTHER', 'AND', 'GRANDMOTHER', 'AND', 'TREATED', 'BY', 'THE', 'MASTER', 'AND', 'MISTRESS', 'OF', 'THE', 'HOUSE', 'AS', 'IF', 'HE', 'WAS', 'THEIR', 'OWN', 'CHILD'] +5639-40744-0027-164: ref=['THUS', 'SAYING', 'AND', 'PRESSING', 'THE', 'CRUCIFIX', 'TO', 'HER', 'BREAST', 'SHE', 'FELL', 'FAINTING', 'INTO', 'THE', 'ARMS', 'OF', 'DONA', 'ESTAFANIA', 'WHO', 'AS', 'A', 'GENTLEWOMAN', 'TO', 'WHOSE', 'SEX', 'PITY', 'IS', 'AS', 'NATURAL', 'AS', 'CRUELTY', 'IS', 'TO', 'MAN', 'INSTANTLY', 'PRESSED', 'HER', 'LIPS', 'TO', 'THOSE', 'OF', 'THE', 'FAINTING', 'GIRL', 'SHEDDING', 'OVER', 'HER', 'SO', 'MANY', 'TEARS', 'THAT', 'THERE', 'NEEDED', 'NO', 'OTHER', 'SPRINKLING', 'OF', 'WATER', 'TO', 'RECOVER', 'LEOCADIA', 'FROM', 'HER', 'SWOON'] +5639-40744-0027-164: hyp=['THUS', 'SAYING', 'AND', 'PRESSING', 'THE', 'CRUCIFIX', 'TO', 'HER', 'BREAST', 'SHE', 'FELL', 'FAINTING', 'INTO', 'THE', 'ARMS', 'OF', 'DONA', 'ESTAFANIA', 'WHO', 'AS', 'A', 'GENTLEWOMAN', 'TO', 'WHOSE', 'SEX', 'PITY', 'IS', 'A', 'NATURAL', 'AS', 'CRUELTY', 'AS', 'TO', 'MAN', 'INSTANTLY', 'PRESSED', 'HER', 'LIPS', 'TO', 'THOSE', 'OF', 'THE', 'FAINTING', 'GIRL', 'SHEDDING', 'OVER', 'HER', 'SO', 'MANY', 'TEARS', 'THAT', 'THERE', 'NEEDED', 'NO', 'OTHER', 'SPRINKLING', 'OF', 'WATER', 'TO', 'RECOVER', 'LOCATIA', 'FROM', 'HER', 'SWOON'] +5639-40744-0028-165: ref=['I', 'HAVE', 'GREAT', 'THINGS', 'TO', 'TELL', 'YOU', 'SENOR', 'SAID', 'DONA', 'ESTAFANIA', 'TO', 'HER', 'HUSBAND', 'THE', 'CREAM', 'AND', 'SUBSTANCE', 'OF', 'WHICH', 'IS', 'THIS', 'THE', 'FAINTING', 'GIRL', 'BEFORE', 'YOU', 'IS', 'YOUR', 'DAUGHTER', 
'AND', 'THAT', 'BOY', 'IS', 'YOUR', 'GRANDSON'] +5639-40744-0028-165: hyp=['I', 'HAVE', 'GREAT', 'THINGS', 'TO', 'TELL', 'YOU', 'SENOR', 'SAID', 'DORIS', 'DA', 'FANIA', 'TO', 'HER', 'HUSBAND', 'THE', 'CREAM', 'AND', 'SUBSTANCE', 'OF', 'WHICH', 'IS', 'THIS', 'THE', 'FAINTING', 'GIRL', 'BEFORE', 'YOU', 'IS', 'YOUR', 'DAUGHTER', 'AND', 'THE', 'BOY', 'IS', 'YOUR', 'GRANDSON'] +5639-40744-0029-166: ref=['THIS', 'TRUTH', 'WHICH', 'I', 'HAVE', 'LEARNED', 'FROM', 'HER', 'LIPS', 'IS', 'CONFIRMED', 'BY', 'HIS', 'FACE', 'IN', 'WHICH', 'WE', 'HAVE', 'BOTH', 'BEHELD', 'THAT', 'OF', 'OUR', 'SON'] +5639-40744-0029-166: hyp=['THIS', 'TRUTH', 'WHICH', 'I', 'HAVE', 'LEARNED', 'FROM', 'HER', 'LIPS', 'IS', 'CONFIRMED', 'BY', 'HIS', 'FACE', 'IN', 'WHICH', 'WE', 'HAVE', 'BOTH', 'BEHELD', 'THAT', 'OF', 'OUR', 'SON'] +5639-40744-0030-167: ref=['JUST', 'THEN', 'LEOCADIA', 'CAME', 'TO', 'HERSELF', 'AND', 'EMBRACING', 'THE', 'CROSS', 'SEEMED', 'CHANGED', 'INTO', 'A', 'SEA', 'OF', 'TEARS', 'AND', 'THE', 'GENTLEMAN', 'REMAINED', 'IN', 'UTTER', 'BEWILDERMENT', 'UNTIL', 'HIS', 'WIFE', 'HAD', 'REPEATED', 'TO', 'HIM', 'FROM', 'BEGINNING', 'TO', 'END', "LEOCADIA'S", 'WHOLE', 'STORY', 'AND', 'HE', 'BELIEVED', 'IT', 'THROUGH', 'THE', 'BLESSED', 'DISPENSATION', 'OF', 'HEAVEN', 'WHICH', 'HAD', 'CONFIRMED', 'IT', 'BY', 'SO', 'MANY', 'CONVINCING', 'TESTIMONIES'] +5639-40744-0030-167: hyp=['JUST', 'THEN', 'LOCATIA', 'CAME', 'TO', 'HERSELF', 'AND', 'EMBRACING', 'THE', 'CROSS', 'SEEMED', 'CHANGED', 'INTO', 'A', 'SEA', 'OF', 'TEARS', 'AND', 'THE', 'GENTLEMAN', 'REMAINING', 'IN', 'OUT', 'OF', 'A', 'WILDERMENT', 'UNTIL', 'HIS', 'WIFE', 'HAD', 'REPEATED', 'TO', 'HIM', 'FROM', 'BEGINNING', 'TO', 'END', 'LOCATEOUS', 'WHOLE', 'STORY', 'AND', 'HE', 'BELIEVED', 'IT', 'THROUGH', 'THE', 'BLESSED', 'DISPENSATION', 'OF', 'HEAVEN', 'WHICH', 'HAD', 'CONFIRMED', 'IT', 'BY', 'SO', 'MANY', 'CONVINCING', 'TESTIMONIES'] +5639-40744-0031-168: ref=['SO', 'PERSUASIVE', 'WERE', 'HER', 'ENTREATIES', 'AND', 'SO', 'STRONG', 'HER', 'ASSURANCES', 'THAT', 'NO', 'HARM', 'WHATEVER', 'COULD', 'RESULT', 'TO', 'THEM', 'FROM', 'THE', 'INFORMATION', 'SHE', 'SOUGHT', 'THEY', 'WERE', 'INDUCED', 'TO', 'CONFESS', 'THAT', 'ONE', "SUMMER'S", 'NIGHT', 'THE', 'SAME', 'SHE', 'HAD', 'MENTIONED', 'THEMSELVES', 'AND', 'ANOTHER', 'FRIEND', 'BEING', 'OUT', 'ON', 'A', 'STROLL', 'WITH', 'RODOLFO', 'THEY', 'HAD', 'BEEN', 'CONCERNED', 'IN', 'THE', 'ABDUCTION', 'OF', 'A', 'GIRL', 'WHOM', 'RODOLFO', 'CARRIED', 'OFF', 'WHILST', 'THE', 'REST', 'OF', 'THEM', 'DETAINED', 'HER', 'FAMILY', 'WHO', 'MADE', 'A', 'GREAT', 'OUTCRY', 'AND', 'WOULD', 'HAVE', 'DEFENDED', 'HER', 'IF', 'THEY', 'COULD'] +5639-40744-0031-168: hyp=['SO', 'PERSUASIVE', 'WERE', 'HER', 'ENTREATIES', 'AND', 'SO', 'STRONG', 'HER', 'ASSURANCES', 'THAT', 'NO', 'HARM', 'WHATEVER', 'COULD', 'RESULT', 'TO', 'THEM', 'FROM', 'THE', 'INFORMATION', 'SHE', 'SOUGHT', 'THEY', 'WERE', 'INDUCED', 'TO', 'CONFESS', 'THAT', 'ONE', "SUMMER'S", 'NIGHT', 'THE', 'SAME', 'SHE', 'HAD', 'MENTIONED', 'THEMSELVES', 'AND', 'ANOTHER', 'FRIEND', 'BEING', 'OUT', 'ON', 'THE', 'STROLL', 'WITH', 'RUDOLPHO', 'THEY', 'HAD', 'BEEN', 'CONCERNED', 'IN', 'THE', 'ABDUCTION', 'OF', 'A', 'GIRL', 'WHOM', 'RUDOLPHO', 'CARRIED', 'OFF', 'WHILST', 'THE', 'REST', 'OF', 'THEM', 'DETAINED', 'HER', 'FAMILY', 'WHO', 'MADE', 'A', 'GREAT', 'OUTCRY', 'AND', 'WOULD', 'HAVE', 'DEFENDED', 'HER', 'IF', 'THEY', 'COULD'] +5639-40744-0032-169: ref=['FOR', "GOD'S", 'SAKE', 'MY', 'LADY', 'MOTHER', 'GIVE', 'ME', 'A', 'WIFE', 'WHO', 'WOULD', 'BE', 'AN', 'AGREEABLE', 'COMPANION', 'NOT', 'ONE', 
'WHO', 'WILL', 'DISGUST', 'ME', 'SO', 'THAT', 'WE', 'MAY', 'BOTH', 'BEAR', 'EVENLY', 'AND', 'WITH', 'MUTUAL', 'GOOD', 'WILL', 'THE', 'YOKE', 'IMPOSED', 'ON', 'US', 'BY', 'HEAVEN', 'INSTEAD', 'OF', 'PULLING', 'THIS', 'WAY', 'AND', 'THAT', 'WAY', 'AND', 'FRETTING', 'EACH', 'OTHER', 'TO', 'DEATH'] +5639-40744-0032-169: hyp=['FOR', "GOD'S", 'SAKE', 'MY', 'LADY', 'MOTHER', 'GIVE', 'ME', 'A', 'WIFE', 'WHO', 'WILL', 'BE', 'AN', 'AGREEABLE', 'COMPANION', 'NOT', 'ONE', 'WHO', 'WILL', 'DISGUST', 'ME', 'SO', 'THAT', 'WE', 'MAY', 'BOTH', 'BEAR', 'EVENLY', 'AND', 'WITH', 'MUTUAL', 'GOOD', 'WILL', 'THE', 'YOKE', 'AND', 'POST', 'ON', 'US', 'BY', 'HEAVEN', 'INSTEAD', 'OF', 'PULLING', 'THIS', 'WAY', 'AND', 'THAT', 'WAY', 'AND', 'FRETTING', 'EACH', 'OTHER', 'TO', 'DEATH'] +5639-40744-0033-170: ref=['HER', 'BEARING', 'WAS', 'GRACEFUL', 'AND', 'ANIMATED', 'SHE', 'LED', 'HER', 'SON', 'BY', 'THE', 'HAND', 'AND', 'BEFORE', 'HER', 'WALKED', 'TWO', 'MAIDS', 'WITH', 'WAX', 'LIGHTS', 'AND', 'SILVER', 'CANDLESTICKS'] +5639-40744-0033-170: hyp=['HER', 'BEARING', 'WAS', 'GRACEFUL', 'AND', 'ANIMATED', 'SHE', 'LED', 'HER', 'SON', 'BY', 'THE', 'HAND', 'AND', 'BEFORE', 'HER', 'WALKED', 'TWO', 'MAIDS', 'WITH', 'WAX', 'LIGHTS', 'AND', 'SILVER', 'CANDLESTICKS'] +5639-40744-0034-171: ref=['ALL', 'ROSE', 'TO', 'DO', 'HER', 'REVERENCE', 'AS', 'IF', 'SOMETHING', 'FROM', 'HEAVEN', 'HAD', 'MIRACULOUSLY', 'APPEARED', 'BEFORE', 'THEM', 'BUT', 'GAZING', 'ON', 'HER', 'ENTRANCED', 'WITH', 'ADMIRATION', 'NOT', 'ONE', 'OF', 'THEM', 'WAS', 'ABLE', 'TO', 'ADDRESS', 'A', 'SINGLE', 'WORD', 'TO', 'HER'] +5639-40744-0034-171: hyp=['ALL', 'ROSE', 'TO', 'DO', 'HER', 'REVERENCE', 'AS', 'IF', 'SOMETHING', 'FROM', 'HEAVEN', 'HAD', 'MIRACULOUSLY', 'APPEARED', 'BEFORE', 'THEM', 'BUT', 'GAZING', 'ON', 'HER', 'AND', 'TRANCED', 'WITH', 'ADMIRATION', 'NOT', 'ONE', 'OF', 'THEM', 'WAS', 'ABLE', 'TO', 'ADDRESS', 'A', 'SINGLE', 'WORD', 'TO', 'HER'] +5639-40744-0035-172: ref=['SHE', 'REFLECTED', 'HOW', 'NEAR', 'SHE', 'STOOD', 'TO', 'THE', 'CRISIS', 'WHICH', 'WAS', 'TO', 'DETERMINE', 'WHETHER', 'SHE', 'WAS', 'TO', 'BE', 'BLESSED', 'OR', 'UNHAPPY', 'FOR', 'EVER', 'AND', 'RACKED', 'BY', 'THE', 'INTENSITY', 'OF', 'HER', 'EMOTIONS', 'SHE', 'SUDDENLY', 'CHANGED', 'COLOUR', 'HER', 'HEAD', 'DROPPED', 'AND', 'SHE', 'FELL', 'FORWARD', 'IN', 'A', 'SWOON', 'INTO', 'THE', 'ARMS', 'OF', 'THE', 'DISMAYED', 'ESTAFANIA'] +5639-40744-0035-172: hyp=['SHE', 'REFLECTED', 'HOW', 'NEAR', 'SHE', 'STOOD', 'TO', 'THE', 'CRISIS', 'WHICH', 'WAS', 'TO', 'DETERMINE', 'WHETHER', 'SHE', 'WAS', 'TO', 'BE', 'BLESSED', 'OR', 'UNHAPPY', 'FOR', 'EVER', 'AND', 'RACKED', 'BY', 'THE', 'INTENSITY', 'OF', 'HER', 'EMOTIONS', 'SHE', 'SUDDENLY', 'CHANGED', 'COLOR', 'HER', 'HEAD', 'DROPPED', 'AND', 'SHE', 'FELL', 'FORWARD', 'IN', 'A', 'SWOON', 'INTO', 'THE', 'ARMS', 'OF', 'THE', 'DISMAYED', 'STEFFANIA'] +5639-40744-0036-173: ref=['HIS', 'MOTHER', 'HAD', 'LEFT', 'HER', 'TO', 'HIM', 'AS', 'BEING', 'HER', 'DESTINED', 'PROTECTOR', 'BUT', 'WHEN', 'SHE', 'SAW', 'THAT', 'HE', 'TOO', 'WAS', 'INSENSIBLE', 'SHE', 'WAS', 'NEAR', 'MAKING', 'A', 'THIRD', 'AND', 'WOULD', 'HAVE', 'DONE', 'SO', 'HAD', 'HE', 'NOT', 'COME', 'TO', 'HIMSELF'] +5639-40744-0036-173: hyp=['HIS', 'MOTHER', 'HAD', 'LEFT', 'HER', 'TO', 'HIM', 'AS', 'BEING', 'HER', 'DESTINED', 'PROTECTOR', 'BUT', 'WHEN', 'SHE', 'SAW', 'THAT', 'HE', 'TOO', 'WAS', 'INSENSIBLE', 'SHE', 'WAS', 'NEAR', 'MAKING', 'A', 'THIRD', 'AND', 'WOULD', 'HAVE', 'DONE', 'SO', 'HAD', 'HE', 'NOT', 'COME', 'TO', 'HIMSELF'] +5639-40744-0037-174: ref=['KNOW', 'THEN', 'SON', 'OF', 'MY', 
'HEART', 'THAT', 'THIS', 'FAINTING', 'LADY', 'IS', 'YOUR', 'REAL', 'BRIDE', 'I', 'SAY', 'REAL', 'BECAUSE', 'SHE', 'IS', 'THE', 'ONE', 'WHOM', 'YOUR', 'FATHER', 'AND', 'I', 'HAVE', 'CHOSEN', 'FOR', 'YOU', 'AND', 'THE', 'PORTRAIT', 'WAS', 'A', 'PRETENCE'] +5639-40744-0037-174: hyp=['KNOW', 'THEN', 'SON', 'OF', 'MY', 'HEART', 'THAT', 'THIS', 'FAINTING', 'LADY', 'IS', 'YOUR', 'REAL', 'BRIDE', 'I', 'SAY', 'REAL', 'BECAUSE', 'SHE', 'IS', 'THE', 'ONE', 'WHOM', 'YOUR', 'FATHER', 'AND', 'I', 'HAVE', 'CHOSEN', 'FOR', 'YOU', 'AND', 'A', 'PORTRAIT', 'WAS', 'A', 'PRETENCE'] +5639-40744-0038-175: ref=['JUST', 'AT', 'THE', 'MOMENT', 'WHEN', 'THE', 'TEARS', 'OF', 'THE', 'PITYING', 'BEHOLDERS', 'FLOWED', 'FASTEST', 'AND', 'THEIR', 'EJACULATIONS', 'WERE', 'MOST', 'EXPRESSIVE', 'OF', 'DESPAIR', 'LEOCADIA', 'GAVE', 'SIGNS', 'OF', 'RECOVERY', 'AND', 'BROUGHT', 'BACK', 'GLADNESS', 'TO', 'THE', 'HEARTS', 'OF', 'ALL'] +5639-40744-0038-175: hyp=['JUST', 'AT', 'A', 'MOMENT', 'WHEN', 'THE', 'TEARS', 'OF', 'THE', 'PITYING', 'BEHOLDERS', 'FLOWED', 'FASTEST', 'AND', 'THERE', 'EJACULATIONS', 'WERE', 'MOST', 'EXPRESSIVE', 'OF', 'DESPAIR', 'LE', 'OCCADIA', 'GAVE', 'SIGNS', 'OF', 'RECOVERY', 'AND', 'BROUGHT', 'BACK', 'GLADNESS', 'THROUGH', 'THE', 'HEARTS', 'OF', 'ALL'] +5639-40744-0039-176: ref=['WHEN', 'SHE', 'CAME', 'TO', 'HER', 'SENSES', 'AND', 'BLUSHING', 'TO', 'FIND', 'HERSELF', 'IN', "RODOLFO'S", 'ARMS', 'WOULD', 'HAVE', 'DISENGAGED', 'HERSELF', 'NO', 'SENORA', 'HE', 'SAID', 'THAT', 'MUST', 'NOT', 'BE', 'STRIVE', 'NOT', 'TO', 'WITHDRAW', 'FROM', 'THE', 'ARMS', 'OF', 'HIM', 'WHO', 'HOLDS', 'YOU', 'IN', 'HIS', 'SOUL'] +5639-40744-0039-176: hyp=['WHEN', 'SHE', 'CAME', 'TO', 'HER', 'SENSES', 'AND', 'BLUSHING', 'TO', 'FIND', 'HERSELF', 'IN', "RIDOLPH'S", 'ARMS', 'WOULD', 'HAVE', 'DISENGAGED', 'HERSELF', 'NO', 'SENORA', 'HE', 'SAID', 'THAT', 'MUST', 'NOT', 'BE', 'STRIVE', 'NOT', 'TO', 'WITHDRAW', 'FROM', 'THE', 'ARMS', 'OF', 'HIM', 'WHO', 'HOLDS', 'YOU', 'IN', 'HIS', 'SOUL'] +5639-40744-0040-177: ref=['THIS', 'WAS', 'DONE', 'FOR', 'THE', 'EVENT', 'TOOK', 'PLACE', 'AT', 'A', 'TIME', 'WHEN', 'THE', 'CONSENT', 'OF', 'THE', 'PARTIES', 'WAS', 'SUFFICIENT', 'FOR', 'THE', 'CELEBRATION', 'OF', 'A', 'MARRIAGE', 'WITHOUT', 'ANY', 'OF', 'THE', 'PRELIMINARY', 'FORMALITIES', 'WHICH', 'ARE', 'NOW', 'SO', 'PROPERLY', 'REQUIRED'] +5639-40744-0040-177: hyp=['THIS', 'WAS', 'DONE', 'FOR', 'THE', 'EVENT', 'TOOK', 'PLACE', 'AT', 'A', 'TIME', 'WHEN', 'THE', 'CONSENT', 'OF', 'THE', 'PARTIES', 'WAS', 'SUFFICIENT', 'FOR', 'THE', 'CELEBRATION', 'OF', 'THE', 'MARRIAGE', 'WITHOUT', 'ANY', 'OF', 'THE', 'PRELIMINARY', 'FORMALITIES', 'WHICH', 'ARE', 'NOW', 'SO', 'PROPERLY', 'REQUIRED'] +5639-40744-0041-178: ref=['NOR', 'WAS', 'RODOLFO', 'LESS', 'SURPRISED', 'THAN', 'THEY', 'AND', 'THE', 'BETTER', 'TO', 'ASSURE', 'HIMSELF', 'OF', 'SO', 'WONDERFUL', 'A', 'FACT', 'HE', 'BEGGED', 'LEOCADIA', 'TO', 'GIVE', 'HIM', 'SOME', 'TOKEN', 'WHICH', 'SHOULD', 'MAKE', 'PERFECTLY', 'CLEAR', 'TO', 'HIM', 'THAT', 'WHICH', 'INDEED', 'HE', 'DID', 'NOT', 'DOUBT', 'SINCE', 'IT', 'WAS', 'AUTHENTICATED', 'BY', 'HIS', 'PARENTS'] +5639-40744-0041-178: hyp=['NOR', 'WAS', 'RUDOLPHAL', 'LESS', 'SURPRISED', 'THAN', 'THEY', 'AND', 'A', 'BETTER', 'TO', 'ASSURE', 'HIMSELF', 'OF', 'SO', 'WONDERFUL', 'A', 'FACT', 'HE', 'BEGGED', 'LOCATIA', 'TO', 'GIVE', 'HIM', 'SOME', 'TOKEN', 'WHICH', 'SHOULD', 'MAKE', 'PERFECTLY', 'CLEAR', 'TO', 'HIM', 'THAT', 'WHICH', 'INDEED', 'HE', 'DID', 'NOT', 'DOUBT', 'SINCE', 'IT', 'WAS', 'AUTHENTICATED', 'BY', 'HIS', 'PARENTS'] +5683-32865-0000-2483: ref=['YOU', 
'KNOW', 'CAPTAIN', 'LAKE'] +5683-32865-0000-2483: hyp=['YOU', 'KNOW', 'CAPTAIN', 'LAKE'] +5683-32865-0001-2484: ref=['SAID', 'LORD', 'CHELFORD', 'ADDRESSING', 'ME'] +5683-32865-0001-2484: hyp=['SAID', 'LORD', 'CHELFORD', 'ADDRESSING', 'ME'] +5683-32865-0002-2485: ref=['HE', 'HAD', 'HIS', 'HAND', 'UPON', "LAKE'S", 'SHOULDER'] +5683-32865-0002-2485: hyp=['HE', 'HAD', 'HIS', 'HAND', 'UPON', "LAKE'S", 'SHOULDER'] +5683-32865-0003-2486: ref=['THEY', 'ARE', 'COUSINS', 'YOU', 'KNOW', 'WE', 'ARE', 'ALL', 'COUSINS'] +5683-32865-0003-2486: hyp=['THEY', 'ARE', 'COUSINS', 'YOU', 'KNOW', 'WE', 'ARE', 'ALL', 'COUSINS'] +5683-32865-0004-2487: ref=['WHATEVER', 'LORD', 'CHELFORD', 'SAID', 'MISS', 'BRANDON', 'RECEIVED', 'IT', 'VERY', 'GRACIOUSLY', 'AND', 'EVEN', 'WITH', 'A', 'MOMENTARY', 'SMILE'] +5683-32865-0004-2487: hyp=['WHATEVER', 'LORD', 'CHELFORD', 'SAID', 'MISS', 'BRANDON', 'RECEIVED', 'IT', 'VERY', 'GRACIOUSLY', 'AND', 'EVEN', 'WITH', 'A', 'MOMENTARY', 'SMILE'] +5683-32865-0005-2488: ref=['BUT', 'HER', 'GREETING', 'TO', 'CAPTAIN', 'LAKE', 'WAS', 'MORE', 'THAN', 'USUALLY', 'HAUGHTY', 'AND', 'FROZEN', 'AND', 'HER', 'FEATURES', 'I', 'FANCIED', 'PARTICULARLY', 'PROUD', 'AND', 'PALE'] +5683-32865-0005-2488: hyp=['BUT', 'HER', 'GREETING', 'TO', 'CAPTAIN', 'LEAK', 'WAS', 'MORE', 'THAN', 'USUALLY', 'HAUGHTY', 'AND', 'FROZEN', 'AND', 'HER', 'FEATURES', 'I', 'FANCIED', 'PARTICULARLY', 'PROUD', 'AND', 'PALE'] +5683-32865-0006-2489: ref=['AT', 'DINNER', 'LAKE', 'WAS', 'EASY', 'AND', 'AMUSING'] +5683-32865-0006-2489: hyp=['AT', 'DINNER', 'LAKE', 'WAS', 'EASY', 'AND', 'AMUSING'] +5683-32865-0007-2490: ref=["I'M", 'GLAD', 'YOU', 'LIKE', 'IT', 'SAYS', 'WYLDER', 'CHUCKLING', 'BENIGNANTLY', 'ON', 'IT', 'OVER', 'HIS', 'SHOULDER'] +5683-32865-0007-2490: hyp=['I', 'AM', 'GLAD', 'YOU', 'LIKE', 'IT', 'SAYS', 'WYLDER', 'CHUCKLING', 'BENIGNANTLY', 'ON', 'IT', 'OVER', 'HIS', 'SHOULDER'] +5683-32865-0008-2491: ref=['I', 'BELIEVE', 'I', 'HAVE', 'A', 'LITTLE', 'TASTE', 'THAT', 'WAY', 'THOSE', 'ARE', 'ALL', 'REAL', 'YOU', 'KNOW', 'THOSE', 'JEWELS'] +5683-32865-0008-2491: hyp=['I', 'BELIEVE', 'I', 'HAVE', 'A', 'LITTLE', 'TASTE', 'THAT', 'WAY', 'THOSE', 'ARE', 'ALL', 'REAL', 'YOU', 'KNOW', 'THOSE', 'JEWELS'] +5683-32865-0009-2492: ref=['AND', 'HE', 'PLACED', 'IT', 'IN', 'THAT', "GENTLEMAN'S", 'FINGERS', 'WHO', 'NOW', 'TOOK', 'HIS', 'TURN', 'AT', 'THE', 'LAMP', 'AND', 'CONTEMPLATED', 'THE', 'LITTLE', 'PARALLELOGRAM', 'WITH', 'A', 'GLEAM', 'OF', 'SLY', 'AMUSEMENT'] +5683-32865-0009-2492: hyp=['AND', 'HE', 'PLACED', 'IT', 'IN', 'THAT', "GENTLEMAN'S", 'FINGERS', 'WHO', 'NOW', 'TOOK', 'HIS', 'TURN', 'AT', 'THE', 'LAMP', 'AND', 'CONTEMPLATED', 'THE', 'LITTLE', 'PARALLELLOGRAM', 'WITH', 'A', 'GLEAM', 'OF', 'SLY', 'AMUSEMENT'] +5683-32865-0010-2493: ref=['I', 'WAS', 'THINKING', "IT'S", 'VERY', 'LIKE', 'THE', 'ACE', 'OF', 'HEARTS', 'ANSWERED', 'THE', 'CAPTAIN', 'SOFTLY', 'SMILING', 'ON'] +5683-32865-0010-2493: hyp=['I', 'WAS', 'THINKING', "IT'S", 'VERY', 'LIKE', 'THE', 'ACE', 'OF', 'HEARTS', 'ANSWERED', 'THE', 'CAPTAIN', 'SOFTLY', 'SMILING', 'ON'] +5683-32865-0011-2494: ref=['WHEREUPON', 'LAKE', 'LAUGHED', 'QUIETLY', 'STILL', 'LOOKING', 'ON', 'THE', 'ACE', 'OF', 'HEARTS', 'WITH', 'HIS', 'SLY', 'EYES'] +5683-32865-0011-2494: hyp=['WHEREUPON', 'LAKE', 'LAUGHED', 'QUIETLY', 'STILL', 'LOOKING', 'ON', 'THE', 'ACE', 'OF', 'HEARTS', 'WITH', 'HIS', 'SLY', 'EYES'] +5683-32865-0012-2495: ref=['AND', 'WYLDER', 'LAUGHED', 'TOO', 'MORE', 'SUDDENLY', 'AND', 'NOISILY', 'THAN', 'THE', 'HUMOUR', 'OF', 'THE', 'JOKE', 'SEEMED', 'QUITE', 'TO', 'CALL', 'FOR', 
'AND', 'GLANCED', 'A', 'GRIM', 'LOOK', 'FROM', 'THE', 'CORNERS', 'OF', 'HIS', 'EYES', 'ON', 'LAKE', 'BUT', 'THE', 'GALLANT', 'CAPTAIN', 'DID', 'NOT', 'SEEM', 'TO', 'PERCEIVE', 'IT', 'AND', 'AFTER', 'A', 'FEW', 'SECONDS', 'MORE', 'HE', 'HANDED', 'IT', 'VERY', 'INNOCENTLY', 'BACK', 'TO', 'MISSUS', 'DOROTHY', 'ONLY', 'REMARKING'] +5683-32865-0012-2495: hyp=['AND', 'WHILE', 'THEIR', 'LEFT', 'TOO', 'MORE', 'SUDDENLY', 'AND', 'NOISILY', 'THAN', 'THE', 'HUMOR', 'OF', 'THE', 'JOKE', 'SEEMED', 'QUITE', 'TO', 'CALL', 'FOR', 'AND', 'GLANCED', 'A', 'GRIM', 'LOOK', 'FROM', 'THE', 'CORNERS', 'OF', 'HIS', 'EYES', 'UNLIKE', 'BUT', 'THE', 'GALLANT', 'CAPTAIN', 'DID', 'NOT', 'SEEM', 'TO', 'PERCEIVE', 'IT', 'AND', 'AFTER', 'A', 'FEW', 'SECONDS', 'MORE', 'HE', 'HANDED', 'IT', 'VERY', 'INNOCENTLY', 'BACK', 'TO', 'MISSUS', 'DOROTHY', 'ONLY', 'REMARKING'] +5683-32865-0013-2496: ref=['DO', 'YOU', 'KNOW', 'LAKE', 'OH', 'I', 'REALLY', "CAN'T", 'TELL', 'BUT', "HE'LL", 'SOON', 'TIRE', 'OF', 'COUNTRY', 'LIFE'] +5683-32865-0013-2496: hyp=['DO', 'YOU', 'KNOW', 'LAKE', 'OH', 'I', 'REALLY', "CAN'T", 'TELL', 'BUT', "HE'LL", 'SOON', 'TIRE', 'OF', 'COUNTRY', 'LIFE'] +5683-32865-0014-2497: ref=["HE'S", 'NOT', 'A', 'MAN', 'FOR', 'COUNTRY', 'QUARTERS'] +5683-32865-0014-2497: hyp=["HE'S", 'NOT', 'A', 'MAN', 'FOR', 'COUNTRY', 'QUARTERS'] +5683-32865-0015-2498: ref=['I', 'HAD', 'A', 'HORRID', 'DREAM', 'ABOUT', 'HIM', 'LAST', 'NIGHT', 'THAT'] +5683-32865-0015-2498: hyp=['I', 'HAD', 'A', 'HORRID', 'DREAM', 'ABOUT', 'HIM', 'LAST', 'NIGHT', 'THAT'] +5683-32865-0016-2499: ref=['OH', 'I', 'KNOW', "THAT'S", 'LORNE', 'BRANDON'] +5683-32865-0016-2499: hyp=['OH', 'I', 'KNOW', "THAT'S", 'LORN', 'BRANDON'] +5683-32865-0017-2500: ref=['ALL', 'THE', 'TIME', 'HE', 'WAS', 'TALKING', 'TO', 'ME', 'HIS', 'ANGRY', 'LITTLE', 'EYES', 'WERE', 'FOLLOWING', 'LAKE'] +5683-32865-0017-2500: hyp=['ALL', 'THE', 'TIME', 'HE', 'WAS', 'TALKING', 'TO', 'ME', 'HIS', 'ANGRY', 'LITTLE', 'EYES', 'WERE', 'FOLLOWING', 'LAKE'] +5683-32866-0000-2527: ref=['MISS', 'LAKE', 'DECLINED', 'THE', 'CARRIAGE', 'TO', 'NIGHT'] +5683-32866-0000-2527: hyp=['MISS', 'LAKE', 'DECLINED', 'THE', 'CARRIAGE', 'TO', 'NIGHT'] +5683-32866-0001-2528: ref=['AND', 'HE', 'ADDED', 'SOMETHING', 'STILL', 'LESS', 'COMPLIMENTARY'] +5683-32866-0001-2528: hyp=['AND', 'HE', 'ADDED', 'SOME', 'THINGS', 'STILL', 'LESS', 'COMPLIMENTARY'] +5683-32866-0002-2529: ref=['BUT', "DON'T", 'THESE', 'VERY', 'WISE', 'THINGS', 'SOMETIMES', 'TURN', 'OUT', 'VERY', 'FOOLISHLY'] +5683-32866-0002-2529: hyp=['BUT', "DON'T", 'THESE', 'VERY', 'WISE', 'THINGS', 'SOMETIMES', 'TURN', 'OUT', 'VERY', 'FOOLISHLY'] +5683-32866-0003-2530: ref=['IN', 'THE', 'MEANTIME', 'I', 'HAD', 'FORMED', 'A', 'NEW', 'IDEA', 'OF', 'HER'] +5683-32866-0003-2530: hyp=['IN', 'THE', 'MEANTIME', 'I', 'HAD', 'FORMED', 'A', 'NEW', 'IDEA', 'OF', 'HER'] +5683-32866-0004-2531: ref=['BY', 'THIS', 'TIME', 'LORD', 'CHELFORD', 'AND', 'WYLDER', 'RETURNED', 'AND', 'DISGUSTED', 'RATHER', 'WITH', 'MYSELF', 'I', 'RUMINATED', 'ON', 'MY', 'WANT', 'OF', 'GENERAL', 'SHIP'] +5683-32866-0004-2531: hyp=['BY', 'THIS', 'TIME', 'LORD', 'CHELFORD', 'AND', 'WYLDER', 'RETURNED', 'AND', 'DISGUSTED', 'RATHER', 'WITH', 'MYSELF', 'I', 'RUMINATED', 'ON', 'MY', 'WANT', 'OF', 'GENERALSHIP'] +5683-32866-0005-2532: ref=['AND', 'HE', 'MADE', 'A', 'LITTLE', 'DIP', 'OF', 'HIS', 'CANE', 'TOWARDS', 'BRANDON', 'HALL', 'OVER', 'HIS', 'SHOULDER'] +5683-32866-0005-2532: hyp=['AND', 'HE', 'MADE', 'A', 'LITTLE', 'DIP', 'OF', 'HIS', 'CANE', 'TOWARDS', 'BRANDON', 'HALL', 'OVER', 'HIS', 'SHOULDER'] 
+5683-32866-0006-2533: ref=['YES', 'SO', 'THEY', 'SAID', 'BUT', 'THAT', 'WOULD', 'I', 'THINK', 'HAVE', 'BEEN', 'WORSE'] +5683-32866-0006-2533: hyp=['YES', 'SO', 'THEY', 'SAID', 'BUT', 'THAT', 'WOULD', 'I', 'THINK', 'HAVE', 'BEEN', 'WORSE'] +5683-32866-0007-2534: ref=['IF', 'A', "FELLOW'S", 'BEEN', 'A', 'LITTLE', 'BIT', 'WILD', "HE'S", 'BEELZEBUB', 'AT', 'ONCE'] +5683-32866-0007-2534: hyp=['IF', 'A', "FELLOW'S", 'BEEN', 'A', 'LITTLE', 'BIT', 'WILD', 'HE', 'IS', 'BEALES', 'A', 'BUB', 'AT', 'ONCE'] +5683-32866-0008-2535: ref=["BRACTON'S", 'A', 'VERY', 'GOOD', 'FELLOW', 'I', 'CAN', 'ASSURE', 'YOU'] +5683-32866-0008-2535: hyp=["BRACTON'S", 'A', 'VERY', 'GOOD', 'FELLOW', 'I', 'CAN', 'ASSURE', 'YOU'] +5683-32866-0009-2536: ref=['I', "DON'T", 'KNOW', 'AND', "CAN'T", 'SAY', 'HOW', 'YOU', 'FINE', 'GENTLEMEN', 'DEFINE', 'WICKEDNESS', 'ONLY', 'AS', 'AN', 'OBSCURE', 'FEMALE', 'I', 'SPEAK', 'ACCORDING', 'TO', 'MY', 'LIGHTS', 'AND', 'HE', 'IS', 'GENERALLY', 'THOUGHT', 'THE', 'WICKEDEST', 'MAN', 'IN', 'THIS', 'COUNTY'] +5683-32866-0009-2536: hyp=['I', "DON'T", 'KNOW', 'ONE', "CAN'T", 'SAY', 'HOW', 'YOU', 'FIND', 'GENTLEMEN', 'TO', 'FIND', 'WICKEDNESS', 'ONLY', 'AS', 'AN', 'OBSCURE', 'FEMALE', 'I', 'SPEAK', 'ACCORDING', 'TO', 'MY', 'LIGHTS', 'AND', 'HE', 'IS', 'GENERALLY', 'THOUGHT', 'THE', 'WICKEDEST', 'MAN', 'IN', 'THIS', 'COUNTY'] +5683-32866-0010-2537: ref=['WELL', 'YOU', 'KNOW', 'RADIE', 'WOMEN', 'LIKE', 'WICKED', 'FELLOWS', 'IT', 'IS', 'CONTRAST', 'I', 'SUPPOSE', 'BUT', 'THEY', 'DO', 'AND', "I'M", 'SURE', 'FROM', 'WHAT', 'BRACTON', 'HAS', 'SAID', 'TO', 'ME', 'I', 'KNOW', 'HIM', 'INTIMATELY', 'THAT', 'DORCAS', 'LIKES', 'HIM', 'AND', 'I', "CAN'T", 'CONCEIVE', 'WHY', 'THEY', 'ARE', 'NOT', 'MARRIED'] +5683-32866-0010-2537: hyp=['WELL', 'YOU', 'KNOW', 'RADIE', 'WOMEN', 'LIKE', 'WICKED', 'FELLOWS', 'IT', 'IS', 'CONTRAST', 'I', 'SUPPOSE', 'BUT', 'THEY', 'DO', 'AND', "I'M", 'SURE', 'FROM', 'WHAT', 'BRACTON', 'HAS', 'SAID', 'TO', 'ME', 'I', 'KNOW', 'HIM', 'INTIMATELY', 'THAT', 'DORCAS', 'LIKES', 'HIM', 'AND', 'I', "CAN'T", 'CONCEIVE', 'WHY', 'THEY', 'ARE', 'NOT', 'MARRIED'] +5683-32866-0011-2538: ref=['THEIR', 'WALK', 'CONTINUED', 'SILENT', 'FOR', 'THE', 'GREATER', 'PART', 'NEITHER', 'WAS', 'QUITE', 'SATISFIED', 'WITH', 'THE', 'OTHER', 'BUT', 'RACHEL', 'AT', 'LAST', 'SAID'] +5683-32866-0011-2538: hyp=['THEIR', 'WALK', 'CONTINUED', 'SILENT', 'FOR', 'THE', 'GREATER', 'PART', 'NEITHER', 'WAS', 'QUITE', 'SATISFIED', 'WITH', 'THE', 'OTHER', 'BUT', 'RACHEL', 'AT', 'LAST', 'SAID'] +5683-32866-0012-2539: ref=['NOW', "THAT'S", 'IMPOSSIBLE', 'RADIE', 'FOR', 'I', 'REALLY', "DON'T", 'THINK', 'I', 'ONCE', 'THOUGHT', 'OF', 'HIM', 'ALL', 'THIS', 'EVENING', 'EXCEPT', 'JUST', 'WHILE', 'WE', 'WERE', 'TALKING'] +5683-32866-0012-2539: hyp=['NOW', "THAT'S", 'IMPOSSIBLE', 'RADIE', 'FOR', 'I', 'REALLY', "DON'T", 'THINK', 'I', 'ONCE', 'THOUGHT', 'OF', 'HIM', 'ALL', 'THIS', 'EVENING', 'EXCEPT', 'JUST', 'WHILE', 'WE', 'WERE', 'TALKING'] +5683-32866-0013-2540: ref=['THERE', 'WAS', 'A', 'BRIGHT', 'MOONLIGHT', 'BROKEN', 'BY', 'THE', 'SHADOWS', 'OF', 'OVERHANGING', 'BOUGHS', 'AND', 'WITHERED', 'LEAVES', 'AND', 'THE', 'MOTTLED', 'LIGHTS', 'AND', 'SHADOWS', 'GLIDED', 'ODDLY', 'ACROSS', 'HIS', 'PALE', 'FEATURES'] +5683-32866-0013-2540: hyp=['THERE', 'WAS', 'A', 'BRIGHT', 'MOONLIGHT', 'BROKEN', 'BY', 'THE', 'SHADOWS', 'OF', 'OVERHANGING', 'BOUGHS', 'AND', 'WITHERED', 'LEAVES', 'AND', 'THE', 'MOTTLED', 'LIGHTS', 'AND', 'SHADOWS', 'GLIDED', 'ODDLY', 'ACROSS', 'HIS', 'PALE', 'FEATURES'] +5683-32866-0014-2541: ref=["DON'T", 'INSULT', 'ME', 
'STANLEY', 'BY', 'TALKING', 'AGAIN', 'AS', 'YOU', 'DID', 'THIS', 'MORNING'] +5683-32866-0014-2541: hyp=["DON'T", 'INSULT', 'ME', 'STANLEY', 'BY', 'TALKING', 'AGAIN', 'AS', 'YOU', 'DID', 'THIS', 'MORNING'] +5683-32866-0015-2542: ref=['WHAT', 'I', 'SAY', 'IS', 'ALTOGETHER', 'ON', 'YOUR', 'OWN', 'ACCOUNT'] +5683-32866-0015-2542: hyp=['WHAT', 'I', 'SAY', 'IS', 'ALTOGETHER', 'ON', 'YOUR', 'OWN', 'ACCOUNT'] +5683-32866-0016-2543: ref=['MARK', 'MY', 'WORDS', "YOU'LL", 'FIND', 'HIM', 'TOO', 'STRONG', 'FOR', 'YOU', 'AYE', 'AND', 'TOO', 'DEEP'] +5683-32866-0016-2543: hyp=['MARK', 'MY', 'WORDS', "YOU'LL", 'FIND', 'HIM', 'TOO', 'STRONG', 'FOR', 'YOU', 'I', 'AND', 'TOO', 'DEEP'] +5683-32866-0017-2544: ref=['I', 'AM', 'VERY', 'UNEASY', 'ABOUT', 'IT', 'WHATEVER', 'IT', 'IS', 'I', "CAN'T", 'HELP', 'IT'] +5683-32866-0017-2544: hyp=['I', 'AM', 'VERY', 'UNEASY', 'ABOUT', 'IT', 'WHATEVER', 'IT', 'IS', 'I', "CAN'T", 'HELP', 'IT'] +5683-32866-0018-2545: ref=['TO', 'MY', 'MIND', 'THERE', 'HAS', 'ALWAYS', 'BEEN', 'SOMETHING', 'INEXPRESSIBLY', 'AWFUL', 'IN', 'FAMILY', 'FEUDS'] +5683-32866-0018-2545: hyp=['TO', 'MY', 'MIND', 'THERE', 'HAS', 'ALWAYS', 'BEEN', 'SOMETHING', 'INEXPRESSIBLY', 'AWFUL', 'IN', 'FAMILY', 'FEUDS'] +5683-32866-0019-2546: ref=['THE', 'MYSTERY', 'OF', 'THEIR', 'ORIGIN', 'THEIR', 'CAPACITY', 'FOR', 'EVOLVING', 'LATENT', 'FACULTIES', 'OF', 'CRIME', 'AND', 'THE', 'STEADY', 'VITALITY', 'WITH', 'WHICH', 'THEY', 'SURVIVE', 'THE', 'HEARSE', 'AND', 'SPEAK', 'THEIR', 'DEEP', 'MOUTHED', 'MALIGNITIES', 'IN', 'EVERY', 'NEW', 'BORN', 'GENERATION', 'HAVE', 'ASSOCIATED', 'THEM', 'SOMEHOW', 'IN', 'MY', 'MIND', 'WITH', 'A', 'SPELL', 'OF', 'LIFE', 'EXCEEDING', 'AND', 'DISTINCT', 'FROM', 'HUMAN', 'AND', 'A', 'SPECIAL', 'SATANIC', 'ACTION'] +5683-32866-0019-2546: hyp=['THE', 'MYSTERY', 'OF', 'THEIR', 'ORIGIN', 'THEIR', 'CAPACITY', 'FOR', 'EVOLVING', 'LATENT', 'FACULTIES', 'OF', 'CRIME', 'AND', 'THE', 'STUDY', 'VITALITY', 'WITH', 'WHICH', 'THEY', 'SURVIVE', 'THE', 'HEARSE', 'AND', 'SPEAK', 'THEIR', 'DEEP', 'MOUTH', 'MALIGNITIES', 'IN', 'EVERY', 'NEW', 'BORN', 'GENERATION', 'HAVE', 'ASSOCIATED', 'THEM', 'SOMEHOW', 'IN', 'MY', 'MIND', 'WITH', 'A', 'SPELL', 'OF', 'LIFE', 'EXCEEDING', 'AND', 'DISTINCT', 'FROM', 'HUMAN', 'AND', 'ESPECIAL', 'SATANIC', 'ACTION'] +5683-32866-0020-2547: ref=['THE', 'FLOOR', 'MORE', 'THAN', 'ANYTHING', 'ELSE', 'SHOWED', 'THE', 'GREAT', 'AGE', 'OF', 'THE', 'ROOM'] +5683-32866-0020-2547: hyp=['THE', 'FLOOR', 'MORE', 'THAN', 'ANYTHING', 'ELSE', 'SHOWED', 'THE', 'GREAT', 'AGE', 'OF', 'THE', 'ROOM'] +5683-32866-0021-2548: ref=['MY', 'BED', 'WAS', 'UNEXCEPTIONABLY', 'COMFORTABLE', 'BUT', 'IN', 'MY', 'THEN', 'MOOD', 'I', 'COULD', 'HAVE', 'WISHED', 'IT', 'A', 'GREAT', 'DEAL', 'MORE', 'MODERN'] +5683-32866-0021-2548: hyp=['MY', 'BED', 'WAS', 'UNEXCEPTIONALLY', 'COMFORTABLE', 'BUT', 'IN', 'MY', 'THEN', 'MOOD', 'I', 'COULD', 'HAVE', 'WISHED', 'IT', 'A', 'GREAT', 'DEAL', 'MORE', 'MODERN'] +5683-32866-0022-2549: ref=['ITS', 'CURTAINS', 'WERE', 'OF', 'THICK', 'AND', 'FADED', 'TAPESTRY'] +5683-32866-0022-2549: hyp=['ITS', 'CURTAINS', 'WERE', 'OF', 'THICK', 'AND', 'FADED', 'TAPESTRY'] +5683-32866-0023-2550: ref=['ALL', 'THE', 'FURNITURE', 'BELONGED', 'TO', 'OTHER', 'TIMES'] +5683-32866-0023-2550: hyp=['ALL', 'THE', 'FURNITURE', 'BELONGED', 'TO', 'OTHER', 'TIMES'] +5683-32866-0024-2551: ref=['I', "SHAN'T", 'TROUBLE', 'YOU', 'ABOUT', 'MY', 'TRAIN', 'OF', 'THOUGHTS', 'OR', 'FANCIES', 'BUT', 'I', 'BEGAN', 'TO', 'FEEL', 'VERY', 'LIKE', 'A', 'GENTLEMAN', 'IN', 'A', 'GHOST', 'STORY', 'WATCHING', 'EXPERIMENTALLY', 
'IN', 'A', 'HAUNTED', 'CHAMBER'] +5683-32866-0024-2551: hyp=['I', "SHAN'T", 'TROUBLE', 'YOU', 'ABOUT', 'MY', 'TRAIN', 'OF', 'THOUGHTS', 'OR', 'FANCIES', 'BUT', 'I', 'BEGAN', 'TO', 'FEEL', 'VERY', 'LIKE', 'A', 'GENTLEMAN', 'IN', 'A', 'GHOST', 'STORY', 'WATCHING', 'EXPERIMENTALLY', 'IN', 'A', 'HAUNTED', 'CHAMBER'] +5683-32866-0025-2552: ref=['I', 'DID', 'NOT', 'EVEN', 'TAKE', 'THE', 'PRECAUTION', 'OF', 'SMOKING', 'UP', 'THE', 'CHIMNEY'] +5683-32866-0025-2552: hyp=['I', 'DID', 'NOT', 'EVEN', 'TAKE', 'THE', 'PRECAUTION', 'OF', 'SMOKING', 'UP', 'THE', 'CHIMNEY'] +5683-32866-0026-2553: ref=['I', 'BOLDLY', 'LIGHTED', 'MY', 'CHEROOT'] +5683-32866-0026-2553: hyp=['I', 'BOLDLY', 'LIGHTED', 'MY', 'JEROOT'] +5683-32866-0027-2554: ref=['A', 'COLD', 'BRIGHT', 'MOON', 'WAS', 'SHINING', 'WITH', 'CLEAR', 'SHARP', 'LIGHTS', 'AND', 'SHADOWS'] +5683-32866-0027-2554: hyp=['A', 'COLD', 'BRIGHT', 'MOON', 'WAS', 'SHINING', 'WITH', 'CLEAR', 'SHARP', 'LIGHTS', 'AND', 'SHADOWS'] +5683-32866-0028-2555: ref=['THE', 'SOMBRE', 'OLD', 'TREES', 'LIKE', 'GIGANTIC', 'HEARSE', 'PLUMES', 'BLACK', 'AND', 'AWFUL'] +5683-32866-0028-2555: hyp=['THE', 'SOMBRE', 'OLD', 'TREES', 'LIKE', 'GIGANTIC', 'HEARSE', 'PLUMES', 'BLACK', 'AND', 'AWFUL'] +5683-32866-0029-2556: ref=['SOMEHOW', 'I', 'HAD', 'GROWN', 'NERVOUS'] +5683-32866-0029-2556: hyp=['SOMEHOW', 'I', 'HAD', 'GROWN', 'NERVOUS'] +5683-32866-0030-2557: ref=['A', 'LITTLE', 'BIT', 'OF', 'PLASTER', 'TUMBLED', 'DOWN', 'THE', 'CHIMNEY', 'AND', 'STARTLED', 'ME', 'CONFOUNDEDLY'] +5683-32866-0030-2557: hyp=['A', 'LITTLE', 'BIT', 'OF', 'PLASTER', 'TUMBLED', 'DOWN', 'THE', 'CHIMNEY', 'AND', 'STARTLED', 'ME', 'CONFOUNDEDLY'] +5683-32879-0000-2501: ref=['IT', 'WAS', 'NOT', 'VERY', 'MUCH', 'PAST', 'ELEVEN', 'THAT', 'MORNING', 'WHEN', 'THE', 'PONY', 'CARRIAGE', 'FROM', 'BRANDON', 'DREW', 'UP', 'BEFORE', 'THE', 'LITTLE', 'GARDEN', 'WICKET', 'OF', "REDMAN'S", 'FARM'] +5683-32879-0000-2501: hyp=['IT', 'WAS', 'NOT', 'VERY', 'MUCH', 'PAST', 'ELEVEN', 'THAT', 'MORNING', 'WHEN', 'THE', 'PONY', 'CARRIAGE', 'FROM', 'BRANDON', 'DREW', 'UP', 'BEFORE', 'THE', 'LITTLE', 'GARDEN', 'WICKET', 'OF', "REDMAN'S", 'FARM'] +5683-32879-0001-2502: ref=['WELL', 'SHE', 'WAS', 'BETTER', 'THOUGH', 'SHE', 'HAD', 'HAD', 'A', 'BAD', 'NIGHT'] +5683-32879-0001-2502: hyp=['WHILE', 'SHE', 'WAS', 'BETTER', 'THOUGH', 'SHE', 'HAD', 'HAD', 'A', 'BAD', 'NIGHT'] +5683-32879-0002-2503: ref=['SO', 'THERE', 'CAME', 'A', 'STEP', 'AND', 'A', 'LITTLE', 'RUSTLING', 'OF', 'FEMININE', 'DRAPERIES', 'THE', 'SMALL', 'DOOR', 'OPENED', 'AND', 'RACHEL', 'ENTERED', 'WITH', 'HER', 'HAND', 'EXTENDED', 'AND', 'A', 'PALE', 'SMILE', 'OF', 'WELCOME'] +5683-32879-0002-2503: hyp=['SO', 'THERE', 'CAME', 'A', 'STEP', 'AND', 'A', 'LITTLE', 'RUSTLING', 'OF', 'FEMININE', 'DRAPERIES', 'THE', 'SMALL', 'DOOR', 'OPENED', 'AND', 'RACHEL', 'ENTERED', 'WITH', 'HER', 'HAND', 'EXTENDED', 'AND', 'A', 'PALE', 'SMILE', 'OF', 'WELCOME'] +5683-32879-0003-2504: ref=['WOMEN', 'CAN', 'HIDE', 'THEIR', 'PAIN', 'BETTER', 'THAN', 'WE', 'MEN', 'AND', 'BEAR', 'IT', 'BETTER', 'TOO', 'EXCEPT', 'WHEN', 'SHAME', 'DROPS', 'FIRE', 'INTO', 'THE', 'DREADFUL', 'CHALICE'] +5683-32879-0003-2504: hyp=['WOMEN', 'CAN', 'HIDE', 'THEIR', 'PAIN', 'BETTER', 'THAN', 'WE', 'MEN', 'AND', 'BEAR', 'IT', 'BETTER', 'TOO', 'EXCEPT', 'WHEN', 'SHAME', 'DROPS', 'FIRE', 'INTO', 'THE', 'DREADFUL', 'CHALICE'] +5683-32879-0004-2505: ref=['BUT', 'POOR', 'RACHEL', 'LAKE', 'HAD', 'MORE', 'THAN', 'THAT', 'STOICAL', 'HYPOCRISY', 'WHICH', 'ENABLES', 'THE', 'TORTURED', 'SPIRITS', 'OF', 'HER', 'SEX', 'TO', 'LIFT', 'A', 
'PALE', 'FACE', 'THROUGH', 'THE', 'FLAMES', 'AND', 'SMILE'] +5683-32879-0004-2505: hyp=['BUT', 'POOR', 'RACHEL', 'LAKE', 'HAD', 'MORE', 'THAN', 'THAT', 'STOICAL', 'HYPOCRISY', 'WHICH', 'ENABLES', 'THE', 'TORTURED', 'SPIRITS', 'OF', 'HER', 'SEX', 'TO', 'LIFT', 'A', 'PALE', 'FACE', 'THROUGH', 'THE', 'FLAMES', 'AND', 'SMILE'] +5683-32879-0005-2506: ref=['THIS', 'TRANSIENT', 'SPRING', 'AND', 'LIGHTING', 'UP', 'ARE', 'BEAUTIFUL', 'A', 'GLAMOUR', 'BEGUILING', 'OUR', 'SENSES'] +5683-32879-0005-2506: hyp=['THIS', 'TRANSIENT', 'SPRING', 'AND', 'LIGHTING', 'UP', 'OUR', 'BEAUTIFUL', 'A', 'GLAMOUR', 'BEGUILING', 'OUR', 'SENSES'] +5683-32879-0006-2507: ref=['THERE', 'WAS', 'SOMETHING', 'OF', 'SWEETNESS', 'AND', 'FONDNESS', 'IN', 'HER', 'TONES', 'AND', 'MANNER', 'WHICH', 'WAS', 'NEW', 'TO', 'RACHEL', 'AND', 'COMFORTING', 'AND', 'SHE', 'RETURNED', 'THE', 'GREETING', 'AS', 'KINDLY', 'AND', 'FELT', 'MORE', 'LIKE', 'HER', 'FORMER', 'SELF'] +5683-32879-0006-2507: hyp=['THERE', 'WAS', 'SOMETHING', 'OF', 'SWEETNESS', 'AND', 'FONDNESS', 'IN', 'HER', 'TONES', 'AND', 'MANNER', 'WHICH', 'WAS', 'NEW', 'TO', 'RACHEL', 'AND', 'COMFORTING', 'AND', 'SHE', 'RETURNED', 'THE', 'GREETING', 'AS', 'KINDLY', 'AND', 'FELT', 'MORE', 'LIKE', 'HER', 'FORMER', 'SELF'] +5683-32879-0007-2508: ref=["RACHEL'S", 'PALE', 'AND', 'SHARPENED', 'FEATURES', 'AND', 'DILATED', 'EYE', 'STRUCK', 'HER', 'WITH', 'A', 'PAINFUL', 'SURPRISE'] +5683-32879-0007-2508: hyp=["RACHEL'S", 'PALE', 'AND', 'SHARPENED', 'FEATURES', 'AND', 'DILATED', 'EYE', 'STRUCK', 'HER', 'WITH', 'A', 'PAINFUL', 'SURPRISE'] +5683-32879-0008-2509: ref=['YOU', 'HAVE', 'BEEN', 'SO', 'ILL', 'MY', 'POOR', 'RACHEL'] +5683-32879-0008-2509: hyp=['YOU', 'HAVE', 'BEEN', 'SO', 'ILL', 'MY', 'POOR', 'RACHEL'] +5683-32879-0009-2510: ref=['ILL', 'AND', 'TROUBLED', 'DEAR', 'TROUBLED', 'IN', 'MIND', 'AND', 'MISERABLY', 'NERVOUS'] +5683-32879-0009-2510: hyp=['ILL', 'AND', 'TROUBLED', 'DEAR', 'TROUBLED', 'IN', 'MIND', 'AND', 'MISERABLY', 'NERVOUS'] +5683-32879-0010-2511: ref=['POOR', 'RACHEL', 'HER', 'NATURE', 'RECOILED', 'FROM', 'DECEIT', 'AND', 'SHE', 'TOLD', 'AT', 'ALL', 'EVENTS', 'AS', 'MUCH', 'OF', 'THE', 'TRUTH', 'AS', 'SHE', 'DARED'] +5683-32879-0010-2511: hyp=['POOR', 'RACHEL', 'HER', 'NATURE', 'RECOILED', 'FROM', 'DECEIT', 'AND', 'SHE', 'TOLD', 'AT', 'ALL', 'EVENTS', 'AS', 'MUCH', 'OF', 'THE', 'TRUTH', 'AS', 'SHE', 'DARED'] +5683-32879-0011-2512: ref=['SHE', 'SPOKE', 'WITH', 'A', 'SUDDEN', 'ENERGY', 'WHICH', 'PARTOOK', 'OF', 'FEAR', 'AND', 'PASSION', 'AND', 'FLUSHED', 'HER', 'THIN', 'CHEEK', 'AND', 'MADE', 'HER', 'LANGUID', 'EYES', 'FLASH'] +5683-32879-0011-2512: hyp=['SHE', 'SPOKE', 'WITH', 'A', 'SUDDEN', 'ENERGY', 'WHICH', 'PARTOOK', 'A', 'FEAR', 'AND', 'PASSION', 'AND', 'FLUSHED', 'HER', 'THIN', 'CHEEK', 'AND', 'MADE', 'HER', 'LANGUID', 'EYES', 'FLASH'] +5683-32879-0012-2513: ref=['THANK', 'YOU', 'RACHEL', 'MY', 'COUSIN', 'RACHEL', 'MY', 'ONLY', 'FRIEND'] +5683-32879-0012-2513: hyp=['THANK', 'YOU', 'RACHEL', 'MY', 'COUSIN', 'RACHEL', 'MY', 'ONLY', 'FRIEND'] +5683-32879-0013-2514: ref=['CHELFORD', 'HAD', 'A', 'NOTE', 'FROM', 'MISTER', 'WYLDER', 'THIS', 'MORNING', 'ANOTHER', 'NOTE', 'HIS', 'COMING', 'DELAYED', 'AND', 'SOMETHING', 'OF', 'HIS', 'HAVING', 'TO', 'SEE', 'SOME', 'PERSON', 'WHO', 'IS', 'ABROAD', 'CONTINUED', 'DORCAS', 'AFTER', 'A', 'LITTLE', 'PAUSE'] +5683-32879-0013-2514: hyp=['CHELFORD', 'HAD', 'A', 'NOTE', 'FROM', 'MISTER', 'WYLDER', 'THIS', 'MORNING', 'ANOTHER', 'NOTE', 'HIS', 'COMING', 'DELAYED', 'AND', 'SOMETHING', 'OF', 'HIS', 'HAVING', 'TO', 'SEE', 'SOME', 'PERSON', 
'WHO', 'WAS', 'ABROAD', 'CONTINUED', 'DORCAS', 'AFTER', 'A', 'LITTLE', 'PAUSE'] +5683-32879-0014-2515: ref=['YES', 'SOMETHING', 'EVERYTHING', 'SAID', 'RACHEL', 'HURRIEDLY', 'LOOKING', 'FROWNINGLY', 'AT', 'A', 'FLOWER', 'WHICH', 'SHE', 'WAS', 'TWIRLING', 'IN', 'HER', 'FINGERS'] +5683-32879-0014-2515: hyp=['YES', 'SOMETHING', 'EVERYTHING', 'SAID', 'RACHEL', 'HURRIEDLY', 'LOOKING', 'FROWNINGLY', 'AT', 'A', 'FLOWER', 'WHICH', 'SHE', 'WAS', 'TWIRLING', 'IN', 'HER', 'FINGERS'] +5683-32879-0015-2516: ref=['YES', 'SAID', 'RACHEL'] +5683-32879-0015-2516: hyp=['YES', 'SAID', 'RACHEL'] +5683-32879-0016-2517: ref=['AND', 'THE', 'WAN', 'ORACLE', 'HAVING', 'SPOKEN', 'SHE', 'SATE', 'DOWN', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'ABSTRACTION', 'AGAIN', 'BESIDE', 'DORCAS', 'AND', 'SHE', 'LOOKED', 'FULL', 'IN', 'HER', "COUSIN'S", 'EYES'] +5683-32879-0016-2517: hyp=['AND', 'THE', 'WAN', 'ORACLE', 'HAVING', 'SPOKEN', 'SHE', 'SAT', 'DOWN', 'IN', 'THE', 'SAME', 'SORT', 'OF', 'ABSTRACTION', 'AGAIN', 'BESIDE', 'DORCAS', 'AND', 'SHE', 'LOOKED', 'FULL', 'IN', 'HER', "COUSIN'S", 'EYES'] +5683-32879-0017-2518: ref=['OF', 'MARK', 'WYLDER', 'I', 'SAY', 'THIS', 'HIS', 'NAME', 'HAS', 'BEEN', 'FOR', 'YEARS', 'HATEFUL', 'TO', 'ME', 'AND', 'RECENTLY', 'IT', 'HAS', 'BECOME', 'FRIGHTFUL', 'AND', 'YOU', 'WILL', 'PROMISE', 'ME', 'SIMPLY', 'THIS', 'THAT', 'YOU', 'WILL', 'NEVER', 'ASK', 'ME', 'TO', 'SPEAK', 'AGAIN', 'ABOUT', 'HIM'] +5683-32879-0017-2518: hyp=['OF', 'MARK', 'WYLDER', 'I', 'SAY', 'THIS', 'HIS', 'NAME', 'HAS', 'BEEN', 'FOR', 'YEARS', 'HATEFUL', 'TO', 'ME', 'AND', 'RECENTLY', 'IT', 'HAS', 'BECOME', 'FRIGHTFUL', 'AND', 'YOU', 'WILL', 'PROMISE', 'ME', 'SIMPLY', 'THIS', 'THAT', 'YOU', 'WILL', 'NEVER', 'ASK', 'ME', 'TO', 'SPEAK', 'AGAIN', 'ABOUT', 'HIM'] +5683-32879-0018-2519: ref=['IT', 'IS', 'AN', 'ANTIPATHY', 'AN', 'ANTIPATHY', 'I', 'CANNOT', 'GET', 'OVER', 'DEAR', 'DORCAS', 'YOU', 'MAY', 'THINK', 'IT', 'A', 'MADNESS', 'BUT', "DON'T", 'BLAME', 'ME'] +5683-32879-0018-2519: hyp=['IT', 'IS', 'AN', 'ANTIPATHY', 'AN', 'ANTIPATHY', 'I', 'CANNOT', 'GET', 'OVER', 'DEAR', 'DORCAS', 'YOU', 'MAY', 'THINK', 'IT', 'A', 'MADNESS', 'BUT', "DON'T", 'BLAME', 'ME'] +5683-32879-0019-2520: ref=['I', 'HAVE', 'VERY', 'FEW', 'TO', 'LOVE', 'ME', 'NOW', 'AND', 'I', 'THOUGHT', 'YOU', 'MIGHT', 'LOVE', 'ME', 'AS', 'I', 'HAVE', 'BEGUN', 'TO', 'LOVE', 'YOU'] +5683-32879-0019-2520: hyp=['I', 'HAVE', 'VERY', 'FEW', 'TO', 'LOVE', 'ME', 'NOW', 'AND', 'I', 'THOUGHT', 'YOU', 'MIGHT', 'LOVE', 'ME', 'AS', 'I', 'HAVE', 'BEGUN', 'TO', 'LOVE', 'YOU'] +5683-32879-0020-2521: ref=['AND', 'SHE', 'THREW', 'HER', 'ARMS', 'ROUND', 'HER', "COUSIN'S", 'NECK', 'AND', 'BRAVE', 'RACHEL', 'AT', 'LAST', 'BURST', 'INTO', 'TEARS'] +5683-32879-0020-2521: hyp=['AND', 'SHE', 'THREW', 'HER', 'ARMS', 'ROUND', 'HER', "COUSIN'S", 'NECK', 'AND', 'BRAVE', 'RACHEL', 'AT', 'LAST', 'BURST', 'INTO', 'TEARS'] +5683-32879-0021-2522: ref=['DORCAS', 'IN', 'HER', 'STRANGE', 'WAY', 'WAS', 'MOVED'] +5683-32879-0021-2522: hyp=['DORCAS', 'IN', 'HER', 'STRANGE', 'WAY', 'WAS', 'MOVED'] +5683-32879-0022-2523: ref=['I', 'LIKE', 'YOU', 'STILL', 'RACHEL', "I'M", 'SURE', "I'LL", 'ALWAYS', 'LIKE', 'YOU'] +5683-32879-0022-2523: hyp=['I', 'LIKE', 'YOU', 'STILL', 'RACHEL', "I'M", 'SURE', "I'LL", 'ALWAYS', 'LIKE', 'YOU'] +5683-32879-0023-2524: ref=['YOU', 'RESEMBLE', 'ME', 'RACHEL', 'YOU', 'ARE', 'FEARLESS', 'AND', 'INFLEXIBLE', 'AND', 'GENEROUS'] +5683-32879-0023-2524: hyp=['YOU', 'RESEMBLE', 'ME', 'RACHEL', 'YOU', 'ARE', 'FEARLESS', 'AND', 'INFLEXIBLE', 'AND', 'GENEROUS'] +5683-32879-0024-2525: ref=['YES', 
'RACHEL', 'I', 'DO', 'LOVE', 'YOU'] +5683-32879-0024-2525: hyp=['YES', 'RACHEL', 'I', 'DO', 'LOVE', 'YOU'] +5683-32879-0025-2526: ref=['THANK', 'YOU', 'DORCAS', 'DEAR'] +5683-32879-0025-2526: hyp=['THANK', 'YOU', 'DORCAS', 'DEAR'] +61-70968-0000-2179: ref=['HE', 'BEGAN', 'A', 'CONFUSED', 'COMPLAINT', 'AGAINST', 'THE', 'WIZARD', 'WHO', 'HAD', 'VANISHED', 'BEHIND', 'THE', 'CURTAIN', 'ON', 'THE', 'LEFT'] +61-70968-0000-2179: hyp=['HE', 'BEGAN', 'A', 'CONFUSED', 'COMPLAINT', 'AGAINST', 'THE', 'WIZARD', 'WHO', 'HAD', 'VANISHED', 'BEHIND', 'THE', 'CURTAIN', 'ON', 'THE', 'LEFT'] +61-70968-0001-2180: ref=['GIVE', 'NOT', 'SO', 'EARNEST', 'A', 'MIND', 'TO', 'THESE', 'MUMMERIES', 'CHILD'] +61-70968-0001-2180: hyp=['GIVE', 'NOT', 'SO', 'EARNEST', 'A', 'MIND', 'TO', 'THESE', 'MUMMERIES', 'CHILD'] +61-70968-0002-2181: ref=['A', 'GOLDEN', 'FORTUNE', 'AND', 'A', 'HAPPY', 'LIFE'] +61-70968-0002-2181: hyp=['A', 'GOLDEN', 'FORTUNE', 'AND', 'A', 'HAPPY', 'LIFE'] +61-70968-0003-2182: ref=['HE', 'WAS', 'LIKE', 'UNTO', 'MY', 'FATHER', 'IN', 'A', 'WAY', 'AND', 'YET', 'WAS', 'NOT', 'MY', 'FATHER'] +61-70968-0003-2182: hyp=['HE', 'WAS', 'LIKE', 'UNTO', 'MY', 'FATHER', 'IN', 'A', 'WAY', 'AND', 'YET', 'WAS', 'NOT', 'MY', 'FATHER'] +61-70968-0004-2183: ref=['ALSO', 'THERE', 'WAS', 'A', 'STRIPLING', 'PAGE', 'WHO', 'TURNED', 'INTO', 'A', 'MAID'] +61-70968-0004-2183: hyp=['ALSO', 'THERE', 'WAS', 'A', 'STRIPLING', 'PAGE', 'WHO', 'TURNED', 'INTO', 'A', 'MAID'] +61-70968-0005-2184: ref=['THIS', 'WAS', 'SO', 'SWEET', 'A', 'LADY', 'SIR', 'AND', 'IN', 'SOME', 'MANNER', 'I', 'DO', 'THINK', 'SHE', 'DIED'] +61-70968-0005-2184: hyp=['THIS', 'WAS', 'SO', 'SWEET', 'A', 'LADY', 'SIR', 'AND', 'IN', 'SOME', 'MANNER', 'I', 'DO', 'THINK', 'SHE', 'DIED'] +61-70968-0006-2185: ref=['BUT', 'THEN', 'THE', 'PICTURE', 'WAS', 'GONE', 'AS', 'QUICKLY', 'AS', 'IT', 'CAME'] +61-70968-0006-2185: hyp=['BUT', 'THEN', 'THE', 'PICTURE', 'WAS', 'GONE', 'AS', 'QUICKLY', 'AS', 'IT', 'CAME'] +61-70968-0007-2186: ref=['SISTER', 'NELL', 'DO', 'YOU', 'HEAR', 'THESE', 'MARVELS'] +61-70968-0007-2186: hyp=['SISTER', 'NELL', 'DO', 'YOU', 'HEAR', 'THESE', 'MARVELS'] +61-70968-0008-2187: ref=['TAKE', 'YOUR', 'PLACE', 'AND', 'LET', 'US', 'SEE', 'WHAT', 'THE', 'CRYSTAL', 'CAN', 'SHOW', 'TO', 'YOU'] +61-70968-0008-2187: hyp=['TAKE', 'YOUR', 'PLACE', 'AND', 'LET', 'US', 'SEE', 'WHAT', 'THE', 'CRYSTAL', 'CAN', 'SHOW', 'TO', 'YOU'] +61-70968-0009-2188: ref=['LIKE', 'AS', 'NOT', 'YOUNG', 'MASTER', 'THOUGH', 'I', 'AM', 'AN', 'OLD', 'MAN'] +61-70968-0009-2188: hyp=['LIKE', 'AS', 'NOT', 'YOUNG', 'MASTER', 'THOUGH', 'I', 'AM', 'AN', 'OLD', 'MAN'] +61-70968-0010-2189: ref=['FORTHWITH', 'ALL', 'RAN', 'TO', 'THE', 'OPENING', 'OF', 'THE', 'TENT', 'TO', 'SEE', 'WHAT', 'MIGHT', 'BE', 'AMISS', 'BUT', 'MASTER', 'WILL', 'WHO', 'PEEPED', 'OUT', 'FIRST', 'NEEDED', 'NO', 'MORE', 'THAN', 'ONE', 'GLANCE'] +61-70968-0010-2189: hyp=['FORTHWITH', 'ALL', 'RAN', 'TO', 'THE', 'OPENING', 'OF', 'THE', 'TENT', 'TO', 'SEE', 'WHAT', 'MIGHT', 'BE', 'AMISS', 'BUT', 'MASTER', 'WILL', 'WHO', 'PEEPED', 'OUT', 'FIRST', 'NEEDED', 'NO', 'MORE', 'THAN', 'ONE', 'GLANCE'] +61-70968-0011-2190: ref=['HE', 'GAVE', 'WAY', 'TO', 'THE', 'OTHERS', 'VERY', 'READILY', 'AND', 'RETREATED', 'UNPERCEIVED', 'BY', 'THE', 'SQUIRE', 'AND', 'MISTRESS', 'FITZOOTH', 'TO', 'THE', 'REAR', 'OF', 'THE', 'TENT'] +61-70968-0011-2190: hyp=['HE', 'GAVE', 'WAY', 'TO', 'THE', 'OTHERS', 'VERY', 'READILY', 'AND', 'RETREATED', 'UNPERCEIVED', 'BY', 'THE', 'SQUIRE', 'AND', 'MISTRESS', 'FITZOOTH', 'TO', 'THE', 'REAR', 'OF', 'THE', 'TENT'] 
+61-70968-0012-2191: ref=['CRIES', 'OF', 'A', 'NOTTINGHAM', 'A', 'NOTTINGHAM'] +61-70968-0012-2191: hyp=['CRIES', 'OF', 'UNNOTTINGHAM', 'ARE', 'NOTTINGHAM'] +61-70968-0013-2192: ref=['BEFORE', 'THEM', 'FLED', 'THE', 'STROLLER', 'AND', 'HIS', 'THREE', 'SONS', 'CAPLESS', 'AND', 'TERRIFIED'] +61-70968-0013-2192: hyp=['BEFORE', 'THEM', 'FLED', 'THE', 'STROLLER', 'AND', 'HIS', 'THREE', 'SONS', 'CAPLESS', 'AND', 'TERRIFIED'] +61-70968-0014-2193: ref=['WHAT', 'IS', 'THE', 'TUMULT', 'AND', 'RIOTING', 'CRIED', 'OUT', 'THE', 'SQUIRE', 'AUTHORITATIVELY', 'AND', 'HE', 'BLEW', 'TWICE', 'ON', 'A', 'SILVER', 'WHISTLE', 'WHICH', 'HUNG', 'AT', 'HIS', 'BELT'] +61-70968-0014-2193: hyp=['WHAT', 'IS', 'THE', 'TUMULT', 'AND', 'RIOTING', 'CRIED', 'OUT', 'THE', 'SQUIRE', 'AUTHORITATIVELY', 'AND', 'HE', 'BLEW', 'TWICE', 'ON', 'THE', 'SILVER', 'WHISTLE', 'WHICH', 'HUNG', 'AT', 'HIS', 'BELT'] +61-70968-0015-2194: ref=['NAY', 'WE', 'REFUSED', 'THEIR', 'REQUEST', 'MOST', 'POLITELY', 'MOST', 'NOBLE', 'SAID', 'THE', 'LITTLE', 'STROLLER'] +61-70968-0015-2194: hyp=['NAY', 'WE', 'REFUSED', 'THEIR', 'REQUEST', 'MOST', 'POLITELY', 'MOST', 'NOBLE', 'SAID', 'THE', 'LITTLE', 'STROLLER'] +61-70968-0016-2195: ref=['AND', 'THEN', 'THEY', 'BECAME', 'VEXED', 'AND', 'WOULD', 'HAVE', 'SNATCHED', 'YOUR', 'PURSE', 'FROM', 'US'] +61-70968-0016-2195: hyp=['AND', 'THEN', 'THEY', 'BECAME', 'VEXED', 'AND', 'WOULD', 'HAVE', 'SNATCHED', 'YOUR', 'PURSE', 'FROM', 'US'] +61-70968-0017-2196: ref=['I', 'COULD', 'NOT', 'SEE', 'MY', 'BOY', 'INJURED', 'EXCELLENCE', 'FOR', 'BUT', 'DOING', 'HIS', 'DUTY', 'AS', 'ONE', 'OF', "CUMBERLAND'S", 'SONS'] +61-70968-0017-2196: hyp=['I', 'COULD', 'NOT', 'SEE', 'MY', 'BOY', 'INJURE', 'EXCELLENCE', 'FOR', 'BUT', 'DOING', 'HIS', 'DUTY', 'AS', 'ONE', 'OF', "CUMBERLAND'S", 'SONS'] +61-70968-0018-2197: ref=['SO', 'I', 'DID', 'PUSH', 'THIS', 'FELLOW'] +61-70968-0018-2197: hyp=['SO', 'I', 'DID', 'PUSH', 'THIS', 'FELLOW'] +61-70968-0019-2198: ref=['IT', 'IS', 'ENOUGH', 'SAID', 'GEORGE', 'GAMEWELL', 'SHARPLY', 'AND', 'HE', 'TURNED', 'UPON', 'THE', 'CROWD'] +61-70968-0019-2198: hyp=['IT', 'IS', 'ENOUGH', 'SAID', 'GEORGE', 'GAMEWELL', 'SHARPLY', 'AS', 'HE', 'TURNED', 'UPON', 'THE', 'CROWD'] +61-70968-0020-2199: ref=['SHAME', 'ON', 'YOU', 'CITIZENS', 'CRIED', 'HE', 'I', 'BLUSH', 'FOR', 'MY', 'FELLOWS', 'OF', 'NOTTINGHAM'] +61-70968-0020-2199: hyp=['SHAME', 'ON', 'YOU', 'CITIZENS', 'CRIED', 'HE', 'I', 'BLUSH', 'FOR', 'MY', 'FELLOWS', 'OF', 'NOTTINGHAM'] +61-70968-0021-2200: ref=['SURELY', 'WE', 'CAN', 'SUBMIT', 'WITH', 'GOOD', 'GRACE'] +61-70968-0021-2200: hyp=['SURELY', 'WE', 'CAN', 'SUBMIT', 'WITH', 'GOOD', 'GRACE'] +61-70968-0022-2201: ref=['TIS', 'FINE', 'FOR', 'YOU', 'TO', 'TALK', 'OLD', 'MAN', 'ANSWERED', 'THE', 'LEAN', 'SULLEN', 'APPRENTICE'] +61-70968-0022-2201: hyp=['TIS', 'FINE', 'FOR', 'YOU', 'TO', 'TALK', 'OLD', 'MAN', 'ANSWERED', 'THE', 'LEAN', 'SULLEN', 'APPRENTICE'] +61-70968-0023-2202: ref=['BUT', 'I', 'WRESTLED', 'WITH', 'THIS', 'FELLOW', 'AND', 'DO', 'KNOW', 'THAT', 'HE', 'PLAYED', 'UNFAIRLY', 'IN', 'THE', 'SECOND', 'BOUT'] +61-70968-0023-2202: hyp=['BUT', 'I', 'WRESTLED', 'WITH', 'THIS', 'FELLOW', 'AND', 'DO', 'KNOW', 'THAT', 'HE', 'PLAYED', 'UNFAIRLY', 'IN', 'THE', 'SECOND', 'BOUT'] +61-70968-0024-2203: ref=['SPOKE', 'THE', 'SQUIRE', 'LOSING', 'ALL', 'PATIENCE', 'AND', 'IT', 'WAS', 'TO', 'YOU', 'THAT', 'I', 'GAVE', 'ANOTHER', 'PURSE', 'IN', 'CONSOLATION'] +61-70968-0024-2203: hyp=['SPOKE', 'THE', 'SQUIRE', 'LOSING', 'ALL', 'PATIENT', 'AND', 'IT', 'WAS', 'TO', 'YOU', 'THAT', 'I', 'GAVE', 'ANOTHER', 'PERSON', 
'CONSOLATION'] +61-70968-0025-2204: ref=['COME', 'TO', 'ME', 'MEN', 'HERE', 'HERE', 'HE', 'RAISED', 'HIS', 'VOICE', 'STILL', 'LOUDER'] +61-70968-0025-2204: hyp=['COME', 'TO', 'ME', 'MEN', 'HERE', 'HERE', 'HE', 'RAISED', 'HIS', 'VOICE', 'STILL', 'LOUDER'] +61-70968-0026-2205: ref=['THE', 'STROLLERS', 'TOOK', 'THEIR', 'PART', 'IN', 'IT', 'WITH', 'HEARTY', 'ZEST', 'NOW', 'THAT', 'THEY', 'HAD', 'SOME', 'CHANCE', 'OF', 'BEATING', 'OFF', 'THEIR', 'FOES'] +61-70968-0026-2205: hyp=['THE', 'STROLLERS', 'TOOK', 'THEIR', 'PART', 'IN', 'IT', 'WITH', 'HEARTY', 'ZEST', 'NOW', 'THAT', 'THEY', 'HAD', 'SOME', 'CHANCE', 'OF', 'BEATING', 'OFF', 'THEIR', 'FOES'] +61-70968-0027-2206: ref=['ROBIN', 'AND', 'THE', 'LITTLE', 'TUMBLER', 'BETWEEN', 'THEM', 'TRIED', 'TO', 'FORCE', 'THE', 'SQUIRE', 'TO', 'STAND', 'BACK', 'AND', 'VERY', 'VALIANTLY', 'DID', 'THESE', 'TWO', 'COMPORT', 'THEMSELVES'] +61-70968-0027-2206: hyp=['ROBIN', 'AND', 'THE', 'LITTLE', 'TUMBLER', 'BETWEEN', 'THEM', 'TRIED', 'TO', 'FORCE', 'THE', 'SQUIRE', 'TO', 'STAND', 'BACK', 'AND', 'VERY', 'VALIANTLY', 'DID', 'THESE', 'TWO', 'COMPORT', 'THEMSELVES'] +61-70968-0028-2207: ref=['THE', 'HEAD', 'AND', 'CHIEF', 'OF', 'THE', 'RIOT', 'THE', 'NOTTINGHAM', 'APPRENTICE', 'WITH', 'CLENCHED', 'FISTS', 'THREATENED', 'MONTFICHET'] +61-70968-0028-2207: hyp=['THE', 'HEAD', 'AND', 'CHIEF', 'OF', 'THE', 'RIOT', 'DENOTTINGHAM', 'APPRENTICED', 'WITH', 'CLENCHED', 'FISTS', 'THREATENED', 'MONTFICHET'] +61-70968-0029-2208: ref=['THE', 'SQUIRE', 'HELPED', 'TO', 'THRUST', 'THEM', 'ALL', 'IN', 'AND', 'ENTERED', 'SWIFTLY', 'HIMSELF'] +61-70968-0029-2208: hyp=['THE', 'SQUIRE', 'HELPED', 'TO', 'THRUST', 'THEM', 'ALL', 'IN', 'AND', 'ENTERED', 'SWIFTLY', 'HIMSELF'] +61-70968-0030-2209: ref=['NOW', 'BE', 'SILENT', 'ON', 'YOUR', 'LIVES', 'HE', 'BEGAN', 'BUT', 'THE', 'CAPTURED', 'APPRENTICE', 'SET', 'UP', 'AN', 'INSTANT', 'SHOUT'] +61-70968-0030-2209: hyp=['NOW', 'BE', 'SILENT', 'ON', 'YOUR', 'LIVES', 'HE', 'BEGAN', 'BUT', 'THE', 'CAPTURED', 'APPRENTICE', 'SET', 'UP', 'AN', 'INSTANT', 'SHOUT'] +61-70968-0031-2210: ref=['SILENCE', 'YOU', 'KNAVE', 'CRIED', 'MONTFICHET'] +61-70968-0031-2210: hyp=['SILENCE', 'YOU', 'KNAVE', 'CRIED', 'MONTFICHET'] +61-70968-0032-2211: ref=['HE', 'FELT', 'FOR', 'AND', 'FOUND', 'THE', "WIZARD'S", 'BLACK', 'CLOTH', 'THE', 'SQUIRE', 'WAS', 'QUITE', 'OUT', 'OF', 'BREATH'] +61-70968-0032-2211: hyp=['HE', 'FELT', 'FOR', 'AND', 'FOUND', 'THE', "WIZARD'S", 'BLACK', 'CLOTH', 'THE', 'SQUIRE', 'WAS', 'QUITE', 'OUT', 'OF', 'BREATH'] +61-70968-0033-2212: ref=['THRUSTING', 'OPEN', 'THE', 'PROPER', 'ENTRANCE', 'OF', 'THE', 'TENT', 'ROBIN', 'SUDDENLY', 'RUSHED', 'FORTH', 'WITH', 'HIS', 'BURDEN', 'WITH', 'A', 'GREAT', 'SHOUT'] +61-70968-0033-2212: hyp=['THRUSTING', 'OPEN', 'THE', 'PROPER', 'ENTRANCE', 'OF', 'THE', 'TENT', 'ROBIN', 'SUDDENLY', 'RUSHED', 'FORTH', 'WITH', 'HIS', 'BURDEN', 'WITH', 'A', 'GREAT', 'SHOUT'] +61-70968-0034-2213: ref=['A', 'MONTFICHET', 'A', 'MONTFICHET', 'GAMEWELL', 'TO', 'THE', 'RESCUE'] +61-70968-0034-2213: hyp=['A', 'MONTFICHET', 'A', 'MONTFICHET', 'GAMEWELL', 'TO', 'THE', 'RESCUE'] +61-70968-0035-2214: ref=['TAKING', 'ADVANTAGE', 'OF', 'THIS', 'THE', "SQUIRE'S", 'FEW', 'MEN', 'REDOUBLED', 'THEIR', 'EFFORTS', 'AND', 'ENCOURAGED', 'BY', "ROBIN'S", 'AND', 'THE', 'LITTLE', "STROLLER'S", 'CRIES', 'FOUGHT', 'THEIR', 'WAY', 'TO', 'HIM'] +61-70968-0035-2214: hyp=['TAKING', 'ADVANTAGE', 'OF', 'THIS', 'THE', "SQUIRE'S", 'FEW', 'MEN', 'REDOUBLED', 'THEIR', 'EFFORTS', 'AND', 'ENCOURAGED', 'BY', 'ROBINS', 'AND', 'THE', 'LITTLE', "STROLLER'S", 'CRIES', 
'FOUGHT', 'THEIR', 'WAY', 'TO', 'HIM'] +61-70968-0036-2215: ref=['GEORGE', 'MONTFICHET', 'WILL', 'NEVER', 'FORGET', 'THIS', 'DAY'] +61-70968-0036-2215: hyp=['GEORGE', 'MONTFICHET', 'WILL', 'NEVER', 'FORGET', 'THIS', 'DAY'] +61-70968-0037-2216: ref=['WHAT', 'IS', 'YOUR', 'NAME', 'LORDING', 'ASKED', 'THE', 'LITTLE', 'STROLLER', 'PRESENTLY'] +61-70968-0037-2216: hyp=['WHAT', 'IS', 'YOUR', 'NAME', 'LORDING', 'ASKED', 'THE', 'LITTLE', 'STROLLER', 'PRESENTLY'] +61-70968-0038-2217: ref=['ROBIN', 'FITZOOTH'] +61-70968-0038-2217: hyp=['ROBIN', 'FITZOOTH'] +61-70968-0039-2218: ref=['AND', 'MINE', 'IS', 'WILL', 'STUTELEY', 'SHALL', 'WE', 'BE', 'COMRADES'] +61-70968-0039-2218: hyp=['AND', 'MINE', 'IS', 'WILL', 'STUTELEY', 'SHALL', 'WE', 'BE', 'COMRADES'] +61-70968-0040-2219: ref=['RIGHT', 'WILLINGLY', 'FOR', 'BETWEEN', 'US', 'WE', 'HAVE', 'WON', 'THE', 'BATTLE', 'ANSWERED', 'ROBIN'] +61-70968-0040-2219: hyp=['RIGHT', 'WILLINGLY', 'FOR', 'BETWEEN', 'US', 'WE', 'HAVE', 'WON', 'THE', 'BATTLE', 'ANSWERED', 'ROBIN'] +61-70968-0041-2220: ref=['I', 'LIKE', 'YOU', 'WILL', 'YOU', 'ARE', 'THE', 'SECOND', 'WILL', 'THAT', 'I', 'HAVE', 'MET', 'AND', 'LIKED', 'WITHIN', 'TWO', 'DAYS', 'IS', 'THERE', 'A', 'SIGN', 'IN', 'THAT'] +61-70968-0041-2220: hyp=['I', 'LIKE', 'YOU', 'WILL', 'YOU', 'ARE', 'THE', 'SECOND', 'WILL', 'THAT', 'I', 'HAVE', 'MET', 'AND', 'LIKED', 'WITHIN', 'TWO', 'DAYS', 'IS', 'THERE', 'A', 'SIGN', 'IN', 'THAT'] +61-70968-0042-2221: ref=['MONTFICHET', 'CALLED', 'OUT', 'FOR', 'ROBIN', 'TO', 'GIVE', 'HIM', 'AN', 'ARM'] +61-70968-0042-2221: hyp=['MONTFICHET', 'CALLED', 'OUT', 'FOR', 'ROBIN', 'TO', 'GIVE', 'HIM', 'AN', 'ARM'] +61-70968-0043-2222: ref=['FRIENDS', 'SAID', 'MONTFICHET', 'FAINTLY', 'TO', 'THE', 'WRESTLERS', 'BEAR', 'US', 'ESCORT', 'SO', 'FAR', 'AS', 'THE', "SHERIFF'S", 'HOUSE'] +61-70968-0043-2222: hyp=['FRIENDS', 'SAID', 'MONTFICHET', 'FAINTLY', 'TO', 'THE', 'WRESTLERS', 'BEAR', 'US', 'ESCORT', 'SO', 'FAR', 'AS', 'THE', "SHERIFF'S", 'HOUSE'] +61-70968-0044-2223: ref=['IT', 'WILL', 'NOT', 'BE', 'SAFE', 'FOR', 'YOU', 'TO', 'STAY', 'HERE', 'NOW'] +61-70968-0044-2223: hyp=['IT', 'WILL', 'NOT', 'BE', 'SAFE', 'FOR', 'YOU', 'TO', 'STAY', 'HERE', 'NOW'] +61-70968-0045-2224: ref=['PRAY', 'FOLLOW', 'US', 'WITH', 'MINE', 'AND', 'MY', 'LORD', "SHERIFF'S", 'MEN'] +61-70968-0045-2224: hyp=['PRAY', 'FOLLOW', 'US', 'WITH', 'MINE', 'AND', 'MY', 'LORD', "SHERIFF'S", 'MEN'] +61-70968-0046-2225: ref=['NOTTINGHAM', 'CASTLE', 'WAS', 'REACHED', 'AND', 'ADMITTANCE', 'WAS', 'DEMANDED'] +61-70968-0046-2225: hyp=['NODDING', 'HIM', 'CASTLE', 'WAS', 'REACHED', 'AND', 'ADMITTANCE', 'WAS', 'DEMANDED'] +61-70968-0047-2226: ref=['MASTER', 'MONCEUX', 'THE', 'SHERIFF', 'OF', 'NOTTINGHAM', 'WAS', 'MIGHTILY', 'PUT', 'ABOUT', 'WHEN', 'TOLD', 'OF', 'THE', 'RIOTING'] +61-70968-0047-2226: hyp=['MASTER', 'MONCEUX', 'THE', 'SHERIFF', 'OF', 'NOTTINGHAM', 'WAS', 'MIGHTILY', 'PUT', 'ABOUT', 'WHEN', 'TOLD', 'OF', 'THE', 'RIOTING'] +61-70968-0048-2227: ref=['AND', 'HENRY', 'MIGHT', 'RETURN', 'TO', 'ENGLAND', 'AT', 'ANY', 'MOMENT'] +61-70968-0048-2227: hyp=['AND', 'HENRY', 'MIGHT', 'RETURN', 'TO', 'ENGLAND', 'AT', 'ANY', 'MOMENT'] +61-70968-0049-2228: ref=['HAVE', 'YOUR', 'WILL', 'CHILD', 'IF', 'THE', 'BOY', 'ALSO', 'WILLS', 'IT', 'MONTFICHET', 'ANSWERED', 'FEELING', 'TOO', 'ILL', 'TO', 'OPPOSE', 'ANYTHING', 'VERY', 'STRONGLY', 'JUST', 'THEN'] +61-70968-0049-2228: hyp=['HAVE', 'YOUR', 'WILL', 'CHILD', 'IF', 'THE', 'BOY', 'ALSO', 'WILDS', 'IT', 'MONTFICHET', 'ANSWERED', 'FEELING', 'TOO', 'ILL', 'TO', 'OPPOSE', 'ANYTHING', 'VERY', 'STRONGLY', 
'JUST', 'THEN'] +61-70968-0050-2229: ref=['HE', 'MADE', 'AN', 'EFFORT', 'TO', 'HIDE', 'HIS', 'CONDITION', 'FROM', 'THEM', 'ALL', 'AND', 'ROBIN', 'FELT', 'HIS', 'FINGERS', 'TIGHTEN', 'UPON', 'HIS', 'ARM'] +61-70968-0050-2229: hyp=['HE', 'MADE', 'AN', 'EFFORT', 'TO', 'HIDE', 'HIS', 'CONDITION', 'FROM', 'THEM', 'ALL', 'AND', 'ROBIN', 'FELT', 'HIS', 'FINGERS', 'TIGHTEN', 'UPON', 'HIS', 'ARM'] +61-70968-0051-2230: ref=['BEG', 'ME', 'A', 'ROOM', 'OF', 'THE', 'SHERIFF', 'CHILD', 'QUICKLY'] +61-70968-0051-2230: hyp=['BEGGED', 'ME', 'A', 'ROOM', 'OF', 'THE', 'SHERIFF', 'CHILD', 'QUICKLY'] +61-70968-0052-2231: ref=['BUT', 'WHO', 'IS', 'THIS', 'FELLOW', 'PLUCKING', 'AT', 'YOUR', 'SLEEVE'] +61-70968-0052-2231: hyp=['BUT', 'WHO', 'IS', 'THIS', 'FELLOW', 'PLUCKING', 'IT', 'OR', 'STEVE'] +61-70968-0053-2232: ref=['HE', 'IS', 'MY', 'ESQUIRE', 'EXCELLENCY', 'RETURNED', 'ROBIN', 'WITH', 'DIGNITY'] +61-70968-0053-2232: hyp=['HE', 'IS', 'MY', 'ESQUIRE', 'EXCELLENCY', 'RETURNED', 'ROBIN', 'WITH', 'DIGNITY'] +61-70968-0054-2233: ref=['MISTRESS', 'FITZOOTH', 'HAD', 'BEEN', 'CARRIED', 'OFF', 'BY', 'THE', "SHERIFF'S", 'DAUGHTER', 'AND', 'HER', 'MAIDS', 'AS', 'SOON', 'AS', 'THEY', 'HAD', 'ENTERED', 'THE', 'HOUSE', 'SO', 'THAT', 'ROBIN', 'ALONE', 'HAD', 'THE', 'CARE', 'OF', 'MONTFICHET'] +61-70968-0054-2233: hyp=['MISTRESS', 'FITZOOTH', 'HAD', 'BEEN', 'CARRIED', 'OFF', 'BY', 'THE', "SHERIFF'S", 'DAUGHTER', 'AND', 'HER', 'MAIDS', 'AS', 'SOON', 'AS', 'THEY', 'HAD', 'ENTERED', 'THE', 'HOUSE', 'SO', 'THAT', 'ROBIN', 'ALONE', 'HAD', 'THE', 'CARE', 'OF', 'MONT', 'VICHET'] +61-70968-0055-2234: ref=['ROBIN', 'WAS', 'GLAD', 'WHEN', 'AT', 'LENGTH', 'THEY', 'WERE', 'LEFT', 'TO', 'THEIR', 'OWN', 'DEVICES'] +61-70968-0055-2234: hyp=['ROBIN', 'WAS', 'GLAD', 'WHEN', 'AT', 'LENGTH', 'THEY', 'WERE', 'LEFT', 'TO', 'THEIR', 'OWN', 'DEVICES'] +61-70968-0056-2235: ref=['THE', 'WINE', 'DID', 'CERTAINLY', 'BRING', 'BACK', 'THE', 'COLOR', 'TO', 'THE', "SQUIRE'S", 'CHEEKS'] +61-70968-0056-2235: hyp=['THE', 'WINE', 'DID', 'CERTAINLY', 'BRING', 'BACK', 'THE', 'COLOR', 'TO', 'THE', "SQUIRE'S", 'CHEEKS'] +61-70968-0057-2236: ref=['THESE', 'ESCAPADES', 'ARE', 'NOT', 'FOR', 'OLD', 'GAMEWELL', 'LAD', 'HIS', 'DAY', 'HAS', 'COME', 'TO', 'TWILIGHT'] +61-70968-0057-2236: hyp=['THESE', 'ESCAPADES', 'ARE', 'NOT', 'FOR', 'OLD', 'GAMEWELL', 'LAD', 'HIS', 'DAY', 'HAS', 'COME', 'TO', 'TWILIGHT'] +61-70968-0058-2237: ref=['WILL', 'YOU', 'FORGIVE', 'ME', 'NOW'] +61-70968-0058-2237: hyp=['WILL', 'YOU', 'FORGIVE', 'ME', 'NOW'] +61-70968-0059-2238: ref=['IT', 'WILL', 'BE', 'NO', 'DISAPPOINTMENT', 'TO', 'ME'] +61-70968-0059-2238: hyp=['IT', 'WILL', 'BE', 'NO', 'DISAPPOINTMENT', 'TO', 'ME'] +61-70968-0060-2239: ref=['NO', 'THANKS', 'I', 'AM', 'GLAD', 'TO', 'GIVE', 'YOU', 'SUCH', 'EASY', 'HAPPINESS'] +61-70968-0060-2239: hyp=['NO', 'THANKS', 'I', 'AM', 'GLAD', 'TO', 'GIVE', 'YOU', 'SUCH', 'EASY', 'HAPPINESS'] +61-70968-0061-2240: ref=['YOU', 'ARE', 'A', 'WORTHY', 'LEECH', 'WILL', 'PRESENTLY', 'WHISPERED', 'ROBIN', 'THE', 'WINE', 'HAS', 'WORKED', 'A', 'MARVEL'] +61-70968-0061-2240: hyp=['YOU', 'ARE', 'A', 'WORTHY', 'LEECH', 'WILL', 'PRESENTLY', 'WHISPERED', 'ROBIN', 'THE', 'WINE', 'HAS', 'WORKED', 'A', 'MARVEL'] +61-70968-0062-2241: ref=['AY', 'AND', 'SHOW', 'YOU', 'SOME', 'PRETTY', 'TRICKS'] +61-70968-0062-2241: hyp=['I', 'AND', 'SHOW', 'YOU', 'SOME', 'PRETTY', 'TRICKS'] +61-70970-0000-2242: ref=['YOUNG', 'FITZOOTH', 'HAD', 'BEEN', 'COMMANDED', 'TO', 'HIS', "MOTHER'S", 'CHAMBER', 'SO', 'SOON', 'AS', 'HE', 'HAD', 'COME', 'OUT', 'FROM', 'HIS', 'CONVERSE', 'WITH', 
'THE', 'SQUIRE'] +61-70970-0000-2242: hyp=['YOUNG', 'FITZOOTH', 'HAD', 'BEEN', 'COMMANDED', 'TO', 'HIS', "MOTHER'S", 'CHAMBER', 'SO', 'SOON', 'AS', 'HE', 'HAD', 'COME', 'OUT', 'FROM', 'HIS', 'CONVERSE', 'WITH', 'THE', 'SQUIRE'] +61-70970-0001-2243: ref=['THERE', 'BEFELL', 'AN', 'ANXIOUS', 'INTERVIEW', 'MISTRESS', 'FITZOOTH', 'ARGUING', 'FOR', 'AND', 'AGAINST', 'THE', "SQUIRE'S", 'PROJECT', 'IN', 'A', 'BREATH'] +61-70970-0001-2243: hyp=['THERE', 'BEFEL', 'AN', 'ANXIOUS', 'INTERVIEW', 'MISTRESS', 'FITZOOTH', 'ARGUING', 'FOUR', 'AND', 'AGAINST', 'THE', "SQUIRE'S", 'PROJECT', 'IN', 'A', 'BREATH'] +61-70970-0002-2244: ref=['MOST', 'OF', 'ALL', 'ROBIN', 'THOUGHT', 'OF', 'HIS', 'FATHER', 'WHAT', 'WOULD', 'HE', 'COUNSEL'] +61-70970-0002-2244: hyp=['MOST', 'OF', 'ALL', 'ROBIN', 'THOUGHT', 'OF', 'HIS', 'FATHER', 'WHAT', 'WOULD', 'HE', 'COUNSEL'] +61-70970-0003-2245: ref=['IF', 'FOR', 'A', 'WHIM', 'YOU', 'BEGGAR', 'YOURSELF', 'I', 'CANNOT', 'STAY', 'YOU'] +61-70970-0003-2245: hyp=['IF', 'FOR', 'A', 'WHIM', 'YOU', 'BEGGAR', 'YOURSELF', 'I', 'CANNOT', 'STAY', 'YOU'] +61-70970-0004-2246: ref=['BUT', 'TAKE', 'IT', 'WHILST', 'I', 'LIVE', 'AND', 'WEAR', "MONTFICHET'S", 'SHIELD', 'IN', 'THE', 'DAYS', 'WHEN', 'MY', 'EYES', 'CAN', 'BE', 'REJOICED', 'BY', 'SO', 'BRAVE', 'A', 'SIGHT', 'FOR', 'YOU', 'WILL', "NE'ER", 'DISGRACE', 'OUR', 'SCUTCHEON', 'I', 'WARRANT', 'ME'] +61-70970-0004-2246: hyp=['BUT', 'TAKE', 'IT', 'WHILST', 'I', 'LIVE', 'AND', 'WHERE', "MONTFICHET'S", 'SHIELD', 'IN', 'THE', 'DAYS', 'WHEN', 'MY', 'EYES', 'CAN', 'BE', 'REJOICED', 'BY', 'SO', 'BRAVE', 'A', 'SIGHT', 'FOR', 'YOU', 'WILL', 'NEVER', 'DISGRACE', 'OUR', 'DUCHEN', 'I', 'WARRANT', 'ME'] +61-70970-0005-2247: ref=['THE', 'LAD', 'HAD', 'CHECKED', 'HIM', 'THEN'] +61-70970-0005-2247: hyp=['THE', 'LAD', 'HAD', 'CHECKED', 'HIM', 'THEN'] +61-70970-0006-2248: ref=['NEVER', 'THAT', 'SIR', 'HE', 'HAD', 'SAID'] +61-70970-0006-2248: hyp=['NEVER', 'THAT', 'SIR', 'HE', 'HAD', 'SAID'] +61-70970-0007-2249: ref=['HE', 'WAS', 'IN', 'DEEP', 'CONVERSE', 'WITH', 'THE', 'CLERK', 'AND', 'ENTERED', 'THE', 'HALL', 'HOLDING', 'HIM', 'BY', 'THE', 'ARM'] +61-70970-0007-2249: hyp=['HE', 'WAS', 'IN', 'DEEP', 'CONVERSE', 'WITH', 'THE', 'CLERK', 'AND', 'ENTERED', 'THE', 'HALL', 'HOLDING', 'HIM', 'BY', 'THE', 'ARM'] +61-70970-0008-2250: ref=['NOW', 'TO', 'BED', 'BOY'] +61-70970-0008-2250: hyp=['NOW', 'TO', 'BED', 'BOY'] +61-70970-0009-2251: ref=['TIS', 'LATE', 'AND', 'I', 'GO', 'MYSELF', 'WITHIN', 'A', 'SHORT', 'SPACE'] +61-70970-0009-2251: hyp=['TIS', 'LATE', 'AND', 'I', 'GO', 'MYSELF', 'WITHIN', 'A', 'SHORT', 'SPACE'] +61-70970-0010-2252: ref=['DISMISS', 'YOUR', 'SQUIRE', 'ROBIN', 'AND', 'BID', 'ME', 'GOOD', 'E', 'E', 'N'] +61-70970-0010-2252: hyp=['DISMISS', 'YOUR', 'SQUIRE', 'ROBIN', 'AND', 'BID', 'ME', 'GOOD', 'EVEN'] +61-70970-0011-2253: ref=['AS', 'ANY', 'IN', 'ENGLAND', 'I', 'WOULD', 'SAY', 'SAID', 'GAMEWELL', 'PROUDLY', 'THAT', 'IS', 'IN', 'HIS', 'DAY'] +61-70970-0011-2253: hyp=['AS', 'ANY', 'IN', 'ENGLAND', 'I', 'WOULD', 'SAY', 'SAID', 'GAMEWELL', 'PROUDLY', 'THAT', 'IS', 'IN', 'HIS', 'DAY'] +61-70970-0012-2254: ref=['YET', 'HE', 'WILL', 'TEACH', 'YOU', 'A', 'FEW', 'TRICKS', 'WHEN', 'MORNING', 'IS', 'COME'] +61-70970-0012-2254: hyp=['YET', 'HE', 'WILL', 'TEACH', 'YOU', 'A', 'FEW', 'TRICKS', 'WHEN', 'MORNING', 'IS', 'COME'] +61-70970-0013-2255: ref=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'ALTER', 'HIS', 'SLEEPING', 'ROOM', 'TO', 'ONE', 'NEARER', 'TO', "GAMEWELL'S", 'CHAMBER'] +61-70970-0013-2255: hyp=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'ALTER', 'HIS', 
'SLEEPING', 'ROOM', 'TO', 'ONE', 'NEARER', 'TO', "GAMEWELL'S", 'CHAMBER'] +61-70970-0014-2256: ref=['PRESENTLY', 'HE', 'CROSSED', 'THE', 'FLOOR', 'OF', 'HIS', 'ROOM', 'WITH', 'DECIDED', 'STEP'] +61-70970-0014-2256: hyp=['PRESENTLY', 'HE', 'CROSSED', 'THE', 'FLOOR', 'OF', 'HIS', 'ROOM', 'WITH', 'DECIDED', 'STEP'] +61-70970-0015-2257: ref=['WILL', 'CRIED', 'HE', 'SOFTLY', 'AND', 'STUTELEY', 'WHO', 'HAD', 'CHOSEN', 'HIS', 'COUCH', 'ACROSS', 'THE', 'DOOR', 'OF', 'HIS', 'YOUNG', "MASTER'S", 'CHAMBER', 'SPRANG', 'UP', 'AT', 'ONCE', 'IN', 'ANSWER'] +61-70970-0015-2257: hyp=['WILL', 'CRIED', 'HE', 'SOFTLY', 'AND', 'STUTELEY', 'WHO', 'HAD', 'CHOSEN', 'HIS', 'COUCH', 'ACROSS', 'THE', 'DOOR', 'OF', 'HIS', 'YOUNG', "MASTER'S", 'CHAMBER', 'SPRANG', 'UP', 'AT', 'ONCE', 'IN', 'ANSWER'] +61-70970-0016-2258: ref=['WE', 'WILL', 'GO', 'OUT', 'TOGETHER', 'TO', 'THE', 'BOWER', 'THERE', 'IS', 'A', 'WAY', 'DOWN', 'TO', 'THE', 'COURT', 'FROM', 'MY', 'WINDOW'] +61-70970-0016-2258: hyp=['WE', 'WILL', 'GO', 'OUT', 'TOGETHER', 'TO', 'THE', 'BOWER', 'THERE', 'IS', 'A', 'WAY', 'DOWN', 'TO', 'THE', 'COURT', 'FROM', 'MY', 'WINDOW'] +61-70970-0017-2259: ref=['REST', 'AND', 'BE', 'STILL', 'UNTIL', 'I', 'WARN', 'YOU'] +61-70970-0017-2259: hyp=['REST', 'AND', 'BE', 'STILL', 'UNTIL', 'I', 'WARN', 'YOU'] +61-70970-0018-2260: ref=['THE', 'HOURS', 'PASSED', 'WEARILY', 'BY', 'AND', 'MOVEMENT', 'COULD', 'YET', 'BE', 'HEARD', 'ABOUT', 'THE', 'HALL'] +61-70970-0018-2260: hyp=['THE', 'HOURS', 'PASSED', 'WEARILY', 'BY', 'AND', 'MOVEMENT', 'COULD', 'YET', 'BE', 'HEARD', 'ABOUT', 'THE', 'HALL'] +61-70970-0019-2261: ref=['AT', 'LAST', 'ALL', 'WAS', 'QUIET', 'AND', 'BLACK', 'IN', 'THE', 'COURTYARD', 'OF', 'GAMEWELL'] +61-70970-0019-2261: hyp=['AT', 'LAST', 'ALL', 'WAS', 'QUIET', 'AND', 'BLACK', 'IN', 'THE', 'COURTYARD', 'OF', 'GAMEWELL'] +61-70970-0020-2262: ref=['WILL', 'WHISPERED', 'ROBIN', 'OPENING', 'HIS', 'DOOR', 'AS', 'HE', 'SPOKE', 'ARE', 'YOU', 'READY'] +61-70970-0020-2262: hyp=['WILL', 'WHISPERED', 'ROBIN', 'OPENING', 'HIS', 'DOOR', 'AS', 'HE', 'SPOKE', 'ARE', 'YOU', 'READY'] +61-70970-0021-2263: ref=['THEY', 'THEN', 'RENEWED', 'THEIR', 'JOURNEY', 'AND', 'UNDER', 'THE', 'BETTER', 'LIGHT', 'MADE', 'A', 'SAFE', 'CROSSING', 'OF', 'THE', 'STABLE', 'ROOFS'] +61-70970-0021-2263: hyp=['THEY', 'THEN', 'RENEWED', 'THEIR', 'JOURNEY', 'AND', 'UNDER', 'THE', 'BETTER', 'LIGHT', 'MADE', 'A', 'SAFE', 'CROSSING', 'OF', 'THE', 'STABLE', 'ROOFS'] +61-70970-0022-2264: ref=['ROBIN', 'ENTERED', 'THE', 'HUT', 'DRAGGING', 'THE', 'UNWILLING', 'ESQUIRE', 'AFTER', 'HIM'] +61-70970-0022-2264: hyp=['ROBIN', 'ENTERED', 'THE', 'HUT', 'DRAGGING', 'THE', 'UNWILLING', 'ESQUIRE', 'AFTER', 'HIM'] +61-70970-0023-2265: ref=['BE', 'NOT', 'SO', 'FOOLISH', 'FRIEND', 'SAID', 'FITZOOTH', 'CROSSLY'] +61-70970-0023-2265: hyp=['BE', 'NOT', 'SO', 'FOOLISH', 'FRIEND', 'SAID', 'FITZOOTH', 'CROSSLY'] +61-70970-0024-2266: ref=['THEY', 'MOVED', 'THEREAFTER', 'CAUTIOUSLY', 'ABOUT', 'THE', 'HUT', 'GROPING', 'BEFORE', 'AND', 'ABOUT', 'THEM', 'TO', 'FIND', 'SOMETHING', 'TO', 'SHOW', 'THAT', 'WARRENTON', 'HAD', 'FULFILLED', 'HIS', 'MISSION'] +61-70970-0024-2266: hyp=['THEY', 'MOVED', 'THEREAFTER', 'CAUTIOUSLY', 'ABOUT', 'THE', 'HUT', 'GROPING', 'BEFORE', 'AND', 'ABOUT', 'THEM', 'TO', 'FIND', 'SOMETHING', 'TO', 'SHOW', 'THAT', 'THE', 'WARRENTON', 'HAD', 'FULFILLED', 'HIS', 'MISSION'] +61-70970-0025-2267: ref=['THEY', 'WERE', 'UPON', 'THE', 'VERGE', 'OF', 'AN', 'OPEN', 'TRAP', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'HUT', 'AND', 'STUTELEY', 'HAD', 'TRIPPED', 'OVER', 'THE', 'EDGE', 
'OF', 'THE', 'REVERSED', 'FLAP', 'MOUTH', 'OF', 'THIS', 'PIT'] +61-70970-0025-2267: hyp=['THEY', 'WERE', 'UPON', 'THE', 'VERGE', 'OF', 'AN', 'OPEN', 'TRAP', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'HUT', 'AND', 'STUTELEY', 'HAD', 'TRIPPED', 'OVER', 'THE', 'EDGE', 'OF', 'THE', 'REVERSED', 'FLAP', 'MOUTH', 'OF', 'THIS', 'PIT'] +61-70970-0026-2268: ref=["FITZOOTH'S", 'HAND', 'RESTED', 'AT', 'LAST', 'UPON', 'THE', 'TOP', 'RUNG', 'OF', 'A', 'LADDER', 'AND', 'SLOWLY', 'THE', 'TRUTH', 'CAME', 'TO', 'HIM'] +61-70970-0026-2268: hyp=["FITZOOTH'S", 'HAND', 'RESTED', 'AT', 'LAST', 'UPON', 'THE', 'TOPRUNG', 'OF', 'A', 'LADDER', 'AND', 'SLOWLY', 'THE', 'TRUTH', 'CAME', 'TO', 'HIM'] +61-70970-0027-2269: ref=['ROBIN', 'CAREFULLY', 'DESCENDED', 'THE', 'LADDER', 'AND', 'FOUND', 'HIMSELF', 'SOON', 'UPON', 'FIRM', 'ROCKY', 'GROUND'] +61-70970-0027-2269: hyp=['ROBIN', 'CAREFULLY', 'DESCENDED', 'THE', 'LADDER', 'AND', 'FOUND', 'HIMSELF', 'SOON', 'UPON', 'FIRM', 'ROCKY', 'GROUND'] +61-70970-0028-2270: ref=['STUTELEY', 'WAS', 'BY', 'HIS', 'SIDE', 'IN', 'A', 'FLASH', 'AND', 'THEN', 'THEY', 'BOTH', 'BEGAN', 'FEELING', 'ABOUT', 'THEM', 'TO', 'ASCERTAIN', 'THE', 'SHAPE', 'AND', 'CHARACTER', 'OF', 'THIS', 'VAULT'] +61-70970-0028-2270: hyp=['A', 'STUTELEY', 'WAS', 'BY', 'HIS', 'SIDE', 'IN', 'A', 'FLASH', 'AND', 'THEN', 'THEY', 'BOTH', 'BEGAN', 'FEELING', 'ABOUT', 'THEM', 'TO', 'ASCERTAIN', 'THE', 'SHAPE', 'AND', 'CHARACTER', 'OF', 'THIS', 'VAULT'] +61-70970-0029-2271: ref=['FROM', 'THE', 'BLACKNESS', 'BEHIND', 'THE', 'LIGHT', 'THEY', 'HEARD', 'A', 'VOICE', "WARRENTON'S"] +61-70970-0029-2271: hyp=['FROM', 'THE', 'BLACKNESS', 'BEHIND', 'THE', 'LIGHT', 'THEY', 'HEARD', 'A', 'VOICE', 'WARRENTONS'] +61-70970-0030-2272: ref=['SAVE', 'ME', 'MASTERS', 'BUT', 'YOU', 'STARTLED', 'ME', 'RARELY'] +61-70970-0030-2272: hyp=['SAVE', 'ME', 'MASTERS', 'BUT', 'YOU', 'STARTLED', 'ME', 'RARELY'] +61-70970-0031-2273: ref=['CRIED', 'HE', 'WAVING', 'THE', 'LANTHORN', 'BEFORE', 'HIM', 'TO', 'MAKE', 'SURE', 'THAT', 'THESE', 'WERE', 'NO', 'GHOSTS', 'IN', 'FRONT', 'OF', 'HIM'] +61-70970-0031-2273: hyp=['CRIED', 'HE', 'WAVING', 'THE', 'LANTERN', 'BEFORE', 'HIM', 'TO', 'MAKE', 'SURE', 'THAT', 'THESE', 'WERE', 'NO', 'GHOSTS', 'IN', 'FRONT', 'OF', 'HIM'] +61-70970-0032-2274: ref=['ENQUIRED', 'ROBIN', 'WITH', 'HIS', 'SUSPICIONS', 'STILL', 'UPON', 'HIM'] +61-70970-0032-2274: hyp=['INQUIRED', 'ROBIN', 'WITH', 'HIS', 'SUSPICION', 'STILL', 'UPON', 'HIM'] +61-70970-0033-2275: ref=['TRULY', 'SUCH', 'A', 'HORSE', 'SHOULD', 'BE', 'WORTH', 'MUCH', 'IN', 'NOTTINGHAM', 'FAIR'] +61-70970-0033-2275: hyp=['TRULY', 'SUCH', 'A', 'HORSE', 'WOULD', 'BE', 'WORTH', 'MUCH', 'IN', 'NOTTINGHAM', 'FAIR'] +61-70970-0034-2276: ref=['NAY', 'NAY', 'LORDING', 'ANSWERED', 'WARRENTON', 'WITH', 'A', 'HALF', 'LAUGH'] +61-70970-0034-2276: hyp=['NAY', 'NAY', 'LORDING', 'ANSWERED', 'WARRENTON', 'WITH', 'A', 'HALF', 'LAUGH'] +61-70970-0035-2277: ref=['WARRENTON', 'SPOKE', 'THUS', 'WITH', 'SIGNIFICANCE', 'TO', 'SHOW', 'ROBIN', 'THAT', 'HE', 'WAS', 'NOT', 'TO', 'THINK', "GEOFFREY'S", 'CLAIMS', 'TO', 'THE', 'ESTATE', 'WOULD', 'BE', 'PASSED', 'BY'] +61-70970-0035-2277: hyp=['WARRENTON', 'SPOKE', 'THUS', 'WITH', 'SIGNIFICANCE', 'TO', 'SHOW', 'ROBIN', 'THAT', 'HE', 'WAS', 'NOT', 'TO', 'THINK', "JEFFREY'S", 'CLAIMS', 'TO', 'THE', 'ESTATE', 'WOULD', 'BE', 'PASSED', 'BY'] +61-70970-0036-2278: ref=['ROBIN', 'FITZOOTH', 'SAW', 'THAT', 'HIS', 'DOUBTS', 'OF', 'WARRENTON', 'HAD', 'BEEN', 'UNFAIR', 'AND', 'HE', 'BECAME', 'ASHAMED', 'OF', 'HIMSELF', 'FOR', 'HARBORING', 'THEM'] +61-70970-0036-2278: 
hyp=['ROBIN', 'FITZOOTH', 'SAW', 'THAT', 'HIS', 'DOUBTS', 'OF', 'WARRENTON', 'HAD', 'BEEN', 'UNFAIR', 'AND', 'HE', 'BECAME', 'ASHAMED', 'OF', 'HIMSELF', 'FOR', 'HARBOURING', 'THEM'] +61-70970-0037-2279: ref=['HIS', 'TONES', 'RANG', 'PLEASANTLY', 'ON', "WARRENTON'S", 'EARS', 'AND', 'FORTHWITH', 'A', 'GOOD', 'FELLOWSHIP', 'WAS', 'HERALDED', 'BETWEEN', 'THEM'] +61-70970-0037-2279: hyp=['HIS', 'TONES', 'RANG', 'PLEASANTLY', "UNWARRANTON'S", 'EARS', 'AND', 'FORTHWITH', 'THE', 'GOOD', 'FELLOWSHIP', 'WAS', 'HERALDED', 'BETWEEN', 'THEM'] +61-70970-0038-2280: ref=['THE', 'OLD', 'SERVANT', 'TOLD', 'HIM', 'QUIETLY', 'AS', 'THEY', 'CREPT', 'BACK', 'TO', 'GAMEWELL', 'THAT', 'THIS', 'PASSAGE', 'WAY', 'LED', 'FROM', 'THE', 'HUT', 'IN', 'THE', 'PLEASANCE', 'TO', 'SHERWOOD', 'AND', 'THAT', 'GEOFFREY', 'FOR', 'THE', 'TIME', 'WAS', 'HIDING', 'WITH', 'THE', 'OUTLAWS', 'IN', 'THE', 'FOREST'] +61-70970-0038-2280: hyp=['THE', 'OLD', 'SERVANT', 'TOLD', 'HIM', 'QUIETLY', 'AS', 'THEY', 'CREPT', 'BACK', 'TO', 'GAMEWELL', 'THAT', 'THIS', 'PASSAGEWAY', 'LED', 'FROM', 'THE', 'HUT', 'IN', 'THE', 'PLEASANTS', 'TO', 'SHERWOOD', 'AND', 'THAT', 'JEFFREY', 'FOR', 'THE', 'TIME', 'WAS', 'HIDING', 'WITH', 'THE', 'OUTLAWS', 'IN', 'THE', 'FOREST'] +61-70970-0039-2281: ref=['HE', 'IMPLORES', 'US', 'TO', 'BE', 'DISCREET', 'AS', 'THE', 'GRAVE', 'IN', 'THIS', 'MATTER', 'FOR', 'IN', 'SOOTH', 'HIS', 'LIFE', 'IS', 'IN', 'THE', 'HOLLOW', 'OF', 'OUR', 'HANDS'] +61-70970-0039-2281: hyp=['HE', 'IMPLIES', 'US', 'TO', 'BE', 'DISCREET', 'AS', 'THE', 'GRAVE', 'IN', 'THIS', 'MATTER', 'FOR', 'IN', 'SOOTH', 'HIS', 'LIFE', 'IS', 'IN', 'THE', 'HOLLOW', 'OF', 'OUR', 'HANDS'] +61-70970-0040-2282: ref=['THEY', 'REGAINED', 'THEIR', 'APARTMENT', 'APPARENTLY', 'WITHOUT', 'DISTURBING', 'THE', 'HOUSEHOLD', 'OF', 'GAMEWELL'] +61-70970-0040-2282: hyp=['THEY', 'REGAIN', 'THEIR', 'APARTMENT', 'APPARENTLY', 'WITHOUT', 'DISTURBING', 'THE', 'HOUSEHOLD', 'OF', 'GAMEWELL'] +672-122797-0000-1529: ref=['OUT', 'IN', 'THE', 'WOODS', 'STOOD', 'A', 'NICE', 'LITTLE', 'FIR', 'TREE'] +672-122797-0000-1529: hyp=['OUT', 'IN', 'THE', 'WOOD', 'STOOD', 'A', 'NICE', 'LITTLE', 'FIR', 'TREE'] +672-122797-0001-1530: ref=['THE', 'PLACE', 'HE', 'HAD', 'WAS', 'A', 'VERY', 'GOOD', 'ONE', 'THE', 'SUN', 'SHONE', 'ON', 'HIM', 'AS', 'TO', 'FRESH', 'AIR', 'THERE', 'WAS', 'ENOUGH', 'OF', 'THAT', 'AND', 'ROUND', 'HIM', 'GREW', 'MANY', 'LARGE', 'SIZED', 'COMRADES', 'PINES', 'AS', 'WELL', 'AS', 'FIRS'] +672-122797-0001-1530: hyp=['THE', 'PLACE', 'HE', 'HAD', 'WAS', 'A', 'VERY', 'GOOD', 'ONE', 'THE', 'SUN', 'SHONE', 'ON', 'HIM', 'AS', 'TO', 'FRESH', 'AIR', 'THERE', 'WAS', 'ENOUGH', 'OF', 'THAT', 'AND', 'ROUND', 'HIM', 'GREW', 'MANY', 'LARGE', 'SIZED', 'COMRADES', 'PINES', 'AS', 'WELL', 'AS', 'FURS'] +672-122797-0002-1531: ref=['HE', 'DID', 'NOT', 'THINK', 'OF', 'THE', 'WARM', 'SUN', 'AND', 'OF', 'THE', 'FRESH', 'AIR', 'HE', 'DID', 'NOT', 'CARE', 'FOR', 'THE', 'LITTLE', 'COTTAGE', 'CHILDREN', 'THAT', 'RAN', 'ABOUT', 'AND', 'PRATTLED', 'WHEN', 'THEY', 'WERE', 'IN', 'THE', 'WOODS', 'LOOKING', 'FOR', 'WILD', 'STRAWBERRIES'] +672-122797-0002-1531: hyp=['HE', 'DID', 'NOT', 'THINK', 'OF', 'THE', 'WARM', 'SUN', 'AND', 'OF', 'THE', 'FRESH', 'AIR', 'HE', 'DID', 'NOT', 'CARE', 'FOR', 'THE', 'LITTLE', 'COTTAGE', 'CHILDREN', 'THAT', 'RAN', 'ABOUT', 'IN', 'PRATTLED', 'WHEN', 'THEY', 'WERE', 'IN', 'THE', 'WOODS', 'LOOKING', 'FOR', 'WILD', 'STRAWBERRIES'] +672-122797-0003-1532: ref=['BUT', 'THIS', 'WAS', 'WHAT', 'THE', 'TREE', 'COULD', 'NOT', 'BEAR', 'TO', 'HEAR'] +672-122797-0003-1532: hyp=['BUT', 'THIS', 'WAS', 
'WHAT', 'THE', 'TREE', 'COULD', 'NOT', 'BEAR', 'TO', 'HEAR'] +672-122797-0004-1533: ref=['IN', 'WINTER', 'WHEN', 'THE', 'SNOW', 'LAY', 'GLITTERING', 'ON', 'THE', 'GROUND', 'A', 'HARE', 'WOULD', 'OFTEN', 'COME', 'LEAPING', 'ALONG', 'AND', 'JUMP', 'RIGHT', 'OVER', 'THE', 'LITTLE', 'TREE'] +672-122797-0004-1533: hyp=['IN', 'WINTER', 'WHEN', 'THE', 'SNOW', 'LAY', 'GLITTERING', 'ON', 'THE', 'GROUND', 'A', 'HARE', 'WOULD', 'OFTEN', 'COME', 'LEAPING', 'ALONG', 'AND', 'JUMP', 'RIGHT', 'OVER', 'THE', 'LITTLE', 'TREE'] +672-122797-0005-1534: ref=['OH', 'THAT', 'MADE', 'HIM', 'SO', 'ANGRY'] +672-122797-0005-1534: hyp=['OH', 'THAT', 'MADE', 'HIM', 'SO', 'ANGRY'] +672-122797-0006-1535: ref=['TO', 'GROW', 'AND', 'GROW', 'TO', 'GET', 'OLDER', 'AND', 'BE', 'TALL', 'THOUGHT', 'THE', 'TREE', 'THAT', 'AFTER', 'ALL', 'IS', 'THE', 'MOST', 'DELIGHTFUL', 'THING', 'IN', 'THE', 'WORLD'] +672-122797-0006-1535: hyp=['TO', 'GROW', 'AND', 'GROW', 'TO', 'GET', 'OLDER', 'AND', 'BE', 'TALL', 'THOUGHT', 'THE', 'TREE', 'THAT', 'AFTER', 'ALL', 'IS', 'THE', 'MOST', 'DELIGHTFUL', 'THING', 'IN', 'THE', 'WORLD'] +672-122797-0007-1536: ref=['IN', 'AUTUMN', 'THE', 'WOOD', 'CUTTERS', 'ALWAYS', 'CAME', 'AND', 'FELLED', 'SOME', 'OF', 'THE', 'LARGEST', 'TREES'] +672-122797-0007-1536: hyp=['IN', 'AUTUMN', 'THE', 'WOODCUTTERS', 'ALWAYS', 'CAME', 'AND', 'FELLED', 'SOME', 'OF', 'THE', 'LARGEST', 'TREES'] +672-122797-0008-1537: ref=['THIS', 'HAPPENED', 'EVERY', 'YEAR', 'AND', 'THE', 'YOUNG', 'FIR', 'TREE', 'THAT', 'HAD', 'NOW', 'GROWN', 'TO', 'A', 'VERY', 'COMELY', 'SIZE', 'TREMBLED', 'AT', 'THE', 'SIGHT', 'FOR', 'THE', 'MAGNIFICENT', 'GREAT', 'TREES', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'NOISE', 'AND', 'CRACKING', 'THE', 'BRANCHES', 'WERE', 'LOPPED', 'OFF', 'AND', 'THE', 'TREES', 'LOOKED', 'LONG', 'AND', 'BARE', 'THEY', 'WERE', 'HARDLY', 'TO', 'BE', 'RECOGNISED', 'AND', 'THEN', 'THEY', 'WERE', 'LAID', 'IN', 'CARTS', 'AND', 'THE', 'HORSES', 'DRAGGED', 'THEM', 'OUT', 'OF', 'THE', 'WOOD'] +672-122797-0008-1537: hyp=['THIS', 'HAPPENED', 'EVERY', 'YEAR', 'AND', 'THE', 'YOUNG', 'FIR', 'TREE', 'THAT', 'HAD', 'NOW', 'GROWN', 'TO', 'A', 'VERY', 'COMELY', 'SIZED', 'TREMBLED', 'AT', 'THE', 'SIGHT', 'FOR', 'THE', 'MAGNIFICENT', 'GREAT', 'TREES', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'NOISE', 'AND', 'CRACKING', 'THE', 'BRANCHES', 'WERE', 'LOPPED', 'OFF', 'AND', 'THE', 'TREES', 'LOOKED', 'LONG', 'AND', 'BARE', 'THEY', 'WERE', 'HARDLY', 'TO', 'BE', 'RECOGNIZED', 'AND', 'THEN', 'THEY', 'WERE', 'LAID', 'IN', 'CARTS', 'AND', 'THE', 'HORSES', 'DRAGGED', 'THEM', 'OUT', 'OF', 'THE', 'WOOD'] +672-122797-0009-1538: ref=['HAVE', 'YOU', 'NOT', 'MET', 'THEM', 'ANYWHERE'] +672-122797-0009-1538: hyp=['HAVE', 'YOU', 'NOT', 'MET', 'THE', 'MANYWHERE'] +672-122797-0010-1539: ref=['REJOICE', 'IN', 'THY', 'GROWTH', 'SAID', 'THE', 'SUNBEAMS'] +672-122797-0010-1539: hyp=['REJOICE', 'IN', 'THY', 'GROWTH', 'SAID', 'THE', 'SUNBEAMS'] +672-122797-0011-1540: ref=['AND', 'THEN', 'WHAT', 'HAPPENS', 'THEN'] +672-122797-0011-1540: hyp=['AND', 'THEN', 'WHAT', 'HAPPENS', 'THEN'] +672-122797-0012-1541: ref=['I', 'WOULD', 'FAIN', 'KNOW', 'IF', 'I', 'AM', 'DESTINED', 'FOR', 'SO', 'GLORIOUS', 'A', 'CAREER', 'CRIED', 'THE', 'TREE', 'REJOICING'] +672-122797-0012-1541: hyp=['I', 'WOULD', 'FAIN', 'KNOW', 'IF', 'I', 'AM', 'DESTINED', 'FOR', 'SO', 'GLORIOUS', 'A', 'CAREER', 'CRIED', 'THE', 'TREE', 'REJOICING'] +672-122797-0013-1542: ref=['I', 'AM', 'NOW', 'TALL', 'AND', 'MY', 'BRANCHES', 'SPREAD', 'LIKE', 'THE', 'OTHERS', 'THAT', 'WERE', 'CARRIED', 'OFF', 'LAST', 'YEAR', 'OH'] 
+672-122797-0013-1542: hyp=['I', 'AM', 'NOW', 'TALL', 'AND', 'MY', 'BRANCHES', 'SPREAD', 'LIKE', 'THE', 'OTHERS', 'THAT', 'WERE', 'CARRIED', 'OFF', 'LAST', 'YEAR', 'OH'] +672-122797-0014-1543: ref=['WERE', 'I', 'BUT', 'ALREADY', 'ON', 'THE', 'CART'] +672-122797-0014-1543: hyp=['WERE', 'I', 'BUT', 'ALREADY', 'ON', 'THE', 'CART'] +672-122797-0015-1544: ref=['WERE', 'I', 'IN', 'THE', 'WARM', 'ROOM', 'WITH', 'ALL', 'THE', 'SPLENDOR', 'AND', 'MAGNIFICENCE'] +672-122797-0015-1544: hyp=['WHERE', 'I', 'IN', 'THE', 'WARM', 'ROOM', 'WITH', 'ALL', 'BUT', 'SPLENDOUR', 'AND', 'MAGNIFICENCE'] +672-122797-0016-1545: ref=['YES', 'THEN', 'SOMETHING', 'BETTER', 'SOMETHING', 'STILL', 'GRANDER', 'WILL', 'SURELY', 'FOLLOW', 'OR', 'WHEREFORE', 'SHOULD', 'THEY', 'THUS', 'ORNAMENT', 'ME'] +672-122797-0016-1545: hyp=['YES', 'AND', 'SOMETHING', 'BETTER', 'SOMETHING', 'STILL', 'GRANDER', 'WILL', 'SURELY', 'FOLLOW', 'OR', 'WHEREFORE', 'SHOULD', 'THEY', 'THUS', 'ORNAMENT', 'ME'] +672-122797-0017-1546: ref=['SOMETHING', 'BETTER', 'SOMETHING', 'STILL', 'GRANDER', 'MUST', 'FOLLOW', 'BUT', 'WHAT'] +672-122797-0017-1546: hyp=['SOMETHING', 'BETTER', 'OR', 'SOME', 'THING', 'STILL', 'GRANDER', 'MUST', 'FOLLOW', 'BUT', 'WHAT'] +672-122797-0018-1547: ref=['REJOICE', 'IN', 'OUR', 'PRESENCE', 'SAID', 'THE', 'AIR', 'AND', 'THE', 'SUNLIGHT'] +672-122797-0018-1547: hyp=['REJOICE', 'IN', 'OUR', 'PRESENCE', 'SAID', 'THE', 'HEIR', 'IN', 'THE', 'SUNLIGHT'] +672-122797-0019-1548: ref=['REJOICE', 'IN', 'THY', 'OWN', 'FRESH', 'YOUTH'] +672-122797-0019-1548: hyp=['REJOICE', 'IN', 'THY', 'OWN', 'FRESH', 'YOUTH'] +672-122797-0020-1549: ref=['BUT', 'THE', 'TREE', 'DID', 'NOT', 'REJOICE', 'AT', 'ALL', 'HE', 'GREW', 'AND', 'GREW', 'AND', 'WAS', 'GREEN', 'BOTH', 'WINTER', 'AND', 'SUMMER'] +672-122797-0020-1549: hyp=['BUT', 'THE', 'TREE', 'DID', 'NOT', 'REJOICE', 'AT', 'ALL', 'HE', 'GREW', 'AND', 'GREW', 'AND', 'WAS', 'GREEN', 'BOTH', 'WINTER', 'AND', 'SUMMER'] +672-122797-0021-1550: ref=['AND', 'TOWARDS', 'CHRISTMAS', 'HE', 'WAS', 'ONE', 'OF', 'THE', 'FIRST', 'THAT', 'WAS', 'CUT', 'DOWN'] +672-122797-0021-1550: hyp=['AND', 'TOWARDS', 'CHRISTMAS', 'HE', 'WAS', 'ONE', 'OF', 'THE', 'FIRST', 'THAT', 'WAS', 'CUT', 'DOWN'] +672-122797-0022-1551: ref=['THE', 'AXE', 'STRUCK', 'DEEP', 'INTO', 'THE', 'VERY', 'PITH', 'THE', 'TREE', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'A', 'SIGH', 'HE', 'FELT', 'A', 'PANG', 'IT', 'WAS', 'LIKE', 'A', 'SWOON', 'HE', 'COULD', 'NOT', 'THINK', 'OF', 'HAPPINESS', 'FOR', 'HE', 'WAS', 'SORROWFUL', 'AT', 'BEING', 'SEPARATED', 'FROM', 'HIS', 'HOME', 'FROM', 'THE', 'PLACE', 'WHERE', 'HE', 'HAD', 'SPRUNG', 'UP'] +672-122797-0022-1551: hyp=['THE', 'AXE', 'STRUCK', 'DEEP', 'INTO', 'THE', 'VERY', 'PITH', 'THE', 'TREE', 'FELL', 'TO', 'THE', 'EARTH', 'WITH', 'A', 'SIGH', 'HE', 'FELT', 'A', 'PANG', 'IT', 'WAS', 'LIKE', 'A', 'SWOON', 'HE', 'COULD', 'NOT', 'THINK', 'OF', 'HAPPINESS', 'FOR', 'HE', 'WAS', 'SORROWFUL', 'AT', 'BEING', 'SEPARATED', 'FROM', 'HIS', 'HOME', 'FROM', 'THE', 'PLACE', 'WHERE', 'HE', 'HAD', 'SPRUNG', 'UP'] +672-122797-0023-1552: ref=['HE', 'WELL', 'KNEW', 'THAT', 'HE', 'SHOULD', 'NEVER', 'SEE', 'HIS', 'DEAR', 'OLD', 'COMRADES', 'THE', 'LITTLE', 'BUSHES', 'AND', 'FLOWERS', 'AROUND', 'HIM', 'ANYMORE', 'PERHAPS', 'NOT', 'EVEN', 'THE', 'BIRDS'] +672-122797-0023-1552: hyp=['HE', 'WELL', 'KNEW', 'THAT', 'HE', 'SHOULD', 'NEVER', 'SEE', 'HIS', 'DEAR', 'OLD', 'COMRADES', 'THE', 'LITTLE', 'BUSHES', 'AND', 'FLOWERS', 'AROUND', 'HIM', 'ANY', 'MORE', 'PERHAPS', 'NOT', 'EVEN', 'THE', 'BIRDS'] +672-122797-0024-1553: ref=['THE', 
'DEPARTURE', 'WAS', 'NOT', 'AT', 'ALL', 'AGREEABLE'] +672-122797-0024-1553: hyp=['THE', 'DEPARTURE', 'WAS', 'NOT', 'AT', 'ALL', 'AGREEABLE'] +672-122797-0025-1554: ref=['THE', 'TREE', 'ONLY', 'CAME', 'TO', 'HIMSELF', 'WHEN', 'HE', 'WAS', 'UNLOADED', 'IN', 'A', 'COURT', 'YARD', 'WITH', 'THE', 'OTHER', 'TREES', 'AND', 'HEARD', 'A', 'MAN', 'SAY', 'THAT', 'ONE', 'IS', 'SPLENDID', 'WE', "DON'T", 'WANT', 'THE', 'OTHERS'] +672-122797-0025-1554: hyp=['THE', 'TREE', 'ONLY', 'CAME', 'TO', 'HIMSELF', 'WHEN', 'HE', 'WAS', 'UNLOADED', 'IN', 'A', 'COURTYARD', 'WITH', 'THE', 'OTHER', 'TREES', 'AND', 'HEARD', 'A', 'MAN', 'SAY', 'THAT', 'ONE', 'IS', 'SPLENDID', 'WE', "DON'T", 'WANT', 'THE', 'OTHERS'] +672-122797-0026-1555: ref=['THERE', 'TOO', 'WERE', 'LARGE', 'EASY', 'CHAIRS', 'SILKEN', 'SOFAS', 'LARGE', 'TABLES', 'FULL', 'OF', 'PICTURE', 'BOOKS', 'AND', 'FULL', 'OF', 'TOYS', 'WORTH', 'HUNDREDS', 'AND', 'HUNDREDS', 'OF', 'CROWNS', 'AT', 'LEAST', 'THE', 'CHILDREN', 'SAID', 'SO'] +672-122797-0026-1555: hyp=['THERE', 'TOO', 'WERE', 'LARGE', 'EASY', 'CHAIRS', 'SILKEN', 'SOFAS', 'LARGE', 'TABLES', 'FULL', 'OF', 'PICTURE', 'BOOKS', 'AND', 'FULL', 'OF', 'TOYS', 'WORTH', 'HUNDREDS', 'AND', 'HUNDREDS', 'OF', 'CROWNS', 'AT', 'LEAST', 'THE', 'CHILDREN', 'SAID', 'SO'] +672-122797-0027-1556: ref=['THE', 'SERVANTS', 'AS', 'WELL', 'AS', 'THE', 'YOUNG', 'LADIES', 'DECORATED', 'IT'] +672-122797-0027-1556: hyp=['THE', 'SERVANTS', 'AS', 'WELL', 'AS', 'THE', 'YOUNG', 'LADIES', 'DECORATED', 'IT'] +672-122797-0028-1557: ref=['THIS', 'EVENING', 'THEY', 'ALL', 'SAID'] +672-122797-0028-1557: hyp=['THIS', 'EVENING', 'THEY', 'ALL', 'SAID'] +672-122797-0029-1558: ref=['HOW', 'IT', 'WILL', 'SHINE', 'THIS', 'EVENING'] +672-122797-0029-1558: hyp=['HOW', 'IT', 'WILL', 'SHINE', 'THIS', 'EVENING'] +672-122797-0030-1559: ref=['PERHAPS', 'THE', 'OTHER', 'TREES', 'FROM', 'THE', 'FOREST', 'WILL', 'COME', 'TO', 'LOOK', 'AT', 'ME'] +672-122797-0030-1559: hyp=['PERHAPS', 'THE', 'OTHER', 'TREES', 'FROM', 'THE', 'FOREST', 'WILL', 'COME', 'TO', 'LOOK', 'AT', 'ME'] +672-122797-0031-1560: ref=['IT', 'BLAZED', 'UP', 'FAMOUSLY', 'HELP', 'HELP'] +672-122797-0031-1560: hyp=['IT', 'BLAZED', 'UP', 'FAMOUSLY', 'HELP', 'HELP'] +672-122797-0032-1561: ref=['CRIED', 'THE', 'YOUNG', 'LADIES', 'AND', 'THEY', 'QUICKLY', 'PUT', 'OUT', 'THE', 'FIRE'] +672-122797-0032-1561: hyp=['CRIED', 'THE', 'YOUNG', 'LADIES', 'AND', 'THEY', 'QUICKLY', 'PUT', 'OUT', 'THE', 'FIRE'] +672-122797-0033-1562: ref=['A', 'STORY'] +672-122797-0033-1562: hyp=['A', 'STORY'] +672-122797-0034-1563: ref=['A', 'STORY', 'CRIED', 'THE', 'CHILDREN', 'DRAWING', 'A', 'LITTLE', 'FAT', 'MAN', 'TOWARDS', 'THE', 'TREE'] +672-122797-0034-1563: hyp=['A', 'STORY', 'CRIED', 'THE', 'CHILDREN', 'DRAWING', 'A', 'LITTLE', 'FAT', 'MAN', 'TOWARDS', 'THE', 'TREE'] +672-122797-0035-1564: ref=['BUT', 'I', 'SHALL', 'TELL', 'ONLY', 'ONE', 'STORY'] +672-122797-0035-1564: hyp=['BUT', 'I', 'SHALL', 'TELL', 'ONLY', 'ONE', 'STORY'] +672-122797-0036-1565: ref=['HUMPY', 'DUMPY', 'FELL', 'DOWNSTAIRS', 'AND', 'YET', 'HE', 'MARRIED', 'THE', 'PRINCESS'] +672-122797-0036-1565: hyp=['HUMPY', "DON'T", 'BE', 'FELL', 'DOWNSTAIRS', 'AND', 'YET', 'HE', 'MARRIED', 'THE', 'PRINCESS'] +672-122797-0037-1566: ref=["THAT'S", 'THE', 'WAY', 'OF', 'THE', 'WORLD'] +672-122797-0037-1566: hyp=["THAT'S", 'THE', 'WAY', 'OF', 'THE', 'WORLD'] +672-122797-0038-1567: ref=['THOUGHT', 'THE', 'FIR', 'TREE', 'AND', 'BELIEVED', 'IT', 'ALL', 'BECAUSE', 'THE', 'MAN', 'WHO', 'TOLD', 'THE', 'STORY', 'WAS', 'SO', 'GOOD', 'LOOKING', 'WELL', 'WELL'] 
+672-122797-0038-1567: hyp=['THOUGHT', 'THE', 'FIR', 'TREE', 'AND', 'BELIEVED', 'IT', 'ALL', 'BECAUSE', 'THE', 'MAN', 'WHO', 'TOLD', 'THE', 'STORY', 'WAS', 'SO', 'GOOD', 'LOOKING', 'WELL', 'WELL'] +672-122797-0039-1568: ref=['I', "WON'T", 'TREMBLE', 'TO', 'MORROW', 'THOUGHT', 'THE', 'FIR', 'TREE'] +672-122797-0039-1568: hyp=['I', "WON'T", 'TREMBLE', 'TO', 'MORROW', 'THOUGHT', 'THE', 'FIR', 'TREE'] +672-122797-0040-1569: ref=['AND', 'THE', 'WHOLE', 'NIGHT', 'THE', 'TREE', 'STOOD', 'STILL', 'AND', 'IN', 'DEEP', 'THOUGHT'] +672-122797-0040-1569: hyp=['AND', 'THE', 'WHOLE', 'NIGHT', 'THE', 'TREE', 'STOOD', 'STILL', 'AND', 'IN', 'DEEP', 'THOUGHT'] +672-122797-0041-1570: ref=['IN', 'THE', 'MORNING', 'THE', 'SERVANT', 'AND', 'THE', 'HOUSEMAID', 'CAME', 'IN'] +672-122797-0041-1570: hyp=['IN', 'THE', 'MORNING', 'THE', 'SERVANT', 'AND', 'THE', 'HOUSEMAID', 'CAME', 'IN'] +672-122797-0042-1571: ref=['BUT', 'THEY', 'DRAGGED', 'HIM', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'UP', 'THE', 'STAIRS', 'INTO', 'THE', 'LOFT', 'AND', 'HERE', 'IN', 'A', 'DARK', 'CORNER', 'WHERE', 'NO', 'DAYLIGHT', 'COULD', 'ENTER', 'THEY', 'LEFT', 'HIM'] +672-122797-0042-1571: hyp=['BUT', 'THEY', 'DRAGGED', 'HIM', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'UP', 'THE', 'STAIRS', 'INTO', 'THE', 'LOFT', 'AND', 'HERE', 'IT', 'A', 'DARK', 'CORNER', 'WHERE', 'NO', 'DAYLIGHT', 'COULD', 'ENTER', 'THEY', 'LEFT', 'HIM'] +672-122797-0043-1572: ref=["WHAT'S", 'THE', 'MEANING', 'OF', 'THIS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0043-1572: hyp=["WHAT'S", 'THE', 'MEANING', 'OF', 'THIS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0044-1573: ref=['AND', 'HE', 'LEANED', 'AGAINST', 'THE', 'WALL', 'LOST', 'IN', 'REVERIE'] +672-122797-0044-1573: hyp=['AND', 'HE', 'LEANED', 'AGAINST', 'THE', 'WALL', 'LOST', 'IN', 'REVERIE'] +672-122797-0045-1574: ref=['TIME', 'ENOUGH', 'HAD', 'HE', 'TOO', 'FOR', 'HIS', 'REFLECTIONS', 'FOR', 'DAYS', 'AND', 'NIGHTS', 'PASSED', 'ON', 'AND', 'NOBODY', 'CAME', 'UP', 'AND', 'WHEN', 'AT', 'LAST', 'SOMEBODY', 'DID', 'COME', 'IT', 'WAS', 'ONLY', 'TO', 'PUT', 'SOME', 'GREAT', 'TRUNKS', 'IN', 'A', 'CORNER', 'OUT', 'OF', 'THE', 'WAY'] +672-122797-0045-1574: hyp=['TIME', 'ENOUGH', 'HAD', 'HE', 'TOO', 'FOR', 'HIS', 'REFLECTIONS', 'FOR', 'DAYS', 'AND', 'NIGHTS', 'PASSED', 'ON', 'AND', 'NOBODY', 'CAME', 'UP', 'AND', 'WHEN', 'AT', 'LAST', 'SOMEBODY', 'DID', 'COME', 'IT', 'WAS', 'ONLY', 'TO', 'PUT', 'SOME', 'GREAT', 'TRUNKS', 'IN', 'A', 'CORNER', 'OUT', 'OF', 'THE', 'WAY'] +672-122797-0046-1575: ref=['TIS', 'NOW', 'WINTER', 'OUT', 'OF', 'DOORS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0046-1575: hyp=['TIS', 'NOW', 'WINTER', 'OUT', 'OF', 'DOORS', 'THOUGHT', 'THE', 'TREE'] +672-122797-0047-1576: ref=['HOW', 'KIND', 'MAN', 'IS', 'AFTER', 'ALL'] +672-122797-0047-1576: hyp=['HOW', 'KIND', 'MAN', 'IS', 'AFTER', 'ALL'] +672-122797-0048-1577: ref=['IF', 'IT', 'ONLY', 'WERE', 'NOT', 'SO', 'DARK', 'HERE', 'AND', 'SO', 'TERRIBLY', 'LONELY'] +672-122797-0048-1577: hyp=['IF', 'IT', 'ONLY', 'WERE', 'NOT', 'SO', 'DARK', 'HERE', 'AND', 'SO', 'TERRIBLY', 'LONELY'] +672-122797-0049-1578: ref=['SQUEAK', 'SQUEAK'] +672-122797-0049-1578: hyp=['SQUEAK', 'SQUEAK'] +672-122797-0050-1579: ref=['THEY', 'SNUFFED', 'ABOUT', 'THE', 'FIR', 'TREE', 'AND', 'RUSTLED', 'AMONG', 'THE', 'BRANCHES'] +672-122797-0050-1579: hyp=['THEY', 'SNUFFED', 'ABOUT', 'THE', 'FIR', 'TREE', 'AND', 'RUSTLED', 'AMONG', 'THE', 'BRANCHES'] +672-122797-0051-1580: ref=['I', 'AM', 'BY', 'NO', 'MEANS', 'OLD', 'SAID', 'THE', 'FIR', 'TREE'] +672-122797-0051-1580: hyp=['I', 'AM', 'BY', 'NO', 'MEANS', 'OLD', 'SAID', 
'THE', 'FIR', 'TREE'] +672-122797-0052-1581: ref=["THERE'S", 'MANY', 'A', 'ONE', 'CONSIDERABLY', 'OLDER', 'THAN', 'I', 'AM'] +672-122797-0052-1581: hyp=["THERE'S", 'MANY', 'A', 'ONE', 'CONSIDERABLY', 'OLDER', 'THAN', 'I', 'AM'] +672-122797-0053-1582: ref=['THEY', 'WERE', 'SO', 'EXTREMELY', 'CURIOUS'] +672-122797-0053-1582: hyp=['THEY', 'WERE', 'SO', 'EXTREMELY', 'CURIOUS'] +672-122797-0054-1583: ref=['I', 'KNOW', 'NO', 'SUCH', 'PLACE', 'SAID', 'THE', 'TREE'] +672-122797-0054-1583: hyp=['I', 'KNOW', 'NO', 'SUCH', 'PLACE', 'SAID', 'THE', 'TREE'] +672-122797-0055-1584: ref=['AND', 'THEN', 'HE', 'TOLD', 'ALL', 'ABOUT', 'HIS', 'YOUTH', 'AND', 'THE', 'LITTLE', 'MICE', 'HAD', 'NEVER', 'HEARD', 'THE', 'LIKE', 'BEFORE', 'AND', 'THEY', 'LISTENED', 'AND', 'SAID'] +672-122797-0055-1584: hyp=['AND', 'THEN', 'HE', 'TOLD', 'ALL', 'ABOUT', 'HIS', 'YOUTH', 'AND', 'THE', 'LITTLE', 'MICE', 'HAD', 'NEVER', 'HEARD', 'THE', 'LIKE', 'BEFORE', 'AND', 'THEY', 'LISTENED', 'AND', 'SAID'] +672-122797-0056-1585: ref=['SAID', 'THE', 'FIR', 'TREE', 'THINKING', 'OVER', 'WHAT', 'HE', 'HAD', 'HIMSELF', 'RELATED'] +672-122797-0056-1585: hyp=['SAID', 'THE', 'FUR', 'TREE', 'THINKING', 'OVER', 'WHAT', 'HE', 'HAD', 'HIMSELF', 'RELATED'] +672-122797-0057-1586: ref=['YES', 'IN', 'REALITY', 'THOSE', 'WERE', 'HAPPY', 'TIMES'] +672-122797-0057-1586: hyp=['YES', 'IN', 'REALITY', 'THOSE', 'WERE', 'HAPPY', 'TIMES'] +672-122797-0058-1587: ref=['WHO', 'IS', 'HUMPY', 'DUMPY', 'ASKED', 'THE', 'MICE'] +672-122797-0058-1587: hyp=['WHO', "IT'S", 'HUMPY', 'DUMPEY', 'ASKED', 'THE', 'MICE'] +672-122797-0059-1588: ref=['ONLY', 'THAT', 'ONE', 'ANSWERED', 'THE', 'TREE'] +672-122797-0059-1588: hyp=['ONLY', 'THAT', 'ONE', 'ANSWERED', 'THE', 'TREE'] +672-122797-0060-1589: ref=['IT', 'IS', 'A', 'VERY', 'STUPID', 'STORY'] +672-122797-0060-1589: hyp=['IT', 'IS', 'A', 'VERY', 'STUPID', 'STORY'] +672-122797-0061-1590: ref=["DON'T", 'YOU', 'KNOW', 'ONE', 'ABOUT', 'BACON', 'AND', 'TALLOW', 'CANDLES', "CAN'T", 'YOU', 'TELL', 'ANY', 'LARDER', 'STORIES'] +672-122797-0061-1590: hyp=["DON'T", 'YOU', 'KNOW', 'ONE', 'ABOUT', 'BACON', 'AND', 'TALLOW', 'CANDLES', "CAN'T", 'YOU', 'TELL', 'ANY', 'LARDER', 'STORIES'] +672-122797-0062-1591: ref=['NO', 'SAID', 'THE', 'TREE'] +672-122797-0062-1591: hyp=['NO', 'SAID', 'THE', 'TREE'] +672-122797-0063-1592: ref=['THEN', 'GOOD', 'BYE', 'SAID', 'THE', 'RATS', 'AND', 'THEY', 'WENT', 'HOME'] +672-122797-0063-1592: hyp=['THEN', 'GOOD', 'BYE', 'SAID', 'THE', 'RATS', 'AND', 'THEY', 'WENT', 'HOME'] +672-122797-0064-1593: ref=['AT', 'LAST', 'THE', 'LITTLE', 'MICE', 'STAYED', 'AWAY', 'ALSO', 'AND', 'THE', 'TREE', 'SIGHED', 'AFTER', 'ALL', 'IT', 'WAS', 'VERY', 'PLEASANT', 'WHEN', 'THE', 'SLEEK', 'LITTLE', 'MICE', 'SAT', 'ROUND', 'ME', 'AND', 'LISTENED', 'TO', 'WHAT', 'I', 'TOLD', 'THEM'] +672-122797-0064-1593: hyp=['AT', 'LAST', 'THE', 'LITTLE', 'MICE', 'STAYED', 'AWAY', 'ALSO', 'AND', 'THE', 'TREE', 'SIGHED', 'AFTER', 'ALL', 'IT', 'WAS', 'VERY', 'PLEASANT', 'WHEN', 'THE', 'SLEEK', 'LITTLE', 'MICE', 'SAT', 'ROUND', 'ME', 'AND', 'LISTENED', 'TO', 'WHAT', 'I', 'TOLD', 'THEM'] +672-122797-0065-1594: ref=['NOW', 'THAT', 'TOO', 'IS', 'OVER'] +672-122797-0065-1594: hyp=['NOW', 'THAT', 'TOO', 'IS', 'OVER'] +672-122797-0066-1595: ref=['WHY', 'ONE', 'MORNING', 'THERE', 'CAME', 'A', 'QUANTITY', 'OF', 'PEOPLE', 'AND', 'SET', 'TO', 'WORK', 'IN', 'THE', 'LOFT'] +672-122797-0066-1595: hyp=['WHY', 'ONE', 'MORNING', 'THERE', 'CAME', 'A', 'QUANTITY', 'OF', 'PEOPLE', 'AND', 'SET', 'TO', 'WORK', 'IN', 'THE', 'LOFT'] +672-122797-0067-1596: ref=['THE', 
'TRUNKS', 'WERE', 'MOVED', 'THE', 'TREE', 'WAS', 'PULLED', 'OUT', 'AND', 'THROWN', 'RATHER', 'HARD', 'IT', 'IS', 'TRUE', 'DOWN', 'ON', 'THE', 'FLOOR', 'BUT', 'A', 'MAN', 'DREW', 'HIM', 'TOWARDS', 'THE', 'STAIRS', 'WHERE', 'THE', 'DAYLIGHT', 'SHONE'] +672-122797-0067-1596: hyp=['THE', 'TRUNKS', 'WERE', 'MOVED', 'THE', 'TREE', 'WAS', 'PULLED', 'OUT', 'AND', 'THROWN', 'RATHER', 'HARD', 'IT', 'IS', 'TRUE', 'DOWN', 'ON', 'THE', 'FLOOR', 'BUT', 'A', 'MAN', 'DREW', 'HIM', 'TOWARDS', 'THE', 'STAIRS', 'WHERE', 'THE', 'DAYLIGHT', 'SHONE'] +672-122797-0068-1597: ref=['BUT', 'IT', 'WAS', 'NOT', 'THE', 'FIR', 'TREE', 'THAT', 'THEY', 'MEANT'] +672-122797-0068-1597: hyp=['BUT', 'IT', 'WAS', 'NOT', 'THE', 'FIR', 'TREE', 'THAT', 'THEY', 'MEANT'] +672-122797-0069-1598: ref=['IT', 'WAS', 'IN', 'A', 'CORNER', 'THAT', 'HE', 'LAY', 'AMONG', 'WEEDS', 'AND', 'NETTLES'] +672-122797-0069-1598: hyp=['IT', 'WAS', 'IN', 'A', 'CORNER', 'THAT', 'HE', 'LAY', 'AMONG', 'WEEDS', 'AND', 'NETTLES'] +672-122797-0070-1599: ref=['THE', 'GOLDEN', 'STAR', 'OF', 'TINSEL', 'WAS', 'STILL', 'ON', 'THE', 'TOP', 'OF', 'THE', 'TREE', 'AND', 'GLITTERED', 'IN', 'THE', 'SUNSHINE'] +672-122797-0070-1599: hyp=['THE', 'GOLDEN', 'STAR', 'OF', 'TINSEL', 'WAS', 'STILL', 'ON', 'THE', 'TOP', 'OF', 'THE', 'TREE', 'AND', 'GLITTERED', 'IN', 'THE', 'SUNSHINE'] +672-122797-0071-1600: ref=['IN', 'THE', 'COURT', 'YARD', 'SOME', 'OF', 'THE', 'MERRY', 'CHILDREN', 'WERE', 'PLAYING', 'WHO', 'HAD', 'DANCED', 'AT', 'CHRISTMAS', 'ROUND', 'THE', 'FIR', 'TREE', 'AND', 'WERE', 'SO', 'GLAD', 'AT', 'THE', 'SIGHT', 'OF', 'HIM'] +672-122797-0071-1600: hyp=['IN', 'THE', 'COURTYARD', 'SOME', 'OF', 'THE', 'MARRIED', 'CHILDREN', 'WERE', 'PLAYING', 'WHO', 'HAD', 'DANCED', 'AT', 'CHRISTMAS', 'ROUND', 'THE', 'FIR', 'TREE', 'AND', 'WERE', 'SO', 'GLAD', 'AT', 'THE', 'SIGHT', 'OF', 'HIM'] +672-122797-0072-1601: ref=['AND', 'THE', "GARDENER'S", 'BOY', 'CHOPPED', 'THE', 'TREE', 'INTO', 'SMALL', 'PIECES', 'THERE', 'WAS', 'A', 'WHOLE', 'HEAP', 'LYING', 'THERE'] +672-122797-0072-1601: hyp=['AND', 'THE', "GARDENER'S", 'BOY', 'CHOPPED', 'THE', 'TREE', 'INTO', 'SMALL', 'PIECES', 'THERE', 'WAS', 'A', 'WHOLE', 'HEAP', 'LYING', 'THERE'] +672-122797-0073-1602: ref=['THE', 'WOOD', 'FLAMED', 'UP', 'SPLENDIDLY', 'UNDER', 'THE', 'LARGE', 'BREWING', 'COPPER', 'AND', 'IT', 'SIGHED', 'SO', 'DEEPLY'] +672-122797-0073-1602: hyp=['THE', 'WOOD', 'FLAMED', 'UP', 'SPLENDIDLY', 'UNDER', 'THE', 'LARGE', 'BREWING', 'COPPER', 'AND', 'ITS', 'SIDE', 'SO', 'DEEPLY'] +672-122797-0074-1603: ref=['HOWEVER', 'THAT', 'WAS', 'OVER', 'NOW', 'THE', 'TREE', 'GONE', 'THE', 'STORY', 'AT', 'AN', 'END'] +672-122797-0074-1603: hyp=['HOWEVER', 'THAT', 'WAS', 'OVER', 'NOW', 'THE', 'TREE', 'GONE', 'THE', 'STORY', 'AT', 'AN', 'END'] +6829-68769-0000-1858: ref=['KENNETH', 'AND', 'BETH', 'REFRAINED', 'FROM', 'TELLING', 'THE', 'OTHER', 'GIRLS', 'OR', 'UNCLE', 'JOHN', 'OF', 'OLD', 'WILL', "ROGERS'S", 'VISIT', 'BUT', 'THEY', 'GOT', 'MISTER', 'WATSON', 'IN', 'THE', 'LIBRARY', 'AND', 'QUESTIONED', 'HIM', 'CLOSELY', 'ABOUT', 'THE', 'PENALTY', 'FOR', 'FORGING', 'A', 'CHECK'] +6829-68769-0000-1858: hyp=['KENNETH', 'AND', 'BETH', 'REFRAINED', 'FROM', 'TELLING', 'THE', 'OTHER', 'GIRLS', 'OR', 'UNCLE', 'JOHN', 'OF', 'OLD', 'WILL', 'ROGERS', 'VISIT', 'BUT', 'THEY', 'GOT', 'MISTER', 'WATSON', 'IN', 'THE', 'LIBRARY', 'AND', 'QUESTIONED', 'HIM', 'CLOSELY', 'ABOUT', 'THE', 'PENALTY', 'FOR', 'FORGING', 'A', 'CHECK'] +6829-68769-0001-1859: ref=['IT', 'WAS', 'A', 'SERIOUS', 'CRIME', 'INDEED', 'MISTER', 'WATSON', 'TOLD', 'THEM', 'AND', 'TOM', 
'GATES', 'BADE', 'FAIR', 'TO', 'SERVE', 'A', 'LENGTHY', 'TERM', 'IN', "STATE'S", 'PRISON', 'AS', 'A', 'CONSEQUENCE', 'OF', 'HIS', 'RASH', 'ACT'] +6829-68769-0001-1859: hyp=['IT', 'WAS', 'A', 'SERIOUS', 'CRIME', 'INDEED', 'MISTER', 'WATSON', 'TOLD', 'THEM', 'AND', 'TOM', 'GATES', 'BADE', 'FAIR', 'TO', 'SERVE', 'A', 'LENGTHY', 'TERM', 'IN', 'THE', "STATE'S", 'PRISON', 'AS', 'A', 'CONSEQUENCE', 'OF', 'HIS', 'RASH', 'ACT'] +6829-68769-0002-1860: ref=['I', "CAN'T", 'SEE', 'IT', 'IN', 'THAT', 'LIGHT', 'SAID', 'THE', 'OLD', 'LAWYER'] +6829-68769-0002-1860: hyp=['I', "CAN'T", 'SEE', 'IT', 'IN', 'THAT', 'LIGHT', 'SAID', 'THE', 'OLD', 'LAWYER'] +6829-68769-0003-1861: ref=['IT', 'WAS', 'A', 'DELIBERATE', 'THEFT', 'FROM', 'HIS', 'EMPLOYERS', 'TO', 'PROTECT', 'A', 'GIRL', 'HE', 'LOVED'] +6829-68769-0003-1861: hyp=['IT', 'WAS', 'A', 'DELIBERATE', 'THEFT', 'FROM', 'HIS', 'EMPLOYERS', 'TO', 'PROTECT', 'A', 'GIRL', 'HE', 'LOVED'] +6829-68769-0004-1862: ref=['BUT', 'THEY', 'COULD', 'NOT', 'HAVE', 'PROVEN', 'A', 'CASE', 'AGAINST', 'LUCY', 'IF', 'SHE', 'WAS', 'INNOCENT', 'AND', 'ALL', 'THEIR', 'THREATS', 'OF', 'ARRESTING', 'HER', 'WERE', 'PROBABLY', 'MERE', 'BLUFF'] +6829-68769-0004-1862: hyp=['BUT', 'THEY', 'COULD', 'NOT', 'HAVE', 'PROVEN', 'A', 'CASE', 'AGAINST', 'LUCY', 'IF', 'SHE', 'WAS', 'INNOCENT', 'AND', 'ALL', 'THEIR', 'THREATS', 'OF', 'ARRESTING', 'HER', 'WERE', 'PROBABLY', 'A', 'MERE', 'BLUFF'] +6829-68769-0005-1863: ref=['HE', 'WAS', 'SOFT', 'HEARTED', 'AND', 'IMPETUOUS', 'SAID', 'BETH', 'AND', 'BEING', 'IN', 'LOVE', 'HE', "DIDN'T", 'STOP', 'TO', 'COUNT', 'THE', 'COST'] +6829-68769-0005-1863: hyp=['HE', 'WAS', 'A', 'SOFT', 'HEARTED', 'AND', 'IMPETUOUS', 'SAID', 'BETH', 'AND', 'BEING', 'IN', 'LOVE', 'HE', "DIDN'T", 'STOP', 'TO', 'COUNT', 'THE', 'COST'] +6829-68769-0006-1864: ref=['IF', 'THE', 'PROSECUTION', 'WERE', 'WITHDRAWN', 'AND', 'THE', 'CASE', 'SETTLED', 'WITH', 'THE', 'VICTIM', 'OF', 'THE', 'FORGED', 'CHECK', 'THEN', 'THE', 'YOUNG', 'MAN', 'WOULD', 'BE', 'ALLOWED', 'HIS', 'FREEDOM'] +6829-68769-0006-1864: hyp=['IF', 'THE', 'PROSECUTION', 'WERE', 'WITHDRAWN', 'AND', 'THE', 'CASE', 'SETTLED', 'WITH', 'THE', 'VICTIM', 'OF', 'THE', 'FORGED', 'CHECK', 'THEN', 'THE', 'YOUNG', 'MAN', 'WOULD', 'BE', 'ALLOWED', 'HIS', 'FREEDOM'] +6829-68769-0007-1865: ref=['BUT', 'UNDER', 'THE', 'CIRCUMSTANCES', 'I', 'DOUBT', 'IF', 'SUCH', 'AN', 'ARRANGEMENT', 'COULD', 'BE', 'MADE'] +6829-68769-0007-1865: hyp=['BUT', 'UNDER', 'THE', 'CIRCUMSTANCES', 'I', 'DOUBT', 'OF', 'SUCH', 'AN', 'ARRANGEMENT', 'COULD', 'BE', 'MADE'] +6829-68769-0008-1866: ref=['FAIRVIEW', 'WAS', 'TWELVE', 'MILES', 'AWAY', 'BUT', 'BY', 'TEN', "O'CLOCK", 'THEY', 'DREW', 'UP', 'AT', 'THE', 'COUNTY', 'JAIL'] +6829-68769-0008-1866: hyp=['FAIR', 'VIEWS', 'TWELVE', 'MILES', 'AWAY', 'BUT', 'BY', 'TEN', "O'CLOCK", 'THEY', 'DREW', 'UP', 'AT', 'THE', 'COUNTY', 'TRAIL'] +6829-68769-0009-1867: ref=['THEY', 'WERE', 'RECEIVED', 'IN', 'THE', 'LITTLE', 'OFFICE', 'BY', 'A', 'MAN', 'NAMED', 'MARKHAM', 'WHO', 'WAS', 'THE', 'JAILER'] +6829-68769-0009-1867: hyp=['THEY', 'WERE', 'RECEIVED', 'IN', 'THE', 'LITTLE', 'OFFICE', 'BY', 'A', 'MAN', 'NAMED', 'MARKHAM', 'WHO', 'WAS', 'THE', 'JAILER'] +6829-68769-0010-1868: ref=['WE', 'WISH', 'TO', 'TALK', 'WITH', 'HIM', 'ANSWERED', 'KENNETH', 'TALK'] +6829-68769-0010-1868: hyp=['WE', 'WISH', 'TO', 'TALK', 'WITH', 'HIM', 'ANSWERED', 'KENNETH', 'TALK'] +6829-68769-0011-1869: ref=["I'M", 'RUNNING', 'FOR', 'REPRESENTATIVE', 'ON', 'THE', 'REPUBLICAN', 'TICKET', 'SAID', 'KENNETH', 'QUIETLY'] +6829-68769-0011-1869: hyp=["I'M", 
'RUNNING', 'FOR', 'REPRESENTATIVE', 'ON', 'THE', 'REPUBLICAN', 'TICKET', 'SAID', 'KENNETH', 'QUIETLY'] +6829-68769-0012-1870: ref=['OH', 'SAY', "THAT'S", 'DIFFERENT', 'OBSERVED', 'MARKHAM', 'ALTERING', 'HIS', 'DEMEANOR'] +6829-68769-0012-1870: hyp=["I'LL", 'SAY', "THAT'S", 'DIFFERENT', 'OBSERVED', 'MARKHAM', 'ALTERING', 'HIS', 'DEMEANOR'] +6829-68769-0013-1871: ref=['MAY', 'WE', 'SEE', 'GATES', 'AT', 'ONCE', 'ASKED', 'KENNETH'] +6829-68769-0013-1871: hyp=['MAYBE', 'SEA', 'GATES', 'AT', 'ONCE', 'ASKED', 'KENNETH'] +6829-68769-0014-1872: ref=['THEY', 'FOLLOWED', 'THE', 'JAILER', 'ALONG', 'A', 'SUCCESSION', 'OF', 'PASSAGES'] +6829-68769-0014-1872: hyp=['THEY', 'FOLLOWED', 'THE', 'JAILER', 'ALONG', 'A', 'SUCCESSION', 'OF', 'PASSAGES'] +6829-68769-0015-1873: ref=['SOMETIMES', "I'M", 'THAT', 'YEARNING', 'FOR', 'A', 'SMOKE', "I'M", 'NEARLY', 'CRAZY', 'AN', 'I', 'DUNNO', 'WHICH', 'IS', 'WORST', 'DYIN', 'ONE', 'WAY', 'OR', 'ANOTHER'] +6829-68769-0015-1873: hyp=['SOMETIMES', 'ON', 'THAT', 'YEARNIN', 'FOR', 'A', 'SMOKE', "I'M", 'NEARLY', 'CRAZY', 'AND', 'I', 'DUNNO', 'WHICH', 'IS', 'WORSE', 'DYIN', 'ONE', 'WAY', 'OR', 'THE', 'OTHER'] +6829-68769-0016-1874: ref=['HE', 'UNLOCKED', 'THE', 'DOOR', 'AND', 'CALLED', "HERE'S", 'VISITORS', 'TOM'] +6829-68769-0016-1874: hyp=['HE', 'UNLOCKED', 'THE', 'DOOR', 'AND', 'CALLED', "HERE'S", 'VISITORS', 'TOM'] +6829-68769-0017-1875: ref=['WORSE', 'TOM', 'WORSE', 'N', 'EVER', 'REPLIED', 'THE', 'JAILER', 'GLOOMILY'] +6829-68769-0017-1875: hyp=['WORSE', 'TOM', 'WORSE', 'THAN', 'EVER', 'REPLIED', 'THE', 'JAILER', 'GLOOMILY'] +6829-68769-0018-1876: ref=['MISS', 'DE', 'GRAF', 'SAID', 'KENNETH', 'NOTICING', 'THE', "BOY'S", 'FACE', 'CRITICALLY', 'AS', 'HE', 'STOOD', 'WHERE', 'THE', 'LIGHT', 'FROM', 'THE', 'PASSAGE', 'FELL', 'UPON', 'IT'] +6829-68769-0018-1876: hyp=['MISTER', 'GRAF', 'SAID', 'KENNETH', 'NOTICING', 'THE', "BOY'S", 'FACE', 'CRITICALLY', 'AS', 'HE', 'STOOD', 'WHERE', 'THE', 'LIGHT', 'FROM', 'THE', 'PASSAGE', 'FELL', 'UPON', 'IT'] +6829-68769-0019-1877: ref=['SORRY', 'WE', "HAVEN'T", 'ANY', 'RECEPTION', 'ROOM', 'IN', 'THE', 'JAIL'] +6829-68769-0019-1877: hyp=['SORRY', 'WE', "HAVEN'T", 'ANY', 'RECEPTION', 'ROOM', 'IN', 'THE', 'JAIL'] +6829-68769-0020-1878: ref=['SIT', 'DOWN', 'PLEASE', 'SAID', 'GATES', 'IN', 'A', 'CHEERFUL', 'AND', 'PLEASANT', 'VOICE', "THERE'S", 'A', 'BENCH', 'HERE'] +6829-68769-0020-1878: hyp=['SIT', 'DOWN', 'PLEASE', 'SAID', 'GATES', 'IN', 'A', 'CHEERFUL', 'AND', 'PLEASANT', 'VOICE', "THERE'S", 'A', 'BENCH', 'HERE'] +6829-68769-0021-1879: ref=['A', 'FRESH', 'WHOLESOME', 'LOOKING', 'BOY', 'WAS', 'TOM', 'GATES', 'WITH', 'STEADY', 'GRAY', 'EYES', 'AN', 'INTELLIGENT', 'FOREHEAD', 'BUT', 'A', 'SENSITIVE', 'RATHER', 'WEAK', 'MOUTH'] +6829-68769-0021-1879: hyp=['A', 'FRESH', 'WHOLESOME', 'LOOKING', 'BOY', 'WAS', 'TOM', 'GATES', 'WHOSE', 'STEADY', 'GRAY', 'EYES', 'AN', 'INTELLIGENT', 'FOREHEAD', 'BUT', 'A', 'SENSITIVE', 'RATHER', 'WEAK', 'MOUTH'] +6829-68769-0022-1880: ref=['WE', 'HAVE', 'HEARD', 'SOMETHING', 'OF', 'YOUR', 'STORY', 'SAID', 'KENNETH', 'AND', 'ARE', 'INTERESTED', 'IN', 'IT'] +6829-68769-0022-1880: hyp=['WE', 'HAVE', 'HEARD', 'SOMETHING', 'OF', 'YOUR', 'STORY', 'SAID', 'KENNETH', 'AND', 'ARE', 'INTERESTED', 'IN', 'IT'] +6829-68769-0023-1881: ref=['I', "DIDN'T", 'STOP', 'TO', 'THINK', 'WHETHER', 'IT', 'WAS', 'FOOLISH', 'OR', 'NOT', 'I', 'DID', 'IT', 'AND', "I'M", 'GLAD', 'I', 'DID'] +6829-68769-0023-1881: hyp=['I', "DIDN'T", 'STOP', 'TO', 'THINK', 'WHETHER', 'IT', 'WAS', 'FOOLISH', 'OR', 'NOT', 'I', 'DID', 'IT', 'AND', "I'M", 'GLAD', 'I', 
'DID', 'IT'] +6829-68769-0024-1882: ref=['OLD', 'WILL', 'IS', 'A', 'FINE', 'FELLOW', 'BUT', 'POOR', 'AND', 'HELPLESS', 'SINCE', 'MISSUS', 'ROGERS', 'HAD', 'HER', 'ACCIDENT'] +6829-68769-0024-1882: hyp=['OLD', 'WILL', 'IS', 'A', 'FINE', 'FELLOW', 'BUT', 'POOR', 'AND', 'HELPLESS', 'SINCE', 'MISSUS', 'ROGERS', 'HAD', 'HER', 'ACCIDENT'] +6829-68769-0025-1883: ref=['THEN', 'ROGERS', "WOULDN'T", 'DO', 'ANYTHING', 'BUT', 'LEAD', 'HER', 'AROUND', 'AND', 'WAIT', 'UPON', 'HER', 'AND', 'THE', 'PLACE', 'WENT', 'TO', 'RACK', 'AND', 'RUIN'] +6829-68769-0025-1883: hyp=['THEN', 'ROGERS', "WOULDN'T", 'DO', 'ANYTHING', 'BUT', 'LEAD', 'HER', 'AROUND', 'AND', 'WAIT', 'UPON', 'HER', 'AND', 'THE', 'PLACE', 'WENT', 'TO', 'RACK', 'AND', 'RUIN'] +6829-68769-0026-1884: ref=['HE', 'SPOKE', 'SIMPLY', 'BUT', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'NARROW', 'CELL', 'IN', 'FRONT', 'OF', 'THEM'] +6829-68769-0026-1884: hyp=['HE', 'SPOKE', 'SIMPLY', 'BUT', 'PACED', 'UP', 'AND', 'DOWN', 'THE', 'NARROW', 'CELL', 'IN', 'FRONT', 'OF', 'THEM'] +6829-68769-0027-1885: ref=['WHOSE', 'NAME', 'DID', 'YOU', 'SIGN', 'TO', 'THE', 'CHECK', 'ASKED', 'KENNETH'] +6829-68769-0027-1885: hyp=['WHOSE', 'NAME', 'DID', 'YOU', 'SIGN', 'TO', 'THE', 'CHECK', 'ASKED', 'KENNETH'] +6829-68769-0028-1886: ref=['HE', 'IS', 'SUPPOSED', 'TO', 'SIGN', 'ALL', 'THE', 'CHECKS', 'OF', 'THE', 'CONCERN'] +6829-68769-0028-1886: hyp=['HE', 'IS', 'SUPPOSED', 'TO', 'SIGN', 'ALL', 'THE', 'CHECKS', 'OF', 'THE', 'CONCERN'] +6829-68769-0029-1887: ref=["IT'S", 'A', 'STOCK', 'COMPANY', 'AND', 'RICH'] +6829-68769-0029-1887: hyp=["IT'S", 'A', 'STOCK', 'COMPANY', 'IN', 'RICH'] +6829-68769-0030-1888: ref=['I', 'WAS', 'BOOKKEEPER', 'SO', 'IT', 'WAS', 'EASY', 'TO', 'GET', 'A', 'BLANK', 'CHECK', 'AND', 'FORGE', 'THE', 'SIGNATURE'] +6829-68769-0030-1888: hyp=['I', 'WAS', 'BIT', 'KEEPER', 'SO', 'IT', 'WAS', 'EASY', 'TO', 'GET', 'A', 'BLANK', 'CHECK', 'AND', 'FORGE', 'THE', 'SIGNATURE'] +6829-68769-0031-1889: ref=['AS', 'REGARDS', 'MY', 'ROBBING', 'THE', 'COMPANY', "I'LL", 'SAY', 'THAT', 'I', 'SAVED', 'THEM', 'A', 'HEAVY', 'LOSS', 'ONE', 'DAY'] +6829-68769-0031-1889: hyp=['AS', 'REGARDS', 'MY', 'ROBBING', 'THE', 'COMPANY', "I'LL", 'SAY', 'THAT', 'I', 'SAVED', 'HIM', 'A', 'HEAVY', 'LOST', 'ONE', 'DAY'] +6829-68769-0032-1890: ref=['I', 'DISCOVERED', 'AND', 'PUT', 'OUT', 'A', 'FIRE', 'THAT', 'WOULD', 'HAVE', 'DESTROYED', 'THE', 'WHOLE', 'PLANT', 'BUT', 'MARSHALL', 'NEVER', 'EVEN', 'THANKED', 'ME'] +6829-68769-0032-1890: hyp=['I', 'DISCOVERED', 'AND', 'PUT', 'OUT', 'A', 'FIRE', 'THAT', 'WOULD', 'HAVE', 'DESTROYED', 'THE', 'WHOLE', 'PLANT', 'BUT', 'MARTIAL', 'NEVER', 'EVEN', 'THANKED', 'ME'] +6829-68769-0033-1891: ref=['IT', 'WAS', 'BETTER', 'FOR', 'HIM', 'TO', 'THINK', 'THE', 'GIRL', 'UNFEELING', 'THAN', 'TO', 'KNOW', 'THE', 'TRUTH'] +6829-68769-0033-1891: hyp=['IT', 'WAS', 'BETTER', 'FOR', 'HIM', 'TO', 'THINK', 'THE', 'GIRL', 'UNFEELING', 'THAN', 'TO', 'KNOW', 'THE', 'TRUTH'] +6829-68769-0034-1892: ref=["I'M", 'GOING', 'TO', 'SEE', 'MISTER', 'MARSHALL', 'SAID', 'KENNETH', 'AND', 'DISCOVER', 'WHAT', 'I', 'CAN', 'DO', 'TO', 'ASSIST', 'YOU', 'THANK', 'YOU', 'SIR'] +6829-68769-0034-1892: hyp=["I'M", 'GOING', 'TO', 'SEE', 'MISTER', 'MARSHAL', 'SAID', 'KENNETH', 'AND', 'DISCOVER', 'WHAT', 'I', 'CAN', 'DO', 'TO', 'ASSIST', 'YOU', 'THANK', 'YOU', 'SIR'] +6829-68769-0035-1893: ref=['IT', "WON'T", 'BE', 'MUCH', 'BUT', "I'M", 'GRATEFUL', 'TO', 'FIND', 'A', 'FRIEND'] +6829-68769-0035-1893: hyp=['IT', "WON'T", 'BE', 'MUCH', 'BUT', "I'M", 'GRATEFUL', 'TO', 'FIND', 'A', 'FRIEND'] +6829-68769-0036-1894: 
ref=['THEY', 'LEFT', 'HIM', 'THEN', 'FOR', 'THE', 'JAILER', 'ARRIVED', 'TO', 'UNLOCK', 'THE', 'DOOR', 'AND', 'ESCORT', 'THEM', 'TO', 'THE', 'OFFICE'] +6829-68769-0036-1894: hyp=['THEY', 'LEFT', 'HIM', 'THEN', 'FOR', 'THE', 'JAILER', 'ARRIVED', 'TO', 'UNLOCK', 'THE', 'DOOR', 'AND', 'ESCORT', 'THEM', 'TO', 'THE', 'OFFICE'] +6829-68769-0037-1895: ref=["I'VE", 'SEEN', 'LOTS', 'OF', 'THAT', 'KIND', 'IN', 'MY', 'DAY'] +6829-68769-0037-1895: hyp=["I'VE", 'SEEN', 'LOTS', 'OF', 'THAT', 'KIND', 'IN', 'MY', 'DAY'] +6829-68769-0038-1896: ref=['AND', 'IT', 'RUINS', 'A', "MAN'S", 'DISPOSITION'] +6829-68769-0038-1896: hyp=['AND', 'IT', 'RUINS', 'A', "MAN'S", 'DISPOSITION'] +6829-68769-0039-1897: ref=['HE', 'LOOKED', 'UP', 'RATHER', 'UNGRACIOUSLY', 'BUT', 'MOTIONED', 'THEM', 'TO', 'BE', 'SEATED'] +6829-68769-0039-1897: hyp=['HE', 'LOOKED', 'UP', 'RATHER', 'UNGRACIOUSLY', 'BUT', 'MOTIONED', 'THEM', 'TO', 'BE', 'SEATED'] +6829-68769-0040-1898: ref=['SOME', 'GIRL', 'HAS', 'BEEN', 'HERE', 'TWICE', 'TO', 'INTERVIEW', 'MY', 'MEN', 'AND', 'I', 'HAVE', 'REFUSED', 'TO', 'ADMIT', 'HER'] +6829-68769-0040-1898: hyp=['SOME', 'GIRL', 'HAS', 'BEEN', 'IN', 'HERE', 'TWICE', 'TO', 'INTERVIEW', 'MY', 'MEN', 'AND', 'I', 'HAVE', 'REFUSED', 'TO', 'ADMIT', 'HER'] +6829-68769-0041-1899: ref=["I'M", 'NOT', 'ELECTIONEERING', 'JUST', 'NOW'] +6829-68769-0041-1899: hyp=["I'M", 'NOT', 'ELECTIONEERING', 'JUST', 'NOW'] +6829-68769-0042-1900: ref=['OH', 'WELL', 'SIR', 'WHAT', 'ABOUT', 'HIM'] +6829-68769-0042-1900: hyp=['OH', 'WELL', 'SIR', 'WHAT', 'ABOUT', 'EM'] +6829-68769-0043-1901: ref=['AND', 'HE', 'DESERVES', 'A', 'TERM', 'IN', "STATE'S", 'PRISON'] +6829-68769-0043-1901: hyp=['AND', 'HE', 'DESERVES', 'A', 'TERM', 'IN', "STATE'S", 'PRISON'] +6829-68769-0044-1902: ref=['IT', 'HAS', 'COST', 'ME', 'TWICE', 'SIXTY', 'DOLLARS', 'IN', 'ANNOYANCE'] +6829-68769-0044-1902: hyp=['IT', 'HAS', 'COST', 'ME', 'TWICE', 'SIXTY', 'DOLLARS', 'IN', 'ANNOYANCE'] +6829-68769-0045-1903: ref=["I'LL", 'PAY', 'ALL', 'THE', 'COSTS', 'BESIDES'] +6829-68769-0045-1903: hyp=["I'LL", 'PAY', 'ALL', 'THE', 'COST', 'BESIDES'] +6829-68769-0046-1904: ref=["YOU'RE", 'FOOLISH', 'WHY', 'SHOULD', 'YOU', 'DO', 'ALL', 'THIS'] +6829-68769-0046-1904: hyp=["YOU'RE", 'FOOLISH', 'WHY', 'SHOULD', 'YOU', 'DO', 'ALL', 'THIS'] +6829-68769-0047-1905: ref=['I', 'HAVE', 'MY', 'OWN', 'REASONS', 'MISTER', 'MARSHALL'] +6829-68769-0047-1905: hyp=['I', 'HAVE', 'MY', 'OWN', 'REASONS', 'MISTER', 'MARSHALL'] +6829-68769-0048-1906: ref=['GIVE', 'ME', 'A', 'CHECK', 'FOR', 'A', 'HUNDRED', 'AND', 'FIFTY', 'AND', "I'LL", 'TURN', 'OVER', 'TO', 'YOU', 'THE', 'FORGED', 'CHECK', 'AND', 'QUASH', 'FURTHER', 'PROCEEDINGS'] +6829-68769-0048-1906: hyp=['GIVE', 'ME', 'A', 'CHECK', 'FOR', 'A', 'HUNDRED', 'AND', 'FIFTY', 'AND', "I'LL", 'TURN', 'OVER', 'TO', 'YOU', 'THE', 'FORGED', 'CHECK', 'AND', 'CASH', 'FURTHER', 'PROCEEDINGS'] +6829-68769-0049-1907: ref=['HE', 'DETESTED', 'THE', 'GRASPING', 'DISPOSITION', 'THAT', 'WOULD', 'ENDEAVOR', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'HIS', 'EVIDENT', 'DESIRE', 'TO', 'HELP', 'YOUNG', 'GATES'] +6829-68769-0049-1907: hyp=['HE', 'DETESTED', 'THE', 'GRASPING', 'DISPOSITION', 'THAT', 'WOULD', 'ENDEAVOR', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'HIS', 'EVIDENT', 'DESIRE', 'TO', 'HELP', 'YOUNG', 'GATES'] +6829-68769-0050-1908: ref=['BETH', 'UNEASY', 'AT', 'HIS', 'SILENCE', 'NUDGED', 'HIM'] +6829-68769-0050-1908: hyp=['BETH', 'UNEASY', 'AT', 'A', 'SILENCE', 'NUDGED', 'HIM'] +6829-68769-0051-1909: ref=['THERE', 'WAS', 'A', 'GRIM', 'SMILE', 'OF', 'AMUSEMENT', 'ON', 'HIS', 'SHREWD', 'FACE'] 
+6829-68769-0051-1909: hyp=['THERE', 'WAS', 'A', 'GRIM', 'SMILE', 'OF', 'AMUSEMENT', 'ON', 'HIS', 'SHREWD', 'FACE'] +6829-68769-0052-1910: ref=['HE', 'MIGHT', 'HAVE', 'HAD', 'THAT', 'FORGED', 'CHECK', 'FOR', 'THE', 'FACE', 'OF', 'IT', 'IF', "HE'D", 'BEEN', 'SHARP'] +6829-68769-0052-1910: hyp=['HE', 'MIGHT', 'HAVE', 'HAD', 'THAT', 'FORGED', 'CHECK', 'FOR', 'THE', 'FACE', 'OF', 'IT', 'IF', "HE'D", 'BEEN', 'SHARP'] +6829-68769-0053-1911: ref=['AND', 'TO', 'THINK', 'WE', 'CAN', 'SAVE', 'ALL', 'THAT', 'MISERY', 'AND', 'DESPAIR', 'BY', 'THE', 'PAYMENT', 'OF', 'A', 'HUNDRED', 'AND', 'FIFTY', 'DOLLARS'] +6829-68769-0053-1911: hyp=['AND', 'TO', 'THINK', 'WE', 'CAN', 'SAVE', 'ALL', 'THAT', 'MISERY', 'AND', 'DESPAIR', 'BY', 'THE', 'PAYMENT', 'OF', 'A', 'HUNDRED', 'AND', 'FIFTY', 'DOLLARS'] +6829-68771-0000-1912: ref=['SO', 'TO', 'THE', 'SURPRISE', 'OF', 'THE', 'DEMOCRATIC', 'COMMITTEE', 'AND', 'ALL', 'HIS', 'FRIENDS', 'MISTER', 'HOPKINS', 'ANNOUNCED', 'THAT', 'HE', 'WOULD', 'OPPOSE', "FORBES'S", 'AGGRESSIVE', 'CAMPAIGN', 'WITH', 'AN', 'EQUAL', 'AGGRESSIVENESS', 'AND', 'SPEND', 'AS', 'MANY', 'DOLLARS', 'IN', 'DOING', 'SO', 'AS', 'MIGHT', 'BE', 'NECESSARY'] +6829-68771-0000-1912: hyp=['SO', 'TO', 'THE', 'SURPRISE', 'OF', 'THE', 'DEMOCRATIC', 'COMMITTEE', 'AND', 'ALL', 'HIS', 'FRIENDS', 'MISTER', 'HOPKINS', 'ANNOUNCED', 'THAT', 'HE', 'WOULD', 'OPPOSE', 'FORCE', 'AGGRESSIVE', 'CAMPAIGN', 'WITH', 'AN', 'EQUAL', 'AGGRESSIVENESS', 'AND', 'SPEND', 'AS', 'MANY', 'DOLLARS', 'IN', 'DOING', 'SO', 'AS', 'MIGHT', 'BE', 'NECESSARY'] +6829-68771-0001-1913: ref=['ONE', 'OF', 'MISTER', "HOPKINS'S", 'FIRST', 'TASKS', 'AFTER', 'CALLING', 'HIS', 'FAITHFUL', 'HENCHMEN', 'AROUND', 'HIM', 'WAS', 'TO', 'MAKE', 'A', 'CAREFUL', 'CANVASS', 'OF', 'THE', 'VOTERS', 'OF', 'HIS', 'DISTRICT', 'TO', 'SEE', 'WHAT', 'WAS', 'STILL', 'TO', 'BE', 'ACCOMPLISHED'] +6829-68771-0001-1913: hyp=['ONE', 'OF', 'MISTER', 'HOPKINS', 'FIRST', 'TASKS', 'AFTER', 'CALLING', 'HIS', 'FAITHFUL', 'HENCHMAN', 'AROUND', 'HIM', 'WAS', 'TO', 'MAKE', 'A', 'CAREFUL', 'CANVASS', 'OF', 'THE', 'VOTERS', 'OF', 'HIS', 'DISTRICT', 'TO', 'SEE', 'WHAT', 'WAS', 'STILL', 'TO', 'BE', 'ACCOMPLISHED'] +6829-68771-0002-1914: ref=['THE', 'WEAK', 'KNEED', 'CONTINGENCY', 'MUST', 'BE', 'STRENGTHENED', 'AND', 'FORTIFIED', 'AND', 'A', 'COUPLE', 'OF', 'HUNDRED', 'VOTES', 'IN', 'ONE', 'WAY', 'OR', 'ANOTHER', 'SECURED', 'FROM', 'THE', 'OPPOSITION'] +6829-68771-0002-1914: hyp=['THE', 'WEAK', 'NEED', 'CONTINGENCY', 'MUST', 'BE', 'STRENGTHENED', 'AND', 'FORTIFIED', 'AND', 'A', 'COUPLE', 'OF', 'HUNDRED', 'VOTES', 'IN', 'ONE', 'WAY', 'OR', 'THE', 'OTHER', 'SECURED', 'FROM', 'THE', 'OPPOSITION'] +6829-68771-0003-1915: ref=['THE', 'DEMOCRATIC', 'COMMITTEE', 'FIGURED', 'OUT', 'A', 'WAY', 'TO', 'DO', 'THIS'] +6829-68771-0003-1915: hyp=['THE', 'DEMOCRATIC', 'COMMITTEE', 'FIGURED', 'OUT', 'A', 'WAY', 'TO', 'DO', 'THIS'] +6829-68771-0004-1916: ref=['UNDER', 'ORDINARY', 'CONDITIONS', 'REYNOLDS', 'WAS', 'SURE', 'TO', 'BE', 'ELECTED', 'BUT', 'THE', 'COMMITTEE', 'PROPOSED', 'TO', 'SACRIFICE', 'HIM', 'IN', 'ORDER', 'TO', 'ELECT', 'HOPKINS'] +6829-68771-0004-1916: hyp=['UNDER', 'ORDINARY', 'CONDITIONS', 'REYNOLDS', 'WAS', 'SURE', 'TO', 'BE', 'ELECTED', 'BUT', 'THE', 'COMMITTEE', 'PROPOSED', 'TO', 'SACRIFICE', 'HIM', 'IN', 'ORDER', 'TO', 'ELECT', 'HOPKINS'] +6829-68771-0005-1917: ref=['THE', 'ONLY', 'THING', 'NECESSARY', 'WAS', 'TO', 'FIX', 'SETH', 'REYNOLDS', 'AND', 'THIS', 'HOPKINS', 'ARRANGED', 'PERSONALLY'] +6829-68771-0005-1917: hyp=['THE', 'ONLY', 'THING', 'NECESSARY', 'WAS', 'TO', 'FIX', 'SETH', 
'REYNOLDS', 'AND', 'THIS', 'HOPKINS', 'ARRANGED', 'PERSONALLY'] +6829-68771-0006-1918: ref=['AND', 'THIS', 'WAS', 'WHY', 'KENNETH', 'AND', 'BETH', 'DISCOVERED', 'HIM', 'CONVERSING', 'WITH', 'THE', 'YOUNG', 'WOMAN', 'IN', 'THE', 'BUGGY'] +6829-68771-0006-1918: hyp=['AND', 'THIS', 'WAS', 'WHY', 'KENNETH', 'AND', 'BETH', 'DISCOVERED', 'HIM', 'CONVERSING', 'WITH', 'THE', 'YOUNG', 'WOMAN', 'IN', 'THE', 'BUGGY'] +6829-68771-0007-1919: ref=['THE', 'DESCRIPTION', 'SHE', 'GAVE', 'OF', 'THE', 'COMING', 'RECEPTION', 'TO', 'THE', "WOMAN'S", 'POLITICAL', 'LEAGUE', 'WAS', 'SO', 'HUMOROUS', 'AND', 'DIVERTING', 'THAT', 'THEY', 'WERE', 'BOTH', 'LAUGHING', 'HEARTILY', 'OVER', 'THE', 'THING', 'WHEN', 'THE', 'YOUNG', 'PEOPLE', 'PASSED', 'THEM', 'AND', 'THUS', 'MISTER', 'HOPKINS', 'FAILED', 'TO', 'NOTICE', 'WHO', 'THE', 'OCCUPANTS', 'OF', 'THE', 'OTHER', 'VEHICLE', 'WERE'] +6829-68771-0007-1919: hyp=['THE', 'DESCRIPTION', 'SHE', 'GAVE', 'OF', 'THE', 'COMING', 'RECEPTION', 'TO', 'THE', "WOMEN'S", 'POLITICAL', 'LEAGUE', 'WAS', 'SO', 'HUMOROUS', 'AND', 'DIVERTING', 'THAT', 'THEY', 'WERE', 'BOTH', 'LAUGHING', 'HEARTILY', 'OVER', 'THE', 'THING', 'WHEN', 'THE', 'YOUNG', 'PEOPLE', 'PASSED', 'THEM', 'AND', 'THUS', 'MISTER', 'HOPKINS', 'FAILED', 'TO', 'NOTICE', 'WHO', 'THE', 'OCCUPANT', 'OF', 'THE', 'OTHER', 'VEHICLE', 'WERE'] +6829-68771-0008-1920: ref=['THESE', 'WOMEN', 'WERE', 'FLATTERED', 'BY', 'THE', 'ATTENTION', 'OF', 'THE', 'YOUNG', 'LADY', 'AND', 'HAD', 'PROMISED', 'TO', 'ASSIST', 'IN', 'ELECTING', 'MISTER', 'FORBES'] +6829-68771-0008-1920: hyp=['THESE', 'WOMEN', 'WERE', 'FLATTERED', 'BY', 'THE', 'ATTENTION', 'OF', 'THE', 'YOUNG', 'LADY', 'AND', 'HAD', 'PROMISED', 'TO', 'ASSIST', 'IN', 'ELECTING', 'MISTER', 'FORBES'] +6829-68771-0009-1921: ref=['LOUISE', 'HOPED', 'FOR', 'EXCELLENT', 'RESULTS', 'FROM', 'THIS', 'ORGANIZATION', 'AND', 'WISHED', 'THE', 'ENTERTAINMENT', 'TO', 'BE', 'SO', 'EFFECTIVE', 'IN', 'WINNING', 'THEIR', 'GOOD', 'WILL', 'THAT', 'THEY', 'WOULD', 'WORK', 'EARNESTLY', 'FOR', 'THE', 'CAUSE', 'IN', 'WHICH', 'THEY', 'WERE', 'ENLISTED'] +6829-68771-0009-1921: hyp=['LOUISE', 'HOPED', 'FOR', 'EXCELLENT', 'RESULTS', 'FROM', 'THIS', 'ORGANIZATION', 'AND', 'WISHED', 'THE', 'ENTERTAINMENT', 'TO', 'BE', 'SO', 'EFFECTIVE', 'IN', 'WINNING', 'THEIR', 'GOOD', 'WILL', 'THAT', 'THEY', 'WOULD', 'WORK', 'EARNESTLY', 'FOR', 'THE', 'CAUSE', 'IN', 'WHICH', 'THEY', 'WERE', 'ENLISTED'] +6829-68771-0010-1922: ref=['THE', 'FAIRVIEW', 'BAND', 'WAS', 'ENGAGED', 'TO', 'DISCOURSE', 'AS', 'MUCH', 'HARMONY', 'AS', 'IT', 'COULD', 'PRODUCE', 'AND', 'THE', 'RESOURCES', 'OF', 'THE', 'GREAT', 'HOUSE', 'WERE', 'TAXED', 'TO', 'ENTERTAIN', 'THE', 'GUESTS'] +6829-68771-0010-1922: hyp=['THE', 'FAIR', 'VIEW', 'BAND', 'WAS', 'ENGAGED', 'TO', 'DISCOURSE', 'AS', 'MUCH', 'HARMONY', 'AS', 'IT', 'COULD', 'PRODUCE', 'AND', 'THE', 'RESOURCES', 'OF', 'THE', 'GREAT', 'HOUSE', 'WERE', 'TAXED', 'TO', 'ENTERTAIN', 'THE', 'GUESTS'] +6829-68771-0011-1923: ref=['TABLES', 'WERE', 'SPREAD', 'ON', 'THE', 'LAWN', 'AND', 'A', 'DAINTY', 'BUT', 'SUBSTANTIAL', 'REPAST', 'WAS', 'TO', 'BE', 'SERVED'] +6829-68771-0011-1923: hyp=['TABLES', 'WERE', 'SPREAD', 'ON', 'THE', 'LAWN', 'AND', 'A', 'DAINTY', 'BUT', 'SUBSTANTIAL', 'REPAST', 'WAS', 'TO', 'BE', 'SERVED'] +6829-68771-0012-1924: ref=['THIS', 'WAS', 'THE', 'FIRST', 'OCCASION', 'WITHIN', 'A', 'GENERATION', 'WHEN', 'SUCH', 'AN', 'ENTERTAINMENT', 'HAD', 'BEEN', 'GIVEN', 'AT', 'ELMHURST', 'AND', 'THE', 'ONLY', 'ONE', 'WITHIN', 'THE', 'MEMORY', 'OF', 'MAN', 'WHERE', 'THE', 'NEIGHBORS', 'AND', 'COUNTRY', 'PEOPLE', 'HAD', 
'BEEN', 'INVITED', 'GUESTS'] +6829-68771-0012-1924: hyp=['THIS', 'WAS', 'THE', 'FIRST', 'OCCASION', 'WITHIN', 'A', 'GENERATION', 'WHEN', 'SUCH', 'AN', 'ENTERTAINMENT', 'HAD', 'BEEN', 'GIVEN', 'AT', 'ELMHURST', 'AND', 'THE', 'ONLY', 'ONE', 'WITHIN', 'THE', 'MEMORY', 'OF', 'MAN', 'WERE', 'THE', 'NEIGHBORS', 'AND', 'COUNTRY', 'PEOPLE', 'HAD', 'BEEN', 'THE', 'INVITED', 'GUESTS'] +6829-68771-0013-1925: ref=['THE', 'ATTENDANCE', 'WAS', 'UNEXPECTEDLY', 'LARGE', 'AND', 'THE', 'GIRLS', 'WERE', 'DELIGHTED', 'FORESEEING', 'GREAT', 'SUCCESS', 'FOR', 'THEIR', 'FETE'] +6829-68771-0013-1925: hyp=['THE', 'ATTENDANTS', 'WAS', 'UNEXPECTEDLY', 'LARGE', 'AND', 'THE', 'GIRLS', 'WERE', 'DELIGHTED', 'FOR', 'SEEING', 'GREAT', 'SUCCESS', 'FOR', 'THEIR', 'FIGHT'] +6829-68771-0014-1926: ref=['WE', 'OUGHT', 'TO', 'HAVE', 'MORE', 'ATTENDANTS', 'BETH', 'SAID', 'LOUISE', 'APPROACHING', 'HER', 'COUSIN'] +6829-68771-0014-1926: hyp=['WE', 'OUGHT', 'TO', 'HAVE', 'MORE', 'ATTENDANCE', 'BETH', 'SAID', 'LOUISE', 'APPROACHING', 'HER', 'COUSIN'] +6829-68771-0015-1927: ref=["WON'T", 'YOU', 'RUN', 'INTO', 'THE', 'HOUSE', 'AND', 'SEE', 'IF', 'MARTHA', "CAN'T", 'SPARE', 'ONE', 'OR', 'TWO', 'MORE', 'MAIDS'] +6829-68771-0015-1927: hyp=["WON'T", 'YOU', 'RUN', 'INTO', 'THE', 'HOUSE', 'AND', 'SEE', 'IF', 'MARTHA', "CAN'T", 'SPARE', 'ONE', 'OR', 'TWO', 'MORE', 'MATES'] +6829-68771-0016-1928: ref=['SHE', 'WAS', 'VERY', 'FOND', 'OF', 'THE', 'YOUNG', 'LADIES', 'WHOM', 'SHE', 'HAD', 'KNOWN', 'WHEN', 'AUNT', 'JANE', 'WAS', 'THE', 'MISTRESS', 'HERE', 'AND', 'BETH', 'WAS', 'HER', 'ESPECIAL', 'FAVORITE'] +6829-68771-0016-1928: hyp=['SHE', 'WAS', 'VERY', 'FOND', 'OF', 'THE', 'YOUNG', 'LADIES', 'WHOM', 'SHE', 'HAD', 'KNOWN', 'WHEN', 'AUNT', 'JANE', 'WAS', 'THEIR', 'MISTRESS', 'HERE', 'AND', 'BETH', 'WAS', 'HER', 'ESPECIAL', 'FAVORITE'] +6829-68771-0017-1929: ref=['THE', 'HOUSEKEEPER', 'LED', 'THE', 'WAY', 'AND', 'BETH', 'FOLLOWED'] +6829-68771-0017-1929: hyp=['THE', 'HOUSEKEEPER', 'LED', 'THE', 'WAY', 'IN', 'BETH', 'FOLLOWED'] +6829-68771-0018-1930: ref=['FOR', 'A', 'MOMENT', 'BETH', 'STOOD', 'STARING', 'WHILE', 'THE', 'NEW', 'MAID', 'REGARDED', 'HER', 'WITH', 'COMPOSURE', 'AND', 'A', 'SLIGHT', 'SMILE', 'UPON', 'HER', 'BEAUTIFUL', 'FACE'] +6829-68771-0018-1930: hyp=['FOR', 'A', 'MOMENT', 'BETH', 'STOOD', 'STARING', 'WHILE', 'THE', 'NEW', 'MAID', 'REGARDED', 'HER', 'WITH', 'COMPOSURE', 'AND', 'A', 'SLIGHT', 'SMILE', 'UPON', 'HER', 'BEAUTIFUL', 'FACE'] +6829-68771-0019-1931: ref=['SHE', 'WAS', 'DRESSED', 'IN', 'THE', 'REGULATION', 'COSTUME', 'OF', 'THE', 'MAIDS', 'AT', 'ELMHURST', 'A', 'PLAIN', 'BLACK', 'GOWN', 'WITH', 'WHITE', 'APRON', 'AND', 'CAP'] +6829-68771-0019-1931: hyp=['SHE', 'WAS', 'DRESSED', 'IN', 'THE', 'REGULATION', 'COSTUME', 'OF', 'THE', 'MAIDS', 'AT', 'ELMHURST', 'A', 'PLAYING', 'BLACK', 'GOWN', 'WITH', 'A', 'WHITE', 'APRON', 'AND', 'CAP'] +6829-68771-0020-1932: ref=['THEN', 'SHE', 'GAVE', 'A', 'LITTLE', 'LAUGH', 'AND', 'REPLIED', 'NO', 'MISS', 'BETH', "I'M", 'ELIZABETH', 'PARSONS'] +6829-68771-0020-1932: hyp=['THEN', 'SHE', 'GAVE', 'A', 'LITTLE', 'LAUGH', 'AND', 'REPLIED', 'NO', 'MISS', 'BETH', "I'M", 'ELIZABETH', "PARSON'S"] +6829-68771-0021-1933: ref=['BUT', 'IT', "CAN'T", 'BE', 'PROTESTED', 'THE', 'GIRL'] +6829-68771-0021-1933: hyp=['BUT', 'IT', "CAN'T", 'BE', 'PROTESTED', 'THE', 'GIRL'] +6829-68771-0022-1934: ref=['I', 'ATTEND', 'TO', 'THE', 'HOUSEHOLD', 'MENDING', 'YOU', 'KNOW', 'AND', 'CARE', 'FOR', 'THE', 'LINEN'] +6829-68771-0022-1934: hyp=['I', 'ATTEND', 'TO', 'THE', 'HOUSEHOLD', 'MENDING', 'YOU', 'KNOW', 'AND', 'CARE', 
'FOR', 'THE', 'LINEN'] +6829-68771-0023-1935: ref=['YOU', 'SPEAK', 'LIKE', 'AN', 'EDUCATED', 'PERSON', 'SAID', 'BETH', 'WONDERINGLY', 'WHERE', 'IS', 'YOUR', 'HOME'] +6829-68771-0023-1935: hyp=['YOU', 'SPEAK', 'LIKE', 'AN', 'EDUCATED', 'PERSON', 'SAID', 'BETH', 'WONDERINGLY', 'WHERE', 'IS', 'YOUR', 'HOME'] +6829-68771-0024-1936: ref=['FOR', 'THE', 'FIRST', 'TIME', 'THE', 'MAID', 'SEEMED', 'A', 'LITTLE', 'CONFUSED', 'AND', 'HER', 'GAZE', 'WANDERED', 'FROM', 'THE', 'FACE', 'OF', 'HER', 'VISITOR'] +6829-68771-0024-1936: hyp=['FOR', 'THE', 'FIRST', 'TIME', 'THE', 'MAID', 'SEEMED', 'A', 'LITTLE', 'CONFUSED', 'AND', 'HER', 'GAZE', 'WANDERED', 'FROM', 'THE', 'FACE', 'OF', 'HER', 'VISITOR'] +6829-68771-0025-1937: ref=['SHE', 'SAT', 'DOWN', 'IN', 'A', 'ROCKING', 'CHAIR', 'AND', 'CLASPING', 'HER', 'HANDS', 'IN', 'HER', 'LAP', 'ROCKED', 'SLOWLY', 'BACK', 'AND', 'FORTH', "I'M", 'SORRY', 'SAID', 'BETH'] +6829-68771-0025-1937: hyp=['SHE', 'SAT', 'DOWN', 'IN', 'A', 'ROCKING', 'CHAIR', 'AND', 'CLASPING', 'HER', 'HANDS', 'IN', 'HER', 'LAP', 'ROCK', 'SLOWLY', 'BACK', 'AND', 'FORTH', "I'M", 'SORRY', 'SAID', 'BETH'] +6829-68771-0026-1938: ref=['ELIZA', 'PARSONS', 'SHOOK', 'HER', 'HEAD'] +6829-68771-0026-1938: hyp=['ELIZA', 'PARSON', 'SHOOK', 'HER', 'HEAD'] +6829-68771-0027-1939: ref=['THEY', 'THEY', 'EXCITE', 'ME', 'IN', 'SOME', 'WAY', 'AND', 'I', 'I', "CAN'T", 'BEAR', 'THEM', 'YOU', 'MUST', 'EXCUSE', 'ME'] +6829-68771-0027-1939: hyp=['FATE', 'THEY', 'EXCITE', 'ME', 'IN', 'SOME', 'WAY', 'AND', 'I', 'I', "CAN'T", 'BEAR', 'THEM', 'YOU', 'MUST', 'EXCUSE', 'ME'] +6829-68771-0028-1940: ref=['SHE', 'EVEN', 'SEEMED', 'MILDLY', 'AMUSED', 'AT', 'THE', 'ATTENTION', 'SHE', 'ATTRACTED'] +6829-68771-0028-1940: hyp=['SHE', 'EVEN', 'SEEMED', 'MILDLY', 'AMUSED', 'AT', 'THE', 'ATTENTION', 'SHE', 'ATTRACTED'] +6829-68771-0029-1941: ref=['BETH', 'WAS', 'A', 'BEAUTIFUL', 'GIRL', 'THE', 'HANDSOMEST', 'OF', 'THE', 'THREE', 'COUSINS', 'BY', 'FAR', 'YET', 'ELIZA', 'SURPASSED', 'HER', 'IN', 'NATURAL', 'CHARM', 'AND', 'SEEMED', 'WELL', 'AWARE', 'OF', 'THE', 'FACT'] +6829-68771-0029-1941: hyp=['BETH', 'WAS', 'A', 'BEAUTIFUL', 'GIRL', 'THE', 'HANDSOMEST', 'OF', 'THE', 'THREE', 'COUSINS', 'BY', 'FAR', 'YET', 'ELIZA', 'SURPASSED', 'HER', 'A', 'NATURAL', 'CHARM', 'AND', 'SEEMED', 'WELL', 'AWARE', 'OF', 'THE', 'FACT'] +6829-68771-0030-1942: ref=['HER', 'MANNER', 'WAS', 'NEITHER', 'INDEPENDENT', 'NOR', 'ASSERTIVE', 'BUT', 'RATHER', 'ONE', 'OF', 'WELL', 'BRED', 'COMPOSURE', 'AND', 'CALM', 'RELIANCE'] +6829-68771-0030-1942: hyp=['HER', 'MANNER', 'WAS', 'NEITHER', 'INDEPENDENT', 'NOR', 'ASSERTIVE', 'BUT', 'RATHER', 'ONE', 'OF', 'WELL', 'BRED', 'COMPOSURE', 'AND', 'CALM', 'RELIANCE'] +6829-68771-0031-1943: ref=['HER', 'EYES', 'WANDERED', 'TO', 'THE', "MAID'S", 'HANDS'] +6829-68771-0031-1943: hyp=['HER', 'EYES', 'WANDERED', 'TO', 'THE', "MAID'S", 'HANDS'] +6829-68771-0032-1944: ref=['HOWEVER', 'HER', 'FEATURES', 'AND', 'FORM', 'MIGHT', 'REPRESS', 'ANY', 'EVIDENCE', 'OF', 'NERVOUSNESS', 'THESE', 'HANDS', 'TOLD', 'A', 'DIFFERENT', 'STORY'] +6829-68771-0032-1944: hyp=['HOWEVER', 'HER', 'FEATURES', 'INFORM', 'MIGHT', 'REPRESS', 'ANY', 'EVIDENCE', 'OF', 'NERVOUSNESS', 'THESE', 'HANDS', 'TOLD', 'A', 'DIFFERENT', 'STORY'] +6829-68771-0033-1945: ref=['SHE', 'ROSE', 'QUICKLY', 'TO', 'HER', 'FEET', 'WITH', 'AN', 'IMPETUOUS', 'GESTURE', 'THAT', 'MADE', 'HER', 'VISITOR', 'CATCH', 'HER', 'BREATH'] +6829-68771-0033-1945: hyp=['SHE', 'ROSE', 'QUICKLY', 'TO', 'HER', 'FEET', 'WITH', 'AN', 'IMPETUOUS', 'GESTURE', 'THAT', 'MADE', 'HER', 'VISITOR', 'CATCH', 'HER', 
'BREATH'] +6829-68771-0034-1946: ref=['I', 'WISH', 'I', 'KNEW', 'MYSELF', 'SHE', 'CRIED', 'FIERCELY'] +6829-68771-0034-1946: hyp=['I', 'WISH', 'I', 'KNEW', 'MYSELF', 'SHE', 'CRIED', 'FIERCELY'] +6829-68771-0035-1947: ref=['WILL', 'YOU', 'LEAVE', 'ME', 'ALONE', 'IN', 'MY', 'OWN', 'ROOM', 'OR', 'MUST', 'I', 'GO', 'AWAY', 'TO', 'ESCAPE', 'YOU'] +6829-68771-0035-1947: hyp=['WILL', 'YOU', 'LEAVE', 'ME', 'ALONE', 'IN', 'MY', 'OWN', 'ROOM', 'OR', 'MUST', 'I', 'GO', 'AWAY', 'TO', 'ESCAPE', 'YOU'] +6829-68771-0036-1948: ref=['ELIZA', 'CLOSED', 'THE', 'DOOR', 'BEHIND', 'HER', 'WITH', 'A', 'DECIDED', 'SLAM', 'AND', 'A', 'KEY', 'CLICKED', 'IN', 'THE', 'LOCK'] +6829-68771-0036-1948: hyp=['ELIZA', 'CLOSED', 'THE', 'DOOR', 'BEHIND', 'HER', 'WITH', 'A', 'DECIDED', 'SLAM', 'AND', 'A', 'KEY', 'CLICKED', 'IN', 'THE', 'LOCK'] +6930-75918-0000-0: ref=['CONCORD', 'RETURNED', 'TO', 'ITS', 'PLACE', 'AMIDST', 'THE', 'TENTS'] +6930-75918-0000-0: hyp=['CONCORD', 'RETURNED', 'TO', 'ITS', 'PLACE', 'AMIDST', 'THE', 'TENTS'] +6930-75918-0001-1: ref=['THE', 'ENGLISH', 'FORWARDED', 'TO', 'THE', 'FRENCH', 'BASKETS', 'OF', 'FLOWERS', 'OF', 'WHICH', 'THEY', 'HAD', 'MADE', 'A', 'PLENTIFUL', 'PROVISION', 'TO', 'GREET', 'THE', 'ARRIVAL', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'THE', 'FRENCH', 'IN', 'RETURN', 'INVITED', 'THE', 'ENGLISH', 'TO', 'A', 'SUPPER', 'WHICH', 'WAS', 'TO', 'BE', 'GIVEN', 'THE', 'NEXT', 'DAY'] +6930-75918-0001-1: hyp=['THE', 'ENGLISH', 'FOOTED', 'TO', 'THE', 'FRENCH', 'BASKETS', 'OF', 'FLOWERS', 'OF', 'WHICH', 'THEY', 'HAD', 'MADE', 'A', 'PLENTIFUL', 'PROVISION', 'TO', 'GREET', 'THE', 'ARRIVAL', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'THE', 'FRENCH', 'IN', 'RETURN', 'INVITED', 'THE', 'ENGLISH', 'TO', 'A', 'SUPPER', 'WHICH', 'WAS', 'TO', 'BE', 'GIVEN', 'THE', 'NEXT', 'DAY'] +6930-75918-0002-2: ref=['CONGRATULATIONS', 'WERE', 'POURED', 'IN', 'UPON', 'THE', 'PRINCESS', 'EVERYWHERE', 'DURING', 'HER', 'JOURNEY'] +6930-75918-0002-2: hyp=['CONGRATULATIONS', 'WERE', 'POURED', 'IN', 'UPON', 'THE', 'PRINCESS', 'EVERYWHERE', 'DURING', 'HER', 'JOURNEY'] +6930-75918-0003-3: ref=['FROM', 'THE', 'RESPECT', 'PAID', 'HER', 'ON', 'ALL', 'SIDES', 'SHE', 'SEEMED', 'LIKE', 'A', 'QUEEN', 'AND', 'FROM', 'THE', 'ADORATION', 'WITH', 'WHICH', 'SHE', 'WAS', 'TREATED', 'BY', 'TWO', 'OR', 'THREE', 'SHE', 'APPEARED', 'AN', 'OBJECT', 'OF', 'WORSHIP', 'THE', 'QUEEN', 'MOTHER', 'GAVE', 'THE', 'FRENCH', 'THE', 'MOST', 'AFFECTIONATE', 'RECEPTION', 'FRANCE', 'WAS', 'HER', 'NATIVE', 'COUNTRY', 'AND', 'SHE', 'HAD', 'SUFFERED', 'TOO', 'MUCH', 'UNHAPPINESS', 'IN', 'ENGLAND', 'FOR', 'ENGLAND', 'TO', 'HAVE', 'MADE', 'HER', 'FORGET', 'FRANCE'] +6930-75918-0003-3: hyp=['FROM', 'THE', 'RESPECT', 'PAID', 'HER', 'ON', 'ALL', 'SIDES', 'SHE', 'SEEMED', 'LIKE', 'A', 'QUEEN', 'AND', 'FROM', 'THE', 'ADORATION', 'WITH', 'WHICH', 'SHE', 'WAS', 'TREATED', 'BY', 'TWO', 'OR', 'THREE', 'SHE', 'APPEARED', 'AN', 'OBJECT', 'OF', 'WORSHIP', 'THE', 'QUEEN', 'MOTHER', 'GAVE', 'THE', 'FRENCH', 'THE', 'MOST', 'AFFECTIONATE', 'RECEPTION', 'FRANCE', 'WAS', 'HER', 'NATIVE', 'COUNTRY', 'AND', 'SHE', 'HAD', 'SUFFERED', 'TOO', 'MUCH', 'UNHAPPINESS', 'IN', 'ENGLAND', 'FOR', 'ENGLAND', 'TO', 'HAVE', 'MADE', 'HER', 'FORGET', 'FRANCE'] +6930-75918-0004-4: ref=['SHE', 'TAUGHT', 'HER', 'DAUGHTER', 'THEN', 'BY', 'HER', 'OWN', 'AFFECTION', 'FOR', 'IT', 'THAT', 'LOVE', 'FOR', 'A', 'COUNTRY', 'WHERE', 'THEY', 'HAD', 'BOTH', 'BEEN', 'HOSPITABLY', 'RECEIVED', 'AND', 'WHERE', 'A', 'BRILLIANT', 'FUTURE', 'OPENED', 'BEFORE', 'THEM'] +6930-75918-0004-4: hyp=['SHE', 'TAUGHT', 'HER', 'DAUGHTER', 'THEN', 
'BY', 'HER', 'OWN', 'AFFECTION', 'FOR', 'IT', 'THAT', 'LOVE', 'FOR', 'A', 'COUNTRY', 'WHERE', 'THEY', 'HAD', 'BOTH', 'BEEN', 'HOSPITABLY', 'RECEIVED', 'AND', 'WHERE', 'A', 'BRILLIANT', 'FUTURE', 'OPENED', 'FOR', 'THEM'] +6930-75918-0005-5: ref=['THE', 'COUNT', 'HAD', 'THROWN', 'HIMSELF', 'BACK', 'ON', 'HIS', 'SEAT', 'LEANING', 'HIS', 'SHOULDERS', 'AGAINST', 'THE', 'PARTITION', 'OF', 'THE', 'TENT', 'AND', 'REMAINED', 'THUS', 'HIS', 'FACE', 'BURIED', 'IN', 'HIS', 'HANDS', 'WITH', 'HEAVING', 'CHEST', 'AND', 'RESTLESS', 'LIMBS'] +6930-75918-0005-5: hyp=['THE', 'COUNT', 'HAD', 'THROWN', 'HIMSELF', 'BACK', 'ON', 'HIS', 'SEAT', 'LEANING', 'HIS', 'SHOULDERS', 'AGAINST', 'THE', 'PARTITION', 'OF', 'THE', 'TENT', 'AND', 'REMAINED', 'THUS', 'HIS', 'FACE', 'BURIED', 'IN', 'HIS', 'HANDS', 'WITH', 'HEAVING', 'CHEST', 'AND', 'RESTLESS', 'LIMBS'] +6930-75918-0006-6: ref=['THIS', 'HAS', 'INDEED', 'BEEN', 'A', 'HARASSING', 'DAY', 'CONTINUED', 'THE', 'YOUNG', 'MAN', 'HIS', 'EYES', 'FIXED', 'UPON', 'HIS', 'FRIEND'] +6930-75918-0006-6: hyp=['THIS', 'HAS', 'INDEED', 'BEEN', 'A', 'HARASSING', 'DAY', 'CONTINUED', 'THE', 'YOUNG', 'MAN', 'HIS', 'EYES', 'FIXED', 'UPON', 'HIS', 'FRIEND'] +6930-75918-0007-7: ref=['YOU', 'WILL', 'BE', 'FRANK', 'WITH', 'ME', 'I', 'ALWAYS', 'AM'] +6930-75918-0007-7: hyp=['YOU', 'WILL', 'BE', 'FRANK', 'WITH', 'ME', 'I', 'ALWAYS', 'AM'] +6930-75918-0008-8: ref=['CAN', 'YOU', 'IMAGINE', 'WHY', 'BUCKINGHAM', 'HAS', 'BEEN', 'SO', 'VIOLENT', 'I', 'SUSPECT'] +6930-75918-0008-8: hyp=['CAN', 'YOU', 'IMAGINE', 'MY', 'BUCKINGHAM', 'HAS', 'BEEN', 'SO', 'VIOLENT', 'I', 'SUSPECT'] +6930-75918-0009-9: ref=['IT', 'IS', 'YOU', 'WHO', 'ARE', 'MISTAKEN', 'RAOUL', 'I', 'HAVE', 'READ', 'HIS', 'DISTRESS', 'IN', 'HIS', 'EYES', 'IN', 'HIS', 'EVERY', 'GESTURE', 'AND', 'ACTION', 'THE', 'WHOLE', 'DAY'] +6930-75918-0009-9: hyp=['IT', 'IS', 'YOU', 'WHO', 'ARE', 'MISTAKEN', 'RAOUL', 'I', 'HAVE', 'READ', 'HIS', 'DISTRESS', 'IN', 'HIS', 'EYES', 'IN', 'HIS', 'EVERY', 'GESTURE', 'AND', 'ACTION', 'THE', 'WHOLE', 'DAY'] +6930-75918-0010-10: ref=['I', 'CAN', 'PERCEIVE', 'LOVE', 'CLEARLY', 'ENOUGH'] +6930-75918-0010-10: hyp=['I', 'CAN', 'PERCEIVE', 'LOVE', 'CLEARLY', 'ENOUGH'] +6930-75918-0011-11: ref=['I', 'AM', 'CONVINCED', 'OF', 'WHAT', 'I', 'SAY', 'SAID', 'THE', 'COUNT'] +6930-75918-0011-11: hyp=['I', 'AM', 'CONVINCED', 'OF', 'WHAT', 'I', 'SAY', 'SAID', 'THE', 'COUNT'] +6930-75918-0012-12: ref=['IT', 'IS', 'ANNOYANCE', 'THEN'] +6930-75918-0012-12: hyp=['IT', 'IS', 'ANNOYANCE', 'THEN'] +6930-75918-0013-13: ref=['IN', 'THOSE', 'VERY', 'TERMS', 'I', 'EVEN', 'ADDED', 'MORE'] +6930-75918-0013-13: hyp=['IN', 'THOSE', 'VERY', 'TERMS', 'I', 'EVEN', 'ADDED', 'MORE'] +6930-75918-0014-14: ref=['BUT', 'CONTINUED', 'RAOUL', 'NOT', 'INTERRUPTED', 'BY', 'THIS', 'MOVEMENT', 'OF', 'HIS', 'FRIEND', 'HEAVEN', 'BE', 'PRAISED', 'THE', 'FRENCH', 'WHO', 'ARE', 'PRONOUNCED', 'TO', 'BE', 'THOUGHTLESS', 'AND', 'INDISCREET', 'RECKLESS', 'EVEN', 'ARE', 'CAPABLE', 'OF', 'BRINGING', 'A', 'CALM', 'AND', 'SOUND', 'JUDGMENT', 'TO', 'BEAR', 'ON', 'MATTERS', 'OF', 'SUCH', 'HIGH', 'IMPORTANCE'] +6930-75918-0014-14: hyp=['BUT', 'CONTINUED', 'RAOUL', 'NOT', 'INTERRUPTED', 'BY', 'THIS', 'MOVEMENT', 'OF', 'HIS', 'FRIEND', 'HEAVEN', 'BE', 'PRAISED', 'THE', 'FRENCH', 'WHO', 'ARE', 'PRONOUNCED', 'TO', 'BE', 'THOUGHTLESS', 'AND', 'INDISCREET', 'RECKLESS', 'EVEN', 'ARE', 'CAPABLE', 'OF', 'BRINGING', 'A', 'CALM', 'AND', 'SOUND', 'JUDGMENT', 'TO', 'BARON', 'MATTERS', 'OF', 'SUCH', 'HIGH', 'IMPORTANCE'] +6930-75918-0015-15: ref=['THUS', 'IT', 'IS', 'THAT', 'THE', 
'HONOR', 'OF', 'THREE', 'IS', 'SAVED', 'OUR', "COUNTRY'S", 'OUR', "MASTER'S", 'AND', 'OUR', 'OWN'] +6930-75918-0015-15: hyp=['THUS', 'IT', 'IS', 'THAT', 'THE', 'HONOR', 'OF', 'THREE', 'IS', 'SAVED', 'OUR', 'COUNTRY', 'OUR', 'MASTERS', 'AND', 'OUR', 'OWN'] +6930-75918-0016-16: ref=['YES', 'I', 'NEED', 'REPOSE', 'MANY', 'THINGS', 'HAVE', 'AGITATED', 'ME', 'TO', 'DAY', 'BOTH', 'IN', 'MIND', 'AND', 'BODY', 'WHEN', 'YOU', 'RETURN', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'THE', 'SAME', 'MAN'] +6930-75918-0016-16: hyp=['YES', 'I', 'NEED', 'REPOSE', 'MANY', 'THINGS', 'HAVE', 'AGITATED', 'ME', 'TO', 'DAY', 'BOTH', 'IN', 'MIND', 'AND', 'BODY', 'WHEN', 'YOU', 'RETURN', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'THE', 'SAME', 'MAN'] +6930-75918-0017-17: ref=['BUT', 'IN', 'THIS', 'FRIENDLY', 'PRESSURE', 'RAOUL', 'COULD', 'DETECT', 'THE', 'NERVOUS', 'AGITATION', 'OF', 'A', 'GREAT', 'INTERNAL', 'CONFLICT'] +6930-75918-0017-17: hyp=['BUT', 'IN', 'THIS', 'FRIENDLY', 'PRESSURE', 'RAOUL', 'COULD', 'DETECT', 'THE', 'NERVOUS', 'AGITATION', 'OF', 'A', 'GREAT', 'INTERNAL', 'CONFLICT'] +6930-75918-0018-18: ref=['THE', 'NIGHT', 'WAS', 'CLEAR', 'STARLIT', 'AND', 'SPLENDID', 'THE', 'TEMPEST', 'HAD', 'PASSED', 'AWAY', 'AND', 'THE', 'SWEET', 'INFLUENCES', 'OF', 'THE', 'EVENING', 'HAD', 'RESTORED', 'LIFE', 'PEACE', 'AND', 'SECURITY', 'EVERYWHERE'] +6930-75918-0018-18: hyp=['THE', 'NIGHT', 'WAS', 'CLEAR', 'STARLIT', 'AND', 'SPLENDID', 'THE', 'TEMPEST', 'HAD', 'PASSED', 'AWAY', 'AND', 'THE', 'SWEET', 'INFLUENCES', 'OF', 'THE', 'EVENING', 'HAD', 'RESTORED', 'LIFE', 'PEACE', 'AND', 'SECURITY', 'EVERYWHERE'] +6930-75918-0019-19: ref=['UPON', 'THE', 'LARGE', 'SQUARE', 'IN', 'FRONT', 'OF', 'THE', 'HOTEL', 'THE', 'SHADOWS', 'OF', 'THE', 'TENTS', 'INTERSECTED', 'BY', 'THE', 'GOLDEN', 'MOONBEAMS', 'FORMED', 'AS', 'IT', 'WERE', 'A', 'HUGE', 'MOSAIC', 'OF', 'JET', 'AND', 'YELLOW', 'FLAGSTONES'] +6930-75918-0019-19: hyp=['UPON', 'THE', 'LARGE', 'SQUARE', 'IN', 'FRONT', 'OF', 'THE', 'HOTEL', 'THE', 'SHADOWS', 'OF', 'THE', 'TENTS', 'INTERSECTED', 'BY', 'THE', 'GOLDEN', 'MOONBEAMS', 'FORMED', 'AS', 'IT', 'WERE', 'A', 'HUGE', 'MOSAIC', 'OF', 'JET', 'AND', 'YELLOW', 'FLAGSTONES'] +6930-75918-0020-20: ref=['BRAGELONNE', 'WATCHED', 'FOR', 'SOME', 'TIME', 'THE', 'CONDUCT', 'OF', 'THE', 'TWO', 'LOVERS', 'LISTENED', 'TO', 'THE', 'LOUD', 'AND', 'UNCIVIL', 'SLUMBERS', 'OF', 'MANICAMP', 'WHO', 'SNORED', 'AS', 'IMPERIOUSLY', 'AS', 'THOUGH', 'HE', 'WAS', 'WEARING', 'HIS', 'BLUE', 'AND', 'GOLD', 'INSTEAD', 'OF', 'HIS', 'VIOLET', 'SUIT'] +6930-75918-0020-20: hyp=['BRAGGLIN', 'WATCHED', 'FOR', 'SOME', 'TIME', 'THE', 'CONDUCT', 'OF', 'THE', 'TWO', 'LOVERS', 'LISTENED', 'TO', 'THE', 'LOUD', 'AND', 'UNCIVIL', 'SLUMBERS', 'OF', 'MANICAMP', 'WHO', 'SNORED', 'AS', 'IMPERIOUSLY', 'AS', 'THOUGH', 'HE', 'WAS', 'WEARING', 'HIS', 'BLUE', 'AND', 'GOLD', 'INSTEAD', 'OF', 'HIS', 'VIOLET', 'SUIT'] +6930-76324-0000-21: ref=['GOLIATH', 'MAKES', 'ANOTHER', 'DISCOVERY'] +6930-76324-0000-21: hyp=['GOLIATH', 'MAKES', 'ANOTHER', 'DISCOVERY'] +6930-76324-0001-22: ref=['THEY', 'WERE', 'CERTAINLY', 'NO', 'NEARER', 'THE', 'SOLUTION', 'OF', 'THEIR', 'PROBLEM'] +6930-76324-0001-22: hyp=['THERE', 'WERE', 'CERTAINLY', 'NO', 'NEAR', 'THE', 'SOLUTION', 'OF', 'THEIR', 'PROBLEM'] +6930-76324-0002-23: ref=['THE', 'POOR', 'LITTLE', 'THINGS', 'CRIED', 'CYNTHIA', 'THINK', 'OF', 'THEM', 'HAVING', 'BEEN', 'TURNED', 'TO', 'THE', 'WALL', 'ALL', 'THESE', 'YEARS'] +6930-76324-0002-23: hyp=['THE', 'POOR', 'LITTLE', 'THINGS', 'CRIED', 'CYNTHIA', 'THINK', 'OF', 'THEM', 
'HAVING', 'BEEN', 'TURNED', 'TO', 'THE', 'WALL', 'ALL', 'THESE', 'YEARS'] +6930-76324-0003-24: ref=['NOW', 'WHAT', 'WAS', 'THE', 'SENSE', 'OF', 'IT', 'TWO', 'INNOCENT', 'BABIES', 'LIKE', 'THAT'] +6930-76324-0003-24: hyp=['NOW', 'WHAT', 'IS', 'THE', 'SENSE', 'OF', 'IT', 'TOO', 'INNOCENT', 'BABIES', 'LIKE', 'THAT'] +6930-76324-0004-25: ref=['BUT', 'JOYCE', 'HAD', 'NOT', 'BEEN', 'LISTENING', 'ALL', 'AT', 'ONCE', 'SHE', 'PUT', 'DOWN', 'HER', 'CANDLE', 'ON', 'THE', 'TABLE', 'AND', 'FACED', 'HER', 'COMPANION'] +6930-76324-0004-25: hyp=['BUT', 'JOYCE', 'HAD', 'NOT', 'BEEN', 'LISTENING', 'ALL', 'AT', 'ONCE', 'SHE', 'PUT', 'DOWN', 'HER', 'CANDLE', 'ON', 'THE', 'TABLE', 'AND', 'FACED', 'HER', 'COMPANION'] +6930-76324-0005-26: ref=['THE', 'TWIN', 'BROTHER', 'DID', 'SOMETHING', 'SHE', "DIDN'T", 'LIKE', 'AND', 'SHE', 'TURNED', 'HIS', 'PICTURE', 'TO', 'THE', 'WALL'] +6930-76324-0005-26: hyp=['THE', 'TWIN', 'BROTHER', 'DID', 'SOMETHING', 'SHE', "DIDN'T", 'LIKE', 'AND', 'SHE', 'TURNED', 'HIS', 'PICTURE', 'TO', 'THE', 'WALL'] +6930-76324-0006-27: ref=['HERS', 'HAPPENED', 'TO', 'BE', 'IN', 'THE', 'SAME', 'FRAME', 'TOO', 'BUT', 'SHE', 'EVIDENTLY', "DIDN'T", 'CARE', 'ABOUT', 'THAT'] +6930-76324-0006-27: hyp=['HERS', 'HAPPENED', 'TO', 'BE', 'ON', 'THE', 'SAME', 'FRAME', 'TOO', 'BUT', 'SHE', 'EVIDENTLY', "DIDN'T", 'CARE', 'ABOUT', 'IT'] +6930-76324-0007-28: ref=['NOW', 'WHAT', 'HAVE', 'YOU', 'TO', 'SAY', 'CYNTHIA', 'SPRAGUE'] +6930-76324-0007-28: hyp=['NOW', 'WHAT', 'HAVE', 'YOU', 'TO', 'SAY', "CYNTHIA'S", 'BROGG'] +6930-76324-0008-29: ref=['I', 'THOUGHT', 'WE', 'WERE', 'STUMPED', 'AGAIN', 'WHEN', 'I', 'FIRST', 'SAW', 'THAT', 'PICTURE', 'BUT', "IT'S", 'BEEN', 'OF', 'SOME', 'USE', 'AFTER', 'ALL'] +6930-76324-0008-29: hyp=['I', 'THOUGHT', 'WE', 'WERE', 'STUMPED', 'AGAIN', 'WHEN', 'I', 'FIRST', 'SAW', 'THAT', 'PICTURE', 'BUT', 'IT', 'SPIN', 'OF', 'SOME', 'USE', 'AFTER', 'ALL'] +6930-76324-0009-30: ref=['DO', 'YOU', 'SUPPOSE', 'THE', 'MINIATURE', 'WAS', 'A', 'COPY', 'OF', 'THE', 'SAME', 'THING'] +6930-76324-0009-30: hyp=['DO', 'YOU', 'SUPPOSE', 'THE', 'MINIATURE', 'WAS', 'A', 'COPY', 'OF', 'THE', 'SAME', 'THING'] +6930-76324-0010-31: ref=['WHAT', 'IN', 'THE', 'WORLD', 'IS', 'THAT', 'QUERIED', 'JOYCE'] +6930-76324-0010-31: hyp=['ONE', 'IN', 'THE', 'WORLD', 'IS', 'IT', 'QUERIED', 'JOYCE'] +6930-76324-0011-32: ref=['THEY', 'WORRY', 'ME', 'TERRIBLY', 'AND', 'BESIDES', "I'D", 'LIKE', 'TO', 'SEE', 'WHAT', 'THIS', 'LOVELY', 'FURNITURE', 'LOOKS', 'LIKE', 'WITHOUT', 'SUCH', 'QUANTITIES', 'OF', 'DUST', 'ALL', 'OVER', 'IT', 'GOOD', 'SCHEME', 'CYN'] +6930-76324-0011-32: hyp=['MAY', 'WORRY', 'ME', 'TERRIBLY', 'EMBICIDES', "I'D", 'LIKE', 'TO', 'SEE', 'WHAT', 'THIS', 'LOVELY', 'FURNITURE', 'LOOKS', 'LIKE', 'WITHOUT', 'SUCH', 'QUANTITIES', 'OF', 'DUST', 'ALL', 'OVER', 'IT', 'GOOD', 'SCHEME', 'SIN'] +6930-76324-0012-33: ref=["WE'LL", 'COME', 'IN', 'HERE', 'THIS', 'AFTERNOON', 'WITH', 'OLD', 'CLOTHES', 'ON', 'AND', 'HAVE', 'A', 'REGULAR', 'HOUSE', 'CLEANING'] +6930-76324-0012-33: hyp=['OR', 'COME', 'IN', 'HERE', 'THIS', 'AFTERNOON', 'WITH', 'OLD', 'CLOTHES', 'ON', 'AND', 'HALF', 'A', 'REGULAR', 'HOUSE', 'CLEANING'] +6930-76324-0013-34: ref=['IT', "CAN'T", 'HURT', 'ANYTHING', "I'M", 'SURE', 'FOR', 'WE', "WON'T", 'DISTURB', 'THINGS', 'AT', 'ALL'] +6930-76324-0013-34: hyp=['YOU', "CAN'T", 'HURT', 'ANYTHING', "I'M", 'SURE', 'FOR', 'WE', "WON'T", 'DISTURB', 'THINGS', 'AT', 'ALL'] +6930-76324-0014-35: ref=['THIS', 'THOUGHT', 'HOWEVER', 'DID', 'NOT', 'ENTER', 'THE', 'HEADS', 'OF', 'THE', 'ENTHUSIASTIC', 'PAIR'] +6930-76324-0014-35: 
hyp=['THIS', 'THOUGHT', 'HOWEVER', 'DID', 'NOT', 'ENTER', 'THE', 'HEADS', 'OF', 'THE', 'ENTHUSIASTIC', 'PAIR'] +6930-76324-0015-36: ref=['SMUGGLING', 'THE', 'HOUSE', 'CLEANING', 'PARAPHERNALIA', 'INTO', 'THE', 'CELLAR', 'WINDOW', 'UNOBSERVED', 'THAT', 'AFTERNOON', 'PROVED', 'NO', 'EASY', 'TASK', 'FOR', 'CYNTHIA', 'HAD', 'ADDED', 'A', 'WHISK', 'BROOM', 'AND', 'DUST', 'PAN', 'TO', 'THE', 'OUTFIT'] +6930-76324-0015-36: hyp=['SMUGGLING', 'IN', 'THE', 'HOUSE', 'CLEANING', 'PAIR', 'OF', 'ANALIA', 'INTO', 'THE', 'CELLAR', 'WINDOW', 'UNOBSERVED', 'THAT', 'AFTERNOON', 'PROVED', 'NO', 'EASY', 'TASK', 'FOR', 'CYNTHIA', 'HAD', 'ADDED', 'A', 'WHISK', 'BROOM', 'AND', 'DUST', 'PAN', 'TO', 'THE', 'OUTFIT'] +6930-76324-0016-37: ref=['THE', 'LURE', 'PROVED', 'TOO', 'MUCH', 'FOR', 'HIM', 'AND', 'HE', 'CAME', 'SPORTING', 'AFTER', 'IT', 'AS', 'FRISKILY', 'AS', 'A', 'YOUNG', 'KITTEN', 'MUCH', 'TO', "CYNTHIA'S", 'DELIGHT', 'WHEN', 'SHE', 'CAUGHT', 'SIGHT', 'OF', 'HIM'] +6930-76324-0016-37: hyp=['THE', 'LURE', 'PROVED', 'TOO', 'MUCH', 'FOR', 'HIM', 'AND', 'HE', 'CAME', 'SPORTING', 'AFTER', 'IT', 'AS', 'FRISKLY', 'AS', 'A', 'YOUNG', 'KITTEN', 'MUCH', 'TO', "CYNTHIA'S", 'DELIGHT', 'WHEN', 'SHE', 'CAUGHT', 'SIGHT', 'OF', 'HIM'] +6930-76324-0017-38: ref=['OH', 'LET', 'HIM', 'COME', 'ALONG', 'SHE', 'URGED', 'I', 'DO', 'LOVE', 'TO', 'SEE', 'HIM', 'ABOUT', 'THAT', 'OLD', 'HOUSE'] +6930-76324-0017-38: hyp=['OH', 'LET', 'HIM', 'COME', 'ALONG', 'SHE', 'URGED', 'I', 'DO', 'LOVE', 'TO', 'SEE', 'HIM', 'ABOUT', 'THAT', 'OLD', 'HOUSE'] +6930-76324-0018-39: ref=['HE', 'MAKES', 'IT', 'SORT', 'OF', 'COZIER'] +6930-76324-0018-39: hyp=['HE', 'MAKES', 'IT', 'SORT', 'OF', 'COSIER'] +6930-76324-0019-40: ref=['NOW', "LET'S", 'DUST', 'THE', 'FURNITURE', 'AND', 'PICTURES'] +6930-76324-0019-40: hyp=['NOW', 'ITS', 'DUST', 'THE', 'FURNITURE', 'AND', 'PICTURES'] +6930-76324-0020-41: ref=['YET', 'LITTLE', 'AS', 'IT', 'WAS', 'IT', 'HAD', 'ALREADY', 'MADE', 'A', 'VAST', 'DIFFERENCE', 'IN', 'THE', 'ASPECT', 'OF', 'THE', 'ROOM'] +6930-76324-0020-41: hyp=['YET', 'LITTLE', 'AS', 'IT', 'WAS', 'IT', 'HAD', 'ALREADY', 'MADE', 'A', 'VAST', 'DIFFERENCE', 'IN', 'THE', 'ASPECT', 'OF', 'THE', 'ROOM'] +6930-76324-0021-42: ref=['SURFACE', 'DUST', 'AT', 'LEAST', 'HAD', 'BEEN', 'REMOVED', 'AND', 'THE', 'FINE', 'OLD', 'FURNITURE', 'GAVE', 'A', 'HINT', 'OF', 'ITS', 'REAL', 'ELEGANCE', 'AND', 'POLISH'] +6930-76324-0021-42: hyp=['SURFACE', 'DUS', 'AT', 'LEAST', 'HAD', 'BEEN', 'REMOVED', 'AND', 'THE', 'FINE', 'OLD', 'FURNITURE', 'GAVE', 'A', 'HINT', 'OF', 'ITS', 'REAL', 'ELEGANCE', 'AND', 'POLISH'] +6930-76324-0022-43: ref=['THEN', 'SHE', 'SUDDENLY', 'REMARKED'] +6930-76324-0022-43: hyp=['THEN', 'SHE', 'SUDDENLY', 'REMARKED'] +6930-76324-0023-44: ref=['AND', 'MY', 'POCKET', 'MONEY', 'IS', 'GETTING', 'LOW', 'AGAIN', 'AND', 'YOU', "HAVEN'T", 'ANY', 'LEFT', 'AS', 'USUAL'] +6930-76324-0023-44: hyp=['AND', 'MY', 'POCKET', 'MONEY', 'IS', 'GETTING', 'LOW', 'AGAIN', 'AND', 'YOU', "HAVEN'T", 'ANY', 'LEFT', 'AS', 'USUAL'] +6930-76324-0024-45: ref=['THEY', 'SAY', 'ILLUMINATION', 'BY', 'CANDLE', 'LIGHT', 'IS', 'THE', 'PRETTIEST', 'IN', 'THE', 'WORLD'] +6930-76324-0024-45: hyp=['THEY', 'SAY', 'ILLUMINATION', 'BY', 'CANDLE', 'LIGHT', 'IS', 'THE', 'PRETTIEST', 'IN', 'THE', 'WORLD'] +6930-76324-0025-46: ref=['WHY', "IT'S", 'GOLIATH', 'AS', 'USUAL', 'THEY', 'BOTH', 'CRIED', 'PEERING', 'IN'] +6930-76324-0025-46: hyp=['WHY', 'IT', 'GOLIATH', 'AS', 'USUAL', 'THEY', 'BOTH', 'CRIED', 'PEERING', 'IN'] +6930-76324-0026-47: ref=["ISN'T", 'HE', 'THE', 'GREATEST', 'FOR', 'GETTING', 'INTO', 
'ODD', 'CORNERS'] +6930-76324-0026-47: hyp=["ISN'T", 'HE', 'THE', 'GREATEST', 'FOR', 'GETTING', 'INTO', 'ODD', 'CORNERS'] +6930-76324-0027-48: ref=['FORGETTING', 'ALL', 'THEIR', 'WEARINESS', 'THEY', 'SEIZED', 'THEIR', 'CANDLES', 'AND', 'SCURRIED', 'THROUGH', 'THE', 'HOUSE', 'FINDING', 'AN', 'OCCASIONAL', 'PAPER', 'TUCKED', 'AWAY', 'IN', 'SOME', 'ODD', 'CORNER'] +6930-76324-0027-48: hyp=['FORGETTING', 'ALL', 'THEIR', 'WEARINESS', 'THEY', 'SEIZED', 'THEIR', 'CANDLES', 'AND', 'SCURRIED', 'THROUGH', 'THE', 'HOUSE', 'FINDING', 'ON', 'OCCASIONAL', 'PAPER', 'TUCKED', 'AWAY', 'IN', 'SOME', 'ODD', 'CORNER'] +6930-76324-0028-49: ref=['WELL', "I'M", 'CONVINCED', 'THAT', 'THE', 'BOARDED', 'UP', 'HOUSE', 'MYSTERY', 'HAPPENED', 'NOT', 'EARLIER', 'THAN', 'APRIL', 'SIXTEENTH', 'EIGHTEEN', 'SIXTY', 'ONE', 'AND', 'PROBABLY', 'NOT', 'MUCH', 'LATER'] +6930-76324-0028-49: hyp=['WELL', "I'M", 'CONVINCED', 'THAT', 'THE', 'BOARDED', 'UP', 'HOUSE', 'MYSTERY', 'HAPPENED', 'NOT', 'EARLIER', 'THAN', 'APRIL', 'SIXTEENTH', 'EIGHTEEN', 'SIXTY', 'ONE', 'AND', 'PROBABLY', 'NOT', 'MUCH', 'LATER'] +6930-81414-0000-50: ref=['NO', 'WORDS', 'WERE', 'SPOKEN', 'NO', 'LANGUAGE', 'WAS', 'UTTERED', 'SAVE', 'THAT', 'OF', 'WAILING', 'AND', 'HISSING', 'AND', 'THAT', 'SOMEHOW', 'WAS', 'INDISTINCT', 'AS', 'IF', 'IT', 'EXISTED', 'IN', 'FANCY', 'AND', 'NOT', 'IN', 'REALITY'] +6930-81414-0000-50: hyp=['NO', 'WORDS', 'WERE', 'SPOKEN', 'NO', 'LANGUAGE', 'WAS', 'UTTERED', 'SAVE', 'THAT', 'OF', 'WAILING', 'AND', 'HISSING', 'AND', 'THAT', 'SOMEHOW', 'WAS', 'INDISTINCT', 'AS', 'IF', 'IT', 'EXISTED', 'IN', 'FANCY', 'AND', 'NOT', 'IN', 'REALITY'] +6930-81414-0001-51: ref=['I', 'HEARD', 'A', 'NOISE', 'BEHIND', 'I', 'TURNED', 'AND', 'SAW', 'KAFFAR', 'HIS', 'BLACK', 'EYES', 'SHINING', 'WHILE', 'IN', 'HIS', 'HAND', 'HE', 'HELD', 'A', 'GLEAMING', 'KNIFE', 'HE', 'LIFTED', 'IT', 'ABOVE', 'HIS', 'HEAD', 'AS', 'IF', 'TO', 'STRIKE', 'BUT', 'I', 'HAD', 'THE', 'STRENGTH', 'OF', 'TEN', 'MEN', 'AND', 'I', 'HURLED', 'HIM', 'FROM', 'ME'] +6930-81414-0001-51: hyp=['I', 'HEARD', 'A', 'NOISE', 'BEHIND', 'I', 'TURNED', 'AND', 'SAW', 'KAFFIR', 'HIS', 'BLACK', 'EYES', 'SHINING', 'WHILE', 'IN', 'HIS', 'HAND', 'HE', 'HELD', 'A', 'GLEAMING', 'KNIFE', 'HE', 'LIFTED', 'IT', 'ABOVE', 'HIS', 'HEAD', 'AS', 'IF', 'TO', 'STRIKE', 'BUT', 'I', 'HAD', 'THE', 'STRENGTH', 'OF', 'TEN', 'MEN', 'AND', 'I', 'HURLED', 'HIM', 'FROM', 'ME'] +6930-81414-0002-52: ref=['ONWARD', 'SAID', 'A', 'DISTANT', 'VOICE'] +6930-81414-0002-52: hyp=['ONWARD', 'SAID', 'A', 'DISTANT', 'VOICE'] +6930-81414-0003-53: ref=['NO', 'SOUND', 'BROKE', 'THE', 'STILLNESS', 'OF', 'THE', 'NIGHT'] +6930-81414-0003-53: hyp=['NO', 'SOUND', 'BROKE', 'THE', 'STILLNESS', 'OF', 'THE', 'NIGHT'] +6930-81414-0004-54: ref=['THE', 'STORY', 'OF', 'ITS', 'EVIL', 'INFLUENCE', 'CAME', 'BACK', 'TO', 'ME', 'AND', 'IN', 'MY', 'BEWILDERED', 'CONDITION', 'I', 'WONDERED', 'WHETHER', 'THERE', 'WAS', 'NOT', 'SOME', 'TRUTH', 'IN', 'WHAT', 'HAD', 'BEEN', 'SAID'] +6930-81414-0004-54: hyp=['THE', 'STORY', 'OF', 'ITS', 'EVIL', 'INFLUENCE', 'CAME', 'BACK', 'TO', 'ME', 'AND', 'IN', 'MY', 'BEWILDERED', 'CONDITION', 'I', 'WONDERED', 'WHETHER', 'THERE', 'WAS', 'NOT', 'SOME', 'TRUTH', 'IN', 'WHAT', 'HAD', 'BEEN', 'SAID'] +6930-81414-0005-55: ref=['WHAT', 'WAS', 'THAT'] +6930-81414-0005-55: hyp=['WHAT', 'WAS', 'THAT'] +6930-81414-0006-56: ref=['WHAT', 'THEN', 'A', 'HUMAN', 'HAND', 'LARGE', 'AND', 'SHAPELY', 'APPEARED', 'DISTINCTLY', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'POND'] +6930-81414-0006-56: hyp=['WHAT', 'THEN', 'A', 'HUMAN', 'HAND', 'LARGE', 'AND', 
'SHABBLY', 'APPEARED', 'DISTINCTLY', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'POND'] +6930-81414-0007-57: ref=['NOTHING', 'MORE', 'NOT', 'EVEN', 'THE', 'WRIST', 'TO', 'WHICH', 'IT', 'MIGHT', 'BE', 'ATTACHED'] +6930-81414-0007-57: hyp=['NOTHING', 'MORE', 'NOT', 'EVEN', 'THE', 'WRIST', 'TO', 'WHICH', 'IT', 'MIGHT', 'BE', 'ATTACHED'] +6930-81414-0008-58: ref=['IT', 'DID', 'NOT', 'BECKON', 'OR', 'INDEED', 'MOVE', 'AT', 'ALL', 'IT', 'WAS', 'AS', 'STILL', 'AS', 'THE', 'HAND', 'OF', 'DEATH'] +6930-81414-0008-58: hyp=['IT', 'DID', 'NOT', 'BECKON', 'OR', 'INDEED', 'MOVE', 'AT', 'ALL', 'IT', 'WAS', 'AS', 'STILL', 'AS', 'THE', 'HAND', 'OF', 'DEATH'] +6930-81414-0009-59: ref=['I', 'AWOKE', 'TO', 'CONSCIOUSNESS', 'FIGHTING', 'AT', 'FIRST', 'IT', 'SEEMED', 'AS', 'IF', 'I', 'WAS', 'FIGHTING', 'WITH', 'A', 'PHANTOM', 'BUT', 'GRADUALLY', 'MY', 'OPPONENT', 'BECAME', 'MORE', 'REAL', 'TO', 'ME', 'IT', 'WAS', 'KAFFAR'] +6930-81414-0009-59: hyp=['I', 'AWOKE', 'TO', 'CONSCIOUSNESS', 'FIGHTING', 'AT', 'FIRST', 'IT', 'SEEMED', 'AS', 'IF', 'I', 'WAS', 'FIGHTING', 'WITH', 'THE', 'PHANTOM', 'BUT', 'GRADUALLY', 'MY', 'OPPONENT', 'BECAME', 'MORE', 'REAL', 'TO', 'ME', 'IT', 'WAS', 'KAFFIR'] +6930-81414-0010-60: ref=['A', 'SOUND', 'OF', 'VOICES', 'A', 'FLASH', 'OF', 'LIGHT'] +6930-81414-0010-60: hyp=['A', 'SOUND', 'OF', 'VOICES', 'A', 'FLASH', 'OF', 'LIGHT'] +6930-81414-0011-61: ref=['A', 'FEELING', 'OF', 'FREEDOM', 'AND', 'I', 'WAS', 'AWAKE', 'WHERE'] +6930-81414-0011-61: hyp=['A', 'FEELING', 'OF', 'FREEDOM', 'AND', 'I', 'WAS', 'AWAKE', 'WHERE'] +6930-81414-0012-62: ref=['SAID', 'ANOTHER', 'VOICE', 'WHICH', 'I', 'RECOGNIZED', 'AS', "VOLTAIRE'S", 'KAFFAR'] +6930-81414-0012-62: hyp=['SAID', 'ANOTHER', 'VOICE', 'WHICH', 'I', 'RECOGNIZED', 'AS', "VOLTAIRE'S", 'KAFFIR'] +6930-81414-0013-63: ref=['I', 'HAD', 'SCARCELY', 'KNOWN', 'WHAT', 'I', 'HAD', 'BEEN', 'SAYING', 'OR', 'DOING', 'UP', 'TO', 'THIS', 'TIME', 'BUT', 'AS', 'HE', 'SPOKE', 'I', 'LOOKED', 'AT', 'MY', 'HAND'] +6930-81414-0013-63: hyp=['I', 'HAD', 'SCARCELY', 'KNOWN', 'WHEN', 'I', 'HAD', 'BEEN', 'SAYING', 'OR', 'DOING', 'UP', 'TO', 'THIS', 'TIME', 'BUT', 'AS', 'HE', 'SPOKE', 'I', 'LOOKED', 'AT', 'MY', 'HAND'] +6930-81414-0014-64: ref=['IN', 'THE', 'LIGHT', 'OF', 'THE', 'MOON', 'I', 'SAW', 'A', 'KNIFE', 'RED', 'WITH', 'BLOOD', 'AND', 'MY', 'HAND', 'TOO', 'WAS', 'ALSO', 'DISCOLOURED'] +6930-81414-0014-64: hyp=['IN', 'THE', 'LIGHT', 'OF', 'THE', 'MOON', 'I', 'SAW', 'A', 'KNIFE', 'RED', 'WITH', 'BLOOD', 'AND', 'MY', 'HAND', 'TOO', 'WAS', 'ALSO', 'DISCOLORED'] +6930-81414-0015-65: ref=['I', 'DO', 'NOT', 'KNOW', 'I', 'AM', 'DAZED', 'BEWILDERED'] +6930-81414-0015-65: hyp=['I', 'DO', 'NOT', 'KNOW', 'I', 'AM', 'DAZED', 'BEWILDERED'] +6930-81414-0016-66: ref=['BUT', 'THAT', 'IS', "KAFFAR'S", 'KNIFE'] +6930-81414-0016-66: hyp=['BUT', 'THAT', 'IS', "KAFFIR'S", 'KNIFE'] +6930-81414-0017-67: ref=['I', 'KNOW', 'HE', 'HAD', 'IT', 'THIS', 'VERY', 'EVENING'] +6930-81414-0017-67: hyp=['I', 'KNOW', 'HE', 'HAD', 'IT', 'THIS', 'VERY', 'EVENING'] +6930-81414-0018-68: ref=['I', 'REMEMBER', 'SAYING', 'HAVE', 'WE', 'BEEN', 'TOGETHER'] +6930-81414-0018-68: hyp=['I', 'REMEMBERED', 'SAYING', 'HAVE', 'WE', 'BEEN', 'TOGETHER'] +6930-81414-0019-69: ref=['VOLTAIRE', 'PICKED', 'UP', 'SOMETHING', 'FROM', 'THE', 'GROUND', 'AND', 'LOOKED', 'AT', 'IT'] +6930-81414-0019-69: hyp=['OLD', 'CHAIR', 'PICKED', 'UP', 'SOMETHING', 'FROM', 'THE', 'GROUND', 'AND', 'LOOKED', 'AT', 'IT'] +6930-81414-0020-70: ref=['I', 'SAY', 'YOU', 'DO', 'KNOW', 'WHAT', 'THIS', 'MEANS', 'AND', 'YOU', 'MUST', 'TELL', 'US'] 
+6930-81414-0020-70: hyp=['I', 'SAY', 'YOU', 'DO', 'KNOW', 'WHAT', 'THIS', 'MEANS', 'AND', 'YOU', 'MUST', 'TELL', 'US'] +6930-81414-0021-71: ref=['A', 'TERRIBLE', 'THOUGHT', 'FLASHED', 'INTO', 'MY', 'MIND'] +6930-81414-0021-71: hyp=['A', 'TERRIBLE', 'THOUGHT', 'FLASHED', 'INTO', 'MY', 'MIND'] +6930-81414-0022-72: ref=['I', 'HAD', 'AGAIN', 'BEEN', 'ACTING', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'THIS', "MAN'S", 'POWER'] +6930-81414-0022-72: hyp=['I', 'HAD', 'AGAIN', 'BEEN', 'ACTING', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'THIS', "MAN'S", 'POWER'] +6930-81414-0023-73: ref=['PERCHANCE', 'TOO', "KAFFAR'S", 'DEATH', 'MIGHT', 'SERVE', 'HIM', 'IN', 'GOOD', 'STEAD'] +6930-81414-0023-73: hyp=['PERCHANCE', 'TOO', "KAFFIR'S", 'DEATH', 'MIGHT', 'SERVE', 'HIM', 'IN', 'GOOD', 'STEAD'] +6930-81414-0024-74: ref=['MY', 'TONGUE', 'REFUSED', 'TO', 'ARTICULATE', 'MY', 'POWER', 'OF', 'SPEECH', 'LEFT', 'ME'] +6930-81414-0024-74: hyp=['MY', 'TONGUE', 'REFUSED', 'TO', 'ARTICULATE', 'MY', 'POWER', 'OF', 'SPEECH', 'LAUGHED', 'ME'] +6930-81414-0025-75: ref=['MY', 'POSITION', 'WAS', 'TOO', 'TERRIBLE'] +6930-81414-0025-75: hyp=['MY', 'POSITION', 'WAS', 'TOO', 'TERRIBLE'] +6930-81414-0026-76: ref=['MY', 'OVERWROUGHT', 'NERVES', 'YIELDED', 'AT', 'LAST'] +6930-81414-0026-76: hyp=['MY', 'OVERWROUGHT', 'NERVES', 'YIELDED', 'AT', 'LAST'] +6930-81414-0027-77: ref=['FOR', 'SOME', 'TIME', 'AFTER', 'THAT', 'I', 'REMEMBERED', 'NOTHING', 'DISTINCTLY'] +6930-81414-0027-77: hyp=['FOR', 'SOME', 'TIME', 'AFTER', 'THAT', 'I', 'REMEMBERED', 'NOTHING', 'DISTINCTLY'] +7021-79730-0000-1399: ref=['THE', 'THREE', 'MODES', 'OF', 'MANAGEMENT'] +7021-79730-0000-1399: hyp=['THE', 'THREE', 'MODES', 'OF', 'MANAGEMENT'] +7021-79730-0001-1400: ref=['TO', 'SUPPOSE', 'THAT', 'THE', 'OBJECT', 'OF', 'THIS', 'WORK', 'IS', 'TO', 'AID', 'IN', 'EFFECTING', 'SUCH', 'A', 'SUBSTITUTION', 'AS', 'THAT', 'IS', 'ENTIRELY', 'TO', 'MISTAKE', 'ITS', 'NATURE', 'AND', 'DESIGN'] +7021-79730-0001-1400: hyp=['TO', 'SUPPOSE', 'THAT', 'THE', 'OBJECT', 'OF', 'THIS', 'WORK', 'IS', 'TO', 'AID', 'IN', 'EFFECTING', 'SUCH', 'A', 'SUBSTITUTION', 'AS', 'THAT', 'IS', 'ENTIRELY', 'TO', 'MISTAKE', 'ITS', 'NATURE', 'AND', 'DESIGN'] +7021-79730-0002-1401: ref=['BY', 'REASON', 'AND', 'AFFECTION'] +7021-79730-0002-1401: hyp=['BY', 'REASON', 'AND', 'AFFECTION'] +7021-79730-0003-1402: ref=['AS', 'THE', 'CHAISE', 'DRIVES', 'AWAY', 'MARY', 'STANDS', 'BEWILDERED', 'AND', 'PERPLEXED', 'ON', 'THE', 'DOOR', 'STEP', 'HER', 'MIND', 'IN', 'A', 'TUMULT', 'OF', 'EXCITEMENT', 'IN', 'WHICH', 'HATRED', 'OF', 'THE', 'DOCTOR', 'DISTRUST', 'AND', 'SUSPICION', 'OF', 'HER', 'MOTHER', 'DISAPPOINTMENT', 'VEXATION', 'AND', 'ILL', 'HUMOR', 'SURGE', 'AND', 'SWELL', 'AMONG', 'THOSE', 'DELICATE', 'ORGANIZATIONS', 'ON', 'WHICH', 'THE', 'STRUCTURE', 'AND', 'DEVELOPMENT', 'OF', 'THE', 'SOUL', 'SO', 'CLOSELY', 'DEPEND', 'DOING', 'PERHAPS', 'AN', 'IRREPARABLE', 'INJURY'] +7021-79730-0003-1402: hyp=['AS', 'THE', 'CHASE', 'DRIVES', 'AWAY', 'MARY', 'STANDS', 'BEWILDERED', 'AND', 'PERPLEXED', 'ON', 'THE', 'DOORSTEP', 'HER', 'MIND', 'IN', 'A', 'TUMULT', 'OF', 'EXCITEMENT', 'IN', 'WHICH', 'HATRED', 'OF', 'THE', 'DOCTOR', 'DISTRUST', 'AND', 'SUSPICION', 'OF', 'HER', 'MOTHER', 'DISAPPOINTMENT', 'VEXATION', 'AND', 'ILL', 'HUMOUR', 'SURGE', 'AND', 'SWELL', 'AMONG', 'THOSE', 'DELICATE', 'ORGANIZATIONS', 'ON', 'WHICH', 'THE', 'STRUCTURE', 'AND', 'DEVELOPMENT', 'OF', 'THE', 'SOUL', 'SO', 'CLOSELY', 'DEPEND', 'DOING', 'PERHAPS', 'AN', 'IRREPARABLE', 'INJURY'] +7021-79730-0004-1403: ref=['THE', 'MOTHER', 'AS', 'SOON', 'AS', 'THE', 'CHAISE', 
'IS', 'SO', 'FAR', 'TURNED', 'THAT', 'MARY', 'CAN', 'NO', 'LONGER', 'WATCH', 'THE', 'EXPRESSION', 'OF', 'HER', 'COUNTENANCE', 'GOES', 'AWAY', 'FROM', 'THE', 'DOOR', 'WITH', 'A', 'SMILE', 'OF', 'COMPLACENCY', 'AND', 'SATISFACTION', 'UPON', 'HER', 'FACE', 'AT', 'THE', 'INGENUITY', 'AND', 'SUCCESS', 'OF', 'HER', 'LITTLE', 'ARTIFICE'] +7021-79730-0004-1403: hyp=['THE', 'MOTHER', 'AS', 'SOON', 'AS', 'THE', 'CHASE', 'IS', 'SO', 'FAR', 'TURNED', 'THAT', 'MARY', 'CAN', 'NO', 'LONGER', 'WATCH', 'THE', 'EXPRESSION', 'OF', 'HER', 'COUNTENANCE', 'GOES', 'AWAY', 'FROM', 'THE', 'DOOR', 'WITH', 'A', 'SMILE', 'OF', 'COMPLACENCY', 'AND', 'SATISFACTION', 'ON', 'HER', 'FACE', 'AT', 'THE', 'INGENUITY', 'AND', 'SUCCESS', 'OF', 'HER', 'LITTLE', 'ARTIFICE'] +7021-79730-0005-1404: ref=['SO', 'YOU', 'WILL', 'BE', 'A', 'GOOD', 'GIRL', 'I', 'KNOW', 'AND', 'NOT', 'MAKE', 'ANY', 'TROUBLE', 'BUT', 'WILL', 'STAY', 'AT', 'HOME', 'CONTENTEDLY', "WON'T", 'YOU'] +7021-79730-0005-1404: hyp=['SO', 'YOU', 'WILL', 'BE', 'A', 'GOOD', 'GIRL', 'I', 'KNOW', 'AND', 'NOT', 'MAKE', 'ANY', 'TROUBLE', 'BUT', 'WILL', 'STAY', 'AT', 'HOME', 'CONTENTEDLY', "WON'T", 'YOU'] +7021-79730-0006-1405: ref=['THE', 'MOTHER', 'IN', 'MANAGING', 'THE', 'CASE', 'IN', 'THIS', 'WAY', 'RELIES', 'PARTLY', 'ON', 'CONVINCING', 'THE', 'REASON', 'OF', 'THE', 'CHILD', 'AND', 'PARTLY', 'ON', 'AN', 'APPEAL', 'TO', 'HER', 'AFFECTION'] +7021-79730-0006-1405: hyp=['THE', 'MOTHER', 'IN', 'MANAGING', 'THE', 'CASE', 'IN', 'THIS', 'WAY', 'REALIZE', 'PARTLY', 'ON', 'CONVINCING', 'THE', 'REASON', 'OF', 'THE', 'CHILD', 'AND', 'PARTLY', 'ON', 'AN', 'APPEAL', 'TO', 'HER', 'AFFECTION'] +7021-79730-0007-1406: ref=['IF', 'YOU', 'SHOULD', 'NOT', 'BE', 'A', 'GOOD', 'GIRL', 'BUT', 'SHOULD', 'SHOW', 'SIGNS', 'OF', 'MAKING', 'US', 'ANY', 'TROUBLE', 'I', 'SHALL', 'HAVE', 'TO', 'SEND', 'YOU', 'OUT', 'SOMEWHERE', 'TO', 'THE', 'BACK', 'PART', 'OF', 'THE', 'HOUSE', 'UNTIL', 'WE', 'ARE', 'GONE'] +7021-79730-0007-1406: hyp=['IF', 'YOU', 'SHOULD', 'NOT', 'BE', 'A', 'GOOD', 'GIRL', 'BUT', 'SHOULD', 'SHOW', 'SIGNS', 'OF', 'MAKING', 'US', 'ANY', 'TROUBLE', 'I', 'SHALL', 'HAVE', 'TO', 'SEND', 'YOU', 'OUT', 'SOMEWHERE', 'TO', 'THE', 'BACK', 'PART', 'OF', 'THE', 'HOUSE', 'UNTIL', 'WE', 'ARE', 'GONE'] +7021-79730-0008-1407: ref=['BUT', 'THIS', 'LAST', 'SUPPOSITION', 'IS', 'ALMOST', 'ALWAYS', 'UNNECESSARY', 'FOR', 'IF', 'MARY', 'HAS', 'BEEN', 'HABITUALLY', 'MANAGED', 'ON', 'THIS', 'PRINCIPLE', 'SHE', 'WILL', 'NOT', 'MAKE', 'ANY', 'TROUBLE'] +7021-79730-0008-1407: hyp=['BUT', 'THIS', 'LAST', 'SUPPOSITION', 'IS', 'ALMOST', 'ALWAYS', 'UNNECESSARY', 'FOR', 'IF', 'MARY', 'HAS', 'BEEN', 'HABITUALLY', 'MANAGED', 'ON', 'THIS', 'PRINCIPLE', 'SHE', 'WILL', 'NOT', 'MAKE', 'ANY', 'TROUBLE'] +7021-79730-0009-1408: ref=['IT', 'IS', 'INDEED', 'TRUE', 'THAT', 'THE', 'IMPORTANCE', 'OF', 'TACT', 'AND', 'SKILL', 'IN', 'THE', 'TRAINING', 'OF', 'THE', 'YOUNG', 'AND', 'OF', 'CULTIVATING', 'THEIR', 'REASON', 'AND', 'SECURING', 'THEIR', 'AFFECTION', 'CAN', 'NOT', 'BE', 'OVERRATED'] +7021-79730-0009-1408: hyp=['IT', 'IS', 'INDEED', 'TRUE', 'THAT', 'THE', 'IMPORTANCE', 'OF', 'TACT', 'AND', 'SKILL', 'IN', 'THE', 'TRAINING', 'OF', 'THE', 'YOUNG', 'AND', 'OF', 'CULTIVATING', 'THEIR', 'REASON', 'AND', 'SECURING', 'THEIR', 'AFFECTION', 'CANNOT', 'BE', 'OVERRATED'] +7021-79740-0000-1384: ref=['TO', 'SUCH', 'PERSONS', 'THESE', 'INDIRECT', 'MODES', 'OF', 'TRAINING', 'CHILDREN', 'IN', 'HABITS', 'OF', 'SUBORDINATION', 'TO', 'THEIR', 'WILL', 'OR', 'RATHER', 'OF', 'YIELDING', 'TO', 'THEIR', 'INFLUENCE', 'ARE', 'SPECIALLY', 'USEFUL'] 
+7021-79740-0000-1384: hyp=['TO', 'SUCH', 'PERSONS', 'THESE', 'INDIRECT', 'MODES', 'OF', 'TRAINING', 'CHILDREN', 'IN', 'HABITS', 'OF', 'SUBORDINATION', 'TO', 'THEIR', 'WILL', 'OR', 'RATHER', 'OF', 'YIELDING', 'TO', 'THEIR', 'INFLUENCE', 'ARE', 'SPECIALLY', 'USEFUL'] +7021-79740-0001-1385: ref=['DELLA', 'HAD', 'A', 'YOUNG', 'SISTER', 'NAMED', 'MARIA', 'AND', 'A', 'COUSIN', 'WHOSE', 'NAME', 'WAS', 'JANE'] +7021-79740-0001-1385: hyp=['DELLA', 'HAD', 'A', 'YOUNG', 'SISTER', 'NAMED', 'MARIA', 'AND', 'A', 'COUSIN', 'WHOSE', 'NAME', 'WAS', 'JANE'] +7021-79740-0002-1386: ref=['NOW', 'DELIA', 'CONTRIVED', 'TO', 'OBTAIN', 'A', 'GREAT', 'INFLUENCE', 'AND', 'ASCENDENCY', 'OVER', 'THE', 'MINDS', 'OF', 'THE', 'CHILDREN', 'BY', 'MEANS', 'OF', 'THESE', 'DOLLS'] +7021-79740-0002-1386: hyp=['NOW', 'GELIA', 'CONTRIVED', 'TO', 'OBTAIN', 'A', 'GREAT', 'INFLUENCE', 'AND', 'ASCENDANCY', 'OVER', 'THE', 'MINDS', 'OF', 'THE', 'CHILDREN', 'BY', 'MEANS', 'OF', 'THESE', 'DOLLS'] +7021-79740-0003-1387: ref=['TO', 'GIVE', 'AN', 'IDEA', 'OF', 'THESE', 'CONVERSATIONS', 'I', 'WILL', 'REPORT', 'ONE', 'OF', 'THEM', 'IN', 'FULL'] +7021-79740-0003-1387: hyp=['TO', 'GIVE', 'AN', 'IDEA', 'OF', 'THESE', 'CONVERSATIONS', 'I', 'WILL', 'REPORT', 'ONE', 'OF', 'THEM', 'IN', 'FULL'] +7021-79740-0004-1388: ref=['YOU', 'HAVE', 'COME', 'ANDELLA', 'ANDELLA', 'WAS', 'THE', 'NAME', 'OF', "JANE'S", 'DOLL', 'TO', 'MAKE', 'ROSALIE', 'A', 'VISIT'] +7021-79740-0004-1388: hyp=['YOU', 'HAVE', 'COME', 'AND', 'DELA', 'AND', 'DELLA', 'WAS', 'THE', 'NAME', 'OF', 'JANE', 'STALL', 'TO', 'MAKE', 'ROSALIE', 'A', 'VISIT'] +7021-79740-0005-1389: ref=['I', 'AM', 'VERY', 'GLAD'] +7021-79740-0005-1389: hyp=['I', 'AM', 'VERY', 'GLAD'] +7021-79740-0006-1390: ref=['I', 'EXPECT', 'YOU', 'HAVE', 'BEEN', 'A', 'VERY', 'GOOD', 'GIRL', 'ANDELLA', 'SINCE', 'YOU', 'WERE', 'HERE', 'LAST'] +7021-79740-0006-1390: hyp=['I', 'EXPECT', 'YOU', 'HAVE', 'BEEN', 'A', 'VERY', 'GOOD', 'GIRL', 'ANDELLA', 'SINCE', 'YOU', 'WERE', 'HERE', 'LAST'] +7021-79740-0007-1391: ref=['THEN', 'TURNING', 'TO', 'JANE', 'SHE', 'ASKED', 'IN', 'A', 'SOMEWHAT', 'ALTERED', 'TONE', 'HAS', 'SHE', 'BEEN', 'A', 'GOOD', 'GIRL', 'JANE'] +7021-79740-0007-1391: hyp=['THEN', 'TURNING', 'TO', 'JANE', 'SHE', 'ASKED', 'IN', 'A', 'SOMEWHAT', 'ALTERED', 'TONE', 'HAS', 'SHE', 'BEEN', 'A', 'GOOD', 'GIRL', 'JANE'] +7021-79740-0008-1392: ref=['FOR', 'INSTANCE', 'ONE', 'DAY', 'THE', 'CHILDREN', 'HAD', 'BEEN', 'PLAYING', 'UPON', 'THE', 'PIAZZA', 'WITH', 'BLOCKS', 'AND', 'OTHER', 'PLAYTHINGS', 'AND', 'FINALLY', 'HAD', 'GONE', 'INTO', 'THE', 'HOUSE', 'LEAVING', 'ALL', 'THE', 'THINGS', 'ON', 'THE', 'FLOOR', 'OF', 'THE', 'PIAZZA', 'INSTEAD', 'OF', 'PUTTING', 'THEM', 'AWAY', 'IN', 'THEIR', 'PLACES', 'AS', 'THEY', 'OUGHT', 'TO', 'HAVE', 'DONE'] +7021-79740-0008-1392: hyp=['FOR', 'INSTANCE', 'ONE', 'DAY', 'THE', 'CHILDREN', 'HAD', 'BEEN', 'PLAYING', 'UPON', 'THE', 'PIAZZA', 'WITH', 'BLOCKS', 'AND', 'OTHER', 'PLAYTHINGS', 'AND', 'FINALLY', 'HAD', 'GONE', 'INTO', 'THE', 'HOUSE', 'LEAVING', 'ALL', 'THE', 'THINGS', 'ON', 'THE', 'FLOOR', 'OF', 'THE', 'PIAZZA', 'INSTEAD', 'OF', 'PUTTING', 'THEM', 'AWAY', 'IN', 'THEIR', 'PLACES', 'AS', 'THEY', 'OUGHT', 'TO', 'HAVE', 'DONE'] +7021-79740-0009-1393: ref=['THEY', 'WERE', 'NOW', 'PLAYING', 'WITH', 'THEIR', 'DOLLS', 'IN', 'THE', 'PARLOR'] +7021-79740-0009-1393: hyp=['THEY', 'WERE', 'NOW', 'PLAYING', 'WITH', 'THEIR', 'DOLLS', 'IN', 'THE', 'PARLOUR'] +7021-79740-0010-1394: ref=['DELIA', 'CAME', 'TO', 'THE', 'PARLOR', 'AND', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'MYSTERY', 'BECKONED', 'THE', 
'CHILDREN', 'ASIDE', 'AND', 'SAID', 'TO', 'THEM', 'IN', 'A', 'WHISPER', 'LEAVE', 'ANDELLA', 'AND', 'ROSALIE', 'HERE', 'AND', "DON'T", 'SAY', 'A', 'WORD', 'TO', 'THEM'] +7021-79740-0010-1394: hyp=['DELHIA', 'CAME', 'TO', 'THE', 'PARLOUR', 'AND', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'MYSTERY', 'BECKONED', 'THE', 'CHILDREN', 'ASIDE', 'AND', 'SAID', 'TO', 'THEM', 'IN', 'A', 'WHISPER', 'LEAVE', 'AND', 'DELLA', 'AND', 'ROSALIE', 'HERE', 'AND', "DON'T", 'SAY', 'A', 'WORD', 'TO', 'THEM'] +7021-79740-0011-1395: ref=['SO', 'SAYING', 'SHE', 'LED', 'THE', 'WAY', 'ON', 'TIPTOE', 'FOLLOWED', 'BY', 'THE', 'CHILDREN', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'ROUND', 'BY', 'A', 'CIRCUITOUS', 'ROUTE', 'TO', 'THE', 'PIAZZA', 'THERE'] +7021-79740-0011-1395: hyp=['SO', 'SAYING', 'SHE', 'LED', 'THE', 'WAY', 'ON', 'TIPTOE', 'FOLLOWED', 'BY', 'THE', 'CHILDREN', 'OUT', 'OF', 'THE', 'ROOM', 'AND', 'ROUND', 'BY', 'A', 'CIRCUITOUS', 'ROUTE', 'TO', 'THE', 'PIAZZA', 'THERE'] +7021-79740-0012-1396: ref=['SAID', 'SHE', 'POINTING', 'TO', 'THE', 'PLAYTHINGS', 'SEE'] +7021-79740-0012-1396: hyp=['SAID', 'SHE', 'POINTING', 'TO', 'THE', 'PLAYTHINGS', 'SEE'] +7021-79740-0013-1397: ref=['PUT', 'THESE', 'PLAYTHINGS', 'ALL', 'AWAY', 'QUICK', 'AND', 'CAREFULLY', 'AND', 'WE', 'WILL', 'NOT', 'LET', 'THEM', 'KNOW', 'ANY', 'THING', 'ABOUT', 'YOUR', 'LEAVING', 'THEM', 'OUT'] +7021-79740-0013-1397: hyp=['PUT', 'THESE', 'PLAYTHINGS', 'ALL', 'AWAY', 'QUICK', 'AND', 'CAREFULLY', 'AND', 'WE', 'WILL', 'NOT', 'LET', 'THEM', 'KNOW', 'ANYTHING', 'ABOUT', 'YOUR', 'LEAVING', 'THEM', 'OUT'] +7021-79740-0014-1398: ref=['AND', 'THIS', 'METHOD', 'OF', 'TREATING', 'THE', 'CASE', 'WAS', 'MUCH', 'MORE', 'EFFECTUAL', 'IN', 'MAKING', 'THEM', 'DISPOSED', 'TO', 'AVOID', 'COMMITTING', 'A', 'SIMILAR', 'FAULT', 'ANOTHER', 'TIME', 'THAN', 'ANY', 'DIRECT', 'REBUKES', 'OR', 'EXPRESSIONS', 'OF', 'DISPLEASURE', 'ADDRESSED', 'PERSONALLY', 'TO', 'THEM', 'WOULD', 'HAVE', 'BEEN'] +7021-79740-0014-1398: hyp=['AND', 'THIS', 'METHOD', 'OF', 'TREATING', 'THE', 'CASE', 'WAS', 'MUCH', 'MORE', 'EFFECTUAL', 'IN', 'MAKING', 'THEM', 'DISPOSED', 'TO', 'AVOID', 'COMMITTING', 'A', 'SIMILAR', 'FAULT', 'ANOTHER', 'TIME', 'THAN', 'ANY', 'DIRECT', 'REBUKES', 'OR', 'EXPRESSIONS', 'OF', 'DISPLEASURE', 'ADDRESSED', 'PERSONALLY', 'TO', 'THEM', 'WOULD', 'HAVE', 'BEEN'] +7021-79759-0000-1378: ref=['NATURE', 'OF', 'THE', 'EFFECT', 'PRODUCED', 'BY', 'EARLY', 'IMPRESSIONS'] +7021-79759-0000-1378: hyp=['NATURE', 'OF', 'THE', 'EFFECT', 'PRODUCED', 'BY', 'EARLY', 'IMPRESSIONS'] +7021-79759-0001-1379: ref=['THAT', 'IS', 'COMPARATIVELY', 'NOTHING'] +7021-79759-0001-1379: hyp=['THAT', 'IS', 'COMPARATIVELY', 'NOTHING'] +7021-79759-0002-1380: ref=['THEY', 'ARE', 'CHIEFLY', 'FORMED', 'FROM', 'COMBINATIONS', 'OF', 'THE', 'IMPRESSIONS', 'MADE', 'IN', 'CHILDHOOD'] +7021-79759-0002-1380: hyp=['THEY', 'ARE', 'CHIEFLY', 'FORMED', 'FROM', 'COMBINATIONS', 'OF', 'THE', 'IMPRESSIONS', 'MADE', 'IN', 'CHILDHOOD'] +7021-79759-0003-1381: ref=['VAST', 'IMPORTANCE', 'AND', 'INFLUENCE', 'OF', 'THIS', 'MENTAL', 'FURNISHING'] +7021-79759-0003-1381: hyp=['VAST', 'IMPORTANCE', 'AND', 'INFLUENCE', 'OF', 'THIS', 'MENTAL', 'FURNISHING'] +7021-79759-0004-1382: ref=['WITHOUT', 'GOING', 'TO', 'ANY', 'SUCH', 'EXTREME', 'AS', 'THIS', 'WE', 'CAN', 'EASILY', 'SEE', 'ON', 'REFLECTION', 'HOW', 'VAST', 'AN', 'INFLUENCE', 'ON', 'THE', 'IDEAS', 'AND', 'CONCEPTIONS', 'AS', 'WELL', 'AS', 'ON', 'THE', 'PRINCIPLES', 'OF', 'ACTION', 'IN', 'MATURE', 'YEARS', 'MUST', 'BE', 'EXERTED', 'BY', 'THE', 'NATURE', 'AND', 'CHARACTER', 'OF', 'THE', 'IMAGES', 
'WHICH', 'THE', 'PERIOD', 'OF', 'INFANCY', 'AND', 'CHILDHOOD', 'IMPRESSES', 'UPON', 'THE', 'MIND'] +7021-79759-0004-1382: hyp=['WITHOUT', 'GOING', 'TO', 'ANY', 'SUCH', 'EXTREME', 'AS', 'THIS', 'WE', 'CAN', 'EASILY', 'SEE', 'ON', 'REFLECTION', 'HOW', 'VAST', 'AN', 'INFLUENCE', 'ON', 'THE', 'IDEAS', 'AND', 'CONCEPTIONS', 'AS', 'WELL', 'AS', 'ON', 'THE', 'PRINCIPLES', 'OF', 'ACTION', 'AND', 'MATURE', 'YEARS', 'MUST', 'BE', 'EXERTED', 'BY', 'THE', 'NATURE', 'AND', 'CHARACTER', 'OF', 'THE', 'IMAGES', 'WHICH', 'THE', 'PERIOD', 'OF', 'INFANCY', 'AND', 'CHILDHOOD', 'IMPRESS', 'UPON', 'THE', 'MIND'] +7021-79759-0005-1383: ref=['THE', 'PAIN', 'PRODUCED', 'BY', 'AN', 'ACT', 'OF', 'HASTY', 'AND', 'ANGRY', 'VIOLENCE', 'TO', 'WHICH', 'A', 'FATHER', 'SUBJECTS', 'HIS', 'SON', 'MAY', 'SOON', 'PASS', 'AWAY', 'BUT', 'THE', 'MEMORY', 'OF', 'IT', 'DOES', 'NOT', 'PASS', 'AWAY', 'WITH', 'THE', 'PAIN'] +7021-79759-0005-1383: hyp=['THE', 'PAIN', 'PRODUCED', 'BY', 'AN', 'ACT', 'OF', 'HASTY', 'AND', 'ANGRY', 'VIOLENCE', 'TO', 'WHICH', 'A', 'FATHER', 'SUBJECTS', 'HIS', 'SON', 'MAY', 'SOON', 'PASS', 'AWAY', 'BUT', 'THE', 'MEMORY', 'OF', 'IT', 'DOES', 'NOT', 'PASS', 'AWAY', 'WITH', 'THE', 'PAIN'] +7021-85628-0000-1409: ref=['BUT', 'ANDERS', 'CARED', 'NOTHING', 'ABOUT', 'THAT'] +7021-85628-0000-1409: hyp=['BUT', 'ANDREWS', 'CARED', 'NOTHING', 'ABOUT', 'THAT'] +7021-85628-0001-1410: ref=['HE', 'MADE', 'A', 'BOW', 'SO', 'DEEP', 'THAT', 'HIS', 'BACK', 'CAME', 'NEAR', 'BREAKING', 'AND', 'HE', 'WAS', 'DUMBFOUNDED', 'I', 'CAN', 'TELL', 'YOU', 'WHEN', 'HE', 'SAW', 'IT', 'WAS', 'NOBODY', 'BUT', 'ANDERS'] +7021-85628-0001-1410: hyp=['HE', 'MADE', 'A', 'BOW', 'SO', 'DEEP', 'THAT', 'HIS', 'BACK', 'CAME', 'NEAR', 'BREAKING', 'AND', 'HE', 'WAS', 'DUMBFOUNDED', 'I', 'CAN', 'TELL', 'YOU', 'WHEN', 'HE', 'SAW', 'IT', 'WAS', 'NOBODY', 'BUT', "ANDREW'S"] +7021-85628-0002-1411: ref=['HE', 'WAS', 'SUCH', 'A', 'BIG', 'BOY', 'THAT', 'HE', 'WORE', 'HIGH', 'BOOTS', 'AND', 'CARRIED', 'A', 'JACK', 'KNIFE'] +7021-85628-0002-1411: hyp=['HE', 'WAS', 'SUCH', 'A', 'BIG', 'BOY', 'THAT', 'HE', 'WORE', 'HIGH', 'BOOTS', 'AND', 'CARRIED', 'A', 'JACK', 'KNIFE'] +7021-85628-0003-1412: ref=['NOW', 'THIS', 'KNIFE', 'WAS', 'A', 'SPLENDID', 'ONE', 'THOUGH', 'HALF', 'THE', 'BLADE', 'WAS', 'GONE', 'AND', 'THE', 'HANDLE', 'WAS', 'A', 'LITTLE', 'CRACKED', 'AND', 'ANDERS', 'KNEW', 'THAT', 'ONE', 'IS', 'ALMOST', 'A', 'MAN', 'AS', 'SOON', 'AS', 'ONE', 'HAS', 'A', 'JACK', 'KNIFE'] +7021-85628-0003-1412: hyp=['NOW', 'THIS', 'KNIFE', 'WAS', 'A', 'SPLENDID', 'ONE', 'THOUGH', 'HALF', 'THE', 'BLADE', 'WAS', 'GONE', 'AND', 'THE', 'HANDLE', 'WAS', 'A', 'LITTLE', 'CRACKED', 'AND', 'ANDREWS', 'KNEW', 'THAT', 'ONE', 'IS', 'ALMOST', 'A', 'MAN', 'AS', 'SOON', 'AS', 'ONE', 'HAS', 'A', 'JACKKNIFE'] +7021-85628-0004-1413: ref=['YES', 'WHY', 'NOT', 'THOUGHT', 'ANDERS'] +7021-85628-0004-1413: hyp=['YES', 'WHY', 'NOT', 'THOUGHT', 'ANDERS'] +7021-85628-0005-1414: ref=['SEEING', 'THAT', 'I', 'AM', 'SO', 'FINE', 'I', 'MAY', 'AS', 'WELL', 'GO', 'AND', 'VISIT', 'THE', 'KING'] +7021-85628-0005-1414: hyp=['SEEING', 'THAT', 'I', 'AM', 'SO', 'FINE', 'I', 'MAY', 'AS', 'WELL', 'GO', 'AND', 'VISIT', 'THE', 'KING'] +7021-85628-0006-1415: ref=['I', 'AM', 'GOING', 'TO', 'THE', 'COURT', 'BALL', 'ANSWERED', 'ANDERS'] +7021-85628-0006-1415: hyp=['I', 'AM', 'GOING', 'TO', 'THE', 'COURT', 'BALL', 'ANSWERED', 'ANDREWS'] +7021-85628-0007-1416: ref=['AND', 'SHE', 'TOOK', 'ANDERS', 'HAND', 'AND', 'WALKED', 'WITH', 'HIM', 'UP', 'THE', 'BROAD', 'MARBLE', 'STAIRS', 'WHERE', 'SOLDIERS', 'WERE', 'POSTED', 'AT', 
'EVERY', 'THIRD', 'STEP', 'AND', 'THROUGH', 'THE', 'MAGNIFICENT', 'HALLS', 'WHERE', 'COURTIERS', 'IN', 'SILK', 'AND', 'VELVET', 'STOOD', 'BOWING', 'WHEREVER', 'HE', 'WENT'] +7021-85628-0007-1416: hyp=['AND', 'SHE', 'TOOK', "ANDER'S", 'HAND', 'AND', 'WALKED', 'WITH', 'HIM', 'UP', 'THE', 'BROAD', 'MARBLE', 'STAIRS', 'WHERE', 'SOLDIERS', 'WERE', 'POSTED', 'AT', 'EVERY', 'THIRD', 'STEP', 'AND', 'THROUGH', 'THE', 'MAGNIFICENT', 'HALLS', 'WHERE', 'COURTIERS', 'IN', 'SILK', 'AND', 'VELVET', 'STOOD', 'BOWING', 'WHEREVER', 'HE', 'WENT'] +7021-85628-0008-1417: ref=['FOR', 'LIKE', 'AS', 'NOT', 'THEY', 'MUST', 'HAVE', 'THOUGHT', 'HIM', 'A', 'PRINCE', 'WHEN', 'THEY', 'SAW', 'HIS', 'FINE', 'CAP'] +7021-85628-0008-1417: hyp=['FOR', 'LIKE', 'AS', 'NOT', 'THEY', 'MUST', 'HAVE', 'THOUGHT', 'HIM', 'A', 'PRINCE', 'WHEN', 'THEY', 'SAW', 'HIS', 'FINE', 'CAP'] +7021-85628-0009-1418: ref=['AT', 'THE', 'FARTHER', 'END', 'OF', 'THE', 'LARGEST', 'HALL', 'A', 'TABLE', 'WAS', 'SET', 'WITH', 'GOLDEN', 'CUPS', 'AND', 'GOLDEN', 'PLATES', 'IN', 'LONG', 'ROWS'] +7021-85628-0009-1418: hyp=['AT', 'THE', 'FARTHER', 'END', 'OF', 'THE', 'LARGEST', 'HALL', 'A', 'TABLE', 'WAS', 'SET', 'WITH', 'GOLDEN', 'CUPS', 'AND', 'GOLDEN', 'PLATES', 'IN', 'LONG', 'ROWS'] +7021-85628-0010-1419: ref=['ON', 'HUGE', 'SILVER', 'PLATTERS', 'WERE', 'PYRAMIDS', 'OF', 'TARTS', 'AND', 'CAKES', 'AND', 'RED', 'WINE', 'SPARKLED', 'IN', 'GLITTERING', 'DECANTERS'] +7021-85628-0010-1419: hyp=['ON', 'HUGE', 'SILVER', 'PLATTERS', 'WERE', 'PYRAMIDS', 'OF', 'TARTS', 'AND', 'CAKES', 'AND', 'RED', 'WINE', 'SPARKLED', 'IN', 'GLITTERING', 'DECANTERS'] +7021-85628-0011-1420: ref=['THE', 'PRINCESS', 'SAT', 'DOWN', 'UNDER', 'A', 'BLUE', 'CANOPY', 'WITH', 'BOUQUETS', 'OF', 'ROSES', 'AND', 'SHE', 'LET', 'ANDERS', 'SIT', 'IN', 'A', 'GOLDEN', 'CHAIR', 'BY', 'HER', 'SIDE'] +7021-85628-0011-1420: hyp=['THE', 'PRINCESS', 'SAT', 'DOWN', 'UNDER', 'A', 'BLUE', 'CANOPY', 'WITH', 'BOUQUETS', 'OF', 'ROSES', 'AND', 'SHE', 'LET', 'ANDREW', 'SIT', 'IN', 'A', 'GOLDEN', 'CHAIR', 'BY', 'HER', 'SIDE'] +7021-85628-0012-1421: ref=['BUT', 'YOU', 'MUST', 'NOT', 'EAT', 'WITH', 'YOUR', 'CAP', 'ON', 'YOUR', 'HEAD', 'SHE', 'SAID', 'AND', 'WAS', 'GOING', 'TO', 'TAKE', 'IT', 'OFF'] +7021-85628-0012-1421: hyp=['BUT', 'YOU', 'MUST', 'NOT', 'EAT', 'WITH', 'YOUR', 'CAP', 'ON', 'YOUR', 'HEAD', 'SHE', 'SAID', 'AND', 'WAS', 'GOING', 'TO', 'TAKE', 'IT', 'OFF'] +7021-85628-0013-1422: ref=['THE', 'PRINCESS', 'CERTAINLY', 'WAS', 'BEAUTIFUL', 'AND', 'HE', 'WOULD', 'HAVE', 'DEARLY', 'LIKED', 'TO', 'BE', 'KISSED', 'BY', 'HER', 'BUT', 'THE', 'CAP', 'WHICH', 'HIS', 'MOTHER', 'HAD', 'MADE', 'HE', 'WOULD', 'NOT', 'GIVE', 'UP', 'ON', 'ANY', 'CONDITION'] +7021-85628-0013-1422: hyp=['THE', 'PRINCESS', 'CERTAINLY', 'WAS', 'BEAUTIFUL', 'AND', 'HE', 'WOULD', 'HAVE', 'DEARLY', 'LIKED', 'TO', 'BE', 'KISSED', 'BY', 'HER', 'BUT', 'THE', 'CAP', 'WHICH', 'HIS', 'MOTHER', 'HAD', 'MADE', 'HE', 'WOULD', 'NOT', 'GIVE', 'UP', 'ON', 'ANY', 'CONDITION'] +7021-85628-0014-1423: ref=['HE', 'ONLY', 'SHOOK', 'HIS', 'HEAD'] +7021-85628-0014-1423: hyp=['HE', 'ONLY', 'SHOOK', 'HIS', 'HEAD'] +7021-85628-0015-1424: ref=['WELL', 'BUT', 'NOW', 'SAID', 'THE', 'PRINCESS', 'AND', 'SHE', 'FILLED', 'HIS', 'POCKETS', 'WITH', 'CAKES', 'AND', 'PUT', 'HER', 'OWN', 'HEAVY', 'GOLD', 'CHAIN', 'AROUND', 'HIS', 'NECK', 'AND', 'BENT', 'DOWN', 'AND', 'KISSED', 'HIM'] +7021-85628-0015-1424: hyp=['WELL', 'BUT', 'NOW', 'SAID', 'THE', 'PRINCESS', 'AND', 'SHE', 'FILLED', 'HIS', 'POCKETS', 'WITH', 'CAKES', 'AND', 'PUT', 'HER', 'OWN', 'HEAVY', 'GOLD', 'CHAIN', 
'AROUND', 'HIS', 'NECK', 'AND', 'BENT', 'DOWN', 'AND', 'KISSED', 'HIM'] +7021-85628-0016-1425: ref=['THAT', 'IS', 'A', 'VERY', 'FINE', 'CAP', 'YOU', 'HAVE', 'HE', 'SAID'] +7021-85628-0016-1425: hyp=['THAT', 'IS', 'A', 'VERY', 'FINE', 'CAP', 'YOU', 'HAVE', 'HE', 'SAID'] +7021-85628-0017-1426: ref=['SO', 'IT', 'IS', 'SAID', 'ANDERS'] +7021-85628-0017-1426: hyp=['SO', 'IT', 'IS', 'SAID', 'ANDREWS'] +7021-85628-0018-1427: ref=['AND', 'IT', 'IS', 'MADE', 'OF', "MOTHER'S", 'BEST', 'YARN', 'AND', 'SHE', 'KNITTED', 'IT', 'HERSELF', 'AND', 'EVERYBODY', 'WANTS', 'TO', 'GET', 'IT', 'AWAY', 'FROM', 'ME'] +7021-85628-0018-1427: hyp=['AND', 'IT', 'IS', 'MADE', 'OF', "MOTHER'S", 'BEST', 'YARN', 'AND', 'SHE', 'KNITTED', 'IT', 'HERSELF', 'AND', 'EVERYBODY', 'WANTS', 'TO', 'GET', 'IT', 'AWAY', 'FROM', 'ME'] +7021-85628-0019-1428: ref=['WITH', 'ONE', 'JUMP', 'ANDERS', 'GOT', 'OUT', 'OF', 'HIS', 'CHAIR'] +7021-85628-0019-1428: hyp=['WITH', 'ONE', 'JUMP', 'ANDREWS', 'GOT', 'OUT', 'OF', 'HIS', 'CHAIR'] +7021-85628-0020-1429: ref=['HE', 'DARTED', 'LIKE', 'AN', 'ARROW', 'THROUGH', 'ALL', 'THE', 'HALLS', 'DOWN', 'ALL', 'THE', 'STAIRS', 'AND', 'ACROSS', 'THE', 'YARD'] +7021-85628-0020-1429: hyp=['HE', 'DARTED', 'LIKE', 'AN', 'ARROW', 'THROUGH', 'ALL', 'THE', 'HALLS', 'DOWN', 'ALL', 'THE', 'STAIRS', 'AND', 'ACROSS', 'THE', 'YARD'] +7021-85628-0021-1430: ref=['HE', 'STILL', 'HELD', 'ON', 'TO', 'IT', 'WITH', 'BOTH', 'HANDS', 'AS', 'HE', 'RUSHED', 'INTO', 'HIS', "MOTHER'S", 'COTTAGE'] +7021-85628-0021-1430: hyp=['HE', 'STILL', 'HELD', 'ON', 'TO', 'IT', 'WITH', 'BOTH', 'HANDS', 'AS', 'HE', 'RUSHED', 'INTO', 'HIS', "MOTHER'S", 'COTTAGE'] +7021-85628-0022-1431: ref=['AND', 'ALL', 'HIS', 'BROTHERS', 'AND', 'SISTERS', 'STOOD', 'ROUND', 'AND', 'LISTENED', 'WITH', 'THEIR', 'MOUTHS', 'OPEN'] +7021-85628-0022-1431: hyp=['AND', 'ALL', 'HIS', 'BROTHERS', 'AND', 'SISTERS', 'STOOD', 'ROUND', 'AND', 'LISTENED', 'WITH', 'THEIR', 'MOUTHS', 'OPEN'] +7021-85628-0023-1432: ref=['BUT', 'WHEN', 'HIS', 'BIG', 'BROTHER', 'HEARD', 'THAT', 'HE', 'HAD', 'REFUSED', 'TO', 'GIVE', 'HIS', 'CAP', 'FOR', 'A', "KING'S", 'GOLDEN', 'CROWN', 'HE', 'SAID', 'THAT', 'ANDERS', 'WAS', 'A', 'STUPID'] +7021-85628-0023-1432: hyp=['BUT', 'WHEN', 'HIS', 'BIG', 'BROTHER', 'HEARD', 'THAT', 'HE', 'HAD', 'REFUSED', 'TO', 'GIVE', 'HIS', 'CAP', 'FOR', 'A', "KING'S", 'GOLDEN', 'CROWN', 'HE', 'SAID', 'THAT', 'ANDERS', 'WAS', 'A', 'STUPID'] +7021-85628-0024-1433: ref=['ANDERS', 'FACE', 'GREW', 'RED'] +7021-85628-0024-1433: hyp=["ANDREW'S", 'FACE', 'GREW', 'RED'] +7021-85628-0025-1434: ref=['BUT', 'HIS', 'MOTHER', 'HUGGED', 'HIM', 'CLOSE'] +7021-85628-0025-1434: hyp=['BUT', 'HIS', 'MOTHER', 'HUGGED', 'HIM', 'CLOSE'] +7021-85628-0026-1435: ref=['NO', 'MY', 'LITTLE', 'SON', 'SHE', 'SAID'] +7021-85628-0026-1435: hyp=['NO', 'MY', 'LITTLE', 'FUN', 'SHE', 'SAID'] +7021-85628-0027-1436: ref=['IF', 'YOU', 'DRESSED', 'IN', 'SILK', 'AND', 'GOLD', 'FROM', 'TOP', 'TO', 'TOE', 'YOU', 'COULD', 'NOT', 'LOOK', 'ANY', 'NICER', 'THAN', 'IN', 'YOUR', 'LITTLE', 'RED', 'CAP'] +7021-85628-0027-1436: hyp=['IF', 'YOU', 'DRESSED', 'IN', 'SILK', 'AND', 'GOLD', 'FROM', 'TOP', 'TO', 'TOE', 'YOU', 'COULD', 'NOT', 'LOOK', 'ANY', 'NICER', 'THAN', 'IN', 'YOUR', 'LITTLE', 'RED', 'CAP'] +7127-75946-0000-467: ref=['AT', 'THE', 'CONCLUSION', 'OF', 'THE', 'BANQUET', 'WHICH', 'WAS', 'SERVED', 'AT', 'FIVE', "O'CLOCK", 'THE', 'KING', 'ENTERED', 'HIS', 'CABINET', 'WHERE', 'HIS', 'TAILORS', 'WERE', 'AWAITING', 'HIM', 'FOR', 'THE', 'PURPOSE', 'OF', 'TRYING', 'ON', 'THE', 'CELEBRATED', 'COSTUME', 'REPRESENTING', 
'SPRING', 'WHICH', 'WAS', 'THE', 'RESULT', 'OF', 'SO', 'MUCH', 'IMAGINATION', 'AND', 'HAD', 'COST', 'SO', 'MANY', 'EFFORTS', 'OF', 'THOUGHT', 'TO', 'THE', 'DESIGNERS', 'AND', 'ORNAMENT', 'WORKERS', 'OF', 'THE', 'COURT'] +7127-75946-0000-467: hyp=['AT', 'THE', 'CONCLUSION', 'OF', 'THE', 'BANQUET', 'WHICH', 'WAS', 'SERVED', 'AT', 'FIVE', "O'CLOCK", 'THE', 'KING', 'ENTERED', 'HIS', 'CABINET', 'WHERE', 'HIS', 'TAILORS', 'WERE', 'AWAITING', 'HIM', 'FOR', 'THE', 'PURPOSE', 'OF', 'TRYING', 'ON', 'THE', 'CELEBRATED', 'COSTUME', 'REPRESENTING', 'SPRING', 'WHICH', 'WAS', 'THE', 'RESULT', 'OF', 'SO', 'MUCH', 'IMAGINATION', 'AND', 'HAD', 'COST', 'SO', 'MANY', 'EFFORTS', 'OF', 'THOUGHT', 'TO', 'THE', 'DESIGNERS', 'AND', 'ORNAMENT', 'WORKERS', 'OF', 'THE', 'COURT'] +7127-75946-0001-468: ref=['AH', 'VERY', 'WELL'] +7127-75946-0001-468: hyp=['AH', 'VERY', 'WELL'] +7127-75946-0002-469: ref=['LET', 'HIM', 'COME', 'IN', 'THEN', 'SAID', 'THE', 'KING', 'AND', 'AS', 'IF', 'COLBERT', 'HAD', 'BEEN', 'LISTENING', 'AT', 'THE', 'DOOR', 'FOR', 'THE', 'PURPOSE', 'OF', 'KEEPING', 'HIMSELF', 'AU', 'COURANT', 'WITH', 'THE', 'CONVERSATION', 'HE', 'ENTERED', 'AS', 'SOON', 'AS', 'THE', 'KING', 'HAD', 'PRONOUNCED', 'HIS', 'NAME', 'TO', 'THE', 'TWO', 'COURTIERS'] +7127-75946-0002-469: hyp=['LET', 'HIM', 'COME', 'IN', 'THEN', 'SAID', 'THE', 'KING', 'AND', 'AS', 'IF', 'COLBERT', 'HAD', 'BEEN', 'LISTENING', 'AT', 'THE', 'DOOR', 'FOR', 'THE', 'PURPOSE', 'OF', 'KEEPING', 'HIMSELF', 'ACCURANT', 'WITH', 'THE', 'CONVERSATION', 'HE', 'ENTERED', 'AS', 'SOON', 'AS', 'THE', 'KING', 'HAD', 'PRONOUNCED', 'HIS', 'NAME', 'TO', 'THE', 'TWO', 'COURTIERS'] +7127-75946-0003-470: ref=['GENTLEMEN', 'TO', 'YOUR', 'POSTS', 'WHEREUPON', 'SAINT', 'AIGNAN', 'AND', 'VILLEROY', 'TOOK', 'THEIR', 'LEAVE'] +7127-75946-0003-470: hyp=['GENTLEMEN', 'TO', 'YOUR', 'POSTS', 'WHEREUPON', 'SAINT', 'ENG', 'YON', 'AND', 'VILLAIRY', 'TOOK', 'THEIR', 'LEAVE'] +7127-75946-0004-471: ref=['CERTAINLY', 'SIRE', 'BUT', 'I', 'MUST', 'HAVE', 'MONEY', 'TO', 'DO', 'THAT', 'WHAT'] +7127-75946-0004-471: hyp=['CERTAINLY', 'SIRE', 'BUT', 'I', 'MUST', 'HAVE', 'MONEY', 'TO', 'DO', 'THAT', 'WHAT'] +7127-75946-0005-472: ref=['WHAT', 'DO', 'YOU', 'MEAN', 'INQUIRED', 'LOUIS'] +7127-75946-0005-472: hyp=['WHAT', 'DO', 'YOU', 'MEAN', 'INQUIRED', 'LOUISE'] +7127-75946-0006-473: ref=['HE', 'HAS', 'GIVEN', 'THEM', 'WITH', 'TOO', 'MUCH', 'GRACE', 'NOT', 'TO', 'HAVE', 'OTHERS', 'STILL', 'TO', 'GIVE', 'IF', 'THEY', 'ARE', 'REQUIRED', 'WHICH', 'IS', 'THE', 'CASE', 'AT', 'THE', 'PRESENT', 'MOMENT'] +7127-75946-0006-473: hyp=['HE', 'HAS', 'GIVEN', 'THEM', 'WITH', 'TOO', 'MUCH', 'GRACE', 'NOT', 'TO', 'HAVE', 'OTHERS', 'STILL', 'TO', 'GIVE', 'IF', 'THEY', 'ARE', 'REQUIRED', 'WHICH', 'IS', 'THE', 'CASE', 'AT', 'THE', 'PRESENT', 'MOMENT'] +7127-75946-0007-474: ref=['IT', 'IS', 'NECESSARY', 'THEREFORE', 'THAT', 'HE', 'SHOULD', 'COMPLY', 'THE', 'KING', 'FROWNED'] +7127-75946-0007-474: hyp=['IT', 'IS', 'NECESSARY', 'THEREFORE', 'THAT', 'HE', 'SHOULD', 'COMPLY', 'THE', 'KING', 'FROWNED'] +7127-75946-0008-475: ref=['DOES', 'YOUR', 'MAJESTY', 'THEN', 'NO', 'LONGER', 'BELIEVE', 'THE', 'DISLOYAL', 'ATTEMPT'] +7127-75946-0008-475: hyp=['DOES', 'YOUR', 'MAJESTY', 'THEN', 'NO', 'LONGER', 'BELIEVE', 'THE', 'DISLOYAL', 'ATTEMPT'] +7127-75946-0009-476: ref=['NOT', 'AT', 'ALL', 'YOU', 'ARE', 'ON', 'THE', 'CONTRARY', 'MOST', 'AGREEABLE', 'TO', 'ME'] +7127-75946-0009-476: hyp=['NOT', 'AT', 'ALL', 'YOU', 'ARE', 'ON', 'THE', 'CONTRARY', 'MOST', 'AGREEABLE', 'TO', 'ME'] +7127-75946-0010-477: ref=['YOUR', "MAJESTY'S", 
'PLAN', 'THEN', 'IN', 'THIS', 'AFFAIR', 'IS'] +7127-75946-0010-477: hyp=['YOUR', "MAJESTY'S", 'PLAN', 'THEN', 'IN', 'THIS', 'AFFAIR', 'IS'] +7127-75946-0011-478: ref=['YOU', 'WILL', 'TAKE', 'THEM', 'FROM', 'MY', 'PRIVATE', 'TREASURE'] +7127-75946-0011-478: hyp=['YOU', 'WILL', 'TAKE', 'THEM', 'FROM', 'MY', 'PRIVATE', 'TREASURE'] +7127-75946-0012-479: ref=['THE', 'NEWS', 'CIRCULATED', 'WITH', 'THE', 'RAPIDITY', 'OF', 'LIGHTNING', 'DURING', 'ITS', 'PROGRESS', 'IT', 'KINDLED', 'EVERY', 'VARIETY', 'OF', 'COQUETRY', 'DESIRE', 'AND', 'WILD', 'AMBITION'] +7127-75946-0012-479: hyp=['THE', 'NEWS', 'CIRCULATED', 'WITH', 'THE', 'RAPIDITY', 'OF', 'LIGHTNING', 'DURING', 'ITS', 'PROGRESS', 'IT', 'KINDLED', 'EVERY', 'VARIETY', 'OF', 'COQUETRY', 'DESIRE', 'AND', 'WILD', 'AMBITION'] +7127-75946-0013-480: ref=['THE', 'KING', 'HAD', 'COMPLETED', 'HIS', 'TOILETTE', 'BY', 'NINE', "O'CLOCK", 'HE', 'APPEARED', 'IN', 'AN', 'OPEN', 'CARRIAGE', 'DECORATED', 'WITH', 'BRANCHES', 'OF', 'TREES', 'AND', 'FLOWERS'] +7127-75946-0013-480: hyp=['THE', 'KING', 'HAD', 'COMPLETED', 'HIS', 'TOILET', 'BY', 'NINE', "O'CLOCK", 'HE', 'APPEARED', 'IN', 'AN', 'OPEN', 'CARRIAGE', 'DECORATED', 'WITH', 'BRANCHES', 'OF', 'TREES', 'AND', 'FLOWERS'] +7127-75946-0014-481: ref=['THE', 'QUEENS', 'HAD', 'TAKEN', 'THEIR', 'SEATS', 'UPON', 'A', 'MAGNIFICENT', 'DIAS', 'OR', 'PLATFORM', 'ERECTED', 'UPON', 'THE', 'BORDERS', 'OF', 'THE', 'LAKE', 'IN', 'A', 'THEATER', 'OF', 'WONDERFUL', 'ELEGANCE', 'OF', 'CONSTRUCTION'] +7127-75946-0014-481: hyp=['THE', 'QUEENS', 'HAD', 'TAKEN', 'THEIR', 'SEATS', 'UPON', 'A', 'MAGNIFICENT', 'DAIS', 'OR', 'PLATFORM', 'ERECTED', 'UPON', 'THE', 'BORDERS', 'OF', 'THE', 'LAKE', 'IN', 'A', 'THEATRE', 'OF', 'WONDERFUL', 'ELEGANCE', 'OF', 'CONSTRUCTION'] +7127-75946-0015-482: ref=['SUDDENLY', 'FOR', 'THE', 'PURPOSE', 'OF', 'RESTORING', 'PEACE', 'AND', 'ORDER', 'SPRING', 'ACCOMPANIED', 'BY', 'HIS', 'WHOLE', 'COURT', 'MADE', 'HIS', 'APPEARANCE'] +7127-75946-0015-482: hyp=['SUDDENLY', 'FOR', 'THE', 'PURPOSE', 'OF', 'RESTORING', 'PEACE', 'AND', 'ORDER', 'SPRANG', 'ACCOMPANIED', 'BY', 'HIS', 'WHOLE', 'COURT', 'MADE', 'HIS', 'APPEARANCE'] +7127-75946-0016-483: ref=['THE', 'SEASONS', 'ALLIES', 'OF', 'SPRING', 'FOLLOWED', 'HIM', 'CLOSELY', 'TO', 'FORM', 'A', 'QUADRILLE', 'WHICH', 'AFTER', 'MANY', 'WORDS', 'OF', 'MORE', 'OR', 'LESS', 'FLATTERING', 'IMPORT', 'WAS', 'THE', 'COMMENCEMENT', 'OF', 'THE', 'DANCE'] +7127-75946-0016-483: hyp=['THE', 'SEASONS', 'ALLIES', 'OF', 'SPRING', 'FOLLOWED', 'HIM', 'CLOSELY', 'TO', 'FORM', 'A', 'QUADRILLE', 'WHICH', 'AFTER', 'MANY', 'WORDS', 'OF', 'MORE', 'OR', 'LESS', 'FLATTERING', 'IMPORT', 'WAS', 'THE', 'COMMENCEMENT', 'OF', 'THE', 'DANCE'] +7127-75946-0017-484: ref=['HIS', 'LEGS', 'THE', 'BEST', 'SHAPED', 'AT', 'COURT', 'WERE', 'DISPLAYED', 'TO', 'GREAT', 'ADVANTAGE', 'IN', 'FLESH', 'COLORED', 'SILKEN', 'HOSE', 'OF', 'SILK', 'SO', 'FINE', 'AND', 'SO', 'TRANSPARENT', 'THAT', 'IT', 'SEEMED', 'ALMOST', 'LIKE', 'FLESH', 'ITSELF'] +7127-75946-0017-484: hyp=['HIS', 'LEGS', 'THE', 'BEST', 'SHAPED', 'AT', 'COURT', 'WERE', 'DISPLAYED', 'TO', 'GREAT', 'ADVANTAGE', 'IN', 'FLESH', 'COLORED', 'SILKEN', 'HOSE', 'A', 'SILK', 'SO', 'FINE', 'AND', 'SO', 'TRANSPARENT', 'THAT', 'IT', 'SEEMED', 'ALMOST', 'LIKE', 'FLESH', 'ITSELF'] +7127-75946-0018-485: ref=['THERE', 'WAS', 'SOMETHING', 'IN', 'HIS', 'CARRIAGE', 'WHICH', 'RESEMBLED', 'THE', 'BUOYANT', 'MOVEMENTS', 'OF', 'AN', 'IMMORTAL', 'AND', 'HE', 'DID', 'NOT', 'DANCE', 'SO', 'MUCH', 'AS', 'SEEM', 'TO', 'SOAR', 'ALONG'] +7127-75946-0018-485: hyp=['THERE', 'WAS', 
'SOMETHING', 'IN', 'HIS', 'CARRIAGE', 'WHICH', 'RESEMBLED', 'THE', 'BUOYANT', 'MOVEMENTS', 'OF', 'AN', 'IMMORTAL', 'AND', 'HE', 'DID', 'NOT', 'DANCE', 'SO', 'MUCH', 'AS', 'SEEMED', 'TO', 'SOAR', 'ALONG'] +7127-75946-0019-486: ref=['YES', 'IT', 'IS', 'SUPPRESSED'] +7127-75946-0019-486: hyp=['YES', 'IT', 'IS', 'SUPPRESSED'] +7127-75946-0020-487: ref=['FAR', 'FROM', 'IT', 'SIRE', 'YOUR', 'MAJESTY', 'HAVING', 'GIVEN', 'NO', 'DIRECTIONS', 'ABOUT', 'IT', 'THE', 'MUSICIANS', 'HAVE', 'RETAINED', 'IT'] +7127-75946-0020-487: hyp=['FAR', 'FROM', 'IT', 'SIRE', 'YOUR', 'MAJESTY', 'HEAVEN', 'GIVEN', 'NO', 'DIRECTIONS', 'ABOUT', 'IT', 'THE', 'MUSICIANS', 'HAVE', 'RETAINED', 'IT'] +7127-75946-0021-488: ref=['YES', 'SIRE', 'AND', 'READY', 'DRESSED', 'FOR', 'THE', 'BALLET'] +7127-75946-0021-488: hyp=['YES', 'SIRE', 'AND', 'READY', 'DRESSED', 'FOR', 'THE', 'BALLET'] +7127-75946-0022-489: ref=['SIRE', 'HE', 'SAID', 'YOUR', "MAJESTY'S", 'MOST', 'DEVOTED', 'SERVANT', 'APPROACHES', 'TO', 'PERFORM', 'A', 'SERVICE', 'ON', 'THIS', 'OCCASION', 'WITH', 'SIMILAR', 'ZEAL', 'THAT', 'HE', 'HAS', 'ALREADY', 'SHOWN', 'ON', 'THE', 'FIELD', 'OF', 'BATTLE'] +7127-75946-0022-489: hyp=['SIRE', 'HE', 'SAID', 'YOUR', "MAJESTY'S", 'MOST', 'DEVOTED', 'SERVANT', 'APPROACHES', 'TO', 'PERFORM', 'A', 'SERVICE', 'ON', 'THIS', 'OCCASION', 'WITH', 'SIMILAR', 'ZEAL', 'THAT', 'HE', 'HAS', 'ALREADY', 'SHOWN', 'ON', 'THE', 'FIELD', 'OF', 'BATTLE'] +7127-75946-0023-490: ref=['THE', 'KING', 'SEEMED', 'ONLY', 'PLEASED', 'WITH', 'EVERY', 'ONE', 'PRESENT'] +7127-75946-0023-490: hyp=['THE', 'KING', 'SEEMED', 'ONLY', 'PLEASED', 'WITH', 'EVERY', 'ONE', 'PRESENT'] +7127-75946-0024-491: ref=['MONSIEUR', 'WAS', 'THE', 'ONLY', 'ONE', 'WHO', 'DID', 'NOT', 'UNDERSTAND', 'ANYTHING', 'ABOUT', 'THE', 'MATTER'] +7127-75946-0024-491: hyp=['MONSIEUR', 'WAS', 'THE', 'ONLY', 'ONE', 'WHO', 'DID', 'NOT', 'UNDERSTAND', 'ANYTHING', 'ABOUT', 'THE', 'MATTER'] +7127-75946-0025-492: ref=['THE', 'BALLET', 'BEGAN', 'THE', 'EFFECT', 'WAS', 'MORE', 'THAN', 'BEAUTIFUL'] +7127-75946-0025-492: hyp=['THE', 'BALLET', 'BEGAN', 'THE', 'EFFECT', 'WAS', 'MORE', 'THAN', 'BEAUTIFUL'] +7127-75946-0026-493: ref=['WHEN', 'THE', 'MUSIC', 'BY', 'ITS', 'BURSTS', 'OF', 'MELODY', 'CARRIED', 'AWAY', 'THESE', 'ILLUSTRIOUS', 'DANCERS', 'WHEN', 'THE', 'SIMPLE', 'UNTUTORED', 'PANTOMIME', 'OF', 'THAT', 'PERIOD', 'ONLY', 'THE', 'MORE', 'NATURAL', 'ON', 'ACCOUNT', 'OF', 'THE', 'VERY', 'INDIFFERENT', 'ACTING', 'OF', 'THE', 'AUGUST', 'ACTORS', 'HAD', 'REACHED', 'ITS', 'CULMINATING', 'POINT', 'OF', 'TRIUMPH', 'THE', 'THEATER', 'SHOOK', 'WITH', 'TUMULTUOUS', 'APPLAUSE'] +7127-75946-0026-493: hyp=['WHEN', 'THE', 'MUSIC', 'BY', 'ITS', 'BURSTS', 'OF', 'MELODY', 'CARRIED', 'AWAY', 'THESE', 'ILLUSTRIOUS', 'DANCERS', 'WHEN', 'THIS', 'SIMPLE', 'UNTUTORED', 'PANTOMIME', 'OF', 'THAT', 'PERIOD', 'ONLY', 'THE', 'MORE', 'NATURAL', 'ON', 'ACCOUNT', 'OF', 'THE', 'VERY', 'INDIFFERENT', 'ACTING', 'OF', 'THE', 'AUGUST', 'ACTORS', 'HAD', 'REACHED', 'ITS', 'CULMINATING', 'POINT', 'OF', 'TRIUMPH', 'THE', 'THEATRE', 'SHOOK', 'WITH', 'TUMULTUOUS', 'APPLAUSE'] +7127-75946-0027-494: ref=['DISDAINFUL', 'OF', 'A', 'SUCCESS', 'OF', 'WHICH', 'MADAME', 'SHOWED', 'NO', 'ACKNOWLEDGEMENT', 'HE', 'THOUGHT', 'OF', 'NOTHING', 'BUT', 'BOLDLY', 'REGAINING', 'THE', 'MARKED', 'PREFERENCE', 'OF', 'THE', 'PRINCESS'] +7127-75946-0027-494: hyp=['DISDAINFUL', 'OF', 'A', 'SUCCESS', 'OF', 'WHICH', 'MADAME', 'SHOWED', 'NO', 'ACKNOWLEDGMENT', 'HE', 'THOUGHT', 'OF', 'NOTHING', 'BUT', 'BOLDLY', 'REGAINING', 'THE', 'MARKET', 'PREFERENCE', 'OF', 'THE', 
'PRINCESS'] +7127-75946-0028-495: ref=['BY', 'DEGREES', 'ALL', 'HIS', 'HAPPINESS', 'ALL', 'HIS', 'BRILLIANCY', 'SUBSIDED', 'INTO', 'REGRET', 'AND', 'UNEASINESS', 'SO', 'THAT', 'HIS', 'LIMBS', 'LOST', 'THEIR', 'POWER', 'HIS', 'ARMS', 'HUNG', 'HEAVILY', 'BY', 'HIS', 'SIDES', 'AND', 'HIS', 'HEAD', 'DROOPED', 'AS', 'THOUGH', 'HE', 'WAS', 'STUPEFIED'] +7127-75946-0028-495: hyp=['BY', 'DEGREES', 'ALL', 'HIS', 'HAPPINESS', 'ALL', 'HIS', 'BRILLIANCY', 'SUBSIDED', 'INTO', 'REGRET', 'AND', 'UNEASINESS', 'SO', 'THAT', 'HIS', 'LIMBS', 'LOST', 'THEIR', 'POWER', 'HIS', 'ARMS', 'HUNG', 'HEAVILY', 'BY', 'HIS', 'SIDES', 'AND', 'HIS', 'HEAD', 'DROOPED', 'AS', 'THOUGH', 'HE', 'WAS', 'STUPEFIED'] +7127-75946-0029-496: ref=['THE', 'KING', 'WHO', 'HAD', 'FROM', 'THIS', 'MOMENT', 'BECOME', 'IN', 'REALITY', 'THE', 'PRINCIPAL', 'DANCER', 'IN', 'THE', 'QUADRILLE', 'CAST', 'A', 'LOOK', 'UPON', 'HIS', 'VANQUISHED', 'RIVAL'] +7127-75946-0029-496: hyp=['THE', 'KING', 'WHO', 'HAD', 'FROM', 'THIS', 'MOMENT', 'BECOME', 'IN', 'REALITY', 'THE', 'PRINCIPAL', 'DANCER', 'IN', 'THE', 'QUADRILLE', 'CAST', 'A', 'LOOK', 'UPON', 'HIS', 'VANQUISHED', 'RIVAL'] +7127-75947-0000-426: ref=['EVERY', 'ONE', 'COULD', 'OBSERVE', 'HIS', 'AGITATION', 'AND', 'PROSTRATION', 'A', 'PROSTRATION', 'WHICH', 'WAS', 'INDEED', 'THE', 'MORE', 'REMARKABLE', 'SINCE', 'PEOPLE', 'WERE', 'NOT', 'ACCUSTOMED', 'TO', 'SEE', 'HIM', 'WITH', 'HIS', 'ARMS', 'HANGING', 'LISTLESSLY', 'BY', 'HIS', 'SIDE', 'HIS', 'HEAD', 'BEWILDERED', 'AND', 'HIS', 'EYES', 'WITH', 'ALL', 'THEIR', 'BRIGHT', 'INTELLIGENCE', 'BEDIMMED'] +7127-75947-0000-426: hyp=['EVERY', 'ONE', 'COULD', 'OBSERVE', 'HIS', 'AGITATION', 'AND', 'PROSTRATION', 'A', 'PROSTRATION', 'WHICH', 'WAS', 'INDEED', 'THE', 'MORE', 'REMARKABLE', 'SINCE', 'PEOPLE', 'WERE', 'NOT', 'ACCUSTOMED', 'TO', 'SEE', 'HIM', 'WITH', 'HIS', 'ARMS', 'HANGING', 'LISTLESSLY', 'BY', 'HIS', 'SIDE', 'HIS', 'HEAD', 'BEWILDERED', 'AND', 'HIS', 'EYES', 'WITH', 'ALL', 'THEIR', 'BRIGHT', 'INTELLIGENCE', 'BE', 'DIMMED'] +7127-75947-0001-427: ref=['UPON', 'THIS', 'MADAME', 'DEIGNED', 'TO', 'TURN', 'HER', 'EYES', 'LANGUISHINGLY', 'TOWARDS', 'THE', 'COMTE', 'OBSERVING'] +7127-75947-0001-427: hyp=['UPON', 'THIS', 'MADAME', 'DEIGNED', 'TO', 'TURN', 'HER', 'EYES', 'LANGUISHINGLY', 'TOWARDS', 'THE', 'COMTE', 'OBSERVING'] +7127-75947-0002-428: ref=['DO', 'YOU', 'THINK', 'SO', 'SHE', 'REPLIED', 'WITH', 'INDIFFERENCE'] +7127-75947-0002-428: hyp=['DO', 'YOU', 'THINK', 'SO', 'SHE', 'REPLIED', 'WITH', 'INDIFFERENCE'] +7127-75947-0003-429: ref=['YES', 'THE', 'CHARACTER', 'WHICH', 'YOUR', 'ROYAL', 'HIGHNESS', 'ASSUMED', 'IS', 'IN', 'PERFECT', 'HARMONY', 'WITH', 'YOUR', 'OWN'] +7127-75947-0003-429: hyp=['YES', 'THE', 'CHARACTER', 'WHICH', 'YOU', 'ARE', 'ROYAL', 'HIGHNESS', 'ASSUMED', 'IS', 'IN', 'PERFECT', 'HARMONY', 'WITH', 'YOUR', 'OWN'] +7127-75947-0004-430: ref=['EXPLAIN', 'YOURSELF'] +7127-75947-0004-430: hyp=['EXPLAIN', 'YOURSELF'] +7127-75947-0005-431: ref=['I', 'ALLUDE', 'TO', 'THE', 'GODDESS'] +7127-75947-0005-431: hyp=['I', 'ALLUDE', 'TO', 'THE', 'GODDESS'] +7127-75947-0006-432: ref=['THE', 'PRINCESS', 'INQUIRED', 'NO'] +7127-75947-0006-432: hyp=['THE', 'PRINCESS', 'INQUIRED', 'NO'] +7127-75947-0007-433: ref=['SHE', 'THEN', 'ROSE', 'HUMMING', 'THE', 'AIR', 'TO', 'WHICH', 'SHE', 'WAS', 'PRESENTLY', 'GOING', 'TO', 'DANCE'] +7127-75947-0007-433: hyp=['SHE', 'THEN', 'ROSE', 'HUMMING', 'THE', 'AIR', 'TO', 'WHICH', 'SHE', 'WAS', 'PRESENTLY', 'GOING', 'TO', 'DANCE'] +7127-75947-0008-434: ref=['THE', 'ARROW', 'PIERCED', 'HIS', 'HEART', 'AND', 'WOUNDED', 'HIM', 
'MORTALLY'] +7127-75947-0008-434: hyp=['THE', 'ARROW', 'PIERCED', 'HIS', 'HEART', 'AND', 'WOUNDED', 'HIM', 'MORTALLY'] +7127-75947-0009-435: ref=['A', 'QUARTER', 'OF', 'AN', 'HOUR', 'AFTERWARDS', 'HE', 'RETURNED', 'TO', 'THE', 'THEATER', 'BUT', 'IT', 'WILL', 'BE', 'READILY', 'BELIEVED', 'THAT', 'IT', 'WAS', 'ONLY', 'A', 'POWERFUL', 'EFFORT', 'OF', 'REASON', 'OVER', 'HIS', 'GREAT', 'EXCITEMENT', 'THAT', 'ENABLED', 'HIM', 'TO', 'GO', 'BACK', 'OR', 'PERHAPS', 'FOR', 'LOVE', 'IS', 'THUS', 'STRANGELY', 'CONSTITUTED', 'HE', 'FOUND', 'IT', 'IMPOSSIBLE', 'EVEN', 'TO', 'REMAIN', 'MUCH', 'LONGER', 'SEPARATED', 'FROM', 'THE', 'PRESENCE', 'OF', 'ONE', 'WHO', 'HAD', 'BROKEN', 'HIS', 'HEART'] +7127-75947-0009-435: hyp=['A', 'QUARTER', 'OF', 'AN', 'HOUR', 'AFTERWARDS', 'HE', 'RETURNED', 'TO', 'THE', 'THEATRE', 'BUT', 'IT', 'WILL', 'BE', 'READILY', 'BELIEVED', 'THAT', 'IT', 'WAS', 'ONLY', 'A', 'POWERFUL', 'EFFORT', 'OF', 'REASON', 'OVER', 'HIS', 'GREAT', 'EXCITEMENT', 'THAT', 'ENABLED', 'HIM', 'TO', 'GO', 'BACK', 'OR', 'PERHAPS', 'FOR', 'LOVE', 'IS', 'THUS', 'STRANGELY', 'CONSTITUTED', 'HE', 'FOUND', 'IT', 'IMPOSSIBLE', 'EVEN', 'TO', 'REMAIN', 'MUCH', 'LONGER', 'SEPARATED', 'FROM', 'THEIR', 'PRESENCE', 'OF', 'ONE', 'WHO', 'HAD', 'BROKEN', 'HIS', 'HEART'] +7127-75947-0010-436: ref=['WHEN', 'SHE', 'PERCEIVED', 'THE', 'YOUNG', 'MAN', 'SHE', 'ROSE', 'LIKE', 'A', 'WOMAN', 'SURPRISED', 'IN', 'THE', 'MIDST', 'OF', 'IDEAS', 'SHE', 'WAS', 'DESIROUS', 'OF', 'CONCEALING', 'FROM', 'HERSELF'] +7127-75947-0010-436: hyp=['WHEN', 'SHE', 'PERCEIVED', 'THE', 'YOUNG', 'MAN', 'SHE', 'ROSE', 'LIKE', 'A', 'WOMAN', 'SURPRISED', 'IN', 'THE', 'MIDST', 'OF', 'IDEAS', 'SHE', 'WAS', 'DESIROUS', 'OF', 'CONCEALING', 'FROM', 'HERSELF'] +7127-75947-0011-437: ref=['REMAIN', 'I', 'IMPLORE', 'YOU', 'THE', 'EVENING', 'IS', 'MOST', 'LOVELY'] +7127-75947-0011-437: hyp=['REMAIN', 'I', 'IMPLORE', 'YOU', 'THE', 'EVENING', 'IS', 'MOST', 'LOVELY'] +7127-75947-0012-438: ref=['INDEED', 'AH'] +7127-75947-0012-438: hyp=['INDEED', 'AH'] +7127-75947-0013-439: ref=['I', 'REMEMBER', 'NOW', 'AND', 'I', 'CONGRATULATE', 'MYSELF', 'DO', 'YOU', 'LOVE', 'ANY', 'ONE'] +7127-75947-0013-439: hyp=['I', 'REMEMBER', 'NOW', 'AND', 'I', 'CONGRATULATE', 'MYSELF', 'DO', 'YOU', 'LOVE', 'ANY', 'ONE'] +7127-75947-0014-440: ref=['FORGIVE', 'ME', 'I', 'HARDLY', 'KNOW', 'WHAT', 'I', 'AM', 'SAYING', 'A', 'THOUSAND', 'TIMES', 'FORGIVE', 'ME', 'MADAME', 'WAS', 'RIGHT', 'QUITE', 'RIGHT', 'THIS', 'BRUTAL', 'EXILE', 'HAS', 'COMPLETELY', 'TURNED', 'MY', 'BRAIN'] +7127-75947-0014-440: hyp=['FORGIVE', 'ME', 'I', 'HARDLY', 'KNOW', 'WHAT', 'I', 'AM', 'SAYING', 'A', 'THOUSAND', 'TIMES', 'FORGIVE', 'ME', 'MADAME', 'WAS', 'RIGHT', 'QUITE', 'RIGHT', 'THIS', 'BRUTAL', 'EXILE', 'HAS', 'COMPLETELY', 'TURNED', 'MY', 'BRAIN'] +7127-75947-0015-441: ref=['THERE', 'CANNOT', 'BE', 'A', 'DOUBT', 'HE', 'RECEIVED', 'YOU', 'KINDLY', 'FOR', 'IN', 'FACT', 'YOU', 'RETURNED', 'WITHOUT', 'HIS', 'PERMISSION'] +7127-75947-0015-441: hyp=['THERE', 'CANNOT', 'BE', 'A', 'DOUBT', 'HE', 'RECEIVED', 'YOU', 'KINDLY', 'FOR', 'IN', 'FACT', 'YOU', 'RETURNED', 'WITHOUT', 'HIS', 'PERMISSION'] +7127-75947-0016-442: ref=['OH', 'MADEMOISELLE', 'WHY', 'HAVE', 'I', 'NOT', 'A', 'DEVOTED', 'SISTER', 'OR', 'A', 'TRUE', 'FRIEND', 'SUCH', 'AS', 'YOURSELF'] +7127-75947-0016-442: hyp=['OH', 'MADEMOISELLE', 'WHY', 'HAVE', 'I', 'NOT', 'A', 'DEVOTED', 'SISTER', 'OR', 'A', 'TRUE', 'FRIEND', 'SUCH', 'AS', 'YOURSELF'] +7127-75947-0017-443: ref=['WHAT', 'ALREADY', 'HERE', 'THEY', 'SAID', 'TO', 'HER'] +7127-75947-0017-443: hyp=['WHAT', 
'ALREADY', 'HERE', 'THEY', 'SAID', 'TO', 'HER'] +7127-75947-0018-444: ref=['I', 'HAVE', 'BEEN', 'HERE', 'THIS', 'QUARTER', 'OF', 'AN', 'HOUR', 'REPLIED', 'LA', 'VALLIERE'] +7127-75947-0018-444: hyp=['I', 'HAVE', 'BEEN', 'HERE', 'THIS', 'QUARTER', 'OF', 'AN', 'HOUR', 'REPLIED', 'LA', 'VALLIERS'] +7127-75947-0019-445: ref=['DID', 'NOT', 'THE', 'DANCING', 'AMUSE', 'YOU', 'NO'] +7127-75947-0019-445: hyp=['DID', 'NOT', 'THE', 'DANCING', 'AMUSE', 'YOU', 'NO'] +7127-75947-0020-446: ref=['NO', 'MORE', 'THAN', 'THE', 'DANCING'] +7127-75947-0020-446: hyp=['NO', 'MORE', 'THAN', 'THE', 'DANCING'] +7127-75947-0021-447: ref=['LA', 'VALLIERE', 'IS', 'QUITE', 'A', 'POETESS', 'SAID', 'TONNAY', 'CHARENTE'] +7127-75947-0021-447: hyp=['LA', 'VALLIERS', 'IS', 'QUITE', 'A', 'POETES', 'SAID', 'TONY', 'SCHERANT'] +7127-75947-0022-448: ref=['I', 'AM', 'A', 'WOMAN', 'AND', 'THERE', 'ARE', 'FEW', 'LIKE', 'ME', 'WHOEVER', 'LOVES', 'ME', 'FLATTERS', 'ME', 'WHOEVER', 'FLATTERS', 'ME', 'PLEASES', 'ME', 'AND', 'WHOEVER', 'PLEASES', 'WELL', 'SAID', 'MONTALAIS', 'YOU', 'DO', 'NOT', 'FINISH'] +7127-75947-0022-448: hyp=['I', 'AM', 'A', 'WOMAN', 'AND', 'THERE', 'ARE', 'FEW', 'LIKE', 'ME', 'WHOEVER', 'LOVES', 'ME', 'FLATTERS', 'ME', 'WHOEVER', 'FLATTERS', 'ME', 'PLEASES', 'ME', 'AND', 'WHOEVER', 'PLEASES', 'WELL', 'SAID', 'MONTALAIS', 'YOU', 'DO', 'NOT', 'FINISH'] +7127-75947-0023-449: ref=['IT', 'IS', 'TOO', 'DIFFICULT', 'REPLIED', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'LAUGHING', 'LOUDLY'] +7127-75947-0023-449: hyp=['IT', 'IS', 'TOO', 'DIFFICULT', 'REPLIED', 'MADEMOISELLE', 'DENISCHALANT', 'LAUGHING', 'LOUDLY'] +7127-75947-0024-450: ref=['LOOK', 'YONDER', 'DO', 'YOU', 'NOT', 'SEE', 'THE', 'MOON', 'SLOWLY', 'RISING', 'SILVERING', 'THE', 'TOPMOST', 'BRANCHES', 'OF', 'THE', 'CHESTNUTS', 'AND', 'THE', 'OAKS'] +7127-75947-0024-450: hyp=['LOOK', 'YONDER', 'DO', 'YOU', 'NOT', 'SEE', 'THE', 'MOON', 'SLOWLY', 'RISING', 'SILVERING', 'THE', 'TOPMOST', 'BRANCHES', 'OF', 'THE', 'CHESTNUTS', 'AND', 'THE', 'YOLKS'] +7127-75947-0025-451: ref=['EXQUISITE', 'SOFT', 'TURF', 'OF', 'THE', 'WOODS', 'THE', 'HAPPINESS', 'WHICH', 'YOUR', 'FRIENDSHIP', 'CONFERS', 'UPON', 'ME'] +7127-75947-0025-451: hyp=['EXQUISITE', 'SOFT', 'TURF', 'OF', 'THE', 'WOODS', 'THE', 'HAPPINESS', 'WHICH', 'YOUR', 'FRIENDSHIP', 'CONFERS', 'UPON', 'ME'] +7127-75947-0026-452: ref=['WELL', 'SAID', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'I', 'ALSO', 'THINK', 'A', 'GOOD', 'DEAL', 'BUT', 'I', 'TAKE', 'CARE'] +7127-75947-0026-452: hyp=['WELL', 'SAID', 'MADEMOISELLE', 'DETERNATION', 'I', 'ALSO', 'THINK', 'A', 'GOOD', 'DEAL', 'BUT', 'I', 'TAKE', 'CARE'] +7127-75947-0027-453: ref=['TO', 'SAY', 'NOTHING', 'SAID', 'MONTALAIS', 'SO', 'THAT', 'WHEN', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'THINKS', 'ATHENAIS', 'IS', 'THE', 'ONLY', 'ONE', 'WHO', 'KNOWS', 'IT'] +7127-75947-0027-453: hyp=['TO', 'SAY', 'NOTHING', 'SAID', 'MONTALAIS', 'SO', 'THAT', 'WHEN', 'MADEMOISELLE', 'DE', 'TOURNISHER', 'AUNT', 'THINKS', 'ETHNE', 'IS', 'THE', 'ONLY', 'ONE', 'WHO', 'KNOWS', 'IT'] +7127-75947-0028-454: ref=['QUICK', 'QUICK', 'THEN', 'AMONG', 'THE', 'HIGH', 'REED', 'GRASS', 'SAID', 'MONTALAIS', 'STOOP', 'ATHENAIS', 'YOU', 'ARE', 'SO', 'TALL'] +7127-75947-0028-454: hyp=['QUICK', 'QUICK', 'THEN', 'AMONG', 'THE', 'HIGH', 'REED', 'GRASS', 'SAID', 'MONTALAIS', 'STOOP', 'ETHINE', 'YOU', 'ARE', 'SO', 'TALL'] +7127-75947-0029-455: ref=['THE', 'YOUNG', 'GIRLS', 'HAD', 'INDEED', 'MADE', 'THEMSELVES', 'SMALL', 'INDEED', 'INVISIBLE'] +7127-75947-0029-455: hyp=['THE', 'YOUNG', 'GIRLS', 'HAD', 'INDEED', 
'MADE', 'THEMSELVES', 'SMALL', 'INDEED', 'INVISIBLE'] +7127-75947-0030-456: ref=['SHE', 'WAS', 'HERE', 'JUST', 'NOW', 'SAID', 'THE', 'COUNT'] +7127-75947-0030-456: hyp=['SHE', 'WAS', 'HERE', 'JUST', 'NOW', 'SAID', 'THE', 'COUNT'] +7127-75947-0031-457: ref=['YOU', 'ARE', 'POSITIVE', 'THEN'] +7127-75947-0031-457: hyp=['YOU', 'ARE', 'POSITIVE', 'THEN'] +7127-75947-0032-458: ref=['YES', 'BUT', 'PERHAPS', 'I', 'FRIGHTENED', 'HER', 'IN', 'WHAT', 'WAY'] +7127-75947-0032-458: hyp=['YES', 'BUT', 'PERHAPS', 'I', 'FRIGHTENED', 'HER', 'AND', 'WHAT', 'WAY'] +7127-75947-0033-459: ref=['HOW', 'IS', 'IT', 'LA', 'VALLIERE', 'SAID', 'MADEMOISELLE', 'DE', 'TONNAY', 'CHARENTE', 'THAT', 'THE', 'VICOMTE', 'DE', 'BRAGELONNE', 'SPOKE', 'OF', 'YOU', 'AS', 'LOUISE'] +7127-75947-0033-459: hyp=['HOW', 'IS', 'IT', 'LA', 'VALLIERS', 'SAID', 'MADEMOISELLE', 'DETENNACHELANT', 'THAT', 'THE', 'VICOMTE', 'DE', 'BREG', 'ALONE', 'SPOKE', 'OF', 'YOU', 'AS', 'LOUISE'] +7127-75947-0034-460: ref=['IT', 'SEEMS', 'THE', 'KING', 'WILL', 'NOT', 'CONSENT', 'TO', 'IT'] +7127-75947-0034-460: hyp=['IT', 'SEEMS', 'THE', 'KING', 'WILL', 'NOT', 'CONSENT', 'TO', 'IT'] +7127-75947-0035-461: ref=['GOOD', 'GRACIOUS', 'HAS', 'THE', 'KING', 'ANY', 'RIGHT', 'TO', 'INTERFERE', 'IN', 'MATTERS', 'OF', 'THAT', 'KIND'] +7127-75947-0035-461: hyp=['GOOD', 'GRACIOUS', 'HAS', 'THE', 'KING', 'ANY', 'RIGHT', 'TO', 'INTERFERE', 'IN', 'MATTERS', 'OF', 'THAT', 'KIND'] +7127-75947-0036-462: ref=['I', 'GIVE', 'MY', 'CONSENT'] +7127-75947-0036-462: hyp=['I', 'GIVE', 'MY', 'CONSENT'] +7127-75947-0037-463: ref=['OH', 'I', 'AM', 'SPEAKING', 'SERIOUSLY', 'REPLIED', 'MONTALAIS', 'AND', 'MY', 'OPINION', 'IN', 'THIS', 'CASE', 'IS', 'QUITE', 'AS', 'GOOD', 'AS', 'THE', "KING'S", 'I', 'SUPPOSE', 'IS', 'IT', 'NOT', 'LOUISE'] +7127-75947-0037-463: hyp=['OH', 'I', 'AM', 'SPEAKING', 'SERIOUSLY', 'REPLIED', 'MONTALAIS', 'AND', 'MY', 'OPINION', 'IN', 'THIS', 'CASE', 'IS', 'QUITE', 'AS', 'GOOD', 'AS', 'THE', 'KING', 'AS', 'I', 'SUPPOSE', 'IS', 'IT', 'NOT', 'LOUISE'] +7127-75947-0038-464: ref=['LET', 'US', 'RUN', 'THEN', 'SAID', 'ALL', 'THREE', 'AND', 'GRACEFULLY', 'LIFTING', 'UP', 'THE', 'LONG', 'SKIRTS', 'OF', 'THEIR', 'SILK', 'DRESSES', 'THEY', 'LIGHTLY', 'RAN', 'ACROSS', 'THE', 'OPEN', 'SPACE', 'BETWEEN', 'THE', 'LAKE', 'AND', 'THE', 'THICKEST', 'COVERT', 'OF', 'THE', 'PARK'] +7127-75947-0038-464: hyp=['LET', 'US', 'RUN', 'THEN', 'SAID', 'ALL', 'THREE', 'AND', 'GRACEFULLY', 'LIFTING', 'UP', 'THE', 'LONG', 'SKIRTS', 'OF', 'THEIR', 'SILK', 'DRESSES', 'THEY', 'LIGHTLY', 'RAN', 'ACROSS', 'THE', 'OPEN', 'SPACE', 'BETWEEN', 'THE', 'LAKE', 'AND', 'THE', 'THICKEST', 'COVERT', 'OF', 'THE', 'PARK'] +7127-75947-0039-465: ref=['IN', 'FACT', 'THE', 'SOUND', 'OF', "MADAME'S", 'AND', 'THE', "QUEEN'S", 'CARRIAGES', 'COULD', 'BE', 'HEARD', 'IN', 'THE', 'DISTANCE', 'UPON', 'THE', 'HARD', 'DRY', 'GROUND', 'OF', 'THE', 'ROADS', 'FOLLOWED', 'BY', 'THE', 'MOUNTED', 'CAVALIERS'] +7127-75947-0039-465: hyp=['IN', 'FACT', 'THE', 'SOUND', 'OF', "MADAME'S", 'AND', 'THE', "QUEEN'S", 'CARRIAGES', 'COULD', 'BE', 'HEARD', 'IN', 'THE', 'DISTANCE', 'UPON', 'THE', 'HARD', 'DRY', 'GROUND', 'OF', 'THE', 'ROADS', 'FOLLOWED', 'BY', 'THE', 'MOUNTAIN', 'CAVALIERS'] +7127-75947-0040-466: ref=['IN', 'THIS', 'WAY', 'THE', 'FETE', 'OF', 'THE', 'WHOLE', 'COURT', 'WAS', 'A', 'FETE', 'ALSO', 'FOR', 'THE', 'MYSTERIOUS', 'INHABITANTS', 'OF', 'THE', 'FOREST', 'FOR', 'CERTAINLY', 'THE', 'DEER', 'IN', 'THE', 'BRAKE', 'THE', 'PHEASANT', 'ON', 'THE', 'BRANCH', 'THE', 'FOX', 'IN', 'ITS', 'HOLE', 'WERE', 'ALL', 'LISTENING'] 
+7127-75947-0040-466: hyp=['IN', 'THIS', 'WAY', 'THE', 'FETE', 'OF', 'THE', 'WHOLE', 'COURT', 'WAS', 'A', 'FETE', 'ALSO', 'FOR', 'THE', 'MYSTERIOUS', 'INHABITANTS', 'OF', 'THE', 'FOREST', 'FOR', 'CERTAINLY', 'THE', 'DEER', 'IN', 'THE', 'BRAKE', 'THE', 'PHEASANT', 'ON', 'THE', 'BRANCH', 'THE', 'FOX', 'IN', 'ITS', 'HOLE', 'WERE', 'ALL', 'LISTENING'] +7176-88083-0000-707: ref=['ALL', 'ABOUT', 'HIM', 'WAS', 'A', 'TUMULT', 'OF', 'BRIGHT', 'AND', 'BROKEN', 'COLOR', 'SCATTERED', 'IN', 'BROAD', 'SPLASHES'] +7176-88083-0000-707: hyp=['ALL', 'ABOUT', 'HIM', 'WAS', 'A', 'TUMULT', 'OF', 'BRIGHT', 'AND', 'BROKEN', 'COLOR', 'SCATTERED', 'AND', 'BROAD', 'SPLASHES'] +7176-88083-0001-708: ref=['THE', 'MERGANSER', 'HAD', 'A', 'CRESTED', 'HEAD', 'OF', 'IRIDESCENT', 'GREEN', 'BLACK', 'A', 'BROAD', 'COLLAR', 'OF', 'LUSTROUS', 'WHITE', 'BLACK', 'BACK', 'BLACK', 'AND', 'WHITE', 'WINGS', 'WHITE', 'BELLY', 'SIDES', 'FINELY', 'PENCILLED', 'IN', 'BLACK', 'AND', 'WHITE', 'AND', 'A', 'BREAST', 'OF', 'RICH', 'CHESTNUT', 'RED', 'STREAKED', 'WITH', 'BLACK'] +7176-88083-0001-708: hyp=['THE', 'MERGANCER', 'HAD', 'A', 'CRUSTED', 'HEAD', 'OF', 'IRIDESCENT', 'GREEN', 'BLACK', 'A', 'BROAD', 'COLLAR', 'OF', 'LUSTROUS', 'WHITE', 'BLACK', 'BACK', 'BLACK', 'AND', 'WHITE', 'WINGS', 'WHITE', 'BELLY', 'SIDES', 'FINELY', 'PENCILLED', 'IN', 'BLACK', 'AND', 'WHITE', 'AND', 'A', 'BREAST', 'OF', 'RICH', 'CHESTNUT', 'RED', 'STREAKED', 'WITH', 'BLACK'] +7176-88083-0002-709: ref=['HIS', 'FEET', 'WERE', 'RED', 'HIS', 'LONG', 'NARROW', 'BEAK', 'WITH', 'ITS', 'SAW', 'TOOTHED', 'EDGES', 'AND', 'SHARP', 'HOOKED', 'TIP', 'WAS', 'BRIGHT', 'RED'] +7176-88083-0002-709: hyp=['HIS', 'FEET', 'WERE', 'RED', 'HIS', 'LONG', 'NARROW', 'BEAK', 'WITH', 'ITS', 'SALL', 'TOOTHED', 'EDGES', 'AND', 'SHARP', 'HOOKED', 'TIP', 'WAS', 'BRIGHT', 'RED'] +7176-88083-0003-710: ref=['BUT', 'HERE', 'HE', 'WAS', 'AT', 'A', 'TERRIBLE', 'DISADVANTAGE', 'AS', 'COMPARED', 'WITH', 'THE', 'OWLS', 'HAWKS', 'AND', 'EAGLES', 'HE', 'HAD', 'NO', 'RENDING', 'CLAWS'] +7176-88083-0003-710: hyp=['BUT', 'HERE', 'HE', 'WAS', 'AT', 'A', 'TERRIBLE', 'DISADVANTAGE', 'AS', 'COMPARED', 'WITH', 'THE', 'OWLS', 'HAWKS', 'AND', 'EAGLES', 'HE', 'HAD', 'NO', 'RENDING', 'CLAWS'] +7176-88083-0004-711: ref=['BUT', 'SUDDENLY', 'STRAIGHT', 'AND', 'SWIFT', 'AS', 'A', 'DIVING', 'CORMORANT', 'HE', 'SHOT', 'DOWN', 'INTO', 'THE', 'TORRENT', 'AND', 'DISAPPEARED', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0004-711: hyp=['BUT', 'SUDDENLY', 'STRAIGHT', 'AND', 'SWIFT', 'AS', 'A', 'DIVING', 'COMRADE', 'HE', 'SHOT', 'DOWN', 'INTO', 'THE', 'TORRENT', 'AND', 'DISAPPEARED', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0005-712: ref=['ONCE', 'FAIRLY', 'A', 'WING', 'HOWEVER', 'HE', 'WHEELED', 'AND', 'MADE', 'BACK', 'HURRIEDLY', 'FOR', 'HIS', 'PERCH'] +7176-88083-0005-712: hyp=['ONCE', 'FAIRLY', 'A', 'WING', 'HOWEVER', 'HE', 'WHEELED', 'AND', 'MADE', 'BACK', 'HURRIEDLY', 'FOR', 'HIS', 'PERCH'] +7176-88083-0006-713: ref=['IT', 'MIGHT', 'HAVE', 'SEEMED', 'THAT', 'A', 'TROUT', 'OF', 'THIS', 'SIZE', 'WAS', 'A', 'FAIRLY', 'SUBSTANTIAL', 'MEAL'] +7176-88083-0006-713: hyp=['AND', 'MIGHT', 'HAVE', 'SEEMED', 'THAT', 'A', 'TROUT', 'OF', 'THIS', 'SIZE', 'WAS', 'A', 'FAIRLY', 'SUBSTANTIAL', 'MEAL'] +7176-88083-0007-714: ref=['BUT', 'SUCH', 'WAS', 'HIS', 'KEENNESS', 'THAT', 'EVEN', 'WHILE', 'THE', 'WIDE', 'FLUKES', 'OF', 'HIS', 'ENGORGED', 'VICTIM', 'WERE', 'STILL', 'STICKING', 'OUT', 'AT', 'THE', 'CORNERS', 'OF', 'HIS', 'BEAK', 'HIS', 'FIERCE', 'RED', 'EYES', 'WERE', 'ONCE', 'MORE', 'PEERING', 'DOWNWARD', 'INTO', 'THE', 'TORRENT', 'IN', 
'SEARCH', 'OF', 'FRESH', 'PREY'] +7176-88083-0007-714: hyp=['BUT', 'SUCH', 'WAS', 'HIS', 'KEENNESS', 'THAT', 'EVEN', 'WHILE', 'THE', 'WIDE', 'FLUKES', 'OF', 'HIS', 'ENGORGED', 'VICTIM', 'WERE', 'STILL', 'STICKING', 'OUT', 'AT', 'THE', 'CORNERS', 'OF', 'HIS', 'BEAK', 'HIS', 'FIERCE', 'RED', 'EYES', 'WERE', 'ONCE', 'MORE', 'PEERING', 'DOWNWARD', 'INTO', 'THE', 'TORRENT', 'IN', 'SEARCH', 'OF', 'FRESH', 'PREY'] +7176-88083-0008-715: ref=['IN', 'DESPAIR', 'HE', 'HURLED', 'HIMSELF', 'DOWNWARD', 'TOO', 'SOON'] +7176-88083-0008-715: hyp=['IN', 'DESPAIR', 'HE', 'HURLED', 'HIMSELF', 'DOWNWARD', 'TOO', 'SOON'] +7176-88083-0009-716: ref=['THE', 'GREAT', 'HAWK', 'FOLLOWED', 'HURRIEDLY', 'TO', 'RETRIEVE', 'HIS', 'PREY', 'FROM', 'THE', 'GROUND'] +7176-88083-0009-716: hyp=['THE', 'GREAT', 'HAWK', 'FOWLED', 'HURRIEDLY', 'TO', 'RETRIEVE', 'HIS', 'PREY', 'FROM', 'THE', 'GROUND'] +7176-88083-0010-717: ref=['THE', 'CAT', 'GROWLED', 'SOFTLY', 'PICKED', 'UP', 'THE', 'PRIZE', 'IN', 'HER', 'JAWS', 'AND', 'TROTTED', 'INTO', 'THE', 'BUSHES', 'TO', 'DEVOUR', 'IT'] +7176-88083-0010-717: hyp=['THE', 'CAT', 'GROWLED', 'SOFTLY', 'PICKED', 'UP', 'THE', 'PRIZE', 'IN', 'HER', 'JAWS', 'AND', 'TROTTED', 'INTO', 'THE', 'BUSHES', 'TO', 'DEVOUR', 'IT'] +7176-88083-0011-718: ref=['IN', 'FACT', 'HE', 'HAD', 'JUST', 'FINISHED', 'IT', 'THE', 'LAST', 'OF', 'THE', "TROUT'S", 'TAIL', 'HAD', 'JUST', 'VANISHED', 'WITH', 'A', 'SPASM', 'DOWN', 'HIS', 'STRAINED', 'GULLET', 'WHEN', 'THE', 'BAFFLED', 'HAWK', 'CAUGHT', 'SIGHT', 'OF', 'HIM', 'AND', 'SWOOPED'] +7176-88083-0011-718: hyp=['IN', 'FACT', 'HE', 'HAD', 'JUST', 'FINISHED', 'IT', 'THE', 'LAST', 'OF', 'THE', "TROUT'S", 'TAIL', 'HAD', 'JUST', 'VANISHED', 'WITH', 'A', 'SPASM', 'DOWN', 'HIS', 'STRAINED', 'GULLET', 'WHEN', 'THE', 'BAFFLED', 'HAWK', 'CAUGHT', 'SIGHT', 'OF', 'HIM', 'AND', 'SWOOPED'] +7176-88083-0012-719: ref=['THE', 'HAWK', 'ALIGHTED', 'ON', 'THE', 'DEAD', 'BRANCH', 'AND', 'SAT', 'UPRIGHT', 'MOTIONLESS', 'AS', 'IF', 'SURPRISED'] +7176-88083-0012-719: hyp=['THE', 'HAWK', 'ALIGHTED', 'ON', 'THE', 'DEAD', 'BRANCH', 'AND', 'SAT', 'UPRIGHT', 'MOTIONLESS', 'AS', 'IF', 'SURPRISED'] +7176-88083-0013-720: ref=['LIKE', 'HIS', 'UNFORTUNATE', 'LITTLE', 'COUSIN', 'THE', 'TEAL', 'HE', 'TOO', 'HAD', 'FELT', 'THE', 'FEAR', 'OF', 'DEATH', 'SMITTEN', 'INTO', 'HIS', 'HEART', 'AND', 'WAS', 'HEADING', 'DESPERATELY', 'FOR', 'THE', 'REFUGE', 'OF', 'SOME', 'DARK', 'OVERHANGING', 'BANK', 'DEEP', 'FRINGED', 'WITH', 'WEEDS', 'WHERE', 'THE', 'DREADFUL', 'EYE', 'OF', 'THE', 'HAWK', 'SHOULD', 'NOT', 'DISCERN', 'HIM'] +7176-88083-0013-720: hyp=['LIKE', 'HIS', 'UNFORTUNATE', 'LITTLE', 'COUSIN', 'THE', 'TEAL', 'HE', 'TOO', 'HAD', 'FELT', 'THE', 'FEAR', 'OF', 'DEATH', 'SMITTEN', 'INTO', 'HIS', 'HEART', 'AND', 'WAS', 'HEADING', 'DESPERATELY', 'FOR', 'THE', 'REFUGE', 'OF', 'SOME', 'DARK', 'OVERHANGING', 'BANK', 'DEEP', 'FRINGED', 'WITH', 'WEEDS', 'WHERE', 'THE', 'DREADFUL', 'EYE', 'OF', 'THE', 'HAWK', 'SHOULD', 'NOT', 'DISCERN', 'HIM'] +7176-88083-0014-721: ref=['THE', 'HAWK', 'SAT', 'UPON', 'THE', 'BRANCH', 'AND', 'WATCHED', 'HIS', 'QUARRY', 'SWIMMING', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0014-721: hyp=['THE', 'HAWK', 'SAT', 'UPON', 'THE', 'BRANCH', 'AND', 'WATCHED', 'HIS', 'QUARRY', 'SWIMMING', 'BENEATH', 'THE', 'SURFACE'] +7176-88083-0015-722: ref=['ALMOST', 'INSTANTLY', 'HE', 'WAS', 'FORCED', 'TO', 'THE', 'TOP'] +7176-88083-0015-722: hyp=['ALMOST', 'INSTANTLY', 'HE', 'WAS', 'FORCED', 'TO', 'THE', 'TOP'] +7176-88083-0016-723: ref=['STRAIGHTWAY', 'THE', 'HAWK', 'GLIDED', 'FROM', 'HIS', 'PERCH', 'AND', 
'DARTED', 'AFTER', 'HIM'] +7176-88083-0016-723: hyp=['STRAIGHTWAY', 'IN', 'THE', 'HAWK', 'GLIDED', 'FROM', 'HIS', 'PERCH', 'AND', 'DARTED', 'AFTER', 'HIM'] +7176-88083-0017-724: ref=['BUT', 'AT', 'THIS', 'POINT', 'IN', 'THE', 'RAPIDS', 'IT', 'WAS', 'IMPOSSIBLE', 'FOR', 'HIM', 'TO', 'STAY', 'DOWN'] +7176-88083-0017-724: hyp=['BUT', 'AT', 'THIS', 'POINT', 'IN', 'THE', 'RAPIDS', 'IT', 'WAS', 'IMPOSSIBLE', 'FOR', 'HIM', 'TO', 'STAY', 'DOWN'] +7176-88083-0018-725: ref=['BUT', 'THIS', 'FREQUENTER', 'OF', 'THE', 'HEIGHTS', 'OF', 'AIR', 'FOR', 'ALL', 'HIS', 'SAVAGE', 'VALOR', 'WAS', 'TROUBLED', 'AT', 'THE', 'LEAPING', 'WAVES', 'AND', 'THE', 'TOSSING', 'FOAM', 'OF', 'THESE', 'MAD', 'RAPIDS', 'HE', 'DID', 'NOT', 'UNDERSTAND', 'THEM'] +7176-88083-0018-725: hyp=['BUT', 'THIS', 'FREQUENTER', 'OF', 'THE', 'HEIGHTS', 'OF', 'AIR', 'FOR', 'ALL', 'HIS', 'SAVAGE', 'VALOUR', 'WAS', 'TROUBLED', 'AT', 'THE', 'LEAPING', 'WAVES', 'AND', 'THE', 'TOSSING', 'FOAM', 'OF', 'THESE', 'MAD', 'RAPIDS', 'HE', 'DID', 'NOT', 'UNDERSTAND', 'THEM'] +7176-88083-0019-726: ref=['AS', 'HE', 'FLEW', 'HIS', 'DOWN', 'REACHING', 'CLUTCHING', 'TALONS', 'WERE', 'NOT', 'HALF', 'A', 'YARD', 'ABOVE', 'THE', "FUGITIVE'S", 'HEAD'] +7176-88083-0019-726: hyp=['AS', 'HE', 'FLEW', 'HIS', 'DOWNREACHING', 'CLUTCHING', 'TALONS', 'WERE', 'NOT', 'HALF', 'A', 'YARD', 'ABOVE', 'THE', "FUGITIVE'S", 'HEAD'] +7176-88083-0020-727: ref=['WHERE', 'THE', 'WAVES', 'FOR', 'AN', 'INSTANT', 'SANK', 'THEY', 'CAME', 'CLOSER', 'BUT', 'NOT', 'QUITE', 'WITHIN', 'GRASPING', 'REACH'] +7176-88083-0020-727: hyp=['WHERE', 'THE', 'WAVE', 'IS', 'FOR', 'AN', 'INSTANT', 'SANK', 'THEY', 'CAME', 'CLOSER', 'BUT', 'NOT', 'QUITE', 'WITHIN', 'GRASPING', 'REACH'] +7176-88083-0021-728: ref=['BUT', 'AS', 'BEFORE', 'THE', 'LEAPING', 'WAVES', 'OF', 'THE', 'RAPIDS', 'WERE', 'TOO', 'MUCH', 'FOR', 'HIS', 'PURSUER', 'AND', 'HE', 'WAS', 'ABLE', 'TO', 'FLAP', 'HIS', 'WAY', 'ONWARD', 'IN', 'A', 'CLOUD', 'OF', 'FOAM', 'WHILE', 'DOOM', 'HUNG', 'LOW', 'ABOVE', 'HIS', 'HEAD', 'YET', 'HESITATED', 'TO', 'STRIKE'] +7176-88083-0021-728: hyp=['BUT', 'AS', 'BEFORE', 'THE', 'LEAPING', 'WAVES', 'OF', 'THE', 'RAPIDS', 'WERE', 'TOO', 'MUCH', 'FOR', 'HIS', 'PURSUER', 'AND', 'HE', 'WAS', 'ABLE', 'TO', 'FLAP', 'HIS', 'WAY', 'ONWARD', 'IN', 'A', 'CLOUD', 'OF', 'FOAM', 'WHILE', 'DOOM', 'HUNG', 'LOW', 'ABOVE', 'HIS', 'HEAD', 'YET', 'HESITATED', 'TO', 'STRIKE'] +7176-88083-0022-729: ref=['THE', 'HAWK', 'EMBITTERED', 'BY', 'THE', 'LOSS', 'OF', 'HIS', 'FIRST', 'QUARRY', 'HAD', 'BECOME', 'AS', 'DOGGED', 'IN', 'PURSUIT', 'AS', 'A', 'WEASEL', 'NOT', 'TO', 'BE', 'SHAKEN', 'OFF', 'OR', 'EVADED', 'OR', 'DECEIVED'] +7176-88083-0022-729: hyp=['THE', 'HAWK', 'EMBITTERED', 'BY', 'THE', 'LOSS', 'OF', 'HIS', 'FIRST', 'QUARRY', 'HAD', 'BECOME', 'AS', 'DOGGED', 'IN', 'PURSUIT', 'AS', 'A', 'WEASEL', 'NOT', 'TO', 'BE', 'SHAKEN', 'OFF', 'OR', 'EVADED', 'OR', 'DECEIVED'] +7176-88083-0023-730: ref=['HE', 'HAD', 'A', 'LOT', 'OF', 'LINE', 'OUT', 'AND', 'THE', 'PLACE', 'WAS', 'NONE', 'TOO', 'FREE', 'FOR', 'A', 'LONG', 'CAST', 'BUT', 'HE', 'WAS', 'IMPATIENT', 'TO', 'DROP', 'HIS', 'FLIES', 'AGAIN', 'ON', 'THE', 'SPOT', 'WHERE', 'THE', 'BIG', 'FISH', 'WAS', 'FEEDING'] +7176-88083-0023-730: hyp=['HE', 'HAD', 'A', 'LOT', 'OF', 'LINE', 'OUT', 'AND', 'THE', 'PLACE', 'WAS', 'NONE', 'TOO', 'FREE', 'FOR', 'A', 'LONG', 'CAST', 'BUT', 'HE', 'WAS', 'IMPATIENT', 'TO', 'DROP', 'HIS', 'FLIES', 'AGAIN', 'ON', 'THE', 'SPOT', 'WHERE', 'THE', 'BIG', 'FISH', 'WAS', 'FEEDING'] +7176-88083-0024-731: ref=['THE', 'LAST', 'DROP', 'FLY', 'AS', 'LUCK', 'WOULD', 
'HAVE', 'IT', 'CAUGHT', 'JUST', 'IN', 'THE', 'CORNER', 'OF', 'THE', "HAWK'S", 'ANGRILY', 'OPEN', 'BEAK', 'HOOKING', 'ITSELF', 'FIRMLY'] +7176-88083-0024-731: hyp=['THE', 'LAST', 'DROP', 'FLY', 'AS', 'LUCK', 'WOULD', 'HAVE', 'IT', 'CAUGHT', 'JUST', 'IN', 'THE', 'CORNER', 'OF', 'THE', "HAWK'S", 'ANGRILY', 'OPEN', 'BEAK', 'HOOKING', 'ITSELF', 'FIRMLY'] +7176-88083-0025-732: ref=['AT', 'THE', 'SUDDEN', 'SHARP', 'STING', 'OF', 'IT', 'THE', 'GREAT', 'BIRD', 'TURNED', 'HIS', 'HEAD', 'AND', 'NOTICED', 'FOR', 'THE', 'FIRST', 'TIME', 'THE', 'FISHERMAN', 'STANDING', 'ON', 'THE', 'BANK'] +7176-88083-0025-732: hyp=['AT', 'THE', 'SUDDEN', 'SHARP', 'STING', 'OF', 'IT', 'THE', 'GREAT', 'BIRD', 'TURNED', 'HIS', 'HEAD', 'AND', 'NOTICED', 'FOR', 'THE', 'FIRST', 'TIME', 'THE', 'FISHERMAN', 'STANDING', 'ON', 'THE', 'BANK'] +7176-88083-0026-733: ref=['THE', 'DRAG', 'UPON', 'HIS', 'BEAK', 'AND', 'THE', 'LIGHT', 'CHECK', 'UPON', 'HIS', 'WINGS', 'WERE', 'INEXPLICABLE', 'TO', 'HIM', 'AND', 'APPALLING'] +7176-88083-0026-733: hyp=['THE', 'DRAG', 'UPON', 'HIS', 'BEAK', 'AND', 'THE', 'LIGHT', 'CHECK', 'UPON', 'HIS', 'WINGS', 'WERE', 'INEXPLICABLE', 'TO', 'HIM', 'AND', 'APPALLING'] +7176-88083-0027-734: ref=['THEN', 'THE', 'LEADER', 'PARTED', 'FROM', 'THE', 'LINE'] +7176-88083-0027-734: hyp=['THAN', 'THE', 'LEADER', 'PARTED', 'FROM', 'THE', 'LINE'] +7176-92135-0000-661: ref=['HE', 'IS', 'A', 'WELCOME', 'FIGURE', 'AT', 'THE', 'GARDEN', 'PARTIES', 'OF', 'THE', 'ELECT', 'WHO', 'ARE', 'ALWAYS', 'READY', 'TO', 'ENCOURAGE', 'HIM', 'BY', 'ACCEPTING', 'FREE', 'SEATS', 'FOR', 'HIS', 'PLAY', 'ACTOR', 'MANAGERS', 'NOD', 'TO', 'HIM', 'EDITORS', 'ALLOW', 'HIM', 'TO', 'CONTRIBUTE', 'WITHOUT', 'CHARGE', 'TO', 'A', 'SYMPOSIUM', 'ON', 'THE', 'PRICE', 'OF', 'GOLF', 'BALLS'] +7176-92135-0000-661: hyp=['HE', 'IS', 'A', 'WELCOME', 'FIGURE', 'AT', 'THE', 'GARDEN', 'PARTIES', 'OF', 'THE', 'ELECT', 'WHO', 'ARE', 'ALWAYS', 'READY', 'TO', 'ENCOURAGE', 'HIM', 'BY', 'ACCEPTING', 'FREE', 'SEATS', 'FOR', 'HIS', 'PLAY', 'ACTOR', 'MANAGERS', 'NOD', 'TO', 'HIM', 'EDITORS', 'ALLOW', 'HIM', 'TO', 'CONTRIBUTE', 'WITHOUT', 'CHARGE', 'TO', 'A', 'SUPPOSIUM', 'ON', 'THE', 'PRICE', 'OF', 'GOLF', 'BALLS'] +7176-92135-0001-662: ref=['IN', 'SHORT', 'HE', 'BECOMES', 'A', 'PROMINENT', 'FIGURE', 'IN', 'LONDON', 'SOCIETY', 'AND', 'IF', 'HE', 'IS', 'NOT', 'CAREFUL', 'SOMEBODY', 'WILL', 'SAY', 'SO'] +7176-92135-0001-662: hyp=['IN', 'SHORT', 'HE', 'BECOMES', 'A', 'PROMINENT', 'FIGURE', 'IN', 'LONDON', 'SOCIETY', 'AND', 'IF', 'HE', 'IS', 'NOT', 'CAREFUL', 'SOMEBODY', 'WILL', 'SAY', 'SO'] +7176-92135-0002-663: ref=['BUT', 'EVEN', 'THE', 'UNSUCCESSFUL', 'DRAMATIST', 'HAS', 'HIS', 'MOMENTS'] +7176-92135-0002-663: hyp=['BUT', 'EVEN', 'THE', 'UNSUCCESSFUL', 'DRAMATIST', 'HAS', 'HIS', 'MOMENTS'] +7176-92135-0003-664: ref=['YOUR', 'PLAY', 'MUST', 'BE', 'NOT', 'MERELY', 'A', 'GOOD', 'PLAY', 'BUT', 'A', 'SUCCESSFUL', 'ONE'] +7176-92135-0003-664: hyp=['YOU', 'ARE', 'PLAY', 'MUST', 'BE', 'NOT', 'MERELY', 'A', 'GOOD', 'PLAY', 'BUT', 'A', 'SUCCESSFUL', 'ONE'] +7176-92135-0004-665: ref=['FRANKLY', 'I', 'CANNOT', 'ALWAYS', 'SAY'] +7176-92135-0004-665: hyp=['FRANKLY', 'I', 'CANNOT', 'ALWAYS', 'SAY'] +7176-92135-0005-666: ref=['BUT', 'SUPPOSE', 'YOU', 'SAID', "I'M", 'FOND', 'OF', 'WRITING', 'MY', 'PEOPLE', 'ALWAYS', 'SAY', 'MY', 'LETTERS', 'HOME', 'ARE', 'GOOD', 'ENOUGH', 'FOR', 'PUNCH'] +7176-92135-0005-666: hyp=['BUT', 'SUPPOSE', 'YOU', 'SAID', "I'M", 'FOND', 'OF', 'WRITING', 'MY', 'PEOPLE', 'ALWAYS', 'SAY', 'MY', 'LETTERS', 'HOME', 'ARE', 'GOOD', 'ENOUGH', 'FOR', 'PUNCH'] 
+7176-92135-0006-667: ref=["I'VE", 'GOT', 'A', 'LITTLE', 'IDEA', 'FOR', 'A', 'PLAY', 'ABOUT', 'A', 'MAN', 'AND', 'A', 'WOMAN', 'AND', 'ANOTHER', 'WOMAN', 'AND', 'BUT', 'PERHAPS', "I'D", 'BETTER', 'KEEP', 'THE', 'PLOT', 'A', 'SECRET', 'FOR', 'THE', 'MOMENT'] +7176-92135-0006-667: hyp=["I'VE", 'GOT', 'A', 'LITTLE', 'IDEA', 'FOR', 'A', 'PLAY', 'ABOUT', 'A', 'MAN', 'AND', 'A', 'WOMAN', 'AND', 'ANOTHER', 'WOMAN', 'AND', 'BUT', 'PERHAPS', 'I', 'BETTER', 'KEEP', 'THE', 'PLOT', 'A', 'SECRET', 'FOR', 'THE', 'MOMENT'] +7176-92135-0007-668: ref=['ANYHOW', "IT'S", 'JOLLY', 'EXCITING', 'AND', 'I', 'CAN', 'DO', 'THE', 'DIALOGUE', 'ALL', 'RIGHT'] +7176-92135-0007-668: hyp=['ANYHOW', "IT'S", 'A', 'JOLLY', 'EXCITING', 'AND', 'I', 'CAN', 'DO', 'THE', 'DIALOGUE', 'ALL', 'RIGHT'] +7176-92135-0008-669: ref=['LEND', 'ME', 'YOUR', 'EAR', 'FOR', 'TEN', 'MINUTES', 'AND', 'YOU', 'SHALL', 'LEARN', 'JUST', 'WHAT', 'STAGECRAFT', 'IS'] +7176-92135-0008-669: hyp=['LEND', 'ME', 'YOUR', 'EAR', 'FOR', 'TEN', 'MINUTES', 'AND', 'YOU', 'SHALL', 'LEARN', 'JUST', 'WHAT', 'STAGE', 'CRAFT', 'IS'] +7176-92135-0009-670: ref=['AND', 'I', 'SHOULD', 'BEGIN', 'WITH', 'A', 'SHORT', 'HOMILY', 'ON', 'SOLILOQUY'] +7176-92135-0009-670: hyp=['AND', 'I', 'SHOULD', 'BEGIN', 'WITH', 'A', 'SHORT', 'HOMILY', 'ON', 'SOLILOQUY'] +7176-92135-0010-671: ref=['HAM', 'TO', 'BE', 'OR', 'NOT', 'TO', 'BE'] +7176-92135-0010-671: hyp=['HIM', 'TO', 'BE', 'OR', 'NOT', 'TO', 'BE'] +7176-92135-0011-672: ref=['NOW', 'THE', 'OBJECT', 'OF', 'THIS', 'SOLILOQUY', 'IS', 'PLAIN'] +7176-92135-0011-672: hyp=['NOW', 'THE', 'OBJECT', 'OF', 'THIS', 'SOLOQUY', 'IS', 'PLAIN'] +7176-92135-0012-673: ref=['INDEED', 'IRRESOLUTION', 'BEING', 'THE', 'KEYNOTE', 'OF', "HAMLET'S", 'SOLILOQUY', 'A', 'CLEVER', 'PLAYER', 'COULD', 'TO', 'SOME', 'EXTENT', 'INDICATE', 'THE', 'WHOLE', 'THIRTY', 'LINES', 'BY', 'A', 'SILENT', 'WORKING', 'OF', 'THE', 'JAW', 'BUT', 'AT', 'THE', 'SAME', 'TIME', 'IT', 'WOULD', 'BE', 'IDLE', 'TO', 'DENY', 'THAT', 'HE', 'WOULD', 'MISS', 'THE', 'FINER', 'SHADES', 'OF', 'THE', "DRAMATIST'S", 'MEANING'] +7176-92135-0012-673: hyp=['INDEED', 'IRRESOLUTION', 'MEAN', 'THE', 'KEEN', 'OUT', 'OF', "HAMLET'S", 'SOLILOQUY', 'A', 'CLEVER', 'PLAYER', 'COULD', 'TO', 'SOME', 'EXTENT', 'INDICATE', 'THE', 'WHOLE', 'THIRTY', 'LINES', 'BY', 'A', 'SILAGE', 'WORKING', 'OF', 'THE', 'JOB', 'BUT', 'AT', 'THE', 'SAME', 'TIME', 'IT', 'WOULD', 'BE', 'IDLE', 'TO', 'DENY', 'THAT', 'HE', 'WOULD', 'MISS', 'THE', 'FINER', 'SHADES', 'OF', 'THE', "DRAMATIST'S", 'MEANING'] +7176-92135-0013-674: ref=['WE', 'MODERNS', 'HOWEVER', 'SEE', 'THE', 'ABSURDITY', 'OF', 'IT'] +7176-92135-0013-674: hyp=['WE', 'MODERNS', 'HOWEVER', 'SEE', 'THE', 'ABSURDITY', 'OF', 'IT'] +7176-92135-0014-675: ref=['IF', 'IT', 'BE', 'GRANTED', 'FIRST', 'THAT', 'THE', 'THOUGHTS', 'OF', 'A', 'CERTAIN', 'CHARACTER', 'SHOULD', 'BE', 'KNOWN', 'TO', 'THE', 'AUDIENCE', 'AND', 'SECONDLY', 'THAT', 'SOLILOQUY', 'OR', 'THE', 'HABIT', 'OF', 'THINKING', 'ALOUD', 'IS', 'IN', 'OPPOSITION', 'TO', 'MODERN', 'STAGE', 'TECHNIQUE', 'HOW', 'SHALL', 'A', 'SOLILOQUY', 'BE', 'AVOIDED', 'WITHOUT', 'DAMAGE', 'TO', 'THE', 'PLAY'] +7176-92135-0014-675: hyp=['IF', 'IT', 'BE', 'GRANTED', 'FIRST', 'THAT', 'THE', 'THOUGHTS', 'OF', 'A', 'CERTAIN', 'CHARACTER', 'SHOULD', 'BE', 'KNOWN', 'TO', 'THE', 'AUDIENCE', 'AND', 'SECONDLY', 'THAT', 'SOLILOQUY', 'OR', 'THE', 'HABIT', 'OF', 'THINKING', 'ALOUD', 'IS', 'IN', 'OPPOSITION', 'TO', 'MODERN', 'STAGE', 'TYPE', 'HALL', 'SHALL', 'A', 'SOLILOQUY', 'BE', 'AVOIDED', 'WITHOUT', 'DAMAGE', 'TO', 'THE', 'PLAY'] 
+7176-92135-0015-676: ref=['AND', 'SO', 'ON', 'TILL', 'YOU', 'GET', 'TO', 'THE', 'END', 'WHEN', 'OPHELIA', 'MIGHT', 'SAY', 'AH', 'YES', 'OR', 'SOMETHING', 'NON', 'COMMITTAL', 'OF', 'THAT', 'SORT'] +7176-92135-0015-676: hyp=['AND', 'SO', 'ON', 'TILL', 'YOU', 'GET', 'THE', 'END', 'ONE', 'OF', 'VILLIA', 'MIGHT', 'SAY', 'AH', 'YES', 'OR', 'SOMETHING', 'NON', 'COMMITTAL', 'OF', 'THAT', 'SORT'] +7176-92135-0016-677: ref=['THIS', 'WOULD', 'BE', 'AN', 'EASY', 'WAY', 'OF', 'DOING', 'IT', 'BUT', 'IT', 'WOULD', 'NOT', 'BE', 'THE', 'BEST', 'WAY', 'FOR', 'THE', 'REASON', 'THAT', 'IT', 'IS', 'TOO', 'EASY', 'TO', 'CALL', 'ATTENTION', 'TO', 'ITSELF'] +7176-92135-0016-677: hyp=['THIS', 'WOULD', 'BE', 'AN', 'EASY', 'WAY', 'OF', 'DOING', 'IT', 'BUT', 'IT', 'WOULD', 'NOT', 'BE', 'THE', 'BEST', 'WAY', 'FOR', 'THE', 'REASON', 'THAT', 'IT', 'IS', 'TOO', 'EASY', 'TO', 'CALL', 'ATTENTION', 'TO', 'ITSELF'] +7176-92135-0017-678: ref=['IN', 'THE', 'OLD', 'BADLY', 'MADE', 'PLAY', 'IT', 'WAS', 'FREQUENTLY', 'NECESSARY', 'FOR', 'ONE', 'OF', 'THE', 'CHARACTERS', 'TO', 'TAKE', 'THE', 'AUDIENCE', 'INTO', 'HIS', 'CONFIDENCE'] +7176-92135-0017-678: hyp=['IN', 'THE', 'OLD', 'BADLY', 'MADE', 'PLAY', 'IT', 'WAS', 'FREQUENTLY', 'NECESSARY', 'FOR', 'ONE', 'OF', 'THE', 'CHARACTERS', 'TO', 'TAKE', 'THE', 'AUDIENCE', 'INTO', 'HIS', 'CONFIDENCE'] +7176-92135-0018-679: ref=['IN', 'THE', 'MODERN', 'WELL', 'CONSTRUCTED', 'PLAY', 'HE', 'SIMPLY', 'RINGS', 'UP', 'AN', 'IMAGINARY', 'CONFEDERATE', 'AND', 'TELLS', 'HIM', 'WHAT', 'HE', 'IS', 'GOING', 'TO', 'DO', 'COULD', 'ANYTHING', 'BE', 'MORE', 'NATURAL'] +7176-92135-0018-679: hyp=['IN', 'THE', 'MODERN', 'WELL', 'CONSTRUCTED', 'PLAY', 'HE', 'SIMPLY', 'RINGS', 'UP', 'AN', 'IMAGINARY', 'CONFEDERATE', 'AND', 'TELLS', 'HIM', 'WHAT', 'HE', 'IS', 'GOING', 'TO', 'DO', 'COULD', 'ANYTHING', 'BE', 'MORE', 'NATURAL'] +7176-92135-0019-680: ref=['I', 'WANT', 'DOUBLE', 'NINE', 'HAL', 'LO'] +7176-92135-0019-680: hyp=['I', 'WANT', 'DOUBLE', 'NINE', 'HELLO'] +7176-92135-0020-681: ref=['DOUBLE', 'NINE', 'TWO', 'THREE', 'ELSINORE', 'DOUBLE', 'NINE', 'YES', 'HALLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0020-681: hyp=['DOUBLED', 'NINE', 'TWO', 'THREE', 'ELZINORE', 'DOUBLE', 'NOT', 'YES', 'HELLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0021-682: ref=['I', 'SAY', "I'VE", 'BEEN', 'WONDERING', 'ABOUT', 'THIS', 'BUSINESS'] +7176-92135-0021-682: hyp=['I', 'SAY', "I'VE", 'BEEN', 'WANDERING', 'ABOUT', 'THIS', 'BUSINESS'] +7176-92135-0022-683: ref=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER', 'IN', 'THE', 'MIND', 'TO', 'SUFFER', 'THE', 'SLINGS', 'AND', 'ARROWS', 'WHAT', 'NO', 'HAMLET', 'SPEAKING'] +7176-92135-0022-683: hyp=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER', 'IN', 'THE', 'MIND', 'TO', 'SUFFER', 'THE', 'SLINGS', 'AND', 'ARROWS', 'WHAT', 'NO', 'HAMLET', 'SPEAKING'] +7176-92135-0023-684: ref=['YOU', 'GAVE', 'ME', 'DOUBLE', 'FIVE', 'I', 'WANT', 'DOUBLE', 'NINE', 'HALLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0023-684: hyp=['YOU', 'GAVE', 'ME', 'DOUBLE', 'FIVE', 'I', 'WANT', 'DOUBLE', 'NINE', 'HELLO', 'IS', 'THAT', 'YOU', 'HORATIO', 'HAMLET', 'SPEAKING'] +7176-92135-0024-685: ref=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER'] +7176-92135-0024-685: hyp=['TO', 'BE', 'OR', 'NOT', 'TO', 'BE', 'THAT', 'IS', 'THE', 'QUESTION', 'WHETHER', 'TIS', 'NOBLER'] +7176-92135-0025-686: 
ref=['IT', 'IS', 'TO', 'LET', 'HAMLET', 'IF', 'THAT', 'HAPPEN', 'TO', 'BE', 'THE', 'NAME', 'OF', 'YOUR', 'CHARACTER', 'ENTER', 'WITH', 'A', 'SMALL', 'DOG', 'PET', 'FALCON', 'MONGOOSE', 'TAME', 'BEAR', 'OR', 'WHATEVER', 'ANIMAL', 'IS', 'MOST', 'IN', 'KEEPING', 'WITH', 'THE', 'PART', 'AND', 'CONFIDE', 'IN', 'THIS', 'ANIMAL', 'SUCH', 'SORROWS', 'HOPES', 'OR', 'SECRET', 'HISTORY', 'AS', 'THE', 'AUDIENCE', 'HAS', 'GOT', 'TO', 'KNOW'] +7176-92135-0025-686: hyp=['IT', 'IS', 'TO', 'LET', 'HAMLET', 'IF', 'THAT', 'HAPPENED', 'TO', 'BE', 'THE', 'NAME', 'OF', 'YOUR', 'CHARACTER', 'INTO', 'A', 'SMALL', 'DOG', 'PET', 'FALCON', 'MONGOOSE', 'TAME', 'BEAR', 'ORDER', 'ANIMAL', 'IS', 'MOST', 'IN', 'KEEPING', 'WITH', 'THE', 'PART', 'AND', 'CONFIDE', 'IN', 'THIS', 'ANIMAL', 'SUCH', 'SORROWS', 'HOPES', 'OR', 'SECRET', 'HISTORY', 'AS', 'THE', 'AUDIENCE', 'HAS', 'GOT', 'TO', 'KNOW'] +7176-92135-0026-687: ref=['ENTER', 'HAMLET', 'WITH', 'HIS', 'FAVOURITE', 'BOAR', 'HOUND'] +7176-92135-0026-687: hyp=['ENTER', 'HAMLET', 'WITH', 'HIS', 'FAVOURITE', 'BOARHOUND'] +7176-92135-0027-688: ref=['LADY', 'LARKSPUR', 'STARTS', 'SUDDENLY', 'AND', 'TURNS', 'TOWARDS', 'HIM'] +7176-92135-0027-688: hyp=['LADY', 'LARKSBURG', 'START', 'SUDDENLY', 'AND', 'TURNED', 'TOWARD', 'HIM'] +7176-92135-0028-689: ref=['LARKSPUR', 'BIT', 'ME', 'AGAIN', 'THIS', 'MORNING', 'FOR', 'THE', 'THIRD', 'TIME'] +7176-92135-0028-689: hyp=['LARKS', 'WERE', 'BID', 'ME', 'AGAIN', 'THIS', 'MORNING', 'FOR', 'THE', 'THIRD', 'TIME'] +7176-92135-0029-690: ref=['I', 'WANT', 'TO', 'GET', 'AWAY', 'FROM', 'IT', 'ALL', 'SWOONS'] +7176-92135-0029-690: hyp=['I', 'WANT', 'TO', 'GET', 'AWAY', 'FROM', 'IT', 'ALL', 'SWOON'] +7176-92135-0030-691: ref=['ENTER', 'LORD', 'ARTHUR', 'FLUFFINOSE'] +7176-92135-0030-691: hyp=['ENTER', 'LORD', 'ARTHUR', "FLUFFINO'S"] +7176-92135-0031-692: ref=['AND', 'THERE', 'YOU', 'ARE', 'YOU', 'WILL', 'OF', 'COURSE', 'APPRECIATE', 'THAT', 'THE', 'UNFINISHED', 'SENTENCES', 'NOT', 'ONLY', 'SAVE', 'TIME', 'BUT', 'ALSO', 'MAKE', 'THE', 'MANOEUVRING', 'VERY', 'MUCH', 'MORE', 'NATURAL'] +7176-92135-0031-692: hyp=['AND', 'THERE', 'YOU', 'ARE', 'YOU', 'WILL', 'OF', 'COURSE', 'APPRECIATE', 'THAT', 'THE', 'UNFINISHANCES', 'NOT', 'ONLY', 'SAVE', 'TIME', 'BUT', 'ALSO', 'MAKE', 'THE', 'MANOEUVRING', 'VERY', 'MUCH', 'MORE', 'NATURAL'] +7176-92135-0032-693: ref=['HOW', 'YOU', 'MAY', 'BE', 'WONDERING', 'ARE', 'YOU', 'TO', 'BEGIN', 'YOUR', 'MASTERPIECE'] +7176-92135-0032-693: hyp=['HOW', 'YOU', 'MAY', 'BE', 'WONDERING', 'ARE', 'YE', 'TO', 'BEGIN', 'YOUR', 'MASTERPIECE'] +7176-92135-0033-694: ref=['RELAPSES', 'INTO', 'SILENCE', 'FOR', 'THE', 'REST', 'OF', 'THE', 'EVENING'] +7176-92135-0033-694: hyp=['RELAPSES', 'INTO', 'SILENCE', 'FOR', 'THE', 'REST', 'OF', 'THE', 'EVENING'] +7176-92135-0034-695: ref=['THE', 'DUCHESS', 'OF', 'SOUTHBRIDGE', 'TO', 'LORD', 'REGGIE', 'OH', 'REGGIE', 'WHAT', 'DID', 'YOU', 'SAY'] +7176-92135-0034-695: hyp=['THE', 'DUCHESS', 'OF', 'SOUTHBRIDGE', 'TWO', 'LORD', 'REGGIE', 'OH', 'REGGIE', 'WHAT', 'DID', 'YOU', 'SAY'] +7176-92135-0035-696: ref=['THEN', 'LORD', 'TUPPENY', 'WELL', 'WHAT', 'ABOUT', 'AUCTION'] +7176-92135-0035-696: hyp=['THEN', 'LORD', 'TOPPENNY', 'WELL', 'WHAT', 'ABOUT', 'AUCTION'] +7176-92135-0036-697: ref=['THE', 'CROWD', 'DRIFTS', 'OFF', 'LEAVING', 'THE', 'HERO', 'AND', 'HEROINE', 'ALONE', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'STAGE', 'AND', 'THEN', 'YOU', 'CAN', 'BEGIN'] +7176-92135-0036-697: hyp=['THE', 'CROWD', 'DRIFTS', 'OFF', 'LEAPING', 'THE', 'HERO', 'AND', 'HEROINE', 'ALONE', 'IN', 'THE', 'MIDDLE', 'OF', 'THE', 'STAGE', 
'AND', 'THEN', 'YOU', 'CAN', 'BEGIN'] +7176-92135-0037-698: ref=['THEN', 'IS', 'THE', 'TIME', 'TO', 'INTRODUCE', 'A', 'MEAL', 'ON', 'THE', 'STAGE'] +7176-92135-0037-698: hyp=['THEN', 'IS', 'THE', 'TIME', 'TO', 'INTRODUCE', 'A', 'MEAL', 'ON', 'THE', 'STAGE'] +7176-92135-0038-699: ref=['A', 'STAGE', 'MEAL', 'IS', 'POPULAR', 'BECAUSE', 'IT', 'PROVES', 'TO', 'THE', 'AUDIENCE', 'THAT', 'THE', 'ACTORS', 'EVEN', 'WHEN', 'CALLED', 'CHARLES', 'HAWTREY', 'OR', 'OWEN', 'NARES', 'ARE', 'REAL', 'PEOPLE', 'JUST', 'LIKE', 'YOU', 'AND', 'ME'] +7176-92135-0038-699: hyp=['A', 'SAGE', 'MEAL', 'IS', 'POPULAR', 'BECAUSE', 'IT', 'PROVED', 'TO', 'THE', 'AUDIENCE', 'THAT', 'THE', 'ACTORS', 'EVEN', 'WHEN', 'CALLED', 'CHARLES', 'HOLTREE', 'OR', 'OWENAIRS', 'ARE', 'REAL', 'PEOPLE', 'JUST', 'LIKE', 'YOU', 'AND', 'ME'] +7176-92135-0039-700: ref=['TEA', 'PLEASE', 'MATTHEWS', 'BUTLER', 'IMPASSIVELY'] +7176-92135-0039-700: hyp=['T', 'PLEASE', 'MATTHEWS', 'BUTLER', 'IMPASSIVELY'] +7176-92135-0040-701: ref=['HOSTESS', 'REPLACES', 'LUMP', 'AND', 'INCLINES', 'EMPTY', 'TEAPOT', 'OVER', 'TRAY', 'FOR', 'A', 'MOMENT', 'THEN', 'HANDS', 'HIM', 'A', 'CUP', 'PAINTED', 'BROWN', 'INSIDE', 'THUS', 'DECEIVING', 'THE', 'GENTLEMAN', 'WITH', 'THE', 'TELESCOPE', 'IN', 'THE', 'UPPER', 'CIRCLE'] +7176-92135-0040-701: hyp=['HOSTESS', 'REPLACES', 'LUMP', 'AND', 'INCLINES', 'EMPTY', 'TEAPOT', 'OVERTRAY', 'FOR', 'A', 'MOMENT', 'THEN', 'HANDSOME', 'A', 'CUP', 'PAINTED', 'BROWN', 'INSIDE', 'LUST', 'DECEIVING', 'THE', 'GENTLEMAN', 'WITH', 'THE', 'TELESCOPE', 'IN', 'THE', 'UPPER', 'CIRCLE'] +7176-92135-0041-702: ref=['RE', 'ENTER', 'BUTLER', 'AND', 'THREE', 'FOOTMEN', 'WHO', 'REMOVE', 'THE', 'TEA', 'THINGS', 'HOSTESS', 'TO', 'GUEST'] +7176-92135-0041-702: hyp=['RE', 'ENTER', 'BUTLER', 'AND', 'THREE', 'FOOTMEN', 'WHO', 'MOVED', 'THE', 'TEA', 'THINGS', 'HOSTESS', 'TWO', 'GUESTS'] +7176-92135-0042-703: ref=['IN', 'NOVELS', 'THE', 'HERO', 'HAS', 'OFTEN', 'PUSHED', 'HIS', 'MEALS', 'AWAY', 'UNTASTED', 'BUT', 'NO', 'STAGE', 'HERO', 'WOULD', 'DO', 'ANYTHING', 'SO', 'UNNATURAL', 'AS', 'THIS'] +7176-92135-0042-703: hyp=['AND', 'NOVELS', 'THE', 'HERO', 'HAS', 'OFTEN', 'PUSHED', 'HIS', 'MEALS', 'AWAY', 'UNTASTED', 'BUT', 'NO', 'STEED', 'HERO', 'WOULD', 'DO', 'ANYTHING', 'SO', 'UNNATURAL', 'AS', 'THIS'] +7176-92135-0043-704: ref=['TWO', 'BITES', 'ARE', 'MADE', 'AND', 'THE', 'BREAD', 'IS', 'CRUMBLED', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'EAGERNESS', 'INDEED', 'ONE', 'FEELS', 'THAT', 'IN', 'REAL', 'LIFE', 'THE', 'GUEST', 'WOULD', 'CLUTCH', 'HOLD', 'OF', 'THE', 'FOOTMAN', 'AND', 'SAY', 'HALF', 'A', 'MO', 'OLD', 'CHAP', 'I', "HAVEN'T", 'NEARLY', 'FINISHED', 'BUT', 'THE', 'ACTOR', 'IS', 'BETTER', 'SCHOOLED', 'THAN', 'THIS'] +7176-92135-0043-704: hyp=['TWO', 'WHITES', 'ARE', 'MADE', 'AND', 'THE', 'BREAD', 'IS', 'CRUMBLED', 'WITH', 'AN', 'AIR', 'OF', 'GREAT', 'EAGERNESS', 'INDEED', 'ONE', 'FEELS', 'THAT', 'IN', 'REAL', 'LIFE', 'THE', 'GUEST', 'WOULD', 'CLUTCH', 'HOLD', 'OF', 'THE', 'FOOTMAN', 'AND', 'SAY', 'HALF', 'A', 'MOLD', 'CHAP', 'I', "HAVEN'T", 'NEARLY', 'FINISHED', 'BUT', 'THE', 'ACTOR', 'IS', 'BETTER', 'SCHOOLED', 'THAN', 'THIS'] +7176-92135-0044-705: ref=['BUT', 'IT', 'IS', 'THE', 'CIGARETTE', 'WHICH', 'CHIEFLY', 'HAS', 'BROUGHT', 'THE', 'MODERN', 'DRAMA', 'TO', 'ITS', 'PRESENT', 'STATE', 'OF', 'PERFECTION'] +7176-92135-0044-705: hyp=['BUT', 'IT', 'IS', 'A', 'CIGARETTE', 'WHICH', 'CHIEFLY', 'HAS', 'BROUGHT', 'THE', 'MODERN', 'DRAMA', 'TO', 'ITS', 'PRESENT', 'STATE', 'OF', 'PERFECTION'] +7176-92135-0045-706: ref=['LORD', 'JOHN', 'TAKING', 'OUT', 'GOLD', 
'CIGARETTE', 'CASE', 'FROM', 'HIS', 'LEFT', 'HAND', 'UPPER', 'WAISTCOAT', 'POCKET'] +7176-92135-0045-706: hyp=['LORD', 'JOHN', 'TAKING', 'OUT', 'GOLD', 'SICK', 'RED', 'CASE', 'FROM', 'HIS', 'LEFT', 'HAND', 'UPPER', 'WAISTCOAT', 'POCKET'] +7729-102255-0000-261: ref=['THE', 'BOGUS', 'LEGISLATURE', 'NUMBERED', 'THIRTY', 'SIX', 'MEMBERS'] +7729-102255-0000-261: hyp=['THE', 'BOGUS', 'LEGISLATURE', 'NUMBERED', 'THIRTY', 'SIX', 'MEMBERS'] +7729-102255-0001-262: ref=['THIS', 'WAS', 'AT', 'THE', 'MARCH', 'ELECTION', 'EIGHTEEN', 'FIFTY', 'FIVE'] +7729-102255-0001-262: hyp=['THIS', 'WAS', 'AT', 'THE', 'MARCH', 'ELECTION', 'EIGHTEEN', 'FIFTY', 'FIVE'] +7729-102255-0002-263: ref=['THAT', "SUMMER'S", 'EMIGRATION', 'HOWEVER', 'BEING', 'MAINLY', 'FROM', 'THE', 'FREE', 'STATES', 'GREATLY', 'CHANGED', 'THE', 'RELATIVE', 'STRENGTH', 'OF', 'THE', 'TWO', 'PARTIES'] +7729-102255-0002-263: hyp=['THAT', "SUMMER'S", 'EMIGRATION', 'HOWEVER', 'BEING', 'MAINLY', 'FROM', 'THE', 'FREE', 'STATES', 'GREATLY', 'CHANGED', 'THE', 'RELATIVE', 'STRENGTH', 'OF', 'THE', 'TWO', 'PARTIES'] +7729-102255-0003-264: ref=['FOR', 'GENERAL', 'SERVICE', 'THEREFORE', 'REQUIRING', 'NO', 'SPECIAL', 'EFFORT', 'THE', 'NUMERICAL', 'STRENGTH', 'OF', 'THE', 'FACTIONS', 'WAS', 'ABOUT', 'EQUAL', 'WHILE', 'ON', 'EXTRAORDINARY', 'OCCASIONS', 'THE', 'TWO', 'THOUSAND', 'BORDER', 'RUFFIAN', 'RESERVE', 'LYING', 'A', 'LITTLE', 'FARTHER', 'BACK', 'FROM', 'THE', 'STATE', 'LINE', 'COULD', 'AT', 'ANY', 'TIME', 'EASILY', 'TURN', 'THE', 'SCALE'] +7729-102255-0003-264: hyp=['FOR', 'GENERAL', 'SERVICE', 'THEREFORE', 'REQUIRING', 'NO', 'SPECIAL', 'EFFORT', 'THE', 'NUMERICAL', 'STRENGTH', 'OF', 'THE', 'FACTIONS', 'WAS', 'ABOUT', 'EQUAL', 'WHILE', 'ON', 'EXTRAORDINARY', 'OCCASIONS', 'THE', 'TWO', 'THOUSAND', 'BORDER', 'RUFFIAN', 'RESERVED', 'LYING', 'A', 'LITTLE', 'FARTHER', 'BACK', 'FROM', 'THE', 'STATE', 'LINE', 'COULD', 'AT', 'ANY', 'TIME', 'EASILY', 'TURN', 'THE', 'SCALE'] +7729-102255-0004-265: ref=['THE', 'FREE', 'STATE', 'MEN', 'HAD', 'ONLY', 'THEIR', 'CONVICTIONS', 'THEIR', 'INTELLIGENCE', 'THEIR', 'COURAGE', 'AND', 'THE', 'MORAL', 'SUPPORT', 'OF', 'THE', 'NORTH', 'THE', 'CONSPIRACY', 'HAD', 'ITS', 'SECRET', 'COMBINATION', 'THE', 'TERRITORIAL', 'OFFICIALS', 'THE', 'LEGISLATURE', 'THE', 'BOGUS', 'LAWS', 'THE', 'COURTS', 'THE', 'MILITIA', 'OFFICERS', 'THE', 'PRESIDENT', 'AND', 'THE', 'ARMY'] +7729-102255-0004-265: hyp=['THE', 'FREE', 'STATE', 'MEN', 'HAD', 'ONLY', 'THEIR', 'CONVICTIONS', 'THEIR', 'INTELLIGENCE', 'THEIR', 'COURAGE', 'AND', 'THE', 'MORAL', 'SUPPORT', 'OF', 'THE', 'NORTH', 'THE', 'CONSPIRACY', 'HAD', 'ITS', 'SECRET', 'COMBINATION', 'THE', 'TERRITORIAL', 'OFFICIALS', 'THE', 'LEGISLATURE', 'THE', 'BOGUS', 'LAWS', 'THE', 'COURTS', 'THE', 'MILITIA', 'OFFICERS', 'THE', 'PRESIDENT', 'AND', 'THE', 'ARMY'] +7729-102255-0005-266: ref=['THIS', 'WAS', 'A', 'FORMIDABLE', 'ARRAY', 'OF', 'ADVANTAGES', 'SLAVERY', 'WAS', 'PLAYING', 'WITH', 'LOADED', 'DICE'] +7729-102255-0005-266: hyp=['THIS', 'WAS', 'A', 'FORMIDABLE', 'ARRAY', 'OF', 'ADVANTAGES', 'SLAVERY', 'WAS', 'PLAYING', 'WITH', 'LOADED', 'DICE'] +7729-102255-0006-267: ref=['COMING', 'BY', 'WAY', 'OF', 'THE', 'MISSOURI', 'RIVER', 'TOWNS', 'HE', 'FELL', 'FIRST', 'AMONG', 'BORDER', 'RUFFIAN', 'COMPANIONSHIP', 'AND', 'INFLUENCES', 'AND', 'PERHAPS', 'HAVING', 'HIS', 'INCLINATIONS', 'ALREADY', 'MOLDED', 'BY', 'HIS', 'WASHINGTON', 'INSTRUCTIONS', 'HIS', 'EARLY', 'IMPRESSIONS', 'WERE', 'DECIDEDLY', 'ADVERSE', 'TO', 'THE', 'FREE', 'STATE', 'CAUSE'] +7729-102255-0006-267: hyp=['COMING', 'BY', 'WAY', 'OF', 'THE', 
'MISSOURI', 'RIVER', 'TOWNS', 'HE', 'FELL', 'FIRST', 'AMONG', 'BORDER', 'RUFFIAN', 'COMPANIONSHIP', 'AND', 'INFLUENCES', 'AND', 'PERHAPS', 'HAVING', 'HIS', 'INCLINATIONS', 'ALREADY', 'MOULDED', 'BY', 'HIS', 'WASHINGTON', 'INSTRUCTIONS', 'HIS', 'EARLY', 'IMPRESSIONS', 'WERE', 'DECIDEDLY', 'ADVERSE', 'TO', 'THE', 'FREE', 'STATE', 'CAUSE'] +7729-102255-0007-268: ref=['HIS', 'RECEPTION', 'SPEECH', 'AT', 'WESTPORT', 'IN', 'WHICH', 'HE', 'MAINTAINED', 'THE', 'LEGALITY', 'OF', 'THE', 'LEGISLATURE', 'AND', 'HIS', 'DETERMINATION', 'TO', 'ENFORCE', 'THEIR', 'LAWS', 'DELIGHTED', 'HIS', 'PRO', 'SLAVERY', 'AUDITORS'] +7729-102255-0007-268: hyp=['HIS', 'RECEPTION', 'SPEECH', 'AT', 'WESTPORT', 'IN', 'WHICH', 'HE', 'MAINTAINED', 'THE', 'LEGALITY', 'OF', 'THE', 'LEGISLATURE', 'AND', 'HIS', 'DETERMINATION', 'TO', 'ENFORCE', 'THEIR', 'LAWS', 'DELIGHTED', 'HIS', 'PRO', 'SLAVERY', 'AUDITORS'] +7729-102255-0008-269: ref=['ALL', 'THE', 'TERRITORIAL', 'DIGNITARIES', 'WERE', 'PRESENT', 'GOVERNOR', 'SHANNON', 'PRESIDED', 'JOHN', 'CALHOUN', 'THE', 'SURVEYOR', 'GENERAL', 'MADE', 'THE', 'PRINCIPAL', 'SPEECH', 'A', 'DENUNCIATION', 'OF', 'THE', 'ABOLITIONISTS', 'SUPPORTING', 'THE', 'TOPEKA', 'MOVEMENT', 'CHIEF', 'JUSTICE', 'LECOMPTE', 'DIGNIFIED', 'THE', 'OCCASION', 'WITH', 'APPROVING', 'REMARKS'] +7729-102255-0008-269: hyp=['ALL', 'THE', 'TERRITORIAL', 'DIGNITARIES', 'WERE', 'PRESENT', 'GOVERNOR', 'SHANNON', 'PRESIDED', 'JOHN', 'CALHOUN', 'THE', 'SURVEYOR', 'GENERAL', 'MADE', 'THE', 'PRINCIPAL', 'SPEECH', 'A', 'DENUNCIATION', 'OF', 'THE', 'ABOLITIONIST', 'SUPPORTING', 'THE', 'TOPICA', 'MOVEMENT', 'CHIEF', 'JUSTICE', 'LECOMTE', 'DIGNIFIED', 'THE', 'OCCASION', 'WITH', 'APPROVING', 'REMARKS'] +7729-102255-0009-270: ref=['ALL', 'DISSENT', 'ALL', 'NON', 'COMPLIANCE', 'ALL', 'HESITATION', 'ALL', 'MERE', 'SILENCE', 'EVEN', 'WERE', 'IN', 'THEIR', 'STRONGHOLD', 'TOWNS', 'LIKE', 'LEAVENWORTH', 'BRANDED', 'AS', 'ABOLITIONISM', 'DECLARED', 'TO', 'BE', 'HOSTILITY', 'TO', 'THE', 'PUBLIC', 'WELFARE', 'AND', 'PUNISHED', 'WITH', 'PROSCRIPTION', 'PERSONAL', 'VIOLENCE', 'EXPULSION', 'AND', 'FREQUENTLY', 'DEATH'] +7729-102255-0009-270: hyp=['ALL', 'DESCENT', 'ALL', 'NON', 'COMPLIANCE', 'ALL', 'HESITATION', 'ALL', 'MERE', 'SILENCE', 'EVEN', 'WERE', 'IN', 'THEIR', 'STRONGHOLD', 'TOWNS', 'LIKE', 'LEVIN', 'WORTH', 'BRANDED', 'AS', 'ABOLITIONISM', 'DECLARED', 'TO', 'BE', 'HOSTILITY', 'TO', 'THE', 'PUBLIC', 'WELFARE', 'AND', 'PUNISHED', 'WITH', 'PROSCRIPTION', 'PERSONAL', 'VIOLENCE', 'EXPULSION', 'AND', 'FREQUENTLY', 'DEATH'] +7729-102255-0010-271: ref=['OF', 'THE', 'LYNCHINGS', 'THE', 'MOBS', 'AND', 'THE', 'MURDERS', 'IT', 'WOULD', 'BE', 'IMPOSSIBLE', 'EXCEPT', 'IN', 'A', 'VERY', 'EXTENDED', 'WORK', 'TO', 'NOTE', 'THE', 'FREQUENT', 'AND', 'ATROCIOUS', 'DETAILS'] +7729-102255-0010-271: hyp=['OF', 'THE', 'LUNCHINGS', 'THE', 'MOBS', 'AND', 'THE', 'MURDERS', 'IT', 'WOULD', 'BE', 'IMPOSSIBLE', 'EXCEPT', 'IN', 'A', 'VERY', 'EXTENDED', 'WORK', 'TO', 'NOTE', 'THE', 'FREQUENT', 'AND', 'ATROCIOUS', 'DETAILS'] +7729-102255-0011-272: ref=['THE', 'PRESENT', 'CHAPTERS', 'CAN', 'ONLY', 'TOUCH', 'UPON', 'THE', 'MORE', 'SALIENT', 'MOVEMENTS', 'OF', 'THE', 'CIVIL', 'WAR', 'IN', 'KANSAS', 'WHICH', 'HAPPILY', 'WERE', 'NOT', 'SANGUINARY', 'IF', 'HOWEVER', 'THE', 'INDIVIDUAL', 'AND', 'MORE', 'ISOLATED', 'CASES', 'OF', 'BLOODSHED', 'COULD', 'BE', 'DESCRIBED', 'THEY', 'WOULD', 'SHOW', 'A', 'STARTLING', 'AGGREGATE', 'OF', 'BARBARITY', 'AND', 'LOSS', 'OF', 'LIFE', 'FOR', "OPINION'S", 'SAKE'] +7729-102255-0011-272: hyp=['THE', 'PRESENT', 'CHAPTERS', 'CAN', 'ONLY', 
'TOUCH', 'UPON', 'THE', 'MORE', 'SALIENT', 'MOVEMENTS', 'OF', 'THE', 'CIVIL', 'WAR', 'IN', 'KANSAS', 'WHICH', 'HAPPILY', 'ARE', 'NOT', 'SANGUINARY', 'IF', 'HOWEVER', 'THE', 'INDIVIDUAL', 'AND', 'MORE', 'ISOLATED', 'CASES', 'OF', 'BLOODSHED', 'COULD', 'BE', 'DESCRIBED', 'THEY', 'WOULD', 'SHOW', 'A', 'STARTLING', 'AGGREGATE', 'OF', 'BARBARITY', 'AND', 'A', 'LOSS', 'OF', 'LIFE', 'FOR', "OPINION'S", 'SAKE'] +7729-102255-0012-273: ref=['SEVERAL', 'HUNDRED', 'FREE', 'STATE', 'MEN', 'PROMPTLY', 'RESPONDED', 'TO', 'THE', 'SUMMONS'] +7729-102255-0012-273: hyp=['SEVERAL', 'HUNDRED', 'FREE', 'STATE', 'MEN', 'PROMPTLY', 'RESPONDED', 'TO', 'THE', 'SUMMONS'] +7729-102255-0013-274: ref=['IT', 'WAS', 'IN', 'FACT', 'THE', 'BEST', 'WEAPON', 'OF', 'ITS', 'DAY'] +7729-102255-0013-274: hyp=['IT', 'WAS', 'IN', 'FACT', 'THE', 'BEST', 'WEAPON', 'OF', 'ITS', 'DAY'] +7729-102255-0014-275: ref=['THE', 'LEADERS', 'OF', 'THE', 'CONSPIRACY', 'BECAME', 'DISTRUSTFUL', 'OF', 'THEIR', 'POWER', 'TO', 'CRUSH', 'THE', 'TOWN'] +7729-102255-0014-275: hyp=['THE', 'LEADERS', 'OF', 'THE', 'CONSPIRACY', 'BECAME', 'DISTRUSTFUL', 'OF', 'THEIR', 'POWER', 'TO', 'CRUSH', 'THE', 'TOWN'] +7729-102255-0015-276: ref=['ONE', 'OF', 'HIS', 'MILITIA', 'GENERALS', 'SUGGESTED', 'THAT', 'THE', 'GOVERNOR', 'SHOULD', 'REQUIRE', 'THE', 'OUTLAWS', 'AT', 'LAWRENCE', 'AND', 'ELSEWHERE', 'TO', 'SURRENDER', 'THE', 'SHARPS', 'RIFLES', 'ANOTHER', 'WROTE', 'ASKING', 'HIM', 'TO', 'CALL', 'OUT', 'THE', 'GOVERNMENT', 'TROOPS', 'AT', 'FORT', 'LEAVENWORTH'] +7729-102255-0015-276: hyp=['ONE', 'OF', 'HIS', 'MILITIA', 'GENERALS', 'SUGGESTED', 'THAT', 'THE', 'GOVERNOR', 'SHOULD', 'REQUIRE', 'THE', 'OUTLAWS', 'AT', 'LAWRENCE', 'AND', 'ELSEWHERE', 'TO', 'SURRENDER', 'THE', "SHARP'S", 'RIFLES', 'ANOTHER', 'WROTE', 'ASKING', 'HIM', 'TO', 'CALL', 'OUT', 'THE', 'GOVERNMENT', 'TROOPS', 'AT', 'FORT', 'LEVINWORTH'] +7729-102255-0016-277: ref=['THE', 'GOVERNOR', 'ON', 'HIS', 'PART', 'BECOMING', 'DOUBTFUL', 'OF', 'THE', 'LEGALITY', 'OF', 'EMPLOYING', 'MISSOURI', 'MILITIA', 'TO', 'ENFORCE', 'KANSAS', 'LAWS', 'WAS', 'ALSO', 'EAGER', 'TO', 'SECURE', 'THE', 'HELP', 'OF', 'FEDERAL', 'TROOPS'] +7729-102255-0016-277: hyp=['THE', 'GOVERNOR', 'ON', 'HIS', 'PART', 'BECOMING', 'DOUBTFUL', 'OF', 'THE', 'LEGALITY', 'OF', 'EMPLOYING', 'MISSOURI', 'MILITIA', 'TO', 'ENFORCE', 'KANSAS', 'LAWS', 'WAS', 'ALSO', 'EAGER', 'TO', 'SECURE', 'THE', 'HELP', 'OF', 'FEDERAL', 'TROOPS'] +7729-102255-0017-278: ref=['SHERIFF', 'JONES', 'HAD', 'HIS', 'POCKETS', 'ALWAYS', 'FULL', 'OF', 'WRITS', 'ISSUED', 'IN', 'THE', 'SPIRIT', 'OF', 'PERSECUTION', 'BUT', 'WAS', 'OFTEN', 'BAFFLED', 'BY', 'THE', 'SHARP', 'WITS', 'AND', 'READY', 'RESOURCES', 'OF', 'THE', 'FREE', 'STATE', 'PEOPLE', 'AND', 'SOMETIMES', 'DEFIED', 'OUTRIGHT'] +7729-102255-0017-278: hyp=['SHERIFF', 'JONES', 'HAD', 'HIS', 'POCKETS', 'ALWAYS', 'FULL', 'OF', 'WRITS', 'ISSUED', 'IN', 'THE', 'SPIRIT', 'OF', 'PERSECUTION', 'BUT', 'WAS', 'OFTEN', 'BAFFLED', 'BY', 'THE', 'SHARP', 'WITS', 'AND', 'READY', 'RESOURCES', 'OF', 'THE', 'FREE', 'STATE', 'PEOPLE', 'AND', 'SOMETIMES', 'DEFIED', 'OUTRIGHT'] +7729-102255-0018-279: ref=['LITTLE', 'BY', 'LITTLE', 'HOWEVER', 'THE', 'LATTER', 'BECAME', 'HEMMED', 'AND', 'BOUND', 'IN', 'THE', 'MESHES', 'OF', 'THE', 'VARIOUS', 'DEVICES', 'AND', 'PROCEEDINGS', 'WHICH', 'THE', 'TERRITORIAL', 'OFFICIALS', 'EVOLVED', 'FROM', 'THE', 'BOGUS', 'LAWS'] +7729-102255-0018-279: hyp=['LITTLE', 'BY', 'LITTLE', 'HOWEVER', 'THE', 'LATTER', 'BECAME', 'HEMMED', 'AND', 'BOUND', 'IN', 'THE', 'MESHES', 'OF', 'THE', 'VARIOUS', 'DEVICES', 'AND', 
'PROCEEDINGS', 'WHICH', 'THE', 'TERRITORIAL', 'OFFICIALS', 'EVOLVED', 'FROM', 'THE', 'VOGUS', 'LAWS'] +7729-102255-0019-280: ref=['TO', 'EMBARRASS', 'THIS', 'DAMAGING', 'EXPOSURE', 'JUDGE', 'LECOMPTE', 'ISSUED', 'A', 'WRIT', 'AGAINST', 'THE', 'EX', 'GOVERNOR', 'ON', 'A', 'FRIVOLOUS', 'CHARGE', 'OF', 'CONTEMPT'] +7729-102255-0019-280: hyp=['TO', 'EMBARRASS', 'THIS', 'DAMAGING', 'EXPOSURE', 'JUDGE', 'LECOMTE', 'ISSUED', 'A', 'WRIT', 'AGAINST', 'THE', 'EX', 'GOVERNOR', 'ON', 'A', 'FRIVOLOUS', 'CHARGE', 'OF', 'CONTEMPT'] +7729-102255-0020-281: ref=['THE', 'INCIDENT', 'WAS', 'NOT', 'VIOLENT', 'NOR', 'EVEN', 'DRAMATIC', 'NO', 'POSSE', 'WAS', 'SUMMONED', 'NO', 'FURTHER', 'EFFORT', 'MADE', 'AND', 'REEDER', 'FEARING', 'PERSONAL', 'VIOLENCE', 'SOON', 'FLED', 'IN', 'DISGUISE'] +7729-102255-0020-281: hyp=['THE', 'INCIDENT', 'WAS', 'NOT', 'VIOLENT', 'NOR', 'EVEN', 'DRAMATIC', 'NO', 'POSSE', 'WAS', 'SUMMON', 'NO', 'FURTHER', 'EFFORT', 'MADE', 'AND', 'READER', 'FEARING', 'PERSONAL', 'VIOLENCE', 'SOON', 'FLED', 'IN', 'DISGUISE'] +7729-102255-0021-282: ref=['BUT', 'THE', 'AFFAIR', 'WAS', 'MAGNIFIED', 'AS', 'A', 'CROWNING', 'PROOF', 'THAT', 'THE', 'FREE', 'STATE', 'MEN', 'WERE', 'INSURRECTIONISTS', 'AND', 'OUTLAWS'] +7729-102255-0021-282: hyp=['BUT', 'THE', 'AFFAIR', 'WAS', 'MAGNIFIED', 'AS', 'A', 'CROWNING', 'PROOF', 'THAT', 'THE', 'FREE', 'STATE', 'MEN', 'WERE', 'INSURRECTIONISTS', 'AND', 'OUTLAWS'] +7729-102255-0022-283: ref=['FROM', 'THESE', 'AGAIN', 'SPRANG', 'BARRICADED', 'AND', 'FORTIFIED', 'DWELLINGS', 'CAMPS', 'AND', 'SCOUTING', 'PARTIES', 'FINALLY', 'CULMINATING', 'IN', 'ROVING', 'GUERRILLA', 'BANDS', 'HALF', 'PARTISAN', 'HALF', 'PREDATORY'] +7729-102255-0022-283: hyp=['FROM', 'THESE', 'AGAIN', 'SPRANG', 'BARRICADED', 'AND', 'FORTIFIED', 'DWELLINGS', 'CAMPS', 'AND', 'SCOUT', 'PARTIES', 'FINALLY', 'CULMINATING', 'IN', 'ROVING', 'GUERRILLA', 'VANS', 'HALF', 'PARTISAN', 'HALF', 'PREDATORY'] +7729-102255-0023-284: ref=['THEIR', 'DISTINCTIVE', 'CHARACTERS', 'HOWEVER', 'DISPLAY', 'ONE', 'BROAD', 'AND', 'UNFAILING', 'DIFFERENCE'] +7729-102255-0023-284: hyp=['THEIR', 'DISTINCTIVE', 'CHARACTERS', 'HOWEVER', 'DISPLAY', 'ONE', 'BROAD', 'AND', 'UNFAILING', 'DIFFERENCE'] +7729-102255-0024-285: ref=['THE', 'FREE', 'STATE', 'MEN', 'CLUNG', 'TO', 'THEIR', 'PRAIRIE', 'TOWNS', 'AND', 'PRAIRIE', 'RAVINES', 'WITH', 'ALL', 'THE', 'OBSTINACY', 'AND', 'COURAGE', 'OF', 'TRUE', 'DEFENDERS', 'OF', 'THEIR', 'HOMES', 'AND', 'FIRESIDES'] +7729-102255-0024-285: hyp=['THE', 'FREE', 'STATE', 'MEN', 'CLUNG', 'TO', 'THEIR', 'PRAIRIE', 'TOWNS', 'AND', 'PRAIRI', 'RAVINES', 'WITH', 'ALL', 'THE', 'OBSTINACY', 'AND', 'COURAGE', 'OF', 'TRUE', 'DEFENDERS', 'OF', 'THEIR', 'HOMES', 'AND', 'FIRESIDES'] +7729-102255-0025-286: ref=['THEIR', 'ASSUMED', 'CHARACTER', 'CHANGED', 'WITH', 'THEIR', 'CHANGING', 'OPPORTUNITIES', 'OR', 'NECESSITIES'] +7729-102255-0025-286: hyp=['THERE', 'ASSUMED', 'CHARACTER', 'CHANGED', 'WITH', 'THEIR', 'CHANGING', 'OPPORTUNITIES', 'OR', 'NECESSITIES'] +7729-102255-0026-287: ref=['IN', 'THE', 'SHOOTING', 'OF', 'SHERIFF', 'JONES', 'IN', 'LAWRENCE', 'AND', 'IN', 'THE', 'REFUSAL', 'OF', 'EX', 'GOVERNOR', 'BEEDER', 'TO', 'ALLOW', 'THE', 'DEPUTY', 'MARSHAL', 'TO', 'ARREST', 'HIM', 'THEY', 'DISCOVERED', 'GRAVE', 'OFFENSES', 'AGAINST', 'THE', 'TERRITORIAL', 'AND', 'UNITED', 'STATES', 'LAWS'] +7729-102255-0026-287: hyp=['IN', 'THE', 'SHOOTING', 'OF', "SHERIFF'S", 'JONES', 'IN', 'LAWRENCE', 'AND', 'IN', 'THE', 'REFUSAL', 'OF', 'EX', 'GOVERNOR', 'READER', 'TO', 'ALLOW', 'THE', 'DEPUTY', 'MARSHAL', 'TO', 'ARREST', 'HIM', 'THEY', 
'DISCOVERED', 'GRAVE', 'OFFENCES', 'AGAINST', 'THE', 'TERRITORIAL', 'AND', 'THE', 'UNITED', 'STATES', 'LAWS'] +7729-102255-0027-288: ref=['FOOTNOTE', 'SUMNER', 'TO', 'SHANNON', 'MAY', 'TWELFTH', 'EIGHTEEN', 'FIFTY', 'SIX'] +7729-102255-0027-288: hyp=['FOOTNOTE', 'SUMNER', 'TO', 'SHANNON', 'MAY', 'TWELFTH', 'EIGHTEEN', 'FIFTY', 'SIX'] +7729-102255-0028-289: ref=['PRIVATE', 'PERSONS', 'WHO', 'HAD', 'LEASED', 'THE', 'FREE', 'STATE', 'HOTEL', 'VAINLY', 'BESOUGHT', 'THE', 'VARIOUS', 'AUTHORITIES', 'TO', 'PREVENT', 'THE', 'DESTRUCTION', 'OF', 'THEIR', 'PROPERTY'] +7729-102255-0028-289: hyp=['PRIVATE', 'PERSONS', 'WHO', 'HAD', 'LEAST', 'THE', 'FREE', 'STATE', 'HOTEL', 'VAINLY', 'BESOUGHT', 'THE', 'VARIOUS', 'AUTHORITIES', 'TO', 'PRESENT', 'THE', 'DESTRUCTION', 'OF', 'THEIR', 'PROPERTY'] +7729-102255-0029-290: ref=['TEN', 'DAYS', 'WERE', 'CONSUMED', 'IN', 'THESE', 'NEGOTIATIONS', 'BUT', 'THE', 'SPIRIT', 'OF', 'VENGEANCE', 'REFUSED', 'TO', 'YIELD'] +7729-102255-0029-290: hyp=['TEN', 'DAYS', 'WERE', 'CONSUMED', 'IN', 'THESE', 'NEGOTIATIONS', 'BUT', 'THE', 'SPIRIT', 'OF', 'VENGEANCE', 'REFUSED', 'TO', 'YIELD'] +7729-102255-0030-291: ref=['HE', 'SUMMONED', 'HALF', 'A', 'DOZEN', 'CITIZENS', 'TO', 'JOIN', 'HIS', 'POSSE', 'WHO', 'FOLLOWED', 'OBEYED', 'AND', 'ASSISTED', 'HIM'] +7729-102255-0030-291: hyp=['HE', 'SUMMONED', 'HALF', 'A', 'DOZEN', 'CITIZENS', 'TO', 'JOIN', 'HIS', 'POSSE', 'WHO', 'FOLLOWED', 'OBEYED', 'AND', 'ASSISTED', 'HIM'] +7729-102255-0031-292: ref=['HE', 'CONTINUED', 'HIS', 'PRETENDED', 'SEARCH', 'AND', 'TO', 'GIVE', 'COLOR', 'TO', 'HIS', 'ERRAND', 'MADE', 'TWO', 'ARRESTS'] +7729-102255-0031-292: hyp=['HE', 'CONTINUED', 'HIS', 'PRETENDED', 'SEARCH', 'AND', 'TO', 'GIVE', 'COLOR', 'TO', 'HIS', 'ERRAND', 'MADE', 'TO', 'ARREST'] +7729-102255-0032-293: ref=['THE', 'FREE', 'STATE', 'HOTEL', 'A', 'STONE', 'BUILDING', 'IN', 'DIMENSIONS', 'FIFTY', 'BY', 'SEVENTY', 'FEET', 'THREE', 'STORIES', 'HIGH', 'AND', 'HANDSOMELY', 'FURNISHED', 'PREVIOUSLY', 'OCCUPIED', 'ONLY', 'FOR', 'LODGING', 'ROOMS', 'ON', 'THAT', 'DAY', 'FOR', 'THE', 'FIRST', 'TIME', 'OPENED', 'ITS', 'TABLE', 'ACCOMMODATIONS', 'TO', 'THE', 'PUBLIC', 'AND', 'PROVIDED', 'A', 'FREE', 'DINNER', 'IN', 'HONOR', 'OF', 'THE', 'OCCASION'] +7729-102255-0032-293: hyp=['THE', 'FREE', 'STATE', 'HOTEL', 'A', 'STONE', 'BUILDING', 'IN', 'DIMENSIONS', 'FIFTY', 'BY', 'SEVENTY', 'FEET', 'THREE', 'STORIES', 'HIGH', 'AND', 'HANDSOMELY', 'FURNISHED', 'PREVIOUSLY', 'OCCUPIED', 'ONLY', 'FOR', 'LODGING', 'ROOMS', 'ON', 'THAT', 'DAY', 'FOR', 'THE', 'FIRST', 'TIME', 'OPENED', 'ITS', 'TABLE', 'ACCOMMODATIONS', 'TO', 'THE', 'PUBLIC', 'AND', 'PROVIDED', 'A', 'FREE', 'DINNER', 'IN', 'HONOR', 'OF', 'THE', 'OCCASION'] +7729-102255-0033-294: ref=['AS', 'HE', 'HAD', 'PROMISED', 'TO', 'PROTECT', 'THE', 'HOTEL', 'THE', 'REASSURED', 'CITIZENS', 'BEGAN', 'TO', 'LAUGH', 'AT', 'THEIR', 'OWN', 'FEARS'] +7729-102255-0033-294: hyp=['AS', 'HE', 'HAD', 'PROMISED', 'TO', 'PROTECT', 'THE', 'HOTEL', 'THE', 'REASSURED', 'CITIZENS', 'BEGAN', 'TO', 'LAUGH', 'AT', 'THEIR', 'OWN', 'FEARS'] +7729-102255-0034-295: ref=['TO', 'THEIR', 'SORROW', 'THEY', 'WERE', 'SOON', 'UNDECEIVED'] +7729-102255-0034-295: hyp=['TO', 'THEIR', 'SORROW', 'THEY', 'WERE', 'SOON', 'UNDECEIVED'] +7729-102255-0035-296: ref=['THE', 'MILITARY', 'FORCE', 'PARTLY', 'RABBLE', 'PARTLY', 'ORGANIZED', 'HAD', 'MEANWHILE', 'MOVED', 'INTO', 'THE', 'TOWN'] +7729-102255-0035-296: hyp=['THE', 'MILITARY', 'FORCE', 'PARTLY', 'RABBLE', 'PARTLY', 'ORGANIZED', 'HAD', 'MEANWHILE', 'MOVED', 'INTO', 'THE', 'TOWN'] +7729-102255-0036-297: 
ref=['HE', 'PLANTED', 'A', 'COMPANY', 'BEFORE', 'THE', 'HOTEL', 'AND', 'DEMANDED', 'A', 'SURRENDER', 'OF', 'THE', 'ARMS', 'BELONGING', 'TO', 'THE', 'FREE', 'STATE', 'MILITARY', 'COMPANIES'] +7729-102255-0036-297: hyp=['HE', 'PLANTED', 'ACCOMPANIED', 'BEFORE', 'THE', 'HOTEL', 'AND', 'DEMANDED', 'A', 'SURRENDER', 'OF', 'THE', 'ARMS', 'BELONGING', 'TO', 'THE', 'FREE', 'STATE', 'MILITARY', 'COMPANIES'] +7729-102255-0037-298: ref=['HALF', 'AN', 'HOUR', 'LATER', 'TURNING', 'A', 'DEAF', 'EAR', 'TO', 'ALL', 'REMONSTRANCE', 'HE', 'GAVE', 'THE', 'PROPRIETORS', 'UNTIL', 'FIVE', "O'CLOCK", 'TO', 'REMOVE', 'THEIR', 'FAMILIES', 'AND', 'PERSONAL', 'PROPERTY', 'FROM', 'THE', 'FREE', 'STATE', 'HOTEL'] +7729-102255-0037-298: hyp=['HALF', 'AN', 'HOUR', 'LATER', 'TURNING', 'A', 'DEAF', 'EAR', 'TO', 'ALL', 'REMONSTRANCE', 'HE', 'GAVE', 'THE', 'PROPRIETORS', 'UNTIL', 'FIVE', "O'CLOCK", 'TO', 'REMOVE', 'THEIR', 'FAMILIES', 'AND', 'PERSONAL', 'PROPERTY', 'FROM', 'THE', 'FREE', 'STATE', 'HOTEL'] +7729-102255-0038-299: ref=['ATCHISON', 'WHO', 'HAD', 'BEEN', 'HARANGUING', 'THE', 'MOB', 'PLANTED', 'HIS', 'TWO', 'GUNS', 'BEFORE', 'THE', 'BUILDING', 'AND', 'TRAINED', 'THEM', 'UPON', 'IT'] +7729-102255-0038-299: hyp=['ATTITSON', 'WHO', 'HAD', 'BEEN', 'HARANGUING', 'THE', 'MOB', 'PLANTED', 'HIS', 'TWO', 'GUNS', 'BEFORE', 'THE', 'BUILDING', 'AND', 'TRAINED', 'THEM', 'UPON', 'IT'] +7729-102255-0039-300: ref=['THE', 'INMATES', 'BEING', 'REMOVED', 'AT', 'THE', 'APPOINTED', 'HOUR', 'A', 'FEW', 'CANNON', 'BALLS', 'WERE', 'FIRED', 'THROUGH', 'THE', 'STONE', 'WALLS'] +7729-102255-0039-300: hyp=['THE', 'INMATES', 'BEING', 'REMOVED', 'AT', 'THE', 'APPOINTED', 'HOUR', 'A', 'FEW', 'CANNON', 'BALLS', 'WERE', 'FIRED', 'THROUGH', 'THE', 'STONE', 'WALLS'] +7729-102255-0040-301: ref=['IN', 'THIS', 'INCIDENT', 'CONTRASTING', 'THE', 'CREATIVE', 'AND', 'THE', 'DESTRUCTIVE', 'SPIRIT', 'OF', 'THE', 'FACTIONS', 'THE', 'EMIGRANT', 'AID', 'SOCIETY', 'OF', 'MASSACHUSETTS', 'FINDS', 'ITS', 'MOST', 'HONORABLE', 'AND', 'TRIUMPHANT', 'VINDICATION'] +7729-102255-0040-301: hyp=['IN', 'THIS', 'INCIDENT', 'CONTRASTING', 'THE', 'CREATIVE', 'AND', 'THE', 'DESTRUCTIVE', 'SPIRIT', 'OF', 'THE', 'FACTIONS', 'THE', 'IMMIGRANT', 'AIDS', 'SOCIETY', 'OF', 'MASSACHUSETTS', 'FINDS', 'ITS', 'MOST', 'HONORABLE', 'AND', 'TRIUMPHANT', 'VINDICATION'] +7729-102255-0041-302: ref=['THE', 'WHOLE', 'PROCEEDING', 'WAS', 'SO', 'CHILDISH', 'THE', 'MISERABLE', 'PLOT', 'SO', 'TRANSPARENT', 'THE', 'OUTRAGE', 'SO', 'GROSS', 'AS', 'TO', 'BRING', 'DISGUST', 'TO', 'THE', 'BETTER', 'CLASS', 'OF', 'BORDER', 'RUFFIANS', 'WHO', 'WERE', 'WITNESSES', 'AND', 'ACCESSORIES'] +7729-102255-0041-302: hyp=['THE', 'WHOLE', 'PROCEEDING', 'WAS', 'SO', 'CHILDISH', 'THE', 'MISERABLE', 'PLOT', 'SO', 'TRANSPARENT', 'THE', 'OUTRAGED', 'SO', 'GROSS', 'AS', 'TO', 'BRING', 'DISGUST', 'TO', 'THE', 'BETTER', 'CLASS', 'OF', 'BORDER', 'RUFFIANS', 'WHO', 'WERE', 'WITNESSES', 'AND', 'ACCESSORIES'] +7729-102255-0042-303: ref=['RELOCATED', 'FOOTNOTE', 'GOVERNOR', 'ROBINSON', 'BEING', 'ON', 'HIS', 'WAY', 'EAST', 'THE', 'STEAMBOAT', 'ON', 'WHICH', 'HE', 'WAS', 'TRAVELING', 'STOPPED', 'AT', 'LEXINGTON', 'MISSOURI'] +7729-102255-0042-303: hyp=['RE', 'LOCATED', 'FOOTNOTE', 'GOVERNOR', 'ROBINSON', 'BEING', 'ON', 'HIS', 'WAY', 'EAST', 'THE', 'STEAMBOAT', 'ON', 'WHICH', 'HE', 'WAS', 'TRAVELLING', 'STOPPED', 'AT', 'LEXINGTON', 'MISSOURI'] +7729-102255-0043-304: ref=['IN', 'A', 'FEW', 'DAYS', 'AN', 'OFFICER', 'CAME', 'WITH', 'A', 'REQUISITION', 'FROM', 'GOVERNOR', 'SHANNON', 'AND', 'TOOK', 'THE', 'PRISONER', 'BY', 'LAND', 
'TO', 'WESTPORT', 'AND', 'AFTERWARDS', 'FROM', 'THERE', 'TO', 'KANSAS', 'CITY', 'AND', 'LEAVENWORTH'] +7729-102255-0043-304: hyp=['IN', 'A', 'FEW', 'DAYS', 'AN', 'OFFICER', 'CAME', 'WITH', 'A', 'REQUISITION', 'FROM', 'GOVERNOR', 'SHANNON', 'AND', 'TOOK', 'THE', 'PRISONER', 'BY', 'LANDA', 'WESTPORT', 'AND', 'AFTERWARDS', 'FROM', 'THERE', 'TO', 'KANSAS', 'CITY', 'IN', 'LEVINWORTH'] +7729-102255-0044-305: ref=['HERE', 'HE', 'WAS', 'PLACED', 'IN', 'THE', 'CUSTODY', 'OF', 'CAPTAIN', 'MARTIN', 'OF', 'THE', 'KICKAPOO', 'RANGERS', 'WHO', 'PROVED', 'A', 'KIND', 'JAILER', 'AND', 'MATERIALLY', 'ASSISTED', 'IN', 'PROTECTING', 'HIM', 'FROM', 'THE', 'DANGEROUS', 'INTENTIONS', 'OF', 'THE', 'MOB', 'WHICH', 'AT', 'THAT', 'TIME', 'HELD', 'LEAVENWORTH', 'UNDER', 'A', 'REIGN', 'OF', 'TERROR'] +7729-102255-0044-305: hyp=['HARRY', 'WAS', 'PLACED', 'IN', 'THE', 'CUSTODY', 'OF', 'CAPTAIN', 'MARTIN', 'OF', 'THE', 'KICKAPOO', 'RANGERS', 'WHO', 'PROVED', 'A', 'KIND', 'JAILER', 'AND', 'MATERIALLY', 'ASSISTED', 'IN', 'PROTECTING', 'HIM', 'FROM', 'THE', 'DANGEROUS', 'INTENTIONS', 'OF', 'THE', 'MOB', 'WHICH', 'AT', 'THAT', 'TIME', 'HELD', 'LEVIN', 'WORTH', 'UNDER', 'THE', 'REIGN', 'OF', 'TERROR'] +7729-102255-0045-306: ref=['CAPTAIN', 'MARTIN', 'SAID', 'I', 'SHALL', 'GIVE', 'YOU', 'A', 'PISTOL', 'TO', 'HELP', 'PROTECT', 'YOURSELF', 'IF', 'WORSE', 'COMES', 'TO', 'WORST'] +7729-102255-0045-306: hyp=['CAPTAIN', 'MARTIN', 'SAID', 'I', 'SHALL', 'GIVE', 'YOU', 'A', 'PISTOL', 'TO', 'HELP', 'PROTECT', 'YOURSELF', 'IF', 'WORSE', 'COMES', 'TO', 'WORST'] +7729-102255-0046-307: ref=['IN', 'THE', 'EARLY', 'MORNING', 'OF', 'THE', 'NEXT', 'DAY', 'MAY', 'TWENTY', 'NINTH', 'A', 'COMPANY', 'OF', 'DRAGOONS', 'WITH', 'ONE', 'EMPTY', 'SADDLE', 'CAME', 'DOWN', 'FROM', 'THE', 'FORT', 'AND', 'WHILE', 'THE', 'PRO', 'SLAVERY', 'MEN', 'STILL', 'SLEPT', 'THE', 'PRISONER', 'AND', 'HIS', 'ESCORT', 'WERE', 'ON', 'THEIR', 'WAY', 'ACROSS', 'THE', 'PRAIRIES', 'TO', 'LECOMPTON', 'IN', 'THE', 'CHARGE', 'OF', 'OFFICERS', 'OF', 'THE', 'UNITED', 'STATES', 'ARMY'] +7729-102255-0046-307: hyp=['IN', 'THE', 'EARLY', 'MORNING', 'OF', 'THE', 'NEXT', 'DAY', 'MAY', 'TWENTY', 'NINTH', 'A', 'COMPANY', 'OF', 'DRAGOONS', 'WITH', 'ONE', 'EMPTY', 'SADDLE', 'CAME', 'DOWN', 'FROM', 'THE', 'FORT', 'AND', 'WHILE', 'THE', 'PRO', 'SLAVERY', 'MEN', 'STILL', 'SLEPT', 'THE', 'PRISONER', 'AND', 'HIS', 'ESCORT', 'WERE', 'ON', 'THEIR', 'WAY', 'ACROSS', 'THE', 'PRAIRIES', 'TO', 'LECOMPTON', 'IN', 'THE', 'CHARGE', 'OF', 'OFFICERS', 'OF', 'THE', 'UNITED', 'STATES', 'ARMY'] +8224-274381-0000-1451: ref=['THOUGH', 'THROWN', 'INTO', 'PRISON', 'FOR', 'THIS', 'ENTERPRISE', 'AND', 'DETAINED', 'SOME', 'TIME', 'HE', 'WAS', 'NOT', 'DISCOURAGED', 'BUT', 'STILL', 'CONTINUED', 'BY', 'HIS', 'COUNTENANCE', 'AND', 'PROTECTION', 'TO', 'INFUSE', 'SPIRIT', 'INTO', 'THE', 'DISTRESSED', 'ROYALISTS'] +8224-274381-0000-1451: hyp=['THOUGH', 'THROWN', 'INTO', 'PRISON', 'FOR', 'THIS', 'ENTERPRISE', 'AND', 'DETAINED', 'SOME', 'TIME', 'HE', 'WAS', 'NOT', 'DISCOURAGED', 'BUT', 'STILL', 'CONTINUED', 'BY', 'HIS', 'COUNTENANCE', 'AND', 'PROTECTION', 'TO', 'INFUSE', 'SPIRIT', 'INTO', 'THE', 'DISTRESSED', 'ROYALISTS'] +8224-274381-0001-1452: ref=['AMONG', 'OTHER', 'PERSONS', 'OF', 'DISTINCTION', 'WHO', 'UNITED', 'THEMSELVES', 'TO', 'HIM', 'WAS', 'LORD', 'NAPIER', 'OF', 'MERCHISTON', 'SON', 'OF', 'THE', 'FAMOUS', 'INVENTOR', 'OF', 'THE', 'LOGARITHMS', 'THE', 'PERSON', 'TO', 'WHOM', 'THE', 'TITLE', 'OF', 'A', 'GREAT', 'MAN', 'IS', 'MORE', 'JUSTLY', 'DUE', 'THAN', 'TO', 'ANY', 'OTHER', 'WHOM', 'HIS', 'COUNTRY', 'EVER', 
'PRODUCED'] +8224-274381-0001-1452: hyp=['AMONG', 'OTHER', 'PERSONS', 'OF', 'DISTINCTION', 'WHO', 'UNITED', 'THEMSELVES', 'TO', 'HIM', 'WAS', 'LORD', 'NAPIER', 'OF', 'MURCHISTON', 'SON', 'OF', 'THE', 'FAMOUS', 'INVENTOR', 'OF', 'THE', 'LOGARITHMS', 'THE', 'PERSON', 'TO', 'WHOM', 'THE', 'TITLE', 'OF', 'A', 'GREAT', 'MAN', 'IS', 'MORE', 'JUSTLY', 'DUE', 'THAN', 'TO', 'ANY', 'OTHER', 'WHOM', 'HIS', 'COUNTRY', 'EVER', 'PRODUCED'] +8224-274381-0002-1453: ref=['WHILE', 'THE', 'FORMER', 'FORETOLD', 'THAT', 'THE', 'SCOTTISH', 'COVENANTERS', 'WERE', 'SECRETLY', 'FORMING', 'A', 'UNION', 'WITH', 'THE', 'ENGLISH', 'PARLIAMENT', 'AND', 'INCULCATED', 'THE', 'NECESSITY', 'OF', 'PREVENTING', 'THEM', 'BY', 'SOME', 'VIGOROUS', 'UNDERTAKING', 'THE', 'LATTER', 'STILL', 'INSISTED', 'THAT', 'EVERY', 'SUCH', 'ATTEMPT', 'WOULD', 'PRECIPITATE', 'THEM', 'INTO', 'MEASURES', 'TO', 'WHICH', 'OTHERWISE', 'THEY', 'WERE', 'NOT', 'PERHAPS', 'INCLINED'] +8224-274381-0002-1453: hyp=['WHILE', 'THE', 'FORMER', 'FORETOLD', 'THAT', 'THE', 'SCOTTISH', 'COVENANTERS', 'WERE', 'SECRETLY', 'FORMING', 'A', 'UNION', 'WITH', 'THE', 'ENGLISH', 'PARLIAMENT', 'AND', 'INCALCATED', 'THE', 'NECESSITY', 'OF', 'PREVENTING', 'THEM', 'BY', 'SOME', 'VIGOROUS', 'UNDERTAKING', 'THE', 'LATTER', 'STILL', 'INSISTED', 'THAT', 'EVERY', 'SUCH', 'ATTEMPT', 'WOULD', 'PRECIPITATE', 'THEM', 'INTO', 'MEASURES', 'TO', 'WHICH', 'OTHERWISE', 'THEY', 'WERE', 'NOT', 'PERHAPS', 'INCLINED'] +8224-274381-0003-1454: ref=['THE', "KING'S", 'EARS', 'WERE', 'NOW', 'OPEN', 'TO', "MONTROSE'S", 'COUNSELS', 'WHO', 'PROPOSED', 'NONE', 'BUT', 'THE', 'BOLDEST', 'AND', 'MOST', 'DARING', 'AGREEABLY', 'TO', 'THE', 'DESPERATE', 'STATE', 'OF', 'THE', 'ROYAL', 'CAUSE', 'IN', 'SCOTLAND'] +8224-274381-0003-1454: hyp=['THE', "KING'S", 'EARS', 'WERE', 'NOW', 'OPEN', 'TO', "MONTROSE'S", 'COUNCILS', 'WHO', 'PROPOSED', 'NONE', 'BUT', 'THE', 'BOLDEST', 'AND', 'MOST', 'DARING', 'AGREEABLY', 'TO', 'THE', 'DESPERATE', 'STATE', 'OF', 'THE', 'ROYAL', 'CAUSE', 'IN', 'SCOTLAND'] +8224-274381-0004-1455: ref=['FIVE', 'HUNDRED', 'MEN', 'MORE', 'WHO', 'HAD', 'BEEN', 'LEVIED', 'BY', 'THE', 'COVENANTERS', 'WERE', 'PERSUADED', 'TO', 'EMBRACE', 'THE', 'ROYAL', 'CAUSE', 'AND', 'WITH', 'THIS', 'COMBINED', 'FORCE', 'HE', 'HASTENED', 'TO', 'ATTACK', 'LORD', 'ELCHO', 'WHO', 'LAY', 'AT', 'PERTH', 'WITH', 'AN', 'ARMY', 'OF', 'SIX', 'THOUSAND', 'MEN', 'ASSEMBLED', 'UPON', 'THE', 'FIRST', 'NEWS', 'OF', 'THE', 'IRISH', 'INVASION'] +8224-274381-0004-1455: hyp=['FIVE', 'HUNDRED', 'MEN', 'MORE', 'WHO', 'HAD', 'BEEN', 'LEVIED', 'BY', 'THE', 'COVENANTERS', 'WERE', 'PERSUADED', 'TO', 'EMBRACE', 'THE', 'ROYAL', 'CAUSE', 'AND', 'WITH', 'THIS', 'COMBINED', 'FORCE', 'HE', 'HASTENED', 'TO', 'ATTACK', 'LORD', 'ELKOE', 'WHO', 'LAY', 'AT', 'PERTH', 'WITH', 'AN', 'ARMY', 'OF', 'SIX', 'THOUSAND', 'MEN', 'ASSEMBLED', 'UPON', 'THE', 'FIRST', 'NEWS', 'OF', 'THE', 'IRISH', 'INVASION'] +8224-274381-0005-1456: ref=['DREADING', 'THE', 'SUPERIOR', 'POWER', 'OF', 'ARGYLE', 'WHO', 'HAVING', 'JOINED', 'HIS', 'VASSALS', 'TO', 'A', 'FORCE', 'LEVIED', 'BY', 'THE', 'PUBLIC', 'WAS', 'APPROACHING', 'WITH', 'A', 'CONSIDERABLE', 'ARMY', 'MONTROSE', 'HASTENED', 'NORTHWARDS', 'IN', 'ORDER', 'TO', 'ROUSE', 'AGAIN', 'THE', 'MARQUIS', 'OF', 'HUNTLEY', 'AND', 'THE', 'GORDONS', 'WHO', 'HAVING', 'BEFORE', 'HASTILY', 'TAKEN', 'ARMS', 'HAD', 'BEEN', 'INSTANTLY', 'SUPPRESSED', 'BY', 'THE', 'COVENANTERS'] +8224-274381-0005-1456: hyp=['DREADING', 'THE', 'SUPERIOR', 'POWER', 'OF', 'ARGYLE', 'WHO', 'HAVING', 'JOINED', 'HIS', 'VASSALS', 'TO', 'A', 'FORCE', 'LEVIED', 
'BY', 'THE', 'PUBLIC', 'WAS', 'APPROACHING', 'WITH', 'A', 'CONSIDERABLE', 'ARMY', 'MONTROSE', 'HASTENED', 'NORTHWARD', 'IN', 'ORDER', 'TO', 'ROUSE', 'AGAIN', 'THE', 'MARQUIS', 'OF', 'HUNTLY', 'AND', 'THE', 'GORDONS', 'WHO', 'HAVING', 'BEFORE', 'HASTILY', 'TAKEN', 'ARMS', 'HAD', 'BEEN', 'INSTANTLY', 'SUPPRESSED', 'BY', 'THE', 'COVENANTERS'] +8224-274381-0006-1457: ref=['THIS', "NOBLEMAN'S", 'CHARACTER', 'THOUGH', 'CELEBRATED', 'FOR', 'POLITICAL', 'COURAGE', 'AND', 'CONDUCT', 'WAS', 'VERY', 'LOW', 'FOR', 'MILITARY', 'PROWESS', 'AND', 'AFTER', 'SOME', 'SKIRMISHES', 'IN', 'WHICH', 'HE', 'WAS', 'WORSTED', 'HE', 'HERE', 'ALLOWED', 'MONTROSE', 'TO', 'ESCAPE', 'HIM'] +8224-274381-0006-1457: hyp=['THIS', "NOBLEMAN'S", 'CHARACTER', 'THOUGH', 'CELEBRATED', 'FOR', 'POLITICAL', 'COURAGE', 'AND', 'CONDUCT', 'WAS', 'VERY', 'LOW', 'FOR', 'MILITARY', 'PROWESS', 'AND', 'AFTER', 'SOME', 'SKIRMISHES', 'IN', 'WHICH', 'HE', 'WAS', 'WORSTED', 'HE', 'HERE', 'ALLOWED', 'MONTROSE', 'TO', 'ESCAPE', 'HIM'] +8224-274381-0007-1458: ref=['BY', 'QUICK', 'MARCHES', 'THROUGH', 'THESE', 'INACCESSIBLE', 'MOUNTAINS', 'THAT', 'GENERAL', 'FREED', 'HIMSELF', 'FROM', 'THE', 'SUPERIOR', 'FORCES', 'OF', 'THE', 'COVENANTERS'] +8224-274381-0007-1458: hyp=['BY', 'QUICK', 'MARCHES', 'THROUGH', 'THESE', 'INACCESSIBLE', 'MOUNTAINS', 'THAT', 'GENERAL', 'FREED', 'HIMSELF', 'FROM', 'THE', 'SUPERIOR', 'FORCES', 'OF', 'THE', 'COVENANTERS'] +8224-274381-0008-1459: ref=['WITH', 'THESE', 'AND', 'SOME', 'REENFORCEMENTS', 'OF', 'THE', 'ATHOLEMEN', 'AND', 'MACDONALDS', 'WHOM', 'HE', 'HAD', 'RECALLED', 'MONTROSE', 'FELL', 'SUDDENLY', 'UPON', "ARGYLE'S", 'COUNTRY', 'AND', 'LET', 'LOOSE', 'UPON', 'IT', 'ALL', 'THE', 'RAGE', 'OF', 'WAR', 'CARRYING', 'OFF', 'THE', 'CATTLE', 'BURNING', 'THE', 'HOUSES', 'AND', 'PUTTING', 'THE', 'INHABITANTS', 'TO', 'THE', 'SWORD'] +8224-274381-0008-1459: hyp=['WITH', 'THESE', 'AND', 'SOME', 'REINFORCEMENTS', 'OF', 'THE', 'ETHEL', 'MEN', 'AND', 'MAC', 'DONALDS', 'WHOM', 'HE', 'HAD', 'RECALLED', 'MONTROSE', 'FELL', 'SUDDENLY', 'UPON', "ARGYLE'S", 'COUNTRY', 'AND', 'LET', 'LOOSE', 'UPON', 'IT', 'ALL', 'THE', 'RAGE', 'OF', 'WAR', 'CARRYING', 'OFF', 'THE', 'CATTLE', 'BURNING', 'THE', 'HOUSES', 'AND', 'PUTTING', 'THE', 'INHABITANTS', 'TO', 'THE', 'SWORD'] +8224-274381-0009-1460: ref=['THIS', 'SEVERITY', 'BY', 'WHICH', 'MONTROSE', 'SULLIED', 'HIS', 'VICTORIES', 'WAS', 'THE', 'RESULT', 'OF', 'PRIVATE', 'ANIMOSITY', 'AGAINST', 'THE', 'CHIEFTAIN', 'AS', 'MUCH', 'AS', 'OF', 'ZEAL', 'FOR', 'THE', 'PUBLIC', 'CAUSE', 'ARGYLE', 'COLLECTING', 'THREE', 'THOUSAND', 'MEN', 'MARCHED', 'IN', 'QUEST', 'OF', 'THE', 'ENEMY', 'WHO', 'HAD', 'RETIRED', 'WITH', 'THEIR', 'PLUNDER', 'AND', 'HE', 'LAY', 'AT', 'INNERLOCHY', 'SUPPOSING', 'HIMSELF', 'STILL', 'AT', 'A', 'CONSIDERABLE', 'DISTANCE', 'FROM', 'THEM'] +8224-274381-0009-1460: hyp=['THIS', 'SEVERITY', 'BY', 'WHICH', 'MONTROSE', 'SULLIED', 'HIS', 'VICTORIES', 'WAS', 'THE', 'RESULT', 'OF', 'PRIVATE', 'ANIMOSITY', 'AGAINST', 'THE', 'CHIEFTAIN', 'AS', 'MUCH', 'AS', 'OF', 'ZEAL', 'FOR', 'THE', 'PUBLIC', 'CAUSE', 'ARGYLE', 'COLLECTING', 'THREE', 'THOUSAND', 'MEN', 'MARCHED', 'IN', 'QUEST', 'OF', 'THE', 'ENEMY', 'WHO', 'HAD', 'RETIRED', 'WITH', 'THEIR', 'PLUNDER', 'AND', 'HE', 'LAY', 'AT', 'INERLOCKY', 'SUPPOSING', 'HIMSELF', 'STILL', 'AT', 'A', 'CONSIDERABLE', 'DISTANCE', 'FROM', 'THEM'] +8224-274381-0010-1461: ref=['BY', 'A', 'QUICK', 'AND', 'UNEXPECTED', 'MARCH', 'MONTROSE', 'HASTENED', 'TO', 'INNERLOCHY', 'AND', 'PRESENTED', 'HIMSELF', 'IN', 'ORDER', 'OF', 'BATTLE', 'BEFORE', 'THE', 'SURPRISED', 
'BUT', 'NOT', 'AFFRIGHTENED', 'COVENANTERS'] +8224-274381-0010-1461: hyp=['BY', 'A', 'QUICK', 'AND', 'UNEXPECTED', 'MARCH', 'MONTROSE', 'HASTENED', 'TO', 'IN', 'A', 'LOCKY', 'AND', 'PRESENTED', 'HIMSELF', 'IN', 'ORDER', 'OF', 'BATTLE', 'BEFORE', 'THE', 'SURPRISED', 'BUT', 'NOT', 'A', 'FRIGHTENED', 'COVENANTERS'] +8224-274381-0011-1462: ref=['HIS', 'CONDUCT', 'AND', 'PRESENCE', 'OF', 'MIND', 'IN', 'THIS', 'EMERGENCE', 'APPEARED', 'CONSPICUOUS'] +8224-274381-0011-1462: hyp=['HIS', 'CONDUCT', 'AND', 'PRESENCE', 'OF', 'MIND', 'IN', 'THIS', 'EMERGENCE', 'APPEARED', 'CONSPICUOUS'] +8224-274381-0012-1463: ref=['MONTROSE', 'WEAK', 'IN', 'CAVALRY', 'HERE', 'LINED', 'HIS', 'TROOPS', 'OF', 'HORSE', 'WITH', 'INFANTRY', 'AND', 'AFTER', 'PUTTING', 'THE', "ENEMY'S", 'HORSE', 'TO', 'ROUT', 'FELL', 'WITH', 'UNITED', 'FORCE', 'UPON', 'THEIR', 'FOOT', 'WHO', 'WERE', 'ENTIRELY', 'CUT', 'IN', 'PIECES', 'THOUGH', 'WITH', 'THE', 'LOSS', 'OF', 'THE', 'GALLANT', 'LORD', 'GORDON', 'ON', 'THE', 'PART', 'OF', 'THE', 'ROYALISTS'] +8224-274381-0012-1463: hyp=['MONTROSE', 'WEAK', 'IN', 'CAVALRY', 'HERE', 'LINED', 'HIS', 'TROOPS', 'OF', 'HORSE', 'WITH', 'INFANTRY', 'AND', 'AFTER', 'PUTTING', 'THE', "ENEMY'S", 'HORSE', 'TO', 'ROUT', 'FELL', 'WITH', 'UNITED', 'FORCE', 'UPON', 'THEIR', 'FOOT', 'WHO', 'WERE', 'ENTIRELY', 'CUT', 'IN', 'PIECES', 'THOUGH', 'WITH', 'THE', 'LOSS', 'OF', 'THE', 'GALLANT', 'LORD', 'GORDON', 'ON', 'THE', 'PART', 'OF', 'THE', 'ROYALISTS'] +8224-274381-0013-1464: ref=['FROM', 'THE', 'SAME', 'MEN', 'NEW', 'REGIMENTS', 'AND', 'NEW', 'COMPANIES', 'WERE', 'FORMED', 'DIFFERENT', 'OFFICERS', 'APPOINTED', 'AND', 'THE', 'WHOLE', 'MILITARY', 'FORCE', 'PUT', 'INTO', 'SUCH', 'HANDS', 'AS', 'THE', 'INDEPENDENTS', 'COULD', 'RELY', 'ON'] +8224-274381-0013-1464: hyp=['FROM', 'THE', 'SAME', 'MEN', 'NEW', 'REGIMENTS', 'AND', 'NEW', 'COMPANIES', 'WERE', 'FORMED', 'DIFFERENT', 'OFFICERS', 'APPOINTED', 'AND', 'THE', 'WHOLE', 'MILITARY', 'FORCE', 'PUT', 'INTO', 'SUCH', 'HANDS', 'AS', 'THE', 'INDEPENDENTS', 'COULD', 'RELY', 'ON'] +8224-274381-0014-1465: ref=['BESIDES', 'MEMBERS', 'OF', 'PARLIAMENT', 'WHO', 'WERE', 'EXCLUDED', 'MANY', 'OFFICERS', 'UNWILLING', 'TO', 'SERVE', 'UNDER', 'THE', 'NEW', 'GENERALS', 'THREW', 'UP', 'THEIR', 'COMMISSIONS', 'AND', 'UNWARILY', 'FACILITATED', 'THE', 'PROJECT', 'OF', 'PUTTING', 'THE', 'ARMY', 'ENTIRELY', 'INTO', 'THE', 'HANDS', 'OF', 'THAT', 'FACTION'] +8224-274381-0014-1465: hyp=['BESIDES', 'MEMBERS', 'OF', 'PARLIAMENT', 'WHO', 'WERE', 'EXCLUDED', 'MANY', 'OFFICERS', 'UNWILLING', 'TO', 'SERVE', 'UNDER', 'THE', 'NEW', 'GENERALS', 'THREW', 'UP', 'THEIR', 'COMMISSIONS', 'AND', 'THEN', 'WARILY', 'FACILITATED', 'THE', 'PROJECT', 'OF', 'PUTTING', 'THE', 'ARMY', 'ENTIRELY', 'INTO', 'THE', 'HANDS', 'OF', 'THAT', 'FACTION'] +8224-274381-0015-1466: ref=['THOUGH', 'THE', 'DISCIPLINE', 'OF', 'THE', 'FORMER', 'PARLIAMENTARY', 'ARMY', 'WAS', 'NOT', 'CONTEMPTIBLE', 'A', 'MORE', 'EXACT', 'PLAN', 'WAS', 'INTRODUCED', 'AND', 'RIGOROUSLY', 'EXECUTED', 'BY', 'THESE', 'NEW', 'COMMANDERS'] +8224-274381-0015-1466: hyp=['THOUGH', 'THE', 'DISCIPLINE', 'OF', 'THE', 'FORMER', 'PARLIAMENTARY', 'ARMY', 'WAS', 'NOT', 'CONTEMPTIBLE', 'A', 'MORE', 'EXACT', 'PLAN', 'WAS', 'INTRODUCED', 'AND', 'RIGOROUSLY', 'EXECUTED', 'BY', 'THESE', 'NEW', 'COMMANDERS'] +8224-274381-0016-1467: ref=['VALOR', 'INDEED', 'WAS', 'VERY', 'GENERALLY', 'DIFFUSED', 'OVER', 'THE', 'ONE', 'PARTY', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'DURING', 'THIS', 'PERIOD', 'DISCIPLINE', 'ALSO', 'WAS', 'ATTAINED', 'BY', 'THE', 'FORCES', 'OF', 'THE', 
'PARLIAMENT', 'BUT', 'THE', 'PERFECTION', 'OF', 'THE', 'MILITARY', 'ART', 'IN', 'CONCERTING', 'THE', 'GENERAL', 'PLANS', 'OF', 'ACTION', 'AND', 'THE', 'OPERATIONS', 'OF', 'THE', 'FIELD', 'SEEMS', 'STILL', 'ON', 'BOTH', 'SIDES', 'TO', 'HAVE', 'BEEN', 'IN', 'A', 'GREAT', 'MEASURE', 'WANTING'] +8224-274381-0016-1467: hyp=['VALOR', 'INDEED', 'WAS', 'VERY', 'GENERALLY', 'DIFFUSED', 'OVER', 'THE', 'ONE', 'PARTY', 'AS', 'WELL', 'AS', 'THE', 'OTHER', 'DURING', 'THIS', 'PERIOD', 'DISCIPLINE', 'ALSO', 'WAS', 'ATTAINED', 'BY', 'THE', 'FORCES', 'OF', 'THE', 'PARLIAMENT', 'BUT', 'THE', 'PERFECTION', 'OF', 'THE', 'MILITARY', 'ART', 'IN', 'CONCERTING', 'THE', 'GENERAL', 'PLANS', 'OF', 'ACTION', 'AND', 'THE', 'OPERATIONS', 'OF', 'THE', 'FIELD', 'SEEMS', 'STILL', 'ON', 'BOTH', 'SIDES', 'TO', 'HAVE', 'BEEN', 'IN', 'A', 'GREAT', 'MEASURE', 'WANTING'] +8224-274381-0017-1468: ref=['HISTORIANS', 'AT', 'LEAST', 'PERHAPS', 'FROM', 'THEIR', 'OWN', 'IGNORANCE', 'AND', 'INEXPERIENCE', 'HAVE', 'NOT', 'REMARKED', 'ANY', 'THING', 'BUT', 'A', 'HEADLONG', 'IMPETUOUS', 'CONDUCT', 'EACH', 'PARTY', 'HURRYING', 'TO', 'A', 'BATTLE', 'WHERE', 'VALOR', 'AND', 'FORTUNE', 'CHIEFLY', 'DETERMINED', 'THE', 'SUCCESS'] +8224-274381-0017-1468: hyp=['HISTORIANS', 'AT', 'LEAST', 'PERHAPS', 'FROM', 'THEIR', 'OWN', 'IGNORANCE', 'AND', 'INEXPERIENCE', 'HAVE', 'NOT', 'REMARKED', 'ANY', 'THING', 'BUT', 'A', 'HEADLONG', 'IMPETUOUS', 'CONDUCT', 'EACH', 'PARTY', 'HURRYING', 'TO', 'A', 'BATTLE', 'WHERE', 'VALOR', 'AND', 'FORTUNE', 'CHIEFLY', 'DETERMINE', 'THE', 'SUCCESS'] +8224-274384-0000-1437: ref=['HE', 'PASSED', 'THROUGH', 'HENLEY', 'SAINT', 'ALBANS', 'AND', 'CAME', 'SO', 'NEAR', 'TO', 'LONDON', 'AS', 'HARROW', 'ON', 'THE', 'HILL'] +8224-274384-0000-1437: hyp=['HE', 'PASSED', 'THROUGH', 'HENLEY', 'SAINT', "ALBAN'S", 'AND', 'CAME', 'SO', 'NEAR', 'TO', 'LONDON', 'AS', 'HARROW', 'ON', 'THE', 'HILL'] +8224-274384-0001-1438: ref=['THE', 'SCOTTISH', 'GENERALS', 'AND', 'COMMISSIONERS', 'AFFECTED', 'GREAT', 'SURPRISE', 'ON', 'THE', 'APPEARANCE', 'OF', 'THE', 'KING', 'AND', 'THOUGH', 'THEY', 'PAID', 'HIM', 'ALL', 'THE', 'EXTERIOR', 'RESPECT', 'DUE', 'TO', 'HIS', 'DIGNITY', 'THEY', 'INSTANTLY', 'SET', 'A', 'GUARD', 'UPON', 'HIM', 'UNDER', 'COLOR', 'OF', 'PROTECTION', 'AND', 'MADE', 'HIM', 'IN', 'REALITY', 'A', 'PRISONER'] +8224-274384-0001-1438: hyp=['THE', 'SCOTTISH', 'GENERALS', 'AND', 'COMMISSIONERS', 'AFFECTED', 'GREAT', 'SURPRISE', 'ON', 'THE', 'APPEARANCE', 'OF', 'THE', 'KING', 'AND', 'THOUGH', 'THEY', 'PAID', 'HIM', 'ALL', 'THE', 'EXTERIOR', 'RESPECT', 'DUE', 'TO', 'HIS', 'DIGNITY', 'THEY', 'INSTANTLY', 'SET', 'A', 'GUARD', 'UPON', 'HIM', 'UNDER', 'COLOR', 'OF', 'PROTECTION', 'AND', 'MADE', 'HIM', 'IN', 'REALITY', 'A', 'PRISONER'] +8224-274384-0002-1439: ref=['THEY', 'INFORMED', 'THE', 'ENGLISH', 'PARLIAMENT', 'OF', 'THIS', 'UNEXPECTED', 'INCIDENT', 'AND', 'ASSURED', 'THEM', 'THAT', 'THEY', 'HAD', 'ENTERED', 'INTO', 'NO', 'PRIVATE', 'TREATY', 'WITH', 'THE', 'KING'] +8224-274384-0002-1439: hyp=['THEY', 'INFORMED', 'THE', 'ENGLISH', 'PARLIAMENT', 'OF', 'THIS', 'UNEXPECTED', 'INCIDENT', 'AND', 'ASSURED', 'THEM', 'THAT', 'THEY', 'HAD', 'ENTERED', 'INTO', 'NO', 'PRIVATE', 'TREATY', 'WITH', 'THE', 'KING'] +8224-274384-0003-1440: ref=['OR', 'HATH', 'HE', 'GIVEN', 'US', 'ANY', 'GIFT'] +8224-274384-0003-1440: hyp=['OR', 'HATH', 'HE', 'GIVEN', 'US', 'ANY', 'GIFT'] +8224-274384-0004-1441: ref=['AND', 'THE', 'MEN', 'OF', 'ISRAEL', 'ANSWERED', 'THE', 'MEN', 'OF', 'JUDAH', 'AND', 'SAID', 'WE', 'HAVE', 'TEN', 'PARTS', 'IN', 'THE', 'KING', 'AND', 'WE', 'HAVE', 
'ALSO', 'MORE', 'RIGHT', 'IN', 'DAVID', 'THAN', 'YE', 'WHY', 'THEN', 'DID', 'YE', 'DESPISE', 'US', 'THAT', 'OUR', 'ADVICE', 'SHOULD', 'NOT', 'BE', 'FIRST', 'HAD', 'IN', 'BRINGING', 'BACK', 'OUR', 'KING'] +8224-274384-0004-1441: hyp=['AND', 'THE', 'MEN', 'OF', 'ISRAEL', 'ANSWERED', 'THE', 'MEN', 'OF', 'JUDAH', 'AND', 'SAID', 'WE', 'HAVE', 'TEN', 'PARTS', 'IN', 'THE', 'KING', 'AND', 'WE', 'HAVE', 'ALSO', 'MORE', 'RIGHT', 'IN', 'DAVID', 'THAN', 'YE', 'WHY', 'THEN', 'DID', 'YE', 'DESPISE', 'US', 'THAT', 'OUR', 'ADVICE', 'SHOULD', 'NOT', 'BE', 'FIRST', 'HAD', 'IN', 'BRINGING', 'BACK', 'OUR', 'KING'] +8224-274384-0005-1442: ref=['ANOTHER', 'PREACHER', 'AFTER', 'REPROACHING', 'HIM', 'TO', 'HIS', 'FACE', 'WITH', 'HIS', 'MISGOVERNMENT', 'ORDERED', 'THIS', 'PSALM', 'TO', 'BE', 'SUNG'] +8224-274384-0005-1442: hyp=['ANOTHER', 'PREACHER', 'AFTER', 'REPROACHING', 'HIM', 'TO', 'HIS', 'FACE', 'WITH', 'HIS', 'MISGOVERNMENT', 'ORDERED', 'THIS', 'SUM', 'TO', 'BE', 'SUNG'] +8224-274384-0006-1443: ref=['THE', 'KING', 'STOOD', 'UP', 'AND', 'CALLED', 'FOR', 'THAT', 'PSALM', 'WHICH', 'BEGINS', 'WITH', 'THESE', 'WORDS'] +8224-274384-0006-1443: hyp=['THE', 'KING', 'STOOD', 'UP', 'AND', 'CALLED', 'FOR', 'THAT', 'PSALM', 'WHICH', 'BEGINS', 'WITH', 'THESE', 'WORDS'] +8224-274384-0007-1444: ref=['HAVE', 'MERCY', 'LORD', 'ON', 'ME', 'I', 'PRAY', 'FOR', 'MEN', 'WOULD', 'ME', 'DEVOUR'] +8224-274384-0007-1444: hyp=['HAVE', 'MERCY', 'LORD', 'ON', 'ME', 'I', 'PRAY', 'FOR', 'MEN', 'WOULD', 'ME', 'DEVOUR'] +8224-274384-0008-1445: ref=['THE', 'GOOD', 'NATURED', 'AUDIENCE', 'IN', 'PITY', 'TO', 'FALLEN', 'MAJESTY', 'SHOWED', 'FOR', 'ONCE', 'GREATER', 'DEFERENCE', 'TO', 'THE', 'KING', 'THAN', 'TO', 'THE', 'MINISTER', 'AND', 'SUNG', 'THE', 'PSALM', 'WHICH', 'THE', 'FORMER', 'HAD', 'CALLED', 'FOR'] +8224-274384-0008-1445: hyp=['THE', 'GOOD', 'NATURED', 'AUDIENCE', 'IN', 'PITY', 'TO', 'FALL', 'AND', 'MAJESTY', 'SHOWED', 'FOR', 'ONCE', 'GREATER', 'DEFERENCE', 'TO', 'THE', 'KING', 'THAN', 'TO', 'THE', 'MINISTER', 'AND', 'SUNG', 'THE', 'PSALM', 'WHICH', 'THE', 'FORMER', 'HAD', 'CALLED', 'FOR'] +8224-274384-0009-1446: ref=['THE', 'PARLIAMENT', 'AND', 'THE', 'SCOTS', 'LAID', 'THEIR', 'PROPOSALS', 'BEFORE', 'THE', 'KING'] +8224-274384-0009-1446: hyp=['THE', 'PARLIAMENT', 'AND', 'THE', 'SCOTS', 'LAID', 'THEIR', 'PROPOSALS', 'BEFORE', 'THE', 'KING'] +8224-274384-0010-1447: ref=['BEFORE', 'THE', 'SETTLEMENT', 'OF', 'TERMS', 'THE', 'ADMINISTRATION', 'MUST', 'BE', 'POSSESSED', 'ENTIRELY', 'BY', 'THE', 'PARLIAMENTS', 'OF', 'BOTH', 'KINGDOMS', 'AND', 'HOW', 'INCOMPATIBLE', 'THAT', 'SCHEME', 'WITH', 'THE', 'LIBERTY', 'OF', 'THE', 'KING', 'IS', 'EASILY', 'IMAGINED'] +8224-274384-0010-1447: hyp=['BEFORE', 'THE', 'SETTLEMENT', 'OF', 'TERMS', 'THE', 'ADMINISTRATION', 'MUST', 'BE', 'POSSESSED', 'ENTIRELY', 'BY', 'THE', 'PARLIAMENTS', 'OF', 'BOTH', 'KINGDOMS', 'AND', 'HOW', 'INCOMPATIBLE', 'THAT', 'SCHEME', 'WITH', 'THE', 'LIBERTY', 'OF', 'THE', 'KING', 'IS', 'EASILY', 'IMAGINED'] +8224-274384-0011-1448: ref=['THE', 'ENGLISH', 'IT', 'IS', 'EVIDENT', 'HAD', 'THEY', 'NOT', 'BEEN', 'PREVIOUSLY', 'ASSURED', 'OF', 'RECEIVING', 'THE', 'KING', 'WOULD', 'NEVER', 'HAVE', 'PARTED', 'WITH', 'SO', 'CONSIDERABLE', 'A', 'SUM', 'AND', 'WHILE', 'THEY', 'WEAKENED', 'THEMSELVES', 'BY', 'THE', 'SAME', 'MEASURE', 'HAVE', 'STRENGTHENED', 'A', 'PEOPLE', 'WITH', 'WHOM', 'THEY', 'MUST', 'AFTERWARDS', 'HAVE', 'SO', 'MATERIAL', 'AN', 'INTEREST', 'TO', 'DISCUSS'] +8224-274384-0011-1448: hyp=['THE', 'ENGLISH', 'IT', 'IS', 'EVIDENT', 'HAD', 'THEY', 'NOT', 'BEEN', 'PREVIOUSLY', 
'ASSURED', 'OF', 'RECEIVING', 'THE', 'KING', 'WOULD', 'NEVER', 'HAVE', 'PARTED', 'WITH', 'SO', 'CONSIDERABLE', 'A', 'SUM', 'AND', 'WHILE', 'THEY', 'WEAKENED', 'THEMSELVES', 'BY', 'THE', 'SAME', 'MEASURE', 'HAVE', 'STRENGTHENED', 'A', 'PEOPLE', 'WITH', 'WHOM', 'THEY', 'MUST', 'AFTERWARDS', 'HAVE', 'SO', 'MATERIAL', 'AN', 'INTEREST', 'TO', 'DISCUSS'] +8224-274384-0012-1449: ref=['IF', 'ANY', 'STILL', 'RETAINED', 'RANCOR', 'AGAINST', 'HIM', 'IN', 'HIS', 'PRESENT', 'CONDITION', 'THEY', 'PASSED', 'IN', 'SILENCE', 'WHILE', 'HIS', 'WELL', 'WISHERS', 'MORE', 'GENEROUS', 'THAN', 'PRUDENT', 'ACCOMPANIED', 'HIS', 'MARCH', 'WITH', 'TEARS', 'WITH', 'ACCLAMATIONS', 'AND', 'WITH', 'PRAYERS', 'FOR', 'HIS', 'SAFETY'] +8224-274384-0012-1449: hyp=['IF', 'ANY', 'STILL', 'RETAINED', 'RANK', 'OR', 'AGAINST', 'HIM', 'IN', 'HIS', 'PRESENT', 'CONDITION', 'THEY', 'PASSED', 'IN', 'SILENCE', 'WHILE', 'HIS', 'WELL', 'WISHERS', 'MORE', 'GENEROUS', 'THAN', 'PRUDENT', 'ACCOMPANIED', 'HIS', 'MARCH', 'WITH', 'TEARS', 'WITH', 'ACCLAMATIONS', 'AND', 'WITH', 'PRAYERS', 'FOR', 'HIS', 'SAFETY'] +8224-274384-0013-1450: ref=['HIS', 'DEATH', 'IN', 'THIS', 'CONJUNCTURE', 'WAS', 'A', 'PUBLIC', 'MISFORTUNE'] +8224-274384-0013-1450: hyp=['HIS', 'DEATH', 'IN', 'THIS', 'CONJUNCTURE', 'WAS', 'A', 'PUBLIC', 'MISFORTUNE'] +8230-279154-0000-617: ref=['THE', 'ANALYSIS', 'OF', 'KNOWLEDGE', 'WILL', 'OCCUPY', 'US', 'UNTIL', 'THE', 'END', 'OF', 'THE', 'THIRTEENTH', 'LECTURE', 'AND', 'IS', 'THE', 'MOST', 'DIFFICULT', 'PART', 'OF', 'OUR', 'WHOLE', 'ENTERPRISE'] +8230-279154-0000-617: hyp=['THE', 'ANALYSIS', 'OF', 'KNOWLEDGE', 'WILL', 'OCCUPY', 'US', 'UNTIL', 'THE', 'END', 'OF', 'THE', 'THIRTEENTH', 'LECTURE', 'AND', 'IS', 'THE', 'MOST', 'DIFFICULT', 'PART', 'OF', 'OUR', 'WHOLE', 'ENTERPRISE'] +8230-279154-0001-618: ref=['WHAT', 'IS', 'CALLED', 'PERCEPTION', 'DIFFERS', 'FROM', 'SENSATION', 'BY', 'THE', 'FACT', 'THAT', 'THE', 'SENSATIONAL', 'INGREDIENTS', 'BRING', 'UP', 'HABITUAL', 'ASSOCIATES', 'IMAGES', 'AND', 'EXPECTATIONS', 'OF', 'THEIR', 'USUAL', 'CORRELATES', 'ALL', 'OF', 'WHICH', 'ARE', 'SUBJECTIVELY', 'INDISTINGUISHABLE', 'FROM', 'THE', 'SENSATION'] +8230-279154-0001-618: hyp=['WHAT', 'IS', 'CALLED', 'PERCEPTION', 'DIFFERS', 'FROM', 'SENSATION', 'BY', 'THE', 'FACT', 'THAT', 'THE', 'SENSATIONAL', 'INGREDIENTS', 'BRING', 'UP', 'HABITUAL', 'ASSOCIATES', 'IMAGES', 'AND', 'EXPECTATIONS', 'OF', 'THEIR', 'USUAL', 'COROLLETS', 'ALL', 'OF', 'WHICH', 'ARE', 'SUBJECTIVELY', 'INDISTINGUISHABLE', 'FROM', 'THE', 'SENSATION'] +8230-279154-0002-619: ref=['WHETHER', 'OR', 'NOT', 'THIS', 'PRINCIPLE', 'IS', 'LIABLE', 'TO', 'EXCEPTIONS', 'EVERYONE', 'WOULD', 'AGREE', 'THAT', 'IS', 'HAS', 'A', 'BROAD', 'MEASURE', 'OF', 'TRUTH', 'THOUGH', 'THE', 'WORD', 'EXACTLY', 'MIGHT', 'SEEM', 'AN', 'OVERSTATEMENT', 'AND', 'IT', 'MIGHT', 'SEEM', 'MORE', 'CORRECT', 'TO', 'SAY', 'THAT', 'IDEAS', 'APPROXIMATELY', 'REPRESENT', 'IMPRESSIONS'] +8230-279154-0002-619: hyp=['WHETHER', 'OR', 'NOT', 'THIS', 'PRINCIPLE', 'IS', 'LIABLE', 'TO', 'EXCEPTIONS', 'EVERY', 'ONE', 'WOULD', 'AGREE', 'THAT', 'IT', 'HAS', 'A', 'BROAD', 'MEASURE', 'OF', 'TRUTH', 'THOUGH', 'THE', 'WORD', 'EXACTLY', 'MIGHT', 'SEEM', 'AN', 'OVERSTATEMENT', 'AND', 'IT', 'MIGHT', 'SEEM', 'MORE', 'CORRECT', 'TO', 'SAY', 'THAT', 'IDEAS', 'APPROXIMATELY', 'REPRESENT', 'IMPRESSIONS'] +8230-279154-0003-620: ref=['AND', 'WHAT', 'SORT', 'OF', 'EVIDENCE', 'IS', 'LOGICALLY', 'POSSIBLE'] +8230-279154-0003-620: hyp=['AND', 'WHAT', 'SORT', 'OF', 'EVIDENCE', 'IS', 'LOGICALLY', 'POSSIBLE'] +8230-279154-0004-621: ref=['THERE', 'IS', 
'NO', 'LOGICAL', 'IMPOSSIBILITY', 'IN', 'THE', 'HYPOTHESIS', 'THAT', 'THE', 'WORLD', 'SPRANG', 'INTO', 'BEING', 'FIVE', 'MINUTES', 'AGO', 'EXACTLY', 'AS', 'IT', 'THEN', 'WAS', 'WITH', 'A', 'POPULATION', 'THAT', 'REMEMBERED', 'A', 'WHOLLY', 'UNREAL', 'PAST'] +8230-279154-0004-621: hyp=['THERE', 'IS', 'NO', 'LOGICAL', 'IMPOSSIBILITY', 'IN', 'THE', 'HYPOTHESIS', 'THAT', 'THE', 'WORLD', 'SPRANG', 'INTO', 'BEING', 'FIVE', 'MINUTES', 'AGO', 'EXACTLY', 'AS', 'IT', 'THEN', 'WAS', 'WITH', 'A', 'POPULATION', 'THAT', 'REMEMBERED', 'A', 'WHOLLY', 'UNREAL', 'PAST'] +8230-279154-0005-622: ref=['ALL', 'THAT', 'I', 'AM', 'DOING', 'IS', 'TO', 'USE', 'ITS', 'LOGICAL', 'TENABILITY', 'AS', 'A', 'HELP', 'IN', 'THE', 'ANALYSIS', 'OF', 'WHAT', 'OCCURS', 'WHEN', 'WE', 'REMEMBER'] +8230-279154-0005-622: hyp=['ALL', 'THAT', 'I', 'AM', 'DOING', 'IS', 'TO', 'USE', 'ITS', 'LOGICAL', 'TENABILITY', 'AS', 'A', 'HELP', 'IN', 'THE', 'ANALYSIS', 'OF', 'WHAT', 'OCCURS', 'WHEN', 'WE', 'REMEMBER'] +8230-279154-0006-623: ref=['THE', 'BEHAVIOURIST', 'WHO', 'ATTEMPTS', 'TO', 'MAKE', 'PSYCHOLOGY', 'A', 'RECORD', 'OF', 'BEHAVIOUR', 'HAS', 'TO', 'TRUST', 'HIS', 'MEMORY', 'IN', 'MAKING', 'THE', 'RECORD'] +8230-279154-0006-623: hyp=['THE', 'BEHAVIOURIST', 'WHO', 'ATTEMPTS', 'TO', 'MAKE', 'PSYCHOLOGY', 'A', 'RECORD', 'OF', 'BEHAVIOR', 'HAS', 'TO', 'TRUST', 'HIS', 'MEMORY', 'IN', 'MAKING', 'THE', 'RECORD'] +8230-279154-0007-624: ref=['HABIT', 'IS', 'A', 'CONCEPT', 'INVOLVING', 'THE', 'OCCURRENCE', 'OF', 'SIMILAR', 'EVENTS', 'AT', 'DIFFERENT', 'TIMES', 'IF', 'THE', 'BEHAVIOURIST', 'FEELS', 'CONFIDENT', 'THAT', 'THERE', 'IS', 'SUCH', 'A', 'PHENOMENON', 'AS', 'HABIT', 'THAT', 'CAN', 'ONLY', 'BE', 'BECAUSE', 'HE', 'TRUSTS', 'HIS', 'MEMORY', 'WHEN', 'IT', 'ASSURES', 'HIM', 'THAT', 'THERE', 'HAVE', 'BEEN', 'OTHER', 'TIMES'] +8230-279154-0007-624: hyp=['HABIT', 'IS', 'A', 'CONCEPT', 'INVOLVING', 'THE', 'OCCURRENCE', 'OF', 'SIMILAR', 'EVENTS', 'AT', 'DIFFERENT', 'TIMES', 'IF', 'THE', 'BEHAVIOURIST', 'FILLS', 'CONFIDENT', 'THAT', 'THERE', 'IS', 'SUCH', 'A', 'PHENOMENON', 'AS', 'HABIT', 'THAT', 'CAN', 'ONLY', 'BE', 'BECAUSE', 'HE', 'TRUSTS', 'HIS', 'MEMORY', 'WHEN', 'IT', 'ASSURES', 'HIM', 'THAT', 'THERE', 'HAVE', 'BEEN', 'OTHER', 'TIMES'] +8230-279154-0008-625: ref=['BUT', 'I', 'DO', 'NOT', 'THINK', 'SUCH', 'AN', 'INFERENCE', 'IS', 'WARRANTED'] +8230-279154-0008-625: hyp=['BUT', 'I', 'DO', 'NOT', 'THINK', 'SUCH', 'AN', 'EFFERENCE', 'IS', 'WARRANTED'] +8230-279154-0009-626: ref=['OUR', 'CONFIDENCE', 'OR', 'LACK', 'OF', 'CONFIDENCE', 'IN', 'THE', 'ACCURACY', 'OF', 'A', 'MEMORY', 'IMAGE', 'MUST', 'IN', 'FUNDAMENTAL', 'CASES', 'BE', 'BASED', 'UPON', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'IMAGE', 'ITSELF', 'SINCE', 'WE', 'CANNOT', 'EVOKE', 'THE', 'PAST', 'BODILY', 'AND', 'COMPARE', 'IT', 'WITH', 'THE', 'PRESENT', 'IMAGE'] +8230-279154-0009-626: hyp=['OUR', 'CONFIDENCE', 'OR', 'LACK', 'OF', 'CONFIDENCE', 'IN', 'THE', 'ACCURACY', 'OF', 'A', 'MEMORY', 'IMAGE', 'MUST', 'IN', 'FUNDAMENTAL', 'CASES', 'BE', 'BASED', 'UPON', 'A', 'CHARACTERISTIC', 'OF', 'THE', 'IMAGE', 'ITSELF', 'SINCE', 'WE', 'CANNOT', 'EVOKE', 'THE', 'PAST', 'BODILY', 'AND', 'COMPARE', 'IT', 'WITH', 'THE', 'PRESENT', 'IMAGE'] +8230-279154-0010-627: ref=['WE', 'SOMETIMES', 'HAVE', 'IMAGES', 'THAT', 'ARE', 'BY', 'NO', 'MEANS', 'PECULIARLY', 'VAGUE', 'WHICH', 'YET', 'WE', 'DO', 'NOT', 'TRUST', 'FOR', 'EXAMPLE', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'FATIGUE', 'WE', 'MAY', 'SEE', 'A', "FRIEND'S", 'FACE', 'VIVIDLY', 'AND', 'CLEARLY', 'BUT', 'HORRIBLY', 'DISTORTED'] +8230-279154-0010-627: hyp=['WE', 
'SOMETIMES', 'HAVE', 'IMAGES', 'THAT', 'ARE', 'BY', 'NO', 'MEANS', 'PECULIARLY', 'VAGUE', 'WHICH', 'YET', 'WE', 'DO', 'NOT', 'TRUST', 'FOR', 'EXAMPLE', 'UNDER', 'THE', 'INFLUENCE', 'OF', 'FATIGUE', 'WE', 'MAY', 'SEE', 'A', "FRIEND'S", 'FACE', 'VIVIDLY', 'AND', 'CLEARLY', 'BUT', 'HORRIBLY', 'DISTORTED'] +8230-279154-0011-628: ref=['SOME', 'IMAGES', 'LIKE', 'SOME', 'SENSATIONS', 'FEEL', 'VERY', 'FAMILIAR', 'WHILE', 'OTHERS', 'FEEL', 'STRANGE'] +8230-279154-0011-628: hyp=['SOME', 'IMAGES', 'LIKE', 'SOME', 'SENSATIONS', 'FEEL', 'VERY', 'FAMILIAR', 'WHILE', 'OTHERS', 'FEEL', 'STRANGE'] +8230-279154-0012-629: ref=['FAMILIARITY', 'IS', 'A', 'FEELING', 'CAPABLE', 'OF', 'DEGREES'] +8230-279154-0012-629: hyp=['FAMILIARITY', 'IS', 'A', 'FILLING', 'CAPABLE', 'OF', 'DEGREES'] +8230-279154-0013-630: ref=['IN', 'AN', 'IMAGE', 'OF', 'A', 'WELL', 'KNOWN', 'FACE', 'FOR', 'EXAMPLE', 'SOME', 'PARTS', 'MAY', 'FEEL', 'MORE', 'FAMILIAR', 'THAN', 'OTHERS', 'WHEN', 'THIS', 'HAPPENS', 'WE', 'HAVE', 'MORE', 'BELIEF', 'IN', 'THE', 'ACCURACY', 'OF', 'THE', 'FAMILIAR', 'PARTS', 'THAN', 'IN', 'THAT', 'OF', 'THE', 'UNFAMILIAR', 'PARTS'] +8230-279154-0013-630: hyp=['IN', 'AN', 'IMAGE', 'OF', 'A', 'WELL', 'KNOWN', 'FACE', 'FOR', 'EXAMPLE', 'SOME', 'PARTS', 'MAY', 'FEEL', 'MORE', 'FAMILIAR', 'THAN', 'OTHERS', 'WHEN', 'THIS', 'HAPPENS', 'WE', 'HAVE', 'MORE', 'BELIEF', 'IN', 'THE', 'ACCURACY', 'OF', 'THE', 'FAMILIAR', 'PARTS', 'THAN', 'IN', 'THAT', 'OF', 'THE', 'UNFAMILIAR', 'PARTS'] +8230-279154-0014-631: ref=['I', 'COME', 'NOW', 'TO', 'THE', 'OTHER', 'CHARACTERISTIC', 'WHICH', 'MEMORY', 'IMAGES', 'MUST', 'HAVE', 'IN', 'ORDER', 'TO', 'ACCOUNT', 'FOR', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0014-631: hyp=['I', 'COME', 'NOW', 'TO', 'THE', 'OTHER', 'CHARACTERISTIC', 'WHICH', 'MEMORY', 'IMAGES', 'MUST', 'HAVE', 'IN', 'ORDER', 'TO', 'ACCOUNT', 'FOR', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0015-632: ref=['THEY', 'MUST', 'HAVE', 'SOME', 'CHARACTERISTIC', 'WHICH', 'MAKES', 'US', 'REGARD', 'THEM', 'AS', 'REFERRING', 'TO', 'MORE', 'OR', 'LESS', 'REMOTE', 'PORTIONS', 'OF', 'THE', 'PAST'] +8230-279154-0015-632: hyp=['THEY', 'MUST', 'HAVE', 'SOME', 'CHARACTERISTIC', 'WHICH', 'MAKES', 'US', 'REGARD', 'THEM', 'AS', 'REFERRING', 'TO', 'MORE', 'OR', 'LESS', 'REMOTE', 'PORTIONS', 'OF', 'THE', 'PAST'] +8230-279154-0016-633: ref=['IN', 'ACTUAL', 'FACT', 'THERE', 'ARE', 'DOUBTLESS', 'VARIOUS', 'FACTORS', 'THAT', 'CONCUR', 'IN', 'GIVING', 'US', 'THE', 'FEELING', 'OF', 'GREATER', 'OR', 'LESS', 'REMOTENESS', 'IN', 'SOME', 'REMEMBERED', 'EVENT'] +8230-279154-0016-633: hyp=['IN', 'ACTUAL', 'FACT', 'THERE', 'ARE', 'DOUBTLESS', 'VARIOUS', 'FACTORS', 'THAT', 'CONCUR', 'IN', 'GIVING', 'US', 'THE', 'FEELING', 'OF', 'GREATER', 'OR', 'LESS', 'REMOTENESS', 'IN', 'SOME', 'REMEMBERED', 'EVENT'] +8230-279154-0017-634: ref=['THERE', 'MAY', 'BE', 'A', 'SPECIFIC', 'FEELING', 'WHICH', 'COULD', 'BE', 'CALLED', 'THE', 'FEELING', 'OF', 'PASTNESS', 'ESPECIALLY', 'WHERE', 'IMMEDIATE', 'MEMORY', 'IS', 'CONCERNED'] +8230-279154-0017-634: hyp=['THERE', 'MAY', 'BE', 'A', 'SPECIFIC', 'FEELING', 'WHICH', 'COULD', 'BE', 'CALLED', 'THE', 'FILLING', 'OF', 'PASTNESS', 'ESPECIALLY', 'WHERE', 'IMMEDIATE', 'MEMORY', 'IS', 'CONCERNED'] +8230-279154-0018-635: ref=['THERE', 'IS', 'OF', 'COURSE', 'A', 'DIFFERENCE', 'BETWEEN', 'KNOWING', 'THE', 'TEMPORAL', 'RELATION', 'OF', 'A', 'REMEMBERED', 'EVENT', 'TO', 'THE', 'PRESENT', 'AND', 'KNOWING', 'THE', 'TIME', 'ORDER', 'OF', 'TWO', 'REMEMBERED', 'EVENTS'] +8230-279154-0018-635: hyp=['THERE', 'IS', 'OF', 
'COURSE', 'A', 'DIFFERENCE', 'BETWEEN', 'KNOWING', 'THE', 'TEMPORAL', 'RELATION', 'OF', 'A', 'REMEMBERED', 'EVENT', 'TO', 'THE', 'PRESENT', 'AND', 'KNOWING', 'THE', 'TIME', 'ORDER', 'OF', 'TWO', 'REMEMBERED', 'EVENTS'] +8230-279154-0019-636: ref=['IT', 'WOULD', 'SEEM', 'THAT', 'ONLY', 'RATHER', 'RECENT', 'EVENTS', 'CAN', 'BE', 'PLACED', 'AT', 'ALL', 'ACCURATELY', 'BY', 'MEANS', 'OF', 'FEELINGS', 'GIVING', 'THEIR', 'TEMPORAL', 'RELATION', 'TO', 'THE', 'PRESENT', 'BUT', 'IT', 'IS', 'CLEAR', 'THAT', 'SUCH', 'FEELINGS', 'MUST', 'PLAY', 'AN', 'ESSENTIAL', 'PART', 'IN', 'THE', 'PROCESS', 'OF', 'DATING', 'REMEMBERED', 'EVENTS'] +8230-279154-0019-636: hyp=['IT', 'WOULD', 'SEEM', 'THAT', 'ONLY', 'RATHER', 'RECENT', 'EVENTS', 'CAN', 'BE', 'PLACED', 'AT', 'ALL', 'ACCURATELY', 'BY', 'MEANS', 'OF', 'FEELINGS', 'GIVING', 'THEIR', 'TEMPORAL', 'RELATION', 'TO', 'THE', 'PRESENT', 'BUT', 'IT', 'IS', 'CLEAR', 'THAT', 'SUCH', 'FEELINGS', 'MUST', 'PLAY', 'AN', 'ESSENTIAL', 'PART', 'IN', 'THE', 'PROCESS', 'OF', 'DATING', 'REMEMBERED', 'EVENTS'] +8230-279154-0020-637: ref=['IF', 'WE', 'HAD', 'RETAINED', 'THE', 'SUBJECT', 'OR', 'ACT', 'IN', 'KNOWLEDGE', 'THE', 'WHOLE', 'PROBLEM', 'OF', 'MEMORY', 'WOULD', 'HAVE', 'BEEN', 'COMPARATIVELY', 'SIMPLE'] +8230-279154-0020-637: hyp=['IF', 'WE', 'HAD', 'RETAINED', 'THE', 'SUBJECT', 'OR', 'ACT', 'IN', 'KNOWLEDGE', 'THE', 'WHOLE', 'PROBLEM', 'OF', 'MEMORY', 'WOULD', 'HAVE', 'BEEN', 'COMPARATIVELY', 'SIMPLE'] +8230-279154-0021-638: ref=['REMEMBERING', 'HAS', 'TO', 'BE', 'A', 'PRESENT', 'OCCURRENCE', 'IN', 'SOME', 'WAY', 'RESEMBLING', 'OR', 'RELATED', 'TO', 'WHAT', 'IS', 'REMEMBERED'] +8230-279154-0021-638: hyp=['REMEMBERING', 'HAS', 'TO', 'BE', 'A', 'PRESENT', 'OCCURRENCE', 'IN', 'SOME', 'WAY', 'RESEMBLING', 'OR', 'RELATED', 'TO', 'WHAT', 'IS', 'REMEMBERED'] +8230-279154-0022-639: ref=['SOME', 'POINTS', 'MAY', 'BE', 'TAKEN', 'AS', 'FIXED', 'AND', 'SUCH', 'AS', 'ANY', 'THEORY', 'OF', 'MEMORY', 'MUST', 'ARRIVE', 'AT'] +8230-279154-0022-639: hyp=['SOME', 'POINTS', 'MAY', 'BE', 'TAKEN', 'AS', 'FIXED', 'AND', 'SUCH', 'AS', 'ANY', 'THEORY', 'OF', 'MEMORY', 'MUST', 'ARRIVE', 'AT'] +8230-279154-0023-640: ref=['IN', 'THIS', 'CASE', 'AS', 'IN', 'MOST', 'OTHERS', 'WHAT', 'MAY', 'BE', 'TAKEN', 'AS', 'CERTAIN', 'IN', 'ADVANCE', 'IS', 'RATHER', 'VAGUE'] +8230-279154-0023-640: hyp=['IN', 'THIS', 'CASE', 'AS', 'IN', 'MOST', 'OTHERS', 'WHAT', 'MAY', 'BE', 'TAKEN', 'AS', 'CERTAIN', 'IN', 'ADVANCE', 'IS', 'RATHER', 'VAGUE'] +8230-279154-0024-641: ref=['THE', 'FIRST', 'OF', 'OUR', 'VAGUE', 'BUT', 'INDUBITABLE', 'DATA', 'IS', 'THAT', 'THERE', 'IS', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0024-641: hyp=['THE', 'FIRST', 'OF', 'OUR', 'VAGUE', 'BUT', 'INDUBITABLE', 'DATA', 'IS', 'THAT', 'THERE', 'IS', 'KNOWLEDGE', 'OF', 'THE', 'PAST'] +8230-279154-0025-642: ref=['WE', 'MIGHT', 'PROVISIONALLY', 'THOUGH', 'PERHAPS', 'NOT', 'QUITE', 'CORRECTLY', 'DEFINE', 'MEMORY', 'AS', 'THAT', 'WAY', 'OF', 'KNOWING', 'ABOUT', 'THE', 'PAST', 'WHICH', 'HAS', 'NO', 'ANALOGUE', 'IN', 'OUR', 'KNOWLEDGE', 'OF', 'THE', 'FUTURE', 'SUCH', 'A', 'DEFINITION', 'WOULD', 'AT', 'LEAST', 'SERVE', 'TO', 'MARK', 'THE', 'PROBLEM', 'WITH', 'WHICH', 'WE', 'ARE', 'CONCERNED', 'THOUGH', 'SOME', 'EXPECTATIONS', 'MAY', 'DESERVE', 'TO', 'RANK', 'WITH', 'MEMORY', 'AS', 'REGARDS', 'IMMEDIACY'] +8230-279154-0025-642: hyp=['WE', 'MIGHT', 'PROVISIONALLY', 'THOUGH', 'PERHAPS', 'NOT', 'QUITE', 'CORRECTLY', 'DEFINE', 'MEMORY', 'AS', 'THAT', 'WAY', 'OF', 'KNOWING', 'ABOUT', 'THE', 'PAST', 'WHICH', 'HAS', 'NO', 'ANALOGUE', 'IN', 'OUR', 
'KNOWLEDGE', 'OF', 'THE', 'FUTURE', 'SUCH', 'A', 'DEFINITION', 'WOULD', 'AT', 'LEAST', 'SERVE', 'TO', 'MARK', 'THE', 'PROBLEM', 'WITH', 'WHICH', 'WE', 'ARE', 'CONCERNED', 'THOUGH', 'SOME', 'EXPECTATIONS', 'MAY', 'DESERVE', 'TO', 'RANK', 'WITH', 'MEMORY', 'AS', 'REGARDS', 'IMMEDIACY'] +8230-279154-0026-643: ref=['THIS', 'DISTINCTION', 'IS', 'VITAL', 'TO', 'THE', 'UNDERSTANDING', 'OF', 'MEMORY', 'BUT', 'IT', 'IS', 'NOT', 'SO', 'EASY', 'TO', 'CARRY', 'OUT', 'IN', 'PRACTICE', 'AS', 'IT', 'IS', 'TO', 'DRAW', 'IN', 'THEORY'] +8230-279154-0026-643: hyp=['THIS', 'DISTINCTION', 'IS', 'VITAL', 'TO', 'THE', 'UNDERSTANDING', 'OF', 'MEMORY', 'BUT', 'IT', 'IS', 'NOT', 'SO', 'EASY', 'TO', 'CARRY', 'OUT', 'IN', 'PRACTICE', 'AS', 'IT', 'IS', 'TO', 'DRAW', 'IN', 'THEORY'] +8230-279154-0027-644: ref=['A', 'GRAMOPHONE', 'BY', 'THE', 'HELP', 'OF', 'SUITABLE', 'RECORDS', 'MIGHT', 'RELATE', 'TO', 'US', 'THE', 'INCIDENTS', 'OF', 'ITS', 'PAST', 'AND', 'PEOPLE', 'ARE', 'NOT', 'SO', 'DIFFERENT', 'FROM', 'GRAMOPHONES', 'AS', 'THEY', 'LIKE', 'TO', 'BELIEVE'] +8230-279154-0027-644: hyp=['A', 'GRAMOPHONE', 'BY', 'THE', 'HELP', 'OF', 'SUITABLE', 'RECORDS', 'MIGHT', 'RELATE', 'TO', 'US', 'THE', 'INCIDENTS', 'OF', 'ITS', 'PAST', 'AND', 'PEOPLE', 'ARE', 'NOT', 'SO', 'DIFFERENT', 'FROM', 'GRAMOPHONES', 'AS', 'THEY', 'LIKE', 'TO', 'BELIEVE'] +8230-279154-0028-645: ref=['I', 'CAN', 'SET', 'TO', 'WORK', 'NOW', 'TO', 'REMEMBER', 'THINGS', 'I', 'NEVER', 'REMEMBERED', 'BEFORE', 'SUCH', 'AS', 'WHAT', 'I', 'HAD', 'TO', 'EAT', 'FOR', 'BREAKFAST', 'THIS', 'MORNING', 'AND', 'IT', 'CAN', 'HARDLY', 'BE', 'WHOLLY', 'HABIT', 'THAT', 'ENABLES', 'ME', 'TO', 'DO', 'THIS'] +8230-279154-0028-645: hyp=['I', 'CAN', 'SET', 'TO', 'WORK', 'NOW', 'TO', 'REMEMBER', 'THINGS', 'I', 'NEVER', 'REMEMBERED', 'BEFORE', 'SUCH', 'AS', 'WHAT', 'I', 'HAD', 'TO', 'EAT', 'FOR', 'BREAKFAST', 'THIS', 'MORNING', 'AND', 'IT', 'CAN', 'HARDLY', 'BE', 'WHOLLY', 'HABIT', 'THAT', 'ENABLES', 'ME', 'TO', 'DO', 'THIS'] +8230-279154-0029-646: ref=['THE', 'FACT', 'THAT', 'A', 'MAN', 'CAN', 'RECITE', 'A', 'POEM', 'DOES', 'NOT', 'SHOW', 'THAT', 'HE', 'REMEMBERS', 'ANY', 'PREVIOUS', 'OCCASION', 'ON', 'WHICH', 'HE', 'HAS', 'RECITED', 'OR', 'READ', 'IT'] +8230-279154-0029-646: hyp=['THE', 'FACT', 'THAT', 'A', 'MAN', 'CAN', 'RECITE', 'A', 'POEM', 'DOES', 'NOT', 'SHOW', 'THAT', 'HE', 'REMEMBERS', 'ANY', 'PREVIOUS', 'OCCASION', 'ON', 'WHICH', 'HE', 'HAS', 'RECITED', 'OR', 'READ', 'IT'] +8230-279154-0030-647: ref=["SEMON'S", 'TWO', 'BOOKS', 'MENTIONED', 'IN', 'AN', 'EARLIER', 'LECTURE', 'DO', 'NOT', 'TOUCH', 'KNOWLEDGE', 'MEMORY', 'AT', 'ALL', 'CLOSELY'] +8230-279154-0030-647: hyp=['SIMMONS', 'TWO', 'BOOKS', 'MENTIONED', 'IN', 'AN', 'EARLIER', 'LECTURE', 'DO', 'NOT', 'TOUCH', 'KNOWLEDGE', 'MEMORY', 'AT', 'ALL', 'CLOSELY'] +8230-279154-0031-648: ref=['THEY', 'GIVE', 'LAWS', 'ACCORDING', 'TO', 'WHICH', 'IMAGES', 'OF', 'PAST', 'OCCURRENCES', 'COME', 'INTO', 'OUR', 'MINDS', 'BUT', 'DO', 'NOT', 'DISCUSS', 'OUR', 'BELIEF', 'THAT', 'THESE', 'IMAGES', 'REFER', 'TO', 'PAST', 'OCCURRENCES', 'WHICH', 'IS', 'WHAT', 'CONSTITUTES', 'KNOWLEDGE', 'MEMORY'] +8230-279154-0031-648: hyp=['THEY', 'GIVE', 'LAWS', 'ACCORDING', 'TO', 'WHICH', 'IMAGES', 'OF', 'PAST', 'OCCURRENCES', 'COME', 'INTO', 'OUR', 'MINDS', 'BUT', 'DO', 'NOT', 'DISCUSS', 'OUR', 'BELIEF', 'THAT', 'THESE', 'IMAGES', 'REFER', 'TO', 'PAST', 'OCCURRENCES', 'WHICH', 'IS', 'WHAT', 'CONSTITUTES', 'KNOWLEDGE', 'MEMORY'] +8230-279154-0032-649: ref=['IT', 'IS', 'THIS', 'THAT', 'IS', 'OF', 'INTEREST', 'TO', 'THEORY', 'OF', 'KNOWLEDGE'] 
+8230-279154-0032-649: hyp=['IT', 'IS', 'THIS', 'THAT', 'IS', 'OF', 'INTEREST', 'TO', 'THEORY', 'OF', 'KNOWLEDGE'] +8230-279154-0033-650: ref=['IT', 'IS', 'BY', 'NO', 'MEANS', 'ALWAYS', 'RELIABLE', 'ALMOST', 'EVERYBODY', 'HAS', 'AT', 'SOME', 'TIME', 'EXPERIENCED', 'THE', 'WELL', 'KNOWN', 'ILLUSION', 'THAT', 'ALL', 'THAT', 'IS', 'HAPPENING', 'NOW', 'HAPPENED', 'BEFORE', 'AT', 'SOME', 'TIME'] +8230-279154-0033-650: hyp=['IT', 'IS', 'BY', 'NO', 'MEANS', 'ALWAYS', 'RELIABLE', 'ALMOST', 'EVERYBODY', 'HAS', 'AT', 'SOME', 'TIME', 'EXPERIENCED', 'THE', 'WELL', 'KNOWN', 'ILLUSION', 'THAT', 'ALL', 'THAT', 'IS', 'HAPPENING', 'NOW', 'HAPPENED', 'BEFORE', 'AT', 'SOME', 'TIME'] +8230-279154-0034-651: ref=['WHENEVER', 'THE', 'SENSE', 'OF', 'FAMILIARITY', 'OCCURS', 'WITHOUT', 'A', 'DEFINITE', 'OBJECT', 'IT', 'LEADS', 'US', 'TO', 'SEARCH', 'THE', 'ENVIRONMENT', 'UNTIL', 'WE', 'ARE', 'SATISFIED', 'THAT', 'WE', 'HAVE', 'FOUND', 'THE', 'APPROPRIATE', 'OBJECT', 'WHICH', 'LEADS', 'US', 'TO', 'THE', 'JUDGMENT', 'THIS', 'IS', 'FAMILIAR'] +8230-279154-0034-651: hyp=['WHENEVER', 'THE', 'SENSE', 'OF', 'FAMILIARITY', 'OCCURS', 'WITHOUT', 'A', 'DEFINITE', 'OBJECT', 'IT', 'LEADS', 'US', 'TO', 'SEARCH', 'THE', 'ENVIRONMENT', 'UNTIL', 'WE', 'ARE', 'SATISFIED', 'THAT', 'WE', 'HAVE', 'FOUND', 'THE', 'APPROPRIATE', 'OBJECT', 'WHICH', 'LEADS', 'US', 'TO', 'THE', 'JUDGMENT', 'THIS', 'IS', 'FAMILIAR'] +8230-279154-0035-652: ref=['THUS', 'NO', 'KNOWLEDGE', 'AS', 'TO', 'THE', 'PAST', 'IS', 'TO', 'BE', 'DERIVED', 'FROM', 'THE', 'FEELING', 'OF', 'FAMILIARITY', 'ALONE'] +8230-279154-0035-652: hyp=['THUS', 'NO', 'KNOWLEDGE', 'AS', 'TO', 'THE', 'PAST', 'IS', 'TO', 'BE', 'DERIVED', 'FROM', 'THE', 'FEELING', 'OF', 'FAMILIARITY', 'ALONE'] +8230-279154-0036-653: ref=['A', 'FURTHER', 'STAGE', 'IS', 'RECOGNITION'] +8230-279154-0036-653: hyp=['A', 'FURTHER', 'STAGE', 'IS', 'RECOGNITION'] +8230-279154-0037-654: ref=['RECOGNITION', 'IN', 'THIS', 'SENSE', 'DOES', 'NOT', 'NECESSARILY', 'INVOLVE', 'MORE', 'THAN', 'A', 'HABIT', 'OF', 'ASSOCIATION', 'THE', 'KIND', 'OF', 'OBJECT', 'WE', 'ARE', 'SEEING', 'AT', 'THE', 'MOMENT', 'IS', 'ASSOCIATED', 'WITH', 'THE', 'WORD', 'CAT', 'OR', 'WITH', 'AN', 'AUDITORY', 'IMAGE', 'OF', 'PURRING', 'OR', 'WHATEVER', 'OTHER', 'CHARACTERISTIC', 'WE', 'MAY', 'HAPPEN', 'TO', 'RECOGNIZE', 'IN', 'THE', 'CAT', 'OF', 'THE', 'MOMENT'] +8230-279154-0037-654: hyp=['RECOGNITION', 'IN', 'THIS', 'SENSE', 'DOES', 'NOT', 'NECESSARILY', 'INVOLVE', 'MORE', 'THAN', 'A', 'HABIT', 'OF', 'ASSOCIATION', 'THE', 'KIND', 'OF', 'OBJECT', 'WE', 'ARE', 'SEEING', 'AT', 'THE', 'MOMENT', 'IS', 'ASSOCIATED', 'WITH', 'THE', 'WORD', 'CAT', 'OR', 'WITH', 'AN', 'AUDITORY', 'IMAGE', 'OF', 'PURRING', 'OR', 'WHATEVER', 'OTHER', 'CHARACTERISTIC', 'WE', 'MAY', 'HAPPEN', 'TO', 'RECOGNIZE', 'IN', 'THE', 'CAT', 'OF', 'THE', 'MOMENT'] +8230-279154-0038-655: ref=['WE', 'ARE', 'OF', 'COURSE', 'IN', 'FACT', 'ABLE', 'TO', 'JUDGE', 'WHEN', 'WE', 'RECOGNIZE', 'AN', 'OBJECT', 'THAT', 'WE', 'HAVE', 'SEEN', 'IT', 'BEFORE', 'BUT', 'THIS', 'JUDGMENT', 'IS', 'SOMETHING', 'OVER', 'AND', 'ABOVE', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'AND', 'MAY', 'VERY', 'PROBABLY', 'BE', 'IMPOSSIBLE', 'TO', 'ANIMALS', 'THAT', 'NEVERTHELESS', 'HAVE', 'THE', 'EXPERIENCE', 'OF', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'OF', 'THE', 'WORD'] +8230-279154-0038-655: hyp=['WE', 'ARE', 'OF', 'COURSE', 'IN', 'FACT', 'ABLE', 'TO', 'JUDGE', 'WHEN', 'WE', 'RECOGNIZE', 'AN', 'OBJECT', 'THAT', 'WE', 'HAVE', 'SEEN', 'IT', 'BEFORE', 'BUT', 'THIS', 'JUDGMENT', 'IS', 'SOMETHING', 'OVER', 
'AND', 'ABOVE', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'AND', 'MAY', 'VERY', 'PROBABLY', 'BE', 'IMPOSSIBLE', 'TO', 'ANIMALS', 'THAT', 'NEVERTHELESS', 'HAVE', 'THE', 'EXPERIENCE', 'OF', 'RECOGNITION', 'IN', 'THIS', 'FIRST', 'SENSE', 'OF', 'THE', 'WORD'] +8230-279154-0039-656: ref=['THIS', 'KNOWLEDGE', 'IS', 'MEMORY', 'IN', 'ONE', 'SENSE', 'THOUGH', 'IN', 'ANOTHER', 'IT', 'IS', 'NOT'] +8230-279154-0039-656: hyp=['THIS', 'KNOWLEDGE', 'IS', 'MEMORY', 'IN', 'ONE', 'SENSE', 'THOUGH', 'IN', 'ANOTHER', 'IT', 'IS', 'NOT'] +8230-279154-0040-657: ref=['THERE', 'ARE', 'HOWEVER', 'SEVERAL', 'POINTS', 'IN', 'WHICH', 'SUCH', 'AN', 'ACCOUNT', 'OF', 'RECOGNITION', 'IS', 'INADEQUATE', 'TO', 'BEGIN', 'WITH', 'IT', 'MIGHT', 'SEEM', 'AT', 'FIRST', 'SIGHT', 'MORE', 'CORRECT', 'TO', 'DEFINE', 'RECOGNITION', 'AS', 'I', 'HAVE', 'SEEN', 'THIS', 'BEFORE', 'THAN', 'AS', 'THIS', 'HAS', 'EXISTED', 'BEFORE'] +8230-279154-0040-657: hyp=['THERE', 'ARE', 'HOWEVER', 'SEVERAL', 'POINTS', 'IN', 'WHICH', 'SUCH', 'AN', 'ACCOUNT', 'OF', 'RECOGNITION', 'IS', 'INADEQUATE', 'TO', 'BEGIN', 'WITH', 'IT', 'MIGHT', 'SEEM', 'AT', 'FIRST', 'SIGHT', 'MORE', 'CORRECT', 'TO', 'DEFINE', 'RECOGNITION', 'AS', 'I', 'HAVE', 'SEEN', 'THIS', 'BEFORE', 'THAN', 'AS', 'THIS', 'HAS', 'EXISTED', 'BEFORE'] +8230-279154-0041-658: ref=['THE', 'DEFINITION', 'OF', 'MY', 'EXPERIENCE', 'IS', 'DIFFICULT', 'BROADLY', 'SPEAKING', 'IT', 'IS', 'EVERYTHING', 'THAT', 'IS', 'CONNECTED', 'WITH', 'WHAT', 'I', 'AM', 'EXPERIENCING', 'NOW', 'BY', 'CERTAIN', 'LINKS', 'OF', 'WHICH', 'THE', 'VARIOUS', 'FORMS', 'OF', 'MEMORY', 'ARE', 'AMONG', 'THE', 'MOST', 'IMPORTANT'] +8230-279154-0041-658: hyp=['THE', 'DEFINITION', 'OF', 'MY', 'EXPERIENCE', 'IS', 'DIFFICULT', 'BROADLY', 'SPEAKING', 'IT', 'IS', 'EVERYTHING', 'THAT', 'IS', 'CONNECTED', 'WITH', 'WHAT', 'I', 'AM', 'EXPERIENCING', 'NOW', 'BY', 'CERTAIN', 'LINKS', 'OF', 'WHICH', 'THE', 'VARIOUS', 'FORMS', 'OF', 'MEMORY', 'ARE', 'AMONG', 'THE', 'MOST', 'IMPORTANT'] +8230-279154-0042-659: ref=['THUS', 'IF', 'I', 'RECOGNIZE', 'A', 'THING', 'THE', 'OCCASION', 'OF', 'ITS', 'PREVIOUS', 'EXISTENCE', 'IN', 'VIRTUE', 'OF', 'WHICH', 'I', 'RECOGNIZE', 'IT', 'FORMS', 'PART', 'OF', 'MY', 'EXPERIENCE', 'BY', 'DEFINITION', 'RECOGNITION', 'WILL', 'BE', 'ONE', 'OF', 'THE', 'MARKS', 'BY', 'WHICH', 'MY', 'EXPERIENCE', 'IS', 'SINGLED', 'OUT', 'FROM', 'THE', 'REST', 'OF', 'THE', 'WORLD'] +8230-279154-0042-659: hyp=['THUS', 'IF', 'I', 'RECOGNIZE', 'A', 'THING', 'THE', 'OCCASION', 'OF', 'ITS', 'PREVIOUS', 'EXISTENCE', 'IN', 'VIRTUE', 'OF', 'WHICH', 'I', 'RECOGNIZE', 'IT', 'FORMS', 'PART', 'OF', 'MY', 'EXPERIENCE', 'BY', 'DEFINITION', 'RECOGNITION', 'WILL', 'BE', 'ONE', 'OF', 'THE', 'MARKS', 'BY', 'WHICH', 'MY', 'EXPERIENCE', 'IS', 'SINGLED', 'OUT', 'FROM', 'THE', 'REST', 'OF', 'THE', 'WORLD'] +8230-279154-0043-660: ref=['OF', 'COURSE', 'THE', 'WORDS', 'THIS', 'HAS', 'EXISTED', 'BEFORE', 'ARE', 'A', 'VERY', 'INADEQUATE', 'TRANSLATION', 'OF', 'WHAT', 'ACTUALLY', 'HAPPENS', 'WHEN', 'WE', 'FORM', 'A', 'JUDGMENT', 'OF', 'RECOGNITION', 'BUT', 'THAT', 'IS', 'UNAVOIDABLE', 'WORDS', 'ARE', 'FRAMED', 'TO', 'EXPRESS', 'A', 'LEVEL', 'OF', 'THOUGHT', 'WHICH', 'IS', 'BY', 'NO', 'MEANS', 'PRIMITIVE', 'AND', 'ARE', 'QUITE', 'INCAPABLE', 'OF', 'EXPRESSING', 'SUCH', 'AN', 'ELEMENTARY', 'OCCURRENCE', 'AS', 'RECOGNITION'] +8230-279154-0043-660: hyp=['OF', 'COURSE', 'THE', 'WORDS', 'THIS', 'HAS', 'EXISTED', 'BEFORE', 'ARE', 'OF', 'VERY', 'INADEQUATE', 'TRANSLATION', 'OF', 'WHAT', 'ACTUALLY', 'HAPPENS', 'WHEN', 'WE', 'FORM', 'A', 'JUDGMENT', 'OF', 'RECOGNITION', 
'BUT', 'THAT', 'IS', 'UNAVOIDABLE', 'WORDS', 'ARE', 'FRAMED', 'TO', 'EXPRESS', 'A', 'LEVEL', 'OF', 'THOUGHT', 'WHICH', 'IS', 'BY', 'NO', 'MEANS', 'PRIMITIVE', 'AND', 'ARE', 'QUITE', 'INCAPABLE', 'OF', 'EXPRESSING', 'SUCH', 'AN', 'ELEMENTARY', 'OCCURRENCE', 'AS', 'RECOGNITION'] +8455-210777-0000-972: ref=['I', 'REMAINED', 'THERE', 'ALONE', 'FOR', 'MANY', 'HOURS', 'BUT', 'I', 'MUST', 'ACKNOWLEDGE', 'THAT', 'BEFORE', 'I', 'LEFT', 'THE', 'CHAMBERS', 'I', 'HAD', 'GRADUALLY', 'BROUGHT', 'MYSELF', 'TO', 'LOOK', 'AT', 'THE', 'MATTER', 'IN', 'ANOTHER', 'LIGHT'] +8455-210777-0000-972: hyp=['I', 'REMAINED', 'THERE', 'ALONE', 'FOR', 'MANY', 'HOURS', 'BUT', 'I', 'MUST', 'ACKNOWLEDGE', 'THAT', 'BEFORE', 'I', 'LEFT', 'THE', 'CHAMBERS', 'I', 'HAD', 'GRADUALLY', 'BROUGHT', 'MYSELF', 'TO', 'LOOK', 'AT', 'THE', 'MATTER', 'IN', 'ANOTHER', 'LIGHT'] +8455-210777-0001-973: ref=['HAD', 'EVA', 'CRASWELLER', 'NOT', 'BEEN', 'GOOD', 'LOOKING', 'HAD', 'JACK', 'BEEN', 'STILL', 'AT', 'COLLEGE', 'HAD', 'SIR', 'KENNINGTON', 'OVAL', 'REMAINED', 'IN', 'ENGLAND', 'HAD', 'MISTER', 'BUNNIT', 'AND', 'THE', 'BAR', 'KEEPER', 'NOT', 'SUCCEEDED', 'IN', 'STOPPING', 'MY', 'CARRIAGE', 'ON', 'THE', 'HILL', 'SHOULD', 'I', 'HAVE', 'SUCCEEDED', 'IN', 'ARRANGING', 'FOR', 'THE', 'FINAL', 'DEPARTURE', 'OF', 'MY', 'OLD', 'FRIEND'] +8455-210777-0001-973: hyp=['HAD', 'EITHER', 'CRUSHWELLER', 'NOT', 'BEEN', 'GOOD', 'LOOKING', 'HAD', 'JACK', 'BEEN', 'STILL', 'AT', 'COLLEGE', 'HAD', 'SIR', 'KENNINGTON', 'OVAL', 'REMAINED', 'IN', 'ENGLAND', 'HAD', 'MISTER', 'BUNNOT', 'AND', 'THE', 'BAR', 'KEEPER', 'NOT', 'SUCCEEDED', 'IN', 'STOPPING', 'MY', 'CARRIAGE', 'ON', 'THE', 'HILL', 'SHOULD', 'I', 'HAVE', 'SUCCEEDED', 'IN', 'A', 'RAGING', 'FOR', 'THE', 'FINAL', 'DEPARTURE', 'OF', 'MY', 'OLD', 'FRIEND'] +8455-210777-0002-974: ref=['ON', 'ARRIVING', 'AT', 'HOME', 'AT', 'MY', 'OWN', 'RESIDENCE', 'I', 'FOUND', 'THAT', 'OUR', 'SALON', 'WAS', 'FILLED', 'WITH', 'A', 'BRILLIANT', 'COMPANY'] +8455-210777-0002-974: hyp=['ON', 'ARRIVING', 'AT', 'HOME', 'AT', 'MY', 'OWN', 'RESIDENCE', 'I', 'FOUND', 'THAT', 'OUR', 'SALON', 'WAS', 'FILLED', 'WITH', 'A', 'BRILLIANT', 'COMPANY'] +8455-210777-0003-975: ref=['AS', 'I', 'SPOKE', 'I', 'MADE', 'HIM', 'A', 'GRACIOUS', 'BOW', 'AND', 'I', 'THINK', 'I', 'SHOWED', 'HIM', 'BY', 'MY', 'MODE', 'OF', 'ADDRESS', 'THAT', 'I', 'DID', 'NOT', 'BEAR', 'ANY', 'GRUDGE', 'AS', 'TO', 'MY', 'INDIVIDUAL', 'SELF'] +8455-210777-0003-975: hyp=['AS', 'I', 'SPOKE', 'I', 'MADE', 'HIM', 'A', 'GRACIOUS', 'BOW', 'AND', 'I', 'THINK', 'I', 'SHOWED', 'HIM', 'BY', 'MY', 'MODE', 'OF', 'ADDRESS', 'THAT', 'I', 'DID', 'NOT', 'BEAR', 'ANY', 'GRUDGE', 'AS', 'TO', 'MY', 'INDIVIDUAL', 'SELF'] +8455-210777-0004-976: ref=['I', 'HAVE', 'COME', 'TO', 'YOUR', 'SHORES', 'MISTER', 'PRESIDENT', 'WITH', 'THE', 'PURPOSE', 'OF', 'SEEING', 'HOW', 'THINGS', 'ARE', 'PROGRESSING', 'IN', 'THIS', 'DISTANT', 'QUARTER', 'OF', 'THE', 'WORLD'] +8455-210777-0004-976: hyp=['I', 'HAVE', 'COME', 'TO', 'YOUR', 'SHORES', 'MISTER', 'PRESIDENT', 'WITH', 'THE', 'PURPOSE', 'OF', 'SEEING', 'HOW', 'THINGS', 'ARE', 'PROGRESSING', 'IN', 'THIS', 'DISTANT', 'QUARTER', 'OF', 'THE', 'WORLD'] +8455-210777-0005-977: ref=['WE', 'HAVE', 'OUR', 'LITTLE', 'STRUGGLES', 'HERE', 'AS', 'ELSEWHERE', 'AND', 'ALL', 'THINGS', 'CANNOT', 'BE', 'DONE', 'BY', 'ROSE', 'WATER'] +8455-210777-0005-977: hyp=['WE', 'HAVE', 'OUR', 'LITTLE', 'STRUGGLES', 'HERE', 'AS', 'ELSEWHERE', 'AND', 'ALL', 'THINGS', 'CANNOT', 'BE', 'DONE', 'BY', 'ROSE', 'WATER'] +8455-210777-0006-978: ref=['WE', 'ARE', 'QUITE', 'SATISFIED', 'NOW', 'CAPTAIN', 
'BATTLEAX', 'SAID', 'MY', 'WIFE'] +8455-210777-0006-978: hyp=['WE', 'ARE', 'QUITE', 'SATISFIED', 'NOW', 'CAPTAIN', 'BATTLE', 'AXE', 'SAID', 'MY', 'WIFE'] +8455-210777-0007-979: ref=['QUITE', 'SATISFIED', 'SAID', 'EVA'] +8455-210777-0007-979: hyp=['QUITE', 'SATISFIED', 'SAID', 'EVA'] +8455-210777-0008-980: ref=['THE', 'LADIES', 'IN', 'COMPLIANCE', 'WITH', 'THAT', 'SOFTNESS', 'OF', 'HEART', 'WHICH', 'IS', 'THEIR', 'CHARACTERISTIC', 'ARE', 'ON', 'ONE', 'SIDE', 'AND', 'THE', 'MEN', 'BY', 'WHOM', 'THE', 'WORLD', 'HAS', 'TO', 'BE', 'MANAGED', 'ARE', 'ON', 'THE', 'OTHER'] +8455-210777-0008-980: hyp=['THE', 'LADIES', 'IN', 'COMPLIANCE', 'WITH', 'THAT', 'SOFTNESS', 'OF', 'HEART', 'WHICH', 'IS', 'THEIR', 'CHARACTERISTIC', 'ARE', 'ON', 'ONE', 'SIDE', 'AND', 'THE', 'MEN', 'BY', 'WHOM', 'THE', 'WORLD', 'HAS', 'TO', 'BE', 'MANAGED', 'OR', 'ON', 'THE', 'OTHER'] +8455-210777-0009-981: ref=['NO', 'DOUBT', 'IN', 'PROCESS', 'OF', 'TIME', 'THE', 'LADIES', 'WILL', 'FOLLOW'] +8455-210777-0009-981: hyp=['NO', 'DOUBT', 'IN', 'PROCESS', 'OF', 'TIME', 'THE', 'LADIES', 'WILL', 'FOLLOW'] +8455-210777-0010-982: ref=['THEIR', 'MASTERS', 'SAID', 'MISSUS', 'NEVERBEND'] +8455-210777-0010-982: hyp=['THEIR', 'MASTER', 'SAID', 'MISSUS', 'NEVERBEND'] +8455-210777-0011-983: ref=['I', 'DID', 'NOT', 'MEAN', 'SAID', 'CAPTAIN', 'BATTLEAX', 'TO', 'TOUCH', 'UPON', 'PUBLIC', 'SUBJECTS', 'AT', 'SUCH', 'A', 'MOMENT', 'AS', 'THIS'] +8455-210777-0011-983: hyp=['I', 'DID', 'NOT', 'MEAN', 'SAID', 'CAPTAIN', 'BATTLE', 'AXE', 'TO', 'TOUCH', 'UPON', 'PUBLIC', 'SUBJECTS', 'AT', 'SUCH', 'A', 'MOMENT', 'AS', 'THIS'] +8455-210777-0012-984: ref=['MISSUS', 'NEVERBEND', 'YOU', 'MUST', 'INDEED', 'BE', 'PROUD', 'OF', 'YOUR', 'SON'] +8455-210777-0012-984: hyp=['MISSUS', 'NEVERBEND', 'YOU', 'MUST', 'INDEED', 'BE', 'PROUD', 'OF', 'YOUR', 'SON'] +8455-210777-0013-985: ref=['JACK', 'HAD', 'BEEN', 'STANDING', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'ROOM', 'TALKING', 'TO', 'EVA', 'AND', 'WAS', 'NOW', 'REDUCED', 'TO', 'SILENCE', 'BY', 'HIS', 'PRAISES'] +8455-210777-0013-985: hyp=['JACK', 'HAD', 'BEEN', 'STANDING', 'IN', 'THE', 'FAR', 'CORNER', 'OF', 'THE', 'ROOM', 'TALKING', 'TO', 'EVA', 'AND', 'WAS', 'NOW', 'REDUCED', 'TO', 'SILENCE', 'BY', 'HIS', 'PRAISES'] +8455-210777-0014-986: ref=['SIR', 'KENNINGTON', 'OVAL', 'IS', 'A', 'VERY', 'FINE', 'PLAYER', 'SAID', 'MY', 'WIFE'] +8455-210777-0014-986: hyp=['SIR', 'KENNINGTON', 'OVAL', 'IS', 'A', 'VERY', 'FINE', 'PLAYER', 'SAID', 'MY', 'WIFE'] +8455-210777-0015-987: ref=['I', 'AND', 'MY', 'WIFE', 'AND', 'SON', 'AND', 'THE', 'TWO', 'CRASWELLERS', 'AND', 'THREE', 'OR', 'FOUR', 'OTHERS', 'AGREED', 'TO', 'DINE', 'ON', 'BOARD', 'THE', 'SHIP', 'ON', 'THE', 'NEXT'] +8455-210777-0015-987: hyp=['I', 'AM', 'MY', 'WIFE', 'AND', 'SON', 'AND', 'THE', 'TWO', 'CRESTWELLERS', 'AND', 'THREE', 'OR', 'FOUR', 'OTHERS', 'AGREED', 'TO', 'DINE', 'ON', 'BOARD', 'THE', 'SHIP', 'ON', 'THE', 'NEXT'] +8455-210777-0016-988: ref=['THIS', 'I', 'FELT', 'WAS', 'PAID', 'TO', 'ME', 'AS', 'BEING', 'PRESIDENT', 'OF', 'THE', 'REPUBLIC', 'AND', 'I', 'ENDEAVOURED', 'TO', 'BEHAVE', 'MYSELF', 'WITH', 'SUCH', 'MINGLED', 'HUMILITY', 'AND', 'DIGNITY', 'AS', 'MIGHT', 'BEFIT', 'THE', 'OCCASION', 'BUT', 'I', 'COULD', 'NOT', 'BUT', 'FEEL', 'THAT', 'SOMETHING', 'WAS', 'WANTING', 'TO', 'THE', 'SIMPLICITY', 'OF', 'MY', 'ORDINARY', 'LIFE'] +8455-210777-0016-988: hyp=['THIS', 'I', 'FELT', 'WAS', 'PAID', 'TO', 'ME', 'AS', 'BEING', 'PRESIDENT', 'OF', 'THE', 'REPUBLIC', 'AND', 'I', 'ENDEAVOURED', 'TO', 'BEHAVE', 'MYSELF', 'WITH', 'SUCH', 'MINGLED', 'HUMILITY', 'AND', 
'DIGNITY', 'AS', 'MIGHT', 'BE', 'FIT', 'THE', 'OCCASION', 'BUT', 'I', 'COULD', 'NOT', 'BUT', 'FEEL', 'THAT', 'SOMETHING', 'WAS', 'WANTING', 'TO', 'THE', 'SIMPLICITY', 'OF', 'MY', 'ORDINARY', 'LIFE'] +8455-210777-0017-989: ref=['MY', 'WIFE', 'ON', 'THE', 'SPUR', 'OF', 'THE', 'MOMENT', 'MANAGED', 'TO', 'GIVE', 'THE', 'GENTLEMEN', 'A', 'VERY', 'GOOD', 'DINNER'] +8455-210777-0017-989: hyp=['MY', 'WIFE', 'ON', 'THE', 'SPUR', 'OF', 'THE', 'MOMENT', 'MANAGED', 'TO', 'GIVE', 'THE', 'GENTLEMAN', 'A', 'VERY', 'GOOD', 'DINNER'] +8455-210777-0018-990: ref=['THIS', 'SHE', 'SAID', 'WAS', 'TRUE', 'HOSPITALITY', 'AND', 'I', 'AM', 'NOT', 'SURE', 'THAT', 'I', 'DID', 'NOT', 'AGREE', 'WITH', 'HER'] +8455-210777-0018-990: hyp=['THIS', 'SHE', 'SAID', 'WAS', 'TRUE', 'HOSPITALITY', 'AND', 'I', 'AM', 'NOT', 'SURE', 'THAT', 'I', 'DID', 'NOT', 'AGREE', 'WITH', 'THERE'] +8455-210777-0019-991: ref=['THEN', 'THERE', 'WERE', 'THREE', 'OR', 'FOUR', 'LEADING', 'MEN', 'OF', 'THE', 'COMMUNITY', 'WITH', 'THEIR', 'WIVES', 'WHO', 'WERE', 'FOR', 'THE', 'MOST', 'PART', 'THE', 'FATHERS', 'AND', 'MOTHERS', 'OF', 'THE', 'YOUNG', 'LADIES'] +8455-210777-0019-991: hyp=['THEN', 'THERE', 'WERE', 'THREE', 'OR', 'FOUR', 'LEADING', 'MEN', 'OF', 'THE', 'COMMUNITY', 'WITH', 'THEIR', 'WIVES', 'WHO', 'WERE', 'FOR', 'THE', 'MOST', 'PART', 'THE', 'FATHERS', 'AND', 'MOTHERS', 'OF', 'THE', 'YOUNG', 'LADIES'] +8455-210777-0020-992: ref=['OH', 'YES', 'SAID', 'JACK', 'AND', "I'M", 'NOWHERE'] +8455-210777-0020-992: hyp=['OH', 'YES', 'SAID', 'JACK', 'AND', "I'M", 'NOWHERE'] +8455-210777-0021-993: ref=['BUT', 'I', 'MEAN', 'TO', 'HAVE', 'MY', 'INNINGS', 'BEFORE', 'LONG'] +8455-210777-0021-993: hyp=['BUT', 'I', 'MEAN', 'TO', 'HAVE', 'MY', 'INNINGS', 'BEFORE', 'LONG'] +8455-210777-0022-994: ref=['OF', 'WHAT', 'MISSUS', 'NEVERBEND', 'HAD', 'GONE', 'THROUGH', 'IN', 'PROVIDING', 'BIRDS', 'BEASTS', 'AND', 'FISHES', 'NOT', 'TO', 'TALK', 'OF', 'TARTS', 'AND', 'JELLIES', 'FOR', 'THE', 'DINNER', 'OF', 'THAT', 'DAY', 'NO', 'ONE', 'BUT', 'MYSELF', 'CAN', 'HAVE', 'ANY', 'IDEA', 'BUT', 'IT', 'MUST', 'BE', 'ADMITTED', 'THAT', 'SHE', 'ACCOMPLISHED', 'HER', 'TASK', 'WITH', 'THOROUGH', 'SUCCESS'] +8455-210777-0022-994: hyp=['OF', 'WHAT', 'MISSUS', 'NEVERBEND', 'HAD', 'GONE', 'THROUGH', 'IN', 'PROVIDING', 'BIRDS', 'BEASTS', 'AND', 'FISHES', 'NOT', 'TO', 'TALK', 'OF', 'TARTS', 'AND', 'JELLIES', 'FOR', 'THE', 'DINNER', 'OF', 'THAT', 'DAY', 'NO', 'ONE', 'BUT', 'MYSELF', 'CAN', 'HAVE', 'ANY', 'IDEA', 'BUT', 'IT', 'MUST', 'BE', 'ADMITTED', 'THAT', 'SHE', 'ACCOMPLISHED', 'HER', 'TASK', 'WITH', 'THOROUGH', 'SUCCESS'] +8455-210777-0023-995: ref=['WE', 'SAT', 'WITH', 'THE', 'OFFICERS', 'SOME', 'LITTLE', 'TIME', 'AFTER', 'DINNER', 'AND', 'THEN', 'WENT', 'ASHORE'] +8455-210777-0023-995: hyp=['WE', 'SAT', 'WITH', 'THE', 'OFFICER', 'SOME', 'LITTLE', 'TIME', 'AFTER', 'DINNER', 'AND', 'THEN', 'WENT', 'ASHORE'] +8455-210777-0024-996: ref=['HOW', 'MUCH', 'OF', 'EVIL', 'OF', 'REAL', 'ACCOMPLISHED', 'EVIL', 'HAD', 'THERE', 'NOT', 'OCCURRED', 'TO', 'ME', 'DURING', 'THE', 'LAST', 'FEW', 'DAYS'] +8455-210777-0024-996: hyp=['HOW', 'MUCH', 'OF', 'EVIL', 'OF', 'REAL', 'ACCOMPLISHED', 'EVIL', 'HAD', 'THERE', 'NOT', 'OCCURRED', 'TO', 'ME', 'DURING', 'THE', 'LAST', 'FEW', 'DAYS'] +8455-210777-0025-997: ref=['WHAT', 'COULD', 'I', 'DO', 'NOW', 'BUT', 'JUST', 'LAY', 'MYSELF', 'DOWN', 'AND', 'DIE'] +8455-210777-0025-997: hyp=['WHAT', 'COULD', 'I', 'DO', 'NOW', 'BUT', 'JUST', 'LAY', 'MYSELF', 'DOWN', 'AND', 'DIE'] +8455-210777-0026-998: ref=['AND', 'THE', 'DEATH', 'OF', 'WHICH', 'I', 'DREAMT', 'COULD', 
'NOT', 'ALAS'] +8455-210777-0026-998: hyp=['AND', 'THE', 'DEATH', 'OF', 'WHICH', 'I', 'DREAMT', 'COULD', 'NOT', 'ALAS'] +8455-210777-0027-999: ref=['WHEN', 'THIS', 'CAPTAIN', 'SHOULD', 'HAVE', 'TAKEN', 'HIMSELF', 'AND', 'HIS', 'VESSEL', 'BACK', 'TO', 'ENGLAND', 'I', 'WOULD', 'RETIRE', 'TO', 'A', 'SMALL', 'FARM', 'WHICH', 'I', 'POSSESSED', 'AT', 'THE', 'FARTHEST', 'SIDE', 'OF', 'THE', 'ISLAND', 'AND', 'THERE', 'IN', 'SECLUSION', 'WOULD', 'I', 'END', 'MY', 'DAYS'] +8455-210777-0027-999: hyp=['WHEN', 'THIS', 'CAPTAIN', 'SHOULD', 'HAVE', 'TAKEN', 'HIMSELF', 'AND', 'HIS', 'VESSEL', 'BACK', 'TO', 'ENGLAND', 'I', 'WOULD', 'RETIRE', 'TO', 'A', 'SMALL', 'FARM', 'WHICH', 'I', 'POSSESSED', 'AT', 'THE', 'FURTHEST', 'SIDE', 'OF', 'THE', 'ISLAND', 'AND', 'THERE', 'IN', 'SECLUSION', 'WHAT', 'I', 'END', 'MY', 'DAYS'] +8455-210777-0028-1000: ref=['JACK', 'WOULD', 'BECOME', "EVA'S", 'HAPPY', 'HUSBAND', 'AND', 'WOULD', 'REMAIN', 'AMIDST', 'THE', 'HURRIED', 'DUTIES', 'OF', 'THE', 'EAGER', 'WORLD'] +8455-210777-0028-1000: hyp=['JACK', 'WOULD', 'BECOME', "EVA'S", 'HAPPY', 'HUSBAND', 'AND', 'WOULD', 'REMAIN', 'AMIDST', 'THE', 'HURRIED', 'DUTIES', 'OF', 'THE', 'EAGER', 'WORLD'] +8455-210777-0029-1001: ref=['THINKING', 'OF', 'ALL', 'THIS', 'I', 'WENT', 'TO', 'SLEEP'] +8455-210777-0029-1001: hyp=['THINKING', 'OF', 'ALL', 'THIS', 'I', 'WENT', 'TO', 'SLEEP'] +8455-210777-0030-1002: ref=['MISTER', 'NEVERBEND', 'BEGAN', 'THE', 'CAPTAIN', 'AND', 'I', 'OBSERVED', 'THAT', 'UP', 'TO', 'THAT', 'MOMENT', 'HE', 'HAD', 'GENERALLY', 'ADDRESSED', 'ME', 'AS', 'PRESIDENT', 'IT', 'CANNOT', 'BE', 'DENIED', 'THAT', 'WE', 'HAVE', 'COME', 'HERE', 'ON', 'AN', 'UNPLEASANT', 'MISSION'] +8455-210777-0030-1002: hyp=['MISTER', 'NEVERBEND', 'BEGAN', 'THE', 'CAPTAIN', 'AND', 'I', 'OBSERVE', 'THAT', 'UP', 'TO', 'THAT', 'MOMENT', 'HE', 'HAD', 'GENERALLY', 'ADDRESSED', 'ME', 'AS', 'PRESIDENT', 'IT', 'CANNOT', 'BE', 'DENIED', 'THAT', 'WE', 'HAVE', 'COME', 'HERE', 'ON', 'AN', 'UNPLEASANT', 'MISSION'] +8455-210777-0031-1003: ref=['YOU', 'HAVE', 'RECEIVED', 'US', 'WITH', 'ALL', 'THAT', 'COURTESY', 'AND', 'HOSPITALITY', 'FOR', 'WHICH', 'YOUR', 'CHARACTER', 'IN', 'ENGLAND', 'STANDS', 'SO', 'HIGH'] +8455-210777-0031-1003: hyp=['YOU', 'HAVE', 'RECEIVED', 'US', 'WITH', 'ALL', 'THAT', 'COURTESY', 'AND', 'HOSPITALITY', 'FOR', 'WHICH', 'YOUR', 'CHARACTER', 'AND', 'IN', 'ENGLAND', 'STAND', 'SO', 'HIGH'] +8455-210777-0032-1004: ref=['IT', 'IS', 'A', 'DUTY', 'SAID', 'I'] +8455-210777-0032-1004: hyp=['IT', 'IS', 'A', 'DUTY', 'SAID', 'I'] +8455-210777-0033-1005: ref=['BUT', 'YOUR', 'POWER', 'IS', 'SO', 'SUPERIOR', 'TO', 'ANY', 'THAT', 'I', 'CAN', 'ADVANCE', 'AS', 'TO', 'MAKE', 'US', 'HERE', 'FEEL', 'THAT', 'THERE', 'IS', 'NO', 'DISGRACE', 'IN', 'YIELDING', 'TO', 'IT'] +8455-210777-0033-1005: hyp=['BUT', 'YOUR', 'POWER', 'IS', 'SO', 'SUPERIOR', 'TO', 'ANY', 'THAT', 'I', 'CAN', 'ADVANCE', 'AS', 'TO', 'MAKE', 'US', 'HERE', 'FEEL', 'THAT', 'THERE', 'IS', 'NO', 'DISGRACE', 'IN', 'YIELDING', 'TO', 'IT'] +8455-210777-0034-1006: ref=['NOT', 'A', 'DOUBT', 'BUT', 'HAD', 'YOUR', 'FORCE', 'BEEN', 'ONLY', 'DOUBLE', 'OR', 'TREBLE', 'OUR', 'OWN', 'I', 'SHOULD', 'HAVE', 'FOUND', 'IT', 'MY', 'DUTY', 'TO', 'STRUGGLE', 'WITH', 'YOU'] +8455-210777-0034-1006: hyp=['NOT', 'A', 'DOUBT', 'BUT', 'HAD', 'YOUR', 'FORCE', 'BEEN', 'ONLY', 'DOUBLE', 'OR', 'TROUBLE', 'OUR', 'OWN', 'I', 'SHOULD', 'HAVE', 'FOUND', 'IT', 'MY', 'DUTY', 'TO', 'STRUGGLE', 'WITH', 'YOU'] +8455-210777-0035-1007: ref=['THAT', 'IS', 'ALL', 'QUITE', 'TRUE', 'MISTER', 'NEVERBEND', 'SAID', 'SIR', 'FERDINANDO', 'BROWN'] 
+8455-210777-0035-1007: hyp=['THAT', 'IS', 'ALL', 'QUITE', 'TRUE', 'MISTER', 'NEVERBEND', 'SAID', 'SIR', 'FERDINANDO', 'BROWN'] +8455-210777-0036-1008: ref=['I', 'CAN', 'AFFORD', 'TO', 'SMILE', 'BECAUSE', 'I', 'AM', 'ABSOLUTELY', 'POWERLESS', 'BEFORE', 'YOU', 'BUT', 'I', 'DO', 'NOT', 'THE', 'LESS', 'FEEL', 'THAT', 'IN', 'A', 'MATTER', 'IN', 'WHICH', 'THE', 'PROGRESS', 'OF', 'THE', 'WORLD', 'IS', 'CONCERNED', 'I', 'OR', 'RATHER', 'WE', 'HAVE', 'BEEN', 'PUT', 'DOWN', 'BY', 'BRUTE', 'FORCE'] +8455-210777-0036-1008: hyp=['I', 'CAN', 'AFFORD', 'TO', 'SMILE', 'BECAUSE', 'I', 'AM', 'ABSOLUTELY', 'POWERLESS', 'BEFORE', 'YOU', 'BUT', 'I', 'DO', 'NOT', 'THE', 'LESS', 'FEEL', 'THAT', 'IN', 'A', 'MATTER', 'OF', 'WHICH', 'THE', 'PROGRESS', 'OF', 'THE', 'WORLD', 'IS', 'CONCERNED', 'I', 'OR', 'RATHER', 'WE', 'HAVE', 'BEEN', 'PUT', 'DOWN', 'BY', 'BRUTE', 'FORCE'] +8455-210777-0037-1009: ref=['YOU', 'HAVE', 'COME', 'TO', 'US', 'THREATENING', 'US', 'WITH', 'ABSOLUTE', 'DESTRUCTION'] +8455-210777-0037-1009: hyp=['YOU', 'HAVE', 'COME', 'TO', 'US', 'THREATENING', 'US', 'WITH', 'ABSOLUTE', 'DESTRUCTION'] +8455-210777-0038-1010: ref=['THEREFORE', 'I', 'FEEL', 'MYSELF', 'QUITE', 'ABLE', 'AS', 'PRESIDENT', 'OF', 'THIS', 'REPUBLIC', 'TO', 'RECEIVE', 'YOU', 'WITH', 'A', 'COURTESY', 'DUE', 'TO', 'THE', 'SERVANTS', 'OF', 'A', 'FRIENDLY', 'ALLY'] +8455-210777-0038-1010: hyp=['THEREFORE', 'I', 'FEEL', 'MYSELF', 'QUITE', 'ABLE', 'AS', 'PRESIDENT', 'OF', 'THIS', 'REPUBLIC', 'TO', 'RECEIVE', 'YOU', 'WITH', 'A', 'COURTESY', 'DUE', 'TO', 'THE', 'SERVANTS', 'OF', 'A', 'FRIENDLY', 'ALLY'] +8455-210777-0039-1011: ref=['I', 'CAN', 'ASSURE', 'YOU', 'HE', 'HAS', 'NOT', 'EVEN', 'ALLOWED', 'ME', 'TO', 'SEE', 'THE', 'TRIGGER', 'SINCE', 'I', 'HAVE', 'BEEN', 'ON', 'BOARD'] +8455-210777-0039-1011: hyp=['I', 'CAN', 'ASSURE', 'YOU', 'HE', 'HAS', 'NOT', 'EVEN', 'ALLOWED', 'ME', 'TO', 'SEE', 'THE', 'TRIGGER', 'SINCE', 'I', 'HAVE', 'BEEN', 'ON', 'BOARD'] +8455-210777-0040-1012: ref=['THEN', 'SAID', 'SIR', 'FERDINANDO', 'THERE', 'IS', 'NOTHING', 'FOR', 'IT', 'BUT', 'THAT', 'HE', 'MUST', 'TAKE', 'YOU', 'WITH', 'HIM'] +8455-210777-0040-1012: hyp=['THEN', 'SAID', 'SIR', 'FERDINANDO', 'THERE', 'IS', 'NOTHING', 'FOR', 'IT', 'BUT', 'THAT', 'WE', 'MUST', 'TAKE', 'YOU', 'WITH', 'HIM'] +8455-210777-0041-1013: ref=['THERE', 'CAME', 'UPON', 'ME', 'A', 'SUDDEN', 'SHOCK', 'WHEN', 'I', 'HEARD', 'THESE', 'WORDS', 'WHICH', 'EXCEEDED', 'ANYTHING', 'WHICH', 'I', 'HAD', 'YET', 'FELT'] +8455-210777-0041-1013: hyp=['THERE', 'CAME', 'UPON', 'ME', 'A', 'SUDDEN', 'SHOCK', 'WHEN', 'I', 'HEARD', 'THESE', 'WORDS', 'WHICH', 'EXCEEDED', 'ANYTHING', 'WHICH', 'I', 'HAD', 'YET', 'FELT'] +8455-210777-0042-1014: ref=['YOU', 'HEAR', 'WHAT', 'SIR', 'FERDINANDO', 'BROWN', 'HAS', 'SAID', 'REPLIED', 'CAPTAIN', 'BATTLEAX'] +8455-210777-0042-1014: hyp=['YOU', 'HEAR', 'WHAT', 'SIR', 'FERDINANDO', 'BROWN', 'HAS', 'SAID', 'REPLIED', 'CAPTAIN', 'BATTLEX'] +8455-210777-0043-1015: ref=['BUT', 'WHAT', 'IS', 'THE', 'DELICATE', 'MISSION', 'I', 'ASKED'] +8455-210777-0043-1015: hyp=['BUT', 'WHAT', 'IS', 'THE', 'DELICATE', 'MISSION', 'I', 'ASKED'] +8455-210777-0044-1016: ref=['I', 'WAS', 'TO', 'BE', 'TAKEN', 'AWAY', 'AND', 'CARRIED', 'TO', 'ENGLAND', 'OR', 'ELSEWHERE', 'OR', 'DROWNED', 'UPON', 'THE', 'VOYAGE', 'IT', 'MATTERED', 'NOT', 'WHICH'] +8455-210777-0044-1016: hyp=['I', 'WAS', 'TO', 'BE', 'TAKEN', 'AWAY', 'AND', 'CARRIED', 'TO', 'ENGLAND', 'OR', 'ELSEWHERE', 'OR', 'DROWNED', 'UPON', 'THE', 'VOYAGE', 'IT', 'MATTERED', 'NOT', 'WHICH'] +8455-210777-0045-1017: ref=['THEN', 'THE', 'REPUBLIC', 
'OF', 'BRITANNULA', 'WAS', 'TO', 'BE', 'DECLARED', 'AS', 'NON', 'EXISTENT', 'AND', 'THE', 'BRITISH', 'FLAG', 'WAS', 'TO', 'BE', 'EXALTED', 'AND', 'A', 'BRITISH', 'GOVERNOR', 'INSTALLED', 'IN', 'THE', 'EXECUTIVE', 'CHAMBERS'] +8455-210777-0045-1017: hyp=['THEN', 'THE', 'REPUBLIC', 'OF', 'BRITAIN', 'YULA', 'WAS', 'TO', 'BE', 'DECLARED', 'AS', 'NON', 'EXISTENT', 'AND', 'THE', 'BRITISH', 'FLAG', 'WAS', 'TO', 'BE', 'EXALTED', 'AND', 'A', 'BRITISH', 'GOVERNOR', 'INSTALLED', 'IN', 'THE', 'EXECUTIVE', 'CHAMBERS'] +8455-210777-0046-1018: ref=['YOU', 'MAY', 'BE', 'QUITE', 'SURE', "IT'S", 'THERE', 'SAID', 'CAPTAIN', 'BATTLEAX', 'AND', 'THAT', 'I', 'CAN', 'SO', 'USE', 'IT', 'AS', 'TO', 'HALF', 'OBLITERATE', 'YOUR', 'TOWN', 'WITHIN', 'TWO', 'MINUTES', 'OF', 'MY', 'RETURN', 'ON', 'BOARD'] +8455-210777-0046-1018: hyp=['YOU', 'MAY', 'BE', 'QUITE', 'SURE', 'TO', 'THERE', 'SAID', 'CAPTAIN', 'BATTLE', 'AXE', 'AND', 'THAT', 'I', 'CAN', 'SO', 'USE', 'IT', 'AS', 'TO', 'HALF', 'OBLITERATE', 'YOUR', 'TOWN', 'WITHIN', 'TWO', 'MINUTES', 'OF', 'MY', 'RETURN', 'ON', 'BOARD'] +8455-210777-0047-1019: ref=['YOU', 'PROPOSE', 'TO', 'KIDNAP', 'ME', 'I', 'SAID'] +8455-210777-0047-1019: hyp=['YOU', 'PROPOSE', 'TO', 'KIDNAP', 'ME', 'I', 'SAID'] +8455-210777-0048-1020: ref=['WHAT', 'WOULD', 'BECOME', 'OF', 'YOUR', 'GUN', 'WERE', 'I', 'TO', 'KIDNAP', 'YOU'] +8455-210777-0048-1020: hyp=['WHAT', 'WILL', 'BECOME', 'OF', 'YOUR', 'GUN', 'WERE', 'I', 'TO', 'KIDNAP', 'YOU'] +8455-210777-0049-1021: ref=['LIEUTENANT', 'CROSSTREES', 'IS', 'A', 'VERY', 'GALLANT', 'OFFICER'] +8455-210777-0049-1021: hyp=['LIEUTENANT', 'CROSS', 'TREES', 'IS', 'A', 'VERY', 'GALLANT', 'OFFICER'] +8455-210777-0050-1022: ref=['ONE', 'OF', 'US', 'ALWAYS', 'REMAINS', 'ON', 'BOARD', 'WHILE', 'THE', 'OTHER', 'IS', 'ON', 'SHORE'] +8455-210777-0050-1022: hyp=['ONE', 'OF', 'US', 'ALWAYS', 'REMAINS', 'ON', 'BOARD', 'WHILE', 'THE', 'OTHER', 'IS', 'ON', 'SHORE'] +8455-210777-0051-1023: ref=['WHAT', 'WORLD', 'WIDE', 'INIQUITY', 'SUCH', 'A', 'SPEECH', 'AS', 'THAT', 'DISCLOSES', 'SAID', 'I', 'STILL', 'TURNING', 'MYSELF', 'TO', 'THE', 'CAPTAIN', 'FOR', 'THOUGH', 'I', 'WOULD', 'HAVE', 'CRUSHED', 'THEM', 'BOTH', 'BY', 'MY', 'WORDS', 'HAD', 'IT', 'BEEN', 'POSSIBLE', 'MY', 'DISLIKE', 'CENTRED', 'ITSELF', 'ON', 'SIR', 'FERDINANDO'] +8455-210777-0051-1023: hyp=['WHAT', 'WORLD', 'WIDE', 'INIQUITY', 'SUCH', 'A', 'SPEECH', 'AS', 'THAT', 'DISCLOSES', 'SAID', 'I', 'STILL', 'TURNING', 'MYSELF', 'TO', 'THE', 'CAPTAIN', 'FOR', 'THOUGH', 'I', 'WOULD', 'HAVE', 'CRUSHED', 'THEM', 'BOTH', 'BY', 'MY', 'WORDS', 'HAD', 'IT', 'BEEN', 'POSSIBLE', 'MY', 'DISLIKE', 'SENATE', 'ITSELF', 'ON', 'SIR', 'FERDINANDO'] +8455-210777-0052-1024: ref=['YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'SUGGEST', 'SAID', 'HE', 'THAT', 'THAT', 'IS', 'A', 'MATTER', 'OF', 'OPINION'] +8455-210777-0052-1024: hyp=['YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'SUGGEST', 'SAID', 'HE', 'THAT', 'THAT', 'IS', 'A', 'MATTER', 'OF', 'OPINION'] +8455-210777-0053-1025: ref=['WERE', 'I', 'TO', 'COMPLY', 'WITH', 'YOUR', 'ORDERS', 'WITHOUT', 'EXPRESSING', 'MY', 'OWN', 'OPINION', 'I', 'SHOULD', 'SEEM', 'TO', 'HAVE', 'DONE', 'SO', 'WILLINGLY', 'HEREAFTER'] +8455-210777-0053-1025: hyp=['WERE', 'I', 'TO', 'COMPLY', 'WITH', 'YOUR', 'ORDERS', 'WITHOUT', 'EXPRESSING', 'MY', 'OWN', 'OPINION', 'I', 'SHOULD', 'SEEM', 'TO', 'HAVE', 'DONE', 'SO', 'WILLINGLY', 'HEREAFTER'] +8455-210777-0054-1026: ref=['THE', 'LETTER', 'RAN', 'AS', 'FOLLOWS'] +8455-210777-0054-1026: hyp=['THE', 'LETTER', 'RAN', 'AS', 'FOLLOWS'] +8455-210777-0055-1027: ref=['SIR', 'I', 'HAVE', 'IT', 
'IN', 'COMMAND', 'TO', 'INFORM', 'YOUR', 'EXCELLENCY', 'THAT', 'YOU', 'HAVE', 'BEEN', 'APPOINTED', 'GOVERNOR', 'OF', 'THE', 'CROWN', 'COLONY', 'WHICH', 'IS', 'CALLED', 'BRITANNULA'] +8455-210777-0055-1027: hyp=['SIR', 'I', 'HAVE', 'IT', 'IN', 'COMMAND', 'TO', 'INFORM', 'YOUR', 'EXCELLENCY', 'THAT', 'YOU', 'HAVE', 'BEEN', 'APPOINTED', 'GOVERNOR', 'OF', 'THE', 'CROWN', 'COLONY', 'WHICH', 'IS', 'CALLED', 'BRITAIN', 'ULLA'] +8455-210777-0056-1028: ref=['THE', 'PECULIAR', 'CIRCUMSTANCES', 'OF', 'THE', 'COLONY', 'ARE', 'WITHIN', 'YOUR', "EXCELLENCY'S", 'KNOWLEDGE'] +8455-210777-0056-1028: hyp=['THE', 'PECULIAR', 'CIRCUMSTANCES', 'OF', 'THE', 'COLONY', 'ARE', 'WITHIN', 'YOUR', "EXCELLENCY'S", 'KNOWLEDGE'] +8455-210777-0057-1029: ref=['BUT', 'IN', 'THEIR', 'SELECTION', 'OF', 'A', 'CONSTITUTION', 'THE', 'BRITANNULISTS', 'HAVE', 'UNFORTUNATELY', 'ALLOWED', 'THEMSELVES', 'BUT', 'ONE', 'DELIBERATIVE', 'ASSEMBLY', 'AND', 'HENCE', 'HAVE', 'SPRUNG', 'THEIR', 'PRESENT', 'DIFFICULTIES'] +8455-210777-0057-1029: hyp=['BUT', 'IN', 'THEIR', 'SELECTION', 'OF', 'A', 'CONSTITUTION', 'THE', 'BRITAIN', 'UILISTS', 'HAVE', 'UNFORTUNATELY', 'ALLOWED', 'THEMSELVES', 'BUT', 'ONE', 'DELIBERATE', 'ASSEMBLY', 'AND', 'HENCE', 'HAS', 'SPRUNG', 'THEIR', 'PRESENT', 'DIFFICULTIES'] +8455-210777-0058-1030: ref=['IT', 'IS', 'FOUNDED', 'ON', 'THE', 'ACKNOWLEDGED', 'WEAKNESS', 'OF', 'THOSE', 'WHO', 'SURVIVE', 'THAT', 'PERIOD', 'OF', 'LIFE', 'AT', 'WHICH', 'MEN', 'CEASE', 'TO', 'WORK'] +8455-210777-0058-1030: hyp=['IT', 'IS', 'FOUNDED', 'ON', 'THE', 'ACKNOWLEDGED', 'WEAKNESS', 'OF', 'THOSE', 'WHO', 'SURVIVE', 'THAT', 'PERIOD', 'OF', 'LIFE', 'AT', 'WHICH', 'MEN', 'CEASE', 'TO', 'WORK'] +8455-210777-0059-1031: ref=['BUT', 'IT', 'IS', 'SURMISED', 'THAT', 'YOU', 'WILL', 'FIND', 'DIFFICULTIES', 'IN', 'THE', 'WAY', 'OF', 'YOUR', 'ENTERING', 'AT', 'ONCE', 'UPON', 'YOUR', 'GOVERNMENT'] +8455-210777-0059-1031: hyp=['BUT', 'IT', 'IS', 'SURMISED', 'THAT', 'YOU', 'WILL', 'FIND', 'DIFFICULTIES', 'IN', 'THE', 'WAY', 'OF', 'YOUR', 'ENTERING', 'AT', 'ONCE', 'UPON', 'YOUR', 'GOVERNOR'] +8455-210777-0060-1032: ref=['THE', 'JOHN', 'BRIGHT', 'IS', 'ARMED', 'WITH', 'A', 'WEAPON', 'OF', 'GREAT', 'POWER', 'AGAINST', 'WHICH', 'IT', 'IS', 'IMPOSSIBLE', 'THAT', 'THE', 'PEOPLE', 'OF', 'BRITANNULA', 'SHOULD', 'PREVAIL'] +8455-210777-0060-1032: hyp=['THE', 'JOHN', 'BRIGHT', 'HIS', 'ARM', 'WITH', 'A', 'WEAPON', 'OF', 'GREAT', 'POWER', 'AGAINST', 'WHICH', 'IT', 'IS', 'IMPOSSIBLE', 'THAT', 'THE', 'PEOPLE', 'OF', 'BRITAIN', 'EULO', 'SHOULD', 'PREVAIL'] +8455-210777-0061-1033: ref=['YOU', 'WILL', 'CARRY', 'OUT', 'WITH', 'YOU', 'ONE', 'HUNDRED', 'MEN', 'OF', 'THE', 'NORTH', 'NORTH', 'WEST', 'BIRMINGHAM', 'REGIMENT', 'WHICH', 'WILL', 'PROBABLY', 'SUFFICE', 'FOR', 'YOUR', 'OWN', 'SECURITY', 'AS', 'IT', 'IS', 'THOUGHT', 'THAT', 'IF', 'MISTER', 'NEVERBEND', 'BE', 'WITHDRAWN', 'THE', 'PEOPLE', 'WILL', 'REVERT', 'EASILY', 'TO', 'THEIR', 'OLD', 'HABITS', 'OF', 'OBEDIENCE'] +8455-210777-0061-1033: hyp=['YOU', 'WILL', 'CARRY', 'OUT', 'WITH', 'YOU', 'ONE', 'HUNDRED', 'MEN', 'OF', 'THE', 'NORTH', 'NORTH', 'WEST', 'BIRMINGHAM', 'REGIMENT', 'WHICH', 'WILL', 'PROBABLY', 'SUFFICE', 'FOR', 'YOUR', 'OWN', 'SECURITY', 'AS', 'IT', 'IS', 'THOUGHT', 'THAT', 'IF', 'MISTER', 'NEVERBEND', 'BE', 'WITHDRAWN', 'THE', 'PEOPLE', 'WILL', 'REVERT', 'EASILY', 'TO', 'THEIR', 'OLD', 'HABITS', 'OF', 'OBEDIENCE'] +8455-210777-0062-1034: ref=['WHEN', 'DO', 'YOU', 'INTEND', 'THAT', 'THE', 'JOHN', 'BRIGHT', 'SHALL', 'START'] +8455-210777-0062-1034: hyp=['WHEN', 'DO', 'YOU', 'INTEND', 'THAT', 'THAT', 
'JOHN', 'BRIGHT', 'SHALL', 'START'] +8455-210777-0063-1035: ref=['TO', 'DAY', 'I', 'SHOUTED'] +8455-210777-0063-1035: hyp=['TO', 'DAY', 'I', 'SHOUTED'] +8455-210777-0064-1036: ref=['AND', 'I', 'HAVE', 'NO', 'ONE', 'READY', 'TO', 'WHOM', 'I', 'CAN', 'GIVE', 'UP', 'THE', 'ARCHIVES', 'OF', 'THE', 'GOVERNMENT'] +8455-210777-0064-1036: hyp=['AND', 'I', 'HAVE', 'NO', 'ONE', 'READY', 'TO', 'WHOM', 'I', 'CAN', 'GIVE', 'UP', 'THE', 'ARCHIVES', 'OF', 'THE', 'GOVERNMENT'] +8455-210777-0065-1037: ref=['I', 'SHALL', 'BE', 'HAPPY', 'TO', 'TAKE', 'CHARGE', 'OF', 'THEM', 'SAID', 'SIR', 'FERDINANDO'] +8455-210777-0065-1037: hyp=['I', 'SHALL', 'BE', 'HAPPY', 'TO', 'TAKE', 'CHARGE', 'OF', 'THEM', 'SAID', 'SIR', 'FERDINANDO'] +8455-210777-0066-1038: ref=['THEY', 'OF', 'COURSE', 'MUST', 'ALL', 'BE', 'ALTERED'] +8455-210777-0066-1038: hyp=['THEY', 'OF', 'COURSE', 'MUST', 'ALL', 'BE', 'ALTERED'] +8455-210777-0067-1039: ref=['OR', 'OF', 'THE', 'HABITS', 'OF', 'OUR', 'PEOPLE', 'IT', 'IS', 'QUITE', 'IMPOSSIBLE'] +8455-210777-0067-1039: hyp=['OR', 'OF', 'THE', 'HABITS', 'OF', 'OUR', 'PEOPLE', 'IT', 'IS', 'QUITE', 'IMPOSSIBLE'] +8455-210777-0068-1040: ref=['YOUR', 'POWER', 'IS', 'SUFFICIENT', 'I', 'SAID'] +8455-210777-0068-1040: hyp=['YOUR', 'POWER', 'IS', 'SUFFICIENT', 'I', 'SAID'] +8455-210777-0069-1041: ref=['IF', 'YOU', 'WILL', 'GIVE', 'US', 'YOUR', 'PROMISE', 'TO', 'MEET', 'CAPTAIN', 'BATTLEAX', 'HERE', 'AT', 'THIS', 'TIME', 'TO', 'MORROW', 'WE', 'WILL', 'STRETCH', 'A', 'POINT', 'AND', 'DELAY', 'THE', 'DEPARTURE', 'OF', 'THE', 'JOHN', 'BRIGHT', 'FOR', 'TWENTY', 'FOUR', 'HOURS'] +8455-210777-0069-1041: hyp=['IF', 'YOU', 'WILL', 'GIVE', 'US', 'YOUR', 'PROMISE', 'TO', 'MEET', 'CAPTAIN', 'ADELAX', 'HERE', 'AT', 'THIS', 'TIME', 'TO', 'MORROW', 'WE', 'WILL', 'STRETCH', 'A', 'POINT', 'AND', 'DELAY', 'THE', 'DEPARTURE', 'OF', 'THE', 'JOHN', 'BRIGHT', 'FOR', 'TWENTY', 'FOUR', 'HOURS'] +8455-210777-0070-1042: ref=['AND', 'THIS', 'PLAN', 'WAS', 'ADOPTED', 'TOO', 'IN', 'ORDER', 'TO', 'EXTRACT', 'FROM', 'ME', 'A', 'PROMISE', 'THAT', 'I', 'WOULD', 'DEPART', 'IN', 'PEACE'] +8455-210777-0070-1042: hyp=['AND', 'THIS', 'PLAN', 'WAS', 'ADOPTED', 'TOO', 'IN', 'ORDER', 'TO', 'EXTRACT', 'FROM', 'ME', 'A', 'PROMISE', 'THAT', 'I', 'WOULD', 'DEPART', 'IN', 'PEACE'] +8463-287645-0000-543: ref=['THIS', 'WAS', 'WHAT', 'DID', 'THE', 'MISCHIEF', 'SO', 'FAR', 'AS', 'THE', 'RUNNING', 'AWAY', 'WAS', 'CONCERNED'] +8463-287645-0000-543: hyp=['THIS', 'WAS', 'WHAT', 'DID', 'THE', 'MISCHIEF', 'SO', 'FAR', 'AS', 'THE', 'RUNNING', 'AWAY', 'WAS', 'CONCERNED'] +8463-287645-0001-544: ref=['IT', 'IS', 'HARDLY', 'NECESSARY', 'TO', 'SAY', 'MORE', 'OF', 'THEM', 'HERE'] +8463-287645-0001-544: hyp=['IT', 'IS', 'HARDLY', 'NECESSARY', 'TO', 'SAY', 'MORE', 'OF', 'THEM', 'HERE'] +8463-287645-0002-545: ref=['FROM', 'THE', 'MANNER', 'IN', 'WHICH', 'HE', 'EXPRESSED', 'HIMSELF', 'WITH', 'REGARD', 'TO', 'ROBERT', 'HOLLAN', 'NO', 'MAN', 'IN', 'THE', 'WHOLE', 'RANGE', 'OF', 'HIS', 'RECOLLECTIONS', 'WILL', 'BE', 'LONGER', 'REMEMBERED', 'THAN', 'HE', 'HIS', 'ENTHRALMENT', 'WHILE', 'UNDER', 'HOLLAN', 'WILL', 'HARDLY', 'EVER', 'BE', 'FORGOTTEN'] +8463-287645-0002-545: hyp=['FROM', 'THE', 'MANNER', 'IN', 'WHICH', 'HE', 'EXPRESSED', 'HIMSELF', 'WITH', 'REGARD', 'TO', 'ROBERT', 'HOLLAND', 'NO', 'MAN', 'IN', 'THE', 'WHOLE', 'RANGE', 'OF', 'HIS', 'RECOLLECTIONS', 'WILL', 'BE', 'LONGER', 'REMEMBERED', 'THAN', 'HE', 'HIS', 'ENTHRALLMENT', 'WHILE', 'UNDER', 'HOLLAND', 'WILL', 'HARDLY', 'EVER', 'BE', 'FORGOTTEN'] +8463-287645-0003-546: ref=['OF', 'THIS', 'PARTY', 'EDWARD', 'A', 
'BOY', 'OF', 'SEVENTEEN', 'CALLED', 'FORTH', 'MUCH', 'SYMPATHY', 'HE', 'TOO', 'WAS', 'CLAIMED', 'BY', 'HOLLAN'] +8463-287645-0003-546: hyp=['OF', 'THIS', 'PARTY', 'EDWARD', 'A', 'BOY', 'OF', 'SEVENTEEN', 'CALLED', 'FORTH', 'MUCH', 'SYMPATHY', 'HE', 'TOO', 'WAS', 'CLAIMED', 'BY', 'HOLLAND'] +8463-287645-0004-547: ref=['JOHN', 'WESLEY', 'COMBASH', 'JACOB', 'TAYLOR', 'AND', 'THOMAS', 'EDWARD', 'SKINNER'] +8463-287645-0004-547: hyp=['JOHN', 'WESLEY', 'COMBATCH', 'JACOB', 'TAYLOR', 'AND', 'THOMAS', 'EDWARD', 'SKINNER'] +8463-287645-0005-548: ref=['A', 'FEW', 'YEARS', 'BACK', 'ONE', 'OF', 'THEIR', 'SLAVES', 'A', 'COACHMAN', 'WAS', 'KEPT', 'ON', 'THE', 'COACH', 'BOX', 'ONE', 'COLD', 'NIGHT', 'WHEN', 'THEY', 'WERE', 'OUT', 'AT', 'A', 'BALL', 'UNTIL', 'HE', 'BECAME', 'ALMOST', 'FROZEN', 'TO', 'DEATH', 'IN', 'FACT', 'HE', 'DID', 'DIE', 'IN', 'THE', 'INFIRMARY', 'FROM', 'THE', 'EFFECTS', 'OF', 'THE', 'FROST', 'ABOUT', 'ONE', 'WEEK', 'AFTERWARDS'] +8463-287645-0005-548: hyp=['IF', 'YOU', 'YEARS', 'BACK', 'ONE', 'OF', 'THEIR', 'SLAVES', 'A', 'COACHMAN', 'WAS', 'KEPT', 'ON', 'THE', 'COACH', 'BOX', 'ONE', 'CALLED', 'NIGHT', 'WHEN', 'THEY', 'WERE', 'OUT', 'AT', 'A', 'BALL', 'UNTIL', 'HE', 'BECAME', 'ALMOST', 'FROZEN', 'TO', 'DEATH', 'IN', 'FACT', 'HE', 'DID', 'DIE', 'IN', 'THE', 'INFIRMARY', 'FROM', 'THE', 'EFFECTS', 'OF', 'THE', 'FROST', 'ABOUT', 'ONE', 'WEEK', 'AFTERWARDS'] +8463-287645-0006-549: ref=['THE', 'DOCTOR', 'WHO', 'ATTENDED', 'THE', 'INJURED', 'CREATURE', 'IN', 'THIS', 'CASE', 'WAS', 'SIMPLY', 'TOLD', 'THAT', 'SHE', 'SLIPPED', 'AND', 'FELL', 'DOWN', 'STAIRS', 'AS', 'SHE', 'WAS', 'COMING', 'DOWN'] +8463-287645-0006-549: hyp=['THE', 'DOCTOR', 'WHO', 'ATTENDED', 'THE', 'ANCIENT', 'CREATURE', 'IN', 'THIS', 'CASE', 'WAS', 'SIMPLY', 'TOLD', 'THAT', 'SHE', 'SLIPPED', 'AND', 'FELL', 'DOWN', 'THE', 'STAIRS', 'AS', 'SHE', 'WAS', 'COMING', 'DOWN'] +8463-287645-0007-550: ref=['ANOTHER', 'CASE', 'SAID', 'JOHN', 'WESLEY', 'WAS', 'A', 'LITTLE', 'GIRL', 'HALF', 'GROWN', 'WHO', 'WAS', 'WASHING', 'WINDOWS', 'UP', 'STAIRS', 'ONE', 'DAY', 'AND', 'UNLUCKILY', 'FELL', 'ASLEEP', 'IN', 'THE', 'WINDOW', 'AND', 'IN', 'THIS', 'POSITION', 'WAS', 'FOUND', 'BY', 'HER', 'MISTRESS', 'IN', 'A', 'RAGE', 'THE', 'MISTRESS', 'HIT', 'HER', 'A', 'HEAVY', 'SLAP', 'KNOCKED', 'HER', 'OUT', 'OF', 'THE', 'WINDOW', 'AND', 'SHE', 'FELL', 'TO', 'THE', 'PAVEMENT', 'AND', 'DIED', 'IN', 'A', 'FEW', 'HOURS', 'FROM', 'THE', 'EFFECTS', 'THEREOF'] +8463-287645-0007-550: hyp=['ANOTHER', 'CASE', 'SAID', 'JOHN', 'WESTLEY', 'WAS', 'A', 'LITTLE', 'GIRL', 'HALF', 'GROWN', 'WHO', 'WAS', 'WASHING', 'WINDOWS', 'UPSTAIRS', 'ONE', 'DAY', 'AND', 'UNLUCKILY', 'FELL', 'ASLEEP', 'IN', 'THE', 'WINDOW', 'AND', 'IN', 'THIS', 'POSITION', 'WAS', 'FOUND', 'BY', 'HER', 'MISTRESS', 'IN', 'A', 'RAGE', 'THE', 'MISTRESS', 'HID', 'HER', 'A', 'HEAVY', 'SLAP', 'KNOCKED', 'HER', 'OUT', 'OF', 'THE', 'WINDOW', 'AND', 'SHE', 'FELL', 'TO', 'THE', 'PAVEMENT', 'AND', 'DIED', 'IN', 'A', 'FEW', 'HOURS', 'FROM', 'THE', 'EFFECTS', 'THEREOF'] +8463-287645-0008-551: ref=['AS', 'USUAL', 'NOTHING', 'WAS', 'DONE', 'IN', 'THE', 'WAY', 'OF', 'PUNISHMENT'] +8463-287645-0008-551: hyp=['AS', 'USUAL', 'NOTHING', 'WAS', 'DONE', 'IN', 'THE', 'WAY', 'OF', 'PUNISHMENT'] +8463-287645-0009-552: ref=['I', 'NEVER', 'KNEW', 'OF', 'BUT', 'ONE', 'MAN', 'WHO', 'COULD', 'EVER', 'PLEASE', 'HIM'] +8463-287645-0009-552: hyp=['I', 'NEVER', 'KNEW', 'OF', 'BUT', 'ONE', 'MAN', 'WHO', 'COULD', 'EVER', 'PLEASE', 'HIM'] +8463-287645-0010-553: ref=['HE', 'WORKED', 'ME', 'VERY', 'HARD', 'HE', 'WANTED', 'TO', 'BE', 
'BEATING', 'ME', 'ALL', 'THE', 'TIME'] +8463-287645-0010-553: hyp=['HE', 'WORKED', 'ME', 'VERY', 'HARD', 'HE', 'WANTED', 'TO', 'BE', 'BEATING', 'ME', 'ALL', 'THE', 'TIME'] +8463-287645-0011-554: ref=['SHE', 'WAS', 'A', 'LARGE', 'HOMELY', 'WOMAN', 'THEY', 'WERE', 'COMMON', 'WHITE', 'PEOPLE', 'WITH', 'NO', 'REPUTATION', 'IN', 'THE', 'COMMUNITY'] +8463-287645-0011-554: hyp=['SHE', 'WAS', 'A', 'LARGE', 'HOMELY', 'WOMAN', 'THEY', 'WERE', 'COMMON', 'WHITE', 'PEOPLE', 'WITH', 'NO', 'REPUTATION', 'IN', 'THE', 'COMMUNITY'] +8463-287645-0012-555: ref=['SUBSTANTIALLY', 'THIS', 'WAS', "JACOB'S", 'UNVARNISHED', 'DESCRIPTION', 'OF', 'HIS', 'MASTER', 'AND', 'MISTRESS'] +8463-287645-0012-555: hyp=['SUBSTANTIALLY', 'THIS', 'WAS', "JACOB'S", 'UNVARNISHED', 'DESCRIPTION', 'OF', 'HIS', 'MASTER', 'AND', 'MISTRESS'] +8463-287645-0013-556: ref=['AS', 'TO', 'HIS', 'AGE', 'AND', 'ALSO', 'THE', 'NAME', 'OF', 'HIS', 'MASTER', "JACOB'S", 'STATEMENT', 'VARIED', 'SOMEWHAT', 'FROM', 'THE', 'ADVERTISEMENT'] +8463-287645-0013-556: hyp=['AS', 'TO', 'HIS', 'AGE', 'AND', 'ALSO', 'THE', 'NAME', 'OF', 'HIS', 'MASTER', "JACOB'S", 'STATEMENT', 'VARIED', 'SOMEWHAT', 'FROM', 'THE', 'ADVERTISEMENT'] +8463-287645-0014-557: ref=['OF', 'STARTING', 'I', "DIDN'T", 'KNOW', 'THE', 'WAY', 'TO', 'COME'] +8463-287645-0014-557: hyp=['OF', 'STARTING', 'I', "DIDN'T", 'KNOW', 'THE', 'WAY', 'TO', 'COME'] +8463-294825-0000-558: ref=["IT'S", 'ALMOST', 'BEYOND', 'CONJECTURE'] +8463-294825-0000-558: hyp=["IT'S", 'ALMOST', 'BEYOND', 'CONJECTURE'] +8463-294825-0001-559: ref=['THIS', 'REALITY', 'BEGINS', 'TO', 'EXPLAIN', 'THE', 'DARK', 'POWER', 'AND', 'OTHERWORLDLY', 'FASCINATION', 'OF', 'TWENTY', 'THOUSAND', 'LEAGUES', 'UNDER', 'THE', 'SEAS'] +8463-294825-0001-559: hyp=['THIS', 'REALITY', 'BEGINS', 'TO', 'EXPLAIN', 'THE', 'DARK', 'POWER', 'AND', 'OTHER', 'WORLDLY', 'FASCINATION', 'OF', 'TWENTY', 'THOUSAND', 'LEAGUES', 'UNDER', 'THE', 'SEAS'] +8463-294825-0002-560: ref=['FIRST', 'AS', 'A', 'PARIS', 'STOCKBROKER', 'LATER', 'AS', 'A', 'CELEBRATED', 'AUTHOR', 'AND', 'YACHTSMAN', 'HE', 'WENT', 'ON', 'FREQUENT', 'VOYAGES', 'TO', 'BRITAIN', 'AMERICA', 'THE', 'MEDITERRANEAN'] +8463-294825-0002-560: hyp=['FIRST', 'AS', 'A', 'PARIS', 'DOCKBROKER', 'LATER', 'AS', 'A', 'CELEBRATED', 'AUTHOR', 'AND', 'YACHTSMAN', 'HE', 'WENT', 'ON', 'FREQUENT', 'VOYAGES', 'TO', 'BRITAIN', 'AMERICA', 'THE', 'MEDITERRANEAN'] +8463-294825-0003-561: ref=['NEMO', 'BUILDS', 'A', 'FABULOUS', 'FUTURISTIC', 'SUBMARINE', 'THE', 'NAUTILUS', 'THEN', 'CONDUCTS', 'AN', 'UNDERWATER', 'CAMPAIGN', 'OF', 'VENGEANCE', 'AGAINST', 'HIS', 'IMPERIALIST', 'OPPRESSOR'] +8463-294825-0003-561: hyp=['NEMO', 'BUILDS', 'A', 'FABULOUS', 'FUTURESTIC', 'SUBMARINE', 'THE', 'NAUTILUS', 'THEN', 'CONDUCTS', 'AN', 'UNDERWATER', 'CAMPAIGN', 'OF', 'VENGEANCE', 'AGAINST', 'HIS', 'IMPERIALIST', 'OPPRESSOR'] +8463-294825-0004-562: ref=['IN', 'ALL', 'THE', 'NOVEL', 'HAD', 'A', 'DIFFICULT', 'GESTATION'] +8463-294825-0004-562: hyp=['IN', 'ALL', 'THE', 'NOVEL', 'HEAD', 'A', 'DIFFICULT', 'JUST', 'STATION'] +8463-294825-0005-563: ref=['OTHER', 'SUBTLETIES', 'OCCUR', 'INSIDE', 'EACH', 'EPISODE', 'THE', 'TEXTURES', 'SPARKLING', 'WITH', 'WIT', 'INFORMATION', 'AND', 'INSIGHT'] +8463-294825-0005-563: hyp=['OTHER', 'SUBTLETIES', 'OCCUR', 'INSIDE', 'EACH', 'EPISODE', 'THE', 'TEXTURES', 'SPARKLING', 'WITH', 'WIT', 'INFORMATION', 'AND', 'INSIGHT'] +8463-294825-0006-564: ref=['HIS', 'SPECIFICATIONS', 'FOR', 'AN', 'OPEN', 'SEA', 'SUBMARINE', 'AND', 'A', 'SELF', 'CONTAINED', 'DIVING', 'SUIT', 'WERE', 'DECADES', 'BEFORE', 'THEIR', 'TIME', 
'YET', 'MODERN', 'TECHNOLOGY', 'BEARS', 'THEM', 'OUT', 'TRIUMPHANTLY'] +8463-294825-0006-564: hyp=['HIS', 'SPECIFICATIONS', 'FOR', 'AN', 'OPEN', 'SEA', 'SUBMARINE', 'AND', 'A', 'SELF', 'CONTAINING', 'DIVING', 'SUIT', 'WERE', 'DECADES', 'BEFORE', 'THEIR', 'TIME', 'YET', 'MODERN', 'TECHNOLOGY', 'BEARS', 'THEM', 'OUT', 'TRIUMPHANTLY'] +8463-294825-0007-565: ref=['EVEN', 'THE', 'SUPPORTING', 'CAST', 'IS', 'SHREWDLY', 'DRAWN', 'PROFESSOR', 'ARONNAX', 'THE', 'CAREER', 'SCIENTIST', 'CAUGHT', 'IN', 'AN', 'ETHICAL', 'CONFLICT', 'CONSEIL', 'THE', 'COMPULSIVE', 'CLASSIFIER', 'WHO', 'SUPPLIES', 'HUMOROUS', 'TAG', 'LINES', 'FOR', "VERNE'S", 'FAST', 'FACTS', 'THE', 'HARPOONER', 'NED', 'LAND', 'A', 'CREATURE', 'OF', 'CONSTANT', 'APPETITES', 'MAN', 'AS', 'HEROIC', 'ANIMAL'] +8463-294825-0007-565: hyp=['EVEN', 'THE', 'SUPPORTING', 'CAST', 'IS', 'SHREWDLY', 'DRAWN', 'PROFESSOR', 'ARONNAX', 'THE', 'CAREER', 'SCIENTIST', 'CAUGHT', 'IN', 'AN', 'ETHICAL', 'CONFLICT', 'CONSEIL', 'THE', 'COMPULSIVE', 'CLASSIFIER', 'WHO', 'SUPPLIES', 'HUMOROUS', 'TAG', 'LINES', 'FOR', 'VERNS', 'FAST', 'FACTS', 'THE', 'HARPOONER', 'NED', 'LAND', 'A', 'CREATURE', 'OF', 'CONSTANT', 'APPETITES', 'MAN', 'AS', 'HEROIC', 'ANIMAL'] +8463-294825-0008-566: ref=['BUT', 'MUCH', 'OF', 'THE', "NOVEL'S", 'BROODING', 'POWER', 'COMES', 'FROM', 'CAPTAIN', 'NEMO'] +8463-294825-0008-566: hyp=['BUT', 'MUCH', 'OF', 'THE', 'NOVELS', 'BROODING', 'POWER', 'COMES', 'FROM', 'CAPTAIN', 'NEMO'] +8463-294825-0009-567: ref=['THIS', 'COMPULSION', 'LEADS', 'NEMO', 'INTO', 'UGLY', 'CONTRADICTIONS', "HE'S", 'A', 'FIGHTER', 'FOR', 'FREEDOM', 'YET', 'ALL', 'WHO', 'BOARD', 'HIS', 'SHIP', 'ARE', 'IMPRISONED', 'THERE', 'FOR', 'GOOD', 'HE', 'WORKS', 'TO', 'SAVE', 'LIVES', 'BOTH', 'HUMAN', 'AND', 'ANIMAL', 'YET', 'HE', 'HIMSELF', 'CREATES', 'A', 'HOLOCAUST', 'HE', 'DETESTS', 'IMPERIALISM', 'YET', 'HE', 'LAYS', 'PERSONAL', 'CLAIM', 'TO', 'THE', 'SOUTH', 'POLE'] +8463-294825-0009-567: hyp=['THIS', 'COMPULSION', 'LEADS', 'NEMO', 'INTO', 'UGLY', 'CONTRADICTIONS', 'HE', 'IS', 'A', 'FIGHTER', 'FOR', 'FREEDOM', 'YET', 'ALL', 'WHO', 'BOARD', 'HIS', 'SHIP', 'ARE', 'IMPRISONED', 'THERE', 'FOR', 'GOOD', 'HE', 'WORKS', 'TO', 'SAVE', 'LIVES', 'BOTH', 'HUMAN', 'AND', 'ANIMAL', 'YET', 'HE', 'HIMSELF', 'CREATES', 'A', 'HOHLAST', 'HE', 'DETESTS', 'IMPERIALISM', 'YET', 'HE', 'LAYS', 'PERSONAL', 'CLAIM', 'TO', 'THE', 'SOUTH', 'POLE'] +8463-294825-0010-568: ref=['AND', 'IN', 'THIS', 'LAST', 'ACTION', 'HE', 'FALLS', 'INTO', 'THE', 'CLASSIC', 'SIN', 'OF', 'PRIDE'] +8463-294825-0010-568: hyp=['AND', 'IN', 'THIS', 'LAST', 'ACTION', 'HE', 'FALLS', 'INTO', 'THE', 'CLASSIC', 'SIN', 'OF', 'PRIDE'] +8463-294825-0011-569: ref=["HE'S", 'SWIFTLY', 'PUNISHED'] +8463-294825-0011-569: hyp=['HIS', 'SWIFTLY', 'PUNISHED'] +8463-294825-0012-570: ref=['THE', 'NAUTILUS', 'NEARLY', 'PERISHES', 'IN', 'THE', 'ANTARCTIC', 'AND', 'NEMO', 'SINKS', 'INTO', 'A', 'GROWING', 'DEPRESSION'] +8463-294825-0012-570: hyp=['THE', 'NAUTILUS', 'NEARLY', 'PERISHES', 'IN', 'THE', 'ANTARCTIC', 'AND', 'NEMO', 'SINKS', 'INTO', 'A', 'GROWING', 'DEPRESSION'] +8463-294825-0013-571: ref=['FOR', 'MANY', 'THEN', 'THIS', 'BOOK', 'HAS', 'BEEN', 'A', 'SOURCE', 'OF', 'FASCINATION', 'SURELY', 'ONE', 'OF', 'THE', 'MOST', 'INFLUENTIAL', 'NOVELS', 'EVER', 'WRITTEN', 'AN', 'INSPIRATION', 'FOR', 'SUCH', 'SCIENTISTS', 'AND', 'DISCOVERERS', 'AS', 'ENGINEER', 'SIMON', 'LAKE', 'OCEANOGRAPHER', 'WILLIAM', 'BEEBE', 'POLAR', 'TRAVELER', 'SIR', 'ERNEST', 'SHACKLETON'] +8463-294825-0013-571: hyp=['FOR', 'MANY', 'THEN', 'THIS', 'BOOK', 'HAS', 'BEEN', 'A', 
'SOURCE', 'OF', 'FASCINATION', 'SURELY', 'ONE', 'OF', 'THE', 'MOST', 'INFLUENTIAL', 'NOVELS', 'EVER', 'WRITTEN', 'AND', 'INSPIRATION', 'FOR', 'SUCH', 'SCIENTISTS', 'AND', 'DISCOVERERS', 'AS', 'ENGINEERS', 'SIMON', 'LAKE', 'OCEANOGRAPHER', 'WILLIAM', 'B', 'POLLAR', 'TRAVELLERS', 'ARE', 'EARNEST', 'SHACKLETON'] +8463-294825-0014-572: ref=['FATHOM', 'SIX', 'FEET'] +8463-294825-0014-572: hyp=['FATHOM', 'SIX', 'FEET'] +8463-294825-0015-573: ref=['GRAM', 'ROUGHLY', 'ONE', 'TWENTY', 'EIGHTH', 'OF', 'AN', 'OUNCE'] +8463-294825-0015-573: hyp=['GRAHAM', 'ROUGHLY', 'WON', 'TWENTY', 'EIGHTH', 'OF', 'AN', 'OUNCE'] +8463-294825-0016-574: ref=['MILLIGRAM', 'ROUGHLY', 'ONE', 'TWENTY', 'EIGHT', 'THOUSAND', 'OF', 'AN', 'OUNCE'] +8463-294825-0016-574: hyp=['MILAGRAM', 'ROUGHLY', 'WON', 'TWENTY', 'EIGHT', 'THOUSANDTH', 'OF', 'AN', 'OUNCE'] +8463-294825-0017-575: ref=['LITER', 'ROUGHLY', 'ONE', 'QUART'] +8463-294825-0017-575: hyp=['LEADER', 'ROUGHLY', 'WON', 'COURT'] +8463-294825-0018-576: ref=['METER', 'ROUGHLY', 'ONE', 'YARD', 'THREE', 'INCHES'] +8463-294825-0018-576: hyp=['METER', 'ROUGHLY', 'ONE', 'YARD', 'THREE', 'INCHES'] +8463-294825-0019-577: ref=['MILLIMETER', 'ROUGHLY', 'ONE', 'TWENTY', 'FIFTH', 'OF', 'AN', 'INCH'] +8463-294825-0019-577: hyp=['MILLAMETER', 'ROUGHLY', 'WON', 'TWENTY', 'FIFTH', 'OF', 'AN', 'INCH'] +8463-294828-0000-578: ref=['CHAPTER', 'THREE', 'AS', 'MASTER', 'WISHES'] +8463-294828-0000-578: hyp=['CHAPTER', 'THREE', 'AS', 'MASTER', 'WISHES'] +8463-294828-0001-579: ref=['THREE', 'SECONDS', 'BEFORE', 'THE', 'ARRIVAL', 'OF', 'J', 'B', "HOBSON'S", 'LETTER', 'I', 'NO', 'MORE', 'DREAMED', 'OF', 'CHASING', 'THE', 'UNICORN', 'THAN', 'OF', 'TRYING', 'FOR', 'THE', 'NORTHWEST', 'PASSAGE'] +8463-294828-0001-579: hyp=['THREE', 'SECONDS', 'BEFORE', 'THE', 'ARRIVAL', 'OF', 'J', 'B', "HOBSON'S", 'LETTER', 'I', 'KNOW', 'MORE', 'DREAMED', 'OF', 'CHASING', 'THE', 'UNICORN', 'THAN', 'OF', 'TRYING', 'FOR', 'THE', 'NORTH', 'WEST', 'PASSAGE'] +8463-294828-0002-580: ref=['EVEN', 'SO', 'I', 'HAD', 'JUST', 'RETURNED', 'FROM', 'AN', 'ARDUOUS', 'JOURNEY', 'EXHAUSTED', 'AND', 'BADLY', 'NEEDING', 'A', 'REST'] +8463-294828-0002-580: hyp=['EVEN', 'SO', 'I', 'HAD', 'JUST', 'RETURNED', 'FROM', 'AN', 'ARDUOUS', 'JOURNEY', 'EXHAUSTED', 'AND', 'BADLY', 'NEEDING', 'ARREST'] +8463-294828-0003-581: ref=['I', 'WANTED', 'NOTHING', 'MORE', 'THAN', 'TO', 'SEE', 'MY', 'COUNTRY', 'AGAIN', 'MY', 'FRIENDS', 'MY', 'MODEST', 'QUARTERS', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'MY', 'DEARLY', 'BELOVED', 'COLLECTIONS'] +8463-294828-0003-581: hyp=['I', 'WANTED', 'NOTHING', 'MORE', 'THAN', 'TO', 'SEE', 'MY', 'COUNTRY', 'AGAIN', 'MY', 'FRIENDS', 'MY', 'MODEST', 'QUARTERS', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'MY', 'DEARLY', 'BELOVED', 'COLLECTIONS'] +8463-294828-0004-582: ref=['BUT', 'NOW', 'NOTHING', 'COULD', 'HOLD', 'ME', 'BACK'] +8463-294828-0004-582: hyp=['BUT', 'NOW', 'NOTHING', 'COULD', 'HOLD', 'ME', 'BACK'] +8463-294828-0005-583: ref=['CONSEIL', 'WAS', 'MY', 'MANSERVANT'] +8463-294828-0005-583: hyp=['CONSEIL', 'WAS', 'MY', "MAN'S", 'SERVANT'] +8463-294828-0006-584: ref=['FROM', 'RUBBING', 'SHOULDERS', 'WITH', 'SCIENTISTS', 'IN', 'OUR', 'LITTLE', 'UNIVERSE', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'THE', 'BOY', 'HAD', 'COME', 'TO', 'KNOW', 'A', 'THING', 'OR', 'TWO'] +8463-294828-0006-584: hyp=['FROM', 'RUBBING', 'SHOULDERS', 'WITH', 'SCIENTISTS', 'IN', 'OUR', 'LITTLE', 'UNIVERSE', 'BY', 'THE', 'BOTANICAL', 'GARDENS', 'THE', 'BOY', 'HAD', 'COME', 'TO', 'KNOW', 'A', 'THING', 'OR', 'TWO'] +8463-294828-0007-585: ref=['CLASSIFYING', 'WAS', 
'EVERYTHING', 'TO', 'HIM', 'SO', 'HE', 'KNEW', 'NOTHING', 'ELSE', 'WELL', 'VERSED', 'IN', 'THE', 'THEORY', 'OF', 'CLASSIFICATION', 'HE', 'WAS', 'POORLY', 'VERSED', 'IN', 'ITS', 'PRACTICAL', 'APPLICATION', 'AND', 'I', 'DOUBT', 'THAT', 'HE', 'COULD', 'TELL', 'A', 'SPERM', 'WHALE', 'FROM', 'A', 'BALEEN', 'WHALE'] +8463-294828-0007-585: hyp=['CLASSIFYING', 'WAS', 'EVERYTHING', 'TO', 'HIM', 'SO', 'HE', 'KNEW', 'NOTHING', 'ELSE', 'WILL', 'VERSED', 'IN', 'A', 'THEORY', 'OF', 'CLASSIFICATION', 'HE', 'WAS', 'POORLY', 'VERSED', 'IN', 'ITS', 'PRACTICAL', 'APPLICATION', 'AND', 'I', 'DOUBT', 'THAT', 'HE', 'COULD', 'TELL', 'A', 'SPERM', 'WHALE', 'FROM', 'A', 'BALINE', 'WHALE'] +8463-294828-0008-586: ref=['AND', 'YET', 'WHAT', 'A', 'FINE', 'GALLANT', 'LAD'] +8463-294828-0008-586: hyp=['AND', 'YET', 'WHAT', 'A', 'FINE', 'GALLANT', 'LAD'] +8463-294828-0009-587: ref=['NOT', 'ONCE', 'DID', 'HE', 'COMMENT', 'ON', 'THE', 'LENGTH', 'OR', 'THE', 'HARDSHIPS', 'OF', 'A', 'JOURNEY'] +8463-294828-0009-587: hyp=['NOT', 'ONCE', 'DID', 'HE', 'COMMENT', 'ON', 'THE', 'LENGTH', 'OR', 'THE', 'HARDSHIPS', 'OF', 'THE', 'JOURNEY'] +8463-294828-0010-588: ref=['NEVER', 'DID', 'HE', 'OBJECT', 'TO', 'BUCKLING', 'UP', 'HIS', 'SUITCASE', 'FOR', 'ANY', 'COUNTRY', 'WHATEVER', 'CHINA', 'OR', 'THE', 'CONGO', 'NO', 'MATTER', 'HOW', 'FAR', 'OFF', 'IT', 'WAS'] +8463-294828-0010-588: hyp=['NEVER', 'DID', 'HE', 'OBJECT', 'TO', 'BUCKLING', 'UP', 'HIS', 'SUIT', 'CASE', 'FOR', 'ANY', 'COUNTRY', 'WHATEVER', 'CHINA', 'OR', 'THE', 'CONGO', 'NO', 'MATTER', 'HOW', 'FAR', 'OFF', 'IT', 'WAS'] +8463-294828-0011-589: ref=['HE', 'WENT', 'HERE', 'THERE', 'AND', 'EVERYWHERE', 'IN', 'PERFECT', 'CONTENTMENT'] +8463-294828-0011-589: hyp=['HE', 'WENT', 'HERE', 'THERE', 'AND', 'EVERYWHERE', 'IN', 'PERFECT', 'CONTENTMENT'] +8463-294828-0012-590: ref=['PLEASE', 'FORGIVE', 'ME', 'FOR', 'THIS', 'UNDERHANDED', 'WAY', 'OF', 'ADMITTING', 'I', 'HAD', 'TURNED', 'FORTY'] +8463-294828-0012-590: hyp=['PLEASE', 'FORGIVE', 'ME', 'FOR', 'THIS', 'UNDERHANDED', 'WAY', 'OF', 'ADMITTING', 'THAT', 'I', 'HAD', 'TURNED', 'FORTY'] +8463-294828-0013-591: ref=['HE', 'WAS', 'A', 'FANATIC', 'ON', 'FORMALITY', 'AND', 'HE', 'ONLY', 'ADDRESSED', 'ME', 'IN', 'THE', 'THIRD', 'PERSON', 'TO', 'THE', 'POINT', 'WHERE', 'IT', 'GOT', 'TIRESOME'] +8463-294828-0013-591: hyp=['HE', 'WAS', 'A', 'FANATIC', 'ON', 'FORMALITY', 'AND', 'HE', 'ONLY', 'ADDRESSED', 'ME', 'IN', 'THE', 'THIRD', 'PERSON', 'TO', 'THE', 'POINT', 'WHERE', 'IT', 'GOT', 'TO', 'HYAHSOME'] +8463-294828-0014-592: ref=['THERE', 'WAS', 'GOOD', 'REASON', 'TO', 'STOP', 'AND', 'THINK', 'EVEN', 'FOR', 'THE', "WORLD'S", 'MOST', 'EMOTIONLESS', 'MAN'] +8463-294828-0014-592: hyp=['THERE', 'WAS', 'GOOD', 'REASON', 'TO', 'STOP', 'AND', 'THINK', 'EVEN', 'FOR', 'THE', "WORLD'S", 'MOST', 'EMOTIONLESS', 'MAN'] +8463-294828-0015-593: ref=['CONSEIL', 'I', 'CALLED', 'A', 'THIRD', 'TIME', 'CONSEIL', 'APPEARED'] +8463-294828-0015-593: hyp=['CONSEIL', 'I', 'CALLED', 'A', 'THIRD', 'TIME', 'CONSEIL', 'APPEARED'] +8463-294828-0016-594: ref=['DID', 'MASTER', 'SUMMON', 'ME', 'HE', 'SAID', 'ENTERING'] +8463-294828-0016-594: hyp=['DEAD', 'MASTER', 'SUMMON', 'ME', 'HE', 'SAID', 'ENTERING'] +8463-294828-0017-595: ref=['PACK', 'AS', 'MUCH', 'INTO', 'MY', 'TRUNK', 'AS', 'YOU', 'CAN', 'MY', 'TRAVELING', 'KIT', 'MY', 'SUITS', 'SHIRTS', 'AND', 'SOCKS', "DON'T", 'BOTHER', 'COUNTING', 'JUST', 'SQUEEZE', 'IT', 'ALL', 'IN', 'AND', 'HURRY'] +8463-294828-0017-595: hyp=['PACK', 'AS', 'MUCH', 'INTO', 'MY', 'TRUNK', 'AS', 'YOU', 'CAN', 'MY', 'TRAVELLING', 'KIT', 'MY', 'SUITS', 
'SHIRTS', 'AND', 'SOCKS', "DON'T", 'BOTHER', 'COUNTING', 'JUST', 'SQUEEZE', 'IT', 'ALL', 'IN', 'AND', 'HURRY'] +8463-294828-0018-596: ref=["WE'LL", 'DEAL', 'WITH', 'THEM', 'LATER', 'WHAT'] +8463-294828-0018-596: hyp=["WE'LL", 'DEAL', 'WITH', 'THEM', 'LATER', 'WHAT'] +8463-294828-0019-597: ref=['ANYHOW', "WE'LL", 'LEAVE', 'INSTRUCTIONS', 'TO', 'SHIP', 'THE', 'WHOLE', 'MENAGERIE', 'TO', 'FRANCE'] +8463-294828-0019-597: hyp=['ANYHOW', "WE'LL", 'LIVE', 'INSTRUCTIONS', 'TO', 'SHIP', 'THE', 'WHOLE', 'MENAGERIE', 'TO', 'FRANCE'] +8463-294828-0020-598: ref=['YES', 'WE', 'ARE', 'CERTAINLY', 'I', 'REPLIED', 'EVASIVELY', 'BUT', 'AFTER', 'WE', 'MAKE', 'A', 'DETOUR'] +8463-294828-0020-598: hyp=['YES', 'WE', 'ARE', 'CERTAINLY', 'I', 'REPLIED', 'EVASIVELY', 'BUT', 'AFTER', 'WE', 'MAKE', 'A', 'DETOUR'] +8463-294828-0021-599: ref=['A', 'ROUTE', 'SLIGHTLY', 'LESS', 'DIRECT', "THAT'S", 'ALL'] +8463-294828-0021-599: hyp=['A', 'ROUT', 'SLIGHTLY', 'LESS', 'DIRECT', "THAT'S", 'ALL'] +8463-294828-0022-600: ref=["WE'RE", 'LEAVING', 'ON', 'THE', 'ABRAHAM', 'LINCOLN'] +8463-294828-0022-600: hyp=['WERE', 'LEAVING', 'ON', 'THE', 'ABRAHAM', 'LINCOLN'] +8463-294828-0023-601: ref=['YOU', 'SEE', 'MY', 'FRIEND', "IT'S", 'AN', 'ISSUE', 'OF', 'THE', 'MONSTER', 'THE', 'NOTORIOUS', 'NARWHALE'] +8463-294828-0023-601: hyp=['YOU', 'SEE', 'MY', 'FRIEND', "IT'S", 'AN', 'ISSUE', 'OF', 'THE', 'MONSTER', 'THE', 'NOTORIOUS', 'NARWHALE'] +8463-294828-0024-602: ref=['WE', "DON'T", 'KNOW', 'WHERE', 'IT', 'WILL', 'TAKE', 'US'] +8463-294828-0024-602: hyp=['WE', "DON'T", 'KNOW', 'WHERE', 'IT', 'WILL', 'TAKE', 'US'] +8463-294828-0025-603: ref=['BUT', "WE'RE", 'GOING', 'JUST', 'THE', 'SAME'] +8463-294828-0025-603: hyp=['BUT', "WE'RE", 'GOING', 'JUST', 'THE', 'SAME'] +8463-294828-0026-604: ref=['WE', 'HAVE', 'A', 'COMMANDER', "WHO'S", 'GAME', 'FOR', 'ANYTHING'] +8463-294828-0026-604: hyp=['WE', 'HAVE', 'A', 'COMMANDER', 'WHOSE', 'GAME', 'FOR', 'ANYTHING'] +8463-294828-0027-605: ref=['I', 'LEFT', 'INSTRUCTIONS', 'FOR', 'SHIPPING', 'MY', 'CONTAINERS', 'OF', 'STUFFED', 'ANIMALS', 'AND', 'DRIED', 'PLANTS', 'TO', 'PARIS', 'FRANCE'] +8463-294828-0027-605: hyp=['I', 'LEFT', 'INSTRUCTIONS', 'FOR', 'SHIPPING', 'MY', 'CONTAINERS', 'OF', 'STUFFED', 'ANIMALS', 'AND', 'DRIED', 'PLANTS', 'TO', 'PARIS', 'FRANCE'] +8463-294828-0028-606: ref=['I', 'OPENED', 'A', 'LINE', 'OF', 'CREDIT', 'SUFFICIENT', 'TO', 'COVER', 'THE', 'BABIRUSA', 'AND', 'CONSEIL', 'AT', 'MY', 'HEELS', 'I', 'JUMPED', 'INTO', 'A', 'CARRIAGE'] +8463-294828-0028-606: hyp=['I', 'OPENED', 'A', 'LINE', 'OF', 'CREDIT', 'SUFFICIENT', 'TO', 'COVER', 'THE', 'BARBAROUSA', 'AND', 'CONSEIL', 'AT', 'MY', 'HEELS', 'I', 'JUMPED', 'INTO', 'A', 'CARRIAGE'] +8463-294828-0029-607: ref=['OUR', 'BAGGAGE', 'WAS', 'IMMEDIATELY', 'CARRIED', 'TO', 'THE', 'DECK', 'OF', 'THE', 'FRIGATE', 'I', 'RUSHED', 'ABOARD'] +8463-294828-0029-607: hyp=['OUR', 'BAGGAGE', 'WAS', 'IMMEDIATELY', 'CARRIED', 'TO', 'THE', 'DECK', 'OF', 'THE', 'FRIGATE', 'I', 'RUSHED', 'ABOARD'] +8463-294828-0030-608: ref=['I', 'ASKED', 'FOR', 'COMMANDER', 'FARRAGUT'] +8463-294828-0030-608: hyp=['I', 'ASKED', 'FOR', 'COMMANDER', 'FARRAGUT'] +8463-294828-0031-609: ref=['ONE', 'OF', 'THE', 'SAILORS', 'LED', 'ME', 'TO', 'THE', 'AFTERDECK', 'WHERE', 'I', 'STOOD', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'SMART', 'LOOKING', 'OFFICER', 'WHO', 'EXTENDED', 'HIS', 'HAND', 'TO', 'ME'] +8463-294828-0031-609: hyp=['ONE', 'OF', 'THE', 'SAILORS', 'LED', 'ME', 'TO', 'THE', 'AFTER', 'DECK', 'WHERE', 'I', 'STOOD', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'SMART', 'LOOKING', 
'OFFICER', 'WHO', 'EXTENDED', 'HIS', 'HAND', 'TO', 'ME'] +8463-294828-0032-610: ref=['IN', 'PERSON', 'WELCOME', 'ABOARD', 'PROFESSOR', 'YOUR', 'CABIN', 'IS', 'WAITING', 'FOR', 'YOU'] +8463-294828-0032-610: hyp=['IN', 'PERSON', 'WELCOME', 'ABOARD', 'PROFESSOR', 'YOUR', 'CABIN', 'IS', 'WAITING', 'FOR', 'YOU'] +8463-294828-0033-611: ref=['I', 'WAS', 'WELL', 'SATISFIED', 'WITH', 'MY', 'CABIN', 'WHICH', 'WAS', 'LOCATED', 'IN', 'THE', 'STERN', 'AND', 'OPENED', 'INTO', 'THE', 'OFFICERS', 'MESS'] +8463-294828-0033-611: hyp=['I', 'WAS', 'WELL', 'SATISFIED', 'WITH', 'MY', 'CABIN', 'WHICH', 'WAS', 'LOCATED', 'IN', 'THE', 'STERN', 'AND', 'OPENED', 'INTO', 'THE', "OFFICER'S", 'MESS'] +8463-294828-0034-612: ref=["WE'LL", 'BE', 'QUITE', 'COMFORTABLE', 'HERE', 'I', 'TOLD', 'CONSEIL'] +8463-294828-0034-612: hyp=['WILL', 'BE', 'QUITE', 'COMFORTABLE', 'HERE', 'I', 'TOLD', 'CONSEIL'] +8463-294828-0035-613: ref=['AND', 'SO', 'IF', "I'D", 'BEEN', 'DELAYED', 'BY', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'OR', 'EVEN', 'LESS', 'THE', 'FRIGATE', 'WOULD', 'HAVE', 'GONE', 'WITHOUT', 'ME', 'AND', 'I', 'WOULD', 'HAVE', 'MISSED', 'OUT', 'ON', 'THIS', 'UNEARTHLY', 'EXTRAORDINARY', 'AND', 'INCONCEIVABLE', 'EXPEDITION', 'WHOSE', 'TRUE', 'STORY', 'MIGHT', 'WELL', 'MEET', 'WITH', 'SOME', 'SKEPTICISM'] +8463-294828-0035-613: hyp=['AND', 'SO', 'IF', 'I', 'HAD', 'BEEN', 'DELAYED', 'BY', 'A', 'QUARTER', 'OF', 'AN', 'HOUR', 'OR', 'EVEN', 'LESS', 'THE', 'FRIGATE', 'WOULD', 'HAVE', 'GONE', 'WITHOUT', 'ME', 'AND', 'I', 'WOULD', 'HAVE', 'MISSED', 'OUT', 'ON', 'THIS', 'UNEARTHLY', 'EXTRAORDINARY', 'AND', 'INCONCEIVABLE', 'EXPEDITION', 'WHOSE', 'TRUE', 'STORY', 'MIGHT', 'WELL', 'MEET', 'WITH', 'SOME', 'SKEPTICISM'] +8463-294828-0036-614: ref=['THE', 'WHARVES', 'OF', 'BROOKLYN', 'AND', 'EVERY', 'PART', 'OF', 'NEW', 'YORK', 'BORDERING', 'THE', 'EAST', 'RIVER', 'WERE', 'CROWDED', 'WITH', 'CURIOSITY', 'SEEKERS'] +8463-294828-0036-614: hyp=['THE', 'WHARVES', 'OF', 'BROOKLYN', 'AND', 'EVERY', 'PART', 'OF', 'NEW', 'YORK', 'BORDERING', 'THE', 'EAST', 'RIVER', 'WERE', 'CROWDED', 'WITH', 'CURIOSITY', 'SEEKERS'] +8463-294828-0037-615: ref=['DEPARTING', 'FROM', 'FIVE', 'HUNDRED', 'THOUSAND', 'THROATS', 'THREE', 'CHEERS', 'BURST', 'FORTH', 'IN', 'SUCCESSION'] +8463-294828-0037-615: hyp=['DEPARTING', 'FROM', 'FIVE', 'HUNDRED', 'THOUSAND', 'THROATS', 'THREE', 'CHEERS', 'BURST', 'FORTH', 'IN', 'SUCCESSION'] +8463-294828-0038-616: ref=['THOUSANDS', 'OF', 'HANDKERCHIEFS', 'WERE', 'WAVING', 'ABOVE', 'THESE', 'TIGHTLY', 'PACKED', 'MASSES', 'HAILING', 'THE', 'ABRAHAM', 'LINCOLN', 'UNTIL', 'IT', 'REACHED', 'THE', 'WATERS', 'OF', 'THE', 'HUDSON', 'RIVER', 'AT', 'THE', 'TIP', 'OF', 'THE', 'LONG', 'PENINSULA', 'THAT', 'FORMS', 'NEW', 'YORK', 'CITY'] +8463-294828-0038-616: hyp=['THOUSANDS', 'OF', 'HANDKERCHIEFS', 'WERE', 'WAVING', 'ABOVE', 'THESE', 'TIGHTLY', 'PACKED', 'MASSES', 'HAILING', 'THE', 'ABRAHAM', 'LINCOLN', 'UNTIL', 'IT', 'REACHED', 'THE', 'WATERS', 'OF', 'THE', 'HUDSON', 'RIVER', 'AT', 'THE', 'TIP', 'OF', 'THE', 'LONG', 'PRONUNCILA', 'THAT', 'FORMS', 'NEW', 'YORK', 'CITY'] +8555-284447-0000-2299: ref=['THEN', 'HE', 'RUSHED', 'DOWN', 'STAIRS', 'INTO', 'THE', 'COURTYARD', 'SHOUTING', 'LOUDLY', 'FOR', 'HIS', 'SOLDIERS', 'AND', 'THREATENING', 'TO', 'PATCH', 'EVERYBODY', 'IN', 'HIS', 'DOMINIONS', 'IF', 'THE', 'SAILORMAN', 'WAS', 'NOT', 'RECAPTURED'] +8555-284447-0000-2299: hyp=['THEN', 'HE', 'RUSHED', 'DOWNSTAIRS', 'INTO', 'THE', 'COURTYARD', 'SHOUTING', 'LOUDLY', 'FOR', 'HIS', 'SOLDIERS', 'AND', 'THREATENING', 'TO', 'PATCH', 'EVERYBODY', 'IN', 'HIS', 
'DOMINIONS', 'AT', 'THE', 'SAILORMAN', 'WAS', 'NOT', 'RECAPTURED'] +8555-284447-0001-2300: ref=['HOLD', 'HIM', 'FAST', 'MY', 'MEN', 'AND', 'AS', 'SOON', 'AS', "I'VE", 'HAD', 'MY', 'COFFEE', 'AND', 'OATMEAL', "I'LL", 'TAKE', 'HIM', 'TO', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'AND', 'PATCH', 'HIM'] +8555-284447-0001-2300: hyp=['HOLD', 'HIM', 'FAST', 'TO', 'MY', 'MEN', 'AND', 'AS', 'SOON', 'AS', "I'VE", 'HAD', 'MY', 'COFFEE', 'AN', 'OATMEAL', 'I', 'WILL', 'TAKE', 'HIM', 'TO', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'AND', 'PATCH', 'HIM'] +8555-284447-0002-2301: ref=['I', "WOULDN'T", 'MIND', 'A', 'CUP', 'O', 'COFFEE', 'MYSELF', 'SAID', "CAP'N", 'BILL', "I'VE", 'HAD', "CONSID'BLE", 'EXERCISE', 'THIS', 'MORNIN', 'AND', "I'M", 'ALL', 'READY', 'FOR', 'BREAKFAS'] +8555-284447-0002-2301: hyp=['I', "WOULDN'T", 'MIND', 'A', 'CUP', 'OF', 'COFFEE', 'MYSELF', 'SAID', "CAP'N", 'BILL', 'I', 'HAVE', 'HAD', 'CONSIDERABLE', 'EXERCISE', 'THIS', 'MORNING', 'AND', "I'M", 'ALREADY', 'FOR', 'BREAKFAST'] +8555-284447-0003-2302: ref=['BUT', "CAP'N", 'BILL', 'MADE', 'NO', 'SUCH', 'ATTEMPT', 'KNOWING', 'IT', 'WOULD', 'BE', 'USELESS'] +8555-284447-0003-2302: hyp=['BUT', "CAP'N", 'BILL', 'MADE', 'NO', 'SUCH', 'ATTEMPT', 'KNOWING', 'IT', 'WOULD', 'BE', 'USELESS'] +8555-284447-0004-2303: ref=['AS', 'SOON', 'AS', 'THEY', 'ENTERED', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'THE', 'BOOLOOROO', 'GAVE', 'A', 'YELL', 'OF', 'DISAPPOINTMENT'] +8555-284447-0004-2303: hyp=['AS', 'SOON', 'AS', 'THEY', 'ENTERED', 'THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'THE', 'BOOLOOROO', 'GAVE', 'A', 'YELL', 'OF', 'DISAPPOINTMENT'] +8555-284447-0005-2304: ref=['THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'WAS', 'HIGH', 'AND', 'BIG', 'AND', 'AROUND', 'IT', 'RAN', 'ROWS', 'OF', 'BENCHES', 'FOR', 'THE', 'SPECTATORS', 'TO', 'SIT', 'UPON'] +8555-284447-0005-2304: hyp=['THE', 'ROOM', 'OF', 'THE', 'GREAT', 'KNIFE', 'WAS', 'HIGH', 'AND', 'BIG', 'AND', 'AROUND', 'IT', 'RAN', 'ROWS', 'OF', 'BENCHES', 'FOR', 'THE', 'SPECTATORS', 'TO', 'SIT', 'UPON'] +8555-284447-0006-2305: ref=['IN', 'ONE', 'PLACE', 'AT', 'THE', 'HEAD', 'OF', 'THE', 'ROOM', 'WAS', 'A', 'RAISED', 'PLATFORM', 'FOR', 'THE', 'ROYAL', 'FAMILY', 'WITH', 'ELEGANT', 'THRONE', 'CHAIRS', 'FOR', 'THE', 'KING', 'AND', 'QUEEN', 'AND', 'SIX', 'SMALLER', 'BUT', 'RICHLY', 'UPHOLSTERED', 'CHAIRS', 'FOR', 'THE', 'SNUBNOSED', 'PRINCESSES'] +8555-284447-0006-2305: hyp=['IN', 'ONE', 'PLACE', 'AT', 'THE', 'HEAD', 'OF', 'THE', 'ROOM', 'WAS', 'A', 'RAISED', 'PLATFORM', 'FOR', 'THE', 'ROYAL', 'FAMILY', 'WITH', 'ELEGANT', 'THRONE', 'CHAIRS', 'FOR', 'THE', 'KING', 'AND', 'QUEEN', 'AND', 'SIX', 'SMALLER', 'BUT', 'RICHLY', 'UPHOLSTERED', 'CHAIRS', 'FOR', 'THE', 'SNUBNOSED', 'PRINCESSES'] +8555-284447-0007-2306: ref=['THEREFORE', 'HER', 'MAJESTY', 'PAID', 'NO', 'ATTENTION', 'TO', 'ANYONE', 'AND', 'NO', 'ONE', 'PAID', 'ANY', 'ATTENTION', 'TO', 'HER'] +8555-284447-0007-2306: hyp=['THEREFORE', 'HER', 'MAJESTY', 'PAID', 'NO', 'ATTENTION', 'TO', 'ANY', 'ONE', 'AND', 'NO', 'ONE', 'PAID', 'ANY', 'ATTENTION', 'TO', 'HER'] +8555-284447-0008-2307: ref=['RICH', 'JEWELS', 'OF', 'BLUE', 'STONES', 'GLITTERED', 'UPON', 'THEIR', 'PERSONS', 'AND', 'THE', 'ROYAL', 'LADIES', 'WERE', 'FULLY', 'AS', 'GORGEOUS', 'AS', 'THEY', 'WERE', 'HAUGHTY', 'AND', 'OVERBEARING'] +8555-284447-0008-2307: hyp=['RICH', 'JEWELS', 'OF', 'BLUE', 'STONES', 'GLITTERED', 'UPON', 'THEIR', 'PERSONS', 'AND', 'THE', 'ROYAL', 'LADIES', 'WERE', 'FULLY', 'AS', 'GORGEOUS', 'AS', 'THEY', 'WERE', 'HALTING', 'AND', 'OVERBEARING'] 
+8555-284447-0009-2308: ref=['MORNIN', 'GIRLS', 'HOPE', 'YE', 'FEEL', 'AS', 'WELL', 'AS', 'YE', 'LOOK'] +8555-284447-0009-2308: hyp=['MORNING', 'GIRLS', 'OPIEVILLE', 'AS', 'WELL', 'AS', 'YE', 'LOOK'] +8555-284447-0010-2309: ref=['CONTROL', 'YOURSELVES', 'MY', 'DEARS', 'REPLIED', 'THE', 'BOOLOOROO', 'THE', 'WORST', 'PUNISHMENT', 'I', 'KNOW', 'HOW', 'TO', 'INFLICT', 'ON', 'ANYONE', 'THIS', 'PRISONER', 'IS', 'ABOUT', 'TO', 'SUFFER', "YOU'LL", 'SEE', 'A', 'VERY', 'PRETTY', 'PATCHING', 'MY', 'ROYAL', 'DAUGHTERS'] +8555-284447-0010-2309: hyp=['CONTROL', 'YOURSELVES', 'MY', 'DEARS', 'REPLIED', 'THE', 'BOOLOOROO', 'THE', 'WORST', 'PUNISHMENT', 'I', 'KNOW', 'HOW', 'TO', 'INFLICT', 'ON', 'ANY', 'ONE', 'THIS', 'PRISONER', 'IS', 'ABOUT', 'TO', 'SUFFER', 'YOU', 'WILL', 'SEE', 'A', 'VERY', 'PRETTY', 'PATCHING', 'MY', 'ROYAL', 'DAUGHTERS'] +8555-284447-0011-2310: ref=['SUPPOSE', "IT'S", 'A', 'FRIEND'] +8555-284447-0011-2310: hyp=['SUPPOSE', "IT'S", 'OF', 'BRAND'] +8555-284447-0012-2311: ref=['THE', 'CAPTAIN', 'SHOOK', 'HIS', 'HEAD'] +8555-284447-0012-2311: hyp=['THE', 'CAPTAIN', 'SHOOK', 'HIS', 'HEAD'] +8555-284447-0013-2312: ref=['WHY', 'YOU', 'SAID', 'TO', 'FETCH', 'THE', 'FIRST', 'LIVING', 'CREATURE', 'WE', 'MET', 'AND', 'THAT', 'WAS', 'THIS', 'BILLYGOAT', 'REPLIED', 'THE', 'CAPTAIN', 'PANTING', 'HARD', 'AS', 'HE', 'HELD', 'FAST', 'TO', 'ONE', 'OF', 'THE', "GOAT'S", 'HORNS'] +8555-284447-0013-2312: hyp=['WHY', 'YOU', 'SIT', 'TO', 'FETCH', 'THE', 'FIRST', 'LIVING', 'CREATURE', 'WE', 'MET', 'AND', 'THAT', 'WAS', 'THE', 'SPILLY', 'GOAT', 'REPLIED', 'THE', 'CAPTAIN', 'PANTING', 'HARD', 'AS', 'HE', 'HELD', 'FAST', 'TO', 'ONE', 'OF', 'THE', "GOAT'S", 'HORNS'] +8555-284447-0014-2313: ref=['THE', 'IDEA', 'OF', 'PATCHING', "CAP'N", 'BILL', 'TO', 'A', 'GOAT', 'WAS', 'VASTLY', 'AMUSING', 'TO', 'HIM', 'AND', 'THE', 'MORE', 'HE', 'THOUGHT', 'OF', 'IT', 'THE', 'MORE', 'HE', 'ROARED', 'WITH', 'LAUGHTER'] +8555-284447-0014-2313: hyp=['THE', 'IDEA', 'OF', 'PATCHING', "CAP'N", 'BILL', 'TO', 'A', 'GOAT', 'WAS', 'VASTLY', 'AMUSING', 'TO', 'HIM', 'AND', 'THE', 'MORE', 'HE', 'THOUGHT', 'OF', 'IT', 'THE', 'MORE', 'HE', 'ROARED', 'WITH', 'LAUGHTER'] +8555-284447-0015-2314: ref=['THEY', 'LOOK', 'SOMETHING', 'ALIKE', 'YOU', 'KNOW', 'SUGGESTED', 'THE', 'CAPTAIN', 'OF', 'THE', 'GUARDS', 'LOOKING', 'FROM', 'ONE', 'TO', 'THE', 'OTHER', 'DOUBTFULLY', 'AND', "THEY'RE", 'NEARLY', 'THE', 'SAME', 'SIZE', 'IF', 'YOU', 'STAND', 'THE', 'GOAT', 'ON', 'HIS', 'HIND', 'LEGS', "THEY'VE", 'BOTH', 'GOT', 'THE', 'SAME', 'STYLE', 'OF', 'WHISKERS', 'AND', "THEY'RE", 'BOTH', 'OF', 'EM', 'OBSTINATE', 'AND', 'DANGEROUS', 'SO', 'THEY', 'OUGHT', 'TO', 'MAKE', 'A', 'GOOD', 'PATCH', 'SPLENDID'] +8555-284447-0015-2314: hyp=['THEY', 'LOOK', 'SOMETHING', 'ALIKE', 'YOU', 'KNOW', 'SUGGESTED', 'THE', 'CAPTAIN', 'OF', 'THE', 'GUARDS', 'LOOKING', 'FROM', 'ONE', 'TO', 'THE', 'OTHER', 'DOUBTFULLY', 'AND', "THEY'RE", 'NEARLY', 'THE', 'SAME', 'SIZE', 'IF', 'HE', 'STAND', 'A', 'BOAT', 'ON', 'HIS', 'HIND', 'LEGS', "THEY'VE", 'BOTH', 'GOT', 'THE', 'SAME', 'STYLE', 'OF', 'WHISKERS', 'AND', "THEY'RE", 'BOTH', 'OF', 'THEM', 'OBSTINATE', 'AND', 'DANGEROUS', 'SO', 'THEY', 'OUGHT', 'TO', 'MAKE', 'A', 'GOOD', 'PATCH', 'SPLENDID'] +8555-284447-0016-2315: ref=['FINE', 'GLORIOUS'] +8555-284447-0016-2315: hyp=['FINE', 'GLORIOUS'] +8555-284447-0017-2316: ref=['WHEN', 'THIS', 'HAD', 'BEEN', 'ACCOMPLISHED', 'THE', 'BOOLOOROO', 'LEANED', 'OVER', 'TO', 'TRY', 'TO', 'DISCOVER', 'WHY', 'THE', 'FRAME', 'ROLLED', 'AWAY', 'SEEMINGLY', 'OF', 'ITS', 'OWN', 'ACCORD', 'AND', 'HE', 'WAS', 'THE', 
'MORE', 'PUZZLED', 'BECAUSE', 'IT', 'HAD', 'NEVER', 'DONE', 'SUCH', 'A', 'THING', 'BEFORE'] +8555-284447-0017-2316: hyp=['WHEN', 'THIS', 'HAD', 'BEEN', 'ACCOMPLISHED', 'THE', 'BOOLOOROO', 'LEANED', 'OVER', 'TO', 'TRY', 'TO', 'DISCOVER', 'WHY', 'THE', 'FRAME', 'ROLLED', 'AWAY', 'SEEMINGLY', 'OF', 'ITS', 'OWN', 'ACCORD', 'AND', 'HE', 'WAS', 'THE', 'MORE', 'PUZZLED', 'BECAUSE', 'IT', 'HAD', 'NEVER', 'DONE', 'SUCH', 'A', 'THING', 'BEFORE'] +8555-284447-0018-2317: ref=['AT', 'ONCE', 'THE', 'GOAT', 'GAVE', 'A', 'LEAP', 'ESCAPED', 'FROM', 'THE', 'SOLDIERS', 'AND', 'WITH', 'BOWED', 'HEAD', 'RUSHED', 'UPON', 'THE', 'BOOLOOROO'] +8555-284447-0018-2317: hyp=['AT', 'ONCE', 'THE', 'GOAT', 'GAVE', 'A', 'LEAP', 'ESCAPE', 'FROM', 'THE', 'SOLDIERS', 'AND', 'WITH', 'BOWED', 'HEAD', 'RUSHED', 'UPON', 'THE', 'BOOLOOROO'] +8555-284447-0019-2318: ref=['BEFORE', 'ANY', 'COULD', 'STOP', 'HIM', 'HE', 'BUTTED', 'HIS', 'MAJESTY', 'SO', 'FURIOUSLY', 'THAT', 'THE', 'KING', 'SOARED', 'FAR', 'INTO', 'THE', 'AIR', 'AND', 'TUMBLED', 'IN', 'A', 'HEAP', 'AMONG', 'THE', 'BENCHES', 'WHERE', 'HE', 'LAY', 'MOANING', 'AND', 'GROANING'] +8555-284447-0019-2318: hyp=['BEFORE', 'ANY', 'COULD', 'STOP', 'HIM', 'HE', 'BUTTED', 'HIS', 'MAJESTY', 'SO', 'FURIOUSLY', 'THAT', 'THE', 'KING', 'SOARED', 'FAR', 'INTO', 'THE', 'AIR', 'AND', 'TUMBLED', 'IN', 'A', 'HEAP', 'AMONG', 'THE', 'BENCHES', 'WHERE', 'HE', 'LAY', 'MOANING', 'AND', 'GROANING'] +8555-284447-0020-2319: ref=['THE', "GOAT'S", 'WARLIKE', 'SPIRIT', 'WAS', 'ROUSED', 'BY', 'THIS', 'SUCCESSFUL', 'ATTACK'] +8555-284447-0020-2319: hyp=['THE', 'GOATS', 'WORE', 'LIKE', 'SPIRIT', 'WAS', 'ROUSED', 'BY', 'THIS', 'SUCCESSFUL', 'ATTACK'] +8555-284447-0021-2320: ref=['THEN', 'THEY', 'SPED', 'IN', 'GREAT', 'HASTE', 'FOR', 'THE', 'DOOR', 'AND', 'THE', 'GOAT', 'GAVE', 'A', 'FINAL', 'BUTT', 'THAT', 'SENT', 'THE', 'ROW', 'OF', 'ROYAL', 'LADIES', 'ALL', 'DIVING', 'INTO', 'THE', 'CORRIDOR', 'IN', 'ANOTHER', 'TANGLE', 'WHEREUPON', 'THEY', 'SHRIEKED', 'IN', 'A', 'MANNER', 'THAT', 'TERRIFIED', 'EVERYONE', 'WITHIN', 'SOUND', 'OF', 'THEIR', 'VOICES'] +8555-284447-0021-2320: hyp=['THEN', 'THEY', 'SPED', 'IN', 'GREAT', 'HASTE', 'FOR', 'THE', 'DOOR', 'AND', 'THE', 'GOAT', 'GAVE', 'A', 'FINAL', 'BUT', 'THAT', 'SENT', 'A', 'ROW', 'OF', 'ROYAL', 'LADIES', 'ALL', 'DIVING', 'INTO', 'THE', 'CORRIDOR', 'IN', 'ANOTHER', 'TANGLE', 'WHEREUPON', 'THEY', 'SHRIEKED', 'IN', 'A', 'MANNER', 'THAT', 'TERRIFIED', 'EVERYONE', 'WITHIN', 'SOUND', 'OF', 'THEIR', 'VOICES'] +8555-284447-0022-2321: ref=['I', 'HAD', 'A', 'NOTION', 'IT', 'WAS', 'YOU', 'MATE', 'AS', 'SAVED', 'ME', 'FROM', 'THE', 'KNIFE'] +8555-284447-0022-2321: hyp=['I', 'HAD', 'A', 'NOTION', 'IT', 'WAS', 'YOU', 'MADE', 'TO', 'SEE', 'ME', 'FROM', 'THE', 'KNIFE'] +8555-284447-0023-2322: ref=['I', "COULDN'T", 'SHIVER', 'MUCH', 'BEIN', 'BOUND', 'SO', 'TIGHT', 'BUT', 'WHEN', "I'M", 'LOOSE', 'I', 'MEAN', 'TO', 'HAVE', 'JUS', 'ONE', 'GOOD', 'SHIVER', 'TO', 'RELIEVE', 'MY', "FEELIN'S"] +8555-284447-0023-2322: hyp=['I', "COULDN'", 'SHIVER', 'MUCH', 'BEING', 'BOUND', 'SO', 'TIGHT', 'BUT', 'WHEN', "I'M", 'LOOSE', 'I', 'MEAN', 'TO', 'HAVE', 'JUST', 'SWUNG', 'GOOD', 'SHIVER', 'TO', 'RELIEVE', 'MY', 'FEELINS'] +8555-284447-0024-2323: ref=['COME', 'AND', 'GET', 'THE', 'BOOLOOROO', 'SHE', 'SAID', 'GOING', 'TOWARD', 'THE', 'BENCHES'] +8555-284447-0024-2323: hyp=['COME', 'AND', 'GET', 'THE', 'BOOLOOROO', 'SHE', 'SAID', 'GOING', 'TOWARD', 'THE', 'BENCHES'] +8555-284449-0000-2324: ref=['SO', 'THEY', 'WERE', 'QUITE', 'WILLING', 'TO', 'OBEY', 'THE', 'ORDERS', 'OF', 'THEIR', 'GIRL', 
'QUEEN', 'AND', 'IN', 'A', 'SHORT', 'TIME', 'THE', 'BLASTS', 'OF', 'TRUMPETS', 'AND', 'ROLL', 'OF', 'DRUMS', 'AND', 'CLASHING', 'OF', 'CYMBALS', 'TOLD', 'TROT', 'AND', "CAP'N", 'BILL', 'THAT', 'THE', 'BLUE', 'BANDS', 'HAD', 'ASSEMBLED', 'BEFORE', 'THE', 'PALACE'] +8555-284449-0000-2324: hyp=['SO', 'THEY', 'WERE', 'QUITE', 'WILLING', 'TO', 'OBEY', 'THE', 'ORDERS', 'OF', 'THEIR', 'GIRL', 'QUEEN', 'AND', 'IN', 'A', 'SHORT', 'TIME', 'THE', 'BLAST', 'OF', 'TRUMPETS', 'AND', 'ROLL', 'OF', 'DRUMS', 'AND', 'CLASHING', 'OF', 'CYMBALS', 'TOLD', 'TROT', 'AND', "CAP'N", 'BILL', 'THAT', 'THE', 'BLUE', 'BANDS', 'HAD', 'A', 'SIMPLED', 'BEFORE', 'THE', 'PALACE'] +8555-284449-0001-2325: ref=['THEN', 'THEY', 'ALL', 'MARCHED', 'OUT', 'A', 'LITTLE', 'WAY', 'INTO', 'THE', 'FIELDS', 'AND', 'FOUND', 'THAT', 'THE', 'ARMY', 'OF', 'PINKIES', 'HAD', 'ALREADY', 'FORMED', 'AND', 'WAS', 'ADVANCING', 'STEADILY', 'TOWARD', 'THEM'] +8555-284449-0001-2325: hyp=['THEN', 'THEY', 'ALL', 'MARCHED', 'OUT', 'A', 'LITTLE', 'WAY', 'INTO', 'THE', 'FIELDS', 'AND', 'FOUND', 'THAT', 'THE', 'ARMY', 'OF', 'PINKIES', 'HAD', 'ALREADY', 'FORMED', 'AND', 'WAS', 'ADVANCING', 'STEADILY', 'TOWARD', 'THEM'] +8555-284449-0002-2326: ref=['AT', 'THE', 'HEAD', 'OF', 'THE', 'PINKIES', 'WERE', 'GHIP', 'GHISIZZLE', 'AND', 'BUTTON', 'BRIGHT', 'WHO', 'HAD', 'THE', 'PARROT', 'ON', 'HIS', 'SHOULDER', 'AND', 'THEY', 'WERE', 'SUPPORTED', 'BY', 'CAPTAIN', 'CORALIE', 'AND', 'CAPTAIN', 'TINTINT', 'AND', 'ROSALIE', 'THE', 'WITCH'] +8555-284449-0002-2326: hyp=['AT', 'THE', 'HEAD', 'OF', 'THE', 'PINKIES', 'WERE', 'GHIP', 'GHISIZZLE', 'AND', 'BUTTON', 'BRIGHT', 'WHO', 'HAD', 'THE', 'PARROT', 'ON', 'HIS', 'SHOULDER', 'AND', 'THEY', 'WERE', 'SUPPORTED', 'BY', 'CAPTAIN', 'CORLEY', 'AND', 'CAPTAIN', 'TINTANT', 'AND', 'ROSALIE', 'THE', 'WITCH'] +8555-284449-0003-2327: ref=['WHEN', 'THE', 'BLUESKINS', 'SAW', 'GHIP', 'GHISIZZLE', 'THEY', 'RAISED', 'ANOTHER', 'GREAT', 'SHOUT', 'FOR', 'HE', 'WAS', 'THE', 'FAVORITE', 'OF', 'THE', 'SOLDIERS', 'AND', 'VERY', 'POPULAR', 'WITH', 'ALL', 'THE', 'PEOPLE'] +8555-284449-0003-2327: hyp=['WHEN', 'THE', 'BLUESKIN', 'SAW', 'GHIP', 'GHISIZZLE', 'THEY', 'RAISED', 'ANOTHER', 'GREAT', 'SHOUT', 'FOR', 'HE', 'WAS', 'THE', 'FAVOURITE', 'OF', 'THE', 'SOLDIERS', 'AND', 'VERY', 'POPULAR', 'WITH', 'ALL', 'THE', 'PEOPLE'] +8555-284449-0004-2328: ref=['SINCE', 'LAST', 'THURSDAY', 'I', 'GHIP', 'GHISIZZLE', 'HAVE', 'BEEN', 'THE', 'LAWFUL', 'BOOLOOROO', 'OF', 'THE', 'BLUE', 'COUNTRY', 'BUT', 'NOW', 'THAT', 'YOU', 'ARE', 'CONQUERED', 'BY', 'QUEEN', 'TROT', 'I', 'SUPPOSE', 'I', 'AM', 'CONQUERED', 'TOO', 'AND', 'YOU', 'HAVE', 'NO', 'BOOLOOROO', 'AT', 'ALL'] +8555-284449-0004-2328: hyp=['SINCE', 'LAST', 'THURSDAY', 'I', 'GHISIZZLE', 'HAVE', 'BEEN', 'THE', 'LAWFUL', 'BOOLOOROO', 'OF', 'THE', 'BLUE', 'COUNTRY', 'BUT', 'NOW', 'THAT', 'YOU', 'ARE', 'CONQUERED', 'BY', 'QUEEN', 'TROT', 'I', 'SUPPOSE', 'I', 'AM', 'CONQUERED', 'TOO', 'AND', 'YOU', 'HAVE', 'NO', 'BOOLOOROO', 'AT', 'ALL'] +8555-284449-0005-2329: ref=['WHEN', 'HE', 'FINISHED', 'SHE', 'SAID', 'CHEERFULLY'] +8555-284449-0005-2329: hyp=['WHEN', 'HE', 'FINISHED', 'SHE', 'SAID', 'CHEERFULLY'] +8555-284449-0006-2330: ref=["DON'T", 'WORRY', 'SIZZLE', 'DEAR', "IT'LL", 'ALL', 'COME', 'RIGHT', 'PRETTY', 'SOON'] +8555-284449-0006-2330: hyp=["DON'T", 'WORRY', 'SIZZLE', 'DEAR', 'IT', 'ALL', 'COME', 'RIGHT', 'PRETTY', 'SOON'] +8555-284449-0007-2331: ref=['NOW', 'THEN', "LET'S", 'ENTER', 'THE', 'CITY', 'AN', 'ENJOY', 'THE', 'GRAND', 'FEAST', "THAT'S", 'BEING', 'COOKED', "I'M", 'NEARLY', 'STARVED', 'MYSELF', 'FOR', 
'THIS', 'CONQUERIN', 'KINGDOMS', 'IS', 'HARD', 'WORK'] +8555-284449-0007-2331: hyp=['NOW', 'THEN', "LET'S", 'ENTER', 'THE', 'CITY', 'AND', 'ENJOY', 'THE', 'GREAT', 'FEAST', 'ITS', 'BEING', 'COOKED', "I'M", 'NEARLY', 'STARVED', 'MYSELF', 'FOR', 'THIS', 'CONQUERING', "KINGDOM'S", 'IS', 'HARD', 'WORK'] +8555-284449-0008-2332: ref=['THEN', 'SHE', 'GAVE', 'ROSALIE', 'BACK', 'HER', 'MAGIC', 'RING', 'THANKING', 'THE', 'KIND', 'WITCH', 'FOR', 'ALL', 'SHE', 'HAD', 'DONE', 'FOR', 'THEM'] +8555-284449-0008-2332: hyp=['THEN', 'SHE', 'GAVE', 'ROSALIE', 'BACK', 'HER', 'MAGIC', 'RING', 'THANKING', 'THE', 'KIND', 'WHICH', 'FOR', 'ALL', 'SHE', 'HAD', 'DONE', 'FOR', 'THEM'] +8555-284449-0009-2333: ref=['YOU', 'ARE', 'MATE', 'REPLIED', 'THE', 'SAILOR'] +8555-284449-0009-2333: hyp=['YOU', 'ARE', 'MATE', 'REPLIED', 'THE', 'SAILOR'] +8555-284449-0010-2334: ref=['IT', 'WILL', 'BE', 'SUCH', 'A', 'SATISFACTION'] +8555-284449-0010-2334: hyp=['IT', 'WILL', 'BE', 'SUCH', 'A', 'SATISFACTION'] +8555-284449-0011-2335: ref=['THE', 'GUARDS', 'HAD', 'A', 'TERRIBLE', 'STRUGGLE', 'WITH', 'THE', 'GOAT', 'WHICH', 'WAS', 'LOOSE', 'IN', 'THE', 'ROOM', 'AND', 'STILL', 'WANTED', 'TO', 'FIGHT', 'BUT', 'FINALLY', 'THEY', 'SUBDUED', 'THE', 'ANIMAL', 'AND', 'THEN', 'THEY', 'TOOK', 'THE', 'BOOLOOROO', 'OUT', 'OF', 'THE', 'FRAME', 'HE', 'WAS', 'TIED', 'IN', 'AND', 'BROUGHT', 'BOTH', 'HIM', 'AND', 'THE', 'GOAT', 'BEFORE', 'QUEEN', 'TROT', 'WHO', 'AWAITED', 'THEM', 'IN', 'THE', 'THRONE', 'ROOM', 'OF', 'THE', 'PALACE'] +8555-284449-0011-2335: hyp=['THE', 'GUARDS', 'HAD', 'A', 'TERRIBLE', 'STRUGGLE', 'WITH', 'THE', 'GOAT', 'WHICH', 'WAS', 'LOOSE', 'IN', 'THE', 'ROOM', 'AND', 'STILL', 'WANTED', 'TO', 'FIGHT', 'BUT', 'FINALLY', 'THEY', 'SUBDUED', 'THE', 'ANIMAL', 'AND', 'THEN', 'THEY', 'TOOK', 'THE', 'BOOLOOROO', 'OUT', 'OF', 'THE', 'FRAME', 'WHOSE', 'TIED', 'IN', 'AND', 'BROUGHT', 'BOTH', 'HIM', 'AND', 'THE', 'GOAT', 'BEFORE', 'QUEEN', 'TROT', 'WHO', 'AWAITED', 'THEM', 'IN', 'THE', 'THRONE', 'ROOM', 'OF', 'THE', 'PALACE'] +8555-284449-0012-2336: ref=["I'LL", 'GLADLY', 'DO', 'THAT', 'PROMISED', 'THE', 'NEW', 'BOOLOOROO', 'AND', "I'LL", 'FEED', 'THE', 'HONORABLE', 'GOAT', 'ALL', 'THE', 'SHAVINGS', 'AND', 'LEATHER', 'AND', 'TIN', 'CANS', 'HE', 'CAN', 'EAT', 'BESIDES', 'THE', 'GRASS'] +8555-284449-0012-2336: hyp=['I', 'WILL', 'GLADLY', 'DO', 'THAT', 'PROMISED', 'THE', 'NEW', 'BOOLOOROO', 'AND', "I'LL", 'FEED', 'THE', 'HON', 'GO', 'TO', 'ALL', 'THE', 'SHAVINGS', 'AND', 'LEATHER', 'AND', 'TIN', 'CANS', 'HE', 'CAN', 'EAT', 'BESIDES', 'THE', 'GRASS'] +8555-284449-0013-2337: ref=['SCUSE', 'ME', 'SAID', 'TROT', 'I', 'NEGLECTED', 'TO', 'TELL', 'YOU', 'THAT', "YOU'RE", 'NOT', 'THE', 'BOOLOOROO', 'ANY', 'MORE'] +8555-284449-0013-2337: hyp=['EXCUSE', 'ME', 'SAID', 'TROT', 'I', 'NEGLECTED', 'TO', 'TELL', 'YOU', 'THAT', "YOU'RE", 'NOT', 'THE', 'BOOLOOROO', 'ANY', 'MORE'] +8555-284449-0014-2338: ref=['THE', 'FORMER', 'BOOLOOROO', 'GROANED'] +8555-284449-0014-2338: hyp=['THE', 'FORMER', 'BOOLOOROO', 'GROANED'] +8555-284449-0015-2339: ref=["I'LL", 'NOT', 'BE', 'WICKED', 'ANY', 'MORE', 'SIGHED', 'THE', 'OLD', 'BOOLOOROO', "I'LL", 'REFORM'] +8555-284449-0015-2339: hyp=['HOW', 'NOW', 'BE', 'WICKED', 'ANY', 'MORE', 'SIGHED', 'THE', 'OLD', 'BOOLOOROO', "I'LL", 'REFORM'] +8555-284449-0016-2340: ref=['AS', 'A', 'PRIVATE', 'CITIZEN', 'I', 'SHALL', 'BE', 'A', 'MODEL', 'OF', 'DEPORTMENT', 'BECAUSE', 'IT', 'WOULD', 'BE', 'DANGEROUS', 'TO', 'BE', 'OTHERWISE'] +8555-284449-0016-2340: hyp=['AS', 'A', 'PRIVATE', 'CITIZEN', 'I', 'SHALL', 'BE', 'A', 'MODEL', 'OF', 
'DEPORTMENT', 'BECAUSE', 'IT', 'WOULD', 'BE', 'DANGEROUS', 'TO', 'BE', 'OTHERWISE'] +8555-284449-0017-2341: ref=['WHEN', 'FIRST', 'THEY', 'ENTERED', 'THE', 'THRONE', 'ROOM', 'THEY', 'TRIED', 'TO', 'BE', 'AS', 'HAUGHTY', 'AND', 'SCORNFUL', 'AS', 'EVER', 'BUT', 'THE', 'BLUES', 'WHO', 'WERE', 'ASSEMBLED', 'THERE', 'ALL', 'LAUGHED', 'AT', 'THEM', 'AND', 'JEERED', 'THEM', 'FOR', 'THERE', 'WAS', 'NOT', 'A', 'SINGLE', 'PERSON', 'IN', 'ALL', 'THE', 'BLUE', 'COUNTRY', 'WHO', 'LOVED', 'THE', 'PRINCESSES', 'THE', 'LEAST', 'LITTLE', 'BIT'] +8555-284449-0017-2341: hyp=['WHEN', 'FIRST', 'THEY', 'ENTERED', 'THE', 'THRONE', 'ROOM', 'THEY', 'TRIED', 'TO', 'BE', 'AS', 'HAUGHTY', 'AND', 'SCORNFUL', 'AS', 'EVER', 'BUT', 'THE', 'BLUES', 'WHO', 'WERE', 'ASSEMBLED', 'THERE', 'ALL', 'LAUGHED', 'AT', 'THEM', 'AND', 'JEERED', 'THEM', 'FOR', 'THERE', 'WAS', 'NOT', 'A', 'SINGLE', 'PERSON', 'IN', 'ALL', 'THE', 'BLUE', 'COUNTRY', 'WHO', 'LOVED', 'THE', 'PRINCESSES', 'THE', 'LEAST', 'LITTLE', 'BIT'] +8555-284449-0018-2342: ref=['SO', 'GHIP', 'GHISIZZLE', 'ORDERED', 'THE', 'CAPTAIN', 'TO', 'TAKE', 'A', 'FILE', 'OF', 'SOLDIERS', 'AND', 'ESCORT', 'THE', 'RAVING', 'BEAUTIES', 'TO', 'THEIR', 'NEW', 'HOME'] +8555-284449-0018-2342: hyp=['SO', 'GHIP', 'GHISIZZLE', 'ORDERED', 'THE', 'CAPTAIN', 'TO', 'TAKE', 'A', 'FILE', 'OF', 'SOLDIERS', 'AND', 'ESCORT', 'THE', 'RAVING', 'BEAUTIES', 'TO', 'THEIR', 'NEW', 'HOME'] +8555-284449-0019-2343: ref=['THAT', 'EVENING', 'TROT', 'GAVE', 'A', 'GRAND', 'BALL', 'IN', 'THE', 'PALACE', 'TO', 'WHICH', 'THE', 'MOST', 'IMPORTANT', 'OF', 'THE', 'PINKIES', 'AND', 'THE', 'BLUESKINS', 'WERE', 'INVITED'] +8555-284449-0019-2343: hyp=['THAT', 'EVENING', 'TROT', 'GAVE', 'A', 'GRAND', 'BALL', 'IN', 'THE', 'PALACE', 'TO', 'WHICH', 'THE', 'MOST', 'IMPORTANT', 'OF', 'THE', 'PINKIES', 'IN', 'THE', 'BLUESKINS', 'WERE', 'INVITED'] +8555-284449-0020-2344: ref=['THE', 'COMBINED', 'BANDS', 'OF', 'BOTH', 'THE', 'COUNTRIES', 'PLAYED', 'THE', 'MUSIC', 'AND', 'A', 'FINE', 'SUPPER', 'WAS', 'SERVED'] +8555-284449-0020-2344: hyp=['THE', 'COMBINED', 'BANDS', 'OF', 'BOTH', 'THE', 'COUNTRIES', 'PLAYED', 'THE', 'MUSIC', 'AND', 'A', 'FINE', 'SUPPER', 'WAS', 'SERVED'] +8555-292519-0000-2283: ref=['BRIGHTER', 'THAN', 'EARLY', "DAWN'S", 'MOST', 'BRILLIANT', 'DYE', 'ARE', 'BLOWN', 'CLEAR', 'BANDS', 'OF', 'COLOR', 'THROUGH', 'THE', 'SKY', 'THAT', 'SWIRL', 'AND', 'SWEEP', 'AND', 'MEET', 'TO', 'BREAK', 'AND', 'FOAM', 'LIKE', 'RAINBOW', 'VEILS', 'UPON', 'A', "BUBBLE'S", 'DOME'] +8555-292519-0000-2283: hyp=['BRIGHTER', 'THAN', 'EARLY', 'DAWNS', 'MOST', 'BRILLIANT', 'DYE', 'ARE', 'BLOWN', 'CLEAR', 'BANDS', 'OF', 'COLOUR', 'THROUGH', 'THE', 'SKY', 'THAT', 'SWIRL', 'AND', 'SWEEP', 'AND', 'MEET', 'TO', 'BREAK', 'AND', 'FOAM', 'LIKE', 'RAINBOW', 'VEILS', 'UPON', 'A', "BUBBLE'S", 'DOME'] +8555-292519-0001-2284: ref=['GUIDED', 'BY', 'YOU', 'HOW', 'WE', 'MIGHT', 'STROLL', 'TOWARDS', 'DEATH', 'OUR', 'ONLY', 'MUSIC', 'ONE', "ANOTHER'S", 'BREATH', 'THROUGH', 'GARDENS', 'INTIMATE', 'WITH', 'HOLLYHOCKS', 'WHERE', 'SILENT', 'POPPIES', 'BURN', 'BETWEEN', 'THE', 'ROCKS', 'BY', 'POOLS', 'WHERE', 'BIRCHES', 'BEND', 'TO', 'CONFIDANTS', 'ABOVE', 'GREEN', 'WATERS', 'SCUMMED', 'WITH', 'LILY', 'PLANTS'] +8555-292519-0001-2284: hyp=['GUIDED', 'BY', 'YOU', 'HOW', 'WE', 'MIGHT', 'STROLL', 'TOWARDS', 'DEATH', 'OUR', 'ONLY', 'MUSIC', 'ONE', "ANOTHER'S", 'BREATH', 'THROUGH', 'GARDENS', 'INTIMATE', 'WITH', 'HOLLYHOCKS', 'WHERE', 'SILENT', 'POPPIES', 'BURNED', 'BETWEEN', 'THE', 'ROCKS', 'BY', 'POOLS', 'WHERE', 'BIRCHES', 'BEND', 'TO', 'CONFIDANTS', 'ABOVE', 'GREEN', 
'WATERS', 'SCUMBED', 'WITH', 'A', 'LILY', 'PLANTS'] +8555-292519-0002-2285: ref=['VENICE'] +8555-292519-0002-2285: hyp=['VENICE'] +8555-292519-0003-2286: ref=['IN', 'A', 'SUNSET', 'GLOWING', 'OF', 'CRIMSON', 'AND', 'GOLD', 'SHE', 'LIES', 'THE', 'GLORY', 'OF', 'THE', 'WORLD', 'A', 'BEACHED', "KING'S", 'GALLEY', 'WHOSE', 'SAILS', 'ARE', 'FURLED', 'WHO', 'IS', 'HUNG', 'WITH', 'TAPESTRIES', 'RICH', 'AND', 'OLD'] +8555-292519-0003-2286: hyp=['IN', 'A', 'SUNSET', 'GLOWING', 'OF', 'CRIMSON', 'AND', 'GOLD', 'SHE', 'LIES', 'THE', 'GLORY', 'OF', 'THE', 'WORLD', 'A', 'BEECHED', "KING'S", 'GALLEY', 'WHO', 'SAILS', 'ARE', 'FURLED', 'WHO', 'IS', 'HUNG', 'WITH', 'TAPESTRIES', 'RICH', 'AND', 'OLD'] +8555-292519-0004-2287: ref=['THE', 'PITY', 'THAT', 'WE', 'MUST', 'COME', 'AND', 'GO'] +8555-292519-0004-2287: hyp=['THE', 'PITY', 'THAT', 'WE', 'MUST', 'COME', 'AND', 'GO'] +8555-292519-0005-2288: ref=['WHILE', 'THE', 'OLD', 'GOLD', 'AND', 'THE', 'MARBLE', 'STAYS', 'FOREVER', 'GLEAMING', 'ITS', 'SOFT', 'STRONG', 'BLAZE', 'CALM', 'IN', 'THE', 'EARLY', 'EVENING', 'GLOW'] +8555-292519-0005-2288: hyp=['WHILE', 'THE', 'OLD', 'GOLD', 'AND', 'THE', 'MARBLE', 'STAYS', 'FOR', 'EVER', 'GLEAMING', 'ITS', 'SOFT', 'STRONG', 'BLAZE', 'CALM', 'IN', 'THE', 'EARLY', 'EVENING', 'GLOW'] +8555-292519-0006-2289: ref=['THE', 'PLEASANT', 'GRAVEYARD', 'OF', 'MY', 'SOUL', 'WITH', 'SENTIMENTAL', 'CYPRESS', 'TREES', 'AND', 'FLOWERS', 'IS', 'FILLED', 'THAT', 'I', 'MAY', 'STROLL', 'IN', 'MEDITATION', 'AT', 'MY', 'EASE'] +8555-292519-0006-2289: hyp=['THE', 'PLEASANT', 'GRAVEYARD', 'OF', 'MY', 'SOUL', 'WITH', 'SENTIMENTAL', 'CYPRESS', 'TREES', 'AND', 'FLOWERS', 'IS', 'FILLED', 'THAT', 'I', 'MAY', 'STROLL', 'IN', 'MEDITATION', 'AT', 'MY', 'EASE'] +8555-292519-0007-2290: ref=['IT', 'IS', 'MY', 'HEART', 'HUNG', 'IN', 'THE', 'SKY', 'AND', 'NO', 'CLOUDS', 'EVER', 'FLOAT', 'BETWEEN', 'THE', 'GRAVE', 'FLOWERS', 'AND', 'MY', 'HEART', 'ON', 'HIGH'] +8555-292519-0007-2290: hyp=['IT', 'IS', 'MY', 'HEART', 'HUNG', 'IN', 'THE', 'SKY', 'AND', 'NO', 'CLOUDS', 'EVER', 'FLOAT', 'BETWEEN', 'THE', 'GRAY', 'FLOWERS', 'AND', 'MY', 'HEART', 'ON', 'HIGH'] +8555-292519-0008-2291: ref=['OVER', 'THE', 'TRACK', 'LINED', 'CITY', 'STREET', 'THE', 'YOUNG', 'MEN', 'THE', 'GRINNING', 'MEN', 'PASS'] +8555-292519-0008-2291: hyp=['OVER', 'THE', 'TRACK', 'LINED', 'CITY', 'STREET', 'THE', 'YOUNG', 'MAN', 'THE', 'GRINNING', 'MEN', 'PASS'] +8555-292519-0009-2292: ref=['HO', 'YE', 'SAILS', 'THAT', 'SEEM', 'TO', 'WANDER', 'IN', 'DREAM', 'FILLED', 'MEADOWS', 'SAY', 'IS', 'THE', 'SHORE', 'WHERE', 'I', 'STAND', 'THE', 'ONLY', 'FIELD', 'OF', 'STRUGGLE', 'OR', 'ARE', 'YE', 'HIT', 'AND', 'BATTERED', 'OUT', 'THERE', 'BY', 'WAVES', 'AND', 'WIND', 'GUSTS', 'AS', 'YE', 'TACK', 'OVER', 'A', 'CLASHING', 'SEA', 'OF', 'WATERY', 'ECHOES'] +8555-292519-0009-2292: hyp=['HOME', 'YE', 'SAILS', 'THAT', 'SEEM', 'TO', 'WONDER', 'AND', 'DREAM', 'FILLED', 'MEADOWS', 'SAY', 'IS', 'THE', 'SHORE', 'WHERE', 'I', 'STAND', 'THE', 'ONLY', 'FIELD', 'OF', 'STRUGGLE', 'OR', 'ARE', 'YE', 'HIT', 'AND', 'BATTERED', 'OUT', 'THERE', 'BY', 'WAVES', 'AND', 'WIND', 'GUSTS', 'AS', 'YE', 'TACK', 'OVER', 'A', 'CLASHING', 'SEA', 'OF', 'WATERY', 'ECHOES'] +8555-292519-0010-2293: ref=['OLD', 'DANCES', 'ARE', 'SIMPLIFIED', 'OF', 'THEIR', 'YEARNING', 'BLEACHED', 'BY', 'TIME'] +8555-292519-0010-2293: hyp=['OLD', 'DANCES', 'ARE', 'SIMPLIFIED', 'OF', 'THEIR', 'YEARNING', 'BLEACHED', 'BY', 'TIME'] +8555-292519-0011-2294: ref=['HE', 'HAD', 'GOT', 'INTO', 'HER', 'COURTYARD'] +8555-292519-0011-2294: hyp=['HE', 'HAD', 'GOT', 'INTO', 
'HER', 'COURTYARD'] +8555-292519-0012-2295: ref=['THROUGH', 'THE', 'BLACK', 'NIGHT', 'RAIN', 'HE', 'SANG', 'TO', 'HER', 'WINDOW', 'BARS'] +8555-292519-0012-2295: hyp=['THROUGH', 'THE', 'BLACK', 'NIGHT', 'RAIN', 'HE', 'SANG', 'TO', 'HER', 'WINDOW', 'BARS'] +8555-292519-0013-2296: ref=['THAT', 'WAS', 'BUT', 'RUSTLING', 'OF', 'DRIPPING', 'PLANTS', 'IN', 'THE', 'DARK'] +8555-292519-0013-2296: hyp=['THAT', 'WAS', 'BUT', 'RUSTLING', 'OF', 'TRIPPING', 'PLANTS', 'IN', 'THE', 'DARK'] +8555-292519-0014-2297: ref=['SHE', 'WAS', 'ALONE', 'THAT', 'NIGHT'] +8555-292519-0014-2297: hyp=['SHE', 'WAS', 'ALONE', 'THAT', 'NIGHT'] +8555-292519-0015-2298: ref=['HE', 'HAD', 'BROKEN', 'INTO', 'HER', 'COURTYARD'] +8555-292519-0015-2298: hyp=['HE', 'HAD', 'BROKEN', 'INTO', 'HER', 'COURTYARD'] +908-157963-0000-1321: ref=['TO', 'FADE', 'AWAY', 'LIKE', 'MORNING', 'BEAUTY', 'FROM', 'HER', 'MORTAL', 'DAY', 'DOWN', 'BY', 'THE', 'RIVER', 'OF', 'ADONA', 'HER', 'SOFT', 'VOICE', 'IS', 'HEARD', 'AND', 'THUS', 'HER', 'GENTLE', 'LAMENTATION', 'FALLS', 'LIKE', 'MORNING', 'DEW'] +908-157963-0000-1321: hyp=['TO', 'FADE', 'AWAY', 'LIKE', 'MORNING', 'BEAUTY', 'FROM', 'HER', 'MORTAL', 'DAY', 'DOWN', 'BY', 'THE', 'RIVER', 'OF', 'ADONNA', 'HER', 'SOFT', 'VOICES', 'HEARD', 'AND', 'THUS', 'HER', 'GENTLE', 'LAMENTATION', 'FALLS', 'LIKE', 'MORNING', 'DEW'] +908-157963-0001-1322: ref=['O', 'LIFE', 'OF', 'THIS', 'OUR', 'SPRING'] +908-157963-0001-1322: hyp=['O', 'LIFE', 'OF', 'THIS', 'OUR', 'SPRING'] +908-157963-0002-1323: ref=['WHY', 'FADES', 'THE', 'LOTUS', 'OF', 'THE', 'WATER'] +908-157963-0002-1323: hyp=['WHY', 'FADES', 'THE', 'LOTUS', 'OF', 'THE', 'WATER'] +908-157963-0003-1324: ref=['WHY', 'FADE', 'THESE', 'CHILDREN', 'OF', 'THE', 'SPRING'] +908-157963-0003-1324: hyp=['WHY', 'FADE', 'THESE', 'CHILDREN', 'OF', 'THE', 'SPRING'] +908-157963-0004-1325: ref=['THEL', 'IS', 'LIKE', 'A', 'WATRY', 'BOW', 'AND', 'LIKE', 'A', 'PARTING', 'CLOUD', 'LIKE', 'A', 'REFLECTION', 'IN', 'A', 'GLASS', 'LIKE', 'SHADOWS', 'IN', 'THE', 'WATER', 'LIKE', 'DREAMS', 'OF', 'INFANTS', 'LIKE', 'A', 'SMILE', 'UPON', 'AN', 'INFANTS', 'FACE'] +908-157963-0004-1325: hyp=['FELL', 'IS', 'LIKE', 'A', 'WATERY', 'BOW', 'AND', 'LIKE', 'A', 'PARTING', 'CLOUD', 'LIKE', 'A', 'REFLECTION', 'IN', 'A', 'GLASS', 'LIKE', 'SHADOWS', 'IN', 'THE', 'WATER', 'LIKE', 'DREAMS', 'OF', 'INFANTS', 'LIKE', 'A', 'SMILE', 'UPON', 'AN', "INFANT'S", 'FACE'] +908-157963-0005-1326: ref=['LIKE', 'THE', 'DOVES', 'VOICE', 'LIKE', 'TRANSIENT', 'DAY', 'LIKE', 'MUSIC', 'IN', 'THE', 'AIR', 'AH'] +908-157963-0005-1326: hyp=['LIKE', 'THE', 'DOVES', 'BOYS', 'LIKE', 'TRANSIENT', 'DAY', 'LIKE', 'MUSIC', 'IN', 'THE', 'AIR', 'AH'] +908-157963-0006-1327: ref=['AND', 'GENTLE', 'SLEEP', 'THE', 'SLEEP', 'OF', 'DEATH', 'AND', 'GENTLY', 'HEAR', 'THE', 'VOICE', 'OF', 'HIM', 'THAT', 'WALKETH', 'IN', 'THE', 'GARDEN', 'IN', 'THE', 'EVENING', 'TIME'] +908-157963-0006-1327: hyp=['AND', 'GENTLE', 'SLEEP', 'THE', 'SLEEP', 'OF', 'DEATH', 'AND', 'GENTLY', 'HEAR', 'THE', 'VOICE', 'OF', 'HIM', 'THAT', 'WALKETH', 'IN', 'THE', 'GARDEN', 'IN', 'THE', 'EVENING', 'TIME'] +908-157963-0007-1328: ref=['THE', 'LILLY', 'OF', 'THE', 'VALLEY', 'BREATHING', 'IN', 'THE', 'HUMBLE', 'GRASS', 'ANSWERD', 'THE', 'LOVELY', 'MAID', 'AND', 'SAID', 'I', 'AM', 'A', 'WATRY', 'WEED', 'AND', 'I', 'AM', 'VERY', 'SMALL', 'AND', 'LOVE', 'TO', 'DWELL', 'IN', 'LOWLY', 'VALES', 'SO', 'WEAK', 'THE', 'GILDED', 'BUTTERFLY', 'SCARCE', 'PERCHES', 'ON', 'MY', 'HEAD', 'YET', 'I', 'AM', 'VISITED', 'FROM', 'HEAVEN', 'AND', 'HE', 'THAT', 'SMILES', 'ON', 'ALL', 'WALKS', 'IN', 
'THE', 'VALLEY', 'AND', 'EACH', 'MORN', 'OVER', 'ME', 'SPREADS', 'HIS', 'HAND', 'SAYING', 'REJOICE', 'THOU', 'HUMBLE', 'GRASS', 'THOU', 'NEW', 'BORN', 'LILY', 'FLOWER'] +908-157963-0007-1328: hyp=['THE', 'LILY', 'OF', 'THE', 'VALLEY', 'BREATHING', 'IN', 'THE', 'HUMBLE', 'GRASS', 'ANSWERED', 'THE', 'LOVELY', 'MAIDEN', 'SAID', 'I', 'AM', 'A', 'WATCHERY', 'WEED', 'AND', 'I', 'AM', 'VERY', 'SMALL', 'AND', 'LOVE', 'TO', 'DWELL', 'IN', 'LOWLY', 'VALES', 'SO', 'WEAK', 'THE', 'GILDED', 'BUTTERFLY', 'SCARCE', 'PURCHASE', 'ON', 'MY', 'HEAD', 'YET', 'I', 'AM', 'VISITED', 'FROM', 'HEAVEN', 'AND', 'HE', 'THAT', 'SMILES', 'ON', 'ALL', 'WALKS', 'IN', 'THE', 'VALLEY', 'AND', 'EACH', 'MORN', 'OVER', 'ME', 'SPREADS', 'HIS', 'HAND', 'SAYING', 'REJOICE', 'THOU', 'HUMBLE', 'GRASS', 'THOU', 'NEWBORN', 'LILY', 'FLOWER'] +908-157963-0008-1329: ref=['THOU', 'GENTLE', 'MAID', 'OF', 'SILENT', 'VALLEYS', 'AND', 'OF', 'MODEST', 'BROOKS', 'FOR', 'THOU', 'SHALL', 'BE', 'CLOTHED', 'IN', 'LIGHT', 'AND', 'FED', 'WITH', 'MORNING', 'MANNA', 'TILL', 'SUMMERS', 'HEAT', 'MELTS', 'THEE', 'BESIDE', 'THE', 'FOUNTAINS', 'AND', 'THE', 'SPRINGS', 'TO', 'FLOURISH', 'IN', 'ETERNAL', 'VALES', 'THEY', 'WHY', 'SHOULD', 'THEL', 'COMPLAIN'] +908-157963-0008-1329: hyp=['THOU', 'GENTLE', 'MAID', 'OF', 'SILENT', 'VALLEYS', 'AND', 'OF', 'MODEST', 'BROOKS', 'FOR', 'THOU', 'SHALT', 'BE', 'CLOTHED', 'IN', 'LIGHT', 'AND', 'FED', 'WITH', 'MORNING', 'MANA', 'TILL', "SUMMER'S", 'HEAT', 'MELTS', 'THEE', 'BESIDE', 'THE', 'FOUNTAINS', 'AND', 'THE', 'SPRINGS', 'TO', 'FLOURISH', 'IN', 'ETERNAL', 'VALES', 'THEY', 'WHY', 'SHOULD', 'THOU', 'COMPLAIN'] +908-157963-0009-1330: ref=['WHY', 'SHOULD', 'THE', 'MISTRESS', 'OF', 'THE', 'VALES', 'OF', 'HAR', 'UTTER', 'A', 'SIGH'] +908-157963-0009-1330: hyp=['WHY', 'SHOULD', 'THE', 'MISTRESS', 'OF', 'THE', 'VEILS', 'OF', 'HAR', 'UTTER', 'A', 'SIGH'] +908-157963-0010-1331: ref=['SHE', 'CEASD', 'AND', 'SMILD', 'IN', 'TEARS', 'THEN', 'SAT', 'DOWN', 'IN', 'HER', 'SILVER', 'SHRINE'] +908-157963-0010-1331: hyp=['SHE', 'CEASED', 'AND', 'SMILED', 'IN', 'TEARS', 'THEN', 'SAT', 'DOWN', 'IN', 'HER', 'SILVER', 'SHRINE'] +908-157963-0011-1332: ref=['WHICH', 'THOU', 'DOST', 'SCATTER', 'ON', 'EVERY', 'LITTLE', 'BLADE', 'OF', 'GRASS', 'THAT', 'SPRINGS', 'REVIVES', 'THE', 'MILKED', 'COW', 'AND', 'TAMES', 'THE', 'FIRE', 'BREATHING', 'STEED'] +908-157963-0011-1332: hyp=['WHICH', 'THOU', 'DOST', 'SCATTER', 'ON', 'EVERY', 'LITTLE', 'BLADE', 'OF', 'GRASS', 'THAT', 'SPRINGS', 'REVIVES', 'THE', 'MILKED', 'COW', 'AND', 'TAMES', 'THE', 'FIRE', 'BREATHING', 'STEED'] +908-157963-0012-1333: ref=['BUT', 'THEL', 'IS', 'LIKE', 'A', 'FAINT', 'CLOUD', 'KINDLED', 'AT', 'THE', 'RISING', 'SUN', 'I', 'VANISH', 'FROM', 'MY', 'PEARLY', 'THRONE', 'AND', 'WHO', 'SHALL', 'FIND', 'MY', 'PLACE'] +908-157963-0012-1333: hyp=['BUT', 'THOU', 'IS', 'LIKE', 'A', 'FAINT', 'CLOUD', 'KINDLED', 'AT', 'THE', 'RISING', 'SUN', 'I', 'VANISH', 'FROM', 'MY', 'PEARLY', 'THRONE', 'AND', 'WHO', 'SHALL', 'FIND', 'MY', 'PLACE'] +908-157963-0013-1334: ref=['AND', 'WHY', 'IT', 'SCATTERS', 'ITS', 'BRIGHT', 'BEAUTY', 'THRO', 'THE', 'HUMID', 'AIR'] +908-157963-0013-1334: hyp=['AND', 'WYAT', 'SCATTERS', 'ITS', 'BRIGHT', 'BEAUTY', 'THROUGH', 'THE', 'HUMAN', 'AIR'] +908-157963-0014-1335: ref=['DESCEND', 'O', 'LITTLE', 'CLOUD', 'AND', 'HOVER', 'BEFORE', 'THE', 'EYES', 'OF', 'THEL'] +908-157963-0014-1335: hyp=['DESCEND', 'O', 'A', 'LITTLE', 'CLOUD', 'AND', 'HOVER', 'BEFORE', 'THE', 'EYES', 'OF', 'FELL'] +908-157963-0015-1336: ref=['O', 'LITTLE', 'CLOUD', 'THE', 'VIRGIN', 'SAID', 'I', 'CHARGE', 
'THEE', 'TO', 'TELL', 'ME', 'WHY', 'THOU', 'COMPLAINEST', 'NOW', 'WHEN', 'IN', 'ONE', 'HOUR', 'THOU', 'FADE', 'AWAY', 'THEN', 'WE', 'SHALL', 'SEEK', 'THEE', 'BUT', 'NOT', 'FIND', 'AH', 'THEL', 'IS', 'LIKE', 'TO', 'THEE'] +908-157963-0015-1336: hyp=['O', 'LITTLE', 'CLOUD', 'THE', 'VIRGIN', 'SAID', 'I', 'CHARGE', 'THEE', 'TO', 'TELL', 'ME', 'WHY', 'THOU', 'COMPLAINEST', 'NOW', 'WHEN', 'IN', 'ONE', 'HOUR', 'THOU', 'FADE', 'AWAY', 'THEN', 'WE', 'SHALL', 'SEEK', 'THEE', 'BUT', 'NOT', 'FIND', 'AH', 'FELL', 'IS', 'LIKE', 'TO', 'THEE'] +908-157963-0016-1337: ref=['I', 'PASS', 'AWAY', 'YET', 'I', 'COMPLAIN', 'AND', 'NO', 'ONE', 'HEARS', 'MY', 'VOICE'] +908-157963-0016-1337: hyp=['I', 'PASS', 'AWAY', 'YET', 'I', 'COMPLAIN', 'AND', 'NO', 'ONE', 'HEARS', 'MY', 'VOICE'] +908-157963-0017-1338: ref=['THE', 'CLOUD', 'THEN', 'SHEWD', 'HIS', 'GOLDEN', 'HEAD', 'AND', 'HIS', 'BRIGHT', 'FORM', "EMERG'D"] +908-157963-0017-1338: hyp=['THE', 'CLOUD', 'THEN', 'SHOWED', 'HIS', 'GOLDEN', 'HEAD', 'AND', 'HIS', 'BRIGHT', 'FORM', 'EMERGED'] +908-157963-0018-1339: ref=['AND', 'FEAREST', 'THOU', 'BECAUSE', 'I', 'VANISH', 'AND', 'AM', 'SEEN', 'NO', 'MORE'] +908-157963-0018-1339: hyp=['AND', "FEAR'ST", 'THOU', 'BECAUSE', 'I', 'VANISH', 'AND', 'AM', 'SEEN', 'NO', 'MORE'] +908-157963-0019-1340: ref=['IT', 'IS', 'TO', 'TENFOLD', 'LIFE', 'TO', 'LOVE', 'TO', 'PEACE', 'AND', 'RAPTURES', 'HOLY', 'UNSEEN', 'DESCENDING', 'WEIGH', 'MY', 'LIGHT', 'WINGS', 'UPON', 'BALMY', 'FLOWERS', 'AND', 'COURT', 'THE', 'FAIR', 'EYED', 'DEW', 'TO', 'TAKE', 'ME', 'TO', 'HER', 'SHINING', 'TENT', 'THE', 'WEEPING', 'VIRGIN', 'TREMBLING', 'KNEELS', 'BEFORE', 'THE', 'RISEN', 'SUN'] +908-157963-0019-1340: hyp=['IT', 'IS', 'TO', 'TENFOLD', 'LIFE', 'TO', 'LOVE', 'TO', 'PEACE', 'AND', 'RAPTURES', 'WHOLLY', 'UNSEEN', 'DESCENDING', 'WEIGH', 'MY', 'LIGHT', 'WINGS', 'UPON', 'BALMY', 'FLOWERS', 'AND', 'COURT', 'THE', 'FAIR', 'EYED', 'DEW', 'TO', 'TAKE', 'ME', 'TO', 'HER', 'SHINING', 'TENT', 'THE', 'WEEPING', 'VIRGIN', 'TREMBLING', 'KNEELS', 'BEFORE', 'THE', 'RISEN', 'SUN'] +908-157963-0020-1341: ref=['TILL', 'WE', 'ARISE', "LINK'D", 'IN', 'A', 'GOLDEN', 'BAND', 'AND', 'NEVER', 'PART', 'BUT', 'WALK', 'UNITED', 'BEARING', 'FOOD', 'TO', 'ALL', 'OUR', 'TENDER', 'FLOWERS'] +908-157963-0020-1341: hyp=['TILL', 'WE', 'ARISE', 'LINKED', 'IN', 'A', 'GOLDEN', 'BAND', 'AND', 'NEVER', 'PART', 'BUT', 'WALK', 'UNITED', 'BEARING', 'FOOD', 'TO', 'ALL', 'OUR', 'TENDER', 'FLOWERS'] +908-157963-0021-1342: ref=['LIVES', 'NOT', 'ALONE', 'NOR', 'OR', 'ITSELF', 'FEAR', 'NOT', 'AND', 'I', 'WILL', 'CALL', 'THE', 'WEAK', 'WORM', 'FROM', 'ITS', 'LOWLY', 'BED', 'AND', 'THOU', 'SHALT', 'HEAR', 'ITS', 'VOICE'] +908-157963-0021-1342: hyp=['LIVES', 'NOT', 'ALONE', 'NOR', 'OF', 'ITSELF', 'FEAR', 'NOT', 'AND', 'I', 'WILL', 'CALL', 'THE', 'WEAK', 'WORM', 'FROM', 'ITS', 'LOWLY', 'BED', 'AND', 'THOU', 'SHALT', 'HEAR', 'ITS', 'VOICE'] +908-157963-0022-1343: ref=['COME', 'FORTH', 'WORM', 'AND', 'THE', 'SILENT', 'VALLEY', 'TO', 'THY', 'PENSIVE', 'QUEEN'] +908-157963-0022-1343: hyp=['COME', 'FORTH', 'WORM', 'AND', 'THE', 'SILENT', 'VALLEY', 'TO', 'THY', 'PENSIVE', 'QUEEN'] +908-157963-0023-1344: ref=['THE', 'HELPLESS', 'WORM', 'AROSE', 'AND', 'SAT', 'UPON', 'THE', 'LILLYS', 'LEAF', 'AND', 'THE', 'BRIGHT', 'CLOUD', 'SAILD', 'ON', 'TO', 'FIND', 'HIS', 'PARTNER', 'IN', 'THE', 'VALE'] +908-157963-0023-1344: hyp=['THE', 'HELPLESS', 'WORM', 'AROSE', 'AND', 'SAT', 'UPON', 'THE', "LILY'S", 'LEAF', 'AND', 'THE', 'BRIGHT', 'CLOUD', 'SAILED', 'ON', 'TO', 'FIND', 'HIS', 'PARTNER', 'IN', 'THE', 'VALE'] 
+908-157963-0024-1345: ref=['IMAGE', 'OF', 'WEAKNESS', 'ART', 'THOU', 'BUT', 'A', 'WORM'] +908-157963-0024-1345: hyp=['IMAGE', 'OF', 'WEAKNESS', 'ART', 'THOU', 'BUT', 'A', 'WORM'] +908-157963-0025-1346: ref=['I', 'SEE', 'THEY', 'LAY', 'HELPLESS', 'AND', 'NAKED', 'WEEPING', 'AND', 'NONE', 'TO', 'ANSWER', 'NONE', 'TO', 'CHERISH', 'THEE', 'WITH', 'MOTHERS', 'SMILES'] +908-157963-0025-1346: hyp=['I', 'SEE', 'THEY', 'LAY', 'HELPLESS', 'AND', 'NAKED', 'WEEPING', 'AND', 'NONE', 'TO', 'ANSWER', 'NONE', 'TO', 'CHERISH', 'THEE', 'WITH', "MOTHER'S", 'SMILES'] +908-157963-0026-1347: ref=['AND', 'SAYS', 'THOU', 'MOTHER', 'OF', 'MY', 'CHILDREN', 'I', 'HAVE', 'LOVED', 'THEE', 'AND', 'I', 'HAVE', 'GIVEN', 'THEE', 'A', 'CROWN', 'THAT', 'NONE', 'CAN', 'TAKE', 'AWAY'] +908-157963-0026-1347: hyp=['AND', 'SAYS', 'THOU', 'MOTHER', 'OF', 'MY', 'CHILDREN', 'I', 'HAVE', 'LOVED', 'THEE', 'AND', 'I', 'HAVE', 'GIVEN', 'THEE', 'A', 'CROWN', 'THAT', 'NONE', 'CAN', 'TAKE', 'AWAY'] +908-157963-0027-1348: ref=['AND', 'LAY', 'ME', 'DOWN', 'IN', 'THY', 'COLD', 'BED', 'AND', 'LEAVE', 'MY', 'SHINING', 'LOT'] +908-157963-0027-1348: hyp=['AND', 'LAY', 'ME', 'DOWN', 'IN', 'THY', 'COLD', 'BED', 'AND', 'LEAVE', 'MY', 'SHINING', 'LOT'] +908-157963-0028-1349: ref=['OR', 'AN', 'EYE', 'OF', 'GIFTS', 'AND', 'GRACES', 'SHOWRING', 'FRUITS', 'AND', 'COINED', 'GOLD'] +908-157963-0028-1349: hyp=['OR', 'AN', 'EYE', 'OF', 'GIFTS', 'AND', 'GRACES', 'SHOWERING', 'FRUITS', 'AND', 'COINED', 'GOLD'] +908-157963-0029-1350: ref=['WHY', 'A', 'TONGUE', "IMPRESS'D", 'WITH', 'HONEY', 'FROM', 'EVERY', 'WIND'] +908-157963-0029-1350: hyp=['WHY', 'A', 'TONGUE', 'IMPRESSED', 'WITH', 'HONEY', 'FROM', 'EVERY', 'WIND'] +908-157963-0030-1351: ref=['WHY', 'AN', 'EAR', 'A', 'WHIRLPOOL', 'FIERCE', 'TO', 'DRAW', 'CREATIONS', 'IN'] +908-157963-0030-1351: hyp=['WHY', 'AN', 'EAR', 'A', 'WHIRLPOOL', 'FIERCE', 'TO', 'DRAW', 'CREATIONS', 'IN'] +908-31957-0000-1352: ref=['ALL', 'IS', 'SAID', 'WITHOUT', 'A', 'WORD'] +908-31957-0000-1352: hyp=['ALL', 'IS', 'SAID', 'WITHOUT', 'A', 'WORD'] +908-31957-0001-1353: ref=['I', 'SIT', 'BENEATH', 'THY', 'LOOKS', 'AS', 'CHILDREN', 'DO', 'IN', 'THE', 'NOON', 'SUN', 'WITH', 'SOULS', 'THAT', 'TREMBLE', 'THROUGH', 'THEIR', 'HAPPY', 'EYELIDS', 'FROM', 'AN', 'UNAVERRED', 'YET', 'PRODIGAL', 'INWARD', 'JOY'] +908-31957-0001-1353: hyp=['I', 'SIT', 'BENEATH', 'THY', 'LOOKS', 'AS', 'CHILDREN', 'DO', 'IN', 'THE', 'NOON', 'SUN', 'WITH', 'SOULS', 'THAT', 'TREMBLE', 'THROUGH', 'THEIR', 'HAPPY', 'EYELIDS', 'FROM', 'AN', 'UNAVERRED', 'YET', 'CHRONICAL', 'INWARD', 'JOY'] +908-31957-0002-1354: ref=['I', 'DID', 'NOT', 'WRONG', 'MYSELF', 'SO', 'BUT', 'I', 'PLACED', 'A', 'WRONG', 'ON', 'THEE'] +908-31957-0002-1354: hyp=['I', 'DID', 'NOT', 'WRONG', 'MYSELF', 'SO', 'BUT', 'I', 'PLACED', 'A', 'WRONG', 'ON', 'THEE'] +908-31957-0003-1355: ref=['WHEN', 'CALLED', 'BEFORE', 'I', 'TOLD', 'HOW', 'HASTILY', 'I', 'DROPPED', 'MY', 'FLOWERS', 'OR', 'BRAKE', 'OFF', 'FROM', 'A', 'GAME'] +908-31957-0003-1355: hyp=['WHEN', 'CALLED', 'BEFORE', 'I', 'TOLD', 'HOW', 'HASTILY', 'I', 'DROPPED', 'MY', 'FLOWERS', 'OR', 'BREAK', 'OFF', 'FROM', 'A', 'GAME'] +908-31957-0004-1356: ref=['SHALL', 'I', 'NEVER', 'MISS', 'HOME', 'TALK', 'AND', 'BLESSING', 'AND', 'THE', 'COMMON', 'KISS', 'THAT', 'COMES', 'TO', 'EACH', 'IN', 'TURN', 'NOR', 'COUNT', 'IT', 'STRANGE', 'WHEN', 'I', 'LOOK', 'UP', 'TO', 'DROP', 'ON', 'A', 'NEW', 'RANGE', 'OF', 'WALLS', 'AND', 'FLOORS', 'ANOTHER', 'HOME', 'THAN', 'THIS'] +908-31957-0004-1356: hyp=['SHALL', 'I', 'NEVER', 'MISS', 'HOME', 'TALK', 'AND', 'BLESSING', 
'AND', 'THE', 'COMMON', 'KISS', 'THAT', 'COMES', 'TO', 'EACH', 'IN', 'TURN', 'NOR', 'COUNT', 'IT', 'STRANGE', 'WHEN', 'I', 'LOOK', 'UP', 'TO', 'DROP', 'ON', 'A', 'NEW', 'RANGE', 'OF', 'WALLS', 'AND', 'FLOORS', 'ANOTHER', 'HOME', 'THAN', 'THIS'] +908-31957-0005-1357: ref=['ALAS', 'I', 'HAVE', 'GRIEVED', 'SO', 'I', 'AM', 'HARD', 'TO', 'LOVE'] +908-31957-0005-1357: hyp=['ALAS', 'I', 'HAVE', 'GRIEVED', 'SO', 'I', 'AM', 'HARD', 'TO', 'LOVE'] +908-31957-0006-1358: ref=['OPEN', 'THY', 'HEART', 'WIDE', 'AND', 'FOLD', 'WITHIN', 'THE', 'WET', 'WINGS', 'OF', 'THY', 'DOVE'] +908-31957-0006-1358: hyp=['OPEN', 'THY', 'HEART', 'WIDE', 'AND', 'FOLD', 'WITHIN', 'THE', 'WET', 'WINGS', 'OF', 'THY', 'DOVE'] +908-31957-0007-1359: ref=['COULD', 'IT', 'MEAN', 'TO', 'LAST', 'A', 'LOVE', 'SET', 'PENDULOUS', 'BETWEEN', 'SORROW', 'AND', 'SORROW'] +908-31957-0007-1359: hyp=['COULD', 'IT', 'MEAN', 'TO', 'LAST', 'A', 'LOVE', 'SET', 'PENDULOUS', 'BETWEEN', 'SORROW', 'AND', 'SORROW'] +908-31957-0008-1360: ref=['NAY', 'I', 'RATHER', 'THRILLED', 'DISTRUSTING', 'EVERY', 'LIGHT', 'THAT', 'SEEMED', 'TO', 'GILD', 'THE', 'ONWARD', 'PATH', 'AND', 'FEARED', 'TO', 'OVERLEAN', 'A', 'FINGER', 'EVEN'] +908-31957-0008-1360: hyp=['NAY', 'I', 'RATHER', 'THRILLED', 'DISTRUSTING', 'EVERY', 'LIGHT', 'THAT', 'SEEMED', 'TO', 'GILD', 'THE', 'ONWARD', 'PATH', 'IN', 'FEAR', 'TO', 'OVERLENE', 'A', 'FINGER', 'EVEN'] +908-31957-0009-1361: ref=['AND', 'THOUGH', 'I', 'HAVE', 'GROWN', 'SERENE', 'AND', 'STRONG', 'SINCE', 'THEN', 'I', 'THINK', 'THAT', 'GOD', 'HAS', 'WILLED', 'A', 'STILL', 'RENEWABLE', 'FEAR'] +908-31957-0009-1361: hyp=['AND', 'THOUGH', 'I', 'HAVE', 'GROWN', 'SERENE', 'AND', 'STRONG', 'SINCE', 'THEN', 'I', 'THINK', 'THAT', 'GOD', 'HAS', 'WILLED', 'A', 'STILL', 'RENEWABLE', 'FEAR'] +908-31957-0010-1362: ref=['O', 'LOVE', 'O', 'TROTH'] +908-31957-0010-1362: hyp=['O', 'LOVE', 'O', 'TROTH'] +908-31957-0011-1363: ref=['AND', 'LOVE', 'BE', 'FALSE'] +908-31957-0011-1363: hyp=['AND', 'LOVE', 'BE', 'FALSE'] +908-31957-0012-1364: ref=['IF', 'HE', 'TO', 'KEEP', 'ONE', 'OATH', 'MUST', 'LOSE', 'ONE', 'JOY', 'BY', 'HIS', "LIFE'S", 'STAR', 'FORETOLD'] +908-31957-0012-1364: hyp=['IF', 'HE', 'TO', 'KEEP', 'ONE', 'OATH', 'MUST', 'LOSE', 'ONE', 'JOY', 'BY', 'HIS', "LIFE'S", 'STAR', 'FORETOLD'] +908-31957-0013-1365: ref=['SLOW', 'TO', 'WORLD', 'GREETINGS', 'QUICK', 'WITH', 'ITS', 'O', 'LIST', 'WHEN', 'THE', 'ANGELS', 'SPEAK'] +908-31957-0013-1365: hyp=['SLOW', 'TO', 'WORLD', 'GREETINGS', 'QUICK', 'WITH', 'ITS', 'O', 'LIST', 'WHEN', 'THE', 'ANGEL', 'SPEAK'] +908-31957-0014-1366: ref=['A', 'RING', 'OF', 'AMETHYST', 'I', 'COULD', 'NOT', 'WEAR', 'HERE', 'PLAINER', 'TO', 'MY', 'SIGHT', 'THAN', 'THAT', 'FIRST', 'KISS'] +908-31957-0014-1366: hyp=['A', 'RING', 'OF', 'AMETHYST', 'I', 'COULD', 'NOT', 'WEAR', 'HERE', 'PLAINER', 'TO', 'MY', 'SIGHT', 'THAN', 'THAT', 'FIRST', 'KISS'] +908-31957-0015-1367: ref=['THAT', 'WAS', 'THE', 'CHRISM', 'OF', 'LOVE', 'WHICH', "LOVE'S", 'OWN', 'CROWN', 'WITH', 'SANCTIFYING', 'SWEETNESS', 'DID', 'PRECEDE', 'THE', 'THIRD', 'UPON', 'MY', 'LIPS', 'WAS', 'FOLDED', 'DOWN', 'IN', 'PERFECT', 'PURPLE', 'STATE', 'SINCE', 'WHEN', 'INDEED', 'I', 'HAVE', 'BEEN', 'PROUD', 'AND', 'SAID', 'MY', 'LOVE', 'MY', 'OWN'] +908-31957-0015-1367: hyp=['THAT', 'WAS', 'THE', 'CHRISM', 'OF', 'LOVE', 'WHICH', 'LOVES', 'OWN', 'CROWN', 'WITH', 'SANCTIFYING', 'SWEETNESS', 'DID', 'PROCEED', 'THE', 'THIRD', 'UPON', 'MY', 'LIPS', 'WAS', 'FOLDED', 'DOWN', 'IMPERFECT', 'PURPLE', 'STATE', 'SINCE', 'WHEN', 'INDEED', 'I', 'HAVE', 'BEEN', 'PROUD', 'AND', 'SAID', 'MY', 
'LOVE', 'MY', 'OWN'] +908-31957-0016-1368: ref=['DEAREST', 'TEACH', 'ME', 'SO', 'TO', 'POUR', 'OUT', 'GRATITUDE', 'AS', 'THOU', 'DOST', 'GOOD'] +908-31957-0016-1368: hyp=['DEAREST', 'TEACH', 'ME', 'SO', 'TO', 'POUR', 'OUT', 'GRATITUDE', 'AS', 'THOU', 'DOST', 'GOOD'] +908-31957-0017-1369: ref=['MUSSULMANS', 'AND', 'GIAOURS', 'THROW', 'KERCHIEFS', 'AT', 'A', 'SMILE', 'AND', 'HAVE', 'NO', 'RUTH', 'FOR', 'ANY', 'WEEPING'] +908-31957-0017-1369: hyp=['MUSSULMANS', 'AND', 'GEYORS', 'THROW', 'KERCHIEFS', 'AT', 'A', 'SMILE', 'AND', 'HAVE', 'NO', 'RUTH', 'FOR', 'ANY', 'WEEPING'] +908-31957-0018-1370: ref=['BUT', 'THOU', 'ART', 'NOT', 'SUCH', 'A', 'LOVER', 'MY', 'BELOVED'] +908-31957-0018-1370: hyp=['BUT', 'THOU', 'ART', 'NOT', 'SUCH', 'A', 'LOVER', 'MY', 'BELOVED'] +908-31957-0019-1371: ref=['THOU', 'CANST', 'WAIT', 'THROUGH', 'SORROW', 'AND', 'SICKNESS', 'TO', 'BRING', 'SOULS', 'TO', 'TOUCH', 'AND', 'THINK', 'IT', 'SOON', 'WHEN', 'OTHERS', 'CRY', 'TOO', 'LATE'] +908-31957-0019-1371: hyp=['THOU', 'CANST', 'WAIT', 'THROUGH', 'SORROW', 'AND', 'SICKNESS', 'TO', 'BRING', 'SOULS', 'TO', 'TOUCH', 'AND', 'THINK', 'IT', 'SOON', 'WHEN', 'OTHERS', 'CRY', 'TOO', 'LATE'] +908-31957-0020-1372: ref=['I', 'THANK', 'ALL', 'WHO', 'HAVE', 'LOVED', 'ME', 'IN', 'THEIR', 'HEARTS', 'WITH', 'THANKS', 'AND', 'LOVE', 'FROM', 'MINE'] +908-31957-0020-1372: hyp=['I', 'THINK', 'ALL', 'WHO', 'HAVE', 'LOVED', 'ME', 'IN', 'THEIR', 'HEARTS', 'WITH', 'THANKS', 'AND', 'LOVE', 'FROM', 'MINE'] +908-31957-0021-1373: ref=['OH', 'TO', 'SHOOT', 'MY', "SOUL'S", 'FULL', 'MEANING', 'INTO', 'FUTURE', 'YEARS', 'THAT', 'THEY', 'SHOULD', 'LEND', 'IT', 'UTTERANCE', 'AND', 'SALUTE', 'LOVE', 'THAT', 'ENDURES', 'FROM', 'LIFE', 'THAT', 'DISAPPEARS'] +908-31957-0021-1373: hyp=['OH', 'TO', 'SHOOT', 'MY', "SOUL'S", 'FULL', 'MEANING', 'INTO', 'FUTURE', 'YEARS', 'THAT', 'THEY', 'SHOULD', 'LEND', 'IT', 'UTTERANCE', 'AND', 'SALUTE', 'LOVE', 'THAT', 'ENDURES', 'FROM', 'LIFE', 'THAT', 'DISAPPEARS'] +908-31957-0022-1374: ref=['THEN', 'I', 'LONG', 'TRIED', 'BY', 'NATURAL', 'ILLS', 'RECEIVED', 'THE', 'COMFORT', 'FAST', 'WHILE', 'BUDDING', 'AT', 'THY', 'SIGHT', 'MY', "PILGRIM'S", 'STAFF', 'GAVE', 'OUT', 'GREEN', 'LEAVES', 'WITH', 'MORNING', 'DEWS', 'IMPEARLED'] +908-31957-0022-1374: hyp=['THEN', 'I', 'LONG', 'TRIED', 'BY', 'NATURAL', 'ILLS', 'RECEIVED', 'THE', 'COMFORT', 'FAST', 'WHILE', 'BUDDING', 'AT', 'THY', 'SIGHT', 'MY', "PILGRIM'S", 'STAFF', 'GAVE', 'OUT', 'GREEN', 'LEAVES', 'WITH', 'MORNING', 'DEWS', 'IMPERILLED'] +908-31957-0023-1375: ref=['I', 'LOVE', 'THEE', 'FREELY', 'AS', 'MEN', 'STRIVE', 'FOR', 'RIGHT', 'I', 'LOVE', 'THEE', 'PURELY', 'AS', 'THEY', 'TURN', 'FROM', 'PRAISE'] +908-31957-0023-1375: hyp=['I', 'LOVE', 'THEE', 'FREELY', 'AS', 'MEN', 'STRIVE', 'FOR', 'RIGHT', 'I', 'LOVE', 'THEE', 'PURELY', 'AS', 'THEY', 'TURN', 'FROM', 'PREISE'] +908-31957-0024-1376: ref=['I', 'LOVE', 'THEE', 'WITH', 'THE', 'PASSION', 'PUT', 'TO', 'USE', 'IN', 'MY', 'OLD', 'GRIEFS', 'AND', 'WITH', 'MY', "CHILDHOOD'S", 'FAITH'] +908-31957-0024-1376: hyp=['I', 'LOVE', 'THEE', 'WITH', 'THE', 'PASSION', 'PUT', 'TO', 'USE', 'IN', 'MY', 'OLD', 'GREEDS', 'AND', 'WITH', 'MY', "CHILDHOOD'S", 'FAITH'] +908-31957-0025-1377: ref=['I', 'LOVE', 'THEE', 'WITH', 'A', 'LOVE', 'I', 'SEEMED', 'TO', 'LOSE', 'WITH', 'MY', 'LOST', 'SAINTS', 'I', 'LOVE', 'THEE', 'WITH', 'THE', 'BREATH', 'SMILES', 'TEARS', 'OF', 'ALL', 'MY', 'LIFE', 'AND', 'IF', 'GOD', 'CHOOSE', 'I', 'SHALL', 'BUT', 'LOVE', 'THEE', 'BETTER', 'AFTER', 'DEATH'] +908-31957-0025-1377: hyp=['I', 'LOVE', 'THEE', 'WITH', 'A', 'LOVE', 'I', 
'SEEMED', 'TO', 'LOSE', 'WITH', 'MY', 'LOST', 'SAINTS', 'I', 'LOVE', 'THEE', 'WITH', 'THE', 'BREATH', 'SMILES', 'TEARS', 'OF', 'ALL', 'MY', 'LIFE', 'AND', 'IF', 'GOD', 'CHOOSE', 'I', 'SHALL', 'BUT', 'LOVE', 'THEE', 'BETTER', 'AFTER', 'DEATH'] diff --git a/log/modified_beam_search/recogs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt b/log/modified_beam_search/recogs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..7e3cd2382bdbe2d57fe3e75f4446cf08af7c0191 --- /dev/null +++ b/log/modified_beam_search/recogs-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt @@ -0,0 +1,5878 @@ +1688-142285-0000-1948: ref=["THERE'S", 'IRON', 'THEY', 'SAY', 'IN', 'ALL', 'OUR', 'BLOOD', 'AND', 'A', 'GRAIN', 'OR', 'TWO', 'PERHAPS', 'IS', 'GOOD', 'BUT', 'HIS', 'HE', 'MAKES', 'ME', 'HARSHLY', 'FEEL', 'HAS', 'GOT', 'A', 'LITTLE', 'TOO', 'MUCH', 'OF', 'STEEL', 'ANON'] +1688-142285-0000-1948: hyp=["THERE'S", 'IRON', 'THEY', 'SAY', 'IN', 'ALL', 'OUR', 'BLOOD', 'AND', 'A', 'GRAIN', 'OR', 'TWO', 'PERHAPS', 'IS', 'GOOD', 'BUT', 'HIS', 'HE', 'MAKES', 'ME', 'HARSHLY', 'FEEL', 'HAS', 'GOT', 'A', 'LITTLE', 'TOO', 'MUCH', 'OF', 'STEEL', 'ANON'] +1688-142285-0001-1949: ref=['MARGARET', 'SAID', 'MISTER', 'HALE', 'AS', 'HE', 'RETURNED', 'FROM', 'SHOWING', 'HIS', 'GUEST', 'DOWNSTAIRS', 'I', 'COULD', 'NOT', 'HELP', 'WATCHING', 'YOUR', 'FACE', 'WITH', 'SOME', 'ANXIETY', 'WHEN', 'MISTER', 'THORNTON', 'MADE', 'HIS', 'CONFESSION', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY'] +1688-142285-0001-1949: hyp=['MARGARET', 'SAID', 'MISTER', 'HALE', 'AS', 'HE', 'RETURNED', 'FROM', 'SHOWING', 'HIS', 'GUEST', 'DOWNSTAIRS', 'I', 'COULD', 'NOT', 'HELP', 'WATCHING', 'YOUR', 'FACE', 'WITH', 'SOME', 'ANXIETY', 'WHEN', 'MISTER', 'THORNTON', 'MADE', 'HIS', 'CONFESSION', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY'] +1688-142285-0002-1950: ref=['YOU', "DON'T", 'MEAN', 'THAT', 'YOU', 'THOUGHT', 'ME', 'SO', 'SILLY'] +1688-142285-0002-1950: hyp=['YOU', "DON'T", 'MEAN', 'THAT', 'YOU', 'THOUGHT', 'ME', 'SO', 'SILLY'] +1688-142285-0003-1951: ref=['I', 'REALLY', 'LIKED', 'THAT', 'ACCOUNT', 'OF', 'HIMSELF', 'BETTER', 'THAN', 'ANYTHING', 'ELSE', 'HE', 'SAID'] +1688-142285-0003-1951: hyp=['I', 'REALLY', 'LIKE', 'THAT', 'ACCOUNT', 'OF', 'HIMSELF', 'BETTER', 'THAN', 'ANYTHING', 'ELSE', 'HE', 'SAID'] +1688-142285-0004-1952: ref=['HIS', 'STATEMENT', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY', 'WAS', 'THE', 'THING', 'I', 'LIKED', 'BEST', 'OF', 'ALL'] +1688-142285-0004-1952: hyp=['HIS', 'STATEMENT', 'OF', 'HAVING', 'BEEN', 'A', 'SHOP', 'BOY', 'WAS', 'THE', 'THING', 'I', 'LIKE', 'BEST', 'OF', 'ALL'] +1688-142285-0005-1953: ref=['YOU', 'WHO', 'WERE', 'ALWAYS', 'ACCUSING', 'PEOPLE', 'OF', 'BEING', 'SHOPPY', 'AT', 'HELSTONE'] +1688-142285-0005-1953: hyp=['YOU', 'WHO', 'WERE', 'ALWAYS', 'ACCUSING', 'PEOPLE', 'OF', 'BEING', 'SHOPPY', 'AT', 'HELSTONE'] +1688-142285-0006-1954: ref=['I', "DON'T", 'THINK', 'MISTER', 'HALE', 'YOU', 'HAVE', 'DONE', 'QUITE', 'RIGHT', 'IN', 'INTRODUCING', 'SUCH', 'A', 'PERSON', 'TO', 'US', 'WITHOUT', 'TELLING', 'US', 'WHAT', 'HE', 'HAD', 'BEEN'] +1688-142285-0006-1954: hyp=['I', "DON'T", 'THINK', 'MISTER', 'HALE', 'YOU', 'HAVE', 'DONE', 'QUITE', 'RIGHT', 'HE', 'INTRODUCING', 'SUCH', 'A', 'PERSON', 'TO', 'US', 'WITHOUT', 'TELLING', 'US', 'WHAT', 'HE', 'HAD', 'BEEN'] 
+1688-142285-0007-1955: ref=['I', 'REALLY', 'WAS', 'VERY', 'MUCH', 'AFRAID', 'OF', 'SHOWING', 'HIM', 'HOW', 'MUCH', 'SHOCKED', 'I', 'WAS', 'AT', 'SOME', 'PARTS', 'OF', 'WHAT', 'HE', 'SAID'] +1688-142285-0007-1955: hyp=['I', 'REALLY', 'WAS', 'VERY', 'MUCH', 'AFRAID', 'OF', 'SHOWING', 'HIM', 'HOW', 'MUCH', 'SHOCKED', 'I', 'WAS', 'AT', 'SOME', 'PART', 'OF', 'WHAT', 'HE', 'SAID'] +1688-142285-0008-1956: ref=['HIS', 'FATHER', 'DYING', 'IN', 'MISERABLE', 'CIRCUMSTANCES'] +1688-142285-0008-1956: hyp=['HIS', 'FATHER', 'DYING', 'IN', 'MISERABLE', 'CIRCUMSTANCES'] +1688-142285-0009-1957: ref=['WHY', 'IT', 'MIGHT', 'HAVE', 'BEEN', 'IN', 'THE', 'WORKHOUSE'] +1688-142285-0009-1957: hyp=['WHY', 'IT', 'MIGHT', 'HAVE', 'BEEN', 'IN', 'THE', 'WORKHOUSE'] +1688-142285-0010-1958: ref=['HIS', 'FATHER', 'SPECULATED', 'WILDLY', 'FAILED', 'AND', 'THEN', 'KILLED', 'HIMSELF', 'BECAUSE', 'HE', 'COULD', 'NOT', 'BEAR', 'THE', 'DISGRACE'] +1688-142285-0010-1958: hyp=['HIS', 'FATHER', 'SPECULATED', 'WILDLY', 'FAILED', 'AND', 'THEN', 'KILLED', 'HIMSELF', 'BECAUSE', 'HE', 'COULD', 'NOT', 'BEAR', 'THE', 'DISGRACE'] +1688-142285-0011-1959: ref=['ALL', 'HIS', 'FORMER', 'FRIENDS', 'SHRUNK', 'FROM', 'THE', 'DISCLOSURES', 'THAT', 'HAD', 'TO', 'BE', 'MADE', 'OF', 'HIS', 'DISHONEST', 'GAMBLING', 'WILD', 'HOPELESS', 'STRUGGLES', 'MADE', 'WITH', 'OTHER', "PEOPLE'S", 'MONEY', 'TO', 'REGAIN', 'HIS', 'OWN', 'MODERATE', 'PORTION', 'OF', 'WEALTH'] +1688-142285-0011-1959: hyp=['ALL', 'HIS', 'FORMER', 'FRIENDS', 'SHRUNK', 'FROM', 'THE', 'DISCLOSURES', 'THAT', 'HAD', 'TO', 'BE', 'MADE', 'OF', 'HIS', 'DISHONEST', 'GAMBLING', 'WILD', 'HOPELESS', 'STRUGGLES', 'MADE', 'WITH', 'OTHER', "PEOPLE'S", 'MONEY', 'TO', 'REGAIN', 'HIS', 'OWN', 'MODERATE', 'PORTION', 'OF', 'WEALTH'] +1688-142285-0012-1960: ref=['NO', 'ONE', 'CAME', 'FORWARDS', 'TO', 'HELP', 'THE', 'MOTHER', 'AND', 'THIS', 'BOY'] +1688-142285-0012-1960: hyp=['NO', 'ONE', 'CAME', 'FORWARDS', 'TO', 'HELP', 'THE', 'MOTHER', 'AND', 'THIS', 'BOY'] +1688-142285-0013-1961: ref=['AT', 'LEAST', 'NO', 'FRIEND', 'CAME', 'FORWARDS', 'IMMEDIATELY', 'AND', 'MISSUS', 'THORNTON', 'IS', 'NOT', 'ONE', 'I', 'FANCY', 'TO', 'WAIT', 'TILL', 'TARDY', 'KINDNESS', 'COMES', 'TO', 'FIND', 'HER', 'OUT'] +1688-142285-0013-1961: hyp=['AT', 'LEAST', 'NO', 'FRIEND', 'CAME', 'FORWARDS', 'IMMEDIATELY', 'AND', 'MISTER', 'THORNTON', 'IS', 'NOT', 'ONE', 'I', 'FANCY', 'TO', 'WAIT', 'TILL', 'TIDY', 'KINDNESS', 'COMES', 'TO', 'FIND', 'HER', 'OUT'] +1688-142285-0014-1962: ref=['SO', 'THEY', 'LEFT', 'MILTON'] +1688-142285-0014-1962: hyp=['SO', 'THEY', 'LEFT', 'MILTON'] +1688-142285-0015-1963: ref=['HOW', 'TAINTED', 'ASKED', 'HER', 'FATHER'] +1688-142285-0015-1963: hyp=['HOW', 'TAINTED', 'ASKED', 'HER', 'FATHER'] +1688-142285-0016-1964: ref=['OH', 'PAPA', 'BY', 'THAT', 'TESTING', 'EVERYTHING', 'BY', 'THE', 'STANDARD', 'OF', 'WEALTH'] +1688-142285-0016-1964: hyp=['OH', 'PAPA', 'BY', 'THAT', 'TESTING', 'EVERYTHING', 'BY', 'THE', 'STANDARD', 'OF', 'WEALTH'] +1688-142285-0017-1965: ref=['WHEN', 'HE', 'SPOKE', 'OF', 'THE', 'MECHANICAL', 'POWERS', 'HE', 'EVIDENTLY', 'LOOKED', 'UPON', 'THEM', 'ONLY', 'AS', 'NEW', 'WAYS', 'OF', 'EXTENDING', 'TRADE', 'AND', 'MAKING', 'MONEY'] +1688-142285-0017-1965: hyp=['WHEN', 'HE', 'SPOKE', 'OF', 'THE', 'MECHANICAL', 'POWERS', 'HE', 'EVIDENTLY', 'LOOKED', 'UPON', 'THEM', 'ONLY', 'AS', 'NEW', 'WAYS', 'OF', 'EXTENDING', 'TRADE', 'AND', 'MAKING', 'MONEY'] +1688-142285-0018-1966: ref=['AND', 'THE', 'POOR', 'MEN', 'AROUND', 'HIM', 'THEY', 'WERE', 'POOR', 'BECAUSE', 'THEY', 'WERE', 'VICIOUS', 'OUT', 'OF', 
'THE', 'PALE', 'OF', 'HIS', 'SYMPATHIES', 'BECAUSE', 'THEY', 'HAD', 'NOT', 'HIS', 'IRON', 'NATURE', 'AND', 'THE', 'CAPABILITIES', 'THAT', 'IT', 'GIVES', 'HIM', 'FOR', 'BEING', 'RICH'] +1688-142285-0018-1966: hyp=['AND', 'THE', 'POOR', 'MEN', 'AROUND', 'HIM', 'THEY', 'WERE', 'POOR', 'BECAUSE', 'THEY', 'WERE', 'VICIOUS', 'OUT', 'OF', 'THE', 'PALE', 'OF', 'HIS', 'SYMPATHIES', 'BECAUSE', 'THEY', 'HAD', 'NOT', 'HIS', 'IRON', 'NATURE', 'AND', 'THE', 'CAPABILITIES', 'THAT', 'IT', 'GIVES', 'HIM', 'FOR', 'BEING', 'RICH'] +1688-142285-0019-1967: ref=['NOT', 'VICIOUS', 'HE', 'NEVER', 'SAID', 'THAT'] +1688-142285-0019-1967: hyp=['NOT', 'VICIOUS', 'HE', 'NEVER', 'SAID', 'THAT'] +1688-142285-0020-1968: ref=['IMPROVIDENT', 'AND', 'SELF', 'INDULGENT', 'WERE', 'HIS', 'WORDS'] +1688-142285-0020-1968: hyp=['IMPROVIDENT', 'AND', 'SELF', 'INDULGENT', 'WERE', 'HIS', 'WORDS'] +1688-142285-0021-1969: ref=['MARGARET', 'WAS', 'COLLECTING', 'HER', "MOTHER'S", 'WORKING', 'MATERIALS', 'AND', 'PREPARING', 'TO', 'GO', 'TO', 'BED'] +1688-142285-0021-1969: hyp=['MARGARET', 'WAS', 'COLLECTING', 'HER', "MOTHER'S", 'WORKING', 'MATERIALS', 'AND', 'PREPARING', 'TO', 'GO', 'TO', 'BED'] +1688-142285-0022-1970: ref=['JUST', 'AS', 'SHE', 'WAS', 'LEAVING', 'THE', 'ROOM', 'SHE', 'HESITATED', 'SHE', 'WAS', 'INCLINED', 'TO', 'MAKE', 'AN', 'ACKNOWLEDGMENT', 'WHICH', 'SHE', 'THOUGHT', 'WOULD', 'PLEASE', 'HER', 'FATHER', 'BUT', 'WHICH', 'TO', 'BE', 'FULL', 'AND', 'TRUE', 'MUST', 'INCLUDE', 'A', 'LITTLE', 'ANNOYANCE'] +1688-142285-0022-1970: hyp=['JUST', 'AS', 'SHE', 'WAS', 'LEAVING', 'THE', 'ROOM', 'SHE', 'HESITATED', 'SHE', 'WAS', 'INCLINED', 'TO', 'MAKE', 'AN', 'ACKNOWLEDGMENT', 'WHICH', 'SHE', 'THOUGHT', 'WOULD', 'PLEASE', 'HER', 'FATHER', 'BUT', 'WHICH', 'TO', 'BE', 'FULL', 'AND', 'TRUE', 'MUST', 'INCLUDE', 'A', 'LITTLE', 'ANNOYANCE'] +1688-142285-0023-1971: ref=['HOWEVER', 'OUT', 'IT', 'CAME'] +1688-142285-0023-1971: hyp=['HOWEVER', 'OUT', 'IT', 'CAME'] +1688-142285-0024-1972: ref=['PAPA', 'I', 'DO', 'THINK', 'MISTER', 'THORNTON', 'A', 'VERY', 'REMARKABLE', 'MAN', 'BUT', 'PERSONALLY', 'I', "DON'T", 'LIKE', 'HIM', 'AT', 'ALL'] +1688-142285-0024-1972: hyp=['PAPA', 'I', 'DO', 'THINK', 'MISTER', 'THORNTON', 'A', 'VERY', 'REMARKABLE', 'MAN', 'BUT', 'PERSONALLY', 'I', "DON'T", 'LIKE', 'HIM', 'AT', 'ALL'] +1688-142285-0025-1973: ref=['AND', 'I', 'DO', 'SAID', 'HER', 'FATHER', 'LAUGHING'] +1688-142285-0025-1973: hyp=['AND', 'I', 'DO', 'SAID', 'HER', 'FATHER', 'LAUGHING'] +1688-142285-0026-1974: ref=['PERSONALLY', 'AS', 'YOU', 'CALL', 'IT', 'AND', 'ALL'] +1688-142285-0026-1974: hyp=['PERSONALLY', 'AS', 'YOU', 'CALL', 'IT', 'AND', 'ALL'] +1688-142285-0027-1975: ref=['I', "DON'T", 'SET', 'HIM', 'UP', 'FOR', 'A', 'HERO', 'OR', 'ANYTHING', 'OF', 'THAT', 'KIND'] +1688-142285-0027-1975: hyp=['I', "DON'T", 'SET', 'HIM', 'UP', 'FOR', 'A', 'HERO', 'OR', 'ANYTHING', 'OF', 'THAT', 'KIND'] +1688-142285-0028-1976: ref=['BUT', 'GOOD', 'NIGHT', 'CHILD'] +1688-142285-0028-1976: hyp=['BUT', 'GOOD', 'NIGHT', 'CHILD'] +1688-142285-0029-1977: ref=['THERE', 'WERE', 'SEVERAL', 'OTHER', 'SIGNS', 'OF', 'SOMETHING', 'WRONG', 'ABOUT', 'MISSUS', 'HALE'] +1688-142285-0029-1977: hyp=['THERE', 'WERE', 'SEVERAL', 'OTHER', 'SIGNS', 'OF', 'SOMETHING', 'WRONG', 'ABOUT', 'MISSUS', 'HALE'] +1688-142285-0030-1978: ref=['SHE', 'AND', 'DIXON', 'HELD', 'MYSTERIOUS', 'CONSULTATIONS', 'IN', 'HER', 'BEDROOM', 'FROM', 'WHICH', 'DIXON', 'WOULD', 'COME', 'OUT', 'CRYING', 'AND', 'CROSS', 'AS', 'WAS', 'HER', 'CUSTOM', 'WHEN', 'ANY', 'DISTRESS', 'OF', 'HER', 'MISTRESS', 'CALLED', 'UPON', 
'HER', 'SYMPATHY'] +1688-142285-0030-1978: hyp=['SHE', 'AND', 'DIXON', 'HELD', 'MYSTERIOUS', 'CONSULTATIONS', 'IN', 'HER', 'BEDROOM', 'FROM', 'WHICH', 'DIXON', 'WOULD', 'COME', 'OUT', 'CRYING', 'AND', 'CROSS', 'AS', 'WAS', 'ACCUSTOM', 'WHEN', 'ANY', 'DISTRESS', 'OF', 'HER', 'MISTRESS', 'CALLED', 'UPON', 'HER', 'SYMPATHY'] +1688-142285-0031-1979: ref=['ONCE', 'MARGARET', 'HAD', 'GONE', 'INTO', 'THE', 'CHAMBER', 'SOON', 'AFTER', 'DIXON', 'LEFT', 'IT', 'AND', 'FOUND', 'HER', 'MOTHER', 'ON', 'HER', 'KNEES', 'AND', 'AS', 'MARGARET', 'STOLE', 'OUT', 'SHE', 'CAUGHT', 'A', 'FEW', 'WORDS', 'WHICH', 'WERE', 'EVIDENTLY', 'A', 'PRAYER', 'FOR', 'STRENGTH', 'AND', 'PATIENCE', 'TO', 'ENDURE', 'SEVERE', 'BODILY', 'SUFFERING'] +1688-142285-0031-1979: hyp=['ONCE', 'MARGARET', 'HAD', 'GONE', 'INTO', 'THE', 'CHAMBER', 'SOON', 'AFTER', 'DIXON', 'LIFTED', 'AND', 'FOUND', 'HER', 'MOTHER', 'ON', 'HER', 'KNEES', 'AND', 'AS', 'MARGARET', 'STOLE', 'OUT', 'SHE', 'CAUGHT', 'A', 'FEW', 'WORDS', 'WHICH', 'WERE', 'EVIDENTLY', 'A', 'PRAYER', 'FOR', 'STRENGTH', 'AND', 'PATIENCE', 'TO', 'INDUCE', 'SEVERE', 'BODILY', 'SUFFERING'] +1688-142285-0032-1980: ref=['BUT', 'THOUGH', 'SHE', 'RECEIVED', 'CARESSES', 'AND', 'FOND', 'WORDS', 'BACK', 'AGAIN', 'IN', 'SUCH', 'PROFUSION', 'AS', 'WOULD', 'HAVE', 'GLADDENED', 'HER', 'FORMERLY', 'YET', 'SHE', 'FELT', 'THAT', 'THERE', 'WAS', 'A', 'SECRET', 'WITHHELD', 'FROM', 'HER', 'AND', 'SHE', 'BELIEVED', 'IT', 'BORE', 'SERIOUS', 'REFERENCE', 'TO', 'HER', "MOTHER'S", 'HEALTH'] +1688-142285-0032-1980: hyp=['BUT', 'THOUGH', 'SHE', 'RECEIVED', 'CARESSES', 'AND', 'FOND', 'WORDS', 'BACK', 'AGAIN', 'IN', 'SUCH', 'PROFUSION', 'AS', 'WOULD', 'HAVE', 'GLADDENED', 'HER', 'FORMERLY', 'YET', 'SHE', 'FELT', 'THAT', 'THERE', 'WAS', 'A', 'SECRET', 'WITHHELD', 'FROM', 'HER', 'AND', 'SHE', 'BELIEVED', 'IT', 'BORE', 'SERIOUS', 'REFERENCE', 'TO', 'HER', "MOTHER'S", 'HEALTH'] +1688-142285-0033-1981: ref=['SHE', 'LAY', 'AWAKE', 'VERY', 'LONG', 'THIS', 'NIGHT', 'PLANNING', 'HOW', 'TO', 'LESSEN', 'THE', 'EVIL', 'INFLUENCE', 'OF', 'THEIR', 'MILTON', 'LIFE', 'ON', 'HER', 'MOTHER'] +1688-142285-0033-1981: hyp=['SHE', 'LAY', 'AWAKE', 'VERY', 'LONG', 'THIS', 'NIGHT', 'PLANNING', 'HOW', 'TO', 'LISTEN', 'THE', 'EVIL', 'INFLUENCE', 'OF', 'THEIR', 'MILTON', 'LIFE', 'ON', 'HER', 'MOTHER'] +1688-142285-0034-1982: ref=['A', 'SERVANT', 'TO', 'GIVE', 'DIXON', 'PERMANENT', 'ASSISTANCE', 'SHOULD', 'BE', 'GOT', 'IF', 'SHE', 'GAVE', 'UP', 'HER', 'WHOLE', 'TIME', 'TO', 'THE', 'SEARCH', 'AND', 'THEN', 'AT', 'ANY', 'RATE', 'HER', 'MOTHER', 'MIGHT', 'HAVE', 'ALL', 'THE', 'PERSONAL', 'ATTENTION', 'SHE', 'REQUIRED', 'AND', 'HAD', 'BEEN', 'ACCUSTOMED', 'TO', 'HER', 'WHOLE', 'LIFE'] +1688-142285-0034-1982: hyp=['A', 'SERVANT', 'TO', 'GIVE', 'DIXON', 'PERMANENT', 'ASSISTANCE', 'SHOULD', 'BE', 'GOT', 'IF', 'SHE', 'GAVE', 'UP', 'THE', 'WHOLE', 'TIME', 'TO', 'THE', 'SEARCH', 'AND', 'THEN', 'AT', 'ANY', 'RATE', 'HER', 'MOTHER', 'MIGHT', 'HAVE', 'ALL', 'THE', 'PERSONAL', 'ATTENTIONS', 'SHE', 'REQUIRED', 'AND', 'HAD', 'BEEN', 'ACCUSTOMED', 'TO', 'HER', 'WHOLE', 'LIFE'] +1688-142285-0035-1983: ref=['VISITING', 'REGISTER', 'OFFICES', 'SEEING', 'ALL', 'MANNER', 'OF', 'UNLIKELY', 'PEOPLE', 'AND', 'VERY', 'FEW', 'IN', 'THE', 'LEAST', 'LIKELY', 'ABSORBED', "MARGARET'S", 'TIME', 'AND', 'THOUGHTS', 'FOR', 'SEVERAL', 'DAYS'] +1688-142285-0035-1983: hyp=['VISITING', 'REGISTER', 'OFFICERS', 'SEEING', 'ALL', 'MANNER', 'OF', 'UNLIKELY', 'PEOPLE', 'AND', 'VERY', 'FEW', 'IN', 'THE', 'LEAST', 'LIKELY', 'ABSORBED', "MARGARET'S", 'TIME', 'AND', 'THOUGHTS', 'FOR', 
'SEVERAL', 'DAYS'] +1688-142285-0036-1984: ref=['ONE', 'AFTERNOON', 'SHE', 'MET', 'BESSY', 'HIGGINS', 'IN', 'THE', 'STREET', 'AND', 'STOPPED', 'TO', 'SPEAK', 'TO', 'HER'] +1688-142285-0036-1984: hyp=['ONE', 'AFTERNOON', 'SHE', 'MET', 'BESSY', 'HIGGINS', 'IN', 'THE', 'STREET', 'AND', 'STOPPED', 'TO', 'SPEAK', 'TO', 'HER'] +1688-142285-0037-1985: ref=['WELL', 'BESSY', 'HOW', 'ARE', 'YOU'] +1688-142285-0037-1985: hyp=['WELL', 'BUSY', 'HOW', 'ARE', 'YOU'] +1688-142285-0038-1986: ref=['BETTER', 'AND', 'NOT', 'BETTER', 'IF', 'YO', 'KNOW', 'WHAT', 'THAT', 'MEANS'] +1688-142285-0038-1986: hyp=['BETTER', 'AND', 'NOT', 'BETTER', 'IF', 'YOU', 'KNOW', 'WHAT', 'THAT', 'MEANS'] +1688-142285-0039-1987: ref=['NOT', 'EXACTLY', 'REPLIED', 'MARGARET', 'SMILING'] +1688-142285-0039-1987: hyp=['NOT', 'EXACTLY', 'REPLIED', 'MARGARET', 'SMILING'] +1688-142285-0040-1988: ref=["I'M", 'BETTER', 'IN', 'NOT', 'BEING', 'TORN', 'TO', 'PIECES', 'BY', 'COUGHING', "O'NIGHTS", 'BUT', "I'M", 'WEARY', 'AND', 'TIRED', 'O', 'MILTON', 'AND', 'LONGING', 'TO', 'GET', 'AWAY', 'TO', 'THE', 'LAND', 'O', 'BEULAH', 'AND', 'WHEN', 'I', 'THINK', "I'M", 'FARTHER', 'AND', 'FARTHER', 'OFF', 'MY', 'HEART', 'SINKS', 'AND', "I'M", 'NO', 'BETTER', "I'M", 'WORSE'] +1688-142285-0040-1988: hyp=["I'M", 'BETTER', 'IN', 'NOT', 'BEING', 'TAUGHT', 'TO', 'PIECES', 'BY', 'COUGHING', 'OR', 'NIGHTS', 'BUT', "I'M", 'WEARY', 'AND', 'TIRED', 'OF', 'MILTON', 'AND', 'LONGING', 'TO', 'GET', 'AWAY', 'TO', 'THE', 'LAND', 'OF', 'BOOLA', 'AND', 'WHEN', 'I', 'THINK', "I'M", 'FARTHER', 'AND', 'FARTHER', 'OFF', 'MY', 'HEART', 'SINKS', 'AND', "I'M", 'NO', 'BETTER', "I'M", 'WORSE'] +1688-142285-0041-1989: ref=['MARGARET', 'TURNED', 'ROUND', 'TO', 'WALK', 'ALONGSIDE', 'OF', 'THE', 'GIRL', 'IN', 'HER', 'FEEBLE', 'PROGRESS', 'HOMEWARD'] +1688-142285-0041-1989: hyp=['MARGARET', 'TURNED', 'AROUND', 'TO', 'WALK', 'LONG', 'SIDE', 'OF', 'THE', 'GIRL', 'IN', 'HER', 'FEEBLE', 'PROGRESS', 'HOMEWARD'] +1688-142285-0042-1990: ref=['BUT', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'SHE', 'DID', 'NOT', 'SPEAK'] +1688-142285-0042-1990: hyp=['BUT', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'SHE', 'DID', 'NOT', 'SPEAK'] +1688-142285-0043-1991: ref=['AT', 'LAST', 'SHE', 'SAID', 'IN', 'A', 'LOW', 'VOICE'] +1688-142285-0043-1991: hyp=['AT', 'LAST', 'SHE', 'SAID', 'IN', 'A', 'LOW', 'VOICE'] +1688-142285-0044-1992: ref=['BESSY', 'DO', 'YOU', 'WISH', 'TO', 'DIE'] +1688-142285-0044-1992: hyp=['BESSY', 'DO', 'YOU', 'WISH', 'TO', 'DIE'] +1688-142285-0045-1993: ref=['BESSY', 'WAS', 'SILENT', 'IN', 'HER', 'TURN', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'THEN', 'SHE', 'REPLIED'] +1688-142285-0045-1993: hyp=['BESSY', 'WAS', 'SILENT', 'IN', 'HER', 'TURN', 'FOR', 'A', 'MINUTE', 'OR', 'TWO', 'THEN', 'SHE', 'REPLIED'] +1688-142285-0046-1994: ref=['NOUGHT', 'WORSE', 'THAN', 'MANY', 'OTHERS', 'I', 'RECKON'] +1688-142285-0046-1994: hyp=['NOT', 'WORSE', 'THAN', 'MANY', 'OTHERS', 'I', 'RECKON'] +1688-142285-0047-1995: ref=['BUT', 'WHAT', 'WAS', 'IT'] +1688-142285-0047-1995: hyp=['BUT', 'WHAT', 'WAS', 'IT'] +1688-142285-0048-1996: ref=['YOU', 'KNOW', "I'M", 'A', 'STRANGER', 'HERE', 'SO', 'PERHAPS', "I'M", 'NOT', 'SO', 'QUICK', 'AT', 'UNDERSTANDING', 'WHAT', 'YOU', 'MEAN', 'AS', 'IF', "I'D", 'LIVED', 'ALL', 'MY', 'LIFE', 'AT', 'MILTON'] +1688-142285-0048-1996: hyp=['YOU', 'KNOW', "I'M", 'A', 'STRANGER', 'HERE', 'SO', 'PERHAPS', "I'M", 'NOT', 'SO', 'QUICK', 'AT', 'UNDERSTANDING', 'WHAT', 'YOU', 'MEAN', 'AS', 'IF', "I'D", 'LIVED', 'ALL', 'MY', 'LIFE', 'IN', 'MILTON'] +1688-142285-0049-1997: ref=['I', 'HAD', 'FORGOTTEN', 'WHAT', 'I', 
'SAID', 'FOR', 'THE', 'TIME', 'CONTINUED', 'MARGARET', 'QUIETLY'] +1688-142285-0049-1997: hyp=['I', 'HAD', 'FORGOTTEN', 'WHAT', 'I', 'SAID', 'FOR', 'THE', 'TIME', 'CONTINUED', 'MARGARET', 'QUIETLY'] +1688-142285-0050-1998: ref=['I', 'SHOULD', 'HAVE', 'THOUGHT', 'OF', 'IT', 'AGAIN', 'WHEN', 'I', 'WAS', 'LESS', 'BUSY', 'MAY', 'I', 'GO', 'WITH', 'YOU', 'NOW'] +1688-142285-0050-1998: hyp=['I', 'SHOULD', 'HAVE', 'THOUGHT', 'OF', 'IT', 'AGAIN', 'WHEN', 'I', 'WAS', 'LESS', 'BUSY', 'MAY', 'I', 'GO', 'WITH', 'YOU', 'NOW'] +1688-142285-0051-1999: ref=['THE', 'SHARPNESS', 'IN', 'HER', 'EYE', 'TURNED', 'TO', 'A', 'WISTFUL', 'LONGING', 'AS', 'SHE', 'MET', "MARGARET'S", 'SOFT', 'AND', 'FRIENDLY', 'GAZE'] +1688-142285-0051-1999: hyp=['THE', 'SHARPNESS', 'IN', 'HER', 'EYE', 'TURNED', 'TO', 'A', 'WISTFUL', 'LONGING', 'AS', 'SHE', 'MET', 'MARGARET', 'SOFT', 'AND', 'FRIENDLY', 'GAZE'] +1688-142285-0052-2000: ref=['AS', 'THEY', 'TURNED', 'UP', 'INTO', 'A', 'SMALL', 'COURT', 'OPENING', 'OUT', 'OF', 'A', 'SQUALID', 'STREET', 'BESSY', 'SAID'] +1688-142285-0052-2000: hyp=['AS', 'THEY', 'TURNED', 'UP', 'INTO', 'A', 'SMALL', 'COURT', 'OPENING', 'OUT', 'INTO', 'A', 'SQUALID', 'STREET', 'BESSY', 'SAID'] +1688-142285-0053-2001: ref=["YO'LL", 'NOT', 'BE', 'DAUNTED', 'IF', "FATHER'S", 'AT', 'HOME', 'AND', 'SPEAKS', 'A', 'BIT', 'GRUFFISH', 'AT', 'FIRST'] +1688-142285-0053-2001: hyp=['YOU', 'WILL', 'NOT', 'BE', 'DAUNTED', 'IF', "FATHER'S", 'AT', 'HOME', 'AND', 'SPEAKS', 'A', 'BIT', 'GRUFFISH', 'AT', 'FIRST'] +1688-142285-0054-2002: ref=['BUT', 'NICHOLAS', 'WAS', 'NOT', 'AT', 'HOME', 'WHEN', 'THEY', 'ENTERED'] +1688-142285-0054-2002: hyp=['BUT', 'NICHOLAS', 'WAS', 'NOT', 'AT', 'HOME', 'WHEN', 'THEY', 'ENTERED'] +1688-142285-0055-2003: ref=['GASPED', 'BESSY', 'AT', 'LAST'] +1688-142285-0055-2003: hyp=['GASPED', 'BESSY', 'AT', 'LAST'] +1688-142285-0056-2004: ref=['BESSY', 'TOOK', 'A', 'LONG', 'AND', 'FEVERISH', 'DRAUGHT', 'AND', 'THEN', 'FELL', 'BACK', 'AND', 'SHUT', 'HER', 'EYES'] +1688-142285-0056-2004: hyp=['BESSY', 'TOOK', 'A', 'LONG', 'AND', 'FEVERISH', 'DRAUGHT', 'AND', 'THEN', 'FELL', 'BACK', 'AND', 'SHUT', 'HER', 'EYES'] +1688-142285-0057-2005: ref=['MARGARET', 'BENT', 'OVER', 'AND', 'SAID', 'BESSY', "DON'T", 'BE', 'IMPATIENT', 'WITH', 'YOUR', 'LIFE', 'WHATEVER', 'IT', 'IS', 'OR', 'MAY', 'HAVE', 'BEEN'] +1688-142285-0057-2005: hyp=['MARGARET', 'BENT', 'OVER', 'AND', 'SAID', 'BESSY', "DON'T", 'BE', 'IMPATIENT', 'WITH', 'YOUR', 'LIFE', 'WHATEVER', 'IT', 'IS', 'OR', 'MAY', 'HAVE', 'BEEN'] +1688-142285-0058-2006: ref=['REMEMBER', 'WHO', 'GAVE', 'IT', 'YOU', 'AND', 'MADE', 'IT', 'WHAT', 'IT', 'IS'] +1688-142285-0058-2006: hyp=['REMEMBER', 'WHO', 'GAVE', 'IT', 'TO', 'YOU', 'AND', 'MADE', 'IT', 'WHAT', 'IT', 'IS'] +1688-142285-0059-2007: ref=['NOW', "I'LL", 'NOT', 'HAVE', 'MY', 'WENCH', 'PREACHED', 'TO'] +1688-142285-0059-2007: hyp=['NOW', "I'LL", 'NOT', 'HAVE', 'MY', 'WENCH', 'PREACH', 'TO'] +1688-142285-0060-2008: ref=['BUT', 'SURELY', 'SAID', 'MARGARET', 'FACING', 'ROUND', 'YOU', 'BELIEVE', 'IN', 'WHAT', 'I', 'SAID', 'THAT', 'GOD', 'GAVE', 'HER', 'LIFE', 'AND', 'ORDERED', 'WHAT', 'KIND', 'OF', 'LIFE', 'IT', 'WAS', 'TO', 'BE'] +1688-142285-0060-2008: hyp=['BUT', 'SURELY', 'SAID', 'MARGARET', 'FACING', 'ROUND', 'YOU', 'BELIEVE', 'IN', 'WHAT', 'I', 'SAID', 'THAT', 'GOD', 'GAVE', 'HER', 'LIFE', 'AND', 'ORDERED', 'WHAT', 'KIND', 'OF', 'LIFE', 'IT', 'WAS', 'TO', 'BE'] +1688-142285-0061-2009: ref=['I', 'BELIEVE', 'WHAT', 'I', 'SEE', 'AND', 'NO', 'MORE'] +1688-142285-0061-2009: hyp=['I', 'BELIEVE', 'WHAT', 'I', 'SEE', 'AND', 
'NO', 'MORE'] +1688-142285-0062-2010: ref=["THAT'S", 'WHAT', 'I', 'BELIEVE', 'YOUNG', 'WOMAN'] +1688-142285-0062-2010: hyp=["THAT'S", 'WHAT', 'I', 'BELIEVE', 'YOUNG', 'WOMAN'] +1688-142285-0063-2011: ref=['I', "DON'T", 'BELIEVE', 'ALL', 'I', 'HEAR', 'NO', 'NOT', 'BY', 'A', 'BIG', 'DEAL'] +1688-142285-0063-2011: hyp=['I', "DON'T", 'BELIEVE', 'ALL', 'I', 'HEAR', 'NO', 'NOT', 'BY', 'A', 'BIG', 'DEAL'] +1688-142285-0064-2012: ref=['BUT', "HOO'S", 'COME', 'AT', 'LAST', 'AND', "HOO'S", 'WELCOME', 'AS', 'LONG', 'AS', "HOO'LL", 'KEEP', 'FROM', 'PREACHING', 'ON', 'WHAT', 'HOO', 'KNOWS', 'NOUGHT', 'ABOUT'] +1688-142285-0064-2012: hyp=['BUT', 'WHOSE', 'COME', 'AT', 'LAST', 'AND', "WHO'S", 'WELCOME', 'AS', 'LONG', 'AS', "HE'LL", 'KEEP', 'FROM', 'PREACHING', 'ON', 'WHAT', 'WHO', 'KNOWS', 'NOT', 'ABOUT'] +1688-142285-0065-2013: ref=["IT'S", 'SIMPLE', 'AND', 'NOT', 'FAR', 'TO', 'FETCH', 'NOR', 'HARD', 'TO', 'WORK'] +1688-142285-0065-2013: hyp=["IT'S", 'SIMPLE', 'AND', 'NOT', 'FAR', 'TO', 'FETCH', 'NOR', 'HARD', 'TO', 'WORK'] +1688-142285-0066-2014: ref=['BUT', 'THE', 'GIRL', 'ONLY', 'PLEADED', 'THE', 'MORE', 'WITH', 'MARGARET'] +1688-142285-0066-2014: hyp=['BUT', 'THE', 'GIRL', 'ONLY', 'PLEADED', 'THE', 'MORE', 'WITH', 'MARGARET'] +1688-142285-0067-2015: ref=["DON'T", 'THINK', 'HARDLY', 'ON', 'HIM', "HE'S", 'A', 'GOOD', 'MAN', 'HE', 'IS'] +1688-142285-0067-2015: hyp=["DON'T", 'THINK', 'HARDLY', 'ON', 'HIM', "HE'S", 'A', 'GOOD', 'MAN', 'HE', 'IS'] +1688-142285-0068-2016: ref=['I', 'SOMETIMES', 'THINK', 'I', 'SHALL', 'BE', 'MOPED', 'WI', 'SORROW', 'EVEN', 'IN', 'THE', 'CITY', 'OF', 'GOD', 'IF', 'FATHER', 'IS', 'NOT', 'THERE'] +1688-142285-0068-2016: hyp=['I', 'SOMETIMES', 'THINK', 'I', 'SHALL', 'BE', 'MILKED', 'WITH', 'SORROW', 'EVEN', 'IN', 'THE', 'CITY', 'OF', 'GOD', 'IF', 'EITHER', 'IS', 'NOT', 'THERE'] +1688-142285-0069-2017: ref=['THE', 'FEVERISH', 'COLOUR', 'CAME', 'INTO', 'HER', 'CHEEK', 'AND', 'THE', 'FEVERISH', 'FLAME', 'INTO', 'HER', 'EYE'] +1688-142285-0069-2017: hyp=['THE', 'FEVERISH', 'COLOUR', 'CAME', 'INTO', 'A', 'CHEEKS', 'AND', 'THE', 'FEVERISH', 'FLAME', 'INTO', 'HER', 'EYE'] +1688-142285-0070-2018: ref=['BUT', 'YOU', 'WILL', 'BE', 'THERE', 'FATHER', 'YOU', 'SHALL', 'OH', 'MY', 'HEART'] +1688-142285-0070-2018: hyp=['BUT', "YOU'LL", 'BE', 'THEIR', 'FATHER', 'YOU', 'SHALL', 'OH', 'MY', 'HEART'] +1688-142285-0071-2019: ref=['SHE', 'PUT', 'HER', 'HAND', 'TO', 'IT', 'AND', 'BECAME', 'GHASTLY', 'PALE'] +1688-142285-0071-2019: hyp=['SHE', 'PUT', 'HER', 'HAND', 'TO', 'IT', 'AND', 'BECAME', 'GHASTLY', 'PALE'] +1688-142285-0072-2020: ref=['MARGARET', 'HELD', 'HER', 'IN', 'HER', 'ARMS', 'AND', 'PUT', 'THE', 'WEARY', 'HEAD', 'TO', 'REST', 'UPON', 'HER', 'BOSOM'] +1688-142285-0072-2020: hyp=['MARGARET', 'HELD', 'HER', 'IN', 'HER', 'ARMS', 'AND', 'PUT', 'THE', 'WEARY', 'HEAD', 'TO', 'REST', 'UPON', 'HER', 'BOSOM'] +1688-142285-0073-2021: ref=['PRESENTLY', 'THE', 'SPASM', 'THAT', 'FORESHADOWED', 'DEATH', 'HAD', 'PASSED', 'AWAY', 'AND', 'BESSY', 'ROUSED', 'HERSELF', 'AND', 'SAID'] +1688-142285-0073-2021: hyp=['PRESENTLY', 'THE', 'SPASM', 'THAT', 'FORESHADOWED', 'DEATH', 'HAD', 'PASSED', 'AWAY', 'AND', 'BUSY', 'ROUSED', 'HERSELF', 'AND', 'SAID'] +1688-142285-0074-2022: ref=["I'LL", 'GO', 'TO', 'BED', "IT'S", 'BEST', 'PLACE', 'BUT', 'CATCHING', 'AT', "MARGARET'S", 'GOWN', "YO'LL", 'COME', 'AGAIN', 'I', 'KNOW', 'YO', 'WILL', 'BUT', 'JUST', 'SAY', 'IT'] +1688-142285-0074-2022: hyp=["I'LL", 'GO', 'TO', 'BED', "IT'S", 'BEST', 'PLACE', 'BUT', 'CATCHING', 'THAT', "MARGARET'S", 'GOWN', "YOU'LL", 'COME', 'AGAIN', 
'I', 'KNOW', 'YOU', 'WILL', 'BUT', 'JUST', 'SAY', 'IT'] +1688-142285-0075-2023: ref=['I', 'WILL', 'COME', 'TO', 'MORROW', 'SAID', 'MARGARET'] +1688-142285-0075-2023: hyp=['OH', 'COME', 'TO', 'MORROW', 'SAID', 'MARGARET'] +1688-142285-0076-2024: ref=['MARGARET', 'WENT', 'AWAY', 'VERY', 'SAD', 'AND', 'THOUGHTFUL'] +1688-142285-0076-2024: hyp=['MARGARET', 'WENT', 'AWAY', 'VERY', 'SAD', 'AND', 'THOUGHTFUL'] +1688-142285-0077-2025: ref=['SHE', 'WAS', 'LATE', 'FOR', 'TEA', 'AT', 'HOME'] +1688-142285-0077-2025: hyp=['SHE', 'WAS', 'LATE', 'FOR', 'TEA', 'AT', 'HOME'] +1688-142285-0078-2026: ref=['HAVE', 'YOU', 'MET', 'WITH', 'A', 'SERVANT', 'DEAR'] +1688-142285-0078-2026: hyp=['HAVE', 'YOU', 'MET', 'WITH', 'A', 'SERVANT', 'DEAR'] +1688-142285-0079-2027: ref=['NO', 'MAMMA', 'THAT', 'ANNE', 'BUCKLEY', 'WOULD', 'NEVER', 'HAVE', 'DONE'] +1688-142285-0079-2027: hyp=['NO', 'MAMMA', 'THAT', 'ANNE', 'BUCKLEY', 'WOULD', 'NEVER', 'HAVE', 'DONE'] +1688-142285-0080-2028: ref=['SUPPOSE', 'I', 'TRY', 'SAID', 'MISTER', 'HALE'] +1688-142285-0080-2028: hyp=["S'POSE", 'I', 'TRY', 'SAID', 'MISTER', 'HALE'] +1688-142285-0081-2029: ref=['EVERYBODY', 'ELSE', 'HAS', 'HAD', 'THEIR', 'TURN', 'AT', 'THIS', 'GREAT', 'DIFFICULTY', 'NOW', 'LET', 'ME', 'TRY'] +1688-142285-0081-2029: hyp=['EVERYBODY', 'ELSE', 'HAS', 'HAD', 'THEY', 'TURN', 'UP', 'THIS', 'GREAT', 'DIFFICULTY', 'NOW', 'LET', 'ME', 'TRY'] +1688-142285-0082-2030: ref=['I', 'MAY', 'BE', 'THE', 'CINDERELLA', 'TO', 'PUT', 'ON', 'THE', 'SLIPPER', 'AFTER', 'ALL'] +1688-142285-0082-2030: hyp=['I', 'MAY', 'BE', 'THE', 'CINRILLA', 'TO', 'PUT', 'ON', 'THE', 'SLIPPER', 'AFTER', 'ALL'] +1688-142285-0083-2031: ref=['WHAT', 'WOULD', 'YOU', 'DO', 'PAPA', 'HOW', 'WOULD', 'YOU', 'SET', 'ABOUT', 'IT'] +1688-142285-0083-2031: hyp=['WHAT', 'WOULD', 'YOU', 'DO', 'PAPA', 'HOW', 'WOULD', 'YOU', 'SET', 'ABOUT', 'IT'] +1688-142285-0084-2032: ref=['WHY', 'I', 'WOULD', 'APPLY', 'TO', 'SOME', 'GOOD', 'HOUSE', 'MOTHER', 'TO', 'RECOMMEND', 'ME', 'ONE', 'KNOWN', 'TO', 'HERSELF', 'OR', 'HER', 'SERVANTS'] +1688-142285-0084-2032: hyp=['WHY', 'I', 'WOULD', 'APPLY', 'IT', 'TO', 'SOME', 'GOOD', 'HOUSE', 'MOTHER', 'TO', 'RECOMMEND', 'ME', 'ONE', 'KNOWN', 'TO', 'HERSELF', 'OR', 'HER', 'SERVANTS'] +1688-142285-0085-2033: ref=['VERY', 'GOOD', 'BUT', 'WE', 'MUST', 'FIRST', 'CATCH', 'OUR', 'HOUSE', 'MOTHER'] +1688-142285-0085-2033: hyp=['VERY', 'GOOD', 'BUT', 'WE', 'MUST', 'FIRST', 'CATCH', 'OUR', 'HOUSE', 'MOTHER'] +1688-142285-0086-2034: ref=['THE', 'MOTHER', 'OF', 'WHOM', 'HE', 'SPOKE', 'TO', 'US', 'SAID', 'MARGARET'] +1688-142285-0086-2034: hyp=['THE', 'MOTHER', 'OF', 'WHOM', 'HE', 'SPOKE', 'TO', 'US', 'SAID', 'MARGARET'] +1688-142285-0087-2035: ref=['MISSUS', 'THORNTON', 'THE', 'ONLY', 'MOTHER', 'HE', 'HAS', 'I', 'BELIEVE', 'SAID', 'MISTER', 'HALE', 'QUIETLY'] +1688-142285-0087-2035: hyp=['MISTER', 'THORNTON', 'THE', 'ONLY', 'MOTHER', 'HE', 'HAS', 'I', 'BELIEVE', 'SAID', 'MISTER', 'HALE', 'QUIETLY'] +1688-142285-0088-2036: ref=['I', 'SHALL', 'LIKE', 'TO', 'SEE', 'HER', 'SHE', 'MUST', 'BE', 'AN', 'UNCOMMON', 'PERSON', 'HER', 'MOTHER', 'ADDED'] +1688-142285-0088-2036: hyp=['I', 'SHALL', 'LIKE', 'TO', 'SEE', 'HER', 'SHE', 'MUST', 'BE', 'AN', 'UNCOMMON', 'PERSON', 'HER', 'MOTHER', 'ADDED'] +1688-142285-0089-2037: ref=['PERHAPS', 'SHE', 'MAY', 'HAVE', 'A', 'RELATION', 'WHO', 'MIGHT', 'SUIT', 'US', 'AND', 'BE', 'GLAD', 'OF', 'OUR', 'PLACE'] +1688-142285-0089-2037: hyp=['PERHAPS', 'SHE', 'MAY', 'HAVE', 'A', 'RELATION', 'WHO', 'MIGHT', 'SUIT', 'US', 'AND', 'BE', 'GLAD', 'OF', 'OUR', 'PLACE'] 
+1688-142285-0090-2038: ref=['SHE', 'SOUNDED', 'TO', 'BE', 'SUCH', 'A', 'CAREFUL', 'ECONOMICAL', 'PERSON', 'THAT', 'I', 'SHOULD', 'LIKE', 'ANY', 'ONE', 'OUT', 'OF', 'THE', 'SAME', 'FAMILY'] +1688-142285-0090-2038: hyp=['SHE', 'SOUNDED', 'TO', 'BE', 'SUCH', 'A', 'CAREFUL', 'ECONOMICAL', 'PERSON', 'THAT', 'I', 'SHOULD', 'LIKE', 'ANY', 'ONE', 'OUT', 'OF', 'THE', 'SAME', 'FAMILY'] +1688-142285-0091-2039: ref=['MY', 'DEAR', 'SAID', 'MISTER', 'HALE', 'ALARMED', 'PRAY', "DON'T", 'GO', 'OFF', 'ON', 'THAT', 'IDEA'] +1688-142285-0091-2039: hyp=['MY', 'DEAR', 'SAID', 'MISTER', 'HALE', 'ALARMED', 'PRAY', "DON'T", 'GO', 'OFF', 'ON', 'THAT', 'IDEA'] +1688-142285-0092-2040: ref=['I', 'AM', 'SURE', 'AT', 'ANY', 'RATE', 'SHE', 'WOULD', 'NOT', 'LIKE', 'STRANGERS', 'TO', 'KNOW', 'ANYTHING', 'ABOUT', 'IT'] +1688-142285-0092-2040: hyp=['I', 'AM', 'SURE', 'AT', 'ANY', 'RATE', 'SHE', 'WOULD', 'NOT', 'LIKE', 'STRANGERS', 'TO', 'KNOW', 'ANYTHING', 'ABOUT', 'IT'] +1688-142285-0093-2041: ref=['TAKE', 'NOTICE', 'THAT', 'IS', 'NOT', 'MY', 'KIND', 'OF', 'HAUGHTINESS', 'PAPA', 'IF', 'I', 'HAVE', 'ANY', 'AT', 'ALL', 'WHICH', 'I', "DON'T", 'AGREE', 'TO', 'THOUGH', "YOU'RE", 'ALWAYS', 'ACCUSING', 'ME', 'OF', 'IT'] +1688-142285-0093-2041: hyp=['TAKE', 'NOTICE', 'THAT', 'THIS', 'IS', 'NOT', 'MY', 'KIND', 'OF', 'FORTNESS', 'PAPA', 'IF', 'I', 'HAVE', 'ANY', 'AT', 'ALL', 'WHICH', 'I', "DON'T", 'AGREE', 'TO', 'THOUGH', 'YOU', 'ALWAYS', 'ACCUSING', 'ME', 'OF', 'IT'] +1688-142285-0094-2042: ref=['I', "DON'T", 'KNOW', 'POSITIVELY', 'THAT', 'IT', 'IS', 'HERS', 'EITHER', 'BUT', 'FROM', 'LITTLE', 'THINGS', 'I', 'HAVE', 'GATHERED', 'FROM', 'HIM', 'I', 'FANCY', 'SO'] +1688-142285-0094-2042: hyp=['I', "DON'T", 'KNOW', 'POSITIVELY', 'THAT', 'IT', 'IS', 'HERS', 'EITHER', 'BUT', 'FROM', 'LITTLE', 'THINGS', 'I', 'HAVE', 'GATHERED', 'FROM', 'HIM', 'I', 'FANCY', 'SO'] +1688-142285-0095-2043: ref=['THEY', 'CARED', 'TOO', 'LITTLE', 'TO', 'ASK', 'IN', 'WHAT', 'MANNER', 'HER', 'SON', 'HAD', 'SPOKEN', 'ABOUT', 'HER'] +1688-142285-0095-2043: hyp=['THEY', 'CARED', 'TOO', 'LITTLE', 'TO', 'ASK', 'IN', 'WHAT', 'MANNER', 'HER', 'SON', 'HAD', 'SPOKEN', 'ABOUT', 'HER'] +1998-15444-0000-2204: ref=['IF', 'CALLED', 'TO', 'A', 'CASE', 'SUPPOSED', 'OR', 'SUSPECTED', 'TO', 'BE', 'ONE', 'OF', 'POISONING', 'THE', 'MEDICAL', 'MAN', 'HAS', 'TWO', 'DUTIES', 'TO', 'PERFORM', 'TO', 'SAVE', 'THE', "PATIENT'S", 'LIFE', 'AND', 'TO', 'PLACE', 'HIMSELF', 'IN', 'A', 'POSITION', 'TO', 'GIVE', 'EVIDENCE', 'IF', 'CALLED', 'ON', 'TO', 'DO', 'SO'] +1998-15444-0000-2204: hyp=['IF', 'CALLED', 'TO', 'A', 'CASE', 'SUPPOSED', 'OF', 'SUSPECTED', 'TO', 'BE', 'ONE', 'OF', 'POISONING', 'THE', 'MEDICAL', 'MAN', 'HAS', 'TWO', 'DUTIES', 'TO', 'PERFORM', 'TO', 'SAVE', 'THE', "PATIENT'S", 'LIFE', 'AND', 'TO', 'PLACE', 'HIMSELF', 'IN', 'A', 'POSITION', 'TO', 'GIVE', 'EVIDENCE', 'OF', 'CALLED', 'UNTO', 'SO'] +1998-15444-0001-2205: ref=['HE', 'SHOULD', 'MAKE', 'INQUIRIES', 'AS', 'TO', 'SYMPTOMS', 'AND', 'TIME', 'AT', 'WHICH', 'FOOD', 'OR', 'MEDICINE', 'WAS', 'LAST', 'TAKEN'] +1998-15444-0001-2205: hyp=['HE', 'SHOULD', 'MAKE', 'INQUIRIES', 'AS', 'TO', 'SYMPTOMS', 'AND', 'TIME', 'AT', 'WHICH', 'FOOD', 'OR', 'MEDICINE', 'WAS', 'LAST', 'TAKEN'] +1998-15444-0002-2206: ref=['HE', 'SHOULD', 'NOTICE', 'THE', 'POSITION', 'AND', 'TEMPERATURE', 'OF', 'THE', 'BODY', 'THE', 'CONDITION', 'OF', 'RIGOR', 'MORTIS', 'MARKS', 'OF', 'VIOLENCE', 'APPEARANCE', 'OF', 'LIPS', 'AND', 'MOUTH'] +1998-15444-0002-2206: hyp=['HE', 'SHOULD', 'NOTICE', 'THE', 'POSITION', 'AND', 'TEMPERATURE', 'OF', 'THE', 'BODY', 'THE', 
'CONDITION', 'OF', 'RIGA', 'MORTARS', 'MARKS', 'OF', 'IDENTS', 'APPEARANCE', 'OF', 'LIPS', 'AND', 'MOUTH'] +1998-15444-0003-2207: ref=['IN', 'MAKING', 'A', 'POST', 'MORTEM', 'EXAMINATION', 'THE', 'ALIMENTARY', 'CANAL', 'SHOULD', 'BE', 'REMOVED', 'AND', 'PRESERVED', 'FOR', 'FURTHER', 'INVESTIGATION'] +1998-15444-0003-2207: hyp=['IN', 'MAKING', 'A', 'POST', 'MODER', 'MAXIMMUNITION', 'THE', 'ELEMENTARY', 'CANAL', 'SHOULD', 'BE', 'REMOVED', 'AND', 'PRESERVED', 'FOR', 'FURTHER', 'INVESTIGATION'] +1998-15444-0004-2208: ref=['THE', 'GUT', 'AND', 'THE', 'GULLET', 'BEING', 'CUT', 'ACROSS', 'BETWEEN', 'THESE', 'LIGATURES', 'THE', 'STOMACH', 'MAY', 'BE', 'REMOVED', 'ENTIRE', 'WITHOUT', 'SPILLING', 'ITS', 'CONTENTS'] +1998-15444-0004-2208: hyp=['THE', 'GUT', 'AND', 'THE', 'COLLEGE', 'BEING', 'CUT', 'ACROSS', 'BETWEEN', 'THESE', 'LIGATURES', 'THE', 'STOMACH', 'MAY', 'BE', 'REMOVED', 'AND', 'TIRED', 'WITHOUT', 'SPINNING', 'ITS', 'CONTENTS'] +1998-15444-0005-2209: ref=['IF', 'THE', 'MEDICAL', 'PRACTITIONER', 'IS', 'IN', 'DOUBT', 'ON', 'ANY', 'POINT', 'HE', 'SHOULD', 'OBTAIN', 'TECHNICAL', 'ASSISTANCE', 'FROM', 'SOMEONE', 'WHO', 'HAS', 'PAID', 'ATTENTION', 'TO', 'THE', 'SUBJECT'] +1998-15444-0005-2209: hyp=['IF', 'THE', 'MEDICA', 'PRACTITIONERS', 'ENDOWED', 'ON', 'ANY', 'POINT', 'HE', 'SHOULD', 'OBTAIN', 'TECHNICAL', 'ASSISTANCE', 'FROM', 'SOME', 'ONE', 'WHO', 'HAS', 'PAID', 'ATTENTION', 'TO', 'THE', 'SUBJECT'] +1998-15444-0006-2210: ref=['IN', 'A', 'CASE', 'OF', 'ATTEMPTED', 'SUICIDE', 'BY', 'POISONING', 'IS', 'IT', 'THE', 'DUTY', 'OF', 'THE', 'DOCTOR', 'TO', 'INFORM', 'THE', 'POLICE'] +1998-15444-0006-2210: hyp=['IN', 'A', 'CASE', 'OF', 'ATTEMPTED', 'SUICIDE', 'BY', 'POISONING', 'IS', 'IT', 'THE', 'DUTY', 'OF', 'THE', 'DOCTOR', 'TO', 'INFORM', 'THE', 'POLICE'] +1998-15444-0007-2211: ref=['THE', 'BEST', 'EMETIC', 'IS', 'THAT', 'WHICH', 'IS', 'AT', 'HAND'] +1998-15444-0007-2211: hyp=['THE', 'BEST', 'AMATIC', 'IS', 'THAT', 'WHICH', 'IS', 'AT', 'HAND'] +1998-15444-0008-2212: ref=['THE', 'DOSE', 'FOR', 'AN', 'ADULT', 'IS', 'TEN', 'MINIMS'] +1998-15444-0008-2212: hyp=['THE', 'DOSE', 'FOR', 'NO', 'DOUBT', 'IS', 'TEN', 'MINIMS'] +1998-15444-0009-2213: ref=['APOMORPHINE', 'IS', 'NOT', 'ALLIED', 'IN', 'PHYSIOLOGICAL', 'ACTION', 'TO', 'MORPHINE', 'AND', 'MAY', 'BE', 'GIVEN', 'IN', 'CASES', 'OF', 'NARCOTIC', 'POISONING'] +1998-15444-0009-2213: hyp=['EPIMORPHONE', 'IS', 'NOT', 'ALIT', 'IN', 'PHYSIOLOGICAL', 'ACTION', 'TO', 'MORPHINE', 'AND', 'MAY', 'BE', 'GIVEN', 'IN', 'CASES', 'OF', 'NAUCOTIC', 'POISONING'] +1998-15444-0010-2214: ref=['TICKLING', 'THE', 'FAUCES', 'WITH', 'A', 'FEATHER', 'MAY', 'EXCITE', 'VOMITING'] +1998-15444-0010-2214: hyp=['TICKLING', 'THE', 'FORCES', 'WITH', 'THE', 'FEATHER', 'MAY', 'EXCITE', 'RHOMETTING'] +1998-15444-0011-2215: ref=['IN', 'USING', 'THE', 'ELASTIC', 'STOMACH', 'TUBE', 'SOME', 'FLUID', 'SHOULD', 'BE', 'INTRODUCED', 'INTO', 'THE', 'STOMACH', 'BEFORE', 'ATTEMPTING', 'TO', 'EMPTY', 'IT', 'OR', 'A', 'PORTION', 'OF', 'THE', 'MUCOUS', 'MEMBRANE', 'MAY', 'BE', 'SUCKED', 'INTO', 'THE', 'APERTURE'] +1998-15444-0011-2215: hyp=['IN', 'USING', 'THE', 'ELASTIC', 'STOMACH', 'TUBE', 'SOME', 'FLUID', 'SHOULD', 'BE', 'INTRODUCED', 'INTO', 'THE', 'STOMACH', 'BEFORE', 'ATTEMPTING', 'TO', 'EMPTY', 'IT', 'OR', 'A', 'PORTION', 'OF', 'THE', 'MUCOUS', 'MEMORANE', 'MAY', 'BE', 'SACKED', 'INTO', 'THE', 'APERTURE'] +1998-15444-0012-2216: ref=['THE', 'TUBE', 'SHOULD', 'BE', 'EXAMINED', 'TO', 'SEE', 'THAT', 'IT', 'IS', 'NOT', 'BROKEN', 'OR', 'CRACKED', 'AS', 'ACCIDENTS', 'HAVE', 'HAPPENED', 'FROM', 
'NEGLECTING', 'THIS', 'PRECAUTION'] +1998-15444-0012-2216: hyp=['THE', 'TUBE', 'SHOULD', 'BE', 'EXAMINED', 'TO', 'SEE', 'THAT', 'IT', 'IS', 'NOT', 'BROKEN', 'OR', 'CRACKED', 'AS', 'ACCIDENTS', 'HAVE', 'HAPPENED', 'FROM', 'NEGLECTING', 'THIS', 'PRECAUTION'] +1998-15444-0013-2217: ref=['ANTIDOTES', 'ARE', 'USUALLY', 'GIVEN', 'HYPODERMICALLY', 'OR', 'IF', 'BY', 'MOUTH', 'IN', 'THE', 'FORM', 'OF', 'TABLETS'] +1998-15444-0013-2217: hyp=['AND', 'HE', 'DOES', 'A', 'USUALLY', 'GIVEN', 'HYPODERMICALLY', 'OR', 'IF', 'THE', 'MOUTH', 'AND', 'THE', 'FORM', 'OF', 'TABLETS'] +1998-15444-0014-2218: ref=['IN', 'THE', 'ABSENCE', 'OF', 'A', 'HYPODERMIC', 'SYRINGE', 'THE', 'REMEDY', 'MAY', 'BE', 'GIVEN', 'BY', 'THE', 'RECTUM'] +1998-15444-0014-2218: hyp=['IN', 'THE', 'ABSENCE', 'OF', 'THE', 'HYPODERMIC', 'SYRINGE', 'THE', 'REMEDY', 'MAY', 'BE', 'GIVEN', 'BY', 'THE', 'RECTUM'] +1998-15444-0015-2219: ref=['NOTICE', 'THE', 'SMELL', 'COLOUR', 'AND', 'GENERAL', 'APPEARANCE', 'OF', 'THE', 'MATTER', 'SUBMITTED', 'FOR', 'EXAMINATION'] +1998-15444-0015-2219: hyp=['NOTICE', 'THE', 'SMILE', 'COLOUR', 'AND', 'GENERAL', 'APPEARANCE', 'OF', 'THE', 'MATTER', 'SUBMITTED', 'FOR', 'EXAMINATION'] +1998-15444-0016-2220: ref=['FOR', 'THE', 'SEPARATION', 'OF', 'AN', 'ALKALOID', 'THE', 'FOLLOWING', 'IS', 'THE', 'PROCESS', 'OF', 'STAS', 'OTTO'] +1998-15444-0016-2220: hyp=['FOR', 'THE', 'SEPARATION', 'OF', 'AN', 'ALKALOID', 'THE', 'FOLLOWING', 'IS', 'THE', 'PROCESS', 'OF', 'STATU'] +1998-15444-0017-2221: ref=['THIS', 'PROCESS', 'IS', 'BASED', 'UPON', 'THE', 'PRINCIPLE', 'THAT', 'THE', 'SALTS', 'OF', 'THE', 'ALKALOIDS', 'ARE', 'SOLUBLE', 'IN', 'ALCOHOL', 'AND', 'WATER', 'AND', 'INSOLUBLE', 'IN', 'ETHER'] +1998-15444-0017-2221: hyp=['THIS', 'PROCESS', 'IS', 'BASED', 'UPON', 'THE', 'PRINCIPLE', 'THAT', 'THE', 'SOULS', 'OF', 'THE', 'ACOLITES', 'ARE', 'SOLUBLE', 'IN', 'ACCULENT', 'WATER', 'AND', 'INSOLUBLE', 'IN', 'ETHER'] +1998-15444-0018-2222: ref=['THE', 'PURE', 'ALKALOIDS', 'WITH', 'THE', 'EXCEPTION', 'OF', 'MORPHINE', 'IN', 'ITS', 'CRYSTALLINE', 'FORM', 'ARE', 'SOLUBLE', 'IN', 'ETHER'] +1998-15444-0018-2222: hyp=['THE', 'PURE', 'AKALOIDS', 'WERE', 'THE', 'EXCEPTION', 'OF', 'MORPHINE', 'IN', 'ITS', 'CRYSTALLINE', 'FORM', 'A', 'SOLUBLE', 'BENEATH', 'THEM'] +1998-15444-0019-2223: ref=['TWO', 'COOL', 'THE', 'MIXTURE', 'AND', 'FILTER', 'WASH', 'THE', 'RESIDUE', 'WITH', 'STRONG', 'ALCOHOL', 'AND', 'MIX', 'THE', 'FILTRATES'] +1998-15444-0019-2223: hyp=['TWO', 'UR', 'THE', 'MIXTURE', 'AND', 'FILTER', 'WASH', 'THE', 'RESIDUE', 'WITH', 'STRONG', 'ALCOHOL', 'AND', 'MIX', 'THE', 'FIR', 'TRADES'] +1998-15444-0020-2224: ref=['THE', 'RESIDUE', 'MAY', 'BE', 'SET', 'ASIDE', 'FOR', 'THE', 'DETECTION', 'OF', 'THE', 'METALLIC', 'POISONS', 'IF', 'SUSPECTED', 'EXPEL', 'THE', 'ALCOHOL', 'BY', 'CAREFUL', 'EVAPORATION'] +1998-15444-0020-2224: hyp=['THE', 'READY', 'YOU', 'MAY', 'BE', 'SAID', 'ASIDE', 'FOR', 'THE', 'DETECTION', 'OF', 'THE', 'METALLIC', 'POISONS', 'OF', 'SUSPECTED', 'EXPEL', 'THE', 'ACCOHOL', 'BY', 'CAREFUL', 'EVAPORATION'] +1998-15444-0021-2225: ref=['ON', 'THE', 'EVAPORATION', 'OF', 'THE', 'ALCOHOL', 'THE', 'RESINOUS', 'AND', 'FATTY', 'MATTERS', 'SEPARATE'] +1998-15444-0021-2225: hyp=['ON', 'THE', 'EVAPORATION', 'OF', 'THE', 'ALCOHOL', 'THE', 'VEZENOUS', 'AND', 'FATIGMATIS', 'SEPARATE'] +1998-15444-0022-2226: ref=['EVAPORATE', 'THE', 'FILTRATE', 'TO', 'A', 'SYRUP', 'AND', 'EXTRACT', 'WITH', 'SUCCESSIVE', 'PORTIONS', 'OF', 'ABSOLUTE', 'ALCOHOL'] +1998-15444-0022-2226: hyp=['EVAPORATE', 'THE', 'FEDERATE', 'TO', 'A', 'CYRUP', 'AN', 'EXTRACT', 
'WITH', 'SUCCESSIVE', 'PORTIONS', 'OF', 'ABSOLUTE', 'ALCOHOL'] +1998-15444-0023-2227: ref=['SEPARATE', 'THE', 'ETHEREAL', 'SOLUTION', 'AND', 'EVAPORATE'] +1998-15444-0023-2227: hyp=['SEPARATE', 'THE', 'ETHEREAL', 'SOLUTION', 'AND', 'EVAPORATE'] +1998-15444-0024-2228: ref=['FIVE', 'A', 'PART', 'OF', 'THIS', 'ETHEREAL', 'SOLUTION', 'IS', 'POURED', 'INTO', 'A', 'WATCH', 'GLASS', 'AND', 'ALLOWED', 'TO', 'EVAPORATE'] +1998-15444-0024-2228: hyp=['FIVE', 'A', 'PART', 'OF', 'THIS', 'ETHEREAL', 'SOLUTION', 'IS', 'PUT', 'INTO', 'A', 'WATCH', 'GLASS', 'AND', 'ALLOW', 'TO', 'EVAPORATE'] +1998-15444-0025-2229: ref=['TO', 'PURIFY', 'IT', 'ADD', 'A', 'SMALL', 'QUANTITY', 'OF', 'DILUTE', 'SULPHURIC', 'ACID', 'AND', 'AFTER', 'EVAPORATING', 'TO', 'THREE', 'QUARTERS', 'OF', 'ITS', 'BULK', 'ADD', 'A', 'SATURATED', 'SOLUTION', 'OF', 'CARBONATE', 'OF', 'POTASH', 'OR', 'SODA'] +1998-15444-0025-2229: hyp=['TO', 'PURIFY', 'IT', 'EDISM', 'A', 'QUANTITY', 'OF', 'DELUDE', 'SUFFER', 'ACID', 'AND', 'AFTER', 'EVAPORATING', 'TO', 'THREE', 'QUARTERS', 'OF', 'ITS', 'BARK', 'ADD', 'A', 'SITUATED', 'SOLUTION', 'OF', 'CARBONATE', 'OF', 'POTASH', 'OR', 'SODA'] +1998-15444-0026-2230: ref=['BOIL', 'THE', 'FINELY', 'DIVIDED', 'SUBSTANCE', 'WITH', 'ABOUT', 'ONE', 'EIGHTH', 'ITS', 'BULK', 'OF', 'PURE', 'HYDROCHLORIC', 'ACID', 'ADD', 'FROM', 'TIME', 'TO', 'TIME', 'POTASSIC', 'CHLORATE', 'UNTIL', 'THE', 'SOLIDS', 'ARE', 'REDUCED', 'TO', 'A', 'STRAW', 'YELLOW', 'FLUID'] +1998-15444-0026-2230: hyp=['BOY', 'THE', 'FINALLY', 'DIVIDEST', 'ABSTANCE', 'WITH', 'ABOUT', 'ONE', 'EIGHTHS', 'ITS', 'BARK', 'OF', 'PURE', 'HYDROCLOIC', 'ACID', 'ADD', 'FROM', 'TIME', 'TO', 'TIME', 'POTASSIC', 'LOW', 'RAGE', 'UNTIL', 'THE', 'SOLIDS', 'ARE', 'REDUCED', 'TO', 'A', 'STRAW', 'YELLOW', 'FLUID'] +1998-15444-0027-2231: ref=['THE', 'RESIDUE', 'OF', 'THE', 'MATERIAL', 'AFTER', 'DIGESTION', 'WITH', 'HYDROCHLORIC', 'ACID', 'AND', 'POTASSIUM', 'CHLORATE', 'MAY', 'HAVE', 'TO', 'BE', 'EXAMINED', 'FOR', 'SILVER', 'LEAD', 'AND', 'BARIUM'] +1998-15444-0027-2231: hyp=['THE', 'RESIDUE', 'OF', 'THE', 'MATERIAL', 'AFTER', 'DIGESTION', 'WAS', 'HYDROGLOIC', 'ACID', 'AND', 'PROTESTING', 'CHLORODE', 'MAY', 'HAVE', 'TO', 'BE', 'EXAMINED', 'FOR', 'SILVER', 'LEAD', 'AND', 'BARIUM'] +1998-29454-0000-2157: ref=['A', 'THOUSAND', 'BLESSINGS', 'FROM', 'A', 'GRATEFUL', 'HEART'] +1998-29454-0000-2157: hyp=['A', 'THOUSAND', 'BLESSINGS', 'FROM', 'A', 'GRATEFUL', 'HEART'] +1998-29454-0001-2158: ref=['PERUSAL', 'SAID', 'THE', 'PAWNBROKER', "THAT'S", 'THE', 'WAY', 'TO', 'PERNOUNCE', 'IT'] +1998-29454-0001-2158: hyp=['PERUSAL', 'SAID', 'THE', 'PAN', 'BROKER', "THAT'S", 'THE', 'WAY', 'TO', 'PRONOUNCE', 'IT'] +1998-29454-0002-2159: ref=['HIS', 'BOOKS', 'TOLD', 'HIM', 'THAT', 'TREASURE', 'IS', 'BEST', 'HIDDEN', 'UNDER', 'LOOSE', 'BOARDS', 'UNLESS', 'OF', 'COURSE', 'YOUR', 'HOUSE', 'HAS', 'A', 'SECRET', 'PANEL', 'WHICH', 'HIS', 'HAD', 'NOT'] +1998-29454-0002-2159: hyp=['HIS', 'BOOKS', 'TOLD', 'HIM', 'THE', 'TREASURES', 'BEST', 'HIDDEN', 'UNDER', 'LOOSE', 'BOARDS', 'AND', 'AS', 'OF', 'COURSE', 'YOUR', 'HOUSE', 'HAD', 'A', 'SECRET', 'PANNER', 'WHICH', 'HIS', 'HAD', 'NOT'] +1998-29454-0003-2160: ref=['HE', 'GOT', 'IT', 'UP', 'AND', 'PUSHED', 'HIS', 'TREASURES', 'AS', 'FAR', 'IN', 'AS', 'HE', 'COULD', 'ALONG', 'THE', 'ROUGH', 'CRUMBLY', 'SURFACE', 'OF', 'THE', 'LATH', 'AND', 'PLASTER'] +1998-29454-0003-2160: hyp=['HE', 'GOT', 'IT', 'UP', 'AND', 'PUSHED', 'HIS', 'TREASURES', 'AS', 'FAR', 'IN', 'AS', 'HE', 'COULD', 'ALONG', 'THE', 'ROUGH', 'CRAMBLY', 'SURFACE', 'OF', 'THE', 'LAST', 'AND', 'PLASTER'] 
+1998-29454-0004-2161: ref=['WHEN', 'DICKIE', 'CAME', 'DOWN', 'HIS', 'AUNT', 'SLIGHTLY', 'SLAPPED', 'HIM', 'AND', 'HE', 'TOOK', 'THE', 'HALFPENNY', 'AND', 'LIMPED', 'OFF', 'OBEDIENTLY'] +1998-29454-0004-2161: hyp=['WHEN', 'DICKIE', 'CAME', 'DOWN', 'HIS', 'AUNT', 'SAT', 'HE', 'SLEPT', 'HIM', 'AND', 'HE', 'TOOK', 'THE', 'HALFPENNY', 'AND', 'LIMPED', 'OFF', 'OBEDIENTLY'] +1998-29454-0005-2162: ref=['HE', 'HAD', 'NEVER', 'SEEN', 'ONE', 'BEFORE', 'AND', 'IT', 'INTERESTED', 'HIM', 'EXTREMELY'] +1998-29454-0005-2162: hyp=['HE', 'HAD', 'NEVER', 'SEEN', 'ONE', 'BEFORE', 'AND', 'IT', 'INTERESTED', 'HIM', 'EXTREMELY'] +1998-29454-0006-2163: ref=['HE', 'LOOKED', 'ABOUT', 'HIM', 'AND', 'KNEW', 'THAT', 'HE', 'DID', 'NOT', 'AT', 'ALL', 'KNOW', 'WHERE', 'HE', 'WAS'] +1998-29454-0006-2163: hyp=['HE', 'LOOKED', 'ABOUT', 'HIM', 'AND', 'KNEW', 'THAT', 'HE', 'DID', 'NOT', 'AT', 'ALL', 'KNOW', 'WHERE', 'HE', 'WAS'] +1998-29454-0007-2164: ref=["WHAT'S", 'UP', 'MATEY', 'LOST', 'YOUR', 'WAY', 'DICKIE', 'EXPLAINED'] +1998-29454-0007-2164: hyp=["WHAT'S", 'THAT', 'MAY', 'TEA', 'LOST', 'YOUR', 'WAY', 'DICKIE', 'EXPLAINED'] +1998-29454-0008-2165: ref=['WHEN', 'HE', 'SAID', 'AVE', 'I', 'BIN', 'ASLEEP'] +1998-29454-0008-2165: hyp=['WHEN', 'HE', 'SAID', 'HAVE', 'I', 'BEEN', 'ASLEEP'] +1998-29454-0009-2166: ref=['HERE', 'WE', 'ARE', 'SAID', 'THE', 'MAN'] +1998-29454-0009-2166: hyp=['HERE', 'WE', 'ARE', 'SAID', 'THE', 'MAN'] +1998-29454-0010-2167: ref=['NOT', 'EXACKLY', 'SAID', 'THE', 'MAN', 'BUT', "IT'S", 'ALL', 'RIGHT'] +1998-29454-0010-2167: hyp=['NOT', 'EXACTLY', 'SAID', 'THE', 'MAN', 'BUT', "IT'S", 'ALL', 'RIGHT'] +1998-29454-0011-2168: ref=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'MAN', 'ASKED', 'DICKIE', 'IF', 'HE', 'COULD', 'WALK', 'A', 'LITTLE', 'WAY', 'AND', 'WHEN', 'DICKIE', 'SAID', 'HE', 'COULD', 'THEY', 'SET', 'OUT', 'IN', 'THE', 'MOST', 'FRIENDLY', 'WAY', 'SIDE', 'BY', 'SIDE'] +1998-29454-0011-2168: hyp=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'MAN', 'ASKED', 'DICKIE', 'IF', 'HE', 'COULD', 'WALK', 'A', 'LITTLE', 'WAY', 'AND', 'WHEN', 'DICKIE', 'SAID', 'HE', 'COULD', 'THEY', 'SET', 'OUT', 'IN', 'THE', 'MOST', 'FRIENDLY', 'WAY', 'SIDE', 'BY', 'SIDE'] +1998-29454-0012-2169: ref=['AND', 'THE', 'TEA', 'AND', 'ALL', 'AN', 'THE', 'EGG'] +1998-29454-0012-2169: hyp=['AND', 'THE', 'TINEL', 'AND', 'THE', 'EGG'] +1998-29454-0013-2170: ref=['AND', 'THIS', 'IS', 'THE', 'PRETTIEST', 'PLACE', 'EVER', 'I', 'SEE'] +1998-29454-0013-2170: hyp=['AND', 'THIS', 'IS', 'THE', 'PRETTIEST', 'PLACE', 'EVER', 'I', 'SEE'] +1998-29454-0014-2171: ref=['I', 'SHALL', 'CATCH', 'IT', 'A', 'FAIR', 'TREAT', 'AS', 'IT', 'IS'] +1998-29454-0014-2171: hyp=['I', 'SHOULD', 'CATCH', 'IT', 'IF', 'HER', 'TREAT', 'AS', 'IT', 'IS'] +1998-29454-0015-2172: ref=['SHE', 'WAS', 'WAITIN', 'FOR', 'THE', 'WOOD', 'TO', 'BOIL', 'THE', 'KETTLE', 'WHEN', 'I', 'COME', 'OUT', 'MOTHER'] +1998-29454-0015-2172: hyp=['SHE', 'WAS', 'WAITING', 'FOR', 'THE', 'WOOD', 'TO', 'BOIL', 'THE', 'CATTLE', 'WHEN', 'TO', 'COME', 'OUT', 'MOTHER'] +1998-29454-0016-2173: ref=["AIN'T", 'BAD', 'WHEN', "SHE'S", 'IN', 'A', 'GOOD', 'TEMPER'] +1998-29454-0016-2173: hyp=['AND', 'BAD', 'WHEN', "SHE'S", 'IN', 'A', 'GOOD', 'TEMPER'] +1998-29454-0017-2174: ref=['THAT', "AIN'T", 'WHAT', "SHE'LL", 'BE', 'IN', 'WHEN', 'YOU', 'GETS', 'BACK'] +1998-29454-0017-2174: hyp=['THAT', 'ANNE', 'BUT', 'HER', 'BEING', 'WHEN', 'YOU', 'GETS', 'BACK'] +1998-29454-0018-2175: ref=['I', 'GOT', 'TO', 'STICK', 'IT', 'SAID', 'DICKIE', 'SADLY', "I'D", 'BEST', 'BE', 'GETTING', 'HOME'] +1998-29454-0018-2175: hyp=['I', 'GOT', 'A', 
'STICK', 'IT', 'SAID', 'DICKY', 'SADLY', "I'D", 'BEST', 'BE', 'GETTING', 'HOME'] +1998-29454-0019-2176: ref=['I', "WOULDN'T", 'GO', 'OME', 'NOT', 'IF', 'I', 'WAS', 'YOU', 'SAID', 'THE', 'MAN'] +1998-29454-0019-2176: hyp=['I', "WOULDN'T", 'GO', 'HOME', 'NOT', 'A', 'FAIR', 'US', 'YOU', 'SAID', 'THE', 'MAN'] +1998-29454-0020-2177: ref=['NO', 'SAID', 'DICKIE', 'OH', 'NO', 'NO', 'I', 'NEVER'] +1998-29454-0020-2177: hyp=['NO', 'SAID', 'DICKIE', 'OH', 'NO', 'NO', 'I', 'NEVER'] +1998-29454-0021-2178: ref=['I', "AIN'T", 'IT', 'YER', 'HAVE', 'I', 'LIKE', 'WHAT', 'YER', 'AUNT', 'DO'] +1998-29454-0021-2178: hyp=['I', 'ENTER', 'HAVE', 'I', 'LIKE', 'WHAT', 'YOU', "AREN'TO"] +1998-29454-0022-2179: ref=['WELL', "THAT'LL", 'SHOW', 'YOU', 'THE', 'SORT', 'OF', 'MAN', 'I', 'AM'] +1998-29454-0022-2179: hyp=['WELL', 'THAT', 'SHOW', 'YOU', 'A', 'SORT', 'OF', 'MEN', 'I', 'AM'] +1998-29454-0023-2180: ref=['THE', "MAN'S", 'MANNER', 'WAS', 'SO', 'KIND', 'AND', 'HEARTY', 'THE', 'WHOLE', 'ADVENTURE', 'WAS', 'SO', 'WONDERFUL', 'AND', 'NEW', 'IS', 'IT', 'COUNTRY', 'WHERE', 'YOU', 'GOING'] +1998-29454-0023-2180: hyp=['THE', "MAN'S", 'MANNER', 'WAS', 'SO', 'KIND', 'AND', 'HEARTY', 'THE', 'WHOLE', 'ADVENTUR', 'WAS', 'SO', 'WONDERFUL', 'AND', 'NEW', 'IS', 'IT', 'COUNTRY', 'WHERE', 'YOU', 'GOING'] +1998-29454-0024-2181: ref=['THE', 'SUN', 'SHOT', 'LONG', 'GOLDEN', 'BEAMS', 'THROUGH', 'THE', 'GAPS', 'IN', 'THE', 'HEDGE'] +1998-29454-0024-2181: hyp=['THE', 'SUN', 'HAD', 'LONG', 'GOLDEN', 'BEAMS', 'THROUGH', 'THE', 'GAPS', 'AND', 'THE', 'HEDGE'] +1998-29454-0025-2182: ref=['A', 'BIRD', 'PAUSED', 'IN', 'ITS', 'FLIGHT', 'ON', 'A', 'BRANCH', 'QUITE', 'CLOSE', 'AND', 'CLUNG', 'THERE', 'SWAYING'] +1998-29454-0025-2182: hyp=['A', 'BIRD', 'PASSED', 'IN', 'ITS', 'FLIGHT', 'ON', 'BRANCH', 'QUITE', 'CLOSE', 'AND', 'CLUNG', 'THEIR', 'SWAIN'] +1998-29454-0026-2183: ref=['HE', 'TOOK', 'OUT', 'OF', 'HIS', 'POCKET', 'A', 'NEW', 'ENVELOPE', 'A', 'NEW', 'SHEET', 'OF', 'PAPER', 'AND', 'A', 'NEW', 'PENCIL', 'READY', 'SHARPENED', 'BY', 'MACHINERY'] +1998-29454-0026-2183: hyp=['HE', 'TOOK', 'OUT', 'OF', 'HIS', 'POCKET', 'AND', 'YOUR', 'ENVELOPE', 'AND', 'YOU', 'SEED', 'OF', 'PAPER', 'AND', 'A', 'NEW', 'PENCIL', 'READY', 'SHARPENED', 'BY', 'MACHINERY'] +1998-29454-0027-2184: ref=['AN', 'I', 'ASKS', 'YOU', 'LET', 'ME', 'COME', 'ALONGER', 'YOU', 'GOT', 'THAT'] +1998-29454-0027-2184: hyp=['AND', 'I', 'ASK', 'YOU', 'LET', 'ME', 'COME', 'ALONG', 'OF', 'YOU', 'GOT', 'THAT'] +1998-29454-0028-2185: ref=['GET', 'IT', 'WROTE', 'DOWN', 'THEN', 'DONE'] +1998-29454-0028-2185: hyp=['GENISH', 'WROTE', 'DOWN', 'THEN', 'DONE'] +1998-29454-0029-2186: ref=['THEN', 'HE', 'FOLDED', 'IT', 'AND', 'PUT', 'IT', 'IN', 'HIS', 'POCKET'] +1998-29454-0029-2186: hyp=['THEN', 'HE', 'FOLDED', 'IT', 'AND', 'PUT', 'IT', 'IN', 'HIS', 'POCKET'] +1998-29454-0030-2187: ref=['NOW', "WE'RE", 'SQUARE', 'HE', 'SAID'] +1998-29454-0030-2187: hyp=['NOW', 'HE', 'IS', 'QUEER', 'HE', 'SAID'] +1998-29454-0031-2188: ref=['THEY', 'COULD', 'PUT', 'A', 'MAN', 'AWAY', 'FOR', 'LESS', 'THAN', 'THAT'] +1998-29454-0031-2188: hyp=['THEY', 'COULD', 'PUT', 'A', 'MEN', 'AWAY', 'FOR', 'LESS', 'THAN', 'THAT'] +1998-29454-0032-2189: ref=['I', 'SEE', 'THAT', 'THERE', 'IN', 'A', 'BOOK', 'SAID', 'DICKIE', 'CHARMED'] +1998-29454-0032-2189: hyp=['I', 'SEE', 'THAT', 'THEN', 'A', 'BOOK', 'SAID', 'DICK', 'HAD', 'CHARMED'] +1998-29454-0033-2190: ref=['HE', 'REWARD', 'THE', 'WAKE', 'THE', 'LAST', 'OF', 'THE', 'ENGLISH', 'AND', 'I', 'WUNNERED', 'WHAT', 'IT', 'STOOD', 'FOR'] +1998-29454-0033-2190: hyp=['HE', 'REWARD', 'THE', 
'WAKE', 'THE', 'LAST', 'OF', 'THE', 'ENGLISH', 'AND', 'I', 'WANTED', 'WHAT', 'IT', 'STOOD', 'FOR'] +1998-29454-0034-2191: ref=['WILD', 'ONES', "AIN'T", 'ALF', 'THE', 'SIZE', 'I', 'LAY'] +1998-29454-0034-2191: hyp=['WILD', 'ONES', 'AND', 'A', 'HALF', 'SIGHS', 'I', 'LAY'] +1998-29454-0035-2192: ref=['ADVENTURES', 'I', 'SHOULD', 'THINK', 'SO'] +1998-29454-0035-2192: hyp=['ADVENTURES', 'I', 'SHOULD', 'THINK', 'SO'] +1998-29454-0036-2193: ref=['AH', 'SAID', 'DICKIE', 'AND', 'A', 'FULL', 'SILENCE', 'FELL', 'BETWEEN', 'THEM'] +1998-29454-0036-2193: hyp=['AH', 'SAID', 'DICKY', 'AND', 'A', 'FOOT', 'SILENCE', 'FELL', 'BETWEEN', 'THEM'] +1998-29454-0037-2194: ref=['THAT', 'WAS', 'CHARMING', 'BUT', 'IT', 'WAS', 'PLEASANT', 'TOO', 'TO', 'WASH', 'THE', 'MUD', 'OFF', 'ON', 'THE', 'WET', 'GRASS'] +1998-29454-0037-2194: hyp=['THAT', 'WAS', 'CHARMING', 'BUT', 'IT', 'WAS', 'PLEASANT', 'TOO', 'TO', 'WASH', 'THE', 'MATVE', 'ON', 'THE', 'WET', 'GRASS'] +1998-29454-0038-2195: ref=['DICKIE', 'ALWAYS', 'REMEMBERED', 'THAT', 'MOMENT'] +1998-29454-0038-2195: hyp=['DICKY', 'ALWAYS', 'REMEMBERED', 'THAT', 'MOMENT'] +1998-29454-0039-2196: ref=['SO', 'YOU', 'SHALL', 'SAID', 'MISTER', 'BEALE', 'A', "REG'LER", 'WASH', 'ALL', 'OVER', 'THIS', 'VERY', 'NIGHT', 'I', 'ALWAYS', 'LIKE', 'A', 'WASH', 'MESELF'] +1998-29454-0039-2196: hyp=['SO', 'YOU', 'SHALL', 'SAID', 'MISTER', 'BEALE', 'A', "REG'LAR", 'WASH', 'ALL', 'OVER', 'THIS', 'VERY', 'NIGHT', 'I', 'ALWAYS', 'LIKE', 'A', 'WASH', 'MYSELF'] +1998-29454-0040-2197: ref=['SOME', 'BLOKES', 'THINK', 'IT', 'PAYS', 'TO', 'BE', 'DIRTY', 'BUT', 'IT', "DON'T"] +1998-29454-0040-2197: hyp=['SOME', 'LOOSE', 'THINK', 'IT', 'PAYS', 'TO', 'BE', 'DIRTY', 'BUT', 'IT', "DON'T"] +1998-29454-0041-2198: ref=['IF', "YOU'RE", 'CLEAN', 'THEY', 'SAY', 'HONEST', 'POVERTY', 'AN', 'IF', "YOU'RE", 'DIRTY', 'THEY', 'SAY', 'SERVE', 'YOU', 'RIGHT'] +1998-29454-0041-2198: hyp=['IF', 'YOU', 'CLEAN', 'THEY', 'SAY', 'HONEST', 'POVERTY', 'AND', 'IF', "YOU'RE", 'DIRTY', 'THEY', 'SAY', 'SERVE', 'YOU', 'RIGHT'] +1998-29454-0042-2199: ref=['YOU', 'ARE', 'GOOD', 'SAID', 'DICKIE', 'I', 'DO', 'LIKE', 'YOU'] +1998-29454-0042-2199: hyp=['YOU', 'ARE', 'GOOD', 'SAID', 'DICKIE', 'I', 'DO', 'LIKE', 'YOU'] +1998-29454-0043-2200: ref=['I', 'KNOW', 'YOU', 'WILL', 'SAID', 'DICKIE', 'WITH', 'ENTHUSIASM', 'I', 'KNOW', 'OW', 'GOOD', 'YOU', 'ARE'] +1998-29454-0043-2200: hyp=['I', 'KNOW', 'YOU', 'WILL', 'SAID', 'DICKIE', 'WITH', 'ENTHUSIASM', 'I', 'KNOW', 'HOW', 'GOOD', 'YOU', 'ARE'] +1998-29454-0044-2201: ref=['BLESS', 'ME', 'SAID', 'MISTER', 'BEALE', 'UNCOMFORTABLY', 'WELL', 'THERE'] +1998-29454-0044-2201: hyp=['BLESS', 'ME', 'SAID', 'MISTER', 'BEALE', 'UNCOMFORTABLY', 'WELL', 'THEN'] +1998-29454-0045-2202: ref=['STEP', 'OUT', 'SONNY', 'OR', "WE'LL", 'NEVER', 'GET', 'THERE', 'THIS', 'SIDE', 'CHRISTMAS'] +1998-29454-0045-2202: hyp=['SABATANI', 'OR', "WE'LL", 'NEVER', 'GET', 'THERE', 'THIS', 'OUT', 'OF', 'CHRISTMAS'] +1998-29454-0046-2203: ref=['WELL', "YOU'LL", 'KNOW', 'ALL', 'ABOUT', 'IT', 'PRESENTLY'] +1998-29454-0046-2203: hyp=['WELL', 'YOU', 'KNOW', 'ALL', 'ABOUT', 'IT', 'PRESENTLY'] +1998-29455-0000-2232: ref=['THE', 'SINGING', 'AND', 'LAUGHING', 'WENT', 'ON', 'LONG', 'AFTER', 'HE', 'HAD', 'FALLEN', 'ASLEEP', 'AND', 'IF', 'LATER', 'IN', 'THE', 'EVENING', 'THERE', 'WERE', 'LOUD', 'VOICED', 'ARGUMENTS', 'OR', 'QUARRELS', 'EVEN', 'DICKIE', 'DID', 'NOT', 'HEAR', 'THEM'] +1998-29455-0000-2232: hyp=['THE', 'SINGING', 'AND', 'LAUGHING', 'WENT', 'ON', 'LONG', 'AFTER', 'HE', 'HAD', 'FALLEN', 'ASLEEP', 'AND', 'IF', 'LATE', 'IN', 'THE', 
'EVENING', 'THEY', 'WERE', 'LOUD', 'VOICE', 'ARGUMENTS', 'OR', 'QUARRELS', 'EVEN', 'DICKY', 'DID', 'NOT', 'HEAR', 'THEM'] +1998-29455-0001-2233: ref=["WHAT'S", 'ALL', 'THAT', 'THERE', 'DICKIE', 'ASKED', 'POINTING', 'TO', 'THE', 'ODD', 'KNOBBLY', 'BUNDLES', 'OF', 'ALL', 'SORTS', 'AND', 'SHAPES', 'TIED', 'ON', 'TO', 'THE', "PERAMBULATOR'S", 'FRONT'] +1998-29455-0001-2233: hyp=["WHAT'S", 'ON', 'THAT', 'THERE', 'DICKIE', 'ASKED', 'POINTING', 'TO', 'THE', 'OTT', 'KNOBLY', 'BUNDLES', 'OF', 'ALL', 'SORTS', 'AND', 'SHAPES', 'TIED', 'ON', 'TO', 'THE', "PERAMBULATOR'S", 'FRONT'] +1998-29455-0002-2234: ref=['TELL', 'YER', 'WHAT', 'MATE', 'LOOKS', 'TO', 'ME', 'AS', 'IF', "I'D", 'TOOK', 'A', 'FANCY', 'TO', 'YOU'] +1998-29455-0002-2234: hyp=['TELL', 'YOU', 'WHAT', 'MATE', 'LOOKS', 'TO', 'ME', 'AS', 'IF', 'I', 'TOOK', 'A', 'FANCY', 'TO', 'YOU'] +1998-29455-0003-2235: ref=['SWELP', 'ME', 'HE', 'SAID', 'HELPLESSLY'] +1998-29455-0003-2235: hyp=['SWAP', 'ME', 'HE', 'SAID', 'HELPLESSLY'] +1998-29455-0004-2236: ref=['OH', 'LOOK', 'SAID', 'DICKIE', 'THE', 'FLOWERS'] +1998-29455-0004-2236: hyp=['O', 'LOOK', 'SAID', 'DICKY', 'THE', 'FLOWERS'] +1998-29455-0005-2237: ref=["THEY'RE", 'ONLY', 'WEEDS', 'SAID', 'BEALE'] +1998-29455-0005-2237: hyp=['THEY', 'ARE', 'ONLY', 'REEDS', 'SAID', 'BEALE'] +1998-29455-0006-2238: ref=['BUT', 'I', 'SHALL', 'HAVE', 'THEM', 'WHILE', "THEY'RE", 'ALIVE', 'SAID', 'DICKIE', 'AS', 'HE', 'HAD', 'SAID', 'TO', 'THE', 'PAWNBROKER', 'ABOUT', 'THE', 'MOONFLOWERS'] +1998-29455-0006-2238: hyp=['BUT', 'I', 'SHALL', 'HAVE', 'THEM', 'WHERE', 'THEY', 'ARE', 'ALIVE', 'SAID', 'DICKY', 'AS', 'HE', 'HAD', 'SAID', 'TO', 'THE', 'PAWNBROKER', 'BY', 'THE', 'MOONFLOWERS'] +1998-29455-0007-2239: ref=['HI', 'THERE', 'GOES', 'A', 'RABBIT'] +1998-29455-0007-2239: hyp=['AY', 'THERE', 'GOES', 'A', 'RABBIT'] +1998-29455-0008-2240: ref=['SEE', 'IM', 'CROST', 'THE', 'ROAD', 'THERE', 'SEE', 'HIM'] +1998-29455-0008-2240: hyp=['SEEM', 'QUEST', 'ABOUT', 'THERE', 'SEE', 'EM'] +1998-29455-0009-2241: ref=['HOW', 'BEAUTIFUL', 'SAID', 'DICKIE', 'WRIGGLING', 'WITH', 'DELIGHT'] +1998-29455-0009-2241: hyp=['HOW', 'BEAUTIFUL', 'SAID', 'DICKY', 'WRIGGLING', 'WIS', 'DELIGHT'] +1998-29455-0010-2242: ref=['THIS', 'LIFE', 'OF', 'THE', 'RABBIT', 'AS', 'DESCRIBED', 'BY', 'MISTER', 'BEALE', 'WAS', 'THE', "CHILD'S", 'FIRST', 'GLIMPSE', 'OF', 'FREEDOM', "I'D", 'LIKE', 'TO', 'BE', 'A', 'RABBIT'] +1998-29455-0010-2242: hyp=['THIS', 'LIFE', 'OF', 'THE', 'RABBIT', 'AS', 'DESCRIBED', 'BY', 'MISTER', 'BEALE', 'WAS', 'THE', "CHILD'S", 'FIRST', 'GLIMPSE', 'OF', 'FREEDOM', "I'D", 'LIKE', 'TO', 'BE', 'A', 'RABBIT'] +1998-29455-0011-2243: ref=["OW'M", 'I', 'TO', 'WHEEL', 'THE', 'BLOOMIN', 'PRAM', 'IF', 'YOU', 'GOES', 'ON', 'LIKE', 'AS', 'IF', 'YOU', 'WAS', 'A', 'BAG', 'OF', 'EELS'] +1998-29455-0011-2243: hyp=['ALL', 'MY', 'TOWER', 'THE', 'ROOM', 'AND', 'PRAM', 'IF', 'YOU', 'GO', 'SON', 'LIKE', 'US', 'IF', 'YOU', 'WAS', 'A', 'BICK', 'OF', 'FIELDS'] +1998-29455-0012-2244: ref=['I', 'LIKE', 'YOU', 'NEXTER', 'MY', 'OWN', 'DADDY', 'AND', 'MISTER', 'BAXTER', 'NEXT', 'DOOR'] +1998-29455-0012-2244: hyp=['I', 'LIKE', 'YOU', 'NEXT', 'TO', 'MY', 'OWN', 'DIRTY', 'AND', 'MISTER', 'BEXT', 'THE', 'NEXT', 'DOOR'] +1998-29455-0013-2245: ref=["THAT'S", 'ALL', 'RIGHT', 'SAID', 'MISTER', 'BEALE', 'AWKWARDLY'] +1998-29455-0013-2245: hyp=["THAT'S", 'ALL', 'RIGHT', 'SAID', 'MISTER', 'BEALE', 'AWKWARDLY'] +1998-29455-0014-2246: ref=['DICKIE', 'QUICK', 'TO', 'IMITATE', 'TOUCHED', 'HIS'] +1998-29455-0014-2246: hyp=['DICKIE', 'QUICKLY', 'IMITATE', 'TOUCHED', 'HIS'] 
+1998-29455-0015-2247: ref=['POOR', 'LITTLE', 'MAN', 'SAID', 'THE', 'LADY', 'YOU', 'MISS', 'YOUR', 'MOTHER', "DON'T", 'YOU'] +1998-29455-0015-2247: hyp=['POOR', 'LITTLE', 'MAN', 'SAID', 'THE', 'LADY', 'YOU', 'MISS', 'YOUR', 'MOTHER', "DON'T", 'YOU'] +1998-29455-0016-2248: ref=['OH', 'WELL', 'DONE', 'LITTLE', 'UN', 'SAID', 'MISTER', 'BEALE', 'TO', 'HIMSELF'] +1998-29455-0016-2248: hyp=['OH', 'WELL', 'DONE', 'LITTLE', 'ONE', 'SAID', 'MISTER', 'BEE', 'TO', 'HIMSELF'] +1998-29455-0017-2249: ref=['THE', 'TWO', 'TRAVELLERS', 'WERE', 'LEFT', 'FACING', 'EACH', 'OTHER', 'THE', 'RICHER', 'BY', 'A', 'PENNY', 'AND', 'OH', 'WONDERFUL', 'GOOD', 'FORTUNE', 'A', 'WHOLE', 'HALF', 'CROWN'] +1998-29455-0017-2249: hyp=['THE', 'TWO', 'TRAVELLERS', 'WERE', 'LEFT', 'FACING', 'EACH', 'OTHER', 'THE', 'RICHER', 'BY', 'A', 'PENNY', 'AND', 'O', 'WONDERFUL', 'GOOD', 'FORTUNE', 'A', 'WHOLE', 'HALF', 'CROWN'] +1998-29455-0018-2250: ref=['NO', 'I', 'NEVER', 'SAID', 'DICKIE', "ERE'S", 'THE', 'STEEVER'] +1998-29455-0018-2250: hyp=['NO', 'I', 'NEVER', 'SAID', 'DICKIE', 'YES', 'THE', 'STEVEN'] +1998-29455-0019-2251: ref=['YOU', 'STICK', 'TO', 'THAT', 'SAID', 'BEALE', 'RADIANT', 'WITH', 'DELIGHT', "YOU'RE", 'A', 'FAIR', 'MASTERPIECE', 'YOU', 'ARE', 'YOU', 'EARNED', 'IT', 'HONEST', 'IF', 'EVER', 'A', 'KID', 'DONE'] +1998-29455-0019-2251: hyp=['YOU', 'STICK', 'TO', 'THAT', 'SAID', 'BEER', 'RADIANT', 'WITH', 'DELIGHT', 'YOU', 'ARE', 'A', 'FAIR', 'MASTERPIECE', 'YOU', 'ARE', 'YOU', 'EARNED', 'IT', 'HONEST', 'IF', 'EVER', 'KID', 'DONE'] +1998-29455-0020-2252: ref=['THEY', 'WENT', 'ON', 'UP', 'THE', 'HILL', 'AS', 'HAPPY', 'AS', 'ANY', 'ONE', 'NEED', 'WISH', 'TO', 'BE'] +1998-29455-0020-2252: hyp=['THEY', 'WENT', 'ON', 'UP', 'THE', 'HILL', 'AS', 'HAPPY', 'AS', 'ANY', 'ONE', 'NEED', 'WISH', 'TO', 'BE'] +1998-29455-0021-2253: ref=['PLEASE', 'DO', 'NOT', 'BE', 'TOO', 'SHOCKED'] +1998-29455-0021-2253: hyp=['PLEASE', "DON'T", 'BE', 'TOO', 'SHOCKED'] +1998-29455-0022-2254: ref=['REMEMBER', 'THAT', 'NEITHER', 'OF', 'THEM', 'KNEW', 'ANY', 'BETTER'] +1998-29455-0022-2254: hyp=['REMEMBER', 'THAT', 'NEITHER', 'OF', 'THEM', 'KNEW', 'ANY', 'BETTER'] +1998-29455-0023-2255: ref=['TO', 'THE', 'ELDER', 'TRAMP', 'LIES', 'AND', 'BEGGING', 'WERE', 'NATURAL', 'MEANS', 'OF', 'LIVELIHOOD'] +1998-29455-0023-2255: hyp=['TO', 'THE', 'OTHER', 'TRAMP', 'LIES', 'AND', 'PEGGING', 'WHEN', 'NATURAL', 'MEANS', 'OF', 'LIVELIHOOD'] +1998-29455-0024-2256: ref=['BUT', 'YOU', 'SAID', 'THE', 'BED', 'WITH', 'THE', 'GREEN', 'CURTAINS', 'URGED', 'DICKIE'] +1998-29455-0024-2256: hyp=['BUT', 'YOU', 'SAID', 'THE', 'BED', 'WAS', 'THE', 'GREEN', 'CURTAINS', 'URGED', 'DICKIE'] +1998-29455-0025-2257: ref=['WHICH', 'THIS', "AIN'T", 'NOT', 'BY', 'NO', 'MEANS'] +1998-29455-0025-2257: hyp=['WHICH', 'THIS', 'END', 'NOT', 'BY', 'NO', 'MEANS'] +1998-29455-0026-2258: ref=['THE', 'NIGHT', 'IS', 'FULL', 'OF', 'INTERESTING', 'LITTLE', 'SOUNDS', 'THAT', 'WILL', 'NOT', 'AT', 'FIRST', 'LET', 'YOU', 'SLEEP', 'THE', 'RUSTLE', 'OF', 'LITTLE', 'WILD', 'THINGS', 'IN', 'THE', 'HEDGES', 'THE', 'BARKING', 'OF', 'DOGS', 'IN', 'DISTANT', 'FARMS', 'THE', 'CHIRP', 'OF', 'CRICKETS', 'AND', 'THE', 'CROAKING', 'OF', 'FROGS'] +1998-29455-0026-2258: hyp=['THE', 'NIGHT', 'IS', 'FULL', 'OF', 'INTERESTING', 'LITTLE', 'SOUNDS', 'THAT', 'WILL', 'NOT', 'AT', 'FIRST', 'LET', 'YOU', 'SLEEP', 'THE', 'RUSTLE', 'OF', 'LITTLE', 'WHITE', 'THINGS', 'IN', 'THE', 'HATCHES', 'THE', 'BARKING', 'OF', 'DOGS', 'AND', 'DISTANT', 'FARMS', 'THE', 'CHIRP', 'OF', 'CRICKETS', 'AND', 'THE', 'CROAKING', 'OF', 'FROGS'] 
+1998-29455-0027-2259: ref=['THE', 'NEW', 'GAME', 'OF', 'BEGGING', 'AND', 'INVENTING', 'STORIES', 'TO', 'INTEREST', 'THE', 'PEOPLE', 'FROM', 'WHOM', 'IT', 'WAS', 'WORTH', 'WHILE', 'TO', 'BEG', 'WENT', 'ON', 'GAILY', 'DAY', 'BY', 'DAY', 'AND', 'WEEK', 'BY', 'WEEK', 'AND', 'DICKIE', 'BY', 'CONSTANT', 'PRACTICE', 'GREW', 'SO', 'CLEVER', 'AT', 'TAKING', 'HIS', 'PART', 'IN', 'THE', 'ACTING', 'THAT', 'MISTER', 'BEALE', 'WAS', 'QUITE', 'DAZED', 'WITH', 'ADMIRATION'] +1998-29455-0027-2259: hyp=['THE', 'NEW', 'GAME', 'OF', 'BEGGING', 'AND', 'INVENTING', 'STORIES', 'TO', 'INTEREST', 'THE', 'PEOPLE', 'FROM', 'WHOM', 'IT', 'WAS', 'WORSE', 'WIDE', 'TO', 'BEG', 'WENT', 'ON', 'GAILY', 'DAY', 'BY', 'DAY', 'AND', 'WEEK', 'BY', 'WEEK', 'AND', 'DICKIE', 'BY', 'CONSTANT', 'PRACTICE', 'GREW', 'SO', 'CLEVER', 'TAKING', 'HIS', 'PART', 'IN', 'THE', 'ACTING', 'THAT', 'MISTER', 'BEA', 'WAS', 'QUITE', 'DAZED', 'WITH', 'ADMIRATION'] +1998-29455-0028-2260: ref=['BLESSED', 'IF', 'I', 'EVER', 'SEE', 'SUCH', 'A', 'NIPPER', 'HE', 'SAID', 'OVER', 'AND', 'OVER', 'AGAIN'] +1998-29455-0028-2260: hyp=['BLESSED', 'FOR', 'EVER', 'SEE', 'SUCH', 'A', 'NIBBER', 'HE', 'SAID', 'OVER', 'AND', 'OVER', 'AGAIN'] +1998-29455-0029-2261: ref=['CLEVER', 'AS', 'A', 'TRAINDAWG', 'E', 'IS', 'AN', 'ALL', 'OUTER', 'IS', 'OWN', 'EAD'] +1998-29455-0029-2261: hyp=['CLEVER', 'AS', 'A', 'TRAIN', 'DOG', 'IS', 'IN', 'OR', "OUTER'S", 'OWNETTE'] +1998-29455-0030-2262: ref=['I', "AIN'T", 'SURE', 'AS', 'I', "ADN'T", 'BETTER', 'STICK', 'TO', 'THE', 'ROAD', 'AND', 'KEEP', 'AWAY', 'FROM', 'OLD', 'ANDS', 'LIKE', 'YOU', 'JIM'] +1998-29455-0030-2262: hyp=['I', 'AM', 'SURE', 'AS', 'I', "HADN'T", 'BETTER', 'STICK', 'TO', 'THE', 'ROAD', 'AND', 'KEEP', 'AWAY', 'FROM', 'OLD', 'ENDS', 'LIKE', 'EUGEUM'] +1998-29455-0031-2263: ref=['I', 'OPE', "E'S", 'CLEVER', 'ENOUGH', 'TO', 'DO', 'WOT', "E'S", 'TOLD', 'KEEP', 'IS', 'MUG', 'SHUT', "THAT'S", 'ALL'] +1998-29455-0031-2263: hyp=['I', 'OPEUS', 'LOVE', 'ENOUGH', 'TO', 'DO', 'WHAT', 'HE', 'STOWED', 'HE', 'WAS', 'MUCH', 'AT', "THAT'S", 'ALL'] +1998-29455-0032-2264: ref=['IF', "E'S", 'STRAIGHT', "E'LL", 'DO', 'FOR', 'ME', 'AND', 'IF', 'HE', "AIN'T", "I'LL", 'DO', 'FOR', 'IM', 'SEE'] +1998-29455-0032-2264: hyp=['IF', 'HE', 'STRAIGHT', "YOU'LL", 'DO', 'FOR', 'ME', 'AND', 'IF', 'HE', 'AND', "I'LL", 'DO', 'FOR', 'HIM', 'SEE'] +1998-29455-0033-2265: ref=['SEE', 'THAT', 'BLOKE', 'JUST', 'NOW', 'SAID', 'MISTER', 'BEALE', 'YUSS', 'SAID', 'DICKIE'] +1998-29455-0033-2265: hyp=['SEE', 'THAT', 'LOG', 'DOES', 'NOW', 'SAID', 'MISTER', 'BEALE', 'YES', 'SAID', 'DICKIE'] +1998-29455-0034-2266: ref=['WELL', 'YOU', 'NEVER', 'SEE', 'IM'] +1998-29455-0034-2266: hyp=['WELL', 'YOU', 'NEVER', 'SEE', 'EM'] +1998-29455-0035-2267: ref=['IF', 'ANY', 'ONE', 'ARSTS', 'YOU', 'IF', 'YOU', 'EVER', 'SEE', 'IM', 'YOU', 'NEVER', 'SET', 'EYES', 'ON', 'IM', 'IN', 'ALL', 'YOUR', 'BORN', 'NOT', 'TO', 'REMEMBER', 'IM'] +1998-29455-0035-2267: hyp=['IF', 'ANY', 'ONE', 'ASKS', 'YOU', 'IF', 'YOU', 'EVER', 'SEE', 'HIM', 'YOU', 'NEVER', 'SAID', 'EYES', 'ON', 'HIM', 'IN', 'ALL', "YOU'RE", 'BORN', 'NOT', 'TO', 'REMEMBER', 'HIM'] +1998-29455-0036-2268: ref=['DICKIE', 'WAS', 'FULL', 'OF', 'QUESTIONS', 'BUT', 'MISTER', 'BEALE', 'HAD', 'NO', 'ANSWERS', 'FOR', 'THEM'] +1998-29455-0036-2268: hyp=['DICKY', 'WAS', 'FULL', 'OF', 'QUESTIONS', 'BUT', 'MISTER', 'BEE', 'HAD', 'NO', 'ANSWERS', 'WERE', 'THEM'] +1998-29455-0037-2269: ref=['NOR', 'WAS', 'IT', 'SUNDAY', 'ON', 'WHICH', 'THEY', 'TOOK', 'A', 'REST', 'AND', 'WASHED', 'THEIR', 'SHIRTS', 'ACCORDING', 'TO', 'MISTER', "BEALE'S", 
'RULE', 'OF', 'LIFE'] +1998-29455-0037-2269: hyp=['NOR', 'WAS', 'IT', 'SUNDAY', 'ON', 'WHICH', 'THEY', 'TOOK', 'A', 'REST', 'AND', 'WASHED', 'THEIR', 'SHIRTS', 'ACCORDING', 'TO', 'MISTER', "BEALE'S", 'RULE', 'OF', 'LIFE'] +1998-29455-0038-2270: ref=['THEY', 'DID', 'NOT', 'STAY', 'THERE', 'BUT', 'WALKED', 'OUT', 'ACROSS', 'THE', 'DOWNS', 'WHERE', 'THE', 'SKYLARKS', 'WERE', 'SINGING', 'AND', 'ON', 'A', 'DIP', 'OF', 'THE', 'DOWNS', 'CAME', 'UPON', 'GREAT', 'STONE', 'WALLS', 'AND', 'TOWERS', 'VERY', 'STRONG', 'AND', 'GRAY'] +1998-29455-0038-2270: hyp=['THEY', 'DID', 'NOT', 'STAY', 'THERE', 'BUT', 'WALKED', 'OUT', 'ACROSS', 'THE', 'DOWNS', 'WITH', 'THE', 'SKYLACKS', 'WAS', 'SINGING', 'AND', 'ON', 'A', 'DIP', 'OF', 'THE', 'DOWNS', 'CAME', 'UPON', 'GREAT', 'STONE', 'WARDS', 'AND', 'TOWERS', 'VERY', 'STRONG', 'AND', 'GRAY'] +1998-29455-0039-2271: ref=["WHAT'S", 'THAT', 'THERE', 'SAID', 'DICKIE'] +1998-29455-0039-2271: hyp=["WHAT'S", 'THAT', 'THERE', 'SAID', 'DICKY'] +2033-164914-0000-661: ref=['REPLIED', 'HE', 'OF', 'A', 'TRUTH', 'I', 'HEARD', 'HIM', 'NOT', 'AND', 'I', 'WOT', 'HIM', 'NOT', 'AND', 'FOLKS', 'ARE', 'ALL', 'SLEEPING'] +2033-164914-0000-661: hyp=['REPLIED', 'HE', 'OF', 'A', 'TRUTH', 'I', 'HEARD', 'HIM', 'NOT', 'AND', 'I', 'WOT', 'HIM', 'NOT', 'AND', 'FOLKS', 'ARE', 'ALL', 'SLEEPING'] +2033-164914-0001-662: ref=['BUT', 'SHE', 'SAID', 'WHOMSOEVER', 'THOU', 'SEEST', 'AWAKE', 'HE', 'IS', 'THE', 'RECITER'] +2033-164914-0001-662: hyp=['BUT', 'SHE', 'SAID', 'WHOMSOEVER', 'THOU', 'SEEST', 'AWAKE', 'HE', 'IS', 'THE', 'RESIDER'] +2033-164914-0002-663: ref=['THEN', 'SAID', 'THE', 'EUNUCH', 'ART', 'THOU', 'HE', 'WHO', 'REPEATED', 'POETRY', 'BUT', 'NOW', 'AND', 'MY', 'LADY', 'HEARD', 'HIM'] +2033-164914-0002-663: hyp=['THEN', 'SAID', 'THE', 'EUNUCH', 'ART', 'THOU', 'HE', 'WHO', 'REPEATED', 'POETRY', 'BUT', 'NOW', 'AND', 'MY', 'LADY', 'HEARD', 'HIM'] +2033-164914-0003-664: ref=['REJOINED', 'THE', 'EUNUCH', 'WHO', 'THEN', 'WAS', 'THE', 'RECITER', 'POINT', 'HIM', 'OUT', 'TO', 'ME'] +2033-164914-0003-664: hyp=['REJOINED', 'THE', 'EUNUCH', 'WHO', 'THEN', 'WAS', 'THE', 'RECITER', 'POINT', 'HIM', 'OUT', 'TO', 'ME'] +2033-164914-0004-665: ref=['BY', 'ALLAH', 'REPLIED', 'THE', 'FIREMAN', 'I', 'TELL', 'THEE', 'THE', 'TRUTH'] +2033-164914-0004-665: hyp=['BY', 'ALLAH', 'REPLIED', 'THE', 'FIREMAN', 'I', 'TELL', 'THEE', 'THE', 'TRUTH'] +2033-164914-0005-666: ref=['TELL', 'ME', 'WHAT', 'HAPPENED', 'QUOTH', 'ZAU', 'AL', 'MAKAN'] +2033-164914-0005-666: hyp=['TELL', 'ME', 'WHAT', 'HAPPENED', 'QUOTH', 'OWL', 'MAKAN'] +2033-164914-0006-667: ref=['WHAT', 'AILS', 'THEE', 'THEN', 'THAT', 'THOU', 'MUST', 'NEEDS', 'RECITE', 'VERSES', 'SEEING', 'THAT', 'WE', 'ARE', 'TIRED', 'OUT', 'WITH', 'WALKING', 'AND', 'WATCHING', 'AND', 'ALL', 'THE', 'FOLK', 'ARE', 'ASLEEP', 'FOR', 'THEY', 'REQUIRE', 'SLEEP', 'TO', 'REST', 'THEM', 'OF', 'THEIR', 'FATIGUE'] +2033-164914-0006-667: hyp=['WHAT', 'ELSE', 'THEE', 'THEN', 'THAT', 'THOU', 'MUST', 'NEEDS', 'RESIDE', 'VERSES', 'SEEING', 'THAT', 'WE', 'ARE', 'TIRED', 'OUT', 'WITH', 'WALKING', 'AND', 'WATCHING', 'AND', 'ALL', 'THE', 'FOLK', 'ARE', 'ASLEEP', 'FOR', 'THEY', 'REQUIRE', 'SLEEP', 'TO', 'REST', 'THEM', 'OF', 'THEIR', 'FATIGUE'] +2033-164914-0007-668: ref=['AND', 'HE', 'ALSO', 'IMPROVISED', 'THE', 'TWO', 'FOLLOWING', 'DISTICHS'] +2033-164914-0007-668: hyp=['AND', 'HE', 'ALSO', 'PROVISED', 'THE', 'TWO', 'FOLLOWING', 'DISTICHS'] +2033-164914-0008-669: ref=['WHEN', 'NUZHAT', 'AL', 'ZAMAN', 'HEARD', 'THE', 'FIRST', 'IMPROVISATION', 'SHE', 'CALLED', 'TO', 'MIND', 'HER', 'FATHER', 'AND', 
'HER', 'MOTHER', 'AND', 'HER', 'BROTHER', 'AND', 'THEIR', 'WHILOME', 'HOME', 'THEN', 'SHE', 'WEPT', 'AND', 'CRIED', 'AT', 'THE', 'EUNUCH', 'AND', 'SAID', 'TO', 'HIM', 'WOE', 'TO', 'THEE'] +2033-164914-0008-669: hyp=['WHEN', 'UZHAT', 'AL', 'ZAMAN', 'HEARD', 'THE', 'FIRST', 'IMPROVISATION', 'SHE', 'CALLED', 'TO', 'MINE', 'HER', 'FATHER', 'AND', 'HER', 'MOTHER', 'AND', 'HER', 'BROTHER', 'AND', 'THEIR', 'WILL', 'ON', 'HOME', 'THEN', 'SHE', 'WEPT', 'AND', 'CRIED', 'TO', 'THE', 'EUNUCH', 'AND', 'SAID', 'TO', 'HIM', 'WOE', 'TO', 'THEE'] +2033-164914-0009-670: ref=['HE', 'WHO', 'RECITED', 'THE', 'FIRST', 'TIME', 'HATH', 'RECITED', 'A', 'SECOND', 'TIME', 'AND', 'I', 'HEARD', 'HIM', 'HARD', 'BY'] +2033-164914-0009-670: hyp=['HE', 'WHO', 'RECITED', 'THE', 'FIRST', 'TIME', 'HAD', 'RECITED', 'A', 'SECOND', 'TIME', 'AND', 'I', 'HEARD', 'HIM', 'HARD', 'BY'] +2033-164914-0010-671: ref=['BY', 'ALLAH', 'AN', 'THOU', 'FETCH', 'HIM', 'NOT', 'TO', 'ME', 'I', 'WILL', 'ASSUREDLY', 'ROUSE', 'THE', 'CHAMBERLAIN', 'ON', 'THEE', 'AND', 'HE', 'SHALL', 'BEAT', 'THEE', 'AND', 'CAST', 'THEE', 'OUT'] +2033-164914-0010-671: hyp=['BY', 'ALLAH', 'AN', 'THOU', 'FETCH', 'HIM', 'NOT', 'TO', 'ME', 'I', 'WILL', 'ASSUREDLY', 'ROUSE', 'THE', 'CHAMBERLAIN', 'ON', 'THEE', 'AND', 'HE', 'SHALL', 'BEAT', 'THEE', 'AND', 'CAST', 'THEE', 'OUT'] +2033-164914-0011-672: ref=['BUT', 'TAKE', 'THESE', 'HUNDRED', 'DINERS', 'AND', 'GIVE', 'THEM', 'TO', 'THE', 'SINGER', 'AND', 'BRING', 'HIM', 'TO', 'ME', 'GENTLY', 'AND', 'DO', 'HIM', 'NO', 'HURT'] +2033-164914-0011-672: hyp=['BUT', 'TAKE', 'THESE', 'HUNDRED', 'DINNERS', 'AND', 'GIVE', 'THEM', 'TO', 'THE', 'SINGER', 'AND', 'BRING', 'HIM', 'TO', 'ME', 'GENTLY', 'AND', 'DO', 'HIM', 'NO', 'HURT'] +2033-164914-0012-673: ref=['RETURN', 'QUICKLY', 'AND', 'LINGER', 'NOT'] +2033-164914-0012-673: hyp=['RETURN', 'QUICKLY', 'AND', 'LINGER', 'NOT'] +2033-164914-0013-674: ref=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'THIRD', 'NIGHT'] +2033-164914-0013-674: hyp=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'THIRD', 'NIGHT'] +2033-164914-0014-675: ref=['BUT', 'THE', 'EUNUCH', 'SAID', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'TILL', 'THOU', 'SHOW', 'ME', 'WHO', 'IT', 'WAS', 'THAT', 'RECITED', 'THE', 'VERSES', 'FOR', 'I', 'DREAD', 'RETURNING', 'TO', 'MY', 'LADY', 'WITHOUT', 'HIM'] +2033-164914-0014-675: hyp=['BUT', 'THE', 'EUNUCH', 'SAID', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'TILL', 'THOU', 'SHOW', 'ME', 'WHO', 'IT', 'WAS', 'THAT', 'RECITED', 'THE', 'VERSES', 'FOR', 'I', 'DREAD', 'RETURNING', 'TO', 'MY', 'LADY', 'WITHOUT', 'HIM'] +2033-164914-0015-676: ref=['NOW', 'WHEN', 'THE', 'FIREMAN', 'HEARD', 'THESE', 'WORDS', 'HE', 'FEARED', 'FOR', 'ZAU', 'AL', 'MAKAN', 'AND', 'WEPT', 'WITH', 'EXCEEDING', 'WEEPING', 'AND', 'SAID', 'TO', 'THE', 'EUNUCH', 'BY', 'ALLAH', 'IT', 'WAS', 'NOT', 'I', 'AND', 'I', 'KNOW', 'HIM', 'NOT'] +2033-164914-0015-676: hyp=['NOW', 'WHEN', 'THE', 'FIREMAN', 'HEARD', 'THESE', 'WORDS', 'HE', 'FEARED', 'FOR', 'ZOUAL', 'MAKAN', 'AND', 'WEPT', 'WITH', 'EXCEEDING', 'WEEPING', 'AND', 'SAID', 'TO', 'THE', 'EUNUCH', 'BY', 'ALLAH', 'IT', 'WAS', 'NOT', 'I', 'AND', 'I', 'KNOW', 'HIM', 'NOT'] +2033-164914-0016-677: ref=['SO', 'GO', 'THOU', 'TO', 'THY', 'STATION', 'AND', 'IF', 'THOU', 'AGAIN', 'MEET', 'ANY', 'ONE', 'AFTER', 'THIS', 'HOUR', 'RECITING', 'AUGHT', 'OF', 'POETRY', 'WHETHER', 'HE', 'BE', 'NEAR', 'OR', 'FAR', 'IT', 'WILL', 'BE', 'I', 'OR', 'SOME', 'ONE', 'I', 'KNOW', 'AND', 'THOU', 'SHALT', 'NOT', 'LEARN', 'OF', 'HIM', 'BUT', 'BY', 'ME'] +2033-164914-0016-677: hyp=['SO', 'GO', 'THOU', 'TO', 'THY', 'STATION', 
'AND', 'IF', 'THOU', 'AGAIN', 'MEET', 'ANY', 'ONE', 'AFTER', 'THIS', 'HOUR', 'RECITING', 'AUGHT', 'OF', 'POETRY', 'WHETHER', 'HE', 'BE', 'NEAR', 'OR', 'FAR', 'IT', 'WILL', 'BE', 'I', 'OR', 'SOME', 'ONE', 'I', 'KNOW', 'AND', 'THOU', 'SHALT', 'NOT', 'LEARN', 'OF', 'HIM', 'BUT', 'BY', 'ME'] +2033-164914-0017-678: ref=['THEN', 'HE', 'KISSED', 'THE', "EUNUCH'S", 'HEAD', 'AND', 'SPAKE', 'HIM', 'FAIR', 'TILL', 'HE', 'WENT', 'AWAY', 'BUT', 'THE', 'CASTRATO', 'FETCHED', 'A', 'ROUND', 'AND', 'RETURNING', 'SECRETLY', 'CAME', 'AND', 'STOOD', 'BEHIND', 'THE', 'FIREMAN', 'FEARING', 'TO', 'GO', 'BACK', 'TO', 'HIS', 'MISTRESS', 'WITHOUT', 'TIDINGS'] +2033-164914-0017-678: hyp=['THEN', 'HE', 'KISSED', 'THE', "EUNUCH'S", 'HEAD', 'AND', 'SPAKE', 'HIM', 'FAIR', 'TILL', 'HE', 'WENT', 'AWAY', 'BUT', 'THE', 'GASTRATO', 'FETCHED', 'THE', 'ROUND', 'AND', 'RETURNING', 'SECRETLY', 'CAME', 'AND', 'STOOD', 'BEHIND', 'THE', 'FIREMAN', 'FEARING', 'TO', 'GO', 'BACK', 'TO', 'HIS', 'MISTRESS', 'WITHOUT', 'TIDINGS'] +2033-164914-0018-679: ref=['I', 'SAY', 'WHAT', 'MADE', 'MY', 'IGNOMY', "WHATE'ER", 'THE', 'BITTER', 'CUP', 'I', 'DRAIN', 'FAR', 'BE', 'FRO', 'ME', 'THAT', 'LAND', 'TO', 'FLEE', 'NOR', 'WILL', 'I', 'BOW', 'TO', 'THOSE', 'WHO', 'BLAME', 'AND', 'FOR', 'SUCH', 'LOVE', 'WOULD', 'DEAL', 'ME', 'SHAME'] +2033-164914-0018-679: hyp=['I', 'SAY', 'WHAT', 'MADE', 'MY', 'IGNOMINY', 'WHATEVER', 'THE', 'BEACHER', 'CARP', 'I', 'DRAIN', 'FAR', 'BE', 'FROM', 'ME', 'THY', 'LAND', 'TO', 'FLEE', 'NOR', 'WILL', 'I', 'BOW', 'TO', 'THOSE', 'WHO', 'BLAME', 'AND', 'FOR', 'SUCH', 'LOVE', 'WOULD', 'DEAL', 'ME', 'SHAME'] +2033-164914-0019-680: ref=['THEN', 'SAID', 'THE', 'EUNUCH', 'TO', 'ZAU', 'AL', 'MAKAN', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'LORD'] +2033-164914-0019-680: hyp=['THEN', 'SAID', 'THE', 'EUNUCH', 'TO', 'ZAWAL', 'MAKAN', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'LORD'] +2033-164914-0020-681: ref=['O', 'MY', 'LORD', 'CONTINUED', 'THE', 'EUNUCH', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164914-0020-681: hyp=['O', 'MY', 'LORD', 'CONTINUED', 'THE', 'EUNUCH', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THAT', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164914-0021-682: ref=['WE', 'WILL', 'DO', 'THEE', 'NO', 'UPRIGHT', 'O', 'MY', 'SON', 'NOR', 'WRONG', 'THEE', 'IN', 'AUGHT', 'BUT', 'OUR', 'OBJECT', 'IS', 'THAT', 'THOU', 'BEND', 'THY', 'GRACIOUS', 'STEPS', 'WITH', 'ME', 'TO', 'MY', 'MISTRESS', 'TO', 'RECEIVE', 'HER', 'ANSWER', 'AND', 'RETURN', 'IN', 'WEAL', 'AND', 'SAFETY', 'AND', 'THOU', 'SHALT', 'HAVE', 'A', 'HANDSOME', 'PRESENT', 'AS', 'ONE', 'WHO', 'BRINGETH', 'GOOD', 'NEWS'] +2033-164914-0021-682: hyp=['WE', 'WILL', 'DO', 'THEE', 'NO', 'UPRIGHT', 'O', 'MY', 'SON', 'NOR', 'WRONG', 'THEE', 'IN', 'AUGHT', 'BUT', 'OUR', 'OBJECT', 'IS', 'THAT', 'THOU', 'BEND', 'THY', 'GRACIOUS', 'STEPS', 'WITH', 'ME', 'TO', 'MY', 'MISTRESS', 'TO', 'RECEIVE', 'HER', 'ANSWER', 'AND', 'RETURNING', 'WHEEL', 'AND', 'SAFETY', 'AND', 'THOU', 'SHALT', 'HAVE', 'A', 'HANDSOME', 'PRESENT', 'AS', 'ONE', 'WHO', 'BRINGETH', 'GOOD', 'NEWS'] +2033-164914-0022-683: ref=['THEN', 'THE', 'EUNUCH', 'WENT', 'OUT', 'TO', 'ZAU', 'AL', 'MAKAN', 'AND', 'SAID', 'TO', 'HIM', 'RECITE', 'WHAT', 'VERSES', 'THOU', 'KNOWEST', 'FOR', 'MY', 'LADY', 'IS', 'HERE', 'HARD', 'BY', 'LISTENING', 'TO', 'THEE', 'AND', 'AFTER', 'I', 'WILL', 'ASK', 'THEE', 'OF', 'THY', 'NAME', 'AND', 'THY', 'NATIVE', 'COUNTRY', 'AND', 'THY', 'CONDITION'] +2033-164914-0022-683: 
hyp=['THEN', 'THE', 'EUNUCH', 'WENT', 'OUT', 'TO', 'ZAO', 'MAKAN', 'AND', 'SAID', 'TO', 'HIM', 'RECITE', 'WHAT', 'VERSE', 'IS', 'THOU', 'KNOWEST', 'FOR', 'MY', "LADY'S", 'HEAR', 'HARD', 'BY', 'LISTENING', 'TO', 'THEE', 'AND', 'AFTER', 'I', 'WILL', 'ASK', 'THEE', 'OF', 'THY', 'NAME', 'AND', 'THINE', 'NATIVE', 'COUNTRY', 'AND', 'THY', 'CONDITION'] +2033-164915-0000-643: ref=['AND', 'ALSO', 'THESE'] +2033-164915-0000-643: hyp=['AND', 'ALSO', 'THESE'] +2033-164915-0001-644: ref=['THEN', 'SHE', 'THREW', 'HERSELF', 'UPON', 'HIM', 'AND', 'HE', 'GATHERED', 'HER', 'TO', 'HIS', 'BOSOM', 'AND', 'THE', 'TWAIN', 'FELL', 'DOWN', 'IN', 'A', 'FAINTING', 'FIT'] +2033-164915-0001-644: hyp=['THEN', 'SHE', 'THREW', 'HERSELF', 'UPON', 'HIM', 'AND', 'HE', 'GATHERED', 'HER', 'TO', 'HIS', 'BOSOM', 'AND', 'ITWAIN', 'FELL', 'DOWN', 'IN', 'A', 'FAINTING', 'FIT'] +2033-164915-0002-645: ref=['WHEN', 'THE', 'EUNUCH', 'SAW', 'THIS', 'CASE', 'HE', 'WONDERED', 'AT', 'THEM', 'AND', 'THROWING', 'OVER', 'THEM', 'SOMEWHAT', 'TO', 'COVER', 'THEM', 'WAITED', 'TILL', 'THEY', 'SHOULD', 'RECOVER'] +2033-164915-0002-645: hyp=['WHEN', 'THE', 'EUNUCHS', 'SAW', 'THESE', 'CASE', 'HE', 'WONDERED', 'AT', 'THEM', 'AND', 'THROWING', 'OVER', 'THEM', 'SOMEWHAT', 'TO', 'COVER', 'THEM', 'WAITED', 'TILL', 'THEY', 'SHOULD', 'RECOVER'] +2033-164915-0003-646: ref=['AFTER', 'A', 'WHILE', 'THEY', 'CAME', 'TO', 'THEMSELVES', 'AND', 'NUZHAT', 'AL', 'ZAMAN', 'REJOICED', 'WITH', 'EXCEEDING', 'JOY', 'OPPRESSION', 'AND', 'DEPRESSION', 'LEFT', 'HER', 'AND', 'GLADNESS', 'TOOK', 'THE', 'MASTERY', 'OF', 'HER', 'AND', 'SHE', 'REPEATED', 'THESE', 'VERSES'] +2033-164915-0003-646: hyp=['AFTER', 'A', 'WHILE', 'THEY', 'CAME', 'TO', 'THEMSELVES', 'AND', 'USHART', 'AL', 'ZAMAN', 'REJOICED', 'WITH', 'EXCEEDING', 'JOY', 'OPPRESSION', 'AND', 'DEPRESSION', 'LAUGHTER', 'AND', 'GLADNESS', 'TOOK', 'THE', 'MASTERY', 'OF', 'HER', 'AND', 'SHE', 'REPEATED', 'THESE', 'VERSES'] +2033-164915-0004-647: ref=['ACCORDINGLY', 'SHE', 'TOLD', 'HIM', 'ALL', 'THAT', 'HAD', 'COME', 'TO', 'HER', 'SINCE', 'THEIR', 'SEPARATION', 'AT', 'THE', 'KHAN', 'AND', 'WHAT', 'HAD', 'HAPPENED', 'TO', 'HER', 'WITH', 'THE', 'BADAWI', 'HOW', 'THE', 'MERCHANT', 'HAD', 'BOUGHT', 'HER', 'OF', 'HIM', 'AND', 'HAD', 'TAKEN', 'HER', 'TO', 'HER', 'BROTHER', 'SHARRKAN', 'AND', 'HAD', 'SOLD', 'HER', 'TO', 'HIM', 'HOW', 'HE', 'HAD', 'FREED', 'HER', 'AT', 'THE', 'TIME', 'OF', 'BUYING', 'HOW', 'HE', 'HAD', 'MADE', 'A', 'MARRIAGE', 'CONTRACT', 'WITH', 'HER', 'AND', 'HAD', 'GONE', 'IN', 'TO', 'HER', 'AND', 'HOW', 'THE', 'KING', 'THEIR', 'SIRE', 'HAD', 'SENT', 'AND', 'ASKED', 'FOR', 'HER', 'FROM', 'SHARRKAN'] +2033-164915-0004-647: hyp=['ACCORDINGLY', 'SHE', 'TOLD', 'HIM', 'ALL', 'THAT', 'HAD', 'COME', 'TO', 'HER', 'SINCE', 'THEIR', 'SEPARATION', 'AT', 'THE', 'KHAN', 'AND', 'WHAT', 'HAD', 'HAPPENED', 'TO', 'HER', 'WITH', 'THE', 'BADAWI', 'HOW', 'THE', 'MERCHANT', 'HAD', 'BOUGHT', 'HER', 'OF', 'HIM', 'AND', 'HAD', 'TAKEN', 'HER', 'TO', 'HER', 'BROTHER', 'SHARKAN', 'AND', 'HAD', 'SOLD', 'HER', 'TO', 'HIM', 'HOW', 'HE', 'HAD', 'FREED', 'HER', 'AT', 'THE', 'TIME', 'OF', 'BUYING', 'HOW', 'HE', 'HAD', 'MADE', 'HER', 'MARRIAGE', 'CONTRACT', 'WITH', 'HER', 'AND', 'HAD', 'GONE', 'IN', 'TO', 'HER', 'AND', 'HOW', 'THE', 'KING', 'THEIR', 'SIRE', 'HAD', 'SENT', 'AND', 'ASKED', 'FOR', 'HER', 'FROM', 'SHARKAN'] +2033-164915-0005-648: ref=['BUT', 'NOW', 'GO', 'TO', 'THY', 'MASTER', 'AND', 'BRING', 'HIM', 'QUICKLY', 'TO', 'ME'] +2033-164915-0005-648: hyp=['BUT', 'NOW', 'GO', 'TO', 'THY', 'MASTER', 'AND', 'BRING', 'HIM', 'QUICKLY', 'TO', 
'ME'] +2033-164915-0006-649: ref=['THE', 'CHAMBERLAIN', 'CALLED', 'THE', 'CASTRATO', 'AND', 'CHARGED', 'HIM', 'TO', 'DO', 'ACCORDINGLY', 'SO', 'HE', 'REPLIED', 'I', 'HEAR', 'AND', 'I', 'OBEY', 'AND', 'HE', 'TOOK', 'HIS', 'PAGES', 'WITH', 'HIM', 'AND', 'WENT', 'OUT', 'IN', 'SEARCH', 'OF', 'THE', 'STOKER', 'TILL', 'HE', 'FOUND', 'HIM', 'IN', 'THE', 'REAR', 'OF', 'THE', 'CARAVAN', 'GIRTHING', 'HIS', 'ASS', 'AND', 'PREPARING', 'FOR', 'FLIGHT'] +2033-164915-0006-649: hyp=['THE', 'CHAMBERLAIN', 'CALLED', 'THE', 'CASTRATO', 'AND', 'CHARGED', 'HIM', 'TO', 'DO', 'ACCORDINGLY', 'SO', 'HE', 'REPLIED', 'I', 'HEAR', 'AND', 'I', 'OBEY', 'AND', 'HE', 'TOOK', 'HIS', 'PAGES', 'WITH', 'HIM', 'AND', 'WENT', 'OUT', 'IN', 'SEARCH', 'OF', 'THE', 'STOCKER', 'TILL', 'HE', 'FOUND', 'HIM', 'IN', 'THE', 'REAR', 'OF', 'THE', 'CARAVAN', 'GIRDING', 'HIS', 'ASS', 'AND', 'PREPARING', 'FOR', 'FLIGHT'] +2033-164915-0007-650: ref=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'THE', 'STOKER', 'GIRTHED', 'HIS', 'ASS', 'FOR', 'FLIGHT', 'AND', 'BESPAKE', 'HIMSELF', 'SAYING', 'OH', 'WOULD', 'I', 'KNEW', 'WHAT', 'IS', 'BECOME', 'OF', 'HIM'] +2033-164915-0007-650: hyp=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'THE', 'STOCKER', 'GIRDED', 'HIS', 'EYES', 'FOR', 'FLIGHT', 'AND', 'BESPAKE', 'HIMSELF', 'SAYING', 'OH', 'WOULD', 'I', 'KNEW', 'WHAT', 'IS', 'BECOME', 'OF', 'HIM'] +2033-164915-0008-651: ref=['I', 'BELIEVE', 'HE', 'HATH', 'DENOUNCED', 'ME', 'TO', 'THE', 'EUNUCH', 'HENCE', 'THESE', 'PAGES', 'ET', 'ABOUT', 'ME', 'AND', 'HE', 'HATH', 'MADE', 'ME', 'AN', 'ACCOMPLICE', 'IN', 'HIS', 'CRIME'] +2033-164915-0008-651: hyp=['I', 'BELIEVE', 'HE', 'HATH', 'DENOUNCED', 'ME', 'TO', 'THE', 'EUNUCH', 'HENCE', 'THESE', 'PAGES', 'AT', 'ABOUT', 'ME', 'AND', 'HE', 'HATH', 'MADE', 'ME', 'AN', 'ACCOMPLICE', 'IN', 'HIS', 'CRIME'] +2033-164915-0009-652: ref=['WHY', 'DIDST', 'THOU', 'SAY', 'I', 'NEVER', 'REPEATED', 'THESE', 'COUPLETS', 'NOR', 'DO', 'I', 'KNOW', 'WHO', 'REPEATED', 'THEM', 'WHEN', 'IT', 'WAS', 'THY', 'COMPANION'] +2033-164915-0009-652: hyp=['WHY', 'DIDST', 'THOU', 'SAY', 'I', 'NEVER', 'REPEATED', 'THESE', 'COUPLETS', 'NOR', 'DO', 'I', 'KNOW', 'WHO', 'REPEATED', 'THEM', 'WHEN', 'IT', 'WAS', 'THY', 'COMPANION'] +2033-164915-0010-653: ref=['BUT', 'NOW', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'BETWEEN', 'THIS', 'PLACE', 'AND', 'BAGHDAD', 'AND', 'WHAT', 'BETIDETH', 'THY', 'COMRADE', 'SHALL', 'BETIDE', 'THEE'] +2033-164915-0010-653: hyp=['BUT', 'NOW', 'I', 'WILL', 'NOT', 'LEAVE', 'THEE', 'BETWEEN', 'THIS', 'PLACE', 'AND', 'BAGDAD', 'AND', 'WHAT', 'BETIDETH', 'THY', 'COMRADE', 'SHALL', 'BETIDE', 'THEE'] +2033-164915-0011-654: ref=['TWAS', 'AS', 'I', 'FEARED', 'THE', 'COMING', 'ILLS', 'DISCERNING', 'BUT', 'UNTO', 'ALLAH', 'WE', 'ARE', 'ALL', 'RETURNING'] +2033-164915-0011-654: hyp=['TOWARDS', 'AS', 'I', 'FEARED', 'THE', 'CARMINALS', 'DISCERNING', 'BUT', 'ON', 'TO', 'ALLAH', 'WE', 'ARE', 'ALL', 'RETURNING'] +2033-164915-0012-655: ref=['THEN', 'THE', 'EUNUCH', 'CRIED', 'UPON', 'THE', 'PAGES', 'SAYING', 'TAKE', 'HIM', 'OFF', 'THE', 'ASS'] +2033-164915-0012-655: hyp=['THEN', 'THE', 'EUNUCH', 'CRIED', 'UPON', 'IN', 'THE', 'PAGES', 'SAYING', 'TAKE', 'HIM', 'OFF', 'THE', 'ASS'] +2033-164915-0013-656: ref=['AND', 'HE', 'ANSWERED', 'I', 'AM', 'THE', 'CHAMBERLAIN', 'OF', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'KING', 'SHARRKAN', 'SON', 'OF', 'OMAR', 'BIN', 'AL', "NU'UMAN", 'LORD', 'OF', 'BAGHDAD', 'AND', 'OF', 'THE', 'LAND', 'OF', 'KHORASAN', 'AND', 'I', 'BRING', 
'TRIBUTE', 'AND', 'PRESENTS', 'FROM', 'HIM', 'TO', 'HIS', 'FATHER', 'IN', 'BAGHDAD'] +2033-164915-0013-656: hyp=['AND', 'HE', 'ANSWERED', 'I', 'AM', 'THE', 'CHAMBERLAIN', 'OF', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'KING', 'SHARKAN', 'SUNG', 'OVER', 'MARBIN', 'AL', 'NUMA', 'LORD', 'OF', 'WABDAD', 'AND', 'OF', 'THE', 'LAND', 'OF', 'KHORASAN', 'AND', 'I', 'BRING', 'TRIBUTE', 'AND', 'PRESENTS', 'FROM', 'HIM', 'TO', 'HIS', 'FATHER', 'IN', 'BAGHDAD'] +2033-164915-0014-657: ref=['SO', 'FARE', 'YE', 'FORWARDS', 'NO', 'HARM', 'SHALL', 'BEFAL', 'YOU', 'TILL', 'YOU', 'JOIN', 'HIS', 'GRAND', 'WAZIR', 'DANDAN'] +2033-164915-0014-657: hyp=['SOPHIA', 'FORWARDS', 'NO', 'HARM', 'SHALL', 'BEFALL', 'YOU', 'TILL', 'YOU', 'JOIN', 'HIS', 'GRAND', 'WAZIR', 'THAN', 'DAN'] +2033-164915-0015-658: ref=['THEN', 'HE', 'BADE', 'HIM', 'BE', 'SEATED', 'AND', 'QUESTIONED', 'HIM', 'AND', 'HE', 'REPLIED', 'THAT', 'HE', 'WAS', 'CHAMBERLAIN', 'TO', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'AND', 'WAS', 'BOUND', 'TO', 'KING', 'OMAR', 'WITH', 'PRESENTS', 'AND', 'THE', 'TRIBUTE', 'OF', 'SYRIA'] +2033-164915-0015-658: hyp=['THEN', 'HE', 'BADE', 'HIM', 'BE', 'SEATED', 'AND', 'QUESTIONED', 'HIM', 'AND', 'HE', 'REPLIED', 'THAT', 'HE', 'WAS', 'TREMBLING', 'TO', 'THE', 'EMIR', 'OF', 'DAMASCUS', 'AND', 'WAS', 'BOUND', 'TO', 'KING', 'OMAR', 'WITH', 'PRESENTS', 'AND', 'THE', 'TRIBUTE', 'OF', 'SYRIA'] +2033-164915-0016-659: ref=['SO', 'IT', 'WAS', 'AGREED', 'THAT', 'WE', 'GO', 'TO', 'DAMASCUS', 'AND', 'FETCH', 'THENCE', 'THE', "KING'S", 'SON', 'SHARRKAN', 'AND', 'MAKE', 'HIM', 'SULTAN', 'OVER', 'HIS', "FATHER'S", 'REALM'] +2033-164915-0016-659: hyp=['SO', 'IT', 'WAS', 'AGREED', 'THAT', 'WE', 'GO', 'TO', 'DAMASCUS', 'AND', 'FETCH', 'THENCE', 'THE', "KING'S", 'SON', 'SHARKAN', 'AND', 'MAY', 'CAME', 'SULTAN', 'OVER', 'HIS', "FATHER'S", 'REALM'] +2033-164915-0017-660: ref=['AND', 'AMONGST', 'THEM', 'WERE', 'SOME', 'WHO', 'WOULD', 'HAVE', 'CHOSEN', 'THE', 'CADET', 'ZAU', 'AL', 'MAKAN', 'FOR', 'QUOTH', 'THEY', 'HIS', 'NAME', 'BE', 'LIGHT', 'OF', 'THE', 'PLACE', 'AND', 'HE', 'HATH', 'A', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'HIGHS', 'THE', 'DELIGHT', 'OF', 'THE', 'TIME', 'BUT', 'THEY', 'SET', 'OUT', 'FIVE', 'YEARS', 'AGO', 'FOR', 'AL', 'HIJAZ', 'AND', 'NONE', 'WOTTETH', 'WHAT', 'IS', 'BECOME', 'OF', 'THEM'] +2033-164915-0017-660: hyp=['AND', 'AMONGST', 'THEM', 'WERE', 'SOME', 'WHO', 'WOULD', 'HAVE', 'CHOSEN', 'THE', 'CADET', 'THOUA', 'MAKAN', 'FOR', 'QUOTH', 'THEY', 'HIS', 'NAME', 'BE', 'LIGHT', 'OF', 'THE', 'PLACE', 'AND', 'HE', 'HATH', 'A', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'HIES', 'THE', 'DELIGHT', 'OF', 'THE', 'TIME', 'BUT', 'THEY', 'SET', 'OUT', 'FIVE', 'YEARS', 'AGO', 'FOR', 'AL', 'KI', 'JAS', 'AND', 'NONE', 'WOTTETH', 'WHAT', 'IS', 'BECOME', 'OF', 'THEM'] +2033-164916-0000-684: ref=['SO', 'HE', 'TURNED', 'TO', 'THE', 'WAZIR', 'DANDAN', 'AND', 'SAID', 'TO', 'HIM', 'VERILY', 'YOUR', 'TALE', 'IS', 'A', 'WONDER', 'OF', 'WONDERS'] +2033-164916-0000-684: hyp=['SO', 'HE', 'TURNED', 'TO', 'THE', 'WAZIR', 'DANDAN', 'AND', 'SAID', 'TO', 'HIM', 'VERILY', 'YOUR', 'TALE', 'IS', 'A', 'WANDER', 'OF', 'WONDERS'] +2033-164916-0001-685: ref=['KNOW', 'O', 'CHIEF', 'WAZIR', 'THAT', 'HERE', 'WHERE', 'YOU', 'HAVE', 'ENCOUNTERED', 'ME', 'ALLAH', 'HATH', 'GIVEN', 'YOU', 'REST', 'FROM', 'FATIGUE', 'AND', 'BRINGETH', 'YOU', 'YOUR', 'DESIRE', 'AFTER', 'THE', 'EASIEST', 'OF', 'FASHIONS', 'FOR', 'THAT', 'HIS', 'ALMIGHTY', 'WILL', 'RESTORETH', 'TO', 'YOU', 'ZAU', 'AL', 'MAKAN', 'AND', 'HIS', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'WHEREBY', 'WE', 'WILL', 'SETTLE', 'THE', 'MATTER', 
'AS', 'WE', 'EASILY', 'CAN'] +2033-164916-0001-685: hyp=['NO', 'O', 'CHIEF', 'WAZIR', 'THAT', 'HERE', 'WHERE', 'YOU', 'HAVE', 'ENCOUNTERED', 'ME', 'ALLAH', 'HATH', 'GIVEN', 'YOU', 'REST', 'FROM', 'FATIGUE', 'AND', 'BRINGETH', 'YOU', 'YOUR', 'DESIRE', 'AFTER', 'THE', 'EASIEST', 'OF', 'FASHIONS', 'FOR', 'LET', 'HIS', 'ALMIGHTY', 'WILL', 'RESTORE', 'IT', 'TO', 'YOU', 'THOU', 'A', 'MAKAN', 'AND', 'HIS', 'SISTER', 'NUZHAT', 'AL', 'ZAMAN', 'WHEREBY', 'WE', 'WILL', 'SETTLE', 'THE', 'MATTER', 'AS', 'WE', 'EASILY', 'CAN'] +2033-164916-0002-686: ref=['WHEN', 'THE', 'MINISTER', 'HEARD', 'THESE', 'WORDS', 'HE', 'REJOICED', 'WITH', 'GREAT', 'JOY', 'AND', 'SAID', 'O', 'CHAMBERLAIN', 'TELL', 'ME', 'THE', 'TALE', 'OF', 'THE', 'TWAIN', 'AND', 'WHAT', 'BEFEL', 'THEM', 'AND', 'THE', 'CAUSE', 'OF', 'THEIR', 'LONG', 'ABSENCE'] +2033-164916-0002-686: hyp=['WHEN', 'THE', 'MEANS', 'SIR', 'HEARD', 'THESE', 'WORDS', 'HE', 'REJOICED', 'WITH', 'GRAY', 'JOY', 'AND', 'SAID', 'O', 'CHAMBERLAIN', 'TELL', 'ME', 'THE', 'TALE', 'OF', 'THE', 'TWAIN', 'AND', 'WHAT', 'BEFELL', 'THEM', 'AND', 'THE', 'CAUSE', 'OF', 'THEIR', 'LONG', 'ABSENCE'] +2033-164916-0003-687: ref=['ZAU', 'AL', 'MAKAN', 'BOWED', 'HIS', 'HEAD', 'AWHILE', 'AND', 'THEN', 'SAID', 'I', 'ACCEPT', 'THIS', 'POSITION', 'FOR', 'INDEED', 'THERE', 'WAS', 'NO', 'REFUSING', 'AND', 'HE', 'WAS', 'CERTIFIED', 'THAT', 'THE', 'CHAMBERLAIN', 'HAD', 'COUNSELLED', 'HIM', 'WELL', 'AND', 'WISELY', 'AND', 'SET', 'HIM', 'ON', 'THE', 'RIGHT', 'WAY'] +2033-164916-0003-687: hyp=['ZAO', 'MAKAN', 'BOWED', 'HIS', 'HEAD', 'A', 'WHILE', 'AND', 'THEN', 'SAID', 'I', 'ACCEPT', 'THE', 'POSITION', 'FOR', 'INDEED', 'THERE', 'WAS', 'NO', 'REFUSING', 'AND', 'HE', 'WAS', 'CERTIFIED', 'THAT', 'THE', 'CHAMBERLAIN', 'HAD', 'COUNSELLED', 'HIM', 'WELL', 'AND', 'WISELY', 'AND', 'SAT', 'HIM', 'ON', 'THE', 'RIGHT', 'WAY'] +2033-164916-0004-688: ref=['THEN', 'HE', 'ADDED', 'O', 'MY', 'UNCLE', 'HOW', 'SHALL', 'I', 'DO', 'WITH', 'MY', 'BROTHER', 'SHARRKAN'] +2033-164916-0004-688: hyp=['THEN', 'HE', 'ADDED', 'O', 'MY', 'UNCLE', 'HOW', 'SHALL', 'I', 'DO', 'WITH', 'MY', 'BROTHER', 'SHARKAN'] +2033-164916-0005-689: ref=['AFTER', 'AWHILE', 'THE', 'DUST', 'DISPERSED', 'AND', 'THERE', 'APPEARED', 'UNDER', 'IT', 'THE', 'ARMY', 'OF', 'BAGHDAD', 'AND', 'KHORASAN', 'A', 'CONQUERING', 'HOST', 'LIKE', 'THE', 'FULL', 'TIDE', 'SEA', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164916-0005-689: hyp=['AFTER', 'A', 'WHILE', 'THE', 'DUST', 'DISPERSED', 'THEM', 'AND', 'THERE', 'APPEARED', 'UNDER', 'IT', 'THE', 'ARMY', 'OF', 'BAGHDAD', 'AND', 'KHORASAN', 'A', 'CONQUERING', 'HOST', 'LIKE', 'THE', 'POOL', 'TIDE', 'SEA', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THAT', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'TO', 'SAY', 'HER', 'PERMITTED', 'SAY'] +2033-164916-0006-690: ref=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'EIGHTH', 'NIGHT'] +2033-164916-0006-690: hyp=['WHEN', 'IT', 'WAS', 'THE', 'SEVENTY', 'EIGHTH', 'NIGHT'] +2033-164916-0007-691: ref=['AND', 'IN', 'IT', 'ALL', 'REJOICED', 'AT', 'THE', 'ACCESSION', 'OF', 'THE', 'LIGHT', 'OF', 'THE', 'PLACE'] +2033-164916-0007-691: hyp=['ANY', 'NEAT', 'OR', 'REJOICED', 'AT', 'THE', 'ACCESSION', 'OF', 'THE', 'LIGHT', 'OF', 'THE', 'PLACE'] +2033-164916-0008-692: ref=['LASTLY', 'THE', 'MINISTER', 'WENT', 'IN', 'AND', 'KISSED', 'THE', 'GROUND', 'BEFORE', 'ZAU', 'AL', 'MAKAN', 'WHO', 'ROSE', 'TO', 'MEET', 'HIM', 'SAYING', 'WELCOME', 'O', 'WAZIR', 'AND', 'SIRE', 'SANS', 'PEER'] +2033-164916-0008-692: hyp=['LASTLY', 'THE', 
'MINISTER', 'WENT', 'IN', 'AND', 'KISSED', 'THE', 'GROUND', 'BEFORE', 'ZAO', 'MAKAN', 'WHO', 'ROSE', 'TO', 'MEET', 'HIM', 'SAYING', 'WELCOME', 'O', 'WAZIR', 'AND', 'SIRES', 'SONSPIER'] +2033-164916-0009-693: ref=['MOREOVER', 'THE', 'SULTAN', 'COMMANDED', 'HIS', 'WAZIR', 'DANDAN', 'CALL', 'A', 'TEN', 'DAYS', 'HALT', 'OF', 'THE', 'ARMY', 'THAT', 'HE', 'MIGHT', 'BE', 'PRIVATE', 'WITH', 'HIM', 'AND', 'LEARN', 'FROM', 'HIM', 'HOW', 'AND', 'WHEREFORE', 'HIS', 'FATHER', 'HAD', 'BEEN', 'SLAIN'] +2033-164916-0009-693: hyp=['MOREOVER', 'THE', 'SULTAN', 'COMMANDED', 'HIS', 'WAZIR', 'DAN', 'CALL', 'AT', 'TEN', 'DAYS', 'HALT', 'OF', 'THE', 'ARMY', 'THAT', 'HE', 'MIGHT', 'BE', 'PRIVATE', 'WITH', 'HIM', 'AND', 'LEARN', 'FROM', 'HIM', 'HOW', 'AND', 'WHEREFORE', 'HIS', 'FATHER', 'HAD', 'BEEN', 'SLAIN'] +2033-164916-0010-694: ref=['HE', 'THEN', 'REPAIRED', 'TO', 'THE', 'HEART', 'OF', 'THE', 'ENCAMPMENT', 'AND', 'ORDERED', 'THE', 'HOST', 'TO', 'HALT', 'TEN', 'DAYS'] +2033-164916-0010-694: hyp=['HE', 'THEN', 'REPAIRED', 'TO', 'THE', 'HEARTS', 'OF', 'THE', 'ENCAMPMENT', 'AND', 'ORDERED', 'THAT', 'THE', 'HOST', 'TO', 'HALT', 'TEN', 'DAYS'] +2414-128291-0000-2689: ref=['WHAT', 'HATH', 'HAPPENED', 'UNTO', 'ME'] +2414-128291-0000-2689: hyp=['WHAT', 'HATH', 'HAPPENED', 'TO', 'ME'] +2414-128291-0001-2690: ref=['HE', 'ASKED', 'HIMSELF', 'SOMETHING', 'WARM', 'AND', 'LIVING', 'QUICKENETH', 'ME', 'IT', 'MUST', 'BE', 'IN', 'THE', 'NEIGHBOURHOOD'] +2414-128291-0001-2690: hyp=['HE', 'ASKED', 'HIMSELF', 'SOMETHING', 'WARM', 'AND', 'LIVING', 'QUICKENED', 'ME', 'IT', 'MUST', 'BE', 'IN', 'THE', 'NEIGHBOURHOOD'] +2414-128291-0002-2691: ref=['WHEN', 'HOWEVER', 'ZARATHUSTRA', 'WAS', 'QUITE', 'NIGH', 'UNTO', 'THEM', 'THEN', 'DID', 'HE', 'HEAR', 'PLAINLY', 'THAT', 'A', 'HUMAN', 'VOICE', 'SPAKE', 'IN', 'THE', 'MIDST', 'OF', 'THE', 'KINE', 'AND', 'APPARENTLY', 'ALL', 'OF', 'THEM', 'HAD', 'TURNED', 'THEIR', 'HEADS', 'TOWARDS', 'THE', 'SPEAKER'] +2414-128291-0002-2691: hyp=['WHEN', 'HOWEVER', 'THE', 'TWO', 'STRAW', 'WAS', 'QUITE', 'NIGH', 'AND', 'TO', 'THEM', 'THEN', 'DID', 'HE', 'HEAR', 'PLAINLY', 'WITH', 'HUMAN', 'VOICE', 'PIKE', 'IN', 'THE', 'MIDST', 'OF', 'THE', 'KIND', 'AND', 'THE', 'FRIENDLY', 'ALL', 'OF', 'THEM', 'HAD', 'TURNED', 'THEIR', 'HEADS', 'TOWARDS', 'THE', 'SPEAKER'] +2414-128291-0003-2692: ref=['WHAT', 'DO', 'I', 'HERE', 'SEEK'] +2414-128291-0003-2692: hyp=['FOR', 'DO', 'I', 'HERE', 'SEEK'] +2414-128291-0004-2693: ref=['ANSWERED', 'HE', 'THE', 'SAME', 'THAT', 'THOU', 'SEEKEST', 'THOU', 'MISCHIEF', 'MAKER', 'THAT', 'IS', 'TO', 'SAY', 'HAPPINESS', 'UPON', 'EARTH'] +2414-128291-0004-2693: hyp=['ANSWERED', 'HE', 'THE', 'SAME', 'THAT', 'THOU', 'SEEKEST', 'THOU', 'MISCHIEF', 'MAKER', 'THAT', 'IS', 'TO', 'SAY', 'HAPPINESS', 'UPON', 'EARTH'] +2414-128291-0005-2694: ref=['FOR', 'I', 'TELL', 'THEE', 'THAT', 'I', 'HAVE', 'ALREADY', 'TALKED', 'HALF', 'A', 'MORNING', 'UNTO', 'THEM', 'AND', 'JUST', 'NOW', 'WERE', 'THEY', 'ABOUT', 'TO', 'GIVE', 'ME', 'THEIR', 'ANSWER'] +2414-128291-0005-2694: hyp=['FOR', 'I', 'TELL', 'THEE', 'THAT', 'I', 'HAVE', 'ALL', 'WE', 'TALKED', 'HALF', 'A', 'MORNING', 'UNTO', 'THEM', 'AND', 'JUST', 'NOW', 'WITH', 'ABOUT', 'TO', 'GIVE', 'ME', 'THE', 'ANSWER'] +2414-128291-0006-2695: ref=['HE', 'WOULD', 'NOT', 'BE', 'RID', 'OF', 'HIS', 'AFFLICTION'] +2414-128291-0006-2695: hyp=['HE', 'WOULD', 'NOT', 'BE', 'RID', 'OF', 'HIS', 'AFFLICATION'] +2414-128291-0007-2696: ref=['WHO', 'HATH', 'NOT', 'AT', 'PRESENT', 'HIS', 'HEART', 'HIS', 'MOUTH', 'AND', 'HIS', 'EYES', 'FULL', 'OF', 'DISGUST'] +2414-128291-0007-2696: 
hyp=['WHO', 'HAD', 'NOT', 'AT', 'PRESENT', 'HIS', 'HEART', 'HIS', 'MOUTH', 'AND', 'HIS', 'EYES', 'FULL', 'OF', 'DISGUST'] +2414-128291-0008-2697: ref=['THOU', 'ALSO', 'THOU', 'ALSO'] +2414-128291-0008-2697: hyp=['THOU', 'ALSO', 'THOU', 'ALSO'] +2414-128291-0009-2698: ref=['BUT', 'BEHOLD', 'THESE', 'KINE'] +2414-128291-0009-2698: hyp=['BUT', 'BEHOLD', 'HIS', 'KIND'] +2414-128291-0010-2699: ref=['THE', 'KINE', 'HOWEVER', 'GAZED', 'AT', 'IT', 'ALL', 'AND', 'WONDERED'] +2414-128291-0010-2699: hyp=['DECLINE', 'HOWEVER', 'GAZED', 'AT', 'IT', 'ALL', 'AND', 'WONDERED'] +2414-128291-0011-2700: ref=['WANTON', 'AVIDITY', 'BILIOUS', 'ENVY', 'CAREWORN', 'REVENGE', 'POPULACE', 'PRIDE', 'ALL', 'THESE', 'STRUCK', 'MINE', 'EYE'] +2414-128291-0011-2700: hyp=['WARRENTON', 'ALD', 'DUTY', 'BILIOUS', 'ENVY', 'CAREWORN', 'REVENGE', 'POPULOUS', 'PRIDE', 'ALL', 'THIS', 'STRUCK', 'MIGHT', 'EYE'] +2414-128291-0012-2701: ref=['IT', 'IS', 'NO', 'LONGER', 'TRUE', 'THAT', 'THE', 'POOR', 'ARE', 'BLESSED'] +2414-128291-0012-2701: hyp=['IT', 'IS', 'NO', 'LONGER', 'TRUE', 'NEITHER', 'POOR', 'ARE', 'BLESSED'] +2414-128291-0013-2702: ref=['THE', 'KINGDOM', 'OF', 'HEAVEN', 'HOWEVER', 'IS', 'WITH', 'THE', 'KINE', 'AND', 'WHY', 'IS', 'IT', 'NOT', 'WITH', 'THE', 'RICH'] +2414-128291-0013-2702: hyp=['THE', 'KINGDOM', 'OF', 'HEAVEN', 'HOWEVER', 'IS', 'WITH', 'A', 'KIND', 'AND', 'WHY', 'IS', 'IT', 'NOT', 'WITH', 'A', 'RICH'] +2414-128291-0014-2703: ref=['WHY', 'DOST', 'THOU', 'TEMPT', 'ME'] +2414-128291-0014-2703: hyp=['WHY', 'THOSE', 'THOU', 'TEMPT', 'ME'] +2414-128291-0015-2704: ref=['ANSWERED', 'THE', 'OTHER'] +2414-128291-0015-2704: hyp=['ANSWERED', 'HER'] +2414-128291-0016-2705: ref=['THOU', 'KNOWEST', 'IT', 'THYSELF', 'BETTER', 'EVEN', 'THAN', 'I'] +2414-128291-0016-2705: hyp=['THOU', 'KNOWEST', 'IT', 'THYSELF', 'BETTER', 'EVEN', 'THAN', 'I'] +2414-128291-0017-2706: ref=['THUS', 'SPAKE', 'THE', 'PEACEFUL', 'ONE', 'AND', 'PUFFED', 'HIMSELF', 'AND', 'PERSPIRED', 'WITH', 'HIS', 'WORDS', 'SO', 'THAT', 'THE', 'KINE', 'WONDERED', 'ANEW'] +2414-128291-0017-2706: hyp=['DOES', 'SPEAK', 'THE', 'BEATHFUL', 'ONE', 'AND', 'PUFFED', 'HIMSELF', 'AND', 'POSPIRED', 'WITH', 'HIS', 'WORDS', 'FOR', 'IN', 'THE', 'KIND', 'WOUNDED', 'I', 'KNEW'] +2414-128291-0018-2707: ref=['THOU', 'DOEST', 'VIOLENCE', 'TO', 'THYSELF', 'THOU', 'PREACHER', 'ON', 'THE', 'MOUNT', 'WHEN', 'THOU', 'USEST', 'SUCH', 'SEVERE', 'WORDS'] +2414-128291-0018-2707: hyp=['THOU', 'DOEST', 'VIOLENCE', 'TO', 'THYSELF', 'THOU', 'PREACHER', 'ON', 'THE', 'MOUNT', 'AND', 'THOU', 'USEST', 'SUCH', 'SAVIER', 'WORDS'] +2414-128291-0019-2708: ref=['THEY', 'ALSO', 'ABSTAIN', 'FROM', 'ALL', 'HEAVY', 'THOUGHTS', 'WHICH', 'INFLATE', 'THE', 'HEART'] +2414-128291-0019-2708: hyp=['THEY', 'ALSO', 'ABSTAINED', 'FROM', 'ALL', 'HEAVY', 'TORCH', 'WHICH', 'INFLATE', 'THE', 'HEART'] +2414-128291-0020-2709: ref=['WELL'] +2414-128291-0020-2709: hyp=['WELL'] +2414-128291-0021-2710: ref=['SAID', 'ZARATHUSTRA', 'THOU', 'SHOULDST', 'ALSO', 'SEE', 'MINE', 'ANIMALS', 'MINE', 'EAGLE', 'AND', 'MY', 'SERPENT', 'THEIR', 'LIKE', 'DO', 'NOT', 'AT', 'PRESENT', 'EXIST', 'ON', 'EARTH'] +2414-128291-0021-2710: hyp=['SAYS', 'THE', 'ACCUSTRA', 'THOU', 'SHOULDEST', 'ALSO', 'SEE', 'MY', 'ANIMALS', 'MIGHT', 'EAGLE', 'AND', 'MY', 'SERPENT', 'THEIR', 'LIKE', 'DO', 'NOT', 'AT', 'PRESENT', 'EXIST', 'ON', 'EARTH'] +2414-128291-0022-2711: ref=['AND', 'TALK', 'TO', 'MINE', 'ANIMALS', 'OF', 'THE', 'HAPPINESS', 'OF', 'ANIMALS'] +2414-128291-0022-2711: hyp=['AND', 'TALKED', 'TO', 'MINE', 'ANIMALS', 'OF', 'THE', 'HAPPINESS', 'OF', 
'ANIMALS'] +2414-128291-0023-2712: ref=['NOW', 'HOWEVER', 'TAKE', 'LEAVE', 'AT', 'ONCE', 'OF', 'THY', 'KINE', 'THOU', 'STRANGE', 'ONE'] +2414-128291-0023-2712: hyp=['NOW', 'HOWEVER', 'TAKE', 'LEAVE', 'AT', 'ONCE', 'OF', 'THEIR', 'KIND', 'THOU', 'STRANGE', 'ONE'] +2414-128291-0024-2713: ref=['THOU', 'AMIABLE', 'ONE'] +2414-128291-0024-2713: hyp=['THOU', 'ADMIABLE', 'ONE'] +2414-128291-0025-2714: ref=['FOR', 'THEY', 'ARE', 'THY', 'WARMEST', 'FRIENDS', 'AND', 'PRECEPTORS'] +2414-128291-0025-2714: hyp=['FOR', 'THEY', 'ARE', 'THY', 'WARMEST', 'FRIENDS', 'AND', 'PERCEPTIVES'] +2414-128291-0026-2715: ref=['THOU', 'EVIL', 'FLATTERER'] +2414-128291-0026-2715: hyp=['THOU', 'EVE', 'IS', 'SLATTERER'] +2414-128292-0000-2618: ref=['WHITHER', 'HATH', 'MY', 'LONESOMENESS', 'GONE', 'SPAKE', 'HE'] +2414-128292-0000-2618: hyp=['WHITHER', 'HAD', 'MY', 'LONESOME', 'DISCOUR', 'SPAKE', 'HE'] +2414-128292-0001-2619: ref=['MY', 'SHADOW', 'CALLETH', 'ME'] +2414-128292-0001-2619: hyp=['MY', 'SHADOW', 'CALLETH', 'ME'] +2414-128292-0002-2620: ref=['WHAT', 'MATTER', 'ABOUT', 'MY', 'SHADOW'] +2414-128292-0002-2620: hyp=['WHAT', 'MATTER', 'ABOUT', 'MY', 'SHADOW'] +2414-128292-0003-2621: ref=['LET', 'IT', 'RUN', 'AFTER', 'ME', 'I', 'RUN', 'AWAY', 'FROM', 'IT'] +2414-128292-0003-2621: hyp=['LET', 'IT', 'RUN', 'AFTER', 'ME', 'I', 'RAN', 'AWAY', 'FROM', 'IT'] +2414-128292-0004-2622: ref=['THUS', 'SPAKE', 'ZARATHUSTRA', 'TO', 'HIS', 'HEART', 'AND', 'RAN', 'AWAY'] +2414-128292-0004-2622: hyp=['THE', 'SPEAKER', 'TOO', 'STRIKE', 'TO', 'HIS', 'HEART', 'AND', 'RAN', 'AWAY'] +2414-128292-0005-2623: ref=['VERILY', 'MY', 'FOLLY', 'HATH', 'GROWN', 'BIG', 'IN', 'THE', 'MOUNTAINS'] +2414-128292-0005-2623: hyp=['VERILY', 'MY', 'FOLLY', 'HATH', 'GROWN', 'BIG', 'IN', 'THE', 'MOUNTAINS'] +2414-128292-0006-2624: ref=['NOW', 'DO', 'I', 'HEAR', 'SIX', 'OLD', 'FOOLS', 'LEGS', 'RATTLING', 'BEHIND', 'ONE', 'ANOTHER'] +2414-128292-0006-2624: hyp=['NOW', 'DO', 'I', 'HEAR', 'SIX', 'OLD', 'FOOTS', 'LEGS', 'RATTLING', 'BEHIND', 'ONE', 'ANOTHER'] +2414-128292-0007-2625: ref=['BUT', 'DOTH', 'ZARATHUSTRA', 'NEED', 'TO', 'BE', 'FRIGHTENED', 'BY', 'HIS', 'SHADOW'] +2414-128292-0007-2625: hyp=['BY', 'DIRTS', 'ARTISTRA', 'NEED', 'TO', 'BE', 'FRIGHTENED', 'BY', 'A', 'SHADOW'] +2414-128292-0008-2626: ref=['ALSO', 'METHINKETH', 'THAT', 'AFTER', 'ALL', 'IT', 'HATH', 'LONGER', 'LEGS', 'THAN', 'MINE'] +2414-128292-0008-2626: hyp=['ALSO', 'METHINK', 'IT', 'THAT', 'AFTER', 'ALL', 'IT', 'HAD', 'LONGER', 'LESS', 'THAN', 'MINE'] +2414-128292-0009-2627: ref=['FOR', 'WHEN', 'ZARATHUSTRA', 'SCRUTINISED', 'HIM', 'WITH', 'HIS', 'GLANCE', 'HE', 'WAS', 'FRIGHTENED', 'AS', 'BY', 'A', 'SUDDEN', 'APPARITION', 'SO', 'SLENDER', 'SWARTHY', 'HOLLOW', 'AND', 'WORN', 'OUT', 'DID', 'THIS', 'FOLLOWER', 'APPEAR'] +2414-128292-0009-2627: hyp=['FOR', 'WHEN', 'THEIR', 'TWO', 'STREETS', 'CRIED', 'HIM', 'WITH', 'HIS', 'GLANCE', 'HE', 'WAS', 'FRIGHTENED', 'AS', 'BY', 'ASSERTED', 'APPARITION', 'SO', 'SLENDER', 'SWARTHY', 'HOLLOW', 'AND', 'WORN', 'OUT', 'WITH', 'HIS', 'FOLLOWER', 'APPEARED'] +2414-128292-0010-2628: ref=['ASKED', 'ZARATHUSTRA', 'VEHEMENTLY', 'WHAT', 'DOEST', 'THOU', 'HERE'] +2414-128292-0010-2628: hyp=['I', 'OBJECT', 'TO', 'ESTRAVA', 'IMAGEDLY', 'WHAT', 'DO', 'WEST', 'THOU', 'HERE'] +2414-128292-0011-2629: ref=['AND', 'WHY', 'CALLEST', 'THOU', 'THYSELF', 'MY', 'SHADOW'] +2414-128292-0011-2629: hyp=['AND', 'WHY', 'CALLEST', 'THOU', 'THYSELF', 'MY', 'SHADOW'] +2414-128292-0012-2630: ref=['THOU', 'ART', 'NOT', 'PLEASING', 'UNTO', 'ME'] +2414-128292-0012-2630: hyp=['THOU', 'ART', 
'NOT', 'PLEASING', 'IN', 'TO', 'ME'] +2414-128292-0013-2631: ref=['MUST', 'I', 'EVER', 'BE', 'ON', 'THE', 'WAY'] +2414-128292-0013-2631: hyp=['MUST', 'I', 'EVER', 'BE', 'ON', 'THE', 'WAY'] +2414-128292-0014-2632: ref=['O', 'EARTH', 'THOU', 'HAST', 'BECOME', 'TOO', 'ROUND', 'FOR', 'ME'] +2414-128292-0014-2632: hyp=['O', 'ART', 'THOU', 'HAST', 'BECOME', 'TO', 'ROUND', 'FOR', 'ME'] +2414-128292-0015-2633: ref=['WHEN', 'THE', 'DEVIL', 'CASTETH', 'HIS', 'SKIN', 'DOTH', 'NOT', 'HIS', 'NAME', 'ALSO', 'FALL', 'AWAY', 'IT', 'IS', 'ALSO', 'SKIN'] +2414-128292-0015-2633: hyp=['WITH', 'A', 'DEVIL', 'CAST', 'AT', 'HIS', 'SKIN', 'DOTH', 'NOT', 'HIS', 'NAME', 'ALSO', 'FALL', 'AWAY', 'IT', 'IS', 'ALSO', 'SKINNED'] +2414-128292-0016-2634: ref=['THE', 'DEVIL', 'HIMSELF', 'IS', 'PERHAPS', 'SKIN'] +2414-128292-0016-2634: hyp=['THE', 'DEVIL', 'HIMSELF', 'IS', 'PERHAPS', 'KIN'] +2414-128292-0017-2635: ref=['SOMETIMES', 'I', 'MEANT', 'TO', 'LIE', 'AND', 'BEHOLD'] +2414-128292-0017-2635: hyp=['SOMETIMES', 'I', 'MEANT', 'TO', 'LIE', 'AND', 'BEHOLD'] +2414-128292-0018-2636: ref=['THEN', 'ONLY', 'DID', 'I', 'HIT', 'THE', 'TRUTH'] +2414-128292-0018-2636: hyp=['THEN', 'ALLEY', 'DID', 'I', 'HATE', 'THE', 'TRUTH'] +2414-128292-0019-2637: ref=['HOW', 'HAVE', 'I', 'STILL', 'INCLINATION'] +2414-128292-0019-2637: hyp=['HOW', 'HAVE', 'I', 'STILL', 'INCLINATIONS'] +2414-128292-0020-2638: ref=['HAVE', 'I', 'STILL', 'A', 'GOAL'] +2414-128292-0020-2638: hyp=['ERE', 'I', 'STILL', 'A', 'GOLD'] +2414-128292-0021-2639: ref=['A', 'HAVEN', 'TOWARDS', 'WHICH', 'MY', 'SAIL', 'IS', 'SET'] +2414-128292-0021-2639: hyp=['A', 'HEROD', 'DOORS', 'WHICH', 'MY', 'SAILORS', 'SAID'] +2414-128292-0022-2640: ref=['FOR', 'IT', 'DO', 'I', 'ASK', 'AND', 'SEEK', 'AND', 'HAVE', 'SOUGHT', 'BUT', 'HAVE', 'NOT', 'FOUND', 'IT'] +2414-128292-0022-2640: hyp=['FOR', 'IT', 'TOO', 'I', 'ASK', 'AND', 'SEEK', 'AND', 'HATH', 'THOUGHT', 'IT', 'HAVE', 'NOT', 'FOUND', 'IT'] +2414-128292-0023-2641: ref=['O', 'ETERNAL', 'EVERYWHERE', 'O', 'ETERNAL', 'NOWHERE', 'O', 'ETERNAL', 'IN', 'VAIN'] +2414-128292-0023-2641: hyp=['OR', 'ETERNAL', 'EVERYWHERE', 'WHO', 'HAD', 'TURNED', 'OUT', 'NOWHERE', 'WHO', 'HAD', 'TURNED', 'IN', 'VAIN'] +2414-128292-0024-2642: ref=['THOU', 'ART', 'MY', 'SHADOW'] +2414-128292-0024-2642: hyp=['THOU', 'ART', 'MY', 'SHADOW'] +2414-128292-0025-2643: ref=['SAID', 'HE', 'AT', 'LAST', 'SADLY'] +2414-128292-0025-2643: hyp=['SAID', 'HE', 'AT', 'LAST', 'SADLY'] +2414-128292-0026-2644: ref=['THY', 'DANGER', 'IS', 'NOT', 'SMALL', 'THOU', 'FREE', 'SPIRIT', 'AND', 'WANDERER'] +2414-128292-0026-2644: hyp=['THY', 'DANGER', 'IS', 'NOT', 'SMALL', 'THOU', 'FREE', 'SPIRIT', 'AND', 'WONDER'] +2414-128292-0027-2645: ref=['THEY', 'SLEEP', 'QUIETLY', 'THEY', 'ENJOY', 'THEIR', 'NEW', 'SECURITY'] +2414-128292-0027-2645: hyp=['THE', 'SLEEP', 'QUIETLY', 'THEY', 'ENJOYED', 'THEIR', 'NEW', 'SECURITY'] +2414-128292-0028-2646: ref=['BEWARE', 'LEST', 'IN', 'THE', 'END', 'A', 'NARROW', 'FAITH', 'CAPTURE', 'THEE', 'A', 'HARD', 'RIGOROUS', 'DELUSION'] +2414-128292-0028-2646: hyp=['BE', 'REALIZED', 'IN', 'THE', 'AID', 'A', 'NARROW', 'FATE', 'CAPTURED', 'THE', 'A', 'HARD', 'RECKLESS', 'VOLUTION'] +2414-128292-0029-2647: ref=['FOR', 'NOW', 'EVERYTHING', 'THAT', 'IS', 'NARROW', 'AND', 'FIXED', 'SEDUCETH', 'AND', 'TEMPTETH', 'THEE'] +2414-128292-0029-2647: hyp=['FOR', 'NOW', 'EVERYTHING', 'THAT', 'IS', 'NARROW', 'AND', 'FIXED', 'SEDUCE', 'IT', 'AND', 'TEMPT', 'IT', 'THEE'] +2414-128292-0030-2648: ref=['THOU', 'HAST', 'LOST', 'THY', 'GOAL'] +2414-128292-0030-2648: hyp=['THOU', 'HAST', 
'LOST', 'DANGLE'] +2414-128292-0031-2649: ref=['THOU', 'POOR', 'ROVER', 'AND', 'RAMBLER', 'THOU', 'TIRED', 'BUTTERFLY'] +2414-128292-0031-2649: hyp=['THOUGH', 'POOR', 'ROVER', 'AND', 'RAMBLER', 'NOW', 'TIRED', 'BUT', 'TO', 'FLY'] +2414-128292-0032-2650: ref=['WILT', 'THOU', 'HAVE', 'A', 'REST', 'AND', 'A', 'HOME', 'THIS', 'EVENING'] +2414-128292-0032-2650: hyp=['WILT', 'THOU', 'HAVE', 'ARREST', 'AND', 'A', 'HOME', 'THIS', 'EVENING'] +2414-159411-0000-2653: ref=['ONCE', 'UPON', 'A', 'TIME', 'A', 'BRAHMAN', 'WHO', 'WAS', 'WALKING', 'ALONG', 'THE', 'ROAD', 'CAME', 'UPON', 'AN', 'IRON', 'CAGE', 'IN', 'WHICH', 'A', 'GREAT', 'TIGER', 'HAD', 'BEEN', 'SHUT', 'UP', 'BY', 'THE', 'VILLAGERS', 'WHO', 'CAUGHT', 'HIM'] +2414-159411-0000-2653: hyp=['ONCE', 'UPON', 'HER', 'TIME', 'A', 'BRAHMAN', 'WHO', 'WAS', 'WALKING', 'ALONG', 'THE', 'ROAD', 'CAME', 'UPON', 'AN', 'IRON', 'CAGE', 'IN', 'WHICH', 'A', 'GREAT', 'TIGER', 'ADMIRED', 'UP', 'BY', 'THE', 'VILLAGES', 'WHO', 'CAUGHT', 'HIM'] +2414-159411-0001-2654: ref=['THE', 'BRAHMAN', 'ANSWERED', 'NO', 'I', 'WILL', 'NOT', 'FOR', 'IF', 'I', 'LET', 'YOU', 'OUT', 'OF', 'THE', 'CAGE', 'YOU', 'WILL', 'EAT', 'ME'] +2414-159411-0001-2654: hyp=['THE', 'BRAMIN', 'ANSWERED', 'NO', 'I', 'WILL', 'NOT', 'FOR', 'IF', 'I', 'LET', 'YOU', 'OUT', 'OF', 'THE', 'CAGE', 'YOU', 'WILL', 'EAT', 'ME'] +2414-159411-0002-2655: ref=['OH', 'FATHER', 'OF', 'MERCY', 'ANSWERED', 'THE', 'TIGER', 'IN', 'TRUTH', 'THAT', 'I', 'WILL', 'NOT'] +2414-159411-0002-2655: hyp=['ALL', 'FATHER', 'OF', 'MERCY', 'ANSWERED', 'THE', 'TIGER', 'IN', 'TRUTH', 'THAT', 'I', 'WILL', 'NOT'] +2414-159411-0003-2656: ref=['I', 'WILL', 'NEVER', 'BE', 'SO', 'UNGRATEFUL', 'ONLY', 'LET', 'ME', 'OUT', 'THAT', 'I', 'MAY', 'DRINK', 'SOME', 'WATER', 'AND', 'RETURN'] +2414-159411-0003-2656: hyp=['I', 'WILL', 'NEVER', 'BE', 'SO', 'UNGRATEFUL', 'ONLY', 'LET', 'ME', 'OUT', 'THAT', 'I', 'MAY', 'DRINK', 'SOME', 'WATER', 'AND', 'RETURN'] +2414-159411-0004-2657: ref=['THEN', 'THE', 'BRAHMAN', 'TOOK', 'PITY', 'ON', 'HIM', 'AND', 'OPENED', 'THE', 'CAGE', 'DOOR', 'BUT', 'NO', 'SOONER', 'HAD', 'HE', 'DONE', 'SO', 'THAN', 'THE', 'TIGER', 'JUMPING', 'OUT', 'SAID', 'NOW', 'I', 'WILL', 'EAT', 'YOU', 'FIRST', 'AND', 'DRINK', 'THE', 'WATER', 'AFTERWARDS'] +2414-159411-0004-2657: hyp=['AND', 'IN', 'THE', 'BRAMMING', 'TOOK', 'PITY', 'ON', 'HIM', 'AND', 'OPENED', 'THE', 'CAGE', 'DOOR', 'BUT', 'NO', 'SOONER', 'HAD', 'HE', 'TURNED', 'SO', 'THAN', 'THE', 'TIGER', 'JUMPING', 'OUT', 'SAID', 'NOW', 'I', 'WILL', 'EAT', 'YOU', 'FIRST', 'AND', 'DRINK', 'THE', 'WATER', 'AFTERWARDS'] +2414-159411-0005-2658: ref=['SO', 'THE', 'BRAHMAN', 'AND', 'THE', 'TIGER', 'WALKED', 'ON', 'TILL', 'THEY', 'CAME', 'TO', 'A', 'BANYAN', 'TREE', 'AND', 'THE', 'BRAHMAN', 'SAID', 'TO', 'IT', 'BANYAN', 'TREE', 'BANYAN', 'TREE', 'HEAR', 'AND', 'GIVE', 'JUDGMENT'] +2414-159411-0005-2658: hyp=['SO', 'THE', 'BRAMID', 'AND', 'THE', 'TIGER', 'WALKED', 'ON', 'TILL', 'THEY', 'CAME', 'TO', 'A', 'BENDONED', 'TREE', 'AND', 'THE', 'BRAMMEN', 'SAID', 'TO', 'IT', 'BANNY', 'TREE', 'BANDREE', 'HERE', 'AND', 'GIVE', 'GERMAN'] +2414-159411-0006-2659: ref=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JUDGMENT', 'ASKED', 'THE', 'BANYAN', 'TREE'] +2414-159411-0006-2659: hyp=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JUDGMENT', 'ASKED', 'THE', 'BEN', 'TREE'] +2414-159411-0007-2660: ref=['THIS', 'TIGER', 'SAID', 'THE', 'BRAHMAN', 'BEGGED', 'ME', 'TO', 'LET', 'HIM', 'OUT', 'OF', 'HIS', 'CAGE', 'TO', 'DRINK', 'A', 'LITTLE', 'WATER', 'AND', 'HE', 'PROMISED', 'NOT', 'TO', 'HURT', 'ME', 'IF', 'I', 'DID', 'SO', 'BUT', 'NOW', 
'THAT', 'I', 'HAVE', 'LET', 'HIM', 'OUT', 'HE', 'WISHES', 'TO', 'EAT', 'ME'] +2414-159411-0007-2660: hyp=['THE', 'STAGER', 'SAID', 'THE', 'BRAMIN', 'BEG', 'ME', 'TO', 'LET', 'HIM', 'OUT', 'OF', 'HIS', 'CAGE', 'TO', 'DRINK', 'A', 'LITTLE', 'WATER', 'AND', 'HE', 'PROMISED', 'NOT', 'TO', 'HURT', 'ME', 'IF', 'I', 'DID', 'SO', 'BUT', 'NOW', 'THAT', 'I', 'HAVE', 'LEFT', 'HIM', 'OUT', 'HE', 'WISHES', 'TO', 'EAT', 'ME'] +2414-159411-0008-2661: ref=['IS', 'IT', 'JUST', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NO'] +2414-159411-0008-2661: hyp=["IT'S", 'A', 'JEALOUS', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NO'] +2414-159411-0009-2662: ref=['LET', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'ARE', 'AN', 'UNGRATEFUL', 'RACE'] +2414-159411-0009-2662: hyp=['LAID', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'ARE', 'AN', 'UNGRATEFUL', 'RACE'] +2414-159411-0010-2663: ref=['SIR', 'CAMEL', 'SIR', 'CAMEL', 'CRIED', 'THE', 'BRAHMAN', 'HEAR', 'AND', 'GIVE', 'JUDGMENT'] +2414-159411-0010-2663: hyp=['SIR', 'CAMELO', 'SIR', 'CAMEO', 'CRIED', 'THE', 'BRAHMAN', 'HERE', 'AND', 'GIVE', 'JAGIMENT'] +2414-159411-0011-2664: ref=['AT', 'A', 'LITTLE', 'DISTANCE', 'THEY', 'FOUND', 'A', 'BULLOCK', 'LYING', 'BY', 'THE', 'ROADSIDE'] +2414-159411-0011-2664: hyp=['AT', 'A', 'LITTLE', 'DISTANCE', 'THEY', 'FOUND', 'A', 'BULLOCK', 'LYING', 'BY', 'THE', "ROAD'S", 'HEAD'] +2414-159411-0012-2665: ref=['IS', 'IT', 'FAIR', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NOT'] +2414-159411-0012-2665: hyp=['IS', 'IT', 'FAIR', 'THAT', 'HE', 'SHOULD', 'DO', 'SO', 'OR', 'NOT'] +2414-159411-0013-2666: ref=['LET', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'HAVE', 'NO', 'PITY'] +2414-159411-0013-2666: hyp=['LET', 'THE', 'TIGER', 'EAT', 'THE', 'MAN', 'FOR', 'MEN', 'HAVE', 'NO', 'PITY'] +2414-159411-0014-2667: ref=['THREE', 'OUT', 'OF', 'THE', 'SIX', 'HAD', 'GIVEN', 'JUDGMENT', 'AGAINST', 'THE', 'BRAHMAN', 'BUT', 'STILL', 'HE', 'DID', 'NOT', 'LOSE', 'ALL', 'HOPE', 'AND', 'DETERMINED', 'TO', 'ASK', 'THE', 'OTHER', 'THREE'] +2414-159411-0014-2667: hyp=['THREE', 'OUT', 'OF', 'THE', 'SIX', 'AND', 'GIVEN', 'JUDGMENT', 'AGAINST', 'THE', 'BRAHMAN', 'WAS', 'STILL', 'HE', 'DID', 'NOT', 'LOSE', 'ALL', 'HOPE', 'AND', 'TO', 'TURN', 'MIND', 'TO', 'ASK', 'THE', 'OTHER', 'THREE'] +2414-159411-0015-2668: ref=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JUDGMENT', 'ASKED', 'THE', 'EAGLE'] +2414-159411-0015-2668: hyp=['ON', 'WHAT', 'MUST', 'I', 'GIVE', 'JULIET', 'ASKED', 'THE', 'EAGLE'] +2414-159411-0016-2669: ref=['THE', 'BRAHMAN', 'STATED', 'THE', 'CASE', 'AND', 'THE', 'EAGLE', 'ANSWERED', 'WHENEVER', 'MEN', 'SEE', 'ME', 'THEY', 'TRY', 'TO', 'SHOOT', 'ME', 'THEY', 'CLIMB', 'THE', 'ROCKS', 'AND', 'STEAL', 'AWAY', 'MY', 'LITTLE', 'ONES'] +2414-159411-0016-2669: hyp=['THE', 'BRAHMAN', 'SUITED', 'THE', 'CASE', 'AND', 'THE', 'EAGLE', 'ANSWERED', 'WHENEVER', 'MEN', 'SEE', 'ME', 'THEY', 'TRY', 'TO', 'SHOOT', 'ME', 'DECLINED', 'THE', 'ROCKS', 'AND', 'STEED', 'AWAY', 'MY', 'LITTLE', 'ONES'] +2414-159411-0017-2670: ref=['THEN', 'THE', 'TIGER', 'BEGAN', 'TO', 'ROAR', 'AND', 'SAID', 'THE', 'JUDGMENT', 'OF', 'ALL', 'IS', 'AGAINST', 'YOU', 'O', 'BRAHMAN'] +2414-159411-0017-2670: hyp=['THEN', 'THE', 'TIGER', 'BEGAN', 'TO', 'ROAR', 'AND', 'SAID', 'JUDGMENT', 'OF', 'ALL', 'IS', 'AGAINST', 'YOU', 'O', 'BRAHMIN'] +2414-159411-0018-2671: ref=['AFTER', 'THIS', 'THEY', 'SAW', 'AN', 'ALLIGATOR', 'AND', 'THE', 'BRAHMAN', 'RELATED', 'THE', 'MATTER', 'TO', 'HIM', 'HOPING', 'FOR', 'A', 'MORE', 'FAVORABLE', 'VERDICT'] +2414-159411-0018-2671: hyp=['AFTER', 'THIS', 'THEY', 
'SAW', 'AN', 'ALLIGATOR', 'AND', 'THE', 'BRAMA', 'RELATED', 'THE', 'MATTER', 'TO', 'HIM', 'HOPING', 'FOR', 'A', 'MORE', 'FAVOURABLE', 'VERDICT'] +2414-159411-0019-2672: ref=['BUT', 'THE', 'ALLIGATOR', 'SAID', 'WHENEVER', 'I', 'PUT', 'MY', 'NOSE', 'OUT', 'OF', 'THE', 'WATER', 'MEN', 'TORMENT', 'ME', 'AND', 'TRY', 'TO', 'KILL', 'ME'] +2414-159411-0019-2672: hyp=['WITH', 'THE', 'ADDER', 'TO', 'SIT', 'WHENEVER', 'I', 'PUT', 'MY', 'NOSE', 'OUT', 'OF', 'THE', 'WATER', 'MEANTIME', 'AND', 'ME', 'AND', 'TRIED', 'TO', 'KILL', 'ME'] +2414-159411-0020-2673: ref=['THE', 'BRAHMAN', 'GAVE', 'HIMSELF', 'UP', 'AS', 'LOST', 'BUT', 'AGAIN', 'HE', 'PRAYED', 'THE', 'TIGER', 'TO', 'HAVE', 'PATIENCE', 'AND', 'LET', 'HIM', 'ASK', 'THE', 'OPINION', 'OF', 'THE', 'SIXTH', 'JUDGE'] +2414-159411-0020-2673: hyp=['NO', 'BROWN', 'MEN', 'GAVE', 'HIMSELF', 'UP', 'AS', 'LOST', 'BUT', 'AGAIN', 'HE', 'PRAYED', 'THE', 'TIGER', 'TO', 'HAVE', 'PATIENCE', 'AND', 'LET', 'HIM', 'ASK', 'THE', 'OPINION', 'OF', 'THE', 'SIX', 'JUDGE'] +2414-159411-0021-2674: ref=['NOW', 'THE', 'SIXTH', 'WAS', 'A', 'JACKAL'] +2414-159411-0021-2674: hyp=['ON', 'THE', 'SIXTH', 'WAS', 'A', 'JACKAL'] +2414-159411-0022-2675: ref=['THE', 'BRAHMAN', 'TOLD', 'HIS', 'STORY', 'AND', 'SAID', 'TO', 'HIM', 'UNCLE', 'JACKAL', 'UNCLE', 'JACKAL', 'SAY', 'WHAT', 'IS', 'YOUR', 'JUDGMENT'] +2414-159411-0022-2675: hyp=['THE', 'GRANDMOTHER', 'TOLD', 'HIS', 'STORY', 'AND', 'SAID', 'TO', 'HIM', 'UNCLE', 'JACKAL', 'AND', 'WILL', 'JACKAL', 'SAY', 'WHAT', 'IS', 'YOUR', 'JUDGMENT'] +2414-159411-0023-2676: ref=['SHOW', 'ME', 'THE', 'PLACE'] +2414-159411-0023-2676: hyp=['SHOW', 'ME', 'THE', 'PACE'] +2414-159411-0024-2677: ref=['WHEN', 'THEY', 'GOT', 'THERE', 'THE', 'JACKAL', 'SAID', 'NOW', 'BRAHMAN', 'SHOW', 'ME', 'EXACTLY', 'WHERE', 'YOU', 'STOOD'] +2414-159411-0024-2677: hyp=['AND', 'THE', 'COURT', 'DEER', 'THE', 'JACKAL', 'SAID', 'NABRAMIN', 'SHOW', 'ME', 'EXACTLY', 'WHERE', 'YOU', 'STOOD'] +2414-159411-0025-2678: ref=['EXACTLY', 'THERE', 'WAS', 'IT', 'ASKED', 'THE', 'JACKAL'] +2414-159411-0025-2678: hyp=['EXACTLY', 'THERE', 'WAS', 'IT', 'ASKED', 'THE', 'JACK', 'WHO'] +2414-159411-0026-2679: ref=['EXACTLY', 'HERE', 'REPLIED', 'THE', 'BRAHMAN'] +2414-159411-0026-2679: hyp=['EXACTLY', 'HERE', 'REPLIED', 'THE', 'PROMIN'] +2414-159411-0027-2680: ref=['WHERE', 'WAS', 'THE', 'TIGER', 'THEN'] +2414-159411-0027-2680: hyp=['THERE', 'WAS', 'THE', 'TIGER', 'THEN'] +2414-159411-0028-2681: ref=['WHY', 'I', 'STOOD', 'SO', 'SAID', 'THE', 'TIGER', 'JUMPING', 'INTO', 'THE', 'CAGE', 'AND', 'MY', 'HEAD', 'WAS', 'ON', 'THIS', 'SIDE'] +2414-159411-0028-2681: hyp=['WHY', 'I', 'STOOD', 'SO', 'SAID', 'THE', 'TIGER', 'JUMPING', 'INTO', 'THE', 'CAGE', 'AND', 'MY', 'HEAD', 'WAS', 'ON', 'THIS', 'SIDE'] +2414-159411-0029-2682: ref=['VERY', 'GOOD', 'SAID', 'THE', 'JACKAL', 'BUT', 'I', 'CANNOT', 'JUDGE', 'WITHOUT', 'UNDERSTANDING', 'THE', 'WHOLE', 'MATTER', 'EXACTLY'] +2414-159411-0029-2682: hyp=['VERY', 'GOOD', 'SAID', 'TO', 'JACK', 'WHO', 'BUT', 'I', 'CANNOT', 'JUDGE', 'WITHOUT', 'UNDERSTANDING', 'THE', 'WHOLE', 'MATTER', 'EXACTLY'] +2414-159411-0030-2683: ref=['SHUT', 'AND', 'BOLTED', 'SAID', 'THE', 'BRAHMAN'] +2414-159411-0030-2683: hyp=['SHUT', 'AND', 'BOLTED', 'SAID', 'DE', 'BRAMIN'] +2414-159411-0031-2684: ref=['THEN', 'SHUT', 'AND', 'BOLT', 'IT', 'SAID', 'THE', 'JACKAL'] +2414-159411-0031-2684: hyp=['THEN', 'SHET', 'AND', 'BOLTED', 'SAID', 'TO', 'JACKAL'] +2414-159411-0032-2685: ref=['WHEN', 'THE', 'BRAHMAN', 'HAD', 'DONE', 'THIS', 'THE', 'JACKAL', 'SAID', 'OH', 'YOU', 'WICKED', 'AND', 'UNGRATEFUL', 
'TIGER'] +2414-159411-0032-2685: hyp=['WHEN', 'THE', 'BRAHMAN', 'HAD', 'TURNED', 'THIS', 'THE', 'JACKAL', 'SAID', 'OH', 'YOU', 'WICKED', 'AND', 'UNGRATEFUL', 'TIRE'] +2414-159411-0033-2686: ref=['WHEN', 'THE', 'GOOD', 'BRAHMAN', 'OPENED', 'YOUR', 'CAGE', 'DOOR', 'IS', 'TO', 'EAT', 'HIM', 'THE', 'ONLY', 'RETURN', 'YOU', 'WOULD', 'MAKE'] +2414-159411-0033-2686: hyp=['WITH', 'A', 'GOOD', 'BRAMIN', 'OPENED', 'YOU', 'CARED', 'TO', 'HER', 'IS', 'TO', 'EAT', 'HIM', 'THE', 'ONLY', 'RETURN', 'YOU', 'WOULD', 'MAKE'] +2414-159411-0034-2687: ref=['PROCEED', 'ON', 'YOUR', 'JOURNEY', 'FRIEND', 'BRAHMAN'] +2414-159411-0034-2687: hyp=['PROCEED', 'ON', 'YOUR', 'JOURNEY', 'FRIEND', 'DRAMIN'] +2414-159411-0035-2688: ref=['YOUR', 'ROAD', 'LIES', 'THAT', 'WAY', 'AND', 'MINE', 'THIS'] +2414-159411-0035-2688: hyp=['HE', 'RULED', 'LIVES', 'THAT', 'WAY', 'IN', 'MIND', 'THIS'] +2414-165385-0000-2651: ref=['THUS', 'ACCOMPLISHED', 'HE', 'EXCITED', 'THE', 'ADMIRATION', 'OF', 'EVERY', 'SILLY', 'COQUETTE', 'AND', 'THE', 'ENVY', 'OF', 'EVERY', 'FLUTTERING', 'COXCOMB', 'BUT', 'BY', 'ALL', 'YOUNG', 'GENTLEMEN', 'AND', 'LADIES', 'OF', 'UNDERSTANDING', 'HE', 'WAS', 'HEARTILY', 'DESPISED', 'AS', 'A', 'MERE', 'CIVILIZED', 'MONKEY'] +2414-165385-0000-2651: hyp=['THUS', 'ACCOMPLISHED', 'HE', 'EXCITED', 'ADMIRATION', 'OF', 'EVERY', 'SILLY', 'COCKET', 'AND', 'THE', 'ENVY', 'OF', 'EVERY', 'FACTIVE', 'ACCOUNT', 'BUT', 'BY', 'ALL', 'YOUNG', 'GENTLEMEN', 'AND', 'LADIES', 'OF', 'UNDERSTANDING', 'HE', 'WAS', 'HEARTILY', 'DESPISED', 'AS', 'A', 'MERE', 'CIVILIZED', 'MONKEY'] +2414-165385-0001-2652: ref=['THAT', 'HIS', 'SOUL', 'MIGHT', 'AFTERWARDS', 'OCCUPY', 'SUCH', 'A', 'STATION', 'AS', 'WOULD', 'BE', 'MOST', 'SUITABLE', 'TO', 'HIS', 'CHARACTER', 'IT', 'WAS', 'SENTENCED', 'TO', 'INHABIT', 'THE', 'BODY', 'OF', 'THAT', 'FINICAL', 'GRINNING', 'AND', 'MISCHIEVOUS', 'LITTLE', 'MIMICK', 'WITH', 'FOUR', 'LEGS', 'WHICH', 'YOU', 'NOW', 'BEHOLD', 'BEFORE', 'YOU'] +2414-165385-0001-2652: hyp=['AND', 'THAT', 'HIS', 'SOUL', 'MIGHT', 'AFTERWARDS', 'OCCUPY', 'SUCH', 'A', 'STATION', 'AS', 'WOULD', 'BE', 'MOST', 'SUITABLE', 'TO', 'HIS', 'CHARACTER', 'IT', 'WAS', 'INTENSE', 'TO', 'INHABIT', 'A', 'BODY', 'OF', 'THAT', 'FINNICAL', 'GRINNING', 'AND', 'MISCHIEVOUS', 'LITTLE', 'MIMIC', 'WITH', 'FULL', 'LEGS', 'WHICH', 'YOU', 'NOW', 'BEHOLD', 'BEFORE', 'YOU'] +2609-156975-0000-2367: ref=['THEN', 'MOSES', 'WAS', 'AFRAID', 'AND', 'SAID', 'SURELY', 'THE', 'THING', 'IS', 'KNOWN'] +2609-156975-0000-2367: hyp=['THEN', 'MOSES', 'WAS', 'AFRAID', 'AND', 'SAID', 'SURELY', 'THE', 'THING', 'IS', 'KNOWN'] +2609-156975-0001-2368: ref=['HOLD', 'ON', 'HOLD', 'FAST', 'HOLD', 'OUT', 'PATIENCE', 'IS', 'GENIUS'] +2609-156975-0001-2368: hyp=['ERON', 'HER', 'FAST', 'HOTEL', 'PATIENCE', 'IS', 'GENIUS'] +2609-156975-0002-2369: ref=['LET', 'US', 'HAVE', 'FAITH', 'THAT', 'RIGHT', 'MAKES', 'MIGHT', 'AND', 'IN', 'THAT', 'FAITH', 'LET', 'US', 'DARE', 'TO', 'DO', 'OUR', 'DUTY', 'AS', 'WE', 'UNDERSTAND', 'IT', 'LINCOLN'] +2609-156975-0002-2369: hyp=['LET', 'US', 'HAVE', 'FAITH', 'THAT', 'RIGHT', 'MATRON', 'MIGHT', 'AND', 'IN', 'THAT', 'FAITH', 'THAT', 'STARED', 'TO', 'DO', 'OUR', 'DUTY', 'AS', 'WE', 'UNDERSTAND', 'IT', 'LINCOLN'] +2609-156975-0003-2370: ref=['THE', 'EGYPTIAN', 'BACKGROUND', 'OF', 'THE', 'BONDAGE'] +2609-156975-0003-2370: hyp=['THE', 'EGYPTIAN', 'BACKGROUND', 'OF', 'THE', 'BONDAGE'] +2609-156975-0004-2371: ref=['EVERY', 'ONE', 'WHO', 'IS', 'TURBULENT', 'HAS', 'BEEN', 'FOUND', 'BY', 'KING', 'MERNEPTAH', 'THE', 'TESTIMONY', 'OF', 'THE', 'OLDEST', 'BIBLICAL', 'NARRATIVES', 
'REGARDING', 'THE', 'SOJOURN', 'OF', 'THE', 'HEBREWS', 'IN', 'EGYPT', 'IS', 'ALSO', 'IN', 'PERFECT', 'ACCORD', 'WITH', 'THE', 'PICTURE', 'WHICH', 'THE', 'CONTEMPORARY', 'EGYPTIAN', 'INSCRIPTIONS', 'GIVE', 'OF', 'THE', 'PERIOD'] +2609-156975-0004-2371: hyp=['EVERY', 'ONE', 'WHOSE', 'TURBOT', 'HAS', 'BEEN', 'FOUND', 'BY', 'KING', 'MARNETTE', 'PATH', 'DETACHEMONY', 'AS', 'THE', 'OLDEST', 'BAPLICO', 'NARRATIVE', 'REGARDING', 'THE', 'SOJOURN', 'OF', 'THE', 'HEBREWS', 'IN', 'EGYPT', 'IS', 'ALSO', 'IN', 'PERFECT', 'ACCORD', 'WITH', 'THE', 'PICTURE', 'WHICH', 'A', 'CONTEMPORARY', 'EGYPTIAN', 'SCRIPTIONS', 'GIVE', 'THE', 'PERIOD'] +2609-156975-0005-2372: ref=['THE', 'ABSENCE', 'OF', 'DETAILED', 'REFERENCE', 'TO', 'THE', 'HEBREWS', 'IS', 'THEREFORE', 'PERFECTLY', 'NATURAL'] +2609-156975-0005-2372: hyp=['THE', 'ABSENCE', 'OF', 'THE', 'DETAILED', 'REFERENCES', 'THE', 'HEBREWS', 'IS', 'THEREFORE', 'PERFECTLY', 'NATURAL'] +2609-156975-0006-2373: ref=['IT', 'SEEMS', 'PROBABLE', 'THAT', 'NOT', 'ALL', 'BUT', 'ONLY', 'PART', 'OF', 'THE', 'TRIBES', 'WHICH', 'ULTIMATELY', 'COALESCED', 'INTO', 'THE', 'HEBREW', 'NATION', 'FOUND', 'THEIR', 'WAY', 'TO', 'EGYPT'] +2609-156975-0006-2373: hyp=['IT', 'SEEMS', 'PROBABLE', 'THAT', 'NOT', 'ALL', 'BUT', 'ONLY', 'PART', 'IN', 'THE', 'TRIBES', 'WHICH', 'ULTIMATE', 'COVETTES', 'INTO', 'THE', 'HEBREW', 'NATION', 'FOUND', 'THEIR', 'WAY', 'TO', 'EGYPT'] +2609-156975-0007-2374: ref=['THE', 'STORIES', 'REGARDING', 'JOSEPH', 'THE', 'TRADITIONAL', 'FATHER', 'OF', 'EPHRAIM', 'AND', 'MANASSEH', 'IMPLY', 'THAT', 'THESE', 'STRONG', 'CENTRAL', 'TRIBES', 'POSSIBLY', 'TOGETHER', 'WITH', 'THE', 'SOUTHERN', 'TRIBES', 'OF', 'BENJAMIN', 'AND', 'JUDAH', 'WERE', 'THE', 'CHIEF', 'ACTORS', 'IN', 'THIS', 'OPENING', 'SCENE', 'IN', "ISRAEL'S", 'HISTORY'] +2609-156975-0007-2374: hyp=['THE', 'STORIES', 'REGARDING', 'JOSEPH', 'THEIR', 'TRADITIONAL', 'FOUND', 'THEIR', 'ATRONE', 'AND', 'MANOT', 'SAY', 'INCLINE', 'THAT', 'THESE', 'STRONG', 'CENTRAL', 'TRIBES', 'POSSIBLY', 'TOGETHER', 'WITH', 'THE', 'SOUTHERN', 'TRINES', 'OF', 'BINTAMEN', 'AND', 'JUDAH', 'WHERE', 'THE', 'CHIEF', 'ACTORS', 'WHO', 'THAT', 'SOMETHING', 'SCENE', 'IN', "ISRAEL'S", 'HISTORY'] +2609-156975-0008-2375: ref=['THE', 'BIBLICAL', 'NARRATIVES', 'APPARENTLY', 'DISAGREE', 'REGARDING', 'THE', 'DURATION', 'OF', 'THE', 'SOJOURN', 'IN', 'EGYPT'] +2609-156975-0008-2375: hyp=['THE', 'BEVOCO', 'NARRATIVES', 'APPARENTLY', 'DISAGRATING', 'GUARDING', 'THE', 'DURATION', 'OF', 'THE', 'SAJOURN', 'IN', 'EGYPT'] +2609-156975-0009-2376: ref=['THE', 'LATER', 'TRADITIONS', 'TEND', 'TO', 'EXTEND', 'THE', 'PERIOD'] +2609-156975-0009-2376: hyp=['THE', 'LATER', 'JUDICINES', 'INTERESTING', 'THE', 'PERIOD'] +2609-156975-0010-2377: ref=['HERE', 'WERE', 'FOUND', 'SEVERAL', 'INSCRIPTIONS', 'BEARING', 'THE', 'EGYPTIAN', 'NAME', 'OF', 'THE', 'CITY', 'P', 'ATUM', 'HOUSE', 'OF', 'THE', 'GOD', 'ATUM'] +2609-156975-0010-2377: hyp=['HE', 'WERE', 'FOUND', 'CHEVARIN', 'SCRIPTIONS', 'BEARING', 'THE', 'EGYPTIAN', 'NAME', 'OF', 'THE', 'CITY', 'PATUM', 'OUTSIDE', 'THE', 'GOD', 'ATOM'] +2609-156975-0011-2378: ref=['A', 'CONTEMPORARY', 'INSCRIPTION', 'ALSO', 'STATES', 'THAT', 'HE', 'FOUNDED', 'NEAR', 'PITHUM', 'THE', 'HOUSE', 'OF', 'RAMSES', 'A', 'CITY', 'WITH', 'A', 'ROYAL', 'RESIDENCE', 'AND', 'TEMPLES'] +2609-156975-0011-2378: hyp=['A', 'CONTEMPORARY', 'INSCRIPTION', 'ONCE', 'ESTATES', 'THAT', 'HE', 'FOUND', 'A', 'NEAR', 'PITTHAM', 'THE', 'HOUSE', 'OF', 'RAMESES', 'A', 'CITY', 'WITH', 'THE', 'ROYAL', 'RESIDENCE', 'AND', 'SIMPLES'] +2609-156975-0012-2379: ref=['THAT', 'THE', 
'HEBREWS', 'WERE', 'RESTIVE', 'UNDER', 'THIS', 'TYRANNY', 'WAS', 'NATURAL', 'INEVITABLE'] +2609-156975-0012-2379: hyp=['THAT', 'THE', 'HEBREWS', 'WERE', 'RENTS', 'OF', 'UNDER', 'THIS', 'SOON', 'WAS', 'NATURALLY', 'INEVITABLE'] +2609-156975-0013-2380: ref=['WAS', 'ANY', 'OTHER', 'PROCEDURE', 'TO', 'BE', 'EXPECTED', 'FROM', 'A', 'DESPOTIC', 'RULER', 'OF', 'THAT', 'LAND', 'AND', 'DAY'] +2609-156975-0013-2380: hyp=['WAS', 'ANY', 'OTHER', 'PROCEDURE', 'TO', 'BE', 'SPECTRE', 'FROM', 'IT', 'THAT', 'SPONNET', 'ROAR', 'OF', 'THAT', 'LAND', 'AND', 'DAY'] +2609-156975-0014-2381: ref=['THE', 'MAKING', 'OF', 'A', 'LOYAL', 'PATRIOT'] +2609-156975-0014-2381: hyp=['THE', 'MAKING', 'OF', 'THE', 'LOYAL', 'PATRIOT'] +2609-156975-0015-2382: ref=['THE', 'STORY', 'OF', 'MOSES', 'BIRTH', 'AND', 'EARLY', 'CHILDHOOD', 'IS', 'ONE', 'OF', 'THE', 'MOST', 'INTERESTING', 'CHAPTERS', 'IN', 'BIBLICAL', 'HISTORY'] +2609-156975-0015-2382: hyp=['THE', 'STORY', 'OF', 'MOSES', 'BERTH', 'AN', 'EARLY', 'CHILDHOOD', 'IS', 'ONE', 'OF', 'THE', 'MOST', 'INTERESTING', 'CHAPTERS', 'IN', 'BEPPOCO', 'HISTORY'] +2609-156975-0016-2383: ref=['WAS', 'MOSES', 'JUSTIFIED', 'IN', 'RESISTING', 'THE', 'EGYPTIAN', 'TASKMASTER'] +2609-156975-0016-2383: hyp=['WITH', 'MOVES', "IT'S", 'JUST', 'FUN', 'AND', 'RESISTS', 'IN', 'THE', 'EGYPTIAN', 'TAX', 'MASTER'] +2609-156975-0017-2384: ref=['IS', 'PEONAGE', 'ALWAYS', 'DISASTROUS', 'NOT', 'ONLY', 'TO', 'ITS', 'VICTIMS', 'BUT', 'ALSO', 'TO', 'THE', 'GOVERNMENT', 'IMPOSING', 'IT'] +2609-156975-0017-2384: hyp=['HIS', 'PINIONS', 'ALWAYS', 'DISASTRATES', 'NOT', 'OWING', 'TO', 'ITS', 'VICTIMS', 'BUT', 'ALSO', 'TO', 'THE', 'GOVERNMENT', 'IMPOSING', 'IT'] +2609-156975-0018-2385: ref=['NATURALLY', 'HE', 'WENT', 'TO', 'THE', 'LAND', 'OF', 'MIDIAN'] +2609-156975-0018-2385: hyp=['NATURALLY', 'HE', 'WENT', 'TO', 'THE', 'LAND', 'OF', 'MEDIAN'] +2609-156975-0019-2386: ref=['THE', 'WILDERNESS', 'TO', 'THE', 'EAST', 'OF', 'EGYPT', 'HAD', 'FOR', 'CENTURIES', 'BEEN', 'THE', 'PLACE', 'OF', 'REFUGE', 'FOR', 'EGYPTIAN', 'FUGITIVES'] +2609-156975-0019-2386: hyp=['THE', 'WILDERNESS', 'TO', 'THE', 'EAST', 'OF', 'EGYPT', 'AND', 'FOR', 'CENTURIES', 'BEEN', 'THE', 'PLATES', 'OF', 'REFUGERY', 'EGYPTIAN', 'FUGITIVE'] +2609-156975-0020-2387: ref=['FROM', 'ABOUT', 'TWO', 'THOUSAND', 'B', 'C'] +2609-156975-0020-2387: hyp=['FROM', 'A', 'BOUT', 'TWO', 'THOUSAND', 'C'] +2609-156975-0021-2388: ref=['ON', 'THE', 'BORDERS', 'OF', 'THE', 'WILDERNESS', 'HE', 'FOUND', 'CERTAIN', 'BEDOUIN', 'HERDSMEN', 'WHO', 'RECEIVED', 'HIM', 'HOSPITABLY'] +2609-156975-0021-2388: hyp=['ON', 'THE', 'BORDERS', 'OF', 'THE', 'WIDERNESS', 'HE', 'FOUND', 'CERTAIN', 'BEDOING', 'HERDSMEN', 'WHO', 'RECEIVED', 'HIM', 'HOW', 'SPECTABLY'] +2609-156975-0022-2389: ref=['THESE', 'SAND', 'WANDERERS', 'SENT', 'HIM', 'ON', 'FROM', 'TRIBE', 'TO', 'TRIBE', 'UNTIL', 'HE', 'REACHED', 'THE', 'LAND', 'OF', 'KEDEM', 'EAST', 'OF', 'THE', 'DEAD', 'SEA', 'WHERE', 'HE', 'REMAINED', 'FOR', 'A', 'YEAR', 'AND', 'A', 'HALF'] +2609-156975-0022-2389: hyp=['THESE', 'SEND', 'WONDERERS', 'SENT', 'HIM', 'ON', 'FROM', 'TIME', 'TO', 'TIME', 'INTO', 'A', 'REACH', 'THE', 'LAND', 'OF', 'KIDAM', 'EACH', 'OF', 'THE', 'DEAD', 'SEA', 'WHERE', 'HE', 'REMAINED', 'FOR', 'A', 'YEAR', 'AND', 'A', 'HALF'] +2609-156975-0023-2390: ref=['LATER', 'HE', 'FOUND', 'HIS', 'WAY', 'TO', 'THE', 'COURT', 'OF', 'ONE', 'OF', 'THE', 'LOCAL', 'KINGS', 'IN', 'CENTRAL', 'PALESTINE', 'WHERE', 'HE', 'MARRIED', 'AND', 'BECAME', 'IN', 'TIME', 'A', 'PROSPEROUS', 'LOCAL', 'PRINCE'] +2609-156975-0023-2390: hyp=['LATER', 'HE', 'FOUND', 
'HIS', 'WAY', 'TO', 'THE', 'COURT', 'OF', 'ONE', 'OF', 'THE', 'LOCAL', 'KINGS', 'AND', 'CENTRAL', 'PALASTEIN', 'WHERE', 'HE', 'MARRIED', 'AND', 'BECAME', 'IN', 'THE', 'TIME', 'A', 'PROSPEROUS', 'LOCAL', 'PRINCE'] +2609-156975-0024-2391: ref=['THE', 'SCHOOL', 'OF', 'THE', 'WILDERNESS'] +2609-156975-0024-2391: hyp=['THE', 'SCHOOL', 'AND', 'THE', 'WEARINESS'] +2609-156975-0025-2392: ref=['THE', 'STORY', 'OF', 'MOSES', 'IS', 'IN', 'MANY', 'WAYS', 'CLOSELY', 'PARALLEL', 'TO', 'THAT', 'OF', 'SINUHIT'] +2609-156975-0025-2392: hyp=['THE', 'STORY', 'MOSES', 'IS', 'IN', 'MANY', 'WAYS', 'CLOSELY', 'PARALLEL', 'TO', 'THAT', 'AS', 'SOON', 'WIT'] +2609-156975-0026-2393: ref=['THE', 'PRIEST', 'OF', 'THE', 'SUB', 'TRIBE', 'OF', 'THE', 'KENITES', 'RECEIVED', 'HIM', 'INTO', 'HIS', 'HOME', 'AND', 'GAVE', 'HIM', 'HIS', 'DAUGHTER', 'IN', 'MARRIAGE'] +2609-156975-0026-2393: hyp=['THE', 'PRIEST', 'OF', 'THE', 'SUBTERRAB', 'OF', 'THE', 'KANITE', 'RECEIVED', 'HIM', 'INTO', 'HIS', 'HOME', 'AND', 'GAVE', 'HIM', 'HIS', 'DAUGHTER', 'IN', 'MARRIAGE'] +2609-156975-0027-2394: ref=['NOTE', 'THE', 'CHARACTERISTIC', 'ORIENTAL', 'IDEA', 'OF', 'MARRIAGE'] +2609-156975-0027-2394: hyp=['NOTE', 'THE', 'CARE', 'OF', 'A', 'RIDICT', 'ORIENTAL', 'AND', 'GIVE', "MARY'S"] +2609-156975-0028-2395: ref=['HERE', 'MOSES', 'LEARNED', 'THE', 'LESSONS', 'THAT', 'WERE', 'ESSENTIAL', 'FOR', 'HIS', 'TRAINING', 'AS', 'THE', 'LEADER', 'AND', 'DELIVERER', 'OF', 'HIS', 'PEOPLE'] +2609-156975-0028-2395: hyp=['HERE', 'MOSES', 'LEARNED', 'THAT', 'LESSONS', 'THAT', 'WERE', 'ESSENTIAL', 'FOR', 'HIS', 'TRAINING', 'AS', 'A', 'LEADER', 'AND', 'DELIVERER', 'OF', 'HIS', 'PEOPLE'] +2609-156975-0029-2396: ref=['AFTER', 'THE', 'CAPTURE', 'OF', 'JERICHO', 'CERTAIN', 'OF', 'THEM', 'WENT', 'UP', 'WITH', 'THE', 'SOUTHERN', 'TRIBES', 'TO', 'CONQUER', 'SOUTHERN', 'PALESTINE'] +2609-156975-0029-2396: hyp=['ANSWERED', 'THE', 'CAPTURE', 'OF', 'JERICHO', 'CERTAIN', 'OF', 'THEM', 'WENT', 'UP', 'WITH', 'A', 'SUDDEN', 'TRIUMPHS', 'WHO', 'CONCUR', 'SOUTHERN', 'PALESTINE'] +2609-156975-0030-2397: ref=['MANY', 'MODERN', 'SCHOLARS', 'DRAW', 'THE', 'CONCLUSION', 'FROM', 'THE', 'BIBLICAL', 'NARRATIVE', 'THAT', 'IT', 'WAS', 'FROM', 'THE', 'KENITES', 'THAT', 'MOSES', 'FIRST', 'LEARNED', 'OF', 'YAHWEH', 'OR', 'AS', 'THE', 'DISTINCTIVE', 'NAME', 'OF', "ISRAEL'S", 'GOD', 'WAS', 'TRANSLATED', 'BY', 'LATER', 'JEWISH', 'SCRIBES', 'JEHOVAH'] +2609-156975-0030-2397: hyp=['MANY', 'MODERN', 'SCHOLARS', 'DRAWING', 'THE', 'CONCLUSION', 'FROM', 'THE', 'BIBBICAL', 'NARRATIVE', 'THAT', 'IT', 'WAS', 'FROM', 'THE', 'KENITE', 'SNAT', 'MOSES', 'FIRST', 'LEARNED', 'OF', 'YANAWAY', 'OR', 'AS', 'THE', 'DISTINCTIVE', 'NAME', 'OF', 'ISRAEL', 'GUN', 'WAS', 'TRANSGRATED', 'BY', 'LATER', 'TO', 'ITS', 'GRIMES', 'JEHOVAH'] +2609-156975-0031-2398: ref=['DO', 'THE', 'EARLIEST', 'HEBREW', 'TRADITIONS', 'IMPLY', 'THAT', 'THE', 'ANCESTORS', 'OF', 'THE', 'ISRAELITES', 'WERE', 'WORSHIPPERS', 'OF', 'JEHOVAH'] +2609-156975-0031-2398: hyp=['DO', 'THE', 'AREIAT', 'SEA', 'BERTRADIZANCE', 'IMPLY', 'THAT', 'THE', 'INCES', 'OF', 'THE', 'ISRAITS', 'WERE', 'WORSE', 'SUPPOSED', 'OF', 'JEHOVAH'] +2609-156975-0032-2399: ref=['THE', 'TITLE', 'OF', 'HIS', 'FATHER', 'IN', 'LAW', 'IMPLIES', 'THAT', 'THIS', 'PRIEST', 'MINISTERED', 'AT', 'SOME', 'WILDERNESS', 'SANCTUARY'] +2609-156975-0032-2399: hyp=['THE', 'TANA', 'OF', 'HIS', 'FUNDEMENT', 'IMPLIES', 'AT', 'THIS', 'PRIEST', 'MINISTERED', 'AT', 'SOME', 'LITERN', 'SANCTUARY'] +2609-156975-0033-2400: ref=['MOSES', 'IN', 'THE', 'HOME', 'OF', 'THE', 'MIDIAN', 'PRIEST', 'WAS', 'BROUGHT', 
'INTO', 'DIRECT', 'AND', 'CONSTANT', 'CONTACT', 'WITH', 'THE', 'JEHOVAH', 'WORSHIP'] +2609-156975-0033-2400: hyp=['MOSES', 'IN', 'THE', 'HOME', 'OF', 'THE', 'MENDIAN', 'PRIESTS', 'WAS', 'BROUGHT', 'INTO', 'DIRECT', 'AND', 'CONSTANT', 'CONTACT', 'WITH', 'THE', 'JEHOVAH', 'WORSHIP'] +2609-156975-0034-2401: ref=['THE', 'CRUEL', 'FATE', 'OF', 'HIS', 'PEOPLE', 'AND', 'THE', 'PAINFUL', 'EXPERIENCE', 'IN', 'EGYPT', 'THAT', 'HAD', 'DRIVEN', 'HIM', 'INTO', 'THE', 'WILDERNESS', 'PREPARED', 'HIS', 'MIND', 'TO', 'RECEIVE', 'THIS', 'TRAINING'] +2609-156975-0034-2401: hyp=['THE', 'CRUEL', 'FATE', 'OF', 'THIS', 'PEOPLE', 'IN', 'THE', 'PAINFUL', 'EXPERIENCE', 'IN', 'EGYPT', 'THAT', 'HAD', 'DRIVEN', 'HIM', 'INTO', 'THE', 'WILDERNESS', 'PREPARED', 'HIS', 'MIND', 'TO', 'RECEIVE', 'THIS', 'TRAINING'] +2609-156975-0035-2402: ref=['HIS', 'QUEST', 'WAS', 'FOR', 'A', 'JUST', 'AND', 'STRONG', 'GOD', 'ABLE', 'TO', 'DELIVER', 'THE', 'OPPRESSED'] +2609-156975-0035-2402: hyp=['HIS', 'FRENCH', 'WAS', 'FOR', 'A', 'JETS', 'AND', 'STRONG', 'GOD', 'ABLE', 'TO', 'DRIVER', 'THE', 'OPPRESSED'] +2609-156975-0036-2403: ref=['THE', 'WILDERNESS', 'WITH', 'ITS', 'LURKING', 'FOES', 'AND', 'THE', 'EVER', 'PRESENT', 'DREAD', 'OF', 'HUNGER', 'AND', 'THIRST', 'DEEPENED', 'HIS', 'SENSE', 'OF', 'NEED', 'AND', 'OF', 'DEPENDENCE', 'UPON', 'A', 'POWER', 'ABLE', 'TO', 'GUIDE', 'THE', 'DESTINIES', 'OF', 'MEN'] +2609-156975-0036-2403: hyp=['THE', 'WIDERNESS', 'WITH', 'ITS', 'LURKING', 'FOES', 'AND', 'THE', 'EVER', 'PRESENT', 'DREAD', 'OF', 'HUNGER', 'AND', 'THIRST', 'DEEP', 'IN', 'DESCENTS', 'OF', 'NEED', 'AND', 'OF', 'DEPENDENCE', 'UPON', 'THE', 'POWER', 'ABLE', 'TO', 'GOD', 'THE', "DEBT'S", 'NEEDS', 'OF', 'MEN'] +2609-156975-0037-2404: ref=['THE', 'PEASANTS', 'OF', 'THE', 'VAST', 'ANTOLIAN', 'PLAIN', 'IN', 'CENTRAL', 'ASIA', 'MINOR', 'STILL', 'CALL', 'EVERY', 'LIFE', 'GIVING', 'SPRING', 'GOD', 'HATH', 'GIVEN'] +2609-156975-0037-2404: hyp=['THE', 'PEASANTS', 'OF', 'THE', 'VATS', 'IN', 'TOWING', 'IN', 'PLAIN', 'OF', 'CENTRAL', 'AS', 'A', 'MINOR', 'SO', 'WILL', 'CALL', 'EVERY', 'LIFE', 'GIVEN', 'SPRING', 'GOD', 'HATH', 'GIVEN'] +2609-156975-0038-2405: ref=['THE', 'CONSTANT', 'NECESSITY', 'OF', 'MEETING', 'THE', 'DANGERS', 'OF', 'THE', 'WILDERNESS', 'AND', 'OF', 'DEFENDING', 'THE', 'FLOCKS', 'ENTRUSTED', 'TO', 'MOSES', 'CARE', 'DEVELOPED', 'HIS', 'COURAGE', 'AND', 'POWER', 'OF', 'LEADERSHIP', 'AND', 'ACTION'] +2609-156975-0038-2405: hyp=['THEY', "CAN'T", 'SENTIN', 'NECESSITY', 'A', 'MEETING', 'THE', 'DANGERS', 'OF', 'THE', 'WIDERNESS', 'AND', 'THE', 'DEFENDING', 'THE', 'FLAUNT', 'AND', 'TRITES', 'OF', 'JAMIES', 'ITS', 'CARE', 'DEVELOPED', 'HIS', 'COURAGE', 'AND', 'POWER', 'OF', 'LEGERSHIP', 'AND', 'ACTION'] +2609-157645-0000-2352: ref=['EVIDENTLY', 'THE', 'INTENTION', 'WAS', 'TO', 'MAKE', 'THINGS', 'PLEASANT', 'FOR', 'THE', 'ROYAL', 'FOE', 'OF', 'TOBACCO', 'DURING', 'HIS', 'VISIT'] +2609-157645-0000-2352: hyp=['EVIDENTLY', 'THE', 'INTENTION', 'WHICH', 'MADE', 'THINGS', 'PRESENT', 'FOR', 'THE', 'ROYAL', 'FOLK', 'A', 'TOBACCO', 'DURING', 'HIS', 'VISIT'] +2609-157645-0001-2353: ref=['THE', 'PROHIBITION', 'IN', 'THE', 'REGULATION', 'QUOTED', 'OF', 'SMOKING', 'IN', 'SAINT', "MARY'S", 'CHURCH', 'REFERRED', 'IT', 'MAY', 'BE', 'NOTED', 'TO', 'THE', 'ACT', 'WHICH', 'WAS', 'HELD', 'THEREIN'] +2609-157645-0001-2353: hyp=['THE', 'PROBITS', 'AND', 'THE', 'REGULATING', 'QUOTED', 'HER', 'SMOKING', 'AND', 'SAINT', "MARY'S", 'CHURCH', 'REFERRED', 'IT', 'MAY', 'BE', 'NOTED', 'TO', 'THE', 'ACT', 'WHICH', 'WAS', 'HELD', 'THEREIN'] +2609-157645-0002-2354: 
ref=['SOMETIMES', 'TOBACCO', 'WAS', 'USED', 'IN', 'CHURCH', 'FOR', 'DISINFECTING', 'OR', 'DEODORIZING', 'PURPOSES'] +2609-157645-0002-2354: hyp=['SOMETIMES', 'TOBACCO', 'IS', 'USED', 'IN', 'CHURCH', 'FOR', 'DISINFACTANT', 'ORDEALIZING', 'PURPOSES'] +2609-157645-0003-2355: ref=['BLACKBURN', 'ARCHBISHOP', 'OF', 'YORK', 'WAS', 'A', 'GREAT', 'SMOKER'] +2609-157645-0003-2355: hyp=['BLACKBURN', 'ARCHBISHOP', 'OF', 'YORK', 'WAS', 'A', 'GREAT', 'SMOKER'] +2609-157645-0004-2356: ref=['ON', 'ONE', 'OCCASION', 'HE', 'WAS', 'AT', 'SAINT', "MARY'S", 'CHURCH', 'NOTTINGHAM', 'FOR', 'A', 'CONFIRMATION'] +2609-157645-0004-2356: hyp=['ON', 'ONE', 'OCCASION', 'HE', 'WAS', 'AT', 'SAINT', "MARY'S", 'CHURCH', 'NOT', 'IN', 'HAM', 'FOR', 'A', 'CONFIRMATON'] +2609-157645-0005-2357: ref=['ANOTHER', 'EIGHTEENTH', 'CENTURY', 'CLERICAL', 'WORTHY', 'THE', 'FAMOUS', 'DOCTOR', 'PARR', 'AN', 'INVETERATE', 'SMOKER', 'WAS', 'ACCUSTOMED', 'TO', 'DO', 'WHAT', 'MISTER', 'DISNEY', 'PREVENTED', 'ARCHBISHOP', 'BLACKBURN', 'FROM', 'DOING', 'HE', 'SMOKED', 'IN', 'HIS', 'VESTRY', 'AT', 'HATTON'] +2609-157645-0005-2357: hyp=['ANOTHER', 'EIGHTEENTH', 'CENTURY', 'CLERICAL', 'WORTHY', 'THE', 'FAMOUS', 'DOCTROPOS', 'AN', 'INVETERATE', 'SMOKER', 'WAS', 'ACCUSTOMED', 'TO', 'DO', 'AT', 'MIDSRSANY', 'PREVENTED', 'ARCHBISH', 'AT', 'BLACKBURN', 'FROM', 'DOING', 'HE', 'SMOKED', 'IN', 'HIS', 'VETCHERY', 'AT', 'HATTON'] +2609-157645-0006-2358: ref=['PARR', 'WAS', 'SUCH', 'A', 'CONTINUAL', 'SMOKER', 'THAT', 'ANYONE', 'WHO', 'CAME', 'INTO', 'HIS', 'COMPANY', 'IF', 'HE', 'HAD', 'NEVER', 'SMOKED', 'BEFORE', 'HAD', 'TO', 'LEARN', 'THE', 'USE', 'OF', 'A', 'PIPE', 'AS', 'A', 'MEANS', 'OF', 'SELF', 'DEFENCE'] +2609-157645-0006-2358: hyp=['POOR', 'WAS', 'SUCH', 'A', 'CONTINUOUS', 'SMOKER', 'THAT', 'ANY', 'ONE', 'WHO', 'CAME', 'INTO', 'HIS', 'COMPANY', 'FIT', 'HAD', 'NEVER', 'SMOKED', 'BEFORE', 'AND', 'TO', 'LEARNED', 'THE', 'USE', 'OF', 'A', 'PIPE', 'AS', 'A', 'MEANS', 'OF', 'SELF', 'DEFENCE'] +2609-157645-0007-2359: ref=['ONE', 'SUNDAY', 'SAYS', 'MISTER', 'DITCHFIELD', 'HE', 'HAD', 'AN', 'EXTRA', 'PIPE', 'AND', 'JOSHUA', 'THE', 'CLERK', 'TOLD', 'HIM', 'THAT', 'THE', 'PEOPLE', 'WERE', 'GETTING', 'IMPATIENT'] +2609-157645-0007-2359: hyp=['ONE', 'SUNDAY', 'SAYS', 'MISTER', 'DIXFIELD', 'HE', 'HAD', 'IN', 'NATURE', 'PIPE', 'AND', 'JANSHIRE', 'THE', 'CLERK', 'TOLD', 'HIM', 'THAT', 'THE', 'PEOPLE', 'WERE', 'GETTING', 'THEM', 'IMPATIENT'] +2609-157645-0008-2360: ref=['LET', 'THEM', 'SING', 'ANOTHER', 'PSALM', 'SAID', 'THE', 'CURATE'] +2609-157645-0008-2360: hyp=['THEM', 'TO', 'THEM', 'SING', 'AND', 'NEITHER', 'PSALMS', 'SAY', 'THAT', 'THE', 'CURATE'] +2609-157645-0009-2361: ref=['THEY', 'HAVE', 'SIR', 'REPLIED', 'THE', 'CLERK'] +2609-157645-0009-2361: hyp=['THEY', 'HAVE', 'SIR', 'REPLIED', 'THE', 'CLERK'] +2609-157645-0010-2362: ref=['THEN', 'LET', 'THEM', 'SING', 'THE', 'HUNDRED', 'AND', 'NINETEENTH', 'REPLIED', 'THE', 'CURATE'] +2609-157645-0010-2362: hyp=['THEN', 'LET', 'THEM', 'SING', 'THE', 'HUNDRED', 'AND', 'NINETEENTH', 'REPLIED', 'THE', 'CURATE'] +2609-157645-0011-2363: ref=['SIX', 'ARMS', 'THE', 'NEAREST', 'WITHIN', 'REACH', 'PRESENTED', 'WITH', 'AN', 'OBEDIENT', 'START', 'AS', 'MANY', 'TOBACCO', 'POUCHES', 'TO', 'THE', 'MAN', 'OF', 'OFFICE'] +2609-157645-0011-2363: hyp=['SIX', 'ARMS', 'THE', 'NURSE', 'WITHIN', 'REACH', 'PRESENTED', 'WITH', 'AN', 'OBEDIENT', 'START', 'AND', 'AS', 'MANY', 'TOBACCO', 'PIUCES', 'TO', 'THE', 'MAN', 'OF', 'OFFICE'] +2609-157645-0012-2364: ref=['DAVID', 'DEANS', 'HOWEVER', 'DID', 'NOT', 'AT', 'ALL', 'APPROVE', 'THIS', 
'IRREVERENCE'] +2609-157645-0012-2364: hyp=['DAVID', 'DEAN', 'SAMURED', 'DID', 'NOT', 'AT', 'ALL', 'IMPROVE', 'THIS', 'IRREVERENCE'] +2609-157645-0013-2365: ref=['GOING', 'TO', 'CHURCH', 'AT', 'HAYES', 'IN', 'THOSE', 'DAYS', 'MUST', 'HAVE', 'BEEN', 'QUITE', 'AN', 'EXCITING', 'EXPERIENCE'] +2609-157645-0013-2365: hyp=['GO', 'INTO', 'CHURCH', 'AUNT', 'HAZE', 'AND', 'THUS', 'THE', "DAY'S", 'MISS', 'HAVE', 'BEEN', 'ACQUAINT', 'AN', 'THESE', 'SIGNING', 'INSPIRANTS'] +2609-157645-0014-2366: ref=['WHEN', 'THESE', 'MEN', 'IN', 'THE', 'COURSE', 'OF', 'MY', 'REMONSTRANCE', 'FOUND', 'THAT', 'I', 'WAS', 'NOT', 'GOING', 'TO', 'CONTINUE', 'THE', 'CUSTOM', 'THEY', 'NO', 'LONGER', 'CARED', 'TO', 'BE', 'COMMUNICANTS'] +2609-157645-0014-2366: hyp=['WHEN', 'THESE', 'MEN', 'AIMED', 'THE', 'COURSE', 'OF', 'MY', 'REMONSTRANCE', 'FOUND', 'OUT', 'THAT', 'WAS', 'NOT', 'GOING', 'TO', 'CONTINUE', 'THE', 'CUSTOM', 'THEY', 'NO', 'LONGER', 'CARED', 'TO', 'BE', 'COMMUNICANTS'] +2609-169640-0000-2406: ref=['PROAS', 'IN', 'THAT', 'QUARTER', 'WERE', 'USUALLY', 'DISTRUSTED', 'BY', 'SHIPS', 'IT', 'IS', 'TRUE', 'BUT', 'THE', 'SEA', 'IS', 'FULL', 'OF', 'THEM', 'AND', 'FAR', 'MORE', 'ARE', 'INNOCENT', 'THAN', 'ARE', 'GUILTY', 'OF', 'ANY', 'ACTS', 'OF', 'VIOLENCE'] +2609-169640-0000-2406: hyp=['PRATS', 'IN', 'THAT', 'QUARTER', 'WERE', 'USUALLY', 'DISTRUDGED', 'BY', 'THE', 'STIPS', 'AT', 'IS', 'TRUE', 'BUT', 'THE', 'SEA', 'IS', 'FOUR', 'OF', 'THEM', 'FOR', 'MORE', 'OR', 'INNOCENT', 'THAN', 'OUR', 'GUILTY', 'OF', 'ANY', 'ACT', 'OF', 'ONLENETS'] +2609-169640-0001-2407: ref=['AN', 'HOUR', 'AFTER', 'THE', 'SUN', 'HAD', 'SET', 'THE', 'WIND', 'FELL', 'TO', 'A', 'LIGHT', 'AIR', 'THAT', 'JUST', 'KEPT', 'STEERAGE', 'WAY', 'ON', 'THE', 'SHIP'] +2609-169640-0001-2407: hyp=['NOW', 'I', 'AFTER', 'THE', 'SUN', 'HAD', 'SET', 'THE', 'WIND', 'FELL', 'TO', 'AN', 'LIGHT', 'AIR', 'BUT', 'JEST', 'KEPT', 'STEERAGE', 'WAY', 'ON', 'THE', 'SHIP'] +2609-169640-0002-2408: ref=['FORTUNATELY', 'THE', 'JOHN', 'WAS', 'NOT', 'ONLY', 'FAST', 'BUT', 'SHE', 'MINDED', 'HER', 'HELM', 'AS', 'A', 'LIGHT', 'FOOTED', 'GIRL', 'TURNS', 'IN', 'A', 'LIVELY', 'DANCE'] +2609-169640-0002-2408: hyp=['FORTUNATELY', 'THE', 'JOHN', 'WAS', 'NOT', 'ONLY', 'FAST', 'BUT', 'SEA', 'MINDED', 'HER', 'HAIL', 'AS', 'THE', 'LIGHTFOOTED', 'GIRL', 'TURNED', 'IN', 'A', 'LOVELY', 'DANCE'] +2609-169640-0003-2409: ref=['I', 'NEVER', 'WAS', 'IN', 'A', 'BETTER', 'STEERING', 'SHIP', 'MOST', 'ESPECIALLY', 'IN', 'MODERATE', 'WEATHER'] +2609-169640-0003-2409: hyp=['AND', 'NEVER', 'WAS', 'IN', 'A', 'BETTER', 'STERN', 'SHIP', 'POESY', 'SPENTRY', 'AND', 'MODERATE', 'WEATHER'] +2609-169640-0004-2410: ref=['MISTER', 'MARBLE', 'HE', 'I', 'DO', 'BELIEVE', 'WAS', 'FAIRLY', 'SNOOZING', 'ON', 'THE', 'HEN', 'COOPS', 'BEING', 'LIKE', 'THE', 'SAILS', 'AS', 'ONE', 'MIGHT', 'SAY', 'BARELY', 'ASLEEP'] +2609-169640-0004-2410: hyp=['MISTER', 'MARBLE', 'HE', 'OUGHT', 'TO', 'BELIEVE', 'WAS', "FAIRLY'S", 'NEWSING', 'ON', 'THE', 'HINCOUX', 'BEING', 'LIKE', 'THE', 'SAILORS', 'AS', 'ONE', 'MIGHT', 'SAY', 'VARIOUS', 'SLEEP'] +2609-169640-0005-2411: ref=['AT', 'THAT', 'MOMENT', 'I', 'HEARD', 'A', 'NOISE', 'ONE', 'FAMILIAR', 'TO', 'SEAMEN', 'THAT', 'OF', 'AN', 'OAR', 'FALLING', 'IN', 'A', 'BOAT'] +2609-169640-0005-2411: hyp=['AT', 'THAT', 'MOMENT', 'I', 'HAD', 'A', 'NOISE', 'WHEN', 'FAMILIAR', 'TO', 'SEE', 'MEN', 'THAT', 'OF', 'AN', 'OAR', 'FOLLOWING', 'IN', 'THE', 'BOAT'] +2609-169640-0006-2412: ref=['I', 'SANG', 'OUT', 'SAIL', 'HO', 'AND', 'CLOSE', 'ABOARD'] +2609-169640-0006-2412: hyp=['AS', 'I', 'YET', 'SAIL', 'HO', 'AND', 
'CLOSE', 'ABROAD'] +2609-169640-0007-2413: ref=['HE', 'WAS', 'TOO', 'MUCH', 'OF', 'A', 'SEAMAN', 'TO', 'REQUIRE', 'A', 'SECOND', 'LOOK', 'IN', 'ORDER', 'TO', 'ASCERTAIN', 'WHAT', 'WAS', 'TO', 'BE', 'DONE'] +2609-169640-0007-2413: hyp=['HE', 'WAS', 'CHIMNETS', 'OF', 'A', 'SEAMAN', 'TO', 'REQUIRE', 'A', 'SECOND', 'LOOK', 'IN', 'ORDER', 'TO', 'ASSERT', 'BUT', 'WAS', 'TO', 'BE', 'DONE'] +2609-169640-0008-2414: ref=['ALTHOUGH', 'THEY', 'WENT', 'THREE', 'FEET', 'TO', 'OUR', 'TWO', 'THIS', 'GAVE', 'US', 'A', 'MOMENT', 'OF', 'BREATHING', 'TIME'] +2609-169640-0008-2414: hyp=['ON', 'THOSE', 'THEY', 'WENT', 'THREE', 'FEET', 'TO', 'OUR', 'TWO', 'THIS', 'GAVE', 'UP', 'SOME', 'MOMENT', 'OF', 'BREASING', 'TIME'] +2609-169640-0009-2415: ref=['AS', 'OUR', 'SHEETS', 'WERE', 'ALL', 'FLYING', 'FORWARD', 'AND', 'REMAINED', 'SO', 'FOR', 'A', 'FEW', 'MINUTES', 'IT', 'GAVE', 'ME', 'LEISURE', 'TO', 'LOOK', 'ABOUT'] +2609-169640-0009-2415: hyp=['AS', 'OUR', 'SEATS', 'WERE', 'OFF', 'LYING', 'FORWARD', 'AND', 'REMAINED', 'SO', 'FOR', 'A', 'FEW', 'MINUTES', 'IT', 'GAVE', 'ME', 'A', 'LEISURE', 'TO', 'LOOK', 'ABOUT'] +2609-169640-0010-2416: ref=['I', 'SOON', 'SAW', 'BOTH', 'PROAS', 'AND', 'GLAD', 'ENOUGH', 'WAS', 'I', 'TO', 'PERCEIVE', 'THAT', 'THEY', 'HAD', 'NOT', 'APPROACHED', 'MATERIALLY', 'NEARER'] +2609-169640-0010-2416: hyp=['I', 'SOON', 'SAW', 'BOTH', 'PROTS', 'IN', 'GRINDING', 'UP', 'WAS', 'I', 'TO', 'PERCEIVE', 'THAT', 'THEY', 'HAD', 'NOT', 'APPROACHED', 'MATERIALLY', 'NEAR'] +2609-169640-0011-2417: ref=['MISTER', 'KITE', 'OBSERVED', 'THIS', 'ALSO', 'AND', 'REMARKED', 'THAT', 'OUR', 'MOVEMENTS', 'HAD', 'BEEN', 'SO', 'PROMPT', 'AS', 'TO', 'TAKE', 'THE', 'RASCALS', 'ABACK'] +2609-169640-0011-2417: hyp=['MISTER', 'KITE', 'OBSERVED', 'IT', 'ALSO', 'IN', 'REMARK', 'THAT', 'OUR', 'MOVEMENTS', 'HAD', 'BEEN', 'SO', 'PROMPT', 'AS', 'TO', 'TAKE', 'THE', 'RATS', 'WAS', 'ABACK'] +2609-169640-0012-2418: ref=['A', 'BREATHLESS', 'STILLNESS', 'SUCCEEDED'] +2609-169640-0012-2418: hyp=['A', 'BRENT', 'WITCH', 'STILLNESS', 'SUCCEEDED'] +2609-169640-0013-2419: ref=['THE', 'PROAS', 'DID', 'NOT', 'ALTER', 'THEIR', 'COURSE', 'BUT', 'NEARED', 'US', 'FAST'] +2609-169640-0013-2419: hyp=['THE', 'POETS', 'DID', 'NOT', 'ENTER', 'THE', 'COURSE', 'BUT', 'NEARED', 'ITS', 'FAST'] +2609-169640-0014-2420: ref=['I', 'HEARD', 'THE', 'RATTLING', 'OF', 'THE', 'BOARDING', 'PIKES', 'TOO', 'AS', 'THEY', 'WERE', 'CUT', 'ADRIFT', 'FROM', 'THE', 'SPANKER', 'BOOM', 'AND', 'FELL', 'UPON', 'THE', 'DECKS'] +2609-169640-0014-2420: hyp=['I', 'HEARD', 'THE', 'RIDING', 'OF', 'THE', 'BOARDING', 'PINES', 'TOO', 'AS', 'THEY', 'WERE', 'CUT', 'ADRIFT', 'FROM', 'THE', 'SPANKER', 'BOOM', 'AND', 'FELL', 'UPON', 'THE', 'DECKS'] +2609-169640-0015-2421: ref=['KITE', 'WENT', 'AFT', 'AND', 'RETURNED', 'WITH', 'THREE', 'OR', 'FOUR', 'MUSKETS', 'AND', 'AS', 'MANY', 'PIKES'] +2609-169640-0015-2421: hyp=['TIGHTLY', 'ACT', 'AND', 'RETURNED', 'WITH', 'THREE', 'OR', 'FOUR', 'MUSKETS', 'AND', 'AS', 'MANY', 'PIKES'] +2609-169640-0016-2422: ref=['THE', 'STILLNESS', 'THAT', 'REIGNED', 'ON', 'BOTH', 'SIDES', 'WAS', 'LIKE', 'THAT', 'OF', 'DEATH'] +2609-169640-0016-2422: hyp=['THE', 'STILLNESS', 'DOWN', 'RINGS', 'ON', 'BOTH', 'SIDES', 'WAS', 'LIKE', 'THAT', 'OF', 'DEATH'] +2609-169640-0017-2423: ref=['THE', 'JOHN', 'BEHAVED', 'BEAUTIFULLY', 'AND', 'CAME', 'ROUND', 'LIKE', 'A', 'TOP'] +2609-169640-0017-2423: hyp=['THE', 'JOHN', 'BEHAVED', 'BEAUTIFULLY', 'HE', 'CAME', 'ROUND', 'LIKE', 'A', 'TOP'] +2609-169640-0018-2424: ref=['THE', 'QUESTION', 'WAS', 'NOW', 'WHETHER', 'WE', 'COULD', 'PASS', 
'THEM', 'OR', 'NOT', 'BEFORE', 'THEY', 'GOT', 'NEAR', 'ENOUGH', 'TO', 'GRAPPLE'] +2609-169640-0018-2424: hyp=['THE', 'QUESTION', 'WAS', 'NOW', 'WHETHER', 'WE', 'COULD', 'PASS', 'THEM', 'OR', 'NOT', 'BEFORE', 'THEY', 'GOT', 'NEARING', 'UP', 'TO', 'GRANTPLE'] +2609-169640-0019-2425: ref=['THE', 'CAPTAIN', 'BEHAVED', 'PERFECTLY', 'WELL', 'IN', 'THIS', 'CRITICAL', 'INSTANT', 'COMMANDING', 'A', 'DEAD', 'SILENCE', 'AND', 'THE', 'CLOSEST', 'ATTENTION', 'TO', 'HIS', 'ORDERS'] +2609-169640-0019-2425: hyp=['THE', 'CAPTAIN', 'BEHAVED', 'PERFECTLY', 'AWAY', 'ON', 'ITS', 'CRITICAL', 'INSTANT', 'COMMANDING', 'A', 'DEAD', 'SILENCE', 'IN', 'THE', 'CLOSETS', 'INTENTION', 'TO', 'HIS', 'ORDERS'] +2609-169640-0020-2426: ref=['NOT', 'A', 'SOUL', 'ON', 'BOARD', 'THE', 'JOHN', 'WAS', 'HURT'] +2609-169640-0020-2426: hyp=['NOW', "I'M", 'SO', 'ON', 'BOARD', 'THE', 'JOHN', 'WAS', 'SHARP'] +2609-169640-0021-2427: ref=['ON', 'OUR', 'SIDE', 'WE', 'GAVE', 'THE', 'GENTLEMEN', 'THE', 'FOUR', 'SIXES', 'TWO', 'AT', 'THE', 'NEAREST', 'AND', 'TWO', 'AT', 'THE', 'STERN', 'MOST', 'PROA', 'WHICH', 'WAS', 'STILL', 'NEAR', 'A', "CABLE'S", 'LENGTH', 'DISTANT'] +2609-169640-0021-2427: hyp=['WHEN', 'OUR', 'SON', 'WE', 'GAVE', 'THE', 'GENTLEMAN', 'THE', 'FOUR', 'SAXES', 'TO', 'AUNT', 'THE', 'NURSE', 'AND', 'TWO', 'AT', 'THE', 'STERNMOST', 'PRO', 'WHICH', 'WAS', 'STILL', 'NEAR', 'A', "CABLE'S", 'LENGTH', 'OF', 'ITS'] +2609-169640-0022-2428: ref=['THEY', 'WERE', 'LIKE', 'THE', 'YELLS', 'OF', 'FIENDS', 'IN', 'ANGUISH'] +2609-169640-0022-2428: hyp=['THEY', 'WERE', 'NIGHT', 'THE', 'YEARS', 'OF', 'FIEND', 'IN', 'ENGLISH'] +2609-169640-0023-2429: ref=['I', 'DOUBT', 'IF', 'WE', 'TOUCHED', 'A', 'MAN', 'IN', 'THE', 'NEAREST', 'PROA'] +2609-169640-0023-2429: hyp=['AND', 'OUT', 'IF', 'WE', 'TOUCHED', 'A', 'MAN', 'IN', 'THE', 'NEAR', 'ITS', 'PRAYER'] +2609-169640-0024-2430: ref=['IN', 'THIS', 'STATE', 'THE', 'SHIP', 'PASSED', 'AHEAD', 'ALL', 'HER', 'CANVAS', 'BEING', 'FULL', 'LEAVING', 'THE', 'PROA', 'MOTIONLESS', 'IN', 'HER', 'WAKE'] +2609-169640-0024-2430: hyp=['AND', 'THAT', 'STATE', 'THE', 'SHIP', 'PASSED', 'AHEAD', 'OF', 'HER', 'CANVAS', 'BEEN', 'FOR', 'LEAVING', 'THE', 'PROTINENT', 'IN', 'HER', 'WAKE'] +3005-163389-0000-1108: ref=['THEY', 'SWARMED', 'UP', 'IN', 'FRONT', 'OF', "SHERBURN'S", 'PALINGS', 'AS', 'THICK', 'AS', 'THEY', 'COULD', 'JAM', 'TOGETHER', 'AND', 'YOU', "COULDN'T", 'HEAR', 'YOURSELF', 'THINK', 'FOR', 'THE', 'NOISE'] +3005-163389-0000-1108: hyp=['THEY', 'SWARMED', 'UP', 'IN', 'FRONT', 'A', "SHERBOURNE'S", 'PALINGS', 'AS', 'THICK', 'AS', 'THEY', 'COULD', 'JAMMED', 'TOGETHER', 'AND', 'YOU', "COULDN'T", 'HEAR', 'YOURSELF', 'THINK', 'FOR', 'THE', 'NOISE'] +3005-163389-0001-1109: ref=['SOME', 'SUNG', 'OUT', 'TEAR', 'DOWN', 'THE', 'FENCE', 'TEAR', 'DOWN', 'THE', 'FENCE'] +3005-163389-0001-1109: hyp=['SOME', 'SUNG', 'OUT', 'TEAR', 'DOWN', 'THE', 'FENCE', 'TEAR', 'DOWN', 'THE', 'FENCE'] +3005-163389-0002-1110: ref=['THE', 'STILLNESS', 'WAS', 'AWFUL', 'CREEPY', 'AND', 'UNCOMFORTABLE'] +3005-163389-0002-1110: hyp=['THE', 'STILLNESS', 'WAS', 'AWFUL', 'CREEPY', 'AND', 'UNCOMFORTABLE'] +3005-163389-0003-1111: ref=['SHERBURN', 'RUN', 'HIS', 'EYE', 'SLOW', 'ALONG', 'THE', 'CROWD', 'AND', 'WHEREVER', 'IT', 'STRUCK', 'THE', 'PEOPLE', 'TRIED', 'A', 'LITTLE', 'TO', 'OUT', 'GAZE', 'HIM', 'BUT', 'THEY', "COULDN'T", 'THEY', 'DROPPED', 'THEIR', 'EYES', 'AND', 'LOOKED', 'SNEAKY'] +3005-163389-0003-1111: hyp=['SHERBIN', 'RUN', 'HIS', 'EYE', 'SLOW', 'ALONG', 'THE', 'CROWD', 'AND', 'WHEREVER', 'IT', 'STRUCK', 'THE', 'PEOPLE', 'TRIED', 'A', 'LITTLE', 
'TO', 'OUTGAZE', 'HIM', 'BUT', 'THEY', "COULDN'T", 'THEY', 'DROPPED', 'THEIR', 'EYES', 'AND', 'LOOKED', 'SNEAKY'] +3005-163389-0004-1112: ref=['THE', 'AVERAGE', "MAN'S", 'A', 'COWARD'] +3005-163389-0004-1112: hyp=['THE', 'AVERAGE', "MAN'S", 'A', 'COWARD'] +3005-163389-0005-1113: ref=['BECAUSE', "THEY'RE", 'AFRAID', 'THE', "MAN'S", 'FRIENDS', 'WILL', 'SHOOT', 'THEM', 'IN', 'THE', 'BACK', 'IN', 'THE', 'DARKAND', "IT'S", 'JUST', 'WHAT', 'THEY', 'WOULD', 'DO'] +3005-163389-0005-1113: hyp=['BECAUSE', "THEY'RE", 'AFRAID', 'THE', "MAN'S", 'FRIENDS', 'WILL', 'SHOOT', 'THEM', 'IN', 'THE', 'BACK', 'IN', 'THE', 'DARK', 'AND', 'IS', 'JUST', 'WHAT', 'THEY', 'WOULD', 'DO'] +3005-163389-0006-1114: ref=['SO', 'THEY', 'ALWAYS', 'ACQUIT', 'AND', 'THEN', 'A', 'MAN', 'GOES', 'IN', 'THE', 'NIGHT', 'WITH', 'A', 'HUNDRED', 'MASKED', 'COWARDS', 'AT', 'HIS', 'BACK', 'AND', 'LYNCHES', 'THE', 'RASCAL'] +3005-163389-0006-1114: hyp=['SO', 'THEY', 'ALWAYS', 'ACQUIT', 'AND', 'THEN', 'A', 'MAN', 'GOES', 'IN', 'THE', 'NIGHT', 'WITH', 'A', 'HUNDRED', 'MASSED', 'COWARDS', 'AT', 'HIS', 'BACK', 'AND', 'LYNCHES', 'THE', 'RASCAL'] +3005-163389-0007-1115: ref=['YOU', "DIDN'T", 'WANT', 'TO', 'COME'] +3005-163389-0007-1115: hyp=['YOU', "DIDN'T", 'WANT', 'TO', 'COME'] +3005-163389-0008-1116: ref=['BUT', 'A', 'MOB', 'WITHOUT', 'ANY', 'MAN', 'AT', 'THE', 'HEAD', 'OF', 'IT', 'IS', 'BENEATH', 'PITIFULNESS'] +3005-163389-0008-1116: hyp=['BUT', 'A', 'MOB', 'WITHOUT', 'ANY', 'MAN', 'AT', 'THE', 'HEAD', 'OF', 'IT', 'IS', 'BENEATH', 'PITIFULNESS'] +3005-163389-0009-1117: ref=['NOW', 'LEAVE', 'AND', 'TAKE', 'YOUR', 'HALF', 'A', 'MAN', 'WITH', 'YOU', 'TOSSING', 'HIS', 'GUN', 'UP', 'ACROSS', 'HIS', 'LEFT', 'ARM', 'AND', 'COCKING', 'IT', 'WHEN', 'HE', 'SAYS', 'THIS'] +3005-163389-0009-1117: hyp=['NOW', 'LEE', 'AND', 'TAKE', 'YOUR', 'HALF', 'A', 'MAN', 'WITH', 'YOU', 'TAUSEN', 'HE', 'HAS', 'GUN', 'UP', 'ACROSS', 'HIS', 'LEFT', 'ARM', 'AND', 'COCKING', 'IT', 'WHEN', 'HE', 'SAYS', 'THIS'] +3005-163389-0010-1118: ref=['THE', 'CROWD', 'WASHED', 'BACK', 'SUDDEN', 'AND', 'THEN', 'BROKE', 'ALL', 'APART', 'AND', 'WENT', 'TEARING', 'OFF', 'EVERY', 'WHICH', 'WAY', 'AND', 'BUCK', 'HARKNESS', 'HE', 'HEELED', 'IT', 'AFTER', 'THEM', 'LOOKING', 'TOLERABLE', 'CHEAP'] +3005-163389-0010-1118: hyp=['THE', 'CROWD', 'WASHED', 'BACK', 'SUDDEN', 'AND', 'THEN', 'BROKE', 'ALL', 'APART', 'AND', 'WENT', 'TEARING', 'OFF', 'EVERY', 'WITCH', 'WAY', 'AND', 'BUCK', 'HARKNESS', 'HE', 'HEALED', 'IT', 'AFTER', 'THEM', 'LOOK', 'INTOLERABLE', 'CHEEK'] +3005-163389-0011-1119: ref=['YOU', "CAN'T", 'BE', 'TOO', 'CAREFUL'] +3005-163389-0011-1119: hyp=['HE', "CAN'T", 'BE', 'TOO', 'CAREFUL'] +3005-163389-0012-1120: ref=['THEY', 'ARGUED', 'AND', 'TRIED', 'TO', 'KEEP', 'HIM', 'OUT', 'BUT', 'HE', "WOULDN'T", 'LISTEN', 'AND', 'THE', 'WHOLE', 'SHOW', 'COME', 'TO', 'A', 'STANDSTILL'] +3005-163389-0012-1120: hyp=['THEY', 'ARGUED', 'AND', 'TRIED', 'TO', 'KEEP', 'HIM', 'OUT', 'BUT', 'HE', "WOULDN'T", 'LISTEN', 'AND', 'A', 'WHOLE', 'SHOW', 'COME', 'TO', 'A', 'FANSTILL'] +3005-163389-0013-1121: ref=['AND', 'ONE', 'OR', 'TWO', 'WOMEN', 'BEGUN', 'TO', 'SCREAM'] +3005-163389-0013-1121: hyp=['AND', 'ONE', 'OR', 'TWO', 'WOMEN', 'BEGAN', 'TO', 'SCREAM'] +3005-163389-0014-1122: ref=['SO', 'THEN', 'THE', 'RINGMASTER', 'HE', 'MADE', 'A', 'LITTLE', 'SPEECH', 'AND', 'SAID', 'HE', 'HOPED', 'THERE', "WOULDN'T", 'BE', 'NO', 'DISTURBANCE', 'AND', 'IF', 'THE', 'MAN', 'WOULD', 'PROMISE', 'HE', "WOULDN'T", 'MAKE', 'NO', 'MORE', 'TROUBLE', 'HE', 'WOULD', 'LET', 'HIM', 'RIDE', 'IF', 'HE', 'THOUGHT', 'HE', 'COULD', 
'STAY', 'ON', 'THE', 'HORSE'] +3005-163389-0014-1122: hyp=['SO', 'THEN', 'A', 'RING', 'MASTER', 'HE', 'MADE', 'A', 'LITTLE', 'SPEECH', 'AND', 'SAID', 'HE', 'HOPED', 'THERE', "WOULDN'T", 'BE', 'NO', 'DISTURBANCE', 'AND', 'IF', 'THE', 'MAN', 'WOULD', 'PROMISE', 'HE', "WOULDN'T", 'MAKE', 'NO', 'MORE', 'TROUBLE', 'HE', 'WOULD', 'LET', 'HIM', 'RIDE', 'IF', 'HE', 'THOUGHT', 'HE', 'COULD', 'STAY', 'ON', 'THE', 'HORSE'] +3005-163389-0015-1123: ref=['IT', "WARN'T", 'FUNNY', 'TO', 'ME', 'THOUGH', 'I', 'WAS', 'ALL', 'OF', 'A', 'TREMBLE', 'TO', 'SEE', 'HIS', 'DANGER'] +3005-163389-0015-1123: hyp=['IT', "WARN'T", 'FUNNY', 'TO', 'ME', 'THOUGH', 'I', 'WAS', 'ALL', 'OF', 'A', 'TREMBLE', 'TO', 'SEE', 'HIS', 'DANGER'] +3005-163389-0016-1124: ref=['AND', 'THE', 'HORSE', 'A', 'GOING', 'LIKE', 'A', 'HOUSE', 'AFIRE', 'TOO'] +3005-163389-0016-1124: hyp=['AND', 'A', 'HORSE', 'A', 'GOING', 'LIKE', 'A', 'HOUSE', 'AFAR', 'TOO'] +3005-163389-0017-1125: ref=['HE', 'SHED', 'THEM', 'SO', 'THICK', 'THEY', 'KIND', 'OF', 'CLOGGED', 'UP', 'THE', 'AIR', 'AND', 'ALTOGETHER', 'HE', 'SHED', 'SEVENTEEN', 'SUITS'] +3005-163389-0017-1125: hyp=['HE', 'SHARED', 'THEM', 'SO', 'THICK', 'THAT', 'KIND', 'OF', 'CLOGGED', 'UP', 'THE', 'AIR', 'AND', 'ALTOGETHER', 'HE', 'SHED', 'SEVENTEEN', 'SUITS'] +3005-163389-0018-1126: ref=['WHY', 'IT', 'WAS', 'ONE', 'OF', 'HIS', 'OWN', 'MEN'] +3005-163389-0018-1126: hyp=['WHY', 'IT', 'WAS', 'ONE', 'OF', 'HIS', 'OWN', 'MEN'] +3005-163390-0000-1185: ref=['ANDBUT', 'NEVER', 'MIND', 'THE', 'REST', 'OF', 'HIS', 'OUTFIT', 'IT', 'WAS', 'JUST', 'WILD', 'BUT', 'IT', 'WAS', 'AWFUL', 'FUNNY'] +3005-163390-0000-1185: hyp=['AND', 'BUT', 'NEVER', 'MIND', 'THE', 'REST', 'OF', 'HIS', 'OUTFIT', 'IT', 'WAS', 'JUST', 'WILD', 'BUT', 'IT', 'WAS', 'AWFUL', 'FUNNY'] +3005-163390-0001-1186: ref=['THE', 'PEOPLE', 'MOST', 'KILLED', 'THEMSELVES', 'LAUGHING', 'AND', 'WHEN', 'THE', 'KING', 'GOT', 'DONE', 'CAPERING', 'AND', 'CAPERED', 'OFF', 'BEHIND', 'THE', 'SCENES', 'THEY', 'ROARED', 'AND', 'CLAPPED', 'AND', 'STORMED', 'AND', 'HAW', 'HAWED', 'TILL', 'HE', 'COME', 'BACK', 'AND', 'DONE', 'IT', 'OVER', 'AGAIN', 'AND', 'AFTER', 'THAT', 'THEY', 'MADE', 'HIM', 'DO', 'IT', 'ANOTHER', 'TIME'] +3005-163390-0001-1186: hyp=['THE', 'PEOPLE', 'MOST', 'KILLED', 'THEMSELVES', 'LAUGHING', 'AND', 'WHEN', 'THE', 'KING', 'GOT', 'DONE', 'CAPERING', 'AND', 'CAPERED', 'OFF', 'BEHIND', 'THE', 'SCENES', 'THEY', 'ROARED', 'AND', 'CLAPPED', 'AND', 'STORMED', 'AND', 'HAW', 'HAWED', 'TILL', 'HE', 'COME', 'BACK', 'AND', 'DONE', 'IT', 'OVER', 'AGAIN', 'AND', 'AFTER', 'THAT', 'THEY', 'MADE', 'HIM', 'DO', 'IT', 'ANOTHER', 'TIME'] +3005-163390-0002-1187: ref=['TWENTY', 'PEOPLE', 'SINGS', 'OUT'] +3005-163390-0002-1187: hyp=['TWENTY', 'PEOPLE', 'SANGS', 'OUT'] +3005-163390-0003-1188: ref=['THE', 'DUKE', 'SAYS', 'YES'] +3005-163390-0003-1188: hyp=['THE', 'DUKE', 'SAYS', 'YES'] +3005-163390-0004-1189: ref=['EVERYBODY', 'SINGS', 'OUT', 'SOLD'] +3005-163390-0004-1189: hyp=['EVERYBODY', 'SINGS', 'OUT', 'SOLD'] +3005-163390-0005-1190: ref=['BUT', 'A', 'BIG', 'FINE', 'LOOKING', 'MAN', 'JUMPS', 'UP', 'ON', 'A', 'BENCH', 'AND', 'SHOUTS', 'HOLD', 'ON'] +3005-163390-0005-1190: hyp=['BUT', 'A', 'BIG', 'FINE', 'LOOKING', 'MAN', 'JUMPS', 'UP', 'ON', 'A', 'BENCH', 'AN', 'SHOUTS', 'HOLD', 'ON'] +3005-163390-0006-1191: ref=['JUST', 'A', 'WORD', 'GENTLEMEN', 'THEY', 'STOPPED', 'TO', 'LISTEN'] +3005-163390-0006-1191: hyp=['JUST', 'A', 'WORD', 'GENTLEMEN', 'THEY', 'STOPPED', 'TO', 'LISTEN'] +3005-163390-0007-1192: ref=['WHAT', 'WE', 'WANT', 'IS', 'TO', 'GO', 'OUT', 'OF', 'HERE', 
'QUIET', 'AND', 'TALK', 'THIS', 'SHOW', 'UP', 'AND', 'SELL', 'THE', 'REST', 'OF', 'THE', 'TOWN'] +3005-163390-0007-1192: hyp=['WHAT', 'WE', 'WANT', 'IS', 'TO', 'GO', 'OUT', 'OF', 'HERE', 'QUIET', 'AND', 'TALK', 'TO', 'SHOW', 'UP', 'AND', 'SELL', 'THE', 'REST', 'O', 'THE', 'TOWN'] +3005-163390-0008-1193: ref=['YOU', 'BET', 'IT', 'IS', 'THE', 'JEDGE', 'IS', 'RIGHT', 'EVERYBODY', 'SINGS', 'OUT'] +3005-163390-0008-1193: hyp=['YOU', 'BET', 'IT', 'IS', 'THE', 'JUDGE', 'IS', 'RIGHT', 'EVERYBODY', 'SINGS', 'OUT'] +3005-163390-0009-1194: ref=['WE', 'STRUCK', 'THE', 'RAFT', 'AT', 'THE', 'SAME', 'TIME', 'AND', 'IN', 'LESS', 'THAN', 'TWO', 'SECONDS', 'WE', 'WAS', 'GLIDING', 'DOWN', 'STREAM', 'ALL', 'DARK', 'AND', 'STILL', 'AND', 'EDGING', 'TOWARDS', 'THE', 'MIDDLE', 'OF', 'THE', 'RIVER', 'NOBODY', 'SAYING', 'A', 'WORD'] +3005-163390-0009-1194: hyp=['WE', 'STRUCK', 'THE', 'RAFT', 'AT', 'THE', 'SAME', 'TIME', 'AND', 'IN', 'LESS', 'THAN', 'TWO', 'SECONDS', 'WE', 'WAS', 'GLIDING', 'DOWN', 'STREAM', 'ALL', 'DARK', 'AND', 'STILL', 'AND', 'EDGING', 'TOWARDS', 'THE', 'MIDDLE', 'OF', 'THE', 'RIVER', 'NOBODY', 'SAYING', 'A', 'WORD'] +3005-163390-0010-1195: ref=['WE', 'NEVER', 'SHOWED', 'A', 'LIGHT', 'TILL', 'WE', 'WAS', 'ABOUT', 'TEN', 'MILE', 'BELOW', 'THE', 'VILLAGE'] +3005-163390-0010-1195: hyp=['WE', 'NEVER', 'SHOWED', 'A', 'LIGHT', 'TILL', 'WE', 'WAS', 'ABOUT', 'TEN', 'MILE', 'BELOW', 'THE', 'VILLAGE'] +3005-163390-0011-1196: ref=['GREENHORNS', 'FLATHEADS'] +3005-163390-0011-1196: hyp=['GREENHORNS', 'FLAT', 'HEADS'] +3005-163390-0012-1197: ref=['NO', 'I', 'SAYS', 'IT', "DON'T"] +3005-163390-0012-1197: hyp=['NO', 'I', 'SAY', 'IS', 'IT', "DON'T"] +3005-163390-0013-1198: ref=['WELL', 'IT', "DON'T", 'BECAUSE', "IT'S", 'IN', 'THE', 'BREED', 'I', 'RECKON', "THEY'RE", 'ALL', 'ALIKE'] +3005-163390-0013-1198: hyp=['WELL', 'IT', "DON'T", 'BECAUSE', "IT'S", 'IN', 'TO', 'BREATHE', 'I', 'RECKON', "THEY'RE", 'ALL', 'ALIKE'] +3005-163390-0014-1199: ref=['WELL', "THAT'S", 'WHAT', "I'M", 'A', 'SAYING', 'ALL', 'KINGS', 'IS', 'MOSTLY', 'RAPSCALLIONS', 'AS', 'FUR', 'AS', 'I', 'CAN', 'MAKE', 'OUT', 'IS', 'DAT', 'SO'] +3005-163390-0014-1199: hyp=['WELL', "THAT'S", 'WHAT', "I'M", 'A', 'SAYING', 'ALL', 'KINGS', 'IS', 'MOSTLY', 'RASCALIONS', 'AS', 'FUR', 'AS', 'I', 'KIN', 'MAKE', 'OUT', 'IS', 'DAT', 'SO'] +3005-163390-0015-1200: ref=['AND', 'LOOK', 'AT', 'CHARLES', 'SECOND', 'AND', 'LOUIS', 'FOURTEEN', 'AND', 'LOUIS', 'FIFTEEN', 'AND', 'JAMES', 'SECOND', 'AND', 'EDWARD', 'SECOND', 'AND', 'RICHARD', 'THIRD', 'AND', 'FORTY', 'MORE', 'BESIDES', 'ALL', 'THEM', 'SAXON', 'HEPTARCHIES', 'THAT', 'USED', 'TO', 'RIP', 'AROUND', 'SO', 'IN', 'OLD', 'TIMES', 'AND', 'RAISE', 'CAIN'] +3005-163390-0015-1200: hyp=['AND', 'LOOK', 'AT', 'CHARLES', 'SECOND', 'AND', 'LOUIS', 'FOURTEEN', 'AND', 'LOUIS', 'FIFTEEN', 'AND', 'JAMES', 'SECOND', 'AND', 'EDWARD', 'SECOND', 'AND', 'RICHARD', 'THIRD', 'AND', 'FORTY', 'MORE', 'BESIDES', 'ALL', 'THEM', 'SAXON', 'HEPTARCHIES', 'THAT', 'USED', 'TO', 'RIP', 'AROUND', 'SO', 'WHEN', 'OLD', 'TIMES', 'AND', 'RAISE', 'GAME'] +3005-163390-0016-1201: ref=['MY', 'YOU', 'OUGHT', 'TO', 'SEEN', 'OLD', 'HENRY', 'THE', 'EIGHT', 'WHEN', 'HE', 'WAS', 'IN', 'BLOOM', 'HE', 'WAS', 'A', 'BLOSSOM'] +3005-163390-0016-1201: hyp=['MY', 'YOU', 'OUGHT', 'TO', 'SEE', 'AN', 'OLD', 'HENRY', 'THE', 'EIGHT', 'WHEN', 'HE', 'WAS', 'IN', 'BLOOM', 'HE', 'WAS', 'A', 'BLOSSOM'] +3005-163390-0017-1202: ref=['RING', 'UP', 'FAIR', 'ROSAMUN'] +3005-163390-0017-1202: hyp=['RING', 'UP', 'FAIR', 'ROSAMOND'] +3005-163390-0018-1203: ref=['WELL', 'HENRY', 'HE', 
'TAKES', 'A', 'NOTION', 'HE', 'WANTS', 'TO', 'GET', 'UP', 'SOME', 'TROUBLE', 'WITH', 'THIS', 'COUNTRY'] +3005-163390-0018-1203: hyp=['WELL', 'HENRY', 'HE', 'TAKES', 'A', 'NOTION', 'HE', 'WANTS', 'TO', 'GET', 'UP', 'SOME', 'TROUBLE', 'WITH', 'THIS', 'COUNTRY'] +3005-163390-0019-1204: ref=["S'POSE", 'HE', 'OPENED', 'HIS', 'MOUTHWHAT', 'THEN'] +3005-163390-0019-1204: hyp=["S'POSE", 'HE', 'OPENED', 'HIS', 'MOUTH', 'WHAT', 'THEN'] +3005-163390-0020-1205: ref=['ALL', 'I', 'SAY', 'IS', 'KINGS', 'IS', 'KINGS', 'AND', 'YOU', 'GOT', 'TO', 'MAKE', 'ALLOWANCES'] +3005-163390-0020-1205: hyp=['ALL', 'I', 'SAY', 'IS', 'KINGS', 'AS', 'KINGS', 'AN', 'YE', 'GOT', 'TO', 'MAKE', 'ALLOWANCES'] +3005-163390-0021-1206: ref=['TAKE', 'THEM', 'ALL', 'AROUND', "THEY'RE", 'A', 'MIGHTY', 'ORNERY', 'LOT', "IT'S", 'THE', 'WAY', "THEY'RE", 'RAISED'] +3005-163390-0021-1206: hyp=['TAKE', 'THEM', 'ALL', 'AROUND', "THEY'RE", 'A', 'MIGHTY', 'ORNERY', 'LOT', "IT'S", 'THE', 'WAY', "THEY'RE", 'RAISED'] +3005-163390-0022-1207: ref=['WELL', 'THEY', 'ALL', 'DO', 'JIM'] +3005-163390-0022-1207: hyp=['WELL', 'THEY', 'ALL', 'DO', 'JIM'] +3005-163390-0023-1208: ref=['NOW', 'DE', 'DUKE', "HE'S", 'A', 'TOLERBLE', 'LIKELY', 'MAN', 'IN', 'SOME', 'WAYS'] +3005-163390-0023-1208: hyp=['NOW', 'TO', 'DO', "HE'S", 'A', 'TOLERABLE', 'LIKE', 'THE', 'MAN', 'IN', 'SOME', 'WAYS'] +3005-163390-0024-1209: ref=['THIS', "ONE'S", 'A', 'MIDDLING', 'HARD', 'LOT', 'FOR', 'A', 'DUKE'] +3005-163390-0024-1209: hyp=['THIS', "ONE'S", 'A', 'MIDDLIN', 'HARD', 'LOT', 'FOR', 'A', 'DUKE'] +3005-163390-0025-1210: ref=['WHEN', 'I', 'WAKED', 'UP', 'JUST', 'AT', 'DAYBREAK', 'HE', 'WAS', 'SITTING', 'THERE', 'WITH', 'HIS', 'HEAD', 'DOWN', 'BETWIXT', 'HIS', 'KNEES', 'MOANING', 'AND', 'MOURNING', 'TO', 'HIMSELF'] +3005-163390-0025-1210: hyp=['WHEN', 'I', 'WAKED', 'UP', 'JIST', 'AT', 'DAYBREAK', 'HE', 'WAS', 'SITTING', 'THERE', 'WITH', 'HIS', 'HEAD', 'DOWN', 'BETWIXT', 'HIS', 'KNEES', 'MOANING', 'AND', 'MOURNING', 'TO', 'HIMSELF'] +3005-163390-0026-1211: ref=['IT', "DON'T", 'SEEM', 'NATURAL', 'BUT', 'I', 'RECKON', "IT'S", 'SO'] +3005-163390-0026-1211: hyp=['IT', "DON'T", 'SEEM', 'NATURAL', 'BUT', 'I', 'RECKON', "IT'S", 'SO'] +3005-163390-0027-1212: ref=['HE', 'WAS', 'OFTEN', 'MOANING', 'AND', 'MOURNING', 'THAT', 'WAY', 'NIGHTS', 'WHEN', 'HE', 'JUDGED', 'I', 'WAS', 'ASLEEP', 'AND', 'SAYING', 'PO', 'LITTLE', 'LIZABETH'] +3005-163390-0027-1212: hyp=['HE', 'WAS', 'OFTEN', 'MOANING', 'AND', 'MOURNING', 'IN', 'THAT', 'WAY', 'NIGHTS', 'WHEN', 'HE', 'JUDGED', 'I', 'WAS', 'ASLEEP', 'AND', 'SAYING', 'POE', 'LITTLE', 'ELIZABETH'] +3005-163390-0028-1213: ref=['DOAN', 'YOU', 'HEAR', 'ME', 'SHET', 'DE', 'DO'] +3005-163390-0028-1213: hyp=["DON'T", 'YOU', 'HEAR', 'ME', 'SHUT', 'DEAD', 'DOUGH'] +3005-163390-0029-1214: ref=['I', 'LAY', 'I', 'MAKE', 'YOU', 'MINE'] +3005-163390-0029-1214: hyp=['I', 'LAY', 'I', 'MAKE', 'YOU', 'MINE'] +3005-163390-0030-1215: ref=['JIS', 'AS', 'LOUD', 'AS', 'I', 'COULD', 'YELL'] +3005-163390-0030-1215: hyp=['IT', 'IS', 'LOUD', 'AS', 'I', 'COULD', 'YELL'] +3005-163391-0000-1127: ref=['WHICH', 'WAS', 'SOUND', 'ENOUGH', 'JUDGMENT', 'BUT', 'YOU', 'TAKE', 'THE', 'AVERAGE', 'MAN', 'AND', 'HE', "WOULDN'T", 'WAIT', 'FOR', 'HIM', 'TO', 'HOWL'] +3005-163391-0000-1127: hyp=['WHICH', 'WAS', 'SOUND', 'ENOUGH', 'JUDGMENT', 'BUT', 'YOU', 'TAKE', 'THE', 'AVERAGE', 'MAN', 'AND', 'HE', "WOULDN'T", 'WAIT', 'FOR', 'HIM', 'TO', 'HOWE'] +3005-163391-0001-1128: ref=['THE', "KING'S", 'DUDS', 'WAS', 'ALL', 'BLACK', 'AND', 'HE', 'DID', 'LOOK', 'REAL', 'SWELL', 'AND', 'STARCHY'] 
+3005-163391-0001-1128: hyp=['THE', "KING'S", 'DEADS', 'WAS', 'ALL', 'BLACK', 'AND', 'HE', 'DID', 'LOOK', 'REAL', 'SWELLIN', 'STARCHY'] +3005-163391-0002-1129: ref=['WHY', 'BEFORE', 'HE', 'LOOKED', 'LIKE', 'THE', 'ORNERIEST', 'OLD', 'RIP', 'THAT', 'EVER', 'WAS', 'BUT', 'NOW', 'WHEN', "HE'D", 'TAKE', 'OFF', 'HIS', 'NEW', 'WHITE', 'BEAVER', 'AND', 'MAKE', 'A', 'BOW', 'AND', 'DO', 'A', 'SMILE', 'HE', 'LOOKED', 'THAT', 'GRAND', 'AND', 'GOOD', 'AND', 'PIOUS', 'THAT', "YOU'D", 'SAY', 'HE', 'HAD', 'WALKED', 'RIGHT', 'OUT', 'OF', 'THE', 'ARK', 'AND', 'MAYBE', 'WAS', 'OLD', 'LEVITICUS', 'HIMSELF'] +3005-163391-0002-1129: hyp=['WHY', 'BEFORE', 'HE', 'LOOKED', 'LIKE', 'THE', 'ORNEIST', 'OLD', 'RIP', 'THAT', 'EVER', 'WAS', 'BUT', 'NOW', 'WHEN', "HE'D", 'TAKE', 'OFF', 'HIS', 'NEW', 'WHITE', 'BEAVER', 'AND', 'MAKE', 'A', 'BOW', 'AND', 'DO', 'A', 'SMILE', 'HE', 'LOOKED', 'THAT', 'GRAND', 'AND', 'GOOD', 'AND', 'PIOUS', 'THAT', "YOU'D", 'SAY', 'HE', 'HAD', 'WALKED', 'RIGHT', 'OUT', 'OF', 'THE', 'ARK', 'AND', 'MAYBE', 'WAS', 'OLD', 'LEVIKUS', 'HIMSELF'] +3005-163391-0003-1130: ref=['JIM', 'CLEANED', 'UP', 'THE', 'CANOE', 'AND', 'I', 'GOT', 'MY', 'PADDLE', 'READY'] +3005-163391-0003-1130: hyp=['JIM', 'CLEANED', 'UP', 'THE', 'CANOE', 'AND', 'I', 'GOT', 'MY', 'PADDLE', 'READY'] +3005-163391-0004-1131: ref=['WHER', 'YOU', 'BOUND', 'FOR', 'YOUNG', 'MAN'] +3005-163391-0004-1131: hyp=['WERE', 'YOU', 'BOUND', 'FOR', 'YOUNG', 'MAN'] +3005-163391-0005-1132: ref=['GIT', 'ABOARD', 'SAYS', 'THE', 'KING'] +3005-163391-0005-1132: hyp=['GET', 'ABOARD', 'SAYS', 'THE', 'KING'] +3005-163391-0006-1133: ref=['I', 'DONE', 'SO', 'AND', 'THEN', 'WE', 'ALL', 'THREE', 'STARTED', 'ON', 'AGAIN'] +3005-163391-0006-1133: hyp=['I', 'DONE', 'SO', 'AND', 'THEY', 'WE', 'ALL', 'THREE', 'STARTED', 'ON', 'AGAIN'] +3005-163391-0007-1134: ref=['THE', 'YOUNG', 'CHAP', 'WAS', 'MIGHTY', 'THANKFUL', 'SAID', 'IT', 'WAS', 'TOUGH', 'WORK', 'TOTING', 'HIS', 'BAGGAGE', 'SUCH', 'WEATHER'] +3005-163391-0007-1134: hyp=['THE', 'YOUNG', 'CHAP', 'WAS', 'MIGHTY', 'THANKFUL', 'SAID', 'HE', 'WAS', 'TOUGH', 'WORK', 'TOATING', 'HIS', 'BAGGAGE', 'SUCH', 'WEATHER'] +3005-163391-0008-1135: ref=['HE', 'ASKED', 'THE', 'KING', 'WHERE', 'HE', 'WAS', 'GOING', 'AND', 'THE', 'KING', 'TOLD', 'HIM', "HE'D", 'COME', 'DOWN', 'THE', 'RIVER', 'AND', 'LANDED', 'AT', 'THE', 'OTHER', 'VILLAGE', 'THIS', 'MORNING', 'AND', 'NOW', 'HE', 'WAS', 'GOING', 'UP', 'A', 'FEW', 'MILE', 'TO', 'SEE', 'AN', 'OLD', 'FRIEND', 'ON', 'A', 'FARM', 'UP', 'THERE', 'THE', 'YOUNG', 'FELLOW', 'SAYS'] +3005-163391-0008-1135: hyp=['PIERRE', 'THE', 'KING', 'WHERE', 'HE', 'WAS', 'GOING', 'AND', 'THE', 'KING', 'TOLD', 'HIM', "HE'D", 'COME', 'DOWN', 'A', 'RIVER', 'AND', 'LAND', 'IT', 'AT', 'THE', 'OTHER', 'VILLAGE', 'THIS', 'MORNING', 'AND', 'NOW', 'HE', 'WAS', 'GOING', 'UP', 'A', 'FEW', 'MILES', 'TO', 'SEE', 'AN', 'OLD', 'FRIEND', 'ON', 'A', 'FARM', 'UP', 'THERE', 'THE', 'YOUNG', 'FELLOW', 'SAYS'] +3005-163391-0009-1136: ref=['BUT', 'THEN', 'I', 'SAYS', 'AGAIN', 'NO', 'I', 'RECKON', 'IT', "AIN'T", 'HIM', 'OR', 'ELSE', 'HE', "WOULDN'T", 'BE', 'PADDLING', 'UP', 'THE', 'RIVER', 'YOU', "AIN'T", 'HIM', 'ARE', 'YOU'] +3005-163391-0009-1136: hyp=['BUT', 'THEN', 'I', 'SAYS', 'AGAIN', 'NO', 'I', 'RECKON', 'IT', "AIN'T", 'HIM', 'OR', 'ELSE', 'HE', "WOULDN'T", 'BE', 'PADDLIN', 'UP', 'THE', 'RIVER', 'YOU', "AIN'T", 'HIM', 'ARE', 'YOU'] +3005-163391-0010-1137: ref=['NO', 'MY', "NAME'S", 'BLODGETT', 'ELEXANDER', 'BLODGETT', 'REVEREND', 'ELEXANDER', 'BLODGETT', 'I', "S'POSE", 'I', 'MUST', 'SAY', 'AS', "I'M", 'ONE', 'O', 'THE', 
"LORD'S", 'POOR', 'SERVANTS'] +3005-163391-0010-1137: hyp=['NO', 'MY', "NAME'S", 'BLADGE', 'IT', 'ALEXANDER', 'BLADGET', 'REVEREND', 'ALEXANDER', 'BLOTCHETT', 'I', "S'POSE", 'I', 'MUST', 'SAY', 'AS', "I'M", 'ONE', 'OF', 'THE', 'LARGE', 'POOR', 'SERVANTS'] +3005-163391-0011-1138: ref=['YOU', 'SEE', 'HE', 'WAS', 'PRETTY', 'OLD', 'AND', "GEORGE'S", "G'YIRLS", 'WAS', 'TOO', 'YOUNG', 'TO', 'BE', 'MUCH', 'COMPANY', 'FOR', 'HIM', 'EXCEPT', 'MARY', 'JANE', 'THE', 'RED', 'HEADED', 'ONE', 'AND', 'SO', 'HE', 'WAS', 'KINDER', 'LONESOME', 'AFTER', 'GEORGE', 'AND', 'HIS', 'WIFE', 'DIED', 'AND', "DIDN'T", 'SEEM', 'TO', 'CARE', 'MUCH', 'TO', 'LIVE'] +3005-163391-0011-1138: hyp=['YOU', 'SEE', 'HE', 'WAS', 'PRETTY', 'OLD', 'AN', 'GEORGE', 'IS', 'GOOD', "EARL'S", 'WAS', 'TOO', 'YOUNG', 'TO', 'BE', 'MUCH', 'COMPANY', 'FOR', 'HIM', 'EXCEPT', 'MARY', 'JANE', 'THE', 'RED', 'HEADED', 'ONE', 'AND', 'SO', 'HE', 'WAS', 'KIND', 'OR', 'LONESOME', 'AFTER', 'GEORGE', 'AND', 'HIS', 'WIFE', 'DIED', 'AND', "DIDN'T", 'SEEM', 'TO', 'CARE', 'MUCH', 'TO', 'LIVE'] +3005-163391-0012-1139: ref=['TOO', 'BAD', 'TOO', 'BAD', 'HE', "COULDN'T", 'A', 'LIVED', 'TO', 'SEE', 'HIS', 'BROTHERS', 'POOR', 'SOUL'] +3005-163391-0012-1139: hyp=['DO', 'BAD', 'TOO', 'BAD', 'HE', "COULDN'T", 'HAVE', 'LIVED', 'TO', 'SEE', 'HIS', "BROTHER'S", 'POOR', 'SOUL'] +3005-163391-0013-1140: ref=["I'M", 'GOING', 'IN', 'A', 'SHIP', 'NEXT', 'WEDNESDAY', 'FOR', 'RYO', 'JANEERO', 'WHERE', 'MY', 'UNCLE', 'LIVES'] +3005-163391-0013-1140: hyp=["I'M", 'GOIN', 'IN', 'A', 'SHIP', 'NEXT', 'WEDNESDAY', 'FER', 'RYEO', 'GENERO', 'WHERE', 'MY', 'UNCLE', 'IS'] +3005-163391-0014-1141: ref=['BUT', "IT'LL", 'BE', 'LOVELY', 'WISHT', 'I', 'WAS', 'A', 'GOING'] +3005-163391-0014-1141: hyp=['BUT', "IT'LL", 'BE', 'LOVELY', 'WISHED', 'I', 'WAS', 'A', 'GOIN'] +3005-163391-0015-1142: ref=['MARY', "JANE'S", 'NINETEEN', "SUSAN'S", 'FIFTEEN', 'AND', "JOANNA'S", 'ABOUT', "FOURTEENTHAT'S", 'THE', 'ONE', 'THAT', 'GIVES', 'HERSELF', 'TO', 'GOOD', 'WORKS', 'AND', 'HAS', 'A', 'HARE', 'LIP', 'POOR', 'THINGS'] +3005-163391-0015-1142: hyp=['MARY', "JANE'S", 'NINETEEN', "SUSAN'S", 'FIFTEEN', 'AND', "JOANNA'S", 'ABOUT', 'FOURTEEN', "THAT'S", 'THE', 'ONE', 'THAT', 'GIVES', 'HERSELF', 'TO', 'GOOD', 'WORKS', 'AND', 'HAS', 'A', 'HAIR', 'LIP', 'POOR', 'THINGS'] +3005-163391-0016-1143: ref=['WELL', 'THEY', 'COULD', 'BE', 'WORSE', 'OFF'] +3005-163391-0016-1143: hyp=['WELL', 'THEY', 'COULD', 'BE', 'WORSE', 'OFF'] +3005-163391-0017-1144: ref=['OLD', 'PETER', 'HAD', 'FRIENDS', 'AND', 'THEY', "AIN'T", 'GOING', 'TO', 'LET', 'THEM', 'COME', 'TO', 'NO', 'HARM'] +3005-163391-0017-1144: hyp=['O', 'PETER', 'HAD', 'FRIENDS', 'AND', 'THEY', "AIN'T", 'GOIN', 'TO', 'LET', 'THEM', 'COME', 'TO', 'NO', 'HARM'] +3005-163391-0018-1145: ref=['BLAMED', 'IF', 'HE', "DIDN'T", 'INQUIRE', 'ABOUT', 'EVERYBODY', 'AND', 'EVERYTHING', 'IN', 'THAT', 'BLESSED', 'TOWN', 'AND', 'ALL', 'ABOUT', 'THE', 'WILKSES', 'AND', 'ABOUT', "PETER'S", 'BUSINESSWHICH', 'WAS', 'A', 'TANNER', 'AND', 'ABOUT', "GEORGE'SWHICH", 'WAS', 'A', 'CARPENTER', 'AND', 'ABOUT', "HARVEY'SWHICH", 'WAS', 'A', 'DISSENTERING', 'MINISTER', 'AND', 'SO', 'ON', 'AND', 'SO', 'ON', 'THEN', 'HE', 'SAYS'] +3005-163391-0018-1145: hyp=['BLAMED', 'IF', 'HE', "DIDN'T", 'ACQUIRE', 'ABOUT', 'EVERYBODY', 'AND', 'EVERYTHING', 'AND', 'THAT', 'BLESSED', 'TOWN', 'AND', 'ALL', 'ABOUT', 'THE', 'WILKS', 'AND', 'ABOUT', "PETER'S", 'BUSINESS', 'WHICH', 'WAS', 'A', 'TANNER', 'AND', 'ABOUT', "GEORGE'S", 'WHICH', 'WAS', 'A', 'CARPENTER', 'AND', 'ABOUT', 'HARVEST', 'WHICH', 'WAS', 'A', 'DISSENTERING', 
'MINISTER', 'AND', 'SO', 'ON', 'AND', 'SO', 'ON', 'THEN', 'HE', 'SAYS'] +3005-163391-0019-1146: ref=['WHEN', "THEY'RE", 'DEEP', 'THEY', "WON'T", 'STOP', 'FOR', 'A', 'HAIL'] +3005-163391-0019-1146: hyp=['WHEN', 'HER', 'DEEP', 'THEY', "WON'T", 'STOP', 'FOR', 'A', 'HAIL'] +3005-163391-0020-1147: ref=['WAS', 'PETER', 'WILKS', 'WELL', 'OFF'] +3005-163391-0020-1147: hyp=['WAS', 'PETER', 'WILKES', 'WELL', 'OFF'] +3005-163391-0021-1148: ref=['WHEN', 'WE', 'STRUCK', 'THE', 'BOAT', 'SHE', 'WAS', 'ABOUT', 'DONE', 'LOADING', 'AND', 'PRETTY', 'SOON', 'SHE', 'GOT', 'OFF'] +3005-163391-0021-1148: hyp=['WHEN', 'WASTER', 'UP', 'THE', 'BOAT', 'SHE', 'WAS', 'ABOUT', 'DONE', 'LOADING', 'AND', 'PRETTY', 'SOON', 'SHE', 'GOT', 'OFF'] +3005-163391-0022-1149: ref=['NOW', 'HUSTLE', 'BACK', 'RIGHT', 'OFF', 'AND', 'FETCH', 'THE', 'DUKE', 'UP', 'HERE', 'AND', 'THE', 'NEW', 'CARPET', 'BAGS'] +3005-163391-0022-1149: hyp=['NOW', 'HUSTLE', 'BACK', 'RIGHT', 'OFF', 'AND', 'FETCH', 'THE', 'DUKE', 'UP', 'HERE', 'AND', 'THE', 'NEW', 'CARPET', 'BAGS'] +3005-163391-0023-1150: ref=['SO', 'THEN', 'THEY', 'WAITED', 'FOR', 'A', 'STEAMBOAT'] +3005-163391-0023-1150: hyp=['SO', 'THEN', 'THEY', 'WAITED', 'FOR', 'A', 'STEAMBOAT'] +3005-163391-0024-1151: ref=['BUT', 'THE', 'KING', 'WAS', "CA'M", 'HE', 'SAYS'] +3005-163391-0024-1151: hyp=['THAT', 'THE', 'KING', 'WAS', 'CALM', 'HE', 'SAYS'] +3005-163391-0025-1152: ref=['THEY', 'GIVE', 'A', 'GLANCE', 'AT', 'ONE', 'ANOTHER', 'AND', 'NODDED', 'THEIR', 'HEADS', 'AS', 'MUCH', 'AS', 'TO', 'SAY', 'WHAT', 'D', 'I', 'TELL', 'YOU'] +3005-163391-0025-1152: hyp=['THEY', 'GIVE', 'A', 'GLANCE', 'AT', 'ONE', 'ANOTHER', 'AND', 'NODDED', 'THEIR', 'HEADS', 'AS', 'MUCH', 'AS', 'TO', 'SAY', 'WOULD', 'DATA', 'TELL', 'YOU'] +3005-163391-0026-1153: ref=['THEN', 'ONE', 'OF', 'THEM', 'SAYS', 'KIND', 'OF', 'SOFT', 'AND', 'GENTLE'] +3005-163391-0026-1153: hyp=['THEN', 'ONE', 'OF', 'THEM', 'SAYS', 'KIND', 'O', 'SOFT', 'AND', 'GENTLE'] +3005-163399-0000-1154: ref=['PHELPS', 'WAS', 'ONE', 'OF', 'THESE', 'LITTLE', 'ONE', 'HORSE', 'COTTON', 'PLANTATIONS', 'AND', 'THEY', 'ALL', 'LOOK', 'ALIKE'] +3005-163399-0000-1154: hyp=['PHELPS', 'IS', 'ONE', 'OF', 'THESE', 'LITTLE', 'ONE', 'HORSE', 'COTTON', 'PLANTATIONS', 'AND', 'THEY', 'ALL', 'LOOK', 'ALIKE'] +3005-163399-0001-1155: ref=['I', 'WENT', 'AROUND', 'AND', 'CLUMB', 'OVER', 'THE', 'BACK', 'STILE', 'BY', 'THE', 'ASH', 'HOPPER', 'AND', 'STARTED', 'FOR', 'THE', 'KITCHEN'] +3005-163399-0001-1155: hyp=['I', 'WENT', 'AROUND', 'AND', 'CLIMBED', 'OVER', 'THE', 'BACK', 'STILE', 'BY', 'THE', 'ASHHOPPER', 'AND', 'STARTED', 'FOR', 'THE', 'KITCHEN'] +3005-163399-0002-1156: ref=['I', 'OUT', 'WITH', 'A', "YES'M", 'BEFORE', 'I', 'THOUGHT'] +3005-163399-0002-1156: hyp=['AH', 'OUT', 'WI', 'THE', "YES'M", 'FOUR', 'I', 'THOUGHT'] +3005-163399-0003-1157: ref=['SO', 'THEN', 'SHE', 'STARTED', 'FOR', 'THE', 'HOUSE', 'LEADING', 'ME', 'BY', 'THE', 'HAND', 'AND', 'THE', 'CHILDREN', 'TAGGING', 'AFTER'] +3005-163399-0003-1157: hyp=['SO', 'THEN', 'SHE', 'STARTED', 'FOR', 'THE', 'HOUSE', 'LEADING', 'ME', 'BY', 'THE', 'HAND', 'AND', 'THE', 'CHILDREN', 'TAGGING', 'AFTER'] +3005-163399-0004-1158: ref=['WHEN', 'WE', 'GOT', 'THERE', 'SHE', 'SET', 'ME', 'DOWN', 'IN', 'A', 'SPLIT', 'BOTTOMED', 'CHAIR', 'AND', 'SET', 'HERSELF', 'DOWN', 'ON', 'A', 'LITTLE', 'LOW', 'STOOL', 'IN', 'FRONT', 'OF', 'ME', 'HOLDING', 'BOTH', 'OF', 'MY', 'HANDS', 'AND', 'SAYS'] +3005-163399-0004-1158: hyp=['WHEN', 'WE', 'GOT', 'THERE', 'SHE', 'SET', 'ME', 'DOWN', 'IN', 'A', 'SPLIT', 'BOTTOM', 'CHAIR', 'AND', 'SET', 'HERSELF', 'DOWN', 
'ON', 'A', 'LITTLE', 'LOW', 'STOOL', 'IN', 'FRONT', 'OF', 'ME', 'HOLDING', 'BOTH', 'OF', 'MY', 'HANDS', 'AND', 'SAYS'] +3005-163399-0005-1159: ref=['WELL', "IT'S", 'LUCKY', 'BECAUSE', 'SOMETIMES', 'PEOPLE', 'DO', 'GET', 'HURT'] +3005-163399-0005-1159: hyp=['WELL', "IT'S", 'LUCKY', 'BECAUSE', 'SOMETIMES', 'PEOPLE', 'DO', 'GET', 'HURT'] +3005-163399-0006-1160: ref=['AND', 'I', 'THINK', 'HE', 'DIED', 'AFTERWARDS', 'HE', 'WAS', 'A', 'BAPTIST'] +3005-163399-0006-1160: hyp=['AND', 'I', 'THINK', 'HE', 'DIED', 'AFTERWARDS', 'HE', 'WAS', 'A', 'BAPTIST'] +3005-163399-0007-1161: ref=['YES', 'IT', 'WAS', 'MORTIFICATIONTHAT', 'WAS', 'IT'] +3005-163399-0007-1161: hyp=['YES', 'IT', 'WAS', 'MORTIFICATION', 'THAT', 'WAS', 'IT'] +3005-163399-0008-1162: ref=['YOUR', "UNCLE'S", 'BEEN', 'UP', 'TO', 'THE', 'TOWN', 'EVERY', 'DAY', 'TO', 'FETCH', 'YOU'] +3005-163399-0008-1162: hyp=['YOUR', "UNCLE'S", 'BEEN', 'UP', 'TO', 'THE', 'TOWN', 'EVERY', 'DAY', 'TO', 'FETCH', 'YOU'] +3005-163399-0009-1163: ref=['YOU', 'MUST', 'A', 'MET', 'HIM', 'ON', 'THE', 'ROAD', "DIDN'T", 'YOU', 'OLDISH', 'MAN', 'WITH', 'A'] +3005-163399-0009-1163: hyp=['YOU', 'MUST', 'A', 'MET', 'HIM', 'ON', 'THE', 'ROAD', "DIDN'T", 'YOU', 'OLDISH', 'MAN', 'WIDTH', 'A'] +3005-163399-0010-1164: ref=['WHY', 'CHILD', 'IT', 'LL', 'BE', 'STOLE'] +3005-163399-0010-1164: hyp=['WHY', 'CHILD', "IT'LL", 'BESTOW'] +3005-163399-0011-1165: ref=['IT', 'WAS', 'KINDER', 'THIN', 'ICE', 'BUT', 'I', 'SAYS'] +3005-163399-0011-1165: hyp=['IT', 'WAS', 'KINDER', 'THIN', 'ICE', 'BUT', 'I', 'SAYS'] +3005-163399-0012-1166: ref=['I', 'HAD', 'MY', 'MIND', 'ON', 'THE', 'CHILDREN', 'ALL', 'THE', 'TIME', 'I', 'WANTED', 'TO', 'GET', 'THEM', 'OUT', 'TO', 'ONE', 'SIDE', 'AND', 'PUMP', 'THEM', 'A', 'LITTLE', 'AND', 'FIND', 'OUT', 'WHO', 'I', 'WAS'] +3005-163399-0012-1166: hyp=['I', 'HAD', 'MY', 'MIND', 'ON', 'THE', 'CHILDREN', 'ALL', 'THE', 'TIME', 'I', 'WANTED', 'TO', 'GIT', 'THEM', 'OUT', 'TO', 'ONE', 'SIDE', 'AND', 'PUMP', 'THEM', 'A', 'LITTLE', 'AND', 'FIND', 'OUT', 'WHO', 'I', 'WAS'] +3005-163399-0013-1167: ref=['PRETTY', 'SOON', 'SHE', 'MADE', 'THE', 'COLD', 'CHILLS', 'STREAK', 'ALL', 'DOWN', 'MY', 'BACK', 'BECAUSE', 'SHE', 'SAYS'] +3005-163399-0013-1167: hyp=['BERTIE', 'SOON', 'SHE', 'MADE', 'THE', 'COLD', 'CHILL', 'STREAK', 'ALL', 'DOWN', 'MY', 'BACK', 'BECAUSE', 'SHE', 'SAYS'] +3005-163399-0014-1168: ref=['I', 'SEE', 'IT', "WARN'T", 'A', 'BIT', 'OF', 'USE', 'TO', 'TRY', 'TO', 'GO', 'AHEAD', "I'D", 'GOT', 'TO', 'THROW', 'UP', 'MY', 'HAND'] +3005-163399-0014-1168: hyp=['I', 'SEE', 'IT', "WARN'T", 'A', 'BIT', 'OF', 'USE', 'TO', 'TRY', 'TO', 'GO', 'AHEAD', "I'D", 'GOT', 'TO', 'THROW', 'UP', 'MY', 'HAND'] +3005-163399-0015-1169: ref=['SO', 'I', 'SAYS', 'TO', 'MYSELF', "HERE'S", 'ANOTHER', 'PLACE', 'WHERE', 'I', 'GOT', 'TO', 'RESK', 'THE', 'TRUTH'] +3005-163399-0015-1169: hyp=['SO', 'I', 'SAYS', 'TO', 'MYSELF', 'HERE', 'IS', 'ANOTHER', 'PLACE', 'WHERE', 'I', 'GOT', 'TO', 'REST', 'THE', 'TRUTH'] +3005-163399-0016-1170: ref=['I', 'OPENED', 'MY', 'MOUTH', 'TO', 'BEGIN', 'BUT', 'SHE', 'GRABBED', 'ME', 'AND', 'HUSTLED', 'ME', 'IN', 'BEHIND', 'THE', 'BED', 'AND', 'SAYS', 'HERE', 'HE', 'COMES'] +3005-163399-0016-1170: hyp=['I', 'OPENED', 'MY', 'MOUTH', 'TO', 'BEGIN', 'BUT', 'SHE', 'GRABBED', 'ME', 'AND', 'HUSTLED', 'ME', 'IN', 'BEHIND', 'THE', 'BED', 'AND', 'SAYS', 'HERE', 'HE', 'COMES'] +3005-163399-0017-1171: ref=['CHILDREN', "DON'T", 'YOU', 'SAY', 'A', 'WORD'] +3005-163399-0017-1171: hyp=['CHILDREN', "DON'T", 'YOU', 'SAY', 'A', 'WORD'] +3005-163399-0018-1172: ref=['I', 'SEE', 'I', 'WAS', 
'IN', 'A', 'FIX', 'NOW'] +3005-163399-0018-1172: hyp=['I', 'SEE', 'I', 'WAS', 'IN', 'A', 'FIX', 'NOW'] +3005-163399-0019-1173: ref=['MISSUS', 'PHELPS', 'SHE', 'JUMPS', 'FOR', 'HIM', 'AND', 'SAYS'] +3005-163399-0019-1173: hyp=['MISSUS', 'PHILP', 'SHE', 'JUMPS', 'FOR', 'HIM', 'AND', 'SAYS'] +3005-163399-0020-1174: ref=['HAS', 'HE', 'COME', 'NO', 'SAYS', 'HER', 'HUSBAND'] +3005-163399-0020-1174: hyp=['AS', 'HE', 'COME', 'NO', 'SAYS', 'HER', 'HUSBAND'] +3005-163399-0021-1175: ref=['I', "CAN'T", 'IMAGINE', 'SAYS', 'THE', 'OLD', 'GENTLEMAN', 'AND', 'I', 'MUST', 'SAY', 'IT', 'MAKES', 'ME', 'DREADFUL', 'UNEASY'] +3005-163399-0021-1175: hyp=['I', "CAN'T", 'IMAGINE', 'SAYS', 'THE', 'OLD', 'GENTLEMAN', 'AND', 'I', 'MUST', 'SAY', 'IT', 'MAKES', 'ME', 'DREADFUL', 'UNEASY'] +3005-163399-0022-1176: ref=['UNEASY', 'SHE', 'SAYS', "I'M", 'READY', 'TO', 'GO', 'DISTRACTED'] +3005-163399-0022-1176: hyp=['UNEASY', 'SHE', 'SAYS', "I'M", 'READY', 'TO', 'GO', 'DISTRACTED'] +3005-163399-0023-1177: ref=['HE', 'MUST', 'A', 'COME', 'AND', "YOU'VE", 'MISSED', 'HIM', 'ALONG', 'THE', 'ROAD'] +3005-163399-0023-1177: hyp=['HE', 'MUST', 'HAVE', 'COME', 'AND', "YOU'VE", 'MISSED', 'HIM', 'ALONG', 'THE', 'ROAD'] +3005-163399-0024-1178: ref=['OH', "DON'T", 'DISTRESS', 'ME', 'ANY', "MORE'N", "I'M", 'ALREADY', 'DISTRESSED'] +3005-163399-0024-1178: hyp=['OH', "DON'T", 'DISTRESS', 'ME', 'ANY', 'MORE', "I'M", 'ALREADY', 'DISTRESSED'] +3005-163399-0025-1179: ref=['WHY', 'SILAS', 'LOOK', 'YONDER', 'UP', 'THE', 'ROAD', "AIN'T", 'THAT', 'SOMEBODY', 'COMING'] +3005-163399-0025-1179: hyp=['WHY', 'SILAS', 'LOOK', 'YONDER', 'UP', 'THE', 'ROAD', "AIN'T", 'THAT', 'SOMEBODY', 'COMIN'] +3005-163399-0026-1180: ref=['THE', 'OLD', 'GENTLEMAN', 'STARED', 'AND', 'SAYS'] +3005-163399-0026-1180: hyp=['THE', 'OLD', 'GENTLEMAN', 'STARED', 'AND', 'SAYS'] +3005-163399-0027-1181: ref=['I', "HAIN'T", 'NO', 'IDEA', 'WHO', 'IS', 'IT'] +3005-163399-0027-1181: hyp=['I', "HAIN'T", 'NO', 'IDEA', 'WHO', 'IS', 'IT'] +3005-163399-0028-1182: ref=["IT'S", 'TOM', 'SAWYER'] +3005-163399-0028-1182: hyp=['IS', 'TOM', 'SAWYER'] +3005-163399-0029-1183: ref=['BEING', 'TOM', 'SAWYER', 'WAS', 'EASY', 'AND', 'COMFORTABLE', 'AND', 'IT', 'STAYED', 'EASY', 'AND', 'COMFORTABLE', 'TILL', 'BY', 'AND', 'BY', 'I', 'HEAR', 'A', 'STEAMBOAT', 'COUGHING', 'ALONG', 'DOWN', 'THE', 'RIVER'] +3005-163399-0029-1183: hyp=['BEING', 'TOM', 'SAWYER', 'WAS', 'EASY', 'AND', 'COMFORTABLE', 'AND', 'ITS', 'STEESEY', 'AND', 'COMFORTABLE', 'TILL', 'BY', 'AND', 'BY', 'I', 'HEAR', 'A', 'STEAMBOAT', 'COFFIN', 'ALONG', 'DOWN', 'THE', 'RIVER'] +3005-163399-0030-1184: ref=['THEN', 'I', 'SAYS', 'TO', 'MYSELF', "S'POSE", 'TOM', 'SAWYER', 'COMES', 'DOWN', 'ON', 'THAT', 'BOAT'] +3005-163399-0030-1184: hyp=['THEN', 'I', 'SAYS', 'TO', 'MYSELF', "S'POSE", 'TOM', 'SAWYER', 'COMES', 'DOWN', 'ON', 'THAT', 'BOAT'] +3080-5032-0000-312: ref=['BUT', 'I', 'AM', 'HUGELY', 'PLEASED', 'THAT', 'YOU', 'HAVE', 'SEEN', 'MY', 'LADY'] +3080-5032-0000-312: hyp=['BUT', 'I', 'AM', 'HUGELY', 'PLEASED', 'THAT', 'YOU', 'HAVE', 'SEEN', 'MY', 'LADY'] +3080-5032-0001-313: ref=['I', 'KNEW', 'YOU', 'COULD', 'NOT', 'CHOOSE', 'BUT', 'LIKE', 'HER', 'BUT', 'YET', 'LET', 'ME', 'TELL', 'YOU', 'YOU', 'HAVE', 'SEEN', 'BUT', 'THE', 'WORST', 'OF', 'HER'] +3080-5032-0001-313: hyp=['I', 'KNEW', 'YOU', 'COULD', 'NOT', 'CHOOSE', 'BUT', 'LIKE', 'HER', 'BUT', 'YET', 'LET', 'ME', 'TELL', 'YOU', 'YOU', 'HAVE', 'SEEN', 'BUT', 'THE', 'WORST', 'OF', 'HER'] +3080-5032-0002-314: ref=['HER', 'CONVERSATION', 'HAS', 'MORE', 'CHARMS', 'THAN', 'CAN', 'BE', 'IN', 'MERE', 
'BEAUTY', 'AND', 'HER', 'HUMOUR', 'AND', 'DISPOSITION', 'WOULD', 'MAKE', 'A', 'DEFORMED', 'PERSON', 'APPEAR', 'LOVELY'] +3080-5032-0002-314: hyp=['HER', 'CONVERSATION', 'HAS', 'MORE', 'CHARMS', 'AND', 'CAN', 'BE', 'IN', 'MERE', 'BEAUTY', 'AND', 'A', 'HUMOUR', 'AND', 'DISPOSITION', 'WOULD', 'MAKE', 'A', 'DEFORMED', 'PERSON', 'APPEAR', 'LOVELY'] +3080-5032-0003-315: ref=['WHY', 'DID', 'YOU', 'NOT', 'SEND', 'ME', 'THAT', 'NEWS', 'AND', 'A', 'GARLAND'] +3080-5032-0003-315: hyp=['WHY', 'DID', 'YOU', 'NOT', 'SEND', 'ME', 'THAT', 'NEWS', 'AND', 'A', 'GARLAND'] +3080-5032-0004-316: ref=['WELL', 'THE', 'BEST', "ON'T", 'IS', 'I', 'HAVE', 'A', 'SQUIRE', 'NOW', 'THAT', 'IS', 'AS', 'GOOD', 'AS', 'A', 'KNIGHT'] +3080-5032-0004-316: hyp=['WHY', 'THE', 'BEST', 'ON', 'IT', 'IS', 'THAT', 'I', 'HAVE', 'A', 'SQUIRE', 'NOW', 'THAT', 'IS', 'AS', 'GOOD', 'AS', 'A', 'KNIGHT'] +3080-5032-0005-317: ref=['IN', 'EARNEST', 'WE', 'HAVE', 'HAD', 'SUCH', 'A', 'SKIRMISH', 'AND', 'UPON', 'SO', 'FOOLISH', 'AN', 'OCCASION', 'AS', 'I', 'CANNOT', 'TELL', 'WHICH', 'IS', 'STRANGEST'] +3080-5032-0005-317: hyp=['IN', 'EARNEST', 'WE', 'HAVE', 'HAD', 'SUCH', 'A', 'SKIRMISH', 'AND', 'UPON', 'SO', 'FOOLISH', 'AN', 'OCCASION', 'AS', 'I', 'CANNOT', 'TELL', 'WHICH', 'YOUR', "STRANGER'S"] +3080-5032-0006-318: ref=['ALL', 'THE', 'PEOPLE', 'THAT', 'I', 'HAD', 'EVER', 'IN', 'MY', 'LIFE', 'REFUSED', 'WERE', 'BROUGHT', 'AGAIN', 'UPON', 'THE', 'STAGE', 'LIKE', 'RICHARD', 'THE', 'THREE', 'S', 'GHOSTS', 'TO', 'REPROACH', 'ME', 'WITHAL', 'AND', 'ALL', 'THE', 'KINDNESS', 'HIS', 'DISCOVERIES', 'COULD', 'MAKE', 'I', 'HAD', 'FOR', 'YOU', 'WAS', 'LAID', 'TO', 'MY', 'CHARGE'] +3080-5032-0006-318: hyp=['ALL', 'THE', 'PEOPLE', 'THAT', 'I', 'HAD', 'EVER', 'IN', 'MY', 'LIFE', 'REFUSED', 'WERE', 'BROUGHT', 'AGAIN', 'UPON', 'THE', 'STAGE', 'LIKE', 'RICHARD', 'THE', "THIRD'S", 'GHOSTS', 'TO', 'REPROACH', 'ME', 'WITH', 'A', 'IN', 'ALL', 'THE', 'KINDNESS', 'HIS', 'DISCOVERIES', 'COULD', 'MAKE', 'I', 'HAD', 'FOR', 'YOU', 'WAS', 'LAID', 'TO', 'MY', 'CHARGE'] +3080-5032-0007-319: ref=['MY', 'BEST', 'QUALITIES', 'IF', 'I', 'HAVE', 'ANY', 'THAT', 'ARE', 'GOOD', 'SERVED', 'BUT', 'FOR', 'AGGRAVATIONS', 'OF', 'MY', 'FAULT', 'AND', 'I', 'WAS', 'ALLOWED', 'TO', 'HAVE', 'WIT', 'AND', 'UNDERSTANDING', 'AND', 'DISCRETION', 'IN', 'OTHER', 'THINGS', 'THAT', 'IT', 'MIGHT', 'APPEAR', 'I', 'HAD', 'NONE', 'IN', 'THIS'] +3080-5032-0007-319: hyp=['MY', 'BEST', 'QUALITIES', 'IF', 'I', 'HAVE', 'ANY', 'THAT', 'ARE', 'GOOD', 'SERVED', 'BUT', 'FOR', 'AGGRAVATIONS', 'OF', 'MY', 'FAULT', 'AND', 'I', 'WAS', 'ALLOWED', 'TO', 'HAVE', 'WIT', 'AND', 'UNDERSTANDING', 'AND', 'DISCRETION', 'IN', 'OTHER', 'THINGS', 'THAT', 'IT', 'MIGHT', 'APPEAR', 'I', 'HAD', 'NONE', 'IN', 'THIS'] +3080-5032-0008-320: ref=['TIS', 'A', 'STRANGE', 'CHANGE', 'AND', 'I', 'AM', 'VERY', 'SORRY', 'FOR', 'IT', 'BUT', "I'LL", 'SWEAR', 'I', 'KNOW', 'NOT', 'HOW', 'TO', 'HELP', 'IT'] +3080-5032-0008-320: hyp=['TIS', 'A', 'STRANGE', 'CHANGE', 'AND', 'I', 'AM', 'VERY', 'SORRY', 'FOR', 'IT', 'BUT', "I'LL", 'SWEAR', 'I', 'KNOW', 'NOT', 'HOW', 'TO', 'HELP', 'IT'] +3080-5032-0009-321: ref=['MISTER', 'FISH', 'IS', 'THE', 'SQUIRE', 'OF', 'DAMES', 'AND', 'HAS', 'SO', 'MANY', 'MISTRESSES', 'THAT', 'ANYBODY', 'MAY', 'PRETEND', 'A', 'SHARE', 'IN', 'HIM', 'AND', 'BE', 'BELIEVED', 'BUT', 'THOUGH', 'I', 'HAVE', 'THE', 'HONOUR', 'TO', 'BE', 'HIS', 'NEAR', 'NEIGHBOUR', 'TO', 'SPEAK', 'FREELY', 'I', 'CANNOT', 'BRAG', 'MUCH', 'THAT', 'HE', 'MAKES', 'ANY', 'COURT', 'TO', 'ME', 'AND', 'I', 'KNOW', 'NO', 'YOUNG', 'WOMAN', 'IN', 'THE', 'COUNTRY', 
'THAT', 'HE', 'DOES', 'NOT', 'VISIT', 'OFTEN'] +3080-5032-0009-321: hyp=['MISTER', 'FISH', 'IS', 'A', 'SQUIRE', 'OF', 'DAMES', 'AND', 'HAS', 'SO', 'MANY', 'MISTRESSES', 'THAT', 'ANYBODY', 'MAY', 'PRETEND', 'TO', 'SHARE', 'IN', 'HIM', 'AND', 'BE', 'BELIEVED', 'THAT', 'THOUGH', 'I', 'HAVE', 'THE', 'HONOUR', 'TO', 'BE', 'HIS', 'NEAR', 'NEIGHBOUR', 'TO', 'SPEAK', 'FREELY', 'I', 'CANNOT', 'BRAG', 'MUCH', 'THAT', 'HE', 'MAKES', 'ANY', 'COURT', 'TO', 'ME', 'AND', 'I', 'KNOW', 'NO', 'YOUNG', 'WOMAN', 'IN', 'THE', 'COUNTRY', 'THAT', 'HE', 'DOES', 'NOT', 'VISIT', 'OFTEN'] +3080-5032-0010-322: ref=['I', 'THINK', 'MY', 'YOUNGEST', 'BROTHER', 'COMES', 'DOWN', 'WITH', 'HIM'] +3080-5032-0010-322: hyp=['I', 'THINK', 'MY', 'YOUNGEST', 'BROTHER', 'COMES', 'DOWN', 'WITH', 'HIM'] +3080-5032-0011-323: ref=['I', 'CAN', 'NO', 'SOONER', 'GIVE', 'YOU', 'SOME', 'LITTLE', 'HINTS', 'WHEREABOUTS', 'THEY', 'LIVE', 'BUT', 'YOU', 'KNOW', 'THEM', 'PRESENTLY', 'AND', 'I', 'MEANT', 'YOU', 'SHOULD', 'BE', 'BEHOLDING', 'TO', 'ME', 'FOR', 'YOUR', 'ACQUAINTANCE'] +3080-5032-0011-323: hyp=['I', 'CAN', 'NO', 'SOONER', 'GIVE', 'YOU', 'SOME', 'LITTLE', 'HINTS', 'WHEREABOUT', 'THEY', 'LIVE', 'BUT', 'YOU', 'KNOW', 'THEM', 'PRESENTLY', 'AND', 'I', 'MEANT', 'YOU', 'SHOULD', 'BE', 'BEHOLDING', 'TO', 'ME', 'FOR', 'YOUR', 'ACQUAINTANCE'] +3080-5032-0012-324: ref=['BUT', 'IT', 'SEEMS', 'THIS', 'GENTLEMAN', 'IS', 'NOT', 'SO', 'EASY', 'ACCESS', 'BUT', 'YOU', 'MAY', 'ACKNOWLEDGE', 'SOMETHING', 'DUE', 'TO', 'ME', 'IF', 'I', 'INCLINE', 'HIM', 'TO', 'LOOK', 'GRACIOUSLY', 'UPON', 'YOU', 'AND', 'THEREFORE', 'THERE', 'IS', 'NOT', 'MUCH', 'HARM', 'DONE'] +3080-5032-0012-324: hyp=['BUT', 'IT', 'SEEMS', 'THIS', 'GENTLEMAN', 'IS', 'NOT', 'SO', 'EASY', 'AXIS', 'BUT', 'YOU', 'MAY', 'ACKNOWLEDGE', 'SOMETHING', 'DUE', 'TO', 'ME', 'IF', 'I', 'INCLINE', 'HIM', 'TO', 'LOOK', 'GRACIOUSLY', 'UPON', 'YOU', 'AND', 'THEREFORE', 'THERE', 'IS', 'NOT', 'MUCH', 'HARM', 'DONE'] +3080-5032-0013-325: ref=['I', 'HAVE', 'MISSED', 'FOUR', 'FITS', 'AND', 'HAD', 'BUT', 'FIVE', 'AND', 'HAVE', 'RECOVERED', 'SO', 'MUCH', 'STRENGTH', 'AS', 'MADE', 'ME', 'VENTURE', 'TO', 'MEET', 'YOUR', 'LETTER', 'ON', 'WEDNESDAY', 'A', 'MILE', 'FROM', 'HOME'] +3080-5032-0013-325: hyp=['I', 'HAVE', 'MISSED', 'FOUR', 'FITS', 'AND', 'HAVE', 'HAD', 'BUT', 'FIVE', 'AND', 'HAVE', 'RECOVERED', 'SO', 'MUCH', 'STRENGTH', 'AS', 'MADE', 'ME', 'VENTURE', 'TO', 'MEET', 'YOUR', 'LETTER', 'ON', 'WEDNESDAY', 'A', 'MILE', 'FROM', 'HOME'] +3080-5032-0014-326: ref=['BUT', 'BESIDES', 'I', 'CAN', 'GIVE', 'YOU', 'OTHERS'] +3080-5032-0014-326: hyp=['BUT', 'BESIDES', 'I', 'CAN', 'GIVE', 'YOU', 'OTHERS'] +3080-5032-0015-327: ref=['I', 'AM', 'HERE', 'MUCH', 'MORE', 'OUT', 'OF', "PEOPLE'S", 'WAY', 'THAN', 'IN', 'TOWN', 'WHERE', 'MY', 'AUNT', 'AND', 'SUCH', 'AS', 'PRETEND', 'AN', 'INTEREST', 'IN', 'ME', 'AND', 'A', 'POWER', 'OVER', 'ME', 'DO', 'SO', 'PERSECUTE', 'ME', 'WITH', 'THEIR', 'GOOD', 'NATURE', 'AND', 'TAKE', 'IT', 'SO', 'ILL', 'THAT', 'THEY', 'ARE', 'NOT', 'ACCEPTED', 'AS', 'I', 'WOULD', 'LIVE', 'IN', 'A', 'HOLLOW', 'TREE', 'TO', 'AVOID', 'THEM'] +3080-5032-0015-327: hyp=['I', 'AM', 'HERE', 'MUCH', 'MORE', 'OUT', 'OF', "PEOPLE'S", 'WAY', 'THAN', 'IN', 'TOWN', 'WHERE', 'MY', 'AUNTS', 'AND', 'SUCH', 'HAS', 'PRETEND', 'AND', 'INTEREST', 'IN', 'ME', 'AND', 'A', 'POWER', 'OVER', 'ME', 'DO', 'SO', 'PERSECUTE', 'ME', 'WITH', 'DEAR', 'GOOD', 'NATURE', 'AND', 'TAKE', 'IT', 'SO', 'ILL', 'THAT', 'THEY', 'ARE', 'NOT', 'ACCEPTED', 'AS', 'I', 'WOULD', 'LIVE', 'IN', 'A', 'HOLLOW', 'TREE', 'TO', 'AVOID', 'THEM'] 
+3080-5032-0016-328: ref=['YOU', 'WILL', 'THINK', 'HIM', 'ALTERED', 'AND', 'IF', 'IT', 'BE', 'POSSIBLE', 'MORE', 'MELANCHOLY', 'THAN', 'HE', 'WAS'] +3080-5032-0016-328: hyp=['YOU', 'WILL', 'THINK', 'HIM', 'ALTERED', 'AND', 'IF', 'IT', 'BE', 'POSSIBLE', 'MORE', 'MELANCHOLY', 'THAN', 'HE', 'WAS'] +3080-5032-0017-329: ref=['IF', 'MARRIAGE', 'AGREES', 'NO', 'BETTER', 'WITH', 'OTHER', 'PEOPLE', 'THAN', 'IT', 'DOES', 'WITH', 'HIM', 'I', 'SHALL', 'PRAY', 'THAT', 'ALL', 'MY', 'FRIENDS', 'MAY', 'SCAPE', 'IT'] +3080-5032-0017-329: hyp=['IF', 'MARRIAGE', 'AGREES', 'NO', 'BETTER', 'WHETHER', 'PEOPLE', 'THAN', 'IT', 'DOES', 'WITH', 'HIM', 'I', 'SHALL', 'PRAY', 'THAT', 'ALL', 'MY', 'FRIENDS', 'MAY', 'ESCAPE', 'IT'] +3080-5032-0018-330: ref=['WELL', 'IN', 'EARNEST', 'IF', 'I', 'WERE', 'A', 'PRINCE', 'THAT', 'LADY', 'SHOULD', 'BE', 'MY', 'MISTRESS', 'BUT', 'I', 'CAN', 'GIVE', 'NO', 'RULE', 'TO', 'ANY', 'ONE', 'ELSE', 'AND', 'PERHAPS', 'THOSE', 'THAT', 'ARE', 'IN', 'NO', 'DANGER', 'OF', 'LOSING', 'THEIR', 'HEARTS', 'TO', 'HER', 'MAY', 'BE', 'INFINITELY', 'TAKEN', 'WITH', 'ONE', 'I', 'SHOULD', 'NOT', 'VALUE', 'AT', 'ALL', 'FOR', 'SO', 'SAYS', 'THE', 'JUSTINIAN', 'WISE', 'PROVIDENCE', 'HAS', 'ORDAINED', 'IT', 'THAT', 'BY', 'THEIR', 'DIFFERENT', 'HUMOURS', 'EVERYBODY', 'MIGHT', 'FIND', 'SOMETHING', 'TO', 'PLEASE', 'THEMSELVES', 'WITHAL', 'WITHOUT', 'ENVYING', 'THEIR', 'NEIGHBOURS'] +3080-5032-0018-330: hyp=['WELL', 'IN', 'HONEST', 'IF', 'I', 'WERE', 'A', 'PRINCE', 'THAT', 'LADY', 'SHOULD', 'BE', 'MY', 'MISTRESS', 'BUT', 'I', 'CAN', 'GIVE', 'NO', 'RULE', 'TO', 'ANY', 'ONE', 'ELSE', 'AND', 'PERHAPS', 'THOSE', 'THAT', 'ARE', 'IN', 'NO', 'DANGER', 'OF', 'LOSING', 'THEIR', 'HEARTS', 'TO', 'HER', 'MAY', 'BE', 'INFINITELY', 'TAKEN', 'WITH', 'ONE', 'I', 'SHOULD', 'NOT', 'VALUE', 'AT', 'ALL', 'FOR', 'SO', 'SAYS', 'THE', 'JUSTINIAN', 'WISE', 'PROVIDENCE', 'HAS', 'ORDAINED', 'IT', 'THAT', 'BY', 'THEIR', 'DIFFERENT', 'HUMOURS', 'EVERYBODY', 'MIGHT', 'FIND', 'SOMETHING', 'TO', 'PLEASE', 'THEMSELVES', 'WITHAL', 'WITHOUT', 'ENVYING', 'THEIR', 'NEIGHBORS'] +3080-5032-0019-331: ref=['THE', 'MATTER', 'IS', 'NOT', 'GREAT', 'FOR', 'I', 'CONFESS', 'I', 'DO', 'NATURALLY', 'HATE', 'THE', 'NOISE', 'AND', 'TALK', 'OF', 'THE', 'WORLD', 'AND', 'SHOULD', 'BE', 'BEST', 'PLEASED', 'NEVER', 'TO', 'BE', 'KNOWN', "IN'T", 'UPON', 'ANY', 'OCCASION', 'WHATSOEVER', 'YET', 'SINCE', 'IT', 'CAN', 'NEVER', 'BE', 'WHOLLY', 'AVOIDED', 'ONE', 'MUST', 'SATISFY', 'ONESELF', 'BY', 'DOING', 'NOTHING', 'THAT', 'ONE', 'NEED', 'CARE', 'WHO', 'KNOWS'] +3080-5032-0019-331: hyp=['THE', 'MATTER', 'IS', 'NOT', 'GREAT', 'FOR', 'I', 'CONFESS', 'I', 'DO', 'NATURALLY', 'HATE', 'THE', 'NOISE', 'AND', 'TALK', 'OF', 'THE', 'WORLD', 'AND', 'SHOULD', 'BE', 'BEST', 'PLEASED', 'NEVER', 'TO', 'BE', 'KNOWN', 'IN', 'IT', 'UPON', 'ANY', 'OCCASION', 'WHATSOEVER', 'YET', 'SINCE', 'IT', 'CAN', 'NEVER', 'BE', 'WHOLLY', 'AVOIDED', 'ONE', 'MUST', 'SATISFY', 'ONESELF', 'BY', 'DOING', 'NOTHING', 'THAT', 'ONE', 'NEED', 'CARE', 'WHO', 'KNOWS'] +3080-5032-0020-332: ref=['IF', 'I', 'HAD', 'A', 'PICTURE', 'THAT', 'WERE', 'FIT', 'FOR', 'YOU', 'YOU', 'SHOULD', 'HAVE', 'IT'] +3080-5032-0020-332: hyp=['IF', 'I', 'HAD', 'A', 'PICTURE', 'THAT', 'WERE', 'FIT', 'FOR', 'YOU', 'YOU', 'SHOULD', 'HAVE', 'IT'] +3080-5032-0021-333: ref=['HOW', 'CAN', 'YOU', 'TALK', 'OF', 'DEFYING', 'FORTUNE', 'NOBODY', 'LIVES', 'WITHOUT', 'IT', 'AND', 'THEREFORE', 'WHY', 'SHOULD', 'YOU', 'IMAGINE', 'YOU', 'COULD'] +3080-5032-0021-333: hyp=['HOW', 'CAN', 'YOU', 'TALK', 'OF', 'DEFYING', 'FORTUNE', 'NOBODY', 'LIVES', 
'WITHOUT', 'IT', 'AND', 'THEREFORE', 'WHY', 'SHOULD', 'YOU', 'IMAGINE', 'YOU', 'COULD'] +3080-5032-0022-334: ref=['I', 'KNOW', 'NOT', 'HOW', 'MY', 'BROTHER', 'COMES', 'TO', 'BE', 'SO', 'WELL', 'INFORMED', 'AS', 'YOU', 'SAY', 'BUT', 'I', 'AM', 'CERTAIN', 'HE', 'KNOWS', 'THE', 'UTMOST', 'OF', 'THE', 'INJURIES', 'YOU', 'HAVE', 'RECEIVED', 'FROM', 'HER'] +3080-5032-0022-334: hyp=['I', 'KNOW', 'NOT', 'HOW', 'MY', 'BROTHER', 'COMES', 'TO', 'BE', 'SO', 'WELL', 'INFORMED', 'AS', 'YOU', 'SAY', 'BUT', 'I', 'AM', 'CERTAIN', 'HE', 'KNOWS', 'UTMOST', 'OF', 'THE', 'INJURIES', 'YOU', 'HAVE', 'RECEIVED', 'FROM', 'HER'] +3080-5032-0023-335: ref=['WE', 'HAVE', 'HAD', 'ANOTHER', 'DEBATE', 'BUT', 'MUCH', 'MORE', 'CALMLY'] +3080-5032-0023-335: hyp=['WE', 'HAVE', 'HAD', 'ANOTHER', 'DEBATE', 'BUT', 'MUCH', 'MORE', 'CALMLY'] +3080-5032-0024-336: ref=['AND', 'BESIDES', 'THERE', 'WAS', 'A', 'TIME', 'WHEN', 'WE', 'OURSELVES', 'WERE', 'INDIFFERENT', 'TO', 'ONE', 'ANOTHER', 'DID', 'I', 'DO', 'SO', 'THEN', 'OR', 'HAVE', 'I', 'LEARNED', 'IT', 'SINCE'] +3080-5032-0024-336: hyp=['THEN', 'BESIDES', 'THERE', 'WAS', 'A', 'TIME', 'WHEN', 'WE', 'OURSELVES', 'WERE', 'INDIFFERENT', 'TO', 'ONE', 'ANOTHER', 'DID', 'I', 'DO', 'SO', 'THEN', 'OR', 'HAVE', 'I', 'LEARNED', 'IT', 'SINCE'] +3080-5032-0025-337: ref=['I', 'HAVE', 'BEEN', 'STUDYING', 'HOW', 'TOM', 'CHEEKE', 'MIGHT', 'COME', 'BY', 'HIS', 'INTELLIGENCE', 'AND', 'I', 'VERILY', 'BELIEVE', 'HE', 'HAS', 'IT', 'FROM', 'MY', 'COUSIN', 'PETERS'] +3080-5032-0025-337: hyp=['I', 'HAVE', 'BEEN', 'STUDYING', 'HOW', 'TOM', 'CHEEK', 'MIGHT', 'COME', 'BY', 'HIS', 'INTELLIGENCE', 'AND', 'I', 'VERY', 'BELIEVE', 'HE', 'HAS', 'IT', 'FROM', 'MY', 'COUSIN', 'PETERS'] +3080-5032-0026-338: ref=['HOW', 'KINDLY', 'DO', 'I', 'TAKE', 'THESE', 'CIVILITIES', 'OF', 'YOUR', "FATHER'S", 'IN', 'EARNEST', 'YOU', 'CANNOT', 'IMAGINE', 'HOW', 'HIS', 'LETTER', 'PLEASED', 'ME'] +3080-5032-0026-338: hyp=['HOW', 'KINDLY', 'DO', 'I', 'TAKE', 'THE', 'CIVILITIES', 'OF', 'YOUR', 'FATHERS', 'IN', 'EARNEST', 'YOU', 'CANNOT', 'IMAGINE', 'HOW', 'HIS', 'LETTER', 'PLEASED', 'ME'] +3080-5040-0000-278: ref=['WOULD', 'IT', 'WOULD', 'LEAVE', 'ME', 'AND', 'THEN', 'I', 'COULD', 'BELIEVE', 'I', 'SHALL', 'NOT', 'ALWAYS', 'HAVE', 'OCCASION', 'FOR', 'IT'] +3080-5040-0000-278: hyp=['WOULD', 'IT', 'WOULD', 'LEAVE', 'ME', 'AND', 'THEN', 'I', 'COULD', 'BELIEVE', 'I', 'SHALL', 'NOT', 'ALWAYS', 'HAVE', 'OCCASION', 'FOR', 'IT'] +3080-5040-0001-279: ref=['MY', 'POOR', 'LADY', 'VAVASOUR', 'IS', 'CARRIED', 'TO', 'THE', 'TOWER', 'AND', 'HER', 'GREAT', 'BELLY', 'COULD', 'NOT', 'EXCUSE', 'HER', 'BECAUSE', 'SHE', 'WAS', 'ACQUAINTED', 'BY', 'SOMEBODY', 'THAT', 'THERE', 'WAS', 'A', 'PLOT', 'AGAINST', 'THE', 'PROTECTOR', 'AND', 'DID', 'NOT', 'DISCOVER', 'IT'] +3080-5040-0001-279: hyp=['MY', 'POOR', 'LADY', 'VAVASOR', 'IS', 'CHARACTER', 'TOWER', 'AND', 'HER', 'GREAT', 'BELLY', 'COULD', 'NOT', 'EXCUSE', 'HER', 'BECAUSE', 'SHE', 'WAS', 'ACQUAINTED', 'BY', 'SOMEBODY', 'THAT', 'THERE', 'WAS', 'A', 'PLOT', 'AGAINST', 'THE', 'PROTECTOR', 'AND', 'DID', 'NOT', 'DISCOVER', 'IT'] +3080-5040-0002-280: ref=['SHE', 'HAS', 'TOLD', 'NOW', 'ALL', 'THAT', 'WAS', 'TOLD', 'HER', 'BUT', 'VOWS', 'SHE', 'WILL', 'NEVER', 'SAY', 'FROM', 'WHENCE', 'SHE', 'HAD', 'IT', 'WE', 'SHALL', 'SEE', 'WHETHER', 'HER', 'RESOLUTIONS', 'ARE', 'AS', 'UNALTERABLE', 'AS', 'THOSE', 'OF', 'MY', 'LADY', 'TALMASH'] +3080-5040-0002-280: hyp=['SHE', 'HAS', 'TOLD', 'NOW', 'ALL', 'THAT', 'WAS', 'TOLD', 'HER', 'BUT', 'VOWS', 'SHE', 'WILL', 'NEVER', 'SAY', 'FROM', 'WHENCE', 'SHE', 'HAD', 'IT', 'WE', 
'SHALL', 'SEE', 'WHETHER', 'HER', 'RESOLUTIONS', 'ARE', 'AS', 'UNALTERABLE', 'AS', 'THOSE', 'OF', 'MY', 'LADY', 'THOMAS'] +3080-5040-0003-281: ref=['I', 'WONDER', 'HOW', 'SHE', 'BEHAVED', 'HERSELF', 'WHEN', 'SHE', 'WAS', 'MARRIED'] +3080-5040-0003-281: hyp=['I', 'WONDER', 'HOW', 'SHE', 'BEHAVED', 'HERSELF', 'WHEN', 'SHE', 'WAS', 'MARRIED'] +3080-5040-0004-282: ref=['I', 'NEVER', 'SAW', 'ANY', 'ONE', 'YET', 'THAT', 'DID', 'NOT', 'LOOK', 'SIMPLY', 'AND', 'OUT', 'OF', 'COUNTENANCE', 'NOR', 'EVER', 'KNEW', 'A', 'WEDDING', 'WELL', 'DESIGNED', 'BUT', 'ONE', 'AND', 'THAT', 'WAS', 'OF', 'TWO', 'PERSONS', 'WHO', 'HAD', 'TIME', 'ENOUGH', 'I', 'CONFESS', 'TO', 'CONTRIVE', 'IT', 'AND', 'NOBODY', 'TO', 'PLEASE', "IN'T", 'BUT', 'THEMSELVES'] +3080-5040-0004-282: hyp=['I', 'NEVER', 'SAW', 'ANY', 'ONE', 'YET', 'THAT', 'DID', 'NOT', 'LOOK', 'SIMPLY', 'AND', 'OUT', 'OF', 'COUNTENANCE', 'WHATEVER', 'KNEW', 'A', 'WEDDING', 'WELL', 'DESIGNED', 'BUT', 'ONE', 'AND', 'THAT', 'WAS', 'OF', 'TWO', 'PERSONS', 'WHO', 'HAD', 'TIME', 'ENOUGH', 'I', 'CONFESS', 'TO', 'CONTRIVE', 'IT', 'AND', 'NOBODY', 'TO', 'PLEASE', 'IN', 'BUT', 'THEMSELVES'] +3080-5040-0005-283: ref=['THE', 'TRUTH', 'IS', 'I', 'COULD', 'NOT', 'ENDURE', 'TO', 'BE', 'MISSUS', 'BRIDE', 'IN', 'A', 'PUBLIC', 'WEDDING', 'TO', 'BE', 'MADE', 'THE', 'HAPPIEST', 'PERSON', 'ON', 'EARTH'] +3080-5040-0005-283: hyp=['THE', 'TRUTH', 'IS', 'I', 'COULD', 'NOT', 'ENDURE', 'TO', 'BE', 'MISSUS', 'BRIDE', 'IN', 'A', 'PUBLIC', 'WEDDING', 'TO', 'BE', 'MADE', 'THE', 'HAPPIEST', 'PERSON', 'ON', 'EARTH'] +3080-5040-0006-284: ref=['DO', 'NOT', 'TAKE', 'IT', 'ILL', 'FOR', 'I', 'WOULD', 'ENDURE', 'IT', 'IF', 'I', 'COULD', 'RATHER', 'THAN', 'FAIL', 'BUT', 'IN', 'EARNEST', 'I', 'DO', 'NOT', 'THINK', 'IT', 'WERE', 'POSSIBLE', 'FOR', 'ME'] +3080-5040-0006-284: hyp=['DO', 'NOT', 'TAKE', 'IT', 'ILL', 'FOR', 'I', 'WOULD', 'ENDURE', 'IT', 'IF', 'I', 'COULD', 'RATHER', 'THAN', 'FAIL', 'BUT', 'IN', 'EARNEST', 'I', 'DO', 'NOT', 'THINK', 'IT', 'WERE', 'POSSIBLE', 'FOR', 'ME'] +3080-5040-0007-285: ref=['YET', 'IN', 'EARNEST', 'YOUR', 'FATHER', 'WILL', 'NOT', 'FIND', 'MY', 'BROTHER', 'PEYTON', 'WANTING', 'IN', 'CIVILITY', 'THOUGH', 'HE', 'IS', 'NOT', 'A', 'MAN', 'OF', 'MUCH', 'COMPLIMENT', 'UNLESS', 'IT', 'BE', 'IN', 'HIS', 'LETTERS', 'TO', 'ME', 'NOR', 'AN', 'UNREASONABLE', 'PERSON', 'IN', 'ANYTHING', 'SO', 'HE', 'WILL', 'ALLOW', 'HIM', 'OUT', 'OF', 'HIS', 'KINDNESS', 'TO', 'HIS', 'WIFE', 'TO', 'SET', 'A', 'HIGHER', 'VALUE', 'UPON', 'HER', 'SISTER', 'THAN', 'SHE', 'DESERVES'] +3080-5040-0007-285: hyp=['YET', 'IN', 'EARNEST', 'YOUR', 'FATHER', 'WILL', 'NOT', 'FIND', 'MY', 'BROTHER', 'PEYTON', 'WANTING', 'IN', 'CIVILITY', 'THOUGH', 'HE', 'IS', 'NOT', 'A', 'MAN', 'OF', 'MUCH', 'COMPLIMENT', 'UNLESS', 'IT', 'BE', 'IN', 'HIS', 'LETTERS', 'TO', 'ME', 'NOR', 'AN', 'UNREASONABLE', 'PERSON', 'IN', 'ANYTHING', 'SO', 'HE', 'WILL', 'ALLOW', 'HIM', 'OUT', 'OF', 'HIS', 'KINDNESS', 'TO', 'HIS', 'WIFE', 'TO', 'SET', 'A', 'HIGHER', 'VALUE', 'UPON', 'HER', 'SISTER', 'THAN', 'SHE', 'DESERVES'] +3080-5040-0008-286: ref=['MY', 'AUNT', 'TOLD', 'ME', 'NO', 'LONGER', 'AGONE', 'THAN', 'YESTERDAY', 'THAT', 'I', 'WAS', 'THE', 'MOST', 'WILFUL', 'WOMAN', 'THAT', 'EVER', 'SHE', 'KNEW', 'AND', 'HAD', 'AN', 'OBSTINACY', 'OF', 'SPIRIT', 'NOTHING', 'COULD', 'OVERCOME', 'TAKE', 'HEED'] +3080-5040-0008-286: hyp=['MY', 'AUNT', 'TOLD', 'ME', 'NO', 'LONGER', 'A', 'GONDEN', 'YESTERDAY', 'THAT', 'I', 'WAS', 'THE', 'MOST', 'WILFUL', 'WOMAN', 'THAT', 'EVER', 'SHE', 'KNEW', 'AND', 'HAD', 'AN', 'OBSTINACY', 'OF', 'SPIRIT', 'NOTHING', 
'COULD', 'OVERCOME', 'TAKE', 'HEED'] +3080-5040-0009-287: ref=['YOU', 'SEE', 'I', 'GIVE', 'YOU', 'FAIR', 'WARNING'] +3080-5040-0009-287: hyp=['YOU', 'SEE', 'I', 'GIVE', 'YOU', 'FAIR', 'WARNING'] +3080-5040-0010-288: ref=['BY', 'THE', 'NEXT', 'I', 'SHALL', 'BE', 'GONE', 'INTO', 'KENT', 'AND', 'MY', 'OTHER', 'JOURNEY', 'IS', 'LAID', 'ASIDE', 'WHICH', 'I', 'AM', 'NOT', 'DISPLEASED', 'AT', 'BECAUSE', 'IT', 'WOULD', 'HAVE', 'BROKEN', 'OUR', 'INTERCOURSE', 'VERY', 'MUCH'] +3080-5040-0010-288: hyp=['BY', 'THE', 'NEXT', 'I', 'SHALL', 'BE', 'GONE', 'INTO', 'KENT', 'AND', 'MY', 'OTHER', 'JOURNEY', 'IS', 'LAID', 'ASIDE', 'WHICH', 'I', 'AM', 'NOT', 'DISPLEASED', 'AT', 'BECAUSE', 'IT', 'WOULD', 'HAVE', 'BROKEN', 'OUR', 'INTERCOURSE', 'VERY', 'MUCH'] +3080-5040-0011-289: ref=['HERE', 'ARE', 'SOME', 'VERSES', 'OF', "COWLEY'S", 'TELL', 'ME', 'HOW', 'YOU', 'LIKE', 'THEM'] +3080-5040-0011-289: hyp=['HERE', 'ARE', 'SOME', 'VERSES', 'OF', 'COLLEASE', 'TELL', 'ME', 'HOW', 'YOU', 'LIKE', 'THEM'] +3080-5040-0012-290: ref=['I', 'TOLD', 'YOU', 'IN', 'MY', 'LAST', 'THAT', 'MY', 'SUFFOLK', 'JOURNEY', 'WAS', 'LAID', 'ASIDE', 'AND', 'THAT', 'INTO', 'KENT', 'HASTENED'] +3080-5040-0012-290: hyp=['I', 'TOLD', 'YOU', 'IN', 'MY', 'LAST', 'THAT', 'MY', 'SUFFOLD', 'JOURNEY', 'WAS', 'LAID', 'ASIDE', 'AND', 'THAT', 'INTO', 'KENT', 'HASTENED'] +3080-5040-0013-291: ref=['IF', 'I', 'DROWN', 'BY', 'THE', 'WAY', 'THIS', 'WILL', 'BE', 'MY', 'LAST', 'LETTER', 'AND', 'LIKE', 'A', 'WILL', 'I', 'BEQUEATH', 'ALL', 'MY', 'KINDNESS', 'TO', 'YOU', 'IN', 'IT', 'WITH', 'A', 'CHARGE', 'NEVER', 'TO', 'BESTOW', 'IT', 'ALL', 'UPON', 'ANOTHER', 'MISTRESS', 'LEST', 'MY', 'GHOST', 'RISE', 'AGAIN', 'AND', 'HAUNT', 'YOU'] +3080-5040-0013-291: hyp=['IF', 'I', 'DROWN', 'BY', 'THE', 'WAY', 'THIS', 'WILL', 'BE', 'MY', 'LAST', 'LETTER', 'AND', 'LIKE', 'A', 'WILL', 'I', 'BEQUEATH', 'ALL', 'MY', 'KINDNESS', 'TO', 'YOU', 'IN', 'IT', 'WITH', 'A', 'CHARGE', 'NEVER', 'TO', 'BESTOW', 'IT', 'ALL', 'UPON', 'ANOTHER', 'MISTRESS', 'LEST', 'MY', 'GHOST', 'RISE', 'AGAIN', 'AND', 'HAUNT', 'YOU'] +3080-5040-0014-292: ref=['INDEED', 'I', 'LIKE', 'HIM', 'EXTREMELY', 'AND', 'HE', 'IS', 'COMMENDED', 'TO', 'ME', 'BY', 'PEOPLE', 'THAT', 'KNOW', 'HIM', 'VERY', 'WELL', 'AND', 'ARE', 'ABLE', 'TO', 'JUDGE', 'FOR', 'A', 'MOST', 'EXCELLENT', 'SERVANT', 'AND', 'FAITHFUL', 'AS', 'POSSIBLE'] +3080-5040-0014-292: hyp=['INDEED', 'I', 'LIKE', 'HIM', 'EXTREMELY', 'AND', 'HE', 'IS', 'COMMENDED', 'TO', 'ME', 'BY', 'PEOPLE', 'THAT', 'KNOW', 'HIM', 'VERY', 'WELL', 'AND', 'ARE', 'ABLE', 'TO', 'JUDGE', 'FOR', 'A', 'MOST', 'EXCELLENT', 'SERVANT', 'AND', 'FAITHFUL', 'AS', 'POSSIBLE'] +3080-5040-0015-293: ref=['BECAUSE', 'YOU', 'FIND', 'FAULT', 'WITH', 'MY', 'OTHER', 'LETTERS', 'THIS', 'IS', 'LIKE', 'TO', 'BE', 'SHORTER', 'THAN', 'THEY', 'I', 'DID', 'NOT', 'INTEND', 'IT', 'SO', 'THOUGH', 'I', 'CAN', 'ASSURE', 'YOU'] +3080-5040-0015-293: hyp=['BECAUSE', 'YOU', 'FIND', 'FAULT', 'WITH', 'MY', 'OTHER', 'LETTERS', 'THIS', 'IS', 'LIKE', 'TO', 'BE', 'SHORTER', 'THAN', 'THEY', 'I', 'DID', 'NOT', 'INTEND', 'IT', 'SO', 'THOUGH', 'I', 'CAN', 'ASSURE', 'YOU'] +3080-5040-0016-294: ref=['I', 'DO', 'NOT', 'FIND', 'IT', 'THOUGH', 'I', 'AM', 'TOLD', 'I', 'WAS', 'SO', 'EXTREMELY', 'WHEN', 'I', 'BELIEVED', 'YOU', 'LOVED', 'ME'] +3080-5040-0016-294: hyp=['I', 'DO', 'NOT', 'FIND', 'IT', 'THOUGH', 'I', 'AM', 'TOLD', 'I', 'WAS', 'SO', 'EXTREMELY', 'WHEN', 'I', 'BELIEVED', 'YOU', 'LOVE', 'ME'] +3080-5040-0017-295: ref=['BUT', 'I', 'AM', 'CALLED', 'UPON'] +3080-5040-0017-295: hyp=['BUT', 'I', 'AM', 'CALLED', 'UPON'] 
+3080-5040-0018-296: ref=['DIRECTED', 'FOR', 'YOUR', 'MASTER'] +3080-5040-0018-296: hyp=['DIRECTED', 'FOR', 'YOUR', 'MASTER'] +3080-5040-0019-297: ref=['I', 'SEE', 'YOU', 'CAN', 'CHIDE', 'WHEN', 'YOU', 'PLEASE', 'AND', 'WITH', 'AUTHORITY', 'BUT', 'I', 'DESERVE', 'IT', 'I', 'CONFESS', 'AND', 'ALL', 'I', 'CAN', 'SAY', 'FOR', 'MYSELF', 'IS', 'THAT', 'MY', 'FAULT', 'PROCEEDED', 'FROM', 'A', 'VERY', 'GOOD', 'PRINCIPLE', 'IN', 'ME'] +3080-5040-0019-297: hyp=['I', 'SEE', 'YOU', 'CAN', 'CHID', 'WHEN', 'YOU', 'PLEASE', 'AND', 'WITH', 'AUTHORITY', 'BUT', 'I', 'DESERVE', 'IT', 'I', 'CONFESS', 'AND', 'ALL', 'I', 'CAN', 'SAY', 'FOR', 'MYSELF', 'IS', 'THAT', 'MY', 'FAULT', 'PROCEEDED', 'FROM', 'A', 'VERY', 'GOOD', 'PRINCIPLE', 'IN', 'ME'] +3080-5040-0020-298: ref=['WE', 'DARE', 'NOT', 'LET', 'OUR', 'TONGUES', 'LIE', 'MORE', 'ON', 'ONE', 'SIDE', 'OF', 'OUR', 'MOUTHS', 'THAN', "T'OTHER", 'FOR', 'FEAR', 'OF', 'OVERTURNING', 'IT'] +3080-5040-0020-298: hyp=['WE', 'DARE', 'NOT', 'LET', 'OUR', 'TONGUES', 'LIE', 'MORE', 'ON', 'ONE', 'SIDE', 'OF', 'OUR', 'MOTHS', 'THAN', 'THE', 'OTHER', 'FOR', 'FEAR', 'OF', 'OVERTURNING', 'IT'] +3080-5040-0021-299: ref=['YOU', 'ARE', 'SATISFIED', 'I', 'HOPE', 'ERE', 'THIS', 'THAT', 'I', 'SCAPED', 'DROWNING'] +3080-5040-0021-299: hyp=['YOU', 'ARE', 'SATISFIED', 'I', 'HOPE', 'AT', 'THIS', 'THAT', 'I', 'ESCAPED', 'DROWNING'] +3080-5040-0022-300: ref=['BUT', 'I', 'AM', 'TROUBLED', 'MUCH', 'YOU', 'SHOULD', 'MAKE', 'SO', 'ILL', 'A', 'JOURNEY', 'TO', 'SO', 'LITTLE', 'PURPOSE', 'INDEED', 'I', 'WRIT', 'BY', 'THE', 'FIRST', 'POST', 'AFTER', 'MY', 'ARRIVAL', 'HERE', 'AND', 'CANNOT', 'IMAGINE', 'HOW', 'YOU', 'CAME', 'TO', 'MISS', 'OF', 'MY', 'LETTERS'] +3080-5040-0022-300: hyp=['BUT', 'I', 'AM', 'TROUBLED', 'MUCH', 'YOU', 'SHOULD', 'MAKE', 'SO', 'ILL', 'A', 'JOURNEY', 'TO', 'SAW', 'LITTLE', 'PURPOSE', 'INDEED', 'I', 'WRITE', 'BY', 'THE', 'FIRST', 'POST', 'AFTER', 'MY', 'ARRIVAL', 'HERE', 'AND', 'CANNOT', 'IMAGINE', 'HOW', 'YOU', 'CAME', 'TO', 'MISS', 'OF', 'MY', 'LETTERS'] +3080-5040-0023-301: ref=['HOW', 'WELCOME', 'YOU', 'WILL', 'BE', 'BUT', 'ALAS'] +3080-5040-0023-301: hyp=['OH', 'WELCOME', 'YOU', 'WILL', 'BE', 'BUT', 'ALAS'] +3080-5040-0024-302: ref=['FOR', 'MY', 'LIFE', 'I', 'CANNOT', 'BEAT', 'INTO', 'THEIR', 'HEADS', 'A', 'PASSION', 'THAT', 'MUST', 'BE', 'SUBJECT', 'TO', 'NO', 'DECAY', 'AN', 'EVEN', 'PERFECT', 'KINDNESS', 'THAT', 'MUST', 'LAST', 'PERPETUALLY', 'WITHOUT', 'THE', 'LEAST', 'INTERMISSION'] +3080-5040-0024-302: hyp=['FOR', 'MY', 'LIFE', 'I', 'CANNOT', 'BEAT', 'INTO', 'THEIR', 'HEADS', 'A', 'PASSION', 'THAT', 'MUST', 'BE', 'SUBJECT', 'TO', 'NO', 'DECAY', 'AND', 'EVEN', 'PERFECT', 'KINDNESS', 'THAT', 'MUST', 'LAST', 'PERPETUALLY', 'WITHOUT', 'THE', 'LEAST', 'INTERMISSION'] +3080-5040-0025-303: ref=['THEY', 'LAUGH', 'TO', 'HEAR', 'ME', 'SAY', 'THAT', 'ONE', 'UNKIND', 'WORD', 'WOULD', 'DESTROY', 'ALL', 'THE', 'SATISFACTION', 'OF', 'MY', 'LIFE', 'AND', 'THAT', 'I', 'SHOULD', 'EXPECT', 'OUR', 'KINDNESS', 'SHOULD', 'INCREASE', 'EVERY', 'DAY', 'IF', 'IT', 'WERE', 'POSSIBLE', 'BUT', 'NEVER', 'LESSEN'] +3080-5040-0025-303: hyp=['THEY', 'LAUGH', 'TO', 'HEAR', 'ME', 'SAY', 'THAT', 'ONE', 'UNKIND', 'WORD', 'WOULD', 'DESTROY', 'ALL', 'THE', 'SATISFACTION', 'OF', 'MY', 'LIFE', 'AND', 'THAT', 'I', 'SHOULD', 'EXPECT', 'OUR', 'KINDNESS', 'SHOULD', 'INCREASE', 'EVERY', 'DAY', 'IF', 'IT', 'WERE', 'POSSIBLE', 'BUT', 'NEVER', 'LESSEN'] +3080-5040-0026-304: ref=['WE', 'GO', 'ABROAD', 'ALL', 'DAY', 'AND', 'PLAY', 'ALL', 'NIGHT', 'AND', 'SAY', 'OUR', 'PRAYERS', 'WHEN', 'WE', 'HAVE', 'TIME'] 
+3080-5040-0026-304: hyp=['WE', 'GO', 'ABROAD', 'ALL', 'DAY', 'AND', 'PLAY', 'ALL', 'NIGHT', 'AND', 'SEE', 'OUR', 'PRAY', 'AS', 'WHEN', 'WE', 'HAVE', 'TIME'] +3080-5040-0027-305: ref=['WELL', 'IN', 'SOBER', 'EARNEST', 'NOW', 'I', 'WOULD', 'NOT', 'LIVE', 'THUS', 'A', 'TWELVEMONTH', 'TO', 'GAIN', 'ALL', 'THAT', 'THE', 'KING', 'HAS', 'LOST', 'UNLESS', 'IT', 'WERE', 'TO', 'GIVE', 'IT', 'HIM', 'AGAIN'] +3080-5040-0027-305: hyp=['WHILE', 'IN', 'SOBER', 'EARNEST', 'NOW', 'I', 'WOULD', 'NOT', 'LIVE', 'THUS', 'AT', 'TWELVEMONTH', 'TO', 'GAIN', 'ALL', 'THAT', 'KING', 'HAS', 'LOST', 'UNLESS', 'IT', 'WERE', 'TO', 'GIVE', 'IT', 'HIM', 'AGAIN'] +3080-5040-0028-306: ref=['WILL', 'YOU', 'BE', 'SO', 'GOOD', 'NATURED'] +3080-5040-0028-306: hyp=['WILL', 'YOU', 'BE', 'SO', 'GOOD', 'NATURED'] +3080-5040-0029-307: ref=['HE', 'HAS', 'ONE', 'SON', 'AND', 'TIS', 'THE', 'FINEST', 'BOY', 'THAT', "E'ER", 'YOU', 'SAW', 'AND', 'HAS', 'A', 'NOBLE', 'SPIRIT', 'BUT', 'YET', 'STANDS', 'IN', 'THAT', 'AWE', 'OF', 'HIS', 'FATHER', 'THAT', 'ONE', 'WORD', 'FROM', 'HIM', 'IS', 'AS', 'MUCH', 'AS', 'TWENTY', 'WHIPPINGS'] +3080-5040-0029-307: hyp=['HE', 'HAS', 'ONE', 'SON', 'AND', 'TIS', 'THE', 'FINEST', 'BOY', 'THAT', "E'ER", 'YOU', 'SAW', 'AND', 'HAS', 'A', 'NOBLE', 'SPIRIT', 'BUT', 'YET', 'STANDS', 'IN', 'THAT', 'AWE', 'OF', 'HIS', 'FATHER', 'THAT', 'ONE', 'WORD', 'FROM', 'HIM', 'IS', 'AS', 'MUCH', 'AS', 'TWENTY', 'WHIPPINGS'] +3080-5040-0030-308: ref=['YOU', 'MUST', 'GIVE', 'ME', 'LEAVE', 'TO', 'ENTERTAIN', 'YOU', 'THUS', 'WITH', 'DISCOURSES', 'OF', 'THE', 'FAMILY', 'FOR', 'I', 'CAN', 'TELL', 'YOU', 'NOTHING', 'ELSE', 'FROM', 'HENCE'] +3080-5040-0030-308: hyp=['YOU', 'MUST', 'GIVE', 'ME', 'LEAVE', 'TO', 'ENTERTAIN', 'YOURSELVES', 'WITH', 'DISCOURSES', 'OF', 'THE', 'FAMILY', 'FOR', 'I', 'CAN', 'TELL', 'YOU', 'NOTHING', 'ELSE', 'FROM', 'HENCE'] +3080-5040-0031-309: ref=['NOT', 'TO', 'KNOW', 'WHEN', 'YOU', 'WOULD', 'COME', 'HOME', 'I', 'CAN', 'ASSURE', 'YOU', 'NOR', 'FOR', 'ANY', 'OTHER', 'OCCASION', 'OF', 'MY', 'OWN', 'BUT', 'WITH', 'A', 'COUSIN', 'OF', 'MINE', 'THAT', 'HAD', 'LONG', 'DESIGNED', 'TO', 'MAKE', 'HERSELF', 'SPORT', 'WITH', 'HIM', 'AND', 'DID', 'NOT', 'MISS', 'OF', 'HER', 'AIM'] +3080-5040-0031-309: hyp=['NOT', 'TO', 'KNOW', 'WHEN', 'YOU', 'HAD', 'COME', 'HOME', 'I', 'CAN', 'ASSURE', 'YOU', 'NO', 'FOR', 'ANY', 'OTHER', 'CASION', 'OF', 'MY', 'OWN', 'BUT', 'WITH', 'A', 'COUSIN', 'OF', 'MINE', 'THAT', 'HAD', 'LONG', 'DESIGN', 'TO', 'MAKE', 'HERSELF', 'SPORT', 'WITH', 'HIM', 'AND', 'DID', 'NOT', 'MISS', 'OF', 'HER', 'AIM'] +3080-5040-0032-310: ref=['IN', 'MY', 'LIFE', 'I', 'NEVER', 'HEARD', 'SO', 'RIDICULOUS', 'A', 'DISCOURSE', 'AS', 'HE', 'MADE', 'US', 'AND', 'NO', 'OLD', 'WOMAN', 'WHO', 'PASSES', 'FOR', 'A', 'WITCH', 'COULD', 'HAVE', 'BEEN', 'MORE', 'PUZZLED', 'TO', 'SEEK', 'WHAT', 'TO', 'SAY', 'TO', 'REASONABLE', 'PEOPLE', 'THAN', 'HE', 'WAS'] +3080-5040-0032-310: hyp=['IN', 'MY', 'LIFE', 'I', 'NEVER', 'HEARD', 'SO', 'RIDICULOUS', 'A', 'DISCOURSE', 'AS', 'HE', 'MADE', 'US', 'AND', 'NO', 'OLD', 'WOMAN', 'WHO', 'PAUSES', 'FOR', 'A', 'WITCH', 'COULD', 'HAVE', 'BEEN', 'MORE', 'PUZZLED', 'TO', 'SEEK', 'WHAT', 'TO', 'SAY', 'TO', 'REASONABLE', 'PEOPLE', 'THAN', 'HE', 'WAS'] +3080-5040-0033-311: ref=['EVER', 'SINCE', 'THIS', 'ADVENTURE', 'I', 'HAVE', 'HAD', 'SO', 'GREAT', 'A', 'BELIEF', 'IN', 'ALL', 'THINGS', 'OF', 'THIS', 'NATURE', 'THAT', 'I', 'COULD', 'NOT', 'FORBEAR', 'LAYING', 'A', 'PEAS', 'COD', 'WITH', 'NINE', 'PEAS', "IN'T", 'UNDER', 'MY', 'DOOR', 'YESTERDAY', 'AND', 'WAS', 'INFORMED', 'BY', 'IT', 'THAT', 'MY', 
"HUSBAND'S", 'NAME', 'SHOULD', 'BE', 'THOMAS', 'HOW', 'DO', 'YOU', 'LIKE', 'THAT'] +3080-5040-0033-311: hyp=['EVER', 'SINCE', 'THIS', 'ADVENTURE', 'I', 'HAVE', 'HAD', 'SO', 'GREAT', 'A', 'BELIEF', 'IN', 'ALL', 'THINGS', 'FOR', 'THIS', 'NATURE', 'THAT', 'I', 'COULD', 'NOT', 'FORBEAR', 'LAYING', 'A', "PEA'S", 'CART', 'WITH', 'NINE', 'PEAS', 'INTO', 'UNDER', 'MY', 'DOOR', 'YESTERDAY', 'AND', 'WAS', 'INFORMED', 'BY', 'IT', 'THAT', 'MY', "HUSBAND'S", 'NAME', 'SHOULD', 'BE', 'THOMAS', 'HOW', 'DO', 'YOU', 'LIKE', 'THAT'] +3331-159605-0000-695: ref=['SHE', 'PULLED', 'HER', 'HAIR', 'DOWN', 'TURNED', 'HER', 'SKIRT', 'BACK', 'PUT', 'HER', 'FEET', 'ON', 'THE', 'FENDER', 'AND', 'TOOK', 'PUTTEL', 'INTO', 'HER', 'LAP', 'ALL', 'OF', 'WHICH', 'ARRANGEMENTS', 'SIGNIFIED', 'THAT', 'SOMETHING', 'VERY', 'IMPORTANT', 'HAD', 'GOT', 'TO', 'BE', 'THOUGHT', 'OVER', 'AND', 'SETTLED'] +3331-159605-0000-695: hyp=['SHE', 'PULLED', 'HER', 'HAIR', 'DOWN', 'TURNED', 'AS', 'GOOD', 'BACK', 'PUT', 'HER', 'FEET', 'ON', 'THE', 'FENDER', 'AND', 'TOOK', 'POTTER', 'INTO', 'HER', 'LAP', 'ALL', 'OF', 'WHICH', 'ARRANGEMENTS', 'SIGNIFIED', 'THAT', 'SOMETHING', 'VERY', 'IMPORTANT', 'HAD', 'GOT', 'TO', 'BE', 'THOUGHT', 'OVER', 'AND', 'SETTLED'] +3331-159605-0001-696: ref=['THE', 'MORE', 'PROPOSALS', 'THE', 'MORE', 'CREDIT'] +3331-159605-0001-696: hyp=['THE', 'MORE', 'PROPOSALS', 'THE', 'MORE', 'CREDITED'] +3331-159605-0002-697: ref=['I', 'VE', 'TRIED', 'IT', 'AND', 'LIKED', 'IT', 'AND', 'MAYBE', 'THIS', 'IS', 'THE', 'CONSEQUENCE', 'OF', 'THAT', "NIGHT'S", 'FUN'] +3331-159605-0002-697: hyp=["I'VE", 'TRIED', 'IT', 'AND', 'LIKED', 'IT', 'AND', 'MAYBE', 'THIS', 'IS', 'THE', 'CONSEQUENCE', 'OF', 'THAT', "NIGHT'S", 'FUN'] +3331-159605-0003-698: ref=['JUST', 'SUPPOSE', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'DOES', 'ASK', 'ME', 'AND', 'I', 'SAY', 'YES'] +3331-159605-0003-698: hyp=['JUST', 'SUPPOSE', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'DOES', 'ASK', 'ME', 'AND', 'I', 'SAY', 'YES'] +3331-159605-0004-699: ref=['WHAT', 'A', 'SPITEFUL', 'THING', 'I', 'AM'] +3331-159605-0004-699: hyp=['WHAT', 'A', 'SPITEFUL', 'THING', 'I', 'AM'] +3331-159605-0005-700: ref=['I', 'COULD', 'DO', 'SO', 'MUCH', 'FOR', 'ALL', 'AT', 'HOME', 'HOW', 'I', 'SHOULD', 'ENJOY', 'THAT'] +3331-159605-0005-700: hyp=['I', 'COULD', 'DO', 'SO', 'MUCH', 'FOR', 'ALL', 'AT', 'HOME', 'HOW', 'I', 'SHOULD', 'ENJOY', 'THAT'] +3331-159605-0006-701: ref=['LET', 'ME', 'SEE', 'HOW', 'CAN', 'I', 'BEGIN'] +3331-159605-0006-701: hyp=['THAT', 'MISS', 'C', 'HOW', 'CAN', 'I', 'BEGIN'] +3331-159605-0007-702: ref=['HE', 'HAS', 'KNOWN', 'HER', 'ALL', 'HER', 'LIFE', 'AND', 'HAS', 'A', 'GOOD', 'INFLUENCE', 'OVER', 'HER'] +3331-159605-0007-702: hyp=['HE', 'HAS', 'KNOWN', 'HER', 'ALL', 'HER', 'LIFE', 'AND', 'HAS', 'A', 'GOOD', 'INFLUENCE', 'OVER', 'HER'] +3331-159605-0008-703: ref=['NOW', 'AS', 'POLLY', 'WAS', 'BY', 'NO', 'MEANS', 'A', 'PERFECT', 'CREATURE', 'I', 'AM', 'FREE', 'TO', 'CONFESS', 'THAT', 'THE', 'OLD', 'TEMPTATION', 'ASSAILED', 'HER', 'MORE', 'THAN', 'ONCE', 'THAT', 'WEEK', 'FOR', 'WHEN', 'THE', 'FIRST', 'EXCITEMENT', 'OF', 'THE', 'DODGING', 'REFORM', 'HAD', 'SUBSIDED', 'SHE', 'MISSED', 'THE', 'PLEASANT', 'LITTLE', 'INTERVIEWS', 'THAT', 'USED', 'TO', 'PUT', 'A', 'CERTAIN', 'FLAVOR', 'OF', 'ROMANCE', 'INTO', 'HER', 'DULL', 'HARD', 'WORKING', 'DAYS'] +3331-159605-0008-703: hyp=['NOW', 'AS', 'POLLY', 'WAS', 'BY', 'NO', 'MEANS', 'A', 'PERFECT', 'CREATURE', 'I', 'AM', 'FREE', 'TO', 'CONFESS', 'THAT', 'THE', 'OLD', 'TEMPTATION', 'ASSAILED', 'HIM', 'MORE', 'THAN', 'ONCE', 'THE', 'WEEK', 'FOR', 'WHEN', 'THE', 
'FIRST', 'EXCITEMENT', 'OF', 'THE', 'DODGING', 'REFORM', 'HAD', 'SUBSIDED', 'SHE', 'MISSED', 'THE', 'PLEASANT', 'LITTLE', 'INTERVIEWS', 'THAT', 'USED', 'TO', 'PUT', 'A', 'CERTAIN', 'FLAVOUR', 'OF', 'ROMANS', 'INTO', 'HER', 'DULL', 'HARD', 'WORKING', 'DAYS'] +3331-159605-0009-704: ref=['I', "DON'T", 'THINK', 'IT', 'WAS', 'HIS', 'WEALTH', 'ACCOMPLISHMENTS', 'OR', 'POSITION', 'THAT', 'MOST', 'ATTRACTED', 'POLLY', 'THOUGH', 'THESE', 'DOUBTLESS', 'POSSESSED', 'A', 'GREATER', 'INFLUENCE', 'THAN', 'SHE', 'SUSPECTED'] +3331-159605-0009-704: hyp=['I', "DON'T", 'THINK', 'IT', 'WAS', 'HIS', 'WEALTH', 'ACCOMPLISHMENTS', 'OPPOSITION', 'THAT', 'MOST', 'ATTRACTED', 'POLLY', 'THOUGH', 'THESE', 'DOUBTLESS', 'POSSESSED', 'A', 'GREATER', 'INFLUENCE', 'THAN', 'SHE', 'SUSPECTED'] +3331-159605-0010-705: ref=['IT', 'WAS', 'THAT', 'INDESCRIBABLE', 'SOMETHING', 'WHICH', 'WOMEN', 'ARE', 'QUICK', 'TO', 'SEE', 'AND', 'FEEL', 'IN', 'MEN', 'WHO', 'HAVE', 'BEEN', 'BLESSED', 'WITH', 'WISE', 'AND', 'GOOD', 'MOTHERS'] +3331-159605-0010-705: hyp=['IT', 'WAS', 'THAT', 'INDESCRIBABLE', 'SOMETHING', 'WHICH', 'WOMEN', 'ARE', 'QUICK', 'TO', 'SEE', 'AND', 'FEEL', 'IN', 'MEN', 'WHO', 'HAVE', 'BEEN', 'BLESSED', 'WISE', 'AND', 'GOOD', 'MOTHERS'] +3331-159605-0011-706: ref=['THIS', 'HAD', 'AN', 'ESPECIAL', 'CHARM', 'TO', 'POLLY', 'FOR', 'SHE', 'SOON', 'FOUND', 'THAT', 'THIS', 'SIDE', 'OF', 'HIS', 'CHARACTER', 'WAS', 'NOT', 'SHOWN', 'TO', 'EVERY', 'ONE'] +3331-159605-0011-706: hyp=['THIS', 'HAD', 'AN', 'ESPECIAL', 'CHARM', 'TO', 'POLLY', 'FOR', 'SHE', 'SOON', 'FOUND', 'THAT', 'THIS', 'SIDE', 'OF', 'HIS', 'CHARACTER', 'WAS', 'NOT', 'SHOWN', 'TO', 'EVERY', 'ONE'] +3331-159605-0012-707: ref=['LATELY', 'THIS', 'HAD', 'CHANGED', 'ESPECIALLY', 'TOWARDS', 'POLLY', 'AND', 'IT', 'FLATTERED', 'HER', 'MORE', 'THAN', 'SHE', 'WOULD', 'CONFESS', 'EVEN', 'TO', 'HERSELF'] +3331-159605-0012-707: hyp=['PLATELY', 'THIS', 'HAD', 'CHANGED', 'ESPECIALLY', 'TOWARDS', 'POLLY', 'AND', 'IT', 'FLUTTERED', 'HER', 'MORE', 'THAN', 'SHE', 'WOULD', 'CONFESS', 'EVEN', 'TO', 'HERSELF'] +3331-159605-0013-708: ref=['AT', 'FIRST', 'SHE', 'TRIED', 'TO', 'THINK', 'SHE', 'COULD', 'BUT', 'UNFORTUNATELY', 'HEARTS', 'ARE', 'SO', 'CONTRARY', 'THAT', 'THEY', "WON'T", 'BE', 'OBEDIENT', 'TO', 'REASON', 'WILL', 'OR', 'EVEN', 'GRATITUDE'] +3331-159605-0013-708: hyp=['AT', 'FIRST', 'SHE', 'TRIED', 'TO', 'THINK', 'SHE', 'COULD', 'BUT', 'UNFORTUNATELY', 'HEARTS', 'ARE', 'SO', 'CONTRARY', 'THAT', 'THEY', "WON'T", 'BE', 'OBEDIENT', 'TO', 'REASON', 'WILL', 'OR', 'EVEN', 'CREDITUDE'] +3331-159605-0014-709: ref=['POLLY', 'FELT', 'A', 'VERY', 'CORDIAL', 'FRIENDSHIP', 'FOR', 'MISTER', 'SYDNEY', 'BUT', 'NOT', 'ONE', 'PARTICLE', 'OF', 'THE', 'LOVE', 'WHICH', 'IS', 'THE', 'ONLY', 'COIN', 'IN', 'WHICH', 'LOVE', 'CAN', 'BE', 'TRULY', 'PAID'] +3331-159605-0014-709: hyp=['POLLY', 'FELT', 'A', 'VERY', 'CORDIAL', 'FRIENDSHIP', 'FOR', 'MISTER', 'SYDNEY', 'BUT', 'NOT', 'ONE', 'PARTICLE', 'OF', 'THE', 'LAW', 'PITCHES', 'THE', 'ONLY', 'KIND', 'IN', 'WHICH', 'LOVE', 'CAN', 'BE', 'TRULY', 'PAID'] +3331-159605-0015-710: ref=['THIS', 'FINISHED', "POLLY'S", 'INDECISION', 'AND', 'AFTER', 'THAT', 'NIGHT', 'SHE', 'NEVER', 'ALLOWED', 'HERSELF', 'TO', 'DWELL', 'UPON', 'THE', 'PLEASANT', 'TEMPTATION', 'WHICH', 'CAME', 'IN', 'A', 'GUISE', 'PARTICULARLY', 'ATTRACTIVE', 'TO', 'A', 'YOUNG', 'GIRL', 'WITH', 'A', 'SPICE', 'OF', 'THE', 'OLD', 'EVE', 'IN', 'HER', 'COMPOSITION'] +3331-159605-0015-710: hyp=['THIS', 'FINISHED', "POLLY'S", 'INDECISION', 'AND', 'AFTER', 'THAT', 'NIGHT', 'SHE', 'NEVER', 'ALLOWED', 'HERSELF', 
'TO', 'DWELL', 'UPON', 'THE', 'PLEASANT', 'TEMPTATION', 'WHICH', 'CAME', 'IN', 'A', 'GUISE', 'PARTICULARLY', 'ATTRACTIVE', 'TO', 'A', 'YOUNG', 'GIRL', 'WITH', 'A', 'SPIES', 'OF', 'THE', 'OLD', 'EVE', 'IN', 'HER', 'COMPOSITION'] +3331-159605-0016-711: ref=['WHEN', 'SATURDAY', 'CAME', 'POLLY', 'STARTED', 'AS', 'USUAL', 'FOR', 'A', 'VISIT', 'TO', 'BECKY', 'AND', 'BESS', 'BUT', 'COULD', "N'T", 'RESIST', 'STOPPING', 'AT', 'THE', 'SHAWS', 'TO', 'LEAVE', 'A', 'LITTLE', 'PARCEL', 'FOR', 'FAN', 'THOUGH', 'IT', 'WAS', 'CALLING', 'TIME'] +3331-159605-0016-711: hyp=['WHEN', 'SEDATE', 'CAME', 'POLLY', 'STARTED', 'AS', 'USUAL', 'FOR', 'A', 'VISIT', 'TO', 'BACKY', 'AND', 'BESS', 'BUT', "COULDN'T", 'RESIST', 'STOPPING', 'AT', 'THE', 'SHORES', 'TO', 'LEAVE', 'A', 'LITTLE', 'PARCEL', 'FOR', 'FAN', 'THAT', 'WAS', 'CALLING', 'TIME'] +3331-159605-0017-712: ref=['A', 'FOOLISH', 'LITTLE', 'SPEECH', 'TO', 'MAKE', 'TO', 'A', 'DOG', 'BUT', 'YOU', 'SEE', 'POLLY', 'WAS', 'ONLY', 'A', 'TENDER', 'HEARTED', 'GIRL', 'TRYING', 'TO', 'DO', 'HER', 'DUTY'] +3331-159605-0017-712: hyp=['A', 'FOOLISH', 'LITTLE', 'SPEECH', 'TO', 'MAKE', 'TO', 'A', 'DARK', 'BUT', 'YOU', 'SEE', 'POLLY', 'WAS', 'ONLY', 'A', 'TENDER', 'HEARTED', 'GIRL', 'TRYING', 'TO', 'HER', 'DUTY'] +3331-159605-0018-713: ref=['TAKE', 'HOLD', 'OF', 'MASTER', "CHARLEY'S", 'HAND', 'MISS', 'MAMIE', 'AND', 'WALK', 'PRETTY', 'LIKE', 'WILLY', 'AND', 'FLOSSY', 'SAID', 'THE', 'MAID'] +3331-159605-0018-713: hyp=['TAKE', 'HOLD', 'OF', 'MASSA', "CHARLIE'S", 'HAND', 'MISS', 'MAMMY', 'AND', 'WALK', 'PRETTY', 'LIKE', 'BILLY', 'AND', 'FLOSSIE', 'SAID', 'THE', 'MAID'] +3331-159605-0019-714: ref=['AT', 'A', 'STREET', 'CORNER', 'A', 'BLACK', 'EYED', 'SCHOOL', 'BOY', 'WAS', 'PARTING', 'FROM', 'A', 'ROSY', 'FACED', 'SCHOOL', 'GIRL', 'WHOSE', 'MUSIC', 'ROLL', 'HE', 'WAS', 'RELUCTANTLY', 'SURRENDERING'] +3331-159605-0019-714: hyp=['AT', 'A', 'STREET', 'CORNER', 'A', 'BLACK', 'EYED', 'SCHOOLBOY', 'WAS', 'PARTING', 'FROM', 'A', 'ROSY', 'FACED', 'SCHOOL', 'GIRL', 'WHOSE', 'MUSIC', 'ROLL', 'HE', 'WAS', 'RELUCTANTLY', 'SURRENDERING'] +3331-159605-0020-715: ref=['HOW', 'HE', 'GOT', 'THERE', 'WAS', 'NEVER', 'VERY', 'CLEAR', 'TO', 'POLLY', 'BUT', 'THERE', 'HE', 'WAS', 'FLUSHED', 'AND', 'A', 'LITTLE', 'OUT', 'OF', 'BREATH', 'BUT', 'LOOKING', 'SO', 'GLAD', 'TO', 'SEE', 'HER', 'THAT', 'SHE', 'HAD', "N'T", 'THE', 'HEART', 'TO', 'BE', 'STIFF', 'AND', 'COOL', 'AS', 'SHE', 'HAD', 'FULLY', 'INTENDED', 'TO', 'BE', 'WHEN', 'THEY', 'MET'] +3331-159605-0020-715: hyp=['HOW', 'HE', 'GOT', 'THERE', 'WAS', 'NEVER', 'VERY', 'CLEAR', 'TO', 'POLLY', 'BUT', 'THERE', 'HE', 'WAS', 'FLUSHED', 'AND', 'A', 'LITTLE', 'OUT', 'OF', 'BREATH', 'BUT', 'LOOKING', 'SO', 'GLAD', 'TO', 'SEE', 'HER', 'TILL', 'SHE', 'HAD', 'NOT', 'THE', 'HEART', 'TO', 'BE', 'STIFF', 'AND', 'COOL', 'AS', 'SHE', 'HAD', 'FULLY', 'INTENDED', 'TO', 'BE', 'WHEN', 'THEY', 'MET'] +3331-159605-0021-716: ref=['SHE', 'REALLY', 'COULD', "N'T", 'HELP', 'IT', 'IT', 'WAS', 'SO', 'PLEASANT', 'TO', 'SEE', 'HIM', 'AGAIN', 'JUST', 'WHEN', 'SHE', 'WAS', 'FEELING', 'SO', 'LONELY'] +3331-159605-0021-716: hyp=['SHE', 'REALLY', 'COULD', 'NOT', 'HELP', 'IT', 'IT', 'WAS', 'SO', 'PLEASANT', 'TO', 'SEE', 'HIM', 'AGAIN', 'JUST', 'WHEN', 'SHE', 'WAS', 'FEELING', 'SO', 'LONELY'] +3331-159605-0022-717: ref=['THAT', 'IS', 'THE', 'WAY', 'I', 'GET', 'TO', 'THE', 'ROTHS', 'ANSWERED', 'POLLY'] +3331-159605-0022-717: hyp=['THAT', 'IS', 'THE', 'WAY', 'I', 'GET', 'TO', 'THE', 'ROSS', 'ANSWERED', 'POLLY'] +3331-159605-0023-718: ref=['SHE', 'DID', 'NOT', 'MEAN', 'TO', 'TELL', 'BUT', 
'HIS', 'FRANKNESS', 'WAS', 'SO', 'AGREEABLE', 'SHE', 'FORGOT', 'HERSELF'] +3331-159605-0023-718: hyp=['SHE', 'DID', 'NOT', 'MEAN', 'TO', 'TELL', 'BUT', 'HIS', 'FRANKNESS', 'WAS', 'SO', 'AGREEABLE', 'SHE', 'FORGOT', 'HERSELF'] +3331-159605-0024-719: ref=['BUT', 'I', 'KNOW', 'HER', 'BETTER', 'AND', 'I', 'ASSURE', 'YOU', 'THAT', 'SHE', 'DOES', 'IMPROVE', 'SHE', 'TRIES', 'TO', 'MEND', 'HER', 'FAULTS', 'THOUGH', 'SHE', "WON'T", 'OWN', 'IT', 'AND', 'WILL', 'SURPRISE', 'YOU', 'SOME', 'DAY', 'BY', 'THE', 'AMOUNT', 'OF', 'HEART', 'AND', 'SENSE', 'AND', 'GOODNESS', 'SHE', 'HAS', 'GOT'] +3331-159605-0024-719: hyp=['BUT', 'I', 'KNOW', 'HER', 'BETTER', 'AND', 'I', 'ASSURE', 'YOU', 'THAT', 'SHE', "DOESN'T", 'PROVE', 'SHE', 'TRIES', 'TO', 'MENTAL', 'FAULTS', 'THOUGH', 'SHE', "WON'T", 'OWN', 'IT', 'AND', 'WAS', 'SURPRISE', 'YOU', 'SOME', 'DAY', 'BY', 'THE', 'AMOUNT', 'OF', 'HEART', 'AND', 'SENSE', 'AND', 'GOODNESS', 'SHE', 'HAS', 'GOT'] +3331-159605-0025-720: ref=['THANK', 'YOU', 'NO'] +3331-159605-0025-720: hyp=['THANK', 'YOU', 'NO'] +3331-159605-0026-721: ref=['HOW', 'LOVELY', 'THE', 'PARK', 'LOOKS', 'SHE', 'SAID', 'IN', 'GREAT', 'CONFUSION'] +3331-159605-0026-721: hyp=['HOW', 'LOVELY', 'THE', 'PARK', 'LOOKS', 'SHE', 'SAID', 'IN', 'GREAT', 'CONFUSION'] +3331-159605-0027-722: ref=['ASKED', 'THE', 'ARTFUL', 'YOUNG', 'MAN', 'LAYING', 'A', 'TRAP', 'INTO', 'WHICH', 'POLLY', 'IMMEDIATELY', 'FELL'] +3331-159605-0027-722: hyp=['ASKED', 'THE', 'ARTFUL', 'YOUNG', 'MAN', 'LAYING', 'A', 'TRAP', 'INTO', 'WHICH', 'POLLY', 'IMMEDIATELY', 'FELL'] +3331-159605-0028-723: ref=['HE', 'WAS', 'QUICKER', 'TO', 'TAKE', 'A', 'HINT', 'THAN', 'SHE', 'HAD', 'EXPECTED', 'AND', 'BEING', 'BOTH', 'PROUD', 'AND', 'GENEROUS', 'RESOLVED', 'TO', 'SETTLE', 'THE', 'MATTER', 'AT', 'ONCE', 'FOR', "POLLY'S", 'SAKE', 'AS', 'WELL', 'AS', 'HIS', 'OWN'] +3331-159605-0028-723: hyp=['HE', 'WAS', 'QUICKER', 'TO', 'TAKE', 'A', 'HINT', 'THAN', 'SHE', 'HAD', 'EXPECTED', 'AND', 'BEING', 'BOTH', 'PROUD', 'AND', 'GENEROUS', 'WE', 'SOFT', 'TO', 'SETTLE', 'THE', 'MATTER', 'AT', 'ONCE', 'FOR', "POLLY'S", 'SAKE', 'AS', 'WELL', 'AS', 'HIS', 'OWN'] +3331-159605-0029-724: ref=['SO', 'WHEN', 'SHE', 'MADE', 'HER', 'LAST', 'BRILLIANT', 'REMARK', 'HE', 'SAID', 'QUIETLY', 'WATCHING', 'HER', 'FACE', 'KEENLY', 'ALL', 'THE', 'WHILE', 'I', 'THOUGHT', 'SO', 'WELL', 'I', 'M', 'GOING', 'OUT', 'OF', 'TOWN', 'ON', 'BUSINESS', 'FOR', 'SEVERAL', 'WEEKS', 'SO', 'YOU', 'CAN', 'ENJOY', 'YOUR', 'LITTLE', 'BIT', 'OF', 'COUNTRY', 'WITHOUT', 'BEING', 'ANNOYED', 'BY', 'ME', 'ANNOYED'] +3331-159605-0029-724: hyp=['SO', 'WHEN', 'SHE', 'MADE', 'HER', 'LAST', 'POINT', 'REMARK', 'HE', 'SAID', 'QUIETLY', 'WATCHING', 'HER', 'FACE', 'KEENLY', 'ALL', 'THE', 'WHILE', 'I', 'THOUGHT', 'SO', 'WELL', "I'M", 'GOING', 'OUT', 'OF', 'TOWN', 'ON', 'BUSINESS', 'FOR', 'SEVERAL', 'WEEKS', 'SO', 'YOU', 'CAN', 'ENJOY', 'YOU', 'LITTLE', 'BIT', 'OF', 'COUNTRY', 'WITHOUT', 'BEING', 'ANNOYED', 'BY', 'ME', 'ANNOY', 'IT'] +3331-159605-0030-725: ref=['SHE', 'THOUGHT', 'SHE', 'HAD', 'A', 'GOOD', 'DEAL', 'OF', 'THE', 'COQUETTE', 'IN', 'HER', 'AND', 'I', 'VE', 'NO', 'DOUBT', 'THAT', 'WITH', 'TIME', 'AND', 'TRAINING', 'SHE', 'WOULD', 'HAVE', 'BECOME', 'A', 'VERY', 'DANGEROUS', 'LITTLE', 'PERSON', 'BUT', 'NOW', 'SHE', 'WAS', 'FAR', 'TOO', 'TRANSPARENT', 'AND', 'STRAIGHTFORWARD', 'BY', 'NATURE', 'EVEN', 'TO', 'TELL', 'A', 'WHITE', 'LIE', 'CLEVERLY'] +3331-159605-0030-725: hyp=['SHE', 'THOUGHT', 'SHE', 'HAD', 'A', 'GOOD', 'DEAL', 'OF', 'THE', 'COQUETTE', 'IN', 'HER', 'AND', "I'VE", 'NO', 'DOUBT', 'THAT', 'WITH', 'TIME', 
'AND', 'TRAINING', 'SHE', 'WOULD', 'HAVE', 'BECOME', 'A', 'VERY', 'DANGEROUS', 'LITTLE', 'PERSON', 'BUT', 'NOW', 'SHE', 'WAS', 'FAR', 'TO', 'TRANSPARENT', 'AND', 'STRAIGHTFORWARD', 'BY', 'NATURE', 'EVEN', 'TO', 'TELL', 'A', 'WIDE', 'LIKE', 'LEVELLY'] +3331-159605-0031-726: ref=['HE', 'WAS', 'GONE', 'BEFORE', 'SHE', 'COULD', 'DO', 'ANYTHING', 'BUT', 'LOOK', 'UP', 'AT', 'HIM', 'WITH', 'A', 'REMORSEFUL', 'FACE', 'AND', 'SHE', 'WALKED', 'ON', 'FEELING', 'THAT', 'THE', 'FIRST', 'AND', 'PERHAPS', 'THE', 'ONLY', 'LOVER', 'SHE', 'WOULD', 'EVER', 'HAVE', 'HAD', 'READ', 'HIS', 'ANSWER', 'AND', 'ACCEPTED', 'IT', 'IN', 'SILENCE'] +3331-159605-0031-726: hyp=['HE', 'WAS', 'GONE', 'BEFORE', 'SHE', 'COULD', 'DO', 'ANYTHING', 'BUT', 'LOOK', 'UP', 'AT', 'HIM', 'WITH', 'A', 'REMORSEFUL', 'FACE', 'AND', 'SHE', 'WALKED', 'ON', 'FEELING', 'THAT', 'THE', 'FIRST', 'AND', 'PERHAPS', 'THE', 'ONLY', 'LOVER', 'SHE', 'WOULD', 'EVER', 'HAVE', 'HAD', 'READ', 'HIS', 'ANSWER', 'AND', 'ACCEPTED', 'IN', 'SILENCE'] +3331-159605-0032-727: ref=['POLLY', 'DID', 'NOT', 'RETURN', 'TO', 'HER', 'FAVORITE', 'WALK', 'TILL', 'SHE', 'LEARNED', 'FROM', 'MINNIE', 'THAT', 'UNCLE', 'HAD', 'REALLY', 'LEFT', 'TOWN', 'AND', 'THEN', 'SHE', 'FOUND', 'THAT', 'HIS', 'FRIENDLY', 'COMPANY', 'AND', 'CONVERSATION', 'WAS', 'WHAT', 'HAD', 'MADE', 'THE', 'WAY', 'SO', 'PLEASANT', 'AFTER', 'ALL'] +3331-159605-0032-727: hyp=['PARLEY', 'DID', 'NOT', 'RETURN', 'TO', 'HER', 'FAVOURITE', 'WALK', 'TILL', 'SHE', 'LEARNED', 'FOR', 'MINNIE', 'THAT', 'UNCLE', 'HAD', 'REALLY', 'LEFT', 'TOWN', 'AND', 'THEN', 'SHE', 'FOUND', 'THAT', 'HIS', 'FRIENDLY', 'COMPANY', 'AND', 'CONVERSATION', 'WAS', 'WHAT', 'HAD', 'MADE', 'THE', 'WAY', 'SO', 'PLEASANT', 'AFTER', 'ALL'] +3331-159605-0033-728: ref=['WAGGING', 'TO', 'AND', 'FRO', 'AS', 'USUAL', "WHAT'S", 'THE', 'NEWS', 'WITH', 'YOU'] +3331-159605-0033-728: hyp=['WORKING', 'TO', 'AND', 'FRO', 'AS', 'USUAL', "WHAT'S", 'THE', 'NEWS', 'WITH', 'YOU'] +3331-159605-0034-729: ref=['PERHAPS', 'SHE', 'LL', 'JILT', 'HIM'] +3331-159605-0034-729: hyp=['PERHAPS', "SHE'LL", 'CHILLED', 'HIM'] +3331-159605-0035-730: ref=['UTTERLY', 'DONE', 'WITH', 'AND', 'LAID', 'UPON', 'THE', 'SHELF'] +3331-159605-0035-730: hyp=['UTTERLY', 'DONE', 'WITH', 'AND', 'LAID', 'UPON', 'THE', 'SHELF'] +3331-159605-0036-731: ref=['MINNIE', 'SAID', 'THE', 'OTHER', 'DAY', 'SHE', 'WISHED', 'SHE', 'WAS', 'A', 'PIGEON', 'SO', 'SHE', 'COULD', 'PADDLE', 'IN', 'THE', 'PUDDLES', 'AND', 'NOT', 'FUSS', 'ABOUT', 'RUBBERS'] +3331-159605-0036-731: hyp=['MINNIE', 'SAID', 'THE', 'OTHER', 'DAY', 'SHE', 'WISHED', 'SHE', 'WAS', 'A', 'PITCHEN', 'SO', 'SHE', 'COULD', 'PADDLE', 'IN', 'THE', 'BOTTLES', 'AND', 'NUT', 'FUSS', 'ABOUT', 'RUBBERS'] +3331-159605-0037-732: ref=['NOW', "DON'T", 'BE', 'AFFECTED', 'POLLY', 'BUT', 'JUST', 'TELL', 'ME', 'LIKE', 'A', 'DEAR', 'HAS', "N'T", 'HE', 'PROPOSED'] +3331-159605-0037-732: hyp=['NOW', "DON'T", 'BE', 'AFFECTED', 'POLLY', 'BUT', 'JUST', 'TELL', 'ME', 'LIKE', 'A', 'DEAR', 'HAS', 'NOT', 'HE', 'PROPOSED'] +3331-159605-0038-733: ref=["DON'T", 'YOU', 'THINK', 'HE', 'MEANS', 'TO'] +3331-159605-0038-733: hyp=["DON'T", 'YOU', 'THINK', 'HE', 'MEANS', 'TO'] +3331-159605-0039-734: ref=['TRULY', 'TRULY', 'FAN'] +3331-159605-0039-734: hyp=['TRULY', 'JULIE', 'FAN'] +3331-159605-0040-735: ref=['I', "DON'T", 'MEAN', 'TO', 'BE', 'PRYING', 'BUT', 'I', 'REALLY', 'THOUGHT', 'HE', 'DID'] +3331-159605-0040-735: hyp=['I', "DON'T", 'MEAN', 'TO', 'BE', 'PRYING', 'BUT', 'I', 'REALLY', 'THOUGHT', 'HE', 'DID'] +3331-159605-0041-736: ref=['WELL', 'I', 'ALWAYS', 'MEANT', 'TO', 
'TRY', 'IT', 'IF', 'I', 'GOT', 'A', 'CHANCE', 'AND', 'I', 'HAVE'] +3331-159605-0041-736: hyp=['WELL', 'I', 'ALWAYS', 'MEANT', 'TO', 'TRY', 'IT', 'IF', 'I', 'GOT', 'A', 'CHANCE', 'AND', 'I', 'HAVE'] +3331-159605-0042-737: ref=['I', 'JUST', 'GAVE', 'HIM', 'A', 'HINT', 'AND', 'HE', 'TOOK', 'IT'] +3331-159605-0042-737: hyp=['I', 'JUST', 'GAVE', 'HIM', 'A', 'HINT', 'AND', 'HE', 'TOOK', 'IT'] +3331-159605-0043-738: ref=['HE', 'MEANT', 'TO', 'GO', 'AWAY', 'BEFORE', 'THAT', 'SO', "DON'T", 'THINK', 'HIS', 'HEART', 'IS', 'BROKEN', 'OR', 'MIND', 'WHAT', 'SILLY', 'TATTLERS', 'SAY'] +3331-159605-0043-738: hyp=['HE', 'MEANT', 'TO', 'GO', 'AWAY', 'BEFORE', 'THAT', 'SO', "DON'T", 'THINK', 'HIS', 'HEART', 'IS', 'BROKEN', 'O', 'MIND', 'WHAT', 'SIDY', 'TEDLERS', 'SAY'] +3331-159605-0044-739: ref=['HE', 'UNDERSTOOD', 'AND', 'BEING', 'A', 'GENTLEMAN', 'MADE', 'NO', 'FUSS'] +3331-159605-0044-739: hyp=['HE', 'UNDERSTOOD', 'AND', 'BEING', 'A', 'GENTLEMAN', 'MADE', 'NO', 'FUSS'] +3331-159605-0045-740: ref=['BUT', 'POLLY', 'IT', 'WOULD', 'HAVE', 'BEEN', 'A', 'GRAND', 'THING', 'FOR', 'YOU'] +3331-159605-0045-740: hyp=['BUT', 'POLLY', 'IT', 'WOULD', 'HAVE', 'BEEN', 'A', 'GRAND', 'THING', 'FOR', 'YOU'] +3331-159605-0046-741: ref=['I', 'M', 'ODD', 'YOU', 'KNOW', 'AND', 'PREFER', 'TO', 'BE', 'AN', 'INDEPENDENT', 'SPINSTER', 'AND', 'TEACH', 'MUSIC', 'ALL', 'MY', 'DAYS'] +3331-159605-0046-741: hyp=['I', 'AM', 'AUGHT', 'YOU', 'KNOW', 'AND', 'PREFER', 'TO', 'BE', 'AN', 'INDEPENDENT', 'SPINSTER', 'AND', 'TEACH', 'MUSIC', 'ALL', 'MY', 'DAYS'] +3331-159609-0000-742: ref=['NEVER', 'MIND', 'WHAT', 'THE', 'BUSINESS', 'WAS', 'IT', 'SUFFICES', 'TO', 'SAY', 'THAT', 'IT', 'WAS', 'A', 'GOOD', 'BEGINNING', 'FOR', 'A', 'YOUNG', 'MAN', 'LIKE', 'TOM', 'WHO', 'HAVING', 'BEEN', 'BORN', 'AND', 'BRED', 'IN', 'THE', 'MOST', 'CONSERVATIVE', 'CLASS', 'OF', 'THE', 'MOST', 'CONCEITED', 'CITY', 'IN', 'NEW', 'ENGLAND', 'NEEDED', 'JUST', 'THE', 'HEALTHY', 'HEARTY', 'SOCIAL', 'INFLUENCES', 'OF', 'THE', 'WEST', 'TO', 'WIDEN', 'HIS', 'VIEWS', 'AND', 'MAKE', 'A', 'MAN', 'OF', 'HIM'] +3331-159609-0000-742: hyp=['NEVER', 'MIND', 'WHAT', 'THE', 'BUSINESS', 'WAS', 'ITS', 'SURFACES', 'TO', 'SAY', 'THAT', 'IT', 'WAS', 'A', 'GOOD', 'BEGINNING', 'FOR', 'A', 'YOUNG', 'MAN', 'LIKE', 'TOM', 'WHO', 'HAVING', 'BEEN', 'BORN', 'AND', 'BRED', 'IN', 'THE', 'MOST', 'CONSERVATIVE', 'GLASS', 'OF', 'THE', 'MOST', 'CONCEITED', 'CITY', 'IN', 'NEW', 'ENGLAND', 'NEEDED', 'JUST', 'THE', 'HEALTHY', 'HEARTY', 'SOCIAL', 'INFLUENCES', 'OF', 'THE', 'WEST', 'TO', 'WIDEN', 'HIS', 'VIEWS', 'AND', 'MAKE', 'A', 'MAN', 'OF', 'HIM'] +3331-159609-0001-743: ref=['FORTUNATELY', 'EVERY', 'ONE', 'WAS', 'SO', 'BUSY', 'WITH', 'THE', 'NECESSARY', 'PREPARATIONS', 'THAT', 'THERE', 'WAS', 'NO', 'TIME', 'FOR', 'ROMANCE', 'OF', 'ANY', 'SORT', 'AND', 'THE', 'FOUR', 'YOUNG', 'PEOPLE', 'WORKED', 'TOGETHER', 'AS', 'SOBERLY', 'AND', 'SENSIBLY', 'AS', 'IF', 'ALL', 'SORTS', 'OF', 'EMOTIONS', 'WERE', 'NOT', 'BOTTLED', 'UP', 'IN', 'THEIR', 'RESPECTIVE', 'HEARTS'] +3331-159609-0001-743: hyp=['FORTUNATELY', 'EVERY', 'ONE', 'WAS', 'SO', 'BUSY', 'WITH', 'THE', 'NECESSARY', 'PREPARATIONS', 'THAT', 'THERE', 'WAS', 'NO', 'TIME', 'FOR', 'ROMANS', 'OF', 'ANY', 'SORT', 'AND', 'THE', 'FOUR', 'YOUNG', 'PEOPLE', 'WORKED', 'TOGETHER', 'AS', 'SOBERLY', 'AND', 'SENSIBLY', 'AS', 'IF', 'ALL', 'SORTS', 'OF', 'EMOTIONS', 'WERE', 'NOT', 'BOTHERED', 'UP', 'IN', 'THEIR', 'RESPECTIVE', 'HEARTS'] +3331-159609-0002-744: ref=['PITY', 'THAT', 'THE', 'END', 'SHOULD', 'COME', 'SO', 'SOON', 'BUT', 'THE', 'HOUR', 'DID', 'ITS', 'WORK', 'AND', 
'WENT', 'ITS', 'WAY', 'LEAVING', 'A', 'CLEARER', 'ATMOSPHERE', 'BEHIND', 'THOUGH', 'THE', 'YOUNG', 'FOLKS', 'DID', 'NOT', 'SEE', 'IT', 'THEN', 'FOR', 'THEIR', 'EYES', 'WERE', 'DIM', 'BECAUSE', 'OF', 'THE', 'PARTINGS', 'THAT', 'MUST', 'BE'] +3331-159609-0002-744: hyp=['PITY', 'THAT', 'THE', 'END', 'SHOULD', 'COME', 'SO', 'SOON', 'BUT', 'THE', 'HOUR', 'DID', 'ITS', 'WORK', 'AND', 'WHEN', 'ITS', 'WAY', 'LEAVING', 'A', 'CLEARER', 'ATMOSPHERE', 'BEHIND', 'THAT', 'THE', 'YOUNG', 'FOLKS', 'DID', 'NOT', 'SEE', 'IT', 'THEN', 'FOR', 'THEIR', 'EYES', 'WERE', 'DIM', 'BECAUSE', 'OF', 'THE', 'PARTINGS', 'THAT', 'MUST', 'BE'] +3331-159609-0003-745: ref=['IF', 'IT', 'HAD', 'NOT', 'BEEN', 'FOR', 'TWO', 'THINGS', 'I', 'FEAR', 'SHE', 'NEVER', 'WOULD', 'HAVE', 'STOOD', 'A', 'SUMMER', 'IN', 'TOWN', 'BUT', 'SYDNEY', 'OFTEN', 'CALLED', 'TILL', 'HIS', 'VACATION', 'CAME', 'AND', 'A', 'VOLUMINOUS', 'CORRESPONDENCE', 'WITH', 'POLLY', 'BEGUILED', 'THE', 'LONG', 'DAYS'] +3331-159609-0003-745: hyp=['IF', 'IT', 'HAD', 'NOT', 'BEEN', 'FOR', 'TWO', 'THINGS', 'I', 'FEAR', 'SHE', 'NEVER', 'WOULD', 'HAVE', 'STOOD', 'A', 'SUMMER', 'IN', 'TOWN', 'BUT', 'SYDNEY', 'OFTEN', 'CALLED', 'TO', 'HIS', 'VACATION', 'CAME', 'AND', 'A', 'VOLUMINOUS', 'CORRESPONDENCE', 'WITH', 'POLLY', 'BEGUILED', 'THE', 'LONG', 'DAYS'] +3331-159609-0004-746: ref=['TOM', 'WROTE', 'ONCE', 'A', 'WEEK', 'TO', 'HIS', 'MOTHER', 'BUT', 'THE', 'LETTERS', 'WERE', 'SHORT', 'AND', 'NOT', 'VERY', 'SATISFACTORY', 'FOR', 'MEN', 'NEVER', 'DO', 'TELL', 'THE', 'INTERESTING', 'LITTLE', 'THINGS', 'THAT', 'WOMEN', 'BEST', 'LIKE', 'TO', 'HEAR'] +3331-159609-0004-746: hyp=['TUMULT', 'ONES', 'A', 'WEEK', 'TO', 'HIS', 'MOTHER', 'BUT', 'THEY', 'LET', 'US', 'WERE', 'SHORT', 'AND', 'NOT', 'VERY', 'SATISFACTORY', 'FOR', 'MEN', 'NEVER', 'DO', 'SO', 'THE', 'INTERESTING', 'LITTLE', 'THINGS', 'THAT', 'WOMEN', 'BEST', 'LIKE', 'TO', 'HEAR'] +3331-159609-0005-747: ref=['NO', 'I', 'M', 'ONLY', 'TIRED', 'HAD', 'A', 'GOOD', 'DEAL', 'TO', 'DO', 'LATELY', 'AND', 'THE', 'DULL', 'WEATHER', 'MAKES', 'ME', 'JUST', 'A', 'TRIFLE', 'BLUE'] +3331-159609-0005-747: hyp=['NO', 'I', 'AM', 'ONLY', 'TIRED', 'HAD', 'A', 'GOOD', 'DEAL', 'TO', 'DO', 'LATELY', 'AND', 'THE', 'DULL', 'WEATHER', 'MAKES', 'ME', 'CHOSE', 'TO', 'TRAVEL', 'BLUE'] +3331-159609-0006-748: ref=['FORGIVE', 'ME', 'POLLY', 'BUT', 'I', "CAN'T", 'HELP', 'SAYING', 'IT', 'FOR', 'IT', 'IS', 'THERE', 'AND', 'I', 'WANT', 'TO', 'BE', 'AS', 'TRUE', 'TO', 'YOU', 'AS', 'YOU', 'WERE', 'TO', 'ME', 'IF', 'I', 'CAN'] +3331-159609-0006-748: hyp=['FORGIVE', 'ME', 'POLLY', 'BUT', 'I', "CAN'T", 'HELP', 'SAYING', 'IT', 'FOR', 'IT', 'IS', 'THERE', 'AND', 'I', 'WANT', 'TO', 'BE', 'AS', 'TRUE', 'TO', 'YOU', 'AS', 'YOU', 'WERE', 'TO', 'ME', 'IF', 'I', 'CAN'] +3331-159609-0007-749: ref=['I', 'TRY', 'NOT', 'TO', 'DECEIVE', 'MYSELF', 'BUT', 'IT', 'DOES', 'SEEM', 'AS', 'IF', 'THERE', 'WAS', 'A', 'CHANCE', 'OF', 'HAPPINESS', 'FOR', 'ME'] +3331-159609-0007-749: hyp=['I', 'TRIED', 'NOT', 'A', 'DECEIVE', 'MYSELF', 'BUT', 'IT', 'DOES', 'SEEM', 'AS', 'IF', 'THERE', 'WAS', 'A', 'CHANCE', 'OF', 'HAPPINESS', 'FOR', 'ME'] +3331-159609-0008-750: ref=['THANK', 'HEAVEN', 'FOR', 'THAT'] +3331-159609-0008-750: hyp=['THANK', 'HEAVEN', 'FOR', 'THAT'] +3331-159609-0009-751: ref=['CRIED', 'POLLY', 'WITH', 'THE', 'HEARTIEST', 'SATISFACTION', 'IN', 'HER', 'VOICE'] +3331-159609-0009-751: hyp=['CRIED', 'POLLY', 'WITH', 'THE', 'HARDIEST', 'SATISFACTION', 'IN', 'HER', 'VOICE'] +3331-159609-0010-752: ref=['POOR', 'POLLY', 'WAS', 'SO', 'TAKEN', 'BY', 'SURPRISE', 'THAT', 'SHE', 'HAD', 'NOT', 'A', 
'WORD', 'TO', 'SAY'] +3331-159609-0010-752: hyp=['POOR', 'POLLY', 'WAS', 'SO', 'TAKEN', 'BY', 'SURPRISE', 'THAT', 'SHE', 'HAD', 'NOT', 'A', 'WORD', 'TO', 'SAY'] +3331-159609-0011-753: ref=['NONE', 'WERE', 'NEEDED', 'HER', 'TELLTALE', 'FACE', 'ANSWERED', 'FOR', 'HER', 'AS', 'WELL', 'AS', 'THE', 'IMPULSE', 'WHICH', 'MADE', 'HER', 'HIDE', 'HER', 'HEAD', 'IN', 'THE', 'SOFA', 'CUSHION', 'LIKE', 'A', 'FOOLISH', 'OSTRICH', 'WHEN', 'THE', 'HUNTERS', 'ARE', 'AFTER', 'IT'] +3331-159609-0011-753: hyp=['NONE', 'WERE', 'NEEDED', 'HOTELED', 'HER', 'FACE', 'ANSWERED', 'FOR', 'HER', 'AS', 'WELL', 'AS', 'THE', 'IMPULSE', 'WHICH', 'MADE', 'HER', 'HIDE', 'HER', 'HEAD', 'IN', 'THE', 'SOFA', 'CUSHION', 'LIKE', 'A', 'FOOLISH', 'OSTRICH', 'AND', 'THE', 'HANDLES', 'ARE', 'AFTER', 'IT'] +3331-159609-0012-754: ref=['ONCE', 'OR', 'TWICE', 'BUT', 'SORT', 'OF', 'JOKINGLY', 'AND', 'I', 'THOUGHT', 'IT', 'WAS', 'ONLY', 'SOME', 'LITTLE', 'FLIRTATION'] +3331-159609-0012-754: hyp=['ONCE', 'OR', 'TWICE', 'BUT', 'SORT', 'OF', 'CHOKINGLY', 'AND', 'I', 'THOUGHT', 'IT', 'WAS', 'ONLY', 'SOME', 'LITTLE', 'FLIRTATION'] +3331-159609-0013-755: ref=['IT', 'WAS', 'SO', 'STUPID', 'OF', 'ME', 'NOT', 'TO', 'GUESS', 'BEFORE'] +3331-159609-0013-755: hyp=['IT', 'WAS', 'SO', 'STUPID', 'OF', 'ME', 'NOT', 'TO', 'GUESS', 'BEFORE'] +3331-159609-0014-756: ref=['IT', 'WAS', 'SO', 'TENDER', 'EARNEST', 'AND', 'DEFIANT', 'THAT', 'FANNY', 'FORGOT', 'THE', 'DEFENCE', 'OF', 'HER', 'OWN', 'LOVER', 'IN', 'ADMIRATION', 'OF', "POLLY'S", 'LOYALTY', 'TO', 'HERS', 'FOR', 'THIS', 'FAITHFUL', 'ALL', 'ABSORBING', 'LOVE', 'WAS', 'A', 'NEW', 'REVELATION', 'TO', 'FANNY', 'WHO', 'WAS', 'USED', 'TO', 'HEARING', 'HER', 'FRIENDS', 'BOAST', 'OF', 'TWO', 'OR', 'THREE', 'LOVERS', 'A', 'YEAR', 'AND', 'CALCULATE', 'THEIR', 'RESPECTIVE', 'VALUES', 'WITH', 'ALMOST', 'AS', 'MUCH', 'COOLNESS', 'AS', 'THE', 'YOUNG', 'MEN', 'DISCUSSED', 'THE', 'FORTUNES', 'OF', 'THE', 'GIRLS', 'THEY', 'WISHED', 'FOR', 'BUT', 'COULD', 'NOT', 'AFFORD', 'TO', 'MARRY'] +3331-159609-0014-756: hyp=['IT', 'WAS', 'HER', 'TENDER', 'EARNEST', 'AND', 'DEFIANT', 'THAT', 'FANNY', 'FORGOT', 'THE', 'DEFENCE', 'OF', 'HER', 'OWN', 'LOVE', 'IN', 'ADMIRATION', 'OF', "POLLY'S", 'LOYALTY', 'TO', 'HERS', 'FOR', 'THIS', 'FAITHFUL', 'ALL', 'ABSORBING', 'LOVE', 'WAS', 'A', 'NEW', 'REVELATION', 'TO', 'FANNY', 'WHO', 'WAS', 'USED', 'TO', 'HEARING', 'HER', 'FRIENDS', 'BOAST', 'OF', 'TWO', 'OR', 'THREE', 'LOVERS', 'A', 'YEAR', 'AND', 'CALCULATE', 'THEIR', 'RESPECTIVE', 'VALUES', 'WITH', 'ALMOST', 'AS', 'MUCH', 'COOLNESS', 'AS', 'THE', 'YOUNG', 'MEN', 'DISCUSSED', 'THE', 'FORTUNES', 'OF', 'THE', 'GIRLS', 'THEY', 'WISHED', 'FOR', 'BUT', 'COULD', 'NOT', 'AFFORD', 'TO', 'MARRY'] +3331-159609-0015-757: ref=['I', 'HOPE', 'MARIA', 'BAILEY', 'IS', 'ALL', 'HE', 'THINKS', 'HER', 'SHE', 'ADDED', 'SOFTLY', 'FOR', 'I', 'COULD', "N'T", 'BEAR', 'TO', 'HAVE', 'HIM', 'DISAPPOINTED', 'AGAIN'] +3331-159609-0015-757: hyp=['I', 'HOPE', 'MARIA', "BAILEY'S", 'ONLY', 'THINK', 'SIR', 'SHE', 'ADDED', 'SOFTLY', 'FOR', 'I', 'COULD', 'NOT', 'BEAR', 'TO', 'HAVE', 'HIM', 'DISAPPOINTED', 'AGAIN'] +3331-159609-0016-758: ref=['SAID', 'FANNY', 'TURNING', 'HOPEFUL', 'ALL', 'AT', 'ONCE'] +3331-159609-0016-758: hyp=['SAID', 'FANNY', 'TURNING', 'HOPEFUL', 'ALL', 'AT', 'ONCE'] +3331-159609-0017-759: ref=['SUPPOSE', 'I', 'SAY', 'A', 'WORD', 'TO', 'TOM', 'JUST', 'INQUIRE', 'AFTER', 'HIS', 'HEART', 'IN', 'A', 'GENERAL', 'WAY', 'YOU', 'KNOW', 'AND', 'GIVE', 'HIM', 'A', 'CHANCE', 'TO', 'TELL', 'ME', 'IF', 'THERE', 'IS', 'ANYTHING', 'TO', 'TELL'] +3331-159609-0017-759: 
hyp=['SUPPOSE', 'I', 'SAY', 'A', 'WORD', 'TO', 'TOM', 'JUST', 'INQUIRE', 'AFTER', 'HIS', 'HEART', 'IN', 'A', 'GENERAL', 'WAY', 'YOU', 'KNOW', 'AND', 'GIVE', 'HIM', 'A', 'CHANCE', 'TO', 'TELL', 'ME', 'IF', "THERE'S", 'ANYTHING', 'TO', 'TELL'] +3331-159609-0018-760: ref=['BEAR', 'IT', 'PEOPLE', 'ALWAYS', 'DO', 'BEAR', 'THINGS', 'SOMEHOW', 'ANSWERED', 'POLLY', 'LOOKING', 'AS', 'IF', 'SENTENCE', 'HAD', 'BEEN', 'PASSED', 'UPON', 'HER'] +3331-159609-0018-760: hyp=['BEAR', 'IT', 'PEOPLE', 'ALWAYS', 'DO', 'BARE', 'THINGS', 'SOMEHOW', 'ANSWERED', 'POLLY', 'LOOKING', 'AS', 'IF', 'SENTENCE', 'HAD', 'BEEN', 'PASSED', 'UPON', 'HER'] +3331-159609-0019-761: ref=['IT', 'WAS', 'A', 'VERY', 'DIFFERENT', 'WINTER', 'FROM', 'THE', 'LAST', 'FOR', 'BOTH', 'THE', 'GIRLS'] +3331-159609-0019-761: hyp=['IT', 'WAS', 'A', 'VERY', 'DIFFERENT', 'WINDOW', 'FROM', 'THE', 'LAST', 'ABOVE', 'THE', 'GIRLS'] +3331-159609-0020-762: ref=['IF', 'FANNY', 'WANTED', 'TO', 'SHOW', 'HIM', 'WHAT', 'SHE', 'COULD', 'DO', 'TOWARD', 'MAKING', 'A', 'PLEASANT', 'HOME', 'SHE', 'CERTAINLY', 'SUCCEEDED', 'BETTER', 'THAN', 'SHE', 'SUSPECTED', 'FOR', 'IN', 'SPITE', 'OF', 'MANY', 'FAILURES', 'AND', 'DISCOURAGEMENTS', 'BEHIND', 'THE', 'SCENES', 'THE', 'LITTLE', 'HOUSE', 'BECAME', 'A', 'MOST', 'ATTRACTIVE', 'PLACE', 'TO', 'MISTER', 'SYDNEY', 'AT', 'LEAST', 'FOR', 'HE', 'WAS', 'MORE', 'THE', 'HOUSE', 'FRIEND', 'THAN', 'EVER', 'AND', 'SEEMED', 'DETERMINED', 'TO', 'PROVE', 'THAT', 'CHANGE', 'OF', 'FORTUNE', 'MADE', 'NO', 'DIFFERENCE', 'TO', 'HIM'] +3331-159609-0020-762: hyp=['IF', 'ANY', 'WANTED', 'TO', 'SHOW', 'HIM', 'WHAT', 'SHE', 'COULD', 'DO', 'TOWARD', 'MAKING', 'A', 'PLEASANT', 'HOME', 'SHE', 'CERTAINLY', 'SUCCEEDED', 'BY', 'THEN', 'SHE', 'SUSPECTED', 'FOR', 'IN', 'SPITE', 'OF', 'MANY', 'FAILURES', 'AND', 'DISCOURAGEMENTS', 'BEHIND', 'THE', 'SCENES', 'THE', 'LITTLE', 'HOUSE', 'BECAME', 'A', 'MOST', 'ATTRACTIVE', 'PLACE', 'TO', 'MISTER', 'SIDNEY', 'AT', 'LEAST', 'FOR', 'HE', 'WAS', 'MORE', 'THE', 'HOUSE', 'FRIEND', 'THAN', 'EVER', 'AND', 'SEEMED', 'DETERMINED', 'TO', 'PROVE', 'THAT', 'CHANGE', 'OF', 'FORTUNE', 'MADE', 'NO', 'DIFFERENCE', 'TO', 'HIM'] +3331-159609-0021-763: ref=['SHE', 'KEPT', 'MUCH', 'AT', 'HOME', 'WHEN', 'THE', "DAY'S", 'WORK', 'WAS', 'DONE', 'FINDING', 'IT', 'PLEASANTER', 'TO', 'SIT', 'DREAMING', 'OVER', 'BOOK', 'OR', 'SEWING', 'ALONE', 'THAN', 'TO', 'EXERT', 'HERSELF', 'EVEN', 'TO', 'GO', 'TO', 'THE', 'SHAWS'] +3331-159609-0021-763: hyp=['SHE', 'KEPT', 'MUCH', 'AT', 'HOME', 'WHEN', 'THE', "DAY'S", 'WORK', 'WAS', 'DONE', 'FINDING', 'IT', 'PLEASANTER', 'TO', 'SIT', 'DREAMING', 'OF', 'A', 'BOOK', 'OR', 'SEWING', 'ALONE', 'THAN', 'TO', 'EXERT', 'HERSELF', 'EVEN', 'TO', 'GO', 'TO', 'THE', 'SHORES'] +3331-159609-0022-764: ref=['POLLY', 'WAS', 'NOT', 'AT', 'ALL', 'LIKE', 'HERSELF', 'THAT', 'WINTER', 'AND', 'THOSE', 'NEAREST', 'TO', 'HER', 'SAW', 'AND', 'WONDERED', 'AT', 'IT', 'MOST'] +3331-159609-0022-764: hyp=['POLLY', 'WAS', 'NOT', 'AT', 'ALL', 'LIKE', 'HERSELF', 'THAT', 'WINDOW', 'AND', 'THOSE', 'NEAREST', 'TO', 'HER', 'SAW', 'AND', 'WANDERED', 'AT', 'IT', 'MOST'] +3331-159609-0023-765: ref=['FOR', 'NED', 'WAS', 'SO', 'ABSORBED', 'IN', 'BUSINESS', 'THAT', 'HE', 'IGNORED', 'THE', 'WHOLE', 'BAILEY', 'QUESTION', 'AND', 'LEFT', 'THEM', 'IN', 'UTTER', 'DARKNESS'] +3331-159609-0023-765: hyp=['FOR', 'NED', 'WAS', 'SO', 'ABSORBED', 'IN', 'BUSINESS', 'THAT', 'HE', 'NURED', 'THE', 'WHOLE', 'BAILIQUE', 'QUESTION', 'AND', 'LEFT', 'THEM', 'IN', 'OTHER', 'DARKNESS'] +3331-159609-0024-766: ref=['FANNY', 'CAME', 'WALKING', 'IN', 'UPON', 'HER', 
'ONE', 'DAY', 'LOOKING', 'AS', 'IF', 'SHE', 'BROUGHT', 'TIDINGS', 'OF', 'SUCH', 'GREAT', 'JOY', 'THAT', 'SHE', 'HARDLY', 'KNEW', 'HOW', 'TO', 'TELL', 'THEM'] +3331-159609-0024-766: hyp=['THEN', 'HE', 'CAME', 'WALKING', 'IN', 'UPON', 'HER', 'ONE', 'DAY', 'LOOKING', 'AS', 'IF', 'SHE', 'POURED', 'HIDINGS', 'OF', 'SUCH', 'GREAT', 'JOY', 'THAT', 'SHE', 'HARDLY', 'KNEW', 'HOW', 'TO', 'TELL', 'THEM'] +3331-159609-0025-767: ref=['BUT', 'IF', 'WORK', 'BASKETS', 'WERE', 'GIFTED', 'WITH', 'POWERS', 'OF', 'SPEECH', 'THEY', 'COULD', 'TELL', 'STORIES', 'MORE', 'TRUE', 'AND', 'TENDER', 'THAN', 'ANY', 'WE', 'READ'] +3331-159609-0025-767: hyp=['BUT', 'IF', 'WORD', 'BASKETS', 'WERE', 'GIFTED', 'WITH', 'POWERS', 'OF', 'SPEECH', 'THEY', 'COULD', 'TELL', 'STORIES', 'MORE', 'TRUE', 'AND', 'TENDER', 'THAN', 'ANY', 'REED'] +3528-168656-0000-864: ref=['SHE', 'HAD', 'EVEN', 'BEEN', 'IN', 'SOCIETY', 'BEFORE', 'THE', 'REVOLUTION'] +3528-168656-0000-864: hyp=['SHE', 'HAD', 'EVEN', 'BEEN', 'IN', 'SOCIETY', 'BEFORE', 'THE', 'REVOLUTION'] +3528-168656-0001-865: ref=['IT', 'WAS', 'HER', 'PLEASURE', 'AND', 'HER', 'VANITY', 'TO', 'DRAG', 'IN', 'THESE', 'NAMES', 'ON', 'EVERY', 'PRETEXT'] +3528-168656-0001-865: hyp=['IT', 'WAS', 'HER', 'PLEASURE', 'AND', 'HER', 'VANITY', 'TO', 'DRAG', 'IN', 'THESE', 'NAMES', 'ON', 'EVERY', 'PRETEXT'] +3528-168656-0002-866: ref=['EVERY', 'YEAR', 'SHE', 'SOLEMNLY', 'RENEWED', 'HER', 'VOWS', 'AND', 'AT', 'THE', 'MOMENT', 'OF', 'TAKING', 'THE', 'OATH', 'SHE', 'SAID', 'TO', 'THE', 'PRIEST', 'MONSEIGNEUR', 'SAINT', 'FRANCOIS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'JULIEN', 'MONSEIGNEUR', 'SAINT', 'JULIEN', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'EUSEBIUS', 'MONSEIGNEUR', 'SAINT', 'EUSEBIUS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'PROCOPIUS', 'ET', 'CETERA', 'ET', 'CETERA'] +3528-168656-0002-866: hyp=['EVERY', 'YEAR', 'SHE', 'SOLEMNLY', 'RENEWED', 'HER', 'VOWS', 'AND', 'AT', 'THE', 'MOMENT', 'OF', 'TAKING', 'THE', 'OATH', 'SHE', 'SAID', 'TO', 'THE', 'PRIEST', 'MONSEIGNEUR', 'SAINT', 'FRANCOIS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAY', 'JULIAN', 'MONSEIGNEUR', 'SAINT', 'JULIAN', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'EUSCIBIUS', 'MONSIEUR', 'SAINT', 'USUBIUS', 'GAVE', 'IT', 'TO', 'MONSEIGNEUR', 'SAINT', 'PROCOPIAS', 'ET', 'CETERA', 'ET', 'CETERA'] +3528-168656-0003-867: ref=['AND', 'THE', 'SCHOOL', 'GIRLS', 'WOULD', 'BEGIN', 'TO', 'LAUGH', 'NOT', 'IN', 'THEIR', 'SLEEVES', 'BUT', 'UNDER', 'THEIR', 'VEILS', 'CHARMING', 'LITTLE', 'STIFLED', 'LAUGHS', 'WHICH', 'MADE', 'THE', 'VOCAL', 'MOTHERS', 'FROWN'] +3528-168656-0003-867: hyp=['AND', 'THE', 'SCHOOLGIRLS', 'WOULD', 'BEGIN', 'TO', 'LAUGH', 'NOT', 'IN', 'THEIR', 'SLEEVES', 'BUT', 'UNDER', 'THE', 'VEILS', 'CHARMING', 'LITTLE', 'STIFLED', 'LAUGHS', 'WHICH', 'MADE', 'THE', 'VOCAL', 'MOTHERS', 'FROWN'] +3528-168656-0004-868: ref=['IT', 'WAS', 'A', 'CENTURY', 'WHICH', 'SPOKE', 'THROUGH', 'HER', 'BUT', 'IT', 'WAS', 'THE', 'EIGHTEENTH', 'CENTURY'] +3528-168656-0004-868: hyp=['IT', 'WAS', 'A', 'CENTURY', 'WHICH', 'SPOKE', 'THROUGH', 'HER', 'BUT', 'IT', 'WAS', 'THE', 'EIGHTEENTH', 'CENTURY'] +3528-168656-0005-869: ref=['THE', 'RULE', 'OF', 'FONTEVRAULT', 'DID', 'NOT', 'FORBID', 'THIS'] +3528-168656-0005-869: hyp=['THE', 'RULE', 'OF', 'FONTREVAL', 'DID', 'NOT', 'FORBID', 'THIS'] +3528-168656-0006-870: ref=['SHE', 'WOULD', 'NOT', 'SHOW', 'THIS', 'OBJECT', 'TO', 'ANYONE'] +3528-168656-0006-870: hyp=['SHE', 'WOULD', 'NOT', 'SHOW', 'THE', 'SUBJECT', 'TO', 'ANY', 'ONE'] +3528-168656-0007-871: ref=['THUS', 'IT', 'FURNISHED', 'A', 'SUBJECT', 
'OF', 'COMMENT', 'FOR', 'ALL', 'THOSE', 'WHO', 'WERE', 'UNOCCUPIED', 'OR', 'BORED', 'IN', 'THE', 'CONVENT'] +3528-168656-0007-871: hyp=['THUS', 'IT', 'FURNISHED', 'A', 'SUBJECT', 'OF', 'COMMENT', 'FOR', 'ALL', 'THOSE', 'WHO', 'WERE', 'UNOCCUPIED', 'OR', 'BORED', 'IN', 'THE', 'CONVENT'] +3528-168656-0008-872: ref=['SOME', 'UNIQUE', 'CHAPLET', 'SOME', 'AUTHENTIC', 'RELIC'] +3528-168656-0008-872: hyp=['SOME', 'UNIQUE', 'CHAPLET', 'SOME', 'AUTHENTIC', 'RELIC'] +3528-168656-0009-873: ref=['THEY', 'LOST', 'THEMSELVES', 'IN', 'CONJECTURES'] +3528-168656-0009-873: hyp=['THEY', 'LOST', 'THEMSELVES', 'IN', 'CONJECTURES'] +3528-168656-0010-874: ref=['WHEN', 'THE', 'POOR', 'OLD', 'WOMAN', 'DIED', 'THEY', 'RUSHED', 'TO', 'HER', 'CUPBOARD', 'MORE', 'HASTILY', 'THAN', 'WAS', 'FITTING', 'PERHAPS', 'AND', 'OPENED', 'IT'] +3528-168656-0010-874: hyp=['WHEN', 'THE', 'POOR', 'OLD', 'WOMAN', 'DIED', 'THEY', 'RUSHED', 'TO', 'HER', 'CUPBOARD', 'MORE', 'HASTILY', 'THAN', 'WAS', 'FITTING', 'PERHAPS', 'AND', 'OPENED', 'IT'] +3528-168656-0011-875: ref=['HE', 'IS', 'RESISTING', 'FLUTTERING', 'HIS', 'TINY', 'WINGS', 'AND', 'STILL', 'MAKING', 'AN', 'EFFORT', 'TO', 'FLY', 'BUT', 'THE', 'DANCER', 'IS', 'LAUGHING', 'WITH', 'A', 'SATANICAL', 'AIR'] +3528-168656-0011-875: hyp=['HE', 'IS', 'RESISTING', 'FLUTTERING', 'HIS', 'TINY', 'WINGS', 'AND', 'STILL', 'MAKING', 'AN', 'EFFORT', 'TO', 'FLY', 'BUT', 'THE', 'DANCERS', 'LAUGHING', 'WITH', 'US', 'SATANICAL', 'AIR'] +3528-168656-0012-876: ref=['MORAL', 'LOVE', 'CONQUERED', 'BY', 'THE', 'COLIC'] +3528-168656-0012-876: hyp=['MORAL', 'LOVE', 'CONQUERED', 'BY', 'THE', 'COLIC'] +3528-168669-0000-877: ref=['THE', 'PRIORESS', 'RETURNED', 'AND', 'SEATED', 'HERSELF', 'ONCE', 'MORE', 'ON', 'HER', 'CHAIR'] +3528-168669-0000-877: hyp=['THE', 'PRIORS', 'RETURNED', 'AND', 'SEATED', 'HERSELF', 'ONCE', 'MORE', 'ON', 'HER', 'CHAIR'] +3528-168669-0001-878: ref=['WE', 'WILL', 'PRESENT', 'A', 'STENOGRAPHIC', 'REPORT', 'OF', 'THE', 'DIALOGUE', 'WHICH', 'THEN', 'ENSUED', 'TO', 'THE', 'BEST', 'OF', 'OUR', 'ABILITY'] +3528-168669-0001-878: hyp=['WE', 'WILL', 'PRESENT', 'A', 'SYNOGRAPHIC', 'REPORT', 'OF', 'THE', 'DIALOGUE', 'WHICH', 'THEN', 'ENSUED', 'TO', 'THE', 'BEST', 'OF', 'OUR', 'ABILITY'] +3528-168669-0002-879: ref=['FATHER', 'FAUVENT'] +3528-168669-0002-879: hyp=['FATHER', 'FERVENT'] +3528-168669-0003-880: ref=['REVEREND', 'MOTHER', 'DO', 'YOU', 'KNOW', 'THE', 'CHAPEL'] +3528-168669-0003-880: hyp=['REVEREND', 'MOTHER', 'DO', 'YOU', 'KNOW', 'THE', 'CHAPEL'] +3528-168669-0004-881: ref=['AND', 'YOU', 'HAVE', 'BEEN', 'IN', 'THE', 'CHOIR', 'IN', 'PURSUANCE', 'OF', 'YOUR', 'DUTIES', 'TWO', 'OR', 'THREE', 'TIMES'] +3528-168669-0004-881: hyp=['AND', 'YOU', 'HAVE', 'BEEN', 'IN', 'THE', 'CHOIR', 'IN', 'PURSUANCE', 'OF', 'YOUR', 'DUTIES', 'TWO', 'OR', 'THREE', 'TIMES'] +3528-168669-0005-882: ref=['THERE', 'IS', 'A', 'STONE', 'TO', 'BE', 'RAISED', 'HEAVY'] +3528-168669-0005-882: hyp=['THERE', 'IS', 'A', 'STONE', 'TO', 'BE', 'RAISED', 'HEAVY'] +3528-168669-0006-883: ref=['THE', 'SLAB', 'OF', 'THE', 'PAVEMENT', 'WHICH', 'IS', 'AT', 'THE', 'SIDE', 'OF', 'THE', 'ALTAR'] +3528-168669-0006-883: hyp=['THE', 'SLAB', 'OF', 'THE', 'PAVEMENT', 'WHICH', 'IS', 'AT', 'THE', 'SIDE', 'OF', 'THE', 'ALTAR'] +3528-168669-0007-884: ref=['THE', 'SLAB', 'WHICH', 'CLOSES', 'THE', 'VAULT', 'YES'] +3528-168669-0007-884: hyp=['THE', 'FLAP', 'WHICH', 'CLOSES', 'THE', 'VAULT', 'YES'] +3528-168669-0008-885: ref=['IT', 'WOULD', 'BE', 'A', 'GOOD', 'THING', 'TO', 'HAVE', 'TWO', 'MEN', 'FOR', 'IT'] +3528-168669-0008-885: hyp=['IT', 'WOULD', 
'BE', 'A', 'GOOD', 'THING', 'TO', 'HAVE', 'TWO', 'MEN', 'FOR', 'IT'] +3528-168669-0009-886: ref=['A', 'WOMAN', 'IS', 'NEVER', 'A', 'MAN'] +3528-168669-0009-886: hyp=['A', 'WOMAN', 'IS', 'NEVER', 'A', 'MAN'] +3528-168669-0010-887: ref=['BECAUSE', 'DOM', 'MABILLON', 'GIVES', 'FOUR', 'HUNDRED', 'AND', 'SEVENTEEN', 'EPISTLES', 'OF', 'SAINT', 'BERNARD', 'WHILE', 'MERLONUS', 'HORSTIUS', 'ONLY', 'GIVES', 'THREE', 'HUNDRED', 'AND', 'SIXTY', 'SEVEN', 'I', 'DO', 'NOT', 'DESPISE', 'MERLONUS', 'HORSTIUS', 'NEITHER', 'DO', 'I'] +3528-168669-0010-887: hyp=['BECAUSE', 'DON', 'MARBYLON', 'GIVES', 'FOUR', 'HUNDRED', 'AND', 'SEVENTEEN', 'EPISTLES', 'OF', 'SAINT', 'BERNARD', 'WHILE', 'MERLUNUS', 'HORSES', 'ONLY', 'GIVES', 'THREE', 'HUNDRED', 'AND', 'SIXTY', 'SEVEN', 'I', 'DO', 'NOT', 'DESPISE', 'MERLINUS', 'HORSES', 'NEITHER', 'DO', 'I'] +3528-168669-0011-888: ref=['MERIT', 'CONSISTS', 'IN', 'WORKING', 'ACCORDING', 'TO', "ONE'S", 'STRENGTH', 'A', 'CLOISTER', 'IS', 'NOT', 'A', 'DOCK', 'YARD'] +3528-168669-0011-888: hyp=['MARRIAGE', 'CONSISTS', 'IN', 'WORKING', 'ACCORDING', 'TO', "ONE'S", 'STRENGTH', 'A', 'CLOISTER', 'IS', 'NOT', 'A', 'DOCKYARD'] +3528-168669-0012-889: ref=['AND', 'A', 'WOMAN', 'IS', 'NOT', 'A', 'MAN', 'BUT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'ONE', 'THOUGH'] +3528-168669-0012-889: hyp=['ADD', 'A', 'WOMAN', 'IS', 'NOT', 'A', 'MAN', 'BUT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'ONE', 'THOUGH'] +3528-168669-0013-890: ref=['AND', 'CAN', 'YOU', 'GET', 'A', 'LEVER'] +3528-168669-0013-890: hyp=['AND', 'CAN', 'YOU', 'GET', 'A', 'LOVER'] +3528-168669-0014-891: ref=['THERE', 'IS', 'A', 'RING', 'IN', 'THE', 'STONE'] +3528-168669-0014-891: hyp=['THERE', 'IS', 'A', 'RING', 'IN', 'THE', 'STONE'] +3528-168669-0015-892: ref=['I', 'WILL', 'PUT', 'THE', 'LEVER', 'THROUGH', 'IT'] +3528-168669-0015-892: hyp=['I', 'WILL', 'PUT', 'THE', 'LOVER', 'THROUGH', 'IT'] +3528-168669-0016-893: ref=['THAT', 'IS', 'GOOD', 'REVEREND', 'MOTHER', 'I', 'WILL', 'OPEN', 'THE', 'VAULT'] +3528-168669-0016-893: hyp=['THAT', 'IS', 'GOOD', 'REVEREND', 'MOTHER', 'I', 'WILL', 'OPEN', 'THE', 'VAULT'] +3528-168669-0017-894: ref=['WILL', 'THAT', 'BE', 'ALL', 'NO'] +3528-168669-0017-894: hyp=['WILL', 'THAT', 'BE', 'ALL', 'NO'] +3528-168669-0018-895: ref=['GIVE', 'ME', 'YOUR', 'ORDERS', 'VERY', 'REVEREND', 'MOTHER'] +3528-168669-0018-895: hyp=['GIVE', 'ME', 'YOUR', 'ORDERS', 'VERY', 'REVEREND', 'MOTHER'] +3528-168669-0019-896: ref=['FAUVENT', 'WE', 'HAVE', 'CONFIDENCE', 'IN', 'YOU'] +3528-168669-0019-896: hyp=['FOR', 'THAT', 'WE', 'HAVE', 'CONFIDENCE', 'IN', 'YOU'] +3528-168669-0020-897: ref=['I', 'AM', 'HERE', 'TO', 'DO', 'ANYTHING', 'YOU', 'WISH'] +3528-168669-0020-897: hyp=['I', 'AM', 'HERE', 'TO', 'DO', 'ANYTHING', 'YOU', 'WISH'] +3528-168669-0021-898: ref=['AND', 'TO', 'HOLD', 'YOUR', 'PEACE', 'ABOUT', 'EVERYTHING', 'YES', 'REVEREND', 'MOTHER'] +3528-168669-0021-898: hyp=['AND', 'TO', 'HOLD', 'YOUR', 'PEACE', 'ABOUT', 'EVERYTHING', 'YES', 'ROBIN', 'MOTHER'] +3528-168669-0022-899: ref=['WHEN', 'THE', 'VAULT', 'IS', 'OPEN', 'I', 'WILL', 'CLOSE', 'IT', 'AGAIN'] +3528-168669-0022-899: hyp=['WHEN', 'THE', 'VOLT', 'IS', 'OPEN', 'I', 'WILL', 'CLOSE', 'IT', 'AGAIN'] +3528-168669-0023-900: ref=['BUT', 'BEFORE', 'THAT', 'WHAT', 'REVEREND', 'MOTHER'] +3528-168669-0023-900: hyp=['BUT', 'BEFORE', 'THAT', 'WHAT', 'REVEREND', 'MOTHER'] +3528-168669-0024-901: ref=['FATHER', 'FAUVENT', 'REVEREND', 'MOTHER'] +3528-168669-0024-901: hyp=['FATHER', 'FERVENT', 'REVEREND', 'MOTHER'] +3528-168669-0025-902: ref=['YOU', 'KNOW', 'THAT', 'A', 'MOTHER', 
'DIED', 'THIS', 'MORNING'] +3528-168669-0025-902: hyp=['YOU', 'KNOW', 'THAT', 'A', 'MOTHER', 'DIED', 'THIS', 'MORNING'] +3528-168669-0026-903: ref=['NO', 'DID', 'YOU', 'NOT', 'HEAR', 'THE', 'BELL'] +3528-168669-0026-903: hyp=['NO', 'DID', 'YOU', 'NOT', 'HEAR', 'THE', 'BELL'] +3528-168669-0027-904: ref=['NOTHING', 'CAN', 'BE', 'HEARD', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN', 'REALLY'] +3528-168669-0027-904: hyp=['NOTHING', 'CAN', 'BE', 'HEARD', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN', 'REALLY'] +3528-168669-0028-905: ref=['AND', 'THEN', 'THE', 'WIND', 'IS', 'NOT', 'BLOWING', 'IN', 'MY', 'DIRECTION', 'THIS', 'MORNING'] +3528-168669-0028-905: hyp=['AND', 'THEN', 'THE', 'WIND', 'IS', 'NOT', 'BLOWING', 'IN', 'MY', 'DIRECTION', 'THIS', 'MORNING'] +3528-168669-0029-906: ref=['IT', 'WAS', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0029-906: hyp=['IT', 'WAS', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0030-907: ref=['THREE', 'YEARS', 'AGO', 'MADAME', 'DE', 'BETHUNE', 'A', 'JANSENIST', 'TURNED', 'ORTHODOX', 'MERELY', 'FROM', 'HAVING', 'SEEN', 'MOTHER', 'CRUCIFIXION', 'AT', 'PRAYER', 'AH'] +3528-168669-0030-907: hyp=['THREE', 'YEARS', 'AGO', 'MADAME', 'DE', 'BESOON', 'A', 'JENSONIST', 'TURNED', 'ORTHODOX', 'MERELY', 'FROM', 'HAVING', 'SEEN', 'MOTHER', 'CRUCIFIXION', 'AT', 'PRAYER', 'AH'] +3528-168669-0031-908: ref=['THE', 'MOTHERS', 'HAVE', 'TAKEN', 'HER', 'TO', 'THE', 'DEAD', 'ROOM', 'WHICH', 'OPENS', 'ON', 'THE', 'CHURCH', 'I', 'KNOW'] +3528-168669-0031-908: hyp=['THE', 'MOTHERS', 'HAVE', 'TAKEN', 'HER', 'TO', 'THE', 'DEAD', 'ROOM', 'WHICH', 'OPENS', 'ON', 'THE', 'CHURCH', 'I', 'KNOW'] +3528-168669-0032-909: ref=['A', 'FINE', 'SIGHT', 'IT', 'WOULD', 'BE', 'TO', 'SEE', 'A', 'MAN', 'ENTER', 'THE', 'DEAD', 'ROOM', 'MORE', 'OFTEN'] +3528-168669-0032-909: hyp=['A', 'FINE', 'SIGHT', 'IT', 'WOULD', 'BE', 'TO', 'SEE', 'A', 'MAN', 'ENTER', 'THE', 'DEAD', 'ROOM', 'MORE', 'OFTEN'] +3528-168669-0033-910: ref=['HEY', 'MORE', 'OFTEN'] +3528-168669-0033-910: hyp=['HEY', 'MORE', 'OFTEN'] +3528-168669-0034-911: ref=['WHAT', 'DO', 'YOU', 'SAY'] +3528-168669-0034-911: hyp=['WHAT', 'DO', 'YOU', 'SAY'] +3528-168669-0035-912: ref=['I', 'SAY', 'MORE', 'OFTEN', 'MORE', 'OFTEN', 'THAN', 'WHAT'] +3528-168669-0035-912: hyp=['I', 'SAY', 'MORE', 'OFTEN', 'MORE', 'OFTEN', 'THAN', 'WHAT'] +3528-168669-0036-913: ref=['REVEREND', 'MOTHER', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN', 'THAN', 'WHAT', 'I', 'SAID', 'MORE', 'OFTEN'] +3528-168669-0036-913: hyp=['REVEREND', 'MOTHER', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN', 'THAN', 'WHAT', 'I', 'SAID', 'MORE', 'OFTEN'] +3528-168669-0037-914: ref=['BUT', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN'] +3528-168669-0037-914: hyp=['BUT', 'I', 'DID', 'NOT', 'SAY', 'MORE', 'OFTEN'] +3528-168669-0038-915: ref=['AT', 'THAT', 'MOMENT', 'NINE', "O'CLOCK", 'STRUCK'] +3528-168669-0038-915: hyp=['AT', 'THAT', 'MOMENT', 'NINE', "O'CLOCK", 'STRUCK'] +3528-168669-0039-916: ref=['AT', 'NINE', "O'CLOCK", 'IN', 'THE', 'MORNING', 'AND', 'AT', 'ALL', 'HOURS', 'PRAISED', 'AND', 'ADORED', 'BE', 'THE', 'MOST', 'HOLY', 'SACRAMENT', 'OF', 'THE', 'ALTAR', 'SAID', 'THE', 'PRIORESS'] +3528-168669-0039-916: hyp=['AT', 'NINE', "O'CLOCK", 'IN', 'THE', 'MORNING', 'AND', 'AT', 'ALL', 'HOURS', 'PRAISED', 'AND', 'ENDURED', 'BE', 'THE', 'MOST', 'HOLY', 'SACRAMENT', 'OF', 'THE', 'ALTAR', 'SAID', 'THE', 'PROGRESS'] +3528-168669-0040-917: ref=['IT', 'CUT', 'MORE', 'OFTEN', 'SHORT'] +3528-168669-0040-917: hyp=['IT', 'CUT', 'MORE', 'OFTEN', 'SHORT'] +3528-168669-0041-918: ref=['FAUCHELEVENT', 'MOPPED', 'HIS', 'FOREHEAD'] 
+3528-168669-0041-918: hyp=['FAUCHELEVENT', 'MOPPED', 'HIS', 'FOREHEAD'] +3528-168669-0042-919: ref=['IN', 'HER', 'LIFETIME', 'MOTHER', 'CRUCIFIXION', 'MADE', 'CONVERTS', 'AFTER', 'HER', 'DEATH', 'SHE', 'WILL', 'PERFORM', 'MIRACLES', 'SHE', 'WILL'] +3528-168669-0042-919: hyp=['IN', 'HER', 'LIFETIME', 'MOTHER', 'CRUCIFIXION', 'MADE', 'CONVERTS', 'AFTER', 'HER', 'DEATH', 'SHE', 'WILL', 'PERFORM', 'MIRACLES', 'SHE', 'WILL'] +3528-168669-0043-920: ref=['FATHER', 'FAUVENT', 'THE', 'COMMUNITY', 'HAS', 'BEEN', 'BLESSED', 'IN', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0043-920: hyp=['FATHER', 'FUVENT', 'THE', 'COMMUNITY', 'HAS', 'BEEN', 'BLESSED', 'IN', 'MOTHER', 'CURSE', 'FICTION'] +3528-168669-0044-921: ref=['SHE', 'RETAINED', 'HER', 'CONSCIOUSNESS', 'TO', 'THE', 'VERY', 'LAST', 'MOMENT'] +3528-168669-0044-921: hyp=['SHE', 'RETAINED', 'HER', 'CONSCIOUSNESS', 'TO', 'THE', 'VERY', 'LAST', 'MOMENT'] +3528-168669-0045-922: ref=['SHE', 'GAVE', 'US', 'HER', 'LAST', 'COMMANDS'] +3528-168669-0045-922: hyp=['SHE', 'GAVE', 'US', 'HER', 'LAST', 'COMMANDS'] +3528-168669-0046-923: ref=['IF', 'YOU', 'HAD', 'A', 'LITTLE', 'MORE', 'FAITH', 'AND', 'IF', 'YOU', 'COULD', 'HAVE', 'BEEN', 'IN', 'HER', 'CELL', 'SHE', 'WOULD', 'HAVE', 'CURED', 'YOUR', 'LEG', 'MERELY', 'BY', 'TOUCHING', 'IT', 'SHE', 'SMILED'] +3528-168669-0046-923: hyp=['IF', 'YOU', 'HAD', 'A', 'LITTLE', 'MORE', 'FAITH', 'AND', 'IF', 'YOU', 'COULD', 'HAVE', 'BEEN', 'IN', 'HERSELF', 'SHE', 'WOULD', 'HAVE', 'CURED', 'YOUR', 'LEG', 'MERELY', 'BY', 'TOUCHING', 'IT', 'SHE', 'SMILED'] +3528-168669-0047-924: ref=['THERE', 'WAS', 'SOMETHING', 'OF', 'PARADISE', 'IN', 'THAT', 'DEATH'] +3528-168669-0047-924: hyp=['THERE', 'WAS', 'SOMETHING', 'OF', 'PARADISE', 'IN', 'THAT', 'DEATH'] +3528-168669-0048-925: ref=['FAUCHELEVENT', 'THOUGHT', 'THAT', 'IT', 'WAS', 'AN', 'ORISON', 'WHICH', 'SHE', 'WAS', 'FINISHING'] +3528-168669-0048-925: hyp=['FAUCHELEVENT', 'THOUGHT', 'THAT', 'IT', 'WAS', 'AN', 'ORISON', 'WHICH', 'SHE', 'WAS', 'FINISHING'] +3528-168669-0049-926: ref=['FAUCHELEVENT', 'HELD', 'HIS', 'PEACE', 'SHE', 'WENT', 'ON'] +3528-168669-0049-926: hyp=['FORCHELEVENT', 'HELD', 'HIS', 'PEACE', 'SHE', 'WENT', 'ON'] +3528-168669-0050-927: ref=['I', 'HAVE', 'CONSULTED', 'UPON', 'THIS', 'POINT', 'MANY', 'ECCLESIASTICS', 'LABORING', 'IN', 'OUR', 'LORD', 'WHO', 'OCCUPY', 'THEMSELVES', 'IN', 'THE', 'EXERCISES', 'OF', 'THE', 'CLERICAL', 'LIFE', 'AND', 'WHO', 'BEAR', 'WONDERFUL', 'FRUIT'] +3528-168669-0050-927: hyp=['I', 'HAVE', 'CONSULTED', 'UPON', 'THIS', 'POINT', 'MANY', 'ECCLESIASTICS', 'LABOURING', 'IN', 'OUR', 'LORD', 'WHO', 'OCCUPY', 'THEMSELVES', 'IN', 'THE', 'EXERCISES', 'OF', 'THE', 'CLERICAL', 'LIFE', 'AND', 'WHO', 'BEAR', 'WONDERFUL', 'FRUIT'] +3528-168669-0051-928: ref=['FORTUNATELY', 'THE', 'PRIORESS', 'COMPLETELY', 'ABSORBED', 'IN', 'HER', 'OWN', 'THOUGHTS', 'DID', 'NOT', 'HEAR', 'IT'] +3528-168669-0051-928: hyp=['FORTUNATELY', 'THE', 'PRIESTS', 'COMPLETELY', 'ABSORBED', 'IN', 'HER', 'OWN', 'THOUGHTS', 'DID', 'NOT', 'HEAR', 'IT'] +3528-168669-0052-929: ref=['SHE', 'CONTINUED', 'FATHER', 'FAUVENT'] +3528-168669-0052-929: hyp=['SHE', 'CONTINUED', 'FURTHER', 'PREVENT'] +3528-168669-0053-930: ref=['YES', 'REVEREND', 'MOTHER'] +3528-168669-0053-930: hyp=['YES', 'REVEREND', 'MOTHER'] +3528-168669-0054-931: ref=['SAINT', 'TERENTIUS', 'BISHOP', 'OF', 'PORT', 'WHERE', 'THE', 'MOUTH', 'OF', 'THE', 'TIBER', 'EMPTIES', 'INTO', 'THE', 'SEA', 'REQUESTED', 'THAT', 'ON', 'HIS', 'TOMB', 'MIGHT', 'BE', 'ENGRAVED', 'THE', 'SIGN', 'WHICH', 'WAS', 'PLACED', 'ON', 'THE', 'GRAVES', 'OF', 
'PARRICIDES', 'IN', 'THE', 'HOPE', 'THAT', 'PASSERS', 'BY', 'WOULD', 'SPIT', 'ON', 'HIS', 'TOMB', 'THIS', 'WAS', 'DONE'] +3528-168669-0054-931: hyp=['SAINT', 'TORRENTIUS', 'BISHOP', 'OF', 'PORT', 'WEAR', 'THE', 'MOUTH', 'OF', 'THE', 'TYBER', 'EMPTIES', 'INTO', 'THE', 'SEA', 'REQUESTED', 'THAT', 'ON', 'HIS', 'TOMB', 'MIGHT', 'BE', 'ENGRAVED', 'THE', 'SIGN', 'WHICH', 'WAS', 'PLACED', 'ON', 'THE', 'GRAVES', 'OF', 'PARASITES', 'IN', 'THE', 'HOPE', 'THAT', 'PASSERS', 'BY', 'WOULD', 'SPIT', 'ON', 'HIS', 'TOMB', 'THIS', 'WAS', 'DONE'] +3528-168669-0055-932: ref=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'SO', 'BE', 'IT'] +3528-168669-0055-932: hyp=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'SO', 'BE', 'IT'] +3528-168669-0056-933: ref=['FOR', 'THAT', 'MATTER', 'NO', 'REVEREND', 'MOTHER'] +3528-168669-0056-933: hyp=['FOR', 'THAT', 'MATTER', 'NO', 'REVEREND', 'MOTHER'] +3528-168669-0057-934: ref=['FATHER', 'FAUVENT', 'MOTHER', 'CRUCIFIXION', 'WILL', 'BE', 'INTERRED', 'IN', 'THE', 'COFFIN', 'IN', 'WHICH', 'SHE', 'HAS', 'SLEPT', 'FOR', 'THE', 'LAST', 'TWENTY', 'YEARS', 'THAT', 'IS', 'JUST'] +3528-168669-0057-934: hyp=['FATHER', 'PREVENT', 'MOTHER', 'CRUCIFIXION', 'WILL', 'BE', 'INTERRED', 'IN', 'THE', 'COFFIN', 'IN', 'WHICH', 'SHE', 'HAS', 'SLEPT', 'FOR', 'THE', 'LAST', 'TWENTY', 'YEARS', 'THAT', 'IS', 'JUST'] +3528-168669-0058-935: ref=['IT', 'IS', 'A', 'CONTINUATION', 'OF', 'HER', 'SLUMBER'] +3528-168669-0058-935: hyp=['IT', 'IS', 'A', 'CONTINUATION', 'OF', 'HER', 'SLUMBER'] +3528-168669-0059-936: ref=['SO', 'I', 'SHALL', 'HAVE', 'TO', 'NAIL', 'UP', 'THAT', 'COFFIN', 'YES'] +3528-168669-0059-936: hyp=['SO', 'I', 'SHALL', 'HAVE', 'TO', 'NAIL', 'UP', 'THAT', 'COFFIN', 'YES'] +3528-168669-0060-937: ref=['I', 'AM', 'AT', 'THE', 'ORDERS', 'OF', 'THE', 'VERY', 'REVEREND', 'COMMUNITY'] +3528-168669-0060-937: hyp=['I', 'AM', 'AT', 'THE', 'ORDERS', 'OF', 'THE', 'VERY', 'REVEREND', 'COMMUNITY'] +3528-168669-0061-938: ref=['THE', 'FOUR', 'MOTHER', 'PRECENTORS', 'WILL', 'ASSIST', 'YOU'] +3528-168669-0061-938: hyp=['BEFORE', 'MOTHER', 'PRESENTERS', 'WILL', 'ASSIST', 'YOU'] +3528-168669-0062-939: ref=['NO', 'IN', 'LOWERING', 'THE', 'COFFIN'] +3528-168669-0062-939: hyp=['NO', 'IN', 'LORING', 'THE', 'COFFIN'] +3528-168669-0063-940: ref=['WHERE', 'INTO', 'THE', 'VAULT'] +3528-168669-0063-940: hyp=['WHERE', 'INTO', 'THE', 'VAULT'] +3528-168669-0064-941: ref=['FAUCHELEVENT', 'STARTED', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR'] +3528-168669-0064-941: hyp=['FAUCHELEVENT', 'STARTED', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR'] +3528-168669-0065-942: ref=['UNDER', 'THE', 'ALTAR', 'BUT'] +3528-168669-0065-942: hyp=['UNDER', 'THE', 'ALTAR', 'BUT'] +3528-168669-0066-943: ref=['YOU', 'WILL', 'HAVE', 'AN', 'IRON', 'BAR', 'YES', 'BUT'] +3528-168669-0066-943: hyp=['YOU', 'WILL', 'HAVE', 'AN', 'IRON', 'BAR', 'YES', 'BUT'] +3528-168669-0067-944: ref=['YOU', 'WILL', 'RAISE', 'THE', 'STONE', 'WITH', 'THE', 'BAR', 'BY', 'MEANS', 'OF', 'THE', 'RING', 'BUT'] +3528-168669-0067-944: hyp=['YOU', 'WILL', 'RAISE', 'THE', 'STONE', 'WITH', 'THE', 'BAR', 'BY', 'MEANS', 'OF', 'THE', 'RING', 'BUT'] +3528-168669-0068-945: ref=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL', 'NOT', 'TO', 'GO', 'TO', 'PROFANE', 'EARTH', 'TO', 'REMAIN', 'THERE', 'IN', 'DEATH', 'WHERE', 'SHE', 'PRAYED', 'WHILE', 'LIVING', 'SUCH', 'WAS', 'THE', 'LAST', 'WISH', 'OF', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0068-945: hyp=['THE', 'DEAD', 'MUST', 'BE', 'OBEYED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 
'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL', 'NOT', 'TO', 'GO', 'TO', 'PROFANE', 'EARTH', 'TO', 'REMAIN', 'THERE', 'IN', 'DEATH', 'WHERE', 'SHE', 'PRAYED', 'WHILE', 'LIVING', 'SUCH', 'WAS', 'THE', 'LAST', 'WISH', 'OF', 'MOTHER', 'CRUCIFIXION'] +3528-168669-0069-946: ref=['SHE', 'ASKED', 'IT', 'OF', 'US', 'THAT', 'IS', 'TO', 'SAY', 'COMMANDED', 'US'] +3528-168669-0069-946: hyp=['SHE', 'ASKED', 'IT', 'OF', 'US', 'THAT', 'IS', 'TO', 'SAY', 'COMMANDED', 'US'] +3528-168669-0070-947: ref=['BUT', 'IT', 'IS', 'FORBIDDEN'] +3528-168669-0070-947: hyp=['BUT', 'IT', 'IS', 'FORBIDDEN'] +3528-168669-0071-948: ref=['OH', 'I', 'AM', 'A', 'STONE', 'IN', 'YOUR', 'WALLS'] +3528-168669-0071-948: hyp=['OH', 'I', 'AM', 'A', 'STONE', 'IN', 'YOUR', 'WALLS'] +3528-168669-0072-949: ref=['THINK', 'FATHER', 'FAUVENT', 'IF', 'SHE', 'WERE', 'TO', 'WORK', 'MIRACLES', 'HERE'] +3528-168669-0072-949: hyp=['THINK', 'FATHER', 'FERVENT', 'IF', 'SHE', 'WERE', 'TO', 'WORK', 'MIRACLES', 'HERE'] +3528-168669-0073-950: ref=['WHAT', 'A', 'GLORY', 'OF', 'GOD', 'FOR', 'THE', 'COMMUNITY', 'AND', 'MIRACLES', 'ISSUE', 'FROM', 'TOMBS'] +3528-168669-0073-950: hyp=['WHAT', 'A', 'GLORY', 'OF', 'GOD', 'FOR', 'THE', 'COMMUNITY', 'AND', 'MIRACLES', 'ISSUE', 'FROM', 'TOMBS'] +3528-168669-0074-951: ref=['BUT', 'REVEREND', 'MOTHER', 'IF', 'THE', 'AGENT', 'OF', 'THE', 'SANITARY', 'COMMISSION'] +3528-168669-0074-951: hyp=['BUT', 'REVEREND', 'MOTHER', 'IF', 'THE', 'AGENTIVE', 'THE', 'SANITARY', 'COMMISSION'] +3528-168669-0075-952: ref=['BUT', 'THE', 'COMMISSARY', 'OF', 'POLICE'] +3528-168669-0075-952: hyp=['BUT', 'THE', 'COMMISSARY', 'OF', 'POLICE'] +3528-168669-0076-953: ref=['CHONODEMAIRE', 'ONE', 'OF', 'THE', 'SEVEN', 'GERMAN', 'KINGS', 'WHO', 'ENTERED', 'AMONG', 'THE', 'GAULS', 'UNDER', 'THE', 'EMPIRE', 'OF', 'CONSTANTIUS', 'EXPRESSLY', 'RECOGNIZED', 'THE', 'RIGHT', 'OF', 'NUNS', 'TO', 'BE', 'BURIED', 'IN', 'RELIGION', 'THAT', 'IS', 'TO', 'SAY', 'BENEATH', 'THE', 'ALTAR'] +3528-168669-0076-953: hyp=['SHADOW', 'DE', 'MAR', 'ONE', 'OF', 'THE', 'SEVEN', 'GERMAN', 'KINGS', 'WHO', 'ENTERED', 'AMONG', 'THE', 'GAULS', 'UNDER', 'THE', 'EMPIRE', 'OF', 'CONSTANTIUS', 'EXPRESSLY', 'RECOGNIZED', 'THE', 'RIGHT', 'OF', 'NUNS', 'TO', 'BE', 'BURIED', 'IN', 'RELIGION', 'THAT', 'IS', 'TO', 'SAY', 'BENEATH', 'THE', 'ALTAR'] +3528-168669-0077-954: ref=['THE', 'WORLD', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'THE', 'CROSS'] +3528-168669-0077-954: hyp=['THE', 'WORLD', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'THE', 'CROSS'] +3528-168669-0078-955: ref=['MARTIN', 'THE', 'ELEVENTH', 'GENERAL', 'OF', 'THE', 'CARTHUSIANS', 'GAVE', 'TO', 'HIS', 'ORDER', 'THIS', 'DEVICE', 'STAT', 'CRUX', 'DUM', 'VOLVITUR', 'ORBIS'] +3528-168669-0078-955: hyp=['MERTON', 'THE', 'ELEVENTH', 'GENERAL', 'OF', 'THE', 'CARTHUSIANS', 'GAVE', 'TO', 'HIS', 'ORDER', 'THIS', 'DEVICE', 'STAT', 'CREW', 'DOOMFUL', 'ORBIS'] +3528-168669-0079-956: ref=['THE', 'PRIORESS', 'WHO', 'WAS', 'USUALLY', 'SUBJECTED', 'TO', 'THE', 'BARRIER', 'OF', 'SILENCE', 'AND', 'WHOSE', 'RESERVOIR', 'WAS', 'OVERFULL', 'ROSE', 'AND', 'EXCLAIMED', 'WITH', 'THE', 'LOQUACITY', 'OF', 'A', 'DAM', 'WHICH', 'HAS', 'BROKEN', 'AWAY'] +3528-168669-0079-956: hyp=['THE', 'PIRRUS', 'WHO', 'WAS', 'USUALLY', 'SUBJECTED', 'TO', 'THE', 'BARRIER', 'OF', 'SILENCE', 'AND', 'WHOSE', 'RESERVOIR', 'WAS', 'OVER', 'FULL', 'ROSE', 'AND', 'EXCLAIMED', 'WITH', 'THE', 'LOQUACITY', 'OF', 'A', 'DAM', 'WHICH', 'HAS', 'BROKEN', 'AWAY'] +3528-168669-0080-957: ref=['I', 'HAVE', 'ON', 'MY', 'RIGHT', 'BENOIT', 'AND', 'ON', 'MY', 'LEFT', 
'BERNARD', 'WHO', 'WAS', 'BERNARD'] +3528-168669-0080-957: hyp=['I', 'HAVE', 'ON', 'MY', 'RIGHT', 'BENOIS', 'AND', 'ALL', 'MY', 'LEFT', 'BERNARD', 'WHO', 'WAS', 'BERNARD'] +3528-168669-0081-958: ref=['THE', 'FIRST', 'ABBOT', 'OF', 'CLAIRVAUX'] +3528-168669-0081-958: hyp=['THE', 'FIRST', 'ABBOT', 'OF', 'CLERVAL'] +3528-168669-0082-959: ref=['HIS', 'ORDER', 'HAS', 'PRODUCED', 'FORTY', 'POPES', 'TWO', 'HUNDRED', 'CARDINALS', 'FIFTY', 'PATRIARCHS', 'SIXTEEN', 'HUNDRED', 'ARCHBISHOPS', 'FOUR', 'THOUSAND', 'SIX', 'HUNDRED', 'BISHOPS', 'FOUR', 'EMPERORS', 'TWELVE', 'EMPRESSES', 'FORTY', 'SIX', 'KINGS', 'FORTY', 'ONE', 'QUEENS', 'THREE', 'THOUSAND', 'SIX', 'HUNDRED', 'CANONIZED', 'SAINTS', 'AND', 'HAS', 'BEEN', 'IN', 'EXISTENCE', 'FOR', 'FOURTEEN', 'HUNDRED', 'YEARS'] +3528-168669-0082-959: hyp=['HIS', 'ORDER', 'HAS', 'PRODUCED', 'FORTY', 'POPES', 'TWO', 'HUNDRED', 'CARDINALS', 'FIFTY', 'PATRIARCHS', 'SIXTEEN', 'HUNDRED', 'ARCHBISHOPS', 'FOUR', 'THOUSAND', 'SIX', 'HUNDRED', 'BISHOPS', 'FOUR', 'EMPERORS', 'TWELVE', 'EMPRESSES', 'FORTY', 'SIX', 'KINGS', 'FORTY', 'ONE', 'QUEENS', 'THREE', 'THOUSAND', 'SIX', 'HUNDRED', 'CANNONIZED', 'SAINTS', 'AND', 'HAS', 'BEEN', 'IN', 'EXISTENCE', 'FOR', 'FOURTEEN', 'HUNDRED', 'YEARS'] +3528-168669-0083-960: ref=['ON', 'ONE', 'SIDE', 'SAINT', 'BERNARD', 'ON', 'THE', 'OTHER', 'THE', 'AGENT', 'OF', 'THE', 'SANITARY', 'DEPARTMENT'] +3528-168669-0083-960: hyp=['ON', 'ONE', 'SIDE', 'SAINT', 'BERNARD', 'ON', 'THE', 'OTHER', 'THE', 'AGENT', 'OF', 'THE', 'SENATORY', 'DEPARTMENT'] +3528-168669-0084-961: ref=['GOD', 'SUBORDINATED', 'TO', 'THE', 'COMMISSARY', 'OF', 'POLICE', 'SUCH', 'IS', 'THE', 'AGE', 'SILENCE', 'FAUVENT'] +3528-168669-0084-961: hyp=['GOD', 'SUBORDINATED', 'TO', 'THE', 'COMMISSARY', 'OF', 'POLICE', 'SUCH', 'AS', 'THE', 'AGE', 'SILENCE', 'FOR', 'VAUGHAN'] +3528-168669-0085-962: ref=['NO', 'ONE', 'DOUBTS', 'THE', 'RIGHT', 'OF', 'THE', 'MONASTERY', 'TO', 'SEPULTURE'] +3528-168669-0085-962: hyp=['NO', 'ONE', 'DOUBTS', 'THE', 'RIGHT', 'OF', 'THE', 'MONASTERY', 'TO', 'SEPULTURE'] +3528-168669-0086-963: ref=['ONLY', 'FANATICS', 'AND', 'THOSE', 'IN', 'ERROR', 'DENY', 'IT'] +3528-168669-0086-963: hyp=['ONLY', 'FANATICS', 'AND', 'THOSE', 'IN', 'ERROR', 'DENY', 'IT'] +3528-168669-0087-964: ref=['WE', 'LIVE', 'IN', 'TIMES', 'OF', 'TERRIBLE', 'CONFUSION'] +3528-168669-0087-964: hyp=['WE', 'LIVE', 'IN', 'TIMES', 'OF', 'TERRIBLE', 'CONFUSION'] +3528-168669-0088-965: ref=['WE', 'ARE', 'IGNORANT', 'AND', 'IMPIOUS'] +3528-168669-0088-965: hyp=['WE', 'ARE', 'IGNORANT', 'AND', 'IMPIOUS'] +3528-168669-0089-966: ref=['AND', 'THEN', 'RELIGION', 'IS', 'ATTACKED', 'WHY'] +3528-168669-0089-966: hyp=['AND', 'THEN', 'RELIGION', 'IS', 'ATTACKED', 'WHY'] +3528-168669-0090-967: ref=['BECAUSE', 'THERE', 'HAVE', 'BEEN', 'BAD', 'PRIESTS', 'BECAUSE', 'SAGITTAIRE', 'BISHOP', 'OF', 'GAP', 'WAS', 'THE', 'BROTHER', 'OF', 'SALONE', 'BISHOP', 'OF', 'EMBRUN', 'AND', 'BECAUSE', 'BOTH', 'OF', 'THEM', 'FOLLOWED', 'MOMMOL'] +3528-168669-0090-967: hyp=['BECAUSE', 'THERE', 'HAVE', 'BEEN', 'BAD', 'PRIESTS', 'BECAUSE', 'SAGOTARA', 'BISHOP', 'OF', 'GAP', 'WAS', 'A', 'BROTHER', 'OF', 'SALON', 'BISHOP', 'OF', 'AMBRON', 'AND', 'BECAUSE', 'BOTH', 'OF', 'THEM', 'FOLLOWED', 'MAMMA'] +3528-168669-0091-968: ref=['THEY', 'PERSECUTE', 'THE', 'SAINTS'] +3528-168669-0091-968: hyp=['THEY', 'PERSECUTE', 'THE', 'SAINTS'] +3528-168669-0092-969: ref=['THEY', 'SHUT', 'THEIR', 'EYES', 'TO', 'THE', 'TRUTH', 'DARKNESS', 'IS', 'THE', 'RULE'] +3528-168669-0092-969: hyp=['THEY', 'SHUT', 'THEIR', 'EYES', 'TO', 'THE', 'TRUTH', 
'DARKNESS', 'IS', 'THE', 'RULE'] +3528-168669-0093-970: ref=['THE', 'MOST', 'FEROCIOUS', 'BEASTS', 'ARE', 'BEASTS', 'WHICH', 'ARE', 'BLIND'] +3528-168669-0093-970: hyp=['THE', 'MOST', 'FEROCIOUS', 'BEASTS', 'ARE', 'BEASTS', 'WHICH', 'ARE', 'BLIND'] +3528-168669-0094-971: ref=['OH', 'HOW', 'WICKED', 'PEOPLE', 'ARE'] +3528-168669-0094-971: hyp=['OH', 'HOW', 'WICKED', 'PEOPLE', 'ARE'] +3528-168669-0095-972: ref=['BY', 'ORDER', 'OF', 'THE', 'KING', 'SIGNIFIES', 'TO', 'DAY', 'BY', 'ORDER', 'OF', 'THE', 'REVOLUTION'] +3528-168669-0095-972: hyp=['BY', 'ORDER', 'OF', 'THE', 'KING', 'SIGNIFIES', 'TO', 'DAY', 'BY', 'ORDER', 'OF', 'THE', 'REVOLUTION'] +3528-168669-0096-973: ref=['ONE', 'NO', 'LONGER', 'KNOWS', 'WHAT', 'IS', 'DUE', 'TO', 'THE', 'LIVING', 'OR', 'TO', 'THE', 'DEAD', 'A', 'HOLY', 'DEATH', 'IS', 'PROHIBITED'] +3528-168669-0096-973: hyp=['ONE', 'NO', 'LONGER', 'KNOWS', 'WHAT', 'IS', 'DUE', 'TO', 'THE', 'LIVING', 'OR', 'TO', 'THE', 'DEAD', 'A', 'HOLY', 'DEATH', 'IS', 'PROHIBITED'] +3528-168669-0097-974: ref=['GAUTHIER', 'BISHOP', 'OF', 'CHALONS', 'HELD', 'HIS', 'OWN', 'IN', 'THIS', 'MATTER', 'AGAINST', 'OTHO', 'DUKE', 'OF', 'BURGUNDY'] +3528-168669-0097-974: hyp=['GATHIERRE', 'BISHOP', 'OF', 'CHALON', 'HELD', 'HIS', 'OWN', 'IN', 'THIS', 'MATTER', 'AGAINST', 'OTHO', 'DUKE', 'OF', 'BURGUNDY'] +3528-168669-0098-975: ref=['THE', 'PRIORESS', 'TOOK', 'BREATH', 'THEN', 'TURNED', 'TO', 'FAUCHELEVENT'] +3528-168669-0098-975: hyp=['THE', 'PRIESTS', 'TOOK', 'BREATH', 'THEN', 'TURNED', 'TO', 'FAUCHELEVENT'] +3528-168669-0099-976: ref=['YOU', 'WILL', 'CLOSE', 'THE', 'COFFIN', 'THE', 'SISTERS', 'WILL', 'CARRY', 'IT', 'TO', 'THE', 'CHAPEL'] +3528-168669-0099-976: hyp=['YOU', 'WILL', 'CLOSE', 'THE', 'COFFIN', 'THE', 'SISTERS', 'WILL', 'CARRY', 'IT', 'TO', 'THE', 'CHAPEL'] +3528-168669-0100-977: ref=['THE', 'OFFICE', 'FOR', 'THE', 'DEAD', 'WILL', 'THEN', 'BE', 'SAID'] +3528-168669-0100-977: hyp=['THE', 'OFFICE', 'FOR', 'THE', 'DEAD', 'WILL', 'THEN', 'BE', 'SET'] +3528-168669-0101-978: ref=['BUT', 'SHE', 'WILL', 'HEAR', 'SHE', 'WILL', 'NOT', 'LISTEN'] +3528-168669-0101-978: hyp=['BUT', 'SHE', 'WILL', 'HEAR', 'SHE', 'WILL', 'NOT', 'LISTEN'] +3528-168669-0102-979: ref=['BESIDES', 'WHAT', 'THE', 'CLOISTER', 'KNOWS', 'THE', 'WORLD', 'LEARNS', 'NOT'] +3528-168669-0102-979: hyp=['BESIDES', 'WHAT', 'THE', 'CLOSER', 'KNOWS', 'THE', 'WORLD', 'LEARNS', 'NOT'] +3528-168669-0103-980: ref=['A', 'PAUSE', 'ENSUED'] +3528-168669-0103-980: hyp=['A', 'PAUSE', 'IN', 'SUIT'] +3528-168669-0104-981: ref=['YOU', 'WILL', 'REMOVE', 'YOUR', 'BELL'] +3528-168669-0104-981: hyp=['YOU', 'WILL', 'REMOVE', 'YOUR', 'BELT'] +3528-168669-0105-982: ref=['HAS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEAD', 'PAID', 'HIS', 'VISIT'] +3528-168669-0105-982: hyp=['HAS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEAD', 'PAID', 'HIS', 'VISIT'] +3528-168669-0106-983: ref=['HE', 'WILL', 'PAY', 'IT', 'AT', 'FOUR', "O'CLOCK", 'TO', 'DAY'] +3528-168669-0106-983: hyp=['HE', 'WILL', 'PAY', 'IT', 'AT', 'FOUR', "O'CLOCK", 'TO', 'DAY'] +3528-168669-0107-984: ref=['THE', 'PEAL', 'WHICH', 'ORDERS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEAD', 'TO', 'BE', 'SUMMONED', 'HAS', 'ALREADY', 'BEEN', 'RUNG'] +3528-168669-0107-984: hyp=['THE', 'PEAL', 'WHICH', 'ORDERS', 'THE', 'DOCTOR', 'FOR', 'THE', 'DEBT', 'TO', 'BE', 'SUMMONED', 'HAS', 'ALREADY', 'BEEN', 'RUN'] +3528-168669-0108-985: ref=['BUT', 'YOU', 'DO', 'NOT', 'UNDERSTAND', 'ANY', 'OF', 'THE', 'PEALS'] +3528-168669-0108-985: hyp=['BUT', 'YOU', 'DO', 'NOT', 'UNDERSTAND', 'ANY', 'OF', 'THE', 'PEALS'] +3528-168669-0109-986: ref=['THAT', 
'IS', 'WELL', 'FATHER', 'FAUVENT'] +3528-168669-0109-986: hyp=['THAT', 'IS', 'WELL', 'FATHER', 'VENT'] +3528-168669-0110-987: ref=['WHERE', 'WILL', 'YOU', 'OBTAIN', 'IT'] +3528-168669-0110-987: hyp=['WHERE', 'WILL', 'YOU', 'OBTAIN', 'IT'] +3528-168669-0111-988: ref=['I', 'HAVE', 'MY', 'HEAP', 'OF', 'OLD', 'IRON', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN'] +3528-168669-0111-988: hyp=['I', 'HAVE', 'MY', 'HEAP', 'OF', 'OLD', 'IRON', 'AT', 'THE', 'BOTTOM', 'OF', 'THE', 'GARDEN'] +3528-168669-0112-989: ref=['REVEREND', 'MOTHER', 'WHAT'] +3528-168669-0112-989: hyp=['REVERE', 'MOTHER', 'WHAT'] +3528-168669-0113-990: ref=['IF', 'YOU', 'WERE', 'EVER', 'TO', 'HAVE', 'ANY', 'OTHER', 'JOBS', 'OF', 'THIS', 'SORT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'MAN', 'FOR', 'YOU', 'A', 'PERFECT', 'TURK'] +3528-168669-0113-990: hyp=['IF', 'YOU', 'WERE', 'EVER', 'TO', 'HAVE', 'ANY', 'OTHER', 'JOBS', 'OF', 'THIS', 'SORT', 'MY', 'BROTHER', 'IS', 'THE', 'STRONG', 'MAN', 'FOR', 'YOU', 'A', 'PERFECT', 'TURK'] +3528-168669-0114-991: ref=['YOU', 'WILL', 'DO', 'IT', 'AS', 'SPEEDILY', 'AS', 'POSSIBLE'] +3528-168669-0114-991: hyp=['YOU', 'WILL', 'DO', 'IT', 'AS', 'SPEEDILY', 'AS', 'POSSIBLE'] +3528-168669-0115-992: ref=['I', 'CANNOT', 'WORK', 'VERY', 'FAST', 'I', 'AM', 'INFIRM', 'THAT', 'IS', 'WHY', 'I', 'REQUIRE', 'AN', 'ASSISTANT', 'I', 'LIMP'] +3528-168669-0115-992: hyp=['I', 'CANNOT', 'WORK', 'VERY', 'FAST', 'I', 'AM', 'INFIRM', 'THAT', 'IS', 'WHY', 'I', 'REQUIRE', 'AN', 'ASSISTANT', 'I', 'LIMP'] +3528-168669-0116-993: ref=['EVERYTHING', 'MUST', 'HAVE', 'BEEN', 'COMPLETED', 'A', 'GOOD', 'QUARTER', 'OF', 'AN', 'HOUR', 'BEFORE', 'THAT'] +3528-168669-0116-993: hyp=['EVERYTHING', 'MUST', 'HAVE', 'BEEN', 'COMPLETED', 'A', 'GOOD', 'QUARTER', 'OF', 'AN', 'HOUR', 'BEFORE', 'THAT'] +3528-168669-0117-994: ref=['I', 'WILL', 'DO', 'ANYTHING', 'TO', 'PROVE', 'MY', 'ZEAL', 'TOWARDS', 'THE', 'COMMUNITY', 'THESE', 'ARE', 'MY', 'ORDERS', 'I', 'AM', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN'] +3528-168669-0117-994: hyp=['I', 'WILL', 'DO', 'ANYTHING', 'TO', 'PROVE', 'MY', 'ZEAL', 'TOWARDS', 'THE', 'COMMUNITY', 'THESE', 'ARE', 'MY', 'ORDERS', 'I', 'AM', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN'] +3528-168669-0118-995: ref=['AT', 'ELEVEN', "O'CLOCK", 'EXACTLY', 'I', 'AM', 'TO', 'BE', 'IN', 'THE', 'CHAPEL'] +3528-168669-0118-995: hyp=['AT', 'ELEVEN', "O'CLOCK", 'EXACTLY', 'I', 'AM', 'TO', 'BE', 'IN', 'THE', 'CHAPEL'] +3528-168669-0119-996: ref=['MOTHER', 'ASCENSION', 'WILL', 'BE', 'THERE', 'TWO', 'MEN', 'WOULD', 'BE', 'BETTER'] +3528-168669-0119-996: hyp=['MOTHER', 'ASCENSION', 'WILL', 'BE', 'THERE', 'TWO', 'MEN', 'WOULD', 'BE', 'BETTER'] +3528-168669-0120-997: ref=['HOWEVER', 'NEVER', 'MIND', 'I', 'SHALL', 'HAVE', 'MY', 'LEVER'] +3528-168669-0120-997: hyp=['HOWEVER', 'NEVER', 'MIND', 'I', 'SHALL', 'HAVE', 'MY', 'LOVER'] +3528-168669-0121-998: ref=['AFTER', 'WHICH', 'THERE', 'WILL', 'BE', 'NO', 'TRACE', 'OF', 'ANYTHING'] +3528-168669-0121-998: hyp=['AFTER', 'WHICH', 'THERE', 'WILL', 'BE', 'NO', 'TRACE', 'OF', 'ANYTHING'] +3528-168669-0122-999: ref=['THE', 'GOVERNMENT', 'WILL', 'HAVE', 'NO', 'SUSPICION'] +3528-168669-0122-999: hyp=['THE', 'GOVERNMENT', 'WILL', 'HAVE', 'NO', 'SUSPICION'] +3528-168669-0123-1000: ref=['THE', 'EMPTY', 'COFFIN', 'REMAINS', 'THIS', 'PRODUCED', 'A', 'PAUSE'] +3528-168669-0123-1000: hyp=['THE', 'EMPTY', 'COFFIN', 'REMAINS', 'THIS', 'PRODUCED', 'A', 'PULSE'] +3528-168669-0124-1001: ref=['WHAT', 'IS', 'TO', 'BE', 'DONE', 'WITH', 'THAT', 'COFFIN', 'FATHER', 'FAUVENT'] +3528-168669-0124-1001: hyp=['WHAT', 'IS', 'TO', 'BE', 
'DONE', 'WITH', 'THAT', 'COFFIN', 'FATHER', 'PREVENT'] +3528-168669-0125-1002: ref=['IT', 'WILL', 'BE', 'GIVEN', 'TO', 'THE', 'EARTH', 'EMPTY'] +3528-168669-0125-1002: hyp=['IT', 'WILL', 'BE', 'GIVEN', 'TO', 'THE', 'EARTH', 'EMPTY'] +3528-168669-0126-1003: ref=['AH', 'THE', 'DE', 'EXCLAIMED', 'FAUCHELEVENT'] +3528-168669-0126-1003: hyp=['AH', 'LIDA', 'EXCLAIMED', 'FAUCHELEVENT'] +3528-168669-0127-1004: ref=['THE', 'VIL', 'STUCK', 'FAST', 'IN', 'HIS', 'THROAT'] +3528-168669-0127-1004: hyp=['THE', 'VILLE', 'STUCK', 'FAST', 'IN', 'HIS', 'THROAT'] +3528-168669-0128-1005: ref=['HE', 'MADE', 'HASTE', 'TO', 'IMPROVISE', 'AN', 'EXPEDIENT', 'TO', 'MAKE', 'HER', 'FORGET', 'THE', 'OATH'] +3528-168669-0128-1005: hyp=['HE', 'MADE', 'HASTE', 'TO', 'IMPROVISE', 'AN', 'EXPEDIENT', 'TO', 'MAKE', 'HER', 'FORGET', 'THE', 'OATH'] +3528-168669-0129-1006: ref=['I', 'WILL', 'PUT', 'EARTH', 'IN', 'THE', 'COFFIN', 'REVEREND', 'MOTHER', 'THAT', 'WILL', 'PRODUCE', 'THE', 'EFFECT', 'OF', 'A', 'CORPSE'] +3528-168669-0129-1006: hyp=['I', 'WILL', 'PUT', 'EARTH', 'IN', 'THE', 'COFFIN', 'REVERED', 'MOTHER', 'THAT', 'WILL', 'PRODUCE', 'THE', 'EFFECT', 'OF', 'A', 'CORPSE'] +3528-168669-0130-1007: ref=['I', 'WILL', 'MAKE', 'THAT', 'MY', 'SPECIAL', 'BUSINESS'] +3528-168669-0130-1007: hyp=['I', 'WILL', 'MAKE', 'THAT', 'MY', 'SPECIAL', 'BUSINESS'] +3538-142836-0000-1567: ref=['GENERAL', 'OBSERVATIONS', 'ON', 'PRESERVES', 'CONFECTIONARY', 'ICES', 'AND', 'DESSERT', 'DISHES'] +3538-142836-0000-1567: hyp=['JOE', 'OBSERVATIONS', 'ON', 'PRESERVES', 'CONFECTIONARY', 'ICES', 'AND', 'DESSERT', 'DISHES'] +3538-142836-0001-1568: ref=['THE', 'EXPENSE', 'OF', 'PRESERVING', 'THEM', 'WITH', 'SUGAR', 'IS', 'A', 'SERIOUS', 'OBJECTION', 'FOR', 'EXCEPT', 'THE', 'SUGAR', 'IS', 'USED', 'IN', 'CONSIDERABLE', 'QUANTITIES', 'THE', 'SUCCESS', 'IS', 'VERY', 'UNCERTAIN'] +3538-142836-0001-1568: hyp=['THE', 'EXPENSE', 'OF', 'PRESERVING', 'THEM', 'WITH', 'SUGAR', 'IS', 'A', 'SERIOUS', 'OBJECTION', 'FOR', 'EXCEPT', 'A', 'SUGAR', 'IS', 'USED', 'IN', 'CONSIDERABLE', 'QUALITIES', 'THE', 'SUCCESS', 'IS', 'VERY', 'UNCERTAIN'] +3538-142836-0002-1569: ref=['FRUIT', 'GATHERED', 'IN', 'WET', 'OR', 'FOGGY', 'WEATHER', 'WILL', 'SOON', 'BE', 'MILDEWED', 'AND', 'BE', 'OF', 'NO', 'SERVICE', 'FOR', 'PRESERVES'] +3538-142836-0002-1569: hyp=['FRUIT', 'GATHERED', 'IN', 'WET', 'OR', 'FOGGY', 'WEATHER', 'WILL', 'SOON', 'BE', 'MILDED', 'AND', 'BE', 'OF', 'NO', 'SERVICE', 'FOR', 'PRESERVES'] +3538-142836-0003-1570: ref=['BUT', 'TO', 'DISTINGUISH', 'THESE', 'PROPERLY', 'REQUIRES', 'VERY', 'GREAT', 'ATTENTION', 'AND', 'CONSIDERABLE', 'EXPERIENCE'] +3538-142836-0003-1570: hyp=['BUT', 'TO', 'DISTINGUISH', 'HIS', 'PROPER', 'REQUIRES', 'VERY', 'GREAT', 'ATTENTION', 'AND', 'CONSIDERABLE', 'EXPERIENCE'] +3538-142836-0004-1571: ref=['IF', 'YOU', 'DIP', 'THE', 'FINGER', 'INTO', 'THE', 'SYRUP', 'AND', 'APPLY', 'IT', 'TO', 'THE', 'THUMB', 'THE', 'TENACITY', 'OF', 'THE', 'SYRUP', 'WILL', 'ON', 'SEPARATING', 'THE', 'FINGER', 'AND', 'THUMB', 'AFFORD', 'A', 'THREAD', 'WHICH', 'SHORTLY', 'BREAKS', 'THIS', 'IS', 'THE', 'LITTLE', 'THREAD'] +3538-142836-0004-1571: hyp=['IF', 'YOU', 'DIP', 'THE', 'FINGER', 'INTO', 'THE', 'SERF', 'AND', 'APPLY', 'IT', 'TO', 'THE', 'THUMB', 'THE', 'TENACITY', 'OF', 'THE', 'SERF', 'WILL', 'ON', 'SEPARATING', 'THE', 'FINGER', 'AND', 'THUMB', 'AFFORD', 'A', 'THREAD', 'WHICH', 'SHORTLY', 'BREAKS', 'THIS', 'IS', 'THE', 'LITTLE', 'THREAD'] +3538-142836-0005-1572: ref=['LET', 'IT', 'BOIL', 'UP', 'AGAIN', 'THEN', 'TAKE', 'IT', 'OFF', 'AND', 'REMOVE', 'CAREFULLY', 'THE', 
'SCUM', 'THAT', 'HAS', 'RISEN'] +3538-142836-0005-1572: hyp=['LET', 'IT', 'BOIL', 'UP', 'AGAIN', 'THEN', 'TAKE', 'IT', 'OFF', 'AND', 'REMOVE', 'CAREFULLY', 'THE', 'SCUM', 'THAT', 'HAS', 'RISEN'] +3538-142836-0006-1573: ref=['IT', 'IS', 'CONSIDERED', 'TO', 'BE', 'SUFFICIENTLY', 'BOILED', 'WHEN', 'SOME', 'TAKEN', 'UP', 'IN', 'A', 'SPOON', 'POURS', 'OUT', 'LIKE', 'OIL'] +3538-142836-0006-1573: hyp=['IT', 'IS', 'CONSIDERED', 'TO', 'BE', 'SUFFICIENTLY', 'BOILED', 'WHEN', 'SOME', 'TAKEN', 'UP', 'IN', 'A', 'SPOON', 'POURS', 'OUT', 'LIKE', 'OIL'] +3538-142836-0007-1574: ref=['BEFORE', 'SUGAR', 'WAS', 'IN', 'USE', 'HONEY', 'WAS', 'EMPLOYED', 'TO', 'PRESERVE', 'MANY', 'VEGETABLE', 'PRODUCTIONS', 'THOUGH', 'THIS', 'SUBSTANCE', 'HAS', 'NOW', 'GIVEN', 'WAY', 'TO', 'THE', 'JUICE', 'OF', 'THE', 'SUGAR', 'CANE'] +3538-142836-0007-1574: hyp=['BEFORE', 'SUGAR', 'WAS', 'IN', 'USE', 'HONEY', 'WAS', 'EMPLOYED', 'TO', 'PRESERVE', 'MANY', 'VEGETABLE', 'PRODUCTIONS', 'THOUGH', 'THIS', 'SUBSTANCE', 'IS', 'NOW', 'GIVEN', 'WAY', 'TO', 'THE', 'JUICE', 'OF', 'THE', 'SUGAR', 'CANE'] +3538-142836-0008-1575: ref=['FOURTEEN', 'NINETY', 'NINE'] +3538-142836-0008-1575: hyp=['FOURTEEN', 'NINETY', 'NINE'] +3538-142836-0009-1576: ref=['BOIL', 'THEM', 'UP', 'THREE', 'DAYS', 'SUCCESSIVELY', 'SKIMMING', 'EACH', 'TIME', 'AND', 'THEY', 'WILL', 'THEN', 'BE', 'FINISHED', 'AND', 'IN', 'A', 'STATE', 'FIT', 'TO', 'BE', 'PUT', 'INTO', 'POTS', 'FOR', 'USE'] +3538-142836-0009-1576: hyp=['BOIL', 'THEM', 'UP', 'THREE', 'DAYS', 'SUCCESSIVELY', 'SKIMMING', 'EACH', 'TIME', 'AND', 'THEY', 'WILL', 'THEN', 'BE', 'FINISHED', 'AND', 'IN', 'A', 'STATE', 'FIT', 'TO', 'BE', 'PUT', 'INTO', 'POTS', 'FOR', 'USE'] +3538-142836-0010-1577: ref=['THE', 'REASON', 'WHY', 'THE', 'FRUIT', 'IS', 'EMPTIED', 'OUT', 'OF', 'THE', 'PRESERVING', 'PAN', 'INTO', 'AN', 'EARTHEN', 'PAN', 'IS', 'THAT', 'THE', 'ACID', 'OF', 'THE', 'FRUIT', 'ACTS', 'UPON', 'THE', 'COPPER', 'OF', 'WHICH', 'THE', 'PRESERVING', 'PANS', 'ARE', 'USUALLY', 'MADE'] +3538-142836-0010-1577: hyp=['THE', 'REASON', 'WHY', 'THE', 'FRUIT', 'IS', 'EMPTIED', 'OUT', 'OF', 'THE', 'PRESERVING', 'PAN', 'INTO', 'AN', 'EARTHEN', 'PAN', 'IS', 'THAT', 'THE', 'ACID', 'OF', 'THE', 'FRUIT', 'ACTS', 'UPON', 'THE', 'COPPER', 'OF', 'WHICH', 'THE', 'PRESERVING', 'PANS', 'ARE', 'USUALLY', 'MADE'] +3538-142836-0011-1578: ref=['FROM', 'THIS', 'EXAMPLE', 'THE', 'PROCESS', 'OF', 'PRESERVING', 'FRUITS', 'BY', 'SYRUP', 'WILL', 'BE', 'EASILY', 'COMPREHENDED'] +3538-142836-0011-1578: hyp=['FROM', 'THIS', 'EXAMPLE', 'THE', 'PROCESS', 'OF', 'PRESERVING', 'FRUITS', 'BY', 'SYRUP', 'WOULD', 'BE', 'EASILY', 'COMPREHENDED'] +3538-142836-0012-1579: ref=['THEY', 'SHOULD', 'BE', 'DRIED', 'IN', 'THE', 'STOVE', 'OR', 'OVEN', 'ON', 'A', 'SIEVE', 'AND', 'TURNED', 'EVERY', 'SIX', 'OR', 'EIGHT', 'HOURS', 'FRESH', 'POWDERED', 'SUGAR', 'BEING', 'SIFTED', 'OVER', 'THEM', 'EVERY', 'TIME', 'THEY', 'ARE', 'TURNED'] +3538-142836-0012-1579: hyp=['THIS', 'SHOULD', 'BE', 'DRIED', 'IN', 'THE', 'STOVE', 'OR', 'OVEN', 'ON', 'A', 'SEA', 'AND', 'TURNED', 'EVERY', 'SIX', 'OR', 'EIGHT', 'HOURS', 'FRESH', 'PADDED', 'SUGAR', 'BEING', 'SIFTED', 'OVER', 'THEM', 'EVERY', 'TIME', 'THEY', 'ARE', 'TURNED'] +3538-142836-0013-1580: ref=['IN', 'THIS', 'WAY', 'IT', 'IS', 'ALSO', 'THAT', 'ORANGE', 'AND', 'LEMON', 'CHIPS', 'ARE', 'PRESERVED'] +3538-142836-0013-1580: hyp=['IN', 'THIS', 'WAY', 'IT', 'IS', 'ALSO', 'THAT', 'ORANGE', 'AND', 'LINENSHIPS', 'ARE', 'PRESERVED'] +3538-142836-0014-1581: ref=['MARMALADES', 'JAMS', 'AND', 'FRUIT', 'PASTES', 'ARE', 'OF', 'THE', 'SAME', 
'NATURE', 'AND', 'ARE', 'NOW', 'IN', 'VERY', 'GENERAL', 'REQUEST'] +3538-142836-0014-1581: hyp=['MARMALADES', 'JAMS', 'AND', 'FRUIT', 'PACE', 'ARE', 'OF', 'THE', 'SAME', 'NATURE', 'AND', 'ARE', 'NOW', 'IN', 'VERY', 'GENERAL', 'QUEST'] +3538-142836-0015-1582: ref=['MARMALADES', 'AND', 'JAMS', 'DIFFER', 'LITTLE', 'FROM', 'EACH', 'OTHER', 'THEY', 'ARE', 'PRESERVES', 'OF', 'A', 'HALF', 'LIQUID', 'CONSISTENCY', 'MADE', 'BY', 'BOILING', 'THE', 'PULP', 'OF', 'FRUITS', 'AND', 'SOMETIMES', 'PART', 'OF', 'THE', 'RINDS', 'WITH', 'SUGAR'] +3538-142836-0015-1582: hyp=['MARVELL', 'EATS', 'AND', 'JAMES', 'DIFFER', 'LITTLE', 'FROM', 'EACH', 'OTHER', 'THEIR', 'PRESERVES', 'OF', 'HALF', 'LIQUID', 'CONSISTENCY', 'MADE', 'BY', 'BOILING', 'THE', 'PULP', 'OF', 'FRUITS', 'AND', 'SOMETIMES', 'PART', 'OF', 'THE', 'RHINES', 'WITH', 'SUGAR'] +3538-142836-0016-1583: ref=['THAT', 'THEY', 'MAY', 'KEEP', 'IT', 'IS', 'NECESSARY', 'NOT', 'TO', 'BE', 'SPARING', 'OF', 'SUGAR', 'FIFTEEN', 'O', 'THREE'] +3538-142836-0016-1583: hyp=['THAT', 'THEY', 'MAY', 'KEEP', 'IT', 'IS', 'NECESSARY', 'NOT', 'TO', 'BE', 'SPARING', 'OF', 'SUGAR', 'FIFTEEN', 'O', 'THREE'] +3538-142836-0017-1584: ref=['IN', 'ALL', 'THE', 'OPERATIONS', 'FOR', 'PRESERVE', 'MAKING', 'WHEN', 'THE', 'PRESERVING', 'PAN', 'IS', 'USED', 'IT', 'SHOULD', 'NOT', 'BE', 'PLACED', 'ON', 'THE', 'FIRE', 'BUT', 'ON', 'A', 'TRIVET', 'UNLESS', 'THE', 'JAM', 'IS', 'MADE', 'ON', 'A', 'HOT', 'PLATE', 'WHEN', 'THIS', 'IS', 'NOT', 'NECESSARY'] +3538-142836-0017-1584: hyp=['IN', 'ALL', 'THE', 'OPERATIONS', 'FOR', 'PRESERVE', 'MAKING', 'WHEN', 'THE', 'PRESERVING', 'PAN', 'IS', 'USED', 'IT', 'SHOULD', 'NOT', 'BE', 'PLACED', 'ON', 'THE', 'FIRE', 'BUT', 'ON', 'A', 'TRIBUT', 'UNLESS', 'THE', 'JAME', 'IS', 'MADE', 'ON', 'A', 'HOT', 'PLATE', 'WHEN', 'THIS', 'IS', 'NOT', 'NECESSARY'] +3538-142836-0018-1585: ref=['CONFECTIONARY', 'FIFTEEN', 'O', 'EIGHT'] +3538-142836-0018-1585: hyp=['CONFECTIONARY', 'FIFTEEN', 'O', 'EIGHT'] +3538-142836-0019-1586: ref=['IN', 'SPEAKING', 'OF', 'CONFECTIONARY', 'IT', 'SHOULD', 'BE', 'REMARKED', 'THAT', 'ALL', 'THE', 'VARIOUS', 'PREPARATIONS', 'ABOVE', 'NAMED', 'COME', 'STRICTLY', 'SPEAKING', 'UNDER', 'THAT', 'HEAD', 'FOR', 'THE', 'VARIOUS', 'FRUITS', 'FLOWERS', 'HERBS', 'ROOTS', 'AND', 'JUICES', 'WHICH', 'WHEN', 'BOILED', 'WITH', 'SUGAR', 'WERE', 'FORMERLY', 'EMPLOYED', 'IN', 'PHARMACY', 'AS', 'WELL', 'AS', 'FOR', 'SWEETMEATS', 'WERE', 'CALLED', 'CONFECTIONS', 'FROM', 'THE', 'LATIN', 'WORD', 'CONFICERE', 'TO', 'MAKE', 'UP', 'BUT', 'THE', 'TERM', 'CONFECTIONARY', 'EMBRACES', 'A', 'VERY', 'LARGE', 'CLASS', 'INDEED', 'OF', 'SWEET', 'FOOD', 'MANY', 'KINDS', 'OF', 'WHICH', 'SHOULD', 'NOT', 'BE', 'ATTEMPTED', 'IN', 'THE', 'ORDINARY', 'CUISINE'] +3538-142836-0019-1586: hyp=['IN', 'SPEAKING', 'OF', 'CONFECTIONARY', 'SHOULD', 'BE', 'REMARKED', 'THAT', 'ALL', 'THE', 'VARIOUS', 'PREPARATIONS', 'ABOVE', 'NAMED', 'COME', 'STRICTLY', 'SPEAKING', 'UNDER', 'THAT', 'HEAD', 'FOR', 'THE', 'VARIOUS', 'FRUITS', 'FLOWERS', 'HERBS', 'OR', 'SAUCES', 'WHICH', 'ONE', 'BOILED', 'WITH', 'SUGAR', 'WERE', 'FORMERLY', 'EMPLOYED', 'IN', 'PHARMACY', 'AS', 'WELL', 'AS', 'FOR', 'SWEETMEATS', 'WERE', 'CALLED', 'CONFECTIONS', 'FROM', 'THE', 'LATIN', 'WORD', 'CONFUSE', 'TO', 'MAKE', 'UP', 'BUT', 'THE', 'TERM', 'CONFECTIONARY', 'EMBRACES', 'A', 'VERY', 'LARGE', 'CLASS', 'INDEED', 'OF', 'SWEET', 'FOOD', 'MANY', 'KINDS', 'OF', 'WHICH', 'SHOULD', 'NOT', 'BE', 'ATTEMPTED', 'IN', 'THE', 'ORDINARY', 'COSEINE'] +3538-142836-0020-1587: ref=['THE', 'THOUSAND', 'AND', 'ONE', 'ORNAMENTAL', 'DISHES', 'THAT', 
'ADORN', 'THE', 'TABLES', 'OF', 'THE', 'WEALTHY', 'SHOULD', 'BE', 'PURCHASED', 'FROM', 'THE', 'CONFECTIONER', 'THEY', 'CANNOT', 'PROFITABLY', 'BE', 'MADE', 'AT', 'HOME'] +3538-142836-0020-1587: hyp=['A', 'THOUSAND', 'AND', 'ONE', 'ORNAMENTAL', 'DISHES', 'THAT', 'ADORN', 'THE', 'TABLES', 'OF', 'THE', 'WEALTHY', 'SHOULD', 'BE', 'PURCHASED', 'FROM', 'THE', 'CONFECTIONER', 'THEY', 'CANNOT', 'PROFITABLY', 'BE', 'MADE', 'AT', 'HOME'] +3538-142836-0021-1588: ref=['HOWEVER', 'AS', 'LATE', 'AS', 'THE', 'REIGNS', 'OF', 'OUR', 'TWO', 'LAST', 'GEORGES', 'FABULOUS', 'SUMS', 'WERE', 'OFTEN', 'EXPENDED', 'UPON', 'FANCIFUL', 'DESSERTS'] +3538-142836-0021-1588: hyp=['HOWEVER', 'AS', 'LATE', 'AS', 'THE', 'REIGN', 'OF', 'OUR', 'TWO', 'LAST', 'GEORGES', 'FABULOUS', 'SUMS', 'WERE', 'OFTEN', 'EXPENDED', 'UPON', 'FANCIFUL', 'DESERTS'] +3538-142836-0022-1589: ref=['THE', 'SHAPE', 'OF', 'THE', 'DISHES', 'VARIES', 'AT', 'DIFFERENT', 'PERIODS', 'THE', 'PREVAILING', 'FASHION', 'AT', 'PRESENT', 'BEING', 'OVAL', 'AND', 'CIRCULAR', 'DISHES', 'ON', 'STEMS'] +3538-142836-0022-1589: hyp=['THE', 'SHAPE', 'OF', 'THE', 'DISH', 'IS', 'VARIES', 'AT', 'DIFFERENT', 'PERIODS', 'THE', 'PREVAILING', 'FASHION', 'AT', 'PRESENT', 'BEING', 'OVAL', 'AND', 'CIRCULAR', 'DISHES', 'ON', 'STEMS'] +3538-142836-0023-1590: ref=['ICES'] +3538-142836-0023-1590: hyp=['ISIS'] +3538-142836-0024-1591: ref=['AT', 'DESSERTS', 'OR', 'AT', 'SOME', 'EVENING', 'PARTIES', 'ICES', 'ARE', 'SCARCELY', 'TO', 'BE', 'DISPENSED', 'WITH'] +3538-142836-0024-1591: hyp=['I', 'DESERTS', 'OR', 'AT', 'SOME', 'EVENING', 'PARTIES', 'ICES', 'ARE', 'SCARCELY', 'DID', 'BE', 'DISPENSED', 'WITH'] +3538-142836-0025-1592: ref=['THE', 'SPADDLE', 'IS', 'GENERALLY', 'MADE', 'OF', 'COPPER', 'KEPT', 'BRIGHT', 'AND', 'CLEAN'] +3538-142836-0025-1592: hyp=['THE', 'SPATTLE', 'IS', 'GENERALLY', 'MADE', 'OF', 'COPPER', 'KEPT', 'BRIGHT', 'AND', 'CLEAN'] +3538-142836-0026-1593: ref=['THEY', 'SHOULD', 'BE', 'TAKEN', 'IMMEDIATELY', 'AFTER', 'THE', 'REPAST', 'OR', 'SOME', 'HOURS', 'AFTER', 'BECAUSE', 'THE', 'TAKING', 'THESE', 'SUBSTANCES', 'DURING', 'THE', 'PROCESS', 'OF', 'DIGESTION', 'IS', 'APT', 'TO', 'PROVOKE', 'INDISPOSITION'] +3538-142836-0026-1593: hyp=['THEY', 'SHOULD', 'BE', 'TAKEN', 'IMMEDIATELY', 'AFTER', 'THE', 'REPAST', 'OR', 'SOME', 'HOURS', 'AFTER', 'BECAUSE', 'THE', 'TAKING', 'OF', 'THESE', 'SUBSTANCES', 'DURING', 'THE', 'PROCESS', 'OF', 'DIGESTION', 'IS', 'APT', 'TO', 'PROVOKE', 'INDISPOSITION'] +3538-163619-0000-1500: ref=['THERE', 'WAS', 'ONCE', 'ON', 'A', 'TIME', 'A', 'WIDOWER', 'WHO', 'HAD', 'A', 'SON', 'AND', 'A', 'DAUGHTER', 'BY', 'HIS', 'FIRST', 'WIFE'] +3538-163619-0000-1500: hyp=['THERE', 'WAS', 'ONCE', 'ON', 'THE', 'TIME', 'A', 'WIDOWER', 'WHO', 'HAD', 'A', 'SUDDEN', 'AND', 'A', 'DAUGHTER', 'BY', 'HIS', 'FIRST', 'WIF'] +3538-163619-0001-1501: ref=['FROM', 'THE', 'VERY', 'DAY', 'THAT', 'THE', 'NEW', 'WIFE', 'CAME', 'INTO', 'THE', 'HOUSE', 'THERE', 'WAS', 'NO', 'PEACE', 'FOR', 'THE', "MAN'S", 'CHILDREN', 'AND', 'NOT', 'A', 'CORNER', 'TO', 'BE', 'FOUND', 'WHERE', 'THEY', 'COULD', 'GET', 'ANY', 'REST', 'SO', 'THE', 'BOY', 'THOUGHT', 'THAT', 'THE', 'BEST', 'THING', 'HE', 'COULD', 'DO', 'WAS', 'TO', 'GO', 'OUT', 'INTO', 'THE', 'WORLD', 'AND', 'TRY', 'TO', 'EARN', 'HIS', 'OWN', 'BREAD'] +3538-163619-0001-1501: hyp=['FROM', 'THE', 'VERY', 'DAY', 'THAT', 'THE', 'NEW', 'WIFE', 'CAME', 'INTO', 'THE', 'HOUSE', 'THERE', 'WAS', 'NO', 'PEACE', 'FOR', 'THE', "MAN'S", 'CHILDREN', 'AND', 'NOT', 'A', 'CORNER', 'TO', 'BE', 'FOUND', 'WHERE', 'THEY', 'COULD', 'GET', 'ANY', 'REST', 'SO', 
'THE', 'BOY', 'THOUGHT', 'THAT', 'THE', 'BEST', 'THING', 'HE', 'COULD', 'DO', 'WAS', 'TO', 'GO', 'OUT', 'INTO', 'THE', 'WORLD', 'AND', 'TRY', 'TO', 'EARN', 'HIS', 'OWN', 'BREAD'] +3538-163619-0002-1502: ref=['BUT', 'HIS', 'SISTER', 'WHO', 'WAS', 'STILL', 'AT', 'HOME', 'FARED', 'WORSE', 'AND', 'WORSE'] +3538-163619-0002-1502: hyp=['BUT', 'HIS', 'SISTER', 'WHO', 'WAS', 'STILL', 'AT', 'HOME', 'FARED', 'WORSE', 'AND', 'WORSE'] +3538-163619-0003-1503: ref=['KISS', 'ME', 'GIRL', 'SAID', 'THE', 'HEAD'] +3538-163619-0003-1503: hyp=['KISS', 'ME', 'GO', 'SAID', 'THE', 'HEAD'] +3538-163619-0004-1504: ref=['WHEN', 'THE', 'KING', 'ENTERED', 'AND', 'SAW', 'IT', 'HE', 'STOOD', 'STILL', 'AS', 'IF', 'HE', 'WERE', 'IN', 'FETTERS', 'AND', 'COULD', 'NOT', 'STIR', 'FROM', 'THE', 'SPOT', 'FOR', 'THE', 'PICTURE', 'SEEMED', 'TO', 'HIM', 'SO', 'BEAUTIFUL'] +3538-163619-0004-1504: hyp=['WHEN', 'THE', 'KING', 'ENTERED', 'AND', 'SOUGHT', 'HE', 'STOOD', 'STILL', 'AS', 'IF', 'HE', 'WERE', 'IN', 'FETTERS', 'AND', 'COULD', 'NOT', 'STIR', 'FROM', 'THE', 'SPOT', 'FOR', 'THE', 'PICTURE', 'SEEMED', 'TO', 'HIM', 'SO', 'BEAUTIFUL'] +3538-163619-0005-1505: ref=['THE', 'YOUTH', 'PROMISED', 'TO', 'MAKE', 'ALL', 'THE', 'HASTE', 'HE', 'COULD', 'AND', 'SET', 'FORTH', 'FROM', 'THE', "KING'S", 'PALACE'] +3538-163619-0005-1505: hyp=['THESE', 'PROMISED', 'TO', 'MAKE', 'ALL', 'THE', 'HASTE', 'HE', 'COULD', 'AND', 'SET', 'FORTH', 'FROM', 'THE', "KING'S", 'PALACE'] +3538-163619-0006-1506: ref=['AT', 'LAST', 'THEY', 'CAME', 'IN', 'SIGHT', 'OF', 'LAND'] +3538-163619-0006-1506: hyp=['AT', 'LAST', 'THEY', 'CAME', 'IN', 'SIGHT', 'OF', 'LAND'] +3538-163619-0007-1507: ref=['WELL', 'IF', 'MY', 'BROTHER', 'SAYS', 'SO', 'I', 'MUST', 'DO', 'IT', 'SAID', 'THE', "MAN'S", 'DAUGHTER', 'AND', 'SHE', 'FLUNG', 'HER', 'CASKET', 'INTO', 'THE', 'SEA'] +3538-163619-0007-1507: hyp=['WELL', 'OF', 'MY', 'BROTHER', 'SAYS', 'SO', 'I', 'MUST', 'DO', 'IT', 'SAID', 'THE', "MAN'S", 'DAUGHTER', 'AND', 'SHE', 'FLUNG', 'HER', 'CASKET', 'INTO', 'THE', 'SEA'] +3538-163619-0008-1508: ref=['WHAT', 'IS', 'MY', 'BROTHER', 'SAYING', 'ASKED', 'HIS', 'SISTER', 'AGAIN'] +3538-163619-0008-1508: hyp=['WHAT', 'IS', 'MY', 'BROTHER', 'SAYING', 'ASKED', 'HIS', 'SISTER', 'AGAIN'] +3538-163619-0009-1509: ref=['ON', 'THE', 'FIRST', 'THURSDAY', 'NIGHT', 'AFTER', 'THIS', 'A', 'BEAUTIFUL', 'MAIDEN', 'CAME', 'INTO', 'THE', 'KITCHEN', 'OF', 'THE', 'PALACE', 'AND', 'BEGGED', 'THE', 'KITCHEN', 'MAID', 'WHO', 'SLEPT', 'THERE', 'TO', 'LEND', 'HER', 'A', 'BRUSH'] +3538-163619-0009-1509: hyp=['ON', 'THE', 'FIRST', 'THURSDAY', 'NIGHT', 'AFTER', 'THIS', 'A', 'BEAUTIFUL', 'MAIDEN', 'CAME', 'INTO', 'THE', 'KITCHEN', 'OF', 'THE', 'PALACE', 'AND', 'BEGGED', 'THE', 'KITCHEN', 'MAID', 'WHO', 'SLEPT', 'THERE', 'TO', 'LEND', 'HER', 'A', 'BRUSH'] +3538-163619-0010-1510: ref=['SHE', 'BEGGED', 'VERY', 'PRETTILY', 'AND', 'GOT', 'IT', 'AND', 'THEN', 'SHE', 'BRUSHED', 'HER', 'HAIR', 'AND', 'THE', 'GOLD', 'DROPPED', 'FROM', 'IT'] +3538-163619-0010-1510: hyp=['SHE', 'BEGGED', 'VERY', 'PRETTILY', 'AND', 'GOT', 'IT', 'AND', 'THEN', 'SHE', 'BRUSHED', 'HER', 'HAIR', 'AND', 'THE', 'GOLD', 'DROPPED', 'FROM', 'IT'] +3538-163619-0011-1511: ref=['OUT', 'ON', 'THEE', 'UGLY', 'BUSHY', 'BRIDE', 'SLEEPING', 'SO', 'SOFT', 'BY', 'THE', 'YOUNG', "KING'S", 'SIDE', 'ON', 'SAND', 'AND', 'STONES', 'MY', 'BED', 'I', 'MAKE', 'AND', 'MY', 'BROTHER', 'SLEEPS', 'WITH', 'THE', 'COLD', 'SNAKE', 'UNPITIED', 'AND', 'UNWEPT'] +3538-163619-0011-1511: hyp=['OUT', 'ON', 'ME', 'UGLY', 'BUSHY', 'BRIDE', 'SLEEPING', 'SO', 'SOFT', 'BY', 'THE', 'YOUNG', 
"KING'S", 'SIDE', 'ON', 'SAND', 'AND', 'STONES', 'MY', 'BED', 'I', 'MAKE', 'AND', 'MY', 'BROTHER', 'SLEEPS', 'WITH', 'THE', 'COLD', 'SNAKE', 'UNPITIED', 'AND', 'UNWEPT'] +3538-163619-0012-1512: ref=['I', 'SHALL', 'COME', 'TWICE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN', 'SAID', 'SHE'] +3538-163619-0012-1512: hyp=['I', 'SHALL', 'COME', 'TWICE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN', 'SAID', 'SHE'] +3538-163619-0013-1513: ref=['THIS', 'TIME', 'ALSO', 'AS', 'BEFORE', 'SHE', 'BORROWED', 'A', 'BRUSH', 'AND', 'BRUSHED', 'HER', 'HAIR', 'WITH', 'IT', 'AND', 'THE', 'GOLD', 'DROPPED', 'DOWN', 'AS', 'SHE', 'DID', 'IT', 'AND', 'AGAIN', 'SHE', 'SENT', 'THE', 'DOG', 'OUT', 'THREE', 'TIMES', 'AND', 'WHEN', 'DAY', 'DAWNED', 'SHE', 'DEPARTED', 'BUT', 'AS', 'SHE', 'WAS', 'GOING', 'SHE', 'SAID', 'AS', 'SHE', 'HAD', 'SAID', 'BEFORE', 'I', 'SHALL', 'COME', 'ONCE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN'] +3538-163619-0013-1513: hyp=['THIS', 'TIME', 'ALSO', 'AS', 'BEFORE', 'SHE', 'BORROWED', 'A', 'BRUSH', 'AND', 'BRUSHED', 'HER', 'HAIR', 'WITH', 'IT', 'AND', 'THE', 'GOLD', 'DROPPED', 'DOWN', 'AS', 'SHE', 'DID', 'IT', 'AND', 'AGAIN', 'SHE', 'SENT', 'THE', 'DOG', 'OUT', 'THREE', 'TIMES', 'AND', 'WHEN', 'DAY', 'DAWNED', 'SHE', 'DEPARTED', 'BUT', 'AS', 'SHE', 'WAS', 'GOING', 'SHE', 'SAID', 'AS', 'SHE', 'HAD', 'SAID', 'BEFORE', 'I', 'SHALL', 'COME', 'ONCE', 'MORE', 'AND', 'THEN', 'NEVER', 'AGAIN'] +3538-163619-0014-1514: ref=['NO', 'ONE', 'CAN', 'TELL', 'HOW', 'DELIGHTED', 'THE', 'KING', 'WAS', 'TO', 'GET', 'RID', 'OF', 'THAT', 'HIDEOUS', 'BUSHY', 'BRIDE', 'AND', 'GET', 'A', 'QUEEN', 'WHO', 'WAS', 'BRIGHT', 'AND', 'BEAUTIFUL', 'AS', 'DAY', 'ITSELF'] +3538-163619-0014-1514: hyp=['NO', 'ONE', 'CAN', 'TELL', 'HOW', 'DELIGHTED', 'THE', 'KING', 'WAS', 'TO', 'GET', 'RID', 'OF', 'THAT', 'HIDEOUS', 'BUSHY', 'BRIDE', 'AND', 'GET', 'A', 'QUEEN', 'WHO', 'WAS', 'BRIGHT', 'AND', 'BEAUTIFUL', 'AS', 'DAY', 'ITSELF'] +3538-163622-0000-1515: ref=['WILT', 'THOU', 'SERVE', 'ME', 'AND', 'WATCH', 'MY', 'SEVEN', 'FOALS', 'ASKED', 'THE', 'KING'] +3538-163622-0000-1515: hyp=['WILT', 'THOU', 'SERVE', 'ME', 'AND', 'WATCH', 'MY', 'SEVEN', 'FOOLS', 'ASKED', 'THE', 'KING'] +3538-163622-0001-1516: ref=['THE', 'YOUTH', 'THOUGHT', 'THAT', 'IT', 'WAS', 'VERY', 'EASY', 'WORK', 'TO', 'WATCH', 'THE', 'FOALS', 'AND', 'THAT', 'HE', 'COULD', 'DO', 'IT', 'WELL', 'ENOUGH'] +3538-163622-0001-1516: hyp=['THE', 'YOUTH', 'THOUGHT', 'IT', 'WAS', 'VERY', 'EASY', 'WORK', 'TO', 'WATCH', 'THE', 'FOALS', 'AND', 'THAT', 'HE', 'COULD', 'DO', 'IT', 'WELL', 'ENOUGH'] +3538-163622-0002-1517: ref=['HAST', 'THOU', 'WATCHED', 'FAITHFULLY', 'AND', 'WELL', 'THE', 'WHOLE', 'DAY', 'LONG', 'SAID', 'THE', 'KING', 'WHEN', 'THE', 'LAD', 'CAME', 'INTO', 'HIS', 'PRESENCE', 'IN', 'THE', 'EVENING'] +3538-163622-0002-1517: hyp=['HAST', 'THOU', 'WATCHED', 'FAITHFULLY', 'AND', 'WELL', 'BEHOLDAY', 'LONG', 'SAID', 'THE', 'KING', 'WHEN', 'THE', 'LAD', 'CAME', 'INTO', 'HIS', 'PRESENCE', 'IN', 'THE', 'EVENING'] +3538-163622-0003-1518: ref=['YES', 'THAT', 'I', 'HAVE', 'SAID', 'THE', 'YOUTH'] +3538-163622-0003-1518: hyp=['YES', 'THAT', 'I', 'HAVE', 'SAID', 'THE', 'YOUTH'] +3538-163622-0004-1519: ref=['HE', 'HAD', 'GONE', 'OUT', 'ONCE', 'TO', 'SEEK', 'A', 'PLACE', 'HE', 'SAID', 'BUT', 'NEVER', 'WOULD', 'HE', 'DO', 'SUCH', 'A', 'THING', 'AGAIN'] +3538-163622-0004-1519: hyp=['HE', 'HAD', 'GONE', 'AT', 'ONCE', 'TO', 'SEEK', 'A', 'PLACE', 'HE', 'SAID', 'BUT', 'NEVER', 'WOULD', 'HE', 'DO', 'SUCH', 'A', 'THING', 'AGAIN'] +3538-163622-0005-1520: ref=['THEN', 'THE', 'KING', 'PROMISED', 'HIM', 'THE', 
'SAME', 'PUNISHMENT', 'AND', 'THE', 'SAME', 'REWARD', 'THAT', 'HE', 'HAD', 'PROMISED', 'HIS', 'BROTHER'] +3538-163622-0005-1520: hyp=['THEN', 'THE', 'KING', 'PROMISED', 'HIM', 'THE', 'SAME', 'PUNISHMENT', 'AND', 'THE', 'SAME', 'REWARD', 'THAT', 'HE', 'HAD', 'PROMISED', 'HIS', 'BROTHER'] +3538-163622-0006-1521: ref=['WHEN', 'HE', 'HAD', 'RUN', 'AFTER', 'THE', 'FOALS', 'FOR', 'A', 'LONG', 'LONG', 'TIME', 'AND', 'WAS', 'HOT', 'AND', 'TIRED', 'HE', 'PASSED', 'BY', 'A', 'CLEFT', 'IN', 'THE', 'ROCK', 'WHERE', 'AN', 'OLD', 'WOMAN', 'WAS', 'SITTING', 'SPINNING', 'WITH', 'A', 'DISTAFF', 'AND', 'SHE', 'CALLED', 'TO', 'HIM'] +3538-163622-0006-1521: hyp=['WHEN', 'HE', 'HAD', 'RUN', 'AFTER', 'THE', 'FALLS', 'FOR', 'A', 'LONG', 'LONG', 'TIME', 'AND', 'WAS', 'HOT', 'AND', 'TIRED', 'HE', 'PASSED', 'BY', 'CLEF', 'IN', 'THE', 'ROCK', 'WHERE', 'AN', 'OLD', 'WOMAN', 'WAS', 'SITTING', 'SPINNING', 'WITH', 'A', 'DISTAFF', 'AND', 'SHE', 'CALLED', 'TO', 'HIM'] +3538-163622-0007-1522: ref=['COME', 'HITHER', 'COME', 'HITHER', 'MY', 'HANDSOME', 'SON', 'AND', 'LET', 'ME', 'COMB', 'YOUR', 'HAIR'] +3538-163622-0007-1522: hyp=['COMMANDER', 'COME', 'HITHER', 'MY', 'HANDSOME', 'SON', 'AND', 'LET', 'ME', 'CALM', 'YOUR', 'HAIR'] +3538-163622-0008-1523: ref=['THE', 'YOUTH', 'LIKED', 'THE', 'THOUGHT', 'OF', 'THIS', 'LET', 'THE', 'FOALS', 'RUN', 'WHERE', 'THEY', 'CHOSE', 'AND', 'SEATED', 'HIMSELF', 'IN', 'THE', 'CLEFT', 'OF', 'THE', 'ROCK', 'BY', 'THE', 'SIDE', 'OF', 'THE', 'OLD', 'HAG'] +3538-163622-0008-1523: hyp=['THE', 'YOUTH', 'LIKED', 'THE', 'THOUGHT', 'OF', 'THIS', 'LET', 'THE', 'FOLDS', 'RUM', 'WHERE', 'THEY', 'CHOSE', 'AND', 'SEATED', 'HIMSELF', 'IN', 'THE', 'CLEFT', 'OF', 'THE', 'ROCK', 'BY', 'THE', 'SIDE', 'OF', 'THE', 'OLD', 'HAG'] +3538-163622-0009-1524: ref=['SO', 'THERE', 'HE', 'SAT', 'WITH', 'HIS', 'HEAD', 'ON', 'HER', 'LAP', 'TAKING', 'HIS', 'EASE', 'THE', 'LIVELONG', 'DAY'] +3538-163622-0009-1524: hyp=['SO', 'THERE', 'HE', 'SAT', 'WITH', 'HIS', 'HEAD', 'ON', 'HER', 'LAP', 'TAKING', 'HIS', 'EASE', 'THE', 'LIVE', 'LONG', 'DAY'] +3538-163622-0010-1525: ref=['ON', 'THE', 'THIRD', 'DAY', 'CINDERLAD', 'WANTED', 'TO', 'SET', 'OUT'] +3538-163622-0010-1525: hyp=['ON', 'THE', 'THIRD', 'DAY', 'SAID', 'THE', 'LAD', 'WANTED', 'TO', 'SET', 'OUT'] +3538-163622-0011-1526: ref=['THE', 'TWO', 'BROTHERS', 'LAUGHED', 'AT', 'HIM', 'AND', 'HIS', 'FATHER', 'AND', 'MOTHER', 'BEGGED', 'HIM', 'NOT', 'TO', 'GO', 'BUT', 'ALL', 'TO', 'NO', 'PURPOSE', 'AND', 'CINDERLAD', 'SET', 'OUT', 'ON', 'HIS', 'WAY'] +3538-163622-0011-1526: hyp=['THE', 'TWO', 'BROTHERS', 'LAUGHED', 'AT', 'HIM', 'AND', 'HIS', 'FATHER', 'AND', 'MOTHER', 'BEGGED', 'HIM', 'NOT', 'TO', 'GO', 'BUT', 'ALL', 'TO', 'NO', 'PURPOSE', 'AND', 'SINDERLAD', 'SET', 'OUT', 'ON', 'HIS', 'WAY'] +3538-163622-0012-1527: ref=['I', 'AM', 'WALKING', 'ABOUT', 'IN', 'SEARCH', 'OF', 'A', 'PLACE', 'SAID', 'CINDERLAD'] +3538-163622-0012-1527: hyp=['I', 'AM', 'WALKING', 'ABOUT', 'IN', 'SEARCH', 'OF', 'A', 'PLACE', 'SAID', 'CINDERLAD'] +3538-163622-0013-1528: ref=['I', 'WOULD', 'MUCH', 'RATHER', 'HAVE', 'THE', 'PRINCESS', 'SAID', 'CINDERLAD'] +3538-163622-0013-1528: hyp=['I', 'WOULD', 'MUCH', 'RATHER', 'HAVE', 'THE', 'PRINCESS', 'SAID', 'CINDER', 'LAD'] +3538-163622-0014-1529: ref=['AND', 'THUS', 'THEY', 'JOURNEYED', 'ONWARDS', 'A', 'LONG', 'LONG', 'WAY'] +3538-163622-0014-1529: hyp=['AND', 'THUS', 'THEY', 'JOURNEYED', 'ONWARDS', 'A', 'LONG', 'LONG', 'WAY'] +3538-163622-0015-1530: ref=['WHEN', 'THEY', 'HAD', 'GONE', 'THUS', 'FOR', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FOAL', 'AGAIN', 'ASKED', 'DOST', 
'THOU', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0015-1530: hyp=['WHEN', 'THEY', 'HAD', 'GONE', 'THUS', 'FOR', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FULL', 'AGAIN', 'ASKED', 'DOST', 'THOU', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0016-1531: ref=['YES', 'NOW', 'I', 'SEE', 'SOMETHING', 'THAT', 'IS', 'WHITE', 'SAID', 'CINDERLAD'] +3538-163622-0016-1531: hyp=['OH', 'YES', 'NOW', 'I', 'SEE', 'SOMETHING', 'THAT', 'IS', 'WHITE', 'SAID', 'CINDERLAD'] +3538-163622-0017-1532: ref=['IT', 'LOOKS', 'LIKE', 'THE', 'TRUNK', 'OF', 'A', 'GREAT', 'THICK', 'BIRCH', 'TREE'] +3538-163622-0017-1532: hyp=['IT', 'LOOKS', 'LIKE', 'THE', 'TRUNK', 'OF', 'A', 'GREAT', 'THICK', 'BIRCH', 'TREE'] +3538-163622-0018-1533: ref=['CINDERLAD', 'TRIED', 'BUT', 'COULD', 'NOT', 'DO', 'IT', 'SO', 'HE', 'HAD', 'TO', 'TAKE', 'A', 'DRAUGHT', 'FROM', 'THE', 'PITCHER', 'AND', 'THEN', 'ONE', 'MORE', 'AND', 'AFTER', 'THAT', 'STILL', 'ANOTHER', 'AND', 'THEN', 'HE', 'WAS', 'ABLE', 'TO', 'WIELD', 'THE', 'SWORD', 'WITH', 'PERFECT', 'EASE'] +3538-163622-0018-1533: hyp=['CINDER', 'LAD', 'TRIED', 'BUT', 'COULD', 'NOT', 'DO', 'IT', 'SO', 'HE', 'HAD', 'TO', 'TAKE', 'A', 'DRAUGHT', 'FROM', 'THE', 'PITCHER', 'AND', 'THEN', 'ONE', 'MORE', 'AND', 'AFTER', 'THAT', 'STILL', 'ANOTHER', 'AND', 'THEN', 'HE', 'WAS', 'ABLE', 'TO', 'WIELD', 'THE', 'SWORD', 'WITH', 'PERFECT', 'EASE'] +3538-163622-0019-1534: ref=['FOR', 'WE', 'ARE', 'BROTHERS', 'OF', 'THE', 'PRINCESS', 'WHOM', 'THOU', 'ART', 'TO', 'HAVE', 'WHEN', 'THOU', 'CANST', 'TELL', 'THE', 'KING', 'WHAT', 'WE', 'EAT', 'AND', 'DRINK', 'BUT', 'THERE', 'IS', 'A', 'MIGHTY', 'TROLL', 'WHO', 'HAS', 'CAST', 'A', 'SPELL', 'OVER', 'US'] +3538-163622-0019-1534: hyp=['FOR', 'WE', 'ARE', 'BROTHERS', 'OF', 'THE', 'PRINCESS', 'WHOM', 'THOU', 'ART', 'TO', 'HAVE', 'WHEN', 'THOU', 'CANST', 'TELL', 'THE', 'KING', 'WHAT', 'WE', 'EAT', 'AND', 'DRINK', 'BUT', 'THERE', 'IS', 'A', 'MIGHTY', 'TROLL', 'WHO', 'HAS', 'CAST', 'A', 'SPELL', 'OVER', 'US'] +3538-163622-0020-1535: ref=['WHEN', 'THEY', 'HAD', 'TRAVELLED', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FOAL', 'SAID', 'DOST', 'THOU', 'SEE', 'ANYTHING'] +3538-163622-0020-1535: hyp=['WHEN', 'THEY', 'HAD', 'TRAVELLED', 'A', 'LONG', 'LONG', 'WAY', 'THE', 'FALL', 'SAID', 'DOST', 'THOU', 'SEE', 'ANYTHING'] +3538-163622-0021-1536: ref=['AND', 'NOW', 'INQUIRED', 'THE', 'FOAL', 'SEEST', 'THOU', 'NOTHING', 'NOW'] +3538-163622-0021-1536: hyp=['AND', 'NOW', 'INQUIRED', 'THE', 'WHOLE', 'CEASE', 'DONE', 'NOTHING', 'NOW'] +3538-163622-0022-1537: ref=['NOW', 'THEN', 'SAID', 'THE', 'FOAL', 'DOST', 'THOU', 'NOT', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0022-1537: hyp=['NOW', 'THEN', 'SAID', 'THE', 'FOOL', 'DOST', 'THOU', 'NOT', 'SEE', 'ANYTHING', 'NOW'] +3538-163622-0023-1538: ref=['THAT', 'IS', 'A', 'RIVER', 'SAID', 'THE', 'FOAL', 'AND', 'WE', 'HAVE', 'TO', 'CROSS', 'IT'] +3538-163622-0023-1538: hyp=['THAT', 'IS', 'A', 'RIVER', 'SAID', 'THE', 'FOAL', 'AND', 'WE', 'HAVE', 'TO', 'CROSS', 'IT'] +3538-163622-0024-1539: ref=['I', 'HAVE', 'DONE', 'MY', 'BEST', 'REPLIED', 'CINDERLAD'] +3538-163622-0024-1539: hyp=['I', 'HAVE', 'DONE', 'MY', 'BEST', 'REPLIED', 'SIR', 'LAD'] +3538-163624-0000-1540: ref=['ONCE', 'UPON', 'A', 'TIME', 'THERE', 'WAS', 'A', 'KING', 'IN', 'THE', 'NORTH', 'WHO', 'HAD', 'WON', 'MANY', 'WARS', 'BUT', 'NOW', 'HE', 'WAS', 'OLD'] +3538-163624-0000-1540: hyp=['ONCE', 'UPON', 'A', 'TIME', 'THERE', 'WAS', 'A', 'KING', 'IN', 'THE', 'NORTH', 'WHO', 'HAD', 'WON', 'MANY', 'WARS', 'BUT', 'NOW', 'HE', 'WAS', 'OLD'] +3538-163624-0001-1541: ref=['THE', 'OLD', 'KING', 'WENT', 'OUT', 'AND', 'FOUGHT', 
'BRAVELY', 'BUT', 'AT', 'LAST', 'HIS', 'SWORD', 'BROKE', 'AND', 'HE', 'WAS', 'WOUNDED', 'AND', 'HIS', 'MEN', 'FLED'] +3538-163624-0001-1541: hyp=['THE', 'OLD', 'KING', 'WENT', 'OUT', 'AND', 'THOUGHT', 'BRAVELY', 'BUT', 'AT', 'LAST', 'HIS', 'SWORD', 'BROKE', 'AND', 'HE', 'WAS', 'WOUNDED', 'AND', 'HIS', 'MEN', 'FLED'] +3538-163624-0002-1542: ref=['BUT', 'IN', 'THE', 'NIGHT', 'WHEN', 'THE', 'BATTLE', 'WAS', 'OVER', 'HIS', 'YOUNG', 'WIFE', 'CAME', 'OUT', 'AND', 'SEARCHED', 'FOR', 'HIM', 'AMONG', 'THE', 'SLAIN', 'AND', 'AT', 'LAST', 'SHE', 'FOUND', 'HIM', 'AND', 'ASKED', 'WHETHER', 'HE', 'MIGHT', 'BE', 'HEALED'] +3538-163624-0002-1542: hyp=['BUT', 'IN', 'THE', 'NIGHT', 'WHEN', 'THE', 'BATTLE', 'IS', 'OVER', 'HIS', 'YOUNG', 'WIFE', 'CAME', 'OUT', 'IN', 'SEARCHED', 'FOR', 'HIM', 'AMONG', 'THE', 'SLAIN', 'AND', 'AT', 'LAST', 'SHE', 'FOUND', 'HIM', 'AND', 'ASKED', 'WHETHER', 'HE', 'MIGHT', 'BE', 'HEALED'] +3538-163624-0003-1543: ref=['SO', 'HE', 'ASKED', 'THE', 'QUEEN', 'HOW', 'DO', 'YOU', 'KNOW', 'IN', 'THE', 'DARK', 'OF', 'NIGHT', 'WHETHER', 'THE', 'HOURS', 'ARE', 'WEARING', 'TO', 'THE', 'MORNING', 'AND', 'SHE', 'SAID'] +3538-163624-0003-1543: hyp=['SO', 'YES', 'THE', 'QUEEN', 'HOW', 'DO', 'YOU', 'KNOW', 'IN', 'THE', 'DARK', 'OF', 'NIGHT', 'WHETHER', 'THE', 'HOURS', 'ARE', 'WEARING', 'TO', 'THE', 'MORNING', 'AND', 'SHE', 'SAID'] +3538-163624-0004-1544: ref=['THEN', 'THE', 'OLD', 'MAN', 'SAID', 'DRIVE', 'ALL', 'THE', 'HORSES', 'INTO', 'THE', 'RIVER', 'AND', 'CHOOSE', 'THE', 'ONE', 'THAT', 'SWIMS', 'ACROSS'] +3538-163624-0004-1544: hyp=['THEN', 'THE', 'OLD', 'MAN', 'SAID', 'DRIVE', 'ALL', 'THE', 'HORSES', 'INTO', 'THE', 'RIVER', 'AND', 'CHOOSE', 'THE', 'ONE', 'THAT', 'SWIMS', 'ACROSS'] +3538-163624-0005-1545: ref=['HE', 'IS', 'NO', 'BIGGER', 'THAN', 'OTHER', 'DRAGONS', 'SAID', 'THE', 'TUTOR', 'AND', 'IF', 'YOU', 'WERE', 'AS', 'BRAVE', 'AS', 'YOUR', 'FATHER', 'YOU', 'WOULD', 'NOT', 'FEAR', 'HIM'] +3538-163624-0005-1545: hyp=['HE', 'HAS', 'NO', 'BIGGER', 'THAN', 'OTHER', 'DRAGONS', 'SAID', 'THE', 'TUTOR', 'AND', 'IF', 'YOURS', 'BRAVE', 'AS', 'YOUR', 'FATHER', 'YOU', 'WOULD', 'NOT', 'FEAR', 'HIM'] +3538-163624-0006-1546: ref=['THEN', 'THE', 'PERSON', 'WHO', 'HAD', 'KILLED', 'OTTER', 'WENT', 'DOWN', 'AND', 'CAUGHT', 'THE', 'DWARF', 'WHO', 'OWNED', 'ALL', 'THE', 'TREASURE', 'AND', 'TOOK', 'IT', 'FROM', 'HIM'] +3538-163624-0006-1546: hyp=['THEN', 'THE', 'PERSON', 'WHO', 'HAD', 'KILLED', 'OTTER', 'WENT', 'DOWN', 'AND', 'CAUGHT', 'THE', 'DWARF', 'WHO', 'OWNED', 'ALL', 'THE', 'TREASURE', 'AND', 'TOOK', 'IT', 'FROM', 'HIM'] +3538-163624-0007-1547: ref=['ONLY', 'ONE', 'RING', 'WAS', 'LEFT', 'WHICH', 'THE', 'DWARF', 'WORE', 'AND', 'EVEN', 'THAT', 'WAS', 'TAKEN', 'FROM', 'HIM'] +3538-163624-0007-1547: hyp=['ONLY', 'ONE', 'RING', 'WAS', 'LEFT', 'WHICH', 'THE', 'DWARF', 'WORE', 'AND', 'EVEN', 'THAT', 'WAS', 'TAKEN', 'FROM', 'HIM'] +3538-163624-0008-1548: ref=['SO', 'REGIN', 'MADE', 'A', 'SWORD', 'AND', 'SIGURD', 'TRIED', 'IT', 'WITH', 'A', 'BLOW', 'ON', 'A', 'LUMP', 'OF', 'IRON', 'AND', 'THE', 'SWORD', 'BROKE'] +3538-163624-0008-1548: hyp=['SO', 'RIGAN', 'MADE', 'A', 'SWORD', 'AND', 'CIGAR', 'TRIED', 'IT', 'WITH', 'THE', 'BLOW', 'ON', 'A', 'LUMP', 'OF', 'IRON', 'AND', 'THE', 'SWORD', 'BROKE'] +3538-163624-0009-1549: ref=['THEN', 'SIGURD', 'WENT', 'TO', 'HIS', 'MOTHER', 'AND', 'ASKED', 'FOR', 'THE', 'BROKEN', 'PIECES', 'OF', 'HIS', "FATHER'S", 'BLADE', 'AND', 'GAVE', 'THEM', 'TO', 'REGIN'] +3538-163624-0009-1549: hyp=['THEN', 'CIGARET', 'WENT', 'TO', 'HIS', 'MOTHER', 'AND', 'ASKED', 'FOR', 'THE', 'BROKEN', 
'PIECES', 'OF', 'HIS', "FATHER'S", 'BLADE', 'AND', 'GAVE', 'THEM', 'TO', 'RIGAN'] +3538-163624-0010-1550: ref=['SO', 'SIGURD', 'SAID', 'THAT', 'SWORD', 'WOULD', 'DO'] +3538-163624-0010-1550: hyp=['SO', 'CIGARET', 'SAID', 'THAT', 'SWORD', 'WOULD', 'DO'] +3538-163624-0011-1551: ref=['THEN', 'HE', 'SAW', 'THE', 'TRACK', 'WHICH', 'THE', 'DRAGON', 'MADE', 'WHEN', 'HE', 'WENT', 'TO', 'A', 'CLIFF', 'TO', 'DRINK', 'AND', 'THE', 'TRACK', 'WAS', 'AS', 'IF', 'A', 'GREAT', 'RIVER', 'HAD', 'ROLLED', 'ALONG', 'AND', 'LEFT', 'A', 'DEEP', 'VALLEY'] +3538-163624-0011-1551: hyp=['THEN', 'HE', 'SAW', 'THE', 'TRACK', 'WHICH', 'THE', 'DRAGON', 'HAD', 'MADE', 'WHEN', 'HE', 'WENT', 'TO', 'A', 'CLIFF', 'TO', 'DRINK', 'AND', 'THE', 'TRACK', 'WAS', 'AS', 'IF', 'A', 'GREAT', 'RIVER', 'HAD', 'ROLLED', 'ALONG', 'AND', 'LEFT', 'A', 'DEEP', 'VALLEY'] +3538-163624-0012-1552: ref=['BUT', 'SIGURD', 'WAITED', 'TILL', 'HALF', 'OF', 'HIM', 'HAD', 'CRAWLED', 'OVER', 'THE', 'PIT', 'AND', 'THEN', 'HE', 'THRUST', 'THE', 'SWORD', 'GRAM', 'RIGHT', 'INTO', 'HIS', 'VERY', 'HEART'] +3538-163624-0012-1552: hyp=['BUT', 'CIGARET', 'WAITED', 'TILL', 'HALF', 'OF', 'HIM', 'HAD', 'CRAWLED', 'OVER', 'THE', 'PIT', 'AND', 'THEN', 'HE', 'THRUST', 'THE', 'SWORD', 'GRAHAM', 'RIGHT', 'INTO', 'HIS', 'VERY', 'HEART'] +3538-163624-0013-1553: ref=['SIGURD', 'SAID', 'I', 'WOULD', 'TOUCH', 'NONE', 'OF', 'IT', 'IF', 'BY', 'LOSING', 'IT', 'I', 'SHOULD', 'NEVER', 'DIE'] +3538-163624-0013-1553: hyp=['CIGARET', 'SAID', 'I', 'WOULD', 'TOUCH', 'NONE', 'OF', 'IT', 'IF', 'BY', 'LOSING', 'IT', 'I', 'SHOULD', 'NEVER', 'DIE'] +3538-163624-0014-1554: ref=['BUT', 'ALL', 'MEN', 'DIE', 'AND', 'NO', 'BRAVE', 'MAN', 'LETS', 'DEATH', 'FRIGHTEN', 'HIM', 'FROM', 'HIS', 'DESIRE'] +3538-163624-0014-1554: hyp=['BUT', 'ALL', 'MEN', 'DIE', 'AND', 'NO', 'BRAVE', 'MAN', "LET'S", 'DEATH', 'FRIGHTEN', 'HIM', 'FROM', 'HIS', 'DESIRE'] +3538-163624-0015-1555: ref=['DIE', 'THOU', 'FAFNIR', 'AND', 'THEN', 'FAFNIR', 'DIED'] +3538-163624-0015-1555: hyp=['GUY', 'THOU', 'FAFNER', 'AND', 'THEN', 'STAFFNER', 'DIED'] +3538-163624-0016-1556: ref=['THEN', 'SIGURD', 'RODE', 'BACK', 'AND', 'MET', 'REGIN', 'AND', 'REGIN', 'ASKED', 'HIM', 'TO', 'ROAST', "FAFNIR'S", 'HEART', 'AND', 'LET', 'HIM', 'TASTE', 'OF', 'IT'] +3538-163624-0016-1556: hyp=['THEN', 'SIGURD', 'RODE', 'BACK', 'AND', 'MET', 'RIGAN', 'AND', 'RIGAN', 'ASKED', 'HIM', 'TO', 'ROAST', "FAFNER'S", 'HEART', 'AND', 'LET', 'HIM', 'TASTE', 'OF', 'IT'] +3538-163624-0017-1557: ref=['SO', 'SIGURD', 'PUT', 'THE', 'HEART', 'OF', 'FAFNIR', 'ON', 'A', 'STAKE', 'AND', 'ROASTED', 'IT'] +3538-163624-0017-1557: hyp=['SO', 'SIR', 'GOOD', 'PUT', 'THE', 'HEART', 'OF', 'FAFFNER', 'ON', 'A', 'STAKE', 'AND', 'ROASTED', 'IT'] +3538-163624-0018-1558: ref=['THERE', 'IS', 'SIGURD', 'ROASTING', "FAFNIR'S", 'HEART', 'FOR', 'ANOTHER', 'WHEN', 'HE', 'SHOULD', 'TASTE', 'OF', 'IT', 'HIMSELF', 'AND', 'LEARN', 'ALL', 'WISDOM'] +3538-163624-0018-1558: hyp=["THERE'S", 'CIGARET', 'ROASTING', "FAFTENER'S", 'HEART', 'FOR', 'ANOTHER', 'WHEN', 'HE', 'SHOULD', 'TASTE', 'OF', 'IT', 'HIMSELF', 'AND', 'LEARN', 'ALL', 'WISDOM'] +3538-163624-0019-1559: ref=['THAT', 'LET', 'HIM', 'DO', 'AND', 'THEN', 'RIDE', 'OVER', 'HINDFELL', 'TO', 'THE', 'PLACE', 'WHERE', 'BRYNHILD', 'SLEEPS'] +3538-163624-0019-1559: hyp=['THAT', 'LET', 'HIM', 'DO', 'THEN', 'RIDE', 'OVER', 'HINFELD', 'TO', 'THE', 'PLACE', 'WHERE', 'BRINEHILL', 'SLEEPS'] +3538-163624-0020-1560: ref=['THERE', 'MUST', 'SHE', 'SLEEP', 'TILL', 'THOU', 'COMEST', 'FOR', 'HER', 'WAKING', 'RISE', 'UP', 'AND', 'RIDE', 'FOR', 'NOW', 'SURE', 
'SHE', 'WILL', 'SWEAR', 'THE', 'VOW', 'FEARLESS', 'OF', 'BREAKING'] +3538-163624-0020-1560: hyp=['THERE', 'MUST', 'SHE', 'SLEEP', 'TILL', 'THOU', 'COMES', 'FOR', 'HER', 'WAKING', 'RISE', 'UP', 'AND', 'RIDE', 'FOR', 'NOW', 'SURE', 'SHE', 'WILL', 'SWEAR', 'THE', 'VOW', 'FEARLESS', 'OF', 'BREAKING'] +3538-163624-0021-1561: ref=['THEN', 'HE', 'TOOK', 'THE', 'HELMET', 'OFF', 'THE', 'HEAD', 'OF', 'THE', 'SLEEPER', 'AND', 'BEHOLD', 'SHE', 'WAS', 'A', 'MOST', 'BEAUTIFUL', 'LADY'] +3538-163624-0021-1561: hyp=['THEN', 'HE', 'TOOK', 'THE', 'HELMET', 'OFF', 'THE', 'HEAD', 'OF', 'THE', 'SLEEPER', 'AND', 'BEHOLD', 'SHE', 'WAS', 'A', 'MOST', 'BEAUTIFUL', 'LADY'] +3538-163624-0022-1562: ref=['THEN', 'SIGURD', 'RODE', 'AWAY', 'AND', 'HE', 'CAME', 'TO', 'THE', 'HOUSE', 'OF', 'A', 'KING', 'WHO', 'HAD', 'A', 'FAIR', 'DAUGHTER'] +3538-163624-0022-1562: hyp=['THEN', 'CIGARET', 'RODE', 'AWAY', 'AND', 'HE', 'CAME', 'TO', 'THE', 'HOUSE', 'OF', 'A', 'KING', 'WHO', 'HAD', 'A', 'FAIR', 'DAUGHTER'] +3538-163624-0023-1563: ref=['THEN', "BRYNHILD'S", 'FATHER', 'TOLD', 'GUNNAR', 'THAT', 'SHE', 'WOULD', 'MARRY', 'NONE', 'BUT', 'HIM', 'WHO', 'COULD', 'RIDE', 'THE', 'FLAME', 'IN', 'FRONT', 'OF', 'HER', 'ENCHANTED', 'TOWER', 'AND', 'THITHER', 'THEY', 'RODE', 'AND', 'GUNNAR', 'SET', 'HIS', 'HORSE', 'AT', 'THE', 'FLAME', 'BUT', 'HE', 'WOULD', 'NOT', 'FACE', 'IT'] +3538-163624-0023-1563: hyp=['WHEN', 'BURNE', 'HAD', 'FATHER', 'TOLD', 'GUNNER', 'THAT', 'SHE', 'WOULD', 'MARRY', 'NONE', 'BUT', 'HIM', 'WHO', 'COULD', 'RIDE', 'THE', 'FLAME', 'IN', 'FRONT', 'OF', 'HER', 'ENCHANTED', 'TOWER', 'AND', 'THAT', 'AS', 'THEY', 'RODE', 'AND', 'GUTTER', 'SET', 'HIS', 'HORSE', 'AT', 'THE', 'FLAME', 'BUT', 'HE', 'WOULD', 'NOT', 'FACE', 'IT'] +3538-163624-0024-1564: ref=['FOR', 'ONE', 'DAY', 'WHEN', 'BRYNHILD', 'AND', 'GUDRUN', 'WERE', 'BATHING', 'BRYNHILD', 'WADED', 'FARTHEST', 'OUT', 'INTO', 'THE', 'RIVER', 'AND', 'SAID', 'SHE', 'DID', 'THAT', 'TO', 'SHOW', 'SHE', 'WAS', "GUIRUN'S", 'SUPERIOR'] +3538-163624-0024-1564: hyp=['FOR', 'ONE', 'DAY', 'WHEN', 'BURNEHILD', 'AND', 'GUNDRON', 'WERE', 'BATHING', 'BURNE', 'HELD', 'WAITED', 'FARTHEST', 'OUT', 'INTO', 'THE', 'RIVER', 'AND', 'SAID', 'SHE', 'DID', 'THAT', 'TO', 'SHOW', 'SHE', 'WAS', 'GUNDER', 'AND', 'SUPERIOR'] +3538-163624-0025-1565: ref=['FOR', 'HER', 'HUSBAND', 'SHE', 'SAID', 'HAD', 'RIDDEN', 'THROUGH', 'THE', 'FLAME', 'WHEN', 'NO', 'OTHER', 'MAN', 'DARED', 'FACE', 'IT'] +3538-163624-0025-1565: hyp=['FOR', 'HER', 'HUSBAND', 'SHE', 'SAID', 'HAD', 'RIDDEN', 'THROUGH', 'THE', 'FLAME', 'WHEN', 'NO', 'OTHER', 'MAN', 'DARED', 'FACE', 'IT'] +3538-163624-0026-1566: ref=['NOT', 'LONG', 'TO', 'WAIT', 'HE', 'SAID', 'TILL', 'THE', 'BITTER', 'SWORD', 'STANDS', 'FAST', 'IN', 'MY', 'HEART', 'AND', 'THOU', 'WILL', 'NOT', 'LIVE', 'LONG', 'WHEN', 'I', 'AM', 'DEAD'] +3538-163624-0026-1566: hyp=['NOT', 'LONG', 'TO', 'WAIT', 'HE', 'SAID', 'TILL', 'THE', 'BITTER', 'SWORD', 'STANDS', 'FAST', 'IN', 'MY', 'HEART', 'AND', 'THOU', 'WILT', 'NOT', 'LIVE', 'LONG', 'WHEN', 'I', 'AM', 'DEAD'] +367-130732-0000-1466: ref=['LOBSTERS', 'AND', 'LOBSTERS'] +367-130732-0000-1466: hyp=['LOBSTERS', 'AND', 'LOBSTERS'] +367-130732-0001-1467: ref=['WHEN', 'IS', 'A', 'LOBSTER', 'NOT', 'A', 'LOBSTER', 'WHEN', 'IT', 'IS', 'A', 'CRAYFISH'] +367-130732-0001-1467: hyp=['WHEN', 'AS', 'A', 'LOBSTER', 'NOT', 'A', 'LOBSTER', 'WHEN', 'IT', 'IS', 'A', 'CRAYFISH'] +367-130732-0002-1468: ref=['THIS', 'QUESTION', 'AND', 'ANSWER', 'MIGHT', 'WELL', 'GO', 'INTO', 'THE', 'PRIMER', 'OF', 'INFORMATION', 'FOR', 'THOSE', 'WHO', 'COME', 'TO', 'SAN', 
'FRANCISCO', 'FROM', 'THE', 'EAST', 'FOR', 'WHAT', 'IS', 'CALLED', 'A', 'LOBSTER', 'IN', 'SAN', 'FRANCISCO', 'IS', 'NOT', 'A', 'LOBSTER', 'AT', 'ALL', 'BUT', 'A', 'CRAYFISH'] +367-130732-0002-1468: hyp=['THIS', 'QUESTION', 'IN', 'ANSWER', 'MIGHT', 'WELL', 'GO', 'INTO', 'THE', 'PRIMARY', 'OF', 'INFORMATION', 'FOR', 'LUCIKAM', 'THE', 'SENT', 'FRANCISCO', 'FROM', 'THE', 'EAST', 'FOR', 'WHAT', 'IS', 'CALLED', 'A', 'LOBSTERN', 'SAN', 'FRANCISCO', 'IS', 'NOT', 'A', 'LOBSURD', 'AT', 'ALL', 'BUT', 'A', 'CRAYFISH'] +367-130732-0003-1469: ref=['THE', 'PACIFIC', 'CRAYFISH', 'HOWEVER', 'SERVES', 'EVERY', 'PURPOSE', 'AND', 'WHILE', 'MANY', 'CONTEND', 'THAT', 'ITS', 'MEAT', 'IS', 'NOT', 'SO', 'DELICATE', 'IN', 'FLAVOR', 'AS', 'THAT', 'OF', 'ITS', 'EASTERN', 'COUSIN', 'THE', 'CALIFORNIAN', 'WILL', 'AS', 'STRENUOUSLY', 'INSIST', 'THAT', 'IT', 'IS', 'BETTER', 'BUT', 'OF', 'COURSE', 'SOMETHING', 'MUST', 'ALWAYS', 'BE', 'ALLOWED', 'FOR', 'THE', 'PATRIOTISM', 'OF', 'THE', 'CALIFORNIAN'] +367-130732-0003-1469: hyp=['THE', 'PACIFIC', 'CRATER', 'SHOWER', 'SERVES', 'EVERY', 'PURPOSE', 'AND', 'WHILE', 'MANY', 'CONTEND', 'THAT', 'ITS', 'MEAT', 'IS', 'NOT', 'SO', 'DELICATE', 'AND', 'FLARE', 'AS', 'THAT', 'OF', 'ITS', 'EASTERN', 'COUSIN', 'THE', 'CALIFORNIAN', 'WALLA', 'STRENUOUSLY', 'INSISTS', 'AND', 'IT', 'IS', 'BETTER', 'BUT', 'OF', 'COURSE', 'SOMETHING', 'MUST', 'ALWAYS', 'BE', 'ALLOWED', 'FOR', 'THE', 'PATRIOTISM', 'OF', 'THE', 'CALIFORNIAN'] +367-130732-0004-1470: ref=['A', 'BOOK', 'COULD', 'BE', 'WRITTEN', 'ABOUT', 'THIS', 'RESTAURANT', 'AND', 'THEN', 'ALL', 'WOULD', 'NOT', 'BE', 'TOLD', 'FOR', 'ALL', 'ITS', 'SECRETS', 'CAN', 'NEVER', 'BE', 'KNOWN'] +367-130732-0004-1470: hyp=['A', 'BOOK', 'COULD', 'BE', 'WRITTEN', 'ABOUT', 'THIS', 'RESTAURANT', 'AND', 'THEN', 'ALL', 'WOULD', 'NOT', 'BE', 'TOLD', 'FOR', 'ALL', 'ITS', 'SECRETS', 'CAN', 'NEVER', 'BE', 'KNOWN'] +367-130732-0005-1471: ref=['IT', 'WAS', 'HERE', 'THAT', 'MOST', 'MAGNIFICENT', 'DINNERS', 'WERE', 'ARRANGED', 'IT', 'WAS', 'HERE', 'THAT', 'EXTRAORDINARY', 'DISHES', 'WERE', 'CONCOCTED', 'BY', 'CHEFS', 'OF', 'WORLD', 'WIDE', 'FAME', 'IT', 'WAS', 'HERE', 'THAT', 'LOBSTER', 'A', 'LA', 'NEWBERG', 'REACHED', 'ITS', 'HIGHEST', 'PERFECTION', 'AND', 'THIS', 'IS', 'THE', 'RECIPE', 'THAT', 'WAS', 'FOLLOWED', 'WHEN', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', 'DELMONICO'] +367-130732-0005-1471: hyp=['IT', 'WAS', 'HERE', 'THAT', 'MOST', 'MAGNIFICENT', 'DINNERS', 'WERE', 'ARRANGED', 'IT', 'WAS', 'HERE', 'THAT', 'EXTRAORDINARY', 'DISHES', 'WERE', 'CALLED', 'CONCOCTED', 'BY', 'CHEFTS', 'OF', 'WOOLWRIGHT', 'FAME', 'IT', 'WAS', 'HERE', 'THAT', 'LOBSTER', 'ALENUBERG', 'REACHED', 'ITS', 'HIGHEST', 'PERFECTION', 'AND', 'THIS', 'IS', 'THE', 'RECIPE', 'THAT', 'WAS', 'FOLLOW', 'WHEN', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', 'DEMONICO'] +367-130732-0006-1472: ref=['LOBSTER', 'A', 'LA', 'NEWBERG'] +367-130732-0006-1472: hyp=['LOBSTER', 'OLLA', 'NEWBERG'] +367-130732-0007-1473: ref=['ONE', 'POUND', 'OF', 'LOBSTER', 'MEAT', 'ONE', 'TEASPOONFUL', 'OF', 'BUTTER', 'ONE', 'HALF', 'PINT', 'OF', 'CREAM', 'YOLKS', 'OF', 'FOUR', 'EGGS', 'ONE', 'WINE', 'GLASS', 'OF', 'SHERRY', 'LOBSTER', 'FAT'] +367-130732-0007-1473: hyp=['ONE', 'POUND', 'OF', 'LOBS', 'TO', 'MEAT', 'ONE', 'TEASPOONFUL', 'OF', 'BUTTER', 'ONE', 'HALF', 'PINT', 'OF', 'CREAM', 'YOLKS', 'OF', 'FOUR', 'EGGS', 'ONE', 'WINE', 'GLASS', 'OF', 'SHERRY', 'LOBSTER', 'FAT'] +367-130732-0008-1474: ref=['PUT', 'THIS', 'IN', 'A', 'DOUBLE', 'BOILER', 'AND', 'LET', 'COOK', 'UNTIL', 'THICK', 'STIRRING', 'CONSTANTLY'] +367-130732-0008-1474: 
hyp=['PUT', 'THIS', 'IN', 'A', 'DOUBLE', 'WHIRLER', 'AND', 'LET', 'COOK', 'UNTIL', 'THICK', 'STIRRING', 'CONSTANTLY'] +367-130732-0009-1475: ref=['SERVE', 'IN', 'A', 'CHAFING', 'DISH', 'WITH', 'THIN', 'SLICES', 'OF', 'DRY', 'TOAST'] +367-130732-0009-1475: hyp=['SERVE', 'IN', 'A', 'CHAFING', 'DISH', 'WITH', 'FLITTON', 'SLICES', 'OF', 'DRY', 'TOAST'] +367-130732-0010-1476: ref=['KING', 'OF', 'SHELL', 'FISH'] +367-130732-0010-1476: hyp=['KING', 'OF', 'SHELLFISH'] +367-130732-0011-1477: ref=['ONE', 'HAS', 'TO', 'COME', 'TO', 'SAN', 'FRANCISCO', 'TO', 'PARTAKE', 'OF', 'THE', 'KING', 'OF', 'SHELL', 'FISH', 'THE', 'MAMMOTH', 'PACIFIC', 'CRAB'] +367-130732-0011-1477: hyp=['ONE', 'HAS', 'TO', 'COME', 'TO', 'SENT', 'FRANCISCO', 'TO', 'PARTAKE', 'OF', 'THE', 'KING', 'OF', 'SHELLFISH', 'THE', 'MAMMOTH', 'PACIFIC', 'CRAB'] +367-130732-0012-1478: ref=['I', 'SAY', 'COME', 'TO', 'SAN', 'FRANCISCO', 'ADVISEDLY', 'FOR', 'WHILE', 'THE', 'CRAB', 'IS', 'FOUND', 'ALL', 'ALONG', 'THE', 'COAST', 'IT', 'IS', 'PREPARED', 'NOWHERE', 'SO', 'DELICIOUSLY', 'AS', 'IN', 'SAN', 'FRANCISCO'] +367-130732-0012-1478: hyp=['I', 'SAY', 'COME', 'TO', 'SAN', 'FRANCISCO', 'ADVISEDLY', 'FOR', 'WHILE', 'THE', 'CRAB', 'IS', 'FOUND', 'ALL', 'ALONG', 'THE', 'COAST', 'IT', 'IS', 'PREPARED', 'NOWHERE', 'SO', 'DELICIOUSLY', 'AS', 'IN', 'SAN', 'FRANCISCO'] +367-130732-0013-1479: ref=["GOBEY'S", 'PASSED', 'WITH', 'THE', 'FIRE', 'AND', 'THE', 'LITTLE', 'RESTAURANT', 'BEARING', 'HIS', 'NAME', 'AND', 'IN', 'CHARGE', 'OF', 'HIS', 'WIDOW', 'IN', 'UNION', 'SQUARE', 'AVENUE', 'HAS', 'NOT', 'ATTAINED', 'THE', 'FAME', 'OF', 'THE', 'OLD', 'PLACE'] +367-130732-0013-1479: hyp=["GOBY'S", 'PASS', 'WITH', 'THE', 'FIRE', 'AND', 'THE', 'LITTLE', 'RESTAURANT', 'BEARING', 'HIS', 'NAME', 'AND', 'IN', 'CHARGE', 'OF', 'HIS', 'WIDOW', 'IN', 'UNION', 'SQUARE', 'AVENUE', 'HAS', 'NOT', 'ATTAINED', 'THE', 'FAME', 'OF', 'THE', 'OLD', 'PLACE'] +367-130732-0014-1480: ref=['IT', 'IS', 'POSSIBLE', 'THAT', 'SHE', 'KNOWS', 'THE', 'SECRET', 'OF', 'PREPARING', 'CRAB', 'AS', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', "GOBEY'S", 'OF', 'BEFORE', 'THE', 'FIRE', 'BUT', 'HIS', 'PRESTIGE', 'DID', 'NOT', 'DESCEND', 'TO', 'HER'] +367-130732-0014-1480: hyp=['IT', 'IS', 'POSSIBLE', 'THAT', 'SHE', 'KNOWS', 'THE', 'SECRET', 'OF', 'PREPARING', 'CRAB', 'AS', 'IT', 'WAS', 'PREPARED', 'IN', 'THE', 'GOBIES', 'OF', 'BEFORE', 'THE', 'FIRE', 'BUT', 'HIS', 'PRESTIGE', 'HAD', 'NOT', 'DESCEND', 'TO', 'HER'] +367-130732-0015-1481: ref=["GOBEY'S", 'CRAB', 'STEW'] +367-130732-0015-1481: hyp=['GOBIAS', 'CRABS', 'DO'] +367-130732-0016-1482: ref=['TAKE', 'THE', 'MEAT', 'OF', 'ONE', 'LARGE', 'CRAB', 'SCRAPING', 'OUT', 'ALL', 'OF', 'THE', 'FAT', 'FROM', 'THE', 'SHELL'] +367-130732-0016-1482: hyp=['TAKE', 'THE', 'MEAT', 'OF', 'ONE', 'LARGE', 'CRAB', 'SCRAPING', 'OUT', 'ALL', 'THE', 'BAT', 'FROM', 'THE', 'SHELL'] +367-130732-0017-1483: ref=['SOAK', 'THE', 'CRAB', 'MEAT', 'IN', 'THE', 'SHERRY', 'TWO', 'HOURS', 'BEFORE', 'COOKING'] +367-130732-0017-1483: hyp=['SOAK', 'THE', 'CRAB', 'ME', 'IN', 'THE', 'SHERRY', 'TWO', 'HOURS', 'BEFORE', 'COOKING'] +367-130732-0018-1484: ref=['CHOP', 'FINE', 'THE', 'ONION', 'SWEET', 'PEPPER', 'AND', 'TOMATO', 'WITH', 'THE', 'ROSEMARY'] +367-130732-0018-1484: hyp=['CHOP', 'FINE', 'THE', 'ONION', 'SWEEP', 'PEPPER', 'INTOMATO', 'WITH', 'THE', 'ROSEMARY'] +367-130732-0019-1485: ref=['HEAT', 'THIS', 'IN', 'A', 'STEWPAN', 'AND', 'WHEN', 'SIMMERING', 'ADD', 'THE', 'SHERRY', 'AND', 'CRAB', 'MEAT', 'AND', 'LET', 'ALL', 'COOK', 'TOGETHER', 'WITH', 'A', 'SLOW', 'FIRE', 'FOR', 'EIGHT', 'MINUTES'] 
+367-130732-0019-1485: hyp=['HEATLESS', 'IN', 'A', 'STEWPENT', 'AND', 'WHEN', 'SIMMERING', 'ADD', 'THE', 'SHERRY', 'AND', 'CRAB', 'ME', 'AND', 'LET', 'ALL', 'COOK', 'TOGETHER', 'WITH', 'THE', 'SLOW', 'FIRE', 'FOR', 'EIGHT', 'MINUTES'] +367-130732-0020-1486: ref=['SERVE', 'IN', 'A', 'CHAFING', 'DISH', 'WITH', 'TOASTED', 'CRACKERS', 'OR', 'THIN', 'SLICES', 'OF', 'TOASTED', 'BREAD'] +367-130732-0020-1486: hyp=['SERVE', 'IN', 'THE', 'CHAFING', 'DISH', 'WITH', 'TOASTED', 'CRACKERS', 'OR', 'THIN', 'SLICES', 'OF', 'TOASTED', 'BREAD'] +367-130732-0021-1487: ref=['LOBSTER', 'IN', 'MINIATURE'] +367-130732-0021-1487: hyp=['LOBSTER', 'IN', 'MINIATURE'] +367-130732-0022-1488: ref=['SO', 'FAR', 'IT', 'HAS', 'BEEN', 'USED', 'MOSTLY', 'FOR', 'GARNISHMENT', 'OF', 'OTHER', 'DISHES', 'AND', 'IT', 'IS', 'ONLY', 'RECENTLY', 'THAT', 'THE', 'HOF', 'BRAU', 'HAS', 'BEEN', 'MAKING', 'A', 'SPECIALTY', 'OF', 'THEM'] +367-130732-0022-1488: hyp=['SO', 'FAR', 'IT', 'HAS', 'BEEN', 'USED', 'MOSTLY', 'FOR', 'GARNISHMENT', 'OF', 'OTHER', 'DISHES', 'AND', 'IT', 'IS', 'ONLY', 'RECENTLY', 'THAT', 'THE', 'WHOLE', 'BROW', 'HAS', 'BEEN', 'MAKING', 'ESPECIALTY', 'OF', 'THEM'] +367-130732-0023-1489: ref=['ALL', 'OF', 'THE', 'BETTER', 'CLASS', 'RESTAURANTS', 'HOWEVER', 'WILL', 'SERVE', 'THEM', 'IF', 'YOU', 'ORDER', 'THEM'] +367-130732-0023-1489: hyp=['ALL', 'THE', 'BETTER', 'CLASS', 'RESTAURANTS', 'HOWEVER', 'WILL', 'SERVE', 'THEM', 'IF', 'YOU', 'ORDER', 'THEM'] +367-130732-0024-1490: ref=['THIS', 'IS', 'THE', 'RECIPE', 'FOR', 'EIGHT', 'PEOPLE', 'AND', 'IT', 'IS', 'WELL', 'WORTH', 'TRYING', 'IF', 'YOU', 'ARE', 'GIVING', 'A', 'DINNER', 'OF', 'IMPORTANCE'] +367-130732-0024-1490: hyp=['THIS', 'IS', 'THE', 'RECIPE', 'FOR', 'EIGHT', 'PEOPLE', 'AND', 'IT', 'IS', 'WELL', 'IT', 'WORTH', 'TRYING', 'IF', 'YOU', 'ARE', 'GIVING', 'A', 'DINNER', 'OF', 'IMPORTANCE'] +367-130732-0025-1491: ref=['BISQUE', 'OF', 'CRAWFISH'] +367-130732-0025-1491: hyp=['DISK', 'OF', 'CRAWFISH'] +367-130732-0026-1492: ref=['TAKE', 'THIRTY', 'CRAWFISH', 'FROM', 'WHICH', 'REMOVE', 'THE', 'GUT', 'CONTAINING', 'THE', 'GALL', 'IN', 'THE', 'FOLLOWING', 'MANNER', 'TAKE', 'FIRM', 'HOLD', 'OF', 'THE', 'CRAWFISH', 'WITH', 'THE', 'LEFT', 'HAND', 'SO', 'AS', 'TO', 'AVOID', 'BEING', 'PINCHED', 'BY', 'ITS', 'CLAWS', 'WITH', 'THE', 'THUMB', 'AND', 'FOREFINGER', 'OF', 'THE', 'RIGHT', 'HAND', 'PINCH', 'THE', 'EXTREME', 'END', 'OF', 'THE', 'CENTRAL', 'FIN', 'OF', 'THE', 'TAIL', 'AND', 'WITH', 'A', 'SUDDEN', 'JERK', 'THE', 'GUT', 'WILL', 'BE', 'WITHDRAWN'] +367-130732-0026-1492: hyp=['TAKE', 'THIRTY', 'CRAWFISH', 'FROM', 'WHICH', 'REMOVE', 'THE', 'GUT', 'CONTAINING', 'THE', 'GALL', 'IN', 'THE', 'FOLLOWING', 'MANNER', 'TAKE', 'FIRM', 'HOLD', 'OF', 'THE', 'CRAWFISH', 'WITH', 'THE', 'LEFT', 'HAND', 'SO', 'AS', 'TO', 'AVOID', 'BEING', 'PINCHED', 'BY', 'ITS', 'CLOTHS', 'WITH', 'THE', 'THUMB', 'AND', 'FOREFINGER', 'OF', 'THE', 'RIGHT', 'HAND', 'PINCH', 'THE', 'EXTREME', 'END', 'OF', 'THE', 'CENTRAL', 'FIN', 'OF', 'THE', 'TAIL', 'AND', 'WITH', 'A', 'SUDDEN', 'JERK', 'THE', 'GUT', 'WILL', 'BE', 'WITHDRAWN'] +367-130732-0027-1493: ref=['MINCE', 'OR', 'CUT', 'INTO', 'SMALL', 'DICE', 'A', 'CARROT', 'AN', 'ONION', 'ONE', 'HEAD', 'OF', 'CELERY', 'AND', 'A', 'FEW', 'PARSLEY', 'ROOTS', 'AND', 'TO', 'THESE', 'ADD', 'A', 'BAY', 'LEAF', 'A', 'SPRIG', 'OF', 'THYME', 'A', 'LITTLE', 'MINIONETTE', 'PEPPER', 'AND', 'TWO', 'OUNCES', 'OF', 'BUTTER'] +367-130732-0027-1493: hyp=['MINSER', 'CUT', 'INTO', 'SMALL', 'DICE', 'A', 'CARROT', 'AND', 'ONION', 'ONE', 'HEAD', 'OF', 'CELERY', 'AND', 'A', 'FEW', 'PARSLEY', 
'ROOTS', 'AND', 'TO', 'THESE', 'AT', 'A', 'BAY', 'LEAF', 'A', 'SPRIG', 'OF', 'THYME', 'A', 'LITTLE', 'MINOR', 'NUT', 'PEPPER', 'AND', 'TWO', 'OUNCE', 'OF', 'BUTTER'] +367-130732-0028-1494: ref=['PUT', 'THESE', 'INGREDIENTS', 'INTO', 'A', 'STEWPAN', 'AND', 'FRY', 'THEM', 'TEN', 'MINUTES', 'THEN', 'THROW', 'IN', 'THE', 'CRAWFISH', 'AND', 'POUR', 'ON', 'THEM', 'HALF', 'A', 'BOTTLE', 'OF', 'FRENCH', 'WHITE', 'WINE'] +367-130732-0028-1494: hyp=['PUT', 'THESE', 'INGREDIENTS', 'INTO', 'A', 'STEWPAN', 'AND', 'FRY', 'THEM', 'TEN', 'MINUTES', 'THEN', 'THROW', 'IN', 'THE', 'CROPPISH', 'AND', 'POUR', 'ON', 'THEM', 'HALF', 'A', 'BOTTLE', 'OF', 'FRENCH', 'WHITE', 'WINE'] +367-130732-0029-1495: ref=['ALLOW', 'THIS', 'TO', 'BOIL', 'AND', 'THEN', 'ADD', 'A', 'QUART', 'OF', 'STRONG', 'CONSOMME', 'AND', 'LET', 'ALL', 'CONTINUE', 'BOILING', 'FOR', 'HALF', 'AN', 'HOUR'] +367-130732-0029-1495: hyp=['ALLOW', 'US', 'TO', 'BOIL', 'AND', 'THEN', 'ADD', 'A', 'QUART', 'OF', 'STRONG', 'CONSUM', 'AND', 'LET', 'ALL', 'CONTINUE', 'BOILING', 'FOR', 'HALF', 'AN', 'HOUR'] +367-130732-0030-1496: ref=['PICK', 'OUT', 'THE', 'CRAWFISH', 'AND', 'STRAIN', 'THE', 'BROTH', 'THROUGH', 'A', 'NAPKIN', 'BY', 'PRESSURE', 'INTO', 'A', 'BASIN', 'IN', 'ORDER', 'TO', 'EXTRACT', 'ALL', 'THE', 'ESSENCE', 'FROM', 'THE', 'VEGETABLES'] +367-130732-0030-1496: hyp=['PICK', 'OUT', 'THE', 'CRAWFISH', 'AND', 'STRAIN', 'THE', 'BROTH', 'THROUGH', 'A', 'NAPKIN', 'BY', 'PRESSURE', 'INTO', 'A', 'BASIN', 'IN', 'ORDER', 'TO', 'EXTRACT', 'ALL', 'THE', 'ESSENCE', 'FROM', 'THE', 'VEGETABLES'] +367-130732-0031-1497: ref=['PICK', 'THE', 'SHELLS', 'OFF', 'TWENTY', 'FIVE', 'OF', 'THE', 'CRAWFISH', 'TAILS', 'TRIM', 'THEM', 'NEATLY', 'AND', 'SET', 'THEM', 'ASIDE', 'UNTIL', 'WANTED'] +367-130732-0031-1497: hyp=['PICK', 'THE', 'SHELLS', 'OF', 'TWENTY', 'FIVE', 'OF', 'THE', 'CRAWFISH', 'TAILS', 'TRIM', 'THEM', 'NEATLY', 'AND', 'SET', 'THEM', 'ASIDE', 'UNTIL', 'WANTON'] +367-130732-0032-1498: ref=['RESERVE', 'SOME', 'OF', 'THE', 'SPAWN', 'ALSO', 'HALF', 'OF', 'THE', 'BODY', 'SHELLS', 'WITH', 'WHICH', 'TO', 'MAKE', 'THE', 'CRAWFISH', 'BUTTER', 'TO', 'FINISH', 'THE', 'SOUP'] +367-130732-0032-1498: hyp=['RESERVE', 'SOME', 'OF', 'THE', 'SPAWN', 'ALSO', 'HAPPENED', 'THE', 'BODY', 'SHELLS', 'WITH', 'WHICH', 'TO', 'MAKE', 'THE', 'COFFISH', 'BUTTER', 'TO', 'FINISH', 'THE', 'SOUP'] +367-130732-0033-1499: ref=['THIS', 'BUTTER', 'IS', 'MADE', 'AS', 'FOLLOWS', 'PLACE', 'THE', 'SHELLS', 'ON', 'A', 'BAKING', 'SHEET', 'IN', 'THE', 'OVEN', 'TO', 'DRY', 'LET', 'THE', 'SHELLS', 'COOL', 'AND', 'THEN', 'POUND', 'THEM', 'IN', 'A', 'MORTAR', 'WITH', 'A', 'LITTLE', 'LOBSTER', 'CORAL', 'AND', 'FOUR', 'OUNCES', 'OF', 'FRESH', 'BUTTER', 'THOROUGHLY', 'BRUISING', 'THE', 'WHOLE', 'TOGETHER', 'SO', 'AS', 'TO', 'MAKE', 'A', 'FINE', 'PASTE'] +367-130732-0033-1499: hyp=['THIS', 'BUTTER', 'IS', 'MADE', 'AS', 'FOLLOWS', 'PLACE', 'THE', 'SHELLS', 'IN', 'A', 'BAKING', 'SHEET', 'IN', 'THE', 'OVEN', 'TO', 'DRY', 'LET', 'THE', 'SHELLS', 'COOL', 'AND', 'THEN', 'POUND', 'THEM', 'IN', 'A', 'MORTAR', 'WITH', 'A', 'LITTLE', 'LOBSTER', 'COAL', 'AND', 'FOUR', 'OUNCES', 'OF', 'FRESH', 'BUTTER', 'THOROUGHLY', 'BRUISING', 'THE', 'WHOLE', 'TOGETHER', 'SO', 'AS', 'TO', 'MAKE', 'A', 'FINE', 'PASTE'] +367-293981-0000-1445: ref=['I', 'SWEAR', 'IT', 'ANSWERED', 'SANCHO'] +367-293981-0000-1445: hyp=['I', 'SWEAR', 'ANSWERED', 'SANCHO'] +367-293981-0001-1446: ref=['I', 'SAY', 'SO', 'CONTINUED', 'DON', 'QUIXOTE', 'BECAUSE', 'I', 'HATE', 'TAKING', 'AWAY', "ANYONE'S", 'GOOD', 'NAME'] +367-293981-0001-1446: hyp=['I', 'SAY', 'SO', 
'CONTINUED', 'DON', 'QUIXOTE', 'BECAUSE', 'I', 'HATE', 'TAKING', 'AWAY', 'ANY', "ONE'S", 'GOOD', 'NAME'] +367-293981-0002-1447: ref=['I', 'SAY', 'REPLIED', 'SANCHO', 'THAT', 'I', 'SWEAR', 'TO', 'HOLD', 'MY', 'TONGUE', 'ABOUT', 'IT', 'TILL', 'THE', 'END', 'OF', 'YOUR', "WORSHIP'S", 'DAYS', 'AND', 'GOD', 'GRANT', 'I', 'MAY', 'BE', 'ABLE', 'TO', 'LET', 'IT', 'OUT', 'TOMORROW'] +367-293981-0002-1447: hyp=['I', 'SAY', 'REPLIED', 'SANCHO', 'THAT', 'I', 'SWEAR', 'TO', 'HOLD', 'MY', 'TONGUE', 'ABOUT', 'IT', 'TILL', 'THE', 'END', 'OF', 'YOUR', 'WORSHIP', 'STAYS', 'AND', 'GONE', 'GRANT', 'I', 'MAY', 'BE', 'ABLE', 'TO', 'LET', 'IT', 'OUT', 'TO', 'MORROW'] +367-293981-0003-1448: ref=['THOUGH', 'YOUR', 'WORSHIP', 'WAS', 'NOT', 'SO', 'BADLY', 'OFF', 'HAVING', 'IN', 'YOUR', 'ARMS', 'THAT', 'INCOMPARABLE', 'BEAUTY', 'YOU', 'SPOKE', 'OF', 'BUT', 'I', 'WHAT', 'DID', 'I', 'HAVE', 'EXCEPT', 'THE', 'HEAVIEST', 'WHACKS', 'I', 'THINK', 'I', 'HAD', 'IN', 'ALL', 'MY', 'LIFE'] +367-293981-0003-1448: hyp=['THOUGH', 'YOUR', 'WORSHIP', 'WAS', 'NOT', 'SO', 'BADLY', 'OFF', 'HAVING', 'IN', 'YOUR', 'ARMS', 'THE', 'INN', 'COMPARABLE', 'BEAUTY', 'YOU', 'SPOKE', 'OF', 'BUT', 'I', 'WHAT', 'DID', 'I', 'HAVE', 'EXCEPT', 'THE', 'HEAVIEST', 'WAX', 'THAT', 'I', 'THINK', 'I', 'HAD', 'IN', 'ALL', 'MY', 'LIFE'] +367-293981-0004-1449: ref=['UNLUCKY', 'ME', 'AND', 'THE', 'MOTHER', 'THAT', 'BORE', 'ME'] +367-293981-0004-1449: hyp=['UNLUCKY', 'ME', 'AND', 'THE', 'MOTHER', 'THAT', 'BORE', 'ME'] +367-293981-0005-1450: ref=["DIDN'T", 'I', 'SAY', 'SO', 'WORSE', 'LUCK', 'TO', 'MY', 'LINE', 'SAID', 'SANCHO'] +367-293981-0005-1450: hyp=["DIDN'T", 'I', 'SAY', 'SO', 'WORSE', 'LUCK', 'TO', 'MY', 'LINE', 'SAID', 'SANCHO'] +367-293981-0006-1451: ref=['IT', 'CANNOT', 'BE', 'THE', 'MOOR', 'ANSWERED', 'DON', 'QUIXOTE', 'FOR', 'THOSE', 'UNDER', 'ENCHANTMENT', 'DO', 'NOT', 'LET', 'THEMSELVES', 'BE', 'SEEN', 'BY', 'ANYONE'] +367-293981-0006-1451: hyp=['IT', 'CANNOT', 'BE', 'THE', 'MORE', 'ANSWERED', 'DON', 'QUIXOTE', 'FOR', 'THOSE', 'UNDER', 'ENCHANTMENT', 'DO', 'NOT', 'LET', 'THEMSELVES', 'BE', 'SEEN', 'BY', 'ANYONE'] +367-293981-0007-1452: ref=['IF', 'THEY', "DON'T", 'LET', 'THEMSELVES', 'BE', 'SEEN', 'THEY', 'LET', 'THEMSELVES', 'BE', 'FELT', 'SAID', 'SANCHO', 'IF', 'NOT', 'LET', 'MY', 'SHOULDERS', 'SPEAK', 'TO', 'THE', 'POINT'] +367-293981-0007-1452: hyp=['IF', 'THEY', 'DO', 'NOT', 'LET', 'THEMSELVES', 'BE', 'SEEN', 'THEY', 'LET', 'THEMSELVES', 'BE', 'FELT', 'SAID', 'SANCHO', 'IF', 'NOT', 'LET', 'MY', 'SHOULDER', 'SPEAK', 'TO', 'THE', 'POINT'] +367-293981-0008-1453: ref=['MINE', 'COULD', 'SPEAK', 'TOO', 'SAID', 'DON', 'QUIXOTE', 'BUT', 'THAT', 'IS', 'NOT', 'A', 'SUFFICIENT', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'WHAT', 'WE', 'SEE', 'IS', 'THE', 'ENCHANTED', 'MOOR'] +367-293981-0008-1453: hyp=['MIKE', 'HAD', 'SPEAK', 'TOO', 'SAID', 'DON', 'QUIXOTE', 'BUT', 'THAT', 'IS', 'NOT', 'A', 'SUSPICIENT', 'REASON', 'FOR', 'BELIEVING', 'THAT', 'WHAT', 'WE', 'SEE', 'IS', 'THE', 'ENCHANTED', 'MOOR'] +367-293981-0009-1454: ref=['THE', 'OFFICER', 'TURNED', 'TO', 'HIM', 'AND', 'SAID', 'WELL', 'HOW', 'GOES', 'IT', 'GOOD', 'MAN'] +367-293981-0009-1454: hyp=['THE', 'OFFICERS', 'TURNED', 'ROOM', 'AND', 'SAID', 'WELL', 'HOW', 'GOES', 'A', 'GOOD', 'MAN'] +367-293981-0010-1455: ref=['SANCHO', 'GOT', 'UP', 'WITH', 'PAIN', 'ENOUGH', 'IN', 'HIS', 'BONES', 'AND', 'WENT', 'AFTER', 'THE', 'INNKEEPER', 'IN', 'THE', 'DARK', 'AND', 'MEETING', 'THE', 'OFFICER', 'WHO', 'WAS', 'LOOKING', 'TO', 'SEE', 'WHAT', 'HAD', 'BECOME', 'OF', 'HIS', 'ENEMY', 'HE', 'SAID', 'TO', 'HIM', 'SENOR', 
'WHOEVER', 'YOU', 'ARE', 'DO', 'US', 'THE', 'FAVOUR', 'AND', 'KINDNESS', 'TO', 'GIVE', 'US', 'A', 'LITTLE', 'ROSEMARY', 'OIL', 'SALT', 'AND', 'WINE', 'FOR', 'IT', 'IS', 'WANTED', 'TO', 'CURE', 'ONE', 'OF', 'THE', 'BEST', 'KNIGHTS', 'ERRANT', 'ON', 'EARTH', 'WHO', 'LIES', 'ON', 'YONDER', 'BED', 'WOUNDED', 'BY', 'THE', 'HANDS', 'OF', 'THE', 'ENCHANTED', 'MOOR', 'THAT', 'IS', 'IN', 'THIS', 'INN'] +367-293981-0010-1455: hyp=['SANCHO', 'GOT', 'UP', 'WITH', 'PAIN', 'ENOUGH', 'IN', 'HIS', 'BONES', 'AND', 'WENT', 'OUT', 'TO', 'THE', 'INNKEEPER', 'IN', 'THE', 'DARK', 'IN', 'MEETING', 'THE', 'OFFICER', 'WHO', 'WAS', 'LOOKING', 'TO', 'SEE', 'WHAT', 'HAD', 'BECOME', 'OF', 'HIS', 'ENEMY', 'HE', 'SAID', 'TO', 'HIM', 'SIGNOR', 'WHOEVER', 'YOU', 'ARE', 'DO', 'US', 'THE', 'FAVOUR', 'AND', 'KINDNESS', 'TO', 'GIVE', 'US', 'A', 'LITTLE', 'ROSEMARY', 'OIL', 'SALT', 'AND', 'WHITE', 'FOR', 'IT', 'IS', 'WATER', 'TO', 'CURE', 'ONE', 'OF', 'OUR', 'BEST', 'KNIGHTS', 'ERRANT', 'ON', 'EARTH', 'WHO', 'LIES', 'ON', 'YONDER', 'BED', 'WOUNDED', 'BY', 'THE', 'HANDS', 'OF', 'THE', 'ENCHANTED', 'MOOR', 'THAT', 'IS', 'IN', 'THIS', 'INN'] +367-293981-0011-1456: ref=['TO', 'BE', 'BRIEF', 'HE', 'TOOK', 'THE', 'MATERIALS', 'OF', 'WHICH', 'HE', 'MADE', 'A', 'COMPOUND', 'MIXING', 'THEM', 'ALL', 'AND', 'BOILING', 'THEM', 'A', 'GOOD', 'WHILE', 'UNTIL', 'IT', 'SEEMED', 'TO', 'HIM', 'THEY', 'HAD', 'COME', 'TO', 'PERFECTION'] +367-293981-0011-1456: hyp=['TO', 'BE', 'BRIEF', 'HE', 'TOOK', 'THE', 'MATERIORS', 'OF', 'WHICH', 'HE', 'MADE', 'A', 'COMPOUND', 'MIXING', 'THEM', 'ALL', 'BOILING', 'THEM', 'A', 'GOOD', 'WHILE', 'IT', 'UNTIL', 'IT', 'SEEMED', 'TO', 'HIM', 'THEY', 'HAD', 'COME', 'TO', 'PERFECTION'] +367-293981-0012-1457: ref=['SANCHO', 'PANZA', 'WHO', 'ALSO', 'REGARDED', 'THE', 'AMENDMENT', 'OF', 'HIS', 'MASTER', 'AS', 'MIRACULOUS', 'BEGGED', 'HIM', 'TO', 'GIVE', 'HIM', 'WHAT', 'WAS', 'LEFT', 'IN', 'THE', 'PIGSKIN', 'WHICH', 'WAS', 'NO', 'SMALL', 'QUANTITY'] +367-293981-0012-1457: hyp=['SANCHO', 'PANZA', 'WHO', 'ALSO', 'REGARDED', 'THE', 'AMENDMENT', 'OF', 'HIS', 'MASTER', 'AS', 'MIRACULOUS', 'BEGGED', 'HIM', 'TO', 'GIVE', 'HIM', 'WHAT', 'WAS', 'LEFT', 'IN', 'THE', 'PICTION', 'WHICH', 'WAS', 'NO', 'SMALL', 'QUANTITY'] +367-293981-0013-1458: ref=['DON', 'QUIXOTE', 'CONSENTED', 'AND', 'HE', 'TAKING', 'IT', 'WITH', 'BOTH', 'HANDS', 'IN', 'GOOD', 'FAITH', 'AND', 'WITH', 'A', 'BETTER', 'WILL', 'GULPED', 'DOWN', 'AND', 'DRAINED', 'OFF', 'VERY', 'LITTLE', 'LESS', 'THAN', 'HIS', 'MASTER'] +367-293981-0013-1458: hyp=['DON', 'QUIXOTE', 'CONSENTED', 'AND', 'HE', 'TAKING', 'IT', 'WITH', 'BOTH', 'HANDS', 'IN', 'GOOD', 'FAITH', 'AND', 'WITH', 'A', 'BETTER', 'WILL', 'GULPED', 'IT', 'DOWN', 'AND', 'DRAINED', 'OFF', 'VERY', 'LITTLE', 'LESS', 'THAN', 'HIS', 'MASTER'] +367-293981-0014-1459: ref=['IF', 'YOUR', 'WORSHIP', 'KNEW', 'THAT', 'RETURNED', 'SANCHO', 'WOE', 'BETIDE', 'ME', 'AND', 'ALL', 'MY', 'KINDRED', 'WHY', 'DID', 'YOU', 'LET', 'ME', 'TASTE', 'IT'] +367-293981-0014-1459: hyp=['IF', 'YOUR', 'WORSHIP', 'KNEW', 'THAT', 'RETURNED', 'SANCHO', "WE'LL", 'BETIDE', 'ME', 'IN', 'ALL', 'MY', 'KINDRED', 'WHY', 'DID', 'YOU', 'LET', 'ME', 'TASTE', 'HIM'] +367-293981-0015-1460: ref=['SEARCH', 'YOUR', 'MEMORY', 'AND', 'IF', 'YOU', 'FIND', 'ANYTHING', 'OF', 'THIS', 'KIND', 'YOU', 'NEED', 'ONLY', 'TELL', 'ME', 'OF', 'IT', 'AND', 'I', 'PROMISE', 'YOU', 'BY', 'THE', 'ORDER', 'OF', 'KNIGHTHOOD', 'WHICH', 'I', 'HAVE', 'RECEIVED', 'TO', 'PROCURE', 'YOU', 'SATISFACTION', 'AND', 'REPARATION', 'TO', 'THE', 'UTMOST', 'OF', 'YOUR', 'DESIRE'] +367-293981-0015-1460: 
hyp=['SEARCH', 'YOUR', 'MEMORY', 'AND', 'IF', 'YOU', 'FIND', 'ANYTHING', 'OF', 'THIS', 'KIND', 'YOU', 'NEED', 'ONLY', 'TELL', 'ME', 'OF', 'IT', 'AND', 'I', 'PROMISE', 'YOU', 'BY', 'THE', 'ORDER', 'OF', 'KNIGHTHOOD', 'WHICH', 'I', 'HAVE', 'RECEIVED', 'TO', 'PROCURE', 'YOU', 'SATISFACTION', 'IN', 'REPARATION', 'TO', 'THE', 'UTMOST', 'OF', 'YOUR', 'DESIRE'] +367-293981-0016-1461: ref=['THEN', 'THIS', 'IS', 'AN', 'INN', 'SAID', 'DON', 'QUIXOTE'] +367-293981-0016-1461: hyp=['THEN', 'THIS', 'IS', 'AN', 'IN', 'SAID', 'DON', 'QUIXOTE'] +367-293981-0017-1462: ref=['AND', 'A', 'VERY', 'RESPECTABLE', 'ONE', 'SAID', 'THE', 'INNKEEPER'] +367-293981-0017-1462: hyp=['IN', 'A', 'VERY', 'RESPECTABLE', 'ONE', 'SAID', 'THE', 'INNKEEPER'] +367-293981-0018-1463: ref=['THE', 'CRIES', 'OF', 'THE', 'POOR', 'BLANKETED', 'WRETCH', 'WERE', 'SO', 'LOUD', 'THAT', 'THEY', 'REACHED', 'THE', 'EARS', 'OF', 'HIS', 'MASTER', 'WHO', 'HALTING', 'TO', 'LISTEN', 'ATTENTIVELY', 'WAS', 'PERSUADED', 'THAT', 'SOME', 'NEW', 'ADVENTURE', 'WAS', 'COMING', 'UNTIL', 'HE', 'CLEARLY', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'HIS', 'SQUIRE', 'WHO', 'UTTERED', 'THEM'] +367-293981-0018-1463: hyp=['THE', 'CRIES', 'OF', 'THE', 'POOR', 'BLANKET', 'WRETCH', 'WERE', 'SO', 'LOUD', 'THAT', 'THEY', 'REACHED', 'THE', 'EARS', 'OF', 'HIS', 'MASTER', 'WHO', 'HALTING', 'TO', 'LISTEN', 'ATTENTIVELY', 'WAS', 'PERSUADED', 'THAT', 'SOME', 'NEW', 'ADVENTURE', 'WAS', 'COMING', 'UNTIL', 'HE', 'CLEARLY', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'HIS', 'SQUIRE', 'WHO', 'UTTERED', 'THEM'] +367-293981-0019-1464: ref=['HE', 'SAW', 'HIM', 'RISING', 'AND', 'FALLING', 'IN', 'THE', 'AIR', 'WITH', 'SUCH', 'GRACE', 'AND', 'NIMBLENESS', 'THAT', 'HAD', 'HIS', 'RAGE', 'ALLOWED', 'HIM', 'IT', 'IS', 'MY', 'BELIEF', 'HE', 'WOULD', 'HAVE', 'LAUGHED'] +367-293981-0019-1464: hyp=['HE', 'SAW', 'HIM', 'RISING', 'AND', 'FALLING', 'IN', 'THE', 'AIR', 'WITH', 'SUCH', 'GRACE', 'AND', 'NIMBLENESS', 'THAT', 'HAD', 'HIS', 'RAGE', 'ALLOWED', 'HIM', 'IT', 'IS', 'MY', 'BELIEF', 'HE', 'WOULD', 'HAVE', 'LAUGHED'] +367-293981-0020-1465: ref=['SANCHO', 'TOOK', 'IT', 'AND', 'AS', 'HE', 'WAS', 'RAISING', 'IT', 'TO', 'HIS', 'MOUTH', 'HE', 'WAS', 'STOPPED', 'BY', 'THE', 'CRIES', 'OF', 'HIS', 'MASTER', 'EXCLAIMING', 'SANCHO', 'MY', 'SON', 'DRINK', 'NOT', 'WATER', 'DRINK', 'IT', 'NOT', 'MY', 'SON', 'FOR', 'IT', 'WILL', 'KILL', 'THEE', 'SEE', 'HERE', 'I', 'HAVE', 'THE', 'BLESSED', 'BALSAM', 'AND', 'HE', 'HELD', 'UP', 'THE', 'FLASK', 'OF', 'LIQUOR', 'AND', 'WITH', 'DRINKING', 'TWO', 'DROPS', 'OF', 'IT', 'THOU', 'WILT', 'CERTAINLY', 'BE', 'RESTORED'] +367-293981-0020-1465: hyp=['SANCHO', 'TOOK', 'IT', 'AND', 'AS', 'HE', 'WAS', 'RAISING', 'IT', 'TO', 'HIS', 'MOUTH', 'HE', 'WAS', 'STOPPED', 'BY', 'THE', 'CRIES', 'OF', 'HIS', 'MASTER', 'EXCLAIMING', 'SANCHO', 'MY', 'SON', 'DRINK', 'NOT', 'WATER', 'DRINK', 'IT', 'OUT', 'MY', 'SON', 'FOR', 'IT', 'WILL', 'KILL', 'THEE', 'SEE', 'HERE', 'I', 'HAD', 'THE', 'BLESSED', 'BALSAM', 'AND', 'HE', 'HELD', 'UP', 'THE', 'FLASK', 'OF', 'LIQUOR', 'AND', 'WITH', 'DRINKING', 'TWO', 'DROPS', 'WHAT', 'THOU', 'WILT', 'CERTAINLY', 'BE', 'RESTORED'] +3764-168670-0000-1666: ref=['THE', 'STRIDES', 'OF', 'A', 'LAME', 'MAN', 'ARE', 'LIKE', 'THE', 'OGLING', 'GLANCES', 'OF', 'A', 'ONE', 'EYED', 'MAN', 'THEY', 'DO', 'NOT', 'REACH', 'THEIR', 'GOAL', 'VERY', 'PROMPTLY'] +3764-168670-0000-1666: hyp=['THE', 'STRIDES', 'OF', 'A', 'LAME', 'MAN', 'LIKE', 'THE', 'OGLING', 'GLANCES', 'OF', 'A', 'ONE', 'EYED', 'MAN', 'THEY', 'DO', 'NOT', 'REACH', 'THEIR', 'GOAL', 'VERY', 'PROMPTLY'] +3764-168670-0001-1667: 
ref=['COSETTE', 'HAD', 'WAKED', 'UP'] +3764-168670-0001-1667: hyp=['COSETTE', 'HAD', 'WAKED', 'UP'] +3764-168670-0002-1668: ref=['JEAN', 'VALJEAN', 'HAD', 'PLACED', 'HER', 'NEAR', 'THE', 'FIRE'] +3764-168670-0002-1668: hyp=['JEAN', 'VALJEAN', 'HAD', 'PLACED', 'HER', 'NEAR', 'THE', 'FIRE'] +3764-168670-0003-1669: ref=['YOU', 'WILL', 'WAIT', 'FOR', 'ME', 'AT', 'A', "LADY'S", 'HOUSE', 'I', 'SHALL', 'COME', 'TO', 'FETCH', 'YOU'] +3764-168670-0003-1669: hyp=['YOU', 'WILL', 'WAIT', 'FOR', 'ME', 'AT', 'A', "LADY'S", 'HOUSE', 'I', 'SHALL', 'COME', 'TO', 'FETCH', 'YOU'] +3764-168670-0004-1670: ref=['EVERYTHING', 'IS', 'ARRANGED', 'AND', 'NOTHING', 'IS', 'SAID', 'FAUCHELEVENT'] +3764-168670-0004-1670: hyp=['EVERYTHING', 'IS', 'ARRANGED', 'AND', 'NOTHING', 'IS', 'SAID', 'FAUCHELEVENT'] +3764-168670-0005-1671: ref=['I', 'HAVE', 'PERMISSION', 'TO', 'BRING', 'YOU', 'IN', 'BUT', 'BEFORE', 'BRINGING', 'YOU', 'IN', 'YOU', 'MUST', 'BE', 'GOT', 'OUT'] +3764-168670-0005-1671: hyp=['I', 'HAVE', 'PERMISSION', 'TO', 'BRING', 'YOU', 'IN', 'BUT', 'BEFORE', 'BRINGING', 'YOU', 'IN', 'YOU', 'MUST', 'BE', 'GOT', 'OUT'] +3764-168670-0006-1672: ref=["THAT'S", 'WHERE', 'THE', 'DIFFICULTY', 'LIES'] +3764-168670-0006-1672: hyp=["THAT'S", 'WHERE', 'THE', 'DIFFICULTY', 'LIES'] +3764-168670-0007-1673: ref=['IT', 'IS', 'EASY', 'ENOUGH', 'WITH', 'THE', 'CHILD', 'YOU', 'WILL', 'CARRY', 'HER', 'OUT'] +3764-168670-0007-1673: hyp=['IT', 'IS', 'EASY', 'ENOUGH', 'WITH', 'THE', 'CHILD', 'YOU', 'WILL', 'CARRY', 'HER', 'OUT'] +3764-168670-0008-1674: ref=['AND', 'SHE', 'WILL', 'HOLD', 'HER', 'TONGUE', 'I', 'ANSWER', 'FOR', 'THAT'] +3764-168670-0008-1674: hyp=['AND', 'SHE', 'WILL', 'HOLD', 'HER', 'TONGUE', 'I', 'ANSWER', 'FOR', 'THAT'] +3764-168670-0009-1675: ref=['FAUCHELEVENT', 'GRUMBLED', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'JEAN', 'VALJEAN'] +3764-168670-0009-1675: hyp=['FOR', 'SCHLEVENT', 'GRUMBLED', 'MORE', 'TO', 'HIMSELF', 'THAN', 'TO', 'JEAN', 'VALJEAN'] +3764-168670-0010-1676: ref=['YOU', 'UNDERSTAND', 'FATHER', 'MADELEINE', 'THE', 'GOVERNMENT', 'WILL', 'NOTICE', 'IT'] +3764-168670-0010-1676: hyp=['YOU', 'UNDERSTAND', 'FATHER', 'MADELEINE', 'THE', 'GOVERNMENT', 'WILL', 'NOTICE', 'IT'] +3764-168670-0011-1677: ref=['JEAN', 'VALJEAN', 'STARED', 'HIM', 'STRAIGHT', 'IN', 'THE', 'EYE', 'AND', 'THOUGHT', 'THAT', 'HE', 'WAS', 'RAVING'] +3764-168670-0011-1677: hyp=['JEAN', 'VALJEAN', 'STARED', 'HIM', 'STRAIGHT', 'IN', 'THE', 'EYE', 'AND', 'THOUGHT', 'THAT', 'HE', 'WAS', 'RAVING'] +3764-168670-0012-1678: ref=['FAUCHELEVENT', 'WENT', 'ON'] +3764-168670-0012-1678: hyp=['FOUCHELEVENT', 'WENT', 'ON'] +3764-168670-0013-1679: ref=['IT', 'IS', 'TO', 'MORROW', 'THAT', 'I', 'AM', 'TO', 'BRING', 'YOU', 'IN', 'THE', 'PRIORESS', 'EXPECTS', 'YOU'] +3764-168670-0013-1679: hyp=['IT', 'IS', 'TO', 'MORROW', 'THAT', 'I', 'AM', 'TO', 'BRING', 'YOU', 'IN', 'THE', 'PRIORS', 'EXPECTS', 'YOU'] +3764-168670-0014-1680: ref=['THEN', 'HE', 'EXPLAINED', 'TO', 'JEAN', 'VALJEAN', 'THAT', 'THIS', 'WAS', 'HIS', 'RECOMPENSE', 'FOR', 'A', 'SERVICE', 'WHICH', 'HE', 'FAUCHELEVENT', 'WAS', 'TO', 'RENDER', 'TO', 'THE', 'COMMUNITY'] +3764-168670-0014-1680: hyp=['THEN', 'HE', 'EXPLAINED', 'TO', 'JEAN', 'VALJEAN', 'THAT', 'THIS', 'WAS', 'HIS', 'RECOMPENSE', 'FOR', 'A', 'SERVICE', 'WHICH', 'HE', 'FOUCHELEVENT', 'WAS', 'SURRENDER', 'TO', 'THE', 'COMMUNITY'] +3764-168670-0015-1681: ref=['THAT', 'THE', 'NUN', 'WHO', 'HAD', 'DIED', 'THAT', 'MORNING', 'HAD', 'REQUESTED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'COFFIN', 'WHICH', 'HAD', 'SERVED', 'HER', 'FOR', 'A', 'BED', 'AND', 
'INTERRED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL'] +3764-168670-0015-1681: hyp=['THAT', 'THE', 'NUN', 'WHO', 'HAD', 'DIED', 'THAT', 'MORNING', 'HAD', 'REQUESTED', 'TO', 'BE', 'BURIED', 'IN', 'THE', 'COFFIN', 'WHICH', 'HAD', 'SERVED', 'HER', 'FOR', 'A', 'BED', 'AND', 'INTERRED', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CHAPEL'] +3764-168670-0016-1682: ref=['THAT', 'THE', 'PRIORESS', 'AND', 'THE', 'VOCAL', 'MOTHERS', 'INTENDED', 'TO', 'FULFIL', 'THE', 'WISH', 'OF', 'THE', 'DECEASED'] +3764-168670-0016-1682: hyp=['THAT', 'THE', 'PRIORS', 'AND', 'THE', 'VOCAL', 'MOTHERS', 'INTENDED', 'TO', 'FULFIL', 'THE', 'WISH', 'OF', 'THE', 'DECEASED'] +3764-168670-0017-1683: ref=['THAT', 'HE', 'FAUCHELEVENT', 'WAS', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN', 'IN', 'THE', 'CELL', 'RAISE', 'THE', 'STONE', 'IN', 'THE', 'CHAPEL', 'AND', 'LOWER', 'THE', 'CORPSE', 'INTO', 'THE', 'VAULT'] +3764-168670-0017-1683: hyp=['THAT', 'HE', 'FOR', 'SCHLEVENT', 'WAS', 'TO', 'NAIL', 'UP', 'THE', 'COFFIN', 'IN', 'THE', 'CELL', 'RAISE', 'THE', 'STONE', 'IN', 'THE', 'CHAPEL', 'AND', 'BLOW', 'THE', 'CORPSE', 'INTO', 'THE', 'VAULT'] +3764-168670-0018-1684: ref=['AND', 'THEN', 'THAT', 'THERE', 'WAS', 'ANOTHER', 'THE', 'EMPTY', 'COFFIN'] +3764-168670-0018-1684: hyp=['AND', 'THEN', 'THAT', 'THERE', 'WAS', 'ANOTHER', 'THE', 'EMPTY', 'COFFIN'] +3764-168670-0019-1685: ref=['WHAT', 'IS', 'THAT', 'EMPTY', 'COFFIN'] +3764-168670-0019-1685: hyp=['WHAT', 'IS', 'THAT', 'EMPTY', 'COFFIN'] +3764-168670-0020-1686: ref=['ASKED', 'JEAN', 'VALJEAN', 'FAUCHELEVENT', 'REPLIED'] +3764-168670-0020-1686: hyp=['ASKED', 'JEAN', 'VALJEAN', 'FAUCHELEVENT', 'REPLIED'] +3764-168670-0021-1687: ref=['WHAT', 'COFFIN', 'WHAT', 'ADMINISTRATION'] +3764-168670-0021-1687: hyp=['WHAT', 'COFFIN', 'WHAT', 'ADMINISTRATION'] +3764-168670-0022-1688: ref=['FAUCHELEVENT', 'WHO', 'WAS', 'SEATED', 'SPRANG', 'UP', 'AS', 'THOUGH', 'A', 'BOMB', 'HAD', 'BURST', 'UNDER', 'HIS', 'CHAIR', 'YOU'] +3764-168670-0022-1688: hyp=['SO', 'SLAVENT', 'WHO', 'WAS', 'SEATED', 'SPRANG', 'UP', 'AS', 'THOUGH', 'A', 'BOMB', 'HAD', 'BURST', 'UNDER', 'HIS', 'CHAIR', 'YOU'] +3764-168670-0023-1689: ref=['YOU', 'KNOW', 'FAUCHELEVENT', 'WHAT', 'YOU', 'HAVE', 'SAID', 'MOTHER', 'CRUCIFIXION', 'IS', 'DEAD'] +3764-168670-0023-1689: hyp=['YOU', 'KNOW', 'FOURCHELEVENT', 'WHAT', 'YOU', 'HAVE', 'SAID', 'MOTHER', 'CRUCIFIXION', 'IS', 'DEAD'] +3764-168670-0024-1690: ref=['AND', 'I', 'ADD', 'AND', 'FATHER', 'MADELEINE', 'IS', 'BURIED', 'AH'] +3764-168670-0024-1690: hyp=['AND', 'I', 'ADD', 'AND', 'FATHER', 'MADELEINE', 'IS', 'BURIED', 'AH'] +3764-168670-0025-1691: ref=['YOU', 'ARE', 'NOT', 'LIKE', 'OTHER', 'MEN', 'FATHER', 'MADELEINE'] +3764-168670-0025-1691: hyp=['YOU', 'ARE', 'NOT', 'LIKE', 'OTHER', 'MEN', 'FATHER', 'MADELEINE'] +3764-168670-0026-1692: ref=['THIS', 'OFFERS', 'THE', 'MEANS', 'BUT', 'GIVE', 'ME', 'SOME', 'INFORMATION', 'IN', 'THE', 'FIRST', 'PLACE'] +3764-168670-0026-1692: hyp=['THIS', 'OFFERS', 'THE', 'MEANS', 'BUT', 'GIVE', 'ME', 'SOME', 'INFORMATION', 'IN', 'THE', 'FIRST', 'PLACE'] +3764-168670-0027-1693: ref=['HOW', 'LONG', 'IS', 'THE', 'COFFIN', 'SIX', 'FEET'] +3764-168670-0027-1693: hyp=['HOW', 'LONG', 'IS', 'THE', 'COFFIN', 'SIX', 'FEET'] +3764-168670-0028-1694: ref=['IT', 'IS', 'A', 'CHAMBER', 'ON', 'THE', 'GROUND', 'FLOOR', 'WHICH', 'HAS', 'A', 'GRATED', 'WINDOW', 'OPENING', 'ON', 'THE', 'GARDEN', 'WHICH', 'IS', 'CLOSED', 'ON', 'THE', 'OUTSIDE', 'BY', 'A', 'SHUTTER', 'AND', 'TWO', 'DOORS', 'ONE', 'LEADS', 'INTO', 'THE', 'CONVENT', 'THE', 'OTHER', 
'INTO', 'THE', 'CHURCH', 'WHAT', 'CHURCH'] +3764-168670-0028-1694: hyp=['IT', 'IS', 'A', 'CHAMBER', 'ON', 'THE', 'GROUND', 'FLOOR', 'WHICH', 'HAS', 'A', 'GRATED', 'WINDOW', 'OPENING', 'ON', 'THE', 'GARDEN', 'WHICH', 'IS', 'CLOSED', 'ON', 'THE', 'OUTSIDE', 'BY', 'A', 'SHUTTER', 'AND', 'TWO', 'DOORS', 'ONE', 'LEADS', 'INTO', 'THE', 'CONVENT', 'THE', 'OTHER', 'INTO', 'THE', 'CHURCH', 'A', 'WATCH'] +3764-168670-0029-1695: ref=['THE', 'CHURCH', 'IN', 'THE', 'STREET', 'THE', 'CHURCH', 'WHICH', 'ANY', 'ONE', 'CAN', 'ENTER'] +3764-168670-0029-1695: hyp=['THE', 'CHURCH', 'IN', 'THE', 'STREET', 'AT', 'THE', 'CHURCH', 'WHICH', 'ANY', 'ONE', 'CAN', 'ENTER'] +3764-168670-0030-1696: ref=['HAVE', 'YOU', 'THE', 'KEYS', 'TO', 'THOSE', 'TWO', 'DOORS'] +3764-168670-0030-1696: hyp=['HAVE', 'YOU', 'THE', 'KEYS', 'TO', 'THOSE', 'TWO', 'DOORS'] +3764-168670-0031-1697: ref=['NO', 'I', 'HAVE', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CONVENT', 'THE', 'PORTER', 'HAS', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CHURCH'] +3764-168670-0031-1697: hyp=['AND', 'NO', 'I', 'HAVE', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CONVENT', 'THE', 'PORTER', 'HAS', 'THE', 'KEY', 'TO', 'THE', 'DOOR', 'WHICH', 'COMMUNICATES', 'WITH', 'THE', 'CHURCH'] +3764-168670-0032-1698: ref=['ONLY', 'TO', 'ALLOW', 'THE', "UNDERTAKER'S", 'MEN', 'TO', 'ENTER', 'WHEN', 'THEY', 'COME', 'TO', 'GET', 'THE', 'COFFIN'] +3764-168670-0032-1698: hyp=['ONLY', 'TO', 'ALLOW', 'THE', "UNDERTAKER'S", 'MEN', 'TO', 'ENTER', 'WHEN', 'THEY', 'COME', 'TO', 'GET', 'THE', 'COFFIN'] +3764-168670-0033-1699: ref=['WHO', 'NAILS', 'UP', 'THE', 'COFFIN', 'I', 'DO'] +3764-168670-0033-1699: hyp=['WHO', 'NAILS', 'UP', 'THE', 'COFFIN', 'I', 'DO'] +3764-168670-0034-1700: ref=['WHO', 'SPREADS', 'THE', 'PALL', 'OVER', 'IT'] +3764-168670-0034-1700: hyp=['WHO', 'SPREADS', 'THE', 'POOL', 'OVER', 'IT'] +3764-168670-0035-1701: ref=['NOT', 'ANOTHER', 'MAN', 'EXCEPT', 'THE', 'POLICE', 'DOCTOR', 'CAN', 'ENTER', 'THE', 'DEAD', 'ROOM', 'THAT', 'IS', 'EVEN', 'WRITTEN', 'ON', 'THE', 'WALL'] +3764-168670-0035-1701: hyp=['NOT', 'ANOTHER', 'MAN', 'EXCEPT', 'THE', 'POLICE', 'DOCTOR', 'CAN', 'ENTER', 'THE', 'DEDUREUM', 'THAT', 'IS', 'EVEN', 'WRITTEN', 'ON', 'THE', 'WALL'] +3764-168670-0036-1702: ref=['COULD', 'YOU', 'HIDE', 'ME', 'IN', 'THAT', 'ROOM', 'TO', 'NIGHT', 'WHEN', 'EVERY', 'ONE', 'IS', 'ASLEEP'] +3764-168670-0036-1702: hyp=['COULD', 'YOU', 'HIDE', 'ME', 'IN', 'THAT', 'ROOM', 'TO', 'NIGHT', 'WHEN', 'EVERY', 'ONE', 'IS', 'ASLEEP'] +3764-168670-0037-1703: ref=['ABOUT', 'THREE', "O'CLOCK", 'IN', 'THE', 'AFTERNOON'] +3764-168670-0037-1703: hyp=['ABOUT', 'THREE', "O'CLOCK", 'IN', 'THE', 'AFTERNOON'] +3764-168670-0038-1704: ref=['I', 'SHALL', 'BE', 'HUNGRY', 'I', 'WILL', 'BRING', 'YOU', 'SOMETHING'] +3764-168670-0038-1704: hyp=['I', 'SHALL', 'BE', 'HUNGRY', 'I', 'WILL', 'BRING', 'YOU', 'SOMETHING'] +3764-168670-0039-1705: ref=['YOU', 'CAN', 'COME', 'AND', 'NAIL', 'ME', 'UP', 'IN', 'THE', 'COFFIN', 'AT', 'TWO', "O'CLOCK"] +3764-168670-0039-1705: hyp=['YOU', 'CAN', 'COME', 'AND', 'NAIL', 'ME', 'UP', 'IN', 'THE', 'COFFIN', 'AT', 'TWO', "O'CLOCK"] +3764-168670-0040-1706: ref=['FAUCHELEVENT', 'RECOILED', 'AND', 'CRACKED', 'HIS', 'FINGER', 'JOINTS', 'BUT', 'THAT', 'IS', 'IMPOSSIBLE'] +3764-168670-0040-1706: hyp=['FUCHELEVENT', 'RECOILED', 'AND', 'CRACKED', 'HIS', 'FINGER', 'JOINTS', 'BUT', 'THAT', 'IS', 'IMPOSSIBLE'] +3764-168670-0041-1707: ref=['BAH', 'IMPOSSIBLE', 'TO', 'TAKE', 'A', 'HAMMER', 'AND', 
'DRIVE', 'SOME', 'NAILS', 'IN', 'A', 'PLANK'] +3764-168670-0041-1707: hyp=['BAH', 'IMPOSSIBLE', 'TO', 'TAKE', 'A', 'HAMMER', 'AND', 'DRIVE', 'SOME', 'NAILS', 'IN', 'A', 'PLANK'] +3764-168670-0042-1708: ref=['JEAN', 'VALJEAN', 'HAD', 'BEEN', 'IN', 'WORSE', 'STRAITS', 'THAN', 'THIS'] +3764-168670-0042-1708: hyp=['JEAN', 'VALJEAN', 'HAD', 'BEEN', 'IN', 'WORSE', 'STRAITS', 'THAN', 'THIS'] +3764-168670-0043-1709: ref=['ANY', 'MAN', 'WHO', 'HAS', 'BEEN', 'A', 'PRISONER', 'UNDERSTANDS', 'HOW', 'TO', 'CONTRACT', 'HIMSELF', 'TO', 'FIT', 'THE', 'DIAMETER', 'OF', 'THE', 'ESCAPE'] +3764-168670-0043-1709: hyp=['ANY', 'MAN', 'WHO', 'HAS', 'BEEN', 'A', 'PRISONER', 'UNDERSTANDS', 'HOW', 'TO', 'CONTRACT', 'HIMSELF', 'TO', 'FIT', 'THE', 'DIAMETER', 'OF', 'THE', 'ESCAPE'] +3764-168670-0044-1710: ref=['WHAT', 'DOES', 'NOT', 'A', 'MAN', 'UNDERGO', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'CURE'] +3764-168670-0044-1710: hyp=['WHAT', 'DOES', 'NOT', 'A', 'MAN', 'UNDERGO', 'FOR', 'THE', 'SAKE', 'OF', 'A', 'CURE'] +3764-168670-0045-1711: ref=['TO', 'HAVE', 'HIMSELF', 'NAILED', 'UP', 'IN', 'A', 'CASE', 'AND', 'CARRIED', 'OFF', 'LIKE', 'A', 'BALE', 'OF', 'GOODS', 'TO', 'LIVE', 'FOR', 'A', 'LONG', 'TIME', 'IN', 'A', 'BOX', 'TO', 'FIND', 'AIR', 'WHERE', 'THERE', 'IS', 'NONE', 'TO', 'ECONOMIZE', 'HIS', 'BREATH', 'FOR', 'HOURS', 'TO', 'KNOW', 'HOW', 'TO', 'STIFLE', 'WITHOUT', 'DYING', 'THIS', 'WAS', 'ONE', 'OF', 'JEAN', "VALJEAN'S", 'GLOOMY', 'TALENTS'] +3764-168670-0045-1711: hyp=['TO', 'HAVE', 'HIMSELF', 'NAILED', 'UP', 'IN', 'A', 'CASE', 'AND', 'CARRIED', 'OFF', 'LIKE', 'A', 'BAIL', 'OF', 'GOODS', 'TO', 'LIVE', 'FOR', 'A', 'LONG', 'TIME', 'IN', 'A', 'BOX', 'TO', 'FIND', 'AIR', 'WHERE', 'THERE', 'IS', 'NONE', 'TO', 'ECONOMIZE', 'HIS', 'BREATH', 'FOR', 'HOURS', 'TO', 'KNOW', 'HOW', 'TO', 'STIFLE', 'WITHOUT', 'DYING', 'THIS', 'WAS', 'ONE', 'OF', 'JEAN', "VALJEAN'S", 'GLOOMY', 'TALENTS'] +3764-168670-0046-1712: ref=['YOU', 'SURELY', 'MUST', 'HAVE', 'A', 'GIMLET', 'YOU', 'WILL', 'MAKE', 'A', 'FEW', 'HOLES', 'HERE', 'AND', 'THERE', 'AROUND', 'MY', 'MOUTH', 'AND', 'YOU', 'WILL', 'NAIL', 'THE', 'TOP', 'PLANK', 'ON', 'LOOSELY', 'GOOD', 'AND', 'WHAT', 'IF', 'YOU', 'SHOULD', 'HAPPEN', 'TO', 'COUGH', 'OR', 'TO', 'SNEEZE'] +3764-168670-0046-1712: hyp=['YOU', 'SURELY', 'MUST', 'HAVE', 'A', 'GIMLET', 'YOU', 'WILL', 'MAKE', 'A', 'FEW', 'HOLES', 'HERE', 'AND', 'THERE', 'AROUND', 'MY', 'MOUTH', 'AND', 'YOU', 'WILL', 'NAIL', 'THE', 'TOP', 'PLANK', 'ON', 'LOOSELY', 'GOOD', 'AND', 'WHAT', 'IF', 'YOU', 'SHOULD', 'HAPPEN', 'TO', 'COUGH', 'OR', 'TO', 'SNEEZE'] +3764-168670-0047-1713: ref=['A', 'MAN', 'WHO', 'IS', 'MAKING', 'HIS', 'ESCAPE', 'DOES', 'NOT', 'COUGH', 'OR', 'SNEEZE'] +3764-168670-0047-1713: hyp=['A', 'MAN', 'WHO', 'IS', 'MAKING', 'HIS', 'ESCAPE', 'DOES', 'NOT', 'COUGH', 'OR', 'SNEEZE'] +3764-168670-0048-1714: ref=['WHO', 'IS', 'THERE', 'WHO', 'HAS', 'NOT', 'SAID', 'TO', 'A', 'CAT', 'DO', 'COME', 'IN'] +3764-168670-0048-1714: hyp=['WHO', 'IS', 'THERE', 'WHO', 'HAS', 'NOT', 'SAID', 'TO', 'A', 'CAT', 'DO', 'COME', 'IN'] +3764-168670-0049-1715: ref=['THE', 'OVER', 'PRUDENT', 'CATS', 'AS', 'THEY', 'ARE', 'AND', 'BECAUSE', 'THEY', 'ARE', 'CATS', 'SOMETIMES', 'INCUR', 'MORE', 'DANGER', 'THAN', 'THE', 'AUDACIOUS'] +3764-168670-0049-1715: hyp=['THE', 'OVER', 'PRUDENT', 'COUNTS', 'AS', 'THEY', 'ARE', 'AND', 'BECAUSE', 'THEY', 'ARE', 'CATS', 'SOMETIMES', 'INCUR', 'MORE', 'DANGER', 'THAN', 'THE', 'AUDACIOUS'] +3764-168670-0050-1716: ref=['BUT', 'JEAN', "VALJEAN'S", 'COOLNESS', 'PREVAILED', 'OVER', 'HIM', 'IN', 'SPITE', 'OF', 'HIMSELF', 'HE', 
'GRUMBLED'] +3764-168670-0050-1716: hyp=['BUT', 'JEAN', "VALJEAN'S", 'COOLNESS', 'PREVAILED', 'OVER', 'HIM', 'IN', 'SPITE', 'OF', 'HIMSELF', 'HE', 'GRUMBLED'] +3764-168670-0051-1717: ref=['IF', 'YOU', 'ARE', 'SURE', 'OF', 'COMING', 'OUT', 'OF', 'THE', 'COFFIN', 'ALL', 'RIGHT', 'I', 'AM', 'SURE', 'OF', 'GETTING', 'YOU', 'OUT', 'OF', 'THE', 'GRAVE'] +3764-168670-0051-1717: hyp=['IF', 'YOU', 'ARE', 'SURE', 'OF', 'COMING', 'OUT', 'OF', 'THE', 'COFFIN', 'ALL', 'RIGHT', 'I', 'AM', 'SURE', 'OF', 'GETTING', 'OUT', 'OF', 'THE', 'GRAVE'] +3764-168670-0052-1718: ref=['AN', 'OLD', 'FELLOW', 'OF', 'THE', 'OLD', 'SCHOOL', 'THE', 'GRAVE', 'DIGGER', 'PUTS', 'THE', 'CORPSES', 'IN', 'THE', 'GRAVE', 'AND', 'I', 'PUT', 'THE', 'GRAVE', 'DIGGER', 'IN', 'MY', 'POCKET'] +3764-168670-0052-1718: hyp=['AN', 'OLD', 'FELLOW', 'OF', 'THE', 'OLD', 'SCHOOL', 'THE', 'GRAVE', 'DIGGER', 'PUTS', 'THE', 'CORPSES', 'IN', 'THE', 'GRAVE', 'AND', 'I', 'PUT', 'THE', 'GRAVE', 'DIGGER', 'IN', 'MY', 'POCKET'] +3764-168670-0053-1719: ref=['I', 'SHALL', 'FOLLOW', 'THAT', 'IS', 'MY', 'BUSINESS'] +3764-168670-0053-1719: hyp=['I', 'SHALL', 'FOLLOW', 'THAT', 'IS', 'MY', 'BUSINESS'] +3764-168670-0054-1720: ref=['THE', 'HEARSE', 'HALTS', 'THE', "UNDERTAKER'S", 'MEN', 'KNOT', 'A', 'ROPE', 'AROUND', 'YOUR', 'COFFIN', 'AND', 'LOWER', 'YOU', 'DOWN'] +3764-168670-0054-1720: hyp=['THE', 'HOUSE', 'HALTS', 'THE', 'UNDERTAKERS', 'MEN', 'NOT', 'A', 'ROPE', 'AROUND', 'YOUR', 'COFFIN', 'AND', 'LOWER', 'YOU', 'DOWN'] +3764-168670-0055-1721: ref=['THE', 'PRIEST', 'SAYS', 'THE', 'PRAYERS', 'MAKES', 'THE', 'SIGN', 'OF', 'THE', 'CROSS', 'SPRINKLES', 'THE', 'HOLY', 'WATER', 'AND', 'TAKES', 'HIS', 'DEPARTURE'] +3764-168670-0055-1721: hyp=['THE', 'PRIESTS', 'AS', 'THE', 'PRAYERS', 'MAKES', 'THE', 'SIGN', 'OF', 'THE', 'CROSS', 'SPRINKLES', 'THE', 'HOLY', 'WATER', 'AND', 'TAKES', 'HIS', 'DEPARTURE'] +3764-168670-0056-1722: ref=['ONE', 'OF', 'TWO', 'THINGS', 'WILL', 'HAPPEN', 'HE', 'WILL', 'EITHER', 'BE', 'SOBER', 'OR', 'HE', 'WILL', 'NOT', 'BE', 'SOBER'] +3764-168670-0056-1722: hyp=['ONE', 'OF', 'TWO', 'THINGS', 'WILL', 'HAPPEN', 'HE', 'WILL', 'EITHER', 'BE', 'SOBER', 'OR', 'HE', 'WILL', 'NOT', 'BE', 'SOBER'] +3764-168670-0057-1723: ref=['THAT', 'IS', 'SETTLED', 'FATHER', 'FAUCHELEVENT', 'ALL', 'WILL', 'GO', 'WELL'] +3764-168670-0057-1723: hyp=['THAT', 'IS', 'SETTLED', 'FATHER', 'FAUCHELEVENT', 'ALL', 'WILL', 'GO', 'WELL'] +3764-168671-0000-1724: ref=['ON', 'THE', 'FOLLOWING', 'DAY', 'AS', 'THE', 'SUN', 'WAS', 'DECLINING', 'THE', 'VERY', 'RARE', 'PASSERS', 'BY', 'ON', 'THE', 'BOULEVARD', 'DU', 'MAINE', 'PULLED', 'OFF', 'THEIR', 'HATS', 'TO', 'AN', 'OLD', 'FASHIONED', 'HEARSE', 'ORNAMENTED', 'WITH', 'SKULLS', 'CROSS', 'BONES', 'AND', 'TEARS'] +3764-168671-0000-1724: hyp=['ON', 'THE', 'FOLLOWING', 'DAY', 'AS', 'THE', 'SUN', 'WAS', 'DECLINING', 'THE', 'VERY', 'RARE', 'PASSES', 'BY', 'ON', 'THE', 'BOULEVARD', 'DUMEN', 'PULLED', 'OFF', 'THEIR', 'HATS', 'TO', 'AN', 'OLD', 'FASHIONED', 'HEARSE', 'ORNAMENTED', 'WITH', 'SKULLS', 'CROSS', 'BONES', 'AND', 'TEARS'] +3764-168671-0001-1725: ref=['THIS', 'HEARSE', 'CONTAINED', 'A', 'COFFIN', 'COVERED', 'WITH', 'A', 'WHITE', 'CLOTH', 'OVER', 'WHICH', 'SPREAD', 'A', 'LARGE', 'BLACK', 'CROSS', 'LIKE', 'A', 'HUGE', 'CORPSE', 'WITH', 'DROOPING', 'ARMS'] +3764-168671-0001-1725: hyp=['THIS', 'HEARSE', 'CONTAINED', 'A', 'COFFIN', 'COVERED', 'WITH', 'A', 'WHITE', 'CLOTH', 'OVER', 'WHICH', 'SPREAD', 'A', 'LARGE', 'BLACK', 'CROSS', 'LIKE', 'A', 'HUGE', 'CORPSE', 'WITH', 'DROOPING', 'ARMS'] +3764-168671-0002-1726: ref=['A', 'MOURNING', 
'COACH', 'IN', 'WHICH', 'COULD', 'BE', 'SEEN', 'A', 'PRIEST', 'IN', 'HIS', 'SURPLICE', 'AND', 'A', 'CHOIR', 'BOY', 'IN', 'HIS', 'RED', 'CAP', 'FOLLOWED'] +3764-168671-0002-1726: hyp=['THE', 'MORNING', 'COACH', 'IN', 'WHICH', 'COULD', 'BE', 'SEEN', 'A', 'PRIEST', 'IN', 'HIS', 'SURPLICE', 'AND', 'A', 'CHOIR', 'BOY', 'IN', 'HIS', 'RED', 'CAP', 'FOLLOWED'] +3764-168671-0003-1727: ref=['BEHIND', 'IT', 'CAME', 'AN', 'OLD', 'MAN', 'IN', 'THE', 'GARMENTS', 'OF', 'A', 'LABORER', 'WHO', 'LIMPED', 'ALONG'] +3764-168671-0003-1727: hyp=['BEHIND', 'IT', 'CAME', 'AN', 'OLD', 'MAN', 'IN', 'THE', 'GARMENTS', 'OF', 'A', 'LABORER', 'WHO', 'LIMPED', 'ALONG'] +3764-168671-0004-1728: ref=['THE', 'GRAVE', 'DIGGERS', 'BEING', 'THUS', 'BOUND', 'TO', 'SERVICE', 'IN', 'THE', 'EVENING', 'IN', 'SUMMER', 'AND', 'AT', 'NIGHT', 'IN', 'WINTER', 'IN', 'THIS', 'CEMETERY', 'THEY', 'WERE', 'SUBJECTED', 'TO', 'A', 'SPECIAL', 'DISCIPLINE'] +3764-168671-0004-1728: hyp=['THE', 'GRAVE', 'DIGGERS', 'BEING', 'THUS', 'BOUND', 'TO', 'SERVICE', 'IN', 'THE', 'EVENING', 'IN', 'SUMMER', 'AND', 'AT', 'NIGHT', 'IN', 'WINTER', 'IN', 'THIS', 'CEMETERY', 'THEY', 'WERE', 'SUBJECTED', 'TO', 'A', 'SPECIAL', 'DISCIPLINE'] +3764-168671-0005-1729: ref=['THESE', 'GATES', 'THEREFORE', 'SWUNG', 'INEXORABLY', 'ON', 'THEIR', 'HINGES', 'AT', 'THE', 'INSTANT', 'WHEN', 'THE', 'SUN', 'DISAPPEARED', 'BEHIND', 'THE', 'DOME', 'OF', 'THE', 'INVALIDES'] +3764-168671-0005-1729: hyp=['THESE', 'GATES', 'THEREFORE', 'SWUNG', 'INEXORABLY', 'ON', 'THEIR', 'HINGES', 'AT', 'THE', 'INSTANT', 'WHEN', 'THE', 'SUN', 'DISAPPEARED', 'BEHIND', 'THE', 'DOME', 'OF', 'THE', 'INVALID'] +3764-168671-0006-1730: ref=['DAMPNESS', 'WAS', 'INVADING', 'IT', 'THE', 'FLOWERS', 'WERE', 'DESERTING', 'IT'] +3764-168671-0006-1730: hyp=['DAMPNESS', 'WAS', 'INVADING', 'IT', 'THE', 'FLOWERS', 'WERE', 'DESERTING', 'IT'] +3764-168671-0007-1731: ref=['THE', 'BOURGEOIS', 'DID', 'NOT', 'CARE', 'MUCH', 'ABOUT', 'BEING', 'BURIED', 'IN', 'THE', 'VAUGIRARD', 'IT', 'HINTED', 'AT', 'POVERTY', 'PERE', 'LACHAISE', 'IF', 'YOU', 'PLEASE'] +3764-168671-0007-1731: hyp=['THE', 'BOURGEOIS', 'DID', 'NOT', 'CARE', 'MUCH', 'ABOUT', 'BEING', 'BURIED', 'IN', 'THE', 'VIGORE', 'IT', 'HINTED', 'AT', 'POVERTY', 'BAT', 'LACHES', 'IF', 'YOU', 'PLEASE'] +3764-168671-0008-1732: ref=['TO', 'BE', 'BURIED', 'IN', 'PERE', 'LACHAISE', 'IS', 'EQUIVALENT', 'TO', 'HAVING', 'FURNITURE', 'OF', 'MAHOGANY', 'IT', 'IS', 'RECOGNIZED', 'AS', 'ELEGANT'] +3764-168671-0008-1732: hyp=['TO', 'BE', 'BURIED', 'IN', 'PEGLASHES', 'IS', 'EQUIVALENT', 'TO', 'HAVING', 'FURNITURE', 'OF', 'MAHOGANY', 'IT', 'IS', 'RECOGNIZED', 'AS', 'ELEGANT'] +3764-168671-0009-1733: ref=['THE', 'INTERMENT', 'OF', 'MOTHER', 'CRUCIFIXION', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'THE', 'EXIT', 'OF', 'COSETTE', 'THE', 'INTRODUCTION', 'OF', 'JEAN', 'VALJEAN', 'TO', 'THE', 'DEAD', 'ROOM', 'ALL', 'HAD', 'BEEN', 'EXECUTED', 'WITHOUT', 'DIFFICULTY', 'AND', 'THERE', 'HAD', 'BEEN', 'NO', 'HITCH', 'LET', 'US', 'REMARK', 'IN', 'PASSING', 'THAT', 'THE', 'BURIAL', 'OF', 'MOTHER', 'CRUCIFIXION', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CONVENT', 'IS', 'A', 'PERFECTLY', 'VENIAL', 'OFFENCE', 'IN', 'OUR', 'SIGHT'] +3764-168671-0009-1733: hyp=['THE', 'INTERMENT', 'OF', 'MOTHER', 'CRUCIFIXION', 'IN', 'THE', 'VAULT', 'UNDER', 'THE', 'ALTAR', 'THE', 'EXIT', 'OF', 'COSETTE', 'THE', 'INTRODUCTION', 'OF', 'JEAN', 'VALJEAN', 'INTO', 'THE', 'DEAD', 'ROOM', 'ALL', 'HAD', 'BEEN', 'EXECUTED', 'WITHOUT', 'DIFFICULTY', 'AND', 'THERE', 'HAD', 'BEEN', 'NO', 'HITCH', 'LET', 'US', 'REMARK', 'IN', 
'PASSING', 'THAT', 'THE', 'BURIAL', 'OF', 'MOTHER', 'CRUCIFIXION', 'UNDER', 'THE', 'ALTAR', 'OF', 'THE', 'CONVENT', 'IS', 'A', 'PERFECTLY', 'VENIAL', 'OFFENCE', 'IN', 'OUR', 'SIGHT'] +3764-168671-0010-1734: ref=['IT', 'IS', 'ONE', 'OF', 'THE', 'FAULTS', 'WHICH', 'RESEMBLE', 'A', 'DUTY'] +3764-168671-0010-1734: hyp=['IT', 'IS', 'ONE', 'OF', 'THE', 'FAULTS', 'WHICH', 'RESEMBLE', 'A', 'DUTY'] +3764-168671-0011-1735: ref=['THE', 'NUNS', 'HAD', 'COMMITTED', 'IT', 'NOT', 'ONLY', 'WITHOUT', 'DIFFICULTY', 'BUT', 'EVEN', 'WITH', 'THE', 'APPLAUSE', 'OF', 'THEIR', 'OWN', 'CONSCIENCES'] +3764-168671-0011-1735: hyp=['THE', 'NUNS', 'HAD', 'COMMITTED', 'IT', 'NOT', 'ONLY', 'WITHOUT', 'DIFFICULTY', 'BUT', 'EVEN', 'WITH', 'THE', 'APPLAUSE', 'OF', 'THEIR', 'OWN', 'CONSCIENCES'] +3764-168671-0012-1736: ref=['IN', 'THE', 'CLOISTER', 'WHAT', 'IS', 'CALLED', 'THE', 'GOVERNMENT', 'IS', 'ONLY', 'AN', 'INTERMEDDLING', 'WITH', 'AUTHORITY', 'AN', 'INTERFERENCE', 'WHICH', 'IS', 'ALWAYS', 'QUESTIONABLE'] +3764-168671-0012-1736: hyp=['IN', 'THE', 'CLOISTER', 'WHAT', 'IS', 'CALLED', 'THE', 'GOVERNMENT', 'IS', 'ONLY', 'AN', 'INTERMEDDLING', 'WITH', 'AUTHORITY', 'AN', 'INTERFERENCE', 'WHICH', 'IS', 'ALWAYS', 'QUESTIONABLE'] +3764-168671-0013-1737: ref=['MAKE', 'AS', 'MANY', 'LAWS', 'AS', 'YOU', 'PLEASE', 'MEN', 'BUT', 'KEEP', 'THEM', 'FOR', 'YOURSELVES'] +3764-168671-0013-1737: hyp=['MAKE', 'AS', 'MANY', 'LAWS', 'AS', 'YOU', 'PLEASE', 'MEN', 'BUT', 'KEEP', 'THEM', 'FOR', 'YOURSELVES'] +3764-168671-0014-1738: ref=['A', 'PRINCE', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'PRINCIPLE'] +3764-168671-0014-1738: hyp=['A', 'PRINCE', 'IS', 'NOTHING', 'IN', 'THE', 'PRESENCE', 'OF', 'A', 'PRINCIPLE'] +3764-168671-0015-1739: ref=['FAUCHELEVENT', 'LIMPED', 'ALONG', 'BEHIND', 'THE', 'HEARSE', 'IN', 'A', 'VERY', 'CONTENTED', 'FRAME', 'OF', 'MIND'] +3764-168671-0015-1739: hyp=['FLUCHELEVENT', 'LIMPED', 'ALONG', 'BEHIND', 'THE', 'HEARSE', 'IN', 'A', 'VERY', 'CONTENTED', 'FRAME', 'OF', 'MIND'] +3764-168671-0016-1740: ref=['JEAN', "VALJEAN'S", 'COMPOSURE', 'WAS', 'ONE', 'OF', 'THOSE', 'POWERFUL', 'TRANQUILLITIES', 'WHICH', 'ARE', 'CONTAGIOUS'] +3764-168671-0016-1740: hyp=['JEAN', "VALJEAN'S", 'COMPOSURE', 'WAS', 'ONE', 'OF', 'THOSE', 'POWERFUL', 'TRANQUILLITIES', 'WHICH', 'ARE', 'CONTAGIOUS'] +3764-168671-0017-1741: ref=['WHAT', 'REMAINED', 'TO', 'BE', 'DONE', 'WAS', 'A', 'MERE', 'NOTHING'] +3764-168671-0017-1741: hyp=['WHAT', 'REMAINED', 'TO', 'BE', 'DONE', 'WAS', 'A', 'MERE', 'NOTHING'] +3764-168671-0018-1742: ref=['HE', 'PLAYED', 'WITH', 'FATHER', 'MESTIENNE'] +3764-168671-0018-1742: hyp=['HE', 'PLAYED', 'WITH', 'FATHER', 'MESTINE'] +3764-168671-0019-1743: ref=['HE', 'DID', 'WHAT', 'HE', 'LIKED', 'WITH', 'HIM', 'HE', 'MADE', 'HIM', 'DANCE', 'ACCORDING', 'TO', 'HIS', 'WHIM'] +3764-168671-0019-1743: hyp=['HE', 'DID', 'WHAT', 'HE', 'LIKED', 'WITH', 'HIM', 'HE', 'MADE', 'HIM', 'DANCE', 'ACCORDING', 'TO', 'HIS', 'WHIM'] +3764-168671-0020-1744: ref=['THE', 'PERMISSION', 'FOR', 'INTERMENT', 'MUST', 'BE', 'EXHIBITED'] +3764-168671-0020-1744: hyp=['THE', 'PERMISSION', 'FOR', 'INTERMENT', 'MUST', 'BE', 'EXHIBITED'] +3764-168671-0021-1745: ref=['HE', 'WAS', 'A', 'SORT', 'OF', 'LABORING', 'MAN', 'WHO', 'WORE', 'A', 'WAISTCOAT', 'WITH', 'LARGE', 'POCKETS', 'AND', 'CARRIED', 'A', 'MATTOCK', 'UNDER', 'HIS', 'ARM'] +3764-168671-0021-1745: hyp=['HE', 'WAS', 'A', 'SORT', 'OF', 'LABORING', 'MAN', 'WHO', 'WORE', 'A', 'WAISTCOAT', 'WITH', 'LARGE', 'POCKETS', 'AND', 'CARRIED', 'A', 'MATTOCK', 'UNDER', 'HIS', 'ARM'] +3764-168671-0022-1746: 
ref=['THE', 'MAN', 'REPLIED', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0022-1746: hyp=['THE', 'MAN', 'REPLIED', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0023-1747: ref=['THE', 'GRAVE', 'DIGGER', 'YES'] +3764-168671-0023-1747: hyp=['THE', 'GRAVE', 'DIGGER', 'YES'] +3764-168671-0024-1748: ref=['YOU', 'I'] +3764-168671-0024-1748: hyp=['YOU', 'I'] +3764-168671-0025-1749: ref=['FATHER', 'MESTIENNE', 'IS', 'THE', 'GRAVE', 'DIGGER', 'HE', 'WAS'] +3764-168671-0025-1749: hyp=['FATHER', 'MISCHIENNE', 'IS', 'THE', 'GRAVE', 'DIGGER', 'HE', 'WAS'] +3764-168671-0026-1750: ref=['FAUCHELEVENT', 'HAD', 'EXPECTED', 'ANYTHING', 'BUT', 'THIS', 'THAT', 'A', 'GRAVE', 'DIGGER', 'COULD', 'DIE'] +3764-168671-0026-1750: hyp=['FUSSION', 'OF', 'WHAT', 'HAD', 'EXPECTED', 'ANYTHING', 'BUT', 'THIS', 'THAT', 'A', 'GRAVE', 'DIGGER', 'COULD', 'DIE'] +3764-168671-0027-1751: ref=['IT', 'IS', 'TRUE', 'NEVERTHELESS', 'THAT', 'GRAVE', 'DIGGERS', 'DO', 'DIE', 'THEMSELVES'] +3764-168671-0027-1751: hyp=['IT', 'IS', 'TRUE', 'NEVERTHELESS', 'THAT', 'GRAVE', 'DIGGERS', 'DO', 'DIE', 'THEMSELVES'] +3764-168671-0028-1752: ref=['HE', 'HAD', 'HARDLY', 'THE', 'STRENGTH', 'TO', 'STAMMER'] +3764-168671-0028-1752: hyp=['HE', 'HAD', 'HARDLY', 'THE', 'STRENGTH', 'TO', 'STAMMER'] +3764-168671-0029-1753: ref=['BUT', 'HE', 'PERSISTED', 'FEEBLY', 'FATHER', 'MESTIENNE', 'IS', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0029-1753: hyp=['BUT', 'HE', 'PERSISTED', 'FEEBLY', 'FATHER', 'MISSED', 'HERE', 'IS', 'THE', 'GRAVE', 'DIGGER'] +3764-168671-0030-1754: ref=['DO', 'YOU', 'KNOW', 'WHO', 'LITTLE', 'FATHER', 'LENOIR', 'IS', 'HE', 'IS', 'A', 'JUG', 'OF', 'RED', 'WINE'] +3764-168671-0030-1754: hyp=['DO', 'YOU', 'KNOW', 'WHO', 'LITTLE', 'FATHERLAND', 'WARRITZ', 'HE', 'IS', 'A', 'JUG', 'OF', 'RED', 'WINE'] +3764-168671-0031-1755: ref=['BUT', 'YOU', 'ARE', 'A', 'JOLLY', 'FELLOW', 'TOO'] +3764-168671-0031-1755: hyp=['BUT', "YOU'RE", 'A', 'JOLLY', 'FELLOW', 'TOO'] +3764-168671-0032-1756: ref=['ARE', 'YOU', 'NOT', 'COMRADE', "WE'LL", 'GO', 'AND', 'HAVE', 'A', 'DRINK', 'TOGETHER', 'PRESENTLY'] +3764-168671-0032-1756: hyp=['ARE', 'YOU', 'NOT', 'COMRADE', "WE'LL", 'GO', 'AND', 'HAVE', 'A', 'DRINK', 'TOGETHER', 'PRESENTLY'] +3764-168671-0033-1757: ref=['THE', 'MAN', 'REPLIED'] +3764-168671-0033-1757: hyp=['THE', 'MAN', 'REPLIED'] +3764-168671-0034-1758: ref=['HE', 'LIMPED', 'MORE', 'OUT', 'OF', 'ANXIETY', 'THAN', 'FROM', 'INFIRMITY'] +3764-168671-0034-1758: hyp=['HE', 'LIMPED', 'MORE', 'OUT', 'OF', 'ANXIETY', 'THAN', 'FROM', 'INFIRMITY'] +3764-168671-0035-1759: ref=['THE', 'GRAVE', 'DIGGER', 'WALKED', 'ON', 'IN', 'FRONT', 'OF', 'HIM'] +3764-168671-0035-1759: hyp=['THE', 'GRAVE', 'DIGGER', 'WALKED', 'ON', 'IN', 'FRONT', 'OF', 'HIM'] +3764-168671-0036-1760: ref=['FAUCHELEVENT', 'PASSED', 'THE', 'UNEXPECTED', 'GRIBIER', 'ONCE', 'MORE', 'IN', 'REVIEW'] +3764-168671-0036-1760: hyp=['FAUCHELEVENT', 'PASSED', 'THE', 'UNEXPECTED', 'CLAVIER', 'ONCE', 'MORE', 'IN', 'REVIEW'] +3764-168671-0037-1761: ref=['FAUCHELEVENT', 'WHO', 'WAS', 'ILLITERATE', 'BUT', 'VERY', 'SHARP', 'UNDERSTOOD', 'THAT', 'HE', 'HAD', 'TO', 'DEAL', 'WITH', 'A', 'FORMIDABLE', 'SPECIES', 'OF', 'MAN', 'WITH', 'A', 'FINE', 'TALKER', 'HE', 'MUTTERED'] +3764-168671-0037-1761: hyp=['FASHIONEVENT', 'WHO', 'WAS', 'ILLITERATE', 'BUT', 'VERY', 'SHARP', 'UNDERSTOOD', 'THAT', 'HE', 'HAD', 'TO', 'DEAL', 'WITH', 'A', 'FORMIDABLE', 'SPECIES', 'OF', 'MAN', 'WITH', 'A', 'FINE', 'TALKER', 'HE', 'MUTTERED'] +3764-168671-0038-1762: ref=['SO', 'FATHER', 'MESTIENNE', 'IS', 'DEAD'] +3764-168671-0038-1762: hyp=['MISS', 'OH', 'FATHER', 
'MESTINE', 'IS', 'DEAD'] +3764-168671-0039-1763: ref=['THE', 'MAN', 'REPLIED', 'COMPLETELY'] +3764-168671-0039-1763: hyp=['THE', 'MAN', 'REPLIED', 'COMPLETELY'] +3764-168671-0040-1764: ref=['THE', 'GOOD', 'GOD', 'CONSULTED', 'HIS', 'NOTE', 'BOOK', 'WHICH', 'SHOWS', 'WHEN', 'THE', 'TIME', 'IS', 'UP', 'IT', 'WAS', 'FATHER', "MESTIENNE'S", 'TURN', 'FATHER', 'MESTIENNE', 'DIED'] +3764-168671-0040-1764: hyp=['THE', 'GOOD', 'GOD', 'CONSULTED', 'HIS', 'NOTE', 'BOOK', 'WHICH', 'SHARES', 'WHEN', 'THE', 'TIME', 'IS', 'UP', 'IT', 'WAS', 'FATHER', "MESTINE'S", 'TURN', 'FATHER', 'MISS', 'HE', 'HAD', 'DIED'] +3764-168671-0041-1765: ref=['STAMMERED', 'FAUCHELEVENT', 'IT', 'IS', 'MADE'] +3764-168671-0041-1765: hyp=['STAMMERED', 'FAUCHELEVENT', 'IT', 'IS', 'MADE'] +3764-168671-0042-1766: ref=['YOU', 'ARE', 'A', 'PEASANT', 'I', 'AM', 'A', 'PARISIAN'] +3764-168671-0042-1766: hyp=['YOU', 'ARE', 'A', 'PEASANT', 'I', 'AM', 'A', 'PARISIAN'] +3764-168671-0043-1767: ref=['FAUCHELEVENT', 'THOUGHT', 'I', 'AM', 'LOST'] +3764-168671-0043-1767: hyp=['FOR', 'CHAUVELT', 'THOUGHT', 'I', 'AM', 'LOST'] +3764-168671-0044-1768: ref=['THEY', 'WERE', 'ONLY', 'A', 'FEW', 'TURNS', 'OF', 'THE', 'WHEEL', 'DISTANT', 'FROM', 'THE', 'SMALL', 'ALLEY', 'LEADING', 'TO', 'THE', 'NUNS', 'CORNER'] +3764-168671-0044-1768: hyp=['THEY', 'WERE', 'ONLY', 'A', 'FEW', 'TURNS', 'OF', 'THE', 'WHEEL', 'DISTANT', 'FROM', 'THE', 'SMALL', 'ALLEY', 'LEADING', 'TO', 'THE', "NUN'S", 'CORNER'] +3764-168671-0045-1769: ref=['AND', 'HE', 'ADDED', 'WITH', 'THE', 'SATISFACTION', 'OF', 'A', 'SERIOUS', 'MAN', 'WHO', 'IS', 'TURNING', 'A', 'PHRASE', 'WELL'] +3764-168671-0045-1769: hyp=['AND', 'HE', 'ADDED', 'WITH', 'THE', 'SATISFACTION', 'OF', 'A', 'SERIOUS', 'MAN', 'WHO', 'IS', 'TURNING', 'A', 'PHRASE', 'WELL'] +3764-168671-0046-1770: ref=['FORTUNATELY', 'THE', 'SOIL', 'WHICH', 'WAS', 'LIGHT', 'AND', 'WET', 'WITH', 'THE', 'WINTER', 'RAINS', 'CLOGGED', 'THE', 'WHEELS', 'AND', 'RETARDED', 'ITS', 'SPEED'] +3764-168671-0046-1770: hyp=['FORTUNATELY', 'THE', 'SOIL', 'WHICH', 'WAS', 'LIGHT', 'AND', 'WET', 'WITH', 'THE', 'WINTER', 'RAINS', 'CLOGGED', 'THE', 'WHEELS', 'AND', 'RETARDED', 'ITS', 'SPEED'] +3764-168671-0047-1771: ref=['MY', 'FATHER', 'WAS', 'A', 'PORTER', 'AT', 'THE', 'PRYTANEUM', 'TOWN', 'HALL'] +3764-168671-0047-1771: hyp=['MY', 'FATHER', 'WAS', 'A', 'PORTER', 'AT', 'THE', 'BRITTANNIUM', 'TOWN', 'HALL'] +3764-168671-0048-1772: ref=['BUT', 'HE', 'HAD', 'REVERSES', 'HE', 'HAD', 'LOSSES', 'ON', 'CHANGE', 'I', 'WAS', 'OBLIGED', 'TO', 'RENOUNCE', 'THE', 'PROFESSION', 'OF', 'AUTHOR', 'BUT', 'I', 'AM', 'STILL', 'A', 'PUBLIC', 'WRITER'] +3764-168671-0048-1772: hyp=['BUT', 'HE', 'HAD', 'REVERSES', 'HE', 'HAD', 'LOSSES', 'UNCHANGED', 'I', 'WAS', 'OBLIGED', 'TO', 'RENOUNCE', 'THE', 'PROFESSION', 'OF', 'AUTHOR', 'BUT', 'I', 'AM', 'STILL', 'A', 'PUBLIC', 'WRITER'] +3764-168671-0049-1773: ref=['SO', 'YOU', 'ARE', 'NOT', 'A', 'GRAVE', 'DIGGER', 'THEN'] +3764-168671-0049-1773: hyp=['BUT', 'SIR', 'YOU', 'ARE', 'NOT', 'A', 'GRAVE', 'DIGGER', 'THEN'] +3764-168671-0050-1774: ref=['RETURNED', 'FAUCHELEVENT', 'CLUTCHING', 'AT', 'THIS', 'BRANCH', 'FEEBLE', 'AS', 'IT', 'WAS'] +3764-168671-0050-1774: hyp=['RETURNED', 'FAUCHELEVENT', 'CLUTCHING', 'AT', 'THIS', 'BRANCH', 'FEEBLE', 'AS', 'IT', 'WAS'] +3764-168671-0051-1775: ref=['HERE', 'A', 'REMARK', 'BECOMES', 'NECESSARY'] +3764-168671-0051-1775: hyp=['HERE', 'A', 'REMARK', 'BECOMES', 'NECESSARY'] +3764-168671-0052-1776: ref=['FAUCHELEVENT', 'WHATEVER', 'HIS', 'ANGUISH', 'OFFERED', 'A', 'DRINK', 'BUT', 'HE', 'DID', 'NOT', 'EXPLAIN', 
'HIMSELF', 'ON', 'ONE', 'POINT', 'WHO', 'WAS', 'TO', 'PAY'] +3764-168671-0052-1776: hyp=['A', 'FAUCHELEVENT', 'WHATEVER', 'HIS', 'ANGUISH', 'OFFERED', 'A', 'DRINK', 'BUT', 'HE', 'DID', 'NOT', 'EXPLAIN', 'HIMSELF', 'ON', 'ONE', 'POINT', 'WHO', 'WAS', 'TO', 'PAY'] +3764-168671-0053-1777: ref=['THE', 'GRAVE', 'DIGGER', 'WENT', 'ON', 'WITH', 'A', 'SUPERIOR', 'SMILE'] +3764-168671-0053-1777: hyp=['THE', 'GRAVE', 'DIGGER', 'WENT', 'ON', 'WITH', 'THE', 'SUPERIOR', 'SMILE'] +3764-168671-0054-1778: ref=['ONE', 'MUST', 'EAT'] +3764-168671-0054-1778: hyp=['ONE', 'MUST', 'EAT'] +3997-180294-0000-1800: ref=['THE', 'DUKE', 'COMES', 'EVERY', 'MORNING', 'THEY', 'WILL', 'TELL', 'HIM', 'WHEN', 'HE', 'COMES', 'THAT', 'I', 'AM', 'ASLEEP', 'AND', 'PERHAPS', 'HE', 'WILL', 'WAIT', 'UNTIL', 'I', 'WAKE'] +3997-180294-0000-1800: hyp=['THE', 'DUKE', 'COMES', 'EVERY', 'MORNING', 'THEY', 'WILL', 'TELL', 'HIM', 'WHEN', 'HE', 'COMES', 'THAT', 'I', 'AM', 'ASLEEP', 'AND', 'PERHAPS', 'HE', 'WILL', 'WAIT', 'UNTIL', 'I', 'AWAKE'] +3997-180294-0001-1801: ref=['YES', 'BUT', 'IF', 'I', 'SHOULD', 'ALREADY', 'ASK', 'FOR', 'SOMETHING', 'WHAT'] +3997-180294-0001-1801: hyp=['YES', 'BUT', 'IF', 'I', 'SHOULD', 'ALREADY', 'ASK', 'FOR', 'SOMETHING', 'WHAT'] +3997-180294-0002-1802: ref=['WELL', 'DO', 'IT', 'FOR', 'ME', 'FOR', 'I', 'SWEAR', 'TO', 'YOU', 'THAT', 'I', "DON'T", 'LOVE', 'YOU', 'AS', 'THE', 'OTHERS', 'HAVE', 'LOVED', 'YOU'] +3997-180294-0002-1802: hyp=['WELL', 'DO', 'IT', 'FOR', 'ME', 'FOR', 'I', 'SWEAR', 'TO', 'YOU', 'THY', "DON'T", 'LOVE', 'YOU', 'AS', 'THE', 'OTHERS', 'HAVE', 'LOVED', 'YOU'] +3997-180294-0003-1803: ref=['THERE', 'ARE', 'BOLTS', 'ON', 'THE', 'DOOR', 'WRETCH'] +3997-180294-0003-1803: hyp=['THERE', 'ARE', 'BOLTS', 'IN', 'THE', 'DOOR', 'WRETCH'] +3997-180294-0004-1804: ref=['I', "DON'T", 'KNOW', 'HOW', 'IT', 'IS', 'BUT', 'IT', 'SEEMS', 'TO', 'ME', 'AS', 'IF', 'I', 'DO'] +3997-180294-0004-1804: hyp=['I', "DON'T", 'KNOW', 'HOW', 'IT', 'IS', 'BUT', 'IT', 'SEEMS', 'TO', 'ME', 'AS', 'IF', 'I', 'DO'] +3997-180294-0005-1805: ref=['NOW', 'GO', 'I', "CAN'T", 'KEEP', 'MY', 'EYES', 'OPEN'] +3997-180294-0005-1805: hyp=['THOU', 'GO', 'I', "CAN'T", 'KEEP', 'MY', 'EYES', 'OPEN'] +3997-180294-0006-1806: ref=['IT', 'SEEMED', 'TO', 'ME', 'AS', 'IF', 'THIS', 'SLEEPING', 'CITY', 'BELONGED', 'TO', 'ME', 'I', 'SEARCHED', 'MY', 'MEMORY', 'FOR', 'THE', 'NAMES', 'OF', 'THOSE', 'WHOSE', 'HAPPINESS', 'I', 'HAD', 'ONCE', 'ENVIED', 'AND', 'I', 'COULD', 'NOT', 'RECALL', 'ONE', 'WITHOUT', 'FINDING', 'MYSELF', 'THE', 'HAPPIER'] +3997-180294-0006-1806: hyp=['IT', 'SEEMS', 'TO', 'ME', 'AS', 'IF', 'THIS', 'SLEEPING', 'CITY', 'BELONGS', 'TO', 'ME', 'I', 'SEARCHED', 'MY', 'MEMORY', 'FOR', 'THE', 'NAMES', 'OF', 'THOSE', 'WHOSE', 'HAPPINESS', 'I', 'HAD', 'ONCE', 'ENVIED', 'AND', 'I', 'COULD', 'NOT', 'RECALL', 'ONE', 'WITHOUT', 'FINDING', 'MYSELF', 'THE', 'HAPPIER'] +3997-180294-0007-1807: ref=['EDUCATION', 'FAMILY', 'FEELING', 'THE', 'SENSE', 'OF', 'DUTY', 'THE', 'FAMILY', 'ARE', 'STRONG', 'SENTINELS', 'BUT', 'THERE', 'ARE', 'NO', 'SENTINELS', 'SO', 'VIGILANT', 'AS', 'NOT', 'TO', 'BE', 'DECEIVED', 'BY', 'A', 'GIRL', 'OF', 'SIXTEEN', 'TO', 'WHOM', 'NATURE', 'BY', 'THE', 'VOICE', 'OF', 'THE', 'MAN', 'SHE', 'LOVES', 'GIVES', 'THE', 'FIRST', 'COUNSELS', 'OF', 'LOVE', 'ALL', 'THE', 'MORE', 'ARDENT', 'BECAUSE', 'THEY', 'SEEM', 'SO', 'PURE'] +3997-180294-0007-1807: hyp=['EDUCATION', 'FAMILY', 'FEELING', 'THE', 'SENSE', 'OF', 'DUTY', 'THE', 'FAMILY', 'ARE', 'STRONG', 'SENTINELS', 'BUT', 'THERE', 'ARE', 'NO', 'SENTINELS', 'SO', 'VIGILANT', 'AS', 'NOT', 'TO', 
'BE', 'DECEIVED', 'BY', 'A', 'GIRL', 'OF', 'SIXTEEN', 'TO', 'WHOM', 'NATURE', 'BY', 'THE', 'VOICE', 'OF', 'THE', 'MAN', 'SHE', 'LOVES', 'GIVES', 'THE', 'FIRST', 'COUNCIL', 'OF', 'LOVE', 'ALL', 'THE', 'MORE', 'ARDENTS', 'BECAUSE', 'THEY', 'SEEM', 'SO', 'PURE'] +3997-180294-0008-1808: ref=['THE', 'MORE', 'A', 'GIRL', 'BELIEVES', 'IN', 'GOODNESS', 'THE', 'MORE', 'EASILY', 'WILL', 'SHE', 'GIVE', 'WAY', 'IF', 'NOT', 'TO', 'HER', 'LOVER', 'AT', 'LEAST', 'TO', 'LOVE', 'FOR', 'BEING', 'WITHOUT', 'MISTRUST', 'SHE', 'IS', 'WITHOUT', 'FORCE', 'AND', 'TO', 'WIN', 'HER', 'LOVE', 'IS', 'A', 'TRIUMPH', 'THAT', 'CAN', 'BE', 'GAINED', 'BY', 'ANY', 'YOUNG', 'MAN', 'OF', 'FIVE', 'AND', 'TWENTY', 'SEE', 'HOW', 'YOUNG', 'GIRLS', 'ARE', 'WATCHED', 'AND', 'GUARDED'] +3997-180294-0008-1808: hyp=['THE', 'MORE', 'GIRL', 'BELIEVES', 'IN', 'GOODNESS', 'THE', 'MORE', 'IS', 'WE', 'WILL', 'SHE', 'GIVE', 'WAY', 'IF', 'NOT', 'TO', 'HER', 'LOVER', 'AT', 'LEAST', 'TO', 'LOVE', 'FOR', 'BEING', 'WITHOUT', 'MISTRUST', 'SHE', 'IS', 'WITHOUT', 'FORCE', 'AND', 'TO', 'WIN', 'HER', 'LOVE', 'AS', 'A', 'TRIUMPH', 'THAT', 'CAN', 'BE', 'GAINED', 'BY', 'ANY', 'YOUNG', 'MAN', 'OF', 'FIVE', 'AND', 'TWENTY', 'SEE', 'HOW', 'YOUNG', 'GIRLS', 'ARE', 'WATCHED', 'AND', 'GUARDED'] +3997-180294-0009-1809: ref=['THEN', 'HOW', 'SURELY', 'MUST', 'THEY', 'DESIRE', 'THE', 'WORLD', 'WHICH', 'IS', 'HIDDEN', 'FROM', 'THEM', 'HOW', 'SURELY', 'MUST', 'THEY', 'FIND', 'IT', 'TEMPTING', 'HOW', 'SURELY', 'MUST', 'THEY', 'LISTEN', 'TO', 'THE', 'FIRST', 'VOICE', 'WHICH', 'COMES', 'TO', 'TELL', 'ITS', 'SECRETS', 'THROUGH', 'THEIR', 'BARS', 'AND', 'BLESS', 'THE', 'HAND', 'WHICH', 'IS', 'THE', 'FIRST', 'TO', 'RAISE', 'A', 'CORNER', 'OF', 'THE', 'MYSTERIOUS', 'VEIL'] +3997-180294-0009-1809: hyp=['THEN', 'HOW', 'SURELY', 'MUST', 'THEY', 'DESIRE', 'THE', 'WORLD', 'WHICH', 'IS', 'HIDDEN', 'FROM', 'THEM', 'HAS', 'SURELY', 'MUST', 'THEY', 'FIND', 'IT', 'TEMPTING', 'HOW', 'SURELY', 'MUST', 'THEY', 'LISTEN', 'TO', 'THE', 'FIRST', 'VOICE', 'WHICH', 'COMES', 'TO', 'TELL', 'ITS', 'SECRETS', 'THROUGH', 'THEIR', 'BARS', 'AND', 'BLESS', 'THE', 'HAND', 'WHICH', 'IS', 'THE', 'FIRST', 'TO', 'RAISE', 'A', 'CORNER', 'OF', 'THE', 'MYSTERY', 'VEIL'] +3997-180294-0010-1810: ref=['WITH', 'THEM', 'THE', 'BODY', 'HAS', 'WORN', 'OUT', 'THE', 'SOUL', 'THE', 'SENSES', 'HAVE', 'BURNED', 'UP', 'THE', 'HEART', 'DISSIPATION', 'HAS', 'BLUNTED', 'THE', 'FEELINGS'] +3997-180294-0010-1810: hyp=['WITH', 'THEM', 'THE', 'BODY', 'HAS', 'WORN', 'OUT', 'THE', 'SOUL', 'THE', 'SENSES', 'HALF', 'BURNED', 'UP', 'THE', 'HEART', 'DISSIPATION', 'HAS', 'BLUNTED', 'THE', 'FEELINGS'] +3997-180294-0011-1811: ref=['THEY', 'LOVE', 'BY', 'PROFESSION', 'AND', 'NOT', 'BY', 'INSTINCT'] +3997-180294-0011-1811: hyp=['THEY', 'LOVED', 'BY', 'PROFESSION', 'AND', 'NOT', 'BY', 'INSTINCT'] +3997-180294-0012-1812: ref=['WHEN', 'A', 'CREATURE', 'WHO', 'HAS', 'ALL', 'HER', 'PAST', 'TO', 'REPROACH', 'HERSELF', 'WITH', 'IS', 'TAKEN', 'ALL', 'AT', 'ONCE', 'BY', 'A', 'PROFOUND', 'SINCERE', 'IRRESISTIBLE', 'LOVE', 'OF', 'WHICH', 'SHE', 'HAD', 'NEVER', 'FELT', 'HERSELF', 'CAPABLE', 'WHEN', 'SHE', 'HAS', 'CONFESSED', 'HER', 'LOVE', 'HOW', 'ABSOLUTELY', 'THE', 'MAN', 'WHOM', 'SHE', 'LOVES', 'DOMINATES', 'HER'] +3997-180294-0012-1812: hyp=['WHEN', 'A', 'CREATURE', 'WHO', 'HAS', 'ALL', 'HER', 'PAST', 'TO', 'REPROACH', 'HERSELF', 'WITH', 'IS', 'TAKEN', 'ALL', 'AT', 'ONCE', 'BY', 'A', 'PROFOUND', 'SINCERE', 'IRRESISTIBLE', 'LOVE', 'OF', 'WHICH', 'SHE', 'HAD', 'NEVER', 'FELT', 'HERSELF', 'CAPABLE', 'WHEN', 'SHE', 'HAS', 'CONFESSED', 'HER', 
'LOVE', 'HOW', 'ABSOLUTELY', 'THE', 'MAN', 'WHOM', 'SHE', 'LOVES', 'DOMINATES', 'HER'] +3997-180294-0013-1813: ref=['THEY', 'KNOW', 'NOT', 'WHAT', 'PROOF', 'TO', 'GIVE'] +3997-180294-0013-1813: hyp=['THEY', 'KNOW', 'NOT', 'WHAT', 'PROOF', 'TO', 'GIVE'] +3997-180294-0014-1814: ref=['IN', 'ORDER', 'TO', 'DISTURB', 'THE', 'LABOURERS', 'IN', 'THE', 'FIELD', 'WAS', 'ONE', 'DAY', 'DEVOURED', 'BY', 'A', 'WOLF', 'BECAUSE', 'THOSE', 'WHOM', 'HE', 'HAD', 'SO', 'OFTEN', 'DECEIVED', 'NO', 'LONGER', 'BELIEVED', 'IN', 'HIS', 'CRIES', 'FOR', 'HELP'] +3997-180294-0014-1814: hyp=['IN', 'ORDER', 'TO', 'DISTURB', 'THE', 'LABORERS', 'IN', 'THE', 'FIELDS', 'WAS', 'ONE', 'DAY', 'DEVOURED', 'BY', 'A', 'WOLF', 'BECAUSE', 'THOSE', 'WHOM', 'HE', 'HAD', 'SO', 'OFTEN', 'DECEIVED', 'NO', 'LONGER', 'BELIEVED', 'IN', 'HIS', 'CRIES', 'FOR', 'HELP'] +3997-180294-0015-1815: ref=['IT', 'IS', 'THE', 'SAME', 'WITH', 'THESE', 'UNHAPPY', 'WOMEN', 'WHEN', 'THEY', 'LOVE', 'SERIOUSLY'] +3997-180294-0015-1815: hyp=['THIS', 'IS', 'THE', 'SAME', 'WITH', 'THESE', 'UNHAPPY', 'WOMEN', 'WHEN', 'THEY', 'LOVE', 'SERIOUSLY'] +3997-180294-0016-1816: ref=['BUT', 'WHEN', 'THE', 'MAN', 'WHO', 'INSPIRES', 'THIS', 'REDEEMING', 'LOVE', 'IS', 'GREAT', 'ENOUGH', 'IN', 'SOUL', 'TO', 'RECEIVE', 'IT', 'WITHOUT', 'REMEMBERING', 'THE', 'PAST', 'WHEN', 'HE', 'GIVES', 'HIMSELF', 'UP', 'TO', 'IT', 'WHEN', 'IN', 'SHORT', 'HE', 'LOVES', 'AS', 'HE', 'IS', 'LOVED', 'THIS', 'MAN', 'DRAINS', 'AT', 'ONE', 'DRAUGHT', 'ALL', 'EARTHLY', 'EMOTIONS', 'AND', 'AFTER', 'SUCH', 'A', 'LOVE', 'HIS', 'HEART', 'WILL', 'BE', 'CLOSED', 'TO', 'EVERY', 'OTHER'] +3997-180294-0016-1816: hyp=['BUT', 'WHEN', 'THE', 'MAN', 'WHO', 'INSPIRES', 'THIS', 'REDEEMING', 'LOVE', 'IS', 'GREAT', 'ENOUGH', 'IN', 'SOUL', 'TO', 'RECEIVE', 'IT', 'WITHOUT', 'REMEMBERING', 'THE', 'PAST', 'WHEN', 'HE', 'GIVES', 'HIMSELF', 'UP', 'TO', 'IT', 'WHEN', 'IN', 'SHORT', 'HE', 'LOVES', 'AS', 'HE', 'IS', 'LOVED', 'THIS', 'MAN', 'DREAMS', 'AT', 'ONE', 'DROUGHT', 'ALL', 'EARTHLY', 'EMOTIONS', 'AND', 'AFTER', 'SUCH', 'A', 'LOVE', 'HIS', 'HEART', 'WILL', 'BE', 'CLOSED', 'TO', 'EVERY', 'OTHER'] +3997-180294-0017-1817: ref=['BUT', 'TO', 'RETURN', 'TO', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'LIAISON'] +3997-180294-0017-1817: hyp=['BUT', 'TO', 'RETURN', 'TO', 'THE', 'FIRST', 'DAY', 'OF', 'MY', 'LEAR', 'SONG'] +3997-180294-0018-1818: ref=['WHEN', 'I', 'REACHED', 'HOME', 'I', 'WAS', 'IN', 'A', 'STATE', 'OF', 'MAD', 'GAIETY'] +3997-180294-0018-1818: hyp=['WHEN', 'I', 'REACHED', 'HOME', 'I', 'WAS', 'IN', 'A', 'STATE', 'OF', 'MAD', 'GAIETY'] +3997-180294-0019-1819: ref=['THE', 'WOMAN', 'BECOMES', 'THE', "MAN'S", 'MISTRESS', 'AND', 'LOVES', 'HIM'] +3997-180294-0019-1819: hyp=['THE', 'WOMAN', 'BECOMES', 'THE', "MAN'S", 'MISTRESS', 'AND', 'LOVES', 'HIM'] +3997-180294-0020-1820: ref=['HOW', 'WHY'] +3997-180294-0020-1820: hyp=['HOW', 'WHY'] +3997-180294-0021-1821: ref=['MY', 'WHOLE', 'BEING', 'WAS', 'EXALTED', 'INTO', 'JOY', 'AT', 'THE', 'MEMORY', 'OF', 'THE', 'WORDS', 'WE', 'HAD', 'EXCHANGED', 'DURING', 'THAT', 'FIRST', 'NIGHT'] +3997-180294-0021-1821: hyp=['MY', 'WHOLE', 'BEING', 'WAS', 'EXALTED', 'INTO', 'JOY', 'AT', 'THE', 'MEMORY', 'OF', 'THE', 'WORDS', 'WE', 'HAD', 'EXCHANGED', 'DURING', 'THAT', 'FIRST', 'NIGHT'] +3997-180294-0022-1822: ref=['HERE', 'ARE', 'MY', 'ORDERS', 'TO', 'NIGHT', 'AT', 'THE', 'VAUDEVILLE'] +3997-180294-0022-1822: hyp=['HERE', 'ARE', 'MY', 'ORDERS', 'TO', 'NIGHT', 'AT', 'A', 'VAUDEVILLE'] +3997-180294-0023-1823: ref=['COME', 'DURING', 'THE', 'THIRD', "ENTR'ACTE"] +3997-180294-0023-1823: hyp=['CALM', 
'DURING', 'THE', 'THIRD', 'AND', 'TRACT'] +3997-180294-0024-1824: ref=['THE', 'BOXES', 'FILLED', 'ONE', 'AFTER', 'ANOTHER'] +3997-180294-0024-1824: hyp=['THE', 'BOXES', 'FILLED', 'ONE', 'AFTER', 'ANOTHER'] +3997-180294-0025-1825: ref=['ONLY', 'ONE', 'REMAINED', 'EMPTY', 'THE', 'STAGE', 'BOX'] +3997-180294-0025-1825: hyp=['ONLY', 'ONE', 'REMAINS', 'EMPTY', 'THE', 'STAGE', 'BOX'] +3997-180294-0026-1826: ref=['AT', 'THE', 'BEGINNING', 'OF', 'THE', 'THIRD', 'ACT', 'I', 'HEARD', 'THE', 'DOOR', 'OF', 'THE', 'BOX', 'ON', 'WHICH', 'MY', 'EYES', 'HAD', 'BEEN', 'ALMOST', 'CONSTANTLY', 'FIXED', 'OPEN', 'AND', 'MARGUERITE', 'APPEARED'] +3997-180294-0026-1826: hyp=['AT', 'THE', 'BEGINNING', 'OF', 'THE', 'THIRD', 'ACT', 'I', 'HEARD', 'THE', 'DOOR', 'OF', 'THE', 'BOX', 'ON', 'WHICH', 'MY', 'EYES', 'HAD', 'BEEN', 'ALMOST', 'CONSTANTLY', 'FIXED', 'OPEN', 'AND', 'MARGUERITE', 'APPEARED'] +3997-180294-0027-1827: ref=['DID', 'SHE', 'LOVE', 'ME', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'THE', 'MORE', 'BEAUTIFUL', 'SHE', 'LOOKED', 'THE', 'HAPPIER', 'I', 'SHOULD', 'BE'] +3997-180294-0027-1827: hyp=['THAT', 'SHE', 'LOVED', 'ME', 'ENOUGH', 'TO', 'BELIEVE', 'THAT', 'THE', 'MORE', 'BEAUTIFUL', 'SHE', 'LOOKS', 'THE', 'HAPPIER', 'I', 'SHOULD', 'BE'] +3997-180294-0028-1828: ref=['WHAT', 'IS', 'THE', 'MATTER', 'WITH', 'YOU', 'TO', 'NIGHT', 'SAID', 'MARGUERITE', 'RISING', 'AND', 'COMING', 'TO', 'THE', 'BACK', 'OF', 'THE', 'BOX', 'AND', 'KISSING', 'ME', 'ON', 'THE', 'FOREHEAD'] +3997-180294-0028-1828: hyp=['WHAT', 'IS', 'THE', 'MATTER', 'WITH', 'YOU', 'TO', 'NIGHT', 'SAID', 'MARGUERITE', 'RISING', 'AND', 'COMING', 'TO', 'THE', 'BACK', 'OF', 'THE', 'BOX', 'AND', 'KISSING', 'ME', 'ON', 'THE', 'FOREHEAD'] +3997-180294-0029-1829: ref=['YOU', 'SHOULD', 'GO', 'TO', 'BED', 'SHE', 'REPLIED', 'WITH', 'THAT', 'IRONICAL', 'AIR', 'WHICH', 'WENT', 'SO', 'WELL', 'WITH', 'HER', 'DELICATE', 'AND', 'WITTY', 'FACE'] +3997-180294-0029-1829: hyp=['YOU', 'SHOULD', 'GO', 'TO', 'BED', 'SHE', 'REPLIED', 'WITH', 'THAT', 'IRONIC', 'AIR', 'WHICH', 'WENT', 'SO', 'WELL', 'WITH', 'HER', 'DELICATE', 'AND', 'WITTY', 'FACE'] +3997-180294-0030-1830: ref=['WHERE', 'AT', 'HOME'] +3997-180294-0030-1830: hyp=['WHERE', 'AT', 'HOME'] +3997-180294-0031-1831: ref=['YOU', 'STILL', 'LOVE', 'ME', 'CAN', 'YOU', 'ASK'] +3997-180294-0031-1831: hyp=['YOU', 'STILL', 'LOVE', 'ME', 'CAN', 'YOU', 'ASK'] +3997-180294-0032-1832: ref=['BECAUSE', 'YOU', "DON'T", 'LIKE', 'SEEING', 'HIM'] +3997-180294-0032-1832: hyp=['BECAUSE', 'YOU', "DON'T", 'LIKE', 'SEEING', 'HIM'] +3997-180294-0033-1833: ref=['NONETHELESS', 'I', 'WAS', 'VERY', 'UNHAPPY', 'ALL', 'THE', 'REST', 'OF', 'THE', 'EVENING', 'AND', 'WENT', 'AWAY', 'VERY', 'SADLY', 'AFTER', 'HAVING', 'SEEN', 'PRUDENCE', 'THE', 'COUNT', 'AND', 'MARGUERITE', 'GET', 'INTO', 'THE', 'CARRIAGE', 'WHICH', 'WAS', 'WAITING', 'FOR', 'THEM', 'AT', 'THE', 'DOOR'] +3997-180294-0033-1833: hyp=['NONE', 'THE', 'LESS', 'I', 'WAS', 'VERY', 'UNHAPPY', 'ALL', 'THE', 'REST', 'OF', 'THE', 'EVENING', 'AND', 'WENT', 'AWAY', 'VERY', 'SADLY', 'AFTER', 'HAVING', 'SEEN', 'PRUDENCE', 'THE', 'COUNT', 'AND', 'MARGUERITE', 'GAINED', 'TO', 'THE', 'CARRIAGE', 'WHICH', 'WAS', 'WINNING', 'FOR', 'THEM', 'AT', 'THE', 'DOOR'] +3997-180297-0000-1834: ref=['I', 'HAVE', 'NOT', 'COME', 'TO', 'HINDER', 'YOU', 'FROM', 'LEAVING', 'PARIS'] +3997-180297-0000-1834: hyp=['I', 'HAVE', 'NOT', 'COME', 'TO', 'HINDER', 'YOU', 'FROM', 'LEAVING', 'PARIS'] +3997-180297-0001-1835: ref=['YOU', 'IN', 'THE', 'WAY', 'MARGUERITE', 'BUT', 'HOW'] +3997-180297-0001-1835: hyp=['YOU', 'IN', 'THE', 'WAY', 
'MARGUERITE', 'BUT', 'HOW'] +3997-180297-0002-1836: ref=['WELL', 'YOU', 'MIGHT', 'HAVE', 'HAD', 'A', 'WOMAN', 'HERE', 'SAID', 'PRUDENCE', 'AND', 'IT', 'WOULD', 'HARDLY', 'HAVE', 'BEEN', 'AMUSING', 'FOR', 'HER', 'TO', 'SEE', 'TWO', 'MORE', 'ARRIVE'] +3997-180297-0002-1836: hyp=['WELL', 'YOU', 'MIGHT', 'HAVE', 'HAD', 'A', 'WOMAN', 'HERE', 'SAID', 'PRUDENCE', 'AND', 'IT', 'WOULD', 'HARDLY', 'HAVE', 'BEEN', 'AMUSING', 'FOR', 'HER', 'TO', 'SEE', 'TWO', 'MORE', 'ARRIVE'] +3997-180297-0003-1837: ref=['DURING', 'THIS', 'REMARK', 'MARGUERITE', 'LOOKED', 'AT', 'ME', 'ATTENTIVELY'] +3997-180297-0003-1837: hyp=['DURING', 'THIS', 'REMARK', 'MARGUERITE', 'LOOKED', 'AT', 'ME', 'ATTENTIVELY'] +3997-180297-0004-1838: ref=['MY', 'DEAR', 'PRUDENCE', 'I', 'ANSWERED', 'YOU', 'DO', 'NOT', 'KNOW', 'WHAT', 'YOU', 'ARE', 'SAYING'] +3997-180297-0004-1838: hyp=['MY', 'DEAR', 'PRUDENCE', 'I', 'ANSWERED', 'YOU', 'DO', 'NOT', 'KNOW', 'WHAT', 'YOU', 'ARE', 'SAYING'] +3997-180297-0005-1839: ref=['YES', 'BUT', 'BESIDES', 'NOT', 'WISHING', 'TO', 'PUT', 'YOU', 'OUT', 'I', 'WAS', 'SURE', 'THAT', 'IF', 'YOU', 'CAME', 'AS', 'FAR', 'AS', 'MY', 'DOOR', 'YOU', 'WOULD', 'WANT', 'TO', 'COME', 'UP', 'AND', 'AS', 'I', 'COULD', 'NOT', 'LET', 'YOU', 'I', 'DID', 'NOT', 'WISH', 'TO', 'LET', 'YOU', 'GO', 'AWAY', 'BLAMING', 'ME', 'FOR', 'SAYING', 'NO'] +3997-180297-0005-1839: hyp=['YES', 'BUT', 'BESIDES', 'NOT', 'WISHING', 'TO', 'PUT', 'YOU', 'OUT', 'I', 'WAS', 'SURE', 'THAT', 'IF', 'YOU', 'CAME', 'AS', 'FAR', 'AS', 'MY', 'DOOR', 'YOU', 'WOULD', 'WANT', 'TO', 'COME', 'UP', 'AND', 'AS', 'I', 'COULD', 'NOT', 'LET', 'YOU', 'I', 'DID', 'NOT', 'WISH', 'TO', 'LET', 'YOU', 'GO', 'AWAY', 'BLAMING', 'ME', 'FOR', 'SAYING', 'NO'] +3997-180297-0006-1840: ref=['BECAUSE', 'I', 'AM', 'WATCHED', 'AND', 'THE', 'LEAST', 'SUSPICION', 'MIGHT', 'DO', 'ME', 'THE', 'GREATEST', 'HARM'] +3997-180297-0006-1840: hyp=['BECAUSE', 'I', 'AM', 'WATCHED', 'AND', 'THE', 'LEAST', 'SUSPICION', 'MIGHT', 'TO', 'ME', 'THE', 'GREATEST', 'HARM'] +3997-180297-0007-1841: ref=['IS', 'THAT', 'REALLY', 'THE', 'ONLY', 'REASON'] +3997-180297-0007-1841: hyp=['IS', 'THAT', 'REALLY', 'THE', 'ONLY', 'REASON'] +3997-180297-0008-1842: ref=['IF', 'THERE', 'WERE', 'ANY', 'OTHER', 'I', 'WOULD', 'TELL', 'YOU', 'FOR', 'WE', 'ARE', 'NOT', 'TO', 'HAVE', 'ANY', 'SECRETS', 'FROM', 'ONE', 'ANOTHER', 'NOW'] +3997-180297-0008-1842: hyp=['IF', 'THERE', 'WERE', 'ANY', 'OTHER', 'I', 'WOULD', 'TELL', 'YOU', 'FOR', 'WE', 'ARE', 'NOT', 'TO', 'HAVE', 'ANY', 'SECRETS', 'FROM', 'ONE', 'ANOTHER', 'NOW'] +3997-180297-0009-1843: ref=['HONESTLY', 'DO', 'YOU', 'CARE', 'FOR', 'ME', 'A', 'LITTLE', 'A', 'GREAT', 'DEAL'] +3997-180297-0009-1843: hyp=['ON', 'THE', 'STREET', 'DO', 'YOU', 'CARE', 'FOR', 'ME', 'A', 'LITTLE', 'A', 'GREAT', 'DEAL'] +3997-180297-0010-1844: ref=['I', 'FANCIED', 'FOR', 'A', 'MOMENT', 'THAT', 'I', 'MIGHT', 'GIVE', 'MYSELF', 'THAT', 'HAPPINESS', 'FOR', 'SIX', 'MONTHS', 'YOU', 'WOULD', 'NOT', 'HAVE', 'IT', 'YOU', 'INSISTED', 'ON', 'KNOWING', 'THE', 'MEANS'] +3997-180297-0010-1844: hyp=['I', 'FANCIED', 'FOR', 'A', 'MOMENT', 'THAT', 'IT', 'MIGHT', 'GIVE', 'MYSELF', 'THAT', 'HAPPINESS', 'FOR', 'SIX', 'MONTHS', 'YOU', 'WILL', 'NOT', 'HAVE', 'IT', 'YOU', 'INSISTED', 'ON', 'KNOWING', 'THE', 'MEANS'] +3997-180297-0011-1845: ref=['WELL', 'GOOD', 'HEAVENS', 'THE', 'MEANS', 'WERE', 'EASY', 'ENOUGH', 'TO', 'GUESS'] +3997-180297-0011-1845: hyp=['WELL', 'GOOD', 'HEAVENS', 'THE', 'MEANS', 'WERE', 'EASY', 'ENOUGH', 'TO', 'GUESS'] +3997-180297-0012-1846: ref=['I', 'LISTENED', 'AND', 'I', 'GAZED', 'AT', 'MARGUERITE', 
'WITH', 'ADMIRATION'] +3997-180297-0012-1846: hyp=['I', 'LISTENED', 'AND', 'I', 'GAZED', 'AT', 'MARGUERITE', 'WITH', 'ADMIRATION'] +3997-180297-0013-1847: ref=['WHEN', 'I', 'THOUGHT', 'THAT', 'THIS', 'MARVELLOUS', 'CREATURE', 'WHOSE', 'FEET', 'I', 'HAD', 'ONCE', 'LONGED', 'TO', 'KISS', 'WAS', 'WILLING', 'TO', 'LET', 'ME', 'TAKE', 'MY', 'PLACE', 'IN', 'HER', 'THOUGHTS', 'MY', 'PART', 'IN', 'HER', 'LIFE', 'AND', 'THAT', 'I', 'WAS', 'NOT', 'YET', 'CONTENT', 'WITH', 'WHAT', 'SHE', 'GAVE', 'ME', 'I', 'ASKED', 'IF', "MAN'S", 'DESIRE', 'HAS', 'INDEED', 'LIMITS', 'WHEN', 'SATISFIED', 'AS', 'PROMPTLY', 'AS', 'MINE', 'HAD', 'BEEN', 'IT', 'REACHED', 'AFTER', 'SOMETHING', 'FURTHER'] +3997-180297-0013-1847: hyp=['WHEN', 'THEY', 'THOUGHT', 'THAT', 'THIS', 'MARVELLOUS', 'CREATURE', 'WHOSE', 'FEET', 'I', 'HAD', 'ONCE', 'LONGED', 'TO', 'KISS', 'WAS', 'WILLING', 'TO', 'LET', 'ME', 'TAKE', 'MY', 'PLACE', 'IN', 'HER', 'THOUGHTS', 'BY', 'PARTS', 'IN', 'HER', 'LIFE', 'AND', 'THAT', 'I', 'WAS', 'NOT', 'YET', 'CONTENT', 'WITH', 'WHAT', 'SHE', 'GAVE', 'ME', 'I', 'ASKED', 'IF', "MEN'S", 'DESIRE', 'HAD', 'INDEED', 'LIMITS', 'WHEN', 'SATISFIED', 'AS', 'PROMPTLY', 'AS', 'MINE', 'HAD', 'BEEN', 'IT', 'REACHED', 'AFTER', 'SOMETHING', 'FURTHER'] +3997-180297-0014-1848: ref=['TRULY', 'SHE', 'CONTINUED', 'WE', 'POOR', 'CREATURES', 'OF', 'CHANCE', 'HAVE', 'FANTASTIC', 'DESIRES', 'AND', 'INCONCEIVABLE', 'LOVES'] +3997-180297-0014-1848: hyp=['TRULY', 'SHE', 'CONTINUED', 'WE', 'POOR', 'CREATURES', 'OF', 'CHANCE', 'HAVE', 'FANTASTIC', 'DESIRE', 'AND', 'INCONCEIVABLE', 'LOVES'] +3997-180297-0015-1849: ref=['WE', 'ARE', 'NOT', 'ALLOWED', 'TO', 'HAVE', 'HEARTS', 'UNDER', 'PENALTY', 'OF', 'BEING', 'HOOTED', 'DOWN', 'AND', 'OF', 'RUINING', 'OUR', 'CREDIT'] +3997-180297-0015-1849: hyp=['WE', 'ARE', 'NOT', 'ALLOWED', 'TO', 'HAVE', 'HEARTS', 'UNDER', 'PENALTY', 'OF', 'BEING', 'HOOTED', 'DOWN', 'AND', 'OF', 'RUINING', 'OUR', 'CREDIT'] +3997-180297-0016-1850: ref=['WE', 'NO', 'LONGER', 'BELONG', 'TO', 'OURSELVES'] +3997-180297-0016-1850: hyp=['WE', 'NO', 'LONGER', 'BELONG', 'TO', 'OURSELVES'] +3997-180297-0017-1851: ref=['WE', 'STAND', 'FIRST', 'IN', 'THEIR', 'SELF', 'ESTEEM', 'LAST', 'IN', 'THEIR', 'ESTEEM'] +3997-180297-0017-1851: hyp=['WE', 'STAND', 'FIRST', 'IN', 'THEIR', 'SELF', 'ESTEEM', 'LAST', 'IN', 'THEIR', 'ESTEEM'] +3997-180297-0018-1852: ref=['NEVER', 'DO', 'THEY', 'GIVE', 'YOU', 'ADVICE', 'WHICH', 'IS', 'NOT', 'LUCRATIVE'] +3997-180297-0018-1852: hyp=['NEVER', 'DID', 'HE', 'GIVE', 'YOU', 'ADVICE', 'WHICH', 'IS', 'NOT', 'LOOK', 'ATTENTIVE'] +3997-180297-0019-1853: ref=['IT', 'MEANS', 'LITTLE', 'ENOUGH', 'TO', 'THEM', 'THAT', 'WE', 'SHOULD', 'HAVE', 'TEN', 'LOVERS', 'EXTRA', 'AS', 'LONG', 'AS', 'THEY', 'GET', 'DRESSES', 'OR', 'A', 'BRACELET', 'OUT', 'OF', 'THEM', 'AND', 'THAT', 'THEY', 'CAN', 'DRIVE', 'IN', 'OUR', 'CARRIAGE', 'FROM', 'TIME', 'TO', 'TIME', 'OR', 'COME', 'TO', 'OUR', 'BOX', 'AT', 'THE', 'THEATRE'] +3997-180297-0019-1853: hyp=['IT', 'MEANS', 'LITTLE', 'ENOUGH', 'TO', 'THEM', 'THAT', 'WE', 'SHOULD', 'HAVE', 'TEN', 'LOVERS', 'EXTRA', 'AS', 'LONG', 'AS', 'THEY', 'GET', 'DRESSES', 'OR', 'A', 'BRACELET', 'OUT', 'OF', 'THEM', 'AND', 'THAT', 'THEY', 'CAN', 'DRIVE', 'AND', 'ARE', 'CARRIAGE', 'FROM', 'TIME', 'TO', 'TIME', 'OR', 'COME', 'TO', 'OUR', 'BOX', 'AT', 'THE', 'FUTURE'] +3997-180297-0020-1854: ref=['SUCH', 'A', 'MAN', 'I', 'FOUND', 'IN', 'THE', 'DUKE', 'BUT', 'THE', 'DUKE', 'IS', 'OLD', 'AND', 'OLD', 'AGE', 'NEITHER', 'PROTECTS', 'NOR', 'CONSOLES'] +3997-180297-0020-1854: hyp=['SUCH', 'A', 'MAN', 'I', 'FOUND', 
'IN', 'THE', 'DUKE', 'BUT', 'THE', 'DUKE', 'IS', 'OLD', 'AND', 'THE', 'OLD', 'AGE', 'NEITHER', 'PROTECTS', 'NOR', 'CONSOLES'] +3997-180297-0021-1855: ref=['I', 'THOUGHT', 'I', 'COULD', 'ACCEPT', 'THE', 'LIFE', 'WHICH', 'HE', 'OFFERED', 'ME', 'BUT', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +3997-180297-0021-1855: hyp=['I', 'THOUGHT', 'I', 'COULD', 'ACCEPT', 'THE', 'LIFE', 'WHICH', 'HE', 'OFFERED', 'ME', 'OR', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +3997-180297-0022-1856: ref=['WHAT', 'I', 'LOVED', 'IN', 'YOU', 'WAS', 'NOT', 'THE', 'MAN', 'WHO', 'WAS', 'BUT', 'THE', 'MAN', 'WHO', 'WAS', 'GOING', 'TO', 'BE'] +3997-180297-0022-1856: hyp=['WHAT', 'I', 'LOVED', 'IN', 'YOU', 'WAS', 'NOT', 'THE', 'MAN', 'WHO', 'WAS', 'BUT', 'THE', 'MAN', 'WHO', 'WAS', 'GOING', 'TO', 'BE'] +3997-180297-0023-1857: ref=['MARGUERITE', 'TIRED', 'OUT', 'WITH', 'THIS', 'LONG', 'CONFESSION', 'THREW', 'HERSELF', 'BACK', 'ON', 'THE', 'SOFA', 'AND', 'TO', 'STIFLE', 'A', 'SLIGHT', 'COUGH', 'PUT', 'UP', 'HER', 'HANDKERCHIEF', 'TO', 'HER', 'LIPS', 'AND', 'FROM', 'THAT', 'TO', 'HER', 'EYES'] +3997-180297-0023-1857: hyp=['MARGUERITE', 'TIRED', 'OUT', 'WITH', 'THIS', 'LONG', 'CONFESSION', 'THREW', 'HERSELF', 'BACK', 'ON', 'THE', 'SOFA', 'AND', 'TO', 'STIFLE', 'A', 'SLIGHT', 'COUGH', 'PUT', 'UP', 'HER', 'HANDKERCHIEF', 'TO', 'HER', 'LIPS', 'AND', 'FROM', 'THAT', 'TO', 'HER', 'EYES'] +3997-180297-0024-1858: ref=['MARGUERITE', 'DO', 'WITH', 'ME', 'AS', 'YOU', 'WILL', 'I', 'AM', 'YOUR', 'SLAVE', 'YOUR', 'DOG', 'BUT', 'IN', 'THE', 'NAME', 'OF', 'HEAVEN', 'TEAR', 'UP', 'THE', 'LETTER', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'AND', 'DO', 'NOT', 'MAKE', 'ME', 'LEAVE', 'YOU', 'TO', 'MORROW', 'IT', 'WOULD', 'KILL', 'ME'] +3997-180297-0024-1858: hyp=['MARGUERITE', 'DO', 'WITH', 'ME', 'AS', 'YOU', 'WILL', 'I', 'AM', 'YOUR', 'SLAVE', 'YOUR', 'DOG', 'BUT', 'IN', 'THE', 'NAME', 'OF', 'HEAVEN', 'TEAR', 'UP', 'THE', 'LETTER', 'WHICH', 'I', 'WROTE', 'TO', 'YOU', 'AND', 'DO', 'NOT', 'MAKE', 'ME', 'LEAVE', 'YOU', 'TO', 'MORROW', 'IT', 'WOULD', 'KILL', 'ME'] +3997-180297-0025-1859: ref=['MARGUERITE', 'DREW', 'THE', 'LETTER', 'FROM', 'HER', 'BOSOM', 'AND', 'HANDING', 'IT', 'TO', 'ME', 'WITH', 'A', 'SMILE', 'OF', 'INFINITE', 'SWEETNESS', 'SAID'] +3997-180297-0025-1859: hyp=['MARGUERITE', 'DREW', 'THE', 'LETTER', 'FROM', 'HER', 'BOSOM', 'AND', 'HANDING', 'IT', 'TO', 'ME', 'WITH', 'A', 'SMILE', 'OF', 'INFINITE', 'SWEETNESS', 'SAID'] +3997-180297-0026-1860: ref=['HERE', 'IT', 'IS', 'I', 'HAVE', 'BROUGHT', 'IT', 'BACK'] +3997-180297-0026-1860: hyp=['HERE', 'IT', 'IS', 'I', 'HAVE', 'BROUGHT', 'IT', 'BACK'] +3997-180297-0027-1861: ref=['I', 'TORE', 'THE', 'LETTER', 'INTO', 'FRAGMENTS', 'AND', 'KISSED', 'WITH', 'TEARS', 'THE', 'HAND', 'THAT', 'GAVE', 'IT', 'TO', 'ME'] +3997-180297-0027-1861: hyp=['I', 'TORE', 'THE', 'LETTER', 'INTO', 'FRAGMENTS', 'AND', 'KISSED', 'IT', 'WITH', 'TEARS', 'THE', 'HAND', 'THAT', 'I', 'GAVE', 'IT', 'TO', 'ME'] +3997-180297-0028-1862: ref=['LOOK', 'HERE', 'PRUDENCE', 'DO', 'YOU', 'KNOW', 'WHAT', 'HE', 'WANTS', 'SAID', 'MARGUERITE'] +3997-180297-0028-1862: hyp=['LOOK', 'HERE', 'PRUDENCE', 'DO', 'YOU', 'KNOW', 'WHAT', 'HE', 'WANTS', 'SAID', 'MARGUERITE'] +3997-180297-0029-1863: ref=['HE', 'WANTS', 'YOU', 'TO', 'FORGIVE', 'HIM'] +3997-180297-0029-1863: hyp=['HE', 'WANTS', 'YOU', 'TO', 'FORGIVE', 'HIM'] +3997-180297-0030-1864: ref=['ONE', 'HAS', 'TO', 'BUT', 'HE', 'WANTS', 'MORE', 'THAN', 'THAT', 'WHAT', 'THEN'] +3997-180297-0030-1864: hyp=['ONE', 'HAS', 'TWO', 'BUT', 'HE', 'ONCE', 'MORE', 'THAN', 'THAT', 'WHAT', 'THEN'] +3997-180297-0031-1865: ref=['I', 
'EMBRACED', 'MARGUERITE', 'UNTIL', 'SHE', 'WAS', 'ALMOST', 'STIFLED'] +3997-180297-0031-1865: hyp=['I', 'EMBRACED', 'MARGUERITE', 'UNTIL', 'SHE', 'WAS', 'ALMOST', 'STIFLED'] +3997-182399-0000-1779: ref=['OL', 'MISTAH', 'BUZZARD', 'GRINNED'] +3997-182399-0000-1779: hyp=['ALL', 'MISTER', 'BUZZARD', 'GRINNED'] +3997-182399-0001-1780: ref=['THIS', 'SOUNDED', 'LIKE', 'ANOTHER', 'STORY'] +3997-182399-0001-1780: hyp=['THIS', 'SOUNDED', 'LIKE', 'ANOTHER', 'STORY'] +3997-182399-0002-1781: ref=['HE', 'WAS', 'CURIOUS', 'ABOUT', 'THAT', 'BLACK', 'HEADED', 'COUSIN', 'OF', 'OL', 'MISTAH', 'BUZZARD', 'VERY', 'CURIOUS', 'INDEED'] +3997-182399-0002-1781: hyp=['HE', 'WAS', 'CURIOUS', 'ABOUT', 'THAT', 'BLACK', 'HEADED', 'COUSIN', 'OF', 'ALL', 'MISTER', 'BUZZARD', 'VERY', 'CURIOUS', 'INDEED'] +3997-182399-0003-1782: ref=['ANYWAY', 'HE', 'WOULD', 'FIND', 'OUT'] +3997-182399-0003-1782: hyp=['ANYWAY', 'HE', 'WOULD', 'FIND', 'OUT'] +3997-182399-0004-1783: ref=['PLEASE', 'MISTER', 'BUZZARD', 'PLEASE', 'TELL', 'US', 'THE', 'STORY', 'HE', 'BEGGED'] +3997-182399-0004-1783: hyp=['PLEASE', 'MISTER', 'BUZZARD', 'PLEASE', 'TELL', 'US', 'THE', 'STORY', 'HE', 'BEGGED'] +3997-182399-0005-1784: ref=['NOW', 'OL', 'MISTAH', 'BUZZARD', 'IS', 'NATURALLY', 'GOOD', 'NATURED', 'AND', 'ACCOMMODATING', 'AND', 'WHEN', 'PETER', 'BEGGED', 'SO', 'HARD', 'HE', 'JUST', "COULDN'T", 'FIND', 'IT', 'IN', 'HIS', 'HEART', 'TO', 'REFUSE'] +3997-182399-0005-1784: hyp=['NOW', 'ALL', 'MISTER', 'BUZZARD', 'IS', 'NATURALLY', 'GOOD', 'NATURED', 'AND', 'ACCOMMODATING', 'AND', 'WHEN', 'PETER', 'BAGS', 'SO', 'HARD', 'HE', 'JUST', "COULDN'T", 'FIND', 'IT', 'IN', 'HIS', 'HEART', 'TO', 'REFUSE'] +3997-182399-0006-1785: ref=['WAY', 'BACK', 'IN', 'THE', 'DAYS', 'WHEN', 'GRANDPAP', 'BUZZARD', 'HAD', 'HIS', 'LIL', 'FALLING', 'OUT', 'WITH', 'OL', 'KING', 'EAGLE', 'AND', 'DONE', 'FLY', 'SO', 'HIGH', 'HE', "SCO'TCH", 'THE', 'FEATHERS', 'OFFEN', 'HIS', 'HAID', 'HE', 'HAD', 'A', 'COUSIN', 'DID', 'GRANDPAP', 'BUZZARD', 'AND', 'THIS', 'COUSIN', 'WAS', 'JES', 'NATURALLY', 'LAZY', 'AND', 'NO', 'COUNT'] +3997-182399-0006-1785: hyp=['WAY', 'BACK', 'IN', 'THE', 'DAYS', 'WHEN', 'GRANDPAPAZZARD', 'HAD', 'HIS', 'LITTLE', 'FALLING', 'OUT', 'WITH', 'OLD', 'KING', 'EAGLE', 'AND', 'DON', 'FLIES', 'SO', 'HIGH', 'HE', 'SCORCHED', 'THE', 'FEATHERS', 'OFTEN', 'HIS', 'HEAD', 'HE', 'HAD', 'A', 'COUSIN', 'DID', 'GRANDPAP', 'BUZZARD', 'AND', 'THIS', 'COUSIN', 'WAS', 'JUST', 'NATURALLY', 'LAZY', 'AND', 'NO', 'COUNT'] +3997-182399-0007-1786: ref=['LIKE', 'MOST', 'NO', 'COUNT', 'PEOPLE', 'HE', 'USED', 'TO', 'MAKE', 'A', 'REGULAR', 'NUISANCE', 'OF', 'HISSELF', 'POKING', 'HIS', 'NOSE', 'INTO', "EV'YBODY'S", 'BUSINESS', 'AND', 'NEVER', 'TENDING', 'TO', 'HIS', 'OWN'] +3997-182399-0007-1786: hyp=['LIKE', 'MOST', 'NO', 'COUNT', 'PEOPLE', 'HE', 'USED', 'TO', 'MAKE', 'A', 'REGULAR', 'NUISANCE', 'OF', 'HIMSELF', 'POKING', 'HIS', 'NOSE', 'INTO', "EVERYBODY'S", 'BUSINESS', 'AND', 'NEVER', 'TENDING', 'TO', 'HIS', 'OWN'] +3997-182399-0008-1787: ref=["WASN'T", 'ANYTHING', 'GOING', 'ON', 'THAT', 'THIS', 'TRIFLING', 'MEMBER', 'OF', 'THE', 'BUZZARD', "FAM'LY", "DIDN'T", 'FIND', 'OUT', 'ABOUT', 'AND', 'MEDDLE', 'IN', 'HE', 'COULD', 'ASK', 'MO', 'QUESTIONS', 'THAN', 'PETER', 'RABBIT', 'CAN', 'AN', 'ANYBODY', 'THAT', 'CAN', 'DO', 'THAT', 'HAS', 'GOT', 'TO', 'ASK', 'A', 'LOT'] +3997-182399-0008-1787: hyp=["WASN'T", 'ANYTHING', 'GOING', 'ON', 'THAT', 'THIS', 'TRIFLING', 'MEMBER', 'OF', 'THE', 'BUZZARD', 'FAMILY', "DIDN'T", 'FIND', 'OUT', 'ABOUT', 'A', 'MEDDLE', 'IN', 'HE', 'COULD', 'ASK', 'MORE', 'QUESTIONS', 
'THAN', 'PETER', 'RABBIT', 'KENN', 'AND', 'ANYBODY', 'THAT', 'CAN', 'DO', 'THAT', 'HAS', 'GOT', 'TO', 'ASK', 'A', 'LOT'] +3997-182399-0009-1788: ref=['EVERYBODY', 'LOOKED', 'AT', 'PETER', 'AND', 'LAUGHED'] +3997-182399-0009-1788: hyp=['EVERYBODY', 'LOOKED', 'AT', 'PETER', 'AND', 'LAUGHED'] +3997-182399-0010-1789: ref=['SO', 'WE', 'UNS', 'SIT', 'ON', 'THE', 'CHIMNEY', 'TOPS', 'WHENEVER', 'OL', 'JACK', 'FROST', 'GETS', 'TO', 'STRAYING', 'DOWN', 'WHERE', 'HE', 'HAVE', 'NO', 'BUSINESS'] +3997-182399-0010-1789: hyp=['SO', 'WE', 'UNS', 'SET', 'ON', 'THE', 'CHIMNEY', 'TOPS', 'WHENEVER', 'OLD', 'JACK', 'FROST', 'GETS', 'TO', 'STRAIN', 'DOWN', 'WHERE', 'HE', 'HAVE', 'NO', 'BUSINESS'] +3997-182399-0011-1790: ref=['ONE', 'DAY', 'THIS', 'NO', 'COUNT', 'TRIFLING', 'COUSIN', 'OF', 'GRANDPAP', 'BUZZARD', 'GET', 'COLD', 'IN', 'HIS', 'FEET'] +3997-182399-0011-1790: hyp=['ONE', 'DAY', "THERE'S", 'NO', 'COUNT', 'TRIFLING', 'COUSIN', 'OF', 'GRANDPAP', 'BUZZARD', 'GET', 'COLD', 'IN', 'HIS', 'FEET'] +3997-182399-0012-1791: ref=['IT', 'WAS', 'ON', 'A', 'LIL', 'OL', 'HOUSE', 'A', 'LIL', 'OL', 'TUMBLE', 'DOWN', 'HOUSE'] +3997-182399-0012-1791: hyp=['IT', 'WAS', 'ON', 'THE', 'LITTLE', 'OLD', 'HOUSE', 'A', 'LITTLE', 'OLD', 'TUMBLE', 'DOWN', 'HOUSE'] +3997-182399-0013-1792: ref=['WHY', 'HE', 'JES', 'STRETCH', 'HIS', 'FOOL', 'HAID', 'AS', 'FAR', 'DOWN', 'THAT', 'CHIMNEY', 'AS', 'HE', 'CAN', 'AN', 'LISTEN', 'AN', 'LISTEN'] +3997-182399-0013-1792: hyp=['WHY', 'HE', 'JUST', 'STRETCH', 'HIS', 'FULL', 'HEAD', 'AS', 'FAR', 'DOWN', 'THE', 'CHIMNEY', 'AS', 'HE', 'CAN', 'AND', 'LISTEN', 'AND', 'LISTEN'] +3997-182399-0014-1793: ref=['BUT', 'HE', "DON'T", 'MIND', 'THAT'] +3997-182399-0014-1793: hyp=['BUT', 'HE', "DON'T", 'MIND', 'THAT'] +3997-182399-0015-1794: ref=['WILL', "YO'", 'ALLS', 'PLEASE', 'SPEAK', 'A', 'LIL', 'LOUDER', 'HE', 'HOLLER', 'DOWN', 'THE', 'CHIMNEY', 'JES', 'LIKE', 'THAT'] +3997-182399-0015-1794: hyp=['OH', 'YOU', 'ALL', 'PLEASE', 'SPEAK', 'A', 'LOW', 'LOUDER', 'HE', 'HOLLERED', 'DOWN', 'THE', 'CHIMNEY', 'JUST', 'LIKE', 'THAT'] +3997-182399-0016-1795: ref=['YES', 'SAH', 'SHE', "SHO'LY", 'WAS', 'PLUMB', 'SCARED'] +3997-182399-0016-1795: hyp=['YES', 'SAH', 'SHE', 'SURELY', 'YOU', 'WAS', 'PLUM', 'SCARED'] +3997-182399-0017-1796: ref=['THEY', 'LIKE', 'TO', 'CHOKE', 'THAT', 'NO', 'COUNT', 'BUZZARD', 'TO', 'DEATH'] +3997-182399-0017-1796: hyp=["THEY'D", 'LIKE', 'TO', 'CHOKE', 'THAT', 'NO', 'COMPOSER', 'TO', 'DEATH'] +3997-182399-0018-1797: ref=['WHEN', 'HE', 'GET', 'HOME', 'HE', 'TRY', 'AN', 'TRY', 'TO', 'BRUSH', 'THAT', 'SOOT', 'OFF', 'BUT', 'IT', 'DONE', 'GET', 'INTO', 'THE', 'SKIN', 'AN', 'IT', 'STAY', 'THERE'] +3997-182399-0018-1797: hyp=['WHEN', 'HE', 'GET', 'HOME', "HE'D", 'TRY', 'AND', 'TRIES', 'TO', 'BRUSH', 'US', 'SOOT', 'OFF', 'BUT', 'IT', 'DONE', 'GET', 'INTO', 'THE', 'SKIN', 'AND', "IT'S", 'STAY', 'THERE'] +3997-182399-0019-1798: ref=['A', 'LITTLE', 'SIGH', 'OF', 'SATISFACTION', 'WENT', 'AROUND', 'THE', 'CIRCLE', 'OF', 'LISTENERS'] +3997-182399-0019-1798: hyp=['A', 'LITTLE', 'SIGH', 'OF', 'SATISFACTION', 'WENT', 'ROUND', 'THE', 'CIRCLE', 'OF', 'LISTENERS'] +3997-182399-0020-1799: ref=['IT', 'WAS', 'JUST', 'AS', 'GOOD', 'AS', 'ONE', 'OF', 'GRANDFATHER', "FROG'S"] +3997-182399-0020-1799: hyp=['IT', 'WAS', 'JUST', 'AS', 'GOOD', 'AS', 'ONE', 'OF', 'GRANDFATHER', 'FROGS'] +4198-12259-0000-203: ref=['DRAW', 'REACH', 'FILL', 'MIX', 'GIVE', 'IT', 'ME', 'WITHOUT', 'WATER'] +4198-12259-0000-203: hyp=['DRAW', 'REACH', 'FILL', 'MIX', 'GIVE', 'IT', 'ME', 'WITHOUT', 'WATER'] +4198-12259-0001-204: ref=['SO', 'MY', 
'FRIEND', 'SO', 'WHIP', 'ME', 'OFF', 'THIS', 'GLASS', 'NEATLY', 'BRING', 'ME', 'HITHER', 'SOME', 'CLARET', 'A', 'FULL', 'WEEPING', 'GLASS', 'TILL', 'IT', 'RUN', 'OVER'] +4198-12259-0001-204: hyp=['SO', 'MY', 'FRIEND', 'SO', 'WHIP', 'ME', 'OFF', 'THIS', 'GLASS', 'NEATLY', 'BRING', 'ME', 'HITHER', 'SOME', 'CLARE', 'IT', 'A', 'FULL', 'WEEPING', 'GLASS', 'TILL', 'IT', 'RUN', 'OVER'] +4198-12259-0002-205: ref=['A', 'CESSATION', 'AND', 'TRUCE', 'WITH', 'THIRST'] +4198-12259-0002-205: hyp=['A', 'CESSATION', 'AND', 'TRUCE', 'WITH', 'THIRST'] +4198-12259-0003-206: ref=['YOU', 'HAVE', 'CATCHED', 'A', 'COLD', 'GAMMER', 'YEA', 'FORSOOTH', 'SIR'] +4198-12259-0003-206: hyp=['YOU', 'HAVE', 'CAST', 'A', 'COLD', 'GAMMER', 'YEA', 'FORSOOTH', 'SIR'] +4198-12259-0004-207: ref=['BY', 'THE', 'BELLY', 'OF', 'SANCT', 'BUFF', 'LET', 'US', 'TALK', 'OF', 'OUR', 'DRINK', 'I', 'NEVER', 'DRINK', 'BUT', 'AT', 'MY', 'HOURS', 'LIKE', 'THE', "POPE'S", 'MULE'] +4198-12259-0004-207: hyp=['BY', 'THE', 'BELLY', 'OF', 'SAINT', 'BUFF', 'LET', 'US', 'TALK', 'OF', 'OUR', 'DRINK', 'I', 'NEVER', 'DRINK', 'BUT', 'AT', 'MY', 'HOURS', 'LIKE', 'THE', "POPE'S", 'MULE'] +4198-12259-0005-208: ref=['WHICH', 'WAS', 'FIRST', 'THIRST', 'OR', 'DRINKING'] +4198-12259-0005-208: hyp=['WHICH', 'WAS', 'FIRST', 'THUS', 'TO', 'DRINKING'] +4198-12259-0006-209: ref=['WHAT', 'IT', 'SEEMS', 'I', 'DO', 'NOT', 'DRINK', 'BUT', 'BY', 'AN', 'ATTORNEY'] +4198-12259-0006-209: hyp=['WHAT', 'IT', 'SEEMS', 'I', 'DO', 'NOT', 'DRINK', 'BUT', 'BUY', 'AN', 'ATTORNEY'] +4198-12259-0007-210: ref=['DRINK', 'ALWAYS', 'AND', 'YOU', 'SHALL', 'NEVER', 'DIE'] +4198-12259-0007-210: hyp=['DRINK', 'ALWAYS', 'AND', 'YOU', 'SHALL', 'NEVER', 'DIE'] +4198-12259-0008-211: ref=['IF', 'I', 'DRINK', 'NOT', 'I', 'AM', 'A', 'GROUND', 'DRY', 'GRAVELLED', 'AND', 'SPENT', 'I', 'AM', 'STARK', 'DEAD', 'WITHOUT', 'DRINK', 'AND', 'MY', 'SOUL', 'READY', 'TO', 'FLY', 'INTO', 'SOME', 'MARSH', 'AMONGST', 'FROGS', 'THE', 'SOUL', 'NEVER', 'DWELLS', 'IN', 'A', 'DRY', 'PLACE', 'DROUTH', 'KILLS', 'IT'] +4198-12259-0008-211: hyp=['IF', 'I', 'DRINK', 'NOT', 'I', 'AM', 'A', 'GROUND', 'DRY', 'GRAVELLED', 'AND', 'SPENT', 'I', 'AM', 'STARK', 'DEAD', 'WITHOUT', 'DRINK', 'AND', 'MY', 'SOUL', 'READY', 'TO', 'FLY', 'INTO', 'SOME', 'MARS', 'A', "MONTH'S", 'FROGS', 'THE', 'SOUL', 'NEVER', 'DWELLS', 'IN', 'A', 'DRY', 'PLACE', 'DROUGHT', 'KILL', 'IT'] +4198-12259-0009-212: ref=['HE', 'DRINKS', 'IN', 'VAIN', 'THAT', 'FEELS', 'NOT', 'THE', 'PLEASURE', 'OF', 'IT'] +4198-12259-0009-212: hyp=['HE', 'DRINK', 'SO', 'VAIN', 'THAT', 'FILLS', 'NOT', 'THE', 'PLEASURE', 'OF', 'IT'] +4198-12259-0010-213: ref=['IT', 'IS', 'ENOUGH', 'TO', 'BREAK', 'BOTH', 'GIRDS', 'AND', 'PETREL'] +4198-12259-0010-213: hyp=['IT', 'IS', 'ENOUGH', 'TO', 'BREAK', 'BOTH', 'GORGE', 'AND', 'PETAL'] +4198-12259-0011-214: ref=['WHAT', 'DIFFERENCE', 'IS', 'THERE', 'BETWEEN', 'A', 'BOTTLE', 'AND', 'A', 'FLAGON'] +4198-12259-0011-214: hyp=['WHAT', 'DIFFERENCE', 'IS', 'THERE', 'BETWEEN', 'A', 'BOTTLE', 'AND', 'A', 'FLAGON'] +4198-12259-0012-215: ref=['BRAVELY', 'AND', 'WELL', 'PLAYED', 'UPON', 'THE', 'WORDS'] +4198-12259-0012-215: hyp=['BRAVELY', 'AND', 'WELL', 'PLAYED', 'UPON', 'THE', 'WORDS'] +4198-12259-0013-216: ref=['OUR', 'FATHERS', 'DRANK', 'LUSTILY', 'AND', 'EMPTIED', 'THEIR', 'CANS'] +4198-12259-0013-216: hyp=['OUR', 'FATHERS', 'DRANK', 'LUSTILY', 'AND', 'EMPTIED', 'THEIR', 'CANES'] +4198-12259-0014-217: ref=['WELL', 'CACKED', 'WELL', 'SUNG'] +4198-12259-0014-217: hyp=['WELL', 'CAGLE', 'WELL', 'SUNG'] +4198-12259-0015-218: ref=['COME', 'LET', 'US', 
'DRINK', 'WILL', 'YOU', 'SEND', 'NOTHING', 'TO', 'THE', 'RIVER'] +4198-12259-0015-218: hyp=['COME', 'LET', 'US', 'DRINK', 'WILL', 'YOU', 'SEND', 'NOTHING', 'TO', 'THE', 'RIVER'] +4198-12259-0016-219: ref=['I', 'DRINK', 'NO', 'MORE', 'THAN', 'A', 'SPONGE'] +4198-12259-0016-219: hyp=['I', 'DRINK', 'NO', 'MORE', 'THAN', 'HIS', 'SPINES'] +4198-12259-0017-220: ref=['I', 'DRINK', 'LIKE', 'A', 'TEMPLAR', 'KNIGHT'] +4198-12259-0017-220: hyp=['I', 'DRINK', 'LIKE', 'A', 'TEMPLAR', 'NIGHT'] +4198-12259-0018-221: ref=['AND', 'I', 'TANQUAM', 'SPONSUS'] +4198-12259-0018-221: hyp=['AND', 'I', 'TANK', 'QUON', 'SPONSES'] +4198-12259-0019-222: ref=['AND', 'I', 'SICUT', 'TERRA', 'SINE', 'AQUA'] +4198-12259-0019-222: hyp=['AND', 'I', 'SICUT', 'TERRACE', 'IN', 'AQUA'] +4198-12259-0020-223: ref=['GIVE', 'ME', 'A', 'SYNONYMON', 'FOR', 'A', 'GAMMON', 'OF', 'BACON'] +4198-12259-0020-223: hyp=['GIVE', 'ME', 'A', 'SYNONYM', 'FOR', 'A', 'GAMIN', 'OF', 'BACON'] +4198-12259-0021-224: ref=['IT', 'IS', 'THE', 'COMPULSORY', 'OF', 'DRINKERS', 'IT', 'IS', 'A', 'PULLEY'] +4198-12259-0021-224: hyp=['IT', 'IS', 'THE', 'COMPULSORY', 'OF', 'DRINKERS', 'IT', 'IS', 'A', 'PULLEY'] +4198-12259-0022-225: ref=['A', 'LITTLE', 'RAIN', 'ALLAYS', 'A', 'GREAT', 'DEAL', 'OF', 'WIND', 'LONG', 'TIPPLING', 'BREAKS', 'THE', 'THUNDER'] +4198-12259-0022-225: hyp=['A', 'LITTLE', 'RAIN', 'IT', 'LAYS', 'A', 'GREAT', 'DEAL', 'OF', 'WIND', 'LONG', 'TIPPLING', 'BREAKS', 'THE', 'THUNDER'] +4198-12259-0023-226: ref=['BUT', 'IF', 'THERE', 'CAME', 'SUCH', 'LIQUOR', 'FROM', 'MY', 'BALLOCK', 'WOULD', 'YOU', 'NOT', 'WILLINGLY', 'THEREAFTER', 'SUCK', 'THE', 'UDDER', 'WHENCE', 'IT', 'ISSUED'] +4198-12259-0023-226: hyp=['BUT', 'IF', 'THERE', 'CAME', 'SUCH', 'LIQUOR', 'FOR', 'MY', 'BALLOCK', 'WERE', 'YOU', 'NOT', 'WILLINGLY', 'THEREAFTER', 'SUCK', 'THE', 'UTTER', 'WHENCE', 'IT', 'ISSUED'] +4198-12259-0024-227: ref=['HERE', 'PAGE', 'FILL'] +4198-12259-0024-227: hyp=['HERE', 'PAGE', 'FILL'] +4198-12259-0025-228: ref=['I', 'APPEAL', 'FROM', 'THIRST', 'AND', 'DISCLAIM', 'ITS', 'JURISDICTION'] +4198-12259-0025-228: hyp=['I', 'APPEAL', 'FROM', 'THIRST', 'AND', 'DISCLAIM', 'ITS', 'JURISDICTION'] +4198-12259-0026-229: ref=['I', 'WAS', 'WONT', 'HERETOFORE', 'TO', 'DRINK', 'OUT', 'ALL', 'BUT', 'NOW', 'I', 'LEAVE', 'NOTHING'] +4198-12259-0026-229: hyp=['I', 'WAS', 'WONT', 'HERE', 'TO', 'FOR', 'TO', 'DRINK', 'OUT', 'ALL', 'BUT', 'NOW', 'I', 'LEAVE', 'NOTHING'] +4198-12259-0027-230: ref=['HEYDAY', 'HERE', 'ARE', 'TRIPES', 'FIT', 'FOR', 'OUR', 'SPORT', 'AND', 'IN', 'EARNEST', 'EXCELLENT', 'GODEBILLIOS', 'OF', 'THE', 'DUN', 'OX', 'YOU', 'KNOW', 'WITH', 'THE', 'BLACK', 'STREAK'] +4198-12259-0027-230: hyp=['HAY', 'THEE', 'HERE', 'A', "TRIPE'S", 'FIT', 'FOR', 'OUR', 'SPORT', 'AND', 'IN', 'EARNEST', 'EXCELLENT', 'GO', 'TO', 'BE', 'YOURS', 'OF', 'THE', 'DUN', 'AX', 'YOU', 'KNOW', 'WITH', 'THE', 'BLACK', 'STREET'] +4198-12259-0028-231: ref=['O', 'FOR', "GOD'S", 'SAKE', 'LET', 'US', 'LASH', 'THEM', 'SOUNDLY', 'YET', 'THRIFTILY'] +4198-12259-0028-231: hyp=['OH', 'FOR', "GOD'S", 'SAKE', 'LET', 'US', 'LAST', 'THEM', 'SOUNDLY', 'YET', 'THRIFTILY'] +4198-12259-0029-232: ref=['SPARROWS', 'WILL', 'NOT', 'EAT', 'UNLESS', 'YOU', 'BOB', 'THEM', 'ON', 'THE', 'TAIL', 'NOR', 'CAN', 'I', 'DRINK', 'IF', 'I', 'BE', 'NOT', 'FAIRLY', 'SPOKE', 'TO'] +4198-12259-0029-232: hyp=['SPARROWS', 'WHEN', 'I', 'EAT', 'UNLESS', 'YOU', 'BOB', 'THEM', 'ON', 'THE', 'TAIL', 'NOR', 'CAN', 'I', 'DRINK', 'IF', 'I', 'BE', 'NOT', 'FAIRLY', 'SPOKE', 'TO'] +4198-12259-0030-233: ref=['HO', 'THIS', 'WILL', 'BANG', 'IT', 
'SOUNDLY'] +4198-12259-0030-233: hyp=['OH', 'THIS', 'WAS', 'BEING', "IT'S", 'ONLY'] +4198-12259-0031-234: ref=['BUT', 'THIS', 'SHALL', 'BANISH', 'IT', 'UTTERLY'] +4198-12259-0031-234: hyp=['BUT', 'THIS', 'SHALL', 'BANISH', 'THEE', 'UTTERLY'] +4198-12259-0032-235: ref=['LET', 'US', 'WIND', 'OUR', 'HORNS', 'BY', 'THE', 'SOUND', 'OF', 'FLAGONS', 'AND', 'BOTTLES', 'AND', 'CRY', 'ALOUD', 'THAT', 'WHOEVER', 'HATH', 'LOST', 'HIS', 'THIRST', 'COME', 'NOT', 'HITHER', 'TO', 'SEEK', 'IT'] +4198-12259-0032-235: hyp=['LET', 'US', 'WIND', 'OUR', 'HORNS', 'BY', 'THE', 'SOUND', 'OF', 'FLAGONS', 'AND', 'BOTTLES', 'AND', 'CRY', 'ALOUD', 'THERE', 'WHOEVER', 'HATH', 'LOST', 'HIS', 'THIRST', 'COME', 'NIGH', 'HITHER', 'TO', 'SEEK', 'IT'] +4198-12259-0033-236: ref=['THE', 'GREAT', 'GOD', 'MADE', 'THE', 'PLANETS', 'AND', 'WE', 'MAKE', 'THE', 'PLATTERS', 'NEAT'] +4198-12259-0033-236: hyp=['THE', 'GREAT', 'GOD', 'MADE', 'THE', 'PLANETS', 'AND', 'WE', 'MAKE', 'THE', 'PLATTERS', 'NEAT'] +4198-12259-0034-237: ref=['APPETITE', 'COMES', 'WITH', 'EATING', 'SAYS', 'ANGESTON', 'BUT', 'THE', 'THIRST', 'GOES', 'AWAY', 'WITH', 'DRINKING'] +4198-12259-0034-237: hyp=['APPETITE', 'COUNT', 'WITH', 'EATING', 'SAYS', 'ANGISTON', 'BUT', 'THE', 'THIRST', 'GOES', 'AWAY', 'WITH', 'DRINKING'] +4198-12259-0035-238: ref=['I', 'HAVE', 'A', 'REMEDY', 'AGAINST', 'THIRST', 'QUITE', 'CONTRARY', 'TO', 'THAT', 'WHICH', 'IS', 'GOOD', 'AGAINST', 'THE', 'BITING', 'OF', 'A', 'MAD', 'DOG'] +4198-12259-0035-238: hyp=['I', 'HAVE', 'A', 'REMEDY', 'AGAINST', 'THIRST', 'QUITE', 'CONTRARY', 'TO', 'THAT', 'WHICH', 'IS', 'GOOD', 'AGAINST', 'ABIDING', 'OF', 'A', 'MAN', 'DOLE'] +4198-12259-0036-239: ref=['WHITE', 'WINE', 'HERE', 'WINE', 'BOYS'] +4198-12259-0036-239: hyp=['WHY', 'HERE', 'WHY', 'BOYS'] +4198-12259-0037-240: ref=['O', 'LACHRYMA', 'CHRISTI', 'IT', 'IS', 'OF', 'THE', 'BEST', 'GRAPE'] +4198-12259-0037-240: hyp=['O', 'LACK', 'REMO', 'CHRISTI', 'IT', 'IS', 'OF', 'THE', 'BEST', 'GRAPE'] +4198-12259-0038-241: ref=["I'FAITH", 'PURE', 'GREEK', 'GREEK', 'O', 'THE', 'FINE', 'WHITE', 'WINE'] +4198-12259-0038-241: hyp=['I', 'FAITH', 'PURE', 'GREEK', 'GREEK', 'O', 'THE', 'FINE', 'WHITE', 'WINE'] +4198-12259-0039-242: ref=['THERE', 'IS', 'NO', 'ENCHANTMENT', 'NOR', 'CHARM', 'THERE', 'EVERY', 'ONE', 'OF', 'YOU', 'HATH', 'SEEN', 'IT'] +4198-12259-0039-242: hyp=['THERE', 'IS', 'NO', 'ENCHANTMENT', 'NOR', 'CHARM', 'THERE', 'EVERY', 'ONE', 'OF', 'YOU', 'HATH', 'SEEN', 'IT'] +4198-12259-0040-243: ref=['MY', 'PRENTICESHIP', 'IS', 'OUT', 'I', 'AM', 'A', 'FREE', 'MAN', 'AT', 'THIS', 'TRADE'] +4198-12259-0040-243: hyp=['MY', 'PREDICUP', 'IS', 'OUT', "I'M", 'A', 'FREE', 'MAN', 'AT', 'THIS', 'TRADE'] +4198-12259-0041-244: ref=['I', 'SHOULD', 'SAY', 'MASTER', 'PAST'] +4198-12259-0041-244: hyp=['AS', 'YOU', 'SEE', 'MASTER', 'PASS'] +4198-12259-0042-245: ref=['O', 'THE', 'DRINKERS', 'THOSE', 'THAT', 'ARE', 'A', 'DRY', 'O', 'POOR', 'THIRSTY', 'SOULS'] +4198-12259-0042-245: hyp=['OH', 'THE', 'DRINKERS', 'THOSE', 'THAT', 'ARE', 'DRY', 'O', 'PORT', 'THIRSTY', 'SOULS'] +4198-12259-0043-246: ref=['CLEAR', 'OFF', 'NEAT', 'SUPERNACULUM'] +4198-12259-0043-246: hyp=['CLEAR', 'OFF', 'MEAT', 'SUPERNACULUM'] +4198-12281-0000-187: ref=['ALTHOUGH', 'THE', 'PLAGUE', 'WAS', 'THERE', 'IN', 'THE', 'MOST', 'PART', 'OF', 'ALL', 'THE', 'HOUSES', 'THEY', 'NEVERTHELESS', 'ENTERED', 'EVERYWHERE', 'THEN', 'PLUNDERED', 'AND', 'CARRIED', 'AWAY', 'ALL', 'THAT', 'WAS', 'WITHIN', 'AND', 'YET', 'FOR', 'ALL', 'THIS', 'NOT', 'ONE', 'OF', 'THEM', 'TOOK', 'ANY', 'HURT', 'WHICH', 'IS', 'A', 'MOST', 'WONDERFUL', 
'CASE'] +4198-12281-0000-187: hyp=['ALTHOUGH', 'THE', 'PLAGUE', 'WAS', 'THERE', 'IN', 'THE', 'MOST', 'PART', 'OF', 'ALL', 'THE', 'HOUSES', 'THEY', 'NEVERTHELESS', 'ENTERED', 'EVERYWHERE', 'THEN', 'PLUNDERED', 'AND', 'CARRIED', 'AWAY', 'ALL', 'THAT', 'WAS', 'WITHIN', 'AND', 'YET', 'FOR', 'ALL', 'THIS', 'NOT', 'ONE', 'OF', 'THEM', 'TOOK', 'ANY', 'HURT', 'WHICH', 'IS', 'A', 'MOST', 'WONDERFUL', 'CASE'] +4198-12281-0001-188: ref=['I', 'BESEECH', 'YOU', 'THINK', 'UPON', 'IT'] +4198-12281-0001-188: hyp=['I', 'BESEECH', 'YOU', 'THINK', 'UPON', 'IT'] +4198-12281-0002-189: ref=['NEVERTHELESS', 'AT', 'ALL', 'ADVENTURES', 'THEY', 'RANG', 'THE', 'BELLS', 'AD', 'CAPITULUM', 'CAPITULANTES'] +4198-12281-0002-189: hyp=['NEVERTHELESS', 'AT', 'ALL', 'VENTURES', 'THEY', 'RANG', 'THE', 'BELLS', 'AT', 'CAPITULUM', 'CAPITULAT', 'DAYS'] +4198-12281-0003-190: ref=['BY', 'THE', 'VIRTUE', 'OF', 'GOD', 'WHY', 'DO', 'NOT', 'YOU', 'SING', 'PANNIERS', 'FAREWELL', 'VINTAGE', 'IS', 'DONE'] +4198-12281-0003-190: hyp=['BY', 'THE', 'VIRTUE', 'OF', 'GOD', 'WHY', 'DO', 'NOT', 'YOU', 'SING', 'PANNIERS', 'FAREWELL', 'VENTAGE', 'IS', 'NONE'] +4198-12281-0004-191: ref=['BY', 'THE', 'BELLY', 'OF', 'SANCT', 'JAMES', 'WHAT', 'SHALL', 'WE', 'POOR', 'DEVILS', 'DRINK', 'THE', 'WHILE'] +4198-12281-0004-191: hyp=['BY', 'THE', 'BELLY', 'OF', 'SAINT', 'JAMES', 'WHICH', 'SHALL', 'WE', 'POOR', 'DEVILS', 'DRINK', 'THE', 'WHILE'] +4198-12281-0005-192: ref=['LORD', 'GOD', 'DA', 'MIHI', 'POTUM'] +4198-12281-0005-192: hyp=['LORD', 'GOD', 'DALMY', 'HE', 'POT', 'EM'] +4198-12281-0006-193: ref=['LET', 'HIM', 'BE', 'CARRIED', 'TO', 'PRISON', 'FOR', 'TROUBLING', 'THE', 'DIVINE', 'SERVICE'] +4198-12281-0006-193: hyp=['LET', 'HIM', 'BE', 'CARRIED', 'THE', 'PRISON', 'FOR', 'TROUBLING', 'THE', 'DIVINE', 'SERVICE'] +4198-12281-0007-194: ref=['WHEREFORE', 'IS', 'IT', 'THAT', 'OUR', 'DEVOTIONS', 'WERE', 'INSTITUTED', 'TO', 'BE', 'SHORT', 'IN', 'THE', 'TIME', 'OF', 'HARVEST', 'AND', 'VINTAGE', 'AND', 'LONG', 'IN', 'THE', 'ADVENT', 'AND', 'ALL', 'THE', 'WINTER'] +4198-12281-0007-194: hyp=['WHEREFORE', 'IS', 'IT', 'THAT', 'OUR', 'DEVOTIONS', 'WERE', 'INSTITUTED', 'TO', 'BE', 'SHORT', 'IN', 'THE', 'TIME', 'OF', 'HARVEST', 'AND', 'VINTAGE', 'AND', 'LONG', 'IN', 'ADVENT', 'IN', 'ALL', 'THE', 'WINTER'] +4198-12281-0008-195: ref=['HARK', 'YOU', 'MY', 'MASTERS', 'YOU', 'THAT', 'LOVE', 'THE', 'WINE', "COP'S", 'BODY', 'FOLLOW', 'ME', 'FOR', 'SANCT', 'ANTHONY', 'BURN', 'ME', 'AS', 'FREELY', 'AS', 'A', 'FAGGOT', 'IF', 'THEY', 'GET', 'LEAVE', 'TO', 'TASTE', 'ONE', 'DROP', 'OF', 'THE', 'LIQUOR', 'THAT', 'WILL', 'NOT', 'NOW', 'COME', 'AND', 'FIGHT', 'FOR', 'RELIEF', 'OF', 'THE', 'VINE'] +4198-12281-0008-195: hyp=["AREN'T", 'YOU', 'MY', 'MASTERS', 'YOU', 'THAT', 'LOVE', 'THEM', 'WHY', "COP'S", 'BODY', 'FOLLOW', 'ME', 'FOR', 'SAINT', 'ANTHONY', 'BURN', 'ME', 'AS', 'FREELY', 'AS', 'A', 'FAGGOT', 'THEY', 'GET', 'LEAVE', 'TO', 'TASTE', 'ONE', 'DROP', 'OF', 'THE', 'LIQUOR', 'THAT', 'WOULD', 'NOT', 'NOW', 'COME', 'AND', 'FIGHT', 'FOR', 'RELIEF', 'OF', 'THE', 'VINE'] +4198-12281-0009-196: ref=['TO', 'OTHERS', 'AGAIN', 'HE', 'UNJOINTED', 'THE', 'SPONDYLES', 'OR', 'KNUCKLES', 'OF', 'THE', 'NECK', 'DISFIGURED', 'THEIR', 'CHAPS', 'GASHED', 'THEIR', 'FACES', 'MADE', 'THEIR', 'CHEEKS', 'HANG', 'FLAPPING', 'ON', 'THEIR', 'CHIN', 'AND', 'SO', 'SWINGED', 'AND', 'BALAMMED', 'THEM', 'THAT', 'THEY', 'FELL', 'DOWN', 'BEFORE', 'HIM', 'LIKE', 'HAY', 'BEFORE', 'A', 'MOWER'] +4198-12281-0009-196: hyp=['TO', 'OTHERS', 'AGAIN', 'HE', 'UNJOINTED', 'THE', 'SPIND', 'MULES', 'OR', 'KNUCKLES', 'OF', 
'THE', 'NECK', 'THIS', 'FIGURED', 'THEIR', 'CHAPS', 'GASH', 'THEIR', 'FACES', 'MADE', 'THEIR', 'CHEEKS', 'HANG', 'FLAPPING', 'ON', 'THEIR', 'CHIN', 'AND', 'SO', 'SWINGED', 'AND', 'BLAMMED', 'THEM', 'THAT', 'THEY', 'FELL', 'DOWN', 'BEFORE', 'HIM', 'LIKE', 'HAY', 'BEFORE', 'HIM', 'OVER'] +4198-12281-0010-197: ref=['TO', 'SOME', 'WITH', 'A', 'SMART', 'SOUSE', 'ON', 'THE', 'EPIGASTER', 'HE', 'WOULD', 'MAKE', 'THEIR', 'MIDRIFF', 'SWAG', 'THEN', 'REDOUBLING', 'THE', 'BLOW', 'GAVE', 'THEM', 'SUCH', 'A', 'HOMEPUSH', 'ON', 'THE', 'NAVEL', 'THAT', 'HE', 'MADE', 'THEIR', 'PUDDINGS', 'TO', 'GUSH', 'OUT'] +4198-12281-0010-197: hyp=['TO', 'SOME', 'WOULD', 'THEY', 'SMART', 'SOUS', 'ON', 'THEIR', 'EPIGASTER', 'HE', 'WOULD', 'MAKE', 'THEIR', 'MIDRIFTS', 'WAG', 'THEN', 'REDOUBLING', 'THE', 'BLOW', 'GAVE', 'THEM', 'SUCH', 'A', 'HOME', 'PUSH', 'ON', 'THE', 'NAVEL', 'THAT', 'HE', 'MADE', 'THEIR', 'PUDDINGS', 'TO', 'GUSH', 'OUT'] +4198-12281-0011-198: ref=['BELIEVE', 'THAT', 'IT', 'WAS', 'THE', 'MOST', 'HORRIBLE', 'SPECTACLE', 'THAT', 'EVER', 'ONE', 'SAW'] +4198-12281-0011-198: hyp=['BELIEVE', 'THEN', 'IT', 'WAS', 'THE', 'MOST', 'HORRIBLE', 'SPECTACLE', 'THAT', 'EVER', 'WON', 'SAW'] +4198-12281-0012-199: ref=['O', 'THE', 'HOLY', 'LADY', 'NYTOUCH', 'SAID', 'ONE', 'THE', 'GOOD', 'SANCTESS', 'O', 'OUR', 'LADY', 'OF', 'SUCCOURS', 'SAID', 'ANOTHER', 'HELP', 'HELP'] +4198-12281-0012-199: hyp=['OH', 'THE', 'HOLY', 'LADY', 'KNIGHTSAGE', 'SAID', 'ONE', 'THE', 'GOOD', 'SANCTUS', 'O', 'OUR', 'LADY', 'OFURUS', 'SAID', 'ANOTHER', 'HELP', 'HELP'] +4198-12281-0013-200: ref=['SOME', 'DIED', 'WITHOUT', 'SPEAKING', 'OTHERS', 'SPOKE', 'WITHOUT', 'DYING', 'SOME', 'DIED', 'IN', 'SPEAKING', 'OTHERS', 'SPOKE', 'IN', 'DYING'] +4198-12281-0013-200: hyp=['SOME', 'DIED', 'WITHOUT', 'SPEAKING', 'OTHERS', 'SPOKE', 'WITHOUT', 'DYING', 'SOME', 'DIED', 'IN', 'SPEAKING', 'OTHERS', 'SPOKE', 'AND', 'DYING'] +4198-12281-0014-201: ref=['CAN', 'YOU', 'TELL', 'WITH', 'WHAT', 'INSTRUMENTS', 'THEY', 'DID', 'IT'] +4198-12281-0014-201: hyp=['CAN', 'YOU', 'TELL', 'WITH', 'WHAT', 'INSTRUMENTS', 'THEY', 'DID', 'IT'] +4198-12281-0015-202: ref=['IN', 'THE', 'MEANTIME', 'FRIAR', 'JOHN', 'WITH', 'HIS', 'FORMIDABLE', 'BATON', 'OF', 'THE', 'CROSS', 'GOT', 'TO', 'THE', 'BREACH', 'WHICH', 'THE', 'ENEMIES', 'HAD', 'MADE', 'AND', 'THERE', 'STOOD', 'TO', 'SNATCH', 'UP', 'THOSE', 'THAT', 'ENDEAVOURED', 'TO', 'ESCAPE'] +4198-12281-0015-202: hyp=['IN', 'THE', 'MEANTIME', 'FRY', 'JOHN', 'WITH', 'HIS', 'FORMIDABLE', 'BATON', 'OF', 'THE', 'CROSS', 'GOT', 'TO', 'THE', 'BREACH', 'WHICH', 'THE', 'ENEMIES', 'HAD', 'MADE', 'AND', 'THERE', 'STOOD', 'TO', 'SNATCH', 'UP', 'THOSE', 'THAT', 'ENDEAVOURED', 'TO', 'ESCAPE'] +4198-61336-0000-247: ref=['IT', 'IS', 'SIGNIFICANT', 'TO', 'NOTE', 'IN', 'THIS', 'CONNECTION', 'THAT', 'THE', 'NEW', 'KING', 'WAS', 'AN', 'UNSWERVING', 'ADHERENT', 'OF', 'THE', 'CULT', 'OF', 'ASHUR', 'BY', 'THE', 'ADHERENTS', 'OF', 'WHICH', 'HE', 'WAS', 'PROBABLY', 'STRONGLY', 'SUPPORTED'] +4198-61336-0000-247: hyp=['IT', 'IS', 'SIGNIFICANT', 'TO', 'NOTE', 'IN', 'THIS', 'CONNECTION', 'THAT', 'THE', 'NEW', 'KING', 'WAS', 'AN', 'UNSWERVING', 'ADHERENT', 'OF', 'THE', 'CULT', 'OF', 'ASHER', 'BY', 'THE', 'ADHERENCE', 'OF', 'WHICH', 'HE', 'WAS', 'PROBABLY', 'STRONGLY', 'SUPPORTED'] +4198-61336-0001-248: ref=['AT', 'THE', 'BEGINNING', 'OF', 'HIS', 'REIGN', 'THERE', 'WAS', 'MUCH', 'SOCIAL', 'DISCONTENT', 'AND', 'SUFFERING'] +4198-61336-0001-248: hyp=['AT', 'THE', 'BEGINNING', 'OF', 'HIS', 'REIGN', 'THERE', 'WAS', 'MUCH', 'SOCIAL', 'DISCONTENT', 'AND', 'SUFFERING'] 
+4198-61336-0002-249: ref=['WELL', 'MIGHT', 'SHARDURIS', 'EXCLAIM', 'IN', 'THE', 'WORDS', 'OF', 'THE', 'PROPHET', 'WHERE', 'IS', 'THE', 'KING', 'OF', 'ARPAD'] +4198-61336-0002-249: hyp=['WELL', 'MIGHT', 'YOUR', 'DEARUS', 'EXCLAIM', 'IN', 'THE', 'WORDS', 'OF', 'THE', 'PROPHET', 'WHERE', 'IS', 'THE', 'KING', 'OF', 'ARPET'] +4198-61336-0003-250: ref=['TIGLATH', 'PILESER', 'HOWEVER', 'CROSSED', 'THE', 'EUPHRATES', 'AND', 'MOVING', 'NORTHWARD', 'DELIVERED', 'AN', 'UNEXPECTED', 'ATTACK', 'ON', 'THE', 'URARTIAN', 'ARMY', 'IN', 'QUMMUKH'] +4198-61336-0003-250: hyp=['TIGG', 'LAST', 'BELIEVER', 'HOWEVER', 'CROSSED', 'THE', 'EUPHADIS', 'AND', 'MOVING', 'NORTHWARD', 'DELIVERED', 'AN', 'UNEXPECTED', 'ATTACK', 'ON', 'THE', 'RACIAN', 'ARMY', 'AND', 'KUMAK'] +4198-61336-0004-251: ref=['A', 'FIERCE', 'BATTLE', 'ENSUED', 'AND', 'ONE', 'OF', 'ITS', 'DRAMATIC', 'INCIDENTS', 'WAS', 'A', 'SINGLE', 'COMBAT', 'BETWEEN', 'THE', 'RIVAL', 'KINGS'] +4198-61336-0004-251: hyp=['A', 'FIERCE', 'BATTLE', 'ENSUED', 'AND', 'ONE', 'OF', 'HIS', 'DRAMATIC', 'INCIDENTS', 'WAS', 'A', 'SINGLE', 'COMBAT', 'BETWEEN', 'THE', 'RIVAL', 'KINGS'] +4198-61336-0005-252: ref=['AN', 'ATTEMPT', 'WAS', 'MADE', 'TO', 'CAPTURE', 'KING', 'SHARDURIS', 'WHO', 'LEAPT', 'FROM', 'HIS', 'CHARIOT', 'AND', 'MADE', 'HASTY', 'ESCAPE', 'ON', 'HORSEBACK', 'HOTLY', 'PURSUED', 'IN', 'THE', 'GATHERING', 'DARKNESS', 'BY', 'AN', 'ASSYRIAN', 'CONTINGENT', 'OF', 'CAVALRY'] +4198-61336-0005-252: hyp=['AN', 'ATTEMPT', 'WAS', 'MADE', 'TO', 'CAPTURE', 'KING', 'CHAUDURUS', 'WHO', 'LEAPT', 'FROM', 'HIS', 'CHARIOT', 'AND', 'MADE', 'HASTY', 'ESCAPE', 'ON', 'HORSEBACK', 'HOTLY', 'PURSUED', 'AND', 'THE', 'GATHERING', 'DARKNESS', 'BY', 'AN', 'ASSYRIAN', 'CONTENDENT', 'OF', 'CAVALRY'] +4198-61336-0006-253: ref=['DESPITE', 'THE', 'BLOW', 'DEALT', 'AGAINST', 'URARTU', 'ASSYRIA', 'DID', 'NOT', 'IMMEDIATELY', 'REGAIN', 'POSSESSION', 'OF', 'NORTH', 'SYRIA'] +4198-61336-0006-253: hyp=['DESPITE', 'THE', 'BLOW', 'DEALT', 'AGAINST', 'YOU', 'ARE', 'TO', 'ASSYRIA', 'DID', 'NOT', 'IMMEDIATELY', 'REGAIN', 'POSSESSION', 'OF', 'NORTH', 'SYRIA'] +4198-61336-0007-254: ref=['THE', 'SHIFTY', 'MATI', 'ILU', 'EITHER', 'CHERISHED', 'THE', 'HOPE', 'THAT', 'SHARDURIS', 'WOULD', 'RECOVER', 'STRENGTH', 'AND', 'AGAIN', 'INVADE', 'NORTH', 'SYRIA', 'OR', 'THAT', 'HE', 'MIGHT', 'HIMSELF', 'ESTABLISH', 'AN', 'EMPIRE', 'IN', 'THAT', 'REGION'] +4198-61336-0007-254: hyp=['THE', 'SHIFTY', 'MANTIL', 'ILU', 'EITHER', 'CHERISHED', 'THE', 'HOPE', 'THAT', 'SHALL', 'DORIS', 'WOULD', 'RECOVER', 'STRENGTH', 'AND', 'AGAIN', 'INVADE', 'NORTH', 'SYRIA', 'OR', 'THAT', 'HE', 'MIGHT', 'HIMSELF', 'ESTABLISH', 'AN', 'EMPIRE', 'IN', 'THAT', 'REGION'] +4198-61336-0008-255: ref=['TIGLATH', 'PILESER', 'HAD', 'THEREFORE', 'TO', 'MARCH', 'WESTWARD', 'AGAIN'] +4198-61336-0008-255: hyp=['TIG', 'GLASS', 'BELIEVER', 'HAD', 'THEREFORE', 'TO', 'MARCH', 'WESTWARD', 'AGAIN'] +4198-61336-0009-256: ref=['FOR', 'THREE', 'YEARS', 'HE', 'CONDUCTED', 'VIGOROUS', 'CAMPAIGNS', 'IN', 'THE', 'WESTERN', 'LAND', 'WHERE', 'HE', 'MET', 'WITH', 'VIGOROUS', 'RESISTANCE'] +4198-61336-0009-256: hyp=['FOR', 'THREE', 'YEARS', 'HE', 'CONDUCTED', 'VIGOROUS', 'CAMPAIGNS', 'IN', 'THE', 'WESTERN', 'LAND', 'WHERE', 'HE', 'MET', 'WITH', 'VIGOROUS', 'RESISTANCE'] +4198-61336-0010-257: ref=['ARPAD', 'WAS', 'CAPTURED', 'AND', 'MATI', 'ILU', 'DEPOSED', 'AND', 'PROBABLY', 'PUT', 'TO', 'DEATH'] +4198-61336-0010-257: hyp=['OUR', 'PAD', 'WAS', 'CAPTURED', 'AND', 'MEANT', 'TO', 'ILL', 'YOU', 'DEPOSED', 'AND', 'PROBABLY', 'PUT', 'TO', 'DEATH'] +4198-61336-0011-258: 
ref=['ONCE', 'AGAIN', 'THE', 'HEBREWS', 'CAME', 'INTO', 'CONTACT', 'WITH', 'ASSYRIA'] +4198-61336-0011-258: hyp=['ONCE', 'AGAIN', 'THE', 'HEBREWS', 'CAME', 'INTO', 'CONTACT', 'WITH', 'THE', 'ZERIA'] +4198-61336-0012-259: ref=['ITS', 'FALL', 'MAY', 'NOT', 'HAVE', 'BEEN', 'UNCONNECTED', 'WITH', 'THE', 'TREND', 'OF', 'EVENTS', 'IN', 'ASSYRIA', 'DURING', 'THE', 'CLOSING', 'YEARS', 'OF', 'THE', 'MIDDLE', 'EMPIRE'] +4198-61336-0012-259: hyp=["IT'S", 'FOR', 'ME', 'NOT', 'HAVE', 'BEEN', 'UNCONNECTED', 'WITH', 'THE', 'TREND', 'OF', 'EVENTS', 'IN', 'A', 'SYRIA', 'DURING', 'THE', 'CLOSING', 'YEARS', 'OF', 'THE', 'MIDDLE', 'EMPIRE'] +4198-61336-0013-260: ref=['JEHOASH', 'THE', 'GRANDSON', 'OF', 'JEHU', 'HAD', 'ACHIEVED', 'SUCCESSES', 'IN', 'CONFLICT', 'WITH', 'DAMASCUS'] +4198-61336-0013-260: hyp=['JOESH', 'THE', 'GRANDSON', 'OF', 'JEHU', 'HAD', 'ACHIEVED', 'SUCCESSES', 'IN', 'CONFLICT', 'WITH', 'DAMASCUS'] +4198-61336-0014-261: ref=['SIX', 'MONTHS', 'AFTERWARDS', 'HE', 'WAS', 'ASSASSINATED', 'BY', 'SHALLUM'] +4198-61336-0014-261: hyp=['SIX', 'MONTHS', 'AFTERWARD', 'HE', 'WAS', 'ASSASSINATED', 'BY', 'CHARLEM'] +4198-61336-0015-262: ref=['THIS', 'USURPER', 'HELD', 'SWAY', 'AT', 'SAMARIA', 'FOR', 'ONLY', 'A', 'MONTH'] +4198-61336-0015-262: hyp=['THIS', 'USURPER', 'HELDS', 'WEIGH', 'AT', 'SAMARIA', 'FOR', 'ONLY', 'A', 'MONTH'] +4198-61336-0016-263: ref=['NO', 'RESISTANCE', 'WAS', 'POSSIBLE', 'ON', 'THE', 'PART', 'OF', 'MENAHEM', 'THE', 'USURPER', 'WHO', 'WAS', 'PROBABLY', 'READY', 'TO', 'WELCOME', 'THE', 'ASSYRIAN', 'CONQUEROR', 'SO', 'THAT', 'BY', 'ARRANGING', 'AN', 'ALLIANCE', 'HE', 'MIGHT', 'SECURE', 'HIS', 'OWN', 'POSITION'] +4198-61336-0016-263: hyp=['NO', 'RESISTANCE', 'WAS', 'POSSIBLE', 'ON', 'THE', 'PART', 'OF', 'MANY', 'HIM', 'THE', 'USURPER', 'WHO', 'WAS', 'PROBABLY', 'READY', 'TO', 'WELCOME', 'THE', 'ASSYRIAN', 'CONQUEROR', 'SO', 'THAT', 'BY', 'ARRANGING', 'AN', 'ALLIANCE', 'HE', 'MIGHT', 'SECURE', 'HIS', 'OWN', 'POSITION'] +4198-61336-0017-264: ref=['TIGLATH', 'PILESER', 'NEXT', 'OPERATED', 'AGAINST', 'THE', 'MEDIAN', 'AND', 'OTHER', 'HILL', 'TRIBES', 'IN', 'THE', 'NORTH', 'EAST'] +4198-61336-0017-264: hyp=['TAKE', 'THAT', 'PLEASURE', 'NEXT', 'OPERATED', 'AGAINST', 'THE', 'MEDIAN', 'AND', 'OTHER', 'HILL', 'TRIBES', 'IN', 'THE', 'NORTHEAST'] +4198-61336-0018-265: ref=['HE', 'OVERTHREW', 'BUILDINGS', 'DESTROYED', 'ORCHARDS', 'AND', 'TRANSPORTED', 'TO', 'NINEVEH', 'THOSE', 'OF', 'THE', 'INHABITANTS', 'HE', 'HAD', 'NOT', 'PUT', 'TO', 'THE', 'SWORD', 'WITH', 'ALL', 'THE', 'LIVE', 'STOCK', 'HE', 'COULD', 'LAY', 'HANDS', 'ON'] +4198-61336-0018-265: hyp=['HE', 'OVERTHREW', 'BUILDINGS', 'DESTROYED', 'ORCHARDS', 'AND', 'TRANSPORTED', 'TO', 'NINEVEH', 'THOSE', 'OF', 'THE', 'INHABITANTS', 'HE', 'HAD', 'NOT', 'PUT', 'TO', 'THIS', 'WOOD', 'WITH', 'ALL', 'THE', 'LIVE', 'STOCK', 'HE', 'COULD', 'LAY', 'HANDS', 'ON'] +4198-61336-0019-266: ref=['THUS', 'WAS', 'URARTU', 'CRIPPLED', 'AND', 'HUMILIATED', 'IT', 'NEVER', 'REGAINED', 'ITS', 'FORMER', 'PRESTIGE', 'AMONG', 'THE', 'NORTHERN', 'STATES'] +4198-61336-0019-266: hyp=['THUS', 'WAS', 'HERE', 'TO', 'CRIPPLED', 'AND', 'HUMILIATED', 'IT', 'NEVER', 'REGAINED', 'ITS', 'FORMER', 'PRESTIGE', 'AMONG', 'THE', 'NORTHERN', 'STATES'] +4198-61336-0020-267: ref=['IN', 'THE', 'FOLLOWING', 'YEAR', 'TIGLATH', 'PILESER', 'RETURNED', 'TO', 'SYRIA'] +4198-61336-0020-267: hyp=['IN', 'THE', 'FOLLOWING', 'YEAR', 'TIG', 'LAS', 'BELIEVER', 'RETURNED', 'TO', 'SYRIA'] +4198-61336-0021-268: ref=['MENAHEM', 'KING', 'OF', 'ISRAEL', 'HAD', 'DIED', 'AND', 'WAS', 'SUCCEEDED', 'BY', 'HIS', 
'SON', 'PEKAHIAH'] +4198-61336-0021-268: hyp=['MANY', 'HIM', 'KING', 'OF', 'ISRAEL', 'HAD', 'DIED', 'AND', 'WAS', 'SUCCEEDED', 'BY', 'HIS', 'SON', 'PEKAHIA'] +4198-61336-0022-269: ref=['JUDAH', 'HAD', 'TAKEN', 'ADVANTAGE', 'OF', 'THE', 'DISTURBED', 'CONDITIONS', 'IN', 'ISRAEL', 'TO', 'ASSERT', 'ITS', 'INDEPENDENCE'] +4198-61336-0022-269: hyp=['JULIA', 'HAD', 'TAKEN', 'ADVANTAGE', 'OF', 'THE', 'DISTURBED', 'CONDITIONS', 'IN', 'ISRAEL', 'TO', 'ASSERT', 'ITS', 'INDEPENDENCE'] +4198-61336-0023-270: ref=['HE', 'CONDEMNED', 'ISRAEL', 'FOR', 'ITS', 'IDOLATRIES', 'AND', 'CRIED'] +4198-61336-0023-270: hyp=['HE', 'CONDEMNED', 'ISRAEL', 'FOR', 'ITS', 'IDOLATRIES', 'AND', 'CRIED'] +4198-61336-0024-271: ref=['FOR', 'THUS', 'SAITH', 'THE', 'LORD', 'UNTO', 'THE', 'HOUSE', 'OF', 'ISRAEL', 'SEEK', 'YE', 'ME', 'AND', 'YE', 'SHALL', 'LIVE', 'HAVE', 'YE', 'OFFERED', 'UNTO', 'ME', 'SACRIFICES', 'AND', 'OFFERINGS', 'IN', 'THE', 'WILDERNESS', 'FORTY', 'YEARS', 'O', 'HOUSE', 'OF', 'ISRAEL'] +4198-61336-0024-271: hyp=['FOR', 'THIS', 'SAITH', 'THE', 'LORD', 'UNTO', 'THE', 'HOUSE', 'OF', 'ISRAEL', 'SEEK', 'YE', 'ME', 'TO', 'LIVE', 'HAVE', 'YE', 'OFFERED', 'UNTO', 'ME', 'SACRIFICES', 'AND', 'OFFERINGS', 'IN', 'THE', 'WILDERNESS', 'FORTY', 'YEARS', 'O', 'HOUSE', 'OF', 'ISRAEL'] +4198-61336-0025-272: ref=['THE', 'REMNANT', 'OF', 'THE', 'PHILISTINES', 'SHALL', 'PERISH'] +4198-61336-0025-272: hyp=['THE', 'REMNANT', 'OF', 'THE', 'PHILISTINES', 'SHALL', 'PERISH'] +4198-61336-0026-273: ref=['ISRAEL', 'WAS', 'ALSO', 'DEALT', 'WITH'] +4198-61336-0026-273: hyp=['ISRAEL', 'WAS', 'ALSO', 'DEALT', 'WITH'] +4198-61336-0027-274: ref=['HE', 'SWEPT', 'THROUGH', 'ISRAEL', 'LIKE', 'A', 'HURRICANE'] +4198-61336-0027-274: hyp=['HE', 'SWEPT', 'THROUGH', 'ISRAEL', 'LIKE', 'A', 'HURRICANE'] +4198-61336-0028-275: ref=['THE', 'PHILISTINES', 'AND', 'THE', 'ARABIANS', 'OF', 'THE', 'DESERT', 'WERE', 'ALSO', 'SUBDUED'] +4198-61336-0028-275: hyp=['THE', 'FAIRLY', 'STEAMS', 'AND', 'THE', 'ARABIANS', 'OF', 'THE', 'DESERT', 'WERE', 'ALSO', 'SUBDUED'] +4198-61336-0029-276: ref=['HE', 'INVADED', 'BABYLONIA'] +4198-61336-0029-276: hyp=['HE', 'INVADED', 'BABYLONIA'] +4198-61336-0030-277: ref=['UKINZER', 'TOOK', 'REFUGE', 'IN', 'HIS', 'CAPITAL', 'SHAPIA', 'WHICH', 'HELD', 'OUT', 'SUCCESSFULLY', 'ALTHOUGH', 'THE', 'SURROUNDING', 'COUNTRY', 'WAS', 'RAVAGED', 'AND', 'DESPOILED'] +4198-61336-0030-277: hyp=['A', 'KINDRED', 'TOOK', 'REFUGE', 'IN', 'HIS', 'CAPITAL', 'SHAPIA', 'WHICH', 'HELD', 'OUT', 'SUCCESSFULLY', 'ALTHOUGH', 'THE', 'SURROUNDING', 'COUNTRY', 'WAS', 'RAVAGED', 'AND', 'DESPOILED'] +4294-14317-0000-1866: ref=['AS', 'I', 'THOUGHT', 'THAT', 'THIS', 'WAS', 'DUE', 'TO', 'SOME', 'FAULT', 'IN', 'THE', 'EARTH', 'I', 'WANTED', 'TO', 'MAKE', 'THESE', 'FIRST', 'EXPERIMENTS', 'BEFORE', 'I', 'UNDERTOOK', 'MY', 'PERSEUS'] +4294-14317-0000-1866: hyp=['AS', 'I', 'THOUGHT', 'THAT', 'THIS', 'WAS', 'DUE', 'TO', 'SOME', 'FAULT', 'IN', 'THE', 'EARTH', 'I', 'WANTED', 'TO', 'MAKE', 'THESE', 'FIRST', 'EXPERIMENTS', 'BEFORE', 'I', 'UNDERTOOK', 'MY', 'PERSEUS'] +4294-14317-0001-1867: ref=['WHEN', 'I', 'SAW', 'THAT', 'THIS', 'BUST', 'CAME', 'OUT', 'SHARP', 'AND', 'CLEAN', 'I', 'SET', 'AT', 'ONCE', 'TO', 'CONSTRUCT', 'A', 'LITTLE', 'FURNACE', 'IN', 'THE', 'WORKSHOP', 'ERECTED', 'FOR', 'ME', 'BY', 'THE', 'DUKE', 'AFTER', 'MY', 'OWN', 'PLANS', 'AND', 'DESIGN', 'IN', 'THE', 'HOUSE', 'WHICH', 'THE', 'DUKE', 'HAD', 'GIVEN', 'ME'] +4294-14317-0001-1867: hyp=['WHEN', 'I', 'SAW', 'THIS', 'BEST', 'CAME', 'OUT', 'SHARP', 'AND', 'CLEAN', 'I', 'SAID', 'AT', 'ONCE', 'TO', 'CONSTRUCT', 
'A', 'LITTLE', 'FURNACE', 'IN', 'THE', 'WORKSHOP', 'ERECTED', 'FOR', 'ME', 'BY', 'THE', 'DUKE', 'AFTER', 'MY', 'OWN', 'PLANS', 'AND', 'DESIGN', 'IN', 'THE', 'HOUSE', 'WHICH', 'THE', 'DUKE', 'HAD', 'GIVEN', 'ME'] +4294-14317-0002-1868: ref=['IT', 'WAS', 'AN', 'EXTREMELY', 'DIFFICULT', 'TASK', 'AND', 'I', 'WAS', 'ANXIOUS', 'TO', 'OBSERVE', 'ALL', 'THE', 'NICETIES', 'OF', 'ART', 'WHICH', 'I', 'HAD', 'LEARNED', 'SO', 'AS', 'NOT', 'TO', 'LAPSE', 'INTO', 'SOME', 'ERROR'] +4294-14317-0002-1868: hyp=['IT', 'WAS', 'AN', 'EXTREMELY', 'DIFFICULT', 'TASK', 'AND', 'I', 'WAS', 'ANXIOUS', 'TO', 'OBSERVE', 'ALL', 'THE', 'NICETIES', 'OF', 'ART', 'WHICH', 'I', 'HAD', 'LEARNED', 'SO', 'AS', 'NOT', 'TO', 'LAPSE', 'INTO', 'SOME', 'ERROR'] +4294-14317-0003-1869: ref=['I', 'IN', 'MY', 'TURN', 'FEEL', 'THE', 'SAME', 'DESIRE', 'AND', 'HOPE', 'TO', 'PLAY', 'MY', 'PART', 'LIKE', 'THEM', 'THEREFORE', 'MY', 'LORD', 'GIVE', 'ME', 'THE', 'LEAVE', 'TO', 'GO'] +4294-14317-0003-1869: hyp=['I', 'IN', 'MY', 'TURN', 'FEEL', 'THE', 'SAME', 'DESIRE', 'AND', 'HOPE', 'TO', 'PLAY', 'MY', 'PART', 'LIKE', 'THEM', 'THEREFORE', 'MY', 'LORD', 'GIVE', 'ME', 'THE', 'LEAVE', 'TO', 'GO'] +4294-14317-0004-1870: ref=['BUT', 'BEWARE', 'OF', 'LETTING', 'BANDINELLO', 'QUIT', 'YOU', 'RATHER', 'BESTOW', 'UPON', 'HIM', 'ALWAYS', 'MORE', 'THAN', 'HE', 'DEMANDS', 'FOR', 'IF', 'HE', 'GOES', 'INTO', 'FOREIGN', 'PARTS', 'HIS', 'IGNORANCE', 'IS', 'SO', 'PRESUMPTUOUS', 'THAT', 'HE', 'IS', 'JUST', 'THE', 'MAN', 'TO', 'DISGRACE', 'OUR', 'MOST', 'ILLUSTRIOUS', 'SCHOOL'] +4294-14317-0004-1870: hyp=['BUT', 'BEWARE', 'OF', 'LETTING', 'BEND', 'NELLO', 'QUIT', 'YOU', 'RATHER', 'BESTOW', 'UPON', 'HIM', 'ALWAYS', 'MORE', 'THAN', 'HE', 'DEMANDS', 'FOR', 'IF', 'HE', 'GOES', 'INTO', 'FOREIGN', 'PARTS', 'HIS', 'IGNORANCE', 'IS', 'SO', 'PRESUMPTUOUS', 'THAT', 'HE', 'IS', 'JUST', 'THE', 'MAN', 'TO', 'DISGRACE', 'OUR', 'MOST', 'ILLUSTRIOUS', 'SCHOOL'] +4294-14317-0005-1871: ref=['I', 'ASK', 'NO', 'FURTHER', 'REWARD', 'FOR', 'MY', 'LABOURS', 'UP', 'TO', 'THIS', 'TIME', 'THAN', 'THE', 'GRACIOUS', 'FAVOUR', 'OF', 'YOUR', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0005-1871: hyp=['I', 'ASKED', 'NO', 'FURTHER', 'REWARD', 'FOR', 'MY', 'LABOURS', 'UP', 'TO', 'THIS', 'TIME', 'THAN', 'THE', 'GRACIOUS', 'FAVOUR', 'OF', 'YOUR', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0006-1872: ref=['THEN', 'I', 'THANKED', 'HIM', 'AND', 'SAID', 'I', 'HAD', 'NO', 'GREATER', 'DESIRE', 'THAN', 'TO', 'SHOW', 'THOSE', 'ENVIOUS', 'FOLK', 'THAT', 'I', 'HAD', 'IT', 'IN', 'ME', 'TO', 'EXECUTE', 'THE', 'PROMISED', 'WORK'] +4294-14317-0006-1872: hyp=['THEN', 'I', 'THANKED', 'HIM', 'AND', 'SAID', 'I', 'HAD', 'NO', 'GREATER', 'DESIRE', 'THAN', 'TO', 'SHOW', 'THOSE', 'ENVIOUS', 'FOLK', 'THAT', 'I', 'HAD', 'IT', 'IN', 'ME', 'TO', 'EXECUTE', 'THE', 'PROMISED', 'WORK'] +4294-14317-0007-1873: ref=['I', 'HAD', 'BETTER', 'LOOK', 'TO', 'MY', 'CONDUCT', 'FOR', 'IT', 'HAD', 'COME', 'TO', 'HIS', 'EARS', 'THAT', 'I', 'RELIED', 'UPON', 'HIS', 'FAVOUR', 'TO', 'TAKE', 'IN', 'FIRST', 'ONE', 'MAN', 'AND', 'THEN', 'ANOTHER'] +4294-14317-0007-1873: hyp=['I', 'HAD', 'BETTER', 'LOOK', 'TO', 'MY', 'CONDUCT', 'FOR', 'IT', 'HAS', 'COME', 'TO', 'HIS', 'EARS', 'THAT', 'I', 'RELIED', 'UPON', 'HIS', 'FAVOUR', 'TO', 'TAKE', 'IN', 'FIRST', 'ONE', 'MAN', 'AND', 'THEN', 'ANOTHER'] +4294-14317-0008-1874: ref=['I', 'BEGGED', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY', 'TO', 'NAME', 'A', 'SINGLE', 'PERSON', 'WHOM', 'I', 'HAD', 'EVER', 'TAKEN', 'IN'] +4294-14317-0008-1874: hyp=['I', 'BEGGED', 'HIS', 'MOST', 'ILLUSTRIOUS', 
'EXCELLENCY', 'TO', 'NAME', 'A', 'SINGLE', 'PERSON', 'WHY', 'HAD', 'EVER', 'TAKEN', 'IN'] +4294-14317-0009-1875: ref=['I', 'SAID', 'MY', 'LORD', 'I', 'THANK', 'YOU', 'AND', 'BEG', 'YOU', 'TO', 'CONDESCEND', 'SO', 'FAR', 'AS', 'TO', 'LISTEN', 'TO', 'FOUR', 'WORDS', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'LENT', 'ME', 'A', 'PAIR', 'OF', 'OLD', 'SCALES', 'TWO', 'ANVILS', 'AND', 'THREE', 'LITTLE', 'HAMMERS', 'WHICH', 'ARTICLES', 'I', 'BEGGED', 'HIS', 'WORKMAN', 'GIORGIO', 'DA', 'CORTONA', 'FIFTEEN', 'DAYS', 'AGO', 'TO', 'FETCH', 'BACK'] +4294-14317-0009-1875: hyp=['I', 'SAID', 'MY', 'LORD', 'I', 'THANK', 'YOU', 'AND', 'BEG', 'YOU', 'TO', 'CONDESCEND', 'SO', 'FAR', 'AS', 'TO', 'LISTEN', 'TO', 'FOUR', 'WORDS', 'IT', 'IS', 'TRUE', 'THAT', 'HE', 'LENT', 'ME', 'A', 'PAIR', 'OF', 'OLD', 'SCALES', 'TWO', 'ANVILS', 'AND', 'THREE', 'LITTLE', 'HAMMERS', 'WHICH', 'ARTICLES', 'I', 'BEGGED', 'HIS', 'WORKMEN', 'GEORGIO', 'DESCORTONA', 'FIFTEEN', 'DAYS', 'AGO', 'TO', 'FETCH', 'BACK'] +4294-14317-0010-1876: ref=['GIORGIO', 'CAME', 'FOR', 'THEM', 'HIMSELF'] +4294-14317-0010-1876: hyp=['GEORGO', 'CAME', 'FOR', 'THEM', 'HIS', 'HEALTH'] +4294-14317-0011-1877: ref=['I', 'HOPE', 'TO', 'PROVE', 'ON', 'WHAT', 'ACCOUNT', 'THAT', 'SCOUNDREL', 'TRIES', 'TO', 'BRING', 'ME', 'INTO', 'DISGRACE'] +4294-14317-0011-1877: hyp=['I', 'HOPE', 'TO', 'PROVE', 'ON', 'WHAT', 'ACCOUNT', 'THAT', 'SCOUNDREL', 'TRIES', 'TO', 'BRING', 'ME', 'INTO', 'DISGRACE'] +4294-14317-0012-1878: ref=['WHEN', 'HE', 'HAD', 'HEARD', 'THIS', 'SPEECH', 'THE', 'DUKE', 'ROSE', 'UP', 'IN', 'ANGER', 'AND', 'SENT', 'FOR', 'BERNARDONE', 'WHO', 'WAS', 'FORCED', 'TO', 'TAKE', 'FLIGHT', 'AS', 'FAR', 'AS', 'VENICE', 'HE', 'AND', 'ANTONIO', 'LANDI', 'WITH', 'HIM'] +4294-14317-0012-1878: hyp=['WHEN', 'HE', 'HAD', 'HEARD', 'THIS', 'SPEECH', 'THE', 'DUKE', 'ROSE', 'UP', 'IN', 'ANGER', 'AND', 'SENT', 'FOR', 'BERNARDONE', 'WHO', 'WAS', 'FORCED', 'TO', 'TAKE', 'FLIGHT', 'AS', 'FAR', 'AS', 'VENICE', 'HE', 'AND', 'ANTONIA', 'LANDY', 'WITH', 'HIM'] +4294-14317-0013-1879: ref=['YOU', 'HAD', 'BETTER', 'PUT', 'THIS', 'TO', 'THE', 'PROOF', 'AND', 'I', 'WILL', 'GO', 'AT', 'ONCE', 'TO', 'THE', 'BARGELLO'] +4294-14317-0013-1879: hyp=['YOU', 'HAD', 'BETTER', 'PUT', 'THIS', 'TO', 'THE', 'PROOF', 'AND', 'I', 'WILL', 'GO', 'AT', 'ONCE', 'TO', 'THE', 'BARGIENLO'] +4294-14317-0014-1880: ref=['I', 'AM', 'WILLING', 'TO', 'ENTER', 'INTO', 'COMPETITION', 'WITH', 'THE', 'ANCIENTS', 'AND', 'FEEL', 'ABLE', 'TO', 'SURPASS', 'THEM', 'FOR', 'SINCE', 'THOSE', 'EARLY', 'DAYS', 'IN', 'WHICH', 'I', 'MADE', 'THE', 'MEDALS', 'OF', 'POPE', 'CLEMENT', 'I', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'THAT', 'I', 'CAN', 'NOW', 'PRODUCE', 'FAR', 'BETTER', 'PIECES', 'OF', 'THE', 'KIND', 'I', 'THINK', 'I', 'CAN', 'ALSO', 'OUTDO', 'THE', 'COINS', 'I', 'STRUCK', 'FOR', 'DUKE', 'ALESSANDRO', 'WHICH', 'ARE', 'STILL', 'HELD', 'IN', 'HIGH', 'ESTEEM', 'IN', 'LIKE', 'MANNER', 'I', 'COULD', 'MAKE', 'FOR', 'YOU', 'LARGE', 'PIECES', 'OF', 'GOLD', 'AND', 'SILVER', 'PLATE', 'AS', 'I', 'DID', 'SO', 'OFTEN', 'FOR', 'THAT', 'NOBLE', 'MONARCH', 'KING', 'FRANCIS', 'OF', 'FRANCE', 'THANKS', 'TO', 'THE', 'GREAT', 'CONVENIENCES', 'HE', 'ALLOWED', 'ME', 'WITHOUT', 'EVER', 'LOSING', 'TIME', 'FOR', 'THE', 'EXECUTION', 'OF', 'COLOSSAL', 'STATUES', 'OR', 'OTHER', 'WORKS', 'OF', 'THE', 'SCULPTORS', 'CRAFT'] +4294-14317-0014-1880: hyp=['I', 'AM', 'WILLING', 'TO', 'ENTER', 'INTO', 'COMPETITION', 'WITH', 'THE', 'ANCIENTS', 'AND', 'FEEL', 'ABLE', 'TO', 'SURPASS', 'THEM', 'FOR', 'SINCE', 'THOSE', 'EARLY', 'DAYS', 'IN', 'WHICH', 'I', 'MADE', 'THE', 'METALS', 
'OF', 'POPE', 'CLEMENT', 'I', 'HAVE', 'LEARNED', 'SO', 'MUCH', 'THAT', 'I', 'CAN', 'NOW', 'PRODUCE', 'FAR', 'BETTER', 'PIECES', 'OF', 'THE', 'KIND', 'I', 'THINK', 'I', 'CAN', 'ALSO', 'OUTDO', 'THE', 'COINS', 'I', 'STRUCK', 'FOR', 'DUKE', 'ALISANDRO', 'WHICH', 'IS', 'STILL', 'HELD', 'IN', 'HIGH', 'ESTEEM', 'IN', 'LIKE', 'MANNER', 'I', 'COULD', 'MAKE', 'FOR', 'YOU', 'LARGE', 'PIECES', 'OF', 'GOLD', 'AND', 'SILVER', 'PLATE', 'AS', 'I', 'DID', 'SO', 'OFTEN', 'FOR', 'THAT', 'NOBLE', 'MONARCH', 'KING', 'FRANCIS', 'OF', 'FRANCE', 'THANKS', 'TO', 'THE', 'GREAT', 'CONVENIENCES', 'HE', 'ALLOWED', 'ME', 'WITHOUT', 'EVER', 'LOSING', 'TIME', 'FOR', 'THE', 'EXECUTION', 'OF', 'COLOSSAL', 'STATUES', 'OR', 'OTHER', 'WORKS', 'OF', 'THE', "SCULPTOR'S", 'CRAFT'] +4294-14317-0015-1881: ref=['AFTER', 'SEVERAL', 'MONTHS', 'WERE', 'WASTED', 'AND', 'PIERO', 'WOULD', 'NEITHER', 'WORK', 'NOR', 'PUT', 'MEN', 'TO', 'WORK', 'UPON', 'THE', 'PIECE', 'I', 'MADE', 'HIM', 'GIVE', 'IT', 'BACK'] +4294-14317-0015-1881: hyp=['AFTER', 'SEVERAL', 'MONTHS', 'WERE', 'WASTED', 'AND', 'PIERO', 'WOULD', 'NEITHER', 'WORK', 'NOR', 'PUT', 'MEN', 'TO', 'WORK', 'UPON', 'THE', 'PIECE', 'I', 'MADE', 'HIM', 'GIVE', 'IT', 'BACK'] +4294-14317-0016-1882: ref=['AMONG', 'ARTISTS', 'CERTAIN', 'ENRAGED', 'SCULPTORS', 'LAUGHED', 'AT', 'ME', 'AND', 'CALLED', 'ME', 'THE', 'NEW', 'SCULPTOR'] +4294-14317-0016-1882: hyp=['AMONG', 'ARTISTS', 'CERTAIN', 'ENRAGE', 'SCULPTORS', 'LAUGHED', 'AT', 'ME', 'AND', 'CALLED', 'ME', 'THE', 'NEW', 'SCULPTOR'] +4294-14317-0017-1883: ref=['NOW', 'I', 'HOPE', 'TO', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'AN', 'OLD', 'SCULPTOR', 'IF', 'GOD', 'SHALL', 'GRANT', 'ME', 'THE', 'BOON', 'OF', 'FINISHING', 'MY', 'PERSEUS', 'FOR', 'THAT', 'NOBLE', 'PIAZZA', 'OF', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0017-1883: hyp=['NOW', 'I', 'HOPE', 'TO', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'AN', 'OLD', 'SCULPTOR', 'IF', 'GOD', 'SHALL', 'GRANT', 'ME', 'THE', 'BOON', 'OF', 'FINISHING', 'MY', 'PERSEUS', 'FOR', 'THAT', 'NOBLE', 'PIAZZA', 'OF', 'HIS', 'MOST', 'ILLUSTRIOUS', 'EXCELLENCY'] +4294-14317-0018-1884: ref=['HAVING', 'THIS', 'EXCELLENT', 'RESOLVE', 'IN', 'HEART', 'I', 'REACHED', 'MY', 'HOME'] +4294-14317-0018-1884: hyp=['HAVING', 'THIS', 'EXCELLENT', 'RESOLVE', 'IN', 'HEART', 'I', 'REACHED', 'MY', 'HOME'] +4294-32859-0000-1942: ref=['WYLDER', 'WAS', 'RATHER', 'SURLY', 'AFTER', 'THE', 'LADIES', 'HAD', 'FLOATED', 'AWAY', 'FROM', 'THE', 'SCENE', 'AND', 'HE', 'DRANK', 'HIS', 'LIQUOR', 'DOGGEDLY'] +4294-32859-0000-1942: hyp=['WYLDER', 'WAS', 'RATHER', 'SURLY', 'AFTER', 'THE', 'LADIES', 'HAD', 'FLOATED', 'AWAY', 'FROM', 'THE', 'SCENE', 'AND', 'HE', 'DRANK', 'HIS', 'LIQUOR', 'DOGGEDLY'] +4294-32859-0001-1943: ref=['IT', 'WAS', 'HIS', 'FANCY', 'I', 'SUPPOSE', 'TO', 'REVIVE', 'CERTAIN', 'SENTIMENTAL', 'RELATIONS', 'WHICH', 'HAD', 'IT', 'MAY', 'BE', 'ONCE', 'EXISTED', 'BETWEEN', 'HIM', 'AND', 'MISS', 'LAKE', 'AND', 'HE', 'WAS', 'A', 'PERSON', 'OF', 'THAT', 'COMBATIVE', 'TEMPERAMENT', 'THAT', 'MAGNIFIES', 'AN', 'OBJECT', 'IN', 'PROPORTION', 'AS', 'ITS', 'PURSUIT', 'IS', 'THWARTED'] +4294-32859-0001-1943: hyp=['IT', 'WAS', 'HIS', 'FANCY', 'I', 'SUPPOSE', 'TO', 'REVIVE', 'CERTAIN', 'SENTIMENTAL', 'RELATIONS', 'WHICH', 'HAD', 'IT', 'MAY', 'BE', 'ONCE', 'EXISTED', 'BETWEEN', 'HIM', 'AND', 'MISS', 'LAKE', 'AND', 'HE', 'WAS', 'A', 'PERSON', 'OF', 'THAT', 'COMBATIVE', 'TEMPERAMENT', 'THAT', 'MAGNIFIES', 'AN', 'OBJECT', 'IN', 'PROPORTION', 'AS', 'ITS', 'PURSUIT', 'IS', 'THWARTED'] +4294-32859-0002-1944: ref=['THE', 'STORY', 'OF', 'FRIDOLIN', 'AND', 
"RETZCH'S", 'PRETTY', 'OUTLINES'] +4294-32859-0002-1944: hyp=['THE', 'STORY', 'OF', 'FRIDOLIN', 'AND', 'WRETCH', 'IS', 'PRETTY', 'OUTLINES'] +4294-32859-0003-1945: ref=['SIT', 'DOWN', 'BESIDE', 'ME', 'AND', "I'LL", 'TELL', 'YOU', 'THE', 'STORY'] +4294-32859-0003-1945: hyp=['SIT', 'DOWN', 'BESIDE', 'ME', 'AND', "I'LL", 'TELL', 'YOU', 'THE', 'STORY'] +4294-32859-0004-1946: ref=['HE', 'ASSISTED', 'AT', 'IT', 'BUT', 'TOOK', 'NO', 'PART', 'AND', 'IN', 'FACT', 'WAS', 'LISTENING', 'TO', 'THAT', 'OTHER', 'CONVERSATION', 'WHICH', 'SOUNDED', 'WITH', 'ITS', 'PLEASANT', 'GABBLE', 'AND', 'LAUGHTER', 'LIKE', 'A', 'LITTLE', 'MUSICAL', 'TINKLE', 'OF', 'BELLS', 'IN', 'THE', 'DISTANCE'] +4294-32859-0004-1946: hyp=['HE', 'ASSISTED', 'AT', 'IT', 'BUT', 'TOOK', 'NO', 'PART', 'AND', 'IN', 'FACT', 'WAS', 'LISTENING', 'TO', 'THAT', 'OTHER', 'CONVERSATION', 'WHICH', 'SOUNDED', 'WITH', 'ITS', 'PLEASANT', 'GABBLE', 'AND', 'LAUGHTER', 'LIKE', 'A', 'LITTLE', 'MUSICAL', 'TINKLE', 'OF', 'BELLS', 'IN', 'THE', 'DISTANCE'] +4294-32859-0005-1947: ref=['BUT', 'HONEST', 'MARK', 'FORGOT', 'THAT', 'YOUNG', 'LADIES', 'DO', 'NOT', 'ALWAYS', 'COME', 'OUT', 'QUITE', 'ALONE', 'AND', 'JUMP', 'UNASSISTED', 'INTO', 'THEIR', 'VEHICLES'] +4294-32859-0005-1947: hyp=['BUT', 'HONEST', 'MARK', 'FORGOT', 'THAT', 'YOUNG', 'LADIES', 'DO', 'NOT', 'ALWAYS', 'COME', 'OUT', 'QUITE', 'ALONE', 'AND', 'JUMP', 'UNASSISTED', 'INTO', 'THEIR', 'VEHICLES'] +4294-35475-0000-1885: ref=['BUT', 'THE', 'MIDDLE', 'SON', 'WAS', 'LITTLE', 'AND', 'LORN', 'HE', 'WAS', 'NEITHER', 'DARK', 'NOR', 'FAIR', 'HE', 'WAS', 'NEITHER', 'HANDSOME', 'NOR', 'STRONG'] +4294-35475-0000-1885: hyp=['BUT', 'THE', 'MIDDLE', 'SUN', 'WAS', 'LITTLE', 'AND', 'LORN', 'HE', 'WAS', 'NEITHER', 'DARK', 'NOR', 'FAIR', 'HE', 'WAS', 'NEITHER', 'HANDSOME', 'NOR', 'STRONG'] +4294-35475-0001-1886: ref=['THROWING', 'HIMSELF', 'ON', 'HIS', 'KNEES', 'BEFORE', 'THE', 'KING', 'HE', 'CRIED', 'OH', 'ROYAL', 'SIRE', 'BESTOW', 'UPON', 'ME', 'ALSO', 'A', 'SWORD', 'AND', 'A', 'STEED', 'THAT', 'I', 'MAY', 'UP', 'AND', 'AWAY', 'TO', 'FOLLOW', 'MY', 'BRETHREN'] +4294-35475-0001-1886: hyp=['ROWING', 'HIMSELF', 'ON', 'HIS', 'KNEES', 'BEFORE', 'THE', 'KING', 'HE', 'CRIED', 'O', 'ROYAL', 'SIRE', 'BESTOW', 'UPON', 'ME', 'ALSO', 'A', 'SWORD', 'AND', 'A', 'STEED', 'THAT', 'I', 'MAY', 'UP', 'AND', 'WAIT', 'TO', 'FOLLOW', 'MY', 'BRETHREN'] +4294-35475-0002-1887: ref=['BUT', 'THE', 'KING', 'LAUGHED', 'HIM', 'TO', 'SCORN', 'THOU', 'A', 'SWORD', 'HE', 'QUOTH'] +4294-35475-0002-1887: hyp=['BUT', 'THE', 'KING', 'LAUGHED', 'HIM', 'TO', 'SCORN', 'THOU', 'A', 'SWORD', 'HE', 'QUOTH'] +4294-35475-0003-1888: ref=['IN', 'SOOTH', 'THOU', 'SHALT', 'HAVE', 'ONE', 'BUT', 'IT', 'SHALL', 'BE', 'ONE', 'BEFITTING', 'THY', 'MAIDEN', 'SIZE', 'AND', 'COURAGE', 'IF', 'SO', 'SMALL', 'A', 'WEAPON', 'CAN', 'BE', 'FOUND', 'IN', 'ALL', 'MY', 'KINGDOM'] +4294-35475-0003-1888: hyp=['IN', 'SOOTH', 'THOU', 'SHALT', 'HAVE', 'ONE', 'BUT', 'IT', 'SHALL', 'BE', 'ONE', 'BEFITTING', 'THY', 'MAIDEN', 'SIGHS', 'AND', 'COURAGE', 'IF', 'SO', 'SMALL', 'A', 'WEAPON', 'CAN', 'BE', 'FOUND', 'IN', 'ALL', 'MY', 'KINGDOM'] +4294-35475-0004-1889: ref=['FORTHWITH', 'THE', 'GRINNING', 'JESTER', 'BEGAN', 'SHRIEKING', 'WITH', 'LAUGHTER', 'SO', 'THAT', 'THE', 'BELLS', 'UPON', 'HIS', 'MOTLEY', 'CAP', 'WERE', 'ALL', 'SET', 'A', 'JANGLING'] +4294-35475-0004-1889: hyp=['FORTHWITH', 'THE', 'GRINNING', 'GESTURE', 'BEGAN', 'SHRIEKING', 'WITH', 'LAUGHTER', 'SO', 'THAT', 'THE', 'BELLS', 'UPON', 'HIS', 'MOTLEY', 'CAP', 'WERE', 'ALL', 'SET', 'A', 'JANGLING'] +4294-35475-0005-1890: 
ref=['I', 'DID', 'BUT', 'LAUGH', 'TO', 'THINK', 'THE', 'SWORD', 'OF', 'ETHELRIED', 'HAD', 'BEEN', 'SO', 'QUICKLY', 'FOUND', 'RESPONDED', 'THE', 'JESTER', 'AND', 'HE', 'POINTED', 'TO', 'THE', 'SCISSORS', 'HANGING', 'FROM', 'THE', "TAILOR'S", 'GIRDLE'] +4294-35475-0005-1890: hyp=['I', 'DID', 'BUT', 'LAUGH', 'TO', 'THINK', 'THE', 'SWORD', 'OF', 'EFFLARIDE', 'HAD', 'BEEN', 'SO', 'QUICKLY', 'FOUND', 'RESPONDED', 'THE', 'JESTER', 'AND', 'HE', 'POINTED', 'TO', 'THE', 'SCISSORS', 'HANGING', 'FROM', 'THE', "TAILOR'S", 'GIRDLE'] +4294-35475-0006-1891: ref=['ONE', 'NIGHT', 'AS', 'HE', 'LAY', 'IN', 'A', 'DEEP', 'FOREST', 'TOO', 'UNHAPPY', 'TO', 'SLEEP', 'HE', 'HEARD', 'A', 'NOISE', 'NEAR', 'AT', 'HAND', 'IN', 'THE', 'BUSHES'] +4294-35475-0006-1891: hyp=['ONE', 'NIGHT', 'AS', 'HE', 'LAY', 'IN', 'A', 'DEEP', 'FOREST', 'TWO', 'UNHAPPY', 'TO', 'SLEEP', 'HE', 'HEARD', 'A', 'NOISE', 'NEAR', 'AT', 'HAND', 'IN', 'THE', 'BUSHES'] +4294-35475-0007-1892: ref=['THOU', 'SHALT', 'HAVE', 'THY', 'LIBERTY', 'HE', 'CRIED', 'EVEN', 'THOUGH', 'THOU', 'SHOULDST', 'REND', 'ME', 'IN', 'PIECES', 'THE', 'MOMENT', 'THOU', 'ART', 'FREE'] +4294-35475-0007-1892: hyp=['THOU', 'SHALT', 'HAVE', 'THY', 'LIBERTY', 'HE', 'CRIED', 'EVEN', 'THOUGH', 'THOU', 'SHOULDST', 'RUN', 'ME', 'IN', 'PIECES', 'THE', 'MOMENT', 'THOU', 'ART', 'FREE'] +4294-35475-0008-1893: ref=['IT', 'HAD', 'SUDDENLY', 'DISAPPEARED', 'AND', 'IN', 'ITS', 'PLACE', 'STOOD', 'A', 'BEAUTIFUL', 'FAIRY', 'WITH', 'FILMY', 'WINGS', 'WHICH', 'SHONE', 'LIKE', 'RAINBOWS', 'IN', 'THE', 'MOONLIGHT'] +4294-35475-0008-1893: hyp=['HE', 'HAD', 'HITHER', 'SUDDENLY', 'DISAPPEARED', 'AND', 'IN', 'ITS', 'PLACE', 'STOOD', 'A', 'BEAUTIFUL', 'FAIRY', 'WITH', 'FILMY', 'WINGS', 'WHICH', 'SHONE', 'LIKE', 'RAINBOWS', 'IN', 'THE', 'MOONLIGHT'] +4294-35475-0009-1894: ref=['AT', 'THIS', 'MOMENT', 'THERE', 'WAS', 'A', 'DISTANT', 'RUMBLING', 'AS', 'OF', 'THUNDER', 'TIS', 'THE', 'OGRE', 'CRIED', 'THE', 'FAIRY', 'WE', 'MUST', 'HASTEN'] +4294-35475-0009-1894: hyp=['AT', 'THIS', 'MOMENT', 'THERE', 'WAS', 'A', 'DISTANT', 'RUMBLING', 'AS', 'OF', 'THUNDER', 'TIS', 'THE', 'OGRE', 'CRIED', 'THE', 'FAIRY', 'WE', 'MUST', 'HASTEN'] +4294-35475-0010-1895: ref=['SCISSORS', 'GROW', 'A', "GIANT'S", 'HEIGHT', 'AND', 'SAVE', 'US', 'FROM', 'THE', "OGRE'S", 'MIGHT'] +4294-35475-0010-1895: hyp=['SCISSORS', 'GROW', 'A', "GIANT'S", 'HEIGHT', 'AND', 'SAVE', 'US', 'FROM', 'THE', 'OGRES', 'MIGHT'] +4294-35475-0011-1896: ref=['HE', 'COULD', 'SEE', 'THE', 'OGRE', 'STANDING', 'POWERLESS', 'TO', 'HURT', 'HIM', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'CHASM', 'AND', 'GNASHING', 'HIS', 'TEETH', 'EACH', 'ONE', 'OF', 'WHICH', 'WAS', 'AS', 'BIG', 'AS', 'A', 'MILLSTON'] +4294-35475-0011-1896: hyp=['HE', 'COULD', 'SEE', 'THE', 'OGRE', 'STANDING', 'POWERLESS', 'TO', 'HURT', 'HIM', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'CHASM', 'AND', 'GNASHING', 'HIS', 'TEETH', 'EACH', 'ONE', 'OF', 'WHICH', 'WAS', 'AS', 'BIG', 'AS', 'A', 'MILLSTONE'] +4294-35475-0012-1897: ref=['THE', 'SIGHT', 'WAS', 'SO', 'TERRIBLE', 'THAT', 'HE', 'TURNED', 'ON', 'HIS', 'HEEL', 'AND', 'FLED', 'AWAY', 'AS', 'FAST', 'AS', 'HIS', 'FEET', 'COULD', 'CARRY', 'HIM'] +4294-35475-0012-1897: hyp=['THE', 'SIGHT', 'WAS', 'SO', 'TERRIBLE', 'THAT', 'HE', 'TURNED', 'ON', 'HIS', 'HEEL', 'AND', 'FLED', 'AWAY', 'AS', 'FAST', 'AS', 'HIS', 'FEET', 'COULD', 'CARRY', 'HIM'] +4294-35475-0013-1898: ref=['THOU', 'SHALT', 'NOT', 'BE', 'LEFT', 'A', 'PRISONER', 'IN', 'THIS', 'DISMAL', 'SPOT', 'WHILE', 'I', 'HAVE', 'THE', 'POWER', 'TO', 'HELP', 'THEE'] +4294-35475-0013-1898: hyp=['THOU', 
'SHALT', 'NOT', 'BE', 'LEFT', 'A', 'PRISONER', 'IN', 'THIS', 'DISMAL', 'SPOT', 'WHILE', 'I', 'HAVE', 'THE', 'POWER', 'TO', 'HELP', 'THEE'] +4294-35475-0014-1899: ref=['HE', 'LIFTED', 'THE', 'SCISSORS', 'AND', 'WITH', 'ONE', 'STROKE', 'DESTROYED', 'THE', 'WEB', 'AND', 'GAVE', 'THE', 'FLY', 'ITS', 'FREEDOM'] +4294-35475-0014-1899: hyp=['HE', 'LIFTED', 'THE', 'SCISSORS', 'AND', 'WITH', 'ONE', 'STROKE', 'DESTROYED', 'THE', 'WEB', 'AND', 'GAVE', 'THE', 'FLY', 'TO', 'READ', 'THEM'] +4294-35475-0015-1900: ref=['A', 'FAINT', 'GLIMMER', 'OF', 'LIGHT', 'ON', 'THE', 'OPPOSITE', 'WALL', 'SHOWS', 'ME', 'THE', 'KEYHOLE'] +4294-35475-0015-1900: hyp=['A', 'FAINT', 'GLIMMER', 'OF', 'LIGHT', 'ON', 'THE', 'OPPOSITE', 'WALL', 'SHOWS', 'ME', 'THE', 'KEYHOLE'] +4294-35475-0016-1901: ref=['THE', 'PRINCE', 'SPENT', 'ALL', 'THE', 'FOLLOWING', 'TIME', 'UNTIL', 'MIDNIGHT', 'TRYING', 'TO', 'THINK', 'OF', 'A', 'SUITABLE', 'VERSE', 'TO', 'SAY', 'TO', 'THE', 'SCISSORS'] +4294-35475-0016-1901: hyp=['THE', 'PRINCE', 'SPENT', 'ALL', 'THE', 'FOLLOWING', 'TIME', 'UNTIL', 'MIDNIGHT', 'TRYING', 'TO', 'THINK', 'OF', 'A', 'SUITABLE', 'VERSE', 'TO', 'SAY', 'TO', 'THE', 'SCISSORS'] +4294-35475-0017-1902: ref=['AS', 'HE', 'UTTERED', 'THE', 'WORDS', 'THE', 'SCISSORS', 'LEAPED', 'OUT', 'OF', 'HIS', 'HAND', 'AND', 'BEGAN', 'TO', 'CUT', 'THROUGH', 'THE', 'WOODEN', 'SHUTTERS', 'AS', 'EASILY', 'AS', 'THROUGH', 'A', 'CHEESE'] +4294-35475-0017-1902: hyp=['AS', 'HE', 'UTTERED', 'THE', 'WORDS', 'THE', 'SCISSORS', 'LEAPED', 'OUT', 'OF', 'HIS', 'HAND', 'AND', 'BEGAN', 'TO', 'CUT', 'THROUGH', 'THE', 'WOODEN', 'SHUTTERS', 'AS', 'EASILY', 'AS', 'THROUGH', 'ITS', 'CHEESE'] +4294-35475-0018-1903: ref=['IN', 'A', 'VERY', 'SHORT', 'TIME', 'THE', 'PRINCE', 'HAD', 'CRAWLED', 'THROUGH', 'THE', 'OPENING'] +4294-35475-0018-1903: hyp=['IN', 'THE', 'VERY', 'SHORT', 'TIME', 'THE', 'PRINCE', 'HAD', 'CRAWLED', 'THROUGH', 'THE', 'OPENING'] +4294-35475-0019-1904: ref=['WHILE', 'HE', 'STOOD', 'LOOKING', 'AROUND', 'HIM', 'IN', 'BEWILDERMENT', 'A', 'FIREFLY', 'ALIGHTED', 'ON', 'HIS', 'ARM', 'FLASHING', 'ITS', 'LITTLE', 'LANTERN', 'IN', 'THE', "PRINCE'S", 'FACE', 'IT', 'CRIED', 'THIS', 'WAY', 'MY', 'FRIEND', 'THE', 'FLY', 'SENT', 'ME', 'TO', 'GUIDE', 'YOU', 'TO', 'A', 'PLACE', 'OF', 'SAFETY'] +4294-35475-0019-1904: hyp=['WHILE', 'HE', 'STOOD', 'LOOKING', 'AROUND', 'HIM', 'IN', 'BEWILDERMENT', 'A', 'FIREFLY', 'ALIGHTED', 'ON', 'HIS', 'HEART', 'FLASHING', 'ITS', 'LITTLE', 'LANTERN', 'IN', 'THE', "PRINCE'S", 'FACE', 'IT', 'CRIED', 'THIS', 'WAY', 'MY', 'FRIEND', 'THE', 'FLY', 'SENT', 'ME', 'TO', 'GUIDE', 'YOU', 'TO', 'A', 'PLACE', 'OF', 'SAFETY'] +4294-35475-0020-1905: ref=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'ME', 'CRIED', 'THE', 'POOR', 'PEASANT'] +4294-35475-0020-1905: hyp=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'ME', 'CRIED', 'THE', 'POOR', 'PEASANT'] +4294-35475-0021-1906: ref=['MY', 'GRAIN', 'MUST', 'FALL', 'AND', 'ROT', 'IN', 'THE', 'FIELD', 'FROM', 'OVERRIPENESS', 'BECAUSE', 'I', 'HAVE', 'NOT', 'THE', 'STRENGTH', 'TO', 'RISE', 'AND', 'HARVEST', 'IT', 'THEN', 'INDEED', 'MUST', 'WE', 'ALL', 'STARVE'] +4294-35475-0021-1906: hyp=['MY', 'GRAIN', 'MUST', 'FALL', 'IN', 'ROT', 'IN', 'THE', 'FIELD', 'FROM', 'OVER', 'RIPENESS', 'BECAUSE', 'I', 'HAVE', 'NOT', 'THE', 'STRENGTH', 'TO', 'RISE', 'IN', 'HARVEST', 'IT', 'THEN', 'INDEED', 'MUST', 'WE', 'ALL', 'STARVE'] +4294-35475-0022-1907: ref=['THE', 'GRANDAME', 'WHOM', 'HE', 'SUPPLIED', 'WITH', 'FAGOTS', 'THE', 'MERCHANT', 'WHOM', 'HE', 'RESCUED', 'FROM', 'ROBBERS', 'THE', "KING'S", 'COUNCILLOR', 'TO', 'WHOM', 'HE', 'GAVE', 
'AID', 'ALL', 'BECAME', 'HIS', 'FRIENDS', 'UP', 'AND', 'DOWN', 'THE', 'LAND', 'TO', 'BEGGAR', 'OR', 'LORD', 'HOMELESS', 'WANDERER', 'OR', 'HIGH', 'BORN', 'DAME', 'HE', 'GLADLY', 'GAVE', 'UNSELFISH', 'SERVICE', 'ALL', 'UNSOUGHT', 'AND', 'SUCH', 'AS', 'HE', 'HELPED', 'STRAIGHTWAY', 'BECAME', 'HIS', 'FRIENDS'] +4294-35475-0022-1907: hyp=['THE', 'GRAND', 'DAME', 'WHOM', 'HE', 'SUPPLIED', 'WITH', 'FAGOTS', 'THE', 'MERCHANT', 'WHOM', 'HE', 'RESCUED', 'FROM', 'ROBBERS', 'THE', "KING'S", 'COUNSELLOR', 'TO', 'WHOM', 'HE', 'GAVE', 'AID', 'ALL', 'BECAME', 'HIS', 'FRIENDS', 'UP', 'AND', 'DOWN', 'THE', 'LAND', 'TO', 'BEGGAR', 'O', 'LORD', 'HOMELESS', 'WANDERER', 'HIGH', 'BORN', 'DAME', 'HE', 'GLADLY', 'GAVE', 'UNSELFISH', 'SERVICE', 'ALL', 'UNSOUGHT', 'AND', 'SUCH', 'AS', 'HE', 'HELPED', 'STRAIGHTWAY', 'BECAME', 'HIS', 'FRIENDS'] +4294-35475-0023-1908: ref=['TO', 'HIM', 'WHO', 'COULD', 'BRING', 'HER', 'BACK', 'TO', 'HER', "FATHER'S", 'CASTLE', 'SHOULD', 'BE', 'GIVEN', 'THE', 'THRONE', 'AND', 'KINGDOM', 'AS', 'WELL', 'AS', 'THE', 'PRINCESS', 'HERSELF', 'SO', 'FROM', 'FAR', 'AND', 'NEAR', 'INDEED', 'FROM', 'ALMOST', 'EVERY', 'COUNTRY', 'UNDER', 'THE', 'SUN', 'CAME', 'KNIGHTS', 'AND', 'PRINCES', 'TO', 'FIGHT', 'THE', 'OGRE'] +4294-35475-0023-1908: hyp=['TO', 'HIM', 'WHO', 'COULD', 'BRING', 'HER', 'BACK', 'TO', 'HER', "FATHER'S", 'CASTLE', 'SHOULD', 'BE', 'GIVEN', 'THE', 'THRONE', 'AND', 'KINGDOM', 'AS', 'WELL', 'AS', 'THE', 'PRINCESS', 'HERSELF', 'SO', 'FROM', 'FAR', 'AND', 'NEAR', 'INDEED', 'FROM', 'ALMOST', 'EVERY', 'COUNTRY', 'UNDER', 'THE', 'SUN', 'CAME', 'NIGHTS', 'AND', 'PRINCES', 'TO', 'FIGHT', 'THE', 'OGRE'] +4294-35475-0024-1909: ref=['AMONG', 'THOSE', 'WHO', 'DREW', 'BACK', 'WERE', "ETHELRIED'S", 'BROTHERS', 'THE', 'THREE', 'THAT', 'WERE', 'DARK', 'AND', 'THE', 'THREE', 'THAT', 'WERE', 'FAIR'] +4294-35475-0024-1909: hyp=['AMONG', 'THOSE', 'WHO', 'DREW', 'BACK', 'WHERE', "ETHELRIED'S", 'BROTHERS', 'THE', 'THREE', 'THAT', 'WERE', 'DARK', 'AND', 'THE', 'THREE', 'THAT', 'WERE', 'FAIR'] +4294-35475-0025-1910: ref=['BUT', 'ETHELRIED', 'HEEDED', 'NOT', 'THEIR', 'TAUNTS'] +4294-35475-0025-1910: hyp=['BUT', 'ETHEL', 'READ', 'HE', 'DID', 'NOT', 'THEIR', 'TAUNTS'] +4294-35475-0026-1911: ref=['SO', 'THEY', 'ALL', 'CRIED', 'OUT', 'LONG', 'AND', 'LOUD', 'LONG', 'LIVE', 'THE', 'PRINCE', 'PRINCE', 'CISEAUX'] +4294-35475-0026-1911: hyp=['SO', 'THEY', 'ALL', 'CRIED', 'OUT', 'LONG', 'AND', 'LOUD', 'LONG', 'LIVE', 'THE', 'PRINCE', 'PRINCESO'] +4294-9934-0000-1912: ref=['HE', 'FELT', 'WHAT', 'THE', 'EARTH', 'MAY', 'POSSIBLY', 'FEEL', 'AT', 'THE', 'MOMENT', 'WHEN', 'IT', 'IS', 'TORN', 'OPEN', 'WITH', 'THE', 'IRON', 'IN', 'ORDER', 'THAT', 'GRAIN', 'MAY', 'BE', 'DEPOSITED', 'WITHIN', 'IT', 'IT', 'FEELS', 'ONLY', 'THE', 'WOUND', 'THE', 'QUIVER', 'OF', 'THE', 'GERM', 'AND', 'THE', 'JOY', 'OF', 'THE', 'FRUIT', 'ONLY', 'ARRIVE', 'LATER'] +4294-9934-0000-1912: hyp=['HE', 'FELT', 'WITH', 'THE', 'EARTH', 'MAY', 'POSSIBLY', 'FEEL', 'AT', 'THE', 'MOMENT', 'WHEN', 'IT', 'IS', 'TORN', 'OPEN', 'WITH', 'THE', 'IRON', 'IN', 'ORDER', 'THAT', 'GRAIN', 'MAY', 'BE', 'DEPOSITED', 'WITHIN', 'IT', 'IT', 'FEELS', 'ONLY', 'THE', 'WOUND', 'THE', 'QUIVER', 'OF', 'THE', 'GERM', 'THE', 'JOY', 'OF', 'THE', 'FRUIT', 'ONLY', 'ARRIVED', 'LATER'] +4294-9934-0001-1913: ref=['HE', 'HAD', 'BUT', 'JUST', 'ACQUIRED', 'A', 'FAITH', 'MUST', 'HE', 'THEN', 'REJECT', 'IT', 'ALREADY'] +4294-9934-0001-1913: hyp=["HE'D", 'BUT', 'JUST', 'ACQUIRED', 'A', 'FAITH', 'MUST', 'HE', 'THEN', 'REJECTED', 'ALREADY'] +4294-9934-0002-1914: ref=['HE', 'AFFIRMED', 'TO', 
'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'HE', 'DECLARED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'DOUBT', 'AND', 'HE', 'BEGAN', 'TO', 'DOUBT', 'IN', 'SPITE', 'OF', 'HIMSELF'] +4294-9934-0002-1914: hyp=['HE', 'AFFIRMED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'HE', 'DECLARED', 'TO', 'HIMSELF', 'THAT', 'HE', 'WOULD', 'NOT', 'DOUBT', 'AND', 'HE', 'BEGAN', 'TO', 'DOUBT', 'IN', 'SPITE', 'OF', 'HIMSELF'] +4294-9934-0003-1915: ref=['TO', 'STAND', 'BETWEEN', 'TWO', 'RELIGIONS', 'FROM', 'ONE', 'OF', 'WHICH', 'YOU', 'HAVE', 'NOT', 'AS', 'YET', 'EMERGED', 'AND', 'ANOTHER', 'INTO', 'WHICH', 'YOU', 'HAVE', 'NOT', 'YET', 'ENTERED', 'IS', 'INTOLERABLE', 'AND', 'TWILIGHT', 'IS', 'PLEASING', 'ONLY', 'TO', 'BAT', 'LIKE', 'SOULS'] +4294-9934-0003-1915: hyp=['TO', 'STAND', 'BETWEEN', 'TWO', 'RELIGIONS', 'FROM', 'ONE', 'OF', 'WHICH', 'YOU', 'HAVE', 'NOT', 'AS', 'YET', 'EMERGED', 'IN', 'ANOTHER', 'INTO', 'WHICH', 'YOU', 'HAVE', 'NOT', 'YET', 'ENTERED', 'IS', 'INTOLERABLE', 'AND', 'TWILIGHT', 'IS', 'PLEASING', 'ONLY', 'TO', 'BAT', 'LIKE', 'SOULS'] +4294-9934-0004-1916: ref=['MARIUS', 'WAS', 'CLEAR', 'EYED', 'AND', 'HE', 'REQUIRED', 'THE', 'TRUE', 'LIGHT'] +4294-9934-0004-1916: hyp=['MARIUS', 'WAS', 'CLEAR', 'EYED', 'AND', 'HE', 'REQUIRED', 'THE', 'TRUE', 'LIGHT'] +4294-9934-0005-1917: ref=['THE', 'HALF', 'LIGHTS', 'OF', 'DOUBT', 'PAINED', 'HIM'] +4294-9934-0005-1917: hyp=['THE', 'HALF', 'LIGHTS', 'OF', 'DOUBT', 'PAINED', 'HIM'] +4294-9934-0006-1918: ref=['WHATEVER', 'MAY', 'HAVE', 'BEEN', 'HIS', 'DESIRE', 'TO', 'REMAIN', 'WHERE', 'HE', 'WAS', 'HE', 'COULD', 'NOT', 'HALT', 'THERE', 'HE', 'WAS', 'IRRESISTIBLY', 'CONSTRAINED', 'TO', 'CONTINUE', 'TO', 'ADVANCE', 'TO', 'EXAMINE', 'TO', 'THINK', 'TO', 'MARCH', 'FURTHER'] +4294-9934-0006-1918: hyp=['WHATEVER', 'MAY', 'HAVE', 'BEEN', 'HIS', 'DESIRE', 'TO', 'REMAIN', 'WHERE', 'HE', 'WAS', 'HE', 'COULD', 'NOT', 'HALT', 'THERE', 'HE', 'WAS', 'IRRESISTIBLY', 'CONSTRAINED', 'TO', 'CONTINUE', 'TO', 'ADVANCE', 'TO', 'EXAMINE', 'TO', 'THINK', 'TO', 'MARCH', 'FURTHER'] +4294-9934-0007-1919: ref=['HE', 'FEARED', 'AFTER', 'HAVING', 'TAKEN', 'SO', 'MANY', 'STEPS', 'WHICH', 'HAD', 'BROUGHT', 'HIM', 'NEARER', 'TO', 'HIS', 'FATHER', 'TO', 'NOW', 'TAKE', 'A', 'STEP', 'WHICH', 'SHOULD', 'ESTRANGE', 'HIM', 'FROM', 'THAT', 'FATHER'] +4294-9934-0007-1919: hyp=['HE', 'FEARED', 'AFTER', 'HAVING', 'TAKEN', 'SO', 'MANY', 'STEPS', 'WHICH', 'HAD', 'BROUGHT', 'HIM', 'NEARER', 'TO', 'HIS', 'FATHER', 'TO', 'NOW', 'TAKE', 'A', 'STEP', 'WHICH', 'SHOULD', 'ESTRANGE', 'HIM', 'FROM', 'THAT', 'FATHER'] +4294-9934-0008-1920: ref=['HIS', 'DISCOMFORT', 'WAS', 'AUGMENTED', 'BY', 'ALL', 'THE', 'REFLECTIONS', 'WHICH', 'OCCURRED', 'TO', 'HIM'] +4294-9934-0008-1920: hyp=['HIS', 'DISCOMFORT', 'WAS', 'AUGMENTED', 'BY', 'ALL', 'THE', 'REFLECTIONS', 'WHICH', 'OCCURRED', 'TO', 'HIM'] +4294-9934-0009-1921: ref=['IN', 'THE', 'TROUBLED', 'STATE', 'OF', 'HIS', 'CONSCIENCE', 'HE', 'NO', 'LONGER', 'THOUGHT', 'OF', 'CERTAIN', 'SERIOUS', 'SIDES', 'OF', 'EXISTENCE'] +4294-9934-0009-1921: hyp=['IN', 'THE', 'TROUBLED', 'STATE', 'OF', 'HIS', 'CONSCIENCE', 'HE', 'NO', 'LONGER', 'THOUGHT', 'OF', 'CERTAIN', 'SERIOUS', 'SIDES', 'OF', 'EXISTENCE'] +4294-9934-0010-1922: ref=['THEY', 'SOON', 'ELBOWED', 'HIM', 'ABRUPTLY'] +4294-9934-0010-1922: hyp=['THEY', 'SOON', 'ELBOWED', 'HIM', 'ABRUPTLY'] +4294-9934-0011-1923: ref=['REQUEST', 'COURFEYRAC', 'TO', 'COME', 'AND', 'TALK', 'WITH', 'ME', 'SAID', 'MARIUS'] +4294-9934-0011-1923: hyp=['REQUEST', 'HER', 'FOR', 'ACT', 'TO', 'COME', 'AND', 'TALK', 'WITH', 'ME', 'SAID', 
'MARIUS'] +4294-9934-0012-1924: ref=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'YOU', 'SAID', 'COURFEYRAC'] +4294-9934-0012-1924: hyp=['WHAT', 'IS', 'TO', 'BECOME', 'OF', 'YOU', 'SAID', 'CURFYRAC'] +4294-9934-0013-1925: ref=['WHAT', 'ARE', 'YOU', 'GOING', 'TO', 'DO', 'I', 'DO', 'NOT', 'KNOW'] +4294-9934-0013-1925: hyp=['WHAT', 'ARE', 'YOU', 'GOING', 'TO', 'DO', 'I', 'DO', 'NOT', 'KNOW'] +4294-9934-0014-1926: ref=['SILVER', 'GOLD', 'HERE', 'IT', 'IS'] +4294-9934-0014-1926: hyp=['SILVER', 'GOLD', 'HERE', 'IT', 'IS'] +4294-9934-0015-1927: ref=['YOU', 'WILL', 'THEN', 'HAVE', 'ONLY', 'A', 'PAIR', 'OF', 'TROUSERS', 'A', 'WAISTCOAT', 'A', 'HAT', 'AND', 'A', 'COAT', 'AND', 'MY', 'BOOTS'] +4294-9934-0015-1927: hyp=['YOU', 'WILL', 'THEN', 'HAVE', 'ONLY', 'A', 'PAIR', 'OF', 'TROUSERS', 'A', 'WAISTCOAT', 'A', 'HAT', 'AND', 'A', 'COAT', 'AND', 'MY', 'BOOTS'] +4294-9934-0016-1928: ref=['THAT', 'WILL', 'BE', 'ENOUGH'] +4294-9934-0016-1928: hyp=['THAT', 'WILL', 'BE', 'ENOUGH'] +4294-9934-0017-1929: ref=['NO', 'IT', 'IS', 'NOT', 'GOOD', 'WHAT', 'WILL', 'YOU', 'DO', 'AFTER', 'THAT'] +4294-9934-0017-1929: hyp=['NO', 'IT', 'IS', 'NOT', 'GOOD', 'WHAT', 'WILL', 'YOU', 'DO', 'AFTER', 'THAT'] +4294-9934-0018-1930: ref=['DO', 'YOU', 'KNOW', 'GERMAN', 'NO'] +4294-9934-0018-1930: hyp=['DO', 'YOU', 'KNOW', 'GERMAN', 'NO'] +4294-9934-0019-1931: ref=['IT', 'IS', 'BADLY', 'PAID', 'WORK', 'BUT', 'ONE', 'CAN', 'LIVE', 'BY', 'IT'] +4294-9934-0019-1931: hyp=['IT', 'IS', 'BADLY', 'PAID', 'WORK', 'BUT', 'ONE', 'CAN', 'LIVE', 'BY', 'IT'] +4294-9934-0020-1932: ref=['THE', 'CLOTHES', 'DEALER', 'WAS', 'SENT', 'FOR'] +4294-9934-0020-1932: hyp=['THE', 'CLOTHES', 'DEALER', 'WAS', 'SENT', 'FOR'] +4294-9934-0021-1933: ref=['HE', 'PAID', 'TWENTY', 'FRANCS', 'FOR', 'THE', 'CAST', 'OFF', 'GARMENTS', 'THEY', 'WENT', 'TO', 'THE', "WATCHMAKER'S"] +4294-9934-0021-1933: hyp=['HE', 'PAID', 'TWENTY', 'FRANCS', 'FOR', 'THE', 'CAST', 'OFF', 'GARMENTS', 'THEY', 'WENT', 'TO', 'THE', 'WATCHMAKERS'] +4294-9934-0022-1934: ref=['HE', 'BOUGHT', 'THE', 'WATCH', 'FOR', 'FORTY', 'FIVE', 'FRANCS'] +4294-9934-0022-1934: hyp=['HE', 'BOUGHT', 'THE', 'WATCH', 'FOR', 'FORTY', 'FIVE', 'FRANCS'] +4294-9934-0023-1935: ref=['HELLO', 'I', 'HAD', 'FORGOTTEN', 'THAT', 'SAID', 'MARIUS'] +4294-9934-0023-1935: hyp=['HALLO', 'I', 'HAD', 'FORGOTTEN', 'THAT', 'SAID', 'MARIUS'] +4294-9934-0024-1936: ref=['THE', 'LANDLORD', 'PRESENTED', 'HIS', 'BILL', 'WHICH', 'HAD', 'TO', 'BE', 'PAID', 'ON', 'THE', 'SPOT'] +4294-9934-0024-1936: hyp=['THE', 'LANDLORD', 'PRESENTED', 'HIS', 'BILL', 'WHICH', 'HAD', 'TO', 'BE', 'PAID', 'ON', 'THE', 'SPOT'] +4294-9934-0025-1937: ref=['I', 'HAVE', 'TEN', 'FRANCS', 'LEFT', 'SAID', 'MARIUS'] +4294-9934-0025-1937: hyp=['I', 'HAVE', 'TEN', 'FRANCS', 'LEFT', 'SAID', 'MARIUS'] +4294-9934-0026-1938: ref=['THAT', 'WILL', 'BE', 'SWALLOWING', 'A', 'TONGUE', 'VERY', 'FAST', 'OR', 'A', 'HUNDRED', 'SOUS', 'VERY', 'SLOWLY'] +4294-9934-0026-1938: hyp=['THAT', 'WILL', 'BE', 'SWALLOWING', 'A', 'TONGUE', 'VERY', 'FAST', 'OR', 'A', 'HUNDRED', 'SOUS', 'VERY', 'SLOWLY'] +4294-9934-0027-1939: ref=['ONE', 'MORNING', 'ON', 'HIS', 'RETURN', 'FROM', 'THE', 'LAW', 'SCHOOL', 'MARIUS', 'FOUND', 'A', 'LETTER', 'FROM', 'HIS', 'AUNT', 'AND', 'THE', 'SIXTY', 'PISTOLES', 'THAT', 'IS', 'TO', 'SAY', 'SIX', 'HUNDRED', 'FRANCS', 'IN', 'GOLD', 'IN', 'A', 'SEALED', 'BOX'] +4294-9934-0027-1939: hyp=['ONE', 'MORNING', 'ON', 'HIS', 'RETURN', 'FROM', 'THE', 'LAST', 'SCHOOL', 'MARIUS', 'FOUND', 'A', 'LETTER', 'FROM', 'HIS', 'AUNT', 'AND', 'THE', 'SIXTY', 'PISTOL', 'THAT', 'IS', 'TO', 'SAY', 'SIX', 
'HUNDRED', 'FRANCS', 'IN', 'GOLD', 'AND', 'A', 'SEALED', 'BOX'] +4294-9934-0028-1940: ref=['MARIUS', 'SENT', 'BACK', 'THE', 'THIRTY', 'LOUIS', 'TO', 'HIS', 'AUNT', 'WITH', 'A', 'RESPECTFUL', 'LETTER', 'IN', 'WHICH', 'HE', 'STATED', 'THAT', 'HE', 'HAD', 'SUFFICIENT', 'MEANS', 'OF', 'SUBSISTENCE', 'AND', 'THAT', 'HE', 'SHOULD', 'BE', 'ABLE', 'THENCEFORTH', 'TO', 'SUPPLY', 'ALL', 'HIS', 'NEEDS'] +4294-9934-0028-1940: hyp=['MARIUS', 'SENT', 'BACK', 'THE', 'THIRTY', 'LOUIS', 'TO', 'HIS', 'AUNT', 'WITH', 'THE', 'RESPECTFUL', 'LETTER', 'IN', 'WHICH', 'SHE', 'STATED', 'THAT', 'HE', 'HAD', 'SUSPICION', 'MEANS', 'OF', 'SUBSISTENCE', 'AND', 'THAT', 'HE', 'SHOULD', 'BE', 'ABLE', 'THENCEFORTH', 'TO', 'SUPPLY', 'ALL', 'HIS', 'NEEDS'] +4294-9934-0029-1941: ref=['AT', 'THAT', 'MOMENT', 'HE', 'HAD', 'THREE', 'FRANCS', 'LEFT'] +4294-9934-0029-1941: hyp=['AT', 'THAT', 'MOMENT', 'HE', 'HAD', 'THREE', 'FRANCS', 'LEFT'] +4350-10919-0000-2716: ref=['HE', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'NO', 'GOOD', 'TALKING', 'TO', 'THE', 'OLD', 'MAN', 'AND', 'THAT', 'THE', 'PRINCIPAL', 'PERSON', 'IN', 'THE', 'HOUSE', 'WAS', 'THE', 'MOTHER'] +4350-10919-0000-2716: hyp=['HE', 'PERCEIVED', 'THAT', 'IT', 'WAS', 'NO', 'GOOD', 'TALKING', 'TO', 'THE', 'OLD', 'MAN', 'AND', 'THAT', 'THE', 'PRINCIPAL', 'PERSON', 'IN', 'THE', 'HOUSE', 'WAS', 'THE', 'MOTHER'] +4350-10919-0001-2717: ref=['BEFORE', 'HER', 'HE', 'DECIDED', 'TO', 'SCATTER', 'HIS', 'PEARLS'] +4350-10919-0001-2717: hyp=['BEFORE', 'HER', 'HE', 'DECIDED', 'TO', 'SCATTER', 'HIS', 'PEARLS'] +4350-10919-0002-2718: ref=['THE', 'PRINCESS', 'WAS', 'DISTRACTED', 'AND', 'DID', 'NOT', 'KNOW', 'WHAT', 'TO', 'DO', 'SHE', 'FELT', 'SHE', 'HAD', 'SINNED', 'AGAINST', 'KITTY'] +4350-10919-0002-2718: hyp=['THE', 'PRINCESS', 'WAS', 'DISTRACTED', 'AND', 'DID', 'NOT', 'KNOW', 'WHAT', 'TO', 'DO', 'SHE', 'FELT', 'SHE', 'HAD', 'SENT', 'AGAINST', 'KITTY'] +4350-10919-0003-2719: ref=['WELL', 'DOCTOR', 'DECIDE', 'OUR', 'FATE', 'SAID', 'THE', 'PRINCESS', 'TELL', 'ME', 'EVERYTHING'] +4350-10919-0003-2719: hyp=['WELL', 'DOCTOR', 'DECIDE', 'OUR', 'FATE', 'SAID', 'THE', 'PRINCESS', 'TELL', 'ME', 'EVERYTHING'] +4350-10919-0004-2720: ref=['IS', 'THERE', 'HOPE', 'SHE', 'MEANT', 'TO', 'SAY', 'BUT', 'HER', 'LIPS', 'QUIVERED', 'AND', 'SHE', 'COULD', 'NOT', 'UTTER', 'THE', 'QUESTION', 'WELL', 'DOCTOR'] +4350-10919-0004-2720: hyp=['IS', 'THERE', 'HOPE', 'SHE', 'MEANT', 'TO', 'SAY', 'BUT', 'HER', 'LIPS', 'QUIVERED', 'AND', 'SHE', 'COULD', 'NOT', 'UTTER', 'THE', 'QUESTION', 'WELL', 'DOCTOR'] +4350-10919-0005-2721: ref=['AS', 'YOU', 'PLEASE', 'THE', 'PRINCESS', 'WENT', 'OUT', 'WITH', 'A', 'SIGH'] +4350-10919-0005-2721: hyp=['AS', 'YOU', 'PLEASE', 'THE', 'PRINCESS', 'WENT', 'OUT', 'WITH', 'A', 'SIGH'] +4350-10919-0006-2722: ref=['THE', 'FAMILY', 'DOCTOR', 'RESPECTFULLY', 'CEASED', 'IN', 'THE', 'MIDDLE', 'OF', 'HIS', 'OBSERVATIONS'] +4350-10919-0006-2722: hyp=['THE', 'FAMILY', 'DOCTOR', 'RESPECTFULLY', 'CEASED', 'IN', 'THE', 'MIDDLE', 'OF', 'HIS', 'OBSERVATIONS'] +4350-10919-0007-2723: ref=['AND', 'THERE', 'ARE', 'INDICATIONS', 'MALNUTRITION', 'NERVOUS', 'EXCITABILITY', 'AND', 'SO', 'ON'] +4350-10919-0007-2723: hyp=['AND', 'THERE', 'ARE', 'INDICATIONS', 'MALLETRICIAN', 'NERVOUS', 'EXCITABILITY', 'AND', 'SO', 'ON'] +4350-10919-0008-2724: ref=['THE', 'QUESTION', 'STANDS', 'THUS', 'IN', 'PRESENCE', 'OF', 'INDICATIONS', 'OF', 'TUBERCULOUS', 'PROCESS', 'WHAT', 'IS', 'TO', 'BE', 'DONE', 'TO', 'MAINTAIN', 'NUTRITION'] +4350-10919-0008-2724: hyp=['THE', 'QUESTION', 'SENDS', 'THUS', 'IN', 'PRESENCE', 'OF', 'INDICATIONS', 'OF', 
"TIBERICAN'S", 'PROCESS', 'WHAT', 'IS', 'TO', 'BE', 'DONE', 'TO', 'MAINTAIN', 'NUTRITION'] +4350-10919-0009-2725: ref=['YES', "THAT'S", 'AN', 'UNDERSTOOD', 'THING', 'RESPONDED', 'THE', 'CELEBRATED', 'PHYSICIAN', 'AGAIN', 'GLANCING', 'AT', 'HIS', 'WATCH'] +4350-10919-0009-2725: hyp=['YES', 'I', 'CAN', 'UNDERSTOOD', 'THING', 'RESPONDED', 'THE', 'CELEBRATED', 'PHYSICIAN', 'AGAIN', 'GLANCING', 'AT', 'HIS', 'WATCH'] +4350-10919-0010-2726: ref=['BEG', 'PARDON', 'IS', 'THE', 'YAUSKY', 'BRIDGE', 'DONE', 'YET', 'OR', 'SHALL', 'I', 'HAVE', 'TO', 'DRIVE', 'AROUND'] +4350-10919-0010-2726: hyp=['BEG', 'PARDON', 'IS', 'THEOSKEY', 'BRIDGE', 'DON', 'YET', 'OR', 'SHALL', 'I', 'HAVE', 'TO', 'DRIVE', 'HER', 'ON'] +4350-10919-0011-2727: ref=['HE', 'ASKED', 'AH', 'IT', 'IS'] +4350-10919-0011-2727: hyp=['HE', 'ASKED', 'AH', 'IT', 'IS'] +4350-10919-0012-2728: ref=['OH', 'WELL', 'THEN', 'I', 'CAN', 'DO', 'IT', 'IN', 'TWENTY', 'MINUTES'] +4350-10919-0012-2728: hyp=['OH', 'WELL', 'THEN', 'I', 'CAN', 'DO', 'IT', 'IN', 'TWENTY', 'MINUTES'] +4350-10919-0013-2729: ref=['AND', 'HOW', 'ABOUT', 'A', 'TOUR', 'ABROAD', 'ASKED', 'THE', 'FAMILY', 'DOCTOR'] +4350-10919-0013-2729: hyp=['AND', 'HOW', 'ABOUT', 'IT', 'WERE', 'BROAD', 'ASKED', 'THE', 'FELLOW', 'DOCTOR'] +4350-10919-0014-2730: ref=['WHAT', 'IS', 'WANTED', 'IS', 'MEANS', 'OF', 'IMPROVING', 'NUTRITION', 'AND', 'NOT', 'FOR', 'LOWERING', 'IT'] +4350-10919-0014-2730: hyp=['WHAT', 'IS', 'WANTED', 'IS', 'THE', 'MEANS', 'OF', 'IMPROVING', 'UTRITION', 'AND', 'NOT', 'FOR', 'LOWERING', 'IT'] +4350-10919-0015-2731: ref=['THE', 'FAMILY', 'DOCTOR', 'LISTENED', 'ATTENTIVELY', 'AND', 'RESPECTFULLY'] +4350-10919-0015-2731: hyp=['THE', 'FAMILY', 'DOCTOR', 'LISTENED', 'ATTENTIVELY', 'AND', 'RESPECTFULLY'] +4350-10919-0016-2732: ref=['BUT', 'IN', 'FAVOR', 'OF', 'FOREIGN', 'TRAVEL', 'I', 'WOULD', 'URGE', 'THE', 'CHANGE', 'OF', 'HABITS', 'THE', 'REMOVAL', 'FROM', 'CONDITIONS', 'CALLING', 'UP', 'REMINISCENCES'] +4350-10919-0016-2732: hyp=['BUT', 'IN', 'FAVOUR', 'OF', 'FOREIGN', 'TRAVEL', 'I', 'WOULD', 'URGE', 'THE', 'CHANGE', 'OF', 'HABITS', 'THE', 'REMOVAL', 'FROM', 'CONDITIONS', 'CALLING', 'UP', 'REMINISCENCES'] +4350-10919-0017-2733: ref=['AND', 'THEN', 'THE', 'MOTHER', 'WISHES', 'IT', 'HE', 'ADDED'] +4350-10919-0017-2733: hyp=['AND', 'THEN', 'THE', 'MOTHER', 'WISHES', 'IT', 'HE', 'ADDED'] +4350-10919-0018-2734: ref=['AH', 'WELL', 'IN', 'THAT', 'CASE', 'TO', 'BE', 'SURE', 'LET', 'THEM', 'GO', 'ONLY', 'THOSE', 'GERMAN', 'QUACKS', 'ARE', 'MISCHIEVOUS'] +4350-10919-0018-2734: hyp=['AH', 'WELL', 'IN', 'THAT', 'CASE', 'TO', 'BE', 'SURE', 'LET', 'THEM', 'GO', 'ONLY', 'THOSE', 'GERMAN', 'QUACKS', 'ARE', 'MISCHIEVOUS'] +4350-10919-0019-2735: ref=['OH', "TIME'S", 'UP', 'ALREADY', 'AND', 'HE', 'WENT', 'TO', 'THE', 'DOOR'] +4350-10919-0019-2735: hyp=['OH', 'TIMES', 'UP', 'ALREADY', 'AND', 'HE', 'WENT', 'TO', 'THE', 'DOOR'] +4350-10919-0020-2736: ref=['THE', 'CELEBRATED', 'DOCTOR', 'ANNOUNCED', 'TO', 'THE', 'PRINCESS', 'A', 'FEELING', 'OF', 'WHAT', 'WAS', 'DUE', 'FROM', 'HIM', 'DICTATED', 'HIS', 'DOING', 'SO', 'THAT', 'HE', 'OUGHT', 'TO', 'SEE', 'THE', 'PATIENT', 'ONCE', 'MORE'] +4350-10919-0020-2736: hyp=['THE', 'CELEBRATED', 'DOCTOR', 'ANNOUNCED', 'TO', 'THE', 'PRINCESS', 'A', 'FEELING', 'OF', 'WHAT', 'WAS', 'DUE', 'FROM', 'HIM', 'DECLATED', 'HIS', 'DOING', 'SO', 'THAT', 'HE', 'OUGHT', 'TO', 'SEE', 'THE', 'PATIENT', 'ONCE', 'MORE'] +4350-10919-0021-2737: ref=['OH', 'NO', 'ONLY', 'A', 'FEW', 'DETAILS', 'PRINCESS', 'COME', 'THIS', 'WAY'] +4350-10919-0021-2737: hyp=['OH', 'NO', 'ONLY', 'A', 'FEW', 
'DETAILS', 'PRINCESS', 'COME', 'THIS', 'WAY'] +4350-10919-0022-2738: ref=['AND', 'THE', 'MOTHER', 'ACCOMPANIED', 'BY', 'THE', 'DOCTOR', 'WENT', 'INTO', 'THE', 'DRAWING', 'ROOM', 'TO', 'KITTY'] +4350-10919-0022-2738: hyp=['AND', 'THE', 'MOTHER', 'ACCOMPANIED', 'BY', 'THE', 'DOCTOR', 'WENT', 'INTO', 'THE', 'DRAWING', 'ROOM', 'TO', 'KITTY'] +4350-10919-0023-2739: ref=['WHEN', 'THE', 'DOCTOR', 'CAME', 'IN', 'SHE', 'FLUSHED', 'CRIMSON', 'AND', 'HER', 'EYES', 'FILLED', 'WITH', 'TEARS'] +4350-10919-0023-2739: hyp=['WHEN', 'THE', 'DOCTOR', 'CAME', 'IN', 'SHE', 'FLUSHED', 'CRIMSON', 'AND', 'HER', 'EYES', 'FILLED', 'WITH', 'TEARS'] +4350-10919-0024-2740: ref=['SHE', 'ANSWERED', 'HIM', 'AND', 'ALL', 'AT', 'ONCE', 'GOT', 'UP', 'FURIOUS'] +4350-10919-0024-2740: hyp=['SHE', 'ANSWERED', 'HIM', 'AND', 'ALL', 'AT', 'ONCE', 'GOT', 'UP', 'FURIOUS'] +4350-10919-0025-2741: ref=['EXCUSE', 'ME', 'DOCTOR', 'BUT', 'THERE', 'IS', 'REALLY', 'NO', 'OBJECT', 'IN', 'THIS'] +4350-10919-0025-2741: hyp=['EXCUSE', 'ME', 'DOCTOR', 'BUT', 'THERE', 'IS', 'REALLY', 'NO', 'OBJECT', 'IN', 'THIS'] +4350-10919-0026-2742: ref=['THIS', 'IS', 'THE', 'THIRD', 'TIME', "YOU'VE", 'ASKED', 'ME', 'THE', 'SAME', 'THING'] +4350-10919-0026-2742: hyp=['THIS', 'IS', 'THE', 'THIRD', 'TIME', "YOU'VE", 'ASKED', 'ME', 'THE', 'SAME', 'THING'] +4350-10919-0027-2743: ref=['THE', 'CELEBRATED', 'DOCTOR', 'DID', 'NOT', 'TAKE', 'OFFENSE'] +4350-10919-0027-2743: hyp=['THE', 'CELEBRATED', 'DOCTOR', 'DID', 'NOT', 'TAKE', 'OFFENCE'] +4350-10919-0028-2744: ref=['NERVOUS', 'IRRITABILITY', 'HE', 'SAID', 'TO', 'THE', 'PRINCESS', 'WHEN', 'KITTY', 'HAD', 'LEFT', 'THE', 'ROOM', 'HOWEVER', 'I', 'HAD', 'FINISHED'] +4350-10919-0028-2744: hyp=['NERVOUS', 'IRRITABILITY', 'HE', 'SAID', 'TO', 'THE', 'PRINCESS', 'WHEN', 'KITTY', 'HAD', 'LEFT', 'THE', 'ROOM', 'HOWEVER', 'I', 'HAD', 'FINISHED'] +4350-10919-0029-2745: ref=['AND', 'THE', 'DOCTOR', 'BEGAN', 'SCIENTIFICALLY', 'EXPLAINING', 'TO', 'THE', 'PRINCESS', 'AS', 'AN', 'EXCEPTIONALLY', 'INTELLIGENT', 'WOMAN', 'THE', 'CONDITION', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'AND', 'CONCLUDED', 'BY', 'INSISTING', 'ON', 'THE', 'DRINKING', 'OF', 'THE', 'WATERS', 'WHICH', 'WERE', 'CERTAINLY', 'HARMLESS'] +4350-10919-0029-2745: hyp=['AND', 'THE', 'DOCTOR', 'BEGAN', 'SCIENTIFICALLY', 'EXPLAINING', 'TO', 'THE', 'PRINCESS', 'AS', 'AN', 'EXCEPTIONALLY', 'INTELLIGENT', 'WOMAN', 'THE', 'CONDITION', 'OF', 'THE', 'YOUNG', 'PRINCESS', 'AND', 'CONCLUDED', 'BY', 'INSISTING', 'ON', 'THE', 'DRINKING', 'OF', 'THE', 'WATERS', 'WHICH', 'WERE', 'CERTAINLY', 'HARMLESS'] +4350-10919-0030-2746: ref=['AT', 'THE', 'QUESTION', 'SHOULD', 'THEY', 'GO', 'ABROAD', 'THE', 'DOCTOR', 'PLUNGED', 'INTO', 'DEEP', 'MEDITATION', 'AS', 'THOUGH', 'RESOLVING', 'A', 'WEIGHTY', 'PROBLEM'] +4350-10919-0030-2746: hyp=['BUT', 'THE', 'QUESTION', 'SHOULD', 'THEY', 'GO', 'ABROAD', 'THE', 'DOCTOR', 'PLUNGED', 'INTO', 'DEEP', 'MEDITATION', 'AS', 'THOUGH', 'RESOLVING', 'A', 'WEIGHTY', 'PROBLEM'] +4350-10919-0031-2747: ref=['FINALLY', 'HIS', 'DECISION', 'WAS', 'PRONOUNCED', 'THEY', 'WERE', 'TO', 'GO', 'ABROAD', 'BUT', 'TO', 'PUT', 'NO', 'FAITH', 'IN', 'FOREIGN', 'QUACKS', 'AND', 'TO', 'APPLY', 'TO', 'HIM', 'IN', 'ANY', 'NEED'] +4350-10919-0031-2747: hyp=['FINALLY', 'HIS', 'DECISION', 'WAS', 'PRONOUNCED', 'THEY', 'WERE', 'TO', 'GO', 'ABROAD', 'BUT', 'TO', 'PUT', 'NO', 'FAITH', 'IN', 'FOREIGN', 'QUACKS', 'AND', 'TO', 'APPLY', 'TO', 'HIM', 'IN', 'ANY', 'NEED'] +4350-10919-0032-2748: ref=['IT', 'SEEMED', 'AS', 'THOUGH', 'SOME', 'PIECE', 'OF', 'GOOD', 'FORTUNE', 'HAD', 'COME', 'TO', 'PASS', 
'AFTER', 'THE', 'DOCTOR', 'HAD', 'GONE'] +4350-10919-0032-2748: hyp=['IT', 'SEEMED', 'AS', 'THOUGH', 'SOME', 'PIECE', 'OF', 'GOOD', 'FORTUNE', 'HAD', 'COME', 'TO', 'PASS', 'AFTER', 'THE', 'DOCTOR', 'HAD', 'GONE'] +4350-10919-0033-2749: ref=['THE', 'MOTHER', 'WAS', 'MUCH', 'MORE', 'CHEERFUL', 'WHEN', 'SHE', 'WENT', 'BACK', 'TO', 'HER', 'DAUGHTER', 'AND', 'KITTY', 'PRETENDED', 'TO', 'BE', 'MORE', 'CHEERFUL'] +4350-10919-0033-2749: hyp=['THE', 'MOTHER', 'WAS', 'MUCH', 'MORE', 'CHEERFUL', 'WHEN', 'SHE', 'WENT', 'BACK', 'TO', 'HER', 'DAUGHTER', 'AND', 'KITTY', 'PRETENDED', 'TO', 'BE', 'MORE', 'CHEERFUL'] +4350-9170-0000-2750: ref=['EDUCATED', 'PEOPLE', 'OF', 'THE', 'UPPER', 'CLASSES', 'ARE', 'TRYING', 'TO', 'STIFLE', 'THE', 'EVER', 'GROWING', 'SENSE', 'OF', 'THE', 'NECESSITY', 'OF', 'TRANSFORMING', 'THE', 'EXISTING', 'SOCIAL', 'ORDER'] +4350-9170-0000-2750: hyp=['EDUCATED', 'PEOPLE', 'OF', 'THE', 'UPPER', 'CLASSES', 'ARE', 'TRYING', 'TO', 'STIFLE', 'THE', 'EVERGREWING', 'SENSE', 'OF', 'THE', 'NECESSITY', 'OF', 'TRANSFORMING', 'THE', 'EXISTING', 'SOCIAL', 'ORDER'] +4350-9170-0001-2751: ref=['THIS', 'IS', 'ABSOLUTELY', 'INCORRECT'] +4350-9170-0001-2751: hyp=['MISSUS', 'ABSOLUTELY', 'AND', 'CORRECT'] +4350-9170-0002-2752: ref=['IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IT', 'IS', 'SUPPOSED', 'THAT', 'SINCE', 'THE', 'AIM', 'OF', 'LIFE', 'IS', 'FOUND', 'IN', 'GROUPS', 'OF', 'INDIVIDUALS', 'INDIVIDUALS', 'WILL', 'VOLUNTARILY', 'SACRIFICE', 'THEIR', 'OWN', 'INTERESTS', 'FOR', 'THE', 'INTERESTS', 'OF', 'THE', 'GROUP'] +4350-9170-0002-2752: hyp=['IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IT', 'IS', 'SUPPOSED', 'THAT', 'SINCE', 'THE', 'AIM', 'OF', 'LIFE', 'IS', 'FOUND', 'IN', 'GROUPS', 'OF', 'INDIVIDUALS', 'INDIVIDUALS', 'WHO', 'VOLUNTARILY', 'SACRIFICE', 'THEIR', 'OWN', 'INTEREST', 'FOR', 'THE', 'INTEREST', 'OF', 'THE', 'GROUP'] +4350-9170-0003-2753: ref=['THE', 'CHAMPIONS', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'USUALLY', 'TRY', 'TO', 'CONNECT', 'THE', 'IDEA', 'OF', 'AUTHORITY', 'THAT', 'IS', 'OF', 'VIOLENCE', 'WITH', 'THE', 'IDEA', 'OF', 'MORAL', 'INFLUENCE', 'BUT', 'THIS', 'CONNECTION', 'IS', 'QUITE', 'IMPOSSIBLE'] +4350-9170-0003-2753: hyp=['THE', 'CHAMPIONS', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'USUALLY', 'TRY', 'TO', 'CONNECT', 'THE', 'IDEA', 'OF', 'AUTHORITY', 'THAT', 'IS', 'OF', 'VIOLENCE', 'WITH', 'THE', 'IDEA', 'OF', 'MORAL', 'INFLUENCE', 'BUT', 'THIS', 'CONNECTION', 'IS', 'QUITE', 'IMPOSSIBLE'] +4350-9170-0004-2754: ref=['THE', 'MAN', 'WHO', 'IS', 'CONTROLLED', 'BY', 'MORAL', 'INFLUENCE', 'ACTS', 'IN', 'ACCORDANCE', 'WITH', 'HIS', 'OWN', 'DESIRES'] +4350-9170-0004-2754: hyp=['THE', 'MAN', 'WHO', 'HAS', 'CONTROLLED', 'BY', 'MORAL', 'INFLUENCE', 'ACTS', 'IN', 'ACCORDANCE', 'WITH', 'HIS', 'OWN', 'DESIRES'] +4350-9170-0005-2755: ref=['THE', 'BASIS', 'OF', 'AUTHORITY', 'IS', 'BODILY', 'VIOLENCE'] +4350-9170-0005-2755: hyp=['THE', 'BASIS', 'OF', 'AUTHORITY', 'IS', 'BODILY', 'VIOLENCE'] +4350-9170-0006-2756: ref=['THE', 'POSSIBILITY', 'OF', 'APPLYING', 'BODILY', 'VIOLENCE', 'TO', 'PEOPLE', 'IS', 'PROVIDED', 'ABOVE', 'ALL', 'BY', 'AN', 'ORGANIZATION', 'OF', 'ARMED', 'MEN', 'TRAINED', 'TO', 'ACT', 'IN', 'UNISON', 'IN', 'SUBMISSION', 'TO', 'ONE', 'WILL'] +4350-9170-0006-2756: hyp=['THE', 'POSSIBILITY', 'OF', 'APPLYING', 'BODILY', 'VIOLENCE', 'THE', 'PEOPLE', 'IS', 'PROVIDED', 'ABOVE', 'ALL', 'BY', 'AN', 'ORGANIZATION', 'OF', 'ARMED', 'MEN', 'TRAINED', 'TO', 'ACT', 'IN', 'UNISON', 'AND', 'SUBMISSION', 'TO', 'ONE', 'WILL'] +4350-9170-0007-2757: ref=['THESE', 
'BANDS', 'OF', 'ARMED', 'MEN', 'SUBMISSIVE', 'TO', 'A', 'SINGLE', 'WILL', 'ARE', 'WHAT', 'CONSTITUTE', 'THE', 'ARMY'] +4350-9170-0007-2757: hyp=['THESE', 'BANDS', 'OF', 'ARMED', 'MEN', 'SUBMISSIVE', 'TO', 'A', 'SINGLE', 'WILL', 'ARE', 'WHAT', 'CONSTITUTE', 'THE', 'ARMY'] +4350-9170-0008-2758: ref=['THE', 'ARMY', 'HAS', 'ALWAYS', 'BEEN', 'AND', 'STILL', 'IS', 'THE', 'BASIS', 'OF', 'POWER'] +4350-9170-0008-2758: hyp=['THE', 'ARMY', 'HAS', 'ALWAYS', 'BEEN', 'AND', 'STILL', 'IS', 'THE', 'BASIS', 'OF', 'POWER'] +4350-9170-0009-2759: ref=['POWER', 'IS', 'ALWAYS', 'IN', 'THE', 'HANDS', 'OF', 'THOSE', 'WHO', 'CONTROL', 'THE', 'ARMY', 'AND', 'ALL', 'MEN', 'IN', 'POWER', 'FROM', 'THE', 'ROMAN', 'CAESARS', 'TO', 'THE', 'RUSSIAN', 'AND', 'GERMAN', 'EMPERORS', 'TAKE', 'MORE', 'INTEREST', 'IN', 'THEIR', 'ARMY', 'THAN', 'IN', 'ANYTHING', 'AND', 'COURT', 'POPULARITY', 'IN', 'THE', 'ARMY', 'KNOWING', 'THAT', 'IF', 'THAT', 'IS', 'ON', 'THEIR', 'SIDE', 'THEIR', 'POWER', 'IS', 'SECURE'] +4350-9170-0009-2759: hyp=['POWER', 'IS', 'ALWAYS', 'IN', 'THE', 'HANDS', 'OF', 'THOSE', 'WHO', 'CONTROL', 'THE', 'ARMY', 'AND', 'ALL', 'MEN', 'IN', 'POWER', 'FROM', 'THE', 'ROMAN', 'CAESARS', 'TO', 'THE', 'RUSSIAN', 'AND', 'GERMAN', 'EMPERORS', 'TAKE', 'MORE', 'INTEREST', 'IN', 'THEIR', 'ARMY', 'THAN', 'IN', 'ANYTHING', 'IN', 'COURT', 'POPULARITY', 'IN', 'THE', 'ARMY', 'KNOWING', 'THAT', 'IF', 'THAT', 'IS', 'ON', 'THEIR', 'SIDE', 'THEIR', 'POWER', 'IS', 'SECURE'] +4350-9170-0010-2760: ref=['INDEED', 'IT', 'COULD', 'NOT', 'BE', 'OTHERWISE'] +4350-9170-0010-2760: hyp=['INDEED', 'IT', 'COULD', 'NOT', 'BE', 'OTHERWISE'] +4350-9170-0011-2761: ref=['ONLY', 'UNDER', 'THOSE', 'CONDITIONS', 'COULD', 'THE', 'SOCIAL', 'ORGANIZATION', 'BE', 'JUSTIFIED'] +4350-9170-0011-2761: hyp=['ONLY', 'UNDER', 'THOSE', 'CONDITIONS', 'COULD', 'THE', 'SOCIAL', 'ORGANIZATION', 'BE', 'JUSTIFIED'] +4350-9170-0012-2762: ref=['BUT', 'SINCE', 'THIS', 'IS', 'NOT', 'THE', 'CASE', 'AND', 'ON', 'THE', 'CONTRARY', 'MEN', 'IN', 'POWER', 'ARE', 'ALWAYS', 'FAR', 'FROM', 'BEING', 'SAINTS', 'THROUGH', 'THE', 'VERY', 'FACT', 'OF', 'THEIR', 'POSSESSION', 'OF', 'POWER', 'THE', 'SOCIAL', 'ORGANIZATION', 'BASED', 'ON', 'POWER', 'HAS', 'NO', 'JUSTIFICATION'] +4350-9170-0012-2762: hyp=['BUT', 'SINCE', 'THIS', 'IS', 'NOT', 'THE', 'CASE', 'AND', 'ON', 'THE', 'CONTRARY', 'MEN', 'AND', 'POWER', 'ARE', 'ALWAYS', 'FAR', 'FROM', 'BEING', 'SAINTS', 'THROUGH', 'THE', 'VERY', 'FACT', 'OF', 'THEIR', 'POSSESSION', 'OF', 'POWER', 'THE', 'SOCIAL', 'ORGANIZATION', 'BASED', 'ON', 'POWER', 'HAS', 'NO', 'JUSTIFICATION'] +4350-9170-0013-2763: ref=['EVEN', 'IF', 'THERE', 'WAS', 'ONCE', 'A', 'TIME', 'WHEN', 'OWING', 'TO', 'THE', 'LOW', 'STANDARD', 'OF', 'MORALS', 'AND', 'THE', 'DISPOSITION', 'OF', 'MEN', 'TO', 'VIOLENCE', 'THE', 'EXISTENCE', 'OF', 'AN', 'AUTHORITY', 'TO', 'RESTRAIN', 'SUCH', 'VIOLENCE', 'WAS', 'AN', 'ADVANTAGE', 'BECAUSE', 'THE', 'VIOLENCE', 'OF', 'GOVERNMENT', 'WAS', 'LESS', 'THAN', 'THE', 'VIOLENCE', 'OF', 'INDIVIDUALS', 'ONE', 'CANNOT', 'BUT', 'SEE', 'THAT', 'THIS', 'ADVANTAGE', 'COULD', 'NOT', 'BE', 'LASTING'] +4350-9170-0013-2763: hyp=['EVEN', 'IF', 'THERE', 'WAS', 'ONCE', 'A', 'TIME', 'WHEN', 'OWING', 'TO', 'THE', 'LOW', 'STANDARDS', 'OF', 'MORALS', 'ON', 'THE', 'DISPOSITION', 'OF', 'MEN', 'TO', 'VIOLENCE', 'THE', 'EXISTENCE', 'OF', 'AN', 'AUTHORITY', 'TO', 'RESTRAIN', 'SUCH', 'VIOLENCE', 'WAS', 'AN', 'ADVANTAGE', 'BECAUSE', 'THE', 'VIOLENCE', 'OF', 'THE', 'GOVERNMENT', 'WAS', 'LESS', 'THAN', 'THE', 'VIOLENCE', 'OF', 'INDIVIDUALS', 'ONE', 'CANNOT', 'BUT', 'SEE', 'THAT', 
'THIS', 'ADVANTAGE', 'COULD', 'NOT', 'BE', 'LASTING'] +4350-9170-0014-2764: ref=['BETWEEN', 'THE', 'MEMBERS', 'OF', 'ONE', 'STATE', 'SUBJECT', 'TO', 'A', 'SINGLE', 'AUTHORITY', 'THE', 'STRIFE', 'BETWEEN', 'INDIVIDUALS', 'SEEMS', 'STILL', 'LESS', 'AND', 'THE', 'LIFE', 'OF', 'THE', 'STATE', 'SEEMS', 'EVEN', 'MORE', 'SECURE'] +4350-9170-0014-2764: hyp=['BETWEEN', 'THE', 'MEMBERS', 'OF', 'ONE', 'STATE', 'SUBJECT', 'TO', 'A', 'SINGLE', 'AUTHORITY', 'THE', 'STRIPE', 'BETWEEN', 'THE', 'INDIVIDUALS', 'SEEMS', 'STILL', 'LESS', 'AND', 'A', 'LIFE', 'OF', 'THE', 'STATE', 'SEEMS', 'EVEN', 'MORE', 'SECURE'] +4350-9170-0015-2765: ref=['IT', 'WAS', 'PRODUCED', 'ON', 'ONE', 'HAND', 'BY', 'THE', 'NATURAL', 'GROWTH', 'OF', 'POPULATION', 'AND', 'ON', 'THE', 'OTHER', 'BY', 'STRUGGLE', 'AND', 'CONQUEST'] +4350-9170-0015-2765: hyp=['IT', 'WAS', 'PRODUCED', 'ON', 'ONE', 'HAND', 'BY', 'THE', 'NATURAL', 'GROWTH', 'OF', 'POPULATION', 'AND', 'ON', 'THE', 'OTHER', 'BY', 'STRUGGLE', 'AND', 'CONQUEST'] +4350-9170-0016-2766: ref=['AFTER', 'CONQUEST', 'THE', 'POWER', 'OF', 'THE', 'EMPEROR', 'PUTS', 'AN', 'END', 'TO', 'INTERNAL', 'DISSENSIONS', 'AND', 'SO', 'THE', 'STATE', 'CONCEPTION', 'OF', 'LIFE', 'JUSTIFIES', 'ITSELF'] +4350-9170-0016-2766: hyp=['AFTER', 'CONQUEST', 'THE', 'POWER', 'OF', 'THE', 'EMPEROR', 'PUTS', 'AN', 'END', 'TO', 'INTERNAL', 'DISSENSIONS', 'AND', 'SO', 'THE', 'STATE', 'CONCEPTION', 'OF', 'LIFE', 'JUSTIFIES', 'ITSELF'] +4350-9170-0017-2767: ref=['BUT', 'THIS', 'JUSTIFICATION', 'IS', 'NEVER', 'MORE', 'THAN', 'TEMPORARY'] +4350-9170-0017-2767: hyp=['BUT', 'THIS', 'JUSTIFICATION', 'IS', 'NEVER', 'MORE', 'THAN', 'TEMPORARY'] +4350-9170-0018-2768: ref=['INTERNAL', 'DISSENSIONS', 'DISAPPEAR', 'ONLY', 'IN', 'PROPORTION', 'TO', 'THE', 'DEGREE', 'OF', 'OPPRESSION', 'EXERTED', 'BY', 'THE', 'AUTHORITY', 'OVER', 'THE', 'DISSENTIENT', 'INDIVIDUALS'] +4350-9170-0018-2768: hyp=['AND', 'HERALD', 'DISSENSIONS', 'DISAPPEAR', 'ONLY', 'IN', 'PROPORTION', 'TO', 'THE', 'DEGREE', 'OF', 'OPPRESSION', 'EXERTED', 'BY', 'THE', 'AUTHORITY', 'OVER', 'THE', 'DYSINTHIAN', 'INDIVIDUALS'] +4350-9170-0019-2769: ref=['GOVERNMENT', 'AUTHORITY', 'EVEN', 'IF', 'IT', 'DOES', 'SUPPRESS', 'PRIVATE', 'VIOLENCE', 'ALWAYS', 'INTRODUCES', 'INTO', 'THE', 'LIFE', 'OF', 'MEN', 'FRESH', 'FORMS', 'OF', 'VIOLENCE', 'WHICH', 'TEND', 'TO', 'BECOME', 'GREATER', 'AND', 'GREATER', 'IN', 'PROPORTION', 'TO', 'THE', 'DURATION', 'AND', 'STRENGTH', 'OF', 'THE', 'GOVERNMENT'] +4350-9170-0019-2769: hyp=['GOVERN', 'AUTHORITY', 'EVEN', 'IF', 'IT', 'DOES', 'SUPPRESS', 'PRIVATE', 'VIOLENCE', 'ALWAYS', 'INTRODUCES', 'INTO', 'THE', 'LIFE', 'OF', 'MEN', 'FRESH', 'FORMS', 'OF', 'VIOLENCE', 'WHICH', 'TEND', 'TO', 'BECOME', 'GREATER', 'AND', 'GREATER', 'IN', 'PROPORTION', 'TO', 'THE', 'DURATION', 'AND', 'STRENGTH', 'OF', 'THE', 'GOVERNMENT'] +4350-9170-0020-2770: ref=['AND', 'THEREFORE', 'THE', 'OPPRESSION', 'OF', 'THE', 'OPPRESSED', 'ALWAYS', 'GOES', 'ON', 'GROWING', 'UP', 'TO', 'THE', 'FURTHEST', 'LIMIT', 'BEYOND', 'WHICH', 'IT', 'CANNOT', 'GO', 'WITHOUT', 'KILLING', 'THE', 'GOOSE', 'WITH', 'THE', 'GOLDEN', 'EGGS'] +4350-9170-0020-2770: hyp=['AND', 'THEREFORE', 'THE', 'OPPRESSION', 'OF', 'THE', 'OPPRESSED', 'ALWAYS', 'GOES', 'ON', 'GROWING', 'UP', 'TO', 'THE', 'FURTHEST', 'LIMIT', 'BEYOND', 'WHICH', 'IT', 'CANNOT', 'GO', 'WITHOUT', 'KILLING', 'THE', 'GOOSE', 'WITH', 'THE', 'GOLD', 'NICE'] +4350-9170-0021-2771: ref=['THE', 'MOST', 'CONVINCING', 'EXAMPLE', 'OF', 'THIS', 'IS', 'TO', 'BE', 'FOUND', 'IN', 'THE', 'CONDITION', 'OF', 'THE', 'WORKING', 'CLASSES', 'OF', 'OUR', 
'EPOCH', 'WHO', 'ARE', 'IN', 'REALITY', 'NO', 'BETTER', 'THAN', 'THE', 'SLAVES', 'OF', 'ANCIENT', 'TIMES', 'SUBDUED', 'BY', 'CONQUEST'] +4350-9170-0021-2771: hyp=['THE', 'MOST', 'CONVINCING', 'EXAMPLE', 'OF', 'THIS', 'IS', 'TO', 'BE', 'FOUND', 'IN', 'THE', 'CONDITION', 'OF', 'THE', 'WORKING', 'CLASSES', 'OF', 'OUR', 'EPOCH', 'WHO', 'ARE', 'IN', 'REALITY', 'NO', 'BETTER', 'THAN', 'THE', 'SLAVES', 'OF', 'ANCIENT', 'TIMES', 'SUBDUED', 'BY', 'CONQUEST'] +4350-9170-0022-2772: ref=['SO', 'IT', 'HAS', 'ALWAYS', 'BEEN'] +4350-9170-0022-2772: hyp=['SO', 'IT', 'IS', 'ALWAYS', 'THEN'] +4350-9170-0023-2773: ref=['FOOTNOTE', 'THE', 'FACT', 'THAT', 'IN', 'AMERICA', 'THE', 'ABUSES', 'OF', 'AUTHORITY', 'EXIST', 'IN', 'SPITE', 'OF', 'THE', 'SMALL', 'NUMBER', 'OF', 'THEIR', 'TROOPS', 'NOT', 'ONLY', 'FAILS', 'TO', 'DISPROVE', 'THIS', 'POSITION', 'BUT', 'POSITIVELY', 'CONFIRMS', 'IT'] +4350-9170-0023-2773: hyp=['FOOTNOTE', 'THE', 'FACT', 'THAT', 'IN', 'AMERICA', 'THE', 'ABUSES', 'OF', 'AUTHORITY', 'EXIST', 'IN', 'SPITE', 'OF', 'THE', 'SMALL', 'NUMBER', 'OF', 'THEIR', 'TROOPS', 'NOT', 'ONLY', 'FAILS', 'TO', 'DISPROVE', 'THIS', 'POSITION', 'BUT', 'POSITIVELY', 'CONFIRMS', 'IT'] +4350-9170-0024-2774: ref=['THE', 'UPPER', 'CLASSES', 'KNOW', 'THAT', 'AN', 'ARMY', 'OF', 'FIFTY', 'THOUSAND', 'WILL', 'SOON', 'BE', 'INSUFFICIENT', 'AND', 'NO', 'LONGER', 'RELYING', 'ON', "PINKERTON'S", 'MEN', 'THEY', 'FEEL', 'THAT', 'THE', 'SECURITY', 'OF', 'THEIR', 'POSITION', 'DEPENDS', 'ON', 'THE', 'INCREASED', 'STRENGTH', 'OF', 'THE', 'ARMY'] +4350-9170-0024-2774: hyp=['THE', 'UPPER', 'CLASSES', 'KNOW', 'THAT', 'AN', 'ARMY', 'OF', 'FIFTY', 'THOUSAND', 'WILL', 'SOON', 'BE', 'INSUFFICIENT', 'AND', 'NO', 'LONGER', 'RELYING', 'ON', "PINKERTON'S", 'MEN', 'THEY', 'FEEL', 'THAT', 'SECURITY', 'OF', 'THEIR', 'POSITION', 'DEPENDS', 'ON', 'THE', 'INCREASED', 'STRENGTH', 'OF', 'THE', 'ARMY'] +4350-9170-0025-2775: ref=['THE', 'REASON', 'TO', 'WHICH', 'HE', 'GAVE', 'EXPRESSION', 'IS', 'ESSENTIALLY', 'THE', 'SAME', 'AS', 'THAT', 'WHICH', 'MADE', 'THE', 'FRENCH', 'KINGS', 'AND', 'THE', 'POPES', 'ENGAGE', 'SWISS', 'AND', 'SCOTCH', 'GUARDS', 'AND', 'MAKES', 'THE', 'RUSSIAN', 'AUTHORITIES', 'OF', 'TO', 'DAY', 'SO', 'CAREFULLY', 'DISTRIBUTE', 'THE', 'RECRUITS', 'SO', 'THAT', 'THE', 'REGIMENTS', 'FROM', 'THE', 'FRONTIERS', 'ARE', 'STATIONED', 'IN', 'CENTRAL', 'DISTRICTS', 'AND', 'THE', 'REGIMENTS', 'FROM', 'THE', 'CENTER', 'ARE', 'STATIONED', 'ON', 'THE', 'FRONTIERS'] +4350-9170-0025-2775: hyp=['THE', 'REASON', 'TO', 'WHICH', 'HE', 'GAVE', 'EXPRESSION', 'IS', 'ESSENTIALLY', 'THE', 'SAME', 'AS', 'THAT', 'WHICH', 'MADE', 'THE', 'FRENCH', 'KINGS', 'AND', 'THE', 'POPES', 'ENGAGE', 'SWISS', 'AND', 'SCOTCH', 'GUARDS', 'AND', 'MAKES', 'THE', 'RUSSIAN', 'AUTHORITIES', 'OF', 'TO', 'DAY', 'SO', 'CAREFULLY', 'DISTRIBUTE', 'THE', 'RECRUITS', 'SO', 'THAT', 'THE', 'REGIMENTS', 'FROM', 'THE', 'FRONTIER', 'THEIR', 'STATIONED', 'IN', 'CENTRAL', 'DISTRICTS', 'AND', 'THE', 'REGIMENTS', 'FROM', 'THE', 'CENTRE', 'ARE', 'STATIONED', 'ON', 'THE', 'FRONTIERS'] +4350-9170-0026-2776: ref=['THE', 'MEANING', 'OF', "CAPRIVI'S", 'SPEECH', 'PUT', 'INTO', 'PLAIN', 'LANGUAGE', 'IS', 'THAT', 'FUNDS', 'ARE', 'NEEDED', 'NOT', 'TO', 'RESIST', 'FOREIGN', 'FOES', 'BUT', 'TO', 'BUY', 'UNDER', 'OFFICERS', 'TO', 'BE', 'READY', 'TO', 'ACT', 'AGAINST', 'THE', 'ENSLAVED', 'TOILING', 'MASSES'] +4350-9170-0026-2776: hyp=['THE', 'MEANING', 'OF', 'THE', 'PREVIOUS', 'SPEECH', 'PUT', 'INTO', 'PLAIN', 'LANGUAGE', 'IS', 'THAT', 'FUNDS', 'ARE', 'NEEDED', 'NOT', 'TO', 'RESIST', 'FOREIGN', 'FOES', 'BUT', 
'TO', 'BUY', 'UNDER', 'OFFICERS', 'TO', 'BE', 'READY', 'TO', 'ACT', 'AGAINST', 'THE', 'ENSLAVED', 'TOILING', 'MASSES'] +4350-9170-0027-2777: ref=['AND', 'THIS', 'ABNORMAL', 'ORDER', 'OF', 'THINGS', 'IS', 'MAINTAINED', 'BY', 'THE', 'ARMY'] +4350-9170-0027-2777: hyp=['AND', 'THIS', 'ABNORMAL', 'ORDER', 'OF', 'THANKS', 'IS', 'MAINTAINED', 'BY', 'THE', 'ARMY'] +4350-9170-0028-2778: ref=['BUT', 'THERE', 'IS', 'NOT', 'ONLY', 'ONE', 'GOVERNMENT', 'THERE', 'ARE', 'OTHER', 'GOVERNMENTS', 'EXPLOITING', 'THEIR', 'SUBJECTS', 'BY', 'VIOLENCE', 'IN', 'THE', 'SAME', 'WAY', 'AND', 'ALWAYS', 'READY', 'TO', 'POUNCE', 'DOWN', 'ON', 'ANY', 'OTHER', 'GOVERNMENT', 'AND', 'CARRY', 'OFF', 'THE', 'FRUITS', 'OF', 'THE', 'TOIL', 'OF', 'ITS', 'ENSLAVED', 'SUBJECTS'] +4350-9170-0028-2778: hyp=['BUT', 'THERE', 'IS', 'NOT', 'ONLY', 'ONE', 'GOVERNMENT', 'THERE', 'ARE', 'OTHER', 'GOVERNMENTS', 'EXPLODING', 'THEIR', 'SUBJECTS', 'BY', 'VIOLENCE', 'IN', 'THE', 'SAME', 'WAY', 'AND', 'ARE', 'ALWAYS', 'READY', 'TO', 'POUNCE', 'DOWN', 'ON', 'ANY', 'OTHER', 'GOVERNMENT', 'AND', 'CARRY', 'OFF', 'THE', 'FRUITS', 'OF', 'THE', 'TOIL', 'OF', 'ITS', 'ENSLAVE', 'SUBJECTS'] +4350-9170-0029-2779: ref=['AND', 'SO', 'EVERY', 'GOVERNMENT', 'NEEDS', 'AN', 'ARMY', 'ALSO', 'TO', 'PROTECT', 'ITS', 'BOOTY', 'FROM', 'ITS', 'NEIGHBOR', 'BRIGANDS'] +4350-9170-0029-2779: hyp=['AND', 'SO', 'EVERY', 'GOVERNMENT', 'NEEDS', 'AN', 'ARMY', 'ALSO', 'TO', 'PROTECT', 'ITS', 'BOOTY', 'FROM', 'ITS', 'NEIGHBOR', 'BRIGANDS'] +4350-9170-0030-2780: ref=['THIS', 'INCREASE', 'IS', 'CONTAGIOUS', 'AS', 'MONTESQUIEU', 'POINTED', 'OUT', 'ONE', 'HUNDRED', 'FIFTY', 'YEARS', 'AGO'] +4350-9170-0030-2780: hyp=['THIS', 'INCREASES', 'CONTAGIOUS', 'AS', 'MONTESQUIEU', 'POINTED', 'OUT', 'A', 'HUNDRED', 'AND', 'FIFTY', 'YEARS', 'AGO'] +4350-9170-0031-2781: ref=['EVERY', 'INCREASE', 'IN', 'THE', 'ARMY', 'OF', 'ONE', 'STATE', 'WITH', 'THE', 'AIM', 'OF', 'SELF', 'DEFENSE', 'AGAINST', 'ITS', 'SUBJECTS', 'BECOMES', 'A', 'SOURCE', 'OF', 'DANGER', 'FOR', 'NEIGHBORING', 'STATES', 'AND', 'CALLS', 'FOR', 'A', 'SIMILAR', 'INCREASE', 'IN', 'THEIR', 'ARMIES'] +4350-9170-0031-2781: hyp=['EVERY', 'INCREASE', 'IN', 'THE', 'ARMY', 'OF', 'ONE', 'STATE', 'WITH', 'THE', 'AIM', 'OF', 'SELF', 'DEFENCE', 'AGAINST', 'ITS', 'SUBJECTS', 'BECOMES', 'A', 'SORT', 'OF', 'DANGER', 'FOR', 'NEIGHBORING', 'STATES', 'AND', 'CALLS', 'FOR', 'A', 'SIMILAR', 'INCREASE', 'IN', 'THEIR', 'ARMIES'] +4350-9170-0032-2782: ref=['THE', 'DESPOTISM', 'OF', 'A', 'GOVERNMENT', 'ALWAYS', 'INCREASES', 'WITH', 'THE', 'STRENGTH', 'OF', 'THE', 'ARMY', 'AND', 'ITS', 'EXTERNAL', 'SUCCESSES', 'AND', 'THE', 'AGGRESSIVENESS', 'OF', 'A', 'GOVERNMENT', 'INCREASES', 'WITH', 'ITS', 'INTERNAL', 'DESPOTISM'] +4350-9170-0032-2782: hyp=['THE', 'DESPOTISM', 'OF', 'THE', 'GOVERNMENT', 'ALWAYS', 'INCREASES', 'WITH', 'THE', 'STRENGTH', 'OF', 'THE', 'ARMY', 'AND', 'ITS', 'EXTERNAL', 'SUCCESSES', 'AND', 'THE', 'AGGRESSIVENESS', 'OF', 'A', 'GOVERNMENT', 'INCREASES', 'WITH', 'ITS', 'INTERNAL', 'DESPOTISM'] +4350-9170-0033-2783: ref=['THE', 'RIVALRY', 'OF', 'THE', 'EUROPEAN', 'STATES', 'IN', 'CONSTANTLY', 'INCREASING', 'THEIR', 'FORCES', 'HAS', 'REDUCED', 'THEM', 'TO', 'THE', 'NECESSITY', 'OF', 'HAVING', 'RECOURSE', 'TO', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'SINCE', 'BY', 'THAT', 'MEANS', 'THE', 'GREATEST', 'POSSIBLE', 'NUMBER', 'OF', 'SOLDIERS', 'IS', 'OBTAINED', 'AT', 'THE', 'LEAST', 'POSSIBLE', 'EXPENSE'] +4350-9170-0033-2783: hyp=['THE', 'RIVALRY', 'OF', 'THE', 'EUROPEAN', 'STATES', 'AND', 'CONSTANTLY', 'INCREASING', 'THEIR', 'FORCES', 'HAS', 'REDUCED', 
'THEM', 'TO', 'THE', 'NECESSITY', 'OF', 'HAVING', 'RECOURSE', 'TO', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'SINCE', 'BY', 'THAT', 'MEANS', 'THE', 'GREATEST', 'POSSIBLE', 'NUMBER', 'OF', 'SOLDIERS', 'IS', 'OBTAINED', 'AT', 'THE', 'LEAST', 'POSSIBLE', 'EXPENSE'] +4350-9170-0034-2784: ref=['AND', 'BY', 'THIS', 'MEANS', 'ALL', 'CITIZENS', 'ARE', 'UNDER', 'ARMS', 'TO', 'SUPPORT', 'THE', 'INIQUITIES', 'PRACTICED', 'UPON', 'THEM', 'ALL', 'CITIZENS', 'HAVE', 'BECOME', 'THEIR', 'OWN', 'OPPRESSORS'] +4350-9170-0034-2784: hyp=['AND', 'BY', 'THIS', 'MEANS', 'ALL', 'CITIZENS', 'ARE', 'UNDER', 'ARMS', 'TO', 'SUPPORT', 'THE', 'INIQUITIES', 'PRACTISED', 'UPON', 'THEM', 'ALL', 'CITIZENS', 'HAVE', 'BECOME', 'THEIR', 'OWN', 'IMPRESSORS'] +4350-9170-0035-2785: ref=['THIS', 'INCONSISTENCY', 'HAS', 'BECOME', 'OBVIOUS', 'IN', 'UNIVERSAL', 'MILITARY', 'SERVICE'] +4350-9170-0035-2785: hyp=['THIS', 'INCONSISTENCY', 'HAS', 'BECOME', 'OBVIOUS', 'AND', 'UNIVERSAL', 'MILITARY', 'SERVICE'] +4350-9170-0036-2786: ref=['IN', 'FACT', 'THE', 'WHOLE', 'SIGNIFICANCE', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'CONSISTS', 'IN', "MAN'S", 'RECOGNITION', 'OF', 'THE', 'BARBARITY', 'OF', 'STRIFE', 'BETWEEN', 'INDIVIDUALS', 'AND', 'THE', 'TRANSITORINESS', 'OF', 'PERSONAL', 'LIFE', 'ITSELF', 'AND', 'THE', 'TRANSFERENCE', 'OF', 'THE', 'AIM', 'OF', 'LIFE', 'TO', 'GROUPS', 'OF', 'PERSONS'] +4350-9170-0036-2786: hyp=['IN', 'FACT', 'THE', 'WHOLE', 'SIGNIFICANCE', 'OF', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'CONSISTS', 'IN', "MAN'S", 'RECOGNITION', 'OF', 'THE', 'BARBARITY', 'OF', 'STRIFE', 'BETWEEN', 'INDIVIDUALS', 'AND', 'THE', 'TRANSITORINESS', 'OF', 'PERSONAL', 'LIFE', 'ITSELF', 'AND', 'THE', 'TRANSFERENCE', 'OF', 'THE', 'AIM', 'OF', 'LIFE', 'THE', 'GROUPS', 'OF', 'PERSONS'] +4350-9170-0037-2787: ref=['BUT', 'WITH', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'IT', 'COMES', 'TO', 'PASS', 'THAT', 'MEN', 'AFTER', 'MAKING', 'EVERY', 'SACRIFICE', 'TO', 'GET', 'RID', 'OF', 'THE', 'CRUELTY', 'OF', 'STRIFE', 'AND', 'THE', 'INSECURITY', 'OF', 'EXISTENCE', 'ARE', 'CALLED', 'UPON', 'TO', 'FACE', 'ALL', 'THE', 'PERILS', 'THEY', 'HAD', 'MEANT', 'TO', 'AVOID'] +4350-9170-0037-2787: hyp=['BUT', 'WITH', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'IT', 'COMES', 'TO', 'PASS', 'THAT', 'MEN', 'AFTER', 'MAKING', 'EVERY', 'SACRIFICE', 'TO', 'GET', 'RID', 'OF', 'THE', 'CRUELTY', 'OF', 'STRIFE', 'AND', 'THE', 'INSECURITY', 'OF', 'EXISTENCE', 'ARE', 'CALLED', 'UPON', 'TO', 'FACE', 'ALL', 'THE', 'PERILS', 'THEY', 'HAD', 'MEANT', 'TO', 'AVOID'] +4350-9170-0038-2788: ref=['BUT', 'INSTEAD', 'OF', 'DOING', 'THAT', 'THEY', 'EXPOSE', 'THE', 'INDIVIDUALS', 'TO', 'THE', 'SAME', 'NECESSITY', 'OF', 'STRIFE', 'SUBSTITUTING', 'STRIFE', 'WITH', 'INDIVIDUALS', 'OF', 'OTHER', 'STATES', 'FOR', 'STRIFE', 'WITH', 'NEIGHBORS'] +4350-9170-0038-2788: hyp=['BUT', 'INSTEAD', 'OF', 'DOING', 'THAT', 'THEY', 'EXPOSED', 'THE', 'INDIVIDUALS', 'TO', 'THE', 'SAME', 'NECESSITY', 'OF', 'STRIFE', 'SUBSTITUTING', 'STRIFE', 'WITH', 'INDIVIDUALS', 'OF', 'OTHER', 'STATES', 'FOR', 'STRIFE', 'WITH', 'NEIGHBORS'] +4350-9170-0039-2789: ref=['THE', 'TAXES', 'RAISED', 'FROM', 'THE', 'PEOPLE', 'FOR', 'WAR', 'PREPARATIONS', 'ABSORB', 'THE', 'GREATER', 'PART', 'OF', 'THE', 'PRODUCE', 'OF', 'LABOR', 'WHICH', 'THE', 'ARMY', 'OUGHT', 'TO', 'DEFEND'] +4350-9170-0039-2789: hyp=['THE', 'TAXES', 'RAISED', 'FROM', 'THE', 'PEOPLE', 'FOR', 'WAR', 'PREPARATIONS', 'ABSORB', 'THE', 'GREATER', 'PART', 'OF', 'THE', 'PRODUCE', 'OF', 'LABOR', 'WHICH', 'THE', 'ARMY', 'OUGHT', 'TO', 'DEFEND'] +4350-9170-0040-2790: ref=['THE', 
'DANGER', 'OF', 'WAR', 'EVER', 'READY', 'TO', 'BREAK', 'OUT', 'RENDERS', 'ALL', 'REFORMS', 'OF', 'LIFE', 'SOCIAL', 'LIFE', 'VAIN', 'AND', 'FRUITLESS'] +4350-9170-0040-2790: hyp=['THE', 'DANGER', 'OF', 'WAR', 'EVER', 'READY', 'TO', 'BREAK', 'OUT', 'RENDERS', 'ALL', 'REFORMS', 'OF', 'LIFE', 'SOCIAL', 'LIFE', 'VAIN', 'AND', 'FRUITLESS'] +4350-9170-0041-2791: ref=['BUT', 'THE', 'FATAL', 'SIGNIFICANCE', 'OF', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'AS', 'THE', 'MANIFESTATION', 'OF', 'THE', 'CONTRADICTION', 'INHERENT', 'IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IS', 'NOT', 'ONLY', 'APPARENT', 'IN', 'THAT'] +4350-9170-0041-2791: hyp=['BUT', 'THE', 'FIELD', 'SIGNIFICANCE', 'OF', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'AS', 'THE', 'MANIFESTATION', 'OF', 'THE', 'CONTRADICTION', 'INHERENT', 'IN', 'THE', 'SOCIAL', 'CONCEPTION', 'OF', 'LIFE', 'IS', 'NOT', 'ONLY', 'APPARENT', 'IN', 'THAT'] +4350-9170-0042-2792: ref=['GOVERNMENTS', 'ASSERT', 'THAT', 'ARMIES', 'ARE', 'NEEDED', 'ABOVE', 'ALL', 'FOR', 'EXTERNAL', 'DEFENSE', 'BUT', 'THAT', 'IS', 'NOT', 'TRUE'] +4350-9170-0042-2792: hyp=['GOVERNMENTS', 'ASSERT', 'THAT', 'ARMIES', 'ARE', 'NEEDED', 'ABOVE', 'ALL', 'FOR', 'EXTERNAL', 'DEFENCE', 'BUT', 'THAT', 'IS', 'NOT', 'TRUE'] +4350-9170-0043-2793: ref=['THEY', 'ARE', 'NEEDED', 'PRINCIPALLY', 'AGAINST', 'THEIR', 'SUBJECTS', 'AND', 'EVERY', 'MAN', 'UNDER', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'BECOMES', 'AN', 'ACCOMPLICE', 'IN', 'ALL', 'THE', 'ACTS', 'OF', 'VIOLENCE', 'OF', 'THE', 'GOVERNMENT', 'AGAINST', 'THE', 'CITIZENS', 'WITHOUT', 'ANY', 'CHOICE', 'OF', 'HIS', 'OWN'] +4350-9170-0043-2793: hyp=['THERE', 'NEEDED', 'PRINCIPALLY', 'AGAINST', 'THEIR', 'SUBJECTS', 'AND', 'EVERY', 'MAN', 'UNDER', 'UNIVERSAL', 'MILITARY', 'SERVICE', 'BECOMES', 'AN', 'ACCOMPLICE', 'IN', 'ALL', 'THAT', 'ACTS', 'OF', 'VIOLENCE', 'OF', 'THE', 'GOVERNMENT', 'AGAINST', 'THE', 'CITIZENS', 'WITHOUT', 'ANY', 'CHOICE', 'OF', 'HIS', 'OWN'] +4350-9170-0044-2794: ref=['AND', 'FOR', 'THE', 'SAKE', 'OF', 'WHAT', 'AM', 'I', 'MAKING', 'THEM'] +4350-9170-0044-2794: hyp=['AND', 'FOR', 'THE', 'SAKE', 'OF', 'WHAT', 'AM', 'I', 'MAKING', 'THEM'] +4350-9170-0045-2795: ref=['I', 'AM', 'EXPECTED', 'FOR', 'THE', 'SAKE', 'OF', 'THE', 'STATE', 'TO', 'MAKE', 'THESE', 'SACRIFICES', 'TO', 'RENOUNCE', 'EVERYTHING', 'THAT', 'CAN', 'BE', 'PRECIOUS', 'TO', 'MAN', 'PEACE', 'FAMILY', 'SECURITY', 'AND', 'HUMAN', 'DIGNITY'] +4350-9170-0045-2795: hyp=['I', 'AM', 'EXPECTED', 'FOR', 'THE', 'SAKE', 'OF', 'THE', 'STATE', 'TO', 'MAKE', 'THESE', 'SACRIFICES', 'TO', 'RENOUNCE', 'EVERYTHING', 'THAT', 'CAN', 'BE', 'PRECIOUS', 'TO', 'MAN', 'PEACE', 'FAMILY', 'SECURITY', 'AND', 'HUMAN', 'DIGNITY'] +4350-9170-0046-2796: ref=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'SAY', 'WE', 'SHOULD', 'BE', 'EXPOSED', 'TO', 'THE', 'ATTACKS', 'OF', 'EVIL', 'DISPOSED', 'PERSONS', 'IN', 'OUR', 'OWN', 'COUNTRY'] +4350-9170-0046-2796: hyp=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'SAY', 'WE', 'SHOULD', 'BE', 'EXPOSED', 'TO', 'THE', 'ATTACKS', 'OF', 'EVIL', 'DISPOSED', 'PERSONS', 'IN', 'OUR', 'OWN', 'COUNTRY'] +4350-9170-0047-2797: ref=['WE', 'KNOW', 'NOW', 'THAT', 'THREATS', 'AND', 'PUNISHMENTS', 'CANNOT', 'DIMINISH', 'THEIR', 'NUMBER', 'THAT', 'THAT', 'CAN', 'ONLY', 'BE', 'DONE', 'BY', 'CHANGE', 'OF', 'ENVIRONMENT', 'AND', 'MORAL', 'INFLUENCE'] +4350-9170-0047-2797: hyp=['WE', 'NOW', 'KNOW', 'THAT', 'THREATS', 'AND', 'PUNISHMENTS', 'CANNOT', 'DIMINISH', 'THEIR', 'NUMBER', 'THAT', 'THAT', 'CAN', 'ONLY', 'BE', 'DONE', 'BY', 'CHANGE', 'OF', 'ENVIRONMENT', 'AND', 'MORAL', 'INFLUENCE'] 
+4350-9170-0048-2798: ref=['SO', 'THAT', 'THE', 'JUSTIFICATION', 'OF', 'STATE', 'VIOLENCE', 'ON', 'THE', 'GROUND', 'OF', 'THE', 'PROTECTION', 'IT', 'GIVES', 'US', 'FROM', 'EVIL', 'DISPOSED', 'PERSONS', 'EVEN', 'IF', 'IT', 'HAD', 'SOME', 'FOUNDATION', 'THREE', 'OR', 'FOUR', 'CENTURIES', 'AGO', 'HAS', 'NONE', 'WHATEVER', 'NOW'] +4350-9170-0048-2798: hyp=['SO', 'THAT', 'THIS', 'JUSTIFICATION', 'OF', 'STATE', 'VIOLENCE', 'ON', 'THE', 'GROUND', 'OF', 'THE', 'PROTECTION', 'IT', 'GIVES', 'US', 'FROM', 'EVIL', 'DISPOSE', 'PERSONS', 'EVEN', 'IF', 'IT', 'HAD', 'SOME', 'FOUNDATION', 'THREE', 'OR', 'FOUR', 'CENTURIES', 'AGO', 'HAS', 'NONE', 'WHATEVER', 'NOW'] +4350-9170-0049-2799: ref=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'TELL', 'US', 'WE', 'SHOULD', 'NOT', 'HAVE', 'ANY', 'RELIGION', 'EDUCATION', 'CULTURE', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'SO', 'ON'] +4350-9170-0049-2799: hyp=['EXCEPT', 'FOR', 'THE', 'STATE', 'THEY', 'TELL', 'US', 'WE', 'SHOULD', 'NOT', 'HAVE', 'ANY', 'RELIGION', 'EDUCATION', 'CULTURE', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'SO', 'ON'] +4350-9170-0050-2800: ref=['WITHOUT', 'THE', 'STATE', 'MEN', 'WOULD', 'NOT', 'HAVE', 'BEEN', 'ABLE', 'TO', 'FORM', 'THE', 'SOCIAL', 'INSTITUTIONS', 'NEEDED', 'FOR', 'DOING', 'ANY', 'THING'] +4350-9170-0050-2800: hyp=['WITHOUT', 'THE', 'STATE', 'MEN', 'WOULD', 'NOT', 'HAVE', 'BEEN', 'ABLE', 'TO', 'FORM', 'THE', 'SOCIAL', 'INSTITUTIONS', 'NEEDED', 'FOR', 'DOING', 'ANYTHING'] +4350-9170-0051-2801: ref=['THIS', 'ARGUMENT', 'TOO', 'WAS', 'WELL', 'FOUNDED', 'ONLY', 'SOME', 'CENTURIES', 'AGO'] +4350-9170-0051-2801: hyp=['THIS', 'ARGUMENT', 'TOO', 'WAS', 'WELL', 'FOUNDED', 'ONLY', 'SOME', 'CENTURIES', 'AGO'] +4350-9170-0052-2802: ref=['THE', 'GREAT', 'EXTENSION', 'OF', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'INTERCHANGE', 'OF', 'IDEAS', 'HAS', 'MADE', 'MEN', 'COMPLETELY', 'ABLE', 'TO', 'DISPENSE', 'WITH', 'STATE', 'AID', 'IN', 'FORMING', 'SOCIETIES', 'ASSOCIATIONS', 'CORPORATIONS', 'AND', 'CONGRESSES', 'FOR', 'SCIENTIFIC', 'ECONOMIC', 'AND', 'POLITICAL', 'OBJECTS'] +4350-9170-0052-2802: hyp=['THE', 'GREAT', 'EXTENSION', 'OF', 'MEANS', 'OF', 'COMMUNICATION', 'AND', 'INTERCHANGE', 'OF', 'IDEAS', 'HAS', 'MADE', 'MEN', 'COMPLETELY', 'ABLE', 'TO', 'DISPENSE', 'WITH', 'STATE', 'AID', 'IN', 'FORMING', 'SOCIETIES', 'ASSOCIATIONS', 'CORPORATIONS', 'AND', 'CONGRESSES', 'FOR', 'SCIENTIFIC', 'AGONIC', 'AND', 'POLITICAL', 'OBJECTS'] +4350-9170-0053-2803: ref=['WITHOUT', 'GOVERNMENTS', 'NATIONS', 'WOULD', 'BE', 'ENSLAVED', 'BY', 'THEIR', 'NEIGHBORS'] +4350-9170-0053-2803: hyp=['WITHOUT', "GOVERNMENT'S", 'NATIONS', 'WOULD', 'BE', 'ENSLAVED', 'BY', 'THEIR', 'NEIGHBORS'] +4350-9170-0054-2804: ref=['THE', 'GOVERNMENT', 'THEY', 'TELL', 'US', 'WITH', 'ITS', 'ARMY', 'IS', 'NECESSARY', 'TO', 'DEFEND', 'US', 'FROM', 'NEIGHBORING', 'STATES', 'WHO', 'MIGHT', 'ENSLAVE', 'US'] +4350-9170-0054-2804: hyp=['THE', 'GOVERNMENT', 'THEY', 'TELL', 'US', 'WITH', 'ITS', 'ARMY', 'IS', 'NECESSARY', 'TO', 'DEFEND', 'US', 'FROM', 'NEIGHBORING', 'STATES', 'WHO', 'MIGHT', 'ENSLAVE', 'US'] +4350-9170-0055-2805: ref=['AND', 'IF', 'DEFENSE', 'AGAINST', 'BARBAROUS', 'NATIONS', 'IS', 'MEANT', 'ONE', 'THOUSANDTH', 'PART', 'OF', 'THE', 'TROOPS', 'NOW', 'UNDER', 'ARMS', 'WOULD', 'BE', 'AMPLY', 'SUFFICIENT', 'FOR', 'THAT', 'PURPOSE'] +4350-9170-0055-2805: hyp=['AND', 'IF', 'DEFENCE', 'AGAINST', 'BARBAROUS', 'NATIONS', 'IS', 'MEANT', 'ONE', 'THOUSANDTH', 'PART', 'OF', 'THE', 'TROOPS', 'NOW', 'UNDER', 'ARMS', 'WOULD', 'BE', 'AMPLY', 'SUFFICIENT', 'FOR', 'THAT', 'PURPOSE'] +4350-9170-0056-2806: 
ref=['THE', 'POWER', 'OF', 'THE', 'STATE', 'FAR', 'FROM', 'BEING', 'A', 'SECURITY', 'AGAINST', 'THE', 'ATTACKS', 'OF', 'OUR', 'NEIGHBORS', 'EXPOSES', 'US', 'ON', 'THE', 'CONTRARY', 'TO', 'MUCH', 'GREATER', 'DANGER', 'OF', 'SUCH', 'ATTACKS'] +4350-9170-0056-2806: hyp=['THE', 'POWER', 'OF', 'THE', 'STATE', 'FAR', 'FROM', 'BEING', 'A', 'SECURITY', 'AGAINST', 'THE', 'ATTACKS', 'OF', 'OUR', 'NEIGHBORS', 'EXPOSES', 'US', 'ON', 'THE', 'CONTRARY', 'TO', 'MUCH', 'GREATER', 'DANGER', 'OF', 'SUCH', 'ATTACKS'] +4350-9170-0057-2807: ref=['EVEN', 'LOOKING', 'AT', 'IT', 'PRACTICALLY', 'WEIGHING', 'THAT', 'IS', 'TO', 'SAY', 'ALL', 'THE', 'BURDENS', 'LAID', 'ON', 'HIM', 'BY', 'THE', 'STATE', 'NO', 'MAN', 'CAN', 'FAIL', 'TO', 'SEE', 'THAT', 'FOR', 'HIM', 'PERSONALLY', 'TO', 'COMPLY', 'WITH', 'STATE', 'DEMANDS', 'AND', 'SERVE', 'IN', 'THE', 'ARMY', 'WOULD', 'IN', 'THE', 'MAJORITY', 'OF', 'CASES', 'BE', 'MORE', 'DISADVANTAGEOUS', 'THAN', 'TO', 'REFUSE', 'TO', 'DO', 'SO'] +4350-9170-0057-2807: hyp=['EVEN', 'LOOKING', 'AT', 'IT', 'PRACTICALLY', 'WEIGHING', 'THAT', 'IS', 'TO', 'SAY', 'ALL', 'THE', 'BIRDS', 'LAID', 'ON', 'HIM', 'BY', 'THE', 'STATES', 'NO', 'MAN', 'CAN', 'FAIL', 'TO', 'SEE', 'THAT', 'FOR', 'HIM', 'PERSONALLY', 'TO', 'COMPLY', 'WITH', 'THE', 'STATE', 'DEMANDS', 'AND', 'SERVE', 'IN', 'THE', 'ARMY', 'WOULD', 'IN', 'THE', 'MAJORITY', 'OF', 'CASES', 'BE', 'MORE', 'DISADVANTAGEOUS', 'THAN', 'TO', 'REFUSE', 'TO', 'DO', 'SO'] +4350-9170-0058-2808: ref=['TO', 'RESIST', 'WOULD', 'NEED', 'INDEPENDENT', 'THOUGHT', 'AND', 'EFFORT', 'OF', 'WHICH', 'EVERY', 'MAN', 'IS', 'NOT', 'CAPABLE'] +4350-9170-0058-2808: hyp=['TO', 'RESIST', 'WOULD', 'NEED', 'INDEPENDENT', 'THOUGHT', 'AND', 'EFFORT', 'OF', 'WHICH', 'EVERY', 'MAN', 'IS', 'NOT', 'CAPABLE'] +4350-9170-0059-2809: ref=['SO', 'MUCH', 'FOR', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'OF', 'BOTH', 'LINES', 'OF', 'CONDUCT', 'FOR', 'A', 'MAN', 'OF', 'THE', 'WEALTHY', 'CLASSES', 'AN', 'OPPRESSOR'] +4350-9170-0059-2809: hyp=['SO', 'MUCH', 'FOR', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'OF', 'BOTH', 'LINES', 'OF', 'CONDUCT', 'FOR', 'A', 'MAN', 'OF', 'THE', 'WEALTHY', 'CLASS', 'AN', 'OPPRESSOR'] +4350-9170-0060-2810: ref=['FOR', 'A', 'MAN', 'OF', 'THE', 'POOR', 'WORKING', 'CLASS', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'WILL', 'BE', 'THE', 'SAME', 'BUT', 'WITH', 'A', 'GREAT', 'INCREASE', 'OF', 'DISADVANTAGES'] +4350-9170-0060-2810: hyp=['FOR', 'A', 'MAN', 'OF', 'THE', 'POOR', 'WORKING', 'CLASS', 'THE', 'ADVANTAGES', 'AND', 'DISADVANTAGES', 'WILL', 'BE', 'THE', 'SAME', 'BUT', 'WITH', 'A', 'GREAT', 'INCREASE', 'OF', 'DISADVANTAGES'] +4852-28311-0000-2098: ref=['SAY', 'YOU', 'KNOW', 'SUMTHIN'] +4852-28311-0000-2098: hyp=['SAY', 'YOU', 'KNOW', 'SOMETHING'] +4852-28311-0001-2099: ref=['CHRIS', 'LOOKED', 'FROM', 'A', 'NICKEL', 'PLATED', 'FLASHLIGHT', 'TO', 'A', 'CAR', 'JACK', 'AND', 'SPARK', 'PLUG'] +4852-28311-0001-2099: hyp=['CHRIS', 'LOOKED', 'FROM', 'A', 'NICKEL', 'PLATED', 'FLASH', 'LIKE', 'TO', 'A', 'CAR', 'JACK', 'AND', 'SPARK', 'PLUG'] +4852-28311-0002-2100: ref=['KNOW', 'WHO', 'NEEDS', 'A', 'JOB', 'BAD', "THAT'S", 'JAKEY', 'HARRIS'] +4852-28311-0002-2100: hyp=['NO', 'ONE', 'NEEDS', 'A', 'JOB', 'BAND', "THAT'S", 'JI', 'HARRIS'] +4852-28311-0003-2101: ref=['O', 'K', 'HE', 'SAID'] +4852-28311-0003-2101: hyp=['O', 'K', 'HE', 'SAID'] +4852-28311-0004-2102: ref=['ONLY', 'WHY', "DIDN'T", 'YOU', 'ASK', 'HIM', 'YOURSELF'] +4852-28311-0004-2102: hyp=['ONLY', 'WHY', "DIDN'T", 'YOU', 'ASK', 'HIM', 'YOURSELF'] +4852-28311-0005-2103: ref=['MIKE', 'BECAME', 'UNEASY', 
'AND', 'FISHED', 'AN', 'ELASTIC', 'BAND', 'OUT', 'OF', 'HIS', 'POCKET', 'MADE', 'A', 'FLICK', 'OF', 'PAPER', 'AND', 'SENT', 'IT', 'SOARING', 'OUT', 'INTO', 'M', 'STREET'] +4852-28311-0005-2103: hyp=['MIKE', 'BECAME', 'UNEASY', 'AND', 'FISHED', 'AND', 'MOLASTIC', 'BAND', 'OUT', 'OF', 'HIS', 'POCKET', 'MADE', 'A', 'FLICK', 'OF', 'PAPER', 'AND', 'SENT', 'IT', 'SOARING', 'OUT', 'IN', 'EM', 'STREET'] +4852-28311-0006-2104: ref=['WELL', 'HE', 'ADMITTED', 'I', 'DID'] +4852-28311-0006-2104: hyp=['WELL', 'HE', 'ADMITTED', 'I', 'DID'] +4852-28311-0007-2105: ref=['CHRIS', 'ASKED', 'AND', 'FOR', 'THE', 'FIRST', 'TIME', 'THAT', 'DAY', 'THE', 'HEAVY', 'WEIGHT', 'HE', 'CARRIED', 'WITHIN', 'HIM', 'LIFTED', 'AND', 'LIGHTENED', 'A', 'LITTLE'] +4852-28311-0007-2105: hyp=['CHRIS', 'ASKED', 'HIM', 'FOR', 'THE', 'FIRST', 'TIME', 'THAT', 'DAY', 'THAT', 'HEAVY', 'WEIGHT', 'HE', 'CARRIED', 'WITHIN', 'HIM', 'LIFTED', 'AND', 'LIGHTENED', 'A', 'LITTLE'] +4852-28311-0008-2106: ref=['THINK', 'HE', 'REALLY', 'NEEDS', 'IT', 'HE', 'PURSUED'] +4852-28311-0008-2106: hyp=['THEY', 'CAME', 'REALLY', 'NEEDS', 'IT', 'HE', 'PURSUED'] +4852-28311-0009-2107: ref=['HE', 'WOULD', 'HAVE', 'LIKED', 'TO', 'GET', 'THE', 'JOB', 'FOR', 'JAKEY', 'WHO', 'NEEDED', 'IT', 'BUT', 'SOMEHOW', 'THE', 'TASK', 'OF', 'FACING', 'MISTER', 'WICKER', 'ESPECIALLY', 'NOW', 'THAT', 'THE', 'LIGHT', 'WAS', 'GOING', 'AND', 'DUSK', 'EDGING', 'INTO', 'THE', 'STREETS', 'WAS', 'NOT', 'WHAT', 'CHRIS', 'HAD', 'INTENDED', 'FOR', 'ENDING', 'THE', 'AFTERNOON'] +4852-28311-0009-2107: hyp=['HE', 'WOULD', 'HAVE', 'LIKED', 'TO', 'GET', 'THE', 'JOB', 'FOR', 'JAKIE', 'WHO', 'NEEDED', 'IT', 'BUT', 'SOMEHOW', 'THE', 'TASK', 'OF', 'FACING', 'MISTER', 'WICKER', 'ESPECIALLY', 'NOW', 'THAT', 'THE', 'LIGHT', 'WAS', 'GOING', 'AND', 'DUSK', 'EDGED', 'INTO', 'THE', 'STREETS', 'WAS', 'NOT', 'WHAT', 'CHRISTEN', 'TENDED', 'FOR', 'ENDING', 'THE', 'AFTERNOON'] +4852-28311-0010-2108: ref=["MIKE'S", 'EXPRESSION', 'CHANGED', 'AT', 'ONCE', 'TO', 'ONE', 'OF', 'TRIUMPH', 'BUT', 'CHRIS', 'WAS', 'ONLY', 'PARTLY', 'ENCOURAGED'] +4852-28311-0010-2108: hyp=["MIKE'S", 'EXPRESSION', 'CHANGED', 'AT', 'WHAT', 'ONCE', 'TO', 'ONE', 'OF', 'TRIUMPH', 'BUT', 'CHRIS', 'WAS', 'ONLY', 'PARSLY', 'ENCOURAGED'] +4852-28311-0011-2109: ref=['BETCHA', "AREN'T", 'GOIN', 'AFTER', 'ALL', 'CHRIS', 'TURNED', 'ON', 'HIM'] +4852-28311-0011-2109: hyp=['PITCHER', 'AND', 'GOIN', 'AFTER', 'ALL', 'THIS', 'TURNED', 'ON', 'HIM'] +4852-28311-0012-2110: ref=['MIKE', 'WAS', 'STANDING', 'ON', 'THE', 'CORNER'] +4852-28311-0012-2110: hyp=['MIKE', 'WAS', 'STANDING', 'ON', 'THE', 'CORNER'] +4852-28311-0013-2111: ref=['AW', 'SHUCKS'] +4852-28311-0013-2111: hyp=['AH', 'SHOCKS'] +4852-28311-0014-2112: ref=['CHRIS', 'STARTED', 'OFF', 'ONCE', 'MORE', 'PASSING', 'THE', 'BLEAK', 'LITTLE', 'VICTORIAN', 'CHURCH', 'PERCHED', 'ON', 'THE', 'HILL', 'ABOVE', 'MISTER', "WICKER'S", 'HOUSE'] +4852-28311-0014-2112: hyp=['CHRIS', 'STARTED', 'OFF', 'ONCE', 'MORE', 'PASSING', 'A', 'BLEAK', 'LITTLE', 'VICTORIAN', 'CHURCH', 'PERCHED', 'ON', 'THE', 'HILL', 'ABOVE', 'MISTER', "WICKER'S", 'HOUSE'] +4852-28311-0015-2113: ref=['AN', 'EMPTY', 'LOT', 'CUT', 'INTO', 'BY', 'CHURCH', 'LANE', 'GAVE', 'A', 'LOOK', 'OF', 'ISOLATION', 'TO', 'THE', 'L', 'SHAPED', 'BRICK', 'BUILDING', 'THAT', 'SERVED', 'MISTER', 'WICKER', 'AS', 'BOTH', 'HOUSE', 'AND', 'PLACE', 'OF', 'BUSINESS'] +4852-28311-0015-2113: hyp=['AN', 'EMPTY', 'LOT', 'CUT', 'IN', 'INTO', 'BY', 'CHURCH', 'LANE', 'GAVE', 'A', 'LOOK', 'OF', 'ISOLATION', 'TO', 'THE', 'ELE', 'SHAPED', 'BRICK', 'BUILDING', 'THAT', 'SERVED', 
'MISTER', "WICKER'S", 'BOTH', 'HOUSE', 'AND', 'PLACE', 'OF', 'BUSINESS'] +4852-28311-0016-2114: ref=['THE', 'LONGER', 'WING', 'TOWARD', 'THE', 'BACK', 'HAD', 'A', 'BACK', 'DOOR', 'THAT', 'OPENED', 'ONTO', 'WATER', 'STREET', 'THE', 'SPACE', 'BETWEEN', 'THE', 'HOUSE', 'AND', 'WISCONSIN', 'AVENUE', 'HAD', 'BEEN', 'MADE', 'INTO', 'A', 'NEAT', 'OBLONG', 'FLOWER', 'GARDEN', 'FENCED', 'OFF', 'FROM', 'THE', 'SIDEWALK', 'BY', 'BOX', 'SHRUBS', 'AND', 'A', 'WHITE', 'PICKET', 'FENCE'] +4852-28311-0016-2114: hyp=['NO', 'LONGER', 'WINGED', 'TOWARD', 'THE', 'BACK', 'GOT', 'IT', 'BACK', 'DOOR', 'THAT', 'OPENED', 'ON', 'A', 'WATER', 'STREET', 'THE', 'SPACE', 'BETWEEN', 'THE', 'HOUSE', 'AND', 'WISCONSIN', 'ATTIGUE', 'HAD', 'BEEN', 'MADE', 'INTO', 'A', 'NEAT', 'OBLONG', 'FLOWER', 'GARDEN', 'FENCED', 'OFF', 'FROM', 'THE', 'SIDEWALK', 'BY', 'BOX', 'SHRUGS', 'AND', 'A', 'WHITE', 'PICKET', 'FENCE'] +4852-28311-0017-2115: ref=['A', 'LIVID', 'YELLOW', 'STAINED', 'THE', 'HORIZON', 'BEYOND', 'THE', 'FACTORIES', 'AND', 'GRAY', 'CLOUDS', 'LOWERED', 'AND', 'TUMBLED', 'ABOVE'] +4852-28311-0017-2115: hyp=['A', 'LIVID', 'YELLOW', 'STAINED', 'THE', 'HORIZON', 'BEYOND', 'THE', 'FACTORIES', 'AND', 'GLAY', 'CLOUDS', 'LOWERED', 'AND', 'TUMBLED', 'ABOVE'] +4852-28311-0018-2116: ref=['THE', 'AIR', 'WAS', 'GROWING', 'CHILL', 'AND', 'CHRIS', 'DECIDED', 'TO', 'FINISH', 'HIS', 'JOB'] +4852-28311-0018-2116: hyp=['THE', 'AIR', 'WAS', 'GROWING', 'CHILL', 'AND', 'CHRIS', 'DECIDED', 'TO', 'FINISH', 'THE', 'JOB'] +4852-28311-0019-2117: ref=['ALL', 'AT', 'ONCE', 'HE', 'WONDERED', 'HOW', 'HIS', 'MOTHER', 'WAS', 'AND', 'EVERYTHING', 'IN', 'HIM', 'PINCHED', 'AND', 'TIGHTENED', 'ITSELF'] +4852-28311-0019-2117: hyp=['ALL', 'AT', 'ONCE', 'YOU', 'WONDERED', 'HOW', 'HIS', 'MOTHER', 'WAS', 'AND', 'EVERYTHING', 'IN', 'HIM', 'PINCHED', 'AND', 'TIGHTENED', 'ITSELF'] +4852-28311-0020-2118: ref=['AT', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'HE', 'REACHED', 'THE', 'HOUSE'] +4852-28311-0020-2118: hyp=['AT', 'THE', 'FOOT', 'OF', 'THE', 'HILL', 'HE', 'REACHED', 'THE', 'HOUSE'] +4852-28311-0021-2119: ref=['THERE', 'WERE', 'THREE', 'THINGS', 'THAT', 'ALWAYS', 'CAUGHT', 'HIS', 'EYE', 'AMID', 'THE', 'LITTER', 'OF', 'DUSTY', 'PIECES'] +4852-28311-0021-2119: hyp=['THERE', 'WERE', 'THREE', 'THINGS', 'THAT', 'ALWAYS', 'CAUGHT', 'HIS', 'EYE', 'AMID', 'THE', 'LITTER', 'OF', 'DUSTY', 'PIECES'] +4852-28311-0022-2120: ref=['ON', 'THE', 'LEFT', 'THE', 'COIL', 'OF', 'ROPE', 'IN', 'THE', 'CENTER', 'THE', 'MODEL', 'OF', 'A', 'SAILING', 'SHIP', 'IN', 'A', 'GREEN', 'GLASS', 'BOTTLE', 'AND', 'ON', 'THE', 'RIGHT', 'THE', 'WOODEN', 'STATUE', 'OF', 'A', 'NEGRO', 'BOY', 'IN', 'BAGGY', 'TROUSERS', 'TURKISH', 'JACKET', 'AND', 'WHITE', 'TURBAN'] +4852-28311-0022-2120: hyp=['ON', 'THE', 'LEFT', 'THE', 'COIL', 'OF', 'ROPE', 'IN', 'THE', 'CENTRE', 'OF', 'THE', 'MODEL', 'OF', 'A', 'SAILING', 'SHIP', 'IN', 'A', 'GREEN', 'GLASS', 'BOTTLE', 'AND', 'ON', 'THE', 'RIGHT', 'THE', 'WOODEN', 'STATUE', 'OF', 'A', 'NEGRO', 'BOY', 'IN', 'BAGGY', 'TROUSERS', 'TURKISH', 'JACKET', 'AND', 'WHITE', 'TURBAN'] +4852-28311-0023-2121: ref=['BUT', 'THE', 'NAME', 'STILL', 'SHOWED', 'AT', 'THE', 'PROW', 'AND', 'MANY', 'A', 'TIME', 'CHRIS', 'SAFE', 'AT', 'HOME', 'IN', 'BED', 'HAD', 'SAILED', 'IMAGINARY', 'VOYAGES', 'IN', 'THE', 'MIRABELLE'] +4852-28311-0023-2121: hyp=['BUT', 'THE', 'NAME', 'STILL', 'SHOWED', 'AT', 'THE', 'PROW', 'AND', 'MANY', 'A', 'TIME', 'CHRIS', 'SAFE', 'AT', 'HOME', 'IN', 'BED', 'HAD', 'SAILED', 'IMAGINARY', 'VOYAGES', 'IN', 'THE', 'MIRABELLE'] +4852-28311-0024-2122: ref=['HE', 'HAD', 'NEVER', 
'SEEN', 'ANYONE', 'GO', 'INTO', 'MISTER', "WICKER'S", 'SHOP', 'NOW', 'HE', 'THOUGHT', 'OF', 'IT'] +4852-28311-0024-2122: hyp=["HE'D", 'NEVER', 'SEEN', 'ANYONE', 'GO', 'INTO', 'MISTER', "WICKER'S", 'SHOP', 'NOW', 'HE', 'THOUGHT', 'OF', 'IT'] +4852-28311-0025-2123: ref=['HOW', 'THEN', 'DID', 'HE', 'LIVE', 'AND', 'WHAT', 'DID', 'HE', 'EVER', 'SELL'] +4852-28311-0025-2123: hyp=['HOW', 'THEN', 'DID', 'HE', 'LIVE', 'AND', 'WHAT', 'DID', 'HE', 'EVER', 'SELL'] +4852-28311-0026-2124: ref=['A', 'SUDDEN', 'CAR', 'HORN', 'WOKE', 'HIM', 'FROM', 'HIS', 'DREAM'] +4852-28311-0026-2124: hyp=['A', 'SUDDEN', 'CAR', 'HORN', 'WALKING', 'FROM', 'THE', 'STREAM'] +4852-28312-0000-2125: ref=['OF', 'THE', 'MANY', 'TIMES', 'HE', 'HAD', 'EXAMINED', 'MISTER', "WICKER'S", 'WINDOW', 'AND', 'PORED', 'OVER', 'THE', 'ROPE', 'THE', 'SHIP', 'AND', 'THE', 'NUBIAN', 'BOY', 'HE', 'HAD', 'NEVER', 'GONE', 'INTO', 'MISTER', "WICKER'S", 'SHOP'] +4852-28312-0000-2125: hyp=['OF', 'THE', 'MANY', 'TIMES', 'EITHER', 'EXAMINED', 'MISTER', "WICKER'S", 'WINDOW', 'AND', 'POURED', 'OVER', 'THE', 'ROPE', 'TO', 'SHIP', 'AND', 'THE', 'NUBIAN', 'BOY', 'HE', 'HAD', 'NEVER', 'GONE', 'INTO', 'MISTER', "WICKER'S", 'SHOP'] +4852-28312-0001-2126: ref=['SO', 'NOW', 'ALONE', 'UNTIL', 'SOMEONE', 'SHOULD', 'ANSWER', 'THE', 'BELL', 'HE', 'LOOKED', 'EAGERLY', 'IF', 'UNEASILY', 'AROUND', 'HIM'] +4852-28312-0001-2126: hyp=['SO', 'NOW', 'ALONE', 'UNTIL', 'SOME', 'ONE', 'SHOULD', 'ANSWER', 'THE', 'BELL', 'THEY', 'LOOKED', 'EAGERLY', 'IF', 'UNEASILY', 'AROUND', 'HIM'] +4852-28312-0002-2127: ref=['WHAT', 'WITH', 'THE', 'ONE', 'WINDOW', 'AND', 'THE', 'LOWERING', 'DAY', 'OUTSIDE', 'THE', 'LONG', 'NARROW', 'SHOP', 'WAS', 'SOMBER'] +4852-28312-0002-2127: hyp=['WHAT', 'WITH', 'THE', 'ONE', 'WINDOW', 'AND', 'THE', 'LOWERING', 'DAY', 'OUTSIDE', 'THE', 'LONG', 'NARROW', 'SHOP', 'WAS', 'SOMBER'] +4852-28312-0003-2128: ref=['HEAVY', 'HAND', 'HEWN', 'BEAMS', 'CROSSED', 'IT', 'FROM', 'ONE', 'SIDE', 'TO', 'THE', 'OTHER'] +4852-28312-0003-2128: hyp=['HEAVY', 'HAND', 'YOU', 'AND', 'BEAMS', 'CROSSED', 'IT', 'FROM', 'ONE', 'SIDE', 'TO', 'THE', 'OTHER'] +4852-28312-0004-2129: ref=['MISTER', "WICKER'S", 'BACK', 'BEING', 'TOWARD', 'THE', 'SOURCE', 'OF', 'LIGHT', 'CHRIS', 'COULD', 'NOT', 'SEE', 'HIS', 'FACE'] +4852-28312-0004-2129: hyp=['MISS', 'JOKERS', 'BACK', 'BEING', 'TOWARD', 'THE', 'SOURCE', 'OF', 'LIGHT', 'CHRIS', 'COULD', 'NOT', 'SEE', 'HIS', 'FACE'] +4852-28312-0005-2130: ref=['THE', 'DOUBLE', 'FANS', 'OF', 'MINUTE', 'WRINKLES', 'BREAKING', 'FROM', 'EYE', 'CORNER', 'TO', 'TEMPLE', 'AND', 'JOINING', 'WITH', 'THOSE', 'OVER', 'THE', 'CHEEKBONES', 'WERE', 'DRAWN', 'INTO', 'THE', 'HORIZONTAL', 'LINES', 'ACROSS', 'THE', 'DOMED', 'FOREHEAD'] +4852-28312-0005-2130: hyp=['THE', 'DOUBLE', 'FANS', 'A', 'MINUTE', 'WRINKLES', 'BREAKING', 'FROM', 'EYE', 'CORNER', 'TO', 'TEMPLE', 'AND', 'JOINING', 'WITH', 'THOSE', 'OVER', 'THE', 'CHEAP', 'BONES', 'WERE', 'DRAWN', 'INTO', 'THE', 'HORIZONTAL', 'LINES', 'ACROSS', 'THE', 'DOMED', 'FOREHEAD'] +4852-28312-0006-2131: ref=['LITTLE', 'TUFTS', 'OF', 'WHITE', 'FUZZ', 'ABOVE', 'THE', 'EARS', 'WERE', 'ALL', 'THAT', 'REMAINED', 'OF', 'THE', "ANTIQUARIAN'S", 'HAIR', 'BUT', 'WHAT', 'DREW', 'AND', 'HELD', "CHRIS'S", 'GAZE', 'WERE', 'THE', 'OLD', "MAN'S", 'EYES'] +4852-28312-0006-2131: hyp=['LITTLE', 'TUFTS', 'OF', 'WHITE', 'FUZ', 'ABOVE', 'THE', 'EARS', 'WERE', 'ALL', 'THAT', 'REMAINED', 'OF', 'THE', "ANTIQUARIAN'S", 'HAIR', 'BUT', 'WHAT', 'DREW', 'AND', 'HELD', "CHRIS'S", 'GAZE', 'WITH', 'THE', 'OLD', "MAN'S", 'EYES'] +4852-28312-0007-2132: ref=['CHRIS', 
'BLINKED', 'AND', 'LOOKED', 'AGAIN', 'YES', 'THEY', 'WERE', 'STILL', 'THERE'] +4852-28312-0007-2132: hyp=['CRISP', 'BINKED', 'AND', 'LOOKED', 'AGAIN', 'YES', 'THEY', 'WERE', 'STILL', 'THERE'] +4852-28312-0008-2133: ref=['CHRIS', 'SWALLOWED', 'AND', 'HIS', 'VOICE', 'CAME', 'BACK', 'TO', 'HIM'] +4852-28312-0008-2133: hyp=['GRIS', 'SWALLOW', 'AND', 'HIS', 'VOICE', 'CAME', 'BACK', 'TO', 'HIM'] +4852-28312-0009-2134: ref=['YES', 'SIR', 'HE', 'SAID'] +4852-28312-0009-2134: hyp=['YES', 'SIR', 'HE', 'SAID'] +4852-28312-0010-2135: ref=['I', 'SAW', 'YOUR', 'SIGN', 'AND', 'I', 'KNOW', 'A', 'BOY', 'WHO', 'NEEDS', 'THE', 'JOB'] +4852-28312-0010-2135: hyp=['I', 'SAW', 'YOUR', 'SIGN', 'AND', 'I', 'KNOW', 'A', 'BOY', 'WHO', 'NEEDS', 'THE', 'JOB'] +4852-28312-0011-2136: ref=["HE'S", 'A', 'SCHOOLMATE', 'OF', 'MINE'] +4852-28312-0011-2136: hyp=["HE'S", 'A', 'SCHOOLMATE', 'OF', 'MINE'] +4852-28312-0012-2137: ref=['JAKEY', 'HARRIS', 'HIS', 'NAME', 'IS', 'AND', 'HE', 'REALLY', 'NEEDS', 'THE', 'JOB'] +4852-28312-0012-2137: hyp=['JAGGY', "HEIRESS'S", 'NAME', "ISN'T", 'HE', 'REALLY', 'NEEDS', 'THE', 'JOB'] +4852-28312-0013-2138: ref=['I', 'I', 'JUST', 'WONDERED', 'IF', 'THE', 'PLACE', 'WAS', 'STILL', 'OPEN'] +4852-28312-0013-2138: hyp=['I', 'I', 'JUST', 'WANTED', 'IF', 'THE', 'PLACE', 'WAS', 'STILL', 'OPEN'] +4852-28312-0014-2139: ref=['WHAT', 'HE', 'SAW', 'WAS', 'A', 'FRESH', 'CHEEKED', 'LAD', 'TALL', 'FOR', 'THIRTEEN', 'STURDY', 'WITH', 'SINCERITY', 'AND', 'GOOD', 'HUMOR', 'IN', 'HIS', 'FACE', 'AND', 'SOMETHING', 'SENSITIVE', 'AND', 'APPEALING', 'ABOUT', 'HIS', 'EYES'] +4852-28312-0014-2139: hyp=['WHAT', 'HE', 'SAW', 'WAS', 'A', 'FRESH', 'CHEEKED', 'LAD', 'TALL', 'FOR', 'THIRTEEN', 'STURDY', 'WITH', 'SINCERITY', 'AND', 'GOOD', 'HUMOUR', 'IN', 'HIS', 'FACE', 'AND', 'SOMETHING', 'SCENTED', 'IN', 'APPEALING', 'ABOUT', 'HIS', 'EYES'] +4852-28312-0015-2140: ref=['HE', 'GUESSED', 'THERE', 'MUST', 'BE', 'A', 'LIVELY', 'FIRE', 'IN', 'THAT', 'ROOM', 'BEYOND'] +4852-28312-0015-2140: hyp=['HE', 'GUESSED', 'THERE', 'MUST', 'BE', 'A', 'LIVELY', 'FIRE', 'IN', 'THAT', 'ROOM', 'BEYOND'] +4852-28312-0016-2141: ref=['WOULD', 'THAT', 'INTERFERE', 'WITH', "JAKEY'S", 'GETTING', 'THE', 'JOB', 'SIR'] +4852-28312-0016-2141: hyp=['WOULD', 'THAT', 'INFERE', 'WITH', 'JAKI', 'GIGGS', 'GETTING', 'THE', 'JOB', 'SIR'] +4852-28312-0017-2142: ref=['BUT', 'EVEN', 'AS', 'HE', 'SLOWLY', 'TURNED', 'THE', 'THOUGHT', 'PIERCED', 'HIS', 'MIND', 'WHY', 'HAD', 'HE', 'NOT', 'SEEN', 'THE', 'REFLECTION', 'OF', 'THE', 'HEADLIGHTS', 'OF', 'THE', 'CARS', 'MOVING', 'UP', 'AROUND', 'THE', 'CORNER', 'OF', 'WATER', 'STREET', 'AND', 'UP', 'THE', 'HILL', 'TOWARD', 'THE', 'TRAFFIC', 'SIGNALS'] +4852-28312-0017-2142: hyp=['BUT', 'EVEN', 'AS', 'HE', 'SLOWLY', 'TURNED', 'THE', 'THOUGHT', 'PIERCED', 'HIS', 'MIND', 'WHY', 'HE', 'NOT', 'SEEN', 'THE', 'REFLECTION', 'OF', 'THE', 'HEADLIGHTS', 'OF', 'THE', 'CARS', 'MOVING', 'UP', 'AROUND', 'THE', 'CORRIER', 'OF', 'WALL', 'UNDER', 'STREET', 'NOT', 'THE', 'HILL', 'TOWARD', 'THE', 'LIFE', 'SIGNALS'] +4852-28312-0018-2143: ref=['THE', 'ROOM', 'SEEMED', 'OVERLY', 'STILL'] +4852-28312-0018-2143: hyp=['THE', 'ROOM', 'SEEMED', 'OVERLY', 'STILL'] +4852-28312-0019-2144: ref=['THEN', 'IN', 'THAT', 'SECOND', 'HE', 'TURNED', 'AND', 'FACED', 'ABOUT'] +4852-28312-0019-2144: hyp=['THEN', 'IN', 'THAT', 'SECOND', 'HE', 'TURNED', 'AND', 'FACED', 'ABOUT'] +4852-28312-0020-2145: ref=['THE', 'WIDE', 'BOW', 'WINDOW', 'WAS', 'THERE', 'BEFORE', 'HIM', 'THE', 'THREE', 'OBJECTS', 'HE', 'LIKED', 'BEST', 'SHOWING', 'FROSTY', 'IN', 'THE', 'MOONLIGHT', 
'THAT', 'POURED', 'IN', 'FROM', 'ACROSS', 'THE', 'WATER'] +4852-28312-0020-2145: hyp=['THE', 'WIDE', 'BOW', 'WIND', 'THAT', 'WAS', 'THERE', 'BEFORE', 'HIM', 'THE', 'THREE', 'OBJECTS', 'HE', 'LIKED', 'BEST', 'SHOWING', 'FROSTY', 'IN', 'THE', 'MOONLIGHT', 'THAT', 'POURED', 'IN', 'FROM', 'ACROSS', 'THE', 'WATER'] +4852-28312-0021-2146: ref=['ACROSS', 'THE', 'WATER', 'WHERE', 'WAS', 'THE', 'FREEWAY'] +4852-28312-0021-2146: hyp=['ACROSS', 'THE', 'WATER', 'WHERE', 'IS', 'THE', 'FREE', 'WAY'] +4852-28312-0022-2147: ref=['IT', 'WAS', 'NO', 'LONGER', 'THERE', 'NOR', 'WERE', 'THE', 'HIGH', 'WALLS', 'AND', 'SMOKESTACKS', 'OF', 'FACTORIES', 'TO', 'BE', 'SEEN'] +4852-28312-0022-2147: hyp=['IT', 'WAS', 'NO', 'LONGER', 'THERE', 'NOR', 'WERE', 'THE', 'HIGH', 'WALLS', 'AND', 'SMOKESTACKS', 'OF', 'FACTORIES', 'TO', 'BE', 'SEEN'] +4852-28312-0023-2148: ref=['THE', 'WAREHOUSES', 'WERE', 'STILL', 'THERE'] +4852-28312-0023-2148: hyp=['THE', 'WAREHOUSES', 'WERE', 'STILL', 'THERE'] +4852-28312-0024-2149: ref=['FLABBERGASTED', 'AND', 'BREATHLESS', 'CHRIS', 'WAS', 'UNAWARE', 'THAT', 'HE', 'HAD', 'MOVED', 'CLOSER', 'TO', 'PEER', 'OUT', 'THE', 'WINDOW', 'IN', 'EVERY', 'DIRECTION'] +4852-28312-0024-2149: hyp=['FLAVAGASTED', 'AND', 'BREATHLESS', 'CHRIS', 'WAS', 'UNAWARE', 'THAT', 'HE', 'HAD', 'MOVED', 'CLOSER', 'TO', 'PEER', 'OUT', 'THE', 'WINDOW', 'IN', 'EVERY', 'DIRECTION'] +4852-28312-0025-2150: ref=['NO', 'ELECTRIC', 'SIGNS', 'NO', 'LAMPLIT', 'STREETS'] +4852-28312-0025-2150: hyp=['NO', 'ELECTRIC', 'SIGNS', 'NO', 'LAMPLET', 'STREETS'] +4852-28312-0026-2151: ref=['WHERE', 'THE', "PEOPLE'S", 'DRUGSTORE', 'HAD', 'STOOD', 'BUT', 'A', 'HALF', 'HOUR', 'BEFORE', 'ROSE', 'THE', 'ROOFS', 'OF', 'WHAT', 'WAS', 'EVIDENTLY', 'AN', 'INN'] +4852-28312-0026-2151: hyp=['WHERE', 'THE', "PEOPLE'S", 'DRUG', 'STORE', 'IT', 'STOOD', 'BUT', 'HALF', 'AN', 'HOUR', 'BEFORE', 'ROSE', 'THE', 'ROOFS', 'OF', 'WHAT', 'WAS', 'EVIDENTLY', 'AN', 'INN'] +4852-28312-0027-2152: ref=['A', 'COURTYARD', 'WAS', 'SPARSELY', 'LIT', 'BY', 'A', 'FLARING', 'TORCH', 'OR', 'TWO', 'SHOWING', 'A', 'SWINGING', 'SIGN', 'HUNG', 'ON', 'A', 'POST'] +4852-28312-0027-2152: hyp=['A', 'COURTYARD', 'WAS', 'FIRSTLY', 'LED', 'BY', 'A', 'FLARING', 'TORTURE', 'TO', 'SHOWING', 'A', 'SWINGING', 'SIGN', 'HUNG', 'ON', 'THE', 'POST'] +4852-28312-0028-2153: ref=['THE', 'POST', 'WAS', 'PLANTED', 'AT', 'THE', 'EDGE', 'OF', 'WHAT', 'WAS', 'NOW', 'A', 'BROAD', 'AND', 'MUDDY', 'ROAD'] +4852-28312-0028-2153: hyp=['THE', 'POSTS', 'BLOODED', 'AT', 'THE', 'EDGE', 'OF', 'IT', 'WAS', 'NOW', 'A', 'BROAD', 'AND', 'MONEY', 'ROAD'] +4852-28312-0029-2154: ref=['A', 'COACH', 'WITH', 'ITS', 'TOP', 'PILED', 'HIGH', 'WITH', 'LUGGAGE', 'STAMPED', 'TO', 'A', 'HALT', 'BESIDE', 'THE', 'FLAGGED', 'COURTYARD'] +4852-28312-0029-2154: hyp=['A', 'COACH', 'WERE', 'THEN', 'STOPPED', 'PILED', 'HIGH', 'WITH', 'LUGGAGE', 'STAMPED', 'OR', 'HALT', 'BESIDE', 'THE', 'FLAGGED', 'COURTYARD'] +4852-28312-0030-2155: ref=['THEY', 'MOVED', 'INTO', 'THE', 'INN', 'THE', 'COACH', 'RATTLED', 'OFF', 'TO', 'THE', 'STABLE'] +4852-28312-0030-2155: hyp=['THEY', 'MOVED', 'INTO', 'THE', 'INN', 'THE', 'COACH', 'RATTLED', 'OFF', 'TO', 'THE', 'STABLE'] +4852-28312-0031-2156: ref=['MY', 'WINDOW', 'HAS', 'A', 'POWER', 'FOR', 'THOSE', 'FEW', 'WHO', 'ARE', 'TO', 'SEE'] +4852-28312-0031-2156: hyp=['MY', 'WINDOW', 'AS', 'A', 'POWER', 'FOR', 'THOSE', 'FEW', 'WHO', 'ARE', 'TO', 'SEE'] +4852-28319-0000-2070: ref=['THE', 'LEARNING', 'OF', 'MAGIC', 'WAS', 'BY', 'NO', 'MEANS', 'EASY'] +4852-28319-0000-2070: hyp=['THE', 'LEARNING', 'OF', 'MAGIC', 'WAS', 
'BY', 'NO', 'MEANS', 'EASY'] +4852-28319-0001-2071: ref=['HE', 'HAD', 'TOLD', 'HIS', 'MASTER', 'AT', 'ONCE', 'ABOUT', 'SIMON', 'GOSLER', 'HIS', 'HORDE', 'OF', 'MONEY', 'AND', 'HIS', 'HIDING', 'PLACES', 'FOR', 'IT'] +4852-28319-0001-2071: hyp=['HE', 'TOLD', 'HIS', 'MASTER', 'AT', 'ONCE', 'HE', 'GOT', 'SIMON', 'GOSPIR', 'HIS', 'HOARD', 'OF', 'MONEY', 'IN', 'HIS', 'HIDING', 'PLACES', 'FOR', 'IT'] +4852-28319-0002-2072: ref=['CHRIS', 'THEREFORE', 'THREW', 'HIMSELF', 'INTO', 'ALL', 'THE', 'PRELIMINARIES', 'OF', 'HIS', 'TASK'] +4852-28319-0002-2072: hyp=['CHRIS', 'THEREFORE', 'THREW', 'HIMSELF', 'AND', 'ALL', 'THE', 'PROLIMINARIES', 'OF', 'HIS', 'TASK'] +4852-28319-0003-2073: ref=['ONE', 'AFTERNOON', 'WHEN', 'HE', 'RETURNED', 'AFTER', 'A', 'REST', 'TO', 'MISTER', "WICKER'S", 'STUDY', 'HE', 'SAW', 'THAT', 'THERE', 'WAS', 'SOMETHING', 'NEW', 'IN', 'THE', 'ROOM', 'A', 'BOWL', 'WITH', 'A', 'GOLDFISH', 'IN', 'IT', 'STOOD', 'ON', 'THE', 'TABLE', 'BUT', 'MISTER', 'WICKER', 'WAS', 'NOT', 'TO', 'BE', 'SEEN'] +4852-28319-0003-2073: hyp=['ONE', 'AFTERNOON', 'WHEN', 'HE', 'HAD', 'RETURNED', 'AFTER', 'A', 'REST', 'TO', 'MISTER', "WICKER'S", 'STUDY', 'HE', 'SAW', 'THAT', 'THERE', 'WAS', 'SOMETHING', 'NEW', 'IN', 'THE', 'ROOM', 'A', 'BULL', 'WITH', 'A', 'GOLD', 'FISH', 'IN', 'IT', 'STOOD', 'ON', 'THE', 'TABLE', 'BUT', 'MISTER', 'WICKER', 'WAS', 'NOT', 'TO', 'BE', 'SEEN'] +4852-28319-0004-2074: ref=['WHAT', 'SHALL', 'I', 'DO', 'FIRST'] +4852-28319-0004-2074: hyp=['WHAT', 'SHOULD', 'I', 'DO', 'FIRST'] +4852-28319-0005-2075: ref=['HOW', 'YOU', 'HAVE', 'IMPROVED', 'MY', 'BOY', 'HE', 'EXCLAIMED', 'IT', 'IS', 'NOW', 'TIME', 'FOR', 'YOU', 'TO', 'TRY', 'AND', 'THIS', 'IS', 'AS', 'GOOD', 'A', 'CHANGE', 'AS', 'ANY'] +4852-28319-0005-2075: hyp=['HOW', 'YOU', 'OFTEN', 'PROVED', 'MY', 'BOY', 'IT', 'EXCLAIMED', 'IS', 'NOW', 'TIME', 'FOR', 'YOU', 'TO', 'TRY', 'MISSUS', 'GOT', 'A', 'CHANGE', 'AS', 'ANY'] +4852-28319-0006-2076: ref=['SUPPOSE', 'I', 'CHANGE', 'AND', "CAN'T", 'CHANGE', 'BACK'] +4852-28319-0006-2076: hyp=['SUPPOSE', 'I', 'CHANGE', 'AND', 'CATCH', 'ITS', 'BACK'] +4852-28319-0007-2077: ref=['MISTER', 'WICKER', 'WAITED', 'PATIENTLY', 'BESIDE', 'HIM', 'FOR', 'A', 'FEW', 'MOMENTS', 'FOR', 'CHRIS', 'TO', 'GET', 'UP', 'HIS', 'COURAGE'] +4852-28319-0007-2077: hyp=['MISTER', 'WICKER', 'WAITED', 'PATIENTLY', 'BESIDE', 'HIM', 'FOR', 'A', 'FEW', 'MOMENTS', 'FOR', 'CHRIS', 'TO', 'GET', 'UP', 'HIS', 'COURAGE'] +4852-28319-0008-2078: ref=['THEN', 'AS', 'NOTHING', 'HAPPENED', 'WITH', 'A', 'VOICE', 'LIKE', 'A', 'WHIP', 'MISTER', 'WICKER', 'SAID', 'START', 'AT', 'ONCE'] +4852-28319-0008-2078: hyp=['THAT', 'IS', 'NOTHING', 'HAPPENED', 'WITH', 'A', 'VOICE', 'LIKE', 'A', 'WHIP', 'MISTER', 'WICKER', 'SAID', 'STARTED', 'ONCE'] +4852-28319-0009-2079: ref=['THE', 'SENSATION', 'SPREAD', 'FASTER', 'AND', 'FASTER'] +4852-28319-0009-2079: hyp=['THE', 'SENSATION', 'SPREAD', 'FASTER', 'AND', 'FASTER'] +4852-28319-0010-2080: ref=['HIS', 'HEAD', 'SWAM', 'AND', 'HE', 'FELT', 'FAINT', 'AND', 'A', 'LITTLE', 'SICK', 'BUT', 'HE', 'PERSISTED', 'THROUGH', 'THE', 'FINAL', 'WORDS'] +4852-28319-0010-2080: hyp=['HIS', 'HEAD', 'SWAM', 'AND', 'HE', 'FELT', 'FAINT', 'IN', 'A', 'LITTLE', 'SICK', 'BUT', 'HE', 'PERSISTED', 'THROUGH', 'THE', 'FINAL', 'WORDS'] +4852-28319-0011-2081: ref=['HE', 'THOUGHT', 'NOT', 'WITHOUT', 'A', 'FEELING', 'OF', 'PRIDE', 'AND', 'COMMENCED', 'EXPERIMENTING', 'WITH', 'HIS', 'TAIL', 'AND', 'FINS', 'WITH', 'SUCH', 'ENTHUSIASM', 'AND', 'DELIGHT', 'THAT', 'SOME', 'LITTLE', 'TIME', 'ELAPSED', 'BEFORE', 'MISTER', "WICKER'S", 'VOICE', 
'BOOMED', 'CLOSE', 'BY'] +4852-28319-0011-2081: hyp=['HE', 'THOUGHT', 'NOW', 'WITHOUT', 'A', 'FEELING', 'OF', 'PRIDE', 'AND', 'COMMENCED', 'THE', 'EXPERIMENTING', 'WITH', 'HIS', 'TAIL', 'AND', 'FINS', 'WITH', 'SUCH', 'ENTHUSIASM', 'AND', 'DELIGHT', 'THAT', 'SOME', 'LITTLE', 'TIME', 'ELAPSED', 'BEFORE', 'MISTER', "WICKER'S", 'VOICE', 'BOOM', 'BUT', 'WAS', 'BY'] +4852-28319-0012-2082: ref=['SEVENTY', 'FOUR', 'BOOK', 'ONE', 'THE', 'RETURN'] +4852-28319-0012-2082: hyp=['SEVENTY', 'FOUR', 'BOOK', 'ONE', 'THE', 'RETURN'] +4852-28319-0013-2083: ref=['THE', "FIGURE'S", 'SHOES', 'CARVED', 'IN', 'SOME', 'EASTERN', 'STYLE', 'HAD', 'CURVED', 'UP', 'POINTING', 'TOES'] +4852-28319-0013-2083: hyp=['THE', "FIGURE'S", 'SHOES', 'CARVED', 'IN', 'SOME', 'EASTERN', 'STYLE', 'HAD', 'CURVED', 'UP', 'POINTING', 'TOES'] +4852-28319-0014-2084: ref=['THEN', 'ALL', 'AT', 'ONCE', 'THE', 'IDEA', 'CAME', 'TO', 'CHRIS'] +4852-28319-0014-2084: hyp=['THEN', 'ALL', 'AT', 'ONCE', 'THE', 'IDEA', 'CAME', 'TO', 'CHRIS'] +4852-28319-0015-2085: ref=['IF', 'HE', 'WAS', 'TO', 'BE', 'A', 'MAGICIAN', 'COULD', 'HE', 'MAKE', 'THIS', 'BOY', 'COME', 'TO', 'LIFE'] +4852-28319-0015-2085: hyp=['IF', 'HE', 'WAS', 'TO', 'BE', 'A', 'MAGICIAN', 'COULD', 'HE', 'MAKE', 'THIS', 'BOY', 'COME', 'TO', 'LIFE'] +4852-28319-0016-2086: ref=['HE', 'SQUATTED', 'ON', 'HIS', 'HAUNCHES', 'EXAMINING', 'THE', 'CARVED', 'WOODEN', 'FIGURE', 'ATTENTIVELY', 'AND', 'FELT', 'CONVINCED', 'THAT', 'ONCE', 'ALIVE', 'THE', 'BOY', 'WOULD', 'BE', 'AN', 'IDEAL', 'AND', 'HAPPY', 'COMPANION'] +4852-28319-0016-2086: hyp=['IT', 'SQUATTED', 'ON', 'HIS', 'HAUNCHES', 'EXAMINED', 'THE', 'CARVED', 'WOODEN', 'FIGURE', 'ATTENTIVELY', 'AND', 'FELT', 'CONVINCED', 'THAT', 'ONCE', 'ALIVE', 'THE', 'BOY', 'WOULD', 'BE', 'AN', 'IDEAL', 'AND', 'HAPPY', 'COMPANION'] +4852-28319-0017-2087: ref=['BUT', 'HOW', 'DID', 'ONE', 'CHANGE', 'INANIMATE', 'TO', 'ANIMATE'] +4852-28319-0017-2087: hyp=['BUT', 'HOW', 'DID', 'ONE', 'A', 'CHANGE', 'INANIMATE', 'TO', 'ENEMY'] +4852-28319-0018-2088: ref=['CHRIS', 'GOT', 'UP', 'AND', 'STOLE', 'BACK', 'TO', 'MISTER', "WICKER'S", 'DOOR'] +4852-28319-0018-2088: hyp=['CHRIS', 'GOT', 'UP', 'AND', 'STOLE', 'BACK', 'TO', 'MISTER', "WICKER'S", 'DOOR'] +4852-28319-0019-2089: ref=['HE', 'HEARD', 'THE', 'MAGICIAN', 'GOING', 'UP', 'THE', 'SPIRAL', 'STAIRCASE', 'TO', 'HIS', 'ROOM', 'ABOVE', 'AND', 'AFTER', 'CHANGING', 'HIMSELF', 'TO', 'A', 'MOUSE', 'TO', 'SLIP', 'UNDER', 'THE', 'DOOR', 'AND', 'SEE', 'THAT', 'THE', 'ROOM', 'WAS', 'REALLY', 'EMPTY', 'CHRIS', 'RESUMED', 'HIS', 'PROPER', 'SHAPE', 'AND', 'OPENED', 'THE', 'DOORS', 'OF', 'THE', 'CUPBOARD', 'AT', 'THE', 'FAR', 'END', 'OF', 'THE', 'ROOM'] +4852-28319-0019-2089: hyp=['HE', 'HEARD', 'THAT', 'MAGICIAN', 'GOING', 'UP', 'THE', 'SPIRAL', 'STAIRCASE', 'TO', 'HIS', 'ROOM', 'ABOVE', 'AND', 'AFTER', 'CHANGING', 'HIMSELF', 'TO', 'A', 'MOUSE', 'TO', 'SLIP', 'UNDER', 'THE', 'DOOR', 'AND', 'SEE', 'THAT', 'THE', 'ROOM', 'WAS', 'REELING', 'EMPTY', 'MISTER', "JAMES'S", 'PROPER', 'SHAPE', 'AND', 'OPENED', 'THE', 'DOORS', 'OF', 'THE', 'CUPBOARD', 'AT', 'THE', 'FAR', 'END', 'OF', 'THE', 'ROOM'] +4852-28319-0020-2090: ref=['THE', 'AFTERNOON', 'RAINY', 'BEFORE', 'INCREASED', 'IN', 'STORM'] +4852-28319-0020-2090: hyp=['THE', 'AFTERNOON', 'RAINING', 'BEFORE', 'INCREASED', 'IN', 'STORM'] +4852-28319-0021-2091: ref=['DUSK', 'CAME', 'TWO', 'HOURS', 'BEFORE', 'ITS', 'TIME', 'THUNDER', 'SNARLED', 'IN', 'THE', 'SKY'] +4852-28319-0021-2091: hyp=['THUS', 'GAINED', 'TWO', 'HOURS', 'BEFORE', 'ITS', 'TIME', 'THUNDER', 'SNARLS', 'IN', 'THE', 'SKY'] 
+4852-28319-0022-2092: ref=['CERTAIN', 'ELEMENTS', 'WERE', 'TO', 'BE', 'MIXED', 'AND', 'POURED', 'AT', 'THE', 'PROPER', 'TIME'] +4852-28319-0022-2092: hyp=['CERTAIN', 'ELEMENTS', 'WERE', 'TO', 'BE', 'MIXED', 'AND', 'POURED', 'AT', 'THE', 'PROPER', 'TIME'] +4852-28319-0023-2093: ref=['MISTER', 'WICKER', 'BEGAN', 'MOVING', 'ABOUT', 'UPSTAIRS', 'THE', 'FLOORBOARDS', 'CREAKED', 'AND', 'STILL', 'CHRIS', 'COULD', 'NOT', 'LEAVE', 'UNTIL', 'THE', 'POTION', 'FUMED', 'AND', 'GLOWED'] +4852-28319-0023-2093: hyp=['MISTER', 'WICKER', 'BEGAN', 'MOVING', 'ABOUT', 'UPSTAIRS', 'THE', 'FLOOR', 'BOARDS', 'CREAKED', 'CREEK', 'AND', 'STILL', 'CHRIS', 'COULD', 'NOT', 'LEAVE', 'UNTIL', 'THE', 'FOCIN', 'FUMED', 'AND', 'GLOWED'] +4852-28319-0024-2094: ref=['WITH', 'INFINITE', 'CAUTION', 'CHRIS', 'CLOSED', 'THE', 'DOOR', 'SILENTLY', 'BEHIND', 'HIM', 'AND', 'RUNNING', 'LIGHTLY', 'FORWARD', 'REACHED', 'THE', 'FIGURE', 'OF', 'THE', 'NEGRO', 'BOY'] +4852-28319-0024-2094: hyp=['WITH', 'INFINITE', 'CAUTION', 'CHRIS', 'CLOSED', 'THE', 'DOORS', 'SILENTLY', 'BEHIND', 'HIM', 'AND', 'RUNNING', 'LATE', 'BEFOREWARD', 'REACHED', 'THE', 'FIGURE', 'AT', 'THE', 'NEGRO', 'BOY'] +4852-28319-0025-2095: ref=['IT', 'WAS', 'AS', 'IF', 'THE', 'STIFFNESS', 'MELTED'] +4852-28319-0025-2095: hyp=['IT', 'WAS', 'AS', 'IF', 'THE', 'STIFFNESS', 'MELTED'] +4852-28319-0026-2096: ref=['UNDER', 'HIS', 'EYES', 'THE', 'WOODEN', 'FOLDS', 'OF', 'CLOTH', 'BECAME', 'RICH', 'SILK', 'EMBROIDERY', 'GLEAMED', 'IN', 'ITS', 'REALITY', 'UPON', 'THE', 'COAT', 'AND', 'OH', 'THE', 'FACE'] +4852-28319-0026-2096: hyp=['UNDER', 'HIS', 'EYES', 'WIDEN', 'FOLDS', 'OF', 'CLOTH', 'BECAME', 'RICH', 'SILK', 'EMBROIDERY', 'GLEAMED', 'IN', 'ITS', 'REALITY', 'UPON', 'THE', 'COAT', 'AND', 'OH', 'THE', 'FACE'] +4852-28319-0027-2097: ref=['THE', 'WOODEN', 'GRIN', 'LOOSENED', 'THE', 'LARGE', 'EYES', 'TURNED', 'THE', 'HAND', 'HOLDING', 'THE', 'HARD', 'BOUQUET', 'OF', 'CARVED', 'FLOWERS', 'MOVED', 'AND', 'LET', 'THE', 'BOUQUET', 'FALL'] +4852-28319-0027-2097: hyp=['THE', 'WOODEN', 'GRIN', 'LOOSENED', 'THE', 'LARGE', 'EYES', 'TURNED', 'THE', 'HAND', 'HOLDING', 'THE', 'HARD', 'BOUQUET', 'OF', 'CARVED', 'FLOWERS', 'MOVED', 'THE', 'BOUQUET', 'FALL'] +4852-28330-0000-2044: ref=['THEY', 'WENT', 'DOWN', 'TO', 'THEIR', 'QUARTERS', 'FIRST'] +4852-28330-0000-2044: hyp=['THEY', 'WENT', 'DOWN', 'TO', 'THEIR', 'QUARTERS', 'FIRST'] +4852-28330-0001-2045: ref=['GUESS', 'MISTER', 'FINNEY', 'WENT', 'TO', 'HIS', 'QUARTERS', 'I', "DON'T", 'REMEMBER', 'SEEING', 'HIM', 'CROSS', 'THE', 'DECK', 'OR', 'COME', 'OVER', 'THAT', 'WAY', 'AT', 'ALL'] +4852-28330-0001-2045: hyp=['GUESS', 'MISTER', 'FINNEY', 'WENT', 'TO', 'HIS', 'QUARTERS', 'I', "DON'T", 'REMEMBER', 'SEEING', 'HIM', 'CROSS', 'THE', 'DECK', 'OR', 'COME', 'OVER', 'THAT', 'WAY', 'AT', 'ALL'] +4852-28330-0002-2046: ref=['NEXT', 'NED', 'CILLEY', 'WAS', 'RELIEVED', 'AT', 'THE', 'HELM', 'BY', 'ELBERT', 'JONES', 'WHO', 'TOOK', 'OVER', 'NED', 'WENT', 'ON', 'DOWN'] +4852-28330-0002-2046: hyp=['NEXT', 'NED', 'CILLEY', 'WAS', 'RELIEVED', 'TO', 'THE', 'HOME', 'BY', 'HILBER', 'JONES', 'WHO', 'TOOK', 'OVER', 'NED', 'WENT', 'ON', 'DOWN'] +4852-28330-0003-2047: ref=['IT', 'LOOKS', 'TO', 'ME', 'AS', 'IF', 'IT', 'COULD', 'HAVE', 'BEEN', 'ONE', 'OF', 'SEVERAL', 'PEOPLE', 'AND', "I'LL", 'BE', 'SWITCHED', 'IF', 'I', 'KNOW', 'WHO', "I'LL", 'KEEP', 'MY', 'EYES', 'OPEN'] +4852-28330-0003-2047: hyp=['IT', 'LOOKS', 'TO', 'ME', 'AS', 'IF', 'IT', 'COULD', 'BE', 'BEEN', 'ONE', 'OF', 'SEVERAL', 'PEOPLE', 'AND', "I'LL", 'BE', 'SWITCHED', 'IF', 'I', 'KNOW', 'WHO', 'LOOK', 'GIVE', 
'MY', 'EYES', 'UP', 'AND'] +4852-28330-0004-2048: ref=['THE', 'MIRABELLE', 'WAS', 'NEARING', 'TAHITI'] +4852-28330-0004-2048: hyp=['THE', 'MIRABELLE', 'WAS', 'NEARING', 'TEDI'] +4852-28330-0005-2049: ref=["WE'VE", 'WATER', 'AND', 'FRESH', 'STORES', 'TO', 'TAKE', 'ON', 'THERE'] +4852-28330-0005-2049: hyp=['WE', 'WATER', 'AND', 'FRESH', 'STALLS', 'TO', 'TAKE', 'ON', 'THERE'] +4852-28330-0006-2050: ref=['CHRIS', 'LOST', 'NO', 'TIME', 'AS', 'SOON', 'AS', 'HE', 'COULD', 'DO', 'IT', 'WITHOUT', 'BEING', 'NOTICED', 'IN', 'HURRYING', 'DOWN', 'TO', 'HIS', 'CABIN'] +4852-28330-0006-2050: hyp=['CHRIS', 'LOST', 'NO', 'TIME', 'AS', 'SOON', 'AS', 'HE', 'COULD', 'DO', 'IT', 'WITHOUT', 'BEING', 'NOTICED', 'AND', 'HURRYING', 'DOWN', 'TO', 'HIS', 'CABIN'] +4852-28330-0007-2051: ref=['CERTAINLY', 'MY', 'BOY', 'BOOMED', 'OUT', 'THE', 'CAPTAIN', 'HIS', 'BLUE', 'EYES', 'ABRUPTLY', 'KEEN', 'AND', 'PENETRATING'] +4852-28330-0007-2051: hyp=['CERTAINLY', 'MY', 'BOY', 'BOOMED', 'OUT', 'THE', 'CAPTAIN', 'AS', 'BLUE', 'EYES', 'ABRUPTLY', 'KEEN', 'AND', 'PENETRATING'] +4852-28330-0008-2052: ref=['MISTER', 'FINNEY', 'WILL', 'BE', 'SOME', 'TIME', 'ON', 'DECK', 'WE', 'CANNOT', 'BE', 'OVERHEARD', 'IN', 'HERE'] +4852-28330-0008-2052: hyp=['MISTER', 'FINNELL', 'WILL', 'BE', 'SOME', 'TIME', 'ON', 'DECK', 'WE', 'CANNOT', 'BE', 'OUR', 'OWN', 'HEARD', 'IN', 'HERE'] +4852-28330-0009-2053: ref=['HIS', 'FACE', 'FROZE', 'WITH', 'NERVOUSNESS', 'THAT', 'THIS', 'MIGHT', 'NOT', 'DO', 'AS', 'AN', 'ANSWER', 'AND', 'HE', 'STOOD', 'STIFF', 'AND', 'STILL', 'BEFORE', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0009-2053: hyp=['HIS', 'FACE', 'ROSE', 'WITH', 'NERVOUSNESS', 'THAT', 'THIS', 'MIGHT', 'DO', 'IT', 'DO', 'AS', 'AN', 'ANSWER', 'AND', 'HE', 'STOOD', 'STIFF', 'AND', 'STILL', 'BEFORE', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0010-2054: ref=['THE', 'CAPTAIN', 'SAT', 'FORWARD', 'IN', 'HIS', 'CHAIR', 'LOOKING', 'AT', 'HIM', 'FOR', 'A', 'LONG', 'MOMENT', 'CONSIDERING'] +4852-28330-0010-2054: hyp=['THE', 'CAPTAIN', 'SAT', 'FORWARD', 'IN', 'HIS', 'CHAIR', 'LOOKING', 'AT', 'HIM', 'FOR', 'A', 'LONG', 'MOMENT', 'CONSIDERING'] +4852-28330-0011-2055: ref=['THEN', 'HE', 'SAID', 'WELL', 'I', 'DO', 'NOT', 'CARE', 'FOR', 'IT', 'I', 'CANNOT', 'SAY', 'I', 'DO'] +4852-28330-0011-2055: hyp=['THEN', 'HE', 'SAID', 'WELL', 'I', 'DO', 'NOT', 'CARE', 'FOR', 'IT', 'I', 'CANNOT', 'SAY', 'THAT', 'DO'] +4852-28330-0012-2056: ref=['THIS', 'SHIP', 'IS', 'MORE', 'TO', 'ME', 'THAN', 'WIFE', 'OR', 'MOTHER', 'OR', 'FAMILY'] +4852-28330-0012-2056: hyp=['THE', 'SHIP', 'IS', 'MORE', 'TO', 'ME', 'THAN', 'MY', 'FOREMOTHER', 'OR', 'FAMILY'] +4852-28330-0013-2057: ref=['HE', 'PAUSED', 'FINGERING', 'HIS', 'LOWER', 'LIP', 'AND', 'LOOKING', 'SIDEWAYS', 'IN', 'A', 'REFLECTIVE', 'FASHION', 'AT', 'CHRIS', 'STANDING', 'BEFORE', 'HIM'] +4852-28330-0013-2057: hyp=['AND', 'PAUSED', 'FINGERING', 'HIS', 'LOWER', 'LIP', 'AND', 'LOOKING', 'SIDEWAYS', 'INTO', 'REFLECTIVE', 'FASHION', 'AT', 'CHRIS', 'STANDING', 'BEFORE', 'HIM'] +4852-28330-0014-2058: ref=['WE', 'SHALL', 'SAY', 'NO', 'MORE', 'BUT', 'I', 'TRUST', 'YOU', 'UNDERSTAND', 'THE', 'RESPONSIBILITY', 'YOU', 'HAVE'] +4852-28330-0014-2058: hyp=['WE', 'SHALL', 'SAY', 'NO', 'MORE', 'BUT', 'I', 'TRUST', 'YOU', 'UNDERSTAND', 'THE', 'RESPONSIBILITY', 'YOU', 'HAVE'] +4852-28330-0015-2059: ref=['THIS', 'SHIP', 'ITS', 'CARGO', 'AND', 'ITS', 'MEN', 'WILL', 'BE', 'IN', 'YOUR', 'HANDS'] +4852-28330-0015-2059: hyp=['THE', 'SHIP', 'ITS', 'CARGO', 'IN', 'ITS', 'MEN', 'WILL', 'BE', 'IN', 'YOUR', 'HANDS'] +4852-28330-0016-2060: ref=['YES', 'SIR', 'I', 'THINK', 'I', 
'CAN', 'DO', 'IT', 'SAFELY', 'OR', 'I', 'SHOULD', 'NOT', 'TRY', 'SIR'] +4852-28330-0016-2060: hyp=['YES', 'SIR', 'I', 'THINK', 'I', 'CAN', 'DO', 'IT', 'SAFELY', 'OR', 'I', 'SHOULD', 'NOT', 'TRY', 'SIR'] +4852-28330-0017-2061: ref=['CAPTAIN', "BLIZZARD'S", 'ROUND', 'PINK', 'FACE', 'CREASED', 'IN', 'HIS', 'WINNING', 'SMILE'] +4852-28330-0017-2061: hyp=['CAPTAIN', "BLIZZARD'S", 'ROUND', 'PINK', 'FACED', 'CREASED', 'IN', 'ITS', 'WINNING', 'SMILE'] +4852-28330-0018-2062: ref=['HE', 'THEN', 'WENT', 'ON', 'TO', 'DESCRIBE', 'WHAT', 'ELSE', 'WAS', 'TO', 'FOLLOW', 'THE', 'COVERING', 'OF', 'THE', 'SHIP', 'WITH', 'LEAVES', 'TO', 'MAKE', 'IT', 'BLEND', 'WITH', 'ITS', 'SURROUNDINGS'] +4852-28330-0018-2062: hyp=['HE', 'THEN', 'WENT', 'ON', 'TO', 'DESCRIBE', 'WHAT', 'ELSE', 'WAS', 'TO', 'FOLLOW', 'THE', 'COVERING', 'OF', 'THE', 'SHIP', 'WITH', 'LEAVES', 'TO', 'MAKE', 'IT', 'BLEND', 'WITH', 'ITS', 'SURROUNDINGS'] +4852-28330-0019-2063: ref=['CAMOUFLAGE', 'WAS', 'NOT', 'A', 'WORD', 'THE', 'CAPTAIN', 'OR', 'ANYONE', 'ELSE', 'OF', 'HIS', 'TIME', 'YET', 'UNDERSTOOD'] +4852-28330-0019-2063: hyp=['THE', 'CAMERA', 'FLASH', 'WAS', 'NOT', 'A', 'WORD', 'THE', 'CAPTAIN', 'OR', 'ANYONE', 'ELSE', 'OF', 'HIS', 'TIME', 'HE', 'HAD', 'UNDERSTOOD'] +4852-28330-0020-2064: ref=['WHAT', 'CAN', 'BE', 'SAID', 'DURING', 'THAT', 'TIME', 'SIR', 'CHRIS', 'THOUGHT', 'TO', 'ASK'] +4852-28330-0020-2064: hyp=['WHAT', 'CAN', 'BE', 'SAID', 'DURING', 'THAT', 'TIME', 'SIR', 'CHRIS', 'THOUGHT', 'TO', 'ASK'] +4852-28330-0021-2065: ref=['I', 'AM', 'SOMEWHAT', 'SKILLED', 'IN', 'MEDICAMENTS', 'I', 'HAVE', 'TO', 'BE', 'AS', 'CAPTAIN', 'OF', 'A', 'SHIP', 'AND', 'THE', 'CREW', 'KNOW', 'IT'] +4852-28330-0021-2065: hyp=['I', 'AM', 'SOMEWHAT', 'SKILLED', 'AT', 'MEDICAMENTS', 'I', 'HAVE', 'TO', 'BE', 'AS', 'A', 'CAPTAIN', 'OF', 'SHIP', 'AND', 'THE', 'CREW', 'KNOW', 'IT'] +4852-28330-0022-2066: ref=['I', 'SHALL', 'SAY', 'THAT', 'YOU', 'ARE', 'IN', 'MY', 'OWN', 'CABIN', 'SO', 'THAT', 'I', 'CAN', 'CARE', 'FOR', 'YOU'] +4852-28330-0022-2066: hyp=['I', 'SHALL', 'SAY', 'THAT', 'YOU', 'ARE', 'IN', 'MY', 'OWN', 'CABIN', 'SO', 'THAT', 'I', 'CAN', 'CARE', 'FOR', 'YOU'] +4852-28330-0023-2067: ref=['NOT', 'SINCE', 'HE', 'HAD', 'LEFT', 'MISTER', 'WICKER', 'HAD', 'CHRIS', 'FELT', 'SUCH', 'CONFIDENCE', 'AS', 'HE', 'DID', 'IN', 'THE', 'WORDS', 'AND', 'ACTIONS', 'OF', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0023-2067: hyp=['NOT', 'SINCE', 'HE', 'HAD', 'LEFT', 'MISTER', 'WICKER', 'AND', 'CHRIS', 'FELT', 'SUCH', 'CONFIDENCE', 'AS', 'HE', 'DID', 'IN', 'THE', 'WORDS', 'AND', 'ACTIONS', 'OF', 'CAPTAIN', 'BLIZZARD'] +4852-28330-0024-2068: ref=['HE', 'KNEW', 'NOW', 'THAT', 'HIS', 'ABSENCE', 'FOR', 'AS', 'LONG', 'AS', 'HE', 'HAD', 'TO', 'BE', 'AWAY', 'WOULD', 'BE', 'COVERED', 'UP', 'AND', 'SATISFACTORILY', 'ACCOUNTED', 'FOR'] +4852-28330-0024-2068: hyp=['HE', 'KNEW', 'NOW', 'THAT', 'HIS', 'ABSENCE', 'FOR', 'AS', 'LONG', 'AS', 'HE', 'HAD', 'HAD', 'TO', 'BE', 'AWAY', 'WOULD', 'BE', 'COVERED', 'UP', 'AND', 'SATISFACTORILY', 'ACCOUNTED', 'FOR'] +4852-28330-0025-2069: ref=['THEIR', 'CONVERSATION', 'HAD', 'TAKEN', 'SOME', 'LITTLE', 'WHILE'] +4852-28330-0025-2069: hyp=['THEIR', 'CONVERSATION', 'HAD', 'TAKEN', 'SOME', 'OF', 'THE', 'WHILE'] +533-1066-0000-796: ref=['WHEN', 'CHURCHYARDS', 'YAWN'] +533-1066-0000-796: hyp=['WHEN', 'CHURCHYARDS', 'YAWN'] +533-1066-0001-797: ref=['I', 'KNEW', 'WELL', 'ENOUGH', 'THAT', 'HE', 'MIGHT', 'BE', 'CARRIED', 'THOUSANDS', 'OF', 'MILES', 'IN', 'THE', 'BOX', 'CAR', 'LOCKED', 'IN', 'PERHAPS', 'WITHOUT', 'WATER', 'OR', 'FOOD'] +533-1066-0001-797: hyp=['I', 
'KNEW', 'WELL', 'ENOUGH', 'THAT', 'HE', 'MIGHT', 'BE', 'CARRIED', 'THOUSAND', 'OF', 'MILES', 'IN', 'THE', 'BOX', 'CAR', 'LOCKED', 'IN', 'PERHAPS', 'WITHOUT', 'WATER', 'OR', 'FULL'] +533-1066-0002-798: ref=['I', 'AM', 'SURE', 'I', 'KISSED', 'LIDDY', 'AND', 'I', 'HAVE', 'HAD', 'TERRIBLE', 'MOMENTS', 'SINCE', 'WHEN', 'I', 'SEEM', 'TO', 'REMEMBER', 'KISSING', 'MISTER', 'JAMIESON', 'TOO', 'IN', 'THE', 'EXCITEMENT'] +533-1066-0002-798: hyp=['I', 'AM', 'SURE', 'I', 'GUESS', 'LIVY', 'AND', "I'VE", 'HAD', 'SEVERAL', 'MOMENTS', 'SINCE', 'WHEN', 'I', 'SEEMED', 'TO', 'REMEMBER', 'KISSING', 'MISTER', 'JAMIESON', 'TOO', 'WITH', 'THE', 'EXCITEMENT'] +533-1066-0003-799: ref=['FORTUNATELY', 'WARNER', 'AND', 'THE', 'DETECTIVES', 'WERE', 'KEEPING', 'BACHELOR', 'HALL', 'IN', 'THE', 'LODGE'] +533-1066-0003-799: hyp=['FORTUNATELY', 'WARNER', 'AND', 'THE', 'DETECTS', 'WERE', 'KEEPING', 'BACHELOR', 'HALL', 'IN', 'LODGE'] +533-1066-0004-800: ref=['OUT', 'OF', 'DEFERENCE', 'TO', 'LIDDY', 'THEY', 'WASHED', 'THEIR', 'DISHES', 'ONCE', 'A', 'DAY', 'AND', 'THEY', 'CONCOCTED', 'QUEER', 'MESSES', 'ACCORDING', 'TO', 'THEIR', 'SEVERAL', 'ABILITIES'] +533-1066-0004-800: hyp=['OUT', 'OF', 'THEIR', 'FIRST', 'LIVY', 'THEY', 'WASHED', 'HER', 'DISHES', 'ONCE', 'TO', 'DAY', 'AND', 'THEY', 'CONCLUDED', 'QUEER', 'MASSES', 'ACCORDING', 'TO', 'THEIR', 'SEVERAL', 'ABILITIES'] +533-1066-0005-801: ref=['MISS', 'INNES', 'HE', 'SAID', 'STOPPING', 'ME', 'AS', 'I', 'WAS', 'ABOUT', 'TO', 'GO', 'TO', 'MY', 'ROOM', 'UP', 'STAIRS', 'HOW', 'ARE', 'YOUR', 'NERVES', 'TONIGHT'] +533-1066-0005-801: hyp=['MISS', 'EANS', 'HE', 'SAID', 'STOPPING', 'ME', 'AS', 'I', 'WAS', 'ABOUT', 'TO', 'GO', 'TO', 'MY', 'ROOM', 'UP', 'STAIRS', 'HOW', 'ARE', 'YOUR', 'NERVES', 'TO', 'NIGHT'] +533-1066-0006-802: ref=['I', 'HAVE', 'NONE', 'I', 'SAID', 'HAPPILY'] +533-1066-0006-802: hyp=['I', 'HAVE', 'NONE', 'I', 'SAID', 'HAPPILY'] +533-1066-0007-803: ref=['I', 'MEAN', 'HE', 'PERSISTED', 'DO', 'YOU', 'FEEL', 'AS', 'THOUGH', 'YOU', 'COULD', 'GO', 'THROUGH', 'WITH', 'SOMETHING', 'RATHER', 'UNUSUAL'] +533-1066-0007-803: hyp=['I', 'MEAN', 'HE', 'PERSISTED', 'DO', 'YOU', 'FEEL', 'AS', 'THOUGH', 'YOU', 'COULD', 'GO', 'THROUGH', 'WITH', 'SOMETHING', 'RATHER', 'UNUSUAL'] +533-1066-0008-804: ref=['THE', 'MOST', 'UNUSUAL', 'THING', 'I', 'CAN', 'THINK', 'OF', 'WOULD', 'BE', 'A', 'PEACEFUL', 'NIGHT'] +533-1066-0008-804: hyp=['THE', 'MOST', 'UNUSUAL', 'THING', 'I', 'CAN', 'THINK', 'OF', 'WOULD', 'BE', 'A', 'PEACEFUL', 'NIGHT'] +533-1066-0009-805: ref=['SOMETHING', 'IS', 'GOING', 'TO', 'OCCUR', 'HE', 'SAID'] +533-1066-0009-805: hyp=['SOMETHING', 'IS', 'GOING', 'TO', 'OCCUR', 'HE', 'SAID'] +533-1066-0010-806: ref=['PUT', 'ON', 'HEAVY', 'SHOES', 'AND', 'SOME', 'OLD', 'DARK', 'CLOTHES', 'AND', 'MAKE', 'UP', 'YOUR', 'MIND', 'NOT', 'TO', 'BE', 'SURPRISED', 'AT', 'ANYTHING'] +533-1066-0010-806: hyp=['PUT', 'ON', 'HEAVY', 'SHOES', 'AND', 'SOME', 'ALL', 'DARK', 'CLOTHES', 'AND', 'MAKE', 'UP', 'YOUR', 'MIND', 'NOT', 'TO', 'BE', 'SURPRISED', 'AT', 'ANYTHING'] +533-1066-0011-807: ref=['LIDDY', 'WAS', 'SLEEPING', 'THE', 'SLEEP', 'OF', 'THE', 'JUST', 'WHEN', 'I', 'WENT', 'UP', 'STAIRS', 'AND', 'I', 'HUNTED', 'OUT', 'MY', 'THINGS', 'CAUTIOUSLY'] +533-1066-0011-807: hyp=['LADY', 'WAS', 'SLEEPING', 'SLEEP', 'OF', 'THE', 'JUST', 'WHEN', 'I', 'WENT', 'UPSTAIRS', 'AND', 'I', 'HUNTED', 'OUT', 'MY', 'THINGS', 'CAUTIOUSLY'] +533-1066-0012-808: ref=['THEY', 'WERE', 'TALKING', 'CONFIDENTIALLY', 'TOGETHER', 'BUT', 'WHEN', 'I', 'CAME', 'DOWN', 'THEY', 'CEASED'] +533-1066-0012-808: hyp=['DO', 'YOU', 'TALKING', 
'TO', 'FILIALLY', 'TOGETHER', 'BUT', 'WHEN', 'I', 'CAME', 'DOWN', 'THEY', 'CEASED'] +533-1066-0013-809: ref=['THERE', 'WERE', 'A', 'FEW', 'PREPARATIONS', 'TO', 'BE', 'MADE', 'THE', 'LOCKS', 'TO', 'BE', 'GONE', 'OVER', 'WINTERS', 'TO', 'BE', 'INSTRUCTED', 'AS', 'TO', 'RENEWED', 'VIGILANCE', 'AND', 'THEN', 'AFTER', 'EXTINGUISHING', 'THE', 'HALL', 'LIGHT', 'WE', 'CREPT', 'IN', 'THE', 'DARKNESS', 'THROUGH', 'THE', 'FRONT', 'DOOR', 'AND', 'INTO', 'THE', 'NIGHT'] +533-1066-0013-809: hyp=['THEY', 'WERE', 'A', 'FEW', 'PREPARATIONS', 'TO', 'BE', 'MADE', 'LOGS', 'TO', 'BE', 'GONE', 'OVER', 'WINTERSPIN', 'INSTRUCTIVE', 'AS', 'TO', 'RENEWED', 'VISIONS', 'AND', 'THEN', 'AFTER', 'EXTINGUISHING', 'THE', 'WHOLE', 'LIGHT', 'WE', 'CREPT', 'IN', 'THE', 'DARKNESS', 'THROUGH', 'THE', 'FRONT', 'DOOR', 'AND', 'INTO', 'THE', 'NIGHT'] +533-1066-0014-810: ref=['I', 'ASKED', 'NO', 'QUESTIONS'] +533-1066-0014-810: hyp=['I', 'ASKED', 'NO', 'QUESTIONS'] +533-1066-0015-811: ref=['ONCE', 'ONLY', 'SOMEBODY', 'SPOKE', 'AND', 'THEN', 'IT', 'WAS', 'AN', 'EMPHATIC', 'BIT', 'OF', 'PROFANITY', 'FROM', 'DOCTOR', 'STEWART', 'WHEN', 'HE', 'RAN', 'INTO', 'A', 'WIRE', 'FENCE'] +533-1066-0015-811: hyp=['WAS', 'ONLY', 'SOMEBODY', 'SPOKE', 'AND', 'THEN', 'IT', 'WAS', 'AN', 'EMPHATIC', 'WID', 'OF', 'PROFANITY', 'FROM', 'DOCTOR', 'STEWART', 'WHEN', 'HE', 'RAN', 'INTO', 'A', 'WIRE', 'FENCE'] +533-1066-0016-812: ref=['I', 'HARDLY', 'KNOW', 'WHAT', 'I', 'EXPECTED'] +533-1066-0016-812: hyp=['I', 'ARE', 'TO', 'KNOW', 'WHAT', 'I', 'EXPECTED'] +533-1066-0017-813: ref=['THE', 'DOCTOR', 'WAS', 'PUFFING', 'SOMEWHAT', 'WHEN', 'WE', 'FINALLY', 'CAME', 'TO', 'A', 'HALT'] +533-1066-0017-813: hyp=['THE', 'DOCTOR', 'WAS', 'PUFFING', 'SOMEWHAT', 'WHEN', 'WE', 'FINALLY', 'CAME', 'TO', 'A', 'HALT'] +533-1066-0018-814: ref=['I', 'CONFESS', 'THAT', 'JUST', 'AT', 'THAT', 'MINUTE', 'EVEN', 'SUNNYSIDE', 'SEEMED', 'A', 'CHEERFUL', 'SPOT'] +533-1066-0018-814: hyp=['I', 'CONFESS', 'THAT', 'JUST', 'AT', 'THAT', 'MINUTE', 'EVEN', 'SUNNICIDE', 'SEEMED', 'A', 'CHEERFUL', 'SPOT'] +533-1066-0019-815: ref=['IN', 'SPITE', 'OF', 'MYSELF', 'I', 'DREW', 'MY', 'BREATH', 'IN', 'SHARPLY'] +533-1066-0019-815: hyp=['IN', 'SPITE', 'OF', 'MYSELF', 'I', 'DREW', 'MY', 'BREATH', 'IN', 'SHARPLY'] +533-1066-0020-816: ref=['IT', 'WAS', 'ALEX', 'ARMED', 'WITH', 'TWO', 'LONG', 'HANDLED', 'SPADES'] +533-1066-0020-816: hyp=['IT', 'WAS', 'ALEX', 'ARMED', 'WITH', 'TWO', 'LONG', 'HANDLED', 'SPADES'] +533-1066-0021-817: ref=['THE', 'DOCTOR', 'KEPT', 'A', 'KEEN', 'LOOKOUT', 'BUT', 'NO', 'ONE', 'APPEARED'] +533-1066-0021-817: hyp=['DOCTOR', 'KEPT', 'A', 'KIN', 'LOOK', 'OUT', 'BUT', 'NO', 'ONE', 'APPEARED'] +533-1066-0022-818: ref=["THERE'S", 'ONE', 'THING', 'SURE', "I'LL", 'NOT', 'BE', 'SUSPECTED', 'OF', 'COMPLICITY'] +533-1066-0022-818: hyp=["THERE'S", 'ONE', 'THING', 'SURE', "I'LL", 'NOT', 'BE', 'SUSPECTED', 'OF', 'COMPLICITY'] +533-1066-0023-819: ref=['A', 'DOCTOR', 'IS', 'GENERALLY', 'SUPPOSED', 'TO', 'BE', 'HANDIER', 'AT', 'BURYING', 'FOLKS', 'THAN', 'AT', 'DIGGING', 'THEM', 'UP'] +533-1066-0023-819: hyp=['A', 'DOCTOR', 'IS', 'GENERALLY', 'SUPPOSED', 'TO', 'BE', 'A', 'HANDIER', 'AT', 'BEARING', 'FOLKS', 'THAN', 'A', 'TIGGING', 'THEM', 'UP'] +533-1066-0024-820: ref=['I', 'HELD', 'ON', 'TO', 'HIM', 'FRANTICALLY', 'AND', 'SOMEHOW', 'I', 'GOT', 'THERE', 'AND', 'LOOKED', 'DOWN'] +533-1066-0024-820: hyp=['I', 'HELD', 'ON', 'TO', 'HIM', 'FRANTICALLY', 'AND', 'SOMEHOW', 'I', 'GOT', 'THERE', 'AND', 'LOOKED', 'DOWN'] +533-131556-0000-821: ref=['BUT', 'HOW', 'AM', 'I', 'TO', 'GET', 'OVER', 'THE', 'TEN', 
'OR', 'TWELVE', 'DAYS', 'THAT', 'MUST', 'YET', 'ELAPSE', 'BEFORE', 'THEY', 'GO'] +533-131556-0000-821: hyp=['BUT', 'HOW', 'AM', 'I', 'TO', 'HER', 'OVER', 'THE', 'TEN', 'OR', 'TWELVE', 'DAYS', 'THAT', 'MUST', 'YET', 'ELAPSE', 'BEFORE', 'THEY', 'GO'] +533-131556-0001-822: ref=['FOR', 'NONE', 'COULD', 'INJURE', 'ME', 'AS', 'HE', 'HAS', 'DONE', 'OH'] +533-131556-0001-822: hyp=['FOR', 'NONE', 'COULD', 'ENDURE', 'ME', 'AS', 'HE', 'HAS', 'DONE', 'OH'] +533-131556-0002-823: ref=['THE', 'WORD', 'STARES', 'ME', 'IN', 'THE', 'FACE', 'LIKE', 'A', 'GUILTY', 'CONFESSION', 'BUT', 'IT', 'IS', 'TRUE', 'I', 'HATE', 'HIM', 'I', 'HATE', 'HIM'] +533-131556-0002-823: hyp=['THE', 'WORDS', 'TEARS', 'ME', 'IN', 'THE', 'FACE', 'LIKE', 'A', 'GUILTY', 'CONFESSION', 'BUT', 'IT', 'IS', 'TRUE', 'I', 'HATE', 'HIM', 'I', 'HATE', 'HIM'] +533-131556-0003-824: ref=['I', 'SOMETIMES', 'THINK', 'I', 'OUGHT', 'TO', 'GIVE', 'HIM', 'CREDIT', 'FOR', 'THE', 'GOOD', 'FEELING', 'HE', 'SIMULATES', 'SO', 'WELL', 'AND', 'THEN', 'AGAIN', 'I', 'THINK', 'IT', 'IS', 'MY', 'DUTY', 'TO', 'SUSPECT', 'HIM', 'UNDER', 'THE', 'PECULIAR', 'CIRCUMSTANCES', 'IN', 'WHICH', 'I', 'AM', 'PLACED'] +533-131556-0003-824: hyp=['I', 'SOMETIMES', 'THINK', 'I', 'OUGHT', 'TO', 'GIVE', 'HIM', 'CREDIT', 'FOR', 'THE', 'GOOD', 'FEELING', 'SIMILATE', 'SO', 'WELL', 'AND', 'THEN', 'AGAIN', 'I', 'THINK', 'IT', 'IS', 'MY', 'DUTY', 'TO', 'SUSPECT', 'HIM', 'UNDER', 'THE', 'PECULIAR', 'CIRCUMSTANCES', 'IN', 'WHICH', 'I', 'AM', 'PLACED'] +533-131556-0004-825: ref=['I', 'HAVE', 'DONE', 'WELL', 'TO', 'RECORD', 'THEM', 'SO', 'MINUTELY'] +533-131556-0004-825: hyp=['I', 'HAVE', 'DONE', 'WELL', 'TO', 'RECORD', 'HIM', 'SOMINUTELY'] +533-131556-0005-826: ref=['THEY', 'HAD', 'BETAKEN', 'THEMSELVES', 'TO', 'THEIR', 'WORK', 'I', 'LESS', 'TO', 'DIVERT', 'MY', 'MIND', 'THAN', 'TO', 'DEPRECATE', 'CONVERSATION', 'HAD', 'PROVIDED', 'MYSELF', 'WITH', 'A', 'BOOK'] +533-131556-0005-826: hyp=['THEY', 'HAVE', 'HAD', 'TAKEN', 'THEMSELVES', 'TO', 'THEIR', 'WORK', 'I', 'LESS', 'DIVERT', 'MY', 'MIND', 'THAN', 'TO', 'DEPRECATE', 'CONVERSATION', 'I', 'PROVIDED', 'MYSELF', 'FIT', 'A', 'BOOK'] +533-131556-0006-827: ref=['I', 'AM', 'TOO', 'WELL', 'ACQUAINTED', 'WITH', 'YOUR', 'CHARACTER', 'AND', 'CONDUCT', 'TO', 'FEEL', 'ANY', 'REAL', 'FRIENDSHIP', 'FOR', 'YOU', 'AND', 'AS', 'I', 'AM', 'WITHOUT', 'YOUR', 'TALENT', 'FOR', 'DISSIMULATION', 'I', 'CANNOT', 'ASSUME', 'THE', 'APPEARANCE', 'OF', 'IT'] +533-131556-0006-827: hyp=['I', 'AM', 'TOO', 'WELL', 'ACQUAINTED', 'WITH', 'THEIR', 'CHARACTER', 'AND', 'CONDUCT', 'TO', 'FEEL', 'ANY', 'REAL', 'FRIENDSHIP', 'FOR', 'YOU', 'AND', 'AS', 'I', 'AM', 'WITHOUT', 'YOUR', 'TALENT', 'FOR', 'DISSIMULATION', 'I', 'CANNOT', 'ASSUME', 'THE', 'APPEARANCE', 'OF', 'IT'] +533-131556-0007-828: ref=['UPON', 'PERUSING', 'THIS', 'SHE', 'TURNED', 'SCARLET', 'AND', 'BIT', 'HER', 'LIP'] +533-131556-0007-828: hyp=['UP', 'AND', 'PERUSING', 'THIS', 'SHE', 'TURNED', 'SCARLET', 'AND', 'BIT', 'HER', 'LIP'] +533-131556-0008-829: ref=['YOU', 'MAY', 'GO', 'MILICENT', 'AND', "SHE'LL", 'FOLLOW', 'IN', 'A', 'WHILE', 'MILICENT', 'WENT'] +533-131556-0008-829: hyp=['YOU', 'MAY', 'GO', 'MILLICENT', 'AND', "SHE'LL", 'FOLLOWING', 'AWHILE', 'MELLICENT', 'WENT'] +533-131556-0009-830: ref=['WILL', 'YOU', 'OBLIGE', 'ME', 'HELEN', 'CONTINUED', 'SHE'] +533-131556-0009-830: hyp=['OLY', 'OBLIGED', 'ME', 'ALAN', 'CONTINUED', 'SHE'] +533-131556-0010-831: ref=['AH', 'YOU', 'ARE', 'SUSPICIOUS'] +533-131556-0010-831: hyp=['HA', 'YOU', 'ARE', 'SUSPICIOUS'] +533-131556-0011-832: ref=['IF', 'I', 'WERE', 'SUSPICIOUS', 
'I', 'REPLIED', 'I', 'SHOULD', 'HAVE', 'DISCOVERED', 'YOUR', 'INFAMY', 'LONG', 'BEFORE'] +533-131556-0011-832: hyp=['IF', 'I', 'WERE', 'SUSPICIOUS', 'I', 'REPLIED', 'I', 'SHOULD', 'HAVE', 'DISCOVERED', 'YOUR', 'INFAMYLON', 'BEFORE'] +533-131556-0012-833: ref=['I', 'ENJOY', 'A', 'MOONLIGHT', 'RAMBLE', 'AS', 'WELL', 'AS', 'YOU', 'I', 'ANSWERED', 'STEADILY', 'FIXING', 'MY', 'EYES', 'UPON', 'HER', 'AND', 'THE', 'SHRUBBERY', 'HAPPENS', 'TO', 'BE', 'ONE', 'OF', 'MY', 'FAVOURITE', 'RESORTS'] +533-131556-0012-833: hyp=['I', 'ENJOY', 'A', 'MONTH', 'RAMBLE', 'AS', 'WELL', 'AS', 'YOU', 'I', 'ANSWERED', 'STEADILY', 'FIXING', 'MY', 'EYES', 'UP', 'AND', 'EARTH', 'AND', 'SHRABBERY', 'HAPPENS', 'TO', 'BE', 'ONE', 'OF', 'MY', 'FAVORITE', 'RESORTS'] +533-131556-0013-834: ref=['SHE', 'COLOURED', 'AGAIN', 'EXCESSIVELY', 'AND', 'REMAINED', 'SILENT', 'PRESSING', 'HER', 'FINGER', 'AGAINST', 'HER', 'TEETH', 'AND', 'GAZING', 'INTO', 'THE', 'FIRE'] +533-131556-0013-834: hyp=['SHE', 'COLOURED', 'BEGAN', 'EXCESSIVELY', 'AND', 'REMAINED', 'SILENT', 'RAISING', 'HER', 'FINGER', 'AGAINST', 'HER', 'CHEEKS', 'AND', 'GAZING', 'INTO', 'THE', 'FIRE'] +533-131556-0014-835: ref=['I', 'WATCHED', 'HER', 'A', 'FEW', 'MOMENTS', 'WITH', 'A', 'FEELING', 'OF', 'MALEVOLENT', 'GRATIFICATION', 'THEN', 'MOVING', 'TOWARDS', 'THE', 'DOOR', 'I', 'CALMLY', 'ASKED', 'IF', 'SHE', 'HAD', 'ANYTHING', 'MORE', 'TO', 'SAY'] +533-131556-0014-835: hyp=['I', 'WAS', 'FOR', 'A', 'FEW', 'MOMENTS', 'TO', 'THE', 'FEELING', 'OF', 'MALEVOLENT', 'GRATIFICATION', 'THEN', 'MOVING', 'TOWARDS', 'THE', 'DOOR', 'I', 'CALMLY', 'ASKED', 'IF', 'SHE', 'HAD', 'ANYTHING', 'MORE', 'TO', 'SAY'] +533-131556-0015-836: ref=['YES', 'YES'] +533-131556-0015-836: hyp=['YES', 'YES'] +533-131556-0016-837: ref=['SUPPOSE', 'I', 'DO'] +533-131556-0016-837: hyp=['SUPPOSE', 'I', 'DO'] +533-131556-0017-838: ref=['SHE', 'PAUSED', 'IN', 'EVIDENT', 'DISCONCERTION', 'AND', 'PERPLEXITY', 'MINGLED', 'WITH', 'ANGER', 'SHE', 'DARED', 'NOT', 'SHOW'] +533-131556-0017-838: hyp=['SHE', 'PAUSED', 'IN', 'EVIDENT', 'DISCONCERTION', 'AND', 'PERPLEXITY', 'MINGLED', 'WITH', 'ANGER', 'SHE', 'DARED', 'NOT', 'SHOW'] +533-131556-0018-839: ref=['I', 'CANNOT', 'RENOUNCE', 'WHAT', 'IS', 'DEARER', 'THAN', 'LIFE', 'SHE', 'MUTTERED', 'IN', 'A', 'LOW', 'HURRIED', 'TONE'] +533-131556-0018-839: hyp=['I', 'CANNOT', 'RENOUNCE', 'WHAT', 'IS', 'DEARER', 'THAN', 'LIFE', 'SHE', 'MUTTERED', 'IN', 'A', 'LOW', 'HURRIED', 'TONE'] +533-131556-0019-840: ref=['IF', 'YOU', 'ARE', 'GENEROUS', 'HERE', 'IS', 'A', 'FITTING', 'OPPORTUNITY', 'FOR', 'THE', 'EXERCISE', 'OF', 'YOUR', 'MAGNANIMITY', 'IF', 'YOU', 'ARE', 'PROUD', 'HERE', 'AM', 'I', 'YOUR', 'RIVAL', 'READY', 'TO', 'ACKNOWLEDGE', 'MYSELF', 'YOUR', 'DEBTOR', 'FOR', 'AN', 'ACT', 'OF', 'THE', 'MOST', 'NOBLE', 'FORBEARANCE'] +533-131556-0019-840: hyp=['IF', 'YOU', 'ARE', 'GENERALS', 'HERE', 'IS', 'A', 'FEELING', 'OPPORTUNITY', 'FOR', 'THE', 'EXERCISE', 'OF', 'YOUR', 'MAGNANIMITY', 'IF', 'YOU', 'ARE', 'PROUD', 'HEAR', 'AM', 'I', 'YOUR', 'RIVAL', 'RETIC', 'TO', 'ANNOUNCE', 'MYSELF', 'YOUR', 'DAUGHTER', 'FOR', 'ACT', 'OF', 'MOST', 'NOBLE', 'FORBEARANCE'] +533-131556-0020-841: ref=['I', 'SHALL', 'NOT', 'TELL', 'HIM'] +533-131556-0020-841: hyp=['I', 'SHALL', 'NOT', 'TELL', 'HIM'] +533-131556-0021-842: ref=['GIVE', 'ME', 'NO', 'THANKS', 'IT', 'IS', 'NOT', 'FOR', 'YOUR', 'SAKE', 'THAT', 'I', 'REFRAIN'] +533-131556-0021-842: hyp=['GIVE', 'ME', 'NO', 'THANKS', 'IT', 'IS', 'NOT', 'FOR', 'YOUR', 'SAKE', 'THAT', 'I', 'REFRAIN'] +533-131556-0022-843: ref=['AND', 'MILICENT', 'WILL', 'YOU', 'TELL', 
'HER'] +533-131556-0022-843: hyp=['AND', 'MILLISON', 'WILL', 'IT', 'TELL', 'HER'] +533-131556-0023-844: ref=['I', 'WOULD', 'NOT', 'FOR', 'MUCH', 'THAT', 'SHE', 'SHOULD', 'KNOW', 'THE', 'INFAMY', 'AND', 'DISGRACE', 'OF', 'HER', 'RELATION'] +533-131556-0023-844: hyp=['I', 'WILL', 'NOT', 'FOR', 'MUCH', 'THAT', 'YOU', 'SHOULD', 'NOT', 'EVEN', 'IN', 'DISGRACE', 'OF', 'HER', 'RELATION'] +533-131556-0024-845: ref=['YOU', 'USE', 'HARD', 'WORDS', 'MISSUS', 'HUNTINGDON', 'BUT', 'I', 'CAN', 'PARDON', 'YOU'] +533-131556-0024-845: hyp=['YOU', 'USE', 'OUR', 'WORDS', 'MISSUS', 'HUNTINGDON', 'BUT', 'I', 'CAN', 'PARDON', 'YOU'] +533-131556-0025-846: ref=['HOW', 'DARE', 'YOU', 'MENTION', 'HIS', 'NAME', 'TO', 'ME'] +533-131556-0025-846: hyp=['HOW', 'DARE', 'YOU', 'MENTION', 'HIS', 'NAME', 'TO', 'ME'] +533-131562-0000-847: ref=['IT', 'SEEMS', 'VERY', 'INTERESTING', 'LOVE', 'SAID', 'HE', 'LIFTING', 'HIS', 'HEAD', 'AND', 'TURNING', 'TO', 'WHERE', 'I', 'STOOD', 'WRINGING', 'MY', 'HANDS', 'IN', 'SILENT', 'RAGE', 'AND', 'ANGUISH', 'BUT', "IT'S", 'RATHER', 'LONG', "I'LL", 'LOOK', 'AT', 'IT', 'SOME', 'OTHER', 'TIME', 'AND', 'MEANWHILE', "I'LL", 'TROUBLE', 'YOU', 'FOR', 'YOUR', 'KEYS', 'MY', 'DEAR', 'WHAT', 'KEYS'] +533-131562-0000-847: hyp=['IT', 'SEEMS', 'VERY', 'INTERESTING', 'LOVE', 'SAID', 'HE', 'LIFTING', 'HIS', 'HEAD', 'AND', 'SHIRTING', 'TO', 'HER', 'EYES', 'TOO', 'WRINGING', 'MY', 'HAND', 'IN', 'SILENT', 'RATES', 'AND', 'ANGUISH', 'BUT', "IT'S", 'RATHER', 'LONG', 'I', 'LOOK', 'AT', 'IT', 'SOME', 'OTHER', 'TIME', 'AND', 'MEANWHILE', "I'LL", 'TROUBLE', 'YOU', 'FOR', 'YOUR', 'CASE', 'MY', 'DEAR', 'WHAT', 'CASE'] +533-131562-0001-848: ref=['THE', 'KEYS', 'OF', 'YOUR', 'CABINET', 'DESK', 'DRAWERS', 'AND', 'WHATEVER', 'ELSE', 'YOU', 'POSSESS', 'SAID', 'HE', 'RISING', 'AND', 'HOLDING', 'OUT', 'HIS', 'HAND'] +533-131562-0001-848: hyp=['IT', 'A', 'KISS', 'OF', 'YOUR', 'CABINET', 'DESK', 'DRAWER', 'AND', 'WHATEVER', 'ELSE', 'YOU', 'POSSESS', 'SAID', 'HE', 'RISING', 'AND', 'HOLDING', 'OUT', 'HIS', 'HAND'] +533-131562-0002-849: ref=['THE', 'KEY', 'OF', 'MY', 'DESK', 'IN', 'FACT', 'WAS', 'AT', 'THAT', 'MOMENT', 'IN', 'THE', 'LOCK', 'AND', 'THE', 'OTHERS', 'WERE', 'ATTACHED', 'TO', 'IT'] +533-131562-0002-849: hyp=['THE', 'KEY', 'OF', 'MY', 'VESK', 'IN', 'FACT', 'WAS', 'AT', 'THAT', 'MOMENT', 'IN', 'LOVE', 'AND', 'THE', 'OTHERS', 'WERE', 'ATTACHED', 'TO', 'IT'] +533-131562-0003-850: ref=['NOW', 'THEN', 'SNEERED', 'HE', 'WE', 'MUST', 'HAVE', 'A', 'CONFISCATION', 'OF', 'PROPERTY'] +533-131562-0003-850: hyp=['NOW', 'THEN', 'SNEERED', 'HE', 'WE', 'MUST', 'HAVE', 'A', 'CONFISCATION', 'OF', 'PROPERTY'] +533-131562-0004-851: ref=['AND', 'PUTTING', 'THE', 'KEYS', 'INTO', 'HIS', 'POCKET', 'HE', 'WALKED', 'INTO', 'THE', 'LIBRARY'] +533-131562-0004-851: hyp=['AND', 'PUT', 'IN', 'THE', 'KEYS', 'INTO', 'HIS', 'POCKET', 'HE', 'WALKED', 'INTO', 'THE', 'LIBRARY'] +533-131562-0005-852: ref=['THAT', 'AND', 'ALL', 'REPLIED', 'THE', 'MASTER', 'AND', 'THE', 'THINGS', 'WERE', 'CLEARED', 'AWAY'] +533-131562-0005-852: hyp=['THAT', 'AND', 'ALL', 'REPLIED', 'THE', 'MERCER', 'AND', 'THE', 'THINGS', 'WERE', 'CLEARED', 'AWAY'] +533-131562-0006-853: ref=['MISTER', 'HUNTINGDON', 'THEN', 'WENT', 'UP', 'STAIRS'] +533-131562-0006-853: hyp=['MISTER', 'HUNTINGDON', 'THEN', 'WENT', 'UPSTAIRS'] +533-131562-0007-854: ref=['MUTTERED', 'HE', 'STARTING', 'BACK', "SHE'S", 'THE', 'VERY', 'DEVIL', 'FOR', 'SPITE'] +533-131562-0007-854: hyp=['MUTTERED', 'HE', 'STARTING', 'BACK', "SHE'S", 'VERY', 'DEVIL', 'FOR', 'SPITE'] +533-131562-0008-855: ref=['I', "DIDN'T", 'SAY', 
"I'D", 'BROKEN', 'IT', 'DID', 'I', 'RETURNED', 'HE'] +533-131562-0008-855: hyp=['I', 'THEN', 'SAY', "I'VE", 'BROKEN', 'IT', 'DID', 'I', 'RETURNED', 'HE'] +533-131562-0009-856: ref=['I', 'SHALL', 'PUT', 'YOU', 'UPON', 'A', 'SMALL', 'MONTHLY', 'ALLOWANCE', 'IN', 'FUTURE', 'FOR', 'YOUR', 'OWN', 'PRIVATE', 'EXPENSES', 'AND', 'YOU', "NEEDN'T", 'TROUBLE', 'YOURSELF', 'ANY', 'MORE', 'ABOUT', 'MY', 'CONCERNS', 'I', 'SHALL', 'LOOK', 'OUT', 'FOR', 'A', 'STEWARD', 'MY', 'DEAR', 'I', "WON'T", 'EXPOSE', 'YOU', 'TO', 'THE', 'TEMPTATION'] +533-131562-0009-856: hyp=['I', 'SHALL', 'PUT', 'YOU', 'UP', 'IN', 'A', 'SMALL', 'MOUTHLY', 'ALLOW', 'US', 'IN', 'FUTURE', 'FOR', 'YOUR', 'OWN', 'PRIVATE', 'EXPENSES', 'AND', 'YOU', "NEEDN'T", 'TROUBLE', 'YOURSELF', 'ANY', 'MORE', 'ABOUT', 'MY', 'CONCERNS', 'I', 'SHALL', 'LOOK', 'OUT', 'FOR', 'A', 'STEWARD', 'MY', 'DEAR', 'I', "WON'T", 'EXPOSE', 'YOU', 'TO', 'THE', 'TEMPTATION'] +533-131562-0010-857: ref=['AND', 'AS', 'FOR', 'THE', 'HOUSEHOLD', 'MATTERS', 'MISSUS', 'GREAVES', 'MUST', 'BE', 'VERY', 'PARTICULAR', 'IN', 'KEEPING', 'HER', 'ACCOUNTS', 'WE', 'MUST', 'GO', 'UPON', 'AN', 'ENTIRELY', 'NEW', 'PLAN'] +533-131562-0010-857: hyp=['AND', 'AS', 'FOR', 'THE', 'HOUSE', 'OF', 'MATTERS', 'MISSUS', 'GREEBS', 'MUST', 'BE', 'VERY', 'PARTICULAR', 'IN', 'KEEPING', 'HER', 'ACCOUNTS', 'WE', 'MUST', 'GO', 'UP', 'IN', 'AN', 'ENCHANTING', 'NEW', 'PLAN'] +533-131562-0011-858: ref=['WHAT', 'GREAT', 'DISCOVERY', 'HAVE', 'YOU', 'MADE', 'NOW', 'MISTER', 'HUNTINGDON'] +533-131562-0011-858: hyp=['WHAT', 'GREAT', 'DISCOVERY', 'HAVE', 'YOU', 'MADE', 'NOW', 'MISTER', 'HONEYMAN'] +533-131562-0012-859: ref=['HAVE', 'I', 'ATTEMPTED', 'TO', 'DEFRAUD', 'YOU'] +533-131562-0012-859: hyp=['EVER', 'ATTENDED', 'TO', 'DEFRAUD', 'YOU'] +533-131562-0013-860: ref=['NOT', 'IN', 'MONEY', 'MATTERS', 'EXACTLY', 'IT', 'SEEMS', 'BUT', "IT'S", 'BEST', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'TEMPTATION'] +533-131562-0013-860: hyp=['NOT', 'IN', 'MONEY', 'MATTERS', 'EXACTLY', 'IT', 'SEEMS', 'BUT', "IT'S", 'BEST', 'TO', 'KEEP', 'OUT', 'OF', 'THE', 'WAY', 'OF', 'TEMPTATION'] +533-131562-0014-861: ref=['HERE', 'BENSON', 'ENTERED', 'WITH', 'THE', 'CANDLES', 'AND', 'THERE', 'FOLLOWED', 'A', 'BRIEF', 'INTERVAL', 'OF', 'SILENCE', 'I', 'SITTING', 'STILL', 'IN', 'MY', 'CHAIR', 'AND', 'HE', 'STANDING', 'WITH', 'HIS', 'BACK', 'TO', 'THE', 'FIRE', 'SILENTLY', 'TRIUMPHING', 'IN', 'MY', 'DESPAIR'] +533-131562-0014-861: hyp=['HERE', 'BESSON', 'ENTERED', 'THE', 'CANDLES', 'AND', 'THERE', 'FELL', 'THE', 'BRIEF', 'INTERVAL', 'OF', 'SILENCE', 'I', 'SITTING', 'STEALING', 'MY', 'CHAIR', 'AND', 'HE', 'STANDING', 'WITH', 'HIS', 'BACK', 'TO', 'THE', 'FIRE', 'SILENTLY', 'TRIUMPHING', 'IN', 'MY', 'DESPAIR'] +533-131562-0015-862: ref=['I', 'KNOW', 'THAT', 'DAY', 'AFTER', 'DAY', 'SUCH', 'FEELINGS', 'WILL', 'RETURN', 'UPON', 'ME'] +533-131562-0015-862: hyp=['I', 'KNOW', 'THAT', 'DAY', 'AFTER', 'DAY', 'SUCH', 'FEELINGS', 'TO', 'RETURN', 'UP', 'ON', 'ME'] +533-131562-0016-863: ref=['I', 'TRY', 'TO', 'LOOK', 'TO', 'HIM', 'AND', 'RAISE', 'MY', 'HEART', 'TO', 'HEAVEN', 'BUT', 'IT', 'WILL', 'CLEAVE', 'TO', 'THE', 'DUST'] +533-131562-0016-863: hyp=['I', 'TRIED', 'TO', 'LOOK', 'TO', 'HIM', 'AND', 'RAISE', 'MY', 'HEART', 'TO', 'HEAVEN', 'BUT', 'IT', 'WILL', 'CLIFF', 'TO', 'THE', 'DUST'] +533-131564-0000-768: ref=['VAIN', 'HOPE', 'I', 'FEAR'] +533-131564-0000-768: hyp=['VAIN', 'HOPE', 'I', 'FEAR'] +533-131564-0001-769: ref=['MISTER', 'AND', 'MISSUS', 'HATTERSLEY', 'HAVE', 'BEEN', 'STAYING', 'AT', 'THE', 'GROVE', 'A', 'FORTNIGHT', 'AND', 'AS', 
'MISTER', 'HARGRAVE', 'IS', 'STILL', 'ABSENT', 'AND', 'THE', 'WEATHER', 'WAS', 'REMARKABLY', 'FINE', 'I', 'NEVER', 'PASSED', 'A', 'DAY', 'WITHOUT', 'SEEING', 'MY', 'TWO', 'FRIENDS', 'MILICENT', 'AND', 'ESTHER', 'EITHER', 'THERE', 'OR', 'HERE'] +533-131564-0001-769: hyp=['MISTER', 'AND', 'MISSUS', 'HAUGHTERSLEY', 'HAVE', 'BEEN', 'SEEING', 'IT', 'TO', 'GROW', 'BEFORE', 'NIGHT', 'AND', 'AS', 'MISSUS', 'HARGRAVE', 'IS', 'STILL', 'ABSENT', 'AND', 'THE', 'WEATHER', 'WAS', 'REMARKABLY', 'FINE', 'AND', 'REPAST', 'A', 'DAY', 'WITHOUT', 'SEEING', 'MY', 'TWO', 'FRIENDS', 'MIELSON', 'AND', 'ESTHER', 'EITHER', 'THERE', 'OR', 'HERE'] +533-131564-0002-770: ref=['NO', 'UNLESS', 'YOU', 'CAN', 'TELL', 'ME', 'WHEN', 'TO', 'EXPECT', 'HIM', 'HOME'] +533-131564-0002-770: hyp=['NO', 'UNLESS', 'YOU', 'CAN', 'TELL', 'ME', 'WHEN', 'TO', 'EXPECT', 'HIM', 'HOME'] +533-131564-0003-771: ref=['I', "CAN'T", 'YOU', "DON'T", 'WANT', 'HIM', 'DO', 'YOU'] +533-131564-0003-771: hyp=['I', "CAN'T", 'EVEN', 'ONE', 'WANTS', 'HIM', 'DO', 'YOU'] +533-131564-0004-772: ref=['IT', 'IS', 'A', 'RESOLUTION', 'YOU', 'OUGHT', 'TO', 'HAVE', 'FORMED', 'LONG', 'AGO'] +533-131564-0004-772: hyp=['IT', 'IS', 'A', 'RESOLUTION', 'YOU', 'ARE', 'REFORMED', 'LONG', 'AGO'] +533-131564-0005-773: ref=['WE', 'ALL', 'HAVE', 'A', 'BIT', 'OF', 'A', 'LIKING', 'FOR', 'HIM', 'AT', 'THE', 'BOTTOM', 'OF', 'OUR', 'HEARTS', 'THOUGH', 'WE', "CAN'T", 'RESPECT', 'HIM'] +533-131564-0005-773: hyp=['WE', 'ALL', 'HAVE', 'A', 'BIT', 'OF', 'A', 'LIKING', 'FOR', 'HIM', 'AT', 'THE', 'BOTTOM', 'OF', 'OUR', 'HEART', 'THOUGH', 'WE', "CAN'T", 'RESPECT', 'HIM'] +533-131564-0006-774: ref=['NO', "I'D", 'RATHER', 'BE', 'LIKE', 'MYSELF', 'BAD', 'AS', 'I', 'AM'] +533-131564-0006-774: hyp=['NO', "I'D", 'RATHER', 'BE', 'LIKE', 'MYSELF', 'WHETHER', 'I', 'AM'] +533-131564-0007-775: ref=['NEVER', 'MIND', 'MY', 'PLAIN', 'SPEAKING', 'SAID', 'I', 'IT', 'IS', 'FROM', 'THE', 'BEST', 'OF', 'MOTIVES'] +533-131564-0007-775: hyp=['NEVER', 'MIND', 'MY', 'PLAIN', 'SPEAKING', 'SAID', 'I', 'IT', 'IS', 'FROM', 'THE', 'BEST', 'OF', 'MOTIVES'] +533-131564-0008-776: ref=['BUT', 'TELL', 'ME', 'SHOULD', 'YOU', 'WISH', 'YOUR', 'SONS', 'TO', 'BE', 'LIKE', 'MISTER', 'HUNTINGDON', 'OR', 'EVEN', 'LIKE', 'YOURSELF'] +533-131564-0008-776: hyp=['BUT', 'TELL', 'ME', 'SHOULD', 'YOU', 'WISH', 'TO', 'YOUR', 'SONS', 'TO', 'BE', 'LIKE', 'MISTER', 'HUNTINGDON', 'OR', 'EVEN', 'LIKE', 'YOURSELF'] +533-131564-0009-777: ref=['OH', 'NO', 'I', "COULDN'T", 'STAND', 'THAT'] +533-131564-0009-777: hyp=['OH', 'NO', 'I', "COULDN'T", 'STAND', 'THAT'] +533-131564-0010-778: ref=['FIRE', 'AND', 'FURY'] +533-131564-0010-778: hyp=['FOREIGN', 'FURY'] +533-131564-0011-779: ref=['NOW', "DON'T", 'BURST', 'INTO', 'A', 'TEMPEST', 'AT', 'THAT'] +533-131564-0011-779: hyp=['NOW', "DON'T", 'FORCE', 'INTO', 'A', 'TEMPEST', 'AT', 'THAT'] +533-131564-0012-780: ref=['BUT', 'HANG', 'IT', "THAT'S", 'NOT', 'MY', 'FAULT'] +533-131564-0012-780: hyp=['BUT', 'HANG', 'IT', "THAT'S", 'NOT', 'MY', 'FAULT'] +533-131564-0013-781: ref=['NOT', 'YEARS', 'FOR', "SHE'S", 'ONLY', 'FIVE', 'AND', 'TWENTY'] +533-131564-0013-781: hyp=['NOT', 'EARS', 'FORCES', 'ONLY', 'FIVE', 'AND', 'TWENTY'] +533-131564-0014-782: ref=['WHAT', 'WOULD', 'YOU', 'MAKE', 'OF', 'ME', 'AND', 'THE', 'CHILDREN', 'TO', 'BE', 'SURE', 'THAT', 'WORRY', 'HER', 'TO', 'DEATH', 'BETWEEN', 'THEM'] +533-131564-0014-782: hyp=['WHAT', 'DID', 'YOU', 'MAKE', 'OF', 'ME', 'AND', 'THE', 'CHILDREN', 'TO', 'BE', 'SURE', 'THAT', 'WERE', 'HE', 'HURT', 'DEATH', 'BETWEEN', 'THEM'] +533-131564-0015-783: ref=['I', 'KNOW', 
'THEY', 'ARE', 'BLESS', 'THEM'] +533-131564-0015-783: hyp=['I', 'KNOW', 'THEY', 'ARE', 'BLESS', 'THEM'] +533-131564-0016-784: ref=['HE', 'FOLLOWED', 'ME', 'INTO', 'THE', 'LIBRARY'] +533-131564-0016-784: hyp=['IF', 'ALL', 'OF', 'ME', 'INTO', 'THE', 'LIBRARY'] +533-131564-0017-785: ref=['I', 'SOUGHT', 'OUT', 'AND', 'PUT', 'INTO', 'HIS', 'HANDS', 'TWO', 'OF', "MILICENT'S", 'LETTERS', 'ONE', 'DATED', 'FROM', 'LONDON', 'AND', 'WRITTEN', 'DURING', 'ONE', 'OF', 'HIS', 'WILDEST', 'SEASONS', 'OF', 'RECKLESS', 'DISSIPATION', 'THE', 'OTHER', 'IN', 'THE', 'COUNTRY', 'DURING', 'A', 'LUCID', 'INTERVAL'] +533-131564-0017-785: hyp=['I', 'SET', 'OUT', 'AND', 'PUT', 'INTO', 'HIS', 'HANDS', 'TWO', 'OF', "MILLSON'S", 'LETTERS', 'ONE', 'DID', 'IT', 'FROM', 'LONDON', 'AND', 'WRITTEN', 'DURING', 'ONE', 'OF', 'HIS', 'WALLACE', 'SEASONS', 'OF', 'RECKLESS', 'DISSIPATION', 'THE', 'OTHER', 'IN', 'THE', 'COUNTRY', 'DURING', 'A', 'LUCID', 'INTERVAL'] +533-131564-0018-786: ref=['THE', 'FORMER', 'WAS', 'FULL', 'OF', 'TROUBLE', 'AND', 'ANGUISH', 'NOT', 'ACCUSING', 'HIM', 'BUT', 'DEEPLY', 'REGRETTING', 'HIS', 'CONNECTION', 'WITH', 'HIS', 'PROFLIGATE', 'COMPANIONS', 'ABUSING', 'MISTER', 'GRIMSBY', 'AND', 'OTHERS', 'INSINUATING', 'BITTER', 'THINGS', 'AGAINST', 'MISTER', 'HUNTINGDON', 'AND', 'MOST', 'INGENIOUSLY', 'THROWING', 'THE', 'BLAME', 'OF', 'HER', "HUSBAND'S", 'MISCONDUCT', 'ON', 'TO', 'OTHER', "MEN'S", 'SHOULDERS'] +533-131564-0018-786: hyp=['THE', 'FORMER', 'WAS', 'FULL', 'OF', 'TROUBLE', 'AND', 'ANGUISH', 'NOT', 'ACCUSING', 'HIM', 'BUT', 'DEEPLY', 'REGRETTING', 'HIS', 'CONNECTION', 'WITH', 'HIS', 'PROFLIGATE', 'COMPANIONS', 'ABUSING', 'MISTER', 'GRIMSBY', 'AND', 'OTHERS', 'INSINUATING', 'BITTER', 'THINGS', 'AGAINST', 'MISTER', 'HUNTON', 'AND', 'MOST', 'INGENUOUSLY', 'THROWING', 'THE', 'BLAME', 'OF', 'HER', "HUSBAND'S", 'MISCONDUCT', 'ON', 'THE', 'OTHER', "MAN'S", 'SHOULDERS'] +533-131564-0019-787: ref=["I'VE", 'BEEN', 'A', 'CURSED', 'RASCAL', 'GOD', 'KNOWS', 'SAID', 'HE', 'AS', 'HE', 'GAVE', 'IT', 'A', 'HEARTY', 'SQUEEZE', 'BUT', 'YOU', 'SEE', 'IF', 'I', "DON'T", 'MAKE', 'AMENDS', 'FOR', 'IT', 'D', 'N', 'ME', 'IF', 'I', "DON'T"] +533-131564-0019-787: hyp=["I'VE", 'BEEN', 'A', 'CURSED', 'RASCAL', 'GOD', 'KNOWS', 'SAID', 'HE', 'AS', 'HE', 'GAVE', 'IT', 'AN', 'EARTHLY', 'SQUEEZE', 'BUT', 'YOU', 'SEE', 'IF', 'I', "DON'T", 'MAKE', 'AMENDS', 'FOR', 'IT', 'THEN', 'ME', 'IF', 'I', "DON'T"] +533-131564-0020-788: ref=['IF', 'YOU', 'INTEND', 'TO', 'REFORM', 'INVOKE', "GOD'S", 'BLESSING', 'HIS', 'MERCY', 'AND', 'HIS', 'AID', 'NOT', 'HIS', 'CURSE'] +533-131564-0020-788: hyp=['IF', 'YOU', 'INTEND', 'TO', 'REFORM', 'INVOKE', "GOD'S", 'BLESSING', 'IS', 'MERCY', 'IN', 'THIS', 'APE', 'NOT', 'DISCOURSE'] +533-131564-0021-789: ref=['GOD', 'HELP', 'ME', 'THEN', 'FOR', "I'M", 'SURE', 'I', 'NEED', 'IT'] +533-131564-0021-789: hyp=['GOD', 'HELP', 'ME', 'THEN', 'FOR', 'I', 'AM', 'SURE', 'I', 'NEED', 'IT'] +533-131564-0022-790: ref=["WHERE'S", 'MILICENT'] +533-131564-0022-790: hyp=["WHERE'S", 'MILLISON'] +533-131564-0023-791: ref=['NAY', 'NOT', 'I', 'SAID', 'HE', 'TURNING', 'HER', 'ROUND', 'AND', 'PUSHING', 'HER', 'TOWARDS', 'ME'] +533-131564-0023-791: hyp=['NAY', 'NOT', 'I', 'SAID', 'HE', 'TURNING', 'ROUND', 'AND', 'PUSHING', 'TOWARDS', 'ME'] +533-131564-0024-792: ref=['MILICENT', 'FLEW', 'TO', 'THANK', 'ME', 'OVERFLOWING', 'WITH', 'GRATITUDE'] +533-131564-0024-792: hyp=['MILLISON', 'FLUD', 'TO', 'THANK', 'ME', 'OVERWHELMING', 'ITS', 'GRATITUDE'] +533-131564-0025-793: ref=['CRIED', 'SHE', 'I', "COULDN'T", 'HAVE', 'INFLUENCED', 'HIM', "I'M", 
'SURE', 'BY', 'ANYTHING', 'THAT', 'I', 'COULD', 'HAVE', 'SAID'] +533-131564-0025-793: hyp=['CRIED', 'SHE', 'I', "COULDN'T", 'HAVE', 'EVILISED', 'HIM', "I'M", 'SURE', 'BY', 'ANYTHING', 'THAT', 'I', 'COULD', 'HAVE', 'SAID'] +533-131564-0026-794: ref=['YOU', 'NEVER', 'TRIED', 'ME', 'MILLY', 'SAID', 'HE'] +533-131564-0026-794: hyp=['YOU', 'NEVER', 'TRIED', 'ME', 'MERELY', 'SAID', 'HE'] +533-131564-0027-795: ref=['AFTER', 'THAT', 'THEY', 'WILL', 'REPAIR', 'TO', 'THEIR', 'COUNTRY', 'HOME'] +533-131564-0027-795: hyp=['AFTER', 'THAT', 'THEY', 'WILL', 'REPAIR', 'TO', 'THEIR', 'COUNTRY', 'HOME'] +5442-32873-0000-1365: ref=['CAPTAIN', 'LAKE', 'DID', 'NOT', 'LOOK', 'AT', 'ALL', 'LIKE', 'A', 'LONDON', 'DANDY', 'NOW'] +5442-32873-0000-1365: hyp=['CAPTAIN', 'LAKE', 'DID', 'NOT', 'LOOK', 'AT', 'ON', 'LIKE', 'A', 'LONDON', 'DANDY', 'NOW'] +5442-32873-0001-1366: ref=['THERE', 'WAS', 'A', 'VERY', 'NATURAL', 'SAVAGERY', 'AND', 'DEJECTION', 'THERE', 'AND', 'A', 'WILD', 'LEER', 'IN', 'HIS', 'YELLOW', 'EYES', 'RACHEL', 'SAT', 'DOWN'] +5442-32873-0001-1366: hyp=['THERE', 'WAS', 'A', 'VERY', 'NATURAL', 'SAVAGERY', 'AND', 'DEJECTION', 'THEN', 'AND', 'A', 'WILD', "URINA'S", 'YELLOW', 'EYES', 'RACHEL', 'SAT', 'DOWN'] +5442-32873-0002-1367: ref=['A', 'SLAVE', 'ONLY', 'THINK', 'A', 'SLAVE'] +5442-32873-0002-1367: hyp=['A', 'SLAVE', 'ONLY', 'THINK', 'A', 'SLAVE'] +5442-32873-0003-1368: ref=['OH', 'FRIGHTFUL', 'FRIGHTFUL', 'IS', 'IT', 'A', 'DREAM'] +5442-32873-0003-1368: hyp=['OH', 'FRIGHTFUL', 'FRIGHTFUL', 'IS', 'IT', 'A', 'DREAM'] +5442-32873-0004-1369: ref=['OH', 'FRIGHTFUL', 'FRIGHTFUL'] +5442-32873-0004-1369: hyp=['ALL', 'FRIGHTFUL', 'CRIED', 'FAWN'] +5442-32873-0005-1370: ref=['STANLEY', 'STANLEY', 'IT', 'WOULD', 'BE', 'MERCY', 'TO', 'KILL', 'ME', 'SHE', 'BROKE', 'OUT', 'AGAIN'] +5442-32873-0005-1370: hyp=['STANLEY', 'STANLEY', 'IT', 'WOULD', 'BE', 'MERCY', 'TO', 'KILL', 'ME', 'SHE', 'BROKE', 'HER', 'AGAIN'] +5442-32873-0006-1371: ref=['BRIGHT', 'AND', 'NATTY', 'WERE', 'THE', 'CHINTZ', 'CURTAINS', 'AND', 'THE', 'LITTLE', 'TOILET', 'SET', 'OUT', 'NOT', 'INELEGANTLY', 'AND', 'HER', 'PET', 'PIPING', 'GOLDFINCH', 'ASLEEP', 'ON', 'HIS', 'PERCH', 'WITH', 'HIS', 'BIT', 'OF', 'SUGAR', 'BETWEEN', 'THE', 'WIRES', 'OF', 'HIS', 'CAGE', 'HER', 'PILLOW', 'SO', 'WHITE', 'AND', 'UNPRESSED', 'WITH', 'ITS', 'LITTLE', 'EDGING', 'OF', 'LACE'] +5442-32873-0006-1371: hyp=['BRIGHT', 'AND', 'NATTY', 'WITH', 'A', 'CHIN', 'CURTAINS', 'AND', 'THE', 'LITTLE', 'TOILET', 'SET', 'OUT', 'NOT', 'IN', 'ELEGANTLY', 'AND', 'HER', 'BED', 'PIPING', 'GOLDFINCH', 'ASLEEP', 'ON', 'HIS', 'PERCH', 'WITH', 'HIS', 'BIT', 'OF', 'SUGAR', 'BETWEEN', 'THE', 'WISE', 'OF', 'HIS', 'CAGE', 'HER', 'PILLOW', 'SO', 'WHITE', 'AND', 'UNPRESSED', 'WITH', 'ITS', 'LITTLE', 'EDGING', 'OF', 'LACE'] +5442-32873-0007-1372: ref=['WHEN', 'HE', 'CAME', 'BACK', 'TO', 'THE', 'DRAWING', 'ROOM', 'A', 'TOILET', 'BOTTLE', 'OF', 'EAU', 'DE', 'COLOGNE', 'IN', 'HIS', 'HAND', 'WITH', 'HER', 'LACE', 'HANDKERCHIEF', 'HE', 'BATHED', 'HER', 'TEMPLES', 'AND', 'FOREHEAD'] +5442-32873-0007-1372: hyp=['WHEN', 'HE', 'CAME', 'BACK', 'TO', 'THE', 'DRAWING', 'ROOM', 'I', 'TOLD', 'IT', 'WHAT', 'HE', 'OF', "O'ER", 'THE', 'CLOON', 'IN', 'HIS', 'HAND', 'WITH', 'HER', 'LACE', 'HANDKERCHIEF', 'HE', 'BATHED', 'HER', 'TEMPLE', 'AND', 'FOREHEAD'] +5442-32873-0008-1373: ref=['THERE', 'WAS', 'NOTHING', 'VERY', 'BROTHERLY', 'IN', 'HIS', 'LOOK', 'AS', 'HE', 'PEERED', 'INTO', 'HER', 'PALE', 'SHARP', 'FEATURES', 'DURING', 'THE', 'PROCESS'] +5442-32873-0008-1373: hyp=['THERE', 'WAS', 'NOTHING', 'VERY', 'BROTHERLY', 
'IN', 'HIS', 'LOOK', 'AS', 'HE', 'PEERED', 'INTO', 'A', 'PALE', 'SHARP', 'FEATURES', 'DURING', 'THE', 'PROCESS'] +5442-32873-0009-1374: ref=['THERE', "DON'T", 'MIND', 'ME', 'SHE', 'SAID', 'SHARPLY', 'AND', 'GETTING', 'UP', 'SHE', 'LOOKED', 'DOWN', 'AT', 'HER', 'DRESS', 'AND', 'THIN', 'SHOES', 'AND', 'SEEMING', 'TO', 'RECOLLECT', 'HERSELF', 'SHE', 'TOOK', 'THE', 'CANDLE', 'HE', 'HAD', 'JUST', 'SET', 'DOWN', 'AND', 'WENT', 'SWIFTLY', 'TO', 'HER', 'ROOM'] +5442-32873-0009-1374: hyp=['THERE', "DON'T", 'MIND', 'ME', 'SHE', 'SAID', 'SHARPLY', 'AND', 'GETTING', 'UP', 'SHE', 'LOOKED', 'DOWN', 'AT', 'HER', 'DRESS', 'AND', 'THIN', 'SHOES', 'AND', 'SEEMING', 'TO', 'RECOLLECT', 'HERSELF', 'SHE', 'TOOK', 'THE', 'CANDLE', 'HE', 'HAD', 'JUST', 'SAT', 'DOWN', 'AND', 'WHEN', 'SWIFTLY', 'TO', 'HER', 'ROOM'] +5442-32873-0010-1375: ref=['AND', 'SHE', 'THREW', 'BACK', 'HER', 'VEIL', 'AND', 'GOING', 'HURRIEDLY', 'TO', 'THE', 'TOILET', 'MECHANICALLY', 'SURVEYED', 'HERSELF', 'IN', 'THE', 'GLASS'] +5442-32873-0010-1375: hyp=['AND', 'SHE', 'THREW', 'BACK', 'HER', 'VEAL', 'AND', 'GOING', 'HURRIEDLY', 'TO', 'THE', 'TOILET', 'MECHANICALLY', 'SURVEYED', 'HERSELF', 'FROM', 'THE', 'GLANCE'] +5442-32873-0011-1376: ref=['RACHEL', 'LAKE', 'RACHEL', 'LAKE', 'WHAT', 'ARE', 'YOU', 'NOW'] +5442-32873-0011-1376: hyp=['ORIGINALLY', 'LATER', 'LAKE', 'WHAT', 'ARE', 'YOU', 'NOW'] +5442-32873-0012-1377: ref=["I'LL", 'STAY', 'HERE', 'THAT', 'IS', 'IN', 'THE', 'DRAWING', 'ROOM', 'SHE', 'ANSWERED', 'AND', 'THE', 'FACE', 'WAS', 'WITHDRAWN'] +5442-32873-0012-1377: hyp=["I'LL", 'STAY', 'HERE', 'THAT', 'IS', 'IN', 'THE', 'DRAWING', 'ROOM', 'SHE', 'ANSWERED', 'AND', 'THE', 'FACE', 'WAS', 'WITHDRAWN'] +5442-32873-0013-1378: ref=['HE', 'SLACKENED', 'HIS', 'PACE', 'AND', 'TAPPED', 'SHARPLY', 'AT', 'THE', 'LITTLE', 'WINDOW', 'OF', 'THAT', 'MODEST', 'POST', 'OFFICE', 'AT', 'WHICH', 'THE', 'YOUNG', 'LADIES', 'IN', 'THE', 'PONY', 'CARRIAGE', 'HAD', 'PULLED', 'UP', 'THE', 'DAY', 'BEFORE', 'AND', 'WITHIN', 'WHICH', 'LUKE', 'WAGGOT', 'WAS', 'WONT', 'TO', 'SLEEP', 'IN', 'A', 'SORT', 'OF', 'WOODEN', 'BOX', 'THAT', 'FOLDED', 'UP', 'AND', 'APPEARED', 'TO', 'BE', 'A', 'CHEST', 'OF', 'DRAWERS', 'ALL', 'DAY'] +5442-32873-0013-1378: hyp=['HIS', 'CLACKENED', 'HIS', 'FACE', 'AND', 'TAP', 'SHARPLY', 'AT', 'THE', 'LITTLE', 'WINDOW', 'OF', 'THE', 'MODEST', 'POST', 'OFFICE', 'AT', 'WHICH', 'THE', 'YOUNG', 'LADIES', 'IN', 'THE', 'PONY', 'CARRIAGE', 'HAD', 'PULLED', 'UP', 'THE', 'DAY', 'BEFORE', 'AND', 'WITHIN', 'WHICH', 'LUKE', 'RAGGED', 'WAS', 'WONT', 'TO', 'SLEEP', 'IN', 'A', 'SORT', 'OF', 'WOODEN', 'BOX', 'THAT', 'FOLDED', 'UP', 'AND', 'APPEARED', 'TO', 'BE', 'A', 'CHEST', 'OF', 'DRAWERS', 'ALL', 'DAY'] +5442-32873-0014-1379: ref=['LUKE', 'TOOK', 'CARE', 'OF', 'MISTER', "LARKIN'S", 'DOGS', 'AND', 'GROOMED', 'MISTER', "WYLDER'S", 'HORSE', 'AND', 'CLEANED', 'UP', 'HIS', 'DOG', 'CART', 'FOR', 'MARK', 'BEING', 'CLOSE', 'ABOUT', 'MONEY', 'AND', 'FINDING', 'THAT', 'THE', 'THING', 'WAS', 'TO', 'BE', 'DONE', 'MORE', 'CHEAPLY', 'THAT', 'WAY', 'PUT', 'UP', 'HIS', 'HORSE', 'AND', 'DOG', 'CART', 'IN', 'THE', 'POST', 'OFFICE', 'PREMISES', 'AND', 'SO', 'EVADED', 'THE', 'LIVERY', 'CHARGES', 'OF', 'THE', 'BRANDON', 'ARMS'] +5442-32873-0014-1379: hyp=['LOOK', 'TO', 'CARE', 'OF', 'MISTER', 'LARKINS', 'DOGS', 'AND', 'GROOMED', 'MISTER', "WILDER'S", 'HORSE', 'AND', 'CLEANED', 'UP', 'HIS', 'DOOR', 'CART', 'FOR', 'MARK', 'BEING', 'CLOSE', 'ABOUT', 'MONEY', 'AND', 'FINDING', 'THAT', 'THE', 'THING', 'WAS', 'TO', 'BE', 'DONE', 'MORE', 'CHEAPLY', 'THAT', 'WAY', 'PUT', 'UP', 'HIS', 
'HORSE', 'AND', 'DOG', 'CART', 'IN', 'THE', 'POST', 'OF', 'HIS', 'PREMISES', 'AND', 'SO', 'EVADED', 'THE', 'LIVERY', 'CHARGES', 'OF', 'THE', 'BRANDON', 'ARMS'] +5442-32873-0015-1380: ref=['BUT', 'LUKE', 'WAS', 'NOT', 'THERE', 'AND', 'CAPTAIN', 'LAKE', 'RECOLLECTING', 'HIS', 'HABITS', 'AND', 'HIS', 'HAUNT', 'HURRIED', 'ON', 'TO', 'THE', 'SILVER', 'LION', 'WHICH', 'HAS', 'ITS', 'GABLE', 'TOWARDS', 'THE', 'COMMON', 'ONLY', 'ABOUT', 'A', 'HUNDRED', 'STEPS', 'AWAY', 'FOR', 'DISTANCES', 'ARE', 'NOT', 'GREAT', 'IN', 'GYLINGDEN'] +5442-32873-0015-1380: hyp=['THE', 'LUKE', 'WAS', 'KNOWN', 'THERE', 'AND', 'CAPTAIN', 'LAKE', 'RECOLLECTING', 'HIS', 'HABITS', 'AND', 'HIS', 'HAUNT', 'HURRIED', 'ON', 'TO', 'THE', 'SILVER', 'LION', 'WHICH', 'HAS', 'ITS', 'CABLE', 'TOWARDS', 'A', 'COMMON', 'ONLY', 'ABOUT', 'A', 'HUNDRED', 'STEPS', 'AWAY', 'FOR', 'DISTANCES', 'ARE', 'NOT', 'GREAT', 'IN', 'GYLINGDEN'] +5442-32873-0016-1381: ref=['HERE', 'WERE', 'THE', 'FLOW', 'OF', 'SOUL', 'AND', 'OF', 'STOUT', 'LONG', 'PIPES', 'LONG', 'YARNS', 'AND', 'TOLERABLY', 'LONG', 'CREDITS', 'AND', 'THE', 'HUMBLE', 'SCAPEGRACES', 'OF', 'THE', 'TOWN', 'RESORTED', 'THITHER', 'FOR', 'THE', 'PLEASURES', 'OF', 'A', 'CLUB', 'LIFE', 'AND', 'OFTEN', 'REVELLED', 'DEEP', 'INTO', 'THE', 'SMALL', 'HOURS', 'OF', 'THE', 'MORNING'] +5442-32873-0016-1381: hyp=['HERE', 'WERE', 'THE', 'FLOW', 'OF', 'SOUL', 'AND', 'OF', 'STOUT', 'LONG', 'PIPES', 'LONG', 'YARNS', 'AND', 'TOLERABLY', 'LONG', 'CREDITS', 'AND', 'THE', 'HUMBLE', 'SKIPPED', 'BRACES', 'OF', 'THE', 'TOWN', 'RESORTED', 'THITHER', 'FOR', 'THE', 'PLEASURES', 'OF', 'A', 'CLUB', 'LIFE', 'AND', 'OFTEN', 'REVELLED', 'DEEP', 'INTO', 'THE', 'SMALL', 'HOURS', 'OF', 'THE', 'MORNING'] +5442-32873-0017-1382: ref=['LOSE', 'NO', 'TIME', 'AND', "I'LL", 'GIVE', 'YOU', 'HALF', 'A', 'CROWN'] +5442-32873-0017-1382: hyp=['LOSE', 'NO', 'TIME', 'WHEN', "I'LL", 'GIVE', 'YOU', 'HALF', 'A', 'CROWN'] +5442-32873-0018-1383: ref=['LUKE', 'STUCK', 'ON', 'HIS', 'GREASY', 'WIDEAWAKE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'MORE', 'THE', 'DOG', 'CART', 'WAS', 'TRUNDLED', 'OUT', 'INTO', 'THE', 'LANE', 'AND', 'THE', 'HORSE', 'HARNESSED', 'WENT', 'BETWEEN', 'THE', 'SHAFTS', 'WITH', 'THAT', 'WONDERFUL', 'CHEERFULNESS', 'WITH', 'WHICH', 'THEY', 'BEAR', 'TO', 'BE', 'CALLED', 'UP', 'UNDER', 'STARTLING', 'CIRCUMSTANCES', 'AT', 'UNSEASONABLE', 'HOURS'] +5442-32873-0018-1383: hyp=['LUKE', 'STUCK', 'ON', 'HIS', 'GREASY', 'WIDE', 'AWAKE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'MORE', 'THE', 'DOOR', 'CART', 'WAS', 'TUMBLED', 'OUT', 'INTO', 'THE', 'LANE', 'AND', 'THE', 'HORSE', 'HARNESSED', 'WENT', 'BETWEEN', 'THE', 'SHAFTS', 'WITH', 'THAT', 'WONDERFUL', 'CHEERFULNESS', 'WITH', 'WHICH', 'THEY', 'BEARED', 'TO', 'BE', 'CALLED', 'UP', 'AND', 'THE', 'STARTLING', 'CIRCUMSTANCES', 'THAT', 'UNSEASONABLE', 'HOURS'] +5442-32873-0019-1384: ref=['IF', 'I', 'THOUGHT', "YOU'D", 'FAIL', 'ME', 'NOW', 'TAMAR', 'I', 'SHOULD', 'NEVER', 'COME', 'BACK', 'GOOD', 'NIGHT', 'TAMAR'] +5442-32873-0019-1384: hyp=['IF', 'I', 'THOUGHT', "YOU'D", 'FILL', 'ME', 'NOW', 'TO', 'MORROW', 'I', 'SHOULD', 'NEVER', 'COME', 'BACK', 'GOOD', 'NIGHT', 'TO', 'MORROW'] +5442-41168-0000-1385: ref=['THE', 'ACT', 'SAID', 'THAT', 'IN', 'CASE', 'OF', 'DIFFERENCE', 'OF', 'OPINION', 'THERE', 'MUST', 'BE', 'A', 'BALLOT'] +5442-41168-0000-1385: hyp=['THE', 'ACT', 'SAID', 'THAT', 'IN', 'CASE', 'OF', 'DIFFERENCE', 'OF', 'OPINION', 'THERE', 'MUST', 'BE', 'A', 'BALLOT'] +5442-41168-0001-1386: ref=['HE', 'WENT', 'UP', 'TO', 'THE', 'TABLE', 'AND', 'STRIKING', 'IT', 'WITH', 'HIS', 'FINGER', 'RING', 
'HE', 'SHOUTED', 'LOUDLY', 'A', 'BALLOT'] +5442-41168-0001-1386: hyp=['HE', 'WENT', 'UP', 'TO', 'THE', 'TABLE', 'AND', 'STRIKING', 'IT', 'WITH', 'HIS', 'FINGERING', 'HE', 'SHOUTED', 'LOUDLY', 'A', 'BALLOT'] +5442-41168-0002-1387: ref=['HE', 'WAS', 'SHOUTING', 'FOR', 'THE', 'VERY', 'COURSE', 'SERGEY', 'IVANOVITCH', 'HAD', 'PROPOSED', 'BUT', 'IT', 'WAS', 'EVIDENT', 'THAT', 'HE', 'HATED', 'HIM', 'AND', 'ALL', 'HIS', 'PARTY', 'AND', 'THIS', 'FEELING', 'OF', 'HATRED', 'SPREAD', 'THROUGH', 'THE', 'WHOLE', 'PARTY', 'AND', 'ROUSED', 'IN', 'OPPOSITION', 'TO', 'IT', 'THE', 'SAME', 'VINDICTIVENESS', 'THOUGH', 'IN', 'A', 'MORE', 'SEEMLY', 'FORM', 'ON', 'THE', 'OTHER', 'SIDE'] +5442-41168-0002-1387: hyp=['HE', 'WAS', 'SHOUTING', 'FOR', 'THE', 'VERY', 'COARSE', 'SURGY', 'IVANOVITCH', 'HAD', 'PROPOSED', 'BUT', 'IT', 'WAS', 'EVIDENT', 'THAT', 'HE', 'HATED', 'HIM', 'AND', 'ALL', 'HIS', 'PARTY', 'AND', 'THIS', 'FEELING', 'OF', 'HATRED', 'SPREAD', 'THROUGH', 'THE', 'WHOLE', 'PARTY', 'AND', 'ROUSED', 'IN', 'OUR', 'POSITION', 'TO', 'IT', 'THE', 'SAME', 'VINDICTIVENESS', 'THOUGH', 'IN', 'A', 'MORE', 'SEEMLY', 'FORM', 'ON', 'THE', 'OTHER', 'SIDE'] +5442-41168-0003-1388: ref=['SHOUTS', 'WERE', 'RAISED', 'AND', 'FOR', 'A', 'MOMENT', 'ALL', 'WAS', 'CONFUSION', 'SO', 'THAT', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'HAD', 'TO', 'CALL', 'FOR', 'ORDER', 'A', 'BALLOT'] +5442-41168-0003-1388: hyp=['SHOUTS', 'WERE', 'RAISED', 'AND', 'FOR', 'A', 'MOMENT', 'ALL', 'WAS', 'CONFUSION', 'SO', 'THAT', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'HAD', 'TO', 'CALL', 'FOR', 'OTTO', 'A', 'BALLOT'] +5442-41168-0004-1389: ref=['WE', 'SHED', 'OUR', 'BLOOD', 'FOR', 'OUR', 'COUNTRY'] +5442-41168-0004-1389: hyp=['WE', 'SHUT', 'OUR', 'BLOOD', 'FOR', 'OUR', 'COUNTRY'] +5442-41168-0005-1390: ref=['THE', 'CONFIDENCE', 'OF', 'THE', 'MONARCH', 'NO', 'CHECKING', 'THE', 'ACCOUNTS', 'OF', 'THE', 'MARSHAL', "HE'S", 'NOT', 'A', 'CASHIER', 'BUT', "THAT'S", 'NOT', 'THE', 'POINT'] +5442-41168-0005-1390: hyp=['THE', 'CONFIDENCE', 'OF', 'THE', 'MONARCH', 'BUT', 'NO', 'COOKING', 'THE', 'ACCOUNTS', 'OF', 'THE', 'MARTIAN', 'IS', 'NOT', 'A', 'CASHIER', 'BUT', "THAT'S", 'NOT', 'THE', 'POINT'] +5442-41168-0006-1391: ref=['VOTES', 'PLEASE', 'BEASTLY'] +5442-41168-0006-1391: hyp=['VOTES', 'PLEASE', 'PEASY'] +5442-41168-0007-1392: ref=['THEY', 'EXPRESSED', 'THE', 'MOST', 'IMPLACABLE', 'HATRED'] +5442-41168-0007-1392: hyp=['THEY', 'EXPRESSED', 'THE', 'MOST', 'IMPLACABLE', 'HATRED'] +5442-41168-0008-1393: ref=['LEVIN', 'DID', 'NOT', 'IN', 'THE', 'LEAST', 'UNDERSTAND', 'WHAT', 'WAS', 'THE', 'MATTER', 'AND', 'HE', 'MARVELED', 'AT', 'THE', 'PASSION', 'WITH', 'WHICH', 'IT', 'WAS', 'DISPUTED', 'WHETHER', 'OR', 'NOT', 'THE', 'DECISION', 'ABOUT', 'FLEROV', 'SHOULD', 'BE', 'PUT', 'TO', 'THE', 'VOTE'] +5442-41168-0008-1393: hyp=['LEVIN', 'DID', 'NOT', 'IN', 'THE', 'LEAST', 'UNDERSTAND', 'WHAT', 'WAS', 'THE', 'MATTER', 'AND', 'HE', 'MARVELLED', 'AT', 'THE', 'PASSION', 'WITH', 'WHICH', 'IT', 'WAS', 'DISPUTED', 'WHETHER', 'OR', 'NOT', 'THE', 'DECISION', 'ABOUT', 'FLIROFF', 'SHOULD', 'BE', 'PUT', 'TO', 'THE', 'VOTE'] +5442-41168-0009-1394: ref=['HE', 'FORGOT', 'AS', 'SERGEY', 'IVANOVITCH', 'EXPLAINED', 'TO', 'HIM', 'AFTERWARDS', 'THIS', 'SYLLOGISM', 'THAT', 'IT', 'WAS', 'NECESSARY', 'FOR', 'THE', 'PUBLIC', 'GOOD', 'TO', 'GET', 'RID', 'OF', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'THAT', 'TO', 'GET', 'RID', 'OF', 'THE', 'MARSHAL', 'IT', 'WAS', 'NECESSARY', 'TO', 'HAVE', 'A', 'MAJORITY', 'OF', 'VOTES', 'THAT', 'TO', 'GET', 'A', 'MAJORITY', 'OF', 'VOTES', 'IT', 'WAS', 
'NECESSARY', 'TO', 'SECURE', "FLEROV'S", 'RIGHT', 'TO', 'VOTE', 'THAT', 'TO', 'SECURE', 'THE', 'RECOGNITION', 'OF', "FLEROV'S", 'RIGHT', 'TO', 'VOTE', 'THEY', 'MUST', 'DECIDE', 'ON', 'THE', 'INTERPRETATION', 'TO', 'BE', 'PUT', 'ON', 'THE', 'ACT'] +5442-41168-0009-1394: hyp=['HE', 'FORGOT', 'AS', 'SO', 'GEVINOVITCH', 'EXPLAINED', 'TO', 'HIM', 'AFTERWARDS', 'THIS', 'SILLIGIOUS', 'EM', 'THAT', 'IT', 'WAS', 'NECESSARY', 'FOR', 'THE', 'PUBLIC', 'GOOD', 'TO', 'GET', 'RID', 'OF', 'THE', 'MARSHAL', 'OF', 'THE', 'PROVINCE', 'THAT', 'TO', 'GET', 'HER', 'TO', 'THE', 'MARSHAL', 'IT', 'WAS', 'NECESSARY', 'TO', 'HAVE', 'A', 'MAJORITY', 'OF', 'VOTES', 'THAT', 'TO', 'GET', 'A', 'MAJORITY', 'OF', 'VOTES', 'IT', 'WAS', 'NECESSARY', 'TO', 'SECURE', "FLIROV'S", 'RIGHT', 'TO', 'VOTE', 'THAT', 'TO', 'SECURED', 'THE', 'RECOGNITION', 'OF', "FLIROV'S", 'RIGHT', 'TO', 'VOTE', 'THEY', 'MUST', 'DECIDE', 'ON', 'THE', 'INTERPRETATION', 'TO', 'BE', 'PUT', 'ON', 'THE', 'ACT'] +5442-41168-0010-1395: ref=['BUT', 'LEVIN', 'FORGOT', 'ALL', 'THAT', 'AND', 'IT', 'WAS', 'PAINFUL', 'TO', 'HIM', 'TO', 'SEE', 'ALL', 'THESE', 'EXCELLENT', 'PERSONS', 'FOR', 'WHOM', 'HE', 'HAD', 'A', 'RESPECT', 'IN', 'SUCH', 'AN', 'UNPLEASANT', 'AND', 'VICIOUS', 'STATE', 'OF', 'EXCITEMENT'] +5442-41168-0010-1395: hyp=['BUT', 'LEVIN', 'FORGOT', 'ALL', 'THAT', 'AND', 'IT', 'WAS', 'PAINFUL', 'TO', 'HIM', 'TO', 'SEE', 'ALL', 'THESE', 'EXCELLENT', 'PERSONS', 'FOR', 'WHOM', 'HE', 'HAD', 'A', 'RESPECT', 'IN', 'SUCH', 'AN', 'UNPLEASANT', 'AND', 'VICIOUS', 'STATE', 'OF', 'EXCITEMENT'] +5442-41168-0011-1396: ref=['TO', 'ESCAPE', 'FROM', 'THIS', 'PAINFUL', 'FEELING', 'HE', 'WENT', 'AWAY', 'INTO', 'THE', 'OTHER', 'ROOM', 'WHERE', 'THERE', 'WAS', 'NOBODY', 'EXCEPT', 'THE', 'WAITERS', 'AT', 'THE', 'REFRESHMENT', 'BAR'] +5442-41168-0011-1396: hyp=['TO', 'US', 'GIVE', 'FROM', 'THIS', 'PAINFUL', 'FEELING', 'HE', 'WENT', 'AWAY', 'INTO', 'THE', 'OTHER', 'ROOM', 'WHERE', 'THERE', 'WAS', 'NOBODY', 'EXCEPT', 'THE', 'WAITERS', 'AT', 'THE', 'FRESHMENT', 'BAR'] +5442-41168-0012-1397: ref=['HE', 'PARTICULARLY', 'LIKED', 'THE', 'WAY', 'ONE', 'GRAY', 'WHISKERED', 'WAITER', 'WHO', 'SHOWED', 'HIS', 'SCORN', 'FOR', 'THE', 'OTHER', 'YOUNGER', 'ONES', 'AND', 'WAS', 'JEERED', 'AT', 'BY', 'THEM', 'WAS', 'TEACHING', 'THEM', 'HOW', 'TO', 'FOLD', 'UP', 'NAPKINS', 'PROPERLY'] +5442-41168-0012-1397: hyp=['HE', 'PARTICULARLY', 'LIKED', 'THE', 'WAY', 'ONE', 'GREY', 'WAS', 'GOOD', 'WAITER', 'WHO', 'SHOWED', 'US', 'GONE', 'FOR', 'THE', 'OTHER', 'YOUNGER', 'ONES', 'AND', 'WAS', 'JOURED', 'AT', 'BY', 'THEM', 'WAS', 'TEACHING', 'THEM', 'HOW', 'TO', 'FOLD', 'UP', 'NAPKINS', 'PROPERLY'] +5442-41168-0013-1398: ref=['LEVIN', 'ADVANCED', 'BUT', 'UTTERLY', 'FORGETTING', 'WHAT', 'HE', 'WAS', 'TO', 'DO', 'AND', 'MUCH', 'EMBARRASSED', 'HE', 'TURNED', 'TO', 'SERGEY', 'IVANOVITCH', 'WITH', 'THE', 'QUESTION', 'WHERE', 'AM', 'I', 'TO', 'PUT', 'IT'] +5442-41168-0013-1398: hyp=['LEVIN', 'ADVANCED', 'BUT', 'UTTERLY', 'FORGETTING', 'WHAT', 'HE', 'WAS', 'TO', 'DO', 'AND', 'MUCH', 'EMBARRASSED', 'HE', 'TURNED', 'TO', 'SERGEY', 'IVANOVITCH', 'WITH', 'THE', 'QUESTION', 'WHERE', 'AM', 'I', 'TO', 'PUT', 'IT'] +5442-41168-0014-1399: ref=['SERGEY', 'IVANOVITCH', 'FROWNED'] +5442-41168-0014-1399: hyp=['SO', 'AS', 'YE', 'IVANOVITCH', 'GROUND'] +5442-41168-0015-1400: ref=['THAT', 'IS', 'A', 'MATTER', 'FOR', 'EACH', "MAN'S", 'OWN', 'DECISION', 'HE', 'SAID', 'SEVERELY'] +5442-41168-0015-1400: hyp=['THAT', 'IS', 'A', 'MATTER', 'FOR', 'EACH', "MAN'S", 'OWN', 'DECISION', 'HE', 'SAID', 'SEVERELY'] +5442-41168-0016-1401: 
ref=['HAVING', 'PUT', 'IT', 'IN', 'HE', 'RECOLLECTED', 'THAT', 'HE', 'OUGHT', 'TO', 'HAVE', 'THRUST', 'HIS', 'LEFT', 'HAND', 'TOO', 'AND', 'SO', 'HE', 'THRUST', 'IT', 'IN', 'THOUGH', 'TOO', 'LATE', 'AND', 'STILL', 'MORE', 'OVERCOME', 'WITH', 'CONFUSION', 'HE', 'BEAT', 'A', 'HASTY', 'RETREAT', 'INTO', 'THE', 'BACKGROUND'] +5442-41168-0016-1401: hyp=['HAVING', 'PUT', 'IT', 'IN', 'HE', 'RECOLLECTED', 'THAT', 'HE', 'OUGHT', 'HAVE', 'THRUST', 'HIS', 'LEFT', 'HAND', 'TOO', 'AND', 'SO', 'HE', 'THRUST', 'IT', 'THOUGH', 'TOO', 'LATE', 'AND', 'STILL', 'MORE', 'OVERCOME', 'WITH', 'CONFUSION', 'HE', 'BEAT', 'A', 'HASTY', 'RETREAT', 'INTO', 'THE', 'BACKGROUND'] +5442-41168-0017-1402: ref=['A', 'HUNDRED', 'AND', 'TWENTY', 'SIX', 'FOR', 'ADMISSION', 'NINETY', 'EIGHT', 'AGAINST'] +5442-41168-0017-1402: hyp=['A', 'HUNDRED', 'AND', 'TWENTY', 'SIX', 'FOR', 'ADMISSION', 'NINETY', 'EIGHT', 'AGAINST'] +5442-41168-0018-1403: ref=['SANG', 'OUT', 'THE', 'VOICE', 'OF', 'THE', 'SECRETARY', 'WHO', 'COULD', 'NOT', 'PRONOUNCE', 'THE', 'LETTER', 'R'] +5442-41168-0018-1403: hyp=['SANG', 'ALL', 'THE', 'VOICE', 'OF', 'THE', 'SECRETARY', 'WHO', 'COULD', 'NOT', 'PRONOUNCE', 'A', 'LETTER', 'R'] +5442-41168-0019-1404: ref=['THEN', 'THERE', 'WAS', 'A', 'LAUGH', 'A', 'BUTTON', 'AND', 'TWO', 'NUTS', 'WERE', 'FOUND', 'IN', 'THE', 'BOX'] +5442-41168-0019-1404: hyp=['THEN', 'THERE', 'WAS', 'A', 'LAUGH', 'AT', 'BOTTOM', 'AND', 'TWO', 'KNOTS', 'WERE', 'FOUND', 'IN', 'THE', 'BOX'] +5442-41168-0020-1405: ref=['BUT', 'THE', 'OLD', 'PARTY', 'DID', 'NOT', 'CONSIDER', 'THEMSELVES', 'CONQUERED'] +5442-41168-0020-1405: hyp=['BUT', 'THE', 'OLD', 'PARTY', 'DID', 'NOT', 'CONSIDER', 'THEMSELVES', 'CONQUERED'] +5442-41168-0021-1406: ref=['IN', 'REPLY', 'SNETKOV', 'SPOKE', 'OF', 'THE', 'TRUST', 'THE', 'NOBLEMEN', 'OF', 'THE', 'PROVINCE', 'HAD', 'PLACED', 'IN', 'HIM', 'THE', 'AFFECTION', 'THEY', 'HAD', 'SHOWN', 'HIM', 'WHICH', 'HE', 'DID', 'NOT', 'DESERVE', 'AS', 'HIS', 'ONLY', 'MERIT', 'HAD', 'BEEN', 'HIS', 'ATTACHMENT', 'TO', 'THE', 'NOBILITY', 'TO', 'WHOM', 'HE', 'HAD', 'DEVOTED', 'TWELVE', 'YEARS', 'OF', 'SERVICE'] +5442-41168-0021-1406: hyp=['IN', 'THE', 'PLACE', 'NED', 'GOFF', 'SPOKE', 'OF', 'THE', 'TRUST', 'AND', 'NOBLEMEN', 'OF', 'THE', 'PROVINCE', 'HAD', 'PLACED', 'IN', 'HIM', 'THE', 'AFFECTANT', 'THEY', 'HAD', 'SHOWN', 'HIM', 'WHICH', 'HE', 'DID', 'NOT', 'DESERVE', 'AS', 'HIS', 'ONLY', 'MERIT', 'HAD', 'BEEN', 'HIS', 'ATTACHMENT', 'TO', 'THE', 'NOBILITY', 'TO', 'WHOM', 'HE', 'HAD', 'DEVOTED', 'TWELVE', 'YEARS', 'OF', 'SERVICE'] +5442-41168-0022-1407: ref=['THIS', 'EXPRESSION', 'IN', 'THE', "MARSHAL'S", 'FACE', 'WAS', 'PARTICULARLY', 'TOUCHING', 'TO', 'LEVIN', 'BECAUSE', 'ONLY', 'THE', 'DAY', 'BEFORE', 'HE', 'HAD', 'BEEN', 'AT', 'HIS', 'HOUSE', 'ABOUT', 'HIS', 'TRUSTEE', 'BUSINESS', 'AND', 'HAD', 'SEEN', 'HIM', 'IN', 'ALL', 'HIS', 'GRANDEUR', 'A', 'KIND', 'HEARTED', 'FATHERLY', 'MAN'] +5442-41168-0022-1407: hyp=['THIS', 'EXPRESSION', 'IN', 'THE', "MARSHAL'S", 'FACE', 'WAS', 'PARTICULARLY', 'TOUCHING', 'TO', 'LEVIN', 'BECAUSE', 'ONLY', 'THE', 'DAY', 'BEFORE', 'HE', 'HAD', 'BEEN', 'AT', 'HIS', 'HOUSE', 'ABOUT', 'HIS', 'TRUSTY', 'BUSINESS', 'AND', 'HAD', 'SEEN', 'HIM', 'IN', 'ALL', 'HIS', 'GRANDEUR', 'A', 'KIND', 'HEARTED', 'FATHERLY', 'MAN'] +5442-41168-0023-1408: ref=['IF', 'THERE', 'ARE', 'MEN', 'YOUNGER', 'AND', 'MORE', 'DESERVING', 'THAN', 'I', 'LET', 'THEM', 'SERVE'] +5442-41168-0023-1408: hyp=['IF', 'THERE', 'ARE', 'MEN', 'YOUNGER', 'AND', 'MORE', 'DESERVING', 'THAN', 'I', 'LET', 'THEMSELVE'] +5442-41168-0024-1409: ref=['AND', 'THE', 
'MARSHAL', 'DISAPPEARED', 'THROUGH', 'A', 'SIDE', 'DOOR'] +5442-41168-0024-1409: hyp=['AND', 'THE', 'MARSHAL', 'DISAPPEARED', 'THROUGH', 'A', 'SIDE', 'DOOR'] +5442-41168-0025-1410: ref=['THEY', 'WERE', 'TO', 'PROCEED', 'IMMEDIATELY', 'TO', 'THE', 'ELECTION'] +5442-41168-0025-1410: hyp=['THERE', 'WERE', 'TO', 'PROCEED', 'IMMEDIATELY', 'TO', 'THE', 'ELECTION'] +5442-41168-0026-1411: ref=['TWO', 'NOBLE', 'GENTLEMEN', 'WHO', 'HAD', 'A', 'WEAKNESS', 'FOR', 'STRONG', 'DRINK', 'HAD', 'BEEN', 'MADE', 'DRUNK', 'BY', 'THE', 'PARTISANS', 'OF', 'SNETKOV', 'AND', 'A', 'THIRD', 'HAD', 'BEEN', 'ROBBED', 'OF', 'HIS', 'UNIFORM'] +5442-41168-0026-1411: hyp=['DO', 'NOBLE', 'GENTLEMEN', 'WHO', 'HAD', 'A', 'WEAKNESS', 'FOR', 'STRONG', 'DRINK', 'HAD', 'BEEN', 'MADE', 'DRUNK', 'BY', 'THE', 'PARTISANS', 'OF', 'SNATCOVE', 'AND', 'THE', 'THIRD', 'HAD', 'BEEN', 'ROBBED', 'OF', 'HIS', 'UNIFORM'] +5442-41168-0027-1412: ref=['ON', 'LEARNING', 'THIS', 'THE', 'NEW', 'PARTY', 'HAD', 'MADE', 'HASTE', 'DURING', 'THE', 'DISPUTE', 'ABOUT', 'FLEROV', 'TO', 'SEND', 'SOME', 'OF', 'THEIR', 'MEN', 'IN', 'A', 'SLEDGE', 'TO', 'CLOTHE', 'THE', 'STRIPPED', 'GENTLEMAN', 'AND', 'TO', 'BRING', 'ALONG', 'ONE', 'OF', 'THE', 'INTOXICATED', 'TO', 'THE', 'MEETING'] +5442-41168-0027-1412: hyp=['ON', 'LEARNING', 'THIS', 'THE', 'NEW', 'PARTY', 'HAD', 'MADE', 'HASTE', 'DURING', 'THE', 'DISPUTABLE', 'FLIROFF', 'TO', 'SEND', 'SOME', 'OF', 'THEIR', 'MEN', 'IN', 'A', 'SLEDGE', 'TO', 'CLOTHE', 'THE', 'STRIPPED', 'GENTLEMEN', 'AND', 'TO', 'BRING', 'ALONG', 'ONE', 'OF', 'THE', 'INTOXICATED', 'TO', 'THE', 'MEETING'] +5442-41169-0000-1413: ref=['LEVIN', 'DID', 'NOT', 'CARE', 'TO', 'EAT', 'AND', 'HE', 'WAS', 'NOT', 'SMOKING', 'HE', 'DID', 'NOT', 'WANT', 'TO', 'JOIN', 'HIS', 'OWN', 'FRIENDS', 'THAT', 'IS', 'SERGEY', 'IVANOVITCH', 'STEPAN', 'ARKADYEVITCH', 'SVIAZHSKY', 'AND', 'THE', 'REST', 'BECAUSE', 'VRONSKY', 'IN', 'HIS', "EQUERRY'S", 'UNIFORM', 'WAS', 'STANDING', 'WITH', 'THEM', 'IN', 'EAGER', 'CONVERSATION'] +5442-41169-0000-1413: hyp=['LEVIN', 'DID', 'NOT', 'CARE', 'TO', 'EAT', 'AND', 'HE', 'WAS', 'NOT', 'SMOKING', 'HE', 'DID', 'NOT', 'WANT', 'TO', 'JOIN', 'HIS', 'OWN', 'FRIENDS', 'THAT', 'IS', 'SOJI', 'IVANOVITCH', 'STEPAN', 'ARKADYEVITCH', 'SVIAZHSKY', 'AND', 'THE', 'REST', 'BECAUSE', 'VRONSKY', 'IN', 'AN', "EQUITY'S", 'UNIFORM', 'WAS', 'STANDING', 'WITH', 'THEM', 'IN', 'EAGER', 'CONVERSATION'] +5442-41169-0001-1414: ref=['HE', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'SAT', 'DOWN', 'SCANNING', 'THE', 'GROUPS', 'AND', 'LISTENING', 'TO', 'WHAT', 'WAS', 'BEING', 'SAID', 'AROUND', 'HIM'] +5442-41169-0001-1414: hyp=['HE', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'SAT', 'DOWN', 'SCANNING', 'THE', 'GROUPS', 'AND', 'LISTENING', 'TO', 'WHAT', 'WAS', 'BEING', 'SAID', 'AROUND', 'HIM'] +5442-41169-0002-1415: ref=["HE'S", 'SUCH', 'A', 'BLACKGUARD'] +5442-41169-0002-1415: hyp=['IS', 'SUCH', 'A', 'BLANKARD'] +5442-41169-0003-1416: ref=['I', 'HAVE', 'TOLD', 'HIM', 'SO', 'BUT', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'ONLY', 'THINK', 'OF', 'IT'] +5442-41169-0003-1416: hyp=['I', 'HAVE', 'TOLD', 'HIM', 'SO', 'BUT', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'ONLY', 'THINK', 'OF', 'IT'] +5442-41169-0004-1417: ref=['THESE', 'PERSONS', 'WERE', 'UNMISTAKABLY', 'SEEKING', 'A', 'PLACE', 'WHERE', 'THEY', 'COULD', 'TALK', 'WITHOUT', 'BEING', 'OVERHEARD'] +5442-41169-0004-1417: hyp=['THESE', 'PERSONS', 'WERE', 'UNMISTAKABLY', 'SEEKING', 'A', 'PLACE', 'WHERE', 'THEY', 'COULD', 'TALK', 'WITHOUT', 'BEING', 'OVERHEARD'] +5442-41169-0005-1418: ref=['SHALL', 'WE', 'GO', 'ON', 'YOUR', 'EXCELLENCY', 
'FINE', 'CHAMPAGNE'] +5442-41169-0005-1418: hyp=['SHALL', 'WE', 'GO', 'ON', 'YOUR', 'EXCELLENCY', 'FINE', 'CHAMPAGNE'] +5442-41169-0006-1419: ref=['LAST', 'YEAR', 'AT', 'OUR', 'DISTRICT', 'MARSHAL', 'NIKOLAY', "IVANOVITCH'S"] +5442-41169-0006-1419: hyp=['LOST', 'YOUR', 'OTHER', 'DISTRICT', 'MARTIAL', 'NIKOLA', "IVANOVITCH'S"] +5442-41169-0007-1420: ref=['OH', 'STILL', 'JUST', 'THE', 'SAME', 'ALWAYS', 'AT', 'A', 'LOSS', 'THE', 'LANDOWNER', 'ANSWERED', 'WITH', 'A', 'RESIGNED', 'SMILE', 'BUT', 'WITH', 'AN', 'EXPRESSION', 'OF', 'SERENITY', 'AND', 'CONVICTION', 'THAT', 'SO', 'IT', 'MUST', 'BE'] +5442-41169-0007-1420: hyp=['OH', 'STILL', 'JUST', 'THE', 'SAME', 'ALWAYS', 'AT', 'A', 'LOSS', 'THE', 'LANDOWNER', 'ANSWERED', 'WITH', 'A', 'RESIGNED', 'SMILE', 'BUT', 'WITH', 'AN', 'EXPRESSION', 'OF', 'SERENITY', 'AND', 'CONVICTION', 'THAT', 'SO', 'IT', 'MUST', 'BE'] +5442-41169-0008-1421: ref=['WHY', 'WHAT', 'IS', 'THERE', 'TO', 'UNDERSTAND'] +5442-41169-0008-1421: hyp=['WHY', 'WHAT', 'IS', 'THAT', 'TO', 'UNDERSTAND'] +5442-41169-0009-1422: ref=["THERE'S", 'NO', 'MEANING', 'IN', 'IT', 'AT', 'ALL'] +5442-41169-0009-1422: hyp=['THERE', 'IS', 'NO', 'MEANING', 'IN', 'IT', 'AT', 'ALL'] +5442-41169-0010-1423: ref=['THEN', 'TOO', 'ONE', 'MUST', 'KEEP', 'UP', 'CONNECTIONS'] +5442-41169-0010-1423: hyp=['THEN', 'TOO', 'ONE', 'MUST', 'KEEP', 'UP', 'CONNECTIONS'] +5442-41169-0011-1424: ref=["IT'S", 'A', 'MORAL', 'OBLIGATION', 'OF', 'A', 'SORT'] +5442-41169-0011-1424: hyp=["IT'S", 'A', 'MORTAL', 'OBLIGATION', 'OF', 'A', 'SORT'] +5442-41169-0012-1425: ref=['AND', 'THEN', 'TO', 'TELL', 'THE', 'TRUTH', "THERE'S", "ONE'S", 'OWN', 'INTERESTS'] +5442-41169-0012-1425: hyp=['AND', 'THEN', 'TO', 'TELL', 'THE', 'TRUTH', "THERE'S", "ONE'S", 'OWN', 'INTEREST'] +5442-41169-0013-1426: ref=["THEY'RE", 'PROPRIETORS', 'OF', 'A', 'SORT', 'BUT', "WE'RE", 'THE', 'LANDOWNERS'] +5442-41169-0013-1426: hyp=['THEIR', 'PROPRIETORS', 'OF', 'A', 'SORT', 'BUT', 'WE', 'ARE', 'THE', 'LANDOWNERS'] +5442-41169-0014-1427: ref=['THAT', 'IT', 'MAY', 'BE', 'BUT', 'STILL', 'IT', 'OUGHT', 'TO', 'BE', 'TREATED', 'A', 'LITTLE', 'MORE', 'RESPECTFULLY'] +5442-41169-0014-1427: hyp=['THAT', 'IT', 'MAY', 'BE', 'BUT', 'STILL', 'IT', 'OUGHT', 'TO', 'BE', 'TREATED', 'A', 'LITTLE', 'MORE', 'RESPECTFULLY'] +5442-41169-0015-1428: ref=['IF', "WE'RE", 'LAYING', 'OUT', 'A', 'GARDEN', 'PLANNING', 'ONE', 'BEFORE', 'THE', 'HOUSE', 'YOU', 'KNOW', 'AND', 'THERE', "YOU'VE", 'A', 'TREE', "THAT'S", 'STOOD', 'FOR', 'CENTURIES', 'IN', 'THE', 'VERY', 'SPOT', 'OLD', 'AND', 'GNARLED', 'IT', 'MAY', 'BE', 'AND', 'YET', 'YOU', "DON'T", 'CUT', 'DOWN', 'THE', 'OLD', 'FELLOW', 'TO', 'MAKE', 'ROOM', 'FOR', 'THE', 'FLOWERBEDS', 'BUT', 'LAY', 'OUT', 'YOUR', 'BEDS', 'SO', 'AS', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'THE', 'TREE'] +5442-41169-0015-1428: hyp=['IF', 'WE', 'ARE', 'LAYING', 'OUT', 'A', 'GARDEN', 'PLANNING', 'ONE', 'BEFORE', 'THE', 'HOUSE', 'YOU', 'KNOW', 'AND', 'THERE', 'YOU', 'HAVE', 'A', 'TREE', 'THAT', 'STOOD', 'IN', 'CENTURIES', 'IN', 'THE', 'VERY', 'SPOT', 'OLD', 'AND', 'GNOLD', 'IT', 'MAY', 'BE', 'AND', 'YET', 'YOU', "DON'T", 'CUT', 'DOWN', 'THE', 'OLD', 'FELLOW', 'TO', 'MAKE', 'ROOM', 'FOR', 'THE', 'FLOWER', 'BEDS', 'BUT', 'LAY', 'OUT', 'YOUR', 'BEDS', 'SO', 'AS', 'TO', 'TAKE', 'ADVANTAGE', 'OF', 'THE', 'TREE'] +5442-41169-0016-1429: ref=['WELL', 'AND', 'HOW', 'IS', 'YOUR', 'LAND', 'DOING'] +5442-41169-0016-1429: hyp=['WELL', 'AND', 'HOW', 'IS', 'YOUR', 'LAND', 'DOING'] +5442-41169-0017-1430: ref=['BUT', "ONE'S", 'WORK', 'IS', 'THROWN', 'IN', 'FOR', 'NOTHING'] 
+5442-41169-0017-1430: hyp=['BUT', "ONE'S", 'WORK', 'IS', 'THROWN', 'IN', 'FOR', 'NOTHING'] +5442-41169-0018-1431: ref=['OH', 'WELL', 'ONE', 'DOES', 'IT', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +5442-41169-0018-1431: hyp=['OH', 'WELL', 'ONE', 'DOES', 'IT', 'WHAT', 'WOULD', 'YOU', 'HAVE'] +5442-41169-0019-1432: ref=['AND', "WHAT'S", 'MORE', 'THE', 'LANDOWNER', 'WENT', 'ON', 'LEANING', 'HIS', 'ELBOWS', 'ON', 'THE', 'WINDOW', 'AND', 'CHATTING', 'ON', 'MY', 'SON', 'I', 'MUST', 'TELL', 'YOU', 'HAS', 'NO', 'TASTE', 'FOR', 'IT'] +5442-41169-0019-1432: hyp=['AND', 'ONCE', 'MORE', 'THE', 'LANDOWNER', 'WENT', 'ON', 'LEANING', 'HIS', 'ELBOWS', 'ON', 'THE', 'WINDOW', 'AND', 'CHATTING', 'ON', 'MY', 'SON', 'I', 'MUST', 'TELL', 'YOU', 'HAS', 'NO', 'TASTE', 'FOR', 'IT'] +5442-41169-0020-1433: ref=['SO', "THERE'LL", 'BE', 'NO', 'ONE', 'TO', 'KEEP', 'IT', 'UP', 'AND', 'YET', 'ONE', 'DOES', 'IT'] +5442-41169-0020-1433: hyp=['SO', 'THERE', 'WILL', 'BE', 'NO', 'ONE', 'TO', 'KEEP', 'IT', 'UP', 'AND', 'YET', 'ONE', 'DOES', 'IT'] +5442-41169-0021-1434: ref=['WE', 'WALKED', 'ABOUT', 'THE', 'FIELDS', 'AND', 'THE', 'GARDEN', 'NO', 'SAID', 'HE', 'STEPAN', 'VASSILIEVITCH', "EVERYTHING'S", 'WELL', 'LOOKED', 'AFTER', 'BUT', 'YOUR', "GARDEN'S", 'NEGLECTED'] +5442-41169-0021-1434: hyp=['WE', 'WALKED', 'ABOUT', 'THE', 'FIELDS', 'ON', 'THE', 'GARDEN', 'NO', 'SAID', 'HE', 'STEP', 'ON', 'MISS', 'LEVITCH', "EVERYTHING'S", 'WELL', 'LOOKED', 'AFTER', 'BUT', 'YOUR', 'GARDENS', 'NEGLECTED'] +5442-41169-0022-1435: ref=['TO', 'MY', 'THINKING', "I'D", 'CUT', 'DOWN', 'THAT', 'LIME', 'TREE'] +5442-41169-0022-1435: hyp=['TO', 'MY', 'THINKING', "I'VE", 'CUT', 'DOWN', 'THE', 'LIMETERY'] +5442-41169-0023-1436: ref=['HERE', "YOU'VE", 'THOUSANDS', 'OF', 'LIMES', 'AND', 'EACH', 'WOULD', 'MAKE', 'TWO', 'GOOD', 'BUNDLES', 'OF', 'BARK'] +5442-41169-0023-1436: hyp=['HERE', 'YOUR', 'THOUSANDS', 'OF', 'LIMES', 'AND', 'EACH', 'WOULD', 'MAKE', 'TOO', 'GOOD', 'BUNDLES', 'OF', 'BALK'] +5442-41169-0024-1437: ref=["YOU'RE", 'MARRIED', "I'VE", 'HEARD', 'SAID', 'THE', 'LANDOWNER'] +5442-41169-0024-1437: hyp=["YOU'RE", 'MARRIED', 'I', 'HEARD', 'SAID', 'THE', 'LANDOWNER'] +5442-41169-0025-1438: ref=['YES', "IT'S", 'RATHER', 'STRANGE', 'HE', 'WENT', 'ON'] +5442-41169-0025-1438: hyp=['YES', "IT'S", 'ALL', 'THE', 'STRANGE', 'HE', 'WENT', 'ON'] +5442-41169-0026-1439: ref=['THE', 'LANDOWNER', 'CHUCKLED', 'UNDER', 'HIS', 'WHITE', 'MUSTACHES'] +5442-41169-0026-1439: hyp=['THE', 'LANDOWNER', 'CHUCKLED', 'UNDER', 'HIS', 'WHITE', 'MOUSTACHES'] +5442-41169-0027-1440: ref=['WHY', "DON'T", 'WE', 'CUT', 'DOWN', 'OUR', 'PARKS', 'FOR', 'TIMBER'] +5442-41169-0027-1440: hyp=['WHY', "DON'T", 'WE', 'GOT', 'DOWN', 'OUR', 'BOX', 'FOR', 'TIMBOO'] +5442-41169-0028-1441: ref=['SAID', 'LEVIN', 'RETURNING', 'TO', 'A', 'THOUGHT', 'THAT', 'HAD', 'STRUCK', 'HIM'] +5442-41169-0028-1441: hyp=['SAID', 'LEVIN', 'RETURNING', 'TO', 'A', 'THOUGHT', 'THAT', 'HAD', 'STRUCK', 'HIM'] +5442-41169-0029-1442: ref=["THERE'S", 'A', 'CLASS', 'INSTINCT', 'TOO', 'OF', 'WHAT', 'ONE', 'OUGHT', 'AND', "OUGHTN'T", 'TO', 'DO'] +5442-41169-0029-1442: hyp=["THERE'S", 'A', 'CLASS', 'INSTINCT', 'TOO', 'OF', 'WHAT', 'ONE', 'OUGHT', 'AND', 'OUGHT', 'NOT', 'TO', 'DO'] +5442-41169-0030-1443: ref=["THERE'S", 'THE', 'PEASANTS', 'TOO', 'I', 'WONDER', 'AT', 'THEM', 'SOMETIMES', 'ANY', 'GOOD', 'PEASANT', 'TRIES', 'TO', 'TAKE', 'ALL', 'THE', 'LAND', 'HE', 'CAN'] +5442-41169-0030-1443: hyp=["THERE'S", 'THE', 'PEASANTS', 'TOO', 'I', 'WONDER', 'AT', 'THEM', 'SOMETIMES', 'ANY', 'GOOD', 'PEASANT', 'TRIES', 'TO', 'TAKE', 'ALL', 'THE', 
'LAND', 'HE', 'CAN'] +5442-41169-0031-1444: ref=['WITHOUT', 'A', 'RETURN', 'TOO', 'AT', 'A', 'SIMPLE', 'LOSS'] +5442-41169-0031-1444: hyp=['WITHOUT', 'A', 'RETURN', 'TO', 'ADD', 'A', 'SIMPLE', 'LAWS'] +5484-24317-0000-571: ref=['WHEN', 'HE', 'CAME', 'FROM', 'THE', 'BATH', 'PROCLUS', 'VISITED', 'HIM', 'AGAIN'] +5484-24317-0000-571: hyp=['WHEN', 'HE', 'CAME', 'FROM', 'THE', 'BATH', 'PROCKLESS', 'VISITED', 'HIM', 'AGAIN'] +5484-24317-0001-572: ref=['BUT', 'HERMON', 'WAS', 'NOT', 'IN', 'THE', 'MOOD', 'TO', 'SHARE', 'A', 'JOYOUS', 'REVEL', 'AND', 'HE', 'FRANKLY', 'SAID', 'SO', 'ALTHOUGH', 'IMMEDIATELY', 'AFTER', 'HIS', 'RETURN', 'HE', 'HAD', 'ACCEPTED', 'THE', 'INVITATION', 'TO', 'THE', 'FESTIVAL', 'WHICH', 'THE', 'WHOLE', 'FELLOWSHIP', 'OF', 'ARTISTS', 'WOULD', 'GIVE', 'THE', 'FOLLOWING', 'DAY', 'IN', 'HONOUR', 'OF', 'THE', 'SEVENTIETH', 'BIRTHDAY', 'OF', 'THE', 'OLD', 'SCULPTOR', 'EUPHRANOR'] +5484-24317-0001-572: hyp=['BUT', 'HERMAN', 'WAS', 'NOT', 'IN', 'THE', 'MOOD', 'TO', 'SHARE', 'A', 'JOYOUS', 'REVEL', 'AND', 'HE', 'FRANKLY', 'SAID', 'SO', 'ALTHOUGH', 'IMMEDIATELY', 'AFTER', 'HIS', 'RETURN', 'HE', 'HAD', 'ACCEPTED', 'THE', 'INVITATION', 'TO', 'THE', 'FESTIVAL', 'WHICH', 'THE', 'WHOLE', 'FELLOWSHIP', 'OF', 'ARTISTS', 'WOULD', 'GIVE', 'THE', 'FOLLOWING', 'DAY', 'AND', 'HONOUR', 'OF', 'THE', 'SEVENTEENTH', 'BIRTHDAY', 'OF', 'THE', 'OLD', 'SCULPTOR', 'EUPHRANER'] +5484-24317-0002-573: ref=['SHE', 'WOULD', 'APPEAR', 'HERSELF', 'AT', 'DESSERT', 'AND', 'THE', 'BANQUET', 'MUST', 'THEREFORE', 'BEGIN', 'AT', 'AN', 'UNUSUALLY', 'EARLY', 'HOUR'] +5484-24317-0002-573: hyp=['SHE', 'WOULD', 'APPEAR', 'HERSELF', 'A', 'DESSERT', 'AND', 'THE', 'BANQUET', 'MUST', 'THEREFORE', 'BEGIN', 'AT', 'AN', 'UNUSUALLY', 'EARLY', 'HOUR'] +5484-24317-0003-574: ref=['SO', 'THE', 'ARTIST', 'FOUND', 'HIMSELF', 'OBLIGED', 'TO', 'RELINQUISH', 'HIS', 'OPPOSITION'] +5484-24317-0003-574: hyp=['SO', 'THE', 'ARTIST', 'FOUND', 'HIMSELF', 'OBLIGED', 'TO', 'RELINQUISH', 'HIS', 'OPPOSITION'] +5484-24317-0004-575: ref=['THE', 'BANQUET', 'WAS', 'TO', 'BEGIN', 'IN', 'A', 'FEW', 'HOURS', 'YET', 'HE', 'COULD', 'NOT', 'LET', 'THE', 'DAY', 'PASS', 'WITHOUT', 'SEEING', 'DAPHNE', 'AND', 'TELLING', 'HER', 'THE', 'WORDS', 'OF', 'THE', 'ORACLE'] +5484-24317-0004-575: hyp=['THE', 'BANQUET', 'WAS', 'TO', 'BEGIN', 'IN', 'A', 'FEW', 'HOURS', 'YET', 'HE', 'COULD', 'NOT', 'LET', 'THE', 'DAY', 'PASS', 'WITHOUT', 'SEEING', 'DAPHNE', 'AND', 'TELLING', 'HER', 'THE', 'WORDS', 'OF', 'THE', 'ORACLE'] +5484-24317-0005-576: ref=['HE', 'LONGED', 'WITH', 'ARDENT', 'YEARNING', 'FOR', 'THE', 'SOUND', 'OF', 'HER', 'VOICE', 'AND', 'STILL', 'MORE', 'TO', 'UNBURDEN', 'HIS', 'SORELY', 'TROUBLED', 'SOUL', 'TO', 'HER'] +5484-24317-0005-576: hyp=['HE', 'LONGED', 'WITH', 'ARDENT', 'YEARNING', 'FOR', 'THE', 'SOUND', 'OF', 'HER', 'VOICE', 'AND', 'STILL', 'MORE', 'TO', 'UNBURDEN', 'HIS', 'SORELY', 'TROUBLED', 'SOUL', 'TO', 'HER'] +5484-24317-0006-577: ref=['SINCE', 'HIS', 'RETURN', 'FROM', 'THE', 'ORACLE', 'THE', 'FEAR', 'THAT', 'THE', 'RESCUED', 'DEMETER', 'MIGHT', 'YET', 'BE', 'THE', 'WORK', 'OF', 'MYRTILUS', 'HAD', 'AGAIN', 'MASTERED', 'HIM'] +5484-24317-0006-577: hyp=['SINCE', 'HIS', 'RETURN', 'FROM', 'THE', 'ORACLE', 'THE', 'FEAR', 'THAT', 'THE', 'RESCUE', 'DEMETER', 'MIGHT', 'YET', 'BE', 'THE', 'WORK', 'OF', 'MERTULIST', 'HAD', 'AGAIN', 'MASTERED', 'HIM'] +5484-24317-0007-578: ref=['THE', 'APPROVAL', 'AS', 'WELL', 'AS', 'THE', 'DOUBTS', 'WHICH', 'IT', 'AROUSED', 'IN', 'OTHERS', 'STRENGTHENED', 'HIS', 'OPINION', 'ALTHOUGH', 'EVEN', 'NOW', 'HE', 'COULD', 'NOT', 
'SUCCEED', 'IN', 'BRINGING', 'IT', 'INTO', 'HARMONY', 'WITH', 'THE', 'FACTS'] +5484-24317-0007-578: hyp=['THE', 'APPROVAL', 'AS', 'WELL', 'AS', 'A', 'DOUBT', 'WHICH', 'HAD', 'ARISED', 'IN', 'OTHERS', 'STRENGTHENED', 'HIS', 'OPINION', 'ALTHOUGH', 'EVEN', 'NOW', 'HE', 'COULD', 'NOT', 'SUCCEED', 'IN', 'BRINGING', 'IT', 'INTO', 'HARMONY', 'WITH', 'THE', 'FACTS'] +5484-24317-0008-579: ref=['THEN', 'HE', 'WENT', 'DIRECTLY', 'TO', 'THE', 'NEIGHBOURING', 'PALACE', 'THE', 'QUEEN', 'MIGHT', 'HAVE', 'APPEARED', 'ALREADY', 'AND', 'IT', 'WOULD', 'NOT', 'DO', 'TO', 'KEEP', 'HER', 'WAITING'] +5484-24317-0008-579: hyp=['THEN', 'HE', 'WENT', 'DIRECTLY', 'TO', 'THE', 'NEIGHBORING', 'PALACE', 'THE', 'QUEEN', 'MIGHT', 'HAVE', 'APPEARED', 'ALREADY', 'AND', 'IT', 'WOULD', 'NOT', 'DO', 'TO', 'KEEP', 'HER', 'WAITING'] +5484-24317-0009-580: ref=['HITHERTO', 'THE', 'MERCHANT', 'HAD', 'BEEN', 'INDUCED', 'IT', 'IS', 'TRUE', 'TO', 'ADVANCE', 'LARGE', 'SUMS', 'OF', 'MONEY', 'TO', 'THE', 'QUEEN', 'BUT', 'THE', 'LOYAL', 'DEVOTION', 'WHICH', 'HE', 'SHOWED', 'TO', 'HER', 'ROYAL', 'HUSBAND', 'HAD', 'RENDERED', 'IT', 'IMPOSSIBLE', 'TO', 'GIVE', 'HIM', 'EVEN', 'A', 'HINT', 'OF', 'THE', 'CONSPIRACY'] +5484-24317-0009-580: hyp=['HITHERTO', 'THE', 'MERCHANT', 'HAD', 'BEEN', 'INDUCED', 'IT', 'IS', 'TRUE', 'TO', 'ADVANCE', 'LARGE', 'SUMS', 'OF', 'MONEY', 'TO', 'THE', 'QUEEN', 'BUT', 'THE', 'LOYAL', 'DEVOTION', 'WHICH', 'HE', 'SHOWED', 'TO', 'HER', 'ROYAL', 'HUSBAND', 'HAD', 'RENDERED', 'AN', 'IMPOSSIBLE', 'TO', 'GIVE', 'HIM', 'EVEN', 'A', 'HINT', 'OF', 'THE', 'CONSPIRACY'] +5484-24317-0010-581: ref=['WHEN', 'HERMON', 'ENTERED', 'THE', 'RESIDENCE', 'OF', 'THE', 'GRAMMATEUS', 'IN', 'THE', 'PALACE', 'THE', 'GUESTS', 'HAD', 'ALREADY', 'ASSEMBLED'] +5484-24317-0010-581: hyp=['WHEN', 'HERMANN', 'ENTERED', 'THE', 'RESIDENCE', 'OF', 'THE', 'GRAMMATIUS', 'IN', 'THE', 'PALACE', 'THEY', 'GUESTS', 'HAD', 'ALREADY', 'ASSEMBLED'] +5484-24317-0011-582: ref=['THE', 'PLACE', 'BY', "HERMON'S", 'SIDE', 'WHICH', 'ALTHEA', 'HAD', 'CHOSEN', 'FOR', 'HERSELF', 'WOULD', 'THEN', 'BE', 'GIVEN', 'UP', 'TO', 'ARSINOE'] +5484-24317-0011-582: hyp=['THEY', 'PLACED', 'BY', "HARMONT'S", 'SIDE', 'WHICH', 'ALTHIE', 'HAD', 'CHOSEN', 'FOR', 'HERSELF', 'WOULD', 'THEN', 'BE', 'GIVEN', 'UP', 'TO', 'ARSENO'] +5484-24317-0012-583: ref=['TRUE', 'AN', 'INTERESTING', 'CONVERSATION', 'STILL', 'HAD', 'POWER', 'TO', 'CHARM', 'HIM', 'BUT', 'OFTEN', 'DURING', 'ITS', 'CONTINUANCE', 'THE', 'FULL', 'CONSCIOUSNESS', 'OF', 'HIS', 'MISFORTUNE', 'FORCED', 'ITSELF', 'UPON', 'HIS', 'MIND', 'FOR', 'THE', 'MAJORITY', 'OF', 'THE', 'SUBJECTS', 'DISCUSSED', 'BY', 'THE', 'ARTISTS', 'CAME', 'TO', 'THEM', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'SIGHT', 'AND', 'REFERRED', 'TO', 'NEW', 'CREATIONS', 'OF', 'ARCHITECTURE', 'SCULPTURE', 'AND', 'PAINTING', 'FROM', 'WHOSE', 'ENJOYMENT', 'HIS', 'BLINDNESS', 'DEBARRED', 'HIM'] +5484-24317-0012-583: hyp=['TRUE', 'AN', 'INTERESTING', 'CONVERSATION', 'STILL', 'HAD', 'POWER', 'TO', 'CHARM', 'HIM', 'BUT', 'OFTEN', 'DURING', 'ITS', 'CONTINUANCE', 'THE', 'FULL', 'CONSCIOUSNESS', 'OF', 'HIS', 'MISFORTUNE', 'FORCED', 'ITSELF', 'UPON', 'HIS', 'MIND', 'FOR', 'THE', 'MAJORITY', 'OF', 'THE', 'SUBJECTS', 'DISCUSSED', 'BY', 'THE', 'ARTISTS', 'CAME', 'TO', 'THEM', 'THROUGH', 'THE', 'MEDIUM', 'OF', 'SIGHT', 'AND', 'REFERRED', 'TO', 'NEW', 'CREATIONS', 'OF', 'ARCHITECTURE', 'SCULPTURE', 'AND', 'PAINTING', 'FROM', 'WHOSE', 'ENJOYMENT', 'HIS', 'BLINDNESS', 'DEBARRED', 'HIM'] +5484-24317-0013-584: ref=['A', 'STRANGER', 'OUT', 'OF', 'HIS', 'OWN', 'SPHERE', 'HE', 'FELT', 'CHILLED', 
'AMONG', 'THESE', 'CLOSELY', 'UNITED', 'MEN', 'AND', 'WOMEN', 'TO', 'WHOM', 'NO', 'TIE', 'BOUND', 'HIM', 'SAVE', 'THE', 'PRESENCE', 'OF', 'THE', 'SAME', 'HOST'] +5484-24317-0013-584: hyp=['A', 'STRANGER', 'OUT', 'OF', 'HIS', 'OWN', 'SPHERE', 'HE', 'FELL', 'CHILLED', 'AMONG', 'THESE', 'CLOSELY', 'UNITED', 'MEN', 'AND', 'WOMEN', 'TO', 'WHOM', 'NO', 'TIE', 'BOUND', 'HIM', 'SAVE', 'THE', 'PRESENCE', 'OF', 'THE', 'SAME', 'HOST'] +5484-24317-0014-585: ref=['CRATES', 'HAD', 'REALLY', 'BEEN', 'INVITED', 'IN', 'ORDER', 'TO', 'WIN', 'HIM', 'OVER', 'TO', 'THE', "QUEEN'S", 'CAUSE', 'BUT', 'CHARMING', 'FAIR', 'HAIRED', 'NICO', 'HAD', 'BEEN', 'COMMISSIONED', 'BY', 'THE', 'CONSPIRATORS', 'TO', 'PERSUADE', 'HIM', 'TO', 'SING', "ARSINOE'S", 'PRAISES', 'AMONG', 'HIS', 'PROFESSIONAL', 'ASSOCIATES'] +5484-24317-0014-585: hyp=['CREATES', 'HAD', 'REALLY', 'BEEN', 'INVITED', 'IN', 'ORDER', 'TO', 'WIN', 'HIM', 'OVER', 'TO', 'THE', "QUEEN'S", 'CAUSE', 'BUT', 'CHARMING', 'FAIR', 'HAIRED', 'NACO', 'HAD', 'BEEN', 'COMMISSIONED', 'BY', 'THE', 'CONSPIRATORS', 'TO', 'PERSUADE', 'HIM', 'TO', 'SING', "ARSENO'S", 'PRAISES', 'AMONG', 'HIS', 'PROFESSIONAL', 'ASSOCIATES'] +5484-24317-0015-586: ref=['HIS', 'SON', 'HAD', 'BEEN', 'THIS', 'ROYAL', "DAME'S", 'FIRST', 'HUSBAND', 'AND', 'SHE', 'HAD', 'DESERTED', 'HIM', 'TO', 'MARRY', 'LYSIMACHUS', 'THE', 'AGED', 'KING', 'OF', 'THRACE'] +5484-24317-0015-586: hyp=['HIS', 'SON', 'HAD', 'BEEN', 'THE', 'ROYAL', "JAMES'S", 'FIRST', 'HUSBAND', 'AND', 'SHE', 'HAD', 'DESERTED', 'HIM', 'TO', 'MARRY', 'LISIMACUS', 'THE', 'AGED', 'KING', 'OF', 'THRACE'] +5484-24317-0016-587: ref=['THE', "KING'S", 'SISTER', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'CRIED', 'HERMON', 'INCREDULOUSLY'] +5484-24317-0016-587: hyp=['THE', "KING'S", 'SISTER', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'CRIED', 'HARMON', 'INCREDULOUSLY'] +5484-24317-0017-588: ref=['WE', 'WOMEN', 'ARE', 'ONLY', 'AS', 'OLD', 'AS', 'WE', 'LOOK', 'AND', 'THE', 'LEECHES', 'AND', 'TIRING', 'WOMEN', 'OF', 'THIS', 'BEAUTY', 'OF', 'FORTY', 'PRACTISE', 'ARTS', 'WHICH', 'GIVE', 'HER', 'THE', 'APPEARANCE', 'OF', 'TWENTY', 'FIVE', 'YET', 'PERHAPS', 'THE', 'KING', 'VALUES', 'HER', 'INTELLECT', 'MORE', 'THAN', 'HER', 'PERSON', 'AND', 'THE', 'WISDOM', 'OF', 'A', 'HUNDRED', 'SERPENTS', 'IS', 'CERTAINLY', 'UNITED', 'IN', 'THIS', "WOMAN'S", 'HEAD'] +5484-24317-0017-588: hyp=['WE', 'WOMEN', 'ARE', 'EARLIEST', 'OLD', 'AS', 'WE', 'LOOK', 'AND', 'THE', 'LEECH', 'HAS', 'ENTIRE', 'WOMAN', 'OF', 'THIS', 'BEAUTY', 'OF', 'FORTY', 'PRACTISE', 'ARTS', 'WHICH', 'GIVE', 'HER', 'THE', 'APPEARANCE', 'OF', 'TWENTY', 'FIVE', 'YET', 'PERHAPS', 'THE', 'KING', 'VALUES', 'HER', 'INTELLECT', 'MORE', 'THAN', 'HER', 'PERSON', 'AND', 'THE', 'WISDOM', 'OF', 'A', 'HUNDRED', 'SERPENTS', 'IS', 'CERTAINLY', 'UNITED', 'IN', 'THIS', "WOMAN'S", 'HEAD'] +5484-24317-0018-589: ref=['THE', 'THREE', 'MOST', 'TRUSTWORTHY', 'ONES', 'ARE', 'HERE', 'AMYNTAS', 'THE', 'LEECH', 'CHRYSIPPUS', 'AND', 'THE', 'ADMIRABLE', 'PROCLUS'] +5484-24317-0018-589: hyp=['THE', 'THREE', 'MOST', 'TRUSTWORTHY', 'ONES', 'I', 'HEAR', 'I', 'MEANTUS', 'THE', 'LIEGE', 'CHRYSIPPUS', 'IN', 'THE', 'ADMIRABLE', 'PROCLISS'] +5484-24317-0019-590: ref=['LET', 'US', 'HOPE', 'THAT', 'YOU', 'WILL', 'MAKE', 'THIS', 'THREE', 'LEAVED', 'CLOVER', 'THE', 'LUCK', 'PROMISING', 'FOUR', 'LEAVED', 'ONE'] +5484-24317-0019-590: hyp=['LET', 'US', 'HOPE', 'THAT', 'YOU', 'WILL', 'MAKE', 'THIS', 'THREE', 'LEAVED', 'CLOVER', 'THE', 'LUCK', 'PROMISING', 'FALL', 'LEAVE', 'TO', 'ONE'] +5484-24317-0020-591: ref=['YOUR', 'UNCLE', 'TOO', 'HAS', 'OFTEN', 'WITH', 
'PRAISEWORTHY', 'GENEROSITY', 'HELPED', 'ARSINOE', 'IN', 'MANY', 'AN', 'EMBARRASSMENT'] +5484-24317-0020-591: hyp=['YOUR', 'UNCLE', 'TOO', 'HAS', 'OFTEN', 'WITH', 'PRAISE', 'WORTHY', 'GENEROSITY', 'HELPED', 'ALSO', 'IN', 'MANY', 'AN', 'EMBARRASSMENT'] +5484-24317-0021-592: ref=['HOW', 'LONG', 'HE', 'KEPT', 'YOU', 'WAITING', 'FOR', 'THE', 'FIRST', 'WORD', 'CONCERNING', 'A', 'WORK', 'WHICH', 'JUSTLY', 'TRANSPORTED', 'THE', 'WHOLE', 'CITY', 'WITH', 'DELIGHT'] +5484-24317-0021-592: hyp=['HOW', 'LONG', 'HE', 'KEPT', 'YOU', 'WAITING', 'FROM', 'THE', 'FIRST', 'WORD', 'CONCERNING', 'A', 'WORK', 'WHICH', 'JUSTLY', 'TRANSPORTED', 'THE', 'WHOLE', 'CITY', 'WITH', 'DELIGHT'] +5484-24317-0022-593: ref=['WHEN', 'HE', 'DID', 'FINALLY', 'SUMMON', 'YOU', 'HE', 'SAID', 'THINGS', 'WHICH', 'MUST', 'HAVE', 'WOUNDED', 'YOU'] +5484-24317-0022-593: hyp=['WHEN', 'HE', 'DID', 'FINALLY', 'SUMMON', 'YOU', 'HE', 'SAID', 'THINGS', 'WHICH', 'MUST', 'HAVE', 'WOUNDED', 'YOU'] +5484-24317-0023-594: ref=['THAT', 'IS', 'GOING', 'TOO', 'FAR', 'REPLIED', 'HERMON'] +5484-24317-0023-594: hyp=['THAT', 'IS', 'GOING', 'TOO', 'FAR', 'REPLIED', 'HERMANN'] +5484-24317-0024-595: ref=['HE', 'WINKED', 'AT', 'HER', 'AND', 'MADE', 'A', 'SIGNIFICANT', 'GESTURE', 'AS', 'HE', 'SPOKE', 'AND', 'THEN', 'INFORMED', 'THE', 'BLIND', 'ARTIST', 'HOW', 'GRACIOUSLY', 'ARSINOE', 'HAD', 'REMEMBERED', 'HIM', 'WHEN', 'SHE', 'HEARD', 'OF', 'THE', 'REMEDY', 'BY', 'WHOSE', 'AID', 'MANY', 'A', 'WONDERFUL', 'CURE', 'OF', 'BLIND', 'EYES', 'HAD', 'BEEN', 'MADE', 'IN', 'RHODES'] +5484-24317-0024-595: hyp=['HE', 'WAITED', 'HER', 'AND', 'MADE', 'A', 'SIGNIFICANT', 'GESTURE', 'AS', 'HE', 'SPOKE', 'AND', 'THEN', 'INFORMED', 'THE', 'BLIND', 'ARTIST', 'HOW', 'GRACIOUSLY', 'ARSENO', 'HAD', 'REMEMBERED', 'HIM', 'WHEN', 'SHE', 'HEARD', 'OF', 'THE', 'REMEDY', 'BY', 'WHOSE', 'AID', 'MANY', 'A', 'WONDERFUL', 'CURE', 'OF', 'BLIND', 'EYE', 'HAD', 'BEEN', 'MADE', 'IN', 'ROADS'] +5484-24317-0025-596: ref=['THE', 'ROYAL', 'LADY', 'HAD', 'INQUIRED', 'ABOUT', 'HIM', 'AND', 'HIS', 'SUFFERINGS', 'WITH', 'ALMOST', 'SISTERLY', 'INTEREST', 'AND', 'ALTHEA', 'EAGERLY', 'CONFIRMED', 'THE', 'STATEMENT'] +5484-24317-0025-596: hyp=['THE', 'ROYAL', 'LADY', 'HAD', 'INQUIRED', 'ABOUT', 'HIM', 'AND', 'HIS', 'SUFFERINGS', 'WITH', 'ALMOST', 'SISTERLY', 'INTEREST', 'AND', 'ALTHIA', 'EAGERLY', 'CONFIRMED', 'THE', 'STATEMENT'] +5484-24317-0026-597: ref=['HERMON', 'LISTENED', 'TO', 'THE', 'PAIR', 'IN', 'SILENCE'] +5484-24317-0026-597: hyp=['HERMA', 'LISTENED', 'TO', 'THE', 'PARENT', 'SILENCE'] +5484-24317-0027-598: ref=['THE', 'RHODIAN', 'WAS', 'JUST', 'BEGINNING', 'TO', 'PRAISE', 'ARSINOE', 'ALSO', 'AS', 'A', 'SPECIAL', 'FRIEND', 'AND', 'CONNOISSEUR', 'OF', 'THE', "SCULPTOR'S", 'ART', 'WHEN', 'CRATES', "HERMON'S", 'FELLOW', 'STUDENT', 'ASKED', 'THE', 'BLIND', 'ARTIST', 'IN', 'BEHALF', 'OF', 'HIS', 'BEAUTIFUL', 'COMPANION', 'WHY', 'HIS', 'DEMETER', 'WAS', 'PLACED', 'UPON', 'A', 'PEDESTAL', 'WHICH', 'TO', 'OTHERS', 'AS', 'WELL', 'AS', 'HIMSELF', 'SEEMED', 'TOO', 'HIGH', 'FOR', 'THE', 'SIZE', 'OF', 'THE', 'STATUE'] +5484-24317-0027-598: hyp=['THE', 'RADIAN', 'WAS', 'JUST', 'BEGINNING', 'TO', 'PRAISE', 'ARSENO', 'ALSO', 'AS', 'A', 'SPECIAL', 'FRIEND', 'AND', 'CONNOISSEUR', 'OF', 'THE', 'SCULPTURES', 'ART', 'WHEN', 'CRATES', "HERMANN'S", 'FELLOW', 'STUDENT', 'ASKED', 'THE', 'BLIND', 'ARTIST', 'IN', 'BEHALF', 'OF', 'HIS', 'BEAUTIFUL', 'COMPANION', 'WHY', 'HIS', 'DEMETER', 'WAS', 'PLACED', 'UPON', 'A', 'PEDESTAL', 'WITCH', 'TO', 'OTHERS', 'AS', 'WELL', 'AS', 'HIMSELF', 'SEEMED', 'TOO', 'HIGH', 'FOR', 'THE', 
'SIZE', 'OF', 'THE', 'STATUE'] +5484-24317-0028-599: ref=['YET', 'WHAT', 'MATTERED', 'IT', 'EVEN', 'IF', 'THESE', 'MISERABLE', 'PEOPLE', 'CONSIDERED', 'THEMSELVES', 'DECEIVED', 'AND', 'POINTED', 'THE', 'FINGER', 'OF', 'SCORN', 'AT', 'HIM'] +5484-24317-0028-599: hyp=['YET', 'WHAT', 'MATTERED', 'IT', 'EVEN', 'IF', 'THESE', 'MISERABLE', 'PEOPLE', 'CONSIDERED', 'THEMSELVES', 'DECEIVED', 'AND', 'POINTED', 'THE', 'FINGER', 'OF', 'SCORN', 'AT', 'HIM'] +5484-24317-0029-600: ref=['A', 'WOMAN', 'WHO', 'YEARNS', 'FOR', 'THE', 'REGARD', 'OF', 'ALL', 'MEN', 'AND', 'MAKES', 'LOVE', 'A', 'TOY', 'EASILY', 'LESSENS', 'THE', 'DEMANDS', 'SHE', 'IMPOSES', 'UPON', 'INDIVIDUALS'] +5484-24317-0029-600: hyp=['A', 'WOMAN', 'WHO', 'YEARNS', 'FOR', 'THE', 'REGARD', 'OF', 'ALL', 'MEN', 'AND', 'MAKES', 'LOVE', 'A', 'TOY', 'EASILY', 'LESSENS', 'THE', 'DEMANDS', 'SHE', 'IMPOSES', 'UPON', 'INDIVIDUALS'] +5484-24317-0030-601: ref=['ONLY', 'EVEN', 'THOUGH', 'LOVE', 'HAS', 'WHOLLY', 'DISAPPEARED', 'SHE', 'STILL', 'CLAIMS', 'CONSIDERATION', 'AND', 'ALTHEA', 'DID', 'NOT', 'WISH', 'TO', 'LOSE', "HERMON'S", 'REGARD'] +5484-24317-0030-601: hyp=['ONLY', 'EVEN', 'THOUGH', 'LOVE', 'HAS', 'WHOLLY', 'DISAPPEARED', 'SHE', 'STILL', 'CLAIMS', 'CONSIDERATION', 'AND', 'ALTHEA', 'DID', 'NOT', 'WISH', 'TO', 'LOSE', "HARMON'S", 'REGARD'] +5484-24317-0031-602: ref=['HOW', 'INDIFFERENT', 'YOU', 'LOOK', 'BUT', 'I', 'TELL', 'YOU', 'HER', 'DEEP', 'BLUE', 'EYES', 'FLASHED', 'AS', 'SHE', 'SPOKE', 'THAT', 'SO', 'LONG', 'AS', 'YOU', 'WERE', 'STILL', 'A', 'GENUINE', 'CREATING', 'ARTIST', 'THE', 'CASE', 'WAS', 'DIFFERENT'] +5484-24317-0031-602: hyp=['HOW', 'INDIFFERENT', 'YOU', 'LOOK', 'BUT', 'I', 'TELL', 'YOU', 'HER', 'DEEP', 'BLUE', 'EYES', 'FLASHED', 'AS', 'SHE', 'SPOKE', 'THAT', 'SO', 'LONG', 'AS', 'YOU', 'WAS', 'STILL', 'A', 'GENUINE', 'CREATING', 'ARTIST', 'THE', 'CASE', 'WAS', 'DIFFERENT'] +5484-24317-0032-603: ref=['THOUGH', 'SO', 'LOUD', 'A', 'DENIAL', 'IS', 'WRITTEN', 'ON', 'YOUR', 'FACE', 'I', 'PERSIST', 'IN', 'MY', 'CONVICTION', 'AND', 'THAT', 'NO', 'IDLE', 'DELUSION', 'ENSNARES', 'ME', 'I', 'CAN', 'PROVE'] +5484-24317-0032-603: hyp=['THOUGH', 'SO', 'LOUD', 'A', 'DENIAL', 'IS', 'WRITTEN', 'ON', 'YOUR', 'FACE', 'I', 'PERSIST', 'IN', 'MY', 'CONVICTION', 'AND', 'THAT', 'NO', 'IDLE', 'DELUSION', 'AND', 'SNAS', 'ME', 'I', 'CAN', 'PROVE'] +5484-24317-0033-604: ref=['IT', 'WAS', 'NAY', 'IT', 'COULD', 'HAVE', 'BEEN', 'NOTHING', 'ELSE', 'THAT', 'VERY', 'SPIDER'] +5484-24317-0033-604: hyp=['IT', 'WAS', 'NAY', 'IT', 'COULD', 'HAVE', 'BEEN', 'NOTHING', 'ELSE', 'THAT', 'VERY', 'SPIDER'] +5484-24318-0000-605: ref=['NOT', 'A', 'SOUND', 'IF', 'YOU', 'VALUE', 'YOUR', 'LIVES'] +5484-24318-0000-605: hyp=['NOT', 'A', 'SOUND', 'IF', 'YOU', 'VALUE', 'YOUR', 'LIVES'] +5484-24318-0001-606: ref=['TO', 'OFFER', 'RESISTANCE', 'WOULD', 'HAVE', 'BEEN', 'MADNESS', 'FOR', 'EVEN', 'HERMON', 'PERCEIVED', 'BY', 'THE', 'LOUD', 'CLANKING', 'OF', 'WEAPONS', 'AROUND', 'THEM', 'THE', 'GREATLY', 'SUPERIOR', 'POWER', 'OF', 'THE', 'ENEMY', 'AND', 'THEY', 'WERE', 'ACTING', 'BY', 'THE', 'ORDERS', 'OF', 'THE', 'KING', 'TO', 'THE', 'PRISON', 'NEAR', 'THE', 'PLACE', 'OF', 'EXECUTION'] +5484-24318-0001-606: hyp=['TO', 'OFFER', 'RESISTANCE', 'WOULD', 'HAVE', 'BEEN', 'MADNESS', 'FOR', 'EVEN', 'HERMON', 'PERCEIVED', 'BY', 'THE', 'LOUD', 'CLANKING', 'OF', 'WEAPONS', 'AROUND', 'THEM', 'THEY', 'GREATLY', 'SUPERIOR', 'POWER', 'OF', 'THE', 'ENEMY', 'AND', 'THEY', 'WERE', 'ACTING', 'BY', 'THE', 'ORDERS', 'OF', 'THE', 'KING', 'TO', 'THE', 'PRISON', 'NEAR', 'THE', 'PLACE', 'OF', 'EXECUTION'] 
+5484-24318-0002-607: ref=['WAS', 'HE', 'TO', 'BE', 'LED', 'TO', 'THE', "EXECUTIONER'S", 'BLOCK'] +5484-24318-0002-607: hyp=['WAS', 'HE', 'TO', 'BE', 'LED', 'TO', 'THE', "EXECUTIONER'S", 'BLOCK'] +5484-24318-0003-608: ref=['WHAT', 'PLEASURE', 'HAD', 'LIFE', 'TO', 'OFFER', 'HIM', 'THE', 'BLIND', 'MAN', 'WHO', 'WAS', 'ALREADY', 'DEAD', 'TO', 'HIS', 'ART'] +5484-24318-0003-608: hyp=['WHAT', 'PLEASURE', 'HAD', 'LIFE', 'TO', 'OFFER', 'HIM', 'THE', 'BLIND', 'MAN', 'WHO', 'WAS', 'ALREADY', 'DEAD', 'TO', 'HIS', 'ART'] +5484-24318-0004-609: ref=['OUGHT', 'HE', 'NOT', 'TO', 'GREET', 'THIS', 'SUDDEN', 'END', 'AS', 'A', 'BOON', 'FROM', 'THE', 'IMMORTALS'] +5484-24318-0004-609: hyp=['OUGHT', 'HE', 'NOT', 'TO', 'GREET', 'HIS', 'SUDDEN', 'END', 'AS', 'THE', 'BOOM', 'FROM', 'THE', 'IMMORTALS'] +5484-24318-0005-610: ref=['DID', 'IT', 'NOT', 'SPARE', 'HIM', 'A', 'HUMILIATION', 'AS', 'GREAT', 'AND', 'PAINFUL', 'AS', 'COULD', 'BE', 'IMAGINED'] +5484-24318-0005-610: hyp=['DID', 'IT', 'NOT', 'SPARE', 'HIM', 'A', 'HUMILIATION', 'AS', 'GREAT', 'AND', 'PAINFUL', 'AS', 'COULD', 'BE', 'IMAGINED'] +5484-24318-0006-611: ref=['WHATEVER', 'MIGHT', 'AWAIT', 'HIM', 'HE', 'DESIRED', 'NO', 'BETTER', 'FATE'] +5484-24318-0006-611: hyp=['WHATEVER', 'MIGHT', 'AWAIT', 'HIM', 'HE', 'DESIRED', 'NO', 'BETTER', 'FATE'] +5484-24318-0007-612: ref=['IF', 'HE', 'HAD', 'PASSED', 'INTO', 'ANNIHILATION', 'HE', 'HERMON', 'WISHED', 'TO', 'FOLLOW', 'HIM', 'THITHER', 'AND', 'ANNIHILATION', 'CERTAINLY', 'MEANT', 'REDEMPTION', 'FROM', 'PAIN', 'AND', 'MISERY'] +5484-24318-0007-612: hyp=['IF', 'HE', 'HAD', 'PASSED', 'INTO', 'ANNIHILATION', 'HE', 'HERMAN', 'WISHED', 'TO', 'FOLLOW', 'HIM', 'THITHER', 'AND', 'ANNIHILATION', 'CERTAINLY', 'MEANT', 'REDEMPTION', 'FROM', 'PAIN', 'AND', 'MISERY'] +5484-24318-0008-613: ref=['BUT', 'IF', 'HE', 'WERE', 'DESTINED', 'TO', 'MEET', 'HIS', 'MYRTILUS', 'AND', 'HIS', 'MOTHER', 'IN', 'THE', 'WORLD', 'BEYOND', 'THE', 'GRAVE', 'WHAT', 'HAD', 'HE', 'NOT', 'TO', 'TELL', 'THEM', 'HOW', 'SURE', 'HE', 'WAS', 'OF', 'FINDING', 'A', 'JOYFUL', 'RECEPTION', 'THERE', 'FROM', 'BOTH'] +5484-24318-0008-613: hyp=['BUT', 'IF', 'HE', 'WERE', 'DESTINED', 'TO', 'MEET', 'HIS', 'BERTULAS', 'AND', 'HIS', 'MOTHER', 'IN', 'THE', 'WORLD', 'BEYOND', 'THE', 'GRAVE', 'WHAT', 'HAD', 'HE', 'NOT', 'TO', 'TELL', 'THEM', 'HOW', 'SURE', 'HE', 'WAS', 'A', 'FINDING', 'A', 'JOYFUL', 'RECEPTION', 'THERE', 'FROM', 'BOTH'] +5484-24318-0009-614: ref=['THE', 'POWER', 'WHICH', 'DELIVERED', 'HIM', 'OVER', 'TO', 'DEATH', 'JUST', 'AT', 'THAT', 'MOMENT', 'WAS', 'NOT', 'NEMESIS', 'NO', 'IT', 'WAS', 'A', 'KINDLY', 'DEITY'] +5484-24318-0009-614: hyp=['THE', 'POWER', 'WHICH', 'DELIVERED', 'HIM', 'OVER', 'TO', 'DEATH', 'JUST', 'AT', 'THAT', 'MOMENT', 'WAS', 'NOT', 'NEMESIS', 'NO', 'IT', 'WAS', 'A', 'KINDLY', 'DEITY'] +5484-24318-0010-615: ref=['YET', 'IT', 'WAS', 'NO', 'ILLUSION', 'THAT', 'DECEIVED', 'HIM'] +5484-24318-0010-615: hyp=['YET', 'IT', 'WAS', 'NO', 'ILLUSION', 'THAT', 'DECEIVED', 'HIM'] +5484-24318-0011-616: ref=['AGAIN', 'HE', 'HEARD', 'THE', 'BELOVED', 'VOICE', 'AND', 'THIS', 'TIME', 'IT', 'ADDRESSED', 'NOT', 'ONLY', 'HIM', 'BUT', 'WITH', 'THE', 'UTMOST', 'HASTE', 'THE', 'COMMANDER', 'OF', 'THE', 'SOLDIERS'] +5484-24318-0011-616: hyp=['AGAIN', 'HE', 'HEARD', 'THE', 'BELOVED', 'VOICE', 'AND', 'THIS', 'TIME', 'IT', 'ADDRESSED', 'NOT', 'ONLY', 'HIM', 'BUT', 'WITH', 'THE', 'UTMOST', 'HASTE', 'THE', 'COMMANDER', 'OF', 'THE', 'SOLDIERS'] +5484-24318-0012-617: ref=['SOMETIMES', 'WITH', 'TOUCHING', 'ENTREATY', 'SOMETIMES', 'WITH', 'IMPERIOUS', 'COMMAND', 'SHE', 
'PROTESTED', 'AFTER', 'GIVING', 'HIM', 'HER', 'NAME', 'THAT', 'THIS', 'MATTER', 'COULD', 'BE', 'NOTHING', 'BUT', 'AN', 'UNFORTUNATE', 'MISTAKE'] +5484-24318-0012-617: hyp=['SOMETIMES', 'WITH', 'THE', 'TOUCHING', 'ENTREATY', 'SOMETIMES', 'WITH', 'IMPERIOUS', 'COMMAND', 'SHE', 'PROTESTED', 'AFTER', 'GIVING', 'HIM', 'HER', 'NAME', 'THAT', 'THIS', 'MATTER', 'COULD', 'BE', 'NOTHING', 'BUT', 'AN', 'UNFORTUNATE', 'MISTAKE'] +5484-24318-0013-618: ref=['LASTLY', 'WITH', 'EARNEST', 'WARMTH', 'SHE', 'BESOUGHT', 'HIM', 'BEFORE', 'TAKING', 'THE', 'PRISONERS', 'AWAY', 'TO', 'PERMIT', 'HER', 'TO', 'SPEAK', 'TO', 'THE', 'COMMANDING', 'GENERAL', 'PHILIPPUS', 'HER', "FATHER'S", 'GUEST', 'WHO', 'SHE', 'WAS', 'CERTAIN', 'WAS', 'IN', 'THE', 'PALACE'] +5484-24318-0013-618: hyp=['LASTLY', 'WITH', 'EARNEST', 'WARMTH', 'SHE', 'BESOUGHT', 'HIM', 'BEFORE', 'TAKING', 'THE', 'PRISONERS', 'AWAY', 'TO', 'PERMIT', 'HER', 'TO', 'SPEAK', 'TO', 'THE', 'COMMANDING', 'GENERAL', 'PHILIPPUS', 'HER', "FATHER'S", 'GUEST', 'WHO', 'SHE', 'WAS', 'CERTAIN', 'WAS', 'IN', 'THE', 'PALACE'] +5484-24318-0014-619: ref=['CRIED', 'HERMON', 'IN', 'GRATEFUL', 'AGITATION', 'BUT', 'SHE', 'WOULD', 'NOT', 'LISTEN', 'TO', 'HIM', 'AND', 'FOLLOWED', 'THE', 'SOLDIER', 'WHOM', 'THE', 'CAPTAIN', 'DETAILED', 'TO', 'GUIDE', 'HER', 'INTO', 'THE', 'PALACE'] +5484-24318-0014-619: hyp=['CRIED', 'HERMAND', 'IN', 'GRATEFUL', 'AGITATION', 'BUT', 'SHE', 'WOULD', 'NOT', 'LISTEN', 'TO', 'HIM', 'AND', 'FOLLOW', 'THE', 'SOLDIER', 'WHOM', 'THE', 'CAPTAIN', 'DETAILED', 'TO', 'GUIDE', 'HER', 'INTO', 'THE', 'PALACE'] +5484-24318-0015-620: ref=['TO', 'MORROW', 'YOU', 'SHALL', 'CONFESS', 'TO', 'ME', 'WHO', 'TREACHEROUSLY', 'DIRECTED', 'YOU', 'TO', 'THIS', 'DANGEROUS', 'PATH'] +5484-24318-0015-620: hyp=['TO', 'MORROW', 'YOU', 'SHALL', 'CONFESS', 'TO', 'ME', 'WHO', 'TREACHEROUSLY', 'DIRECTED', 'YOU', 'TO', 'THIS', 'DANGEROUS', 'PATH'] +5484-24318-0016-621: ref=['DAPHNE', 'AGAIN', 'PLEADED', 'FOR', 'THE', 'LIBERATION', 'OF', 'THE', 'PRISONERS', 'BUT', 'PHILIPPUS', 'SILENCED', 'HER', 'WITH', 'THE', 'GRAVE', 'EXCLAMATION', 'THE', 'ORDER', 'OF', 'THE', 'KING'] +5484-24318-0016-621: hyp=['DAPHNE', 'AGAIN', 'PLEADED', 'FOR', 'THE', 'LIBERATION', 'OF', 'THE', 'PRISONERS', 'BUT', "PHILIP'S", 'SILENCE', 'CHARRED', 'WITH', 'A', 'GRAVE', 'EXCLAMATION', 'THE', 'ORDER', 'OF', 'THE', 'KING'] +5484-24318-0017-622: ref=['AS', 'SOON', 'AS', 'THE', 'CAPTIVE', 'ARTIST', 'WAS', 'ALONE', 'WITH', 'THE', 'WOMAN', 'HE', 'LOVED', 'HE', 'CLASPED', 'HER', 'HAND', 'POURING', 'FORTH', 'INCOHERENT', 'WORDS', 'OF', 'THE', 'MOST', 'ARDENT', 'GRATITUDE', 'AND', 'WHEN', 'HE', 'FELT', 'HER', 'WARMLY', 'RETURN', 'THE', 'PRESSURE', 'HE', 'COULD', 'NOT', 'RESTRAIN', 'THE', 'DESIRE', 'TO', 'CLASP', 'HER', 'TO', 'HIS', 'HEART'] +5484-24318-0017-622: hyp=['AS', 'SOON', 'AS', 'THE', 'CAPTIVE', 'ARTIST', 'WAS', 'ALONE', 'WITH', 'A', 'WOMAN', 'HE', 'LOVED', 'HE', 'CLASPED', 'HER', 'HAND', 'POURING', 'FORTH', 'INCOHERENT', 'WORDS', 'OF', 'THE', 'MOST', 'ARDENT', 'GRATITUDE', 'AND', 'WHEN', 'HE', 'FELT', 'HER', 'WARMLY', 'RETURN', 'THE', 'PRESSURE', 'HE', 'COULD', 'NOT', 'RESTRAIN', 'THE', 'DESIRE', 'TO', 'CLASP', 'HER', 'TO', 'HIS', 'HEART'] +5484-24318-0018-623: ref=['IN', 'SPITE', 'OF', 'HIS', 'DEEP', 'MENTAL', 'DISTRESS', 'HE', 'COULD', 'HAVE', 'SHOUTED', 'ALOUD', 'IN', 'HIS', 'DELIGHT', 'AND', 'GRATITUDE'] +5484-24318-0018-623: hyp=['IN', 'SPITE', 'OF', 'HIS', 'DEEP', 'MENTAL', 'DISTRESS', 'HE', 'COULD', 'HAVE', 'SHOUTED', 'ALOUD', 'IN', 'HIS', 'DELIGHT', 'AND', 'GRATITUDE'] +5484-24318-0019-624: ref=['HE', 
'MIGHT', 'NOW', 'HAVE', 'BEEN', 'PERMITTED', 'TO', 'BIND', 'FOREVER', 'TO', 'HIS', 'LIFE', 'THE', 'WOMAN', 'WHO', 'HAD', 'JUST', 'RESCUED', 'HIM', 'FROM', 'THE', 'GREATEST', 'DANGER', 'BUT', 'THE', 'CONFESSION', 'HE', 'MUST', 'MAKE', 'TO', 'HIS', 'FELLOW', 'ARTISTS', 'IN', 'THE', 'PALAESTRA', 'THE', 'FOLLOWING', 'MORNING', 'STILL', 'SEALED', 'HIS', 'LIPS', 'YET', 'IN', 'THIS', 'HOUR', 'HE', 'FELT', 'THAT', 'HE', 'WAS', 'UNITED', 'TO', 'HER', 'AND', 'OUGHT', 'NOT', 'TO', 'CONCEAL', 'WHAT', 'AWAITED', 'HIM', 'SO', 'OBEYING', 'A', 'STRONG', 'IMPULSE', 'HE', 'EXCLAIMED', 'YOU', 'KNOW', 'THAT', 'I', 'LOVE', 'YOU'] +5484-24318-0019-624: hyp=['HE', 'MIGHT', 'NOW', 'HAVE', 'BEEN', 'PERMITTED', 'TO', 'FIND', 'FOREVER', 'TO', 'HIS', 'LIFE', 'THE', 'WOMAN', 'WHO', 'HAD', 'JUST', 'RESCUED', 'HIM', 'FROM', 'THE', 'GREATEST', 'DANGER', 'BUT', 'THE', 'CONFESSION', 'HE', 'MUST', 'MAKE', 'TO', 'HIS', 'FELLOW', 'ARTISTS', 'IN', 'THE', 'PELUSTER', 'THE', 'FOLLOWING', 'MORNING', 'STILL', 'SEALED', 'HIS', 'LIPS', 'YET', 'IN', 'THIS', 'HOUR', 'HE', 'FELT', 'THAT', 'HE', 'WAS', 'UNITED', 'TO', 'HER', 'AND', 'OUGHT', 'NOT', 'TO', 'CONCEAL', 'WHAT', 'AWAITED', 'HIM', 'SO', 'OBEYING', 'A', 'STRONG', 'IMPULSE', 'HE', 'EXCLAIMED', 'YOU', 'KNOW', 'THAT', 'I', 'LOVE', 'YOU'] +5484-24318-0020-625: ref=['I', 'LOVE', 'YOU', 'AND', 'HAVE', 'LOVED', 'YOU', 'ALWAYS'] +5484-24318-0020-625: hyp=['I', 'LOVE', 'YOU', 'AND', 'HAVE', 'LOVED', 'YOU', 'ALWAYS'] +5484-24318-0021-626: ref=['DAPHNE', 'EXCLAIMED', 'TENDERLY', 'WHAT', 'MORE', 'IS', 'NEEDED'] +5484-24318-0021-626: hyp=['TAPNEY', 'EXCLAIMED', 'TENDERLY', 'WHAT', 'MORE', 'IS', 'NEEDED'] +5484-24318-0022-627: ref=['BUT', 'HERMON', 'WITH', 'DROOPING', 'HEAD', 'MURMURED', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'WHAT', 'I', 'AM', 'NOW'] +5484-24318-0022-627: hyp=['BUT', 'HERMAN', 'WITH', 'DROOPING', 'HEAD', 'MURMURED', 'TO', 'MORROW', 'I', 'SHALL', 'NO', 'LONGER', 'BE', 'WHAT', 'I', 'AM', 'NOW'] +5484-24318-0023-628: ref=['THEN', 'DAPHNE', 'RAISED', 'HER', 'FACE', 'TO', 'HIS', 'ASKING', 'SO', 'THE', 'DEMETER', 'IS', 'THE', 'WORK', 'OF', 'MYRTILUS'] +5484-24318-0023-628: hyp=['THEN', 'JAPLIN', 'RAISED', 'HER', 'FACE', 'TO', 'HIS', 'ASKING', 'SO', 'THE', 'DEMETER', 'IS', 'THE', 'WORK', 'OF', 'MERCILESS'] +5484-24318-0024-629: ref=['WHAT', 'A', 'TERRIBLE', 'ORDEAL', 'AGAIN', 'AWAITS', 'YOU'] +5484-24318-0024-629: hyp=['WHAT', 'A', 'TERRIBLE', 'ORDEAL', 'AGAIN', 'AWAITS', 'YOU'] +5484-24318-0025-630: ref=['AND', 'I', 'FOOL', 'BLINDED', 'ALSO', 'IN', 'MIND', 'COULD', 'BE', 'VEXED', 'WITH', 'YOU', 'FOR', 'IT'] +5484-24318-0025-630: hyp=['AND', 'I', 'FOOL', 'BLINDED', 'ALL', 'SO', 'IN', 'MIND', 'COULD', 'BE', 'VEXED', 'WITH', 'YOU', 'FOR', 'IT'] +5484-24318-0026-631: ref=['BRING', 'THIS', 'BEFORE', 'YOUR', 'MIND', 'AND', 'EVERYTHING', 'ELSE', 'THAT', 'YOU', 'MUST', 'ACCEPT', 'WITH', 'IT', 'IF', 'YOU', 'CONSENT', 'WHEN', 'THE', 'TIME', 'ARRIVES', 'TO', 'BECOME', 'MINE', 'CONCEAL', 'AND', 'PALLIATE', 'NOTHING'] +5484-24318-0026-631: hyp=['BRING', 'THIS', 'BEFORE', 'YOUR', 'MIND', 'AND', 'EVERYTHING', 'ELSE', 'THAT', 'YOU', 'MUST', 'ACCEPT', 'WITH', 'IT', 'IF', 'YOU', 'CONSENT', 'WITH', 'THE', 'TIME', 'ARRIVES', 'TO', 'BECOME', 'MINE', 'CONCEAL', 'IMPALIATE', 'NOTHING'] +5484-24318-0027-632: ref=['SO', 'ARCHIAS', 'INTENDED', 'TO', 'LEAVE', 'THE', 'CITY', 'ON', 'ONE', 'OF', 'HIS', 'OWN', 'SHIPS', 'THAT', 'VERY', 'DAY'] +5484-24318-0027-632: hyp=['SOROCHIS', 'INTENDED', 'TO', 'LEAVE', 'THE', 'CITY', 'ON', 'ONE', 'OF', 'HIS', 'OWN', 'SHIPS', 'THAT', 'VERY', 'DAY'] 
+5484-24318-0028-633: ref=['HE', 'HIMSELF', 'ON', 'THE', 'WAY', 'TO', 'EXPOSE', 'HIMSELF', 'TO', 'THE', 'MALICE', 'AND', 'MOCKERY', 'OF', 'THE', 'WHOLE', 'CITY'] +5484-24318-0028-633: hyp=['SHE', 'HIMSELF', 'ON', 'THE', 'WAY', 'TO', 'EXPOSE', 'HIMSELF', 'TO', 'THE', 'MALICE', 'AND', 'MOCKERY', 'OF', 'THE', 'WHOLE', 'CITY'] +5484-24318-0029-634: ref=['HIS', 'HEART', 'CONTRACTED', 'PAINFULLY', 'AND', 'HIS', 'SOLICITUDE', 'ABOUT', 'HIS', "UNCLE'S", 'FATE', 'INCREASED', 'WHEN', 'PHILIPPUS', 'INFORMED', 'HIM', 'THAT', 'THE', 'CONSPIRATORS', 'HAD', 'BEEN', 'ARRESTED', 'AT', 'THE', 'BANQUET', 'AND', 'HEADED', 'BY', 'AMYNTAS', 'THE', 'RHODIAN', 'CHRYSIPPUS', 'AND', 'PROCLUS', 'HAD', 'PERISHED', 'BY', 'THE', "EXECUTIONER'S", 'SWORD', 'AT', 'SUNRISE'] +5484-24318-0029-634: hyp=['HIS', 'HEART', 'CONTRACTED', 'PAINFULLY', 'AND', 'HIS', 'SOLICITUDE', 'ABOUT', 'HIS', "UNCLE'S", 'FATE', 'INCREASED', 'WHEN', 'PHILIPUS', 'INFORMED', 'HIM', 'THAT', 'THE', 'CONSPIRATORS', 'HAD', 'BEEN', 'ARRESTED', 'AT', 'THE', 'BANQUET', 'AND', 'HEADED', 'BY', 'A', 'MEANTES', 'THE', 'HERODIAN', 'CHRYSIPPUS', 'AND', 'PROCLIS', 'HAD', 'PERISHED', 'BY', 'THE', "EXECUTIONER'S", 'SWORD', 'AT', 'SUNRISE'] +5484-24318-0030-635: ref=['BESIDES', 'HE', 'KNEW', 'THAT', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'WOULD', 'NOT', 'PART', 'FROM', 'HIM', 'WITHOUT', 'GRANTING', 'HIM', 'ONE', 'LAST', 'WORD'] +5484-24318-0030-635: hyp=['BESIDES', 'HE', 'KNEW', 'THAT', 'THE', 'OBJECT', 'OF', 'HIS', 'LOVE', 'WOULD', 'NOT', 'PART', 'FROM', 'HIM', 'WITHOUT', 'GRANTING', 'HIM', 'ONE', 'LAST', 'WORD'] +5484-24318-0031-636: ref=['ON', 'THE', 'WAY', 'HIS', 'HEART', 'THROBBED', 'ALMOST', 'TO', 'BURSTING'] +5484-24318-0031-636: hyp=['ON', 'THE', 'WAY', 'HIS', 'HARD', 'THROPPED', 'ALMOST', 'TO', 'BURSTING'] +5484-24318-0032-637: ref=['EVEN', "DAPHNE'S", 'IMAGE', 'AND', 'WHAT', 'THREATENED', 'HER', 'FATHER', 'AND', 'HER', 'WITH', 'HIM', 'RECEDED', 'FAR', 'INTO', 'THE', 'BACKGROUND'] +5484-24318-0032-637: hyp=['EVEN', 'AFTER', 'THESE', 'IMAGE', 'AND', 'WHAT', 'THREATENED', 'HER', 'FATHER', 'AND', 'HER', 'WITH', 'HIM', 'WAS', 'SEATED', 'FAR', 'INTO', 'THE', 'BACKGROUND'] +5484-24318-0033-638: ref=['HE', 'WAS', 'APPEARING', 'BEFORE', 'HIS', 'COMPANIONS', 'ONLY', 'TO', 'GIVE', 'TRUTH', 'ITS', 'JUST', 'DUE'] +5484-24318-0033-638: hyp=['HE', 'WAS', 'APPEARING', 'BEFORE', 'HIS', 'COMPANIONS', 'ONLY', 'TO', 'GIVE', 'TRUTH', 'ITS', 'JUST', 'DUE'] +5484-24318-0034-639: ref=['THE', 'EGYPTIAN', 'OBEYED', 'AND', 'HIS', 'MASTER', 'CROSSED', 'THE', 'WIDE', 'SPACE', 'STREWN', 'WITH', 'SAND', 'AND', 'APPROACHED', 'THE', 'STAGE', 'WHICH', 'HAD', 'BEEN', 'ERECTED', 'FOR', 'THE', 'FESTAL', 'PERFORMANCES', 'EVEN', 'HAD', 'HIS', 'EYES', 'RETAINED', 'THE', 'POWER', 'OF', 'SIGHT', 'HIS', 'BLOOD', 'WAS', 'COURSING', 'SO', 'WILDLY', 'THROUGH', 'HIS', 'VEINS', 'THAT', 'HE', 'MIGHT', 'PERHAPS', 'HAVE', 'BEEN', 'UNABLE', 'TO', 'DISTINGUISH', 'THE', 'STATUES', 'AROUND', 'HIM', 'AND', 'THE', 'THOUSANDS', 'OF', 'SPECTATORS', 'WHO', 'CROWDED', 'CLOSELY', 'TOGETHER', 'RICHLY', 'GARLANDED', 'THEIR', 'CHEEKS', 'GLOWING', 'WITH', 'ENTHUSIASM', 'SURROUNDED', 'THE', 'ARENA', 'HERMON'] +5484-24318-0034-639: hyp=['THE', 'EGYPTIAN', 'OBEY', 'AND', 'HIS', 'MASTER', 'CROSSED', 'THE', 'WIDE', 'SPACE', 'STREWN', 'WITH', 'SAND', 'AND', 'APPROACHED', 'THE', 'STAGE', 'WHICH', 'HAD', 'BEEN', 'ERECTED', 'FOR', 'THE', 'FEAST', 'HELL', 'PERFORMANCES', 'EVEN', 'HAD', 'HIS', 'EYES', 'RETAINED', 'THE', 'POWER', 'OF', 'SIGHT', 'HIS', 'BLOOD', 'WAS', 'COARSING', 'SO', 'WIDELY', 'THROUGH', 'HIS', 'VEINS', 'THAT', 
'HE', 'MIGHT', 'PERHAPS', 'HAVE', 'BEEN', 'UNABLE', 'TO', 'DISTINGUISH', 'THE', 'STATUES', 'AROUND', 'HIM', 'AND', 'THE', 'THOUSANDS', 'OF', 'SPECTATORS', 'WHO', 'CROWDED', 'CLOSELY', 'TOGETHER', 'RICHLY', 'GARLANDED', 'THEIR', 'CHIEFS', 'GLOWING', 'WITH', 'ENTHUSIASM', 'SURROUNDED', 'THE', 'ARENA', 'HERMANN'] +5484-24318-0035-640: ref=['SHOUTED', 'HIS', 'FRIEND', 'SOTELES', 'IN', 'JOYFUL', 'SURPRISE', 'IN', 'THE', 'MIDST', 'OF', 'THIS', 'PAINFUL', 'WALK', 'HERMON'] +5484-24318-0035-640: hyp=['SHOUTED', 'HIS', 'FRIEND', 'SORTILESS', 'AND', 'JOYFUL', 'SURPRISE', 'IN', 'THE', 'MIDST', 'OF', 'HIS', 'PAINFUL', 'WALK', 'HAREMON'] +5484-24318-0036-641: ref=['EVEN', 'WHILE', 'HE', 'BELIEVED', 'HIMSELF', 'TO', 'BE', 'THE', 'CREATOR', 'OF', 'THE', 'DEMETER', 'HE', 'HAD', 'BEEN', 'SERIOUSLY', 'TROUBLED', 'BY', 'THE', 'PRAISE', 'OF', 'SO', 'MANY', 'CRITICS', 'BECAUSE', 'IT', 'HAD', 'EXPOSED', 'HIM', 'TO', 'THE', 'SUSPICION', 'OF', 'HAVING', 'BECOME', 'FAITHLESS', 'TO', 'HIS', 'ART', 'AND', 'HIS', 'NATURE'] +5484-24318-0036-641: hyp=['EVEN', 'WHILE', 'HE', 'BELIEVED', 'HIMSELF', 'TO', 'BE', 'THE', 'CREATOR', 'OF', 'THE', 'DEMETER', 'HE', 'HAD', 'BEEN', 'SERIOUSLY', 'TROUBLED', 'BY', 'THE', 'PRAISE', 'OF', 'SO', 'MANY', 'CRITICS', 'BECAUSE', 'IT', 'HAD', 'EXPOSED', 'HIM', 'TO', 'THE', 'SUSPICION', 'OF', 'HAVING', 'BECOME', 'FAITHLESS', 'TO', 'HIS', 'ART', 'AND', 'HIS', 'NATURE'] +5484-24318-0037-642: ref=['HONOUR', 'TO', 'MYRTILUS', 'AND', 'HIS', 'ART', 'BUT', 'HE', 'TRUSTED', 'THIS', 'NOBLE', 'FESTAL', 'ASSEMBLAGE', 'WOULD', 'PARDON', 'THE', 'UNINTENTIONAL', 'DECEPTION', 'AND', 'AID', 'HIS', 'PRAYER', 'FOR', 'RECOVERY'] +5484-24318-0037-642: hyp=['HONOUR', 'TO', 'MYRTULAS', 'AND', 'HIS', 'ART', 'BUT', 'HE', 'TRUSTED', 'THE', 'SNOWBLE', 'FEAST', 'ELL', 'ASSEMBLAGE', 'WOULD', 'PARDON', 'THE', 'UNINTENTIONAL', 'DECEPTION', 'AND', 'AID', 'HIS', 'PRAYER', 'FOR', 'RECOVERY'] +5764-299665-0000-405: ref=['AFTERWARD', 'IT', 'WAS', 'SUPPOSED', 'THAT', 'HE', 'WAS', 'SATISFIED', 'WITH', 'THE', 'BLOOD', 'OF', 'OXEN', 'LAMBS', 'AND', 'DOVES', 'AND', 'THAT', 'IN', 'EXCHANGE', 'FOR', 'OR', 'ON', 'ACCOUNT', 'OF', 'THESE', 'SACRIFICES', 'THIS', 'GOD', 'GAVE', 'RAIN', 'SUNSHINE', 'AND', 'HARVEST'] +5764-299665-0000-405: hyp=['AFTERWARD', 'IT', 'WAS', 'SUPPOSED', 'THAT', 'HE', 'WAS', 'SATISFIED', 'WITH', 'THE', 'BLOOD', 'OF', 'OXEN', 'LAMPS', 'AND', 'DOVES', 'AND', 'THAT', 'IN', 'EXCHANGE', 'FOR', 'OR', 'IN', 'ACCOUNT', 'OF', 'THESE', 'SACRIFICES', 'THESE', 'GOD', 'GAVE', 'REIGN', 'SUNSHINE', 'AND', 'HARVEST'] +5764-299665-0001-406: ref=['WHETHER', 'HE', 'WAS', 'THE', 'CREATOR', 'OF', 'YOURSELF', 'AND', 'MYSELF'] +5764-299665-0001-406: hyp=['WHETHER', 'HE', 'WAS', 'THE', 'CREATOR', 'OF', 'YOURSELF', 'AND', 'MYSELF'] +5764-299665-0002-407: ref=['WHETHER', 'ANY', 'PRAYER', 'WAS', 'EVER', 'ANSWERED'] +5764-299665-0002-407: hyp=['WEATHER', 'AND', 'A', 'PRAYER', 'WAS', 'EVER', 'ANSWERED'] +5764-299665-0003-408: ref=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'INTELLECTUALLY', 'INFERIOR'] +5764-299665-0003-408: hyp=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'INTELLECTUAL', 'INFERIOR'] +5764-299665-0004-409: ref=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'DEFORMED', 'AND', 'HELPLESS', 'WHY', 'DID', 'HE', 'CREATE', 'THE', 'CRIMINAL', 'THE', 'IDIOTIC', 'THE', 'INSANE'] +5764-299665-0004-409: hyp=['WHY', 'DID', 'HE', 'CREATE', 'THE', 'DEFORMED', 'AND', 'HELPLESS', 'WHY', 'DID', 'HE', 'CREATE', 'THE', 'CRIMINAL', 'THE', 'IDIOTIC', 'THE', 'INSANE'] +5764-299665-0005-410: ref=['ARE', 'THE', 'FAILURES', 'UNDER', 'OBLIGATION', 'TO', 'THEIR', 
'CREATOR'] +5764-299665-0005-410: hyp=['ARE', 'THE', 'FAILURES', 'AND', 'THE', 'OBLIGATION', 'TO', 'THEIR', 'CREATOR'] +5764-299665-0006-411: ref=['IS', 'HE', 'RESPONSIBLE', 'FOR', 'ALL', 'THE', 'WARS', 'THAT', 'HAVE', 'BEEN', 'WAGED', 'FOR', 'ALL', 'THE', 'INNOCENT', 'BLOOD', 'THAT', 'HAS', 'BEEN', 'SHED'] +5764-299665-0006-411: hyp=['HIS', 'IRRESPONSIBLE', 'FOR', 'ALL', 'THE', 'WALLS', 'THAT', 'HAVE', 'BEEN', 'WAGED', 'FOR', 'ALL', 'THE', 'INNOCENT', 'BLOOD', 'THAT', 'HAS', 'BEEN', 'SHED'] +5764-299665-0007-412: ref=['IS', 'HE', 'RESPONSIBLE', 'FOR', 'THE', 'CENTURIES', 'OF', 'SLAVERY', 'FOR', 'THE', 'BACKS', 'THAT', 'HAVE', 'BEEN', 'SCARRED', 'WITH', 'THE', 'LASH', 'FOR', 'THE', 'BABES', 'THAT', 'HAVE', 'BEEN', 'SOLD', 'FROM', 'THE', 'BREASTS', 'OF', 'MOTHERS', 'FOR', 'THE', 'FAMILIES', 'THAT', 'HAVE', 'BEEN', 'SEPARATED', 'AND', 'DESTROYED'] +5764-299665-0007-412: hyp=['IF', 'YOU', 'RESPONSIBLE', 'FOR', 'THE', 'CENTURIES', 'OF', 'SLAVERY', 'FOR', 'THE', 'BACKS', 'THAT', 'HAVE', 'BEEN', 'SCARRED', 'WITH', 'A', 'LASH', 'FOR', 'THE', 'BABE', 'THAT', 'HAVE', 'BEEN', 'SOLD', 'FROM', 'THE', 'BREASTS', 'OF', 'MOTHERS', 'FOR', 'THE', 'FAMILIES', 'THAT', 'HAVE', 'BEEN', 'SEPARATED', 'AND', 'DESTROYED'] +5764-299665-0008-413: ref=['IS', 'THIS', 'GOD', 'RESPONSIBLE', 'FOR', 'RELIGIOUS', 'PERSECUTION', 'FOR', 'THE', 'INQUISITION', 'FOR', 'THE', 'THUMB', 'SCREW', 'AND', 'RACK', 'AND', 'FOR', 'ALL', 'THE', 'INSTRUMENTS', 'OF', 'TORTURE'] +5764-299665-0008-413: hyp=['IS', 'THESE', 'GOT', 'RESPONSIBLE', 'FOR', 'RELIGIOUS', 'PERSECUTION', 'FOR', 'THE', 'INQUISITION', 'FOR', 'THE', "TENTH'S", 'CREW', 'AND', 'RACK', 'AND', 'FOR', 'ALL', 'THE', 'INSTRUMENTS', 'OF', 'TORTURE'] +5764-299665-0009-414: ref=['DID', 'THIS', 'GOD', 'ALLOW', 'THE', 'CRUEL', 'AND', 'VILE', 'TO', 'DESTROY', 'THE', 'BRAVE', 'AND', 'VIRTUOUS'] +5764-299665-0009-414: hyp=['THESE', 'GOT', 'THE', 'LOAD', 'THE', 'CRUEL', 'AND', 'VILE', 'TO', 'DESTROY', 'THE', 'BRAVE', 'AND', 'VIRTUOUS'] +5764-299665-0010-415: ref=['DID', 'HE', 'ALLOW', 'TYRANTS', 'TO', 'SHED', 'THE', 'BLOOD', 'OF', 'PATRIOTS'] +5764-299665-0010-415: hyp=['DID', 'HE', 'ALONE', 'TYRANTS', 'TO', 'SHED', 'A', 'BLOOD', 'OF', 'PATRIOTS'] +5764-299665-0011-416: ref=['CAN', 'WE', 'CONCEIVE', 'OF', 'A', 'DEVIL', 'BASE', 'ENOUGH', 'TO', 'PREFER', 'HIS', 'ENEMIES', 'TO', 'HIS', 'FRIENDS'] +5764-299665-0011-416: hyp=['CAN', 'WE', 'CONCEIVE', 'OF', 'A', 'DEVIL', 'BASE', 'ENOUGH', 'TO', 'PREFER', 'HIS', 'ENEMIES', 'TO', 'HIS', 'FRIENDS'] +5764-299665-0012-417: ref=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'THE', 'WILD', 'BEASTS', 'THAT', 'DEVOUR', 'HUMAN', 'BEINGS', 'FOR', 'THE', 'FANGED', 'SERPENTS', 'WHOSE', 'BITE', 'IS', 'DEATH'] +5764-299665-0012-417: hyp=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'THE', 'WILD', 'BEASTS', 'THAT', 'THE', 'FOUR', 'HUMAN', 'BEINGS', 'FOR', 'THE', 'FACT', 'SERPENTS', 'WHOSE', 'BITE', 'ITS', 'DEATH'] +5764-299665-0013-418: ref=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'A', 'WORLD', 'WHERE', 'LIFE', 'FEEDS', 'ON', 'LIFE'] +5764-299665-0013-418: hyp=['HOW', 'CAN', 'WE', 'ACCOUNT', 'FOR', 'A', 'WORLD', 'WILL', 'LIE', 'FEATS', 'ON', 'LIFE'] +5764-299665-0014-419: ref=['DID', 'INFINITE', 'WISDOM', 'INTENTIONALLY', 'PRODUCE', 'THE', 'MICROSCOPIC', 'BEASTS', 'THAT', 'FEED', 'UPON', 'THE', 'OPTIC', 'NERVE', 'THINK', 'OF', 'BLINDING', 'A', 'MAN', 'TO', 'SATISFY', 'THE', 'APPETITE', 'OF', 'A', 'MICROBE'] +5764-299665-0014-419: hyp=['DID', 'INFINITE', 'WISDOM', 'INTENTIONALLY', 'PRODUCED', 'THE', 'MICROSCOPIC', 'BEASTS', 'THAT', 'FEED', 'UPON', 'THE', 'OPTIC', 'NURSE', 
'THINK', 'OF', 'BLINDING', 'A', 'MAN', 'TO', 'SATISFY', 'THE', 'APPETITE', 'OF', 'A', 'MICROBE'] +5764-299665-0015-420: ref=['FEAR', 'BUILDS', 'THE', 'ALTAR', 'AND', 'OFFERS', 'THE', 'SACRIFICE'] +5764-299665-0015-420: hyp=['FEAR', 'BIDS', 'THE', 'ALTAR', 'AND', 'OFFERS', 'THE', 'SACRIFICE'] +5764-299665-0016-421: ref=['FEAR', 'ERECTS', 'THE', 'CATHEDRAL', 'AND', 'BOWS', 'THE', 'HEAD', 'OF', 'MAN', 'IN', 'WORSHIP'] +5764-299665-0016-421: hyp=['FEAR', 'ERECTS', 'THE', 'CATEURAL', 'AND', 'BOWS', 'THE', 'HEAD', 'OF', 'MAN', 'IN', 'WORSHIP'] +5764-299665-0017-422: ref=['LIPS', 'RELIGIOUS', 'AND', 'FEARFUL', 'TREMBLINGLY', 'REPEAT', 'THIS', 'PASSAGE', 'THOUGH', 'HE', 'SLAY', 'ME', 'YET', 'WILL', 'I', 'TRUST', 'HIM'] +5764-299665-0017-422: hyp=['LITZ', 'RELIGIOUS', 'AND', 'FEARFUL', 'TREMBLINGLY', 'REPEAT', 'THIS', 'PASSAGE', 'THOUGH', 'HE', 'SLAY', 'ME', 'YET', 'WE', 'LIKE', 'TRUST', 'HIM'] +5764-299665-0018-423: ref=['CAN', 'WE', 'SAY', 'THAT', 'HE', 'CARED', 'FOR', 'THE', 'CHILDREN', 'OF', 'MEN'] +5764-299665-0018-423: hyp=['CAN', 'WE', 'SAY', 'THAT', 'HE', 'CARED', 'FOR', 'THE', 'CHILDREN', 'OF', 'MEN'] +5764-299665-0019-424: ref=['CAN', 'WE', 'SAY', 'THAT', 'HIS', 'MERCY', 'ENDURETH', 'FOREVER'] +5764-299665-0019-424: hyp=['CAN', 'WE', 'SAY', 'THAT', 'HIS', 'MERCY', 'AND', 'DURE', 'FOR', 'EVER'] +5764-299665-0020-425: ref=['DO', 'WE', 'PROVE', 'HIS', 'GOODNESS', 'BY', 'SHOWING', 'THAT', 'HE', 'HAS', 'OPENED', 'THE', 'EARTH', 'AND', 'SWALLOWED', 'THOUSANDS', 'OF', 'HIS', 'HELPLESS', 'CHILDREN', 'OR', 'THAT', 'WITH', 'THE', 'VOLCANOES', 'HE', 'HAS', 'OVERWHELMED', 'THEM', 'WITH', 'RIVERS', 'OF', 'FIRE'] +5764-299665-0020-425: hyp=['THE', 'REPROVE', 'HIS', 'GOODNESS', 'BY', 'SHOWING', 'THAT', 'HE', 'HAS', 'OPENED', 'THE', 'EARTH', 'AND', 'SWALLOWED', 'THOUSAND', 'OF', 'HIS', 'HELPLESS', 'CHILDREN', 'ALL', 'THAT', 'WE', 'THE', 'VOLCANOES', 'HE', 'HAS', 'OVERWHELMED', 'THEM', 'WITH', 'RIVERS', 'OF', 'FIRE'] +5764-299665-0021-426: ref=['WAS', 'THERE', 'GOODNESS', 'WAS', 'THERE', 'WISDOM', 'IN', 'THIS'] +5764-299665-0021-426: hyp=['WAS', 'THERE', 'GOODNESS', 'WAS', 'THERE', 'WISDOM', 'IN', 'THIS'] +5764-299665-0022-427: ref=['OUGHT', 'THE', 'SUPERIOR', 'RACES', 'TO', 'THANK', 'GOD', 'THAT', 'THEY', 'ARE', 'NOT', 'THE', 'INFERIOR'] +5764-299665-0022-427: hyp=['ALL', 'DISAPPEAR', 'RAYS', 'TWO', 'THANK', 'GOT', 'THAT', 'THEY', 'ARE', 'NOT', 'THE', 'INFERIOR'] +5764-299665-0023-428: ref=['MOST', 'PEOPLE', 'CLING', 'TO', 'THE', 'SUPERNATURAL'] +5764-299665-0023-428: hyp=['MOST', 'PEOPLE', 'CLING', 'TO', 'THE', 'SUPERNATURAL'] +5764-299665-0024-429: ref=['IF', 'THEY', 'GIVE', 'UP', 'ONE', 'GOD', 'THEY', 'IMAGINE', 'ANOTHER'] +5764-299665-0024-429: hyp=['IF', 'THEY', 'GIVE', 'UP', 'ONE', 'GOD', 'THEY', 'IMAGINE', 'ANOTHER'] +5764-299665-0025-430: ref=['WHAT', 'IS', 'THIS', 'POWER'] +5764-299665-0025-430: hyp=['WHAT', 'IS', 'THIS', 'POWER'] +5764-299665-0026-431: ref=['MAN', 'ADVANCES', 'AND', 'NECESSARILY', 'ADVANCES', 'THROUGH', 'EXPERIENCE'] +5764-299665-0026-431: hyp=['MAN', 'ADVANCES', 'AND', 'NECESSARILY', 'ADVANCES', 'TO', 'EXPERIENCE'] +5764-299665-0027-432: ref=['A', 'MAN', 'WISHING', 'TO', 'GO', 'TO', 'A', 'CERTAIN', 'PLACE', 'COMES', 'TO', 'WHERE', 'THE', 'ROAD', 'DIVIDES'] +5764-299665-0027-432: hyp=['A', 'MAN', 'WISHING', 'TO', 'GO', 'TO', 'A', 'CERTAIN', 'PLACE', 'COME', 'TO', 'WHERE', 'THE', 'REAL', 'DIVIDES'] +5764-299665-0028-433: ref=['HE', 'HAS', 'TRIED', 'THAT', 'ROAD', 'AND', 'KNOWS', 'THAT', 'IT', 'IS', 'THE', 'WRONG', 'ROAD'] +5764-299665-0028-433: hyp=['HE', 'HAS', 'TRIED', 'THAT', 
'ROAD', 'AND', 'KNOWS', 'THAT', 'IT', 'IS', 'THE', 'WRONG', 'ROAD'] +5764-299665-0029-434: ref=['A', 'CHILD', 'CHARMED', 'BY', 'THE', 'BEAUTY', 'OF', 'THE', 'FLAME', 'GRASPS', 'IT', 'WITH', 'ITS', 'DIMPLED', 'HAND'] +5764-299665-0029-434: hyp=['A', 'CHILD', 'SHONE', 'BY', 'THE', 'BEAUTY', 'OF', 'THE', 'FLAME', 'GRASPED', 'IT', 'WITH', 'HIS', 'DIMPLED', 'HAND'] +5764-299665-0030-435: ref=['THE', 'POWER', 'THAT', 'WORKS', 'FOR', 'RIGHTEOUSNESS', 'HAS', 'TAUGHT', 'THE', 'CHILD', 'A', 'LESSON'] +5764-299665-0030-435: hyp=['THE', 'POWER', 'WITH', 'THAT', 'WORK', 'FOR', 'RIGHTEOUSNESS', 'HAD', 'TAUGHT', 'THE', 'CHILD', 'A', 'LESSON'] +5764-299665-0031-436: ref=['IT', 'IS', 'A', 'RESULT'] +5764-299665-0031-436: hyp=['IT', 'IS', 'A', 'RESULT'] +5764-299665-0032-437: ref=['IT', 'IS', 'INSISTED', 'BY', 'THESE', 'THEOLOGIANS', 'AND', 'BY', 'MANY', 'OF', 'THE', 'SO', 'CALLED', 'PHILOSOPHERS', 'THAT', 'THIS', 'MORAL', 'SENSE', 'THIS', 'SENSE', 'OF', 'DUTY', 'OF', 'OBLIGATION', 'WAS', 'IMPORTED', 'AND', 'THAT', 'CONSCIENCE', 'IS', 'AN', 'EXOTIC'] +5764-299665-0032-437: hyp=['IT', 'IS', 'INSISTED', 'BY', 'THESE', 'THEOLOGIANS', 'AND', 'BY', 'MANY', 'OF', 'THE', 'SOUL', 'CALLED', 'PHILOSOPHERS', 'THAT', 'THIS', 'MORAL', 'SENSE', 'THIS', 'SENSE', 'OF', 'DUTY', 'OF', 'OBLIGATION', 'WAS', 'IMPORTED', 'AND', 'THAT', 'CONSCIENCE', 'IS', 'AN', 'EXOTIC'] +5764-299665-0033-438: ref=['WE', 'LIVE', 'TOGETHER', 'IN', 'FAMILIES', 'TRIBES', 'AND', 'NATIONS'] +5764-299665-0033-438: hyp=['REALLY', 'TOGETHER', 'IN', 'FAMILIES', 'TRIBES', 'AND', 'NATIONS'] +5764-299665-0034-439: ref=['THEY', 'ARE', 'PRAISED', 'ADMIRED', 'AND', 'RESPECTED'] +5764-299665-0034-439: hyp=['THEY', 'ARE', 'PRAISED', 'ADMIRED', 'AND', 'RESPECTED'] +5764-299665-0035-440: ref=['THEY', 'ARE', 'REGARDED', 'AS', 'GOOD', 'THAT', 'IS', 'TO', 'SAY', 'AS', 'MORAL'] +5764-299665-0035-440: hyp=['THEY', 'ARE', 'REGARDED', 'AS', 'GOOD', 'THAT', 'IS', 'TO', 'SAY', 'AS', 'MORAL'] +5764-299665-0036-441: ref=['THE', 'MEMBERS', 'WHO', 'ADD', 'TO', 'THE', 'MISERY', 'OF', 'THE', 'FAMILY', 'THE', 'TRIBE', 'OR', 'THE', 'NATION', 'ARE', 'CONSIDERED', 'BAD', 'MEMBERS'] +5764-299665-0036-441: hyp=['THE', 'MEMBERS', 'WHO', 'ADD', 'TO', 'THE', 'MISERY', 'OF', 'THE', 'FAMILY', 'THE', 'TRIBE', 'OF', 'THE', 'NATION', 'ARE', 'CONSIDERED', 'BAD', 'MEMBERS'] +5764-299665-0037-442: ref=['THE', 'GREATEST', 'OF', 'HUMAN', 'BEINGS', 'HAS', 'SAID', 'CONSCIENCE', 'IS', 'BORN', 'OF', 'LOVE'] +5764-299665-0037-442: hyp=['THE', 'GREATEST', 'OF', 'HUMAN', 'BEINGS', 'HAD', 'SAID', 'CONSCIENCE', 'IS', 'BORN', 'OF', 'LOVE'] +5764-299665-0038-443: ref=['AS', 'PEOPLE', 'ADVANCE', 'THE', 'REMOTE', 'CONSEQUENCES', 'ARE', 'PERCEIVED'] +5764-299665-0038-443: hyp=['AS', 'PEOPLE', 'ADVANCE', 'THE', 'REMOTE', 'CONSEQUENCES', 'ARE', 'PERCEIVED'] +5764-299665-0039-444: ref=['THE', 'IMAGINATION', 'IS', 'CULTIVATED'] +5764-299665-0039-444: hyp=['THE', 'IMAGINATION', 'IS', 'CULTIVATED'] +5764-299665-0040-445: ref=['A', 'MAN', 'PUTS', 'HIMSELF', 'IN', 'THE', 'PLACE', 'OF', 'ANOTHER'] +5764-299665-0040-445: hyp=['A', 'MAN', 'BUT', 'HIMSELF', 'IN', 'THE', 'PLACE', 'OF', 'ANOTHER'] +5764-299665-0041-446: ref=['THE', 'SENSE', 'OF', 'DUTY', 'BECOMES', 'STRONGER', 'MORE', 'IMPERATIVE'] +5764-299665-0041-446: hyp=['THE', 'SENSE', 'OF', 'DUTY', 'BECOMES', 'STRONGER', 'MORE', 'IMPERATIVE'] +5764-299665-0042-447: ref=['MAN', 'JUDGES', 'HIMSELF'] +5764-299665-0042-447: hyp=['MAN', 'JUDGES', 'HIMSELF'] +5764-299665-0043-448: ref=['IN', 'ALL', 'THIS', 'THERE', 'IS', 'NOTHING', 'SUPERNATURAL'] +5764-299665-0043-448: 
hyp=['IN', 'ALL', 'THIS', 'THERE', 'IS', 'NOTHING', 'SUPERNATURAL'] +5764-299665-0044-449: ref=['MAN', 'HAS', 'DECEIVED', 'HIMSELF'] +5764-299665-0044-449: hyp=['MAN', 'HAS', 'DECEIVED', 'HIMSELF'] +5764-299665-0045-450: ref=['HAS', 'CHRISTIANITY', 'DONE', 'GOOD'] +5764-299665-0045-450: hyp=['HISTORY', 'STUNNITY', 'DONE', 'GOOD'] +5764-299665-0046-451: ref=['WHEN', 'THE', 'CHURCH', 'HAD', 'CONTROL', 'WERE', 'MEN', 'MADE', 'BETTER', 'AND', 'HAPPIER'] +5764-299665-0046-451: hyp=['WHEN', 'THE', 'CHURCH', 'HAD', 'CONTROL', 'WERE', 'MEN', 'MADE', 'BETTER', 'AND', 'HAPPIER'] +5764-299665-0047-452: ref=['WHAT', 'HAS', 'RELIGION', 'DONE', 'FOR', 'HUNGARY', 'OR', 'AUSTRIA'] +5764-299665-0047-452: hyp=['WHAT', 'HAS', 'RELIGION', 'DONE', 'FOR', 'HUNGARY', 'O', 'AUSTRIA'] +5764-299665-0048-453: ref=['COULD', 'THESE', 'COUNTRIES', 'HAVE', 'BEEN', 'WORSE', 'WITHOUT', 'RELIGION'] +5764-299665-0048-453: hyp=['GOOD', 'THESE', 'COUNTRIES', 'HAVE', 'BEEN', 'WORSE', 'WITHOUT', 'RELIGION'] +5764-299665-0049-454: ref=['COULD', 'THEY', 'HAVE', 'BEEN', 'WORSE', 'HAD', 'THEY', 'HAD', 'ANY', 'OTHER', 'RELIGION', 'THAN', 'CHRISTIANITY'] +5764-299665-0049-454: hyp=['COULD', 'THEY', 'HAVE', 'BEEN', 'WORSE', 'HAD', 'THEY', 'HAD', 'ANY', 'OTHER', 'RELIGION', 'THAN', 'CHRISTIANITY'] +5764-299665-0050-455: ref=['WHAT', 'DID', 'CHRISTIANITY', 'DO', 'FOR', 'THEM'] +5764-299665-0050-455: hyp=['WHAT', 'DID', 'CHRISTIANITY', 'DO', 'FAULT', 'THEM'] +5764-299665-0051-456: ref=['THEY', 'HATED', 'PLEASURE'] +5764-299665-0051-456: hyp=['THEY', 'HATED', 'PLEASURE'] +5764-299665-0052-457: ref=['THEY', 'MUFFLED', 'ALL', 'THE', 'BELLS', 'OF', 'GLADNESS'] +5764-299665-0052-457: hyp=['THEY', 'MUFFLED', 'ALL', 'THE', 'BELLS', 'OF', 'GLADNESS'] +5764-299665-0053-458: ref=['THE', 'RELIGION', 'OF', 'THE', 'PURITAN', 'WAS', 'AN', 'UNADULTERATED', 'CURSE'] +5764-299665-0053-458: hyp=['DURING', 'RELIGION', 'OF', 'THE', 'PURITAN', 'WAS', 'AN', 'AN', 'ADULTERATED', 'CURSE'] +5764-299665-0054-459: ref=['THE', 'PURITAN', 'BELIEVED', 'THE', 'BIBLE', 'TO', 'BE', 'THE', 'WORD', 'OF', 'GOD', 'AND', 'THIS', 'BELIEF', 'HAS', 'ALWAYS', 'MADE', 'THOSE', 'WHO', 'HELD', 'IT', 'CRUEL', 'AND', 'WRETCHED'] +5764-299665-0054-459: hyp=['THE', 'PURITAN', 'BELIEF', 'THE', 'BIBLE', 'TO', 'BE', 'THE', 'WORLD', 'OF', 'GOD', 'AND', 'THIS', 'BELIEF', 'HAS', 'ALWAYS', 'MADE', 'THOSE', 'WHO', 'HELD', 'IT', 'CRUEL', 'AND', 'WRETCHED'] +5764-299665-0055-460: ref=['LET', 'ME', 'REFER', 'TO', 'JUST', 'ONE', 'FACT', 'SHOWING', 'THE', 'INFLUENCE', 'OF', 'A', 'BELIEF', 'IN', 'THE', 'BIBLE', 'ON', 'HUMAN', 'BEINGS'] +5764-299665-0055-460: hyp=['LET', 'ME', 'REFER', 'TO', 'JUST', 'ONE', 'FACT', 'SHOWING', 'THE', 'INFLUENCE', 'OF', 'A', 'BELIEF', 'IN', 'THE', 'BIBLE', 'ON', 'HUMAN', 'BEINGS'] +5764-299665-0056-461: ref=['THE', 'QUEEN', 'RECEIVED', 'THE', 'BIBLE', 'KISSED', 'IT', 'AND', 'PLEDGED', 'HERSELF', 'TO', 'DILIGENTLY', 'READ', 'THEREIN'] +5764-299665-0056-461: hyp=['THE', 'QUEEN', 'RECEIVED', 'THE', 'BIBLE', 'KISSED', 'IT', 'AND', 'PLEDGED', 'HERSELF', 'TO', 'DILIGENTLY', 'READ', 'THEREIN'] +5764-299665-0057-462: ref=['IN', 'OTHER', 'WORDS', 'IT', 'WAS', 'JUST', 'AS', 'FIENDISH', 'JUST', 'AS', 'INFAMOUS', 'AS', 'THE', 'CATHOLIC', 'SPIRIT'] +5764-299665-0057-462: hyp=['IN', 'OTHER', 'WORDS', 'IT', 'WAS', 'JUST', 'AS', 'FIENDISH', 'JUST', 'AS', 'IN', 'FAMOUS', 'AS', 'THE', 'CATHOLIC', 'SPIRIT'] +5764-299665-0058-463: ref=['HAS', 'THE', 'BIBLE', 'MADE', 'THE', 'PEOPLE', 'OF', 'GEORGIA', 'KIND', 'AND', 'MERCIFUL'] +5764-299665-0058-463: hyp=['HAS', 'THE', 'VARIABLE', 'MADE', 
'THE', 'PEOPLE', 'OF', 'GEORGE', 'A', 'KIND', 'AND', 'MERCIFUL'] +5764-299665-0059-464: ref=['RELIGION', 'HAS', 'BEEN', 'TRIED', 'AND', 'IN', 'ALL', 'COUNTRIES', 'IN', 'ALL', 'TIMES', 'HAS', 'FAILED'] +5764-299665-0059-464: hyp=['WHO', 'LEGION', 'HAVE', 'BEEN', 'TRIED', 'AND', 'IN', 'ALL', 'COUNTRIES', 'IN', 'ALL', 'TIMES', 'BEST', 'FAILED'] +5764-299665-0060-465: ref=['RELIGION', 'HAS', 'ALWAYS', 'BEEN', 'THE', 'ENEMY', 'OF', 'SCIENCE', 'OF', 'INVESTIGATION', 'AND', 'THOUGHT'] +5764-299665-0060-465: hyp=['RELIGION', 'HATH', 'ALWAYS', 'BEEN', 'THE', 'ENEMY', 'OF', 'SCIENCE', 'OF', 'INVESTIGATION', 'AND', 'THOUGHT'] +5764-299665-0061-466: ref=['RELIGION', 'HAS', 'NEVER', 'MADE', 'MAN', 'FREE'] +5764-299665-0061-466: hyp=['RELIGIONISTS', 'NEVER', 'MADE', 'MEN', 'FREE'] +5764-299665-0062-467: ref=['IT', 'HAS', 'NEVER', 'MADE', 'MAN', 'MORAL', 'TEMPERATE', 'INDUSTRIOUS', 'AND', 'HONEST'] +5764-299665-0062-467: hyp=['HE', 'JUST', 'NEVER', 'MADE', 'MAN', 'MORAL', 'TEMPERATE', 'INDUSTRIOUS', 'AND', 'HONEST'] +5764-299665-0063-468: ref=['ARE', 'CHRISTIANS', 'MORE', 'TEMPERATE', 'NEARER', 'VIRTUOUS', 'NEARER', 'HONEST', 'THAN', 'SAVAGES'] +5764-299665-0063-468: hyp=['AH', 'CHRISTIAN', 'SMALL', 'TEMPERATE', 'NEARER', 'VIRTUOUS', 'NEARER', 'HONEST', 'THAN', 'SAVAGES'] +5764-299665-0064-469: ref=['CAN', 'WE', 'CURE', 'DISEASE', 'BY', 'SUPPLICATION'] +5764-299665-0064-469: hyp=['CAN', 'WE', 'CURE', 'DISEASE', 'BY', 'SUPPLICATION'] +5764-299665-0065-470: ref=['CAN', 'WE', 'RECEIVE', 'VIRTUE', 'OR', 'HONOR', 'AS', 'ALMS'] +5764-299665-0065-470: hyp=['CAN', 'WE', 'RECEIVE', 'VIRTUE', 'OR', 'HANNER', 'AS', 'ARMS'] +5764-299665-0066-471: ref=['RELIGION', 'RESTS', 'ON', 'THE', 'IDEA', 'THAT', 'NATURE', 'HAS', 'A', 'MASTER', 'AND', 'THAT', 'THIS', 'MASTER', 'WILL', 'LISTEN', 'TO', 'PRAYER', 'THAT', 'THIS', 'MASTER', 'PUNISHES', 'AND', 'REWARDS', 'THAT', 'HE', 'LOVES', 'PRAISE', 'AND', 'FLATTERY', 'AND', 'HATES', 'THE', 'BRAVE', 'AND', 'FREE'] +5764-299665-0066-471: hyp=['RELIGION', 'RESTS', 'ON', 'THE', 'IDEA', 'THAT', 'NATURE', 'HAS', 'A', 'MASTER', 'AND', 'THAT', 'THIS', 'MASTER', 'WILL', 'LISTEN', 'TO', 'PRAYER', 'THAT', 'HIS', 'MASTER', 'PUNISHES', 'AND', 'REWARDS', 'THAT', 'HE', 'LOVES', 'PRAISE', 'AND', 'FLATTERY', 'AND', 'HATES', 'THE', 'BRAVE', 'AND', 'FREE'] +5764-299665-0067-472: ref=['WE', 'MUST', 'HAVE', 'CORNER', 'STONES'] +5764-299665-0067-472: hyp=['WE', 'MUST', 'HAVE', 'CORN', 'THE', 'STONES'] +5764-299665-0068-473: ref=['THE', 'STRUCTURE', 'MUST', 'HAVE', 'A', 'BASEMENT'] +5764-299665-0068-473: hyp=['THE', 'STRUCTURE', 'MUST', 'HAVE', 'ABASEMENT'] +5764-299665-0069-474: ref=['IF', 'WE', 'BUILD', 'WE', 'MUST', 'BEGIN', 'AT', 'THE', 'BOTTOM'] +5764-299665-0069-474: hyp=['IF', 'WE', 'BUILD', 'WE', 'MUST', 'BEGIN', 'AT', 'THE', 'BOTTOM'] +5764-299665-0070-475: ref=['I', 'HAVE', 'A', 'THEORY', 'AND', 'I', 'HAVE', 'FOUR', 'CORNER', 'STONES'] +5764-299665-0070-475: hyp=['I', 'HAVE', 'A', 'THEORY', 'AND', 'I', 'HAVE', 'FOUR', 'CORNESTONES'] +5764-299665-0071-476: ref=['THE', 'FIRST', 'STONE', 'IS', 'THAT', 'MATTER', 'SUBSTANCE', 'CANNOT', 'BE', 'DESTROYED', 'CANNOT', 'BE', 'ANNIHILATED'] +5764-299665-0071-476: hyp=['THE', 'FIRST', 'STONE', 'EAST', 'AT', 'MATHER', 'SUBSTANCE', 'CANNOT', 'BE', 'DESTROYED', 'CANNOT', 'BE', 'ANNIHILATED'] +5764-299665-0072-477: ref=['IF', 'THESE', 'CORNER', 'STONES', 'ARE', 'FACTS', 'IT', 'FOLLOWS', 'AS', 'A', 'NECESSITY', 'THAT', 'MATTER', 'AND', 'FORCE', 'ARE', 'FROM', 'AND', 'TO', 'ETERNITY', 'THAT', 'THEY', 'CAN', 'NEITHER', 'BE', 'INCREASED', 'NOR', 'DIMINISHED'] 
+5764-299665-0072-477: hyp=['IF', 'THESE', 'SCORN', 'THE', 'STONES', 'ARE', 'FACTS', 'IT', 'FOLLOWS', 'AS', 'A', 'NECESSITY', 'THAT', 'MATTER', 'AND', 'FORCE', 'ARE', 'FROM', 'END', 'TO', 'ETERNITY', 'THAT', 'THEY', 'CAN', 'NEITHER', 'BE', 'INCREASED', 'NOR', 'DIMINISHED'] +5764-299665-0073-478: ref=['IT', 'FOLLOWS', 'THAT', 'NOTHING', 'HAS', 'BEEN', 'OR', 'CAN', 'BE', 'CREATED', 'THAT', 'THERE', 'NEVER', 'HAS', 'BEEN', 'OR', 'CAN', 'BE', 'A', 'CREATOR'] +5764-299665-0073-478: hyp=['IT', 'FOLLOWS', 'THAT', 'NOTHING', 'HATH', 'BEEN', 'OR', 'CAN', 'BE', 'CREATED', 'THAT', 'THERE', 'NEVER', 'HAS', 'BEEN', 'OR', 'CAN', 'BE', 'A', 'CREATOR'] +5764-299665-0074-479: ref=['IT', 'FOLLOWS', 'THAT', 'THERE', 'COULD', 'NOT', 'HAVE', 'BEEN', 'ANY', 'INTELLIGENCE', 'ANY', 'DESIGN', 'BACK', 'OF', 'MATTER', 'AND', 'FORCE'] +5764-299665-0074-479: hyp=['IT', 'FOLLOWED', 'THAT', 'THERE', 'COULD', 'NOT', 'HAVE', 'BEEN', 'ANY', 'INTELLIGENCE', 'AND', 'A', 'DESIGN', 'BACK', 'OF', 'MATTER', 'AND', 'FORCE'] +5764-299665-0075-480: ref=['I', 'SAY', 'WHAT', 'I', 'THINK'] +5764-299665-0075-480: hyp=['I', 'SAY', 'WHAT', 'I', 'THINK'] +5764-299665-0076-481: ref=['EVERY', 'EVENT', 'HAS', 'PARENTS'] +5764-299665-0076-481: hyp=['EVERY', 'EVENT', 'HAS', 'PARENTS'] +5764-299665-0077-482: ref=['THAT', 'WHICH', 'HAS', 'NOT', 'HAPPENED', 'COULD', 'NOT'] +5764-299665-0077-482: hyp=['THAT', 'WHICH', 'HATH', 'NOT', 'HAPPENED', 'COULD', 'NOT'] +5764-299665-0078-483: ref=['IN', 'THE', 'INFINITE', 'CHAIN', 'THERE', 'IS', 'AND', 'THERE', 'CAN', 'BE', 'NO', 'BROKEN', 'NO', 'MISSING', 'LINK'] +5764-299665-0078-483: hyp=['IN', 'THE', 'INFINITE', 'CHANGE', 'WRITHS', 'AND', 'THERE', 'CAN', 'BE', 'NO', 'BROKEN', 'NO', 'MISSING', 'LINK'] +5764-299665-0079-484: ref=['WE', 'NOW', 'KNOW', 'THAT', 'OUR', 'FIRST', 'PARENTS', 'WERE', 'NOT', 'FOREIGNERS'] +5764-299665-0079-484: hyp=['WE', 'NOW', 'KNOW', 'THAT', 'OUR', 'FIRST', 'PARENTS', 'WERE', 'NOT', 'FOREIGNERS'] +5764-299665-0080-485: ref=['WE', 'NOW', 'KNOW', 'IF', 'WE', 'KNOW', 'ANYTHING', 'THAT', 'THE', 'UNIVERSE', 'IS', 'NATURAL', 'AND', 'THAT', 'MEN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'NATURALLY', 'PRODUCED'] +5764-299665-0080-485: hyp=['WE', 'NOW', 'KNOW', 'IF', 'WE', 'KNOW', 'ANYTHING', 'THAT', 'THE', 'UNIVERSE', 'IS', 'NATURAL', 'AND', 'THAT', 'MAN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'NATURALLY', 'PRODUCED'] +5764-299665-0081-486: ref=['WE', 'KNOW', 'THE', 'PATHS', 'THAT', 'LIFE', 'HAS', 'TRAVELED'] +5764-299665-0081-486: hyp=['WE', 'KNOW', 'THE', 'PATHS', 'THAT', 'LIFE', 'HAS', 'TRAVELLED'] +5764-299665-0082-487: ref=['WE', 'KNOW', 'THE', 'FOOTSTEPS', 'OF', 'ADVANCE', 'THEY', 'HAVE', 'BEEN', 'TRACED'] +5764-299665-0082-487: hyp=['WE', 'KNOW', 'THE', 'FOOTSTEPS', 'OF', 'ADVANCE', 'THEY', 'HAVE', 'BEEN', 'TRACED'] +5764-299665-0083-488: ref=['FOR', 'THOUSANDS', 'OF', 'YEARS', 'MEN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'TRYING', 'TO', 'REFORM', 'THE', 'WORLD'] +5764-299665-0083-488: hyp=['FOUR', 'THOUSANDS', 'OF', 'YEARS', 'MEN', 'AND', 'WOMEN', 'HAVE', 'BEEN', 'TRYING', 'TO', 'REFORM', 'THE', 'WORLD'] +5764-299665-0084-489: ref=['WHY', 'HAVE', 'THE', 'REFORMERS', 'FAILED'] +5764-299665-0084-489: hyp=['WHY', 'HAVE', 'REFORMERS', 'FAME'] +5764-299665-0085-490: ref=['THEY', 'DEPEND', 'ON', 'THE', 'LORD', 'ON', 'LUCK', 'AND', 'CHARITY'] +5764-299665-0085-490: hyp=['THEY', 'DEPEND', 'ON', 'THE', 'LOT', 'UNLUCK', 'AND', 'CHARITY'] +5764-299665-0086-491: ref=['THEY', 'LIVE', 'BY', 'FRAUD', 'AND', 'VIOLENCE', 'AND', 'BEQUEATH', 'THEIR', 'VICES', 'TO', 'THEIR', 'CHILDREN'] +5764-299665-0086-491: 
hyp=['THEY', 'LEAVE', 'THY', 'FRAUD', 'AND', 'VIOLENCE', 'AND', 'BEQUEATH', 'THEIR', 'VICES', 'TO', 'THEIR', 'CHILDREN'] +5764-299665-0087-492: ref=['FAILURE', 'SEEMS', 'TO', 'BE', 'THE', 'TRADEMARK', 'OF', 'NATURE', 'WHY'] +5764-299665-0087-492: hyp=['FAILURE', 'SEEMS', 'TO', 'BE', 'THE', 'TRADE', 'MARK', 'OF', 'NATURE', 'WHY'] +5764-299665-0088-493: ref=['NATURE', 'PRODUCES', 'WITHOUT', 'PURPOSE', 'SUSTAINS', 'WITHOUT', 'INTENTION', 'AND', 'DESTROYS', 'WITHOUT', 'THOUGHT'] +5764-299665-0088-493: hyp=['NATURE', 'PROVED', 'YOUTH', 'IT', 'WITHOUT', 'PURPOSE', 'SUSTAINS', 'WITHOUT', 'INTENTION', 'AND', 'DESTROYS', 'WITHOUT', 'THOUGHT'] +5764-299665-0089-494: ref=['MUST', 'THE', 'WORLD', 'FOREVER', 'REMAIN', 'THE', 'VICTIM', 'OF', 'IGNORANT', 'PASSION'] +5764-299665-0089-494: hyp=['MISTER', 'WORLD', 'FOR', 'EVER', 'REMAINED', 'A', 'VICTIM', 'OF', 'IGNORANT', 'PASSION'] +5764-299665-0090-495: ref=['WHY', 'SHOULD', 'MEN', 'AND', 'WOMEN', 'HAVE', 'CHILDREN', 'THAT', 'THEY', 'CANNOT', 'TAKE', 'CARE', 'OF', 'CHILDREN', 'THAT', 'ARE', 'BURDENS', 'AND', 'CURSES', 'WHY'] +5764-299665-0090-495: hyp=['WHY', 'SHOULD', 'MEN', 'AND', 'WOMEN', 'HAVE', 'CHILDREN', 'THAT', 'THEY', 'CANNOT', 'TAKE', 'CARE', 'OF', 'CHILDREN', 'THAT', 'ARE', 'BURGLAR', 'AND', 'CURSES', 'WHY'] +5764-299665-0091-496: ref=['PASSION', 'IS', 'AND', 'ALWAYS', 'HAS', 'BEEN', 'DEAF'] +5764-299665-0091-496: hyp=['PASSION', 'IS', 'AND', 'ALWAYS', 'HAS', 'BEEN', 'DEAF'] +5764-299665-0092-497: ref=['LAW', 'CAN', 'PUNISH', 'BUT', 'IT', 'CAN', 'NEITHER', 'REFORM', 'CRIMINALS', 'NOR', 'PREVENT', 'CRIME'] +5764-299665-0092-497: hyp=['LAW', 'CAN', 'PUNISH', 'BUT', 'IT', 'CAN', 'NEITHER', 'REFORM', 'CRIMINALS', 'NOR', 'PREVENT', 'CRIME'] +5764-299665-0093-498: ref=['THIS', 'CANNOT', 'BE', 'DONE', 'BY', 'TALK', 'OR', 'EXAMPLE'] +5764-299665-0093-498: hyp=['THESE', 'CANNOT', 'BE', 'DONE', 'BY', 'TALK', 'OR', 'EXAMPLE'] +5764-299665-0094-499: ref=['THIS', 'IS', 'THE', 'SOLUTION', 'OF', 'THE', 'WHOLE', 'QUESTION'] +5764-299665-0094-499: hyp=['THIS', 'IS', 'THE', 'SOLUTION', 'OF', 'THE', 'WHOLE', 'QUESTION'] +5764-299665-0095-500: ref=['THIS', 'FREES', 'WOMAN'] +5764-299665-0095-500: hyp=['THIS', 'FREEZE', 'WOMEN'] +5764-299665-0096-501: ref=['POVERTY', 'AND', 'CRIME', 'WILL', 'BE', 'CHILDLESS'] +5764-299665-0096-501: hyp=['POVERTY', 'AND', 'CRIME', 'WILL', 'BE', 'CHILDLESS'] +5764-299665-0097-502: ref=['IT', 'IS', 'FAR', 'BETTER', 'TO', 'BE', 'FREE', 'TO', 'LEAVE', 'THE', 'FORTS', 'AND', 'BARRICADES', 'OF', 'FEAR', 'TO', 'STAND', 'ERECT', 'AND', 'FACE', 'THE', 'FUTURE', 'WITH', 'A', 'SMILE'] +5764-299665-0097-502: hyp=['IT', 'IS', 'FAR', 'BETTER', 'TO', 'BE', 'FREE', 'TO', 'LEAVE', 'THE', 'FAULTS', 'AND', 'BARRICADES', 'OF', 'FEAR', 'TO', 'STAND', 'ERECT', 'AND', 'FAITH', 'THE', 'FUTURE', 'WE', 'TO', 'SMILE'] +6070-63485-0000-2599: ref=["THEY'RE", 'DONE', 'FOR', 'SAID', 'THE', 'SCHOOLMASTER', 'IN', 'A', 'LOW', 'KEY', 'TO', 'THE', 'CHOUETTE', 'OUT', 'WITH', 'YOUR', 'VITRIOL', 'AND', 'MIND', 'YOUR', 'EYE'] +6070-63485-0000-2599: hyp=['THERE', 'DUNFAR', 'SAID', 'THE', 'SCHOOLMASTER', 'IN', 'A', 'LOW', 'KEY', 'TO', 'THE', 'SWEAT', 'OUT', 'WITH', 'OUR', 'VITRIOL', 'AND', 'MIND', 'YOUR', 'EYE'] +6070-63485-0001-2600: ref=['THE', 'TWO', 'MONSTERS', 'TOOK', 'OFF', 'THEIR', 'SHOES', 'AND', 'MOVED', 'STEALTHILY', 'ALONG', 'KEEPING', 'IN', 'THE', 'SHADOWS', 'OF', 'THE', 'HOUSES'] +6070-63485-0001-2600: hyp=['THE', 'TWO', 'MONSTERS', 'TOOK', 'OFF', 'THEIR', 'SHOES', 'AND', 'MOVED', 'STEALTHILY', 'ALONG', 'KEEPING', 'IN', 'THE', 'SHADOWS', 'OF', 'THE', 'HOUSES'] 
+6070-63485-0002-2601: ref=['BY', 'MEANS', 'OF', 'THIS', 'STRATAGEM', 'THEY', 'FOLLOWED', 'SO', 'CLOSELY', 'THAT', 'ALTHOUGH', 'WITHIN', 'A', 'FEW', 'STEPS', 'OF', 'SARAH', 'AND', 'TOM', 'THEY', 'DID', 'NOT', 'HEAR', 'THEM'] +6070-63485-0002-2601: hyp=['BY', 'MEANS', 'OF', 'THIS', 'STRATAGEM', 'THEY', 'FOLLOWED', 'SO', 'CLOSELY', 'THAT', 'ALTHOUGH', 'WITHIN', 'A', 'FEW', 'STEPS', 'OF', 'SEREN', 'TOM', 'THEY', 'DID', 'NOT', 'HEAR', 'THEM'] +6070-63485-0003-2602: ref=['SARAH', 'AND', 'HER', 'BROTHER', 'HAVING', 'AGAIN', 'PASSED', 'BY', 'THE', 'TAPIS', 'FRANC', 'ARRIVED', 'CLOSE', 'TO', 'THE', 'DILAPIDATED', 'HOUSE', 'WHICH', 'WAS', 'PARTLY', 'IN', 'RUINS', 'AND', 'ITS', 'OPENED', 'CELLARS', 'FORMED', 'A', 'KIND', 'OF', 'GULF', 'ALONG', 'WHICH', 'THE', 'STREET', 'RAN', 'IN', 'THAT', 'DIRECTION'] +6070-63485-0003-2602: hyp=['SARAH', 'AND', 'HER', 'BROTHER', 'HAVING', 'AGAIN', 'PASSED', 'BY', 'THE', 'TAPPY', 'FROG', 'ARRIVED', 'CLOSE', 'TO', 'THE', 'DILAPIDATED', 'HOUSE', 'WHICH', 'WAS', 'PARTLY', 'IN', 'RUINS', 'AND', 'ITS', 'OPEN', 'CELLARS', 'FORMED', 'A', 'KIND', 'OF', 'GULF', 'ALONG', 'WHICH', 'THE', 'STREET', 'RAN', 'IN', 'THAT', 'DIRECTION'] +6070-63485-0004-2603: ref=['IN', 'AN', 'INSTANT', 'THE', 'SCHOOLMASTER', 'WITH', 'A', 'LEAP', 'RESEMBLING', 'IN', 'STRENGTH', 'AND', 'AGILITY', 'THE', 'SPRING', 'OF', 'A', 'TIGER', 'SEIZED', 'SEYTON', 'WITH', 'ONE', 'HAND', 'BY', 'THE', 'THROAT', 'AND', 'EXCLAIMED', 'YOUR', 'MONEY', 'OR', 'I', 'WILL', 'FLING', 'YOU', 'INTO', 'THIS', 'HOLE'] +6070-63485-0004-2603: hyp=['IN', 'AN', 'INSTANT', 'THE', 'SCHOOLMASTER', 'WITH', 'A', 'LEAP', 'RESEMBLING', 'IN', 'STRENGTH', 'AND', 'AGILITY', 'THE', 'SPRING', 'OF', 'A', 'TIGER', 'SEIZED', 'SEATING', 'WITH', 'ONE', 'HAND', 'BY', 'THE', 'THROAT', 'AND', 'EXCLAIMED', 'YOUR', 'MONEY', 'OR', 'I', 'WILL', 'FLING', 'YOU', 'INTO', 'THIS', 'HALL'] +6070-63485-0005-2604: ref=['NO', 'SAID', 'THE', 'OLD', 'BRUTE', 'GRUMBLINGLY', 'NO', 'NOT', 'ONE', 'RING', 'WHAT', 'A', 'SHAME'] +6070-63485-0005-2604: hyp=['NO', 'SAID', 'THE', 'OLD', 'BRUTE', 'TREMBLINGLY', 'NO', 'NOT', 'ONE', 'RING', 'WHAT', 'A', 'SHAME'] +6070-63485-0006-2605: ref=['TOM', 'SEYTON', 'DID', 'NOT', 'LOSE', 'HIS', 'PRESENCE', 'OF', 'MIND', 'DURING', 'THIS', 'SCENE', 'RAPIDLY', 'AND', 'UNEXPECTEDLY', 'AS', 'IT', 'HAD', 'OCCURRED'] +6070-63485-0006-2605: hyp=['TOM', 'SEYTON', 'DID', 'NOT', 'LOSE', 'HIS', 'PRESENCE', 'OF', 'MIND', 'DURING', 'THIS', 'SCENE', 'RAPIDLY', 'AND', 'UNEXPECTEDLY', 'AS', 'IT', 'HAD', 'OCCURRED'] +6070-63485-0007-2606: ref=['OH', 'AH', 'TO', 'LAY', 'A', 'TRAP', 'TO', 'CATCH', 'US', 'REPLIED', 'THE', 'THIEF'] +6070-63485-0007-2606: hyp=['U', 'AH', 'TO', 'LAY', 'A', 'TRAP', 'TO', 'CATCH', 'US', 'REPLIED', 'THE', 'THIEF'] +6070-63485-0008-2607: ref=['THEN', 'ADDRESSING', 'THOMAS', 'SEYTON', 'YOU', 'KNOW', 'THE', 'PLAIN', 'OF', 'SAINT', 'DENIS'] +6070-63485-0008-2607: hyp=['THEN', 'ADDRESSING', 'THOMAS', 'SETTON', 'YOU', 'KNOW', 'THE', 'PLANE', 'OF', 'SAINT', 'DENIS'] +6070-63485-0009-2608: ref=['DID', 'YOU', 'SEE', 'IN', 'THE', 'CABARET', 'WE', 'HAVE', 'JUST', 'LEFT', 'FOR', 'I', 'KNOW', 'YOU', 'AGAIN', 'THE', 'MAN', 'WHOM', 'THE', 'CHARCOAL', 'MAN', 'CAME', 'TO', 'SEEK'] +6070-63485-0009-2608: hyp=['DID', 'YOU', 'SEE', 'IN', 'THE', 'CABARET', 'WE', 'HAD', 'JUST', 'LEFT', 'FOR', 'I', 'KNOW', 'YOU', 'AGAIN', 'THE', 'MAN', 'WHOM', 'THE', 'CHARCOAL', 'MAN', 'CAME', 'TO', 'SEEK'] +6070-63485-0010-2609: ref=['CRIED', 'THE', 'SCHOOLMASTER', 'A', 'THOUSAND', 'FRANCS', 'AND', "I'LL", 'KILL', 'HIM'] +6070-63485-0010-2609: hyp=['CRIED', 'THE', 
'SCHOOLMASTER', 'A', 'THOUSAND', 'FRANCS', 'AND', "I'LL", 'KILL', 'HIM'] +6070-63485-0011-2610: ref=['WRETCH', 'I', 'DO', 'NOT', 'SEEK', 'HIS', 'LIFE', 'REPLIED', 'SARAH', 'TO', 'THE', 'SCHOOLMASTER'] +6070-63485-0011-2610: hyp=['THATCH', 'I', 'DO', 'NOT', 'SEE', 'HIS', 'LIFE', 'REPLIED', 'SARAH', 'TO', 'THE', 'SCHOOLMASTER'] +6070-63485-0012-2611: ref=["LET'S", 'GO', 'AND', 'MEET', 'HIM'] +6070-63485-0012-2611: hyp=["LET'S", 'GO', 'AND', 'MEET', 'HIM'] +6070-63485-0013-2612: ref=['OLD', 'BOY', 'IT', 'WILL', 'PAY', 'FOR', 'LOOKING', 'AFTER'] +6070-63485-0013-2612: hyp=['OLD', 'BOY', 'IT', 'WILL', 'PAY', 'FOR', 'LOOKING', 'AFTER'] +6070-63485-0014-2613: ref=['WELL', 'MY', 'WIFE', 'SHALL', 'BE', 'THERE', 'SAID', 'THE', 'SCHOOLMASTER', 'YOU', 'WILL', 'TELL', 'HER', 'WHAT', 'YOU', 'WANT', 'AND', 'I', 'SHALL', 'SEE'] +6070-63485-0014-2613: hyp=['WELL', 'MY', 'WIFE', 'SHALL', 'BE', 'THERE', 'SAID', 'THE', 'SCHOOLMASTER', 'YOU', 'WILL', 'TELL', 'HER', 'WHAT', 'YOU', 'WANT', 'AND', 'I', 'SHALL', 'SEE'] +6070-63485-0015-2614: ref=['IN', 'THE', 'PLAIN', 'OF', 'SAINT', 'DENIS'] +6070-63485-0015-2614: hyp=['IN', 'THE', 'PLANE', 'OF', 'SAINT', 'DENY'] +6070-63485-0016-2615: ref=['BETWEEN', 'SAINT', 'OUEN', 'AND', 'THE', 'ROAD', 'OF', 'LA', 'REVOLTE', 'AT', 'THE', 'END', 'OF', 'THE', 'ROAD', 'AGREED'] +6070-63485-0016-2615: hyp=['BETWEEN', 'SAINT', 'LOUIS', 'AND', 'THE', 'ROAD', 'OF', 'LA', 'REVOLT', 'AT', 'THE', 'END', 'OF', 'THE', 'ROAD', 'AGREED'] +6070-63485-0017-2616: ref=['HE', 'HAD', 'FORGOTTEN', 'THE', 'ADDRESS', 'OF', 'THE', 'SELF', 'STYLED', 'FAN', 'PAINTER'] +6070-63485-0017-2616: hyp=['HE', 'HAD', 'FORGOTTEN', 'THE', 'ADDRESS', 'OF', 'THE', 'SELF', 'STYLED', 'PAMP', 'PAINTER'] +6070-63485-0018-2617: ref=['THE', 'FIACRE', 'STARTED'] +6070-63485-0018-2617: hyp=['THE', 'FIACUS', 'STARTED'] +6070-86744-0000-2569: ref=['FRANZ', 'WHO', 'SEEMED', 'ATTRACTED', 'BY', 'SOME', 'INVISIBLE', 'INFLUENCE', 'TOWARDS', 'THE', 'COUNT', 'IN', 'WHICH', 'TERROR', 'WAS', 'STRANGELY', 'MINGLED', 'FELT', 'AN', 'EXTREME', 'RELUCTANCE', 'TO', 'PERMIT', 'HIS', 'FRIEND', 'TO', 'BE', 'EXPOSED', 'ALONE', 'TO', 'THE', 'SINGULAR', 'FASCINATION', 'THAT', 'THIS', 'MYSTERIOUS', 'PERSONAGE', 'SEEMED', 'TO', 'EXERCISE', 'OVER', 'HIM', 'AND', 'THEREFORE', 'MADE', 'NO', 'OBJECTION', 'TO', "ALBERT'S", 'REQUEST', 'BUT', 'AT', 'ONCE', 'ACCOMPANIED', 'HIM', 'TO', 'THE', 'DESIRED', 'SPOT', 'AND', 'AFTER', 'A', 'SHORT', 'DELAY', 'THE', 'COUNT', 'JOINED', 'THEM', 'IN', 'THE', 'SALON'] +6070-86744-0000-2569: hyp=['FRANCE', 'WHO', 'SEEMED', 'ATTRACTED', 'BY', 'SOME', 'INVISIBLE', 'INFLUENCE', 'TO', 'WHICH', 'THE', 'COUNT', 'IN', 'WHICH', 'TERROR', 'WAS', 'STRANGELY', 'MINGLED', 'FELT', 'AN', 'EXTREME', 'RELUCTANCE', 'TO', 'PERMIT', 'HIS', 'FRIEND', 'TO', 'BE', 'EXPOSED', 'ALONE', 'TO', 'THE', 'SINGULAR', 'FASCINATION', 'THAT', 'THIS', 'MYSTERIOUS', 'PERSONAGE', 'SEEMED', 'TO', 'EXERCISE', 'OVER', 'HIM', 'AND', 'THEREFORE', 'MADE', 'NO', 'OBJECTION', 'TO', "ALBERT'S", 'REQUEST', 'BUT', 'AT', 'ONCE', 'ACCOMPANIED', 'HIM', 'TO', 'THE', 'DESIRED', 'SPOT', 'AND', 'AFTER', 'A', 'SHORT', 'DELAY', 'THE', 'COUNT', 'JOINED', 'THEM', 'IN', 'THE', 'SALON'] +6070-86744-0001-2570: ref=['MY', 'VERY', 'GOOD', 'FRIEND', 'AND', 'EXCELLENT', 'NEIGHBOR', 'REPLIED', 'THE', 'COUNT', 'WITH', 'A', 'SMILE', 'YOU', 'REALLY', 'EXAGGERATE', 'MY', 'TRIFLING', 'EXERTIONS'] +6070-86744-0001-2570: hyp=['MY', 'VERY', 'GOOD', 'FRIEND', 'AND', 'EXCELLENT', 'NEIGHBOR', 'REPLIED', 'THE', 'COUCH', 'WITH', 'A', 'SMILE', 'YOU', 'REALLY', 'EXAGGERATE', 'MY', 'TRIFLING', 
'EXERTIONS'] +6070-86744-0002-2571: ref=['MY', 'FATHER', 'THE', 'COMTE', 'DE', 'MORCERF', 'ALTHOUGH', 'OF', 'SPANISH', 'ORIGIN', 'POSSESSES', 'CONSIDERABLE', 'INFLUENCE', 'BOTH', 'AT', 'THE', 'COURT', 'OF', 'FRANCE', 'AND', 'MADRID', 'AND', 'I', 'UNHESITATINGLY', 'PLACE', 'THE', 'BEST', 'SERVICES', 'OF', 'MYSELF', 'AND', 'ALL', 'TO', 'WHOM', 'MY', 'LIFE', 'IS', 'DEAR', 'AT', 'YOUR', 'DISPOSAL'] +6070-86744-0002-2571: hyp=['MY', 'FATHER', 'THE', 'COMTE', 'DE', 'MORCERF', 'ALTHOUGH', 'A', 'SPANISH', 'ORIGIN', 'POSSESSES', 'CONSIDERABLE', 'INFLUENCE', 'BOTH', 'AT', 'THE', 'COURT', 'OF', 'FRANCE', 'AND', 'MADRID', 'AND', 'I', 'UNHESITATINGLY', 'PLACED', 'THE', 'BEST', 'SERVICES', 'OF', 'MYSELF', 'AND', 'ALL', 'TO', 'WHOM', 'MY', 'LIFE', 'IS', 'DEAR', 'AT', 'YOUR', 'DISPOSAL'] +6070-86744-0003-2572: ref=['I', 'CAN', 'SCARCELY', 'CREDIT', 'IT'] +6070-86744-0003-2572: hyp=['I', 'CAN', 'SCARCELY', 'CREDIT', 'IT'] +6070-86744-0004-2573: ref=['THEN', 'IT', 'IS', 'SETTLED', 'SAID', 'THE', 'COUNT', 'AND', 'I', 'GIVE', 'YOU', 'MY', 'SOLEMN', 'ASSURANCE', 'THAT', 'I', 'ONLY', 'WAITED', 'AN', 'OPPORTUNITY', 'LIKE', 'THE', 'PRESENT', 'TO', 'REALIZE', 'PLANS', 'THAT', 'I', 'HAVE', 'LONG', 'MEDITATED'] +6070-86744-0004-2573: hyp=['THEN', 'IT', 'IS', 'SETTLED', 'SAID', 'THE', 'COUNT', 'AND', 'I', 'GIVE', 'YOU', 'MY', 'SOLEMN', 'ASSURANCE', 'THAT', 'I', 'ONLY', 'WAITED', 'IN', 'A', 'PARTICULARITY', 'LIKE', 'THE', 'PRESENT', 'TO', 'REALIZE', 'PLANS', 'THAT', 'I', 'HAVE', 'LONG', 'MEDITATED'] +6070-86744-0005-2574: ref=['SHALL', 'WE', 'MAKE', 'A', 'POSITIVE', 'APPOINTMENT', 'FOR', 'A', 'PARTICULAR', 'DAY', 'AND', 'HOUR', 'INQUIRED', 'THE', 'COUNT', 'ONLY', 'LET', 'ME', 'WARN', 'YOU', 'THAT', 'I', 'AM', 'PROVERBIAL', 'FOR', 'MY', 'PUNCTILIOUS', 'EXACTITUDE', 'IN', 'KEEPING', 'MY', 'ENGAGEMENTS', 'DAY', 'FOR', 'DAY', 'HOUR', 'FOR', 'HOUR', 'SAID', 'ALBERT', 'THAT', 'WILL', 'SUIT', 'ME', 'TO', 'A', 'DOT'] +6070-86744-0005-2574: hyp=['SHOW', 'WE', 'MAKE', 'A', 'POSITIVE', 'APPOINTMENT', 'FOR', 'A', 'PARTICULAR', 'DAY', 'AND', 'HOUR', 'INQUIRED', 'THE', 'COUNT', 'ONLY', 'LET', 'ME', 'WARN', 'YOU', 'THAT', 'I', 'AM', 'PROVERBIAL', 'FOR', 'MY', 'PUNCTILIOUS', 'EXACTITUDE', 'IN', 'KEEPING', 'MY', 'ENGAGEMENTS', 'DAY', 'FOR', 'DAY', 'HOUR', 'FOR', 'HOUR', 'SAID', 'ALBERT', 'THAT', 'WILL', 'SUIT', 'ME', 'TO', 'A', 'DOT'] +6070-86744-0006-2575: ref=['SO', 'BE', 'IT', 'THEN', 'REPLIED', 'THE', 'COUNT', 'AND', 'EXTENDING', 'HIS', 'HAND', 'TOWARDS', 'A', 'CALENDAR', 'SUSPENDED', 'NEAR', 'THE', 'CHIMNEY', 'PIECE', 'HE', 'SAID', 'TO', 'DAY', 'IS', 'THE', 'TWENTY', 'FIRST', 'OF', 'FEBRUARY', 'AND', 'DRAWING', 'OUT', 'HIS', 'WATCH', 'ADDED', 'IT', 'IS', 'EXACTLY', 'HALF', 'PAST', 'TEN', "O'CLOCK", 'NOW', 'PROMISE', 'ME', 'TO', 'REMEMBER', 'THIS', 'AND', 'EXPECT', 'ME', 'THE', 'TWENTY', 'FIRST', 'OF', 'MAY', 'AT', 'THE', 'SAME', 'HOUR', 'IN', 'THE', 'FORENOON'] +6070-86744-0006-2575: hyp=['SO', 'BE', 'IT', 'THEN', 'REPLIED', 'THE', 'COUNT', 'AND', 'EXTENDING', 'HIS', 'HAND', 'TOWARDS', 'THE', 'CALENDER', 'SUSPENDED', 'NEAR', 'THE', 'CHIMNEY', 'PIECE', 'HE', 'SAID', 'TO', 'DAY', 'IS', 'THE', 'TWENTY', 'FIRST', 'OF', 'FEBRUARY', 'AND', 'DRAWING', 'OUT', 'HIS', 'WATCH', 'ADDED', 'IT', 'IS', 'EXACTLY', 'HALF', 'PAST', 'TEN', "O'CLOCK", 'NOW', 'PROMISE', 'ME', 'TO', 'REMEMBER', 'THIS', 'AND', 'EXPECT', 'ME', 'THE', 'TWENTY', 'FIRST', 'OF', 'MAY', 'AT', 'THE', 'SAME', 'HOUR', 'IN', 'THE', 'FORENOON'] +6070-86744-0007-2576: ref=['I', 'RESIDE', 'IN', 'MY', "FATHER'S", 'HOUSE', 'BUT', 'OCCUPY', 'A', 'PAVILION', 'AT', 'THE', 'FARTHER', 
'SIDE', 'OF', 'THE', 'COURT', 'YARD', 'ENTIRELY', 'SEPARATED', 'FROM', 'THE', 'MAIN', 'BUILDING'] +6070-86744-0007-2576: hyp=['I', 'RESIDE', 'IN', 'MY', "FATHER'S", 'HOUSE', 'BUT', 'OCCUPY', 'A', 'PAVILION', 'AT', 'THE', 'FARTHER', 'SIDE', 'OF', 'THE', 'COURTYARD', 'AND', 'TIRELESS', 'SEPARATED', 'FROM', 'THE', 'MAIN', 'BUILDING'] +6070-86744-0008-2577: ref=['NOW', 'THEN', 'SAID', 'THE', 'COUNT', 'RETURNING', 'HIS', 'TABLETS', 'TO', 'HIS', 'POCKET', 'MAKE', 'YOURSELF', 'PERFECTLY', 'EASY', 'THE', 'HAND', 'OF', 'YOUR', 'TIME', 'PIECE', 'WILL', 'NOT', 'BE', 'MORE', 'ACCURATE', 'IN', 'MARKING', 'THE', 'TIME', 'THAN', 'MYSELF'] +6070-86744-0008-2577: hyp=['NOW', 'THEN', 'SAID', 'THE', 'COUNT', 'RETURNING', 'HIS', 'TABLETS', 'TO', 'HIS', 'POCKET', 'MAKE', 'YOURSELF', 'PERFECTLY', 'EASY', 'THE', 'HAND', 'OF', 'YOUR', 'TIME', 'PEACE', 'WILL', 'NOT', 'BE', 'MORE', 'ACCURATE', 'IN', 'MARKING', 'THE', 'TIME', 'THAN', 'MYSELF'] +6070-86744-0009-2578: ref=['THAT', 'DEPENDS', 'WHEN', 'DO', 'YOU', 'LEAVE'] +6070-86744-0009-2578: hyp=['THAT', 'DEPENDS', 'WHEN', "D'YE", 'LEAVE'] +6070-86744-0010-2579: ref=['FOR', 'FRANCE', 'NO', 'FOR', 'VENICE', 'I', 'SHALL', 'REMAIN', 'IN', 'ITALY', 'FOR', 'ANOTHER', 'YEAR', 'OR', 'TWO'] +6070-86744-0010-2579: hyp=['FOR', 'FRANCE', 'NO', 'FOR', 'VENICE', 'I', 'SHALL', 'REMAIN', 'IN', 'ITALY', 'FOR', 'ANOTHER', 'YEAR', 'OR', 'TWO'] +6070-86744-0011-2580: ref=['THEN', 'WE', 'SHALL', 'NOT', 'MEET', 'IN', 'PARIS'] +6070-86744-0011-2580: hyp=['THEN', 'WE', 'SHALL', 'NOT', 'MEET', 'IN', 'PARIS'] +6070-86744-0012-2581: ref=['I', 'FEAR', 'I', 'SHALL', 'NOT', 'HAVE', 'THAT', 'HONOR'] +6070-86744-0012-2581: hyp=['I', 'FEAR', 'I', 'SHALL', 'NOT', 'HAVE', 'THAT', 'HONOUR'] +6070-86744-0013-2582: ref=['WELL', 'SINCE', 'WE', 'MUST', 'PART', 'SAID', 'THE', 'COUNT', 'HOLDING', 'OUT', 'A', 'HAND', 'TO', 'EACH', 'OF', 'THE', 'YOUNG', 'MEN', 'ALLOW', 'ME', 'TO', 'WISH', 'YOU', 'BOTH', 'A', 'SAFE', 'AND', 'PLEASANT', 'JOURNEY'] +6070-86744-0013-2582: hyp=['WELL', 'SINCE', 'WE', 'MUST', 'PART', 'SAID', 'THE', 'COUNT', 'HOLDING', 'OUT', 'A', 'HAND', 'TO', 'EACH', 'OF', 'THE', 'YOUNG', 'MEN', 'ALLOW', 'ME', 'TO', 'WISH', 'YOU', 'BOTH', 'AS', 'SAFE', 'AND', 'PLEASANT', 'JOURNEY'] +6070-86744-0014-2583: ref=['WHAT', 'IS', 'THE', 'MATTER', 'ASKED', 'ALBERT', 'OF', 'FRANZ', 'WHEN', 'THEY', 'HAD', 'RETURNED', 'TO', 'THEIR', 'OWN', 'APARTMENTS', 'YOU', 'SEEM', 'MORE', 'THAN', 'COMMONLY', 'THOUGHTFUL'] +6070-86744-0014-2583: hyp=['WHAT', 'IS', 'THE', 'MATTER', 'ASKED', 'ALBERT', 'OF', 'FRANZ', 'WHEN', 'THEY', 'HAD', 'RETURNED', 'TO', 'THEIR', 'OWN', 'APARTMENTS', 'YOU', 'SEE', 'MORE', 'THAN', 'COMMONLY', 'THOUGHTFUL'] +6070-86744-0015-2584: ref=['I', 'WILL', 'CONFESS', 'TO', 'YOU', 'ALBERT', 'REPLIED', 'FRANZ', 'THE', 'COUNT', 'IS', 'A', 'VERY', 'SINGULAR', 'PERSON', 'AND', 'THE', 'APPOINTMENT', 'YOU', 'HAVE', 'MADE', 'TO', 'MEET', 'HIM', 'IN', 'PARIS', 'FILLS', 'ME', 'WITH', 'A', 'THOUSAND', 'APPREHENSIONS'] +6070-86744-0015-2584: hyp=['I', 'WILL', 'CONSIST', 'TO', 'YOU', 'ALBERT', 'REPLIED', 'FRANZ', 'THE', 'COUNT', 'IS', 'A', 'VERY', 'SINGULAR', 'PERSON', 'AND', 'THE', 'APPOINTMENT', 'YOU', 'HAVE', 'MADE', 'TO', 'MEET', 'HIM', 'IN', 'PARIS', 'FILLS', 'ME', 'WITH', 'A', 'THOUSAND', 'APPREHENSIONS'] +6070-86744-0016-2585: ref=['DID', 'YOU', 'EVER', 'MEET', 'HIM', 'PREVIOUSLY', 'TO', 'COMING', 'HITHER'] +6070-86744-0016-2585: hyp=['DID', 'YOU', 'EVER', 'MEET', 'HIM', 'PREVIOUSLY', 'TO', 'COMING', 'HITHER'] +6070-86744-0017-2586: ref=['UPON', 'MY', 'HONOR', 'THEN', 'LISTEN', 'TO', 'ME'] 
+6070-86744-0017-2586: hyp=['UPON', 'MY', 'HONOUR', 'THEN', 'LISTEN', 'TO', 'ME'] +6070-86744-0018-2587: ref=['HE', 'DWELT', 'WITH', 'CONSIDERABLE', 'FORCE', 'AND', 'ENERGY', 'ON', 'THE', 'ALMOST', 'MAGICAL', 'HOSPITALITY', 'HE', 'HAD', 'RECEIVED', 'FROM', 'THE', 'COUNT', 'AND', 'THE', 'MAGNIFICENCE', 'OF', 'HIS', 'ENTERTAINMENT', 'IN', 'THE', 'GROTTO', 'OF', 'THE', 'THOUSAND', 'AND', 'ONE', 'NIGHTS', 'HE', 'RECOUNTED', 'WITH', 'CIRCUMSTANTIAL', 'EXACTITUDE', 'ALL', 'THE', 'PARTICULARS', 'OF', 'THE', 'SUPPER', 'THE', 'HASHISH', 'THE', 'STATUES', 'THE', 'DREAM', 'AND', 'HOW', 'AT', 'HIS', 'AWAKENING', 'THERE', 'REMAINED', 'NO', 'PROOF', 'OR', 'TRACE', 'OF', 'ALL', 'THESE', 'EVENTS', 'SAVE', 'THE', 'SMALL', 'YACHT', 'SEEN', 'IN', 'THE', 'DISTANT', 'HORIZON', 'DRIVING', 'UNDER', 'FULL', 'SAIL', 'TOWARD', 'PORTO', 'VECCHIO'] +6070-86744-0018-2587: hyp=['HE', 'DWELT', 'WITH', 'CONSIDERABLE', 'FORCE', 'AND', 'ENERGY', 'ON', 'THE', 'ALMOST', 'MAGICAL', 'HOSPITALITY', 'HE', 'HAD', 'RECEIVED', 'FROM', 'THE', 'COUNT', 'AND', 'THE', 'MAGNIFICENCE', 'OF', 'HIS', 'ENTERTAINMENT', 'IN', 'THE', 'DRATO', 'OF', 'THE', 'THOUSAND', 'AND', 'ONE', 'NIGHTS', 'HE', 'RECOUNTED', 'WITH', 'CIRCUMSTANTIAL', 'EXACTITUDE', 'ALL', 'THE', 'PARTICULARS', 'OF', 'THE', 'SUPPER', 'THE', 'HASHISH', 'THE', 'STATUES', 'THE', 'DREAM', 'AND', 'HOW', 'AT', 'HIS', 'AWAKENING', 'THERE', 'REMAINED', 'NO', 'PROOF', 'OF', 'TRACE', 'OF', 'ALL', 'THESE', 'EVENTS', 'SAVE', 'THE', 'SMALL', 'YACHT', 'SEEN', 'IN', 'THE', 'DISTANT', 'HORIZON', 'DRIVING', 'UNDER', 'FULL', 'SAIL', 'TOWARD', 'PORTO', 'VECCHIO'] +6070-86744-0019-2588: ref=['THEN', 'HE', 'DETAILED', 'THE', 'CONVERSATION', 'OVERHEARD', 'BY', 'HIM', 'AT', 'THE', 'COLOSSEUM', 'BETWEEN', 'THE', 'COUNT', 'AND', 'VAMPA', 'IN', 'WHICH', 'THE', 'COUNT', 'HAD', 'PROMISED', 'TO', 'OBTAIN', 'THE', 'RELEASE', 'OF', 'THE', 'BANDIT', 'PEPPINO', 'AN', 'ENGAGEMENT', 'WHICH', 'AS', 'OUR', 'READERS', 'ARE', 'AWARE', 'HE', 'MOST', 'FAITHFULLY', 'FULFILLED'] +6070-86744-0019-2588: hyp=['THEN', 'HE', 'DETAILED', 'THE', 'CONVERSATION', 'OVERHEARD', 'BY', 'HIM', 'AT', 'THE', 'COLISEUM', 'BETWEEN', 'THE', 'COUNT', 'AND', 'VAMPA', 'IN', 'WHICH', 'THE', 'COUNT', 'HAD', 'PROMISED', 'TO', 'OBTAIN', 'THE', 'RELEASE', 'OF', 'THE', 'BANDIT', 'PEPPINO', 'AND', 'ENGAGEMENT', 'WHICH', 'AS', 'OUR', 'READERS', 'ARE', 'AWARE', 'HE', 'MOST', 'FAITHFULLY', 'FULFILLED'] +6070-86744-0020-2589: ref=['BUT', 'SAID', 'FRANZ', 'THE', 'CORSICAN', 'BANDITS', 'THAT', 'WERE', 'AMONG', 'THE', 'CREW', 'OF', 'HIS', 'VESSEL'] +6070-86744-0020-2589: hyp=['BUT', 'SAID', 'FRANZ', 'THE', 'CORSICIAN', 'BANDITS', 'THAT', 'WERE', 'AMONG', 'THE', 'CREW', 'OF', 'HIS', 'VESSEL'] +6070-86744-0021-2590: ref=['WHY', 'REALLY', 'THE', 'THING', 'SEEMS', 'TO', 'ME', 'SIMPLE', 'ENOUGH'] +6070-86744-0021-2590: hyp=['WHY', 'REALLY', 'THE', 'THING', 'SEEMS', 'TO', 'ME', 'SIMPLE', 'ENOUGH'] +6070-86744-0022-2591: ref=['TALKING', 'OF', 'COUNTRIES', 'REPLIED', 'FRANZ', 'OF', 'WHAT', 'COUNTRY', 'IS', 'THE', 'COUNT', 'WHAT', 'IS', 'HIS', 'NATIVE', 'TONGUE', 'WHENCE', 'DOES', 'HE', 'DERIVE', 'HIS', 'IMMENSE', 'FORTUNE', 'AND', 'WHAT', 'WERE', 'THOSE', 'EVENTS', 'OF', 'HIS', 'EARLY', 'LIFE', 'A', 'LIFE', 'AS', 'MARVELLOUS', 'AS', 'UNKNOWN', 'THAT', 'HAVE', 'TINCTURED', 'HIS', 'SUCCEEDING', 'YEARS', 'WITH', 'SO', 'DARK', 'AND', 'GLOOMY', 'A', 'MISANTHROPY'] +6070-86744-0022-2591: hyp=['TALKING', 'OF', 'COUNTRIES', 'REPLIED', 'FRANZ', 'OF', 'WHAT', 'COUNTRIES', 'THE', 'COUNT', 'WHAT', 'IS', 'HIS', 'NATIVE', 'DONG', 'WHENCE', 'DOES', 'HE', 'DERIVE', 'HIS', 
'IMMENSE', 'FORTUNE', 'AND', 'WHAT', 'WERE', 'THOSE', 'EVENTS', 'OF', 'HIS', 'EARLY', 'LIFE', 'A', 'LIFE', 'AS', 'MARVELLOUS', 'AS', 'UNKNOWN', 'THAT', 'HAVE', 'TINTED', 'HIS', 'SUCCEEDING', 'YEARS', 'WITH', 'SORE', 'DARK', 'AND', 'BLOOMY', 'AND', 'MISANTHROPY'] +6070-86744-0023-2592: ref=['CERTAINLY', 'THESE', 'ARE', 'QUESTIONS', 'THAT', 'IN', 'YOUR', 'PLACE', 'I', 'SHOULD', 'LIKE', 'TO', 'HAVE', 'ANSWERED'] +6070-86744-0023-2592: hyp=['CERTAINLY', 'THESE', 'ARE', 'QUESTIONS', 'THAT', 'IN', 'YOUR', 'PLACE', 'I', 'SHOULD', 'LIKE', 'TO', 'HAVE', 'ANSWERED'] +6070-86744-0024-2593: ref=['MY', 'DEAR', 'FRANZ', 'REPLIED', 'ALBERT', 'WHEN', 'UPON', 'RECEIPT', 'OF', 'MY', 'LETTER', 'YOU', 'FOUND', 'THE', 'NECESSITY', 'OF', 'ASKING', 'THE', "COUNT'S", 'ASSISTANCE', 'YOU', 'PROMPTLY', 'WENT', 'TO', 'HIM', 'SAYING', 'MY', 'FRIEND', 'ALBERT', 'DE', 'MORCERF', 'IS', 'IN', 'DANGER', 'HELP', 'ME', 'TO', 'DELIVER', 'HIM'] +6070-86744-0024-2593: hyp=['MY', 'DEAR', 'FRANCE', 'REPLIED', 'ALBERT', 'WHEN', 'UPON', 'RECEIPT', 'OF', 'MY', 'LETTER', 'YOU', 'FOUND', 'THE', 'NECESSITY', 'OF', 'ASKING', 'THE', "COUNT'S", 'ASSISTANCE', 'YOU', 'PROMPTLY', 'WENT', 'TO', 'HIM', 'SAYING', 'MY', 'FRIEND', 'ALBERT', 'DE', 'MORCERF', 'IS', 'IN', 'DANGER', 'HELPED', 'ME', 'TO', 'DELIVER', 'HIM'] +6070-86744-0025-2594: ref=['WHAT', 'ARE', 'HIS', 'MEANS', 'OF', 'EXISTENCE', 'WHAT', 'IS', 'HIS', 'BIRTHPLACE', 'OF', 'WHAT', 'COUNTRY', 'IS', 'HE', 'A', 'NATIVE'] +6070-86744-0025-2594: hyp=['WHAT', 'ARE', 'HIS', 'MEANS', 'OF', 'EXISTENCE', 'WHAT', 'IS', 'HIS', 'BOTH', 'PLEASE', 'OF', 'WHAT', 'COUNTRIES', 'HE', 'A', 'NATIVE'] +6070-86744-0026-2595: ref=['I', 'CONFESS', 'HE', 'ASKED', 'ME', 'NONE', 'NO', 'HE', 'MERELY', 'CAME', 'AND', 'FREED', 'ME', 'FROM', 'THE', 'HANDS', 'OF', 'SIGNOR', 'VAMPA', 'WHERE', 'I', 'CAN', 'ASSURE', 'YOU', 'IN', 'SPITE', 'OF', 'ALL', 'MY', 'OUTWARD', 'APPEARANCE', 'OF', 'EASE', 'AND', 'UNCONCERN', 'I', 'DID', 'NOT', 'VERY', 'PARTICULARLY', 'CARE', 'TO', 'REMAIN'] +6070-86744-0026-2595: hyp=['I', 'CONFESS', 'HE', 'ASKED', 'ME', 'NONE', 'NO', 'HE', 'MERELY', 'CAME', 'AND', 'FREED', 'ME', 'FROM', 'THE', 'HANDS', 'OF', 'SENOR', 'VAMPA', 'WHERE', 'I', 'CAN', 'ASSURE', 'YOU', 'IN', 'SPITE', 'OF', 'ALL', 'MY', 'OUTWARD', 'APPEARANCE', 'OF', 'EASE', 'AND', 'UNCONCERN', 'I', 'DID', 'NOT', 'VERY', 'PARTICULARLY', 'CARE', 'TO', 'REMAIN'] +6070-86744-0027-2596: ref=['AND', 'THIS', 'TIME', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'CONTRARY', 'TO', 'THE', 'USUAL', 'STATE', 'OF', 'AFFAIRS', 'IN', 'DISCUSSIONS', 'BETWEEN', 'THE', 'YOUNG', 'MEN', 'THE', 'EFFECTIVE', 'ARGUMENTS', 'WERE', 'ALL', 'ON', "ALBERT'S", 'SIDE'] +6070-86744-0027-2596: hyp=['AND', 'THIS', 'TIME', 'IT', 'MUST', 'BE', 'CONFESSED', 'THAT', 'CONTRARY', 'TO', 'THE', 'USUAL', 'STATE', 'OF', 'AFFAIRS', 'IN', 'DISCUSSIONS', 'BETWEEN', 'THE', 'YOUNG', 'MEN', 'THE', 'EFFECTIVE', 'ARGUMENTS', 'WERE', 'ALL', 'ON', "ALBERT'S", 'SIDE'] +6070-86744-0028-2597: ref=['WELL', 'SAID', 'FRANZ', 'WITH', 'A', 'SIGH', 'DO', 'AS', 'YOU', 'PLEASE', 'MY', 'DEAR', 'VISCOUNT', 'FOR', 'YOUR', 'ARGUMENTS', 'ARE', 'BEYOND', 'MY', 'POWERS', 'OF', 'REFUTATION'] +6070-86744-0028-2597: hyp=['WELL', 'SAID', 'FRANZ', 'WITH', 'A', 'SIGH', 'DO', 'AS', 'YOU', 'PLEASE', 'MY', 'DEAR', 'VISCOUNT', 'FOR', 'YOUR', 'ARGUMENTS', 'ARE', 'BEYOND', 'MY', 'POWERS', 'OF', 'REFUTATION'] +6070-86744-0029-2598: ref=['AND', 'NOW', 'MY', 'DEAR', 'FRANZ', 'LET', 'US', 'TALK', 'OF', 'SOMETHING', 'ELSE'] +6070-86744-0029-2598: hyp=['AND', 'NOW', 'MY', 'DEAR', 'FRANZ', 'LET', 'US', 'TALK', 'OF', 'SOMETHING', 
'ELSE'] +6070-86745-0000-2549: ref=['THEN', 'SHOULD', 'ANYTHING', 'APPEAR', 'TO', 'MERIT', 'A', 'MORE', 'MINUTE', 'EXAMINATION', 'ALBERT', 'DE', 'MORCERF', 'COULD', 'FOLLOW', 'UP', 'HIS', 'RESEARCHES', 'BY', 'MEANS', 'OF', 'A', 'SMALL', 'GATE', 'SIMILAR', 'TO', 'THAT', 'CLOSE', 'TO', 'THE', "CONCIERGE'S", 'DOOR', 'AND', 'WHICH', 'MERITS', 'A', 'PARTICULAR', 'DESCRIPTION'] +6070-86745-0000-2549: hyp=['THEN', 'SHOULD', 'ANYTHING', 'APPEAR', 'TO', 'MERIT', 'A', 'MORE', 'MINUTE', 'EXAMINATION', 'ALBERT', 'THE', 'MORCERF', 'COULD', 'FOLLOW', 'UP', 'HIS', 'RESEARCHES', 'BY', 'MEANS', 'OF', 'A', 'SMALL', 'GATE', 'SIMILAR', 'TO', 'THAT', 'CLOSE', 'TO', 'THE', "CONCIERGE'S", 'DOOR', 'AND', 'WHICH', 'MERITS', 'OF', 'PARTICULAR', 'DESCRIPTION'] +6070-86745-0001-2550: ref=['SHRUBS', 'AND', 'CREEPING', 'PLANTS', 'COVERED', 'THE', 'WINDOWS', 'AND', 'HID', 'FROM', 'THE', 'GARDEN', 'AND', 'COURT', 'THESE', 'TWO', 'APARTMENTS', 'THE', 'ONLY', 'ROOMS', 'INTO', 'WHICH', 'AS', 'THEY', 'WERE', 'ON', 'THE', 'GROUND', 'FLOOR', 'THE', 'PRYING', 'EYES', 'OF', 'THE', 'CURIOUS', 'COULD', 'PENETRATE'] +6070-86745-0001-2550: hyp=['SHRUBS', 'AND', 'CREEPING', 'PLANTS', 'COVERED', 'THE', 'WINDOWS', 'AND', 'HID', 'FROM', 'THE', 'GARDEN', 'AND', 'COURT', 'THESE', 'TWO', 'APARTMENTS', 'THE', 'ONLY', 'ROOMS', 'INTO', 'WHICH', 'AS', 'THEY', 'WERE', 'ON', 'THE', 'GROUND', 'FLOOR', 'THE', 'PRYING', 'EYES', 'OF', 'THE', 'CURIOUS', 'COULD', 'PENETRATE'] +6070-86745-0002-2551: ref=['AT', 'A', 'QUARTER', 'TO', 'TEN', 'A', 'VALET', 'ENTERED', 'HE', 'COMPOSED', 'WITH', 'A', 'LITTLE', 'GROOM', 'NAMED', 'JOHN', 'AND', 'WHO', 'ONLY', 'SPOKE', 'ENGLISH', 'ALL', "ALBERT'S", 'ESTABLISHMENT', 'ALTHOUGH', 'THE', 'COOK', 'OF', 'THE', 'HOTEL', 'WAS', 'ALWAYS', 'AT', 'HIS', 'SERVICE', 'AND', 'ON', 'GREAT', 'OCCASIONS', 'THE', "COUNT'S", 'CHASSEUR', 'ALSO'] +6070-86745-0002-2551: hyp=['AT', 'A', 'QUARTER', 'TO', 'TEN', 'THE', 'VALLED', 'ENTERED', 'HE', 'COMPOSED', 'WITH', 'A', 'LITTLE', 'ROOM', 'NAMED', 'JOHN', 'AND', 'WHO', 'ONLY', 'SPOKE', 'ENGLISH', 'ALL', 'ALBERTS', 'ESTABLISHMENT', 'ALTHOUGH', 'THE', 'COOK', 'OF', 'THE', 'HOTEL', 'WAS', 'ALWAYS', 'AT', 'HIS', 'SERVICE', 'AND', 'ON', 'GREAT', 'OCCASIONS', 'THE', "COUNT'S", 'CHASSEUR', 'ALSO'] +6070-86745-0003-2552: ref=['WAIT', 'THEN', 'DURING', 'THE', 'DAY', 'TELL', 'ROSA', 'THAT', 'WHEN', 'I', 'LEAVE', 'THE', 'OPERA', 'I', 'WILL', 'SUP', 'WITH', 'HER', 'AS', 'SHE', 'WISHES'] +6070-86745-0003-2552: hyp=['WAIT', 'THEN', 'DURING', 'THE', 'DAY', 'TELL', 'ROSA', 'THAT', 'WHEN', 'I', 'LEAVE', 'THE', 'OPERA', 'I', 'WILL', 'SUP', 'WITH', 'HER', 'AS', 'SHE', 'WISHES'] +6070-86745-0004-2553: ref=['VERY', 'WELL', 'AT', 'HALF', 'PAST', 'TEN'] +6070-86745-0004-2553: hyp=['VERY', 'WELL', 'AT', 'HALF', 'PAST', 'TEN'] +6070-86745-0005-2554: ref=['IS', 'THE', 'COUNTESS', 'UP', 'YET'] +6070-86745-0005-2554: hyp=['IS', 'THE', 'COUNTESS', 'UP', 'YET'] +6070-86745-0006-2555: ref=['THE', 'VALET', 'LEFT', 'THE', 'ROOM'] +6070-86745-0006-2555: hyp=['THE', 'VALET', 'LEFT', 'THE', 'ROOM'] +6070-86745-0007-2556: ref=['GOOD', 'MORNING', 'LUCIEN', 'GOOD', 'MORNING', 'SAID', 'ALBERT', 'YOUR', 'PUNCTUALITY', 'REALLY', 'ALARMS', 'ME'] +6070-86745-0007-2556: hyp=['GOOD', 'MORNING', 'MISS', 'YOUNG', 'GOOD', 'MORNING', 'SAID', 'ALBERT', 'YOUR', 'PUNCTUALITY', 'REALLY', 'ALARMS', 'ME'] +6070-86745-0008-2557: ref=['YOU', 'WHOM', 'I', 'EXPECTED', 'LAST', 'YOU', 'ARRIVE', 'AT', 'FIVE', 'MINUTES', 'TO', 'TEN', 'WHEN', 'THE', 'TIME', 'FIXED', 'WAS', 'HALF', 'PAST'] +6070-86745-0008-2557: hyp=['YOU', 'WHOM', 'I', 'EXPECTED', 
'LAST', 'YOU', 'ARRIVE', 'AT', 'FIVE', 'MINUTES', 'TO', 'TEN', 'WHEN', 'THE', 'TIME', 'FIXED', 'WAS', 'HALF', 'PAST'] +6070-86745-0009-2558: ref=['NO', 'NO', 'MY', 'DEAR', 'FELLOW', 'DO', 'NOT', 'CONFOUND', 'OUR', 'PLANS'] +6070-86745-0009-2558: hyp=['NO', 'NO', 'MY', 'DEAR', 'FELLOW', 'DO', 'NOT', 'CONFOUND', 'OUR', 'PLANS'] +6070-86745-0010-2559: ref=['YES', 'HE', 'HAS', 'NOT', 'MUCH', 'TO', 'COMPLAIN', 'OF', 'BOURGES', 'IS', 'THE', 'CAPITAL', 'OF', 'CHARLES', 'SEVEN'] +6070-86745-0010-2559: hyp=['YES', 'HE', 'HAS', 'NOT', 'MUCH', 'TO', 'COMPLAIN', 'OF', 'BOURGE', 'IS', 'THE', 'CAPITAL', 'OF', 'CHARLES', 'THE', 'SEVENTH'] +6070-86745-0011-2560: ref=['IT', 'IS', 'FOR', 'THAT', 'REASON', 'YOU', 'SEE', 'ME', 'SO', 'EARLY'] +6070-86745-0011-2560: hyp=['IT', 'IS', 'FOR', 'THAT', 'REASON', 'YOU', 'SEE', 'ME', 'SO', 'EARLY'] +6070-86745-0012-2561: ref=['I', 'RETURNED', 'HOME', 'AT', 'DAYBREAK', 'AND', 'STROVE', 'TO', 'SLEEP', 'BUT', 'MY', 'HEAD', 'ACHED', 'AND', 'I', 'GOT', 'UP', 'TO', 'HAVE', 'A', 'RIDE', 'FOR', 'AN', 'HOUR'] +6070-86745-0012-2561: hyp=['I', 'RETURNED', 'HOME', 'AT', 'DAYBREAK', 'AND', 'STROVE', 'TO', 'SLEEP', 'BUT', 'MY', 'HEAD', 'ACHED', 'AND', 'I', 'GOT', 'UP', 'TO', 'HAVE', 'A', 'RIDE', 'FOR', 'AN', 'HOUR'] +6070-86745-0013-2562: ref=['PESTE', 'I', 'WILL', 'DO', 'NOTHING', 'OF', 'THE', 'KIND', 'THE', 'MOMENT', 'THEY', 'COME', 'FROM', 'GOVERNMENT', 'YOU', 'WOULD', 'FIND', 'THEM', 'EXECRABLE'] +6070-86745-0013-2562: hyp=['PESTS', 'I', 'WILL', 'DO', 'NOTHING', 'OF', 'THE', 'KIND', 'THE', 'MOMENT', 'THEY', 'COME', 'FROM', 'GOVERNMENT', 'YOU', 'WOULD', 'FIND', 'THEM', 'EXECRABLE'] +6070-86745-0014-2563: ref=['BESIDES', 'THAT', 'DOES', 'NOT', 'CONCERN', 'THE', 'HOME', 'BUT', 'THE', 'FINANCIAL', 'DEPARTMENT'] +6070-86745-0014-2563: hyp=['BESIDES', 'THAT', 'DOES', 'NOT', 'CONCERN', 'THE', 'HOME', 'BUT', 'THE', 'FINANCIAL', 'DEPARTMENT'] +6070-86745-0015-2564: ref=['ABOUT', 'WHAT', 'ABOUT', 'THE', 'PAPERS'] +6070-86745-0015-2564: hyp=['ABOUT', 'WHAT', 'ABOUT', 'THE', 'PAPERS'] +6070-86745-0016-2565: ref=['IN', 'THE', 'ENTIRE', 'POLITICAL', 'WORLD', 'OF', 'WHICH', 'YOU', 'ARE', 'ONE', 'OF', 'THE', 'LEADERS'] +6070-86745-0016-2565: hyp=['IN', 'THE', 'ENTIRE', 'POLITICAL', 'WORLD', 'OF', 'WHICH', 'YOU', 'ARE', 'ONE', 'OF', 'THE', 'LEADERS'] +6070-86745-0017-2566: ref=['THEY', 'SAY', 'THAT', 'IT', 'IS', 'QUITE', 'FAIR', 'AND', 'THAT', 'SOWING', 'SO', 'MUCH', 'RED', 'YOU', 'OUGHT', 'TO', 'REAP', 'A', 'LITTLE', 'BLUE'] +6070-86745-0017-2566: hyp=['THEY', 'SAY', 'THAT', 'IT', 'IS', 'QUITE', 'FAIR', 'AND', 'THAT', 'SOWING', 'SO', 'MUCH', 'RED', 'YOU', 'OUGHT', 'TO', 'REAP', 'A', 'LITTLE', 'BLUE'] +6070-86745-0018-2567: ref=['COME', 'COME', 'THAT', 'IS', 'NOT', 'BAD', 'SAID', 'LUCIEN'] +6070-86745-0018-2567: hyp=['COME', 'COME', 'THAT', 'IS', 'NOT', 'BAD', 'SAID', 'LUCIAN'] +6070-86745-0019-2568: ref=['WITH', 'YOUR', 'TALENTS', 'YOU', 'WOULD', 'MAKE', 'YOUR', 'FORTUNE', 'IN', 'THREE', 'OR', 'FOUR', 'YEARS'] +6070-86745-0019-2568: hyp=['WITH', 'THE', 'OTALONS', 'HE', 'WOULD', 'MAKE', 'YOUR', 'FORTUNE', 'IN', 'THREE', 'OR', 'FOUR', 'YEARS'] +6128-63240-0000-503: ref=['THE', 'GENTLEMAN', 'HAD', 'NOT', 'EVEN', 'NEEDED', 'TO', 'SIT', 'DOWN', 'TO', 'BECOME', 'INTERESTED', 'APPARENTLY', 'HE', 'HAD', 'TAKEN', 'UP', 'THE', 'VOLUME', 'FROM', 'A', 'TABLE', 'AS', 'SOON', 'AS', 'HE', 'CAME', 'IN', 'AND', 'STANDING', 'THERE', 'AFTER', 'A', 'SINGLE', 'GLANCE', 'ROUND', 'THE', 'APARTMENT', 'HAD', 'LOST', 'HIMSELF', 'IN', 'ITS', 'PAGES'] +6128-63240-0000-503: hyp=['THE', 'GENTLEMAN', 'HAD', 'NOT', 'EVEN', 
'NEEDED', 'TO', 'SIT', 'DOWN', 'TO', 'BECOME', 'INTERESTED', 'APPARENTLY', 'HE', 'HAD', 'TAKEN', 'UP', 'THE', 'VOLUME', 'FROM', 'A', 'TABLE', 'AS', 'SOON', 'AS', 'HE', 'CAME', 'IN', 'AND', 'STANDING', 'THERE', 'AFTER', 'A', 'SINGLE', 'GLANCE', 'ROUND', 'THE', 'APARTMENT', 'HAD', 'LOST', 'HIMSELF', 'IN', 'HIS', 'PAGES'] +6128-63240-0001-504: ref=['THAT', 'HAS', 'AN', 'UNFLATTERING', 'SOUND', 'FOR', 'ME', 'SAID', 'THE', 'YOUNG', 'MAN'] +6128-63240-0001-504: hyp=['THAT', 'HAS', 'AN', 'UNFLATTERING', 'SOUND', 'FOR', 'ME', 'SAID', 'THE', 'YOUNG', 'MAN'] +6128-63240-0002-505: ref=['SHE', 'IS', 'WILLING', 'TO', 'RISK', 'THAT'] +6128-63240-0002-505: hyp=['SHE', 'IS', 'WILLING', 'TO', 'RISK', 'THAT'] +6128-63240-0003-506: ref=['JUST', 'AS', 'I', 'AM', 'THE', 'VISITOR', 'INQUIRED', 'PRESENTING', 'HIMSELF', 'WITH', 'RATHER', 'A', 'WORK', 'A', 'DAY', 'ASPECT'] +6128-63240-0003-506: hyp=['JUST', 'AS', 'I', 'AM', 'THE', 'VISITOR', 'INQUIRED', 'PRESENTING', 'HIMSELF', 'WITH', 'RATHER', 'A', 'WORKADAY', 'ASPECT'] +6128-63240-0004-507: ref=['HE', 'WAS', 'TALL', 'AND', 'LEAN', 'AND', 'DRESSED', 'THROUGHOUT', 'IN', 'BLACK', 'HIS', 'SHIRT', 'COLLAR', 'WAS', 'LOW', 'AND', 'WIDE', 'AND', 'THE', 'TRIANGLE', 'OF', 'LINEN', 'A', 'LITTLE', 'CRUMPLED', 'EXHIBITED', 'BY', 'THE', 'OPENING', 'OF', 'HIS', 'WAISTCOAT', 'WAS', 'ADORNED', 'BY', 'A', 'PIN', 'CONTAINING', 'A', 'SMALL', 'RED', 'STONE'] +6128-63240-0004-507: hyp=['HE', 'WAS', 'TALL', 'AND', 'LEAN', 'AND', 'DRESSED', 'THROUGHOUT', 'IN', 'BLACK', 'HIS', 'SHIRT', 'COLLAR', 'WAS', 'LOW', 'AND', 'WIDE', 'AND', 'THE', 'TRIANGLE', 'OF', 'LINEN', 'A', 'LITTLE', 'CRAMPLED', 'EXHIBITED', 'BY', 'THE', 'OPENING', 'OF', 'HIS', 'WAISTCOAT', 'WAS', 'ADORNED', 'BY', 'A', 'PIN', 'CONTAINING', 'A', 'SMALL', 'RED', 'STONE'] +6128-63240-0005-508: ref=['IN', 'SPITE', 'OF', 'THIS', 'DECORATION', 'THE', 'YOUNG', 'MAN', 'LOOKED', 'POOR', 'AS', 'POOR', 'AS', 'A', 'YOUNG', 'MAN', 'COULD', 'LOOK', 'WHO', 'HAD', 'SUCH', 'A', 'FINE', 'HEAD', 'AND', 'SUCH', 'MAGNIFICENT', 'EYES'] +6128-63240-0005-508: hyp=['IN', 'SPITE', 'OF', 'THIS', 'DECORATION', 'THE', 'YOUNG', 'MAN', 'LOOKED', 'POOR', 'AS', 'FAR', 'AS', 'A', 'YOUNG', 'MAN', 'COULD', 'LOOK', 'WHO', 'HAD', 'SUCH', 'A', 'FINE', 'HEAD', 'AND', 'SUCH', 'MAGNIFICENT', 'EYES'] +6128-63240-0006-509: ref=['THOSE', 'OF', 'BASIL', 'RANSOM', 'WERE', 'DARK', 'DEEP', 'AND', 'GLOWING', 'HIS', 'HEAD', 'HAD', 'A', 'CHARACTER', 'OF', 'ELEVATION', 'WHICH', 'FAIRLY', 'ADDED', 'TO', 'HIS', 'STATURE', 'IT', 'WAS', 'A', 'HEAD', 'TO', 'BE', 'SEEN', 'ABOVE', 'THE', 'LEVEL', 'OF', 'A', 'CROWD', 'ON', 'SOME', 'JUDICIAL', 'BENCH', 'OR', 'POLITICAL', 'PLATFORM', 'OR', 'EVEN', 'ON', 'A', 'BRONZE', 'MEDAL'] +6128-63240-0006-509: hyp=['THOSE', 'OF', 'BASIL', 'RANSOM', 'WENT', 'DARK', 'DEEP', 'AND', 'GLOWING', 'HIS', 'HEAD', 'HAD', 'A', 'CHARACTER', 'OF', 'ELEVATION', 'WHICH', 'FAIRLY', 'ADDED', 'TO', 'HIS', 'STATUE', 'IT', 'WAS', 'A', 'HEAD', 'TO', 'BE', 'SEEN', 'ABOVE', 'THE', 'LEVEL', 'OF', 'A', 'CROWD', 'ON', 'SOME', 'JUDICIAL', 'BENCH', 'OR', 'POLITICAL', 'PLATFORM', 'OR', 'EVEN', 'ON', 'A', 'BRONZE', 'MEDAL'] +6128-63240-0007-510: ref=['THESE', 'THINGS', 'THE', 'EYES', 'ESPECIALLY', 'WITH', 'THEIR', 'SMOULDERING', 'FIRE', 'MIGHT', 'HAVE', 'INDICATED', 'THAT', 'HE', 'WAS', 'TO', 'BE', 'A', 'GREAT', 'AMERICAN', 'STATESMAN', 'OR', 'ON', 'THE', 'OTHER', 'HAND', 'THEY', 'MIGHT', 'SIMPLY', 'HAVE', 'PROVED', 'THAT', 'HE', 'CAME', 'FROM', 'CAROLINA', 'OR', 'ALABAMA'] +6128-63240-0007-510: hyp=['THESE', 'THINGS', 'THE', 'EYES', 'ESPECIALLY', 'WITH', 'THEIR', 
'SMOULDERING', 'FIRE', 'MIGHT', 'HAVE', 'INDICATED', 'THAT', 'HE', 'WAS', 'TO', 'BE', 'GREAT', 'AMERICAN', 'STATESMAN', 'OR', 'ON', 'THE', 'OTHER', 'HAND', 'THERE', 'MIGHT', 'SIMPLY', 'HAVE', 'PROVED', 'THAT', 'HE', 'CAME', 'FROM', 'CAROLINA', 'OR', 'ALADAMA'] +6128-63240-0008-511: ref=['AND', 'YET', 'THE', 'READER', 'WHO', 'LIKES', 'A', 'COMPLETE', 'IMAGE', 'WHO', 'DESIRES', 'TO', 'READ', 'WITH', 'THE', 'SENSES', 'AS', 'WELL', 'AS', 'WITH', 'THE', 'REASON', 'IS', 'ENTREATED', 'NOT', 'TO', 'FORGET', 'THAT', 'HE', 'PROLONGED', 'HIS', 'CONSONANTS', 'AND', 'SWALLOWED', 'HIS', 'VOWELS', 'THAT', 'HE', 'WAS', 'GUILTY', 'OF', 'ELISIONS', 'AND', 'INTERPOLATIONS', 'WHICH', 'WERE', 'EQUALLY', 'UNEXPECTED', 'AND', 'THAT', 'HIS', 'DISCOURSE', 'WAS', 'PERVADED', 'BY', 'SOMETHING', 'SULTRY', 'AND', 'VAST', 'SOMETHING', 'ALMOST', 'AFRICAN', 'IN', 'ITS', 'RICH', 'BASKING', 'TONE', 'SOMETHING', 'THAT', 'SUGGESTED', 'THE', 'TEEMING', 'EXPANSE', 'OF', 'THE', 'COTTON', 'FIELD'] +6128-63240-0008-511: hyp=['AND', 'YET', 'THE', 'READER', 'WHO', 'LIKES', 'A', 'COMPLETE', 'IMAGE', 'WHO', 'DESIRES', 'TO', 'READ', 'WITH', 'THE', 'SENSES', 'AS', 'WELL', 'AS', 'WITH', 'THE', 'REASON', 'IS', 'ENTREATED', 'NOT', 'TO', 'FORGET', 'THAT', 'HE', 'PROLONGED', 'HIS', 'COUNTENANCE', 'AND', 'SWALLOWED', 'HIS', 'VOWELS', 'THAT', 'HE', 'WAS', 'GUILTY', 'OF', 'ELYGIANCE', 'AND', 'INTERPOLATIONS', 'WHICH', 'WERE', 'EQUALLY', 'UNEXPECTED', 'AND', 'THAT', 'HIS', 'DISCOURSE', 'WAS', 'PERVADED', 'BY', 'SOMETHING', 'SULTRY', 'AND', 'VAST', 'SOMETHING', 'ALMOST', 'AFRICAN', 'IN', 'ITS', 'RICH', 'BASKING', 'TONE', 'SOMETHING', 'THAT', 'SUGGESTED', 'THE', 'TEEMING', 'EXPANSE', 'OF', 'THE', 'COTTON', 'FIELD'] +6128-63240-0009-512: ref=['AND', 'HE', 'TOOK', 'UP', 'HIS', 'HAT', 'VAGUELY', 'A', 'SOFT', 'BLACK', 'HAT', 'WITH', 'A', 'LOW', 'CROWN', 'AND', 'AN', 'IMMENSE', 'STRAIGHT', 'BRIM'] +6128-63240-0009-512: hyp=['AND', 'HE', 'TOOK', 'UP', 'HIS', 'HAT', 'VAGUELY', 'A', 'SOFT', 'BLACK', 'HAT', 'WITH', 'A', 'LOW', 'CROWN', 'AND', 'AN', 'IMMENSE', 'STRAIGHT', 'BRIM'] +6128-63240-0010-513: ref=['WELL', 'SO', 'IT', 'IS', 'THEY', 'ARE', 'ALL', 'WITCHES', 'AND', 'WIZARDS', 'MEDIUMS', 'AND', 'SPIRIT', 'RAPPERS', 'AND', 'ROARING', 'RADICALS'] +6128-63240-0010-513: hyp=['WELL', 'SO', 'IT', 'IS', 'THERE', 'ARE', 'ALL', 'WITCHES', 'AND', 'WIZARDS', 'MEDIUMS', 'AND', 'SPIRIT', 'WRAPPERS', 'AND', 'ROWING', 'RADICALS'] +6128-63240-0011-514: ref=['IF', 'YOU', 'ARE', 'GOING', 'TO', 'DINE', 'WITH', 'HER', 'YOU', 'HAD', 'BETTER', 'KNOW', 'IT', 'OH', 'MURDER'] +6128-63240-0011-514: hyp=['IF', 'YOU', 'ARE', 'GOING', 'TO', 'DINE', 'WITH', 'HER', 'YOU', 'HAD', 'BETTER', 'KNOW', 'IT', 'OH', 'MURDER'] +6128-63240-0012-515: ref=['HE', 'LOOKED', 'AT', 'MISSUS', 'LUNA', 'WITH', 'INTELLIGENT', 'INCREDULITY'] +6128-63240-0012-515: hyp=['HE', 'LIFTED', 'MISSUS', 'LEWINA', 'WITH', 'INTELLIGENT', 'INCREDULITY'] +6128-63240-0013-516: ref=['SHE', 'WAS', 'ATTRACTIVE', 'AND', 'IMPERTINENT', 'ESPECIALLY', 'THE', 'LATTER'] +6128-63240-0013-516: hyp=['SHE', 'WAS', 'ATTRACTIVE', 'AND', 'IMPERTINENT', 'ESPECIALLY', 'THE', 'LATTER'] +6128-63240-0014-517: ref=['HAVE', 'YOU', 'BEEN', 'IN', 'EUROPE'] +6128-63240-0014-517: hyp=['HAVE', 'YOU', 'BEEN', 'IN', 'EUROPE'] +6128-63240-0015-518: ref=['NO', 'I', "HAVEN'T", 'BEEN', 'ANYWHERE'] +6128-63240-0015-518: hyp=['NO', 'I', "HAVEN'T", 'BEEN', 'ANYWHERE'] +6128-63240-0016-519: ref=['SHE', 'HATES', 'IT', 'SHE', 'WOULD', 'LIKE', 'TO', 'ABOLISH', 'IT'] +6128-63240-0016-519: hyp=['SHE', 'HATES', 'IT', 'SHE', 'WOULD', 'LIKE', 'TO', 'ABOLISH', 
'IT'] +6128-63240-0017-520: ref=['THIS', 'LAST', 'REMARK', 'HE', 'MADE', 'AT', 'A', 'VENTURE', 'FOR', 'HE', 'HAD', 'NATURALLY', 'NOT', 'DEVOTED', 'ANY', 'SUPPOSITION', 'WHATEVER', 'TO', 'MISSUS', 'LUNA'] +6128-63240-0017-520: hyp=['THIS', 'LAST', 'REMARK', 'HE', 'MADE', 'THAT', 'ADVENTURE', 'FOR', 'HE', 'HAD', 'NATURALLY', 'NOT', 'DEVOTED', 'ANY', 'SUPPOSITION', 'WHATEVER', 'TO', 'MISSUS', 'LENA'] +6128-63240-0018-521: ref=['ARE', 'YOU', 'VERY', 'AMBITIOUS', 'YOU', 'LOOK', 'AS', 'IF', 'YOU', 'WERE'] +6128-63240-0018-521: hyp=['ARE', 'YOU', 'VERY', 'AMBITIOUS', 'YOU', 'LOOK', 'AS', 'IF', 'YOU', 'WERE'] +6128-63240-0019-522: ref=['AND', 'MISSUS', 'LUNA', 'ADDED', 'THAT', 'NOW', 'SHE', 'WAS', 'BACK', 'SHE', "DIDN'T", 'KNOW', 'WHAT', 'SHE', 'SHOULD', 'DO'] +6128-63240-0019-522: hyp=['AND', 'MISSUS', 'LENA', 'ADDED', 'THAT', 'NOW', 'SHE', 'WAS', 'BACK', 'SHE', "DIDN'T", 'KNOW', 'WHAT', 'SHE', 'SHOULD', 'DO'] +6128-63240-0020-523: ref=['ONE', "DIDN'T", 'EVEN', 'KNOW', 'WHAT', 'ONE', 'HAD', 'COME', 'BACK', 'FOR'] +6128-63240-0020-523: hyp=['ONE', "DIDN'T", 'EVEN', 'THERE', 'WHAT', 'ONE', 'HAD', 'COME', 'BACK', 'FOR'] +6128-63240-0021-524: ref=['BESIDES', 'OLIVE', "DIDN'T", 'WANT', 'HER', 'IN', 'BOSTON', 'AND', "DIDN'T", 'GO', 'THROUGH', 'THE', 'FORM', 'OF', 'SAYING', 'SO'] +6128-63240-0021-524: hyp=['BESIDES', 'OLIVE', "DIDN'T", 'WANT', 'HER', 'IN', 'BOSTON', 'AND', "DIDN'T", 'GO', 'THROUGH', 'THE', 'FORM', 'OF', 'SAYING', 'SO'] +6128-63240-0022-525: ref=['THAT', 'WAS', 'ONE', 'COMFORT', 'WITH', 'OLIVE', 'SHE', 'NEVER', 'WENT', 'THROUGH', 'ANY', 'FORMS'] +6128-63240-0022-525: hyp=['THAT', 'WAS', 'ONE', 'COMFORT', 'WITH', 'ALIVE', 'SHE', 'NEVER', 'WENT', 'THROUGH', 'ANY', 'FORMS'] +6128-63240-0023-526: ref=['SHE', 'STOOD', 'THERE', 'LOOKING', 'CONSCIOUSLY', 'AND', 'RATHER', 'SERIOUSLY', 'AT', 'MISTER', 'RANSOM', 'A', 'SMILE', 'OF', 'EXCEEDING', 'FAINTNESS', 'PLAYED', 'ABOUT', 'HER', 'LIPS', 'IT', 'WAS', 'JUST', 'PERCEPTIBLE', 'ENOUGH', 'TO', 'LIGHT', 'UP', 'THE', 'NATIVE', 'GRAVITY', 'OF', 'HER', 'FACE'] +6128-63240-0023-526: hyp=['SHE', 'STOOD', 'THERE', 'LOOKING', 'CONSCIOUSLY', 'AND', 'RATHER', 'SERIOUSLY', 'AND', 'MISTER', 'RANSOM', 'A', 'SMILE', 'OF', 'EXCEEDING', 'FAINTNESS', 'PLAYED', 'ABOUT', 'HER', 'LIPS', 'IT', 'WAS', 'JUST', 'PERCEPTIBLE', 'ENOUGH', 'TO', 'LIGHT', 'UP', 'THE', 'NATIVE', 'GRAVITY', 'OF', 'HER', 'FACE'] +6128-63240-0024-527: ref=['HER', 'VOICE', 'WAS', 'LOW', 'AND', 'AGREEABLE', 'A', 'CULTIVATED', 'VOICE', 'AND', 'SHE', 'EXTENDED', 'A', 'SLENDER', 'WHITE', 'HAND', 'TO', 'HER', 'VISITOR', 'WHO', 'REMARKED', 'WITH', 'SOME', 'SOLEMNITY', 'HE', 'FELT', 'A', 'CERTAIN', 'GUILT', 'OF', 'PARTICIPATION', 'IN', 'MISSUS', "LUNA'S", 'INDISCRETION', 'THAT', 'HE', 'WAS', 'INTENSELY', 'HAPPY', 'TO', 'MAKE', 'HER', 'ACQUAINTANCE'] +6128-63240-0024-527: hyp=['HER', 'VOICE', 'WAS', 'LOW', 'AND', 'AGREEABLE', 'A', 'CULTIVATED', 'VOICE', 'AND', 'SHE', 'EXTENDED', 'A', 'SLENDER', 'WHITE', 'HAND', 'TO', 'HER', 'VISITOR', 'HER', 'REMARKED', 'WITH', 'SOME', 'SOLEMNITY', 'HE', 'FELT', 'A', 'CERTAIN', 'GUILT', 'OF', 'PARTICIPATION', 'IN', 'MISSUS', "LUNA'S", 'INDISCRETION', 'THAT', 'HE', 'WAS', 'INTENSELY', 'HAPPY', 'TO', 'MAKE', 'HER', 'ACQUAINTANCE'] +6128-63240-0025-528: ref=['HE', 'OBSERVED', 'THAT', 'MISS', "CHANCELLOR'S", 'HAND', 'WAS', 'AT', 'ONCE', 'COLD', 'AND', 'LIMP', 'SHE', 'MERELY', 'PLACED', 'IT', 'IN', 'HIS', 'WITHOUT', 'EXERTING', 'THE', 'SMALLEST', 'PRESSURE'] +6128-63240-0025-528: hyp=['HE', 'OBSERVED', 'THAT', 'MISS', "CHANCELLOR'S", 'HAND', 'WAS', 'AT', 'ONCE', 'CALLED', 'IN', 
'LIMP', 'SHE', 'MERELY', 'PLACED', 'IT', 'IN', 'HIS', 'WITHOUT', 'EXERTING', 'THE', 'SMALLEST', 'PRESSURE'] +6128-63240-0026-529: ref=['I', 'SHALL', 'BE', 'BACK', 'VERY', 'LATE', 'WE', 'ARE', 'GOING', 'TO', 'A', 'THEATRE', 'PARTY', "THAT'S", 'WHY', 'WE', 'DINE', 'SO', 'EARLY'] +6128-63240-0026-529: hyp=['I', 'SHALL', 'BE', 'BACK', 'VERY', 'LATE', 'WILL', "DON'T", 'YOU', 'THE', 'PARTY', "THAT'S", 'WHY', 'WE', 'DINE', 'SO', 'EARLY'] +6128-63240-0027-530: ref=['MISSUS', "LUNA'S", 'FAMILIARITY', 'EXTENDED', 'EVEN', 'TO', 'HER', 'SISTER', 'SHE', 'REMARKED', 'TO', 'MISS', 'CHANCELLOR', 'THAT', 'SHE', 'LOOKED', 'AS', 'IF', 'SHE', 'WERE', 'GOT', 'UP', 'FOR', 'A', 'SEA', 'VOYAGE'] +6128-63240-0027-530: hyp=['MISSUS', "LUNE'S", 'FAMILIARITY', 'EXTENDED', 'EVEN', 'TO', 'HER', 'SISTER', 'SHE', 'REMARKED', 'TO', 'MISS', 'CHANCELLOR', 'THAT', 'SHE', 'LOOKED', 'AS', 'IF', 'SHE', 'WERE', 'GOT', 'UP', 'FOR', 'A', 'SEA', 'VOYAGE'] +6128-63241-0000-557: ref=['POOR', 'RANSOM', 'ANNOUNCED', 'THIS', 'FACT', 'TO', 'HIMSELF', 'AS', 'IF', 'HE', 'HAD', 'MADE', 'A', 'GREAT', 'DISCOVERY', 'BUT', 'IN', 'REALITY', 'HE', 'HAD', 'NEVER', 'BEEN', 'SO', 'BOEOTIAN', 'AS', 'AT', 'THAT', 'MOMENT'] +6128-63241-0000-557: hyp=['POOR', 'RAMSON', 'ANNOUNCED', 'THIS', 'THAT', 'TO', 'HIMSELF', 'AS', 'IF', 'HE', 'HAD', 'MADE', 'A', 'GREAT', 'DISCOVERY', 'BUT', 'IN', 'REALITY', 'HE', 'HAD', 'NEVER', 'BEEN', 'SO', 'BE', 'OCHIAN', 'AS', 'AT', 'THAT', 'MOMENT'] +6128-63241-0001-558: ref=['THE', 'WOMEN', 'HE', 'HAD', 'HITHERTO', 'KNOWN', 'HAD', 'BEEN', 'MAINLY', 'OF', 'HIS', 'OWN', 'SOFT', 'CLIME', 'AND', 'IT', 'WAS', 'NOT', 'OFTEN', 'THEY', 'EXHIBITED', 'THE', 'TENDENCY', 'HE', 'DETECTED', 'AND', 'CURSORILY', 'DEPLORED', 'IN', 'MISSUS', "LUNA'S", 'SISTER'] +6128-63241-0001-558: hyp=['THE', 'WOMEN', 'HE', 'HAD', 'HITHERTO', 'KNOWN', 'HAD', 'BEEN', 'MAINLY', 'OF', 'HIS', 'OWN', 'SOFT', 'CLIMB', 'AND', 'IT', 'WAS', 'NOT', 'OFTEN', 'THEY', 'EXHIBITED', 'THE', 'TENDENCY', 'HE', 'DETECTED', 'AND', 'CURSORILY', 'DEPLORED', 'IN', 'MISSUS', "LUNA'S", 'SISTER'] +6128-63241-0002-559: ref=['RANSOM', 'WAS', 'PLEASED', 'WITH', 'THE', 'VISION', 'OF', 'THAT', 'REMEDY', 'IT', 'MUST', 'BE', 'REPEATED', 'THAT', 'HE', 'WAS', 'VERY', 'PROVINCIAL'] +6128-63241-0002-559: hyp=['RANSOM', 'WAS', 'PLEASED', 'WITH', 'THE', 'VISION', 'OF', 'THAT', 'REMEDY', 'IT', 'MUST', 'BE', 'REPEATED', 'THAT', 'HE', 'WAS', 'VERY', 'PROVINCIAL'] +6128-63241-0003-560: ref=['HE', 'WAS', 'SORRY', 'FOR', 'HER', 'BUT', 'HE', 'SAW', 'IN', 'A', 'FLASH', 'THAT', 'NO', 'ONE', 'COULD', 'HELP', 'HER', 'THAT', 'WAS', 'WHAT', 'MADE', 'HER', 'TRAGIC'] +6128-63241-0003-560: hyp=['HE', 'WAS', 'SORRY', 'FOR', 'HER', 'BUT', 'HIS', 'SORROW', 'IN', 'A', 'FLASH', 'THAT', 'NO', 'ONE', 'COULD', 'HELP', 'HER', 'THAT', 'WAS', 'WHAT', 'MADE', 'HER', 'TRAGIC'] +6128-63241-0004-561: ref=['SHE', 'COULD', 'NOT', 'DEFEND', 'HERSELF', 'AGAINST', 'A', 'RICH', 'ADMIRATION', 'A', 'KIND', 'OF', 'TENDERNESS', 'OF', 'ENVY', 'OF', 'ANY', 'ONE', 'WHO', 'HAD', 'BEEN', 'SO', 'HAPPY', 'AS', 'TO', 'HAVE', 'THAT', 'OPPORTUNITY'] +6128-63241-0004-561: hyp=['SHE', 'COULD', 'NOT', 'DEFEND', 'HERSELF', 'AGAINST', 'A', 'RICH', 'ADMIRATION', 'A', 'KIND', 'OF', 'TENDERNESS', 'OF', 'ENVY', 'OF', 'ANY', 'ONE', 'WHO', 'HAD', 'BEEN', 'SO', 'HAPPY', 'AS', 'TO', 'HAVE', 'THAT', 'OPPORTUNITY'] +6128-63241-0005-562: ref=['HIS', 'FAMILY', 'WAS', 'RUINED', 'THEY', 'HAD', 'LOST', 'THEIR', 'SLAVES', 'THEIR', 'PROPERTY', 'THEIR', 'FRIENDS', 'AND', 'RELATIONS', 'THEIR', 'HOME', 'HAD', 'TASTED', 'OF', 'ALL', 'THE', 'CRUELTY', 'OF', 'DEFEAT'] 
+6128-63241-0005-562: hyp=['HIS', 'FAMILY', 'WAS', 'RUINED', 'THEY', 'HAD', 'LOST', 'THEIR', 'SLAVES', 'THEIR', 'PROPERTY', 'THE', 'FRIENDS', 'AND', 'RELATIONS', 'THE', 'HOME', 'HAD', 'TASTED', 'OF', 'ALL', 'THE', 'CRUELTY', 'OF', 'DEFEAT'] +6128-63241-0006-563: ref=['THE', 'STATE', 'OF', 'MISSISSIPPI', 'SEEMED', 'TO', 'HIM', 'THE', 'STATE', 'OF', 'DESPAIR', 'SO', 'HE', 'SURRENDERED', 'THE', 'REMNANTS', 'OF', 'HIS', 'PATRIMONY', 'TO', 'HIS', 'MOTHER', 'AND', 'SISTERS', 'AND', 'AT', 'NEARLY', 'THIRTY', 'YEARS', 'OF', 'AGE', 'ALIGHTED', 'FOR', 'THE', 'FIRST', 'TIME', 'IN', 'NEW', 'YORK', 'IN', 'THE', 'COSTUME', 'OF', 'HIS', 'PROVINCE', 'WITH', 'FIFTY', 'DOLLARS', 'IN', 'HIS', 'POCKET', 'AND', 'A', 'GNAWING', 'HUNGER', 'IN', 'HIS', 'HEART'] +6128-63241-0006-563: hyp=['THE', 'STATE', 'OF', 'MISSISSIPPI', 'SEEM', 'TO', 'HIM', 'THE', 'STATE', 'OF', 'DESPAIR', 'SO', 'HE', 'SURRENDERED', 'THE', 'REMNANTS', 'OF', 'HIS', 'PATRIMONY', 'TO', 'HIS', 'MOTHER', 'AND', 'SISTERS', 'AND', 'AT', 'NEARLY', 'THIRTY', 'YEARS', 'OF', 'AGE', 'DELIGHTED', 'FOR', 'THE', 'FIRST', 'TIME', 'IN', 'NEW', 'YORK', 'IN', 'THE', 'COSTUME', 'OF', 'HIS', 'PROVINCE', 'WITH', 'FIFTY', 'DOLLARS', 'IN', 'HIS', 'POCKET', 'AND', 'A', 'GNAWING', 'HUNGER', 'IN', 'HIS', 'HEART'] +6128-63241-0007-564: ref=['IT', 'WAS', 'IN', 'THE', 'FEMALE', 'LINE', 'AS', 'BASIL', 'RANSOM', 'HAD', 'WRITTEN', 'IN', 'ANSWERING', 'HER', 'LETTER', 'WITH', 'A', 'GOOD', 'DEAL', 'OF', 'FORM', 'AND', 'FLOURISH', 'HE', 'SPOKE', 'AS', 'IF', 'THEY', 'HAD', 'BEEN', 'ROYAL', 'HOUSES'] +6128-63241-0007-564: hyp=['IT', 'WAS', 'IN', 'THE', 'FEMALE', 'LINE', 'AS', 'BALES', 'HAD', 'RANSOM', 'HAD', 'WRITTEN', 'IN', 'ANSWERING', 'HER', 'LETTER', 'WITH', 'A', 'GOOD', 'DEAL', 'OF', 'FORM', 'AND', 'FLOURISH', 'HE', 'SPOKE', 'AS', 'IF', 'THEY', 'HAD', 'BEEN', 'ROYAL', 'HOUSES'] +6128-63241-0008-565: ref=['IF', 'IT', 'HAD', 'BEEN', 'POSSIBLE', 'TO', 'SEND', 'MISSUS', 'RANSOM', 'MONEY', 'OR', 'EVEN', 'CLOTHES', 'SHE', 'WOULD', 'HAVE', 'LIKED', 'THAT', 'BUT', 'SHE', 'HAD', 'NO', 'MEANS', 'OF', 'ASCERTAINING', 'HOW', 'SUCH', 'AN', 'OFFERING', 'WOULD', 'BE', 'TAKEN'] +6128-63241-0008-565: hyp=['IF', 'IT', 'HAD', 'BEEN', 'POSSIBLE', 'TO', 'SEND', 'MISSUS', 'RANSOM', 'MONEY', 'OR', 'EVEN', 'CLOTHES', 'SHE', 'WOULD', 'HAVE', 'LIKED', 'THAT', 'BUT', 'SHE', 'HAD', 'NO', 'MEANS', 'OF', 'ASCERTAINING', 'HER', 'SUCH', 'AN', 'OFFERING', 'WOULD', 'BE', 'TAKEN'] +6128-63241-0009-566: ref=['OLIVE', 'HAD', 'A', 'FEAR', 'OF', 'EVERYTHING', 'BUT', 'HER', 'GREATEST', 'FEAR', 'WAS', 'OF', 'BEING', 'AFRAID'] +6128-63241-0009-566: hyp=['OLIVE', 'HAD', 'A', 'FEAR', 'OF', 'EVERYTHING', 'BUT', 'HER', 'GREATEST', 'FEAR', 'WAS', 'OF', 'BEING', 'AFRAID'] +6128-63241-0010-567: ref=['SHE', 'HAD', 'ERECTED', 'IT', 'INTO', 'A', 'SORT', 'OF', 'RULE', 'OF', 'CONDUCT', 'THAT', 'WHENEVER', 'SHE', 'SAW', 'A', 'RISK', 'SHE', 'WAS', 'TO', 'TAKE', 'IT', 'AND', 'SHE', 'HAD', 'FREQUENT', 'HUMILIATIONS', 'AT', 'FINDING', 'HERSELF', 'SAFE', 'AFTER', 'ALL'] +6128-63241-0010-567: hyp=['SHE', 'HAD', 'ERECTED', 'IT', 'INTO', 'A', 'SORT', 'OF', 'RULE', 'OF', 'CONDUCT', 'THAT', 'WHENEVER', 'SHE', 'SAW', 'A', 'RISK', 'SHE', 'WAS', 'TO', 'TAKE', 'IT', 'AND', 'SHE', 'HAD', 'FREQUENT', 'HUMILIATIONS', 'AT', 'FINDING', 'HERSELF', 'SAVED', 'AFTER', 'ALL'] +6128-63241-0011-568: ref=['SHE', 'WAS', 'PERFECTLY', 'SAFE', 'AFTER', 'WRITING', 'TO', 'BASIL', 'RANSOM', 'AND', 'INDEED', 'IT', 'WAS', 'DIFFICULT', 'TO', 'SEE', 'WHAT', 'HE', 'COULD', 'HAVE', 'DONE', 'TO', 'HER', 'EXCEPT', 'THANK', 'HER', 'HE', 'WAS', 'ONLY', 'EXCEPTIONALLY', 
'SUPERLATIVE', 'FOR', 'HER', 'LETTER', 'AND', 'ASSURE', 'HER', 'THAT', 'HE', 'WOULD', 'COME', 'AND', 'SEE', 'HER', 'THE', 'FIRST', 'TIME', 'HIS', 'BUSINESS', 'HE', 'WAS', 'BEGINNING', 'TO', 'GET', 'A', 'LITTLE', 'SHOULD', 'TAKE', 'HIM', 'TO', 'BOSTON'] +6128-63241-0011-568: hyp=['SHE', 'WAS', 'PERFECTLY', 'SAFE', 'AFTER', 'WRITING', 'TO', 'BASIL', 'RANSOM', 'AND', 'INDEED', 'IT', 'WAS', 'DIFFICULT', 'TO', 'SEE', 'WHAT', 'HE', 'COULD', 'HAVE', 'DONE', 'TO', 'HER', 'EXCEPT', 'THANK', 'HER', 'HE', 'WAS', 'ONLY', 'EXCEPTIONALLY', 'SUPERNATIVE', 'FOR', 'HER', 'LETTER', 'AND', 'ASSURE', 'HER', 'THAT', 'HE', 'WOULD', 'COME', 'AND', 'SEE', 'HER', 'THE', 'FIRST', 'TIME', 'HIS', 'BUSINESS', 'HE', 'WAS', 'BEGINNING', 'TO', 'GET', 'A', 'LITTLE', 'SHOULD', 'TAKE', 'HIM', 'TO', 'BOSTON'] +6128-63241-0012-569: ref=['HE', 'WAS', 'TOO', 'SIMPLE', 'TOO', 'MISSISSIPPIAN', 'FOR', 'THAT', 'SHE', 'WAS', 'ALMOST', 'DISAPPOINTED'] +6128-63241-0012-569: hyp=['HE', 'WAS', 'TOO', 'SIMPLE', 'TOO', 'MISSISSIPPIAN', 'FOR', 'THAT', 'SHE', 'WAS', 'ALMOST', 'DISAPPOINTED'] +6128-63241-0013-570: ref=['OF', 'ALL', 'THINGS', 'IN', 'THE', 'WORLD', 'CONTENTION', 'WAS', 'MOST', 'SWEET', 'TO', 'HER', 'THOUGH', 'WHY', 'IT', 'IS', 'HARD', 'TO', 'IMAGINE', 'FOR', 'IT', 'ALWAYS', 'COST', 'HER', 'TEARS', 'HEADACHES', 'A', 'DAY', 'OR', 'TWO', 'IN', 'BED', 'ACUTE', 'EMOTION', 'AND', 'IT', 'WAS', 'VERY', 'POSSIBLE', 'BASIL', 'RANSOM', 'WOULD', 'NOT', 'CARE', 'TO', 'CONTEND'] +6128-63241-0013-570: hyp=['OF', 'ALL', 'THINGS', 'IN', 'THE', 'WORLD', 'CONTENTION', 'WAS', 'MOST', 'SWEET', 'TO', 'HER', 'THOUGH', 'WHY', 'IT', 'IS', 'HARD', 'TO', 'IMAGINE', 'FOR', 'IT', 'ALWAYS', 'COST', 'HER', 'TEARS', 'HEADACHES', 'A', 'DAY', 'OR', 'TWO', 'IN', 'BED', 'ACUTORATION', 'AND', 'IT', 'WAS', 'VERY', 'POSSIBLE', 'BASER', 'RANSOM', 'WOULD', 'NOT', 'CARE', 'TO', 'COMPEND'] +6128-63244-0000-531: ref=['MISS', 'CHANCELLOR', 'HERSELF', 'HAD', 'THOUGHT', 'SO', 'MUCH', 'ON', 'THE', 'VITAL', 'SUBJECT', 'WOULD', 'NOT', 'SHE', 'MAKE', 'A', 'FEW', 'REMARKS', 'AND', 'GIVE', 'THEM', 'SOME', 'OF', 'HER', 'EXPERIENCES'] +6128-63244-0000-531: hyp=['MISS', 'CHANCELLOR', 'HERSELF', 'HAD', 'THOUGHT', 'SO', 'MUCH', 'ON', 'THE', 'VITAL', 'SUBJECT', 'WOULD', 'NOT', 'SHE', 'MAKE', 'A', 'FEW', 'REMARKS', 'AND', 'GIVE', 'THEM', 'SOME', 'OF', 'HER', 'EXPERIENCES'] +6128-63244-0001-532: ref=['HOW', 'DID', 'THE', 'LADIES', 'ON', 'BEACON', 'STREET', 'FEEL', 'ABOUT', 'THE', 'BALLOT'] +6128-63244-0001-532: hyp=['HOW', 'DID', 'THE', 'LADIES', 'AND', 'BEACON', 'STREET', 'FEEL', 'ABOUT', 'THE', 'BULLET'] +6128-63244-0002-533: ref=['PERHAPS', 'SHE', 'COULD', 'SPEAK', 'FOR', 'THEM', 'MORE', 'THAN', 'FOR', 'SOME', 'OTHERS'] +6128-63244-0002-533: hyp=['THERE', 'SHE', 'COULD', 'SPEAK', 'FOR', 'THEM', 'MORE', 'THAN', 'FOR', 'SOME', 'OTHERS'] +6128-63244-0003-534: ref=['WITH', 'HER', 'IMMENSE', 'SYMPATHY', 'FOR', 'REFORM', 'SHE', 'FOUND', 'HERSELF', 'SO', 'OFTEN', 'WISHING', 'THAT', 'REFORMERS', 'WERE', 'A', 'LITTLE', 'DIFFERENT'] +6128-63244-0003-534: hyp=['WITH', 'HER', 'MOST', 'SYMPATHY', 'FOR', 'REFORM', 'SHE', 'FOUND', 'HERSELF', 'SO', 'OFTEN', 'WISHING', 'THAT', 'WE', 'FOOLING', 'AS', 'WERE', 'A', 'LITTLE', 'DIFFERENT'] +6128-63244-0004-535: ref=['OLIVE', 'HATED', 'TO', 'HEAR', 'THAT', 'FINE', 'AVENUE', 'TALKED', 'ABOUT', 'AS', 'IF', 'IT', 'WERE', 'SUCH', 'A', 'REMARKABLE', 'PLACE', 'AND', 'TO', 'LIVE', 'THERE', 'WERE', 'A', 'PROOF', 'OF', 'WORLDLY', 'GLORY'] +6128-63244-0004-535: hyp=['I', 'HAVE', 'HATED', 'DEER', 'THAT', 'FINE', 'AVENUE', 'TALKED', 'ABOUT', 'AS', 'IF', 'IT', 'WERE', 
'SUCH', 'A', 'REMARKABLE', 'PLACE', 'AND', 'TO', 'LIVE', 'THERE', 'WHERE', 'A', 'PROOF', 'OF', 'WORLDLY', 'GLORY'] +6128-63244-0005-536: ref=['ALL', 'SORTS', 'OF', 'INFERIOR', 'PEOPLE', 'LIVED', 'THERE', 'AND', 'SO', 'BRILLIANT', 'A', 'WOMAN', 'AS', 'MISSUS', 'FARRINDER', 'WHO', 'LIVED', 'AT', 'ROXBURY', 'OUGHT', 'NOT', 'TO', 'MIX', 'THINGS', 'UP'] +6128-63244-0005-536: hyp=['ALL', 'SORTS', 'HAVE', 'CONTRAY', 'YOUR', 'PEOPLE', 'IF', 'THERE', 'AND', 'SO', 'BRILLIANT', 'A', 'WOMAN', 'AS', 'MISSUS', 'FARRENDER', 'WHO', 'LIVED', 'AT', 'ROXBURY', 'OUGHT', 'NOT', 'TO', 'MIX', 'THINGS', 'UP'] +6128-63244-0006-537: ref=['SHE', 'KNEW', 'HER', 'PLACE', 'IN', 'THE', 'BOSTON', 'HIERARCHY', 'AND', 'IT', 'WAS', 'NOT', 'WHAT', 'MISSUS', 'FARRINDER', 'SUPPOSED', 'SO', 'THAT', 'THERE', 'WAS', 'A', 'WANT', 'OF', 'PERSPECTIVE', 'IN', 'TALKING', 'TO', 'HER', 'AS', 'IF', 'SHE', 'HAD', 'BEEN', 'A', 'REPRESENTATIVE', 'OF', 'THE', 'ARISTOCRACY'] +6128-63244-0006-537: hyp=['SHE', 'KNEW', 'HER', 'PLACE', 'IN', 'THE', 'BOSTON', 'HILLRY', 'KEY', 'AND', 'IT', 'WAS', 'NOT', 'WHAT', 'MISSUS', 'BARRENDERS', 'SUPPOSED', 'SELL', 'HIM', 'THERE', 'WAS', 'A', 'WANT', 'OF', 'PERSPECTIVE', 'IN', 'TALKING', 'TO', 'HER', 'AS', 'IF', 'SHE', 'HAD', 'BEEN', 'I', 'REPRESENTATIVE', 'OF', 'THE', 'ARISTOCRACY'] +6128-63244-0007-538: ref=['SHE', 'WISHED', 'TO', 'WORK', 'IN', 'ANOTHER', 'FIELD', 'SHE', 'HAD', 'LONG', 'BEEN', 'PREOCCUPIED', 'WITH', 'THE', 'ROMANCE', 'OF', 'THE', 'PEOPLE'] +6128-63244-0007-538: hyp=['SHE', 'WISHED', 'TO', 'WORK', 'IN', 'ANOTHER', 'FIELD', 'SHE', 'HAD', 'LONG', 'BEEN', 'PREOCCUPIED', 'WITH', 'THE', 'ROMANCE', 'OF', 'THE', 'PEOPLE'] +6128-63244-0008-539: ref=['THIS', 'MIGHT', 'SEEM', 'ONE', 'OF', 'THE', 'MOST', 'ACCESSIBLE', 'OF', 'PLEASURES', 'BUT', 'IN', 'POINT', 'OF', 'FACT', 'SHE', 'HAD', 'NOT', 'FOUND', 'IT', 'SO'] +6128-63244-0008-539: hyp=['THIS', 'MIGHT', 'SEEM', 'ONE', 'OF', 'THE', 'MOST', 'ACCESSIBLE', 'OF', 'PLEASURES', 'BUT', 'IN', 'POINT', 'OF', 'FACT', 'SHE', 'HAD', 'NOT', 'FOUND', 'IT', 'SO'] +6128-63244-0009-540: ref=['CHARLIE', 'WAS', 'A', 'YOUNG', 'MAN', 'IN', 'A', 'WHITE', 'OVERCOAT', 'AND', 'A', 'PAPER', 'COLLAR', 'IT', 'WAS', 'FOR', 'HIM', 'IN', 'THE', 'LAST', 'ANALYSIS', 'THAT', 'THEY', 'CARED', 'MUCH', 'THE', 'MOST'] +6128-63244-0009-540: hyp=['CHARLIE', 'WAS', 'A', 'YOUNG', 'MAN', 'IN', 'A', 'WIDE', 'OVERCOAT', 'AND', 'A', 'PAPER', 'COLLAR', 'IT', 'WAS', 'BOUHAIR', 'IN', 'THE', 'LAST', 'OF', 'NICES', 'THAT', 'THE', 'CARED', 'MUCH', 'THE', 'MOST'] +6128-63244-0010-541: ref=['OLIVE', 'CHANCELLOR', 'WONDERED', 'HOW', 'MISSUS', 'FARRINDER', 'WOULD', 'TREAT', 'THAT', 'BRANCH', 'OF', 'THE', 'QUESTION'] +6128-63244-0010-541: hyp=['OLIVE', 'CHANCELLOR', 'WONDERED', 'HOW', 'MISSUS', 'KYNDER', 'WOULD', 'TREAT', 'THEIR', 'BRANCH', 'AT', 'THE', 'QUESTION'] +6128-63244-0011-542: ref=['IF', 'IT', 'BE', 'NECESSARY', 'WE', 'ARE', 'PREPARED', 'TO', 'TAKE', 'CERTAIN', 'STEPS', 'TO', 'CONCILIATE', 'THE', 'SHRINKING'] +6128-63244-0011-542: hyp=['IT', 'HAD', 'BEEN', 'NECESSARY', 'WE', 'ARE', 'PREPARED', 'TO', 'TAKE', 'CERTAIN', 'STEPS', 'TO', 'CONCILIATE', 'THE', 'SHRINKING'] +6128-63244-0012-543: ref=['OUR', 'MOVEMENT', 'IS', 'FOR', 'ALL', 'IT', 'APPEALS', 'TO', 'THE', 'MOST', 'DELICATE', 'LADIES'] +6128-63244-0012-543: hyp=["I'LL", 'MOVEMENT', 'IS', 'FULL', 'IT', 'APPEALS', 'TO', 'THE', 'MOST', 'DELICATE', 'LADIES'] +6128-63244-0013-544: ref=['RAISE', 'THE', 'STANDARD', 'AMONG', 'THEM', 'AND', 'BRING', 'ME', 'A', 'THOUSAND', 'NAMES'] +6128-63244-0013-544: hyp=['THAT', 'IS', 'THE', 'STANDARD', 'AMONG', 
'THEM', 'AND', 'BRING', 'ME', 'YOUR', 'THOUSAND', 'NAMES'] +6128-63244-0014-545: ref=['I', 'LOOK', 'AFTER', 'THE', 'DETAILS', 'AS', 'WELL', 'AS', 'THE', 'BIG', 'CURRENTS', 'MISSUS', 'FARRINDER', 'ADDED', 'IN', 'A', 'TONE', 'AS', 'EXPLANATORY', 'AS', 'COULD', 'BE', 'EXPECTED', 'OF', 'SUCH', 'A', 'WOMAN', 'AND', 'WITH', 'A', 'SMILE', 'OF', 'WHICH', 'THE', 'SWEETNESS', 'WAS', 'THRILLING', 'TO', 'HER', 'LISTENER'] +6128-63244-0014-545: hyp=['I', 'LOOK', 'AFTER', 'THE', 'DETAILS', 'AS', 'WELL', 'AS', 'THE', 'BIG', 'CURRANTS', 'MISSUS', 'FERRINDER', 'ADDED', 'IN', 'A', 'TONE', 'AS', 'EXPLANATORY', 'AS', 'COULD', 'BE', 'EXPECTED', 'OF', 'SUCH', 'A', 'WOMAN', 'AND', 'WITH', 'A', 'SMILE', 'OF', 'WHICH', 'THE', 'SWEETNESS', 'WAS', 'THRILLING', 'TO', 'HER', 'LISTENER'] +6128-63244-0015-546: ref=['SAID', 'OLIVE', 'CHANCELLOR', 'WITH', 'A', 'FACE', 'WHICH', 'SEEMED', 'TO', 'PLEAD', 'FOR', 'A', 'REMISSION', 'OF', 'RESPONSIBILITY'] +6128-63244-0015-546: hyp=['SAID', 'OLD', 'CHANCELLOR', 'WITH', 'A', 'FACE', 'WHICH', 'SEEMED', 'TO', 'PLEAD', 'FOR', 'A', "REMISSIONER'S", 'RESPONSIBILITY'] +6128-63244-0016-547: ref=['I', 'WANT', 'TO', 'BE', 'NEAR', 'TO', 'THEM', 'TO', 'HELP', 'THEM'] +6128-63244-0016-547: hyp=['HOW', 'WARNED', 'TO', 'BE', 'NEAR', 'TO', 'THEM', 'TO', 'HELP', 'THEM'] +6128-63244-0017-548: ref=['IT', 'WAS', 'ONE', 'THING', 'TO', 'CHOOSE', 'FOR', 'HERSELF', 'BUT', 'NOW', 'THE', 'GREAT', 'REPRESENTATIVE', 'OF', 'THE', 'ENFRANCHISEMENT', 'OF', 'THEIR', 'SEX', 'FROM', 'EVERY', 'FORM', 'OF', 'BONDAGE', 'HAD', 'CHOSEN', 'FOR', 'HER'] +6128-63244-0017-548: hyp=['IT', 'WAS', 'ONE', 'THING', 'TO', 'CHOOSE', 'TO', 'HERSELF', 'BUT', 'NOW', 'THE', 'GREAT', 'REPRESENTATIVE', 'OF', 'THE', 'ENCRONTISEMENT', 'OF', 'THEIR', 'SEX', 'FROM', 'EVERY', 'FORM', 'OF', 'BANDAGE', 'HAD', 'CHOSEN', 'FOR', 'HER'] +6128-63244-0018-549: ref=['THE', 'UNHAPPINESS', 'OF', 'WOMEN'] +6128-63244-0018-549: hyp=['THE', 'UNHAPPINESS', 'OF', 'WOMEN'] +6128-63244-0019-550: ref=['THEY', 'WERE', 'HER', 'SISTERS', 'THEY', 'WERE', 'HER', 'OWN', 'AND', 'THE', 'DAY', 'OF', 'THEIR', 'DELIVERY', 'HAD', 'DAWNED'] +6128-63244-0019-550: hyp=['THEY', 'WERE', 'HER', 'SISTERS', 'THERE', 'WERE', 'HER', 'OWN', 'AND', 'THE', 'DAY', 'OF', 'THEIR', 'DELIVERY', 'HAD', 'DAWNED'] +6128-63244-0020-551: ref=['THIS', 'WAS', 'THE', 'ONLY', 'SACRED', 'CAUSE', 'THIS', 'WAS', 'THE', 'GREAT', 'THE', 'JUST', 'REVOLUTION', 'IT', 'MUST', 'TRIUMPH', 'IT', 'MUST', 'SWEEP', 'EVERYTHING', 'BEFORE', 'IT', 'IT', 'MUST', 'EXACT', 'FROM', 'THE', 'OTHER', 'THE', 'BRUTAL', 'BLOOD', 'STAINED', 'RAVENING', 'RACE', 'THE', 'LAST', 'PARTICLE', 'OF', 'EXPIATION'] +6128-63244-0020-551: hyp=['THIS', 'WAS', 'THE', 'ONLY', 'SACRED', 'CAUSE', 'THIS', 'WAS', 'THE', 'GREAT', 'THE', 'DESTRULICIAN', 'IT', 'WAS', 'TRIUMPH', 'IT', 'WAS', 'SWEEP', 'EVERYTHING', 'BEFORE', 'IT', 'IT', 'MUST', 'EXACT', 'FROM', 'THE', 'OTHER', 'THE', 'BRUTAL', 'BLOOD', 'STAINED', 'RAVENING', 'RACE', 'THE', 'LOST', 'PARTICLE', 'OF', 'EXPLANATION'] +6128-63244-0021-552: ref=['THEY', 'WOULD', 'BE', 'NAMES', 'OF', 'WOMEN', 'WEAK', 'INSULTED', 'PERSECUTED', 'BUT', 'DEVOTED', 'IN', 'EVERY', 'PULSE', 'OF', 'THEIR', 'BEING', 'TO', 'THE', 'CAUSE', 'AND', 'ASKING', 'NO', 'BETTER', 'FATE', 'THAN', 'TO', 'DIE', 'FOR', 'IT'] +6128-63244-0021-552: hyp=['THERE', 'HAD', 'BEEN', 'NAMES', 'OF', 'WOMEN', 'WEAK', 'INSULTED', 'PERSECUTED', 'BUT', 'DEVOTED', 'IN', 'EVERY', 'PART', 'OF', 'THEIR', 'BEING', 'TO', 'THE', 'CAUSE', 'AND', 'ASKING', 'NO', 'BETTER', 'FATE', 'THAN', 'TO', 'DIE', 'FOR', 'IT'] +6128-63244-0022-553: ref=['IT', 
'WAS', 'NOT', 'CLEAR', 'TO', 'THIS', 'INTERESTING', 'GIRL', 'IN', 'WHAT', 'MANNER', 'SUCH', 'A', 'SACRIFICE', 'AS', 'THIS', 'LAST', 'WOULD', 'BE', 'REQUIRED', 'OF', 'HER', 'BUT', 'SHE', 'SAW', 'THE', 'MATTER', 'THROUGH', 'A', 'KIND', 'OF', 'SUNRISE', 'MIST', 'OF', 'EMOTION', 'WHICH', 'MADE', 'DANGER', 'AS', 'ROSY', 'AS', 'SUCCESS'] +6128-63244-0022-553: hyp=['IT', 'WILL', 'NOT', 'CLEAR', 'TO', 'THIS', 'INTERESTING', 'GIRL', 'IN', 'WHAT', 'MANNER', 'SUCH', 'A', 'SACRIFICE', 'AS', 'THIS', 'LAST', 'WOULD', 'BE', 'REQUIRED', 'OF', 'HER', 'BUT', 'SHE', 'SOLD', 'A', 'MATTER', 'THROUGH', 'A', 'KIND', 'OF', 'SUNRISE', 'MIST', 'OF', 'THE', 'NATION', 'WHICH', 'MADE', 'DANGER', 'AS', 'ROSY', 'IS', 'SUCCESS'] +6128-63244-0023-554: ref=['WHEN', 'MISS', 'BIRDSEYE', 'APPROACHED', 'IT', 'TRANSFIGURED', 'HER', 'FAMILIAR', 'HER', 'COMICAL', 'SHAPE', 'AND', 'MADE', 'THE', 'POOR', 'LITTLE', 'HUMANITARY', 'HACK', 'SEEM', 'ALREADY', 'A', 'MARTYR'] +6128-63244-0023-554: hyp=['WHEN', 'MISS', "BIRD'S", 'EYED', 'APPROACHED', 'IT', 'TRANSFIGURED', 'HER', 'FAMILIAR', 'HER', 'COMICAL', 'SHAPE', 'AND', 'MADE', 'THE', 'POOR', 'LITTLE', 'HUMANITY', 'HACK', 'SIMPLE', 'ALREADY', 'A', 'MARTYR'] +6128-63244-0024-555: ref=['OLIVE', 'CHANCELLOR', 'LOOKED', 'AT', 'HER', 'WITH', 'LOVE', 'REMEMBERED', 'THAT', 'SHE', 'HAD', 'NEVER', 'IN', 'HER', 'LONG', 'UNREWARDED', 'WEARY', 'LIFE', 'HAD', 'A', 'THOUGHT', 'OR', 'AN', 'IMPULSE', 'FOR', 'HERSELF'] +6128-63244-0024-555: hyp=['I', 'LEAVE', 'CHANCELLOR', 'LOOKED', 'AT', 'HER', 'WITH', 'LOVE', 'REMEMBERED', 'THAT', 'SHE', 'HAD', 'NEVER', 'IN', 'HER', 'LONG', 'AND', 'REWARDED', 'WEARY', 'LIFE', 'HAD', 'A', 'THOUGHT', 'OF', 'AN', 'IMPULSE', 'FOR', 'HERSELF'] +6128-63244-0025-556: ref=['SHE', 'HAD', 'BEEN', 'CONSUMED', 'BY', 'THE', 'PASSION', 'OF', 'SYMPATHY', 'IT', 'HAD', 'CRUMPLED', 'HER', 'INTO', 'AS', 'MANY', 'CREASES', 'AS', 'AN', 'OLD', 'GLAZED', 'DISTENDED', 'GLOVE'] +6128-63244-0025-556: hyp=['SHE', 'HAD', 'BEEN', 'CONSUMED', 'BY', 'THE', 'PASSION', 'OF', 'SYMPATHY', 'IT', 'HAD', 'CRUMBLED', 'HER', 'INTO', 'AS', 'MANY', 'CREASES', 'AS', 'AN', 'OLD', 'GLAZED', 'DISTENDED', 'GLOVE'] +6432-63722-0000-2431: ref=['BUT', 'SCUSE', 'ME', "DIDN'T", 'YO', 'FIGGER', 'ON', 'DOIN', 'SOME', 'DETECTIN', 'AN', 'GIVE', 'UP', 'FISHIN'] +6432-63722-0000-2431: hyp=['PECUSE', 'ME', 'THEN', "YOU'LL", 'FOR', 'GONE', 'DOING', 'SOME', 'DETECTIVE', 'AND', 'GIVEN', 'UP', "FISHIN'"] +6432-63722-0001-2432: ref=['AND', 'SHAG', 'WITH', 'THE', 'FREEDOM', 'OF', 'AN', 'OLD', 'SERVANT', 'STOOD', 'LOOKING', 'AT', 'HIS', 'MASTER', 'AS', 'IF', 'NOT', 'QUITE', 'UNDERSTANDING', 'THE', 'NEW', 'TWIST', 'THE', 'AFFAIRS', 'HAD', 'TAKEN'] +6432-63722-0001-2432: hyp=['AND', 'SHAG', 'WITH', 'THE', 'FREEDOM', 'OF', 'AN', 'OLD', 'SERVANT', 'STOOD', 'LOOKING', 'AT', 'HIS', 'MASTERY', 'AS', 'IF', 'NOT', 'QUITE', 'UNDERSTANDING', 'THE', 'NEW', 'TWIST', 'THE', 'AFFAIRS', 'HAD', 'TAKEN'] +6432-63722-0002-2433: ref=["I'M", 'GOING', 'OFF', 'FISHING', 'I', 'MAY', 'NOT', 'CATCH', 'ANYTHING', 'I', 'MAY', 'NOT', 'WANT', 'TO', 'AFTER', 'I', 'GET', 'THERE'] +6432-63722-0002-2433: hyp=["I'M", 'GOING', 'OUR', 'FISHIN', 'I', 'MAY', 'NOT', 'CATCH', 'ANYTHING', 'AND', 'MAY', 'NOT', 'WANT', 'TO', 'AFTER', 'I', 'GET', 'THERE'] +6432-63722-0003-2434: ref=['GET', 'READY', 'SHAG', 'YES', 'SAH', 'COLONEL'] +6432-63722-0003-2434: hyp=['GET', 'READY', 'SHAG', 'YES', 'I', 'CAN'] +6432-63722-0004-2435: ref=['AND', 'HAVING', 'PUT', 'HIMSELF', 'IN', 'A', 'FAIR', 'WAY', 'AS', 'HE', 'HOPED', 'TO', 'SOLVE', 'SOME', 'OF', 'THE', 'PROBLEMS', 'CONNECTED', 'WITH', 
'THE', 'DARCY', 'CASE', 'COLONEL', 'ASHLEY', 'WENT', 'DOWN', 'TO', 'POLICE', 'HEADQUARTERS', 'TO', 'LEARN', 'MORE', 'FACTS', 'IN', 'CONNECTION', 'WITH', 'THE', 'MURDER', 'OF', 'THE', 'EAST', 'INDIAN'] +6432-63722-0004-2435: hyp=['AND', 'HAVING', 'PUT', 'HIMSELF', 'IN', 'A', 'FAIR', 'WAY', 'AS', 'HE', 'HOPED', 'TO', 'SOLVE', 'SOME', 'OF', 'THE', 'PROBLEMS', 'CONNECTED', 'WITH', 'THE', 'DARCY', 'CASE', 'COLONEL', 'HASHY', 'WENT', 'DOWN', 'TO', 'POLICE', 'HEADQUARTERS', 'TO', 'LEARN', 'MORE', 'FACTS', 'IN', 'THE', 'CONNECTION', 'WITH', 'THE', 'MURDER', 'OF', 'THE', 'EAST', 'INDIAN'] +6432-63722-0005-2436: ref=['PINKUS', 'AND', 'DONOVAN', "HAVEN'T", 'THEY', 'CARROLL', 'YEP'] +6432-63722-0005-2436: hyp=['PINKIS', 'AND', 'DONOVAN', "HAVEN'T", 'THEY', 'CAROL', 'HE', 'EP'] +6432-63722-0006-2437: ref=['CARROLL', 'WAS', 'TOO', 'MUCH', 'ENGAGED', 'IN', 'WATCHING', 'THE', 'BLUE', 'SMOKE', 'CURL', 'LAZILY', 'UPWARD', 'FROM', 'HIS', 'CIGAR', 'JUST', 'THEN', 'TO', 'SAY', 'MORE'] +6432-63722-0006-2437: hyp=['GAL', 'WAS', 'TOO', 'MUCH', 'ENGAGED', 'IN', 'WATCHING', 'THE', 'BLUE', 'SMOKE', 'CURL', 'LAZILY', 'UPWARD', 'FROM', 'HIS', 'CIGAR', 'JUST', 'THEN', 'TO', 'SAY', 'MORE'] +6432-63722-0007-2438: ref=['ARE', 'YOU', 'GOING', 'TO', 'WORK', 'ON', 'THAT', 'CASE', 'COLONEL'] +6432-63722-0007-2438: hyp=['ARE', 'YOU', 'GOING', 'TO', 'WORK', 'ON', 'THAT', 'CASE', 'COLONEL'] +6432-63722-0008-2439: ref=['BUT', 'HE', "HADN'T", 'ANY', 'MORE', 'TO', 'DO', 'WITH', 'IT', 'COLONEL', 'THAN', 'THAT', 'CAT'] +6432-63722-0008-2439: hyp=['BUT', 'HE', "HADN'T", 'ANY', 'MORE', 'TO', 'DO', 'WITH', 'IT', 'COLONEL', 'THAN', 'THAT', 'CAT'] +6432-63722-0009-2440: ref=['PERHAPS', 'NOT', 'ADMITTED', 'COLONEL', 'ASHLEY'] +6432-63722-0009-2440: hyp=['PERHAPS', 'NOT', 'ADMITTED', 'COLONEL', 'ASHLEY'] +6432-63722-0010-2441: ref=["WE'VE", 'GOT', 'OUR', 'MAN', 'AND', "THAT'S", 'ALL', 'WE', 'WANT'] +6432-63722-0010-2441: hyp=["WE'VE", 'GOT', 'OUR', 'MAN', 'AND', "THAT'S", 'ALL', 'WE', 'WANT'] +6432-63722-0011-2442: ref=["YOU'RE", 'ON', 'THE', 'DARCY', 'CASE', 'THEY', 'TELL', 'ME', 'IN', 'A', 'WAY', 'YES'] +6432-63722-0011-2442: hyp=["YOU'RE", 'ON', 'THE', 'DARCY', 'CASE', 'THEY', 'TELL', 'ME', 'IN', 'A', 'WAY', 'YES'] +6432-63722-0012-2443: ref=["I'M", 'WORKING', 'IN', 'THE', 'INTERESTS', 'OF', 'THE', 'YOUNG', 'MAN'] +6432-63722-0012-2443: hyp=["I'M", 'WORKING', 'IN', 'THE', 'INTEREST', 'OF', 'THE', 'YOUNG', 'MAN'] +6432-63722-0013-2444: ref=["IT'S", 'JUST', 'ONE', 'OF', 'THEM', 'COINCIDENCES', 'LIKE'] +6432-63722-0013-2444: hyp=["IT'S", 'JUST', 'ONE', 'OF', 'THEM', 'COINCIDENCES', 'LIKE'] +6432-63722-0014-2445: ref=['BUSTED', 'HIS', 'HEAD', 'IN', 'WITH', 'A', 'HEAVY', 'CANDLESTICK', 'ONE', 'OF', 'A', 'PAIR'] +6432-63722-0014-2445: hyp=['BUSTED', 'HIS', 'HEAD', 'IN', 'WITH', 'A', 'HEAVY', 'CANDLESTICK', 'ONE', 'OF', 'A', 'PAIR'] +6432-63722-0015-2446: ref=['GAD', 'EXCLAIMED', 'THE', 'COLONEL'] +6432-63722-0015-2446: hyp=['GAD', 'EXPLAINED', 'THE', 'COLONEL'] +6432-63722-0016-2447: ref=['THE', 'VERY', 'PAIR', 'I', 'WAS', 'GOING', 'TO', 'BUY'] +6432-63722-0016-2447: hyp=['THE', 'VERY', 'PAIR', 'I', 'WAS', 'GOING', 'TO', 'BUY'] +6432-63722-0017-2448: ref=['LOOK', 'HERE', 'COLONEL', 'DO', 'YOU', 'KNOW', 'ANYTHING', 'ABOUT', 'THIS'] +6432-63722-0017-2448: hyp=['LOOK', 'HERE', 'COLONEL', 'DO', 'YOU', 'KNOW', 'ANYTHING', 'ABOUT', 'THIS'] +6432-63722-0018-2449: ref=['AND', 'THE', "DETECTIVE'S", 'PROFESSIONAL', 'INSTINCTS', 'GOT', 'THE', 'UPPER', 'HAND', 'OF', 'HIS', 'FRIENDLINESS', 'NOT', 'THE', 'LEAST', 'IN', 'THE', 'WORLD', 'NOT', 'AS', 'MUCH', 
'AS', 'YOU', 'DO', 'WAS', 'THE', 'COOL', 'ANSWER'] +6432-63722-0018-2449: hyp=['AND', 'THE', "DETECTIVE'S", 'PROFESSIONAL', 'INSTINCTS', 'GOT', 'THE', 'UPPER', 'HAND', 'OF', 'HIS', 'FRIENDLINESS', 'NOT', 'THE', 'LEAST', 'IN', 'THE', 'WORLD', 'NOT', 'AS', 'MUCH', 'AS', 'YOU', 'DO', 'WAS', 'THE', 'COOL', 'ANSWER'] +6432-63722-0019-2450: ref=['I', 'HAPPENED', 'TO', 'SEE', 'THOSE', 'CANDLESTICKS', 'IN', 'THE', 'WINDOW', 'OF', 'SINGA', "PHUT'S", 'SHOP', 'THE', 'OTHER', 'DAY', 'AND', 'I', 'MADE', 'UP', 'MY', 'MIND', 'TO', 'BUY', 'THEM', 'WHEN', 'I', 'HAD', 'A', 'CHANCE'] +6432-63722-0019-2450: hyp=['I', 'HAPPENED', 'TO', 'SEE', 'THOSE', 'CANDLESTICKS', 'IN', 'THE', 'WINDOW', 'OF', 'SINGA', "PHUT'S", 'SHOP', 'THE', 'OTHER', 'DAY', 'AND', 'I', 'MADE', 'UP', 'MY', 'MIND', 'TO', 'BUY', 'THEM', 'WHEN', 'I', 'HAD', 'A', 'CHANCE'] +6432-63722-0020-2451: ref=['NOW', "I'M", 'AFRAID', 'I', "WON'T", 'BUT', 'HOW', 'DID', 'IT', 'HAPPEN'] +6432-63722-0020-2451: hyp=['NOW', "I'M", 'AFRAID', 'I', "WON'T", 'BUT', 'HOW', 'DID', 'IT', 'HAPPEN'] +6432-63722-0021-2452: ref=['PHUT', 'I', "DON'T", 'KNOW', 'WHETHER', "THAT'S", 'HIS', 'FIRST', 'OR', 'HIS', 'LAST', 'NAME', 'ANYHOW', 'HE', 'HAD', 'A', 'PARTNER', 'NAMED', 'SHERE', 'ALI'] +6432-63722-0021-2452: hyp=['FAT', 'I', "DON'T", 'KNOW', 'WHETHER', "THAT'S", 'HIS', 'FIRST', 'OR', 'HIS', 'LAST', 'NAME', 'ANYHOW', 'HE', 'HAD', 'A', 'PARTNER', 'NAMED', 'SHEAR', 'ALI'] +6432-63722-0022-2453: ref=['ANYHOW', 'HE', 'AND', 'PHUT', "DIDN'T", 'GET', 'ALONG', 'VERY', 'WELL', 'IT', 'SEEMS'] +6432-63722-0022-2453: hyp=['ANYHOW', 'HE', 'INFORT', "DIDN'T", 'GET', 'ALONG', 'VERY', 'WELL', 'IT', 'SEEMS'] +6432-63722-0023-2454: ref=['NEIGHBORS', 'OFTEN', 'HEARD', 'EM', 'SCRAPPIN', 'A', 'LOT', 'AND', 'THIS', 'AFTERNOON', 'THEY', 'WENT', 'AT', 'IT', 'AGAIN', 'HOT', 'AND', 'HEAVY'] +6432-63722-0023-2454: hyp=['LABORS', 'OFTEN', 'HEARD', 'HIM', 'SCRAP', 'IN', 'A', 'LOT', 'AND', 'THIS', 'AFTERNOON', 'THEY', 'WENT', 'AT', 'IT', 'AGAIN', 'AT', 'HOT', 'AND', 'HEAVY'] +6432-63722-0024-2455: ref=['TOWARD', 'DARK', 'A', 'MAN', 'WENT', 'IN', 'TO', 'BUY', 'A', 'LAMP'] +6432-63722-0024-2455: hyp=['TO', 'OUR', 'DARK', 'A', 'MAN', 'WENT', 'IN', 'TO', 'BUY', 'A', 'LAMP'] +6432-63722-0025-2456: ref=['HE', 'FOUND', 'THE', 'PLACE', 'WITHOUT', 'A', 'LIGHT', 'IN', 'IT', 'STUMBLED', 'OVER', 'SOMETHING', 'ON', 'THE', 'FLOOR', 'AND', 'THERE', 'WAS', "ALI'S", 'BODY', 'WITH', 'THE', 'HEAD', 'BUSTED', 'IN', 'AND', 'THIS', 'HEAVY', 'CANDLESTICK', 'NEAR', 'IT'] +6432-63722-0025-2456: hyp=['HE', 'FOUND', 'THE', 'PLACE', 'WITHOUT', 'A', 'LIGHT', 'IN', 'IT', 'STUMBLED', 'OVER', 'SOMETHING', 'ON', 'THE', 'FLOOR', 'AND', 'THERE', 'WAS', "ALI'S", 'BODY', 'WITH', 'THE', 'HEAD', 'BUSTED', 'IN', 'AND', 'THIS', 'HEAVY', 'CANDLESTICK', 'NEAR', 'IT'] +6432-63722-0026-2457: ref=['SURE', 'HELD', 'SO', 'TIGHT', 'WE', 'COULD', 'HARDLY', 'GET', 'IT', 'OUT'] +6432-63722-0026-2457: hyp=['SURE', 'HELD', 'SO', 'TIGHT', 'WE', 'COULD', 'HARDLY', 'GET', 'IT', 'OUT'] +6432-63722-0027-2458: ref=['MAYBE', 'THE', 'FIGHT', 'WAS', 'ABOUT', 'WHO', 'OWNED', 'THE', 'WATCH', 'FOR', 'THE', 'DAGOS', 'TALKED', 'IN', 'THEIR', 'FOREIGN', 'LINGO', 'AND', 'NONE', 'OF', 'THE', 'NEIGHBORS', 'COULD', 'TELL', 'WHAT', 'THEY', 'WERE', 'SAYIN', 'I', 'SEE'] +6432-63722-0027-2458: hyp=['MAYBE', 'THE', 'FIGHT', 'WAS', 'ABOUT', 'WHO', 'ON', 'THE', 'WATCH', 'FOR', 'THE', 'DAGGERS', 'TALKED', 'IN', 'THEIR', 'FOREIGN', 'LINGO', 'AND', 'NONE', 'OF', 'THE', 'NEIGHBOURS', 'COULD', 'TELL', 'WHAT', 'THEY', 'WERE', 'SAYING', 'I', 'SEE'] +6432-63722-0028-2459: ref=['AND', 
'THE', 'WATCH', 'HAVE', 'YOU', 'IT', 'YES', "IT'S", 'HERE'] +6432-63722-0028-2459: hyp=['AND', 'THE', 'WATCH', 'HAVE', 'YOU', 'IT', 'YES', "IT'S", 'HERE'] +6432-63722-0029-2460: ref=["THAT'S", 'THE', 'WATCH', 'ANNOUNCED', 'THE', 'HEADQUARTERS', 'DETECTIVE', 'REACHING', 'IN', 'FOR', 'IT', 'GOING', 'YET', 'SEE'] +6432-63722-0029-2460: hyp=["THAT'S", 'THE', 'WATCH', 'ANNOUNCED', 'THE', 'HEADQUARTERS', 'DETECTIVE', 'REACHING', 'IN', 'FOR', 'IT', 'GOING', 'IN', 'SEE'] +6432-63722-0030-2461: ref=["YOU'RE", 'NOT', 'AS', 'SQUEAMISH', 'AS', 'ALL', 'THAT', 'ARE', 'YOU', 'JUST', 'BECAUSE', 'IT', 'WAS', 'IN', 'A', 'DEAD', "MAN'S", 'HAND', 'AND', 'IN', 'A', "WOMAN'S"] +6432-63722-0030-2461: hyp=["YOU'RE", 'NOT', 'A', 'SCREAMISH', 'AS', 'ALL', 'THAT', 'ARE', 'YOU', 'JUST', 'BECAUSE', 'IT', 'WAS', 'IN', 'A', 'DEAD', "MAN'S", 'HANDS', 'AND', 'A', "WOMAN'S"] +6432-63722-0031-2462: ref=['AND', "DONOVAN'S", 'VOICE', 'WAS', 'PLAINLY', 'SKEPTICAL'] +6432-63722-0031-2462: hyp=['AND', "DOLOMAN'S", 'VOICE', 'WAS', 'PLAINLY', 'SCEPTICAL'] +6432-63722-0032-2463: ref=['YES', 'IT', 'MAY', 'HAVE', 'SOME', 'ROUGH', 'EDGES', 'ON', 'IT'] +6432-63722-0032-2463: hyp=['YES', 'IT', 'MAY', 'HAVE', 'SOME', 'ROUGH', 'EDGES', 'ON', 'IT'] +6432-63722-0033-2464: ref=['AND', "I'VE", 'READ', 'ENOUGH', 'ABOUT', 'GERMS', 'TO', 'KNOW', 'THE', 'DANGER', "I'D", 'ADVISE', 'YOU', 'TO', 'BE', 'CAREFUL'] +6432-63722-0033-2464: hyp=['AND', "I'VE", 'READ', 'ENOUGH', 'ABOUT', 'GERMS', 'TO', 'KNOW', 'THE', 'DANGER', "I'D", 'ADVISE', 'YOU', 'TO', 'BE', 'CAREFUL'] +6432-63722-0034-2465: ref=['IF', 'YOU', "DON'T", 'MIND', 'I', 'SHOULD', 'LIKE', 'TO', 'EXAMINE', 'THIS', 'A', 'BIT'] +6432-63722-0034-2465: hyp=['IF', 'YOU', "DON'T", 'MIND', 'I', 'SHOULD', 'LIKE', 'TO', 'EXAMINE', 'THIS', 'A', 'BIT'] +6432-63722-0035-2466: ref=['BEFORE', 'THE', 'BIG', 'WIND', 'IN', 'IRELAND', 'SUGGESTED', 'THONG', 'WITH', 'A', 'NOD', 'AT', 'HIS', 'IRISH', 'COMPATRIOT', 'SLIGHTLY', 'LAUGHED', 'THE', 'COLONEL'] +6432-63722-0035-2466: hyp=['BEFORE', 'THE', 'BIG', 'WIND', 'IN', 'IRELAND', 'SUGGESTED', 'THONG', 'WITH', 'A', 'NOD', 'OF', 'HIS', 'IRISH', 'COMPATRIOT', "SLIGHTLY'LL", 'HAVE', 'THE', 'COLONEL'] +6432-63722-0036-2467: ref=["THAT'S", 'RIGHT', 'AGREED', 'THE', 'COLONEL', 'AS', 'HE', 'CONTINUED', 'TO', 'MOVE', 'HIS', 'MAGNIFYING', 'GLASS', 'OVER', 'THE', 'SURFACE', 'OF', 'THE', 'STILL', 'TICKING', 'WATCH'] +6432-63722-0036-2467: hyp=["THAT'S", 'RIGHT', 'AGREED', 'THE', 'COLONEL', 'AS', 'HE', 'CONTINUED', 'TO', 'MOVE', 'HIS', 'MAGNIFYING', 'GLASS', 'OVER', 'THE', 'SURFACE', 'OF', 'THE', 'STILL', 'TICKING', 'WATCH'] +6432-63722-0037-2468: ref=['AND', 'A', 'CLOSE', 'OBSERVER', 'MIGHT', 'HAVE', 'OBSERVED', 'THAT', 'HE', 'DID', 'NOT', 'TOUCH', 'HIS', 'BARE', 'FINGERS', 'TO', 'THE', 'TIMEPIECE', 'BUT', 'POKED', 'IT', 'ABOUT', 'AND', 'TOUCHED', 'IT', 'HERE', 'AND', 'THERE', 'WITH', 'THE', 'END', 'OF', 'A', 'LEADPENCIL'] +6432-63722-0037-2468: hyp=['IN', 'THE', 'CLOSE', 'OBSERVER', 'MIGHT', 'HAVE', 'OBSERVED', 'THAT', 'HE', 'DID', 'NOT', 'TOUCH', 'HIS', 'BARE', 'FINGERS', 'TO', 'THE', 'TIMEPIECE', 'BUT', 'POKED', 'IT', 'ABOUT', 'AND', 'TOUCHED', 'IT', 'HERE', 'AND', 'THERE', 'WITH', 'THE', 'END', 'OF', 'A', 'LEAD', 'PENCIL'] +6432-63722-0038-2469: ref=['AND', 'DONOVAN', 'TAKE', 'A', "FRIEND'S", 'ADVICE', 'AND', "DON'T", 'BE', 'TOO', 'FREE', 'WITH', 'THAT', 'WATCH', 'TOO', 'FREE', 'WITH', 'IT'] +6432-63722-0038-2469: hyp=['AND', 'DONALD', 'TAKE', 'HER', "FRIEND'S", 'ADVICE', 'AND', "DON'T", 'BE', 'TOO', 'FREE', 'WITH', 'THAT', 'WATCH', 'TOO', 'FREE', 'WITH', 'IT'] 
+6432-63722-0039-2470: ref=['ASKED', 'THE', 'SURPRISED', 'DETECTIVE', 'YES'] +6432-63722-0039-2470: hyp=['AS', 'THE', 'SURPRISE', 'DETECTIVE', 'YES'] +6432-63722-0040-2471: ref=["DON'T", 'SCRATCH', 'YOURSELF', 'ON', 'IT', 'WHATEVER', 'YOU', 'DO', 'WHY', 'NOT'] +6432-63722-0040-2471: hyp=["DON'T", 'SCRATCH', 'YOURSELF', 'ON', 'IT', 'WHATEVER', 'YOU', 'DO', 'WHY', 'NOT'] +6432-63722-0041-2472: ref=['SIMPLY', 'BECAUSE', 'THIS', 'WATCH'] +6432-63722-0041-2472: hyp=['SIMPLY', 'BECAUSE', 'THIS', 'WATCH'] +6432-63722-0042-2473: ref=['SOME', 'ONE', 'OUT', 'HERE', 'TO', 'SEE', 'YOU'] +6432-63722-0042-2473: hyp=['SOME', 'ONE', 'OUT', 'HER', 'TO', 'SEE', 'YOU'] +6432-63722-0043-2474: ref=['ALL', 'RIGHT', 'BE', 'THERE', 'IN', 'A', 'SECOND'] +6432-63722-0043-2474: hyp=['ALL', 'RIGHT', 'BE', 'THERE', 'IN', 'A', 'SECOND'] +6432-63722-0044-2475: ref=['SINGA', 'PHUT', 'WAS', 'THE', 'PANTING', 'ANSWER'] +6432-63722-0044-2475: hyp=['SHANGHAT', 'WAS', 'THE', 'PANTING', 'ANSWER'] +6432-63722-0045-2476: ref=['I', 'WANT', 'TO', 'TALK', 'OVER', "DARCY'S", 'CASE', 'WITH', 'YOU', 'THE', 'COLONEL', 'HAD', 'SAID', 'AND', 'THE', 'TWO', 'HAD', 'TALKED', 'HAD', 'THOUGHT', 'HAD', 'TALKED', 'AGAIN', 'AND', 'NOW', 'WERE', 'SILENT', 'FOR', 'A', 'TIME'] +6432-63722-0045-2476: hyp=['I', 'WANT', 'TO', 'TALK', 'OVER', "DARCY'S", 'CASE', 'WITH', 'YOU', 'THE', 'COLONEL', 'HAD', 'SAID', 'AND', 'THE', 'TWO', 'HAD', 'TALKED', 'HAD', 'THOUGHT', 'HAD', 'TALKED', 'AGAIN', 'AND', 'NOW', 'WERE', 'SILENT', 'FOR', 'A', 'TIME'] +6432-63722-0046-2477: ref=['WHAT', 'ARE', 'THE', 'CHANCES', 'OF', 'GETTING', 'HIM', 'OFF', 'LEGALLY', 'IF', 'WE', 'GO', 'AT', 'IT', 'FROM', 'A', 'NEGATIVE', 'STANDPOINT', 'ASKED', 'THE', 'COLONEL'] +6432-63722-0046-2477: hyp=['WHAT', 'ARE', 'THE', 'CHANCES', 'OF', 'GETTING', 'HIM', 'OFF', 'LEGALLY', 'IF', 'WE', 'GO', 'AT', 'IT', 'FROM', 'A', 'NEGATIVE', 'STANDPOINT', 'ASKED', 'THE', 'COLONEL'] +6432-63722-0047-2478: ref=['RATHER', 'A', 'HYPOTHETICAL', 'QUESTION', 'COLONEL', 'BUT', 'I', 'SHOULD', 'SAY', 'IT', 'MIGHT', 'BE', 'A', 'FIFTY', 'FIFTY', 'PROPOSITION'] +6432-63722-0047-2478: hyp=['RATHER', 'A', 'HYPOTHETICAL', 'QUESTION', 'COLONEL', 'BUT', 'I', 'SHOULD', 'SAY', 'IT', 'MIGHT', 'BE', 'A', 'FIFTY', 'FIFTY', 'PROPOSITION'] +6432-63722-0048-2479: ref=['AT', 'BEST', 'HE', 'WOULD', 'GET', 'OFF', 'WITH', 'A', 'SCOTCH', 'VERDICT', 'OF', 'NOT', 'PROVEN', 'BUT', 'HE', "DOESN'T", 'WANT', 'THAT', 'NOR', 'DO', 'I'] +6432-63722-0048-2479: hyp=['AT', 'BEST', 'HE', 'WOULD', 'GET', 'OFF', 'WITH', 'A', 'SCOTCH', 'VERDICT', 'OF', 'NOT', 'PROVING', 'BUT', 'HE', "DOESN'T", 'WANT', 'THAT', 'NOR', 'DO', 'I'] +6432-63722-0049-2480: ref=['AND', 'YOU', 'I', "DON'T", 'WANT', 'IT', 'EITHER'] +6432-63722-0049-2480: hyp=['AND', 'YOU', 'I', "DON'T", 'WANT', 'IT', 'EITHER'] +6432-63722-0050-2481: ref=['BUT', 'I', 'WANT', 'TO', 'KNOW', 'JUST', 'WHERE', 'WE', 'STAND', 'NOW', 'I', 'KNOW'] +6432-63722-0050-2481: hyp=['BUT', 'I', 'WANT', 'TO', 'KNOW', 'JUST', 'WHERE', 'WE', 'STAND', 'NOW', 'I', 'KNOW'] +6432-63722-0051-2482: ref=['BUT', 'I', 'NEED', 'TO', 'DO', 'A', 'LITTLE', 'MORE', 'SMOKING', 'OUT', 'FIRST', 'NOW', 'I', 'WANT', 'TO', 'THINK'] +6432-63722-0051-2482: hyp=['BUT', 'I', 'NEED', 'TO', 'DO', 'A', 'LITTLE', 'MORE', 'SMOKING', 'OUT', 'FIRST', 'NOW', 'I', 'WANT', 'TO', 'THINK'] +6432-63722-0052-2483: ref=['IF', "YOU'LL", 'EXCUSE', 'ME', "I'LL", 'PRETEND', "I'M", 'FISHING', 'AND', 'I', 'MAY', 'CATCH', 'SOMETHING'] +6432-63722-0052-2483: hyp=['IF', "YOU'LL", 'EXCUSE', 'ME', "I'LL", 'PRETEND', "I'M", 'FISHING', 'AND', 'I', 'MAY', 'CATCH', 
'SOMETHING'] +6432-63722-0053-2484: ref=['IN', 'FACT', 'I', 'HAVE', 'A', 'FEELING', 'THAT', "I'LL", 'LAND', 'MY', 'FISH'] +6432-63722-0053-2484: hyp=['IN', 'FACT', 'I', 'HAVE', 'A', 'FEELING', 'THAT', 'I', 'LAND', 'MY', 'FISH'] +6432-63722-0054-2485: ref=["I'D", 'RECOMMEND', 'HIM', 'TO', 'YOU', 'INSTEAD', 'OF', 'BLACKSTONE', 'THANKS', 'LAUGHED', 'KENNETH'] +6432-63722-0054-2485: hyp=['I', 'RECOMMEND', 'HIM', 'TO', 'YOU', 'INSTEAD', 'OF', 'BLACKSTONE', 'THANKS', 'LAP', 'KENNETH'] +6432-63722-0055-2486: ref=['WHAT', 'IS', 'IT', 'PERHAPS', 'I', 'CAN', 'HELP', 'YOU'] +6432-63722-0055-2486: hyp=['WHAT', 'IS', 'IT', 'PERHAPS', 'I', 'CAN', 'HELP', 'YOU'] +6432-63722-0056-2487: ref=['THE', 'OLD', 'ADAGE', 'OF', 'TWO', 'HEADS', 'YOU', 'KNOW'] +6432-63722-0056-2487: hyp=['THE', 'OLD', 'ADAGE', 'OF', 'TWO', 'HEADS', 'YOU', 'KNOW'] +6432-63722-0057-2488: ref=['YES', 'IT', 'STILL', 'HOLDS', 'GOOD'] +6432-63722-0057-2488: hyp=['YES', 'IT', 'STILL', 'HOLDS', 'GOOD'] +6432-63722-0058-2489: ref=['NO', 'ALIMONY', 'REPEATED', 'THE', 'COLONEL', 'PUZZLED', 'YES', 'JUST', 'THAT'] +6432-63722-0058-2489: hyp=['NO', 'ALIMONY', 'REPLIED', 'THE', 'COLONEL', 'PUZZLED', 'YES', 'JUST', 'THAT'] +6432-63722-0059-2490: ref=['AND', "THERE'S", 'NO', 'REASON', 'YOU', "SHOULDN'T", 'KNOW'] +6432-63722-0059-2490: hyp=['AND', "THERE'S", 'NO', 'REASON', 'YOU', "SHOULDN'T", 'KNOW'] +6432-63723-0000-2491: ref=['CHUCKLED', 'THE', 'COLONEL', 'AS', 'HE', 'SKILFULLY', 'PLAYED', 'THE', 'LUCKLESS', 'TROUT', 'NOW', 'STRUGGLING', 'TO', 'GET', 'LOOSE', 'FROM', 'THE', 'HOOK'] +6432-63723-0000-2491: hyp=['CHUCKLED', 'THE', 'COLONEL', 'AS', 'HE', 'SKILFULLY', 'PLAYED', 'THE', 'LUCKLESS', 'TROUT', 'NOW', 'STRUGGLING', 'TO', 'GET', 'LOOSE', 'FROM', 'THE', 'HOOK'] +6432-63723-0001-2492: ref=['AND', 'WHEN', 'THE', 'FISH', 'WAS', 'LANDED', 'PANTING', 'ON', 'THE', 'GRASS', 'AND', 'SHAG', 'HAD', 'BEEN', 'ROUSED', 'FROM', 'HIS', 'SLUMBER', 'TO', 'SLIP', 'THE', 'NOW', 'LIMP', 'FISH', 'INTO', 'THE', 'CREEL', 'COLONEL', 'ASHLEY', 'GAVE', 'A', 'SIGH', 'OF', 'RELIEF', 'AND', 'REMARKED', 'I', 'THINK', 'I', 'SEE', 'IT', 'NOW'] +6432-63723-0001-2492: hyp=['AND', 'WHEN', 'THE', 'FISH', 'WAS', 'LANDED', 'PANTING', 'ON', 'THE', 'GRASS', 'AND', 'SHAG', 'HAD', 'BEEN', 'ROUSED', 'FROM', 'HIS', 'SLUMBER', 'TO', 'SLIP', 'A', 'NOW', 'LIMP', 'FISH', 'INTO', 'THE', 'CREO', 'COLONEL', 'ASHLEY', 'GAVE', 'A', 'SIGH', 'OF', 'RELIEF', 'AND', 'REMARKED', 'I', 'THINK', 'I', 'SEE', 'IT', 'NOW'] +6432-63723-0002-2493: ref=['THE', 'REASON', 'SHE', 'ASKED', 'NO', 'ALIMONY', 'INQUIRED', 'KENNETH'] +6432-63723-0002-2493: hyp=['THE', 'REASON', 'SHE', 'ASKED', 'NO', 'ALIMONY', 'INQUIRED', 'KENNETH'] +6432-63723-0003-2494: ref=['NO', 'I', "WASN'T", 'THINKING', 'OF', 'THAT'] +6432-63723-0003-2494: hyp=['NO', 'I', "WASN'T", 'THINKING', 'OF', 'THAT'] +6432-63723-0004-2495: ref=['HOWEVER', "DON'T", 'THINK', "I'M", 'NOT', 'INTERESTED', 'IN', 'YOUR', 'CASE', "I'VE", 'FISHED', 'ENOUGH', 'FOR', 'TO', 'DAY'] +6432-63723-0004-2495: hyp=['HOWEVER', "DON'T", 'THINK', "I'M", 'NOT', 'INTERESTED', 'IN', 'YOUR', 'CASE', "I'VE", 'FINISHED', 'ENOUGH', 'FOR', 'TO', 'DAY'] +6432-63723-0005-2496: ref=['WELL', 'I', "DON'T", 'KNOW', 'THAT', 'YOU', 'CAN'] +6432-63723-0005-2496: hyp=['WELL', 'I', "DON'T", 'KNOW', 'THAT', 'YOU', 'CAN'] +6432-63723-0006-2497: ref=['IT', "ISN'T", 'GENERALLY', 'KNOWN', 'WENT', 'ON', 'THE', 'LAWYER', 'THAT', 'THE', 'HOTEL', "KEEPER'S", 'WIFE', 'HAS', 'LEFT', 'HIM'] +6432-63723-0006-2497: hyp=['IT', 'IS', 'IN', 'GENERALLY', 'KNOWN', 'WENT', 'ON', 'THE', 'LAWYER', 'THAT', 'THE', 
'HOTEL', "KEEPER'S", 'WIFE', 'HAS', 'LEFT', 'HIM'] +6432-63723-0007-2498: ref=['IT', 'WAS', 'ONE', 'OF', 'WHAT', 'AT', 'FIRST', 'MIGHT', 'BE', 'CALLED', 'REFINED', 'CRUELTY', 'ON', 'HER', "HUSBAND'S", 'PART', 'DEGENERATING', 'GRADUALLY', 'INTO', 'THAT', 'OF', 'THE', 'BASER', 'SORT'] +6432-63723-0007-2498: hyp=['IT', 'WAS', 'ONE', 'OF', 'WHAT', 'AT', 'FIRST', 'MIGHT', 'BE', 'CALLED', 'REFINED', 'CRUELTY', 'ON', 'HER', "HUSBAND'S", 'PART', 'DEGENERATING', 'GRADUALLY', 'INTO', 'THAT', 'OF', 'A', 'BASER', 'SORT'] +6432-63723-0008-2499: ref=['YOU', "DON'T", 'MEAN', 'THAT', 'LARCH', 'STRUCK', 'HER', 'THAT', 'THERE', 'WAS', 'PHYSICAL', 'ABUSE', 'DO', 'YOU', 'ASKED', 'THE', 'COLONEL', "THAT'S", 'WHAT', 'HE', 'DID'] +6432-63723-0008-2499: hyp=['YOU', "DON'T", 'MEAN', 'THAT', 'LARGE', 'STRUCK', 'HER', 'THAT', 'THERE', 'WAS', 'PHYSICAL', 'ABUSE', 'DO', 'YOU', 'ASKED', 'THE', 'COLONEL', "THAT'S", 'WHAT', 'HE', 'DID'] +6432-63723-0009-2500: ref=['THE', 'COLONEL', 'DID', 'NOT', 'DISCLOSE', 'THE', 'FACT', 'THAT', 'IT', 'WAS', 'NO', 'NEWS', 'TO', 'HIM'] +6432-63723-0009-2500: hyp=['THE', 'COLONEL', 'DID', 'NOT', 'DISCLOSE', 'THE', 'FACT', 'THAT', 'IT', 'WAS', 'NO', 'NEWS', 'TO', 'HIM'] +6432-63723-0010-2501: ref=['AARON', "GRAFTON'S", 'STATEMENT', 'WAS', 'BEING', 'UNEXPECTEDLY', 'CONFIRMED'] +6432-63723-0010-2501: hyp=['AARON', "GRAFTON'S", 'STATEMENT', 'WAS', 'BEING', 'UNEXPECTEDLY', 'CONFIRMED'] +6432-63723-0011-2502: ref=['HE', 'REMEMBERED', 'THAT', 'CYNTHIA', 'AND', 'GRAFTON', 'HAD', 'ONCE', 'BEEN', 'IN', 'LOVE', 'WITH', 'EACH', 'OTHER'] +6432-63723-0011-2502: hyp=['HE', 'REMEMBERED', 'THAT', 'CYNTHIA', 'AND', 'GRAFTON', 'HAD', 'ONCE', 'BEEN', 'IN', 'LOVE', 'WITH', 'EACH', 'OTHER'] +6432-63723-0012-2503: ref=['SHE', 'SAID', 'HE', 'HAD', 'STRUCK', 'HER', 'MORE', 'THAN', 'ONCE', 'AND', 'SHE', 'COULD', 'STAND', 'IT', 'NO', 'LONGER'] +6432-63723-0012-2503: hyp=['SHE', 'SAID', 'HE', 'HAD', 'STRUCK', 'HER', 'MORE', 'THAN', 'ONCE', 'AND', 'SHE', 'COULD', 'STAND', 'IT', 'NO', 'LONGER'] +6432-63723-0013-2504: ref=['BECAUSE', 'LARCH', 'MADE', 'NO', 'DEFENSE'] +6432-63723-0013-2504: hyp=['BECAUSE', 'LARGE', 'MADE', 'NO', 'DEFENCE'] +6432-63723-0014-2505: ref=['LARCH', 'BY', 'REFUSING', 'TO', 'APPEAR', 'PRACTICALLY', 'ADMITTED', 'THE', 'CHARGES', 'AGAINST', 'HIM', 'AND', 'DID', 'NOT', 'OPPOSE', 'THE', 'SEPARATION'] +6432-63723-0014-2505: hyp=['LARGE', 'BY', 'REFUSING', 'TO', 'APPEAR', 'PRACTICALLY', 'ADMITTED', 'THE', 'CHARGES', 'AGAINST', 'HIM', 'AND', 'DID', 'NOT', 'OPPOSE', 'THE', 'SEPARATION'] +6432-63723-0015-2506: ref=['SO', 'I', 'HAD', 'TO', 'LET', 'HER', 'HAVE', 'HER', 'WAY', 'AND', 'WE', 'DID', 'NOT', 'ASK', 'THE', 'COURT', 'FOR', 'MONEY', 'THOUGH', 'I', 'HAD', 'NO', 'SUCH', 'SQUEAMISH', 'FEELINGS', 'WHEN', 'IT', 'CAME', 'TO', 'MY', 'COUNSEL', 'FEE'] +6432-63723-0015-2506: hyp=['SO', 'I', 'HAD', 'TO', 'LET', 'HER', 'HAVE', 'HER', 'WAY', 'AND', 'WE', 'DID', 'NOT', 'ASK', 'THE', 'COURT', 'FOR', 'MONEY', 'THOUGH', 'I', 'HAD', 'NO', 'SUCH', 'SQUEAMISH', 'FEELINGS', 'WHEN', 'IT', 'CAME', 'TO', 'MY', 'COUNCIL', 'FEET'] +6432-63723-0016-2507: ref=['NO', 'BUT', 'HE', 'WILL', 'OR', "I'LL", 'SUE', 'HIM', 'AND', 'GET', 'JUDGMENT', 'OH', "HE'LL", 'PAY', 'ALL', 'RIGHT'] +6432-63723-0016-2507: hyp=['NO', 'BUT', 'HE', 'WILL', 'OR', 'ELSE', 'SUE', 'EM', 'AND', 'GET', 'JUDGMENT', 'OH', "HE'LL", 'PAY', 'ALL', 'RIGHT'] +6432-63723-0017-2508: ref=['AND', 'IT', 'TAKES', 'ALL', 'SORTS', 'OF', 'PERSONS', 'TO', 'MAKE', 'IT', 'UP'] +6432-63723-0017-2508: hyp=['AND', 'IT', 'TAKES', 'ALL', 'SORTS', 'OF', 'PERSONS', 'TO', 'MAKE', 
'IT', 'UP'] +6432-63723-0018-2509: ref=['STILL', 'I', 'WOULD', 'LIKE', 'TO', 'KNOW'] +6432-63723-0018-2509: hyp=['STILL', 'I', 'WOULD', 'LIKE', 'TO', 'KNOW'] +6432-63723-0019-2510: ref=['THE', 'MURDER', 'OF', 'MISSUS', 'DARCY', 'HAD', 'SOME', 'TIME', 'AGO', 'BEEN', 'SHIFTED', 'OFF', 'THE', 'FRONT', 'PAGE', 'THOUGH', 'IT', 'WOULD', 'GET', 'BACK', 'THERE', 'WHEN', 'THE', 'YOUNG', 'JEWELER', 'WAS', 'TRIED'] +6432-63723-0019-2510: hyp=['THE', 'MURDERER', 'OF', 'MISSUS', 'DARCY', 'HAD', 'SOME', 'TIME', 'AGO', 'BEEN', 'SHIFTED', 'OFF', 'THE', 'FRONT', 'PAGE', 'THOUGH', 'IT', 'WOULD', 'GET', 'BACK', 'THERE', 'WHEN', 'THE', 'YOUNG', 'JEWELLER', 'WAS', 'TRIED'] +6432-63723-0020-2511: ref=['IT', 'HAD', 'A', 'DOUBLE', 'REPUTATION', 'SO', 'TO', 'SPEAK'] +6432-63723-0020-2511: hyp=['IT', 'HAD', 'A', 'DOUBLE', 'REPUTATION', 'SO', 'TO', 'SPEAK'] +6432-63723-0021-2512: ref=['GRAVE', 'AND', 'EVEN', 'REVEREND', 'CONVENTIONS', 'ASSEMBLED', 'IN', 'ITS', 'BALLROOM', 'AND', 'POLITICIANS', 'OF', 'THE', 'UPPER', 'IF', 'NOT', 'BETTER', 'CLASS', 'WERE', 'FREQUENTLY', 'SEEN', 'IN', 'ITS', 'DINING', 'ROOM', 'OR', 'CAFE'] +6432-63723-0021-2512: hyp=['GRAVE', 'AND', 'EVEN', 'REVEREND', 'THE', 'CONVENTIONS', 'ASSEMBLED', 'IN', 'ITS', 'BALL', 'ROOM', 'IN', 'POLITICIANS', 'OF', 'THE', 'UPPER', 'IF', 'NOT', 'BETTER', 'CLASS', 'WERE', 'FREQUENTLY', 'SEEN', 'IN', 'ITS', 'DINING', 'ROOM', 'OR', 'CAFE'] +6432-63723-0022-2513: ref=['LARCH', 'HIMSELF', 'WAS', 'A', 'PECULIAR', 'CHARACTER'] +6432-63723-0022-2513: hyp=['LARGE', 'HIMSELF', 'WAS', 'A', 'PECULIAR', 'CHARACTER'] +6432-63723-0023-2514: ref=['IN', 'A', 'SMALLER', 'PLACE', 'HE', 'WOULD', 'HAVE', 'BEEN', 'CALLED', 'A', 'SALOON', 'KEEPER'] +6432-63723-0023-2514: hyp=['IN', 'A', 'SMALLER', 'PLACE', 'HE', 'WOULD', 'HAVE', 'BEEN', 'CALLED', 'A', 'SALOON', 'KEEPER'] +6432-63723-0024-2515: ref=['AND', 'IT', 'WAS', 'THIS', 'MAN', 'RICH', 'IT', 'WAS', 'SAID', 'HANDSOME', 'CERTAINLY', 'THAT', 'CYNTHIA', 'RATCHFORD', 'HAD', 'MARRIED'] +6432-63723-0024-2515: hyp=['AND', 'IT', 'WAS', 'THIS', 'MAN', 'RICH', 'OVER', 'SAID', 'HANDSOME', 'CERTAINLY', 'THAT', 'CENTIA', 'RETFORD', 'HAD', 'MARRIED'] +6432-63723-0025-2516: ref=['TO', 'THIS', 'WAS', 'THE', 'ANSWER', 'WHISPERED', 'MONEY'] +6432-63723-0025-2516: hyp=['TO', 'THIS', 'WAS', 'THE', 'ANSWER', 'WHISPERED', 'MONEY'] +6432-63723-0026-2517: ref=['AND', 'IN', 'A', 'WAY', 'IT', 'WAS', 'TRUE'] +6432-63723-0026-2517: hyp=['AND', 'IN', 'A', 'WAY', 'IT', 'WAS', 'TRUE'] +6432-63723-0027-2518: ref=['SHE', 'ALSO', 'SAW', 'AN', 'OPPORTUNITY', 'OF', 'PAYING', 'OLD', 'DEBTS', 'AND', 'REAPING', 'SOME', 'REVENGES'] +6432-63723-0027-2518: hyp=['SHE', 'ALSO', 'SAW', 'AN', 'OPPORTUNITY', 'OF', 'PAYING', 'OLD', 'DEBTS', 'AND', 'REAPING', 'SOME', 'REVENGES'] +6432-63723-0028-2519: ref=['AFTER', 'THE', 'MARRIAGE', 'WHICH', 'WAS', 'A', 'BRILLIANT', 'AND', 'GAY', 'ONE', 'IF', 'NOT', 'HAPPY', 'THE', 'LARCH', 'HOTEL', 'IT', 'COULD', 'HARDLY', 'BE', 'CALLED', 'A', 'HOME', 'BECAME', 'THE', 'SCENE', 'OF', 'MANY', 'FESTIVE', 'OCCASIONS'] +6432-63723-0028-2519: hyp=['AFTER', 'THE', 'MARRIAGE', 'WHICH', 'WAS', 'A', 'BRILLIANT', 'AND', 'GAY', 'ONE', 'IF', 'NOT', 'HAPPY', 'THE', 'LARGE', 'HOTEL', 'IT', 'COULD', 'HARDLY', 'BE', 'CALLED', 'HOME', 'BECAME', 'THE', 'SCENE', 'OF', 'MANY', 'FESTIVATIONS'] +6432-63723-0029-2520: ref=['THEN', 'IT', 'WAS', 'SAID', 'OF', 'LARCH', 'THAT', 'SOON', 'AFTER', 'THE', 'ECHOES', 'OF', 'THE', 'WEDDING', 'CHIMES', 'HAD', 'DIED', 'AWAY', 'HE', 'HAD', 'BEGUN', 'TO', 'TREAT', 'HIS', 'WIFE', 'WITH', 'REFINED', 'CRUELTY', 'THAT', 'HIDDEN', 
'AWAY', 'FROM', 'THE', 'PUBLIC', 'UNDERNEATH', 'HIS', 'HABITUAL', 'MANNER', 'THERE', 'WAS', 'THE', 'RAWNESS', 'OF', 'THE', 'BRUTE'] +6432-63723-0029-2520: hyp=['THEN', 'IT', 'WAS', 'SAID', 'OF', 'LARGE', 'THAT', 'SOON', 'AFTER', 'THE', 'ECHOES', 'OF', 'THE', 'WEDDING', 'CHIMES', 'HAD', 'DIED', 'AWAY', 'HE', 'HAD', 'BEGUN', 'TO', 'TREAT', 'HIS', 'WIFE', 'FOR', 'THE', 'REFINED', 'CRUELTY', 'THAT', 'HIDDEN', 'AWAY', 'FROM', 'THE', 'PUBLIC', 'UNDERNEATH', 'HIS', 'HABITUAL', 'MANNER', 'THERE', 'WAS', 'THE', 'RAWNESS', 'OF', 'THE', 'BRUTE'] +6432-63723-0030-2521: ref=['BUT', 'IT', 'WAS', 'NOTICED', 'THAT', 'THE', 'OLDER', 'AND', 'MORE', 'CONSERVATIVE', 'FAMILIES', 'WERE', 'LESS', 'OFTEN', 'REPRESENTED', 'AND', 'WHEN', 'THEY', 'WERE', 'IT', 'WAS', 'BY', 'SOME', 'OF', 'THE', 'YOUNGER', 'MEMBERS', 'WHOSE', 'REPUTATIONS', 'WERE', 'ALREADY', 'SMIRCHED', 'OR', 'WHO', 'HAD', 'NOT', 'YET', 'ACQUIRED', 'ANY', 'AND', 'WERE', 'WILLING', 'TO', 'TAKE', 'A', 'CHANCE'] +6432-63723-0030-2521: hyp=['BUT', 'IT', 'WAS', 'NOTICED', 'THAT', 'THE', 'OLDER', 'AND', 'MORE', 'CONSERVATIVE', 'FAMILIES', 'WERE', 'LESS', 'OFTEN', 'REPRESENTED', 'AND', 'WHEN', 'THEY', 'WERE', 'IT', 'WAS', 'BY', 'SOME', 'OF', 'THE', 'YOUNGER', 'MEMBERS', 'WHOSE', 'REPUTATIONS', 'WERE', 'ALREADY', 'SMARCHED', 'OR', 'WHO', 'HAD', 'NOT', 'YET', 'ACQUIRED', 'ANY', 'AND', 'WERE', 'WILLING', 'TO', 'TAKE', 'A', 'CHANCE'] +6432-63723-0031-2522: ref=['IT', "WOULDN'T", 'DO', 'YOU', 'KNOW', 'AFTER', 'THAT', 'STORY', 'CAME', 'OUT', 'FOR', 'ME', 'AND', 'THE', 'VICE', 'CHANCELLOR', 'WHO', 'SAT', 'IN', 'THE', 'CASE', 'AS', 'WELL', 'AS', 'OTHER', 'JUDGES', 'AND', 'MEMBERS', 'OF', 'THE', 'BAR', 'TO', 'BE', 'SEEN', 'THERE', 'KENNETH', 'EXPLAINED', 'TO', 'THE', 'COLONEL'] +6432-63723-0031-2522: hyp=['IT', "WOULDN'T", 'DO', 'YOU', 'KNOW', 'AFTER', 'THAT', 'STORY', 'CAME', 'OUT', 'FOR', 'ME', 'IN', 'THE', 'VICE', 'CHANCELLOR', 'WHO', 'SAT', 'IN', 'A', 'CASE', 'AS', 'WELL', 'AS', 'OTHER', 'JUDGES', 'AND', 'MEMBERS', 'OF', 'THE', 'BAR', 'TO', 'BE', 'SEEN', 'THERE', 'KENNETH', 'EXPLAINED', 'TO', 'THE', 'COLONEL'] +6432-63723-0032-2523: ref=['MEANWHILE', 'COLONEL', 'ASHLEY', 'WAS', 'A', 'VERY', 'BUSY', 'MAN', 'AND', 'TO', 'NO', 'ONE', 'DID', 'HE', 'TELL', 'VERY', 'MUCH', 'ABOUT', 'HIS', 'ACTIVITIES', 'HE', 'SAW', 'DARCY', 'FREQUENTLY', 'AT', 'THE', 'JAIL', 'AND', 'TO', 'THAT', 'YOUNG', "MAN'S", 'PLEADINGS', 'THAT', 'SOMETHING', 'BE', 'DONE', 'ALWAYS', 'RETURNED', 'THE', 'ANSWER'] +6432-63723-0032-2523: hyp=['MEANWHILE', 'COLONEL', 'ASHLEY', 'WAS', 'A', 'VERY', 'BUSY', 'MAN', 'AND', 'TO', 'NO', 'ONE', 'DID', 'HE', 'TELL', 'VERY', 'MUCH', 'ABOUT', 'HIS', 'ACTIVITIES', 'HE', 'SAW', 'DARCY', 'FREQUENTLY', 'AT', 'THE', 'JAIL', 'AND', 'TO', 'THAT', 'YOUNG', "MAN'S", 'PLEADINGS', 'THAT', 'SOMETHING', 'TO', 'BE', 'DONE', 'ALWAYS', 'RETURNED', 'THE', 'ANSWER'] +6432-63723-0033-2524: ref=["DON'T", 'WORRY', 'IT', 'WILL', 'COME', 'OUT', 'ALL', 'RIGHT'] +6432-63723-0033-2524: hyp=['DONE', 'WORRY', 'IT', 'WILL', 'COME', 'OUT', 'ALL', 'RIGHT'] +6432-63723-0034-2525: ref=["I'M", 'GOING', 'TO', 'RECTIFY', 'THEM', 'BUT', 'IT', 'WILL', 'TAKE', 'TIME'] +6432-63723-0034-2525: hyp=["I'M", 'GOING', 'DIRECTIFY', 'THEM', 'BUT', 'IT', 'WILL', 'TAKE', 'TIME'] +6432-63723-0035-2526: ref=["IT'S", 'HARD', 'FOR', 'MISS', 'MASON', 'TOO', 'ALTHOUGH', "SHE'S", 'BEARING', 'UP', 'LIKE', 'A', 'MAJOR'] +6432-63723-0035-2526: hyp=["IT'S", 'HARD', 'FOR', 'MISS', 'MASON', 'TOO', 'ALTHOUGH', "SHE'S", 'BEARING', 'UP', 'LIKE', 'A', 'MAJOR'] +6432-63723-0036-2527: ref=['SO', 'KING', 'GOT', 'BAIL', 'WHO', 
'PUT', 'IT', 'UP'] +6432-63723-0036-2527: hyp=['SO', 'KING', 'GOD', 'BAIL', 'WHO', 'PUT', 'IT', 'UP'] +6432-63723-0037-2528: ref=['IT', 'WAS', 'HIGH', 'LARCH'] +6432-63723-0037-2528: hyp=['IT', 'WAS', 'TIME', 'LARCH'] +6432-63723-0038-2529: ref=['THEY', 'TOOK', 'HARRY', 'AWAY', 'A', 'WHILE', 'AGO'] +6432-63723-0038-2529: hyp=['THEY', 'TOOK', 'HARRY', 'AWAY', 'A', 'WHILE', 'AGO'] +6432-63723-0039-2530: ref=['BUT', 'HIS', 'ARE', 'PRETTY', 'UNCERTAIN', 'SHOES', 'TO', 'BE', 'IN', 'JUST', 'THE', 'SAME'] +6432-63723-0039-2530: hyp=['BUT', 'HIS', 'ARE', 'PRETTY', 'UNCERTAIN', 'SHOES', 'TO', 'BE', 'IN', 'JUST', 'THE', 'SAME'] +6432-63723-0040-2531: ref=['ONLY', 'THAT', 'I', 'DARCY', 'HESITATED', 'AND', 'GREW', 'RED'] +6432-63723-0040-2531: hyp=['ONLY', 'THAT', 'I', 'DARCY', 'HESITATED', 'AND', 'GREW', 'RED'] +6432-63723-0041-2532: ref=['GOOD', 'EVENING', 'COLONEL', 'HE', 'CALLED', 'GENIALLY', 'WILL', 'YOU', 'JOIN', 'ME', 'IN', 'A', 'WELSH', 'RABBIT'] +6432-63723-0041-2532: hyp=['GOOD', 'EVENING', 'COLONEL', 'HE', 'CALLED', 'GENIALLY', 'WHERE', 'YOU', 'JOIN', 'ME', 'IN', 'A', 'WELL', 'RABBIT'] +6432-63723-0042-2533: ref=['THANK', 'YOU', 'NO'] +6432-63723-0042-2533: hyp=['THANK', 'YOU', 'NO'] +6432-63723-0043-2534: ref=["I'M", 'AFRAID', 'MY', 'DIGESTION', "ISN'T", 'QUITE', 'UP', 'TO', 'THAT', 'AS', "I'VE", 'HAD', 'TO', 'CUT', 'OUT', 'MY', 'FISHING', 'OF', 'LATE'] +6432-63723-0043-2534: hyp=["I'M", 'AFRAID', 'MY', 'DIADE', "ISN'T", 'QUITE', 'UP', 'TO', 'THAT', 'AS', "I'VE", 'HAD', 'TO', 'CUT', 'OUT', 'MY', 'FISHING', 'OF', 'LATE'] +6432-63723-0044-2535: ref=['NOW', 'AS', 'TO', 'CERTAIN', 'MATTERS', 'IN', 'THE', 'STORE', 'ON', 'THE', 'MORNING', 'OF', 'THE', 'MURDER'] +6432-63723-0044-2535: hyp=['NOW', 'AS', 'TO', 'CERTAIN', 'MATTERS', 'IN', 'THE', 'STORE', 'ON', 'THE', 'MORNING', 'OF', 'THE', 'MURDER'] +6432-63723-0045-2536: ref=['THE', 'STOPPED', 'CLOCKS', 'FOR', 'INSTANCE', 'HAVE', 'YOU', 'ANY', 'THEORY'] +6432-63723-0045-2536: hyp=['THEY', 'STOPPED', 'CLOCKS', 'FOR', 'INSTANCE', 'HAVE', 'YOU', 'ANY', 'THEORY'] +6432-63723-0046-2537: ref=['THERE', 'WERE', 'THREE', 'OF', 'THEM', 'THE', 'CENTER', 'FIGURE', 'BEING', 'THAT', 'OF', 'HARRY', 'KING', 'AND', 'HE', 'WAS', 'VERY', 'MUCH', 'INTOXICATED'] +6432-63723-0046-2537: hyp=['THERE', 'WERE', 'THREE', 'OF', 'THEM', 'THE', 'CENTRE', 'FIGURE', 'BEING', 'THAT', 'OF', 'HARRY', 'KING', 'AND', 'HE', 'WAS', 'VERY', 'MUCH', 'INTOXICATED'] +6432-63723-0047-2538: ref=['THAT', 'IS', 'NOT', 'ALWAYS', 'BUT', 'SOMETIMES', 'IT', 'HAPPENED', 'TO', 'BE', 'SO', 'NOW'] +6432-63723-0047-2538: hyp=['THAT', 'IS', 'NOT', 'ALWAYS', 'BUT', 'SOMETIMES', 'IT', 'HAPPENED', 'TO', 'BE', 'SO', 'NOW'] +6432-63723-0048-2539: ref=['I', 'BEG', 'YOUR', 'PARDON', 'HE', 'SAID', 'IN', 'THE', 'CULTURED', 'TONES', 'HE', 'KNEW', 'SO', 'WELL', 'HOW', 'TO', 'USE', 'YET', 'OF', 'WHICH', 'HE', 'MADE', 'SO', 'LITTLE', 'USE', 'OF', 'LATE'] +6432-63723-0048-2539: hyp=['I', 'BEG', 'YOUR', 'PARDON', 'HE', 'SAID', 'IN', 'THE', 'CULTURED', 'TONES', 'HE', 'KNEW', 'SO', 'WELL', 'HOW', 'TO', 'USE', 'YET', 'OF', 'WHICH', 'HE', 'MADE', 'SO', 'LITTLE', 'USE', 'OF', 'LATE'] +6432-63723-0049-2540: ref=['I', 'SAID', 'WHERE', 'HAVE', 'YOU', 'BEEN', 'REMARKED', 'THE', 'OTHER', "WE'VE", 'MISSED', 'YOU'] +6432-63723-0049-2540: hyp=['I', 'SAID', 'WHERE', 'HAVE', 'YOU', 'BEEN', 'REMARKED', 'THE', 'OTHER', "WE'VE", 'MISSED', 'YOU'] +6432-63723-0050-2541: ref=['I', 'SAID', 'I', 'WAS', 'GOLFING', 'HE', 'WENT', 'ON', 'EXCEEDINGLY', 'DISTINCTLY', 'THOUGH', 'WITH', 'AN', 'EFFORT'] +6432-63723-0050-2541: hyp=['I', 'SAID', 'I', 
'WAS', 'GOLFING', 'HE', 'WENT', 'ON', 'EXCEEDINGLY', 'DISTINCTLY', 'THOUGH', 'WITH', 'AN', 'EFFORT'] +6432-63723-0051-2542: ref=['WHY', 'POLONIUS', 'SOME', 'ONE', 'ASKED'] +6432-63723-0051-2542: hyp=['WHY', 'POLONIUS', 'SOME', 'ONE', 'ASKED'] +6432-63723-0052-2543: ref=['BECAUSE', 'DEAR', 'FRIEND', 'REPLIED', 'KING', 'SOFTLY', 'HE', 'SOMEWHAT', 'RESEMBLES', 'A', 'CERTAIN', 'PERSON', 'HERE', 'WHO', 'TALKS', 'TOO', 'MUCH', 'BUT', 'WHO', 'IS', 'NOT', 'SO', 'WISE', 'AS', 'HE', 'THINKS'] +6432-63723-0052-2543: hyp=['BECAUSE', 'DEAR', 'FRIEND', 'REPLIED', 'KING', 'SOFTLY', 'HE', 'SOMEWHAT', 'RESEMBLES', 'A', 'CERTAIN', 'PERSON', 'HERE', 'WHO', 'TALKS', 'TOO', 'MUCH', 'BUT', 'WHO', 'IS', 'NOT', 'SO', 'WISE', 'AS', 'HE', 'THINKS'] +6432-63723-0053-2544: ref=['THERE', 'WAS', 'A', 'RATTLE', 'OF', 'COINS', 'ON', 'THE', 'MAHOGANY', 'BAR', 'AS', 'KING', 'SOUGHT', 'TO', 'DISENTANGLE', 'A', 'SINGLE', 'BILL', 'FROM', 'THE', 'WADDED', 'UP', 'CURRENCY', 'IN', 'HIS', 'POCKET'] +6432-63723-0053-2544: hyp=['THERE', 'WAS', 'A', 'RATTLE', 'OF', 'COIN', 'DOWN', 'THE', 'MAHOGANY', 'BARS', 'KING', 'SOUGHT', 'TO', 'DISENTANGLE', 'A', 'SINGLE', 'BILL', 'FROM', 'THE', 'WATERED', 'UP', 'CURRENCY', 'IN', 'HIS', 'POCKET'] +6432-63723-0054-2545: ref=["IT'S", "IT'S", 'AN', 'ODD', 'COIN', 'AN', 'OLD', 'ROMAN', 'ONE', 'THAT', 'MISSUS', 'DARCY', 'HAD', 'IN', 'HER', 'PRIVATE', 'COLLECTION', 'KEPT', 'IN', 'THE', 'JEWELRY', 'STORE', 'SAFE', 'WAS', 'THE', 'WHISPERED', 'ANSWER'] +6432-63723-0054-2545: hyp=["IT'S", "IT'S", 'AN', 'ODD', 'COIN', 'AN', 'OLD', 'ROMAN', 'ONE', 'THAT', 'MISSUS', 'DARCY', 'HAD', 'IN', 'HER', 'PRIVATE', 'COLLECTION', 'KEPT', 'IN', 'THE', 'JEWELRY', 'STORE', 'SAFE', 'WAS', 'THE', 'WHISPERED', 'ANSWER'] +6432-63723-0055-2546: ref=['I', 'WENT', 'OVER', 'THEM', 'THE', 'OTHER', 'DAY', 'AND', 'NOTICED', 'SOME', 'WERE', 'MISSING', 'THOUGH', 'I', 'SAW', 'THEM', 'ALL', 'WHEN', 'I', 'PAID', 'A', 'VISIT', 'TO', 'HER', 'JUST', 'A', 'SHORT', 'TIME', 'BEFORE', 'SHE', 'WAS', 'KILLED'] +6432-63723-0055-2546: hyp=['I', 'WENT', 'OVER', 'THEM', 'NEAR', 'THE', 'DAY', 'AND', 'NOTICED', 'SOME', 'WERE', 'MISSING', 'THOUGH', 'I', 'SAW', 'THEM', 'ALL', 'WHEN', 'I', 'PAID', 'A', 'VISIT', 'TO', 'HER', 'JUST', 'A', 'SHORT', 'TIME', 'BEFORE', 'SHE', 'WAS', 'KILLED'] +6432-63723-0056-2547: ref=['THAT', 'WAS', 'HERS', 'WENT', 'ON', 'THE', 'JEWELER'] +6432-63723-0056-2547: hyp=['THAT', 'WAS', 'HERS', 'WENT', 'ON', 'THE', 'JUROR'] +6432-63723-0057-2548: ref=['NOW', 'HARRY', 'KING', 'HAS', 'IT', 'EXCLAIMED', 'COLONEL', 'ASHLEY'] +6432-63723-0057-2548: hyp=['NOW', 'HARRY', 'KING', 'HAS', 'IT', 'EXCLAIMED', 'COLONEL', 'ASHLEY'] +6938-70848-0000-1216: ref=['EVEN', 'THE', 'SUN', 'CAME', 'OUT', 'PALE', 'AND', 'WATERY', 'AT', 'NOON'] +6938-70848-0000-1216: hyp=['EVEN', 'THE', 'SUN', 'CAME', 'OUT', 'PALE', 'AND', 'WATERY', 'AT', 'NOON'] +6938-70848-0001-1217: ref=['THE', 'COLDS', 'AND', 'RHEUMATISM', 'OF', 'THE', 'RAINY', 'MONTHS', 'VANISHED'] +6938-70848-0001-1217: hyp=['THE', 'GOLDS', 'AND', 'RHEUMATISM', 'OF', 'THE', 'REINY', 'MONTHS', 'VANISHED'] +6938-70848-0002-1218: ref=['ASKED', 'A', 'WORKER', 'LAST', 'SUNDAY', 'YOU', 'DID', 'IT', 'WHEN', 'THE', 'YUNKERS'] +6938-70848-0002-1218: hyp=['AS', 'TO', 'WORKER', 'LAST', 'SUNDAY', 'YOU', 'DID', 'IT', 'WHEN', 'THE', 'YUNKERS'] +6938-70848-0003-1219: ref=['WELL', "DIDN'T", 'THEY', 'SHOOT', 'US', 'ONE', 'MAN', 'EXHIBITED', 'HIS', 'ARM', 'IN', 'A', 'SLING'] +6938-70848-0003-1219: hyp=['WELL', "DIDN'T", 'ISSUED', 'US', 'ONE', 'MAN', 'EXHIBITED', 'HIS', 'ARM', 'IN', 'A', 'SLING'] +6938-70848-0004-1220: 
ref=["HAVEN'T", 'I', 'GOT', 'SOMETHING', 'TO', 'REMEMBER', 'THEM', 'BY', 'THE', 'DEVILS'] +6938-70848-0004-1220: hyp=["HAVEN'T", 'I', 'GUARD', 'SOMETHING', 'TO', 'REMEMBER', 'THEM', 'BY', 'THE', 'DEVILS'] +6938-70848-0005-1221: ref=['WHO', 'ARE', 'YOU', 'TO', 'DESTROY', 'THE', 'LEGAL', 'GOVERNMENT', 'WHO', 'IS', 'LENIN', 'A', 'GERMAN'] +6938-70848-0005-1221: hyp=['WHO', 'ARE', 'YOU', 'TO', 'DESTROY', 'THE', 'LEGAL', 'GOVERNMENT', 'WITH', 'LINEN', 'A', 'GERMAN'] +6938-70848-0006-1222: ref=['WHO', 'ARE', 'YOU', 'A', 'COUNTER', 'REVOLUTIONIST', 'A', 'PROVOCATOR', 'THEY', 'BELLOWED', 'AT', 'HIM'] +6938-70848-0006-1222: hyp=['WHO', 'ARE', 'YOU', 'A', 'COUNTER', 'REVOLUTIONIST', 'A', 'PROVOCATOR', 'THEY', 'BELOVED', 'AT', 'HIM'] +6938-70848-0007-1223: ref=['YOU', 'CALL', 'YOURSELVES', 'THE', 'PEOPLE', 'OF', 'RUSSIA', 'BUT', "YOU'RE", 'NOT', 'THE', 'PEOPLE', 'OF', 'RUSSIA'] +6938-70848-0007-1223: hyp=['YOU', 'CALL', 'YOURSELVES', 'THE', 'PEOPLE', 'OF', 'RACHEL', 'BUT', 'YOU', 'ARE', 'NOT', 'THE', 'PEOPLE', 'OF', 'RATIA'] +6938-70848-0008-1224: ref=['THE', 'PEASANTS', 'ARE', 'THE', 'PEOPLE', 'OF', 'RUSSIA', 'WAIT', 'UNTIL', 'THE', 'PEASANTS'] +6938-70848-0008-1224: hyp=['TO', 'PIECE', 'AND', 'OTHER', 'PEOPLE', 'OF', 'RATIA', 'WAIT', 'UNTIL', 'THE', 'PEASANTS'] +6938-70848-0009-1225: ref=['WE', 'KNOW', 'WHAT', 'THE', 'PEASANTS', 'WILL', 'SAY', "AREN'T", 'THEY', 'WORKINGMEN', 'LIKE', 'OURSELVES'] +6938-70848-0009-1225: hyp=['WE', 'KNOW', 'WHAT', 'THE', 'PEASANTS', 'WILL', 'SAY', "AREN'T", 'THEY', 'WORKING', 'MAN', 'LIKE', 'OURSELVES'] +6938-70848-0010-1226: ref=['THESE', 'MEN', 'ESPECIALLY', 'WELCOMED', 'THE', 'CALL', 'TO', 'A', 'CONGRESS', 'OF', 'PEASANTS'] +6938-70848-0010-1226: hyp=['THIS', 'MAN', 'HAS', 'SPECIALLY', 'WELCOMED', 'TO', 'CALL', 'TO', 'A', 'CONGRESS', 'OF', 'PEASANTS'] +6938-70848-0011-1227: ref=['THESE', 'LAST', 'WERE', 'THE', 'YOUNG', 'GENERATION', 'WHO', 'HAD', 'BEEN', 'SERVING', 'IN', 'THE', 'ARMY'] +6938-70848-0011-1227: hyp=['THIS', 'LAST', 'WED', 'THE', 'YOUNG', 'GENERATION', 'WHO', 'HAD', 'BEEN', 'SERVING', 'IN', 'THE', 'ARMY'] +6938-70848-0012-1228: ref=['WHEREUPON', 'THE', 'OLD', 'EXECUTIVE', 'COMMITTEE', 'LEFT', 'THE', 'HALL'] +6938-70848-0012-1228: hyp=['WHEREUPON', 'THE', 'OLD', 'EXECUTED', 'COMMITTEE', 'LEFT', 'THE', 'HALL'] +6938-70848-0013-1229: ref=['DOWN', 'WITH', 'HIM', 'THEY', 'SHRIEKED'] +6938-70848-0013-1229: hyp=['DOWN', 'WITH', 'HIM', 'THEY', 'SHRIEKED'] +6938-70848-0014-1230: ref=['FEARFUL', 'TUMULT', 'CRIES', 'DOWN', 'WITH', 'THE', 'BOLSHEVIKI'] +6938-70848-0014-1230: hyp=['FEARFUL', 'TUMULT', 'QUITE', 'DOWN', 'WITH', 'THE', 'BALL', 'CHEVIKI'] +6938-70848-0015-1231: ref=['UPON', 'MY', 'RETURN', 'I', 'VISITED', 'SMOLNY', 'NO', 'SUCH', 'ACCUSATION', 'WAS', 'MADE', 'AGAINST', 'ME', 'THERE', 'AFTER', 'A', 'BRIEF', 'CONVERSATION', 'I', 'LEFT', 'AND', "THAT'S", 'ALL', 'LET', 'ANY', 'ONE', 'PRESENT', 'MAKE', 'SUCH', 'AN', 'ACCUSATION'] +6938-70848-0015-1231: hyp=['UPON', 'MY', 'RETURN', 'I', 'VISITED', 'MOLLY', 'NO', 'SUCH', 'ACCUSATION', 'WAS', 'MADE', 'AGAINST', 'ME', 'THERE', 'AFTER', 'A', 'BRIEF', 'CONVERSATION', 'I', 'LEFT', 'AND', 'THAT', 'SOUL', 'LATINUE', 'IN', 'PRESENT', 'MAKE', 'SUCH', 'AN', 'ACCUSATION'] +6938-70848-0016-1232: ref=['MEANWHILE', 'THE', 'QUESTION', 'OF', 'THE', 'STATUS', 'OF', 'THE', 'EXECUTIVE', 'COMMITTEE', 'WAS', 'AGITATING', 'ALL', 'MINDS'] +6938-70848-0016-1232: hyp=['MEANWHILE', 'THE', 'QUESTION', 'OF', 'THE', 'STRATUS', 'OF', 'THE', 'EXECUTORY', 'COMMITTEE', 'WAS', 'AGITATING', 'ALL', 'MINDS'] +6938-70848-0017-1233: ref=['BY', 
'DECLARING', 'THE', 'ASSEMBLY', 'EXTRAORDINARY', 'CONFERENCE', 'IT', 'HAD', 'BEEN', 'PLANNED', 'TO', 'BLOCK', 'THE', 'REELECTION', 'OF', 'THE', 'EXECUTIVE', 'COMMITTEE'] +6938-70848-0017-1233: hyp=['BY', 'DECLARING', 'THEIR', 'ASSEMBLY', 'EXTRAORDINARILY', 'CONFERENCE', 'IT', 'HAD', 'BEEN', 'PLANNED', 'TO', 'PLUCK', 'THIRD', 'TREE', 'LECTION', 'OF', 'THE', 'EXECUTED', 'COMMITTEE'] +6938-70848-0018-1234: ref=['BUT', 'THIS', 'WORKED', 'BOTH', 'WAYS', 'THE', 'LEFT', 'SOCIALIST', 'REVOLUTIONISTS', 'DECIDED', 'THAT', 'IF', 'THE', 'CONGRESS', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'EXECUTIVE', 'COMMITTEE', 'THEN', 'THE', 'EXECUTIVE', 'COMMITTEE', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'CONGRESS'] +6938-70848-0018-1234: hyp=['BUT', 'THIS', 'WORK', 'BOTH', 'WAYS', 'THE', 'LAD', 'SOCIALLY', 'REVOLUTIONIST', 'DECIDED', 'THAT', 'IF', 'THE', 'CONGRESS', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'EXUDY', 'COMMITTEE', 'TEN', 'TO', 'EXECUTE', 'COMMITTEE', 'HAD', 'NO', 'POWER', 'OVER', 'THE', 'CONGRESS'] +6938-70848-0019-1235: ref=['ON', 'THE', 'TWENTY', 'SEVENTH', 'OCCURRED', 'THE', 'DEBATE', 'ON', 'THE', 'LAND', 'QUESTION', 'WHICH', 'REVEALED', 'THE', 'DIFFERENCES', 'BETWEEN', 'THE', 'AGRARIAN', 'PROGRAMME', 'OF', 'THE', 'BOLSHEVIKI', 'AND', 'THE', 'LEFT', 'SOCIALIST', 'REVOLUTIONARIES'] +6938-70848-0019-1235: hyp=['ON', 'THE', 'TWENTY', 'SEVENTH', 'OCCURRED', 'THE', 'DEBATE', 'ON', 'THE', 'LAND', 'QUESTION', 'WHICH', 'REVIL', 'TO', 'DIFFERENCES', 'BETWEEN', 'THE', 'INGREDIAN', 'PROGRAM', 'OF', 'THE', 'BOLSHEVIKI', 'AND', 'THE', 'LEFT', 'SOCIALIST', 'REVOLUTIONARIES'] +6938-70848-0020-1236: ref=['THE', 'CONSTITUENT', 'ASSEMBLY', 'WILL', 'NOT', 'DARE', 'TO', 'BREAK', 'WITH', 'THE', 'WILL', 'OF', 'THE', 'PEOPLE'] +6938-70848-0020-1236: hyp=['THE', 'CONSTITUENT', 'ASSEMBLY', 'WILL', 'NOT', 'DARE', 'TO', 'BREAK', 'WITH', 'THE', 'WILL', 'OF', 'THE', 'PEOPLE'] +6938-70848-0021-1237: ref=['FOLLOWED', 'HIM', 'LENIN', 'LISTENED', 'TO', 'NOW', 'WITH', 'ABSORBING', 'INTENSITY'] +6938-70848-0021-1237: hyp=['FOLLOWED', 'HIM', 'LENIN', 'LISTENED', 'TO', 'NOW', 'WITH', 'ABSORBING', 'INTENSITY'] +6938-70848-0022-1238: ref=['THE', 'FIRST', 'STAGE', 'WAS', 'THE', 'CRUSHING', 'OF', 'AUTOCRACY', 'AND', 'THE', 'CRUSHING', 'OF', 'THE', 'POWER', 'OF', 'THE', 'INDUSTRIAL', 'CAPITALISTS', 'AND', 'LAND', 'OWNERS', 'WHOSE', 'INTERESTS', 'ARE', 'CLOSELY', 'RELATED'] +6938-70848-0022-1238: hyp=['THE', 'FIRST', 'AGE', 'WAS', 'A', 'CRUSHING', 'OF', 'AUTOCRACY', 'AND', 'THE', 'CRASHING', 'OF', 'THE', 'POWER', 'OF', 'THE', 'INDUSTRIAL', 'CAPITALIST', 'AND', 'THE', 'LANDOWNERS', 'WHOSE', 'INTERESTS', 'ARE', 'CLOTHING', 'RELATED'] +6938-70848-0023-1239: ref=['THE', 'DUMAS', 'AND', 'ZEMSTVOS', 'WERE', 'DROPPED'] +6938-70848-0023-1239: hyp=['DID', 'YOU', 'ME', 'SEND', 'THEM', 'STOOLS', 'WERE', 'DROPPED'] +6938-70848-0024-1240: ref=['HE', 'KNEW', 'THAT', 'AN', 'AGREEMENT', 'WITH', 'THE', 'BOLSHEVIKI', 'WAS', 'BEING', 'DISCUSSED', 'BUT', 'HE', 'DID', 'NOT', 'KNOW', 'THAT', 'IT', 'HAD', 'BEEN', 'CONCLUDED'] +6938-70848-0024-1240: hyp=['HE', 'KNEW', 'THAT', 'AN', 'AGREEMENT', 'WITH', 'THE', 'BOLSHEVIKI', 'WAS', 'BEING', 'DISCUSSED', 'BUT', 'HE', 'DID', 'NOT', 'KNOW', 'THAT', 'IT', 'HAD', 'BEEN', 'CONCLUDED'] +6938-70848-0025-1241: ref=['HE', 'SPOKE', 'TO', 'THE', 'RUMP', 'CONVENTION'] +6938-70848-0025-1241: hyp=['HE', 'SPOKE', 'TO', 'THE', 'RUM', 'CONVENTION'] +6938-70848-0026-1242: ref=['THE', 'VILLAGES', 'WILL', 'SAVE', 'US', 'IN', 'THE', 'END'] +6938-70848-0026-1242: hyp=['THE', 'RELIGIOUS', 'WILL', 'SAVE', 'US', 'IN', 'THE', 'END'] 
+6938-70848-0027-1243: ref=['BUT', 'THE', 'PRESENT', 'MOVEMENT', 'IS', 'INTERNATIONAL', 'AND', 'THAT', 'IS', 'WHY', 'IT', 'IS', 'INVINCIBLE'] +6938-70848-0027-1243: hyp=['BUT', 'THE', 'PRESENT', 'MOMENT', 'IS', 'INTERNATIONAL', 'AND', 'THAT', 'IS', 'WHY', 'IT', 'IS', 'INVINCIBLE'] +6938-70848-0028-1244: ref=['THE', 'WILL', 'OF', 'MILLIONS', 'OF', 'WORKERS', 'IS', 'NOW', 'CONCENTRATED', 'IN', 'THIS', 'HALL'] +6938-70848-0028-1244: hyp=['THE', 'WHEEL', 'OF', 'MILLIONS', 'OF', 'WORKERS', 'IS', 'SOME', 'CONCENTRATED', 'IN', 'THE', 'HALL'] +6938-70848-0029-1245: ref=['A', 'NEW', 'HUMANITY', 'WILL', 'BE', 'BORN', 'OF', 'THIS', 'WAR'] +6938-70848-0029-1245: hyp=['A', 'NEW', 'HUMANITY', 'WILL', 'BE', 'BORN', 'OF', 'THIS', 'WAR'] +6938-70848-0030-1246: ref=['I', 'GREET', 'YOU', 'WITH', 'THE', 'CHRISTENING', 'OF', 'A', 'NEW', 'RUSSIAN', 'LIFE', 'AND', 'FREEDOM'] +6938-70848-0030-1246: hyp=['I', 'GREET', 'YOU', 'WITH', 'THE', 'CHRISTIANING', 'OF', 'A', 'NEW', 'RUSSIAN', 'LIFE', 'AND', 'FREEDOM'] +7018-75788-0000-135: ref=['THEN', 'I', 'TOOK', 'UP', 'A', 'GREAT', 'STONE', 'FROM', 'AMONG', 'THE', 'TREES', 'AND', 'COMING', 'UP', 'TO', 'HIM', 'SMOTE', 'HIM', 'THEREWITH', 'ON', 'THE', 'HEAD', 'WITH', 'ALL', 'MY', 'MIGHT', 'AND', 'CRUSHED', 'IN', 'HIS', 'SKULL', 'AS', 'HE', 'LAY', 'DEAD', 'DRUNK'] +7018-75788-0000-135: hyp=['THEN', 'I', 'TOOK', 'UP', 'A', 'GREAT', 'STONE', 'FROM', 'AMONG', 'THE', 'TREES', 'AND', 'COMING', 'UP', 'TO', 'HIM', 'SMOTE', 'HIM', 'THEREWITH', 'ON', 'THE', 'HEAD', 'WITH', 'ALL', 'MY', 'MIGHT', 'AND', 'CRUSHED', 'IN', 'HIS', 'SKULL', 'AS', 'HE', 'LAY', 'DEAD', 'DRUNK'] +7018-75788-0001-136: ref=['BEHOLD', 'A', 'SHIP', 'WAS', 'MAKING', 'FOR', 'THE', 'ISLAND', 'THROUGH', 'THE', 'DASHING', 'SEA', 'AND', 'CLASHING', 'WAVES'] +7018-75788-0001-136: hyp=['BEHOLD', 'A', 'SHIP', 'WAS', 'MAKING', 'FOR', 'THE', 'ISLAND', 'THROUGH', 'THE', 'DASHING', 'SEA', 'AND', 'CLASHING', 'WAVES'] +7018-75788-0002-137: ref=['HEARING', 'THIS', 'I', 'WAS', 'SORE', 'TROUBLED', 'REMEMBERING', 'WHAT', 'I', 'HAD', 'BEFORE', 'SUFFERED', 'FROM', 'THE', 'APE', 'KIND'] +7018-75788-0002-137: hyp=['HEARING', 'THIS', 'I', 'WAS', 'SORE', 'TROUBLED', 'REMEMBERING', 'WHAT', 'I', 'HAD', 'BEFORE', 'SUFFERED', 'FROM', 'THE', 'APE', 'KIND'] +7018-75788-0003-138: ref=['UPON', 'THIS', 'HE', 'BROUGHT', 'ME', 'A', 'COTTON', 'BAG', 'AND', 'GIVING', 'IT', 'TO', 'ME', 'SAID', 'TAKE', 'THIS', 'BAG', 'AND', 'FILL', 'IT', 'WITH', 'PEBBLES', 'FROM', 'THE', 'BEACH', 'AND', 'GO', 'FORTH', 'WITH', 'A', 'COMPANY', 'OF', 'THE', 'TOWNSFOLK', 'TO', 'WHOM', 'I', 'WILL', 'GIVE', 'A', 'CHARGE', 'RESPECTING', 'THEE'] +7018-75788-0003-138: hyp=['UPON', 'THIS', 'HE', 'BROUGHT', 'ME', 'A', 'COTTON', 'BAG', 'AND', 'GIVEN', 'IT', 'TO', 'HIM', 'HE', 'SAID', 'TAKE', 'THIS', 'BAG', 'AND', 'FILL', 'IT', 'WITH', 'PEBBLES', 'FROM', 'THE', 'BEACH', 'AND', 'GO', 'FORTH', 'WITH', 'A', 'COMPANY', 'OF', 'THE', 'TOWNSFOLK', 'TO', 'WHOM', 'I', 'WILL', 'GIVE', 'A', 'CHARGE', 'RESPECTING', 'THEE'] +7018-75788-0004-139: ref=['DO', 'AS', 'THEY', 'DO', 'AND', 'BELIKE', 'THOU', 'SHALT', 'GAIN', 'WHAT', 'MAY', 'FURTHER', 'THY', 'RETURN', 'VOYAGE', 'TO', 'THY', 'NATIVE', 'LAND'] +7018-75788-0004-139: hyp=['DO', 'AS', 'THEY', 'DO', 'AND', 'BE', 'LIKE', 'THOU', 'SHALT', 'GAIN', 'WHAT', 'MAY', 'FURTHER', 'THY', 'RETURN', 'VOYAGE', 'TO', 'THY', 'NATIVE', 'LAND'] +7018-75788-0005-140: ref=['THEN', 'HE', 'CARRIED', 'ME', 'TO', 'THE', 'BEACH', 'WHERE', 'I', 'FILLED', 'MY', 'BAG', 'WITH', 'PEBBLES', 'LARGE', 'AND', 'SMALL', 'AND', 'PRESENTLY', 'WE', 'SAW', 'A', 'COMPANY', 'OF', 
'FOLK', 'ISSUE', 'FROM', 'THE', 'TOWN', 'EACH', 'BEARING', 'A', 'BAG', 'LIKE', 'MINE', 'FILLED', 'WITH', 'PEBBLES'] +7018-75788-0005-140: hyp=['THEN', 'HE', 'CARRIED', 'ME', 'TO', 'THE', 'BEACH', 'WHERE', 'I', 'FILLED', 'MY', 'BAG', 'AND', 'WITH', 'PEBBLES', 'LARGE', 'AND', 'SMALL', 'AND', 'PRESENTLY', 'WE', 'SAW', 'A', 'COMPANY', 'OF', 'FOLK', 'ISSUED', 'FROM', 'THE', 'TOWN', 'EACH', 'BEARING', 'A', 'BAG', 'LIKE', 'MINE', 'FILLED', 'WITH', 'PEBBLES'] +7018-75788-0006-141: ref=['TO', 'THESE', 'HE', 'COMMITTED', 'ME', 'COMMENDING', 'ME', 'TO', 'THEIR', 'CARE', 'AND', 'SAYING', 'THIS', 'MAN', 'IS', 'A', 'STRANGER', 'SO', 'TAKE', 'HIM', 'WITH', 'YOU', 'AND', 'TEACH', 'HIM', 'HOW', 'TO', 'GATHER', 'THAT', 'HE', 'MAY', 'GET', 'HIS', 'DAILY', 'BREAD', 'AND', 'YOU', 'WILL', 'EARN', 'YOUR', 'REWARD', 'AND', 'RECOMPENSE', 'IN', 'HEAVEN'] +7018-75788-0006-141: hyp=['TO', 'THESE', 'HE', 'COMMITTED', 'ME', 'COMMENDING', 'ME', 'TO', 'THEIR', 'CARE', 'AND', 'SAYING', 'THIS', 'MAN', 'IS', 'A', 'STRANGER', 'SO', 'TAKE', 'HIM', 'WITH', 'YOU', 'AND', 'TEACH', 'HIM', 'HOW', 'TO', 'GATHER', 'THAT', 'HE', 'MAY', 'GET', 'HIS', 'DAILY', 'BREAD', 'AND', 'YOU', 'WILL', 'EARN', 'YOUR', 'REWARD', 'AND', 'RECOMPENSE', 'IN', 'HEAVEN'] +7018-75788-0007-142: ref=['NOW', 'SLEEPING', 'UNDER', 'THESE', 'TREES', 'WERE', 'MANY', 'APES', 'WHICH', 'WHEN', 'THEY', 'SAW', 'US', 'ROSE', 'AND', 'FLED', 'FROM', 'US', 'AND', 'SWARMED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'WHEREUPON', 'MY', 'COMPANIONS', 'BEGAN', 'TO', 'PELT', 'THEM', 'WITH', 'WHAT', 'THEY', 'HAD', 'IN', 'THEIR', 'BAGS', 'AND', 'THE', 'APES', 'FELL', 'TO', 'PLUCKING', 'OF', 'THE', 'FRUIT', 'OF', 'THE', 'TREES', 'AND', 'CASTING', 'THEM', 'AT', 'THE', 'FOLK'] +7018-75788-0007-142: hyp=['NOW', 'SLEEPING', 'UNDER', 'THESE', 'TREES', 'WERE', 'MANY', 'IPES', 'WHICH', 'WHEN', 'THEY', 'SAW', 'US', 'ROSE', 'AND', 'FLED', 'FROM', 'US', 'AND', 'SWARMED', 'UP', 'AMONG', 'THE', 'BRANCHES', 'WHEREUPON', 'MY', 'COMPANIONS', 'BEGAN', 'TO', 'PELT', 'THEM', 'WITH', 'WHAT', 'THEY', 'HAD', 'IN', 'THEIR', 'BAGS', 'AND', 'THE', 'APES', 'FELL', 'TO', 'PLUCKING', 'OF', 'THE', 'FRUIT', 'OF', 'THE', 'TREES', 'AND', 'CASTING', 'THEM', 'AT', 'THE', 'FOLK'] +7018-75788-0008-143: ref=['WE', 'WEIGHED', 'ANCHOR', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'SAYING', 'HER', 'PERMITTED', 'SAY'] +7018-75788-0008-143: hyp=['WE', 'WEIGHED', 'ANCHOR', 'AND', 'SHAHRAZAD', 'PERCEIVED', 'THE', 'DAWN', 'OF', 'DAY', 'AND', 'CEASED', 'SAYING', 'HER', 'PERMITTED', 'SAY'] +7018-75788-0009-144: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'FIFTY', 'NINTH', 'NIGHT'] +7018-75788-0009-144: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'FIFTY', 'NINTH', 'NIGHT'] +7018-75788-0010-145: ref=['AND', 'CEASED', 'NOT', 'SAILING', 'TILL', 'WE', 'ARRIVED', 'SAFELY', 'AT', 'BASSORAH'] +7018-75788-0010-145: hyp=['AND', 'CEASED', 'NOT', 'SAILING', 'TILL', 'WE', 'ARRIVED', 'SAFELY', 'AT', 'PESSORAR'] +7018-75788-0011-146: ref=['THERE', 'I', 'ABODE', 'A', 'LITTLE', 'AND', 'THEN', 'WENT', 'ON', 'TO', 'BAGHDAD', 'WHERE', 'I', 'ENTERED', 'MY', 'QUARTER', 'AND', 'FOUND', 'MY', 'HOUSE', 'AND', 'FOREGATHERED', 'WITH', 'MY', 'FAMILY', 'AND', 'SALUTED', 'MY', 'FRIENDS', 'WHO', 'GAVE', 'ME', 'JOY', 'OF', 'MY', 'SAFE', 'RETURN', 'AND', 'I', 'LAID', 'UP', 'ALL', 'MY', 'GOODS', 'AND', 'VALUABLES', 'IN', 'MY', 'STOREHOUSES'] +7018-75788-0011-146: hyp=['THERE', 'I', 'ABODE', 'A', 'LITTLE', 'AND', 'THEN', 'WENT', 'ON', 'TO', 'BAGDAD', 'WHERE', 'I', 'ENTERED', 'MY', 'QUARTER', 'AND', 
'FOUND', 'MY', 'HOUSE', 'AND', 'FORGATHERED', 'WITH', 'MY', 'FAMILY', 'AND', 'SALUTED', 'MY', 'FRIENDS', 'WHO', 'GAVE', 'ME', 'JOY', 'OF', 'MY', 'SAFE', 'RETURN', 'AND', 'I', 'LAID', 'UP', 'ALL', 'MY', 'GOODS', 'AND', 'VALUABLES', 'IN', 'MY', 'STOREHOUSES'] +7018-75788-0012-147: ref=['AFTER', 'WHICH', 'I', 'RETURNED', 'TO', 'MY', 'OLD', 'MERRY', 'WAY', 'OF', 'LIFE', 'AND', 'FORGOT', 'ALL', 'I', 'HAD', 'SUFFERED', 'IN', 'THE', 'GREAT', 'PROFIT', 'AND', 'GAIN', 'I', 'HAD', 'MADE'] +7018-75788-0012-147: hyp=['AFTER', 'WHICH', 'I', 'RETURNED', 'TO', 'MY', 'OLD', 'MERRY', 'WAY', 'OF', 'LIFE', 'AND', 'FORGOT', 'ALL', 'I', 'HAD', 'SUFFERED', 'IN', 'THE', 'GREAT', 'PROFIT', 'AND', 'GAIN', 'I', 'HAD', 'MADE'] +7018-75788-0013-148: ref=['NEXT', 'MORNING', 'AS', 'SOON', 'AS', 'IT', 'WAS', 'LIGHT', 'HE', 'PRAYED', 'THE', 'DAWN', 'PRAYER', 'AND', 'AFTER', 'BLESSING', 'MOHAMMED', 'THE', 'CREAM', 'OF', 'ALL', 'CREATURES', 'BETOOK', 'HIMSELF', 'TO', 'THE', 'HOUSE', 'OF', 'SINDBAD', 'THE', 'SEAMAN', 'AND', 'WISHED', 'HIM', 'A', 'GOOD', 'DAY'] +7018-75788-0013-148: hyp=['NEXT', 'MORNING', 'AS', 'SOON', 'AS', 'IT', 'WAS', 'LIGHT', 'HE', 'PRAYED', 'THE', 'DAWN', 'PRAYER', 'AND', 'AFTER', 'BLESSING', 'MOHAMMED', 'THE', 'CREAM', 'OF', 'ALL', 'CREATURES', 'BETOOK', 'HIMSELF', 'TO', 'THE', 'HOUSE', 'OF', 'SINBAD', 'THE', 'SEAMAN', 'AND', 'WISHED', 'HIM', 'A', 'GOOD', 'DAY'] +7018-75788-0014-149: ref=['HERE', 'I', 'FOUND', 'A', 'GREAT', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'FULL', 'OF', 'MERCHANTS', 'AND', 'NOTABLES', 'WHO', 'HAD', 'WITH', 'THEM', 'GOODS', 'OF', 'PRICE', 'SO', 'I', 'EMBARKED', 'MY', 'BALES', 'THEREIN'] +7018-75788-0014-149: hyp=['HERE', 'I', 'FOUND', 'A', 'GREAT', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'FULL', 'OF', 'MERCHANTS', 'AND', 'NOTABLES', 'WHO', 'HAD', 'WITH', 'THEM', 'GOODS', 'OF', 'PRICE', 'SO', 'I', 'EMBARKED', 'MY', 'BALES', 'THEREIN'] +7018-75788-0015-150: ref=['HAPLY', 'AMONGST', 'YOU', 'IS', 'ONE', 'RIGHTEOUS', 'WHOSE', 'PRAYERS', 'THE', 'LORD', 'WILL', 'ACCEPT'] +7018-75788-0015-150: hyp=['HAPPILY', 'AMONGST', 'YOU', 'IS', 'ONE', 'RIGHTEOUS', 'WHOSE', 'PRAYERS', 'THE', 'LORD', 'WILL', 'ACCEPT'] +7018-75788-0016-151: ref=['PRESENTLY', 'THE', 'SHIP', 'STRUCK', 'THE', 'MOUNTAIN', 'AND', 'BROKE', 'UP', 'AND', 'ALL', 'AND', 'EVERYTHING', 'ON', 'BOARD', 'OF', 'HER', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7018-75788-0016-151: hyp=['PRESENTLY', 'THE', 'SHIP', 'STRUCK', 'THE', 'MOUNTAIN', 'AND', 'BROKE', 'UP', 'AND', 'ALL', 'THEN', 'EVERYTHING', 'ON', 'BOARD', 'OF', 'HER', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7018-75788-0017-152: ref=['BUT', 'IT', 'BURNETH', 'IN', 'THEIR', 'BELLIES', 'SO', 'THEY', 'CAST', 'IT', 'UP', 'AGAIN', 'AND', 'IT', 'CONGEALETH', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'WATER', 'WHEREBY', 'ITS', 'COLOR', 'AND', 'QUANTITIES', 'ARE', 'CHANGED', 'AND', 'AT', 'LAST', 'THE', 'WAVES', 'CAST', 'IT', 'ASHORE', 'AND', 'THE', 'TRAVELLERS', 'AND', 'MERCHANTS', 'WHO', 'KNOW', 'IT', 'COLLECT', 'IT', 'AND', 'SELL', 'IT'] +7018-75788-0017-152: hyp=['BUT', 'AT', 'BURNETH', 'IN', 'THEIR', 'BELLIES', 'SO', 'THEY', 'CAST', 'IT', 'UP', 'AGAIN', 'AND', 'IT', 'CONCEALETH', 'ON', 'THE', 'SURFACE', 'OF', 'THE', 'WATER', 'WHEREBY', 'ITS', 'COLOR', 'AND', 'QUANTITIES', 'ARE', 'CHANGED', 'AND', 'AT', 'LAST', 'THE', 'WAVES', 'CAST', 'IT', 'ASHORE', 'AND', 'THE', 'TRAVELLERS', 'AND', 'MERCHANTS', 'WHO', 'KNOW', 'IT', 'COLLECTED', 'AND', 'SELL', 'IT'] +7018-75788-0018-153: ref=['EACH', 'THAT', 'DIED', 'WE', 'WASHED', 'AND', 'SHROUDED', 'IN', 'SOME', 'OF', 'THE', 'CLOTHES', 'AND', 'LINEN', 
'CAST', 'ASHORE', 'BY', 'THE', 'TIDES', 'AND', 'AFTER', 'A', 'LITTLE', 'THE', 'REST', 'OF', 'MY', 'FELLOWS', 'PERISHED', 'ONE', 'BY', 'ONE', 'TILL', 'I', 'HAD', 'BURIED', 'THE', 'LAST', 'OF', 'THE', 'PARTY', 'AND', 'ABODE', 'ALONE', 'ON', 'THE', 'ISLAND', 'WITH', 'BUT', 'A', 'LITTLE', 'PROVISION', 'LEFT', 'I', 'WHO', 'WAS', 'WONT', 'TO', 'HAVE', 'SO', 'MUCH'] +7018-75788-0018-153: hyp=['EACH', 'THAT', 'DIED', 'WE', 'WASHED', 'AND', 'SHROUDED', 'IN', 'SOME', 'OF', 'THE', 'CLOTHES', 'AND', 'LINEN', 'CAST', 'ASHORE', 'BY', 'THE', 'TIDES', 'AND', 'AFTER', 'LITTLE', 'THE', 'REST', 'OF', 'MY', 'FELLOWS', 'PERISHED', 'ONE', 'BY', 'ONE', 'TILL', 'I', 'HAD', 'BURIED', 'THE', 'LAST', 'OF', 'THE', 'PARTY', 'AND', 'ABODE', 'ALONE', 'ON', 'THE', 'ISLAND', 'WITH', 'BUT', 'A', 'LITTLE', 'PROVISION', 'LEFT', 'I', 'WHO', 'WAS', 'WONT', 'TO', 'HAVE', 'SO', 'MUCH'] +7018-75788-0019-154: ref=['BUT', 'THERE', 'IS', 'MAJESTY', 'AND', 'THERE', 'IS', 'NO', 'MIGHT', 'SAVE', 'IN', 'ALLAH', 'THE', 'GLORIOUS', 'THE', 'GREAT'] +7018-75788-0019-154: hyp=['BUT', 'THERE', 'IS', 'MAJESTY', 'AND', 'THERE', 'IS', 'NO', 'MIGHT', 'SAVE', 'IN', 'ALLAH', 'THE', 'GLORIOUS', 'THE', 'GREAT'] +7018-75789-0000-155: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'FIRST', 'NIGHT'] +7018-75789-0000-155: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'FIRST', 'NIGHT'] +7018-75789-0001-156: ref=['THEN', 'SIGHING', 'FOR', 'MYSELF', 'I', 'SET', 'TO', 'WORK', 'COLLECTING', 'A', 'NUMBER', 'OF', 'PIECES', 'OF', 'CHINESE', 'AND', 'COMORIN', 'ALOES', 'WOOD', 'AND', 'I', 'BOUND', 'THEM', 'TOGETHER', 'WITH', 'ROPES', 'FROM', 'THE', 'WRECKAGE', 'THEN', 'I', 'CHOSE', 'OUT', 'FROM', 'THE', 'BROKEN', 'UP', 'SHIPS', 'STRAIGHT', 'PLANKS', 'OF', 'EVEN', 'SIZE', 'AND', 'FIXED', 'THEM', 'FIRMLY', 'UPON', 'THE', 'ALOES', 'WOOD', 'MAKING', 'ME', 'A', 'BOAT', 'RAFT', 'A', 'LITTLE', 'NARROWER', 'THAN', 'THE', 'CHANNEL', 'OF', 'THE', 'STREAM', 'AND', 'I', 'TIED', 'IT', 'TIGHTLY', 'AND', 'FIRMLY', 'AS', 'THOUGH', 'IT', 'WERE', 'NAILED'] +7018-75789-0001-156: hyp=['THEN', 'SIGNED', 'FOR', 'MYSELF', 'I', 'SET', 'TO', 'WORK', 'COLLECTING', 'A', 'NUMBER', 'OF', 'PIECES', 'OF', 'CHINESE', 'AND', 'CORMOR', 'AND', 'ALOES', 'WOOD', 'AND', 'I', 'BOUND', 'THEM', 'TOGETHER', 'WITH', 'ROPES', 'FROM', 'THE', 'WRECKAGE', 'THEN', 'I', 'CHOSE', 'OUT', 'FROM', 'THE', 'BROKEN', 'UP', 'SHIP', 'STRAIGHT', 'PLANKS', 'OF', 'EVEN', 'SIZE', 'AND', 'FIXED', 'THEM', 'FIRMLY', 'UPON', 'THE', 'ALLIES', 'WOOD', 'MAKING', 'ME', 'A', 'BOAT', 'RAFT', 'A', 'LITTLE', 'NARROWER', 'THAN', 'THE', 'CHANNEL', 'OF', 'THE', 'STREAM', 'AND', 'I', 'TIED', 'IT', 'TIGHTLY', 'AND', 'FIRMLY', 'AS', 'THOUGH', 'IT', 'WERE', 'NAILED'] +7018-75789-0002-157: ref=['LAND', 'AFTER', 'LAND', 'SHALT', 'THOU', 'SEEK', 'AND', 'FIND', 'BUT', 'NO', 'OTHER', 'LIFE', 'ON', 'THY', 'WISH', 'SHALL', 'WAIT', 'FRET', 'NOT', 'THY', 'SOUL', 'IN', 'THY', 'THOUGHTS', 'O', 'NIGHT', 'ALL', 'WOES', 'SHALL', 'END', 'OR', 'SOONER', 'OR', 'LATE'] +7018-75789-0002-157: hyp=['LAND', 'AFTER', 'LAND', 'SHALT', 'THOU', 'SEE', 'CONFINED', 'BUT', 'NO', 'OTHER', 'LIFE', 'ON', 'THY', 'WISH', 'SHALL', 'WAIT', 'FRET', 'NOT', 'THY', 'SOUL', 'IN', 'THY', 'THOUGHTS', 'A', 'KNIGHT', 'ALL', 'THOSE', 'SHALL', 'END', 'OR', 'SOONER', 'OR', 'LATE'] +7018-75789-0003-158: ref=['I', 'ROWED', 'MY', 'CONVEYANCE', 'INTO', 'THE', 'PLACE', 'WHICH', 'WAS', 'INTENSELY', 'DARK', 'AND', 'THE', 'CURRENT', 'CARRIED', 'THE', 'RAFT', 'WITH', 'IT', 'DOWN', 'THE', 'UNDERGROUND', 'CHANNEL'] +7018-75789-0003-158: hyp=['I', 'RIDE', 
'MY', 'CONVEYANCE', 'INTO', 'THE', 'PLACE', 'WHICH', 'WAS', 'INTENSELY', 'DARK', 'AND', 'THE', 'CURRENT', 'CARRIED', 'ME', 'THE', 'RAFT', 'WITH', 'IT', 'DOWN', 'THE', 'UNDERGROUND', 'CHANNEL'] +7018-75789-0004-159: ref=['AND', 'I', 'THREW', 'MYSELF', 'DOWN', 'UPON', 'MY', 'FACE', 'ON', 'THE', 'RAFT', 'BY', 'REASON', 'OF', 'THE', 'NARROWNESS', 'OF', 'THE', 'CHANNEL', 'WHILST', 'THE', 'STREAM', 'CEASED', 'NOT', 'TO', 'CARRY', 'ME', 'ALONG', 'KNOWING', 'NOT', 'NIGHT', 'FROM', 'DAY', 'FOR', 'THE', 'EXCESS', 'OF', 'THE', 'GLOOM', 'WHICH', 'ENCOMPASSED', 'ME', 'ABOUT', 'AND', 'MY', 'TERROR', 'AND', 'CONCERN', 'FOR', 'MYSELF', 'LEST', 'I', 'SHOULD', 'PERISH'] +7018-75789-0004-159: hyp=['AND', 'I', 'THREW', 'MYSELF', 'DOWN', 'UPON', 'MY', 'FACE', 'ON', 'THE', 'RAFT', 'BY', 'REASON', 'OF', 'THE', 'NARROWNESS', 'OF', 'THE', 'CHANNEL', 'WHILST', 'THE', 'STREAM', 'CEASED', 'NOT', 'TO', 'CARRY', 'ME', 'ALONG', 'KNOWING', 'NOT', 'NIGHT', 'FROM', 'DAY', 'FOR', 'THE', 'EXCESS', 'OF', 'THE', 'GLOOM', 'WHICH', 'ENCOMPASSED', 'ME', 'ABOUT', 'IN', 'MY', 'TERROR', 'AND', 'CONCERN', 'FOR', 'MYSELF', 'LEST', 'I', 'SHOULD', 'PERISH'] +7018-75789-0005-160: ref=['WHEN', 'I', 'AWOKE', 'AT', 'LAST', 'I', 'FOUND', 'MYSELF', 'IN', 'THE', 'LIGHT', 'OF', 'HEAVEN', 'AND', 'OPENING', 'MY', 'EYES', 'I', 'SAW', 'MYSELF', 'IN', 'A', 'BROAD', 'STREAM', 'AND', 'THE', 'RAFT', 'MOORED', 'TO', 'AN', 'ISLAND', 'IN', 'THE', 'MIDST', 'OF', 'A', 'NUMBER', 'OF', 'INDIANS', 'AND', 'ABYSSINIANS'] +7018-75789-0005-160: hyp=['WHEN', 'I', 'AWOKE', 'AT', 'LAST', 'I', 'FOUND', 'MYSELF', 'IN', 'THE', 'LIGHT', 'OF', 'HEAVEN', 'AND', 'OPENING', 'MY', 'EYES', 'I', 'SAW', 'MYSELF', 'IN', 'A', 'BROAD', 'STREAM', 'AND', 'THE', 'RAFT', 'MOORED', 'TO', 'AN', 'ISLAND', 'IN', 'THE', 'MIDST', 'OF', 'A', 'NUMBER', 'OF', 'INDIANS', 'AND', 'ABYSSINIANS'] +7018-75789-0006-161: ref=['BUT', 'I', 'WAS', 'DELIGHTED', 'AT', 'MY', 'ESCAPE', 'FROM', 'THE', 'RIVER'] +7018-75789-0006-161: hyp=['BUT', 'I', 'WAS', 'DELIGHTED', 'AT', 'MY', 'ESCAPE', 'FROM', 'THE', 'RIVER'] +7018-75789-0007-162: ref=['WHEN', 'THEY', 'SAW', 'I', 'UNDERSTOOD', 'THEM', 'NOT', 'AND', 'MADE', 'THEM', 'NO', 'ANSWER', 'ONE', 'OF', 'THEM', 'CAME', 'FORWARD', 'AND', 'SAID', 'TO', 'ME', 'IN', 'ARABIC', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'BROTHER'] +7018-75789-0007-162: hyp=['WHEN', 'THEY', 'SAW', 'I', 'UNDERSTOOD', 'THEM', 'NIGHT', 'AND', 'MADE', 'THEM', 'NO', 'ANSWER', 'ONE', 'OF', 'THEM', 'CAME', 'FORWARD', 'AND', 'SAID', 'TO', 'ME', 'IN', 'ARABIC', 'PEACE', 'BE', 'WITH', 'THEE', 'O', 'MY', 'BROTHER'] +7018-75789-0008-163: ref=['O', 'MY', 'BROTHER', 'ANSWERED', 'HE', 'WE', 'ARE', 'HUSBANDMEN', 'AND', 'TILLERS', 'OF', 'THE', 'SOIL', 'WHO', 'CAME', 'OUT', 'TO', 'WATER', 'OUR', 'FIELDS', 'AND', 'PLANTATIONS', 'AND', 'FINDING', 'THEE', 'ASLEEP', 'ON', 'THIS', 'RAFT', 'LAID', 'HOLD', 'OF', 'IT', 'AND', 'MADE', 'IT', 'FAST', 'BY', 'US', 'AGAINST', 'THOU', 'SHOULDST', 'AWAKE', 'AT', 'THY', 'LEISURE'] +7018-75789-0008-163: hyp=['O', 'MY', 'BROTHER', 'ANSWERED', 'HE', 'WE', 'ARE', 'HUSBANDMEN', 'AND', 'TELLERS', 'OF', 'THE', 'SOIL', 'WHO', 'CAME', 'OUT', 'TO', 'WATER', 'OUR', 'FIELDS', 'IN', 'PLANTATIONS', 'AND', 'FINDING', 'THEE', 'ASLEEP', 'ON', 'THIS', 'RAFT', 'LAID', 'HOLD', 'OF', 'IT', 'AND', 'MADE', 'IT', 'FAST', 'BY', 'US', 'AGAINST', 'THOU', 'SHOULDST', 'AWAKE', 'AT', 'THY', 'LEISURE'] +7018-75789-0009-164: ref=['I', 'ANSWERED', 'FOR', "ALLAH'S", 'SAKE', 'O', 'MY', 'LORD', 'ERE', 'I', 'SPEAK', 'GIVE', 'ME', 'SOMEWHAT', 'TO', 'EAT', 'FOR', 'I', 'AM', 'STARVING', 'AND', 'AFTER', 'ASK', 'ME', 
'WHAT', 'THOU', 'WILT'] +7018-75789-0009-164: hyp=['I', 'ANSWERED', 'FOR', "ALLAH'S", 'SAKE', 'AND', 'MY', 'LORD', 'ERE', 'I', 'SPEAK', 'GIVE', 'ME', 'SOMEWHAT', 'TO', 'EAT', 'FOR', 'I', 'AM', 'STARVING', 'AND', 'AFTER', 'ASK', 'ME', 'WHAT', 'THOU', 'WILT'] +7018-75789-0010-165: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'SECOND', 'NIGHT'] +7018-75789-0010-165: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'SECOND', 'NIGHT'] +7018-75789-0011-166: ref=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'SINDBAD', 'THE', 'SEAMAN', 'CONTINUED', 'WHEN', 'I', 'LANDED', 'AND', 'FOUND', 'MYSELF', 'AMONGST', 'THE', 'INDIANS', 'AND', 'ABYSSINIANS', 'AND', 'HAD', 'TAKEN', 'SOME', 'REST', 'THEY', 'CONSULTED', 'AMONG', 'THEMSELVES', 'AND', 'SAID', 'TO', 'ONE', 'ANOTHER', 'THERE', 'IS', 'NO', 'HELP', 'FOR', 'IT', 'BUT', 'WE', 'CARRY', 'HIM', 'WITH', 'US', 'AND', 'PRESENT', 'HIM', 'TO', 'OUR', 'KING', 'THAT', 'HE', 'MAY', 'ACQUAINT', 'HIM', 'WITH', 'HIS', 'ADVENTURES'] +7018-75789-0011-166: hyp=['SHE', 'SAID', 'IT', 'HATH', 'RAGED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'SINDBAD', 'THE', 'SEAMAN', 'CONTINUED', 'WHEN', 'I', 'LANDED', 'AND', 'FOUND', 'MYSELF', 'AMONGST', 'THE', 'INDIANS', 'AND', 'ABYSSINIANS', 'AND', 'HAD', 'TAKEN', 'SOME', 'REST', 'THEY', 'CONSULTED', 'AMONG', 'THEMSELVES', 'AND', 'SAID', 'TO', 'ONE', 'ANOTHER', 'THERE', 'IS', 'NO', 'HELP', 'FOR', 'IT', 'BUT', 'WE', 'CARRY', 'HIM', 'WITH', 'US', 'AND', 'PRESENT', 'HIM', 'TO', 'OUR', 'KING', 'THAT', 'HE', 'MAY', 'ACQUAINT', 'HIM', 'WITH', 'HIS', 'ADVENTURES'] +7018-75789-0012-167: ref=['SO', 'I', 'CONSORTED', 'WITH', 'THE', 'CHIEF', 'OF', 'THE', 'ISLANDERS', 'AND', 'THEY', 'PAID', 'ME', 'THE', 'UTMOST', 'RESPECT'] +7018-75789-0012-167: hyp=['SO', 'I', 'CONSORTED', 'WITH', 'THE', 'CHIEF', 'OF', 'THE', 'ISLANDERS', 'AND', 'THEY', 'PAID', 'ME', 'THE', 'UTMOST', 'RESPECT'] +7018-75789-0013-168: ref=['SO', 'I', 'ROSE', 'WITHOUT', 'STAY', 'OR', 'DELAY', 'AND', 'KISSED', 'THE', "KING'S", 'HAND', 'AND', 'ACQUAINTED', 'HIM', 'WITH', 'MY', 'LONGING', 'TO', 'SET', 'OUT', 'WITH', 'THE', 'MERCHANTS', 'FOR', 'THAT', 'I', 'PINED', 'AFTER', 'MY', 'PEOPLE', 'AND', 'MINE', 'OWN', 'LAND'] +7018-75789-0013-168: hyp=['SO', 'I', 'ROSE', 'WITHOUT', 'STAY', 'OR', 'DELAY', 'AND', 'KISSED', 'THE', "KING'S", 'HAND', 'AND', 'ACQUAINTED', 'HIM', 'WITH', 'MY', 'LONGING', 'TO', 'SET', 'OUT', 'WITH', 'THE', 'MERCHANTS', 'FOR', 'THAT', 'I', 'PINED', 'AFTER', 'MY', 'PEOPLE', 'AND', 'MINE', 'OWN', 'LAND'] +7018-75789-0014-169: ref=['QUOTH', 'HE', 'THOU', 'ART', 'THINE', 'OWN', 'MASTER', 'YET', 'IF', 'IT', 'BE', 'THY', 'WILL', 'TO', 'ABIDE', 'WITH', 'US', 'ON', 'OUR', 'HEAD', 'AND', 'EYES', 'BE', 'IT', 'FOR', 'THOU', 'GLADDENEST', 'US', 'WITH', 'THY', 'COMPANY'] +7018-75789-0014-169: hyp=['QUOTH', 'HE', 'THOU', 'ART', 'THINE', 'OWN', 'MASTER', 'YET', 'IF', 'IT', 'BE', 'THY', 'WILL', 'TO', 'ABIDE', 'WITH', 'US', 'ON', 'OUR', 'HEAD', 'AND', 'EYES', 'BE', 'IT', 'FOR', 'THOU', 'GLADNESSED', 'US', 'WITH', 'THY', 'COMPANY'] +7018-75789-0015-170: ref=['BY', 'ALLAH', 'O', 'MY', 'LORD', 'ANSWERED', 'I', 'THOU', 'HAST', 'INDEED', 'OVERWHELMED', 'ME', 'WITH', 'THY', 'FAVOURS', 'AND', 'WELL', 'DOINGS', 'BUT', 'I', 'WEARY', 'FOR', 'A', 'SIGHT', 'OF', 'MY', 'FRIENDS', 'AND', 'FAMILY', 'AND', 'NATIVE', 'COUNTRY'] +7018-75789-0015-170: hyp=['BY', 'ALLAH', 'ARE', 'MY', 'LORD', 'ANSWERED', 'I', 'THOU', 'HAST', 'INDEED', 'OVERWHELMED', 'ME', 'WITH', 'THY', 'FAVOURS', 'AND', 'WELL', 'DOINGS', 'BUT', 'I', 'WEARY', 
'FOR', 'A', 'SIGHT', 'OF', 'MY', 'FRIENDS', 'AND', 'FAMILY', 'AND', 'NATIVE', 'COUNTRY'] +7018-75789-0016-171: ref=['THEN', 'I', 'TOOK', 'LEAVE', 'OF', 'HIM', 'AND', 'OF', 'ALL', 'MY', 'INTIMATES', 'AND', 'ACQUAINTANCES', 'IN', 'THE', 'ISLAND', 'AND', 'EMBARKED', 'WITH', 'THE', 'MERCHANTS', 'AFORESAID'] +7018-75789-0016-171: hyp=['THEN', 'I', 'TOOK', 'LEAVE', 'OF', 'HIM', 'AND', 'OF', 'ALL', 'MY', 'INTIMATES', 'AND', 'ACQUAINTANCES', 'IN', 'THE', 'ISLAND', 'AND', 'EMBARKED', 'WITH', 'THE', 'MERCHANTS', 'AFOR', 'SAID'] +7018-75789-0017-172: ref=['HE', 'ASKED', 'ME', 'WHENCE', 'THEY', 'CAME', 'AND', 'I', 'SAID', 'TO', 'HIM', 'BY', 'ALLAH', 'O', 'COMMANDER', 'OF', 'THE', 'FAITHFUL', 'I', 'KNOW', 'NOT', 'THE', 'NAME', 'OF', 'THE', 'CITY', 'NOR', 'THE', 'WAY', 'THITHER'] +7018-75789-0017-172: hyp=['HE', 'ASKED', 'ME', 'WHENCE', 'THEY', 'CAME', 'AND', 'I', 'SAID', 'TO', 'HIM', 'BY', 'ALLAH', 'O', 'COMMANDER', 'OF', 'THE', 'FAITHFUL', 'I', 'KNOW', 'NOT', 'THE', 'NAME', 'OF', 'THE', 'CITY', 'NOR', 'THE', 'WAY', 'THITHER'] +7018-75789-0018-173: ref=['FOR', 'STATE', 'PROCESSIONS', 'A', 'THRONE', 'IS', 'SET', 'FOR', 'HIM', 'UPON', 'A', 'HUGE', 'ELEPHANT', 'ELEVEN', 'CUBITS', 'HIGH', 'AND', 'UPON', 'THIS', 'HE', 'SITTETH', 'HAVING', 'HIS', 'GREAT', 'LORDS', 'AND', 'OFFICERS', 'AND', 'GUESTS', 'STANDING', 'IN', 'TWO', 'RANKS', 'ON', 'HIS', 'RIGHT', 'HAND', 'AND', 'ON', 'HIS', 'LEFT'] +7018-75789-0018-173: hyp=['FOR', 'STATE', 'PROCESSIONS', 'ARE', 'THRONE', 'IS', 'SAID', 'FOR', 'HIM', 'UPON', 'A', 'HUGE', 'ELEPHANT', 'ELEVEN', 'CUBITS', 'HIGH', 'AND', 'UPON', 'THIS', 'HE', 'SITTETH', 'HAVING', 'HIS', 'GREAT', 'LORDS', 'AND', 'OFFICERS', 'AND', 'GUESTS', 'STANDING', 'IN', 'TWO', 'RANKS', 'ON', 'HIS', 'RIGHT', 'HAND', 'AND', 'ON', 'HIS', 'LEFT'] +7018-75789-0019-174: ref=['HIS', 'LETTER', 'HATH', 'SHOWN', 'ME', 'THIS', 'AND', 'AS', 'FOR', 'THE', 'MIGHTINESS', 'OF', 'HIS', 'DOMINION', 'THOU', 'HAST', 'TOLD', 'US', 'WHAT', 'THOU', 'HAST', 'EYE', 'WITNESSED'] +7018-75789-0019-174: hyp=['HIS', 'LETTER', 'HATH', 'SHOWN', 'ME', 'THIS', 'AND', 'AS', 'FOR', 'THE', 'MIGHTINESS', 'OF', 'HIS', 'DOMINION', 'THOU', 'HAST', 'TOLD', 'US', 'WHAT', 'THOU', 'HAST', 'DIE', 'WITNESSED'] +7018-75789-0020-175: ref=['PRESENTLY', 'MY', 'FRIENDS', 'CAME', 'TO', 'ME', 'AND', 'I', 'DISTRIBUTED', 'PRESENTS', 'AMONG', 'MY', 'FAMILY', 'AND', 'GAVE', 'ALMS', 'AND', 'LARGESSE', 'AFTER', 'WHICH', 'I', 'YIELDED', 'MYSELF', 'TO', 'JOYANCE', 'AND', 'ENJOYMENT', 'MIRTH', 'AND', 'MERRY', 'MAKING', 'AND', 'FORGOT', 'ALL', 'THAT', 'I', 'HAD', 'SUFFERED'] +7018-75789-0020-175: hyp=['PRESENTLY', 'MY', 'FRIENDS', 'CAME', 'TO', 'ME', 'AND', 'I', 'DISTRIBUTED', 'PRESENTS', 'AMONG', 'MY', 'FAMILY', 'AND', 'GAVE', 'ARMS', 'IN', 'LARGESSE', 'AFTER', 'WHICH', 'I', 'YIELDED', 'MYSELF', 'TO', 'JOYANCE', 'AND', 'ENJOYMENT', 'MIRTH', 'AND', 'MERRYMAKING', 'AND', 'FORGOT', 'ALL', 'THAT', 'I', 'HAD', 'SUFFERED'] +7018-75789-0021-176: ref=['SUCH', 'THEN', 'O', 'MY', 'BROTHERS', 'IS', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFEL', 'ME', 'IN', 'MY', 'SIXTH', 'VOYAGE', 'AND', 'TO', 'MORROW', 'INSHALLAH'] +7018-75789-0021-176: hyp=['SUCH', 'THEN', 'ARE', 'MY', 'BROTHERS', 'IS', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFELL', 'ME', 'IN', 'MY', 'SIXTH', 'VOYAGE', 'AND', 'TO', 'MORROW', 'INSHALLAH'] +7018-75789-0022-177: ref=['I', 'WILL', 'TELL', 'YOU', 'THE', 'STORY', 'OF', 'MY', 'SEVENTH', 'AND', 'LAST', 'VOYAGE', 'WHICH', 'IS', 'STILL', 'MORE', 'WONDROUS', 'AND', 'MARVELLOUS', 'THAN', 'THAT', 'OF', 'THE', 'FIRST', 'SIX'] +7018-75789-0022-177: hyp=['I', 'WILL', 'TELL', 'YOU', 
'THE', 'STORY', 'OF', 'MY', 'SEVENTH', 'AND', 'LAST', 'VOYAGE', 'WHICH', 'IS', 'STILL', 'MORE', 'WONDROUS', 'AND', 'MARVELLOUS', 'THAN', 'THAT', 'OF', 'THE', 'FIRST', 'SIX'] +7018-75789-0023-178: ref=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'THIRD', 'NIGHT'] +7018-75789-0023-178: hyp=['WHEN', 'IT', 'WAS', 'THE', 'FIVE', 'HUNDRED', 'AND', 'SIXTY', 'THIRD', 'NIGHT'] +7018-75789-0024-179: ref=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'SINDBAD', 'THE', 'SEAMAN', 'HAD', 'RELATED', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFEL', 'HIM', 'IN', 'HIS', 'SIXTH', 'VOYAGE', 'AND', 'ALL', 'THE', 'COMPANY', 'HAD', 'DISPERSED', 'SINDBAD', 'THE', 'LANDSMAN', 'WENT', 'HOME', 'AND', 'SLEPT', 'AS', 'OF', 'WONT'] +7018-75789-0024-179: hyp=['SHE', 'SAID', 'IT', 'HATH', 'REACHED', 'ME', 'O', 'AUSPICIOUS', 'KING', 'THAT', 'WHEN', 'SINDBAD', 'THE', 'SEAMEN', 'HAD', 'RELIGHTED', 'THE', 'HISTORY', 'OF', 'WHAT', 'BEFELL', 'HIM', 'IN', 'HIS', 'SIXTH', 'VOYAGE', 'AND', 'ALL', 'THE', 'COMPANY', 'HAD', 'DISPERSED', 'SINDBAD', 'THE', 'LANDSMAN', 'WENT', 'HOME', 'AND', 'SLEPT', 'AS', 'OF', 'WANT'] +7018-75789-0025-180: ref=['THE', 'SEVENTH', 'VOYAGE', 'OF', 'SINDBAD', 'THE', 'SEAMAN'] +7018-75789-0025-180: hyp=['THE', 'SEVENTH', 'VOYAGE', 'OF', 'SINBAD', 'THE', 'SEAMAN'] +7018-75789-0026-181: ref=['KNOW', 'O', 'COMPANY', 'THAT', 'AFTER', 'MY', 'RETURN', 'FROM', 'MY', 'SIXTH', 'VOYAGE', 'WHICH', 'BROUGHT', 'ME', 'ABUNDANT', 'PROFIT', 'I', 'RESUMED', 'MY', 'FORMER', 'LIFE', 'IN', 'ALL', 'POSSIBLE', 'JOYANCE', 'AND', 'ENJOYMENT', 'AND', 'MIRTH', 'AND', 'MAKING', 'MERRY', 'DAY', 'AND', 'NIGHT', 'AND', 'I', 'TARRIED', 'SOME', 'TIME', 'IN', 'THIS', 'SOLACE', 'AND', 'SATISFACTION', 'TILL', 'MY', 'SOUL', 'BEGAN', 'ONCE', 'MORE', 'TO', 'LONG', 'TO', 'SAIL', 'THE', 'SEAS', 'AND', 'SEE', 'FOREIGN', 'COUNTRIES', 'AND', 'COMPANY', 'WITH', 'MERCHANTS', 'AND', 'HEAR', 'NEW', 'THINGS'] +7018-75789-0026-181: hyp=['NO', 'O', 'COMPANY', 'THAT', 'AFTER', 'MY', 'RETURN', 'FROM', 'MY', 'SIXTH', 'VOYAGE', 'WHICH', 'BROUGHT', 'ME', 'ABUNDANT', 'PROPHET', 'I', 'RESUMED', 'MY', 'FORMER', 'LIFE', 'AND', 'ALL', 'POSSIBLE', 'JOYANCE', 'AND', 'ENJOYMENT', 'AND', 'MIRTH', 'AND', 'MAKING', 'MERRY', 'DAY', 'AND', 'NIGHT', 'AND', 'I', 'TARRIED', 'SOME', 'TIME', 'IN', 'THIS', 'SOLACE', 'AND', 'SATISFACTION', 'TILL', 'MY', 'SOUL', 'BEGAN', 'ONCE', 'MORE', 'TO', 'LONG', 'TO', 'SAIL', 'THE', 'SEAS', 'AND', 'SEE', 'FOREIGN', 'COUNTRIES', 'AND', 'COMPANY', 'WITH', 'MERCHANTS', 'AND', 'HERE', 'NEW', 'THINGS'] +7018-75789-0027-182: ref=['SO', 'HAVING', 'MADE', 'UP', 'MY', 'MIND', 'I', 'PACKED', 'UP', 'IN', 'BALES', 'A', 'QUANTITY', 'OF', 'PRECIOUS', 'STUFFS', 'SUITED', 'FOR', 'SEA', 'TRADE', 'AND', 'REPAIRED', 'WITH', 'THEM', 'FROM', 'BAGHDAD', 'CITY', 'TO', 'BASSORAH', 'TOWN', 'WHERE', 'I', 'FOUND', 'A', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'IN', 'HER', 'A', 'COMPANY', 'OF', 'CONSIDERABLE', 'MERCHANTS'] +7018-75789-0027-182: hyp=['SO', 'HAVING', 'MADE', 'UP', 'MY', 'MIND', 'I', 'PACKED', 'UP', 'IN', 'BALES', 'A', 'QUANTITY', 'OF', 'PRECIOUS', 'STUFFS', 'SUITED', 'FOR', 'SEA', 'TRADE', 'AND', 'REPAIRED', 'WITH', 'THEM', 'FROM', 'BAGDAD', 'CITY', 'TO', 'BASSORA', 'TOWN', 'WHERE', 'I', 'FOUND', 'A', 'SHIP', 'READY', 'FOR', 'SEA', 'AND', 'IN', 'HER', 'OUR', 'COMPANY', 'OF', 'CONSIDERABLE', 'MERCHANTS'] +7018-75789-0028-183: ref=['BUT', 'THE', 'CAPTAIN', 'AROSE', 'AND', 'TIGHTENING', 'HIS', 'GIRDLE', 'TUCKED', 'UP', 'HIS', 'SKIRTS', 'AND', 'AFTER', 'TAKING', 'REFUGE', 'WITH', 'ALLAH', 'FROM', 'SATAN', 
'THE', 'STONED', 'CLOMB', 'TO', 'THE', 'MAST', 'HEAD', 'WHENCE', 'HE', 'LOOKED', 'OUT', 'RIGHT', 'AND', 'LEFT', 'AND', 'GAZING', 'AT', 'THE', 'PASSENGERS', 'AND', 'CREW', 'FELL', 'TO', 'BUFFETING', 'HIS', 'FACE', 'AND', 'PLUCKING', 'OUT', 'HIS', 'BEARD'] +7018-75789-0028-183: hyp=['BUT', 'THE', 'CAPTAIN', 'AROSE', 'AND', 'TIGHTENING', 'HIS', 'GIRDLE', 'TUCKED', 'UP', 'HIS', 'SKIRTS', 'AND', 'AFTER', 'TAKING', 'REFUGE', 'WITH', 'ALLAH', 'FROM', 'SATAN', 'THE', 'STONE', 'CLIMBED', 'TO', 'THE', 'MAST', 'HEAD', 'WHENCE', 'HE', 'LOOKED', 'OUT', 'RIGHT', 'AND', 'LEFT', 'AND', 'GAZING', 'AT', 'THE', 'PASSENGERS', 'AND', 'CREW', 'FELL', 'TO', 'BUFFET', 'IN', 'HIS', 'FACE', 'AND', 'PLUCKING', 'OUT', 'HIS', 'BEARD'] +7018-75789-0029-184: ref=['THIS', 'HE', 'SET', 'IN', 'A', 'SAUCER', 'WETTED', 'WITH', 'A', 'LITTLE', 'WATER', 'AND', 'AFTER', 'WAITING', 'A', 'SHORT', 'TIME', 'SMELT', 'AND', 'TASTED', 'IT', 'AND', 'THEN', 'HE', 'TOOK', 'OUT', 'OF', 'THE', 'CHEST', 'A', 'BOOKLET', 'WHEREIN', 'HE', 'READ', 'AWHILE', 'AND', 'SAID', 'WEEPING', 'KNOW', 'O', 'YE', 'PASSENGERS', 'THAT', 'IN', 'THIS', 'BOOK', 'IS', 'A', 'MARVELLOUS', 'MATTER', 'DENOTING', 'THAT', 'WHOSO', 'COMETH', 'HITHER', 'SHALL', 'SURELY', 'DIE', 'WITHOUT', 'HOPE', 'OF', 'ESCAPE', 'FOR', 'THAT', 'THIS', 'OCEAN', 'IS', 'CALLED', 'THE', 'SEA', 'OF', 'THE', 'CLIME', 'OF', 'THE', 'KING', 'WHEREIN', 'IS', 'THE', 'SEPULCHRE', 'OF', 'OUR', 'LORD', 'SOLOMON', 'SON', 'OF', 'DAVID', 'ON', 'BOTH', 'BE', 'PEACE'] +7018-75789-0029-184: hyp=['THIS', 'HE', 'SAID', 'IN', 'A', 'SAUCER', 'WETTED', 'WITH', 'A', 'LITTLE', 'WATER', 'AND', 'AFTER', 'WAITING', 'A', 'SHORT', 'TIME', 'SMELT', 'AND', 'TASTED', 'IT', 'AND', 'THEN', 'HE', 'TOOK', 'OUT', 'OF', 'THE', 'CHEST', 'A', 'BOOKLET', 'WHEREIN', 'HE', 'READ', 'AWHILE', 'AND', 'SAID', 'WEEPING', 'NO', 'ARE', 'YE', 'PASSENGERS', 'THAT', 'IN', 'THIS', 'BOOK', 'IS', 'A', 'MARVELLOUS', 'MATTER', 'DENOTING', 'THAT', 'WHOSO', 'COME', 'THITHER', 'SHALL', 'SURELY', 'DIE', 'WITHOUT', 'HOPE', 'OF', 'ESCAPE', 'FOR', 'THAT', 'THIS', 'OCEAN', 'IS', 'CALLED', 'THE', 'SEA', 'OF', 'THE', 'CLIME', 'OF', 'THE', 'KING', 'WHEREIN', 'IS', 'A', 'SEPULCHRE', 'OF', 'OUR', 'LORD', 'SOLOMON', 'SON', 'OF', 'DAVID', 'ON', 'BOTH', 'BE', 'PEACE'] +7018-75789-0030-185: ref=['A', 'SECOND', 'FISH', 'MADE', 'ITS', 'APPEARANCE', 'THAN', 'WHICH', 'WE', 'HAD', 'SEEN', 'NAUGHT', 'MORE', 'MONSTROUS'] +7018-75789-0030-185: hyp=['A', 'SECOND', 'FISH', 'MADE', 'ITS', 'APPEARANCE', 'AND', 'WHICH', 'WE', 'HAD', 'SEEN', 'NOUGHT', 'MORE', 'MONSTROUS'] +7018-75789-0031-186: ref=['WHEN', 'SUDDENLY', 'A', 'VIOLENT', 'SQUALL', 'OF', 'WIND', 'AROSE', 'AND', 'SMOTE', 'THE', 'SHIP', 'WHICH', 'ROSE', 'OUT', 'OF', 'THE', 'WATER', 'AND', 'SETTLED', 'UPON', 'A', 'GREAT', 'REEF', 'THE', 'HAUNT', 'OF', 'SEA', 'MONSTERS', 'WHERE', 'IT', 'BROKE', 'UP', 'AND', 'FELL', 'ASUNDER', 'INTO', 'PLANKS', 'AND', 'ALL', 'AND', 'EVERYTHING', 'ON', 'BOARD', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7018-75789-0031-186: hyp=['WHEN', 'SUDDENLY', 'A', 'VIOLENT', 'SQUALL', 'OF', 'WIND', 'AROSE', 'AND', 'SMOTE', 'THE', 'SHIP', 'WHICH', 'ROSE', 'OUT', 'OF', 'THE', 'WATER', 'AND', 'SETTLED', 'UPON', 'A', 'GREAT', 'REEF', 'THE', 'HAUNT', 'OF', 'SEA', 'MONSTERS', 'WHERE', 'IT', 'BROKE', 'UP', 'AND', 'FELL', 'ASUNDER', 'INTO', 'PLANKS', 'AND', 'ALL', 'AND', 'EVERYTHING', 'ON', 'BOARD', 'WERE', 'PLUNGED', 'INTO', 'THE', 'SEA'] +7105-2330-0000-2310: ref=['UNFORTUNATELY', 'THERE', 'COULD', 'BE', 'NO', 'DOUBT', 'OR', 'MISCONCEPTION', 'AS', 'TO', "PLATTERBAFF'S", 'GUILT'] +7105-2330-0000-2310: 
hyp=['UNFORTUNATELY', 'THERE', 'COULD', 'BE', 'NO', 'DOUBT', 'OR', 'MISCONCEPTION', 'AS', 'THE', "PLATTERBUFF'S", 'GUILT'] +7105-2330-0001-2311: ref=['HE', 'HAD', 'NOT', 'ONLY', 'PLEADED', 'GUILTY', 'BUT', 'HAD', 'EXPRESSED', 'HIS', 'INTENTION', 'OF', 'REPEATING', 'HIS', 'ESCAPADE', 'IN', 'OTHER', 'DIRECTIONS', 'AS', 'SOON', 'AS', 'CIRCUMSTANCES', 'PERMITTED', 'THROUGHOUT', 'THE', 'TRIAL', 'HE', 'WAS', 'BUSY', 'EXAMINING', 'A', 'SMALL', 'MODEL', 'OF', 'THE', 'FREE', 'TRADE', 'HALL', 'IN', 'MANCHESTER'] +7105-2330-0001-2311: hyp=['HE', 'HAD', 'NOT', 'ONLY', 'PLEADED', 'GUILTY', 'BUT', 'HAD', 'EXPRESSED', 'HIS', 'INTENTION', 'OF', 'REPEATING', 'HIS', 'ESCAPADE', 'IN', 'OTHER', 'DIRECTIONS', 'AS', 'SOON', 'AS', 'CIRCUMSTANCES', 'PERMITTED', 'THROUGHOUT', 'THE', 'TRIAL', 'HE', 'WAS', 'BUSY', 'EXAMINING', 'A', 'SMALL', 'MODEL', 'OF', 'THE', 'FREE', 'TRADE', 'HALL', 'IN', 'MANCHESTER'] +7105-2330-0002-2312: ref=['THE', 'JURY', 'COULD', 'NOT', 'POSSIBLY', 'FIND', 'THAT', 'THE', 'PRISONER', 'HAD', 'NOT', 'DELIBERATELY', 'AND', 'INTENTIONALLY', 'BLOWN', 'UP', 'THE', 'ALBERT', 'HALL', 'THE', 'QUESTION', 'WAS', 'COULD', 'THEY', 'FIND', 'ANY', 'EXTENUATING', 'CIRCUMSTANCES', 'WHICH', 'WOULD', 'PERMIT', 'OF', 'AN', 'ACQUITTAL'] +7105-2330-0002-2312: hyp=['VIRTUARY', 'COULD', 'NOT', 'POSSIBLY', 'FIND', 'THAT', 'THE', 'PRISONER', 'HAD', 'NOT', 'DELIBERATELY', 'AND', 'INTENTIONALLY', 'BLOWN', 'UP', 'WE', 'ALBERT', 'HALL', 'THE', 'QUESTION', 'WAS', 'COULD', 'THEY', 'FIND', 'ANY', 'EXTENUATING', 'CIRCUMSTANCES', 'WHICH', 'WOULD', 'PERMIT', 'OF', 'AN', 'ACQUITTAL'] +7105-2330-0003-2313: ref=['OF', 'COURSE', 'ANY', 'SENTENCE', 'WHICH', 'THE', 'LAW', 'MIGHT', 'FEEL', 'COMPELLED', 'TO', 'INFLICT', 'WOULD', 'BE', 'FOLLOWED', 'BY', 'AN', 'IMMEDIATE', 'PARDON', 'BUT', 'IT', 'WAS', 'HIGHLY', 'DESIRABLE', 'FROM', 'THE', "GOVERNMENT'S", 'POINT', 'OF', 'VIEW', 'THAT', 'THE', 'NECESSITY', 'FOR', 'SUCH', 'AN', 'EXERCISE', 'OF', 'CLEMENCY', 'SHOULD', 'NOT', 'ARISE'] +7105-2330-0003-2313: hyp=['OF', 'COURSE', 'ANY', 'SENTENCE', 'WHICH', 'THE', 'LAW', 'MIGHT', 'FILL', 'COMPELLED', 'TO', 'INFLICT', 'WOULD', 'BE', 'FOLLOWED', 'BY', 'AN', 'IMMEDIATE', 'PARDON', 'BUT', 'IT', 'WAS', 'HIGHLY', 'DESIRABLE', 'FROM', 'THE', 'GOVERNMENT', 'SPITE', 'OF', 'VIEW', 'THAT', 'THE', 'NECESSITY', 'FOR', 'SUCH', 'AN', 'EXERCISE', 'OF', 'CLEMENCY', 'SHOULD', 'NOT', 'ARISE'] +7105-2330-0004-2314: ref=['A', 'HEADLONG', 'PARDON', 'ON', 'THE', 'EVE', 'OF', 'A', 'BYE', 'ELECTION', 'WITH', 'THREATS', 'OF', 'A', 'HEAVY', 'VOTING', 'DEFECTION', 'IF', 'IT', 'WERE', 'WITHHELD', 'OR', 'EVEN', 'DELAYED', 'WOULD', 'NOT', 'NECESSARILY', 'BE', 'A', 'SURRENDER', 'BUT', 'IT', 'WOULD', 'LOOK', 'LIKE', 'ONE'] +7105-2330-0004-2314: hyp=['I', 'HAD', 'LONG', 'PARDON', 'AND', 'THE', 'EVE', 'OF', 'A', 'BI', 'ELECTION', 'WITH', 'THREATS', 'OF', 'A', 'HEAVY', 'VOTING', 'DEFECTION', 'IF', 'IT', 'WERE', 'WITHHELD', 'OR', 'EVEN', 'DELAYED', 'WOULD', 'NOT', 'NECESSARILY', 'BE', 'A', 'SURRENDER', 'BUT', 'IT', 'WOULD', 'LOOK', 'LIKE', 'ONE'] +7105-2330-0005-2315: ref=['HENCE', 'THE', 'ANXIETY', 'IN', 'THE', 'CROWDED', 'COURT', 'AND', 'IN', 'THE', 'LITTLE', 'GROUPS', 'GATHERED', 'ROUND', 'THE', 'TAPE', 'MACHINES', 'IN', 'WHITEHALL', 'AND', 'DOWNING', 'STREET', 'AND', 'OTHER', 'AFFECTED', 'CENTRES'] +7105-2330-0005-2315: hyp=['HENCE', 'THE', 'ANXIETY', 'IN', 'THE', 'CROWDED', 'COURT', 'AND', 'IN', 'THE', 'LITTLE', 'GROUPS', 'GATHERED', 'ROUND', 'THE', 'TAPE', 'MACHINES', 'IN', 'WHITEHALL', 'AND', 'DAWNING', 'STREET', 'AND', 'OTHER', 'AFFECTED', 'CENTRES'] 
+7105-2330-0006-2316: ref=['THE', 'JURY', 'RETURNED', 'FROM', 'CONSIDERING', 'THEIR', 'VERDICT', 'THERE', 'WAS', 'A', 'FLUTTER', 'AN', 'EXCITED', 'MURMUR', 'A', 'DEATHLIKE', 'HUSH'] +7105-2330-0006-2316: hyp=['THE', 'JURY', 'TURN', 'FROM', 'CONSIDERING', 'THEIR', 'VERDICT', 'THERE', 'WAS', 'A', 'FLUTTER', 'AN', 'EXCITED', 'MURMUR', 'A', 'DEATHLIKE', 'HUSH'] +7105-2330-0007-2317: ref=['THE', 'FOREMAN', 'DELIVERED', 'HIS', 'MESSAGE'] +7105-2330-0007-2317: hyp=['THEREFORE', 'MAN', 'DELIVERED', 'HIS', 'MESSAGE'] +7105-2330-0008-2318: ref=['THE', 'JURY', 'FIND', 'THE', 'PRISONER', 'GUILTY', 'OF', 'BLOWING', 'UP', 'THE', 'ALBERT', 'HALL'] +7105-2330-0008-2318: hyp=['THE', 'JURY', 'FIND', 'THE', 'PRISONER', 'GUILTY', 'OF', 'BLOWING', 'UP', 'THE', 'ALBERT', 'HALL'] +7105-2330-0009-2319: ref=['THE', 'JURY', 'WISH', 'TO', 'ADD', 'A', 'RIDER', 'DRAWING', 'ATTENTION', 'TO', 'THE', 'FACT', 'THAT', 'A', 'BY', 'ELECTION', 'IS', 'PENDING', 'IN', 'THE', 'PARLIAMENTARY', 'DIVISION', 'OF', 'NEMESIS', 'ON', 'HAND'] +7105-2330-0009-2319: hyp=['THE', 'JURY', 'WISH', 'TO', 'ADD', 'A', 'WRITER', 'DRAWING', 'ATTENTION', 'TO', 'THE', 'FACT', 'THAT', 'A', 'BY', 'ELECTION', 'IS', 'SPENDING', 'IN', 'THE', 'PARLIAMENTARY', 'DIVISION', 'OF', 'NEMESIS', 'ON', 'HAND'] +7105-2330-0010-2320: ref=['AND', 'MAY', 'THE', 'LORD', 'HAVE', 'MERCY', 'ON', 'THE', 'POLL', 'A', 'JUNIOR', 'COUNSEL', 'EXCLAIMED', 'IRREVERENTLY'] +7105-2330-0010-2320: hyp=['AND', 'MADE', 'THE', 'LARD', 'HAVE', 'MERCY', 'ON', 'THE', 'POLE', 'A', 'JUNIOR', 'COUNCIL', 'EXCLAIMED', 'IRREVERENTLY'] +7105-2330-0011-2321: ref=['FIFTEEN', 'HUNDRED', 'SAID', 'THE', 'PRIME', 'MINISTER', 'WITH', 'A', 'SHUDDER', "IT'S", 'TOO', 'HORRIBLE', 'TO', 'THINK', 'OF'] +7105-2330-0011-2321: hyp=['FIFTEEN', 'HUNDRED', 'SAID', 'A', 'PRIME', 'MINISTER', 'WITH', 'A', 'SHUDDER', "IT'S", 'TOO', 'HORRIBLE', 'TO', 'THINK', 'OF'] +7105-2330-0012-2322: ref=['OUR', 'MAJORITY', 'LAST', 'TIME', 'WAS', 'ONLY', 'A', 'THOUSAND', 'AND', 'SEVEN'] +7105-2330-0012-2322: hyp=['OUR', 'MAJORITY', 'LAST', 'TIME', 'WAS', 'ONLY', 'A', 'THOUSAND', 'AND', 'SEVEN'] +7105-2330-0013-2323: ref=['SEVEN', 'THIRTY', 'AMENDED', 'THE', 'PRIME', 'MINISTER', 'WE', 'MUST', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PRECIPITANCY'] +7105-2330-0013-2323: hyp=['SEVEN', 'THIRTY', 'AMENDED', 'THE', 'PRIME', 'MINISTER', 'WE', 'MUST', 'AVOID', 'ANY', 'APPEARANCE', 'OF', 'PRECIPITANCY'] +7105-2330-0014-2324: ref=['NOT', 'LATER', 'THAN', 'SEVEN', 'THIRTY', 'THEN', 'SAID', 'THE', 'CHIEF', 'ORGANISER', 'I', 'HAVE', 'PROMISED', 'THE', 'AGENT', 'DOWN', 'THERE', 'THAT', 'HE', 'SHALL', 'BE', 'ABLE', 'TO', 'DISPLAY', 'POSTERS', 'ANNOUNCING', 'PLATTERBAFF', 'IS', 'OUT', 'BEFORE', 'THE', 'POLL', 'OPENS'] +7105-2330-0014-2324: hyp=['NOT', 'LATER', 'THEN', 'SEVEN', 'THIRTY', 'THEN', 'SAID', 'THE', 'CHIEF', 'ORGANIZER', 'I', 'HAVE', 'PROMISED', 'THE', 'AGENT', 'DOWN', 'THERE', 'THAT', 'HE', 'SHALL', 'BE', 'ABLE', 'TO', 'DISPLAY', 'POSTERS', 'ANNOUNCING', 'PLATTER', 'BAFF', 'IS', 'OUT', 'BEFORE', 'THE', 'POLE', 'OPENS'] +7105-2330-0015-2325: ref=['HE', 'SAID', 'IT', 'WAS', 'OUR', 'ONLY', 'CHANCE', 'OF', 'GETTING', 'A', 'TELEGRAM', 'RADPROP', 'IS', 'IN', 'TO', 'NIGHT'] +7105-2330-0015-2325: hyp=['HE', 'SAID', 'IT', 'WAS', 'HER', 'ONLY', 'CHANCE', 'OF', 'GETTING', 'A', 'TELEGRAM', 'RED', "RAPPA'S", 'IN', 'TO', 'NIGHT'] +7105-2330-0016-2326: ref=['DESPITE', 'THE', 'EARLINESS', 'OF', 'THE', 'HOUR', 'A', 'SMALL', 'CROWD', 'HAD', 'GATHERED', 'IN', 'THE', 'STREET', 'OUTSIDE', 'AND', 'THE', 'HORRIBLE', 'MENACING', 'TRELAWNEY', 'REFRAIN', 'OF', 'THE', 
'FIFTEEN', 'HUNDRED', 'VOTING', 'MEN', 'CAME', 'IN', 'A', 'STEADY', 'MONOTONOUS', 'CHANT'] +7105-2330-0016-2326: hyp=['THIS', 'SPITE', 'THE', 'EARLINESS', 'OF', 'THE', 'HOUR', 'A', 'SMALL', 'CROWD', 'HAD', 'GATHERED', 'IN', 'THE', 'STREET', 'OUTSIDE', 'AND', 'THE', 'HORRIBLE', 'MENACING', 'TREE', 'LONGER', 'REFRAIN', 'OF', 'THE', 'FIFTEEN', 'HUNDRED', 'VOTING', 'MEN', 'CAME', 'IN', 'A', 'STEADY', 'MONOTONOUS', 'CHANT'] +7105-2330-0017-2327: ref=['HE', 'EXCLAIMED', "WON'T", 'GO'] +7105-2330-0017-2327: hyp=['HE', 'EXCLAIMED', "WON'T", 'GO'] +7105-2330-0018-2328: ref=['HE', 'SAYS', 'HE', 'NEVER', 'HAS', 'LEFT', 'PRISON', 'WITHOUT', 'A', 'BRASS', 'BAND', 'TO', 'PLAY', 'HIM', 'OUT', 'AND', "HE'S", 'NOT', 'GOING', 'TO', 'GO', 'WITHOUT', 'ONE', 'NOW'] +7105-2330-0018-2328: hyp=['HE', 'SAYS', 'HE', 'NEVER', 'HAS', 'LEFT', 'PRISON', 'WITHOUT', 'A', 'BREASTPAND', 'TO', 'PLAY', 'HIM', 'OUT', 'AND', 'HE', 'SNUG', 'GOING', 'TO', 'GO', 'WITHOUT', 'ONE', 'NOW'] +7105-2330-0019-2329: ref=['SAID', 'THE', 'PRIME', 'MINISTER', 'WE', 'CAN', 'HARDLY', 'BE', 'SUPPOSED', 'TO', 'SUPPLY', 'A', 'RELEASED', 'PRISONER', 'WITH', 'A', 'BRASS', 'BAND', 'HOW', 'ON', 'EARTH', 'COULD', 'WE', 'DEFEND', 'IT', 'ON', 'THE', 'ESTIMATES'] +7105-2330-0019-2329: hyp=['SAID', 'A', 'PRIME', 'MINISTER', 'WE', 'CAN', 'HARDLY', 'BE', 'SUPPOSED', 'TO', 'SUPPLY', 'A', 'RELEASE', 'PRISONER', 'WITH', 'A', 'BRASS', 'BAND', 'HOW', 'ON', 'EARTH', 'COULD', 'WE', 'DEFEND', 'IT', 'ON', 'ESTIMATES'] +7105-2330-0020-2330: ref=['ANYWAY', 'HE', "WON'T", 'GO', 'UNLESS', 'HE', 'HAS', 'A', 'BAND'] +7105-2330-0020-2330: hyp=['AND', 'AWAY', 'HE', "WON'T", 'GO', 'UNLESS', 'HE', 'HAS', 'A', 'BAND'] +7105-2330-0021-2331: ref=['POLL', 'OPENS', 'IN', 'FIVE', 'MINUTES'] +7105-2330-0021-2331: hyp=['PAUL', 'OPENS', 'IN', 'FIVE', 'MINUTES'] +7105-2330-0022-2332: ref=['IS', 'PLATTERBAFF', 'OUT', 'YET'] +7105-2330-0022-2332: hyp=['IS', 'FLATHER', 'BATH', 'OUT', 'YET'] +7105-2330-0023-2333: ref=['IN', "HEAVEN'S", 'NAME', 'WHY'] +7105-2330-0023-2333: hyp=['IN', "HEAVEN'S", 'NAME', 'WHY'] +7105-2330-0024-2334: ref=['THE', 'CHIEF', 'ORGANISER', 'RANG', 'OFF'] +7105-2330-0024-2334: hyp=['THE', 'CHIEF', 'ORGANIZER', 'RANG', 'OFF'] +7105-2330-0025-2335: ref=['THIS', 'IS', 'NOT', 'A', 'MOMENT', 'FOR', 'STANDING', 'ON', 'DIGNITY', 'HE', 'OBSERVED', 'BLUNTLY', 'MUSICIANS', 'MUST', 'BE', 'SUPPLIED', 'AT', 'ONCE'] +7105-2330-0025-2335: hyp=['THIS', 'IS', 'NOT', 'A', 'MOMENT', 'FOR', 'STANDING', 'ON', 'DIGNITY', 'HE', 'OBSERVED', 'BLUNTLY', 'MUSICIANS', 'MUST', 'BE', 'SUPPLIED', 'AT', 'ONCE'] +7105-2330-0026-2336: ref=["CAN'T", 'YOU', 'GET', 'A', 'STRIKE', 'PERMIT', 'ASKED', 'THE', 'ORGANISER'] +7105-2330-0026-2336: hyp=["CAN'T", 'YOU', 'GET', 'THE', 'STRIKE', 'PERMIT', 'ASKED', 'THE', 'ORGANIZER'] +7105-2330-0027-2337: ref=["I'LL", 'TRY', 'SAID', 'THE', 'HOME', 'SECRETARY', 'AND', 'WENT', 'TO', 'THE', 'TELEPHONE'] +7105-2330-0027-2337: hyp=["I'LL", 'TRY', 'SAID', 'THE', 'HOME', 'SECRETARY', 'AND', 'WENT', 'TO', 'THE', 'TELEPHONE'] +7105-2330-0028-2338: ref=['EIGHT', "O'CLOCK", 'STRUCK', 'THE', 'CROWD', 'OUTSIDE', 'CHANTED', 'WITH', 'AN', 'INCREASING', 'VOLUME', 'OF', 'SOUND', 'WILL', 'VOTE', 'THE', 'OTHER', 'WAY'] +7105-2330-0028-2338: hyp=['EIGHT', "O'CLOCK", 'STRUCK', 'THE', 'CROWD', 'OUTSIDE', 'CHANTED', 'WITH', 'AN', 'INCREASING', 'VOLUME', 'OF', 'SOUND', 'WITHOUT', 'THE', 'OTHER', 'WAY'] +7105-2330-0029-2339: ref=['A', 'TELEGRAM', 'WAS', 'BROUGHT', 'IN'] +7105-2330-0029-2339: hyp=['I', 'TELEGRAM', 'WAS', 'BROUGHT', 'IN'] +7105-2330-0030-2340: ref=['IT', 'WAS', 'FROM', 
'THE', 'CENTRAL', 'COMMITTEE', 'ROOMS', 'AT', 'NEMESIS'] +7105-2330-0030-2340: hyp=['IT', 'WAS', 'FROM', 'THE', 'CENTRAL', 'COME', 'INTO', 'ROOMS', 'AT', 'NEMESIS'] +7105-2330-0031-2341: ref=['WITHOUT', 'A', 'BAND', 'HE', 'WOULD', 'NOT', 'GO', 'AND', 'THEY', 'HAD', 'NO', 'BAND'] +7105-2330-0031-2341: hyp=['WITHOUT', 'A', 'BAND', 'HE', 'WOULD', 'NOT', 'GO', 'AND', 'THEY', 'HAD', 'NO', 'BAND'] +7105-2330-0032-2342: ref=['A', 'QUARTER', 'PAST', 'TEN', 'HALF', 'PAST'] +7105-2330-0032-2342: hyp=['ACQUIRED', 'THEIR', 'PAST', 'TEN', 'HALF', 'PAST'] +7105-2330-0033-2343: ref=['HAVE', 'YOU', 'ANY', 'BAND', 'INSTRUMENTS', 'OF', 'AN', 'EASY', 'NATURE', 'TO', 'PLAY'] +7105-2330-0033-2343: hyp=['HAVE', 'YOU', 'ANY', 'BAND', 'INSTRUMENTS', 'OF', 'AN', 'EASY', 'NATURE', 'TO', 'PLAY'] +7105-2330-0034-2344: ref=['DEMANDED', 'THE', 'CHIEF', 'ORGANISER', 'OF', 'THE', 'PRISON', 'GOVERNOR', 'DRUMS', 'CYMBALS', 'THOSE', 'SORT', 'OF', 'THINGS'] +7105-2330-0034-2344: hyp=['DEMANDED', 'THE', 'CHIEF', 'ORGANIZER', 'OF', 'THE', 'PRISON', 'GOVERNOR', 'DRUMS', 'SYMBOLS', 'THOSE', 'SORT', 'OF', 'THINGS'] +7105-2330-0035-2345: ref=['THE', 'WARDERS', 'HAVE', 'A', 'PRIVATE', 'BAND', 'OF', 'THEIR', 'OWN', 'SAID', 'THE', 'GOVERNOR', 'BUT', 'OF', 'COURSE', 'I', "COULDN'T", 'ALLOW', 'THE', 'MEN', 'THEMSELVES'] +7105-2330-0035-2345: hyp=['THOUGH', 'OURS', 'HAVE', 'A', 'PRIVATE', 'BAND', 'OF', 'THEIR', 'OWN', 'SAID', 'THE', 'GOVERNOR', 'BUT', 'OF', 'COURSE', 'I', "COULDN'T", 'ALLOW', 'THEM', 'IN', 'THEMSELVES'] +7105-2330-0036-2346: ref=['LEND', 'US', 'THE', 'INSTRUMENTS', 'SAID', 'THE', 'CHIEF', 'ORGANISER'] +7105-2330-0036-2346: hyp=['LEND', 'US', 'THE', 'INSTRUMENTS', 'SAID', 'THE', 'CHIEF', 'ORGANIZER'] +7105-2330-0037-2347: ref=['THE', 'POPULAR', 'SONG', 'OF', 'THE', 'MOMENT', 'REPLIED', 'THE', 'AGITATOR', 'AFTER', 'A', "MOMENT'S", 'REFLECTION'] +7105-2330-0037-2347: hyp=['THEIR', 'POPULAR', 'SONG', 'OF', 'THE', 'MOMENT', 'REPLIED', 'THE', 'AGITATOR', 'AFTER', 'A', "MOMENT'S", 'REFLECTION'] +7105-2330-0038-2348: ref=['IT', 'WAS', 'A', 'TUNE', 'THEY', 'HAD', 'ALL', 'HEARD', 'HUNDREDS', 'OF', 'TIMES', 'SO', 'THERE', 'WAS', 'NO', 'DIFFICULTY', 'IN', 'TURNING', 'OUT', 'A', 'PASSABLE', 'IMITATION', 'OF', 'IT', 'TO', 'THE', 'IMPROVISED', 'STRAINS', 'OF', 'I', "DIDN'T", 'WANT', 'TO', 'DO', 'IT', 'THE', 'PRISONER', 'STRODE', 'FORTH', 'TO', 'FREEDOM'] +7105-2330-0038-2348: hyp=['IT', 'WAS', 'A', 'TUNE', 'THEY', 'HAD', 'ALL', 'HEARD', 'HUNDREDS', 'OF', 'TIMES', 'SO', 'THERE', 'WAS', 'NO', 'DIFFICULTY', 'IN', 'TURNING', 'OUT', 'A', 'PASSABLE', 'IMITATION', 'OF', 'IT', 'TO', 'THE', 'IMPROVISED', 'TRAINS', 'OF', 'I', "DON'T", 'WANT', 'TO', 'DO', 'IT', 'THE', 'PRISONERS', 'STROLLED', 'FORTH', 'TO', 'FREEDOM'] +7105-2330-0039-2349: ref=['THE', 'WORD', 'OF', 'THE', 'SONG', 'HAD', 'REFERENCE', 'IT', 'WAS', 'UNDERSTOOD', 'TO', 'THE', 'INCARCERATING', 'GOVERNMENT', 'AND', 'NOT', 'TO', 'THE', 'DESTROYER', 'OF', 'THE', 'ALBERT', 'HALL'] +7105-2330-0039-2349: hyp=['THE', 'WORD', 'OF', 'THE', 'SONG', 'HAD', 'REFERENCE', 'IT', 'WAS', 'UNDERSTOOD', 'THAT', 'THE', 'INCARCERATING', 'GOVERNMENT', 'AND', 'NOT', 'TO', 'THE', 'DESTROYER', 'OF', 'THE', 'ALBERT', 'HALL'] +7105-2330-0040-2350: ref=['THE', 'SEAT', 'WAS', 'LOST', 'AFTER', 'ALL', 'BY', 'A', 'NARROW', 'MAJORITY'] +7105-2330-0040-2350: hyp=['THIS', 'SEAT', 'WAS', 'LOST', 'AFTER', 'ALL', 'BY', 'A', 'NARROW', 'MATURITY'] +7105-2330-0041-2351: ref=['THE', 'LOCAL', 'TRADE', 'UNIONISTS', 'TOOK', 'OFFENCE', 'AT', 'THE', 'FACT', 'OF', 'CABINET', 'MINISTERS', 'HAVING', 'PERSONALLY', 'ACTED', 'AS', 
'STRIKE', 'BREAKERS', 'AND', 'EVEN', 'THE', 'RELEASE', 'OF', 'PLATTERBAFF', 'FAILED', 'TO', 'PACIFY', 'THEM'] +7105-2330-0041-2351: hyp=['THE', 'LOCAL', 'TRADE', 'UNIONISTS', 'TOOK', 'OFFENCE', 'AT', 'THE', 'FACT', 'OF', 'CABINET', 'MINISTERS', 'HAVING', 'PERSONALLY', 'ACTED', 'AS', 'STRIKE', 'BREAKERS', 'AND', 'EVEN', 'THE', 'RELEASE', 'OF', 'PLATTERBUFF', 'FAILED', 'TO', 'PACIFY', 'THEM'] +7105-2340-0000-2272: ref=['WITH', 'THAT', 'NOTORIOUS', 'FAILING', 'OF', 'HIS', 'HE', 'WAS', 'NOT', 'THE', 'SORT', 'OF', 'PERSON', 'ONE', 'WANTED', 'IN', "ONE'S", 'HOUSE'] +7105-2340-0000-2272: hyp=['WITH', 'THAT', 'NOTORIOUS', 'FAILING', 'OF', 'HIS', 'HE', 'WAS', 'NOT', 'A', 'SORT', 'OF', 'PERSON', 'ONE', 'WANTED', 'IN', "ONE'S", 'HOUSE'] +7105-2340-0001-2273: ref=['WELL', 'THE', 'FAILING', 'STILL', 'EXISTS', "DOESN'T", 'IT', 'SAID', 'HER', 'HUSBAND', 'OR', 'DO', 'YOU', 'SUPPOSE', 'A', 'REFORM', 'OF', 'CHARACTER', 'IS', 'ENTAILED', 'ALONG', 'WITH', 'THE', 'ESTATE'] +7105-2340-0001-2273: hyp=['WELL', 'THE', 'FAILING', 'STILL', 'EXISTS', 'DOESNATE', 'SAID', 'THE', 'HUSBAND', 'ORA', 'DO', 'YOU', 'SUPPOSE', 'A', 'REFORM', 'OF', 'CHARACTER', 'IS', 'ENTAILED', 'ALONG', 'WITH', 'THE', 'ESTATE'] +7105-2340-0002-2274: ref=['BESIDES', 'CYNICISM', 'APART', 'HIS', 'BEING', 'RICH', 'WILL', 'MAKE', 'A', 'DIFFERENCE', 'IN', 'THE', 'WAY', 'PEOPLE', 'WILL', 'LOOK', 'AT', 'HIS', 'FAILING'] +7105-2340-0002-2274: hyp=['BESIDES', 'CYNICISM', 'APART', 'IS', 'VERY', 'RICH', 'WE', 'MAKE', 'A', 'DIFFERENCE', 'IN', 'THE', 'WAY', 'PEOPLE', 'WILL', 'LOOK', 'AT', 'HIS', 'FEELING'] +7105-2340-0003-2275: ref=['WHEN', 'A', 'MAN', 'IS', 'ABSOLUTELY', 'WEALTHY', 'NOT', 'MERELY', 'WELL', 'TO', 'DO', 'ALL', 'SUSPICION', 'OF', 'SORDID', 'MOTIVE', 'NATURALLY', 'DISAPPEARS', 'THE', 'THING', 'BECOMES', 'MERELY', 'A', 'TIRESOME', 'MALADY'] +7105-2340-0003-2275: hyp=['WHEN', 'A', 'MAN', 'IS', 'ABSOLUTELY', 'WEALTHY', 'NOT', 'MERELY', 'WELL', 'TO', 'DO', 'ALL', 'SUSPICION', 'OF', 'SORDID', 'MOTIVE', 'NATURAL', 'DISAPPEARS', 'THE', 'THING', 'BECOMES', 'MERELY', 'A', 'PERSON', 'MALADY'] +7105-2340-0004-2276: ref=['WILFRID', 'PIGEONCOTE', 'HAD', 'SUDDENLY', 'BECOME', 'HEIR', 'TO', 'HIS', 'UNCLE', 'SIR', 'WILFRID', 'PIGEONCOTE', 'ON', 'THE', 'DEATH', 'OF', 'HIS', 'COUSIN', 'MAJOR', 'WILFRID', 'PIGEONCOTE', 'WHO', 'HAD', 'SUCCUMBED', 'TO', 'THE', 'AFTER', 'EFFECTS', 'OF', 'A', 'POLO', 'ACCIDENT'] +7105-2340-0004-2276: hyp=['WILFRIED', 'DIGEON', 'CODE', 'HAD', 'SUDDENLY', 'BECOME', 'HEIR', 'TO', 'HIS', 'UNCLE', 'SIR', 'WILL', 'FIDD', 'PIGEON', 'COAT', 'UNDER', 'THE', 'DEATH', 'OF', 'HIS', 'COUSIN', 'MAJOR', 'WILFRID', 'PIGEONCOTE', 'WHO', 'HAD', 'SUCCUMBED', 'THE', 'AFTER', 'EFFECTS', 'OF', 'APOLLO', 'ACCIDENT'] +7105-2340-0005-2277: ref=['A', 'WILFRID', 'PIGEONCOTE', 'HAD', 'COVERED', 'HIMSELF', 'WITH', 'HONOURS', 'IN', 'THE', 'COURSE', 'OF', "MARLBOROUGH'S", 'CAMPAIGNS', 'AND', 'THE', 'NAME', 'WILFRID', 'HAD', 'BEEN', 'A', 'BAPTISMAL', 'WEAKNESS', 'IN', 'THE', 'FAMILY', 'EVER', 'SINCE', 'THE', 'NEW', 'HEIR', 'TO', 'THE', 'FAMILY', 'DIGNITY', 'AND', 'ESTATES', 'WAS', 'A', 'YOUNG', 'MAN', 'OF', 'ABOUT', 'FIVE', 'AND', 'TWENTY', 'WHO', 'WAS', 'KNOWN', 'MORE', 'BY', 'REPUTATION', 'THAN', 'BY', 'PERSON', 'TO', 'A', 'WIDE', 'CIRCLE', 'OF', 'COUSINS', 'AND', 'KINSFOLK'] +7105-2340-0005-2277: hyp=['ALFRED', 'FEAJANCOTT', 'HAD', 'COVERED', 'HIMSELF', 'WITH', 'HONOURS', 'IN', 'THE', 'COURSE', 'OF', "MARLBOROUGH'S", 'CAMPAIGNS', 'AND', 'THE', 'NAME', 'WILFRID', 'HAD', 'BEEN', 'A', 'BABYSMAL', 'WEAKNESS', 'IN', 'THE', 'FAMILY', 'EVER', 'SINCE', 'THE', 'NEW', 
'HEIR', 'TO', 'THE', 'FAMILY', 'DIGNITY', 'AND', 'ESTATES', 'WAS', 'A', 'YOUNG', 'MAN', 'OF', 'ABOUT', 'FIVE', 'AND', 'TWENTY', 'WHO', 'WAS', 'KNOWN', 'MORE', 'BY', 'REPETITION', 'THAN', 'BY', 'PERSON', 'TO', 'AVOID', 'CIRCLE', 'OF', 'COUSINS', 'AND', 'KINSFOLK'] +7105-2340-0006-2278: ref=['AND', 'THE', 'REPUTATION', 'WAS', 'AN', 'UNPLEASANT', 'ONE'] +7105-2340-0006-2278: hyp=['AND', 'THE', 'REPUTATION', 'WAS', 'AN', 'UNPLEASANT', 'ONE'] +7105-2340-0007-2279: ref=['FROM', 'HIS', 'LATE', 'SCHOOLDAYS', 'ONWARD', 'HE', 'HAD', 'BEEN', 'POSSESSED', 'BY', 'AN', 'ACUTE', 'AND', 'OBSTINATE', 'FORM', 'OF', 'KLEPTOMANIA', 'HE', 'HAD', 'THE', 'ACQUISITIVE', 'INSTINCT', 'OF', 'THE', 'COLLECTOR', 'WITHOUT', 'ANY', 'OF', 'THE', "COLLECTOR'S", 'DISCRIMINATION'] +7105-2340-0007-2279: hyp=['FROM', 'HIS', 'LATE', 'SCHOOL', 'DAYS', 'ONWARD', 'HE', 'HAD', 'BEEN', 'POSSESSED', 'BY', 'AN', 'ACUTE', 'AND', 'OBSTINATE', 'FORM', 'OF', 'CLUBTOMANIA', 'HE', 'HAD', 'THE', 'ACQUISITIVE', 'INSTINCT', 'OF', 'THE', 'COLLECTOR', 'WITHOUT', 'ANY', 'OF', 'THE', "COLLECTOR'S", 'DISCRIMINATION'] +7105-2340-0008-2280: ref=['THE', 'SEARCH', 'USUALLY', 'PRODUCED', 'A', 'LARGE', 'AND', 'VARIED', 'YIELD', 'THIS', 'IS', 'FUNNY', 'SAID', 'PETER', 'PIGEONCOTE', 'TO', 'HIS', 'WIFE', 'SOME', 'HALF', 'HOUR', 'AFTER', 'THEIR', 'CONVERSATION', "HERE'S", 'A', 'TELEGRAM', 'FROM', 'WILFRID', 'SAYING', "HE'S", 'PASSING', 'THROUGH', 'HERE', 'IN', 'HIS', 'MOTOR', 'AND', 'WOULD', 'LIKE', 'TO', 'STOP', 'AND', 'PAY', 'US', 'HIS', 'RESPECTS'] +7105-2340-0008-2280: hyp=['THIS', 'SEARCH', 'USUALLY', 'PRODUCED', 'A', 'LARGE', 'AND', 'VARIED', 'YIELD', 'THIS', 'IS', 'FUNNY', 'SAID', 'PETER', 'PIGEON', 'BULLET', 'TO', 'HIS', 'WIFE', "I'M", 'HALF', 'OUR', 'AFTER', 'THEIR', 'CONVERSATION', "HERE'S", 'A', 'TELEGRAM', 'FROM', 'WILFRID', 'SAYING', "HE'S", 'PASSING', 'THROUGH', 'HERE', 'IN', 'HIS', 'MOTAR', 'AND', 'WOULD', 'LIKE', 'TO', 'STOP', 'AND', 'PAY', 'US', 'HIS', 'RESPECTS'] +7105-2340-0009-2281: ref=['SIGNED', 'WILFRID', 'PIGEONCOTE'] +7105-2340-0009-2281: hyp=['SIGN', 'WILFRED', 'PIGEON', 'COAT'] +7105-2340-0010-2282: ref=['I', 'SUPPOSE', "HE'S", 'BRINGING', 'US', 'A', 'PRESENT', 'FOR', 'THE', 'SILVER', 'WEDDING', 'GOOD', 'GRACIOUS'] +7105-2340-0010-2282: hyp=['I', 'SUPPOSE', 'IS', 'BRINGING', 'US', 'A', 'PRESENT', 'FURTHER', 'SILVER', 'WEDDING', 'GOOD', 'GRACIOUS'] +7105-2340-0011-2283: ref=['THE', 'TALK', 'FLITTED', 'NERVOUSLY', 'AND', 'HURRIEDLY', 'FROM', 'ONE', 'IMPERSONAL', 'TOPIC', 'TO', 'ANOTHER'] +7105-2340-0011-2283: hyp=['THE', 'TALK', 'FLITTED', 'NERVOUSLY', 'AND', 'HURRIEDLY', 'FROM', 'ONE', 'IMPERSONAL', 'TOPIC', 'TO', 'ANOTHER'] +7105-2340-0012-2284: ref=['IN', 'THE', 'DRAWING', 'ROOM', 'AFTER', 'DINNER', 'THEIR', 'NERVOUSNESS', 'AND', 'AWKWARDNESS', 'INCREASED'] +7105-2340-0012-2284: hyp=['IN', 'THE', 'DRAWING', 'ROOM', 'AFTER', 'DINNER', 'THEIR', 'NERVOUSNESS', 'AND', 'AWKWARDNESS', 'INCREASED'] +7105-2340-0013-2285: ref=['OH', 'WE', "HAVEN'T", 'SHOWN', 'YOU', 'THE', 'SILVER', 'WEDDING', 'PRESENTS', 'SAID', 'MISSUS', 'PETER', 'SUDDENLY', 'AS', 'THOUGH', 'STRUCK', 'BY', 'A', 'BRILLIANT', 'IDEA', 'FOR', 'ENTERTAINING', 'THE', 'GUEST', 'HERE', 'THEY', 'ALL', 'ARE'] +7105-2340-0013-2285: hyp=['OH', 'WE', "HAVEN'T", 'SHOWN', 'YOU', 'THE', 'SILVER', 'WEDDING', 'PRESENTS', 'SAID', 'MISSUS', 'PETER', 'SUDDENLY', 'AS', 'THOSE', 'STRUCK', 'BY', 'A', 'BRILLIANT', 'IDEA', 'OF', 'HER', 'ENTERTAINING', 'THE', 'GUEST', 'HERE', 'THEY', 'ALL', 'ARE'] +7105-2340-0014-2286: ref=['SUCH', 'NICE', 'USEFUL', 'GIFTS', 'A', 'FEW', 'DUPLICATES', 'OF', 
'COURSE'] +7105-2340-0014-2286: hyp=['SUCH', 'NICE', 'YEARS', 'FORGIVES', 'A', 'FEW', 'DEPLICATES', 'OF', 'COURSE'] +7105-2340-0015-2287: ref=['SEVEN', 'CREAM', 'JUGS', 'PUT', 'IN', 'PETER'] +7105-2340-0015-2287: hyp=['SEVEN', 'CREAM', 'JUGS', 'PUT', 'IN', 'PETER'] +7105-2340-0016-2288: ref=['WE', 'FEEL', 'THAT', 'WE', 'MUST', 'LIVE', 'ON', 'CREAM', 'FOR', 'THE', 'REST', 'OF', 'OUR', 'LIVES'] +7105-2340-0016-2288: hyp=['WE', 'FEEL', 'THAT', 'WE', 'MUST', 'LIVE', 'UNCLEAN', 'FOR', 'THE', 'REST', 'OF', 'OUR', 'LIVES'] +7105-2340-0017-2289: ref=['OF', 'COURSE', 'SOME', 'OF', 'THEM', 'CAN', 'BE', 'CHANGED'] +7105-2340-0017-2289: hyp=['OF', 'COURSE', 'SOME', 'OF', 'THEM', 'CAN', 'BE', 'CHANGED'] +7105-2340-0018-2290: ref=['I', 'PUT', 'IT', 'DOWN', 'BY', 'THE', 'CLARET', 'JUG', 'SAID', 'WILFRID', 'BUSY', 'WITH', 'ANOTHER', 'OBJECT'] +7105-2340-0018-2290: hyp=['I', 'PUT', 'IT', 'DOWN', 'BY', 'THE', 'CLARA', 'JUG', 'SAID', 'WILFRID', 'BUSY', 'WITH', 'ANOTHER', 'OBJECT'] +7105-2340-0019-2291: ref=['VIGILANCE', 'WAS', 'NOT', 'COMPLETELY', 'CROWNED', 'WITH', 'A', 'SENSE', 'OF', 'VICTORY'] +7105-2340-0019-2291: hyp=['VICHILLENZ', 'WAS', 'NOT', 'COMPLETELY', 'CROWNED', 'WITH', 'A', 'SENSE', 'OF', 'VICTORY'] +7105-2340-0020-2292: ref=['AFTER', 'THEY', 'HAD', 'SAID', 'GOOD', 'NIGHT', 'TO', 'THEIR', 'VISITOR', 'MISSUS', 'PETER', 'EXPRESSED', 'HER', 'CONVICTION', 'THAT', 'HE', 'HAD', 'TAKEN', 'SOMETHING'] +7105-2340-0020-2292: hyp=['AFTER', 'THEY', 'HAD', 'SAID', 'GOOD', 'NIGHT', 'TO', 'THEIR', 'VISITOR', 'MISSUS', 'PETER', 'EXPRESSED', 'HER', 'CONVICTION', 'THAT', 'HE', 'HAD', 'TAKEN', 'SOMETHING'] +7105-2340-0021-2293: ref=['HOW', 'ON', 'EARTH', 'ARE', 'WE', 'TO', 'KNOW', 'SAID', 'PETER', 'THE', 'MEAN', 'PIG', "HASN'T", 'BROUGHT', 'US', 'A', 'PRESENT', 'AND', "I'M", 'HANGED', 'IF', 'HE', 'SHALL', 'CARRY', 'ONE', 'OFF'] +7105-2340-0021-2293: hyp=['HOW', 'ON', 'EARTH', 'ARE', 'WE', 'TO', 'KNOW', 'SAID', 'PETER', 'THE', 'MEAN', 'PIG', "HASN'T", 'BROUGHT', 'US', 'A', 'PRESENT', 'AND', "I'M", 'HANGED', 'IF', 'HE', 'SHALL', 'CARRY', 'ONE', 'OFF'] +7105-2340-0022-2294: ref=["IT'S", 'THE', 'ONLY', 'THING', 'TO', 'DO'] +7105-2340-0022-2294: hyp=['IS', 'THE', 'ONLY', 'THING', 'TO', 'DO'] +7105-2340-0023-2295: ref=['WILFRID', 'WAS', 'LATE', 'IN', 'COMING', 'DOWN', 'TO', 'BREAKFAST', 'AND', 'HIS', 'MANNER', 'SHOWED', 'PLAINLY', 'THAT', 'SOMETHING', 'WAS', 'AMISS'] +7105-2340-0023-2295: hyp=['WILFRED', 'WAS', 'LAID', 'IN', 'COMING', 'DOWN', 'TO', 'BREAKFAST', 'AND', 'HIS', 'MANNERS', 'SHOWED', 'PLAINLY', 'THAT', 'SOMETHING', 'WAS', 'AMISS'] +7105-2340-0024-2296: ref=["IT'S", 'AN', 'UNPLEASANT', 'THING', 'TO', 'HAVE', 'TO', 'SAY', 'HE', 'BLURTED', 'OUT', 'PRESENTLY', 'BUT', "I'M", 'AFRAID', 'YOU', 'MUST', 'HAVE', 'A', 'THIEF', 'AMONG', 'YOUR', 'SERVANTS', "SOMETHING'S", 'BEEN', 'TAKEN', 'OUT', 'OF', 'MY', 'PORTMANTEAU'] +7105-2340-0024-2296: hyp=['YES', 'AND', 'AN', 'UNPLEASANT', 'THING', 'TO', 'HAVE', 'TO', 'SAY', 'HE', 'BLURTED', 'OUT', 'PRESENTLY', 'BUT', "I'M", 'AFRAID', 'YOU', 'MUST', 'HAVE', 'A', 'THIEF', 'AMONG', 'YOUR', 'SERVANTS', "SOMETHING'S", 'BEEN', 'TAKEN', 'OUT', 'OF', 'MY', 'APARTMENTAL'] +7105-2340-0025-2297: ref=['IT', 'WAS', 'A', 'LITTLE', 'PRESENT', 'FROM', 'MY', 'MOTHER', 'AND', 'MYSELF', 'FOR', 'YOUR', 'SILVER', 'WEDDING'] +7105-2340-0025-2297: hyp=['IT', 'WAS', 'A', 'LITTLE', 'PRESENT', 'FOR', 'MY', 'MOTHER', 'AND', 'MYSELF', 'FOR', 'YOUR', 'SILVER', 'WEDDING'] +7105-2340-0026-2298: ref=['I', 'SHOULD', 'HAVE', 'GIVEN', 'IT', 'TO', 'YOU', 'LAST', 'NIGHT', 'AFTER', 'DINNER', 'ONLY', 'IT', 
'HAPPENED', 'TO', 'BE', 'A', 'CREAM', 'JUG', 'AND', 'YOU', 'SEEMED', 'ANNOYED', 'AT', 'HAVING', 'SO', 'MANY', 'DUPLICATES', 'SO', 'I', 'FELT', 'RATHER', 'AWKWARD', 'ABOUT', 'GIVING', 'YOU', 'ANOTHER'] +7105-2340-0026-2298: hyp=['I', 'SHOULD', 'HAVE', 'GIVEN', 'IT', 'TO', 'YOU', 'LAST', 'NIGHT', 'AFTER', 'DINNER', 'ON', 'IT', 'HAPPENED', 'TO', 'BE', 'A', 'QUEEN', 'JUG', 'AND', 'YOU', 'SEEMED', 'ANNOYED', 'AT', 'HAVING', 'SO', 'MANY', 'DUPLICATES', 'SO', 'I', 'FELT', 'RATHER', 'AWKWARD', 'OF', 'A', 'GIVING', 'YOU', 'ANOTHER'] +7105-2340-0027-2299: ref=['THE', 'SNATCHER', 'HAD', 'BEEN', 'AN', 'ORPHAN', 'THESE', 'MANY', 'YEARS'] +7105-2340-0027-2299: hyp=['THIS', 'NATURE', 'HAD', 'BEEN', 'AN', 'ORPHAN', 'THIS', 'MANY', 'YEARS'] +7105-2340-0028-2300: ref=['LADY', 'ERNESTINE', 'PIGEONCOTE', 'HIS', 'MOTHER', 'MOVED', 'IN', 'CIRCLES', 'WHICH', 'WERE', 'ENTIRELY', 'BEYOND', 'THEIR', 'COMPASS', 'OR', 'AMBITIONS', 'AND', 'THE', 'SON', 'WOULD', 'PROBABLY', 'ONE', 'DAY', 'BE', 'AN', 'AMBASSADOR'] +7105-2340-0028-2300: hyp=['LADY', 'ERNESTON', 'BEECH', 'AND', 'COLD', 'HIS', 'MOTHER', 'MOVED', 'IN', 'CIRCLES', 'WHICH', 'WERE', 'ENTIRELY', 'BEYOND', 'THEIR', 'COMPASS', 'OR', 'AMBITIONS', 'AND', 'THE', 'SON', 'WOULD', 'PROBABLY', 'ONE', 'DAY', 'BE', 'AN', 'AMBASSADOR'] +7105-2340-0029-2301: ref=['HUSBAND', 'AND', 'WIFE', 'LOOKED', 'BLANKLY', 'AND', 'DESPERATELY', 'AT', 'ONE', 'ANOTHER'] +7105-2340-0029-2301: hyp=['HUSBAND', 'AND', 'WIFE', 'LOOKED', 'BLANKLY', 'AND', 'DESPERATELY', 'AT', 'ONE', 'ANOTHER'] +7105-2340-0030-2302: ref=['IT', 'WAS', 'MISSUS', 'PETER', 'WHO', 'ARRIVED', 'FIRST', 'AT', 'AN', 'INSPIRATION', 'HOW', 'DREADFUL', 'TO', 'THINK', 'THERE', 'ARE', 'THIEVES', 'IN', 'THE', 'HOUSE', 'WE', 'KEEP', 'THE', 'DRAWING', 'ROOM', 'LOCKED', 'UP', 'AT', 'NIGHT', 'OF', 'COURSE', 'BUT', 'ANYTHING', 'MIGHT', 'BE', 'CARRIED', 'OFF', 'WHILE', 'WE', 'ARE', 'AT', 'BREAKFAST'] +7105-2340-0030-2302: hyp=['IT', 'WAS', 'MISSUS', 'PETER', 'WHO', 'ARRIVED', 'FIRST', 'AT', 'AN', 'INSPIRATION', 'HOW', 'DREADFUL', 'THE', 'THING', 'THERE', 'ARE', 'THIEVES', 'IN', 'THE', 'HOUSE', 'WE', 'GIVE', 'THE', 'DRAWING', 'ROOM', 'LOCKED', 'UP', 'AT', 'NIGHT', 'OF', 'COURSE', 'BUT', 'ANYTHING', 'MIGHT', 'BE', 'CARRIED', 'OFF', 'WHILE', 'WE', 'WERE', 'AT', 'BREAKFAST'] +7105-2340-0031-2303: ref=['SHE', 'ROSE', 'AND', 'WENT', 'OUT', 'HURRIEDLY', 'AS', 'THOUGH', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'THE', 'DRAWING', 'ROOM', 'WAS', 'NOT', 'BEING', 'STRIPPED', 'OF', 'ITS', 'SILVERWARE', 'AND', 'RETURNED', 'A', 'MOMENT', 'LATER', 'BEARING', 'A', 'CREAM', 'JUG', 'IN', 'HER', 'HANDS'] +7105-2340-0031-2303: hyp=['SHE', 'ROSE', 'AND', 'WENT', 'OUT', 'HURRIEDLY', 'AS', 'THOUGH', 'TO', 'ASSURE', 'HERSELF', 'THAT', 'THE', 'DRAWING', 'ROOM', 'WAS', 'NOT', 'BEING', 'STRIPPED', 'OF', 'ITS', 'SILVER', 'WARE', 'AND', 'RETURNED', 'A', 'MOMENT', 'LATER', 'BEARING', 'A', 'CREAM', 'CHUG', 'IN', 'HER', 'HANDS'] +7105-2340-0032-2304: ref=['THE', 'PIGEONCOTES', 'HAD', 'TURNED', 'PALER', 'THAN', 'EVER', 'MISSUS', 'PETER', 'HAD', 'A', 'FINAL', 'INSPIRATION'] +7105-2340-0032-2304: hyp=['THE', 'PIGEON', 'COATS', 'HAD', 'TURNED', 'PALER', 'THAN', 'EVER', 'MISSUS', 'PETER', 'HAD', 'A', 'FINAL', 'INSPIRATION'] +7105-2340-0033-2305: ref=['PETER', 'DASHED', 'OUT', 'OF', 'THE', 'ROOM', 'WITH', 'GLAD', 'RELIEF', 'HE', 'HAD', 'LIVED', 'SO', 'LONG', 'DURING', 'THE', 'LAST', 'FEW', 'MINUTES', 'THAT', 'A', 'GOLDEN', 'WEDDING', 'SEEMED', 'WITHIN', 'MEASURABLE', 'DISTANCE'] +7105-2340-0033-2305: hyp=['PETER', 'DASHED', 'OUT', 'OF', 'THE', 'ROOM', 'WITH', 'GLAD', 'RELIEF', 
'HE', 'HAD', 'LIVED', 'SO', 'LONG', 'DURING', 'THE', 'LAST', 'FEW', 'MINUTES', 'THAT', 'A', 'GOLDEN', 'WEDDING', 'SEEMED', 'WITHIN', 'MEASURABLE', 'DISTANCE'] +7105-2340-0034-2306: ref=['MISSUS', 'PETER', 'TURNED', 'TO', 'HER', 'GUEST', 'WITH', 'CONFIDENTIAL', 'COYNESS'] +7105-2340-0034-2306: hyp=['MISSUS', 'PETER', 'TURNED', 'TO', 'HER', 'GUEST', 'WITH', 'CONFIDENTIAL', 'KINDNESS'] +7105-2340-0035-2307: ref=["PETER'S", 'LITTLE', 'WEAKNESS', 'IT', 'RUNS', 'IN', 'THE', 'FAMILY', 'GOOD', 'LORD'] +7105-2340-0035-2307: hyp=["PETER'S", 'LITTLE', 'WEAKNESS', 'A', 'TRANSIENT', 'FAMILY', 'GOOD', 'LORD'] +7105-2340-0036-2308: ref=['DO', 'YOU', 'MEAN', 'TO', 'SAY', "HE'S", 'A', 'KLEPTOMANIAC', 'LIKE', 'COUSIN', 'SNATCHER'] +7105-2340-0036-2308: hyp=['DO', 'YOU', 'MEAN', 'TO', 'SAY', "HE'S", 'A', 'CLEPTOMANIA', 'LIKE', 'COUSIN', 'SNATCHER'] +7105-2340-0037-2309: ref=['BRAVE', 'LITTLE', 'WOMAN', 'SAID', 'PETER', 'WITH', 'A', 'GASP', 'OF', 'RELIEF', 'I', 'COULD', 'NEVER', 'HAVE', 'DONE', 'IT'] +7105-2340-0037-2309: hyp=['BRAVE', 'LITTLE', 'WOMAN', 'SAID', 'PETER', 'WITH', 'A', 'GASP', 'OF', 'RELIEF', 'I', 'COULD', 'NEVER', 'HAVE', 'DONE', 'IT'] +7902-96591-0000-0: ref=['I', 'AM', 'FROM', 'THE', 'CUTTER', 'LYING', 'OFF', 'THE', 'COAST'] +7902-96591-0000-0: hyp=["I'M", 'FROM', 'THE', 'CUTTER', 'LYING', 'OFF', 'THE', 'COAST'] +7902-96591-0001-1: ref=["DON'T", 'CRY', 'HE', 'SAID', 'I', 'WAS', 'OBLIGED', 'TO', 'COME'] +7902-96591-0001-1: hyp=["DON'T", 'CRY', 'HE', 'SAID', 'I', 'WAS', 'OBLIGED', 'TO', 'COME'] +7902-96591-0002-2: ref=['AND', 'AND', 'YOU', 'HAVE', 'NOT', 'FOUND', 'OUT', 'ANYTHING', 'CAME', 'IN', 'QUICK', 'FRIGHTENED', 'TONES'] +7902-96591-0002-2: hyp=['AND', 'AND', 'YOU', 'HAVE', 'NOT', 'FOUND', 'OUT', 'ANYTHING', 'CAME', 'IN', 'QUICK', 'FRIGHTENED', 'TONES'] +7902-96591-0003-3: ref=['I', 'WISH', 'YOU', 'WOULD', 'BELIEVE', 'ME', 'THAT', 'I', 'AM', 'IN', 'AS', 'GREAT', 'TROUBLE', 'ABOUT', 'IT', 'AS', 'YOU', 'ARE'] +7902-96591-0003-3: hyp=['I', 'WISH', 'YOU', 'WOULD', 'BELIEVE', 'ME', 'THAT', 'I', 'AM', 'IN', 'AS', 'GREAT', 'TROUBLE', 'ABOUT', 'IT', 'AS', 'YOU', 'ARE'] +7902-96591-0004-4: ref=['THAT', 'MY', 'FATHER', 'SIR', 'RISDON', 'GRAEME', 'HAS', 'SMUGGLED', 'GOODS', 'HERE'] +7902-96591-0004-4: hyp=['THAT', 'MY', 'FATHER', 'SIR', 'RISDON', 'GRAHAM', 'SMUGGLED', 'GOODS', 'HERE'] +7902-96591-0005-5: ref=['HE', 'COULD', 'NOT', 'HELP', 'IT', 'HE', 'HATES', 'THE', 'SMUGGLERS', 'YOU', 'SHALL', 'NOT', 'TELL'] +7902-96591-0005-5: hyp=['HE', 'COULD', 'NOT', 'HELP', 'IT', 'HE', 'HATE', 'THIS', 'MOTHERS', 'YOU', 'SHALL', 'NOT', 'TELL'] +7902-96591-0006-6: ref=['PRAY', 'PRAY', 'SAY', 'YOU', 'WILL', 'NOT', 'ARCHY', 'WAS', 'SILENT'] +7902-96591-0006-6: hyp=['PRAY', 'PRAY', 'SAY', 'YOU', 'WILL', 'NOT', 'ARCHIE', 'WAS', 'SILENT'] +7902-96591-0007-7: ref=['THEN', 'AS', 'ARCHY', 'STOOD', 'IN', 'THE', 'DARK', 'LITERALLY', 'AGHAST', 'WITH', 'ASTONISHMENT', 'HE', 'HEARD', 'THE', 'FAINT', 'RUSTLING', 'ONCE', 'MORE', 'AND', 'AGAIN', 'ALL', 'WAS', 'SILENT'] +7902-96591-0007-7: hyp=['THEN', 'AS', 'ARCHIE', 'STOOD', 'IN', 'THE', 'DARK', 'LITERALLY', 'AGHAST', 'WITH', 'ASTONISHMENT', 'HE', 'HEARD', 'THE', 'FAINT', 'RUSTLING', 'ONCE', 'MORE', 'AND', 'AGAIN', 'ALL', 'WAS', 'SILENT'] +7902-96591-0008-8: ref=['HE', 'LAUGHED', 'BUT', 'IT', 'WAS', 'A', 'CURIOUS', 'KIND', 'OF', 'LAUGH', 'FULL', 'OF', 'VEXATION', 'INJURED', 'AMOUR', 'PROPRE', 'AS', 'THE', 'FRENCH', 'CALL', 'OUR', 'LOVE', 'OF', 'OUR', 'OWN', 'DIGNITY', 'OF', 'WHICH', 'ARCHIBALD', 'RAYSTOKE', 'IN', 'THE', 'FULL', 'FLUSH', 'OF', 'HIS', 'YOUNG', 'BELIEF', 'IN', 
'HIS', 'IMPORTANCE', 'AS', 'A', 'BRITISH', 'OFFICER', 'HAD', 'A', 'PRETTY', 'GOOD', 'STOCK'] +7902-96591-0008-8: hyp=['HE', 'LAUGHED', 'BUT', 'IT', 'WAS', 'A', 'CURIOUS', 'KIND', 'OF', 'LAUGH', 'FULL', 'OF', 'VEXATION', 'INJURED', 'AMOPRA', 'AS', 'THE', 'FRENCH', 'CALLER', 'LOVE', 'OF', 'HER', 'OWN', 'DIGNITY', 'OF', 'WHICH', 'ARQUEBALD', 'RAYSTROKE', 'IN', 'THE', 'FULL', 'FLUSH', 'OF', 'HIS', 'YOUNG', 'BELIEF', 'IN', 'HIS', 'IMPORTANCE', 'AS', 'A', 'BRITISH', 'OFFICER', 'HAD', 'A', 'PRETTY', 'GOOD', 'STOCK'] +7902-96591-0009-9: ref=['IT', 'ALL', 'COMES', 'OF', 'DRESSING', 'UP', 'IN', 'THIS', 'STUPID', 'WAY', 'LIKE', 'A', 'ROUGH', 'FISHER', 'LAD'] +7902-96591-0009-9: hyp=['AND', 'ALL', 'COMES', 'OF', 'DRESSING', 'UP', 'IN', 'THE', 'STUPID', 'WAY', 'LIKE', 'A', 'ROUGH', 'FISHER', 'LAD'] +7902-96591-0010-10: ref=['COLD', 'WATER', 'CAME', 'ON', 'THIS', 'IDEA', 'DIRECTLY', 'AS', 'HE', 'RECALLED', 'THE', 'FACT', 'THAT', 'THE', 'DARKNESS', 'WAS', 'INTENSE', 'AND', 'CELIA', 'COULD', 'NOT', 'HAVE', 'SEEN', 'HIM'] +7902-96591-0010-10: hyp=['COLD', 'WATER', 'CAME', 'ON', 'THIS', 'IDEA', 'DIRECTLY', 'AS', 'HE', 'RECALLED', 'THE', 'FACT', 'THAT', 'THE', 'DARKNESS', 'WAS', 'INTENSE', 'AND', 'CELIA', 'COULD', 'NOT', 'HAVE', 'SEEN', 'HIM'] +7902-96591-0011-11: ref=["I'LL", 'SOON', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'NOT', 'GOING', 'TO', 'BE', 'PLAYED', 'WITH'] +7902-96591-0011-11: hyp=["I'LL", 'SOON', 'SHOW', 'THEM', 'THAT', 'I', 'AM', 'NOT', 'GOING', 'TO', 'BE', 'PLAYED', 'WITH'] +7902-96591-0012-12: ref=['FOR', 'IT', 'SUDDENLY', 'OCCURRED', 'TO', 'HIM', 'THAT', 'HE', 'WAS', 'NOT', 'ONLY', 'A', 'PRISONER', 'BUT', 'A', 'PRISONER', 'IN', 'THE', 'POWER', 'OF', 'A', 'VERY', 'RECKLESS', 'SET', 'OF', 'PEOPLE', 'WHO', 'WOULD', 'STOP', 'AT', 'NOTHING'] +7902-96591-0012-12: hyp=['FOR', 'IT', 'SUDDENLY', 'OCCURRED', 'TO', 'HIM', 'THAT', 'HE', 'WAS', 'NOT', 'ONLY', 'A', 'PRISONER', 'BUT', 'A', 'PRISONER', 'IN', 'THE', 'POWER', 'OF', 'A', 'VERY', 'RECKLESS', 'SET', 'OF', 'PEOPLE', 'AND', 'WOULD', 'STOP', 'AT', 'NOTHING'] +7902-96591-0013-13: ref=['NO', 'HE', 'THOUGHT', 'TO', 'HIMSELF', 'I', "DON'T", 'BELIEVE', 'THEY', 'WOULD', 'KILL', 'ME', 'BUT', 'THEY', 'WOULD', 'KNOCK', 'ME', 'ABOUT'] +7902-96591-0013-13: hyp=['NO', 'HE', 'THOUGHT', 'TO', 'HIMSELF', 'I', "DON'T", 'BELIEVE', 'THEY', 'WOULD', 'KILL', 'ME', 'BUT', 'THEY', 'WOULD', 'KNOCK', 'ME', 'ABOUT'] +7902-96591-0014-14: ref=['THE', 'KICK', 'HE', 'HAD', 'RECEIVED', 'WAS', 'A', 'FORETASTE', 'OF', 'WHAT', 'HE', 'MIGHT', 'EXPECT', 'AND', 'AFTER', 'A', 'LITTLE', 'CONSIDERATION', 'HE', 'CAME', 'TO', 'THE', 'CONCLUSION', 'THAT', 'HIS', 'DUTY', 'WAS', 'TO', 'ESCAPE', 'AND', 'GET', 'BACK', 'TO', 'THE', 'CUTTER', 'AS', 'QUICKLY', 'AS', 'HE', 'COULD'] +7902-96591-0014-14: hyp=['THE', 'KICKY', 'HAD', 'RECEIVED', 'WAS', 'A', 'FORETASTE', 'OF', 'WHAT', 'HE', 'MIGHT', 'EXPECT', 'AND', 'AFTER', 'A', 'LITTLE', 'CONSIDERATION', 'HE', 'CAME', 'TO', 'THE', 'CONCLUSION', 'THAT', 'HIS', 'DUTY', 'WAS', 'TO', 'ESCAPE', 'AND', 'GET', 'BACK', 'TO', 'THE', 'CUTTER', 'AS', 'QUICKLY', 'AS', 'HE', 'COULD'] +7902-96591-0015-15: ref=['TO', 'DO', 'THIS', 'HE', 'MUST', 'SCHEME', 'LIE', 'HID', 'TILL', 'MORNING', 'THEN', 'MAKE', 'FOR', 'THE', 'NEAREST', 'POINT', 'AND', 'SIGNAL', 'FOR', 'HELP', 'UNLESS', 'A', "BOAT'S", 'CREW', 'WERE', 'ALREADY', 'SEARCHING', 'FOR', 'HIM', 'HOW', 'TO', 'ESCAPE'] +7902-96591-0015-15: hyp=['TO', 'DO', 'THIS', 'HE', 'MUST', 'SCHEME', 'LIE', 'HID', 'TILL', 'MORNING', 'THAN', 'MAKE', 'FOR', 'THE', 'NEAREST', 'POINT', 'A', 'SIGNAL', 'FOR', 'HELP', 'UNLESS', 'A', 
"BOAT'S", 'CREW', 'WERE', 'ALREADY', 'SURGING', 'FOR', 'HIM', 'HOW', 'TO', 'ESCAPE'] +7902-96591-0016-16: ref=['THE', 'WINDOW', 'WAS', 'BARRED', 'BUT', 'HE', 'WENT', 'TO', 'IT', 'AND', 'TRIED', 'THE', 'BARS', 'ONE', 'BY', 'ONE', 'TO', 'FIND', 'THEM', 'ALL', 'SOLIDLY', 'FITTED', 'INTO', 'THE', 'STONE', 'SILL'] +7902-96591-0016-16: hyp=['THE', 'WINDOW', 'WAS', 'BARRED', 'BUT', 'HE', 'WENT', 'TO', 'IT', 'AND', 'TRIED', 'THE', 'BARS', 'ONE', 'BY', 'ONE', 'TO', 'FIND', 'THEM', 'ALL', 'SOLIDLY', 'FITTED', 'INTO', 'THE', 'STONE', 'SILL'] +7902-96591-0017-17: ref=['NEXT', 'MOMENT', 'AS', 'HE', 'FELT', 'HIS', 'WAY', 'ABOUT', 'HIS', 'HAND', 'TOUCHED', 'AN', 'OLD', 'FASHIONED', 'MARBLE', 'MANTELPIECE', 'FIREPLACE', 'CHIMNEY'] +7902-96591-0017-17: hyp=['NEXT', 'MOMENT', 'AS', 'HE', 'FELT', 'HIS', 'WAY', 'ABOUT', 'HIS', 'HAND', 'TOUCHED', 'AN', 'OLD', 'FASHIONED', 'MARBLE', 'MANTELPIECE', 'FIREPLACE', 'CHIMNEY'] +7902-96591-0018-18: ref=['YES', 'IF', 'OTHER', 'WAYS', 'FAILED', 'HE', 'COULD', 'ESCAPE', 'UP', 'THE', 'CHIMNEY'] +7902-96591-0018-18: hyp=['YES', 'IF', 'OTHERWAYS', 'FAILED', 'HE', 'COULD', 'ESCAPE', 'UP', 'THE', 'CHIMNEY'] +7902-96591-0019-19: ref=['NO', 'THAT', 'WAS', 'TOO', 'BAD', 'HE', 'COULD', 'NOT', 'DO', 'THAT'] +7902-96591-0019-19: hyp=['NO', 'THAT', 'WAS', 'TOO', 'BAD', 'HE', 'COULD', 'NOT', 'DO', 'THAT'] +7902-96591-0020-20: ref=['SYMPATHY', 'AND', 'PITY', 'FOR', 'THE', 'DWELLERS', 'IN', 'THE', 'HOZE', 'WERE', 'COMPLETELY', 'GONE', 'NOW', 'AND', 'HE', 'SET', 'HIS', 'TEETH', 'FAST', 'AND', 'MENTALLY', 'CALLED', 'HIMSELF', 'A', 'WEAK', 'IDIOT', 'FOR', 'EVER', 'THINKING', 'ABOUT', 'SUCH', 'PEOPLE'] +7902-96591-0020-20: hyp=['SYMPATHY', 'AND', 'PITY', 'FOR', 'THE', 'DWELLERS', 'IN', 'THE', 'HOSE', 'WERE', 'COMPLETELY', 'GONE', 'NOW', 'AND', 'HE', 'SET', 'HIS', 'TEETH', 'FAST', 'AND', 'MENTALLY', 'CALLED', 'HIMSELF', 'A', 'WEEK', 'IDIOT', 'FOR', 'EVER', 'THINKING', 'ABOUT', 'SUCH', 'PEOPLE'] +7902-96591-0021-21: ref=['A', 'NARROW', 'TABLE', 'AGAINST', 'THE', 'WALL', 'IN', 'TWO', 'PLACES'] +7902-96591-0021-21: hyp=['A', 'NARROW', 'TABLE', 'AGAINST', 'THE', 'WALL', 'IN', 'TWO', 'PLACES'] +7902-96591-0022-22: ref=['HE', 'WENT', 'AND', 'TRIED', 'TO', 'FORCE', 'HIS', 'HEAD', 'THROUGH', 'RECALLING', 'AS', 'HE', 'DID', 'THAT', 'WHERE', 'A', "PERSON'S", 'HEAD', 'WOULD', 'GO', 'THE', 'REST', 'OF', 'THE', 'BODY', 'WOULD', 'PASS'] +7902-96591-0022-22: hyp=['HE', 'WENT', 'AND', 'TRIED', 'TO', 'FORCE', 'HIS', 'HEAD', 'THROUGH', 'RECALLING', 'AS', 'HE', 'DID', 'THAT', 'WHERE', 'A', "PERSON'S", 'HEAD', 'WOULD', 'GO', 'THE', 'REST', 'OF', 'THE', 'BODY', 'WOULD', 'PASS'] +7902-96591-0023-23: ref=['BUT', 'THERE', 'WAS', 'NO', 'CHANCE', 'FOR', 'HIS', 'BODY', 'THERE', 'THE', 'HEAD', 'WOULD', 'NOT', 'GO', 'FIRST'] +7902-96591-0023-23: hyp=['BUT', 'THERE', 'WAS', 'NO', 'CHANCE', 'FOR', 'HIS', 'BODY', 'THERE', 'THE', 'HEAD', 'WOULD', 'NOT', 'GO', 'FIRST'] +7902-96591-0024-24: ref=['A', 'FELLOW', 'WHO', 'WAS', 'SHUT', 'UP', 'IN', 'PRISON', 'FOR', 'LIFE', 'MIGHT', 'DO', 'IT', 'HE', 'SAID', 'BUT', 'NOT', 'IN', 'A', 'CASE', 'LIKE', 'THIS'] +7902-96591-0024-24: hyp=['A', 'FELLOW', 'WHO', 'WAS', 'SHUT', 'UP', 'IN', 'PRISONED', 'FOR', 'LIFE', 'MIGHT', 'DO', 'IT', 'HE', 'SAID', 'BUT', 'NOT', 'IN', 'A', 'CASE', 'LIKE', 'THIS'] +7902-96592-0000-25: ref=['SURE', "YOU'VE", 'LOOKED', 'ROUND', 'EVERYWHERE', 'BOY', 'YES', 'FATHER', 'QUITE'] +7902-96592-0000-25: hyp=['SURE', 'YOU', 'LOOK', 'ROUND', 'EVERYWHERE', 'BOY', 'YES', 'FATHER', 'QUITE'] +7902-96592-0001-26: ref=["I'M", 'GOING', 'HOME', 'TO', 'BREAKFAST'] 
+7902-96592-0001-26: hyp=["I'M", 'GOING', 'HOME', 'TO', 'BREAKFAST'] +7902-96592-0002-27: ref=['SHALL', 'I', 'COME', 'TOO', 'FATHER', 'NO'] +7902-96592-0002-27: hyp=['SHALL', 'I', 'COME', 'TO', 'FATHER', 'NO'] +7902-96592-0003-28: ref=['STOP', 'HERE', 'TILL', 'SIR', 'RISDON', 'COMES', 'DOWN', 'AND', 'TELL', 'HIM', "I'M", 'VERY', 'SORRY', 'THAT', 'WE', 'SHOULD', 'HAVE', 'CLEARED', 'OUT', 'LAST', 'NIGHT', 'ONLY', 'A', 'BORN', 'FOOL', 'SAW', 'JERRY', "NANDY'S", 'LOBSTER', 'BOAT', 'COMING', 'INTO', 'THE', 'COVE', 'AND', 'CAME', 'RUNNING', 'TO', 'SAY', 'IT', 'WAS', 'A', 'PARTY', 'FROM', 'THE', 'CUTTER', 'YES', 'FATHER'] +7902-96592-0003-28: hyp=['STOP', 'HERE', 'TILL', 'SIR', 'RISDON', 'COMES', 'DOWN', 'AND', 'TELL', 'HIM', "I'M", 'VERY', 'SORRY', 'THAT', 'WE', 'SHOULD', 'HAVE', 'CLEARED', 'OUT', 'LAST', 'NIGHT', 'ONLY', 'A', 'BORN', 'FOOL', 'SAW', 'JERRY', "ANDY'S", 'LOBSTERBOAT', 'COMING', 'INTO', 'THE', 'COVE', 'AND', 'CAME', 'RUNNING', 'TO', 'SAY', 'IT', 'WAS', 'A', 'PARTY', 'FROM', 'THE', 'CUTTER', 'YES', 'FATHER'] +7902-96592-0004-29: ref=['TELL', 'HIM', 'NOT', 'TO', 'BE', 'UNEASY', 'TIS', 'ALL', 'RIGHT', 'AND', "I'LL", 'HAVE', 'EVERYTHING', 'CLEAR', 'AWAY', 'TO', 'NIGHT'] +7902-96592-0004-29: hyp=['TELL', 'HIM', 'NOT', 'TO', 'BE', 'UNEASY', 'TIS', 'ALL', 'RIGHT', 'AND', "I'LL", 'HAVE', 'EVERYTHING', 'CLEAR', 'AWAY', 'TO', 'NIGHT'] +7902-96592-0005-30: ref=['THE', 'DULL', 'SOUND', 'OF', 'DEPARTING', 'STEPS', 'AND', 'A', 'LOW', 'WHISTLING', 'SOUND', 'COMING', 'DOWN', 'THROUGH', 'THE', 'SKYLIGHT', 'WINDOW', 'INTO', 'THE', 'CABIN', 'WHERE', 'ARCHY', 'RAYSTOKE', 'LAY', 'WITH', 'HIS', 'HEAVY', 'EYELIDS', 'PRESSED', 'DOWN', 'BY', 'SLEEP'] +7902-96592-0005-30: hyp=['THE', 'DULL', 'SOUND', 'OF', 'DEPARTING', 'STEPS', 'AND', 'A', 'LOW', 'WHISTLING', 'SOUND', 'COMING', 'DOWN', 'THROUGH', 'THE', 'SKYLIGHT', 'WINDOW', 'INTO', 'THE', 'CABIN', 'WHERE', 'ARCHIE', 'RAYSTROKE', 'LAY', 'WITH', 'HIS', 'HEAVY', 'EYELIDS', 'PRESSED', 'DOWN', 'BY', 'SLEEP'] +7902-96592-0006-31: ref=['WHAT', 'A', 'QUEER', 'DREAM', 'HE', 'THOUGHT', 'TO', 'HIMSELF'] +7902-96592-0006-31: hyp=['WHAT', 'A', 'QUEER', 'DREAM', 'HE', 'THOUGHT', 'TO', 'HIMSELF'] +7902-96592-0007-32: ref=['BUT', 'HOW', 'QUEER', 'FOR', 'MISTER', 'GURR', 'TO', 'BE', 'TALKING', 'LIKE', 'THAT', 'TO', 'ANDREW', 'TEAL', 'THE', 'BOY', 'WHO', 'HELPED', 'THE', 'COOK'] +7902-96592-0007-32: hyp=['BUT', 'HOW', 'QUEER', 'FOR', 'MISTER', 'GORE', 'TO', 'BE', 'TALKING', 'LIKE', 'THAT', 'DANGER', 'TEALE', 'THE', 'BOY', 'WHO', 'HELPS', 'THE', 'COOK'] +7902-96592-0008-33: ref=['AND', 'WHY', 'DID', 'ANDY', 'CALL', 'MISTER', 'GURR', 'FATHER'] +7902-96592-0008-33: hyp=['AND', 'WHY', 'DID', 'ANDY', 'CALL', 'MISTER', 'GORE', 'FATHER'] +7902-96592-0009-34: ref=['THERE', 'WAS', 'AN', 'INTERVAL', 'OF', 'THINKING', 'OVER', 'THIS', 'KNOTTY', 'QUESTION', 'DURING', 'WHICH', 'THE', 'LOW', 'WHISTLING', 'WENT', 'ON'] +7902-96592-0009-34: hyp=['THERE', 'WAS', 'AN', 'INTERVAL', 'OF', 'THINKING', 'OVER', 'THIS', 'NAUGHTY', 'QUESTION', 'DURING', 'WHICH', 'THE', 'LOW', 'WHISTLING', 'WENT', 'ON'] +7902-96592-0010-35: ref=['AND', "I'M", 'HUNGRY', 'TOO', 'TIME', 'I', 'WAS', 'UP', 'I', 'SUPPOSE'] +7902-96592-0010-35: hyp=['AND', "I'M", 'HUNGRY', 'TOO', 'TOM', 'I', 'WAS', 'UP', 'I', 'SUPPOSE'] +7902-96592-0011-36: ref=['NO', 'HE', 'WAS', 'NOT', 'DREAMING', 'FOR', 'HE', 'WAS', 'LOOKING', 'OUT', 'ON', 'THE', 'SEA', 'OVER', 'WHICH', 'A', 'FAINT', 'MIST', 'HUNG', 'LIKE', 'WREATHS', 'OF', 'SMOKE'] +7902-96592-0011-36: hyp=['NO', 'HE', 'WAS', 'NOT', 'DREAMING', 'FOR', 'HE', 'WAS', 'LOOKING', 'OUT', 
'ON', 'THE', 'SEA', 'OVER', 'WHICH', 'A', 'FAINT', 'MIST', 'HUNG', 'LIKE', 'WREATHS', 'OF', 'SMOKE'] +7902-96592-0012-37: ref=['WHAT', 'DID', 'THEY', 'SAY', 'FALSE', 'ALARM', 'TELL', 'SIR', 'RISDON', 'THEY', 'WOULD', 'CLEAR', 'ALL', 'AWAY', 'TO', 'NIGHT', 'SEE', 'IF', 'ANYTHING', 'HAD', 'BEEN', 'LEFT', 'ABOUT', 'LOBSTER', 'BOAT'] +7902-96592-0012-37: hyp=['WHAT', 'DID', 'THEY', 'SAY', 'FALSE', 'ALARM', 'TELL', 'SERVANTS', 'AND', 'THEY', 'WOULD', 'CLEAR', 'ALL', 'AWAY', 'TO', 'NIGHT', 'SEE', 'IF', 'ANYTHING', 'HAD', 'BEEN', 'LEFT', 'ABOUT', 'LOBSTER', 'WROTE'] +7902-96592-0013-38: ref=['ONCE', 'OUT', 'OF', 'THAT', 'ROOM', 'HE', 'COULD', 'RAN', 'AND', 'BY', 'DAYLIGHT', 'THE', 'SMUGGLERS', 'DARE', 'NOT', 'HUNT', 'HIM', 'DOWN'] +7902-96592-0013-38: hyp=['ONCE', 'OUT', 'OF', 'THAT', 'ROOM', 'HE', 'COULD', 'RAN', 'AND', 'BY', 'DAYLIGHT', 'THE', 'SMOGG', 'WAS', 'DARED', 'NOT', 'HUNT', 'HIM', 'DOWN'] +7902-96592-0014-39: ref=['OH', 'THOSE', 'BARS', 'HE', 'MENTALLY', 'EXCLAIMED', 'AND', 'HE', 'WAS', 'ADVANCING', 'TOWARD', 'THEM', 'WHEN', 'JUST', 'AS', 'HE', 'DREW', 'NEAR', 'THERE', 'WAS', 'A', 'RUSTLING', 'NOISE', 'UNDER', 'THE', 'WINDOW', 'A', 'COUPLE', 'OF', 'HANDS', 'SEIZED', 'THE', 'BARS', 'THERE', 'WAS', 'A', 'SCRATCHING', 'OF', 'BOOT', 'TOES', 'AGAINST', 'STONE', 'WORK', 'AND', "RAM'S", 'FACE', 'APPEARED', 'TO', 'GAZE', 'INTO', 'THE', 'ROOM', 'BY', 'INTENTION', 'BUT', 'INTO', 'THE', 'ASTONISHED', 'COUNTENANCE', 'OF', 'THE', 'YOUNG', 'MIDSHIPMAN', 'INSTEAD'] +7902-96592-0014-39: hyp=['OH', 'THOSE', 'BARS', 'HE', 'MENTALLY', 'EXCLAIMED', 'AND', 'HE', 'WAS', 'ADVANCING', 'TOWARDS', 'THEM', 'WHEN', 'JUST', 'AS', 'HE', 'DREW', 'NEAR', 'THERE', 'WAS', 'A', 'RUSTLING', 'NOISE', 'UNDER', 'THE', 'WINDOW', 'A', 'COUPLE', 'OF', 'HANDS', 'SEIZED', 'THE', 'BARS', 'THERE', 'WAS', 'A', 'SCRATCHING', 'OF', 'BOOT', 'TOES', 'AGAINST', 'STONE', 'WORK', 'AND', "RAM'S", 'FACE', 'APPEARED', 'TO', 'GAZE', 'INTO', 'THE', 'ROOM', 'BY', 'INTENTION', 'BUT', 'INTO', 'THE', 'ASTONISHED', 'COUNTENANCE', 'OF', 'THE', 'YOUNG', 'MIDSHIPMAN', 'INSTEAD'] +7902-96592-0015-40: ref=['RAM', 'WAS', 'THE', 'FIRST', 'TO', 'RECOVER', 'FROM', 'HIS', 'SURPRISE'] +7902-96592-0015-40: hyp=['ROOM', 'WAS', 'THE', 'FIRST', 'TO', 'RECOVER', 'FROM', 'HIS', 'SURPRISE'] +7902-96592-0016-41: ref=['HULLO', 'HE', 'SAID', 'WHO', 'ARE', 'YOU'] +7902-96592-0016-41: hyp=['HULLO', 'HE', 'SAID', 'WHO', 'ARE', 'YOU'] +7902-96592-0017-42: ref=['GO', 'ROUND', 'AND', 'OPEN', 'THE', 'DOOR', 'I', 'WAS', 'SHUT', 'IN', 'LAST', 'NIGHT', 'BY', 'MISTAKE'] +7902-96592-0017-42: hyp=['GO', 'ROUND', 'AND', 'OPEN', 'THE', 'DOOR', 'I', 'WAS', 'SHUT', 'IN', 'LAST', 'NIGHT', 'BY', 'MISTAKE'] +7902-96592-0018-43: ref=['I', 'SAW', 'YOU', 'LAST', 'NIGHT', 'AND', 'WONDERED', 'WHOSE', 'BOY', 'YOU', 'WAS'] +7902-96592-0018-43: hyp=['I', 'SAW', 'YOU', 'LAST', 'NIGHT', 'AND', 'WONDERED', 'WHOSE', 'BOY', 'YOU', 'WAS'] +7902-96592-0019-44: ref=['IT', 'WAS', 'YOU', 'FATHER', 'KICKED', 'FOR', 'SHIRKING', 'AND', 'MY', 'WELL', 'I', 'HARDLY', 'KNOWED', 'YOU'] +7902-96592-0019-44: hyp=['IT', 'WAS', 'YOUR', 'FATHER', 'KICKED', 'FOR', 'SHIRKING', 'AND', 'MY', 'WELL', 'I', 'HARDLY', 'KNOWED', 'YOU'] +7902-96592-0020-45: ref=['NONSENSE'] +7902-96592-0020-45: hyp=['NONSENSE'] +7902-96592-0021-46: ref=["WON'T", 'DO', 'SAID', 'RAM', 'GRINNING'] +7902-96592-0021-46: hyp=["WON'T", 'DO', 'SAID', 'RAM', 'GRINNIE'] +7902-96592-0022-47: ref=['THINK', 'I', "DON'T", 'KNOW', 'YOU', 'MISTER', 'ORFICER'] +7902-96592-0022-47: hyp=['THINK', 'I', "DON'T", 'KNOW', 'YOU', 'MISTER', 'ORFICER'] 
+7902-96592-0023-48: ref=["WON'T", 'DO', 'SAID', 'RAM', 'QUICKLY', 'I', 'KNOW', 'YOU'] +7902-96592-0023-48: hyp=["WON'T", 'DO', 'SAID', 'RUM', 'QUICKLY', 'I', 'KNOW', 'YOU'] +7902-96592-0024-49: ref=['BEEN', 'PLAYING', 'THE', 'SPY', "THAT'S", 'WHAT', "YOU'VE", 'BEEN', 'DOING', 'WHO', 'LOCKED', 'YOU', 'IN'] +7902-96592-0024-49: hyp=['THEN', 'PLAYING', 'THE', 'SPY', "THAT'S", 'WHAT', "YOU'VE", 'BEEN', 'DOING', 'WHO', 'LOCKED', 'YOU', 'IN'] +7902-96592-0025-50: ref=['ARCHY', 'STEPPED', 'BACK', 'TO', 'THE', 'DOOR', 'LISTENING', 'BUT', 'THERE', 'WAS', 'NOT', 'A', 'SOUND'] +7902-96592-0025-50: hyp=['ARCHIE', 'STEPPED', 'BACK', 'TO', 'THE', 'DOOR', 'LISTENING', 'BUT', 'THERE', 'WAS', 'NOT', 'A', 'SOUND'] +7902-96592-0026-51: ref=['HE', 'HAS', 'GONE', 'TO', 'GIVE', 'THE', 'ALARM', 'THOUGHT', 'THE', 'PRISONER', 'AND', 'HE', 'LOOKED', 'EXCITEDLY', 'ROUND', 'FOR', 'A', 'WAY', 'OF', 'ESCAPE'] +7902-96592-0026-51: hyp=['HE', 'HAS', 'GONE', 'TO', 'GIVE', 'THE', 'ALARM', 'THOUGHT', 'THE', 'PRISONER', 'AND', 'HE', 'LOOKED', 'EXCITEDLY', 'ROUND', 'FOR', 'A', 'WAY', 'OF', 'ESCAPE'] +7902-96592-0027-52: ref=['NOTHING', 'BUT', 'THE', 'CHIMNEY', 'PRESENTED', 'ITSELF'] +7902-96592-0027-52: hyp=['NOTHING', 'BUT', 'THE', 'CHIMNEY', 'PRESENTED', 'ITSELF'] +7902-96592-0028-53: ref=['A', 'HAPPY', 'INSPIRATION', 'HAD', 'COME', 'AND', 'PLACING', 'ONE', 'HAND', 'UPON', 'HIS', 'BREAST', 'HE', 'THRUST', 'IN', 'THE', 'OTHER', 'GAVE', 'A', 'TUG', 'AND', 'DREW', 'OUT', 'HIS', 'LITTLE', 'CURVED', 'DIRK', 'GLANCED', 'AT', 'THE', 'EDGE', 'RAN', 'TO', 'THE', 'WINDOW', 'AND', 'BEGAN', 'TO', 'CUT', 'AT', 'ONE', 'OF', 'THE', 'BARS', 'LABOUR', 'IN', 'VAIN'] +7902-96592-0028-53: hyp=['A', 'HAPPY', 'INSPIRATION', 'HAD', 'COME', 'AND', 'PLACING', 'ONE', 'HAND', 'UPON', 'HIS', 'CHEST', 'HE', 'THRUST', 'IN', 'THE', 'OTHER', 'GAVE', 'A', 'TUG', 'AND', 'DREW', 'OUT', 'HIS', 'LITTLE', 'CURVED', 'DIRK', 'GLANCED', 'AT', 'THE', 'EDGE', 'RAN', 'TO', 'THE', 'WINDOW', 'AND', 'BEGAN', 'TO', 'CUT', 'AT', 'ONE', 'OF', 'THE', 'BARS', 'LABOR', 'IN', 'VAIN'] +7902-96592-0029-54: ref=['HE', 'DIVIDED', 'THE', 'PAINT', 'AND', 'PRODUCED', 'A', 'FEW', 'SQUEAKS', 'AND', 'GRATING', 'SOUNDS', 'AS', 'HE', 'REALISED', 'THAT', 'THE', 'ATTEMPT', 'WAS', 'MADNESS'] +7902-96592-0029-54: hyp=['HE', 'DIVIDED', 'THE', 'PAIN', 'AND', 'PRODUCED', 'A', 'FEW', 'SQUEAKS', 'IN', 'GRATING', 'SOUNDS', 'AS', 'HE', 'REALIZED', 'THAT', 'THE', 'ATTEMPT', 'WAS', 'MADNESS'] +7902-96592-0030-55: ref=['THE', 'RESULT', 'WAS', 'NOT', 'VERY', 'SATISFACTORY', 'BUT', 'SUFFICIENTLY', 'SO', 'TO', 'MAKE', 'HIM', 'ESSAY', 'THE', 'BAR', 'OF', 'THE', 'WINDOW', 'ONCE', 'MORE', 'PRODUCING', 'A', 'GRATING', 'EAR', 'ASSAILING', 'SOUND', 'AS', 'HE', 'FOUND', 'THAT', 'NOW', 'HE', 'DID', 'MAKE', 'A', 'LITTLE', 'IMPRESSION', 'SO', 'LITTLE', 'THOUGH', 'THAT', 'THE', 'PROBABILITY', 'WAS', 'IF', 'HE', 'KEPT', 'ON', 'WORKING', 'WELL', 'FOR', 'TWENTY', 'FOUR', 'HOURS', 'HE', 'WOULD', 'NOT', 'GET', 'THROUGH'] +7902-96592-0030-55: hyp=['THE', 'RESULT', 'WAS', 'NOT', 'VERY', 'SATISFACTORY', 'BUT', 'SUFFICIENTLY', 'SO', 'TO', 'MAKE', 'HIM', 'ESSAY', 'THE', 'BAR', 'OF', 'THE', 'WINDOW', 'ONCE', 'MORE', 'PRODUCING', 'A', 'GRATING', 'IRRESCELLING', 'SOUND', 'AS', 'HE', 'FOUND', 'THAT', 'NOW', 'HE', 'DID', 'MAKE', 'A', 'LITTLE', 'IMPRESSION', 'SO', 'LITTLE', 'THOUGH', 'THAT', 'THE', 'PROBABILITY', 'WAS', 'IF', 'HE', 'KEPT', 'ON', 'WORKING', 'WELL', 'FOR', 'TWENTY', 'FOUR', 'HOURS', 'HE', 'WOULD', 'NOT', 'GET', 'THROUGH'] +7902-96592-0031-56: ref=['BUT', 'AT', 'THE', 'END', 'OF', 'FIVE', 'MINUTES', 'HE', 'STOPPED', 
'AND', 'THRUST', 'BACK', 'THE', 'DIRK', 'INTO', 'ITS', 'SHEATH'] +7902-96592-0031-56: hyp=['BUT', 'AT', 'THE', 'END', 'OF', 'FIVE', 'MINUTES', 'HE', 'STOPPED', 'AND', 'THRUST', 'BACK', 'THE', 'DARK', 'INTO', 'ITS', 'SHEATH'] +7902-96592-0032-57: ref=['NO', 'I', "CAN'T", 'PART', 'WITH', 'THAT', 'HA', 'HA', 'HA', 'LAUGHED', 'THE', 'BOY', 'JEERINGLY'] +7902-96592-0032-57: hyp=['NO', 'I', "CAN'T", 'PART', 'WITH', 'THAT', 'HA', 'HA', 'LAUGHED', 'THE', 'BOY', 'JEERINGLY'] +7902-96592-0033-58: ref=['BUT', "I'LL", 'YES', "I'LL", 'GIVE', 'YOU', 'A', 'GUINEA', 'IF', 'YOU', 'WILL', 'LET', 'ME', 'OUT'] +7902-96592-0033-58: hyp=['BLOW', 'YES', "I'LL", 'GIVE', 'YOU', 'A', 'GUINEA', 'IF', 'YOU', 'WILL', 'LET', 'ME', 'OUT'] +7902-96592-0034-59: ref=['GUINEA', 'SAID', 'THE', 'BOY', 'THINK', "I'D", 'DO', 'IT', 'FOR', 'A', 'GUINEA', 'WELL', 'THEN', 'TWO'] +7902-96592-0034-59: hyp=['GUINEAS', 'OF', 'THE', 'BOY', 'THINK', "I'LL", 'DO', 'IT', 'FOR', 'A', 'GUINEA', 'WELL', 'THEN', 'TOO'] +7902-96592-0035-60: ref=['BE', 'QUICK', "THERE'S", 'A', 'GOOD', 'FELLOW', 'I', 'WANT', 'TO', 'GET', 'AWAY', 'AT', 'ONCE'] +7902-96592-0035-60: hyp=['BE', 'QUICK', "THERE'S", 'A', 'GOOD', 'FELLOW', 'I', 'WANT', 'TO', 'GET', 'AWAY', 'AT', 'ONCE'] +7902-96592-0036-61: ref=['NOT', 'YOU', 'ONLY', 'A', 'SHAM'] +7902-96592-0036-61: hyp=['NOT', 'YOU', 'ONLY', 'A', 'SHAM'] +7902-96592-0037-62: ref=['WHY', 'YOUR', 'CLOTHES', "DON'T", 'FIT', 'YOU', 'AND', 'YOUR', "CAP'S", 'PUT', 'ON', 'ALL', 'SKEW', 'REW'] +7902-96592-0037-62: hyp=['WHY', "YOU'RE", 'CLOTHES', "DON'T", 'FIT', 'YOU', 'AND', 'YOUR', 'CAPS', 'PUT', 'ON', 'ALL', 'SKEERO'] +7902-96592-0038-63: ref=['NEVER', 'MIND', 'ABOUT', 'THAT', 'LET', 'ME', 'OUT', 'OF', 'THIS', 'PLACE'] +7902-96592-0038-63: hyp=['NEVER', 'MIND', 'ABOUT', 'THAT', 'LET', 'ME', 'OUT', 'OF', 'THIS', 'PLACE'] +7902-96592-0039-64: ref=['I', 'TOLD', 'YOU', 'A', 'FISHER', 'BOY', 'CRIED', 'ARCHY', 'IMPATIENTLY', 'BUT', 'TRYING', 'NOT', 'TO', 'OFFEND', 'HIS', 'VISITOR', 'WHO', 'POSSESSED', 'THE', 'POWER', 'OF', 'CONFERRING', 'FREEDOM', 'BY', 'SPEAKING', 'SHARPLY'] +7902-96592-0039-64: hyp=['I', 'TOLD', 'YOU', 'A', 'FISHER', 'BOY', 'CRIED', 'ARCHIE', 'IMPATIENTLY', 'BUT', 'TRYING', 'NOT', 'TO', 'OFFEND', 'HIS', 'VISITOR', 'WHO', 'POSSESSED', 'THE', 'POWER', 'OF', 'CONFERRING', 'FREEDOM', 'BY', 'SPEAKING', 'SHARPLY'] +7902-96592-0040-65: ref=['NOT', 'YOU', 'LOOK', 'LIKE', 'A', 'WILD', 'BEAST', 'IN', 'A', 'CAGE', 'LIKE', 'A', 'MONKEY', 'YOU', 'INSOLENT'] +7902-96592-0040-65: hyp=['NOT', 'YOU', 'LOOK', 'LIKE', 'A', 'WILD', 'BEAST', 'IN', 'A', 'CAGE', 'LIKE', 'A', 'MONKEY', 'YOU', 'INSOLENT'] +7902-96592-0041-66: ref=['ARCHY', 'CHECKED', 'HIMSELF', 'AND', 'THE', 'BOY', 'LAUGHED'] +7902-96592-0041-66: hyp=['ARCHIE', 'CHECKED', 'HIMSELF', 'IN', 'THE', 'BOY', 'LAUGHED'] +7902-96592-0042-67: ref=['IT', 'WAS', 'YOUR', 'TURN', 'YESTERDAY', "IT'S", 'MINE', 'TO', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0042-67: hyp=['IT', 'WAS', 'YOUR', 'TURN', 'YESTERDAY', "IT'S", 'MINE', 'TO', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0043-68: ref=['YOU', 'LAUGHED', 'AND', 'FLEERED', 'AT', 'ME', 'WHEN', 'I', 'WAS', 'ON', 'THE', "CUTTER'S", 'DECK'] +7902-96592-0043-68: hyp=['YOU', 'LAUGHED', 'AND', 'FLEERED', 'AT', 'ME', 'WHEN', 'I', 'WAS', 'ON', 'THE', "CUTTER'S", 'DECK'] +7902-96592-0044-69: ref=['I', 'SAY', 'YOU', 'DO', 'LOOK', 'A', 'RUM', 'UN', 'JUST', 'LIKE', 'A', 'BIG', 'MONKEY', 'IN', 'A', 'SHOW'] +7902-96592-0044-69: hyp=['I', 'SAY', 'YOU', 'DO', 'LOOK', 'LIKE', 'A', 'ROMAN', 'JUST', 'LIKE', 'A', 'BIG', 'MONKEY', 'IN', 'A', 'SHOW'] 
+7902-96592-0045-70: ref=['RAM', 'SHOWED', 'HIS', 'WHITE', 'TEETH', 'AS', 'HE', 'BURST', 'OUT', 'WITH', 'A', 'LONG', 'LOW', 'FIT', 'OF', 'LAUGHTER'] +7902-96592-0045-70: hyp=['RAM', 'SHOWED', 'HIS', 'WHITE', 'TEETH', 'AS', 'HE', 'BURST', 'OUT', 'WITH', 'A', 'LONG', 'LOW', 'FIT', 'OF', 'LAUGHTER'] +7902-96592-0046-71: ref=['YOU', "ROPE'S", 'END', 'ME', 'HE', 'SAID'] +7902-96592-0046-71: hyp=['EURE', 'HOPES', 'AND', 'ME', 'HE', 'SAID'] +7902-96592-0047-72: ref=['WHY', 'I', 'COULD', 'TIE', 'YOU', 'UP', 'IN', 'A', 'KNOT', 'AND', 'HEAVE', 'YOU', 'OFF', 'THE', 'CLIFF', 'ANY', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0047-72: hyp=['WHY', 'I', 'COULD', 'TIE', 'YOU', 'UP', 'IN', 'A', 'KNOT', 'AND', 'HEAVE', 'YOU', 'OFF', 'THE', 'CLIFF', 'ANY', 'DAY', 'WHAT', 'A', 'GAME'] +7902-96592-0048-73: ref=['BIT', 'OF', 'A', 'MIDDY', 'FED', 'ON', 'SALT', 'TACK', 'AND', 'WEEVILLY', 'BISCUIT', 'TALK', 'OF', 'GIVING', 'ME', "ROPE'S", 'END'] +7902-96592-0048-73: hyp=['BIT', 'OF', 'AMIDDY', 'FED', 'ON', 'A', 'SALT', 'TACK', 'AND', 'WEEBLY', 'BISCUIT', 'TALK', 'OF', 'GIVING', 'ME', 'ROPES', 'AND'] +7902-96592-0049-74: ref=['ONCE', 'MORE', 'WILL', 'YOU', 'COME', 'AND', 'LET', 'ME', 'OUT', 'NO'] +7902-96592-0049-74: hyp=['ONCE', 'MORE', 'WILL', 'YOU', 'COME', 'AND', 'LET', 'ME', 'OUT', 'NO'] +7902-96592-0050-75: ref=['TO', 'HIS', 'ASTONISHMENT', 'THE', 'BOY', 'DID', 'NOT', 'FLINCH', 'BUT', 'THRUST', 'HIS', 'OWN', 'ARMS', 'THROUGH', 'PLACING', 'THEM', 'ABOUT', 'THE', "MIDDY'S", 'WAIST', 'CLENCHING', 'HIS', 'HANDS', 'BEHIND', 'AND', 'UTTERING', 'A', 'SHARP', 'WHISTLE'] +7902-96592-0050-75: hyp=['TO', 'HIS', 'ASTONISHMENT', 'THE', 'BOY', 'DID', 'NOT', 'FLINCH', 'BUT', 'THRUST', 'HIS', 'OWN', 'ARMS', 'THROUGH', 'REPLACING', 'THEM', 'ABOUT', 'THE', "MIDDY'S", 'WAIST', 'CLENCHING', 'HIS', 'HAND', 'BEHIND', 'AND', 'UTTERING', 'A', 'SHARP', 'WHISTLE'] +7902-96594-0000-76: ref=['SEEMED', 'IN', 'GOOD', 'SPIRITS', 'LAST', 'NIGHT', 'MISTER', 'GURR', 'EH'] +7902-96594-0000-76: hyp=['SEEMING', 'EXPERIENCE', 'LAST', 'NIGHT', 'MISTER', 'GURR', 'HEY'] +7902-96594-0001-77: ref=['YES', 'SIR', 'BUT', 'HE', 'MAY', 'TURN', 'UP', 'ON', 'THE', 'CLIFF', 'AT', 'ANY', 'MOMENT'] +7902-96594-0001-77: hyp=['YES', 'SIR', 'BUT', 'HE', 'MAY', 'TURN', 'UPON', 'THE', 'CLIFF', 'AT', 'ANY', 'MOMENT'] +7902-96594-0002-78: ref=['YES', 'MEN', 'QUITE', 'READY', 'YES', 'SIR'] +7902-96594-0002-78: hyp=['YES', 'MEN', 'QUITE', 'READY', 'YES', 'SIR'] +7902-96594-0003-79: ref=["THAT'S", 'RIGHT', 'OF', 'COURSE', 'WELL', 'ARMED'] +7902-96594-0003-79: hyp=["IT'S", 'WRITTEN', 'OF', 'COURSE', 'WELL', 'ARMED'] +7902-96594-0004-80: ref=['SOON', 'AS', 'THE', 'SIGNAL', 'COMES', 'WE', 'SHALL', 'PUSH', 'OFF'] +7902-96594-0004-80: hyp=['SOON', 'AS', 'THE', 'SIGNAL', 'COMES', 'WE', 'SHALL', 'PUSH', 'OFF'] +7902-96594-0005-81: ref=['AWKWARD', 'BIT', 'O', 'COUNTRY', 'SIR', 'SIX', 'MILES', 'ROW', 'BEFORE', 'YOU', 'CAN', 'FIND', 'A', 'PLACE', 'TO', 'LAND'] +7902-96594-0005-81: hyp=['OF', 'HER', 'BITTER', 'COUNTRY', 'SIR', 'SIX', 'MILES', 'ROW', 'FOR', 'YOU', 'CAN', 'FIND', 'A', 'PLACE', 'TO', 'LAND'] +7902-96594-0006-82: ref=['SO', 'SHALL', 'WE', 'YET', 'SIR'] +7902-96594-0006-82: hyp=['SO', 'SHALL', 'WE', 'YET', 'SIR'] +7902-96594-0007-83: ref=['YOU', "DON'T", 'THINK', 'MISTER', 'GURR', 'THAT', 'THEY', 'WOULD', 'DARE', 'TO', 'INJURE', 'HIM', 'IF', 'HE', 'WAS', 'SO', 'UNLUCKY', 'AS', 'TO', 'BE', 'CAUGHT'] +7902-96594-0007-83: hyp=['YOU', "DON'T", 'THINK', 'MISTER', 'GIRL', 'THAT', 'THEY', 'WOULD', 'DARE', 'TO', 'INJURE', 'HIM', 'IF', 'HE', 'WAS', 'SO', 'UNLUCKY', 'AS', 'TO', 'BE', 
'CAUGHT'] +7902-96594-0008-84: ref=['WELL', 'SIR', 'SAID', 'THE', 'MASTER', 'HESITATING', 'SMUGGLERS', 'ARE', 'SMUGGLERS'] +7902-96594-0008-84: hyp=['WELL', 'SIR', 'SAID', 'THE', 'MASTER', 'HESITATING', 'SMUGGLERS', 'OR', 'SMUGGLERS'] +7902-96594-0009-85: ref=['CERTAINLY', 'SIR', 'SMUGGLERS', 'ARE', 'SMUGGLERS', 'INDEED'] +7902-96594-0009-85: hyp=['CERTAINLY', 'SIR', 'SMUGGLERS', 'ARE', 'SMUGGLERS', 'INDEED'] +7902-96594-0010-86: ref=['BEG', 'PARDON', 'SIR', "DIDN'T", 'MEAN', 'ANY', 'HARM'] +7902-96594-0010-86: hyp=['THEY', 'PARDON', 'SIR', "DIDN'T", 'MEAN', 'ANY', 'HARM'] +7902-96594-0011-87: ref=["I'M", 'GETTING', 'VERY', 'ANXIOUS', 'ABOUT', 'MISTER', 'RAYSTOKE', 'START', 'AT', 'ONCE', 'SIR'] +7902-96594-0011-87: hyp=['AND', 'GETTING', 'VERY', 'ANXIOUS', 'ABOUT', 'MISTER', 'RAYSTROKE', 'START', 'AT', 'ONCE', 'SIR'] +7902-96594-0012-88: ref=['NO', 'WAIT', 'ANOTHER', 'HALF', 'HOUR'] +7902-96594-0012-88: hyp=['NO', 'WHERE', 'ANOTHER', 'AND', 'HALF', 'HOUR'] +7902-96594-0013-89: ref=['VERY', 'ILL', 'ADVISED', 'THING', 'TO', 'DO'] +7902-96594-0013-89: hyp=['VERY', 'ILL', 'ADVICE', 'THING', 'TO', 'DO'] +7902-96594-0014-90: ref=['THEN', 'I', 'MUST', 'REQUEST', 'THAT', 'YOU', 'WILL', 'NOT', 'MAKE', 'IT', 'AGAIN', 'VERY', 'TRUE'] +7902-96594-0014-90: hyp=['THAT', 'I', 'MUST', 'REQUEST', 'THAT', 'YOU', 'WILL', 'NOT', 'MAKE', 'IT', 'AGAIN', 'VERY', 'TRUE'] +7902-96594-0015-91: ref=['AWK', 'WARD', 'MISTER', 'GURR', 'AWKWARD'] +7902-96594-0015-91: hyp=['AWKWARD', 'MISTER', 'GARR', 'AWKWARD'] +7902-96594-0016-92: ref=['YES', 'SIR', 'OF', 'COURSE'] +7902-96594-0016-92: hyp=['YES', 'SIR', 'OF', 'COURSE'] +7902-96594-0017-93: ref=['SAY', 'AWK', 'WARD', 'IN', 'FUTURE', 'NOT', "AWK'ARD"] +7902-96594-0017-93: hyp=['SAY', 'AWKWARD', 'IN', 'THE', 'FUTURE', 'NOT', 'UPWARD'] +7902-96594-0018-94: ref=['I', 'MEAN', 'ALL', 'ALONE', 'BY', 'MYSELF', 'SIR'] +7902-96594-0018-94: hyp=['I', 'MEAN', 'ALL', 'ALONE', 'BY', 'MYSELF', 'SIR'] +7902-96594-0019-95: ref=['WHAT', 'FOR', 'THERE', "AREN'T", 'A', 'PUBLIC', 'HOUSE', 'FOR', 'TEN', 'MILES', "DIDN'T", 'MEAN', 'THAT'] +7902-96594-0019-95: hyp=['WHAT', 'FOR', 'THERE', "AREN'T", 'A', 'PUBLIC', 'HOUSE', 'FOR', 'TEN', 'MILES', "DIDN'T", 'MEAN', 'THAT'] +7902-96594-0020-96: ref=['THEN', 'WHAT', 'DID', 'YOU', 'MEAN', 'SPEAK', 'OUT', 'AND', "DON'T", 'DO', 'THE', 'DOUBLE', 'SHUFFLE', 'ALL', 'OVER', 'MY', 'CLEAN', 'DECK', 'NO', 'SIR'] +7902-96594-0020-96: hyp=['THEN', 'WHAT', 'DID', 'JULIA', 'SPEAK', 'OUT', 'AND', "DON'T", 'DO', 'THE', 'DOUBLE', 'SHUFFLE', 'ALL', 'OVER', 'MY', 'CLEAN', 'DECK', 'NO', 'SIR'] +7902-96594-0021-97: ref=['HOPPING', 'ABOUT', 'LIKE', 'A', 'CAT', 'ON', 'HOT', 'BRICKS'] +7902-96594-0021-97: hyp=['HAVING', 'ABOUT', 'THE', 'GUQUET', 'ON', 'HOT', 'BRICKS'] +7902-96594-0022-98: ref=['NOW', 'THEN', 'WHY', 'DO', 'YOU', 'WANT', 'TO', 'GO', 'ASHORE'] +7902-96594-0022-98: hyp=['NOW', 'THEN', 'WHY', 'DO', 'YOU', 'WANT', 'TO', 'GO', 'ASHORE'] +7902-96594-0023-99: ref=['BEG', 'PARDON', "DIDN'T", 'MEAN', 'NOWT', 'SIR', 'SAID', 'THE', 'SAILOR', 'TOUCHING', 'HIS', 'FORELOCK'] +7902-96594-0023-99: hyp=['THEY', 'PARDON', "DIDN'T", 'MEAN', 'THAT', 'SIR', 'SAID', 'THE', 'SAILOR', 'TOUCHING', 'HIS', 'FORELOCK'] +7902-96594-0024-100: ref=['YES', 'SIR', 'SAID', 'THE', 'MAN', 'HUMBLY', 'SHALL', 'I', 'GO', 'AT', 'ONCE', 'SIR'] +7902-96594-0024-100: hyp=['YES', 'SIR', 'SAID', 'THE', 'MADAMELY', 'SHALL', 'I', 'GO', 'AT', 'ONCE', 'SIR'] +7902-96594-0025-101: ref=['NO', 'WAIT'] +7902-96594-0025-101: hyp=['NO', 'WAIT'] +7902-96594-0026-102: ref=['KEEP', 'A', 'SHARP', 'LOOK', 'OUT', 
'ON', 'THE', 'CLIFF', 'TO', 'SEE', 'IF', 'MISTER', 'RAYSTOKE', 'IS', 'MAKING', 'SIGNALS', 'FOR', 'A', 'BOAT'] +7902-96594-0026-102: hyp=['HE', 'WAS', 'SHARP', 'LOOK', 'OUT', 'ON', 'THE', 'CLIFF', 'AS', 'EVEN', 'MISTER', 'RAE', 'STROKE', 'IS', 'MAKING', 'SIGNALS', 'FOR', 'A', 'BOAT'] +7902-96594-0027-103: ref=['HE', 'SWUNG', 'ROUND', 'WALKED', 'AFT', 'AND', 'BEGAN', 'SWEEPING', 'THE', 'SHORE', 'AGAIN', 'WITH', 'HIS', 'GLASS', 'WHILE', 'THE', 'MASTER', 'AND', 'DICK', 'EXCHANGED', 'GLANCES', 'WHICH', 'MEANT', 'A', 'GREAT', 'DEAL'] +7902-96594-0027-103: hyp=['HE', 'SWUNG', 'ROUND', 'WALKED', 'AFT', 'AND', 'BEGAN', 'SWEEPING', 'ASHORE', 'AGAIN', 'WITH', 'HIS', 'GLASS', 'WHILE', 'THE', 'MASTER', 'AND', 'DICK', 'EXCHANGED', 'GLANCES', 'WHICH', 'MEANT', 'A', 'GREAT', 'DEAL'] +7902-96594-0028-104: ref=['AT', 'LAST', 'THE', 'LITTLE', 'LIEUTENANT', 'COULD', 'BEAR', 'THE', 'ANXIETY', 'NO', 'LONGER'] +7902-96594-0028-104: hyp=['AT', 'LAST', 'THE', 'LITTLE', 'TANNIC', 'COULD', 'BEAR', 'THE', 'ANXIETY', 'NO', 'LONGER'] +7902-96594-0029-105: ref=['PIPE', 'AWAY', 'THE', 'MEN', 'TO', 'THAT', 'BOAT', 'THERE', 'HE', 'SAID', 'AND', 'AS', 'THE', 'CREW', 'SPRANG', 'IN'] +7902-96594-0029-105: hyp=['PAPER', 'WEAR', 'THEM', 'INTO', 'THAT', 'BOAT', 'THERE', 'HE', 'SAID', 'AND', 'AS', 'THE', 'CREW', 'SPRANG', 'IN'] +7902-96594-0030-106: ref=['NOW', 'MISTER', 'GURR', 'HE', 'SAID', "I'M", 'ONLY', 'GOING', 'TO', 'SAY', 'ONE', 'THING', 'TO', 'YOU', 'IN', 'THE', 'WAY', 'OF', 'INSTRUCTIONS', 'YES', 'SIR'] +7902-96594-0030-106: hyp=['NOW', 'MISTER', 'GURG', 'HE', 'SAID', "I'M", 'ONLY', 'GOING', 'TO', 'SAY', 'ONE', 'THING', 'TO', 'YOU', 'IN', 'THE', 'WAY', 'OF', 'INSTRUCTIONS', 'YES', 'SIR'] +7902-96594-0031-107: ref=['BEG', 'PARDON', 'SIR', 'SAID', 'THE', 'MASTER', 'DEPRECATINGLY'] +7902-96594-0031-107: hyp=['BEG', 'PARDON', 'SIR', 'SAID', 'THE', 'MASTER', 'DEPRECATINGLY'] +7902-96594-0032-108: ref=['STEADY', 'MY', 'LADS', 'STEADY', 'CRIED', 'THE', 'MASTER', 'KEEP', 'STROKE', 'AND', 'THEN', 'HE', 'BEGAN', 'TO', 'MAKE', 'PLANS', 'AS', 'TO', 'HIS', 'FIRST', 'PROCEEDINGS', 'ON', 'GETTING', 'ASHORE'] +7902-96594-0032-108: hyp=['STEADY', 'MY', 'LAD', 'STEADY', 'CRIED', 'THE', 'MASTER', 'KEEP', 'STROKE', 'AND', 'THEN', 'HE', 'BEGAN', 'TO', 'MAKE', 'PLANS', 'AS', 'TO', 'HIS', 'FIRST', 'PROCEEDINGS', "I'M", 'GETTING', 'ASHORE'] +7902-96595-0000-109: ref=['SAY', 'MESTER', 'GURR', 'SAID', 'DICK', 'AFTER', 'ONE', 'OF', 'THESE', 'SEARCHES', 'HE', "WOULDN'T", 'RUN', 'AWAY', 'WHAT'] +7902-96595-0000-109: hyp=['SAY', 'MISTER', 'GIRK', 'SAID', 'DICK', 'AFTER', 'ONE', 'OF', 'THESE', 'SEARCHES', 'HE', "WOULDN'T", 'RUN', 'AWAY', 'WHAT'] +7902-96595-0001-110: ref=['MISTER', 'RAYSTOKE', 'SIR', "DON'T", 'BE', 'A', 'FOOL'] +7902-96595-0001-110: hyp=['MISTER', 'RAYSTOKE', 'SIR', "DON'T", 'BE', 'A', 'FOOL'] +7902-96595-0002-111: ref=['WHAT', 'CHUCKED', 'HIM', 'OFF', 'YONDER'] +7902-96595-0002-111: hyp=['WHAT', 'TECHTAMORPH', 'YONDER'] +7902-96595-0003-112: ref=['GURR', 'GLANCED', 'ROUND', 'TO', 'SEE', 'IF', 'THE', 'MEN', 'WERE', 'LOOKING', 'AND', 'THEN', 'SAID', 'RATHER', 'HUSKILY', 'BUT', 'KINDLY'] +7902-96595-0003-112: hyp=['GER', 'GLANCED', 'ROUND', 'TO', 'SEE', 'IF', 'THE', 'MEN', 'WERE', 'LOOKING', 'AND', 'THEN', 'SAID', 'WHETHER', 'HUSKILY', 'BE', 'KINDLY'] +7902-96595-0004-113: ref=['AH', 'EJACULATED', 'DICK', 'SADLY'] +7902-96595-0004-113: hyp=['AH', 'EJACULATED', 'DICK', 'SADLY'] +7902-96595-0005-114: ref=['SAY', 'MESTER', 'GURR', 'SIR', 'WHICH', 'THANKFUL', 'I', 'AM', 'TO', 'YOU', 'FOR', 'SPEAKING', 'SO', 'BUT', 'YOU', "DON'T", 'REALLY', 
'THINK', 'AS', 'HE', 'HAS', 'COME', 'TO', 'HARM'] +7902-96595-0005-114: hyp=['SAY', 'MISTER', 'GURSER', 'WHICH', 'THANKFUL', 'I', 'AM', 'FOR', 'YOU', 'FOR', 'SPEAKING', 'SO', 'BUT', 'YOU', "DON'T", 'REALLY', 'THINK', 'AS', 'HE', 'HAS', 'COME', 'TO', 'HARM'] +7902-96595-0006-115: ref=['I', 'HOPE', 'NOT', 'DICK', 'I', 'HOPE', 'NOT', 'BUT', 'SMUGGLERS', "DON'T", 'STAND', 'AT', 'ANYTHING', 'SOMETIMES'] +7902-96595-0006-115: hyp=['I', 'HOPE', 'NOT', 'DICK', 'I', 'HOPE', 'NOT', 'BUT', 'SMOKE', 'WAS', "DON'T", 'STAND', 'AT', 'ANYTHING', 'SOMETIMES'] +7902-96595-0007-116: ref=['I', 'DO', 'ASSURE', 'YOU', "THERE'S", 'NOTHING', 'HERE', 'BUT', 'WHAT', 'YOU', 'MAY', 'SEE'] +7902-96595-0007-116: hyp=['I', 'DO', 'ASSURE', 'YOU', "THERE'S", 'NOTHING', 'HERE', 'BUT', 'WHAT', 'YOU', 'MAY', 'SEE'] +7902-96595-0008-117: ref=['IF', "YOU'D", 'LET', 'ME', 'FINISH', "YOU'D", 'KNOW', 'SAID', 'GURR', 'GRUFFLY', 'ONE', 'OF', 'OUR', 'BOYS', 'IS', 'MISSING', 'SEEN', 'HIM', 'UP', 'HERE'] +7902-96595-0008-117: hyp=['IF', 'YOU', 'LET', 'ME', 'FINISH', "YOU'D", 'KNOW', 'SAID', 'GURG', 'ROUGHLY', 'ONE', 'OF', 'OUR', 'BOYS', 'IS', 'MISSING', 'SEEN', 'EM', 'UP', 'HERE'] +7902-96595-0009-118: ref=['BOY', 'BOUT', 'SEVENTEEN', 'WITH', 'A', 'RED', 'CAP', 'NO', 'SIR', 'INDEED', "I'VE", 'NOT'] +7902-96595-0009-118: hyp=['BOY', 'ABOUT', 'SEVENTEEN', 'WITH', 'A', 'RED', 'CAP', 'NO', 'SIR', 'INDEED', 'OF', 'NONE'] +7902-96595-0010-119: ref=["DON'T", 'KNOW', 'AS', 'HE', 'HAS', 'BEEN', 'SEEN', 'ABOUT', 'HERE', 'DO', 'YOU', 'SAID', 'GURR', 'LOOKING', 'AT', 'HER', 'SEARCHINGLY', 'NO', 'SIR'] +7902-96595-0010-119: hyp=["DON'T", 'KNOW', 'AS', 'HE', 'HAS', 'BEEN', 'SEEN', 'ABOUT', 'HERE', 'DO', 'YOU', 'SAID', 'GIRL', 'LOOKING', 'AT', 'HER', 'SEARCHINGLY', 'NO', 'SIR'] +7902-96595-0011-120: ref=['IF', 'SHE', 'KNEW', 'EVIL', 'HAD', 'COME', 'TO', 'THE', 'POOR', 'LAD', 'HER', 'FACE', 'WOULD', 'TELL', 'TALES', 'LIKE', 'PRINT'] +7902-96595-0011-120: hyp=['IF', 'SHE', 'KNEW', 'EVIL', 'HAD', 'COME', 'TO', 'THE', 'POOR', 'LAD', 'HER', 'FACE', 'WOULD', 'TELL', 'TALES', 'LIKE', 'PRINT'] +7902-96595-0012-121: ref=['I', 'SAID', 'A', 'LAD', 'BOUT', 'SEVENTEEN', 'IN', 'A', 'RED', 'CAP', 'LIKE', 'YOURS', 'SAID', 'GURR', 'VERY', 'SHORTLY'] +7902-96595-0012-121: hyp=['I', 'SAID', 'A', 'LAD', 'BOUT', 'SEVENTEEN', 'AND', 'A', 'RED', 'CATHOLIC', 'YOURS', 'SAID', 'GREW', 'VERY', 'SHORTLY'] +7902-96595-0013-122: ref=['THE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'AND', 'STARED', 'AS', 'IF', 'HE', "DIDN'T", 'HALF', 'UNDERSTAND', 'THE', 'DRIFT', 'OF', 'WHAT', 'WAS', 'SAID'] +7902-96595-0013-122: hyp=['THE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'AND', 'STARED', 'AS', 'IF', 'HE', "DIDN'T", 'HALF', 'UNDERSTAND', 'THE', 'DRIFT', 'OF', 'ALL', 'THIS', 'SAID'] +7902-96595-0014-123: ref=['HERE', 'MY', 'LAD', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0014-123: hyp=['HERE', 'MILAD', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0015-124: ref=['EH', 'I', 'SAY', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0015-124: hyp=['THEY', 'I', 'SAY', "WHERE'S", 'YOUR', 'MASTER'] +7902-96595-0016-125: ref=['GURR', 'TURNED', 'AWAY', 'IMPATIENTLY', 'AGAIN', 'AND', 'SIGNING', 'TO', 'HIS', 'MEN', 'TO', 'FOLLOW', 'THEY', 'ALL', 'BEGAN', 'TO', 'TRAMP', 'UP', 'THE', 'STEEP', 'TRACK', 'LEADING', 'TOWARD', 'THE', 'HOZE', 'WITH', 'THE', 'RABBITS', 'SCUTTLING', 'AWAY', 'AMONG', 'THE', 'FURZE', 'AND', 'SHOWING', 'THEIR', 'WHITE', 'COTTONY', 'TAILS', 'FOR', 'A', 'MOMENT', 'AS', 'THEY', 'DARTED', 'DOWN', 'INTO', 'THEIR', 'HOLES'] +7902-96595-0016-125: hyp=['GERT', 'TURNED', 'AWAY', 'IMPATIENTLY', 'AGAIN', 'AND', 'SIGNING', 
'TO', 'HIS', 'MEN', 'TO', 'FOLLOW', 'THEY', 'ALL', 'BEGAN', 'TO', 'TRAMP', 'UP', 'THE', 'STEEP', 'TRACK', 'LEADING', 'TOWARD', 'THE', 'HOSE', 'WITH', 'THE', 'RABBIT', 'SCUTTLING', 'AWAY', 'AMONG', 'THE', 'FIRS', 'AND', 'SHOWING', 'THEIR', 'WHITE', 'COTTONY', 'TAILS', 'FOR', 'A', 'MOMENT', 'AS', 'THEY', 'DARTED', 'DOWN', 'INTO', 'THEIR', 'HOLES'] +7902-96595-0017-126: ref=['I', 'DUNNO', 'MUTTERED', 'DICK', 'AND', 'A', 'MAN', "CAN'T", 'BE', 'SURE'] +7902-96595-0017-126: hyp=['I', 'DUNNO', 'MUTTERED', 'DICK', 'AND', 'A', 'MEN', "CAN'T", 'BE', 'SURE'] +7902-96595-0018-127: ref=['GURR', 'SALUTED', 'AND', 'STATED', 'HIS', 'BUSINESS', 'WHILE', 'THE', 'BARONET', 'WHO', 'HAD', 'TURNED', 'SALLOWER', 'AND', 'MORE', 'CAREWORN', 'THAN', 'HIS', 'LOT', 'DREW', 'A', 'BREATH', 'FULL', 'OF', 'RELIEF', 'ONE', 'OF', 'YOUR', 'SHIP', 'BOYS', 'HE', 'SAID'] +7902-96595-0018-127: hyp=['DUR', 'SALUTED', 'INSTEAD', 'OF', 'HIS', 'BUSINESS', 'WHILE', 'THE', 'BARONET', 'WHO', 'HAD', 'TURNED', 'SALARY', 'MORE', 'CARE', 'MORE', 'THAN', 'HIS', 'LOT', 'DREW', 'A', 'BREATH', 'OF', 'FULL', 'OF', 'RELIEF', 'ONE', 'OF', 'YOUR', 'VOYS', 'HE', 'SAID'] +7902-96595-0019-128: ref=['A', 'LAD', 'LOOKING', 'LIKE', 'A', 'COMMON', 'SAILOR', 'AND', 'WEARING', 'A', 'RED', 'CAP', 'NO', 'SAID', 'SIR', 'RISDON'] +7902-96595-0019-128: hyp=['A', 'LAD', 'LOOKING', 'LIKE', 'A', 'COMMON', 'SAILOR', 'AND', 'WEARING', 'A', 'RED', 'CAP', 'NO', 'SAID', 'SIR', 'RISDON'] +7902-96595-0020-129: ref=['I', 'HAVE', 'SEEN', 'NO', 'ONE', 'ANSWERING', 'TO', 'THE', 'DESCRIPTION', 'HERE'] +7902-96595-0020-129: hyp=['I', 'HAVE', 'SEEN', 'NO', 'ONE', 'ANSWERING', 'TO', 'THE', 'DESCRIPTION', 'HERE'] +7902-96595-0021-130: ref=['BEG', 'PARDON', 'SIR', 'BUT', 'CAN', 'YOU', 'AS', 'A', 'GENTLEMAN', 'ASSURE', 'ME', 'THAT', 'HE', 'IS', 'NOT', 'HERE', 'CERTAINLY', 'SAID', 'SIR', 'RISDON'] +7902-96595-0021-130: hyp=['BIG', 'PARTICER', 'BECAUSE', 'YOU', 'AS', 'GENTLEMEN', 'ASSURE', 'ME', 'THAT', 'HE', 'IS', 'NOT', 'HERE', 'CERTAINLY', 'SAID', 'SIR', 'RISDON'] +7902-96595-0022-131: ref=['SURELY', 'CRIED', 'SIR', 'RISDON', 'EXCITEDLY'] +7902-96595-0022-131: hyp=['SURELY', 'CRIED', 'SIR', 'RISDON', 'EXCITEDLY'] +7902-96595-0023-132: ref=['SIR', 'RISDON', 'WAS', 'SILENT'] +7902-96595-0023-132: hyp=['SIR', 'RICHARD', 'WAS', 'SILENT'] +7902-96595-0024-133: ref=['LADY', 'GRAEME', 'LOOKED', 'GHASTLY'] +7902-96595-0024-133: hyp=['LADY', 'GRAHAM', 'LOOKED', 'GHASTLY'] +7902-96595-0025-134: ref=['YOU', 'DO', 'NOT', 'KNOW', 'NO'] +7902-96595-0025-134: hyp=['YOU', 'DO', 'NOT', 'KNOW', 'NO'] +7975-280057-0000-1008: ref=['THESE', 'HATREDS', 'WERE', 'SOON', 'TO', 'MAKE', 'TROUBLE', 'FOR', 'ME', 'OF', 'WHICH', 'I', 'HAD', 'NEVER', 'DREAMED'] +7975-280057-0000-1008: hyp=['THESE', 'HATREDS', 'WERE', 'SOON', 'TO', 'MAKE', 'TROUBLE', 'FOR', 'ME', 'OF', 'WHICH', 'I', 'HAD', 'NEVER', 'DREAMED'] +7975-280057-0001-1009: ref=['HENRY', 'WASHINGTON', 'YOUNGER', 'MY', 'FATHER', 'REPRESENTED', 'JACKSON', 'COUNTY', 'THREE', 'TIMES', 'IN', 'THE', 'LEGISLATURE', 'AND', 'WAS', 'ALSO', 'JUDGE', 'OF', 'THE', 'COUNTY', 'COURT'] +7975-280057-0001-1009: hyp=['HENRY', 'WASHINGTON', 'YOUNGER', 'MY', 'FATHER', 'REPRESENTED', 'JACKSON', 'COUNTY', 'THREE', 'TIMES', 'IN', 'THE', 'LEGISLATURE', 'AND', 'WAS', 'ALSO', 'A', 'JUDGE', 'OF', 'THE', 'COUNTY', 'COURT'] +7975-280057-0002-1010: ref=['MY', 'MOTHER', 'WHO', 'WAS', 'BURSHEBA', 'FRISTOE', 'OF', 'INDEPENDENCE', 'WAS', 'THE', 'DAUGHTER', 'OF', 'RICHARD', 'FRISTOE', 'WHO', 'FOUGHT', 'UNDER', 'GENERAL', 'ANDREW', 'JACKSON', 'AT', 'NEW', 'ORLEANS', 'JACKSON', 'COUNTY', 
'HAVING', 'BEEN', 'SO', 'NAMED', 'AT', 'MY', 'GRANDFATHER', "FRISTOE'S", 'INSISTENCE'] +7975-280057-0002-1010: hyp=['MY', 'MOTHER', 'WHO', 'WAS', 'PERCEIVER', 'FOR', 'STOVE', 'OF', 'INDEPENDENCE', 'WAS', 'A', 'DAUGHTER', 'OF', 'RICHARD', 'CRISTO', 'WHO', 'FOUGHT', 'UNDER', 'GENERAL', 'ANDREW', 'JACKSON', 'THAT', 'NEW', 'ORLEANS', 'JACKSON', 'COUNTY', 'HAVING', 'BEEN', 'SO', 'NAMED', 'IN', 'MY', 'GRANDFATHER', 'FRISTOWS', 'INSISTENCE'] +7975-280057-0003-1011: ref=['I', 'CANNOT', 'REMEMBER', 'WHEN', 'I', 'DID', 'NOT', 'KNOW', 'HOW', 'TO', 'SHOOT'] +7975-280057-0003-1011: hyp=['I', 'CANNOT', 'REMEMBER', 'WHEN', 'I', 'DID', 'NOT', 'KNOW', 'HOW', 'TO', 'SHOOT'] +7975-280057-0004-1012: ref=['MY', 'BROTHER', 'JAMES', 'WAS', 'BORN', 'JANUARY', 'FIFTEENTH', 'EIGHTEEN', 'FORTY', 'EIGHT', 'JOHN', 'IN', 'EIGHTEEN', 'FIFTY', 'ONE', 'AND', 'ROBERT', 'IN', 'DECEMBER', 'EIGHTEEN', 'FIFTY', 'THREE'] +7975-280057-0004-1012: hyp=['MY', 'BROTHER', 'JAMES', 'WAS', 'BORN', 'JANUARY', 'FIFTEENTH', 'EIGHTEEN', 'FORTY', 'EIGHT', 'JOHN', 'IN', 'EIGHTEEN', 'FIFTY', 'ONE', 'AND', 'ROBERT', 'IN', 'DECEMBER', 'EIGHTEEN', 'FIFTY', 'THREE'] +7975-280057-0005-1013: ref=['MY', 'ELDEST', 'BROTHER', 'RICHARD', 'DIED', 'IN', 'EIGHTEEN', 'SIXTY'] +7975-280057-0005-1013: hyp=['MY', 'ELDEST', 'BROTHER', 'RICHARD', 'DIED', 'IN', 'EIGHTEEN', 'SIXTY'] +7975-280057-0006-1014: ref=['MY', 'FATHER', 'WAS', 'IN', 'THE', 'EMPLOY', 'OF', 'THE', 'UNITED', 'STATES', 'GOVERNMENT', 'AND', 'HAD', 'THE', 'MAIL', 'CONTRACT', 'FOR', 'FIVE', 'HUNDRED', 'MILES'] +7975-280057-0006-1014: hyp=['MY', 'FATHER', 'WAS', 'IN', 'THE', 'EMPLOY', 'OF', 'THE', 'UNITED', 'STATES', 'GOVERNMENT', 'AND', 'HAD', 'THE', 'MALE', 'CONTRACT', 'FOR', 'FIVE', 'HUNDRED', 'MILES'] +7975-280057-0007-1015: ref=['HE', 'HAD', 'STARTED', 'BACK', 'TO', 'HARRISONVILLE', 'IN', 'A', 'BUGGY', 'BUT', 'WAS', 'WAYLAID', 'ONE', 'MILE', 'SOUTH', 'OF', 'WESTPORT', 'A', 'SUBURB', 'OF', 'KANSAS', 'CITY', 'AND', 'BRUTALLY', 'MURDERED', 'FALLING', 'OUT', 'OF', 'HIS', 'BUGGY', 'INTO', 'THE', 'ROAD', 'WITH', 'THREE', 'MORTAL', 'BULLET', 'WOUNDS'] +7975-280057-0007-1015: hyp=['HE', 'HAD', 'STARTED', 'BACK', 'TO', 'HARRISONVILLE', 'IN', 'A', 'BUGGY', 'BUT', 'WAS', 'WAYLAID', 'ONE', 'MILE', 'SOUTH', 'OF', 'WESTBURT', 'A', 'SUBURB', 'OF', 'KANSA', 'CITY', 'AND', 'BRUTALLY', 'MURDERED', 'FALLING', 'OUT', 'OF', 'HIS', 'BUGGY', 'INTO', 'THE', 'ROAD', 'WITH', 'THREE', 'MORTAL', 'BULLET', 'WOUNDS'] +7975-280057-0008-1016: ref=['MISSUS', 'WASHINGTON', 'WELLS', 'AND', 'HER', 'SON', 'SAMUEL', 'ON', 'THE', 'ROAD', 'HOME', 'FROM', 'KANSAS', 'CITY', 'TO', "LEE'S", 'SUMMIT', 'RECOGNIZED', 'THE', 'BODY', 'AS', 'THAT', 'OF', 'MY', 'FATHER'] +7975-280057-0008-1016: hyp=['MISS', 'WASHINGTON', 'WALES', 'AND', 'HER', 'SON', 'SAMUEL', 'ON', 'THE', 'ROAD', 'HOME', 'FROM', 'KANSAS', 'CITY', 'TO', "LEE'S", 'SUMMIT', 'RECOGNIZED', 'THE', 'BODY', 'AS', 'THAT', 'OF', 'MY', 'FATHER'] +7975-280057-0009-1017: ref=['MISSUS', 'WELLS', 'STAYED', 'TO', 'GUARD', 'THE', 'REMAINS', 'WHILE', 'HER', 'SON', 'CARRIED', 'THE', 'NEWS', 'OF', 'THE', 'MURDER', 'TO', 'COLONEL', 'PEABODY', 'OF', 'THE', 'FEDERAL', 'COMMAND', 'WHO', 'WAS', 'THEN', 'IN', 'CAMP', 'AT', 'KANSAS', 'CITY'] +7975-280057-0009-1017: hyp=['MISS', 'WELL', 'STAYED', 'TO', 'GUARD', 'THE', 'REMAINS', 'WHILE', 'HER', 'SOON', 'CARRIED', 'THE', 'NEWS', 'OF', 'THE', 'MURDER', 'TO', 'COLONEL', 'PEABODY', 'OF', 'THE', 'FEDERAL', 'COMMAND', 'WHO', 'WAS', 'THEN', 'ENCAMP', 'AT', 'KANS', 'OF', 'CITY'] +7975-280057-0010-1018: ref=['MISSUS', 'MC', 'CORKLE', 'JUMPED', 'FROM', 'THE', 
'WINDOW', 'OF', 'THE', 'HOUSE', 'AND', 'ESCAPED'] +7975-280057-0010-1018: hyp=['MISS', 'MC', 'CORKEL', 'JUMPED', 'FROM', 'THE', 'WINDOW', 'OF', 'THE', 'HOUSE', 'AND', 'ESCAPED'] +7975-280057-0011-1019: ref=['AS', 'THE', 'RAIDERS', 'LEFT', 'ONE', 'OF', 'THEM', 'SHOUTED'] +7975-280057-0011-1019: hyp=['AS', 'THE', 'RAIDERS', 'LIVED', 'ONE', 'OF', 'THEM', 'SHOUTED'] +7975-280057-0012-1020: ref=['NOW', 'OLD', 'LADY', 'CALL', 'ON', 'YOUR', 'PROTECTORS', 'WHY', "DON'T", 'YOU', 'CALL', 'ON', 'COLE', 'YOUNGER', 'NOW'] +7975-280057-0012-1020: hyp=['NOW', 'LADY', 'CALL', 'ON', 'YOUR', 'PROTECTORS', 'WHY', "DON'T", 'YOU', 'CALL', 'ON', 'CO', 'YOUNGER', 'NOW'] +7975-280057-0013-1021: ref=['EVERY', 'KNOT', 'REPRESENTED', 'A', 'HUMAN', 'LIFE'] +7975-280057-0013-1021: hyp=['EVERY', 'KNOT', 'REPRESENTED', 'A', 'HUMAN', 'LIFE'] +7975-280057-0014-1022: ref=['BUT', 'SHE', 'FAILED', 'TO', 'FIND', 'THE', 'COMFORT', 'SHE', 'SOUGHT', 'FOR', 'ANNOYANCES', 'CONTINUED', 'IN', 'A', 'MORE', 'AGGRAVATED', 'FORM'] +7975-280057-0014-1022: hyp=['BUT', 'SHE', 'FAILED', 'TO', 'FIND', 'THE', 'COMFORT', 'SHE', 'SOUGHT', 'FOR', 'ANNOYANCES', 'CONTINUED', 'IN', 'A', 'MORE', 'AGGRAVATED', 'FORM'] +7975-280057-0015-1023: ref=['TWO', 'MONTHS', 'AFTER', 'THIS', 'INCIDENT', 'THE', 'SAME', 'PERSECUTORS', 'AGAIN', 'ENTERED', 'OUR', 'HOME', 'IN', 'THE', 'DEAD', 'OF', 'THE', 'NIGHT', 'AND', 'AT', 'THE', 'POINT', 'OF', 'A', 'PISTOL', 'TRIED', 'TO', 'FORCE', 'MY', 'MOTHER', 'TO', 'SET', 'FIRE', 'TO', 'HER', 'OWN', 'HOME'] +7975-280057-0015-1023: hyp=['TWO', 'MONTHS', 'AFTER', 'THIS', 'INCIDENT', 'THE', 'SAME', 'PERSECUTORS', 'AGAIN', 'ENTERED', 'OUR', 'HOME', 'IN', 'THE', 'DAY', 'OF', 'THE', 'NIGHT', 'AND', 'AT', 'THE', 'POINT', 'OF', 'A', 'PISTOL', 'TRIED', 'TO', 'FORCE', 'MY', 'MOTHER', 'TO', 'SET', 'FIRE', 'TO', 'HER', 'OWN', 'HOME'] +7975-280057-0016-1024: ref=['I', 'HAVE', 'ALWAYS', 'FELT', 'THAT', 'THE', 'EXPOSURE', 'TO', 'WHICH', 'SHE', 'WAS', 'SUBJECTED', 'ON', 'THIS', 'CRUEL', 'JOURNEY', 'TOO', 'HARD', 'EVEN', 'FOR', 'A', 'MAN', 'TO', 'TAKE', 'WAS', 'THE', 'DIRECT', 'CAUSE', 'OF', 'HER', 'DEATH'] +7975-280057-0016-1024: hyp=['I', 'HAVE', 'ALWAYS', 'FELT', 'THAT', 'THE', 'EXPOSURE', 'TO', 'WHICH', 'SHE', 'WAS', 'SUBJECTED', 'ON', 'THIS', 'CRUEL', 'JOURNEY', 'TOO', 'HARD', 'EVEN', 'FOR', 'A', 'MAN', 'TO', 'TAKE', 'WAS', 'A', 'DIRECT', 'CAUSE', 'OF', 'HER', 'DEATH'] +7975-280057-0017-1025: ref=['FROM', 'HARRISONVILLE', 'SHE', 'WENT', 'TO', 'WAVERLY', 'WHERE', 'SHE', 'WAS', 'HOUNDED', 'CONTINUALLY'] +7975-280057-0017-1025: hyp=['FROM', 'HARRISON', 'BILL', 'SHE', 'WENT', 'TO', 'WAVERLEY', 'WHERE', 'SHE', 'WAS', 'HOUNDY', 'CONTINUALLY'] +7975-280057-0018-1026: ref=['ONE', 'OF', 'THE', 'CONDITIONS', 'UPON', 'WHICH', 'HER', 'LIFE', 'WAS', 'SPARED', 'WAS', 'THAT', 'SHE', 'WOULD', 'REPORT', 'AT', 'LEXINGTON', 'WEEKLY'] +7975-280057-0018-1026: hyp=['ONE', 'OF', 'THE', 'CONDITIONS', 'UPON', 'WHICH', 'HER', 'LIFE', 'WAS', 'SPARED', 'WAS', 'THAT', 'SHE', 'WOULD', 'REPORT', 'AT', 'LESSINGTON', 'WEAKLY'] +7975-280057-0019-1027: ref=['ONE', 'OF', 'MY', 'OLD', 'SCHOOL', 'TEACHERS', 'WHOM', 'I', 'HAVE', 'NEVER', 'SEEN', 'SINCE', 'THE', 'SPRING', 'OR', 'SUMMER', 'OF', 'EIGHTEEN', 'SIXTY', 'TWO', 'IS', 'STEPHEN', 'B', 'ELKINS', 'SENATOR', 'FROM', 'WEST', 'VIRGINIA'] +7975-280057-0019-1027: hyp=['ONE', 'OF', 'MY', 'OLD', 'SCHOOL', 'TEACHERS', 'WHOM', 'I', 'HAVE', 'NEVER', 'SEEN', 'SINCE', 'THE', 'SPRING', 'OF', 'SUMMER', 'OF', 'EIGHTEEN', 'SIXTY', 'TWO', 'IS', 'STEPHEN', 'B', 'ELKINS', 'SENATOR', 'FROM', 'WEST', 'VIRGINIA'] +7975-280057-0020-1028: 
ref=['WHEN', 'I', 'WAS', 'TAKEN', 'PRISONER', 'I', 'EXPECTED', 'TO', 'BE', 'SHOT', 'WITHOUT', 'CEREMONY'] +7975-280057-0020-1028: hyp=['WHEN', 'I', 'WAS', 'TAKEN', 'PRISONER', 'I', 'EXPECTED', 'TO', 'BE', 'SHOT', 'WITHOUT', 'CEREMONY'] +7975-280063-0000-1058: ref=['WE', 'TOOK', 'THE', 'OATH', 'PERHAPS', 'THREE', 'HUNDRED', 'OF', 'US', 'DOWN', 'ON', 'LUTHER', "MASON'S", 'FARM', 'A', 'FEW', 'MILES', 'FROM', 'WHERE', 'I', 'NOW', 'WRITE', 'WHERE', 'COLONEL', 'HAYS', 'HAD', 'ENCAMPED', 'AFTER', 'INDEPENDENCE'] +7975-280063-0000-1058: hyp=['WE', 'TOOK', 'THE', 'OATH', 'PERHAPS', 'THREE', 'HUNDRED', 'OF', 'US', 'DOWN', 'ON', 'LUTHER', "MASON'S", 'FARM', 'A', 'FEW', 'MILES', 'FROM', 'WHERE', 'I', 'NOW', 'RIGHT', 'WHERE', 'COLONEL', 'HAYES', 'HAD', 'ENCAMPED', 'AFTER', 'INDEPENDENCE'] +7975-280063-0001-1059: ref=['BOONE', 'MUIR', 'AND', 'MYSELF', 'MET', 'COFFEE', 'AND', 'THE', 'REST', 'BELOW', 'ROSE', 'HILL', 'ON', 'GRAND', 'RIVER'] +7975-280063-0001-1059: hyp=['WHOM', "YOU'RE", 'AND', 'MYSELF', 'MAKE', 'COUGHING', 'AND', 'THE', 'REST', 'BELOW', 'ROSE', 'HILL', 'ON', 'GRAND', 'RIVER'] +7975-280063-0002-1060: ref=['ACCORDINGLY', 'I', 'WAS', 'SHORTLY', 'AWAKENED', 'TO', 'ACCOMPANY', 'HIM', 'TO', 'LONE', 'JACK', 'WHERE', 'HE', 'WOULD', 'PERSONALLY', 'MAKE', 'KNOWN', 'THE', 'SITUATION', 'TO', 'THE', 'OTHER', 'COLONELS'] +7975-280063-0002-1060: hyp=['ACCORDINGLY', 'I', 'WAS', 'SHORTLY', 'AWAKENED', 'TO', 'ACCOMPANY', 'HIM', 'THE', 'LONG', 'JACK', 'WHERE', 'HE', 'WOULD', 'PERSONALLY', 'MAKE', 'KNOWN', 'THE', 'SITUATION', 'TO', 'THE', 'OTHER', 'COLONELS'] +7975-280063-0003-1061: ref=['FOSTER', 'HAD', 'NEARLY', 'ONE', 'THOUSAND', 'CAVALRYMEN', 'AND', 'TWO', 'PIECES', 'OF', "RABB'S", 'INDIANA', 'BATTERY', 'THAT', 'HAD', 'ALREADY', 'MADE', 'FOR', 'ITSELF', 'A', 'NAME', 'FOR', 'HARD', 'FIGHTING'] +7975-280063-0003-1061: hyp=['FOSTER', 'HAD', 'NEARLY', 'ONE', 'THOUSAND', 'CAVERNMENT', 'AND', 'TWO', 'PIECES', 'OF', 'RABS', 'INDIANA', 'BATTERY', 'THAT', 'HAD', 'ALREADY', 'MADE', 'FOR', 'ITSELF', 'A', 'NAME', 'FOR', 'HARD', 'FIGHTING'] +7975-280063-0004-1062: ref=['COME', 'IN', 'COLONEL', 'HAYS', 'EXCLAIMED', 'COLONEL', 'COCKRELL'] +7975-280063-0004-1062: hyp=['COME', 'IN', 'COLONEL', 'HAYES', 'EXCLAIMED', 'COLONEL', 'COCKROL'] +7975-280063-0005-1063: ref=['I', 'THINK', "HE'LL", 'BE', 'RATHER', 'TOUGH', 'MEAT', 'FOR', 'BREAKFAST', 'I', 'REPLIED', 'HE', 'MIGHT', 'BE', 'ALL', 'RIGHT', 'FOR', 'DINNER'] +7975-280063-0005-1063: hyp=['I', 'THINK', "HE'LL", 'BE', 'RATHER', 'TO', 'HAVE', 'ME', 'FOR', 'BREAKFAST', 'I', 'REPLIED', 'HE', 'MIGHT', 'BE', 'ALL', 'RIPER', 'DINNER'] +7975-280063-0006-1064: ref=['JACKMAN', 'WITH', 'A', 'PARTY', 'OF', 'THIRTY', 'SEASONED', 'MEN', 'CHARGED', 'THE', 'INDIANA', 'GUNS', 'AND', 'CAPTURED', 'THEM', 'BUT', 'MAJOR', 'FOSTER', 'LED', 'A', 'GALLANT', 'CHARGE', 'AGAINST', 'THE', 'INVADERS', 'AND', 'RECAPTURED', 'THE', 'PIECES'] +7975-280063-0006-1064: hyp=['JACKMEN', 'WITH', 'A', 'PARTY', 'OF', 'THIRTY', 'SEASONED', 'MEN', 'CHARGED', 'THE', 'INDIANA', 'GUNS', 'AND', 'CAPTURED', 'THEM', 'BUT', 'MAJOR', 'FOXTER', 'LIT', 'A', 'GALLANT', 'CHARGE', 'AGAINST', 'THE', 'INVADERS', 'AND', 'RECAPTURED', 'THE', 'PIECES'] +7975-280063-0007-1065: ref=['WE', 'WERE', 'OUT', 'OF', 'AMMUNITION', 'AND', 'WERE', 'HELPLESS', 'HAD', 'THE', 'FIGHT', 'BEEN', 'PRESSED'] +7975-280063-0007-1065: hyp=['WE', 'WERE', 'OUT', 'OF', 'AMMUNITION', 'AND', 'WERE', 'HELPLESS', 'HAD', 'THE', 'FIGHT', 'BEEN', 'PRESSED'] +7975-280063-0008-1066: ref=['THEY', 'DID', 'MARK', 'MY', 'CLOTHES', 'IN', 'ONE', 'OR', 'TWO', 'PLACES', 
'HOWEVER'] +7975-280063-0008-1066: hyp=['THEY', 'DID', 'MARK', 'MY', 'CLOTHES', 'IN', 'ONE', 'OR', 'TWO', 'PLACES', 'HOWEVER'] +7975-280063-0009-1067: ref=['MAJOR', 'FOSTER', 'IN', 'A', 'LETTER', 'TO', 'JUDGE', 'GEORGE', 'M', 'BENNETT', 'OF', 'MINNEAPOLIS', 'SAID'] +7975-280063-0009-1067: hyp=['MEASURE', 'FOSTER', 'IN', 'A', 'LETTER', 'TO', 'JOE', 'GEORGIUM', 'BENNET', 'OF', 'MINNEAPOLIS', 'SAID'] +7975-280063-0010-1068: ref=['I', 'WAS', 'TOLD', 'BY', 'SOME', 'OF', 'OUR', 'MEN', 'FROM', 'THE', 'WESTERN', 'BORDER', 'OF', 'THE', 'STATE', 'THAT', 'THEY', 'RECOGNIZED', 'THE', 'DARING', 'YOUNG', 'RIDER', 'AS', 'COLE', 'YOUNGER'] +7975-280063-0010-1068: hyp=['I', 'WAS', 'TOLD', 'BY', 'SOME', 'OF', 'OUR', 'MEN', 'FROM', 'THE', 'WESTERN', 'BORDER', 'OF', 'THE', 'STATE', 'THAT', 'THEY', 'RECOGNIZED', 'A', 'DARING', 'OWN', "WRITER'S", 'COAL', 'YOUNGER'] +7975-280063-0011-1069: ref=['ABOUT', 'NINE', 'THIRTY', 'A', 'M', 'I', 'WAS', 'SHOT', 'DOWN'] +7975-280063-0011-1069: hyp=['ABOUT', 'NINE', 'THIRTY', 'A', 'M', 'I', 'WAS', 'SHOT', 'DOWN'] +7975-280063-0012-1070: ref=['THE', 'WOUNDED', 'OF', 'BOTH', 'FORCES', 'WERE', 'GATHERED', 'UP', 'AND', 'WERE', 'PLACED', 'IN', 'HOUSES'] +7975-280063-0012-1070: hyp=['THE', 'WOUNDED', 'OF', 'BOTH', 'FORCES', 'WERE', 'GATHERED', 'UP', 'AND', 'WERE', 'PLACED', 'IN', 'HOUSES'] +7975-280076-0000-1029: ref=['ALTHOUGH', 'EVERY', 'BOOK', 'PURPORTING', 'TO', 'NARRATE', 'THE', 'LIVES', 'OF', 'THE', 'YOUNGER', 'BROTHERS', 'HAS', 'TOLD', 'OF', 'THE', 'LIBERTY', 'ROBBERY', 'AND', 'IMPLIED', 'THAT', 'WE', 'HAD', 'A', 'PART', 'IN', 'IT', 'THE', 'YOUNGERS', 'WERE', 'NOT', 'SUSPECTED', 'AT', 'THAT', 'TIME', 'NOR', 'FOR', 'A', 'LONG', 'TIME', 'AFTERWARD'] +7975-280076-0000-1029: hyp=['ALTHOUGH', 'EVERY', 'BOOK', 'REPORTING', 'TO', 'THEIR', 'EIGHTH', 'LIVES', 'OF', 'THE', 'YOUNGER', 'BROTHERS', 'HAS', 'TOLD', 'THAT', 'THE', 'LIBERTY', 'ROBBERY', 'AND', 'IMPLIED', 'THAT', 'WE', 'HAD', 'A', 'PART', 'IN', 'IT', 'THE', 'YOUNGERS', 'WERE', 'NOT', 'SUSPECTED', 'AT', 'THAT', 'TIME', 'NOR', 'PROLONG', 'TIME', 'AFTERWARD'] +7975-280076-0001-1030: ref=['IT', 'WAS', 'CLAIMED', 'BY', 'PEOPLE', 'OF', 'LIBERTY', 'THAT', 'THEY', 'POSITIVELY', 'RECOGNIZED', 'AMONG', 'THE', 'ROBBERS', 'OLL', 'SHEPHERD', 'RED', 'MONKERS', 'AND', 'BUD', 'PENCE', 'WHO', 'HAD', 'SEEN', 'SERVICE', 'WITH', 'QUANTRELL'] +7975-280076-0001-1030: hyp=['IT', 'WAS', 'CLAIMED', 'BY', 'PEOPLE', 'OF', 'LIBERTY', 'THAT', 'THEY', 'POSIT', 'TILL', 'WE', 'RECOGNIZED', 'AMONG', 'THE', 'ROBBERS', 'ALL', 'SHEPARD', 'RED', 'MOCKERS', 'AND', 'BUD', 'PANTS', 'WHO', 'HAD', 'SEEN', 'SERVICE', 'WITH', 'QUANTRAIL'] +7975-280076-0002-1031: ref=['THIS', 'RAID', 'WAS', 'ACCOMPANIED', 'BY', 'BLOODSHED', 'JUDGE', 'MC', 'LAIN', 'THE', 'BANKER', 'BEING', 'SHOT', 'THOUGH', 'NOT', 'FATALLY'] +7975-280076-0002-1031: hyp=['THIS', 'RAY', 'WAS', 'ACCOMPANIED', 'BY', 'BLOCHHEAD', 'JUDGE', 'MC', 'LANE', 'THE', 'BANKER', 'BEING', 'SHOT', 'THOUGH', 'NOT', 'FATALLY'] +7975-280076-0003-1032: ref=['NO', 'WARRANT', 'WAS', 'ISSUED', 'FOR', 'THE', 'YOUNGERS', 'BUT', 'SUBSEQUENT', 'HISTORIANS', 'HAVE', 'INFERENTIALLY', 'AT', 'LEAST', 'ACCUSED', 'US', 'OF', 'TAKING', 'PART', 'BUT', 'AS', 'I', 'SAID', 'BEFORE', 'THERE', 'IS', 'NO', 'TRUTH', 'IN', 'THE', 'ACCUSATION'] +7975-280076-0003-1032: hyp=['THOUGH', 'WARRANT', 'WAS', 'ISSUED', 'FOR', 'THE', 'YOUNGERS', 'BUT', 'SUBSEQUENT', 'HISTORIANS', 'HAVE', 'INFERENTIALLY', 'AT', 'LEAST', 'ACCUSED', 'US', 'OF', 'TAKING', 'PART', 'BUT', 'AS', 'I', 'SAID', 'BEFORE', 'THERE', 'IS', 'NO', 'TRUTH', 'IN', 'THE', 'ACCUSATION'] 
+7975-280076-0004-1033: ref=['JUNE', 'THIRD', 'EIGHTEEN', 'SEVENTY', 'ONE', 'OBOCOCK', 'BROTHERS', 'BANK', 'AT', 'CORYDON', 'IOWA', 'WAS', 'ROBBED', 'OF', 'FORTY', 'THOUSAND', 'DOLLARS', 'BY', 'SEVEN', 'MEN', 'IN', 'BROAD', 'DAYLIGHT'] +7975-280076-0004-1033: hyp=['JUNE', 'THIRD', 'EIGHTEEN', 'SEVENTY', 'ONE', 'OBEK', "BROTHER'S", 'BANK', 'AT', 'CROYD', 'AND', 'IOWA', 'WAS', 'ROBBED', 'OF', 'FORTY', 'THOUSAND', 'DOLLARS', 'BY', 'SEVEN', 'MEN', 'IN', 'BROAD', 'DAYLIGHT'] +7975-280076-0005-1034: ref=['IT', 'WAS', 'CHARGED', 'THAT', 'ARTHUR', 'MC', 'COY', 'OR', 'A', 'C', 'MC', 'COY', 'AND', 'MYSELF', 'HAD', 'BEEN', 'PARTICIPANTS', 'IN', 'THE', "GAD'S", 'HILL', 'AFFAIR', 'AND', 'THE', 'TWO', 'STAGE', 'ROBBERIES'] +7975-280076-0005-1034: hyp=['IT', 'WAS', 'CHARGE', 'THAT', 'AWFUL', 'MAC', 'COY', 'OR', 'A', 'SEMICA', 'MYSELF', 'HAD', 'BEEN', 'PARTICIPANTS', 'IN', 'THE', "GAD'S", 'HILL', 'AFFAIR', 'AND', 'THE', 'TWO', 'STAGE', 'ROBBERIES'] +7975-280076-0006-1035: ref=['THE', 'PARTS', 'OF', 'THIS', 'LETTER', 'NOW', 'RELEVANT', 'ARE', 'AS', 'FOLLOWS'] +7975-280076-0006-1035: hyp=['THE', 'PARTS', 'OF', 'THIS', 'LETTER', 'NOW', 'ELEVANT', 'ARE', 'AS', 'FOLLOWS'] +7975-280076-0007-1036: ref=['YOU', 'MAY', 'USE', 'THIS', 'LETTER', 'IN', 'YOUR', 'OWN', 'WAY'] +7975-280076-0007-1036: hyp=['YOU', 'MAY', 'USE', 'THIS', 'LETTER', 'IN', 'YOUR', 'OWN', 'WAY'] +7975-280076-0008-1037: ref=['I', 'WILL', 'GIVE', 'YOU', 'THIS', 'OUTLINE', 'AND', 'SKETCH', 'OF', 'MY', 'WHEREABOUTS', 'AND', 'ACTIONS', 'AT', 'THE', 'TIME', 'OF', 'CERTAIN', 'ROBBERIES', 'WITH', 'WHICH', 'I', 'AM', 'CHARGED'] +7975-280076-0008-1037: hyp=['I', 'WILL', 'GIVE', 'YOU', 'THIS', 'OUTLINE', 'AND', 'SKETCH', 'OF', 'MY', 'WHEREABOUTS', 'AND', 'ACTIONS', 'AT', 'THE', 'TIME', 'OF', 'CERTAIN', 'ROBBERS', 'WITH', 'WHICH', 'I', 'AM', 'CHARGED'] +7975-280076-0009-1038: ref=['AT', 'THE', 'TIME', 'OF', 'THE', 'GALLATIN', 'BANK', 'ROBBERY', 'I', 'WAS', 'GATHERING', 'CATTLE', 'IN', 'ELLIS', 'COUNTY', 'TEXAS', 'CATTLE', 'THAT', 'I', 'BOUGHT', 'FROM', 'PLEAS', 'TAYLOR', 'AND', 'RECTOR'] +7975-280076-0009-1038: hyp=["IT'S", 'THE', 'TIME', 'OF', 'THE', 'YELLED', 'AND', 'BANK', 'ROBBERY', 'I', 'WAS', 'GATHERING', 'CATTLE', 'AND', 'ELLIS', 'COUNTY', 'TEXAS', 'KETTLET', 'ABOUT', 'FROM', 'PLAYERS', 'TAILOR', 'AND', 'RECTOR'] +7975-280076-0010-1039: ref=['THIS', 'CAN', 'BE', 'PROVED', 'BY', 'BOTH', 'OF', 'THEM', 'ALSO', 'BY', 'SHERIFF', 'BARKLEY', 'AND', 'FIFTY', 'OTHER', 'RESPECTABLE', 'MEN', 'OF', 'THAT', 'COUNTY'] +7975-280076-0010-1039: hyp=['THIS', 'CAN', 'BE', 'PROVED', 'BY', 'BOTH', 'OF', 'THEM', 'ALSO', 'BY', 'SHARE', 'PARKLEY', 'AND', 'FIFTY', 'OTHER', 'RESPECTABLE', 'MEN', 'OF', 'THAT', 'COUNTY'] +7975-280076-0011-1040: ref=['I', 'BROUGHT', 'THE', 'CATTLE', 'TO', 'KANSAS', 'THAT', 'FALL', 'AND', 'REMAINED', 'IN', 'SAINT', 'CLAIR', 'COUNTY', 'UNTIL', 'FEBRUARY'] +7975-280076-0011-1040: hyp=['ABRUPT', 'THE', 'CATTLE', 'THE', 'KANSAS', 'SET', 'FALL', 'AND', 'REMAINED', 'IN', 'SAINT', 'CLAIR', 'COUNTY', 'UNTIL', 'FEBRUARY'] +7975-280076-0012-1041: ref=['I', 'THEN', 'WENT', 'TO', 'ARKANSAS', 'AND', 'RETURNED', 'TO', 'SAINT', 'CLAIR', 'COUNTY', 'ABOUT', 'THE', 'FIRST', 'OF', 'MAY'] +7975-280076-0012-1041: hyp=['AND', 'THEN', 'WENT', 'TO', 'OUR', 'CONCERN', 'RETURNED', 'TO', 'SAINT', 'CLAIR', 'COUNTY', 'ABOUT', 'THE', 'FIRST', 'OF', 'MAY'] +7975-280076-0013-1042: ref=['I', 'WENT', 'TO', 'KANSAS', 'WHERE', 'OUR', 'CATTLE', 'WERE', 'IN', 'WOODSON', 'COUNTY', 'AT', 'COLONEL', "RIDGE'S"] +7975-280076-0013-1042: hyp=['AND', 'WENT', 'TO', 'KANSAS', 'WHERE', 'A', 
'KETTLE', 'WERE', 'IN', 'WOODS', 'AND', 'COUNTY', 'AT', 'COLONEL', 'RIDGES'] +7975-280076-0014-1043: ref=['DURING', 'THE', 'SUMMER', 'I', 'WAS', 'EITHER', 'IN', 'SAINT', 'CLAIR', 'JACKSON', 'OR', 'KANSAS', 'BUT', 'AS', 'THERE', 'WAS', 'NO', 'ROBBERY', 'COMMITTED', 'THAT', 'SUMMER', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'WHERE', 'I', 'WAS'] +7975-280076-0014-1043: hyp=['DURING', 'THE', 'SUMMER', 'I', 'WAS', 'EITHER', 'IN', 'SAINT', 'CLAIR', 'OR', "JACK'S", 'UNDER', 'KANSAS', 'BUT', 'AS', 'THERE', 'WAS', 'NO', 'ROBBERY', 'COMMITTED', 'THAT', 'SUMMER', 'IT', 'MAKES', 'NO', 'DIFFERENCE', 'WHERE', 'I', 'WAS'] +7975-280076-0015-1044: ref=['I', 'WENT', 'THROUGH', 'INDEPENDENCE', 'AND', 'FROM', 'THERE', 'TO', 'ACE', "WEBB'S"] +7975-280076-0015-1044: hyp=['AND', 'WENT', 'THROUGH', 'INDEPENDENCE', 'AND', 'FROM', 'THERE', 'TO', 'ACE', 'WHIPS'] +7975-280076-0016-1045: ref=['THERE', 'I', 'TOOK', 'DINNER', 'AND', 'THEN', 'WENT', 'TO', 'DOCTOR', 'L', 'W', "TWYMAN'S"] +7975-280076-0016-1045: hyp=['THERE', 'I', 'TOOK', 'DINNER', 'AND', 'THEN', 'WENT', 'TO', 'DOCTOR', 'L', 'W', 'TWINS'] +7975-280076-0017-1046: ref=['OUR', 'BUSINESS', 'THERE', 'WAS', 'TO', 'SEE', 'E', 'P', 'WEST', 'HE', 'WAS', 'NOT', 'AT', 'HOME', 'BUT', 'THE', 'FAMILY', 'WILL', 'REMEMBER', 'THAT', 'WE', 'WERE', 'THERE'] +7975-280076-0017-1046: hyp=['OUR', 'BUSINESS', 'THERE', 'WAS', 'TO', 'SEE', 'E', 'WEST', 'HE', 'WAS', 'NOT', 'AT', 'HOME', 'BUT', 'THE', 'FAMILY', 'WILL', 'REMEMBER', 'THAT', 'WE', 'WERE', 'THERE'] +7975-280076-0018-1047: ref=['WE', 'CROSSED', 'ON', 'THE', 'BRIDGE', 'STAYED', 'IN', 'THE', 'CITY', 'ALL', 'NIGHT', 'AND', 'THE', 'NEXT', 'MORNING', 'WE', 'RODE', 'UP', 'THROUGH', 'THE', 'CITY'] +7975-280076-0018-1047: hyp=['WE', 'CROSSED', 'ON', 'THE', 'BRIDGE', 'STATING', 'THE', 'CITY', 'ALL', 'NIGHT', 'AND', 'THE', 'NEXT', 'MORNING', 'WE', 'RODE', 'UP', 'TO', 'THE', 'CITY'] +7975-280076-0019-1048: ref=['I', 'MET', 'SEVERAL', 'OF', 'MY', 'FRIENDS', 'AMONG', 'THEM', 'WAS', 'BOB', 'HUDSPETH'] +7975-280076-0019-1048: hyp=['AMID', 'SEVERAL', 'OF', 'MY', 'FRIENDS', 'AMONG', 'THEM', 'WAS', 'BOB', 'HUSBITH'] +7975-280076-0020-1049: ref=['WE', 'WERE', 'NOT', 'ON', 'GOOD', 'TERMS', 'AT', 'THE', 'TIME', 'NOR', 'HAVE', 'WE', 'BEEN', 'FOR', 'SEVERAL', 'YEARS'] +7975-280076-0020-1049: hyp=['WE', 'WERE', 'NOT', 'ON', 'THE', 'TERMS', 'AT', 'THE', 'TIME', 'NOR', 'HAVE', 'WE', 'BEEN', 'FOR', 'SEVERAL', 'YEARS'] +7975-280076-0021-1050: ref=['POOR', 'JOHN', 'HE', 'HAS', 'BEEN', 'HUNTED', 'DOWN', 'AND', 'SHOT', 'LIKE', 'A', 'WILD', 'BEAST', 'AND', 'NEVER', 'WAS', 'A', 'BOY', 'MORE', 'INNOCENT'] +7975-280076-0021-1050: hyp=['POOR', 'JOHN', 'HE', 'HAS', 'BEEN', 'HUNTED', 'DOWN', 'AND', 'SHOT', 'LIKE', 'A', 'WILD', 'BEAST', 'AND', 'NEVER', 'WAS', 'A', 'BOY', 'MORE', 'INNOCENT'] +7975-280076-0022-1051: ref=['DOCTOR', 'L', 'LEWIS', 'WAS', 'HIS', 'PHYSICIAN'] +7975-280076-0022-1051: hyp=['DOCTOR', 'L', 'LOOSE', 'WAS', 'HIS', 'PHYSICIAN'] +7975-280076-0023-1052: ref=['THERE', 'WERE', 'FIFTY', 'OR', 'A', 'HUNDRED', 'PERSONS', 'THERE', 'WHO', 'WILL', 'TESTIFY', 'IN', 'ANY', 'COURT', 'THAT', 'JOHN', 'AND', 'I', 'WERE', 'THERE'] +7975-280076-0023-1052: hyp=['THERE', 'WERE', 'FIFTY', 'OR', 'A', 'HUNDRED', 'PERSONS', 'THERE', 'WHO', 'WILL', 'TESTIFY', 'IN', 'ANY', 'COURT', 'THAT', 'JOHN', 'AND', 'I', 'WERE', 'THERE'] +7975-280076-0024-1053: ref=['HELVIN', 'FICKLE', 'AND', 'WIFE', 'OF', 'GREENTON', 'VALLEY', 'WERE', 'ATTENDING', 'THE', 'SPRINGS', 'AT', 'THAT', 'TIME', 'AND', 'EITHER', 'OF', 'THEM', 'WILL', 'TESTIFY', 'TO', 'THE', 'ABOVE', 'FOR', 'JOHN', 'AND', 
'I', 'SAT', 'IN', 'FRONT', 'OF', 'MISTER', 'SMITH', 'WHILE', 'HE', 'WAS', 'PREACHING', 'AND', 'WAS', 'IN', 'HIS', 'COMPANY', 'FOR', 'A', 'FEW', 'MOMENTS', 'TOGETHER', 'WITH', 'HIS', 'WIFE', 'AND', 'MISTER', 'AND', 'MISSUS', 'FICKLE', 'AFTER', 'SERVICE'] +7975-280076-0024-1053: hyp=['HELD', 'AND', 'FICKLE', 'AND', 'WIFE', 'OF', 'GREENTON', 'VALLEY', 'WERE', 'ATTENDING', 'THE', 'SPRINGS', 'AT', 'THAT', 'TIME', 'AND', 'EITHER', 'OF', 'THEM', 'WILL', 'TESTIFY', 'TO', 'THE', 'ABOVE', 'FOR', 'JOHN', 'AND', 'I', 'SET', 'IN', 'FRONT', 'OF', 'MISTER', 'SMITH', 'WHILE', 'HE', 'WAS', 'PREACHING', 'AND', 'WAS', 'IN', 'HIS', 'COMPANY', 'FOR', 'A', 'FEW', 'MOMENTS', 'TOGETHER', 'WITH', 'HIS', 'WIFE', 'AND', 'MISTER', 'AND', 'MISS', 'FICKLE', 'AFTER', 'THE', 'SERVICE'] +7975-280076-0025-1054: ref=['ABOUT', 'THE', 'LAST', 'OF', 'DECEMBER', 'EIGHTEEN', 'SEVENTY', 'THREE', 'I', 'ARRIVED', 'IN', 'CARROLL', 'PARISH', 'LOUISIANA'] +7975-280076-0025-1054: hyp=['ABOUT', 'THE', 'LAST', 'OF', 'DECEMBER', 'EIGHTEEN', 'SEVENTY', 'THREE', 'I', 'ARRIVED', 'IN', 'CAROL', 'PARRISH', 'LOUISIANA'] +7975-280076-0026-1055: ref=['I', 'STAYED', 'THERE', 'UNTIL', 'THE', 'EIGHTH', 'OF', 'FEBRUARY', 'EIGHTEEN', 'SEVENTY', 'FOUR'] +7975-280076-0026-1055: hyp=['I', 'STAYED', 'THERE', 'UNTIL', 'THE', 'EIGHTH', 'OF', 'FEBRUARY', 'EIGHTEEN', 'SEVENTY', 'FOUR'] +7975-280076-0027-1056: ref=['I', 'HAD', 'NOT', 'HEARD', 'OF', 'THAT', 'WHEN', 'I', 'WROTE', 'THE', 'LETTER', 'OF', 'EIGHTEEN', 'SEVENTY', 'FOUR', 'AND', 'TO', 'CORRECT', 'ANY', 'MISAPPREHENSION', 'THAT', 'MIGHT', 'BE', 'CREATED', 'BY', 'OMITTING', 'IT', 'I', 'WILL', 'SAY', 'THAT', 'AT', 'THAT', 'TIME', 'I', 'WAS', 'AT', 'NEOSHO', 'KANSAS', 'WITH', 'A', 'DROVE', 'OF', 'CATTLE', 'WHICH', 'I', 'SOLD', 'TO', 'MAJOR', 'RAY'] +7975-280076-0027-1056: hyp=['I', 'HAD', 'NOT', 'HEARD', 'OF', 'THAT', 'WHEN', 'I', 'WROTE', 'THE', 'LETTER', 'OF', 'EIGHTEEN', 'SEVENTY', 'FOUR', 'AND', 'TO', 'CORRECT', 'ANY', 'MISAPPREHENSION', 'THAT', 'MIGHT', 'BE', 'CREATED', 'BY', 'OMITTING', 'IT', 'I', 'WILL', 'SAY', 'THAT', 'AT', 'THE', 'TIME', 'I', 'WAS', 'AT', 'NEOSHIL', 'OF', 'KANSAS', 'WITH', 'A', 'DROVE', 'OF', 'CATTLE', 'WHICH', 'I', 'SOLD', 'TO', 'MAJOR', 'RAY'] +7975-280076-0028-1057: ref=['IT', 'WAS', 'IMMEDIATELY', 'FOLLOWING', 'THE', 'ROCK', 'ISLAND', 'ROBBERY', 'AT', 'ADAIR', 'IOWA', 'THAT', 'THERE', 'FIRST', 'APPEARED', 'A', 'DELIBERATE', 'ENLISTMENT', 'OF', 'SOME', 'LOCAL', 'PAPERS', 'IN', 'MISSOURI', 'TO', 'CONNECT', 'US', 'WITH', 'THIS', 'ROBBERY'] +7975-280076-0028-1057: hyp=['IT', 'WAS', 'IMMEDIATELY', 'FOLLOWING', 'THE', 'ROCK', 'ISLAND', 'ROBBERY', 'AT', 'EIGHT', 'AIR', 'IOWA', 'THAT', 'THEIR', 'FIRST', 'APPEARED', 'A', 'DELIBERATE', 'ENLISTMENT', 'OF', 'SOME', 'LOCAL', 'PAPERS', 'IN', 'MISSOURI', 'TO', 'CONNECT', 'US', 'WITH', 'THIS', 'ROBBERY'] +7975-280084-0000-1090: ref=['I', 'URGED', 'ON', 'THE', 'BOYS', 'THAT', 'WHATEVER', 'HAPPENED', 'WE', 'SHOULD', 'NOT', 'SHOOT', 'ANY', 'ONE'] +7975-280084-0000-1090: hyp=['I', 'URGED', 'ON', 'THE', 'BOYS', 'AT', 'WHATEVER', 'HAPPEN', 'WE', 'SHOULD', 'NOT', 'SHOOT', 'ANY', 'ONE'] +7975-280084-0001-1091: ref=['WHEN', 'MILLER', 'AND', 'I', 'CROSSED', 'THE', 'BRIDGE', 'THE', 'THREE', 'WERE', 'ON', 'SOME', 'DRY', 'GOODS', 'BOXES', 'AT', 'THE', 'CORNER', 'NEAR', 'THE', 'BANK', 'AND', 'AS', 'SOON', 'AS', 'THEY', 'SAW', 'US', 'WENT', 'RIGHT', 'INTO', 'THE', 'BANK', 'INSTEAD', 'OF', 'WAITING', 'FOR', 'US', 'TO', 'GET', 'THERE'] +7975-280084-0001-1091: hyp=['WHEN', 'MILLER', 'AND', 'I', 'CROSSED', 'THE', 'BRIDGE', 'THE', 'THREE', 'WERE', 'ON', 'SOME', 
'DRY', 'GOOD', 'BOXES', 'AT', 'THE', 'CORNER', 'NEAR', 'THE', 'BANK', 'AND', 'AS', 'SOON', 'AS', 'THEY', 'SAW', 'US', 'WENT', 'RIGHT', 'INTO', 'THE', 'BANK', 'INSTEAD', 'OF', 'WAITING', 'FOR', 'US', 'TO', 'GET', 'THERE'] +7975-280084-0002-1092: ref=['WHEN', 'WE', 'CAME', 'UP', 'I', 'TOLD', 'MILLER', 'TO', 'SHUT', 'THE', 'BANK', 'DOOR', 'WHICH', 'THEY', 'HAD', 'LEFT', 'OPEN', 'IN', 'THEIR', 'HURRY'] +7975-280084-0002-1092: hyp=['WHEN', 'WE', 'CAME', 'UP', 'I', 'TELL', 'MILLER', 'TO', 'SHUT', 'THE', 'BANK', 'DOOR', 'WHICH', 'THEY', 'HAD', 'LEFT', 'OPEN', 'IN', 'THEIR', 'HURRY'] +7975-280084-0003-1093: ref=['J', 'S', 'ALLEN', 'WHOSE', 'HARDWARE', 'STORE', 'WAS', 'NEAR', 'TRIED', 'TO', 'GO', 'INTO', 'THE', 'BANK', 'BUT', 'MILLER', 'ORDERED', 'HIM', 'AWAY', 'AND', 'HE', 'RAN', 'AROUND', 'THE', 'CORNER', 'SHOUTING'] +7975-280084-0003-1093: hyp=['J', 'S', 'ALLEN', 'WHOSE', 'HARDWORTH', 'STORE', 'WAS', 'NEAR', 'TRIED', 'TO', 'GO', 'INTO', 'THE', 'BANK', 'BUT', 'MILLER', 'ORDERED', 'HIM', 'AWAY', 'AND', 'HE', 'RAN', 'ROUND', 'THE', 'CORNER', 'SHOUTING'] +7975-280084-0004-1094: ref=['GET', 'YOUR', 'GUNS', 'BOYS', "THEY'RE", 'ROBBING', 'THE', 'BANK'] +7975-280084-0004-1094: hyp=['GET', 'YOUR', 'GUNS', 'BOYS', "THEY'RE", 'ROBBING', 'THE', 'BANK'] +7975-280084-0005-1095: ref=['AND', 'I', 'CALLED', 'TO', 'HIM', 'TO', 'GET', 'INSIDE', 'AT', 'THE', 'SAME', 'TIME', 'FIRING', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'AIR', 'AS', 'A', 'SIGNAL', 'TO', 'THE', 'THREE', 'BOYS', 'AT', 'THE', 'BRIDGE', 'THAT', 'WE', 'HAD', 'BEEN', 'DISCOVERED'] +7975-280084-0005-1095: hyp=['AND', 'I', 'CALLED', 'TO', 'HIM', 'TO', 'GET', 'INSIDE', 'AT', 'THE', 'SAME', 'TIME', 'FIRING', 'A', 'PISTOL', 'SHOUT', 'IN', 'THE', 'AIR', 'AS', 'A', 'SIGNAL', 'TO', 'THE', 'THREE', 'BOYS', 'AT', 'THE', 'BRIDGE', 'THAT', 'WE', 'HAD', 'BEEN', 'DISCOVERED'] +7975-280084-0006-1096: ref=['ALMOST', 'AT', 'THIS', 'INSTANT', 'I', 'HEARD', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'BANK'] +7975-280084-0006-1096: hyp=['ALMOST', 'AT', 'THIS', 'INSTANT', 'I', 'HEARD', 'A', 'PISTOL', 'SHOT', 'IN', 'THE', 'BANK'] +7975-280084-0007-1097: ref=['CHADWELL', 'WOODS', 'AND', 'JIM', 'RODE', 'UP', 'AND', 'JOINED', 'US', 'SHOUTING', 'TO', 'PEOPLE', 'IN', 'THE', 'STREET', 'TO', 'GET', 'INSIDE', 'AND', 'FIRING', 'THEIR', 'PISTOLS', 'TO', 'EMPHASIZE', 'THEIR', 'COMMANDS'] +7975-280084-0007-1097: hyp=['SAID', 'WELL', 'WOODS', 'AND', 'JIM', 'RODE', 'UP', 'AND', 'JOINED', 'US', 'SHOUTING', 'TO', 'THE', 'PEOPLE', 'IN', 'THE', 'STREET', 'TO', 'GET', 'INSIDE', 'AND', 'FIRING', 'THEIR', 'PISTOLS', 'TO', 'EMPHASIZE', 'THEIR', 'COMMANDS'] +7975-280084-0008-1098: ref=['IF', 'ANY', 'OF', 'OUR', 'PARTY', 'SHOT', 'HIM', 'IT', 'MUST', 'HAVE', 'BEEN', 'WOODS'] +7975-280084-0008-1098: hyp=['IF', 'ANY', 'OF', 'OUR', 'PARTY', 'SHOT', 'HIM', 'IT', 'MUST', 'HAVE', 'BEEN', 'WOODS'] +7975-280084-0009-1099: ref=['MEANTIME', 'THE', 'STREET', 'WAS', 'GETTING', 'UNCOMFORTABLY', 'HOT'] +7975-280084-0009-1099: hyp=['MEANTIME', 'THE', 'STREET', 'WAS', 'GETTING', 'UNCOMFORTABLY', 'HOT'] +7975-280084-0010-1100: ref=['EVERY', 'TIME', 'I', 'SAW', 'ANY', 'ONE', 'WITH', 'A', 'BEAD', 'ON', 'ME', 'I', 'WOULD', 'DROP', 'OFF', 'MY', 'HORSE', 'AND', 'TRY', 'TO', 'DRIVE', 'THE', 'SHOOTER', 'INSIDE', 'BUT', 'I', 'COULD', 'NOT', 'SEE', 'IN', 'EVERY', 'DIRECTION'] +7975-280084-0010-1100: hyp=['EVERY', 'TIME', 'I', 'SAW', 'ANY', 'ONE', 'WITH', 'A', 'BEAD', 'ON', 'ME', 'I', 'WOULD', 'DROP', 'OFF', 'MY', 'HORSE', 'AND', 'TROUT', 'TO', 'DRIVE', 'THE', 'SHEETTER', 'INSIDE', 'BUT', 'I', 'COULD', 'NOT', 'SEE', 'IN', 'EVERY', 
'DIRECTION'] +7975-280084-0011-1101: ref=['DOCTOR', 'WHEELER', 'WHO', 'HAD', 'GONE', 'UPSTAIRS', 'IN', 'THE', 'HOTEL', 'SHOT', 'MILLER', 'AND', 'HE', 'LAY', 'DYING', 'IN', 'THE', 'STREET'] +7975-280084-0011-1101: hyp=['DOCTOR', 'WHALER', 'WHO', 'HAD', 'GONE', 'UPSTAIRS', 'IN', 'THE', 'HOTEL', 'SHOT', 'MILLER', 'AND', 'HE', 'LAY', 'DYING', 'IN', 'THE', 'STREET'] +7975-280084-0012-1102: ref=['CHANGING', 'HIS', 'PISTOL', 'TO', 'HIS', 'LEFT', 'HAND', 'BOB', 'RAN', 'OUT', 'AND', 'MOUNTED', "MILLER'S", 'MARE'] +7975-280084-0012-1102: hyp=['CHANGING', 'HIS', 'PISTOL', 'TO', 'HIS', 'LEFT', 'HAND', 'BOB', 'RAN', 'OUT', 'AND', 'MOUNTED', "MILLER'S", 'MAYOR'] +7975-280084-0013-1103: ref=['WHAT', 'KEPT', 'YOU', 'SO', 'LONG', 'I', 'ASKED', 'PITTS'] +7975-280084-0013-1103: hyp=['BUT', 'KEPT', 'YOU', 'SO', 'LONG', 'AS', 'PITTS'] +7975-280084-0014-1104: ref=['AS', 'TO', 'THE', 'REST', 'OF', 'THE', 'AFFAIR', 'INSIDE', 'THE', 'BANK', 'I', 'TAKE', 'THE', 'ACCOUNT', 'OF', 'A', 'NORTHFIELD', 'NARRATOR'] +7975-280084-0014-1104: hyp=['AS', 'TO', 'THE', 'REST', 'OF', 'THE', 'AFFAIR', 'INSIDE', 'THE', 'BANK', 'I', 'TAKE', 'THE', 'ACCOUNT', 'OF', 'A', 'NORTH', 'FIELD', 'NARRATOR'] +7975-280084-0015-1105: ref=["WHERE'S", 'THE', 'MONEY', 'OUTSIDE', 'THE', 'SAFE', 'BOB', 'ASKED'] +7975-280084-0015-1105: hyp=["WHERE'S", 'THE', 'MONEY', 'OUTSIDE', 'TO', 'SAFE', 'BOB', 'ASKED'] +7975-280084-0016-1106: ref=['THE', 'SHUTTERS', 'WERE', 'CLOSED', 'AND', 'THIS', 'CAUSED', 'BUNKER', 'AN', "INSTANT'S", 'DELAY', 'THAT', 'WAS', 'ALMOST', 'FATAL', 'PITTS', 'CHASED', 'HIM', 'WITH', 'A', 'BULLET'] +7975-280084-0016-1106: hyp=['THE', 'SHOWERS', 'WERE', 'CLOSED', 'AND', 'THIS', 'CAUSED', 'BUNKER', 'AN', 'INSTANCE', 'DELAY', 'THAT', 'WAS', 'ALMOST', 'FATAL', 'FITZ', 'CHASED', 'HIM', 'WITH', 'A', 'BULLET'] +7975-280084-0017-1107: ref=['THE', 'FIRST', 'ONE', 'MISSED', 'HIM', 'BUT', 'THE', 'SECOND', 'WENT', 'THROUGH', 'HIS', 'RIGHT', 'SHOULDER'] +7975-280084-0017-1107: hyp=['THE', 'FIRST', 'ONE', 'MISSED', 'HIM', 'BUT', 'THE', 'SECOND', 'WENT', 'THROUGH', 'HIS', 'RIGHT', 'SHOULDER'] +7975-280085-0000-1071: ref=['THAT', 'NIGHT', 'IT', 'STARTED', 'TO', 'RAIN', 'AND', 'WE', 'WORE', 'OUT', 'OUR', 'HORSES'] +7975-280085-0000-1071: hyp=['THAT', 'NIGHT', 'IT', 'STARTED', 'TO', 'RAIN', 'AND', 'WE', 'WORE', 'OUT', 'OUR', 'HORSES'] +7975-280085-0001-1072: ref=['FRIDAY', 'WE', 'MOVED', 'TOWARD', 'WATERVILLE', 'AND', 'FRIDAY', 'NIGHT', 'WE', 'CAMPED', 'BETWEEN', 'ELYSIAN', 'AND', 'GERMAN', 'LAKE'] +7975-280085-0001-1072: hyp=['FRIDAY', 'WE', 'MOVED', 'TOWARD', 'WATERVILLE', 'AND', 'FRIDAY', 'NIGHT', 'WE', "CAN'T", 'BETWEEN', 'ELYSIAN', 'AND', 'GERMAN', 'LAKE'] +7975-280085-0002-1073: ref=["BOB'S", 'SHATTERED', 'ELBOW', 'WAS', 'REQUIRING', 'FREQUENT', 'ATTENTION', 'AND', 'THAT', 'NIGHT', 'WE', 'MADE', 'ONLY', 'NINE', 'MILES', 'AND', 'MONDAY', 'MONDAY', 'NIGHT', 'AND', 'TUESDAY', 'WE', 'SPENT', 'IN', 'A', 'DESERTED', 'FARM', 'HOUSE', 'CLOSE', 'TO', 'MANKATO'] +7975-280085-0002-1073: hyp=['BOB', 'SHUTTERED', 'ELBOWS', 'REQUIRING', 'FREQUENT', 'ATTENTION', 'AND', 'THAT', 'NIGHT', 'WE', 'MADE', 'ONLY', 'NINE', 'MILES', 'AND', 'MONDAY', 'MONDAY', 'NIGHT', 'IN', 'TUESDAY', 'WE', 'SPENT', 'IN', 'A', 'DESERTED', 'FARM', 'HOUSE', 'CLOSE', 'TO', 'MAIN', 'CATO'] +7975-280085-0003-1074: ref=['THAT', 'DAY', 'A', 'MAN', 'NAMED', 'DUNNING', 'DISCOVERED', 'US', 'AND', 'WE', 'TOOK', 'HIM', 'PRISONER'] +7975-280085-0003-1074: hyp=['THAT', 'THEY', 'A', 'MAN', 'NAMED', 'DUNNING', 'DISCOVERED', 'US', 'AND', 'WE', 'TOOK', 'HIM', 'PRISONER'] +7975-280085-0004-1075: 
ref=['FINALLY', 'WE', 'ADMINISTERED', 'TO', 'HIM', 'AN', 'OATH', 'NOT', 'TO', 'BETRAY', 'OUR', 'WHEREABOUTS', 'UNTIL', 'WE', 'HAD', 'TIME', 'TO', 'MAKE', 'OUR', 'ESCAPE', 'AND', 'HE', 'AGREED', 'NOT', 'TO'] +7975-280085-0004-1075: hyp=['FINALLY', 'WE', 'ADMINISTERED', 'TO', 'HIM', 'AN', 'OATH', 'NOT', 'TO', 'BETRAY', 'OUR', 'WHEREABOUTS', 'UNTIL', 'WE', 'HAD', 'TIME', 'TO', 'MAKE', 'OUR', 'ESCAPE', 'AND', 'HE', 'AGREED', 'NOT', 'TO'] +7975-280085-0005-1076: ref=['NO', 'SOONER', 'HOWEVER', 'WAS', 'HE', 'RELEASED', 'THAN', 'HE', 'MADE', 'POSTHASTE', 'INTO', 'MANKATO', 'TO', 'ANNOUNCE', 'OUR', 'PRESENCE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'ANOTHER', 'POSSE', 'WAS', 'LOOKING', 'FOR', 'US'] +7975-280085-0005-1076: hyp=['NO', 'SOONER', 'HOWEVER', 'WAS', 'HE', 'RELEASED', 'THAN', 'HE', 'MADE', 'POST', 'HASTE', 'INTO', 'MANCATEO', 'TO', 'ANNOUNCE', 'OUR', 'PRESENCE', 'AND', 'IN', 'A', 'FEW', 'MINUTES', 'ANOTHER', 'POSSE', 'WAS', 'LOOKING', 'FOR', 'US'] +7975-280085-0006-1077: ref=['THE', 'WHISTLE', 'ON', 'THE', 'OIL', 'MILL', 'BLEW', 'AND', 'WE', 'FEARED', 'THAT', 'IT', 'WAS', 'A', 'SIGNAL', 'THAT', 'HAD', 'BEEN', 'AGREED', 'UPON', 'TO', 'ALARM', 'THE', 'TOWN', 'IN', 'CASE', 'WE', 'WERE', 'OBSERVED', 'BUT', 'WE', 'WERE', 'NOT', 'MOLESTED'] +7975-280085-0006-1077: hyp=['THE', 'WHISTLE', 'ON', 'THE', 'OARMEIL', 'BLUE', 'AND', 'WE', 'FEARED', 'THAT', 'IT', 'WAS', 'A', 'SIGNAL', 'THAT', 'HAD', 'BEEN', 'AGREED', 'UPON', 'TO', 'ALARM', 'THE', 'TOWN', 'IN', 'CASE', 'WE', 'WERE', 'OBSERVED', 'BUT', 'WE', 'WERE', 'NOT', 'MOLESTED'] +7975-280085-0007-1078: ref=['HE', 'HAD', 'TO', 'SLEEP', 'WITH', 'IT', 'PILLOWED', 'ON', 'MY', 'BREAST', 'JIM', 'BEING', 'ALSO', 'CRIPPLED', 'WITH', 'A', 'WOUND', 'IN', 'HIS', 'SHOULDER', 'AND', 'WE', 'COULD', 'NOT', 'GET', 'MUCH', 'SLEEP'] +7975-280085-0007-1078: hyp=['HE', 'HAD', 'TO', 'SLEEP', 'WITH', 'IT', 'PILLOWED', 'ON', 'MY', 'BREAST', 'JIM', 'BEING', 'ALSO', 'CRIPPLED', 'WITH', 'A', 'WOUND', 'IN', 'HIS', 'SHOULDER', 'AND', 'WE', 'COULD', 'NOT', 'GET', 'MUCH', 'SLEEP'] +7975-280085-0008-1079: ref=['BUT', 'THEY', 'SOON', 'AFTER', 'GOT', 'CLOSE', 'ENOUGH', 'SO', 'THAT', 'ONE', 'OF', 'THEM', 'BROKE', 'MY', 'WALKING', 'STICK', 'WITH', 'A', 'SHOT'] +7975-280085-0008-1079: hyp=['BUT', 'THEY', 'SOON', 'AFTER', 'GOT', 'CLOSE', 'ENOUGH', 'SO', 'THAT', 'ONE', 'OF', 'THEM', 'BROKE', 'MY', 'WALKING', 'STICK', 'WITH', 'A', 'SHOT'] +7975-280085-0009-1080: ref=['WE', 'WERE', 'IN', 'SIGHT', 'OF', 'OUR', 'LONG', 'SOUGHT', 'HORSES', 'WHEN', 'THEY', 'CUT', 'US', 'OFF', 'FROM', 'THE', 'ANIMALS', 'AND', 'OUR', 'LAST', 'HOPE', 'WAS', 'GONE'] +7975-280085-0009-1080: hyp=['WE', 'WERE', 'INSIDE', 'OF', 'OUR', 'LONG', 'SALT', 'HORSES', 'WHEN', 'THEY', 'CUT', 'US', 'OFF', 'FROM', 'THE', 'ANIMALS', 'AND', 'OUR', 'LAST', 'HOPE', 'WAS', 'GONE'] +7975-280085-0010-1081: ref=['SIX', 'STEPPED', 'TO', 'THE', 'FRONT', 'SHERIFF', 'GLISPIN', 'COLONEL', 'T', 'L', 'VOUGHT', 'B', 'M', 'RICE', 'G', 'A', 'BRADFORD', 'C', 'A', 'POMEROY', 'AND', 'S', 'J', 'SEVERSON'] +7975-280085-0010-1081: hyp=['SIX', 'STEPS', 'OF', 'THE', 'FRONT', 'SHERIFF', 'LISPIN', 'COLONEL', 'T', 'L', 'VAULT', 'B', 'M', 'RICE', 'G', 'A', 'BRADFORD', 'C', 'A', 'POMERALIE', 'IN', 'S', 'VERSON'] +7975-280085-0011-1082: ref=['FORMING', 'IN', 'LINE', 'FOUR', 'PACES', 'APART', 'HE', 'ORDERED', 'THEM', 'TO', 'ADVANCE', 'RAPIDLY', 'AND', 'CONCENTRATE', 'THE', 'FIRE', 'OF', 'THE', 'WHOLE', 'LINE', 'THE', 'INSTANT', 'THE', 'ROBBERS', 'WERE', 'DISCOVERED'] +7975-280085-0011-1082: hyp=['FORMING', 'A', 'LINE', 'FOUR', 'PACES', 'APART', 'HE', 'ORDERED', 
'THEM', 'TO', 'ADVANCE', 'RAPIDLY', 'AND', 'CONCENTRATE', 'THE', 'FIRE', 'OF', 'THE', 'WHOLE', 'LINE', 'THE', 'INSTANT', 'THE', 'ROBBERS', 'WERE', 'DISCOVERED'] +7975-280085-0012-1083: ref=['MAKE', 'FOR', 'THE', 'HORSES', 'I', 'SAID', 'EVERY', 'MAN', 'FOR', 'HIMSELF'] +7975-280085-0012-1083: hyp=['MAKE', 'FOR', 'THE', 'HORSES', 'I', 'SAID', 'EVERY', 'MAN', 'FOR', 'HIMSELF'] +7975-280085-0013-1084: ref=['THERE', 'IS', 'NO', 'USE', 'STOPPING', 'TO', 'PICK', 'UP', 'A', 'COMRADE', 'HERE', 'FOR', 'WE', "CAN'T", 'GET', 'HIM', 'THROUGH', 'THE', 'LINE', 'JUST', 'CHARGE', 'THEM', 'AND', 'MAKE', 'IT', 'IF', 'WE', 'CAN'] +7975-280085-0013-1084: hyp=["THERE'S", 'NO', 'USE', 'STOPPING', 'TO', 'PICK', 'UP', 'A', 'COMRADE', 'HERE', 'FOR', 'WE', "CAN'T", 'GET', 'HIM', 'THROUGH', 'THE', 'LINE', 'JUST', 'SHORES', 'THEM', 'AND', 'MAKE', 'IT', 'IF', 'WE', 'CAN'] +7975-280085-0014-1085: ref=['I', 'GOT', 'UP', 'AS', 'THE', 'SIGNAL', 'FOR', 'THE', 'CHARGE', 'AND', 'WE', 'FIRED', 'ONE', 'VOLLEY'] +7975-280085-0014-1085: hyp=['I', 'GOT', 'UP', 'AS', 'A', 'SIGNAL', 'FOR', 'THE', 'CHARGE', 'AND', 'WE', 'FIRED', 'ONE', 'VOLLEY'] +7975-280085-0015-1086: ref=['ONE', 'OF', 'THE', 'FELLOWS', 'IN', 'THE', 'OUTER', 'LINE', 'NOT', 'BRAVE', 'ENOUGH', 'HIMSELF', 'TO', 'JOIN', 'THE', 'VOLUNTEERS', 'WHO', 'HAD', 'COME', 'IN', 'TO', 'BEAT', 'US', 'OUT', 'WAS', 'NOT', 'DISPOSED', 'TO', 'BELIEVE', 'IN', 'THE', 'SURRENDER', 'AND', 'HAD', 'HIS', 'GUN', 'LEVELLED', 'ON', 'BOB', 'IN', 'SPITE', 'OF', 'THE', 'HANDKERCHIEF', 'WHICH', 'WAS', 'WAVING', 'AS', 'A', 'FLAG', 'OF', 'TRUCE'] +7975-280085-0015-1086: hyp=['ONE', 'OF', 'THE', 'FELLOWS', 'IN', 'THE', 'OUTER', 'LINE', 'NOT', 'BRAVE', 'ENOUGH', 'HIMSELF', 'TO', 'JOIN', 'THE', 'VOLUNTEERS', 'WHO', 'HAD', 'COME', 'IN', 'TO', 'BE', 'DISOUT', 'WAS', 'NOT', 'DISPOSED', 'TO', 'BELIEVE', 'IN', 'THE', 'SURRENDER', 'AND', 'HAD', 'HIS', 'GUN', 'LEVELLED', 'ON', 'BOB', 'IN', 'SPITE', 'OF', 'THE', 'HANDKERCHIEF', 'WHICH', 'WAS', 'WAVING', 'AS', 'A', 'FLAG', 'OF', 'TRUCE'] +7975-280085-0016-1087: ref=['SHERIFF', 'GLISPIN', 'OF', 'WATONWAN', 'COUNTY', 'WHO', 'WAS', 'TAKING', "BOB'S", 'PISTOL', 'FROM', 'HIM', 'WAS', 'ALSO', 'SHOUTING', 'TO', 'THE', 'FELLOW'] +7975-280085-0016-1087: hyp=['SHERIFF', 'GLISBON', 'OF', 'WATERWIN', 'COUNTY', 'WHO', 'WAS', 'TAKING', "BOB'S", 'PISTOL', 'FROM', 'HIM', 'WAS', 'ALSO', 'SHOUTING', 'TO', 'THE', 'FELLOW'] +7975-280085-0017-1088: ref=['INCLUDING', 'THOSE', 'RECEIVED', 'IN', 'AND', 'ON', 'THE', 'WAY', 'FROM', 'NORTHFIELD', 'I', 'HAD', 'ELEVEN', 'WOUNDS'] +7975-280085-0017-1088: hyp=['INCLUDING', 'THOSE', 'RECEIVED', 'IN', 'AND', 'ON', 'THE', 'WAY', 'FROM', 'NORTH', 'FIELD', 'I', 'HAD', 'ELEVEN', 'WINDS'] +7975-280085-0018-1089: ref=['AND', 'SHERIFF', "GLISPIN'S", 'ORDER', 'NOT', 'TO', 'SHOOT', 'WAS', 'THE', 'BEGINNING', 'OF', 'THE', 'PROTECTORATE', 'THAT', 'MINNESOTA', 'PEOPLE', 'ESTABLISHED', 'OVER', 'US'] +7975-280085-0018-1089: hyp=['AND', 'SHARE', 'OF', "GLISBON'S", 'ORDER', 'NOT', 'TO', 'SHOOT', 'WAS', 'THE', 'BEGINNING', 'OF', 'THE', 'PROTECTOR', 'THAT', 'MINNESOTA', 'PEOPLE', 'ESTABLISHED', 'OVER', 'US'] +8131-117016-0000-1303: ref=['CAPTAIN', 'MURDOCH'] +8131-117016-0000-1303: hyp=['CAPTAIN', 'MURDOCK'] +8131-117016-0001-1304: ref=['BUT', 'MARSPORT', 'HAD', 'FLOURISHED', 'ENOUGH', 'TO', 'KILL', 'IT', 'OFF'] +8131-117016-0001-1304: hyp=['BUT', 'MARSPORT', 'HAD', 'FLOURISHED', 'ENOUGH', 'TO', 'KILL', 'IT', 'OFF'] +8131-117016-0002-1305: ref=['SOME', 'OF', 'MARS', 'LAWS', 'DATED', 'FROM', 'THE', 'TIME', 'WHEN', 'LAW', 'ENFORCEMENT', 'HAD', 'BEEN', 'HAMPERED', 
'BY', 'LACK', 'OF', 'MEN', 'RATHER', 'THAN', 'BY', 'THE', 'TYPE', 'OF', 'MEN'] +8131-117016-0002-1305: hyp=['SOME', 'OF', 'MARS', 'LAWS', 'DATED', 'FROM', 'THE', 'TIME', 'WHEN', 'LAWN', 'FORCEMENT', 'HAD', 'BEEN', 'HAMPERED', 'BY', 'LACK', 'OF', 'MEN', 'RATHER', 'THAN', 'BY', 'THE', 'TYPE', 'OF', 'MEN'] +8131-117016-0003-1306: ref=['THE', 'STONEWALL', 'GANG', 'NUMBERED', 'PERHAPS', 'FIVE', 'HUNDRED'] +8131-117016-0003-1306: hyp=['THE', 'STONE', 'WALL', 'GANG', 'NUMBERED', 'PERHAPS', 'FIVE', 'HUNDRED'] +8131-117016-0004-1307: ref=['EVEN', 'DERELICTS', 'AND', 'FAILURES', 'HAD', 'TO', 'EAT', 'THERE', 'WERE', 'STORES', 'AND', 'SHOPS', 'THROUGHOUT', 'THE', 'DISTRICT', 'WHICH', 'EKED', 'OUT', 'SOME', 'KIND', 'OF', 'A', 'MARGINAL', 'LIVING'] +8131-117016-0004-1307: hyp=['EVEN', 'DEAR', 'ALEXE', 'AND', 'FAILURES', 'HAD', 'TO', 'EAT', 'THERE', 'WERE', 'STORIES', 'AND', 'SHOPS', 'THROUGHOUT', 'THE', 'DISTRICT', 'WHICH', 'EKED', 'OUT', 'SOME', 'KIND', 'OF', 'A', 'MARGINAL', 'LIVING'] +8131-117016-0005-1308: ref=['THEY', 'WERE', 'SAFE', 'FROM', 'PROTECTION', 'RACKETEERS', 'THERE', 'NONE', 'BOTHERED', 'TO', 'COME', 'SO', 'FAR', 'OUT'] +8131-117016-0005-1308: hyp=['THEY', 'WERE', 'SAFE', 'FROM', 'PROTECTION', 'RACKETERS', 'THERE', 'NONE', 'BOTHERED', 'TO', 'COME', 'SO', 'FAR', 'OUT'] +8131-117016-0006-1309: ref=['THE', 'SHOPKEEPERS', 'AND', 'SOME', 'OF', 'THE', 'LESS', 'UNFORTUNATE', 'PEOPLE', 'THERE', 'HAD', 'PROTESTED', 'LOUD', 'ENOUGH', 'TO', 'REACH', 'CLEAR', 'BACK', 'TO', 'EARTH'] +8131-117016-0006-1309: hyp=['THE', 'SHOPKEEPERS', 'AND', 'SOME', 'OF', 'THE', 'LESS', 'UNFORTUNATE', 'PEOPLE', 'THERE', 'HAD', 'PROTESTED', 'LOUD', 'ENOUGH', 'TO', 'REACH', 'CLEAR', 'BACK', 'TO', 'EARTH'] +8131-117016-0007-1310: ref=['CAPTAIN', 'MURDOCH', 'WAS', 'AN', 'UNKNOWN', 'FACTOR', 'AND', 'NOW', 'WAS', 'ASKING', 'FOR', 'MORE', 'MEN'] +8131-117016-0007-1310: hyp=['CAPTAIN', 'MURDOCK', 'WAS', 'AN', 'UNKNOWN', 'FACTOR', 'AND', 'NOW', 'WAS', 'ASKING', 'FOR', 'MORE', 'MEN'] +8131-117016-0008-1311: ref=['THE', 'PRESSURE', 'WAS', 'ENOUGH', 'TO', 'GET', 'THEM', 'FOR', 'HIM'] +8131-117016-0008-1311: hyp=['THE', 'PRESSURE', 'WAS', 'ENOUGH', 'TO', 'GET', 'THEM', 'FOR', 'HIM'] +8131-117016-0009-1312: ref=['GORDON', 'REPORTED', 'FOR', 'WORK', 'WITH', 'A', 'SENSE', 'OF', 'THE', 'BOTTOM', 'FALLING', 'OUT', 'MIXED', 'WITH', 'A', 'VAGUE', 'RELIEF'] +8131-117016-0009-1312: hyp=['GORDON', 'REPORTED', 'FOR', 'WORK', 'WITH', 'A', 'SENSE', 'OF', 'THE', 'BOTTOM', 'FALLING', 'OUT', 'MIXED', 'WITH', 'A', 'VAGUE', 'RELIEF'] +8131-117016-0010-1313: ref=["I'VE", 'GOT', 'A', 'FREE', 'HAND', 'AND', "WE'RE", 'GOING', 'TO', 'RUN', 'THIS', 'THE', 'WAY', 'WE', 'WOULD', 'ON', 'EARTH'] +8131-117016-0010-1313: hyp=["I'VE", 'GOT', 'A', 'FREE', 'HAND', 'AND', "WE'RE", 'GOING', 'TO', 'RUN', 'THIS', 'THE', 'WAY', 'WE', 'WOULD', 'ON', 'EARTH'] +8131-117016-0011-1314: ref=['YOUR', 'JOB', 'IS', 'TO', 'PROTECT', 'THE', 'CITIZENS', 'HERE', 'AND', 'THAT', 'MEANS', 'EVERYONE', 'NOT', 'BREAKING', 'THE', 'LAWS', 'WHETHER', 'YOU', 'FEEL', 'LIKE', 'IT', 'OR', 'NOT', 'NO', 'GRAFT'] +8131-117016-0011-1314: hyp=['YOUR', 'JOB', 'IS', 'TO', 'PROTECT', 'THE', 'CITIZENS', 'HERE', 'AND', 'THAT', 'MEANS', 'EVERY', 'ONE', 'NOT', 'BREAKING', 'THE', 'LAWS', 'WHETHER', 'YOU', 'FEEL', 'LIKE', 'IT', 'OR', 'NOT', 'NO', 'GRAFT'] +8131-117016-0012-1315: ref=['THE', 'FIRST', 'MAN', 'MAKING', 'A', 'SHAKEDOWN', 'WILL', 'GET', 'THE', 'SAME', 'TREATMENT', "WE'RE", 'GOING', 'TO', 'USE', 'ON', 'THE', 'STONEWALL', 'BOYS', "YOU'LL", 'GET', 'DOUBLE', 'PAY', 'HERE', 'AND', 'YOU', 'CAN', 
'LIVE', 'ON', 'IT'] +8131-117016-0012-1315: hyp=['THE', 'FIRST', 'MAN', 'MAKING', 'A', 'SHAKE', 'DOWN', 'WILL', 'GET', 'THE', 'SAME', 'TREATMENT', "WE'RE", 'GOING', 'TO', 'USE', 'ON', 'THE', 'STONE', 'WALL', 'BOYS', "YOU'LL", 'GET', 'DOUBLE', 'PAY', 'HERE', 'AND', 'YOU', 'CAN', 'LIVE', 'ON', 'IT'] +8131-117016-0013-1316: ref=['HE', 'PICKED', 'OUT', 'FIVE', 'OF', 'THE', 'MEN', 'INCLUDING', 'GORDON', 'YOU', 'FIVE', 'WILL', 'COME', 'WITH', 'ME'] +8131-117016-0013-1316: hyp=['HE', 'PICKED', 'OUT', 'FIVE', 'OF', 'THE', 'MEN', 'INCLUDING', 'GORDON', 'YOU', 'FIVE', 'WILL', 'COME', 'WITH', 'ME'] +8131-117016-0014-1317: ref=['THE', 'REST', 'OF', 'YOU', 'CAN', 'TEAM', 'UP', 'ANY', 'WAY', 'YOU', 'WANT', 'TONIGHT', 'PICK', 'ANY', 'ROUTE', "THAT'S", 'OPEN', 'OKAY', 'MEN', "LET'S", 'GO'] +8131-117016-0014-1317: hyp=['THE', 'REST', 'OF', 'YOU', 'CAN', 'TEAM', 'UP', 'ANY', 'WAY', 'YOU', 'WANT', 'TO', 'NIGHT', 'PICK', 'ANY', 'ROW', 'OF', 'THIS', 'OPEN', 'O', 'CAME', 'IN', "LET'S", 'GO'] +8131-117016-0015-1318: ref=['BRUCE', 'GORDON', 'GRINNED', 'SLOWLY', 'AS', 'HE', 'SWUNG', 'THE', 'STICK', 'AND', "MURDOCH'S", 'EYES', 'FELL', 'ON', 'HIM', 'EARTH', 'COP'] +8131-117016-0015-1318: hyp=['BRUSH', 'GORDON', 'GRINNED', 'SLOWLY', 'AS', 'HE', 'SWUNG', 'THE', 'STICK', 'AND', "MARDOCK'S", 'EYES', 'FELL', 'ON', 'HIM', 'EARTHCOP'] +8131-117016-0016-1319: ref=['TWO', 'YEARS', 'GORDON', 'ADMITTED'] +8131-117016-0016-1319: hyp=['TWO', 'YEARS', 'GORDON', 'ADMITTED'] +8131-117016-0017-1320: ref=['FOR', 'A', 'SECOND', 'GORDON', 'CURSED', 'HIMSELF'] +8131-117016-0017-1320: hyp=['FOR', 'A', 'SECOND', 'GORDON', 'CURSED', 'HIMSELF'] +8131-117016-0018-1321: ref=['HE', 'BEGAN', 'WONDERING', 'ABOUT', 'SECURITY', 'THEN'] +8131-117016-0018-1321: hyp=['HE', 'BEGAN', 'WONDERING', 'ABOUT', 'SECURITY', 'THEN'] +8131-117016-0019-1322: ref=['NOBODY', 'HAD', 'TRIED', 'TO', 'GET', 'IN', 'TOUCH', 'WITH', 'HIM'] +8131-117016-0019-1322: hyp=['NOBODY', 'HAD', 'TRIED', 'TO', 'GET', 'IN', 'TOUCH', 'WITH', 'HIM'] +8131-117016-0020-1323: ref=['THERE', 'WAS', 'A', 'CRUDE', 'LIGHTING', 'SYSTEM', 'HERE', 'PUT', 'UP', 'BY', 'THE', 'CITIZENS', 'AT', 'THE', 'FRONT', 'OF', 'EACH', 'BUILDING', 'A', 'DIM', 'PHOSPHOR', 'BULB', 'GLOWED', 'WHEN', 'DARKNESS', 'FELL', 'THEY', 'WOULD', 'HAVE', 'NOTHING', 'ELSE', 'TO', 'SEE', 'BY'] +8131-117016-0020-1323: hyp=['THERE', 'WAS', 'A', 'CRUDE', 'LIGHTING', 'SYSTEM', 'HERE', 'PUT', 'UP', 'BY', 'THE', 'CITIZENS', 'AT', 'THE', 'FRONT', 'OF', 'EACH', 'BUILDING', 'A', 'DIM', 'PHOSPHO', 'BOB', 'GLOWED', 'WHEN', 'DARKNESS', 'FELL', 'THEY', 'WOULD', 'HAVE', 'NOTHING', 'ELSE', 'TO', 'SEE', 'BY'] +8131-117016-0021-1324: ref=['MOVING', 'IN', 'TWO', 'GROUPS', 'OF', 'THREES', 'AT', 'OPPOSITE', 'SIDES', 'OF', 'THE', 'STREET', 'THEY', 'BEGAN', 'THEIR', 'BEAT'] +8131-117016-0021-1324: hyp=['MOVING', 'IN', 'TWO', 'GROUPS', 'OF', 'THREES', 'IT', 'OPPOSITE', 'SIDES', 'OF', 'THE', 'STREET', 'THEY', 'BEGAN', 'THEIR', 'BEAT'] +8131-117016-0022-1325: ref=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'SAVE', 'THE', 'CITIZEN', 'WHO', 'WAS', 'DYING', 'FROM', 'LACK', 'OF', 'AIR'] +8131-117016-0022-1325: hyp=['THERE', 'WAS', 'NO', 'CHANCE', 'TO', 'SAVE', 'THE', 'CITIZEN', 'WHO', 'WAS', 'DYING', 'FROM', 'LACK', 'OF', 'AIR'] +8131-117016-0023-1326: ref=['GORDON', 'FELT', 'THE', 'SOLID', 'PLEASURE', 'OF', 'THE', 'FINELY', 'TURNED', 'CLUB', 'IN', 'HIS', 'HANDS'] +8131-117016-0023-1326: hyp=['GORDON', 'FELT', 'THE', 'SOLID', 'PLEASURE', 'OF', 'THE', 'FINELY', 'TURNED', 'CLUB', 'IN', 'HIS', 'HANDS'] +8131-117016-0024-1327: ref=["GORDON'S", 'EYES', 'POPPED', 
'AT', 'THAT'] +8131-117016-0024-1327: hyp=["GORDON'S", 'EYES', 'POPPED', 'AT', 'THAT'] +8131-117016-0025-1328: ref=['HE', 'SWALLOWED', 'THE', 'SENTIMENT', 'HIS', 'OWN', 'CLUB', 'WAS', 'MOVING', 'NOW'] +8131-117016-0025-1328: hyp=['HE', 'SWALLOWED', 'THE', 'SENTIMENT', 'HIS', 'OWN', 'CLUB', 'WAS', 'MOVING', 'NOW'] +8131-117016-0026-1329: ref=['THE', 'OTHER', 'FOUR', 'COPS', 'HAD', 'COME', 'IN', 'RELUCTANTLY'] +8131-117016-0026-1329: hyp=['THE', 'OTHER', 'FOUR', 'COPS', 'HAD', 'COME', 'IN', 'RELUCTANTLY'] +8131-117016-0027-1330: ref=['HE', 'BROUGHT', 'HIM', 'TO', 'THE', 'GROUND', 'WITH', 'A', 'SINGLE', 'BLOW', 'ACROSS', 'THE', 'KIDNEYS'] +8131-117016-0027-1330: hyp=['HE', 'BROUGHT', 'HIM', 'TO', 'THE', 'GROUND', 'WITH', 'A', 'SINGLE', 'BLOW', 'ACROSS', 'THE', 'KIDNEYS'] +8131-117016-0028-1331: ref=['THEY', 'ROUNDED', 'UP', 'THE', 'MEN', 'OF', 'THE', 'GANG', 'AND', 'ONE', 'OF', 'THE', 'COPS', 'STARTED', 'OFF'] +8131-117016-0028-1331: hyp=['THEY', 'ROUNDED', 'UP', 'THE', 'MEN', 'OF', 'THE', 'GANG', 'AND', 'ONE', 'OF', 'THE', 'CUPS', 'STARTED', 'OFF'] +8131-117016-0029-1332: ref=['TO', 'FIND', 'A', 'PHONE', 'AND', 'CALL', 'THE', 'WAGON'] +8131-117016-0029-1332: hyp=['TO', 'FIND', 'A', 'PHONE', 'AND', 'CALL', 'THE', 'WAGON'] +8131-117016-0030-1333: ref=["WE'RE", 'NOT', 'USING', 'WAGONS', 'MURDOCH', 'TOLD', 'HIM', 'LINE', 'THEM', 'UP'] +8131-117016-0030-1333: hyp=['WERE', 'NOT', 'USING', 'WAGONS', 'MURDOCK', 'TOLD', 'HIM', 'LINE', 'THEM', 'UP'] +8131-117016-0031-1334: ref=['IF', 'THEY', 'TRIED', 'TO', 'RUN', 'THEY', 'WERE', 'HIT', 'FROM', 'BEHIND', 'IF', 'THEY', 'STOOD', 'STILL', 'THEY', 'WERE', 'CLUBBED', 'CAREFULLY'] +8131-117016-0031-1334: hyp=['IF', 'THEY', 'TRIED', 'TO', 'RUN', 'THEY', 'WERE', 'HIT', 'FROM', 'BEHIND', 'IF', 'THEY', 'STOOD', 'STILL', 'THEY', 'WERE', 'CLUBBED', 'CAREFULLY'] +8131-117016-0032-1335: ref=['MURDOCH', 'INDICATED', 'ONE', 'WHO', 'STOOD', 'WITH', 'HIS', 'SHOULDERS', 'SHAKING', 'AND', 'TEARS', 'RUNNING', 'DOWN', 'HIS', 'CHEEKS'] +8131-117016-0032-1335: hyp=['MURDOCK', 'INDICATED', 'ONE', 'WHO', 'STOOD', 'WITH', 'HIS', 'SHOULDER', 'SHAKING', 'AND', 'TEARS', 'RUNNING', 'DOWN', 'HIS', 'CHEEKS'] +8131-117016-0033-1336: ref=['THE', "CAPTAIN'S", 'FACE', 'WAS', 'AS', 'SICK', 'AS', 'GORDON', 'FELT'] +8131-117016-0033-1336: hyp=['THE', "CAPTAIN'S", 'FACE', 'WAS', 'AS', 'SICK', 'AS', "GORDON'S", 'FELT'] +8131-117016-0034-1337: ref=['I', 'WANT', 'THE', 'NAME', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'GANG', 'YOU', 'CAN', 'REMEMBER', 'HE', 'TOLD', 'THE', 'MAN'] +8131-117016-0034-1337: hyp=['I', 'WANT', 'THE', 'NAME', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'GANG', 'YOU', 'CAN', 'REMEMBER', 'HE', 'TOLD', 'THE', 'MAN'] +8131-117016-0035-1338: ref=['COLONEL', "THEY'D", 'KILL', 'ME', 'I', "DON'T", 'KNOW'] +8131-117016-0035-1338: hyp=['COLONEL', "THEY'D", 'KILL', 'ME', 'I', "DON'T", 'KNOW'] +8131-117016-0036-1339: ref=['MURDOCH', 'TOOK', 'HIS', 'NOD', 'AS', 'EVIDENCE', 'ENOUGH', 'AND', 'TURNED', 'TO', 'THE', 'WRETCHED', 'TOUGHS'] +8131-117016-0036-1339: hyp=['MURDOCK', 'TOOK', 'HIS', 'NOD', 'AS', 'EVIDENCE', 'ENOUGH', 'AND', 'TURNED', 'TO', 'THE', 'WRETCHED', 'TUFTS'] +8131-117016-0037-1340: ref=['IF', 'HE', 'SHOULD', 'TURN', 'UP', 'DEAD', "I'LL", 'KNOW', 'YOU', 'BOYS', 'ARE', 'RESPONSIBLE', 'AND', "I'LL", 'FIND', 'YOU'] +8131-117016-0037-1340: hyp=['IF', 'HE', 'SHOULD', 'TURN', 'UP', 'DEAD', "I'LL", 'KNOW', 'YOU', 'BOYS', 'ARE', 'RESPONSIBLE', 'AND', "I'LL", 'FIND', 'YOU'] +8131-117016-0038-1341: ref=['TROUBLE', 'BEGAN', 'BREWING', 'SHORTLY', 'AFTER', 'THOUGH'] +8131-117016-0038-1341: 
hyp=['TROUBLE', 'BEGAN', 'BREWING', 'SHORTLY', 'AFTER', 'THOUGH'] +8131-117016-0039-1342: ref=['MURDOCH', 'SENT', 'ONE', 'OF', 'THE', 'MEN', 'TO', 'PICK', 'UP', 'A', 'SECOND', 'SQUAD', 'OF', 'SIX', 'AND', 'THEN', 'A', 'THIRD'] +8131-117016-0039-1342: hyp=['MARDOCK', 'SENT', 'ONE', 'OF', 'THE', 'MEN', 'TO', 'PICK', 'UP', 'A', 'SECOND', 'SQUAD', 'OF', 'SIX', 'AND', 'THEN', 'A', 'THIRD'] +8131-117016-0040-1343: ref=['IN', 'THE', 'THIRD', 'ONE', 'BRUCE', 'GORDON', 'SPOTTED', 'ONE', 'OF', 'THE', 'MEN', "WHO'D", 'BEEN', 'BEATEN', 'BEFORE'] +8131-117016-0040-1343: hyp=['AND', 'THE', 'THIRD', 'ONE', 'BRUSH', 'GORDON', 'SPOTTED', 'ONE', 'OF', 'THE', 'MEN', 'WHO', 'HAD', 'BEEN', 'BEATEN', 'BEFORE'] +8131-117016-0041-1344: ref=['GET', 'A', 'STRETCHER', 'AND', 'TAKE', 'HIM', 'WHEREVER', 'HE', 'BELONGS', 'HE', 'ORDERED'] +8131-117016-0041-1344: hyp=['GET', 'A', 'STRETCHER', 'AND', 'TAKE', 'HIM', 'WHEREVER', 'HE', 'BELONGS', 'HE', 'ORDERED'] +8131-117016-0042-1345: ref=['BUT', 'THE', 'CAPTAIN', 'STIRRED', 'FINALLY', 'SIGHING'] +8131-117016-0042-1345: hyp=['BUT', 'THE', 'CAPTAIN', 'STIRRED', 'FINALLY', 'SIGHING'] +8131-117016-0043-1346: ref=['NO', 'THE', 'COPS', "THEY'RE", 'GIVING', 'ME', "WE'RE", 'COVERED', 'GORDON'] +8131-117016-0043-1346: hyp=['NO', 'THE', 'COPS', 'ARE', 'GIVING', 'ME', "WE'RE", 'COVERED', 'GORDON'] +8131-117016-0044-1347: ref=['BUT', 'THE', 'STONEWALL', 'GANG', 'IS', 'BACKING', 'WAYNE'] +8131-117016-0044-1347: hyp=['BUT', 'THE', 'STERNWALL', 'GANG', 'IS', 'BACKING', 'WANE'] +8131-117016-0045-1348: ref=['BUT', "IT'S", 'GOING', 'TO', 'BE', 'TOUGH', 'ON', 'THEM'] +8131-117016-0045-1348: hyp=['BUT', "IT'S", 'GOING', 'TO', 'BE', 'TOUGH', 'ON', 'THEM'] +8131-117016-0046-1349: ref=['BRUCE', 'GORDON', 'GRIMACED', "I'VE", 'GOT', 'A', 'YELLOW', 'TICKET', 'FROM', 'SECURITY'] +8131-117016-0046-1349: hyp=['BRUCE', 'GORDON', 'GRIMACED', "I'VE", 'GOT', 'A', 'YELLOW', 'TICKET', 'FROM', 'SECURITY'] +8131-117016-0047-1350: ref=['MURDOCH', 'BLINKED', 'HE', 'DROPPED', 'HIS', 'EYES', 'SLOWLY'] +8131-117016-0047-1350: hyp=['MARDOCK', 'BLINKED', 'HE', 'DROPPED', 'HIS', 'EYES', 'SLOWLY'] +8131-117016-0048-1351: ref=['WHAT', 'MAKES', 'YOU', 'THINK', 'WAYNE', 'WILL', 'BE', 'RE', 'ELECTED'] +8131-117016-0048-1351: hyp=['WHAT', 'MAKES', 'YOU', 'THINK', 'WAIN', 'WILL', 'BE', 'REELECTED'] +8131-117016-0049-1352: ref=['NOBODY', 'WANTS', 'HIM', 'EXCEPT', 'A', 'GANG', 'OF', 'CROOKS', 'AND', 'THOSE', 'IN', 'POWER'] +8131-117016-0049-1352: hyp=['NOBODY', 'WANTS', 'HIM', 'EXCEPT', 'A', 'GANG', 'OF', 'CROOKS', 'AND', 'THOSE', 'IN', 'POWER'] +8131-117016-0050-1353: ref=['EVER', 'SEE', 'A', 'MARTIAN', 'ELECTION'] +8131-117016-0050-1353: hyp=['EVER', 'SEE', 'A', 'MARTIAN', 'ELECTION'] +8131-117016-0051-1354: ref=['NO', "YOU'RE", 'A', 'FIRSTER', 'HE', "CAN'T", 'LOSE'] +8131-117016-0051-1354: hyp=['NO', "YOU'RE", 'A', 'FIRST', 'TER', 'HE', "CAN'T", 'LOSE'] +8131-117016-0052-1355: ref=['AND', 'THEN', 'HELL', 'IS', 'GOING', 'TO', 'POP', 'AND', 'THIS', 'WHOLE', 'PLANET', 'MAY', 'BE', 'BLOWN', 'WIDE', 'OPEN'] +8131-117016-0052-1355: hyp=['AND', 'THEN', 'HELL', 'IS', 'GOING', 'TO', 'POP', 'IN', 'THIS', 'WHOLE', 'PLANET', 'MAY', 'BE', 'BLOWN', 'WIDE', 'OPEN'] +8131-117016-0053-1356: ref=['IT', 'FITTED', 'WITH', 'THE', 'DIRE', 'PREDICTIONS', 'OF', 'SECURITY', 'AND', 'WITH', 'THE', 'SPYING', 'GORDON', 'WAS', 'GOING', 'TO', 'DO', 'ACCORDING', 'TO', 'THEM'] +8131-117016-0053-1356: hyp=['IT', 'FITTED', 'WITH', 'THE', 'DIRE', 'PREDICTIONS', 'OF', 'SECURITY', 'AND', 'WITH', 'THE', 'SPYING', 'GORDON', 'WAS', 'GOING', 'TO', 'DO', 
'ACCORDING', 'TO', 'THEM'] +8131-117016-0054-1357: ref=['HE', 'WAS', 'GETTING', 'EVEN', 'FATTER', 'NOW', 'THAT', 'HE', 'WAS', 'EATING', 'BETTER', 'FOOD', 'FROM', 'THE', 'FAIR', 'RESTAURANT', 'AROUND', 'THE', 'CORNER'] +8131-117016-0054-1357: hyp=['HE', 'WAS', 'GETTING', 'EVEN', 'FATTER', 'NOW', 'THAT', 'HE', 'WAS', 'EATING', 'BETTER', 'FOOD', 'FROM', 'THE', 'FAIR', 'RESTAURANT', 'AROUND', 'THE', 'CORNER'] +8131-117016-0055-1358: ref=['COST', 'EM', 'MORE', 'BUT', "THEY'D", 'BE', 'RESPECTABLE'] +8131-117016-0055-1358: hyp=['COSTUME', 'MORE', 'BUT', "THEY'D", 'BE', 'RESPECTABLE'] +8131-117016-0056-1359: ref=['BECAUSE', 'IZZY', 'IS', 'ALWAYS', 'HONEST', 'ACCORDING', 'TO', 'HOW', 'HE', 'SEES', 'IT'] +8131-117016-0056-1359: hyp=['BECAUSE', 'IZZIE', 'IS', 'ALWAYS', 'HONEST', 'ACCORDING', 'TO', 'HOW', 'HE', 'SEES', 'IT'] +8131-117016-0057-1360: ref=['BUT', 'YOU', 'GOT', 'EARTH', 'IDEAS', 'OF', 'THE', 'STUFF', 'LIKE', 'I', 'HAD', 'ONCE'] +8131-117016-0057-1360: hyp=['BUT', 'YOU', 'GOT', 'EARTH', 'IDEAS', 'OF', 'THE', 'STUFF', 'LIKE', 'I', 'HAD', 'ONCE'] +8131-117016-0058-1361: ref=['THE', 'GROUPS', 'GREW', 'MORE', 'EXPERIENCED', 'AND', 'MURDOCH', 'WAS', 'TRAINING', 'A', 'NEW', 'SQUAD', 'EVERY', 'NIGHT'] +8131-117016-0058-1361: hyp=['THE', 'GROUPS', 'GREW', 'MORE', 'EXPERIENCED', 'AND', 'MURDOCK', 'WAS', 'TRAINING', 'A', 'NEW', 'SQUAD', 'EVERY', 'NIGHT'] +8131-117016-0059-1362: ref=['IT', "WASN'T", 'EXACTLY', 'LEGAL', 'BUT', 'NOTHING', 'WAS', 'HERE'] +8131-117016-0059-1362: hyp=['IT', 'WAS', 'AN', 'EXACTLY', 'LEGAL', 'BUT', 'NOTHING', 'WAS', 'HERE'] +8131-117016-0060-1363: ref=['THIS', 'COULD', 'LEAD', 'TO', 'ABUSES', 'AS', "HE'D", 'SEEN', 'ON', 'EARTH'] +8131-117016-0060-1363: hyp=['THIS', 'COULD', 'LEAD', 'TO', 'ABUSES', 'AS', "HE'D", 'SEEN', 'ON', 'EARTH'] +8131-117016-0061-1364: ref=['BUT', 'THERE', 'PROBABLY', "WOULDN'T", 'BE', 'TIME', 'FOR', 'IT', 'IF', 'MAYOR', 'WAYNE', 'WAS', 'RE', 'ELECTED'] +8131-117016-0061-1364: hyp=['BUT', 'THERE', 'PROBABLY', "WOULDN'T", 'BE', 'TIME', 'FOR', 'IT', 'IF', 'MAYOR', 'WAIN', 'WAS', 'RE', 'ELECTED'] +8131-117017-0000-1270: ref=['IT', 'WAS', 'NIGHT', 'OUTSIDE', 'AND', 'THE', 'PHOSPHOR', 'BULBS', 'AT', 'THE', 'CORNERS', 'GLOWED', 'DIMLY', 'GIVING', 'HIM', 'BARELY', 'ENOUGH', 'LIGHT', 'BY', 'WHICH', 'TO', 'LOCATE', 'THE', 'WAY', 'TO', 'THE', 'EXTEMPORIZED', 'PRECINCT', 'HOUSE'] +8131-117017-0000-1270: hyp=['IT', 'WAS', 'NIGHT', 'OUTSIDE', 'AND', 'THE', 'PHOSPHO', 'BOBS', 'AT', 'THE', 'CORNERS', 'GLOWED', 'DIMLY', 'GIVING', 'HIM', 'BARELY', 'ENOUGH', 'LIGHT', 'BY', 'WHICH', 'TO', 'LOCATE', 'THE', 'WAY', 'TO', 'THE', 'EXTEMPORISED', 'PRECINCT', 'HOUSE'] +8131-117017-0001-1271: ref=['IT', 'HAD', 'PROBABLY', 'BEEN', 'YEARS', 'SINCE', 'ANY', 'HAD', 'DARED', 'RISK', 'IT', 'AFTER', 'THE', 'SUN', 'WENT', 'DOWN'] +8131-117017-0001-1271: hyp=['IT', 'HAD', 'PROBABLY', 'BEEN', 'YEARS', 'SINCE', 'ANY', 'HAD', 'DARED', 'RISK', 'IT', 'AFTER', 'THE', 'SUN', 'WENT', 'DOWN'] +8131-117017-0002-1272: ref=['AND', 'THE', 'SLOW', 'DOUBTFUL', 'RESPECT', 'ON', 'THE', 'FACES', 'OF', 'THE', 'CITIZENS', 'AS', 'THEY', 'NODDED', 'TO', 'HIM', 'WAS', 'EVEN', 'MORE', 'PROOF', 'THAT', "HALEY'S", 'SYSTEM', 'WAS', 'WORKING'] +8131-117017-0002-1272: hyp=['AND', 'THE', 'SLOW', 'DOUBTFUL', 'RESPECT', 'ON', 'THE', 'FACES', 'OF', 'THE', 'CITIZENS', 'AS', 'THEY', 'NODDED', 'TO', 'HIM', 'WAS', 'EVEN', 'MORE', 'PROOF', 'THAT', "HAYE'S", 'SYSTEM', 'WAS', 'WORKING'] +8131-117017-0003-1273: ref=['GORDON', 'HIT', 'THE', 'SIGNAL', 'SWITCH', 'AND', 'THE', 'MARSPEAKER', 'LET', 'OUT', 'A', 'SHRILL', 'WHISTLE'] 
+8131-117017-0003-1273: hyp=['GORDON', 'HIT', 'THE', 'SIGNAL', 'SWITCH', 'AND', 'THE', 'MARKEE', 'LED', 'OUT', 'A', 'SHRILL', 'WHISTLE'] +8131-117017-0004-1274: ref=['GUNS', 'SUDDENLY', 'SEEMED', 'TO', 'BE', 'FLOURISHING', 'EVERYWHERE'] +8131-117017-0004-1274: hyp=['GUN', 'SUDDENLY', 'SEEMED', 'TO', 'BE', 'FLOURISHING', 'EVERYWHERE'] +8131-117017-0005-1275: ref=['YOU', "CAN'T", 'DO', 'IT', 'TO', 'ME'] +8131-117017-0005-1275: hyp=['YOU', "CAN'T", 'DO', 'IT', 'TO', 'ME'] +8131-117017-0006-1276: ref=["I'M", 'REFORMED', "I'M", 'GOING', 'STRAIGHT'] +8131-117017-0006-1276: hyp=["I'M", 'REFORMED', "I'M", 'GOING', 'STRAIGHT'] +8131-117017-0007-1277: ref=['YOU', 'DAMNED', 'COPS', "CAN'T", "O'NEILL", 'WAS', 'BLUBBERING'] +8131-117017-0007-1277: hyp=['YOU', 'DAMNED', 'COPSE', "CAN'T", 'ON', 'NEIL', 'WAS', 'BLUBBERING'] +8131-117017-0008-1278: ref=['ONE', 'LOOK', 'WAS', 'ENOUGH', 'THE', 'WORK', 'PAPERS', 'HAD', 'THE', 'TELLTALE', 'OVER', 'THICKENING', 'OF', 'THE', 'SIGNATURE', 'THAT', 'HAD', 'SHOWED', 'UP', 'ON', 'OTHER', 'PAPERS', 'OBVIOUSLY', 'FORGERIES'] +8131-117017-0008-1278: hyp=['ONE', 'LOOK', 'WAS', 'ENOUGH', 'THE', 'WORK', 'PAPERS', 'HAD', 'THE', 'TELL', 'TALE', 'OVER', 'THICKENING', 'OF', 'THE', 'SIGNATURE', 'THEY', 'HAD', 'SHOWED', 'UP', 'ON', 'OTHER', 'PAPERS', 'OBVIOUSLY', 'FORGERIES'] +8131-117017-0009-1279: ref=['SOME', 'TURNED', 'AWAY', 'AS', 'GORDON', 'AND', 'THE', 'OTHER', 'COP', 'WENT', 'TO', 'WORK', 'BUT', 'MOST', 'OF', 'THEM', "WEREN'T", 'SQUEAMISH'] +8131-117017-0009-1279: hyp=['SOME', 'TURNED', 'AWAY', 'AS', 'GORDON', 'AND', 'THE', 'OTHER', 'COP', 'WENT', 'TO', 'WORK', 'BUT', 'MOST', 'OF', 'THEM', "WEREN'T", 'SQUEAMISH'] +8131-117017-0010-1280: ref=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'TWO', 'PICKED', 'UP', 'THEIR', 'WHIMPERING', 'CAPTIVE'] +8131-117017-0010-1280: hyp=['WHEN', 'IT', 'WAS', 'OVER', 'THE', 'TWO', 'PICKED', 'UP', 'THEIR', 'WHIMPERING', 'CAPTIVE'] +8131-117017-0011-1281: ref=['JENKINS', 'THE', 'OTHER', 'COP', 'HAD', 'BEEN', 'HOLDING', 'THE', 'WALLET'] +8131-117017-0011-1281: hyp=['JENKINS', 'THE', 'OTHER', 'COP', 'HAD', 'BEEN', 'HOLDING', 'THE', 'WALLET'] +8131-117017-0012-1282: ref=['MUST', 'OF', 'BEEN', 'MAKING', 'A', 'BIG', 'CONTACT', 'IN', 'SOMETHING', 'FIFTY', 'FIFTY'] +8131-117017-0012-1282: hyp=['MUST', 'HAVE', 'BEEN', 'MAKING', 'A', 'BIG', 'CONTACT', 'IN', 'SOMETHING', 'FIFTY', 'FIFTY'] +8131-117017-0013-1283: ref=['THERE', 'MUST', 'HAVE', 'BEEN', 'OVER', 'TWO', 'THOUSAND', 'CREDITS', 'IN', 'THE', 'WALLET'] +8131-117017-0013-1283: hyp=['THERE', 'MUST', 'HAVE', 'BEEN', 'OVER', 'TWO', 'THOUSAND', 'CREDITS', 'IN', 'THE', 'WALLET'] +8131-117017-0014-1284: ref=['WHEN', 'GORDON', 'AND', 'JENKINS', 'CAME', 'BACK', 'MURDOCH', 'TOSSED', 'THE', 'MONEY', 'TO', 'THEM', 'SPLIT', 'IT'] +8131-117017-0014-1284: hyp=['WHEN', 'GORDON', 'AND', 'JENKINS', 'CAME', 'BACK', 'MARDOCK', 'TOSSED', 'THE', 'MONEY', 'TO', 'THEM', 'SPLIT', 'IT'] +8131-117017-0015-1285: ref=['WHATEVER', 'COMES', 'TO', 'HAND', "GOV'NOR"] +8131-117017-0015-1285: hyp=['WHATEVER', 'COMES', 'TO', 'HAND', 'GOVERNOR'] +8131-117017-0016-1286: ref=['LIKE', 'THIS', 'SOCIAL', 'CALL', 'GORDON', 'ASKED', 'HIM'] +8131-117017-0016-1286: hyp=['LIKE', 'THIS', 'SOCIAL', 'CALL', 'GORDON', 'ASKED', 'HIM'] +8131-117017-0017-1287: ref=['THE', 'LITTLE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'HIS', 'ANCIENT', 'EIGHTEEN', 'YEAR', 'OLD', 'FACE', 'TURNING', 'SOBER', 'NOPE'] +8131-117017-0017-1287: hyp=['THE', 'LITTLE', 'MAN', 'SHOOK', 'HIS', 'HEAD', 'HIS', 'ANCIENT', 'EIGHTEEN', 'YEAR', 'OLD', 'FACE', 'TURNING', 'SOBER', 'NOTE'] 
+8131-117017-0018-1288: ref=['YOU', 'OWE', 'ME', 'SOME', 'BILLS', "GOV'NOR"] +8131-117017-0018-1288: hyp=['YOU', 'OWE', 'ME', 'SOME', 'BILLS', "GUV'NER"] +8131-117017-0019-1289: ref=['ELEVEN', 'HUNDRED', 'FIFTY', 'CREDITS'] +8131-117017-0019-1289: hyp=['ELEVEN', 'HUNDRED', 'FIFTY', 'CREDITS'] +8131-117017-0020-1290: ref=['YOU', "DIDN'T", 'PAY', 'UP', 'YOUR', 'PLEDGE', 'TO', 'THE', 'CAMPAIGN', 'FUND', 'SO', 'I', 'HADDA', 'FILL', 'IN'] +8131-117017-0020-1290: hyp=['YOU', "DIDN'T", 'PAY', 'UP', 'YOUR', 'PLEDGE', 'TO', 'THE', 'CAPTAIN', 'FUND', 'SO', 'I', 'HAD', 'A', 'FILL', 'IN'] +8131-117017-0021-1291: ref=['A', 'THOUSAND', 'INTEREST', 'AT', 'TEN', 'PER', 'CENT', 'A', 'WEEK', 'STANDARD', 'RIGHT'] +8131-117017-0021-1291: hyp=['A', 'THOUSAND', 'INTERESTS', 'AT', 'TEN', 'PER', 'CENT', 'A', 'WEEK', 'STANDARD', 'RIGHT'] +8131-117017-0022-1292: ref=['GORDON', 'HAD', 'HEARD', 'OF', 'THE', 'FRIENDLY', 'INTEREST', 'CHARGED', 'ON', 'THE', 'SIDE', 'HERE', 'BUT', 'HE', 'SHOOK', 'HIS', 'HEAD', 'WRONG', 'IZZY'] +8131-117017-0022-1292: hyp=['GORDON', 'HAD', 'HEARD', 'OF', 'THE', 'FRIENDLY', 'INTEREST', 'CHARGED', 'ON', 'THE', 'SIDE', 'HERE', 'BUT', 'HE', 'SHOOK', 'HIS', 'HEAD', 'WRONG', 'IS', 'HE'] +8131-117017-0023-1293: ref=['HUH', 'IZZY', 'TURNED', 'IT', 'OVER', 'AND', 'SHOOK', 'HIS', 'HEAD'] +8131-117017-0023-1293: hyp=['HER', 'AS', 'HE', 'TURNED', 'IT', 'OVER', 'AND', 'SHOOK', 'HIS', 'HEAD'] +8131-117017-0024-1294: ref=['NOW', 'SHOW', 'ME', 'WHERE', 'I', 'SIGNED', 'ANY', 'AGREEMENT', 'SAYING', "I'D", 'PAY', 'YOU', 'BACK'] +8131-117017-0024-1294: hyp=['NOW', 'SHOW', 'ME', 'WHERE', 'I', 'SIGNED', 'ANY', 'AGREEMENT', 'SAYING', "I'D", 'PAY', 'YOU', 'BACK'] +8131-117017-0025-1295: ref=['FOR', 'A', 'SECOND', "IZZY'S", 'FACE', 'WENT', 'BLANK', 'THEN', 'HE', 'CHUCKLED'] +8131-117017-0025-1295: hyp=['FOR', 'A', 'SECOND', 'IS', 'HIS', 'FACE', 'WENT', 'BLANK', 'THEN', 'HE', 'CHUCKLED'] +8131-117017-0026-1296: ref=['HE', 'PULLED', 'OUT', 'THE', 'BILLS', 'AND', 'HANDED', 'THEM', 'OVER'] +8131-117017-0026-1296: hyp=['HE', 'POURED', 'OUT', 'THE', 'BILLS', 'AND', 'HANDED', 'THEM', 'OVER'] +8131-117017-0027-1297: ref=['THANKS', 'IZZY', 'THANKS', 'YOURSELF'] +8131-117017-0027-1297: hyp=['THANKS', 'IS', 'HE', 'THANKS', 'YOURSELF'] +8131-117017-0028-1298: ref=['THE', 'KID', 'POCKETED', 'THE', 'MONEY', 'CHEERFULLY', 'NODDING'] +8131-117017-0028-1298: hyp=['THE', 'KID', 'POCKETED', 'THE', 'MONEY', 'CHEERFULLY', 'NODDING'] +8131-117017-0029-1299: ref=['THE', 'LITTLE', 'GUY', 'KNEW', 'MARS', 'AS', 'FEW', 'OTHERS', 'DID', 'APPARENTLY', 'FROM', 'ALL', 'SIDES'] +8131-117017-0029-1299: hyp=['THE', 'LITTLE', 'GUY', 'KNEW', 'MARS', 'AS', 'FEW', 'OTHERS', 'DID', 'APPARENTLY', 'FROM', 'ALL', 'SIDES'] +8131-117017-0030-1300: ref=['AND', 'IF', 'ANY', 'OF', 'THE', 'OTHER', 'COPS', 'HAD', 'PRIVATE', 'RACKETS', 'OF', 'THEIR', 'OWN', 'IZZY', 'WAS', 'UNDOUBTEDLY', 'THE', 'MAN', 'TO', 'FIND', 'IT', 'OUT', 'AND', 'USE', 'THE', 'INFORMATION', 'WITH', 'A', 'BEAT', 'SUCH', 'AS', 'THAT', 'EVEN', 'GOING', 'HALVES', 'AND', 'WITH', 'ALL', 'THE', 'GRAFT', 'TO', 'THE', 'UPPER', 'BRACKETS', "HE'D", 'STILL', 'BE', 'ABLE', 'TO', 'MAKE', 'HIS', 'PILE', 'IN', 'A', 'MATTER', 'OF', 'MONTHS'] +8131-117017-0030-1300: hyp=['AND', 'IF', 'ANY', 'OF', 'THE', 'OTHER', 'COPS', 'HAD', 'PRIVATE', 'RACKETS', 'OF', 'THEIR', 'OWN', 'IS', 'HE', 'WAS', 'UNDOUBTEDLY', 'THE', 'MAN', 'TO', 'FIND', 'IT', 'OUT', 'AND', 'USED', 'THE', 'INFORMATION', 'WITH', 'A', 'BEAT', 'SUCH', 'AS', 'THAT', 'EVEN', 'GOING', 'HALVES', 'AND', 'WITH', 'ALL', 'THE', 'GRAFT', 'OF', 'THE', 'UPPER', 
'BRACKETS', "HE'D", 'STILL', 'BE', 'ABLE', 'TO', 'MAKE', 'HIS', 'PILE', 'IN', 'A', 'MATTER', 'OF', 'MONTHS'] +8131-117017-0031-1301: ref=['THE', 'CAPTAIN', 'LOOKED', 'COMPLETELY', 'BEATEN', 'AS', 'HE', 'CAME', 'INTO', 'THE', 'ROOM', 'AND', 'DROPPED', 'ONTO', 'THE', 'BENCH'] +8131-117017-0031-1301: hyp=['THE', 'CAPTAIN', 'LOOKED', 'COMPLETELY', 'BEATEN', 'AS', 'HE', 'CAME', 'INTO', 'THE', 'ROOM', 'AND', 'DROPPED', 'INTO', 'THE', 'BENCH'] +8131-117017-0032-1302: ref=['GO', 'ON', 'ACCEPT', 'DAMN', 'IT'] +8131-117017-0032-1302: hyp=['GO', 'ON', 'EXCEPT', 'DEMON'] +8131-117029-0000-1247: ref=['THERE', 'WAS', 'A', 'MAN', 'COMING', 'FROM', 'EARTH', 'ON', 'A', 'SECOND', 'SHIP', 'WHO', 'WOULD', 'SEE', 'HIM'] +8131-117029-0000-1247: hyp=['THERE', 'WAS', 'A', 'MAN', 'COMING', 'FROM', 'EARTH', 'ON', 'A', 'SECOND', 'SHIP', 'WHO', 'WOULD', 'SEE', 'HIM'] +8131-117029-0001-1248: ref=['THE', 'LITTLE', 'PUBLISHER', 'WAS', 'BACK', 'AT', 'THE', 'CRUSADER', 'AGAIN'] +8131-117029-0001-1248: hyp=['THE', 'LITTLE', 'PUBLISHER', 'WAS', 'BACK', 'AT', 'THE', 'CRUSADER', 'AGAIN'] +8131-117029-0002-1249: ref=['ONLY', 'GORDON', 'AND', 'SHEILA', 'WERE', 'LEFT'] +8131-117029-0002-1249: hyp=['ONLY', 'GORDON', 'AND', 'SHEILA', 'WERE', 'LEFT'] +8131-117029-0003-1250: ref=['CREDIT', 'HAD', 'BEEN', 'ESTABLISHED', 'AGAIN', 'AND', 'THE', 'BUSINESSES', 'WERE', 'OPEN'] +8131-117029-0003-1250: hyp=['CREDIT', 'HAD', 'BEEN', 'ESTABLISHED', 'AGAIN', 'AND', 'THE', 'BUSINESSES', 'WERE', 'OPEN'] +8131-117029-0004-1251: ref=['GORDON', 'CAME', 'TO', 'A', 'ROW', 'OF', 'TEMPORARY', 'BUBBLES', 'INDIVIDUAL', 'DWELLINGS', 'BUILT', 'LIKE', 'THE', 'DOME', 'BUT', 'OPAQUE', 'FOR', 'PRIVACY'] +8131-117029-0004-1251: hyp=['GORDON', 'CAME', 'TO', 'A', 'ROW', 'OF', 'TEMPORARY', 'BUBBLES', 'INDIVIDUAL', 'DWELLINGS', 'BUILT', 'LIKE', 'THE', 'DOME', 'BUT', 'OPAQUE', 'FOR', 'PRIVACY'] +8131-117029-0005-1252: ref=['THEY', 'HAD', 'BEEN', 'LUCKY'] +8131-117029-0005-1252: hyp=['THEY', 'HAD', 'BEEN', 'LUCKY'] +8131-117029-0006-1253: ref=["SCHULBERG'S", 'VOLUNTEERS', 'WERE', 'OFFICIAL', 'NOW'] +8131-117029-0006-1253: hyp=["SILBERG'S", 'VOLUNTEERS', 'WERE', 'OFFICIAL', 'NOW'] +8131-117029-0007-1254: ref=['FATS', 'PLACE', 'WAS', 'STILL', 'OPEN', 'THOUGH', 'THE', 'CROOKED', 'TABLES', 'HAD', 'BEEN', 'REMOVED', 'GORDON', 'DROPPED', 'TO', 'A', 'STOOL', 'SLIPPING', 'OFF', 'HIS', 'HELMET'] +8131-117029-0007-1254: hyp=['FAT', 'PLACE', 'WAS', 'STILL', 'OPEN', 'THOUGH', 'THE', 'CROOKED', 'TABLES', 'HAD', 'BEEN', 'REMOVED', 'GORDON', 'DROPPED', 'TO', 'A', 'STOOL', 'SLIPPING', 'OFF', 'HIS', 'HELMET'] +8131-117029-0008-1255: ref=['HE', 'REACHED', 'AUTOMATICALLY', 'FOR', 'THE', 'GLASS', 'OF', 'ETHER', 'NEEDLED', 'BEER'] +8131-117029-0008-1255: hyp=['HE', 'REACHED', 'AUTOMATICALLY', 'FOR', 'THE', 'GLASS', 'OF', 'ETHER', 'NEEDLED', 'BEER'] +8131-117029-0009-1256: ref=['THOUGHT', "YOU'D", 'BE', 'IN', 'THE', 'CHIPS'] +8131-117029-0009-1256: hyp=['THOUGHT', "YOU'D", 'BE', 'IN', 'THE', 'CHIPS'] +8131-117029-0010-1257: ref=["THAT'S", 'MARS', 'GORDON', 'ECHOED', 'THE', "OTHER'S", 'COMMENT', 'WHY', "DON'T", 'YOU', 'PULL', 'OFF', 'THE', 'PLANET', 'FATS', 'YOU', 'COULD', 'GO', 'BACK', 'TO', 'EARTH', "I'D", 'GUESS', 'THE', 'OTHER', 'NODDED'] +8131-117029-0010-1257: hyp=["THAT'S", 'MARS', 'GORDON', 'ACCORD', 'OTHERS', 'COMMENT', 'WHY', "DON'T", 'YOU', 'PULL', 'OFF', 'THE', 'PLANET', 'FATS', 'YOU', 'COULD', 'GO', 'BACK', 'TO', 'EARTH', "I'D", 'GUESS', 'THE', 'OTHER', 'NODDED'] +8131-117029-0011-1258: ref=['GUESS', 'A', 'MAN', 'GETS', 'USED', 'TO', 'ANYTHING', 'HELL', 'MAYBE', 'I', 'CAN', 
'HIRE', 'SOME', 'BUMS', 'TO', 'SIT', 'AROUND', 'AND', 'WHOOP', 'IT', 'UP', 'WHEN', 'THE', 'SHIPS', 'COME', 'IN', 'AND', 'BILL', 'THIS', 'AS', 'A', 'REAL', 'OLD', 'MARTIAN', 'DEN', 'OF', 'SIN'] +8131-117029-0011-1258: hyp=['GUESS', 'A', 'MAN', 'GETS', 'USED', 'TO', 'ANYTHING', 'HELL', 'MAYBE', 'I', 'CAN', 'HIRE', 'SOME', 'BUMS', 'TO', 'SIT', 'AROUND', 'AND', 'WHOOP', 'IT', 'UP', 'WHEN', 'THE', 'SHIPS', 'COME', 'IN', 'AND', 'BUILD', 'THIS', 'AS', 'A', 'REAL', 'OLD', 'MARTIAN', 'DEN', 'OF', 'SIN'] +8131-117029-0012-1259: ref=['THERE', 'WAS', 'A', 'GRIN', 'ON', 'THE', "OTHER'S", 'FACE'] +8131-117029-0012-1259: hyp=['THERE', 'WAS', 'A', 'GRIN', 'ON', 'THE', "OTHER'S", 'FACE'] +8131-117029-0013-1260: ref=['FINALLY', 'GOT', 'OUR', 'ORDERS', 'FOR', 'YOU', "IT'S", 'MERCURY'] +8131-117029-0013-1260: hyp=['FINALLY', 'GOT', 'OUR', 'ORDERS', 'FOR', 'YOU', "IT'S", 'MERCURY'] +8131-117029-0014-1261: ref=['WE', 'SENT', 'TWENTY', 'OTHERS', 'THE', 'SAME', 'WAY', 'AND', 'THEY', 'FAILED'] +8131-117029-0014-1261: hyp=['WE', 'SENT', 'TWENTY', 'OTHERS', 'THE', 'SAME', 'WAY', 'AND', 'THEY', 'FAILED'] +8131-117029-0015-1262: ref=["LET'S", 'SAY', "YOU'VE", 'SHIFTED', 'SOME', 'OF', 'THE', 'MISERY', 'AROUND', 'A', 'BIT', 'AND', 'GIVEN', 'THEM', 'A', 'CHANCE', 'TO', 'DO', 'BETTER'] +8131-117029-0015-1262: hyp=["LET'S", 'SAVE', 'SHIFTED', 'SOME', 'OF', 'THE', 'MISERY', 'AROUND', 'A', 'BIT', 'AND', 'GIVEN', 'THEM', 'A', 'CHANCE', 'TO', 'DO', 'BETTER'] +8131-117029-0016-1263: ref=['YOU', "CAN'T", 'STAY', 'HERE'] +8131-117029-0016-1263: hyp=['YOU', "CAN'T", 'STAY', 'HERE'] +8131-117029-0017-1264: ref=["THERE'S", 'A', 'ROCKET', 'WAITING', 'TO', 'TRANSSHIP', 'YOU', 'TO', 'THE', 'MOON', 'ON', 'THE', 'WAY', 'TO', 'MERCURY', 'RIGHT', 'NOW', 'GORDON', 'SIGHED'] +8131-117029-0017-1264: hyp=["THERE'S", 'A', 'ROCKET', 'WAITING', 'TO', 'TRANSHIP', 'YOU', 'TO', 'THE', 'MOON', 'ON', 'THE', 'WAY', 'TO', 'MERCURY', 'RIGHT', 'NOW', 'GORDON', 'SIGHED'] +8131-117029-0018-1265: ref=['AND', "I'VE", 'PAID', 'HER', 'THE', 'PAY', 'WE', 'OWE', 'YOU', 'FROM', 'THE', 'TIME', 'YOU', 'BEGAN', 'USING', 'YOUR', 'BADGE', "SHE'S", 'OUT', 'SHOPPING'] +8131-117029-0018-1265: hyp=['AND', 'I', 'PAID', 'HER', 'THE', 'PAY', 'WE', 'OWE', 'YOU', 'FROM', 'THE', 'TIME', 'YOU', 'BEGIN', 'USING', 'YOUR', 'BADGE', "SHE'S", 'OUT', 'SHOPPING'] +8131-117029-0019-1266: ref=['BUT', 'HIS', 'OLD', 'EYES', 'WERE', 'GLINTING'] +8131-117029-0019-1266: hyp=['BUT', 'HIS', 'OLD', 'EYES', 'WERE', 'GLINTING'] +8131-117029-0020-1267: ref=['DID', 'YOU', 'THINK', "WE'D", 'LET', 'YOU', 'GO', 'WITHOUT', 'SEEING', 'YOU', 'OFF', 'COBBER', 'HE', 'ASKED'] +8131-117029-0020-1267: hyp=['DID', 'YOU', 'THINK', "WE'D", 'LET', 'YOU', 'GO', 'WITHOUT', 'SEEING', 'YOU', 'OFF', 'COPPER', 'HE', 'ASKED'] +8131-117029-0021-1268: ref=['I', 'I', 'OH', 'DRAT', 'IT', "I'M", 'GETTING', 'OLD', 'IZZY', 'YOU', 'TELL', 'HIM'] +8131-117029-0021-1268: hyp=['I', 'I', 'OH', 'DREAD', 'IT', "I'M", 'GETTING', 'OLD', 'IS', 'HE', 'YOU', 'TELL', 'HIM'] +8131-117029-0022-1269: ref=['HE', 'GRABBED', "GORDON'S", 'HAND', 'AND', 'WADDLED', 'DOWN', 'THE', 'LANDING', 'PLANK', 'IZZY', 'SHOOK', 'HIS', 'HEAD'] +8131-117029-0022-1269: hyp=['HE', 'GRABBED', "GORDON'S", 'HAND', 'AND', 'WADDLED', 'DOWN', 'THE', 'LANDING', 'PLANK', 'IS', 'HE', 'SHOOK', 'HIS', 'HEAD'] +8188-269288-0000-2881: ref=['ANNIE', 'COLCHESTER', 'HAD', 'BEGUN', 'TO', 'MAKE', 'FRIENDS', 'WITH', 'LESLIE'] +8188-269288-0000-2881: hyp=['ANY', 'COLCHESTER', 'HAD', 'BEGUN', 'TO', 'MAKE', 'FRIENDS', 'WITH', 'LISLEY'] +8188-269288-0001-2882: ref=['LESLIE', 'DETERMINED', 
'TO', 'TRY', 'FOR', 'HONORS', 'IN', 'ENGLISH', 'LANGUAGE', 'AND', 'LITERATURE'] +8188-269288-0001-2882: hyp=['LESLIE', 'DETERMINED', 'TO', 'TRIFLE', 'HONORS', 'IN', 'ENGLISH', 'LANGUAGE', 'AND', 'LITERATURE'] +8188-269288-0002-2883: ref=['HER', 'TASTES', 'ALL', 'LAY', 'IN', 'THIS', 'DIRECTION', 'HER', 'IDEA', 'BEING', 'BY', 'AND', 'BY', 'TO', 'FOLLOW', 'HER', "MOTHER'S", 'PROFESSION', 'OF', 'JOURNALISM', 'FOR', 'WHICH', 'SHE', 'ALREADY', 'SHOWED', 'CONSIDERABLE', 'APTITUDE'] +8188-269288-0002-2883: hyp=['HER', 'TASTES', 'ALL', 'LAY', 'IN', 'THIS', 'DIRECTION', 'HER', 'IDEA', 'BEING', 'BY', 'AND', 'BY', 'TO', 'FOLLOW', 'HER', "MOTHER'S", 'PROFICIENT', 'OF', 'JOURNALISM', 'FOR', 'WHICH', 'SHE', 'ALWAYS', 'SHOWED', 'CONSIDERABLE', 'APTITUDE'] +8188-269288-0003-2884: ref=['SHE', 'HAD', 'NO', 'IDEA', 'OF', 'ALLOWING', 'HERSELF', 'TO', 'BREAK', 'DOWN'] +8188-269288-0003-2884: hyp=['SHE', 'HAD', 'NO', 'IDEA', 'OF', 'ALLOWING', 'HERSELF', 'TO', 'BREAK', 'DOWN'] +8188-269288-0004-2885: ref=['WHAT', 'DO', 'YOU', 'MEAN', 'REPLIED', 'LESLIE'] +8188-269288-0004-2885: hyp=['WHAT', 'DO', 'YOU', 'MEAN', 'REPLIED', 'LISLEY'] +8188-269288-0005-2886: ref=['WHY', 'YOU', 'WILL', 'BE', 'PARTING', 'FROM', 'ME', 'YOU', 'KNOW'] +8188-269288-0005-2886: hyp=['WHY', 'YOU', 'WILL', 'BE', 'PARTING', 'FROM', 'ME', 'YOU', 'KNOW'] +8188-269288-0006-2887: ref=['I', "WON'T", 'BE', 'THE', 'CONSTANT', 'WORRY', 'AND', 'PLAGUE', 'OF', 'YOUR', 'LIFE'] +8188-269288-0006-2887: hyp=['I', "WON'T", 'BE', 'THE', 'CONSTANT', 'WORRY', 'AND', 'PLAGUE', 'OF', 'YOUR', 'LIFE'] +8188-269288-0007-2888: ref=['IT', 'IS', 'THIS', 'IF', 'BY', 'ANY', 'CHANCE', 'YOU', "DON'T", 'LEAVE', 'SAINT', "WODE'S", 'ANNIE', 'I', 'HOPE', 'YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'BE', 'YOUR', 'ROOMFELLOW', 'AGAIN', 'NEXT', 'TERM'] +8188-269288-0007-2888: hyp=['THIS', 'IS', 'THIS', 'IF', 'BY', 'ANY', 'CHANCE', 'YOU', "DON'T", 'LEAVE', 'SAINT', 'WORDS', 'ANNIE', 'I', 'HOPE', 'YOU', 'WILL', 'ALLOW', 'ME', 'TO', 'BE', 'YOUR', 'ROOM', 'FELLOW', 'AGAIN', 'NEXT', 'TERM'] +8188-269288-0008-2889: ref=['SAID', 'ANNIE', 'A', 'FLASH', 'OF', 'LIGHT', 'COMING', 'INTO', 'HER', 'EYES', 'AND', 'THEN', 'LEAVING', 'THEM'] +8188-269288-0008-2889: hyp=['SAID', 'ANNIE', 'A', 'FLASH', 'OF', 'LIGHT', 'COMING', 'INTO', 'HER', 'EYES', 'AND', 'THEN', 'LEAVING', 'THEM'] +8188-269288-0009-2890: ref=['BUT', 'SHE', 'ADDED', 'ABRUPTLY', 'YOU', 'SPEAK', 'OF', 'SOMETHING', 'WHICH', 'MUST', 'NOT', 'TAKE', 'PLACE'] +8188-269288-0009-2890: hyp=['BUT', 'SHE', 'ADDED', 'ABRUPTLY', 'YOU', 'SPEAK', 'OF', 'SOMETHING', 'WHICH', 'MUST', 'NOT', 'TAKE', 'PLACE'] +8188-269288-0010-2891: ref=['I', 'MUST', 'PASS', 'IN', 'HONORS', 'IF', 'I', "DON'T", 'I', 'SHALL', 'DIE'] +8188-269288-0010-2891: hyp=['I', 'MUST', 'PASS', 'AN', 'HONOUR', 'IF', 'I', "DON'T", 'I', 'SHALL', 'DIE'] +8188-269288-0011-2892: ref=['A', 'FEW', 'MOMENTS', 'LATER', 'THERE', 'CAME', 'A', 'TAP', 'AT', 'THE', 'DOOR'] +8188-269288-0011-2892: hyp=['A', 'FEW', 'MOMENTS', 'LATER', 'DICK', 'CAME', 'A', 'TAP', 'AT', 'THE', 'DOOR'] +8188-269288-0012-2893: ref=['LESLIE', 'OPENED', 'THE', 'DOOR'] +8188-269288-0012-2893: hyp=['LESLIE', 'OPENED', 'THE', 'DOOR'] +8188-269288-0013-2894: ref=['JANE', 'HERIOT', 'STOOD', 'WITHOUT'] +8188-269288-0013-2894: hyp=['JANE', 'HARRIET', 'STOOD', 'WITHOUT'] +8188-269288-0014-2895: ref=['THESE', 'LETTERS', 'HAVE', 'JUST', 'COME', 'FOR', 'YOU', 'AND', 'ANNIE', 'COLCHESTER', 'SHE', 'SAID', 'AND', 'AS', 'I', 'WAS', 'COMING', 'UPSTAIRS', 'I', 'THOUGHT', 'I', 'WOULD', 'LEAVE', 'THEM', 'WITH', 'YOU'] +8188-269288-0014-2895: 
hyp=['THESE', 'LETTERS', 'HAVE', 'JUST', 'COME', 'FOR', 'YOU', 'IN', 'ANY', 'COLCHESTER', 'SHE', 'SAID', 'AND', 'AS', 'I', 'WAS', 'COMING', 'UPSTAIRS', 'I', 'THOUGHT', 'I', 'WOULD', 'LEAVE', 'THEM', 'WITH', 'YOU'] +8188-269288-0015-2896: ref=['LESLIE', 'THANKED', 'HER', 'AND', 'EAGERLY', 'GRASPED', 'THE', 'LITTLE', 'PARCEL'] +8188-269288-0015-2896: hyp=['LISLEY', 'THANKED', 'HER', 'AND', 'EAGERLY', 'GRASPED', 'THE', 'LITTLE', 'PARCEL'] +8188-269288-0016-2897: ref=['HER', 'EYES', 'SHONE', 'WITH', 'PLEASURE', 'AT', 'THE', 'ANTICIPATION', 'OF', 'THE', 'DELIGHTFUL', 'TIME', 'SHE', 'WOULD', 'HAVE', 'REVELING', 'IN', 'THE', 'HOME', 'NEWS', 'THE', 'OTHER', 'LETTER', 'WAS', 'DIRECTED', 'TO', 'ANNIE', 'COLCHESTER'] +8188-269288-0016-2897: hyp=['HER', 'EYES', 'SHONE', 'WITH', 'PLEASURE', 'AT', 'THE', 'ANTICIPATION', 'OF', 'THE', 'DELIGHTFUL', 'TIME', 'SHE', 'WOULD', 'HAVE', 'RIVELING', 'IN', 'THE', 'HOME', 'NEWS', 'THE', 'OTHER', 'LETTER', 'WAS', 'DIRECTED', 'TO', 'ANY', 'COLCHESTER'] +8188-269288-0017-2898: ref=['HERE', 'IS', 'A', 'LETTER', 'FOR', 'YOU', 'ANNIE', 'CRIED', 'LESLIE'] +8188-269288-0017-2898: hyp=['HERE', 'IS', 'A', 'LETTER', 'FOR', 'YOU', 'ANY', 'CRIED', 'LIZZILY'] +8188-269288-0018-2899: ref=['HER', 'FACE', 'GREW', 'SUDDENLY', 'WHITE', 'AS', 'DEATH', 'WHAT', 'IS', 'IT', 'DEAR'] +8188-269288-0018-2899: hyp=['HER', 'FACE', 'GREW', 'SUDDENLY', 'WHITE', 'AS', 'DEATH', 'WHAT', 'IS', 'IT', 'DEAR'] +8188-269288-0019-2900: ref=['I', 'HAVE', 'BEEN', 'STARVING', 'OR', 'RATHER', 'I', 'HAVE', 'BEEN', 'THIRSTING'] +8188-269288-0019-2900: hyp=['I', 'HAVE', 'BEEN', 'STARLING', 'OR', 'RATHER', 'I', 'HAVE', 'BEEN', 'THIRSTING'] +8188-269288-0020-2901: ref=['WELL', 'READ', 'IT', 'IN', 'PEACE', 'SAID', 'LESLIE', 'I', "WON'T", 'DISTURB', 'YOU'] +8188-269288-0020-2901: hyp=['WELL', 'READ', 'IT', 'IN', 'PEACE', 'SAID', 'LIDNESLEY', 'I', "WON'T", 'DISTURB', 'YOU'] +8188-269288-0021-2902: ref=['I', 'AM', 'TRULY', 'GLAD', 'IT', 'HAS', 'COME'] +8188-269288-0021-2902: hyp=['I', 'AM', 'TRULY', 'GLAD', 'IT', 'HAS', 'COME'] +8188-269288-0022-2903: ref=['LESLIE', 'SEATED', 'HERSELF', 'WITH', 'HER', 'BACK', 'TO', 'HER', 'COMPANION', 'AND', 'OPENED', 'HER', 'OWN', 'LETTERS'] +8188-269288-0022-2903: hyp=['LESLIE', 'SEATED', 'HERSELF', 'WITH', 'HER', 'BACK', 'TO', 'HER', 'COMPANION', 'AND', 'OPENED', 'HER', 'ON', 'LETTERS'] +8188-269288-0023-2904: ref=["DON'T", 'NOTICE', 'ME', 'REPLIED', 'ANNIE'] +8188-269288-0023-2904: hyp=["DON'T", 'NOTICE', 'ME', 'REPLIED', 'ANY'] +8188-269288-0024-2905: ref=['I', 'MUST', 'GO', 'INTO', 'THE', 'GROUNDS', 'THE', 'AIR', 'IS', 'STIFLING'] +8188-269288-0024-2905: hyp=['I', 'MUST', 'GO', 'INTO', 'THE', 'GROUNDS', 'THE', 'AIR', 'IS', 'STIFLING'] +8188-269288-0025-2906: ref=['BUT', 'THEY', 'ARE', 'JUST', 'SHUTTING', 'UP'] +8188-269288-0025-2906: hyp=['BUT', 'THEY', 'HAD', 'JUST', 'SHUTTING', 'UP'] +8188-269288-0026-2907: ref=['I', 'SHALL', 'GO', 'I', 'KNOW', 'A', 'WAY'] +8188-269288-0026-2907: hyp=['I', 'SHALL', 'GO', 'I', 'KNOW', 'A', 'WAY'] +8188-269288-0027-2908: ref=['JUST', 'AFTER', 'MIDNIGHT', 'SHE', 'ROSE', 'WITH', 'A', 'SIGH', 'TO', 'PREPARE', 'FOR', 'BED'] +8188-269288-0027-2908: hyp=['JUST', 'AFTER', 'MIDNIGHT', 'SHE', 'ROSE', 'WITH', 'A', 'SIGH', 'TO', 'PREPARE', 'FOR', 'BED'] +8188-269288-0028-2909: ref=['SHE', 'LOOKED', 'ROUND', 'THE', 'ROOM'] +8188-269288-0028-2909: hyp=['SHE', 'LOOKED', 'ROUND', 'THE', 'ROOM'] +8188-269288-0029-2910: ref=['NOW', 'I', 'REMEMBER', 'SHE', 'GOT', 'A', 'LETTER', 'WHICH', 'UPSET', 'HER', 'VERY', 'MUCH', 'AND', 'WENT', 'OUT'] 
+8188-269288-0029-2910: hyp=['NOW', 'I', 'REMEMBER', 'SHE', 'GOT', 'A', 'LETTER', 'WHICH', 'UPSET', 'HER', 'VERY', 'MUCH', 'AND', 'WENT', 'OUT'] +8188-269288-0030-2911: ref=['LESLIE', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'FLUNG', 'IT', 'OPEN', 'SHE', 'PUT', 'HER', 'HEAD', 'OUT', 'AND', 'TRIED', 'TO', 'PEER', 'INTO', 'THE', 'DARKNESS', 'BUT', 'THE', 'MOON', 'HAD', 'ALREADY', 'SET', 'AND', 'SHE', 'COULD', 'NOT', 'SEE', 'MORE', 'THAN', 'A', 'COUPLE', 'OF', 'YARDS', 'IN', 'FRONT', 'OF', 'HER'] +8188-269288-0030-2911: hyp=['LISLEY', 'WENT', 'TO', 'THE', 'WINDOW', 'AND', 'FLUNG', 'IT', 'OPEN', 'SHE', 'PUT', 'HER', 'HEAD', 'OUT', 'AND', 'TRIED', 'TO', 'PEER', 'INTO', 'THE', 'DARKNESS', 'BUT', 'THE', 'MOON', 'HAD', 'ALREADY', 'SET', 'AND', 'SHE', 'COULD', 'NOT', 'SEE', 'MORE', 'THAN', 'A', 'COUPLE', 'OF', 'YARDS', 'IN', 'FRONT', 'OF', 'HER'] +8188-269288-0031-2912: ref=['SHE', 'IS', 'A', 'VERY', 'QUEER', 'ERRATIC', 'CREATURE', 'AND', 'THAT', 'LETTER', 'THERE', 'WAS', 'BAD', 'NEWS', 'IN', 'THAT', 'LETTER'] +8188-269288-0031-2912: hyp=['SHE', 'IS', 'A', 'VERY', 'QUEER', 'THE', 'RATTIC', 'CREATURE', 'AND', 'THAT', 'LETTER', 'THERE', 'WAS', 'BAD', 'NEWS', 'IN', 'THAT', 'LETTER'] +8188-269288-0032-2913: ref=['WHAT', 'CAN', 'SHE', 'BE', 'DOING', 'OUT', 'BY', 'HERSELF'] +8188-269288-0032-2913: hyp=['WHAT', "CAN'T", 'YOU', 'DOING', 'OUT', 'BY', 'HERSELF'] +8188-269288-0033-2914: ref=['LESLIE', 'LEFT', 'THE', 'ROOM', 'BUT', 'SHE', 'HAD', 'SCARCELY', 'GONE', 'A', 'DOZEN', 'PACES', 'DOWN', 'THE', 'CORRIDOR', 'BEFORE', 'SHE', 'MET', 'ANNIE', 'RETURNING'] +8188-269288-0033-2914: hyp=['THIS', 'LILY', 'LEFT', 'THE', 'ROOM', 'BUT', 'SHE', 'HAD', 'SCARCELY', 'GONE', 'A', 'DOZEN', 'PLACES', 'DOWN', 'THE', 'CORRIDOR', 'BEFORE', 'SHE', 'MET', 'ANY', 'RETURNING'] +8188-269288-0034-2915: ref=["ANNIE'S", 'EYES', 'WERE', 'VERY', 'BRIGHT', 'HER', 'CHEEKS', 'WERE', 'NO', 'LONGER', 'PALE', 'AND', 'THERE', 'WAS', 'A', 'BRILLIANT', 'COLOR', 'IN', 'THEM'] +8188-269288-0034-2915: hyp=['ANY', 'THOUGHT', 'WERE', 'VERY', 'BRIGHT', 'HER', 'CHEEKS', 'WERE', 'NO', 'LONGER', 'PALE', 'AND', 'THERE', 'WAS', 'A', 'BRILLIANT', 'COLOUR', 'IN', 'THEM'] +8188-269288-0035-2916: ref=['SHE', 'DID', 'NOT', 'TAKE', 'THE', 'LEAST', 'NOTICE', 'OF', 'LESLIE', 'BUT', 'GOING', 'INTO', 'THE', 'ROOM', 'SHUT', 'THE', 'DOOR'] +8188-269288-0035-2916: hyp=['SHE', 'DID', 'NOT', 'TAKE', 'THE', 'LEAST', 'NOTICE', 'OF', 'PLEASING', 'BUT', 'GOING', 'INTO', 'THE', 'ROOM', 'SHUT', 'THE', 'DOOR'] +8188-269288-0036-2917: ref=["DON'T", 'BEGIN', 'SAID', 'ANNIE'] +8188-269288-0036-2917: hyp=["DON'T", 'BEGIN', 'SAID', 'ANNIE'] +8188-269288-0037-2918: ref=["DON'T", 'BEGIN', 'WHAT', 'DO', 'YOU', 'MEAN'] +8188-269288-0037-2918: hyp=["DON'T", 'BEGIN', 'WHAT', 'DO', 'YOU', 'MEAN'] +8188-269288-0038-2919: ref=['I', 'MEAN', 'THAT', 'I', "DON'T", 'WANT', 'YOU', 'TO', 'BEGIN', 'TO', 'ASK', 'QUESTIONS'] +8188-269288-0038-2919: hyp=['I', 'MEAN', 'THAT', 'I', "DON'T", 'WANT', 'YOU', 'TO', 'BEGIN', 'TO', 'ASK', 'QUESTIONS'] +8188-269288-0039-2920: ref=['I', 'WALKED', 'UP', 'AND', 'DOWN', 'AS', 'FAST', 'AS', 'EVER', 'I', 'COULD', 'OUTSIDE', 'IN', 'ORDER', 'TO', 'MAKE', 'MYSELF', 'SLEEPY'] +8188-269288-0039-2920: hyp=['I', 'WALKED', 'UP', 'AND', 'DOWN', 'AS', 'FAST', 'AS', 'EVER', 'I', 'COULD', 'OUTSIDE', 'IN', 'ORDER', 'TO', 'MAKE', 'MYSELF', 'SLEEPY'] +8188-269288-0040-2921: ref=["DON'T", 'TALK', 'TO', 'ME', 'LESLIE', "DON'T", 'SAY', 'A', 'SINGLE', 'WORD'] +8188-269288-0040-2921: hyp=["THEY'RE", 'TALK', 'TO', 'ME', 'LISLEY', "DON'T", 'SAY', 'A', 'SINGLE', 'WORD'] 
+8188-269288-0041-2922: ref=['I', 'SHALL', 'GO', 'OFF', 'TO', 'SLEEP', 'THAT', 'IS', 'ALL', 'I', 'CARE', 'FOR'] +8188-269288-0041-2922: hyp=['I', 'SHALL', 'GO', 'OFF', 'TO', 'SLEEP', 'THAT', 'IS', 'ALL', 'I', 'CARE', 'FOR'] +8188-269288-0042-2923: ref=["DON'T", 'SAID', 'ANNIE'] +8188-269288-0042-2923: hyp=["DON'T", 'SAID', 'ANNIE'] +8188-269288-0043-2924: ref=['NOW', 'DRINK', 'THIS', 'AT', 'ONCE', 'SHE', 'SAID', 'IN', 'A', 'VOICE', 'OF', 'AUTHORITY', 'IF', 'YOU', 'REALLY', 'WISH', 'TO', 'SLEEP'] +8188-269288-0043-2924: hyp=['NOW', 'DRINK', 'THIS', 'AT', 'ONCE', 'SHE', 'SAID', 'IN', 'A', 'VOICE', 'OF', 'AUTHORITY', 'IF', 'YOU', 'REALLY', 'WISH', 'TO', 'SLEEP'] +8188-269288-0044-2925: ref=['ANNIE', 'STARED', 'VACANTLY', 'AT', 'THE', 'COCOA', 'THEN', 'SHE', 'UTTERED', 'A', 'LAUGH'] +8188-269288-0044-2925: hyp=['ANY', 'STEERED', 'VACANTLY', 'AT', 'THE', 'CUCKOO', 'DID', 'SHE', 'UTTERED', 'A', 'LAUGH'] +8188-269288-0045-2926: ref=['DRINK', 'THAT', 'SHE', 'SAID'] +8188-269288-0045-2926: hyp=['DRINK', 'THAT', 'SHE', 'SAID'] +8188-269288-0046-2927: ref=['DO', 'YOU', 'WANT', 'TO', 'KILL', 'ME', "DON'T", 'TALK', 'ANY', 'MORE'] +8188-269288-0046-2927: hyp=['DO', 'YOU', 'WANT', 'TO', 'KILL', 'ME', "DON'T", 'TALK', 'ANY', 'MORE'] +8188-269288-0047-2928: ref=['I', 'AM', 'SLEEPY', 'I', 'SHALL', 'SLEEP'] +8188-269288-0047-2928: hyp=['I', 'AM', 'SLEEPY', 'I', 'SHALL', 'SLEEP'] +8188-269288-0048-2929: ref=['SHE', 'GOT', 'INTO', 'BED', 'AS', 'SHE', 'SPOKE', 'AND', 'WRAPPED', 'THE', 'CLOTHES', 'TIGHTLY', 'ROUND', 'HER'] +8188-269288-0048-2929: hyp=['SHE', 'GOT', 'INTO', 'BED', 'AS', 'SHE', 'SPOKE', 'AND', 'WRAPPED', 'THE', 'CLOTHES', 'TIGHTLY', 'ROUND', 'HER'] +8188-269288-0049-2930: ref=["CAN'T", 'YOU', 'MANAGE', 'WITH', 'A', 'CANDLE', 'JUST', 'FOR', 'ONCE'] +8188-269288-0049-2930: hyp=['CAN', 'YOU', 'MANAGE', 'WITH', 'A', 'CANDLE', 'JUST', 'FOR', 'ONCE'] +8188-269288-0050-2931: ref=['CERTAINLY', 'SAID', 'LESLIE'] +8188-269288-0050-2931: hyp=['CERTAINLY', 'SAID', 'IT', 'EASILY'] +8188-269288-0051-2932: ref=['SHE', 'TURNED', 'OFF', 'THE', 'LIGHT', 'AND', 'LIT', 'A', 'CANDLE', 'WHICH', 'SHE', 'PUT', 'BEHIND', 'HER', 'SCREEN', 'THEN', 'PREPARED', 'TO', 'GET', 'INTO', 'BED'] +8188-269288-0051-2932: hyp=['SHE', 'TURNED', 'OFF', 'THE', 'LIGHT', 'AND', 'LET', 'HER', 'CANDLE', 'WOULD', 'SHE', 'PUT', 'BEHIND', 'HER', 'SCREEN', 'THEN', 'PREPARED', 'TO', 'GET', 'INTO', 'BED'] +8188-269288-0052-2933: ref=["ANNIE'S", 'MANNER', 'WAS', 'VERY', 'MYSTERIOUS'] +8188-269288-0052-2933: hyp=["ANY'S", 'MANNER', 'WAS', 'VERY', 'MYSTERIOUS'] +8188-269288-0053-2934: ref=['ANNIE', 'DID', 'NOT', 'MEAN', 'TO', 'CONFIDE', 'IN', 'ANYONE', 'THAT', 'NIGHT', 'AND', 'THE', 'KINDEST', 'THING', 'WAS', 'TO', 'LEAVE', 'HER', 'ALONE'] +8188-269288-0053-2934: hyp=['AND', 'HE', 'DID', 'NOT', 'MEAN', 'TO', 'CONFINE', 'IN', 'ANY', 'ONE', 'THAT', 'NIGHT', 'AND', 'THE', 'KINDEST', 'THING', 'WAS', 'TO', 'LEAVE', 'HER', 'ALONE'] +8188-269288-0054-2935: ref=['TIRED', 'OUT', 'LESLIE', 'HERSELF', 'DROPPED', 'ASLEEP'] +8188-269288-0054-2935: hyp=['TIE', 'IT', 'OUT', 'LESLIE', 'HERSELF', 'DROPPED', 'ASLEEP'] +8188-269288-0055-2936: ref=['ANNIE', 'IS', 'THAT', 'YOU', 'SHE', 'CALLED', 'OUT'] +8188-269288-0055-2936: hyp=['ANY', 'IS', 'THAT', 'YOU', 'SHE', 'CALLED', 'OUT'] +8188-269288-0056-2937: ref=['THERE', 'WAS', 'NO', 'REPLY', 'BUT', 'THE', 'SOUND', 'OF', 'HURRYING', 'STEPS', 'CAME', 'QUICKER', 'AND', 'QUICKER', 'NOW', 'AND', 'THEN', 'THEY', 'WERE', 'INTERRUPTED', 'BY', 'A', 'GROAN'] +8188-269288-0056-2937: hyp=['THERE', 'WAS', 'NO', 'REPLY', 'BUT', 'THE', 
'SOUND', 'OF', 'HURRYING', 'STEPS', 'CAME', 'QUICKER', 'AND', 'QUICKER', 'NOW', 'AND', 'THEN', 'THEIR', 'INTERRUPTED', 'BY', 'A', 'GROAN'] +8188-269288-0057-2938: ref=['OH', 'THIS', 'WILL', 'KILL', 'ME', 'MY', 'HEART', 'WILL', 'BREAK', 'THIS', 'WILL', 'KILL', 'ME'] +8188-269288-0057-2938: hyp=['OH', 'THIS', 'WILL', 'KILL', 'ME', 'MY', 'HEART', 'WILL', 'BREAK', 'THIS', 'WILL', 'KILL', 'ME'] +8188-269290-0000-2823: ref=['THE', 'GUILD', 'OF', 'SAINT', 'ELIZABETH'] +8188-269290-0000-2823: hyp=['THE', 'GUILD', 'OF', 'SAINT', 'ELIZABETH'] +8188-269290-0001-2824: ref=['IMMEDIATELY', 'AFTER', 'DINNER', 'THAT', 'EVENING', 'LESLIE', 'RAN', 'UP', 'TO', 'HER', 'ROOM', 'TO', 'MAKE', 'PREPARATIONS', 'FOR', 'HER', 'VISIT', 'TO', 'EAST', 'HALL'] +8188-269290-0001-2824: hyp=['IMMEDIATELY', 'AFTER', 'DINNER', 'THAT', 'EVENING', 'LESLIE', 'RAN', 'UP', 'TO', 'HER', 'ROOM', 'TO', 'MAKE', 'PREPARATIONS', 'FOR', 'HER', 'VISIT', 'TO', 'EAST', 'HALL'] +8188-269290-0002-2825: ref=["I'M", 'NOT', 'COMING', 'SAID', 'ANNIE'] +8188-269290-0002-2825: hyp=["I'M", 'NOT', 'COMING', 'SAID', 'ANNIE'] +8188-269290-0003-2826: ref=['EVERY', 'STUDENT', 'IS', 'TO', 'BE', 'IN', 'EAST', 'HALL', 'AT', 'HALF', 'PAST', 'EIGHT'] +8188-269290-0003-2826: hyp=['EVERY', 'STUDENT', 'IS', 'TO', 'BE', 'AN', 'EAST', 'HALL', 'AT', 'HALF', 'PAST', 'EIGHT'] +8188-269290-0004-2827: ref=['IT', "DOESN'T", 'MATTER', 'REPLIED', 'ANNIE', 'WHETHER', 'IT', 'IS', 'AN', 'ORDER', 'OR', 'NOT', "I'M", 'NOT', 'COMING', 'SAY', 'NOTHING', 'ABOUT', 'ME', 'PLEASE'] +8188-269290-0004-2827: hyp=['IT', 'DOES', 'MATTER', 'REPLIED', 'ANNIE', 'WHITHER', 'IT', 'IS', 'AN', 'ORDER', 'OR', 'NOT', 'I', 'AM', 'NOT', 'COMING', 'SAY', 'NOTHING', 'ABOUT', 'ME', 'PLEASE'] +8188-269290-0005-2828: ref=['IT', 'BURNED', 'AS', 'IF', 'WITH', 'FEVER'] +8188-269290-0005-2828: hyp=['IT', 'BURNED', 'AS', 'IF', 'WITH', 'FEVER'] +8188-269290-0006-2829: ref=['YOU', "DON'T", 'KNOW', 'WHAT', 'A', 'TRIAL', 'IT', 'IS', 'FOR', 'ME', 'TO', 'HAVE', 'YOU', 'HERE'] +8188-269290-0006-2829: hyp=['YOU', "DON'T", 'KNOW', 'WHAT', 'A', 'TRIAL', 'IT', 'IS', 'FOR', 'ME', 'TO', 'HAVE', 'YOU', 'HERE'] +8188-269290-0007-2830: ref=['I', 'WANT', 'TO', 'BE', 'ALONE', 'GO'] +8188-269290-0007-2830: hyp=['I', 'WANT', 'TO', 'BE', 'ALONE', 'GO'] +8188-269290-0008-2831: ref=['I', 'KNOW', 'YOU', "DON'T", 'QUITE', 'MEAN', 'WHAT', 'YOU', 'SAY', 'SAID', 'LESLIE', 'BUT', 'OF', 'COURSE', 'IF', 'YOU', 'REALLY', 'WISH', 'ME'] +8188-269290-0008-2831: hyp=['I', 'KNOW', 'YOU', "DON'T", 'QUITE', 'MEAN', 'WHAT', 'YOU', 'SAY', 'SAID', 'LIZZIE', 'BUT', 'OF', 'COURSE', 'IF', 'YOU', 'REALLY', 'WISH', 'ME'] +8188-269290-0009-2832: ref=['YOU', 'FRET', 'ME', 'BEYOND', 'ENDURANCE'] +8188-269290-0009-2832: hyp=['YOU', 'FRET', 'ME', 'BEYOND', 'ENDURANCE'] +8188-269290-0010-2833: ref=['WRAPPING', 'A', 'PRETTY', 'BLUE', 'SHAWL', 'ROUND', 'HER', 'HEAD', 'AND', 'SHOULDERS', 'SHE', 'TURNED', 'TO', 'ANNIE'] +8188-269290-0010-2833: hyp=['WRAPPING', 'A', 'PRETTY', 'BLUE', 'SHAWL', 'AROUND', 'A', 'HIDDEN', 'SHOULDERS', 'SHE', 'TURNED', 'TO', 'ANNIE'] +8188-269290-0011-2834: ref=['LESLIE', 'WAS', 'JUST', 'CLOSING', 'THE', 'DOOR', 'BEHIND', 'HER', 'WHEN', 'ANNIE', 'CALLED', 'AFTER', 'HER'] +8188-269290-0011-2834: hyp=['LESLIE', 'WAS', 'JUST', 'CLOSING', 'THE', 'DOOR', 'BEHIND', 'HER', 'WHEN', 'ANY', 'CALLED', 'AFTER', 'HER'] +8188-269290-0012-2835: ref=['I', 'TOOK', 'IT', 'OUT', 'SAID', 'LESLIE', 'TOOK', 'IT', 'OUT'] +8188-269290-0012-2835: hyp=['I', 'TOOK', 'IT', 'OUT', 'SAID', 'LISLEY', 'TOOK', 'IT', 'OUT'] +8188-269290-0013-2836: ref=['HAVE', 'THE', 
'GOODNESS', 'TO', 'FIND', 'IT', 'AND', 'PUT', 'IT', 'BACK'] +8188-269290-0013-2836: hyp=['HAVE', 'THE', 'GOODNESS', 'TO', 'FIND', 'IT', 'AND', 'PUT', 'IT', 'BACK'] +8188-269290-0014-2837: ref=['BUT', "DON'T", 'LOCK', 'ME', 'OUT', 'PLEASE', 'ANNIE'] +8188-269290-0014-2837: hyp=['BUT', "DON'T", 'LOCK', 'ME', 'OUT', 'PLEASE', 'ANY'] +8188-269290-0015-2838: ref=['OH', 'I', "WON'T", 'LOCK', 'YOU', 'OUT', 'SHE', 'SAID', 'BUT', 'I', 'MUST', 'HAVE', 'THE', 'KEY'] +8188-269290-0015-2838: hyp=['OH', 'I', "WON'T", 'LOCK', 'YOU', 'ABOUT', 'SHE', 'SAID', 'BUT', 'I', 'MUST', 'HAVE', 'THE', 'KEY'] +8188-269290-0016-2839: ref=['JANE', "HERIOT'S", 'VOICE', 'WAS', 'HEARD', 'IN', 'THE', 'PASSAGE'] +8188-269290-0016-2839: hyp=['JANE', "HEARET'S", 'VOICE', 'WAS', 'HEARD', 'IN', 'THE', 'PASSAGE'] +8188-269290-0017-2840: ref=['AS', 'SHE', 'WALKED', 'DOWN', 'THE', 'CORRIDOR', 'SHE', 'HEARD', 'IT', 'BEING', 'TURNED', 'IN', 'THE', 'LOCK'] +8188-269290-0017-2840: hyp=['AS', 'SHE', 'WALKED', 'DOWN', 'THE', 'CORRIDOR', 'SHE', 'HEARD', 'IT', 'BEING', 'TURNED', 'TO', 'THE', 'LOCK'] +8188-269290-0018-2841: ref=['WHAT', 'CAN', 'THIS', 'MEAN', 'SHE', 'SAID', 'TO', 'HERSELF'] +8188-269290-0018-2841: hyp=['WHAT', 'CAN', 'THIS', 'MEAN', 'SHE', 'SAID', 'TO', 'HERSELF'] +8188-269290-0019-2842: ref=['OH', 'I', "WON'T", 'PRESS', 'YOU', 'REPLIED', 'JANE'] +8188-269290-0019-2842: hyp=['OH', 'I', "WON'T", 'PRESS', 'YOU', 'REPLIED', 'JANE'] +8188-269290-0020-2843: ref=['OH', 'I', 'SHALL', 'NEVER', 'DO', 'THAT', 'REPLIED', 'LESLIE'] +8188-269290-0020-2843: hyp=['OH', 'I', 'SHALL', 'NEVER', 'DO', 'THAT', 'REPLIED', 'LISLEY'] +8188-269290-0021-2844: ref=['YOU', 'SEE', 'ALL', 'THE', 'GIRLS', 'EXCEPT', 'EILEEN', 'AND', 'MARJORIE', 'LAUGH', 'AT', 'HER', 'AND', 'THAT', 'SEEMS', 'TO', 'ME', 'TO', 'MAKE', 'HER', 'WORSE'] +8188-269290-0021-2844: hyp=['YOU', 'SEE', 'ALL', 'THE', 'GIRLS', 'EXCEPT', 'AILEEN', 'AND', 'MARJORIE', 'LAUGH', 'AT', 'HER', 'AND', 'THAT', 'SEEMS', 'TO', 'ME', 'TO', 'MAKE', 'HER', 'WORSE'] +8188-269290-0022-2845: ref=['SOME', 'DAY', 'JANE', 'YOU', 'MUST', 'SEE', 'HER'] +8188-269290-0022-2845: hyp=['SOME', 'DAY', 'JANE', 'YOU', 'MUST', 'SEE', 'HER'] +8188-269290-0023-2846: ref=['IF', 'YOU', 'ARE', 'IN', 'LONDON', 'DURING', 'THE', 'SUMMER', 'YOU', 'MUST', 'COME', 'AND', 'PAY', 'US', 'A', 'VISIT', 'WILL', 'YOU'] +8188-269290-0023-2846: hyp=['IF', 'YOU', 'IN', 'LONDON', 'DURING', 'THE', 'SUMMER', 'YOU', 'MUST', 'COME', 'IN', 'PAIR', 'FOR', 'VISIT', 'WILL', 'YOU'] +8188-269290-0024-2847: ref=['THAT', 'IS', 'IF', 'YOU', 'CARE', 'TO', 'CONFIDE', 'IN', 'ME'] +8188-269290-0024-2847: hyp=['THAT', 'IS', 'IF', 'YOU', 'CARE', 'TO', 'CONFIDE', 'IN', 'ME'] +8188-269290-0025-2848: ref=['I', 'BELIEVE', 'POOR', 'ANNIE', 'IS', 'DREADFULLY', 'UNHAPPY'] +8188-269290-0025-2848: hyp=['I', 'BELIEVE', 'POOR', 'ANNIE', 'IS', 'DREADFULLY', 'UNHAPPY'] +8188-269290-0026-2849: ref=["THAT'S", 'JUST', 'IT', 'JANE', 'THAT', 'IS', 'WHAT', 'FRIGHTENS', 'ME', 'SHE', 'REFUSES', 'TO', 'COME'] +8188-269290-0026-2849: hyp=["THAT'S", 'JUST', 'A', 'CHAIN', 'THAT', 'IS', 'WHAT', 'FRIGHTENS', 'ME', 'SHE', 'REFUSES', 'TO', 'COME'] +8188-269290-0027-2850: ref=['REFUSES', 'TO', 'COME', 'SHE', 'CRIED'] +8188-269290-0027-2850: hyp=['REFUSES', 'TO', 'COME', 'SHE', 'CRIED'] +8188-269290-0028-2851: ref=['SHE', 'WILL', 'GET', 'INTO', 'AN', 'AWFUL', 'SCRAPE'] +8188-269290-0028-2851: hyp=["SHE'LL", 'GET', 'IN', 'HER', 'AWFUL', 'SCRAPE'] +8188-269290-0029-2852: ref=['I', 'AM', 'SURE', 'SHE', 'IS', 'ILL', 'SHE', 'WORKS', 'TOO', 'HARD', 'AND', 'SHE', 'BUT', 'THERE', 'I', 
"DON'T", 'KNOW', 'THAT', 'I', 'OUGHT', 'TO', 'SAY', 'ANY', 'MORE'] +8188-269290-0029-2852: hyp=['I', 'AM', 'SURE', 'SHE', 'IS', 'ILL', 'SHE', 'WORKS', 'TOO', 'HARD', 'AND', 'SHE', 'BUT', 'THERE', 'I', "DON'T", 'KNOW', 'THAT', 'I', 'OUGHT', 'TO', 'SAY', 'ANY', 'MORE'] +8188-269290-0030-2853: ref=["I'LL", 'WAIT', 'FOR', 'YOU', 'HERE', 'SAID', 'LESLIE'] +8188-269290-0030-2853: hyp=["I'LL", 'WAIT', 'FOR', 'YOU', 'HERE', 'SAID', 'LISLEY'] +8188-269290-0031-2854: ref=['DO', 'COME', 'ANNIE', 'DO'] +8188-269290-0031-2854: hyp=['DO', 'COME', 'ANY', 'DO'] +8188-269290-0032-2855: ref=['SCARCELY', 'LIKELY', 'REPLIED', 'LESLIE', 'SHE', 'TOLD', 'ME', 'SHE', 'WAS', 'DETERMINED', 'NOT', 'TO', 'COME', 'TO', 'THE', 'MEETING'] +8188-269290-0032-2855: hyp=['SCARCELY', 'LIKELY', 'REPLIED', 'LESLIE', 'SHE', 'TOLD', 'ME', 'SHE', 'WAS', 'DETERMINED', 'NOT', 'TO', 'COME', 'TO', 'THE', 'MEETING'] +8188-269290-0033-2856: ref=['BUT', 'MARJORIE', 'AND', 'EILEEN', 'HAD', 'ALREADY', 'DEPARTED', 'AND', 'LESLIE', 'AND', 'JANE', 'FOUND', 'THEMSELVES', 'AMONG', 'THE', 'LAST', 'STUDENTS', 'TO', 'ARRIVE', 'AT', 'THE', 'GREAT', 'EAST', 'HALL'] +8188-269290-0033-2856: hyp=['BUT', 'MARJORIE', 'AND', 'IDLEEN', 'HAD', 'ALREADY', 'DEPARTED', 'AND', 'LISLEY', 'AND', 'JANE', 'FOUND', 'THEMSELVES', 'AMONG', 'THE', 'LAST', 'STUDENTS', 'TO', 'ARRIVE', 'AT', 'THE', 'GREAT', 'EAST', 'HALL'] +8188-269290-0034-2857: ref=['MISS', 'LAUDERDALE', 'WAS', 'STANDING', 'WITH', 'THE', 'OTHER', 'TUTORS', 'AND', 'PRINCIPALS', 'OF', 'THE', 'DIFFERENT', 'HALLS', 'ON', 'A', 'RAISED', 'PLATFORM'] +8188-269290-0034-2857: hyp=['MISS', 'LORDAIL', 'WAS', 'STANDING', 'WITH', 'THE', 'OTHER', 'TUTORS', 'AND', 'PRINCIPLES', 'OF', 'THE', 'DIFFERENT', 'HALLS', 'ARE', 'A', 'RAISED', 'PLATFORM'] +8188-269290-0035-2858: ref=['THEN', 'A', 'ROLL', 'CALL', 'WAS', 'GONE', 'THROUGH', 'BY', 'ONE', 'OF', 'THE', 'TUTORS', 'THE', 'ONLY', 'ABSENTEE', 'WAS', 'ANNIE', 'COLCHESTER'] +8188-269290-0035-2858: hyp=['THEN', 'A', 'ROCKLE', 'WAS', 'GONE', 'THROUGH', 'BY', 'ONE', 'OF', 'THE', 'TUTORS', 'THE', 'ONLY', 'EBSENTEE', 'WAS', 'ANY', 'COLCHESTER'] +8188-269290-0036-2859: ref=['THE', 'PHYSICAL', 'PART', 'OF', 'YOUR', 'TRAINING', 'AND', 'ALSO', 'THE', 'MENTAL', 'PART', 'ARE', 'ABUNDANTLY', 'SUPPLIED', 'IN', 'THIS', 'GREAT', 'HOUSE', 'OF', 'LEARNING', 'SHE', 'CONTINUED', 'BUT', 'THE', 'SPIRITUAL', 'PART', 'IT', 'SEEMS', 'TO', 'ME', 'OUGHT', 'NOW', 'TO', 'BE', 'STRENGTHENED'] +8188-269290-0036-2859: hyp=['THE', 'PHYSICAL', 'PART', 'OF', 'THE', 'ORTRAINING', 'AND', 'ALSO', 'THE', 'MENTAL', 'PART', 'ARE', 'ABUNDANTLY', 'SUPPLIED', 'IN', 'THIS', 'GREAT', 'HOUSE', 'OF', 'LEARNING', 'SHE', 'CONTINUED', 'BUT', 'THE', 'SPIRITUAL', 'PART', 'IT', 'SEEMS', 'TO', 'ME', 'OUGHT', 'NOW', 'TO', 'BE', 'STRENGTHENED'] +8188-269290-0037-2860: ref=['HEAR', 'HEAR', 'AND', 'ONCE', 'AGAIN', 'HEAR'] +8188-269290-0037-2860: hyp=['HEAR', 'HERE', 'AND', 'ONCE', 'AGAIN', 'HAIR'] +8188-269290-0038-2861: ref=['SHE', 'UTTERED', 'HER', 'STRANGE', 'REMARK', 'STANDING', 'UP'] +8188-269290-0038-2861: hyp=['SHE', 'UTTERED', 'A', 'STREAM', 'REMARK', 'STANDING', 'UP'] +8188-269290-0039-2862: ref=['MARJORIE', 'AND', 'EILEEN', 'WERE', 'CLOSE', 'TO', 'HER'] +8188-269290-0039-2862: hyp=['MARJORIE', 'AND', 'ILINE', 'WERE', 'CLOSE', 'TO', 'HER'] +8188-269290-0040-2863: ref=['I', 'WILL', 'TALK', 'WITH', 'YOU', 'BELLE', 'ACHESON', 'PRESENTLY', 'SHE', 'SAID'] +8188-269290-0040-2863: hyp=['I', 'WILL', 'TALK', 'WITH', 'YOU', 'BELL', 'ARTISON', 'PRESENTLY', 'SHE', 'SAID'] +8188-269290-0041-2864: ref=['THE', 'NAMES', 'OF', 
'PROPOSED', 'MEMBERS', 'ARE', 'TO', 'BE', 'SUBMITTED', 'TO', 'ME', 'BEFORE', 'THIS', 'DAY', 'WEEK'] +8188-269290-0041-2864: hyp=['THE', 'NAMES', 'OF', 'THE', 'PROPOSED', 'MEMBERS', 'ARE', 'TO', 'BE', 'SUBMITTED', 'TO', 'ME', 'BEFORE', 'THIS', 'DAY', 'WEEK'] +8188-269290-0042-2865: ref=['AM', 'I', 'MY', "BROTHER'S", 'KEEPER'] +8188-269290-0042-2865: hyp=['AM', 'I', 'MY', "BROTHER'S", 'KEEPER'] +8188-269290-0043-2866: ref=['YOU', 'ASK', 'SHE', 'CONTINUED'] +8188-269290-0043-2866: hyp=['YOU', 'ASK', 'SHE', 'CONTINUED'] +8188-269290-0044-2867: ref=['GOD', 'ANSWERS', 'TO', 'EACH', 'OF', 'YOU', 'YOU', 'ARE'] +8188-269290-0044-2867: hyp=['GOD', 'AUTHEST', 'EACH', 'OF', 'YOU', 'YOU', 'ARE'] +8188-269290-0045-2868: ref=['THE', 'WORLD', 'SAYS', 'NO', 'I', 'AM', 'NOT', 'BUT', 'GOD', 'SAYS', 'YES', 'YOU', 'ARE'] +8188-269290-0045-2868: hyp=['THE', 'WORLD', 'TASTE', 'NO', 'I', 'AM', 'NOT', 'BUT', 'GOD', 'SAKES', 'YES', 'YOU', 'ARE'] +8188-269290-0046-2869: ref=['ALL', 'MEN', 'ARE', 'YOUR', 'BROTHERS'] +8188-269290-0046-2869: hyp=['ALL', 'MEN', 'ARE', 'BROTHERS'] +8188-269290-0047-2870: ref=['FOR', 'ALL', 'WHO', 'SIN', 'ALL', 'WHO', 'SUFFER', 'YOU', 'ARE', 'TO', 'A', 'CERTAIN', 'EXTENT', 'RESPONSIBLE'] +8188-269290-0047-2870: hyp=['FOR', 'ALL', 'WHO', 'SIN', 'ALL', 'WHO', 'SUFFER', 'YOU', 'ARE', 'TO', 'EXERT', 'AN', 'EXTENT', 'RESPONSIBLE'] +8188-269290-0048-2871: ref=['AFTER', 'THE', 'ADDRESS', 'THE', 'GIRLS', 'THEMSELVES', 'WERE', 'ENCOURAGED', 'TO', 'SPEAK', 'AND', 'A', 'VERY', 'ANIMATED', 'DISCUSSION', 'FOLLOWED'] +8188-269290-0048-2871: hyp=['AFTER', 'THE', 'ADDRESS', 'THE', 'GIRLS', 'THEMSELVES', 'WERE', 'ENCOURAGED', 'TO', 'SPEAK', 'AND', 'A', 'VERY', 'ANIMATED', 'DISCUSSION', 'FOLLOWED'] +8188-269290-0049-2872: ref=['IT', 'WAS', 'PAST', 'TEN', "O'CLOCK", 'WHEN', 'SHE', 'LEFT', 'THE', 'HALL'] +8188-269290-0049-2872: hyp=['IT', 'WAS', 'PAST', 'TEN', "O'CLOCK", 'WHEN', 'SHE', 'LEFT', 'THE', 'HALL'] +8188-269290-0050-2873: ref=['JUST', 'AS', 'SHE', 'WAS', 'DOING', 'SO', 'MISS', 'FRERE', 'CAME', 'UP'] +8188-269290-0050-2873: hyp=['JUST', 'AS', 'SHE', 'WAS', 'DOING', 'SO', 'WAS', 'FRERE', 'CAME', 'UP'] +8188-269290-0051-2874: ref=['ANNIE', 'COLCHESTER', 'IS', 'YOUR', 'ROOMFELLOW', 'IS', 'SHE', 'NOT', 'SHE', 'SAID'] +8188-269290-0051-2874: hyp=['ANY', 'COLCHISED', 'AS', 'YOUR', 'ROOM', 'FELLOW', 'IS', 'SHE', 'NOT', 'SHE', 'SAID'] +8188-269290-0052-2875: ref=['I', 'SEE', 'BY', 'YOUR', 'FACE', 'MISS', 'GILROY', 'THAT', 'YOU', 'ARE', 'DISTRESSED', 'ABOUT', 'SOMETHING', 'ARE', 'YOU', 'KEEPING', 'ANYTHING', 'BACK'] +8188-269290-0052-2875: hyp=['I', 'SEE', 'BY', 'YOUR', 'FACE', 'MY', 'SCALE', 'ROY', 'THAT', 'YOU', 'ARE', 'DISTRESSED', 'ABOUT', 'SOMETHING', 'ARE', 'KEEPING', 'ANYTHING', 'BACK'] +8188-269290-0053-2876: ref=['I', 'AM', 'AFRAID', 'I', 'AM', 'REPLIED', 'LESLIE', 'DISTRESS', 'NOW', 'IN', 'HER', 'TONE'] +8188-269290-0053-2876: hyp=['I', 'AM', 'AFRAID', 'I', 'AM', 'REPLIED', 'LESLIE', 'DISTRESSED', 'NOW', 'IN', 'HER', 'TONE'] +8188-269290-0054-2877: ref=['I', 'MUST', 'SEE', 'HER', 'MYSELF', 'EARLY', 'IN', 'THE', 'MORNING', 'AND', 'I', 'AM', 'QUITE', 'SURE', 'THAT', 'NOTHING', 'WILL', 'SATISFY', 'MISS', 'LAUDERDALE', 'EXCEPT', 'A', 'VERY', 'AMPLE', 'APOLOGY', 'AND', 'A', 'FULL', 'EXPLANATION', 'OF', 'THE', 'REASON', 'WHY', 'SHE', 'ABSENTED', 'HERSELF'] +8188-269290-0054-2877: hyp=['I', 'MUST', 'SEE', 'HER', 'MYSELF', 'EARLY', 'IN', 'THE', 'MORNING', 'AND', 'I', 'AM', 'QUITE', 'SURE', 'THAT', 'NOTHING', 'WILL', 'SATISFY', 'MISS', 'LAURDALE', 'EXCEPT', 'A', 'VERY', 'AMPLE', 'APOLOGY', 'AND', 'A', 
'FULL', 'EXPLANATION', 'OF', 'THE', 'REASON', 'WHY', 'SHE', 'ABSENTED', 'HERSELF'] +8188-269290-0055-2878: ref=['EXCUSES', 'MAKE', 'NO', 'DIFFERENCE'] +8188-269290-0055-2878: hyp=['EXCUSES', 'MAKE', 'NO', 'DIFFERENCE'] +8188-269290-0056-2879: ref=['THE', 'GIRL', 'WHO', 'BREAKS', 'THE', 'RULES', 'HAS', 'TO', 'BE', 'PUNISHED'] +8188-269290-0056-2879: hyp=['THE', 'GIRL', 'WHO', 'BREAKS', 'THE', 'RULES', 'HAVE', 'TO', 'BE', 'PUNISHED'] +8188-269290-0057-2880: ref=['I', 'WILL', 'TELL', 'HER'] +8188-269290-0057-2880: hyp=['I', 'WILL', 'TELL', 'HER'] +8188-274364-0000-2811: ref=['THE', 'COMMONS', 'ALSO', 'VOTED', 'THAT', 'THE', 'NEW', 'CREATED', 'PEERS', 'OUGHT', 'TO', 'HAVE', 'NO', 'VOICE', 'IN', 'THIS', 'TRIAL', 'BECAUSE', 'THE', 'ACCUSATION', 'BEING', 'AGREED', 'TO', 'WHILE', 'THEY', 'WERE', 'COMMONERS', 'THEIR', 'CONSENT', 'TO', 'IT', 'WAS', 'IMPLIED', 'WITH', 'THAT', 'OF', 'ALL', 'THE', 'COMMONS', 'OF', 'ENGLAND'] +8188-274364-0000-2811: hyp=['THE', 'COMMONS', 'ALSO', 'VOTED', 'THAT', 'THE', 'NEW', 'CREATED', 'PEERS', 'OUGHT', 'TO', 'HAVE', 'NO', 'VOICE', 'IN', 'THIS', 'TRIAL', 'BECAUSE', 'THE', 'ACCUSATION', 'BEING', 'AGREED', 'TO', 'WHILE', 'THEY', 'WERE', 'COMMONERS', 'THEIR', 'CONSENT', 'TO', 'IT', 'WAS', 'IMPLIED', 'WITH', 'THAT', 'OF', 'ALL', 'THE', 'COMMONS', 'OF', 'ENGLAND'] +8188-274364-0001-2812: ref=['IN', 'THE', 'GOVERNMENT', 'OF', 'IRELAND', 'HIS', 'ADMINISTRATION', 'HAD', 'BEEN', 'EQUALLY', 'PROMOTIVE', 'OF', 'HIS', "MASTER'S", 'INTEREST', 'AND', 'THAT', 'OF', 'THE', 'SUBJECTS', 'COMMITTED', 'TO', 'HIS', 'CARE'] +8188-274364-0001-2812: hyp=['IN', 'THE', 'GOVERNMENT', 'OF', 'IRELAND', 'HIS', 'ADMINISTRATION', 'HAD', 'BEEN', 'EQUALLY', 'PROMOTED', 'OF', 'HIS', "MASTER'S", 'INTEREST', 'AND', 'THAT', 'OF', 'THE', 'SUBJECTS', 'COMMITTED', 'TO', 'HIS', 'CARE'] +8188-274364-0002-2813: ref=['THE', 'CASE', 'OF', 'LORD', 'MOUNTNORRIS', 'OF', 'ALL', 'THOSE', 'WHICH', 'WERE', 'COLLECTED', 'WITH', 'SO', 'MUCH', 'INDUSTRY', 'IS', 'THE', 'MOST', 'FLAGRANT', 'AND', 'THE', 'LEAST', 'EXCUSABLE'] +8188-274364-0002-2813: hyp=['THE', 'CASE', 'OF', 'LORD', 'MONTORIS', 'OF', 'ALL', 'THOSE', 'WHICH', 'WERE', 'COLLECTED', 'WITH', 'SO', 'ACT', 'INDUSTRY', 'IS', 'THE', 'MOST', 'FLAGRANT', 'AND', 'THE', 'LEAST', 'EXCUSABLE'] +8188-274364-0003-2814: ref=['THE', 'COURT', 'WHICH', 'CONSISTED', 'OF', 'THE', 'CHIEF', 'OFFICERS', 'OF', 'THE', 'ARMY', 'FOUND', 'THE', 'CRIME', 'TO', 'BE', 'CAPITAL', 'AND', 'CONDEMNED', 'THAT', 'NOBLEMAN', 'TO', 'LOSE', 'HIS', 'HEAD'] +8188-274364-0003-2814: hyp=['THE', 'COURT', 'WHICH', 'CONSISTED', 'OF', 'THE', 'CHEAP', 'OFFICIALS', 'OF', 'THE', 'ARMY', 'FOUND', 'THE', 'CROWN', 'TO', 'BE', 'CAPITAL', 'AND', 'CONDEMNED', 'THAT', 'NOBLEMAN', 'TO', 'LOSE', 'HIS', 'HEAD'] +8188-274364-0004-2815: ref=['WHERE', 'THE', 'TOKEN', 'BY', 'WHICH', 'I', 'SHOULD', 'DISCOVER', 'IT'] +8188-274364-0004-2815: hyp=['WITH', 'A', 'TOKEN', 'BY', 'WHICH', 'I', 'SHALL', 'DISCOVER', 'IT'] +8188-274364-0005-2816: ref=['IT', 'IS', 'NOW', 'FULL', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'YEARS', 'SINCE', 'TREASONS', 'WERE', 'DEFINED', 'AND', 'SO', 'LONG', 'HAS', 'IT', 'BEEN', 'SINCE', 'ANY', 'MAN', 'WAS', 'TOUCHED', 'TO', 'THIS', 'EXTENT', 'UPON', 'THIS', 'CRIME', 'BEFORE', 'MYSELF'] +8188-274364-0005-2816: hyp=['IT', 'IS', 'NOW', 'A', 'FULL', 'TWO', 'HUNDRED', 'AND', 'FORTY', 'YEARS', 'SINCE', 'TREASONS', 'WERE', 'DEFINED', 'AND', 'SO', 'LONG', 'HAS', 'IT', 'BEEN', 'SINCE', 'ANY', 'MAN', 'WAS', 'TOUCHED', 'TO', 'THIS', 'EXTENT', 'UPON', 'THIS', 'CRIME', 'FOR', 'MYSELF'] +8188-274364-0006-2817: ref=['LET', 'US', 
'NOT', 'TO', 'OUR', 'OWN', 'DESTRUCTION', 'AWAKE', 'THOSE', 'SLEEPING', 'LIONS', 'BY', 'RATTLING', 'UP', 'A', 'COMPANY', 'OF', 'OLD', 'RECORDS', 'WHICH', 'HAVE', 'LAIN', 'FOR', 'SO', 'MANY', 'AGES', 'BY', 'THE', 'WALL', 'FORGOTTEN', 'AND', 'NEGLECTED'] +8188-274364-0006-2817: hyp=['LET', 'US', 'NOT', 'TO', 'HER', 'OWN', 'DESTRUCTION', 'AWAKE', 'THOSE', 'KEEPING', 'LIONS', 'BY', 'RATTLING', 'UP', 'A', 'COMPANY', 'OF', 'ALL', 'RICARDS', 'WHICH', 'HAVE', 'LAIN', 'FOR', 'SO', 'MANY', 'AGES', 'BY', 'THE', 'WAR', 'FORGOTTEN', 'AND', 'NEGLECTED'] +8188-274364-0007-2818: ref=['HOWEVER', 'THESE', 'GENTLEMEN', 'AT', 'THE', 'BAR', 'SAY', 'THEY', 'SPEAK', 'FOR', 'THE', 'COMMONWEALTH', 'AND', 'THEY', 'BELIEVE', 'SO', 'YET', 'UNDER', 'FAVOR', 'IT', 'IS', 'I', 'WHO', 'IN', 'THIS', 'PARTICULAR', 'SPEAK', 'FOR', 'THE', 'COMMONWEALTH'] +8188-274364-0007-2818: hyp=['HERBID', 'THESE', 'GENTLEMEN', 'AT', 'THE', 'BAR', 'SO', 'THEY', 'SPEAK', 'FOR', 'THE', 'CORNWEALTH', 'AND', 'THEY', 'BELIEVE', 'SO', 'YET', 'UNDER', 'FAVOUR', 'IT', 'IS', 'I', 'WHO', 'IN', 'THIS', 'PARTICULAR', 'SPEAK', 'FOR', 'THE', 'CORNWEALTH'] +8188-274364-0008-2819: ref=['MY', 'LORDS', 'I', 'HAVE', 'NOW', 'TROUBLED', 'YOUR', 'LORDSHIPS', 'A', 'GREAT', 'DEAL', 'LONGER', 'THAN', 'I', 'SHOULD', 'HAVE', 'DONE'] +8188-274364-0008-2819: hyp=['MY', 'LORDS', 'I', 'HAVE', 'NOW', 'TROUBLED', 'YOUR', 'LORDSHIPS', 'A', 'GREAT', 'DEAL', 'LONGER', 'THAN', 'I', 'SHOULD', 'HAVE', 'DONE'] +8188-274364-0009-2820: ref=['YOUNG', 'VANE', 'FALLING', 'UPON', 'THIS', 'PAPER', 'OF', 'NOTES', 'DEEMED', 'THE', 'MATTER', 'OF', 'THE', 'UTMOST', 'IMPORTANCE', 'AND', 'IMMEDIATELY', 'COMMUNICATED', 'IT', 'TO', 'PYM', 'WHO', 'NOW', 'PRODUCED', 'THE', 'PAPER', 'BEFORE', 'THE', 'HOUSE', 'OF', 'COMMONS'] +8188-274364-0009-2820: hyp=['YOUNG', 'VANE', 'FALLING', 'UPON', 'THIS', 'PAPER', 'OF', 'NOTES', 'DEEMED', 'THE', 'MATTER', 'OF', 'THE', 'UTMOST', 'IMPORTANCE', 'AND', 'IMMEDIATELY', 'COMMUNICATED', 'IT', 'TO', 'POEM', 'WHO', 'NOW', 'PRODUCED', 'THE', 'PAPER', 'BEFORE', 'THE', 'HOUSE', 'OF', 'COMMONS'] +8188-274364-0010-2821: ref=['THE', 'KING', 'PROPOSES', 'THIS', 'DIFFICULTY', 'BUT', 'HOW', 'CAN', 'I', 'UNDERTAKE', 'OFFENSIVE', 'WAR', 'IF', 'I', 'HAVE', 'NO', 'MORE', 'MONEY'] +8188-274364-0010-2821: hyp=['THE', 'KING', 'PROPOSES', 'THIS', 'DIFFICULTY', 'BUT', 'HOW', 'CAN', 'I', 'UNDERTAKE', 'OFFENSIVE', 'WAR', 'IF', 'I', 'HAVE', 'NO', 'MORE', 'MONEY'] +8188-274364-0011-2822: ref=['YOUR', 'MAJESTY', 'HAVING', 'TRIED', 'THE', 'AFFECTIONS', 'OF', 'YOUR', 'PEOPLE', 'YOU', 'ARE', 'ABSOLVED', 'AND', 'LOOSE', 'FROM', 'ALL', 'RULES', 'OF', 'GOVERNMENT', 'AND', 'MAY', 'DO', 'WHAT', 'POWER', 'WILL', 'ADMIT'] +8188-274364-0011-2822: hyp=['YOUR', 'MAJESTY', 'HAVING', 'TRIED', 'THE', 'AFFECTIONS', 'OF', 'YOUR', 'PEOPLE', 'YOU', 'ARE', 'ABSOLVED', 'AND', 'LOOSE', 'FROM', 'ALL', 'RULES', 'OF', 'GOVERNMENT', 'AND', 'MAY', 'DO', 'WHAT', 'POWER', 'WILL', 'ADMIT'] +8280-266249-0000-339: ref=['OLD', 'MISTER', 'DINSMORE', 'HAD', 'ACCEPTED', 'A', 'PRESSING', 'INVITATION', 'FROM', 'HIS', 'GRANDDAUGHTER', 'AND', 'HER', 'HUSBAND', 'TO', 'JOIN', 'THE', 'PARTY', 'AND', 'WITH', 'THE', 'ADDITION', 'OF', 'SERVANTS', 'IT', 'WAS', 'A', 'LARGE', 'ONE'] +8280-266249-0000-339: hyp=['OLD', 'MISTER', 'DINSMORE', 'HAD', 'ACCEPTED', 'OPPRESSING', 'INVITATION', 'FROM', 'HIS', 'GRANDDAUGHTER', 'AND', 'HER', 'HUSBAND', 'TO', 'JOIN', 'THE', 'PARTY', 'AND', 'WITH', 'THE', 'ADDITION', 'OF', 'SERVANTS', 'IT', 'WAS', 'A', 'LARGE', 'ONE'] +8280-266249-0001-340: ref=['AS', 'THEY', 'WERE', 'IN', 'NO', 'HASTE', 'AND', 
'THE', 'CONFINEMENT', 'OF', 'A', 'RAILROAD', 'CAR', 'WOULD', 'BE', 'VERY', 'IRKSOME', 'TO', 'THE', 'YOUNGER', 'CHILDREN', 'IT', 'HAD', 'BEEN', 'DECIDED', 'TO', 'MAKE', 'THE', 'JOURNEY', 'BY', 'WATER'] +8280-266249-0001-340: hyp=['AS', 'THEY', 'WERE', 'IN', 'NO', 'HASTE', 'AND', 'THE', 'CONFINEMENT', 'OF', 'A', 'RAILROAD', 'CAR', 'WILL', 'BE', 'VERY', 'IRKSOME', 'TO', 'THE', 'YOUNGER', 'CHILDREN', 'IT', 'HAD', 'BEEN', 'DECIDED', 'TO', 'MAKE', 'THE', 'JOURNEY', 'BY', 'WATER'] +8280-266249-0002-341: ref=['THERE', 'WERE', 'NO', 'SAD', 'LEAVE', 'TAKINGS', 'TO', 'MAR', 'THEIR', 'PLEASURE', 'THE', 'CHILDREN', 'WERE', 'IN', 'WILD', 'SPIRITS', 'AND', 'ALL', 'SEEMED', 'CHEERFUL', 'AND', 'HAPPY', 'AS', 'THEY', 'SAT', 'OR', 'STOOD', 'UPON', 'THE', 'DECK', 'WATCHING', 'THE', 'RECEDING', 'SHORE', 'AS', 'THE', 'VESSEL', 'STEAMED', 'OUT', 'OF', 'THE', 'HARBOR'] +8280-266249-0002-341: hyp=['THERE', 'WERE', 'NO', 'SAD', 'LEAVE', 'TAKINGS', 'TO', 'MAR', 'THEIR', 'PLEASURE', 'THE', 'CHILDREN', 'WERE', 'IN', 'WILD', 'SPIRITS', 'AND', 'ALL', 'SEEMED', 'CHEERFUL', 'AND', 'HAPPY', 'AS', 'THEY', 'SAT', 'OR', 'STOOD', 'UPON', 'THE', 'DECK', 'WATCHING', 'THE', 'RECEDING', 'SHORE', 'AS', 'THE', 'VESSEL', 'STEAMED', 'OUT', 'OF', 'THE', 'HARBOUR'] +8280-266249-0003-342: ref=['AT', 'LENGTH', 'THE', 'LAND', 'HAD', 'QUITE', 'DISAPPEARED', 'NOTHING', 'COULD', 'BE', 'SEEN', 'BUT', 'THE', 'SKY', 'OVERHEAD', 'AND', 'A', 'VAST', 'EXPANSE', 'OF', 'WATER', 'ALL', 'AROUND', 'AND', 'THE', 'PASSENGERS', 'FOUND', 'LEISURE', 'TO', 'TURN', 'THEIR', 'ATTENTION', 'UPON', 'EACH', 'OTHER'] +8280-266249-0003-342: hyp=['AT', 'LENGTH', 'THE', 'LAND', 'HAD', 'QUITE', 'DISAPPEARED', 'NOTHING', 'COULD', 'BE', 'SEEN', 'BUT', 'THE', 'SKY', 'OVERHEAD', 'AND', 'A', 'VAST', 'EXPANSE', 'OF', 'WATER', 'ALL', 'ROUND', 'AND', 'THE', 'PASSENGERS', 'FOUND', 'LEISURE', 'TO', 'TURN', 'THEIR', 'ATTENTION', 'UPON', 'EACH', 'OTHER'] +8280-266249-0004-343: ref=['THERE', 'ARE', 'SOME', 'NICE', 'LOOKING', 'PEOPLE', 'ON', 'BOARD', 'REMARKED', 'MISTER', 'TRAVILLA', 'IN', 'AN', 'UNDERTONE', 'TO', 'HIS', 'WIFE'] +8280-266249-0004-343: hyp=['THERE', 'ARE', 'SOME', 'NICE', 'LOOKING', 'PEOPLE', 'ON', 'BOARD', 'REMARKED', 'MISTER', 'TRAVILLA', 'IN', 'AN', 'UNDERTONE', 'TO', 'HIS', 'WIFE'] +8280-266249-0005-344: ref=['BESIDE', 'OURSELVES', 'ADDED', 'COUSIN', 'RONALD', 'LAUGHING'] +8280-266249-0005-344: hyp=['BESIDES', 'OURSELVES', 'ADDED', 'COUSIN', 'RANALD', 'LAUGHING'] +8280-266249-0006-345: ref=['YES', 'SHE', 'ANSWERED', 'THAT', 'LITTLE', 'GROUP', 'YONDER', 'A', 'YOUNG', 'MINISTER', 'AND', 'HIS', 'WIFE', 'AND', 'CHILD', 'I', 'SUPPOSE'] +8280-266249-0006-345: hyp=['YES', 'SHE', 'ANSWERED', 'THAT', 'LITTLE', 'GROUP', 'YONDER', 'A', 'YOUNG', 'MINISTER', 'AND', 'HIS', 'WIFE', 'AND', 'CHILD', 'I', 'SUPPOSE'] +8280-266249-0007-346: ref=['AND', 'WHAT', 'A', 'DEAR', 'LITTLE', 'FELLOW', 'HE', 'IS', 'JUST', 'ABOUT', 'THE', 'AGE', 'OF', 'OUR', 'HAROLD', 'I', 'SHOULD', 'JUDGE'] +8280-266249-0007-346: hyp=['AND', 'WHEN', 'A', 'DEAR', 'LITTLE', 'FELLOW', 'HE', 'IS', 'JUST', 'ABOUT', 'THE', 'AGE', 'OF', 'OUR', 'HERALD', 'I', 'SHOULD', 'JUDGE'] +8280-266249-0008-347: ref=['DO', 'YOU', 'SON', 'WAS', 'THE', 'SMILING', 'REJOINDER'] +8280-266249-0008-347: hyp=['DO', 'YOU', 'SON', 'WAS', 'THE', 'SMILING', 'REJOINDER'] +8280-266249-0009-348: ref=['HE', 'CERTAINLY', 'LOOKS', 'LIKE', 'A', 'VERY', 'NICE', 'LITTLE', 'BOY'] +8280-266249-0009-348: hyp=['HE', 'CERTAINLY', 'LOOKS', 'LIKE', 'A', 'VERY', 'NICE', 'LITTLE', 'BOY'] +8280-266249-0010-349: ref=['SUPPOSE', 'YOU', 'AND', 'HE', 'SHAKE', 
'HANDS', 'FRANK'] +8280-266249-0010-349: hyp=['SUPPOSE', 'YOU', 'AND', 'HE', 'SHAKE', 'HANDS', 'FRANK'] +8280-266249-0011-350: ref=['I', 'DO', 'INDEED', 'THOUGH', 'PROBABLY', 'COMPARATIVELY', 'FEW', 'ARE', 'AWARE', 'THAT', 'TOBACCO', 'IS', 'THE', 'CAUSE', 'OF', 'THEIR', 'AILMENTS'] +8280-266249-0011-350: hyp=['I', 'DO', 'INDEED', 'THE', 'PROBABLY', 'COMPARATIVELY', 'FEW', 'ARE', 'AWARE', 'THAT', 'TOBACCO', 'IS', 'THE', 'CAUSE', 'OF', 'THEIR', 'AILMENTS'] +8280-266249-0012-351: ref=['DOUBTLESS', 'THAT', 'IS', 'THE', 'CASE', 'REMARKED', 'MISTER', 'DINSMORE'] +8280-266249-0012-351: hyp=['DOUBTLESS', 'THAT', 'IS', 'THE', 'CASE', 'REMARKED', 'MISTER', 'DINSMORE'] +8280-266249-0013-352: ref=['WITH', 'ALL', 'MY', 'HEART', 'IF', 'YOU', 'WILL', 'STEP', 'INTO', 'THE', "GENTLEMEN'S", 'CABIN', 'WHERE', "THERE'S", 'A', 'LIGHT'] +8280-266249-0013-352: hyp=['WITH', 'ALL', 'MY', 'HEART', 'IF', 'YOU', 'WILL', 'STEP', 'INTO', 'THE', "GENTLEMAN'S", 'CABIN', 'WHERE', "THERE'S", 'A', 'LIGHT'] +8280-266249-0014-353: ref=['HE', 'LED', 'THE', 'WAY', 'THE', 'OTHERS', 'ALL', 'FOLLOWING', 'AND', 'TAKING', 'OUT', 'A', 'SLIP', 'OF', 'PAPER', 'READ', 'FROM', 'IT', 'IN', 'A', 'DISTINCT', 'TONE', 'LOUD', 'ENOUGH', 'TO', 'BE', 'HEARD', 'BY', 'THOSE', 'ABOUT', 'HIM', 'WITHOUT', 'DISTURBING', 'THE', 'OTHER', 'PASSENGERS'] +8280-266249-0014-353: hyp=['HE', 'LED', 'THE', 'WAY', 'THE', 'OTHERS', 'ALL', 'FOLLOWING', 'AND', 'TAKING', 'OUT', 'A', 'SLIP', 'OF', 'PAPER', 'READ', 'FROM', 'IT', 'IN', 'A', 'DISTINCT', 'TONE', 'LOUD', 'ENOUGH', 'TO', 'BE', 'HEARD', 'BY', 'THOSE', 'ALL', 'ABOUT', 'HIM', 'WITHOUT', 'DISTURBING', 'THE', 'OTHER', 'PASSENGERS'] +8280-266249-0015-354: ref=['ONE', 'DROP', 'OF', 'NICOTINE', 'EXTRACT', 'OF', 'TOBACCO', 'PLACED', 'ON', 'THE', 'TONGUE', 'OF', 'A', 'DOG', 'WILL', 'KILL', 'HIM', 'IN', 'A', 'MINUTE', 'THE', 'HUNDREDTH', 'PART', 'OF', 'A', 'GRAIN', 'PICKED', 'UNDER', 'THE', 'SKIN', 'OF', 'A', "MAN'S", 'ARM', 'WILL', 'PRODUCE', 'NAUSEA', 'AND', 'FAINTING'] +8280-266249-0015-354: hyp=['ONE', 'DROP', 'OF', 'NICOTINE', 'EXTRACTED', 'TOBACCO', 'PLACED', 'ON', 'THE', 'TONGUE', 'OF', 'THE', 'DOG', 'WILL', 'KILL', 'HIM', 'IN', 'A', 'MINUTE', 'THE', 'HUNDREDTH', 'PART', 'OF', 'A', 'GRAIN', 'PRICKED', 'UNDER', 'THE', 'SKIN', 'OF', 'A', "MAN'S", 'ARM', 'WILL', 'PRODUCE', 'NAUSEA', 'AND', 'FAINTING'] +8280-266249-0016-355: ref=['THE', 'HALF', 'DOZEN', 'CIGARS', 'WHICH', 'MOST', 'SMOKERS', 'USE', 'A', 'DAY', 'CONTAIN', 'SIX', 'OR', 'SEVEN', 'GRAINS', 'ENOUGH', 'IF', 'CONCENTRATED', 'AND', 'ABSORBED', 'TO', 'KILL', 'THREE', 'MEN', 'AND', 'A', 'POUND', 'OF', 'TOBACCO', 'ACCORDING', 'TO', 'ITS', 'QUALITY', 'CONTAINS', 'FROM', 'ONE', 'QUARTER', 'TO', 'ONE', 'AND', 'A', 'QUARTER', 'OUNCES'] +8280-266249-0016-355: hyp=['THE', 'HALF', 'DOZEN', 'CIGARS', 'WHICH', 'MOST', 'SMOKERS', 'USED', 'A', 'DAY', 'CONTAIN', 'SIX', 'OR', 'SEVEN', 'GRAINS', 'ENOUGH', 'IF', 'CONCENTRATED', 'AND', 'ABSORBED', 'TO', 'KILL', 'THREE', 'MEN', 'AND', 'A', 'POUND', 'OR', 'TOBACCO', 'ACCORDING', 'TO', 'ITS', 'QUALITY', 'CONTAINS', 'FROM', 'ONE', 'QUARTER', 'TO', 'ONE', 'AND', 'A', 'QUARTER', 'OUNCES'] +8280-266249-0017-356: ref=['IS', 'IT', 'STRANGE', 'THEN', 'THAT', 'SMOKERS', 'AND', 'CHEWERS', 'HAVE', 'A', 'THOUSAND', 'AILMENTS'] +8280-266249-0017-356: hyp=['IS', 'IT', 'STRANGE', 'THEN', 'THAT', 'SMOKERS', 'AND', 'SHOERS', 'HAVE', 'A', 'THOUSAND', 'AILMENTS'] +8280-266249-0018-357: ref=['THAT', 'THE', 'FRENCH', 'POLYTECHNIC', 'INSTITUTE', 'HAD', 'TO', 'PROHIBIT', 'ITS', 'USE', 'ON', 'ACCOUNT', 'OF', 'ITS', 'EFFECTS', 'ON', 'THE', 'MIND'] 
+8280-266249-0018-357: hyp=['THAT', 'THE', 'FRENCH', 'POLYTECHNICA', 'INSTITUTE', 'HAD', 'TO', 'PROHIBIT', 'ITS', 'USE', 'ON', 'ACCOUNT', 'OF', 'ITS', 'EFFECTS', 'UPON', 'THE', 'MIND'] +8280-266249-0019-358: ref=['NOTICE', 'THE', 'MULTITUDE', 'OF', 'SUDDEN', 'DEATHS', 'AND', 'SEE', 'HOW', 'MANY', 'ARE', 'SMOKERS', 'AND', 'CHEWERS'] +8280-266249-0019-358: hyp=['NOTICE', 'THE', 'MULTITUDE', 'OF', 'SUDDEN', 'DEATHS', 'AND', 'SEE', 'HOW', 'MANY', 'OUR', 'SMOKERS', 'AND', 'CHEWERS'] +8280-266249-0020-359: ref=['IN', 'A', 'SMALL', 'COUNTRY', 'TOWN', 'SEVEN', 'OF', 'THESE', 'MYSTERIOUS', 'PROVIDENCES', 'OCCURRED', 'WITHIN', 'THE', 'CIRCUIT', 'OF', 'A', 'MILE', 'ALL', 'DIRECTLY', 'TRACEABLE', 'TO', 'TOBACCO', 'AND', 'ANY', 'PHYSICIAN', 'ON', 'A', 'FEW', 'MOMENTS', 'REFLECTION', 'CAN', 'MATCH', 'THIS', 'FACT', 'BY', 'HIS', 'OWN', 'OBSERVATION'] +8280-266249-0020-359: hyp=['IN', 'A', 'SMALL', 'COUNTRY', 'TOWN', 'SEVEN', 'OF', 'THESE', 'MYSTERIOUS', 'PROVIDENCES', 'OCCURRED', 'WITHIN', 'THE', 'CIRCUIT', 'OF', 'A', 'MILE', 'ALL', 'DIRECTLY', 'TRACEABLE', 'TO', 'TOBACCO', 'AND', 'ANY', 'PHYSICIAN', 'ON', 'A', 'FEW', 'MOMENTS', 'REFLECTION', 'CAN', 'MATCH', 'THIS', 'FACT', 'BY', 'HIS', 'OWN', 'OBSERVATION'] +8280-266249-0021-360: ref=['AND', 'THEN', 'SUCH', 'POWERFUL', 'ACIDS', 'PRODUCE', 'INTENSE', 'IRRITATION', 'AND', 'THIRST', 'THIRST', 'WHICH', 'WATER', 'DOES', 'NOT', 'QUENCH'] +8280-266249-0021-360: hyp=['AND', 'THEN', 'SUCH', 'POWERFUL', 'ACIDS', 'PRODUCE', 'INTENSE', 'IRRITATION', 'AND', 'THIRST', 'THIRST', 'WHICH', 'WATER', 'DOES', 'NOT', 'QUENCH'] +8280-266249-0022-361: ref=['HENCE', 'A', 'RESORT', 'TO', 'CIDER', 'AND', 'BEER'] +8280-266249-0022-361: hyp=['HENCE', 'A', 'RESORT', 'TO', 'CIDER', 'AND', 'BEER'] +8280-266249-0023-362: ref=['NO', 'SIR', 'WHAT', 'KNOW', 'YE', 'NOT', 'THAT', 'YOUR', 'BODY', 'IS', 'THE', 'TEMPLE', 'OF', 'THE', 'HOLY', 'GHOST', 'WHICH', 'IS', 'IN', 'YOU', 'WHICH', 'YE', 'HAVE', 'OF', 'GOD', 'AND', 'YE', 'ARE', 'NOT', 'YOUR', 'OWN'] +8280-266249-0023-362: hyp=['NO', 'SIR', 'WHAT', 'KNOW', 'YE', 'NOT', 'THAT', 'YOUR', 'BODY', 'IS', 'THE', 'TEMPLE', 'OF', 'THE', 'HOLY', 'GHOST', 'WHICH', 'IS', 'IN', 'YOU', 'WHICH', 'YE', 'HAVE', 'OF', 'GOD', 'AND', 'YE', 'ARE', 'NOT', 'YOUR', 'OWN'] +8280-266249-0024-363: ref=['FOR', 'YE', 'ARE', 'BOUGHT', 'WITH', 'A', 'PRICE', 'THEREFORE', 'GLORIFY', 'GOD', 'IN', 'YOUR', 'BODY', 'AND', 'IN', 'YOUR', 'SPIRIT', 'WHICH', 'ARE', "GOD'S"] +8280-266249-0024-363: hyp=['FOR', 'YOU', 'ARE', 'BOUGHT', 'WITH', 'A', 'PRICE', 'THEREFORE', 'GLORIFY', 'GOD', 'IN', 'YOUR', 'BODY', 'AND', 'IN', 'YOUR', 'SPIRIT', 'WHICH', 'ARE', 'GODS'] +8280-266249-0025-364: ref=['WE', 'CERTAINLY', 'HAVE', 'NO', 'RIGHT', 'TO', 'INJURE', 'OUR', 'BODIES', 'EITHER', 'BY', 'NEGLECT', 'OR', 'SELF', 'INDULGENCE'] +8280-266249-0025-364: hyp=['WE', 'CERTAINLY', 'HAVE', 'NO', 'RIGHT', 'TO', 'INJURE', 'OUR', 'BODIES', 'EITHER', 'BY', 'NEGLECT', 'OR', 'SELF', 'INDULGENCE'] +8280-266249-0026-365: ref=['AND', 'AGAIN', 'I', 'BESEECH', 'YOU', 'THEREFORE', 'BRETHREN', 'BY', 'THE', 'MERCIES', 'OF', 'GOD', 'THAT', 'YE', 'PRESENT', 'YOUR', 'BODIES', 'A', 'LIVING', 'SACRIFICE', 'HOLY', 'ACCEPTABLE', 'UNTO', 'GOD', 'WHICH', 'IS', 'YOUR', 'REASONABLE', 'SERVICE'] +8280-266249-0026-365: hyp=['AND', 'AGAIN', 'I', 'BESEECH', 'YOU', 'THEREFORE', 'BRETHREN', 'BY', 'THE', 'MERCIES', 'OF', 'GOD', 'THAT', 'YE', 'PRESENT', 'YOUR', 'BODIES', 'A', 'LIVING', 'SACRIFICE', 'HOLY', 'ACCEPTABLE', 'UNTO', 'GOD', 'WHICH', 'IS', 'YOUR', 'REASONABLE', 'SERVICE'] +8280-266249-0027-366: ref=['IT', 'MUST', 
'REQUIRE', 'A', 'GOOD', 'DEAL', 'OF', 'RESOLUTION', 'FOR', 'ONE', 'WHO', 'HAS', 'BECOME', 'FOND', 'OF', 'THE', 'INDULGENCE', 'TO', 'GIVE', 'IT', 'UP', 'REMARKED', 'MISTER', 'DALY'] +8280-266249-0027-366: hyp=['IT', 'MUST', 'REQUIRE', 'A', 'GOOD', 'DEAL', 'OF', 'RESOLUTION', 'FOR', 'ONE', 'WHO', 'HAS', 'BECOME', 'FOND', 'OF', 'THE', 'INDULGENCE', 'TO', 'GIVE', 'IT', 'UP', 'REMARKED', 'MISTER', 'DALY'] +8280-266249-0028-367: ref=['NO', 'DOUBT', 'NO', 'DOUBT', 'RETURNED', 'MISTER', 'LILBURN', 'BUT', 'IF', 'THY', 'RIGHT', 'EYE', 'OFFEND', 'THEE', 'PLUCK', 'IT', 'OUT', 'AND', 'CAST', 'IT', 'FROM', 'THEE', 'FOR', 'IT', 'IS', 'PROFITABLE', 'FOR', 'THEE', 'THAT', 'ONE', 'OF', 'THY', 'MEMBERS', 'SHOULD', 'PERISH', 'AND', 'NOT', 'THAT', 'THY', 'WHOLE', 'BODY', 'SHOULD', 'BE', 'CAST', 'INTO', 'HELL'] +8280-266249-0028-367: hyp=['NO', 'DOUBT', 'NO', 'DOUBT', 'RETURNED', 'MISTER', 'LILBOURNE', 'BUT', 'IF', 'THY', 'RIGHT', 'I', 'OFFENDLY', 'PLUCK', 'IT', 'UP', 'AND', 'CAST', 'IT', 'FROM', 'ME', 'FOR', 'IT', 'IS', 'PROFITABLE', 'FOR', 'THEE', 'THAT', 'ONE', 'OF', 'THY', 'MEMBERS', 'SHOULD', 'PERISH', 'AND', 'NOT', 'THAT', 'THY', 'WHOLE', 'BODY', 'SHOULD', 'BE', 'CAST', 'INTO', 'HELL'] +8280-266249-0029-368: ref=['THERE', 'WAS', 'A', 'PAUSE', 'BROKEN', 'BY', 'YOUNG', 'HORACE', 'WHO', 'HAD', 'BEEN', 'WATCHING', 'A', 'GROUP', 'OF', 'MEN', 'GATHERED', 'ABOUT', 'A', 'TABLE', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'ROOM'] +8280-266249-0029-368: hyp=['THERE', 'WAS', 'A', 'PAUSE', 'BROKEN', 'BY', 'YOUNG', 'HORACE', 'WHO', 'HAD', 'BEEN', 'WATCHING', 'A', 'GROUP', 'OF', 'MEN', 'GATHERED', 'ABOUT', 'A', 'TABLE', 'AT', 'THE', 'FURTHER', 'END', 'OF', 'THE', 'ROOM'] +8280-266249-0030-369: ref=['THEY', 'ARE', 'GAMBLING', 'YONDER', 'AND', "I'M", 'AFRAID', 'THAT', 'YOUNG', 'FELLOW', 'IS', 'BEING', 'BADLY', 'FLEECED', 'BY', 'THAT', 'MIDDLE', 'AGED', 'MAN', 'OPPOSITE'] +8280-266249-0030-369: hyp=['THEY', 'ARE', 'GAMBLING', 'YONDER', 'AND', "I'M", 'AFRAID', 'THAT', 'YOUNG', 'FELLOW', 'IS', 'BEING', 'BADLY', 'FLEECED', 'BY', 'THE', 'MIDDLE', 'AGED', 'MAN', 'OPPOSITE'] +8280-266249-0031-370: ref=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'WERE', 'AT', 'ONCE', 'TURNED', 'IN', 'THAT', 'DIRECTION'] +8280-266249-0031-370: hyp=['THE', 'EYES', 'OF', 'THE', 'WHOLE', 'PARTY', 'WERE', 'AT', 'ONCE', 'TURNED', 'IN', 'THAT', 'DIRECTION'] +8280-266249-0032-371: ref=['NO', 'SIR', 'HE', 'IS', 'NOT', 'HERE'] +8280-266249-0032-371: hyp=['NO', 'SIR', 'HE', 'IS', 'NOT', 'HERE'] +8280-266249-0033-372: ref=['AND', 'THE', 'DOOR', 'WAS', 'SLAMMED', 'VIOLENTLY', 'TO'] +8280-266249-0033-372: hyp=['AT', 'THE', 'DOOR', 'WAS', 'SLAMMED', 'VIOLENTLY', 'TOO'] +8280-266249-0034-373: ref=['NOW', 'THE', 'VOICE', 'CAME', 'FROM', 'THE', 'SKYLIGHT', 'OVERHEAD', 'APPARENTLY', 'AND', 'WITH', 'A', 'FIERCE', 'IMPRECATION', 'THE', 'IRATE', 'GAMESTER', 'RUSHED', 'UPON', 'DECK', 'AND', 'RAN', 'HITHER', 'AND', 'THITHER', 'IN', 'SEARCH', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0034-373: hyp=['NOW', 'THE', 'VOICE', 'CAME', 'FROM', 'THE', 'SKYLIGHT', 'OVERHEAD', 'APPARENTLY', 'AND', 'WITH', 'A', 'FIERCE', 'IMPRECATION', 'THE', 'IRATE', 'GAMESTER', 'RUSHED', 'UPON', 'DECK', 'AND', 'RAN', 'HITHER', 'AND', 'THITHER', 'IN', 'SEARCH', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0035-374: ref=['HIS', 'VICTIM', 'WHO', 'HAD', 'BEEN', 'LOOKING', 'ON', 'DURING', 'THE', 'LITTLE', 'SCENE', 'AND', 'LISTENING', 'TO', 'THE', 'MYSTERIOUS', 'VOICE', 'IN', 'SILENT', 'WIDE', 'EYED', 'WONDER', 'AND', 'FEAR', 'NOW', 'ROSE', 'HASTILY', 'HIS', 'FACE', 'DEATHLY', 'PALE', 'WITH', 'TREMBLING', 
'HANDS', 'GATHERED', 'UP', 'THE', 'MONEY', 'HE', 'HAD', 'STAKED', 'AND', 'HURRYING', 'INTO', 'HIS', 'STATE', 'ROOM', 'LOCKED', 'HIMSELF', 'IN'] +8280-266249-0035-374: hyp=['HIS', 'VICTIM', 'WHO', 'HAD', 'BEEN', 'LOOKING', 'ON', 'DURING', 'THE', 'LITTLE', 'SCENE', 'AND', 'LISTENING', 'TO', 'THE', 'MYSTERIOUS', 'VOICE', 'AND', 'SILENT', 'WIDE', 'EYED', 'WONDER', 'AND', 'FEAR', 'NOW', 'ROSE', 'HASTILY', 'HIS', 'FACE', 'DEATHLY', 'PALE', 'WITH', 'TREMBLING', 'HANDS', 'GATHERED', 'UP', 'THE', 'MONEY', 'HE', 'HAD', 'STAKED', 'AND', 'HURRYING', 'TO', 'HIS', 'STATEROOM', 'LOCKED', 'HIMSELF', 'IN'] +8280-266249-0036-375: ref=['WHAT', 'DOES', 'IT', 'MEAN', 'CRIED', 'ONE'] +8280-266249-0036-375: hyp=['WHAT', 'DOES', 'IT', 'MEAN', 'CRIED', 'ONE'] +8280-266249-0037-376: ref=['A', 'VENTRILOQUIST', 'ABOARD', 'OF', 'COURSE', 'RETURNED', 'ANOTHER', "LET'S", 'FOLLOW', 'AND', 'SEE', 'THE', 'FUN'] +8280-266249-0037-376: hyp=['A', 'VENTILOQUE', 'QUESTERED', 'BOARD', 'OF', 'COURSE', 'RETURNED', 'ANOTHER', "LET'S", 'FOLLOW', 'AND', 'SEE', 'THE', 'FUN'] +8280-266249-0038-377: ref=['I', 'WONDER', 'WHICH', 'OF', 'US', 'IT', 'IS', 'REMARKED', 'THE', 'FIRST', 'LOOKING', 'HARD', 'AT', 'OUR', 'PARTY', 'I', "DON'T", 'KNOW', 'BUT', 'COME', 'ON'] +8280-266249-0038-377: hyp=['I', 'WONDER', 'WHICH', 'OF', 'US', 'IT', 'IS', 'REMARKED', 'THE', 'FIRST', 'LOOKING', 'HARD', 'AT', 'OUR', 'PARTY', 'I', "DON'T", 'KNOW', 'BUT', 'COME', 'ON'] +8280-266249-0039-378: ref=['THAT', 'FELLOW', 'NICK', 'WARD', 'IS', 'A', 'NOTED', 'BLACKLEG', 'AND', 'RUFFIAN', 'HAD', 'HIS', 'NOSE', 'BROKEN', 'IN', 'A', 'FIGHT', 'AND', 'IS', 'SENSITIVE', 'ON', 'THE', 'SUBJECT', 'WAS', 'CHEATING', 'OF', 'COURSE'] +8280-266249-0039-378: hyp=['THAT', 'FELLOW', 'NICK', 'WARD', 'IS', 'A', 'NOTED', 'BLACK', 'LAG', 'AND', 'RUFFIAN', 'HAD', 'HIS', 'NOSE', 'BROKEN', 'IN', 'A', 'FIGHT', 'AND', 'IS', 'SENSITIVE', 'ON', 'THE', 'SUBJECT', 'WAS', 'CHEATING', 'OF', 'COURSE'] +8280-266249-0040-379: ref=['WHO', 'ASKED', 'THE', 'MATE', "I'VE", 'SEEN', 'NONE', 'UP', 'HERE', 'THOUGH', 'THERE', 'ARE', 'SOME', 'IN', 'THE', 'STEERAGE'] +8280-266249-0040-379: hyp=['WHO', 'ASKED', 'THE', 'MATE', "I'VE", 'SEEN', 'NO', 'NAP', 'HERE', 'THOUGH', 'THERE', 'ARE', 'SOME', 'IN', 'THE', 'STEERAGE'] +8280-266249-0041-380: ref=['THEY', 'HEARD', 'HIM', 'IN', 'SILENCE', 'WITH', 'A', 'COOL', 'PHLEGMATIC', 'INDIFFERENCE', 'MOST', 'EXASPERATING', 'TO', 'ONE', 'IN', 'HIS', 'PRESENT', 'MOOD'] +8280-266249-0041-380: hyp=['THEY', 'HEARD', 'HIM', 'IN', 'SILENCE', 'WHERE', 'THE', 'COOL', 'PHLEGMATIC', 'INDIFFERENCE', 'MOST', 'EXASPERATING', 'TO', 'ONE', 'IN', 'HIS', 'PRESENT', 'MOOD'] +8280-266249-0042-381: ref=['A', 'MAN', 'OF', 'GIANT', 'SIZE', 'AND', 'HERCULEAN', 'STRENGTH', 'HAD', 'LAID', 'ASIDE', 'HIS', 'PIPE', 'AND', 'SLOWLY', 'RISING', 'TO', 'HIS', 'FEET', 'SEIZED', 'THE', 'SCOUNDREL', 'IN', 'HIS', 'POWERFUL', 'GRASP'] +8280-266249-0042-381: hyp=['A', 'MAN', 'OF', 'GIANT', 'SIZE', 'AND', 'HERCULEAN', 'STRENGTH', 'HAD', 'LAID', 'ASIDE', 'HIS', 'PIPE', 'AND', 'SLOWLY', 'RISING', 'TO', 'HIS', 'FEET', 'SEIZED', 'THE', 'SCOUNDREL', 'IN', 'HIS', 'POWERFUL', 'GRASP'] +8280-266249-0043-382: ref=['LET', 'ME', 'GO', 'YELLED', 'WARD', 'MAKING', 'A', 'DESPERATE', 'EFFORT', 'TO', 'FREE', 'HIS', 'ARMS'] +8280-266249-0043-382: hyp=['LET', 'ME', 'GO', 'YELLED', 'WARD', 'MAKING', 'A', 'DESPERATE', 'EFFORT', 'TO', 'FREE', 'HIS', 'ARMS'] +8280-266249-0044-383: ref=['I', 'DINKS', 'NO', 'I', 'DINKS', 'I', 'DEACH', 'YOU', 'VON', 'LESSON', 'RETURNED', 'HIS', 'CAPTOR', 'NOT', 'RELAXING', 'HIS', 'GRASP', 'IN', 'THE', 
'LEAST'] +8280-266249-0044-383: hyp=['I', 'THINK', 'NO', 'I', 'THINK', 'I', 'DID', 'YOU', 'VON', 'LESSON', 'RETURNED', 'HIS', 'CAPTOR', 'NOT', 'RELAXING', 'HIS', 'GRASP', 'IN', 'THE', 'LEAST'] +8280-266249-0045-384: ref=['THE', 'GERMAN', 'RELEASED', 'HIS', 'PRISONER', 'AND', 'THE', 'LATTER', 'SLUNK', 'AWAY', 'WITH', 'MUTTERED', 'THREATS', 'AND', 'IMPRECATIONS', 'UPON', 'THE', 'HEAD', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0045-384: hyp=['THE', 'GERMAN', 'RELEASED', 'HIS', 'PRISONER', 'AND', 'THE', 'LATTER', 'SUNK', 'AWAY', 'WITH', 'MUTTERED', 'THREATS', 'AND', 'IMPRECATIONS', 'UPON', 'THE', 'HEAD', 'OF', 'HIS', 'TORMENTOR'] +8280-266249-0046-385: ref=['MISTER', 'LILBURN', 'AND', 'MISTER', 'DALY', 'EACH', 'AT', 'A', 'DIFFERENT', 'TIME', 'SOUGHT', 'OUT', 'THE', 'YOUNG', 'MAN', "WARD'S", 'INTENDED', 'VICTIM', 'AND', 'TRIED', 'TO', 'INFLUENCE', 'HIM', 'FOR', 'GOOD'] +8280-266249-0046-385: hyp=['MISTER', 'LILLBURN', 'AND', 'MISTER', 'DALY', 'EACH', 'HAD', 'A', 'DIFFERENT', 'TIME', 'SOUGHT', 'OUT', 'THE', 'YOUNG', 'MAN', 'WORDS', 'INTENDED', 'VICTIM', 'AND', 'TRIED', 'TO', 'INFLUENCE', 'HIM', 'FOR', 'GOOD'] +8280-266249-0047-386: ref=['YET', 'THERE', 'WAS', 'GAMBLING', 'AGAIN', 'THE', 'SECOND', 'NIGHT', 'BETWEEN', 'WARD', 'AND', 'SEVERAL', 'OTHERS', 'OF', 'HIS', 'PROFESSION'] +8280-266249-0047-386: hyp=['YET', 'THERE', 'WAS', 'GAMBLING', 'AGAIN', 'THE', 'SECOND', 'NIGHT', 'BETWEEN', 'WARD', 'AND', 'SEVERAL', 'OTHERS', 'OF', 'HIS', 'PROFESSIONS'] +8280-266249-0048-387: ref=['THEY', 'KEPT', 'IT', 'UP', 'TILL', 'AFTER', 'MIDNIGHT'] +8280-266249-0048-387: hyp=['THEY', 'KEPT', 'IT', 'UP', 'TILL', 'AFTER', 'MIDNIGHT'] +8280-266249-0049-388: ref=['THEN', 'MISTER', 'LILBURN', 'WAKING', 'FROM', 'HIS', 'FIRST', 'SLEEP', 'IN', 'A', 'STATEROOM', 'NEAR', 'BY', 'THOUGHT', 'HE', 'WOULD', 'BREAK', 'IT', 'UP', 'ONCE', 'MORE'] +8280-266249-0049-388: hyp=['THEN', 'MISTER', 'LOWBORNE', 'WAKING', 'FROM', 'HIS', 'FIRST', 'SLEEP', 'IN', 'A', 'STATE', 'ROOM', 'NEAR', 'BY', 'THOUGHT', 'HE', 'WOULD', 'BREAK', 'IT', 'UP', 'ONCE', 'MORE'] +8280-266249-0050-389: ref=['AN', 'INTENSE', 'VOICELESS', 'EXCITEMENT', 'POSSESSED', 'THE', 'PLAYERS', 'FOR', 'THE', 'GAME', 'WAS', 'A', 'CLOSE', 'ONE', 'AND', 'THE', 'STAKES', 'WERE', 'VERY', 'HEAVY'] +8280-266249-0050-389: hyp=['AN', 'INTENSE', 'VOICELESS', 'EXCITEMENT', 'POSSESSED', 'THE', 'PLAYERS', 'FOR', 'THE', 'GAME', 'WAS', 'A', 'CLOSE', 'ONE', 'AND', 'MISTAKES', 'WERE', 'VERY', 'HEAVY'] +8280-266249-0051-390: ref=['THEY', 'BENT', 'EAGERLY', 'OVER', 'THE', 'BOARD', 'EACH', 'WATCHING', 'WITH', 'FEVERISH', 'ANXIETY', 'HIS', "COMPANION'S", 'MOVEMENTS', 'EACH', 'CASTING', 'NOW', 'AND', 'AGAIN', 'A', 'GLOATING', 'EYE', 'UPON', 'THE', 'HEAP', 'OF', 'GOLD', 'AND', 'GREENBACKS', 'THAT', 'LAY', 'BETWEEN', 'THEM', 'AND', 'AT', 'TIMES', 'HALF', 'STRETCHING', 'OUT', 'HIS', 'HAND', 'TO', 'CLUTCH', 'IT'] +8280-266249-0051-390: hyp=["THEY'VE", 'BEEN', 'EAGERLY', 'OVER', 'THE', 'BOARD', 'EACH', 'WATCHING', 'WITH', 'FEVERISH', 'ANXIETY', 'HIS', "COMPANION'S", 'MOVEMENTS', 'EACH', 'CASTING', 'NOW', 'AND', 'AGAIN', 'A', 'GLOATING', 'EYE', 'UPON', 'THE', 'HEAP', 'OF', 'GOLD', 'AND', 'GREENBACKS', 'THAT', 'LAY', 'BETWEEN', 'THEM', 'AND', 'AT', 'TIMES', 'HALF', 'STRETCHING', 'OUT', 'HIS', 'HAND', 'TO', 'CLUTCH', 'IT'] +8280-266249-0052-391: ref=['A', 'DEEP', 'GROAN', 'STARTLED', 'THEM', 'AND', 'THEY', 'SPRANG', 'TO', 'THEIR', 'FEET', 'PALE', 'AND', 'TREMBLING', 'WITH', 'SUDDEN', 'TERROR', 'EACH', 'HOLDING', 'HIS', 'BREATH', 'AND', 'STRAINING', 'HIS', 'EAR', 'TO', 'CATCH', 'A', 'REPETITION', 'OF', 
'THE', 'DREAD', 'SOUND'] +8280-266249-0052-391: hyp=['A', 'DEEP', 'GROAN', 'STARTLED', 'THEM', 'AND', 'THEY', 'SPRANG', 'TO', 'THEIR', 'FEET', 'PALE', 'AND', 'TREMBLING', 'WITH', 'SUDDEN', 'TERROR', 'EACH', 'HOLDING', 'HIS', 'BREATH', 'AND', 'STRAINING', 'HIS', 'EAR', 'TO', 'CATCH', 'A', 'REPETITION', 'OF', 'THE', 'DREAD', 'SOUND'] +8280-266249-0053-392: ref=['BUT', 'ALL', 'WAS', 'SILENT', 'AND', 'AFTER', 'A', 'MOMENT', 'OF', 'ANXIOUS', 'WAITING', 'THEY', 'SAT', 'DOWN', 'TO', 'THEIR', 'GAME', 'AGAIN', 'TRYING', 'TO', 'CONCEAL', 'AND', 'SHAKE', 'OFF', 'THEIR', 'FEARS', 'WITH', 'A', 'FORCED', 'UNNATURAL', 'LAUGH'] +8280-266249-0053-392: hyp=['BUT', 'ALWAYS', 'SILENT', 'AND', 'AFTER', 'A', 'MOMENT', 'OF', 'ANXIOUS', 'WAITING', 'THEY', 'SAT', 'DOWN', 'TO', 'THEIR', 'GAME', 'AGAIN', 'TRYING', 'TO', 'CONCEAL', 'AND', 'SHAKE', 'OFF', 'THEIR', 'FEARS', 'TO', 'THE', 'FORCED', 'UNNATURAL', 'LAUGH'] +8280-266249-0054-393: ref=['IT', 'CAME', 'FROM', 'UNDER', 'THE', 'TABLE', 'GASPED', 'WARD', 'LOOK', "WHAT'S", 'THERE', 'LOOK', 'YOURSELF'] +8280-266249-0054-393: hyp=['IT', 'CAME', 'FROM', 'UNDER', 'THE', 'TABLE', 'GASPED', 'HORN', 'LOOK', "WHAT'S", 'THERE', 'LOOK', 'TO', 'YOURSELF'] +8280-266249-0055-394: ref=['WHAT', 'CAN', 'IT', 'HAVE', 'BEEN', 'THEY', 'ASKED', 'EACH', 'OTHER'] +8280-266249-0055-394: hyp=['WHAT', 'CAN', 'IT', 'HAVE', 'BEEN', 'THEY', 'ASKED', 'EACH', 'OTHER'] +8280-266249-0056-395: ref=['OH', 'NONSENSE', 'WHAT', 'FOOLS', 'WE', 'ARE'] +8280-266249-0056-395: hyp=['OH', 'NONSENSE', 'WHAT', 'FOOLS', 'WE', 'ARE'] +8280-266249-0057-396: ref=['IT', 'WAS', 'THE', 'LAST', 'GAME', 'OF', 'CARDS', 'FOR', 'THAT', 'TRIP'] +8280-266249-0057-396: hyp=['IT', 'WAS', 'THE', 'LAST', 'GAME', 'OF', 'CARDS', 'FOR', 'THAT', 'TRIP'] +8280-266249-0058-397: ref=['THE', 'CAPTAIN', 'COMING', 'IN', 'SHORTLY', 'AFTER', 'THE', 'SUDDEN', 'FLIGHT', 'OF', 'THE', 'GAMBLERS', 'TOOK', 'CHARGE', 'OF', 'THE', 'MONEY', 'AND', 'THE', 'NEXT', 'DAY', 'RESTORED', 'IT', 'TO', 'THE', 'OWNERS'] +8280-266249-0058-397: hyp=['THE', 'CAPTAIN', 'COMING', 'IN', 'SHORTLY', 'AFTER', 'THE', 'SUDDEN', 'FLIGHT', 'OF', 'THE', 'GAMBLERS', 'TOOK', 'CHARGE', 'OF', 'THE', 'MONEY', 'AND', 'THE', 'NEXT', 'DAY', 'RESTORED', 'IT', 'TO', 'THE', 'OWNERS'] +8280-266249-0059-398: ref=['TO', "ELSIE'S", 'OBSERVANT', 'EYES', 'IT', 'PRESENTLY', 'BECAME', 'EVIDENT', 'THAT', 'THE', 'DALYS', 'WERE', 'IN', 'VERY', 'STRAITENED', 'CIRCUMSTANCES'] +8280-266249-0059-398: hyp=['TO', "ELSIE'S", 'OBSERVANT', 'EYES', 'IT', 'PRESENTLY', 'BECAME', 'EVIDENT', 'THAT', 'THE', 'DAILIES', 'RAN', 'VERY', 'STRAIGHT', 'IN', 'CIRCUMSTANCES'] +8280-266249-0060-399: ref=['OH', 'HOW', 'KIND', 'HOW', 'VERY', 'KIND', 'MISSUS', 'DALY', 'SAID', 'WITH', 'TEARS', 'OF', 'JOY', 'AND', 'GRATITUDE', 'WE', 'HAVE', 'HARDLY', 'KNOWN', 'HOW', 'WE', 'SHOULD', 'MEET', 'THE', 'MOST', 'NECESSARY', 'EXPENSES', 'OF', 'THIS', 'TRIP', 'BUT', 'HAVE', 'BEEN', 'TRYING', 'TO', 'CAST', 'OUR', 'CARE', 'UPON', 'THE', 'LORD', 'ASKING', 'HIM', 'TO', 'PROVIDE'] +8280-266249-0060-399: hyp=['OH', 'HOW', 'KIND', 'HOW', 'VERY', 'KIND', 'MISSUS', 'DALEY', 'SAID', 'WITH', 'TEARS', 'OF', 'JOY', 'AND', 'GRATITUDE', 'WE', 'HAVE', 'HARDLY', 'KNOWN', 'HOW', 'WE', 'SHOULD', 'MEET', 'THE', 'MOST', 'NECESSARY', 'EXPENSES', 'OF', 'THIS', 'TRIP', 'BUT', 'HAVE', 'BEEN', 'TRYING', 'TO', 'CAST', 'OUR', 'CARE', 'UPON', 'THE', 'LORD', 'ASKING', 'HIM', 'TO', 'PROVIDE'] +8280-266249-0061-400: ref=['AND', 'HOW', 'WONDERFULLY', 'HE', 'HAS', 'ANSWERED', 'OUR', 'PETITIONS'] +8280-266249-0061-400: hyp=['AND', 'HOW', 'WONDERFULLY', 'HE', 'HAS', 
'ANSWERED', 'OUR', 'PETITIONS'] +8280-266249-0062-401: ref=['ELSIE', 'ANSWERED', 'PRESSING', 'HER', 'HAND', 'AFFECTIONATELY', 'ART', 'WE', 'NOT', 'SISTERS', 'IN', 'CHRIST'] +8280-266249-0062-401: hyp=['ELSIE', 'ANSWERED', 'PRESSING', 'HER', 'HAND', 'AFFECTIONATELY', 'ARE', 'WE', 'NOT', 'SISTERS', 'IN', 'CHRIST'] +8280-266249-0063-402: ref=['YE', 'ARE', 'ALL', 'THE', 'CHILDREN', 'OF', 'GOD', 'BY', 'FAITH', 'IN', 'CHRIST', 'JESUS'] +8280-266249-0063-402: hyp=['YE', 'ARE', 'ALL', 'THE', 'CHILDREN', 'OF', 'GOD', 'BY', 'FAITH', 'IN', 'CHRIST', 'JESUS'] +8280-266249-0064-403: ref=['YE', 'ARE', 'ALL', 'ONE', 'IN', 'CHRIST', 'JESUS'] +8280-266249-0064-403: hyp=['YEAR', 'ALL', 'ONE', 'AND', 'CHRIST', 'JESUS'] +8280-266249-0065-404: ref=['WE', 'FEEL', 'MY', 'HUSBAND', 'AND', 'I', 'THAT', 'WE', 'ARE', 'ONLY', 'THE', 'STEWARDS', 'OF', 'HIS', 'BOUNTY', 'AND', 'THAT', 'BECAUSE', 'HE', 'HAS', 'SAID', 'INASMUCH', 'AS', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ONE', 'OF', 'THE', 'LEAST', 'OF', 'THESE', 'MY', 'BRETHREN', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ME', 'IT', 'IS', 'THE', 'GREATEST', 'PRIVILEGE', 'AND', 'DELIGHT', 'TO', 'DO', 'ANYTHING', 'FOR', 'HIS', 'PEOPLE'] +8280-266249-0065-404: hyp=['WE', 'SEE', 'ON', 'MY', 'HUSBAND', 'AND', 'I', 'THAT', 'WE', 'ARE', 'ONLY', 'THE', 'STEWARDS', 'OF', 'HIS', 'BOUNTY', 'AND', 'BECAUSE', 'HE', 'HAS', 'SAID', 'INASMUCH', 'AS', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ONE', 'OF', 'THE', 'LEAST', 'OF', 'THESE', 'MY', 'BRETHREN', 'YE', 'HAVE', 'DONE', 'IT', 'UNTO', 'ME', 'IT', 'IS', 'THE', 'GREATEST', 'PRIVILEGE', 'AND', 'DELIGHT', 'TO', 'DO', 'ANYTHING', 'FOR', 'HIS', 'PEOPLE'] +8461-258277-0000-1649: ref=['WHEN', 'IT', 'WAS', 'THE', 'SEVEN', 'HUNDRED', 'AND', 'EIGHTEENTH', 'NIGHT'] +8461-258277-0000-1649: hyp=['WHEN', 'IT', 'WAS', 'THE', 'SEVEN', 'HUNDREDTH', 'AND', 'EIGHTEENTH', 'NIGHT'] +8461-258277-0001-1650: ref=['BUT', 'HE', 'ANSWERED', 'NEEDS', 'MUST', 'I', 'HAVE', 'ZAYNAB', 'ALSO', 'NOW', 'SUDDENLY', 'THERE', 'CAME', 'A', 'RAP', 'AT', 'THE', 'DOOR', 'AND', 'THE', 'MAID', 'SAID', 'WHO', 'IS', 'AT', 'THE', 'DOOR'] +8461-258277-0001-1650: hyp=['BUT', 'HE', 'ANSWERED', 'NEEDS', 'MICE', 'THY', 'HALVES', 'THINE', 'APPLES', 'SAY', 'NOW', 'CERTAINLY', 'THERE', 'CAME', 'A', 'RAP', 'AT', 'THE', 'DOOR', 'AND', 'THE', 'MAID', 'SAID', 'WHO', 'IS', 'AT', 'THE', 'DOOR'] +8461-258277-0002-1651: ref=['THE', 'KNOCKER', 'REPLIED', 'KAMAR', 'DAUGHTER', 'OF', 'AZARIAH', 'THE', 'JEW', 'SAY', 'ME', 'IS', 'ALI', 'OF', 'CAIRO', 'WITH', 'YOU'] +8461-258277-0002-1651: hyp=['THE', 'KNOCKER', 'REPLIED', 'COME', 'ALL', 'DAUGHTER', 'VASSARIAH', 'THE', 'JEW', 'SAY', 'ME', 'IS', 'ALI', 'OF', 'CAIRO', 'WITH', 'YOU'] +8461-258277-0003-1652: ref=['REPLIED', 'THE', "BROKER'S", 'DAUGHTER', 'O', 'THOU', 'DAUGHTER', 'OF', 'A', 'DOG'] +8461-258277-0003-1652: hyp=['REPLIED', 'THE', "BROKER'S", 'DAUGHTER', 'O', 'THOU', 'DAUGHTER', 'OF', 'A', 'DOG'] +8461-258277-0004-1653: ref=['AND', 'HAVING', 'THUS', 'ISLAMISED', 'SHE', 'ASKED', 'HIM', 'DO', 'MEN', 'IN', 'THE', 'FAITH', 'OF', 'AL', 'ISLAM', 'GIVE', 'MARRIAGE', 'PORTIONS', 'TO', 'WOMEN', 'OR', 'DO', 'WOMEN', 'DOWER', 'MEN'] +8461-258277-0004-1653: hyp=['ON', 'HAVING', 'THUS', 'ISLAMIZED', 'SHE', 'ASKED', 'HIM', 'TWO', 'MEN', 'IN', 'THE', 'FAITH', 'OF', 'ALI', 'SLAM', 'GAVE', 'MARRIAGE', 'PORTIONS', 'TO', 'WOMEN', 'OR', 'TWO', 'WOMEN', 'TO', 'OUR', 'MEN'] +8461-258277-0005-1654: ref=['AND', 'SHE', 'THREW', 'DOWN', 'THE', "JEW'S", 'HEAD', 'BEFORE', 'HIM'] +8461-258277-0005-1654: hyp=['AND', 'SHE', 'THREW', 'DOWN', 'THE', "JEW'S", 'HEAD', 'BEFORE', 'HIM'] 
+8461-258277-0006-1655: ref=['NOW', 'THE', 'CAUSE', 'OF', 'HER', 'SLAYING', 'HER', 'SIRE', 'WAS', 'AS', 'FOLLOWS'] +8461-258277-0006-1655: hyp=['NOW', 'THE', 'CAUSE', 'OF', 'HER', 'SLAYING', 'HER', 'SIRE', 'WAS', 'AS', 'FOLLOWS'] +8461-258277-0007-1656: ref=['THEN', 'HE', 'SET', 'OUT', 'REJOICING', 'TO', 'RETURN', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTY'] +8461-258277-0007-1656: hyp=['THEN', 'HE', 'SAT', 'OUT', 'REJOICING', 'TO', 'RETURN', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTE'] +8461-258277-0008-1657: ref=['SO', 'HE', 'ATE', 'AND', 'FELL', 'DOWN', 'SENSELESS', 'FOR', 'THE', 'SWEETMEATS', 'WERE', 'DRUGGED', 'WITH', 'BHANG', 'WHEREUPON', 'THE', 'KAZI', 'BUNDLED', 'HIM', 'INTO', 'THE', 'SACK', 'AND', 'MADE', 'OFF', 'WITH', 'HIM', 'CHARGER', 'AND', 'CHEST', 'AND', 'ALL', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTY'] +8461-258277-0008-1657: hyp=['SO', 'HE', 'ATE', 'AND', 'FELL', 'DOWN', 'SENSELESS', 'FOR', 'THE', 'SWEETMEATS', 'WERE', 'DRUGGED', 'WITH', 'BANG', 'WHEREUPON', 'THE', 'KAZI', 'BUNDLED', 'HIM', 'INTO', 'THE', 'SACK', 'AND', 'MADE', 'OFF', 'WITH', 'HIM', 'CHARGER', 'AND', 'CHEST', 'AND', 'ALL', 'TO', 'THE', 'BARRACK', 'OF', 'THE', 'FORTE'] +8461-258277-0009-1658: ref=['PRESENTLY', 'HASAN', 'SHUMAN', 'CAME', 'OUT', 'OF', 'A', 'CLOSET', 'AND', 'SAID', 'TO', 'HIM', 'HAST', 'THOU', 'GOTTEN', 'THE', 'GEAR', 'O', 'ALI'] +8461-258277-0009-1658: hyp=['PRESENTLY', 'HER', 'SON', 'SCHUMANN', 'CAME', 'OUT', 'OF', 'A', 'CLOTH', 'AND', 'SAID', 'TO', 'HIM', 'HAST', 'THOU', 'GOTTEN', 'AGAIN', 'O', 'ALI'] +8461-258277-0010-1659: ref=['SO', 'HE', 'TOLD', 'HIM', 'WHAT', 'HAD', 'BEFALLEN', 'HIM', 'AND', 'ADDED', 'IF', 'I', 'KNOW', 'WHITHER', 'THE', 'RASCAL', 'IS', 'GONE', 'AND', 'WHERE', 'TO', 'FIND', 'THE', 'KNAVE', 'I', 'WOULD', 'PAY', 'HIM', 'OUT'] +8461-258277-0010-1659: hyp=['SO', 'HE', 'TOLD', 'HIM', 'WHAT', 'HAD', 'BEFALLEN', 'HIM', 'AND', 'ADDED', 'IF', 'I', 'KNOW', 'WHETHER', 'THE', 'RASCAL', 'IS', 'GONE', 'AND', 'WHERE', 'TO', 'FIND', 'THE', 'KNAVE', 'I', 'WOULD', 'PAY', 'HIM', 'OUT'] +8461-258277-0011-1660: ref=['KNOWEST', 'THOU', 'WHITHER', 'HE', 'WENT'] +8461-258277-0011-1660: hyp=['KNOWEST', 'THOU', 'WHITHER', 'HE', 'WENT'] +8461-258277-0012-1661: ref=['ANSWERED', 'HASAN', 'I', 'KNOW', 'WHERE', 'HE', 'IS', 'AND', 'OPENING', 'THE', 'DOOR', 'OF', 'THE', 'CLOSET', 'SHOWED', 'HIM', 'THE', 'SWEETMEAT', 'SELLER', 'WITHIN', 'DRUGGED', 'AND', 'SENSELESS'] +8461-258277-0012-1661: hyp=['ANSWERED', 'HASAN', 'I', 'KNOW', 'WHERE', 'HE', 'IS', 'AND', 'OPENING', 'THE', 'DOOR', 'OF', 'THE', 'CLOSET', 'SHOWED', 'HIM', 'THE', "SWEETMEAT'S", 'CELLAR', 'WITHIN', 'DRUGGED', 'AND', 'SENSELESS'] +8461-258277-0013-1662: ref=['SO', 'I', 'WENT', 'ROUND', 'ABOUT', 'THE', 'HIGHWAYS', 'OF', 'THE', 'CITY', 'TILL', 'I', 'MET', 'A', 'SWEETMEAT', 'SELLER', 'AND', 'BUYING', 'HIS', 'CLOTHES', 'AND', 'STOCK', 'IN', 'TRADE', 'AND', 'GEAR', 'FOR', 'TEN', 'DINARS', 'DID', 'WHAT', 'WAS', 'DONE'] +8461-258277-0013-1662: hyp=['SO', 'I', 'WENT', 'ROUND', 'ABOUT', 'THE', 'HIGHWAYS', 'OF', 'THE', 'CITY', 'TILL', 'I', 'MET', 'A', 'SWEETMEAT', 'CELLAR', 'AND', 'BUYING', 'HIS', 'CLOTHES', 'AND', 'STOCK', 'IN', 'TRADE', 'AND', 'GEAR', 'FOR', 'TEN', 'DINARS', 'DID', 'WHAT', 'WAS', 'DONE'] +8461-258277-0014-1663: ref=['QUOTH', 'AL', 'RASHID', 'WHOSE', 'HEAD', 'IS', 'THIS'] +8461-258277-0014-1663: hyp=['QUOTH', 'A', 'RASCHID', 'WHOSE', 'HEAD', 'IS', 'THIS'] +8461-258277-0015-1664: ref=['SO', 'ALI', 'RELATED', 'TO', 'HIM', 'ALL', 'THAT', 'HAD', 'PASSED', 'FROM', 'FIRST', 'TO', 'LAST', 'AND', 'THE', 'CALIPH', 'SAID', 'I', 'HAD', 
'NOT', 'THOUGHT', 'THOU', 'WOULDST', 'KILL', 'HIM', 'FOR', 'THAT', 'HE', 'WAS', 'A', 'SORCERER'] +8461-258277-0015-1664: hyp=['SO', 'I', 'RELATED', 'TO', 'HIM', 'ALL', 'THAT', 'PASSED', 'FROM', 'FIRST', 'LAST', 'AND', 'THE', 'CALIPH', 'SAID', 'I', 'HATE', 'NOT', 'THOUGHT', 'THOU', 'WOULDST', 'KILL', 'HIM', 'FOR', 'THAT', 'HE', 'WAS', 'A', 'SORCERER'] +8461-258277-0016-1665: ref=['HE', 'REPLIED', 'I', 'HAVE', 'FORTY', 'LADS', 'BUT', 'THEY', 'ARE', 'IN', 'CAIRO'] +8461-258277-0016-1665: hyp=['HE', 'REPLIED', 'I', 'HAVE', 'FORTY', 'LADS', 'BUT', 'THEY', 'ARE', 'IN', 'CAIRO'] +8461-278226-0000-1633: ref=['AND', 'LAURA', 'HAD', 'HER', 'OWN', 'PET', 'PLANS'] +8461-278226-0000-1633: hyp=['AND', 'LAURA', 'HAD', 'HER', 'OWN', 'PET', 'PLANS'] +8461-278226-0001-1634: ref=['SHE', 'MEANT', 'TO', 'BE', 'SCRUPULOUSLY', 'CONSCIENTIOUS', 'IN', 'THE', 'ADMINISTRATION', 'OF', 'HER', 'TALENTS', 'AND', 'SOMETIMES', 'AT', 'CHURCH', 'ON', 'A', 'SUNDAY', 'WHEN', 'THE', 'SERMON', 'WAS', 'PARTICULARLY', 'AWAKENING', 'SHE', 'MENTALLY', 'DEBATED', 'THE', 'SERIOUS', 'QUESTION', 'AS', 'TO', 'WHETHER', 'NEW', 'BONNETS', 'AND', 'A', 'PAIR', 'OF', "JOUVIN'S", 'GLOVES', 'DAILY', 'WERE', 'NOT', 'SINFUL', 'BUT', 'I', 'THINK', 'SHE', 'DECIDED', 'THAT', 'THE', 'NEW', 'BONNETS', 'AND', 'GLOVES', 'WERE', 'ON', 'THE', 'WHOLE', 'A', 'PARDONABLE', 'WEAKNESS', 'AS', 'BEING', 'GOOD', 'FOR', 'TRADE'] +8461-278226-0001-1634: hyp=['SHE', 'MEANT', 'TO', 'BE', 'SCRUPULOUSLY', 'CONSCIENTIOUS', 'IN', 'THE', 'ADMINISTRATION', 'OF', 'A', 'TALENTS', 'AND', 'SOMETIMES', 'AT', 'CHURCH', 'ON', 'A', 'SUNDAY', 'WHEN', 'THE', 'SIMON', 'WAS', 'PARTICULARLY', 'AWAKENING', 'SHE', 'MENTALLY', 'DEBATED', 'A', 'SERIOUS', 'QUESTION', 'AS', 'TO', 'WHERE', 'THE', 'NEW', 'BONNET', 'AND', 'A', 'PAIR', 'OF', "JUBAUN'S", 'GLOVES', 'DAILY', 'WERE', 'NOT', 'SINFUL', 'BUT', 'I', 'THINK', 'SHE', 'DECIDED', 'THAT', 'THE', 'NEW', 'BONNETS', 'AND', 'GLOVES', 'WERE', 'ON', 'THE', 'WHOLE', 'A', 'PIONABLE', 'WEAKNESS', 'AS', 'BEING', 'GOOD', 'FOR', 'TRADE'] +8461-278226-0002-1635: ref=['ONE', 'MORNING', 'LAURA', 'TOLD', 'HER', 'HUSBAND', 'WITH', 'A', 'GAY', 'LAUGH', 'THAT', 'SHE', 'WAS', 'GOING', 'TO', 'VICTIMIZE', 'HIM', 'BUT', 'HE', 'WAS', 'TO', 'PROMISE', 'TO', 'BE', 'PATIENT', 'AND', 'BEAR', 'WITH', 'HER', 'FOR', 'ONCE', 'IN', 'A', 'WAY'] +8461-278226-0002-1635: hyp=['ONE', 'MORNING', 'LAURA', 'TOLD', 'HER', 'HUSBAND', 'WITH', 'A', 'GAY', 'LAUGH', 'THAT', 'SHE', 'WAS', 'GOING', 'TO', 'VICTIMIZE', 'HIM', 'BUT', 'HE', 'WAS', 'TO', 'PROMISE', 'TO', 'BE', 'PATIENT', 'AND', 'BEAR', 'WITH', 'HER', 'FOR', 'ONCE', 'IN', 'A', 'WAY'] +8461-278226-0003-1636: ref=['I', 'WANT', 'TO', 'SEE', 'ALL', 'THE', 'PICTURES', 'THE', 'MODERN', 'PICTURES', 'ESPECIALLY'] +8461-278226-0003-1636: hyp=['I', 'WANT', 'TO', 'SEE', 'ALL', 'THE', 'PICTURES', 'THE', 'MODERN', 'PICTURES', 'ESPECIALLY'] +8461-278226-0004-1637: ref=['I', 'REMEMBER', 'ALL', 'THE', 'RUBENSES', 'AT', 'THE', 'LOUVRE', 'FOR', 'I', 'SAW', 'THEM', 'THREE', 'YEARS', 'AGO', 'WHEN', 'I', 'WAS', 'STAYING', 'IN', 'PARIS', 'WITH', 'GRANDPAPA'] +8461-278226-0004-1637: hyp=['I', 'REMEMBER', 'ALL', 'THE', 'RUBEN', 'SAYS', 'THAT', 'THE', 'LOUVRE', 'FOR', 'I', 'SAW', 'THEM', 'FOR', 'YEARS', 'AGO', 'WHEN', 'I', 'WAS', 'STAYING', 'IN', 'PARIS', 'WITH', 'GRANDPAPA'] +8461-278226-0005-1638: ref=['SHE', 'RETURNED', 'IN', 'A', 'LITTLE', 'MORE', 'THAN', 'TEN', 'MINUTES', 'IN', 'THE', 'FRESHEST', 'TOILETTE', 'ALL', 'PALE', 'SHIMMERING', 'BLUE', 'LIKE', 'THE', 'SPRING', 'SKY', 'WITH', 'PEARL', 'GREY', 'GLOVES', 'AND', 'BOOTS', 'AND', 'PARASOL', 
'AND', 'A', 'BONNET', 'THAT', 'SEEMED', 'MADE', 'OF', 'AZURE', 'BUTTERFLIES'] +8461-278226-0005-1638: hyp=['SHE', 'RETURNED', 'IN', 'A', 'LITTLE', 'MORE', 'THAN', 'TEN', 'MINUTES', 'IN', 'THE', 'FRESHEST', 'TOILETTE', 'ALL', 'PALE', 'SHIMMERING', 'BLUE', 'LIKE', 'THE', 'SPRING', 'SKY', 'WITH', 'PEAR', 'GRAY', 'GLOVES', 'AND', 'BOOTS', 'AND', 'PARASOL', 'AND', 'A', 'BONNET', 'THAT', 'SEEMED', 'MADE', 'OF', 'USURE', 'BUTTERFLIES'] +8461-278226-0006-1639: ref=['IT', 'WAS', 'DRAWING', 'TOWARDS', 'THE', 'CLOSE', 'OF', 'THIS', 'DELIGHTFUL', 'HONEYMOON', 'TOUR', 'AND', 'IT', 'WAS', 'A', 'BRIGHT', 'SUNSHINY', 'MORNING', 'EARLY', 'IN', 'FEBRUARY', 'BUT', 'FEBRUARY', 'IN', 'PARIS', 'IS', 'SOMETIMES', 'BETTER', 'THAN', 'APRIL', 'IN', 'LONDON'] +8461-278226-0006-1639: hyp=['HE', 'WAS', 'DRAWING', 'TOWARDS', 'THE', 'CLOSE', 'OF', 'THIS', 'DELIGHTFUL', 'HONEYMOON', 'TOUR', 'AND', 'IT', 'WAS', 'A', 'BRIGHT', 'SUNSHINY', 'MORNING', 'EARLY', 'IN', 'FEBRUARY', 'BUT', 'FEBRUARY', 'IN', 'PARIS', 'IS', 'SOMETIMES', 'BETTER', 'THAN', 'APRIL', 'IN', 'LONDON'] +8461-278226-0007-1640: ref=['BUT', 'SHE', 'FIXED', 'UPON', 'A', 'PICTURE', 'WHICH', 'SHE', 'SAID', 'SHE', 'PREFERRED', 'TO', 'ANYTHING', 'SHE', 'HAD', 'SEEN', 'IN', 'THE', 'GALLERY'] +8461-278226-0007-1640: hyp=['BUT', 'SHE', 'FIXED', 'UPON', 'A', 'PICTURE', 'WHICH', 'SHE', 'SAID', 'SHE', 'PREFERRED', 'TO', 'ANYTHING', 'SHE', 'HAD', 'SEEN', 'IN', 'THE', 'GALLERY'] +8461-278226-0008-1641: ref=['PHILIP', 'JOCELYN', 'WAS', 'EXAMINING', 'SOME', 'PICTURES', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'ROOM', 'WHEN', 'HIS', 'WIFE', 'MADE', 'THIS', 'DISCOVERY'] +8461-278226-0008-1641: hyp=['PHILIP', 'JOSCELYN', 'WAS', 'EXAMINING', 'SOME', 'PICTURES', 'ON', 'THE', 'OTHER', 'SIDE', 'OF', 'THE', 'ROOM', 'WHEN', 'HIS', 'WIFE', 'MADE', 'THE', 'DISCOVERY'] +8461-278226-0009-1642: ref=['HOW', 'I', 'WISH', 'YOU', 'COULD', 'GET', 'ME', 'A', 'COPY', 'OF', 'THAT', 'PICTURE', 'PHILIP', 'LAURA', 'SAID', 'ENTREATINGLY'] +8461-278226-0009-1642: hyp=['HOW', 'I', 'WISH', 'YOU', 'COULD', 'GET', 'ME', 'A', 'COPY', 'OF', 'THAT', 'PICTURE', 'FILLIP', 'LAURA', 'SAID', 'ENTREATINGLY'] +8461-278226-0010-1643: ref=['I', 'SHOULD', 'SO', 'LIKE', 'ONE', 'TO', 'HANG', 'IN', 'MY', 'MORNING', 'ROOM', 'AT', "JOCELYN'S", 'ROCK'] +8461-278226-0010-1643: hyp=['I', 'SHOULD', 'SO', 'LIKE', 'ONE', 'TO', 'HANG', 'IN', 'MY', 'MORNING', 'ROOM', 'A', 'JOSCELYN', 'STRUCK'] +8461-278226-0011-1644: ref=['SHE', 'TURNED', 'TO', 'THE', 'FRENCH', 'ARTIST', 'PRESENTLY', 'AND', 'ASKED', 'HIM', 'WHERE', 'THE', 'ELDER', 'MISTER', 'KERSTALL', 'LIVED', 'AND', 'IF', 'THERE', 'WAS', 'ANY', 'POSSIBILITY', 'OF', 'SEEING', 'HIM'] +8461-278226-0011-1644: hyp=['SHE', 'TURNED', 'TO', 'THE', 'FRENCH', 'ARD', 'THIS', 'PRESENTLY', 'AND', 'ASKED', 'HIM', 'WHERE', 'THE', 'ELDER', 'MISTER', 'KIRSTALL', 'LIVED', 'AND', 'IF', 'THERE', 'WAS', 'ANY', 'POSSIBILITY', 'OF', 'SEEING', 'HIM'] +8461-278226-0012-1645: ref=['THEY', 'HAVE', 'SAID', 'THAT', 'HE', 'IS', 'EVEN', 'A', 'LITTLE', 'IMBECILE', 'THAT', 'HE', 'DOES', 'NOT', 'REMEMBER', 'HIMSELF', 'OF', 'THE', 'MOST', 'COMMON', 'EVENTS', 'OF', 'HIS', 'LIFE'] +8461-278226-0012-1645: hyp=['THEY', 'HAVE', 'SAID', 'THAT', 'HE', 'IS', 'EVEN', 'A', 'LITTLE', 'IMBECILE', 'THAT', 'HE', 'DOES', 'NOT', 'REMEMBER', 'HIMSELF', 'OF', 'THE', 'MOST', 'COMMON', 'EVENTS', 'OF', 'HIS', 'LIFE'] +8461-278226-0013-1646: ref=['BUT', 'THERE', 'ARE', 'SOME', 'OTHERS', 'WHO', 'SAY', 'THAT', 'HIS', 'MEMORY', 'HAS', 'NOT', 'ALTOGETHER', 'FAILED', 'AND', 'THAT', 'HE', 'IS', 'STILL', 'ENOUGH', 'HARSHLY', 'CRITICAL', 
'TOWARDS', 'THE', 'WORKS', 'OF', 'OTHERS'] +8461-278226-0013-1646: hyp=['BUT', 'THERE', 'ARE', 'SOME', 'OTHERS', 'WHO', 'SAY', 'THAT', 'HIS', 'MEMORY', 'HAS', 'NOT', 'ALTOGETHER', 'FAILED', 'AND', 'THAT', 'HE', 'STILL', 'ENOUGH', 'HARSHLY', 'CRITICAL', 'TOWARDS', 'THE', 'WORKS', 'OF', 'OTHERS'] +8461-278226-0014-1647: ref=['I', "DON'T", 'THINK', 'YOU', 'WILL', 'HAVE', 'ANY', 'DIFFICULTY', 'IN', 'FINDING', 'THE', 'HOUSE'] +8461-278226-0014-1647: hyp=['I', "DON'T", 'THINK', 'YOU', 'WILL', 'HAVE', 'ANY', 'DIFFICULTY', 'IN', 'FINDING', 'THE', 'HOUSE'] +8461-278226-0015-1648: ref=['YOU', 'WILL', 'BE', 'DOING', 'ME', 'SUCH', 'A', 'FAVOUR', 'PHILIP', 'IF', "YOU'LL", 'SAY', 'YES'] +8461-278226-0015-1648: hyp=['YOU', 'WILL', 'BE', 'BETWEEN', 'ME', 'SUCH', 'A', 'FAVOUR', 'FELLOW', 'IF', 'YOU', 'SAY', 'YES'] +8461-281231-0000-1594: ref=['HIS', 'FOLLOWERS', 'RUSHED', 'FORWARD', 'TO', 'WHERE', 'HE', 'LAY', 'AND', 'THEIR', 'UNITED', 'FORCE', 'COMPELLING', 'THE', 'BLACK', 'KNIGHT', 'TO', 'PAUSE', 'THEY', 'DRAGGED', 'THEIR', 'WOUNDED', 'LEADER', 'WITHIN', 'THE', 'WALLS'] +8461-281231-0000-1594: hyp=['HIS', 'FOLLOWERS', 'RUSH', 'FORWARD', 'WHERE', 'HE', 'LAY', 'AND', 'THEIR', 'UNITED', 'FORCE', 'COMPELLING', 'THE', 'BLACK', 'NIGHT', 'TO', 'PAUSE', 'THEY', 'DRAGGED', 'THE', 'WOUNDED', 'LEADER', 'WITHIN', 'THE', 'WALLS'] +8461-281231-0001-1595: ref=['IT', 'WAS', 'ON', 'THEIR', 'JOURNEY', 'TO', 'THAT', 'TOWN', 'THAT', 'THEY', 'WERE', 'OVERTAKEN', 'ON', 'THE', 'ROAD', 'BY', 'CEDRIC', 'AND', 'HIS', 'PARTY', 'IN', 'WHOSE', 'COMPANY', 'THEY', 'WERE', 'AFTERWARDS', 'CARRIED', 'CAPTIVE', 'TO', 'THE', 'CASTLE', 'OF', 'TORQUILSTONE'] +8461-281231-0001-1595: hyp=['IT', 'WAS', 'ON', 'THEIR', 'JOURNEY', 'TO', 'THAT', 'TOWN', 'THAT', 'THEY', 'WERE', 'OVERTAKEN', 'ON', 'THE', 'ROAD', 'BY', 'SADRIC', 'AND', 'HIS', 'PARTY', 'IN', 'WHOSE', 'COMPANY', 'THEY', 'WERE', 'AFTERWARDS', 'CARRIED', 'CAPTIVE', 'TO', 'THE', 'COUNCIL', 'OF', 'TORCLESTONE'] +8461-281231-0002-1596: ref=['AS', 'HE', 'LAY', 'UPON', 'HIS', 'BED', 'RACKED', 'WITH', 'PAIN', 'AND', 'MENTAL', 'AGONY', 'AND', 'FILLED', 'WITH', 'THE', 'FEAR', 'OF', 'RAPIDLY', 'APPROACHING', 'DEATH', 'HE', 'HEARD', 'A', 'VOICE', 'ADDRESS', 'HIM'] +8461-281231-0002-1596: hyp=['I', 'SEE', 'LAY', 'UPON', 'HIS', 'BED', 'RAT', 'WITH', 'PAIN', 'AND', 'MANTLE', 'AGONY', 'AND', 'FILLED', 'WITH', 'THE', 'FEAR', 'OF', 'RAPIDLY', 'APPROACHING', 'DEATH', 'HE', 'HEARD', 'A', 'VOICE', 'ADDRESS', 'HIM'] +8461-281231-0003-1597: ref=['WHAT', 'ART', 'THOU', 'HE', 'EXCLAIMED', 'IN', 'TERROR'] +8461-281231-0003-1597: hyp=['WHAT', 'ART', 'THOU', 'HE', 'EXCLAIMED', 'IN', 'TERROR'] +8461-281231-0004-1598: ref=['LEAVE', 'ME', 'AND', 'SEEK', 'THE', 'SAXON', 'WITCH', 'ULRICA', 'WHO', 'WAS', 'MY', 'TEMPTRESS', 'LET', 'HER', 'AS', 'WELL', 'AS', 'I', 'TASTE', 'THE', 'TORTURES', 'WHICH', 'ANTICIPATE', 'HELL'] +8461-281231-0004-1598: hyp=['LEAVE', 'ME', 'AND', 'SEEK', 'THE', 'SAXON', 'WHICH', 'OVERREKA', 'WHO', 'WAS', 'MY', 'TEMPTRESS', 'LET', 'HER', 'AS', 'WELL', 'AS', 'I', 'TASTE', 'THE', 'TORTURES', 'WHICH', 'ANTICIPATE', 'HELL'] +8461-281231-0005-1599: ref=['EXCLAIMED', 'THE', 'NORMAN', 'HO'] +8461-281231-0005-1599: hyp=['EXCLAIMED', 'THE', 'NORMAN', 'OH'] +8461-281231-0006-1600: ref=['REMEMBEREST', 'THOU', 'THE', 'MAGAZINE', 'OF', 'FUEL', 'THAT', 'IS', 'STORED', 'BENEATH', 'THESE', 'APARTMENTS', 'WOMAN'] +8461-281231-0006-1600: hyp=['REMEMBER', 'AS', 'THOU', 'THE', 'MAGAZINE', 'OF', 'FUEL', 'THAT', 'IS', 'STOLE', 'BENEATH', 'THESE', 'APARTMENTS', 'WOMAN'] +8461-281231-0007-1601: ref=['THEY', 'ARE', 'FAST', 
'RISING', 'AT', 'LEAST', 'SAID', 'ULRICA', 'AND', 'A', 'SIGNAL', 'SHALL', 'SOON', 'WAVE', 'TO', 'WARN', 'THE', 'BESIEGERS', 'TO', 'PRESS', 'HARD', 'UPON', 'THOSE', 'WHO', 'WOULD', 'EXTINGUISH', 'THEM'] +8461-281231-0007-1601: hyp=['THEY', 'ARE', 'FAST', 'RISING', 'AT', 'LEAST', 'SAID', 'A', 'RIKA', 'AND', 'A', 'SIGNAL', 'SHALL', 'SOON', 'WAVE', 'TOWARD', 'THE', 'BESIEGERS', 'TO', 'PRESS', 'HARD', 'UPON', 'THOSE', 'WHO', 'WOULD', 'EXTINGUISH', 'THEM'] +8461-281231-0008-1602: ref=['MEANWHILE', 'THE', 'BLACK', 'KNIGHT', 'HAD', 'LED', 'HIS', 'FORCES', 'AGAIN', 'TO', 'THE', 'ATTACK', 'AND', 'SO', 'VIGOROUS', 'WAS', 'THEIR', 'ASSAULT', 'THAT', 'BEFORE', 'LONG', 'THE', 'GATE', 'OF', 'THE', 'CASTLE', 'ALONE', 'SEPARATED', 'THEM', 'FROM', 'THOSE', 'WITHIN'] +8461-281231-0008-1602: hyp=['MEANWHILE', 'THE', 'BLACK', 'KNIGHT', 'HAD', 'LED', 'HIS', 'FORCES', 'AGAIN', 'TO', 'THE', 'ATTACK', 'AND', 'SO', 'VIGOROUS', 'WAS', 'THEIR', 'ASSAULT', 'THAT', 'BEFORE', 'LONG', 'THE', 'GATE', 'OF', 'THE', 'CASTLE', 'ALONE', 'SEPARATED', 'THEM', 'FROM', 'THOSE', 'WITHIN'] +8461-281231-0009-1603: ref=['THE', 'DEFENDERS', 'FINDING', 'THE', 'CASTLE', 'TO', 'BE', 'ON', 'FIRE', 'NOW', 'DETERMINED', 'TO', 'SELL', 'THEIR', 'LIVES', 'AS', 'DEARLY', 'AS', 'THEY', 'COULD', 'AND', 'HEADED', 'BY', 'DE', 'BRACY', 'THEY', 'THREW', 'OPEN', 'THE', 'GATE', 'AND', 'WERE', 'AT', 'ONCE', 'INVOLVED', 'IN', 'A', 'TERRIFIC', 'CONFLICT', 'WITH', 'THOSE', 'OUTSIDE'] +8461-281231-0009-1603: hyp=['THE', 'DEFENDERS', 'FIND', 'IN', 'THE', 'CASTLE', 'TO', 'BE', 'ON', 'FIRE', 'NOW', 'DETERMINED', 'TO', 'SELL', 'THEIR', 'LIVES', 'AS', 'DAILY', 'AS', 'THEY', 'COULD', 'AND', 'HEADED', 'BY', 'THE', 'BRAZY', 'THEY', 'THREW', 'OPEN', 'THE', 'GATE', 'AND', 'WERE', 'AT', 'ONCE', 'INVOLVED', 'IN', 'A', 'TERRIFIC', 'CONFLICT', 'WITH', 'THOSE', 'OUTSIDE'] +8461-281231-0010-1604: ref=['THE', 'BLACK', 'KNIGHT', 'WITH', 'PORTENTOUS', 'STRENGTH', 'FORCED', 'HIS', 'WAY', 'INWARD', 'IN', 'DESPITE', 'OF', 'DE', 'BRACY', 'AND', 'HIS', 'FOLLOWERS'] +8461-281231-0010-1604: hyp=['THE', 'BLACK', 'NIGHT', 'WITH', 'POTENTAL', 'STRENGTH', 'FORCES', 'AWAY', 'IN', 'WOOD', 'IN', 'DESPITE', 'OF', 'THE', 'BRACY', 'AND', 'HIS', 'FOLLOWERS'] +8461-281231-0011-1605: ref=['TWO', 'OF', 'THE', 'FOREMOST', 'INSTANTLY', 'FELL', 'AND', 'THE', 'REST', 'GAVE', 'WAY', 'NOTWITHSTANDING', 'ALL', 'THEIR', 'LEADERS', 'EFFORTS', 'TO', 'STOP', 'THEM'] +8461-281231-0011-1605: hyp=['TWO', 'OF', 'THE', 'FOREMOST', 'INSTANTLY', 'FELL', 'AND', 'THE', 'REST', 'GAVE', 'WAY', 'NOTWITHSTANDING', 'ALL', 'THE', "LEADER'S", 'EFFORTS', 'TO', 'STOP', 'THEM'] +8461-281231-0012-1606: ref=['THE', 'BLACK', 'KNIGHT', 'WAS', 'SOON', 'ENGAGED', 'IN', 'DESPERATE', 'COMBAT', 'WITH', 'THE', 'NORMAN', 'CHIEF', 'AND', 'THE', 'VAULTED', 'ROOF', 'OF', 'THE', 'HALL', 'RUNG', 'WITH', 'THEIR', 'FURIOUS', 'BLOWS'] +8461-281231-0012-1606: hyp=['THE', 'BLACK', 'NIGHT', 'WAS', 'SOON', 'ENGAGED', 'IN', 'DESPERATE', 'COMBAT', 'WITH', 'THE', 'NORMAN', 'CHIEF', 'AND', 'DEVOTED', 'ROOF', 'OF', 'THE', 'HALL', 'RUNG', 'WITH', 'A', 'FURIOUS', 'BLOWS'] +8461-281231-0013-1607: ref=['AT', 'LENGTH', 'DE', 'BRACY', 'FELL'] +8461-281231-0013-1607: hyp=['AT', 'LENGTH', 'THE', 'BRACY', 'FELL'] +8461-281231-0014-1608: ref=['TELL', 'ME', 'THY', 'NAME', 'OR', 'WORK', 'THY', 'PLEASURE', 'ON', 'ME'] +8461-281231-0014-1608: hyp=['TELL', 'ME', 'THY', 'NAME', 'OR', 'WORK', 'THY', 'PLEASURE', 'ON', 'ME'] +8461-281231-0015-1609: ref=['YET', 'FIRST', 'LET', 'ME', 'SAY', 'SAID', 'DE', 'BRACY', 'WHAT', 'IT', 'IMPORTS', 'THEE', 'TO', 'KNOW'] 
+8461-281231-0015-1609: hyp=['YET', 'FIRST', 'LET', 'ME', 'SAY', 'SAID', 'DEBRACY', 'WHAT', 'DID', 'IMPORTS', 'THEE', 'TO', 'KNOW'] +8461-281231-0016-1610: ref=['EXCLAIMED', 'THE', 'BLACK', 'KNIGHT', 'PRISONER', 'AND', 'PERISH'] +8461-281231-0016-1610: hyp=['EXCLAIMED', 'THE', 'BLACK', 'KNIGHT', 'PRISONER', 'AND', 'PERISH'] +8461-281231-0017-1611: ref=['THE', 'LIFE', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'CASTLE', 'SHALL', 'ANSWER', 'IT', 'IF', 'A', 'HAIR', 'OF', 'HIS', 'HEAD', 'BE', 'SINGED', 'SHOW', 'ME', 'HIS', 'CHAMBER'] +8461-281231-0017-1611: hyp=['THE', 'LIFE', 'OF', 'EVERY', 'MAN', 'IN', 'THE', 'CASTLE', 'SHALL', 'ANSWER', 'IT', 'IF', 'A', 'HAIR', 'OF', 'HIS', 'HEAD', 'BE', 'SINGED', 'SHOW', 'ME', 'HIS', 'CHAMBER'] +8461-281231-0018-1612: ref=['RAISING', 'THE', 'WOUNDED', 'MAN', 'WITH', 'EASE', 'THE', 'BLACK', 'KNIGHT', 'RUSHED', 'WITH', 'HIM', 'TO', 'THE', 'POSTERN', 'GATE', 'AND', 'HAVING', 'THERE', 'DELIVERED', 'HIS', 'BURDEN', 'TO', 'THE', 'CARE', 'OF', 'TWO', 'YEOMEN', 'HE', 'AGAIN', 'ENTERED', 'THE', 'CASTLE', 'TO', 'ASSIST', 'IN', 'THE', 'RESCUE', 'OF', 'THE', 'OTHER', 'PRISONERS'] +8461-281231-0018-1612: hyp=['RAISING', 'THE', 'WOUNDED', 'MAN', 'WITH', 'THESE', 'THE', 'BLACK', 'KNIGHT', 'RUSHED', 'WITH', 'THEM', 'TO', 'THE', 'PASTING', 'GATE', 'AND', 'HAVING', 'THERE', 'DELIVERED', 'HIS', 'BURDEN', 'TO', 'THE', 'CARE', 'OF', 'TWO', 'YEOMAN', 'HE', 'AGAIN', 'ENTERED', 'THE', 'CASTLE', 'TO', 'ASSIST', 'IN', 'THE', 'RESCUE', 'OF', 'THAT', 'A', 'PRISONERS'] +8461-281231-0019-1613: ref=['BUT', 'IN', 'OTHER', 'PARTS', 'THE', 'BESIEGERS', 'PURSUED', 'THE', 'DEFENDERS', 'OF', 'THE', 'CASTLE', 'FROM', 'CHAMBER', 'TO', 'CHAMBER', 'AND', 'SATIATED', 'IN', 'THEIR', 'BLOOD', 'THE', 'VENGEANCE', 'WHICH', 'HAD', 'LONG', 'ANIMATED', 'THEM', 'AGAINST', 'THE', 'SOLDIERS', 'OF', 'THE', 'TYRANT', 'FRONT', 'DE', 'BOEUF'] +8461-281231-0019-1613: hyp=['BUT', 'IN', 'OTHER', 'PARTS', 'THE', 'BESIEGERS', 'PURSUED', 'THE', 'DEFENDERS', 'OF', 'THE', 'CASTLE', 'FROM', 'CHAMBER', 'TO', 'CHAMBER', 'AND', 'SATIATED', 'IN', 'THE', 'BLOOD', 'THE', 'VENGEANCE', 'WHICH', 'HAD', 'LONG', 'ANIMATED', 'THEM', 'AGAINST', 'THE', 'SOLDIERS', 'OF', 'THE', 'TYRANT', 'FRONT', 'DE', 'BOEUF'] +8461-281231-0020-1614: ref=['AS', 'THE', 'FIRE', 'COMMENCED', 'TO', 'SPREAD', 'RAPIDLY', 'THROUGH', 'ALL', 'PARTS', 'OF', 'THE', 'CASTLE', 'ULRICA', 'APPEARED', 'ON', 'ONE', 'OF', 'THE', 'TURRETS'] +8461-281231-0020-1614: hyp=['AS', 'THE', 'FIRE', 'COMMANDS', 'TO', 'SPREAD', 'RAPIDLY', 'THROUGH', 'ALL', 'PARTS', 'OF', 'THE', 'CASTLE', 'OR', 'RICA', 'APPEARED', 'ON', 'ONE', 'OF', 'THE', 'TURRETS'] +8461-281231-0021-1615: ref=['BEFORE', 'LONG', 'THE', 'TOWERING', 'FLAMES', 'HAD', 'SURMOUNTED', 'EVERY', 'OBSTRUCTION', 'AND', 'ROSE', 'TO', 'THE', 'EVENING', 'SKIES', 'ONE', 'HUGE', 'AND', 'BURNING', 'BEACON', 'SEEN', 'FAR', 'AND', 'WIDE', 'THROUGH', 'THE', 'ADJACENT', 'COUNTRY', 'TOWER', 'AFTER', 'TOWER', 'CRASHED', 'DOWN', 'WITH', 'BLAZING', 'ROOF', 'AND', 'RAFTER'] +8461-281231-0021-1615: hyp=['BEFORE', 'LONG', 'THE', 'TOWERING', 'FLAMES', 'HAD', 'SURMOUNTED', 'EVERY', 'OBSTRUCTION', 'AND', 'ROSE', 'TO', 'THE', 'EVENING', 'SKIES', 'WHEN', 'HUGE', 'AND', 'BURNING', 'BEACON', 'SEEMED', 'FAR', 'AND', 'WIDE', 'THROUGH', 'THE', 'ADJACENT', 'COUNTRY', 'TOWERED', 'AFTER', 'TOWER', 'CRASHED', 'DOWN', 'WITH', 'BLAZING', 'ROOF', 'AND', 'RAFTER'] +8461-281231-0022-1616: ref=['AT', 'LENGTH', 'WITH', 'A', 'TERRIFIC', 'CRASH', 'THE', 'WHOLE', 'TURRET', 'GAVE', 'WAY', 'AND', 'SHE', 'PERISHED', 'IN', 'THE', 'FLAMES', 'WHICH', 'HAD', 'CONSUMED', 'HER', 
'TYRANT'] +8461-281231-0022-1616: hyp=['AT', 'LENGTH', 'WITH', 'A', 'TERRIFIC', 'CRASH', 'THE', 'WHOLE', 'TORROR', 'GAVE', 'WAY', 'AND', 'SHE', 'PERISHED', 'IN', 'FLAMES', 'WHICH', 'I', 'CONSUMED', 'HER', 'TYRANT'] +8461-281231-0023-1617: ref=['WHEN', 'THE', 'OUTLAWS', 'HAD', 'DIVIDED', 'THE', 'SPOILS', 'WHICH', 'THEY', 'HAD', 'TAKEN', 'FROM', 'THE', 'CASTLE', 'OF', 'TORQUILSTONE', 'CEDRIC', 'PREPARED', 'TO', 'TAKE', 'HIS', 'DEPARTURE'] +8461-281231-0023-1617: hyp=['WHEN', 'THE', 'OUTLAWS', 'HAD', 'DIVIDED', 'THE', 'SPOILS', 'WHICH', 'THEY', 'HAD', 'TAKEN', 'FROM', 'THE', 'CASTLE', 'OF', 'TORKILSTONE', 'CEDRIC', 'PREPARED', 'TO', 'TAKE', 'HIS', 'DEPARTURE'] +8461-281231-0024-1618: ref=['HE', 'LEFT', 'THE', 'GALLANT', 'BAND', 'OF', 'FORESTERS', 'SORROWING', 'DEEPLY', 'FOR', 'HIS', 'LOST', 'FRIEND', 'THE', 'LORD', 'OF', 'CONINGSBURGH', 'AND', 'HE', 'AND', 'HIS', 'FOLLOWERS', 'HAD', 'SCARCE', 'DEPARTED', 'WHEN', 'A', 'PROCESSION', 'MOVED', 'SLOWLY', 'FROM', 'UNDER', 'THE', 'GREENWOOD', 'BRANCHES', 'IN', 'THE', 'DIRECTION', 'WHICH', 'HE', 'HAD', 'TAKEN', 'IN', 'THE', 'CENTRE', 'OF', 'WHICH', 'WAS', 'THE', 'CAR', 'IN', 'WHICH', 'THE', 'BODY', 'OF', 'ATHELSTANE', 'WAS', 'LAID'] +8461-281231-0024-1618: hyp=['HE', 'LEFT', 'THE', 'GALLANT', 'BAND', 'OF', 'FORESTERS', 'SORROWING', 'DEEPLY', 'FOR', 'HIS', 'LOST', 'FRIEND', 'THE', 'LORD', 'OF', 'CUNNINGSBURG', 'AND', 'HE', 'AND', 'HIS', 'FOLLOWERS', 'HAD', 'SCARCE', 'DEPARTED', 'WHEN', 'A', 'PROCESSION', 'MOVED', 'SLOWLY', 'FROM', 'UNDER', 'THE', 'GREENWOOD', 'BRANCHES', 'IN', 'THE', 'DIRECTION', 'WHICH', 'HE', 'HAD', 'TAKEN', 'IN', 'THE', 'CENTRE', 'OF', 'WHICH', 'WAS', 'THE', 'CAR', 'IN', 'WHICH', 'THE', 'BODY', 'OF', 'ADDSTEIN', 'WAS', 'LAID'] +8461-281231-0025-1619: ref=['DE', 'BRACY', 'BOWED', 'LOW', 'AND', 'IN', 'SILENCE', 'THREW', 'HIMSELF', 'UPON', 'A', 'HORSE', 'AND', 'GALLOPED', 'OFF', 'THROUGH', 'THE', 'WOOD'] +8461-281231-0025-1619: hyp=['DEBRACY', 'BOWED', 'LOW', 'AND', 'IN', 'SILENCE', 'THREW', 'HIMSELF', 'UPON', 'A', 'HORSE', 'AND', 'GALLOPED', 'OFF', 'THROUGH', 'THE', 'WOODS'] +8461-281231-0026-1620: ref=['HERE', 'IS', 'A', 'BUGLE', 'WHICH', 'AN', 'ENGLISH', 'YEOMAN', 'HAS', 'ONCE', 'WORN', 'I', 'PRAY', 'YOU', 'TO', 'KEEP', 'IT', 'AS', 'A', 'MEMORIAL', 'OF', 'YOUR', 'GALLANT', 'BEARING'] +8461-281231-0026-1620: hyp=['HERE', 'IS', 'A', 'BUGLE', 'WHICH', 'AN', 'ENGLISH', 'YEOMAN', 'HAS', 'ONCE', 'WORN', 'I', 'PRAY', 'YOU', 'TO', 'KEEP', 'IT', 'AS', 'A', 'MEMORIAL', 'OF', 'YOUR', 'GALLANT', 'BEARING'] +8461-281231-0027-1621: ref=['SO', 'SAYING', 'HE', 'MOUNTED', 'HIS', 'STRONG', 'WAR', 'HORSE', 'AND', 'RODE', 'OFF', 'THROUGH', 'THE', 'FOREST'] +8461-281231-0027-1621: hyp=['SO', 'SAYING', 'HE', 'MOUNTED', 'HIS', 'STRONG', 'WAR', 'HORSE', 'AND', 'RODE', 'OFF', 'THROUGH', 'THE', 'FOREST'] +8461-281231-0028-1622: ref=['DURING', 'ALL', 'THIS', 'TIME', 'ISAAC', 'OF', 'YORK', 'SAT', 'MOURNFULLY', 'APART', 'GRIEVING', 'FOR', 'THE', 'LOSS', 'OF', 'HIS', 'DEARLY', 'LOVED', 'DAUGHTER', 'REBECCA'] +8461-281231-0028-1622: hyp=['DURING', 'ALL', 'THIS', 'TIME', 'ISAAC', 'OF', 'YORK', 'SAT', 'MOURNFULLY', 'APART', 'GRIEVING', 'FOR', 'THE', 'LOSS', 'OF', 'HIS', 'STEELY', 'LOVED', 'DAUGHTER', 'REBECCA'] +8461-281231-0029-1623: ref=['AND', 'WITH', 'THIS', 'EPISTLE', 'THE', 'UNHAPPY', 'OLD', 'MAN', 'SET', 'OUT', 'TO', 'PROCURE', 'HIS', "DAUGHTER'S", 'LIBERATION'] +8461-281231-0029-1623: hyp=['AND', 'WITH', 'THIS', 'EPISTLE', 'THEN', 'HAPPY', 'OLD', 'MAN', 'SET', 'OUT', 'TO', 'PROCURE', 'HIS', "DAUGHTER'S", 'LIBERATION'] +8461-281231-0030-1624: ref=['THE', 
'TEMPLAR', 'IS', 'FLED', 'SAID', 'DE', 'BRACY', 'IN', 'ANSWER', 'TO', 'THE', "PRINCE'S", 'EAGER', 'QUESTIONS', 'FRONT', 'DE', 'BOEUF', 'YOU', 'WILL', 'NEVER', 'SEE', 'MORE', 'AND', 'HE', 'ADDED', 'IN', 'A', 'LOW', 'AND', 'EMPHATIC', 'TONE', 'RICHARD', 'IS', 'IN', 'ENGLAND', 'I', 'HAVE', 'SEEN', 'HIM', 'AND', 'SPOKEN', 'WITH', 'HIM'] +8461-281231-0030-1624: hyp=['THE', 'TEMPLAR', 'IS', 'FLED', 'SAID', 'THE', 'BRACEY', 'IN', 'ANSWER', 'TO', 'THE', "PRINCE'S", 'EAGER', 'QUESTIONS', 'FROM', 'THE', 'BIRTH', 'YOU', 'WILL', 'NEVER', 'SEE', 'MORE', 'AND', 'HE', 'ADDED', 'IN', 'A', 'LOW', 'AND', 'EMPHATIC', 'TONE', 'RICHARD', 'IS', 'IN', 'ENGLAND', 'I', 'HAVE', 'SEEN', 'HIM', 'AND', 'SPOKEN', 'WITH', 'HIM'] +8461-281231-0031-1625: ref=['HE', 'APPEALED', 'TO', 'DE', 'BRACY', 'TO', 'ASSIST', 'HIM', 'IN', 'THIS', 'PROJECT', 'AND', 'BECAME', 'AT', 'ONCE', 'DEEPLY', 'SUSPICIOUS', 'OF', 'THE', "KNIGHT'S", 'LOYALTY', 'TOWARDS', 'HIM', 'WHEN', 'HE', 'DECLINED', 'TO', 'LIFT', 'HAND', 'AGAINST', 'THE', 'MAN', 'WHO', 'HAD', 'SPARED', 'HIS', 'OWN', 'LIFE'] +8461-281231-0031-1625: hyp=['HE', 'APPEALED', 'TO', 'THE', 'BRACELE', 'TO', 'ASSIST', 'HIM', 'IN', 'HIS', 'PROJECT', 'AND', 'BECAME', 'AT', 'ONCE', 'DEEPLY', 'SUSPICIOUS', 'OF', 'THE', "NIGHT'S", 'LOYALTY', 'TOWARDS', 'HIM', 'WHEN', 'HE', 'DECLINED', 'TO', 'LIFT', 'HAND', 'AGAINST', 'THE', 'MAN', 'WHO', 'HAD', 'SPARED', 'HIS', 'OWN', 'LIFE'] +8461-281231-0032-1626: ref=['BEFORE', 'REACHING', 'HIS', 'DESTINATION', 'HE', 'WAS', 'TOLD', 'THAT', 'LUCAS', 'DE', 'BEAUMANOIR', 'THE', 'GRAND', 'MASTER', 'OF', 'THE', 'ORDER', 'OF', 'THE', 'TEMPLARS', 'WAS', 'THEN', 'ON', 'VISIT', 'TO', 'THE', 'PRECEPTORY'] +8461-281231-0032-1626: hyp=['BEFORE', 'REACHING', 'ITS', 'DESTINATION', 'HE', 'WAS', 'TOLD', 'THAT', 'LUCAS', 'THE', 'BOURMANOIR', 'THE', 'GRAND', 'MASTER', 'OF', 'THE', 'ORDER', 'OF', 'THE', 'TEMPLARS', 'WAS', 'THEN', 'ON', 'VISIT', 'TO', 'THEIR', 'PRECEPTORY'] +8461-281231-0033-1627: ref=['HE', 'HAD', 'NOT', 'UNTIL', 'THEN', 'BEEN', 'INFORMED', 'OF', 'THE', 'PRESENCE', 'OF', 'THE', 'JEWISH', 'MAIDEN', 'IN', 'THE', 'ABODE', 'OF', 'THE', 'TEMPLARS', 'AND', 'GREAT', 'WAS', 'HIS', 'FURY', 'AND', 'INDIGNATION', 'ON', 'LEARNING', 'THAT', 'SHE', 'WAS', 'AMONGST', 'THEM'] +8461-281231-0033-1627: hyp=['HE', 'HAD', 'NOT', 'UNTIL', 'THEN', 'BEEN', 'INFORMED', 'TO', 'THE', 'PRESENCE', 'OF', 'THE', 'JEWISH', 'MAIDEN', 'IN', 'THE', 'ABODE', 'OF', 'THE', 'TEMPLARS', 'AND', 'GREAT', 'WAS', 'HIS', 'FURY', 'AND', 'INDIGNATION', 'ON', 'LEARNING', 'THAT', 'SHE', 'WAS', 'AMONGST', 'THEM'] +8461-281231-0034-1628: ref=['POOR', 'ISAAC', 'WAS', 'HURRIED', 'OFF', 'ACCORDINGLY', 'AND', 'EXPELLED', 'FROM', 'THE', 'PRECEPTORY', 'ALL', 'HIS', 'ENTREATIES', 'AND', 'EVEN', 'HIS', 'OFFERS', 'UNHEARD', 'AND', 'DISREGARDED'] +8461-281231-0034-1628: hyp=['POOR', 'ISAAC', 'WAS', 'HURRIED', 'OFF', 'ACCORDINGLY', 'AND', 'EXPELLED', 'FROM', 'THE', 'PRECEPTORY', 'ALL', 'HIS', 'ENTREATIES', 'AND', 'EVEN', 'HIS', 'OFFERS', 'UNHEARD', 'AND', 'DISREGARDED'] +8461-281231-0035-1629: ref=['THE', 'ASSURANCE', 'THAT', 'SHE', 'POSSESSED', 'SOME', 'FRIEND', 'IN', 'THIS', 'AWFUL', 'ASSEMBLY', 'GAVE', 'HER', 'COURAGE', 'TO', 'LOOK', 'AROUND', 'AND', 'TO', 'MARK', 'INTO', 'WHOSE', 'PRESENCE', 'SHE', 'HAD', 'BEEN', 'CONDUCTED'] +8461-281231-0035-1629: hyp=['THE', 'ASSURANCE', 'THAT', 'SHE', 'POSSESSED', 'SOME', 'FRIEND', 'IN', 'HIS', 'AWFUL', 'ASSEMBLY', 'GAVE', 'A', 'COURAGE', 'TO', 'LOOK', 'ROUND', 'AND', 'TO', 'MARK', 'INTO', 'WHOSE', 'PRESENCE', 'SHE', 'HAD', 'BEEN', 'CONDUCTED'] +8461-281231-0036-1630: 
ref=['SHE', 'GAZED', 'ACCORDINGLY', 'UPON', 'A', 'SCENE', 'WHICH', 'MIGHT', 'WELL', 'HAVE', 'STRUCK', 'TERROR', 'INTO', 'A', 'BOLDER', 'HEART', 'THAN', 'HERS'] +8461-281231-0036-1630: hyp=['SHE', 'GAZED', 'ACCORDINGLY', 'UPON', 'A', 'SCENE', 'WHICH', 'MIGHT', 'WELL', 'HAVE', 'STRUCK', 'TERROR', 'INTO', 'A', 'BOLDER', 'HEART', 'THAN', 'HERS'] +8461-281231-0037-1631: ref=['AT', 'HIS', 'FEET', 'WAS', 'PLACED', 'A', 'TABLE', 'OCCUPIED', 'BY', 'TWO', 'SCRIBES', 'WHOSE', 'DUTY', 'IT', 'WAS', 'TO', 'RECORD', 'THE', 'PROCEEDINGS', 'OF', 'THE', 'DAY'] +8461-281231-0037-1631: hyp=['AT', 'HIS', 'FEET', 'WAS', 'PLACED', 'THE', 'TABLE', 'OCCUPIED', 'BY', 'TWO', 'SCRIBES', 'WHOSE', 'DUTY', 'WAS', 'TO', 'RECORD', 'THE', 'PROCEEDINGS', 'OF', 'THE', 'DAY'] +8461-281231-0038-1632: ref=['THE', 'PRECEPTORS', 'OF', 'WHOM', 'THERE', 'WERE', 'FOUR', 'PRESENT', 'OCCUPIED', 'SEATS', 'BEHIND', 'THEIR', 'SUPERIORS', 'AND', 'BEHIND', 'THEM', 'STOOD', 'THE', 'ESQUIRES', 'OF', 'THE', 'ORDER', 'ROBED', 'IN', 'WHITE'] +8461-281231-0038-1632: hyp=['THE', 'PRECEPTORS', 'OF', 'WHOM', 'THEY', 'WERE', 'FOUR', 'PRESENT', 'OCCUPIED', 'SEATS', 'BEHIND', 'THE', 'SUPERIORS', 'AND', 'BEHIND', 'THEM', 'STOOD', 'THE', 'ESQUIRES', 'OF', 'THE', 'ORDER', 'ROPED', 'IN', 'WHITE'] diff --git a/log/modified_beam_search/wer-summary-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt b/log/modified_beam_search/wer-summary-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..e513deb2a2441548b43bb31d2eb529ba56b4239e --- /dev/null +++ b/log/modified_beam_search/wer-summary-test-clean-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt @@ -0,0 +1,2 @@ +settings WER +beam_size_4 3.88 diff --git a/log/modified_beam_search/wer-summary-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt b/log/modified_beam_search/wer-summary-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt new file mode 100644 index 0000000000000000000000000000000000000000..33c80ddddcb447ad711dd877da215de2958e9fab --- /dev/null +++ b/log/modified_beam_search/wer-summary-test-other-beam_size_4-epoch-30-avg-9-streaming-chunk-size-32-modified_beam_search-beam-size-4-use-averaged-model.txt @@ -0,0 +1,2 @@ +settings WER +beam_size_4 9.53
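
The `wer-summary` files above condense each decoding run to a single WER figure, while the `errs-*` logs break that same number down into insertions, deletions, and substitutions per utterance. As a minimal, illustrative sketch (not the scoring code icefall actually uses), those counts can be reproduced from a `ref`/`hyp` word-list pair like the ones in the logs via standard Levenshtein alignment; the example pair below is copied verbatim from utterance 8188-269290-0056 above.

```python
# Illustrative WER sketch, assuming plain word lists as in the errs-* logs.
# This is NOT the icefall scorer; it is a minimal dynamic-programming
# edit-distance with a backtrack to classify each error type.
from typing import List, Tuple


def wer_counts(ref: List[str], hyp: List[str]) -> Tuple[int, int, int]:
    """Return (substitutions, deletions, insertions) for one optimal alignment."""
    # dp[i][j] = minimum edit cost between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            dp[i][j] = min(
                dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1]),  # match/substitute
                dp[i - 1][j] + 1,                               # delete ref word
                dp[i][j - 1] + 1,                               # insert hyp word
            )
    # Backtrack one optimal path, tallying each error type.
    i, j = len(ref), len(hyp)
    subs = dels = inss = 0
    while i > 0 or j > 0:
        if i > 0 and j > 0 and dp[i][j] == dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1]):
            subs += ref[i - 1] != hyp[j - 1]
            i, j = i - 1, j - 1
        elif i > 0 and dp[i][j] == dp[i - 1][j] + 1:
            dels += 1
            i -= 1
        else:
            inss += 1
            j -= 1
    return subs, dels, inss


# Example pair taken from the log above (utterance 8188-269290-0056).
ref = ['THE', 'GIRL', 'WHO', 'BREAKS', 'THE', 'RULES', 'HAS', 'TO', 'BE', 'PUNISHED']
hyp = ['THE', 'GIRL', 'WHO', 'BREAKS', 'THE', 'RULES', 'HAVE', 'TO', 'BE', 'PUNISHED']
s, d, n = wer_counts(ref, hyp)
print(f"%WER = {100.0 * (s + d + n) / len(ref):.2f}")  # one substitution -> 10.00
```

Summing these per-utterance counts over the whole test set and dividing by the total number of reference words yields the corpus-level figures in the summary files (3.88 on test-clean, 9.53 on test-other for modified beam search with beam size 4).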